Diffstat
-rw-r--r--  ObsoleteFiles.inc | 16
-rw-r--r--  contrib/llvm/LICENSE.TXT | 3
-rw-r--r--  contrib/llvm/include/llvm-c/Analysis.h | 10
-rw-r--r--  contrib/llvm/include/llvm-c/BitReader.h | 10
-rw-r--r--  contrib/llvm/include/llvm-c/BitWriter.h | 10
-rw-r--r--  contrib/llvm/include/llvm-c/Core.h | 1746
-rw-r--r--  contrib/llvm/include/llvm-c/Disassembler.h | 11
-rw-r--r--  contrib/llvm/include/llvm-c/EnhancedDisassembly.h | 17
-rw-r--r--  contrib/llvm/include/llvm-c/ExecutionEngine.h | 11
-rw-r--r--  contrib/llvm/include/llvm-c/Initialization.h | 14
-rw-r--r--  contrib/llvm/include/llvm-c/LinkTimeOptimizer.h | 11
-rw-r--r--  contrib/llvm/include/llvm-c/Object.h | 78
-rw-r--r--  contrib/llvm/include/llvm-c/Target.h | 65
-rw-r--r--  contrib/llvm/include/llvm-c/TargetMachine.h | 142
-rw-r--r--  contrib/llvm/include/llvm-c/Transforms/IPO.h | 11
-rw-r--r--  contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h | 11
-rw-r--r--  contrib/llvm/include/llvm-c/Transforms/Scalar.h | 10
-rw-r--r--  contrib/llvm/include/llvm-c/Transforms/Vectorize.h | 48
-rw-r--r--  contrib/llvm/include/llvm-c/lto.h | 51
-rw-r--r--  contrib/llvm/include/llvm/ADT/APFloat.h | 13
-rw-r--r--  contrib/llvm/include/llvm/ADT/APInt.h | 41
-rw-r--r--  contrib/llvm/include/llvm/ADT/ArrayRef.h | 129
-rw-r--r--  contrib/llvm/include/llvm/ADT/BitVector.h | 92
-rw-r--r--  contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h | 1
-rw-r--r--  contrib/llvm/include/llvm/ADT/DenseMap.h | 70
-rw-r--r--  contrib/llvm/include/llvm/ADT/DenseMapInfo.h | 2
-rw-r--r--  contrib/llvm/include/llvm/ADT/FoldingSet.h | 28
-rw-r--r--  contrib/llvm/include/llvm/ADT/GraphTraits.h | 5
-rw-r--r--  contrib/llvm/include/llvm/ADT/Hashing.h | 770
-rw-r--r--  contrib/llvm/include/llvm/ADT/ImmutableSet.h | 16
-rw-r--r--  contrib/llvm/include/llvm/ADT/IntervalMap.h | 4
-rw-r--r--  contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h | 25
-rw-r--r--  contrib/llvm/include/llvm/ADT/PointerIntPair.h | 6
-rw-r--r--  contrib/llvm/include/llvm/ADT/PointerUnion.h | 21
-rw-r--r--  contrib/llvm/include/llvm/ADT/SetVector.h | 6
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallBitVector.h | 6
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallPtrSet.h | 16
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallSet.h | 4
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallString.h | 233
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallVector.h | 108
-rw-r--r--  contrib/llvm/include/llvm/ADT/SparseBitVector.h | 43
-rw-r--r--  contrib/llvm/include/llvm/ADT/SparseSet.h | 268
-rw-r--r--  contrib/llvm/include/llvm/ADT/Statistic.h | 2
-rw-r--r--  contrib/llvm/include/llvm/ADT/StringExtras.h | 37
-rw-r--r--  contrib/llvm/include/llvm/ADT/StringMap.h | 67
-rw-r--r--  contrib/llvm/include/llvm/ADT/StringRef.h | 67
-rw-r--r--  contrib/llvm/include/llvm/ADT/TinyPtrVector.h | 42
-rw-r--r--  contrib/llvm/include/llvm/ADT/Trie.h | 6
-rw-r--r--  contrib/llvm/include/llvm/ADT/Triple.h | 196
-rw-r--r--  contrib/llvm/include/llvm/ADT/Twine.h | 3
-rw-r--r--  contrib/llvm/include/llvm/ADT/ValueMap.h | 24
-rw-r--r--  contrib/llvm/include/llvm/ADT/VariadicFunction.h | 331
-rw-r--r--  contrib/llvm/include/llvm/ADT/VectorExtras.h | 41
-rw-r--r--  contrib/llvm/include/llvm/ADT/edit_distance.h | 102
-rw-r--r--  contrib/llvm/include/llvm/ADT/ilist.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Analysis/AliasAnalysis.h | 7
-rw-r--r--  contrib/llvm/include/llvm/Analysis/AliasSetTracker.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h | 15
-rw-r--r--  contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h | 116
-rw-r--r--  contrib/llvm/include/llvm/Analysis/CFGPrinter.h | 12
-rw-r--r--  contrib/llvm/include/llvm/Analysis/CaptureTracking.h | 34
-rw-r--r--  contrib/llvm/include/llvm/Analysis/CodeMetrics.h | 87
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ConstantFolding.h | 26
-rw-r--r--  contrib/llvm/include/llvm/Analysis/DIBuilder.h | 42
-rw-r--r--  contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Analysis/DebugInfo.h | 92
-rw-r--r--  contrib/llvm/include/llvm/Analysis/DominanceFrontier.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Analysis/DominatorInternals.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Analysis/Dominators.h | 132
-rw-r--r--  contrib/llvm/include/llvm/Analysis/IVUsers.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Analysis/InlineCost.h | 193
-rw-r--r--  contrib/llvm/include/llvm/Analysis/InstructionSimplify.h | 97
-rw-r--r--  contrib/llvm/include/llvm/Analysis/IntervalIterator.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Analysis/LazyValueInfo.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Analysis/Loads.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Analysis/LoopInfo.h | 74
-rw-r--r--  contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Analysis/PHITransAddr.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ProfileInfo.h | 9
-rw-r--r--  contrib/llvm/include/llvm/Analysis/RegionInfo.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolution.h | 26
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h | 37
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ValueTracking.h | 58
-rw-r--r--  contrib/llvm/include/llvm/Argument.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Assembly/AssemblyAnnotationWriter.h | 20
-rw-r--r--  contrib/llvm/include/llvm/Assembly/Parser.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Assembly/Writer.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Attributes.h | 185
-rw-r--r--  contrib/llvm/include/llvm/AutoUpgrade.h | 8
-rw-r--r--  contrib/llvm/include/llvm/BasicBlock.h | 7
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/Archive.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/BitCodes.h | 13
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/BitstreamReader.h | 99
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h | 83
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h | 40
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/ReaderWriter.h | 49
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/Analysis.h | 7
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/AsmPrinter.h | 32
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/BinaryObject.h | 353
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/CallingConvLower.h | 12
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h | 167
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/EdgeBundles.h | 3
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/FastISel.h | 11
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h | 24
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/GCStrategy.h | 13
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h | 15
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h | 1
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h | 4
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LexicalScopes.h | 3
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h | 8
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveInterval.h | 54
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h | 256
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h (renamed from contrib/llvm/lib/CodeGen/LiveRangeEdit.h) | 69
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveVariables.h | 9
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h | 247
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h | 6
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h | 16
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h | 3
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h | 1
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineDominators.h | 3
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h | 2
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineFunction.h | 35
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h | 5
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstr.h | 376
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h | 52
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h | 203
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h | 7
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h | 13
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h | 16
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineOperand.h | 72
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h | 1
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h | 180
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineScheduler.h | 91
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ObjectCodeEmitter.h | 171
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h | 37
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h | 14
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h | 6
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/Passes.h | 369
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h | 14
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h | 142
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h | 73
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h | 344
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h | 16
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAG.h | 27
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h | 17
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 37
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SlotIndexes.h | 126
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h | 13
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ValueTypes.h | 117
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ValueTypes.td | 62
-rw-r--r--  contrib/llvm/include/llvm/Constant.h | 23
-rw-r--r--  contrib/llvm/include/llvm/Constants.h | 326
-rw-r--r--  contrib/llvm/include/llvm/DebugInfoProbe.h | 67
-rw-r--r--  contrib/llvm/include/llvm/DefaultPasses.h | 2
-rw-r--r--  contrib/llvm/include/llvm/DerivedTypes.h | 10
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h | 61
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h | 102
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h | 50
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h | 27
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h | 124
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h | 33
-rw-r--r--  contrib/llvm/include/llvm/Function.h | 8
-rw-r--r--  contrib/llvm/include/llvm/GlobalValue.h | 7
-rw-r--r--  contrib/llvm/include/llvm/InitializePasses.h | 27
-rw-r--r--  contrib/llvm/include/llvm/InlineAsm.h | 1
-rw-r--r--  contrib/llvm/include/llvm/InstrTypes.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Instruction.def | 125
-rw-r--r--  contrib/llvm/include/llvm/Instruction.h | 26
-rw-r--r--  contrib/llvm/include/llvm/Instructions.h | 326
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicInst.h | 28
-rw-r--r--  contrib/llvm/include/llvm/Intrinsics.td | 11
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicsAlpha.td | 18
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicsHexagon.td | 3671
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicsX86.td | 1176
-rw-r--r--  contrib/llvm/include/llvm/LLVMContext.h | 11
-rw-r--r--  contrib/llvm/include/llvm/LinkAllPasses.h | 5
-rw-r--r--  contrib/llvm/include/llvm/Linker.h | 3
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAsmBackend.h | 49
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAsmInfo.h | 44
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h | 14
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h | 4
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAsmLayout.h | 1
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAssembler.h | 47
-rw-r--r--  contrib/llvm/include/llvm/MC/MCCodeEmitter.h | 6
-rw-r--r--  contrib/llvm/include/llvm/MC/MCCodeGenInfo.h | 11
-rw-r--r--  contrib/llvm/include/llvm/MC/MCContext.h | 62
-rw-r--r--  contrib/llvm/include/llvm/MC/MCDisassembler.h | 2
-rw-r--r--  contrib/llvm/include/llvm/MC/MCDwarf.h | 67
-rw-r--r--  contrib/llvm/include/llvm/MC/MCELFObjectWriter.h | 75
-rw-r--r--  contrib/llvm/include/llvm/MC/MCExpr.h | 28
-rw-r--r--  contrib/llvm/include/llvm/MC/MCFixup.h | 19
-rw-r--r--  contrib/llvm/include/llvm/MC/MCInst.h | 28
-rw-r--r--  contrib/llvm/include/llvm/MC/MCInstPrinter.h | 11
-rw-r--r--  contrib/llvm/include/llvm/MC/MCInstrAnalysis.h | 2
-rw-r--r--  contrib/llvm/include/llvm/MC/MCInstrDesc.h | 208
-rw-r--r--  contrib/llvm/include/llvm/MC/MCInstrInfo.h | 17
-rw-r--r--  contrib/llvm/include/llvm/MC/MCObjectFileInfo.h | 44
-rw-r--r--  contrib/llvm/include/llvm/MC/MCObjectStreamer.h | 7
-rw-r--r--  contrib/llvm/include/llvm/MC/MCObjectWriter.h | 8
-rw-r--r--  contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h | 1
-rw-r--r--  contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h | 14
-rw-r--r--  contrib/llvm/include/llvm/MC/MCRegisterInfo.h | 184
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSection.h | 2
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSectionCOFF.h | 2
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSectionELF.h | 1
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSectionMachO.h | 1
-rw-r--r--  contrib/llvm/include/llvm/MC/MCStreamer.h | 49
-rw-r--r--  contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h | 36
-rw-r--r--  contrib/llvm/include/llvm/Metadata.h | 25
-rw-r--r--  contrib/llvm/include/llvm/Module.h | 64
-rw-r--r--  contrib/llvm/include/llvm/Object/Archive.h | 59
-rw-r--r--  contrib/llvm/include/llvm/Object/Binary.h | 51
-rw-r--r--  contrib/llvm/include/llvm/Object/COFF.h | 77
-rw-r--r--  contrib/llvm/include/llvm/Object/ELF.h | 2209
-rw-r--r--  contrib/llvm/include/llvm/Object/MachO.h | 37
-rw-r--r--  contrib/llvm/include/llvm/Object/MachOObject.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Object/ObjectFile.h | 275
-rw-r--r--  contrib/llvm/include/llvm/Operator.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Pass.h | 38
-rw-r--r--  contrib/llvm/include/llvm/PassAnalysisSupport.h | 1
-rw-r--r--  contrib/llvm/include/llvm/PassManager.h | 22
-rw-r--r--  contrib/llvm/include/llvm/PassManagers.h | 75
-rw-r--r--  contrib/llvm/include/llvm/PassSupport.h | 7
-rw-r--r--  contrib/llvm/include/llvm/Support/BlockFrequency.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Support/BranchProbability.h | 31
-rw-r--r--  contrib/llvm/include/llvm/Support/CFG.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Support/COFF.h | 294
-rw-r--r--  contrib/llvm/include/llvm/Support/CallSite.h | 10
-rw-r--r--  contrib/llvm/include/llvm/Support/Capacity.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Support/CodeGen.h | 20
-rw-r--r--  contrib/llvm/include/llvm/Support/CommandLine.h | 154
-rw-r--r--  contrib/llvm/include/llvm/Support/Compiler.h | 27
-rw-r--r--  contrib/llvm/include/llvm/Support/DOTGraphTraits.h | 30
-rw-r--r--  contrib/llvm/include/llvm/Support/DataStream.h | 38
-rw-r--r--  contrib/llvm/include/llvm/Support/DataTypes.h.in | 15
-rw-r--r--  contrib/llvm/include/llvm/Support/Debug.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Support/Dwarf.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Support/DynamicLibrary.h | 3
-rw-r--r--  contrib/llvm/include/llvm/Support/ELF.h | 50
-rw-r--r--  contrib/llvm/include/llvm/Support/Endian.h | 12
-rw-r--r--  contrib/llvm/include/llvm/Support/FileSystem.h | 228
-rw-r--r--  contrib/llvm/include/llvm/Support/GraphWriter.h | 14
-rw-r--r--  contrib/llvm/include/llvm/Support/Host.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Support/IRReader.h | 10
-rw-r--r--  contrib/llvm/include/llvm/Support/InstVisitor.h | 105
-rw-r--r--  contrib/llvm/include/llvm/Support/JSONParser.h | 448
-rw-r--r--  contrib/llvm/include/llvm/Support/LockFileManager.h | 74
-rw-r--r--  contrib/llvm/include/llvm/Support/MachO.h | 7
-rw-r--r--  contrib/llvm/include/llvm/Support/ManagedStatic.h | 5
-rw-r--r--  contrib/llvm/include/llvm/Support/MathExtras.h | 14
-rw-r--r--  contrib/llvm/include/llvm/Support/MemoryObject.h | 11
-rw-r--r--  contrib/llvm/include/llvm/Support/PathV1.h | 14
-rw-r--r--  contrib/llvm/include/llvm/Support/PatternMatch.h | 91
-rw-r--r--  contrib/llvm/include/llvm/Support/Process.h | 3
-rw-r--r--  contrib/llvm/include/llvm/Support/Program.h | 13
-rw-r--r--  contrib/llvm/include/llvm/Support/Recycler.h | 3
-rw-r--r--  contrib/llvm/include/llvm/Support/SMLoc.h | 22
-rw-r--r--  contrib/llvm/include/llvm/Support/SaveAndRestore.h (renamed from contrib/llvm/tools/clang/include/clang/Analysis/Support/SaveAndRestore.h) | 6
-rw-r--r--  contrib/llvm/include/llvm/Support/SourceMgr.h | 52
-rw-r--r--  contrib/llvm/include/llvm/Support/StreamableMemoryObject.h | 181
-rw-r--r--  contrib/llvm/include/llvm/Support/TargetRegistry.h | 50
-rw-r--r--  contrib/llvm/include/llvm/Support/TargetSelect.h | 12
-rw-r--r--  contrib/llvm/include/llvm/Support/Valgrind.h | 43
-rw-r--r--  contrib/llvm/include/llvm/Support/ValueHandle.h | 43
-rw-r--r--  contrib/llvm/include/llvm/Support/YAMLParser.h | 549
-rw-r--r--  contrib/llvm/include/llvm/Support/system_error.h | 15
-rw-r--r--  contrib/llvm/include/llvm/Support/type_traits.h | 95
-rw-r--r--  contrib/llvm/include/llvm/TableGen/Record.h | 222
-rw-r--r--  contrib/llvm/include/llvm/TableGen/TableGenAction.h | 1
-rw-r--r--  contrib/llvm/include/llvm/TableGen/TableGenBackend.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Target/Mangler.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Target/Target.td | 62
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetCallingConv.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetCallingConv.td | 11
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetData.h | 34
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h | 3
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetFrameLowering.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetInstrInfo.h | 128
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetJITInfo.h | 21
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetLibraryInfo.h | 248
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetLowering.h | 138
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h | 30
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetMachine.h | 138
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetOpcodes.h | 7
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetOptions.h | 294
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetRegisterInfo.h | 124
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetSelectionDAG.td | 47
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Transforms/IPO.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h | 27
-rw-r--r--  contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h | 20
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Instrumentation.h | 9
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Scalar.h | 14
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h | 5
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/BasicInliner.h | 55
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/Cloning.h | 23
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h | 66
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h | 33
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h | 21
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h | 51
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h | 16
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h | 5
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Vectorize.h | 106
-rw-r--r--  contrib/llvm/include/llvm/Type.h | 150
-rw-r--r--  contrib/llvm/include/llvm/User.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Value.h | 47
-rw-r--r--  contrib/llvm/lib/Analysis/AliasAnalysis.cpp | 16
-rw-r--r--  contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp | 6
-rw-r--r--  contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp | 4
-rw-r--r--  contrib/llvm/lib/Analysis/AliasSetTracker.cpp | 4
-rw-r--r--  contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp | 59
-rw-r--r--  contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp | 401
-rw-r--r--  contrib/llvm/lib/Analysis/CFGPrinter.cpp | 8
-rw-r--r--  contrib/llvm/lib/Analysis/CaptureTracking.cpp | 95
-rw-r--r--  contrib/llvm/lib/Analysis/CodeMetrics.cpp | 184
-rw-r--r--  contrib/llvm/lib/Analysis/ConstantFolding.cpp | 373
-rw-r--r--  contrib/llvm/lib/Analysis/DIBuilder.cpp | 112
-rw-r--r--  contrib/llvm/lib/Analysis/DebugInfo.cpp | 110
-rw-r--r--  contrib/llvm/lib/Analysis/DominanceFrontier.cpp | 2
-rw-r--r--  contrib/llvm/lib/Analysis/IPA/CallGraph.cpp | 13
-rw-r--r--  contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp | 6
-rw-r--r--  contrib/llvm/lib/Analysis/IVUsers.cpp | 67
-rw-r--r--  contrib/llvm/lib/Analysis/InlineCost.cpp | 1439
-rw-r--r--  contrib/llvm/lib/Analysis/InstructionSimplify.cpp | 1167
-rw-r--r--  contrib/llvm/lib/Analysis/LazyValueInfo.cpp | 123
-rw-r--r--  contrib/llvm/lib/Analysis/Lint.cpp | 19
-rw-r--r--  contrib/llvm/lib/Analysis/Loads.cpp | 16
-rw-r--r--  contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp | 2
-rw-r--r--  contrib/llvm/lib/Analysis/LoopInfo.cpp | 127
-rw-r--r--  contrib/llvm/lib/Analysis/LoopPass.cpp | 23
-rw-r--r--  contrib/llvm/lib/Analysis/MemDepPrinter.cpp | 2
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryBuiltins.cpp | 8
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 94
-rw-r--r--  contrib/llvm/lib/Analysis/PHITransAddr.cpp | 13
-rw-r--r--  contrib/llvm/lib/Analysis/PathNumbering.cpp | 4
-rw-r--r--  contrib/llvm/lib/Analysis/PathProfileVerifier.cpp | 16
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp | 2
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp | 4
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp | 18
-rw-r--r--  contrib/llvm/lib/Analysis/RegionInfo.cpp | 8
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolution.cpp | 466
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 497
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp | 2
-rw-r--r--  contrib/llvm/lib/Analysis/SparsePropagation.cpp | 8
-rw-r--r--  contrib/llvm/lib/Analysis/Trace.cpp | 2
-rw-r--r--  contrib/llvm/lib/Analysis/ValueTracking.cpp | 989
-rw-r--r--  contrib/llvm/lib/Archive/ArchiveReader.cpp | 33
-rw-r--r--  contrib/llvm/lib/Archive/ArchiveWriter.cpp | 6
-rw-r--r--  contrib/llvm/lib/AsmParser/LLLexer.cpp | 62
-rw-r--r--  contrib/llvm/lib/AsmParser/LLLexer.h | 1
-rw-r--r--  contrib/llvm/lib/AsmParser/LLParser.cpp | 127
-rw-r--r--  contrib/llvm/lib/AsmParser/LLParser.h | 15
-rw-r--r--  contrib/llvm/lib/AsmParser/LLToken.h | 4
-rw-r--r--  contrib/llvm/lib/AsmParser/Parser.cpp | 2
-rw-r--r--  contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 680
-rw-r--r--  contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h | 40
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 318
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp | 44
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp | 29
-rw-r--r--  contrib/llvm/lib/CodeGen/AllocationOrder.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/AllocationOrder.h | 3
-rw-r--r--  contrib/llvm/lib/CodeGen/Analysis.cpp | 73
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp | 19
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 302
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp | 45
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp | 42
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h | 87
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp | 287
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h | 290
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp | 299
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h | 68
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 427
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h | 56
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp | 46
-rw-r--r--  contrib/llvm/lib/CodeGen/BranchFolding.cpp | 129
-rw-r--r--  contrib/llvm/lib/CodeGen/CallingConvLower.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/CodeGen.cpp | 17
-rw-r--r--  contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp | 9
-rw-r--r--  contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp | 72
-rw-r--r--  contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h | 3
-rw-r--r--  contrib/llvm/lib/CodeGen/DFAPacketizer.cpp | 223
-rw-r--r--  contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp | 31
-rw-r--r--  contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp | 703
-rw-r--r--  contrib/llvm/lib/CodeGen/ELF.h | 227
-rw-r--r--  contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp | 205
-rw-r--r--  contrib/llvm/lib/CodeGen/ELFCodeEmitter.h | 78
-rw-r--r--  contrib/llvm/lib/CodeGen/ELFWriter.cpp | 1105
-rw-r--r--  contrib/llvm/lib/CodeGen/ELFWriter.h | 251
-rw-r--r--  contrib/llvm/lib/CodeGen/EdgeBundles.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp | 526
-rw-r--r--  contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp | 12
-rw-r--r--  contrib/llvm/lib/CodeGen/GCMetadata.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/GCStrategy.cpp | 160
-rw-r--r--  contrib/llvm/lib/CodeGen/IfConversion.cpp | 132
-rw-r--r--  contrib/llvm/lib/CodeGen/InlineSpiller.cpp | 191
-rw-r--r--  contrib/llvm/lib/CodeGen/InterferenceCache.cpp | 35
-rw-r--r--  contrib/llvm/lib/CodeGen/InterferenceCache.h | 15
-rw-r--r--  contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/JITCodeEmitter.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp | 414
-rw-r--r--  contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/LexicalScopes.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp | 23
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveInterval.cpp | 36
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp | 2012
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveIntervalUnion.h | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp | 110
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveVariables.cpp | 114
-rw-r--r--  contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp | 10
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp | 219
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp | 1001
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp | 65
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineCSE.cpp | 108
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineCodeEmitter.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp | 340
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineFunction.cpp | 93
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineInstr.cpp | 254
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp | 278
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineLICM.cpp | 608
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp | 1
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp | 65
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp | 7
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineScheduler.cpp | 614
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineSink.cpp | 182
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineVerifier.cpp | 462
-rw-r--r--  contrib/llvm/lib/CodeGen/ObjectCodeEmitter.cpp | 141
-rw-r--r--  contrib/llvm/lib/CodeGen/OptimizePHIs.cpp | 9
-rw-r--r--  contrib/llvm/lib/CodeGen/PHIElimination.cpp | 13
-rw-r--r--  contrib/llvm/lib/CodeGen/Passes.cpp | 607
-rw-r--r--  contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp | 35
-rw-r--r--  contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp | 256
-rw-r--r--  contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp | 36
-rw-r--r--  contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp | 41
-rw-r--r--  contrib/llvm/lib/CodeGen/PrologEpilogInserter.h | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocBase.cpp | 280
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocBase.h | 36
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocBasic.cpp | 316
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocFast.cpp | 321
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp | 110
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp | 1543
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp | 173
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp | 16
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterClassInfo.h | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp | 252
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterCoalescer.h | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterScavenging.cpp | 67
-rw-r--r--  contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp | 19
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAG.cpp | 62
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp | 68
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp | 654
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h | 212
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp | 24
-rw-r--r--  contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 1120
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 215
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 67
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 117
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 1251
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 10
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 134
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 20
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp | 13
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 140
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 163
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp | 657
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp | 30
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 650
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | 130
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h | 40
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp (renamed from contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp) | 85
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 1050
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 629
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp | 631
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 268
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 215
-rw-r--r--  contrib/llvm/lib/CodeGen/ShadowStackGC.cpp | 3
-rw-r--r--  contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp | 7
-rw-r--r--  contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp | 775
-rw-r--r--  contrib/llvm/lib/CodeGen/SlotIndexes.cpp | 12
-rw-r--r--  contrib/llvm/lib/CodeGen/Spiller.cpp | 81
-rw-r--r--  contrib/llvm/lib/CodeGen/Spiller.h | 1
-rw-r--r--  contrib/llvm/lib/CodeGen/SplitKit.cpp | 85
-rw-r--r--  contrib/llvm/lib/CodeGen/SplitKit.h | 15
-rw-r--r--  contrib/llvm/lib/CodeGen/Splitter.cpp | 827
-rw-r--r--  contrib/llvm/lib/CodeGen/Splitter.h | 101
-rw-r--r--  contrib/llvm/lib/CodeGen/StackProtector.cpp | 7
-rw-r--r--  contrib/llvm/lib/CodeGen/StackSlotColoring.cpp | 358
-rw-r--r--  contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/TailDuplication.cpp | 39
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp (renamed from contrib/llvm/lib/Target/TargetFrameLowering.cpp) | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp | 106
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 123
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp | 52
-rw-r--r--  contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp | 525
-rw-r--r--  contrib/llvm/lib/CodeGen/VirtRegMap.cpp | 168
-rw-r--r--  contrib/llvm/lib/CodeGen/VirtRegMap.h | 335
-rw-r--r--  contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp | 2633
-rw-r--r--  contrib/llvm/lib/CodeGen/VirtRegRewriter.h | 32
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFContext.cpp | 2
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFContext.h | 1
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp | 2
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h | 6
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp | 5
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp | 7
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp | 4
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h | 4
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp | 7
-rw-r--r--  contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp | 20
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h | 67
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp | 110
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp | 2
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp | 183
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp | 29
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp | 31
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h | 7
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp | 162
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp | 45
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JIT.h | 11
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp | 211
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h | 116
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp | 2
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp | 43
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp | 194
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/Intercept.cpp | 162
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp | 41
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h | 17
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp | 14
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h | 47
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp (renamed from contrib/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp) | 99
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp | 263
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp | 446
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp | 262
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h | 62
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h | 236
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp | 577
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h | 70
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp | 41
-rw-r--r--  contrib/llvm/lib/Linker/LinkArchives.cpp | 5
-rw-r--r--  contrib/llvm/lib/Linker/LinkModules.cpp | 513
-rw-r--r--  contrib/llvm/lib/Linker/Linker.cpp | 1
-rw-r--r--  contrib/llvm/lib/MC/ELFObjectWriter.cpp | 935
-rw-r--r--  contrib/llvm/lib/MC/ELFObjectWriter.h | 446
-rw-r--r--  contrib/llvm/lib/MC/MCAsmBackend.cpp | 21
-rw-r--r--  contrib/llvm/lib/MC/MCAsmInfo.cpp | 4
-rw-r--r--  contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp | 15
-rw-r--r--  contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp | 7
-rw-r--r--  contrib/llvm/lib/MC/MCAsmStreamer.cpp | 106
-rw-r--r--  contrib/llvm/lib/MC/MCAssembler.cpp | 124
-rw-r--r--  contrib/llvm/lib/MC/MCCodeGenInfo.cpp | 4
-rw-r--r--  contrib/llvm/lib/MC/MCContext.cpp | 60
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp | 18
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/Disassembler.h | 10
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp | 110
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h | 23
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp | 280
-rw-r--r--  contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp | 20
-rw-r--r--  contrib/llvm/lib/MC/MCDwarf.cpp | 459
-rw-r--r--  contrib/llvm/lib/MC/MCELF.cpp | 4
-rw-r--r--  contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp | 29
-rw-r--r--  contrib/llvm/lib/MC/MCELFStreamer.cpp | 158
-rw-r--r--  contrib/llvm/lib/MC/MCELFStreamer.h | 141
-rw-r--r--  contrib/llvm/lib/MC/MCExpr.cpp | 42
-rw-r--r--  contrib/llvm/lib/MC/MCInst.cpp | 2
-rw-r--r--  contrib/llvm/lib/MC/MCInstPrinter.cpp | 6
-rw-r--r--  contrib/llvm/lib/MC/MCLoggingStreamer.cpp | 250
-rw-r--r--  contrib/llvm/lib/MC/MCMachOStreamer.cpp | 27
-rw-r--r--  contrib/llvm/lib/MC/MCModule.cpp | 2
-rw-r--r--  contrib/llvm/lib/MC/MCNullStreamer.cpp | 14
-rw-r--r--  contrib/llvm/lib/MC/MCObjectFileInfo.cpp | 101
-rw-r--r--  contrib/llvm/lib/MC/MCObjectStreamer.cpp | 52
-rw-r--r--  contrib/llvm/lib/MC/MCObjectWriter.cpp | 14
-rw-r--r--  contrib/llvm/lib/MC/MCParser/AsmParser.cpp | 277
-rw-r--r--  contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp | 17
-rw-r--r--  contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp | 1
-rw-r--r--  contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp | 4
-rw-r--r--  contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp | 4
-rw-r--r--  contrib/llvm/lib/MC/MCPureStreamer.cpp | 15
-rw-r--r--  contrib/llvm/lib/MC/MCStreamer.cpp | 109
-rw-r--r--  contrib/llvm/lib/MC/MCSymbol.cpp | 7
-rw-r--r--  contrib/llvm/lib/MC/MachObjectWriter.cpp | 15
-rw-r--r--  contrib/llvm/lib/MC/SubtargetFeature.cpp | 24
-rw-r--r--  contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp | 89
-rw-r--r--  contrib/llvm/lib/MC/WinCOFFStreamer.cpp | 20
-rw-r--r--  contrib/llvm/lib/Object/Archive.cpp | 142
-rw-r--r--  contrib/llvm/lib/Object/COFFObjectFile.cpp | 295
-rw-r--r--  contrib/llvm/lib/Object/ELFObjectFile.cpp | 1411
-rw-r--r--  contrib/llvm/lib/Object/MachOObject.cpp | 35
-rw-r--r--  contrib/llvm/lib/Object/MachOObjectFile.cpp | 694
-rw-r--r--  contrib/llvm/lib/Object/Object.cpp | 150
-rw-r--r--  contrib/llvm/lib/Object/ObjectFile.cpp | 4
-rw-r--r--  contrib/llvm/lib/Support/APFloat.cpp | 123
-rw-r--r--  contrib/llvm/lib/Support/APInt.cpp | 219
-rw-r--r--  contrib/llvm/lib/Support/Allocator.cpp | 4
-rw-r--r--  contrib/llvm/lib/Support/Atomic.cpp | 2
-rw-r--r--  contrib/llvm/lib/Support/BlockFrequency.cpp | 12
-rw-r--r--  contrib/llvm/lib/Support/BranchProbability.cpp | 13
-rw-r--r--  contrib/llvm/lib/Support/CommandLine.cpp | 26
-rw-r--r--  contrib/llvm/lib/Support/ConstantRange.cpp | 100
-rw-r--r--  contrib/llvm/lib/Support/CrashRecoveryContext.cpp | 1
-rw-r--r--  contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp | 3
-rw-r--r--  contrib/llvm/lib/Support/DataExtractor.cpp | 2
-rw-r--r--  contrib/llvm/lib/Support/DataStream.cpp | 98
-rw-r--r--  contrib/llvm/lib/Support/Dwarf.cpp | 2
-rw-r--r--  contrib/llvm/lib/Support/FileUtilities.cpp | 1
-rw-r--r--  contrib/llvm/lib/Support/FoldingSet.cpp | 26
-rw-r--r--  contrib/llvm/lib/Support/GraphWriter.cpp | 77
-rw-r--r--  contrib/llvm/lib/Support/Hashing.cpp | 29
-rw-r--r--  contrib/llvm/lib/Support/Host.cpp | 13
-rw-r--r--  contrib/llvm/lib/Support/IntrusiveRefCntPtr.cpp | 14
-rw-r--r--  contrib/llvm/lib/Support/JSONParser.cpp | 302
-rw-r--r--  contrib/llvm/lib/Support/LockFileManager.cpp | 216
-rw-r--r--  contrib/llvm/lib/Support/ManagedStatic.cpp | 8
-rw-r--r--  contrib/llvm/lib/Support/MemoryBuffer.cpp | 38
-rw-r--r--  contrib/llvm/lib/Support/Mutex.cpp | 119
-rw-r--r--  contrib/llvm/lib/Support/Path.cpp | 36
-rw-r--r--  contrib/llvm/lib/Support/PathV2.cpp | 179
-rw-r--r--  contrib/llvm/lib/Support/Program.cpp | 1
-rw-r--r--  contrib/llvm/lib/Support/RWMutex.cpp | 104
-rw-r--r--  contrib/llvm/lib/Support/SmallPtrSet.cpp | 51
-rw-r--r--  contrib/llvm/lib/Support/SourceMgr.cpp | 141
-rw-r--r--  contrib/llvm/lib/Support/Statistic.cpp | 16
-rw-r--r--  contrib/llvm/lib/Support/StreamableMemoryObject.cpp | 140
-rw-r--r--  contrib/llvm/lib/Support/StringExtras.cpp | 21
-rw-r--r--  contrib/llvm/lib/Support/StringMap.cpp | 61
-rw-r--r--  contrib/llvm/lib/Support/StringRef.cpp | 168
-rw-r--r--  contrib/llvm/lib/Support/TargetRegistry.cpp | 2
-rw-r--r--  contrib/llvm/lib/Support/ThreadLocal.cpp | 2
-rw-r--r--  contrib/llvm/lib/Support/Threading.cpp | 8
-rw-r--r--  contrib/llvm/lib/Support/Timer.cpp | 16
-rw-r--r--  contrib/llvm/lib/Support/Triple.cpp | 642
-rw-r--r--  contrib/llvm/lib/Support/Unix/Host.inc | 23
-rw-r--r--  contrib/llvm/lib/Support/Unix/Path.inc | 16
-rw-r--r--  contrib/llvm/lib/Support/Unix/PathV2.inc | 85
-rw-r--r--  contrib/llvm/lib/Support/Unix/Process.inc | 8
-rw-r--r--  contrib/llvm/lib/Support/Unix/Program.inc | 12
-rw-r--r--  contrib/llvm/lib/Support/Unix/Signals.inc | 20
-rw-r--r--  contrib/llvm/lib/Support/Valgrind.cpp | 13
-rw-r--r--  contrib/llvm/lib/Support/Windows/Host.inc | 5
-rw-r--r--  contrib/llvm/lib/Support/Windows/Path.inc | 35
-rw-r--r--  contrib/llvm/lib/Support/Windows/PathV2.inc | 170
-rw-r--r--  contrib/llvm/lib/Support/Windows/Process.inc | 4
-rw-r--r--  contrib/llvm/lib/Support/Windows/Program.inc | 36
-rw-r--r--  contrib/llvm/lib/Support/Windows/Signals.inc | 4
-rw-r--r--  contrib/llvm/lib/Support/Windows/Windows.h | 106
-rw-r--r--  contrib/llvm/lib/Support/YAMLParser.cpp | 2117
-rw-r--r--  contrib/llvm/lib/Support/raw_ostream.cpp | 1
-rw-r--r--  contrib/llvm/lib/TableGen/Error.cpp | 4
-rw-r--r--  contrib/llvm/lib/TableGen/Record.cpp | 173
-rw-r--r--  contrib/llvm/lib/TableGen/TGLexer.cpp | 57
-rw-r--r--  contrib/llvm/lib/TableGen/TGLexer.h | 6
-rw-r--r--  contrib/llvm/lib/TableGen/TGParser.cpp | 554
-rw-r--r--  contrib/llvm/lib/TableGen/TGParser.h | 73
-rw-r--r--  contrib/llvm/lib/TableGen/TableGenAction.cpp | 15
-rw-r--r--  contrib/llvm/lib/TableGen/TableGenBackend.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARM.h | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARM.td | 20
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 165
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h | 5
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 665
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h | 18
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp | 220
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h | 28
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMCallingConv.h | 21
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMCallingConv.td | 38
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp | 25
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 1323
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp | 8
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp | 33
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h | 1
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp | 581
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMFastISel.cpp | 1317
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp | 454
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMFrameLowering.h | 5
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp | 28
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h | 8
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 284
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp | 1150
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelLowering.h | 29
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrFormats.td | 112
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp | 19
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.h | 8
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.td | 753
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrNEON.td | 3103
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrThumb.td | 127
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td | 405
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrVFP.td | 494
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp | 6
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMJITInfo.h | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 214
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp | 10
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h | 19
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp | 6
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h | 6
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td | 188
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMRelocations.h | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSchedule.td | 10
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMScheduleA8.td | 19
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMScheduleA9.td | 36
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMScheduleV6.td | 12
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp | 10
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp | 12
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSubtarget.h | 18
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp | 108
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMTargetMachine.h | 24
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp | 34
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp | 9
-rw-r--r--  contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 3562
-rw-r--r--  contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp | 1359
-rw-r--r--  contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp | 192
-rw-r--r--  contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h | 34
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h | 11
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp | 180
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h | 36
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp | 283
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h | 24
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp | 6
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h | 10
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp | 76
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp | 5
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp | 24
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp | 116
-rw-r--r--  contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp | 10
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp | 19
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp | 22
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h | 8
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp | 35
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp | 21
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp | 31
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h | 8
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp | 6
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp | 141
-rw-r--r--  contrib/llvm/lib/Target/Alpha/Alpha.h | 43
-rw-r--r--  contrib/llvm/lib/Target/Alpha/Alpha.td | 68
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaAsmPrinter.cpp | 166
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaBranchSelector.cpp | 66
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaCallingConv.td | 38
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.cpp | 143
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.h | 43
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp | 425
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp | 962
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h | 142
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td | 268
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp | 382
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h | 85
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td | 1159
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaLLRP.cpp | 158
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaMachineFunctionInfo.h | 62
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp | 199
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h | 56
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.td | 133
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaRelocations.h | 31
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaSchedule.td | 85
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaSelectionDAGInfo.cpp | 23
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaSelectionDAGInfo.h | 31
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaSubtarget.cpp | 35
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaSubtarget.h | 49
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp | 51
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h | 66
-rw-r--r--  contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCAsmInfo.cpp | 23
-rw-r--r--  contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCTargetDesc.cpp | 78
-rw-r--r--  contrib/llvm/lib/Target/Alpha/TargetInfo/AlphaTargetInfo.cpp | 20
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/Blackfin.h | 31
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/Blackfin.td | 202
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinAsmPrinter.cpp | 156
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinCallingConv.td | 30
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.cpp | 130
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.h | 47
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp | 180
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp | 645
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h | 83
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinInstrFormats.td | 34
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp | 256
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h | 81
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td | 862
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp | 104
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsicInfo.h | 32
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsics.td | 34
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp | 344
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h | 77
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td | 277
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinSelectionDAGInfo.cpp | 24
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinSelectionDAGInfo.h | 31
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinSubtarget.cpp | 44
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinSubtarget.h | 49
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp | 43
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h | 68
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCAsmInfo.cpp | 22
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCAsmInfo.h | 29
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCTargetDesc.cpp | 81
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCTargetDesc.h | 38
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/TargetInfo/BlackfinTargetInfo.cpp | 21
-rw-r--r--  contrib/llvm/lib/Target/CBackend/CBackend.cpp | 3617
-rw-r--r--  contrib/llvm/lib/Target/CBackend/CTargetMachine.h | 42
-rw-r--r--  contrib/llvm/lib/Target/CBackend/TargetInfo/CBackendTargetInfo.cpp | 21
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/CellSDKIntrinsics.td | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.cpp | 8
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.h | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPU.h | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPU.td | 4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPU128InstrInfo.td | 4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp | 3
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td | 4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp | 5
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp | 18
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp | 142
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h | 5
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUInstrBuilder.h | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUInstrFormats.td | 6
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.h | 3
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUMathInstr.td | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUNodes.td | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUOperands.td | 6
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp | 8
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.td | 6
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUSchedule.td | 6
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp | 3
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp | 41
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h | 20
-rw-r--r--  contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp | 124
-rw-r--r--  contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h | 8
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/Hexagon.h | 74
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/Hexagon.td | 72
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp | 313
-rwxr-xr-x  contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h | 165
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp | 235
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td | 35
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp | 207
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h | 189
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp | 177
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp | 332
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h | 50
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp | 644
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp | 1485
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 1496
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h | 162
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td | 508
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td | 308
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td | 67
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 2732
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h | 185
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td | 3052
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td | 137
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td | 5746
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td | 3462
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td | 29
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV3.td | 50
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td | 369
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h | 41
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp | 93
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h | 75
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp | 288
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp | 315
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h | 90
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td | 167
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp | 82
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td | 54
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td | 59
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSelectCCInfo.td | 121
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp | 46
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h | 40
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp | 129
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp | 62
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h | 74
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp | 145
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h | 83
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp | 94
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h | 40
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp | 3642
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h | 141
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp | 198
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h | 75
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h | 70
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp | 36
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h (renamed from contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCAsmInfo.h) | 13
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp | 95
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h (renamed from contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCTargetDesc.h) | 21
-rw-r--r--  contrib/llvm/lib/Target/Hexagon/TargetInfo/HexagonTargetInfo.cpp (renamed from contrib/llvm/lib/Target/SystemZ/TargetInfo/SystemZTargetInfo.cpp) | 10
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp | 10
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp | 3
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp | 31
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h | 6
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h | 8
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlaze.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp | 8
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp | 22
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp | 12
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h | 1
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp | 39
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h | 3
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp | 24
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h | 6
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td | 26
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td | 171
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp | 16
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.h | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp | 11
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h | 3
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h | 5
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp | 10
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeSchedule3.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeSchedule5.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.h | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp | 51
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h | 9
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp | 53
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h | 5
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp | 77
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp | 19
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp | 13
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h | 8
-rw-r--r--  contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp | 1
-rw-r--r--  contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h | 7
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp | 3
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h | 8
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp | 11
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.h | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp | 6
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 40
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h | 6
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrFormats.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp | 32
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp | 11
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h | 3
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.h | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h | 11
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h | 6
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp35
-rw-r--r--contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h10
-rw-r--r--contrib/llvm/lib/Target/Mangler.cpp11
-rw-r--r--contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp66
-rw-r--r--contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp70
-rw-r--r--contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h16
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp260
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h126
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp249
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h122
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp7
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h5
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp256
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp45
-rw-r--r--contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h25
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips.h2
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips.td10
-rw-r--r--contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td237
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp153
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.h63
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp230
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h20
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsCallingConv.td56
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp223
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsCondMov.td194
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp33
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp13
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp20
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp204
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsFrameLowering.h2
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp460
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp1297
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsISelLowering.h25
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrFPU.td268
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrFormats.td41
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp107
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.h91
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsInstrInfo.td654
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp17
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsJITInfo.h4
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp337
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMCInstLower.h20
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMCSymbolRefExpr.cpp70
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMCSymbolRefExpr.h67
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp50
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsMachineFunction.h26
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp256
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h7
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td136
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsRelocations.h10
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSchedule.td2
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp22
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsSubtarget.h8
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp120
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetMachine.h42
-rw-r--r--contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp2
-rw-r--r--contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp77
-rw-r--r--contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h8
-rw-r--r--contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h71
-rw-r--r--contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.h6
-rw-r--r--contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp11
-rw-r--r--contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.h2
-rw-r--r--contrib/llvm/lib/Target/PTX/PTX.h1
-rw-r--r--contrib/llvm/lib/Target/PTX/PTX.td2
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp344
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h4
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp6
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXFrameLowering.h2
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp168
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXISelLowering.h9
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrFormats.td2
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp25
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrInfo.h2
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrInfo.td46
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td2
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td2
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp30
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp23
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h158
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXParamManager.cpp4
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXParamManager.h3
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp7
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp46
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h7
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td3
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp5
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp6
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXSubtarget.h3
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp338
-rw-r--r--contrib/llvm/lib/Target/PTX/PTXTargetMachine.h35
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp9
-rw-r--r--contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp75
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCBaseInfo.h2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp103
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp6
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp12
-rw-r--r--contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPC.h5
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPC.td16
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp52
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td35
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp38
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp189
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h28
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp12
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp252
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h36
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td102
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td54
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td63
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp129
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h13
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td97
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp8
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp4
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp15
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h3
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCPerfectShuffle.h2
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp282
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h10
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td6
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCRelocations.h6
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSchedule.td71
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td616
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td652
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td9
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td9
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td9
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td9
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp24
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h12
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp75
-rw-r--r--contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h33
-rw-r--r--contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp18
-rw-r--r--contrib/llvm/lib/Target/Sparc/FPMover.cpp8
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h8
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp8
-rw-r--r--contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h2
-rw-r--r--contrib/llvm/lib/Target/Sparc/Sparc.h5
-rw-r--r--contrib/llvm/lib/Target/Sparc/Sparc.td6
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp22
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcCallingConv.td6
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp2
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h2
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp1
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp50
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcISelLowering.h8
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td6
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp19
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h11
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td6
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h1
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp12
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h4
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td7
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp4
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcSubtarget.h3
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp65
-rw-r--r--contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h18
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp32
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h30
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp81
-rw-r--r--contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h38
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZ.h52
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZ.td61
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp221
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td46
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp386
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h57
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp779
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp868
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h145
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h128
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td340
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td133
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp439
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h113
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td1147
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h51
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZOperands.td325
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp143
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h60
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td205
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp23
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h31
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp54
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h48
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp40
-rw-r--r--contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h68
-rw-r--r--contrib/llvm/lib/Target/TargetData.cpp123
-rw-r--r--contrib/llvm/lib/Target/TargetInstrInfo.cpp42
-rw-r--r--contrib/llvm/lib/Target/TargetJITInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/TargetLibraryInfo.cpp112
-rw-r--r--contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp38
-rw-r--r--contrib/llvm/lib/Target/TargetMachine.cpp228
-rw-r--r--contrib/llvm/lib/Target/TargetMachineC.cpp197
-rw-r--r--contrib/llvm/lib/Target/TargetRegisterInfo.cpp4
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp7
-rw-r--r--contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp730
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp235
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h46
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c80
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h15
-rw-r--r--contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h23
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp78
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h10
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp329
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.h2
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp72
-rw-r--r--contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h9
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp80
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h111
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp224
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h2
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp25
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h22
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp377
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp105
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h11
-rw-r--r--contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp65
-rw-r--r--contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp193
-rw-r--r--contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h84
-rw-r--r--contrib/llvm/lib/Target/X86/X86.h2
-rw-r--r--contrib/llvm/lib/Target/X86/X86.td114
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp48
-rw-r--r--contrib/llvm/lib/Target/X86/X86AsmPrinter.h5
-rw-r--r--contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp2
-rw-r--r--contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h4
-rw-r--r--contrib/llvm/lib/Target/X86/X86CallingConv.td58
-rw-r--r--contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp18
-rw-r--r--contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp8
-rw-r--r--contrib/llvm/lib/Target/X86/X86FastISel.cpp75
-rw-r--r--contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp33
-rw-r--r--contrib/llvm/lib/Target/X86/X86FrameLowering.cpp382
-rw-r--r--contrib/llvm/lib/Target/X86/X86FrameLowering.h2
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp548
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.cpp5371
-rw-r--r--contrib/llvm/lib/Target/X86/X86ISelLowering.h202
-rw-r--r--contrib/llvm/lib/Target/X86/X86Instr3DNow.td2
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrArithmetic.td319
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrBuilder.h1
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td28
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrCompiler.td288
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrControl.td196
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrExtension.td84
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFMA.td204
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFPStack.td63
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFormats.td340
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td215
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.cpp441
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.h9
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrInfo.td223
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrMMX.td30
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSSE.td6021
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSVM.td62
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td548
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrSystem.td72
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrVMX.td38
-rw-r--r--contrib/llvm/lib/Target/X86/X86InstrXOP.td307
-rw-r--r--contrib/llvm/lib/Target/X86/X86JITInfo.cpp8
-rw-r--r--contrib/llvm/lib/Target/X86/X86JITInfo.h2
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCInstLower.cpp45
-rw-r--r--contrib/llvm/lib/Target/X86/X86MCInstLower.h2
-rw-r--r--contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h8
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp216
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.h5
-rw-r--r--contrib/llvm/lib/Target/X86/X86RegisterInfo.td2
-rw-r--r--contrib/llvm/lib/Target/X86/X86Relocations.h2
-rw-r--r--contrib/llvm/lib/Target/X86/X86Schedule.td273
-rw-r--r--contrib/llvm/lib/Target/X86/X86ScheduleAtom.td305
-rw-r--r--contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp3
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.cpp131
-rw-r--r--contrib/llvm/lib/Target/X86/X86Subtarget.h82
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetMachine.cpp77
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetMachine.h44
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp3
-rw-r--r--contrib/llvm/lib/Target/X86/X86TargetObjectFile.h3
-rw-r--r--contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp234
-rw-r--r--contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp3
-rw-r--r--contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h5
-rw-r--r--contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp8
-rw-r--r--contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.h2
-rw-r--r--contrib/llvm/lib/Target/XCore/XCore.h3
-rw-r--r--contrib/llvm/lib/Target/XCore/XCore.td3
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp11
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h2
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp13
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp69
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreISelLowering.h8
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrFormats.td2
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp4
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h4
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td2
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp14
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h4
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp24
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h13
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td2
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp4
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreSubtarget.h3
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp31
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h11
-rw-r--r--contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp14
-rw-r--r--contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp227
-rw-r--r--contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp715
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp102
-rw-r--r--contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp58
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Inliner.cpp149
-rw-r--r--contrib/llvm/lib/Transforms/IPO/Internalize.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp27
-rw-r--r--contrib/llvm/lib/Transforms/IPO/PruneEH.cpp3
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombine.h10
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp74
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp243
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp160
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp44
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp89
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp71
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp58
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp39
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp83
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp97
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp375
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h4
-rw-r--r--contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp85
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp937
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp79
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h37
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp139
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp2
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp4
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp13
-rw-r--r--contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp311
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp47
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp13
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp92
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp289
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp146
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/GVN.cpp339
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp (renamed from contrib/llvm/lib/Target/ARM/ARMGlobalMerge.cpp)39
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp553
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp35
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LICM.cpp17
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp6
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp166
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp985
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp58
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp467
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp36
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp1228
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SCCP.cpp500
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Scalar.cpp1
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp70
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp182
-rw-r--r--contrib/llvm/lib/Transforms/Scalar/Sink.cpp3
-rw-r--r--contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp16
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BasicInliner.cpp182
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp3
-rw-r--r--contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp34
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp208
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp96
-rw-r--r--contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp57
-rw-r--r--contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp609
-rw-r--r--contrib/llvm/lib/Transforms/Utils/Local.cpp109
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp61
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp40
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp372
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp22
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp28
-rw-r--r--contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp10
-rw-r--r--contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp64
-rw-r--r--contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp9
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp7
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp619
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp45
-rw-r--r--contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp12
-rw-r--r--contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp20
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp1907
-rw-r--r--contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp39
-rw-r--r--contrib/llvm/lib/VMCore/AsmWriter.cpp135
-rw-r--r--contrib/llvm/lib/VMCore/Attributes.cpp8
-rw-r--r--contrib/llvm/lib/VMCore/AutoUpgrade.cpp598
-rw-r--r--contrib/llvm/lib/VMCore/BasicBlock.cpp3
-rw-r--r--contrib/llvm/lib/VMCore/ConstantFold.cpp546
-rw-r--r--contrib/llvm/lib/VMCore/Constants.cpp1223
-rw-r--r--contrib/llvm/lib/VMCore/ConstantsContext.h234
-rw-r--r--contrib/llvm/lib/VMCore/Core.cpp73
-rw-r--r--contrib/llvm/lib/VMCore/DebugInfoProbe.cpp225
-rw-r--r--contrib/llvm/lib/VMCore/DebugLoc.cpp5
-rw-r--r--contrib/llvm/lib/VMCore/Dominators.cpp202
-rw-r--r--contrib/llvm/lib/VMCore/Function.cpp52
-rw-r--r--contrib/llvm/lib/VMCore/GCOV.cpp2
-rw-r--r--contrib/llvm/lib/VMCore/IRBuilder.cpp4
-rw-r--r--contrib/llvm/lib/VMCore/Instruction.cpp56
-rw-r--r--contrib/llvm/lib/VMCore/Instructions.cpp257
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContext.cpp18
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContextImpl.cpp51
-rw-r--r--contrib/llvm/lib/VMCore/LLVMContextImpl.h162
-rw-r--r--contrib/llvm/lib/VMCore/Metadata.cpp89
-rw-r--r--contrib/llvm/lib/VMCore/Module.cpp56
-rw-r--r--contrib/llvm/lib/VMCore/Pass.cpp24
-rw-r--r--contrib/llvm/lib/VMCore/PassManager.cpp161
-rw-r--r--contrib/llvm/lib/VMCore/Type.cpp185
-rw-r--r--contrib/llvm/lib/VMCore/Use.cpp1
-rw-r--r--contrib/llvm/lib/VMCore/User.cpp2
-rw-r--r--contrib/llvm/lib/VMCore/Value.cpp127
-rw-r--r--contrib/llvm/lib/VMCore/ValueTypes.cpp12
-rw-r--r--contrib/llvm/lib/VMCore/Verifier.cpp284
-rw-r--r--contrib/llvm/tools/bugpoint/BugDriver.cpp4
-rw-r--r--contrib/llvm/tools/bugpoint/CMakeLists.txt14
-rw-r--r--contrib/llvm/tools/bugpoint/CrashDebugger.cpp11
-rw-r--r--contrib/llvm/tools/bugpoint/ExecutionDriver.cpp50
-rw-r--r--contrib/llvm/tools/bugpoint/ExtractFunction.cpp62
-rw-r--r--contrib/llvm/tools/bugpoint/Makefile16
-rw-r--r--contrib/llvm/tools/bugpoint/Miscompilation.cpp3
-rw-r--r--contrib/llvm/tools/bugpoint/OptimizerDriver.cpp12
-rw-r--r--contrib/llvm/tools/bugpoint/ToolRunner.cpp93
-rw-r--r--contrib/llvm/tools/bugpoint/ToolRunner.h1
-rw-r--r--contrib/llvm/tools/bugpoint/bugpoint.cpp1
-rw-r--r--contrib/llvm/tools/clang/LICENSE.TXT2
-rw-r--r--contrib/llvm/tools/clang/include/clang-c/Index.h952
-rw-r--r--contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h26
-rw-r--r--contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h11
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/APValue.h269
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h24
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTContext.h203
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h17
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Attr.h18
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def224
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h9
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Decl.h852
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclBase.h354
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h668
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h84
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h784
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h529
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h14
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Expr.h1157
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h1556
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h517
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h46
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h36
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Mangle.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/NSAPI.h152
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h103
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h30
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h20
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h270
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h32
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Stmt.h223
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h96
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h45
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/Type.h710
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h142
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h114
-rw-r--r--contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h82
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h212
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h143
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h111
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h14
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h207
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/CFG.h188
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h257
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h113
-rw-r--r--contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h39
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Attr.td42
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Builtins.def109
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def689
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def271
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h14
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h276
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td98
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td24
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td93
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td127
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h65
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td175
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td246
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td1272
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td60
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileManager.h17
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h79
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/LLVM.h22
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Lambda.h38
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def20
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h38
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Linkage.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Module.h284
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h137
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h206
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td11
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h49
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h47
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def29
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Basic/Version.h24
-rw-r--r--contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h14
-rw-r--r--contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Action.h24
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ArgList.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td9
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td131
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Compilation.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Driver.h34
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h94
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Job.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/OptTable.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Options.td157
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Tool.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h37
-rw-r--r--contrib/llvm/tools/clang/include/clang/Driver/Types.def2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Edit/Commit.h140
-rw-r--r--contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h87
-rw-r--r--contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h65
-rw-r--r--contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h33
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h185
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def17
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h29
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/ChainedIncludesSource.h (renamed from contrib/llvm/tools/clang/include/clang/Serialization/ChainedIncludesSource.h)4
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h26
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h78
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h47
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h149
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h57
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h50
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h63
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def23
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h61
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h31
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h13
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h20
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h62
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h120
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h36
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/Utils.h23
-rw-r--r--contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h17
-rw-r--r--contrib/llvm/tools/clang/include/clang/Index/GlobalCallGraph.h (renamed from contrib/llvm/tools/clang/include/clang/Index/CallGraph.h)33
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h21
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h3
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h229
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Lexer.h75
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h54
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h25
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h32
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h237
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h89
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h111
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h210
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Lex/Token.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Parse/Parser.h581
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h28
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h15
-rw-r--r--contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h149
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h469
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h153
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h33
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Initialization.h252
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h12
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Lookup.h55
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/MultiInitializer.h72
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Overload.h145
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h5
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Scope.h35
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h274
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Sema.h1679
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h2
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/Template.h64
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h21
-rw-r--r--contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h148
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h215
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h6
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h544
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h235
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h10
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/Module.h229
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h30
-rw-r--r--contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h28
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h24
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h197
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h80
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h394
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h44
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h83
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h4
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h95
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h7
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h228
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h27
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h545
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h66
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h84
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h210
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h80
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h107
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h204
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h244
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h260
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h19
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h43
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h102
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h63
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h63
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h51
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h173
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h40
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h27
-rw-r--r--contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h1
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h164
-rw-r--r--contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h213
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp165
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp44
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h19
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp226
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp43
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp358
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp84
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp328
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp112
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp75
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp33
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp344
-rw-r--r--contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h118
-rw-r--r--contrib/llvm/tools/clang/lib/AST/APValue.cpp573
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTContext.cpp516
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp26
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp773
-rw-r--r--contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp15
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Decl.cpp919
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclBase.cpp418
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp467
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp442
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp91
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp198
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp16
-rw-r--r--contrib/llvm/tools/clang/lib/AST/DumpXML.cpp50
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Expr.cpp1003
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp536
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp90
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp5717
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp387
-rw-r--r--contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Mangle.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp33
-rw-r--r--contrib/llvm/tools/clang/lib/AST/NSAPI.cpp312
-rw-r--r--contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp28
-rw-r--r--contrib/llvm/tools/clang/lib/AST/ParentMap.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp766
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Stmt.cpp102
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp69
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp412
-rw-r--r--contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp99
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp114
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TemplateName.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/AST/Type.cpp277
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp113
-rw-r--r--contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp109
-rw-r--r--contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp (renamed from contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp)137
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CFG.cpp405
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp184
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp49
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp259
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h6
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp171
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp49
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp177
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp290
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp1533
-rw-r--r--contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Builtins.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c143
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp126
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp229
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/FileManager.cpp27
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp43
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Module.cpp274
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp144
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Targets.cpp956
-rw-r--r--contrib/llvm/tools/clang/lib/Basic/Version.cpp58
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h21
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp143
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp387
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h47
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp2692
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp29
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp43
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h34
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp621
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCall.h154
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp362
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp74
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp946
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h42
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp243
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp220
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp145
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGException.h56
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp1017
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp580
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp296
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp879
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp335
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp836
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp296
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp851
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp74
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h28
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp59
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp183
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp62
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp49
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp40
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CGValue.h84
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp107
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp180
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h407
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp617
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h115
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h4
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp44
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h109
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp334
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp882
-rw-r--r--contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h5
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Action.cpp31
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Arg.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ArgList.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Compilation.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Driver.cpp370
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp731
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Job.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Option.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp187
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp1513
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/ToolChains.h337
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tools.cpp1594
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Tools.h97
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/Types.cpp1
-rw-r--r--contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp368
-rw-r--r--contrib/llvm/tools/clang/lib/Edit/Commit.cpp345
-rw-r--r--contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp329
-rw-r--r--contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp587
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp746
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp (renamed from contrib/llvm/tools/clang/lib/Serialization/ChainedIncludesSource.cpp)40
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp813
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp818
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp17
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp64
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp140
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp386
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp137
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp314
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp46
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp128
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp206
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp50
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp592
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp881
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp1148
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp38
-rw-r--r--contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp190
-rw-r--r--contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp43
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/avx2intrin.h961
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/avxintrin.h331
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h75
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/bmiintrin.h115
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/cpuid.h33
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/emmintrin.h56
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/float.h11
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/fma4intrin.h231
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/immintrin.h18
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h55
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/mm3dnow.h2
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/module.map108
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/popcntintrin.h45
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/smmintrin.h127
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/tgmath.h23
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/tmmintrin.h11
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/unwind.h124
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/wmmintrin.h2
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/x86intrin.h26
-rw-r--r--contrib/llvm/tools/clang/lib/Headers/xmmintrin.h21
-rw-r--r--contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Index/Analyzer.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Index/GlobalCallGraph.cpp (renamed from contrib/llvm/tools/clang/lib/Index/CallGraph.cpp)10
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp516
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Lexer.cpp438
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp464
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp1437
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp354
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp88
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp134
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Pragma.cpp211
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp265
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp149
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp49
-rw-r--r--contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp35
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp120
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp1544
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp685
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp365
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp610
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp179
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp526
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp118
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.h10
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp252
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp184
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp331
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/Parser.cpp454
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp115
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp75
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp7275
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp3197
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp250
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp133
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp186
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp33
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp158
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp124
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/MultiInitializer.cpp92
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Scope.cpp28
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Sema.cpp269
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp326
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp54
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp167
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp271
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp2110
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp1135
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp2332
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp267
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp3141
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp779
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp3464
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp1341
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp346
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp1491
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp44
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp2136
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp820
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp825
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp382
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp2109
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp1351
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp532
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp1265
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp763
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp305
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp654
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp117
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaType.cpp837
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TreeTransform.h1141
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp3
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h1
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp1589
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp1529
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h25
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp377
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp929
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp435
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp295
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/Module.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp51
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp74
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp63
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp70
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp157
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp481
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp191
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp128
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp232
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp233
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td148
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp22
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp18
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp56
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp70
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp60
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp45
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp740
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp34
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h22
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp46
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp15
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp229
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp34
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp1341
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp211
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp31
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp96
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp21
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp174
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp159
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp82
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp11
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp44
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp900
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp10
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp64
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp109
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp62
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp37
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp7
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp8
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp204
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp34
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp87
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp241
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AggExprVisitor.cpp69
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp38
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp66
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp750
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp237
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp76
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp174
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp5
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp592
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp147
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp174
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp1215
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp611
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp237
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp357
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp92
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp38
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp127
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp170
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp118
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp466
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp298
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp350
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp64
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp388
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp152
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp89
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp178
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h36
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp341
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp46
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp128
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp31
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp414
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp230
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp296
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1_main.cpp22
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp80
-rw-r--r--contrib/llvm/tools/clang/tools/driver/driver.cpp38
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/CMakeLists.txt12
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp318
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h39
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp113
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/Makefile19
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp91
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h34
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp26
-rw-r--r--contrib/llvm/tools/llc/CMakeLists.txt5
-rw-r--r--contrib/llvm/tools/llc/Makefile21
-rw-r--r--contrib/llvm/tools/llc/llc.cpp206
-rw-r--r--contrib/llvm/tools/lli/CMakeLists.txt5
-rw-r--r--contrib/llvm/tools/lli/Makefile15
-rw-r--r--contrib/llvm/tools/lli/lli.cpp38
-rw-r--r--contrib/llvm/tools/llvm-ar/CMakeLists.txt8
-rw-r--r--contrib/llvm/tools/llvm-ar/Makefile25
-rw-r--r--contrib/llvm/tools/llvm-as/CMakeLists.txt6
-rw-r--r--contrib/llvm/tools/llvm-as/Makefile17
-rw-r--r--contrib/llvm/tools/llvm-as/llvm-as.cpp2
-rw-r--r--contrib/llvm/tools/llvm-bcanalyzer/CMakeLists.txt6
-rw-r--r--contrib/llvm/tools/llvm-bcanalyzer/Makefile17
-rw-r--r--contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp123
-rw-r--r--contrib/llvm/tools/llvm-diff/CMakeLists.txt8
-rw-r--r--contrib/llvm/tools/llvm-diff/DiffConsumer.cpp6
-rw-r--r--contrib/llvm/tools/llvm-diff/DiffConsumer.h1
-rw-r--r--contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp18
-rw-r--r--contrib/llvm/tools/llvm-diff/DifferenceEngine.h4
-rw-r--r--contrib/llvm/tools/llvm-diff/Makefile17
-rw-r--r--contrib/llvm/tools/llvm-diff/llvm-diff.cpp2
-rw-r--r--contrib/llvm/tools/llvm-dis/CMakeLists.txt6
-rw-r--r--contrib/llvm/tools/llvm-dis/Makefile17
-rw-r--r--contrib/llvm/tools/llvm-dis/llvm-dis.cpp19
-rw-r--r--contrib/llvm/tools/llvm-extract/CMakeLists.txt5
-rw-r--r--contrib/llvm/tools/llvm-extract/Makefile18
-rw-r--r--contrib/llvm/tools/llvm-extract/llvm-extract.cpp12
-rw-r--r--contrib/llvm/tools/llvm-ld/CMakeLists.txt8
-rw-r--r--contrib/llvm/tools/llvm-ld/Makefile15
-rw-r--r--contrib/llvm/tools/llvm-ld/llvm-ld.cpp4
-rw-r--r--contrib/llvm/tools/llvm-link/CMakeLists.txt5
-rw-r--r--contrib/llvm/tools/llvm-link/Makefile17
-rw-r--r--contrib/llvm/tools/llvm-link/llvm-link.cpp2
-rw-r--r--contrib/llvm/tools/llvm-mc/CMakeLists.txt6
-rw-r--r--contrib/llvm/tools/llvm-mc/Disassembler.cpp36
-rw-r--r--contrib/llvm/tools/llvm-mc/Makefile24
-rw-r--r--contrib/llvm/tools/llvm-mc/llvm-mc.cpp49
-rw-r--r--contrib/llvm/tools/llvm-nm/CMakeLists.txt5
-rw-r--r--contrib/llvm/tools/llvm-nm/Makefile17
-rw-r--r--contrib/llvm/tools/llvm-nm/llvm-nm.cpp102
-rw-r--r--contrib/llvm/tools/llvm-objdump/CMakeLists.txt14
-rw-r--r--contrib/llvm/tools/llvm-objdump/MachODump.cpp404
-rw-r--r--contrib/llvm/tools/llvm-objdump/Makefile18
-rw-r--r--contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp228
-rw-r--r--contrib/llvm/tools/llvm-objdump/llvm-objdump.h2
-rw-r--r--contrib/llvm/tools/llvm-prof/CMakeLists.txt5
-rw-r--r--contrib/llvm/tools/llvm-prof/Makefile17
-rw-r--r--contrib/llvm/tools/llvm-prof/llvm-prof.cpp18
-rw-r--r--contrib/llvm/tools/llvm-ranlib/CMakeLists.txt6
-rw-r--r--contrib/llvm/tools/llvm-ranlib/Makefile18
-rw-r--r--contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp218
-rw-r--r--contrib/llvm/tools/llvm-rtdyld/CMakeLists.txt5
-rw-r--r--contrib/llvm/tools/llvm-rtdyld/Makefile23
-rw-r--r--contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp31
-rw-r--r--contrib/llvm/tools/llvm-stress/llvm-stress.cpp702
-rw-r--r--contrib/llvm/tools/llvm-stub/CMakeLists.txt3
-rw-r--r--contrib/llvm/tools/llvm-stub/Makefile13
-rw-r--r--contrib/llvm/tools/macho-dump/CMakeLists.txt5
-rw-r--r--contrib/llvm/tools/macho-dump/Makefile23
-rw-r--r--contrib/llvm/tools/opt/CMakeLists.txt8
-rw-r--r--contrib/llvm/tools/opt/Makefile14
-rw-r--r--contrib/llvm/tools/opt/PrintSCC.cpp4
-rw-r--r--contrib/llvm/tools/opt/opt.cpp9
-rw-r--r--contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp1790
-rw-r--r--contrib/llvm/utils/TableGen/ARMDecoderEmitter.h49
-rw-r--r--contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp439
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp185
-rw-r--r--contrib/llvm/utils/TableGen/AsmWriterEmitter.h2
-rw-r--r--contrib/llvm/utils/TableGen/CallingConvEmitter.cpp6
-rw-r--r--contrib/llvm/utils/TableGen/CodeEmitterGen.cpp26
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp22
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h10
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenInstruction.cpp45
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenRegisters.cpp1028
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenRegisters.h247
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.cpp29
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.h10
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcher.cpp6
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcher.h3
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp11
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp2
-rw-r--r--contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp512
-rw-r--r--contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h52
-rw-r--r--contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp1
-rw-r--r--contrib/llvm/utils/TableGen/EDEmitter.cpp36
-rw-r--r--contrib/llvm/utils/TableGen/FastISelEmitter.cpp3
-rw-r--r--contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp334
-rw-r--r--contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h18
-rw-r--r--contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp48
-rw-r--r--contrib/llvm/utils/TableGen/InstrEnumEmitter.h33
-rw-r--r--contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp49
-rw-r--r--contrib/llvm/utils/TableGen/InstrInfoEmitter.h10
-rw-r--r--contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp139
-rw-r--r--contrib/llvm/utils/TableGen/IntrinsicEmitter.h2
-rw-r--r--contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp9
-rw-r--r--contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp821
-rw-r--r--contrib/llvm/utils/TableGen/RegisterInfoEmitter.h6
-rw-r--r--contrib/llvm/utils/TableGen/SequenceToOffsetTable.h139
-rw-r--r--contrib/llvm/utils/TableGen/SetTheory.cpp23
-rw-r--r--contrib/llvm/utils/TableGen/SetTheory.h8
-rw-r--r--contrib/llvm/utils/TableGen/StringToOffsetTable.h13
-rw-r--r--contrib/llvm/utils/TableGen/SubtargetEmitter.cpp82
-rw-r--r--contrib/llvm/utils/TableGen/TableGen.cpp173
-rw-r--r--contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp148
-rw-r--r--contrib/llvm/utils/TableGen/X86ModRMFilters.cpp26
-rw-r--r--contrib/llvm/utils/TableGen/X86ModRMFilters.h15
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp236
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.h10
-rw-r--r--etc/mtree/BSD.include.dist2
-rw-r--r--lib/clang/Makefile6
-rw-r--r--lib/clang/clang.build.mk19
-rw-r--r--lib/clang/include/Makefile5
-rw-r--r--lib/clang/include/MipsGenMCCodeEmitter.inc2
-rw-r--r--lib/clang/include/clang/Basic/DiagnosticSerializationKinds.inc2
-rw-r--r--lib/clang/include/clang/Basic/Version.inc8
-rw-r--r--lib/clang/include/clang/Config/config.h20
-rw-r--r--lib/clang/include/clang/Sema/AttrParsedAttrKinds.inc2
-rw-r--r--lib/clang/include/clang/Sema/AttrParsedAttrList.inc2
-rw-r--r--lib/clang/include/clang/Sema/AttrTemplateInstantiate.inc2
-rw-r--r--lib/clang/include/llvm/Config/config.h63
-rw-r--r--lib/clang/include/llvm/Config/llvm-config.h18
-rw-r--r--lib/clang/libclanganalysis/Makefile5
-rw-r--r--lib/clang/libclangarcmigrate/Makefile4
-rw-r--r--lib/clang/libclangast/Makefile2
-rw-r--r--lib/clang/libclangbasic/Makefile2
-rw-r--r--lib/clang/libclangcodegen/Makefile1
-rw-r--r--lib/clang/libclangdriver/Makefile4
-rw-r--r--lib/clang/libclangedit/Makefile16
-rw-r--r--lib/clang/libclangfrontend/Makefile8
-rw-r--r--lib/clang/libclangindex/Makefile1
-rw-r--r--lib/clang/libclanglex/Makefile2
-rw-r--r--lib/clang/libclangparse/Makefile1
-rw-r--r--lib/clang/libclangrewrite/Makefile2
-rw-r--r--lib/clang/libclangsema/Makefile9
-rw-r--r--lib/clang/libclangserialization/Makefile3
-rw-r--r--lib/clang/libclangstaticanalyzercheckers/Makefile12
-rw-r--r--lib/clang/libclangstaticanalyzercore/Makefile5
-rw-r--r--lib/clang/libllvmanalysis/Makefile9
-rw-r--r--lib/clang/libllvmarmcodegen/Makefile2
-rw-r--r--lib/clang/libllvmarmdesc/Makefile1
-rw-r--r--lib/clang/libllvmasmprinter/Makefile1
-rw-r--r--lib/clang/libllvmcodegen/Makefile20
-rw-r--r--lib/clang/libllvmcore/Makefile1
-rw-r--r--lib/clang/libllvminstrumentation/Makefile7
-rw-r--r--lib/clang/libllvmjit/Makefile7
-rw-r--r--lib/clang/libllvmmc/Makefile1
-rw-r--r--lib/clang/libllvmmcjit/Makefile9
-rw-r--r--lib/clang/libllvmmipsasmparser/Makefile13
-rw-r--r--lib/clang/libllvmmipscodegen/Makefile5
-rw-r--r--lib/clang/libllvmmipsdesc/Makefile2
-rw-r--r--lib/clang/libllvmpowerpccodegen/Makefile1
-rw-r--r--lib/clang/libllvmpowerpcdesc/Makefile3
-rw-r--r--lib/clang/libllvmruntimedyld/Makefile1
-rw-r--r--lib/clang/libllvmscalaropts/Makefile3
-rw-r--r--lib/clang/libllvmselectiondag/Makefile4
-rw-r--r--lib/clang/libllvmsupport/Makefile7
-rw-r--r--lib/clang/libllvmtablegen/Makefile1
-rw-r--r--lib/clang/libllvmtarget/Makefile3
-rw-r--r--lib/clang/libllvmtransformutils/Makefile5
-rw-r--r--lib/clang/libllvmvectorize/Makefile11
-rw-r--r--lib/clang/libllvmx86codegen/Makefile1
-rw-r--r--lib/clang/libllvmx86desc/Makefile4
-rw-r--r--share/mk/bsd.sys.mk13
-rw-r--r--usr.bin/clang/bugpoint/Makefile2
-rw-r--r--usr.bin/clang/bugpoint/bugpoint.12
-rw-r--r--usr.bin/clang/clang/Makefile26
-rw-r--r--usr.bin/clang/clang/clang.14
-rw-r--r--usr.bin/clang/llc/llc.14
-rw-r--r--usr.bin/clang/lli/Makefile2
-rw-r--r--usr.bin/clang/lli/lli.12
-rw-r--r--usr.bin/clang/llvm-ar/llvm-ar.12
-rw-r--r--usr.bin/clang/llvm-as/llvm-as.12
-rw-r--r--usr.bin/clang/llvm-bcanalyzer/llvm-bcanalyzer.12
-rw-r--r--usr.bin/clang/llvm-diff/llvm-diff.12
-rw-r--r--usr.bin/clang/llvm-dis/llvm-dis.12
-rw-r--r--usr.bin/clang/llvm-extract/llvm-extract.12
-rw-r--r--usr.bin/clang/llvm-ld/Makefile2
-rw-r--r--usr.bin/clang/llvm-ld/llvm-ld.12
-rw-r--r--usr.bin/clang/llvm-link/llvm-link.12
-rw-r--r--usr.bin/clang/llvm-nm/llvm-nm.12
-rw-r--r--usr.bin/clang/llvm-prof/llvm-prof.12
-rw-r--r--usr.bin/clang/llvm-ranlib/llvm-ranlib.12
-rw-r--r--usr.bin/clang/opt/Makefile2
-rw-r--r--usr.bin/clang/opt/opt.12
-rw-r--r--usr.bin/clang/tblgen/Makefile6
-rw-r--r--usr.bin/clang/tblgen/tblgen.148
2267 files changed, 249085 insertions, 121373 deletions
diff --git a/ObsoleteFiles.inc b/ObsoleteFiles.inc
index f9e3ca9..c77b36a 100644
--- a/ObsoleteFiles.inc
+++ b/ObsoleteFiles.inc
@@ -38,6 +38,22 @@
# xargs -n1 | sort | uniq -d;
# done
+# 20120415: new clang import which bumps version from 3.0 to 3.1
+OLD_FILES+=usr/include/clang/3.0/altivec.h
+OLD_FILES+=usr/include/clang/3.0/avxintrin.h
+OLD_FILES+=usr/include/clang/3.0/emmintrin.h
+OLD_FILES+=usr/include/clang/3.0/immintrin.h
+OLD_FILES+=usr/include/clang/3.0/mm3dnow.h
+OLD_FILES+=usr/include/clang/3.0/mm_malloc.h
+OLD_FILES+=usr/include/clang/3.0/mmintrin.h
+OLD_FILES+=usr/include/clang/3.0/nmmintrin.h
+OLD_FILES+=usr/include/clang/3.0/pmmintrin.h
+OLD_FILES+=usr/include/clang/3.0/smmintrin.h
+OLD_FILES+=usr/include/clang/3.0/tmmintrin.h
+OLD_FILES+=usr/include/clang/3.0/wmmintrin.h
+OLD_FILES+=usr/include/clang/3.0/x86intrin.h
+OLD_FILES+=usr/include/clang/3.0/xmmintrin.h
+OLD_DIRS+=usr/include/clang/3.0
# 20120322: Update heimdal to 1.5.1.
OLD_FILES+=usr/include/krb5-v4compat.h \
usr/include/krb_err.h \
diff --git a/contrib/llvm/LICENSE.TXT b/contrib/llvm/LICENSE.TXT
index 1b1047c..00cf601 100644
--- a/contrib/llvm/LICENSE.TXT
+++ b/contrib/llvm/LICENSE.TXT
@@ -4,7 +4,7 @@ LLVM Release License
University of Illinois/NCSA
Open Source License
-Copyright (c) 2003-2011 University of Illinois at Urbana-Champaign.
+Copyright (c) 2003-2012 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:
@@ -67,3 +67,4 @@ Autoconf llvm/autoconf
CellSPU backend llvm/lib/Target/CellSPU/README.txt
Google Test llvm/utils/unittest/googletest
OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex}
+pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT}
diff --git a/contrib/llvm/include/llvm-c/Analysis.h b/contrib/llvm/include/llvm-c/Analysis.h
index e1e4487..f0bdddc 100644
--- a/contrib/llvm/include/llvm-c/Analysis.h
+++ b/contrib/llvm/include/llvm-c/Analysis.h
@@ -25,6 +25,12 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCAnalysis Analysis
+ * @ingroup LLVMC
+ *
+ * @{
+ */
typedef enum {
LLVMAbortProcessAction, /* verifier will print to stderr and abort() */
@@ -48,6 +54,10 @@ LLVMBool LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action);
void LLVMViewFunctionCFG(LLVMValueRef Fn);
void LLVMViewFunctionCFGOnly(LLVMValueRef Fn);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
#endif
diff --git a/contrib/llvm/include/llvm-c/BitReader.h b/contrib/llvm/include/llvm-c/BitReader.h
index 6db6607..5228035 100644
--- a/contrib/llvm/include/llvm-c/BitReader.h
+++ b/contrib/llvm/include/llvm-c/BitReader.h
@@ -25,6 +25,12 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCBitReader Bit Reader
+ * @ingroup LLVMC
+ *
+ * @{
+ */
/* Builds a module from the bitcode in the specified memory buffer, returning a
reference to the module via the OutModule parameter. Returns 0 on success.
@@ -59,6 +65,10 @@ LLVMBool LLVMGetBitcodeModuleProvider(LLVMMemoryBufferRef MemBuf,
LLVMModuleProviderRef *OutMP,
char **OutMessage);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
#endif
diff --git a/contrib/llvm/include/llvm-c/BitWriter.h b/contrib/llvm/include/llvm-c/BitWriter.h
index bcbfb11..ba5a677 100644
--- a/contrib/llvm/include/llvm-c/BitWriter.h
+++ b/contrib/llvm/include/llvm-c/BitWriter.h
@@ -25,6 +25,12 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCBitWriter Bit Writer
+ * @ingroup LLVMC
+ *
+ * @{
+ */
/*===-- Operations on modules ---------------------------------------------===*/
@@ -39,6 +45,10 @@ int LLVMWriteBitcodeToFD(LLVMModuleRef M, int FD, int ShouldClose,
descriptor. Returns 0 on success. Closes the Handle. */
int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int Handle);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
#endif
diff --git a/contrib/llvm/include/llvm-c/Core.h b/contrib/llvm/include/llvm-c/Core.h
index d23b91c..7774606 100644
--- a/contrib/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm/include/llvm-c/Core.h
@@ -10,24 +10,6 @@
|* This header declares the C interface to libLLVMCore.a, which implements *|
|* the LLVM intermediate representation. *|
|* *|
-|* LLVM uses a polymorphic type hierarchy which C cannot represent, therefore *|
-|* parameters must be passed as base types. Despite the declared types, most *|
-|* of the functions provided operate only on branches of the type hierarchy. *|
-|* The declared parameter names are descriptive and specify which type is *|
-|* required. Additionally, each type hierarchy is documented along with the *|
-|* functions that operate upon it. For more detail, refer to LLVM's C++ code. *|
-|* If in doubt, refer to Core.cpp, which performs paramter downcasts in the *|
-|* form unwrap<RequiredType>(Param). *|
-|* *|
-|* Many exotic languages can interoperate with C code but have a harder time *|
-|* with C++ due to name mangling. So in addition to C, this interface enables *|
-|* tools written in such languages. *|
-|* *|
-|* When included into a C++ source file, also declares 'wrap' and 'unwrap' *|
-|* helpers to perform opaque reference<-->pointer conversions. These helpers *|
-|* are shorter and more tightly typed than writing the casts by hand when *|
-|* authoring bindings. In assert builds, they will do runtime type checking. *|
-|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_CORE_H
@@ -46,50 +28,121 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMC LLVM-C: C interface to LLVM
+ *
+ * This module exposes parts of the LLVM library as a C API.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup LLVMCTransforms Transforms
+ */
+
+/**
+ * @defgroup LLVMCCore Core
+ *
+ * This module provides an interface to libLLVMCore, which implements
+ * the LLVM intermediate representation as well as other related types
+ * and utilities.
+ *
+ * LLVM uses a polymorphic type hierarchy which C cannot represent, therefore
+ * parameters must be passed as base types. Despite the declared types, most
+ * of the functions provided operate only on branches of the type hierarchy.
+ * The declared parameter names are descriptive and specify which type is
+ * required. Additionally, each type hierarchy is documented along with the
+ * functions that operate upon it. For more detail, refer to LLVM's C++ code.
+ * If in doubt, refer to Core.cpp, which performs parameter downcasts in the
+ * form unwrap<RequiredType>(Param).
+ *
+ * Many exotic languages can interoperate with C code but have a harder time
+ * with C++ due to name mangling. So in addition to C, this interface enables
+ * tools written in such languages.
+ *
+ * When included into a C++ source file, also declares 'wrap' and 'unwrap'
+ * helpers to perform opaque reference<-->pointer conversions. These helpers
+ * are shorter and more tightly typed than writing the casts by hand when
+ * authoring bindings. In assert builds, they will do runtime type checking.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup LLVMCCoreTypes Types and Enumerations
+ *
+ * @{
+ */
typedef int LLVMBool;
/* Opaque types. */
/**
- * The top-level container for all LLVM global data. See the LLVMContext class.
+ * The top-level container for all LLVM global data. See the LLVMContext class.
*/
typedef struct LLVMOpaqueContext *LLVMContextRef;
/**
* The top-level container for all other LLVM Intermediate Representation (IR)
- * objects. See the llvm::Module class.
+ * objects.
+ *
+ * @see llvm::Module
*/
typedef struct LLVMOpaqueModule *LLVMModuleRef;
/**
- * Each value in the LLVM IR has a type, an LLVMTypeRef. See the llvm::Type
- * class.
+ * Each value in the LLVM IR has a type, an LLVMTypeRef.
+ *
+ * @see llvm::Type
*/
typedef struct LLVMOpaqueType *LLVMTypeRef;
+/**
+ * Represents an individual value in LLVM IR.
+ *
+ * This models llvm::Value.
+ */
typedef struct LLVMOpaqueValue *LLVMValueRef;
+
+/**
+ * Represents a basic block of instruction in LLVM IR.
+ *
+ * This models llvm::BasicBlock.
+ */
typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef;
+
+/**
+ * Represents an LLVM basic block builder.
+ *
+ * This models llvm::IRBuilder.
+ */
typedef struct LLVMOpaqueBuilder *LLVMBuilderRef;
-/* Interface used to provide a module to JIT or interpreter. This is now just a
- * synonym for llvm::Module, but we have to keep using the different type to
- * keep binary compatibility.
+/**
+ * Interface used to provide a module to JIT or interpreter.
+ * This is now just a synonym for llvm::Module, but we have to keep using the
+ * different type to keep binary compatibility.
*/
typedef struct LLVMOpaqueModuleProvider *LLVMModuleProviderRef;
-/* Used to provide a module to JIT or interpreter.
- * See the llvm::MemoryBuffer class.
+/**
+ * Used to provide a module to JIT or interpreter.
+ *
+ * @see llvm::MemoryBuffer
*/
typedef struct LLVMOpaqueMemoryBuffer *LLVMMemoryBufferRef;
-/** See the llvm::PassManagerBase class. */
+/** @see llvm::PassManagerBase */
typedef struct LLVMOpaquePassManager *LLVMPassManagerRef;
-/** See the llvm::PassRegistry class. */
+/** @see llvm::PassRegistry */
typedef struct LLVMOpaquePassRegistry *LLVMPassRegistryRef;
-/** Used to get the users and usees of a Value. See the llvm::Use class. */
+/**
+ * Used to get the users and usees of a Value.
+ *
+ * @see llvm::Use */
typedef struct LLVMOpaqueUse *LLVMUseRef;
typedef enum {
@@ -119,6 +172,11 @@ typedef enum {
LLVMReturnsTwice = 1 << 29,
LLVMUWTable = 1 << 30,
LLVMNonLazyBind = 1 << 31
+
+ // FIXME: This attribute is currently not included in the C API as
+ // a temporary measure until the API/ABI impact to the C API is understood
+ // and the path forward agreed upon.
+ //LLVMAddressSafety = 1ULL << 32
} LLVMAttribute;
typedef enum {
@@ -195,14 +253,13 @@ typedef enum {
/* Exception Handling Operators */
LLVMResume = 58,
- LLVMLandingPad = 59,
- LLVMUnwind = 60
-
+ LLVMLandingPad = 59
} LLVMOpcode;
typedef enum {
LLVMVoidTypeKind, /**< type with no size */
+ LLVMHalfTypeKind, /**< 16 bit floating point type */
LLVMFloatTypeKind, /**< 32 bit floating point type */
LLVMDoubleTypeKind, /**< 64 bit floating point type */
LLVMX86_FP80TypeKind, /**< 80 bit floating point type (X87) */
@@ -294,6 +351,10 @@ typedef enum {
LLVMLandingPadFilter /**< A filter clause */
} LLVMLandingPadClauseTy;
+/**
+ * @}
+ */
+
void LLVMInitializeCore(LLVMPassRegistryRef R);
@@ -302,49 +363,233 @@ void LLVMInitializeCore(LLVMPassRegistryRef R);
void LLVMDisposeMessage(char *Message);
-/*===-- Contexts ----------------------------------------------------------===*/
+/**
+ * @defgroup LLVMCCoreContext Contexts
+ *
+ * Contexts are execution states for the core LLVM IR system.
+ *
+ * Most types are tied to a context instance. Multiple contexts can
+ * exist simultaneously. A single context is not thread safe. However,
+ * different contexts can execute on different threads simultaneously.
+ *
+ * @{
+ */
-/* Create and destroy contexts. */
+/**
+ * Create a new context.
+ *
+ * Every call to this function should be paired with a call to
+ * LLVMContextDispose() or the context will leak memory.
+ */
LLVMContextRef LLVMContextCreate(void);
+
+/**
+ * Obtain the global context instance.
+ */
LLVMContextRef LLVMGetGlobalContext(void);
+
+/**
+ * Destroy a context instance.
+ *
+ * This should be called for every call to LLVMContextCreate() or memory
+ * will be leaked.
+ */
void LLVMContextDispose(LLVMContextRef C);
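[Editorial aside, not part of the diff: the create/dispose pairing documented above is the whole ownership contract. A minimal sketch, assuming only llvm-c/Core.h; the function name context_demo is invented.]

    #include <llvm-c/Core.h>

    static void context_demo(void) {
      LLVMContextRef Ctx = LLVMContextCreate(); /* pair with dispose below */
      /* ... create modules, types, and values tied to Ctx here ... */
      LLVMContextDispose(Ctx);                  /* otherwise Ctx leaks */
    }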
unsigned LLVMGetMDKindIDInContext(LLVMContextRef C, const char* Name,
unsigned SLen);
unsigned LLVMGetMDKindID(const char* Name, unsigned SLen);
-/*===-- Modules -----------------------------------------------------------===*/
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreModule Modules
+ *
+ * Modules represent the top-level structure in an LLVM program. An LLVM
+ * module is effectively a translation unit or a collection of
+ * translation units merged together.
+ *
+ * @{
+ */
-/* Create and destroy modules. */
-/** See llvm::Module::Module. */
+/**
+ * Create a new, empty module in the global context.
+ *
+ * This is equivalent to calling LLVMModuleCreateWithNameInContext with
+ * LLVMGetGlobalContext() as the context parameter.
+ *
+ * Every invocation should be paired with LLVMDisposeModule() or memory
+ * will be leaked.
+ */
LLVMModuleRef LLVMModuleCreateWithName(const char *ModuleID);
+
+/**
+ * Create a new, empty module in a specific context.
+ *
+ * Every invocation should be paired with LLVMDisposeModule() or memory
+ * will be leaked.
+ */
LLVMModuleRef LLVMModuleCreateWithNameInContext(const char *ModuleID,
LLVMContextRef C);
-/** See llvm::Module::~Module. */
+/**
+ * Destroy a module instance.
+ *
+ * This must be called for every created module or memory will be
+ * leaked.
+ */
void LLVMDisposeModule(LLVMModuleRef M);
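[Editorial aside: modules follow the same create/dispose discipline as contexts. A hedged sketch using only the functions declared above; the module name "demo" and function name module_demo are invented.]

    #include <llvm-c/Core.h>

    static void module_demo(LLVMContextRef Ctx) {
      LLVMModuleRef M = LLVMModuleCreateWithNameInContext("demo", Ctx);
      /* ... populate M with functions and globals ... */
      LLVMDisposeModule(M);   /* required for every created module */
    }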
-/** Data layout. See Module::getDataLayout. */
+/**
+ * Obtain the data layout for a module.
+ *
+ * @see Module::getDataLayout()
+ */
const char *LLVMGetDataLayout(LLVMModuleRef M);
+
+/**
+ * Set the data layout for a module.
+ *
+ * @see Module::setDataLayout()
+ */
void LLVMSetDataLayout(LLVMModuleRef M, const char *Triple);
-/** Target triple. See Module::getTargetTriple. */
+/**
+ * Obtain the target triple for a module.
+ *
+ * @see Module::getTargetTriple()
+ */
const char *LLVMGetTarget(LLVMModuleRef M);
+
+/**
+ * Set the target triple for a module.
+ *
+ * @see Module::setTargetTriple()
+ */
void LLVMSetTarget(LLVMModuleRef M, const char *Triple);
-/** See Module::dump. */
+/**
+ * Dump a representation of a module to stderr.
+ *
+ * @see Module::dump()
+ */
void LLVMDumpModule(LLVMModuleRef M);
-/** See Module::setModuleInlineAsm. */
+/**
+ * Set inline assembly for a module.
+ *
+ * @see Module::setModuleInlineAsm()
+ */
void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm);
-/** See Module::getContext. */
+/**
+ * Obtain the context to which this module is associated.
+ *
+ * @see Module::getContext()
+ */
LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M);
-/*===-- Types -------------------------------------------------------------===*/
+/**
+ * Obtain a Type from a module by its registered name.
+ */
+LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name);
+
+/**
+ * Obtain the number of operands for named metadata in a module.
+ *
+ * @see llvm::Module::getNamedMetadata()
+ */
+unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name);
+
+/**
+ * Obtain the named metadata operands for a module.
+ *
+ * The passed LLVMValueRef pointer should refer to an array of
+ * LLVMValueRef at least LLVMGetNamedMetadataNumOperands long. This
+ * array will be populated with the LLVMValueRef instances. Each
+ * instance corresponds to a llvm::MDNode.
+ *
+ * @see llvm::Module::getNamedMetadata()
+ * @see llvm::MDNode::getOperand()
+ */
+void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char* name, LLVMValueRef *Dest);
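[Editorial aside: the two-call pattern documented above — query the count, allocate, then fetch — looks like the following sketch. The metadata name "my.md" is a placeholder, and the caller owns the array.]

    #include <stdlib.h>
    #include <llvm-c/Core.h>

    static void md_demo(LLVMModuleRef M) {
      unsigned N = LLVMGetNamedMetadataNumOperands(M, "my.md");
      LLVMValueRef *Ops = malloc(N * sizeof(LLVMValueRef)); /* caller allocates */
      LLVMGetNamedMetadataOperands(M, "my.md", Ops);        /* fills N entries */
      /* ... each Ops[i] corresponds to an llvm::MDNode ... */
      free(Ops);
    }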
-/* LLVM types conform to the following hierarchy:
- *
+/**
+ * Add an operand to named metadata.
+ *
+ * @see llvm::Module::getNamedMetadata()
+ * @see llvm::MDNode::addOperand()
+ */
+void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char* name,
+ LLVMValueRef Val);
+
+/**
+ * Add a function to a module under a specified name.
+ *
+ * @see llvm::Function::Create()
+ */
+LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name,
+ LLVMTypeRef FunctionTy);
+
+/**
+ * Obtain a Function value from a Module by its name.
+ *
+ * The returned value corresponds to a llvm::Function value.
+ *
+ * @see llvm::Module::getFunction()
+ */
+LLVMValueRef LLVMGetNamedFunction(LLVMModuleRef M, const char *Name);
+
+/**
+ * Obtain an iterator to the first Function in a Module.
+ *
+ * @see llvm::Module::begin()
+ */
+LLVMValueRef LLVMGetFirstFunction(LLVMModuleRef M);
+
+/**
+ * Obtain an iterator to the last Function in a Module.
+ *
+ * @see llvm::Module::end()
+ */
+LLVMValueRef LLVMGetLastFunction(LLVMModuleRef M);
+
+/**
+ * Advance a Function iterator to the next Function.
+ *
+ * Returns NULL if the iterator was already at the end and there are no more
+ * functions.
+ */
+LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn);
+
+/**
+ * Decrement a Function iterator to the previous Function.
+ *
+ * Returns NULL if the iterator was already at the beginning and there are
+ * no previous functions.
+ */
+LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn);
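[Editorial aside: together with LLVMGetValueName(), declared further down in this header, the iterator pair gives the usual forward walk over a module's functions. A sketch; list_functions is an invented name.]

    #include <stdio.h>
    #include <llvm-c/Core.h>

    static void list_functions(LLVMModuleRef M) {
      LLVMValueRef F;
      for (F = LLVMGetFirstFunction(M); F; F = LLVMGetNextFunction(F))
        printf("%s\n", LLVMGetValueName(F)); /* NULL from the iterator ends the loop */
    }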
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreType Types
+ *
+ * Types represent the type of a value.
+ *
+ * Types are associated with a context instance. The context internally
+ * deduplicates types so there is only 1 instance of a specific type
+ * alive at a time. In other words, a unique type is shared among all
+ * consumers within a context.
+ *
+ * A Type in the C API corresponds to llvm::Type.
+ *
+ * Types have the following hierarchy:
+ *
* types:
* integer type
* real type
@@ -356,16 +601,44 @@ LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M);
* void type
* label type
* opaque type
+ *
+ * @{
*/
-/** See llvm::LLVMTypeKind::getTypeID. */
+/**
+ * Obtain the enumerated type of a Type instance.
+ *
+ * @see llvm::Type::getTypeID()
+ */
LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty);
+
+/**
+ * Whether the type has a known size.
+ *
+ * Things that don't have a size are abstract types, labels, and void.
+ *
+ * @see llvm::Type::isSized()
+ */
LLVMBool LLVMTypeIsSized(LLVMTypeRef Ty);
-/** See llvm::LLVMType::getContext. */
+/**
+ * Obtain the context to which this type instance is associated.
+ *
+ * @see llvm::Type::getContext()
+ */
LLVMContextRef LLVMGetTypeContext(LLVMTypeRef Ty);
-/* Operations on integer types */
+/**
+ * @defgroup LLVMCCoreTypeInt Integer Types
+ *
+ * Functions in this section operate on integer types.
+ *
+ * @{
+ */
+
+/**
+ * Obtain an integer type from a context with specified bit width.
+ */
LLVMTypeRef LLVMInt1TypeInContext(LLVMContextRef C);
LLVMTypeRef LLVMInt8TypeInContext(LLVMContextRef C);
LLVMTypeRef LLVMInt16TypeInContext(LLVMContextRef C);
@@ -373,6 +646,10 @@ LLVMTypeRef LLVMInt32TypeInContext(LLVMContextRef C);
LLVMTypeRef LLVMInt64TypeInContext(LLVMContextRef C);
LLVMTypeRef LLVMIntTypeInContext(LLVMContextRef C, unsigned NumBits);
+/**
+ * Obtain an integer type from the global context with a specified bit
+ * width.
+ */
LLVMTypeRef LLVMInt1Type(void);
LLVMTypeRef LLVMInt8Type(void);
LLVMTypeRef LLVMInt16Type(void);
@@ -381,68 +658,336 @@ LLVMTypeRef LLVMInt64Type(void);
LLVMTypeRef LLVMIntType(unsigned NumBits);
unsigned LLVMGetIntTypeWidth(LLVMTypeRef IntegerTy);
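[Editorial aside: LLVMIntTypeInContext() accepts arbitrary bit widths, not just the pre-defined ones above. A sketch; int_demo is an invented name.]

    #include <llvm-c/Core.h>

    static void int_demo(LLVMContextRef Ctx) {
      LLVMTypeRef I7 = LLVMIntTypeInContext(Ctx, 7); /* an i7 integer type */
      unsigned Width = LLVMGetIntTypeWidth(I7);      /* == 7 */
      (void)Width;
    }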
-/* Operations on real types */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreTypeFloat Floating Point Types
+ *
+ * @{
+ */
+
+/**
+ * Obtain a 16-bit floating point type from a context.
+ */
+LLVMTypeRef LLVMHalfTypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a 32-bit floating point type from a context.
+ */
LLVMTypeRef LLVMFloatTypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a 64-bit floating point type from a context.
+ */
LLVMTypeRef LLVMDoubleTypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain an 80-bit floating point type (X87) from a context.
+ */
LLVMTypeRef LLVMX86FP80TypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a 128-bit floating point type (112-bit mantissa) from a
+ * context.
+ */
LLVMTypeRef LLVMFP128TypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a 128-bit floating point type (two 64-bits) from a context.
+ */
LLVMTypeRef LLVMPPCFP128TypeInContext(LLVMContextRef C);
+/**
+ * Obtain a floating point type from the global context.
+ *
+ * These map to the functions in this group of the same name.
+ */
+LLVMTypeRef LLVMHalfType(void);
LLVMTypeRef LLVMFloatType(void);
LLVMTypeRef LLVMDoubleType(void);
LLVMTypeRef LLVMX86FP80Type(void);
LLVMTypeRef LLVMFP128Type(void);
LLVMTypeRef LLVMPPCFP128Type(void);
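[Editorial aside: the half type is new with this 3.1 import; obtaining it is symmetrical with the older floating point constructors. A sketch; float_demo is an invented name.]

    #include <llvm-c/Core.h>

    static void float_demo(LLVMContextRef Ctx) {
      LLVMTypeRef Half = LLVMHalfTypeInContext(Ctx);   /* 16-bit float, new in 3.1 */
      LLVMTypeRef Dbl  = LLVMDoubleTypeInContext(Ctx); /* 64-bit float */
      (void)Half; (void)Dbl;
    }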
-/* Operations on function types */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreTypeFunction Function Types
+ *
+ * @{
+ */
+
+/**
+ * Obtain a function type consisting of a specified signature.
+ *
+ * The function is defined as a tuple of a return Type, a list of
+ * parameter types, and whether the function is variadic.
+ */
LLVMTypeRef LLVMFunctionType(LLVMTypeRef ReturnType,
LLVMTypeRef *ParamTypes, unsigned ParamCount,
LLVMBool IsVarArg);
+
+/**
+ * Returns whether a function type is variadic.
+ */
LLVMBool LLVMIsFunctionVarArg(LLVMTypeRef FunctionTy);
+
+/**
+ * Obtain the Type this function Type returns.
+ */
LLVMTypeRef LLVMGetReturnType(LLVMTypeRef FunctionTy);
+
+/**
+ * Obtain the number of parameters this function accepts.
+ */
unsigned LLVMCountParamTypes(LLVMTypeRef FunctionTy);
+
+/**
+ * Obtain the types of a function's parameters.
+ *
+ * The Dest parameter should point to a pre-allocated array of
+ * LLVMTypeRef at least LLVMCountParamTypes() large. On return, the
+ * first LLVMCountParamTypes() entries in the array will be populated
+ * with LLVMTypeRef instances.
+ *
+ * @param FunctionTy The function type to operate on.
+ * @param Dest Memory address of an array to be filled with result.
+ */
void LLVMGetParamTypes(LLVMTypeRef FunctionTy, LLVMTypeRef *Dest);
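[Editorial aside: putting the pieces together, a signature such as i32 (i32, i32) is built and inspected as follows. A sketch under the declarations above; all variable names are invented, and the global-context type constructors are used for brevity.]

    #include <llvm-c/Core.h>

    static void fnty_demo(void) {
      LLVMTypeRef I32 = LLVMInt32Type();
      LLVMTypeRef Params[2] = { I32, I32 };
      LLVMTypeRef FnTy = LLVMFunctionType(I32, Params, 2, 0); /* 0 = not variadic */
      LLVMTypeRef Fetched[2];             /* at least LLVMCountParamTypes() large */
      LLVMGetParamTypes(FnTy, Fetched);   /* Fetched[0..1] now hold I32 */
    }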
-/* Operations on struct types */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreTypeStruct Structure Types
+ *
+ * These functions relate to LLVMTypeRef instances.
+ *
+ * @see llvm::StructType
+ *
+ * @{
+ */
+
+/**
+ * Create a new structure type in a context.
+ *
+ * A structure is specified by a list of inner elements/types and
+ * whether these can be packed together.
+ *
+ * @see llvm::StructType::create()
+ */
LLVMTypeRef LLVMStructTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes,
unsigned ElementCount, LLVMBool Packed);
+
+/**
+ * Create a new structure type in the global context.
+ *
+ * @see llvm::StructType::create()
+ */
LLVMTypeRef LLVMStructType(LLVMTypeRef *ElementTypes, unsigned ElementCount,
LLVMBool Packed);
+
+/**
+ * Create an empty structure in a context having a specified name.
+ *
+ * @see llvm::StructType::create()
+ */
LLVMTypeRef LLVMStructCreateNamed(LLVMContextRef C, const char *Name);
+
+/**
+ * Obtain the name of a structure.
+ *
+ * @see llvm::StructType::getName()
+ */
const char *LLVMGetStructName(LLVMTypeRef Ty);
+
+/**
+ * Set the contents of a structure type.
+ *
+ * @see llvm::StructType::setBody()
+ */
void LLVMStructSetBody(LLVMTypeRef StructTy, LLVMTypeRef *ElementTypes,
unsigned ElementCount, LLVMBool Packed);
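[Editorial aside: named structs are created opaque and filled in later with LLVMStructSetBody(), which is how self-referential types are expressed. A sketch; "pair" and struct_demo are invented names.]

    #include <llvm-c/Core.h>

    static void struct_demo(LLVMContextRef Ctx) {
      LLVMTypeRef Pair = LLVMStructCreateNamed(Ctx, "pair");   /* opaque so far */
      LLVMTypeRef Elems[2] = { LLVMInt32TypeInContext(Ctx),
                               LLVMDoubleTypeInContext(Ctx) };
      LLVMStructSetBody(Pair, Elems, 2, 0);                    /* 0 = not packed */
    }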
+/**
+ * Get the number of elements defined inside the structure.
+ *
+ * @see llvm::StructType::getNumElements()
+ */
unsigned LLVMCountStructElementTypes(LLVMTypeRef StructTy);
+
+/**
+ * Get the elements within a structure.
+ *
+ * The function is passed the address of a pre-allocated array of
+ * LLVMTypeRef at least LLVMCountStructElementTypes() long. After
+ * invocation, this array will be populated with the structure's
+ * elements. The objects in the destination array will have a lifetime
+ * of the structure type itself, which is the lifetime of the context it
+ * is contained in.
+ */
void LLVMGetStructElementTypes(LLVMTypeRef StructTy, LLVMTypeRef *Dest);
+
+/**
+ * Determine whether a structure is packed.
+ *
+ * @see llvm::StructType::isPacked()
+ */
LLVMBool LLVMIsPackedStruct(LLVMTypeRef StructTy);
+
+/**
+ * Determine whether a structure is opaque.
+ *
+ * @see llvm::StructType::isOpaque()
+ */
LLVMBool LLVMIsOpaqueStruct(LLVMTypeRef StructTy);
-LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name);
+/**
+ * @}
+ */
-/* Operations on array, pointer, and vector types (sequence types) */
-LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount);
-LLVMTypeRef LLVMPointerType(LLVMTypeRef ElementType, unsigned AddressSpace);
-LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount);
+/**
+ * @defgroup LLVMCCoreTypeSequential Sequential Types
+ *
+ * Sequential types represent "arrays" of types. This is a superclass
+ * for array, vector, and pointer types.
+ *
+ * @{
+ */
+
+/**
+ * Obtain the type of elements within a sequential type.
+ *
+ * This works on array, vector, and pointer types.
+ *
+ * @see llvm::SequentialType::getElementType()
+ */
LLVMTypeRef LLVMGetElementType(LLVMTypeRef Ty);
+
+/**
+ * Create a fixed size array type that refers to a specific type.
+ *
+ * The created type will exist in the context that its element type
+ * exists in.
+ *
+ * @see llvm::ArrayType::get()
+ */
+LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount);
+
+/**
+ * Obtain the length of an array type.
+ *
+ * This only works on types that represent arrays.
+ *
+ * @see llvm::ArrayType::getNumElements()
+ */
unsigned LLVMGetArrayLength(LLVMTypeRef ArrayTy);
+
+/**
+ * Create a pointer type that points to a defined type.
+ *
+ * The created type will exist in the context that its pointee type
+ * exists in.
+ *
+ * @see llvm::PointerType::get()
+ */
+LLVMTypeRef LLVMPointerType(LLVMTypeRef ElementType, unsigned AddressSpace);
+
+/**
+ * Obtain the address space of a pointer type.
+ *
+ * This only works on types that represent pointers.
+ *
+ * @see llvm::PointerType::getAddressSpace()
+ */
unsigned LLVMGetPointerAddressSpace(LLVMTypeRef PointerTy);
+
+/**
+ * Create a vector type that contains a defined type and has a specific
+ * number of elements.
+ *
+ * The created type will exist in the context that its element type
+ * exists in.
+ *
+ * @see llvm::VectorType::get()
+ */
+LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount);
+
+/**
+ * Obtain the number of elements in a vector type.
+ *
+ * This only works on types that represent vectors.
+ *
+ * @see llvm::VectorType::getNumElements()
+ */
unsigned LLVMGetVectorSize(LLVMTypeRef VectorTy);
-/* Operations on other types */
+/**
+ * @}
+ */
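A quick illustration of the sequential-type constructors and accessors above (editorial sketch; Ctx is assumed to be a valid context):

    static void example_sequential_types(LLVMContextRef Ctx) {
      LLVMTypeRef I32 = LLVMInt32TypeInContext(Ctx);
      LLVMTypeRef Arr = LLVMArrayType(I32, 16);   /* [16 x i32] */
      LLVMTypeRef Ptr = LLVMPointerType(I32, 0);  /* i32* in address space 0 */
      LLVMTypeRef Vec = LLVMVectorType(I32, 4);   /* <4 x i32> */

      unsigned Len  = LLVMGetArrayLength(Arr);          /* 16 */
      unsigned AS   = LLVMGetPointerAddressSpace(Ptr);  /* 0 */
      unsigned Size = LLVMGetVectorSize(Vec);           /* 4 */
      LLVMTypeRef Elem = LLVMGetElementType(Vec);       /* i32 */
      (void)Len; (void)AS; (void)Size; (void)Elem;
    }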
+
+/**
+ * @defgroup LLVMCCoreTypeOther Other Types
+ *
+ * @{
+ */
+
+/**
+ * Create a void type in a context.
+ */
LLVMTypeRef LLVMVoidTypeInContext(LLVMContextRef C);
+
+/**
+ * Create a label type in a context.
+ */
LLVMTypeRef LLVMLabelTypeInContext(LLVMContextRef C);
+
+/**
+ * Create a X86 MMX type in a context.
+ */
LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C);
+/**
+ * These are similar to the above functions except they operate on the
+ * global context.
+ */
LLVMTypeRef LLVMVoidType(void);
LLVMTypeRef LLVMLabelType(void);
LLVMTypeRef LLVMX86MMXType(void);
-/*===-- Values ------------------------------------------------------------===*/
+/**
+ * @}
+ */
-/* The bulk of LLVM's object model consists of values, which comprise a very
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValues Values
+ *
+ * The bulk of LLVM's object model consists of values, which comprise a very
* rich type hierarchy.
+ *
+ * LLVMValueRef essentially represents llvm::Value. There is a rich
+ * hierarchy of classes within this type. Depending on the instance
+ * obtained, not all APIs are available.
+ *
+ * Callers can determine the type of a LLVMValueRef by calling the
+ * LLVMIsA* family of functions (e.g. LLVMIsAArgument()). These
+ * functions are defined by a macro, so it isn't obvious which are
+ * available by looking at the Doxygen source code. Instead, look at the
+ * source definition of LLVM_FOR_EACH_VALUE_SUBCLASS and note the list
+ * of value names given. These value names also correspond to classes in
+ * the llvm::Value hierarchy.
+ *
+ * @{
*/
#define LLVM_FOR_EACH_VALUE_SUBCLASS(macro) \
@@ -473,8 +1018,6 @@ LLVMTypeRef LLVMX86MMXType(void);
macro(IntrinsicInst) \
macro(DbgInfoIntrinsic) \
macro(DbgDeclareInst) \
- macro(EHExceptionInst) \
- macro(EHSelectorInst) \
macro(MemIntrinsic) \
macro(MemCpyInst) \
macro(MemMoveInst) \
@@ -518,92 +1061,399 @@ LLVMTypeRef LLVMX86MMXType(void);
macro(LoadInst) \
macro(VAArgInst)
-/* Operations on all values */
+/**
+ * @defgroup LLVMCCoreValueGeneral General APIs
+ *
+ * Functions in this section work on all LLVMValueRef instances,
+ * regardless of their sub-type. They correspond to functions available
+ * on llvm::Value.
+ *
+ * @{
+ */
+
+/**
+ * Obtain the type of a value.
+ *
+ * @see llvm::Value::getType()
+ */
LLVMTypeRef LLVMTypeOf(LLVMValueRef Val);
+
+/**
+ * Obtain the string name of a value.
+ *
+ * @see llvm::Value::getName()
+ */
const char *LLVMGetValueName(LLVMValueRef Val);
+
+/**
+ * Set the string name of a value.
+ *
+ * @see llvm::Value::setName()
+ */
void LLVMSetValueName(LLVMValueRef Val, const char *Name);
+
+/**
+ * Dump a representation of a value to stderr.
+ *
+ * @see llvm::Value::dump()
+ */
void LLVMDumpValue(LLVMValueRef Val);
+
+/**
+ * Replace all uses of a value with another one.
+ *
+ * @see llvm::Value::replaceAllUsesWith()
+ */
void LLVMReplaceAllUsesWith(LLVMValueRef OldVal, LLVMValueRef NewVal);
-int LLVMHasMetadata(LLVMValueRef Val);
-LLVMValueRef LLVMGetMetadata(LLVMValueRef Val, unsigned KindID);
-void LLVMSetMetadata(LLVMValueRef Val, unsigned KindID, LLVMValueRef Node);
-/* Conversion functions. Return the input value if it is an instance of the
- specified class, otherwise NULL. See llvm::dyn_cast_or_null<>. */
+/**
+ * Determine whether the specified value instance is constant.
+ */
+LLVMBool LLVMIsConstant(LLVMValueRef Val);
+
+/**
+ * Determine whether a value instance is undefined.
+ */
+LLVMBool LLVMIsUndef(LLVMValueRef Val);
+
+/**
+ * Convert value instances between types.
+ *
+ * Internally, a LLVMValueRef is "pinned" to a specific type. This
+ * series of functions allows you to cast an instance to a more
+ * specific subclass.
+ *
+ * If the cast is not valid for the specified type, NULL is returned.
+ *
+ * @see llvm::dyn_cast_or_null<>
+ */
#define LLVM_DECLARE_VALUE_CAST(name) \
LLVMValueRef LLVMIsA##name(LLVMValueRef Val);
LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DECLARE_VALUE_CAST)
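For example (editorial sketch), checking whether a value is a call instruction before using call-site APIs:

    static void example_dyn_cast(LLVMValueRef Val) {
      /* LLVMIsACallInst behaves like dyn_cast_or_null: it returns Val
         if Val is a llvm::CallInst and NULL otherwise. */
      if (LLVMIsACallInst(Val)) {
        /* call-site APIs such as LLVMIsTailCall(Val) now apply */
      }
    }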
-/* Operations on Uses */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueUses Usage
+ *
+ * This module defines functions that allow you to inspect the uses of a
+ * LLVMValueRef.
+ *
+ * It is possible to obtain a LLVMUseRef for any LLVMValueRef instance.
+ * Each LLVMUseRef (which corresponds to a llvm::Use instance) holds a
+ * llvm::User and llvm::Value.
+ *
+ * @{
+ */
+
+/**
+ * Obtain the first use of a value.
+ *
+ * Uses are obtained in an iterator fashion. First, call this function
+ * to obtain a reference to the first use. Then, call LLVMGetNextUse()
+ * on that instance and all subsequently obtained instances until
+ * LLVMGetNextUse() returns NULL.
+ *
+ * @see llvm::Value::use_begin()
+ */
LLVMUseRef LLVMGetFirstUse(LLVMValueRef Val);
+
+/**
+ * Obtain the next use of a value.
+ *
+ * This effectively advances the iterator. It returns NULL if you are on
+ * the final use and no more are available.
+ */
LLVMUseRef LLVMGetNextUse(LLVMUseRef U);
+
+/**
+ * Obtain the user value for a user.
+ *
+ * The returned value corresponds to a llvm::User type.
+ *
+ * @see llvm::Use::getUser()
+ */
LLVMValueRef LLVMGetUser(LLVMUseRef U);
+
+/**
+ * Obtain the value this use corresponds to.
+ *
+ * @see llvm::Use::get().
+ */
LLVMValueRef LLVMGetUsedValue(LLVMUseRef U);
-/* Operations on Users */
+/**
+ * @}
+ */
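The iteration pattern described above looks like this in practice (editorial sketch):

    static void example_walk_uses(LLVMValueRef Val) {
      LLVMUseRef U;
      for (U = LLVMGetFirstUse(Val); U; U = LLVMGetNextUse(U)) {
        LLVMValueRef User = LLVMGetUser(U);      /* the llvm::User */
        LLVMValueRef Used = LLVMGetUsedValue(U); /* the used llvm::Value */
        (void)User; (void)Used;
      }
    }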
+
+/**
+ * @defgroup LLVMCCoreValueUser User value
+ *
+ * Functions in this group pertain to LLVMValueRef instances that descend
+ * from llvm::User. This includes constants, instructions, and
+ * operators.
+ *
+ * @{
+ */
+
+/**
+ * Obtain an operand at a specific index in a llvm::User value.
+ *
+ * @see llvm::User::getOperand()
+ */
LLVMValueRef LLVMGetOperand(LLVMValueRef Val, unsigned Index);
+
+/**
+ * Set an operand at a specific index in a llvm::User value.
+ *
+ * @see llvm::User::setOperand()
+ */
void LLVMSetOperand(LLVMValueRef User, unsigned Index, LLVMValueRef Val);
+
+/**
+ * Obtain the number of operands in a llvm::User value.
+ *
+ * @see llvm::User::getNumOperands()
+ */
int LLVMGetNumOperands(LLVMValueRef Val);
-/* Operations on constants of any type */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueConstant Constants
+ *
+ * This section contains APIs for interacting with LLVMValueRef that
+ * correspond to llvm::Constant instances.
+ *
+ * These functions will work for any LLVMValueRef in the llvm::Constant
+ * class hierarchy.
+ *
+ * @{
+ */
+
+/**
+ * Obtain a constant value referring to the null instance of a type.
+ *
+ * @see llvm::Constant::getNullValue()
+ */
LLVMValueRef LLVMConstNull(LLVMTypeRef Ty); /* all zeroes */
-LLVMValueRef LLVMConstAllOnes(LLVMTypeRef Ty); /* only for int/vector */
+
+/**
+ * Obtain a constant value referring to the instance of a type
+ * consisting of all ones.
+ *
+ * This is only valid for integer and vector-of-integer types.
+ *
+ * @see llvm::Constant::getAllOnesValue()
+ */
+LLVMValueRef LLVMConstAllOnes(LLVMTypeRef Ty);
+
+/**
+ * Obtain a constant value referring to an undefined value of a type.
+ *
+ * @see llvm::UndefValue::get()
+ */
LLVMValueRef LLVMGetUndef(LLVMTypeRef Ty);
-LLVMBool LLVMIsConstant(LLVMValueRef Val);
+
+/**
+ * Determine whether a value instance is null.
+ *
+ * @see llvm::Constant::isNullValue()
+ */
LLVMBool LLVMIsNull(LLVMValueRef Val);
-LLVMBool LLVMIsUndef(LLVMValueRef Val);
+
+/**
+ * Obtain a constant null pointer for the specified pointer type.
+ */
LLVMValueRef LLVMConstPointerNull(LLVMTypeRef Ty);
-/* Operations on metadata */
-LLVMValueRef LLVMMDStringInContext(LLVMContextRef C, const char *Str,
- unsigned SLen);
-LLVMValueRef LLVMMDString(const char *Str, unsigned SLen);
-LLVMValueRef LLVMMDNodeInContext(LLVMContextRef C, LLVMValueRef *Vals,
- unsigned Count);
-LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count);
-const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len);
-int LLVMGetMDNodeNumOperands(LLVMValueRef V);
-LLVMValueRef *LLVMGetMDNodeOperand(LLVMValueRef V, unsigned i);
-unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name);
-void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char* name, LLVMValueRef *Dest);
+/**
+ * @defgroup LLVMCCoreValueConstantScalar Scalar constants
+ *
+ * Functions in this group model LLVMValueRef instances that correspond
+ * to constants referring to scalar types.
+ *
+ * For integer types, the LLVMTypeRef parameter should correspond to a
+ * llvm::IntegerType instance and the returned LLVMValueRef will
+ * correspond to a llvm::ConstantInt.
+ *
+ * For floating point types, the returned LLVMValueRef corresponds to a
+ * llvm::ConstantFP.
+ *
+ * @{
+ */
-/* Operations on scalar constants */
+/**
+ * Obtain a constant value for an integer type.
+ *
+ * The returned value corresponds to a llvm::ConstantInt.
+ *
+ * @see llvm::ConstantInt::get()
+ *
+ * @param IntTy Integer type to obtain value of.
+ * @param N The value the returned instance should refer to.
+ * @param SignExtend Whether to sign extend the produced value.
+ */
LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
LLVMBool SignExtend);
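For instance (editorial sketch), a 32-bit constant -1 in the global context, produced by sign extension:

    static LLVMValueRef example_minus_one(void) {
      LLVMTypeRef I32 = LLVMInt32Type();
      return LLVMConstInt(I32, (unsigned long long)-1, /*SignExtend=*/1);
    }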
+
+/**
+ * Obtain a constant value for an integer of arbitrary precision.
+ *
+ * @see llvm::ConstantInt::get()
+ */
LLVMValueRef LLVMConstIntOfArbitraryPrecision(LLVMTypeRef IntTy,
unsigned NumWords,
const uint64_t Words[]);
+
+/**
+ * Obtain a constant value for an integer parsed from a string.
+ *
+ * A similar API, LLVMConstIntOfStringAndSize, is also available. If the
+ * string's length is known, prefer calling that function instead.
+ *
+ * @see llvm::ConstantInt::get()
+ */
LLVMValueRef LLVMConstIntOfString(LLVMTypeRef IntTy, const char *Text,
uint8_t Radix);
+
+/**
+ * Obtain a constant value for an integer parsed from a string with
+ * specified length.
+ *
+ * @see llvm::ConstantInt::get()
+ */
LLVMValueRef LLVMConstIntOfStringAndSize(LLVMTypeRef IntTy, const char *Text,
unsigned SLen, uint8_t Radix);
+
+/**
+ * Obtain a constant value referring to a double floating point value.
+ */
LLVMValueRef LLVMConstReal(LLVMTypeRef RealTy, double N);
+
+/**
+ * Obtain a constant for a floating point value parsed from a string.
+ *
+ * A similar API, LLVMConstRealOfStringAndSize, is also available. It
+ * should be used if the input string's length is known.
+ */
LLVMValueRef LLVMConstRealOfString(LLVMTypeRef RealTy, const char *Text);
+
+/**
+ * Obtain a constant for a floating point value parsed from a string of
+ * specified length.
+ */
LLVMValueRef LLVMConstRealOfStringAndSize(LLVMTypeRef RealTy, const char *Text,
unsigned SLen);
+
+/**
+ * Obtain the zero extended value for an integer constant value.
+ *
+ * @see llvm::ConstantInt::getZExtValue()
+ */
unsigned long long LLVMConstIntGetZExtValue(LLVMValueRef ConstantVal);
+
+/**
+ * Obtain the sign extended value for an integer constant value.
+ *
+ * @see llvm::ConstantInt::getSExtValue()
+ */
long long LLVMConstIntGetSExtValue(LLVMValueRef ConstantVal);
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueConstantComposite Composite Constants
+ *
+ * Functions in this group operate on composite constants.
+ *
+ * @{
+ */
-/* Operations on composite constants */
+/**
+ * Create a ConstantDataSequential and initialize it with a string.
+ *
+ * @see llvm::ConstantDataArray::getString()
+ */
LLVMValueRef LLVMConstStringInContext(LLVMContextRef C, const char *Str,
unsigned Length, LLVMBool DontNullTerminate);
-LLVMValueRef LLVMConstStructInContext(LLVMContextRef C,
- LLVMValueRef *ConstantVals,
- unsigned Count, LLVMBool Packed);
+/**
+ * Create a ConstantDataSequential with string content in the global context.
+ *
+ * This is the same as LLVMConstStringInContext except it operates on the
+ * global context.
+ *
+ * @see LLVMConstStringInContext()
+ * @see llvm::ConstantDataArray::getString()
+ */
LLVMValueRef LLVMConstString(const char *Str, unsigned Length,
LLVMBool DontNullTerminate);
-LLVMValueRef LLVMConstArray(LLVMTypeRef ElementTy,
- LLVMValueRef *ConstantVals, unsigned Length);
+
+/**
+ * Create an anonymous ConstantStruct with the specified values.
+ *
+ * @see llvm::ConstantStruct::getAnon()
+ */
+LLVMValueRef LLVMConstStructInContext(LLVMContextRef C,
+ LLVMValueRef *ConstantVals,
+ unsigned Count, LLVMBool Packed);
+
+/**
+ * Create a ConstantStruct in the global Context.
+ *
+ * This is the same as LLVMConstStructInContext except it operates on the
+ * global Context.
+ *
+ * @see LLVMConstStructInContext()
+ */
LLVMValueRef LLVMConstStruct(LLVMValueRef *ConstantVals, unsigned Count,
LLVMBool Packed);
+
+/**
+ * Create a ConstantArray from values.
+ *
+ * @see llvm::ConstantArray::get()
+ */
+LLVMValueRef LLVMConstArray(LLVMTypeRef ElementTy,
+ LLVMValueRef *ConstantVals, unsigned Length);
+
+/**
+ * Create a non-anonymous ConstantStruct from values.
+ *
+ * @see llvm::ConstantStruct::get()
+ */
LLVMValueRef LLVMConstNamedStruct(LLVMTypeRef StructTy,
LLVMValueRef *ConstantVals,
unsigned Count);
+
+/**
+ * Create a ConstantVector from values.
+ *
+ * @see llvm::ConstantVector::get()
+ */
LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size);
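Putting the composite constructors together (editorial sketch, global context):

    static void example_composite_constants(void) {
      LLVMTypeRef I32 = LLVMInt32Type();
      LLVMValueRef Vals[3] = {
        LLVMConstInt(I32, 1, 0),
        LLVMConstInt(I32, 2, 0),
        LLVMConstInt(I32, 3, 0)
      };
      LLVMValueRef Arr = LLVMConstArray(I32, Vals, 3); /* [3 x i32] */
      LLVMValueRef S   = LLVMConstStruct(Vals, 3, /*Packed=*/0);
      LLVMValueRef V   = LLVMConstVector(Vals, 3);     /* <3 x i32> */
      (void)Arr; (void)S; (void)V;
    }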
-/* Constant expressions */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueConstantExpressions Constant Expressions
+ *
+ * Functions in this group correspond to APIs on llvm::ConstantExpr.
+ *
+ * @see llvm::ConstantExpr.
+ *
+ * @{
+ */
LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal);
LLVMValueRef LLVMAlignOf(LLVMTypeRef Ty);
LLVMValueRef LLVMSizeOf(LLVMTypeRef Ty);
@@ -690,7 +1540,21 @@ LLVMValueRef LLVMConstInlineAsm(LLVMTypeRef Ty,
LLVMBool HasSideEffects, LLVMBool IsAlignStack);
LLVMValueRef LLVMBlockAddress(LLVMValueRef F, LLVMBasicBlockRef BB);
-/* Operations on global variables, functions, and aliases (globals) */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueConstantGlobals Global Values
+ *
+ * This group contains functions that operate on global values. Functions in
+ * this group relate to functions in the llvm::GlobalValue class tree.
+ *
+ * @see llvm::GlobalValue
+ *
+ * @{
+ */
+
LLVMModuleRef LLVMGetGlobalParent(LLVMValueRef Global);
LLVMBool LLVMIsDeclaration(LLVMValueRef Global);
LLVMLinkage LLVMGetLinkage(LLVMValueRef Global);
@@ -702,7 +1566,15 @@ void LLVMSetVisibility(LLVMValueRef Global, LLVMVisibility Viz);
unsigned LLVMGetAlignment(LLVMValueRef Global);
void LLVMSetAlignment(LLVMValueRef Global, unsigned Bytes);
-/* Operations on global variables */
+/**
+ * @defgroup LLVMCoreValueConstantGlobalVariable Global Variables
+ *
+ * This group contains functions that operate on global variable values.
+ *
+ * @see llvm::GlobalVariable
+ *
+ * @{
+ */
LLVMValueRef LLVMAddGlobal(LLVMModuleRef M, LLVMTypeRef Ty, const char *Name);
LLVMValueRef LLVMAddGlobalInAddressSpace(LLVMModuleRef M, LLVMTypeRef Ty,
const char *Name,
@@ -720,110 +1592,672 @@ void LLVMSetThreadLocal(LLVMValueRef GlobalVar, LLVMBool IsThreadLocal);
LLVMBool LLVMIsGlobalConstant(LLVMValueRef GlobalVar);
void LLVMSetGlobalConstant(LLVMValueRef GlobalVar, LLVMBool IsConstant);
-/* Operations on aliases */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCoreValueConstantGlobalAlias Global Aliases
+ *
+ * This group contains functions that operate on global alias values.
+ *
+ * @see llvm::GlobalAlias
+ *
+ * @{
+ */
LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee,
const char *Name);
-/* Operations on functions */
-LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name,
- LLVMTypeRef FunctionTy);
-LLVMValueRef LLVMGetNamedFunction(LLVMModuleRef M, const char *Name);
-LLVMValueRef LLVMGetFirstFunction(LLVMModuleRef M);
-LLVMValueRef LLVMGetLastFunction(LLVMModuleRef M);
-LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn);
-LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn);
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueFunction Function values
+ *
+ * Functions in this group operate on LLVMValueRef instances that
+ * correspond to llvm::Function instances.
+ *
+ * @see llvm::Function
+ *
+ * @{
+ */
+
+/**
+ * Remove a function from its containing module and delete it.
+ *
+ * @see llvm::Function::eraseFromParent()
+ */
void LLVMDeleteFunction(LLVMValueRef Fn);
+
+/**
+ * Obtain the ID number from a function instance.
+ *
+ * @see llvm::Function::getIntrinsicID()
+ */
unsigned LLVMGetIntrinsicID(LLVMValueRef Fn);
+
+/**
+ * Obtain the calling convention of a function.
+ *
+ * The returned value corresponds to the LLVMCallConv enumeration.
+ *
+ * @see llvm::Function::getCallingConv()
+ */
unsigned LLVMGetFunctionCallConv(LLVMValueRef Fn);
+
+/**
+ * Set the calling convention of a function.
+ *
+ * @see llvm::Function::setCallingConv()
+ *
+ * @param Fn Function to operate on
+ * @param CC LLVMCallConv to set calling convention to
+ */
void LLVMSetFunctionCallConv(LLVMValueRef Fn, unsigned CC);
+
+/**
+ * Obtain the name of the garbage collector to use during code
+ * generation.
+ *
+ * @see llvm::Function::getGC()
+ */
const char *LLVMGetGC(LLVMValueRef Fn);
+
+/**
+ * Define the garbage collector to use during code generation.
+ *
+ * @see llvm::Function::setGC()
+ */
void LLVMSetGC(LLVMValueRef Fn, const char *Name);
+
+/**
+ * Add an attribute to a function.
+ *
+ * @see llvm::Function::addAttribute()
+ */
void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA);
+
+/**
+ * Obtain an attribute from a function.
+ *
+ * @see llvm::Function::getAttributes()
+ */
LLVMAttribute LLVMGetFunctionAttr(LLVMValueRef Fn);
+
+/**
+ * Remove an attribute from a function.
+ */
void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA);
-/* Operations on parameters */
+/**
+ * @defgroup LLVMCCoreValueFunctionParameters Function Parameters
+ *
+ * Functions in this group relate to arguments/parameters on functions.
+ *
+ * Functions in this group expect LLVMValueRef instances that correspond
+ * to llvm::Function instances.
+ *
+ * @{
+ */
+
+/**
+ * Obtain the number of parameters in a function.
+ *
+ * @see llvm::Function::arg_size()
+ */
unsigned LLVMCountParams(LLVMValueRef Fn);
+
+/**
+ * Obtain the parameters in a function.
+ *
+ * This takes a pointer to a pre-allocated array of LLVMValueRef that is
+ * at least LLVMCountParams() long. This array will be filled with
+ * LLVMValueRef instances which correspond to the parameters the
+ * function receives. Each LLVMValueRef corresponds to a llvm::Argument
+ * instance.
+ *
+ * @see llvm::Function::arg_begin()
+ */
void LLVMGetParams(LLVMValueRef Fn, LLVMValueRef *Params);
+
+/**
+ * Obtain the parameter at the specified index.
+ *
+ * Parameters are indexed from 0.
+ *
+ * @see llvm::Function::arg_begin()
+ */
LLVMValueRef LLVMGetParam(LLVMValueRef Fn, unsigned Index);
+
+/**
+ * Obtain the function to which this argument belongs.
+ *
+ * Unlike other functions in this group, this one takes a LLVMValueRef
+ * that corresponds to a llvm::Argument.
+ *
+ * The returned LLVMValueRef is the llvm::Function to which this
+ * argument belongs.
+ */
LLVMValueRef LLVMGetParamParent(LLVMValueRef Inst);
+
+/**
+ * Obtain the first parameter to a function.
+ *
+ * @see llvm::Function::arg_begin()
+ */
LLVMValueRef LLVMGetFirstParam(LLVMValueRef Fn);
+
+/**
+ * Obtain the last parameter to a function.
+ *
+ * @see llvm::Function::arg_end()
+ */
LLVMValueRef LLVMGetLastParam(LLVMValueRef Fn);
+
+/**
+ * Obtain the next parameter to a function.
+ *
+ * This takes a LLVMValueRef obtained from LLVMGetFirstParam() (which is
+ * actually a wrapped iterator) and obtains the next parameter from the
+ * underlying iterator.
+ */
LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg);
+
+/**
+ * Obtain the previous parameter to a function.
+ *
+ * This is the opposite of LLVMGetNextParam().
+ */
LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg);
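The first/next pair gives the usual iteration pattern (editorial sketch; Fn is assumed to be a function value):

    static void example_walk_params(LLVMValueRef Fn) {
      LLVMValueRef P;
      for (P = LLVMGetFirstParam(Fn); P; P = LLVMGetNextParam(P)) {
        /* each P corresponds to a llvm::Argument */
      }
    }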
+
+/**
+ * Add an attribute to a function argument.
+ *
+ * @see llvm::Argument::addAttr()
+ */
void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA);
+
+/**
+ * Remove an attribute from a function argument.
+ *
+ * @see llvm::Argument::removeAttr()
+ */
void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA);
+
+/**
+ * Get an attribute from a function argument.
+ */
LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg);
+
+/**
+ * Set the alignment for a function parameter.
+ *
+ * @see llvm::Argument::addAttr()
+ * @see llvm::Attribute::constructAlignmentFromInt()
+ */
void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align);
-/* Operations on basic blocks */
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueMetadata Metadata
+ *
+ * @{
+ */
+
+/**
+ * Obtain a MDString value from a context.
+ *
+ * The returned instance corresponds to the llvm::MDString class.
+ *
+ * The instance is specified by string data of a specified length. The
+ * string content is copied, so the backing memory can be freed after
+ * this function returns.
+ */
+LLVMValueRef LLVMMDStringInContext(LLVMContextRef C, const char *Str,
+ unsigned SLen);
+
+/**
+ * Obtain a MDString value from the global context.
+ */
+LLVMValueRef LLVMMDString(const char *Str, unsigned SLen);
+
+/**
+ * Obtain a MDNode value from a context.
+ *
+ * The returned value corresponds to the llvm::MDNode class.
+ */
+LLVMValueRef LLVMMDNodeInContext(LLVMContextRef C, LLVMValueRef *Vals,
+ unsigned Count);
+
+/**
+ * Obtain a MDNode value from the global context.
+ */
+LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count);
+
+/**
+ * Obtain the underlying string from a MDString value.
+ *
+ * @param V Instance to obtain string from.
+ * @param Len Memory address which will hold length of returned string.
+ * @return String data in MDString.
+ */
+const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len);
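A round trip through the MDString APIs (editorial sketch):

    static void example_mdstring(LLVMContextRef Ctx) {
      LLVMValueRef MD = LLVMMDStringInContext(Ctx, "hello", 5);
      unsigned Len = 0;
      const char *Data = LLVMGetMDString(MD, &Len); /* "hello", Len == 5 */
      (void)Data;
    }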
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueBasicBlock Basic Block
+ *
+ * A basic block represents a single entry single exit section of code.
+ * Basic blocks contain a list of instructions which form the body of
+ * the block.
+ *
+ * Basic blocks belong to functions. Their type is label.
+ *
+ * Basic blocks are themselves values. However, the C API models them as
+ * LLVMBasicBlockRef.
+ *
+ * @see llvm::BasicBlock
+ *
+ * @{
+ */
+
+/**
+ * Convert a basic block instance to a value type.
+ */
LLVMValueRef LLVMBasicBlockAsValue(LLVMBasicBlockRef BB);
+
+/**
+ * Determine whether a LLVMValueRef is itself a basic block.
+ */
LLVMBool LLVMValueIsBasicBlock(LLVMValueRef Val);
+
+/**
+ * Convert a LLVMValueRef to a LLVMBasicBlockRef instance.
+ */
LLVMBasicBlockRef LLVMValueAsBasicBlock(LLVMValueRef Val);
+
+/**
+ * Obtain the function to which a basic block belongs.
+ *
+ * @see llvm::BasicBlock::getParent()
+ */
LLVMValueRef LLVMGetBasicBlockParent(LLVMBasicBlockRef BB);
+
+/**
+ * Obtain the terminator instruction for a basic block.
+ *
+ * If the basic block does not have a terminator (it is not well-formed
+ * if it doesn't), then NULL is returned.
+ *
+ * The returned LLVMValueRef corresponds to a llvm::TerminatorInst.
+ *
+ * @see llvm::BasicBlock::getTerminator()
+ */
LLVMValueRef LLVMGetBasicBlockTerminator(LLVMBasicBlockRef BB);
+
+/**
+ * Obtain the number of basic blocks in a function.
+ *
+ * @param Fn Function value to operate on.
+ */
unsigned LLVMCountBasicBlocks(LLVMValueRef Fn);
+
+/**
+ * Obtain all of the basic blocks in a function.
+ *
+ * This operates on a function value. The BasicBlocks parameter is a
+ * pointer to a pre-allocated array of LLVMBasicBlockRef of at least
+ * LLVMCountBasicBlocks() in length. This array is populated with
+ * LLVMBasicBlockRef instances.
+ */
void LLVMGetBasicBlocks(LLVMValueRef Fn, LLVMBasicBlockRef *BasicBlocks);
+
+/**
+ * Obtain the first basic block in a function.
+ *
+ * The returned basic block can be used as an iterator. You will likely
+ * eventually call into LLVMGetNextBasicBlock() with it.
+ *
+ * @see llvm::Function::begin()
+ */
LLVMBasicBlockRef LLVMGetFirstBasicBlock(LLVMValueRef Fn);
+
+/**
+ * Obtain the last basic block in a function.
+ *
+ * @see llvm::Function::end()
+ */
LLVMBasicBlockRef LLVMGetLastBasicBlock(LLVMValueRef Fn);
+
+/**
+ * Advance a basic block iterator.
+ */
LLVMBasicBlockRef LLVMGetNextBasicBlock(LLVMBasicBlockRef BB);
+
+/**
+ * Go backwards in a basic block iterator.
+ */
LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB);
+
+/**
+ * Obtain the basic block that corresponds to the entry point of a
+ * function.
+ *
+ * @see llvm::Function::getEntryBlock()
+ */
LLVMBasicBlockRef LLVMGetEntryBasicBlock(LLVMValueRef Fn);
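The basic-block iterators compose in the usual way (editorial sketch):

    static void example_walk_blocks(LLVMValueRef Fn) {
      LLVMBasicBlockRef BB;
      for (BB = LLVMGetFirstBasicBlock(Fn); BB;
           BB = LLVMGetNextBasicBlock(BB)) {
        /* NULL if the block lacks a terminator (i.e. is not well formed) */
        LLVMValueRef Term = LLVMGetBasicBlockTerminator(BB);
        (void)Term;
      }
    }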
+/**
+ * Append a basic block to the end of a function.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
LLVMBasicBlockRef LLVMAppendBasicBlockInContext(LLVMContextRef C,
LLVMValueRef Fn,
const char *Name);
+
+/**
+ * Append a basic block to the end of a function using the global
+ * context.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
+LLVMBasicBlockRef LLVMAppendBasicBlock(LLVMValueRef Fn, const char *Name);
+
+/**
+ * Insert a basic block in a function before another basic block.
+ *
+ * The function to add the new block to is determined by the function
+ * of the passed basic block.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
LLVMBasicBlockRef LLVMInsertBasicBlockInContext(LLVMContextRef C,
LLVMBasicBlockRef BB,
const char *Name);
-LLVMBasicBlockRef LLVMAppendBasicBlock(LLVMValueRef Fn, const char *Name);
+/**
+ * Insert a basic block in a function using the global context.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
LLVMBasicBlockRef LLVMInsertBasicBlock(LLVMBasicBlockRef InsertBeforeBB,
const char *Name);
+
+/**
+ * Remove a basic block from a function and delete it.
+ *
+ * This deletes the basic block from its containing function and deletes
+ * the basic block itself.
+ *
+ * @see llvm::BasicBlock::eraseFromParent()
+ */
void LLVMDeleteBasicBlock(LLVMBasicBlockRef BB);
+
+/**
+ * Remove a basic block from a function.
+ *
+ * This deletes the basic block from its containing function but keeps
+ * the basic block alive.
+ *
+ * @see llvm::BasicBlock::removeFromParent()
+ */
void LLVMRemoveBasicBlockFromParent(LLVMBasicBlockRef BB);
+/**
+ * Move a basic block to before another one.
+ *
+ * @see llvm::BasicBlock::moveBefore()
+ */
void LLVMMoveBasicBlockBefore(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos);
+
+/**
+ * Move a basic block to after another one.
+ *
+ * @see llvm::BasicBlock::moveAfter()
+ */
void LLVMMoveBasicBlockAfter(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos);
+/**
+ * Obtain the first instruction in a basic block.
+ *
+ * The returned LLVMValueRef corresponds to a llvm::Instruction
+ * instance.
+ */
LLVMValueRef LLVMGetFirstInstruction(LLVMBasicBlockRef BB);
+
+/**
+ * Obtain the last instruction in a basic block.
+ *
+ * The returned LLVMValueRef corresponds to a llvm::Instruction.
+ */
LLVMValueRef LLVMGetLastInstruction(LLVMBasicBlockRef BB);
-/* Operations on instructions */
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueInstruction Instructions
+ *
+ * Functions in this group relate to the inspection and manipulation of
+ * individual instructions.
+ *
+ * In the C++ API, an instruction is modeled by llvm::Instruction. This
+ * class has a large number of descendants. llvm::Instruction is a
+ * llvm::Value and in the C API, instructions are modeled by
+ * LLVMValueRef.
+ *
+ * This group also contains sub-groups which operate on specific
+ * llvm::Instruction types, e.g. llvm::CallInst.
+ *
+ * @{
+ */
+
+/**
+ * Determine whether an instruction has any metadata attached.
+ */
+int LLVMHasMetadata(LLVMValueRef Val);
+
+/**
+ * Return metadata associated with an instruction value.
+ */
+LLVMValueRef LLVMGetMetadata(LLVMValueRef Val, unsigned KindID);
+
+/**
+ * Set metadata associated with an instruction value.
+ */
+void LLVMSetMetadata(LLVMValueRef Val, unsigned KindID, LLVMValueRef Node);
+
+/**
+ * Obtain the basic block to which an instruction belongs.
+ *
+ * @see llvm::Instruction::getParent()
+ */
LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst);
+
+/**
+ * Obtain the instruction that occurs after the one specified.
+ *
+ * The next instruction will be from the same basic block.
+ *
+ * If this is the last instruction in a basic block, NULL will be
+ * returned.
+ */
LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst);
+
+/**
+ * Obtain the instruction that occurred before this one.
+ *
+ * If the instruction is the first instruction in a basic block, NULL
+ * will be returned.
+ */
LLVMValueRef LLVMGetPreviousInstruction(LLVMValueRef Inst);
+
+/**
+ * Remove and delete an instruction.
+ *
+ * The instruction specified is removed from its containing basic
+ * block and then deleted.
+ *
+ * @see llvm::Instruction::eraseFromParent()
+ */
void LLVMInstructionEraseFromParent(LLVMValueRef Inst);
+
+/**
+ * Obtain the opcode for an individual instruction.
+ *
+ * @see llvm::Instruction::getOpcode()
+ */
LLVMOpcode LLVMGetInstructionOpcode(LLVMValueRef Inst);
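Instruction iteration mirrors the basic-block pattern (editorial sketch):

    static void example_walk_insts(LLVMBasicBlockRef BB) {
      LLVMValueRef I;
      for (I = LLVMGetFirstInstruction(BB); I;
           I = LLVMGetNextInstruction(I)) {
        LLVMOpcode Op = LLVMGetInstructionOpcode(I);
        (void)Op;
      }
    }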
+
+/**
+ * Obtain the predicate of an instruction.
+ *
+ * This is only valid for instructions that correspond to llvm::ICmpInst
+ * or llvm::ConstantExpr whose opcode is llvm::Instruction::ICmp.
+ *
+ * @see llvm::ICmpInst::getPredicate()
+ */
LLVMIntPredicate LLVMGetICmpPredicate(LLVMValueRef Inst);
-/* Operations on call sites */
+/**
+ * @defgroup LLVMCCoreValueInstructionCall Call Sites and Invocations
+ *
+ * Functions in this group apply to instructions that refer to call
+ * sites and invocations. These correspond to C++ types in the
+ * llvm::CallInst class tree.
+ *
+ * @{
+ */
+
+/**
+ * Set the calling convention for a call instruction.
+ *
+ * This expects an LLVMValueRef that corresponds to a llvm::CallInst or
+ * llvm::InvokeInst.
+ *
+ * @see llvm::CallInst::setCallingConv()
+ * @see llvm::InvokeInst::setCallingConv()
+ */
void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC);
+
+/**
+ * Obtain the calling convention for a call instruction.
+ *
+ * This is the opposite of LLVMSetInstructionCallConv(). See its
+ * documentation for usage details.
+ *
+ * @see LLVMSetInstructionCallConv()
+ */
unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr);
+
+
void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index, LLVMAttribute);
-void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index,
+void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index,
LLVMAttribute);
-void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
+void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
unsigned align);
-/* Operations on call instructions (only) */
+/**
+ * Obtain whether a call instruction is a tail call.
+ *
+ * This only works on llvm::CallInst instructions.
+ *
+ * @see llvm::CallInst::isTailCall()
+ */
LLVMBool LLVMIsTailCall(LLVMValueRef CallInst);
+
+/**
+ * Set whether a call instruction is a tail call.
+ *
+ * This only works on llvm::CallInst instructions.
+ *
+ * @see llvm::CallInst::setTailCall()
+ */
void LLVMSetTailCall(LLVMValueRef CallInst, LLVMBool IsTailCall);
-/* Operations on switch instructions (only) */
+/**
+ * @}
+ */
+
+/**
+ * Obtain the default destination basic block of a switch instruction.
+ *
+ * This only works on llvm::SwitchInst instructions.
+ *
+ * @see llvm::SwitchInst::getDefaultDest()
+ */
LLVMBasicBlockRef LLVMGetSwitchDefaultDest(LLVMValueRef SwitchInstr);
-/* Operations on phi nodes */
+/**
+ * @defgroup LLVMCCoreValueInstructionPHINode PHI Nodes
+ *
+ * Functions in this group only apply to instructions that map to
+ * llvm::PHINode instances.
+ *
+ * @{
+ */
+
+/**
+ * Add an incoming value to the end of a PHI list.
+ */
void LLVMAddIncoming(LLVMValueRef PhiNode, LLVMValueRef *IncomingValues,
LLVMBasicBlockRef *IncomingBlocks, unsigned Count);
+
+/**
+ * Obtain the number of incoming basic blocks to a PHI node.
+ */
unsigned LLVMCountIncoming(LLVMValueRef PhiNode);
+
+/**
+ * Obtain an incoming value to a PHI node as a LLVMValueRef.
+ */
LLVMValueRef LLVMGetIncomingValue(LLVMValueRef PhiNode, unsigned Index);
+
+/**
+ * Obtain an incoming basic block to a PHI node as a LLVMBasicBlockRef.
+ */
LLVMBasicBlockRef LLVMGetIncomingBlock(LLVMValueRef PhiNode, unsigned Index);
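Wiring up a PHI node with two incoming edges (editorial sketch; the values and blocks are assumed to exist already):

    static void example_phi(LLVMValueRef Phi,
                            LLVMValueRef V1, LLVMBasicBlockRef B1,
                            LLVMValueRef V2, LLVMBasicBlockRef B2) {
      LLVMValueRef Vals[2] = { V1, V2 };
      LLVMBasicBlockRef Blocks[2] = { B1, B2 };
      LLVMAddIncoming(Phi, Vals, Blocks, 2);
      /* LLVMCountIncoming(Phi) now reports 2 */
    }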
-/*===-- Instruction builders ----------------------------------------------===*/
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
-/* An instruction builder represents a point within a basic block, and is the
- * exclusive means of building instructions using the C interface.
+/**
+ * @defgroup LLVMCCoreInstructionBuilder Instruction Builders
+ *
+ * An instruction builder represents a point within a basic block and is
+ * the exclusive means of building instructions using the C interface.
+ *
+ * @{
*/
LLVMBuilderRef LLVMCreateBuilderInContext(LLVMContextRef C);
@@ -964,6 +2398,8 @@ LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
const char *Name);
LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
const char *Name);
+LLVMBool LLVMGetVolatile(LLVMValueRef MemoryAccessInst);
+void LLVMSetVolatile(LLVMValueRef MemoryAccessInst, LLVMBool IsVolatile);
/* Casts */
LLVMValueRef LLVMBuildTrunc(LLVMBuilderRef, LLVMValueRef Val,
@@ -1044,21 +2480,37 @@ LLVMValueRef LLVMBuildIsNotNull(LLVMBuilderRef, LLVMValueRef Val,
LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef, LLVMValueRef LHS,
LLVMValueRef RHS, const char *Name);
+/**
+ * @}
+ */
-/*===-- Module providers --------------------------------------------------===*/
+/**
+ * @defgroup LLVMCCoreModuleProvider Module Providers
+ *
+ * @{
+ */
-/* Changes the type of M so it can be passed to FunctionPassManagers and the
+/**
+ * Changes the type of M so it can be passed to FunctionPassManagers and the
* JIT. They take ModuleProviders for historical reasons.
*/
LLVMModuleProviderRef
LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M);
-/* Destroys the module M.
+/**
+ * Destroys the module M.
*/
void LLVMDisposeModuleProvider(LLVMModuleProviderRef M);
+/**
+ * @}
+ */
-/*===-- Memory buffers ----------------------------------------------------===*/
+/**
+ * @defgroup LLVMCCoreMemoryBuffers Memory Buffers
+ *
+ * @{
+ */
LLVMBool LLVMCreateMemoryBufferWithContentsOfFile(const char *Path,
LLVMMemoryBufferRef *OutMemBuf,
@@ -1067,23 +2519,39 @@ LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
char **OutMessage);
void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf);
-/*===-- Pass Registry -----------------------------------------------------===*/
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCorePassRegistry Pass Registry
+ *
+ * @{
+ */
/** Return the global pass registry, for use with initialization functions.
- See llvm::PassRegistry::getPassRegistry. */
+ @see llvm::PassRegistry::getPassRegistry */
LLVMPassRegistryRef LLVMGetGlobalPassRegistry(void);
-/*===-- Pass Managers -----------------------------------------------------===*/
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCorePassManagers Pass Managers
+ *
+ * @{
+ */
/** Constructs a new whole-module pass pipeline. This type of pipeline is
suitable for link-time optimization and whole-module transformations.
- See llvm::PassManager::PassManager. */
+ @see llvm::PassManager::PassManager */
LLVMPassManagerRef LLVMCreatePassManager(void);
/** Constructs a new function-by-function pass pipeline over the module
provider. It does not take ownership of the module provider. This type of
pipeline is suitable for code generation and JIT compilation tasks.
- See llvm::FunctionPassManager::FunctionPassManager. */
+ @see llvm::FunctionPassManager::FunctionPassManager */
LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M);
/** Deprecated: Use LLVMCreateFunctionPassManagerForModule instead. */
@@ -1091,30 +2559,42 @@ LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef MP);
/** Initializes, executes on the provided module, and finalizes all of the
passes scheduled in the pass manager. Returns 1 if any of the passes
- modified the module, 0 otherwise. See llvm::PassManager::run(Module&). */
+ modified the module, 0 otherwise.
+ @see llvm::PassManager::run(Module&) */
LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M);
/** Initializes all of the function passes scheduled in the function pass
manager. Returns 1 if any of the passes modified the module, 0 otherwise.
- See llvm::FunctionPassManager::doInitialization. */
+ @see llvm::FunctionPassManager::doInitialization */
LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM);
/** Executes all of the function passes scheduled in the function pass manager
on the provided function. Returns 1 if any of the passes modified the
function, 0 otherwise.
- See llvm::FunctionPassManager::run(Function&). */
+ @see llvm::FunctionPassManager::run(Function&) */
LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F);
/** Finalizes all of the function passes scheduled in the function pass
manager. Returns 1 if any of the passes modified the module, 0 otherwise.
- See llvm::FunctionPassManager::doFinalization. */
+ @see llvm::FunctionPassManager::doFinalization */
LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM);
/** Frees the memory of a pass pipeline. For function pipelines, does not free
the module provider.
- See llvm::PassManagerBase::~PassManagerBase. */
+ @see llvm::PassManagerBase::~PassManagerBase. */
void LLVMDisposePassManager(LLVMPassManagerRef PM);
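End to end, a module pipeline runs like this (editorial sketch; the pass-adding calls come from the llvm-c/Transforms headers):

    static void example_run_passes(LLVMModuleRef M) {
      LLVMPassManagerRef PM = LLVMCreatePassManager();
      /* passes would be scheduled here */
      LLVMBool Changed = LLVMRunPassManager(PM, M); /* 1 if M was modified */
      LLVMDisposePassManager(PM);
      (void)Changed;
    }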
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
#ifdef __cplusplus
}
diff --git a/contrib/llvm/include/llvm-c/Disassembler.h b/contrib/llvm/include/llvm-c/Disassembler.h
index bf2f276..a676e37 100644
--- a/contrib/llvm/include/llvm-c/Disassembler.h
+++ b/contrib/llvm/include/llvm-c/Disassembler.h
@@ -19,6 +19,13 @@
#include <stddef.h>
/**
+ * @defgroup LLVMCDisassembler Disassembler
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+/**
* An opaque reference to a disassembler context.
*/
typedef void *LLVMDisasmContextRef;
@@ -157,6 +164,10 @@ size_t LLVMDisasmInstruction(LLVMDisasmContextRef DC, uint8_t *Bytes,
uint64_t BytesSize, uint64_t PC,
char *OutString, size_t OutStringSize);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
#endif /* !defined(__cplusplus) */
diff --git a/contrib/llvm/include/llvm-c/EnhancedDisassembly.h b/contrib/llvm/include/llvm-c/EnhancedDisassembly.h
index 0c173c2..71a0d49 100644
--- a/contrib/llvm/include/llvm-c/EnhancedDisassembly.h
+++ b/contrib/llvm/include/llvm-c/EnhancedDisassembly.h
@@ -25,6 +25,19 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCEnhancedDisassembly Enhanced Disassembly
+ * @ingroup LLVMC
+ * @deprecated
+ *
+ * This module contains an interface to the Enhanced Disassembly (edis)
+ * library. The edis library is deprecated and will likely disappear in
+ * the near future. You should use the @ref LLVMCDisassembler interface
+ * instead.
+ *
+ * @{
+ */
+
/*!
@typedef EDByteReaderCallback
Interface to memory from which instructions may be read.
@@ -504,6 +517,10 @@ int EDBlockEvaluateOperand(uint64_t *result,
int EDBlockVisitTokens(EDInstRef inst,
EDTokenVisitor_t visitor);
+/**
+ * @}
+ */
+
#endif
#ifdef __cplusplus
diff --git a/contrib/llvm/include/llvm-c/ExecutionEngine.h b/contrib/llvm/include/llvm-c/ExecutionEngine.h
index f5f4061..cb77bb2 100644
--- a/contrib/llvm/include/llvm-c/ExecutionEngine.h
+++ b/contrib/llvm/include/llvm-c/ExecutionEngine.h
@@ -26,6 +26,13 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCExecutionEngine Execution Engine
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
void LLVMLinkInJIT(void);
void LLVMLinkInInterpreter(void);
@@ -125,6 +132,10 @@ void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
diff --git a/contrib/llvm/include/llvm-c/Initialization.h b/contrib/llvm/include/llvm-c/Initialization.h
index 3b59abb..cb3ab9e 100644
--- a/contrib/llvm/include/llvm-c/Initialization.h
+++ b/contrib/llvm/include/llvm-c/Initialization.h
@@ -22,9 +22,19 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCInitialization Initialization Routines
+ * @ingroup LLVMC
+ *
+ * This module contains routines used to initialize the LLVM system.
+ *
+ * @{
+ */
+
void LLVMInitializeCore(LLVMPassRegistryRef R);
void LLVMInitializeTransformUtils(LLVMPassRegistryRef R);
void LLVMInitializeScalarOpts(LLVMPassRegistryRef R);
+void LLVMInitializeVectorization(LLVMPassRegistryRef R);
void LLVMInitializeInstCombine(LLVMPassRegistryRef R);
void LLVMInitializeIPO(LLVMPassRegistryRef R);
void LLVMInitializeInstrumentation(LLVMPassRegistryRef R);
@@ -33,6 +43,10 @@ void LLVMInitializeIPA(LLVMPassRegistryRef R);
void LLVMInitializeCodeGen(LLVMPassRegistryRef R);
void LLVMInitializeTarget(LLVMPassRegistryRef R);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
#endif
diff --git a/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h b/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h
index fca3946..5338d3f 100644
--- a/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h
+++ b/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h
@@ -20,6 +20,13 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCLinkTimeOptimizer Link Time Optimization
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
/// This provides a dummy type for pointers to the LTO object.
typedef void* llvm_lto_t;
@@ -51,6 +58,10 @@ extern "C" {
extern llvm_lto_status_t llvm_optimize_modules
(llvm_lto_t lto, const char* output_filename);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
#endif
diff --git a/contrib/llvm/include/llvm-c/Object.h b/contrib/llvm/include/llvm-c/Object.h
index 7b1cf71..e2dad62 100644
--- a/contrib/llvm/include/llvm-c/Object.h
+++ b/contrib/llvm/include/llvm-c/Object.h
@@ -28,23 +28,74 @@
extern "C" {
#endif
-
+/**
+ * @defgroup LLVMCObject Object file reading and writing
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+// Opaque type wrappers
typedef struct LLVMOpaqueObjectFile *LLVMObjectFileRef;
-
typedef struct LLVMOpaqueSectionIterator *LLVMSectionIteratorRef;
+typedef struct LLVMOpaqueSymbolIterator *LLVMSymbolIteratorRef;
+typedef struct LLVMOpaqueRelocationIterator *LLVMRelocationIteratorRef;
+// ObjectFile creation
LLVMObjectFileRef LLVMCreateObjectFile(LLVMMemoryBufferRef MemBuf);
void LLVMDisposeObjectFile(LLVMObjectFileRef ObjectFile);
+// ObjectFile Section iterators
LLVMSectionIteratorRef LLVMGetSections(LLVMObjectFileRef ObjectFile);
void LLVMDisposeSectionIterator(LLVMSectionIteratorRef SI);
LLVMBool LLVMIsSectionIteratorAtEnd(LLVMObjectFileRef ObjectFile,
LLVMSectionIteratorRef SI);
void LLVMMoveToNextSection(LLVMSectionIteratorRef SI);
+void LLVMMoveToContainingSection(LLVMSectionIteratorRef Sect,
+ LLVMSymbolIteratorRef Sym);
+
+// ObjectFile Symbol iterators
+LLVMSymbolIteratorRef LLVMGetSymbols(LLVMObjectFileRef ObjectFile);
+void LLVMDisposeSymbolIterator(LLVMSymbolIteratorRef SI);
+LLVMBool LLVMIsSymbolIteratorAtEnd(LLVMObjectFileRef ObjectFile,
+ LLVMSymbolIteratorRef SI);
+void LLVMMoveToNextSymbol(LLVMSymbolIteratorRef SI);
+
+// SectionRef accessors
const char *LLVMGetSectionName(LLVMSectionIteratorRef SI);
uint64_t LLVMGetSectionSize(LLVMSectionIteratorRef SI);
const char *LLVMGetSectionContents(LLVMSectionIteratorRef SI);
-
+uint64_t LLVMGetSectionAddress(LLVMSectionIteratorRef SI);
+LLVMBool LLVMGetSectionContainsSymbol(LLVMSectionIteratorRef SI,
+ LLVMSymbolIteratorRef Sym);
+
+// Section Relocation iterators
+LLVMRelocationIteratorRef LLVMGetRelocations(LLVMSectionIteratorRef Section);
+void LLVMDisposeRelocationIterator(LLVMRelocationIteratorRef RI);
+LLVMBool LLVMIsRelocationIteratorAtEnd(LLVMSectionIteratorRef Section,
+ LLVMRelocationIteratorRef RI);
+void LLVMMoveToNextRelocation(LLVMRelocationIteratorRef RI);
+
+
+// SymbolRef accessors
+const char *LLVMGetSymbolName(LLVMSymbolIteratorRef SI);
+uint64_t LLVMGetSymbolAddress(LLVMSymbolIteratorRef SI);
+uint64_t LLVMGetSymbolFileOffset(LLVMSymbolIteratorRef SI);
+uint64_t LLVMGetSymbolSize(LLVMSymbolIteratorRef SI);
+
+// RelocationRef accessors
+uint64_t LLVMGetRelocationAddress(LLVMRelocationIteratorRef RI);
+uint64_t LLVMGetRelocationOffset(LLVMRelocationIteratorRef RI);
+LLVMSymbolIteratorRef LLVMGetRelocationSymbol(LLVMRelocationIteratorRef RI);
+uint64_t LLVMGetRelocationType(LLVMRelocationIteratorRef RI);
+// NOTE: Caller takes ownership of returned string of the two
+// following functions.
+const char *LLVMGetRelocationTypeName(LLVMRelocationIteratorRef RI);
+const char *LLVMGetRelocationValueString(LLVMRelocationIteratorRef RI);
+
+/**
+ * @}
+ */
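The new iterators use an explicit at-end test rather than a NULL sentinel (editorial sketch):

    #include <llvm-c/Object.h>

    static void example_walk_sections(LLVMMemoryBufferRef Buf) {
      LLVMObjectFileRef OF = LLVMCreateObjectFile(Buf);
      LLVMSectionIteratorRef SI = LLVMGetSections(OF);
      while (!LLVMIsSectionIteratorAtEnd(OF, SI)) {
        const char *Name = LLVMGetSectionName(SI);
        (void)Name;
        LLVMMoveToNextSection(SI);
      }
      LLVMDisposeSectionIterator(SI);
      LLVMDisposeObjectFile(OF);
    }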
#ifdef __cplusplus
}
@@ -68,6 +119,27 @@ namespace llvm {
return reinterpret_cast<LLVMSectionIteratorRef>
(const_cast<section_iterator*>(SI));
}
+
+ inline symbol_iterator *unwrap(LLVMSymbolIteratorRef SI) {
+ return reinterpret_cast<symbol_iterator*>(SI);
+ }
+
+ inline LLVMSymbolIteratorRef
+ wrap(const symbol_iterator *SI) {
+ return reinterpret_cast<LLVMSymbolIteratorRef>
+ (const_cast<symbol_iterator*>(SI));
+ }
+
+ inline relocation_iterator *unwrap(LLVMRelocationIteratorRef SI) {
+ return reinterpret_cast<relocation_iterator*>(SI);
+ }
+
+ inline LLVMRelocationIteratorRef
+ wrap(const relocation_iterator *SI) {
+ return reinterpret_cast<LLVMRelocationIteratorRef>
+ (const_cast<relocation_iterator*>(SI));
+ }
+
}
}
diff --git a/contrib/llvm/include/llvm-c/Target.h b/contrib/llvm/include/llvm-c/Target.h
index 7afaef1..568e60d 100644
--- a/contrib/llvm/include/llvm-c/Target.h
+++ b/contrib/llvm/include/llvm-c/Target.h
@@ -26,6 +26,13 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCTarget Target information
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
enum LLVMByteOrdering { LLVMBigEndian, LLVMLittleEndian };
typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
@@ -47,6 +54,24 @@ typedef struct LLVMStructLayout *LLVMStructLayoutRef;
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
+/* Declare all of the available assembly printer initialization functions. */
+#define LLVM_ASM_PRINTER(TargetName) \
+ void LLVMInitialize##TargetName##AsmPrinter();
+#include "llvm/Config/AsmPrinters.def"
+#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
+
+/* Declare all of the available assembly parser initialization functions. */
+#define LLVM_ASM_PARSER(TargetName) \
+ void LLVMInitialize##TargetName##AsmParser();
+#include "llvm/Config/AsmParsers.def"
+#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
+
+/* Declare all of the available disassembler initialization functions. */
+#define LLVM_DISASSEMBLER(TargetName) \
+ void LLVMInitialize##TargetName##Disassembler();
+#include "llvm/Config/Disassemblers.def"
+#undef LLVM_DISASSEMBLER /* Explicit undef to make SWIG happier */
+
/** LLVMInitializeAllTargetInfos - The main program should call this function if
it wants access to all available targets that LLVM is configured to
support. */
@@ -64,6 +89,43 @@ static inline void LLVMInitializeAllTargets(void) {
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
}
+
+/** LLVMInitializeAllTargetMCs - The main program should call this function if
+ it wants access to all available target MC implementations that LLVM is
+ configured to support. */
+static inline void LLVMInitializeAllTargetMCs(void) {
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetMC();
+#include "llvm/Config/Targets.def"
+#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeAllAsmPrinters - The main program should call this function if
+ it wants all asm printers that LLVM is configured to support, to make them
+ available via the TargetRegistry. */
+static inline void LLVMInitializeAllAsmPrinters() {
+#define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter();
+#include "llvm/Config/AsmPrinters.def"
+#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeAllAsmParsers - The main program should call this function if
+ it wants all asm parsers that LLVM is configured to support, to make them
+ available via the TargetRegistry. */
+static inline void LLVMInitializeAllAsmParsers() {
+#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
+#include "llvm/Config/AsmParsers.def"
+#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeAllDisassemblers - The main program should call this function
+ if it wants all disassemblers that LLVM is configured to support, to make
+ them available via the TargetRegistry. */
+static inline void LLVMInitializeAllDisassemblers() {
+#define LLVM_DISASSEMBLER(TargetName) \
+ LLVMInitialize##TargetName##Disassembler();
+#include "llvm/Config/Disassemblers.def"
+#undef LLVM_DISASSEMBLER /* Explicit undef to make SWIG happier */
+}
/** LLVMInitializeNativeTarget - The main program should call this function to
initialize the native target corresponding to the host. This is useful
@@ -157,6 +219,9 @@ unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef, LLVMTypeRef StructTy,
See the destructor llvm::TargetData::~TargetData. */
void LLVMDisposeTargetData(LLVMTargetDataRef);
+/**
+ * @}
+ */
#ifdef __cplusplus
}
diff --git a/contrib/llvm/include/llvm-c/TargetMachine.h b/contrib/llvm/include/llvm-c/TargetMachine.h
new file mode 100644
index 0000000..0d35d73
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/TargetMachine.h
@@ -0,0 +1,142 @@
+/*===-- llvm-c/TargetMachine.h - Target Machine Library C Interface - C++ -*-=*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to the Target and TargetMachine *|
+|* classes, which can be used to generate assembly or object files. *|
+|* *|
+|* Many exotic languages can interoperate with C code but have a harder time *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TARGETMACHINE_H
+#define LLVM_C_TARGETMACHINE_H
+
+#include "llvm-c/Core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+typedef struct LLVMTargetMachine *LLVMTargetMachineRef;
+typedef struct LLVMTarget *LLVMTargetRef;
+
+typedef enum {
+ LLVMCodeGenLevelNone,
+ LLVMCodeGenLevelLess,
+ LLVMCodeGenLevelDefault,
+ LLVMCodeGenLevelAggressive
+} LLVMCodeGenOptLevel;
+
+typedef enum {
+ LLVMRelocDefault,
+ LLVMRelocStatic,
+ LLVMRelocPIC,
+ LLVMRelocDynamicNoPic
+} LLVMRelocMode;
+
+typedef enum {
+ LLVMCodeModelDefault,
+ LLVMCodeModelJITDefault,
+ LLVMCodeModelSmall,
+ LLVMCodeModelKernel,
+ LLVMCodeModelMedium,
+ LLVMCodeModelLarge
+} LLVMCodeModel;
+
+typedef enum {
+ LLVMAssemblyFile,
+ LLVMObjectFile
+} LLVMCodeGenFileType;
+
+/** Returns the first llvm::Target in the registered targets list. */
+LLVMTargetRef LLVMGetFirstTarget();
+/** Returns the next llvm::Target given a previous one (or null if there's none) */
+LLVMTargetRef LLVMGetNextTarget(LLVMTargetRef T);
+
+/*===-- Target ------------------------------------------------------------===*/
+/** Returns the name of a target. See llvm::Target::getName */
+const char *LLVMGetTargetName(LLVMTargetRef T);
+
+/** Returns the description of a target. See llvm::Target::getDescription */
+const char *LLVMGetTargetDescription(LLVMTargetRef T);
+
+/** Returns whether the target has a JIT */
+LLVMBool LLVMTargetHasJIT(LLVMTargetRef T);
+
+/** Returns whether the target has a TargetMachine associated */
+LLVMBool LLVMTargetHasTargetMachine(LLVMTargetRef T);
+
+/** Returns whether the target has an ASM backend (required for emitting output) */
+LLVMBool LLVMTargetHasAsmBackend(LLVMTargetRef T);
+
+/*===-- Target Machine ----------------------------------------------------===*/
+/** Creates a new llvm::TargetMachine. See llvm::Target::createTargetMachine */
+LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T, char *Triple,
+ char *CPU, char *Features, LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc,
+ LLVMCodeModel CodeModel);
+
+/** Dispose the LLVMTargetMachineRef instance generated by
+ LLVMCreateTargetMachine. */
+void LLVMDisposeTargetMachine(LLVMTargetMachineRef T);
+
+/** Returns the Target used in a TargetMachine */
+LLVMTargetRef LLVMGetTargetMachineTarget(LLVMTargetMachineRef T);
+
+/** Returns the triple used when creating this target machine. See
+ llvm::TargetMachine::getTriple. The result needs to be disposed with
+ LLVMDisposeMessage. */
+char *LLVMGetTargetMachineTriple(LLVMTargetMachineRef T);
+
+/** Returns the CPU used when creating this target machine. See
+ llvm::TargetMachine::getCPU. The result needs to be disposed with
+ LLVMDisposeMessage. */
+char *LLVMGetTargetMachineCPU(LLVMTargetMachineRef T);
+
+/** Returns the feature string used when creating this target machine. See
+ llvm::TargetMachine::getFeatureString. The result needs to be disposed with
+ LLVMDisposeMessage. */
+char *LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T);
+
+/** Returns the llvm::TargetData used for this llvm::TargetMachine. */
+LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T);
+
+/** Emits an asm or object file for the given module to the filename. This
+ wraps several C++-only classes (among them a file stream). Returns any
+ error in ErrorMessage. Use LLVMDisposeMessage to dispose the message. */
+LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
+ char *Filename, LLVMCodeGenFileType codegen, char **ErrorMessage);
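A sketch of the emission path this new header enables (editorial illustration; the triple and output filename are placeholders, targets are assumed to be initialized, and error handling is abbreviated):

    #include <llvm-c/TargetMachine.h>

    static void example_emit_object(LLVMModuleRef M) {
      LLVMTargetRef T = LLVMGetFirstTarget();
      LLVMTargetMachineRef TM = LLVMCreateTargetMachine(
          T, "x86_64-unknown-freebsd", "", "",
          LLVMCodeGenLevelDefault, LLVMRelocDefault, LLVMCodeModelDefault);
      char *Err = 0;
      if (LLVMTargetMachineEmitToFile(TM, M, "out.o", LLVMObjectFile, &Err))
        LLVMDisposeMessage(Err); /* nonzero return signals an error */
      LLVMDisposeTargetMachine(TM);
    }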
+
+
+
+
+#ifdef __cplusplus
+}
+
+namespace llvm {
+ class TargetMachine;
+ class Target;
+
+ inline TargetMachine *unwrap(LLVMTargetMachineRef P) {
+ return reinterpret_cast<TargetMachine*>(P);
+ }
+ inline Target *unwrap(LLVMTargetRef P) {
+ return reinterpret_cast<Target*>(P);
+ }
+ inline LLVMTargetMachineRef wrap(const TargetMachine *P) {
+ return reinterpret_cast<LLVMTargetMachineRef>(
+ const_cast<TargetMachine*>(P));
+ }
+ inline LLVMTargetRef wrap(const Target * P) {
+ return reinterpret_cast<LLVMTargetRef>(const_cast<Target*>(P));
+ }
+}
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/Transforms/IPO.h b/contrib/llvm/include/llvm-c/Transforms/IPO.h
index 710bebe..4480780 100644
--- a/contrib/llvm/include/llvm-c/Transforms/IPO.h
+++ b/contrib/llvm/include/llvm-c/Transforms/IPO.h
@@ -21,6 +21,13 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCTransformsIPO Interprocedural transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
/** See llvm::createArgumentPromotionPass function. */
void LLVMAddArgumentPromotionPass(LLVMPassManagerRef PM);
@@ -63,6 +70,10 @@ void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM);
/** See llvm::createStripSymbolsPass function. */
void LLVMAddStripSymbolsPass(LLVMPassManagerRef PM);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
diff --git a/contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h b/contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h
index fa722c9..cee6e5a 100644
--- a/contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h
+++ b/contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h
@@ -23,6 +23,13 @@ typedef struct LLVMOpaquePassManagerBuilder *LLVMPassManagerBuilderRef;
extern "C" {
#endif
+/**
+ * @defgroup LLVMCTransformsPassManagerBuilder Pass manager builder
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
/** See llvm::PassManagerBuilder. */
LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate(void);
void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB);
@@ -73,6 +80,10 @@ void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
bool Internalize,
bool RunInliner);
+/**
+ * @}
+ */
+
#ifdef __cplusplus
}
diff --git a/contrib/llvm/include/llvm-c/Transforms/Scalar.h b/contrib/llvm/include/llvm-c/Transforms/Scalar.h
index 6015ef9..a2c4d61 100644
--- a/contrib/llvm/include/llvm-c/Transforms/Scalar.h
+++ b/contrib/llvm/include/llvm-c/Transforms/Scalar.h
@@ -25,6 +25,13 @@
extern "C" {
#endif
+/**
+ * @defgroup LLVMCTransformsScalar Scalar transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
/** See llvm::createAggressiveDCEPass function. */
void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM);
@@ -116,6 +123,9 @@ void LLVMAddTypeBasedAliasAnalysisPass(LLVMPassManagerRef PM);
/** See llvm::createBasicAliasAnalysisPass function */
void LLVMAddBasicAliasAnalysisPass(LLVMPassManagerRef PM);
+/**
+ * @}
+ */
#ifdef __cplusplus
}
diff --git a/contrib/llvm/include/llvm-c/Transforms/Vectorize.h b/contrib/llvm/include/llvm-c/Transforms/Vectorize.h
new file mode 100644
index 0000000..9e7c754
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Transforms/Vectorize.h
@@ -0,0 +1,48 @@
+/*===---------------------------Vectorize.h --------------------- -*- C -*-===*\
+|*===----------- Vectorization Transformation Library C Interface ---------===*|
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMVectorize.a, which *|
+|* implements various vectorization transformations of the LLVM IR. *|
+|* *|
+|* Many exotic languages can interoperate with C code but have a harder time *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TRANSFORMS_VECTORIZE_H
+#define LLVM_C_TRANSFORMS_VECTORIZE_H
+
+#include "llvm-c/Core.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTransformsVectorize Vectorization transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
+/** See llvm::createBBVectorizePass function. */
+void LLVMAddBBVectorizePass(LLVMPassManagerRef PM);
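+
+/* A short usage sketch (illustrative only; LLVMCreatePassManager,
+   LLVMRunPassManager and LLVMDisposePassManager are declared in
+   llvm-c/Core.h, and Mod stands for a module built elsewhere):
+
+     LLVMPassManagerRef PM = LLVMCreatePassManager();
+     LLVMAddBBVectorizePass(PM);
+     LLVMRunPassManager(PM, Mod);
+     LLVMDisposePassManager(PM);
+*/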
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
+
diff --git a/contrib/llvm/include/llvm-c/lto.h b/contrib/llvm/include/llvm-c/lto.h
index 7ea7ad0..5d9cecb 100644
--- a/contrib/llvm/include/llvm-c/lto.h
+++ b/contrib/llvm/include/llvm-c/lto.h
@@ -20,24 +20,31 @@
#include <stddef.h>
#include <unistd.h>
+/**
+ * @defgroup LLVMCLTO LTO
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
#define LTO_API_VERSION 4
typedef enum {
LTO_SYMBOL_ALIGNMENT_MASK = 0x0000001F, /* log2 of alignment */
- LTO_SYMBOL_PERMISSIONS_MASK = 0x000000E0,
- LTO_SYMBOL_PERMISSIONS_CODE = 0x000000A0,
- LTO_SYMBOL_PERMISSIONS_DATA = 0x000000C0,
- LTO_SYMBOL_PERMISSIONS_RODATA = 0x00000080,
- LTO_SYMBOL_DEFINITION_MASK = 0x00000700,
- LTO_SYMBOL_DEFINITION_REGULAR = 0x00000100,
- LTO_SYMBOL_DEFINITION_TENTATIVE = 0x00000200,
- LTO_SYMBOL_DEFINITION_WEAK = 0x00000300,
- LTO_SYMBOL_DEFINITION_UNDEFINED = 0x00000400,
+ LTO_SYMBOL_PERMISSIONS_MASK = 0x000000E0,
+ LTO_SYMBOL_PERMISSIONS_CODE = 0x000000A0,
+ LTO_SYMBOL_PERMISSIONS_DATA = 0x000000C0,
+ LTO_SYMBOL_PERMISSIONS_RODATA = 0x00000080,
+ LTO_SYMBOL_DEFINITION_MASK = 0x00000700,
+ LTO_SYMBOL_DEFINITION_REGULAR = 0x00000100,
+ LTO_SYMBOL_DEFINITION_TENTATIVE = 0x00000200,
+ LTO_SYMBOL_DEFINITION_WEAK = 0x00000300,
+ LTO_SYMBOL_DEFINITION_UNDEFINED = 0x00000400,
LTO_SYMBOL_DEFINITION_WEAKUNDEF = 0x00000500,
- LTO_SYMBOL_SCOPE_MASK = 0x00003800,
- LTO_SYMBOL_SCOPE_INTERNAL = 0x00000800,
- LTO_SYMBOL_SCOPE_HIDDEN = 0x00001000,
- LTO_SYMBOL_SCOPE_PROTECTED = 0x00002000,
+ LTO_SYMBOL_SCOPE_MASK = 0x00003800,
+ LTO_SYMBOL_SCOPE_INTERNAL = 0x00000800,
+ LTO_SYMBOL_SCOPE_HIDDEN = 0x00001000,
+ LTO_SYMBOL_SCOPE_PROTECTED = 0x00002000,
LTO_SYMBOL_SCOPE_DEFAULT = 0x00001800,
LTO_SYMBOL_SCOPE_DEFAULT_CAN_BE_HIDDEN = 0x00002800
} lto_symbol_attributes;
@@ -88,7 +95,7 @@ lto_module_is_object_file(const char* path);
* Checks if a file is a loadable object compiled for requested target.
*/
extern bool
-lto_module_is_object_file_for_target(const char* path,
+lto_module_is_object_file_for_target(const char* path,
const char* target_triple_prefix);
@@ -103,7 +110,7 @@ lto_module_is_object_file_in_memory(const void* mem, size_t length);
* Checks if a buffer is a loadable object compiled for requested target.
*/
extern bool
-lto_module_is_object_file_in_memory_for_target(const void* mem, size_t length,
+lto_module_is_object_file_in_memory_for_target(const void* mem, size_t length,
const char* target_triple_prefix);
@@ -244,6 +251,12 @@ lto_codegen_set_assembler_args(lto_code_gen_t cg, const char **args,
int nargs);
/**
+ * Enables the internalize pass during LTO optimizations.
+ */
+extern void
+lto_codegen_set_whole_program_optimization(lto_code_gen_t cg);
+
+/**
* Adds to a list of all global symbols that must exist in the final
* generated code. If a function is not listed, it might be
* inlined into every usage and optimized away.
@@ -251,7 +264,6 @@ lto_codegen_set_assembler_args(lto_code_gen_t cg, const char **args,
extern void
lto_codegen_add_must_preserve_symbol(lto_code_gen_t cg, const char* symbol);
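+
+/* A sketch of typical usage: a linker performing LTO pins its entry point so
+   it survives internalization and inlining, e.g.
+
+     lto_codegen_add_must_preserve_symbol(cg, "_main");
+
+   (the leading underscore follows Mach-O symbol naming; an ELF target would
+   pass "main"). */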
-
/**
* Writes a new object file at the specified path that contains the
* merged contents of all modules added so far.
@@ -260,11 +272,10 @@ lto_codegen_add_must_preserve_symbol(lto_code_gen_t cg, const char* symbol);
extern bool
lto_codegen_write_merged_modules(lto_code_gen_t cg, const char* path);
-
/**
* Generates code for all added modules into one native object file.
* On success returns a pointer to a generated mach-o/ELF buffer and
- * length set to the buffer size. The buffer is owned by the
+ * length set to the buffer size. The buffer is owned by the
* lto_code_gen_t and will be freed when lto_codegen_dispose()
* is called, or lto_codegen_compile() is called again.
* On failure, returns NULL (check lto_get_error_message() for details).
@@ -285,9 +296,13 @@ lto_codegen_compile_to_file(lto_code_gen_t cg, const char** name);
*/
extern void
lto_codegen_debug_options(lto_code_gen_t cg, const char *);
+
#ifdef __cplusplus
}
#endif
+/**
+ * @}
+ */
#endif
diff --git a/contrib/llvm/include/llvm/ADT/APFloat.h b/contrib/llvm/include/llvm/ADT/APFloat.h
index d2566a4..2b466f9 100644
--- a/contrib/llvm/include/llvm/ADT/APFloat.h
+++ b/contrib/llvm/include/llvm/ADT/APFloat.h
@@ -320,6 +320,7 @@ namespace llvm {
const fltSemantics &getSemantics() const { return *semantics; }
bool isZero() const { return category == fcZero; }
bool isNonZero() const { return category != fcZero; }
+ bool isNormal() const { return category == fcNormal; }
bool isNaN() const { return category == fcNaN; }
bool isInfinity() const { return category == fcInfinity; }
bool isNegative() const { return sign; }
@@ -328,8 +329,16 @@ namespace llvm {
APFloat& operator=(const APFloat &);
- /* Return an arbitrary integer value usable for hashing. */
- uint32_t getHashValue() const;
+ /// \brief Overload to compute a hash code for an APFloat value.
+ ///
+ /// Note that the use of hash codes for floating point values is in general
+  /// fraught with peril. Equality is hard to define for these values. For
+ /// example, should negative and positive zero hash to different codes? Are
+ /// they equal or not? This hash value implementation specifically
+ /// emphasizes producing different codes for different inputs in order to
+ /// be used in canonicalization and memoization. As such, equality is
+ /// bitwiseIsEqual, and 0 != -0.
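+  ///
+  /// For example (a sketch of the contract above):
+  /// \code
+  ///   hash_code PosZero = hash_value(APFloat(+0.0));
+  ///   hash_code NegZero = hash_value(APFloat(-0.0));
+  ///   // PosZero != NegZero, mirroring bitwiseIsEqual.
+  /// \endcode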
+ friend hash_code hash_value(const APFloat &Arg);
/// Converts this value into a decimal string.
///
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
index 707e0db..4101989 100644
--- a/contrib/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -23,11 +23,12 @@
#include <string>
namespace llvm {
- class Serializer;
class Deserializer;
class FoldingSetNodeID;
- class raw_ostream;
+ class Serializer;
class StringRef;
+ class hash_code;
+ class raw_ostream;
template<typename T>
class SmallVectorImpl;
@@ -497,15 +498,13 @@ public:
if (loBitsSet == APINT_BITS_PER_WORD)
return APInt(numBits, -1ULL);
// For small values, return quickly.
- if (numBits < APINT_BITS_PER_WORD)
- return APInt(numBits, (1ULL << loBitsSet) - 1);
+ if (loBitsSet <= APINT_BITS_PER_WORD)
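+      // For example, loBitsSet == 3 yields -1ULL >> 61 == 0x7.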
+ return APInt(numBits, -1ULL >> (APINT_BITS_PER_WORD - loBitsSet));
return getAllOnesValue(numBits).lshr(numBits - loBitsSet);
}
- /// The hash value is computed as the sum of the words and the bit width.
- /// @returns A hash value computed from the sum of the APInt words.
- /// @brief Get a hash value based on this APInt
- uint64_t getHashValue() const;
+ /// \brief Overload to compute a hash_code for an APInt value.
+ friend hash_code hash_value(const APInt &Arg);
/// This function returns a pointer to the internal storage of the APInt.
/// This is useful for writing out the APInt in binary form without any
@@ -562,7 +561,15 @@ public:
/// Performs logical negation operation on this APInt.
/// @returns true if *this is zero, false otherwise.
/// @brief Logical negation operator.
- bool operator!() const;
+ bool operator!() const {
+ if (isSingleWord())
+ return !VAL;
+
+ for (unsigned i = 0; i != getNumWords(); ++i)
+ if (pVal[i])
+ return false;
+ return true;
+ }
/// @}
/// @name Assignment Operators
@@ -835,7 +842,11 @@ public:
/// @returns the bit value at bitPosition
/// @brief Array-indexing support.
- bool operator[](unsigned bitPosition) const;
+ bool operator[](unsigned bitPosition) const {
+ assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
+ return (maskBit(bitPosition) &
+ (isSingleWord() ? VAL : pVal[whichWord(bitPosition)])) != 0;
+ }
/// @}
/// @name Comparison Operators
@@ -1056,6 +1067,16 @@ public:
/// @brief Zero extend or truncate to width
APInt zextOrTrunc(unsigned width) const;
+ /// Make this APInt have the bit width given by \p width. The value is sign
+ /// extended, or left alone to make it that width.
+  /// @brief Sign extend to width, or leave the value alone
+ APInt sextOrSelf(unsigned width) const;
+
+ /// Make this APInt have the bit width given by \p width. The value is zero
+ /// extended, or left alone to make it that width.
+  /// @brief Zero extend to width, or leave the value alone
+ APInt zextOrSelf(unsigned width) const;
+
/// @}
/// @name Bit Manipulation Operators
/// @{
diff --git a/contrib/llvm/include/llvm/ADT/ArrayRef.h b/contrib/llvm/include/llvm/ADT/ArrayRef.h
index 33a8c65..f4c8e55 100644
--- a/contrib/llvm/include/llvm/ADT/ArrayRef.h
+++ b/contrib/llvm/include/llvm/ADT/ArrayRef.h
@@ -14,8 +14,7 @@
#include <vector>
namespace llvm {
- class APInt;
-
+
/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
@@ -33,33 +32,33 @@ namespace llvm {
typedef const T *iterator;
typedef const T *const_iterator;
typedef size_t size_type;
-
+
private:
/// The start of the array, in an external buffer.
const T *Data;
-
+
/// The number of elements.
size_type Length;
-
+
public:
/// @name Constructors
/// @{
-
+
/// Construct an empty ArrayRef.
/*implicit*/ ArrayRef() : Data(0), Length(0) {}
-
+
/// Construct an ArrayRef from a single element.
/*implicit*/ ArrayRef(const T &OneElt)
: Data(&OneElt), Length(1) {}
-
+
/// Construct an ArrayRef from a pointer and length.
/*implicit*/ ArrayRef(const T *data, size_t length)
: Data(data), Length(length) {}
-
+
/// Construct an ArrayRef from a range.
ArrayRef(const T *begin, const T *end)
: Data(begin), Length(end - begin) {}
-
+
/// Construct an ArrayRef from a SmallVector.
/*implicit*/ ArrayRef(const SmallVectorImpl<T> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
@@ -67,39 +66,39 @@ namespace llvm {
/// Construct an ArrayRef from a std::vector.
/*implicit*/ ArrayRef(const std::vector<T> &Vec)
: Data(Vec.empty() ? (T*)0 : &Vec[0]), Length(Vec.size()) {}
-
+
/// Construct an ArrayRef from a C array.
template <size_t N>
/*implicit*/ ArrayRef(const T (&Arr)[N])
: Data(Arr), Length(N) {}
-
+
/// @}
/// @name Simple Operations
/// @{
iterator begin() const { return Data; }
iterator end() const { return Data + Length; }
-
+
/// empty - Check if the array is empty.
bool empty() const { return Length == 0; }
-
+
const T *data() const { return Data; }
-
+
/// size - Get the array size.
size_t size() const { return Length; }
-
+
/// front - Get the first element.
const T &front() const {
assert(!empty());
return Data[0];
}
-
+
/// back - Get the last element.
const T &back() const {
assert(!empty());
return Data[Length-1];
}
-
+
/// equals - Check for element-wise equality.
bool equals(ArrayRef RHS) const {
if (Length != RHS.Length)
@@ -111,18 +110,18 @@ namespace llvm {
}
/// slice(n) - Chop off the first N elements of the array.
- ArrayRef<T> slice(unsigned N) {
+ ArrayRef<T> slice(unsigned N) const {
assert(N <= size() && "Invalid specifier");
return ArrayRef<T>(data()+N, size()-N);
}
/// slice(n, m) - Chop off the first N elements of the array, and keep M
/// elements in the array.
- ArrayRef<T> slice(unsigned N, unsigned M) {
+ ArrayRef<T> slice(unsigned N, unsigned M) const {
assert(N+M <= size() && "Invalid specifier");
return ArrayRef<T>(data()+N, M);
}
-
+
/// @}
/// @name Operator Overloads
/// @{
@@ -130,22 +129,104 @@ namespace llvm {
assert(Index < Length && "Invalid index!");
return Data[Index];
}
-
+
/// @}
/// @name Expensive Operations
/// @{
std::vector<T> vec() const {
return std::vector<T>(Data, Data+Length);
}
-
+
/// @}
/// @name Conversion operators
/// @{
operator std::vector<T>() const {
return std::vector<T>(Data, Data+Length);
}
+
+ /// @}
+ };
+
+ /// MutableArrayRef - Represent a mutable reference to an array (0 or more
+ /// elements consecutively in memory), i.e. a start pointer and a length. It
+ /// allows various APIs to take and modify consecutive elements easily and
+ /// conveniently.
+ ///
+ /// This class does not own the underlying data, it is expected to be used in
+ /// situations where the data resides in some other buffer, whose lifetime
+ /// extends past that of the MutableArrayRef. For this reason, it is not in
+ /// general safe to store a MutableArrayRef.
+ ///
+ /// This is intended to be trivially copyable, so it should be passed by
+ /// value.
+ template<typename T>
+ class MutableArrayRef : public ArrayRef<T> {
+ public:
+ typedef T *iterator;
+
+    /// Construct an empty MutableArrayRef.
+ /*implicit*/ MutableArrayRef() : ArrayRef<T>() {}
+
+    /// Construct a MutableArrayRef from a single element.
+ /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {}
+
+    /// Construct a MutableArrayRef from a pointer and length.
+ /*implicit*/ MutableArrayRef(T *data, size_t length)
+ : ArrayRef<T>(data, length) {}
+
+    /// Construct a MutableArrayRef from a range.
+ MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {}
+
+    /// Construct a MutableArrayRef from a SmallVector.
+ /*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec)
+ : ArrayRef<T>(Vec) {}
+
+ /// Construct a MutableArrayRef from a std::vector.
+ /*implicit*/ MutableArrayRef(std::vector<T> &Vec)
+ : ArrayRef<T>(Vec) {}
+
+    /// Construct a MutableArrayRef from a C array.
+ template <size_t N>
+ /*implicit*/ MutableArrayRef(T (&Arr)[N])
+ : ArrayRef<T>(Arr) {}
+
+ T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }
+
+ iterator begin() const { return data(); }
+ iterator end() const { return data() + this->size(); }
+
+ /// front - Get the first element.
+ T &front() const {
+ assert(!this->empty());
+ return data()[0];
+ }
+
+ /// back - Get the last element.
+ T &back() const {
+ assert(!this->empty());
+ return data()[this->size()-1];
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ MutableArrayRef<T> slice(unsigned N) const {
+ assert(N <= this->size() && "Invalid specifier");
+ return MutableArrayRef<T>(data()+N, this->size()-N);
+ }
+
+ /// slice(n, m) - Chop off the first N elements of the array, and keep M
+ /// elements in the array.
+ MutableArrayRef<T> slice(unsigned N, unsigned M) const {
+ assert(N+M <= this->size() && "Invalid specifier");
+ return MutableArrayRef<T>(data()+N, M);
+ }
/// @}
+ /// @name Operator Overloads
+ /// @{
+ T &operator[](size_t Index) const {
+ assert(Index < this->size() && "Invalid index!");
+ return data()[Index];
+ }
};
/// @name ArrayRef Convenience constructors
@@ -215,5 +296,5 @@ namespace llvm {
static const bool value = true;
};
}
-
+
#endif
diff --git a/contrib/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm/include/llvm/ADT/BitVector.h
index ac1cf0c..7e0b5ba 100644
--- a/contrib/llvm/include/llvm/ADT/BitVector.h
+++ b/contrib/llvm/include/llvm/ADT/BitVector.h
@@ -14,12 +14,12 @@
#ifndef LLVM_ADT_BITVECTOR_H
#define LLVM_ADT_BITVECTOR_H
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdlib>
-#include <cstring>
namespace llvm {
@@ -116,7 +116,7 @@ public:
else if (sizeof(BitWord) == 8)
NumBits += CountPopulation_64(Bits[i]);
else
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
return NumBits;
}
@@ -146,10 +146,9 @@ public:
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Bits[i]);
- else if (sizeof(BitWord) == 8)
+ if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]);
- else
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
return -1;
}
@@ -170,10 +169,9 @@ public:
if (Copy != 0) {
if (sizeof(BitWord) == 4)
return WordPos * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Copy);
- else if (sizeof(BitWord) == 8)
+ if (sizeof(BitWord) == 8)
return WordPos * BITWORD_SIZE + CountTrailingZeros_64(Copy);
- else
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
// Check subsequent words.
@@ -181,10 +179,9 @@ public:
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + CountTrailingZeros_32((uint32_t)Bits[i]);
- else if (sizeof(BitWord) == 8)
+ if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]);
- else
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
return -1;
}
@@ -318,6 +315,16 @@ public:
return *this;
}
+ // reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
+ BitVector &reset(const BitVector &RHS) {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ unsigned i;
+ for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ Bits[i] &= ~RHS.Bits[i];
+ return *this;
+ }
+
BitVector &operator|=(const BitVector &RHS) {
if (size() < RHS.size())
resize(RHS.size());
@@ -365,6 +372,42 @@ public:
std::swap(Capacity, RHS.Capacity);
}
+ //===--------------------------------------------------------------------===//
+ // Portable bit mask operations.
+ //===--------------------------------------------------------------------===//
+ //
+ // These methods all operate on arrays of uint32_t, each holding 32 bits. The
+ // fixed word size makes it easier to work with literal bit vector constants
+ // in portable code.
+ //
+ // The LSB in each word is the lowest numbered bit. The size of a portable
+ // bit mask is always a whole multiple of 32 bits. If no bit mask size is
+ // given, the bit mask is assumed to cover the entire BitVector.
+
+ /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+ /// This computes "*this |= Mask".
+ void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<true, false>(Mask, MaskWords);
+ }
+
+ /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+ /// Don't resize. This computes "*this &= ~Mask".
+ void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<false, false>(Mask, MaskWords);
+ }
+
+ /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this |= ~Mask".
+ void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<true, true>(Mask, MaskWords);
+ }
+
+ /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this &= Mask".
+ void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<false, true>(Mask, MaskWords);
+ }
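+
+  // A short sketch of these in use: given BitVector BV(64), the mask
+  //
+  //   static const uint32_t Mask[2] = { 0x0000FFFFu, 0xFF000000u };
+  //   BV.setBitsInMask(Mask, 2);
+  //
+  // sets bits [0,15] from the first 32-bit mask word and bits [56,63] from
+  // the second; the vector is never resized.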
+
private:
unsigned NumBitWords(unsigned S) const {
return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
@@ -400,6 +443,33 @@ private:
void init_words(BitWord *B, unsigned NumWords, bool t) {
memset(B, 0 - (int)t, NumWords*sizeof(BitWord));
}
+
+ template<bool AddBits, bool InvertMask>
+ void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+ assert(BITWORD_SIZE % 32 == 0 && "Unsupported BitWord size.");
+ MaskWords = std::min(MaskWords, (size() + 31) / 32);
+ const unsigned Scale = BITWORD_SIZE / 32;
+ unsigned i;
+ for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
+ BitWord BW = Bits[i];
+ // This inner loop should unroll completely when BITWORD_SIZE > 32.
+ for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
+ uint32_t M = *Mask++;
+ if (InvertMask) M = ~M;
+ if (AddBits) BW |= BitWord(M) << b;
+ else BW &= ~(BitWord(M) << b);
+ }
+ Bits[i] = BW;
+ }
+ for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
+ uint32_t M = *Mask++;
+ if (InvertMask) M = ~M;
+ if (AddBits) Bits[i] |= BitWord(M) << b;
+ else Bits[i] &= ~(BitWord(M) << b);
+ }
+ if (AddBits)
+ clear_unused_bits();
+ }
};
inline BitVector operator&(const BitVector &LHS, const BitVector &RHS) {
diff --git a/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h b/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
index 99ed15c..e502ac4 100644
--- a/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
+++ b/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -36,6 +36,7 @@ namespace llvm {
/// for more information on the properties which the predicate function itself
/// should satisfy.
class DAGDeltaAlgorithm {
+ virtual void anchor();
public:
typedef unsigned change_ty;
typedef std::pair<change_ty, change_ty> edge_ty;
diff --git a/contrib/llvm/include/llvm/ADT/DenseMap.h b/contrib/llvm/include/llvm/ADT/DenseMap.h
index e70cacf..8d4a19d 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMap.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMap.h
@@ -30,12 +30,11 @@ namespace llvm {
template<typename KeyT, typename ValueT,
typename KeyInfoT = DenseMapInfo<KeyT>,
- typename ValueInfoT = DenseMapInfo<ValueT>, bool IsConst = false>
+ bool IsConst = false>
class DenseMapIterator;
template<typename KeyT, typename ValueT,
- typename KeyInfoT = DenseMapInfo<KeyT>,
- typename ValueInfoT = DenseMapInfo<ValueT> >
+ typename KeyInfoT = DenseMapInfo<KeyT> >
class DenseMap {
typedef std::pair<KeyT, ValueT> BucketT;
unsigned NumBuckets;
@@ -80,19 +79,19 @@ public:
typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
typedef DenseMapIterator<KeyT, ValueT,
- KeyInfoT, ValueInfoT, true> const_iterator;
+ KeyInfoT, true> const_iterator;
inline iterator begin() {
// When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
return empty() ? end() : iterator(Buckets, Buckets+NumBuckets);
}
inline iterator end() {
- return iterator(Buckets+NumBuckets, Buckets+NumBuckets);
+ return iterator(Buckets+NumBuckets, Buckets+NumBuckets, true);
}
inline const_iterator begin() const {
return empty() ? end() : const_iterator(Buckets, Buckets+NumBuckets);
}
inline const_iterator end() const {
- return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets);
+ return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets, true);
}
bool empty() const { return NumEntries == 0; }
@@ -137,13 +136,33 @@ public:
iterator find(const KeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return iterator(TheBucket, Buckets+NumBuckets);
+ return iterator(TheBucket, Buckets+NumBuckets, true);
return end();
}
const_iterator find(const KeyT &Val) const {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return const_iterator(TheBucket, Buckets+NumBuckets);
+ return const_iterator(TheBucket, Buckets+NumBuckets, true);
+ return end();
+ }
+
+ /// Alternate version of find() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template<class LookupKeyT>
+ iterator find_as(const LookupKeyT &Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return iterator(TheBucket, Buckets+NumBuckets, true);
+ return end();
+ }
+ template<class LookupKeyT>
+ const_iterator find_as(const LookupKeyT &Val) const {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return const_iterator(TheBucket, Buckets+NumBuckets, true);
return end();
}
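+
+  // A sketch of find_as in use (StringKeyInfo here is a hypothetical
+  // DenseMapInfo specialization providing getHashValue(StringRef) and
+  // isEqual(StringRef, std::string)):
+  //
+  //   DenseMap<std::string, unsigned, StringKeyInfo> M;
+  //   DenseMap<std::string, unsigned, StringKeyInfo>::iterator I =
+  //       M.find_as(StringRef("key")); // no std::string temporary built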
@@ -162,13 +181,12 @@ public:
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
BucketT *TheBucket;
if (LookupBucketFor(KV.first, TheBucket))
- return std::make_pair(iterator(TheBucket, Buckets+NumBuckets),
+ return std::make_pair(iterator(TheBucket, Buckets+NumBuckets, true),
false); // Already in map.
// Otherwise, insert the new element.
TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
- return std::make_pair(iterator(TheBucket, Buckets+NumBuckets),
- true);
+ return std::make_pair(iterator(TheBucket, Buckets+NumBuckets, true), true);
}
/// insert - Range insertion of pairs.
@@ -237,7 +255,7 @@ public:
private:
void CopyFrom(const DenseMap& other) {
if (NumBuckets != 0 &&
- (!isPodLike<KeyInfoT>::value || !isPodLike<ValueInfoT>::value)) {
+ (!isPodLike<KeyT>::value || !isPodLike<ValueT>::value)) {
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
@@ -266,7 +284,7 @@ private:
Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
- if (isPodLike<KeyInfoT>::value && isPodLike<ValueInfoT>::value)
+ if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
memcpy(Buckets, other.Buckets, NumBuckets * sizeof(BucketT));
else
for (size_t i = 0; i < NumBuckets; ++i) {
@@ -310,6 +328,10 @@ private:
static unsigned getHashValue(const KeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
+ template<typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
static const KeyT getEmptyKey() {
return KeyInfoT::getEmptyKey();
}
@@ -321,7 +343,8 @@ private:
/// FoundBucket. If the bucket contains the key and a value, this returns
/// true, otherwise it returns a bucket with an empty marker or tombstone and
/// returns false.
- bool LookupBucketFor(const KeyT &Val, BucketT *&FoundBucket) const {
+ template<typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) const {
unsigned BucketNo = getHashValue(Val);
unsigned ProbeAmt = 1;
BucketT *BucketsPtr = Buckets;
@@ -342,7 +365,7 @@ private:
while (1) {
BucketT *ThisBucket = BucketsPtr + (BucketNo & (NumBuckets-1));
// Found Val's bucket? If so, return it.
- if (KeyInfoT::isEqual(ThisBucket->first, Val)) {
+ if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
FoundBucket = ThisBucket;
return true;
}
@@ -478,12 +501,12 @@ public:
};
template<typename KeyT, typename ValueT,
- typename KeyInfoT, typename ValueInfoT, bool IsConst>
+ typename KeyInfoT, bool IsConst>
class DenseMapIterator {
typedef std::pair<KeyT, ValueT> Bucket;
typedef DenseMapIterator<KeyT, ValueT,
- KeyInfoT, ValueInfoT, true> ConstIterator;
- friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, ValueInfoT, true>;
+ KeyInfoT, true> ConstIterator;
+ friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
public:
typedef ptrdiff_t difference_type;
typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
@@ -495,15 +518,16 @@ private:
public:
DenseMapIterator() : Ptr(0), End(0) {}
- DenseMapIterator(pointer Pos, pointer E) : Ptr(Pos), End(E) {
- AdvancePastEmptyBuckets();
+ DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
+ : Ptr(Pos), End(E) {
+ if (!NoAdvance) AdvancePastEmptyBuckets();
}
// If IsConst is true this is a converting constructor from iterator to
// const_iterator and the default copy constructor is used.
// Otherwise this is a copy constructor for iterator.
DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
- KeyInfoT, ValueInfoT, false>& I)
+ KeyInfoT, false>& I)
: Ptr(I.Ptr), End(I.End) {}
reference operator*() const {
@@ -541,9 +565,9 @@ private:
}
};
-template<typename KeyT, typename ValueT, typename KeyInfoT, typename ValueInfoT>
+template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
-capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT, ValueInfoT> &X) {
+capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
return X.getMemorySize();
}
diff --git a/contrib/llvm/include/llvm/ADT/DenseMapInfo.h b/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
index df4084e..1559a35 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
@@ -59,7 +59,7 @@ template<> struct DenseMapInfo<char> {
// Provide DenseMapInfo for unsigned ints.
template<> struct DenseMapInfo<unsigned> {
- static inline unsigned getEmptyKey() { return ~0; }
+ static inline unsigned getEmptyKey() { return ~0U; }
static inline unsigned getTombstoneKey() { return ~0U - 1; }
static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
diff --git a/contrib/llvm/include/llvm/ADT/FoldingSet.h b/contrib/llvm/include/llvm/ADT/FoldingSet.h
index d2e0b8f..7d7c777 100644
--- a/contrib/llvm/include/llvm/ADT/FoldingSet.h
+++ b/contrib/llvm/include/llvm/ADT/FoldingSet.h
@@ -193,12 +193,11 @@ protected:
virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;
/// NodeEquals - Instantiations of the FoldingSet template implement
/// this function to compare the given node with the given ID.
- virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID,
+ virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID) const=0;
- /// NodeEquals - Instantiations of the FoldingSet template implement
+ /// ComputeNodeHash - Instantiations of the FoldingSet template implement
/// this function to compute a hash value for the given node.
- virtual unsigned ComputeNodeHash(Node *N,
- FoldingSetNodeID &TempID) const = 0;
+ virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
};
//===----------------------------------------------------------------------===//
@@ -220,7 +219,7 @@ template<typename T> struct DefaultFoldingSetTrait {
// to compute a temporary ID if necessary. The default implementation
// just calls Profile and does a regular comparison. Implementations
// can override this to provide more efficient implementations.
- static inline bool Equals(T &X, const FoldingSetNodeID &ID,
+ static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID);
// ComputeHash - Compute a hash value for X, using TempID to
@@ -249,7 +248,7 @@ struct DefaultContextualFoldingSetTrait {
static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
X.Profile(ID, Context);
}
- static inline bool Equals(T &X, const FoldingSetNodeID &ID,
+ static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID, Ctx Context);
static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
Ctx Context);
@@ -344,7 +343,7 @@ template<class T> class FoldingSetBucketIterator;
template<typename T>
inline bool
DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
- FoldingSetNodeID &TempID) {
+ unsigned IDHash, FoldingSetNodeID &TempID) {
FoldingSetTrait<T>::Profile(X, TempID);
return TempID == ID;
}
@@ -358,6 +357,7 @@ template<typename T, typename Ctx>
inline bool
DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
const FoldingSetNodeID &ID,
+ unsigned IDHash,
FoldingSetNodeID &TempID,
Ctx Context) {
ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
@@ -387,15 +387,14 @@ private:
}
/// NodeEquals - Instantiations may optionally provide a way to compare a
/// node with a specified ID.
- virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID,
+ virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID) const {
T *TN = static_cast<T *>(N);
- return FoldingSetTrait<T>::Equals(*TN, ID, TempID);
+ return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
}
- /// NodeEquals - Instantiations may optionally provide a way to compute a
+ /// ComputeNodeHash - Instantiations may optionally provide a way to compute a
/// hash value directly from a node.
- virtual unsigned ComputeNodeHash(Node *N,
- FoldingSetNodeID &TempID) const {
+ virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const {
T *TN = static_cast<T *>(N);
return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
}
@@ -465,10 +464,11 @@ private:
ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
}
virtual bool NodeEquals(FoldingSetImpl::Node *N,
- const FoldingSetNodeID &ID,
+ const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID) const {
T *TN = static_cast<T *>(N);
- return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, TempID, Context);
+ return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
+ Context);
}
virtual unsigned ComputeNodeHash(FoldingSetImpl::Node *N,
FoldingSetNodeID &TempID) const {
diff --git a/contrib/llvm/include/llvm/ADT/GraphTraits.h b/contrib/llvm/include/llvm/ADT/GraphTraits.h
index 0fd1f50..823caef 100644
--- a/contrib/llvm/include/llvm/ADT/GraphTraits.h
+++ b/contrib/llvm/include/llvm/ADT/GraphTraits.h
@@ -43,9 +43,12 @@ struct GraphTraits {
// typedef ...iterator nodes_iterator;
// static nodes_iterator nodes_begin(GraphType *G)
// static nodes_iterator nodes_end (GraphType *G)
- //
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ // static unsigned size (GraphType *G)
+ // Return total number of nodes in the graph
+ //
+
// If anyone tries to use this class without having an appropriate
// specialization, make an error. If you get this error, it's because you
diff --git a/contrib/llvm/include/llvm/ADT/Hashing.h b/contrib/llvm/include/llvm/ADT/Hashing.h
new file mode 100644
index 0000000..53032ee
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/Hashing.h
@@ -0,0 +1,770 @@
+//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the newly proposed standard C++ interfaces for hashing
+// arbitrary data and building hash functions for user-defined types. This
+// interface was originally proposed in N3333[1] and is currently under review
+// for inclusion in a future TR and/or standard.
+//
+// The primary interfaces provided comprise one type and three functions:
+//
+// -- 'hash_code' class is an opaque type representing the hash code for some
+// data. It is the intended product of hashing, and can be used to implement
+// hash tables, checksumming, and other common uses of hashes. It is not an
+// integer type (although it can be converted to one) because it is risky
+// to assume much about the internals of a hash_code. In particular, each
+// execution of the program has a high probability of producing a different
+//     hash_code for a given input. Thus hash_code values are not stable across
+//     runs and should not be saved or persisted; use them only within a single
+//     execution, for example to build hashing data structures.
+//
+// -- 'hash_value' is a function designed to be overloaded for each
+// user-defined type which wishes to be used within a hashing context. It
+// should be overloaded within the user-defined type's namespace and found
+// via ADL. Overloads for primitive types are provided by this library.
+//
+// -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
+// programmers in easily and intuitively combining a set of data into
+//     a single hash_code for their object. Logically, they should only be used
+//     within the implementation of a 'hash_value' routine or similar context.
+//
+// Note that 'hash_combine_range' contains very special logic for hashing
+// a contiguous array of integers or pointers. This logic is *extremely* fast:
+// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
+// benchmarked at over 6.5 GiB/s for large keys and at under 20 cycles/hash
+// for keys under 32 bytes.
+//
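+// A typical user-defined overload looks like this (a sketch; 'Point' is
+// purely illustrative):
+//
+//   struct Point { int x, y; };
+//   hash_code hash_value(const Point &p) {
+//     return hash_combine(p.x, p.y);
+//   }
+//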
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_HASHING_H
+#define LLVM_ADT_HASHING_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <iterator>
+#include <utility>
+
+// Allow detecting C++11 feature availability when building with Clang without
+// breaking other compilers.
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+namespace llvm {
+
+/// \brief An opaque object representing a hash code.
+///
+/// This object represents the result of hashing some entity. It is intended to
+/// be used to implement hashtables or other hashing-based data structures.
+/// While it wraps and exposes a numeric value, this value should not be
+/// trusted to be stable or predictable across processes or executions.
+///
+/// In order to obtain the hash_code for an object 'x':
+/// \code
+/// using llvm::hash_value;
+/// llvm::hash_code code = hash_value(x);
+/// \endcode
+///
+/// Also note that there are two numerical values which are reserved and which
+/// the implementation ensures will never be produced for real hash_codes.
+/// These can be used as sentinels within hashing data structures.
+class hash_code {
+ size_t value;
+
+public:
+ /// \brief Default construct a hash_code.
+ /// Note that this leaves the value uninitialized.
+ hash_code() {}
+
+ /// \brief Form a hash code directly from a numerical value.
+ hash_code(size_t value) : value(value) {}
+
+ /// \brief Convert the hash code to its numerical value for use.
+ /*explicit*/ operator size_t() const { return value; }
+
+ friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
+ return lhs.value == rhs.value;
+ }
+ friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
+ return lhs.value != rhs.value;
+ }
+
+ /// \brief Allow a hash_code to be directly run through hash_value.
+ friend size_t hash_value(const hash_code &code) { return code.value; }
+};
+
+/// \brief Compute a hash_code for any integer value.
+///
+/// Note that this function is intended to compute the same hash_code for
+/// a particular value without regard to the pre-promotion type. This is in
+/// contrast to hash_combine which may produce different hash_codes for
+/// differing argument types even if they would implicitly promote to a common
+/// type without changing the value.
+template <typename T>
+typename enable_if<is_integral_or_enum<T>, hash_code>::type hash_value(T value);
+
+/// \brief Compute a hash_code for a pointer's address.
+///
+/// N.B.: This hashes the *address*. Not the value and not the type.
+template <typename T> hash_code hash_value(const T *ptr);
+
+/// \brief Compute a hash_code for a pair of objects.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg);
+
+/// \brief Compute a hash_code for a standard string.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg);
+
+
+/// \brief Override the execution seed with a fixed value.
+///
+/// This hashing library uses a per-execution seed designed to change on each
+/// run with high probability in order to ensure that the hash codes are not
+/// attackable and to ensure that output which is intended to be stable does
+/// not rely on the particulars of the hash codes produced.
+///
+/// That said, there are use cases where it is important to be able to
+/// reproduce *exactly* a specific behavior. To that end, we provide a function
+/// which will forcibly set the seed to a fixed value. This must be done at the
+/// start of the program, before any hashes are computed. Also, it cannot be
+/// undone. This makes it thread-hostile and very hard to use safely anywhere
+/// other than immediately at the start of a simple program designed for
+/// reproducible behavior.
+void set_fixed_execution_hash_seed(size_t fixed_value);
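+
+// A sketch of the intended use: a test binary wanting bit-for-bit
+// reproducible hashing calls this once, before any other work:
+//
+//   int main(int argc, char **argv) {
+//     llvm::set_fixed_execution_hash_seed(42); // any fixed value
+//     // ... everything hashed below now uses the fixed seed ...
+//   }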
+
+
+// All of the implementation details of actually computing the various hash
+// code values are held within this namespace. These routines are included in
+// the header file mainly to allow inlining and constant propagation.
+namespace hashing {
+namespace detail {
+
+inline uint64_t fetch64(const char *p) {
+ uint64_t result;
+ memcpy(&result, p, sizeof(result));
+ if (sys::isBigEndianHost())
+ return sys::SwapByteOrder(result);
+ return result;
+}
+
+inline uint32_t fetch32(const char *p) {
+ uint32_t result;
+ memcpy(&result, p, sizeof(result));
+ if (sys::isBigEndianHost())
+ return sys::SwapByteOrder(result);
+ return result;
+}
+
+/// Some primes between 2^63 and 2^64 for various uses.
+static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64_t k1 = 0xb492b66fbe98f273ULL;
+static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
+static const uint64_t k3 = 0xc949d7c7509e6557ULL;
+
+/// \brief Bitwise right rotate.
+/// Normally this will compile to a single instruction, especially if the
+/// shift is a manifest constant.
+inline uint64_t rotate(uint64_t val, size_t shift) {
+ // Avoid shifting by 64: doing so yields an undefined result.
+ return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+inline uint64_t shift_mix(uint64_t val) {
+ return val ^ (val >> 47);
+}
+
+inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
+ // Murmur-inspired hashing.
+ const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+ uint64_t a = (low ^ high) * kMul;
+ a ^= (a >> 47);
+ uint64_t b = (high ^ a) * kMul;
+ b ^= (b >> 47);
+ b *= kMul;
+ return b;
+}
+
+inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
+ uint8_t a = s[0];
+ uint8_t b = s[len >> 1];
+ uint8_t c = s[len - 1];
+ uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+ uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+ return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
+}
+
+inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch32(s);
+ return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
+}
+
+inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch64(s);
+ uint64_t b = fetch64(s + len - 8);
+ return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
+}
+
+inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch64(s) * k1;
+ uint64_t b = fetch64(s + 8);
+ uint64_t c = fetch64(s + len - 8) * k2;
+ uint64_t d = fetch64(s + len - 16) * k0;
+ return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
+ a + rotate(b ^ k3, 20) - c + len + seed);
+}
+
+inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t z = fetch64(s + 24);
+ uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
+ uint64_t b = rotate(a + z, 52);
+ uint64_t c = rotate(a, 37);
+ a += fetch64(s + 8);
+ c += rotate(a, 7);
+ a += fetch64(s + 16);
+ uint64_t vf = a + z;
+ uint64_t vs = b + rotate(a, 31) + c;
+ a = fetch64(s + 16) + fetch64(s + len - 32);
+ z = fetch64(s + len - 8);
+ b = rotate(a + z, 52);
+ c = rotate(a, 37);
+ a += fetch64(s + len - 24);
+ c += rotate(a, 7);
+ a += fetch64(s + len - 16);
+ uint64_t wf = a + z;
+ uint64_t ws = b + rotate(a, 31) + c;
+ uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
+ return shift_mix((seed ^ (r * k0)) + vs) * k2;
+}
+
+inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
+ if (length >= 4 && length <= 8)
+ return hash_4to8_bytes(s, length, seed);
+ if (length > 8 && length <= 16)
+ return hash_9to16_bytes(s, length, seed);
+ if (length > 16 && length <= 32)
+ return hash_17to32_bytes(s, length, seed);
+ if (length > 32)
+ return hash_33to64_bytes(s, length, seed);
+ if (length != 0)
+ return hash_1to3_bytes(s, length, seed);
+
+ return k2 ^ seed;
+}
+
+/// \brief The intermediate state used during hashing.
+/// Currently, the algorithm for computing hash codes is based on CityHash and
+/// keeps 56 bytes of arbitrary state.
+struct hash_state {
+ uint64_t h0, h1, h2, h3, h4, h5, h6;
+ uint64_t seed;
+
+ /// \brief Create a new hash_state structure and initialize it based on the
+ /// seed and the first 64-byte chunk.
+ /// This effectively performs the initial mix.
+ static hash_state create(const char *s, uint64_t seed) {
+ hash_state state = {
+ 0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
+ seed * k1, shift_mix(seed), 0, seed };
+ state.h6 = hash_16_bytes(state.h4, state.h5);
+ state.mix(s);
+ return state;
+ }
+
+ /// \brief Mix 32-bytes from the input sequence into the 16-bytes of 'a'
+ /// and 'b', including whatever is already in 'a' and 'b'.
+ static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
+ a += fetch64(s);
+ uint64_t c = fetch64(s + 24);
+ b = rotate(b + a + c, 21);
+ uint64_t d = a;
+ a += fetch64(s + 8) + fetch64(s + 16);
+ b += rotate(a, 44) + d;
+ a += c;
+ }
+
+ /// \brief Mix in a 64-byte buffer of data.
+ /// We mix all 64 bytes even when the chunk length is smaller, but we
+ /// record the actual length.
+ void mix(const char *s) {
+ h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+ h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+ h0 ^= h6;
+ h1 += h3 + fetch64(s + 40);
+ h2 = rotate(h2 + h5, 33) * k1;
+ h3 = h4 * k1;
+ h4 = h0 + h5;
+ mix_32_bytes(s, h3, h4);
+ h5 = h2 + h6;
+ h6 = h1 + fetch64(s + 16);
+ mix_32_bytes(s + 32, h5, h6);
+ std::swap(h2, h0);
+ }
+
+ /// \brief Compute the final 64-bit hash code value based on the current
+ /// state and the length of bytes hashed.
+ uint64_t finalize(size_t length) {
+ return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
+ hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
+ }
+};
+
+
+/// \brief A global, fixed seed-override variable.
+///
+/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
+/// function. See that function for details. Do not, under any circumstances,
+/// set or read this variable.
+extern size_t fixed_seed_override;
+
+inline size_t get_execution_seed() {
+ // FIXME: This needs to be a per-execution seed. This is just a placeholder
+ // implementation. Switching to a per-execution seed is likely to flush out
+ // instability bugs and so will happen as its own commit.
+ //
+ // However, if there is a fixed seed override set the first time this is
+ // called, return that instead of the per-execution seed.
+ const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
+ static size_t seed = fixed_seed_override ? fixed_seed_override
+ : (size_t)seed_prime;
+ return seed;
+}
+
+
+/// \brief Trait to indicate whether a type's bits can be hashed directly.
+///
+/// A type trait which is true if we want to combine values for hashing by
+/// reading the underlying data. It is false if values of this type must
+/// first be passed to hash_value, and the resulting hash_codes combined.
+//
+// FIXME: We want to replace is_integral_or_enum and is_pointer here with
+// a predicate which asserts that comparing the underlying storage of two
+// values of the type for equality is equivalent to comparing the two values
+// for equality. For all the platforms we care about, this holds for integers
+// and pointers, but there are platforms where it doesn't and we would like to
+// support user-defined types which happen to satisfy this property.
+template <typename T> struct is_hashable_data
+ : integral_constant<bool, ((is_integral_or_enum<T>::value ||
+ is_pointer<T>::value) &&
+ 64 % sizeof(T) == 0)> {};
+
+// Special case std::pair to detect when both types are viable and when there
+// is no alignment-derived padding in the pair. This is a bit of a lie because
+// std::pair isn't truly POD, but it's close enough in all reasonable
+// implementations for our use case of hashing the underlying data.
+template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
+ : integral_constant<bool, (is_hashable_data<T>::value &&
+ is_hashable_data<U>::value &&
+ (sizeof(T) + sizeof(U)) ==
+ sizeof(std::pair<T, U>))> {};
+
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when the type itself can be used.
+template <typename T>
+typename enable_if<is_hashable_data<T>, T>::type
+get_hashable_data(const T &value) {
+ return value;
+}
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when we must first call hash_value and use the
+/// result as our data.
+template <typename T>
+typename enable_if_c<!is_hashable_data<T>::value, size_t>::type
+get_hashable_data(const T &value) {
+ using ::llvm::hash_value;
+ return hash_value(value);
+}
+
+/// \brief Helper to store data from a value into a buffer and advance the
+/// pointer into that buffer.
+///
+/// This routine first checks whether there is enough space in the provided
+/// buffer, and if not immediately returns false. If there is space, it
+/// copies the underlying bytes of value into the buffer, advances the
+/// buffer_ptr past the copied bytes, and returns true.
+template <typename T>
+bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
+ size_t offset = 0) {
+ size_t store_size = sizeof(value) - offset;
+ if (buffer_ptr + store_size > buffer_end)
+ return false;
+ const char *value_data = reinterpret_cast<const char *>(&value);
+ memcpy(buffer_ptr, value_data + offset, store_size);
+ buffer_ptr += store_size;
+ return true;
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is
+/// integral. Rather than computing a hash_code for each object and then
+/// combining them, this (as an optimization) directly combines the integers.
+template <typename InputIteratorT>
+hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
+ typedef typename std::iterator_traits<InputIteratorT>::value_type ValueT;
+ const size_t seed = get_execution_seed();
+ char buffer[64], *buffer_ptr = buffer;
+ char *const buffer_end = buffer_ptr + array_lengthof(buffer);
+ while (first != last && store_and_advance(buffer_ptr, buffer_end,
+ get_hashable_data(*first)))
+ ++first;
+ if (first == last)
+ return hash_short(buffer, buffer_ptr - buffer, seed);
+ assert(buffer_ptr == buffer_end);
+
+  hash_state state = hash_state::create(buffer, seed);
+ size_t length = 64;
+ while (first != last) {
+ // Fill up the buffer. We don't clear it, which re-mixes the last round
+ // when only a partial 64-byte chunk is left.
+ buffer_ptr = buffer;
+ while (first != last && store_and_advance(buffer_ptr, buffer_end,
+ get_hashable_data(*first)))
+ ++first;
+
+ // Rotate the buffer if we did a partial fill in order to simulate doing
+ // a mix of the last 64-bytes. That is how the algorithm works when we
+ // have a contiguous byte sequence, and we want to emulate that here.
+ std::rotate(buffer, buffer_ptr, buffer_end);
+
+ // Mix this chunk into the current state.
+ state.mix(buffer);
+ length += buffer_ptr - buffer;
+  }
+
+ return state.finalize(length);
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is integral
+/// and when the input iterator is actually a pointer. Rather than computing
+/// a hash_code for each object and then combining them, this (as an
+/// optimization) directly combines the integers. Also, because the integers
+/// are stored in contiguous memory, this routine avoids copying each value
+/// and directly reads from the underlying memory.
+template <typename ValueT>
+typename enable_if<is_hashable_data<ValueT>, hash_code>::type
+hash_combine_range_impl(ValueT *first, ValueT *last) {
+ const size_t seed = get_execution_seed();
+ const char *s_begin = reinterpret_cast<const char *>(first);
+ const char *s_end = reinterpret_cast<const char *>(last);
+ const size_t length = std::distance(s_begin, s_end);
+ if (length <= 64)
+ return hash_short(s_begin, length, seed);
+
+ const char *s_aligned_end = s_begin + (length & ~63);
+  hash_state state = hash_state::create(s_begin, seed);
+ s_begin += 64;
+ while (s_begin != s_aligned_end) {
+ state.mix(s_begin);
+ s_begin += 64;
+ }
+ if (length & 63)
+ state.mix(s_end - 64);
+
+ return state.finalize(length);
+}
+
+} // namespace detail
+} // namespace hashing
+
+
+/// \brief Compute a hash_code for a sequence of values.
+///
+/// This hashes a sequence of values. It produces the same hash_code as
+/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
+/// and is significantly faster given pointers and types which can be hashed as
+/// a sequence of bytes.
+template <typename InputIteratorT>
+hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
+ return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
+}
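+
+// A sketch: hashing a contiguous array of integers takes the fast pointer
+// overload above and reads the underlying bytes directly:
+//
+//   int data[] = { 1, 2, 3, 4 };
+//   hash_code h = hash_combine_range(data, data + 4);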
+
+
+// Implementation details for hash_combine.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper class to manage the recursive combining of hash_combine
+/// arguments.
+///
+/// This class exists to manage the state and various calls involved in the
+/// recursive combining of arguments used in hash_combine. It is particularly
+/// useful at minimizing the code in the recursive calls to ease the pain
+/// caused by a lack of variadic functions.
+struct hash_combine_recursive_helper {
+ char buffer[64];
+ hash_state state;
+ const size_t seed;
+
+public:
+ /// \brief Construct a recursive hash combining helper.
+ ///
+ /// This sets up the state for a recursive hash combine, including getting
+ /// the seed and buffer setup.
+ hash_combine_recursive_helper()
+ : seed(get_execution_seed()) {}
+
+ /// \brief Combine one chunk of data into the current in-flight hash.
+ ///
+ /// This merges one chunk of data into the hash. First it tries to buffer
+ /// the data. If the buffer is full, it hashes the buffer into its
+ /// hash_state, empties it, and then merges the new chunk in. This also
+ /// handles cases where the data straddles the end of the buffer.
+ template <typename T>
+ char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
+ if (!store_and_advance(buffer_ptr, buffer_end, data)) {
+ // Check for skew which prevents the buffer from being packed, and do
+ // a partial store into the buffer to fill it. This is only a concern
+ // with the variadic combine because that formation can have varying
+ // argument types.
+ size_t partial_store_size = buffer_end - buffer_ptr;
+ memcpy(buffer_ptr, &data, partial_store_size);
+
+ // If the store fails, our buffer is full and ready to hash. We have to
+ // either initialize the hash state (on the first full buffer) or mix
+ // this buffer into the existing hash state. Length tracks the *hashed*
+ // length, not the buffered length.
+ if (length == 0) {
+        state = hash_state::create(buffer, seed);
+ length = 64;
+ } else {
+ // Mix this chunk into the current state and bump length up by 64.
+ state.mix(buffer);
+ length += 64;
+ }
+ // Reset the buffer_ptr to the head of the buffer for the next chunk of
+ // data.
+ buffer_ptr = buffer;
+
+ // Try again to store into the buffer -- this cannot fail as we only
+ // store types smaller than the buffer.
+ if (!store_and_advance(buffer_ptr, buffer_end, data,
+ partial_store_size))
+ abort();
+ }
+ return buffer_ptr;
+ }
+
+#if defined(__has_feature) && __has_feature(__cxx_variadic_templates__)
+
+ /// \brief Recursive, variadic combining method.
+ ///
+ /// This function recurses through each argument, combining that argument
+ /// into a single hash.
+ template <typename T, typename ...Ts>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T &arg, const Ts &...args) {
+ buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));
+
+ // Recurse to the next argument.
+ return combine(length, buffer_ptr, buffer_end, args...);
+ }
+
+#else
+ // Manually expanded recursive combining methods. See variadic above for
+ // documentation.
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T1 &arg1, const T2 &arg2, const T3 &arg3,
+ const T4 &arg4, const T5 &arg5, const T6 &arg6) {
+ buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+ return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4, arg5, arg6);
+ }
+ template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T1 &arg1, const T2 &arg2, const T3 &arg3,
+ const T4 &arg4, const T5 &arg5) {
+ buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+ return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4, arg5);
+ }
+ template <typename T1, typename T2, typename T3, typename T4>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T1 &arg1, const T2 &arg2, const T3 &arg3,
+ const T4 &arg4) {
+ buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+ return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4);
+ }
+ template <typename T1, typename T2, typename T3>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T1 &arg1, const T2 &arg2, const T3 &arg3) {
+ buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+ return combine(length, buffer_ptr, buffer_end, arg2, arg3);
+ }
+ template <typename T1, typename T2>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T1 &arg1, const T2 &arg2) {
+ buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+ return combine(length, buffer_ptr, buffer_end, arg2);
+ }
+ template <typename T1>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T1 &arg1) {
+ buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg1));
+ return combine(length, buffer_ptr, buffer_end);
+ }
+
+#endif
+
+ /// \brief Base case for recursive, variadic combining.
+ ///
+ /// The base case when combining arguments recursively is reached when all
+ /// arguments have been handled. It flushes the remaining buffer and
+ /// constructs a hash_code.
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
+ // Check whether the entire set of values fits in the buffer. If so, we'll
+ // use the optimized short hashing routine and skip state entirely.
+ if (length == 0)
+ return hash_short(buffer, buffer_ptr - buffer, seed);
+
+ // Mix the final buffer, rotating it if we did a partial fill in order to
+ // simulate doing a mix of the last 64-bytes. That is how the algorithm
+ // works when we have a contiguous byte sequence, and we want to emulate
+ // that here.
+ std::rotate(buffer, buffer_ptr, buffer_end);
+
+ // Mix this chunk into the current state.
+ state.mix(buffer);
+ length += buffer_ptr - buffer;
+
+ return state.finalize(length);
+ }
+};
+
+} // namespace detail
+} // namespace hashing
+
+
+#if __has_feature(__cxx_variadic_templates__)
+
+/// \brief Combine values into a single hash_code.
+///
+/// This routine accepts a varying number of arguments of any type. It will
+/// attempt to combine them into a single hash_code. For user-defined types it
+/// attempts to call a \see hash_value overload (via ADL) for the type. For
+/// integer and pointer types it directly combines their data into the
+/// resulting hash_code.
+///
+/// The result is suitable for returning from a user's hash_value
+/// *implementation* for their user-defined type. Consumers of a type should
+/// *not* call this routine; they should instead call 'hash_value'.
+template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
+ // Recursively hash each argument using a helper class.
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
+}
+
+#else
+
+// What follows are manually exploded overloads for each argument width. See
+// the above variadic definition for documentation and specification.
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+ const T4 &arg4, const T5 &arg5, const T6 &arg6) {
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64,
+ arg1, arg2, arg3, arg4, arg5, arg6);
+}
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+ const T4 &arg4, const T5 &arg5) {
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64,
+ arg1, arg2, arg3, arg4, arg5);
+}
+template <typename T1, typename T2, typename T3, typename T4>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
+ const T4 &arg4) {
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64,
+ arg1, arg2, arg3, arg4);
+}
+template <typename T1, typename T2, typename T3>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3) {
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64, arg1, arg2, arg3);
+}
+template <typename T1, typename T2>
+hash_code hash_combine(const T1 &arg1, const T2 &arg2) {
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64, arg1, arg2);
+}
+template <typename T1>
+hash_code hash_combine(const T1 &arg1) {
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64, arg1);
+}
+
+#endif
+
+
+// Implementation details for the hash_value overloads provided here.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper to hash the value of a single integer.
+///
+/// Overloads for smaller integer types are not provided to ensure consistent
+/// behavior in the presence of integral promotions. Essentially,
+/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
+inline hash_code hash_integer_value(uint64_t value) {
+ // Similar to hash_4to8_bytes but using a seed instead of length.
+ const uint64_t seed = get_execution_seed();
+ const char *s = reinterpret_cast<const char *>(&value);
+ const uint64_t a = fetch32(s);
+ return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
+}
+
+} // namespace detail
+} // namespace hashing
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+typename enable_if<is_integral_or_enum<T>, hash_code>::type
+hash_value(T value) {
+ return ::llvm::hashing::detail::hash_integer_value(value);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T> hash_code hash_value(const T *ptr) {
+ return ::llvm::hashing::detail::hash_integer_value(
+ reinterpret_cast<uintptr_t>(ptr));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg) {
+ return hash_combine(arg.first, arg.second);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg) {
+ return hash_combine_range(arg.begin(), arg.end());
+}
+
+} // namespace llvm
+
+#endif
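
A minimal usage sketch of the interface above, assuming only what this header declares; the Point type and hash_points function are invented names:

    #include "llvm/ADT/Hashing.h"
    #include <vector>

    namespace example {
    struct Point { int x, y; };

    // hash_value is found via ADL, per the contract documented above; a
    // user-defined implementation should return the result of hash_combine.
    llvm::hash_code hash_value(const Point &P) {
      return llvm::hash_combine(P.x, P.y);
    }
    } // namespace example

    // Equivalent to hash_combine(Pts[0], Pts[1], ...) for any length.
    llvm::hash_code hash_points(const std::vector<example::Point> &Pts) {
      return llvm::hash_combine_range(Pts.begin(), Pts.end());
    }
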
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableSet.h b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
index d597a7c..89b1648 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableSet.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <functional>
#include <vector>
@@ -346,7 +347,7 @@ public:
if (prev)
prev->next = next;
else
- factory->Cache[computeDigest()] = next;
+ factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
}
// We need to clear the mutability bit in case we are
@@ -428,6 +429,11 @@ protected:
TreeTy* getRight(TreeTy* T) const { return T->getRight(); }
value_type_ref getValue(TreeTy* T) const { return T->value; }
+ // Make sure the index is not the Tombstone or Empty key of the DenseMap.
+ static inline unsigned maskCacheIndex(unsigned I) {
+ return (I & ~0x02);
+ }
+
unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
unsigned hl = getHeight(L);
unsigned hr = getHeight(R);
@@ -610,7 +616,7 @@ public:
// Search the hashtable for another tree with the same digest, and
// if we find a collision, compare those trees by their contents.
unsigned digest = TNew->computeDigest();
- TreeTy *&entry = Cache[digest];
+ TreeTy *&entry = Cache[maskCacheIndex(digest)];
do {
if (!entry)
break;
@@ -686,7 +692,7 @@ public:
stack.back() |= VisitedRight;
break;
default:
- assert(false && "Unreachable.");
+ llvm_unreachable("Unreachable.");
}
}
@@ -722,7 +728,7 @@ public:
skipToParent();
break;
default:
- assert(false && "Unreachable.");
+ llvm_unreachable("Unreachable.");
}
return *this;
}
@@ -747,7 +753,7 @@ public:
stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
break;
default:
- assert(false && "Unreachable.");
+ llvm_unreachable("Unreachable.");
}
return *this;
}
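
The masking helper above works because, assuming the usual DenseMapInfo<unsigned> reserved keys (empty = ~0U, tombstone = ~0U - 1), both reserved values have bit 1 set, so a digest with bit 1 cleared can never collide with them. A standalone sketch of the invariant:

    #include <cassert>

    static unsigned maskCacheIndex(unsigned I) { return I & ~0x02u; }

    int main() {
      const unsigned Empty = ~0u;         // 0xFFFFFFFF, binary ...11
      const unsigned Tombstone = ~0u - 1; // 0xFFFFFFFE, binary ...10
      unsigned Digests[] = { 0u, 2u, Tombstone, Empty };
      for (unsigned i = 0; i != 4; ++i) {
        unsigned Masked = maskCacheIndex(Digests[i]);
        // Bit 1 is cleared, so neither reserved key can be produced.
        assert(Masked != Empty && Masked != Tombstone);
      }
      return 0;
    }
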
diff --git a/contrib/llvm/include/llvm/ADT/IntervalMap.h b/contrib/llvm/include/llvm/ADT/IntervalMap.h
index 1230e8f..931b67e 100644
--- a/contrib/llvm/include/llvm/ADT/IntervalMap.h
+++ b/contrib/llvm/include/llvm/ADT/IntervalMap.h
@@ -739,7 +739,7 @@ public:
// A Path is used by iterators to represent a position in a B+-tree, and the
// path to get there from the root.
//
-// The Path class also constains the tree navigation code that doesn't have to
+// The Path class also contains the tree navigation code that doesn't have to
// be templatized.
//
//===----------------------------------------------------------------------===//
@@ -1977,7 +1977,7 @@ iterator::overflow(unsigned Level) {
CurSize[Nodes] = CurSize[NewNode];
Node[Nodes] = Node[NewNode];
CurSize[NewNode] = 0;
- Node[NewNode] = this->map->newNode<NodeT>();
+ Node[NewNode] = this->map->template newNode<NodeT>();
++Nodes;
}
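
The added `template` keyword is required by the dependent-name rules: `this->map` has a type that depends on a template parameter, so without the keyword a conforming compiler parses the `<` of `newNode<NodeT>` as less-than. A standalone sketch of the rule, with invented names:

    template <typename T>
    struct Factory {
      template <typename U> U *make() { return new U(); }
    };

    template <typename T>
    struct Client {
      Factory<T> *map;
      int *makeInt() {
        // 'template' tells the parser that make is a member template;
        // plain 'map->make<int>()' would be parsed as (map->make < int) > ...
        return map->template make<int>();
      }
    };
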
diff --git a/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h b/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h
index 2f6fd2b..3a1a3f4 100644
--- a/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h
+++ b/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -46,6 +46,7 @@ namespace llvm {
public:
RefCountedBase() : ref_cnt(0) {}
+ RefCountedBase(const RefCountedBase &) : ref_cnt(0) {}
void Retain() const { ++ref_cnt; }
void Release() const {
@@ -64,9 +65,12 @@ namespace llvm {
//===----------------------------------------------------------------------===//
class RefCountedBaseVPTR {
mutable unsigned ref_cnt;
+ virtual void anchor();
protected:
RefCountedBaseVPTR() : ref_cnt(0) {}
+ RefCountedBaseVPTR(const RefCountedBaseVPTR &) : ref_cnt(0) {}
+
virtual ~RefCountedBaseVPTR() {}
void Retain() const { ++ref_cnt; }
@@ -76,9 +80,15 @@ namespace llvm {
}
template <typename T>
- friend class IntrusiveRefCntPtr;
+ friend struct IntrusiveRefCntPtrInfo;
};
+
+ template <typename T> struct IntrusiveRefCntPtrInfo {
+ static void retain(T *obj) { obj->Retain(); }
+ static void release(T *obj) { obj->Release(); }
+ };
+
//===----------------------------------------------------------------------===//
/// IntrusiveRefCntPtr - A template class that implements a "smart pointer"
/// that assumes the wrapped object has a reference count associated
@@ -105,7 +115,7 @@ namespace llvm {
explicit IntrusiveRefCntPtr() : Obj(0) {}
- explicit IntrusiveRefCntPtr(T* obj) : Obj(obj) {
+ IntrusiveRefCntPtr(T* obj) : Obj(obj) {
retain();
}
@@ -153,14 +163,19 @@ namespace llvm {
other.Obj = Obj;
Obj = tmp;
}
-
+
+ void reset() {
+ release();
+ Obj = 0;
+ }
+
void resetWithoutRelease() {
Obj = 0;
}
private:
- void retain() { if (Obj) Obj->Retain(); }
- void release() { if (Obj) Obj->Release(); }
+ void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); }
+ void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); }
void replace(T* S) {
this_type(S).swap(*this);
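
Routing retain/release through IntrusiveRefCntPtrInfo<T> means a specialization can adapt classes whose reference-count methods are named differently, without deriving from RefCountedBase. A hedged sketch; ExternalNode and its AddRef/DropRef methods are invented for illustration:

    #include "llvm/ADT/IntrusiveRefCntPtr.h"

    class ExternalNode {
      mutable unsigned RefCount;
    public:
      ExternalNode() : RefCount(0) {}
      void AddRef() const { ++RefCount; }
      void DropRef() const { if (--RefCount == 0) delete this; }
    };

    namespace llvm {
    // Specialize the trait so IntrusiveRefCntPtr can manage ExternalNode.
    template <> struct IntrusiveRefCntPtrInfo<ExternalNode> {
      static void retain(ExternalNode *N) { N->AddRef(); }
      static void release(ExternalNode *N) { N->DropRef(); }
    };
    }

    // llvm::IntrusiveRefCntPtr<ExternalNode> P(new ExternalNode());
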
diff --git a/contrib/llvm/include/llvm/ADT/PointerIntPair.h b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
index 85dbba2..ccdcd1a 100644
--- a/contrib/llvm/include/llvm/ADT/PointerIntPair.h
+++ b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
@@ -92,10 +92,14 @@ public:
}
PointerTy const *getAddrOfPointer() const {
+ return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
+ }
+
+ PointerTy *getAddrOfPointer() {
assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
"Can only return the address if IntBits is cleared and "
"PtrTraits doesn't change the pointer");
- return reinterpret_cast<PointerTy const *>(&Value);
+ return reinterpret_cast<PointerTy *>(&Value);
}
void *getOpaqueValue() const { return reinterpret_cast<void*>(Value); }
diff --git a/contrib/llvm/include/llvm/ADT/PointerUnion.h b/contrib/llvm/include/llvm/ADT/PointerUnion.h
index 487096a..614b59c 100644
--- a/contrib/llvm/include/llvm/ADT/PointerUnion.h
+++ b/contrib/llvm/include/llvm/ADT/PointerUnion.h
@@ -142,16 +142,19 @@ namespace llvm {
return T();
}
- /// \brief If the union is set to the first pointer type we can get an
- /// address pointing to it.
- template <typename T>
- PT1 const *getAddrOf() const {
+ /// \brief If the union is set to the first pointer type get an address
+ /// pointing to it.
+ PT1 const *getAddrOfPtr1() const {
+ return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
+ }
+
+ /// \brief If the union is set to the first pointer type get an address
+ /// pointing to it.
+ PT1 *getAddrOfPtr1() {
assert(is<PT1>() && "Val is not the first pointer");
assert(get<PT1>() == Val.getPointer() &&
"Can't get the address because PointerLikeTypeTraits changes the ptr");
- T const *can_only_get_address_of_first_pointer_type
- = reinterpret_cast<PT1 const *>(Val.getAddrOfPointer());
- return can_only_get_address_of_first_pointer_type;
+ return (PT1 *)Val.getAddrOfPointer();
}
/// Assignment operators - Allow assigning into this union from either
@@ -263,7 +266,7 @@ namespace llvm {
::llvm::PointerUnionTypeSelector<PT1, T, IsInnerUnion,
::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3 >
>::Return Ty;
- return Ty(Val).is<T>();
+ return Ty(Val).template is<T>();
}
/// get<T>() - Return the value of the specified pointer type. If the
@@ -276,7 +279,7 @@ namespace llvm {
::llvm::PointerUnionTypeSelector<PT1, T, IsInnerUnion,
::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3 >
>::Return Ty;
- return Ty(Val).get<T>();
+ return Ty(Val).template get<T>();
}
/// dyn_cast<T>() - If the current value is of the specified pointer type,
diff --git a/contrib/llvm/include/llvm/ADT/SetVector.h b/contrib/llvm/include/llvm/ADT/SetVector.h
index abe2067..965f0de 100644
--- a/contrib/llvm/include/llvm/ADT/SetVector.h
+++ b/contrib/llvm/include/llvm/ADT/SetVector.h
@@ -144,6 +144,12 @@ public:
set_.erase(back());
vector_.pop_back();
}
+
+ T pop_back_val() {
+ T Ret = back();
+ pop_back();
+ return Ret;
+ }
bool operator==(const SetVector &that) const {
return vector_ == that.vector_;
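
The new pop_back_val() serves the common worklist idiom, where back() and pop_back() were previously separate calls. A usage sketch; drain and Process are invented names:

    #include "llvm/ADT/SetVector.h"

    template <typename T, typename Callback>
    void drain(llvm::SetVector<T> &Worklist, Callback Process) {
      while (!Worklist.empty())
        Process(Worklist.pop_back_val()); // back() + pop_back() in one step
    }
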
diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
index b15b3ee..a3469a1 100644
--- a/contrib/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
@@ -175,7 +175,7 @@ public:
return CountPopulation_32(Bits);
if (sizeof(uintptr_t) * CHAR_BIT == 64)
return CountPopulation_64(Bits);
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
return getPointer()->count();
}
@@ -212,7 +212,7 @@ public:
return CountTrailingZeros_32(Bits);
if (sizeof(uintptr_t) * CHAR_BIT == 64)
return CountTrailingZeros_64(Bits);
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
return getPointer()->find_first();
}
@@ -230,7 +230,7 @@ public:
return CountTrailingZeros_32(Bits);
if (sizeof(uintptr_t) * CHAR_BIT == 64)
return CountTrailingZeros_64(Bits);
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
return getPointer()->find_next(Prev);
}
diff --git a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
index 9992858..70693d5 100644
--- a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -137,6 +137,10 @@ private:
void operator=(const SmallPtrSetImpl &RHS); // DO NOT IMPLEMENT.
protected:
+ /// swap - Swaps the elements of two sets.
+ /// Note: This method assumes that both sets have the same small size.
+ void swap(SmallPtrSetImpl &RHS);
+
void CopyFrom(const SmallPtrSetImpl &RHS);
};
@@ -287,8 +291,20 @@ public:
return *this;
}
+ /// swap - Swaps the elements of two sets.
+ void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
+ SmallPtrSetImpl::swap(RHS);
+ }
};
}
+namespace std {
+ /// Implement std::swap in terms of SmallPtrSet swap.
+ template<class T, unsigned N>
+ inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
+ LHS.swap(RHS);
+ }
+}
+
#endif
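
The std::swap overload matters because the generic version swaps through a temporary, copy-constructing and assigning whole sets; the member swap exchanges the underlying storage instead. A small sketch of the call site it improves:

    #include "llvm/ADT/SmallPtrSet.h"

    void exchange(llvm::SmallPtrSet<int *, 4> &A,
                  llvm::SmallPtrSet<int *, 4> &B) {
      using std::swap; // the usual idiom now finds the member-based overload
      swap(A, B);
    }
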
diff --git a/contrib/llvm/include/llvm/ADT/SmallSet.h b/contrib/llvm/include/llvm/ADT/SmallSet.h
index d03f1be..cd117f5 100644
--- a/contrib/llvm/include/llvm/ADT/SmallSet.h
+++ b/contrib/llvm/include/llvm/ADT/SmallSet.h
@@ -27,13 +27,13 @@ namespace llvm {
///
/// Note that this set does not provide a way to iterate over members in the
/// set.
-template <typename T, unsigned N>
+template <typename T, unsigned N, typename C = std::less<T> >
class SmallSet {
/// Use a SmallVector to hold the elements here (even though it will never
/// reach its 'large' stage) to avoid calling the default ctors of elements
/// we will never use.
SmallVector<T, N> Vector;
- std::set<T> Set;
+ std::set<T, C> Set;
typedef typename SmallVector<T, N>::const_iterator VIterator;
typedef typename SmallVector<T, N>::iterator mutable_iterator;
public:
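
A sketch of the new comparator parameter. Note that, judging from the small representation's linear scan, lookups in small mode still compare with operator==, so the comparator's notion of equivalence should agree with ==; std::greater satisfies that, while something like a case-insensitive compare would not:

    #include "llvm/ADT/SmallSet.h"
    #include <functional>

    // Elements kept in descending order once the set spills into std::set.
    typedef llvm::SmallSet<int, 8, std::greater<int> > DescendingSmallSet;
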
diff --git a/contrib/llvm/include/llvm/ADT/SmallString.h b/contrib/llvm/include/llvm/ADT/SmallString.h
index da26416..199783b 100644
--- a/contrib/llvm/include/llvm/ADT/SmallString.h
+++ b/contrib/llvm/include/llvm/ADT/SmallString.h
@@ -24,21 +24,244 @@ namespace llvm {
template<unsigned InternalLen>
class SmallString : public SmallVector<char, InternalLen> {
public:
- // Default ctor - Initialize to empty.
+ /// Default ctor - Initialize to empty.
SmallString() {}
- // Initialize from a StringRef.
+ /// Initialize from a StringRef.
SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
- // Initialize with a range.
+ /// Initialize with a range.
template<typename ItTy>
SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
- // Copy ctor.
+ /// Copy ctor.
SmallString(const SmallString &RHS) : SmallVector<char, InternalLen>(RHS) {}
+ // Note that in order to add new overloads for append & assign, we have to
+ // duplicate the inherited versions so as not to inadvertently hide them.
+
+ /// @}
+ /// @name String Assignment
+ /// @{
+
+ /// Assign from a repeated element
+ void assign(unsigned NumElts, char Elt) {
+ this->SmallVectorImpl<char>::assign(NumElts, Elt);
+ }
+
+ /// Assign from an iterator pair
+ template<typename in_iter>
+ void assign(in_iter S, in_iter E) {
+ this->clear();
+ SmallVectorImpl<char>::append(S, E);
+ }
+
+ /// Assign from a StringRef
+ void assign(StringRef RHS) {
+ this->clear();
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// Assign from a SmallVector
+ void assign(const SmallVectorImpl<char> &RHS) {
+ this->clear();
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// @}
+ /// @name String Concatenation
+ /// @{
+
+ /// Append from an iterator pair
+ template<typename in_iter>
+ void append(in_iter S, in_iter E) {
+ SmallVectorImpl<char>::append(S, E);
+ }
+
+ /// Append from a StringRef
+ void append(StringRef RHS) {
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// Append from a SmallVector
+ void append(const SmallVectorImpl<char> &RHS) {
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// @}
+ /// @name String Comparison
+ /// @{
+
+ /// equals - Check for string equality, this is more efficient than
+ /// compare() when the relative ordering of unequal strings isn't needed.
+ bool equals(StringRef RHS) const {
+ return str().equals(RHS);
+ }
+
+ /// equals_lower - Check for string equality, ignoring case.
+ bool equals_lower(StringRef RHS) const {
+ return str().equals_lower(RHS);
+ }
+
+ /// compare - Compare two strings; the result is -1, 0, or 1 if this string
+ /// is lexicographically less than, equal to, or greater than the \arg RHS.
+ int compare(StringRef RHS) const {
+ return str().compare(RHS);
+ }
+
+ /// compare_lower - Compare two strings, ignoring case.
+ int compare_lower(StringRef RHS) const {
+ return str().compare_lower(RHS);
+ }
+
+ /// compare_numeric - Compare two strings, treating sequences of digits as
+ /// numbers.
+ int compare_numeric(StringRef RHS) const {
+ return str().compare_numeric(RHS);
+ }
+
+ /// @}
+ /// @name String Predicates
+ /// @{
+
+ /// startswith - Check if this string starts with the given \arg Prefix.
+ bool startswith(StringRef Prefix) const {
+ return str().startswith(Prefix);
+ }
+
+ /// endswith - Check if this string ends with the given \arg Suffix.
+ bool endswith(StringRef Suffix) const {
+ return str().endswith(Suffix);
+ }
+
+ /// @}
+ /// @name String Searching
+ /// @{
+
+ /// find - Search for the first character \arg C in the string.
+ ///
+ /// \return - The index of the first occurrence of \arg C, or npos if not
+ /// found.
+ size_t find(char C, size_t From = 0) const {
+ return str().find(C, From);
+ }
+
+ /// find - Search for the first string \arg Str in the string.
+ ///
+ /// \return - The index of the first occurrence of \arg Str, or npos if not
+ /// found.
+ size_t find(StringRef Str, size_t From = 0) const {
+ return str().find(Str, From);
+ }
+
+ /// rfind - Search for the last character \arg C in the string.
+ ///
+ /// \return - The index of the last occurrence of \arg C, or npos if not
+ /// found.
+ size_t rfind(char C, size_t From = StringRef::npos) const {
+ return str().rfind(C, From);
+ }
+
+ /// rfind - Search for the last string \arg Str in the string.
+ ///
+ /// \return - The index of the last occurrence of \arg Str, or npos if not
+ /// found.
+ size_t rfind(StringRef Str) const {
+ return str().rfind(Str);
+ }
+
+ /// find_first_of - Find the first character in the string that is \arg C,
+ /// or npos if not found. Same as find.
+ size_t find_first_of(char C, size_t From = 0) const {
+ return str().find_first_of(C, From);
+ }
+
+ /// find_first_of - Find the first character in the string that is in \arg
+ /// Chars, or npos if not found.
+ ///
+ /// Note: O(size() + Chars.size())
+ size_t find_first_of(StringRef Chars, size_t From = 0) const {
+ return str().find_first_of(Chars, From);
+ }
+
+ /// find_first_not_of - Find the first character in the string that is not
+ /// \arg C or npos if not found.
+ size_t find_first_not_of(char C, size_t From = 0) const {
+ return str().find_first_not_of(C, From);
+ }
+
+ /// find_first_not_of - Find the first character in the string that is not
+ /// in the string \arg Chars, or npos if not found.
+ ///
+ /// Note: O(size() + Chars.size())
+ size_t find_first_not_of(StringRef Chars, size_t From = 0) const {
+ return str().find_first_not_of(Chars, From);
+ }
+
+ /// find_last_of - Find the last character in the string that is \arg C, or
+ /// npos if not found.
+ size_t find_last_of(char C, size_t From = StringRef::npos) const {
+ return str().find_last_of(C, From);
+ }
+
+ /// find_last_of - Find the last character in the string that is in \arg
+ /// Chars, or npos if not found.
+ ///
+ /// Note: O(size() + Chars.size())
+ size_t find_last_of(
+ StringRef Chars, size_t From = StringRef::npos) const {
+ return str().find_last_of(Chars, From);
+ }
+
+ /// @}
+ /// @name Helpful Algorithms
+ /// @{
+
+ /// count - Return the number of occurrences of \arg C in the string.
+ size_t count(char C) const {
+ return str().count(C);
+ }
+
+ /// count - Return the number of non-overlapped occurrences of \arg Str in
+ /// the string.
+ size_t count(StringRef Str) const {
+ return str().count(Str);
+ }
+
+ /// @}
+ /// @name Substring Operations
+ /// @{
+
+ /// substr - Return a reference to the substring from [Start, Start + N).
+ ///
+ /// \param Start - The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+ /// \param N - The number of characters to include in the substring. If N
+ /// exceeds the number of characters remaining in the string, the string
+ /// suffix (starting with \arg Start) will be returned.
+ StringRef substr(size_t Start, size_t N = StringRef::npos) const {
+ return str().substr(Start, N);
+ }
+
+ /// slice - Return a reference to the substring from [Start, End).
+ ///
+ /// \param Start - The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+ /// \param End - The index following the last character to include in the
+ /// substring. If this is npos, or less than \arg Start, or exceeds the
+ /// number of characters remaining in the string, the string suffix
+ /// (starting with \arg Start) will be returned.
+ StringRef slice(size_t Start, size_t End) const {
+ return str().slice(Start, End);
+ }
// Extra methods.
+
+ /// Explicit conversion to StringRef
StringRef str() const { return StringRef(this->begin(), this->size()); }
// TODO: Make this const, if it's safe...
@@ -48,7 +271,7 @@ public:
return this->data();
}
- // Implicit conversion to StringRef.
+ /// Implicit conversion to StringRef.
operator StringRef() const { return str(); }
// Extra operators.
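
A usage sketch of the forwarded StringRef API above; before this change each of these calls needed an explicit .str():

    #include "llvm/ADT/SmallString.h"

    void example() {
      llvm::SmallString<32> Path;
      Path.append("/usr/include");
      Path.append("/llvm");

      if (Path.startswith("/usr") &&
          Path.find("llvm") != llvm::StringRef::npos) {
        llvm::StringRef Dir = Path.slice(0, Path.rfind('/'));
        (void)Dir; // "/usr/include"
      }
    }
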
diff --git a/contrib/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm/include/llvm/ADT/SmallVector.h
index 1c42f29..0d9d0d1 100644
--- a/contrib/llvm/include/llvm/ADT/SmallVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallVector.h
@@ -23,30 +23,6 @@
#include <iterator>
#include <memory>
-#ifdef _MSC_VER
-namespace std {
-#if _MSC_VER <= 1310
- // Work around flawed VC++ implementation of std::uninitialized_copy. Define
- // additional overloads so that elements with pointer types are recognized as
- // scalars and not objects, causing bizarre type conversion errors.
- template<class T1, class T2>
- inline _Scalar_ptr_iterator_tag _Ptr_cat(T1 **, T2 **) {
- _Scalar_ptr_iterator_tag _Cat;
- return _Cat;
- }
-
- template<class T1, class T2>
- inline _Scalar_ptr_iterator_tag _Ptr_cat(T1* const *, T2 **) {
- _Scalar_ptr_iterator_tag _Cat;
- return _Cat;
- }
-#else
-// FIXME: It is not clear if the problem is fixed in VS 2005. What is clear
-// is that the above hack won't work if it wasn't fixed.
-#endif
-}
-#endif
-
namespace llvm {
/// SmallVectorBase - This is all the non-templated stuff common to all
@@ -100,10 +76,10 @@ public:
template <typename T>
class SmallVectorTemplateCommon : public SmallVectorBase {
protected:
- void setEnd(T *P) { this->EndX = P; }
-public:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(Size) {}
+ void setEnd(T *P) { this->EndX = P; }
+public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T value_type;
@@ -174,7 +150,7 @@ public:
/// implementations that are designed to work with non-POD-like T's.
template <typename T, bool isPodLike>
class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
-public:
+protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
static void destroy_range(T *S, T *E) {
@@ -194,6 +170,23 @@ public:
/// grow - double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0);
+
+public:
+ void push_back(const T &Elt) {
+ if (this->EndX < this->CapacityX) {
+ Retry:
+ new (this->end()) T(Elt);
+ this->setEnd(this->end()+1);
+ return;
+ }
+ this->grow();
+ goto Retry;
+ }
+
+ void pop_back() {
+ this->setEnd(this->end()-1);
+ this->end()->~T();
+ }
};
// Define this out-of-line to dissuade the C++ compiler from inlining it.
@@ -226,7 +219,7 @@ void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
/// implementations that are designed to work with POD-like T's.
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
-public:
+protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
// No need to do a destroy loop for POD's.
@@ -255,6 +248,21 @@ public:
void grow(size_t MinSize = 0) {
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
+public:
+ void push_back(const T &Elt) {
+ if (this->EndX < this->CapacityX) {
+ Retry:
+ *this->end() = Elt;
+ this->setEnd(this->end()+1);
+ return;
+ }
+ this->grow();
+ goto Retry;
+ }
+
+ void pop_back() {
+ this->setEnd(this->end()-1);
+ }
};
@@ -270,11 +278,13 @@ public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::size_type size_type;
+protected:
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
}
+public:
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
@@ -297,7 +307,7 @@ public:
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
- this->construct_range(this->end(), this->begin()+N, T());
+ std::uninitialized_fill(this->end(), this->begin()+N, T());
this->setEnd(this->begin()+N);
}
}
@@ -309,7 +319,7 @@ public:
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
- construct_range(this->end(), this->begin()+N, NV);
+ std::uninitialized_fill(this->end(), this->begin()+N, NV);
this->setEnd(this->begin()+N);
}
}
@@ -319,25 +329,9 @@ public:
this->grow(N);
}
- void push_back(const T &Elt) {
- if (this->EndX < this->CapacityX) {
- Retry:
- new (this->end()) T(Elt);
- this->setEnd(this->end()+1);
- return;
- }
- this->grow();
- goto Retry;
- }
-
- void pop_back() {
- this->setEnd(this->end()-1);
- this->end()->~T();
- }
-
T pop_back_val() {
T Result = this->back();
- pop_back();
+ this->pop_back();
return Result;
}
@@ -376,7 +370,7 @@ public:
if (this->capacity() < NumElts)
this->grow(NumElts);
this->setEnd(this->begin()+NumElts);
- construct_range(this->begin(), this->end(), Elt);
+ std::uninitialized_fill(this->begin(), this->end(), Elt);
}
iterator erase(iterator I) {
@@ -384,7 +378,7 @@ public:
// Shift all elts down one.
std::copy(I+1, this->end(), I);
// Drop the last elt.
- pop_back();
+ this->pop_back();
return(N);
}
@@ -400,7 +394,7 @@ public:
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
- push_back(Elt);
+ this->push_back(Elt);
return this->end()-1;
}
@@ -554,12 +548,6 @@ public:
assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
-
-private:
- static void construct_range(T *S, T *E, const T &Elt) {
- for (; S != E; ++S)
- new (S) T(Elt);
- }
};
@@ -686,9 +674,7 @@ public:
explicit SmallVector(unsigned Size, const T &Value = T())
: SmallVectorImpl<T>(NumTsAvailable) {
- this->reserve(Size);
- while (Size--)
- this->push_back(Value);
+ this->assign(Size, Value);
}
template<typename ItTy>
@@ -718,9 +704,7 @@ public:
explicit SmallVector(unsigned Size, const T &Value = T())
: SmallVectorImpl<T>(0) {
- this->reserve(Size);
- while (Size--)
- this->push_back(Value);
+ this->assign(Size, Value);
}
template<typename ItTy>
diff --git a/contrib/llvm/include/llvm/ADT/SparseBitVector.h b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
index d977136..89774c3 100644
--- a/contrib/llvm/include/llvm/ADT/SparseBitVector.h
+++ b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
@@ -18,11 +18,11 @@
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <climits>
-#include <cstring>
namespace llvm {
@@ -128,7 +128,7 @@ public:
else if (sizeof(BitWord) == 8)
NumBits += CountPopulation_64(Bits[i]);
else
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
return NumBits;
}
@@ -138,13 +138,11 @@ public:
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + CountTrailingZeros_32(Bits[i]);
- else if (sizeof(BitWord) == 8)
+ if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]);
- else
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
- assert(0 && "Illegal empty element");
- return 0; // Not reached
+ llvm_unreachable("Illegal empty element");
}
/// find_next - Returns the index of the next set bit starting from the
@@ -165,10 +163,9 @@ public:
if (Copy != 0) {
if (sizeof(BitWord) == 4)
return WordPos * BITWORD_SIZE + CountTrailingZeros_32(Copy);
- else if (sizeof(BitWord) == 8)
+ if (sizeof(BitWord) == 8)
return WordPos * BITWORD_SIZE + CountTrailingZeros_64(Copy);
- else
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
// Check subsequent words.
@@ -176,10 +173,9 @@ public:
if (Bits[i] != 0) {
if (sizeof(BitWord) == 4)
return i * BITWORD_SIZE + CountTrailingZeros_32(Bits[i]);
- else if (sizeof(BitWord) == 8)
+ if (sizeof(BitWord) == 8)
return i * BITWORD_SIZE + CountTrailingZeros_64(Bits[i]);
- else
- assert(0 && "Unsupported!");
+ llvm_unreachable("Unsupported!");
}
return -1;
}
@@ -264,15 +260,6 @@ public:
}
BecameZero = allzero;
}
-
- // Get a hash value for this element;
- uint64_t getHashValue() const {
- uint64_t HashVal = 0;
- for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
- HashVal ^= Bits[i];
- }
- return HashVal;
- }
};
template <unsigned ElementSize = 128>
@@ -813,18 +800,6 @@ public:
iterator end() const {
return iterator(this, true);
}
-
- // Get a hash value for this bitmap.
- uint64_t getHashValue() const {
- uint64_t HashVal = 0;
- for (ElementListConstIter Iter = Elements.begin();
- Iter != Elements.end();
- ++Iter) {
- HashVal ^= Iter->index();
- HashVal ^= Iter->getHashValue();
- }
- return HashVal;
- }
};
// Convenience functions to allow Or and And without dereferencing in the user
diff --git a/contrib/llvm/include/llvm/ADT/SparseSet.h b/contrib/llvm/include/llvm/ADT/SparseSet.h
new file mode 100644
index 0000000..923c6a5
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SparseSet.h
@@ -0,0 +1,268 @@
+//===--- llvm/ADT/SparseSet.h - Sparse set ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseSet class derived from the version described in
+// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters
+// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec. 1993.
+//
+// A sparse set holds a small number of objects identified by integer keys from
+// a moderately sized universe. The sparse set uses more memory than other
+// containers in order to provide faster operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSESET_H
+#define LLVM_ADT_SPARSESET_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include <limits>
+
+namespace llvm {
+
+/// SparseSetFunctor - Objects in a SparseSet are identified by small integer
+/// keys. A functor object is used to compute the key of an object. The
+/// functor's operator() must return an unsigned smaller than the universe.
+///
+/// The default functor implementation forwards to a getSparseSetKey() method
+/// on the object. It is intended for sparse sets holding ad-hoc structs.
+///
+template<typename ValueT>
+struct SparseSetFunctor {
+ unsigned operator()(const ValueT &Val) {
+ return Val.getSparseSetKey();
+ }
+};
+
+/// SparseSetFunctor<unsigned> - Provide a trivial identity functor for
+/// SparseSet<unsigned>.
+///
+template<> struct SparseSetFunctor<unsigned> {
+ unsigned operator()(unsigned Val) { return Val; }
+};
+
+/// SparseSet - Fast set implementation for objects that can be identified by
+/// small unsigned keys.
+///
+/// SparseSet allocates memory proportional to the size of the key universe, so
+/// it is not recommended for building composite data structures. It is useful
+/// for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast
+/// clear() and iteration as fast as a vector. The find(), insert(), and
+/// erase() operations are all constant time, and typically faster than a hash
+/// table. The iteration order doesn't depend on numerical key values; it only
+/// depends on the order of insert() and erase() operations. When no elements
+/// have been erased, the iteration order is the insertion order.
+///
+/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast
+/// iteration independent of the size of the universe.
+///
+/// SparseSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector. Most of the memory is used by
+/// the sparse array which is the size of the key universe. The SparseT
+/// template parameter provides a space/speed tradeoff for sets holding many
+/// elements.
+///
+/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse
+/// array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache
+/// lines, but the sparse array is 4x smaller. N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// @param ValueT The type of objects in the set.
+/// @param SparseT An unsigned integer type. See above.
+/// @param KeyFunctorT A functor that computes the unsigned key of a ValueT.
+///
+template<typename ValueT,
+ typename SparseT = uint8_t,
+ typename KeyFunctorT = SparseSetFunctor<ValueT> >
+class SparseSet {
+ typedef SmallVector<ValueT, 8> DenseT;
+ DenseT Dense;
+ SparseT *Sparse;
+ unsigned Universe;
+ KeyFunctorT KeyOf;
+
+ // Disable copy construction and assignment.
+ // This data structure is not meant to be used that way.
+ SparseSet(const SparseSet&); // DO NOT IMPLEMENT.
+ SparseSet &operator=(const SparseSet&); // DO NOT IMPLEMENT.
+
+public:
+ typedef ValueT value_type;
+ typedef ValueT &reference;
+ typedef const ValueT &const_reference;
+ typedef ValueT *pointer;
+ typedef const ValueT *const_pointer;
+
+ SparseSet() : Sparse(0), Universe(0) {}
+ ~SparseSet() { free(Sparse); }
+
+ /// setUniverse - Set the universe size which determines the largest key the
+ /// set can hold. The universe must be sized before any elements can be
+ /// added.
+ ///
+ /// @param U Universe size. All object keys must be less than U.
+ ///
+ void setUniverse(unsigned U) {
+ // It's not hard to resize the universe on a non-empty set, but it doesn't
+ // seem like a likely use case, so we can add that code when we need it.
+ assert(empty() && "Can only resize universe on an empty map");
+ // Hysteresis prevents needless reallocations.
+ if (U >= Universe/4 && U <= Universe)
+ return;
+ free(Sparse);
+ // The Sparse array doesn't actually need to be initialized, so malloc
+ // would be enough here, but that will cause tools like valgrind to
+ // complain about branching on uninitialized data.
+ Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
+ Universe = U;
+ }
+
+ // Import trivial vector stuff from DenseT.
+ typedef typename DenseT::iterator iterator;
+ typedef typename DenseT::const_iterator const_iterator;
+
+ const_iterator begin() const { return Dense.begin(); }
+ const_iterator end() const { return Dense.end(); }
+ iterator begin() { return Dense.begin(); }
+ iterator end() { return Dense.end(); }
+
+ /// empty - Returns true if the set is empty.
+ ///
+ /// This is not the same as BitVector::empty().
+ ///
+ bool empty() const { return Dense.empty(); }
+
+ /// size - Returns the number of elements in the set.
+ ///
+ /// This is not the same as BitVector::size() which returns the size of the
+ /// universe.
+ ///
+ unsigned size() const { return Dense.size(); }
+
+ /// clear - Clears the set. This is a very fast constant time operation.
+ ///
+ void clear() {
+ // Sparse does not need to be cleared, see find().
+ Dense.clear();
+ }
+
+ /// find - Find an element by its key.
+ ///
+ /// @param Key A valid key to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator find(unsigned Key) {
+ assert(Key < Universe && "Key out of range");
+ assert(std::numeric_limits<SparseT>::is_integer &&
+ !std::numeric_limits<SparseT>::is_signed &&
+ "SparseT must be an unsigned integer type");
+ const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+ for (unsigned i = Sparse[Key], e = size(); i < e; i += Stride) {
+ const unsigned FoundKey = KeyOf(Dense[i]);
+ assert(FoundKey < Universe && "Invalid key in set. Did object mutate?");
+ if (Key == FoundKey)
+ return begin() + i;
+ // Stride is 0 when SparseT >= unsigned. We don't need to loop.
+ if (!Stride)
+ break;
+ }
+ return end();
+ }
+
+ const_iterator find(unsigned Key) const {
+ return const_cast<SparseSet*>(this)->find(Key);
+ }
+
+ /// count - Returns true if this set contains an element identified by Key.
+ ///
+ bool count(unsigned Key) const {
+ return find(Key) != end();
+ }
+
+ /// insert - Attempts to insert a new element.
+ ///
+ /// If Val is successfully inserted, return (I, true), where I is an iterator
+ /// pointing to the newly inserted element.
+ ///
+ /// If the set already contains an element with the same key as Val, return
+ /// (I, false), where I is an iterator pointing to the existing element.
+ ///
+ /// Insertion invalidates all iterators.
+ ///
+ std::pair<iterator, bool> insert(const ValueT &Val) {
+ unsigned Key = KeyOf(Val);
+ iterator I = find(Key);
+ if (I != end())
+ return std::make_pair(I, false);
+ Sparse[Key] = size();
+ Dense.push_back(Val);
+ return std::make_pair(end() - 1, true);
+ }
+
+ /// array subscript - If an element already exists with this key, return it.
+ /// Otherwise, automatically construct a new value from Key, insert it,
+ /// and return the newly inserted element.
+ ValueT &operator[](unsigned Key) {
+ return *insert(ValueT(Key)).first;
+ }
+
+ /// erase - Erases an existing element identified by a valid iterator.
+ ///
+ /// This invalidates all iterators, but erase() returns an iterator pointing
+ /// to the next element. This makes it possible to erase selected elements
+ /// while iterating over the set:
+ ///
+ /// for (SparseSet::iterator I = Set.begin(); I != Set.end();)
+ /// if (test(*I))
+ /// I = Set.erase(I);
+ /// else
+ /// ++I;
+ ///
+ /// Note that end() changes when elements are erased, unlike std::list.
+ ///
+ iterator erase(iterator I) {
+ assert(unsigned(I - begin()) < size() && "Invalid iterator");
+ if (I != end() - 1) {
+ *I = Dense.back();
+ unsigned BackKey = KeyOf(Dense.back());
+ assert(BackKey < Universe && "Invalid key in set. Did object mutate?");
+ Sparse[BackKey] = I - begin();
+ }
+ // This depends on SmallVector::pop_back() not invalidating iterators.
+ // std::vector::pop_back() doesn't give that guarantee.
+ Dense.pop_back();
+ return I;
+ }
+
+ /// erase - Erases an element identified by Key, if it exists.
+ ///
+ /// @param Key The key identifying the element to erase.
+ /// @returns True when an element was erased, false if no element was found.
+ ///
+ bool erase(unsigned Key) {
+ iterator I = find(Key);
+ if (I == end())
+ return false;
+ erase(I);
+ return true;
+ }
+
+};
+
+} // end namespace llvm
+
+#endif
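
A minimal usage sketch of the new container, using the identity functor that SparseSet<unsigned> gets by default, and assuming NumKeys is at least 8 so the example keys stay in range:

    #include "llvm/ADT/SparseSet.h"

    void example(unsigned NumKeys) {
      llvm::SparseSet<unsigned> Set;
      Set.setUniverse(NumKeys); // must be sized before any insert

      Set.insert(3);
      Set.insert(7);
      Set.insert(3);            // duplicate key: returns (iterator, false)

      if (Set.count(7))
        Set.erase(7);

      Set.clear();              // constant time, independent of NumKeys
    }
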
diff --git a/contrib/llvm/include/llvm/ADT/Statistic.h b/contrib/llvm/include/llvm/ADT/Statistic.h
index b8a1a2f..b54d10b 100644
--- a/contrib/llvm/include/llvm/ADT/Statistic.h
+++ b/contrib/llvm/include/llvm/ADT/Statistic.h
@@ -27,6 +27,7 @@
#define LLVM_ADT_STATISTIC_H
#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Valgrind.h"
namespace llvm {
class raw_ostream;
@@ -110,6 +111,7 @@ protected:
bool tmp = Initialized;
sys::MemoryFence();
if (!tmp) RegisterStatistic();
+ TsanHappensAfter(this);
return *this;
}
void RegisterStatistic();
diff --git a/contrib/llvm/include/llvm/ADT/StringExtras.h b/contrib/llvm/include/llvm/ADT/StringExtras.h
index d01d3e1..655d884 100644
--- a/contrib/llvm/include/llvm/ADT/StringExtras.h
+++ b/contrib/llvm/include/llvm/ADT/StringExtras.h
@@ -15,12 +15,7 @@
#define LLVM_ADT_STRINGEXTRAS_H
#include "llvm/Support/DataTypes.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/StringRef.h"
-#include <cctype>
-#include <cstdio>
-#include <string>
namespace llvm {
template<typename T> class SmallVectorImpl;
@@ -101,38 +96,6 @@ static inline std::string itostr(int64_t X) {
return utostr(static_cast<uint64_t>(X));
}
-static inline std::string ftostr(double V) {
- char Buffer[200];
- sprintf(Buffer, "%20.6e", V);
- char *B = Buffer;
- while (*B == ' ') ++B;
- return B;
-}
-
-static inline std::string ftostr(const APFloat& V) {
- if (&V.getSemantics() == &APFloat::IEEEdouble)
- return ftostr(V.convertToDouble());
- else if (&V.getSemantics() == &APFloat::IEEEsingle)
- return ftostr((double)V.convertToFloat());
- return "<unknown format in ftostr>"; // error
-}
-
-static inline std::string LowercaseString(const std::string &S) {
- std::string result(S);
- for (unsigned i = 0; i < S.length(); ++i)
- if (isupper(result[i]))
- result[i] = char(tolower(result[i]));
- return result;
-}
-
-static inline std::string UppercaseString(const std::string &S) {
- std::string result(S);
- for (unsigned i = 0; i < S.length(); ++i)
- if (islower(result[i]))
- result[i] = char(toupper(result[i]));
- return result;
-}
-
/// StrInStrNoCase - Portable version of strcasestr. Locates the first
/// occurrence of string 's1' in string 's2', ignoring case. Returns
/// the offset of s2 in s1 or npos if s2 cannot be found.
diff --git a/contrib/llvm/include/llvm/ADT/StringMap.h b/contrib/llvm/include/llvm/ADT/StringMap.h
index 3507787..097418e 100644
--- a/contrib/llvm/include/llvm/ADT/StringMap.h
+++ b/contrib/llvm/include/llvm/ADT/StringMap.h
@@ -51,20 +51,11 @@ public:
/// StringMapImpl - This is the base class of StringMap that is shared among
/// all of its instantiations.
class StringMapImpl {
-public:
- /// ItemBucket - The hash table consists of an array of these. If Item is
- /// non-null, this is an extant entry, otherwise, it is a hole.
- struct ItemBucket {
- /// FullHashValue - This remembers the full hash value of the key for
- /// easy scanning.
- unsigned FullHashValue;
-
- /// Item - This is a pointer to the actual item object.
- StringMapEntryBase *Item;
- };
-
protected:
- ItemBucket *TheTable;
+ // Array of NumBuckets pointers to entries; null pointers are holes.
+ // TheTable[NumBuckets] contains a sentinel value for easy iteration. It is
+ // followed by an array of the actual hash values as unsigned integers.
+ StringMapEntryBase **TheTable;
unsigned NumBuckets;
unsigned NumItems;
unsigned NumTombstones;
@@ -238,8 +229,9 @@ public:
template<typename ValueTy, typename AllocatorTy = MallocAllocator>
class StringMap : public StringMapImpl {
AllocatorTy Allocator;
- typedef StringMapEntry<ValueTy> MapEntryTy;
public:
+ typedef StringMapEntry<ValueTy> MapEntryTy;
+
StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(unsigned InitialSize)
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
@@ -289,13 +281,13 @@ public:
iterator find(StringRef Key) {
int Bucket = FindKey(Key);
if (Bucket == -1) return end();
- return iterator(TheTable+Bucket);
+ return iterator(TheTable+Bucket, true);
}
const_iterator find(StringRef Key) const {
int Bucket = FindKey(Key);
if (Bucket == -1) return end();
- return const_iterator(TheTable+Bucket);
+ return const_iterator(TheTable+Bucket, true);
}
/// lookup - Return the entry for the specified key, or a default
@@ -320,13 +312,13 @@ public:
/// insert it and return true.
bool insert(MapEntryTy *KeyValue) {
unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
- ItemBucket &Bucket = TheTable[BucketNo];
- if (Bucket.Item && Bucket.Item != getTombstoneVal())
+ StringMapEntryBase *&Bucket = TheTable[BucketNo];
+ if (Bucket && Bucket != getTombstoneVal())
return false; // Already exists in map.
- if (Bucket.Item == getTombstoneVal())
+ if (Bucket == getTombstoneVal())
--NumTombstones;
- Bucket.Item = KeyValue;
+ Bucket = KeyValue;
++NumItems;
assert(NumItems + NumTombstones <= NumBuckets);
@@ -340,10 +332,11 @@ public:
// Zap all values, resetting the keys back to non-present (not tombstone),
// which is safe because we're removing all elements.
- for (ItemBucket *I = TheTable, *E = TheTable+NumBuckets; I != E; ++I) {
- if (I->Item && I->Item != getTombstoneVal()) {
- static_cast<MapEntryTy*>(I->Item)->Destroy(Allocator);
- I->Item = 0;
+ for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+ StringMapEntryBase *&Bucket = TheTable[I];
+ if (Bucket && Bucket != getTombstoneVal()) {
+ static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+ Bucket = 0;
}
}
@@ -357,21 +350,21 @@ public:
template <typename InitTy>
MapEntryTy &GetOrCreateValue(StringRef Key, InitTy Val) {
unsigned BucketNo = LookupBucketFor(Key);
- ItemBucket &Bucket = TheTable[BucketNo];
- if (Bucket.Item && Bucket.Item != getTombstoneVal())
- return *static_cast<MapEntryTy*>(Bucket.Item);
+ StringMapEntryBase *&Bucket = TheTable[BucketNo];
+ if (Bucket && Bucket != getTombstoneVal())
+ return *static_cast<MapEntryTy*>(Bucket);
MapEntryTy *NewItem =
MapEntryTy::Create(Key.begin(), Key.end(), Allocator, Val);
- if (Bucket.Item == getTombstoneVal())
+ if (Bucket == getTombstoneVal())
--NumTombstones;
++NumItems;
assert(NumItems + NumTombstones <= NumBuckets);
// Fill in the bucket for the hash table. The FullHashValue was already
// filled in by LookupBucketFor.
- Bucket.Item = NewItem;
+ Bucket = NewItem;
RehashTable();
return *NewItem;
@@ -410,21 +403,21 @@ public:
template<typename ValueTy>
class StringMapConstIterator {
protected:
- StringMapImpl::ItemBucket *Ptr;
+ StringMapEntryBase **Ptr;
public:
typedef StringMapEntry<ValueTy> value_type;
- explicit StringMapConstIterator(StringMapImpl::ItemBucket *Bucket,
+ explicit StringMapConstIterator(StringMapEntryBase **Bucket,
bool NoAdvance = false)
: Ptr(Bucket) {
if (!NoAdvance) AdvancePastEmptyBuckets();
}
const value_type &operator*() const {
- return *static_cast<StringMapEntry<ValueTy>*>(Ptr->Item);
+ return *static_cast<StringMapEntry<ValueTy>*>(*Ptr);
}
const value_type *operator->() const {
- return static_cast<StringMapEntry<ValueTy>*>(Ptr->Item);
+ return static_cast<StringMapEntry<ValueTy>*>(*Ptr);
}
bool operator==(const StringMapConstIterator &RHS) const {
@@ -445,7 +438,7 @@ public:
private:
void AdvancePastEmptyBuckets() {
- while (Ptr->Item == 0 || Ptr->Item == StringMapImpl::getTombstoneVal())
+ while (*Ptr == 0 || *Ptr == StringMapImpl::getTombstoneVal())
++Ptr;
}
};
@@ -453,15 +446,15 @@ private:
template<typename ValueTy>
class StringMapIterator : public StringMapConstIterator<ValueTy> {
public:
- explicit StringMapIterator(StringMapImpl::ItemBucket *Bucket,
+ explicit StringMapIterator(StringMapEntryBase **Bucket,
bool NoAdvance = false)
: StringMapConstIterator<ValueTy>(Bucket, NoAdvance) {
}
StringMapEntry<ValueTy> &operator*() const {
- return *static_cast<StringMapEntry<ValueTy>*>(this->Ptr->Item);
+ return *static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
}
StringMapEntry<ValueTy> *operator->() const {
- return static_cast<StringMapEntry<ValueTy>*>(this->Ptr->Item);
+ return static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
}
};
diff --git a/contrib/llvm/include/llvm/ADT/StringRef.h b/contrib/llvm/include/llvm/ADT/StringRef.h
index 8396921..76ba66e 100644
--- a/contrib/llvm/include/llvm/ADT/StringRef.h
+++ b/contrib/llvm/include/llvm/ADT/StringRef.h
@@ -10,15 +10,26 @@
#ifndef LLVM_ADT_STRINGREF_H
#define LLVM_ADT_STRINGREF_H
+#include "llvm/Support/type_traits.h"
+
#include <cassert>
#include <cstring>
-#include <utility>
+#include <limits>
#include <string>
+#include <utility>
namespace llvm {
template<typename T>
class SmallVectorImpl;
class APInt;
+ class hash_code;
+ class StringRef;
+
+ /// Helper functions for StringRef::getAsInteger.
+ bool getAsUnsignedInteger(StringRef Str, unsigned Radix,
+ unsigned long long &Result);
+
+ bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result);
/// StringRef - Represent a constant reference to a string, i.e. a character
/// array and a length, which need not be null terminated.
@@ -304,14 +315,29 @@ namespace llvm {
///
/// If the string is invalid or if only a subset of the string is valid,
/// this returns true to signify the error. The string is considered
- /// erroneous if empty.
+ /// erroneous if empty or if it overflows T.
///
- bool getAsInteger(unsigned Radix, long long &Result) const;
- bool getAsInteger(unsigned Radix, unsigned long long &Result) const;
- bool getAsInteger(unsigned Radix, int &Result) const;
- bool getAsInteger(unsigned Radix, unsigned &Result) const;
+ template <typename T>
+ typename enable_if_c<std::numeric_limits<T>::is_signed, bool>::type
+ getAsInteger(unsigned Radix, T &Result) const {
+ long long LLVal;
+ if (getAsSignedInteger(*this, Radix, LLVal) ||
+ static_cast<T>(LLVal) != LLVal)
+ return true;
+ Result = LLVal;
+ return false;
+ }
- // TODO: Provide overloads for int/unsigned that check for overflow.
+ template <typename T>
+ typename enable_if_c<!std::numeric_limits<T>::is_signed, bool>::type
+ getAsInteger(unsigned Radix, T &Result) const {
+ unsigned long long ULLVal;
+ if (getAsUnsignedInteger(*this, Radix, ULLVal) ||
+ static_cast<T>(ULLVal) != ULLVal)
+ return true;
+ Result = ULLVal;
+ return false;
+ }
/// getAsInteger - Parse the current string as an integer of the
/// specified radix, or of an autosensed radix if the radix given
@@ -327,6 +353,16 @@ namespace llvm {
bool getAsInteger(unsigned Radix, APInt &Result) const;
/// @}
+ /// @name String Operations
+ /// @{
+
+ /// lower - Convert the given ASCII string to lowercase.
+ std::string lower() const;
+
+ /// upper - Convert the given ASCII string to uppercase.
+ std::string upper() const;
+
+ /// @}
/// @name Substring Operations
/// @{
@@ -343,6 +379,20 @@ namespace llvm {
Start = min(Start, Length);
return StringRef(Data + Start, min(N, Length - Start));
}
+
+ /// drop_front - Return a StringRef equal to 'this' but with the first
+ /// N elements dropped.
+ StringRef drop_front(unsigned N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return substr(N);
+ }
+
+ /// drop_back - Return a StringRef equal to 'this' but with the last
+ /// N elements dropped.
+ StringRef drop_back(unsigned N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return substr(0, size()-N);
+ }
/// slice - Return a reference to the substring from [Start, End).
///
@@ -466,6 +516,9 @@ namespace llvm {
/// @}
+ /// \brief Compute a hash_code for a StringRef.
+ hash_code hash_value(StringRef S);
+
// StringRefs can be treated like a POD type.
template <typename T> struct isPodLike;
template <> struct isPodLike<StringRef> { static const bool value = true; };
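
The removed TODO noted that the old fixed-width int/unsigned overloads did not check for overflow; the templated getAsInteger overloads above reject any value that does not round-trip through T. A sketch with an invented parseByte helper:

    #include "llvm/ADT/StringRef.h"

    bool parseByte(llvm::StringRef S, unsigned char &Out) {
      // Returns true on error; "300" fails here because it does not fit in
      // unsigned char, while "254" succeeds.
      return S.getAsInteger(10, Out);
    }
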
diff --git a/contrib/llvm/include/llvm/ADT/TinyPtrVector.h b/contrib/llvm/include/llvm/ADT/TinyPtrVector.h
index ee86d8b..5014517 100644
--- a/contrib/llvm/include/llvm/ADT/TinyPtrVector.h
+++ b/contrib/llvm/include/llvm/ADT/TinyPtrVector.h
@@ -37,6 +37,15 @@ public:
delete V;
}
+ // implicit conversion operator to ArrayRef.
+ operator ArrayRef<EltTy>() const {
+ if (Val.isNull())
+ return ArrayRef<EltTy>();
+ if (Val.template is<EltTy>())
+ return *Val.getAddrOfPtr1();
+ return *Val.template get<VecTy*>();
+ }
+
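
A sketch of what the new conversion enables (the sum() helper is invented): a TinyPtrVector can now be passed directly to any API taking an ArrayRef of its element type.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/TinyPtrVector.h"

int sum(llvm::ArrayRef<int *> Vals) {
  int Total = 0;
  for (unsigned i = 0, e = Vals.size(); i != e; ++i)
    Total += *Vals[i];
  return Total;
}

int main() {
  int A = 1, B = 2;
  llvm::TinyPtrVector<int *> V;
  V.push_back(&A);
  V.push_back(&B);
  // The implicit conversion operator above kicks in here.
  return sum(V) == 3 ? 0 : 1;
}
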
bool empty() const {
// This vector can be empty if it contains no element, or if it
// contains a pointer to an empty vector.
@@ -54,18 +63,20 @@ public:
return Val.template get<VecTy*>()->size();
}
- typedef const EltTy *iterator;
- iterator begin() const {
+ typedef const EltTy *const_iterator;
+ typedef EltTy *iterator;
+
+ iterator begin() {
if (empty())
return 0;
if (Val.template is<EltTy>())
- return Val.template getAddrOf<EltTy>();
+ return Val.getAddrOfPtr1();
return Val.template get<VecTy *>()->begin();
}
- iterator end() const {
+ iterator end() {
if (empty())
return 0;
@@ -75,7 +86,14 @@ public:
return Val.template get<VecTy *>()->end();
}
-
+ const_iterator begin() const {
+ return (const_iterator)const_cast<TinyPtrVector*>(this)->begin();
+ }
+
+ const_iterator end() const {
+ return (const_iterator)const_cast<TinyPtrVector*>(this)->end();
+ }
+
EltTy operator[](unsigned i) const {
assert(!Val.isNull() && "can't index into an empty vector");
if (EltTy V = Val.template dyn_cast<EltTy>()) {
@@ -124,6 +142,20 @@ public:
}
// Otherwise, we're already empty.
}
+
+ iterator erase(iterator I) {
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>()) {
+ if (I == begin())
+ Val = (EltTy)0;
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ // multiple items in a vector; just do the erase, there is no
+ // benefit to collapsing back to a pointer
+ return Vec->erase(I);
+ }
+
+ return 0;
+ }
private:
void operator=(const TinyPtrVector&); // NOT IMPLEMENTED YET.
diff --git a/contrib/llvm/include/llvm/ADT/Trie.h b/contrib/llvm/include/llvm/ADT/Trie.h
index 6b150c8f..845af01 100644
--- a/contrib/llvm/include/llvm/ADT/Trie.h
+++ b/contrib/llvm/include/llvm/ADT/Trie.h
@@ -220,8 +220,7 @@ bool Trie<Payload>::addString(const std::string& s, const Payload& data) {
assert(0 && "FIXME!");
return false;
case Node::DontMatch:
- assert(0 && "Impossible!");
- return false;
+ llvm_unreachable("Impossible!");
case Node::LabelIsPrefix:
s1 = s1.substr(nNode->label().length());
cNode = nNode;
@@ -258,8 +257,7 @@ const Payload& Trie<Payload>::lookup(const std::string& s) const {
case Node::StringIsPrefix:
return Empty;
case Node::DontMatch:
- assert(0 && "Impossible!");
- return Empty;
+ llvm_unreachable("Impossible!");
case Node::LabelIsPrefix:
s1 = s1.substr(nNode->label().length());
cNode = nNode;
diff --git a/contrib/llvm/include/llvm/ADT/Triple.h b/contrib/llvm/include/llvm/ADT/Triple.h
index 3503c0f..f5f99d0 100644
--- a/contrib/llvm/include/llvm/ADT/Triple.h
+++ b/contrib/llvm/include/llvm/ADT/Triple.h
@@ -43,20 +43,19 @@ public:
enum ArchType {
UnknownArch,
- alpha, // Alpha: alpha
arm, // ARM; arm, armv.*, xscale
- bfin, // Blackfin: bfin
cellspu, // CellSPU: spu, cellspu
+ hexagon, // Hexagon: hexagon
mips, // MIPS: mips, mipsallegrex
- mipsel, // MIPSEL: mipsel, mipsallegrexel, psp
+ mipsel, // MIPSEL: mipsel, mipsallegrexel
mips64, // MIPS64: mips64
mips64el,// MIPS64EL: mips64el
msp430, // MSP430: msp430
ppc, // PPC: powerpc
ppc64, // PPC64: powerpc64, ppu
+ r600, // R600: AMD GPUs HD2XXX - HD6XXX
sparc, // Sparc: sparc
sparcv9, // Sparcv9: Sparcv9
- systemz, // SystemZ: s390x
tce, // TCE (http://tce.cs.tut.fi/): tce
thumb, // Thumb: thumb, thumbv.*
x86, // X86: i[3-9]86
@@ -66,16 +65,16 @@ public:
ptx32, // PTX: ptx (32-bit)
ptx64, // PTX: ptx (64-bit)
le32, // le32: generic little-endian 32-bit CPU (PNaCl / Emscripten)
- amdil, // amdil: amd IL
-
- InvalidArch
+ amdil // amdil: amd IL
};
enum VendorType {
UnknownVendor,
Apple,
PC,
- SCEI
+ SCEI,
+ BGP,
+ BGQ
};
enum OSType {
UnknownOS,
@@ -93,61 +92,52 @@ public:
MinGW32, // i*86-pc-mingw32, *-w64-mingw32
NetBSD,
OpenBSD,
- Psp,
Solaris,
Win32,
Haiku,
Minix,
RTEMS,
- NativeClient
+ NativeClient,
+ CNK // BG/P Compute-Node Kernel
};
enum EnvironmentType {
UnknownEnvironment,
GNU,
GNUEABI,
+ GNUEABIHF,
EABI,
- MachO
+ MachO,
+ ANDROIDEABI
};
private:
std::string Data;
- /// The parsed arch type (or InvalidArch if uninitialized).
- mutable ArchType Arch;
+ /// The parsed arch type.
+ ArchType Arch;
/// The parsed vendor type.
- mutable VendorType Vendor;
+ VendorType Vendor;
/// The parsed OS type.
- mutable OSType OS;
+ OSType OS;
/// The parsed Environment type.
- mutable EnvironmentType Environment;
-
- bool isInitialized() const { return Arch != InvalidArch; }
- static ArchType ParseArch(StringRef ArchName);
- static VendorType ParseVendor(StringRef VendorName);
- static OSType ParseOS(StringRef OSName);
- static EnvironmentType ParseEnvironment(StringRef EnvironmentName);
- void Parse() const;
+ EnvironmentType Environment;
public:
/// @name Constructors
/// @{
- Triple() : Data(), Arch(InvalidArch) {}
- explicit Triple(const Twine &Str) : Data(Str.str()), Arch(InvalidArch) {}
- Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr)
- : Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr).str()),
- Arch(InvalidArch) {
- }
+ /// \brief Default constructor is the same as an empty string and leaves all
+ /// triple fields unknown.
+ Triple() : Data(), Arch(), Vendor(), OS(), Environment() {}
+ explicit Triple(const Twine &Str);
+ Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
- const Twine &EnvironmentStr)
- : Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr + Twine('-') +
- EnvironmentStr).str()), Arch(InvalidArch) {
- }
+ const Twine &EnvironmentStr);
/// @}
/// @name Normalization
@@ -164,22 +154,13 @@ public:
/// @{
/// getArch - Get the parsed architecture type of this triple.
- ArchType getArch() const {
- if (!isInitialized()) Parse();
- return Arch;
- }
+ ArchType getArch() const { return Arch; }
/// getVendor - Get the parsed vendor type of this triple.
- VendorType getVendor() const {
- if (!isInitialized()) Parse();
- return Vendor;
- }
+ VendorType getVendor() const { return Vendor; }
/// getOS - Get the parsed operating system type of this triple.
- OSType getOS() const {
- if (!isInitialized()) Parse();
- return OS;
- }
+ OSType getOS() const { return OS; }
/// hasEnvironment - Does this triple have the optional environment
/// (fourth) component?
@@ -188,11 +169,31 @@ public:
}
/// getEnvironment - Get the parsed environment type of this triple.
- EnvironmentType getEnvironment() const {
- if (!isInitialized()) Parse();
- return Environment;
+ EnvironmentType getEnvironment() const { return Environment; }
+
+ /// getOSVersion - Parse the version number from the OS name component of the
+ /// triple, if present.
+ ///
+ /// For example, "fooos1.2.3" would return (1, 2, 3).
+ ///
+ /// If an entry is not defined, it will be returned as 0.
+ void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const;
+
+ /// getOSMajorVersion - Return just the major version number, this is
+ /// specialized because it is a common query.
+ unsigned getOSMajorVersion() const {
+ unsigned Maj, Min, Micro;
+ getOSVersion(Maj, Min, Micro);
+ return Maj;
}
+ /// getMacOSXVersion - Parse the version number as with getOSVersion and then
+ /// translate generic "darwin" versions to the corresponding OS X versions.
+ /// This may also be called with IOS triples but the OS X version number is
+ /// just set to a constant 10.4.0 in that case. Returns true if successful.
+ bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const;
+
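
A sketch of the version queries in their new location (the predicate and its threshold are invented):

#include "llvm/ADT/Triple.h"

bool isAtLeastFreeBSD9(const llvm::Triple &T) {
  unsigned Major, Minor, Micro;
  // "x86_64-unknown-freebsd9.0" yields (9, 0, 0); components missing from
  // the triple come back as 0.
  T.getOSVersion(Major, Minor, Micro);
  return T.getOS() == llvm::Triple::FreeBSD && Major >= 9;
}
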
/// @}
/// @name Direct Component Access
/// @{
@@ -221,21 +222,28 @@ public:
/// if the environment component is present).
StringRef getOSAndEnvironmentName() const;
- /// getOSVersion - Parse the version number from the OS name component of the
- /// triple, if present.
+ /// @}
+ /// @name Convenience Predicates
+ /// @{
+
+ /// \brief Test whether the architecture is 64-bit
///
- /// For example, "fooos1.2.3" would return (1, 2, 3).
+ /// Note that this tests for 64-bit pointer width, and nothing else. Note
+ /// that we intentionally expose only three predicates, 64-bit, 32-bit, and
+ /// 16-bit. The inner details of pointer width for particular architectures
+ /// are not summed up in the triple, and so only a coarse-grained predicate
+ /// system is provided.
+ bool isArch64Bit() const;
+
+ /// \brief Test whether the architecture is 32-bit
///
- /// If an entry is not defined, it will be returned as 0.
- void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const;
+ /// Note that this tests for 32-bit pointer width, and nothing else.
+ bool isArch32Bit() const;
- /// getOSMajorVersion - Return just the major version number, this is
- /// specialized because it is a common query.
- unsigned getOSMajorVersion() const {
- unsigned Maj, Min, Micro;
- getOSVersion(Maj, Min, Micro);
- return Maj;
- }
+ /// \brief Test whether the architecture is 16-bit
+ ///
+ /// Note that this tests for 16-bit pointer width, and nothing else.
+ bool isArch16Bit() const;
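
For illustration, a minimal sketch of the pointer-width predicates:

#include "llvm/ADT/Triple.h"
#include <cstdio>

int main() {
  llvm::Triple T("x86_64-pc-linux-gnu");
  // Only pointer width is classified; finer ABI details need other queries.
  std::printf("64-bit=%d 32-bit=%d 16-bit=%d\n",
              T.isArch64Bit(), T.isArch32Bit(), T.isArch16Bit());
  return 0;
}
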
/// isOSVersionLT - Helper function for doing comparisons against version
/// numbers included in the target triple.
@@ -254,6 +262,22 @@ public:
return false;
}
+ /// isMacOSXVersionLT - Comparison function for checking OS X version
+ /// compatibility, which handles supporting skewed version numbering schemes
+ /// used by the "darwin" triples.
+ unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
+ unsigned Micro = 0) const {
+ assert(isMacOSX() && "Not an OS X triple!");
+
+ // If this is OS X, expect a sane version number.
+ if (getOS() == Triple::MacOSX)
+ return isOSVersionLT(Major, Minor, Micro);
+
+ // Otherwise, compare to the "Darwin" number.
+ assert(Major == 10 && "Unexpected major version");
+ return isOSVersionLT(Minor + 4, Micro, 0);
+ }
+
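
A sketch of the intended call pattern (the wrapper name is invented):

#include "llvm/ADT/Triple.h"

bool targetsLionOrNewer(const llvm::Triple &T) {
  // Handles both "macosx10.7" and the skewed "darwin11" numbering.
  return T.isMacOSX() && !T.isMacOSXVersionLT(10, 7);
}
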
/// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both
/// "darwin" and "osx" as OS X triples.
bool isMacOSX() const {
@@ -265,26 +289,30 @@ public:
return isMacOSX() || getOS() == Triple::IOS;
}
+ /// \brief Tests for either Cygwin or MinGW OS
+ bool isOSCygMing() const {
+ return getOS() == Triple::Cygwin || getOS() == Triple::MinGW32;
+ }
+
/// isOSWindows - Is this a "Windows" OS.
bool isOSWindows() const {
- return getOS() == Triple::Win32 || getOS() == Triple::Cygwin ||
- getOS() == Triple::MinGW32;
+ return getOS() == Triple::Win32 || isOSCygMing();
}
- /// isMacOSXVersionLT - Comparison function for checking OS X version
- /// compatibility, which handles supporting skewed version numbering schemes
- /// used by the "darwin" triples.
- unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
- unsigned Micro = 0) const {
- assert(isMacOSX() && "Not an OS X triple!");
+ /// \brief Tests whether the OS uses the ELF binary format.
+ bool isOSBinFormatELF() const {
+ return !isOSDarwin() && !isOSWindows();
+ }
- // If this is OS X, expect a sane version number.
- if (getOS() == Triple::MacOSX)
- return isOSVersionLT(Major, Minor, Micro);
+ /// \brief Tests whether the OS uses the COFF binary format.
+ bool isOSBinFormatCOFF() const {
+ return isOSWindows();
+ }
- // Otherwise, compare to the "Darwin" number.
- assert(Major == 10 && "Unexpected major version");
- return isOSVersionLT(Minor + 4, Micro, 0);
+ /// \brief Tests whether the environment is MachO.
+ // FIXME: Should this be an OSBinFormat predicate?
+ bool isEnvironmentMachO() const {
+ return getEnvironment() == Triple::MachO || isOSDarwin();
}
/// @}
@@ -335,6 +363,26 @@ public:
const char *getArchNameForAssembler();
/// @}
+ /// @name Helpers to build variants of a particular triple.
+ /// @{
+
+ /// \brief Form a triple with a 32-bit variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a 32-bit architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple get32BitArchVariant() const;
+
+ /// \brief Form a triple with a 64-bit variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a 64-bit architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple get64BitArchVariant() const;
+
+ /// @}
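
A small sketch of hopping between width variants (the triple is chosen arbitrarily):

#include "llvm/ADT/Triple.h"
#include <cstdio>

int main() {
  llvm::Triple T("i386-apple-darwin11");
  // i386 maps to x86_64; an architecture with no 64-bit sibling would come
  // back as UnknownArch.
  llvm::Triple T64 = T.get64BitArchVariant();
  std::printf("%s\n", T64.getTriple().c_str());
  return 0;
}
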
/// @name Static helpers for IDs.
/// @{
diff --git a/contrib/llvm/include/llvm/ADT/Twine.h b/contrib/llvm/include/llvm/ADT/Twine.h
index 3a60cab..9101df8 100644
--- a/contrib/llvm/include/llvm/ADT/Twine.h
+++ b/contrib/llvm/include/llvm/ADT/Twine.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <string>
@@ -425,7 +426,7 @@ namespace llvm {
StringRef getSingleStringRef() const {
assert(isSingleStringRef() &&"This cannot be had as a single stringref!");
switch (getLHSKind()) {
- default: assert(0 && "Out of sync with isSingleStringRef");
+ default: llvm_unreachable("Out of sync with isSingleStringRef");
case EmptyKind: return StringRef();
case CStringKind: return StringRef(LHS.cString);
case StdStringKind: return StringRef(*LHS.stdString);
diff --git a/contrib/llvm/include/llvm/ADT/ValueMap.h b/contrib/llvm/include/llvm/ADT/ValueMap.h
index d1f4e5a..707d07d 100644
--- a/contrib/llvm/include/llvm/ADT/ValueMap.h
+++ b/contrib/llvm/include/llvm/ADT/ValueMap.h
@@ -35,7 +35,7 @@
namespace llvm {
-template<typename KeyT, typename ValueT, typename Config, typename ValueInfoT>
+template<typename KeyT, typename ValueT, typename Config>
class ValueMapCallbackVH;
template<typename DenseMapT, typename KeyT>
@@ -72,13 +72,11 @@ struct ValueMapConfig {
};
/// See the file comment.
-template<typename KeyT, typename ValueT, typename Config = ValueMapConfig<KeyT>,
- typename ValueInfoT = DenseMapInfo<ValueT> >
+template<typename KeyT, typename ValueT, typename Config =ValueMapConfig<KeyT> >
class ValueMap {
- friend class ValueMapCallbackVH<KeyT, ValueT, Config, ValueInfoT>;
- typedef ValueMapCallbackVH<KeyT, ValueT, Config, ValueInfoT> ValueMapCVH;
- typedef DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH>,
- ValueInfoT> MapT;
+ friend class ValueMapCallbackVH<KeyT, ValueT, Config>;
+ typedef ValueMapCallbackVH<KeyT, ValueT, Config> ValueMapCVH;
+ typedef DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH> > MapT;
typedef typename Config::ExtraData ExtraData;
MapT Map;
ExtraData Data;
@@ -190,11 +188,11 @@ private:
// This CallbackVH updates its ValueMap when the contained Value changes,
// according to the user's preferences expressed through the Config object.
-template<typename KeyT, typename ValueT, typename Config, typename ValueInfoT>
+template<typename KeyT, typename ValueT, typename Config>
class ValueMapCallbackVH : public CallbackVH {
- friend class ValueMap<KeyT, ValueT, Config, ValueInfoT>;
+ friend class ValueMap<KeyT, ValueT, Config>;
friend struct DenseMapInfo<ValueMapCallbackVH>;
- typedef ValueMap<KeyT, ValueT, Config, ValueInfoT> ValueMapT;
+ typedef ValueMap<KeyT, ValueT, Config> ValueMapT;
typedef typename llvm::remove_pointer<KeyT>::type KeySansPointerT;
ValueMapT *Map;
@@ -244,9 +242,9 @@ public:
}
};
-template<typename KeyT, typename ValueT, typename Config, typename ValueInfoT>
-struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config, ValueInfoT> > {
- typedef ValueMapCallbackVH<KeyT, ValueT, Config, ValueInfoT> VH;
+template<typename KeyT, typename ValueT, typename Config>
+struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
+ typedef ValueMapCallbackVH<KeyT, ValueT, Config> VH;
typedef DenseMapInfo<KeyT> PointerInfo;
static inline VH getEmptyKey() {
diff --git a/contrib/llvm/include/llvm/ADT/VariadicFunction.h b/contrib/llvm/include/llvm/ADT/VariadicFunction.h
new file mode 100644
index 0000000..a9a0dc6
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/VariadicFunction.h
@@ -0,0 +1,331 @@
+//===--- VariadicFunction.h - Variadic Functions ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements compile-time type-safe variadic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_VARIADIC_FUNCTION_H
+#define LLVM_ADT_VARIADIC_FUNCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+// Define macros to aid in expanding a comma separated series with the index of
+// the series pasted onto the last token.
+#define LLVM_COMMA_JOIN1(x) x ## 0
+#define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1
+#define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2
+#define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3
+#define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4
+#define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5
+#define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6
+#define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7
+#define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8
+#define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9
+#define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10
+#define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11
+#define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12
+#define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13
+#define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14
+#define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15
+#define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16
+#define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17
+#define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18
+#define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19
+#define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20
+#define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21
+#define LLVM_COMMA_JOIN23(x) LLVM_COMMA_JOIN22(x), x ## 22
+#define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23
+#define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24
+#define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25
+#define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26
+#define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27
+#define LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28
+#define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29
+#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
+#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
+
+/// \brief Class which can simulate a type-safe variadic function.
+///
+/// The VariadicFunction class template makes it easy to define
+/// type-safe variadic functions where all arguments have the same
+/// type.
+///
+/// Suppose we need a variadic function like this:
+///
+/// ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N);
+///
+/// Instead of many overloads of Foo(), we only need to define a helper
+/// function that takes an array of arguments:
+///
+/// ResultT FooImpl(ArrayRef<const ArgT *> Args) {
+/// // 'Args[i]' is a pointer to the i-th argument passed to Foo().
+/// ...
+/// }
+///
+/// and then define Foo() like this:
+///
+/// const VariadicFunction<ResultT, ArgT, FooImpl> Foo;
+///
+/// VariadicFunction takes care of defining the overloads of Foo().
+///
+/// Actually, Foo is a function object (i.e. functor) instead of a plain
+/// function. This object is stateless and its constructor/destructor
+/// does nothing, so it's safe to create global objects and call Foo(...) at
+/// any time.
+///
+/// Sometimes we need a variadic function to have some fixed leading
+/// arguments whose types may be different from that of the optional
+/// arguments. For example:
+///
+/// bool FullMatch(const StringRef &S, const RE &Regex,
+/// const ArgT &A_0, ..., const ArgT &A_N);
+///
+/// VariadicFunctionN is for such cases, where N is the number of fixed
+/// arguments. It is like VariadicFunction, except that it takes N more
+/// template arguments for the types of the fixed arguments:
+///
+/// bool FullMatchImpl(const StringRef &S, const RE &Regex,
+/// ArrayRef<const ArgT *> Args) { ... }
+/// const VariadicFunction2<bool, const StringRef&,
+/// const RE&, ArgT, FullMatchImpl>
+/// FullMatch;
+///
+/// Currently VariadicFunction and friends support up to 3
+/// fixed leading arguments and up to 32 optional arguments.
+template <typename ResultT, typename ArgT,
+ ResultT (*Func)(ArrayRef<const ArgT *>)>
+struct VariadicFunction {
+ ResultT operator()() const {
+ return Func(ArrayRef<const ArgT *>());
+ }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+ ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+ return Func(makeArrayRef(Args)); \
+ }
+ LLVM_DEFINE_OVERLOAD(1)
+ LLVM_DEFINE_OVERLOAD(2)
+ LLVM_DEFINE_OVERLOAD(3)
+ LLVM_DEFINE_OVERLOAD(4)
+ LLVM_DEFINE_OVERLOAD(5)
+ LLVM_DEFINE_OVERLOAD(6)
+ LLVM_DEFINE_OVERLOAD(7)
+ LLVM_DEFINE_OVERLOAD(8)
+ LLVM_DEFINE_OVERLOAD(9)
+ LLVM_DEFINE_OVERLOAD(10)
+ LLVM_DEFINE_OVERLOAD(11)
+ LLVM_DEFINE_OVERLOAD(12)
+ LLVM_DEFINE_OVERLOAD(13)
+ LLVM_DEFINE_OVERLOAD(14)
+ LLVM_DEFINE_OVERLOAD(15)
+ LLVM_DEFINE_OVERLOAD(16)
+ LLVM_DEFINE_OVERLOAD(17)
+ LLVM_DEFINE_OVERLOAD(18)
+ LLVM_DEFINE_OVERLOAD(19)
+ LLVM_DEFINE_OVERLOAD(20)
+ LLVM_DEFINE_OVERLOAD(21)
+ LLVM_DEFINE_OVERLOAD(22)
+ LLVM_DEFINE_OVERLOAD(23)
+ LLVM_DEFINE_OVERLOAD(24)
+ LLVM_DEFINE_OVERLOAD(25)
+ LLVM_DEFINE_OVERLOAD(26)
+ LLVM_DEFINE_OVERLOAD(27)
+ LLVM_DEFINE_OVERLOAD(28)
+ LLVM_DEFINE_OVERLOAD(29)
+ LLVM_DEFINE_OVERLOAD(30)
+ LLVM_DEFINE_OVERLOAD(31)
+ LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
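
A compilable sketch of the pattern the comment block describes (StrCat and StrCatImpl are invented names):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/VariadicFunction.h"
#include <string>

// Non-static so the function has external linkage, as C++03 requires for a
// function-pointer template argument.
std::string StrCatImpl(llvm::ArrayRef<const llvm::StringRef *> Args) {
  std::string Result;
  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    Result += Args[i]->str();
  return Result;
}

// StrCat("a", "b", "c") now expands to the three-argument overload.
const llvm::VariadicFunction<std::string, llvm::StringRef, StrCatImpl>
    StrCat = {};
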
+template <typename ResultT, typename Param0T, typename ArgT,
+ ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)>
+struct VariadicFunction1 {
+ ResultT operator()(Param0T P0) const {
+ return Func(P0, ArrayRef<const ArgT *>());
+ }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+ ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+ return Func(P0, makeArrayRef(Args)); \
+ }
+ LLVM_DEFINE_OVERLOAD(1)
+ LLVM_DEFINE_OVERLOAD(2)
+ LLVM_DEFINE_OVERLOAD(3)
+ LLVM_DEFINE_OVERLOAD(4)
+ LLVM_DEFINE_OVERLOAD(5)
+ LLVM_DEFINE_OVERLOAD(6)
+ LLVM_DEFINE_OVERLOAD(7)
+ LLVM_DEFINE_OVERLOAD(8)
+ LLVM_DEFINE_OVERLOAD(9)
+ LLVM_DEFINE_OVERLOAD(10)
+ LLVM_DEFINE_OVERLOAD(11)
+ LLVM_DEFINE_OVERLOAD(12)
+ LLVM_DEFINE_OVERLOAD(13)
+ LLVM_DEFINE_OVERLOAD(14)
+ LLVM_DEFINE_OVERLOAD(15)
+ LLVM_DEFINE_OVERLOAD(16)
+ LLVM_DEFINE_OVERLOAD(17)
+ LLVM_DEFINE_OVERLOAD(18)
+ LLVM_DEFINE_OVERLOAD(19)
+ LLVM_DEFINE_OVERLOAD(20)
+ LLVM_DEFINE_OVERLOAD(21)
+ LLVM_DEFINE_OVERLOAD(22)
+ LLVM_DEFINE_OVERLOAD(23)
+ LLVM_DEFINE_OVERLOAD(24)
+ LLVM_DEFINE_OVERLOAD(25)
+ LLVM_DEFINE_OVERLOAD(26)
+ LLVM_DEFINE_OVERLOAD(27)
+ LLVM_DEFINE_OVERLOAD(28)
+ LLVM_DEFINE_OVERLOAD(29)
+ LLVM_DEFINE_OVERLOAD(30)
+ LLVM_DEFINE_OVERLOAD(31)
+ LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T, typename ArgT,
+ ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)>
+struct VariadicFunction2 {
+ ResultT operator()(Param0T P0, Param1T P1) const {
+ return Func(P0, P1, ArrayRef<const ArgT *>());
+ }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+ ResultT operator()(Param0T P0, Param1T P1, \
+ LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+ return Func(P0, P1, makeArrayRef(Args)); \
+ }
+ LLVM_DEFINE_OVERLOAD(1)
+ LLVM_DEFINE_OVERLOAD(2)
+ LLVM_DEFINE_OVERLOAD(3)
+ LLVM_DEFINE_OVERLOAD(4)
+ LLVM_DEFINE_OVERLOAD(5)
+ LLVM_DEFINE_OVERLOAD(6)
+ LLVM_DEFINE_OVERLOAD(7)
+ LLVM_DEFINE_OVERLOAD(8)
+ LLVM_DEFINE_OVERLOAD(9)
+ LLVM_DEFINE_OVERLOAD(10)
+ LLVM_DEFINE_OVERLOAD(11)
+ LLVM_DEFINE_OVERLOAD(12)
+ LLVM_DEFINE_OVERLOAD(13)
+ LLVM_DEFINE_OVERLOAD(14)
+ LLVM_DEFINE_OVERLOAD(15)
+ LLVM_DEFINE_OVERLOAD(16)
+ LLVM_DEFINE_OVERLOAD(17)
+ LLVM_DEFINE_OVERLOAD(18)
+ LLVM_DEFINE_OVERLOAD(19)
+ LLVM_DEFINE_OVERLOAD(20)
+ LLVM_DEFINE_OVERLOAD(21)
+ LLVM_DEFINE_OVERLOAD(22)
+ LLVM_DEFINE_OVERLOAD(23)
+ LLVM_DEFINE_OVERLOAD(24)
+ LLVM_DEFINE_OVERLOAD(25)
+ LLVM_DEFINE_OVERLOAD(26)
+ LLVM_DEFINE_OVERLOAD(27)
+ LLVM_DEFINE_OVERLOAD(28)
+ LLVM_DEFINE_OVERLOAD(29)
+ LLVM_DEFINE_OVERLOAD(30)
+ LLVM_DEFINE_OVERLOAD(31)
+ LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T,
+ typename Param2T, typename ArgT,
+ ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)>
+struct VariadicFunction3 {
+ ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const {
+ return Func(P0, P1, P2, ArrayRef<const ArgT *>());
+ }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+ ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \
+ LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+ return Func(P0, P1, P2, makeArrayRef(Args)); \
+ }
+ LLVM_DEFINE_OVERLOAD(1)
+ LLVM_DEFINE_OVERLOAD(2)
+ LLVM_DEFINE_OVERLOAD(3)
+ LLVM_DEFINE_OVERLOAD(4)
+ LLVM_DEFINE_OVERLOAD(5)
+ LLVM_DEFINE_OVERLOAD(6)
+ LLVM_DEFINE_OVERLOAD(7)
+ LLVM_DEFINE_OVERLOAD(8)
+ LLVM_DEFINE_OVERLOAD(9)
+ LLVM_DEFINE_OVERLOAD(10)
+ LLVM_DEFINE_OVERLOAD(11)
+ LLVM_DEFINE_OVERLOAD(12)
+ LLVM_DEFINE_OVERLOAD(13)
+ LLVM_DEFINE_OVERLOAD(14)
+ LLVM_DEFINE_OVERLOAD(15)
+ LLVM_DEFINE_OVERLOAD(16)
+ LLVM_DEFINE_OVERLOAD(17)
+ LLVM_DEFINE_OVERLOAD(18)
+ LLVM_DEFINE_OVERLOAD(19)
+ LLVM_DEFINE_OVERLOAD(20)
+ LLVM_DEFINE_OVERLOAD(21)
+ LLVM_DEFINE_OVERLOAD(22)
+ LLVM_DEFINE_OVERLOAD(23)
+ LLVM_DEFINE_OVERLOAD(24)
+ LLVM_DEFINE_OVERLOAD(25)
+ LLVM_DEFINE_OVERLOAD(26)
+ LLVM_DEFINE_OVERLOAD(27)
+ LLVM_DEFINE_OVERLOAD(28)
+ LLVM_DEFINE_OVERLOAD(29)
+ LLVM_DEFINE_OVERLOAD(30)
+ LLVM_DEFINE_OVERLOAD(31)
+ LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+// Cleanup the macro namespace.
+#undef LLVM_COMMA_JOIN1
+#undef LLVM_COMMA_JOIN2
+#undef LLVM_COMMA_JOIN3
+#undef LLVM_COMMA_JOIN4
+#undef LLVM_COMMA_JOIN5
+#undef LLVM_COMMA_JOIN6
+#undef LLVM_COMMA_JOIN7
+#undef LLVM_COMMA_JOIN8
+#undef LLVM_COMMA_JOIN9
+#undef LLVM_COMMA_JOIN10
+#undef LLVM_COMMA_JOIN11
+#undef LLVM_COMMA_JOIN12
+#undef LLVM_COMMA_JOIN13
+#undef LLVM_COMMA_JOIN14
+#undef LLVM_COMMA_JOIN15
+#undef LLVM_COMMA_JOIN16
+#undef LLVM_COMMA_JOIN17
+#undef LLVM_COMMA_JOIN18
+#undef LLVM_COMMA_JOIN19
+#undef LLVM_COMMA_JOIN20
+#undef LLVM_COMMA_JOIN21
+#undef LLVM_COMMA_JOIN22
+#undef LLVM_COMMA_JOIN23
+#undef LLVM_COMMA_JOIN24
+#undef LLVM_COMMA_JOIN25
+#undef LLVM_COMMA_JOIN26
+#undef LLVM_COMMA_JOIN27
+#undef LLVM_COMMA_JOIN28
+#undef LLVM_COMMA_JOIN29
+#undef LLVM_COMMA_JOIN30
+#undef LLVM_COMMA_JOIN31
+#undef LLVM_COMMA_JOIN32
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_VARIADIC_FUNCTION_H
diff --git a/contrib/llvm/include/llvm/ADT/VectorExtras.h b/contrib/llvm/include/llvm/ADT/VectorExtras.h
deleted file mode 100644
index e05f585..0000000
--- a/contrib/llvm/include/llvm/ADT/VectorExtras.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//===-- llvm/ADT/VectorExtras.h - Helpers for std::vector -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains helper functions which are useful for working with the
-// std::vector class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ADT_VECTOREXTRAS_H
-#define LLVM_ADT_VECTOREXTRAS_H
-
-#include <cstdarg>
-#include <vector>
-
-namespace llvm {
-
-/// make_vector - Helper function which is useful for building temporary vectors
-/// to pass into type construction of CallInst ctors. This turns a null
-/// terminated list of pointers (or other value types) into a real live vector.
-///
-template<typename T>
-inline std::vector<T> make_vector(T A, ...) {
- va_list Args;
- va_start(Args, A);
- std::vector<T> Result;
- Result.push_back(A);
- while (T Val = va_arg(Args, T))
- Result.push_back(Val);
- va_end(Args);
- return Result;
-}
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/include/llvm/ADT/edit_distance.h b/contrib/llvm/include/llvm/ADT/edit_distance.h
new file mode 100644
index 0000000..f77ef13
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/edit_distance.h
@@ -0,0 +1,102 @@
+//===-- llvm/ADT/edit_distance.h - Array edit distance function --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a Levenshtein distance function that works for any two
+// sequences, with each element of each sequence being analogous to a character
+// in a string.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EDIT_DISTANCE_H
+#define LLVM_ADT_EDIT_DISTANCE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <algorithm>
+
+namespace llvm {
+
+/// \brief Determine the edit distance between two sequences.
+///
+/// \param FromArray the first sequence to compare.
+///
+/// \param ToArray the second sequence to compare.
+///
+/// \param AllowReplacements whether to allow element replacements (change one
+/// element into another) as a single operation, rather than as two operations
+/// (an insertion and a removal).
+///
+/// \param MaxEditDistance If non-zero, the maximum edit distance that this
+/// routine is allowed to compute. If the edit distance will exceed that
+/// maximum, returns \c MaxEditDistance+1.
+///
+/// \returns the minimum number of element insertions, removals, or (if
+/// \p AllowReplacements is \c true) replacements needed to transform one of
+/// the given sequences into the other. If zero, the sequences are identical.
+template<typename T>
+unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
+ bool AllowReplacements = true,
+ unsigned MaxEditDistance = 0) {
+ // The algorithm implemented below is the "classic"
+ // dynamic-programming algorithm for computing the Levenshtein
+ // distance, which is described here:
+ //
+ // http://en.wikipedia.org/wiki/Levenshtein_distance
+ //
+ // Although the algorithm is typically described using an m x n
+ // array, only two rows are used at a time, so this implementation
+ // just keeps two separate vectors for those two rows.
+ typename ArrayRef<T>::size_type m = FromArray.size();
+ typename ArrayRef<T>::size_type n = ToArray.size();
+
+ const unsigned SmallBufferSize = 64;
+ unsigned SmallBuffer[SmallBufferSize];
+ llvm::OwningArrayPtr<unsigned> Allocated;
+ unsigned *Previous = SmallBuffer;
+ if (2*(n + 1) > SmallBufferSize) {
+ Previous = new unsigned [2*(n+1)];
+ Allocated.reset(Previous);
+ }
+ unsigned *Current = Previous + (n + 1);
+
+ for (unsigned i = 0; i <= n; ++i)
+ Previous[i] = i;
+
+ for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) {
+ Current[0] = y;
+ unsigned BestThisRow = Current[0];
+
+ for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) {
+ if (AllowReplacements) {
+ Current[x] = std::min(
+ Previous[x-1] + (FromArray[y-1] == ToArray[x-1] ? 0u : 1u),
+ std::min(Current[x-1], Previous[x])+1);
+ }
+ else {
+ if (FromArray[y-1] == ToArray[x-1]) Current[x] = Previous[x-1];
+ else Current[x] = std::min(Current[x-1], Previous[x]) + 1;
+ }
+ BestThisRow = std::min(BestThisRow, Current[x]);
+ }
+
+ if (MaxEditDistance && BestThisRow > MaxEditDistance)
+ return MaxEditDistance + 1;
+
+ unsigned *tmp = Current;
+ Current = Previous;
+ Previous = tmp;
+ }
+
+ unsigned Result = Previous[n];
+ return Result;
+}
+
+} // End llvm namespace
+
+#endif
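
A usage sketch for the new helper; "kitten" to "sitting" is the classic example with distance 3.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/edit_distance.h"
#include <cassert>

int main() {
  const char From[] = {'k', 'i', 't', 't', 'e', 'n'};
  const char To[]   = {'s', 'i', 't', 't', 'i', 'n', 'g'};
  unsigned D = llvm::ComputeEditDistance(llvm::ArrayRef<char>(From, 6),
                                         llvm::ArrayRef<char>(To, 7));
  assert(D == 3); // substitute k->s, substitute e->i, insert g
  return 0;
}
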
diff --git a/contrib/llvm/include/llvm/ADT/ilist.h b/contrib/llvm/include/llvm/ADT/ilist.h
index bcacfd9..ba9864a 100644
--- a/contrib/llvm/include/llvm/ADT/ilist.h
+++ b/contrib/llvm/include/llvm/ADT/ilist.h
@@ -652,10 +652,6 @@ struct ilist : public iplist<NodeTy> {
void push_front(const NodeTy &val) { insert(this->begin(), val); }
void push_back(const NodeTy &val) { insert(this->end(), val); }
- // Special forms of insert...
- template<class InIt> void insert(iterator where, InIt first, InIt last) {
- for (; first != last; ++first) insert(where, *first);
- }
void insert(iterator where, size_type count, const NodeTy &val) {
for (; count != 0; --count) insert(where, val);
}
diff --git a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
index d71ba20..b823f71 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -327,7 +327,7 @@ public:
}
/// doesAccessArgPointees - Return true if functions with the specified
- /// behavior are known to potentially read or write from objects pointed
+ /// behavior are known to potentially read or write from objects pointed
/// to by their pointer-typed arguments (with arbitrary offsets).
///
static bool doesAccessArgPointees(ModRefBehavior MRB) {
@@ -568,6 +568,11 @@ bool isNoAliasCall(const Value *V);
///
bool isIdentifiedObject(const Value *V);
+/// isKnownNonNull - Return true if this pointer couldn't possibly be null by
+/// its definition. This returns true for allocas, non-extern-weak globals and
+/// byval arguments.
+bool isKnownNonNull(const Value *V);
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
index c4ebe40..95626d6 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
@@ -264,6 +264,7 @@ private:
}
void setVolatile() { Volatile = true; }
+public:
/// aliasesPointer - Return true if the specified pointer "may" (or must)
/// alias one of the members in the set.
///
diff --git a/contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h b/contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h
index 0fb2bd7..6f2ccfb 100644
--- a/contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h
+++ b/contrib/llvm/include/llvm/Analysis/BlockFrequencyImpl.h
@@ -24,7 +24,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>
-#include <sstream>
#include <string>
namespace llvm {
@@ -41,7 +40,7 @@ class MachineBlockFrequencyInfo;
template<class BlockT, class FunctionT, class BlockProbInfoT>
class BlockFrequencyImpl {
- DenseMap<BlockT *, BlockFrequency> Freqs;
+ DenseMap<const BlockT *, BlockFrequency> Freqs;
BlockProbInfoT *BPI;
@@ -52,15 +51,16 @@ class BlockFrequencyImpl {
const uint32_t EntryFreq;
std::string getBlockName(BasicBlock *BB) const {
- return BB->getNameStr();
+ return BB->getName().str();
}
std::string getBlockName(MachineBasicBlock *MBB) const {
- std::stringstream ss;
+ std::string str;
+ raw_string_ostream ss(str);
ss << "BB#" << MBB->getNumber();
if (const BasicBlock *BB = MBB->getBasicBlock())
- ss << " derived from LLVM BB " << BB->getNameStr();
+ ss << " derived from LLVM BB " << BB->getName();
return ss.str();
}
@@ -308,8 +308,9 @@ class BlockFrequencyImpl {
public:
/// getBlockFreq - Return block frequency. Return 0 if we don't have it.
- BlockFrequency getBlockFreq(BlockT *BB) const {
- typename DenseMap<BlockT *, BlockFrequency>::const_iterator I = Freqs.find(BB);
+ BlockFrequency getBlockFreq(const BlockT *BB) const {
+ typename DenseMap<const BlockT *, BlockFrequency>::const_iterator
+ I = Freqs.find(BB);
if (I != Freqs.end())
return I->second;
return 0;
diff --git a/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
index 9e698a9..fcab906 100644
--- a/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -47,7 +47,7 @@ public:
/// that we should not rely on the value itself, but only on the comparison to
/// the other block frequencies. We do this to avoid using of floating points.
///
- BlockFrequency getBlockFreq(BasicBlock *BB) const;
+ BlockFrequency getBlockFreq(const BasicBlock *BB) const;
};
}
diff --git a/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
index a2c12ab..2ced796 100644
--- a/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -17,29 +17,23 @@
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/BranchProbability.h"
namespace llvm {
-
+class LoopInfo;
class raw_ostream;
+/// \brief Analysis pass providing branch probability information.
+///
+/// This is a function analysis pass which provides information on the relative
+/// probabilities of each "edge" in the function's CFG where such an edge is
+/// defined by a pair of basic blocks. The probability for a given block and
+/// a successor block is always relative to the probabilities of the other
+/// successor blocks. Another way of looking at it is that the probabilities
+/// for a given block B and each of its successors should sum to exactly
+/// one (100%).
class BranchProbabilityInfo : public FunctionPass {
-
- // Default weight value. Used when we don't have information about the edge.
- // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
- // the successors have a weight yet. But it doesn't make sense when providing
- // weight to an edge that may have siblings with non-zero weights. This can
- // be handled various ways, but it's probably fine for an edge with unknown
- // weight to just "inherit" the non-zero weight of an adjacent successor.
- static const uint32_t DEFAULT_WEIGHT = 16;
-
- typedef std::pair<const BasicBlock *, const BasicBlock *> Edge;
-
- DenseMap<Edge, uint32_t> Weights;
-
- // Get sum of the block successors' weights.
- uint32_t getSumForBlock(const BasicBlock *BB) const;
-
public:
static char ID;
@@ -48,34 +42,86 @@ public:
}
void getAnalysisUsage(AnalysisUsage &AU) const;
-
bool runOnFunction(Function &F);
+ void print(raw_ostream &OS, const Module *M = 0) const;
+
+ /// \brief Get an edge's probability, relative to other out-edges of the Src.
+ ///
+ /// This routine provides access to the fractional probability between zero
+ /// (0%) and one (100%) of this edge executing, relative to other edges
+ /// leaving the 'Src' block. The returned probability is never zero, and can
+ /// only be one if the source block has only one successor.
+ BranchProbability getEdgeProbability(const BasicBlock *Src,
+ const BasicBlock *Dst) const;
+
+ /// \brief Test if an edge is hot relative to other out-edges of the Src.
+ ///
+ /// Check whether this edge out of the source block is 'hot'. We define hot
+ /// as having a relative probability >= 80%.
+ bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
- // Returned value is between 1 and UINT32_MAX. Look at
- // BranchProbabilityInfo.cpp for details.
+ /// \brief Retrieve the hot successor of a block if one exists.
+ ///
+ /// Given a basic block, look through its successors and if one exists for
+ /// which \see isEdgeHot would return true, return that successor block.
+ BasicBlock *getHotSucc(BasicBlock *BB) const;
+
+ /// \brief Print an edge's probability.
+ ///
+ /// Retrieves an edge's probability similarly to \see getEdgeProbability, but
+ /// then prints that probability to the provided stream. That stream is then
+ /// returned.
+ raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
+ const BasicBlock *Dst) const;
+
+ /// \brief Get the raw edge weight calculated for the block pair.
+ ///
+ /// This returns the raw edge weight. It is guaranteed to fall between 1 and
+ /// UINT32_MAX. Note that the raw edge weight is not meaningful in isolation.
+ /// This interface should be used very carefully, and primarily by routines that
+ /// are updating the analysis by later calling setEdgeWeight.
uint32_t getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const;
- // Look at BranchProbabilityInfo.cpp for details. Use it with caution!
+ /// \brief Set the raw edge weight for the block pair.
+ ///
+ /// This allows a pass to explicitly set the edge weight for a block. It can
+ /// be used when updating the CFG to update and preserve the branch
+ /// probability information. Read the implementation of how these edge
+ /// weights are calculated carefully before using!
void setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst,
uint32_t Weight);
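
A sketch of a consumer of the reworked interface (reportHotEdges is invented; it assumes the pass already required BranchProbabilityInfo in getAnalysisUsage):

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Function.h"
#include "llvm/Support/raw_ostream.h"

void reportHotEdges(llvm::Function &F, llvm::BranchProbabilityInfo &BPI) {
  for (llvm::Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    llvm::BasicBlock *BB = &*I;
    // getHotSucc returns null when no out-edge reaches the 80% threshold.
    if (llvm::BasicBlock *Hot = BPI.getHotSucc(BB))
      BPI.printEdgeProbability(llvm::errs(), BB, Hot) << "\n";
  }
}
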
- // A 'Hot' edge is an edge which probability is >= 80%.
- bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
+private:
+ typedef std::pair<const BasicBlock *, const BasicBlock *> Edge;
- // Return a hot successor for the block BB or null if there isn't one.
- BasicBlock *getHotSucc(BasicBlock *BB) const;
+ // Default weight value. Used when we don't have information about the edge.
+ // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
+ // the successors have a weight yet. But it doesn't make sense when providing
+ // weight to an edge that may have siblings with non-zero weights. This can
+ // be handled various ways, but it's probably fine for an edge with unknown
+ // weight to just "inherit" the non-zero weight of an adjacent successor.
+ static const uint32_t DEFAULT_WEIGHT = 16;
- // Return a probability as a fraction between 0 (0% probability) and
- // 1 (100% probability), however the value is never equal to 0, and can be 1
- // only iff SRC block has only one successor.
- BranchProbability getEdgeProbability(const BasicBlock *Src,
- const BasicBlock *Dst) const;
+ DenseMap<Edge, uint32_t> Weights;
+
+ /// \brief Handle to the LoopInfo analysis.
+ LoopInfo *LI;
+
+ /// \brief Track the last function we run over for printing.
+ Function *LastF;
+
+ /// \brief Track the set of blocks directly succeeded by a returning block.
+ SmallPtrSet<BasicBlock *, 16> PostDominatedByUnreachable;
+
+ /// \brief Get sum of the block successors' weights.
+ uint32_t getSumForBlock(const BasicBlock *BB) const;
- // Print value between 0 (0% probability) and 1 (100% probability),
- // however the value is never equal to 0, and can be 1 only iff SRC block
- // has only one successor.
- raw_ostream &printEdgeProbability(raw_ostream &OS, BasicBlock *Src,
- BasicBlock *Dst) const;
+ bool calcUnreachableHeuristics(BasicBlock *BB);
+ bool calcMetadataWeights(BasicBlock *BB);
+ bool calcPointerHeuristics(BasicBlock *BB);
+ bool calcLoopBranchHeuristics(BasicBlock *BB);
+ bool calcZeroHeuristics(BasicBlock *BB);
+ bool calcFloatingPointHeuristics(BasicBlock *BB);
};
}
diff --git a/contrib/llvm/include/llvm/Analysis/CFGPrinter.h b/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
index 61614e3..4704a92 100644
--- a/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
+++ b/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
@@ -29,13 +29,13 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const Function *F) {
- return "CFG for '" + F->getNameStr() + "' function";
+ return "CFG for '" + F->getName().str() + "' function";
}
static std::string getSimpleNodeLabel(const BasicBlock *Node,
- const Function *Graph) {
+ const Function *) {
if (!Node->getName().empty())
- return Node->getNameStr();
+ return Node->getName().str();
std::string Str;
raw_string_ostream OS(Str);
@@ -45,7 +45,7 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
}
static std::string getCompleteNodeLabel(const BasicBlock *Node,
- const Function *Graph) {
+ const Function *) {
std::string Str;
raw_string_ostream OS(Str);
@@ -95,7 +95,9 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
std::string Str;
raw_string_ostream OS(Str);
- OS << SI->getCaseValue(SuccNo)->getValue();
+ SwitchInst::ConstCaseIt Case =
+ SwitchInst::ConstCaseIt::fromSuccessorIndex(SI, SuccNo);
+ OS << Case.getCaseValue()->getValue();
return OS.str();
}
return "";
diff --git a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
index b3390f4..9b5e842 100644
--- a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
@@ -14,9 +14,12 @@
#ifndef LLVM_ANALYSIS_CAPTURETRACKING_H
#define LLVM_ANALYSIS_CAPTURETRACKING_H
-namespace llvm {
- class Value;
+#include "llvm/Constants.h"
+#include "llvm/Instructions.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Support/CallSite.h"
+namespace llvm {
/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
/// be expensive, so consider caching the results. The boolean ReturnCaptures
@@ -28,6 +31,33 @@ namespace llvm {
bool ReturnCaptures,
bool StoreCaptures);
+ /// This callback is used in conjunction with PointerMayBeCaptured. In
+ /// addition to the interface here, you'll need to provide your own getters
+ /// to see whether anything was captured.
+ struct CaptureTracker {
+ virtual ~CaptureTracker();
+
+ /// tooManyUses - The depth of traversal has breached a limit. There may be
+ /// capturing instructions that will not be passed into captured().
+ virtual void tooManyUses() = 0;
+
+ /// shouldExplore - This is the use of a value derived from the pointer.
+ /// To prune the search (i.e., assume that none of its users could possibly
+ /// capture) return false. To search it, return true.
+ ///
+ /// U->getUser() is always an Instruction.
+ virtual bool shouldExplore(Use *U) = 0;
+
+ /// captured - Information about the pointer was captured by the user of
+ /// use U. Return true to stop the traversal or false to continue looking
+ /// for more capturing instructions.
+ virtual bool captured(Use *U) = 0;
+ };
+
+ /// PointerMayBeCaptured - Visit the value and the values derived from it and
+ /// find values which appear to be capturing the pointer value. This feeds
+ /// results into and is controlled by the CaptureTracker object.
+ void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker);
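
A minimal tracker showing the callback contract (the class name and policy are invented; real clients add their own getters, as the comment above says):

#include "llvm/Analysis/CaptureTracking.h"

struct SimpleCaptureTracker : llvm::CaptureTracker {
  SimpleCaptureTracker() : Captured(false) {}
  // Hitting the traversal limit means a capture may have been missed, so
  // answer conservatively.
  void tooManyUses() { Captured = true; }
  // Explore every use; a real client might prune some users here.
  bool shouldExplore(llvm::Use *) { return true; }
  // Returning true stops the walk at the first capturing use.
  bool captured(llvm::Use *) { Captured = true; return true; }
  bool Captured;
};
// Driving it: SimpleCaptureTracker T; PointerMayBeCaptured(V, &T);
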
} // end namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
index d96dd82..7116078 100644
--- a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
+++ b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -1,4 +1,4 @@
-//===- CodeMetrics.h - Measures the weight of a function---------*- C++ -*-===//
+//===- CodeMetrics.h - Code cost measurements -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,80 +18,75 @@
#include "llvm/ADT/DenseMap.h"
namespace llvm {
-
+ class BasicBlock;
+ class Function;
+ class Instruction;
class TargetData;
+ class Value;
- // CodeMetrics - Calculate size and a few similar metrics for a set of
- // basic blocks.
- struct CodeMetrics {
- /// NeverInline - True if this callee should never be inlined into a
- /// caller.
- // bool NeverInline;
+ /// \brief Check whether an instruction is likely to be "free" when lowered.
+ bool isInstructionFree(const Instruction *I, const TargetData *TD = 0);
+
+ /// \brief Check whether a call will lower to something small.
+ ///
+ /// This test checks whether calls to this function will lower to something
+ /// significantly cheaper than a traditional call, often a single
+ /// instruction.
+ bool callIsSmall(const Function *F);
- // True if this function contains a call to setjmp or _setjmp
- bool callsSetJmp;
+ /// \brief Utility to calculate the size and a few similar metrics for a set
+ /// of basic blocks.
+ struct CodeMetrics {
+ /// \brief True if this function contains a call to setjmp or other functions
+ /// with attribute "returns twice" without having the attribute itself.
+ bool exposesReturnsTwice;
- // True if this function calls itself
+ /// \brief True if this function calls itself.
bool isRecursive;
- // True if this function contains one or more indirect branches
+ /// \brief True if this function contains one or more indirect branches.
bool containsIndirectBr;
- /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
+ /// \brief True if this function calls alloca (in the C sense).
bool usesDynamicAlloca;
- /// NumInsts, NumBlocks - Keep track of how large each function is, which
- /// is used to estimate the code size cost of inlining it.
- unsigned NumInsts, NumBlocks;
+ /// \brief Number of instructions in the analyzed blocks.
+ unsigned NumInsts;
+
+ /// \brief Number of analyzed blocks.
+ unsigned NumBlocks;
- /// NumBBInsts - Keeps track of basic block code size estimates.
+ /// \brief Keeps track of basic block code size estimates.
DenseMap<const BasicBlock *, unsigned> NumBBInsts;
- /// NumCalls - Keep track of the number of calls to 'big' functions.
+ /// \brief Keep track of the number of calls to 'big' functions.
unsigned NumCalls;
- /// NumInlineCandidates - Keep track of the number of calls to internal
- /// functions with only a single caller. These are likely targets for
- /// future inlining, likely exposed by interleaved devirtualization.
+ /// \brief The number of calls to internal functions with a single caller.
+ ///
+ /// These are likely targets for future inlining, likely exposed by
+ /// interleaved devirtualization.
unsigned NumInlineCandidates;
- /// NumVectorInsts - Keep track of how many instructions produce vector
- /// values. The inliner is being more aggressive with inlining vector
- /// kernels.
+ /// \brief How many instructions produce vector values.
+ ///
+ /// The inliner is more aggressive with inlining vector kernels.
unsigned NumVectorInsts;
- /// NumRets - Keep track of how many Ret instructions the block contains.
+ /// \brief How many 'ret' instructions the blocks contain.
unsigned NumRets;
- CodeMetrics() : callsSetJmp(false), isRecursive(false),
+ CodeMetrics() : exposesReturnsTwice(false), isRecursive(false),
containsIndirectBr(false), usesDynamicAlloca(false),
NumInsts(0), NumBlocks(0), NumCalls(0),
NumInlineCandidates(0), NumVectorInsts(0),
NumRets(0) {}
- /// analyzeBasicBlock - Add information about the specified basic block
- /// to the current structure.
+ /// \brief Add information about a block to the current state.
void analyzeBasicBlock(const BasicBlock *BB, const TargetData *TD = 0);
- /// analyzeFunction - Add information about the specified function
- /// to the current structure.
+ /// \brief Add information about a function to the current state.
void analyzeFunction(Function *F, const TargetData *TD = 0);
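
An illustrative consumer (looksCheap and its threshold are invented):

#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Function.h"

bool looksCheap(llvm::Function *F) {
  llvm::CodeMetrics Metrics;
  Metrics.analyzeFunction(F); // the TargetData pointer is optional
  return !Metrics.exposesReturnsTwice && !Metrics.containsIndirectBr &&
         Metrics.NumInsts < 20;
}
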
-
- /// CountCodeReductionForConstant - Figure out an approximation for how
- /// many instructions will be constant folded if the specified value is
- /// constant.
- unsigned CountCodeReductionForConstant(Value *V);
-
- /// CountBonusForConstant - Figure out an approximation for how much
- /// per-call performance boost we can expect if the specified value is
- /// constant.
- unsigned CountBonusForConstant(Value *V);
-
- /// CountCodeReductionForAlloca - Figure out an approximation of how much
- /// smaller the function will be if it is inlined into a context where an
- /// argument becomes an alloca.
- ///
- unsigned CountCodeReductionForAlloca(Value *V);
};
}
diff --git a/contrib/llvm/include/llvm/Analysis/ConstantFolding.h b/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
index 05018fa..2fdef5f 100644
--- a/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -25,6 +25,7 @@ namespace llvm {
class ConstantExpr;
class Instruction;
class TargetData;
+ class TargetLibraryInfo;
class Function;
class Type;
template<typename T>
@@ -35,13 +36,15 @@ namespace llvm {
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
-Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0);
+Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0);
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData. If successful, the constant result is
/// returned; if not, null is returned.
Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
- const TargetData *TD = 0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0);
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified operands. If successful, the constant result is returned, if not,
@@ -51,7 +54,8 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
///
Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
- const TargetData *TD = 0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0);
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
@@ -59,7 +63,8 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
///
Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *LHS, Constant *RHS,
- const TargetData *TD = 0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0);
/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
/// instruction with the specified operands and indices. The constant result is
@@ -76,15 +81,22 @@ Constant *ConstantFoldLoadFromConstPtr(Constant *C, const TargetData *TD = 0);
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE);
-
+
+/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
+/// indices (with an *implied* zero pointer index that is not in the list),
+/// return the constant value being addressed by a virtual load, or null if
+/// something is funny and we can't decide.
+Constant *ConstantFoldLoadThroughGEPIndices(Constant *C,
+ ArrayRef<Constant*> Indices);
+
+/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
bool canConstantFoldCallTo(const Function *F);
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
-Constant *
-ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands);
+Constant *ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
+ const TargetLibraryInfo *TLI = 0);
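
A sketch of threading the new TargetLibraryInfo parameter through (tryFold is invented; the include path assumes this tree's llvm/Target/TargetLibraryInfo.h):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Instruction.h"
#include "llvm/Target/TargetLibraryInfo.h"

llvm::Constant *tryFold(llvm::Instruction *I, const llvm::TargetData *TD,
                        const llvm::TargetLibraryInfo *TLI) {
  // Returns null unless every operand is constant; with TLI available the
  // folder can also fold calls to recognized library functions.
  return llvm::ConstantFoldInstruction(I, TD, TLI);
}
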
}
#endif
diff --git a/contrib/llvm/include/llvm/Analysis/DIBuilder.h b/contrib/llvm/include/llvm/Analysis/DIBuilder.h
index ee24226..2d109cd 100644
--- a/contrib/llvm/include/llvm/Analysis/DIBuilder.h
+++ b/contrib/llvm/include/llvm/Analysis/DIBuilder.h
@@ -42,6 +42,7 @@ namespace llvm {
class DISubprogram;
class DITemplateTypeParameter;
class DITemplateValueParameter;
+ class DIObjCProperty;
class DIBuilder {
private:
@@ -190,6 +191,39 @@ namespace llvm {
StringRef PropertySetterName = StringRef(),
unsigned PropertyAttributes = 0);
+ /// createObjCIVar - Create debugging information entry for Objective-C
+ /// instance variable.
+ /// @param Name Member name.
+ /// @param File File where this member is defined.
+ /// @param LineNo Line number.
+ /// @param SizeInBits Member size.
+ /// @param AlignInBits Member alignment.
+ /// @param OffsetInBits Member offset.
+ /// @param Flags Flags to encode member attribute, e.g. private
+ /// @param Ty Parent type.
+ /// @param Property Property associated with this ivar.
+ DIType createObjCIVar(StringRef Name, DIFile File,
+ unsigned LineNo, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, DIType Ty,
+ MDNode *PropertyNode);
+
+ /// createObjCProperty - Create debugging information entry for Objective-C
+ /// property.
+ /// @param Name Property name.
+ /// @param File File where this property is defined.
+ /// @param LineNumber Line number.
+ /// @param GetterName Name of the Objective C property getter selector.
+ /// @param SetterName Name of the Objective C property setter selector.
+ /// @param PropertyAttributes Objective C property attributes.
+ /// @param Ty Type.
+ DIObjCProperty createObjCProperty(StringRef Name,
+ DIFile File, unsigned LineNumber,
+ StringRef GetterName,
+ StringRef SetterName,
+ unsigned PropertyAttributes,
+ DIType Ty);
+
/// createClassType - Create debugging information entry for a class.
/// @param Scope Scope in which this class is defined.
/// @param Name class name.
@@ -313,6 +347,10 @@ namespace llvm {
DIType createTemporaryType();
DIType createTemporaryType(DIFile F);
+ /// createForwardDecl - Create a temporary forward-declared type.
+ DIType createForwardDecl(unsigned Tag, StringRef Name, DIFile F,
+ unsigned Line, unsigned RuntimeLang = 0);
+
/// retainType - Retain DIType in a module even if it is not referenced
/// through debug info anchors.
void retainType(DIType T);
@@ -407,6 +445,7 @@ namespace llvm {
/// @param Ty Function type.
/// @param isLocalToUnit True if this function is not externally visible.
/// @param isDefinition True if this is a function definition.
+ /// @param ScopeLine Set to the beginning of the scope this function starts at.
/// @param Flags e.g. is this function prototyped or not.
/// These flags are used to emit DWARF attributes.
/// @param isOptimized True if optimization is ON.
@@ -417,6 +456,7 @@ namespace llvm {
DIFile File, unsigned LineNo,
DIType Ty, bool isLocalToUnit,
bool isDefinition,
+ unsigned ScopeLine,
unsigned Flags = 0,
bool isOptimized = false,
Function *Fn = 0,
@@ -470,7 +510,7 @@ namespace llvm {
/// @param Scope Lexical block.
/// @param File Source file.
DILexicalBlockFile createLexicalBlockFile(DIDescriptor Scope,
- DIFile File);
+ DIFile File);
/// createLexicalBlock - This creates a descriptor for a lexical block
/// with the specified parent context.
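
A hedged sketch of how a frontend might chain the two new DIBuilder entry points; Builder, File, PropTy, and IvarTy are assumed to be in scope, and DIDescriptor's MDNode conversion is assumed to supply the PropertyNode argument:

    // Create the property first, then attach it to the instance variable.
    llvm::DIObjCProperty P =
        Builder.createObjCProperty("name", File, /*LineNumber=*/10,
                                   "getterSel", "setterSel",
                                   /*PropertyAttributes=*/0, PropTy);
    llvm::DIType Ivar =
        Builder.createObjCIVar("_name", File, /*LineNo=*/12,
                               /*SizeInBits=*/64, /*AlignInBits=*/64,
                               /*OffsetInBits=*/0, /*Flags=*/0, IvarTy,
                               P); // DIObjCProperty converts to MDNode*
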
diff --git a/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h b/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
index 30741c4..b701b8f 100644
--- a/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
+++ b/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
@@ -31,7 +31,7 @@ struct DOTGraphTraitsViewer : public FunctionPass {
std::string Title, GraphName;
Graph = &getAnalysis<Analysis>();
GraphName = DOTGraphTraits<Analysis*>::getGraphName(Graph);
- Title = GraphName + " for '" + F.getNameStr() + "' function";
+ Title = GraphName + " for '" + F.getName().str() + "' function";
ViewGraph(Graph, Name, Simple, Title);
return false;
@@ -55,7 +55,7 @@ struct DOTGraphTraitsPrinter : public FunctionPass {
virtual bool runOnFunction(Function &F) {
Analysis *Graph;
- std::string Filename = Name + "." + F.getNameStr() + ".dot";
+ std::string Filename = Name + "." + F.getName().str() + ".dot";
errs() << "Writing '" << Filename << "'...";
std::string ErrorInfo;
@@ -64,7 +64,7 @@ struct DOTGraphTraitsPrinter : public FunctionPass {
std::string Title, GraphName;
GraphName = DOTGraphTraits<Analysis*>::getGraphName(Graph);
- Title = GraphName + " for '" + F.getNameStr() + "' function";
+ Title = GraphName + " for '" + F.getName().str() + "' function";
if (ErrorInfo.empty())
WriteGraph(File, Graph, Simple, Title);
diff --git a/contrib/llvm/include/llvm/Analysis/DebugInfo.h b/contrib/llvm/include/llvm/Analysis/DebugInfo.h
index 9a53c4d..894c542 100644
--- a/contrib/llvm/include/llvm/Analysis/DebugInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/DebugInfo.h
@@ -43,6 +43,7 @@ namespace llvm {
class DILexicalBlockFile;
class DIVariable;
class DIType;
+ class DIObjCProperty;
/// DIDescriptor - A thin wrapper around MDNode to access encoded debug info.
/// This should not be stored in a container, because the underlying MDNode may
@@ -128,6 +129,7 @@ namespace llvm {
bool isUnspecifiedParameter() const;
bool isTemplateTypeParameter() const;
bool isTemplateValueParameter() const;
+ bool isObjCProperty() const;
};
/// DISubrange - This is used to represent ranges, for array bounds.
@@ -135,8 +137,8 @@ namespace llvm {
public:
explicit DISubrange(const MDNode *N = 0) : DIDescriptor(N) {}
- int64_t getLo() const { return (int64_t)getUInt64Field(1); }
- int64_t getHi() const { return (int64_t)getUInt64Field(2); }
+ uint64_t getLo() const { return getUInt64Field(1); }
+ uint64_t getHi() const { return getUInt64Field(2); }
};
/// DIArray - This descriptor holds an array of descriptors.
@@ -153,6 +155,7 @@ namespace llvm {
/// DIScope - A base class for various scopes.
class DIScope : public DIDescriptor {
+ virtual void anchor();
public:
explicit DIScope(const MDNode *N = 0) : DIDescriptor (N) {}
virtual ~DIScope() {}
@@ -163,6 +166,7 @@ namespace llvm {
/// DICompileUnit - A wrapper for a compile unit.
class DICompileUnit : public DIScope {
+ virtual void anchor();
public:
explicit DICompileUnit(const MDNode *N = 0) : DIScope(N) {}
@@ -202,6 +206,7 @@ namespace llvm {
/// DIFile - This is a wrapper for a file.
class DIFile : public DIScope {
+ virtual void anchor();
public:
explicit DIFile(const MDNode *N = 0) : DIScope(N) {
if (DbgNode && !isFile())
@@ -230,7 +235,7 @@ namespace llvm {
/// FIXME: Types should be factored much better so that CV qualifiers and
/// others do not require a huge and empty descriptor full of zeros.
class DIType : public DIScope {
- public:
+ virtual void anchor();
protected:
// This ctor is used when the Tag has already been validated by a derived
// ctor.
@@ -240,7 +245,6 @@ namespace llvm {
/// Verify - Verify that a type descriptor is well formed.
bool Verify() const;
- public:
explicit DIType(const MDNode *N);
explicit DIType() {}
virtual ~DIType() {}
@@ -320,6 +324,7 @@ namespace llvm {
/// DIBasicType - A basic type, like 'int' or 'float'.
class DIBasicType : public DIType {
+ virtual void anchor();
public:
explicit DIBasicType(const MDNode *N = 0) : DIType(N) {}
@@ -338,6 +343,7 @@ namespace llvm {
/// DIDerivedType - A simple derived type, like a const qualified type,
/// a typedef, a pointer or reference, etc.
class DIDerivedType : public DIType {
+ virtual void anchor();
protected:
explicit DIDerivedType(const MDNode *N, bool, bool)
: DIType(N, true, true) {}
@@ -351,29 +357,45 @@ namespace llvm {
/// return base type size.
uint64_t getOriginalTypeSize() const;
- StringRef getObjCPropertyName() const { return getStringField(10); }
+ /// getObjCProperty - Return property node, if this ivar is
+ /// associated with one.
+ MDNode *getObjCProperty() const;
+
+ StringRef getObjCPropertyName() const {
+ if (getVersion() > LLVMDebugVersion11)
+ return StringRef();
+ return getStringField(10);
+ }
StringRef getObjCPropertyGetterName() const {
+ assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request");
return getStringField(11);
}
StringRef getObjCPropertySetterName() const {
+ assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request");
return getStringField(12);
}
bool isReadOnlyObjCProperty() {
+ assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request");
return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_readonly) != 0;
}
bool isReadWriteObjCProperty() {
+ assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request");
return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_readwrite) != 0;
}
bool isAssignObjCProperty() {
+ assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request");
return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_assign) != 0;
}
bool isRetainObjCProperty() {
+ assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request");
return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_retain) != 0;
}
bool isCopyObjCProperty() {
+ assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request");
return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_copy) != 0;
}
bool isNonAtomicObjCProperty() {
+ assert (getVersion() <= LLVMDebugVersion11 && "Invalid Request");
return (getUnsignedField(13) & dwarf::DW_APPLE_PROPERTY_nonatomic) != 0;
}
@@ -391,6 +413,7 @@ namespace llvm {
/// other types, like a function or struct.
/// FIXME: Why is this a DIDerivedType??
class DICompositeType : public DIDerivedType {
+ virtual void anchor();
public:
explicit DICompositeType(const MDNode *N = 0)
: DIDerivedType(N, true, true) {
@@ -454,6 +477,7 @@ namespace llvm {
/// DISubprogram - This is a wrapper for a subprogram (e.g. a function).
class DISubprogram : public DIScope {
+ virtual void anchor();
public:
explicit DISubprogram(const MDNode *N = 0) : DIScope(N) {}
@@ -495,6 +519,7 @@ namespace llvm {
DICompositeType getContainingType() const {
return getFieldAs<DICompositeType>(13);
}
+
unsigned isArtificial() const {
if (getVersion() <= llvm::LLVMDebugVersion8)
return getUnsignedField(14);
@@ -543,6 +568,11 @@ namespace llvm {
return getFieldAs<DIFile>(6).getDirectory();
}
+ /// getScopeLineNumber - Get the beginning of the scope of the
+ /// function, not necessarily the line where its name appears.
+ unsigned getScopeLineNumber() const { return getUnsignedField(20); }
+
/// Verify - Verify that a subprogram descriptor is well formed.
bool Verify() const;
@@ -621,7 +651,7 @@ namespace llvm {
DIScope getContext() const { return getFieldAs<DIScope>(1); }
StringRef getName() const { return getStringField(2); }
- DICompileUnit getCompileUnit() const{
+ DICompileUnit getCompileUnit() const {
assert (getVersion() <= LLVMDebugVersion10 && "Invalid getCompileUnit!");
if (getVersion() == llvm::LLVMDebugVersion7)
return getFieldAs<DICompileUnit>(3);
@@ -687,6 +717,7 @@ namespace llvm {
/// DILexicalBlock - This is a wrapper for a lexical block.
class DILexicalBlock : public DIScope {
+ virtual void anchor();
public:
explicit DILexicalBlock(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
@@ -705,6 +736,7 @@ namespace llvm {
/// DILexicalBlockFile - This is a wrapper for a lexical block with
/// a filename change.
class DILexicalBlockFile : public DIScope {
+ virtual void anchor();
public:
explicit DILexicalBlockFile(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getScope().getContext(); }
@@ -724,6 +756,7 @@ namespace llvm {
/// DINameSpace - A wrapper for a C++ style name space.
class DINameSpace : public DIScope {
+ virtual void anchor();
public:
explicit DINameSpace(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
@@ -760,6 +793,51 @@ namespace llvm {
bool Verify() const;
};
+ class DIObjCProperty : public DIDescriptor {
+ public:
+ explicit DIObjCProperty(const MDNode *N) : DIDescriptor(N) { }
+
+ StringRef getObjCPropertyName() const { return getStringField(1); }
+ DIFile getFile() const { return getFieldAs<DIFile>(2); }
+ unsigned getLineNumber() const { return getUnsignedField(3); }
+
+ StringRef getObjCPropertyGetterName() const {
+ return getStringField(4);
+ }
+ StringRef getObjCPropertySetterName() const {
+ return getStringField(5);
+ }
+ bool isReadOnlyObjCProperty() {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_readonly) != 0;
+ }
+ bool isReadWriteObjCProperty() {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_readwrite) != 0;
+ }
+ bool isAssignObjCProperty() {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_assign) != 0;
+ }
+ bool isRetainObjCProperty() {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_retain) != 0;
+ }
+ bool isCopyObjCProperty() {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_copy) != 0;
+ }
+ bool isNonAtomicObjCProperty() {
+ return (getUnsignedField(6) & dwarf::DW_APPLE_PROPERTY_nonatomic) != 0;
+ }
+
+ DIType getType() const { return getFieldAs<DIType>(7); }
+
+ /// Verify - Verify that a property descriptor is well formed.
+ bool Verify() const;
+
+ /// print - print property descriptor.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print property descriptor to dbgs() with a newline.
+ void dump() const;
+ };
+
/// getDISubprogram - Find subprogram that is enclosing this scope.
DISubprogram getDISubprogram(const MDNode *Scope);
@@ -816,7 +894,7 @@ namespace llvm {
/// addGlobalVariable - Add global variable into GVs.
bool addGlobalVariable(DIGlobalVariable DIG);
- // addSubprogram - Add subprgoram into SPs.
+ // addSubprogram - Add subprogram into SPs.
bool addSubprogram(DISubprogram SP);
/// addType - Add type into Tys.
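
A small sketch of reading the new descriptor back; N is assumed to be the MDNode produced by DIBuilder::createObjCProperty:

    #include "llvm/Analysis/DebugInfo.h"
    #include "llvm/Support/raw_ostream.h"

    static void reportReadonly(const llvm::MDNode *N) {
      llvm::DIObjCProperty Prop(N);
      if (Prop.Verify() && Prop.isReadOnlyObjCProperty())
        llvm::errs() << Prop.getObjCPropertyName() << " is readonly at line "
                     << Prop.getLineNumber() << "\n";
    }
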
diff --git a/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h b/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h
index d7f74af..a2e0675 100644
--- a/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h
+++ b/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h
@@ -154,6 +154,7 @@ public:
/// used to compute forward dominator frontiers.
///
class DominanceFrontier : public DominanceFrontierBase {
+ virtual void anchor();
public:
static char ID; // Pass ID, replacement for typeid
DominanceFrontier() :
diff --git a/contrib/llvm/include/llvm/Analysis/DominatorInternals.h b/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
index ae552b0..0c29236 100644
--- a/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
+++ b/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
@@ -171,7 +171,7 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
// it might be that some blocks did not get a DFS number (e.g., blocks of
// infinite loops). In these cases an artificial exit node is required.
- MultipleRoots |= (DT.isPostDominator() && N != F.size());
+ MultipleRoots |= (DT.isPostDominator() && N != GraphTraits<FuncT*>::size(&F));
// When naively implemented, the Lengauer-Tarjan algorithm requires a separate
// bucket for each vertex. However, this is unnecessary, because each vertex
diff --git a/contrib/llvm/include/llvm/Analysis/Dominators.h b/contrib/llvm/include/llvm/Analysis/Dominators.h
index 230e83d..6e8e4246 100644
--- a/contrib/llvm/include/llvm/Analysis/Dominators.h
+++ b/contrib/llvm/include/llvm/Analysis/Dominators.h
@@ -185,6 +185,18 @@ void Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType>& DT,
template<class NodeT>
class DominatorTreeBase : public DominatorBase<NodeT> {
+ bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
+ const DomTreeNodeBase<NodeT> *B) const {
+ assert(A != B);
+ assert(isReachableFromEntry(B));
+ assert(isReachableFromEntry(A));
+
+ const DomTreeNodeBase<NodeT> *IDom;
+ while ((IDom = B->getIDom()) != 0 && IDom != A && IDom != B)
+ B = IDom; // Walk up the tree
+ return IDom != 0;
+ }
+
protected:
typedef DenseMap<NodeT*, DomTreeNodeBase<NodeT>*> DomTreeNodeMapType;
DomTreeNodeMapType DomTreeNodes;
@@ -321,8 +333,7 @@ public:
/// block. This is the same as using operator[] on this class.
///
inline DomTreeNodeBase<NodeT> *getNode(NodeT *BB) const {
- typename DomTreeNodeMapType::const_iterator I = DomTreeNodes.find(BB);
- return I != DomTreeNodes.end() ? I->second : 0;
+ return DomTreeNodes.lookup(BB);
}
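
The find()/end() dance above collapses into DenseMap::lookup, which returns a value-initialized mapped value (a null pointer here) on a miss; a self-contained illustration:

    #include "llvm/ADT/DenseMap.h"

    struct Node { int Id; };

    static Node *lookupNode(const llvm::DenseMap<int, Node *> &M, int Key) {
      // Equivalent to: I = M.find(Key); return I != M.end() ? I->second : 0;
      return M.lookup(Key);
    }
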
/// getRootNode - This returns the entry node for the CFG of the function. If
@@ -339,38 +350,26 @@ public:
/// Note that this is not a constant time operation!
///
bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
- const DomTreeNodeBase<NodeT> *B) const {
- if (A == 0 || B == 0) return false;
- return dominatedBySlowTreeWalk(A, B);
- }
-
- inline bool properlyDominates(const NodeT *A, const NodeT *B) {
+ const DomTreeNodeBase<NodeT> *B) {
+ if (A == 0 || B == 0)
+ return false;
if (A == B)
return false;
-
- // Cast away the const qualifiers here. This is ok since
- // this function doesn't actually return the values returned
- // from getNode.
- return properlyDominates(getNode(const_cast<NodeT *>(A)),
- getNode(const_cast<NodeT *>(B)));
- }
-
- bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
- const DomTreeNodeBase<NodeT> *B) const {
- const DomTreeNodeBase<NodeT> *IDom;
- if (A == 0 || B == 0) return false;
- while ((IDom = B->getIDom()) != 0 && IDom != A && IDom != B)
- B = IDom; // Walk up the tree
- return IDom != 0;
+ return dominates(A, B);
}
+ bool properlyDominates(const NodeT *A, const NodeT *B);
/// isReachableFromEntry - Return true if A is dominated by the entry
/// block of the function containing it.
- bool isReachableFromEntry(const NodeT* A) {
+ bool isReachableFromEntry(const NodeT* A) const {
assert(!this->isPostDominator() &&
"This is not implemented for post dominators");
- return dominates(&A->getParent()->front(), A);
+ return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
+ }
+
+ inline bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const {
+ return A;
}
/// dominates - Returns true iff A dominates B. Note that this is not a
@@ -378,10 +377,16 @@ public:
///
inline bool dominates(const DomTreeNodeBase<NodeT> *A,
const DomTreeNodeBase<NodeT> *B) {
+ // A node trivially dominates itself.
if (B == A)
- return true; // A node trivially dominates itself.
+ return true;
- if (A == 0 || B == 0)
+ // An unreachable node is dominated by anything.
+ if (!isReachableFromEntry(B))
+ return true;
+
+ // And dominates nothing.
+ if (!isReachableFromEntry(A))
return false;
// Compare the result of the tree walk and the dfs numbers, if expensive
@@ -406,16 +411,7 @@ public:
return dominatedBySlowTreeWalk(A, B);
}
- inline bool dominates(const NodeT *A, const NodeT *B) {
- if (A == B)
- return true;
-
- // Cast away the const qualifiers here. This is ok since
- // this function doesn't actually return the values returned
- // from getNode.
- return dominates(getNode(const_cast<NodeT *>(A)),
- getNode(const_cast<NodeT *>(B)));
- }
+ bool dominates(const NodeT *A, const NodeT *B);
NodeT *getRoot() const {
assert(this->Roots.size() == 1 && "Should always have entry node!");
@@ -623,9 +619,8 @@ protected:
}
DomTreeNodeBase<NodeT> *getNodeForBlock(NodeT *BB) {
- typename DomTreeNodeMapType::iterator I = this->DomTreeNodes.find(BB);
- if (I != this->DomTreeNodes.end() && I->second)
- return I->second;
+ if (DomTreeNodeBase<NodeT> *Node = getNode(BB))
+ return Node;
// Haven't calculated this node yet? Get or calculate the node for the
// immediate dominator.
@@ -641,8 +636,7 @@ protected:
}
inline NodeT *getIDom(NodeT *BB) const {
- typename DenseMap<NodeT*, NodeT*>::const_iterator I = IDoms.find(BB);
- return I != IDoms.end() ? I->second : 0;
+ return IDoms.lookup(BB);
}
inline void addRoot(NodeT* BB) {
@@ -653,21 +647,24 @@ public:
/// recalculate - compute a dominator tree for the given function
template<class FT>
void recalculate(FT& F) {
+ typedef GraphTraits<FT*> TraitsTy;
reset();
this->Vertex.push_back(0);
if (!this->IsPostDominators) {
// Initialize root
- this->Roots.push_back(&F.front());
- this->IDoms[&F.front()] = 0;
- this->DomTreeNodes[&F.front()] = 0;
+ NodeT *entry = TraitsTy::getEntryNode(&F);
+ this->Roots.push_back(entry);
+ this->IDoms[entry] = 0;
+ this->DomTreeNodes[entry] = 0;
Calculate<FT, NodeT*>(*this, F);
} else {
// Initialize the roots list
- for (typename FT::iterator I = F.begin(), E = F.end(); I != E; ++I) {
- if (std::distance(GraphTraits<FT*>::child_begin(I),
- GraphTraits<FT*>::child_end(I)) == 0)
+ for (typename TraitsTy::nodes_iterator I = TraitsTy::nodes_begin(&F),
+ E = TraitsTy::nodes_end(&F); I != E; ++I) {
+ if (std::distance(TraitsTy::child_begin(I),
+ TraitsTy::child_end(I)) == 0)
addRoot(I);
// Prepopulate maps so that we don't get iterator invalidation issues later.
@@ -680,6 +677,32 @@ public:
}
};
+// These two functions are declared out of line as a workaround for building
+// with old (< r147295) versions of clang because of pr11642.
+template<class NodeT>
+bool DominatorTreeBase<NodeT>::dominates(const NodeT *A, const NodeT *B) {
+ if (A == B)
+ return true;
+
+ // Cast away the const qualifiers here. This is ok since
+ // this function doesn't actually return the values returned
+ // from getNode.
+ return dominates(getNode(const_cast<NodeT *>(A)),
+ getNode(const_cast<NodeT *>(B)));
+}
+template<class NodeT>
+bool
+DominatorTreeBase<NodeT>::properlyDominates(const NodeT *A, const NodeT *B) {
+ if (A == B)
+ return false;
+
+ // Cast away the const qualifiers here. This is ok since
+ // this function doesn't actually return the values returned
+ // from getNode.
+ return dominates(getNode(const_cast<NodeT *>(A)),
+ getNode(const_cast<NodeT *>(B)));
+}
+
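
Under the reworked semantics above, a block that is unreachable from the entry is dominated by everything and dominates nothing; a sketch, where Entry and Dead are assumed blocks of the same function and Dead is unreachable:

    #include "llvm/Analysis/Dominators.h"
    #include <cassert>

    static void checkUnreachableSemantics(llvm::DominatorTree &DT,
                                          llvm::BasicBlock *Entry,
                                          llvm::BasicBlock *Dead) {
      if (!DT.isReachableFromEntry(Dead))
        assert(DT.dominates(Entry, Dead) &&  // dominated by everything
               !DT.dominates(Dead, Entry));  // and dominates nothing
    }
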
EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<BasicBlock>);
//===-------------------------------------
@@ -749,9 +772,12 @@ public:
return DT->dominates(A, B);
}
- // dominates - Return true if A dominates B. This performs the
- // special checks necessary if A and B are in the same basic block.
- bool dominates(const Instruction *A, const Instruction *B) const;
+ // dominates - Return true if Def dominates a use in User. This performs
+ // the special checks necessary if Def and User are in the same basic block.
+ // Note that Def doesn't dominate a use in Def itself!
+ bool dominates(const Instruction *Def, const Use &U) const;
+ bool dominates(const Instruction *Def, const Instruction *User) const;
+ bool dominates(const Instruction *Def, const BasicBlock *BB) const;
bool properlyDominates(const DomTreeNode *A, const DomTreeNode *B) const {
return DT->properlyDominates(A, B);
@@ -814,10 +840,12 @@ public:
DT->splitBlock(NewBB);
}
- bool isReachableFromEntry(const BasicBlock* A) {
+ bool isReachableFromEntry(const BasicBlock* A) const {
return DT->isReachableFromEntry(A);
}
+ bool isReachableFromEntry(const Use &U) const;
+
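
A sketch of the new Use-based overload; Def is assumed to be an instruction and U one of the uses being rewritten. Note the comment above: a use inside Def itself is never dominated by Def:

    #include "llvm/Analysis/Dominators.h"

    // Rewriting this use is only sound if the definition dominates it
    // (a use in a PHI node is, in effect, tested on its incoming edge).
    static bool canRewriteUse(llvm::DominatorTree &DT,
                              llvm::Instruction *Def, llvm::Use &U) {
      return DT.dominates(Def, U);
    }
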
virtual void releaseMemory() {
DT->releaseMemory();
diff --git a/contrib/llvm/include/llvm/Analysis/IVUsers.h b/contrib/llvm/include/llvm/Analysis/IVUsers.h
index 2fb607c..2bf79b9 100644
--- a/contrib/llvm/include/llvm/Analysis/IVUsers.h
+++ b/contrib/llvm/include/llvm/Analysis/IVUsers.h
@@ -166,10 +166,16 @@ public:
const_iterator end() const { return IVUses.end(); }
bool empty() const { return IVUses.empty(); }
+ bool isIVUserOrOperand(Instruction *Inst) const {
+ return Processed.count(Inst);
+ }
+
void print(raw_ostream &OS, const Module* = 0) const;
/// dump - This method is used for debugging.
void dump() const;
+protected:
+ bool AddUsersImpl(Instruction *I, SmallPtrSet<Loop*,16> &SimpleLoopNests);
};
Pass *createIVUsersPass();
diff --git a/contrib/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm/include/llvm/Analysis/InlineCost.h
index 36a16e6..691c2d1 100644
--- a/contrib/llvm/include/llvm/Analysis/InlineCost.h
+++ b/contrib/llvm/include/llvm/Analysis/InlineCost.h
@@ -14,171 +14,118 @@
#ifndef LLVM_ANALYSIS_INLINECOST_H
#define LLVM_ANALYSIS_INLINECOST_H
-#include <cassert>
-#include <climits>
-#include <vector>
+#include "llvm/Function.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/CodeMetrics.h"
+#include <cassert>
+#include <climits>
+#include <vector>
namespace llvm {
- class Value;
- class Function;
- class BasicBlock;
class CallSite;
- template<class PtrType, unsigned SmallSize>
- class SmallPtrSet;
class TargetData;
namespace InlineConstants {
// Various magic constants used to adjust heuristics.
const int InstrCost = 5;
- const int IndirectCallBonus = -100;
+ const int IndirectCallThreshold = 100;
const int CallPenalty = 25;
const int LastCallToStaticBonus = -15000;
const int ColdccPenalty = 2000;
const int NoreturnPenalty = 10000;
}
- /// InlineCost - Represent the cost of inlining a function. This
- /// supports special values for functions which should "always" or
- /// "never" be inlined. Otherwise, the cost represents a unitless
- /// amount; smaller values increase the likelihood of the function
- /// being inlined.
+ /// \brief Represents the cost of inlining a function.
+ ///
+ /// This supports special values for functions which should "always" or
+ /// "never" be inlined. Otherwise, the cost represents a unitless amount;
+ /// smaller values increase the likelihood of the function being inlined.
+ ///
+ /// Objects of this type also provide the adjusted threshold for inlining
+ /// based on the information available for a particular callsite. They can be
+ /// directly tested to determine if inlining should occur given the cost and
+ /// threshold for this cost metric.
class InlineCost {
- enum Kind {
- Value,
- Always,
- Never
+ enum SentinelValues {
+ AlwaysInlineCost = INT_MIN,
+ NeverInlineCost = INT_MAX
};
- // This is a do-it-yourself implementation of
- // int Cost : 30;
- // unsigned Type : 2;
- // We used to use bitfields, but they were sometimes miscompiled (PR3822).
- enum { TYPE_BITS = 2 };
- enum { COST_BITS = unsigned(sizeof(unsigned)) * CHAR_BIT - TYPE_BITS };
- unsigned TypedCost; // int Cost : COST_BITS; unsigned Type : TYPE_BITS;
+ /// \brief The estimated cost of inlining this callsite.
+ const int Cost;
- Kind getType() const {
- return Kind(TypedCost >> COST_BITS);
- }
+ /// \brief The adjusted threshold against which this cost was computed.
+ const int Threshold;
- int getCost() const {
- // Sign-extend the bottom COST_BITS bits.
- return (int(TypedCost << TYPE_BITS)) >> TYPE_BITS;
- }
+ // Trivial constructor, interesting logic in the factory functions below.
+ InlineCost(int Cost, int Threshold)
+ : Cost(Cost), Threshold(Threshold) {}
- InlineCost(int C, int T) {
- TypedCost = (unsigned(C << TYPE_BITS) >> TYPE_BITS) | (T << COST_BITS);
- assert(getCost() == C && "Cost exceeds InlineCost precision");
- }
public:
- static InlineCost get(int Cost) { return InlineCost(Cost, Value); }
- static InlineCost getAlways() { return InlineCost(0, Always); }
- static InlineCost getNever() { return InlineCost(0, Never); }
-
- bool isVariable() const { return getType() == Value; }
- bool isAlways() const { return getType() == Always; }
- bool isNever() const { return getType() == Never; }
-
- /// getValue() - Return a "variable" inline cost's amount. It is
- /// an error to call this on an "always" or "never" InlineCost.
- int getValue() const {
- assert(getType() == Value && "Invalid access of InlineCost");
- return getCost();
+ static InlineCost get(int Cost, int Threshold) {
+ assert(Cost > AlwaysInlineCost && "Cost crosses sentinel value");
+ assert(Cost < NeverInlineCost && "Cost crosses sentinel value");
+ return InlineCost(Cost, Threshold);
+ }
+ static InlineCost getAlways() {
+ return InlineCost(AlwaysInlineCost, 0);
+ }
+ static InlineCost getNever() {
+ return InlineCost(NeverInlineCost, 0);
}
- };
-
- /// InlineCostAnalyzer - Cost analyzer used by inliner.
- class InlineCostAnalyzer {
- struct ArgInfo {
- public:
- unsigned ConstantWeight;
- unsigned AllocaWeight;
-
- ArgInfo(unsigned CWeight, unsigned AWeight)
- : ConstantWeight(CWeight), AllocaWeight(AWeight)
- {}
- };
-
- struct FunctionInfo {
- CodeMetrics Metrics;
- /// ArgumentWeights - Each formal argument of the function is inspected to
- /// see if it is used in any contexts where making it a constant or alloca
- /// would reduce the code size. If so, we add some value to the argument
- /// entry here.
- std::vector<ArgInfo> ArgumentWeights;
+ /// \brief Test whether the inline cost is low enough for inlining.
+ operator bool() const {
+ return Cost < Threshold;
+ }
- /// analyzeFunction - Add information about the specified function
- /// to the current structure.
- void analyzeFunction(Function *F, const TargetData *TD);
+ bool isAlways() const { return Cost == AlwaysInlineCost; }
+ bool isNever() const { return Cost == NeverInlineCost; }
+ bool isVariable() const { return !isAlways() && !isNever(); }
- /// NeverInline - Returns true if the function should never be
- /// inlined into any caller.
- bool NeverInline();
- };
+ /// \brief Get the inline cost estimate.
+ /// It is an error to call this on an "always" or "never" InlineCost.
+ int getCost() const {
+ assert(isVariable() && "Invalid access of InlineCost");
+ return Cost;
+ }
- // The Function* for a function can be changed (by ArgumentPromotion);
- // the ValueMap will update itself when this happens.
- ValueMap<const Function *, FunctionInfo> CachedFunctionInfo;
+ /// \brief Get the cost delta from the threshold for inlining.
+ /// Only valid if the cost is of the variable kind. Returns a negative
+ /// value if the cost is too high to inline.
+ int getCostDelta() const { return Threshold - getCost(); }
+ };
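
With the sentinel-based representation, the InlineCost object itself answers the inline question; a hedged sketch of a caller, with CA and CS assumed to be set up elsewhere:

    #include "llvm/Analysis/InlineCost.h"
    #include "llvm/Support/CallSite.h"

    static bool shouldInline(llvm::InlineCostAnalyzer &CA,
                             llvm::CallSite CS, int Threshold) {
      llvm::InlineCost IC = CA.getInlineCost(CS, Threshold);
      if (IC.isAlways()) return true;   // Cost == INT_MIN sentinel
      if (IC.isNever())  return false;  // Cost == INT_MAX sentinel
      return (bool)IC;                  // variable kind: Cost < Threshold
    }
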
+ /// InlineCostAnalyzer - Cost analyzer used by inliner.
+ class InlineCostAnalyzer {
// TargetData if available, or null.
const TargetData *TD;
- int CountBonusForConstant(Value *V, Constant *C = NULL);
- int ConstantFunctionBonus(CallSite CS, Constant *C);
- int getInlineSize(CallSite CS, Function *Callee);
- int getInlineBonuses(CallSite CS, Function *Callee);
public:
InlineCostAnalyzer(): TD(0) {}
void setTargetData(const TargetData *TData) { TD = TData; }
- /// getInlineCost - The heuristic used to determine if we should inline the
- /// function call or not.
+ /// \brief Get an InlineCost object representing the cost of inlining this
+ /// callsite.
///
- InlineCost getInlineCost(CallSite CS,
- SmallPtrSet<const Function *, 16> &NeverInline);
+ /// Note that threshold is passed into this function. Only costs below the
+ /// threshold are computed with any accuracy. The threshold can be used to
+ /// bound the computation necessary to determine whether the cost is
+ /// sufficiently low to warrant inlining.
+ InlineCost getInlineCost(CallSite CS, int Threshold);
/// getCalledFunction - The heuristic used to determine if we should inline
/// the function call or not. The callee is explicitly specified, to allow
- /// you to calculate the cost of inlining a function via a pointer. The
- /// result assumes that the inlined version will always be used. You should
- /// weight it yourself in cases where this callee will not always be called.
- InlineCost getInlineCost(CallSite CS,
- Function *Callee,
- SmallPtrSet<const Function *, 16> &NeverInline);
-
- /// getSpecializationBonus - The heuristic used to determine the per-call
- /// performance boost for using a specialization of Callee with argument
- /// SpecializedArgNos replaced by a constant.
- int getSpecializationBonus(Function *Callee,
- SmallVectorImpl<unsigned> &SpecializedArgNo);
-
- /// getSpecializationCost - The heuristic used to determine the code-size
- /// impact of creating a specialized version of Callee with argument
- /// SpecializedArgNo replaced by a constant.
- InlineCost getSpecializationCost(Function *Callee,
- SmallVectorImpl<unsigned> &SpecializedArgNo);
-
- /// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
- /// higher threshold to determine if the function call should be inlined.
- float getInlineFudgeFactor(CallSite CS);
-
- /// resetCachedFunctionInfo - erase any cached cost info for this function.
- void resetCachedCostInfo(Function* Caller) {
- CachedFunctionInfo[Caller] = FunctionInfo();
- }
-
- /// growCachedCostInfo - update the cached cost info for Caller after Callee
- /// has been inlined. If Callee is NULL it means a dead call has been
- /// eliminated.
- void growCachedCostInfo(Function* Caller, Function* Callee);
-
- /// clear - empty the cache of inline costs
- void clear();
+ /// you to calculate the cost of inlining a function via a pointer. This
+ /// behaves exactly as the version with no explicit callee parameter in all
+ /// other respects.
+ //
+ // Note: This is used by out-of-tree passes, please do not remove without
+ // adding a replacement API.
+ InlineCost getInlineCost(CallSite CS, Function *Callee, int Threshold);
};
/// callIsSmall - If a call is likely to lower to a single target instruction,
diff --git a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
index c1d87d3..152e885 100644
--- a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -20,147 +20,198 @@
#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
namespace llvm {
+ template<typename T>
+ class ArrayRef;
class DominatorTree;
class Instruction;
- class Value;
class TargetData;
- template<typename T>
- class ArrayRef;
+ class TargetLibraryInfo;
+ class Type;
+ class Value;
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const TargetData *TD = 0, const DominatorTree *DT = 0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const TargetData *TD = 0, const DominatorTree *DT = 0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyUDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifyUDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyFDivInst - Given operands for an FDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifySRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ Value *SimplifySRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyURemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyFRemInst - Given operands for an FRem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyShlInst - Given operands for a Shl, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD = 0, const DominatorTree *DT = 0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
/// SimplifyLShrInst - Given operands for a LShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD = 0, const DominatorTree *DT=0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
/// SimplifyAShrInst - Given operands for a AShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0,
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0,
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyGEPInst(ArrayRef<Value *> Ops,
- const TargetData *TD = 0, const DominatorTree *DT = 0);
+ Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
/// can fold the result. If not, this returns null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
+ /// SimplifyTruncInst - Given operands for an TruncInst, see if we can fold
+ /// the result. If not, this returns null.
+ Value *SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
+
//=== Helper functions for higher up the class hierarchy.
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD = 0, const DominatorTree *DT = 0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD = 0, const DominatorTree *DT = 0);
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *SimplifyInstruction(Instruction *I, const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
- /// ReplaceAndSimplifyAllUses - Perform From->replaceAllUsesWith(To) and then
- /// delete the From instruction. In addition to a basic RAUW, this does a
- /// recursive simplification of the updated instructions. This catches
- /// things where one simplification exposes other opportunities. This only
- /// simplifies and deletes scalar operations, it does not change the CFG.
+ /// \brief Replace all uses of 'I' with 'SimpleV' and simplify the uses
+ /// recursively.
///
- void ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
- const TargetData *TD = 0,
- const DominatorTree *DT = 0);
+ /// This first performs a normal RAUW of I with SimpleV. It then recursively
+ /// attempts to simplify those users updated by the operation. The 'I'
+ /// instruction must not be equal to the simplified value 'SimpleV'.
+ ///
+ /// The function returns true if any simplifications were performed.
+ bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
+
+ /// \brief Recursively attempt to simplify an instruction.
+ ///
+ /// This routine uses SimplifyInstruction to simplify 'I', and if successful
+ /// replaces uses of 'I' with the simplified value. It then recurses on each
+ /// of the users impacted. It returns true if any simplifications were
+ /// performed.
+ bool recursivelySimplifyInstruction(Instruction *I,
+ const TargetData *TD = 0,
+ const TargetLibraryInfo *TLI = 0,
+ const DominatorTree *DT = 0);
} // end namespace llvm
#endif
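
A sketch of the updated calling convention: every simplification entry point now threads TargetLibraryInfo between TargetData and DominatorTree (I, TD, TLI, and DT are assumed to come from the enclosing pass):

    #include "llvm/Analysis/InstructionSimplify.h"

    static bool simplifyAndPropagate(llvm::Instruction *I,
                                     const llvm::TargetData *TD,
                                     const llvm::TargetLibraryInfo *TLI,
                                     const llvm::DominatorTree *DT) {
      if (llvm::Value *V = llvm::SimplifyInstruction(I, TD, TLI, DT))
        // RAUW of I with V, then recursive re-simplification of the
        // users that changed; returns true if anything simplified.
        return llvm::replaceAndRecursivelySimplify(I, V, TD, TLI, DT);
      return false;
    }
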
diff --git a/contrib/llvm/include/llvm/Analysis/IntervalIterator.h b/contrib/llvm/include/llvm/Analysis/IntervalIterator.h
index 82b3294..0968c74 100644
--- a/contrib/llvm/include/llvm/Analysis/IntervalIterator.h
+++ b/contrib/llvm/include/llvm/Analysis/IntervalIterator.h
@@ -101,14 +101,14 @@ public:
IntervalIterator(Function *M, bool OwnMemory) : IOwnMem(OwnMemory) {
OrigContainer = M;
if (!ProcessInterval(&M->front())) {
- assert(0 && "ProcessInterval should never fail for first interval!");
+ llvm_unreachable("ProcessInterval should never fail for first interval!");
}
}
IntervalIterator(IntervalPartition &IP, bool OwnMemory) : IOwnMem(OwnMemory) {
OrigContainer = &IP;
if (!ProcessInterval(IP.getRootInterval())) {
- assert(0 && "ProcessInterval should never fail for first interval!");
+ llvm_unreachable("ProcessInterval should never fail for first interval!");
}
}
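
Unlike assert(0 && "..."), llvm_unreachable keeps its meaning in NDEBUG builds, where it lowers to an unreachable hint instead of disappearing; a minimal standalone sketch:

    #include "llvm/Support/ErrorHandling.h"

    static int classify(int Kind) {
      switch (Kind) {
      case 0: return 10;
      case 1: return 20;
      }
      // Aborts in asserts builds; marks the path unreachable otherwise.
      llvm_unreachable("unexpected Kind in classify");
    }
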
diff --git a/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h b/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
index fc4d0af..065c230 100644
--- a/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
@@ -20,12 +20,14 @@
namespace llvm {
class Constant;
class TargetData;
+ class TargetLibraryInfo;
class Value;
/// LazyValueInfo - This pass computes, caches, and vends lazy value constraint
/// information.
class LazyValueInfo : public FunctionPass {
class TargetData *TD;
+ class TargetLibraryInfo *TLI;
void *PImpl;
LazyValueInfo(const LazyValueInfo&); // DO NOT IMPLEMENT.
void operator=(const LazyValueInfo&); // DO NOT IMPLEMENT.
@@ -68,9 +70,7 @@ public:
// Implementation boilerplate.
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- }
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual void releaseMemory();
virtual bool runOnFunction(Function &F);
};
diff --git a/contrib/llvm/include/llvm/Analysis/Loads.h b/contrib/llvm/include/llvm/Analysis/Loads.h
index 1574262..5f0aefb 100644
--- a/contrib/llvm/include/llvm/Analysis/Loads.h
+++ b/contrib/llvm/include/llvm/Analysis/Loads.h
@@ -20,6 +20,7 @@ namespace llvm {
class AliasAnalysis;
class TargetData;
+class MDNode;
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
/// from this value cannot trap. If it is not obviously safe to load from the
@@ -41,10 +42,15 @@ bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
/// MaxInstsToScan specifies the maximum instructions to scan in the block.
/// If it is set to 0, it will scan the whole block. You can also optionally
/// specify an alias analysis implementation, which makes this more precise.
+///
+/// If TBAATag is non-null and a load or store is found, the TBAA tag from the
+/// load or store is recorded there. If there is no TBAA tag or if no access
+/// is found, it is left unmodified.
Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
BasicBlock::iterator &ScanFrom,
unsigned MaxInstsToScan = 6,
- AliasAnalysis *AA = 0);
+ AliasAnalysis *AA = 0,
+ MDNode **TBAATag = 0);
}
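
A sketch of the extended scan, with Ptr, BB, and AA assumed; the TBAA tag is an out-parameter that stays untouched unless a tagged access provides the value:

    #include "llvm/Analysis/Loads.h"

    static llvm::Value *findAvailable(llvm::Value *Ptr, llvm::BasicBlock *BB,
                                      llvm::AliasAnalysis *AA,
                                      llvm::MDNode *&Tag) {
      llvm::BasicBlock::iterator ScanFrom = BB->end(); // scan backwards
      Tag = 0;
      return llvm::FindAvailableLoadedValue(Ptr, BB, ScanFrom,
                                            /*MaxInstsToScan=*/6, AA, &Tag);
    }
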
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
index 12cb6c5..91feaaa 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
@@ -23,7 +23,6 @@
// * whether or not a particular block branches out of the loop
// * the successor blocks of the loop
// * the loop depth
-// * the trip count
// * etc...
//
//===----------------------------------------------------------------------===//
@@ -416,14 +415,26 @@ public:
#ifndef NDEBUG
assert(!Blocks.empty() && "Loop header is missing");
+ // Set up a depth-first iterator to visit every block in the loop.
+ SmallVector<BlockT*, 8> ExitBBs;
+ getExitBlocks(ExitBBs);
+ llvm::SmallPtrSet<BlockT*, 8> VisitSet;
+ VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
+ df_ext_iterator<BlockT*, llvm::SmallPtrSet<BlockT*, 8> >
+ BI = df_ext_begin(getHeader(), VisitSet),
+ BE = df_ext_end(getHeader(), VisitSet);
+
+ // Keep track of the number of BBs visited.
+ unsigned NumVisited = 0;
+
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
std::sort(LoopBBs.begin(), LoopBBs.end());
// Check the individual blocks.
- for (block_iterator I = block_begin(), E = block_end(); I != E; ++I) {
- BlockT *BB = *I;
+ for ( ; BI != BE; ++BI) {
+ BlockT *BB = *BI;
bool HasInsideLoopSuccs = false;
bool HasInsideLoopPreds = false;
SmallVector<BlockT *, 2> OutsideLoopPreds;
@@ -440,7 +451,7 @@ public:
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
PI != PE; ++PI) {
- typename InvBlockTraits::NodeType *N = *PI;
+ BlockT *N = *PI;
if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
HasInsideLoopPreds = true;
else
@@ -464,8 +475,12 @@ public:
assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
assert(BB != getHeader()->getParent()->begin() &&
"Loop contains function entry block!");
+
+ NumVisited++;
}
+ assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
+
// Check the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
// Each block in each subloop should be contained within this loop.
@@ -571,37 +586,6 @@ public:
///
PHINode *getCanonicalInductionVariable() const;
- /// getTripCount - Return a loop-invariant LLVM value indicating the number of
- /// times the loop will be executed. Note that this means that the backedge
- /// of the loop executes N-1 times. If the trip-count cannot be determined,
- /// this returns null.
- ///
- /// The IndVarSimplify pass transforms loops to have a form that this
- /// function easily understands.
- ///
- Value *getTripCount() const;
-
- /// getSmallConstantTripCount - Returns the trip count of this loop as a
- /// normal unsigned value, if possible. Returns 0 if the trip count is unknown
- /// of not constant. Will also return 0 if the trip count is very large
- /// (>= 2^32)
- ///
- /// The IndVarSimplify pass transforms loops to have a form that this
- /// function easily understands.
- ///
- unsigned getSmallConstantTripCount() const;
-
- /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
- /// trip count of this loop as a normal unsigned value, if possible. This
- /// means that the actual trip count is always a multiple of the returned
- /// value (don't forget the trip count could very well be zero as well!).
- ///
- /// Returns 1 if the trip count is unknown or not guaranteed to be the
- /// multiple of a constant (which is also the case if the trip count is simply
- /// constant, use getSmallConstantTripCount for that case), Will also return 1
- /// if the trip count is very large (>= 2^32).
- unsigned getSmallConstantTripMultiple() const;
-
/// isLCSSAForm - Return true if the Loop is in LCSSA form
bool isLCSSAForm(DominatorTree &DT) const;
@@ -610,6 +594,9 @@ public:
/// normal form.
bool isLoopSimplifyForm() const;
+ /// isSafeToClone - Return true if the loop body is safe to clone in practice.
+ bool isSafeToClone() const;
+
/// hasDedicatedExits - Return true if no exit block for the loop
/// has a predecessor that is outside the loop.
bool hasDedicatedExits() const;
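
The trip-count queries deleted from Loop above now live on ScalarEvolution (see the ScalarEvolution.h hunk later in this diff); a sketch of the replacement call, with SE, L, and ExitingBlock assumed:

    #include "llvm/Analysis/ScalarEvolution.h"

    static unsigned constantTripCount(llvm::ScalarEvolution &SE,
                                      llvm::Loop *L,
                                      llvm::BasicBlock *ExitingBlock) {
      // 0 means unknown or non-constant; otherwise the count assumes
      // control leaves the loop via ExitingBlock.
      return SE.getSmallConstantTripCount(L, ExitingBlock);
    }
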
@@ -671,9 +658,7 @@ public:
/// block is in no loop (for example the entry node), null is returned.
///
LoopT *getLoopFor(const BlockT *BB) const {
- typename DenseMap<BlockT *, LoopT *>::const_iterator I=
- BBMap.find(const_cast<BlockT*>(BB));
- return I != BBMap.end() ? I->second : 0;
+ return BBMap.lookup(const_cast<BlockT*>(BB));
}
/// operator[] - same as getLoopFor...
@@ -712,9 +697,7 @@ public:
/// the loop hierarchy tree.
void changeLoopFor(BlockT *BB, LoopT *L) {
if (!L) {
- typename DenseMap<BlockT *, LoopT *>::iterator I = BBMap.find(BB);
- if (I != BBMap.end())
- BBMap.erase(I);
+ BBMap.erase(BB);
return;
}
BBMap[BB] = L;
@@ -771,7 +754,7 @@ public:
}
LoopT *ConsiderForLoop(BlockT *BB, DominatorTreeBase<BlockT> &DT) {
- if (BBMap.find(BB) != BBMap.end()) return 0;// Haven't processed this node?
+ if (BBMap.count(BB)) return 0; // Haven't processed this node?
std::vector<BlockT *> TodoStack;
@@ -782,7 +765,8 @@ public:
InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB);
I != E; ++I) {
typename InvBlockTraits::NodeType *N = *I;
- if (DT.dominates(BB, N)) // If BB dominates its predecessor...
+ // If BB dominates its predecessor...
+ if (DT.dominates(BB, N) && DT.isReachableFromEntry(N))
TodoStack.push_back(N);
}
@@ -792,14 +776,12 @@ public:
LoopT *L = new LoopT(BB);
BBMap[BB] = L;
- BlockT *EntryBlock = BB->getParent()->begin();
-
while (!TodoStack.empty()) { // Process all the nodes in the loop
BlockT *X = TodoStack.back();
TodoStack.pop_back();
if (!L->contains(X) && // As of yet unprocessed??
- DT.dominates(EntryBlock, X)) { // X is reachable from entry block?
+ DT.isReachableFromEntry(X)) {
// Check to see if this block already belongs to a loop. If this occurs
// then we have a case where a loop that is supposed to be a child of
// the current loop was processed before the current loop. When this
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
index e18d937..68ce364 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -324,6 +324,7 @@ namespace llvm {
/// Current AA implementation, just a cache.
AliasAnalysis *AA;
TargetData *TD;
+ DominatorTree *DT;
OwningPtr<PredIteratorCache> PredCache;
public:
MemoryDependenceAnalysis();
@@ -430,6 +431,9 @@ namespace llvm {
void RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P);
+ AliasAnalysis::ModRefResult
+ getModRefInfo(const Instruction *Inst, const AliasAnalysis::Location &Loc);
+
/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void verifyRemoved(Instruction *Inst) const;
diff --git a/contrib/llvm/include/llvm/Analysis/PHITransAddr.h b/contrib/llvm/include/llvm/Analysis/PHITransAddr.h
index 033efba..ff9a247 100644
--- a/contrib/llvm/include/llvm/Analysis/PHITransAddr.h
+++ b/contrib/llvm/include/llvm/Analysis/PHITransAddr.h
@@ -20,7 +20,8 @@
namespace llvm {
class DominatorTree;
class TargetData;
-
+ class TargetLibraryInfo;
+
/// PHITransAddr - An address value which tracks and handles phi translation.
/// As we walk "up" the CFG through predecessors, we need to ensure that the
/// address we're tracking is kept up to date. For example, if we're analyzing
@@ -37,11 +38,14 @@ class PHITransAddr {
/// TD - The target data we are playing with if known, otherwise null.
const TargetData *TD;
+
+ /// TLI - The target library info if known, otherwise null.
+ const TargetLibraryInfo *TLI;
/// InstInputs - The inputs for our symbolic address.
SmallVector<Instruction*, 4> InstInputs;
public:
- PHITransAddr(Value *addr, const TargetData *td) : Addr(addr), TD(td) {
+ PHITransAddr(Value *addr, const TargetData *td) : Addr(addr), TD(td), TLI(0) {
// If the address is an instruction, the whole thing is considered an input.
if (Instruction *I = dyn_cast<Instruction>(Addr))
InstInputs.push_back(I);
diff --git a/contrib/llvm/include/llvm/Analysis/ProfileInfo.h b/contrib/llvm/include/llvm/Analysis/ProfileInfo.h
index 300a027..6c2e273 100644
--- a/contrib/llvm/include/llvm/Analysis/ProfileInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/ProfileInfo.h
@@ -22,6 +22,7 @@
#define LLVM_ANALYSIS_PROFILEINFO_H
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
@@ -85,13 +86,11 @@ namespace llvm {
// getFunction() - Returns the Function for an Edge, checking for validity.
static const FType* getFunction(Edge e) {
- if (e.first) {
+ if (e.first)
return e.first->getParent();
- } else if (e.second) {
+ if (e.second)
return e.second->getParent();
- }
- assert(0 && "Invalid ProfileInfo::Edge");
- return (const FType*)0;
+ llvm_unreachable("Invalid ProfileInfo::Edge");
}
// getEdge() - Creates an Edge from two BasicBlocks.
diff --git a/contrib/llvm/include/llvm/Analysis/RegionInfo.h b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
index 9d89545..b098eea 100644
--- a/contrib/llvm/include/llvm/Analysis/RegionInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
@@ -681,7 +681,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const RegionNode &Node) {
if (Node.isSubRegion())
return OS << Node.getNodeAs<Region>()->getNameStr();
else
- return OS << Node.getNodeAs<BasicBlock>()->getNameStr();
+ return OS << Node.getNodeAs<BasicBlock>()->getName();
}
} // End llvm namespace
#endif
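
getNameStr() (returning std::string by value) is being retired in favor of the StringRef-based getName(), with an explicit .str() only where a std::string is genuinely needed; streaming to a raw_ostream, as in the RegionInfo hunk above, needs no conversion at all. A self-contained sketch mirroring the DOT pass titles earlier in this diff:

    #include "llvm/ADT/StringRef.h"
    #include <string>

    static std::string makeTitle(llvm::StringRef GraphName,
                                 llvm::StringRef FnName) {
      // .str() copies exactly once, at the point the std::string is formed.
      return GraphName.str() + " for '" + FnName.str() + "' function";
    }
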
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
index 10d933e..72408f7 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -41,6 +41,7 @@ namespace llvm {
class Type;
class ScalarEvolution;
class TargetData;
+ class TargetLibraryInfo;
class LLVMContext;
class Loop;
class LoopInfo;
@@ -118,6 +119,10 @@ namespace llvm {
///
bool isAllOnesValue() const;
+ /// isNonConstantNegative - Return true if the specified scev is negated,
+ /// but not a constant.
+ bool isNonConstantNegative() const;
+
/// print - Print out the internal representation of this scalar to the
/// specified stream. This should really only be used for debugging
/// purposes.
@@ -135,7 +140,7 @@ namespace llvm {
ID = X.FastID;
}
static bool Equals(const SCEV &X, const FoldingSetNodeID &ID,
- FoldingSetNodeID &TempID) {
+ unsigned IDHash, FoldingSetNodeID &TempID) {
return ID == X.FastID;
}
static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
@@ -224,6 +229,10 @@ namespace llvm {
///
TargetData *TD;
+ /// TLI - The target library information for the target we are targeting.
+ ///
+ TargetLibraryInfo *TLI;
+
/// DT - The dominator tree.
///
DominatorTree *DT;
@@ -721,16 +730,21 @@ namespace llvm {
const SCEV *LHS, const SCEV *RHS);
/// getSmallConstantTripCount - Returns the maximum trip count of this loop
- /// as a normal unsigned value, if possible. Returns 0 if the trip count is
- /// unknown or not constant.
- unsigned getSmallConstantTripCount(Loop *L, BasicBlock *ExitBlock);
+ /// as a normal unsigned value. Returns 0 if the trip count is unknown or
+ /// not constant. This "trip count" assumes that control exits via
+ /// ExitingBlock. More precisely, it is the number of times that control may
+ /// reach ExitingBlock before taking the branch. For loops with multiple
+ /// exits, it may not be the number of times that the loop header executes if
+ /// the loop exits prematurely via another branch.
+ unsigned getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock);
/// getSmallConstantTripMultiple - Returns the largest constant divisor of
/// the trip count of this loop as a normal unsigned value, if
/// possible. This means that the actual trip count is always a multiple of
/// the returned value (don't forget the trip count could very well be zero
- /// as well!).
- unsigned getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitBlock);
+ /// as well!). As explained in the comments for getSmallConstantTripCount,
+ /// this assumes that control exits the loop via ExitingBlock.
+ unsigned getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock);
// getExitCount - Get the expression for the number of loop iterations for
// which this loop is guaranteed not to exit via ExitingBlock. Otherwise
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
index a4ad145..c22fc3a 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -22,6 +22,8 @@
#include <set>
namespace llvm {
+ class TargetLowering;
+
/// SCEVExpander - This class uses information about analyzed scalars to
/// rewrite expressions in canonical form.
///
@@ -58,6 +60,9 @@ namespace llvm {
/// insert the IV increment at this position.
Instruction *IVIncInsertPos;
+ /// Phis that complete an IV chain. Reuse
+ std::set<AssertingVH<PHINode> > ChainedPhis;
+
/// CanonicalMode - When true, expressions are expanded in "canonical"
/// form. In particular, addrecs are expanded as arithmetic based on
/// a canonical induction variable. When false, expression are expanded
@@ -100,6 +105,7 @@ namespace llvm {
InsertedExpressions.clear();
InsertedValues.clear();
InsertedPostIncValues.clear();
+ ChainedPhis.clear();
}
/// getOrInsertCanonicalInductionVariable - This method returns the
@@ -108,14 +114,18 @@ namespace llvm {
/// starts at zero and steps by one on each iteration.
PHINode *getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty);
- /// hoistStep - Utility for hoisting an IV increment.
- static bool hoistStep(Instruction *IncV, Instruction *InsertPos,
- const DominatorTree *DT);
+ /// getIVIncOperand - Return the induction variable increment's IV operand.
+ Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos,
+ bool allowScale);
+
+ /// hoistIVInc - Utility for hoisting an IV increment.
+ bool hoistIVInc(Instruction *IncV, Instruction *InsertPos);
/// replaceCongruentIVs - replace congruent phis with their most canonical
/// representative. Return the number of phis eliminated.
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
- SmallVectorImpl<WeakVH> &DeadInsts);
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ const TargetLowering *TLI = NULL);
/// expandCodeFor - Insert code to directly compute the specified SCEV
/// expression into the program. The inserted code is inserted into the
@@ -161,6 +171,16 @@ namespace llvm {
void clearInsertPoint() {
Builder.ClearInsertionPoint();
}
+
+ /// isInsertedInstruction - Return true if the specified instruction was
+ /// inserted by the code rewriter. If so, the client should not modify the
+ /// instruction.
+ bool isInsertedInstruction(Instruction *I) const {
+ return InsertedValues.count(I) || InsertedPostIncValues.count(I);
+ }
+
+ void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); }
+
private:
LLVMContext &getContext() const { return SE.getContext(); }
@@ -195,13 +215,6 @@ namespace llvm {
/// result will be expanded to have that type, with a cast if necessary.
Value *expandCodeFor(const SCEV *SH, Type *Ty = 0);
- /// isInsertedInstruction - Return true if the specified instruction was
- /// inserted by the code rewriter. If so, the client should not modify the
- /// instruction.
- bool isInsertedInstruction(Instruction *I) const {
- return InsertedValues.count(I) || InsertedPostIncValues.count(I);
- }
-
/// getRelevantLoop - Determine the most "relevant" loop for the given SCEV.
const Loop *getRelevantLoop(const SCEV *);
@@ -244,6 +257,8 @@ namespace llvm {
const Loop *L,
Type *ExpandTy,
Type *IntTy);
+ Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
+ Type *ExpandTy, Type *IntTy, bool useSubtract);
};
}
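
A hedged sketch of how a client of SCEVExpander might combine the now-public isInsertedInstruction with the new setChainedPhi hook (hypothetical fragment; noteChainedIV is an illustrative name):

#include "llvm/Analysis/ScalarEvolutionExpander.h"
using namespace llvm;

static void noteChainedIV(SCEVExpander &Rewriter, PHINode *Phi,
                          Instruction *Candidate) {
  // Tell the expander this phi completes an IV chain so it can be reused.
  Rewriter.setChainedPhi(Phi);
  // Per the comment above, clients must not modify instructions the
  // rewriter inserted itself.
  if (Rewriter.isInsertedInstruction(Candidate))
    return;
  // ... Candidate is safe to rewrite here ...
}
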
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index b6f0ae5..47b3710 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -491,7 +491,6 @@ namespace llvm {
RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) {
llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
- return RetVal();
}
};
}
diff --git a/contrib/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
index 6826330..f2f9db4 100644
--- a/contrib/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
@@ -17,15 +17,15 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/DataTypes.h"
-#include <string>
namespace llvm {
- template <typename T> class SmallVectorImpl;
class Value;
class Instruction;
class APInt;
class TargetData;
-
+ class StringRef;
+ class MDNode;
+
/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bit sets. This code only analyzes bits in Mask, in order to short-circuit
@@ -36,10 +36,10 @@ namespace llvm {
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
- void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
- APInt &KnownOne, const TargetData *TD = 0,
- unsigned Depth = 0);
-
+ void ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ const TargetData *TD = 0, unsigned Depth = 0);
+ void computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero);
+
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around ComputeMaskedBits.
void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
@@ -48,8 +48,10 @@ namespace llvm {
/// isPowerOfTwo - Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
- /// type and vectors of integers.
- bool isPowerOfTwo(Value *V, const TargetData *TD = 0, unsigned Depth = 0);
+ /// type and vectors of integers. If 'OrZero' is set then returns true if the
+ /// given value is either a power of two or zero.
+ bool isPowerOfTwo(Value *V, const TargetData *TD = 0, bool OrZero = false,
+ unsigned Depth = 0);
/// isKnownNonZero - Return true if the given value is known to be non-zero
/// when defined. For vectors return true if every element is known to be
@@ -123,16 +125,15 @@ namespace llvm {
return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
}
- /// GetConstantStringInfo - This function computes the length of a
+ /// getConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
- /// and returns the string in Str. If unsuccessful, it returns false. If
- /// StopAtNul is set to true (the default), the returned string is truncated
- /// by a nul character in the global. If StopAtNul is false, the nul
- /// character is included in the result string.
- bool GetConstantStringInfo(const Value *V, std::string &Str,
- uint64_t Offset = 0,
- bool StopAtNul = true);
-
+ /// and returns the string in Str. If unsuccessful, it returns false. This
+ /// does not include the trailing nul character by default. If TrimAtNul is
+ /// set to false, then this returns any trailing nul characters as well as any
+ /// other characters that come after it.
+ bool getConstantStringInfo(const Value *V, StringRef &Str,
+ uint64_t Offset = 0, bool TrimAtNul = true);
+
/// GetStringLength - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(Value *V);
@@ -154,6 +155,27 @@ namespace llvm {
/// are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);
+ /// isSafeToSpeculativelyExecute - Return true if the instruction does not
+ /// have any effects besides calculating the result and does not have
+ /// undefined behavior.
+ ///
+ /// This method never returns true for an instruction that returns true for
+  /// mayHaveSideEffects; however, it also performs several additional
+  /// checks. It checks for undefined behavior, like dividing by zero or
+ /// loading from an invalid pointer (but not for undefined results, like a
+ /// shift with a shift amount larger than the width of the result). It checks
+ /// for malloc and alloca because speculatively executing them might cause a
+ /// memory leak. It also returns false for instructions related to control
+ /// flow, specifically terminators and PHI nodes.
+ ///
+ /// This method only looks at the instruction itself and its operands, so if
+ /// this method returns true, it is safe to move the instruction as long as
+ /// the correct dominance relationships for the operands and users hold.
+ /// However, this method can return true for instructions that read memory;
+ /// for such instructions, moving them may change the resulting value.
+ bool isSafeToSpeculativelyExecute(const Value *V,
+ const TargetData *TD = 0);
+
} // end namespace llvm
#endif
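
The isSafeToSpeculativelyExecute contract above is the interesting one for hoisting transforms; a minimal sketch of a conservative hoisting guard built on it (hypothetical helper, assuming only the declarations in this header; canHoistAbove is an illustrative name):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Instruction.h"
using namespace llvm;

static bool canHoistAbove(Instruction *I, const TargetData *TD) {
  // Rejects anything with side effects or potential UB (division by
  // zero, loads from invalid pointers, malloc/alloca, terminators, phis).
  if (!isSafeToSpeculativelyExecute(I, TD))
    return false;
  // The function can still return true for loads, whose value may change
  // when moved; be conservative and require no memory reads here.
  return !I->mayReadFromMemory();
}
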
diff --git a/contrib/llvm/include/llvm/Argument.h b/contrib/llvm/include/llvm/Argument.h
index cd74882..e66075c 100644
--- a/contrib/llvm/include/llvm/Argument.h
+++ b/contrib/llvm/include/llvm/Argument.h
@@ -30,6 +30,7 @@ template<typename ValueSubClass, typename ItemParentClass>
/// the function was called with.
/// @brief LLVM Argument representation
class Argument : public Value, public ilist_node<Argument> {
+ virtual void anchor();
Function *Parent;
friend class SymbolTableListTraits<Argument, Function>;
diff --git a/contrib/llvm/include/llvm/Assembly/AssemblyAnnotationWriter.h b/contrib/llvm/include/llvm/Assembly/AssemblyAnnotationWriter.h
index 3a65f97..37b47c3 100644
--- a/contrib/llvm/include/llvm/Assembly/AssemblyAnnotationWriter.h
+++ b/contrib/llvm/include/llvm/Assembly/AssemblyAnnotationWriter.h
@@ -22,7 +22,7 @@ namespace llvm {
class Function;
class BasicBlock;
class Instruction;
-class raw_ostream;
+class Value;
class formatted_raw_ostream;
class AssemblyAnnotationWriter {
@@ -32,30 +32,30 @@ public:
/// emitFunctionAnnot - This may be implemented to emit a string right before
/// the start of a function.
- virtual void emitFunctionAnnot(const Function *F,
- formatted_raw_ostream &OS) {}
+ virtual void emitFunctionAnnot(const Function *,
+ formatted_raw_ostream &) {}
/// emitBasicBlockStartAnnot - This may be implemented to emit a string right
/// after the basic block label, but before the first instruction in the
/// block.
- virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
- formatted_raw_ostream &OS) {
+ virtual void emitBasicBlockStartAnnot(const BasicBlock *,
+ formatted_raw_ostream &) {
}
/// emitBasicBlockEndAnnot - This may be implemented to emit a string right
/// after the basic block.
- virtual void emitBasicBlockEndAnnot(const BasicBlock *BB,
- formatted_raw_ostream &OS) {
+ virtual void emitBasicBlockEndAnnot(const BasicBlock *,
+ formatted_raw_ostream &) {
}
/// emitInstructionAnnot - This may be implemented to emit a string right
/// before an instruction is emitted.
- virtual void emitInstructionAnnot(const Instruction *I,
- formatted_raw_ostream &OS) {}
+ virtual void emitInstructionAnnot(const Instruction *,
+ formatted_raw_ostream &) {}
/// printInfoComment - This may be implemented to emit a comment to the
/// right of an instruction or global value.
- virtual void printInfoComment(const Value &V, formatted_raw_ostream &OS) {}
+ virtual void printInfoComment(const Value &, formatted_raw_ostream &) {}
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/Assembly/Parser.h b/contrib/llvm/include/llvm/Assembly/Parser.h
index 82ec6d8..b971c53 100644
--- a/contrib/llvm/include/llvm/Assembly/Parser.h
+++ b/contrib/llvm/include/llvm/Assembly/Parser.h
@@ -21,7 +21,6 @@ namespace llvm {
class Module;
class MemoryBuffer;
class SMDiagnostic;
-class raw_ostream;
class LLVMContext;
/// This function is the main interface to the LLVM Assembly Parser. It parses
diff --git a/contrib/llvm/include/llvm/Assembly/Writer.h b/contrib/llvm/include/llvm/Assembly/Writer.h
index 8d8befd..6b89ae0 100644
--- a/contrib/llvm/include/llvm/Assembly/Writer.h
+++ b/contrib/llvm/include/llvm/Assembly/Writer.h
@@ -19,7 +19,6 @@
namespace llvm {
-class Type;
class Module;
class Value;
class raw_ostream;
diff --git a/contrib/llvm/include/llvm/Attributes.h b/contrib/llvm/include/llvm/Attributes.h
index 2d7b33b..0099f17 100644
--- a/contrib/llvm/include/llvm/Attributes.h
+++ b/contrib/llvm/include/llvm/Attributes.h
@@ -22,8 +22,66 @@
namespace llvm {
class Type;
+namespace Attribute {
+/// We use this proxy POD type to allow constructing Attributes constants
+/// using initializer lists. Do not use this class directly.
+struct AttrConst {
+ uint64_t v;
+ AttrConst operator | (const AttrConst Attrs) const {
+ AttrConst Res = {v | Attrs.v};
+ return Res;
+ }
+ AttrConst operator ~ () const {
+ AttrConst Res = {~v};
+ return Res;
+ }
+};
+} // namespace Attribute
+
+
/// Attributes - A bitset of attributes.
-typedef unsigned Attributes;
+class Attributes {
+ public:
+ Attributes() : Bits(0) { }
+ explicit Attributes(uint64_t Val) : Bits(Val) { }
+ /*implicit*/ Attributes(Attribute::AttrConst Val) : Bits(Val.v) { }
+ Attributes(const Attributes &Attrs) : Bits(Attrs.Bits) { }
+ // This is a "safe bool() operator".
+ operator const void *() const { return Bits ? this : 0; }
+ bool isEmptyOrSingleton() const { return (Bits & (Bits - 1)) == 0; }
+ Attributes &operator = (const Attributes &Attrs) {
+ Bits = Attrs.Bits;
+ return *this;
+ }
+ bool operator == (const Attributes &Attrs) const {
+ return Bits == Attrs.Bits;
+ }
+ bool operator != (const Attributes &Attrs) const {
+ return Bits != Attrs.Bits;
+ }
+ Attributes operator | (const Attributes &Attrs) const {
+ return Attributes(Bits | Attrs.Bits);
+ }
+ Attributes operator & (const Attributes &Attrs) const {
+ return Attributes(Bits & Attrs.Bits);
+ }
+ Attributes operator ^ (const Attributes &Attrs) const {
+ return Attributes(Bits ^ Attrs.Bits);
+ }
+ Attributes &operator |= (const Attributes &Attrs) {
+ Bits |= Attrs.Bits;
+ return *this;
+ }
+ Attributes &operator &= (const Attributes &Attrs) {
+ Bits &= Attrs.Bits;
+ return *this;
+ }
+ Attributes operator ~ () const { return Attributes(~Bits); }
+ uint64_t Raw() const { return Bits; }
+ private:
+ // Currently, we need less than 64 bits.
+ uint64_t Bits;
+};
namespace Attribute {
@@ -33,44 +91,55 @@ namespace Attribute {
/// results or the function itself.
/// @brief Function attributes.
-const Attributes None = 0; ///< No attributes have been set
-const Attributes ZExt = 1<<0; ///< Zero extended before/after call
-const Attributes SExt = 1<<1; ///< Sign extended before/after call
-const Attributes NoReturn = 1<<2; ///< Mark the function as not returning
-const Attributes InReg = 1<<3; ///< Force argument to be passed in register
-const Attributes StructRet = 1<<4; ///< Hidden pointer to structure to return
-const Attributes NoUnwind = 1<<5; ///< Function doesn't unwind stack
-const Attributes NoAlias = 1<<6; ///< Considered to not alias after call
-const Attributes ByVal = 1<<7; ///< Pass structure by value
-const Attributes Nest = 1<<8; ///< Nested function static chain
-const Attributes ReadNone = 1<<9; ///< Function does not access memory
-const Attributes ReadOnly = 1<<10; ///< Function only reads from memory
-const Attributes NoInline = 1<<11; ///< inline=never
-const Attributes AlwaysInline = 1<<12; ///< inline=always
-const Attributes OptimizeForSize = 1<<13; ///< opt_size
-const Attributes StackProtect = 1<<14; ///< Stack protection.
-const Attributes StackProtectReq = 1<<15; ///< Stack protection required.
-const Attributes Alignment = 31<<16; ///< Alignment of parameter (5 bits)
+// We declare AttrConst objects that will be used throughout the code
+// and also raw uint64_t objects with _i suffix to be used below for other
+// constant declarations. This is done to avoid static constructors while
+// preserving the type safety of Attributes.
+#define DECLARE_LLVM_ATTRIBUTE(name, value) \
+ const uint64_t name##_i = value; \
+ const AttrConst name = {value};
+
+DECLARE_LLVM_ATTRIBUTE(None,0) ///< No attributes have been set
+DECLARE_LLVM_ATTRIBUTE(ZExt,1<<0) ///< Zero extended before/after call
+DECLARE_LLVM_ATTRIBUTE(SExt,1<<1) ///< Sign extended before/after call
+DECLARE_LLVM_ATTRIBUTE(NoReturn,1<<2) ///< Mark the function as not returning
+DECLARE_LLVM_ATTRIBUTE(InReg,1<<3) ///< Force argument to be passed in register
+DECLARE_LLVM_ATTRIBUTE(StructRet,1<<4) ///< Hidden pointer to structure to return
+DECLARE_LLVM_ATTRIBUTE(NoUnwind,1<<5) ///< Function doesn't unwind stack
+DECLARE_LLVM_ATTRIBUTE(NoAlias,1<<6) ///< Considered to not alias after call
+DECLARE_LLVM_ATTRIBUTE(ByVal,1<<7) ///< Pass structure by value
+DECLARE_LLVM_ATTRIBUTE(Nest,1<<8) ///< Nested function static chain
+DECLARE_LLVM_ATTRIBUTE(ReadNone,1<<9) ///< Function does not access memory
+DECLARE_LLVM_ATTRIBUTE(ReadOnly,1<<10) ///< Function only reads from memory
+DECLARE_LLVM_ATTRIBUTE(NoInline,1<<11) ///< inline=never
+DECLARE_LLVM_ATTRIBUTE(AlwaysInline,1<<12) ///< inline=always
+DECLARE_LLVM_ATTRIBUTE(OptimizeForSize,1<<13) ///< opt_size
+DECLARE_LLVM_ATTRIBUTE(StackProtect,1<<14) ///< Stack protection.
+DECLARE_LLVM_ATTRIBUTE(StackProtectReq,1<<15) ///< Stack protection required.
+DECLARE_LLVM_ATTRIBUTE(Alignment,31<<16) ///< Alignment of parameter (5 bits)
// stored as log2 of alignment with +1 bias
// 0 means unaligned different from align 1
-const Attributes NoCapture = 1<<21; ///< Function creates no aliases of pointer
-const Attributes NoRedZone = 1<<22; /// disable redzone
-const Attributes NoImplicitFloat = 1<<23; /// disable implicit floating point
- /// instructions.
-const Attributes Naked = 1<<24; ///< Naked function
-const Attributes InlineHint = 1<<25; ///< source said inlining was
- ///desirable
-const Attributes StackAlignment = 7<<26; ///< Alignment of stack for
- ///function (3 bits) stored as log2
- ///of alignment with +1 bias
- ///0 means unaligned (different from
- ///alignstack(1))
-const Attributes ReturnsTwice = 1<<29; ///< Function can return twice
-const Attributes UWTable = 1<<30; ///< Function must be in a unwind
- ///table
-const Attributes NonLazyBind = 1U<<31; ///< Function is called early and/or
- /// often, so lazy binding isn't
- /// worthwhile.
+DECLARE_LLVM_ATTRIBUTE(NoCapture,1<<21) ///< Function creates no aliases of pointer
+DECLARE_LLVM_ATTRIBUTE(NoRedZone,1<<22) /// disable redzone
+DECLARE_LLVM_ATTRIBUTE(NoImplicitFloat,1<<23) /// disable implicit floating point
+ /// instructions.
+DECLARE_LLVM_ATTRIBUTE(Naked,1<<24) ///< Naked function
+DECLARE_LLVM_ATTRIBUTE(InlineHint,1<<25) ///< source said inlining was
+ ///desirable
+DECLARE_LLVM_ATTRIBUTE(StackAlignment,7<<26) ///< Alignment of stack for
+ ///function (3 bits) stored as log2
+ ///of alignment with +1 bias
+ ///0 means unaligned (different from
+                                             ///alignstack(1))
+DECLARE_LLVM_ATTRIBUTE(ReturnsTwice,1<<29) ///< Function can return twice
+DECLARE_LLVM_ATTRIBUTE(UWTable,1<<30) ///< Function must be in an unwind
+ ///table
+DECLARE_LLVM_ATTRIBUTE(NonLazyBind,1U<<31) ///< Function is called early and/or
+ /// often, so lazy binding isn't
+ /// worthwhile.
+DECLARE_LLVM_ATTRIBUTE(AddressSafety,1ULL<<32) ///< Address safety checking is on.
+
+#undef DECLARE_LLVM_ATTRIBUTE
/// Note that uwtable is about the ABI or the user mandating an entry in the
/// unwind table. The nounwind attribute is about an exception passing by the
@@ -85,24 +154,26 @@ const Attributes NonLazyBind = 1U<<31; ///< Function is called early and/or
/// uwtable + nounwind = Needs an entry because the ABI says so.
/// @brief Attributes that only apply to function parameters.
-const Attributes ParameterOnly = ByVal | Nest | StructRet | NoCapture;
+const AttrConst ParameterOnly = {ByVal_i | Nest_i |
+ StructRet_i | NoCapture_i};
/// @brief Attributes that may be applied to the function itself. These cannot
/// be used on return values or function parameters.
-const Attributes FunctionOnly = NoReturn | NoUnwind | ReadNone | ReadOnly |
- NoInline | AlwaysInline | OptimizeForSize | StackProtect | StackProtectReq |
- NoRedZone | NoImplicitFloat | Naked | InlineHint | StackAlignment |
- UWTable | NonLazyBind | ReturnsTwice;
+const AttrConst FunctionOnly = {NoReturn_i | NoUnwind_i | ReadNone_i |
+ ReadOnly_i | NoInline_i | AlwaysInline_i | OptimizeForSize_i |
+ StackProtect_i | StackProtectReq_i | NoRedZone_i | NoImplicitFloat_i |
+ Naked_i | InlineHint_i | StackAlignment_i |
+ UWTable_i | NonLazyBind_i | ReturnsTwice_i | AddressSafety_i};
/// @brief Parameter attributes that do not apply to vararg call arguments.
-const Attributes VarArgsIncompatible = StructRet;
+const AttrConst VarArgsIncompatible = {StructRet_i};
/// @brief Attributes that are mutually incompatible.
-const Attributes MutuallyIncompatible[4] = {
- ByVal | InReg | Nest | StructRet,
- ZExt | SExt,
- ReadNone | ReadOnly,
- NoInline | AlwaysInline
+const AttrConst MutuallyIncompatible[4] = {
+ {ByVal_i | InReg_i | Nest_i | StructRet_i},
+ {ZExt_i | SExt_i},
+ {ReadNone_i | ReadOnly_i},
+ {NoInline_i | AlwaysInline_i}
};
/// @brief Which attributes cannot be applied to a type.
@@ -113,20 +184,20 @@ Attributes typeIncompatible(Type *Ty);
inline Attributes constructAlignmentFromInt(unsigned i) {
// Default alignment, allow the target to define how to align it.
if (i == 0)
- return 0;
+ return None;
assert(isPowerOf2_32(i) && "Alignment must be a power of two.");
assert(i <= 0x40000000 && "Alignment too large.");
- return (Log2_32(i)+1) << 16;
+ return Attributes((Log2_32(i)+1) << 16);
}
/// This returns the alignment field of an attribute as a byte alignment value.
inline unsigned getAlignmentFromAttrs(Attributes A) {
Attributes Align = A & Attribute::Alignment;
- if (Align == 0)
+ if (!Align)
return 0;
- return 1U << ((Align >> 16) - 1);
+ return 1U << ((Align.Raw() >> 16) - 1);
}
/// This turns an int stack alignment (which must be a power of 2) into
@@ -134,21 +205,21 @@ inline unsigned getAlignmentFromAttrs(Attributes A) {
inline Attributes constructStackAlignmentFromInt(unsigned i) {
// Default alignment, allow the target to define how to align it.
if (i == 0)
- return 0;
+ return None;
assert(isPowerOf2_32(i) && "Alignment must be a power of two.");
assert(i <= 0x100 && "Alignment too large.");
- return (Log2_32(i)+1) << 26;
+ return Attributes((Log2_32(i)+1) << 26);
}
/// This returns the stack alignment field of an attribute as a byte alignment
/// value.
inline unsigned getStackAlignmentFromAttrs(Attributes A) {
Attributes StackAlign = A & Attribute::StackAlignment;
- if (StackAlign == 0)
+ if (!StackAlign)
return 0;
- return 1U << ((StackAlign >> 26) - 1);
+ return 1U << ((StackAlign.Raw() >> 26) - 1);
}
@@ -242,7 +313,7 @@ public:
/// paramHasAttr - Return true if the specified parameter index has the
/// specified attribute set.
bool paramHasAttr(unsigned Idx, Attributes Attr) const {
- return (getAttributes(Idx) & Attr) != 0;
+ return getAttributes(Idx) & Attr;
}
/// getParamAlignment - Return the alignment for the specified function
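
A short sketch of the new Attributes/AttrConst types in use (hypothetical fragment, assuming only the declarations above; makeParamAttrs is an illustrative name):

#include "llvm/Attributes.h"
using namespace llvm;

Attributes makeParamAttrs(unsigned ByteAlign) {
  // AttrConst constants compose with | at compile time, with no static
  // constructors; the result converts implicitly to Attributes.
  Attributes A = Attribute::NoAlias | Attribute::NoCapture;
  // Alignment is packed as log2+1 into bits 16..20, as encoded above.
  A |= Attribute::constructAlignmentFromInt(ByteAlign);
  return A;
}
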
diff --git a/contrib/llvm/include/llvm/AutoUpgrade.h b/contrib/llvm/include/llvm/AutoUpgrade.h
index 8ca3548..e13c4c1 100644
--- a/contrib/llvm/include/llvm/AutoUpgrade.h
+++ b/contrib/llvm/include/llvm/AutoUpgrade.h
@@ -39,14 +39,6 @@ namespace llvm {
/// This checks for global variables which should be upgraded. It returns true
/// if it requires upgrading.
bool UpgradeGlobalVariable(GlobalVariable *GV);
-
- /// This function checks debug info intrinsics. If an intrinsic is invalid
- /// then this function simply removes the intrinsic.
- void CheckDebugInfoIntrinsics(Module *M);
-
- /// This function upgrades the old pre-3.0 exception handling system to the
- /// new one. N.B. This will be removed in 3.1.
- void UpgradeExceptionHandling(Module *M);
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/BasicBlock.h b/contrib/llvm/include/llvm/BasicBlock.h
index 1cd8dc5..d2aa167 100644
--- a/contrib/llvm/include/llvm/BasicBlock.h
+++ b/contrib/llvm/include/llvm/BasicBlock.h
@@ -110,12 +110,6 @@ public:
const Function *getParent() const { return Parent; }
Function *getParent() { return Parent; }
- /// use_back - Specialize the methods defined in Value, as we know that an
- /// BasicBlock can only be used by Users (specifically terminators
- /// and BlockAddress's).
- User *use_back() { return cast<User>(*use_begin());}
- const User *use_back() const { return cast<User>(*use_begin());}
-
/// getTerminator() - If this is a well formed basic block, then this returns
/// a pointer to the terminator instruction. If it is not, then you get a
/// null pointer back.
@@ -274,6 +268,7 @@ public:
/// getLandingPadInst() - Return the landingpad instruction associated with
/// the landing pad.
LandingPadInst *getLandingPadInst();
+ const LandingPadInst *getLandingPadInst() const;
private:
/// AdjustBlockAddressRefCount - BasicBlock stores the number of BlockAddress
diff --git a/contrib/llvm/include/llvm/Bitcode/Archive.h b/contrib/llvm/include/llvm/Bitcode/Archive.h
index f89a86c..86c44c7 100644
--- a/contrib/llvm/include/llvm/Bitcode/Archive.h
+++ b/contrib/llvm/include/llvm/Bitcode/Archive.h
@@ -394,7 +394,7 @@ class Archive {
/// @brief Look up multiple symbols in the archive.
bool findModulesDefiningSymbols(
std::set<std::string>& symbols, ///< Symbols to be sought
- std::set<Module*>& modules, ///< The modules matching \p symbols
+ SmallVectorImpl<Module*>& modules, ///< The modules matching \p symbols
std::string* ErrMessage ///< Error msg storage, if non-zero
);
diff --git a/contrib/llvm/include/llvm/Bitcode/BitCodes.h b/contrib/llvm/include/llvm/Bitcode/BitCodes.h
index 449dc35..28e1ab1 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitCodes.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitCodes.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cassert>
namespace llvm {
@@ -114,7 +115,6 @@ public:
bool hasEncodingData() const { return hasEncodingData(getEncoding()); }
static bool hasEncodingData(Encoding E) {
switch (E) {
- default: assert(0 && "Unknown encoding");
case Fixed:
case VBR:
return true;
@@ -123,6 +123,7 @@ public:
case Blob:
return false;
}
+ llvm_unreachable("Invalid encoding");
}
/// isChar6 - Return true if this character is legal in the Char6 encoding.
@@ -139,8 +140,7 @@ public:
if (C >= '0' && C <= '9') return C-'0'+26+26;
if (C == '.') return 62;
if (C == '_') return 63;
- assert(0 && "Not a value Char6 character!");
- return 0;
+    llvm_unreachable("Not a valid Char6 character!");
}
static char DecodeChar6(unsigned V) {
@@ -150,17 +150,18 @@ public:
if (V < 26+26+10) return V-26-26+'0';
if (V == 62) return '.';
if (V == 63) return '_';
- assert(0 && "Not a value Char6 character!");
- return ' ';
+    llvm_unreachable("Not a valid Char6 character!");
}
};
+template <> struct isPodLike<BitCodeAbbrevOp> { static const bool value=true; };
+
/// BitCodeAbbrev - This class represents an abbreviation record. An
/// abbreviation allows a complex record that has redundancy to be stored in a
/// specialized format instead of the fully-general, fully-vbr, format.
class BitCodeAbbrev {
- SmallVector<BitCodeAbbrevOp, 8> OperandList;
+ SmallVector<BitCodeAbbrevOp, 32> OperandList;
unsigned char RefCount; // Number of things using this.
~BitCodeAbbrev() {}
public:
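
The Char6 helpers touched above map [a-z][A-Z][0-9]._ onto the values 0..63; a small round-trip sketch of that mapping (hypothetical test code, calling only the static members of BitCodeAbbrevOp shown in this file):

#include "llvm/Bitcode/BitCodes.h"
#include <cassert>
using namespace llvm;

void char6RoundTrip() {
  assert(BitCodeAbbrevOp::EncodeChar6('a') == 0);  // 'a'..'z' -> 0..25
  assert(BitCodeAbbrevOp::EncodeChar6('A') == 26); // 'A'..'Z' -> 26..51
  assert(BitCodeAbbrevOp::EncodeChar6('0') == 52); // '0'..'9' -> 52..61
  assert(BitCodeAbbrevOp::DecodeChar6(62) == '.');
  assert(BitCodeAbbrevOp::DecodeChar6(63) == '_');
}
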
diff --git a/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h b/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h
index 0437f53..6586829 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h
@@ -15,7 +15,10 @@
#ifndef BITSTREAM_READER_H
#define BITSTREAM_READER_H
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/Bitcode/BitCodes.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/StreamableMemoryObject.h"
#include <climits>
#include <string>
#include <vector>
@@ -36,9 +39,7 @@ public:
std::vector<std::pair<unsigned, std::string> > RecordNames;
};
private:
- /// FirstChar/LastChar - This remembers the first and last bytes of the
- /// stream.
- const unsigned char *FirstChar, *LastChar;
+ OwningPtr<StreamableMemoryObject> BitcodeBytes;
std::vector<BlockInfo> BlockInfoRecords;
@@ -47,10 +48,10 @@ private:
/// uses this.
bool IgnoreBlockInfoNames;
- BitstreamReader(const BitstreamReader&); // NOT IMPLEMENTED
- void operator=(const BitstreamReader&); // NOT IMPLEMENTED
+ BitstreamReader(const BitstreamReader&); // DO NOT IMPLEMENT
+ void operator=(const BitstreamReader&); // DO NOT IMPLEMENT
public:
- BitstreamReader() : FirstChar(0), LastChar(0), IgnoreBlockInfoNames(true) {
+ BitstreamReader() : IgnoreBlockInfoNames(true) {
}
BitstreamReader(const unsigned char *Start, const unsigned char *End) {
@@ -58,12 +59,17 @@ public:
init(Start, End);
}
+ BitstreamReader(StreamableMemoryObject *bytes) {
+ BitcodeBytes.reset(bytes);
+ }
+
void init(const unsigned char *Start, const unsigned char *End) {
- FirstChar = Start;
- LastChar = End;
assert(((End-Start) & 3) == 0 &&"Bitcode stream not a multiple of 4 bytes");
+ BitcodeBytes.reset(getNonStreamedMemoryObject(Start, End));
}
+ StreamableMemoryObject &getBitcodeBytes() { return *BitcodeBytes; }
+
~BitstreamReader() {
// Free the BlockInfoRecords.
while (!BlockInfoRecords.empty()) {
@@ -75,9 +81,6 @@ public:
BlockInfoRecords.pop_back();
}
}
-
- const unsigned char *getFirstChar() const { return FirstChar; }
- const unsigned char *getLastChar() const { return LastChar; }
/// CollectBlockInfoNames - This is called by clients that want block/record
/// name information.
@@ -122,7 +125,7 @@ public:
class BitstreamCursor {
friend class Deserializer;
BitstreamReader *BitStream;
- const unsigned char *NextChar;
+ size_t NextChar;
/// CurWord - This is the current data we have pulled from the stream but have
/// not returned to the client.
@@ -156,8 +159,7 @@ public:
}
explicit BitstreamCursor(BitstreamReader &R) : BitStream(&R) {
- NextChar = R.getFirstChar();
- assert(NextChar && "Bitstream not initialized yet");
+ NextChar = 0;
CurWord = 0;
BitsInCurWord = 0;
CurCodeSize = 2;
@@ -167,8 +169,7 @@ public:
freeState();
BitStream = &R;
- NextChar = R.getFirstChar();
- assert(NextChar && "Bitstream not initialized yet");
+ NextChar = 0;
CurWord = 0;
BitsInCurWord = 0;
CurCodeSize = 2;
@@ -225,13 +226,39 @@ public:
/// GetAbbrevIDWidth - Return the number of bits used to encode an abbrev #.
unsigned GetAbbrevIDWidth() const { return CurCodeSize; }
- bool AtEndOfStream() const {
- return NextChar == BitStream->getLastChar() && BitsInCurWord == 0;
+ bool isEndPos(size_t pos) {
+ return BitStream->getBitcodeBytes().isObjectEnd(static_cast<uint64_t>(pos));
+ }
+
+ bool canSkipToPos(size_t pos) const {
+ // pos can be skipped to if it is a valid address or one byte past the end.
+ return pos == 0 || BitStream->getBitcodeBytes().isValidAddress(
+ static_cast<uint64_t>(pos - 1));
+ }
+
+ unsigned char getByte(size_t pos) {
+ uint8_t byte = -1;
+ BitStream->getBitcodeBytes().readByte(pos, &byte);
+ return byte;
+ }
+
+ uint32_t getWord(size_t pos) {
+ uint8_t buf[sizeof(uint32_t)];
+ memset(buf, 0xFF, sizeof(buf));
+ BitStream->getBitcodeBytes().readBytes(pos,
+ sizeof(buf),
+ buf,
+ NULL);
+ return *reinterpret_cast<support::ulittle32_t *>(buf);
+ }
+
+ bool AtEndOfStream() {
+ return isEndPos(NextChar) && BitsInCurWord == 0;
}
/// GetCurrentBitNo - Return the bit # of the bit we are reading.
uint64_t GetCurrentBitNo() const {
- return (NextChar-BitStream->getFirstChar())*CHAR_BIT - BitsInCurWord;
+ return NextChar*CHAR_BIT - BitsInCurWord;
}
BitstreamReader *getBitStreamReader() {
@@ -246,12 +273,10 @@ public:
void JumpToBit(uint64_t BitNo) {
uintptr_t ByteNo = uintptr_t(BitNo/8) & ~3;
uintptr_t WordBitNo = uintptr_t(BitNo) & 31;
- assert(ByteNo <= (uintptr_t)(BitStream->getLastChar()-
- BitStream->getFirstChar()) &&
- "Invalid location");
+ assert(canSkipToPos(ByteNo) && "Invalid location");
// Move the cursor to the right word.
- NextChar = BitStream->getFirstChar()+ByteNo;
+ NextChar = ByteNo;
BitsInCurWord = 0;
CurWord = 0;
@@ -272,7 +297,7 @@ public:
}
// If we run out of data, stop at the end of the stream.
- if (NextChar == BitStream->getLastChar()) {
+ if (isEndPos(NextChar)) {
CurWord = 0;
BitsInCurWord = 0;
return 0;
@@ -281,8 +306,7 @@ public:
unsigned R = CurWord;
// Read the next word from the stream.
- CurWord = (NextChar[0] << 0) | (NextChar[1] << 8) |
- (NextChar[2] << 16) | (NextChar[3] << 24);
+ CurWord = getWord(NextChar);
NextChar += 4;
// Extract NumBits-BitsInCurWord from what we just read.
@@ -376,9 +400,8 @@ public:
// Check that the block wasn't partially defined, and that the offset isn't
// bogus.
- const unsigned char *const SkipTo = NextChar + NumWords*4;
- if (AtEndOfStream() || SkipTo > BitStream->getLastChar() ||
- SkipTo < BitStream->getFirstChar())
+ size_t SkipTo = NextChar + NumWords*4;
+ if (AtEndOfStream() || !canSkipToPos(SkipTo))
return true;
NextChar = SkipTo;
@@ -409,8 +432,7 @@ public:
if (NumWordsP) *NumWordsP = NumWords;
// Validate that this block is sane.
- if (CurCodeSize == 0 || AtEndOfStream() ||
- NextChar+NumWords*4 > BitStream->getLastChar())
+ if (CurCodeSize == 0 || AtEndOfStream())
return true;
return false;
@@ -455,10 +477,10 @@ private:
void ReadAbbreviatedField(const BitCodeAbbrevOp &Op,
SmallVectorImpl<uint64_t> &Vals) {
assert(!Op.isLiteral() && "Use ReadAbbreviatedLiteral for literals!");
-
+
// Decode the value as we are commanded.
switch (Op.getEncoding()) {
- default: assert(0 && "Unknown encoding!");
+ default: llvm_unreachable("Unknown encoding!");
case BitCodeAbbrevOp::Fixed:
Vals.push_back(Read((unsigned)Op.getEncodingData()));
break;
@@ -512,24 +534,25 @@ public:
SkipToWord(); // 32-bit alignment
// Figure out where the end of this blob will be including tail padding.
- const unsigned char *NewEnd = NextChar+((NumElts+3)&~3);
+ size_t NewEnd = NextChar+((NumElts+3)&~3);
// If this would read off the end of the bitcode file, just set the
// record to empty and return.
- if (NewEnd > BitStream->getLastChar()) {
+ if (!canSkipToPos(NewEnd)) {
Vals.append(NumElts, 0);
- NextChar = BitStream->getLastChar();
+ NextChar = BitStream->getBitcodeBytes().getExtent();
break;
}
// Otherwise, read the number of bytes. If we can return a reference to
// the data, do so to avoid copying it.
if (BlobStart) {
- *BlobStart = (const char*)NextChar;
+ *BlobStart = (const char*)BitStream->getBitcodeBytes().getPointer(
+ NextChar, NumElts);
*BlobLen = NumElts;
} else {
for (; NumElts; ++NextChar, --NumElts)
- Vals.push_back(*NextChar);
+ Vals.push_back(getByte(NextChar));
}
// Skip over tail padding.
NextChar = NewEnd;
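
getWord above reassembles a 32-bit word from the underlying byte stream in little-endian order; the equivalent byte math in isolation (a standalone sketch, not reader code; assembleLE is an illustrative name):

#include <stdint.h>

// Little-endian word assembly matching getWord() and the old
// NextChar[0..3] expression: byte 0 is the least significant.
uint32_t assembleLE(const uint8_t b[4]) {
  return (uint32_t)b[0]         | ((uint32_t)b[1] << 8) |
         ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}
// e.g. the bytes {0x42, 0x43, 0xC0, 0xDE} ('B', 'C', 0xC0, 0xDE) read
// back as the single word 0xDEC04342.
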
diff --git a/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h b/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h
index bfb3a4e..475da13 100644
--- a/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h
+++ b/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h
@@ -16,13 +16,14 @@
#define BITSTREAM_WRITER_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Bitcode/BitCodes.h"
#include <vector>
namespace llvm {
class BitstreamWriter {
- std::vector<unsigned char> &Out;
+ SmallVectorImpl<char> &Out;
/// CurBit - Always between 0 and 31 inclusive, specifies the next bit to use.
unsigned CurBit;
@@ -59,8 +60,40 @@ class BitstreamWriter {
};
std::vector<BlockInfo> BlockInfoRecords;
+ // BackpatchWord - Backpatch a 32-bit word in the output with the specified
+ // value.
+ void BackpatchWord(unsigned ByteNo, unsigned NewWord) {
+ Out[ByteNo++] = (unsigned char)(NewWord >> 0);
+ Out[ByteNo++] = (unsigned char)(NewWord >> 8);
+ Out[ByteNo++] = (unsigned char)(NewWord >> 16);
+ Out[ByteNo ] = (unsigned char)(NewWord >> 24);
+ }
+
+ void WriteByte(unsigned char Value) {
+ Out.push_back(Value);
+ }
+
+ void WriteWord(unsigned Value) {
+ unsigned char Bytes[4] = {
+ (unsigned char)(Value >> 0),
+ (unsigned char)(Value >> 8),
+ (unsigned char)(Value >> 16),
+ (unsigned char)(Value >> 24) };
+ Out.append(&Bytes[0], &Bytes[4]);
+ }
+
+ unsigned GetBufferOffset() const {
+ return Out.size();
+ }
+
+ unsigned GetWordIndex() const {
+ unsigned Offset = GetBufferOffset();
+ assert((Offset & 3) == 0 && "Not 32-bit aligned");
+ return Offset / 4;
+ }
+
public:
- explicit BitstreamWriter(std::vector<unsigned char> &O)
+ explicit BitstreamWriter(SmallVectorImpl<char> &O)
: Out(O), CurBit(0), CurValue(0), CurCodeSize(2) {}
~BitstreamWriter() {
@@ -78,10 +111,8 @@ public:
}
}
- std::vector<unsigned char> &getBuffer() { return Out; }
-
/// \brief Retrieve the current position in the stream, in bits.
- uint64_t GetCurrentBitNo() const { return Out.size() * 8 + CurBit; }
+ uint64_t GetCurrentBitNo() const { return GetBufferOffset() * 8 + CurBit; }
//===--------------------------------------------------------------------===//
// Basic Primitives for emitting bits to the stream.
@@ -97,11 +128,7 @@ public:
}
// Add the current word.
- unsigned V = CurValue;
- Out.push_back((unsigned char)(V >> 0));
- Out.push_back((unsigned char)(V >> 8));
- Out.push_back((unsigned char)(V >> 16));
- Out.push_back((unsigned char)(V >> 24));
+ WriteWord(CurValue);
if (CurBit)
CurValue = Val >> (32-CurBit);
@@ -121,11 +148,7 @@ public:
void FlushToWord() {
if (CurBit) {
- unsigned V = CurValue;
- Out.push_back((unsigned char)(V >> 0));
- Out.push_back((unsigned char)(V >> 8));
- Out.push_back((unsigned char)(V >> 16));
- Out.push_back((unsigned char)(V >> 24));
+ WriteWord(CurValue);
CurBit = 0;
CurValue = 0;
}
@@ -164,15 +187,6 @@ public:
Emit(Val, CurCodeSize);
}
- // BackpatchWord - Backpatch a 32-bit word in the output with the specified
- // value.
- void BackpatchWord(unsigned ByteNo, unsigned NewWord) {
- Out[ByteNo++] = (unsigned char)(NewWord >> 0);
- Out[ByteNo++] = (unsigned char)(NewWord >> 8);
- Out[ByteNo++] = (unsigned char)(NewWord >> 16);
- Out[ByteNo ] = (unsigned char)(NewWord >> 24);
- }
-
//===--------------------------------------------------------------------===//
// Block Manipulation
//===--------------------------------------------------------------------===//
@@ -199,7 +213,7 @@ public:
EmitVBR(CodeLen, bitc::CodeLenWidth);
FlushToWord();
- unsigned BlockSizeWordLoc = static_cast<unsigned>(Out.size());
+ unsigned BlockSizeWordIndex = GetWordIndex();
unsigned OldCodeSize = CurCodeSize;
// Emit a placeholder, which will be replaced when the block is popped.
@@ -209,7 +223,7 @@ public:
// Push the outer block's abbrev set onto the stack, start out with an
// empty abbrev set.
- BlockScope.push_back(Block(OldCodeSize, BlockSizeWordLoc/4));
+ BlockScope.push_back(Block(OldCodeSize, BlockSizeWordIndex));
BlockScope.back().PrevAbbrevs.swap(CurAbbrevs);
// If there is a blockinfo for this BlockID, add all the predefined abbrevs
@@ -239,7 +253,7 @@ public:
FlushToWord();
// Compute the size of the block, in words, not counting the size field.
- unsigned SizeInWords= static_cast<unsigned>(Out.size())/4-B.StartSizeWord-1;
+ unsigned SizeInWords = GetWordIndex() - B.StartSizeWord - 1;
unsigned ByteNo = B.StartSizeWord*4;
// Update the block size field in the header of this sub-block.
@@ -275,7 +289,7 @@ private:
// Encode the value as we are commanded.
switch (Op.getEncoding()) {
- default: assert(0 && "Unknown encoding!");
+ default: llvm_unreachable("Unknown encoding!");
case BitCodeAbbrevOp::Fixed:
if (Op.getEncodingData())
Emit((unsigned)V, (unsigned)Op.getEncodingData());
@@ -355,25 +369,24 @@ private:
// Flush to a 32-bit alignment boundary.
FlushToWord();
- assert((Out.size() & 3) == 0 && "Not 32-bit aligned");
// Emit each field as a literal byte.
if (BlobData) {
for (unsigned i = 0; i != BlobLen; ++i)
- Out.push_back((unsigned char)BlobData[i]);
+ WriteByte((unsigned char)BlobData[i]);
        // Mark the blob data as consumed, for the assertion below.
BlobData = 0;
} else {
for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx) {
assert(Vals[RecordIdx] < 256 && "Value too large to emit as blob");
- Out.push_back((unsigned char)Vals[RecordIdx]);
+ WriteByte((unsigned char)Vals[RecordIdx]);
}
}
+
// Align end to 32-bits.
- while (Out.size() & 3)
- Out.push_back(0);
-
+ while (GetBufferOffset() & 3)
+ WriteByte(0);
} else { // Single scalar field.
assert(RecordIdx < Vals.size() && "Invalid abbrev/record");
EmitAbbreviatedField(Op, Vals[RecordIdx]);
@@ -488,7 +501,7 @@ public:
/// EnterBlockInfoBlock - Start emitting the BLOCKINFO_BLOCK.
void EnterBlockInfoBlock(unsigned CodeWidth) {
EnterSubblock(bitc::BLOCKINFO_BLOCK_ID, CodeWidth);
- BlockInfoCurBID = -1U;
+ BlockInfoCurBID = ~0U;
}
private:
/// SwitchToBlockID - If we aren't already talking about the specified block
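
EnterSubblock and ExitBlock above rely on the placeholder-plus-BackpatchWord pattern now expressed through GetWordIndex; a hedged sketch of the calling convention (hypothetical fragment; emitSizedBlock is an illustrative name):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Bitcode/BitstreamWriter.h"
using namespace llvm;

void emitSizedBlock(BitstreamWriter &W, unsigned BlockID, unsigned CodeLen) {
  // EnterSubblock emits a zero placeholder word for the block length...
  W.EnterSubblock(BlockID, CodeLen);
  // ... records for the block body would be emitted here ...
  // ...and ExitBlock computes the size in words and backpatches it.
  W.ExitBlock();
}
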
diff --git a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 4b0dcc3..a8c34cb 100644
--- a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -29,23 +29,21 @@ namespace bitc {
// Module sub-block id's.
PARAMATTR_BLOCK_ID,
-
- /// TYPE_BLOCK_ID_OLD - This is the type descriptor block in LLVM 2.9 and
- /// earlier, replaced with TYPE_BLOCK_ID2. FIXME: Remove in LLVM 3.1.
- TYPE_BLOCK_ID_OLD,
+
+ UNUSED_ID1,
CONSTANTS_BLOCK_ID,
FUNCTION_BLOCK_ID,
- /// TYPE_SYMTAB_BLOCK_ID_OLD - This type descriptor is from LLVM 2.9 and
- /// earlier bitcode files. FIXME: Remove in LLVM 3.1
- TYPE_SYMTAB_BLOCK_ID_OLD,
+ UNUSED_ID2,
VALUE_SYMTAB_BLOCK_ID,
METADATA_BLOCK_ID,
METADATA_ATTACHMENT_ID,
- TYPE_BLOCK_ID_NEW
+ TYPE_BLOCK_ID_NEW,
+
+ USELIST_BLOCK_ID
};
@@ -63,10 +61,10 @@ namespace bitc {
MODULE_CODE_GLOBALVAR = 7,
// FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment,
- // section, visibility]
+ // section, visibility, gc, unnamed_addr]
MODULE_CODE_FUNCTION = 8,
- // ALIAS: [alias type, aliasee val#, linkage]
+ // ALIAS: [alias type, aliasee val#, linkage, visibility]
MODULE_CODE_ALIAS = 9,
/// MODULE_CODE_PURGEVALS: [numvals]
@@ -92,11 +90,12 @@ namespace bitc {
TYPE_CODE_OPAQUE = 6, // OPAQUE
TYPE_CODE_INTEGER = 7, // INTEGER: [width]
TYPE_CODE_POINTER = 8, // POINTER: [pointee type]
- TYPE_CODE_FUNCTION = 9, // FUNCTION: [vararg, retty, paramty x N]
+
+ TYPE_CODE_FUNCTION_OLD = 9, // FUNCTION: [vararg, attrid, retty,
+ // paramty x N]
+
+ TYPE_CODE_HALF = 10, // HALF
- // FIXME: This is the encoding used for structs in LLVM 2.9 and earlier.
- // REMOVE this in LLVM 3.1
- TYPE_CODE_STRUCT_OLD = 10, // STRUCT: [ispacked, eltty x N]
TYPE_CODE_ARRAY = 11, // ARRAY: [numelts, eltty]
TYPE_CODE_VECTOR = 12, // VECTOR: [numelts, eltty]
@@ -113,7 +112,9 @@ namespace bitc {
TYPE_CODE_STRUCT_ANON = 18, // STRUCT_ANON: [ispacked, eltty x N]
TYPE_CODE_STRUCT_NAME = 19, // STRUCT_NAME: [strchr x N]
- TYPE_CODE_STRUCT_NAMED = 20 // STRUCT_NAMED: [ispacked, eltty x N]
+ TYPE_CODE_STRUCT_NAMED = 20,// STRUCT_NAMED: [ispacked, eltty x N]
+
+ TYPE_CODE_FUNCTION = 21 // FUNCTION: [vararg, retty, paramty x N]
};
// The type symbol table only has one code (TST_ENTRY_CODE).
@@ -163,7 +164,8 @@ namespace bitc {
CST_CODE_INLINEASM = 18, // INLINEASM: [sideeffect,asmstr,conststr]
CST_CODE_CE_SHUFVEC_EX = 19, // SHUFVEC_EX: [opty, opval, opval, opval]
CST_CODE_CE_INBOUNDS_GEP = 20,// INBOUNDS_GEP: [n x operands]
- CST_CODE_BLOCKADDRESS = 21 // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
+ CST_CODE_BLOCKADDRESS = 21, // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
+ CST_CODE_DATA = 22 // DATA: [n x elements]
};
/// CastOpcodes - These are values used in the bitcode files to encode which
@@ -270,7 +272,7 @@ namespace bitc {
FUNC_CODE_INST_BR = 11, // BR: [bb#, bb#, cond] or [bb#]
FUNC_CODE_INST_SWITCH = 12, // SWITCH: [opty, op0, op1, ...]
FUNC_CODE_INST_INVOKE = 13, // INVOKE: [attr, fnty, op0,op1, ...]
- FUNC_CODE_INST_UNWIND = 14, // UNWIND
+ // 14 is unused.
FUNC_CODE_INST_UNREACHABLE = 15, // UNREACHABLE
FUNC_CODE_INST_PHI = 16, // PHI: [ty, val0,bb0, ...]
@@ -314,6 +316,10 @@ namespace bitc {
FUNC_CODE_INST_STOREATOMIC = 42 // STORE: [ptrty,ptr,val, align, vol
// ordering, synchscope]
};
+
+ enum UseListCodes {
+ USELIST_CODE_ENTRY = 1 // USELIST_CODE_ENTRY: TBD.
+ };
} // End bitc namespace
} // End llvm namespace
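
The UNUSED_ID1/UNUSED_ID2 placeholders above exist only to keep the on-disk values of the later block IDs stable; a compile-time check makes that invariant explicit (hypothetical guard, written C++03-style since this predates static_assert in these headers):

#include "llvm/Bitcode/LLVMBitCodes.h"

// Fails to compile if the two UNUSED_ID slots ever stop padding
// CONSTANTS_BLOCK_ID out to PARAMATTR_BLOCK_ID + 2.
typedef char BlockIDsStable[
    llvm::bitc::CONSTANTS_BLOCK_ID ==
    llvm::bitc::PARAMATTR_BLOCK_ID + 2 ? 1 : -1];
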
diff --git a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
index fa754c0..cc2b473 100644
--- a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
+++ b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
@@ -17,35 +17,45 @@
#include <string>
namespace llvm {
- class Module;
- class MemoryBuffer;
- class ModulePass;
class BitstreamWriter;
+ class MemoryBuffer;
+ class DataStreamer;
class LLVMContext;
+ class Module;
+ class ModulePass;
class raw_ostream;
-
+
/// getLazyBitcodeModule - Read the header of the specified bitcode buffer
/// and prepare for lazy deserialization of function bodies. If successful,
/// this takes ownership of 'buffer' and returns a non-null pointer. On
/// error, this returns null, *does not* take ownership of Buffer, and fills
/// in *ErrMsg with an error description if ErrMsg is non-null.
Module *getLazyBitcodeModule(MemoryBuffer *Buffer,
- LLVMContext& Context,
+ LLVMContext &Context,
std::string *ErrMsg = 0);
+ /// getStreamedBitcodeModule - Read the header of the specified stream
+ /// and prepare for lazy deserialization and streaming of function bodies.
+ /// On error, this returns null, and fills in *ErrMsg with an error
+ /// description if ErrMsg is non-null.
+ Module *getStreamedBitcodeModule(const std::string &name,
+ DataStreamer *streamer,
+ LLVMContext &Context,
+ std::string *ErrMsg = 0);
+
/// getBitcodeTargetTriple - Read the header of the specified bitcode
/// buffer and extract just the triple information. If successful,
/// this returns a string and *does not* take ownership
/// of 'buffer'. On error, this returns "", and fills in *ErrMsg
/// if ErrMsg is non-null.
std::string getBitcodeTargetTriple(MemoryBuffer *Buffer,
- LLVMContext& Context,
+ LLVMContext &Context,
std::string *ErrMsg = 0);
/// ParseBitcodeFile - Read the specified bitcode file, returning the module.
/// If an error occurs, this returns null and fills in *ErrMsg if it is
/// non-null. This method *never* takes ownership of Buffer.
- Module *ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext& Context,
+ Module *ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext &Context,
std::string *ErrMsg = 0);
/// WriteBitcodeToFile - Write the specified module to the specified
@@ -53,15 +63,11 @@ namespace llvm {
/// should be in "binary" mode.
void WriteBitcodeToFile(const Module *M, raw_ostream &Out);
- /// WriteBitcodeToStream - Write the specified module to the specified
- /// raw output stream.
- void WriteBitcodeToStream(const Module *M, BitstreamWriter &Stream);
-
/// createBitcodeWriterPass - Create and return a pass that writes the module
/// to the specified ostream.
ModulePass *createBitcodeWriterPass(raw_ostream &Str);
-
-
+
+
/// isBitcodeWrapper - Return true if the given bytes are the magic bytes
/// for an LLVM IR bitcode wrapper.
///
@@ -109,21 +115,24 @@ namespace llvm {
/// uint32_t BitcodeSize; // Size of traditional bitcode file.
/// ... potentially other gunk ...
/// };
- ///
+ ///
/// This function is called when we find a file with a matching magic number.
/// In this case, skip down to the subsection of the file that is actually a
/// BC file.
- static inline bool SkipBitcodeWrapperHeader(unsigned char *&BufPtr,
- unsigned char *&BufEnd) {
+ /// If 'VerifyBufferSize' is true, check that the buffer is large enough to
+ /// contain the whole bitcode file.
+ static inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr,
+ const unsigned char *&BufEnd,
+ bool VerifyBufferSize) {
enum {
KnownHeaderSize = 4*4, // Size of header we read.
OffsetField = 2*4, // Offset in bytes to Offset field.
SizeField = 3*4 // Offset in bytes to Size field.
};
-
+
// Must contain the header!
if (BufEnd-BufPtr < KnownHeaderSize) return true;
-
+
unsigned Offset = ( BufPtr[OffsetField ] |
(BufPtr[OffsetField+1] << 8) |
(BufPtr[OffsetField+2] << 16) |
@@ -132,9 +141,9 @@ namespace llvm {
(BufPtr[SizeField +1] << 8) |
(BufPtr[SizeField +2] << 16) |
(BufPtr[SizeField +3] << 24));
-
+
// Verify that Offset+Size fits in the file.
- if (Offset+Size > unsigned(BufEnd-BufPtr))
+ if (VerifyBufferSize && Offset+Size > unsigned(BufEnd-BufPtr))
return true;
BufPtr += Offset;
BufEnd = BufPtr+Size;
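
SkipBitcodeWrapperHeader above decodes the wrapper's Offset and Size fields as little-endian 32-bit words at byte offsets 8 and 12; the same decoding in isolation (standalone sketch; readWrapperField is an illustrative name):

#include <stdint.h>

// Reads one little-endian uint32_t field of the bitcode wrapper header,
// e.g. FieldByteOffset 2*4 for Offset and 3*4 for Size, as above.
uint32_t readWrapperField(const unsigned char *BufPtr,
                          unsigned FieldByteOffset) {
  return (uint32_t)BufPtr[FieldByteOffset]             |
         ((uint32_t)BufPtr[FieldByteOffset + 1] << 8)  |
         ((uint32_t)BufPtr[FieldByteOffset + 2] << 16) |
         ((uint32_t)BufPtr[FieldByteOffset + 3] << 24);
}
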
diff --git a/contrib/llvm/include/llvm/CodeGen/Analysis.h b/contrib/llvm/include/llvm/CodeGen/Analysis.h
index d8e6407..0b609ed 100644
--- a/contrib/llvm/include/llvm/CodeGen/Analysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/Analysis.h
@@ -27,6 +27,7 @@ namespace llvm {
class GlobalVariable;
class TargetLowering;
class SDNode;
+class SDValue;
class SelectionDAG;
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
@@ -70,6 +71,10 @@ bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
///
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
+/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
+/// return the equivalent code if we're allowed to assume that NaNs won't occur.
+ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);
+
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
@@ -85,7 +90,7 @@ bool isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
const TargetLowering &TLI);
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
- const TargetLowering &TLI);
+ SDValue &Chain, const TargetLowering &TLI);
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
index 06c5c83..56a87f1 100644
--- a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -18,16 +18,12 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class BlockAddress;
class GCStrategy;
class Constant;
- class ConstantArray;
- class ConstantFP;
- class ConstantInt;
- class ConstantStruct;
- class ConstantVector;
class GCMetadataPrinter;
class GlobalValue;
class GlobalVariable;
@@ -37,14 +33,11 @@ namespace llvm {
class MachineLocation;
class MachineLoopInfo;
class MachineLoop;
- class MachineConstantPool;
- class MachineConstantPoolEntry;
class MachineConstantPoolValue;
class MachineJumpTableInfo;
class MachineModuleInfo;
class MachineMove;
class MCAsmInfo;
- class MCInst;
class MCContext;
class MCSection;
class MCStreamer;
@@ -56,8 +49,6 @@ namespace llvm {
class TargetLoweringObjectFile;
class TargetData;
class TargetMachine;
- class Twine;
- class Type;
/// AsmPrinter - This class is intended to be used as a driving class for all
/// asm writers.
@@ -97,6 +88,11 @@ namespace llvm {
///
MCSymbol *CurrentFnSym;
+ /// The symbol used to represent the start of the current function for the
+ /// purpose of calculating its size (e.g. using the .size directive). By
+ /// default, this is equal to CurrentFnSym.
+ MCSymbol *CurrentFnSymForSize;
+
private:
// GCMetadataPrinters - The garbage collection metadata printer table.
void *GCMetadataPrinters; // Really a DenseMap.
@@ -194,6 +190,11 @@ namespace llvm {
bool needsSEHMoves();
+ /// needsRelocationsForDwarfStringPool - Specifies whether the object format
+ /// expects to use relocations to refer to debug entries. Alternatively we
+ /// emit section offsets in bytes from the start of the string pool.
+ bool needsRelocationsForDwarfStringPool() const;
+
/// EmitConstantPool - Print to the current output stream assembly
/// representations of the constants in the constant pool MCP. This is
/// used to print out constants which have been "spilled to memory" by
@@ -256,13 +257,20 @@ namespace llvm {
/// EmitInstruction - Targets should implement this to emit instructions.
virtual void EmitInstruction(const MachineInstr *) {
- assert(0 && "EmitInstruction not implemented");
+ llvm_unreachable("EmitInstruction not implemented");
}
virtual void EmitFunctionEntryLabel();
virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+ /// EmitXXStructor - Targets can override this to change how global
+ /// constants that are part of a C++ static/global constructor list are
+ /// emitted.
+ virtual void EmitXXStructor(const Constant *CV) {
+ EmitGlobalConstant(CV);
+ }
+
/// isBlockOnlyReachableByFallthough - Return true if the basic block has
/// exactly one predecessor and the control transfer mechanism between
/// the predecessor and this block is a fall-through.
@@ -466,7 +474,7 @@ namespace llvm {
const MachineBasicBlock *MBB,
unsigned uid) const;
void EmitLLVMUsedList(const Constant *List);
- void EmitXXStructorList(const Constant *List);
+ void EmitXXStructorList(const Constant *List, bool isCtor);
GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy *C);
};
}
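
The new EmitXXStructor hook above defaults to EmitGlobalConstant; a hedged sketch of a target override (hypothetical subclass; MyTargetAsmPrinter is an illustrative name, and the AsmPrinter(TM, Streamer) constructor is assumed from this era's API):

#include "llvm/CodeGen/AsmPrinter.h"
using namespace llvm;

class MyTargetAsmPrinter : public AsmPrinter {
public:
  MyTargetAsmPrinter(TargetMachine &TM, MCStreamer &S) : AsmPrinter(TM, S) {}

  // Targets can rewrite or annotate static ctor/dtor list entries here;
  // falling through keeps the default EmitGlobalConstant behavior.
  virtual void EmitXXStructor(const Constant *CV) {
    AsmPrinter::EmitXXStructor(CV);
  }
};
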
diff --git a/contrib/llvm/include/llvm/CodeGen/BinaryObject.h b/contrib/llvm/include/llvm/CodeGen/BinaryObject.h
deleted file mode 100644
index 8c1431f..0000000
--- a/contrib/llvm/include/llvm/CodeGen/BinaryObject.h
+++ /dev/null
@@ -1,353 +0,0 @@
-//===-- llvm/CodeGen/BinaryObject.h - Binary Object. -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a Binary Object Aka. "blob" for holding data from code
-// generators, ready for data to the object module code writters.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_BINARYOBJECT_H
-#define LLVM_CODEGEN_BINARYOBJECT_H
-
-#include "llvm/CodeGen/MachineRelocation.h"
-#include "llvm/Support/DataTypes.h"
-
-#include <string>
-#include <vector>
-
-namespace llvm {
-
-typedef std::vector<uint8_t> BinaryData;
-
-class BinaryObject {
-protected:
- std::string Name;
- bool IsLittleEndian;
- bool Is64Bit;
- BinaryData Data;
- std::vector<MachineRelocation> Relocations;
-
-public:
- /// Constructors and destructor
- BinaryObject() {}
-
- BinaryObject(bool isLittleEndian, bool is64Bit)
- : IsLittleEndian(isLittleEndian), Is64Bit(is64Bit) {}
-
- BinaryObject(const std::string &name, bool isLittleEndian, bool is64Bit)
- : Name(name), IsLittleEndian(isLittleEndian), Is64Bit(is64Bit) {}
-
- ~BinaryObject() {}
-
- /// getName - get name of BinaryObject
- inline std::string getName() const { return Name; }
-
- /// get size of binary data
- size_t size() const {
- return Data.size();
- }
-
- /// get binary data
- BinaryData& getData() {
- return Data;
- }
-
- /// get machine relocations
- const std::vector<MachineRelocation>& getRelocations() const {
- return Relocations;
- }
-
- /// hasRelocations - Return true if 'Relocations' is not empty
- bool hasRelocations() const {
- return !Relocations.empty();
- }
-
- /// emitZeros - This callback is invoked to emit a arbitrary number
- /// of zero bytes to the data stream.
- inline void emitZeros(unsigned Size) {
- for (unsigned i=0; i < Size; ++i)
- emitByte(0);
- }
-
- /// emitByte - This callback is invoked when a byte needs to be
- /// written to the data stream.
- inline void emitByte(uint8_t B) {
- Data.push_back(B);
- }
-
- /// emitWord16 - This callback is invoked when a 16-bit word needs to be
- /// written to the data stream in correct endian format and correct size.
- inline void emitWord16(uint16_t W) {
- if (IsLittleEndian)
- emitWord16LE(W);
- else
- emitWord16BE(W);
- }
-
- /// emitWord16LE - This callback is invoked when a 16-bit word needs to be
- /// written to the data stream in correct endian format and correct size.
- inline void emitWord16LE(uint16_t W) {
- Data.push_back((uint8_t)(W >> 0));
- Data.push_back((uint8_t)(W >> 8));
- }
-
- /// emitWord16BE - This callback is invoked when a 16-bit word needs to be
- /// written to the data stream in correct endian format and correct size.
- inline void emitWord16BE(uint16_t W) {
- Data.push_back((uint8_t)(W >> 8));
- Data.push_back((uint8_t)(W >> 0));
- }
-
- /// emitWord - This callback is invoked when a word needs to be
- /// written to the data stream in correct endian format and correct size.
- inline void emitWord(uint64_t W) {
- if (!Is64Bit)
- emitWord32(W);
- else
- emitWord64(W);
- }
-
- /// emitWord32 - This callback is invoked when a 32-bit word needs to be
- /// written to the data stream in correct endian format.
- inline void emitWord32(uint32_t W) {
- if (IsLittleEndian)
- emitWordLE(W);
- else
- emitWordBE(W);
- }
-
- /// emitWord64 - This callback is invoked when a 32-bit word needs to be
- /// written to the data stream in correct endian format.
- inline void emitWord64(uint64_t W) {
- if (IsLittleEndian)
- emitDWordLE(W);
- else
- emitDWordBE(W);
- }
-
- /// emitWord64 - This callback is invoked when a x86_fp80 needs to be
- /// written to the data stream in correct endian format.
- inline void emitWordFP80(const uint64_t *W, unsigned PadSize) {
- if (IsLittleEndian) {
- emitWord64(W[0]);
- emitWord16(W[1]);
- } else {
- emitWord16(W[1]);
- emitWord64(W[0]);
- }
- emitZeros(PadSize);
- }
-
- /// emitWordLE - This callback is invoked when a 32-bit word needs to be
- /// written to the data stream in little-endian format.
- inline void emitWordLE(uint32_t W) {
- Data.push_back((uint8_t)(W >> 0));
- Data.push_back((uint8_t)(W >> 8));
- Data.push_back((uint8_t)(W >> 16));
- Data.push_back((uint8_t)(W >> 24));
- }
-
- /// emitWordBE - This callback is invoked when a 32-bit word needs to be
- /// written to the data stream in big-endian format.
- ///
- inline void emitWordBE(uint32_t W) {
- Data.push_back((uint8_t)(W >> 24));
- Data.push_back((uint8_t)(W >> 16));
- Data.push_back((uint8_t)(W >> 8));
- Data.push_back((uint8_t)(W >> 0));
- }
-
- /// emitDWordLE - This callback is invoked when a 64-bit word needs to be
- /// written to the data stream in little-endian format.
- inline void emitDWordLE(uint64_t W) {
- Data.push_back((uint8_t)(W >> 0));
- Data.push_back((uint8_t)(W >> 8));
- Data.push_back((uint8_t)(W >> 16));
- Data.push_back((uint8_t)(W >> 24));
- Data.push_back((uint8_t)(W >> 32));
- Data.push_back((uint8_t)(W >> 40));
- Data.push_back((uint8_t)(W >> 48));
- Data.push_back((uint8_t)(W >> 56));
- }
-
- /// emitDWordBE - This callback is invoked when a 64-bit word needs to be
- /// written to the data stream in big-endian format.
- inline void emitDWordBE(uint64_t W) {
- Data.push_back((uint8_t)(W >> 56));
- Data.push_back((uint8_t)(W >> 48));
- Data.push_back((uint8_t)(W >> 40));
- Data.push_back((uint8_t)(W >> 32));
- Data.push_back((uint8_t)(W >> 24));
- Data.push_back((uint8_t)(W >> 16));
- Data.push_back((uint8_t)(W >> 8));
- Data.push_back((uint8_t)(W >> 0));
- }
-
- /// fixByte - This callback is invoked when a byte needs to be
- /// fixed up in the buffer.
- inline void fixByte(uint8_t B, uint32_t offset) {
- Data[offset] = B;
- }
-
- /// fixWord16 - This callback is invoked when a 16-bit word needs to be
- /// fixed up in the data stream in correct endian format.
- inline void fixWord16(uint16_t W, uint32_t offset) {
- if (IsLittleEndian)
- fixWord16LE(W, offset);
- else
- fixWord16BE(W, offset);
- }
-
- /// fixWord16LE - This callback is invoked when a 16-bit word needs to be
- /// fixed up in the data stream in little-endian format.
- inline void fixWord16LE(uint16_t W, uint32_t offset) {
- Data[offset] = (uint8_t)(W >> 0);
- Data[++offset] = (uint8_t)(W >> 8);
- }
-
- /// fixWord16BE - This callback is invoked when a 16-bit word needs to be
- /// fixed up in the data stream in big-endian format.
- inline void fixWord16BE(uint16_t W, uint32_t offset) {
- Data[offset] = (uint8_t)(W >> 8);
- Data[++offset] = (uint8_t)(W >> 0);
- }
-
- /// fixWord - This callback is invoked when a word needs to be
- /// fixed up in the data stream in correct endian format and correct size.
- inline void fixWord(uint64_t W, uint32_t offset) {
- if (!Is64Bit)
- fixWord32(W, offset);
- else
- fixWord64(W, offset);
- }
-
- /// fixWord32 - This callback is invoked when a 32-bit word needs to be
- /// fixed up in the data stream in correct endian format.
- inline void fixWord32(uint32_t W, uint32_t offset) {
- if (IsLittleEndian)
- fixWord32LE(W, offset);
- else
- fixWord32BE(W, offset);
- }
-
- /// fixWord32LE - This callback is invoked when a 32-bit word needs to be
- /// fixed up in the data stream in little-endian format.
- inline void fixWord32LE(uint32_t W, uint32_t offset) {
- Data[offset] = (uint8_t)(W >> 0);
- Data[++offset] = (uint8_t)(W >> 8);
- Data[++offset] = (uint8_t)(W >> 16);
- Data[++offset] = (uint8_t)(W >> 24);
- }
-
- /// fixWord32BE - This callback is invoked when a 32-bit word needs to be
- /// fixed up in the data stream in big-endian format.
- inline void fixWord32BE(uint32_t W, uint32_t offset) {
- Data[offset] = (uint8_t)(W >> 24);
- Data[++offset] = (uint8_t)(W >> 16);
- Data[++offset] = (uint8_t)(W >> 8);
- Data[++offset] = (uint8_t)(W >> 0);
- }
-
- /// fixWord64 - This callback is invoked when a 64-bit word needs to be
- /// fixed up in the data stream in correct endian format.
- inline void fixWord64(uint64_t W, uint32_t offset) {
- if (IsLittleEndian)
- fixWord64LE(W, offset);
- else
- fixWord64BE(W, offset);
- }
-
- /// fixWord64LE - This callback is invoked when a 64-bit word needs to be
- /// fixed up in the data stream in little-endian format.
- inline void fixWord64LE(uint64_t W, uint32_t offset) {
- Data[offset] = (uint8_t)(W >> 0);
- Data[++offset] = (uint8_t)(W >> 8);
- Data[++offset] = (uint8_t)(W >> 16);
- Data[++offset] = (uint8_t)(W >> 24);
- Data[++offset] = (uint8_t)(W >> 32);
- Data[++offset] = (uint8_t)(W >> 40);
- Data[++offset] = (uint8_t)(W >> 48);
- Data[++offset] = (uint8_t)(W >> 56);
- }
-
- /// fixWord64BE - This callback is invoked when a 64-bit word needs to be
- /// fixed up in the data stream in big-endian format.
- inline void fixWord64BE(uint64_t W, uint32_t offset) {
- Data[offset] = (uint8_t)(W >> 56);
- Data[++offset] = (uint8_t)(W >> 48);
- Data[++offset] = (uint8_t)(W >> 40);
- Data[++offset] = (uint8_t)(W >> 32);
- Data[++offset] = (uint8_t)(W >> 24);
- Data[++offset] = (uint8_t)(W >> 16);
- Data[++offset] = (uint8_t)(W >> 8);
- Data[++offset] = (uint8_t)(W >> 0);
- }
-
- /// emitAlignment - Pad the data to the specified alignment.
- void emitAlignment(unsigned Alignment, uint8_t fill = 0) {
- if (Alignment <= 1) return;
- unsigned PadSize = -Data.size() & (Alignment-1);
- for (unsigned i = 0; i<PadSize; ++i)
- Data.push_back(fill);
- }
-
- /// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be
- /// written to the data stream.
- void emitULEB128Bytes(uint64_t Value) {
- do {
- uint8_t Byte = (uint8_t)(Value & 0x7f);
- Value >>= 7;
- if (Value) Byte |= 0x80;
- emitByte(Byte);
- } while (Value);
- }
-
- /// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be
- /// written to the data stream.
- void emitSLEB128Bytes(int64_t Value) {
- int Sign = Value >> (8 * sizeof(Value) - 1);
- bool IsMore;
-
- do {
- uint8_t Byte = (uint8_t)(Value & 0x7f);
- Value >>= 7;
- IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
- if (IsMore) Byte |= 0x80;
- emitByte(Byte);
- } while (IsMore);
- }
-
- /// emitString - This callback is invoked when a String needs to be
- /// written to the data stream.
- void emitString(const std::string &String) {
- for (unsigned i = 0, N = static_cast<unsigned>(String.size()); i<N; ++i) {
- unsigned char C = String[i];
- emitByte(C);
- }
- emitByte(0);
- }
-
- /// getCurrentPCOffset - Return the offset from the start of the emitted
- /// buffer that we are currently writing to.
- uintptr_t getCurrentPCOffset() const {
- return Data.size();
- }
-
- /// addRelocation - Whenever a relocatable address is needed, it should be
- /// noted with this interface.
- void addRelocation(const MachineRelocation& relocation) {
- Relocations.push_back(relocation);
- }
-
-};
-
-} // end namespace llvm
-
-#endif
-
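The deleted BinaryObject.h above carries two small algorithms worth noting in
passing: emitAlignment pads to a power-of-two boundary with the identity
PadSize = -Size & (Alignment - 1), and the LEB128 emitters implement the
DWARF variable-length integer encodings. A minimal standalone sketch of the
two LEB128 routines follows (just the encoding, not the removed class):

#include <cstdint>
#include <vector>

static void emitULEB128(std::vector<uint8_t> &Out, uint64_t Value) {
  do {
    uint8_t Byte = Value & 0x7f;    // low 7 bits per output byte
    Value >>= 7;
    if (Value) Byte |= 0x80;        // high bit set = more bytes follow
    Out.push_back(Byte);
  } while (Value);
}

static void emitSLEB128(std::vector<uint8_t> &Out, int64_t Value) {
  int64_t Sign = Value >> 63;       // 0 or -1, via arithmetic shift
  bool More;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;                    // arithmetic shift preserves the sign
    // Done once the remaining bits are pure sign extension and the sign
    // bit of the emitted byte (0x40) already agrees with them.
    More = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
    if (More) Byte |= 0x80;
    Out.push_back(Byte);
  } while (More);
}

// emitULEB128(Out, 300) yields 0xAC 0x02; emitSLEB128(Out, -2) yields 0x7E.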
diff --git a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
index 77dc644..3afe309 100644
--- a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -229,7 +229,7 @@ public:
/// getFirstUnallocated - Return the first unallocated register in the set, or
/// NumRegs if they are all allocated.
- unsigned getFirstUnallocated(const unsigned *Regs, unsigned NumRegs) const {
+ unsigned getFirstUnallocated(const uint16_t *Regs, unsigned NumRegs) const {
for (unsigned i = 0; i != NumRegs; ++i)
if (!isAllocated(Regs[i]))
return i;
@@ -256,7 +256,7 @@ public:
/// AllocateReg - Attempt to allocate one of the specified registers. If none
/// are available, return zero. Otherwise, return the first one available,
/// marking it and any aliases as allocated.
- unsigned AllocateReg(const unsigned *Regs, unsigned NumRegs) {
+ unsigned AllocateReg(const uint16_t *Regs, unsigned NumRegs) {
unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
if (FirstUnalloc == NumRegs)
return 0; // Didn't find the reg.
@@ -268,7 +268,7 @@ public:
}
/// Version of AllocateReg with list of registers to be shadowed.
- unsigned AllocateReg(const unsigned *Regs, const unsigned *ShadowRegs,
+ unsigned AllocateReg(const uint16_t *Regs, const uint16_t *ShadowRegs,
unsigned NumRegs) {
unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
if (FirstUnalloc == NumRegs)
@@ -306,12 +306,12 @@ public:
// First GPR that carries part of a byval aggregate that's split
// between registers and memory.
- unsigned getFirstByValReg() { return FirstByValRegValid ? FirstByValReg : 0; }
+ unsigned getFirstByValReg() const { return FirstByValRegValid ? FirstByValReg : 0; }
void setFirstByValReg(unsigned r) { FirstByValReg = r; FirstByValRegValid = true; }
void clearFirstByValReg() { FirstByValReg = 0; FirstByValRegValid = false; }
- bool isFirstByValRegValid() { return FirstByValRegValid; }
+ bool isFirstByValRegValid() const { return FirstByValRegValid; }
- ParmContext getCallOrPrologue() { return CallOrPrologue; }
+ ParmContext getCallOrPrologue() const { return CallOrPrologue; }
private:
/// MarkAllocated - Mark a register and all of its aliases as allocated.
diff --git a/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h b/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h
new file mode 100644
index 0000000..2d2db78
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h
@@ -0,0 +1,167 @@
+//=- llvm/CodeGen/DFAPacketizer.h - DFA Packetizer for VLIW ---*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This class implements a deterministic finite automaton (DFA) based
+// packetizing mechanism for VLIW architectures. It provides APIs to
+// determine whether there exists a legal mapping of instructions to
+// functional unit assignments in a packet. The DFA is auto-generated from
+// the target's Schedule.td file.
+//
+// A DFA consists of 3 major elements: states, inputs, and transitions. For
+// the packetizing mechanism, the input is the set of instruction classes for
+// a target. The state models all possible combinations of functional unit
+// consumption for a given set of instructions in a packet. A transition
+// models the addition of an instruction to a packet. In the DFA constructed
+// by this class, if an instruction can be added to a packet, then a valid
+// transition exists from the corresponding state. Invalid transitions
+// indicate that the instruction cannot be added to the current packet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DFAPACKETIZER_H
+#define LLVM_CODEGEN_DFAPACKETIZER_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/ADT/DenseMap.h"
+#include <map>
+
+namespace llvm {
+
+class MCInstrDesc;
+class MachineInstr;
+class MachineLoopInfo;
+class MachineDominatorTree;
+class InstrItineraryData;
+class DefaultVLIWScheduler;
+class SUnit;
+
+class DFAPacketizer {
+private:
+ typedef std::pair<unsigned, unsigned> UnsignPair;
+ const InstrItineraryData *InstrItins;
+ int CurrentState;
+ const int (*DFAStateInputTable)[2];
+ const unsigned *DFAStateEntryTable;
+
+ // CachedTable is a map from <FromState, Input> to ToState.
+ DenseMap<UnsignPair, unsigned> CachedTable;
+
+ // ReadTable - Read the DFA transition table and update CachedTable.
+ void ReadTable(unsigned int state);
+
+public:
+ DFAPacketizer(const InstrItineraryData *I, const int (*SIT)[2],
+ const unsigned *SET);
+
+ // Reset the current state to make all resources available.
+ void clearResources() {
+ CurrentState = 0;
+ }
+
+ // canReserveResources - Check if the resources occupied by a MCInstrDesc
+ // are available in the current state.
+ bool canReserveResources(const llvm::MCInstrDesc *MID);
+
+ // reserveResources - Reserve the resources occupied by a MCInstrDesc and
+ // change the current state to reflect that change.
+ void reserveResources(const llvm::MCInstrDesc *MID);
+
+ // canReserveResources - Check if the resources occupied by a machine
+ // instruction are available in the current state.
+ bool canReserveResources(llvm::MachineInstr *MI);
+
+ // reserveResources - Reserve the resources occupied by a machine
+ // instruction and change the current state to reflect that change.
+ void reserveResources(llvm::MachineInstr *MI);
+
+ const InstrItineraryData *getInstrItins() const { return InstrItins; }
+};
+
+// VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The
+// packetizer works on machine basic blocks. For each instruction I in BB, the
+// packetizer consults the DFA to see if machine resources are available to
+// execute I. If so, the packetizer checks if I depends on any instruction J in
+// the current packet. If no dependency is found, I is added to the current
+// packet and its machine resources are marked as taken. If any dependency is
+// found, a target API call is made to prune the dependence.
+class VLIWPacketizerList {
+protected:
+ const TargetMachine &TM;
+ const MachineFunction &MF;
+ const TargetInstrInfo *TII;
+
+ // The VLIW Scheduler.
+ DefaultVLIWScheduler *VLIWScheduler;
+
+ // Vector of instructions assigned to the current packet.
+ std::vector<MachineInstr*> CurrentPacketMIs;
+ // DFA resource tracker.
+ DFAPacketizer *ResourceTracker;
+
+ // Generate MI -> SU map.
+ std::map<MachineInstr*, SUnit*> MIToSUnit;
+
+public:
+ VLIWPacketizerList(
+ MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
+ bool IsPostRA);
+
+ virtual ~VLIWPacketizerList();
+
+ // PacketizeMIs - Implement this API in the backend to bundle instructions.
+ void PacketizeMIs(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator BeginItr,
+ MachineBasicBlock::iterator EndItr);
+
+ // getResourceTracker - return ResourceTracker
+ DFAPacketizer *getResourceTracker() {return ResourceTracker;}
+
+ // addToPacket - Add MI to the current packet.
+ virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+ MachineBasicBlock::iterator MII = MI;
+ CurrentPacketMIs.push_back(MI);
+ ResourceTracker->reserveResources(MI);
+ return MII;
+ }
+
+ // endPacket - End the current packet.
+ void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
+
+ // initPacketizerState - perform initialization before packetizing
+ // an instruction. This function is supposed to be overridden by
+ // the target-dependent packetizer.
+ virtual void initPacketizerState(void) { return; }
+
+ // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
+ virtual bool ignorePseudoInstruction(MachineInstr *I,
+ MachineBasicBlock *MBB) {
+ return false;
+ }
+
+ // isSoloInstruction - return true if instruction MI cannot be packetized
+ // with any other instruction, which means that MI itself is a packet.
+ virtual bool isSoloInstruction(MachineInstr *MI) {
+ return true;
+ }
+
+ // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
+ // together.
+ virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
+ return false;
+ }
+
+ // isLegalToPruneDependencies - Is it legal to prune the dependence between SUI
+ // and SUJ.
+ virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
+ return false;
+ }
+
+};
+}
+
+#endif
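The header comment above describes the DFA abstractly; a toy version makes
the table-driven lookup concrete. This sketch assumes a row format of
(instruction class, next state) per state with an entry table giving each
state's first row — an illustrative guess, not the TableGen-generated layout.
The machine here has one ALU and one MEM slot.

#include <cstdio>

// States: 0 = empty packet, 1 = ALU taken, 2 = MEM taken, 3 = both taken.
// Instruction classes: 0 = ALU op, 1 = MEM op.
static const int StateInput[][2] = {
  {0, 1}, {1, 2},   // from state 0: ALU -> 1, MEM -> 2
  {1, 3},           // from state 1: only MEM -> 3 (a second ALU is illegal)
  {0, 3},           // from state 2: only ALU -> 3
};                  // state 3 has no rows: the packet is full
static const unsigned StateEntry[] = {0, 2, 3, 4, 4}; // first row per state

static bool canAdd(unsigned State, int Class, unsigned *Next) {
  for (unsigned R = StateEntry[State]; R != StateEntry[State + 1]; ++R)
    if (StateInput[R][0] == Class) { *Next = StateInput[R][1]; return true; }
  return false; // no transition: the instruction cannot join this packet
}

int main() {
  unsigned S = 0, N;
  printf("ALU: %d\n", canAdd(S, 0, &N));       // 1, packet = {ALU}
  S = N;
  printf("ALU again: %d\n", canAdd(S, 0, &N)); // 0, would need a new packet
  printf("MEM: %d\n", canAdd(S, 1, &N));       // 1, packet = {ALU, MEM}
  return 0;
}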
diff --git a/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h b/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
index 8aab3c6..a1d29b1 100644
--- a/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
+++ b/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
namespace llvm {
@@ -61,7 +62,7 @@ private:
/// Specialize WriteGraph, the standard implementation won't work.
raw_ostream &WriteGraph(raw_ostream &O, const EdgeBundles &G,
bool ShortNames = false,
- const std::string &Title = "");
+ const Twine &Title = "");
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/CodeGen/FastISel.h b/contrib/llvm/include/llvm/CodeGen/FastISel.h
index 18202d9..e57c8b1 100644
--- a/contrib/llvm/include/llvm/CodeGen/FastISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/FastISel.h
@@ -21,9 +21,11 @@
namespace llvm {
class AllocaInst;
+class Constant;
class ConstantFP;
class FunctionLoweringInfo;
class Instruction;
+class LoadInst;
class MachineBasicBlock;
class MachineConstantPool;
class MachineFunction;
@@ -36,7 +38,8 @@ class TargetLowering;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
-class LoadInst;
+class User;
+class Value;
/// FastISel - This is a fast-path instruction selection class that
/// generates poor code and doesn't support illegal types or non-trivial
@@ -358,6 +361,8 @@ private:
bool SelectExtractValue(const User *I);
+ bool SelectInsertValue(const User *I);
+
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
@@ -378,6 +383,10 @@ private:
/// hasTrivialKill - Test whether the given value has exactly one use.
bool hasTrivialKill(const Value *V) const;
+
+ /// removeDeadCode - Remove all dead instructions between iterators I and E.
+ void removeDeadCode(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator E);
};
}
diff --git a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
index 09dac85..8cf22ec 100644
--- a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -21,10 +21,8 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
-#ifndef NDEBUG
-#include "llvm/ADT/SmallSet.h"
-#endif
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/ISDOpcodes.h"
@@ -98,8 +96,8 @@ public:
MachineBasicBlock::iterator InsertPt;
#ifndef NDEBUG
- SmallSet<const Instruction *, 8> CatchInfoLost;
- SmallSet<const Instruction *, 8> CatchInfoFound;
+ SmallPtrSet<const Instruction *, 8> CatchInfoLost;
+ SmallPtrSet<const Instruction *, 8> CatchInfoFound;
#endif
struct LiveOutInfo {
@@ -112,7 +110,7 @@ public:
/// VisitedBBs - The set of basic blocks visited thus far by instruction
/// selection.
- DenseSet<const BasicBlock*> VisitedBBs;
+ SmallPtrSet<const BasicBlock*, 4> VisitedBBs;
/// PHINodesToUpdate - A list of phi instructions whose operand list will
/// be updated after processing the current basic block.
@@ -202,7 +200,7 @@ public:
/// setArgumentFrameIndex - Record frame index for the byval
/// argument.
void setArgumentFrameIndex(const Argument *A, int FI);
-
+
/// getArgumentFrameIndex - Get frame index for the byval argument.
int getArgumentFrameIndex(const Argument *A);
@@ -211,16 +209,18 @@ private:
IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
};
+/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
+/// being passed to this variadic function, and set the MachineModuleInfo's
+/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
+/// reference to _fltused on Windows, which will link in MSVCRT's
+/// floating-point support.
+void ComputeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo *MMI);
+
/// AddCatchInfo - Extract the personality and type infos from an eh.selector
/// call, and add them to the specified machine basic block.
void AddCatchInfo(const CallInst &I,
MachineModuleInfo *MMI, MachineBasicBlock *MBB);
-/// CopyCatchInfo - Copy catch information from SuccBB (or one of its
-/// successors) to LPad.
-void CopyCatchInfo(const BasicBlock *SuccBB, const BasicBlock *LPad,
- MachineModuleInfo *MMI, FunctionLoweringInfo &FLI);
-
/// AddLandingPadInfo - Extract the exception handling information from the
/// landingpad instruction and add them to the specified machine module info.
void AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
diff --git a/contrib/llvm/include/llvm/CodeGen/GCStrategy.h b/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
index cd760db..1cbd36a 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
@@ -37,6 +37,7 @@
#define LLVM_CODEGEN_GCSTRATEGY_H
#include "llvm/CodeGen/GCMetadata.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Support/Registry.h"
#include <string>
@@ -68,6 +69,8 @@ namespace llvm {
bool CustomReadBarriers; //< Default is to insert loads.
bool CustomWriteBarriers; //< Default is to insert stores.
bool CustomRoots; //< Default is to pass through to backend.
+ bool CustomSafePoints; //< Default is to use NeededSafePoints
+ // to find safe points.
bool InitRoots; //< If set, roots are nulled during lowering.
bool UsesMetadata; //< If set, backend must emit metadata tables.
@@ -87,7 +90,9 @@ namespace llvm {
/// needsSafePoints - True if safe points of any kind are required. By
// default, none are recorded.
- bool needsSafePoints() const { return NeededSafePoints != 0; }
+ bool needsSafePoints() const {
+ return CustomSafePoints || NeededSafePoints != 0;
+ }
/// needsSafePoint(Kind) - True if the given kind of safe point is
// required. By default, none are recorded.
@@ -109,6 +114,11 @@ namespace llvm {
/// can generate a stack map. If true, then
// performCustomLowering must delete them.
bool customRoots() const { return CustomRoots; }
+
+ /// customSafePoints - By default, the GC analysis will find safe
+ /// points according to NeededSafePoints. If true,
+ /// then findCustomSafePoints must create them.
+ bool customSafePoints() const { return CustomSafePoints; }
/// initializeRoots - If set, gcroot intrinsics should initialize their
// allocas to null before the first use. This is
@@ -135,6 +145,7 @@ namespace llvm {
/// which the LLVM IR can be modified.
virtual bool initializeCustomLowering(Module &F);
virtual bool performCustomLowering(Function &F);
+ virtual bool findCustomSafePoints(GCFunctionInfo& FI, MachineFunction& MF);
};
}
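For orientation, this is roughly how a collector plugin might opt into the
new hook. The flag and the virtual method come from the hunk above; the
class name is a placeholder, field visibility and registration are assumed
to follow the existing GCStrategy subclassing pattern, and this compiles
only inside the LLVM tree.

#include "llvm/CodeGen/GCStrategy.h"
using namespace llvm;

namespace {
  // Illustrative only: a strategy that computes its own safe points
  // instead of relying on the NeededSafePoints scan.
  class CustomSafePointGC : public GCStrategy {
  public:
    CustomSafePointGC() { CustomSafePoints = true; }
    virtual bool findCustomSafePoints(GCFunctionInfo &FI, MachineFunction &MF);
  };
}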
diff --git a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 184e96d..ab8ab5d 100644
--- a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -57,7 +57,7 @@ namespace ISD {
AssertSext, AssertZext,
// Various leaf nodes.
- BasicBlock, VALUETYPE, CONDCODE, Register,
+ BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask,
Constant, ConstantFP,
GlobalAddress, GlobalTLSAddress, FrameIndex,
JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
@@ -107,13 +107,6 @@ namespace ISD {
// and returns an outchain.
EH_SJLJ_LONGJMP,
- // OUTCHAIN = EH_SJLJ_DISPATCHSETUP(INCHAIN, setjmpval)
- // This corresponds to the eh.sjlj.dispatchsetup intrinsic. It takes an
- // input chain and the value returning from setjmp as inputs and returns an
- // outchain. By default, this does nothing. Targets can lower this to unwind
- // setup code if needed.
- EH_SJLJ_DISPATCHSETUP,
-
// TargetConstant* - Like Constant*, but the DAG does not do any folding,
// simplification, or lowering of the constant. They are used for constants
// which are known to fit in the immediate fields of their users, or for
@@ -319,6 +312,9 @@ namespace ISD {
/// Byte Swap and Counting operators.
BSWAP, CTTZ, CTLZ, CTPOP,
+ /// Bit counting operators with an undefined result for zero inputs.
+ CTTZ_ZERO_UNDEF, CTLZ_ZERO_UNDEF,
+
// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
// i1 then the high bits must conform to getBooleanContents.
SELECT,
@@ -327,6 +323,9 @@ namespace ISD {
// and #2), returning a vector result. All vectors have the same length.
// Much like the scalar select and setcc, each bit in the condition selects
// whether the corresponding result element is taken from op #1 or op #2.
+ // At first, the VSELECT condition is of vXi1 type. Later, targets may change
+ // the condition type in order to match the VSELECT node using a pattern.
+ // The condition follows the BooleanContent format of the target.
VSELECT,
// Select with condition operator - This selects between a true value and
diff --git a/contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h b/contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h
index 88e22d6..89f00e9 100644
--- a/contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h
+++ b/contrib/llvm/include/llvm/CodeGen/JITCodeEmitter.h
@@ -51,6 +51,7 @@ class Function;
/// occurred, more memory is allocated, and we reemit the code into it.
///
class JITCodeEmitter : public MachineCodeEmitter {
+ virtual void anchor();
public:
virtual ~JITCodeEmitter() {}
diff --git a/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h b/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
index 1ed2547..8fb31aa 100644
--- a/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
+++ b/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -85,11 +85,11 @@ namespace llvm {
virtual void dump(ScheduleDAG* DAG) const;
- // ScheduledNode - As nodes are scheduled, we look to see if there are any
+ // scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
- void ScheduledNode(SUnit *Node);
+ void scheduledNode(SUnit *Node);
private:
void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
diff --git a/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h b/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h
index 0271c5d..eb01f66c 100644
--- a/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h
+++ b/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h
@@ -153,6 +153,7 @@ private:
/// LexicalScope - This class is used to track scope information.
///
class LexicalScope {
+ virtual void anchor();
public:
LexicalScope(LexicalScope *P, const MDNode *D, const MDNode *I, bool A)
@@ -208,7 +209,7 @@ public:
Parent->closeInsnRange(NewScope);
}
- /// dominates - Return true if current scope dominsates given lexical scope.
+ /// dominates - Return true if current scope dominates given lexical scope.
bool dominates(const LexicalScope *S) const {
if (S == this)
return true;
diff --git a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
index 098dd0b..46dd004 100644
--- a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -31,24 +31,20 @@ namespace {
if (std::getenv("bar") != (char*) -1)
return;
- (void) llvm::createDeadMachineInstructionElimPass();
-
(void) llvm::createFastRegisterAllocator();
(void) llvm::createBasicRegisterAllocator();
- (void) llvm::createLinearScanRegisterAllocator();
(void) llvm::createGreedyRegisterAllocator();
(void) llvm::createDefaultPBQPRegisterAllocator();
llvm::linkOcamlGC();
llvm::linkShadowStackGC();
-
+
(void) llvm::createBURRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
- (void) llvm::createTDRRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
(void) llvm::createSourceListDAGScheduler(NULL,llvm::CodeGenOpt::Default);
(void) llvm::createHybridListDAGScheduler(NULL,llvm::CodeGenOpt::Default);
- (void) llvm::createTDListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
(void) llvm::createFastDAGScheduler(NULL, llvm::CodeGenOpt::Default);
(void) llvm::createDefaultScheduler(NULL, llvm::CodeGenOpt::Default);
+ (void) llvm::createVLIWDAGScheduler(NULL, llvm::CodeGenOpt::Default);
}
} ForceCodegenLinking; // Force link by creating a global definition.
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
index 2288c1a..a6008ab 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -43,12 +43,10 @@ namespace llvm {
private:
enum {
HAS_PHI_KILL = 1,
- REDEF_BY_EC = 1 << 1,
- IS_PHI_DEF = 1 << 2,
- IS_UNUSED = 1 << 3
+ IS_PHI_DEF = 1 << 1,
+ IS_UNUSED = 1 << 2
};
- MachineInstr *copy;
unsigned char flags;
public:
@@ -57,23 +55,22 @@ namespace llvm {
/// The ID number of this value.
unsigned id;
- /// The index of the defining instruction (if isDefAccurate() returns true).
+ /// The index of the defining instruction.
SlotIndex def;
/// VNInfo constructor.
- VNInfo(unsigned i, SlotIndex d, MachineInstr *c)
- : copy(c), flags(0), id(i), def(d)
+ VNInfo(unsigned i, SlotIndex d)
+ : flags(0), id(i), def(d)
{ }
/// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
- : copy(orig.copy), flags(orig.flags), id(i), def(orig.def)
+ : flags(orig.flags), id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo.
void copyFrom(VNInfo &src) {
flags = src.flags;
- copy = src.copy;
def = src.def;
}
@@ -86,19 +83,6 @@ namespace llvm {
flags = (flags | VNI->flags) & ~IS_UNUSED;
}
- /// For a register interval, if this VN was defined by a copy instr
- /// getCopy() returns a pointer to it, otherwise returns 0.
- /// For a stack interval the behaviour of this method is undefined.
- MachineInstr* getCopy() const { return copy; }
- /// For a register interval, set the copy member.
- /// This method should not be called on stack intervals as it may lead to
- /// undefined behavior.
- void setCopy(MachineInstr *c) { copy = c; }
-
- /// isDefByCopy - Return true when this value was defined by a copy-like
- /// instruction as determined by MachineInstr::isCopyLike.
- bool isDefByCopy() const { return copy != 0; }
-
/// Returns true if one or more kills are PHI nodes.
/// Obsolete, do not use!
bool hasPHIKill() const { return flags & HAS_PHI_KILL; }
@@ -110,17 +94,6 @@ namespace llvm {
flags &= ~HAS_PHI_KILL;
}
- /// Returns true if this value is re-defined by an early clobber somewhere
- /// during the live range.
- bool hasRedefByEC() const { return flags & REDEF_BY_EC; }
- /// Set the "redef by early clobber" flag on this value.
- void setHasRedefByEC(bool hasRedef) {
- if (hasRedef)
- flags |= REDEF_BY_EC;
- else
- flags &= ~REDEF_BY_EC;
- }
-
/// Returns true if this value is defined by a PHI instruction (or was,
/// PHI instructions may have been eliminated).
bool isPHIDef() const { return flags & IS_PHI_DEF; }
@@ -294,10 +267,9 @@ namespace llvm {
/// getNextValue - Create a new value number and return it. MIIdx specifies
/// the instruction that defines the value number.
- VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI,
- VNInfo::Allocator &VNInfoAllocator) {
+ VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) {
VNInfo *VNI =
- new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def, CopyMI);
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def);
valnos.push_back(VNI);
return VNI;
}
@@ -381,7 +353,7 @@ namespace llvm {
/// point is not contained in the half-open live range. It is usually the
/// getDefIndex() slot following its last use.
bool killedAt(SlotIndex index) const {
- const_iterator r = find(index.getUseIndex());
+ const_iterator r = find(index.getRegSlot(true));
return r != end() && r->end == index;
}
@@ -405,6 +377,14 @@ namespace llvm {
return I == end() ? 0 : &*I;
}
+ const LiveRange *getLiveRangeBefore(SlotIndex Idx) const {
+ return getLiveRangeContaining(Idx.getPrevSlot());
+ }
+
+ LiveRange *getLiveRangeBefore(SlotIndex Idx) {
+ return getLiveRangeContaining(Idx.getPrevSlot());
+ }
+
/// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
VNInfo *getVNInfoAt(SlotIndex Idx) const {
const_iterator I = FindLiveRangeContaining(Idx);
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 8ca58b8..76201c9 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -63,8 +63,34 @@ namespace llvm {
/// allocatableRegs_ - A bit vector of allocatable registers.
BitVector allocatableRegs_;
- /// CloneMIs - A list of clones as result of re-materialization.
- std::vector<MachineInstr*> CloneMIs;
+ /// reservedRegs_ - A bit vector of reserved registers.
+ BitVector reservedRegs_;
+
+ /// RegMaskSlots - Sorted list of instructions with register mask operands.
+ /// Always use the 'r' slot, RegMasks are normal clobbers, not early
+ /// clobbers.
+ SmallVector<SlotIndex, 8> RegMaskSlots;
+
+ /// RegMaskBits - This vector is parallel to RegMaskSlots, it holds a
+ /// pointer to the corresponding register mask. This pointer can be
+ /// recomputed as:
+ ///
+ /// MI = Indexes->getInstructionFromIndex(RegMaskSlots[N]);
+ /// unsigned OpNum = findRegMaskOperand(MI);
+ /// RegMaskBits[N] = MI->getOperand(OpNum).getRegMask();
+ ///
+ /// This is kept in a separate vector partly because some standard
+ /// libraries don't support lower_bound() with mixed objects, partly to
+ /// improve locality when searching in RegMaskSlots.
+ /// Also see the comment in LiveInterval::find().
+ SmallVector<const uint32_t*, 8> RegMaskBits;
+
+ /// For each basic block number, keep (begin, size) pairs indexing into the
+ /// RegMaskSlots and RegMaskBits arrays.
+ /// Note that basic block numbers may not be layout contiguous; that's why
+ /// we can't just keep track of the first register mask in each basic
+ /// block.
+ SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;
public:
static char ID; // Pass identification, replacement for typeid
@@ -105,6 +131,12 @@ namespace llvm {
return allocatableRegs_.test(reg);
}
+ /// isReserved - is the physical register reg reserved in the current
+ /// function
+ bool isReserved(unsigned reg) const {
+ return reservedRegs_.test(reg);
+ }
+
/// getScaledIntervalSize - get the size of an interval in "units,"
/// where every function is composed of one thousand units. This
/// measure scales properly with empty index slots in the function.
@@ -125,19 +157,6 @@ namespace llvm {
return (unsigned)(IntervalPercentage * indexes_->getFunctionSize());
}
- /// conflictsWithPhysReg - Returns true if the specified register is used or
- /// defined during the duration of the specified interval. Copies to and
- /// from li.reg are allowed. This method is only able to analyze simple
- /// ranges that stay within a single basic block. Anything else is
- /// considered a conflict.
- bool conflictsWithPhysReg(const LiveInterval &li, VirtRegMap &vrm,
- unsigned reg);
-
- /// conflictsWithAliasRef - Similar to conflictsWithPhysRegRef except
- /// it checks for alias uses and defs.
- bool conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
- SmallPtrSet<MachineInstr*,32> &JoinedCopies);
-
// Interval creation
LiveInterval &getOrCreateInterval(unsigned reg) {
Reg2IntervalMap::iterator I = r2iMap_.find(reg);
@@ -177,14 +196,6 @@ namespace llvm {
return indexes_;
}
- SlotIndex getZeroIndex() const {
- return indexes_->getZeroIndex();
- }
-
- SlotIndex getInvalidIndex() const {
- return indexes_->getInvalidIndex();
- }
-
/// isNotInMIMap - returns true if the specified machine instr has been
/// removed or was never entered in the map.
bool isNotInMIMap(const MachineInstr* Instr) const {
@@ -216,21 +227,11 @@ namespace llvm {
return li.liveAt(getMBBStartIdx(mbb));
}
- LiveRange* findEnteringRange(LiveInterval &li,
- const MachineBasicBlock *mbb) {
- return li.getLiveRangeContaining(getMBBStartIdx(mbb));
- }
-
bool isLiveOutOfMBB(const LiveInterval &li,
const MachineBasicBlock *mbb) const {
return li.liveAt(getMBBEndIdx(mbb).getPrevSlot());
}
- LiveRange* findExitingRange(LiveInterval &li,
- const MachineBasicBlock *mbb) {
- return li.getLiveRangeContaining(getMBBEndIdx(mbb).getPrevSlot());
- }
-
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
return indexes_->getMBBFromIndex(index);
}
@@ -247,19 +248,11 @@ namespace llvm {
indexes_->replaceMachineInstrInMaps(MI, NewMI);
}
- void InsertMBBInMaps(MachineBasicBlock *MBB) {
- indexes_->insertMBBInMaps(MBB);
- }
-
bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
return indexes_->findLiveInMBBs(Start, End, MBBs);
}
- void renumber() {
- indexes_->renumberIndexes();
- }
-
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
@@ -271,20 +264,6 @@ namespace llvm {
/// print - Implement the dump method.
virtual void print(raw_ostream &O, const Module* = 0) const;
- /// addIntervalsForSpills - Create new intervals for spilled defs / uses of
- /// the given interval. FIXME: It also returns the weight of the spill slot
- /// (if any is created) by reference. This is temporary.
- std::vector<LiveInterval*>
- addIntervalsForSpills(const LiveInterval& i,
- const SmallVectorImpl<LiveInterval*> *SpillIs,
- const MachineLoopInfo *loopInfo, VirtRegMap& vrm);
-
- /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
- /// around all defs and uses of the specified interval. Return true if it
- /// was able to cut its interval.
- bool spillPhysRegAroundRegDefsUses(const LiveInterval &li,
- unsigned PhysReg, VirtRegMap &vrm);
-
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable. Also returns true
/// by reference if all of the defs are load instructions.
@@ -292,33 +271,71 @@ namespace llvm {
const SmallVectorImpl<LiveInterval*> *SpillIs,
bool &isLoad);
- /// isReMaterializable - Returns true if the definition MI of the specified
- /// val# of the specified interval is re-materializable.
- bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
- MachineInstr *MI);
+ /// intervalIsInOneMBB - If LI is confined to a single basic block, return
+ /// a pointer to that block. If LI is live in to or out of any block,
+ /// return NULL.
+ MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
+
+ /// addKillFlags - Add kill flags to any instruction that kills a virtual
+ /// register.
+ void addKillFlags();
- /// getRepresentativeReg - Find the largest super register of the specified
- /// physical register.
- unsigned getRepresentativeReg(unsigned Reg) const;
+ /// handleMove - call this method to notify LiveIntervals that
+ /// instruction 'mi' has been moved within a basic block. This will update
+ /// the live intervals for all operands of mi. Moves between basic blocks
+ /// are not supported.
+ void handleMove(MachineInstr* MI);
- /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
- /// specified interval that conflicts with the specified physical register.
- unsigned getNumConflictsWithPhysReg(const LiveInterval &li,
- unsigned PhysReg) const;
+ /// handleMoveIntoBundle - Update intervals for operands of MI so that they
+ /// begin/end on the SlotIndex for BundleStart.
+ ///
+ /// Requires MI and BundleStart to have SlotIndexes, and assumes
+ /// existing liveness is accurate. BundleStart should be the first
+ /// instruction in the Bundle.
+ void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart);
+
+ // Register mask functions.
+ //
+ // Machine instructions may use a register mask operand to indicate that a
+ // large number of registers are clobbered by the instruction. This is
+ // typically used for calls.
+ //
+ // For compile time performance reasons, these clobbers are not recorded in
+ // the live intervals for individual physical registers. Instead,
+ // LiveIntervalAnalysis maintains a sorted list of instructions with
+ // register mask operands.
+
+ /// getRegMaskSlots - Returns a sorted array of slot indices of all
+ /// instructions with register mask operands.
+ ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }
+
+ /// getRegMaskSlotsInBlock - Returns a sorted array of slot indices of all
+ /// instructions with register mask operands in the basic block numbered
+ /// MBBNum.
+ ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
+ std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
+ return getRegMaskSlots().slice(P.first, P.second);
+ }
- /// intervalIsInOneMBB - Returns true if the specified interval is entirely
- /// within a single basic block.
- bool intervalIsInOneMBB(const LiveInterval &li) const;
+ /// getRegMaskBits() - Returns an array of register mask pointers
+ /// corresponding to getRegMaskSlots().
+ ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }
- /// getLastSplitPoint - Return the last possible insertion point in mbb for
- /// spilling and splitting code. This is the first terminator, or the call
- /// instruction if li is live into a landing pad successor.
- MachineBasicBlock::iterator getLastSplitPoint(const LiveInterval &li,
- MachineBasicBlock *mbb) const;
+ /// getRegMaskBitsInBlock - Returns an array of mask pointers corresponding
+ /// to getRegMaskSlotsInBlock(MBBNum).
+ ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
+ std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
+ return getRegMaskBits().slice(P.first, P.second);
+ }
- /// addKillFlags - Add kill flags to any instruction that kills a virtual
- /// register.
- void addKillFlags();
+ /// checkRegMaskInterference - Test if LI is live across any register mask
+ /// instructions, and compute a bit mask of physical registers that are not
+ /// clobbered by any of them.
+ ///
+ /// Returns false if LI doesn't cross any register mask instructions. In
+ /// that case, the bit vector is not filled in.
+ bool checkRegMaskInterference(LiveInterval &LI,
+ BitVector &UsableRegs);
private:
/// computeIntervals - Compute live intervals.
@@ -351,13 +368,12 @@ namespace llvm {
void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
MachineBasicBlock::iterator mi,
SlotIndex MIIdx, MachineOperand& MO,
- LiveInterval &interval,
- MachineInstr *CopyMI);
+ LiveInterval &interval);
/// handleLiveInRegister - Create interval for a livein register.
void handleLiveInRegister(MachineBasicBlock* mbb,
SlotIndex MIIdx,
- LiveInterval &interval, bool isAlias = false);
+ LiveInterval &interval);
/// getReMatImplicitUse - If the remat definition MI has one (for now, we
/// only allow one) virtual register operand, then its uses are implicitly
@@ -379,88 +395,12 @@ namespace llvm {
const SmallVectorImpl<LiveInterval*> *SpillIs,
bool &isLoad);
- /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
- /// slot / to reg or any rematerialized load into ith operand of specified
- /// MI. If it is successful, MI is updated with the newly created MI and
- /// returns true.
- bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
- MachineInstr *DefMI, SlotIndex InstrIdx,
- SmallVector<unsigned, 2> &Ops,
- bool isSS, int FrameIndex, unsigned Reg);
-
- /// canFoldMemoryOperand - Return true if the specified load / store
- /// folding is possible.
- bool canFoldMemoryOperand(MachineInstr *MI,
- SmallVector<unsigned, 2> &Ops,
- bool ReMatLoadSS) const;
-
- /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
- /// VNInfo that's after the specified index but is within the basic block.
- bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI,
- MachineBasicBlock *MBB,
- SlotIndex Idx) const;
-
- /// hasAllocatableSuperReg - Return true if the specified physical register
- /// has any super register that's allocatable.
- bool hasAllocatableSuperReg(unsigned Reg) const;
-
- /// SRInfo - Spill / restore info.
- struct SRInfo {
- SlotIndex index;
- unsigned vreg;
- bool canFold;
- SRInfo(SlotIndex i, unsigned vr, bool f)
- : index(i), vreg(vr), canFold(f) {}
- };
-
- bool alsoFoldARestore(int Id, SlotIndex index, unsigned vr,
- BitVector &RestoreMBBs,
- DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);
- void eraseRestoreInfo(int Id, SlotIndex index, unsigned vr,
- BitVector &RestoreMBBs,
- DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);
-
- /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
- /// spilled and create empty intervals for their uses.
- void handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
- const TargetRegisterClass* rc,
- std::vector<LiveInterval*> &NewLIs);
-
- /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
- /// interval on to-be re-materialized operands of MI) with new register.
- void rewriteImplicitOps(const LiveInterval &li,
- MachineInstr *MI, unsigned NewVReg, VirtRegMap &vrm);
-
- /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper
- /// functions for addIntervalsForSpills to rewrite uses / defs for the given
- /// live range.
- bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
- bool TrySplit, SlotIndex index, SlotIndex end,
- MachineInstr *MI, MachineInstr *OrigDefMI, MachineInstr *DefMI,
- unsigned Slot, int LdSlot,
- bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
- VirtRegMap &vrm, const TargetRegisterClass* rc,
- SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
- unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
- DenseMap<unsigned,unsigned> &MBBVRegsMap,
- std::vector<LiveInterval*> &NewLIs);
- void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
- LiveInterval::Ranges::const_iterator &I,
- MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
- bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
- VirtRegMap &vrm, const TargetRegisterClass* rc,
- SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
- BitVector &SpillMBBs,
- DenseMap<unsigned,std::vector<SRInfo> > &SpillIdxes,
- BitVector &RestoreMBBs,
- DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes,
- DenseMap<unsigned,unsigned> &MBBVRegsMap,
- std::vector<LiveInterval*> &NewLIs);
-
static LiveInterval* createInterval(unsigned Reg);
void printInstrs(raw_ostream &O) const;
void dumpInstrs() const;
+
+ class HMEditor;
};
} // End llvm namespace
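The parallel RegMaskSlots/RegMaskBits arrays added above reduce interference
checking to a binary search plus an AND-reduction over the masks a live range
crosses. A standalone sketch of that scheme, with SlotIndex simplified to
unsigned and a live range simplified to one [Start, End) segment (set bits
mean "register preserved", as with call-clobber masks):

#include <algorithm>
#include <cstdint>
#include <vector>

struct RegMaskInfo {
  std::vector<unsigned> Slots;        // sorted slots of mask instructions
  std::vector<const uint32_t*> Bits;  // parallel array of mask pointers

  // AND together every mask whose slot lies inside [Start, End). Returns
  // false (and leaves Usable untouched) if no mask instruction is crossed.
  bool checkInterference(unsigned Start, unsigned End,
                         uint32_t *Usable, unsigned Words) const {
    std::vector<unsigned>::const_iterator B = Slots.begin();
    std::vector<unsigned>::const_iterator I =
        std::lower_bound(Slots.begin(), Slots.end(), Start);
    std::vector<unsigned>::const_iterator E =
        std::lower_bound(I, Slots.end(), End);
    if (I == E)
      return false;
    std::fill(Usable, Usable + Words, ~uint32_t(0));
    for (; I != E; ++I)
      for (unsigned W = 0; W != Words; ++W)
        Usable[W] &= Bits[I - B][W];
    return true;
  }
};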
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeEdit.h b/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h
index 9b0a671..57a6193 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeEdit.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h
@@ -21,6 +21,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/Target/TargetMachine.h"
namespace llvm {
@@ -33,7 +34,9 @@ class VirtRegMap;
class LiveRangeEdit {
public:
/// Callback methods for LiveRangeEdit owners.
- struct Delegate {
+ class Delegate {
+ virtual void anchor();
+ public:
/// Called immediately before erasing a dead machine instruction.
virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
@@ -54,8 +57,11 @@ public:
private:
LiveInterval &parent_;
SmallVectorImpl<LiveInterval*> &newRegs_;
+ MachineRegisterInfo &MRI;
+ LiveIntervals &LIS;
+ VirtRegMap *VRM;
+ const TargetInstrInfo &TII;
Delegate *const delegate_;
- const SmallVectorImpl<LiveInterval*> *uselessRegs_;
/// firstNew_ - Index of the first register added to newRegs_.
const unsigned firstNew_;
@@ -72,34 +78,37 @@ private:
SmallPtrSet<const VNInfo*,4> rematted_;
/// scanRemattable - Identify the parent_ values that may rematerialize.
- void scanRemattable(LiveIntervals &lis,
- const TargetInstrInfo &tii,
- AliasAnalysis *aa);
+ void scanRemattable(AliasAnalysis *aa);
/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
- SlotIndex UseIdx, LiveIntervals &lis);
+ SlotIndex UseIdx);
/// foldAsLoad - If LI has a single use and a single def that can be folded as
/// a load, eliminate the register by folding the def into the use.
- bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead,
- MachineRegisterInfo&, LiveIntervals&, const TargetInstrInfo&);
+ bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead);
public:
/// Create a LiveRangeEdit for breaking down parent into smaller pieces.
/// @param parent The register being spilled or split.
/// @param newRegs List to receive any new registers created. This needn't be
/// empty initially, any existing registers are ignored.
- /// @param uselessRegs List of registers that can't be used when
- /// rematerializing values because they are about to be removed.
+ /// @param MF The MachineFunction the live range edit is taking place in.
+ /// @param lis The collection of all live intervals in this function.
+ /// @param vrm Map of virtual registers to physical registers for this
+ /// function. If NULL, no virtual register map updates will
+ /// be done. This could be the case if called before Regalloc.
LiveRangeEdit(LiveInterval &parent,
SmallVectorImpl<LiveInterval*> &newRegs,
- Delegate *delegate = 0,
- const SmallVectorImpl<LiveInterval*> *uselessRegs = 0)
+ MachineFunction &MF,
+ LiveIntervals &lis,
+ VirtRegMap *vrm,
+ Delegate *delegate = 0)
: parent_(parent), newRegs_(newRegs),
+ MRI(MF.getRegInfo()), LIS(lis), VRM(vrm),
+ TII(*MF.getTarget().getInstrInfo()),
delegate_(delegate),
- uselessRegs_(uselessRegs),
firstNew_(newRegs.size()),
scannedRemattable_(false) {}
@@ -118,32 +127,24 @@ public:
return makeArrayRef(newRegs_).slice(firstNew_);
}
- /// FIXME: Temporary accessors until we can get rid of
- /// LiveIntervals::AddIntervalsForSpills
- SmallVectorImpl<LiveInterval*> *getNewVRegs() { return &newRegs_; }
- const SmallVectorImpl<LiveInterval*> *getUselessVRegs() {
- return uselessRegs_;
- }
-
/// createFrom - Create a new virtual register based on OldReg.
- LiveInterval &createFrom(unsigned OldReg, LiveIntervals&, VirtRegMap&);
+ LiveInterval &createFrom(unsigned OldReg);
/// create - Create a new register with the same class and original slot as
/// parent.
- LiveInterval &create(LiveIntervals &LIS, VirtRegMap &VRM) {
- return createFrom(getReg(), LIS, VRM);
+ LiveInterval &create() {
+ return createFrom(getReg());
}
/// anyRematerializable - Return true if any parent values may be
/// rematerializable.
/// This function must be called before any rematerialization is attempted.
- bool anyRematerializable(LiveIntervals&, const TargetInstrInfo&,
- AliasAnalysis*);
+ bool anyRematerializable(AliasAnalysis*);
/// checkRematerializable - Manually add VNI to the list of rematerializable
/// values if DefMI may be rematerializable.
bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
- const TargetInstrInfo&, AliasAnalysis*);
+ AliasAnalysis*);
/// Remat - Information needed to rematerialize at a specific location.
struct Remat {
@@ -157,8 +158,7 @@ public:
/// When cheapAsAMove is set, only cheap remats are allowed.
bool canRematerializeAt(Remat &RM,
SlotIndex UseIdx,
- bool cheapAsAMove,
- LiveIntervals &lis);
+ bool cheapAsAMove);
/// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
/// instruction into MBB before MI. The new instruction is mapped, but
@@ -168,8 +168,6 @@ public:
MachineBasicBlock::iterator MI,
unsigned DestReg,
const Remat &RM,
- LiveIntervals&,
- const TargetInstrInfo&,
const TargetRegisterInfo&,
bool Late = false);
@@ -186,18 +184,21 @@ public:
/// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
/// to erase it from LIS.
- void eraseVirtReg(unsigned Reg, LiveIntervals &LIS);
+ void eraseVirtReg(unsigned Reg);
/// eliminateDeadDefs - Try to delete machine instructions that are now dead
/// (allDefsAreDead returns true). This may cause live intervals to be trimmed
/// and further dead defs to be eliminated.
+ /// RegsBeingSpilled lists registers currently being spilled by the register
+ /// allocator. These registers should not be split into new intervals
+ /// as currently those new intervals are not guaranteed to spill.
void eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
- LiveIntervals&, VirtRegMap&,
- const TargetInstrInfo&);
+ ArrayRef<unsigned> RegsBeingSpilled
+ = ArrayRef<unsigned>());
/// calculateRegClassAndHint - Recompute register class and hint for each new
/// register.
- void calculateRegClassAndHint(MachineFunction&, LiveIntervals&,
+ void calculateRegClassAndHint(MachineFunction&,
const MachineLoopInfo&);
};
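The net effect of the LiveRangeEdit change is that the context objects are
captured once at construction instead of being threaded through every call.
A hedged sketch of the new calling convention follows; it compiles only
inside the LLVM tree, and all names other than LiveRangeEdit's own members
are placeholders.

#include "llvm/CodeGen/LiveRangeEdit.h"
using namespace llvm;

static void editSketch(LiveInterval &Parent,
                       SmallVectorImpl<LiveInterval*> &NewRegs,
                       MachineFunction &MF, LiveIntervals &LIS,
                       VirtRegMap &VRM, AliasAnalysis *AA,
                       SmallVectorImpl<MachineInstr*> &Dead) {
  // MF, LIS and VRM are captured here once; TII is derived from MF.
  LiveRangeEdit Edit(Parent, NewRegs, MF, LIS, &VRM);
  if (Edit.anyRematerializable(AA)) {   // was anyRematerializable(LIS, TII, AA)
    // remat queries now go through Edit without extra analysis arguments
  }
  Edit.eliminateDeadDefs(Dead);         // was eliminateDeadDefs(Dead, LIS, VRM, TII)
}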
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveVariables.h b/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
index 7ba901f..d4bb409 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
@@ -85,17 +85,11 @@ public:
///
SparseBitVector<> AliveBlocks;
- /// NumUses - Number of uses of this register across the entire function.
- ///
- unsigned NumUses;
-
/// Kills - List of MachineInstructions which are the last use of this
/// virtual register (kill it) in their basic block.
///
std::vector<MachineInstr*> Kills;
- VarInfo() : NumUses(0) {}
-
/// removeKill - Delete a kill corresponding to the specified
/// machine instruction. Returns true if there was a kill
/// corresponding to this instruction, false otherwise.
@@ -166,6 +160,9 @@ private: // Intermediate data structures
/// the last use of the whole register.
bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI);
+ /// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
+ void HandleRegMask(const MachineOperand&);
+
void HandlePhysRegUse(unsigned Reg, MachineInstr *MI);
void HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
SmallVector<unsigned, 4> &Defs);
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index 5a20e95..ef9c0c2 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -77,6 +77,7 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
/// (disable optimization).
std::vector<uint32_t> Weights;
typedef std::vector<uint32_t>::iterator weight_iterator;
+ typedef std::vector<uint32_t>::const_iterator const_weight_iterator;
/// LiveIns - Keep track of the physical registers that are livein of
/// the basicblock.
@@ -84,8 +85,9 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
/// Alignment - Alignment of the basic block. Zero if the basic block does
/// not need to be aligned.
+ /// The alignment is specified as log2(bytes).
unsigned Alignment;
-
+
/// IsLandingPad - Indicate that this basic block is entered via an
/// exception handler.
bool IsLandingPad;
@@ -115,6 +117,10 @@ public:
/// "(null)".
StringRef getName() const;
+ /// getFullName - Return a formatted string to identify this block and its
+ /// parent function.
+ std::string getFullName() const;
+
/// hasAddressTaken - Test whether this block is potentially the target
/// of an indirect branch.
bool hasAddressTaken() const { return AddressTaken; }
@@ -128,10 +134,89 @@ public:
const MachineFunction *getParent() const { return xParent; }
MachineFunction *getParent() { return xParent; }
- typedef Instructions::iterator iterator;
- typedef Instructions::const_iterator const_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ /// bundle_iterator - MachineBasicBlock iterator that automatically skips over
+ /// MIs that are inside bundles (i.e. walk top level MIs only).
+ template<typename Ty, typename IterTy>
+ class bundle_iterator
+ : public std::iterator<std::bidirectional_iterator_tag, Ty, ptrdiff_t> {
+ IterTy MII;
+
+ public:
+ bundle_iterator(IterTy mii) : MII(mii) {
+ assert(!MII->isInsideBundle() &&
+ "It's not legal to initialize bundle_iterator with a bundled MI");
+ }
+
+ bundle_iterator(Ty &mi) : MII(mi) {
+ assert(!mi.isInsideBundle() &&
+ "It's not legal to initialize bundle_iterator with a bundled MI");
+ }
+ bundle_iterator(Ty *mi) : MII(mi) {
+ assert((!mi || !mi->isInsideBundle()) &&
+ "It's not legal to initialize bundle_iterator with a bundled MI");
+ }
+ bundle_iterator(const bundle_iterator &I) : MII(I.MII) {}
+ bundle_iterator() : MII(0) {}
+
+ Ty &operator*() const { return *MII; }
+ Ty *operator->() const { return &operator*(); }
+
+ operator Ty*() const { return MII; }
+
+ bool operator==(const bundle_iterator &x) const {
+ return MII == x.MII;
+ }
+ bool operator!=(const bundle_iterator &x) const {
+ return !operator==(x);
+ }
+
+ // Increment and decrement operators...
+ bundle_iterator &operator--() { // predecrement - Back up
+ do {
+ --MII;
+ } while (MII->isInsideBundle());
+ return *this;
+ }
+ bundle_iterator &operator++() { // preincrement - Advance
+ do {
+ ++MII;
+ } while (MII->isInsideBundle());
+ return *this;
+ }
+ bundle_iterator operator--(int) { // postdecrement operators...
+ bundle_iterator tmp = *this;
+ do {
+ --MII;
+ } while (MII->isInsideBundle());
+ return tmp;
+ }
+ bundle_iterator operator++(int) { // postincrement operators...
+ bundle_iterator tmp = *this;
+ do {
+ ++MII;
+ } while (MII->isInsideBundle());
+ return tmp;
+ }
+
+ IterTy getInstrIterator() const {
+ return MII;
+ }
+ };
+
+ typedef Instructions::iterator instr_iterator;
+ typedef Instructions::const_iterator const_instr_iterator;
+ typedef std::reverse_iterator<instr_iterator> reverse_instr_iterator;
+ typedef
+ std::reverse_iterator<const_instr_iterator> const_reverse_instr_iterator;
+
+ typedef
+ bundle_iterator<MachineInstr,instr_iterator> iterator;
+ typedef
+ bundle_iterator<const MachineInstr,const_instr_iterator> const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
unsigned size() const { return (unsigned)Insts.size(); }
bool empty() const { return Insts.empty(); }
@@ -141,15 +226,53 @@ public:
const MachineInstr& front() const { return Insts.front(); }
const MachineInstr& back() const { return Insts.back(); }
+ instr_iterator instr_begin() { return Insts.begin(); }
+ const_instr_iterator instr_begin() const { return Insts.begin(); }
+ instr_iterator instr_end() { return Insts.end(); }
+ const_instr_iterator instr_end() const { return Insts.end(); }
+ reverse_instr_iterator instr_rbegin() { return Insts.rbegin(); }
+ const_reverse_instr_iterator instr_rbegin() const { return Insts.rbegin(); }
+ reverse_instr_iterator instr_rend () { return Insts.rend(); }
+ const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
+
iterator begin() { return Insts.begin(); }
const_iterator begin() const { return Insts.begin(); }
- iterator end() { return Insts.end(); }
- const_iterator end() const { return Insts.end(); }
- reverse_iterator rbegin() { return Insts.rbegin(); }
- const_reverse_iterator rbegin() const { return Insts.rbegin(); }
+ iterator end() {
+ instr_iterator II = instr_end();
+ if (II != instr_begin()) {
+ while (II->isInsideBundle())
+ --II;
+ }
+ return II;
+ }
+ const_iterator end() const {
+ const_instr_iterator II = instr_end();
+ if (II != instr_begin()) {
+ while (II->isInsideBundle())
+ --II;
+ }
+ return II;
+ }
+ reverse_iterator rbegin() {
+ reverse_instr_iterator II = instr_rbegin();
+ if (II != instr_rend()) {
+ while (II->isInsideBundle())
+ ++II;
+ }
+ return II;
+ }
+ const_reverse_iterator rbegin() const {
+ const_reverse_instr_iterator II = instr_rbegin();
+ if (II != instr_rend()) {
+ while (II->isInsideBundle())
+ ++II;
+ }
+ return II;
+ }
reverse_iterator rend () { return Insts.rend(); }
const_reverse_iterator rend () const { return Insts.rend(); }
+
// Machine-CFG iterators
typedef std::vector<MachineBasicBlock *>::iterator pred_iterator;
typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator;
@@ -219,10 +342,12 @@ public:
bool livein_empty() const { return LiveIns.empty(); }
/// getAlignment - Return alignment of the basic block.
+ /// The alignment is specified as log2(bytes).
///
unsigned getAlignment() const { return Alignment; }
/// setAlignment - Set alignment of the basic block.
+ /// The alignment is specified as log2(bytes).
///
void setAlignment(unsigned Align) { Alignment = Align; }
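As a quick illustration of the log2(bytes) encoding noted above (a sketch,
not part of the patch; MBB is an assumed MachineBasicBlock pointer):

    MBB->setAlignment(4);              // request 1 << 4 == 16-byte alignment
    assert(MBB->getAlignment() == 4);  // the stored value is log2, not bytes
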
@@ -239,7 +364,7 @@ public:
const MachineBasicBlock *getLandingPadSuccessor() const;
// Code Layout methods.
-
+
/// moveBefore/moveAfter - move 'this' block before or after the specified
/// block. This only moves the block, it does not modify the CFG or adjust
/// potential fall-throughs at the end of the block.
@@ -286,7 +411,7 @@ public:
/// in transferSuccessors, and update PHI operands in the successor blocks
/// which refer to fromMBB to refer to this.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB);
-
+
/// isSuccessor - Return true if the specified MBB is a successor of this
/// block.
bool isSuccessor(const MachineBasicBlock *MBB) const;
@@ -304,7 +429,7 @@ public:
/// branch to do so (e.g., a table jump). True is a conservative answer.
bool canFallThrough();
- /// Returns a pointer to the first instructon in this block that is not a
+  /// Returns a pointer to the first instruction in this block that is not a
/// PHINode instruction. When adding instruction to the beginning of the
/// basic block, they should be added before the returned value, not before
/// the first instruction, which might be PHI.
@@ -320,18 +445,16 @@ public:
/// instruction of this basic block. If a terminator does not exist,
/// it returns end()
iterator getFirstTerminator();
+ const_iterator getFirstTerminator() const;
- const_iterator getFirstTerminator() const {
- return const_cast<MachineBasicBlock*>(this)->getFirstTerminator();
- }
+  /// getFirstInstrTerminator - Same as getFirstTerminator, but it ignores
+  /// bundles and returns an instr_iterator instead.
+ instr_iterator getFirstInstrTerminator();
/// getLastNonDebugInstr - returns an iterator to the last non-debug
/// instruction in the basic block, or end()
iterator getLastNonDebugInstr();
-
- const_iterator getLastNonDebugInstr() const {
- return const_cast<MachineBasicBlock*>(this)->getLastNonDebugInstr();
- }
+ const_iterator getLastNonDebugInstr() const;
/// SplitCriticalEdge - Split the critical edge from this block to the
/// given successor block, and return the newly created block, or null
@@ -344,38 +467,88 @@ public:
void pop_front() { Insts.pop_front(); }
void pop_back() { Insts.pop_back(); }
void push_back(MachineInstr *MI) { Insts.push_back(MI); }
+
template<typename IT>
- void insert(iterator I, IT S, IT E) { Insts.insert(I, S, E); }
- iterator insert(iterator I, MachineInstr *M) { return Insts.insert(I, M); }
- iterator insertAfter(iterator I, MachineInstr *M) {
- return Insts.insertAfter(I, M);
+ void insert(instr_iterator I, IT S, IT E) {
+ Insts.insert(I, S, E);
+ }
+ instr_iterator insert(instr_iterator I, MachineInstr *M) {
+ return Insts.insert(I, M);
+ }
+ instr_iterator insertAfter(instr_iterator I, MachineInstr *M) {
+ return Insts.insertAfter(I, M);
}
- // erase - Remove the specified element or range from the instruction list.
- // These functions delete any instructions removed.
- //
- iterator erase(iterator I) { return Insts.erase(I); }
- iterator erase(iterator I, iterator E) { return Insts.erase(I, E); }
- MachineInstr *remove(MachineInstr *I) { return Insts.remove(I); }
- void clear() { Insts.clear(); }
+ template<typename IT>
+ void insert(iterator I, IT S, IT E) {
+ Insts.insert(I.getInstrIterator(), S, E);
+ }
+ iterator insert(iterator I, MachineInstr *M) {
+ return Insts.insert(I.getInstrIterator(), M);
+ }
+ iterator insertAfter(iterator I, MachineInstr *M) {
+ return Insts.insertAfter(I.getInstrIterator(), M);
+ }
+
+ /// erase - Remove the specified element or range from the instruction list.
+ /// These functions delete any instructions removed.
+ ///
+ instr_iterator erase(instr_iterator I) {
+ return Insts.erase(I);
+ }
+ instr_iterator erase(instr_iterator I, instr_iterator E) {
+ return Insts.erase(I, E);
+ }
+ instr_iterator erase_instr(MachineInstr *I) {
+ instr_iterator MII(I);
+ return erase(MII);
+ }
+
+ iterator erase(iterator I);
+ iterator erase(iterator I, iterator E) {
+ return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
+ }
+ iterator erase(MachineInstr *I) {
+ iterator MII(I);
+ return erase(MII);
+ }
+
+ /// remove - Remove the instruction from the instruction list. This function
+  /// does not delete the instruction. WARNING: If the specified instruction
+  /// is a bundle, this function will remove all the bundled instructions as
+  /// well. It is up to the caller to keep a list of the
+ /// bundled instructions and re-insert them if desired. This function is
+ /// *not recommended* for manipulating instructions with bundles. Use
+ /// splice instead.
+ MachineInstr *remove(MachineInstr *I);
+ void clear() {
+ Insts.clear();
+ }
/// splice - Take an instruction from MBB 'Other' at the position From,
/// and insert it into this MBB right before 'where'.
- void splice(iterator where, MachineBasicBlock *Other, iterator From) {
+ void splice(instr_iterator where, MachineBasicBlock *Other,
+ instr_iterator From) {
Insts.splice(where, Other->Insts, From);
}
+ void splice(iterator where, MachineBasicBlock *Other, iterator From);
/// splice - Take a block of instructions from MBB 'Other' in the range [From,
/// To), and insert them into this MBB right before 'where'.
+ void splice(instr_iterator where, MachineBasicBlock *Other, instr_iterator From,
+ instr_iterator To) {
+ Insts.splice(where, Other->Insts, From, To);
+ }
void splice(iterator where, MachineBasicBlock *Other, iterator From,
iterator To) {
- Insts.splice(where, Other->Insts, From, To);
+ Insts.splice(where.getInstrIterator(), Other->Insts,
+ From.getInstrIterator(), To.getInstrIterator());
}
/// removeFromParent - This method unlinks 'this' from the containing
/// function, and returns it, but does not delete it.
MachineBasicBlock *removeFromParent();
-
+
/// eraseFromParent - This method unlinks 'this' from the containing
/// function and deletes it.
void eraseFromParent();
@@ -396,7 +569,10 @@ public:
/// findDebugLoc - find the next valid DebugLoc starting at MBBI, skipping
/// any DBG_VALUE instructions. Return UnknownLoc if there is none.
- DebugLoc findDebugLoc(MachineBasicBlock::iterator &MBBI);
+ DebugLoc findDebugLoc(instr_iterator MBBI);
+ DebugLoc findDebugLoc(iterator MBBI) {
+ return findDebugLoc(MBBI.getInstrIterator());
+ }
// Debugging methods.
void dump() const;
@@ -418,13 +594,14 @@ private:
/// getWeightIterator - Return weight iterator corresponding to the I
/// successor iterator.
weight_iterator getWeightIterator(succ_iterator I);
+ const_weight_iterator getWeightIterator(const_succ_iterator I) const;
friend class MachineBranchProbabilityInfo;
/// getSuccWeight - Return weight of the edge from this block to MBB. This
/// method should NOT be called directly, but by using getEdgeWeight method
/// from MachineBranchProbabilityInfo class.
- uint32_t getSuccWeight(MachineBasicBlock *succ);
+ uint32_t getSuccWeight(const MachineBasicBlock *succ) const;
// Methods used to maintain doubly linked list of blocks...
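To illustrate the iterator split above: begin()/end() now yield
bundle_iterators that skip MIs marked InsideBundle, while
instr_begin()/instr_end() walk every MachineInstr. A minimal sketch, not
part of the patch; the helper name countInstrs is hypothetical:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    using namespace llvm;

    static void countInstrs(MachineBasicBlock &MBB,
                            unsigned &TopLevel, unsigned &Total) {
      TopLevel = Total = 0;
      // Bundle-aware walk: a finalized bundle counts once, via its BUNDLE MI.
      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
           I != E; ++I)
        ++TopLevel;
      // Raw walk: also visits the MIs inside each bundle.
      for (MachineBasicBlock::instr_iterator I = MBB.instr_begin(),
           E = MBB.instr_end(); I != E; ++I)
        ++Total;
    }
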
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
index 416d40b..a9c7bf7 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
@@ -20,6 +20,7 @@
namespace llvm {
+class MachineBasicBlock;
class MachineBranchProbabilityInfo;
template<class BlockT, class FunctionT, class BranchProbInfoT>
class BlockFrequencyImpl;
@@ -28,7 +29,8 @@ class BlockFrequencyImpl;
/// machine basic block frequencies.
class MachineBlockFrequencyInfo : public MachineFunctionPass {
- BlockFrequencyImpl<MachineBasicBlock, MachineFunction, MachineBranchProbabilityInfo> *MBFI;
+ BlockFrequencyImpl<MachineBasicBlock, MachineFunction,
+ MachineBranchProbabilityInfo> *MBFI;
public:
static char ID;
@@ -46,7 +48,7 @@ public:
/// that we should not rely on the value itself, but only on the comparison to
/// the other block frequencies. We do this to avoid using of floating points.
///
- BlockFrequency getBlockFreq(MachineBasicBlock *MBB) const;
+ BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
};
}
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
index d9673e2..af4db7d 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
@@ -25,6 +25,7 @@ class raw_ostream;
class MachineBasicBlock;
class MachineBranchProbabilityInfo : public ImmutablePass {
+ virtual void anchor();
// Default weight value. Used when we don't have information about the edge.
// TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
@@ -34,9 +35,6 @@ class MachineBranchProbabilityInfo : public ImmutablePass {
// weight to just "inherit" the non-zero weight of an adjacent successor.
static const uint32_t DEFAULT_WEIGHT = 16;
- // Get sum of the block successors' weights.
- uint32_t getSumForBlock(MachineBasicBlock *MBB) const;
-
public:
static char ID;
@@ -51,17 +49,27 @@ public:
// Return edge weight. If we don't have any informations about it - return
// DEFAULT_WEIGHT.
- uint32_t getEdgeWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst) const;
+ uint32_t getEdgeWeight(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const;
+
+ // Get sum of the block successors' weights, potentially scaling them to fit
+  // within 32 bits. If scaling is required, sets Scale based on the necessary
+ // adjustment. Any edge weights used with the sum should be divided by Scale.
+ uint32_t getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const;
// A 'Hot' edge is an edge which probability is >= 80%.
bool isEdgeHot(MachineBasicBlock *Src, MachineBasicBlock *Dst) const;
// Return a hot successor for the block BB or null if there isn't one.
+  // NB: This routine's complexity is linear in the number of successors.
MachineBasicBlock *getHotSucc(MachineBasicBlock *MBB) const;
// Return a probability as a fraction between 0 (0% probability) and
// 1 (100% probability), however the value is never equal to 0, and can be 1
// only iff SRC block has only one successor.
+  // NB: This routine's complexity is linear in the number of successors of
+ // Src. Querying sequentially for each successor's probability is a quadratic
+ // query pattern.
BranchProbability getEdgeProbability(MachineBasicBlock *Src,
MachineBasicBlock *Dst) const;
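Given the complexity notes above, a caller that needs the probability of
every successor should call getSumForBlock once and reuse the result. A
sketch under that assumption (MBPI and MBB supplied by the surrounding
pass):

    static void visitSuccProbs(const MachineBranchProbabilityInfo &MBPI,
                               const MachineBasicBlock *MBB) {
      uint32_t Scale = 1;
      uint32_t Sum = MBPI.getSumForBlock(MBB, Scale);  // one linear pass
      for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI) {
        // Per the contract above, weights used with Sum are divided by Scale.
        uint32_t W = MBPI.getEdgeWeight(MBB, *SI) / Scale;
        (void)W; (void)Sum;  // W / Sum approximates the edge probability
      }
    }
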
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h b/contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
index 428aada..86e8f27 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
@@ -20,6 +20,8 @@
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/DebugLoc.h"
+#include <string>
+
namespace llvm {
class MachineBasicBlock;
@@ -49,6 +51,7 @@ class MCSymbol;
/// occurred, more memory is allocated, and we reemit the code into it.
///
class MachineCodeEmitter {
+ virtual void anchor();
protected:
/// BufferBegin/BufferEnd - Pointers to the start and end of the memory
/// allocated for this code buffer.
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h b/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h
index 29f4f44..d6d65a2 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h
@@ -34,6 +34,7 @@ class raw_ostream;
/// Abstract base class for all machine specific constantpool value subclasses.
///
class MachineConstantPoolValue {
+ virtual void anchor();
Type *Ty;
public:
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineDominators.h b/contrib/llvm/include/llvm/CodeGen/MachineDominators.h
index ab944a2..82a4ac8 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineDominators.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineDominators.h
@@ -84,7 +84,8 @@ public:
// Loop through the basic block until we find A or B.
MachineBasicBlock::iterator I = BBA->begin();
- for (; &*I != A && &*I != B; ++I) /*empty*/;
+ for (; &*I != A && &*I != B; ++I)
+ /*empty*/ ;
//if(!DT.IsPostDominators) {
// A dominates B if it is found first in the basic block.
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index b347ca8..44402a9 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -465,7 +465,7 @@ public:
bool isSpillSlotObjectIndex(int ObjectIdx) const {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
- return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;;
+ return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;
}
/// isDeadObjectIndex - Returns true if the specified index corresponds to
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
index 6e08f7b..dda2dc7 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -120,10 +120,12 @@ class MachineFunction {
/// Alignment - The alignment of the function.
unsigned Alignment;
- /// CallsSetJmp - True if the function calls setjmp or sigsetjmp. This is used
- /// to limit optimizations which cannot reason about the control flow of
- /// setjmp.
- bool CallsSetJmp;
+ /// ExposesReturnsTwice - True if the function calls setjmp or related
+ /// functions with attribute "returns twice", but doesn't have
+ /// the attribute itself.
+ /// This is used to limit optimizations which cannot reason
+ /// about the control flow of such functions.
+ bool ExposesReturnsTwice;
MachineFunction(const MachineFunction &); // DO NOT IMPLEMENT
void operator=(const MachineFunction&); // DO NOT IMPLEMENT
@@ -187,20 +189,22 @@ public:
///
void setAlignment(unsigned A) { Alignment = A; }
- /// EnsureAlignment - Make sure the function is at least 'A' bits aligned.
+ /// EnsureAlignment - Make sure the function is at least 1 << A bytes aligned.
void EnsureAlignment(unsigned A) {
if (Alignment < A) Alignment = A;
}
- /// callsSetJmp - Returns true if the function calls setjmp or sigsetjmp.
- bool callsSetJmp() const {
- return CallsSetJmp;
+ /// exposesReturnsTwice - Returns true if the function calls setjmp or
+ /// any other similar functions with attribute "returns twice" without
+ /// having the attribute itself.
+ bool exposesReturnsTwice() const {
+ return ExposesReturnsTwice;
}
- /// setCallsSetJmp - Set a flag that indicates if there's a call to setjmp or
- /// sigsetjmp.
- void setCallsSetJmp(bool B) {
- CallsSetJmp = B;
+  /// setExposesReturnsTwice - Set a flag that indicates if there's a call to
+ /// a "returns twice" function.
+ void setExposesReturnsTwice(bool B) {
+ ExposesReturnsTwice = B;
}
/// getInfo - Keep track of various per-function pieces of information for
@@ -376,7 +380,8 @@ public:
MachineMemOperand *getMachineMemOperand(MachinePointerInfo PtrInfo,
unsigned f, uint64_t s,
unsigned base_alignment,
- const MDNode *TBAAInfo = 0);
+ const MDNode *TBAAInfo = 0,
+ const MDNode *Ranges = 0);
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given size.
@@ -437,6 +442,7 @@ template <> struct GraphTraits<MachineFunction*> :
typedef MachineFunction::iterator nodes_iterator;
static nodes_iterator nodes_begin(MachineFunction *F) { return F->begin(); }
static nodes_iterator nodes_end (MachineFunction *F) { return F->end(); }
+ static unsigned size (MachineFunction *F) { return F->size(); }
};
template <> struct GraphTraits<const MachineFunction*> :
public GraphTraits<const MachineBasicBlock*> {
@@ -452,6 +458,9 @@ template <> struct GraphTraits<const MachineFunction*> :
static nodes_iterator nodes_end (const MachineFunction *F) {
return F->end();
}
+ static unsigned size (const MachineFunction *F) {
+ return F->size();
+ }
};
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h b/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
index 50676ad..50ea206 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
@@ -26,17 +26,14 @@ class MachineFunction;
struct MachineFunctionAnalysis : public FunctionPass {
private:
const TargetMachine &TM;
- CodeGenOpt::Level OptLevel;
MachineFunction *MF;
unsigned NextFnNum;
public:
static char ID;
- explicit MachineFunctionAnalysis(const TargetMachine &tm,
- CodeGenOpt::Level OL = CodeGenOpt::Default);
+ explicit MachineFunctionAnalysis(const TargetMachine &tm);
~MachineFunctionAnalysis();
MachineFunction &getMF() const { return *MF; }
- CodeGenOpt::Level getOptLevel() const { return OptLevel; }
virtual const char* getPassName() const {
return "Machine Function Analysis";
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
index cae38f3..65093d7 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Target/TargetOpcodes.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/STLExtras.h"
@@ -53,9 +54,11 @@ public:
};
enum MIFlag {
- NoFlags = 0,
- FrameSetup = 1 << 0 // Instruction is used as a part of
+ NoFlags = 0,
+ FrameSetup = 1 << 0, // Instruction is used as a part of
// function frame setup code.
+ InsideBundle = 1 << 1 // Instruction is inside a bundle (not
+ // the first MI in a bundle)
};
private:
const MCInstrDesc *MCID; // Instruction descriptor.
@@ -71,9 +74,10 @@ private:
// anything other than to convey comment
// information to AsmPrinter.
+  uint16_t NumMemRefs;                   // number of memory references
+  mmo_iterator MemRefs;                  // information on memory references
+
std::vector<MachineOperand> Operands; // the operands
- mmo_iterator MemRefs; // information on memory references
- mmo_iterator MemRefsEnd;
MachineBasicBlock *Parent; // Pointer to the owning basic block.
DebugLoc debugLoc; // Source line information.
@@ -148,6 +152,12 @@ public:
AsmPrinterFlags |= (uint8_t)Flag;
}
+ /// clearAsmPrinterFlag - clear specific AsmPrinter flags
+ ///
+ void clearAsmPrinterFlag(CommentFlag Flag) {
+ AsmPrinterFlags &= ~Flag;
+ }
+
/// getFlags - Return the MI flags bitvector.
uint8_t getFlags() const {
return Flags;
@@ -167,12 +177,64 @@ public:
Flags = flags;
}
- /// clearAsmPrinterFlag - clear specific AsmPrinter flags
+ /// clearFlag - Clear a MI flag.
+ void clearFlag(MIFlag Flag) {
+ Flags &= ~((uint8_t)Flag);
+ }
+
+ /// isInsideBundle - Return true if MI is in a bundle (but not the first MI
+ /// in a bundle).
///
- void clearAsmPrinterFlag(CommentFlag Flag) {
- AsmPrinterFlags &= ~Flag;
+ /// A bundle looks like this before it's finalized:
+ /// ----------------
+ /// | MI |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+  /// In this case, the first MI starts a bundle but is not inside a bundle;
+  /// the next two MIs are considered "inside" the bundle.
+ ///
+ /// After a bundle is finalized, it looks like this:
+ /// ----------------
+ /// | Bundle |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+ /// The first instruction has the special opcode "BUNDLE". It's not "inside"
+ /// a bundle, but the next three MIs are.
+ bool isInsideBundle() const {
+ return getFlag(InsideBundle);
}
+ /// setIsInsideBundle - Set InsideBundle bit.
+ ///
+ void setIsInsideBundle(bool Val = true) {
+ if (Val)
+ setFlag(InsideBundle);
+ else
+ clearFlag(InsideBundle);
+ }
+
+  /// isBundled - Return true if this instruction is part of a bundle. This
+  /// is true if either this instruction or the instruction following it is
+  /// marked "InsideBundle".
+ bool isBundled() const;
+
/// getDebugLoc - Returns the debug location id of this MachineInstr.
///
DebugLoc getDebugLoc() const { return debugLoc; }
@@ -223,15 +285,285 @@ public:
/// Access to memory operands of the instruction
mmo_iterator memoperands_begin() const { return MemRefs; }
- mmo_iterator memoperands_end() const { return MemRefsEnd; }
- bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
+ mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
+ bool memoperands_empty() const { return NumMemRefs == 0; }
/// hasOneMemOperand - Return true if this instruction has exactly one
/// MachineMemOperand.
bool hasOneMemOperand() const {
- return MemRefsEnd - MemRefs == 1;
+ return NumMemRefs == 1;
+ }
+
+ /// API for querying MachineInstr properties. They are the same as MCInstrDesc
+  /// queries, but they are bundle-aware.
+
+ enum QueryType {
+ IgnoreBundle, // Ignore bundles
+ AnyInBundle, // Return true if any instruction in bundle has property
+ AllInBundle // Return true if all instructions in bundle have property
+ };
+
+ /// hasProperty - Return true if the instruction (or in the case of a bundle,
+ /// the instructions inside the bundle) has the specified property.
+ /// The first argument is the property being queried.
+ /// The second argument indicates whether the query should look inside
+ /// instruction bundles.
+ bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
+ // Inline the fast path.
+ if (Type == IgnoreBundle || !isBundle())
+ return getDesc().getFlags() & (1 << MCFlag);
+
+ // If we have a bundle, take the slow path.
+ return hasPropertyInBundle(1 << MCFlag, Type);
+ }
+
+ /// isVariadic - Return true if this instruction can have a variable number of
+ /// operands. In this case, the variable operands will be after the normal
+ /// operands but before the implicit definitions and uses (if any are
+ /// present).
+ bool isVariadic(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Variadic, Type);
+ }
+
+  /// hasOptionalDef - Return true if this instruction has an optional
+  /// definition, e.g. ARM instructions which can set condition code if 's'
+  /// bit is set.
+ bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::HasOptionalDef, Type);
+ }
+
+ /// isPseudo - Return true if this is a pseudo instruction that doesn't
+ /// correspond to a real machine instruction.
+ ///
+ bool isPseudo(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Pseudo, Type);
+ }
+
+ bool isReturn(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Return, Type);
+ }
+
+ bool isCall(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Call, Type);
+ }
+
+ /// isBarrier - Returns true if the specified instruction stops control flow
+ /// from executing the instruction immediately following it. Examples include
+ /// unconditional branches and return instructions.
+ bool isBarrier(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Barrier, Type);
+ }
+
+  /// isTerminator - Returns true if this instruction is part of the
+  /// terminator for a basic block. Typically these are return and branch
+  /// instructions.
+ ///
+ /// Various passes use this to insert code into the bottom of a basic block,
+ /// but before control flow occurs.
+ bool isTerminator(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Terminator, Type);
+ }
+
+ /// isBranch - Returns true if this is a conditional, unconditional, or
+ /// indirect branch. Predicates below can be used to discriminate between
+ /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
+ /// get more information.
+ bool isBranch(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Branch, Type);
+ }
+
+ /// isIndirectBranch - Return true if this is an indirect branch, such as a
+ /// branch through a register.
+ bool isIndirectBranch(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::IndirectBranch, Type);
+ }
+
+ /// isConditionalBranch - Return true if this is a branch which may fall
+ /// through to the next instruction or may transfer control flow to some other
+ /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
+ /// information about this branch.
+ bool isConditionalBranch(QueryType Type = AnyInBundle) const {
+ return isBranch(Type) & !isBarrier(Type) & !isIndirectBranch(Type);
+ }
+
+ /// isUnconditionalBranch - Return true if this is a branch which always
+ /// transfers control flow to some other block. The
+ /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
+ /// about this branch.
+ bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
+ return isBranch(Type) & isBarrier(Type) & !isIndirectBranch(Type);
+ }
+
+  /// isPredicable - Return true if this instruction has a predicate operand
+  /// that controls execution. It may be set to 'always', or may be set to other
+ /// values. There are various methods in TargetInstrInfo that can be used to
+ /// control and modify the predicate in this instruction.
+ bool isPredicable(QueryType Type = AllInBundle) const {
+    // If it's a bundle, then all bundled instructions must be predicable for
+    // this to return true.
+ return hasProperty(MCID::Predicable, Type);
+ }
+
+ /// isCompare - Return true if this instruction is a comparison.
+ bool isCompare(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Compare, Type);
+ }
+
+ /// isMoveImmediate - Return true if this instruction is a move immediate
+ /// (including conditional moves) instruction.
+ bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::MoveImm, Type);
}
+ /// isBitcast - Return true if this instruction is a bitcast instruction.
+ ///
+ bool isBitcast(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Bitcast, Type);
+ }
+
+ /// isNotDuplicable - Return true if this instruction cannot be safely
+  /// duplicated. For example, if the instruction has unique labels attached
+ /// to it, duplicating it would cause multiple definition errors.
+ bool isNotDuplicable(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::NotDuplicable, Type);
+ }
+
+ /// hasDelaySlot - Returns true if the specified instruction has a delay slot
+ /// which must be filled by the code generator.
+ bool hasDelaySlot(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::DelaySlot, Type);
+ }
+
+ /// canFoldAsLoad - Return true for instructions that can be folded as
+ /// memory operands in other instructions. The most common use for this
+ /// is instructions that are simple loads from memory that don't modify
+ /// the loaded value in any way, but it can also be used for instructions
+ /// that can be expressed as constant-pool loads, such as V_SETALLONES
+ /// on x86, to allow them to be folded when it is beneficial.
+ /// This should only be set on instructions that return a value in their
+ /// only virtual register definition.
+ bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::FoldableAsLoad, Type);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Side Effect Analysis
+ //===--------------------------------------------------------------------===//
+
+ /// mayLoad - Return true if this instruction could possibly read memory.
+ /// Instructions with this flag set are not necessarily simple load
+ /// instructions, they may load a value and modify it, for example.
+ bool mayLoad(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::MayLoad, Type);
+ }
+
+
+ /// mayStore - Return true if this instruction could possibly modify memory.
+ /// Instructions with this flag set are not necessarily simple store
+ /// instructions, they may store a modified value based on their operands, or
+ /// may not actually modify anything, for example.
+ bool mayStore(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::MayStore, Type);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Flags that indicate whether an instruction can be modified by a method.
+ //===--------------------------------------------------------------------===//
+
+ /// isCommutable - Return true if this may be a 2- or 3-address
+ /// instruction (of the form "X = op Y, Z, ..."), which produces the same
+ /// result if Y and Z are exchanged. If this flag is set, then the
+ /// TargetInstrInfo::commuteInstruction method may be used to hack on the
+ /// instruction.
+ ///
+ /// Note that this flag may be set on instructions that are only commutable
+ /// sometimes. In these cases, the call to commuteInstruction will fail.
+ /// Also note that some instructions require non-trivial modification to
+ /// commute them.
+ bool isCommutable(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Commutable, Type);
+ }
+
+ /// isConvertibleTo3Addr - Return true if this is a 2-address instruction
+ /// which can be changed into a 3-address instruction if needed. Doing this
+ /// transformation can be profitable in the register allocator, because it
+ /// means that the instruction can use a 2-address form if possible, but
+ /// degrade into a less efficient form if the source and dest register cannot
+ /// be assigned to the same register. For example, this allows the x86
+ /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
+ /// is the same speed as the shift but has bigger code size.
+ ///
+ /// If this returns true, then the target must implement the
+ /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
+ /// is allowed to fail if the transformation isn't valid for this specific
+ /// instruction (e.g. shl reg, 4 on x86).
+ ///
+ bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::ConvertibleTo3Addr, Type);
+ }
+
+ /// usesCustomInsertionHook - Return true if this instruction requires
+ /// custom insertion support when the DAG scheduler is inserting it into a
+ /// machine basic block. If this is true for the instruction, it basically
+ /// means that it is a pseudo instruction used at SelectionDAG time that is
+ /// expanded out into magic code by the target when MachineInstrs are formed.
+ ///
+ /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
+ /// is used to insert this into the MachineBasicBlock.
+ bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::UsesCustomInserter, Type);
+ }
+
+ /// hasPostISelHook - Return true if this instruction requires *adjustment*
+ /// after instruction selection by calling a target hook. For example, this
+ /// can be used to fill in ARM 's' optional operand depending on whether
+ /// the conditional flag register is used.
+ bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::HasPostISelHook, Type);
+ }
+
+ /// isRematerializable - Returns true if this instruction is a candidate for
+ /// remat. This flag is deprecated, please don't use it anymore. If this
+ /// flag is set, the isReallyTriviallyReMaterializable() method is called to
+ /// verify the instruction is really rematable.
+ bool isRematerializable(QueryType Type = AllInBundle) const {
+ // It's only possible to re-mat a bundle if all bundled instructions are
+ // re-materializable.
+ return hasProperty(MCID::Rematerializable, Type);
+ }
+
+ /// isAsCheapAsAMove - Returns true if this instruction has the same cost (or
+ /// less) than a move instruction. This is useful during certain types of
+ /// optimizations (e.g., remat during two-address conversion or machine licm)
+ /// where we would like to remat or hoist the instruction, but not if it costs
+ /// more than moving the instruction into the appropriate register. Note, we
+ /// are not marking copies from and to the same register class with this flag.
+ bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
+ // Only returns true for a bundle if all bundled instructions are cheap.
+ // FIXME: This probably requires a target hook.
+ return hasProperty(MCID::CheapAsAMove, Type);
+ }
+
+  /// hasExtraSrcRegAllocReq - Returns true if this instruction's source operands
+ /// have special register allocation requirements that are not captured by the
+ /// operand register classes. e.g. ARM::STRD's two source registers must be an
+ /// even / odd pair, ARM::STM registers have to be in ascending order.
+ /// Post-register allocation passes should not attempt to change allocations
+ /// for sources of instructions with this flag.
+ bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
+ }
+
+  /// hasExtraDefRegAllocReq - Returns true if this instruction's def operands
+ /// have special register allocation requirements that are not captured by the
+ /// operand register classes. e.g. ARM::LDRD's two def registers must be an
+ /// even / odd pair, ARM::LDM registers have to be in ascending order.
+ /// Post-register allocation passes should not attempt to change allocations
+ /// for definitions of instructions with this flag.
+ bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::ExtraDefRegAllocReq, Type);
+ }
+
+
enum MICheckType {
CheckDefs, // Check all operands for equality
CheckKillDead, // Check all operands including kill / dead markers
@@ -281,6 +613,9 @@ public:
bool isRegSequence() const {
return getOpcode() == TargetOpcode::REG_SEQUENCE;
}
+ bool isBundle() const {
+ return getOpcode() == TargetOpcode::BUNDLE;
+ }
bool isCopy() const {
return getOpcode() == TargetOpcode::COPY;
}
@@ -300,6 +635,9 @@ public:
getOperand(0).getSubReg() == getOperand(1).getSubReg();
}
+ /// getBundleSize - Return the number of instructions inside the MI bundle.
+ unsigned getBundleSize() const;
+
/// readsRegister - Return true if the MachineInstr reads the specified
/// register. If TargetRegisterInfo is passed, then it also checks if there
/// is a read of a super-register.
@@ -372,6 +710,7 @@ public:
/// that are not dead are skipped. If Overlap is true, then it also looks for
/// defs that merely overlap the specified register. If TargetRegisterInfo is
/// non-null, then it also checks if there is a def of a super-register.
+ /// This may also return a register mask operand when Overlap is true.
int findRegisterDefOperandIdx(unsigned Reg,
bool isDead = false, bool Overlap = false,
const TargetRegisterInfo *TRI = NULL) const;
@@ -416,7 +755,7 @@ public:
/// isRegTiedToUseOperand - Given the index of a register def operand,
/// check if the register def is tied to a source operand, due to either
/// two-address elimination or inline assembly constraints. Returns the
- /// first tied use operand index by reference is UseOpIdx is not null.
+ /// first tied use operand index by reference if UseOpIdx is not null.
bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const;
/// isRegTiedToDefOperand - Return true if the use operand of the specified
@@ -448,6 +787,10 @@ public:
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound = false);
+ /// clearRegisterKills - Clear all kill flags affecting Reg. If RegInfo is
+ /// provided, this includes super-register kills.
+ void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
+
/// addRegisterDead - We have determined MI defined a register without a use.
/// Look for the operand that defines it and mark it as IsDead. If
/// AddIfNotFound is true, add a implicit operand if it's not found. Returns
@@ -462,7 +805,10 @@ public:
/// setPhysRegsDeadExcept - Mark every physreg used by this instruction as
/// dead except those in the UsedRegs list.
- void setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+ ///
+ /// On instructions with register mask operands, also add implicit-def
+ /// operands for all registers in UsedRegs.
+ void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
const TargetRegisterInfo &TRI);
/// isSafeToMove - Return true if it is safe to move this instruction. If
@@ -550,7 +896,7 @@ public:
/// list. This does not transfer ownership.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
MemRefs = NewMemRefs;
- MemRefsEnd = NewMemRefsEnd;
+ NumMemRefs = NewMemRefsEnd - NewMemRefs;
}
private:
@@ -572,6 +918,10 @@ private:
/// this instruction from their respective use lists. This requires that the
/// operands not be on their use lists yet.
void AddRegOperandsToUseLists(MachineRegisterInfo &RegInfo);
+
+ /// hasPropertyInBundle - Slow path for hasProperty when we're dealing with a
+ /// bundle.
+  /// MIs that are inside bundles (i.e., it walks top-level MIs only).
};
/// MachineInstrExpressionTrait - Special DenseMapInfo traits to compare
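A short sketch of how the bundle-aware property queries are meant to be
used (not from the patch; MI is an assumed MachineInstr pointer). Each
predicate picks a sensible default QueryType, so most call sites need no
explicit argument:

    static bool scanProperties(const MachineInstr *MI) {
      // Default AnyInBundle: true if any MI in the bundle may load.
      if (MI->mayLoad())
        return true;
      // Explicitly ignore bundling; query only this MI's descriptor.
      if (MI->isPseudo(MachineInstr::IgnoreBundle))
        return false;
      // Default AllInBundle: a bundle is rematerializable only if every
      // bundled MI is.
      return MI->isRematerializable();
    }
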
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index b989027..99849a6 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -34,6 +34,7 @@ namespace RegState {
Undef = 0x20,
EarlyClobber = 0x40,
Debug = 0x80,
+ DefineNoRead = Define | Undef,
ImplicitDefine = Implicit | Define,
ImplicitKill = Implicit | Kill
};
@@ -124,6 +125,11 @@ public:
return *this;
}
+ const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
+ MI->addOperand(MachineOperand::CreateRegMask(Mask));
+ return *this;
+ }
+
const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
MI->addMemOperand(*MI->getParent()->getParent(), MMO);
return *this;
@@ -209,6 +215,30 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
return MachineInstrBuilder(MI).addReg(DestReg, RegState::Define);
}
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::instr_iterator I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID,
+ unsigned DestReg) {
+ MachineInstr *MI = BB.getParent()->CreateMachineInstr(MCID, DL);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MI).addReg(DestReg, RegState::Define);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineInstr *I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID,
+ unsigned DestReg) {
+ if (I->isInsideBundle()) {
+ MachineBasicBlock::instr_iterator MII = I;
+ return BuildMI(BB, MII, DL, MCID, DestReg);
+ }
+
+ MachineBasicBlock::iterator MII = I;
+ return BuildMI(BB, MII, DL, MCID, DestReg);
+}
+
/// BuildMI - This version of the builder inserts the newly-built
/// instruction before the given position in the given MachineBasicBlock, and
/// does NOT take a destination register.
@@ -222,6 +252,28 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
return MachineInstrBuilder(MI);
}
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::instr_iterator I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID) {
+ MachineInstr *MI = BB.getParent()->CreateMachineInstr(MCID, DL);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MI);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineInstr *I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID) {
+ if (I->isInsideBundle()) {
+ MachineBasicBlock::instr_iterator MII = I;
+ return BuildMI(BB, MII, DL, MCID);
+ }
+
+ MachineBasicBlock::iterator MII = I;
+ return BuildMI(BB, MII, DL, MCID);
+}
+
/// BuildMI - This version of the builder inserts the newly-built
/// instruction at the end of the given MachineBasicBlock, and does NOT take a
/// destination register.
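A sketch of the new insertion-point dispatch (assumed context: DL, TII,
DestReg and SrcReg are in scope; insertCopyBefore is a hypothetical
helper). The MachineInstr* overload checks isInsideBundle() and forwards
to the instr_iterator or iterator form accordingly:

    static void insertCopyBefore(MachineBasicBlock &MBB, MachineInstr *InsertPt,
                                 DebugLoc DL, const TargetInstrInfo *TII,
                                 unsigned DestReg, unsigned SrcReg) {
      // Safe even when InsertPt is bundled: BuildMI picks the right overload.
      BuildMI(MBB, InsertPt, DL, TII->get(TargetOpcode::COPY), DestReg)
          .addReg(SrcReg);
    }
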
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
new file mode 100644
index 0000000..0fb4969
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
@@ -0,0 +1,203 @@
+//===-- CodeGen/MachineInstrBundle.h - MI bundle utilities ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utility functions to manipulate machine instruction
+// bundles.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+#define LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+
+namespace llvm {
+
+/// finalizeBundle - Finalize a machine instruction bundle which includes
+/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
+/// This routine adds a BUNDLE instruction to represent the bundle, it adds
+/// IsInternalRead markers to MachineOperands which are defined inside the
+/// bundle, and it copies externally visible defs and uses to the BUNDLE
+/// instruction.
+void finalizeBundle(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator FirstMI,
+ MachineBasicBlock::instr_iterator LastMI);
+
+/// finalizeBundle - Same functionality as the previous finalizeBundle except
+/// the last instruction in the bundle is not provided as an input. This is
+/// used in cases where bundles are pre-determined by marking instructions
+/// with 'InsideBundle' marker. It returns the MBB instruction iterator that
+/// points to the end of the bundle.
+MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator FirstMI);
+
+/// finalizeBundles - Finalize instruction bundles in the specified
+/// MachineFunction. Return true if any bundles are finalized.
+bool finalizeBundles(MachineFunction &MF);
+
+/// getBundleStart - Returns the first instruction in the bundle containing MI.
+///
+static inline MachineInstr *getBundleStart(MachineInstr *MI) {
+ MachineBasicBlock::instr_iterator I = MI;
+ while (I->isInsideBundle())
+ --I;
+ return I;
+}
+
+static inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
+ MachineBasicBlock::const_instr_iterator I = MI;
+ while (I->isInsideBundle())
+ --I;
+ return I;
+}
+
+//===----------------------------------------------------------------------===//
+// MachineOperand iterator
+//
+
+/// MachineOperandIteratorBase - Iterator that can visit all operands on a
+/// MachineInstr, or all operands on a bundle of MachineInstrs. This class is
+/// not intended to be used directly, use one of the sub-classes instead.
+///
+/// Intended use:
+///
+/// for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
+/// if (!MIO->isReg())
+/// continue;
+/// ...
+/// }
+///
+class MachineOperandIteratorBase {
+ MachineBasicBlock::instr_iterator InstrI, InstrE;
+ MachineInstr::mop_iterator OpI, OpE;
+
+ // If the operands on InstrI are exhausted, advance InstrI to the next
+ // bundled instruction with operands.
+ void advance() {
+ while (OpI == OpE) {
+ // Don't advance off the basic block, or into a new bundle.
+ if (++InstrI == InstrE || !InstrI->isInsideBundle())
+ break;
+ OpI = InstrI->operands_begin();
+ OpE = InstrI->operands_end();
+ }
+ }
+
+protected:
+ /// MachineOperandIteratorBase - Create an iterator that visits all operands
+ /// on MI, or all operands on every instruction in the bundle containing MI.
+ ///
+ /// @param MI The instruction to examine.
+ /// @param WholeBundle When true, visit all operands on the entire bundle.
+ ///
+ explicit MachineOperandIteratorBase(MachineInstr *MI, bool WholeBundle) {
+ if (WholeBundle) {
+ InstrI = getBundleStart(MI);
+ InstrE = MI->getParent()->instr_end();
+ } else {
+ InstrI = InstrE = MI;
+ ++InstrE;
+ }
+ OpI = InstrI->operands_begin();
+ OpE = InstrI->operands_end();
+ if (WholeBundle)
+ advance();
+ }
+
+ MachineOperand &deref() const { return *OpI; }
+
+public:
+ /// isValid - Returns true until all the operands have been visited.
+ bool isValid() const { return OpI != OpE; }
+
+ /// Preincrement. Move to the next operand.
+ void operator++() {
+ assert(isValid() && "Cannot advance MIOperands beyond the last operand");
+ ++OpI;
+ advance();
+ }
+
+ /// getOperandNo - Returns the number of the current operand relative to its
+ /// instruction.
+ ///
+ unsigned getOperandNo() const {
+ return OpI - InstrI->operands_begin();
+ }
+
+ /// RegInfo - Information about a virtual register used by a set of operands.
+ ///
+ struct RegInfo {
+    /// Reads - One of the operands reads the virtual register. This does not
+ /// include <undef> or <internal> use operands, see MO::readsReg().
+ bool Reads;
+
+ /// Writes - One of the operands writes the virtual register.
+ bool Writes;
+
+ /// Tied - Uses and defs must use the same register. This can be because of
+ /// a two-address constraint, or there may be a partial redefinition of a
+ /// sub-register.
+ bool Tied;
+ };
+
+ /// analyzeVirtReg - Analyze how the current instruction or bundle uses a
+ /// virtual register. This function should not be called after operator++(),
+ /// it expects a fresh iterator.
+ ///
+ /// @param Reg The virtual register to analyze.
+ /// @param Ops When set, this vector will receive an (MI, OpNum) entry for
+ /// each operand referring to Reg.
+ /// @returns A filled-in RegInfo struct.
+ RegInfo analyzeVirtReg(unsigned Reg,
+ SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = 0);
+};
+
+/// MIOperands - Iterate over operands of a single instruction.
+///
+class MIOperands : public MachineOperandIteratorBase {
+public:
+ MIOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, false) {}
+ MachineOperand &operator* () const { return deref(); }
+ MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIOperands - Iterate over operands of a single const instruction.
+///
+class ConstMIOperands : public MachineOperandIteratorBase {
+public:
+ ConstMIOperands(const MachineInstr *MI)
+ : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), false) {}
+ const MachineOperand &operator* () const { return deref(); }
+ const MachineOperand *operator->() const { return &deref(); }
+};
+
+/// MIBundleOperands - Iterate over all operands in a bundle of machine
+/// instructions.
+///
+class MIBundleOperands : public MachineOperandIteratorBase {
+public:
+ MIBundleOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, true) {}
+ MachineOperand &operator* () const { return deref(); }
+ MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIBundleOperands - Iterate over all operands in a const bundle of
+/// machine instructions.
+///
+class ConstMIBundleOperands : public MachineOperandIteratorBase {
+public:
+ ConstMIBundleOperands(const MachineInstr *MI)
+ : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), true) {}
+ const MachineOperand &operator* () const { return deref(); }
+ const MachineOperand *operator->() const { return &deref(); }
+};
+
+} // End llvm namespace
+
+#endif
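
Putting the new header together, a sketch of the intended workflow
(assumptions: the MIs following FirstMI were pre-marked with
setIsInsideBundle(), and BundleMI is any instruction in a finalized
bundle):

    static void finalizeAndScan(MachineBasicBlock &MBB,
                                MachineBasicBlock::instr_iterator FirstMI,
                                MachineInstr *BundleMI) {
      // Adds the BUNDLE MI and copies externally visible defs/uses onto it;
      // the bundle's extent comes from the pre-set InsideBundle flags.
      MachineBasicBlock::instr_iterator Next = finalizeBundle(MBB, FirstMI);
      (void)Next;

      // Visit every operand in the bundle containing BundleMI, following
      // the "Intended use" comment above.
      for (MIBundleOperands MIO(BundleMI); MIO.isValid(); ++MIO) {
        if (!MIO->isReg())
          continue;
        if (MIO->readsReg()) {
          // A register value flows into the bundle from outside.
        }
      }
    }
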
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
index 6264349..6bd6682 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -47,7 +47,12 @@ public:
/// EK_BlockAddress - Each entry is a plain address of block, e.g.:
/// .word LBB123
EK_BlockAddress,
-
+
+ /// EK_GPRel64BlockAddress - Each entry is an address of block, encoded
+ /// with a relocation as gp-relative, e.g.:
+ /// .gpdword LBB123
+ EK_GPRel64BlockAddress,
+
/// EK_GPRel32BlockAddress - Each entry is an address of block, encoded
/// with a relocation as gp-relative, e.g.:
/// .gprel32 LBB123
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
index 768ce47..1ac9080 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -22,6 +22,7 @@ namespace llvm {
class Value;
class FoldingSetNodeID;
+class MDNode;
class raw_ostream;
/// MachinePointerInfo - This class contains a discriminated union of
@@ -83,6 +84,7 @@ class MachineMemOperand {
uint64_t Size;
unsigned Flags;
const MDNode *TBAAInfo;
+ const MDNode *Ranges;
public:
/// Flags values. These may be or'd together.
@@ -95,14 +97,17 @@ public:
MOVolatile = 4,
/// The memory access is non-temporal.
MONonTemporal = 8,
+ /// The memory access is invariant.
+ MOInvariant = 16,
// This is the number of bits we need to represent flags.
- MOMaxBits = 4
+ MOMaxBits = 5
};
/// MachineMemOperand - Construct an MachineMemOperand object with the
/// specified PtrInfo, flags, size, and base alignment.
MachineMemOperand(MachinePointerInfo PtrInfo, unsigned flags, uint64_t s,
- unsigned base_alignment, const MDNode *TBAAInfo = 0);
+ unsigned base_alignment, const MDNode *TBAAInfo = 0,
+ const MDNode *Ranges = 0);
const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
@@ -137,10 +142,14 @@ public:
/// getTBAAInfo - Return the TBAA tag for the memory reference.
const MDNode *getTBAAInfo() const { return TBAAInfo; }
+ /// getRanges - Return the range tag for the memory reference.
+ const MDNode *getRanges() const { return Ranges; }
+
bool isLoad() const { return Flags & MOLoad; }
bool isStore() const { return Flags & MOStore; }
bool isVolatile() const { return Flags & MOVolatile; }
bool isNonTemporal() const { return Flags & MONonTemporal; }
+ bool isInvariant() const { return Flags & MOInvariant; }
/// refineAlignment - Update this MachineMemOperand to reflect the alignment
/// of MMO, if it has a greater alignment. This must only be used when the
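A sketch of the widened creation path (assumptions: MF, a source IR
pointer value PtrV, and optional TBAATag/RangeTag metadata nodes; the
8-byte size and alignment are arbitrary):

    static MachineMemOperand *makeInvariantLoadMMO(MachineFunction &MF,
                                                   const Value *PtrV,
                                                   const MDNode *TBAATag,
                                                   const MDNode *RangeTag) {
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo(PtrV),
          MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant,
          /*s=*/8, /*base_alignment=*/8, TBAATag, RangeTag);
      assert(MMO->isInvariant() && MMO->isLoad() && "flags round-trip");
      return MMO;
    }
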
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
index 2bf7f17..6b88d4a 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
@@ -161,10 +161,10 @@ class MachineModuleInfo : public ImmutablePass {
/// in this module.
bool DbgInfoAvailable;
- /// CallsExternalVAFunctionWithFloatingPointArguments - True if this module
- /// calls VarArg function with floating point arguments. This is used to emit
- /// an undefined reference to fltused on Windows targets.
- bool CallsExternalVAFunctionWithFloatingPointArguments;
+  /// UsesVAFloatArgument - True if this module calls a VarArg function with
+ /// floating-point arguments. This is used to emit an undefined reference
+ /// to _fltused on Windows targets.
+ bool UsesVAFloatArgument;
public:
static char ID; // Pass identification, replacement for typeid
@@ -223,12 +223,12 @@ public:
bool callsUnwindInit() const { return CallsUnwindInit; }
void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
- bool callsExternalVAFunctionWithFloatingPointArguments() const {
- return CallsExternalVAFunctionWithFloatingPointArguments;
+ bool usesVAFloatArgument() const {
+ return UsesVAFloatArgument;
}
- void setCallsExternalVAFunctionWithFloatingPointArguments(bool b) {
- CallsExternalVAFunctionWithFloatingPointArguments = b;
+ void setUsesVAFloatArgument(bool b) {
+ UsesVAFloatArgument = b;
}
/// getFrameMoves - Returns a reference to a list of moves done in the current
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
index 5440a63..d244dd9 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -48,6 +48,7 @@ public:
MO_ExternalSymbol, ///< Name of external global symbol
MO_GlobalAddress, ///< Address of a global value
MO_BlockAddress, ///< Address of a basic block
+ MO_RegisterMask, ///< Mask of preserved registers.
MO_Metadata, ///< Metadata reference (for debug info)
MO_MCSymbol ///< MCSymbol reference (for debug/eh info)
};
@@ -102,6 +103,17 @@ private:
///
bool IsUndef : 1;
+ /// IsInternalRead - True if this operand reads a value that was defined
+ /// inside the same instruction or bundle. This flag can be set on both use
+ /// and def operands. On a sub-register def operand, it refers to the part
+ /// of the register that isn't written. On a full-register def operand, it
+  /// is a no-op.
+ ///
+ /// When this flag is set, the instruction bundle must contain at least one
+ /// other def of the register. If multiple instructions in the bundle define
+ /// the register, the meaning is target-defined.
+ bool IsInternalRead : 1;
+
/// IsEarlyClobber - True if this MO_Register 'def' operand is written to
/// by the MachineInstr before all input registers are read. This is used to
/// model the GCC inline asm '&' constraint modifier.
@@ -130,6 +142,7 @@ private:
const ConstantFP *CFP; // For MO_FPImmediate.
const ConstantInt *CI; // For MO_CImmediate. Integers > 64bit.
int64_t ImmVal; // For MO_Immediate.
+ const uint32_t *RegMask; // For MO_RegisterMask.
const MDNode *MD; // For MO_Metadata.
MCSymbol *Sym; // For MO_MCSymbol
@@ -209,10 +222,13 @@ public:
bool isSymbol() const { return OpKind == MO_ExternalSymbol; }
/// isBlockAddress - Tests if this is a MO_BlockAddress operand.
bool isBlockAddress() const { return OpKind == MO_BlockAddress; }
+ /// isRegMask - Tests if this is a MO_RegisterMask operand.
+ bool isRegMask() const { return OpKind == MO_RegisterMask; }
/// isMetadata - Tests if this is a MO_Metadata operand.
bool isMetadata() const { return OpKind == MO_Metadata; }
bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
+
//===--------------------------------------------------------------------===//
// Accessors for Register Operands
//===--------------------------------------------------------------------===//
@@ -258,6 +274,11 @@ public:
return IsUndef;
}
+ bool isInternalRead() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsInternalRead;
+ }
+
bool isEarlyClobber() const {
assert(isReg() && "Wrong MachineOperand accessor");
return IsEarlyClobber;
@@ -272,9 +293,12 @@ public:
/// register. A use operand with the <undef> flag set doesn't read its
/// register. A sub-register def implicitly reads the other parts of the
/// register being redefined unless the <undef> flag is set.
+ ///
+ /// This refers to reading the register value from before the current
+ /// instruction or bundle. Internal bundle reads are not included.
bool readsReg() const {
assert(isReg() && "Wrong MachineOperand accessor");
- return !isUndef() && (isUse() || getSubReg());
+ return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
}
/// getNextOperandForReg - Return the next MachineOperand in the function that
@@ -343,6 +367,11 @@ public:
IsUndef = Val;
}
+ void setIsInternalRead(bool Val = true) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ IsInternalRead = Val;
+ }
+
void setIsEarlyClobber(bool Val = true) {
assert(isReg() && IsDef && "Wrong MachineOperand accessor");
IsEarlyClobber = Val;
@@ -412,6 +441,28 @@ public:
return Contents.OffsetedInfo.Val.SymbolName;
}
+ /// clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
+ /// It is sometimes necessary to detach the register mask pointer from its
+ /// machine operand. This static method can be used for such detached bit
+ /// mask pointers.
+ static bool clobbersPhysReg(const uint32_t *RegMask, unsigned PhysReg) {
+ // See TargetRegisterInfo.h.
+ assert(PhysReg < (1u << 30) && "Not a physical register");
+ return !(RegMask[PhysReg / 32] & (1u << PhysReg % 32));
+ }
+
+ /// clobbersPhysReg - Returns true if this RegMask operand clobbers PhysReg.
+ bool clobbersPhysReg(unsigned PhysReg) const {
+ return clobbersPhysReg(getRegMask(), PhysReg);
+ }
+
+ /// getRegMask - Returns a bit mask of registers preserved by this RegMask
+ /// operand.
+ const uint32_t *getRegMask() const {
+ assert(isRegMask() && "Wrong MachineOperand accessor");
+ return Contents.RegMask;
+ }
+
const MDNode *getMetadata() const {
assert(isMetadata() && "Wrong MachineOperand accessor");
return Contents.MD;
@@ -498,6 +549,7 @@ public:
Op.IsKill = isKill;
Op.IsDead = isDead;
Op.IsUndef = isUndef;
+ Op.IsInternalRead = false;
Op.IsEarlyClobber = isEarlyClobber;
Op.IsDebug = isDebug;
Op.SmallContents.RegNo = Reg;
@@ -557,6 +609,24 @@ public:
Op.setTargetFlags(TargetFlags);
return Op;
}
+ /// CreateRegMask - Creates a register mask operand referencing Mask. The
+ /// operand does not take ownership of the memory referenced by Mask; it
+ /// must remain valid for the lifetime of the operand.
+ ///
+ /// A RegMask operand represents a set of non-clobbered physical registers on
+ /// an instruction that clobbers many registers, typically a call. The bit
+ /// mask has a bit set for each physreg that is preserved by this
+ /// instruction, as described in the documentation for
+ /// TargetRegisterInfo::getCallPreservedMask().
+ ///
+ /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
+ ///
+ static MachineOperand CreateRegMask(const uint32_t *Mask) {
+ assert(Mask && "Missing register mask");
+ MachineOperand Op(MachineOperand::MO_RegisterMask);
+ Op.Contents.RegMask = Mask;
+ return Op;
+ }
static MachineOperand CreateMetadata(const MDNode *Meta) {
MachineOperand Op(MachineOperand::MO_Metadata);
Op.Contents.MD = Meta;
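For illustration (not part of the patch), a minimal sketch of creating and querying a register mask operand; MIB, MI, TRI and PhysReg are assumed to be in scope, and getCallPreservedMask() is the target hook named in the CreateRegMask comment above:

    // Attach the call-preserved mask to a call instruction.
    const uint32_t *Mask = TRI->getCallPreservedMask(CallingConv::C);
    MIB.addOperand(MachineOperand::CreateRegMask(Mask));

    // Later, test whether a physical register survives across MI.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask() && MO.clobbersPhysReg(PhysReg))
        ; // PhysReg's value does not live across MI.
    }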
diff --git a/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h b/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
index 6ee2e90..c41e8e26 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
@@ -33,6 +33,7 @@ typedef void *(*MachinePassCtor)();
///
//===----------------------------------------------------------------------===//
class MachinePassRegistryListener {
+ virtual void anchor();
public:
MachinePassRegistryListener() {}
virtual ~MachinePassRegistryListener() {}
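The anchor() added above follows LLVM's usual vtable-anchoring idiom: one out-of-line definition in a single .cpp file pins the class's vtable to that object file instead of emitting weak copies in every translation unit that includes the header. A sketch of the expected definition:

    // In MachinePassRegistry.cpp:
    void MachinePassRegistryListener::anchor() {}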
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index 3866b26..3272fbd 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -15,12 +15,13 @@
#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/IndexedMap.h"
#include <vector>
namespace llvm {
-
+
/// MachineRegisterInfo - Keep track of information for virtual and physical
/// registers, including vreg register classes, use/def chains for registers,
/// etc.
@@ -31,6 +32,11 @@ class MachineRegisterInfo {
/// registers have a single def.
bool IsSSA;
+ /// TracksLiveness - True while register liveness is being tracked accurately.
+ /// Basic block live-in lists, kill flags, and implicit defs may not be
+ /// accurate after this flag is cleared.
+ bool TracksLiveness;
+
/// VRegInfo - Information we keep for each virtual register.
///
/// Each element in this list contains the register class of the vreg and the
@@ -46,18 +52,32 @@ class MachineRegisterInfo {
/// the allocator should prefer the physical register allocated to the virtual
/// register of the hint.
IndexedMap<std::pair<unsigned, unsigned>, VirtReg2IndexFunctor> RegAllocHints;
-
+
/// PhysRegUseDefLists - This is an array of the head of the use/def list for
/// physical registers.
- MachineOperand **PhysRegUseDefLists;
-
+ MachineOperand **PhysRegUseDefLists;
+
/// UsedPhysRegs - This is a bit vector that is computed and set by the
/// register allocator, and must be kept up to date by passes that run after
/// register allocation (though most don't modify this). This is used
/// so that the code generator knows which callee save registers to save and
/// for other target specific uses.
+ /// This vector only has bits set for registers explicitly used, not their
+ /// aliases.
BitVector UsedPhysRegs;
-
+
+ /// UsedPhysRegMask - Additional used physregs, but including aliases.
+ BitVector UsedPhysRegMask;
+
+ /// ReservedRegs - This is a bit vector of reserved registers. The target
+ /// may change its mind about which registers should be reserved. This
+ /// vector is the frozen set of reserved registers when register allocation
+ /// started.
+ BitVector ReservedRegs;
+
+ /// AllocatableRegs - From TRI->getAllocatableSet.
+ mutable BitVector AllocatableRegs;
+
/// LiveIns/LiveOuts - Keep track of the physical registers that are
/// livein/liveout of the function. Live in values are typically arguments in
/// registers, live out values are typically return values in registers.
@@ -65,7 +85,7 @@ class MachineRegisterInfo {
/// stored in the second element.
std::vector<std::pair<unsigned, unsigned> > LiveIns;
std::vector<unsigned> LiveOuts;
-
+
MachineRegisterInfo(const MachineRegisterInfo&); // DO NOT IMPLEMENT
void operator=(const MachineRegisterInfo&); // DO NOT IMPLEMENT
public:
@@ -88,6 +108,23 @@ public:
// leaveSSA - Indicates that the machine function is no longer in SSA form.
void leaveSSA() { IsSSA = false; }
+ /// tracksLiveness - Returns true when tracking register liveness accurately.
+ ///
+ /// While this flag is true, register liveness information in basic block
+ /// live-in lists and machine instruction operands is accurate. This means it
+ /// can be used to change the code in ways that affect the values in
+ /// registers, for example by the register scavenger.
+ ///
+ /// When this flag is false, liveness is no longer reliable.
+ bool tracksLiveness() const { return TracksLiveness; }
+
+ /// invalidateLiveness - Indicates that register liveness is no longer being
+ /// tracked accurately.
+ ///
+ /// This should be called by late passes that invalidate the liveness
+ /// information.
+ void invalidateLiveness() { TracksLiveness = false; }
+
//===--------------------------------------------------------------------===//
// Register Info
//===--------------------------------------------------------------------===//
@@ -141,7 +178,7 @@ public:
return use_iterator(getRegUseDefListHead(RegNo));
}
static use_iterator use_end() { return use_iterator(0); }
-
+
/// use_empty - Return true if there are no instructions using the specified
/// register.
bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); }
@@ -157,7 +194,7 @@ public:
return use_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_nodbg_iterator use_nodbg_end() { return use_nodbg_iterator(0); }
-
+
/// use_nodbg_empty - Return true if there are no non-Debug instructions
/// using the specified register.
bool use_nodbg_empty(unsigned RegNo) const {
@@ -171,8 +208,16 @@ public:
/// replaceRegWith - Replace all instances of FromReg with ToReg in the
/// machine function. This is like llvm-level X->replaceAllUsesWith(Y),
/// except that it also changes any definitions of the register as well.
+ ///
+ /// Note that it is usually necessary to first constrain ToReg's register
+ /// class to match the FromReg constraints using:
+ ///
+ /// constrainRegClass(ToReg, getRegClass(FromReg))
+ ///
+ /// That function will return NULL if the virtual registers have incompatible
+ /// constraints.
void replaceRegWith(unsigned FromReg, unsigned ToReg);
-
+
/// getRegUseDefListHead - Return the head pointer for the register use/def
/// list for the specified virtual or physical register.
MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
@@ -180,7 +225,7 @@ public:
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
-
+
MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
@@ -197,15 +242,20 @@ public:
/// optimization passes which extend register lifetimes and need only
/// preserve conservative kill flag information.
void clearKillFlags(unsigned Reg) const;
-
+
#ifndef NDEBUG
void dumpUses(unsigned RegNo) const;
#endif
-
+
+ /// isConstantPhysReg - Returns true if PhysReg is unallocatable and constant
+ /// throughout the function. It is safe to move instructions that read such
+ /// a physreg.
+ bool isConstantPhysReg(unsigned PhysReg, const MachineFunction &MF) const;
+
//===--------------------------------------------------------------------===//
// Virtual Register Info
//===--------------------------------------------------------------------===//
-
+
/// getRegClass - Return the register class of the specified virtual register.
///
const TargetRegisterClass *getRegClass(unsigned Reg) const {
@@ -246,6 +296,9 @@ public:
///
unsigned getNumVirtRegs() const { return VRegInfo.size(); }
+ /// clearVirtRegs - Remove all virtual registers (after physreg assignment).
+ void clearVirtRegs();
+
/// setRegAllocationHint - Specify a register allocation hint for the
/// specified virtual register.
void setRegAllocationHint(unsigned Reg, unsigned Type, unsigned PrefReg) {
@@ -271,38 +324,87 @@ public:
//===--------------------------------------------------------------------===//
// Physical Register Use Info
//===--------------------------------------------------------------------===//
-
+
/// isPhysRegUsed - Return true if the specified register is used in this
/// function. This only works after register allocation.
- bool isPhysRegUsed(unsigned Reg) const { return UsedPhysRegs[Reg]; }
-
+ bool isPhysRegUsed(unsigned Reg) const {
+ return UsedPhysRegs.test(Reg) || UsedPhysRegMask.test(Reg);
+ }
+
+ /// isPhysRegOrOverlapUsed - Return true if Reg or any overlapping register
+ /// is used in this function.
+ bool isPhysRegOrOverlapUsed(unsigned Reg) const {
+ if (UsedPhysRegMask.test(Reg))
+ return true;
+ for (const uint16_t *AI = TRI->getOverlaps(Reg); *AI; ++AI)
+ if (UsedPhysRegs.test(*AI))
+ return true;
+ return false;
+ }
+
/// setPhysRegUsed - Mark the specified register used in this function.
/// This should only be called during and after register allocation.
- void setPhysRegUsed(unsigned Reg) { UsedPhysRegs[Reg] = true; }
+ void setPhysRegUsed(unsigned Reg) { UsedPhysRegs.set(Reg); }
/// addPhysRegsUsed - Mark the specified registers used in this function.
/// This should only be called during and after register allocation.
void addPhysRegsUsed(const BitVector &Regs) { UsedPhysRegs |= Regs; }
+ /// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
+ /// This corresponds to the bit mask attached to register mask operands.
+ void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) {
+ UsedPhysRegMask.setBitsNotInMask(RegMask);
+ }
+
/// setPhysRegUnused - Mark the specified register unused in this function.
/// This should only be called during and after register allocation.
- void setPhysRegUnused(unsigned Reg) { UsedPhysRegs[Reg] = false; }
+ void setPhysRegUnused(unsigned Reg) {
+ UsedPhysRegs.reset(Reg);
+ UsedPhysRegMask.reset(Reg);
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Reserved Register Info
+ //===--------------------------------------------------------------------===//
+ //
+ // The set of reserved registers must be invariant during register
+ // allocation. For example, the target cannot suddenly decide it needs a
+ // frame pointer when the register allocator has already used the frame
+ // pointer register for something else.
+ //
+ // These methods can be used by target hooks like hasFP() to avoid changing
+ // the reserved register set during register allocation.
+
+ /// freezeReservedRegs - Called by the register allocator to freeze the set
+ /// of reserved registers before allocation begins.
+ void freezeReservedRegs(const MachineFunction&);
+
+ /// reservedRegsFrozen - Returns true after freezeReservedRegs() was called
+ /// to ensure the set of reserved registers stays constant.
+ bool reservedRegsFrozen() const {
+ return !ReservedRegs.empty();
+ }
+
+ /// canReserveReg - Returns true if PhysReg can be used as a reserved
+ /// register. Any register can be reserved before freezeReservedRegs() is
+ /// called.
+ bool canReserveReg(unsigned PhysReg) const {
+ return !reservedRegsFrozen() || ReservedRegs.test(PhysReg);
+ }
- /// closePhysRegsUsed - Expand UsedPhysRegs to its transitive closure over
- /// subregisters. That means that if R is used, so are all subregisters.
- void closePhysRegsUsed(const TargetRegisterInfo&);
//===--------------------------------------------------------------------===//
// LiveIn/LiveOut Management
//===--------------------------------------------------------------------===//
-
+
/// addLiveIn/Out - Add the specified register as a live in/out. Note that it
/// is an error to add the same register to the same set more than once.
void addLiveIn(unsigned Reg, unsigned vreg = 0) {
LiveIns.push_back(std::make_pair(Reg, vreg));
}
void addLiveOut(unsigned Reg) { LiveOuts.push_back(Reg); }
-
+
// Iteration support for live in/out sets. These sets are kept in sorted
// order by their register number.
typedef std::vector<std::pair<unsigned,unsigned> >::const_iterator
@@ -334,7 +436,7 @@ public:
private:
void HandleVRegListReallocation();
-
+
public:
/// defusechain_iterator - This class provides iterator support for machine
/// operands in the function that use or define a specific register. If
@@ -362,31 +464,31 @@ public:
MachineInstr, ptrdiff_t>::reference reference;
typedef std::iterator<std::forward_iterator_tag,
MachineInstr, ptrdiff_t>::pointer pointer;
-
+
defusechain_iterator(const defusechain_iterator &I) : Op(I.Op) {}
defusechain_iterator() : Op(0) {}
-
+
bool operator==(const defusechain_iterator &x) const {
return Op == x.Op;
}
bool operator!=(const defusechain_iterator &x) const {
return !operator==(x);
}
-
+
/// atEnd - return true if this iterator is equal to reg_end() on the value.
bool atEnd() const { return Op == 0; }
-
+
// Iterator traversal: forward iteration only
defusechain_iterator &operator++() { // Preincrement
assert(Op && "Cannot increment end iterator!");
Op = Op->getNextOperandForReg();
-
+
// If this is an operand we don't care about, skip it.
- while (Op && ((!ReturnUses && Op->isUse()) ||
+ while (Op && ((!ReturnUses && Op->isUse()) ||
(!ReturnDefs && Op->isDef()) ||
(SkipDebug && Op->isDebug())))
Op = Op->getNextOperandForReg();
-
+
return *this;
}
defusechain_iterator operator++(int) { // Postincrement
@@ -404,30 +506,38 @@ public:
return MI;
}
+ MachineInstr *skipBundle() {
+ if (!Op) return 0;
+ MachineInstr *MI = getBundleStart(Op->getParent());
+ do ++*this;
+ while (Op && getBundleStart(Op->getParent()) == MI);
+ return MI;
+ }
+
MachineOperand &getOperand() const {
assert(Op && "Cannot dereference end iterator!");
return *Op;
}
-
+
/// getOperandNo - Return the operand # of this MachineOperand in its
/// MachineInstr.
unsigned getOperandNo() const {
assert(Op && "Cannot dereference end iterator!");
return Op - &Op->getParent()->getOperand(0);
}
-
+
// Retrieve a reference to the current operand.
MachineInstr &operator*() const {
assert(Op && "Cannot dereference end iterator!");
return *Op->getParent();
}
-
+
MachineInstr *operator->() const {
assert(Op && "Cannot dereference end iterator!");
return Op->getParent();
}
};
-
+
};
} // End llvm namespace
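For illustration (not part of the patch), a sketch of a target hook honoring the frozen reserved set; the My* names are hypothetical:

    // Once freezeReservedRegs() has run, a target may only claim the frame
    // pointer if it was reserved when the set was frozen.
    bool MyFrameLowering::hasFP(const MachineFunction &MF) const {
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return MyNeedsFramePointer(MF) && MRI.canReserveReg(MyFramePtrReg);
    }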
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
new file mode 100644
index 0000000..e852009
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -0,0 +1,91 @@
+//==- MachineScheduler.h - MachineInstr Scheduling Pass ----------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a MachineSchedRegistry for registering alternative machine
+// schedulers. A Target may provide an alternative scheduler implementation by
+// implementing the following boilerplate:
+//
+// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
+// return new CustomMachineScheduler(C);
+// }
+// static MachineSchedRegistry
+// SchedCustomRegistry("custom", "Run my target's custom scheduler",
+// createCustomMachineSched);
+//
+// Inside <Target>PassConfig:
+// enablePass(MachineSchedulerID);
+// MachineSchedRegistry::setDefault(createCustomMachineSched);
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MACHINESCHEDULER_H
+#define MACHINESCHEDULER_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+
+namespace llvm {
+
+class AliasAnalysis;
+class LiveIntervals;
+class MachineDominatorTree;
+class MachineLoopInfo;
+class ScheduleDAGInstrs;
+
+/// MachineSchedContext provides enough context from the MachineScheduler pass
+/// for the target to instantiate a scheduler.
+struct MachineSchedContext {
+ MachineFunction *MF;
+ const MachineLoopInfo *MLI;
+ const MachineDominatorTree *MDT;
+ const TargetPassConfig *PassConfig;
+ AliasAnalysis *AA;
+ LiveIntervals *LIS;
+
+ MachineSchedContext(): MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {}
+};
+
+/// MachineSchedRegistry provides a selection of available machine instruction
+/// schedulers.
+class MachineSchedRegistry : public MachinePassRegistryNode {
+public:
+ typedef ScheduleDAGInstrs *(*ScheduleDAGCtor)(MachineSchedContext *);
+
+ // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
+ typedef ScheduleDAGCtor FunctionPassCtor;
+
+ static MachinePassRegistry Registry;
+
+ MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
+ : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+ Registry.Add(this);
+ }
+ ~MachineSchedRegistry() { Registry.Remove(this); }
+
+ // Accessors.
+ //
+ MachineSchedRegistry *getNext() const {
+ return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
+ }
+ static MachineSchedRegistry *getList() {
+ return (MachineSchedRegistry *)Registry.getList();
+ }
+ static ScheduleDAGCtor getDefault() {
+ return (ScheduleDAGCtor)Registry.getDefault();
+ }
+ static void setDefault(ScheduleDAGCtor C) {
+ Registry.setDefault((MachinePassCtor)C);
+ }
+ static void setListener(MachinePassRegistryListener *L) {
+ Registry.setListener(L);
+ }
+};
+
+} // namespace llvm
+
+#endif
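For illustration (beyond the registration boilerplate in the header comment), a sketch of enumerating the registered schedulers, assuming the usual getName()/getDescription() accessors inherited from MachinePassRegistryNode:

    // Print every registered machine scheduler, e.g. for -help output.
    for (MachineSchedRegistry *R = MachineSchedRegistry::getList(); R;
         R = R->getNext())
      errs() << R->getName() << " - " << R->getDescription() << "\n";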
diff --git a/contrib/llvm/include/llvm/CodeGen/ObjectCodeEmitter.h b/contrib/llvm/include/llvm/CodeGen/ObjectCodeEmitter.h
deleted file mode 100644
index d46628c..0000000
--- a/contrib/llvm/include/llvm/CodeGen/ObjectCodeEmitter.h
+++ /dev/null
@@ -1,171 +0,0 @@
-//===-- llvm/CodeGen/ObjectCodeEmitter.h - Object Code Emitter -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Generalized Object Code Emitter, works with ObjectModule and BinaryObject.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_OBJECTCODEEMITTER_H
-#define LLVM_CODEGEN_OBJECTCODEEMITTER_H
-
-#include "llvm/CodeGen/MachineCodeEmitter.h"
-
-namespace llvm {
-
-class BinaryObject;
-class MachineBasicBlock;
-class MachineCodeEmitter;
-class MachineFunction;
-class MachineConstantPool;
-class MachineJumpTableInfo;
-class MachineModuleInfo;
-
-class ObjectCodeEmitter : public MachineCodeEmitter {
-protected:
-
- /// Binary Object (Section or Segment) we are emitting to.
- BinaryObject *BO;
-
- /// MBBLocations - This vector is a mapping from MBB ID's to their address.
- /// It is filled in by the StartMachineBasicBlock callback and queried by
- /// the getMachineBasicBlockAddress callback.
- std::vector<uintptr_t> MBBLocations;
-
- /// LabelLocations - This vector is a mapping from Label ID's to their
- /// address.
- std::vector<uintptr_t> LabelLocations;
-
- /// CPLocations - This is a map of constant pool indices to offsets from the
- /// start of the section for that constant pool index.
- std::vector<uintptr_t> CPLocations;
-
- /// CPSections - This is a map of constant pool indices to the Section
- /// containing the constant pool entry for that index.
- std::vector<uintptr_t> CPSections;
-
- /// JTLocations - This is a map of jump table indices to offsets from the
- /// start of the section for that jump table index.
- std::vector<uintptr_t> JTLocations;
-
-public:
- ObjectCodeEmitter();
- ObjectCodeEmitter(BinaryObject *bo);
- virtual ~ObjectCodeEmitter();
-
- /// setBinaryObject - set the BinaryObject we are writting to
- void setBinaryObject(BinaryObject *bo);
-
- /// emitByte - This callback is invoked when a byte needs to be
- /// written to the data stream, without buffer overflow testing.
- void emitByte(uint8_t B);
-
- /// emitWordLE - This callback is invoked when a 32-bit word needs to be
- /// written to the data stream in little-endian format.
- void emitWordLE(uint32_t W);
-
- /// emitWordBE - This callback is invoked when a 32-bit word needs to be
- /// written to the data stream in big-endian format.
- void emitWordBE(uint32_t W);
-
- /// emitDWordLE - This callback is invoked when a 64-bit word needs to be
- /// written to the data stream in little-endian format.
- void emitDWordLE(uint64_t W);
-
- /// emitDWordBE - This callback is invoked when a 64-bit word needs to be
- /// written to the data stream in big-endian format.
- void emitDWordBE(uint64_t W);
-
- /// emitAlignment - Move the CurBufferPtr pointer up to the specified
- /// alignment (saturated to BufferEnd of course).
- void emitAlignment(unsigned Alignment = 0, uint8_t fill = 0);
-
- /// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be
- /// written to the data stream.
- void emitULEB128Bytes(uint64_t Value);
-
- /// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be
- /// written to the data stream.
- void emitSLEB128Bytes(uint64_t Value);
-
- /// emitString - This callback is invoked when a String needs to be
- /// written to the data stream.
- void emitString(const std::string &String);
-
- /// getCurrentPCValue - This returns the address that the next emitted byte
- /// will be output to.
- uintptr_t getCurrentPCValue() const;
-
- /// getCurrentPCOffset - Return the offset from the start of the emitted
- /// buffer that we are currently writing to.
- uintptr_t getCurrentPCOffset() const;
-
- /// addRelocation - Whenever a relocatable address is needed, it should be
- /// noted with this interface.
- void addRelocation(const MachineRelocation& relocation);
-
- /// earlyResolveAddresses - True if the code emitter can use symbol addresses
- /// during code emission time. The JIT is capable of doing this because it
- /// creates jump tables or constant pools in memory on the fly while the
- /// object code emitters rely on a linker to have real addresses and should
- /// use relocations instead.
- bool earlyResolveAddresses() const { return false; }
-
- /// startFunction - This callback is invoked when the specified function is
- /// about to be code generated. This initializes the BufferBegin/End/Ptr
- /// fields.
- virtual void startFunction(MachineFunction &F) = 0;
-
- /// finishFunction - This callback is invoked when the specified function has
- /// finished code generation. If a buffer overflow has occurred, this method
- /// returns true (the callee is required to try again), otherwise it returns
- /// false.
- virtual bool finishFunction(MachineFunction &F) = 0;
-
- /// StartMachineBasicBlock - This should be called by the target when a new
- /// basic block is about to be emitted. This way the MCE knows where the
- /// start of the block is, and can implement getMachineBasicBlockAddress.
- virtual void StartMachineBasicBlock(MachineBasicBlock *MBB);
-
- /// getMachineBasicBlockAddress - Return the address of the specified
- /// MachineBasicBlock, only usable after the label for the MBB has been
- /// emitted.
- virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const;
-
- /// emitJumpTables - Emit all the jump tables for a given jump table info
- /// record to the appropriate section.
- virtual void emitJumpTables(MachineJumpTableInfo *MJTI) = 0;
-
- /// getJumpTableEntryAddress - Return the address of the jump table with index
- /// 'Index' in the function that last called initJumpTableInfo.
- virtual uintptr_t getJumpTableEntryAddress(unsigned Index) const;
-
- /// emitConstantPool - For each constant pool entry, figure out which section
- /// the constant should live in, allocate space for it, and emit it to the
- /// Section data buffer.
- virtual void emitConstantPool(MachineConstantPool *MCP) = 0;
-
- /// getConstantPoolEntryAddress - Return the address of the 'Index' entry in
- /// the constant pool that was last emitted with the emitConstantPool method.
- virtual uintptr_t getConstantPoolEntryAddress(unsigned Index) const;
-
- /// getConstantPoolEntrySection - Return the section of the 'Index' entry in
- /// the constant pool that was last emitted with the emitConstantPool method.
- virtual uintptr_t getConstantPoolEntrySection(unsigned Index) const;
-
- /// Specifies the MachineModuleInfo object. This is used for exception handling
- /// purposes.
- virtual void setModuleInfo(MachineModuleInfo* Info) = 0;
- // to be implemented or depreciated with MachineModuleInfo
-
-}; // end class ObjectCodeEmitter
-
-} // end namespace llvm
-
-#endif
-
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
index 5240729..a5d8b0d 100644
--- a/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
@@ -350,6 +350,43 @@ namespace PBQP {
numNodes = numEdges = 0;
}
+ /// \brief Dump a graph to an output stream.
+ template <typename OStream>
+ void dump(OStream &os) {
+ os << getNumNodes() << " " << getNumEdges() << "\n";
+
+ for (NodeItr nodeItr = nodesBegin(), nodeEnd = nodesEnd();
+ nodeItr != nodeEnd; ++nodeItr) {
+ const Vector& v = getNodeCosts(nodeItr);
+ os << "\n" << v.getLength() << "\n";
+ assert(v.getLength() != 0 && "Empty vector in graph.");
+ os << v[0];
+ for (unsigned i = 1; i < v.getLength(); ++i) {
+ os << " " << v[i];
+ }
+ os << "\n";
+ }
+
+ for (EdgeItr edgeItr = edgesBegin(), edgeEnd = edgesEnd();
+ edgeItr != edgeEnd; ++edgeItr) {
+ unsigned n1 = std::distance(nodesBegin(), getEdgeNode1(edgeItr));
+ unsigned n2 = std::distance(nodesBegin(), getEdgeNode2(edgeItr));
+ assert(n1 != n2 && "PBQP graphs should not have self-edges.");
+ const Matrix& m = getEdgeCosts(edgeItr);
+ os << "\n" << n1 << " " << n2 << "\n"
+ << m.getRows() << " " << m.getCols() << "\n";
+ assert(m.getRows() != 0 && "No rows in matrix.");
+ assert(m.getCols() != 0 && "No cols in matrix.");
+ for (unsigned i = 0; i < m.getRows(); ++i) {
+ os << m[i][0];
+ for (unsigned j = 1; j < m.getCols(); ++j) {
+ os << " " << m[i][j];
+ }
+ os << "\n";
+ }
+ }
+ }
+
/// \brief Print a representation of this graph in DOT format.
/// @param os Output stream to print on.
template <typename OStream>
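To make the emitted format concrete: for two nodes with length-2 cost vectors joined by one edge with a 2x2 cost matrix, dump() produces text along these lines (node and edge counts first, then per-node vectors, then per-edge matrices):

    2 1

    2
    0 1

    2
    0 1

    0 1
    2 2
    1 0
    0 1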
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h b/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h
index 791c227..3fee18c 100644
--- a/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/HeuristicBase.h
@@ -157,7 +157,7 @@ namespace PBQP {
case 0: s.applyR0(nItr); break;
case 1: s.applyR1(nItr); break;
case 2: s.applyR2(nItr); break;
- default: assert(false &&
+ default: llvm_unreachable(
"Optimal reductions of degree > 2 nodes is invalid.");
}
@@ -186,7 +186,7 @@ namespace PBQP {
/// \brief Add a node to the heuristic reduce list.
/// @param nItr Node iterator to add to the heuristic reduce list.
void addToHeuristicList(Graph::NodeItr nItr) {
- assert(false && "Must be implemented in derived class.");
+ llvm_unreachable("Must be implemented in derived class.");
}
/// \brief Heuristically reduce one of the nodes in the heuristic
@@ -194,25 +194,25 @@ namespace PBQP {
/// @return True if a reduction takes place, false if the heuristic reduce
/// list is empty.
void heuristicReduce() {
- assert(false && "Must be implemented in derived class.");
+ llvm_unreachable("Must be implemented in derived class.");
}
/// \brief Prepare a change in the costs on the given edge.
/// @param eItr Edge iterator.
void preUpdateEdgeCosts(Graph::EdgeItr eItr) {
- assert(false && "Must be implemented in derived class.");
+ llvm_unreachable("Must be implemented in derived class.");
}
/// \brief Handle the change in the costs on the given edge.
/// @param eItr Edge iterator.
void postUpdateEdgeCostts(Graph::EdgeItr eItr) {
- assert(false && "Must be implemented in derived class.");
+ llvm_unreachable("Must be implemented in derived class.");
}
/// \brief Handle the addition of a new edge into the PBQP graph.
/// @param eItr Edge iterator for the added edge.
void handleAddEdge(Graph::EdgeItr eItr) {
- assert(false && "Must be implemented in derived class.");
+ llvm_unreachable("Must be implemented in derived class.");
}
/// \brief Handle disconnection of an edge from a node.
@@ -223,7 +223,7 @@ namespace PBQP {
/// method allows for the effect to be computed only for the remaining
/// node in the graph.
void handleRemoveEdge(Graph::EdgeItr eItr, Graph::NodeItr nItr) {
- assert(false && "Must be implemented in derived class.");
+ llvm_unreachable("Must be implemented in derived class.");
}
/// \brief Clean up any structures used by HeuristicBase.
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h
index e96c4cb..a859e58 100644
--- a/contrib/llvm/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Heuristics/Briggs.h
@@ -418,6 +418,12 @@ namespace PBQP {
unsigned numRegs = getGraph().getNodeCosts(nItr).getLength() - 1;
nd.numDenied = 0;
+ const Vector& nCosts = getGraph().getNodeCosts(nItr);
+ for (unsigned i = 1; i < nCosts.getLength(); ++i) {
+ if (nCosts[i] == std::numeric_limits<PBQPNum>::infinity())
+ ++nd.numDenied;
+ }
+
nd.numSafe = numRegs;
nd.unsafeDegrees.resize(numRegs, 0);
diff --git a/contrib/llvm/include/llvm/CodeGen/Passes.h b/contrib/llvm/include/llvm/CodeGen/Passes.h
index 7a03ce9..3b38199 100644
--- a/contrib/llvm/include/llvm/CodeGen/Passes.h
+++ b/contrib/llvm/include/llvm/CodeGen/Passes.h
@@ -15,6 +15,7 @@
#ifndef LLVM_CODEGEN_PASSES_H
#define LLVM_CODEGEN_PASSES_H
+#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
#include <string>
@@ -26,7 +27,211 @@ namespace llvm {
class TargetLowering;
class TargetRegisterClass;
class raw_ostream;
+}
+namespace llvm {
+
+extern char &NoPassID; // Allow targets to choose not to run a pass.
+
+class PassConfigImpl;
+
+/// Target-Independent Code Generator Pass Configuration Options.
+///
+/// This is an ImmutablePass solely for the purpose of exposing CodeGen options
+/// to the internals of other CodeGen passes.
+class TargetPassConfig : public ImmutablePass {
+public:
+ /// Pseudo Pass IDs. These are defined within TargetPassConfig because they
+ /// are unregistered pass IDs. They are only useful with TargetPassConfig
+ /// APIs to identify multiple occurrences of the same pass.
+ ///
+
+ /// EarlyTailDuplicate - A clone of the TailDuplicate pass that runs early
+ /// during codegen, on SSA form.
+ static char EarlyTailDuplicateID;
+
+ /// PostRAMachineLICM - A clone of the LICM pass that runs during late machine
+ /// optimization after regalloc.
+ static char PostRAMachineLICMID;
+
+protected:
+ TargetMachine *TM;
+ PassManagerBase &PM;
+ PassConfigImpl *Impl; // Internal data structures
+ bool Initialized; // Flagged after all passes are configured.
+
+ // Target Pass Options
+ // Targets provide a default setting, user flags override.
+ //
+ bool DisableVerify;
+
+ /// Default setting for -enable-tail-merge on this target.
+ bool EnableTailMerge;
+
+public:
+ TargetPassConfig(TargetMachine *tm, PassManagerBase &pm);
+ // Dummy constructor.
+ TargetPassConfig();
+
+ virtual ~TargetPassConfig();
+
+ static char ID;
+
+ /// Get the right type of TargetMachine for this target.
+ template<typename TMC> TMC &getTM() const {
+ return *static_cast<TMC*>(TM);
+ }
+
+ const TargetLowering *getTargetLowering() const {
+ return TM->getTargetLowering();
+ }
+
+ //
+ void setInitialized() { Initialized = true; }
+
+ CodeGenOpt::Level getOptLevel() const { return TM->getOptLevel(); }
+
+ void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
+
+ bool getEnableTailMerge() const { return EnableTailMerge; }
+ void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); }
+
+ /// Allow the target to override a specific pass without overriding the pass
+ /// pipeline. When passes are added to the standard pipeline at the
+ /// point where StandardID is expected, add TargetID in its place.
+ void substitutePass(char &StandardID, char &TargetID);
+
+ /// Allow the target to enable a specific standard pass by default.
+ void enablePass(char &ID) { substitutePass(ID, ID); }
+
+ /// Allow the target to disable a specific standard pass by default.
+ void disablePass(char &ID) { substitutePass(ID, NoPassID); }
+
+ /// Return the pass substituted for StandardID by the target.
+ /// If no substitution exists, return StandardID.
+ AnalysisID getPassSubstitution(AnalysisID StandardID) const;
+
+ /// Return true if the optimized regalloc pipeline is enabled.
+ bool getOptimizeRegAlloc() const;
+
+ /// Add common target configurable passes that perform LLVM IR to IR
+ /// transforms following machine independent optimization.
+ virtual void addIRPasses();
+
+ /// Add common passes that perform LLVM IR to IR transforms in preparation for
+ /// instruction selection.
+ virtual void addISelPrepare();
+
+ /// addInstSelector - This method should install an instruction selector pass,
+ /// which converts from LLVM code to machine instructions.
+ virtual bool addInstSelector() {
+ return true;
+ }
+
+ /// Add the complete, standard set of LLVM CodeGen passes.
+ /// Fully developed targets will not generally override this.
+ virtual void addMachinePasses();
+
+protected:
+ // Helper to verify the analysis is really immutable.
+ void setOpt(bool &Opt, bool Val);
+
+ /// Methods with trivial inline returns are convenient points in the common
+ /// codegen pass pipeline where targets may insert passes. Methods with
+ /// out-of-line standard implementations are major CodeGen stages called by
+ /// addMachinePasses. Some targets may override major stages when inserting
+ /// passes is insufficient, but maintaining overridden stages is more work.
+ ///
+
+ /// addPreISel - This method should add any "last minute" LLVM->LLVM
+ /// passes (which are run just before the instruction selector).
+ virtual bool addPreISel() {
+ return true;
+ }
+
+ /// addMachineSSAOptimization - Add standard passes that optimize machine
+ /// instructions in SSA form.
+ virtual void addMachineSSAOptimization();
+
+ /// addPreRegAlloc - This method may be implemented by targets that want to
+ /// run passes immediately before register allocation. This should return
+ /// true if -print-machineinstrs should print after these passes.
+ virtual bool addPreRegAlloc() {
+ return false;
+ }
+
+ /// createTargetRegisterAllocator - Create the register allocator pass for
+ /// this target at the current optimization level.
+ virtual FunctionPass *createTargetRegisterAllocator(bool Optimized);
+
+ /// addFastRegAlloc - Add the minimum set of target-independent passes that
+ /// are required for fast register allocation.
+ virtual void addFastRegAlloc(FunctionPass *RegAllocPass);
+
+ /// addOptimizedRegAlloc - Add passes related to register allocation.
+ /// LLVMTargetMachine provides standard regalloc passes for most targets.
+ virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
+
+ /// addFinalizeRegAlloc - This method may be implemented by targets that want
+ /// to run passes within the regalloc pipeline, immediately after the register
+ /// allocation pass itself. These passes run as soon as virtual registers
+ /// have been rewritten to physical registers but before any other postRA
+ /// optimization happens. Targets that have marked instructions for bundling
+ /// must have finalized those bundles by the time these passes have run,
+ /// because subsequent passes are not guaranteed to be bundle-aware.
+ virtual bool addFinalizeRegAlloc() {
+ return false;
+ }
+
+ /// addPostRegAlloc - This method may be implemented by targets that want to
+ /// run passes after register allocation pass pipeline but before
+ /// prolog-epilog insertion. This should return true if -print-machineinstrs
+ /// should print after these passes.
+ virtual bool addPostRegAlloc() {
+ return false;
+ }
+
+ /// Add passes that optimize machine instructions after register allocation.
+ virtual void addMachineLateOptimization();
+
+ /// addPreSched2 - This method may be implemented by targets that want to
+ /// run passes after prolog-epilog insertion and before the second instruction
+ /// scheduling pass. This should return true if -print-machineinstrs should
+ /// print after these passes.
+ virtual bool addPreSched2() {
+ return false;
+ }
+
+ /// Add standard basic block placement passes.
+ virtual void addBlockPlacement();
+
+ /// addPreEmitPass - This pass may be implemented by targets that want to run
+ /// passes immediately before machine code is emitted. This should return
+ /// true if -print-machineinstrs should print out the code after the passes.
+ virtual bool addPreEmitPass() {
+ return false;
+ }
+
+ /// Utilities for targets to add passes to the pass manager.
+ ///
+
+ /// Add a CodeGen pass at this point in the pipeline after checking overrides.
+ /// Return the pass that was added, or NoPassID.
+ AnalysisID addPass(char &ID);
+
+ /// addMachinePasses helper to create the target-selected or overridden
+ /// regalloc pass.
+ FunctionPass *createRegAllocPass(bool Optimized);
+
+ /// printAndVerify - Add a pass to dump then verify the machine function, if
+ /// those steps are enabled.
+ ///
+ void printAndVerify(const char *Banner) const;
+};
+} // namespace llvm
+
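For illustration (not part of the patch), a sketch of a target-specific subclass wired up through these hooks; the My* names are hypothetical:

    class MyPassConfig : public TargetPassConfig {
    public:
      MyPassConfig(MyTargetMachine *TM, PassManagerBase &PM)
        : TargetPassConfig(TM, PM) {
        // Swap in a target branch folder and disable if-conversion by default.
        substitutePass(BranchFolderPassID, MyBranchFolderID);
        disablePass(IfConverterID);
      }
      virtual bool addInstSelector() {
        PM.add(createMyISelDag(getTM<MyTargetMachine>()));
        return false; // false means the selector was installed successfully
      }
    };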
+/// List of target independent CodeGen pass IDs.
+namespace llvm {
/// createUnreachableBlockEliminationPass - The LLVM code generator does not
/// work well with unreachable basic blocks (what live ranges make sense for a
/// block that cannot be reached?). As such, a code generator should either
@@ -41,31 +246,29 @@ namespace llvm {
createMachineFunctionPrinterPass(raw_ostream &OS,
const std::string &Banner ="");
- /// MachineLoopInfo pass - This pass is a loop analysis pass.
- ///
+ /// MachineLoopInfo - This pass is a loop analysis pass.
extern char &MachineLoopInfoID;
- /// MachineLoopRanges pass - This pass is an on-demand loop coverage
- /// analysis pass.
- ///
+ /// MachineLoopRanges - This pass is an on-demand loop coverage analysis.
extern char &MachineLoopRangesID;
- /// MachineDominators pass - This pass is a machine dominators analysis pass.
- ///
+ /// MachineDominators - This pass is a machine dominators analysis pass.
extern char &MachineDominatorsID;
/// EdgeBundles analysis - Bundle machine CFG edges.
- ///
extern char &EdgeBundlesID;
- /// PHIElimination pass - This pass eliminates machine instruction PHI nodes
+ /// LiveVariables pass - This pass computes the set of blocks in which each
+ /// variable is live and sets machine operand kill flags.
+ extern char &LiveVariablesID;
+
+ /// PHIElimination - This pass eliminates machine instruction PHI nodes
/// by inserting copy instructions. This destroys SSA information, but is the
/// desired input for some register allocators. This pass is "required" by
/// these register allocators like this: AU.addRequiredID(PHIEliminationID);
- ///
extern char &PHIEliminationID;
- /// StrongPHIElimination pass - This pass eliminates machine instruction PHI
+ /// StrongPHIElimination - This pass eliminates machine instruction PHI
/// nodes by inserting copy instructions. This destroys SSA information, but
/// is the desired input for some register allocators. This pass is
/// "required" by these register allocator like this:
@@ -76,32 +279,30 @@ namespace llvm {
/// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
extern char &LiveStacksID;
- /// TwoAddressInstruction pass - This pass reduces two-address instructions to
+ /// TwoAddressInstruction - This pass reduces two-address instructions to
/// use two operands. This destroys SSA information but it is desired by
/// register allocators.
extern char &TwoAddressInstructionPassID;
- /// RegisteCoalescer pass - This pass merges live ranges to eliminate copies.
- extern char &RegisterCoalescerPassID;
+ /// ProcessImplicitDefs pass - This pass removes IMPLICIT_DEFs.
+ extern char &ProcessImplicitDefsID;
+
+ /// RegisterCoalescer - This pass merges live ranges to eliminate copies.
+ extern char &RegisterCoalescerID;
+
+ /// MachineScheduler - This pass schedules machine instructions.
+ extern char &MachineSchedulerID;
/// SpillPlacement analysis. Suggest optimal placement of spill code between
/// basic blocks.
- ///
extern char &SpillPlacementID;
- /// UnreachableMachineBlockElimination pass - This pass removes unreachable
+ /// UnreachableMachineBlockElimination - This pass removes unreachable
/// machine basic blocks.
extern char &UnreachableMachineBlockElimID;
- /// DeadMachineInstructionElim pass - This pass removes dead machine
- /// instructions.
- ///
- FunctionPass *createDeadMachineInstructionElimPass();
-
- /// Creates a register allocator as the user specified on the command line, or
- /// picks one that matches OptLevel.
- ///
- FunctionPass *createRegisterAllocator(CodeGenOpt::Level OptLevel);
+ /// DeadMachineInstructionElim - This pass removes dead machine instructions.
+ extern char &DeadMachineInstructionElimID;
/// FastRegisterAllocation Pass - This pass register allocates as fast as
/// possible. It is best suited for debug code where live ranges are short.
@@ -118,56 +319,59 @@ namespace llvm {
///
FunctionPass *createGreedyRegisterAllocator();
- /// LinearScanRegisterAllocation Pass - This pass implements the linear scan
- /// register allocation algorithm, a global register allocator.
- ///
- FunctionPass *createLinearScanRegisterAllocator();
-
/// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
/// Quadratic Programming (PBQP) based register allocator.
///
FunctionPass *createDefaultPBQPRegisterAllocator();
- /// PrologEpilogCodeInserter Pass - This pass inserts prolog and epilog code,
+ /// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
/// and eliminates abstract frame references.
- ///
- FunctionPass *createPrologEpilogCodeInserter();
+ extern char &PrologEpilogCodeInserterID;
- /// ExpandPostRAPseudos Pass - This pass expands pseudo instructions after
+ /// ExpandPostRAPseudos - This pass expands pseudo instructions after
/// register allocation.
- ///
- FunctionPass *createExpandPostRAPseudosPass();
+ extern char &ExpandPostRAPseudosID;
/// createPostRAScheduler - This pass performs post register allocation
/// scheduling.
- FunctionPass *createPostRAScheduler(CodeGenOpt::Level OptLevel);
+ extern char &PostRASchedulerID;
- /// BranchFolding Pass - This pass performs machine code CFG based
+ /// BranchFolding - This pass performs machine code CFG based
/// optimizations to delete branches to branches, eliminate branches to
/// successor blocks (creating fall throughs), and eliminating branches over
/// branches.
- FunctionPass *createBranchFoldingPass(bool DefaultEnableTailMerge);
+ extern char &BranchFolderPassID;
- /// TailDuplicate Pass - Duplicate blocks with unconditional branches
+ /// TailDuplicate - Duplicate blocks with unconditional branches
/// into tails of their predecessors.
- FunctionPass *createTailDuplicatePass(bool PreRegAlloc = false);
+ extern char &TailDuplicateID;
+
+ /// IfConverter - This pass performs machine code if conversion.
+ extern char &IfConverterID;
- /// IfConverter Pass - This pass performs machine code if conversion.
- FunctionPass *createIfConverterPass();
+ /// MachineBlockPlacement - This pass places basic blocks based on branch
+ /// probabilities.
+ extern char &MachineBlockPlacementID;
- /// Code Placement Pass - This pass optimize code placement and aligns loop
+ /// MachineBlockPlacementStats - This pass collects statistics about the
+ /// basic block placement using branch probabilities and block frequency
+ /// information.
+ extern char &MachineBlockPlacementStatsID;
+
+ /// Code Placement - This pass optimizes code placement and aligns loop
/// headers to target specific alignment boundary.
- FunctionPass *createCodePlacementOptPass();
+ extern char &CodePlacementOptID;
- /// IntrinsicLowering Pass - Performs target-independent LLVM IR
- /// transformations for highly portable strategies.
+ /// GCLowering Pass - Performs target-independent LLVM IR transformations for
+ /// highly portable strategies.
+ ///
FunctionPass *createGCLoweringPass();
- /// MachineCodeAnalysis Pass - Target-independent pass to mark safe points in
- /// machine code. Must be added very late during code generation, just prior
- /// to output, and importantly after all CFG transformations (such as branch
- /// folding).
- FunctionPass *createGCMachineCodeAnalysisPass();
+ /// GCMachineCodeAnalysis - Target-independent pass to mark safe points
+ /// in machine code. Must be added very late during code generation, just
+ /// prior to output, and importantly after all CFG transformations (such as
+ /// branch folding).
+ extern char &GCMachineCodeAnalysisID;
/// Deleter Pass - Releases GC metadata.
///
@@ -177,54 +381,56 @@ namespace llvm {
///
FunctionPass *createGCInfoPrinter(raw_ostream &OS);
- /// createMachineCSEPass - This pass performs global CSE on machine
- /// instructions.
- FunctionPass *createMachineCSEPass();
+ /// MachineCSE - This pass performs global CSE on machine instructions.
+ extern char &MachineCSEID;
- /// createMachineLICMPass - This pass performs LICM on machine instructions.
- ///
- FunctionPass *createMachineLICMPass(bool PreRegAlloc = true);
+ /// MachineLICM - This pass performs LICM on machine instructions.
+ extern char &MachineLICMID;
+
+ /// MachineSinking - This pass performs sinking on machine instructions.
+ extern char &MachineSinkingID;
- /// createMachineSinkingPass - This pass performs sinking on machine
- /// instructions.
- FunctionPass *createMachineSinkingPass();
+ /// MachineCopyPropagation - This pass performs copy propagation on
+ /// machine instructions.
+ extern char &MachineCopyPropagationID;
- /// createPeepholeOptimizerPass - This pass performs peephole optimizations -
+ /// PeepholeOptimizer - This pass performs peephole optimizations -
/// like extension and comparison eliminations.
- FunctionPass *createPeepholeOptimizerPass();
+ extern char &PeepholeOptimizerID;
- /// createOptimizePHIsPass - This pass optimizes machine instruction PHIs
+ /// OptimizePHIs - This pass optimizes machine instruction PHIs
/// to take advantage of opportunities created during DAG legalization.
- FunctionPass *createOptimizePHIsPass();
+ extern char &OptimizePHIsID;
- /// createStackSlotColoringPass - This pass performs stack slot coloring.
- FunctionPass *createStackSlotColoringPass(bool);
+ /// StackSlotColoring - This pass performs stack slot coloring.
+ extern char &StackSlotColoringID;
/// createStackProtectorPass - This pass adds stack protectors to functions.
+ ///
FunctionPass *createStackProtectorPass(const TargetLowering *tli);
/// createMachineVerifierPass - This pass verifies generated machine code
/// instructions for correctness.
+ ///
FunctionPass *createMachineVerifierPass(const char *Banner = 0);
/// createDwarfEHPass - This pass mulches exception handling code into a form
/// adapted to code generation. Required if using dwarf exception handling.
FunctionPass *createDwarfEHPass(const TargetMachine *tm);
- /// createSjLjEHPass - This pass adapts exception handling code to use
+ /// createSjLjEHPreparePass - This pass adapts exception handling code to use
/// the GCC-style builtin setjmp/longjmp (sjlj) for handling EH control flow.
- FunctionPass *createSjLjEHPass(const TargetLowering *tli);
+ ///
+ FunctionPass *createSjLjEHPreparePass(const TargetLowering *tli);
- /// createLocalStackSlotAllocationPass - This pass assigns local frame
- /// indices to stack slots relative to one another and allocates
- /// base registers to access them when it is estimated by the target to
- /// be out of range of normal frame pointer or stack pointer index
- /// addressing.
- FunctionPass *createLocalStackSlotAllocationPass();
+ /// LocalStackSlotAllocation - This pass assigns local frame indices to stack
+ /// slots relative to one another and allocates base registers to access them
+ /// when it is estimated by the target to be out of range of normal frame
+ /// pointer or stack pointer index addressing.
+ extern char &LocalStackSlotAllocationID;
- /// createExpandISelPseudosPass - This pass expands pseudo-instructions.
- ///
- FunctionPass *createExpandISelPseudosPass();
+ /// ExpandISelPseudos - This pass expands pseudo-instructions.
+ extern char &ExpandISelPseudosID;
/// createExecutionDependencyFixPass - This pass fixes execution time
/// problems with dependent instructions, such as switching execution
@@ -234,6 +440,13 @@ namespace llvm {
///
FunctionPass *createExecutionDependencyFixPass(const TargetRegisterClass *RC);
+ /// UnpackMachineBundles - This pass unpacks machine instruction bundles.
+ extern char &UnpackMachineBundlesID;
+
+ /// FinalizeMachineBundles - This pass finalizes machine instruction
+ /// bundles (created earlier, e.g. during pre-RA scheduling).
+ extern char &FinalizeMachineBundlesID;
+
} // End llvm namespace
#endif
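For illustration, the AU.addRequiredID() usage mentioned above, from a hypothetical machine pass:

    void MyMachinePass::getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequiredID(PHIEliminationID);   // run PHI elimination first
      AU.addPreservedID(MachineLoopInfoID); // loop info survives this pass
      MachineFunctionPass::getAnalysisUsage(AU);
    }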
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h b/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
index 26b6773..3986a8d 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
@@ -68,6 +68,10 @@ class RegScavenger {
/// available, unset means the register is currently being used.
BitVector RegsAvailable;
+ // These BitVectors are only used internally to forward(). They are members
+ // to avoid frequent reallocations.
+ BitVector KillRegs, DefRegs;
+
public:
RegScavenger()
: MBB(NULL), NumPhysRegs(0), Tracking(false),
@@ -130,8 +134,9 @@ private:
/// isUsed / isUnused - Test if a register is currently being used.
///
- bool isUsed(unsigned Reg) const { return !RegsAvailable.test(Reg); }
- bool isUnused(unsigned Reg) const { return RegsAvailable.test(Reg); }
+ bool isUsed(unsigned Reg) const {
+ return !RegsAvailable.test(Reg) || ReservedRegs.test(Reg);
+ }
/// isAliasUsed - Is Reg or an alias currently in use?
bool isAliasUsed(unsigned Reg) const;
@@ -139,7 +144,7 @@ private:
/// setUsed / setUnused - Mark the state of one or a number of registers.
///
void setUsed(BitVector &Regs) {
- RegsAvailable &= ~Regs;
+ RegsAvailable.reset(Regs);
}
void setUnused(BitVector &Regs) {
RegsAvailable |= Regs;
@@ -148,9 +153,6 @@ private:
/// Add Reg and all its sub-registers to BV.
void addRegWithSubRegs(BitVector &BV, unsigned Reg);
- /// Add Reg and its aliases to BV.
- void addRegWithAliases(BitVector &BV, unsigned Reg);
-
/// findSurvivorReg - Return the candidate register that is unused for the
/// longest after StartMI. UseMI is set to the instruction where the search
/// stopped.
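For illustration (not part of the patch), the typical scavenging sequence during frame-index elimination; MyRC is a hypothetical register class pointer and MI an iterator into MBB:

    RegScavenger RS;
    RS.enterBasicBlock(&MBB);  // with this patch, reserved regs count as used
    RS.forward(MI);            // advance liveness tracking to MI
    unsigned Reg = RS.scavengeRegister(MyRC, MI, /*SPAdj=*/0);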
diff --git a/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h b/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h
new file mode 100644
index 0000000..56b5855
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h
@@ -0,0 +1,142 @@
+//===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ResourcePriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using DFA state to
+// reduce the length of the critical path through the basic block
+// on VLIW platforms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef RESOURCE_PRIORITY_QUEUE_H
+#define RESOURCE_PRIORITY_QUEUE_H
+
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+ class ResourcePriorityQueue;
+
+ /// Sorting functions for the Available queue.
+ struct resource_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ ResourcePriorityQueue *PQ;
+ explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
+
+ class ResourcePriorityQueue : public SchedulingPriorityQueue {
+ /// SUnits - The SUnits for the current graph.
+ std::vector<SUnit> *SUnits;
+
+ /// NumNodesSolelyBlocking - This vector contains, for every node in the
+ /// Queue, the number of nodes that the node is the sole unscheduled
+ /// predecessor for. This is used as a tie-breaker heuristic for better
+ /// mobility.
+ std::vector<unsigned> NumNodesSolelyBlocking;
+
+ /// Queue - The queue.
+ std::vector<SUnit*> Queue;
+
+ /// RegPressure - Tracking current reg pressure per register class.
+ ///
+ std::vector<unsigned> RegPressure;
+
+ /// RegLimit - Tracking the number of allocatable registers per register
+ /// class.
+ std::vector<unsigned> RegLimit;
+
+ resource_sort Picker;
+ const TargetRegisterInfo *TRI;
+ const TargetLowering *TLI;
+ const TargetInstrInfo *TII;
+ const InstrItineraryData* InstrItins;
+ /// ResourcesModel - Represents VLIW state.
+ /// Not limited to VLIW targets per se, but assumes
+ /// definition of DFA by a target.
+ DFAPacketizer *ResourcesModel;
+
+ /// Resource model - packet/bundle model. Purely
+ /// internal at this time.
+ std::vector<SUnit*> Packet;
+
+ /// Heuristics for estimating register pressure.
+ unsigned ParallelLiveRanges;
+ signed HorizontalVerticalBalance;
+
+ public:
+ ResourcePriorityQueue(SelectionDAGISel *IS);
+
+ ~ResourcePriorityQueue() {
+ delete ResourcesModel;
+ }
+
+ bool isBottomUp() const { return false; }
+
+ void initNodes(std::vector<SUnit> &sunits);
+
+ void addNode(const SUnit *SU) {
+ NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+ }
+
+ void updateNode(const SUnit *SU) {}
+
+ void releaseState() {
+ SUnits = 0;
+ }
+
+ unsigned getLatency(unsigned NodeNum) const {
+ assert(NodeNum < (*SUnits).size());
+ return (*SUnits)[NodeNum].getHeight();
+ }
+
+ unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+ assert(NodeNum < NumNodesSolelyBlocking.size());
+ return NumNodesSolelyBlocking[NodeNum];
+ }
+
+ /// Single cost function reflecting benefit of scheduling SU
+ /// in the current cycle.
+ signed SUSchedulingCost (SUnit *SU);
+
+ /// InitNumRegDefsLeft - Determine the # of regs defined by this node.
+ ///
+ void initNumRegDefsLeft(SUnit *SU);
+ void updateNumRegDefsLeft(SUnit *SU);
+ signed regPressureDelta(SUnit *SU, bool RawPressure = false);
+ signed rawRegPressureDelta (SUnit *SU, unsigned RCId);
+
+ bool empty() const { return Queue.empty(); }
+
+ virtual void push(SUnit *U);
+
+ virtual SUnit *pop();
+
+ virtual void remove(SUnit *SU);
+
+ virtual void dump(ScheduleDAG* DAG) const;
+
+ /// scheduledNode - Main resource tracking point.
+ void scheduledNode(SUnit *Node);
+ bool isResourceAvailable(SUnit *SU);
+ void reserveResources(SUnit *SU);
+
+private:
+ void adjustPriorityOfUnscheduledPreds(SUnit *SU);
+ SUnit *getSingleUnscheduledPred(SUnit *SU);
+ unsigned numberRCValPredInSU (SUnit *SU, unsigned RCId);
+ unsigned numberRCValSuccInSU (SUnit *SU, unsigned RCId);
+ };
+}
+
+#endif
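
The resource_sort functor above keeps a back-pointer to its owning queue, so ordering can consult scheduler state (register pressure, DFA state) that changes while nodes are scheduled. A minimal standalone sketch of that idiom, with hypothetical Toy* names; only the pattern comes from the header above:

    #include <algorithm>
    #include <vector>

    struct ToyUnit { int Priority; };

    class ToyQueue;

    // The comparator delegates to the queue, mirroring resource_sort/Picker.
    struct toy_sort {
      ToyQueue *PQ;
      explicit toy_sort(ToyQueue *pq) : PQ(pq) {}
      bool operator()(const ToyUnit *L, const ToyUnit *R) const;
    };

    class ToyQueue {
      std::vector<ToyUnit*> Queue;
      toy_sort Picker;
    public:
      ToyQueue() : Picker(this) {}
      // Cost may depend on mutable queue state, which a key-based
      // std::priority_queue cannot express.
      int cost(const ToyUnit *U) const { return U->Priority; }
      void push(ToyUnit *U) { Queue.push_back(U); }
      bool empty() const { return Queue.empty(); }
      ToyUnit *pop() {
        std::vector<ToyUnit*>::iterator Best =
            std::max_element(Queue.begin(), Queue.end(), Picker);
        ToyUnit *U = *Best;
        Queue.erase(Best);
        return U;
      }
    };

    bool toy_sort::operator()(const ToyUnit *L, const ToyUnit *R) const {
      return PQ->cost(L) < PQ->cost(R); // max_element then yields highest cost
    }
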
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 1bbc6c5..f4de693 100644
--- a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -8,7 +8,8 @@
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleDAG class, which is used as the common
-// base class for instruction schedulers.
+// base class for instruction schedulers. This encapsulates the scheduling DAG,
+// which is shared between SelectionDAG and MachineInstr scheduling.
//
//===----------------------------------------------------------------------===//
@@ -16,7 +17,7 @@
#define LLVM_CODEGEN_SCHEDULEDAG_H
#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/GraphTraits.h"
@@ -129,8 +130,7 @@ namespace llvm {
Contents.Order.isMustAlias == Other.Contents.Order.isMustAlias &&
Contents.Order.isArtificial == Other.Contents.Order.isArtificial;
}
- assert(0 && "Invalid dependency kind!");
- return false;
+ llvm_unreachable("Invalid dependency kind!");
}
bool operator!=(const SDep &Other) const {
@@ -232,6 +232,7 @@ namespace llvm {
public:
SUnit *OrigNode; // If not this, the node from which
// this node was cloned.
+ // (SD scheduling only)
// Preds/Succs - The SUnits before/after us in the graph.
SmallVector<SDep, 4> Preds; // All sunit predecessors.
@@ -409,6 +410,13 @@ namespace llvm {
return false;
}
+ bool isTopReady() const {
+ return NumPredsLeft == 0;
+ }
+ bool isBottomReady() const {
+ return NumSuccsLeft == 0;
+ }
+
void dump(const ScheduleDAG *G) const;
void dumpAll(const ScheduleDAG *G) const;
void print(raw_ostream &O, const ScheduleDAG *G) const;
@@ -427,6 +435,7 @@ namespace llvm {
/// implementation to decide.
///
class SchedulingPriorityQueue {
+ virtual void anchor();
unsigned CurCycle;
bool HasReadyFilter;
public:
@@ -465,13 +474,13 @@ namespace llvm {
virtual void dump(ScheduleDAG *) const {}
- /// ScheduledNode - As each node is scheduled, this method is invoked. This
+ /// scheduledNode - As each node is scheduled, this method is invoked. This
/// allows the priority function to adjust the priority of related
/// unscheduled nodes, for example.
///
- virtual void ScheduledNode(SUnit *) {}
+ virtual void scheduledNode(SUnit *) {}
- virtual void UnscheduledNode(SUnit *) {}
+ virtual void unscheduledNode(SUnit *) {}
void setCurCycle(unsigned Cycle) {
CurCycle = Cycle;
@@ -484,15 +493,11 @@ namespace llvm {
class ScheduleDAG {
public:
- MachineBasicBlock *BB; // The block in which to insert instructions
- MachineBasicBlock::iterator InsertPos;// The position to insert instructions
const TargetMachine &TM; // Target processor
const TargetInstrInfo *TII; // Target instruction information
const TargetRegisterInfo *TRI; // Target processor register info
MachineFunction &MF; // Machine function
MachineRegisterInfo &MRI; // Virtual/real register map
- std::vector<SUnit*> Sequence; // The schedule. Null SUnit*'s
- // represent noop instructions.
std::vector<SUnit> SUnits; // The scheduling units.
SUnit EntrySU; // Special node for the region entry.
SUnit ExitSU; // Special node for the region exit.
@@ -507,6 +512,9 @@ namespace llvm {
virtual ~ScheduleDAG();
+ /// clearDAG - clear the DAG state (between regions).
+ void clearDAG();
+
/// getInstrDesc - Return the MCInstrDesc of this SUnit.
/// Return NULL for SDNodes without a machine opcode.
const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
@@ -517,66 +525,43 @@ namespace llvm {
/// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
/// using 'dot'.
///
+ void viewGraph(const Twine &Name, const Twine &Title);
void viewGraph();
- /// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock
- /// according to the order specified in Sequence.
- ///
- virtual MachineBasicBlock *EmitSchedule() = 0;
-
- void dumpSchedule() const;
-
virtual void dumpNode(const SUnit *SU) const = 0;
/// getGraphNodeLabel - Return a label for an SUnit node in a visualization
/// of the ScheduleDAG.
virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
+ /// getDAGName - Return a label for the region of code covered by the DAG.
+ virtual std::string getDAGName() const = 0;
+
/// addCustomGraphFeatures - Add custom features for a visualization of
/// the ScheduleDAG.
virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
#ifndef NDEBUG
- /// VerifySchedule - Verify that all SUnits were scheduled and that
- /// their state is consistent.
- void VerifySchedule(bool isBottomUp);
+ /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
+ /// their state is consistent. Return the number of scheduled SUnits.
+ unsigned VerifyScheduledDAG(bool isBottomUp);
#endif
protected:
- /// Run - perform scheduling.
- ///
- void Run(MachineBasicBlock *bb, MachineBasicBlock::iterator insertPos);
-
- /// BuildSchedGraph - Build SUnits and set up their Preds and Succs
- /// to form the scheduling dependency graph.
- ///
- virtual void BuildSchedGraph(AliasAnalysis *AA) = 0;
-
/// computeLatency - Compute node latency.
///
- virtual void ComputeLatency(SUnit *SU) = 0;
+ virtual void computeLatency(SUnit *SU) = 0;
/// computeOperandLatency - Override dependence edge latency using
/// operand use/def information
///
- virtual void ComputeOperandLatency(SUnit *, SUnit *,
+ virtual void computeOperandLatency(SUnit *, SUnit *,
SDep&) const { }
- /// Schedule - Order nodes according to selected style, filling
- /// in the Sequence member.
- ///
- virtual void Schedule() = 0;
-
/// forceUnitLatencies - Return true if all scheduling edges should be given
/// a latency value of one. The default is to return false; schedulers may
/// override this as needed.
- virtual bool ForceUnitLatencies() const { return false; }
-
- /// EmitNoop - Emit a noop instruction.
- ///
- void EmitNoop();
-
- void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap);
+ virtual bool forceUnitLatencies() const { return false; }
private:
// Return the MCInstrDesc of this SDNode or NULL.
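
The visible interface change in the hunks above is the renaming of the SchedulingPriorityQueue callbacks (ScheduledNode/UnscheduledNode become scheduledNode/unscheduledNode) plus the added anchor(). A sketch of a trivial FIFO queue written against the post-patch interface; the class is hypothetical and the set of pure-virtual methods is read off this era of the header:

    #include "llvm/CodeGen/ScheduleDAG.h"
    #include <algorithm>
    #include <vector>

    using namespace llvm;

    class FifoQueue : public SchedulingPriorityQueue {
      std::vector<SUnit*> Q;
    public:
      bool isBottomUp() const { return false; }
      void initNodes(std::vector<SUnit> &) {}
      void addNode(const SUnit *) {}
      void updateNode(const SUnit *) {}
      void releaseState() {}
      bool empty() const { return Q.empty(); }
      void push(SUnit *SU) { Q.push_back(SU); }
      SUnit *pop() {
        SUnit *SU = Q.front();
        Q.erase(Q.begin());
        return SU;
      }
      void remove(SUnit *SU) {
        Q.erase(std::find(Q.begin(), Q.end(), SU));
      }
      // Post-patch (lower-case) spellings of the scheduling callbacks.
      void scheduledNode(SUnit *) {}
      void unscheduledNode(SUnit *) {}
    };
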
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
new file mode 100644
index 0000000..c8de7bc
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -0,0 +1,344 @@
+//==- ScheduleDAGInstrs.h - MachineInstr Scheduling --------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleDAGInstrs class, which implements
+// scheduling for a MachineInstr-based dependency graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCHEDULEDAGINSTRS_H
+#define SCHEDULEDAGINSTRS_H
+
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SparseSet.h"
+#include <map>
+
+namespace llvm {
+ class MachineLoopInfo;
+ class MachineDominatorTree;
+ class LiveIntervals;
+
+ /// LoopDependencies - This class analyzes loop-oriented register
+ /// dependencies, which are used to guide scheduling decisions.
+ /// For example, loop induction variable increments should be
+ /// scheduled as soon as possible after the variable's last use.
+ ///
+ class LoopDependencies {
+ const MachineLoopInfo &MLI;
+ const MachineDominatorTree &MDT;
+
+ public:
+ typedef std::map<unsigned, std::pair<const MachineOperand *, unsigned> >
+ LoopDeps;
+ LoopDeps Deps;
+
+ LoopDependencies(const MachineLoopInfo &mli,
+ const MachineDominatorTree &mdt) :
+ MLI(mli), MDT(mdt) {}
+
+ /// VisitLoop - Clear out any previous state and analyze the given loop.
+ ///
+ void VisitLoop(const MachineLoop *Loop) {
+ assert(Deps.empty() && "stale loop dependencies");
+
+ MachineBasicBlock *Header = Loop->getHeader();
+ SmallSet<unsigned, 8> LoopLiveIns;
+ for (MachineBasicBlock::livein_iterator LI = Header->livein_begin(),
+ LE = Header->livein_end(); LI != LE; ++LI)
+ LoopLiveIns.insert(*LI);
+
+ const MachineDomTreeNode *Node = MDT.getNode(Header);
+ const MachineBasicBlock *MBB = Node->getBlock();
+ assert(Loop->contains(MBB) &&
+ "Loop does not contain header!");
+ VisitRegion(Node, MBB, Loop, LoopLiveIns);
+ }
+
+ private:
+ void VisitRegion(const MachineDomTreeNode *Node,
+ const MachineBasicBlock *MBB,
+ const MachineLoop *Loop,
+ const SmallSet<unsigned, 8> &LoopLiveIns) {
+ unsigned Count = 0;
+ for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+ I != E; ++I) {
+ const MachineInstr *MI = I;
+ if (MI->isDebugValue())
+ continue;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (LoopLiveIns.count(MOReg))
+ Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
+ }
+ ++Count; // Not every iteration due to dbg_value above.
+ }
+
+ const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
+ for (std::vector<MachineDomTreeNode*>::const_iterator I =
+ Children.begin(), E = Children.end(); I != E; ++I) {
+ const MachineDomTreeNode *ChildNode = *I;
+ MachineBasicBlock *ChildBlock = ChildNode->getBlock();
+ if (Loop->contains(ChildBlock))
+ VisitRegion(ChildNode, ChildBlock, Loop, LoopLiveIns);
+ }
+ }
+ };
+
+ /// An individual mapping from virtual register number to SUnit.
+ struct VReg2SUnit {
+ unsigned VirtReg;
+ SUnit *SU;
+
+ VReg2SUnit(unsigned reg, SUnit *su): VirtReg(reg), SU(su) {}
+
+ unsigned getSparseSetKey() const {
+ return TargetRegisterInfo::virtReg2Index(VirtReg);
+ }
+ };
+
+ /// Combine a SparseSet with a 1x1 vector to track physical registers.
+ /// The SparseSet allows iterating over the (few) live registers for quickly
+ /// comparing against a regmask or clearing the set.
+ ///
+ /// Storage for the map is allocated once for the pass. The map can be
+ /// cleared between scheduling regions without freeing unused entries.
+ class Reg2SUnitsMap {
+ SparseSet<unsigned> PhysRegSet;
+ std::vector<std::vector<SUnit*> > SUnits;
+ public:
+ typedef SparseSet<unsigned>::const_iterator const_iterator;
+
+ // Allow iteration over register numbers (keys) in the map. If needed, we
+ // can provide an iterator over SUnits (values) as well.
+ const_iterator reg_begin() const { return PhysRegSet.begin(); }
+ const_iterator reg_end() const { return PhysRegSet.end(); }
+
+ /// Initialize the map with the number of registers.
+ /// If the map is already large enough, no allocation occurs.
+ /// For simplicity we expect the map to be empty().
+ void setRegLimit(unsigned Limit);
+
+ /// Returns true if the map is empty.
+ bool empty() const { return PhysRegSet.empty(); }
+
+ /// Clear the map without deallocating storage.
+ void clear();
+
+ bool contains(unsigned Reg) const { return PhysRegSet.count(Reg); }
+
+ /// If this register is mapped, return its existing SUnits vector.
+ /// Otherwise map the register and return an empty SUnits vector.
+ std::vector<SUnit *> &operator[](unsigned Reg) {
+ bool New = PhysRegSet.insert(Reg).second;
+ assert((!New || SUnits[Reg].empty()) && "stale SUnits vector");
+ (void)New;
+ return SUnits[Reg];
+ }
+
+ /// Erase an existing element without freeing memory.
+ void erase(unsigned Reg) {
+ PhysRegSet.erase(Reg);
+ SUnits[Reg].clear();
+ }
+ };
+
+ /// Use SparseSet as a SparseMap by relying on the fact that it never
+ /// compares ValueT's, only unsigned keys. This allows the set to be cleared
+ /// between scheduling regions in constant time as long as ValueT does not
+ /// require a destructor.
+ typedef SparseSet<VReg2SUnit> VReg2SUnitMap;
+
+ /// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
+ /// MachineInstrs.
+ class ScheduleDAGInstrs : public ScheduleDAG {
+ protected:
+ const MachineLoopInfo &MLI;
+ const MachineDominatorTree &MDT;
+ const MachineFrameInfo *MFI;
+ const InstrItineraryData *InstrItins;
+
+ /// Live Intervals provides reaching defs in preRA scheduling.
+ LiveIntervals *LIS;
+
+ /// isPostRA flag indicates vregs cannot be present.
+ bool IsPostRA;
+
+ /// UnitLatencies (misnamed) flag avoids computing def-use latencies, using
+ /// the def-side latency only.
+ bool UnitLatencies;
+
+ /// State specific to the current scheduling region.
+ /// ------------------------------------------------
+
+ /// The block in which to insert instructions
+ MachineBasicBlock *BB;
+
+ /// The beginning of the range to be scheduled.
+ MachineBasicBlock::iterator RegionBegin;
+
+ /// The end of the range to be scheduled.
+ MachineBasicBlock::iterator RegionEnd;
+
+ /// The index in BB of RegionEnd.
+ unsigned EndIndex;
+
+ /// After calling BuildSchedGraph, each machine instruction in the current
+ /// scheduling region is mapped to an SUnit.
+ DenseMap<MachineInstr*, SUnit*> MISUnitMap;
+
+ /// State internal to DAG building.
+ /// -------------------------------
+
+ /// Defs, Uses - Remember where defs and uses of each register are as we
+ /// iterate upward through the instructions. This is allocated here instead
+ /// of inside BuildSchedGraph to avoid the need for it to be initialized and
+ /// destructed for each block.
+ Reg2SUnitsMap Defs;
+ Reg2SUnitsMap Uses;
+
+ /// Track the last instruction in this region defining each virtual register.
+ VReg2SUnitMap VRegDefs;
+
+ /// PendingLoads - Remember where unknown loads are after the most recent
+ /// unknown store, as we iterate. As with Defs and Uses, this is here
+ /// to minimize construction/destruction.
+ std::vector<SUnit *> PendingLoads;
+
+ /// LoopRegs - Track which registers are used for loop-carried dependencies.
+ ///
+ LoopDependencies LoopRegs;
+
+ /// DbgValues - Remember the instruction that precedes each DBG_VALUE.
+ /// These are generated by buildSchedGraph but persist so they can be
+ /// referenced when emitting the final schedule.
+ typedef std::vector<std::pair<MachineInstr *, MachineInstr *> >
+ DbgValueVector;
+ DbgValueVector DbgValues;
+ MachineInstr *FirstDbgValue;
+
+ public:
+ explicit ScheduleDAGInstrs(MachineFunction &mf,
+ const MachineLoopInfo &mli,
+ const MachineDominatorTree &mdt,
+ bool IsPostRAFlag,
+ LiveIntervals *LIS = 0);
+
+ virtual ~ScheduleDAGInstrs() {}
+
+ /// begin - Return an iterator to the top of the current scheduling region.
+ MachineBasicBlock::iterator begin() const { return RegionBegin; }
+
+ /// end - Return an iterator to the bottom of the current scheduling region.
+ MachineBasicBlock::iterator end() const { return RegionEnd; }
+
+ /// newSUnit - Creates a new SUnit and returns a ptr to it.
+ SUnit *newSUnit(MachineInstr *MI);
+
+ /// getSUnit - Return an existing SUnit for this MI, or NULL.
+ SUnit *getSUnit(MachineInstr *MI) const;
+
+ /// startBlock - Prepare to perform scheduling in the given block.
+ virtual void startBlock(MachineBasicBlock *BB);
+
+ /// finishBlock - Clean up after scheduling in the given block.
+ virtual void finishBlock();
+
+ /// Initialize the scheduler state for the next scheduling region.
+ virtual void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned endcount);
+
+ /// Notify that the scheduler has finished scheduling the current region.
+ virtual void exitRegion();
+
+ /// buildSchedGraph - Build SUnits from the MachineBasicBlock that we are
+ /// scheduling.
+ void buildSchedGraph(AliasAnalysis *AA);
+
+ /// addSchedBarrierDeps - Add dependencies from instructions in the current
+ /// list of instructions being scheduled to the scheduling barrier. We want
+ /// to make sure instructions which define registers that are either used
+ /// by the terminator or are live-out are properly scheduled. This is
+ /// especially important when the definition latency of the return value(s)
+ /// is too high to be hidden by the branch, or when the live-out registers
+ /// are used by instructions in the fallthrough block.
+ void addSchedBarrierDeps();
+
+ /// computeLatency - Compute node latency.
+ ///
+ virtual void computeLatency(SUnit *SU);
+
+ /// computeOperandLatency - Override dependence edge latency using
+ /// operand use/def information
+ ///
+ virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
+ SDep& dep) const;
+
+ /// schedule - Order nodes according to selected style, filling
+ /// in the Sequence member.
+ ///
+ /// Typically, a scheduling algorithm will implement schedule() without
+ /// overriding enterRegion() or exitRegion().
+ virtual void schedule() = 0;
+
+ /// finalizeSchedule - Allow targets to perform final scheduling actions at
+ /// the level of the whole MachineFunction. By default does nothing.
+ virtual void finalizeSchedule() {}
+
+ virtual void dumpNode(const SUnit *SU) const;
+
+ /// Return a label for a DAG node that points to an instruction.
+ virtual std::string getGraphNodeLabel(const SUnit *SU) const;
+
+ /// Return a label for the region of code covered by the DAG.
+ virtual std::string getDAGName() const;
+
+ protected:
+ void initSUnits();
+ void addPhysRegDataDeps(SUnit *SU, const MachineOperand &MO);
+ void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
+ void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
+ void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
+
+ VReg2SUnitMap::iterator findVRegDef(unsigned VirtReg) {
+ return VRegDefs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
+ }
+ };
+
+ /// newSUnit - Creates a new SUnit and returns a ptr to it.
+ inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
+#ifndef NDEBUG
+ const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0];
+#endif
+ SUnits.push_back(SUnit(MI, (unsigned)SUnits.size()));
+ assert((Addr == 0 || Addr == &SUnits[0]) &&
+ "SUnits std::vector reallocated on the fly!");
+ SUnits.back().OrigNode = &SUnits.back();
+ return &SUnits.back();
+ }
+
+ /// getSUnit - Return an existing SUnit for this MI, or NULL.
+ inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
+ DenseMap<MachineInstr*, SUnit*>::const_iterator I = MISUnitMap.find(MI);
+ if (I == MISUnitMap.end())
+ return 0;
+ return I->second;
+ }
+} // namespace llvm
+
+#endif
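
The startBlock/enterRegion/schedule/exitRegion/finishBlock split implies a per-region driver loop of roughly the following shape. The free function is illustrative (the real drivers live in the scheduling passes); only the member calls are taken from the header above:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/CodeGen/ScheduleDAGInstrs.h"

    using namespace llvm;

    // SD is any concrete subclass that implements schedule().
    static void scheduleRegion(ScheduleDAGInstrs &SD, MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator Begin,
                               MachineBasicBlock::iterator End,
                               unsigned EndCount, AliasAnalysis *AA) {
      SD.startBlock(MBB);
      SD.enterRegion(MBB, Begin, End, EndCount);
      SD.buildSchedGraph(AA); // create SUnits and dependence edges
      SD.schedule();          // subclass-defined ordering
      SD.exitRegion();
      SD.finishBlock();
    }
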
diff --git a/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h b/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
index 96573dd..a582b0c 100644
--- a/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
+++ b/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
@@ -42,7 +42,7 @@ public:
: MachinePassRegistryNode(N, D, (MachinePassCtor)C)
{ Registry.Add(this); }
~RegisterScheduler() { Registry.Remove(this); }
-
+
// Accessors.
//
@@ -68,11 +68,6 @@ public:
ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
-/// createTDRRListDAGScheduler - This creates a top down register usage
-/// reduction list scheduler.
-ScheduleDAGSDNodes *createTDRRListDAGScheduler(SelectionDAGISel *IS,
- CodeGenOpt::Level OptLevel);
-
/// createSourceListDAGScheduler - This creates a bottom up list scheduler that
/// schedules nodes in source code order when possible.
ScheduleDAGSDNodes *createSourceListDAGScheduler(SelectionDAGISel *IS,
@@ -91,16 +86,17 @@ ScheduleDAGSDNodes *createHybridListDAGScheduler(SelectionDAGISel *IS,
/// to reduce register pressure.
ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level);
-/// createTDListDAGScheduler - This creates a top-down list scheduler with
-/// a hazard recognizer.
-ScheduleDAGSDNodes *createTDListDAGScheduler(SelectionDAGISel *IS,
- CodeGenOpt::Level OptLevel);
/// createFastDAGScheduler - This creates a "fast" scheduler.
///
ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
+/// createVLIWDAGScheduler - Scheduler for VLIW targets. This creates a
+/// top-down, DFA-driven list scheduler with a clustering heuristic to control
+/// register pressure.
+ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
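
A factory like createVLIWDAGScheduler is normally exposed to -pre-RA-sched through the RegisterScheduler registry declared earlier in this header. A sketch of the usual registration idiom; the "vliw-td" option string is an assumption, since this diff only shows the factory declaration:

    #include "llvm/CodeGen/SchedulerRegistry.h"

    static llvm::RegisterScheduler
      VLIWScheduler("vliw-td", "VLIW scheduler with a DFA resource model",
                    llvm::createVLIWDAGScheduler);
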
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
index 132983c..6a7a87e 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -51,7 +51,7 @@ public:
static void noteHead(SDNode*, SDNode*) {}
static void deleteNode(SDNode *) {
- assert(0 && "ilist_traits<SDNode> shouldn't see a deleteNode call!");
+ llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
}
private:
static void createNode(const SDNode &);
@@ -112,9 +112,10 @@ public:
};
enum CombineLevel {
- Unrestricted, // Combine may create illegal operations and illegal types.
- NoIllegalTypes, // Combine may create illegal operations but no illegal types.
- NoIllegalOperations // Combine may only create legal operations and types.
+ BeforeLegalizeTypes,
+ AfterLegalizeTypes,
+ AfterLegalizeVectorOps,
+ AfterLegalizeDAG
};
class SelectionDAG;
@@ -138,6 +139,7 @@ class SelectionDAG {
const TargetSelectionDAGInfo &TSI;
MachineFunction *MF;
LLVMContext *Context;
+ CodeGenOpt::Level OptLevel;
/// EntryNode - The starting token.
SDNode EntryNode;
@@ -186,7 +188,7 @@ class SelectionDAG {
SelectionDAG(const SelectionDAG&); // Do not implement.
public:
- explicit SelectionDAG(const TargetMachine &TM);
+ explicit SelectionDAG(const TargetMachine &TM, llvm::CodeGenOpt::Level);
~SelectionDAG();
/// init - Prepare this SelectionDAG to process code in the given
@@ -392,6 +394,7 @@ public:
unsigned char TargetFlags = 0);
SDValue getValueType(EVT);
SDValue getRegister(unsigned Reg, EVT VT);
+ SDValue getRegisterMask(const uint32_t *RegMask);
SDValue getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label);
SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
bool isTarget = false, unsigned char TargetFlags = 0);
@@ -650,8 +653,8 @@ public:
///
SDValue getLoad(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo, bool isVolatile,
- bool isNonTemporal, unsigned Alignment,
- const MDNode *TBAAInfo = 0);
+ bool isNonTemporal, bool isInvariant, unsigned Alignment,
+ const MDNode *TBAAInfo = 0, const MDNode *Ranges = 0);
SDValue getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo,
EVT MemVT, bool isVolatile,
@@ -663,8 +666,9 @@ public:
EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT,
- bool isVolatile, bool isNonTemporal, unsigned Alignment,
- const MDNode *TBAAInfo = 0);
+ bool isVolatile, bool isNonTemporal, bool isInvariant,
+ unsigned Alignment, const MDNode *TBAAInfo = 0,
+ const MDNode *Ranges = 0);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
@@ -976,8 +980,8 @@ public:
/// bitsets. Targets can implement the computeMaskedBitsForTargetNode
/// method in the TargetLowering class to allow target nodes to be understood.
- void ComputeMaskedBits(SDValue Op, const APInt &Mask, APInt &KnownZero,
- APInt &KnownOne, unsigned Depth = 0) const;
+ void ComputeMaskedBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
+ unsigned Depth = 0) const;
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
@@ -1033,6 +1037,7 @@ private:
void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, const SDValue *Ops, unsigned NumOps,
void *&InsertPos);
+ SDNode *UpdadeDebugLocOnMergedSDNode(SDNode *N, DebugLoc loc);
void DeleteNodeNotInCSEMaps(SDNode *N);
void DeallocateNode(SDNode *N);
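
The ComputeMaskedBits change drops the explicit Mask argument, so the analysis now always computes all bits of the operand. A call-site migration sketch; DAG and Op stand for whatever node and DAG are in scope at a real caller:

    #include "llvm/ADT/APInt.h"
    #include "llvm/CodeGen/SelectionDAG.h"

    void knownBitsExample(llvm::SelectionDAG &DAG, llvm::SDValue Op) {
      llvm::APInt KnownZero, KnownOne;
      // Before: DAG.ComputeMaskedBits(Op, Mask, KnownZero, KnownOne);
      DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
      if (KnownOne.getBoolValue()) {
        // Some bit of Op is known to be one, so Op is known non-zero.
      }
    }
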
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index ecf3947..ee3f231 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -29,6 +29,7 @@ namespace llvm {
class MachineFunction;
class MachineInstr;
class TargetLowering;
+ class TargetLibraryInfo;
class TargetInstrInfo;
class FunctionLoweringInfo;
class ScheduleHazardRecognizer;
@@ -42,6 +43,7 @@ class SelectionDAGISel : public MachineFunctionPass {
public:
const TargetMachine &TM;
const TargetLowering &TLI;
+ const TargetLibraryInfo *LibInfo;
FunctionLoweringInfo *FuncInfo;
MachineFunction *MF;
MachineRegisterInfo *RegInfo;
@@ -92,7 +94,7 @@ public:
/// IsLegalToFold - Returns true if the specific operand node N of
/// U can be folded during instruction selection that starts at Root.
- /// FIXME: This is a static member function because the MSP430/SystemZ/X86
+ /// FIXME: This is a static member function because the MSP430/X86
/// targets use it during isel. This could become a proper member.
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
CodeGenOpt::Level OptLevel,
@@ -179,6 +181,7 @@ protected:
/// ISelUpdater - helper class to handle updates of the
/// instruction selection graph.
class ISelUpdater : public SelectionDAG::DAGUpdateListener {
+ virtual void anchor();
SelectionDAG::allnodes_iterator &ISelPosition;
public:
explicit ISelUpdater(SelectionDAG::allnodes_iterator &isp)
@@ -237,8 +240,7 @@ public:
/// succeeds or false if it fails. The number is a private implementation
/// detail to the code tblgen produces.
virtual bool CheckPatternPredicate(unsigned PredNo) const {
- assert(0 && "Tblgen should generate the implementation of this!");
- return 0;
+ llvm_unreachable("Tblgen should generate the implementation of this!");
}
/// CheckNodePredicate - This function is generated by tblgen in the target.
@@ -246,20 +248,17 @@ public:
/// false if it fails. The number is a private implementation
/// detail to the code tblgen produces.
virtual bool CheckNodePredicate(SDNode *N, unsigned PredNo) const {
- assert(0 && "Tblgen should generate the implementation of this!");
- return 0;
+ llvm_unreachable("Tblgen should generate the implementation of this!");
}
virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
unsigned PatternNo,
SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
- assert(0 && "Tblgen should generate the implementation of this!");
- return false;
+ llvm_unreachable("Tblgen should generate the implementation of this!");
}
virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
- assert(0 && "Tblgen should generate this!");
- return SDValue();
+ llvm_unreachable("Tblgen should generate this!");
}
SDNode *SelectCodeCommon(SDNode *NodeToMatch,
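
A recurring mechanical edit in this patch replaces assert(0 && ...) plus a dummy return with llvm_unreachable. Why that type-checks, as a small sketch:

    #include "llvm/Support/ErrorHandling.h"

    int mustBeGenerated() {
      // llvm_unreachable is declared noreturn, so the dummy 'return 0;' that
      // had to follow assert(0 && ...) can simply be deleted.
      llvm_unreachable("Tblgen should generate the implementation of this!");
    }
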
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 6c7be69..f8248b8 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -917,12 +917,13 @@ public:
// with MachineMemOperand information.
bool isVolatile() const { return (SubclassData >> 5) & 1; }
bool isNonTemporal() const { return (SubclassData >> 6) & 1; }
+ bool isInvariant() const { return (SubclassData >> 7) & 1; }
AtomicOrdering getOrdering() const {
- return AtomicOrdering((SubclassData >> 7) & 15);
+ return AtomicOrdering((SubclassData >> 8) & 15);
}
SynchronizationScope getSynchScope() const {
- return SynchronizationScope((SubclassData >> 11) & 1);
+ return SynchronizationScope((SubclassData >> 12) & 1);
}
/// Returns the SrcValue and offset that describes the location of the access
@@ -932,6 +933,9 @@ public:
/// Returns the TBAAInfo that describes the dereference.
const MDNode *getTBAAInfo() const { return MMO->getTBAAInfo(); }
+ /// Returns the Ranges that describes the dereference.
+ const MDNode *getRanges() const { return MMO->getRanges(); }
+
/// getMemoryVT - Return the type of the in-memory value.
EVT getMemoryVT() const { return MemoryVT; }
@@ -993,8 +997,8 @@ class AtomicSDNode : public MemSDNode {
"Ordering may not require more than 4 bits!");
assert((SynchScope & 1) == SynchScope &&
"SynchScope may not require more than 1 bit!");
- SubclassData |= Ordering << 7;
- SubclassData |= SynchScope << 11;
+ SubclassData |= Ordering << 8;
+ SubclassData |= SynchScope << 12;
assert(getOrdering() == Ordering && "Ordering encoding error!");
assert(getSynchScope() == SynchScope && "Synch-scope encoding error!");
@@ -1113,11 +1117,9 @@ protected:
}
public:
- void getMask(SmallVectorImpl<int> &M) const {
+ ArrayRef<int> getMask() const {
EVT VT = getValueType(0);
- M.clear();
- for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
- M.push_back(Mask[i]);
+ return makeArrayRef(Mask, VT.getVectorNumElements());
}
int getMaskElt(unsigned Idx) const {
assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
@@ -1434,6 +1436,23 @@ public:
}
};
+class RegisterMaskSDNode : public SDNode {
+ // The memory for RegMask is not owned by the node.
+ const uint32_t *RegMask;
+ friend class SelectionDAG;
+ RegisterMaskSDNode(const uint32_t *mask)
+ : SDNode(ISD::RegisterMask, DebugLoc(), getSDVTList(MVT::Untyped)),
+ RegMask(mask) {}
+public:
+
+ const uint32_t *getRegMask() const { return RegMask; }
+
+ static bool classof(const RegisterMaskSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::RegisterMask;
+ }
+};
+
class BlockAddressSDNode : public SDNode {
const BlockAddress *BA;
unsigned char TargetFlags;
@@ -1684,6 +1703,8 @@ public:
/// setMemRefs - Assign this MachineSDNode's memory reference descriptor
/// list. This does not transfer ownership.
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
+ for (mmo_iterator MMI = NewMemRefs, MME = NewMemRefsEnd; MMI != MME; ++MMI)
+ assert(*MMI && "Null mem ref detected!");
MemRefs = NewMemRefs;
MemRefsEnd = NewMemRefsEnd;
}
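
ShuffleVectorSDNode::getMask now returns an ArrayRef into the node's own mask storage instead of copying into a caller-provided SmallVector. A migration sketch; SVN stands for a ShuffleVectorSDNode in scope at a real caller:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    void shuffleMaskExample(const llvm::ShuffleVectorSDNode *SVN) {
      // Before: SmallVector<int, 8> Mask; SVN->getMask(Mask);
      llvm::ArrayRef<int> Mask = SVN->getMask();
      for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
        int Elt = Mask[i]; // -1 encodes an undef shuffle element
        (void)Elt;
      }
    }
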
diff --git a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
index 2d98864..d868cb8 100644
--- a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
@@ -19,7 +19,7 @@
#ifndef LLVM_CODEGEN_SLOTINDEXES_H
#define LLVM_CODEGEN_SLOTINDEXES_H
-#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/ADT/PointerIntPair.h"
@@ -83,7 +83,29 @@ namespace llvm {
friend class SlotIndexes;
friend struct DenseMapInfo<SlotIndex>;
- enum Slot { LOAD, USE, DEF, STORE, NUM };
+ enum Slot {
+ /// Basic block boundary. Used for live ranges entering and leaving a
+ /// block without being live in the layout neighbor. Also used as the
+ /// def slot of PHI-defs.
+ Slot_Block,
+
+ /// Early-clobber register use/def slot. A live range defined at
+ /// Slot_EarlyCLobber interferes with normal live ranges killed at
+ /// Slot_Register. Also used as the kill slot for live ranges tied to an
+ /// early-clobber def.
+ Slot_EarlyClobber,
+
+ /// Normal register use/def slot. Normal instructions kill and define
+ /// register live ranges at this slot.
+ Slot_Register,
+
+ /// Dead def kill point. Kill slot for a live range that is defined by
+ /// the same instruction (Slot_Register or Slot_EarlyClobber), but isn't
+ /// used anywhere.
+ Slot_Dead,
+
+ Slot_Count
+ };
PointerIntPair<IndexListEntry*, 2, unsigned> lie;
@@ -113,7 +135,7 @@ namespace llvm {
enum {
/// The default distance between instructions as returned by distance().
/// This may vary as instructions are inserted and removed.
- InstrDist = 4*NUM
+ InstrDist = 4 * Slot_Count
};
static inline SlotIndex getEmptyKey() {
@@ -186,69 +208,55 @@ namespace llvm {
return A.lie.getPointer() == B.lie.getPointer();
}
+ /// isEarlierInstr - Return true if A refers to an instruction earlier than
+ /// B. This is equivalent to A < B && !isSameInstr(A, B).
+ static bool isEarlierInstr(SlotIndex A, SlotIndex B) {
+ return A.entry().getIndex() < B.entry().getIndex();
+ }
+
/// Return the distance from this index to the given one.
int distance(SlotIndex other) const {
return other.getIndex() - getIndex();
}
- /// isLoad - Return true if this is a LOAD slot.
- bool isLoad() const {
- return getSlot() == LOAD;
- }
+ /// isBlock - Returns true if this is a block boundary slot.
+ bool isBlock() const { return getSlot() == Slot_Block; }
- /// isDef - Return true if this is a DEF slot.
- bool isDef() const {
- return getSlot() == DEF;
- }
+ /// isEarlyClobber - Returns true if this is an early-clobber slot.
+ bool isEarlyClobber() const { return getSlot() == Slot_EarlyClobber; }
- /// isUse - Return true if this is a USE slot.
- bool isUse() const {
- return getSlot() == USE;
- }
+ /// isRegister - Returns true if this is a normal register use/def slot.
+ /// Note that early-clobber slots may also be used for uses and defs.
+ bool isRegister() const { return getSlot() == Slot_Register; }
- /// isStore - Return true if this is a STORE slot.
- bool isStore() const {
- return getSlot() == STORE;
- }
+ /// isDead - Returns true if this is a dead def kill slot.
+ bool isDead() const { return getSlot() == Slot_Dead; }
/// Returns the base index associated with this index. The base index
- /// is the one associated with the LOAD slot for the instruction pointed to
- /// by this index.
+ /// is the one associated with the Slot_Block slot for the instruction
+ /// pointed to by this index.
SlotIndex getBaseIndex() const {
- return getLoadIndex();
+ return SlotIndex(&entry(), Slot_Block);
}
/// Returns the boundary index associated with this index. The boundary
- /// index is the one associated with the LOAD slot for the instruction
+ /// index is the one associated with the Slot_Block slot for the instruction
/// pointed to by this index.
SlotIndex getBoundaryIndex() const {
- return getStoreIndex();
+ return SlotIndex(&entry(), Slot_Dead);
}
- /// Returns the index of the LOAD slot for the instruction pointed to by
- /// this index.
- SlotIndex getLoadIndex() const {
- return SlotIndex(&entry(), SlotIndex::LOAD);
- }
-
- /// Returns the index of the USE slot for the instruction pointed to by
- /// this index.
- SlotIndex getUseIndex() const {
- return SlotIndex(&entry(), SlotIndex::USE);
+ /// Returns the register use/def slot in the current instruction for a
+ /// normal or early-clobber def.
+ SlotIndex getRegSlot(bool EC = false) const {
+ return SlotIndex(&entry(), EC ? Slot_EarlyClobber : Slot_Register);
}
- /// Returns the index of the DEF slot for the instruction pointed to by
- /// this index.
- SlotIndex getDefIndex() const {
- return SlotIndex(&entry(), SlotIndex::DEF);
+ /// Returns the dead def kill slot for the current instruction.
+ SlotIndex getDeadSlot() const {
+ return SlotIndex(&entry(), Slot_Dead);
}
- /// Returns the index of the STORE slot for the instruction pointed to by
- /// this index.
- SlotIndex getStoreIndex() const {
- return SlotIndex(&entry(), SlotIndex::STORE);
- }
-
/// Returns the next slot in the index list. This could be either the
/// next slot for the instruction pointed to by this index or, if this
/// index is a Slot_Dead, the first slot for the next instruction.
@@ -257,8 +265,8 @@ namespace llvm {
/// use one of those methods.
SlotIndex getNextSlot() const {
Slot s = getSlot();
- if (s == SlotIndex::STORE) {
- return SlotIndex(entry().getNext(), SlotIndex::LOAD);
+ if (s == Slot_Dead) {
+ return SlotIndex(entry().getNext(), Slot_Block);
}
return SlotIndex(&entry(), s + 1);
}
@@ -271,14 +279,14 @@ namespace llvm {
/// Returns the previous slot in the index list. This could be either the
/// previous slot for the instruction pointed to by this index or, if this
- /// index is a LOAD, the last slot for the previous instruction.
+ /// index is a Slot_Block, the last slot for the previous instruction.
/// WARNING: This method is considerably more expensive than the methods
/// that return specific slots (getRegSlot(), etc). If you can - please
/// use one of those methods.
SlotIndex getPrevSlot() const {
Slot s = getSlot();
- if (s == SlotIndex::LOAD) {
- return SlotIndex(entry().getPrev(), SlotIndex::STORE);
+ if (s == Slot_Block) {
+ return SlotIndex(entry().getPrev(), Slot_Dead);
}
return SlotIndex(&entry(), s - 1);
}
@@ -464,11 +472,6 @@ namespace llvm {
return SlotIndex(back(), 0);
}
- /// Returns the invalid index marker for this analysis.
- SlotIndex getInvalidIndex() {
- return getZeroIndex();
- }
-
/// Returns the distance between the highest and lowest indexes allocated
/// so far.
unsigned getIndexesLength() const {
@@ -486,12 +489,13 @@ namespace llvm {
/// Returns true if the given machine instr is mapped to an index,
/// otherwise returns false.
bool hasIndex(const MachineInstr *instr) const {
- return (mi2iMap.find(instr) != mi2iMap.end());
+ return mi2iMap.count(instr);
}
/// Returns the base index for the given instruction.
- SlotIndex getInstructionIndex(const MachineInstr *instr) const {
- Mi2IndexMap::const_iterator itr = mi2iMap.find(instr);
+ SlotIndex getInstructionIndex(const MachineInstr *MI) const {
+ // Instructions inside a bundle have the same number as the bundle itself.
+ Mi2IndexMap::const_iterator itr = mi2iMap.find(getBundleStart(MI));
assert(itr != mi2iMap.end() && "Instruction not found in maps.");
return itr->second;
}
@@ -645,6 +649,8 @@ namespace llvm {
/// instructions, create the new index after the null indexes instead of
/// before them.
SlotIndex insertMachineInstrInMaps(MachineInstr *mi, bool Late = false) {
+ assert(!mi->isInsideBundle() &&
+ "Instructions inside bundles should use bundle start's slot.");
assert(mi2iMap.find(mi) == mi2iMap.end() && "Instr already indexed.");
// Numbering DBG_VALUE instructions could cause code generation to be
// affected by debug information.
@@ -677,7 +683,7 @@ namespace llvm {
if (dist == 0)
renumberIndexes(newEntry);
- SlotIndex newIndex(newEntry, SlotIndex::LOAD);
+ SlotIndex newIndex(newEntry, SlotIndex::Slot_Block);
mi2iMap.insert(std::make_pair(mi, newIndex));
return newIndex;
}
@@ -728,8 +734,8 @@ namespace llvm {
insert(nextEntry, startEntry);
insert(nextEntry, stopEntry);
- SlotIndex startIdx(startEntry, SlotIndex::LOAD);
- SlotIndex endIdx(nextEntry, SlotIndex::LOAD);
+ SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
+ SlotIndex endIdx(nextEntry, SlotIndex::Slot_Block);
assert(unsigned(mbb->getNumber()) == MBBRanges.size() &&
"Blocks must be added in order");
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index ca40ccf..5a42136 100644
--- a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -15,9 +15,9 @@
#ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
#define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/ADT/StringRef.h"
namespace llvm {
class MachineModuleInfo;
@@ -65,6 +65,11 @@ public:
virtual MCSymbol *
getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
MachineModuleInfo *MMI) const;
+
+ virtual const MCSection *
+ getStaticCtorSection(unsigned Priority = 65535) const;
+ virtual const MCSection *
+ getStaticDtorSection(unsigned Priority = 65535) const;
};
@@ -73,6 +78,12 @@ class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
public:
virtual ~TargetLoweringObjectFileMachO() {}
+ /// emitModuleFlags - Emit the module flags that specify the garbage
+ /// collection information.
+ virtual void emitModuleFlags(MCStreamer &Streamer,
+ ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
+ Mangler *Mang, const TargetMachine &TM) const;
+
virtual const MCSection *
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const;
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
index cae0bcb..76c2357 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -16,10 +16,11 @@
#ifndef LLVM_CODEGEN_VALUETYPES_H
#define LLVM_CODEGEN_VALUETYPES_H
-#include <cassert>
-#include <string>
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <string>
namespace llvm {
class Type;
@@ -45,49 +46,56 @@ namespace llvm {
FIRST_INTEGER_VALUETYPE = i1,
LAST_INTEGER_VALUETYPE = i128,
- f32 = 7, // This is a 32 bit floating point value
- f64 = 8, // This is a 64 bit floating point value
- f80 = 9, // This is a 80 bit floating point value
- f128 = 10, // This is a 128 bit floating point value
- ppcf128 = 11, // This is a PPC 128-bit floating point value
-
- v2i8 = 12, // 2 x i8
- v4i8 = 13, // 4 x i8
- v8i8 = 14, // 8 x i8
- v16i8 = 15, // 16 x i8
- v32i8 = 16, // 32 x i8
- v2i16 = 17, // 2 x i16
- v4i16 = 18, // 4 x i16
- v8i16 = 19, // 8 x i16
- v16i16 = 20, // 16 x i16
- v2i32 = 21, // 2 x i32
- v4i32 = 22, // 4 x i32
- v8i32 = 23, // 8 x i32
- v1i64 = 24, // 1 x i64
- v2i64 = 25, // 2 x i64
- v4i64 = 26, // 4 x i64
- v8i64 = 27, // 8 x i64
-
- v2f32 = 28, // 2 x f32
- v4f32 = 29, // 4 x f32
- v8f32 = 30, // 8 x f32
- v2f64 = 31, // 2 x f64
- v4f64 = 32, // 4 x f64
+ f16 = 7, // This is a 16 bit floating point value
+ f32 = 8, // This is a 32 bit floating point value
+ f64 = 9, // This is a 64 bit floating point value
+ f80 = 10, // This is a 80 bit floating point value
+ f128 = 11, // This is a 128 bit floating point value
+ ppcf128 = 12, // This is a PPC 128-bit floating point value
+
+ FIRST_FP_VALUETYPE = f16,
+ LAST_FP_VALUETYPE = ppcf128,
+
+ v2i8 = 13, // 2 x i8
+ v4i8 = 14, // 4 x i8
+ v8i8 = 15, // 8 x i8
+ v16i8 = 16, // 16 x i8
+ v32i8 = 17, // 32 x i8
+ v2i16 = 18, // 2 x i16
+ v4i16 = 19, // 4 x i16
+ v8i16 = 20, // 8 x i16
+ v16i16 = 21, // 16 x i16
+ v2i32 = 22, // 2 x i32
+ v4i32 = 23, // 4 x i32
+ v8i32 = 24, // 8 x i32
+ v1i64 = 25, // 1 x i64
+ v2i64 = 26, // 2 x i64
+ v4i64 = 27, // 4 x i64
+ v8i64 = 28, // 8 x i64
+
+ v2f16 = 29, // 2 x f16
+ v2f32 = 30, // 2 x f32
+ v4f32 = 31, // 4 x f32
+ v8f32 = 32, // 8 x f32
+ v2f64 = 33, // 2 x f64
+ v4f64 = 34, // 4 x f64
FIRST_VECTOR_VALUETYPE = v2i8,
LAST_VECTOR_VALUETYPE = v4f64,
+ FIRST_FP_VECTOR_VALUETYPE = v2f16,
+ LAST_FP_VECTOR_VALUETYPE = v4f64,
- x86mmx = 33, // This is an X86 MMX value
+ x86mmx = 35, // This is an X86 MMX value
- Glue = 34, // This glues nodes together during pre-RA sched
+ Glue = 36, // This glues nodes together during pre-RA sched
- isVoid = 35, // This has no value
+ isVoid = 37, // This has no value
- untyped = 36, // This value takes a register, but has
+ Untyped = 38, // This value takes a register, but has
// unspecified type. The register class
// will be determined by the opcode.
- LAST_VALUETYPE = 37, // This always remains at the end of the list.
+ LAST_VALUETYPE = 39, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
@@ -143,8 +151,10 @@ namespace llvm {
/// isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool isFloatingPoint() const {
- return ((SimpleTy >= MVT::f32 && SimpleTy <= MVT::ppcf128) ||
- (SimpleTy >= MVT::v2f32 && SimpleTy <= MVT::v4f64));
+ return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
+ SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
+ (SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
+ SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
}
/// isInteger - Return true if this is an integer, or a vector integer type.
@@ -203,6 +213,7 @@ namespace llvm {
case v2i64:
case v4i64:
case v8i64: return i64;
+ case v2f16: return f16;
case v2f32:
case v4f32:
case v8f32: return f32;
@@ -233,6 +244,7 @@ namespace llvm {
case v2i16:
case v2i32:
case v2i64:
+ case v2f16:
case v2f32:
case v2f64: return 2;
case v1i64: return 1;
@@ -242,21 +254,23 @@ namespace llvm {
unsigned getSizeInBits() const {
switch (SimpleTy) {
case iPTR:
- assert(0 && "Value type size is target-dependent. Ask TLI.");
+ llvm_unreachable("Value type size is target-dependent. Ask TLI.");
case iPTRAny:
case iAny:
case fAny:
- assert(0 && "Value type is overloaded.");
+ llvm_unreachable("Value type is overloaded.");
default:
- assert(0 && "getSizeInBits called on extended MVT.");
+ llvm_unreachable("getSizeInBits called on extended MVT.");
case i1 : return 1;
case i8 : return 8;
case i16 :
+ case f16:
case v2i8: return 16;
case f32 :
case i32 :
case v4i8:
- case v2i16: return 32;
+ case v2i16:
+ case v2f16: return 32;
case x86mmx:
case f64 :
case i64 :
@@ -300,7 +314,9 @@ namespace llvm {
static MVT getFloatingPointVT(unsigned BitWidth) {
switch (BitWidth) {
default:
- assert(false && "Bad bit width!");
+ llvm_unreachable("Bad bit width!");
+ case 16:
+ return MVT::f16;
case 32:
return MVT::f32;
case 64:
@@ -359,6 +375,9 @@ namespace llvm {
if (NumElements == 4) return MVT::v4i64;
if (NumElements == 8) return MVT::v8i64;
break;
+ case MVT::f16:
+ if (NumElements == 2) return MVT::v2f16;
+ break;
case MVT::f32:
if (NumElements == 2) return MVT::v2f32;
if (NumElements == 4) return MVT::v4f32;
@@ -424,20 +443,6 @@ namespace llvm {
return getExtendedVectorVT(Context, VT, NumElements);
}
- /// getIntVectorWithNumElements - Return any integer vector type that has
- /// the specified number of elements.
- static EVT getIntVectorWithNumElements(LLVMContext &C, unsigned NumElts) {
- switch (NumElts) {
- default: return getVectorVT(C, MVT::i8, NumElts);
- case 1: return MVT::v1i64;
- case 2: return MVT::v2i32;
- case 4: return MVT::v4i16;
- case 8: return MVT::v8i8;
- case 16: return MVT::v16i8;
- }
- return MVT::INVALID_SIMPLE_VALUE_TYPE;
- }
-
/// changeVectorElementTypeToInteger - Return a vector with the same number
/// of elements as this vector, but with the element type converted to an
/// integer type with the same bitwidth.
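
With f16 and v2f16 added, the generic MVT factory functions cover half floats as well. A small sketch exercising the new entries, using only the accessors visible in this header:

    #include "llvm/CodeGen/ValueTypes.h"
    #include <cassert>

    void halfTypeExample() {
      llvm::MVT Half = llvm::MVT::getFloatingPointVT(16); // now yields f16
      assert(Half.SimpleTy == llvm::MVT::f16 && Half.getSizeInBits() == 16);

      llvm::MVT V2 = llvm::MVT::getVectorVT(llvm::MVT::f16, 2);
      assert(V2.SimpleTy == llvm::MVT::v2f16);
      assert(V2.isFloatingPoint()); // via FIRST/LAST_FP_VECTOR_VALUETYPE
      (void)Half; (void)V2;
    }
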
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
index 0cfb634..6c22690 100644
--- a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -26,39 +26,41 @@ def i16 : ValueType<16 , 3>; // 16-bit integer value
def i32 : ValueType<32 , 4>; // 32-bit integer value
def i64 : ValueType<64 , 5>; // 64-bit integer value
def i128 : ValueType<128, 6>; // 128-bit integer value
-def f32 : ValueType<32 , 7>; // 32-bit floating point value
-def f64 : ValueType<64 , 8>; // 64-bit floating point value
-def f80 : ValueType<80 , 9>; // 80-bit floating point value
-def f128 : ValueType<128, 10>; // 128-bit floating point value
-def ppcf128: ValueType<128, 11>; // PPC 128-bit floating point value
+def f16 : ValueType<16 , 7>; // 16-bit floating point value
+def f32 : ValueType<32 , 8>; // 32-bit floating point value
+def f64 : ValueType<64 , 9>; // 64-bit floating point value
+def f80 : ValueType<80 , 10>; // 80-bit floating point value
+def f128 : ValueType<128, 11>; // 128-bit floating point value
+def ppcf128: ValueType<128, 12>; // PPC 128-bit floating point value
-def v2i8 : ValueType<16 , 12>; // 2 x i8 vector value
-def v4i8 : ValueType<32 , 13>; // 4 x i8 vector value
-def v8i8 : ValueType<64 , 14>; // 8 x i8 vector value
-def v16i8 : ValueType<128, 15>; // 16 x i8 vector value
-def v32i8 : ValueType<256, 16>; // 32 x i8 vector value
-def v2i16 : ValueType<32 , 17>; // 2 x i16 vector value
-def v4i16 : ValueType<64 , 18>; // 4 x i16 vector value
-def v8i16 : ValueType<128, 19>; // 8 x i16 vector value
-def v16i16 : ValueType<256, 20>; // 16 x i16 vector value
-def v2i32 : ValueType<64 , 21>; // 2 x i32 vector value
-def v4i32 : ValueType<128, 22>; // 4 x i32 vector value
-def v8i32 : ValueType<256, 23>; // 8 x i32 vector value
-def v1i64 : ValueType<64 , 24>; // 1 x i64 vector value
-def v2i64 : ValueType<128, 25>; // 2 x i64 vector value
-def v4i64 : ValueType<256, 26>; // 4 x i64 vector value
-def v8i64 : ValueType<512, 27>; // 8 x i64 vector value
+def v2i8 : ValueType<16 , 13>; // 2 x i8 vector value
+def v4i8 : ValueType<32 , 14>; // 4 x i8 vector value
+def v8i8 : ValueType<64 , 15>; // 8 x i8 vector value
+def v16i8 : ValueType<128, 16>; // 16 x i8 vector value
+def v32i8 : ValueType<256, 17>; // 32 x i8 vector value
+def v2i16 : ValueType<32 , 18>; // 2 x i16 vector value
+def v4i16 : ValueType<64 , 19>; // 4 x i16 vector value
+def v8i16 : ValueType<128, 20>; // 8 x i16 vector value
+def v16i16 : ValueType<256, 21>; // 16 x i16 vector value
+def v2i32 : ValueType<64 , 22>; // 2 x i32 vector value
+def v4i32 : ValueType<128, 23>; // 4 x i32 vector value
+def v8i32 : ValueType<256, 24>; // 8 x i32 vector value
+def v1i64 : ValueType<64 , 25>; // 1 x i64 vector value
+def v2i64 : ValueType<128, 26>; // 2 x i64 vector value
+def v4i64 : ValueType<256, 27>; // 4 x i64 vector value
+def v8i64 : ValueType<512, 28>; // 8 x i64 vector value
-def v2f32 : ValueType<64 , 28>; // 2 x f32 vector value
-def v4f32 : ValueType<128, 29>; // 4 x f32 vector value
-def v8f32 : ValueType<256, 30>; // 8 x f32 vector value
-def v2f64 : ValueType<128, 31>; // 2 x f64 vector value
-def v4f64 : ValueType<256, 32>; // 4 x f64 vector value
+def v2f16 : ValueType<32 , 29>; // 2 x f16 vector value
+def v2f32 : ValueType<64 , 30>; // 2 x f32 vector value
+def v4f32 : ValueType<128, 31>; // 4 x f32 vector value
+def v8f32 : ValueType<256, 32>; // 8 x f32 vector value
+def v2f64 : ValueType<128, 33>; // 2 x f64 vector value
+def v4f64 : ValueType<256, 34>; // 4 x f64 vector value
-def x86mmx : ValueType<64 , 33>; // X86 MMX value
-def FlagVT : ValueType<0 , 34>; // Pre-RA sched glue
-def isVoid : ValueType<0 , 35>; // Produces no value
-def untyped: ValueType<8 , 36>; // Produces an untyped value
+def x86mmx : ValueType<64 , 35>; // X86 MMX value
+def FlagVT : ValueType<0 , 36>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 37>; // Produces no value
+def untyped: ValueType<8 , 38>; // Produces an untyped value
def MetadataVT: ValueType<0, 250>; // Metadata
diff --git a/contrib/llvm/include/llvm/Constant.h b/contrib/llvm/include/llvm/Constant.h
index ecc1fe7..13acdc6 100644
--- a/contrib/llvm/include/llvm/Constant.h
+++ b/contrib/llvm/include/llvm/Constant.h
@@ -41,6 +41,7 @@ namespace llvm {
class Constant : public User {
void operator=(const Constant &); // Do not implement
Constant(const Constant &); // Do not implement
+ virtual void anchor();
protected:
Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
@@ -90,12 +91,13 @@ public:
/// FIXME: This really should not be in VMCore.
PossibleRelocationsTy getRelocationInfo() const;
- /// getVectorElements - This method, which is only valid on constant of vector
- /// type, returns the elements of the vector in the specified smallvector.
- /// This handles breaking down a vector undef into undef elements, etc. For
- /// constant exprs and other cases we can't handle, we return an empty vector.
- void getVectorElements(SmallVectorImpl<Constant*> &Elts) const;
-
+ /// getAggregateElement - For aggregates (struct/array/vector) return the
+ /// constant that corresponds to the specified element if possible, or null if
+ /// not. This can return null if the element index is a ConstantExpr, or if
+ /// 'this' is a constant expr.
+ Constant *getAggregateElement(unsigned Elt) const;
+ Constant *getAggregateElement(Constant *Elt) const;
+
/// destroyConstant - Called if some element of this constant is no longer
/// valid. At this point only other constants may be on the use_list for this
/// constant. Any constants on our Use list must also be destroy'd. The
@@ -103,7 +105,7 @@ public:
/// available cached constants. Implementations should call
/// destroyConstantImpl as the last thing they do, to destroy all users and
/// delete this.
- virtual void destroyConstant() { assert(0 && "Not reached!"); }
+ virtual void destroyConstant() { llvm_unreachable("Not reached!"); }
//// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Constant *) { return true; }
@@ -129,11 +131,12 @@ public:
// to be here to avoid link errors.
assert(getNumOperands() == 0 && "replaceUsesOfWithOnConstant must be "
"implemented for all constants that have operands!");
- assert(0 && "Constants that do not have operands cannot be using 'From'!");
+ llvm_unreachable("Constants that do not have operands cannot be using "
+ "'From'!");
}
-
+
static Constant *getNullValue(Type* Ty);
-
+
/// @returns the value for an integer constant of the given type that has all
/// its bits set to true.
/// @brief Get the all ones value
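
getAggregateElement subsumes the removed getVectorElements: callers query one element at a time and must tolerate a null result. A migration sketch; the helper name and NumElts parameter are illustrative:

    #include "llvm/Constant.h"

    bool allElementsZero(const llvm::Constant *C, unsigned NumElts) {
      for (unsigned i = 0; i != NumElts; ++i) {
        llvm::Constant *Elt = C->getAggregateElement(i);
        if (!Elt) // element is not analyzable, e.g. 'C' is a constant expr
          return false;
        if (!Elt->isNullValue())
          return false;
      }
      return true;
    }
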
diff --git a/contrib/llvm/include/llvm/Constants.h b/contrib/llvm/include/llvm/Constants.h
index 6545a3f..0abe17d 100644
--- a/contrib/llvm/include/llvm/Constants.h
+++ b/contrib/llvm/include/llvm/Constants.h
@@ -34,10 +34,13 @@ class IntegerType;
class StructType;
class PointerType;
class VectorType;
+class SequentialType;
template<class ConstantClass, class TypeClass, class ValType>
struct ConstantCreator;
template<class ConstantClass, class TypeClass>
+struct ConstantArrayCreator;
+template<class ConstantClass, class TypeClass>
struct ConvertConstantType;
//===----------------------------------------------------------------------===//
@@ -45,6 +48,7 @@ struct ConvertConstantType;
/// represents both boolean and integral constants.
/// @brief Class for constant integers.
class ConstantInt : public Constant {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
ConstantInt(const ConstantInt &); // DO NOT IMPLEMENT
ConstantInt(IntegerType *Ty, const APInt& V);
@@ -229,6 +233,7 @@ public:
///
class ConstantFP : public Constant {
APFloat Val;
+ virtual void anchor();
void *operator new(size_t, unsigned);// DO NOT IMPLEMENT
ConstantFP(const ConstantFP &); // DO NOT IMPLEMENT
friend class LLVMContextImpl;
@@ -296,7 +301,6 @@ public:
/// ConstantAggregateZero - All zero aggregate value
///
class ConstantAggregateZero : public Constant {
- friend struct ConstantCreator<ConstantAggregateZero, Type, char>;
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
ConstantAggregateZero(const ConstantAggregateZero &); // DO NOT IMPLEMENT
protected:
@@ -308,10 +312,26 @@ protected:
return User::operator new(s, 0);
}
public:
- static ConstantAggregateZero* get(Type *Ty);
+ static ConstantAggregateZero *get(Type *Ty);
virtual void destroyConstant();
+ /// getSequentialElement - If this CAZ has array or vector type, return a zero
+ /// with the right element type.
+ Constant *getSequentialElement() const;
+
+ /// getStructElement - If this CAZ has struct type, return a zero with the
+ /// right element type for the specified element.
+ Constant *getStructElement(unsigned Elt) const;
+
+ /// getElementValue - Return a zero of the right value for the specified GEP
+ /// index.
+ Constant *getElementValue(Constant *C) const;
+
+ /// getElementValue - Return a zero of the right value for the specified GEP
+ /// index.
+ Constant *getElementValue(unsigned Idx) const;
+
/// Methods for support type inquiry through isa, cast, and dyn_cast:
///
static bool classof(const ConstantAggregateZero *) { return true; }
@@ -325,8 +345,7 @@ public:
/// ConstantArray - Constant Array Declarations
///
class ConstantArray : public Constant {
- friend struct ConstantCreator<ConstantArray, ArrayType,
- std::vector<Constant*> >;
+ friend struct ConstantArrayCreator<ConstantArray, ArrayType>;
ConstantArray(const ConstantArray &); // DO NOT IMPLEMENT
protected:
ConstantArray(ArrayType *T, ArrayRef<Constant *> Val);
@@ -334,15 +353,6 @@ public:
// ConstantArray accessors
static Constant *get(ArrayType *T, ArrayRef<Constant*> V);
- /// This method constructs a ConstantArray and initializes it with a text
- /// string. The default behavior (AddNull==true) causes a null terminator to
- /// be placed at the end of the array. This effectively increases the length
- /// of the array by one (you've been warned). However, in some situations
- /// this is not desired so if AddNull==false then the string is copied without
- /// null termination.
- static Constant *get(LLVMContext &Context, StringRef Initializer,
- bool AddNull = true);
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
@@ -353,28 +363,6 @@ public:
return reinterpret_cast<ArrayType*>(Value::getType());
}
- /// isString - This method returns true if the array is an array of i8 and
- /// the elements of the array are all ConstantInt's.
- bool isString() const;
-
- /// isCString - This method returns true if the array is a string (see
- /// @verbatim
- /// isString) and it ends in a null byte \0 and does not contains any other
- /// @endverbatim
- /// null bytes except its terminator.
- bool isCString() const;
-
- /// getAsString - If this array is isString(), then this method converts the
- /// array to an std::string and returns it. Otherwise, it asserts out.
- ///
- std::string getAsString() const;
-
- /// getAsCString - If this array is isCString(), then this method converts the
- /// array (without the trailing null byte) to an std::string and returns it.
- /// Otherwise, it asserts out.
- ///
- std::string getAsCString() const;
-
virtual void destroyConstant();
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
@@ -396,8 +384,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantArray, Constant)
// ConstantStruct - Constant Struct Declarations
//
class ConstantStruct : public Constant {
- friend struct ConstantCreator<ConstantStruct, StructType,
- std::vector<Constant*> >;
+ friend struct ConstantArrayCreator<ConstantStruct, StructType>;
ConstantStruct(const ConstantStruct &); // DO NOT IMPLEMENT
protected:
ConstantStruct(StructType *T, ArrayRef<Constant *> Val);
@@ -457,8 +444,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantStruct, Constant)
/// ConstantVector - Constant Vector Declarations
///
class ConstantVector : public Constant {
- friend struct ConstantCreator<ConstantVector, VectorType,
- std::vector<Constant*> >;
+ friend struct ConstantArrayCreator<ConstantVector, VectorType>;
ConstantVector(const ConstantVector &); // DO NOT IMPLEMENT
protected:
ConstantVector(VectorType *T, ArrayRef<Constant *> Val);
@@ -466,6 +452,10 @@ public:
// ConstantVector accessors
static Constant *get(ArrayRef<Constant*> V);
+ /// getSplat - Return a ConstantVector with the specified constant in each
+ /// element.
+ static Constant *getSplat(unsigned NumElts, Constant *Elt);
+
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
@@ -475,12 +465,6 @@ public:
inline VectorType *getType() const {
return reinterpret_cast<VectorType*>(Value::getType());
}
-
- /// This function will return true iff every element in this vector constant
- /// is set to all ones.
- /// @returns true iff this constant's emements are all set to all ones.
- /// @brief Determine if the value is all ones.
- bool isAllOnesValue() const;
/// getSplatValue - If this is a splat constant, meaning that all of the
/// elements have the same value, return that value. Otherwise return NULL.
@@ -507,7 +491,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantVector, Constant)
/// ConstantPointerNull - a constant pointer value that points to null
///
class ConstantPointerNull : public Constant {
- friend struct ConstantCreator<ConstantPointerNull, PointerType, char>;
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
ConstantPointerNull(const ConstantPointerNull &); // DO NOT IMPLEMENT
protected:
@@ -539,6 +522,240 @@ public:
return V->getValueID() == ConstantPointerNullVal;
}
};
+
+//===----------------------------------------------------------------------===//
+/// ConstantDataSequential - A vector or array constant whose element type is a
+/// simple 1/2/4/8-byte integer or float/double, and whose elements are just
+/// simple data values (i.e. ConstantInt/ConstantFP). This Constant node has no
+/// operands because it stores all of the elements of the constant as densely
+/// packed data, instead of as Value*'s.
+///
+/// This is the common base class of ConstantDataArray and ConstantDataVector.
+///
+class ConstantDataSequential : public Constant {
+ friend class LLVMContextImpl;
+ /// DataElements - A pointer to the bytes underlying this constant (which is
+ /// owned by the uniquing StringMap).
+ const char *DataElements;
+
+ /// Next - This forms a linked list of ConstantDataSequential nodes that have
+ /// the same value but different type. For example, 0,0,0,1 could be a
+ /// 4-element array of i8, or a 1-element array of i32. They'll both end up in
+ /// the same StringMap bucket, linked up.
+ ConstantDataSequential *Next;
+ void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ ConstantDataSequential(const ConstantDataSequential &); // DO NOT IMPLEMENT
+protected:
+ explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data)
+ : Constant(ty, VT, 0, 0), DataElements(Data), Next(0) {}
+ ~ConstantDataSequential() { delete Next; }
+
+ static Constant *getImpl(StringRef Bytes, Type *Ty);
+
+protected:
+ // allocate space for exactly zero operands.
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+
+ /// isElementTypeCompatible - Return true if a ConstantDataSequential can be
+ /// formed with a vector or array of the specified element type.
+ /// ConstantDataArray only works with normal float and int types that are
+ /// stored densely in memory, not with things like i42 or x86_f80.
+ static bool isElementTypeCompatible(const Type *Ty);
+
+ /// getElementAsInteger - If this is a sequential container of integers (of
+ /// any size), return the specified element in the low bits of a uint64_t.
+ uint64_t getElementAsInteger(unsigned i) const;
+
+ /// getElementAsAPFloat - If this is a sequential container of floating point
+ /// type, return the specified element as an APFloat.
+ APFloat getElementAsAPFloat(unsigned i) const;
+
+ /// getElementAsFloat - If this is a sequential container of floats, return
+ /// the specified element as a float.
+ float getElementAsFloat(unsigned i) const;
+
+ /// getElementAsDouble - If this is a sequential container of doubles, return
+ /// the specified element as a double.
+ double getElementAsDouble(unsigned i) const;
+
+ /// getElementAsConstant - Return a Constant for a specified index's element.
+ /// Note that this has to compute a new constant to return, so it isn't as
+ /// efficient as getElementAsInteger/Float/Double.
+ Constant *getElementAsConstant(unsigned i) const;
+
+ /// getType - Specialize the getType() method to always return a
+ /// SequentialType, which reduces the amount of casting needed in parts of the
+ /// compiler.
+ inline SequentialType *getType() const {
+ return reinterpret_cast<SequentialType*>(Value::getType());
+ }
+
+ /// getElementType - Return the element type of the array/vector.
+ Type *getElementType() const;
+
+ /// getNumElements - Return the number of elements in the array or vector.
+ unsigned getNumElements() const;
+
+ /// getElementByteSize - Return the size (in bytes) of each element in the
+ /// array/vector. The size of the elements is known to be a multiple of one
+ /// byte.
+ uint64_t getElementByteSize() const;
+
+
+ /// isString - This method returns true if this is an array of i8.
+ bool isString() const;
+
+ /// isCString - This method returns true if the array "isString", ends with a
+ /// nul byte, and does not contain any other nul bytes.
+ bool isCString() const;
+
+ /// getAsString - If this array is isString(), then this method returns the
+ /// array as a StringRef. Otherwise, it asserts out.
+ ///
+ StringRef getAsString() const {
+ assert(isString() && "Not a string");
+ return getRawDataValues();
+ }
+
+ /// getAsCString - If this array is isCString(), then this method returns the
+ /// array (without the trailing null byte) as a StringRef. Otherwise, it
+ /// asserts out.
+ ///
+ StringRef getAsCString() const {
+ assert(isCString() && "Isn't a C string");
+ StringRef Str = getAsString();
+ return Str.substr(0, Str.size()-1);
+ }
+
+ /// getRawDataValues - Return the raw, underlying, bytes of this data. Note
+ /// that this is an extremely tricky thing to work with, as it exposes the
+ /// host endianness of the data elements.
+ StringRef getRawDataValues() const;
+
+ virtual void destroyConstant();
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ ///
+ static bool classof(const ConstantDataSequential *) { return true; }
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantDataArrayVal ||
+ V->getValueID() == ConstantDataVectorVal;
+ }
+private:
+ const char *getElementPointer(unsigned Elt) const;
+};
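Since the elements are stored as raw bytes rather than operands, reads go through the typed accessors above. A sketch of walking a packed integer constant, assuming C came from one of the ConstantDataArray::get overloads introduced below:

  if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(C)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      uint64_t V = CDS->getElementAsInteger(i); // integer element types only
      (void)V;
    }
  }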
+
+//===----------------------------------------------------------------------===//
+/// ConstantDataArray - An array constant whose element type is a simple
+/// 1/2/4/8-byte integer or float/double, and whose elements are just simple
+/// data values (i.e. ConstantInt/ConstantFP). This Constant node has no
+/// operands because it stores all of the elements of the constant as densely
+/// packed data, instead of as Value*'s.
+class ConstantDataArray : public ConstantDataSequential {
+ void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ ConstantDataArray(const ConstantDataArray &); // DO NOT IMPLEMENT
+ virtual void anchor();
+ friend class ConstantDataSequential;
+ explicit ConstantDataArray(Type *ty, const char *Data)
+ : ConstantDataSequential(ty, ConstantDataArrayVal, Data) {}
+protected:
+ // allocate space for exactly zero operands.
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+
+ /// get() constructors - Return a constant with array type with an element
+ /// count and element type matching the ArrayRef passed in. Note that this
+ /// can return a ConstantAggregateZero object.
+ static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<float> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<double> Elts);
+
+ /// getString - This method constructs a CDS and initializes it with a text
+ /// string. The default behavior (AddNull==true) causes a null terminator to
+ /// be placed at the end of the array (increasing the length of the string by
+ /// one more than the StringRef would normally indicate). Pass AddNull=false
+ /// to disable this behavior.
+ static Constant *getString(LLVMContext &Context, StringRef Initializer,
+ bool AddNull = true);
+
+ /// getType - Specialize the getType() method to always return an ArrayType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline ArrayType *getType() const {
+ return reinterpret_cast<ArrayType*>(Value::getType());
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ ///
+ static bool classof(const ConstantDataArray *) { return true; }
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantDataArrayVal;
+ }
+};
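A sketch of building string constants with getString, the replacement for the ConstantArray::get(LLVMContext&, StringRef) overload removed above (Ctx is an illustrative LLVMContext):

  Constant *Str = ConstantDataArray::getString(Ctx, "hello"); // [6 x i8], nul added
  ConstantDataArray *CDA = cast<ConstantDataArray>(Str);
  assert(CDA->isCString());
  StringRef S = CDA->getAsCString(); // "hello", trailing nul dropped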
+
+//===----------------------------------------------------------------------===//
+/// ConstantDataVector - A vector constant whose element type is a simple
+/// 1/2/4/8-byte integer or float/double, and whose elements are just simple
+/// data values (i.e. ConstantInt/ConstantFP). This Constant node has no
+/// operands because it stores all of the elements of the constant as densely
+/// packed data, instead of as Value*'s.
+class ConstantDataVector : public ConstantDataSequential {
+ void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ ConstantDataVector(const ConstantDataVector &); // DO NOT IMPLEMENT
+ virtual void anchor();
+ friend class ConstantDataSequential;
+ explicit ConstantDataVector(Type *ty, const char *Data)
+ : ConstantDataSequential(ty, ConstantDataVectorVal, Data) {}
+protected:
+ // allocate space for exactly zero operands.
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+
+ /// get() constructors - Return a constant with vector type with an element
+ /// count and element type matching the ArrayRef passed in. Note that this
+ /// can return a ConstantAggregateZero object.
+ static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<float> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<double> Elts);
+
+ /// getSplat - Return a ConstantVector with the specified constant in each
+ /// element. The specified constant has to be of a compatible type (i8/i16/
+ /// i32/i64/float/double) and must be a ConstantFP or ConstantInt.
+ static Constant *getSplat(unsigned NumElts, Constant *Elt);
+
+ /// getSplatValue - If this is a splat constant, meaning that all of the
+ /// elements have the same value, return that value. Otherwise return NULL.
+ Constant *getSplatValue() const;
+
+ /// getType - Specialize the getType() method to always return a VectorType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline VectorType *getType() const {
+ return reinterpret_cast<VectorType*>(Value::getType());
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ ///
+ static bool classof(const ConstantDataVector *) { return true; }
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantDataVectorVal;
+ }
+};
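A sketch of getSplat, which replaces hand-built std::vectors of identical ConstantInts (Ctx is illustrative):

  Constant *Seven = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
  Constant *Splat = ConstantDataVector::getSplat(4, Seven); // <4 x i32> <7,7,7,7>
  // Note: a zero element would come back as a ConstantAggregateZero instead,
  // per the comment on the get() constructors above.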
+
+
/// BlockAddress - The address of a basic block.
///
@@ -897,7 +1114,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant)
/// LangRef.html#undefvalues for details.
///
class UndefValue : public Constant {
- friend struct ConstantCreator<UndefValue, Type, char>;
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
UndefValue(const UndefValue &); // DO NOT IMPLEMENT
protected:
@@ -913,6 +1129,22 @@ public:
///
static UndefValue *get(Type *T);
+ /// getSequentialElement - If this Undef has array or vector type, return an
+ /// undef with the right element type.
+ UndefValue *getSequentialElement() const;
+
+ /// getStructElement - If this undef has struct type, return an undef with the
+ /// right element type for the specified element.
+ UndefValue *getStructElement(unsigned Elt) const;
+
+ /// getElementValue - Return an undef of the right value for the specified GEP
+ /// index.
+ UndefValue *getElementValue(Constant *C) const;
+
+ /// getElementValue - Return an undef of the right value for the specified GEP
+ /// index.
+ UndefValue *getElementValue(unsigned Idx) const;
+
virtual void destroyConstant();
/// Methods for support type inquiry through isa, cast, and dyn_cast:
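These mirror the ConstantAggregateZero accessors added earlier in this change. A sketch, reusing the hypothetical [4 x i32] ATy from that example:

  UndefValue *UV  = UndefValue::get(ATy);
  UndefValue *EU  = UV->getSequentialElement(); // undef of the element type, i32
  UndefValue *Via = UV->getElementValue(0u);    // same result via the GEP-index form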
diff --git a/contrib/llvm/include/llvm/DebugInfoProbe.h b/contrib/llvm/include/llvm/DebugInfoProbe.h
deleted file mode 100644
index 78d00df..0000000
--- a/contrib/llvm/include/llvm/DebugInfoProbe.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//===-- DebugInfoProbe.h - DebugInfo Probe ----------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a probe, DebugInfoProbe, that can be used by pass
-// manager to analyze how optimizer is treating debugging information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_UTILS_DEBUGINFOPROBE_H
-#define LLVM_TRANSFORMS_UTILS_DEBUGINFOPROBE_H
-
-#include "llvm/ADT/StringMap.h"
-
-namespace llvm {
- class Function;
- class Pass;
- class DebugInfoProbeImpl;
-
- /// DebugInfoProbe - This class provides a interface to monitor
- /// how an optimization pass is preserving debugging information.
- class DebugInfoProbe {
- public:
- DebugInfoProbe();
- ~DebugInfoProbe();
-
- /// initialize - Collect information before running an optimization pass.
- void initialize(StringRef PName, Function &F);
-
- /// finalize - Collect information after running an optimization pass. This
- /// must be used after initialization.
- void finalize(Function &F);
-
- /// report - Report findings. This should be invoked after finalize.
- void report();
-
- private:
- DebugInfoProbeImpl *pImpl;
- };
-
- /// DebugInfoProbeInfo - This class provides an interface that a pass manager
- /// can use to manage debug info probes.
- class DebugInfoProbeInfo {
- StringMap<DebugInfoProbe *> Probes;
- public:
- DebugInfoProbeInfo() {}
-
- /// ~DebugInfoProbeInfo - Report data collected by all probes before deleting
- /// them.
- ~DebugInfoProbeInfo();
-
- /// initialize - Collect information before running an optimization pass.
- void initialize(Pass *P, Function &F);
-
- /// finalize - Collect information after running an optimization pass. This
- /// must be used after initialization.
- void finalize(Pass *P, Function &F);
- };
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/include/llvm/DefaultPasses.h b/contrib/llvm/include/llvm/DefaultPasses.h
index 2e4145b..929569d 100644
--- a/contrib/llvm/include/llvm/DefaultPasses.h
+++ b/contrib/llvm/include/llvm/DefaultPasses.h
@@ -14,6 +14,8 @@
#ifndef LLVM_DEFAULT_PASS_SUPPORT_H
#define LLVM_DEFAULT_PASS_SUPPORT_H
+#include <llvm/PassSupport.h>
+
namespace llvm {
class PassManagerBase;
diff --git a/contrib/llvm/include/llvm/DerivedTypes.h b/contrib/llvm/include/llvm/DerivedTypes.h
index 445c3de..da5ad27 100644
--- a/contrib/llvm/include/llvm/DerivedTypes.h
+++ b/contrib/llvm/include/llvm/DerivedTypes.h
@@ -195,9 +195,10 @@ class StructType : public CompositeType {
// This is the contents of the SubClassData field.
SCDB_HasBody = 1,
SCDB_Packed = 2,
- SCDB_IsLiteral = 4
+ SCDB_IsLiteral = 4,
+ SCDB_IsSized = 8
};
-
+
/// SymbolTableEntry - For a named struct that actually has a name, this is a
/// pointer to the symbol table entry (maintained by LLVMContext) for the
/// struct. This is null if the type is a literal struct or if it is
@@ -248,6 +249,9 @@ public:
/// isOpaque - Return true if this is a type with an identity that has no body
/// specified yet. This prints as 'opaque' in .ll files.
bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
+
+ /// isSized - Return true if this is a sized type.
+ bool isSized() const;
/// hasName - Return true if this is a named struct that has a non-empty name.
bool hasName() const { return SymbolTableEntry != 0; }
@@ -374,6 +378,7 @@ public:
///
static VectorType *getInteger(VectorType *VTy) {
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ assert(EltBits && "Element size must be of a non-zero size");
Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
return VectorType::get(EltTy, VTy->getNumElements());
}
@@ -408,6 +413,7 @@ public:
unsigned getNumElements() const { return NumElements; }
/// @brief Return the number of bits in the Vector type.
+ /// Returns zero when the vector is a vector of pointers.
unsigned getBitWidth() const {
return NumElements * getElementType()->getPrimitiveSizeInBits();
}
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
index cf85671..e920e98 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -15,17 +15,19 @@
#ifndef LLVM_EXECUTION_ENGINE_H
#define LLVM_EXECUTION_ENGINE_H
-#include <vector>
-#include <map>
-#include <string>
#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <vector>
+#include <map>
+#include <string>
namespace llvm {
@@ -41,6 +43,7 @@ class MachineCodeInfo;
class Module;
class MutexGuard;
class TargetData;
+class Triple;
class Type;
/// \brief Helper class used to synchronize access to the global address map
@@ -132,14 +135,12 @@ protected:
Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
bool GVsWithCode,
TargetMachine *TM);
static ExecutionEngine *(*MCJITCtor)(
Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
bool GVsWithCode,
TargetMachine *TM);
static ExecutionEngine *(*InterpCtor)(Module *M, std::string *ErrorStr);
@@ -228,6 +229,26 @@ public:
virtual GenericValue runFunction(Function *F,
const std::vector<GenericValue> &ArgValues) = 0;
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call. As such it is only
+ /// useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) = 0;
+
+ /// mapSectionAddress - map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ virtual void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress) {
+ llvm_unreachable("Re-mapping of section addresses not supported with this "
+ "EE!");
+ }
+
/// runStaticConstructorsDestructors - This method is used to execute all of
/// the static constructors or destructors for a program.
///
@@ -462,6 +483,7 @@ private:
CodeGenOpt::Level OptLevel;
JITMemoryManager *JMM;
bool AllocateGVsWithCode;
+ TargetOptions Options;
Reloc::Model RelocModel;
CodeModel::Model CMModel;
std::string MArch;
@@ -475,6 +497,7 @@ private:
ErrorStr = NULL;
OptLevel = CodeGenOpt::Default;
JMM = NULL;
+ Options = TargetOptions();
AllocateGVsWithCode = false;
RelocModel = Reloc::Default;
CMModel = CodeModel::JITDefault;
@@ -518,6 +541,13 @@ public:
return *this;
}
+ /// setTargetOptions - Set the target options that the ExecutionEngine
+ /// target is using. Defaults to TargetOptions().
+ EngineBuilder &setTargetOptions(const TargetOptions &Opts) {
+ Options = Opts;
+ return *this;
+ }
+
/// setRelocationModel - Set the relocation model that the ExecutionEngine
/// target is using. Defaults to target specific default "Reloc::Default".
EngineBuilder &setRelocationModel(Reloc::Model RM) {
@@ -572,17 +602,20 @@ public:
return *this;
}
+ TargetMachine *selectTarget();
+
/// selectTarget - Pick a target either via -march or by guessing the native
/// arch. Add any CPU features specified via -mcpu or -mattr.
- static TargetMachine *selectTarget(Module *M,
- StringRef MArch,
- StringRef MCPU,
- const SmallVectorImpl<std::string>& MAttrs,
- Reloc::Model RM,
- CodeModel::Model CM,
- std::string *Err);
-
- ExecutionEngine *create();
+ TargetMachine *selectTarget(const Triple &TargetTriple,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs);
+
+ ExecutionEngine *create() {
+ return create(selectTarget());
+ }
+
+ ExecutionEngine *create(TargetMachine *TM);
};
} // End llvm namespace
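With OptLevel dropped from the JIT constructors and TargetOptions threaded through the builder, engine creation becomes the following sketch (M is an illustrative, already-populated Module*):

  std::string Err;
  TargetOptions Opts;                       // defaults; adjust fields as needed
  ExecutionEngine *EE = EngineBuilder(M)
                            .setErrorStr(&Err)
                            .setTargetOptions(Opts)
                            .create();      // equivalent to create(selectTarget())
  if (!EE)
    errs() << "EngineBuilder failed: " << Err << "\n";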
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h b/contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h
new file mode 100644
index 0000000..ca87342
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h
@@ -0,0 +1,102 @@
+//===-- IntelJITEventsWrapper.h - Intel JIT Events API Wrapper --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper for the Intel JIT Events API. It allows for the
+// implementation of the jitprofiling library to be swapped with an alternative
+// implementation (for testing). To include this file, you must have the
+// jitprofiling.h header available; it is available in Intel(R) VTune(TM)
+// Amplifier XE 2011.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INTEL_JIT_EVENTS_WRAPPER_H
+#define INTEL_JIT_EVENTS_WRAPPER_H
+
+#include <jitprofiling.h>
+
+namespace llvm {
+
+class IntelJITEventsWrapper {
+ // Function pointer types for testing implementation of Intel jitprofiling
+ // library
+ typedef int (*NotifyEventPtr)(iJIT_JVM_EVENT, void*);
+ typedef void (*RegisterCallbackExPtr)(void *, iJIT_ModeChangedEx );
+ typedef iJIT_IsProfilingActiveFlags (*IsProfilingActivePtr)(void);
+ typedef void (*FinalizeThreadPtr)(void);
+ typedef void (*FinalizeProcessPtr)(void);
+ typedef unsigned int (*GetNewMethodIDPtr)(void);
+
+ NotifyEventPtr NotifyEventFunc;
+ RegisterCallbackExPtr RegisterCallbackExFunc;
+ IsProfilingActivePtr IsProfilingActiveFunc;
+ FinalizeThreadPtr FinalizeThreadFunc;
+ FinalizeProcessPtr FinalizeProcessFunc;
+ GetNewMethodIDPtr GetNewMethodIDFunc;
+
+public:
+ bool isAmplifierRunning() {
+ return iJIT_IsProfilingActive() == iJIT_SAMPLING_ON;
+ }
+
+ IntelJITEventsWrapper()
+ : NotifyEventFunc(::iJIT_NotifyEvent),
+ RegisterCallbackExFunc(::iJIT_RegisterCallbackEx),
+ IsProfilingActiveFunc(::iJIT_IsProfilingActive),
+ FinalizeThreadFunc(::FinalizeThread),
+ FinalizeProcessFunc(::FinalizeProcess),
+ GetNewMethodIDFunc(::iJIT_GetNewMethodID) {
+ }
+
+ IntelJITEventsWrapper(NotifyEventPtr NotifyEventImpl,
+ RegisterCallbackExPtr RegisterCallbackExImpl,
+ IsProfilingActivePtr IsProfilingActiveImpl,
+ FinalizeThreadPtr FinalizeThreadImpl,
+ FinalizeProcessPtr FinalizeProcessImpl,
+ GetNewMethodIDPtr GetNewMethodIDImpl)
+ : NotifyEventFunc(NotifyEventImpl),
+ RegisterCallbackExFunc(RegisterCallbackExImpl),
+ IsProfilingActiveFunc(IsProfilingActiveImpl),
+ FinalizeThreadFunc(FinalizeThreadImpl),
+ FinalizeProcessFunc(FinalizeProcessImpl),
+ GetNewMethodIDFunc(GetNewMethodIDImpl) {
+ }
+
+ // Sends an event announcing that a function has been emitted.
+ // Return values are event-specific. See Intel documentation for details.
+ int iJIT_NotifyEvent(iJIT_JVM_EVENT EventType, void *EventSpecificData) {
+ if (!NotifyEventFunc)
+ return -1;
+ return NotifyEventFunc(EventType, EventSpecificData);
+ }
+
+ // Registers a callback function to receive notice of profiling state changes
+ void iJIT_RegisterCallbackEx(void *UserData,
+ iJIT_ModeChangedEx NewModeCallBackFuncEx) {
+ if (RegisterCallbackExFunc)
+ RegisterCallbackExFunc(UserData, NewModeCallBackFuncEx);
+ }
+
+ // Returns the current profiler mode
+ iJIT_IsProfilingActiveFlags iJIT_IsProfilingActive(void) {
+ if (!IsProfilingActiveFunc)
+ return iJIT_NOTHING_RUNNING;
+ return IsProfilingActiveFunc();
+ }
+
+ // Generates a locally unique method ID for use in code registration
+ unsigned int iJIT_GetNewMethodID(void) {
+ if (!GetNewMethodIDFunc)
+ return -1;
+ return GetNewMethodIDFunc();
+ }
+};
+
+} //namespace llvm
+
+#endif //INTEL_JIT_EVENTS_WRAPPER_H
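The six-pointer constructor exists so tests can stub the VTune API without the real jitprofiling library; unset hooks degrade gracefully, as the null checks above show. A sketch under that assumption (the Mock* functions are hypothetical):

  static int MockNotify(iJIT_JVM_EVENT, void *) { return 0; }
  static iJIT_IsProfilingActiveFlags MockActive(void) { return iJIT_SAMPLING_ON; }

  IntelJITEventsWrapper W(MockNotify, 0, MockActive, 0, 0, 0);
  if (W.isAmplifierRunning())          // true: MockActive reports iJIT_SAMPLING_ON
    (void)W.iJIT_GetNewMethodID();     // returns -1: no GetNewMethodID stub given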
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
index abc063b..eea603f 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
@@ -15,6 +15,7 @@
#ifndef LLVM_EXECUTION_ENGINE_JIT_EVENTLISTENER_H
#define LLVM_EXECUTION_ENGINE_JIT_EVENTLISTENER_H
+#include "llvm/Config/config.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/DebugLoc.h"
@@ -23,6 +24,8 @@
namespace llvm {
class Function;
class MachineFunction;
+class OProfileWrapper;
+class IntelJITEventsWrapper;
/// JITEvent_EmittedFunctionDetails - Helper struct for containing information
/// about a generated machine code function.
@@ -59,9 +62,9 @@ public:
/// NotifyFunctionEmitted - Called after a function has been successfully
/// emitted to memory. The function still has its MachineFunction attached,
/// if you should happen to need that.
- virtual void NotifyFunctionEmitted(const Function &F,
- void *Code, size_t Size,
- const EmittedFunctionDetails &Details) {}
+ virtual void NotifyFunctionEmitted(const Function &,
+ void *, size_t,
+ const EmittedFunctionDetails &) {}
/// NotifyFreeingMachineCode - Called from freeMachineCodeForFunction(), after
/// the global mapping is removed, but before the machine code is returned to
@@ -71,12 +74,43 @@ public:
/// parameter to a previous NotifyFunctionEmitted call. The Function passed
/// to NotifyFunctionEmitted may have been destroyed by the time of the
/// matching NotifyFreeingMachineCode call.
- virtual void NotifyFreeingMachineCode(void *OldPtr) {}
-};
+ virtual void NotifyFreeingMachineCode(void *) {}
+
+#if LLVM_USE_INTEL_JITEVENTS
+ // Construct an IntelJITEventListener
+ static JITEventListener *createIntelJITEventListener();
+
+ // Construct an IntelJITEventListener with a test Intel JIT API implementation
+ static JITEventListener *createIntelJITEventListener(
+ IntelJITEventsWrapper* AlternativeImpl);
+#else
+ static JITEventListener *createIntelJITEventListener() { return 0; }
+
+ static JITEventListener *createIntelJITEventListener(
+ IntelJITEventsWrapper* AlternativeImpl) {
+ return 0;
+ }
+#endif // LLVM_USE_INTEL_JITEVENTS
+
+#if LLVM_USE_OPROFILE
+ // Construct an OProfileJITEventListener
+ static JITEventListener *createOProfileJITEventListener();
-// This returns NULL if support isn't available.
-JITEventListener *createOProfileJITEventListener();
+ // Construct an OProfileJITEventListener with a test opagent implementation
+ static JITEventListener *createOProfileJITEventListener(
+ OProfileWrapper* AlternativeImpl);
+#else
+
+ static JITEventListener *createOProfileJITEventListener() { return 0; }
+
+ static JITEventListener *createOProfileJITEventListener(
+ OProfileWrapper* AlternativeImpl) {
+ return 0;
+ }
+#endif // LLVM_USE_OPROFILE
+
+};
} // end namespace llvm.
-#endif
+#endif // defined LLVM_EXECUTION_ENGINE_JIT_EVENTLISTENER_H
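A sketch of a listener using the hooks above, registered through ExecutionEngine::RegisterJITEventListener (unchanged by this diff; EE is an illustrative ExecutionEngine*):

  class SizeLogger : public JITEventListener {
  public:
    virtual void NotifyFunctionEmitted(const Function &F, void *Code, size_t Size,
                                       const EmittedFunctionDetails &) {
      errs() << F.getName() << ": " << Size << " bytes at " << Code << "\n";
    }
  };

  // createIntelJITEventListener() returns 0 when built without Intel support.
  EE->RegisterJITEventListener(JITEventListener::createIntelJITEventListener());
  EE->RegisterJITEventListener(new SizeLogger());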
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h b/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
index a63f0da..4c75b6a 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
@@ -47,6 +47,17 @@ public:
/// debugging, and may be turned on by default in debug mode.
virtual void setPoisonMemory(bool poison) = 0;
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function. As such it is only useful for resolving library
+ /// symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) = 0;
+
//===--------------------------------------------------------------------===//
// Global Offset Table Management
//===--------------------------------------------------------------------===//
@@ -101,6 +112,22 @@ public:
virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart,
uint8_t *FunctionEnd) = 0;
+ /// allocateCodeSection - Allocate a memory block of (at least) the given
+ /// size suitable for executable code. The SectionID is a unique identifier
+ /// assigned by the JIT and passed through to the memory manager for
+ /// the instance class to use if it needs to communicate to the JIT about
+ /// a given section after the fact.
+ virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID) = 0;
+
+ /// allocateDataSection - Allocate a memory block of (at least) the given
+ /// size suitable for data. The SectionID is a unique identifier
+ /// assigned by the JIT and passed through to the memory manager for
+ /// the instance class to use if it needs to communicate to the JIT about
+ /// a given section after the fact.
+ virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID) = 0;
+
/// allocateSpace - Allocate a memory block of the given size. This method
/// cannot be called between calls to startFunctionBody and endFunctionBody.
virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) = 0;
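Callers now carve out whole sections rather than per-function bodies, and the SectionID round-trips so later fixups can name the section. A sketch with illustrative sizes (MM is some JITMemoryManager implementation):

  unsigned NextID = 1;
  uint8_t *Text = MM->allocateCodeSection(4096, 16, NextID++); // executable block
  uint8_t *Data = MM->allocateDataSection(1024, 8, NextID++);  // writable data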
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h b/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h
new file mode 100644
index 0000000..ab7f25e
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h
@@ -0,0 +1,124 @@
+//===-- OProfileWrapper.h - OProfile JIT API Wrapper ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines an OProfileWrapper object that detects if the oprofile
+// daemon is running, and provides wrappers for opagent functions used to
+// communicate with the oprofile JIT interface. The dynamic library libopagent
+// does not need to be linked directly as this object lazily loads the library
+// when the first op_ function is called.
+//
+// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
+// definition of the interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPROFILE_WRAPPER_H
+#define OPROFILE_WRAPPER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <opagent.h>
+
+namespace llvm {
+
+
+class OProfileWrapper {
+ typedef op_agent_t (*op_open_agent_ptr_t)();
+ typedef int (*op_close_agent_ptr_t)(op_agent_t);
+ typedef int (*op_write_native_code_ptr_t)(op_agent_t,
+ const char*,
+ uint64_t,
+ void const*,
+ const unsigned int);
+ typedef int (*op_write_debug_line_info_ptr_t)(op_agent_t,
+ void const*,
+ size_t,
+ struct debug_line_info const*);
+ typedef int (*op_unload_native_code_ptr_t)(op_agent_t, uint64_t);
+
+ // Also used for op_minor_version function which has the same signature
+ typedef int (*op_major_version_ptr_t)(void);
+
+ // This is not a part of the opagent API, but is useful nonetheless
+ typedef bool (*IsOProfileRunningPtrT)(void);
+
+
+ op_agent_t Agent;
+ op_open_agent_ptr_t OpenAgentFunc;
+ op_close_agent_ptr_t CloseAgentFunc;
+ op_write_native_code_ptr_t WriteNativeCodeFunc;
+ op_write_debug_line_info_ptr_t WriteDebugLineInfoFunc;
+ op_unload_native_code_ptr_t UnloadNativeCodeFunc;
+ op_major_version_ptr_t MajorVersionFunc;
+ op_major_version_ptr_t MinorVersionFunc;
+ IsOProfileRunningPtrT IsOProfileRunningFunc;
+
+ bool Initialized;
+
+public:
+ OProfileWrapper();
+
+ // For testing with a mock opagent implementation, skips the dynamic load and
+ // the function resolution.
+ OProfileWrapper(op_open_agent_ptr_t OpenAgentImpl,
+ op_close_agent_ptr_t CloseAgentImpl,
+ op_write_native_code_ptr_t WriteNativeCodeImpl,
+ op_write_debug_line_info_ptr_t WriteDebugLineInfoImpl,
+ op_unload_native_code_ptr_t UnloadNativeCodeImpl,
+ op_major_version_ptr_t MajorVersionImpl,
+ op_major_version_ptr_t MinorVersionImpl,
+ IsOProfileRunningPtrT MockIsOProfileRunningImpl = 0)
+ : OpenAgentFunc(OpenAgentImpl),
+ CloseAgentFunc(CloseAgentImpl),
+ WriteNativeCodeFunc(WriteNativeCodeImpl),
+ WriteDebugLineInfoFunc(WriteDebugLineInfoImpl),
+ UnloadNativeCodeFunc(UnloadNativeCodeImpl),
+ MajorVersionFunc(MajorVersionImpl),
+ MinorVersionFunc(MinorVersionImpl),
+ IsOProfileRunningFunc(MockIsOProfileRunningImpl),
+ Initialized(true)
+ {
+ }
+
+ // Calls op_open_agent in the oprofile JIT library and saves the returned
+ // op_agent_t handle internally so it can be used when calling all the other
+ // op_* functions. Callers of this class do not need to keep track of
+ // op_agent_t objects.
+ bool op_open_agent();
+
+ int op_close_agent();
+ int op_write_native_code(const char* name,
+ uint64_t addr,
+ void const* code,
+ const unsigned int size);
+ int op_write_debug_line_info(void const* code,
+ size_t num_entries,
+ struct debug_line_info const* info);
+ int op_unload_native_code(uint64_t addr);
+ int op_major_version(void);
+ int op_minor_version(void);
+
+ // Returns true if the oprofiled process is running, the opagent library is
+ // loaded and a connection to the agent has been established, and false
+ // otherwise.
+ bool isAgentAvailable();
+
+private:
+ // Loads the libopagent library and initializes this wrapper if the oprofile
+ // daemon is running
+ bool initialize();
+
+ // Searches /proc for the oprofile daemon and returns true if the process is
+ // found, or false otherwise.
+ bool checkForOProfileProcEntry();
+
+ bool isOProfileRunning();
+};
+
+} // namespace llvm
+
+#endif //OPROFILE_WRAPPER_H
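A sketch of the expected call sequence when the daemon may or may not be running (FnAddr, FnCode, and FnSize are illustrative):

  OProfileWrapper W;                  // lazily dlopens libopagent on first use
  if (W.op_open_agent()) {
    W.op_write_native_code("jitted_fn", FnAddr, FnCode, FnSize);
    W.op_unload_native_code(FnAddr);
    W.op_close_agent();
  }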
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
index 724b9f0..54c28f3 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -35,15 +35,18 @@ public:
RTDyldMemoryManager() {}
virtual ~RTDyldMemoryManager();
- // Allocate ActualSize bytes, or more, for the named function. Return
- // a pointer to the allocated memory and update Size to reflect how much
- // memory was acutally allocated.
- virtual uint8_t *startFunctionBody(const char *Name, uintptr_t &Size) = 0;
+ /// allocateCodeSection - Allocate a memory block of (at least) the given
+ /// size suitable for executable code.
+ virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID) = 0;
- // Mark the end of the function, including how much of the allocated
- // memory was actually used.
- virtual void endFunctionBody(const char *Name, uint8_t *FunctionStart,
- uint8_t *FunctionEnd) = 0;
+ /// allocateDataSection - Allocate a memory block of (at least) the given
+ /// size suitable for data.
+ virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID) = 0;
+
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) = 0;
};
class RuntimeDyld {
@@ -54,6 +57,10 @@ class RuntimeDyld {
// interface.
RuntimeDyldImpl *Dyld;
RTDyldMemoryManager *MM;
+protected:
+ // Change the address associated with a section when resolving relocations.
+ // Any relocations already associated with the symbol will be re-resolved.
+ void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
public:
RuntimeDyld(RTDyldMemoryManager*);
~RuntimeDyld();
@@ -65,9 +72,13 @@ public:
void *getSymbolAddress(StringRef Name);
// Resolve the relocations for all symbols we currently know about.
void resolveRelocations();
- // Change the address associated with a symbol when resolving relocations.
- // Any relocations already associated with the symbol will be re-resolved.
- void reassignSymbolAddress(StringRef Name, uint8_t *Addr);
+
+ /// mapSectionAddress - map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress);
+
StringRef getErrorString();
};
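reassignSymbolAddress gives way to section-granular remapping. A sketch of the new flow (Dyld has already loaded an object; LocalBuf is the host-side section the memory manager returned):

  Dyld.mapSectionAddress(LocalBuf, 0x400000ULL); // address the target will see
  Dyld.resolveRelocations();                     // fixups use the mapped address
  void *Entry = Dyld.getSymbolAddress("main");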
diff --git a/contrib/llvm/include/llvm/Function.h b/contrib/llvm/include/llvm/Function.h
index 678651b..e17cd87 100644
--- a/contrib/llvm/include/llvm/Function.h
+++ b/contrib/llvm/include/llvm/Function.h
@@ -146,7 +146,7 @@ public:
/// The particular intrinsic functions which correspond to this value are
/// defined in llvm/Intrinsics.h.
///
- unsigned getIntrinsicID() const LLVM_ATTRIBUTE_READONLY;
+ unsigned getIntrinsicID() const LLVM_READONLY;
bool isIntrinsic() const { return getIntrinsicID() != 0; }
/// getCallingConv()/setCallingConv(CC) - These method get and set the
@@ -425,6 +425,12 @@ public:
///
bool hasAddressTaken(const User** = 0) const;
+ /// isDefTriviallyDead - Return true if it is trivially safe to remove
+ /// this function definition from the module (because it isn't externally
+ /// visible, does not have its address taken, and has no callers). To make
+ /// this more accurate, call removeDeadConstantUsers first.
+ bool isDefTriviallyDead() const;
+
/// callsFunctionThatReturnsTwice - Return true if the function has a call to
/// setjmp or other function that gcc recognizes as "returning twice".
bool callsFunctionThatReturnsTwice() const;
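A sketch of the intended use in a global-DCE style sweep (M is an illustrative Module*):

  for (Module::iterator I = M->begin(), E = M->end(); I != E; ) {
    Function &F = *I++;               // advance first; erasing invalidates F
    F.removeDeadConstantUsers();      // sharpens the answer, per the comment above
    if (!F.isDeclaration() && F.isDefTriviallyDead())
      F.eraseFromParent();
  }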
diff --git a/contrib/llvm/include/llvm/GlobalValue.h b/contrib/llvm/include/llvm/GlobalValue.h
index 63dc4ab..81a11a4 100644
--- a/contrib/llvm/include/llvm/GlobalValue.h
+++ b/contrib/llvm/include/llvm/GlobalValue.h
@@ -59,19 +59,18 @@ public:
protected:
GlobalValue(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps,
LinkageTypes linkage, const Twine &Name)
- : Constant(ty, vty, Ops, NumOps), Parent(0),
- Linkage(linkage), Visibility(DefaultVisibility), Alignment(0),
- UnnamedAddr(0) {
+ : Constant(ty, vty, Ops, NumOps), Linkage(linkage),
+ Visibility(DefaultVisibility), Alignment(0), UnnamedAddr(0), Parent(0) {
setName(Name);
}
- Module *Parent;
// Note: VC++ treats enums as signed, so an extra bit is required to prevent
// Linkage and Visibility from turning into negative values.
LinkageTypes Linkage : 5; // The linkage of this global
unsigned Visibility : 2; // The visibility style of this global
unsigned Alignment : 16; // Alignment of this symbol, must be power of two
unsigned UnnamedAddr : 1; // This value's address is not significant
+ Module *Parent; // The containing module.
std::string Section; // Section to emit this into, empty means default
public:
~GlobalValue() {
diff --git a/contrib/llvm/include/llvm/InitializePasses.h b/contrib/llvm/include/llvm/InitializePasses.h
index c91fbf8..33d2043 100644
--- a/contrib/llvm/include/llvm/InitializePasses.h
+++ b/contrib/llvm/include/llvm/InitializePasses.h
@@ -31,6 +31,10 @@ void initializeTransformUtils(PassRegistry&);
/// ScalarOpts library.
void initializeScalarOpts(PassRegistry&);
+/// initializeVectorization - Initialize all passes linked into the
+/// Vectorize library.
+void initializeVectorization(PassRegistry&);
+
/// initializeInstCombine - Initialize all passes linked into the
/// InstCombine library.
void initializeInstCombine(PassRegistry&);
@@ -67,6 +71,7 @@ void initializeBasicCallGraphPass(PassRegistry&);
void initializeBlockExtractorPassPass(PassRegistry&);
void initializeBlockFrequencyInfoPass(PassRegistry&);
void initializeBlockPlacementPass(PassRegistry&);
+void initializeBranchFolderPassPass(PassRegistry&);
void initializeBranchProbabilityInfoPass(PassRegistry&);
void initializeBreakCriticalEdgesPass(PassRegistry&);
void initializeCFGOnlyPrinterPass(PassRegistry&);
@@ -77,8 +82,10 @@ void initializeCFGViewerPass(PassRegistry&);
void initializeCalculateSpillWeightsPass(PassRegistry&);
void initializeCallGraphAnalysisGroup(PassRegistry&);
void initializeCodeGenPreparePass(PassRegistry&);
+void initializeCodePlacementOptPass(PassRegistry&);
void initializeConstantMergePass(PassRegistry&);
void initializeConstantPropagationPass(PassRegistry&);
+void initializeMachineCopyPropagationPass(PassRegistry&);
void initializeCorrelatedValuePropagationPass(PassRegistry&);
void initializeDAEPass(PassRegistry&);
void initializeDAHPass(PassRegistry&);
@@ -94,12 +101,17 @@ void initializeDominanceFrontierPass(PassRegistry&);
void initializeDominatorTreePass(PassRegistry&);
void initializeEdgeBundlesPass(PassRegistry&);
void initializeEdgeProfilerPass(PassRegistry&);
+void initializeExpandPostRAPass(PassRegistry&);
void initializePathProfilerPass(PassRegistry&);
void initializeGCOVProfilerPass(PassRegistry&);
+void initializeAddressSanitizerPass(PassRegistry&);
+void initializeThreadSanitizerPass(PassRegistry&);
void initializeEarlyCSEPass(PassRegistry&);
void initializeExpandISelPseudosPass(PassRegistry&);
void initializeFindUsedTypesPass(PassRegistry&);
void initializeFunctionAttrsPass(PassRegistry&);
+void initializeGCInfoDeleterPass(PassRegistry&);
+void initializeGCMachineCodeAnalysisPass(PassRegistry&);
void initializeGCModuleInfoPass(PassRegistry&);
void initializeGVNPass(PassRegistry&);
void initializeGlobalDCEPass(PassRegistry&);
@@ -127,6 +139,7 @@ void initializeLiveStacksPass(PassRegistry&);
void initializeLiveVariablesPass(PassRegistry&);
void initializeLoaderPassPass(PassRegistry&);
void initializePathProfileLoaderPassPass(PassRegistry&);
+void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLoopDeletionPass(PassRegistry&);
void initializeLoopDependenceAnalysisPass(PassRegistry&);
void initializeLoopExtractorPass(PassRegistry&);
@@ -134,8 +147,8 @@ void initializeLoopInfoPass(PassRegistry&);
void initializeLoopInstSimplifyPass(PassRegistry&);
void initializeLoopRotatePass(PassRegistry&);
void initializeLoopSimplifyPass(PassRegistry&);
-void initializeLoopSplitterPass(PassRegistry&);
void initializeLoopStrengthReducePass(PassRegistry&);
+void initializeGlobalMergePass(PassRegistry&);
void initializeLoopUnrollPass(PassRegistry&);
void initializeLoopUnswitchPass(PassRegistry&);
void initializeLoopIdiomRecognizePass(PassRegistry&);
@@ -145,6 +158,8 @@ void initializeLowerIntrinsicsPass(PassRegistry&);
void initializeLowerInvokePass(PassRegistry&);
void initializeLowerSwitchPass(PassRegistry&);
void initializeMachineBlockFrequencyInfoPass(PassRegistry&);
+void initializeMachineBlockPlacementPass(PassRegistry&);
+void initializeMachineBlockPlacementStatsPass(PassRegistry&);
void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
void initializeMachineCSEPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
@@ -152,6 +167,7 @@ void initializeMachineLICMPass(PassRegistry&);
void initializeMachineLoopInfoPass(PassRegistry&);
void initializeMachineLoopRangesPass(PassRegistry&);
void initializeMachineModuleInfoPass(PassRegistry&);
+void initializeMachineSchedulerPass(PassRegistry&);
void initializeMachineSinkingPass(PassRegistry&);
void initializeMachineVerifierPassPass(PassRegistry&);
void initializeMemCpyOptPass(PassRegistry&);
@@ -163,6 +179,7 @@ void initializeNoAAPass(PassRegistry&);
void initializeNoProfileInfoPass(PassRegistry&);
void initializeNoPathProfileInfoPass(PassRegistry&);
void initializeObjCARCAliasAnalysisPass(PassRegistry&);
+void initializeObjCARCAPElimPass(PassRegistry&);
void initializeObjCARCExpandPass(PassRegistry&);
void initializeObjCARCContractPass(PassRegistry&);
void initializeObjCARCOptPass(PassRegistry&);
@@ -177,6 +194,7 @@ void initializePostDomOnlyViewerPass(PassRegistry&);
void initializePostDomPrinterPass(PassRegistry&);
void initializePostDomViewerPass(PassRegistry&);
void initializePostDominatorTreePass(PassRegistry&);
+void initializePostRASchedulerPass(PassRegistry&);
void initializePreVerifierPass(PassRegistry&);
void initializePrintDbgInfoPass(PassRegistry&);
void initializePrintFunctionPassPass(PassRegistry&);
@@ -189,7 +207,6 @@ void initializePathProfileVerifierPass(PassRegistry&);
void initializeProfileVerifierPassPass(PassRegistry&);
void initializePromotePassPass(PassRegistry&);
void initializePruneEHPass(PassRegistry&);
-void initializeRALinScanPass(PassRegistry&);
void initializeReassociatePass(PassRegistry&);
void initializeRegToMemPass(PassRegistry&);
void initializeRegionInfoPass(PassRegistry&);
@@ -219,6 +236,8 @@ void initializeStripNonDebugSymbolsPass(PassRegistry&);
void initializeStripSymbolsPass(PassRegistry&);
void initializeStrongPHIEliminationPass(PassRegistry&);
void initializeTailCallElimPass(PassRegistry&);
+void initializeTailDuplicatePassPass(PassRegistry&);
+void initializeTargetPassConfigPass(PassRegistry&);
void initializeTargetDataPass(PassRegistry&);
void initializeTargetLibraryInfoPass(PassRegistry&);
void initializeTwoAddressInstructionPassPass(PassRegistry&);
@@ -229,7 +248,9 @@ void initializeUnreachableMachineBlockElimPass(PassRegistry&);
void initializeVerifierPass(PassRegistry&);
void initializeVirtRegMapPass(PassRegistry&);
void initializeInstSimplifierPass(PassRegistry&);
-
+void initializeUnpackMachineBundlesPass(PassRegistry&);
+void initializeFinalizeMachineBundlesPass(PassRegistry&);
+void initializeBBVectorizePass(PassRegistry&);
}
#endif
diff --git a/contrib/llvm/include/llvm/InlineAsm.h b/contrib/llvm/include/llvm/InlineAsm.h
index de5ce4e..37aa18b 100644
--- a/contrib/llvm/include/llvm/InlineAsm.h
+++ b/contrib/llvm/include/llvm/InlineAsm.h
@@ -17,6 +17,7 @@
#define LLVM_INLINEASM_H
#include "llvm/Value.h"
+#include "llvm/ADT/StringRef.h"
#include <vector>
namespace llvm {
diff --git a/contrib/llvm/include/llvm/InstrTypes.h b/contrib/llvm/include/llvm/InstrTypes.h
index a1492f3..2529f24 100644
--- a/contrib/llvm/include/llvm/InstrTypes.h
+++ b/contrib/llvm/include/llvm/InstrTypes.h
@@ -388,6 +388,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
/// if (isa<CastInst>(Instr)) { ... }
/// @brief Base class of casting instructions.
class CastInst : public UnaryInstruction {
+ virtual void anchor();
protected:
/// @brief Constructor with insert-before-instruction semantics for subclasses
CastInst(Type *Ty, unsigned iType, Value *S,
diff --git a/contrib/llvm/include/llvm/Instruction.def b/contrib/llvm/include/llvm/Instruction.def
index d36e4be..e59a052 100644
--- a/contrib/llvm/include/llvm/Instruction.def
+++ b/contrib/llvm/include/llvm/Instruction.def
@@ -99,81 +99,80 @@ HANDLE_TERM_INST ( 2, Br , BranchInst)
HANDLE_TERM_INST ( 3, Switch , SwitchInst)
HANDLE_TERM_INST ( 4, IndirectBr , IndirectBrInst)
HANDLE_TERM_INST ( 5, Invoke , InvokeInst)
-HANDLE_TERM_INST ( 6, Unwind , UnwindInst)
-HANDLE_TERM_INST ( 7, Resume , ResumeInst)
-HANDLE_TERM_INST ( 8, Unreachable, UnreachableInst)
- LAST_TERM_INST ( 8)
+HANDLE_TERM_INST ( 6, Resume , ResumeInst)
+HANDLE_TERM_INST ( 7, Unreachable, UnreachableInst)
+ LAST_TERM_INST ( 7)
// Standard binary operators...
- FIRST_BINARY_INST( 9)
-HANDLE_BINARY_INST( 9, Add , BinaryOperator)
-HANDLE_BINARY_INST(10, FAdd , BinaryOperator)
-HANDLE_BINARY_INST(11, Sub , BinaryOperator)
-HANDLE_BINARY_INST(12, FSub , BinaryOperator)
-HANDLE_BINARY_INST(13, Mul , BinaryOperator)
-HANDLE_BINARY_INST(14, FMul , BinaryOperator)
-HANDLE_BINARY_INST(15, UDiv , BinaryOperator)
-HANDLE_BINARY_INST(16, SDiv , BinaryOperator)
-HANDLE_BINARY_INST(17, FDiv , BinaryOperator)
-HANDLE_BINARY_INST(18, URem , BinaryOperator)
-HANDLE_BINARY_INST(19, SRem , BinaryOperator)
-HANDLE_BINARY_INST(20, FRem , BinaryOperator)
+ FIRST_BINARY_INST( 8)
+HANDLE_BINARY_INST( 8, Add , BinaryOperator)
+HANDLE_BINARY_INST( 9, FAdd , BinaryOperator)
+HANDLE_BINARY_INST(10, Sub , BinaryOperator)
+HANDLE_BINARY_INST(11, FSub , BinaryOperator)
+HANDLE_BINARY_INST(12, Mul , BinaryOperator)
+HANDLE_BINARY_INST(13, FMul , BinaryOperator)
+HANDLE_BINARY_INST(14, UDiv , BinaryOperator)
+HANDLE_BINARY_INST(15, SDiv , BinaryOperator)
+HANDLE_BINARY_INST(16, FDiv , BinaryOperator)
+HANDLE_BINARY_INST(17, URem , BinaryOperator)
+HANDLE_BINARY_INST(18, SRem , BinaryOperator)
+HANDLE_BINARY_INST(19, FRem , BinaryOperator)
// Logical operators (integer operands)
-HANDLE_BINARY_INST(21, Shl , BinaryOperator) // Shift left (logical)
-HANDLE_BINARY_INST(22, LShr , BinaryOperator) // Shift right (logical)
-HANDLE_BINARY_INST(23, AShr , BinaryOperator) // Shift right (arithmetic)
-HANDLE_BINARY_INST(24, And , BinaryOperator)
-HANDLE_BINARY_INST(25, Or , BinaryOperator)
-HANDLE_BINARY_INST(26, Xor , BinaryOperator)
- LAST_BINARY_INST(26)
+HANDLE_BINARY_INST(20, Shl , BinaryOperator) // Shift left (logical)
+HANDLE_BINARY_INST(21, LShr , BinaryOperator) // Shift right (logical)
+HANDLE_BINARY_INST(22, AShr , BinaryOperator) // Shift right (arithmetic)
+HANDLE_BINARY_INST(23, And , BinaryOperator)
+HANDLE_BINARY_INST(24, Or , BinaryOperator)
+HANDLE_BINARY_INST(25, Xor , BinaryOperator)
+ LAST_BINARY_INST(25)
// Memory operators...
- FIRST_MEMORY_INST(27)
-HANDLE_MEMORY_INST(27, Alloca, AllocaInst) // Stack management
-HANDLE_MEMORY_INST(28, Load , LoadInst ) // Memory manipulation instrs
-HANDLE_MEMORY_INST(29, Store , StoreInst )
-HANDLE_MEMORY_INST(30, GetElementPtr, GetElementPtrInst)
-HANDLE_MEMORY_INST(31, Fence , FenceInst )
-HANDLE_MEMORY_INST(32, AtomicCmpXchg , AtomicCmpXchgInst )
-HANDLE_MEMORY_INST(33, AtomicRMW , AtomicRMWInst )
- LAST_MEMORY_INST(33)
+ FIRST_MEMORY_INST(26)
+HANDLE_MEMORY_INST(26, Alloca, AllocaInst) // Stack management
+HANDLE_MEMORY_INST(27, Load , LoadInst ) // Memory manipulation instrs
+HANDLE_MEMORY_INST(28, Store , StoreInst )
+HANDLE_MEMORY_INST(29, GetElementPtr, GetElementPtrInst)
+HANDLE_MEMORY_INST(30, Fence , FenceInst )
+HANDLE_MEMORY_INST(31, AtomicCmpXchg , AtomicCmpXchgInst )
+HANDLE_MEMORY_INST(32, AtomicRMW , AtomicRMWInst )
+ LAST_MEMORY_INST(32)
// Cast operators ...
// NOTE: The order matters here because CastInst::isEliminableCastPair
// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
- FIRST_CAST_INST(34)
-HANDLE_CAST_INST(34, Trunc , TruncInst ) // Truncate integers
-HANDLE_CAST_INST(35, ZExt , ZExtInst ) // Zero extend integers
-HANDLE_CAST_INST(36, SExt , SExtInst ) // Sign extend integers
-HANDLE_CAST_INST(37, FPToUI , FPToUIInst ) // floating point -> UInt
-HANDLE_CAST_INST(38, FPToSI , FPToSIInst ) // floating point -> SInt
-HANDLE_CAST_INST(39, UIToFP , UIToFPInst ) // UInt -> floating point
-HANDLE_CAST_INST(40, SIToFP , SIToFPInst ) // SInt -> floating point
-HANDLE_CAST_INST(41, FPTrunc , FPTruncInst ) // Truncate floating point
-HANDLE_CAST_INST(42, FPExt , FPExtInst ) // Extend floating point
-HANDLE_CAST_INST(43, PtrToInt, PtrToIntInst) // Pointer -> Integer
-HANDLE_CAST_INST(44, IntToPtr, IntToPtrInst) // Integer -> Pointer
-HANDLE_CAST_INST(45, BitCast , BitCastInst ) // Type cast
- LAST_CAST_INST(45)
+ FIRST_CAST_INST(33)
+HANDLE_CAST_INST(33, Trunc , TruncInst ) // Truncate integers
+HANDLE_CAST_INST(34, ZExt , ZExtInst ) // Zero extend integers
+HANDLE_CAST_INST(35, SExt , SExtInst ) // Sign extend integers
+HANDLE_CAST_INST(36, FPToUI , FPToUIInst ) // floating point -> UInt
+HANDLE_CAST_INST(37, FPToSI , FPToSIInst ) // floating point -> SInt
+HANDLE_CAST_INST(38, UIToFP , UIToFPInst ) // UInt -> floating point
+HANDLE_CAST_INST(39, SIToFP , SIToFPInst ) // SInt -> floating point
+HANDLE_CAST_INST(40, FPTrunc , FPTruncInst ) // Truncate floating point
+HANDLE_CAST_INST(41, FPExt , FPExtInst ) // Extend floating point
+HANDLE_CAST_INST(42, PtrToInt, PtrToIntInst) // Pointer -> Integer
+HANDLE_CAST_INST(43, IntToPtr, IntToPtrInst) // Integer -> Pointer
+HANDLE_CAST_INST(44, BitCast , BitCastInst ) // Type cast
+ LAST_CAST_INST(44)
// Other operators...
- FIRST_OTHER_INST(46)
-HANDLE_OTHER_INST(46, ICmp , ICmpInst ) // Integer comparison instruction
-HANDLE_OTHER_INST(47, FCmp , FCmpInst ) // Floating point comparison instr.
-HANDLE_OTHER_INST(48, PHI , PHINode ) // PHI node instruction
-HANDLE_OTHER_INST(49, Call , CallInst ) // Call a function
-HANDLE_OTHER_INST(50, Select , SelectInst ) // select instruction
-HANDLE_OTHER_INST(51, UserOp1, Instruction) // May be used internally in a pass
-HANDLE_OTHER_INST(52, UserOp2, Instruction) // Internal to passes only
-HANDLE_OTHER_INST(53, VAArg , VAArgInst ) // vaarg instruction
-HANDLE_OTHER_INST(54, ExtractElement, ExtractElementInst)// extract from vector
-HANDLE_OTHER_INST(55, InsertElement, InsertElementInst) // insert into vector
-HANDLE_OTHER_INST(56, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
-HANDLE_OTHER_INST(57, ExtractValue, ExtractValueInst)// extract from aggregate
-HANDLE_OTHER_INST(58, InsertValue, InsertValueInst) // insert into aggregate
-HANDLE_OTHER_INST(59, LandingPad, LandingPadInst) // Landing pad instruction.
- LAST_OTHER_INST(59)
+ FIRST_OTHER_INST(45)
+HANDLE_OTHER_INST(45, ICmp , ICmpInst ) // Integer comparison instruction
+HANDLE_OTHER_INST(46, FCmp , FCmpInst ) // Floating point comparison instr.
+HANDLE_OTHER_INST(47, PHI , PHINode ) // PHI node instruction
+HANDLE_OTHER_INST(48, Call , CallInst ) // Call a function
+HANDLE_OTHER_INST(49, Select , SelectInst ) // select instruction
+HANDLE_OTHER_INST(50, UserOp1, Instruction) // May be used internally in a pass
+HANDLE_OTHER_INST(51, UserOp2, Instruction) // Internal to passes only
+HANDLE_OTHER_INST(52, VAArg , VAArgInst ) // vaarg instruction
+HANDLE_OTHER_INST(53, ExtractElement, ExtractElementInst)// extract from vector
+HANDLE_OTHER_INST(54, InsertElement, InsertElementInst) // insert into vector
+HANDLE_OTHER_INST(55, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
+HANDLE_OTHER_INST(56, ExtractValue, ExtractValueInst)// extract from aggregate
+HANDLE_OTHER_INST(57, InsertValue, InsertValueInst) // insert into aggregate
+HANDLE_OTHER_INST(58, LandingPad, LandingPadInst) // Landing pad instruction.
+ LAST_OTHER_INST(58)
#undef FIRST_TERM_INST
#undef HANDLE_TERM_INST
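For context on how the renumbering above propagates: Instruction.def is designed to be #include'd after defining the HANDLE_* macros, so every table built this way picks up the shifted opcode numbers on recompilation. A minimal sketch of the pattern (the OpcodeName helper is hypothetical, not part of this change):

    // Hypothetical consumer of Instruction.def: each HANDLE_*_INST entry
    // expands to one case label, so the renumbering is absorbed on rebuild.
    static const char *OpcodeName(unsigned Opcode) {
      switch (Opcode) {
    #define HANDLE_INST(NUM, OPCODE, CLASS) case NUM: return #OPCODE;
    #include "llvm/Instruction.def"
      default: return "<unknown>";
      }
    }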
diff --git a/contrib/llvm/include/llvm/Instruction.h b/contrib/llvm/include/llvm/Instruction.h
index 934e890..9c5ac44 100644
--- a/contrib/llvm/include/llvm/Instruction.h
+++ b/contrib/llvm/include/llvm/Instruction.h
@@ -143,7 +143,7 @@ public:
/// getMetadata - Get the metadata of given kind attached to this Instruction.
/// If the metadata is not found then return null.
- MDNode *getMetadata(const char *Kind) const {
+ MDNode *getMetadata(StringRef Kind) const {
if (!hasMetadata()) return 0;
return getMetadataImpl(Kind);
}
@@ -168,7 +168,7 @@ public:
/// node. This updates/replaces metadata if already present, or removes it if
/// Node is null.
void setMetadata(unsigned KindID, MDNode *Node);
- void setMetadata(const char *Kind, MDNode *Node);
+ void setMetadata(StringRef Kind, MDNode *Node);
/// setDebugLoc - Set the debug location information for this instruction.
void setDebugLoc(const DebugLoc &Loc) { DbgLoc = Loc; }
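The StringRef overloads above accept any string-like kind name directly; a small usage sketch, assuming an existing Instruction *I and LLVMContext &Ctx (the kind name "my.note" is made up for illustration):

    std::string Kind = "my.note";                    // no .c_str() needed now
    Value *Payload = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
    I->setMetadata(Kind, MDNode::get(Ctx, Payload)); // attach custom metadata
    if (MDNode *N = I->getMetadata(Kind))            // query by the same name
      (void)N;
    I->setMetadata(Kind, 0);                         // a null node detaches it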
@@ -185,7 +185,7 @@ private:
// These are all implemented in Metadata.cpp.
MDNode *getMetadataImpl(unsigned KindID) const;
- MDNode *getMetadataImpl(const char *Kind) const;
+ MDNode *getMetadataImpl(StringRef Kind) const;
void getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned,MDNode*> > &)const;
void getAllMetadataOtherThanDebugLocImpl(SmallVectorImpl<std::pair<unsigned,
MDNode*> > &) const;
@@ -244,26 +244,6 @@ public:
return mayWriteToMemory() || mayThrow();
}
- /// isSafeToSpeculativelyExecute - Return true if the instruction does not
- /// have any effects besides calculating the result and does not have
- /// undefined behavior.
- ///
- /// This method never returns true for an instruction that returns true for
- /// mayHaveSideEffects; however, this method also does some other checks in
- /// addition. It checks for undefined behavior, like dividing by zero or
- /// loading from an invalid pointer (but not for undefined results, like a
- /// shift with a shift amount larger than the width of the result). It checks
- /// for malloc and alloca because speculatively executing them might cause a
- /// memory leak. It also returns false for instructions related to control
- /// flow, specifically terminators and PHI nodes.
- ///
- /// This method only looks at the instruction itself and its operands, so if
- /// this method returns true, it is safe to move the instruction as long as
- /// the correct dominance relationships for the operands and users hold.
- /// However, this method can return true for instructions that read memory;
- /// for such instructions, moving them may change the resulting value.
- bool isSafeToSpeculativelyExecute() const;
-
/// clone() - Create a copy of 'this' instruction that is identical in all
/// ways except the following:
/// * The instruction has no parent
diff --git a/contrib/llvm/include/llvm/Instructions.h b/contrib/llvm/include/llvm/Instructions.h
index 3faab35..f6eaf04 100644
--- a/contrib/llvm/include/llvm/Instructions.h
+++ b/contrib/llvm/include/llvm/Instructions.h
@@ -776,6 +776,10 @@ public:
static Type *getIndexedType(Type *Ptr, ArrayRef<Constant *> IdxList);
static Type *getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList);
+  /// getAddressSpace - Returns the address space used by the GEP pointer.
+ ///
+ static unsigned getAddressSpace(Value *Ptr);
+
inline op_iterator idx_begin() { return op_begin()+1; }
inline const_op_iterator idx_begin() const { return op_begin()+1; }
inline op_iterator idx_end() { return op_end(); }
@@ -788,7 +792,7 @@ public:
return getOperand(0);
}
static unsigned getPointerOperandIndex() {
- return 0U; // get index for modifying correct operand
+ return 0U; // get index for modifying correct operand.
}
unsigned getPointerAddressSpace() const {
@@ -797,10 +801,25 @@ public:
/// getPointerOperandType - Method to return the pointer operand as a
/// PointerType.
- PointerType *getPointerOperandType() const {
- return reinterpret_cast<PointerType*>(getPointerOperand()->getType());
+ Type *getPointerOperandType() const {
+ return getPointerOperand()->getType();
}
+  /// getGEPReturnType - Returns the pointer type returned by the GEP
+ /// instruction, which may be a vector of pointers.
+ static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
+ Type *PtrTy = PointerType::get(checkGEPType(
+ getIndexedType(Ptr->getType(), IdxList)),
+ getAddressSpace(Ptr));
+ // Vector GEP
+ if (Ptr->getType()->isVectorTy()) {
+ unsigned NumElem = cast<VectorType>(Ptr->getType())->getNumElements();
+ return VectorType::get(PtrTy, NumElem);
+ }
+
+ // Scalar GEP
+ return PtrTy;
+ }
unsigned getNumIndices() const { // Note: always non-negative
return getNumOperands() - 1;
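The practical effect of getGEPReturnType above: when the pointer operand is itself a vector of pointers, the result type is widened to a matching vector. A sketch of the type computation, with hypothetical types and an LLVMContext &Ctx:

    Type *I32       = Type::getInt32Ty(Ctx);
    Type *ScalarPtr = PointerType::get(I32, 0);       // i32* in addrspace(0)
    Type *VecPtr    = VectorType::get(ScalarPtr, 4);  // <4 x i32*>
    // For a GEP whose pointer operand has type VecPtr, the code above
    // returns VectorType::get(ScalarPtr, 4) -- one result pointer per
    // lane -- instead of the plain ScalarPtr produced for a scalar GEP.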
@@ -847,10 +866,7 @@ GetElementPtrInst::GetElementPtrInst(Value *Ptr,
unsigned Values,
const Twine &NameStr,
Instruction *InsertBefore)
- : Instruction(PointerType::get(checkGEPType(
- getIndexedType(Ptr->getType(), IdxList)),
- cast<PointerType>(Ptr->getType())
- ->getAddressSpace()),
+ : Instruction(getGEPReturnType(Ptr, IdxList),
GetElementPtr,
OperandTraits<GetElementPtrInst>::op_end(this) - Values,
Values, InsertBefore) {
@@ -861,10 +877,7 @@ GetElementPtrInst::GetElementPtrInst(Value *Ptr,
unsigned Values,
const Twine &NameStr,
BasicBlock *InsertAtEnd)
- : Instruction(PointerType::get(checkGEPType(
- getIndexedType(Ptr->getType(), IdxList)),
- cast<PointerType>(Ptr->getType())
- ->getAddressSpace()),
+ : Instruction(getGEPReturnType(Ptr, IdxList),
GetElementPtr,
OperandTraits<GetElementPtrInst>::op_end(this) - Values,
Values, InsertAtEnd) {
@@ -905,7 +918,7 @@ public:
"Both operands to ICmp instruction are not of the same type!");
// Check that the operands are the right type
assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
- getOperand(0)->getType()->isPointerTy()) &&
+ getOperand(0)->getType()->getScalarType()->isPointerTy()) &&
"Invalid operand types for ICmp instruction");
}
@@ -945,7 +958,7 @@ public:
"Both operands to ICmp instruction are not of the same type!");
// Check that the operands are the right type
assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
- getOperand(0)->getType()->isPointerTy()) &&
+ getOperand(0)->getType()->getScalarType()->isPointerTy()) &&
"Invalid operand types for ICmp instruction");
}
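Checking the scalar type in the assertions above makes icmp over vectors of pointers constructible. A minimal sketch, assuming two values A and B of type <2 x i8*> and a positioned IRBuilder<> Builder:

    // Lane-wise pointer equality; the result has type <2 x i1>.
    Value *EqMask = Builder.CreateICmpEQ(A, B, "ptr.eq");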
@@ -1657,10 +1670,33 @@ public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+ Constant *getMask() const {
+ return reinterpret_cast<Constant*>(getOperand(2));
+ }
+
/// getMaskValue - Return the index from the shuffle mask for the specified
/// output result. This is either -1 if the element is undef or a number less
/// than 2*numelements.
- int getMaskValue(unsigned i) const;
+ static int getMaskValue(Constant *Mask, unsigned i);
+
+ int getMaskValue(unsigned i) const {
+ return getMaskValue(getMask(), i);
+ }
+
+ /// getShuffleMask - Return the full mask for this instruction, where each
+ /// element is the element number and undef's are returned as -1.
+ static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result);
+
+ void getShuffleMask(SmallVectorImpl<int> &Result) const {
+ return getShuffleMask(getMask(), Result);
+ }
+
+ SmallVector<int, 16> getShuffleMask() const {
+ SmallVector<int, 16> Mask;
+ getShuffleMask(Mask);
+ return Mask;
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const ShuffleVectorInst *) { return true; }
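The accessors added above make the mask readable without touching the operands directly; a short sketch, given some ShuffleVectorInst *SVI:

    // Fetch the whole mask at once; undef lanes are reported as -1.
    SmallVector<int, 16> Mask = SVI->getShuffleMask();
    for (unsigned i = 0, e = Mask.size(); i != e; ++i)
      errs() << "lane " << i << " <- " << Mask[i] << "\n";
    int Lane0 = SVI->getMaskValue(0);   // or query a single output lane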
@@ -2431,6 +2467,122 @@ class SwitchInst : public TerminatorInst {
protected:
virtual SwitchInst *clone_impl() const;
public:
+
+  // -2 cast to unsigned; a pseudo-index reserved for the default case.
+ static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
+
+ template <class SwitchInstTy, class ConstantIntTy, class BasicBlockTy>
+ class CaseIteratorT {
+ protected:
+
+ SwitchInstTy *SI;
+ unsigned Index;
+
+ public:
+
+ typedef CaseIteratorT<SwitchInstTy, ConstantIntTy, BasicBlockTy> Self;
+
+ /// Initializes case iterator for given SwitchInst and for given
+ /// case number.
+ CaseIteratorT(SwitchInstTy *SI, unsigned CaseNum) {
+ this->SI = SI;
+ Index = CaseNum;
+ }
+
+ /// Initializes case iterator for given SwitchInst and for given
+ /// TerminatorInst's successor index.
+ static Self fromSuccessorIndex(SwitchInstTy *SI, unsigned SuccessorIndex) {
+ assert(SuccessorIndex < SI->getNumSuccessors() &&
+ "Successor index # out of range!");
+ return SuccessorIndex != 0 ?
+ Self(SI, SuccessorIndex - 1) :
+ Self(SI, DefaultPseudoIndex);
+ }
+
+ /// Resolves case value for current case.
+ ConstantIntTy *getCaseValue() {
+      assert(Index < SI->getNumCases() && "Index out of the number of cases.");
+ return reinterpret_cast<ConstantIntTy*>(SI->getOperand(2 + Index*2));
+ }
+
+ /// Resolves successor for current case.
+ BasicBlockTy *getCaseSuccessor() {
+ assert((Index < SI->getNumCases() ||
+ Index == DefaultPseudoIndex) &&
+             "Index out of the number of cases.");
+ return SI->getSuccessor(getSuccessorIndex());
+ }
+
+ /// Returns number of current case.
+ unsigned getCaseIndex() const { return Index; }
+
+ /// Returns TerminatorInst's successor index for current case successor.
+ unsigned getSuccessorIndex() const {
+ assert((Index == DefaultPseudoIndex || Index < SI->getNumCases()) &&
+             "Index out of the number of cases.");
+ return Index != DefaultPseudoIndex ? Index + 1 : 0;
+ }
+
+ Self operator++() {
+ // Check index correctness after increment.
+ // Note: Index == getNumCases() means end().
+      assert(Index+1 <= SI->getNumCases() && "Index out of the number of cases.");
+ ++Index;
+ return *this;
+ }
+ Self operator++(int) {
+ Self tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+ Self operator--() {
+ // Check index correctness after decrement.
+ // Note: Index == getNumCases() means end().
+      // Also allow the "-1" iterator here; it becomes valid after ++.
+      assert((Index == 0 || Index-1 <= SI->getNumCases()) &&
+             "Index out of the number of cases.");
+ --Index;
+ return *this;
+ }
+ Self operator--(int) {
+ Self tmp = *this;
+ --(*this);
+ return tmp;
+ }
+ bool operator==(const Self& RHS) const {
+      assert(RHS.SI == SI && "Incompatible iterators.");
+ return RHS.Index == Index;
+ }
+ bool operator!=(const Self& RHS) const {
+      assert(RHS.SI == SI && "Incompatible iterators.");
+ return RHS.Index != Index;
+ }
+ };
+
+ typedef CaseIteratorT<const SwitchInst, const ConstantInt, const BasicBlock>
+ ConstCaseIt;
+
+ class CaseIt : public CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> {
+
+ typedef CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> ParentTy;
+
+ public:
+
+ CaseIt(const ParentTy& Src) : ParentTy(Src) {}
+ CaseIt(SwitchInst *SI, unsigned CaseNum) : ParentTy(SI, CaseNum) {}
+
+ /// Sets the new value for current case.
+ void setValue(ConstantInt *V) {
+      assert(Index < SI->getNumCases() && "Index out of the number of cases.");
+ SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
+ }
+
+ /// Sets the new successor for current case.
+ void setSuccessor(BasicBlock *S) {
+ SI->setSuccessor(getSuccessorIndex(), S);
+ }
+ };
+
static SwitchInst *Create(Value *Value, BasicBlock *Default,
unsigned NumCases, Instruction *InsertBefore = 0) {
return new SwitchInst(Value, Default, NumCases, InsertBefore);
@@ -2439,6 +2591,7 @@ public:
unsigned NumCases, BasicBlock *InsertAtEnd) {
return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
}
+
~SwitchInst();
/// Provide fast operand accessors
@@ -2452,61 +2605,94 @@ public:
return cast<BasicBlock>(getOperand(1));
}
- /// getNumCases - return the number of 'cases' in this switch instruction.
- /// Note that case #0 is always the default case.
- unsigned getNumCases() const {
- return getNumOperands()/2;
+ void setDefaultDest(BasicBlock *DefaultCase) {
+ setOperand(1, reinterpret_cast<Value*>(DefaultCase));
}
- /// getCaseValue - Return the specified case value. Note that case #0, the
- /// default destination, does not have a case value.
- ConstantInt *getCaseValue(unsigned i) {
- assert(i && i < getNumCases() && "Illegal case value to get!");
- return getSuccessorValue(i);
+  /// getNumCases - return the number of 'cases' in this switch instruction,
+  /// not counting the default case.
+ unsigned getNumCases() const {
+ return getNumOperands()/2 - 1;
}
- /// getCaseValue - Return the specified case value. Note that case #0, the
- /// default destination, does not have a case value.
- const ConstantInt *getCaseValue(unsigned i) const {
- assert(i && i < getNumCases() && "Illegal case value to get!");
- return getSuccessorValue(i);
+ /// Returns a read/write iterator that points to the first
+  /// case in the SwitchInst.
+ CaseIt case_begin() {
+ return CaseIt(this, 0);
}
-
+ /// Returns a read-only iterator that points to the first
+ /// case in the SwitchInst.
+ ConstCaseIt case_begin() const {
+ return ConstCaseIt(this, 0);
+ }
+
+ /// Returns a read/write iterator that points one past the last
+  /// case in the SwitchInst.
+ CaseIt case_end() {
+ return CaseIt(this, getNumCases());
+ }
+ /// Returns a read-only iterator that points one past the last
+  /// case in the SwitchInst.
+ ConstCaseIt case_end() const {
+ return ConstCaseIt(this, getNumCases());
+ }
+ /// Returns an iterator that points to the default case.
+  /// Note: this iterator only allows resolving the successor; attempting
+  /// to resolve the case value triggers an assertion.
+  /// Also note that incrementing or decrementing this iterator triggers an
+  /// assertion and leaves it invalid.
+ CaseIt case_default() {
+ return CaseIt(this, DefaultPseudoIndex);
+ }
+ ConstCaseIt case_default() const {
+ return ConstCaseIt(this, DefaultPseudoIndex);
+ }
+
/// findCaseValue - Search all of the case values for the specified constant.
- /// If it is explicitly handled, return the case number of it, otherwise
- /// return 0 to indicate that it is handled by the default handler.
- unsigned findCaseValue(const ConstantInt *C) const {
- for (unsigned i = 1, e = getNumCases(); i != e; ++i)
- if (getCaseValue(i) == C)
+  /// If it is explicitly handled, return its case iterator; otherwise
+  /// return the default case iterator to indicate that it is handled by
+  /// the default handler.
+ CaseIt findCaseValue(const ConstantInt *C) {
+ for (CaseIt i = case_begin(), e = case_end(); i != e; ++i)
+ if (i.getCaseValue() == C)
return i;
- return 0;
+ return case_default();
}
-
+ ConstCaseIt findCaseValue(const ConstantInt *C) const {
+ for (ConstCaseIt i = case_begin(), e = case_end(); i != e; ++i)
+ if (i.getCaseValue() == C)
+ return i;
+ return case_default();
+ }
+
/// findCaseDest - Finds the unique case value for a given successor. Returns
/// null if the successor is not found, not unique, or is the default case.
ConstantInt *findCaseDest(BasicBlock *BB) {
if (BB == getDefaultDest()) return NULL;
ConstantInt *CI = NULL;
- for (unsigned i = 1, e = getNumCases(); i != e; ++i) {
- if (getSuccessor(i) == BB) {
+ for (CaseIt i = case_begin(), e = case_end(); i != e; ++i) {
+ if (i.getCaseSuccessor() == BB) {
if (CI) return NULL; // Multiple cases lead to BB.
- else CI = getCaseValue(i);
+ else CI = i.getCaseValue();
}
}
return CI;
}
/// addCase - Add an entry to the switch instruction...
- ///
+ /// Note:
+  /// This action invalidates case_end(). The old case_end() iterator will
+ /// point to the added case.
void addCase(ConstantInt *OnVal, BasicBlock *Dest);
- /// removeCase - This method removes the specified successor from the switch
- /// instruction. Note that this cannot be used to remove the default
- /// destination (successor #0). Also note that this operation may reorder the
+ /// removeCase - This method removes the specified case and its successor
+ /// from the switch instruction. Note that this operation may reorder the
/// remaining cases at index idx and above.
- ///
- void removeCase(unsigned idx);
+ /// Note:
+ /// This action invalidates iterators for all cases following the one removed,
+ /// including the case_end() iterator.
+ void removeCase(CaseIt i);
unsigned getNumSuccessors() const { return getNumOperands()/2; }
BasicBlock *getSuccessor(unsigned idx) const {
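Callers migrate from index arithmetic to the iterator API above; a sketch over some SwitchInst *SI and ConstantInt *C:

    // Visit every explicit case; the default destination is not included.
    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      ConstantInt *Val  = i.getCaseValue();
      BasicBlock  *Dest = i.getCaseSuccessor();
      (void)Val; (void)Dest;
    }
    // findCaseValue now answers with an iterator; comparing against
    // case_default() replaces the old "returned 0" convention.
    bool HandledByDefault = (SI->findCaseValue(C) == SI->case_default());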
@@ -2518,20 +2704,6 @@ public:
setOperand(idx*2+1, (Value*)NewSucc);
}
- // getSuccessorValue - Return the value associated with the specified
- // successor.
- ConstantInt *getSuccessorValue(unsigned idx) const {
- assert(idx < getNumSuccessors() && "Successor # out of range!");
- return reinterpret_cast<ConstantInt*>(getOperand(idx*2));
- }
-
- // setSuccessorValue - Updates the value associated with the specified
- // successor.
- void setSuccessorValue(unsigned idx, ConstantInt* SuccessorValue) {
- assert(idx < getNumSuccessors() && "Successor # out of range!");
- setOperand(idx*2, reinterpret_cast<Value*>(SuccessorValue));
- }
-
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SwitchInst *) { return true; }
static inline bool classof(const Instruction *I) {
@@ -2890,42 +3062,6 @@ InvokeInst::InvokeInst(Value *Func,
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value)
//===----------------------------------------------------------------------===//
-// UnwindInst Class
-//===----------------------------------------------------------------------===//
-
-//===---------------------------------------------------------------------------
-/// UnwindInst - Immediately exit the current function, unwinding the stack
-/// until an invoke instruction is found.
-///
-class UnwindInst : public TerminatorInst {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
-protected:
- virtual UnwindInst *clone_impl() const;
-public:
- // allocate space for exactly zero operands
- void *operator new(size_t s) {
- return User::operator new(s, 0);
- }
- explicit UnwindInst(LLVMContext &C, Instruction *InsertBefore = 0);
- explicit UnwindInst(LLVMContext &C, BasicBlock *InsertAtEnd);
-
- unsigned getNumSuccessors() const { return 0; }
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UnwindInst *) { return true; }
- static inline bool classof(const Instruction *I) {
- return I->getOpcode() == Instruction::Unwind;
- }
- static inline bool classof(const Value *V) {
- return isa<Instruction>(V) && classof(cast<Instruction>(V));
- }
-private:
- virtual BasicBlock *getSuccessorV(unsigned idx) const;
- virtual unsigned getNumSuccessorsV() const;
- virtual void setSuccessorV(unsigned idx, BasicBlock *B);
-};
-
-//===----------------------------------------------------------------------===//
// ResumeInst Class
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/include/llvm/IntrinsicInst.h b/contrib/llvm/include/llvm/IntrinsicInst.h
index 4286201..1cebdd2 100644
--- a/contrib/llvm/include/llvm/IntrinsicInst.h
+++ b/contrib/llvm/include/llvm/IntrinsicInst.h
@@ -277,34 +277,6 @@ namespace llvm {
}
};
- /// EHExceptionInst - This represents the llvm.eh.exception instruction.
- ///
- class EHExceptionInst : public IntrinsicInst {
- public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const EHExceptionInst *) { return true; }
- static inline bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::eh_exception;
- }
- static inline bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
- /// EHSelectorInst - This represents the llvm.eh.selector instruction.
- ///
- class EHSelectorInst : public IntrinsicInst {
- public:
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const EHSelectorInst *) { return true; }
- static inline bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::eh_selector;
- }
- static inline bool classof(const Value *V) {
- return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
- }
- };
-
}
#endif
diff --git a/contrib/llvm/include/llvm/Intrinsics.td b/contrib/llvm/include/llvm/Intrinsics.td
index d70f915..069f907 100644
--- a/contrib/llvm/include/llvm/Intrinsics.td
+++ b/contrib/llvm/include/llvm/Intrinsics.td
@@ -284,8 +284,8 @@ def int_expect : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
let Properties = [IntrNoMem] in {
def int_bswap: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
def int_ctpop: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
- def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
- def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+ def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
+ def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
}
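Frontends supply the extra i1 operand when emitting these intrinsics; when it is true, a zero input is allowed to yield undef, which enables better codegen on some targets. A sketch assuming an i32 value X and a positioned IRBuilder<> Builder:

    // Declare llvm.ctlz.i32 and pass the new "is zero undef" flag.
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Type *Tys[] = { Builder.getInt32Ty() };
    Function *Ctlz = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
    Value *LeadingZeros = Builder.CreateCall2(Ctlz, X, Builder.getTrue(), "lz");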
//===------------------------ Debugger Intrinsics -------------------------===//
@@ -304,10 +304,6 @@ let Properties = [IntrNoMem] in {
//===------------------ Exception Handling Intrinsics----------------------===//
//
-def int_eh_exception : Intrinsic<[llvm_ptr_ty], [], [IntrReadMem]>;
-def int_eh_selector : Intrinsic<[llvm_i32_ty],
- [llvm_ptr_ty, llvm_ptr_ty, llvm_vararg_ty]>;
-def int_eh_resume : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [Throws]>;
// The result of eh.typeid.for depends on the enclosing function, but inside a
// given function it is 'const' and may be CSE'd etc.
@@ -326,7 +322,6 @@ let Properties = [IntrNoMem] in {
def int_eh_sjlj_callsite : Intrinsic<[], [llvm_i32_ty]>;
}
def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>;
-def int_eh_sjlj_dispatch_setup : Intrinsic<[], [llvm_i32_ty]>;
def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;
@@ -443,6 +438,6 @@ include "llvm/IntrinsicsPowerPC.td"
include "llvm/IntrinsicsX86.td"
include "llvm/IntrinsicsARM.td"
include "llvm/IntrinsicsCellSPU.td"
-include "llvm/IntrinsicsAlpha.td"
include "llvm/IntrinsicsXCore.td"
include "llvm/IntrinsicsPTX.td"
+include "llvm/IntrinsicsHexagon.td"
diff --git a/contrib/llvm/include/llvm/IntrinsicsAlpha.td b/contrib/llvm/include/llvm/IntrinsicsAlpha.td
deleted file mode 100644
index 59865cf..0000000
--- a/contrib/llvm/include/llvm/IntrinsicsAlpha.td
+++ /dev/null
@@ -1,18 +0,0 @@
-//===- IntrinsicsAlpha.td - Defines Alpha intrinsics -------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines all of the Alpha-specific intrinsics.
-//
-//===----------------------------------------------------------------------===//
-
-
-let TargetPrefix = "alpha" in { // All intrinsics start with "llvm.alpha.".
- def int_alpha_umulh : GCCBuiltin<"__builtin_alpha_umulh">,
- Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
-}
diff --git a/contrib/llvm/include/llvm/IntrinsicsHexagon.td b/contrib/llvm/include/llvm/IntrinsicsHexagon.td
new file mode 100644
index 0000000..eb5dc8f
--- /dev/null
+++ b/contrib/llvm/include/llvm/IntrinsicsHexagon.td
@@ -0,0 +1,3671 @@
+//===- IntrinsicsHexagon.td - Defines Hexagon intrinsics ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the Hexagon-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Definitions for all Hexagon intrinsics.
+//
+// All Hexagon intrinsics start with "llvm.hexagon.".
+let TargetPrefix = "hexagon" in {
+  /// Hexagon_Intrinsic - Base class for all Hexagon intrinsics.
+ class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+ list<LLVMType> param_types,
+ list<IntrinsicProperty> properties>
+ : GCCBuiltin<!strconcat("__builtin_", GCCIntSuffix)>,
+ Intrinsic<ret_types, param_types, properties>;
+}
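Each def below also becomes an entry in the Intrinsic enum (int_hexagon_X maps to Intrinsic::hexagon_X), so the intrinsics can be emitted from C++ like any other. A sketch using the first definition in this file, assuming i32 values A and B and a positioned IRBuilder<> Builder:

    // Emit a call to llvm.hexagon.C2.cmpeq; the result is an i1 predicate.
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *Cmpeq = Intrinsic::getDeclaration(M, Intrinsic::hexagon_C2_cmpeq);
    Value *Pred = Builder.CreateCall2(Cmpeq, A, B, "p.eq");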
+
+//===----------------------------------------------------------------------===//
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_MEM,BT_BOOL,BT_PTR) ->
+// Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_ptr_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(void_ftype_SI,BT_VOID,BT_INT) ->
+// Hexagon_void_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_void_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(HI_ftype_SI,BT_I16,BT_INT) ->
+// Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i16_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_SI,BT_INT,BT_INT) ->
+// Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_SI,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_DI,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_DI,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_QI,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_SI,BT_BOOL,BT_INT) ->
+// Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_QI,BT_LONGLONG,BT_BOOL) ->
+// Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_QI,BT_INT,BT_BOOL) ->
+// Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_SISI,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(void_ftype_SISI,BT_VOID,BT_INT,BT_INT) ->
+// Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SISI,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(USI_ftype_SISI,BT_UINT,BT_INT,BT_INT) ->
+// Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SISI,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_SISI,BT_ULONGLONG,BT_INT,BT_INT) ->
+// Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SIDI,BT_LONGLONG,BT_INT,BT_LONGLONG) ->
+// Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DISI,BT_LONGLONG,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SIDI,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DIDI,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_DIDI,BT_ULONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DISI,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_DIDI,BT_BOOL,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_QIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(QI_ftype_QIQIQI,BT_BOOL,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QIQI,BT_INT,BT_BOOL,BT_BOOL) ->
+// Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QISI,BT_INT,BT_BOOL,BT_INT) ->
+// Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i1_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(void_ftype_SISISI,BT_VOID,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISISI,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_SISISI,BT_LONGLONG,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_DISISI,BT_INT,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DISISI,BT_LONGLONG,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDISI,BT_INT,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDISI,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG,BT_INT) ->
+// Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDIDI,BT_INT,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG) ->
+// Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISIDI,BT_INT,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_QISISI,BT_INT,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QISISI,BT_LONGLONG,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i1_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QIDIDI,BT_LONGLONG,BT_BOOL,BT_LONGLONG,
+// BT_LONGLONG) ->
+// Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIQI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+// BT_BOOL) ->
+// Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(SI_ftype_SISISISI,BT_INT,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(DI_ftype_DIDISISI,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeq,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeq : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgt : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtu : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgtu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpeqp : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpeqp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtp : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpgtp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtup,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtup : Hexagon_qi_didi_Intrinsic<"HEXAGON.C2.cmpgtup">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsset,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsset : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsset">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclr,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclr : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsclr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeqi : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgti,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgti : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgti">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtui : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgtui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgei,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgei : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgei">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgeui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgeui : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpgeui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmplt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmplt : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmplt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpltu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpltu : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.cmpltu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclri,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclri : Hexagon_qi_sisi_Intrinsic<"HEXAGON.C2.bitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C2_and,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_and : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.and">;
+//
+// BUILTIN_INFO(HEXAGON.C2_or,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_or : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.or">;
+//
+// BUILTIN_INFO(HEXAGON.C2_xor,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_xor : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.xor">;
+//
+// BUILTIN_INFO(HEXAGON.C2_andn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_andn : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.andn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_not,QI_ftype_QI,1)
+//
+def int_hexagon_C2_not : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.not">;
+//
+// BUILTIN_INFO(HEXAGON.C2_orn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_orn : Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C2.orn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_pxfer_map,QI_ftype_QI,1)
+//
+def int_hexagon_C2_pxfer_map : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.pxfer.map">;
+//
+// BUILTIN_INFO(HEXAGON.C2_any8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_any8 : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.any8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_all8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_all8 : Hexagon_qi_qi_Intrinsic<"HEXAGON.C2.all8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vitpack,SI_ftype_QIQI,2)
+//
+def int_hexagon_C2_vitpack : Hexagon_si_qiqi_Intrinsic<"HEXAGON.C2.vitpack">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mux,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_mux : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.mux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxii,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxii : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxii">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxir,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxir : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxir">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxri,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxri : Hexagon_si_qisisi_Intrinsic<"HEXAGON.C2.muxri">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vmux,DI_ftype_QIDIDI,3)
+//
+def int_hexagon_C2_vmux : Hexagon_di_qididi_Intrinsic<"HEXAGON.C2.vmux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mask,DI_ftype_QI,1)
+//
+def int_hexagon_C2_mask : Hexagon_di_qi_Intrinsic<"HEXAGON.C2.mask">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbeq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbeq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpheq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpheq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpheq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgt : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmphgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpweq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpweq : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpweq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgt : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpwgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgtu : Hexagon_qi_didi_Intrinsic<"HEXAGON.A2.vcmpwgtu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrpr,SI_ftype_QI,1)
+//
+def int_hexagon_C2_tfrpr : Hexagon_si_qi_Intrinsic<"HEXAGON.C2.tfrpr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrrp,QI_ftype_SI,1)
+//
+def int_hexagon_C2_tfrrp : Hexagon_qi_si_Intrinsic<"HEXAGON.C2.tfrrp">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.acc.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpy.nac.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.sat.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyd.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.mpyd.rnd.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.mpyu.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s0 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s1 :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.acc.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.mpyud.nac.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.hl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.lh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.lh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.ll.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s1 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.mpyud.ll.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpysmi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpysmi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpysmi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsip,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsip :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.macsip">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsin,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsin :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.macsin">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.dpmpyss.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyss.acc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyss.nac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyuu_s0 :
+Hexagon_udi_sisi_Intrinsic<"HEXAGON.M2.dpmpyuu.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyuu.acc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.dpmpyuu.nac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpy.up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_up,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_up :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.M2.mpyu.up">;
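+//
+// mpy_up and mpyu_up above return only the upper 32 bits of the full
+// 64-bit product, in signed and unsigned flavors respectively.
+//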
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_rnd_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_rnd_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.dpmpyss.rnd.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpyi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyui,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.mpyui">;
+//
+// BUILTIN_INFO(HEXAGON.M2_maci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_maci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.maci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_acci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_acci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.acci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_accii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_accii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.accii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_nacci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_nacci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.nacci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_naccii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_naccii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.naccii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_subacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_subacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.subacc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2s.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2s.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s0pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.vmpy2s.s1pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.vmac2">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vmpy2es.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vmpy2es.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vmac2es">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmac_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrmac_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrmac.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmpy_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrmpy_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrmpy.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s0,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s0 :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vdmpyrs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s1,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s1 :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vdmpyrs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vdmacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vdmacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vdmpys.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vdmpys.s1">;
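+//
+// The cmpy family below performs complex multiplies: each 32-bit
+// source is a packed complex value with the real part in the low
+// halfword and the imaginary part in the high halfword; a trailing
+// "c" (cmpysc, cmacsc, cnacsc) conjugates the second operand first.
+//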
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.cmpyrsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpys.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpys.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpysc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpysc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacsc.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cnacsc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1,DI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1 :
+Hexagon_di_disi_Intrinsic<"HEXAGON.M2.vrcmpys.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_acc_s1,DI_ftype_DIDISI,3)
+//
+def int_hexagon_M2_vrcmpys_acc_s1 :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.M2.vrcmpys.acc.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1rp,SI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1rp :
+Hexagon_si_disi_Intrinsic<"HEXAGON.M2.vrcmpys.s1rp">;
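+//
+// The mmac/mmpy intrinsics below are vector multiplies and
+// multiply-accumulates over 64-bit vector pairs; the rs0/rs1 variants
+// appear to be the rounding counterparts of the s0/s1 shift forms.
+//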
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacls.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmachs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyl.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyl_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyl_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.hmmpyl.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyh_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyh_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.M2.hmmpyh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmaculs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.mmacuhs.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyul.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.mmpyuh.rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmaci.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmacr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmaci.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vrcmacr.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmaci_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmaci_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmaci.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacr_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacr_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.M2.cmacr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyi.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyi.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vrcmpyr.s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyi_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyi_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpyi.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyr_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyr_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.M2.cmpyr.s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s0.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s0.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s1.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vcmpy.s1.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_i,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_i :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vcmac.s0.sat.i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_r,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_r :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M2.vcmac.s0.sat.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vcrotate,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_vcrotate :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.vcrotate">;
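+//
+// The multiply-unit (M2) intrinsics end here; the A2 intrinsics that
+// follow cover the ALU operations: add/subtract, logical, min/max,
+// saturate and transfer, in scalar and 64-bit vector forms.
+//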
+//
+// BUILTIN_INFO(HEXAGON.A2_add,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_add :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.add">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sub,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_sub :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.sub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addi,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.l16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.l16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.addh.h16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subh.h16.sat.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_aslh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_aslh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.aslh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_asrh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.asrh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.addp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addpsat,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addpsat :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.addpsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsp,DI_ftype_SIDI,2)
+//
+def int_hexagon_A2_addsp :
+Hexagon_di_sidi_Intrinsic<"HEXAGON.A2.addsp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_subp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.subp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_neg,SI_ftype_SI,1)
+//
+def int_hexagon_A2_neg :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.neg">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negsat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_negsat :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.negsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abs,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abs :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.abs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abssat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abssat :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.abssat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vconj,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vconj :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vconj">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_negp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.negp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_absp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_absp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.absp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_max,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_max :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.max">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_maxu :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.A2.maxu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_min,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_min :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.min">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_minu :
+Hexagon_usi_sisi_Intrinsic<"HEXAGON.A2.minu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.maxp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxup :
+Hexagon_udi_didi_Intrinsic<"HEXAGON.A2.maxup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.minp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minup :
+Hexagon_udi_didi_Intrinsic<"HEXAGON.A2.minup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfr,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfr :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.tfr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrsi,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrsi :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.tfrsi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_tfrp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.tfrp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrpi,DI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrpi :
+Hexagon_di_si_Intrinsic<"HEXAGON.A2.tfrpi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.zxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxth :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.zxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxth :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combinew,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combinew :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A2.combinew">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combineii,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combineii :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A2.combineii">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.combine.ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfril,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfril :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.tfril">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrih,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfrih :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.tfrih">;
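+//
+// Of the transfer intrinsics above, tfr and tfrp copy a 32- or 64-bit
+// register, tfrsi and tfrpi transfer an immediate, and tfril/tfrih
+// overwrite only the low or high halfword of the destination.
+//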
+//
+// BUILTIN_INFO(HEXAGON.A2_and,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_and :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.and">;
+//
+// BUILTIN_INFO(HEXAGON.A2_or,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_or :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.or">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xor,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_xor :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.xor">;
+//
+// BUILTIN_INFO(HEXAGON.A2_not,SI_ftype_SI,1)
+//
+def int_hexagon_A2_not :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.not">;
+//
+// BUILTIN_INFO(HEXAGON.M2_xor_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_xor_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M2.xor.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subri,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.subri">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_andir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.andir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_orir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.orir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_andp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.andp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_orp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.orp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xorp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_xorp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.xorp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_notp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_notp :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.notp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtw,DI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtw :
+Hexagon_di_si_Intrinsic<"HEXAGON.A2.sxtw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sat,SI_ftype_DI,1)
+//
+def int_hexagon_A2_sat :
+Hexagon_si_di_Intrinsic<"HEXAGON.A2.sat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sath,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sath :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.sath">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satuh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satuh :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satub,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satub :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satb :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.satb">;
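+//
+// The v-prefixed A2 intrinsics below operate lane-wise on a 64-bit
+// register pair packed with bytes (b), halfwords (h) or words (w);
+// "u" marks unsigned lanes and a trailing "s" a saturating result.
+//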
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddubs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddubs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddubs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vadduhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vadduhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddws :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vaddws">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavghs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavghs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svavghs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svnavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svnavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svadduhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svadduhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubuhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubuhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A2.svsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vraddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vraddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vraddub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.A2.vraddub.acc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vradduh,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vradduh :
+Hexagon_si_didi_Intrinsic<"HEXAGON.M2.vradduh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsububs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsububs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsububs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubuhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubuhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubws :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vsubws">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsh,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsh :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabsh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabshsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabshsat :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabshsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsw,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsw :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabsw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabswsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabswsat :
+Hexagon_di_di_Intrinsic<"HEXAGON.A2.vabswsat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffw,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vabsdiffw">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffh,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.M2.vabsdiffh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vrsadub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vrsadub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vrsadub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.A2.vrsadub.acc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgubr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgubr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavgubr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavguhr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vnavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxub :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vminuw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A2.vmaxuw">;
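+//
+// The S2 shift intrinsics below follow the pattern
+// S2_<asr|asl|lsr|lsl>_<r|i>_<r|p>[_<suffix>]: arithmetic or logical
+// shift right/left, by a register (r) or immediate (i) amount, on a
+// 32-bit register (r) or 64-bit pair (p), optionally fused via acc
+// (add), nac (subtract), and/or (bitwise combine) or xacc (xor) with
+// the destination, or with sat/rnd applied to the result.
+//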
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsr.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsl.r.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.r.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.r.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.r.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsl.r.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.r.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.r.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.r.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsl.r.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.r.r.sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.r.r.sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.lsr.i.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.i.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_xacc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_xacc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_xacc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_xacc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asr.i.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.lsr.i.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.asl.i.r.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asr.i.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.lsr.i.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.asl.i.p.or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_i_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asl.i.r.sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r_rnd :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r.rnd">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd_goodsyntax,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.asr.i.r.rnd.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_addasl_rrri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_addasl_rrri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.addasl.rrri">;
+//
+// BUILTIN_INFO(HEXAGON.S2_valignib,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_valignib :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.valignib">;
+//
+// BUILTIN_INFO(HEXAGON.S2_valignrb,DI_ftype_DIDIQI,3)
+//
+def int_hexagon_S2_valignrb :
+Hexagon_di_didiqi_Intrinsic<"HEXAGON.S2.valignrb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vspliceib,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_vspliceib :
+Hexagon_di_didisi_Intrinsic<"HEXAGON.S2.vspliceib">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplicerb,DI_ftype_DIDIQI,3)
+//
+def int_hexagon_S2_vsplicerb :
+Hexagon_di_didiqi_Intrinsic<"HEXAGON.S2.vsplicerb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplatrh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsplatrh :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsplatrh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplatrb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_vsplatrb :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.vsplatrb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insert,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_insert :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.insert">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxb_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxb_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxb.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxh_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxh_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxh.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxw_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxw_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxw.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxd_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxd_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON.S2.tableidxd.goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractu,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_extractu :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S2.extractu">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insertp,DI_ftype_DIDISISI,4)
+//
+def int_hexagon_S2_insertp :
+Hexagon_di_didisisi_Intrinsic<"HEXAGON.S2.insertp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractup,DI_ftype_DISISI,3)
+//
+def int_hexagon_S2_extractup :
+Hexagon_di_disisi_Intrinsic<"HEXAGON.S2.extractup">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insert_rp,SI_ftype_SISIDI,3)
+//
+def int_hexagon_S2_insert_rp :
+Hexagon_si_sisidi_Intrinsic<"HEXAGON.S2.insert.rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractu_rp,SI_ftype_SIDI,2)
+//
+def int_hexagon_S2_extractu_rp :
+Hexagon_si_sidi_Intrinsic<"HEXAGON.S2.extractu.rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insertp_rp,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_S2_insertp_rp :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.S2.insertp.rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractup_rp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_extractup_rp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.extractup.rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tstbit_i,QI_ftype_SISI,2)
+//
+def int_hexagon_S2_tstbit_i :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.S2.tstbit.i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_setbit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_setbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.setbit.i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_togglebit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_togglebit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.togglebit.i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clrbit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_clrbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.clrbit.i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tstbit_r,QI_ftype_SISI,2)
+//
+def int_hexagon_S2_tstbit_r :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.S2.tstbit.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_setbit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_setbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.setbit.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_togglebit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_togglebit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.togglebit.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clrbit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_clrbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.S2.clrbit.r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.i.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_svw_trun,SI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_svw_trun :
+Hexagon_si_disi_Intrinsic<"HEXAGON.S2.asr.i.svw.trun">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_svw_trun,SI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_svw_trun :
+Hexagon_si_disi_Intrinsic<"HEXAGON.S2.asr.r.svw.trun">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.i.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.i.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asr.r.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.asl.r.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsr.r.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON.S2.lsl.r.vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrndpackwh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vrndpackwh :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vrndpackwh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrndpackwhs,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vrndpackwhs :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vrndpackwhs">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsxtbh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsxtbh :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsxtbh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vzxtbh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vzxtbh :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vzxtbh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathub,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathub :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsathub">;
+//
+// BUILTIN_INFO(HEXAGON.S2_svsathub,SI_ftype_SI,1)
+//
+def int_hexagon_S2_svsathub :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.svsathub">;
+//
+// BUILTIN_INFO(HEXAGON.S2_svsathb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_svsathb :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.svsathb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathb :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsathb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunohb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vtrunohb :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vtrunohb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunewh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_vtrunewh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.vtrunewh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunowh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_vtrunowh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.vtrunowh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunehb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vtrunehb :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vtrunehb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsxthw,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsxthw :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vsxthw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vzxthw,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vzxthw :
+Hexagon_di_si_Intrinsic<"HEXAGON.S2.vzxthw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwh :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsatwh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwuh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwuh :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.vsatwuh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_packhl,DI_ftype_SISI,2)
+//
+def int_hexagon_S2_packhl :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.S2.packhl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_swiz,SI_ftype_SI,1)
+//
+def int_hexagon_A2_swiz :
+Hexagon_si_si_Intrinsic<"HEXAGON.A2.swiz">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathub_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathub_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsathub.nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathb_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathb_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsathb.nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwh_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwh_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsatwh.nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwuh_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwuh_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.vsatwuh.nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffob,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffob :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffob">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffeb,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffeb :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffeb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffoh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffoh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffoh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffeh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffeh :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.shuffeh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_parityp,SI_ftype_DIDI,2)
+//
+def int_hexagon_S2_parityp :
+Hexagon_si_didi_Intrinsic<"HEXAGON.S2.parityp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lfsp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_lfsp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S2.lfsp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clbnorm,SI_ftype_SI,1)
+//
+def int_hexagon_S2_clbnorm :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.clbnorm">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_clb :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.clb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl0,SI_ftype_SI,1)
+//
+def int_hexagon_S2_cl0 :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.cl0">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl1,SI_ftype_SI,1)
+//
+def int_hexagon_S2_cl1 :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.cl1">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clbp,SI_ftype_DI,1)
+//
+def int_hexagon_S2_clbp :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.clbp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl0p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_cl0p :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.cl0p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl1p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_cl1p :
+Hexagon_si_di_Intrinsic<"HEXAGON.S2.cl1p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_brev,SI_ftype_SI,1)
+//
+def int_hexagon_S2_brev :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.brev">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct0,SI_ftype_SI,1)
+//
+def int_hexagon_S2_ct0 :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.ct0">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct1,SI_ftype_SI,1)
+//
+def int_hexagon_S2_ct1 :
+Hexagon_si_si_Intrinsic<"HEXAGON.S2.ct1">;
+//
+// BUILTIN_INFO(HEXAGON.S2_interleave,DI_ftype_DI,1)
+//
+def int_hexagon_S2_interleave :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.interleave">;
+//
+// BUILTIN_INFO(HEXAGON.S2_deinterleave,DI_ftype_DI,1)
+//
+def int_hexagon_S2_deinterleave :
+Hexagon_di_di_Intrinsic<"HEXAGON.S2.deinterleave">;
+
+//
+// BUILTIN_INFO(SI_to_SXTHI_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_SI_to_SXTHI_asrh :
+Hexagon_si_si_Intrinsic<"SI.to.SXTHI.asrh">;
+
+//
+// BUILTIN_INFO(HEXAGON.A4_orn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_orn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.orn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_andn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.andn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_ornp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_ornp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A4.ornp">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andnp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_andnp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.A4.andnp">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineir,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineir :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A4.combineir">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineri,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineri :
+Hexagon_di_sisi_Intrinsic<"HEXAGON.A4.combineri">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneq,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneq :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneqi :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplte,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplte :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplte">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpltei,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpltei :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmpltei">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteu,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteu :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplteu">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteui,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteui :
+Hexagon_qi_sisi_Intrinsic<"HEXAGON.C4.cmplteui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.rcmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9 :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C4.fastcorner9">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9_not,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9_not :
+Hexagon_qi_qiqi_Intrinsic<"HEXAGON.C4.fastcorner9_not">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_andn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_and :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_orn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_or :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.and_or">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_andn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_and :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_orn :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_or :
+Hexagon_qi_qiqiqi_Intrinsic<"HEXAGON.C4.or_or">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.addaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.subaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_andnp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_andnp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S4.andnp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ornp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_ornp :
+Hexagon_di_didi_Intrinsic<"HEXAGON.S4.ornp">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_xacc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_xor_xacc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON.M4.xor_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.and_xor">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.xor_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_xor">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.M4.or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andix,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andix :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_andix">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_andi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_ori,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_ori :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON.S4.or_ori">;
+//
+// BUILTIN_INFO(HEXAGON.A4_modwrapu,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_modwrapu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.modwrapu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.cround_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.cround_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_ri_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON.A4.round_rr_sat">;
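For reference, a minimal C sketch of how one of the Hexagon definitions above surfaces to source code. The builtin spelling is an assumption: the BUILTIN_INFO lines suggest the frontend exposes each intrinsic as __builtin_HEXAGON_* with the dots of the GCC-builtin suffix replaced by underscores; compile for a Hexagon target (e.g. clang --target=hexagon).

    /* Count trailing zero bits via llvm.hexagon.S2.ct0 (S2_ct0 above);
       __builtin_HEXAGON_S2_ct0 is the assumed Clang spelling. */
    int count_trailing_zeros(int x) {
        return __builtin_HEXAGON_S2_ct0(x);
    }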
diff --git a/contrib/llvm/include/llvm/IntrinsicsX86.td b/contrib/llvm/include/llvm/IntrinsicsX86.td
index d445a01..a6fda4a 100644
--- a/contrib/llvm/include/llvm/IntrinsicsX86.td
+++ b/contrib/llvm/include/llvm/IntrinsicsX86.td
@@ -145,10 +145,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Comparison ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_sse_cmp_ss :
+ def int_x86_sse_cmp_ss : GCCBuiltin<"__builtin_ia32_cmpss">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_sse_cmp_ps :
+ def int_x86_sse_cmp_ps : GCCBuiltin<"__builtin_ia32_cmpps">,
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_sse_comieq_ss : GCCBuiltin<"__builtin_ia32_comieq">,
@@ -281,10 +281,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// FP comparison ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_sse2_cmp_sd :
+ def int_x86_sse2_cmp_sd : GCCBuiltin<"__builtin_ia32_cmpsd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_sse2_cmp_pd :
+ def int_x86_sse2_cmp_pd : GCCBuiltin<"__builtin_ia32_cmppd">,
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_sse2_comieq_sd : GCCBuiltin<"__builtin_ia32_comisdeq">,
@@ -452,28 +452,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_i32_ty], [IntrNoMem]>;
}
-// Integer comparison ops
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_sse2_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb128">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
- llvm_v16i8_ty], [IntrNoMem]>;
- def int_x86_sse2_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw128">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty], [IntrNoMem]>;
- def int_x86_sse2_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd128">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty], [IntrNoMem]>;
- def int_x86_sse2_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb128">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
- llvm_v16i8_ty], [IntrNoMem]>;
- def int_x86_sse2_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw128">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty], [IntrNoMem]>;
- def int_x86_sse2_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd128">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty], [IntrNoMem]>;
-}
-
// Conversion ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_cvtdq2pd : GCCBuiltin<"__builtin_ia32_cvtdq2pd">,
@@ -627,8 +605,8 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_phadd_sw_128 : GCCBuiltin<"__builtin_ia32_phaddsw128">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
def int_x86_ssse3_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
@@ -655,8 +633,8 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_x86mmx_ty], [IntrNoMem]>;
def int_x86_ssse3_pmadd_ub_sw_128 : GCCBuiltin<"__builtin_ia32_pmaddubsw128">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
- llvm_v8i16_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem]>;
}
// Packed multiply high with round and scale
@@ -792,12 +770,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Vector compare, min, max
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_sse41_pcmpeqq : GCCBuiltin<"__builtin_ia32_pcmpeqq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem, Commutative]>;
- def int_x86_sse42_pcmpgtq : GCCBuiltin<"__builtin_ia32_pcmpgtq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
- [IntrNoMem]>;
def int_x86_sse41_pmaxsb : GCCBuiltin<"__builtin_ia32_pmaxsb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem, Commutative]>;
@@ -919,7 +891,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Vector sum of absolute differences
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_mpsadbw : GCCBuiltin<"__builtin_ia32_mpsadbw128">,
- Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,llvm_i32_ty],
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty,llvm_i32_ty],
[IntrNoMem, Commutative]>;
}
@@ -932,13 +904,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Test instruction with bitwise comparison.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_ptestz : GCCBuiltin<"__builtin_ia32_ptestz128">,
- Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
[IntrNoMem]>;
def int_x86_sse41_ptestc : GCCBuiltin<"__builtin_ia32_ptestc128">,
- Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
[IntrNoMem]>;
def int_x86_sse41_ptestnzc : GCCBuiltin<"__builtin_ia32_ptestnzc128">,
- Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
[IntrNoMem]>;
}
@@ -1120,17 +1092,17 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx_vpermil_pd : GCCBuiltin<"__builtin_ia32_vpermilpd">,
+ def int_x86_avx_vpermil_pd :
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx_vpermil_ps : GCCBuiltin<"__builtin_ia32_vpermilps">,
+ def int_x86_avx_vpermil_ps :
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx_vpermil_pd_256 : GCCBuiltin<"__builtin_ia32_vpermilpd256">,
+ def int_x86_avx_vpermil_pd_256 :
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx_vpermil_ps_256 : GCCBuiltin<"__builtin_ia32_vpermilps256">,
+ def int_x86_avx_vpermil_ps_256 :
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
llvm_i8_ty], [IntrNoMem]>;
}
@@ -1281,13 +1253,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Vector load with broadcast
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx_vbroadcastss :
+ def int_x86_avx_vbroadcast_ss :
GCCBuiltin<"__builtin_ia32_vbroadcastss">,
Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
def int_x86_avx_vbroadcast_sd_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastsd256">,
Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
- def int_x86_avx_vbroadcastss_256 :
+ def int_x86_avx_vbroadcast_ss_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastss256">,
Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
def int_x86_avx_vbroadcastf128_pd_256 :
@@ -1300,12 +1272,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// SIMD load ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_avx_loadu_pd_256 : GCCBuiltin<"__builtin_ia32_loadupd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
- def int_x86_avx_loadu_ps_256 : GCCBuiltin<"__builtin_ia32_loadups256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
- def int_x86_avx_loadu_dq_256 : GCCBuiltin<"__builtin_ia32_loaddqu256">,
- Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
def int_x86_avx_ldu_dq_256 : GCCBuiltin<"__builtin_ia32_lddqu256">,
Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
}
@@ -1361,6 +1327,1046 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
}
//===----------------------------------------------------------------------===//
+// AVX2
+
+// Integer arithmetic ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmul_dq : GCCBuiltin<"__builtin_ia32_pmuldq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+}
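For reference, a hedged C sketch of how the saturating-add definitions above are reached from user code, assuming the standard immintrin.h wrappers (compile with -mavx2):

    #include <immintrin.h>

    /* Per-byte signed add, clamped to [-128, 127]; a compiler may lower
       this to llvm.x86.avx2.padds.b (__builtin_ia32_paddsb256). */
    __m256i saturating_add_bytes(__m256i a, __m256i b) {
        return _mm256_adds_epi8(a, b);
    }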
+
+// Vector min, max
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxu_w : GCCBuiltin<"__builtin_ia32_pmaxuw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxu_d : GCCBuiltin<"__builtin_ia32_pmaxud256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxs_b : GCCBuiltin<"__builtin_ia32_pmaxsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxs_d : GCCBuiltin<"__builtin_ia32_pmaxsd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pminu_b : GCCBuiltin<"__builtin_ia32_pminub256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pminu_w : GCCBuiltin<"__builtin_ia32_pminuw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pminu_d : GCCBuiltin<"__builtin_ia32_pminud256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmins_b : GCCBuiltin<"__builtin_ia32_pminsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmins_d : GCCBuiltin<"__builtin_ia32_pminsd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+}
+
+// Integer shift ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_psll_w : GCCBuiltin<"__builtin_ia32_psllw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psll_d : GCCBuiltin<"__builtin_ia32_pslld256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psll_q : GCCBuiltin<"__builtin_ia32_psllq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrl_d : GCCBuiltin<"__builtin_ia32_psrld256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx2_psra_w : GCCBuiltin<"__builtin_ia32_psraw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psra_d : GCCBuiltin<"__builtin_ia32_psrad256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx2_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrai_d : GCCBuiltin<"__builtin_ia32_psradi256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx2_psll_dq : GCCBuiltin<"__builtin_ia32_pslldqi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrl_dq : GCCBuiltin<"__builtin_ia32_psrldqi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psll_dq_bs : GCCBuiltin<"__builtin_ia32_pslldqi256_byteshift">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrl_dq_bs : GCCBuiltin<"__builtin_ia32_psrldqi256_byteshift">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+}
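The block above defines two shift flavours: the pslli/psrli/psrai forms take the count as an immediate (llvm_i32_ty), while psll/psrl/psra read it from the low 64 bits of a 128-bit vector. A hedged sketch, assuming the usual immintrin.h wrappers:

    #include <immintrin.h>

    /* Immediate count: llvm.x86.avx2.psrli.d */
    __m256i shift_right_imm(__m256i v) {
        return _mm256_srli_epi32(v, 4);
    }

    /* Count held in an XMM register: llvm.x86.avx2.psrl.d */
    __m256i shift_right_reg(__m256i v, __m128i count) {
        return _mm256_srl_epi32(v, count);
    }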
+
+// Pack ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_packsswb : GCCBuiltin<"__builtin_ia32_packsswb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_packssdw : GCCBuiltin<"__builtin_ia32_packssdw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_packuswb : GCCBuiltin<"__builtin_ia32_packuswb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_packusdw : GCCBuiltin<"__builtin_ia32_packusdw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+}
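A hedged sketch of the pack ops, assuming the standard immintrin.h wrapper; note that the AVX2 forms saturate and pack within each 128-bit lane rather than across the full 256-bit register:

    #include <immintrin.h>

    /* Narrow 16 + 16 signed words to 32 saturated signed bytes
       (llvm.x86.avx2.packsswb), interleaved per 128-bit lane. */
    __m256i narrow_words(__m256i a, __m256i b) {
        return _mm256_packs_epi16(a, b);
    }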
+
+// Absolute value ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pabs_b : GCCBuiltin<"__builtin_ia32_pabsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_pabs_w : GCCBuiltin<"__builtin_ia32_pabsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_pabs_d : GCCBuiltin<"__builtin_ia32_pabsd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty], [IntrNoMem]>;
+}
+
+// Horizontal arithmetic ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_phadd_w : GCCBuiltin<"__builtin_ia32_phaddw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_phadd_d : GCCBuiltin<"__builtin_ia32_phaddd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_phadd_sw : GCCBuiltin<"__builtin_ia32_phaddsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_phsub_d : GCCBuiltin<"__builtin_ia32_phsubd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_phsub_sw : GCCBuiltin<"__builtin_ia32_phsubsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_pmadd_ub_sw : GCCBuiltin<"__builtin_ia32_pmaddubsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+}
+
+// Sign ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_psign_b : GCCBuiltin<"__builtin_ia32_psignb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_psign_w : GCCBuiltin<"__builtin_ia32_psignw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psign_d : GCCBuiltin<"__builtin_ia32_psignd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+}
+
+// Packed multiply high with round and scale
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pmul_hr_sw : GCCBuiltin<"__builtin_ia32_pmulhrsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+}
+
+// Vector sign and zero extend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pmovsxbd : GCCBuiltin<"__builtin_ia32_pmovsxbd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxbq : GCCBuiltin<"__builtin_ia32_pmovsxbq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxbw : GCCBuiltin<"__builtin_ia32_pmovsxbw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxdq : GCCBuiltin<"__builtin_ia32_pmovsxdq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxwd : GCCBuiltin<"__builtin_ia32_pmovsxwd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxwq : GCCBuiltin<"__builtin_ia32_pmovsxwq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxbd : GCCBuiltin<"__builtin_ia32_pmovzxbd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxbq : GCCBuiltin<"__builtin_ia32_pmovzxbq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxbw : GCCBuiltin<"__builtin_ia32_pmovzxbw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxdq : GCCBuiltin<"__builtin_ia32_pmovzxdq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxwd : GCCBuiltin<"__builtin_ia32_pmovzxwd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxwq : GCCBuiltin<"__builtin_ia32_pmovzxwq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+}
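A hedged sketch of the widening ops, assuming the standard immintrin.h wrappers; the pmovzx variants zero-extend, the pmovsx variants sign-extend:

    #include <immintrin.h>

    /* Widen the low 8 bytes of a to eight 32-bit lanes
       (llvm.x86.avx2.pmovzxbd). */
    __m256i widen_bytes_u32(__m128i a) {
        return _mm256_cvtepu8_epi32(a);
    }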
+
+// Vector blend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pblendvb : GCCBuiltin<"__builtin_ia32_pblendvb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_pblendw : GCCBuiltin<"__builtin_ia32_pblendw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_pblendd_128 : GCCBuiltin<"__builtin_ia32_pblendd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_pblendd_256 : GCCBuiltin<"__builtin_ia32_pblendd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Vector load with broadcast
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_vbroadcast_ss_ps :
+ GCCBuiltin<"__builtin_ia32_vbroadcastss_ps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx2_vbroadcast_sd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastsd_pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx2_vbroadcast_ss_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastss_ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx2_vbroadcasti128 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastsi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ def int_x86_avx2_pbroadcastb_128 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_pbroadcastb_256 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_pbroadcastw_128 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_pbroadcastw_256 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_pbroadcastd_128 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_pbroadcastd_256 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_pbroadcastq_128 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx2_pbroadcastq_256 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+}
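A hedged sketch of the broadcast ops, assuming the standard immintrin.h wrapper:

    #include <immintrin.h>

    /* Splat element 0 of a across all eight 32-bit lanes
       (llvm.x86.avx2.pbroadcastd.256). */
    __m256i splat_lane0(__m128i a) {
        return _mm256_broadcastd_epi32(a);
    }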
+
+// Vector permutation
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_permd : GCCBuiltin<"__builtin_ia32_permvarsi256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_permq : GCCBuiltin<"__builtin_ia32_permdi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_permps : GCCBuiltin<"__builtin_ia32_permvarsf256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_permpd : GCCBuiltin<"__builtin_ia32_permdf256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_vperm2i128 : GCCBuiltin<"__builtin_ia32_permti256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+}
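Unlike the in-lane AVX permutes, permd/permps take a full per-element index vector and can move data across 128-bit lanes. A hedged sketch, assuming the standard immintrin.h wrapper:

    #include <immintrin.h>

    /* Each result dword i is src[idx[i] & 7], crossing lanes freely
       (llvm.x86.avx2.permd). */
    __m256i permute_dwords(__m256i src, __m256i idx) {
        return _mm256_permutevar8x32_epi32(src, idx);
    }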
+
+// Vector extract and insert
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_vextracti128 : GCCBuiltin<"__builtin_ia32_extract128i256">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_vinserti128 : GCCBuiltin<"__builtin_ia32_insert128i256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Conditional load ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_maskload_d : GCCBuiltin<"__builtin_ia32_maskloadd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty], [IntrReadMem]>;
+ def int_x86_avx2_maskload_q : GCCBuiltin<"__builtin_ia32_maskloadq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty], [IntrReadMem]>;
+ def int_x86_avx2_maskload_d_256 : GCCBuiltin<"__builtin_ia32_maskloadd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty], [IntrReadMem]>;
+ def int_x86_avx2_maskload_q_256 : GCCBuiltin<"__builtin_ia32_maskloadq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty], [IntrReadMem]>;
+}
+
+// Conditional store ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_maskstore_d : GCCBuiltin<"__builtin_ia32_maskstored">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+ def int_x86_avx2_maskstore_q : GCCBuiltin<"__builtin_ia32_maskstoreq">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
+ def int_x86_avx2_maskstore_d_256 :
+ GCCBuiltin<"__builtin_ia32_maskstored256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty], []>;
+ def int_x86_avx2_maskstore_q_256 :
+ GCCBuiltin<"__builtin_ia32_maskstoreq256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty], []>;
+}
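A hedged sketch combining the conditional load and store ops above, assuming the standard immintrin.h wrappers; only lanes whose mask element has its sign bit set touch memory, the rest load as zero or leave the destination unchanged:

    #include <immintrin.h>

    /* llvm.x86.avx2.maskload.d.256 + llvm.x86.avx2.maskstore.d.256 */
    void copy_selected(const int *src, int *dst, __m256i mask) {
        __m256i v = _mm256_maskload_epi32(src, mask);
        _mm256_maskstore_epi32(dst, mask, v);
    }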
+
+// Variable bit shift ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_psllv_d : GCCBuiltin<"__builtin_ia32_psllv4si">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psllv_d_256 : GCCBuiltin<"__builtin_ia32_psllv8si">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psllv_q : GCCBuiltin<"__builtin_ia32_psllv2di">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psllv_q_256 : GCCBuiltin<"__builtin_ia32_psllv4di">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx2_psrlv_d : GCCBuiltin<"__builtin_ia32_psrlv4si">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psrlv_d_256 : GCCBuiltin<"__builtin_ia32_psrlv8si">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psrlv_q : GCCBuiltin<"__builtin_ia32_psrlv2di">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psrlv_q_256 : GCCBuiltin<"__builtin_ia32_psrlv4di">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx2_psrav_d : GCCBuiltin<"__builtin_ia32_psrav4si">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psrav_d_256 : GCCBuiltin<"__builtin_ia32_psrav8si">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+}
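Where psll_d shifts every lane by one shared count, psllv_d gives each lane its own count (logical shifts yield zero for counts of 32 or more). A hedged sketch, assuming the standard immintrin.h wrapper:

    #include <immintrin.h>

    /* Per-lane variable shift: llvm.x86.avx2.psllv.d.256 */
    __m256i per_lane_shift_left(__m256i v, __m256i counts) {
        return _mm256_sllv_epi32(v, counts);
    }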
+
+// Misc.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_mpsadbw : GCCBuiltin<"__builtin_ia32_mpsadbw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_movntdqa : GCCBuiltin<"__builtin_ia32_movntdqa256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// FMA4
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_fma4_vfmadd_ss : GCCBuiltin<"__builtin_ia32_vfmaddss">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmadd_sd : GCCBuiltin<"__builtin_ia32_vfmaddsd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmadd_ps : GCCBuiltin<"__builtin_ia32_vfmaddps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmadd_pd : GCCBuiltin<"__builtin_ia32_vfmaddpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfmaddps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfmaddpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsub_ss : GCCBuiltin<"__builtin_ia32_vfmsubss">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsub_sd : GCCBuiltin<"__builtin_ia32_vfmsubsd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsub_ps : GCCBuiltin<"__builtin_ia32_vfmsubps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsub_pd : GCCBuiltin<"__builtin_ia32_vfmsubpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsub_ps_256 : GCCBuiltin<"__builtin_ia32_vfmsubps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsub_pd_256 : GCCBuiltin<"__builtin_ia32_vfmsubpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmadd_ss : GCCBuiltin<"__builtin_ia32_vfnmaddss">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmadd_sd : GCCBuiltin<"__builtin_ia32_vfnmaddsd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmadd_ps : GCCBuiltin<"__builtin_ia32_vfnmaddps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmadd_pd : GCCBuiltin<"__builtin_ia32_vfnmaddpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfnmaddps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfnmaddpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmsub_ss : GCCBuiltin<"__builtin_ia32_vfnmsubss">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmsub_sd : GCCBuiltin<"__builtin_ia32_vfnmsubsd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmsub_ps : GCCBuiltin<"__builtin_ia32_vfnmsubps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmsub_pd : GCCBuiltin<"__builtin_ia32_vfnmsubpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmsub_ps_256 : GCCBuiltin<"__builtin_ia32_vfnmsubps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfnmsub_pd_256 : GCCBuiltin<"__builtin_ia32_vfnmsubpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmaddsub_ps : GCCBuiltin<"__builtin_ia32_vfmaddsubps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmaddsub_pd : GCCBuiltin<"__builtin_ia32_vfmaddsubpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmaddsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmaddsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsubadd_ps : GCCBuiltin<"__builtin_ia32_vfmsubaddps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsubadd_pd : GCCBuiltin<"__builtin_ia32_vfmsubaddpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsubadd_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma4_vfmsubadd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+}
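A hedged sketch of the FMA4 multiply-add, assuming GCC/Clang's _mm_macc_ps wrapper (x86intrin.h, compile with -mfma4); the multiply and add are fused, with a single rounding step per lane:

    #include <x86intrin.h>

    /* a*b + c per float lane: llvm.x86.fma4.vfmadd.ps
       (__builtin_ia32_vfmaddps). */
    __m128 fused_madd(__m128 a, __m128 b, __m128 c) {
        return _mm_macc_ps(a, b, c);
    }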
+
+//===----------------------------------------------------------------------===//
+// XOP
+
+ def int_x86_xop_vpermil2pd : GCCBuiltin<"__builtin_ia32_vpermil2pd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vpermil2pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermil2pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vpermil2ps : GCCBuiltin<"__builtin_ia32_vpermil2ps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpermil2ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermil2ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vfrcz_pd :
+ GCCBuiltin<"__builtin_ia32_vfrczpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_ps :
+ GCCBuiltin<"__builtin_ia32_vfrczps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_sd :
+ GCCBuiltin<"__builtin_ia32_vfrczsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vfrcz_ss :
+ GCCBuiltin<"__builtin_ia32_vfrczss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vfrcz_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfrczpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfrczps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcmov :
+ GCCBuiltin<"__builtin_ia32_vpcmov">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcmov_256 :
+ GCCBuiltin<"__builtin_ia32_vpcmov_256">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomeqb :
+ GCCBuiltin<"__builtin_ia32_vpcomeqb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomeqw :
+ GCCBuiltin<"__builtin_ia32_vpcomeqw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomeqd :
+ GCCBuiltin<"__builtin_ia32_vpcomeqd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomeqq :
+ GCCBuiltin<"__builtin_ia32_vpcomeqq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomequb :
+ GCCBuiltin<"__builtin_ia32_vpcomequb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomequd :
+ GCCBuiltin<"__builtin_ia32_vpcomequd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomequq :
+ GCCBuiltin<"__builtin_ia32_vpcomequq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomequw :
+ GCCBuiltin<"__builtin_ia32_vpcomequw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomfalseb :
+ GCCBuiltin<"__builtin_ia32_vpcomfalseb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomfalsed :
+ GCCBuiltin<"__builtin_ia32_vpcomfalsed">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomfalseq :
+ GCCBuiltin<"__builtin_ia32_vpcomfalseq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomfalseub :
+ GCCBuiltin<"__builtin_ia32_vpcomfalseub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomfalseud :
+ GCCBuiltin<"__builtin_ia32_vpcomfalseud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomfalseuq :
+ GCCBuiltin<"__builtin_ia32_vpcomfalseuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomfalseuw :
+ GCCBuiltin<"__builtin_ia32_vpcomfalseuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomfalsew :
+ GCCBuiltin<"__builtin_ia32_vpcomfalsew">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgeb :
+ GCCBuiltin<"__builtin_ia32_vpcomgeb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomged :
+ GCCBuiltin<"__builtin_ia32_vpcomged">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgeq :
+ GCCBuiltin<"__builtin_ia32_vpcomgeq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgeub :
+ GCCBuiltin<"__builtin_ia32_vpcomgeub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgeud :
+ GCCBuiltin<"__builtin_ia32_vpcomgeud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgeuq :
+ GCCBuiltin<"__builtin_ia32_vpcomgeuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgeuw :
+ GCCBuiltin<"__builtin_ia32_vpcomgeuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgew :
+ GCCBuiltin<"__builtin_ia32_vpcomgew">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgtb :
+ GCCBuiltin<"__builtin_ia32_vpcomgtb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgtd :
+ GCCBuiltin<"__builtin_ia32_vpcomgtd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgtq :
+ GCCBuiltin<"__builtin_ia32_vpcomgtq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgtub :
+ GCCBuiltin<"__builtin_ia32_vpcomgtub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgtud :
+ GCCBuiltin<"__builtin_ia32_vpcomgtud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgtuq :
+ GCCBuiltin<"__builtin_ia32_vpcomgtuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgtuw :
+ GCCBuiltin<"__builtin_ia32_vpcomgtuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomgtw :
+ GCCBuiltin<"__builtin_ia32_vpcomgtw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomleb :
+ GCCBuiltin<"__builtin_ia32_vpcomleb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomled :
+ GCCBuiltin<"__builtin_ia32_vpcomled">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomleq :
+ GCCBuiltin<"__builtin_ia32_vpcomleq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomleub :
+ GCCBuiltin<"__builtin_ia32_vpcomleub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomleud :
+ GCCBuiltin<"__builtin_ia32_vpcomleud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomleuq :
+ GCCBuiltin<"__builtin_ia32_vpcomleuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomleuw :
+ GCCBuiltin<"__builtin_ia32_vpcomleuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomlew :
+ GCCBuiltin<"__builtin_ia32_vpcomlew">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomltb :
+ GCCBuiltin<"__builtin_ia32_vpcomltb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomltd :
+ GCCBuiltin<"__builtin_ia32_vpcomltd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomltq :
+ GCCBuiltin<"__builtin_ia32_vpcomltq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomltub :
+ GCCBuiltin<"__builtin_ia32_vpcomltub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomltud :
+ GCCBuiltin<"__builtin_ia32_vpcomltud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomltuq :
+ GCCBuiltin<"__builtin_ia32_vpcomltuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomltuw :
+ GCCBuiltin<"__builtin_ia32_vpcomltuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomltw :
+ GCCBuiltin<"__builtin_ia32_vpcomltw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomneb :
+ GCCBuiltin<"__builtin_ia32_vpcomneb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomned :
+ GCCBuiltin<"__builtin_ia32_vpcomned">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomneq :
+ GCCBuiltin<"__builtin_ia32_vpcomneq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomneub :
+ GCCBuiltin<"__builtin_ia32_vpcomneub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomneud :
+ GCCBuiltin<"__builtin_ia32_vpcomneud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomneuq :
+ GCCBuiltin<"__builtin_ia32_vpcomneuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomneuw :
+ GCCBuiltin<"__builtin_ia32_vpcomneuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomnew :
+ GCCBuiltin<"__builtin_ia32_vpcomnew">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomtrueb :
+ GCCBuiltin<"__builtin_ia32_vpcomtrueb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomtrued :
+ GCCBuiltin<"__builtin_ia32_vpcomtrued">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomtrueq :
+ GCCBuiltin<"__builtin_ia32_vpcomtrueq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomtrueub :
+ GCCBuiltin<"__builtin_ia32_vpcomtrueub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomtrueud :
+ GCCBuiltin<"__builtin_ia32_vpcomtrueud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomtrueuq :
+ GCCBuiltin<"__builtin_ia32_vpcomtrueuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomtrueuw :
+ GCCBuiltin<"__builtin_ia32_vpcomtrueuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcomtruew :
+ GCCBuiltin<"__builtin_ia32_vpcomtruew">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vphaddbd :
+ GCCBuiltin<"__builtin_ia32_vphaddbd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddbq :
+ GCCBuiltin<"__builtin_ia32_vphaddbq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddbw :
+ GCCBuiltin<"__builtin_ia32_vphaddbw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphadddq :
+ GCCBuiltin<"__builtin_ia32_vphadddq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddubd :
+ GCCBuiltin<"__builtin_ia32_vphaddubd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddubq :
+ GCCBuiltin<"__builtin_ia32_vphaddubq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddubw :
+ GCCBuiltin<"__builtin_ia32_vphaddubw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddudq :
+ GCCBuiltin<"__builtin_ia32_vphaddudq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_xop_vphadduwd :
+ GCCBuiltin<"__builtin_ia32_vphadduwd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vphadduwq :
+ GCCBuiltin<"__builtin_ia32_vphadduwq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddwd :
+ GCCBuiltin<"__builtin_ia32_vphaddwd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddwq :
+ GCCBuiltin<"__builtin_ia32_vphaddwq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vphsubbw :
+ GCCBuiltin<"__builtin_ia32_vphsubbw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphsubdq :
+ GCCBuiltin<"__builtin_ia32_vphsubdq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_xop_vphsubwd :
+ GCCBuiltin<"__builtin_ia32_vphsubwd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vpmacsdd :
+ GCCBuiltin<"__builtin_ia32_vpmacsdd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacsdqh :
+ GCCBuiltin<"__builtin_ia32_vpmacsdqh">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacsdql :
+ GCCBuiltin<"__builtin_ia32_vpmacsdql">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacssdd :
+ GCCBuiltin<"__builtin_ia32_vpmacssdd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacssdqh :
+ GCCBuiltin<"__builtin_ia32_vpmacssdqh">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacssdql :
+ GCCBuiltin<"__builtin_ia32_vpmacssdql">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacsswd :
+ GCCBuiltin<"__builtin_ia32_vpmacsswd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacssww :
+ GCCBuiltin<"__builtin_ia32_vpmacssww">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacswd :
+ GCCBuiltin<"__builtin_ia32_vpmacswd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacsww :
+ GCCBuiltin<"__builtin_ia32_vpmacsww">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmadcsswd :
+ GCCBuiltin<"__builtin_ia32_vpmadcsswd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmadcswd :
+ GCCBuiltin<"__builtin_ia32_vpmadcswd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpperm :
+ GCCBuiltin<"__builtin_ia32_vpperm">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotb :
+ GCCBuiltin<"__builtin_ia32_vprotb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotd :
+ GCCBuiltin<"__builtin_ia32_vprotd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotq :
+ GCCBuiltin<"__builtin_ia32_vprotq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotw :
+ GCCBuiltin<"__builtin_ia32_vprotw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshab :
+ GCCBuiltin<"__builtin_ia32_vpshab">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshad :
+ GCCBuiltin<"__builtin_ia32_vpshad">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshaq :
+ GCCBuiltin<"__builtin_ia32_vpshaq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshaw :
+ GCCBuiltin<"__builtin_ia32_vpshaw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshlb :
+ GCCBuiltin<"__builtin_ia32_vpshlb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshld :
+ GCCBuiltin<"__builtin_ia32_vpshld">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshlq :
+ GCCBuiltin<"__builtin_ia32_vpshlq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshlw :
+ GCCBuiltin<"__builtin_ia32_vpshlw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+}
+
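
The XOP definitions above are reached from C++ the same way as any other target intrinsic. A hedged sketch using the IRBuilder API of this era; the Intrinsic::x86_xop_vprotb enumerator is the conventional TableGen-generated name, not something this patch shows:

    #include "llvm/Intrinsics.h"
    #include "llvm/Module.h"
    #include "llvm/Support/IRBuilder.h"
    using namespace llvm;

    // Emit a call to llvm.x86.xop.vprotb; both operands are <16 x i8>.
    Value *emitVProtB(Module &M, IRBuilder<> &B, Value *V, Value *Amt) {
      Function *F = Intrinsic::getDeclaration(&M, Intrinsic::x86_xop_vprotb);
      return B.CreateCall2(F, V, Amt);
    }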
+//===----------------------------------------------------------------------===//
// MMX
// Empty MMX state op.
@@ -1587,13 +2593,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
- llvm_x86mmx_ty], [IntrNoMem]>;
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
- llvm_x86mmx_ty], [IntrNoMem]>;
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
- llvm_x86mmx_ty], [IntrNoMem]>;
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb">,
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
@@ -1629,3 +2635,63 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
}
+
+//===----------------------------------------------------------------------===//
+// BMI
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_bmi_bextr_32 : GCCBuiltin<"__builtin_ia32_bextr_u32">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_bmi_bextr_64 : GCCBuiltin<"__builtin_ia32_bextr_u64">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_bmi_bzhi_32 : GCCBuiltin<"__builtin_ia32_bzhi_si">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_bmi_bzhi_64 : GCCBuiltin<"__builtin_ia32_bzhi_di">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_bmi_pdep_32 : GCCBuiltin<"__builtin_ia32_pdep_si">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_bmi_pdep_64 : GCCBuiltin<"__builtin_ia32_pdep_di">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_bmi_pext_32 : GCCBuiltin<"__builtin_ia32_pext_si">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_bmi_pext_64 : GCCBuiltin<"__builtin_ia32_pext_di">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+}
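
For orientation, bextr extracts a contiguous bit field: the low byte of the control operand gives the start bit and the next byte the field length. A plain C++ model of the 32-bit semantics, my reading of the instruction rather than anything this patch states:

    #include <cstdint>

    static uint32_t bextr32(uint32_t Src, uint32_t Ctl) {
      uint32_t Start = Ctl & 0xff;         // control byte 0: start bit
      uint32_t Len   = (Ctl >> 8) & 0xff;  // control byte 1: field length
      if (Start >= 32) return 0;           // field starts past the operand
      uint64_t Shifted = (uint64_t)Src >> Start;
      if (Len >= 32) return (uint32_t)Shifted;
      return (uint32_t)(Shifted & ((1u << Len) - 1));
    }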
+
+//===----------------------------------------------------------------------===//
+// FS/GS Base
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_rdfsbase_32 : GCCBuiltin<"__builtin_ia32_rdfsbase32">,
+ Intrinsic<[llvm_i32_ty], []>;
+ def int_x86_rdgsbase_32 : GCCBuiltin<"__builtin_ia32_rdgsbase32">,
+ Intrinsic<[llvm_i32_ty], []>;
+ def int_x86_rdfsbase_64 : GCCBuiltin<"__builtin_ia32_rdfsbase64">,
+ Intrinsic<[llvm_i64_ty], []>;
+ def int_x86_rdgsbase_64 : GCCBuiltin<"__builtin_ia32_rdgsbase64">,
+ Intrinsic<[llvm_i64_ty], []>;
+ def int_x86_wrfsbase_32 : GCCBuiltin<"__builtin_ia32_wrfsbase32">,
+ Intrinsic<[], [llvm_i32_ty]>;
+ def int_x86_wrgsbase_32 : GCCBuiltin<"__builtin_ia32_wrgsbase32">,
+ Intrinsic<[], [llvm_i32_ty]>;
+ def int_x86_wrfsbase_64 : GCCBuiltin<"__builtin_ia32_wrfsbase64">,
+ Intrinsic<[], [llvm_i64_ty]>;
+ def int_x86_wrgsbase_64 : GCCBuiltin<"__builtin_ia32_wrgsbase64">,
+ Intrinsic<[], [llvm_i64_ty]>;
+}
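
Note that, unlike most definitions in this file, these carry no IntrNoMem attribute: they read and write machine state, so the optimizer must treat them as having side effects. A hedged usage sketch through the matching compiler builtins (assumes a toolchain and CPU with FSGSBASE support, e.g. -mfsgsbase):

    #include <cstdint>

    // Swap in a new FS base and return the old one.
    uint64_t swapFSBase(uint64_t NewBase) {
      uint64_t Old = __builtin_ia32_rdfsbase64();
      __builtin_ia32_wrfsbase64(NewBase);
      return Old;
    }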
+
+//===----------------------------------------------------------------------===//
+// Half float conversion
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_vcvtph2ps_128 : GCCBuiltin<"__builtin_ia32_vcvtph2ps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_vcvtph2ps_256 : GCCBuiltin<"__builtin_ia32_vcvtph2ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_vcvtps2ph_128 : GCCBuiltin<"__builtin_ia32_vcvtps2ph">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_vcvtps2ph_256 : GCCBuiltin<"__builtin_ia32_vcvtps2ph256">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
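
These four definitions back the F16C conversion builtins. A small round-trip sketch through the public <immintrin.h> wrappers (compile with -mf16c; the 0 immediate selects round-to-nearest-even); the wrapper names are the usual Intel ones, not part of this patch:

    #include <immintrin.h>

    float roundTripThroughHalf(float X) {
      __m128  V    = _mm_set_ss(X);
      __m128i H    = _mm_cvtps_ph(V, 0);  // llvm.x86.vcvtps2ph
      __m128  Back = _mm_cvtph_ps(H);     // llvm.x86.vcvtph2ps
      return _mm_cvtss_f32(Back);
    }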
diff --git a/contrib/llvm/include/llvm/LLVMContext.h b/contrib/llvm/include/llvm/LLVMContext.h
index 65146c3..18adcd1 100644
--- a/contrib/llvm/include/llvm/LLVMContext.h
+++ b/contrib/llvm/include/llvm/LLVMContext.h
@@ -19,6 +19,7 @@ namespace llvm {
class LLVMContextImpl;
class StringRef;
+class Twine;
class Instruction;
class Module;
class SMDiagnostic;
@@ -40,7 +41,9 @@ public:
enum {
MD_dbg = 0, // "dbg"
MD_tbaa = 1, // "tbaa"
- MD_prof = 2 // "prof"
+ MD_prof = 2, // "prof"
+ MD_fpaccuracy = 3, // "fpaccuracy"
+ MD_range = 4 // "range"
};
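
The point of fixing these IDs is that hot paths can attach well-known metadata without the string lookup through getMDKindID described below. A hedged sketch against the C++ API of this revision, tagging a 32-bit load with !range metadata:

    #include "llvm/Constants.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Metadata.h"
    #include "llvm/Type.h"
    using namespace llvm;

    // Record that LI (an i32 load) produces a value in [0, 256).
    void addRangeMetadata(LoadInst *LI) {
      LLVMContext &Ctx = LI->getContext();
      Type *I32 = Type::getInt32Ty(Ctx);
      Value *Range[] = { ConstantInt::get(I32, 0), ConstantInt::get(I32, 256) };
      LI->setMetadata(LLVMContext::MD_range, MDNode::get(Ctx, Range));
    }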
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
@@ -79,9 +82,9 @@ public:
/// be prepared to drop the erroneous construct on the floor and "not crash".
/// The generated code need not be correct. The error message will be
/// implicitly prefixed with "error: " and should not end with a ".".
- void emitError(unsigned LocCookie, StringRef ErrorStr);
- void emitError(const Instruction *I, StringRef ErrorStr);
- void emitError(StringRef ErrorStr);
+ void emitError(unsigned LocCookie, const Twine &ErrorStr);
+ void emitError(const Instruction *I, const Twine &ErrorStr);
+ void emitError(const Twine &ErrorStr);
private:
// DO NOT IMPLEMENT
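
The switch from StringRef to const Twine & lets callers compose diagnostics without building a temporary std::string first. Hedged sketch:

    #include "llvm/ADT/Twine.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    void reportBadAlignment(LLVMContext &Ctx, unsigned Align) {
      // Prefixed with "error: " by the context; no trailing period.
      Ctx.emitError("unsupported alignment: " + Twine(Align));
    }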
diff --git a/contrib/llvm/include/llvm/LinkAllPasses.h b/contrib/llvm/include/llvm/LinkAllPasses.h
index f690d04..2258d45 100644
--- a/contrib/llvm/include/llvm/LinkAllPasses.h
+++ b/contrib/llvm/include/llvm/LinkAllPasses.h
@@ -31,6 +31,7 @@
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Vectorize.h"
#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"
#include <cstdlib>
@@ -69,7 +70,7 @@ namespace {
(void) llvm::createEdgeProfilerPass();
(void) llvm::createOptimalEdgeProfilerPass();
(void) llvm::createPathProfilerPass();
- (void) llvm::createGCOVProfilerPass(true, true, false);
+ (void) llvm::createGCOVProfilerPass();
(void) llvm::createFunctionInliningPass();
(void) llvm::createAlwaysInlinerPass();
(void) llvm::createGlobalDCEPass();
@@ -97,6 +98,7 @@ namespace {
(void) llvm::createNoAAPass();
(void) llvm::createNoProfileInfoPass();
(void) llvm::createObjCARCAliasAnalysisPass();
+ (void) llvm::createObjCARCAPElimPass();
(void) llvm::createObjCARCExpandPass();
(void) llvm::createObjCARCContractPass();
(void) llvm::createObjCARCOptPass();
@@ -150,6 +152,7 @@ namespace {
(void) llvm::createCorrelatedValuePropagationPass();
(void) llvm::createMemDepPrinter();
(void) llvm::createInstructionSimplifierPass();
+ (void) llvm::createBBVectorizePass();
(void)new llvm::IntervalPartition();
(void)new llvm::FindUsedTypes();
diff --git a/contrib/llvm/include/llvm/Linker.h b/contrib/llvm/include/llvm/Linker.h
index 88908fb..1ebcd6b 100644
--- a/contrib/llvm/include/llvm/Linker.h
+++ b/contrib/llvm/include/llvm/Linker.h
@@ -15,14 +15,15 @@
#define LLVM_LINKER_H
#include <memory>
+#include <string>
#include <vector>
-#include "llvm/ADT/StringRef.h"
namespace llvm {
namespace sys { class Path; }
class Module;
class LLVMContext;
+class StringRef;
/// This class provides the core functionality of linking in LLVM. It retains a
/// Module object which is the composite of the modules and libraries linked
diff --git a/contrib/llvm/include/llvm/MC/MCAsmBackend.h b/contrib/llvm/include/llvm/MC/MCAsmBackend.h
index 4a0cf37..05e6286 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmBackend.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmBackend.h
@@ -12,17 +12,20 @@
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCFixup.h"
-#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
+class MCAsmLayout;
+class MCAssembler;
class MCELFObjectTargetWriter;
-class MCFixup;
+struct MCFixupKindInfo;
+class MCFragment;
class MCInst;
+class MCInstFragment;
class MCObjectWriter;
class MCSection;
-template<typename T>
-class SmallVectorImpl;
+class MCValue;
class raw_ostream;
/// MCAsmBackend - Generic interface to target specific assembler backends.
@@ -44,8 +47,8 @@ public:
/// createELFObjectTargetWriter - Create a new ELFObjectTargetWriter to enable
/// non-standard ELFObjectWriters.
virtual MCELFObjectTargetWriter *createELFObjectTargetWriter() const {
- assert(0 && "createELFObjectTargetWriter is not supported by asm backend");
- return 0;
+ llvm_unreachable("createELFObjectTargetWriter is not supported by asm "
+ "backend");
}
/// hasReliableSymbolDifference - Check whether this target implements
@@ -85,12 +88,21 @@ public:
/// getFixupKindInfo - Get information on a fixup kind.
virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const;
+ /// processFixupValue - Target hook to adjust the literal value of a fixup
+ /// if necessary. IsResolved signals whether the caller believes a relocation
+ /// is needed; the target can modify the value. The default does nothing.
+ virtual void processFixupValue(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ MCValue &Target, uint64_t &Value,
+ bool &IsResolved) {}
+
/// @}
- /// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
+ /// applyFixup - Apply the \arg Value for given \arg Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
- virtual void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const = 0;
/// @}
@@ -98,11 +110,18 @@ public:
/// @name Target Relaxation Interfaces
/// @{
- /// MayNeedRelaxation - Check whether the given instruction may need
+ /// mayNeedRelaxation - Check whether the given instruction may need
/// relaxation.
///
/// \param Inst - The instruction to test.
- virtual bool MayNeedRelaxation(const MCInst &Inst) const = 0;
+ virtual bool mayNeedRelaxation(const MCInst &Inst) const = 0;
+
+ /// fixupNeedsRelaxation - Target specific predicate for whether a given
+ /// fixup requires the associated instruction to be relaxed.
+ virtual bool fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const = 0;
/// RelaxInstruction - Relax the instruction in the given fragment to the next
/// wider instruction.
@@ -110,20 +129,20 @@ public:
/// \param Inst - The instruction to relax, which may be the same as the
/// output.
 /// \param Res [output] - On return, the relaxed instruction.
- virtual void RelaxInstruction(const MCInst &Inst, MCInst &Res) const = 0;
+ virtual void relaxInstruction(const MCInst &Inst, MCInst &Res) const = 0;
/// @}
- /// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given
+ /// writeNopData - Write an (optimal) nop sequence of Count bytes to the given
/// output. If the target cannot generate such a sequence, it should return an
/// error.
///
/// \return - True on success.
- virtual bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const = 0;
+ virtual bool writeNopData(uint64_t Count, MCObjectWriter *OW) const = 0;
- /// HandleAssemblerFlag - Handle any target-specific assembler flags.
+ /// handleAssemblerFlag - Handle any target-specific assembler flags.
/// By default, do nothing.
- virtual void HandleAssemblerFlag(MCAssemblerFlag Flag) {}
+ virtual void handleAssemblerFlag(MCAssemblerFlag Flag) {}
};
} // End llvm namespace
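
Taken together, the hunk above renames the hooks to lowercase and adds two new ones, processFixupValue and the per-fixup fixupNeedsRelaxation. A do-nothing backend compiled against the patched interface, stub bodies only; it assumes createObjectWriter and getNumFixupKinds remain pure virtual in this revision, which this hunk does not show:

    #include "llvm/MC/MCAsmBackend.h"
    using namespace llvm;

    class NullAsmBackend : public MCAsmBackend {
    public:
      MCObjectWriter *createObjectWriter(raw_ostream &) const { return 0; }
      unsigned getNumFixupKinds() const { return 0; }
      void applyFixup(const MCFixup &, char *, unsigned, uint64_t) const {}
      bool mayNeedRelaxation(const MCInst &) const { return false; }
      bool fixupNeedsRelaxation(const MCFixup &, uint64_t,
                                const MCInstFragment *,
                                const MCAsmLayout &) const { return false; }
      void relaxInstruction(const MCInst &, MCInst &) const {}
      bool writeNopData(uint64_t, MCObjectWriter *) const { return true; }
    };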
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfo.h b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
index c3c296e..0f67c99 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
@@ -36,10 +36,6 @@ namespace llvm {
enum LCOMMType { None, NoAlignment, ByteAlignment };
}
- namespace Structors {
- enum OutputOrder { None, PriorityOrder, ReversePriorityOrder };
- }
-
/// MCAsmInfo - This class is intended to be used as a base class for asm
/// properties and features specific to the target.
class MCAsmInfo {
@@ -47,7 +43,7 @@ namespace llvm {
//===------------------------------------------------------------------===//
// Properties to be set by the target writer, used to configure asm printer.
//
-
+
/// PointerSize - Pointer size in bytes.
/// Default is 4.
unsigned PointerSize;
@@ -72,11 +68,6 @@ namespace llvm {
/// the macho-specific .tbss directive for emitting thread local BSS Symbols
bool HasMachoTBSSDirective; // Default is false.
- /// StructorOutputOrder - Whether the static ctor/dtor list should be output
- /// in no particular order, in order of increasing priority or the reverse:
- /// in order of decreasing priority (the default).
- Structors::OutputOrder StructorOutputOrder; // Default is reverse order.
-
/// HasStaticCtorDtorReferenceInStaticMode - True if the compiler should
/// emit a ".reference .constructors_used" or ".reference .destructors_used"
 /// directive after the static ctor/dtor list. This directive is only
@@ -152,6 +143,10 @@ namespace llvm {
/// symbol names. This defaults to true.
bool AllowPeriodsInName;
+ /// AllowUTF8 - This is true if the assembler accepts UTF-8 input.
+ // FIXME: Make this a more general encoding setting?
+ bool AllowUTF8;
+
//===--- Data Emission Directives -------------------------------------===//
/// ZeroDirective - this should be set to the directive used to get some
@@ -189,6 +184,11 @@ namespace llvm {
const char *JT32Begin; // Defaults to "$a."
bool SupportsDataRegions;
+ /// GPRel64Directive - if non-null, a directive that is used to emit a word
+ /// which should be relocated as a 64-bit GP-relative offset, e.g. .gpdword
+ /// on Mips.
+ const char *GPRel64Directive; // Defaults to NULL.
+
/// GPRel32Directive - if non-null, a directive that is used to emit a word
/// which should be relocated as a 32-bit GP-relative offset, e.g. .gpword
/// on Mips or .gprel32 on Alpha.
@@ -323,13 +323,17 @@ namespace llvm {
const char* DwarfSectionOffsetDirective; // Defaults to NULL
/// DwarfRequiresRelocationForSectionOffset - True if we need to produce a
- // relocation when we want a section offset in dwarf.
+ /// relocation when we want a section offset in dwarf.
bool DwarfRequiresRelocationForSectionOffset; // Defaults to true;
- // DwarfUsesLabelOffsetDifference - True if Dwarf2 output can
- // use EmitLabelOffsetDifference.
+ /// DwarfUsesLabelOffsetDifference - True if Dwarf2 output can
+ /// use EmitLabelOffsetDifference.
bool DwarfUsesLabelOffsetForRanges;
+ /// DwarfUsesRelocationsForStringPool - True if this Dwarf output must use
+ /// relocations to refer to entries in the string pool.
+ bool DwarfUsesRelocationsForStringPool;
+
/// DwarfRegNumForCFI - True if dwarf register numbers are printed
/// instead of symbolic register names in .cfi_* directives.
bool DwarfRegNumForCFI; // Defaults to false;
@@ -381,6 +385,7 @@ namespace llvm {
const char *getData64bitsDirective(unsigned AS = 0) const {
return AS == 0 ? Data64bitsDirective : getDataASDirective(64, AS);
}
+ const char *getGPRel64Directive() const { return GPRel64Directive; }
const char *getGPRel32Directive() const { return GPRel32Directive; }
/// [Code|Data]Begin label name accessors.
@@ -424,9 +429,6 @@ namespace llvm {
//
bool hasMachoZeroFillDirective() const { return HasMachoZeroFillDirective; }
bool hasMachoTBSSDirective() const { return HasMachoTBSSDirective; }
- Structors::OutputOrder getStructorOutputOrder() const {
- return StructorOutputOrder;
- }
bool hasStaticCtorDtorReferenceInStaticMode() const {
return HasStaticCtorDtorReferenceInStaticMode;
}
@@ -487,6 +489,9 @@ namespace llvm {
bool doesAllowPeriodsInName() const {
return AllowPeriodsInName;
}
+ bool doesAllowUTF8() const {
+ return AllowUTF8;
+ }
const char *getZeroDirective() const {
return ZeroDirective;
}
@@ -554,7 +559,7 @@ namespace llvm {
ExceptionsType == ExceptionHandling::ARM ||
ExceptionsType == ExceptionHandling::Win64);
}
- bool doesDwarfUsesInlineInfoSection() const {
+ bool doesDwarfUseInlineInfoSection() const {
return DwarfUsesInlineInfoSection;
}
const char *getDwarfSectionOffsetDirective() const {
@@ -563,9 +568,12 @@ namespace llvm {
bool doesDwarfRequireRelocationForSectionOffset() const {
return DwarfRequiresRelocationForSectionOffset;
}
- bool doesDwarfUsesLabelOffsetForRanges() const {
+ bool doesDwarfUseLabelOffsetForRanges() const {
return DwarfUsesLabelOffsetForRanges;
}
+ bool doesDwarfUseRelocationsForStringPool() const {
+ return DwarfUsesRelocationsForStringPool;
+ }
bool useDwarfRegNumForCFI() const {
return DwarfRegNumForCFI;
}
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h b/contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h
index a3ee159..0ff3e12 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h
@@ -14,9 +14,21 @@
namespace llvm {
class MCAsmInfoCOFF : public MCAsmInfo {
+ virtual void anchor();
protected:
explicit MCAsmInfoCOFF();
-
+ };
+
+ class MCAsmInfoMicrosoft : public MCAsmInfoCOFF {
+ virtual void anchor();
+ protected:
+ explicit MCAsmInfoMicrosoft();
+ };
+
+ class MCAsmInfoGNUCOFF : public MCAsmInfoCOFF {
+ virtual void anchor();
+ protected:
+ explicit MCAsmInfoGNUCOFF();
};
}
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h b/contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h
index 1f6c499..af552de 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h
@@ -18,7 +18,9 @@
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
- struct MCAsmInfoDarwin : public MCAsmInfo {
+ class MCAsmInfoDarwin : public MCAsmInfo {
+ virtual void anchor();
+ public:
explicit MCAsmInfoDarwin();
};
}
diff --git a/contrib/llvm/include/llvm/MC/MCAsmLayout.h b/contrib/llvm/include/llvm/MC/MCAsmLayout.h
index a4585d1..cf79216 100644
--- a/contrib/llvm/include/llvm/MC/MCAsmLayout.h
+++ b/contrib/llvm/include/llvm/MC/MCAsmLayout.h
@@ -10,6 +10,7 @@
#ifndef LLVM_MC_MCASMLAYOUT_H
#define LLVM_MC_MCASMLAYOUT_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/MC/MCAssembler.h b/contrib/llvm/include/llvm/MC/MCAssembler.h
index b8f8cc4..d139173 100644
--- a/contrib/llvm/include/llvm/MC/MCAssembler.h
+++ b/contrib/llvm/include/llvm/MC/MCAssembler.h
@@ -25,7 +25,6 @@ namespace llvm {
class raw_ostream;
class MCAsmLayout;
class MCAssembler;
-class MCBinaryExpr;
class MCContext;
class MCCodeEmitter;
class MCExpr;
@@ -106,6 +105,7 @@ public:
};
class MCDataFragment : public MCFragment {
+ virtual void anchor();
SmallString<32> Contents;
/// Fixups - The list of fixups in this fragment.
@@ -160,6 +160,8 @@ public:
// object with just the MCInst and a code size, then we should just change
// MCDataFragment to have an optional MCInst at its end.
class MCInstFragment : public MCFragment {
+ virtual void anchor();
+
/// Inst - The instruction this is a fragment for.
MCInst Inst;
@@ -215,6 +217,8 @@ public:
};
class MCAlignFragment : public MCFragment {
+ virtual void anchor();
+
/// Alignment - The alignment to ensure, in bytes.
unsigned Alignment;
@@ -263,6 +267,8 @@ public:
};
class MCFillFragment : public MCFragment {
+ virtual void anchor();
+
/// Value - Value to use for filling bytes.
int64_t Value;
@@ -300,6 +306,8 @@ public:
};
class MCOrgFragment : public MCFragment {
+ virtual void anchor();
+
/// Offset - The offset this fragment should start at.
const MCExpr *Offset;
@@ -327,6 +335,8 @@ public:
};
class MCLEBFragment : public MCFragment {
+ virtual void anchor();
+
/// Value - The value this fragment should contain.
const MCExpr *Value;
@@ -358,6 +368,8 @@ public:
};
class MCDwarfLineAddrFragment : public MCFragment {
+ virtual void anchor();
+
/// LineDelta - the value of the difference between the two line numbers
/// between two .loc dwarf directives.
int64_t LineDelta;
@@ -393,6 +405,8 @@ public:
};
class MCDwarfCallFrameFragment : public MCFragment {
+ virtual void anchor();
+
/// AddrDelta - The expression for the difference of the two symbols that
/// make up the address delta between two .cfi_* dwarf directives.
const MCExpr *AddrDelta;
@@ -711,43 +725,44 @@ private:
/// \return Whether the fixup value was fully resolved. This is true if the
/// \arg Value result is fixed, otherwise the value may change due to
/// relocation.
- bool EvaluateFixup(const MCAsmLayout &Layout,
+ bool evaluateFixup(const MCAsmLayout &Layout,
const MCFixup &Fixup, const MCFragment *DF,
MCValue &Target, uint64_t &Value) const;
/// Check whether a fixup can be satisfied, or whether it needs to be relaxed
/// (increased in size, in order to hold its value correctly).
- bool FixupNeedsRelaxation(const MCFixup &Fixup, const MCFragment *DF,
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, const MCInstFragment *DF,
const MCAsmLayout &Layout) const;
/// Check whether the given fragment needs relaxation.
- bool FragmentNeedsRelaxation(const MCInstFragment *IF,
+ bool fragmentNeedsRelaxation(const MCInstFragment *IF,
const MCAsmLayout &Layout) const;
- /// LayoutOnce - Perform one layout iteration and return true if any offsets
+ /// layoutOnce - Perform one layout iteration and return true if any offsets
/// were adjusted.
- bool LayoutOnce(MCAsmLayout &Layout);
+ bool layoutOnce(MCAsmLayout &Layout);
- bool LayoutSectionOnce(MCAsmLayout &Layout, MCSectionData &SD);
+ bool layoutSectionOnce(MCAsmLayout &Layout, MCSectionData &SD);
- bool RelaxInstruction(MCAsmLayout &Layout, MCInstFragment &IF);
+ bool relaxInstruction(MCAsmLayout &Layout, MCInstFragment &IF);
- bool RelaxLEB(MCAsmLayout &Layout, MCLEBFragment &IF);
+ bool relaxLEB(MCAsmLayout &Layout, MCLEBFragment &IF);
- bool RelaxDwarfLineAddr(MCAsmLayout &Layout, MCDwarfLineAddrFragment &DF);
- bool RelaxDwarfCallFrameFragment(MCAsmLayout &Layout,
+ bool relaxDwarfLineAddr(MCAsmLayout &Layout, MCDwarfLineAddrFragment &DF);
+ bool relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
MCDwarfCallFrameFragment &DF);
- /// FinishLayout - Finalize a layout, including fragment lowering.
- void FinishLayout(MCAsmLayout &Layout);
+ /// finishLayout - Finalize a layout, including fragment lowering.
+ void finishLayout(MCAsmLayout &Layout);
- uint64_t HandleFixup(const MCAsmLayout &Layout,
+ uint64_t handleFixup(const MCAsmLayout &Layout,
MCFragment &F, const MCFixup &Fixup);
public:
/// Compute the effective fragment size assuming it is laid out at the given
/// \arg SectionAddress and \arg FragmentOffset.
- uint64_t ComputeFragmentSize(const MCAsmLayout &Layout, const MCFragment &F) const;
+ uint64_t computeFragmentSize(const MCAsmLayout &Layout,
+ const MCFragment &F) const;
/// Find the symbol which defines the atom containing the given symbol, or
/// null if there is no such symbol.
@@ -760,7 +775,7 @@ public:
bool isSymbolLinkerVisible(const MCSymbol &SD) const;
/// Emit the section contents using the given object writer.
- void WriteSectionData(const MCSectionData *Section,
+ void writeSectionData(const MCSectionData *Section,
const MCAsmLayout &Layout) const;
/// Check whether a given symbol has been flagged with .thumb_func.
diff --git a/contrib/llvm/include/llvm/MC/MCCodeEmitter.h b/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
index bc63241..934ef69 100644
--- a/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
+++ b/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
@@ -10,12 +10,8 @@
#ifndef LLVM_MC_MCCODEEMITTER_H
#define LLVM_MC_MCCODEEMITTER_H
-#include "llvm/MC/MCFixup.h"
-
-#include <cassert>
-
namespace llvm {
-class MCExpr;
+class MCFixup;
class MCInst;
class raw_ostream;
template<typename T> class SmallVectorImpl;
diff --git a/contrib/llvm/include/llvm/MC/MCCodeGenInfo.h b/contrib/llvm/include/llvm/MC/MCCodeGenInfo.h
index 1c54c47..d1765e1 100644
--- a/contrib/llvm/include/llvm/MC/MCCodeGenInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCCodeGenInfo.h
@@ -20,7 +20,7 @@
namespace llvm {
class MCCodeGenInfo {
- /// RelocationModel - Relocation model: statcic, pic, etc.
+ /// RelocationModel - Relocation model: static, pic, etc.
///
Reloc::Model RelocationModel;
@@ -28,13 +28,20 @@ namespace llvm {
///
CodeModel::Model CMModel;
+ /// OptLevel - Optimization level.
+ ///
+ CodeGenOpt::Level OptLevel;
+
public:
void InitMCCodeGenInfo(Reloc::Model RM = Reloc::Default,
- CodeModel::Model CM = CodeModel::Default);
+ CodeModel::Model CM = CodeModel::Default,
+ CodeGenOpt::Level OL = CodeGenOpt::Default);
Reloc::Model getRelocationModel() const { return RelocationModel; }
CodeModel::Model getCodeModel() const { return CMModel; }
+
+ CodeGenOpt::Level getOptLevel() const { return OptLevel; }
};
} // namespace llvm
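
With the new field, the optimization level is configured in the same call that sets the relocation and code models. Hedged sketch:

    #include "llvm/MC/MCCodeGenInfo.h"
    using namespace llvm;

    MCCodeGenInfo makeCodeGenInfo() {
      MCCodeGenInfo Info;
      Info.InitMCCodeGenInfo(Reloc::PIC_, CodeModel::Small,
                             CodeGenOpt::Aggressive);
      return Info;
    }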
diff --git a/contrib/llvm/include/llvm/MC/MCContext.h b/contrib/llvm/include/llvm/MC/MCContext.h
index a49a35c..b586319 100644
--- a/contrib/llvm/include/llvm/MC/MCContext.h
+++ b/contrib/llvm/include/llvm/MC/MCContext.h
@@ -15,6 +15,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <vector> // FIXME: Shouldn't be needed.
@@ -29,6 +30,7 @@ namespace llvm {
class MCObjectFileInfo;
class MCRegisterInfo;
class MCLineSection;
+ class SMLoc;
class StringRef;
class Twine;
class MCSectionMachO;
@@ -43,6 +45,8 @@ namespace llvm {
public:
typedef StringMap<MCSymbol*, BumpPtrAllocator&> SymbolTable;
private:
+ /// The SourceMgr for this object, if any.
+ const SourceMgr *SrcMgr;
/// The MCAsmInfo for this target.
const MCAsmInfo &MAI;
@@ -98,6 +102,27 @@ namespace llvm {
MCDwarfLoc CurrentDwarfLoc;
bool DwarfLocSeen;
+ /// Generate dwarf debugging info for assembly source files.
+ bool GenDwarfForAssembly;
+
+  /// The current dwarf file number when generating dwarf debugging info for
+  /// assembly source files.
+ unsigned GenDwarfFileNumber;
+
+ /// The default initial text section that we generate dwarf debugging line
+  /// info for when generating dwarf for assembly source files.
+ const MCSection *GenDwarfSection;
+ /// Symbols created for the start and end of this section.
+ MCSymbol *GenDwarfSectionStartSym, *GenDwarfSectionEndSym;
+
+ /// The information gathered from labels that will have dwarf label
+  /// entries when generating dwarf for assembly source files.
+ std::vector<const MCGenDwarfLabelEntry *> MCGenDwarfLabelEntries;
+
+ /// The string to embed in the debug information for the compile unit, if
+ /// non-empty.
+ StringRef DwarfDebugFlags;
+
/// Honor temporary labels, this is useful for debugging semantic
/// differences between temporary and non-temporary labels (primarily on
/// Darwin).
@@ -116,9 +141,11 @@ namespace llvm {
public:
explicit MCContext(const MCAsmInfo &MAI, const MCRegisterInfo &MRI,
- const MCObjectFileInfo *MOFI);
+ const MCObjectFileInfo *MOFI, const SourceMgr *Mgr = 0);
~MCContext();
+ const SourceMgr *getSourceManager() const { return SrcMgr; }
+
const MCAsmInfo &getAsmInfo() const { return MAI; }
const MCRegisterInfo &getRegisterInfo() const { return MRI; }
@@ -204,7 +231,8 @@ namespace llvm {
/// @{
/// GetDwarfFile - creates an entry in the dwarf file and directory tables.
- unsigned GetDwarfFile(StringRef FileName, unsigned FileNumber);
+ unsigned GetDwarfFile(StringRef Directory, StringRef FileName,
+ unsigned FileNumber);
bool isValidDwarfFileNumber(unsigned FileNumber);
@@ -251,6 +279,31 @@ namespace llvm {
bool getDwarfLocSeen() { return DwarfLocSeen; }
const MCDwarfLoc &getCurrentDwarfLoc() { return CurrentDwarfLoc; }
+ bool getGenDwarfForAssembly() { return GenDwarfForAssembly; }
+ void setGenDwarfForAssembly(bool Value) { GenDwarfForAssembly = Value; }
+ unsigned getGenDwarfFileNumber() { return GenDwarfFileNumber; }
+ unsigned nextGenDwarfFileNumber() { return ++GenDwarfFileNumber; }
+ const MCSection *getGenDwarfSection() { return GenDwarfSection; }
+ void setGenDwarfSection(const MCSection *Sec) { GenDwarfSection = Sec; }
+ MCSymbol *getGenDwarfSectionStartSym() { return GenDwarfSectionStartSym; }
+ void setGenDwarfSectionStartSym(MCSymbol *Sym) {
+ GenDwarfSectionStartSym = Sym;
+ }
+ MCSymbol *getGenDwarfSectionEndSym() { return GenDwarfSectionEndSym; }
+ void setGenDwarfSectionEndSym(MCSymbol *Sym) {
+ GenDwarfSectionEndSym = Sym;
+ }
+ const std::vector<const MCGenDwarfLabelEntry *>
+ &getMCGenDwarfLabelEntries() const {
+ return MCGenDwarfLabelEntries;
+ }
+ void addMCGenDwarfLabelEntry(const MCGenDwarfLabelEntry *E) {
+ MCGenDwarfLabelEntries.push_back(E);
+ }
+
+ void setDwarfDebugFlags(StringRef S) { DwarfDebugFlags = S; }
+ StringRef getDwarfDebugFlags() { return DwarfDebugFlags; }
+
/// @}
char *getSecureLogFile() { return SecureLogFile; }
@@ -268,6 +321,11 @@ namespace llvm {
}
void Deallocate(void *Ptr) {
}
+
+    // An unrecoverable error has occurred. Display the best diagnostic we can
+ // and bail via exit(1). For now, most MC backend errors are unrecoverable.
+ // FIXME: We should really do something about that.
+ LLVM_ATTRIBUTE_NORETURN void FatalError(SMLoc L, const Twine &Msg);
};
} // end namespace llvm
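
The block of GenDwarf state above is what a driver toggles to get line tables for hand-written assembly. A hedged sketch; the surrounding MCContext construction is assumed:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/MC/MCContext.h"
    using namespace llvm;

    void enableAsmDebugInfo(MCContext &Ctx, StringRef CommandLine) {
      Ctx.setGenDwarfForAssembly(true);
      Ctx.setDwarfDebugFlags(CommandLine); // recorded in the compile unit
    }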
diff --git a/contrib/llvm/include/llvm/MC/MCDisassembler.h b/contrib/llvm/include/llvm/MC/MCDisassembler.h
index 454277d..4b5fbec 100644
--- a/contrib/llvm/include/llvm/MC/MCDisassembler.h
+++ b/contrib/llvm/include/llvm/MC/MCDisassembler.h
@@ -90,7 +90,7 @@ public:
/// @return - An array of instruction information, with one entry for
/// each MCInst opcode this disassembler returns.
/// NULL if there is no info for this target.
- virtual EDInstInfo *getEDInfo() const { return (EDInstInfo*)0; }
+ virtual const EDInstInfo *getEDInfo() const { return (EDInstInfo*)0; }
private:
//
diff --git a/contrib/llvm/include/llvm/MC/MCDwarf.h b/contrib/llvm/include/llvm/MC/MCDwarf.h
index 431e3c4..fdb7ab2 100644
--- a/contrib/llvm/include/llvm/MC/MCDwarf.h
+++ b/contrib/llvm/include/llvm/MC/MCDwarf.h
@@ -17,20 +17,18 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MachineLocation.h"
-#include "llvm/MC/MCObjectWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Dwarf.h"
#include <vector>
namespace llvm {
class MCContext;
- class MCExpr;
+ class MCObjectWriter;
class MCSection;
- class MCSectionData;
class MCStreamer;
class MCSymbol;
- class MCObjectStreamer;
- class raw_ostream;
+ class SourceMgr;
+ class SMLoc;
/// MCDwarfFile - Instances of this class represent the name of the dwarf
/// .file directive and its associated dwarf file number in the MC file,
@@ -210,7 +208,7 @@ namespace llvm {
//
// This emits the Dwarf file and the line tables.
//
- static void Emit(MCStreamer *MCOS);
+ static const MCSymbol *Emit(MCStreamer *MCOS);
};
class MCDwarfLineAddr {
@@ -227,23 +225,63 @@ namespace llvm {
int64_t LineDelta, uint64_t AddrDelta);
};
+ class MCGenDwarfInfo {
+ public:
+ //
+ // When generating dwarf for assembly source files this emits the Dwarf
+ // sections.
+ //
+ static void Emit(MCStreamer *MCOS, const MCSymbol *LineSectionSymbol);
+ };
+
+ // When generating dwarf for assembly source files this is the info that is
+ // needed to be gathered for each symbol that will have a dwarf label.
+ class MCGenDwarfLabelEntry {
+ private:
+ // Name of the symbol without a leading underbar, if any.
+ StringRef Name;
+ // The dwarf file number this symbol is in.
+ unsigned FileNumber;
+ // The line number this symbol is at.
+ unsigned LineNumber;
+ // The low_pc for the dwarf label is taken from this symbol.
+ MCSymbol *Label;
+
+ public:
+ MCGenDwarfLabelEntry(StringRef name, unsigned fileNumber,
+ unsigned lineNumber, MCSymbol *label) :
+ Name(name), FileNumber(fileNumber), LineNumber(lineNumber), Label(label){}
+
+ StringRef getName() const { return Name; }
+ unsigned getFileNumber() const { return FileNumber; }
+ unsigned getLineNumber() const { return LineNumber; }
+ MCSymbol *getLabel() const { return Label; }
+
+    // This is called when a label is created when we are generating dwarf for
+ // assembly source files.
+ static void Make(MCSymbol *Symbol, MCStreamer *MCOS, SourceMgr &SrcMgr,
+ SMLoc &Loc);
+ };
+
class MCCFIInstruction {
public:
- enum OpType { SameValue, Remember, Restore, Move, RelMove };
+ enum OpType { SameValue, RememberState, RestoreState, Move, RelMove, Escape,
+ Restore};
private:
OpType Operation;
MCSymbol *Label;
// Move to & from location.
MachineLocation Destination;
MachineLocation Source;
+ std::vector<char> Values;
public:
MCCFIInstruction(OpType Op, MCSymbol *L)
: Operation(Op), Label(L) {
- assert(Op == Remember || Op == Restore);
+ assert(Op == RememberState || Op == RestoreState);
}
MCCFIInstruction(OpType Op, MCSymbol *L, unsigned Register)
: Operation(Op), Label(L), Destination(Register) {
- assert(Op == SameValue);
+ assert(Op == SameValue || Op == Restore);
}
MCCFIInstruction(MCSymbol *L, const MachineLocation &D,
const MachineLocation &S)
@@ -254,16 +292,24 @@ namespace llvm {
: Operation(Op), Label(L), Destination(D), Source(S) {
assert(Op == RelMove);
}
+ MCCFIInstruction(OpType Op, MCSymbol *L, StringRef Vals)
+ : Operation(Op), Label(L), Values(Vals.begin(), Vals.end()) {
+ assert(Op == Escape);
+ }
OpType getOperation() const { return Operation; }
MCSymbol *getLabel() const { return Label; }
const MachineLocation &getDestination() const { return Destination; }
const MachineLocation &getSource() const { return Source; }
+ const StringRef getValues() const {
+ return StringRef(&Values[0], Values.size());
+ }
};
struct MCDwarfFrameInfo {
MCDwarfFrameInfo() : Begin(0), End(0), Personality(0), Lsda(0),
Function(0), Instructions(), PersonalityEncoding(),
- LsdaEncoding(0), CompactUnwindEncoding(0) {}
+ LsdaEncoding(0), CompactUnwindEncoding(0),
+ IsSignalFrame(false) {}
MCSymbol *Begin;
MCSymbol *End;
const MCSymbol *Personality;
@@ -273,6 +319,7 @@ namespace llvm {
unsigned PersonalityEncoding;
unsigned LsdaEncoding;
uint32_t CompactUnwindEncoding;
+ bool IsSignalFrame;
};
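
The new Escape operation carries raw CFI bytes, the in-memory counterpart of a .cfi_escape directive. A hedged construction sketch using a single DW_CFA_nop (0x00) as the payload; label management is assumed to happen in the streamer:

    #include "llvm/MC/MCDwarf.h"
    using namespace llvm;

    MCCFIInstruction makeCFIEscapeNop(MCSymbol *Label) {
      const char Bytes[] = { 0x00 }; // DW_CFA_nop
      return MCCFIInstruction(MCCFIInstruction::Escape, Label,
                              StringRef(Bytes, sizeof(Bytes)));
    }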
class MCDwarfFrameEmitter {
diff --git a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
index 3c150dc..f153cb0 100644
--- a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
@@ -10,28 +10,91 @@
#ifndef LLVM_MC_MCELFOBJECTWRITER_H
#define LLVM_MC_MCELFOBJECTWRITER_H
-#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ELF.h"
+#include <vector>
namespace llvm {
+class MCAssembler;
+class MCFixup;
+class MCFragment;
+class MCObjectWriter;
+class MCSymbol;
+class MCValue;
+
+/// @name Relocation Data
+/// @{
+
+struct ELFRelocationEntry {
+ // Make these big enough for both 32-bit and 64-bit
+ uint64_t r_offset;
+ int Index;
+ unsigned Type;
+ const MCSymbol *Symbol;
+ uint64_t r_addend;
+ const MCFixup *Fixup;
+
+ ELFRelocationEntry()
+ : r_offset(0), Index(0), Type(0), Symbol(0), r_addend(0), Fixup(0) {}
+
+ ELFRelocationEntry(uint64_t RelocOffset, int Idx, unsigned RelType,
+ const MCSymbol *Sym, uint64_t Addend, const MCFixup &Fixup)
+ : r_offset(RelocOffset), Index(Idx), Type(RelType), Symbol(Sym),
+ r_addend(Addend), Fixup(&Fixup) {}
+
+ // Support lexicographic sorting.
+ bool operator<(const ELFRelocationEntry &RE) const {
+ return RE.r_offset < r_offset;
+ }
+};
+
class MCELFObjectTargetWriter {
- const Triple::OSType OSType;
+ const uint8_t OSABI;
const uint16_t EMachine;
const unsigned HasRelocationAddend : 1;
const unsigned Is64Bit : 1;
+
protected:
- MCELFObjectTargetWriter(bool Is64Bit_, Triple::OSType OSType_,
+
+ MCELFObjectTargetWriter(bool Is64Bit_, uint8_t OSABI_,
uint16_t EMachine_, bool HasRelocationAddend_);
public:
- virtual ~MCELFObjectTargetWriter();
+ static uint8_t getOSABI(Triple::OSType OSType) {
+ switch (OSType) {
+ case Triple::FreeBSD:
+ return ELF::ELFOSABI_FREEBSD;
+ case Triple::Linux:
+ return ELF::ELFOSABI_LINUX;
+ default:
+ return ELF::ELFOSABI_NONE;
+ }
+ }
+
+ virtual ~MCELFObjectTargetWriter() {}
+
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) const = 0;
+ virtual unsigned getEFlags() const;
+ virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
+ virtual void adjustFixupOffset(const MCFixup &Fixup,
+ uint64_t &RelocOffset);
+
+ virtual void sortRelocs(const MCAssembler &Asm,
+ std::vector<ELFRelocationEntry> &Relocs);
/// @name Accessors
/// @{
- Triple::OSType getOSType() { return OSType; }
+ uint8_t getOSABI() { return OSABI; }
uint16_t getEMachine() { return EMachine; }
bool hasRelocationAddend() { return HasRelocationAddend; }
- bool is64Bit() { return Is64Bit; }
+ bool is64Bit() const { return Is64Bit; }
/// @}
};
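
Replacing the stored OSType with an ELF OSABI byte moves that policy into one static mapping. Hedged sketch of how a target writer picks it up:

    #include "llvm/ADT/Triple.h"
    #include "llvm/MC/MCELFObjectWriter.h"
    using namespace llvm;

    uint8_t osabiForTriple(const Triple &T) {
      // FreeBSD -> ELFOSABI_FREEBSD, Linux -> ELFOSABI_LINUX, else NONE.
      return MCELFObjectTargetWriter::getOSABI(T.getOS());
    }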
diff --git a/contrib/llvm/include/llvm/MC/MCExpr.h b/contrib/llvm/include/llvm/MC/MCExpr.h
index 0f28599..ff33641 100644
--- a/contrib/llvm/include/llvm/MC/MCExpr.h
+++ b/contrib/llvm/include/llvm/MC/MCExpr.h
@@ -15,7 +15,6 @@
#include "llvm/Support/DataTypes.h"
namespace llvm {
-class MCAsmInfo;
class MCAsmLayout;
class MCAssembler;
class MCContext;
@@ -162,6 +161,7 @@ public:
VK_TPOFF,
VK_DTPOFF,
VK_TLVP, // Mach-O thread local variable relocation
+ VK_SECREL,
// FIXME: We'd really like to use the generic Kinds listed above for these.
VK_ARM_PLT, // ARM-style PLT references. i.e., (PLT) instead of @PLT
VK_ARM_TLSGD, // ditto for TLSGD, GOT, GOTOFF, TPOFF and GOTTPOFF
@@ -169,12 +169,32 @@ public:
VK_ARM_GOTOFF,
VK_ARM_TPOFF,
VK_ARM_GOTTPOFF,
+ VK_ARM_TARGET1,
VK_PPC_TOC,
VK_PPC_DARWIN_HA16, // ha16(symbol)
VK_PPC_DARWIN_LO16, // lo16(symbol)
VK_PPC_GAS_HA16, // symbol@ha
- VK_PPC_GAS_LO16 // symbol@l
+ VK_PPC_GAS_LO16, // symbol@l
+
+ VK_Mips_GPREL,
+ VK_Mips_GOT_CALL,
+ VK_Mips_GOT16,
+ VK_Mips_GOT,
+ VK_Mips_ABS_HI,
+ VK_Mips_ABS_LO,
+ VK_Mips_TLSGD,
+ VK_Mips_TLSLDM,
+ VK_Mips_DTPREL_HI,
+ VK_Mips_DTPREL_LO,
+ VK_Mips_GOTTPREL,
+ VK_Mips_TPREL_HI,
+ VK_Mips_TPREL_LO,
+ VK_Mips_GPOFF_HI,
+ VK_Mips_GPOFF_LO,
+ VK_Mips_GOT_DISP,
+ VK_Mips_GOT_PAGE,
+ VK_Mips_GOT_OFST
};
private:
@@ -185,7 +205,9 @@ private:
const VariantKind Kind;
explicit MCSymbolRefExpr(const MCSymbol *_Symbol, VariantKind _Kind)
- : MCExpr(MCExpr::SymbolRef), Symbol(_Symbol), Kind(_Kind) {}
+ : MCExpr(MCExpr::SymbolRef), Symbol(_Symbol), Kind(_Kind) {
+ assert(Symbol);
+ }
public:
/// @name Construction
diff --git a/contrib/llvm/include/llvm/MC/MCFixup.h b/contrib/llvm/include/llvm/MC/MCFixup.h
index 6fde797..16e9eb7 100644
--- a/contrib/llvm/include/llvm/MC/MCFixup.h
+++ b/contrib/llvm/include/llvm/MC/MCFixup.h
@@ -11,6 +11,8 @@
#define LLVM_MC_MCFIXUP_H
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SMLoc.h"
#include <cassert>
namespace llvm {
@@ -26,6 +28,14 @@ enum MCFixupKind {
FK_PCRel_2, ///< A two-byte pc relative fixup.
FK_PCRel_4, ///< A four-byte pc relative fixup.
   FK_PCRel_8,    ///< An eight-byte pc relative fixup.
+ FK_GPRel_1, ///< A one-byte gp relative fixup.
+ FK_GPRel_2, ///< A two-byte gp relative fixup.
+ FK_GPRel_4, ///< A four-byte gp relative fixup.
+  FK_GPRel_8,    ///< An eight-byte gp relative fixup.
+ FK_SecRel_1, ///< A one-byte section relative fixup.
+ FK_SecRel_2, ///< A two-byte section relative fixup.
+ FK_SecRel_4, ///< A four-byte section relative fixup.
+  FK_SecRel_8,   ///< An eight-byte section relative fixup.
FirstTargetFixupKind = 128,
@@ -61,14 +71,17 @@ class MCFixup {
/// determine how the operand value should be encoded into the instruction.
unsigned Kind;
+ /// The source location which gave rise to the fixup, if any.
+ SMLoc Loc;
public:
static MCFixup Create(uint32_t Offset, const MCExpr *Value,
- MCFixupKind Kind) {
+ MCFixupKind Kind, SMLoc Loc = SMLoc()) {
assert(unsigned(Kind) < MaxTargetFixupKind && "Kind out of range!");
MCFixup FI;
FI.Value = Value;
FI.Offset = Offset;
FI.Kind = unsigned(Kind);
+ FI.Loc = Loc;
return FI;
}
@@ -83,13 +96,15 @@ public:
/// size. It is an error to pass an unsupported size.
static MCFixupKind getKindForSize(unsigned Size, bool isPCRel) {
switch (Size) {
- default: assert(0 && "Invalid generic fixup size!");
+ default: llvm_unreachable("Invalid generic fixup size!");
case 1: return isPCRel ? FK_PCRel_1 : FK_Data_1;
case 2: return isPCRel ? FK_PCRel_2 : FK_Data_2;
case 4: return isPCRel ? FK_PCRel_4 : FK_Data_4;
case 8: return isPCRel ? FK_PCRel_8 : FK_Data_8;
}
}
+
+ SMLoc getLoc() const { return Loc; }
};
} // End llvm namespace
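MCFixup now carries the SMLoc of the source construct that produced it, so diagnostics emitted long after parsing can still point at the right spot. A minimal sketch against only the signatures in the hunk above (the offset and expression are caller-supplied placeholders):

    #include "llvm/MC/MCFixup.h"
    using namespace llvm;

    static MCFixup makeDataFixup(uint32_t Offset, const MCExpr *Value, SMLoc Loc) {
      // getKindForSize(4, /*isPCRel=*/false) resolves to FK_Data_4.
      MCFixupKind Kind = MCFixup::getKindForSize(4, /*isPCRel=*/false);
      MCFixup F = MCFixup::Create(Offset, Value, Kind, Loc);
      return F;               // F.getLoc() later recovers the source location
    }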
diff --git a/contrib/llvm/include/llvm/MC/MCInst.h b/contrib/llvm/include/llvm/MC/MCInst.h
index d384764..397a37d 100644
--- a/contrib/llvm/include/llvm/MC/MCInst.h
+++ b/contrib/llvm/include/llvm/MC/MCInst.h
@@ -19,12 +19,14 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/SMLoc.h"
namespace llvm {
class raw_ostream;
class MCAsmInfo;
class MCInstPrinter;
class MCExpr;
+class MCInst;
/// MCOperand - Instances of this class represent operands of the MCInst class.
/// This is a simple discriminated union.
@@ -34,7 +36,8 @@ class MCOperand {
kRegister, ///< Register operand.
kImmediate, ///< Immediate operand.
kFPImmediate, ///< Floating-point immediate operand.
- kExpr ///< Relocatable immediate operand.
+ kExpr, ///< Relocatable immediate operand.
+ kInst ///< Sub-instruction operand.
};
unsigned char Kind;
@@ -43,6 +46,7 @@ class MCOperand {
int64_t ImmVal;
double FPImmVal;
const MCExpr *ExprVal;
+ const MCInst *InstVal;
};
public:
@@ -53,6 +57,7 @@ public:
bool isImm() const { return Kind == kImmediate; }
bool isFPImm() const { return Kind == kFPImmediate; }
bool isExpr() const { return Kind == kExpr; }
+ bool isInst() const { return Kind == kInst; }
/// getReg - Returns the register number.
unsigned getReg() const {
@@ -94,6 +99,15 @@ public:
ExprVal = Val;
}
+ const MCInst *getInst() const {
+ assert(isInst() && "This is not a sub-instruction");
+ return InstVal;
+ }
+ void setInst(const MCInst *Val) {
+ assert(isInst() && "This is not a sub-instruction");
+ InstVal = Val;
+ }
+
static MCOperand CreateReg(unsigned Reg) {
MCOperand Op;
Op.Kind = kRegister;
@@ -118,24 +132,34 @@ public:
Op.ExprVal = Val;
return Op;
}
+ static MCOperand CreateInst(const MCInst *Val) {
+ MCOperand Op;
+ Op.Kind = kInst;
+ Op.InstVal = Val;
+ return Op;
+ }
void print(raw_ostream &OS, const MCAsmInfo *MAI) const;
void dump() const;
};
+template <> struct isPodLike<MCOperand> { static const bool value = true; };
/// MCInst - Instances of this class represent a single low-level machine
/// instruction.
class MCInst {
unsigned Opcode;
+ SMLoc Loc;
SmallVector<MCOperand, 8> Operands;
public:
MCInst() : Opcode(0) {}
void setOpcode(unsigned Op) { Opcode = Op; }
-
unsigned getOpcode() const { return Opcode; }
+ void setLoc(SMLoc loc) { Loc = loc; }
+ SMLoc getLoc() const { return Loc; }
+
const MCOperand &getOperand(unsigned i) const { return Operands[i]; }
MCOperand &getOperand(unsigned i) { return Operands[i]; }
unsigned getNumOperands() const { return Operands.size(); }
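MCOperand gains a kInst case so an operand can reference a whole sub-instruction, and MCInst now records an SMLoc. A usage sketch using only the API visible above; the opcode and register numbers are placeholders, and CreateInst stores a raw pointer without taking ownership:

    #include "llvm/MC/MCInst.h"
    using namespace llvm;

    void buildOuter() {
      MCInst Inner;                     // must outlive Outer
      Inner.setOpcode(1);

      MCInst Outer;
      Outer.setOpcode(2);
      Outer.setLoc(SMLoc());            // default SMLoc means "no location"
      Outer.addOperand(MCOperand::CreateReg(5));
      Outer.addOperand(MCOperand::CreateInst(&Inner));

      if (Outer.getOperand(1).isInst()) {
        const MCInst *Sub = Outer.getOperand(1).getInst();
        (void)Sub;
      }
    }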
diff --git a/contrib/llvm/include/llvm/MC/MCInstPrinter.h b/contrib/llvm/include/llvm/MC/MCInstPrinter.h
index 01ad2d3f..3c4f28b 100644
--- a/contrib/llvm/include/llvm/MC/MCInstPrinter.h
+++ b/contrib/llvm/include/llvm/MC/MCInstPrinter.h
@@ -14,6 +14,8 @@ namespace llvm {
class MCInst;
class raw_ostream;
class MCAsmInfo;
+class MCInstrInfo;
+class MCRegisterInfo;
class StringRef;
/// MCInstPrinter - This is an instance of a target assembly language printer
@@ -25,6 +27,8 @@ protected:
/// assembly emission is disabled.
raw_ostream *CommentStream;
const MCAsmInfo &MAI;
+ const MCInstrInfo &MII;
+ const MCRegisterInfo &MRI;
/// The current set of available features.
unsigned AvailableFeatures;
@@ -32,8 +36,9 @@ protected:
/// Utility function for printing annotations.
void printAnnotation(raw_ostream &OS, StringRef Annot);
public:
- MCInstPrinter(const MCAsmInfo &mai)
- : CommentStream(0), MAI(mai), AvailableFeatures(0) {}
+ MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii,
+ const MCRegisterInfo &mri)
+ : CommentStream(0), MAI(mai), MII(mii), MRI(mri), AvailableFeatures(0) {}
virtual ~MCInstPrinter();
@@ -47,7 +52,7 @@ public:
/// getOpcodeName - Return the name of the specified opcode enum (e.g.
/// "MOV32ri") or empty if we can't resolve it.
- virtual StringRef getOpcodeName(unsigned Opcode) const;
+ StringRef getOpcodeName(unsigned Opcode) const;
/// printRegName - Print the assembler register name.
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
diff --git a/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h b/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h
index 8f3c499..acad633 100644
--- a/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h
+++ b/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h
@@ -33,7 +33,7 @@ public:
}
virtual bool isConditionalBranch(const MCInst &Inst) const {
- return Info->get(Inst.getOpcode()).isBranch();
+ return Info->get(Inst.getOpcode()).isConditionalBranch();
}
virtual bool isUnconditionalBranch(const MCInst &Inst) const {
diff --git a/contrib/llvm/include/llvm/MC/MCInstrDesc.h b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
index aafa800..186612d 100644
--- a/contrib/llvm/include/llvm/MC/MCInstrDesc.h
+++ b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
@@ -58,17 +58,17 @@ public:
/// if the operand is a register. If isLookupPtrRegClass is set, then this is
/// an index that is passed to TargetRegisterInfo::getPointerRegClass(x) to
/// get a dynamic register class.
- short RegClass;
+ int16_t RegClass;
/// Flags - These are flags from the MCOI::OperandFlags enum.
- unsigned short Flags;
+ uint8_t Flags;
+
+ /// OperandType - Information about the type of the operand.
+ uint8_t OperandType;
/// Lower 16 bits are used to specify which constraints are set. The higher 16
/// bits are used to specify the value of constraints (4 bits each).
- unsigned Constraints;
-
- /// OperandType - Information about the type of the operand.
- MCOI::OperandType OperandType;
+ uint32_t Constraints;
/// Currently no other information.
/// isLookupPtrRegClass - Set if this operand is a pointer value and it
@@ -137,11 +137,10 @@ public:
unsigned short NumDefs; // Num of args that are definitions
unsigned short SchedClass; // enum identifying instr sched class
unsigned short Size; // Number of bytes in encoding.
- const char * Name; // Name of the instruction record in td file
unsigned Flags; // Flags identifying machine instr class
uint64_t TSFlags; // Target Specific Flag values
- const unsigned *ImplicitUses; // Registers implicitly read by this instr
- const unsigned *ImplicitDefs; // Registers implicitly defined by this instr
+ const uint16_t *ImplicitUses; // Registers implicitly read by this instr
+ const uint16_t *ImplicitDefs; // Registers implicitly defined by this instr
const MCOperandInfo *OpInfo; // 'NumOperands' entries about operands
/// getOperandConstraint - Returns the value of the specific constraint if
@@ -161,12 +160,6 @@ public:
return Opcode;
}
- /// getName - Return the name of the record in the .td file for this
- /// instruction, for example "ADD8ri".
- const char *getName() const {
- return Name;
- }
-
/// getNumOperands - Return the number of declared MachineOperands for this
/// MachineInstruction. Note that variadic (isVariadic() returns true)
/// instructions may have additional operands at the end of the list, and note
@@ -184,6 +177,10 @@ public:
return NumDefs;
}
+ /// getFlags - Return flags of this instruction.
+ ///
+ unsigned getFlags() const { return Flags; }
+
/// isVariadic - Return true if this instruction can have a variable number of
/// operands. In this case, the variable operands will be after the normal
/// operands but before the implicit definitions and uses (if any are
@@ -198,84 +195,6 @@ public:
return Flags & (1 << MCID::HasOptionalDef);
}
- /// getImplicitUses - Return a list of registers that are potentially
- /// read by any instance of this machine instruction. For example, on X86,
- /// the "adc" instruction adds two register operands and adds the carry bit in
- /// from the flags register. In this case, the instruction is marked as
- /// implicitly reading the flags. Likewise, the variable shift instruction on
- /// X86 is marked as implicitly reading the 'CL' register, which it always
- /// does.
- ///
- /// This method returns null if the instruction has no implicit uses.
- const unsigned *getImplicitUses() const {
- return ImplicitUses;
- }
-
- /// getNumImplicitUses - Return the number of implicit uses this instruction
- /// has.
- unsigned getNumImplicitUses() const {
- if (ImplicitUses == 0) return 0;
- unsigned i = 0;
- for (; ImplicitUses[i]; ++i) /*empty*/;
- return i;
- }
-
- /// getImplicitDefs - Return a list of registers that are potentially
- /// written by any instance of this machine instruction. For example, on X86,
- /// many instructions implicitly set the flags register. In this case, they
- /// are marked as setting the FLAGS. Likewise, many instructions always
- /// deposit their result in a physical register. For example, the X86 divide
- /// instruction always deposits the quotient and remainder in the EAX/EDX
- /// registers. For that instruction, this will return a list containing the
- /// EAX/EDX/EFLAGS registers.
- ///
- /// This method returns null if the instruction has no implicit defs.
- const unsigned *getImplicitDefs() const {
- return ImplicitDefs;
- }
-
- /// getNumImplicitDefs - Return the number of implicit defs this instruction
- /// has.
- unsigned getNumImplicitDefs() const {
- if (ImplicitDefs == 0) return 0;
- unsigned i = 0;
- for (; ImplicitDefs[i]; ++i) /*empty*/;
- return i;
- }
-
- /// hasImplicitUseOfPhysReg - Return true if this instruction implicitly
- /// uses the specified physical register.
- bool hasImplicitUseOfPhysReg(unsigned Reg) const {
- if (const unsigned *ImpUses = ImplicitUses)
- for (; *ImpUses; ++ImpUses)
- if (*ImpUses == Reg) return true;
- return false;
- }
-
- /// hasImplicitDefOfPhysReg - Return true if this instruction implicitly
- /// defines the specified physical register.
- bool hasImplicitDefOfPhysReg(unsigned Reg) const {
- if (const unsigned *ImpDefs = ImplicitDefs)
- for (; *ImpDefs; ++ImpDefs)
- if (*ImpDefs == Reg) return true;
- return false;
- }
-
- /// getSchedClass - Return the scheduling class for this instruction. The
- /// scheduling class is an index into the InstrItineraryData table. This
- /// returns zero if there is no known scheduling information for the
- /// instruction.
- ///
- unsigned getSchedClass() const {
- return SchedClass;
- }
-
- /// getSize - Return the number of bytes in the encoding of this instruction,
- /// or zero if the encoding size cannot be known from the opcode.
- unsigned getSize() const {
- return Size;
- }
-
/// isPseudo - Return true if this is a pseudo instruction that doesn't
/// correspond to a real machine instruction.
///
@@ -298,18 +217,6 @@ public:
return Flags & (1 << MCID::Barrier);
}
- /// findFirstPredOperandIdx() - Find the index of the first operand in the
- /// operand list that is used to represent the predicate. It returns -1 if
- /// none is found.
- int findFirstPredOperandIdx() const {
- if (isPredicable()) {
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (OpInfo[i].isPredicate())
- return i;
- }
- return -1;
- }
-
/// isTerminator - Returns true if this instruction is part of the terminator for
/// a basic block. Typically this is things like return and branch
/// instructions.
@@ -530,6 +437,97 @@ public:
bool hasExtraDefRegAllocReq() const {
return Flags & (1 << MCID::ExtraDefRegAllocReq);
}
+
+
+ /// getImplicitUses - Return a list of registers that are potentially
+ /// read by any instance of this machine instruction. For example, on X86,
+ /// the "adc" instruction adds two register operands and adds the carry bit in
+ /// from the flags register. In this case, the instruction is marked as
+ /// implicitly reading the flags. Likewise, the variable shift instruction on
+ /// X86 is marked as implicitly reading the 'CL' register, which it always
+ /// does.
+ ///
+ /// This method returns null if the instruction has no implicit uses.
+ const uint16_t *getImplicitUses() const {
+ return ImplicitUses;
+ }
+
+ /// getNumImplicitUses - Return the number of implicit uses this instruction
+ /// has.
+ unsigned getNumImplicitUses() const {
+ if (ImplicitUses == 0) return 0;
+ unsigned i = 0;
+ for (; ImplicitUses[i]; ++i) /*empty*/;
+ return i;
+ }
+
+ /// getImplicitDefs - Return a list of registers that are potentially
+ /// written by any instance of this machine instruction. For example, on X86,
+ /// many instructions implicitly set the flags register. In this case, they
+ /// are marked as setting the FLAGS. Likewise, many instructions always
+ /// deposit their result in a physical register. For example, the X86 divide
+ /// instruction always deposits the quotient and remainder in the EAX/EDX
+ /// registers. For that instruction, this will return a list containing the
+ /// EAX/EDX/EFLAGS registers.
+ ///
+ /// This method returns null if the instruction has no implicit defs.
+ const uint16_t *getImplicitDefs() const {
+ return ImplicitDefs;
+ }
+
+ /// getNumImplicitDefs - Return the number of implicit defs this instruction
+ /// has.
+ unsigned getNumImplicitDefs() const {
+ if (ImplicitDefs == 0) return 0;
+ unsigned i = 0;
+ for (; ImplicitDefs[i]; ++i) /*empty*/;
+ return i;
+ }
+
+ /// hasImplicitUseOfPhysReg - Return true if this instruction implicitly
+ /// uses the specified physical register.
+ bool hasImplicitUseOfPhysReg(unsigned Reg) const {
+ if (const uint16_t *ImpUses = ImplicitUses)
+ for (; *ImpUses; ++ImpUses)
+ if (*ImpUses == Reg) return true;
+ return false;
+ }
+
+ /// hasImplicitDefOfPhysReg - Return true if this instruction implicitly
+ /// defines the specified physical register.
+ bool hasImplicitDefOfPhysReg(unsigned Reg) const {
+ if (const uint16_t *ImpDefs = ImplicitDefs)
+ for (; *ImpDefs; ++ImpDefs)
+ if (*ImpDefs == Reg) return true;
+ return false;
+ }
+
+ /// getSchedClass - Return the scheduling class for this instruction. The
+ /// scheduling class is an index into the InstrItineraryData table. This
+ /// returns zero if there is no known scheduling information for the
+ /// instruction.
+ ///
+ unsigned getSchedClass() const {
+ return SchedClass;
+ }
+
+ /// getSize - Return the number of bytes in the encoding of this instruction,
+ /// or zero if the encoding size cannot be known from the opcode.
+ unsigned getSize() const {
+ return Size;
+ }
+
+ /// findFirstPredOperandIdx() - Find the index of the first operand in the
+ /// operand list that is used to represent the predicate. It returns -1 if
+ /// none is found.
+ int findFirstPredOperandIdx() const {
+ if (isPredicable()) {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ if (OpInfo[i].isPredicate())
+ return i;
+ }
+ return -1;
+ }
};
} // end namespace llvm
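The implicit use/def lists moved below the flag predicates but kept their representation: zero-terminated arrays, now of uint16_t. A short sketch of scanning one, equivalent to hasImplicitDefOfPhysReg and shown only to make the layout explicit:

    #include "llvm/MC/MCInstrDesc.h"
    using namespace llvm;

    static bool writesReg(const MCInstrDesc &Desc, unsigned Reg) {
      if (const uint16_t *Defs = Desc.getImplicitDefs())
        for (; *Defs; ++Defs)           // list ends at the first 0 entry
          if (*Defs == Reg)
            return true;
      return false;
    }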
diff --git a/contrib/llvm/include/llvm/MC/MCInstrInfo.h b/contrib/llvm/include/llvm/MC/MCInstrInfo.h
index a63e5fa..1d3a36c 100644
--- a/contrib/llvm/include/llvm/MC/MCInstrInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCInstrInfo.h
@@ -24,14 +24,19 @@ namespace llvm {
/// MCInstrInfo - Interface to description of machine instruction set
///
class MCInstrInfo {
- const MCInstrDesc *Desc; // Raw array to allow static init'n
- unsigned NumOpcodes; // Number of entries in the desc array
+ const MCInstrDesc *Desc; // Raw array to allow static init'n
+ const unsigned *InstrNameIndices; // Array for name indices in InstrNameData
+ const char *InstrNameData; // Instruction name string pool
+ unsigned NumOpcodes; // Number of entries in the desc array
public:
/// InitMCInstrInfo - Initialize MCInstrInfo, called by TableGen
/// auto-generated routines. *DO NOT USE*.
- void InitMCInstrInfo(const MCInstrDesc *D, unsigned NO) {
+ void InitMCInstrInfo(const MCInstrDesc *D, const unsigned *NI, const char *ND,
+ unsigned NO) {
Desc = D;
+ InstrNameIndices = NI;
+ InstrNameData = ND;
NumOpcodes = NO;
}
@@ -44,6 +49,12 @@ public:
assert(Opcode < NumOpcodes && "Invalid opcode!");
return Desc[Opcode];
}
+
+ /// getName - Returns the name of the instruction with the given opcode.
+ const char *getName(unsigned Opcode) const {
+ assert(Opcode < NumOpcodes && "Invalid opcode!");
+ return &InstrNameData[InstrNameIndices[Opcode]];
+ }
};
} // End llvm namespace
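Instruction names are now kept in a single shared character pool with a per-opcode offset table, instead of a const char* in each MCInstrDesc. A self-contained illustration of the same layout (the strings and offsets here are made up, not the TableGen output):

    #include <cassert>

    static const char NameData[] = "ADD8ri\0MOV32ri\0RET\0";
    static const unsigned NameIndices[] = { 0, 7, 15 };

    static const char *getName(unsigned Opcode) {
      assert(Opcode < 3 && "Invalid opcode!");
      return &NameData[NameIndices[Opcode]];   // getName(1) == "MOV32ri"
    }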
diff --git a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
index 060d508..aea4b41 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
@@ -14,15 +14,14 @@
#ifndef LLVM_MC_MCOBJECTFILEINFO_H
#define LLVM_MC_MCOBJECTFILEINFO_H
-#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/MC/SectionKind.h"
+#include "llvm/Support/CodeGen.h"
namespace llvm {
-class MCContext;
-class MCSection;
-class Triple;
-
+ class MCContext;
+ class MCSection;
+ class StringRef;
+ class Triple;
+
class MCObjectFileInfo {
protected:
/// CommDirectiveSupportsAlignment - True if .comm supports alignment. This
@@ -47,6 +46,9 @@ protected:
unsigned FDEEncoding;
unsigned FDECFIEncoding;
unsigned TTypeEncoding;
+ // Section flags for eh_frame
+ unsigned EHSectionType;
+ unsigned EHSectionFlags;
/// TextSection - Section directive for standard text.
///
@@ -82,13 +84,20 @@ protected:
/// this is the section to emit them into.
const MCSection *CompactUnwindSection;
+ /// DwarfAccelNamesSection, DwarfAccelObjCSection
+ /// If we use the DWARF accelerated hash tables then we want to emit these
+ /// sections.
+ const MCSection *DwarfAccelNamesSection;
+ const MCSection *DwarfAccelObjCSection;
+ const MCSection *DwarfAccelNamespaceSection;
+ const MCSection *DwarfAccelTypesSection;
+
// Dwarf sections for debug info. If a target supports debug info, these must
// be set.
const MCSection *DwarfAbbrevSection;
const MCSection *DwarfInfoSection;
const MCSection *DwarfLineSection;
const MCSection *DwarfFrameSection;
- const MCSection *DwarfPubNamesSection;
const MCSection *DwarfPubTypesSection;
const MCSection *DwarfDebugInlineSection;
const MCSection *DwarfStrSection;
@@ -102,7 +111,7 @@ protected:
const MCSection *TLSExtraDataSection;
/// TLSDataSection - Section directive for Thread Local data.
- /// ELF and MachO only.
+ /// ELF, MachO and COFF.
const MCSection *TLSDataSection; // Defaults to ".tdata".
/// TLSBSSSection - Section directive for Thread Local uninitialized data.
@@ -156,7 +165,7 @@ protected:
const MCSection *DrectveSection;
const MCSection *PDataSection;
const MCSection *XDataSection;
-
+
public:
void InitMCObjectFileInfo(StringRef TT, Reloc::Model RM, CodeModel::Model CM,
MCContext &ctx);
@@ -181,17 +190,26 @@ public:
const MCSection *getTextSection() const { return TextSection; }
const MCSection *getDataSection() const { return DataSection; }
const MCSection *getBSSSection() const { return BSSSection; }
- const MCSection *getStaticCtorSection() const { return StaticCtorSection; }
- const MCSection *getStaticDtorSection() const { return StaticDtorSection; }
const MCSection *getLSDASection() const { return LSDASection; }
const MCSection *getCompactUnwindSection() const{
return CompactUnwindSection;
}
+ const MCSection *getDwarfAccelNamesSection() const {
+ return DwarfAccelNamesSection;
+ }
+ const MCSection *getDwarfAccelObjCSection() const {
+ return DwarfAccelObjCSection;
+ }
+ const MCSection *getDwarfAccelNamespaceSection() const {
+ return DwarfAccelNamespaceSection;
+ }
+ const MCSection *getDwarfAccelTypesSection() const {
+ return DwarfAccelTypesSection;
+ }
const MCSection *getDwarfAbbrevSection() const { return DwarfAbbrevSection; }
const MCSection *getDwarfInfoSection() const { return DwarfInfoSection; }
const MCSection *getDwarfLineSection() const { return DwarfLineSection; }
const MCSection *getDwarfFrameSection() const { return DwarfFrameSection; }
- const MCSection *getDwarfPubNamesSection() const{return DwarfPubNamesSection;}
const MCSection *getDwarfPubTypesSection() const{return DwarfPubTypesSection;}
const MCSection *getDwarfDebugInlineSection() const {
return DwarfDebugInlineSection;
diff --git a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
index f897e64..a69075d 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -34,6 +34,8 @@ class MCObjectStreamer : public MCStreamer {
MCSectionData *CurSectionData;
virtual void EmitInstToData(const MCInst &Inst) = 0;
+ virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
+ virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame);
protected:
MCObjectStreamer(MCContext &Context, MCAsmBackend &TAB,
@@ -70,14 +72,15 @@ public:
virtual void ChangeSection(const MCSection *Section);
virtual void EmitInstruction(const MCInst &Inst);
virtual void EmitInstToFragment(const MCInst &Inst);
- virtual void EmitValueToOffset(const MCExpr *Offset, unsigned char Value);
+ virtual bool EmitValueToOffset(const MCExpr *Offset, unsigned char Value);
virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
const MCSymbol *LastLabel,
const MCSymbol *Label,
unsigned PointerSize);
virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
const MCSymbol *Label);
- virtual void Finish();
+ virtual void EmitGPRel32Value(const MCExpr *Value);
+ virtual void FinishImpl();
/// @}
};
diff --git a/contrib/llvm/include/llvm/MC/MCObjectWriter.h b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
index 782d844..6e44e6ce 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
@@ -10,7 +10,6 @@
#ifndef LLVM_MC_MCOBJECTWRITER_H
#define LLVM_MC_MCOBJECTWRITER_H
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
@@ -20,11 +19,9 @@ class MCAsmLayout;
class MCAssembler;
class MCFixup;
class MCFragment;
-class MCSymbol;
class MCSymbolData;
class MCSymbolRefExpr;
class MCValue;
-class raw_ostream;
/// MCObjectWriter - Defines the object file and target independent interfaces
/// used by the assembler backend to write native file format object files.
@@ -188,11 +185,10 @@ public:
/// Utility function to encode a SLEB128 value.
static void EncodeSLEB128(int64_t Value, raw_ostream &OS);
/// Utility function to encode a ULEB128 value.
- static void EncodeULEB128(uint64_t Value, raw_ostream &OS);
+ static void EncodeULEB128(uint64_t Value, raw_ostream &OS,
+ unsigned Padding = 0);
};
-MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS, bool is64Bit);
-
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
index 9bbb755..ac04483 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -71,6 +71,7 @@ public:
bool isNot(TokenKind K) const { return Kind != K; }
SMLoc getLoc() const;
+ SMLoc getEndLoc() const;
/// getStringContents - Get the contents of a string token (without quotes).
StringRef getStringContents() const {
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
index 6ff1753..793c709 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
@@ -11,6 +11,7 @@
#define LLVM_MC_MCASMPARSER_H
#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/ArrayRef.h"
namespace llvm {
class AsmToken;
@@ -22,6 +23,7 @@ class MCExpr;
class MCStreamer;
class MCTargetAsmParser;
class SMLoc;
+class SMRange;
class SourceMgr;
class StringRef;
class Twine;
@@ -62,6 +64,9 @@ public:
MCTargetAsmParser &getTargetParser() const { return *TargetParser; }
void setTargetParser(MCTargetAsmParser &P);
+ virtual unsigned getAssemblerDialect() { return 0; }
+ virtual void setAssemblerDialect(unsigned i) { }
+
bool getShowParsedOperands() const { return ShowParsedOperands; }
void setShowParsedOperands(bool Value) { ShowParsedOperands = Value; }
@@ -72,14 +77,16 @@ public:
/// Msg.
///
/// \return The return value is true if warnings are fatal.
- virtual bool Warning(SMLoc L, const Twine &Msg) = 0;
+ virtual bool Warning(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) = 0;
/// Error - Emit an error at the location \arg L, with the message \arg
/// Msg.
///
/// \return The return value is always true, as an idiomatic convenience to
/// clients.
- virtual bool Error(SMLoc L, const Twine &Msg) = 0;
+ virtual bool Error(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) = 0;
/// Lex - Get the next AsmToken in the stream, possibly handling file
/// inclusion first.
@@ -89,7 +96,8 @@ public:
const AsmToken &getTok();
/// \brief Report an error at the current lexer location.
- bool TokError(const Twine &Msg);
+ bool TokError(const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>());
/// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
/// and set \arg Res to the identifier contents.
diff --git a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
index ada5ae8..27acf2f 100644
--- a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
+++ b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
@@ -17,6 +17,7 @@
#define LLVM_MC_MCREGISTERINFO_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cassert>
namespace llvm {
@@ -24,28 +25,18 @@ namespace llvm {
/// MCRegisterClass - Base class of TargetRegisterClass.
class MCRegisterClass {
public:
- typedef const unsigned* iterator;
- typedef const unsigned* const_iterator;
-private:
- unsigned ID;
+ typedef const uint16_t* iterator;
+ typedef const uint16_t* const_iterator;
+
const char *Name;
- const unsigned RegSize, Alignment; // Size & Alignment of register in bytes
- const int CopyCost;
+ const iterator RegsBegin;
+ const uint8_t *const RegSet;
+ const uint16_t RegsSize;
+ const uint16_t RegSetSize;
+ const uint16_t ID;
+ const uint16_t RegSize, Alignment; // Size & Alignment of register in bytes
+ const int8_t CopyCost;
const bool Allocatable;
- const iterator RegsBegin, RegsEnd;
- const unsigned char *const RegSet;
- const unsigned RegSetSize;
-public:
- MCRegisterClass(unsigned id, const char *name,
- unsigned RS, unsigned Al, int CC, bool Allocable,
- iterator RB, iterator RE, const unsigned char *Bits,
- unsigned NumBytes)
- : ID(id), Name(name), RegSize(RS), Alignment(Al), CopyCost(CC),
- Allocatable(Allocable), RegsBegin(RB), RegsEnd(RE), RegSet(Bits),
- RegSetSize(NumBytes) {
- for (iterator i = RegsBegin; i != RegsEnd; ++i)
- assert(contains(*i) && "Bit field corrupted.");
- }
/// getID() - Return the register class ID number.
///
@@ -58,11 +49,11 @@ public:
/// begin/end - Return all of the registers in this class.
///
iterator begin() const { return RegsBegin; }
- iterator end() const { return RegsEnd; }
+ iterator end() const { return RegsBegin + RegsSize; }
/// getNumRegs - Return the number of registers in this class.
///
- unsigned getNumRegs() const { return (unsigned)(RegsEnd-RegsBegin); }
+ unsigned getNumRegs() const { return RegsSize; }
/// getRegister - Return the specified register in the class.
///
@@ -115,10 +106,10 @@ public:
/// of AX.
///
struct MCRegisterDesc {
- const char *Name; // Printable name for the reg (for debugging)
- const unsigned *Overlaps; // Overlapping registers, described above
- const unsigned *SubRegs; // Sub-register set, described above
- const unsigned *SuperRegs; // Super-register set, described above
+ const char *Name; // Printable name for the reg (for debugging)
+ uint32_t Overlaps; // Overlapping registers, described above
+ uint32_t SubRegs; // Sub-register set, described above
+ uint32_t SuperRegs; // Super-register set, described above
};
/// MCRegisterInfo base class - We assume that the target defines a static
@@ -136,50 +127,82 @@ struct MCRegisterDesc {
class MCRegisterInfo {
public:
typedef const MCRegisterClass *regclass_iterator;
+
+ /// DwarfLLVMRegPair - Emitted by tablegen so Dwarf<->LLVM reg mappings can be
+ /// performed with a binary search.
+ struct DwarfLLVMRegPair {
+ unsigned FromReg;
+ unsigned ToReg;
+
+ bool operator<(DwarfLLVMRegPair RHS) const { return FromReg < RHS.FromReg; }
+ };
private:
const MCRegisterDesc *Desc; // Pointer to the descriptor array
unsigned NumRegs; // Number of entries in the array
unsigned RAReg; // Return address register
const MCRegisterClass *Classes; // Pointer to the regclass array
unsigned NumClasses; // Number of entries in the array
- DenseMap<unsigned, int> L2DwarfRegs; // LLVM to Dwarf regs mapping
- DenseMap<unsigned, int> EHL2DwarfRegs; // LLVM to Dwarf regs mapping EH
- DenseMap<unsigned, unsigned> Dwarf2LRegs; // Dwarf to LLVM regs mapping
- DenseMap<unsigned, unsigned> EHDwarf2LRegs; // Dwarf to LLVM regs mapping EH
+ const uint16_t *RegLists; // Pointer to the reglists array
+ const uint16_t *SubRegIndices; // Pointer to the subreg lookup
+ // array.
+ unsigned NumSubRegIndices; // Number of subreg indices.
+
+ unsigned L2DwarfRegsSize;
+ unsigned EHL2DwarfRegsSize;
+ unsigned Dwarf2LRegsSize;
+ unsigned EHDwarf2LRegsSize;
+ const DwarfLLVMRegPair *L2DwarfRegs; // LLVM to Dwarf regs mapping
+ const DwarfLLVMRegPair *EHL2DwarfRegs; // LLVM to Dwarf regs mapping EH
+ const DwarfLLVMRegPair *Dwarf2LRegs; // Dwarf to LLVM regs mapping
+ const DwarfLLVMRegPair *EHDwarf2LRegs; // Dwarf to LLVM regs mapping EH
DenseMap<unsigned, int> L2SEHRegs; // LLVM to SEH regs mapping
public:
/// InitMCRegisterInfo - Initialize MCRegisterInfo, called by TableGen
/// auto-generated routines. *DO NOT USE*.
void InitMCRegisterInfo(const MCRegisterDesc *D, unsigned NR, unsigned RA,
- const MCRegisterClass *C, unsigned NC) {
+ const MCRegisterClass *C, unsigned NC,
+ const uint16_t *RL,
+ const uint16_t *SubIndices,
+ unsigned NumIndices) {
Desc = D;
NumRegs = NR;
RAReg = RA;
Classes = C;
+ RegLists = RL;
NumClasses = NC;
+ SubRegIndices = SubIndices;
+ NumSubRegIndices = NumIndices;
}
- /// mapLLVMRegToDwarfReg - Used to initialize LLVM register to Dwarf
+ /// mapLLVMRegsToDwarfRegs - Used to initialize LLVM register to Dwarf
/// register number mapping. Called by TableGen auto-generated routines.
/// *DO NOT USE*.
- void mapLLVMRegToDwarfReg(unsigned LLVMReg, int DwarfReg, bool isEH) {
- if (isEH)
- EHL2DwarfRegs[LLVMReg] = DwarfReg;
- else
- L2DwarfRegs[LLVMReg] = DwarfReg;
+ void mapLLVMRegsToDwarfRegs(const DwarfLLVMRegPair *Map, unsigned Size,
+ bool isEH) {
+ if (isEH) {
+ EHL2DwarfRegs = Map;
+ EHL2DwarfRegsSize = Size;
+ } else {
+ L2DwarfRegs = Map;
+ L2DwarfRegsSize = Size;
+ }
}
-
- /// mapDwarfRegToLLVMReg - Used to initialize Dwarf register to LLVM
+
+ /// mapDwarfRegsToLLVMRegs - Used to initialize Dwarf register to LLVM
/// register number mapping. Called by TableGen auto-generated routines.
/// *DO NOT USE*.
- void mapDwarfRegToLLVMReg(unsigned DwarfReg, unsigned LLVMReg, bool isEH) {
- if (isEH)
- EHDwarf2LRegs[DwarfReg] = LLVMReg;
- else
- Dwarf2LRegs[DwarfReg] = LLVMReg;
+ void mapDwarfRegsToLLVMRegs(const DwarfLLVMRegPair *Map, unsigned Size,
+ bool isEH) {
+ if (isEH) {
+ EHDwarf2LRegs = Map;
+ EHDwarf2LRegsSize = Size;
+ } else {
+ Dwarf2LRegs = Map;
+ Dwarf2LRegsSize = Size;
+ }
}
-
+
/// mapLLVMRegToSEHReg - Used to initialize LLVM register to SEH register
/// number mapping. By default the SEH register number is just the same
/// as the LLVM register number.
@@ -212,9 +235,9 @@ public:
/// register, or a null list if there are none. The list returned is zero
/// terminated.
///
- const unsigned *getAliasSet(unsigned RegNo) const {
+ const uint16_t *getAliasSet(unsigned RegNo) const {
// The Overlaps set always begins with Reg itself.
- return get(RegNo).Overlaps + 1;
+ return RegLists + get(RegNo).Overlaps + 1;
}
/// getOverlaps - Return a list of registers that overlap Reg, including
@@ -222,8 +245,8 @@ public:
/// list.
/// These are exactly the registers in { x | regsOverlap(x, Reg) }.
///
- const unsigned *getOverlaps(unsigned RegNo) const {
- return get(RegNo).Overlaps;
+ const uint16_t *getOverlaps(unsigned RegNo) const {
+ return RegLists + get(RegNo).Overlaps;
}
/// getSubRegisters - Return the list of registers that are sub-registers of
@@ -231,8 +254,35 @@ public:
/// returned is zero terminated and sorted according to super-sub register
/// relations. e.g. X86::RAX's sub-register list is EAX, AX, AL, AH.
///
- const unsigned *getSubRegisters(unsigned RegNo) const {
- return get(RegNo).SubRegs;
+ const uint16_t *getSubRegisters(unsigned RegNo) const {
+ return RegLists + get(RegNo).SubRegs;
+ }
+
+ /// getSubReg - Returns the physical register number of sub-register Idx
+ /// for physical register Reg. Returns zero if the sub-register does not
+ /// exist.
+ unsigned getSubReg(unsigned Reg, unsigned Idx) const {
+ return *(SubRegIndices + (Reg - 1) * NumSubRegIndices + Idx - 1);
+ }
+
+ /// getMatchingSuperReg - Return a super-register of the specified register
+ /// Reg such that its sub-register with index SubIdx is Reg.
+ unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
+ const MCRegisterClass *RC) const {
+ for (const uint16_t *SRs = getSuperRegisters(Reg); unsigned SR = *SRs;++SRs)
+ if (Reg == getSubReg(SR, SubIdx) && RC->contains(SR))
+ return SR;
+ return 0;
+ }
+
+ /// getSubRegIndex - For a given register pair, return the sub-register index
+ /// if the second register is a sub-register of the first. Return zero
+ /// otherwise.
+ unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const {
+ for (unsigned I = 1; I <= NumSubRegIndices; ++I)
+ if (getSubReg(RegNo, I) == SubRegNo)
+ return I;
+ return 0;
}
/// getSuperRegisters - Return the list of registers that are super-registers
@@ -240,8 +290,8 @@ public:
/// returned is zero terminated and sorted according to super-sub register
/// relations. e.g. X86::AL's super-register list is AX, EAX, RAX.
///
- const unsigned *getSuperRegisters(unsigned RegNo) const {
- return get(RegNo).SuperRegs;
+ const uint16_t *getSuperRegisters(unsigned RegNo) const {
+ return RegLists + get(RegNo).SuperRegs;
}
/// getName - Return the human-readable symbolic target-specific name for the
@@ -261,22 +311,26 @@ public:
/// parameter allows targets to use different numberings for EH info and
/// debugging info.
int getDwarfRegNum(unsigned RegNum, bool isEH) const {
- const DenseMap<unsigned, int> &M = isEH ? EHL2DwarfRegs : L2DwarfRegs;
- const DenseMap<unsigned, int>::const_iterator I = M.find(RegNum);
- if (I == M.end()) return -1;
- return I->second;
+ const DwarfLLVMRegPair *M = isEH ? EHL2DwarfRegs : L2DwarfRegs;
+ unsigned Size = isEH ? EHL2DwarfRegsSize : L2DwarfRegsSize;
+
+ DwarfLLVMRegPair Key = { RegNum, 0 };
+ const DwarfLLVMRegPair *I = std::lower_bound(M, M+Size, Key);
+ if (I == M+Size || I->FromReg != RegNum)
+ return -1;
+ return I->ToReg;
}
/// getLLVMRegNum - Map a dwarf register back to a target register.
///
int getLLVMRegNum(unsigned RegNum, bool isEH) const {
- const DenseMap<unsigned, unsigned> &M = isEH ? EHDwarf2LRegs : Dwarf2LRegs;
- const DenseMap<unsigned, unsigned>::const_iterator I = M.find(RegNum);
- if (I == M.end()) {
- assert(0 && "Invalid RegNum");
- return -1;
- }
- return I->second;
+ const DwarfLLVMRegPair *M = isEH ? EHDwarf2LRegs : Dwarf2LRegs;
+ unsigned Size = isEH ? EHDwarf2LRegsSize : Dwarf2LRegsSize;
+
+ DwarfLLVMRegPair Key = { RegNum, 0 };
+ const DwarfLLVMRegPair *I = std::lower_bound(M, M+Size, Key);
+ assert(I != M+Size && I->FromReg == RegNum && "Invalid RegNum");
+ return I->ToReg;
}
/// getSEHRegNum - Map a target register to an equivalent SEH register
@@ -301,7 +355,7 @@ public:
return Classes[i];
}
};
-
+
} // End llvm namespace
#endif
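The Dwarf<->LLVM register maps changed from DenseMaps filled at runtime to static arrays of DwarfLLVMRegPair sorted by FromReg and searched with std::lower_bound. A standalone sketch of the lookup (table contents are hypothetical):

    #include <algorithm>

    struct DwarfLLVMRegPair {
      unsigned FromReg, ToReg;
      bool operator<(DwarfLLVMRegPair RHS) const { return FromReg < RHS.FromReg; }
    };

    // Sorted by FromReg, as TableGen would emit it.
    static const DwarfLLVMRegPair Map[] = { {0, 16}, {3, 1}, {7, 4} };

    static int lookup(unsigned RegNum) {
      DwarfLLVMRegPair Key = { RegNum, 0 };
      const DwarfLLVMRegPair *I = std::lower_bound(Map, Map + 3, Key);
      if (I == Map + 3 || I->FromReg != RegNum)
        return -1;                       // no mapping for this register
      return I->ToReg;
    }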
diff --git a/contrib/llvm/include/llvm/MC/MCSection.h b/contrib/llvm/include/llvm/MC/MCSection.h
index 5700817..7da6534 100644
--- a/contrib/llvm/include/llvm/MC/MCSection.h
+++ b/contrib/llvm/include/llvm/MC/MCSection.h
@@ -14,12 +14,10 @@
#ifndef LLVM_MC_MCSECTION_H
#define LLVM_MC_MCSECTION_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Casting.h"
namespace llvm {
- class MCContext;
class MCAsmInfo;
class raw_ostream;
diff --git a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
index b154cf5..7eacde5 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
@@ -15,8 +15,8 @@
#define LLVM_MC_MCSECTIONCOFF_H
#include "llvm/MC/MCSection.h"
-
#include "llvm/Support/COFF.h"
+#include "llvm/ADT/StringRef.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/MC/MCSectionELF.h b/contrib/llvm/include/llvm/MC/MCSectionELF.h
index c82de71..7321ca8 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionELF.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionELF.h
@@ -16,6 +16,7 @@
#include "llvm/MC/MCSection.h"
#include "llvm/Support/ELF.h"
+#include "llvm/ADT/StringRef.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/MC/MCSectionMachO.h b/contrib/llvm/include/llvm/MC/MCSectionMachO.h
index bdb17e9..15eb4f4 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionMachO.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionMachO.h
@@ -15,6 +15,7 @@
#define LLVM_MC_MCSECTIONMACHO_H
#include "llvm/MC/MCSection.h"
+#include "llvm/ADT/StringRef.h"
namespace llvm {
diff --git a/contrib/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm/include/llvm/MC/MCStreamer.h
index 451efbf..2595600 100644
--- a/contrib/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCStreamer.h
@@ -23,7 +23,6 @@
namespace llvm {
class MCAsmBackend;
- class MCAsmInfo;
class MCCodeEmitter;
class MCContext;
class MCExpr;
@@ -32,7 +31,6 @@ namespace llvm {
class MCSection;
class MCSymbol;
class StringRef;
- class TargetLoweringObjectFile;
class Twine;
class raw_ostream;
class formatted_raw_ostream;
@@ -94,6 +92,10 @@ namespace llvm {
const MCExpr *ForceExpAbs(const MCExpr* Expr);
+ void RecordProcStart(MCDwarfFrameInfo &Frame);
+ virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
+ void RecordProcEnd(MCDwarfFrameInfo &Frame);
+ virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &CurFrame);
void EmitFrames(bool usingCFI);
MCWin64EHUnwindInfo *getCurrentW64UnwindInfo(){return CurrentW64UnwindInfo;}
@@ -334,6 +336,11 @@ namespace llvm {
/// EndCOFFSymbolDef - Marks the end of the symbol definition.
virtual void EndCOFFSymbolDef() = 0;
+ /// EmitCOFFSecRel32 - Emits a COFF section relative relocation.
+ ///
+ /// @param Symbol - Symbol the section relative relocation should point to.
+ virtual void EmitCOFFSecRel32(MCSymbol const *Symbol);
+
/// EmitELFSize - Emit an ELF .size directive.
///
/// This corresponds to an assembler statement such as:
@@ -420,7 +427,8 @@ namespace llvm {
/// EmitULEB128Value - Special case of EmitULEB128Value that avoids the
/// client having to pass in a MCExpr for constant integers.
- void EmitULEB128IntValue(uint64_t Value, unsigned AddrSpace = 0);
+ void EmitULEB128IntValue(uint64_t Value, unsigned AddrSpace = 0,
+ unsigned Padding = 0);
/// EmitSLEB128Value - Special case of EmitSLEB128Value that avoids the
/// client having to pass in a MCExpr for constant integers.
@@ -431,6 +439,13 @@ namespace llvm {
void EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
unsigned AddrSpace = 0);
+ /// EmitGPRel64Value - Emit the expression @p Value into the output as a
+ /// gprel64 (64-bit GP relative) value.
+ ///
+ /// This is used to implement assembler directives such as .gpdword on
+ /// targets that support them.
+ virtual void EmitGPRel64Value(const MCExpr *Value);
+
/// EmitGPRel32Value - Emit the expression @p Value into the output as a
/// gprel32 (32-bit GP relative) value.
///
@@ -493,7 +508,8 @@ namespace llvm {
/// @param Offset - The offset to reach. This may be an expression, but the
/// expression must be associated with the current section.
/// @param Value - The value to use when filling bytes.
- virtual void EmitValueToOffset(const MCExpr *Offset,
+ /// @return false on success, true if the offset was invalid.
+ virtual bool EmitValueToOffset(const MCExpr *Offset,
unsigned char Value = 0) = 0;
/// @}
@@ -505,7 +521,8 @@ namespace llvm {
/// EmitDwarfFileDirective - Associate a filename with a specified logical
/// file number. This implements the DWARF2 '.file 4 "foo.c"' assembler
/// directive.
- virtual bool EmitDwarfFileDirective(unsigned FileNo,StringRef Filename);
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+ StringRef Filename);
/// EmitDwarfLocDirective - This implements the DWARF2
// '.loc fileno lineno ...' assembler directive.
@@ -529,8 +546,8 @@ namespace llvm {
virtual void EmitCompactUnwindEncoding(uint32_t CompactUnwindEncoding);
virtual void EmitCFISections(bool EH, bool Debug);
- virtual void EmitCFIStartProc();
- virtual void EmitCFIEndProc();
+ void EmitCFIStartProc();
+ void EmitCFIEndProc();
virtual void EmitCFIDefCfa(int64_t Register, int64_t Offset);
virtual void EmitCFIDefCfaOffset(int64_t Offset);
virtual void EmitCFIDefCfaRegister(int64_t Register);
@@ -540,8 +557,11 @@ namespace llvm {
virtual void EmitCFIRememberState();
virtual void EmitCFIRestoreState();
virtual void EmitCFISameValue(int64_t Register);
+ virtual void EmitCFIRestore(int64_t Register);
virtual void EmitCFIRelOffset(int64_t Register, int64_t Offset);
virtual void EmitCFIAdjustCfaOffset(int64_t Adjustment);
+ virtual void EmitCFIEscape(StringRef Values);
+ virtual void EmitCFISignalFrame();
virtual void EmitWin64EHStartProc(const MCSymbol *Symbol);
virtual void EmitWin64EHEndProc();
@@ -581,8 +601,10 @@ namespace llvm {
virtual void EmitRegSave(const SmallVectorImpl<unsigned> &RegList,
bool isVector);
+ /// FinishImpl - Streamer-specific finalization.
+ virtual void FinishImpl() = 0;
/// Finish - Finish emission of machine code.
- virtual void Finish() = 0;
+ void Finish();
};
/// createNullStreamer - Create a dummy machine code streamer, which does
@@ -613,6 +635,7 @@ namespace llvm {
bool isVerboseAsm,
bool useLoc,
bool useCFI,
+ bool useDwarfDirectory,
MCInstPrinter *InstPrint = 0,
MCCodeEmitter *CE = 0,
MCAsmBackend *TAB = 0,
@@ -638,14 +661,8 @@ namespace llvm {
/// createELFStreamer - Create a machine code streamer which will generate
/// ELF format object files.
MCStreamer *createELFStreamer(MCContext &Ctx, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *CE,
- bool RelaxAll, bool NoExecStack);
-
- /// createLoggingStreamer - Create a machine code streamer which just logs the
- /// API calls and then dispatches to another streamer.
- ///
- /// The new streamer takes ownership of the \arg Child.
- MCStreamer *createLoggingStreamer(MCStreamer *Child, raw_ostream &OS);
+ raw_ostream &OS, MCCodeEmitter *CE,
+ bool RelaxAll, bool NoExecStack);
/// createPureStreamer - Create a machine code streamer which will generate
/// "pure" MC object files, for use with MC-JIT and testing tools.
diff --git a/contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h b/contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h
new file mode 100644
index 0000000..7a0b1ff
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h
@@ -0,0 +1,36 @@
+//===-- llvm/MC/MCWinCOFFObjectWriter.h - Win COFF Object Writer *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWINCOFFOBJECTWRITER_H
+#define LLVM_MC_MCWINCOFFOBJECTWRITER_H
+
+namespace llvm {
+ class MCObjectWriter;
+ class raw_ostream;
+
+ class MCWinCOFFObjectTargetWriter {
+ const unsigned Machine;
+
+ protected:
+ MCWinCOFFObjectTargetWriter(unsigned Machine_);
+
+ public:
+ virtual ~MCWinCOFFObjectTargetWriter() {}
+
+ unsigned getMachine() const { return Machine; }
+ virtual unsigned getRelocType(unsigned FixupKind) const = 0;
+ };
+
+ /// \brief Construct a new Win COFF writer instance.
+ ///
+ /// \param MOTW - The target specific WinCOFF writer subclass.
+ /// \param OS - The stream to write to.
+ /// \returns The constructed object writer.
+ MCObjectWriter *createWinCOFFObjectWriter(MCWinCOFFObjectTargetWriter *MOTW,
+ raw_ostream &OS);
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Metadata.h b/contrib/llvm/include/llvm/Metadata.h
index 887e33c..7357986 100644
--- a/contrib/llvm/include/llvm/Metadata.h
+++ b/contrib/llvm/include/llvm/Metadata.h
@@ -36,30 +36,27 @@ template<typename ValueSubClass, typename ItemParentClass>
/// These are used to efficiently contain a byte sequence for metadata.
/// MDString is always unnamed.
class MDString : public Value {
+ virtual void anchor();
MDString(const MDString &); // DO NOT IMPLEMENT
- StringRef Str;
- explicit MDString(LLVMContext &C, StringRef S);
-
+ explicit MDString(LLVMContext &C);
public:
static MDString *get(LLVMContext &Context, StringRef Str);
static MDString *get(LLVMContext &Context, const char *Str) {
return get(Context, Str ? StringRef(Str) : StringRef());
}
- StringRef getString() const { return Str; }
+ StringRef getString() const { return getName(); }
- unsigned getLength() const { return (unsigned)Str.size(); }
+ unsigned getLength() const { return (unsigned)getName().size(); }
typedef StringRef::iterator iterator;
/// begin() - Pointer to the first byte of the string.
- ///
- iterator begin() const { return Str.begin(); }
+ iterator begin() const { return getName().begin(); }
/// end() - Pointer to one byte past the end of the string.
- ///
- iterator end() const { return Str.end(); }
+ iterator end() const { return getName().end(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MDString *) { return true; }
@@ -78,6 +75,10 @@ class MDNode : public Value, public FoldingSetNode {
void operator=(const MDNode &); // DO NOT IMPLEMENT
friend class MDNodeOperand;
friend class LLVMContextImpl;
+ friend struct FoldingSetTrait<MDNode>;
+
+ /// Hash - If the MDNode is uniqued cache the hash to speed up lookup.
+ unsigned Hash;
/// NumOperands - This many 'MDNodeOperand' items are co-allocated onto the
/// end of this MDNode.
@@ -134,6 +135,9 @@ public:
/// deleteTemporary - Deallocate a node created by getTemporary. The
/// node must not have any users.
static void deleteTemporary(MDNode *N);
+
+ /// replaceOperandWith - Replace a specific operand.
+ void replaceOperandWith(unsigned i, Value *NewVal);
/// getOperand - Return specified operand.
Value *getOperand(unsigned i) const;
@@ -225,6 +229,9 @@ public:
/// print - Implement operator<< on NamedMDNode.
void print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW = 0) const;
+
+ /// dump() - Allow printing of NamedMDNodes from the debugger.
+ void dump() const;
};
} // end llvm namespace
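MDNode picks up replaceOperandWith() for in-place operand updates and a cached hash for uniquing. A usage sketch (Ctx, Old, and New are assumed to be a live LLVMContext and valid Values; MDNode::get is the existing uniquing constructor, not part of this hunk):

    MDString *Key = MDString::get(Ctx, "some.annotation");
    Value *Ops[] = { Key, Old };
    MDNode *N = MDNode::get(Ctx, Ops);
    N->replaceOperandWith(1, New);     // swap the second operand in place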
diff --git a/contrib/llvm/include/llvm/Module.h b/contrib/llvm/include/llvm/Module.h
index 8ce5ec4..b9c9881 100644
--- a/contrib/llvm/include/llvm/Module.h
+++ b/contrib/llvm/include/llvm/Module.h
@@ -30,8 +30,7 @@ class GVMaterializer;
class LLVMContext;
class StructType;
template<typename T> struct DenseMapInfo;
-template<typename KeyT, typename ValueT,
- typename KeyInfoT, typename ValueInfoT> class DenseMap;
+template<typename KeyT, typename ValueT, typename KeyInfoT> class DenseMap;
template<> struct ilist_traits<Function>
: public SymbolTableListTraits<Function, Module> {
@@ -154,6 +153,39 @@ public:
/// An enumeration for describing the size of a pointer on the target machine.
enum PointerSize { AnyPointerSize, Pointer32, Pointer64 };
+ /// An enumeration for the supported behaviors of module flags. The following
+ /// module flags behavior values are supported:
+ ///
+ /// Value Behavior
+ /// ----- --------
+ /// 1 Error
+ /// Emits an error if two values disagree.
+ ///
+ /// 2 Warning
+ /// Emits a warning if two values disagree.
+ ///
+ /// 3 Require
+ /// Emits an error when the specified value is not present
+ /// or doesn't have the specified value. It is an error for
+ /// two (or more) llvm.module.flags with the same ID to have
+ /// the Require behavior but different values. There may be
+ /// multiple Require flags per ID.
+ ///
+ /// 4 Override
+ /// Uses the specified value if the two values disagree. It
+ /// is an error for two (or more) llvm.module.flags with the
+ /// same ID to have the Override behavior but different
+ /// values.
+ enum ModFlagBehavior { Error = 1, Warning = 2, Require = 3, Override = 4 };
+
+ struct ModuleFlagEntry {
+ ModFlagBehavior Behavior;
+ MDString *Key;
+ Value *Val;
+ ModuleFlagEntry(ModFlagBehavior B, MDString *K, Value *V)
+ : Behavior(B), Key(K), Val(V) {}
+ };
+
/// @}
/// @name Member Variables
/// @{
@@ -266,8 +298,8 @@ public:
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
- typedef DenseMap<StructType*, unsigned, DenseMapInfo<StructType*>,
- DenseMapInfo<unsigned> > NumeredTypesMapTy;
+ typedef DenseMap<StructType*, unsigned, DenseMapInfo<StructType*> >
+ NumeredTypesMapTy;
/// findUsedStructTypes - Walk the entire module and find all of the
/// struct types that are in use, returning them in a vector.
@@ -373,6 +405,30 @@ public:
void eraseNamedMetadata(NamedMDNode *NMD);
/// @}
+/// @name Module Flags Accessors
+/// @{
+
+ /// getModuleFlagsMetadata - Returns the module flags in the provided vector.
+ void getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const;
+
+ /// getModuleFlagsMetadata - Returns the NamedMDNode in the module that
+ /// represents module-level flags. This method returns null if there are no
+ /// module-level flags.
+ NamedMDNode *getModuleFlagsMetadata() const;
+
+ /// getOrInsertModuleFlagsMetadata - Returns the NamedMDNode in the module
+ /// that represents module-level flags. If module-level flags aren't found,
+ /// it creates the named metadata that contains them.
+ NamedMDNode *getOrInsertModuleFlagsMetadata();
+
+ /// addModuleFlag - Add a module-level flag to the module-level flags
+ /// metadata. It will create the module-level flags named metadata if it
+ /// doesn't already exist.
+ void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Value *Val);
+ void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val);
+ void addModuleFlag(MDNode *Node);
+
+/// @}
/// @name Materialization
/// @{
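The module-flag behaviors in the table above are driven through the new accessors. A usage sketch, assuming M is a valid Module (the key and value are examples only):

    M.addModuleFlag(Module::Error, "wchar_size", 4);
    // Two modules carrying the same ID with different values now conflict;
    // Module::Override would instead force this value to win.
    NamedMDNode *Flags = M.getModuleFlagsMetadata();  // null if none added
    (void)Flags;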
diff --git a/contrib/llvm/include/llvm/Object/Archive.h b/contrib/llvm/include/llvm/Object/Archive.h
index 4f08120..358b27a 100644
--- a/contrib/llvm/include/llvm/Object/Archive.h
+++ b/contrib/llvm/include/llvm/Object/Archive.h
@@ -22,6 +22,7 @@ namespace llvm {
namespace object {
class Archive : public Binary {
+ virtual void anchor();
public:
class Child {
const Archive *Parent;
@@ -34,6 +35,10 @@ public:
return (Parent == other.Parent) && (Data.begin() == other.Data.begin());
}
+ bool operator <(const Child &other) const {
+ return Data.begin() < other.Data.begin();
+ }
+
Child getNext() const;
error_code getName(StringRef &Result) const;
int getLastModified() const;
@@ -50,6 +55,7 @@ public:
class child_iterator {
Child child;
public:
+ child_iterator() : child(Child(0, StringRef())) {}
child_iterator(const Child &c) : child(c) {}
const Child* operator->() const {
return &child;
@@ -63,24 +69,73 @@ public:
return !(*this == other);
}
+ bool operator <(const child_iterator &other) const {
+ return child < other.child;
+ }
+
child_iterator& operator++() { // Preincrement
child = child.getNext();
return *this;
}
};
+ class Symbol {
+ const Archive *Parent;
+ uint32_t SymbolIndex;
+ uint32_t StringIndex; // Extra index to the string.
+
+ public:
+ bool operator ==(const Symbol &other) const {
+ return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex);
+ }
+
+ Symbol(const Archive *p, uint32_t symi, uint32_t stri)
+ : Parent(p)
+ , SymbolIndex(symi)
+ , StringIndex(stri) {}
+ error_code getName(StringRef &Result) const;
+ error_code getMember(child_iterator &Result) const;
+ Symbol getNext() const;
+ };
+
+ class symbol_iterator {
+ Symbol symbol;
+ public:
+ symbol_iterator(const Symbol &s) : symbol(s) {}
+ const Symbol *operator->() const {
+ return &symbol;
+ }
+
+ bool operator==(const symbol_iterator &other) const {
+ return symbol == other.symbol;
+ }
+
+ bool operator!=(const symbol_iterator &other) const {
+ return !(*this == other);
+ }
+
+ symbol_iterator& operator++() { // Preincrement
+ symbol = symbol.getNext();
+ return *this;
+ }
+ };
+
Archive(MemoryBuffer *source, error_code &ec);
- child_iterator begin_children() const;
+ child_iterator begin_children(bool skip_internal = true) const;
child_iterator end_children() const;
+ symbol_iterator begin_symbols() const;
+ symbol_iterator end_symbols() const;
+
// Cast methods.
static inline bool classof(Archive const *v) { return true; }
static inline bool classof(Binary const *v) {
- return v->getType() == Binary::isArchive;
+ return v->isArchive();
}
private:
+ child_iterator SymbolTable;
child_iterator StringTable;
};
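The archive reader now exposes its symbol table directly. A usage sketch of walking it, assuming A is a valid Archive:

    for (Archive::symbol_iterator I = A.begin_symbols(), E = A.end_symbols();
         I != E; ++I) {
      StringRef Name;
      if (I->getName(Name))
        continue;                      // nonzero error_code: skip this entry
      Archive::child_iterator Member;  // default-constructible as of this change
      if (!I->getMember(Member)) {
        // Member designates the archive child that defines Name.
      }
    }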
diff --git a/contrib/llvm/include/llvm/Object/Binary.h b/contrib/llvm/include/llvm/Object/Binary.h
index cd092fd..77a08d5 100644
--- a/contrib/llvm/include/llvm/Object/Binary.h
+++ b/contrib/llvm/include/llvm/Object/Binary.h
@@ -37,16 +37,25 @@ protected:
Binary(unsigned int Type, MemoryBuffer *Source);
enum {
- isArchive,
-
+ ID_Archive,
// Object and children.
- isObject,
- isCOFF,
- isELF,
- isMachO,
- lastObject
+ ID_StartObjects,
+ ID_COFF,
+ ID_ELF32L, // ELF 32-bit, little endian
+ ID_ELF32B, // ELF 32-bit, big endian
+ ID_ELF64L, // ELF 64-bit, little endian
+ ID_ELF64B, // ELF 64-bit, big endian
+ ID_MachO,
+ ID_EndObjects
};
+ static inline unsigned int getELFType(bool isLittleEndian, bool is64Bits) {
+ if (isLittleEndian)
+ return is64Bits ? ID_ELF64L : ID_ELF32L;
+ else
+ return is64Bits ? ID_ELF64B : ID_ELF32B;
+ }
+
public:
virtual ~Binary();
@@ -56,9 +65,37 @@ public:
// Cast methods.
unsigned int getType() const { return TypeID; }
static inline bool classof(const Binary *v) { return true; }
+
+ // Convenience methods
+ bool isObject() const {
+ return TypeID > ID_StartObjects && TypeID < ID_EndObjects;
+ }
+
+ bool isArchive() const {
+ return TypeID == ID_Archive;
+ }
+
+ bool isELF() const {
+ return TypeID >= ID_ELF32L && TypeID <= ID_ELF64B;
+ }
+
+ bool isMachO() const {
+ return TypeID == ID_MachO;
+ }
+
+ bool isCOFF() const {
+ return TypeID == ID_COFF;
+ }
};
+/// @brief Create a Binary from Source, autodetecting the file type.
+///
+/// @param Source The data to create the Binary from. Ownership is transferred
+/// to Result if successful. If an error is returned, Source is destroyed
+/// by createBinary before returning.
+/// @param Result A pointer to the resulting Binary if no error occurred.
error_code createBinary(MemoryBuffer *Source, OwningPtr<Binary> &Result);
+
error_code createBinary(StringRef Path, OwningPtr<Binary> &Result);
}
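With the finer-grained type IDs, callers can classify a file without caring which ELF flavor it is. A usage sketch, assuming the path names a readable file:

    OwningPtr<Binary> Bin;
    if (error_code ec = createBinary("/path/to/file", Bin)) {
      // could not read or identify the file
    } else if (Bin->isArchive()) {
      // static archive
    } else if (Bin->isObject()) {
      // all four ELF IDs sit inside the range isObject()/isELF() test;
      // refine with Bin->isELF(), Bin->isCOFF(), Bin->isMachO()
    }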
diff --git a/contrib/llvm/include/llvm/Object/COFF.h b/contrib/llvm/include/llvm/Object/COFF.h
index 067bcd4..68b5ca1 100644
--- a/contrib/llvm/include/llvm/Object/COFF.h
+++ b/contrib/llvm/include/llvm/Object/COFF.h
@@ -19,6 +19,9 @@
#include "llvm/Support/Endian.h"
namespace llvm {
+ template <typename T>
+ class ArrayRef;
+
namespace object {
struct coff_file_header {
@@ -45,13 +48,18 @@ struct coff_symbol {
support::ulittle32_t Value;
support::little16_t SectionNumber;
- struct {
- support::ulittle8_t BaseType;
- support::ulittle8_t ComplexType;
- } Type;
+ support::ulittle16_t Type;
support::ulittle8_t StorageClass;
support::ulittle8_t NumberOfAuxSymbols;
+
+ uint8_t getBaseType() const {
+ return Type & 0x0F;
+ }
+
+ uint8_t getComplexType() const {
+ return (Type & 0xF0) >> 4;
+ }
};
struct coff_section {
@@ -73,6 +81,16 @@ struct coff_relocation {
support::ulittle16_t Type;
};
+struct coff_aux_section_definition {
+ support::ulittle32_t Length;
+ support::ulittle16_t NumberOfRelocations;
+ support::ulittle16_t NumberOfLinenumbers;
+ support::ulittle32_t CheckSum;
+ support::ulittle16_t Number;
+ support::ulittle8_t Selection;
+ char Unused[3];
+};
+
class COFFObjectFile : public ObjectFile {
private:
const coff_file_header *Header;
@@ -81,11 +99,7 @@ private:
const char *StringTable;
uint32_t StringTableSize;
- error_code getSection(int32_t index,
- const coff_section *&Res) const;
error_code getString(uint32_t offset, StringRef &Res) const;
- error_code getSymbol(uint32_t index,
- const coff_symbol *&Res) const;
const coff_symbol *toSymb(DataRefImpl Symb) const;
const coff_section *toSec(DataRefImpl Sec) const;
@@ -94,13 +108,14 @@ private:
protected:
virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const;
virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const;
- virtual error_code getSymbolOffset(DataRefImpl Symb, uint64_t &Res) const;
+ virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res) const;
virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const;
virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const;
virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const;
- virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const;
- virtual error_code isSymbolGlobal(DataRefImpl Symb, bool &Res) const;
- virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::SymbolType &Res) const;
+ virtual error_code getSymbolFlags(DataRefImpl Symb, uint32_t &Res) const;
+ virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
+ virtual error_code getSymbolSection(DataRefImpl Symb,
+ section_iterator &Res) const;
virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
@@ -111,6 +126,10 @@ protected:
virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionData(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionRequiredForExecution(DataRefImpl Sec,
+ bool &Res) const;
virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
bool &Result) const;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const;
@@ -120,10 +139,12 @@ protected:
RelocationRef &Res) const;
virtual error_code getRelocationAddress(DataRefImpl Rel,
uint64_t &Res) const;
+ virtual error_code getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Res) const;
virtual error_code getRelocationSymbol(DataRefImpl Rel,
SymbolRef &Res) const;
virtual error_code getRelocationType(DataRefImpl Rel,
- uint32_t &Res) const;
+ uint64_t &Res) const;
virtual error_code getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const;
virtual error_code getRelocationAdditionalInfo(DataRefImpl Rel,
@@ -131,16 +152,46 @@ protected:
virtual error_code getRelocationValueString(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const;
+ virtual error_code getLibraryNext(DataRefImpl LibData,
+ LibraryRef &Result) const;
+ virtual error_code getLibraryPath(DataRefImpl LibData,
+ StringRef &Result) const;
+
public:
COFFObjectFile(MemoryBuffer *Object, error_code &ec);
virtual symbol_iterator begin_symbols() const;
virtual symbol_iterator end_symbols() const;
+ virtual symbol_iterator begin_dynamic_symbols() const;
+ virtual symbol_iterator end_dynamic_symbols() const;
+ virtual library_iterator begin_libraries_needed() const;
+ virtual library_iterator end_libraries_needed() const;
virtual section_iterator begin_sections() const;
virtual section_iterator end_sections() const;
virtual uint8_t getBytesInAddress() const;
virtual StringRef getFileFormatName() const;
virtual unsigned getArch() const;
+ virtual StringRef getLoadName() const;
+
+ error_code getHeader(const coff_file_header *&Res) const;
+ error_code getSection(int32_t index, const coff_section *&Res) const;
+ error_code getSymbol(uint32_t index, const coff_symbol *&Res) const;
+ template <typename T>
+ error_code getAuxSymbol(uint32_t index, const T *&Res) const {
+ const coff_symbol *s;
+ error_code ec = getSymbol(index, s);
+ Res = reinterpret_cast<const T*>(s);
+ return ec;
+ }
+ error_code getSymbolName(const coff_symbol *symbol, StringRef &Res) const;
+ error_code getSectionName(const coff_section *Sec, StringRef &Res) const;
+ error_code getSectionContents(const coff_section *Sec,
+ ArrayRef<uint8_t> &Res) const;
+
+ static inline bool classof(const Binary *v) {
+ return v->isCOFF();
+ }
+ static inline bool classof(const COFFObjectFile *v) { return true; }
};
}
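
A plausible use of the new getAuxSymbol<> helper (sketch only: Obj and I are hypothetical, and the caller is responsible for knowing that an auxiliary record of type T follows the symbol at index I):

    const coff_symbol *Sym;
    if (error_code ec = Obj.getSymbol(I, Sym))
      return ec;
    if (Sym->NumberOfAuxSymbols > 0) {
      const coff_aux_section_definition *Aux;
      if (error_code ec = Obj.getAuxSymbol(I + 1, Aux))
        return ec;
      // Aux->Length, Aux->Number, and Aux->Selection are now readable.
    }
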
diff --git a/contrib/llvm/include/llvm/Object/ELF.h b/contrib/llvm/include/llvm/Object/ELF.h
new file mode 100644
index 0000000..0828985
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/ELF.h
@@ -0,0 +1,2209 @@
+//===- ELF.h - ELF object file implementation -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ELFObjectFile template class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ELF_H
+#define LLVM_OBJECT_ELF_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+namespace object {
+
+// Templates to choose Elf_Addr and Elf_Off depending on is64Bits.
+template<support::endianness target_endianness>
+struct ELFDataTypeTypedefHelperCommon {
+ typedef support::detail::packed_endian_specific_integral
+ <uint16_t, target_endianness, support::aligned> Elf_Half;
+ typedef support::detail::packed_endian_specific_integral
+ <uint32_t, target_endianness, support::aligned> Elf_Word;
+ typedef support::detail::packed_endian_specific_integral
+ <int32_t, target_endianness, support::aligned> Elf_Sword;
+ typedef support::detail::packed_endian_specific_integral
+ <uint64_t, target_endianness, support::aligned> Elf_Xword;
+ typedef support::detail::packed_endian_specific_integral
+ <int64_t, target_endianness, support::aligned> Elf_Sxword;
+};
+
+template<support::endianness target_endianness, bool is64Bits>
+struct ELFDataTypeTypedefHelper;
+
+/// ELF 32bit types.
+template<support::endianness target_endianness>
+struct ELFDataTypeTypedefHelper<target_endianness, false>
+ : ELFDataTypeTypedefHelperCommon<target_endianness> {
+ typedef uint32_t value_type;
+ typedef support::detail::packed_endian_specific_integral
+ <value_type, target_endianness, support::aligned> Elf_Addr;
+ typedef support::detail::packed_endian_specific_integral
+ <value_type, target_endianness, support::aligned> Elf_Off;
+};
+
+/// ELF 64bit types.
+template<support::endianness target_endianness>
+struct ELFDataTypeTypedefHelper<target_endianness, true>
+ : ELFDataTypeTypedefHelperCommon<target_endianness>{
+ typedef uint64_t value_type;
+ typedef support::detail::packed_endian_specific_integral
+ <value_type, target_endianness, support::aligned> Elf_Addr;
+ typedef support::detail::packed_endian_specific_integral
+ <value_type, target_endianness, support::aligned> Elf_Off;
+};
+
+// I really don't like doing this, but the alternative is copypasta.
+#define LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits) \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Addr Elf_Addr; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Off Elf_Off; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Half Elf_Half; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Word Elf_Word; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Sword Elf_Sword; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Xword Elf_Xword; \
+typedef typename \
+ ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Sxword Elf_Sxword;
+
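
Concretely, the helper plus macro pair boils down to typedefs like these (illustration only, not in the header):

    typedef ELFDataTypeTypedefHelper<support::little, true> ELF64LE;
    typedef ELF64LE::Elf_Addr Addr64LE; // endian-aware uint64_t
    typedef ELF64LE::Elf_Half Half64LE; // endian-aware uint16_t
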
+ // Section header.
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Shdr_Base;
+
+template<support::endianness target_endianness>
+struct Elf_Shdr_Base<target_endianness, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Word sh_name; // Section name (index into string table)
+ Elf_Word sh_type; // Section type (SHT_*)
+ Elf_Word sh_flags; // Section flags (SHF_*)
+ Elf_Addr sh_addr; // Address where section is to be loaded
+ Elf_Off sh_offset; // File offset of section data, in bytes
+ Elf_Word sh_size; // Size of section, in bytes
+ Elf_Word sh_link; // Section type-specific header table index link
+ Elf_Word sh_info; // Section type-specific extra information
+ Elf_Word sh_addralign;// Section address alignment
+ Elf_Word sh_entsize; // Size of records contained within the section
+};
+
+template<support::endianness target_endianness>
+struct Elf_Shdr_Base<target_endianness, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Word sh_name; // Section name (index into string table)
+ Elf_Word sh_type; // Section type (SHT_*)
+ Elf_Xword sh_flags; // Section flags (SHF_*)
+ Elf_Addr sh_addr; // Address where section is to be loaded
+ Elf_Off sh_offset; // File offset of section data, in bytes
+ Elf_Xword sh_size; // Size of section, in bytes
+ Elf_Word sh_link; // Section type-specific header table index link
+ Elf_Word sh_info; // Section type-specific extra information
+ Elf_Xword sh_addralign;// Section address alignment
+ Elf_Xword sh_entsize; // Size of records contained within the section
+};
+
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Shdr_Impl : Elf_Shdr_Base<target_endianness, is64Bits> {
+ using Elf_Shdr_Base<target_endianness, is64Bits>::sh_entsize;
+ using Elf_Shdr_Base<target_endianness, is64Bits>::sh_size;
+
+ /// @brief Get the number of entities this section contains if it has any.
+ unsigned getEntityCount() const {
+ if (sh_entsize == 0)
+ return 0;
+ return sh_size / sh_entsize;
+ }
+};
+
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Sym_Base;
+
+template<support::endianness target_endianness>
+struct Elf_Sym_Base<target_endianness, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Word st_name; // Symbol name (index into string table)
+ Elf_Addr st_value; // Value or address associated with the symbol
+ Elf_Word st_size; // Size of the symbol
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf_Half st_shndx; // Which section (header table index) it's defined in
+};
+
+template<support::endianness target_endianness>
+struct Elf_Sym_Base<target_endianness, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Word st_name; // Symbol name (index into string table)
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf_Half st_shndx; // Which section (header table index) it's defined in
+ Elf_Addr st_value; // Value or address associated with the symbol
+ Elf_Xword st_size; // Size of the symbol
+};
+
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Sym_Impl : Elf_Sym_Base<target_endianness, is64Bits> {
+ using Elf_Sym_Base<target_endianness, is64Bits>::st_info;
+
+ // These accessors and mutators correspond to the ELF32_ST_BIND,
+ // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
+ st_info = (b << 4) + (t & 0x0f);
+ }
+};
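
These match the spec's ELF32_ST_INFO encoding; worked out by hand (sketch, using enum values from llvm/Support/ELF.h):

    unsigned char st_info = (ELF::STB_GLOBAL << 4) + (ELF::STT_FUNC & 0x0f);
    unsigned char Binding = st_info >> 4;   // ELF::STB_GLOBAL == 1
    unsigned char SymType = st_info & 0x0f; // ELF::STT_FUNC   == 2
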
+
+/// Elf_Versym: This is the structure of entries in the SHT_GNU_versym section
+/// (.gnu.version). This structure is identical for ELF32 and ELF64.
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Versym_Impl {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ Elf_Half vs_index; // Version index with flags (e.g. VERSYM_HIDDEN)
+};
+
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Verdaux_Impl;
+
+/// Elf_Verdef: This is the structure of entries in the SHT_GNU_verdef section
+/// (.gnu.version_d). This structure is identical for ELF32 and ELF64.
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Verdef_Impl {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ typedef Elf_Verdaux_Impl<target_endianness, is64Bits> Elf_Verdaux;
+ Elf_Half vd_version; // Version of this structure (e.g. VER_DEF_CURRENT)
+ Elf_Half vd_flags; // Bitwise flags (VER_DEF_*)
+ Elf_Half vd_ndx; // Version index, used in .gnu.version entries
+ Elf_Half vd_cnt; // Number of Verdaux entries
+ Elf_Word vd_hash; // Hash of name
+ Elf_Word vd_aux; // Offset to the first Verdaux entry (in bytes)
+ Elf_Word vd_next; // Offset to the next Verdef entry (in bytes)
+
+ /// Get the first Verdaux entry for this Verdef.
+ const Elf_Verdaux *getAux() const {
+ return reinterpret_cast<const Elf_Verdaux*>((const char*)this + vd_aux);
+ }
+};
+
+/// Elf_Verdaux: This is the structure of auxiliary data in the SHT_GNU_verdef
+/// section (.gnu.version_d). This structure is identical for ELF32 and ELF64.
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Verdaux_Impl {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ Elf_Word vda_name; // Version name (offset in string table)
+ Elf_Word vda_next; // Offset to next Verdaux entry (in bytes)
+};
+
+/// Elf_Verneed: This is the structure of entries in the SHT_GNU_verneed
+/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Verneed_Impl {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ Elf_Half vn_version; // Version of this structure (e.g. VER_NEED_CURRENT)
+ Elf_Half vn_cnt; // Number of associated Vernaux entries
+ Elf_Word vn_file; // Library name (string table offset)
+ Elf_Word vn_aux; // Offset to first Vernaux entry (in bytes)
+ Elf_Word vn_next; // Offset to next Verneed entry (in bytes)
+};
+
+/// Elf_Vernaux: This is the structure of auxiliary data in SHT_GNU_verneed
+/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Vernaux_Impl {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ Elf_Word vna_hash; // Hash of dependency name
+ Elf_Half vna_flags; // Bitwise Flags (VER_FLAG_*)
+ Elf_Half vna_other; // Version index, used in .gnu.version entries
+ Elf_Word vna_name; // Dependency name
+ Elf_Word vna_next; // Offset to next Vernaux entry (in bytes)
+};
+
+/// Elf_Dyn_Base: This structure matches the form of entries in the dynamic
+/// table section (.dynamic).
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Dyn_Base;
+
+template<support::endianness target_endianness>
+struct Elf_Dyn_Base<target_endianness, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Sword d_tag;
+ union {
+ Elf_Word d_val;
+ Elf_Addr d_ptr;
+ } d_un;
+};
+
+template<support::endianness target_endianness>
+struct Elf_Dyn_Base<target_endianness, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Sxword d_tag;
+ union {
+ Elf_Xword d_val;
+ Elf_Addr d_ptr;
+ } d_un;
+};
+
+/// Elf_Dyn_Impl: This inherits from Elf_Dyn_Base, adding getters and setters.
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Dyn_Impl : Elf_Dyn_Base<target_endianness, is64Bits> {
+ using Elf_Dyn_Base<target_endianness, is64Bits>::d_tag;
+ using Elf_Dyn_Base<target_endianness, is64Bits>::d_un;
+ int64_t getTag() const { return d_tag; }
+ uint64_t getVal() const { return d_un.d_val; }
+ uint64_t getPtr() const { return d_un.d_ptr; }
+};
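
A usage sketch of the accessors (Dyn and DynEnd are hypothetical pointers bounding a mapped .dynamic section; the DT_* tags come from llvm/Support/ELF.h):

    typedef Elf_Dyn_Impl<support::little, true> Elf_Dyn64LE;
    uint64_t SonameOffset = 0;
    for (const Elf_Dyn64LE *D = Dyn; D < DynEnd; ++D) {
      if (D->getTag() == ELF::DT_NULL)
        break;                          // DT_NULL terminates the table.
      if (D->getTag() == ELF::DT_SONAME)
        SonameOffset = D->getVal();     // offset into .dynstr
    }
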
+
+template<support::endianness target_endianness, bool is64Bits>
+class ELFObjectFile;
+
+// DynRefImpl: Reference to an entry in the dynamic table
+// This is an ELF-specific interface.
+template<support::endianness target_endianness, bool is64Bits>
+class DynRefImpl {
+ typedef Elf_Dyn_Impl<target_endianness, is64Bits> Elf_Dyn;
+ typedef ELFObjectFile<target_endianness, is64Bits> OwningType;
+
+ DataRefImpl DynPimpl;
+ const OwningType *OwningObject;
+
+public:
+ DynRefImpl() : OwningObject(NULL) { }
+
+ DynRefImpl(DataRefImpl DynP, const OwningType *Owner);
+
+ bool operator==(const DynRefImpl &Other) const;
+ bool operator <(const DynRefImpl &Other) const;
+
+ error_code getNext(DynRefImpl &Result) const;
+ int64_t getTag() const;
+ uint64_t getVal() const;
+ uint64_t getPtr() const;
+
+ DataRefImpl getRawDataRefImpl() const;
+};
+
+// Elf_Rel: Elf Relocation
+template<support::endianness target_endianness, bool is64Bits, bool isRela>
+struct Elf_Rel_Base;
+
+template<support::endianness target_endianness>
+struct Elf_Rel_Base<target_endianness, false, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf_Word r_info; // Symbol table index and type of relocation to apply
+};
+
+template<support::endianness target_endianness>
+struct Elf_Rel_Base<target_endianness, true, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf_Xword r_info; // Symbol table index and type of relocation to apply
+};
+
+template<support::endianness target_endianness>
+struct Elf_Rel_Base<target_endianness, false, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf_Word r_info; // Symbol table index and type of relocation to apply
+ Elf_Sword r_addend; // Compute value for relocatable field by adding this
+};
+
+template<support::endianness target_endianness>
+struct Elf_Rel_Base<target_endianness, true, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf_Xword r_info; // Symbol table index and type of relocation to apply
+ Elf_Sxword r_addend; // Compute value for relocatable field by adding this.
+};
+
+template<support::endianness target_endianness, bool is64Bits, bool isRela>
+struct Elf_Rel_Impl;
+
+template<support::endianness target_endianness, bool isRela>
+struct Elf_Rel_Impl<target_endianness, true, isRela>
+ : Elf_Rel_Base<target_endianness, true, isRela> {
+ using Elf_Rel_Base<target_endianness, true, isRela>::r_info;
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ uint64_t getSymbol() const { return (r_info >> 32); }
+ unsigned char getType() const {
+ return (unsigned char) (r_info & 0xffffffffL);
+ }
+ void setSymbol(uint64_t s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(uint64_t s, unsigned char t) {
+ r_info = (s << 32) + (t&0xffffffffL);
+ }
+};
+
+template<support::endianness target_endianness, bool isRela>
+struct Elf_Rel_Impl<target_endianness, false, isRela>
+ : Elf_Rel_Base<target_endianness, false, isRela> {
+ using Elf_Rel_Base<target_endianness, false, isRela>::r_info;
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+
+ // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
+ // and ELF32_R_INFO macros defined in the ELF specification:
+ uint32_t getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(uint32_t s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(uint32_t s, unsigned char t) {
+ r_info = (s << 8) + t;
+ }
+};
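
The 32-bit packing is the spec's ELF32_R_INFO; a worked example (sketch):

    uint32_t r_info = (7u << 8) + ELF::R_386_PC32; // symbol 7, type 2
    uint32_t SymIdx = r_info >> 8;                 // 7
    uint8_t  RType  = r_info & 0xff;               // ELF::R_386_PC32
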
+
+
+template<support::endianness target_endianness, bool is64Bits>
+class ELFObjectFile : public ObjectFile {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+
+ typedef Elf_Shdr_Impl<target_endianness, is64Bits> Elf_Shdr;
+ typedef Elf_Sym_Impl<target_endianness, is64Bits> Elf_Sym;
+ typedef Elf_Dyn_Impl<target_endianness, is64Bits> Elf_Dyn;
+ typedef Elf_Rel_Impl<target_endianness, is64Bits, false> Elf_Rel;
+ typedef Elf_Rel_Impl<target_endianness, is64Bits, true> Elf_Rela;
+ typedef Elf_Verdef_Impl<target_endianness, is64Bits> Elf_Verdef;
+ typedef Elf_Verdaux_Impl<target_endianness, is64Bits> Elf_Verdaux;
+ typedef Elf_Verneed_Impl<target_endianness, is64Bits> Elf_Verneed;
+ typedef Elf_Vernaux_Impl<target_endianness, is64Bits> Elf_Vernaux;
+ typedef Elf_Versym_Impl<target_endianness, is64Bits> Elf_Versym;
+ typedef DynRefImpl<target_endianness, is64Bits> DynRef;
+ typedef content_iterator<DynRef> dyn_iterator;
+
+protected:
+ struct Elf_Ehdr {
+ unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
+ Elf_Half e_type; // Type of file (see ET_*)
+ Elf_Half e_machine; // Required architecture for this file (see EM_*)
+ Elf_Word e_version; // Must be equal to 1
+ Elf_Addr e_entry; // Address to jump to in order to start program
+ Elf_Off e_phoff; // Program header table's file offset, in bytes
+ Elf_Off e_shoff; // Section header table's file offset, in bytes
+ Elf_Word e_flags; // Processor-specific flags
+ Elf_Half e_ehsize; // Size of ELF header, in bytes
+ Elf_Half e_phentsize;// Size of an entry in the program header table
+ Elf_Half e_phnum; // Number of entries in the program header table
+ Elf_Half e_shentsize;// Size of an entry in the section header table
+ Elf_Half e_shnum; // Number of entries in the section header table
+ Elf_Half e_shstrndx; // Section header table index of section name
+ // string table
+ bool checkMagic() const {
+ return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
+ };
+ // This flag is used for classof, to distinguish ELFObjectFile from
+ // its subclass. If more subclasses will be created, this flag will
+ // have to become an enum.
+ bool isDyldELFObject;
+
+private:
+ typedef SmallVector<const Elf_Shdr*, 1> Sections_t;
+ typedef DenseMap<unsigned, unsigned> IndexMap_t;
+ typedef DenseMap<const Elf_Shdr*, SmallVector<uint32_t, 1> > RelocMap_t;
+
+ const Elf_Ehdr *Header;
+ const Elf_Shdr *SectionHeaderTable;
+ const Elf_Shdr *dot_shstrtab_sec; // Section header string table.
+ const Elf_Shdr *dot_strtab_sec; // Symbol string table.
+ const Elf_Shdr *dot_dynstr_sec; // Dynamic symbol string table.
+
+ // SymbolTableSections[0] always points to the dynamic symbol table section
+ // header, or NULL if there is no dynamic symbol table.
+ Sections_t SymbolTableSections;
+ IndexMap_t SymbolTableSectionsIndexMap;
+ DenseMap<const Elf_Sym*, ELF::Elf64_Word> ExtendedSymbolTable;
+
+ const Elf_Shdr *dot_dynamic_sec; // .dynamic
+ const Elf_Shdr *dot_gnu_version_sec; // .gnu.version
+ const Elf_Shdr *dot_gnu_version_r_sec; // .gnu.version_r
+ const Elf_Shdr *dot_gnu_version_d_sec; // .gnu.version_d
+
+ // Pointer to SONAME entry in dynamic string table
+ // This is set the first time getLoadName is called.
+ mutable const char *dt_soname;
+
+ // Records for each version index the corresponding Verdef or Vernaux entry.
+ // This is filled the first time LoadVersionMap() is called.
+ class VersionMapEntry : public PointerIntPair<const void*, 1> {
+ public:
+ // If the integer is 0, this is an Elf_Verdef*.
+ // If the integer is 1, this is an Elf_Vernaux*.
+ VersionMapEntry() : PointerIntPair<const void*, 1>(NULL, 0) { }
+ VersionMapEntry(const Elf_Verdef *verdef)
+ : PointerIntPair<const void*, 1>(verdef, 0) { }
+ VersionMapEntry(const Elf_Vernaux *vernaux)
+ : PointerIntPair<const void*, 1>(vernaux, 1) { }
+ bool isNull() const { return getPointer() == NULL; }
+ bool isVerdef() const { return !isNull() && getInt() == 0; }
+ bool isVernaux() const { return !isNull() && getInt() == 1; }
+ const Elf_Verdef *getVerdef() const {
+ return isVerdef() ? (const Elf_Verdef*)getPointer() : NULL;
+ }
+ const Elf_Vernaux *getVernaux() const {
+ return isVernaux() ? (const Elf_Vernaux*)getPointer() : NULL;
+ }
+ };
+ mutable SmallVector<VersionMapEntry, 16> VersionMap;
+ void LoadVersionDefs(const Elf_Shdr *sec) const;
+ void LoadVersionNeeds(const Elf_Shdr *sec) const;
+ void LoadVersionMap() const;
+
+ /// @brief Map sections to an array of relocation sections that reference
+ /// them sorted by section index.
+ RelocMap_t SectionRelocMap;
+
+ /// @brief Get the relocation section that contains \a Rel.
+ const Elf_Shdr *getRelSection(DataRefImpl Rel) const {
+ return getSection(Rel.w.b);
+ }
+
+ bool isRelocationHasAddend(DataRefImpl Rel) const;
+ template<typename T>
+ const T *getEntry(uint16_t Section, uint32_t Entry) const;
+ template<typename T>
+ const T *getEntry(const Elf_Shdr *Section, uint32_t Entry) const;
+ const Elf_Shdr *getSection(DataRefImpl index) const;
+ const Elf_Shdr *getSection(uint32_t index) const;
+ const Elf_Rel *getRel(DataRefImpl Rel) const;
+ const Elf_Rela *getRela(DataRefImpl Rela) const;
+ const char *getString(uint32_t section, uint32_t offset) const;
+ const char *getString(const Elf_Shdr *section, uint32_t offset) const;
+ error_code getSymbolName(const Elf_Shdr *section,
+ const Elf_Sym *Symb,
+ StringRef &Res) const;
+ error_code getSymbolVersion(const Elf_Shdr *section,
+ const Elf_Sym *Symb,
+ StringRef &Version,
+ bool &IsDefault) const;
+ void VerifyStrTab(const Elf_Shdr *sh) const;
+
+protected:
+ const Elf_Sym *getSymbol(DataRefImpl Symb) const; // FIXME: Should be private?
+ void validateSymbol(DataRefImpl Symb) const;
+
+public:
+ const Elf_Dyn *getDyn(DataRefImpl DynData) const;
+ error_code getSymbolVersion(SymbolRef Symb, StringRef &Version,
+ bool &IsDefault) const;
+protected:
+ virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const;
+ virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const;
+ virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res) const;
+ virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const;
+ virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const;
+ virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const;
+ virtual error_code getSymbolFlags(DataRefImpl Symb, uint32_t &Res) const;
+ virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
+ virtual error_code getSymbolSection(DataRefImpl Symb,
+ section_iterator &Res) const;
+
+ friend class DynRefImpl<target_endianness, is64Bits>;
+ virtual error_code getDynNext(DataRefImpl DynData, DynRef &Result) const;
+
+ virtual error_code getLibraryNext(DataRefImpl Data, LibraryRef &Result) const;
+ virtual error_code getLibraryPath(DataRefImpl Data, StringRef &Res) const;
+
+ virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
+ virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
+ virtual error_code getSectionAddress(DataRefImpl Sec, uint64_t &Res) const;
+ virtual error_code getSectionSize(DataRefImpl Sec, uint64_t &Res) const;
+ virtual error_code getSectionContents(DataRefImpl Sec, StringRef &Res) const;
+ virtual error_code getSectionAlignment(DataRefImpl Sec, uint64_t &Res) const;
+ virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionData(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionRequiredForExecution(DataRefImpl Sec,
+ bool &Res) const;
+ virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
+ virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
+ bool &Result) const;
+ virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const;
+ virtual relocation_iterator getSectionRelEnd(DataRefImpl Sec) const;
+
+ virtual error_code getRelocationNext(DataRefImpl Rel,
+ RelocationRef &Res) const;
+ virtual error_code getRelocationAddress(DataRefImpl Rel,
+ uint64_t &Res) const;
+ virtual error_code getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Res) const;
+ virtual error_code getRelocationSymbol(DataRefImpl Rel,
+ SymbolRef &Res) const;
+ virtual error_code getRelocationType(DataRefImpl Rel,
+ uint64_t &Res) const;
+ virtual error_code getRelocationTypeName(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const;
+ virtual error_code getRelocationAdditionalInfo(DataRefImpl Rel,
+ int64_t &Res) const;
+ virtual error_code getRelocationValueString(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const;
+
+public:
+ ELFObjectFile(MemoryBuffer *Object, error_code &ec);
+ virtual symbol_iterator begin_symbols() const;
+ virtual symbol_iterator end_symbols() const;
+
+ virtual symbol_iterator begin_dynamic_symbols() const;
+ virtual symbol_iterator end_dynamic_symbols() const;
+
+ virtual section_iterator begin_sections() const;
+ virtual section_iterator end_sections() const;
+
+ virtual library_iterator begin_libraries_needed() const;
+ virtual library_iterator end_libraries_needed() const;
+
+ virtual dyn_iterator begin_dynamic_table() const;
+ virtual dyn_iterator end_dynamic_table() const;
+
+ virtual uint8_t getBytesInAddress() const;
+ virtual StringRef getFileFormatName() const;
+ virtual StringRef getObjectType() const { return "ELF"; }
+ virtual unsigned getArch() const;
+ virtual StringRef getLoadName() const;
+
+ uint64_t getNumSections() const;
+ uint64_t getStringTableIndex() const;
+ ELF::Elf64_Word getSymbolTableIndex(const Elf_Sym *symb) const;
+ const Elf_Shdr *getSection(const Elf_Sym *symb) const;
+
+ // Methods for type inquiry through isa, cast, and dyn_cast
+ bool isDyldType() const { return isDyldELFObject; }
+ static inline bool classof(const Binary *v) {
+ return v->getType() == getELFType(target_endianness == support::little,
+ is64Bits);
+ }
+ static inline bool classof(const ELFObjectFile *v) { return true; }
+};
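
The template is meant to be instantiated in exactly four concrete forms, which the classof() above tells apart through getELFType() (illustrative typedefs; the factory that selects among them lives elsewhere in the Object library):

    typedef ELFObjectFile<support::little, false> ELF32LEObjectFile;
    typedef ELFObjectFile<support::big,    false> ELF32BEObjectFile;
    typedef ELFObjectFile<support::little, true>  ELF64LEObjectFile;
    typedef ELFObjectFile<support::big,    true>  ELF64BEObjectFile;
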
+
+// Iterate through the version definitions, and place each Elf_Verdef
+// in the VersionMap according to its index.
+template<support::endianness target_endianness, bool is64Bits>
+void ELFObjectFile<target_endianness, is64Bits>::
+ LoadVersionDefs(const Elf_Shdr *sec) const {
+ unsigned vd_size = sec->sh_size; // Size of section in bytes
+ unsigned vd_count = sec->sh_info; // Number of Verdef entries
+ const char *sec_start = (const char*)base() + sec->sh_offset;
+ const char *sec_end = sec_start + vd_size;
+ // The first Verdef entry is at the start of the section.
+ const char *p = sec_start;
+ for (unsigned i = 0; i < vd_count; i++) {
+ if (p + sizeof(Elf_Verdef) > sec_end)
+ report_fatal_error("Section ended unexpectedly while scanning "
+ "version definitions.");
+ const Elf_Verdef *vd = reinterpret_cast<const Elf_Verdef *>(p);
+ if (vd->vd_version != ELF::VER_DEF_CURRENT)
+ report_fatal_error("Unexpected verdef version");
+ size_t index = vd->vd_ndx & ELF::VERSYM_VERSION;
+ if (index >= VersionMap.size())
+ VersionMap.resize(index+1);
+ VersionMap[index] = VersionMapEntry(vd);
+ p += vd->vd_next;
+ }
+}
+
+// Iterate through the versions needed section, and place each Elf_Vernaux
+// in the VersionMap according to its index.
+template<support::endianness target_endianness, bool is64Bits>
+void ELFObjectFile<target_endianness, is64Bits>::
+ LoadVersionNeeds(const Elf_Shdr *sec) const {
+ unsigned vn_size = sec->sh_size; // Size of section in bytes
+ unsigned vn_count = sec->sh_info; // Number of Verneed entries
+ const char *sec_start = (const char*)base() + sec->sh_offset;
+ const char *sec_end = sec_start + vn_size;
+ // The first Verneed entry is at the start of the section.
+ const char *p = sec_start;
+ for (unsigned i = 0; i < vn_count; i++) {
+ if (p + sizeof(Elf_Verneed) > sec_end)
+ report_fatal_error("Section ended unexpectedly while scanning "
+ "version needed records.");
+ const Elf_Verneed *vn = reinterpret_cast<const Elf_Verneed *>(p);
+ if (vn->vn_version != ELF::VER_NEED_CURRENT)
+ report_fatal_error("Unexpected verneed version");
+ // Iterate through the Vernaux entries
+ const char *paux = p + vn->vn_aux;
+ for (unsigned j = 0; j < vn->vn_cnt; j++) {
+ if (paux + sizeof(Elf_Vernaux) > sec_end)
+ report_fatal_error("Section ended unexpected while scanning auxiliary "
+ "version needed records.");
+ const Elf_Vernaux *vna = reinterpret_cast<const Elf_Vernaux *>(paux);
+ size_t index = vna->vna_other & ELF::VERSYM_VERSION;
+ if (index >= VersionMap.size())
+ VersionMap.resize(index+1);
+ VersionMap[index] = VersionMapEntry(vna);
+ paux += vna->vna_next;
+ }
+ p += vn->vn_next;
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+void ELFObjectFile<target_endianness, is64Bits>::LoadVersionMap() const {
+ // If there is no dynamic symtab or version table, there is nothing to do.
+ if (SymbolTableSections[0] == NULL || dot_gnu_version_sec == NULL)
+ return;
+
+ // Has the VersionMap already been loaded?
+ if (VersionMap.size() > 0)
+ return;
+
+ // The first two version indexes are reserved.
+ // Index 0 is LOCAL, index 1 is GLOBAL.
+ VersionMap.push_back(VersionMapEntry());
+ VersionMap.push_back(VersionMapEntry());
+
+ if (dot_gnu_version_d_sec)
+ LoadVersionDefs(dot_gnu_version_d_sec);
+
+ if (dot_gnu_version_r_sec)
+ LoadVersionNeeds(dot_gnu_version_r_sec);
+}
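
Once built, the map is consulted with a masked .gnu.version half-word (sketch; versym is a hypothetical Elf_Versym pointer, and VERSYM_VERSION strips the VERSYM_HIDDEN bit):

    size_t Index = versym->vs_index & ELF::VERSYM_VERSION;
    if (Index < VersionMap.size() && VersionMap[Index].isVerdef()) {
      const Elf_Verdef *VD = VersionMap[Index].getVerdef();
      uint32_t NameOffset = VD->getAux()->vda_name; // string table offset
    }
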
+
+template<support::endianness target_endianness, bool is64Bits>
+void ELFObjectFile<target_endianness, is64Bits>
+ ::validateSymbol(DataRefImpl Symb) const {
+ const Elf_Sym *symb = getSymbol(Symb);
+ const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b];
+ // FIXME: We really need to do proper error handling in the case of an invalid
+ // input file. Because we don't use exceptions, I think we'll just pass
+ // an error object around.
+ if (!( symb
+ && SymbolTableSection
+ && symb >= (const Elf_Sym*)(base()
+ + SymbolTableSection->sh_offset)
+ && symb < (const Elf_Sym*)(base()
+ + SymbolTableSection->sh_offset
+ + SymbolTableSection->sh_size)))
+ // FIXME: Proper error handling.
+ report_fatal_error("Symb must point to a valid symbol!");
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolNext(DataRefImpl Symb,
+ SymbolRef &Result) const {
+ validateSymbol(Symb);
+ const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b];
+
+ ++Symb.d.a;
+ // Check to see if we are at the end of this symbol table.
+ if (Symb.d.a >= SymbolTableSection->getEntityCount()) {
+ // We are at the end. If there are other symbol tables, jump to them.
+ // If the symbol table is .dynsym, we are iterating dynamic symbols,
+ // and there is only one table of these.
+ if (Symb.d.b != 0) {
+ ++Symb.d.b;
+ Symb.d.a = 1; // The 0th symbol in ELF is fake.
+ }
+ // Otherwise return the terminator.
+ if (Symb.d.b == 0 || Symb.d.b >= SymbolTableSections.size()) {
+ Symb.d.a = std::numeric_limits<uint32_t>::max();
+ Symb.d.b = std::numeric_limits<uint32_t>::max();
+ }
+ }
+
+ Result = SymbolRef(Symb, this);
+ return object_error::success;
+}
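
Through the generic ObjectFile interface this backs, iteration looks like the following (minimal sketch; Obj is a hypothetical ELFObjectFile pointer and error handling is elided):

    error_code ec;
    for (symbol_iterator I = Obj->begin_symbols(), E = Obj->end_symbols();
         I != E; I.increment(ec)) {
      StringRef Name;
      I->getName(Name); // dispatches to getSymbolName() below
    }
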
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolName(DataRefImpl Symb,
+ StringRef &Result) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ return getSymbolName(SymbolTableSections[Symb.d.b], symb, Result);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolVersion(SymbolRef SymRef,
+ StringRef &Version,
+ bool &IsDefault) const {
+ DataRefImpl Symb = SymRef.getRawDataRefImpl();
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ return getSymbolVersion(SymbolTableSections[Symb.d.b], symb,
+ Version, IsDefault);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+ELF::Elf64_Word ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolTableIndex(const Elf_Sym *symb) const {
+ if (symb->st_shndx == ELF::SHN_XINDEX)
+ return ExtendedSymbolTable.lookup(symb);
+ return symb->st_shndx;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
+ELFObjectFile<target_endianness, is64Bits>
+ ::getSection(const Elf_Sym *symb) const {
+ if (symb->st_shndx == ELF::SHN_XINDEX)
+ return getSection(ExtendedSymbolTable.lookup(symb));
+ if (symb->st_shndx >= ELF::SHN_LORESERVE)
+ return 0;
+ return getSection(symb->st_shndx);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolFileOffset(DataRefImpl Symb,
+ uint64_t &Result) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ const Elf_Shdr *Section;
+ switch (getSymbolTableIndex(symb)) {
+ case ELF::SHN_COMMON:
+ // Uninitialized symbols have no offset in the object file
+ case ELF::SHN_UNDEF:
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ case ELF::SHN_ABS:
+ Result = symb->st_value;
+ return object_error::success;
+ default: Section = getSection(symb);
+ }
+
+ switch (symb->getType()) {
+ case ELF::STT_SECTION:
+ Result = Section ? Section->sh_addr : UnknownAddressOrSize;
+ return object_error::success;
+ case ELF::STT_FUNC:
+ case ELF::STT_OBJECT:
+ case ELF::STT_NOTYPE:
+ Result = symb->st_value +
+ (Section ? Section->sh_offset : 0);
+ return object_error::success;
+ default:
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolAddress(DataRefImpl Symb,
+ uint64_t &Result) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ const Elf_Shdr *Section;
+ switch (getSymbolTableIndex(symb)) {
+ case ELF::SHN_COMMON:
+ case ELF::SHN_UNDEF:
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ case ELF::SHN_ABS:
+ Result = symb->st_value;
+ return object_error::success;
+ default: Section = getSection(symb);
+ }
+
+ switch (symb->getType()) {
+ case ELF::STT_SECTION:
+ Result = Section ? Section->sh_addr : UnknownAddressOrSize;
+ return object_error::success;
+ case ELF::STT_FUNC:
+ case ELF::STT_OBJECT:
+ case ELF::STT_NOTYPE:
+ Result = symb->st_value + (Section ? Section->sh_addr : 0);
+ return object_error::success;
+ default:
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolSize(DataRefImpl Symb,
+ uint64_t &Result) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ if (symb->st_size == 0) {
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+ Result = symb->st_size;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolNMTypeChar(DataRefImpl Symb,
+ char &Result) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ const Elf_Shdr *Section = getSection(symb);
+
+ char ret = '?';
+
+ if (Section) {
+ switch (Section->sh_type) {
+ case ELF::SHT_PROGBITS:
+ case ELF::SHT_DYNAMIC:
+ switch (Section->sh_flags) {
+ case (ELF::SHF_ALLOC | ELF::SHF_EXECINSTR):
+ ret = 't'; break;
+ case (ELF::SHF_ALLOC | ELF::SHF_WRITE):
+ ret = 'd'; break;
+ case ELF::SHF_ALLOC:
+ case (ELF::SHF_ALLOC | ELF::SHF_MERGE):
+ case (ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::SHF_STRINGS):
+ ret = 'r'; break;
+ }
+ break;
+ case ELF::SHT_NOBITS: ret = 'b';
+ }
+ }
+
+ switch (getSymbolTableIndex(symb)) {
+ case ELF::SHN_UNDEF:
+ if (ret == '?')
+ ret = 'U';
+ break;
+ case ELF::SHN_ABS: ret = 'a'; break;
+ case ELF::SHN_COMMON: ret = 'c'; break;
+ }
+
+ switch (symb->getBinding()) {
+ case ELF::STB_GLOBAL: ret = ::toupper(ret); break;
+ case ELF::STB_WEAK:
+ if (getSymbolTableIndex(symb) == ELF::SHN_UNDEF)
+ ret = 'w';
+ else
+ if (symb->getType() == ELF::STT_OBJECT)
+ ret = 'V';
+ else
+ ret = 'W';
+ }
+
+ if (ret == '?' && symb->getType() == ELF::STT_SECTION) {
+ StringRef name;
+ if (error_code ec = getSymbolName(Symb, name))
+ return ec;
+ Result = StringSwitch<char>(name)
+ .StartsWith(".debug", 'N')
+ .StartsWith(".note", 'n')
+ .Default('?');
+ return object_error::success;
+ }
+
+ Result = ret;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolType(DataRefImpl Symb,
+ SymbolRef::Type &Result) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+
+ switch (symb->getType()) {
+ case ELF::STT_NOTYPE:
+ Result = SymbolRef::ST_Unknown;
+ break;
+ case ELF::STT_SECTION:
+ Result = SymbolRef::ST_Debug;
+ break;
+ case ELF::STT_FILE:
+ Result = SymbolRef::ST_File;
+ break;
+ case ELF::STT_FUNC:
+ Result = SymbolRef::ST_Function;
+ break;
+ case ELF::STT_OBJECT:
+ case ELF::STT_COMMON:
+ case ELF::STT_TLS:
+ Result = SymbolRef::ST_Data;
+ break;
+ default:
+ Result = SymbolRef::ST_Other;
+ break;
+ }
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolFlags(DataRefImpl Symb,
+ uint32_t &Result) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+
+ Result = SymbolRef::SF_None;
+
+ if (symb->getBinding() != ELF::STB_LOCAL)
+ Result |= SymbolRef::SF_Global;
+
+ if (symb->getBinding() == ELF::STB_WEAK)
+ Result |= SymbolRef::SF_Weak;
+
+ if (symb->st_shndx == ELF::SHN_ABS)
+ Result |= SymbolRef::SF_Absolute;
+
+ if (symb->getType() == ELF::STT_FILE ||
+ symb->getType() == ELF::STT_SECTION)
+ Result |= SymbolRef::SF_FormatSpecific;
+
+ if (getSymbolTableIndex(symb) == ELF::SHN_UNDEF)
+ Result |= SymbolRef::SF_Undefined;
+
+ if (symb->getType() == ELF::STT_COMMON ||
+ getSymbolTableIndex(symb) == ELF::SHN_COMMON)
+ Result |= SymbolRef::SF_Common;
+
+ if (symb->getType() == ELF::STT_TLS)
+ Result |= SymbolRef::SF_ThreadLocal;
+
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolSection(DataRefImpl Symb,
+ section_iterator &Res) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ const Elf_Shdr *sec = getSection(symb);
+ if (!sec)
+ Res = end_sections();
+ else {
+ DataRefImpl Sec;
+ Sec.p = reinterpret_cast<intptr_t>(sec);
+ Res = section_iterator(SectionRef(Sec, this));
+ }
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionNext(DataRefImpl Sec, SectionRef &Result) const {
+ const uint8_t *sec = reinterpret_cast<const uint8_t *>(Sec.p);
+ sec += Header->e_shentsize;
+ Sec.p = reinterpret_cast<intptr_t>(sec);
+ Result = SectionRef(Sec, this);
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionName(DataRefImpl Sec,
+ StringRef &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ Result = StringRef(getString(dot_shstrtab_sec, sec->sh_name));
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionAddress(DataRefImpl Sec,
+ uint64_t &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ Result = sec->sh_addr;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionSize(DataRefImpl Sec,
+ uint64_t &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ Result = sec->sh_size;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionContents(DataRefImpl Sec,
+ StringRef &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ const char *start = (const char*)base() + sec->sh_offset;
+ Result = StringRef(start, sec->sh_size);
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionAlignment(DataRefImpl Sec,
+ uint64_t &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ Result = sec->sh_addralign;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionText(DataRefImpl Sec,
+ bool &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ if (sec->sh_flags & ELF::SHF_EXECINSTR)
+ Result = true;
+ else
+ Result = false;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionData(DataRefImpl Sec,
+ bool &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ if (sec->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE)
+ && sec->sh_type == ELF::SHT_PROGBITS)
+ Result = true;
+ else
+ Result = false;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionBSS(DataRefImpl Sec,
+ bool &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ if (sec->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE)
+ && sec->sh_type == ELF::SHT_NOBITS)
+ Result = true;
+ else
+ Result = false;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionRequiredForExecution(DataRefImpl Sec,
+ bool &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ if (sec->sh_flags & ELF::SHF_ALLOC)
+ Result = true;
+ else
+ Result = false;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionVirtual(DataRefImpl Sec,
+ bool &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ if (sec->sh_type == ELF::SHT_NOBITS)
+ Result = true;
+ else
+ Result = false;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionZeroInit(DataRefImpl Sec,
+ bool &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ // For ELF, all zero-init sections are virtual (that is, they occupy no space
+ // in the object image) and vice versa.
+ if (sec->sh_type == ELF::SHT_NOBITS)
+ Result = true;
+ else
+ Result = false;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::sectionContainsSymbol(DataRefImpl Sec,
+ DataRefImpl Symb,
+ bool &Result) const {
+ // FIXME: Unimplemented.
+ Result = false;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+relocation_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionRelBegin(DataRefImpl Sec) const {
+ DataRefImpl RelData;
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ typename RelocMap_t::const_iterator ittr = SectionRelocMap.find(sec);
+ if (sec != 0 && ittr != SectionRelocMap.end()) {
+ RelData.w.a = getSection(ittr->second[0])->sh_info;
+ RelData.w.b = ittr->second[0];
+ RelData.w.c = 0;
+ }
+ return relocation_iterator(RelocationRef(RelData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+relocation_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::getSectionRelEnd(DataRefImpl Sec) const {
+ DataRefImpl RelData;
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ typename RelocMap_t::const_iterator ittr = SectionRelocMap.find(sec);
+ if (sec != 0 && ittr != SectionRelocMap.end()) {
+ // Get the index of the last relocation section for this section.
+ std::size_t relocsecindex = ittr->second[ittr->second.size() - 1];
+ const Elf_Shdr *relocsec = getSection(relocsecindex);
+ RelData.w.a = relocsec->sh_info;
+ RelData.w.b = relocsecindex;
+ RelData.w.c = relocsec->sh_size / relocsec->sh_entsize;
+ }
+ return relocation_iterator(RelocationRef(RelData, this));
+}
+
+// Relocations
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getRelocationNext(DataRefImpl Rel,
+ RelocationRef &Result) const {
+ ++Rel.w.c;
+ const Elf_Shdr *relocsec = getSection(Rel.w.b);
+ if (Rel.w.c >= (relocsec->sh_size / relocsec->sh_entsize)) {
+ // We have reached the end of the relocations for this section. See if there
+ // is another relocation section.
+ typename RelocMap_t::mapped_type relocseclist =
+ SectionRelocMap.lookup(getSection(Rel.w.a));
+
+ // Do a binary search for the current reloc section index (which must be
+ // present). Then get the next one.
+ typename RelocMap_t::mapped_type::const_iterator loc =
+ std::lower_bound(relocseclist.begin(), relocseclist.end(), Rel.w.b);
+ ++loc;
+
+ // If there is no next one, don't do anything. The ++Rel.w.c above sets Rel
+ // to the end iterator.
+ if (loc != relocseclist.end()) {
+ Rel.w.b = *loc;
+ Rel.w.a = 0;
+ }
+ }
+ Result = RelocationRef(Rel, this);
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getRelocationSymbol(DataRefImpl Rel,
+ SymbolRef &Result) const {
+ uint32_t symbolIdx;
+ const Elf_Shdr *sec = getSection(Rel.w.b);
+ switch (sec->sh_type) {
+ default :
+ report_fatal_error("Invalid section type in Rel!");
+ case ELF::SHT_REL : {
+ symbolIdx = getRel(Rel)->getSymbol();
+ break;
+ }
+ case ELF::SHT_RELA : {
+ symbolIdx = getRela(Rel)->getSymbol();
+ break;
+ }
+ }
+ DataRefImpl SymbolData;
+ IndexMap_t::const_iterator it = SymbolTableSectionsIndexMap.find(sec->sh_link);
+ if (it == SymbolTableSectionsIndexMap.end())
+ report_fatal_error("Relocation symbol table not found!");
+ SymbolData.d.a = symbolIdx;
+ SymbolData.d.b = it->second;
+ Result = SymbolRef(SymbolData, this);
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getRelocationAddress(DataRefImpl Rel,
+ uint64_t &Result) const {
+ uint64_t offset;
+ const Elf_Shdr *sec = getSection(Rel.w.b);
+ switch (sec->sh_type) {
+ default :
+ report_fatal_error("Invalid section type in Rel!");
+ case ELF::SHT_REL : {
+ offset = getRel(Rel)->r_offset;
+ break;
+ }
+ case ELF::SHT_RELA : {
+ offset = getRela(Rel)->r_offset;
+ break;
+ }
+ }
+
+ Result = offset;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Result) const {
+ uint64_t offset;
+ const Elf_Shdr *sec = getSection(Rel.w.b);
+ switch (sec->sh_type) {
+ default :
+ report_fatal_error("Invalid section type in Rel!");
+ case ELF::SHT_REL : {
+ offset = getRel(Rel)->r_offset;
+ break;
+ }
+ case ELF::SHT_RELA : {
+ offset = getRela(Rel)->r_offset;
+ break;
+ }
+ }
+
+ Result = offset - sec->sh_addr;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getRelocationType(DataRefImpl Rel,
+ uint64_t &Result) const {
+ const Elf_Shdr *sec = getSection(Rel.w.b);
+ switch (sec->sh_type) {
+ default :
+ report_fatal_error("Invalid section type in Rel!");
+ case ELF::SHT_REL : {
+ Result = getRel(Rel)->getType();
+ break;
+ }
+ case ELF::SHT_RELA : {
+ Result = getRela(Rel)->getType();
+ break;
+ }
+ }
+ return object_error::success;
+}
+
+#define LLVM_ELF_SWITCH_RELOC_TYPE_NAME(enum) \
+ case ELF::enum: res = #enum; break;
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getRelocationTypeName(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const {
+ const Elf_Shdr *sec = getSection(Rel.w.b);
+ uint8_t type;
+ StringRef res;
+ switch (sec->sh_type) {
+ default :
+ return object_error::parse_failed;
+ case ELF::SHT_REL : {
+ type = getRel(Rel)->getType();
+ break;
+ }
+ case ELF::SHT_RELA : {
+ type = getRela(Rel)->getType();
+ break;
+ }
+ }
+ switch (Header->e_machine) {
+ case ELF::EM_X86_64:
+ switch (type) {
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_NONE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_64);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOT32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PLT32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_COPY);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GLOB_DAT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_JUMP_SLOT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_RELATIVE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPCREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_32S);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPMOD64);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPOFF64);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TPOFF64);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSGD);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSLD);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTTPOFF);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TPOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC64);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTOFF64);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPC32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_SIZE32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_SIZE64);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPC32_TLSDESC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSDESC_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSDESC);
+ default:
+ res = "Unknown";
+ }
+ break;
+ case ELF::EM_386:
+ switch (type) {
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_NONE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOT32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PLT32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_COPY);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GLOB_DAT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_JUMP_SLOT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_RELATIVE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOTOFF);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOTPC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_32PLT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_TPOFF);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_IE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GOTIE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_PUSH);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_POP);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_PUSH);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_POP);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDO_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_IE_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LE_32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DTPMOD32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DTPOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_TPOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GOTDESC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DESC_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DESC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_IRELATIVE);
+ default:
+ res = "Unknown";
+ }
+ break;
+ default:
+ res = "Unknown";
+ }
+ Result.append(res.begin(), res.end());
+ return object_error::success;
+}
+
+#undef LLVM_ELF_SWITCH_RELOC_TYPE_NAME
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getRelocationAdditionalInfo(DataRefImpl Rel,
+ int64_t &Result) const {
+ const Elf_Shdr *sec = getSection(Rel.w.b);
+ switch (sec->sh_type) {
+ default :
+ report_fatal_error("Invalid section type in Rel!");
+ case ELF::SHT_REL : {
+ Result = 0;
+ return object_error::success;
+ }
+ case ELF::SHT_RELA : {
+ Result = getRela(Rel)->r_addend;
+ return object_error::success;
+ }
+ }
+}
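The two relocation-section flavors differ only in where the addend lives: SHT_REL entries keep it implicit in the bytes being patched, while SHT_RELA entries carry an explicit r_addend field, which is what this accessor returns. A minimal sketch of reading it through the generic interface, assuming `Sec` is a SectionRef from this API and that RelocationRef::getAdditionalInfo is the public wrapper (error handling elided):

    // Sketch only: print each relocation's explicit addend (always 0 for
    // SHT_REL sections, r_addend for SHT_RELA sections).
    error_code ec;
    for (relocation_iterator I = Sec.begin_relocations(),
                             E = Sec.end_relocations();
         I != E; I.increment(ec)) {
      int64_t Addend = 0;
      I->getAdditionalInfo(Addend); // forwards to getRelocationAdditionalInfo
      outs() << "addend: " << Addend << "\n";
    }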
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getRelocationValueString(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const {
+ const Elf_Shdr *sec = getSection(Rel.w.b);
+ uint8_t type;
+ StringRef res;
+ int64_t addend = 0;
+ uint16_t symbol_index = 0;
+ switch (sec->sh_type) {
+ default :
+ return object_error::parse_failed;
+ case ELF::SHT_REL : {
+ type = getRel(Rel)->getType();
+ symbol_index = getRel(Rel)->getSymbol();
+ // TODO: Read implicit addend from section data.
+ break;
+ }
+ case ELF::SHT_RELA : {
+ type = getRela(Rel)->getType();
+ symbol_index = getRela(Rel)->getSymbol();
+ addend = getRela(Rel)->r_addend;
+ break;
+ }
+ }
+ const Elf_Sym *symb = getEntry<Elf_Sym>(sec->sh_link, symbol_index);
+ StringRef symname;
+ if (error_code ec = getSymbolName(getSection(sec->sh_link), symb, symname))
+ return ec;
+ switch (Header->e_machine) {
+ case ELF::EM_X86_64:
+ switch (type) {
+ case ELF::R_X86_64_32S:
+ res = symname;
+ break;
+ case ELF::R_X86_64_PC32: {
+ std::string fmtbuf;
+ raw_string_ostream fmt(fmtbuf);
+ fmt << symname << (addend < 0 ? "" : "+") << addend << "-P";
+ fmt.flush();
+ Result.append(fmtbuf.begin(), fmtbuf.end());
+ }
+ break;
+ default:
+ res = "Unknown";
+ }
+ break;
+ default:
+ res = "Unknown";
+ }
+ if (Result.empty())
+ Result.append(res.begin(), res.end());
+ return object_error::success;
+}
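For EM_X86_64 this renders R_X86_64_32S as the bare symbol name and R_X86_64_PC32 as "symbol+addend-P" (P being the place of the relocation); any other machine or type comes out as "Unknown". A hedged usage sketch through the generic interface, assuming `Rel` is a RelocationRef into an x86-64 ELF object:

    SmallString<32> Buf;                 // llvm/ADT/SmallString.h
    if (error_code ec = Rel.getValueString(Buf))
      report_fatal_error(ec.message());
    outs() << Buf << "\n";               // e.g. "foo+4-P" for R_X86_64_PC32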
+
+// Verify that the last byte in the string table is a null terminator.
+template<support::endianness target_endianness, bool is64Bits>
+void ELFObjectFile<target_endianness, is64Bits>
+ ::VerifyStrTab(const Elf_Shdr *sh) const {
+ const char *strtab = (const char*)base() + sh->sh_offset;
+ if (strtab[sh->sh_size - 1] != 0)
+ // FIXME: Proper error handling.
+ report_fatal_error("String table must end with a null terminator!");
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+ELFObjectFile<target_endianness, is64Bits>::ELFObjectFile(MemoryBuffer *Object
+ , error_code &ec)
+ : ObjectFile(getELFType(target_endianness == support::little, is64Bits),
+ Object, ec)
+ , isDyldELFObject(false)
+ , SectionHeaderTable(0)
+ , dot_shstrtab_sec(0)
+ , dot_strtab_sec(0)
+ , dot_dynstr_sec(0)
+ , dot_dynamic_sec(0)
+ , dot_gnu_version_sec(0)
+ , dot_gnu_version_r_sec(0)
+ , dot_gnu_version_d_sec(0)
+ , dt_soname(0)
+ {
+
+ const uint64_t FileSize = Data->getBufferSize();
+
+ if (sizeof(Elf_Ehdr) > FileSize)
+ // FIXME: Proper error handling.
+ report_fatal_error("File too short!");
+
+ Header = reinterpret_cast<const Elf_Ehdr *>(base());
+
+ if (Header->e_shoff == 0)
+ return;
+
+ const uint64_t SectionTableOffset = Header->e_shoff;
+
+ if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize)
+ // FIXME: Proper error handling.
+ report_fatal_error("Section header table goes past end of file!");
+
+ // The getNumSections() call below depends on SectionHeaderTable being set.
+ SectionHeaderTable =
+ reinterpret_cast<const Elf_Shdr *>(base() + SectionTableOffset);
+ const uint64_t SectionTableSize = getNumSections() * Header->e_shentsize;
+
+ if (SectionTableOffset + SectionTableSize > FileSize)
+ // FIXME: Proper error handling.
+ report_fatal_error("Section table goes past end of file!");
+
+ // To find the symbol tables we walk the section table to find SHT_SYMTAB.
+ const Elf_Shdr* SymbolTableSectionHeaderIndex = 0;
+ const Elf_Shdr* sh = SectionHeaderTable;
+
+ // Reserve SymbolTableSections[0] for .dynsym
+ SymbolTableSections.push_back(NULL);
+
+ for (uint64_t i = 0, e = getNumSections(); i != e; ++i) {
+ switch (sh->sh_type) {
+ case ELF::SHT_SYMTAB_SHNDX: {
+ if (SymbolTableSectionHeaderIndex)
+ // FIXME: Proper error handling.
+ report_fatal_error("More than one .symtab_shndx!");
+ SymbolTableSectionHeaderIndex = sh;
+ break;
+ }
+ case ELF::SHT_SYMTAB: {
+ SymbolTableSectionsIndexMap[i] = SymbolTableSections.size();
+ SymbolTableSections.push_back(sh);
+ break;
+ }
+ case ELF::SHT_DYNSYM: {
+ if (SymbolTableSections[0] != NULL)
+ // FIXME: Proper error handling.
+ report_fatal_error("More than one .dynsym!");
+ SymbolTableSectionsIndexMap[i] = 0;
+ SymbolTableSections[0] = sh;
+ break;
+ }
+ case ELF::SHT_REL:
+ case ELF::SHT_RELA: {
+ SectionRelocMap[getSection(sh->sh_info)].push_back(i);
+ break;
+ }
+ case ELF::SHT_DYNAMIC: {
+ if (dot_dynamic_sec != NULL)
+ // FIXME: Proper error handling.
+ report_fatal_error("More than one .dynamic!");
+ dot_dynamic_sec = sh;
+ break;
+ }
+ case ELF::SHT_GNU_versym: {
+ if (dot_gnu_version_sec != NULL)
+ // FIXME: Proper error handling.
+ report_fatal_error("More than one .gnu.version section!");
+ dot_gnu_version_sec = sh;
+ break;
+ }
+ case ELF::SHT_GNU_verdef: {
+ if (dot_gnu_version_d_sec != NULL)
+ // FIXME: Proper error handling.
+ report_fatal_error("More than one .gnu.version_d section!");
+ dot_gnu_version_d_sec = sh;
+ break;
+ }
+ case ELF::SHT_GNU_verneed: {
+ if (dot_gnu_version_r_sec != NULL)
+ // FIXME: Proper error handling.
+ report_fatal_error("More than one .gnu.version_r section!");
+ dot_gnu_version_r_sec = sh;
+ break;
+ }
+ }
+ ++sh;
+ }
+
+ // Sort section relocation lists by index.
+ for (typename RelocMap_t::iterator i = SectionRelocMap.begin(),
+ e = SectionRelocMap.end(); i != e; ++i) {
+ std::sort(i->second.begin(), i->second.end());
+ }
+
+ // Get string table sections.
+ dot_shstrtab_sec = getSection(getStringTableIndex());
+ if (dot_shstrtab_sec) {
+ // Verify that the last byte in the string table is a null terminator.
+ VerifyStrTab(dot_shstrtab_sec);
+ }
+
+ // TODO: Merge this into the above loop.
+ for (const char *i = reinterpret_cast<const char *>(SectionHeaderTable),
+ *e = i + getNumSections() * Header->e_shentsize;
+ i != e; i += Header->e_shentsize) {
+ const Elf_Shdr *sh = reinterpret_cast<const Elf_Shdr*>(i);
+ if (sh->sh_type == ELF::SHT_STRTAB) {
+ StringRef SectionName(getString(dot_shstrtab_sec, sh->sh_name));
+ if (SectionName == ".strtab") {
+ if (dot_strtab_sec != 0)
+ // FIXME: Proper error handling.
+ report_fatal_error("Already found section named .strtab!");
+ dot_strtab_sec = sh;
+ VerifyStrTab(dot_strtab_sec);
+ } else if (SectionName == ".dynstr") {
+ if (dot_dynstr_sec != 0)
+ // FIXME: Proper error handling.
+ report_fatal_error("Already found section named .dynstr!");
+ dot_dynstr_sec = sh;
+ VerifyStrTab(dot_dynstr_sec);
+ }
+ }
+ }
+
+ // Build symbol name side-mapping if there is one.
+ if (SymbolTableSectionHeaderIndex) {
+ const Elf_Word *ShndxTable = reinterpret_cast<const Elf_Word*>(base() +
+ SymbolTableSectionHeaderIndex->sh_offset);
+ error_code ec;
+ for (symbol_iterator si = begin_symbols(),
+ se = end_symbols(); si != se; si.increment(ec)) {
+ if (ec)
+ report_fatal_error("Fewer extended symbol table entries than symbols!");
+ if (*ShndxTable != ELF::SHN_UNDEF)
+ ExtendedSymbolTable[getSymbol(si->getRawDataRefImpl())] = *ShndxTable;
+ ++ShndxTable;
+ }
+ }
+}
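The constructor does all of the indexing up front: it validates the header and section-table bounds, reserves SymbolTableSections[0] for .dynsym, records per-section relocation lists, and caches the .shstrtab/.strtab/.dynstr sections, so the accessors that follow are simple lookups. A minimal usage sketch via the generic factory, written against the 2012-era OwningPtr/MemoryBuffer API (path and error handling are illustrative only):

    OwningPtr<MemoryBuffer> Buf;         // llvm/Support/MemoryBuffer.h
    if (error_code ec = MemoryBuffer::getFile("a.out", Buf))
      report_fatal_error(ec.message());
    // createObjectFile picks the matching ELFObjectFile instantiation and
    // runs the constructor above.
    ObjectFile *Obj = ObjectFile::createObjectFile(Buf.take());
    error_code ec;
    for (section_iterator I = Obj->begin_sections(), E = Obj->end_sections();
         I != E; I.increment(ec)) {
      StringRef Name;
      I->getName(Name);                  // error checking elided in sketch
      outs() << Name << "\n";
    }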
+
+template<support::endianness target_endianness, bool is64Bits>
+symbol_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::begin_symbols() const {
+ DataRefImpl SymbolData;
+ if (SymbolTableSections.size() <= 1) {
+ SymbolData.d.a = std::numeric_limits<uint32_t>::max();
+ SymbolData.d.b = std::numeric_limits<uint32_t>::max();
+ } else {
+ SymbolData.d.a = 1; // The 0th symbol in ELF is fake.
+ SymbolData.d.b = 1; // Skip table 0, which is reserved for .dynsym.
+ }
+ return symbol_iterator(SymbolRef(SymbolData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+symbol_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::end_symbols() const {
+ DataRefImpl SymbolData;
+ SymbolData.d.a = std::numeric_limits<uint32_t>::max();
+ SymbolData.d.b = std::numeric_limits<uint32_t>::max();
+ return symbol_iterator(SymbolRef(SymbolData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+symbol_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::begin_dynamic_symbols() const {
+ DataRefImpl SymbolData;
+ if (SymbolTableSections[0] == NULL) {
+ SymbolData.d.a = std::numeric_limits<uint32_t>::max();
+ SymbolData.d.b = std::numeric_limits<uint32_t>::max();
+ } else {
+ SymbolData.d.a = 1; // The 0th symbol in ELF is fake.
+ SymbolData.d.b = 0; // The 0th table is .dynsym
+ }
+ return symbol_iterator(SymbolRef(SymbolData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+symbol_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::end_dynamic_symbols() const {
+ DataRefImpl SymbolData;
+ SymbolData.d.a = std::numeric_limits<uint32_t>::max();
+ SymbolData.d.b = std::numeric_limits<uint32_t>::max();
+ return symbol_iterator(SymbolRef(SymbolData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+section_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::begin_sections() const {
+ DataRefImpl ret;
+ ret.p = reinterpret_cast<intptr_t>(base() + Header->e_shoff);
+ return section_iterator(SectionRef(ret, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+section_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::end_sections() const {
+ DataRefImpl ret;
+ ret.p = reinterpret_cast<intptr_t>(base()
+ + Header->e_shoff
+ + (Header->e_shentsize*getNumSections()));
+ return section_iterator(SectionRef(ret, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+typename ELFObjectFile<target_endianness, is64Bits>::dyn_iterator
+ELFObjectFile<target_endianness, is64Bits>::begin_dynamic_table() const {
+ DataRefImpl DynData;
+ if (dot_dynamic_sec == NULL || dot_dynamic_sec->sh_size == 0) {
+ DynData.d.a = std::numeric_limits<uint32_t>::max();
+ } else {
+ DynData.d.a = 0;
+ }
+ return dyn_iterator(DynRef(DynData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+typename ELFObjectFile<target_endianness, is64Bits>::dyn_iterator
+ELFObjectFile<target_endianness, is64Bits>
+ ::end_dynamic_table() const {
+ DataRefImpl DynData;
+ DynData.d.a = std::numeric_limits<uint32_t>::max();
+ return dyn_iterator(DynRef(DynData, this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getDynNext(DataRefImpl DynData,
+ DynRef &Result) const {
+ ++DynData.d.a;
+
+ // Check to see if we are at the end of .dynamic
+ if (DynData.d.a >= dot_dynamic_sec->getEntityCount()) {
+ // We are at the end. Return the terminator.
+ DynData.d.a = std::numeric_limits<uint32_t>::max();
+ }
+
+ Result = DynRef(DynData, this);
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+StringRef
+ELFObjectFile<target_endianness, is64Bits>::getLoadName() const {
+ if (!dt_soname) {
+ // Find the DT_SONAME entry
+ dyn_iterator it = begin_dynamic_table();
+ dyn_iterator ie = end_dynamic_table();
+ error_code ec;
+ while (it != ie) {
+ if (it->getTag() == ELF::DT_SONAME)
+ break;
+ it.increment(ec);
+ if (ec)
+ report_fatal_error("dynamic table iteration failed");
+ }
+ if (it != ie) {
+ if (dot_dynstr_sec == NULL)
+ report_fatal_error("Dynamic string table is missing");
+ dt_soname = getString(dot_dynstr_sec, it->getVal());
+ } else {
+ dt_soname = "";
+ }
+ }
+ return dt_soname;
+}
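In other words, the SONAME lookup is lazy and cached: the dynamic table is only walked on the first call, and the empty string is cached when no DT_SONAME entry exists. Through the generic interface this is just (sketch, assuming `Obj` is an ObjectFile*):

    // Empty when the object is not a shared library or has no DT_SONAME.
    outs() << "soname: " << Obj->getLoadName() << "\n";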
+
+template<support::endianness target_endianness, bool is64Bits>
+library_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::begin_libraries_needed() const {
+ // Find the first DT_NEEDED entry
+ dyn_iterator i = begin_dynamic_table();
+ dyn_iterator e = end_dynamic_table();
+ error_code ec;
+ while (i != e) {
+ if (i->getTag() == ELF::DT_NEEDED)
+ break;
+ i.increment(ec);
+ if (ec)
+ report_fatal_error("dynamic table iteration failed");
+ }
+ // Use the same DataRefImpl format as DynRef.
+ return library_iterator(LibraryRef(i->getRawDataRefImpl(), this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getLibraryNext(DataRefImpl Data,
+ LibraryRef &Result) const {
+ // Use the same DataRefImpl format as DynRef.
+ dyn_iterator i = dyn_iterator(DynRef(Data, this));
+ dyn_iterator e = end_dynamic_table();
+
+ // Skip the current dynamic table entry.
+ error_code ec;
+ if (i != e) {
+ i.increment(ec);
+ // TODO: proper error handling
+ if (ec)
+ report_fatal_error("dynamic table iteration failed");
+ }
+
+ // Find the next DT_NEEDED entry.
+ while (i != e) {
+ if (i->getTag() == ELF::DT_NEEDED)
+ break;
+ i.increment(ec);
+ if (ec)
+ report_fatal_error("dynamic table iteration failed");
+ }
+ Result = LibraryRef(i->getRawDataRefImpl(), this);
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getLibraryPath(DataRefImpl Data, StringRef &Res) const {
+ dyn_iterator i = dyn_iterator(DynRef(Data, this));
+ if (i == end_dynamic_table())
+ report_fatal_error("getLibraryPath() called on iterator end");
+
+ if (i->getTag() != ELF::DT_NEEDED)
+ report_fatal_error("Invalid library_iterator");
+
+ // This uses .dynstr to look up the name of the DT_NEEDED entry.
+ // This works as long as DT_STRTAB == .dynstr. This is true most of
+ // the time, but the specification allows exceptions.
+ // TODO: This should really use DT_STRTAB instead. Doing this requires
+ // reading the program headers.
+ if (dot_dynstr_sec == NULL)
+ report_fatal_error("Dynamic string table is missing");
+ Res = getString(dot_dynstr_sec, i->getVal());
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+library_iterator ELFObjectFile<target_endianness, is64Bits>
+ ::end_libraries_needed() const {
+ dyn_iterator e = end_dynamic_table();
+ // Use the same DataRefImpl format as DynRef.
+ return library_iterator(LibraryRef(e->getRawDataRefImpl(), this));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+uint8_t ELFObjectFile<target_endianness, is64Bits>::getBytesInAddress() const {
+ return is64Bits ? 8 : 4;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+StringRef ELFObjectFile<target_endianness, is64Bits>
+ ::getFileFormatName() const {
+ switch(Header->e_ident[ELF::EI_CLASS]) {
+ case ELF::ELFCLASS32:
+ switch(Header->e_machine) {
+ case ELF::EM_386:
+ return "ELF32-i386";
+ case ELF::EM_X86_64:
+ return "ELF32-x86-64";
+ case ELF::EM_ARM:
+ return "ELF32-arm";
+ default:
+ return "ELF32-unknown";
+ }
+ case ELF::ELFCLASS64:
+ switch(Header->e_machine) {
+ case ELF::EM_386:
+ return "ELF64-i386";
+ case ELF::EM_X86_64:
+ return "ELF64-x86-64";
+ default:
+ return "ELF64-unknown";
+ }
+ default:
+ // FIXME: Proper error handling.
+ report_fatal_error("Invalid ELFCLASS!");
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+unsigned ELFObjectFile<target_endianness, is64Bits>::getArch() const {
+ switch(Header->e_machine) {
+ case ELF::EM_386:
+ return Triple::x86;
+ case ELF::EM_X86_64:
+ return Triple::x86_64;
+ case ELF::EM_ARM:
+ return Triple::arm;
+ default:
+ return Triple::UnknownArch;
+ }
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+uint64_t ELFObjectFile<target_endianness, is64Bits>::getNumSections() const {
+ assert(Header && "Header not initialized!");
+ if (Header->e_shnum == ELF::SHN_UNDEF) {
+ assert(SectionHeaderTable && "SectionHeaderTable not initialized!");
+ return SectionHeaderTable->sh_size;
+ }
+ return Header->e_shnum;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+uint64_t
+ELFObjectFile<target_endianness, is64Bits>::getStringTableIndex() const {
+ if (Header->e_shnum == ELF::SHN_UNDEF) {
+ if (Header->e_shstrndx == ELF::SHN_HIRESERVE)
+ return SectionHeaderTable->sh_link;
+ if (Header->e_shstrndx >= getNumSections())
+ return 0;
+ }
+ return Header->e_shstrndx;
+}
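Both accessors implement ELF "extended numbering": once the section count reaches SHN_LORESERVE (0xff00), e_shnum is set to 0 (SHN_UNDEF) and the real count is stored in section header 0's sh_size, and an overflowing e_shstrndx is parked at 0xffff with the real index in section header 0's sh_link. A standalone sketch of the count rule against plain <elf.h> types (hypothetical helper, not part of this API):

    #include <cstdint>
    #include <elf.h>
    // Returns the true section count for a 64-bit ELF image.
    uint64_t realSectionCount(const Elf64_Ehdr *EH, const Elf64_Shdr *SHT) {
      // e_shnum == 0 means the count overflowed; header 0 carries it instead.
      return EH->e_shnum ? EH->e_shnum : SHT[0].sh_size;
    }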
+
+
+template<support::endianness target_endianness, bool is64Bits>
+template<typename T>
+inline const T *
+ELFObjectFile<target_endianness, is64Bits>::getEntry(uint16_t Section,
+ uint32_t Entry) const {
+ return getEntry<T>(getSection(Section), Entry);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+template<typename T>
+inline const T *
+ELFObjectFile<target_endianness, is64Bits>::getEntry(const Elf_Shdr * Section,
+ uint32_t Entry) const {
+ return reinterpret_cast<const T *>(
+ base()
+ + Section->sh_offset
+ + (Entry * Section->sh_entsize));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Sym *
+ELFObjectFile<target_endianness, is64Bits>::getSymbol(DataRefImpl Symb) const {
+ return getEntry<Elf_Sym>(SymbolTableSections[Symb.d.b], Symb.d.a);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Dyn *
+ELFObjectFile<target_endianness, is64Bits>::getDyn(DataRefImpl DynData) const {
+ return getEntry<Elf_Dyn>(dot_dynamic_sec, DynData.d.a);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Rel *
+ELFObjectFile<target_endianness, is64Bits>::getRel(DataRefImpl Rel) const {
+ return getEntry<Elf_Rel>(Rel.w.b, Rel.w.c);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Rela *
+ELFObjectFile<target_endianness, is64Bits>::getRela(DataRefImpl Rela) const {
+ return getEntry<Elf_Rela>(Rela.w.b, Rela.w.c);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
+ELFObjectFile<target_endianness, is64Bits>::getSection(DataRefImpl Symb) const {
+ const Elf_Shdr *sec = getSection(Symb.d.b);
+ // Reject sections that are neither a symbol table nor a dynamic symbol table.
+ if (sec->sh_type != ELF::SHT_SYMTAB && sec->sh_type != ELF::SHT_DYNSYM)
+ // FIXME: Proper error handling.
+ report_fatal_error("Invalid symbol table section!");
+ return sec;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
+ELFObjectFile<target_endianness, is64Bits>::getSection(uint32_t index) const {
+ if (index == 0)
+ return 0;
+ if (!SectionHeaderTable || index >= getNumSections())
+ // FIXME: Proper error handling.
+ report_fatal_error("Invalid section index!");
+
+ return reinterpret_cast<const Elf_Shdr *>(
+ reinterpret_cast<const char *>(SectionHeaderTable)
+ + (index * Header->e_shentsize));
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const char *ELFObjectFile<target_endianness, is64Bits>
+ ::getString(uint32_t section,
+ ELF::Elf32_Word offset) const {
+ return getString(getSection(section), offset);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+const char *ELFObjectFile<target_endianness, is64Bits>
+ ::getString(const Elf_Shdr *section,
+ ELF::Elf32_Word offset) const {
+ assert(section && section->sh_type == ELF::SHT_STRTAB && "Invalid section!");
+ if (offset >= section->sh_size)
+ // FIXME: Proper error handling.
+ report_fatal_error("Symbol name offset outside of string table!");
+ return (const char *)base() + section->sh_offset + offset;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolName(const Elf_Shdr *section,
+ const Elf_Sym *symb,
+ StringRef &Result) const {
+ if (symb->st_name == 0) {
+ const Elf_Shdr *section = getSection(symb);
+ if (!section)
+ Result = "";
+ else
+ Result = getString(dot_shstrtab_sec, section->sh_name);
+ return object_error::success;
+ }
+
+ if (section == SymbolTableSections[0]) {
+ // Symbol is in .dynsym, use .dynstr string table
+ Result = getString(dot_dynstr_sec, symb->st_name);
+ } else {
+ // Use the default symbol table name section.
+ Result = getString(dot_strtab_sec, symb->st_name);
+ }
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolVersion(const Elf_Shdr *section,
+ const Elf_Sym *symb,
+ StringRef &Version,
+ bool &IsDefault) const {
+ // Handle non-dynamic symbols.
+ if (section != SymbolTableSections[0]) {
+ // Non-dynamic symbols can have versions in their names
+ // A name of the form 'foo@V1' indicates version 'V1', non-default.
+ // A name of the form 'foo@@V2' indicates version 'V2', default version.
+ StringRef Name;
+ error_code ec = getSymbolName(section, symb, Name);
+ if (ec != object_error::success)
+ return ec;
+ size_t atpos = Name.find('@');
+ if (atpos == StringRef::npos) {
+ Version = "";
+ IsDefault = false;
+ return object_error::success;
+ }
+ ++atpos;
+ if (atpos < Name.size() && Name[atpos] == '@') {
+ IsDefault = true;
+ ++atpos;
+ } else {
+ IsDefault = false;
+ }
+ Version = Name.substr(atpos);
+ return object_error::success;
+ }
+
+ // This is a dynamic symbol. Look in the GNU symbol version table.
+ if (dot_gnu_version_sec == NULL) {
+ // No version table.
+ Version = "";
+ IsDefault = false;
+ return object_error::success;
+ }
+
+ // Determine the position in the symbol table of this entry.
+ const char *sec_start = (const char*)base() + section->sh_offset;
+ size_t entry_index = ((const char*)symb - sec_start)/section->sh_entsize;
+
+ // Get the corresponding version index entry
+ const Elf_Versym *vs = getEntry<Elf_Versym>(dot_gnu_version_sec, entry_index);
+ size_t version_index = vs->vs_index & ELF::VERSYM_VERSION;
+
+ // Special markers for unversioned symbols.
+ if (version_index == ELF::VER_NDX_LOCAL ||
+ version_index == ELF::VER_NDX_GLOBAL) {
+ Version = "";
+ IsDefault = false;
+ return object_error::success;
+ }
+
+ // Look up this symbol in the version table.
+ LoadVersionMap();
+ if (version_index >= VersionMap.size() || VersionMap[version_index].isNull())
+ report_fatal_error("Symbol has version index without corresponding "
+ "define or reference entry");
+ const VersionMapEntry &entry = VersionMap[version_index];
+
+ // Get the version name string
+ size_t name_offset;
+ if (entry.isVerdef()) {
+ // The first Verdaux entry holds the name.
+ name_offset = entry.getVerdef()->getAux()->vda_name;
+ } else {
+ name_offset = entry.getVernaux()->vna_name;
+ }
+ Version = getString(dot_dynstr_sec, name_offset);
+
+ // Set IsDefault
+ if (entry.isVerdef()) {
+ IsDefault = !(vs->vs_index & ELF::VERSYM_HIDDEN);
+ } else {
+ IsDefault = false;
+ }
+
+ return object_error::success;
+}
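The name-encoded case (the first branch) follows the GNU convention that "foo@V1" names a non-default version V1 and "foo@@V2" names the default version V2; dynamic symbols instead go through .gnu.version into the verdef/verneed tables. A standalone sketch of just the parsing rule, independent of the LLVM types (hypothetical helper):

    #include <string>
    struct NameVersion { std::string Version; bool IsDefault; };
    NameVersion parseNameVersion(const std::string &Name) {
      size_t At = Name.find('@');
      if (At == std::string::npos)
        return NameVersion{"", false};   // unversioned
      bool Default = At + 1 < Name.size() && Name[At + 1] == '@';
      return NameVersion{Name.substr(At + (Default ? 2 : 1)), Default};
    }
    // parseNameVersion("foo@@V2") -> {"V2", true}
    // parseNameVersion("foo@V1")  -> {"V1", false}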
+
+template<support::endianness target_endianness, bool is64Bits>
+inline DynRefImpl<target_endianness, is64Bits>
+ ::DynRefImpl(DataRefImpl DynP, const OwningType *Owner)
+ : DynPimpl(DynP)
+ , OwningObject(Owner) {}
+
+template<support::endianness target_endianness, bool is64Bits>
+inline bool DynRefImpl<target_endianness, is64Bits>
+ ::operator==(const DynRefImpl &Other) const {
+ return DynPimpl == Other.DynPimpl;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+inline bool DynRefImpl<target_endianness, is64Bits>
+ ::operator <(const DynRefImpl &Other) const {
+ return DynPimpl < Other.DynPimpl;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+inline error_code DynRefImpl<target_endianness, is64Bits>
+ ::getNext(DynRefImpl &Result) const {
+ return OwningObject->getDynNext(DynPimpl, Result);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+inline int64_t DynRefImpl<target_endianness, is64Bits>
+ ::getTag() const {
+ return OwningObject->getDyn(DynPimpl)->d_tag;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+inline uint64_t DynRefImpl<target_endianness, is64Bits>
+ ::getVal() const {
+ return OwningObject->getDyn(DynPimpl)->d_un.d_val;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+inline uint64_t DynRefImpl<target_endianness, is64Bits>
+ ::getPtr() const {
+ return OwningObject->getDyn(DynPimpl)->d_un.d_ptr;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+inline DataRefImpl DynRefImpl<target_endianness, is64Bits>
+ ::getRawDataRefImpl() const {
+ return DynPimpl;
+}
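Together with getDynNext above, these accessors support a simple forward walk of .dynamic. A sketch, assuming the dyn_iterator members are public on the concrete instantiation (as their use in getLoadName suggests) and that raw_ostream/system_error headers are available:

    typedef ELFObjectFile<support::little, true> ELF64LE;
    void dumpDynamicTable(const ELF64LE &O) {
      error_code ec;
      for (ELF64LE::dyn_iterator I = O.begin_dynamic_table(),
                                 E = O.end_dynamic_table();
           I != E; I.increment(ec)) {
        if (ec)
          break;                         // iteration failed
        outs() << "tag " << I->getTag() << " val " << I->getVal() << "\n";
      }
    }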
+
+/// This is a generic interface for retrieving GNU symbol version
+/// information from an ELFObjectFile.
+static inline error_code GetELFSymbolVersion(const ObjectFile *Obj,
+ const SymbolRef &Sym,
+ StringRef &Version,
+ bool &IsDefault) {
+ // Little-endian 32-bit
+ if (const ELFObjectFile<support::little, false> *ELFObj =
+ dyn_cast<ELFObjectFile<support::little, false> >(Obj))
+ return ELFObj->getSymbolVersion(Sym, Version, IsDefault);
+
+ // Big-endian 32-bit
+ if (const ELFObjectFile<support::big, false> *ELFObj =
+ dyn_cast<ELFObjectFile<support::big, false> >(Obj))
+ return ELFObj->getSymbolVersion(Sym, Version, IsDefault);
+
+ // Little-endian 64-bit
+ if (const ELFObjectFile<support::little, true> *ELFObj =
+ dyn_cast<ELFObjectFile<support::little, true> >(Obj))
+ return ELFObj->getSymbolVersion(Sym, Version, IsDefault);
+
+ // Big-endian 64-bit
+ if (const ELFObjectFile<support::big, true> *ELFObj =
+ dyn_cast<ELFObjectFile<support::big, true> >(Obj))
+ return ELFObj->getSymbolVersion(Sym, Version, IsDefault);
+
+ llvm_unreachable("Object passed to GetELFSymbolVersion() is not ELF");
+}
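Usage is straightforward: the helper hides the four template instantiations behind a single call. A sketch, assuming `Obj` is known to be an ELF ObjectFile and `Sym` one of its symbols:

    StringRef Version;
    bool IsDefault = false;
    if (error_code ec = GetELFSymbolVersion(Obj, Sym, Version, IsDefault))
      report_fatal_error(ec.message());
    if (!Version.empty())
      outs() << (IsDefault ? "@@" : "@") << Version << "\n";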
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/MachO.h b/contrib/llvm/include/llvm/Object/MachO.h
index f5e7461..0b73f94 100644
--- a/contrib/llvm/include/llvm/Object/MachO.h
+++ b/contrib/llvm/include/llvm/Object/MachO.h
@@ -18,6 +18,7 @@
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/MachOObject.h"
#include "llvm/Support/MachO.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallVector.h"
namespace llvm {
@@ -31,23 +32,36 @@ public:
virtual symbol_iterator begin_symbols() const;
virtual symbol_iterator end_symbols() const;
+ virtual symbol_iterator begin_dynamic_symbols() const;
+ virtual symbol_iterator end_dynamic_symbols() const;
+ virtual library_iterator begin_libraries_needed() const;
+ virtual library_iterator end_libraries_needed() const;
virtual section_iterator begin_sections() const;
virtual section_iterator end_sections() const;
virtual uint8_t getBytesInAddress() const;
virtual StringRef getFileFormatName() const;
virtual unsigned getArch() const;
+ virtual StringRef getLoadName() const;
+
+ MachOObject *getObject() { return MachOObj; }
+
+ static inline bool classof(const Binary *v) {
+ return v->isMachO();
+ }
+ static inline bool classof(const MachOObjectFile *v) { return true; }
protected:
virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const;
virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const;
- virtual error_code getSymbolOffset(DataRefImpl Symb, uint64_t &Res) const;
+ virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res) const;
virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const;
virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const;
virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const;
- virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const;
- virtual error_code isSymbolGlobal(DataRefImpl Symb, bool &Res) const;
- virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::SymbolType &Res) const;
+ virtual error_code getSymbolFlags(DataRefImpl Symb, uint32_t &Res) const;
+ virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
+ virtual error_code getSymbolSection(DataRefImpl Symb,
+ section_iterator &Res) const;
virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
@@ -58,6 +72,10 @@ protected:
virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionData(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionRequiredForExecution(DataRefImpl Sec,
+ bool &Res) const;
+ virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
virtual error_code sectionContainsSymbol(DataRefImpl DRI, DataRefImpl S,
bool &Result) const;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const;
@@ -67,16 +85,22 @@ protected:
RelocationRef &Res) const;
virtual error_code getRelocationAddress(DataRefImpl Rel,
uint64_t &Res) const;
+ virtual error_code getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Res) const;
virtual error_code getRelocationSymbol(DataRefImpl Rel,
SymbolRef &Res) const;
virtual error_code getRelocationType(DataRefImpl Rel,
- uint32_t &Res) const;
+ uint64_t &Res) const;
virtual error_code getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const;
virtual error_code getRelocationAdditionalInfo(DataRefImpl Rel,
int64_t &Res) const;
virtual error_code getRelocationValueString(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const;
+ virtual error_code getRelocationHidden(DataRefImpl Rel, bool &Result) const;
+
+ virtual error_code getLibraryNext(DataRefImpl LibData, LibraryRef &Res) const;
+ virtual error_code getLibraryPath(DataRefImpl LibData, StringRef &Res) const;
private:
MachOObject *MachOObj;
@@ -97,6 +121,9 @@ private:
void getRelocation(DataRefImpl Rel,
InMemoryStruct<macho::RelocationEntry> &Res) const;
std::size_t getSectionIndex(DataRefImpl Sec) const;
+
+ void printRelocationTargetName(InMemoryStruct<macho::RelocationEntry>& RE,
+ raw_string_ostream &fmt) const;
};
}
diff --git a/contrib/llvm/include/llvm/Object/MachOObject.h b/contrib/llvm/include/llvm/Object/MachOObject.h
index 51be847..0560402 100644
--- a/contrib/llvm/include/llvm/Object/MachOObject.h
+++ b/contrib/llvm/include/llvm/Object/MachOObject.h
@@ -177,14 +177,14 @@ public:
void ReadULEB128s(uint64_t Index, SmallVectorImpl<uint64_t> &Out) const;
/// @}
-
+
/// @name Object Dump Facilities
/// @{
/// dump - Support for debugging, callable in GDB: V->dump()
//
void dump() const;
void dumpHeader() const;
-
+
/// print - Implement operator<< on Value.
///
void print(raw_ostream &O) const;
@@ -192,7 +192,7 @@ public:
/// @}
};
-
+
inline raw_ostream &operator<<(raw_ostream &OS, const MachOObject &V) {
V.print(OS);
return OS;
diff --git a/contrib/llvm/include/llvm/Object/ObjectFile.h b/contrib/llvm/include/llvm/Object/ObjectFile.h
index 83854a0..4dd7fb5 100644
--- a/contrib/llvm/include/llvm/Object/ObjectFile.h
+++ b/contrib/llvm/include/llvm/Object/ObjectFile.h
@@ -20,6 +20,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstring>
+#include <vector>
namespace llvm {
namespace object {
@@ -37,6 +38,9 @@ union DataRefImpl {
uint32_t a, b;
} d;
uintptr_t p;
+ DataRefImpl() {
+ std::memset(this, 0, sizeof(DataRefImpl));
+ }
};
template<class content_type>
@@ -78,52 +82,13 @@ static bool operator ==(const DataRefImpl &a, const DataRefImpl &b) {
return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
}
-/// SymbolRef - This is a value type class that represents a single symbol in
-/// the list of symbols in the object file.
-class SymbolRef {
- friend class SectionRef;
- DataRefImpl SymbolPimpl;
- const ObjectFile *OwningObject;
-
-public:
- SymbolRef() : OwningObject(NULL) {
- std::memset(&SymbolPimpl, 0, sizeof(SymbolPimpl));
- }
-
- enum SymbolType {
- ST_Function,
- ST_Data,
- ST_External, // Defined in another object file
- ST_Other
- };
-
- SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
-
- bool operator==(const SymbolRef &Other) const;
-
- error_code getNext(SymbolRef &Result) const;
-
- error_code getName(StringRef &Result) const;
- error_code getAddress(uint64_t &Result) const;
- error_code getOffset(uint64_t &Result) const;
- error_code getSize(uint64_t &Result) const;
- error_code getSymbolType(SymbolRef::SymbolType &Result) const;
-
- /// Returns the ascii char that should be displayed in a symbol table dump via
- /// nm for this symbol.
- error_code getNMTypeChar(char &Result) const;
-
- /// Returns true for symbols that are internal to the object file format such
- /// as section symbols.
- error_code isInternal(bool &Result) const;
-
- /// Returns true for symbols that can be used in another objects,
- /// such as library functions
- error_code isGlobal(bool &Result) const;
+static bool operator <(const DataRefImpl &a, const DataRefImpl &b) {
+ // Check for bitwise identity. This is the only legal way to compare a
+ // union without knowing which member is in use.
+ return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0;
+}
- DataRefImpl getRawDataRefImpl() const;
-};
-typedef content_iterator<SymbolRef> symbol_iterator;
+class SymbolRef;
/// RelocationRef - This is a value type class that represents a single
/// relocation in the list of relocations in the object file.
@@ -132,9 +97,7 @@ class RelocationRef {
const ObjectFile *OwningObject;
public:
- RelocationRef() : OwningObject(NULL) {
- std::memset(&RelocationPimpl, 0, sizeof(RelocationPimpl));
- }
+ RelocationRef() : OwningObject(NULL) { }
RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner);
@@ -143,8 +106,14 @@ public:
error_code getNext(RelocationRef &Result) const;
error_code getAddress(uint64_t &Result) const;
+ error_code getOffset(uint64_t &Result) const;
error_code getSymbol(SymbolRef &Result) const;
- error_code getType(uint32_t &Result) const;
+ error_code getType(uint64_t &Result) const;
+
+ /// @brief Indicates whether this relocation should be hidden when listing
+ /// relocations, usually because it is the trailing part of a multipart
+ /// relocation that will be printed as part of the leading relocation.
+ error_code getHidden(bool &Result) const;
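A dumper is expected to consult getHidden() and fold the trailing parts of multi-part relocations into the leading entry. A minimal sketch of that loop (type-name printing only; `Sec` is an assumed SectionRef):

    error_code ec;
    for (relocation_iterator I = Sec.begin_relocations(),
                             E = Sec.end_relocations();
         I != E; I.increment(ec)) {
      bool Hidden = false;
      I->getHidden(Hidden);              // the base class defaults to false
      if (Hidden)
        continue;                        // printed with the leading entry
      SmallString<32> Name;
      I->getTypeName(Name);
      outs() << Name << "\n";
    }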
/// @brief Get a string that represents the type of this relocation.
///
@@ -168,13 +137,12 @@ class SectionRef {
const ObjectFile *OwningObject;
public:
- SectionRef() : OwningObject(NULL) {
- std::memset(&SectionPimpl, 0, sizeof(SectionPimpl));
- }
+ SectionRef() : OwningObject(NULL) { }
SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
bool operator==(const SectionRef &Other) const;
+ bool operator <(const SectionRef &Other) const;
error_code getNext(SectionRef &Result) const;
@@ -190,21 +158,112 @@ public:
error_code isText(bool &Result) const;
error_code isData(bool &Result) const;
error_code isBSS(bool &Result) const;
+ error_code isRequiredForExecution(bool &Result) const;
+ error_code isVirtual(bool &Result) const;
+ error_code isZeroInit(bool &Result) const;
error_code containsSymbol(SymbolRef S, bool &Result) const;
relocation_iterator begin_relocations() const;
relocation_iterator end_relocations() const;
+
+ DataRefImpl getRawDataRefImpl() const;
};
typedef content_iterator<SectionRef> section_iterator;
+/// SymbolRef - This is a value type class that represents a single symbol in
+/// the list of symbols in the object file.
+class SymbolRef {
+ friend class SectionRef;
+ DataRefImpl SymbolPimpl;
+ const ObjectFile *OwningObject;
+
+public:
+ SymbolRef() : OwningObject(NULL) { }
+
+ enum Type {
+ ST_Unknown, // Type not specified
+ ST_Data,
+ ST_Debug,
+ ST_File,
+ ST_Function,
+ ST_Other
+ };
+
+ enum Flags {
+ SF_None = 0,
+ SF_Undefined = 1U << 0, // Symbol is defined in another object file
+ SF_Global = 1U << 1, // Global symbol
+ SF_Weak = 1U << 2, // Weak symbol
+ SF_Absolute = 1U << 3, // Absolute symbol
+ SF_ThreadLocal = 1U << 4, // Thread local symbol
+ SF_Common = 1U << 5, // Symbol has common linkage
+ SF_FormatSpecific = 1U << 31 // Specific to the object file format
+ // (e.g. section symbols)
+ };
+
+ SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
+
+ bool operator==(const SymbolRef &Other) const;
+ bool operator <(const SymbolRef &Other) const;
+
+ error_code getNext(SymbolRef &Result) const;
+
+ error_code getName(StringRef &Result) const;
+ error_code getAddress(uint64_t &Result) const;
+ error_code getFileOffset(uint64_t &Result) const;
+ error_code getSize(uint64_t &Result) const;
+ error_code getType(SymbolRef::Type &Result) const;
+
+ /// Returns the ascii char that should be displayed in a symbol table dump via
+ /// nm for this symbol.
+ error_code getNMTypeChar(char &Result) const;
+
+ /// Get symbol flags (bitwise OR of SymbolRef::Flags)
+ error_code getFlags(uint32_t &Result) const;
+
+ /// @brief Return true for common symbols such as uninitialized globals
+ error_code isCommon(bool &Result) const;
+
+ /// @brief Get the section this symbol is defined in. Result is
+ /// end_sections() if it is undefined or is an absolute symbol.
+ error_code getSection(section_iterator &Result) const;
+
+ DataRefImpl getRawDataRefImpl() const;
+};
+typedef content_iterator<SymbolRef> symbol_iterator;
+
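The flag bits replace the old isInternal/isGlobal predicates with one bitmask query. A sketch of an nm-style loop using them, assuming `Obj` is an ObjectFile* (error handling elided):

    error_code ec;
    for (symbol_iterator I = Obj->begin_symbols(), E = Obj->end_symbols();
         I != E; I.increment(ec)) {
      uint32_t Flags = SymbolRef::SF_None;
      I->getFlags(Flags);
      if (Flags & SymbolRef::SF_FormatSpecific)
        continue;                        // e.g. section symbols
      StringRef Name;
      I->getName(Name);
      outs() << ((Flags & SymbolRef::SF_Undefined) ? "U " : "  ")
             << Name << "\n";
    }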
+/// LibraryRef - This is a value type class that represents a single library in
+/// the list of libraries needed by a shared or dynamic object.
+class LibraryRef {
+ friend class SectionRef;
+ DataRefImpl LibraryPimpl;
+ const ObjectFile *OwningObject;
+
+public:
+ LibraryRef() : OwningObject(NULL) { }
+
+ LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner);
+
+ bool operator==(const LibraryRef &Other) const;
+ bool operator <(const LibraryRef &Other) const;
+
+ error_code getNext(LibraryRef &Result) const;
+
+ // Get the path to this library, as stored in the object file.
+ error_code getPath(StringRef &Result) const;
+
+ DataRefImpl getRawDataRefImpl() const;
+};
+typedef content_iterator<LibraryRef> library_iterator;
+
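LibraryRef gives format-independent access to a binary's dependency list (DT_NEEDED entries on ELF, load commands on MachO). A sketch of enumerating them, again assuming `Obj` is an ObjectFile* (error handling elided):

    error_code ec;
    for (library_iterator I = Obj->begin_libraries_needed(),
                          E = Obj->end_libraries_needed();
         I != E; I.increment(ec)) {
      StringRef Path;
      I->getPath(Path);
      outs() << "needed: " << Path << "\n";
    }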
const uint64_t UnknownAddressOrSize = ~0ULL;
/// ObjectFile - This class is the base class for all object file types.
/// Concrete instances of this object are created by createObjectFile, which
/// figure out which type to create.
class ObjectFile : public Binary {
-private:
+ virtual void anchor();
ObjectFile(); // = delete
ObjectFile(const ObjectFile &other); // = delete
@@ -227,12 +286,15 @@ protected:
virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const = 0;
virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const = 0;
virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const =0;
- virtual error_code getSymbolOffset(DataRefImpl Symb, uint64_t &Res) const =0;
+ virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res) const =0;
virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const = 0;
+ virtual error_code getSymbolType(DataRefImpl Symb,
+ SymbolRef::Type &Res) const = 0;
virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const = 0;
- virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const = 0;
- virtual error_code isSymbolGlobal(DataRefImpl Symb, bool &Res) const = 0;
- virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::SymbolType &Res) const = 0;
+ virtual error_code getSymbolFlags(DataRefImpl Symb,
+ uint32_t &Res) const = 0;
+ virtual error_code getSymbolSection(DataRefImpl Symb,
+ section_iterator &Res) const = 0;
// Same as above for SectionRef.
friend class SectionRef;
@@ -245,6 +307,11 @@ protected:
virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const = 0;
virtual error_code isSectionData(DataRefImpl Sec, bool &Res) const = 0;
virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const = 0;
+ virtual error_code isSectionRequiredForExecution(DataRefImpl Sec,
+ bool &Res) const = 0;
+ // A section is 'virtual' if its contents aren't present in the object image.
+ virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const = 0;
+ virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const = 0;
virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
bool &Result) const = 0;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const = 0;
@@ -257,25 +324,42 @@ protected:
RelocationRef &Res) const = 0;
virtual error_code getRelocationAddress(DataRefImpl Rel,
uint64_t &Res) const =0;
+ virtual error_code getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Res) const =0;
virtual error_code getRelocationSymbol(DataRefImpl Rel,
SymbolRef &Res) const = 0;
virtual error_code getRelocationType(DataRefImpl Rel,
- uint32_t &Res) const = 0;
+ uint64_t &Res) const = 0;
virtual error_code getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const = 0;
virtual error_code getRelocationAdditionalInfo(DataRefImpl Rel,
int64_t &Res) const = 0;
virtual error_code getRelocationValueString(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const = 0;
+ virtual error_code getRelocationHidden(DataRefImpl Rel, bool &Result) const {
+ Result = false;
+ return object_error::success;
+ }
+
+ // Same for LibraryRef
+ friend class LibraryRef;
+ virtual error_code getLibraryNext(DataRefImpl Lib, LibraryRef &Res) const = 0;
+ virtual error_code getLibraryPath(DataRefImpl Lib, StringRef &Res) const = 0;
public:
virtual symbol_iterator begin_symbols() const = 0;
virtual symbol_iterator end_symbols() const = 0;
+ virtual symbol_iterator begin_dynamic_symbols() const = 0;
+ virtual symbol_iterator end_dynamic_symbols() const = 0;
+
virtual section_iterator begin_sections() const = 0;
virtual section_iterator end_sections() const = 0;
+ virtual library_iterator begin_libraries_needed() const = 0;
+ virtual library_iterator end_libraries_needed() const = 0;
+
/// @brief The number of bytes used to represent an address in this object
/// file format.
virtual uint8_t getBytesInAddress() const = 0;
@@ -283,6 +367,11 @@ public:
virtual StringRef getFileFormatName() const = 0;
virtual /* Triple::ArchType */ unsigned getArch() const = 0;
+ /// For shared objects, returns the name which this object should be
+ /// loaded from at runtime. This corresponds to DT_SONAME on ELF and
+ /// LC_ID_DYLIB (install name) on MachO.
+ virtual StringRef getLoadName() const = 0;
+
/// @returns Pointer to ObjectFile subclass to handle this type of object.
/// @param ObjectPath The path to the object file. ObjectPath.isObject must
/// return true.
@@ -291,8 +380,7 @@ public:
static ObjectFile *createObjectFile(MemoryBuffer *Object);
static inline bool classof(const Binary *v) {
- return v->getType() >= isObject &&
- v->getType() < lastObject;
+ return v->isObject();
}
static inline bool classof(const ObjectFile *v) { return true; }
@@ -311,6 +399,10 @@ inline bool SymbolRef::operator==(const SymbolRef &Other) const {
return SymbolPimpl == Other.SymbolPimpl;
}
+inline bool SymbolRef::operator <(const SymbolRef &Other) const {
+ return SymbolPimpl < Other.SymbolPimpl;
+}
+
inline error_code SymbolRef::getNext(SymbolRef &Result) const {
return OwningObject->getSymbolNext(SymbolPimpl, Result);
}
@@ -323,8 +415,8 @@ inline error_code SymbolRef::getAddress(uint64_t &Result) const {
return OwningObject->getSymbolAddress(SymbolPimpl, Result);
}
-inline error_code SymbolRef::getOffset(uint64_t &Result) const {
- return OwningObject->getSymbolOffset(SymbolPimpl, Result);
+inline error_code SymbolRef::getFileOffset(uint64_t &Result) const {
+ return OwningObject->getSymbolFileOffset(SymbolPimpl, Result);
}
inline error_code SymbolRef::getSize(uint64_t &Result) const {
@@ -335,15 +427,15 @@ inline error_code SymbolRef::getNMTypeChar(char &Result) const {
return OwningObject->getSymbolNMTypeChar(SymbolPimpl, Result);
}
-inline error_code SymbolRef::isInternal(bool &Result) const {
- return OwningObject->isSymbolInternal(SymbolPimpl, Result);
+inline error_code SymbolRef::getFlags(uint32_t &Result) const {
+ return OwningObject->getSymbolFlags(SymbolPimpl, Result);
}
-inline error_code SymbolRef::isGlobal(bool &Result) const {
- return OwningObject->isSymbolGlobal(SymbolPimpl, Result);
+inline error_code SymbolRef::getSection(section_iterator &Result) const {
+ return OwningObject->getSymbolSection(SymbolPimpl, Result);
}
-inline error_code SymbolRef::getSymbolType(SymbolRef::SymbolType &Result) const {
+inline error_code SymbolRef::getType(SymbolRef::Type &Result) const {
return OwningObject->getSymbolType(SymbolPimpl, Result);
}
@@ -362,6 +454,10 @@ inline bool SectionRef::operator==(const SectionRef &Other) const {
return SectionPimpl == Other.SectionPimpl;
}
+inline bool SectionRef::operator <(const SectionRef &Other) const {
+ return SectionPimpl < Other.SectionPimpl;
+}
+
inline error_code SectionRef::getNext(SectionRef &Result) const {
return OwningObject->getSectionNext(SectionPimpl, Result);
}
@@ -398,6 +494,18 @@ inline error_code SectionRef::isBSS(bool &Result) const {
return OwningObject->isSectionBSS(SectionPimpl, Result);
}
+inline error_code SectionRef::isRequiredForExecution(bool &Result) const {
+ return OwningObject->isSectionRequiredForExecution(SectionPimpl, Result);
+}
+
+inline error_code SectionRef::isVirtual(bool &Result) const {
+ return OwningObject->isSectionVirtual(SectionPimpl, Result);
+}
+
+inline error_code SectionRef::isZeroInit(bool &Result) const {
+ return OwningObject->isSectionZeroInit(SectionPimpl, Result);
+}
+
inline error_code SectionRef::containsSymbol(SymbolRef S, bool &Result) const {
return OwningObject->sectionContainsSymbol(SectionPimpl, S.SymbolPimpl,
Result);
@@ -411,6 +519,9 @@ inline relocation_iterator SectionRef::end_relocations() const {
return OwningObject->getSectionRelEnd(SectionPimpl);
}
+inline DataRefImpl SectionRef::getRawDataRefImpl() const {
+ return SectionPimpl;
+}
/// RelocationRef
inline RelocationRef::RelocationRef(DataRefImpl RelocationP,
@@ -430,11 +541,15 @@ inline error_code RelocationRef::getAddress(uint64_t &Result) const {
return OwningObject->getRelocationAddress(RelocationPimpl, Result);
}
+inline error_code RelocationRef::getOffset(uint64_t &Result) const {
+ return OwningObject->getRelocationOffset(RelocationPimpl, Result);
+}
+
inline error_code RelocationRef::getSymbol(SymbolRef &Result) const {
return OwningObject->getRelocationSymbol(RelocationPimpl, Result);
}
-inline error_code RelocationRef::getType(uint32_t &Result) const {
+inline error_code RelocationRef::getType(uint64_t &Result) const {
return OwningObject->getRelocationType(RelocationPimpl, Result);
}
@@ -452,6 +567,30 @@ inline error_code RelocationRef::getValueString(SmallVectorImpl<char> &Result)
return OwningObject->getRelocationValueString(RelocationPimpl, Result);
}
+inline error_code RelocationRef::getHidden(bool &Result) const {
+ return OwningObject->getRelocationHidden(RelocationPimpl, Result);
+}
+// LibraryRef inline function definitions.
+inline LibraryRef::LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner)
+ : LibraryPimpl(LibraryP)
+ , OwningObject(Owner) {}
+
+inline bool LibraryRef::operator==(const LibraryRef &Other) const {
+ return LibraryPimpl == Other.LibraryPimpl;
+}
+
+inline bool LibraryRef::operator <(const LibraryRef &Other) const {
+ return LibraryPimpl < Other.LibraryPimpl;
+}
+
+inline error_code LibraryRef::getNext(LibraryRef &Result) const {
+ return OwningObject->getLibraryNext(LibraryPimpl, Result);
+}
+
+inline error_code LibraryRef::getPath(StringRef &Result) const {
+ return OwningObject->getLibraryPath(LibraryPimpl, Result);
+}
+
} // end namespace object
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Operator.h b/contrib/llvm/include/llvm/Operator.h
index 48a5796..abd6a19 100644
--- a/contrib/llvm/include/llvm/Operator.h
+++ b/contrib/llvm/include/llvm/Operator.h
@@ -261,8 +261,8 @@ public:
/// getPointerOperandType - Method to return the pointer operand as a
/// PointerType.
- PointerType *getPointerOperandType() const {
- return reinterpret_cast<PointerType*>(getPointerOperand()->getType());
+ Type *getPointerOperandType() const {
+ return getPointerOperand()->getType();
}
unsigned getNumIndices() const { // Note: always non-negative
diff --git a/contrib/llvm/include/llvm/Pass.h b/contrib/llvm/include/llvm/Pass.h
index 04dd8b6..888537d 100644
--- a/contrib/llvm/include/llvm/Pass.h
+++ b/contrib/llvm/include/llvm/Pass.h
@@ -53,7 +53,7 @@ typedef const void* AnalysisID;
/// Ordering of pass manager types is important here.
enum PassManagerType {
PMT_Unknown = 0,
- PMT_ModulePassManager = 1, ///< MPPassManager
+ PMT_ModulePassManager = 1, ///< MPPassManager
PMT_CallGraphPassManager, ///< CGPassManager
PMT_FunctionPassManager, ///< FPPassManager
PMT_LoopPassManager, ///< LPPassManager
@@ -84,14 +84,14 @@ class Pass {
PassKind Kind;
void operator=(const Pass&); // DO NOT IMPLEMENT
Pass(const Pass &); // DO NOT IMPLEMENT
-
+
public:
- explicit Pass(PassKind K, char &pid);
+ explicit Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { }
virtual ~Pass();
-
+
PassKind getPassKind() const { return Kind; }
-
+
/// getPassName - Return a nice clean name for a pass. This usually
/// implemented in terms of the name that is registered by one of the
/// Registration templates, but can be overloaded directly.
@@ -99,7 +99,7 @@ public:
virtual const char *getPassName() const;
/// getPassID - Return the PassID number that corresponds to this pass.
- virtual AnalysisID getPassID() const {
+ AnalysisID getPassID() const {
return PassID;
}
@@ -119,12 +119,12 @@ public:
const std::string &Banner) const = 0;
/// Each pass is responsible for assigning a pass manager to itself.
- /// PMS is the stack of available pass manager.
- virtual void assignPassManager(PMStack &,
+ /// PMS is the stack of available pass manager.
+ virtual void assignPassManager(PMStack &,
PassManagerType) {}
/// Check if available pass managers are suitable for this pass or not.
virtual void preparePassManager(PMStack &);
-
+
/// Return what kind of Pass Manager can manage this pass.
virtual PassManagerType getPotentialPassManagerType() const;
@@ -159,9 +159,9 @@ public:
virtual void *getAdjustedAnalysisPointer(AnalysisID ID);
virtual ImmutablePass *getAsImmutablePass();
virtual PMDataManager *getAsPMDataManager();
-
+
/// verifyAnalysis() - This member can be implemented by a analysis pass to
- /// check state of analysis information.
+ /// check state of analysis information.
virtual void verifyAnalysis() const;
// dumpPassStructure - Implement the -debug-passes=PassStructure option
@@ -175,6 +175,10 @@ public:
// argument string, or null if it is not known.
static const PassInfo *lookupPassInfo(StringRef Arg);
+ // createPass - Create an object for the specified pass class,
+ // or null if it is not known.
+ static Pass *createPass(AnalysisID ID);
+
/// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
/// get analysis information that might be around, for example to update it.
/// This is different than getAnalysis in that it can fail (if the analysis
@@ -226,7 +230,7 @@ public:
/// being operated on.
virtual bool runOnModule(Module &M) = 0;
- virtual void assignPassManager(PMStack &PMS,
+ virtual void assignPassManager(PMStack &PMS,
PassManagerType T);
/// Return what kind of Pass Manager can manage this pass.
@@ -259,9 +263,9 @@ public:
///
bool runOnModule(Module &) { return false; }
- explicit ImmutablePass(char &pid)
+ explicit ImmutablePass(char &pid)
: ModulePass(pid) {}
-
+
// Force out-of-line virtual method.
virtual ~ImmutablePass();
};
@@ -286,7 +290,7 @@ public:
/// any necessary per-module initialization.
///
virtual bool doInitialization(Module &);
-
+
/// runOnFunction - Virtual method overriden by subclasses to do the
/// per-function processing of the pass.
///
@@ -297,7 +301,7 @@ public:
///
virtual bool doFinalization(Module &);
- virtual void assignPassManager(PMStack &PMS,
+ virtual void assignPassManager(PMStack &PMS,
PassManagerType T);
/// Return what kind of Pass Manager can manage this pass.
@@ -348,7 +352,7 @@ public:
///
virtual bool doFinalization(Module &);
- virtual void assignPassManager(PMStack &PMS,
+ virtual void assignPassManager(PMStack &PMS,
PassManagerType T);
/// Return what kind of Pass Manager can manage this pass.
diff --git a/contrib/llvm/include/llvm/PassAnalysisSupport.h b/contrib/llvm/include/llvm/PassAnalysisSupport.h
index fede121..5c6a2d7 100644
--- a/contrib/llvm/include/llvm/PassAnalysisSupport.h
+++ b/contrib/llvm/include/llvm/PassAnalysisSupport.h
@@ -19,6 +19,7 @@
#ifndef LLVM_PASS_ANALYSIS_SUPPORT_H
#define LLVM_PASS_ANALYSIS_SUPPORT_H
+#include "llvm/Pass.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <vector>
diff --git a/contrib/llvm/include/llvm/PassManager.h b/contrib/llvm/include/llvm/PassManager.h
index c8b5dca..ce5fda7 100644
--- a/contrib/llvm/include/llvm/PassManager.h
+++ b/contrib/llvm/include/llvm/PassManager.h
@@ -53,17 +53,13 @@ public:
/// will be destroyed as well, so there is no need to delete the pass. This
/// implies that all passes MUST be allocated with 'new'.
void add(Pass *P);
-
+
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool run(Module &M);
private:
- /// addImpl - Add a pass to the queue of passes to run, without
- /// checking whether to add a printer pass.
- void addImpl(Pass *P);
-
- /// PassManagerImpl_New is the actual class. PassManager is just the
+ /// PassManagerImpl_New is the actual class. PassManager is just the
/// wrapper to publish the simple pass manager interface
PassManagerImpl *PM;
};
@@ -75,11 +71,11 @@ public:
/// but does not take ownership of, the specified Module.
explicit FunctionPassManager(Module *M);
~FunctionPassManager();
-
+
/// add - Add a pass to the queue of passes to run. This passes
/// ownership of the Pass to the PassManager. When the
/// PassManager_X is destroyed, the pass will be destroyed as well, so
- /// there is no need to delete the pass. (TODO delete passes.)
+ /// there is no need to delete the pass.
/// This implies that all passes MUST be allocated with 'new'.
void add(Pass *P);
@@ -88,20 +84,16 @@ public:
/// so, return true.
///
bool run(Function &F);
-
+
/// doInitialization - Run all of the initializers for the function passes.
///
bool doInitialization();
-
+
/// doFinalization - Run all of the finalizers for the function passes.
///
bool doFinalization();
-
-private:
- /// addImpl - Add a pass to the queue of passes to run, without
- /// checking whether to add a printer pass.
- void addImpl(Pass *P);
+private:
FunctionPassManagerImpl *FPM;
Module *M;
};
diff --git a/contrib/llvm/include/llvm/PassManagers.h b/contrib/llvm/include/llvm/PassManagers.h
index c05347d..fa29f50 100644
--- a/contrib/llvm/include/llvm/PassManagers.h
+++ b/contrib/llvm/include/llvm/PassManagers.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file declares the LLVM Pass Manager infrastructure.
+// This file declares the LLVM Pass Manager infrastructure.
//
//===----------------------------------------------------------------------===//
@@ -24,11 +24,11 @@
//===----------------------------------------------------------------------===//
// Overview:
// The Pass Manager Infrastructure manages passes. Its responsibilities are:
-//
+//
// o Manage optimization pass execution order
// o Make required Analysis information available before pass P is run
// o Release memory occupied by dead passes
-// o If Analysis information is dirtied by a pass then regenerate Analysis
+// o If Analysis information is dirtied by a pass then regenerate Analysis
// information before it is consumed by another pass.
//
// Pass Manager Infrastructure uses multiple pass managers. They are
@@ -43,13 +43,13 @@
//
// [o] class PMTopLevelManager;
//
-// Two top level managers, PassManager and FunctionPassManager, derive from
-// PMTopLevelManager. PMTopLevelManager manages information used by top level
+// Two top level managers, PassManager and FunctionPassManager, derive from
+// PMTopLevelManager. PMTopLevelManager manages information used by top level
// managers such as last user info.
//
// [o] class PMDataManager;
//
-// PMDataManager manages information, e.g. list of available analysis info,
+// PMDataManager manages information, e.g. list of available analysis info,
// used by a pass manager to manage execution order of passes. It also provides
// a place to implement common pass manager APIs. All pass managers derive from
// PMDataManager.
@@ -82,7 +82,7 @@
// relies on PassManagerImpl to do all the tasks.
//
// [o] class PassManagerImpl : public Pass, public PMDataManager,
-// public PMDTopLevelManager
+// public PMTopLevelManager
//
// PassManagerImpl is a top level pass manager responsible for managing
// MPPassManagers.
@@ -109,7 +109,7 @@ enum PassDebuggingString {
ON_REGION_MSG, // " 'on Region ...\n'"
ON_LOOP_MSG, // " 'on Loop ...\n'"
ON_CG_MSG // "' on Call Graph ...\n'"
-};
+};
/// PassManagerPrettyStackEntry - This is used to print informative information
/// about what pass is running when/if a stack trace is generated.
@@ -124,19 +124,19 @@ public:
: P(p), V(&v), M(0) {} // When P is run on V
PassManagerPrettyStackEntry(Pass *p, Module &m)
: P(p), V(0), M(&m) {} // When P is run on M
-
+
/// print - Emit information about this stack frame to OS.
virtual void print(raw_ostream &OS) const;
};
-
-
+
+
//===----------------------------------------------------------------------===//
// PMStack
//
/// PMStack - This class implements a stack data structure of PMDataManager
/// pointers.
///
-/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers
+/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers
/// using PMStack. Each Pass implements assignPassManager() to connect itself
/// with appropriate manager. assignPassManager() walks PMStack to find
/// suitable manager.
@@ -174,9 +174,8 @@ protected:
void initializeAllAnalysisInfo();
private:
- /// This is implemented by top level pass manager and used by
- /// schedulePass() to add analysis info passes that are not available.
- virtual void addTopLevelPass(Pass *P) = 0;
+ virtual PMDataManager *getAsPMDataManager() = 0;
+ virtual PassManagerType getTopLevelPassManagerType() = 0;
public:
/// Schedule pass P for execution. Make sure that passes required by
@@ -198,7 +197,7 @@ public:
/// Find analysis usage information for the pass P.
AnalysisUsage *findAnalysisUsage(Pass *P);
- virtual ~PMTopLevelManager();
+ virtual ~PMTopLevelManager();
/// Add immutable pass and initialize it.
inline void addImmutablePass(ImmutablePass *P) {
@@ -228,7 +227,7 @@ public:
PMStack activeStack;
protected:
-
+
/// Collection of pass managers
SmallVector<PMDataManager *, 8> PassManagers;
@@ -254,7 +253,7 @@ private:
};
-
+
//===----------------------------------------------------------------------===//
// PMDataManager
@@ -268,7 +267,7 @@ public:
}
virtual ~PMDataManager();
-
+
virtual Pass *getAsPass() = 0;
/// Augment AvailableAnalysis by adding analysis made available by pass P.
@@ -279,16 +278,16 @@ public:
/// Remove Analysis that is not preserved by the pass
void removeNotPreservedAnalysis(Pass *P);
-
+
/// Remove dead passes used by P.
- void removeDeadPasses(Pass *P, StringRef Msg,
+ void removeDeadPasses(Pass *P, StringRef Msg,
enum PassDebuggingString);
/// Remove P.
- void freePass(Pass *P, StringRef Msg,
+ void freePass(Pass *P, StringRef Msg,
enum PassDebuggingString);
- /// Add pass P into the PassVector. Update
+ /// Add pass P into the PassVector. Update
/// AvailableAnalysis appropriately if ProcessAnalysis is true.
void add(Pass *P, bool ProcessAnalysis = true);
@@ -300,7 +299,7 @@ public:
virtual Pass *getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F);
/// Initialize available analysis information.
- void initializeAnalysisInfo() {
+ void initializeAnalysisInfo() {
AvailableAnalysis.clear();
for (unsigned i = 0; i < PMT_Last; ++i)
InheritedAnalysis[i] = NULL;
@@ -347,9 +346,9 @@ public:
return (unsigned)PassVector.size();
}
- virtual PassManagerType getPassManagerType() const {
+ virtual PassManagerType getPassManagerType() const {
assert ( 0 && "Invalid use of getPassManagerType");
- return PMT_Unknown;
+ return PMT_Unknown;
}
std::map<AnalysisID, Pass*> *getAvailableAnalysis() {
@@ -377,17 +376,17 @@ protected:
// then PMT_Last active pass managers.
std::map<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last];
-
+
/// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
/// or higher is specified.
bool isPassDebuggingExecutionsOrMore() const;
-
+
private:
void dumpAnalysisUsage(StringRef Msg, const Pass *P,
const AnalysisUsage::VectorType &Set) const;
- // Set of available Analysis. This information is used while scheduling
- // pass. If a pass requires an analysis which is not available then
+ // Set of available Analysis. This information is used while scheduling
+ // pass. If a pass requires an analysis which is not available then
// the required analysis pass is scheduled to run before the pass itself is
// scheduled to run.
std::map<AnalysisID, Pass*> AvailableAnalysis;
@@ -403,27 +402,27 @@ private:
// FPPassManager
//
/// FPPassManager manages BBPassManagers and FunctionPasses.
-/// It batches all function passes and basic block pass managers together and
-/// sequence them to process one function at a time before processing next
+/// It batches all function passes and basic block pass managers together and
+/// sequences them to process one function at a time before processing the next
/// function.
class FPPassManager : public ModulePass, public PMDataManager {
public:
static char ID;
- explicit FPPassManager()
+ explicit FPPassManager()
: ModulePass(ID), PMDataManager() { }
-
+
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool runOnFunction(Function &F);
bool runOnModule(Module &M);
-
+
/// cleanup - After running all passes, clean up pass manager cache.
void cleanup();
/// doInitialization - Run all of the initializers for the function passes.
///
bool doInitialization(Module &M);
-
+
/// doFinalization - Run all of the finalizers for the function passes.
///
bool doFinalization(Module &M);
@@ -449,8 +448,8 @@ public:
return FP;
}
- virtual PassManagerType getPassManagerType() const {
- return PMT_FunctionPassManager;
+ virtual PassManagerType getPassManagerType() const {
+ return PMT_FunctionPassManager;
}
};
diff --git a/contrib/llvm/include/llvm/PassSupport.h b/contrib/llvm/include/llvm/PassSupport.h
index 0827909..c50c2cc 100644
--- a/contrib/llvm/include/llvm/PassSupport.h
+++ b/contrib/llvm/include/llvm/PassSupport.h
@@ -25,6 +25,7 @@
#include "llvm/PassRegistry.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Valgrind.h"
#include <vector>
namespace llvm {
@@ -135,7 +136,10 @@ private:
if (old_val == 0) { \
function(Registry); \
sys::MemoryFence(); \
+ TsanIgnoreWritesBegin(); \
+ TsanHappensBefore(&initialized); \
initialized = 2; \
+ TsanIgnoreWritesEnd(); \
} else { \
sys::cas_flag tmp = initialized; \
sys::MemoryFence(); \
@@ -143,7 +147,8 @@ private:
tmp = initialized; \
sys::MemoryFence(); \
} \
- }
+ } \
+ TsanHappensAfter(&initialized);
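// A minimal standalone sketch of the double-checked initialization pattern
// that the TsanHappensBefore/After annotations above describe to the race
// detector, rendered with C++11 atomics instead of sys::cas_flag (initOnce
// and doInit are illustrative names, not LLVM APIs).
#include <atomic>

static std::atomic<int> initialized(0); // 0 = untouched, 1 = running, 2 = done

static void doInit() { /* one-time setup goes here */ }

static void initOnce() {
  int expected = 0;
  if (initialized.compare_exchange_strong(expected, 1,
                                          std::memory_order_acq_rel)) {
    doInit();
    // Publish: everything doInit() wrote happens-before any thread that
    // observes initialized == 2 below.
    initialized.store(2, std::memory_order_release);
  } else {
    while (initialized.load(std::memory_order_acquire) != 2) {
      // Spin until the winning thread finishes initialization.
    }
  }
}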
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis) \
static void* initialize##passName##PassOnce(PassRegistry &Registry) { \
diff --git a/contrib/llvm/include/llvm/Support/BlockFrequency.h b/contrib/llvm/include/llvm/Support/BlockFrequency.h
index 554b784..839cf93 100644
--- a/contrib/llvm/include/llvm/Support/BlockFrequency.h
+++ b/contrib/llvm/include/llvm/Support/BlockFrequency.h
@@ -14,6 +14,8 @@
#ifndef LLVM_SUPPORT_BLOCKFREQUENCY_H
#define LLVM_SUPPORT_BLOCKFREQUENCY_H
+#include "llvm/Support/DataTypes.h"
+
namespace llvm {
class raw_ostream;
diff --git a/contrib/llvm/include/llvm/Support/BranchProbability.h b/contrib/llvm/include/llvm/Support/BranchProbability.h
index 05c24d4..eedf692 100644
--- a/contrib/llvm/include/llvm/Support/BranchProbability.h
+++ b/contrib/llvm/include/llvm/Support/BranchProbability.h
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_BRANCHPROBABILITY_H
#include "llvm/Support/DataTypes.h"
+#include <cassert>
namespace llvm {
@@ -22,7 +23,6 @@ class raw_ostream;
// This class represents Branch Probability as a non-negative fraction.
class BranchProbability {
-
// Numerator
uint32_t N;
@@ -30,19 +30,44 @@ class BranchProbability {
uint32_t D;
public:
- BranchProbability(uint32_t n, uint32_t d);
+ BranchProbability(uint32_t n, uint32_t d) : N(n), D(d) {
+    assert(d > 0 && "Denominator cannot be 0!");
+ assert(n <= d && "Probability cannot be bigger than 1!");
+ }
+
+ static BranchProbability getZero() { return BranchProbability(0, 1); }
+ static BranchProbability getOne() { return BranchProbability(1, 1); }
uint32_t getNumerator() const { return N; }
uint32_t getDenominator() const { return D; }
// Return (1 - Probability).
- BranchProbability getCompl() {
+ BranchProbability getCompl() const {
return BranchProbability(D - N, D);
}
void print(raw_ostream &OS) const;
void dump() const;
+
+ bool operator==(BranchProbability RHS) const {
+ return (uint64_t)N * RHS.D == (uint64_t)D * RHS.N;
+ }
+ bool operator!=(BranchProbability RHS) const {
+ return !(*this == RHS);
+ }
+ bool operator<(BranchProbability RHS) const {
+ return (uint64_t)N * RHS.D < (uint64_t)D * RHS.N;
+ }
+ bool operator>(BranchProbability RHS) const {
+ return RHS < *this;
+ }
+ bool operator<=(BranchProbability RHS) const {
+ return (uint64_t)N * RHS.D <= (uint64_t)D * RHS.N;
+ }
+ bool operator>=(BranchProbability RHS) const {
+ return RHS <= *this;
+ }
};
raw_ostream &operator<<(raw_ostream &OS, const BranchProbability &Prob);
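// The comparison operators above avoid division and floating point by
// cross-multiplying in 64 bits: for uint32_t fractions N1/D1 and N2/D2,
// N1/D1 < N2/D2 exactly when N1*D2 < D1*N2, and both products always fit in
// uint64_t. A tiny standalone sketch (lessThan is an illustrative name):
#include <cassert>
#include <stdint.h>

static bool lessThan(uint32_t N1, uint32_t D1, uint32_t N2, uint32_t D2) {
  return (uint64_t)N1 * D2 < (uint64_t)D1 * N2; // compares N1/D1 with N2/D2
}

int main() {
  assert(lessThan(1, 3, 1, 2));  // 1/3 < 1/2 since 1*2 < 3*1
  assert(!lessThan(1, 2, 1, 2)); // equal fractions are not less-than
  return 0;
}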
diff --git a/contrib/llvm/include/llvm/Support/CFG.h b/contrib/llvm/include/llvm/Support/CFG.h
index 29313ef..f5dc8ea 100644
--- a/contrib/llvm/include/llvm/Support/CFG.h
+++ b/contrib/llvm/include/llvm/Support/CFG.h
@@ -71,6 +71,12 @@ public:
unsigned getOperandNo() const {
return It.getOperandNo();
}
+
+ /// getUse - Return the operand Use in the predecessor's terminator
+ /// of the successor.
+ Use &getUse() const {
+ return It.getUse();
+ }
};
typedef PredIterator<BasicBlock, Value::use_iterator> pred_iterator;
@@ -314,6 +320,7 @@ template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
typedef Function::iterator nodes_iterator;
static nodes_iterator nodes_begin(Function *F) { return F->begin(); }
static nodes_iterator nodes_end (Function *F) { return F->end(); }
+ static unsigned size (Function *F) { return F->size(); }
};
template <> struct GraphTraits<const Function*> :
public GraphTraits<const BasicBlock*> {
@@ -323,6 +330,7 @@ template <> struct GraphTraits<const Function*> :
typedef Function::const_iterator nodes_iterator;
static nodes_iterator nodes_begin(const Function *F) { return F->begin(); }
static nodes_iterator nodes_end (const Function *F) { return F->end(); }
+ static unsigned size (const Function *F) { return F->size(); }
};
diff --git a/contrib/llvm/include/llvm/Support/COFF.h b/contrib/llvm/include/llvm/Support/COFF.h
index 6739255..88c60ba 100644
--- a/contrib/llvm/include/llvm/Support/COFF.h
+++ b/contrib/llvm/include/llvm/Support/COFF.h
@@ -24,6 +24,7 @@
#define LLVM_SUPPORT_WIN_COFF_H
#include "llvm/Support/DataTypes.h"
+#include <cassert>
#include <cstring>
namespace llvm {
@@ -49,8 +50,65 @@ namespace COFF {
};
enum MachineTypes {
- IMAGE_FILE_MACHINE_I386 = 0x14C,
- IMAGE_FILE_MACHINE_AMD64 = 0x8664
+ IMAGE_FILE_MACHINE_UNKNOWN = 0x0,
+ IMAGE_FILE_MACHINE_AM33 = 0x13,
+ IMAGE_FILE_MACHINE_AMD64 = 0x8664,
+ IMAGE_FILE_MACHINE_ARM = 0x1C0,
+ IMAGE_FILE_MACHINE_ARMV7 = 0x1C4,
+ IMAGE_FILE_MACHINE_EBC = 0xEBC,
+ IMAGE_FILE_MACHINE_I386 = 0x14C,
+ IMAGE_FILE_MACHINE_IA64 = 0x200,
+ IMAGE_FILE_MACHINE_M32R = 0x9041,
+ IMAGE_FILE_MACHINE_MIPS16 = 0x266,
+ IMAGE_FILE_MACHINE_MIPSFPU = 0x366,
+ IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466,
+ IMAGE_FILE_MACHINE_POWERPC = 0x1F0,
+ IMAGE_FILE_MACHINE_POWERPCFP = 0x1F1,
+ IMAGE_FILE_MACHINE_R4000 = 0x166,
+ IMAGE_FILE_MACHINE_SH3 = 0x1A2,
+ IMAGE_FILE_MACHINE_SH3DSP = 0x1A3,
+ IMAGE_FILE_MACHINE_SH4 = 0x1A6,
+ IMAGE_FILE_MACHINE_SH5 = 0x1A8,
+ IMAGE_FILE_MACHINE_THUMB = 0x1C2,
+ IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169
+ };
+
+ enum Characteristics {
+ /// The file does not contain base relocations and must be loaded at its
+ /// preferred base. If this cannot be done, the loader will error.
+ IMAGE_FILE_RELOCS_STRIPPED = 0x0001,
+ /// The file is valid and can be run.
+ IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002,
+ /// COFF line numbers have been stripped. This is deprecated and should be
+ /// 0.
+ IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004,
+ /// COFF symbol table entries for local symbols have been removed. This is
+ /// deprecated and should be 0.
+ IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008,
+ /// Aggressively trim working set. This is deprecated and must be 0.
+ IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010,
+ /// Image can handle > 2GiB addresses.
+ IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020,
+ /// Little endian: the LSB precedes the MSB in memory. This is deprecated
+ /// and should be 0.
+ IMAGE_FILE_BYTES_REVERSED_LO = 0x0080,
+    /// Machine is based on a 32-bit word architecture.
+ IMAGE_FILE_32BIT_MACHINE = 0x0100,
+ /// Debugging info has been removed.
+ IMAGE_FILE_DEBUG_STRIPPED = 0x0200,
+ /// If the image is on removable media, fully load it and copy it to swap.
+ IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400,
+ /// If the image is on network media, fully load it and copy it to swap.
+ IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800,
+ /// The image file is a system file, not a user program.
+ IMAGE_FILE_SYSTEM = 0x1000,
+ /// The image file is a DLL.
+ IMAGE_FILE_DLL = 0x2000,
+ /// This file should only be run on a uniprocessor machine.
+ IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000,
+ /// Big endian: the MSB precedes the LSB in memory. This is deprecated
+ /// and should be 0.
+ IMAGE_FILE_BYTES_REVERSED_HI = 0x8000
};
struct symbol {
@@ -231,6 +289,24 @@ namespace COFF {
IMAGE_REL_AMD64_SSPAN32 = 0x0010
};
+ enum RelocationTypesARM {
+ IMAGE_REL_ARM_ABSOLUTE = 0x0000,
+ IMAGE_REL_ARM_ADDR32 = 0x0001,
+ IMAGE_REL_ARM_ADDR32NB = 0x0002,
+ IMAGE_REL_ARM_BRANCH24 = 0x0003,
+ IMAGE_REL_ARM_BRANCH11 = 0x0004,
+ IMAGE_REL_ARM_TOKEN = 0x0005,
+ IMAGE_REL_ARM_BLX24 = 0x0008,
+ IMAGE_REL_ARM_BLX11 = 0x0009,
+ IMAGE_REL_ARM_SECTION = 0x000E,
+ IMAGE_REL_ARM_SECREL = 0x000F,
+ IMAGE_REL_ARM_MOV32A = 0x0010,
+ IMAGE_REL_ARM_MOV32T = 0x0011,
+ IMAGE_REL_ARM_BRANCH20T = 0x0012,
+ IMAGE_REL_ARM_BRANCH24T = 0x0014,
+ IMAGE_REL_ARM_BLX23T = 0x0015
+ };
+
enum COMDATType {
IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
IMAGE_COMDAT_SELECT_ANY,
@@ -292,7 +368,219 @@ namespace COFF {
AuxiliarySectionDefinition SectionDefinition;
};
-} // End namespace llvm.
+ /// @brief The Import Directory Table.
+ ///
+ /// There is a single array of these and one entry per imported DLL.
+ struct ImportDirectoryTableEntry {
+ uint32_t ImportLookupTableRVA;
+ uint32_t TimeDateStamp;
+ uint32_t ForwarderChain;
+ uint32_t NameRVA;
+ uint32_t ImportAddressTableRVA;
+ };
+
+ /// @brief The PE32 Import Lookup Table.
+ ///
+ /// There is an array of these for each imported DLL. It represents either
+ /// the ordinal to import from the target DLL, or a name to lookup and import
+ /// from the target DLL.
+ ///
+ /// This also happens to be the same format used by the Import Address Table
+ /// when it is initially written out to the image.
+ struct ImportLookupTableEntry32 {
+ uint32_t data;
+
+ /// @brief Is this entry specified by ordinal, or name?
+ bool isOrdinal() const { return data & 0x80000000; }
+
+ /// @brief Get the ordinal value of this entry. isOrdinal must be true.
+ uint16_t getOrdinal() const {
+ assert(isOrdinal() && "ILT entry is not an ordinal!");
+ return data & 0xFFFF;
+ }
+
+ /// @brief Set the ordinal value and set isOrdinal to true.
+ void setOrdinal(uint16_t o) {
+ data = o;
+ data |= 0x80000000;
+ }
+
+ /// @brief Get the Hint/Name entry RVA. isOrdinal must be false.
+ uint32_t getHintNameRVA() const {
+ assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!");
+ return data;
+ }
+
+ /// @brief Set the Hint/Name entry RVA and set isOrdinal to false.
+ void setHintNameRVA(uint32_t rva) { data = rva; }
+ };
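// A hedged usage sketch for the entry above: bit 31 of the 32-bit value
// selects between the ordinal and Hint/Name RVA encodings, per the PE/COFF
// specification (describeILTEntry is illustrative, not part of this header;
// it assumes the declarations added above).
#include "llvm/Support/COFF.h"
#include <cstdio>

static void describeILTEntry(llvm::COFF::ImportLookupTableEntry32 E) {
  if (E.isOrdinal())
    std::printf("import by ordinal #%u\n", unsigned(E.getOrdinal()));
  else
    std::printf("import by name, hint/name entry at RVA 0x%x\n",
                unsigned(E.getHintNameRVA()));
}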
+
+ /// @brief The DOS compatible header at the front of all PEs.
+ struct DOSHeader {
+ uint16_t Magic;
+ uint16_t UsedBytesInTheLastPage;
+ uint16_t FileSizeInPages;
+ uint16_t NumberOfRelocationItems;
+ uint16_t HeaderSizeInParagraphs;
+ uint16_t MinimumExtraParagraphs;
+ uint16_t MaximumExtraParagraphs;
+ uint16_t InitialRelativeSS;
+ uint16_t InitialSP;
+ uint16_t Checksum;
+ uint16_t InitialIP;
+ uint16_t InitialRelativeCS;
+ uint16_t AddressOfRelocationTable;
+ uint16_t OverlayNumber;
+ uint16_t Reserved[4];
+ uint16_t OEMid;
+ uint16_t OEMinfo;
+ uint16_t Reserved2[10];
+ uint32_t AddressOfNewExeHeader;
+ };
+
+ struct PEHeader {
+ uint32_t Signature;
+ header COFFHeader;
+ uint16_t Magic;
+ uint8_t MajorLinkerVersion;
+ uint8_t MinorLinkerVersion;
+ uint32_t SizeOfCode;
+ uint32_t SizeOfInitializedData;
+ uint32_t SizeOfUninitializedData;
+ uint32_t AddressOfEntryPoint; // RVA
+ uint32_t BaseOfCode; // RVA
+ uint32_t BaseOfData; // RVA
+ uint64_t ImageBase;
+ uint32_t SectionAlignment;
+ uint32_t FileAlignment;
+ uint16_t MajorOperatingSystemVersion;
+ uint16_t MinorOperatingSystemVersion;
+ uint16_t MajorImageVersion;
+ uint16_t MinorImageVersion;
+ uint16_t MajorSubsystemVersion;
+ uint16_t MinorSubsystemVersion;
+ uint32_t Win32VersionValue;
+ uint32_t SizeOfImage;
+ uint32_t SizeOfHeaders;
+ uint32_t CheckSum;
+ uint16_t Subsystem;
+ uint16_t DLLCharacteristics;
+ uint64_t SizeOfStackReserve;
+ uint64_t SizeOfStackCommit;
+ uint64_t SizeOfHeapReserve;
+ uint64_t SizeOfHeapCommit;
+ uint32_t LoaderFlags;
+ uint32_t NumberOfRvaAndSize;
+ };
+
+ struct DataDirectory {
+ uint32_t RelativeVirtualAddress;
+ uint32_t Size;
+ };
+
+ enum WindowsSubsystem {
+ IMAGE_SUBSYSTEM_UNKNOWN = 0, ///< An unknown subsystem.
+ IMAGE_SUBSYSTEM_NATIVE = 1, ///< Device drivers and native Windows processes
+ IMAGE_SUBSYSTEM_WINDOWS_GUI = 2, ///< The Windows GUI subsystem.
+ IMAGE_SUBSYSTEM_WINDOWS_CUI = 3, ///< The Windows character subsystem.
+ IMAGE_SUBSYSTEM_POSIX_CUI = 7, ///< The POSIX character subsystem.
+ IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9, ///< Windows CE.
+ IMAGE_SUBSYSTEM_EFI_APPLICATION = 10, ///< An EFI application.
+ IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11, ///< An EFI driver with boot
+ /// services.
+ IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12, ///< An EFI driver with run-time
+ /// services.
+ IMAGE_SUBSYSTEM_EFI_ROM = 13, ///< An EFI ROM image.
+ IMAGE_SUBSYSTEM_XBOX = 14 ///< XBOX.
+ };
+
+ enum DLLCharacteristics {
+ /// DLL can be relocated at load time.
+ IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040,
+ /// Code integrity checks are enforced.
+ IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY = 0x0080,
+ IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100, ///< Image is NX compatible.
+ /// Isolation aware, but do not isolate the image.
+ IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION = 0x0200,
+ /// Does not use structured exception handling (SEH). No SEH handler may be
+ /// called in this image.
+ IMAGE_DLL_CHARACTERISTICS_NO_SEH = 0x0400,
+ /// Do not bind the image.
+ IMAGE_DLL_CHARACTERISTICS_NO_BIND = 0x0800,
+ IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER = 0x2000, ///< A WDM driver.
+ /// Terminal Server aware.
+ IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000
+ };
+
+ enum DebugType {
+ IMAGE_DEBUG_TYPE_UNKNOWN = 0,
+ IMAGE_DEBUG_TYPE_COFF = 1,
+ IMAGE_DEBUG_TYPE_CODEVIEW = 2,
+ IMAGE_DEBUG_TYPE_FPO = 3,
+ IMAGE_DEBUG_TYPE_MISC = 4,
+ IMAGE_DEBUG_TYPE_EXCEPTION = 5,
+ IMAGE_DEBUG_TYPE_FIXUP = 6,
+ IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7,
+ IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8,
+ IMAGE_DEBUG_TYPE_BORLAND = 9,
+ IMAGE_DEBUG_TYPE_CLSID = 11
+ };
+
+ enum BaseRelocationType {
+ IMAGE_REL_BASED_ABSOLUTE = 0,
+ IMAGE_REL_BASED_HIGH = 1,
+ IMAGE_REL_BASED_LOW = 2,
+ IMAGE_REL_BASED_HIGHLOW = 3,
+ IMAGE_REL_BASED_HIGHADJ = 4,
+ IMAGE_REL_BASED_MIPS_JMPADDR = 5,
+ IMAGE_REL_BASED_ARM_MOV32A = 5,
+ IMAGE_REL_BASED_ARM_MOV32T = 7,
+ IMAGE_REL_BASED_MIPS_JMPADDR16 = 9,
+ IMAGE_REL_BASED_DIR64 = 10
+ };
+
+ enum ImportType {
+ IMPORT_CODE = 0,
+ IMPORT_DATA = 1,
+ IMPORT_CONST = 2
+ };
+
+ enum ImportNameType {
+ /// Import is by ordinal. This indicates that the value in the Ordinal/Hint
+ /// field of the import header is the import's ordinal. If this constant is
+ /// not specified, then the Ordinal/Hint field should always be interpreted
+ /// as the import's hint.
+ IMPORT_ORDINAL = 0,
+ /// The import name is identical to the public symbol name
+ IMPORT_NAME = 1,
+ /// The import name is the public symbol name, but skipping the leading ?,
+ /// @, or optionally _.
+ IMPORT_NAME_NOPREFIX = 2,
+ /// The import name is the public symbol name, but skipping the leading ?,
+ /// @, or optionally _, and truncating at the first @.
+ IMPORT_NAME_UNDECORATE = 3
+ };
+
+ struct ImportHeader {
+ uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0).
+ uint16_t Sig2; ///< Must be 0xFFFF.
+ uint16_t Version;
+ uint16_t Machine;
+ uint32_t TimeDateStamp;
+ uint32_t SizeOfData;
+ uint16_t OrdinalHint;
+ uint16_t TypeInfo;
+
+ ImportType getType() const {
+ return static_cast<ImportType>(TypeInfo & 0x3);
+ }
+
+ ImportNameType getNameType() const {
+      return static_cast<ImportNameType>((TypeInfo & 0x1C) >> 2);
+ }
+ };
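// In the PE/COFF import header, Type occupies bits 0-1 of TypeInfo and
// NameType bits 2-4, which is why the accessors above mask with 0x3 and 0x1C
// (the 0x1C field is shifted right by 2 to recover values 0-3). A small
// standalone decoding sketch (nameTypeString is illustrative, not part of
// this header):
#include <stdint.h>

static const char *nameTypeString(uint16_t TypeInfo) {
  switch ((TypeInfo & 0x1C) >> 2) { // NameType field, bits 2-4
  case 0:  return "by ordinal";
  case 1:  return "by name";
  case 2:  return "by name, no prefix";
  case 3:  return "by name, undecorated";
  default: return "reserved";
  }
}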
+
} // End namespace COFF.
+} // End namespace llvm.
#endif
diff --git a/contrib/llvm/include/llvm/Support/CallSite.h b/contrib/llvm/include/llvm/Support/CallSite.h
index 04b8c4e..20634ed 100644
--- a/contrib/llvm/include/llvm/Support/CallSite.h
+++ b/contrib/llvm/include/llvm/Support/CallSite.h
@@ -237,6 +237,16 @@ public:
#undef CALLSITE_DELEGATE_GETTER
#undef CALLSITE_DELEGATE_SETTER
+ /// @brief Determine whether this argument is not captured.
+ bool doesNotCapture(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::NoCapture);
+ }
+
+ /// @brief Determine whether this argument is passed by value.
+ bool isByValArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::ByVal);
+ }
+
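// A note on the ArgNo + 1 above: in this vintage of the attributes API,
// slot 0 of a parameter attribute list refers to the return value, so
// argument i maps to attribute index i + 1. A hedged usage sketch
// (countNoCaptureArgs is illustrative; it assumes CallSite::arg_size()):
#include "llvm/Support/CallSite.h"

static unsigned countNoCaptureArgs(llvm::CallSite CS) {
  unsigned Count = 0;
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.doesNotCapture(i)) // internally checks attribute index i + 1
      ++Count;
  return Count;
}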
/// hasArgument - Returns true if this CallSite passes the given Value* as an
/// argument to the called function.
bool hasArgument(const Value *Arg) const {
diff --git a/contrib/llvm/include/llvm/Support/Capacity.h b/contrib/llvm/include/llvm/Support/Capacity.h
index d8cda43..7460f98 100644
--- a/contrib/llvm/include/llvm/Support/Capacity.h
+++ b/contrib/llvm/include/llvm/Support/Capacity.h
@@ -15,6 +15,8 @@
#ifndef LLVM_SUPPORT_CAPACITY_H
#define LLVM_SUPPORT_CAPACITY_H
+#include <cstddef>
+
namespace llvm {
template <typename T>
diff --git a/contrib/llvm/include/llvm/Support/CodeGen.h b/contrib/llvm/include/llvm/Support/CodeGen.h
index 41351dc..1b66c94 100644
--- a/contrib/llvm/include/llvm/Support/CodeGen.h
+++ b/contrib/llvm/include/llvm/Support/CodeGen.h
@@ -27,6 +27,26 @@ namespace llvm {
enum Model { Default, JITDefault, Small, Kernel, Medium, Large };
}
+ // TLS models.
+ namespace TLSModel {
+ enum Model {
+ GeneralDynamic,
+ LocalDynamic,
+ InitialExec,
+ LocalExec
+ };
+ }
+
+ // Code generation optimization level.
+ namespace CodeGenOpt {
+ enum Level {
+ None, // -O0
+ Less, // -O1
+ Default, // -O2, -Os
+ Aggressive // -O3
+ };
+ }
+
} // end llvm namespace
#endif
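// A hedged sketch of how a frontend might map -O levels onto the new
// CodeGenOpt::Level enum, following the comments in the enum above
// (fromOLevel is an illustrative name, not an LLVM API):
#include "llvm/Support/CodeGen.h"

static llvm::CodeGenOpt::Level fromOLevel(unsigned OLevel) {
  switch (OLevel) {
  case 0:  return llvm::CodeGenOpt::None;       // -O0
  case 1:  return llvm::CodeGenOpt::Less;       // -O1
  case 3:  return llvm::CodeGenOpt::Aggressive; // -O3
  default: return llvm::CodeGenOpt::Default;    // -O2, -Os
  }
}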
diff --git a/contrib/llvm/include/llvm/Support/CommandLine.h b/contrib/llvm/include/llvm/Support/CommandLine.h
index c6b62a8..c212d2d 100644
--- a/contrib/llvm/include/llvm/Support/CommandLine.h
+++ b/contrib/llvm/include/llvm/Support/CommandLine.h
@@ -40,7 +40,7 @@ namespace cl {
//===----------------------------------------------------------------------===//
// ParseCommandLineOptions - Command line option processing entry point.
//
-void ParseCommandLineOptions(int argc, char **argv,
+void ParseCommandLineOptions(int argc, const char * const *argv,
const char *Overview = 0,
bool ReadResponseFiles = false);
@@ -83,10 +83,10 @@ void MarkOptionsChanged();
//
enum NumOccurrencesFlag { // Flags for the number of occurrences allowed
- Optional = 0x01, // Zero or One occurrence
- ZeroOrMore = 0x02, // Zero or more occurrences allowed
- Required = 0x03, // One occurrence required
- OneOrMore = 0x04, // One or more occurrences required
+ Optional = 0x00, // Zero or One occurrence
+ ZeroOrMore = 0x01, // Zero or more occurrences allowed
+ Required = 0x02, // One occurrence required
+ OneOrMore = 0x03, // One or more occurrences required
// ConsumeAfter - Indicates that this option is fed anything that follows the
// last positional argument required by the application (it is an error if
@@ -95,23 +95,20 @@ enum NumOccurrencesFlag { // Flags for the number of occurrences allowed
// found. Once a filename is found, all of the succeeding arguments are
// passed, unprocessed, to the ConsumeAfter option.
//
- ConsumeAfter = 0x05,
-
- OccurrencesMask = 0x07
+ ConsumeAfter = 0x04
};
enum ValueExpected { // Is a value required for the option?
- ValueOptional = 0x08, // The value can appear... or not
- ValueRequired = 0x10, // The value is required to appear!
- ValueDisallowed = 0x18, // A value may not be specified (for flags)
- ValueMask = 0x18
+ // zero reserved for the unspecified value
+ ValueOptional = 0x01, // The value can appear... or not
+ ValueRequired = 0x02, // The value is required to appear!
+ ValueDisallowed = 0x03 // A value may not be specified (for flags)
};
enum OptionHidden { // Control whether -help shows this option
- NotHidden = 0x20, // Option included in -help & -help-hidden
- Hidden = 0x40, // -help doesn't, but -help-hidden does
- ReallyHidden = 0x60, // Neither -help nor -help-hidden show this arg
- HiddenMask = 0x60
+ NotHidden = 0x00, // Option included in -help & -help-hidden
+ Hidden = 0x01, // -help doesn't, but -help-hidden does
+ ReallyHidden = 0x02 // Neither -help nor -help-hidden show this arg
};
// Formatting flags - This controls special features that the option might have
@@ -130,18 +127,16 @@ enum OptionHidden { // Control whether -help shows this option
//
enum FormattingFlags {
- NormalFormatting = 0x000, // Nothing special
- Positional = 0x080, // Is a positional argument, no '-' required
- Prefix = 0x100, // Can this option directly prefix its value?
- Grouping = 0x180, // Can this option group with other options?
- FormattingMask = 0x180 // Union of the above flags.
+ NormalFormatting = 0x00, // Nothing special
+ Positional = 0x01, // Is a positional argument, no '-' required
+ Prefix = 0x02, // Can this option directly prefix its value?
+ Grouping = 0x03 // Can this option group with other options?
};
enum MiscFlags { // Miscellaneous flags to adjust argument
- CommaSeparated = 0x200, // Should this cl::list split between commas?
- PositionalEatsArgs = 0x400, // Should this positional cl::list eat -args?
- Sink = 0x800, // Should this cl::list eat all unknown options?
- MiscMask = 0xE00 // Union of the above flags.
+ CommaSeparated = 0x01, // Should this cl::list split between commas?
+ PositionalEatsArgs = 0x02, // Should this positional cl::list eat -args?
+ Sink = 0x04 // Should this cl::list eat all unknown options?
};
@@ -168,7 +163,15 @@ class Option {
virtual void anchor();
int NumOccurrences; // The number of times specified
- int Flags; // Flags for the argument
+  // Occurrences, HiddenFlag, and Formatting are all enum types, but they are
+  // declared as unsigned fields to avoid problems with signed enums in
+  // bitfields.
+  unsigned Occurrences : 3; // enum NumOccurrencesFlag
+  // 'Value' does not use the enum type because zero is an implementation
+  // detail representing the unspecified value.
+ unsigned Value : 2;
+ unsigned HiddenFlag : 2; // enum OptionHidden
+ unsigned Formatting : 2; // enum FormattingFlags
+ unsigned Misc : 3;
unsigned Position; // Position of last occurrence of the option
unsigned AdditionalVals;// Greater than 0 for multi-valued option.
Option *NextRegistered; // Singly linked list of registered options.
@@ -178,21 +181,20 @@ public:
const char *ValueStr; // String describing what the value of this option is
inline enum NumOccurrencesFlag getNumOccurrencesFlag() const {
- return static_cast<enum NumOccurrencesFlag>(Flags & OccurrencesMask);
+ return (enum NumOccurrencesFlag)Occurrences;
}
inline enum ValueExpected getValueExpectedFlag() const {
- int VE = Flags & ValueMask;
- return VE ? static_cast<enum ValueExpected>(VE)
+ return Value ? ((enum ValueExpected)Value)
: getValueExpectedFlagDefault();
}
inline enum OptionHidden getOptionHiddenFlag() const {
- return static_cast<enum OptionHidden>(Flags & HiddenMask);
+ return (enum OptionHidden)HiddenFlag;
}
inline enum FormattingFlags getFormattingFlag() const {
- return static_cast<enum FormattingFlags>(Flags & FormattingMask);
+ return (enum FormattingFlags)Formatting;
}
inline unsigned getMiscFlags() const {
- return Flags & MiscMask;
+ return Misc;
}
inline unsigned getPosition() const { return Position; }
inline unsigned getNumAdditionalVals() const { return AdditionalVals; }
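// A standalone sketch of the bitfield packing adopted above: each flag family
// gets its own field, so the getters reduce to casts and the setters to plain
// assignments, and the fields are declared unsigned because the representation
// of signed enum bitfields is implementation-defined (FlagsDemo is
// illustrative, not the real Option layout):
#include <cassert>

struct FlagsDemo {
  unsigned Occurrences : 3; // holds a NumOccurrencesFlag value (0..4)
  unsigned HiddenFlag  : 2; // holds an OptionHidden value (0..2)
};

int main() {
  FlagsDemo F = { 0, 0 };
  F.Occurrences = 3; // OneOrMore in the new zero-based numbering
  F.HiddenFlag  = 1; // Hidden
  assert(F.Occurrences == 3 && F.HiddenFlag == 1);
  return 0;
}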
@@ -206,27 +208,21 @@ public:
void setArgStr(const char *S) { ArgStr = S; }
void setDescription(const char *S) { HelpStr = S; }
void setValueStr(const char *S) { ValueStr = S; }
-
- void setFlag(unsigned Flag, unsigned FlagMask) {
- Flags &= ~FlagMask;
- Flags |= Flag;
- }
-
void setNumOccurrencesFlag(enum NumOccurrencesFlag Val) {
- setFlag(Val, OccurrencesMask);
+ Occurrences = Val;
}
- void setValueExpectedFlag(enum ValueExpected Val) { setFlag(Val, ValueMask); }
- void setHiddenFlag(enum OptionHidden Val) { setFlag(Val, HiddenMask); }
- void setFormattingFlag(enum FormattingFlags V) { setFlag(V, FormattingMask); }
- void setMiscFlag(enum MiscFlags M) { setFlag(M, M); }
+ void setValueExpectedFlag(enum ValueExpected Val) { Value = Val; }
+ void setHiddenFlag(enum OptionHidden Val) { HiddenFlag = Val; }
+ void setFormattingFlag(enum FormattingFlags V) { Formatting = V; }
+ void setMiscFlag(enum MiscFlags M) { Misc |= M; }
void setPosition(unsigned pos) { Position = pos; }
protected:
- explicit Option(unsigned DefaultFlags)
- : NumOccurrences(0), Flags(DefaultFlags | NormalFormatting), Position(0),
+ explicit Option(enum NumOccurrencesFlag Occurrences,
+ enum OptionHidden Hidden)
+ : NumOccurrences(0), Occurrences(Occurrences), HiddenFlag(Hidden),
+ Formatting(NormalFormatting), Position(0),
AdditionalVals(0), NextRegistered(0),
ArgStr(""), HelpStr(""), ValueStr("") {
- assert(getNumOccurrencesFlag() != 0 &&
- getOptionHiddenFlag() != 0 && "Not all default flags specified!");
}
inline void setNumAdditionalVals(unsigned n) { AdditionalVals = n; }
@@ -326,6 +322,8 @@ LocationClass<Ty> location(Ty &L) { return LocationClass<Ty>(L); }
struct GenericOptionValue {
virtual ~GenericOptionValue() {}
virtual bool compare(const GenericOptionValue &V) const = 0;
+private:
+ virtual void anchor();
};
template<class DataType> struct OptionValue;
@@ -339,7 +337,7 @@ struct OptionValueBase : public GenericOptionValue {
bool hasValue() const { return false; }
- const DataType &getValue() const { assert(false && "no default value"); }
+ const DataType &getValue() const { llvm_unreachable("no default value"); }
// Some options may take their value from a different data type.
template<class DT>
@@ -416,6 +414,8 @@ struct OptionValue<cl::boolOrDefault> : OptionValueCopy<cl::boolOrDefault> {
setValue(V);
return *this;
}
+private:
+ virtual void anchor();
};
template<>
@@ -431,6 +431,8 @@ struct OptionValue<std::string> : OptionValueCopy<std::string> {
setValue(V);
return *this;
}
+private:
+ virtual void anchor();
};
//===----------------------------------------------------------------------===//
@@ -1171,14 +1173,14 @@ public:
// One option...
template<class M0t>
- explicit opt(const M0t &M0) : Option(Optional | NotHidden) {
+ explicit opt(const M0t &M0) : Option(Optional, NotHidden) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
- opt(const M0t &M0, const M1t &M1) : Option(Optional | NotHidden) {
+ opt(const M0t &M0, const M1t &M1) : Option(Optional, NotHidden) {
apply(M0, this); apply(M1, this);
done();
}
@@ -1186,21 +1188,21 @@ public:
// Three options...
template<class M0t, class M1t, class M2t>
opt(const M0t &M0, const M1t &M1,
- const M2t &M2) : Option(Optional | NotHidden) {
+ const M2t &M2) : Option(Optional, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
opt(const M0t &M0, const M1t &M1, const M2t &M2,
- const M3t &M3) : Option(Optional | NotHidden) {
+ const M3t &M3) : Option(Optional, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
// Five options...
template<class M0t, class M1t, class M2t, class M3t, class M4t>
opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
- const M4t &M4) : Option(Optional | NotHidden) {
+ const M4t &M4) : Option(Optional, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this);
done();
@@ -1209,7 +1211,7 @@ public:
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t>
opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
- const M4t &M4, const M5t &M5) : Option(Optional | NotHidden) {
+ const M4t &M4, const M5t &M5) : Option(Optional, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this);
done();
@@ -1219,7 +1221,7 @@ public:
class M4t, class M5t, class M6t>
opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5,
- const M6t &M6) : Option(Optional | NotHidden) {
+ const M6t &M6) : Option(Optional, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this);
done();
@@ -1229,7 +1231,7 @@ public:
class M4t, class M5t, class M6t, class M7t>
opt(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6,
- const M7t &M7) : Option(Optional | NotHidden) {
+ const M7t &M7) : Option(Optional, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this);
done();
@@ -1338,34 +1340,34 @@ public:
// One option...
template<class M0t>
- explicit list(const M0t &M0) : Option(ZeroOrMore | NotHidden) {
+ explicit list(const M0t &M0) : Option(ZeroOrMore, NotHidden) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
- list(const M0t &M0, const M1t &M1) : Option(ZeroOrMore | NotHidden) {
+ list(const M0t &M0, const M1t &M1) : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this);
done();
}
// Three options...
template<class M0t, class M1t, class M2t>
list(const M0t &M0, const M1t &M1, const M2t &M2)
- : Option(ZeroOrMore | NotHidden) {
+ : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3)
- : Option(ZeroOrMore | NotHidden) {
+ : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
// Five options...
template<class M0t, class M1t, class M2t, class M3t, class M4t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
- const M4t &M4) : Option(ZeroOrMore | NotHidden) {
+ const M4t &M4) : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this);
done();
@@ -1374,7 +1376,7 @@ public:
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
- const M4t &M4, const M5t &M5) : Option(ZeroOrMore | NotHidden) {
+ const M4t &M4, const M5t &M5) : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this);
done();
@@ -1384,7 +1386,7 @@ public:
class M4t, class M5t, class M6t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6)
- : Option(ZeroOrMore | NotHidden) {
+ : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this);
done();
@@ -1394,7 +1396,7 @@ public:
class M4t, class M5t, class M6t, class M7t>
list(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6,
- const M7t &M7) : Option(ZeroOrMore | NotHidden) {
+ const M7t &M7) : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this);
done();
@@ -1536,34 +1538,34 @@ public:
// One option...
template<class M0t>
- explicit bits(const M0t &M0) : Option(ZeroOrMore | NotHidden) {
+ explicit bits(const M0t &M0) : Option(ZeroOrMore, NotHidden) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
- bits(const M0t &M0, const M1t &M1) : Option(ZeroOrMore | NotHidden) {
+ bits(const M0t &M0, const M1t &M1) : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this);
done();
}
// Three options...
template<class M0t, class M1t, class M2t>
bits(const M0t &M0, const M1t &M1, const M2t &M2)
- : Option(ZeroOrMore | NotHidden) {
+ : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3)
- : Option(ZeroOrMore | NotHidden) {
+ : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
// Five options...
template<class M0t, class M1t, class M2t, class M3t, class M4t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
- const M4t &M4) : Option(ZeroOrMore | NotHidden) {
+ const M4t &M4) : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this);
done();
@@ -1572,7 +1574,7 @@ public:
template<class M0t, class M1t, class M2t, class M3t,
class M4t, class M5t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
- const M4t &M4, const M5t &M5) : Option(ZeroOrMore | NotHidden) {
+ const M4t &M4, const M5t &M5) : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this);
done();
@@ -1582,7 +1584,7 @@ public:
class M4t, class M5t, class M6t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6)
- : Option(ZeroOrMore | NotHidden) {
+ : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this);
done();
@@ -1592,7 +1594,7 @@ public:
class M4t, class M5t, class M6t, class M7t>
bits(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3,
const M4t &M4, const M5t &M5, const M6t &M6,
- const M7t &M7) : Option(ZeroOrMore | NotHidden) {
+ const M7t &M7) : Option(ZeroOrMore, NotHidden) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
apply(M4, this); apply(M5, this); apply(M6, this); apply(M7, this);
done();
@@ -1632,27 +1634,27 @@ public:
// One option...
template<class M0t>
- explicit alias(const M0t &M0) : Option(Optional | Hidden), AliasFor(0) {
+ explicit alias(const M0t &M0) : Option(Optional, Hidden), AliasFor(0) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
- alias(const M0t &M0, const M1t &M1) : Option(Optional | Hidden), AliasFor(0) {
+ alias(const M0t &M0, const M1t &M1) : Option(Optional, Hidden), AliasFor(0) {
apply(M0, this); apply(M1, this);
done();
}
// Three options...
template<class M0t, class M1t, class M2t>
alias(const M0t &M0, const M1t &M1, const M2t &M2)
- : Option(Optional | Hidden), AliasFor(0) {
+ : Option(Optional, Hidden), AliasFor(0) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
alias(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3)
- : Option(Optional | Hidden), AliasFor(0) {
+ : Option(Optional, Hidden), AliasFor(0) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
diff --git a/contrib/llvm/include/llvm/Support/Compiler.h b/contrib/llvm/include/llvm/Support/Compiler.h
index e092157..d0b186e 100644
--- a/contrib/llvm/include/llvm/Support/Compiler.h
+++ b/contrib/llvm/include/llvm/Support/Compiler.h
@@ -49,16 +49,22 @@
#define LLVM_ATTRIBUTE_UNUSED
#endif
-#ifdef __GNUC__ // aka 'ATTRIBUTE_CONST' but following LLVM Conventions.
-#define LLVM_ATTRIBUTE_READNONE __attribute__((__const__))
+#if (__GNUC__ >= 4) && !defined(__MINGW32__) && !defined(__CYGWIN__)
+#define LLVM_ATTRIBUTE_WEAK __attribute__((__weak__))
#else
-#define LLVM_ATTRIBUTE_READNONE
+#define LLVM_ATTRIBUTE_WEAK
#endif
-#ifdef __GNUC__ // aka 'ATTRIBUTE_PURE' but following LLVM Conventions.
-#define LLVM_ATTRIBUTE_READONLY __attribute__((__pure__))
+#ifdef __GNUC__ // aka 'CONST' but following LLVM Conventions.
+#define LLVM_READNONE __attribute__((__const__))
#else
-#define LLVM_ATTRIBUTE_READONLY
+#define LLVM_READNONE
+#endif
+
+#ifdef __GNUC__ // aka 'PURE' but following LLVM Conventions.
+#define LLVM_READONLY __attribute__((__pure__))
+#else
+#define LLVM_READONLY
#endif
#if (__GNUC__ >= 4)
@@ -67,6 +73,7 @@
#define BUILTIN_EXPECT(EXPR, VALUE) (EXPR)
#endif
+
// C++ doesn't support 'extern template' of template specializations. GCC does,
// but requires __extension__ before it. In the header, use this:
// EXTERN_TEMPLATE_INSTANTIATION(class foo<bar>);
@@ -111,6 +118,14 @@
#define LLVM_ATTRIBUTE_NORETURN
#endif
+// LLVM_EXTENSION - Support compilers where we have a keyword to suppress
+// pedantic diagnostics.
+#ifdef __GNUC__
+#define LLVM_EXTENSION __extension__
+#else
+#define LLVM_EXTENSION
+#endif
+
// LLVM_ATTRIBUTE_DEPRECATED(decl, "message")
#if __has_feature(attribute_deprecated_with_message)
# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
diff --git a/contrib/llvm/include/llvm/Support/DOTGraphTraits.h b/contrib/llvm/include/llvm/Support/DOTGraphTraits.h
index 3cb8164..483f267 100644
--- a/contrib/llvm/include/llvm/Support/DOTGraphTraits.h
+++ b/contrib/llvm/include/llvm/Support/DOTGraphTraits.h
@@ -42,13 +42,13 @@ public:
/// top of the graph.
///
template<typename GraphType>
- static std::string getGraphName(const GraphType& Graph) { return ""; }
+ static std::string getGraphName(const GraphType &) { return ""; }
/// getGraphProperties - Return any custom properties that should be included
/// in the top level graph structure for dot.
///
template<typename GraphType>
- static std::string getGraphProperties(const GraphType& Graph) {
+ static std::string getGraphProperties(const GraphType &) {
return "";
}
@@ -61,44 +61,44 @@ public:
/// isNodeHidden - If the function returns true, the given node is not
/// displayed in the graph.
- static bool isNodeHidden(const void *Node) {
+ static bool isNodeHidden(const void *) {
return false;
}
/// getNodeLabel - Given a node and a pointer to the top level graph, return
/// the label to print in the node.
template<typename GraphType>
- std::string getNodeLabel(const void *Node, const GraphType& Graph) {
+ std::string getNodeLabel(const void *, const GraphType &) {
return "";
}
/// hasNodeAddressLabel - If this method returns true, the address of the node
/// is added to the label of the node.
template<typename GraphType>
- static bool hasNodeAddressLabel(const void *Node, const GraphType& Graph) {
+ static bool hasNodeAddressLabel(const void *, const GraphType &) {
return false;
}
/// If you want to specify custom node attributes, this is the place to do so
///
template<typename GraphType>
- static std::string getNodeAttributes(const void *Node,
- const GraphType& Graph) {
+ static std::string getNodeAttributes(const void *,
+ const GraphType &) {
return "";
}
/// If you want to override the dot attributes printed for a particular edge,
/// override this method.
template<typename EdgeIter, typename GraphType>
- static std::string getEdgeAttributes(const void *Node, EdgeIter EI,
- const GraphType& Graph) {
+ static std::string getEdgeAttributes(const void *, EdgeIter,
+ const GraphType &) {
return "";
}
/// getEdgeSourceLabel - If you want to label the edge source itself,
/// implement this method.
template<typename EdgeIter>
- static std::string getEdgeSourceLabel(const void *Node, EdgeIter I) {
+ static std::string getEdgeSourceLabel(const void *, EdgeIter) {
return "";
}
@@ -106,7 +106,7 @@ public:
/// should actually target another edge source, not a node. If this method is
/// implemented, getEdgeTarget should be implemented.
template<typename EdgeIter>
- static bool edgeTargetsEdgeSource(const void *Node, EdgeIter I) {
+ static bool edgeTargetsEdgeSource(const void *, EdgeIter) {
return false;
}
@@ -114,7 +114,7 @@ public:
/// called to determine which outgoing edge of Node is the target of this
/// edge.
template<typename EdgeIter>
- static EdgeIter getEdgeTarget(const void *Node, EdgeIter I) {
+ static EdgeIter getEdgeTarget(const void *, EdgeIter I) {
return I;
}
@@ -126,13 +126,13 @@ public:
/// numEdgeDestLabels - If hasEdgeDestLabels, this function returns the
/// number of incoming edge labels the given node has.
- static unsigned numEdgeDestLabels(const void *Node) {
+ static unsigned numEdgeDestLabels(const void *) {
return 0;
}
/// getEdgeDestLabel - If hasEdgeDestLabels, this function returns the
/// incoming edge label with the given index in the given node.
- static std::string getEdgeDestLabel(const void *Node, unsigned i) {
+ static std::string getEdgeDestLabel(const void *, unsigned) {
return "";
}
@@ -143,7 +143,7 @@ public:
/// it to add things to the output graph.
///
template<typename GraphType, typename GraphWriter>
- static void addCustomGraphFeatures(const GraphType& Graph, GraphWriter &GW) {}
+ static void addCustomGraphFeatures(const GraphType &, GraphWriter &) {}
};
diff --git a/contrib/llvm/include/llvm/Support/DataStream.h b/contrib/llvm/include/llvm/Support/DataStream.h
new file mode 100644
index 0000000..fedb0c9
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/DataStream.h
@@ -0,0 +1,38 @@
+//===---- llvm/Support/DataStream.h - Lazy bitcode streaming ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines DataStreamer, which fetches bytes of data from
+// a stream source. It provides support for streaming (lazy reading) of
+// data, e.g. bitcode.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_SUPPORT_DATASTREAM_H_
+#define LLVM_SUPPORT_DATASTREAM_H_
+
+#include <string>
+
+namespace llvm {
+
+class DataStreamer {
+public:
+  /// Fetch len bytes of data from the stream, and write them to the
+  /// buffer pointed to by buf. Returns the number of bytes actually written.
+ virtual size_t GetBytes(unsigned char *buf, size_t len) = 0;
+
+ virtual ~DataStreamer();
+};
+
+DataStreamer *getDataFileStreamer(const std::string &Filename,
+ std::string *Err);
+
+}
+
+#endif // LLVM_SUPPORT_DATASTREAM_H_
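// A hedged sketch of a DataStreamer implementation backed by an in-memory
// buffer (BufferStreamer is an illustrative name; the real file-backed
// streamer sits behind getDataFileStreamer in lib/Support):
#include "llvm/Support/DataStream.h"
#include <cstddef>
#include <cstring>

namespace {
class BufferStreamer : public llvm::DataStreamer {
  const unsigned char *Buf;
  size_t Size, Pos;
public:
  BufferStreamer(const unsigned char *buf, size_t size)
    : Buf(buf), Size(size), Pos(0) {}
  virtual size_t GetBytes(unsigned char *buf, size_t len) {
    // Clamp to the bytes remaining, copy them out, and advance.
    size_t N = (Pos + len <= Size) ? len : Size - Pos;
    std::memcpy(buf, Buf + Pos, N);
    Pos += N;
    return N;
  }
};
} // end anonymous namespace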
diff --git a/contrib/llvm/include/llvm/Support/DataTypes.h.in b/contrib/llvm/include/llvm/Support/DataTypes.h.in
index 425805a..b492bb1 100644
--- a/contrib/llvm/include/llvm/Support/DataTypes.h.in
+++ b/contrib/llvm/include/llvm/Support/DataTypes.h.in
@@ -167,9 +167,24 @@ typedef signed int ssize_t;
# define UINT64_C(C) C##ui64
#endif
+#ifndef PRId64
+# define PRId64 "I64d"
+#endif
+#ifndef PRIi64
+# define PRIi64 "I64i"
+#endif
+#ifndef PRIo64
+# define PRIo64 "I64o"
+#endif
+#ifndef PRIu64
+# define PRIu64 "I64u"
+#endif
#ifndef PRIx64
# define PRIx64 "I64x"
#endif
+#ifndef PRIX64
+# define PRIX64 "I64X"
+#endif
#endif /* _MSC_VER */
diff --git a/contrib/llvm/include/llvm/Support/Debug.h b/contrib/llvm/include/llvm/Support/Debug.h
index 8651fc1..e723272 100644
--- a/contrib/llvm/include/llvm/Support/Debug.h
+++ b/contrib/llvm/include/llvm/Support/Debug.h
@@ -35,14 +35,14 @@ class raw_ostream;
#ifndef DEBUG_TYPE
#define DEBUG_TYPE ""
#endif
-
+
#ifndef NDEBUG
/// DebugFlag - This boolean is set to true if the '-debug' command line option
/// is specified. This should probably not be referenced directly; instead, use
/// the DEBUG macro below.
///
extern bool DebugFlag;
-
+
/// isCurrentDebugType - Return true if the specified string is the debug type
/// specified on the command line, or if none was specified on the command line
/// with the -debug-only=X option.
@@ -54,7 +54,7 @@ bool isCurrentDebugType(const char *Type);
/// debug output to be produced.
///
void SetCurrentDebugType(const char *Type);
-
+
/// DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug
/// information. If the '-debug' option is specified on the command line, and if
/// this is a debug build, then the code specified as the option to the macro
diff --git a/contrib/llvm/include/llvm/Support/Dwarf.h b/contrib/llvm/include/llvm/Support/Dwarf.h
index 30f9187..8f18a99 100644
--- a/contrib/llvm/include/llvm/Support/Dwarf.h
+++ b/contrib/llvm/include/llvm/Support/Dwarf.h
@@ -22,7 +22,8 @@ namespace llvm {
// Debug info constants.
enum {
- LLVMDebugVersion = (11 << 16), // Current version of debug information.
+ LLVMDebugVersion = (12 << 16), // Current version of debug information.
+ LLVMDebugVersion11 = (11 << 16), // Constant for version 11.
LLVMDebugVersion10 = (10 << 16), // Constant for version 10.
LLVMDebugVersion9 = (9 << 16), // Constant for version 9.
LLVMDebugVersion8 = (8 << 16), // Constant for version 8.
@@ -130,6 +131,7 @@ enum dwarf_constants {
DW_TAG_GNU_template_parameter_pack = 0x4107,
DW_TAG_GNU_formal_parameter_pack = 0x4108,
DW_TAG_lo_user = 0x4080,
+ DW_TAG_APPLE_property = 0x4200,
DW_TAG_hi_user = 0xffff,
// Children flag
@@ -269,6 +271,7 @@ enum dwarf_constants {
DW_AT_APPLE_property_setter = 0x3fea,
DW_AT_APPLE_property_attribute = 0x3feb,
DW_AT_APPLE_objc_complete_type = 0x3fec,
+ DW_AT_APPLE_property = 0x3fed,
// Attribute form encodings
DW_FORM_addr = 0x01,
@@ -526,6 +529,7 @@ enum dwarf_constants {
DW_LANG_D = 0x0013,
DW_LANG_Python = 0x0014,
DW_LANG_lo_user = 0x8000,
+ DW_LANG_Mips_Assembler = 0x8001,
DW_LANG_hi_user = 0xffff,
// Identifier case codes
diff --git a/contrib/llvm/include/llvm/Support/DynamicLibrary.h b/contrib/llvm/include/llvm/Support/DynamicLibrary.h
index 288936b..0f59cbf 100644
--- a/contrib/llvm/include/llvm/Support/DynamicLibrary.h
+++ b/contrib/llvm/include/llvm/Support/DynamicLibrary.h
@@ -17,6 +17,9 @@
#include <string>
namespace llvm {
+
+class StringRef;
+
namespace sys {
/// This class provides a portable interface to dynamic libraries which also
diff --git a/contrib/llvm/include/llvm/Support/ELF.h b/contrib/llvm/include/llvm/Support/ELF.h
index c5b85e2..04953b6 100644
--- a/contrib/llvm/include/llvm/Support/ELF.h
+++ b/contrib/llvm/include/llvm/Support/ELF.h
@@ -599,7 +599,25 @@ enum {
R_ARM_THM_TLS_DESCSEQ32 = 0x82
};
+// Mips Specific e_flags
+enum {
+ EF_MIPS_NOREORDER = 0x00000001, // Don't reorder instructions
+ EF_MIPS_PIC = 0x00000002, // Position independent code
+ EF_MIPS_CPIC = 0x00000004, // Call object with Position independent code
+ EF_MIPS_ARCH_1 = 0x00000000, // MIPS1 instruction set
+ EF_MIPS_ARCH_2 = 0x10000000, // MIPS2 instruction set
+ EF_MIPS_ARCH_3 = 0x20000000, // MIPS3 instruction set
+ EF_MIPS_ARCH_4 = 0x30000000, // MIPS4 instruction set
+ EF_MIPS_ARCH_5 = 0x40000000, // MIPS5 instruction set
+  EF_MIPS_ARCH_32   = 0x50000000, // MIPS32 instruction set per Linux, not elf.h
+  EF_MIPS_ARCH_64   = 0x60000000, // MIPS64 instruction set per Linux, not elf.h
+  EF_MIPS_ARCH_32R2 = 0x70000000, // MIPS32r2 instruction set
+  EF_MIPS_ARCH_64R2 = 0x80000000, // MIPS64r2 instruction set
+ EF_MIPS_ARCH = 0xf0000000 // Mask for applying EF_MIPS_ARCH_ variant
+};
+
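// EF_MIPS_ARCH above is a mask over the ISA-variant bits of e_flags, so a
// reader compares the masked value against the EF_MIPS_ARCH_* constants.
// A hedged usage sketch (isMips64 is an illustrative name):
#include "llvm/Support/ELF.h"

static bool isMips64(uint32_t EFlags) {
  uint32_t Arch = EFlags & llvm::ELF::EF_MIPS_ARCH;
  return Arch == llvm::ELF::EF_MIPS_ARCH_64 ||
         Arch == llvm::ELF::EF_MIPS_ARCH_64R2;
}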
// ELF Relocation types for Mips
enum {
R_MIPS_NONE = 0,
R_MIPS_16 = 1,
@@ -611,6 +629,7 @@ enum {
R_MIPS_GPREL16 = 7,
R_MIPS_LITERAL = 8,
R_MIPS_GOT16 = 9,
+ R_MIPS_GOT = 9,
R_MIPS_PC16 = 10,
R_MIPS_CALL16 = 11,
R_MIPS_GPREL32 = 12,
@@ -717,6 +736,9 @@ enum {
SHT_GROUP = 17, // Section group.
SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
+ SHT_GNU_verdef = 0x6ffffffd, // GNU version definitions.
+ SHT_GNU_verneed = 0x6ffffffe, // GNU version references.
+ SHT_GNU_versym = 0x6fffffff, // GNU symbol versions table.
SHT_HIOS = 0x6fffffff, // Highest operating system-specific type.
SHT_LOPROC = 0x70000000, // Lowest processor architecture-specific type.
// Fixme: All this is duplicated in MCSectionELF. Why??
@@ -871,6 +893,7 @@ enum {
STT_TLS = 6, // Thread local data object
STT_LOOS = 7, // Lowest operating system-specific symbol type
STT_HIOS = 8, // Highest operating system-specific symbol type
+ STT_GNU_IFUNC = 10, // GNU indirect function
STT_LOPROC = 13, // Lowest processor-specific symbol type
STT_HIPROC = 15 // Highest processor-specific symbol type
};
@@ -1084,6 +1107,33 @@ enum {
DF_STATIC_TLS = 0x10 // Reject attempts to load dynamically.
};
+// ElfXX_VerDef structure version (GNU versioning)
+enum {
+ VER_DEF_NONE = 0,
+ VER_DEF_CURRENT = 1
+};
+
+// VerDef Flags (ElfXX_VerDef::vd_flags)
+enum {
+ VER_FLG_BASE = 0x1,
+ VER_FLG_WEAK = 0x2,
+ VER_FLG_INFO = 0x4
+};
+
+// Special constants for the version table. (SHT_GNU_versym/.gnu.version)
+enum {
+ VER_NDX_LOCAL = 0, // Unversioned local symbol
+ VER_NDX_GLOBAL = 1, // Unversioned global symbol
+ VERSYM_VERSION = 0x7fff, // Version Index mask
+ VERSYM_HIDDEN = 0x8000 // Hidden bit (non-default version)
+};
+
+// ElfXX_VerNeed structure version (GNU versioning)
+enum {
+ VER_NEED_NONE = 0,
+ VER_NEED_CURRENT = 1
+};
+
} // end namespace ELF
} // end namespace llvm
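// Each 16-bit entry in a SHT_GNU_versym section packs a version index in the
// low 15 bits and a "hidden" (non-default version) flag in bit 15, which is
// what VERSYM_VERSION and VERSYM_HIDDEN above separate. A hedged sketch
// (decodeVersym is an illustrative name):
#include "llvm/Support/ELF.h"

static void decodeVersym(uint16_t Versym, uint16_t &Index, bool &Hidden) {
  Index  = Versym & llvm::ELF::VERSYM_VERSION;       // low 15 bits
  Hidden = (Versym & llvm::ELF::VERSYM_HIDDEN) != 0; // bit 15
}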
diff --git a/contrib/llvm/include/llvm/Support/Endian.h b/contrib/llvm/include/llvm/Support/Endian.h
index af1b506..733ab75 100644
--- a/contrib/llvm/include/llvm/Support/Endian.h
+++ b/contrib/llvm/include/llvm/Support/Endian.h
@@ -98,6 +98,9 @@ public:
operator value_type() const {
return endian::read_le<value_type, unaligned>(Value);
}
+ void operator=(value_type newValue) {
+ endian::write_le<value_type, unaligned>((void *)&Value, newValue);
+ }
private:
uint8_t Value[sizeof(value_type)];
};
@@ -108,6 +111,9 @@ public:
operator value_type() const {
return endian::read_be<value_type, unaligned>(Value);
}
+ void operator=(value_type newValue) {
+ endian::write_be<value_type, unaligned>((void *)&Value, newValue);
+ }
private:
uint8_t Value[sizeof(value_type)];
};
@@ -118,6 +124,9 @@ public:
operator value_type() const {
return endian::read_le<value_type, aligned>(&Value);
}
+ void operator=(value_type newValue) {
+ endian::write_le<value_type, aligned>((void *)&Value, newValue);
+ }
private:
value_type Value;
};
@@ -128,6 +137,9 @@ public:
operator value_type() const {
return endian::read_be<value_type, aligned>(&Value);
}
+ void operator=(value_type newValue) {
+ endian::write_be<value_type, aligned>((void *)&Value, newValue);
+ }
private:
value_type Value;
};
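// With the operator= overloads added above, the packed endian wrappers become
// read/write, so on-disk structures can be edited in place independent of host
// byte order. A hedged sketch (FileRecord and halveSize are illustrative, not
// part of this header):
#include "llvm/Support/Endian.h"

struct FileRecord {
  llvm::support::ulittle32_t Offset; // always little-endian in the file
  llvm::support::ulittle32_t Size;
};

static void halveSize(FileRecord &R) {
  // Reads LE from memory, halves in host order, writes back LE.
  R.Size = R.Size / 2;
}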
diff --git a/contrib/llvm/include/llvm/Support/FileSystem.h b/contrib/llvm/include/llvm/Support/FileSystem.h
index a868e5f..e6f9926 100644
--- a/contrib/llvm/include/llvm/Support/FileSystem.h
+++ b/contrib/llvm/include/llvm/Support/FileSystem.h
@@ -27,14 +27,21 @@
#ifndef LLVM_SUPPORT_FILE_SYSTEM_H
#define LLVM_SUPPORT_FILE_SYSTEM_H
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/PathV1.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/system_error.h"
#include <ctime>
#include <iterator>
+#include <stack>
#include <string>
+#include <vector>
+
+#if HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
namespace llvm {
namespace sys {
@@ -91,7 +98,20 @@ struct space_info {
/// a platform specific member to store the result.
class file_status
{
- // implementation defined status field.
+ #if defined(LLVM_ON_UNIX)
+ dev_t st_dev;
+ ino_t st_ino;
+ #elif defined (LLVM_ON_WIN32)
+ uint32_t LastWriteTimeHigh;
+ uint32_t LastWriteTimeLow;
+ uint32_t VolumeSerialNumber;
+ uint32_t FileSizeHigh;
+ uint32_t FileSizeLow;
+ uint32_t FileIndexHigh;
+ uint32_t FileIndexLow;
+ #endif
+ friend bool equivalent(file_status A, file_status B);
+ friend error_code status(const Twine &path, file_status &result);
file_type Type;
public:
explicit file_status(file_type v=file_type::status_error)
@@ -101,6 +121,44 @@ public:
void type(file_type v) { Type = v; }
};
+/// file_magic - An "enum class" enumeration of file types based on magic (the first
+/// N bytes of the file).
+struct file_magic {
+ enum _ {
+ unknown = 0, ///< Unrecognized file
+ bitcode, ///< Bitcode file
+ archive, ///< ar style archive file
+ elf_relocatable, ///< ELF Relocatable object file
+ elf_executable, ///< ELF Executable image
+ elf_shared_object, ///< ELF dynamically linked shared lib
+ elf_core, ///< ELF core image
+ macho_object, ///< Mach-O Object file
+ macho_executable, ///< Mach-O Executable
+ macho_fixed_virtual_memory_shared_lib, ///< Mach-O Shared Lib, FVM
+ macho_core, ///< Mach-O Core File
+ macho_preload_executable, ///< Mach-O Preloaded Executable
+ macho_dynamically_linked_shared_lib, ///< Mach-O dynlinked shared lib
+ macho_dynamic_linker, ///< The Mach-O dynamic linker
+ macho_bundle, ///< Mach-O Bundle file
+ macho_dynamically_linked_shared_lib_stub, ///< Mach-O Shared lib stub
+ macho_dsym_companion, ///< Mach-O dSYM companion file
+ coff_object, ///< COFF object file
+ pecoff_executable ///< PECOFF executable file
+ };
+
+ bool is_object() const {
+ return v_ != unknown;
+ }
+
+ file_magic() : v_(unknown) {}
+ file_magic(_ v) : v_(v) {}
+ explicit file_magic(int v) : v_(_(v)) {}
+ operator int() const {return v_;}
+
+private:
+ int v_;
+};
+
/// @}
/// @name Physical Operators
/// @{
@@ -241,6 +299,8 @@ bool equivalent(file_status A, file_status B);
/// @brief Do paths represent the same thing?
///
+/// assert(status_known(A) || status_known(B));
+///
/// @param A Input path A.
/// @param B Input path B.
/// @param result Set to true if stat(A) and stat(B) have the same device and
@@ -397,13 +457,16 @@ error_code has_magic(const Twine &path, const Twine &magic, bool &result);
error_code get_magic(const Twine &path, uint32_t len,
SmallVectorImpl<char> &result);
+/// @brief Identify the type of a binary file from the given magic bytes.
+file_magic identify_magic(StringRef magic);
+
/// @brief Get and identify \a path's type based on its content.
///
/// @param path Input path.
/// @param result Set to the type of file, or file_magic::unknown.
/// @result errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
-error_code identify_magic(const Twine &path, LLVMFileType &result);
+error_code identify_magic(const Twine &path, file_magic &result);
/// @brief Get library paths the system linker uses.
///
@@ -479,76 +542,171 @@ public:
bool operator>=(const directory_entry& rhs) const;
};
+namespace detail {
+ struct DirIterState;
+
+ error_code directory_iterator_construct(DirIterState&, StringRef);
+ error_code directory_iterator_increment(DirIterState&);
+ error_code directory_iterator_destruct(DirIterState&);
+
+ /// DirIterState - Keeps state for the directory_iterator. It is reference
+ /// counted in order to preserve InputIterator semantics on copy.
+ struct DirIterState : public RefCountedBase<DirIterState> {
+ DirIterState()
+ : IterationHandle(0) {}
+
+ ~DirIterState() {
+ directory_iterator_destruct(*this);
+ }
+
+ intptr_t IterationHandle;
+ directory_entry CurrentEntry;
+ };
+}
+
/// directory_iterator - Iterates through the entries in path. There is no
/// operator++ because we need an error_code. If it's really needed we can make
/// it call report_fatal_error on error.
class directory_iterator {
- intptr_t IterationHandle;
- directory_entry CurrentEntry;
-
- // Platform implementations implement these functions to handle iteration.
- friend error_code directory_iterator_construct(directory_iterator &it,
- StringRef path);
- friend error_code directory_iterator_increment(directory_iterator &it);
- friend error_code directory_iterator_destruct(directory_iterator &it);
+ IntrusiveRefCntPtr<detail::DirIterState> State;
public:
- explicit directory_iterator(const Twine &path, error_code &ec)
- : IterationHandle(0) {
+ explicit directory_iterator(const Twine &path, error_code &ec) {
+ State = new detail::DirIterState;
SmallString<128> path_storage;
- ec = directory_iterator_construct(*this, path.toStringRef(path_storage));
+ ec = detail::directory_iterator_construct(*State,
+ path.toStringRef(path_storage));
}
- /// Construct end iterator.
- directory_iterator() : IterationHandle(0) {}
-
- ~directory_iterator() {
- directory_iterator_destruct(*this);
+ explicit directory_iterator(const directory_entry &de, error_code &ec) {
+ State = new detail::DirIterState;
+ ec = detail::directory_iterator_construct(*State, de.path());
}
+ /// Construct end iterator.
+ directory_iterator() : State(new detail::DirIterState) {}
+
// No operator++ because we need error_code.
directory_iterator &increment(error_code &ec) {
- ec = directory_iterator_increment(*this);
+ ec = directory_iterator_increment(*State);
return *this;
}
- const directory_entry &operator*() const { return CurrentEntry; }
- const directory_entry *operator->() const { return &CurrentEntry; }
+ const directory_entry &operator*() const { return State->CurrentEntry; }
+ const directory_entry *operator->() const { return &State->CurrentEntry; }
+
+ bool operator==(const directory_iterator &RHS) const {
+ return State->CurrentEntry == RHS.State->CurrentEntry;
+ }
bool operator!=(const directory_iterator &RHS) const {
- return CurrentEntry != RHS.CurrentEntry;
+ return !(*this == RHS);
}
// Other members as required by
// C++ Std, 24.1.1 Input iterators [input.iterators]
};
+namespace detail {
+ /// RecDirIterState - Keeps state for the recursive_directory_iterator. It is
+ /// reference counted in order to preserve InputIterator semantics on copy.
+ struct RecDirIterState : public RefCountedBase<RecDirIterState> {
+ RecDirIterState()
+ : Level(0)
+ , HasNoPushRequest(false) {}
+
+ std::stack<directory_iterator, std::vector<directory_iterator> > Stack;
+ uint16_t Level;
+ bool HasNoPushRequest;
+ };
+}
+
/// recursive_directory_iterator - Same as directory_iterator except that it
/// recurses down into child directories.
class recursive_directory_iterator {
- uint16_t Level;
- bool HasNoPushRequest;
- // implementation directory iterator status
+ IntrusiveRefCntPtr<detail::RecDirIterState> State;
public:
- explicit recursive_directory_iterator(const Twine &path, error_code &ec);
+ recursive_directory_iterator() {}
+ explicit recursive_directory_iterator(const Twine &path, error_code &ec)
+ : State(new detail::RecDirIterState) {
+ State->Stack.push(directory_iterator(path, ec));
+ if (State->Stack.top() == directory_iterator())
+ State.reset();
+ }
// No operator++ because we need error_code.
- directory_iterator &increment(error_code &ec);
+ recursive_directory_iterator &increment(error_code &ec) {
+ static const directory_iterator end_itr;
+
+ if (State->HasNoPushRequest)
+ State->HasNoPushRequest = false;
+ else {
+ file_status st;
+ if ((ec = State->Stack.top()->status(st))) return *this;
+ if (is_directory(st)) {
+ State->Stack.push(directory_iterator(*State->Stack.top(), ec));
+ if (ec) return *this;
+ if (State->Stack.top() != end_itr) {
+ ++State->Level;
+ return *this;
+ }
+ State->Stack.pop();
+ }
+ }
+
+ while (!State->Stack.empty()
+ && State->Stack.top().increment(ec) == end_itr) {
+ State->Stack.pop();
+ --State->Level;
+ }
+
+ // Check if we are done. If so, create an end iterator.
+ if (State->Stack.empty())
+ State.reset();
- const directory_entry &operator*() const;
- const directory_entry *operator->() const;
+ return *this;
+ }
+
+ const directory_entry &operator*() const { return *State->Stack.top(); }
+ const directory_entry *operator->() const { return &*State->Stack.top(); }
// observers
- /// Gets the current level. path is at level 0.
- int level() const;
+ /// Gets the current level. Starting path is at level 0.
+ int level() const { return State->Level; }
+
/// Returns true if no_push has been called for this directory_entry.
- bool no_push_request() const;
+ bool no_push_request() const { return State->HasNoPushRequest; }
// modifiers
/// Goes up one level if Level > 0.
- void pop();
+ void pop() {
+ assert(State && "Cannot pop and end itertor!");
+ assert(State->Level > 0 && "Cannot pop an iterator with level < 1");
+
+ static const directory_iterator end_itr;
+ error_code ec;
+ do {
+ if (ec)
+ report_fatal_error("Error incrementing directory iterator.");
+ State->Stack.pop();
+ --State->Level;
+ } while (!State->Stack.empty()
+ && State->Stack.top().increment(ec) == end_itr);
+
+ // Check if we are done. If so, create an end iterator.
+ if (State->Stack.empty())
+ State.reset();
+ }
+
/// Does not go down into the current directory_entry.
- void no_push();
+ void no_push() { State->HasNoPushRequest = true; }
+ bool operator==(const recursive_directory_iterator &RHS) const {
+ return State == RHS.State;
+ }
+
+ bool operator!=(const recursive_directory_iterator &RHS) const {
+ return !(*this == RHS);
+ }
// Other members as required by
// C++ Std, 24.1.1 Input iterators [input.iterators]
};
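
Taken together, the reference-counted iterators and the file_magic query
compose naturally. A sketch of walking a tree and printing every file whose
magic marks it as an object; listObjects is a hypothetical helper and error
handling is abbreviated:

#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"

static void listObjects(const llvm::Twine &Root) {
  using namespace llvm;
  using namespace llvm::sys;
  error_code EC;
  for (fs::recursive_directory_iterator I(Root, EC), E;
       I != E && !EC; I.increment(EC)) {
    fs::file_magic Magic;
    // identify_magic reads the leading bytes of the file at this path.
    if (!fs::identify_magic(I->path(), Magic) && Magic.is_object())
      outs() << I->path() << "\n";
  }
}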
diff --git a/contrib/llvm/include/llvm/Support/GraphWriter.h b/contrib/llvm/include/llvm/Support/GraphWriter.h
index eab0c9d..ae32da5 100644
--- a/contrib/llvm/include/llvm/Support/GraphWriter.h
+++ b/contrib/llvm/include/llvm/Support/GraphWriter.h
@@ -296,26 +296,26 @@ public:
template<typename GraphType>
raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
bool ShortNames = false,
- const std::string &Title = "") {
+ const Twine &Title = "") {
// Start the graph emission process...
GraphWriter<GraphType> W(O, G, ShortNames);
// Emit the graph.
- W.writeGraph(Title);
+ W.writeGraph(Title.str());
return O;
}
template<typename GraphType>
-sys::Path WriteGraph(const GraphType &G, const std::string &Name,
- bool ShortNames = false, const std::string &Title = "") {
+sys::Path WriteGraph(const GraphType &G, const Twine &Name,
+ bool ShortNames = false, const Twine &Title = "") {
std::string ErrMsg;
sys::Path Filename = sys::Path::GetTemporaryDirectory(&ErrMsg);
if (Filename.isEmpty()) {
errs() << "Error: " << ErrMsg << "\n";
return Filename;
}
- Filename.appendComponent(Name + ".dot");
+ Filename.appendComponent((Name + ".dot").str());
if (Filename.makeUnique(true,&ErrMsg)) {
errs() << "Error: " << ErrMsg << "\n";
return sys::Path();
@@ -341,8 +341,8 @@ sys::Path WriteGraph(const GraphType &G, const std::string &Name,
/// then cleanup. For use from the debugger.
///
template<typename GraphType>
-void ViewGraph(const GraphType &G, const std::string &Name,
- bool ShortNames = false, const std::string &Title = "",
+void ViewGraph(const GraphType &G, const Twine &Name,
+ bool ShortNames = false, const Twine &Title = "",
GraphProgram::Name Program = GraphProgram::DOT) {
sys::Path Filename = llvm::WriteGraph(G, Name, ShortNames, Title);
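
Because Name and Title are now Twines, callers can concatenate at the call
site without building temporary std::strings. A sketch; dumpTitled is a
hypothetical wrapper:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/GraphWriter.h"

template <typename GraphType>
void dumpTitled(const GraphType &G, llvm::StringRef Base) {
  // Both arguments are cheap Twine concatenations, evaluated lazily.
  llvm::ViewGraph(G, Base + ".graph", /*ShortNames=*/false,
                  "Graph for " + Base);
}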
diff --git a/contrib/llvm/include/llvm/Support/Host.h b/contrib/llvm/include/llvm/Support/Host.h
index f77d4c1..b331016 100644
--- a/contrib/llvm/include/llvm/Support/Host.h
+++ b/contrib/llvm/include/llvm/Support/Host.h
@@ -33,14 +33,14 @@ namespace sys {
return !isLittleEndianHost();
}
- /// getHostTriple() - Return the target triple of the running
- /// system.
+ /// getDefaultTargetTriple() - Return the default target triple the compiler
+ /// has been configured to produce code for.
///
/// The target triple is a string in the format of:
/// CPU_TYPE-VENDOR-OPERATING_SYSTEM
/// or
/// CPU_TYPE-VENDOR-KERNEL-OPERATING_SYSTEM
- std::string getHostTriple();
+ std::string getDefaultTargetTriple();
/// getHostCPUName - Get the LLVM name for the host CPU. The particular format
/// of the name is target dependent, and suitable for passing as -mcpu to the
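
A sketch of the renamed query; note it reports the triple the compiler was
configured to target, which need not match the machine it runs on:

#include "llvm/Support/Host.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::outs() << llvm::sys::getDefaultTargetTriple() << "\n";
  return 0;
}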
diff --git a/contrib/llvm/include/llvm/Support/IRReader.h b/contrib/llvm/include/llvm/Support/IRReader.h
index 292c001..6d8a9b3 100644
--- a/contrib/llvm/include/llvm/Support/IRReader.h
+++ b/contrib/llvm/include/llvm/Support/IRReader.h
@@ -40,7 +40,8 @@ namespace llvm {
std::string ErrMsg;
Module *M = getLazyBitcodeModule(Buffer, Context, &ErrMsg);
if (M == 0) {
- Err = SMDiagnostic(Buffer->getBufferIdentifier(), ErrMsg);
+ Err = SMDiagnostic(Buffer->getBufferIdentifier(), SourceMgr::DK_Error,
+ ErrMsg);
// ParseBitcodeFile does not take ownership of the Buffer in the
// case of an error.
delete Buffer;
@@ -60,7 +61,7 @@ namespace llvm {
LLVMContext &Context) {
OwningPtr<MemoryBuffer> File;
if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) {
- Err = SMDiagnostic(Filename,
+ Err = SMDiagnostic(Filename, SourceMgr::DK_Error,
"Could not open input file: " + ec.message());
return 0;
}
@@ -80,7 +81,8 @@ namespace llvm {
std::string ErrMsg;
Module *M = ParseBitcodeFile(Buffer, Context, &ErrMsg);
if (M == 0)
- Err = SMDiagnostic(Buffer->getBufferIdentifier(), ErrMsg);
+ Err = SMDiagnostic(Buffer->getBufferIdentifier(), SourceMgr::DK_Error,
+ ErrMsg);
// ParseBitcodeFile does not take ownership of the Buffer.
delete Buffer;
return M;
@@ -97,7 +99,7 @@ namespace llvm {
LLVMContext &Context) {
OwningPtr<MemoryBuffer> File;
if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) {
- Err = SMDiagnostic(Filename,
+ Err = SMDiagnostic(Filename, SourceMgr::DK_Error,
"Could not open input file: " + ec.message());
return 0;
}
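
The extra SourceMgr::DK_Error argument flows through to SMDiagnostic, whose
renamed print() method renders the severity prefix itself. A usage sketch;
loadIR is a hypothetical helper:

#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Support/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

llvm::Module *loadIR(const std::string &Path) {
  llvm::SMDiagnostic Err;
  llvm::Module *M = llvm::ParseIRFile(Path, Err, llvm::getGlobalContext());
  if (!M)
    Err.print("loader", llvm::errs()); // prints "error: ..." with location
  return M;
}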
diff --git a/contrib/llvm/include/llvm/Support/InstVisitor.h b/contrib/llvm/include/llvm/Support/InstVisitor.h
index a661c4f..52de8f6 100644
--- a/contrib/llvm/include/llvm/Support/InstVisitor.h
+++ b/contrib/llvm/include/llvm/Support/InstVisitor.h
@@ -14,6 +14,7 @@
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Module.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
@@ -157,54 +158,74 @@ public:
// Specific Instruction type classes... note that all of the casts are
// necessary because we use the instruction classes as opaque types...
//
- RetTy visitReturnInst(ReturnInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitBranchInst(BranchInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitSwitchInst(SwitchInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitIndirectBrInst(IndirectBrInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitInvokeInst(InvokeInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitUnwindInst(UnwindInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitResumeInst(ResumeInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);}
- RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);}
- RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);}
- RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(Instruction); }
- RetTy visitLoadInst(LoadInst &I) { DELEGATE(Instruction); }
- RetTy visitStoreInst(StoreInst &I) { DELEGATE(Instruction); }
- RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I){ DELEGATE(Instruction); }
- RetTy visitAtomicRMWInst(AtomicRMWInst &I) { DELEGATE(Instruction); }
- RetTy visitFenceInst(FenceInst &I) { DELEGATE(Instruction); }
- RetTy visitGetElementPtrInst(GetElementPtrInst &I){ DELEGATE(Instruction); }
- RetTy visitPHINode(PHINode &I) { DELEGATE(Instruction); }
- RetTy visitTruncInst(TruncInst &I) { DELEGATE(CastInst); }
- RetTy visitZExtInst(ZExtInst &I) { DELEGATE(CastInst); }
- RetTy visitSExtInst(SExtInst &I) { DELEGATE(CastInst); }
- RetTy visitFPTruncInst(FPTruncInst &I) { DELEGATE(CastInst); }
- RetTy visitFPExtInst(FPExtInst &I) { DELEGATE(CastInst); }
- RetTy visitFPToUIInst(FPToUIInst &I) { DELEGATE(CastInst); }
- RetTy visitFPToSIInst(FPToSIInst &I) { DELEGATE(CastInst); }
- RetTy visitUIToFPInst(UIToFPInst &I) { DELEGATE(CastInst); }
- RetTy visitSIToFPInst(SIToFPInst &I) { DELEGATE(CastInst); }
- RetTy visitPtrToIntInst(PtrToIntInst &I) { DELEGATE(CastInst); }
- RetTy visitIntToPtrInst(IntToPtrInst &I) { DELEGATE(CastInst); }
- RetTy visitBitCastInst(BitCastInst &I) { DELEGATE(CastInst); }
- RetTy visitSelectInst(SelectInst &I) { DELEGATE(Instruction); }
- RetTy visitCallInst(CallInst &I) { DELEGATE(Instruction); }
- RetTy visitVAArgInst(VAArgInst &I) { DELEGATE(Instruction); }
+ RetTy visitReturnInst(ReturnInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitBranchInst(BranchInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitSwitchInst(SwitchInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitIndirectBrInst(IndirectBrInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitResumeInst(ResumeInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);}
+ RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);}
+ RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitLoadInst(LoadInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitStoreInst(StoreInst &I) { DELEGATE(Instruction);}
+ RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);}
+ RetTy visitAtomicRMWInst(AtomicRMWInst &I) { DELEGATE(Instruction);}
+ RetTy visitFenceInst(FenceInst &I) { DELEGATE(Instruction);}
+ RetTy visitGetElementPtrInst(GetElementPtrInst &I){ DELEGATE(Instruction);}
+ RetTy visitPHINode(PHINode &I) { DELEGATE(Instruction);}
+ RetTy visitTruncInst(TruncInst &I) { DELEGATE(CastInst);}
+ RetTy visitZExtInst(ZExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitSExtInst(SExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPTruncInst(FPTruncInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPExtInst(FPExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPToUIInst(FPToUIInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPToSIInst(FPToSIInst &I) { DELEGATE(CastInst);}
+ RetTy visitUIToFPInst(UIToFPInst &I) { DELEGATE(CastInst);}
+ RetTy visitSIToFPInst(SIToFPInst &I) { DELEGATE(CastInst);}
+ RetTy visitPtrToIntInst(PtrToIntInst &I) { DELEGATE(CastInst);}
+ RetTy visitIntToPtrInst(IntToPtrInst &I) { DELEGATE(CastInst);}
+ RetTy visitBitCastInst(BitCastInst &I) { DELEGATE(CastInst);}
+ RetTy visitSelectInst(SelectInst &I) { DELEGATE(Instruction);}
+ RetTy visitVAArgInst(VAArgInst &I) { DELEGATE(UnaryInstruction);}
RetTy visitExtractElementInst(ExtractElementInst &I) { DELEGATE(Instruction);}
- RetTy visitInsertElementInst(InsertElementInst &I) { DELEGATE(Instruction); }
- RetTy visitShuffleVectorInst(ShuffleVectorInst &I) { DELEGATE(Instruction); }
- RetTy visitExtractValueInst(ExtractValueInst &I) { DELEGATE(Instruction);}
- RetTy visitInsertValueInst(InsertValueInst &I) { DELEGATE(Instruction); }
- RetTy visitLandingPadInst(LandingPadInst &I) { DELEGATE(Instruction); }
+ RetTy visitInsertElementInst(InsertElementInst &I) { DELEGATE(Instruction);}
+ RetTy visitShuffleVectorInst(ShuffleVectorInst &I) { DELEGATE(Instruction);}
+ RetTy visitExtractValueInst(ExtractValueInst &I){ DELEGATE(UnaryInstruction);}
+ RetTy visitInsertValueInst(InsertValueInst &I) { DELEGATE(Instruction); }
+ RetTy visitLandingPadInst(LandingPadInst &I) { DELEGATE(Instruction); }
+
+ // Call and Invoke are slightly different as they delegate first through
+ // a generic CallSite visitor.
+ RetTy visitCallInst(CallInst &I) {
+ return static_cast<SubClass*>(this)->visitCallSite(&I);
+ }
+ RetTy visitInvokeInst(InvokeInst &I) {
+ return static_cast<SubClass*>(this)->visitCallSite(&I);
+ }
// Next level propagators: If the user does not overload a specific
// instruction type, they can overload one of these to get the whole class
// of instructions...
//
- RetTy visitTerminatorInst(TerminatorInst &I) { DELEGATE(Instruction); }
- RetTy visitBinaryOperator(BinaryOperator &I) { DELEGATE(Instruction); }
- RetTy visitCmpInst(CmpInst &I) { DELEGATE(Instruction); }
- RetTy visitCastInst(CastInst &I) { DELEGATE(Instruction); }
+ RetTy visitCastInst(CastInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitBinaryOperator(BinaryOperator &I) { DELEGATE(Instruction);}
+ RetTy visitCmpInst(CmpInst &I) { DELEGATE(Instruction);}
+ RetTy visitTerminatorInst(TerminatorInst &I) { DELEGATE(Instruction);}
+ RetTy visitUnaryInstruction(UnaryInstruction &I){ DELEGATE(Instruction);}
+
+ // Provide a special visitor for a 'callsite' that visits both calls and
+ // invokes. When unimplemented, properly delegates to either the terminator or
+ // regular instruction visitor.
+ RetTy visitCallSite(CallSite CS) {
+ assert(CS);
+ Instruction &I = *CS.getInstruction();
+ if (CS.isCall())
+ DELEGATE(Instruction);
+
+ assert(CS.isInvoke());
+ DELEGATE(TerminatorInst);
+ }
// If the user wants a 'default' case, they can choose to override this
// function. If this function is not overloaded in the user's subclass, then
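
A sketch of the new hook: a visitor that counts call sites of any kind.
CallCounter is illustrative only; defining visitCallSite intercepts both
CallInst and InvokeInst before they reach the per-instruction visitors:

#include "llvm/Support/InstVisitor.h"

struct CallCounter : public llvm::InstVisitor<CallCounter> {
  unsigned Count;
  CallCounter() : Count(0) {}
  void visitCallSite(llvm::CallSite CS) {
    ++Count; // fires for calls and invokes alike
  }
};

// Usage: CallCounter CC; CC.visit(F); // F is a llvm::Function&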
diff --git a/contrib/llvm/include/llvm/Support/JSONParser.h b/contrib/llvm/include/llvm/Support/JSONParser.h
new file mode 100644
index 0000000..11149f1
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/JSONParser.h
@@ -0,0 +1,448 @@
+//===--- JSONParser.h - Simple JSON parser ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a JSON parser.
+//
+// See http://www.json.org/ for an overview.
+// See http://www.ietf.org/rfc/rfc4627.txt for the full standard.
+//
+// FIXME: Currently this supports a subset of JSON. Specifically, support
+// for numbers, booleans and null for values is missing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_JSON_PARSER_H
+#define LLVM_SUPPORT_JSON_PARSER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+
+namespace llvm {
+
+class JSONContainer;
+class JSONString;
+class JSONValue;
+class JSONKeyValuePair;
+
+/// \brief Base class for a parsable JSON atom.
+///
+/// This class has no semantics other than being a unit of JSON data which can
+/// be parsed out of a JSON document.
+class JSONAtom {
+public:
+ /// \brief Possible types of JSON objects.
+ enum Kind { JK_KeyValuePair, JK_Array, JK_Object, JK_String };
+
+ /// \brief Returns the type of this value.
+ Kind getKind() const { return MyKind; }
+
+ static bool classof(const JSONAtom *Atom) { return true; }
+
+protected:
+ JSONAtom(Kind MyKind) : MyKind(MyKind) {}
+
+private:
+ Kind MyKind;
+};
+
+/// \brief A parser for JSON text.
+///
+/// Use an object of JSONParser to iterate over the values of a JSON text.
+/// All objects are parsed during the iteration, so you can only iterate once
+/// over the JSON text, but the cost of partial iteration is minimized.
+/// Create a new JSONParser if you want to iterate multiple times.
+class JSONParser {
+public:
+ /// \brief Create a JSONParser for the given input.
+ ///
+ /// Parsing is started via parseRoot(). Access to the object returned from
+ /// parseRoot() will parse the input lazily.
+ JSONParser(StringRef Input, SourceMgr *SM);
+
+ /// \brief Returns the outermost JSON value (either an array or an object).
+ ///
+ /// Can return NULL if the input does not start with an array or an object.
+ /// The object is not parsed yet - the caller must iterate over the
+ /// returned object to trigger parsing.
+ ///
+ /// A JSONValue can be either a JSONString, JSONObject or JSONArray.
+ JSONValue *parseRoot();
+
+ /// \brief Parses the JSON text and returns whether it is valid JSON.
+ ///
+ /// If validate() returns false, failed() will return true and
+ /// getErrorMessage() will return the parsing error.
+ bool validate();
+
+ /// \brief Returns true if an error occurred during parsing.
+ ///
+ /// If there was an error while parsing an object that was created by
+ /// iterating over the result of 'parseRoot', 'failed' will return true.
+ bool failed() const;
+
+private:
+ /// \brief These methods manage the implementation details of parsing new JSON
+ /// atoms.
+ /// @{
+ JSONString *parseString();
+ JSONValue *parseValue();
+ JSONKeyValuePair *parseKeyValuePair();
+ /// @}
+
+ /// \brief Helpers to parse the elements out of both forms of containers.
+ /// @{
+ const JSONAtom *parseElement(JSONAtom::Kind ContainerKind);
+ StringRef::iterator parseFirstElement(JSONAtom::Kind ContainerKind,
+ char StartChar, char EndChar,
+ const JSONAtom *&Element);
+ StringRef::iterator parseNextElement(JSONAtom::Kind ContainerKind,
+ char EndChar,
+ const JSONAtom *&Element);
+ /// @}
+
+ /// \brief Whitespace parsing.
+ /// @{
+ void nextNonWhitespace();
+ bool isWhitespace();
+ /// @}
+
+ /// \brief These methods are used for error handling.
+ /// @{
+ void setExpectedError(StringRef Expected, StringRef Found);
+ void setExpectedError(StringRef Expected, char Found);
+ bool errorIfAtEndOfFile(StringRef Message);
+ bool errorIfNotAt(char C, StringRef Message);
+ /// @}
+
+ /// \brief Skips all elements in the given container.
+ bool skipContainer(const JSONContainer &Container);
+
+ /// \brief Skips to the next position behind the given JSON atom.
+ bool skip(const JSONAtom &Atom);
+
+ /// All nodes are allocated by the parser and will be deallocated when the
+ /// parser is destroyed.
+ BumpPtrAllocator ValueAllocator;
+
+ /// \brief The original input to the parser.
+ MemoryBuffer *InputBuffer;
+
+ /// \brief The source manager used for diagnostics and buffer management.
+ SourceMgr *SM;
+
+ /// \brief The current position in the parse stream.
+ StringRef::iterator Position;
+
+ /// \brief The end position for fast EOF checks without introducing
+ /// unnecessary dereferences.
+ StringRef::iterator End;
+
+ /// \brief If true, an error has occurred.
+ bool Failed;
+
+ friend class JSONContainer;
+};
+
+
+/// \brief Base class for JSON value objects.
+///
+/// This object represents an abstract JSON value. It is the root node behind
+/// the group of JSON entities that can represent top-level values in a JSON
+/// document. It has no API, and is just a placeholder in the type hierarchy of
+/// nodes.
+class JSONValue : public JSONAtom {
+protected:
+ JSONValue(Kind MyKind) : JSONAtom(MyKind) {}
+
+public:
+ /// \brief dyn_cast helpers
+ ///@{
+ static bool classof(const JSONAtom *Atom) {
+ switch (Atom->getKind()) {
+ case JK_Array:
+ case JK_Object:
+ case JK_String:
+ return true;
+ case JK_KeyValuePair:
+ return false;
+ }
+ llvm_unreachable("Invalid JSONAtom kind");
+ }
+ static bool classof(const JSONValue *Value) { return true; }
+ ///@}
+};
+
+/// \brief Gives access to the text of a JSON string.
+///
+/// FIXME: Implement a method to return the unescaped text.
+class JSONString : public JSONValue {
+public:
+ /// \brief Returns the underlying parsed text of the string.
+ ///
+ /// This is the unescaped content of the JSON text.
+ /// See http://www.ietf.org/rfc/rfc4627.txt for details.
+ StringRef getRawText() const { return RawText; }
+
+private:
+ JSONString(StringRef RawText) : JSONValue(JK_String), RawText(RawText) {}
+
+ StringRef RawText;
+
+ friend class JSONParser;
+
+public:
+ /// \brief dyn_cast helpers
+ ///@{
+ static bool classof(const JSONAtom *Atom) {
+ return Atom->getKind() == JK_String;
+ }
+ static bool classof(const JSONString *String) { return true; }
+ ///@}
+};
+
+/// \brief A (key, value) tuple of type (JSONString *, JSONValue *).
+///
+/// Note that JSONKeyValuePair is not a JSONValue, it is a bare JSONAtom.
+/// JSONKeyValuePairs can be elements of a JSONObject, but not of a JSONArray.
+/// They are not viable as top-level values either.
+class JSONKeyValuePair : public JSONAtom {
+public:
+ const JSONString * const Key;
+ const JSONValue * const Value;
+
+private:
+ JSONKeyValuePair(const JSONString *Key, const JSONValue *Value)
+ : JSONAtom(JK_KeyValuePair), Key(Key), Value(Value) {}
+
+ friend class JSONParser;
+
+public:
+ /// \brief dyn_cast helpers
+ ///@{
+ static bool classof(const JSONAtom *Atom) {
+ return Atom->getKind() == JK_KeyValuePair;
+ }
+ static bool classof(const JSONKeyValuePair *KeyValuePair) { return true; }
+ ///@}
+};
+
+/// \brief Implementation of JSON containers (arrays and objects).
+///
+/// JSONContainers drive the lazy parsing of JSON arrays and objects via
+/// forward iterators.
+class JSONContainer : public JSONValue {
+private:
+ /// \brief An iterator that parses the underlying container during iteration.
+ ///
+ /// Iterators on the same collection use shared state, so when multiple copies
+ /// of an iterator exist, only one is allowed to be used for iteration;
+ /// iterating multiple copies of an iterator of the same collection will lead
+ /// to undefined behavior.
+ class AtomIterator {
+ public:
+ AtomIterator(const AtomIterator &I) : Container(I.Container) {}
+
+ /// \brief Iterator interface.
+ ///@{
+ bool operator==(const AtomIterator &I) const {
+ if (isEnd() || I.isEnd())
+ return isEnd() == I.isEnd();
+ return Container->Position == I.Container->Position;
+ }
+ bool operator!=(const AtomIterator &I) const {
+ return !(*this == I);
+ }
+ AtomIterator &operator++() {
+ Container->parseNextElement();
+ return *this;
+ }
+ const JSONAtom *operator*() {
+ return Container->Current;
+ }
+ ///@}
+
+ private:
+ /// \brief Create an iterator for which 'isEnd' returns true.
+ AtomIterator() : Container(0) {}
+
+ /// \brief Create an iterator for the given container.
+ AtomIterator(const JSONContainer *Container) : Container(Container) {}
+
+ bool isEnd() const {
+ return Container == 0 || Container->Position == StringRef::iterator();
+ }
+
+ const JSONContainer * const Container;
+
+ friend class JSONContainer;
+ };
+
+protected:
+ /// \brief An iterator for the specified AtomT.
+ ///
+ /// Used for the implementation of iterators for JSONArray and JSONObject.
+ template <typename AtomT>
+ class IteratorTemplate : public std::iterator<std::forward_iterator_tag,
+ const AtomT*> {
+ public:
+ explicit IteratorTemplate(const AtomIterator& AtomI)
+ : AtomI(AtomI) {}
+
+ bool operator==(const IteratorTemplate &I) const {
+ return AtomI == I.AtomI;
+ }
+ bool operator!=(const IteratorTemplate &I) const { return !(*this == I); }
+
+ IteratorTemplate &operator++() {
+ ++AtomI;
+ return *this;
+ }
+
+ const AtomT *operator*() { return dyn_cast<AtomT>(*AtomI); }
+
+ private:
+ AtomIterator AtomI;
+ };
+
+ JSONContainer(JSONParser *Parser, char StartChar, char EndChar,
+ JSONAtom::Kind ContainerKind)
+ : JSONValue(ContainerKind), Parser(Parser),
+ Position(), Current(0), Started(false),
+ StartChar(StartChar), EndChar(EndChar) {}
+
+ /// \brief Returns a lazy parsing iterator over the container.
+ ///
+ /// As the iterator drives the parse stream, begin() must only be called
+ /// once per container.
+ AtomIterator atom_begin() const {
+ if (Started)
+ report_fatal_error("Cannot parse container twice.");
+ Started = true;
+ // Set up the position and current element when we begin iterating over the
+ // container.
+ Position = Parser->parseFirstElement(getKind(), StartChar, EndChar, Current);
+ return AtomIterator(this);
+ }
+ AtomIterator atom_end() const {
+ return AtomIterator();
+ }
+
+private:
+ AtomIterator atom_current() const {
+ if (!Started)
+ return atom_begin();
+
+ return AtomIterator(this);
+ }
+
+ /// \brief Parse the next element in the container into the Current element.
+ ///
+ /// This routine is called as an iterator into this container walks through
+ /// its elements. It mutates the container's internal current node to point to
+ /// the next atom of the container.
+ void parseNextElement() const {
+ Parser->skip(*Current);
+ Position = Parser->parseNextElement(getKind(), EndChar, Current);
+ }
+
+ // For parsing, JSONContainers call back into the JSONParser.
+ JSONParser * const Parser;
+
+ // 'Position', 'Current' and 'Started' store the state of the parse stream
+ // for iterators on the container, they don't change the container's elements
+ // and are thus marked as mutable.
+ mutable StringRef::iterator Position;
+ mutable const JSONAtom *Current;
+ mutable bool Started;
+
+ const char StartChar;
+ const char EndChar;
+
+ friend class JSONParser;
+
+public:
+ /// \brief dyn_cast helpers
+ ///@{
+ static bool classof(const JSONAtom *Atom) {
+ switch (Atom->getKind()) {
+ case JK_Array:
+ case JK_Object:
+ return true;
+ case JK_KeyValuePair:
+ case JK_String:
+ return false;
+ }
+ llvm_unreachable("Invalid JSONAtom kind");
+ }
+ static bool classof(const JSONContainer *Container) { return true; }
+ ///@}
+};
+
+/// \brief A simple JSON array.
+class JSONArray : public JSONContainer {
+public:
+ typedef IteratorTemplate<JSONValue> const_iterator;
+
+ /// \brief Returns a lazy parsing iterator over the container.
+ ///
+ /// As the iterator drives the parse stream, begin() must only be called
+ /// once per container.
+ const_iterator begin() const { return const_iterator(atom_begin()); }
+ const_iterator end() const { return const_iterator(atom_end()); }
+
+private:
+ JSONArray(JSONParser *Parser)
+ : JSONContainer(Parser, '[', ']', JSONAtom::JK_Array) {}
+
+public:
+ /// \brief dyn_cast helpers
+ ///@{
+ static bool classof(const JSONAtom *Atom) {
+ return Atom->getKind() == JSONAtom::JK_Array;
+ }
+ static bool classof(const JSONArray *Array) { return true; }
+ ///@}
+
+ friend class JSONParser;
+};
+
+/// \brief A JSON object: an iterable list of JSON key-value pairs.
+class JSONObject : public JSONContainer {
+public:
+ typedef IteratorTemplate<JSONKeyValuePair> const_iterator;
+
+ /// \brief Returns a lazy parsing iterator over the container.
+ ///
+ /// As the iterator drives the parse stream, begin() must only be called
+ /// once per container.
+ const_iterator begin() const { return const_iterator(atom_begin()); }
+ const_iterator end() const { return const_iterator(atom_end()); }
+
+private:
+ JSONObject(JSONParser *Parser)
+ : JSONContainer(Parser, '{', '}', JSONAtom::JK_Object) {}
+
+public:
+ /// \brief dyn_cast helpers
+ ///@{
+ static bool classof(const JSONAtom *Atom) {
+ return Atom->getKind() == JSONAtom::JK_Object;
+ }
+ static bool classof(const JSONObject *Object) { return true; }
+ ///@}
+
+ friend class JSONParser;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_JSON_PARSER_H
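
A sketch of the intended lazy-iteration pattern over a document such as
["a","b"]; printStrings is illustrative. Each container may be iterated
only once per parser, so validation and extraction need separate parsers:

#include "llvm/Support/JSONParser.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

void printStrings(llvm::StringRef Text) {
  using namespace llvm;
  SourceMgr SM;
  JSONParser Parser(Text, &SM);
  const JSONArray *Root = dyn_cast_or_null<JSONArray>(Parser.parseRoot());
  if (!Root)
    return;
  for (JSONArray::const_iterator I = Root->begin(), E = Root->end();
       I != E; ++I)
    if (const JSONString *S = dyn_cast<JSONString>(*I))
      outs() << S->getRawText() << "\n";
  if (Parser.failed())
    errs() << "invalid JSON\n";
}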
diff --git a/contrib/llvm/include/llvm/Support/LockFileManager.h b/contrib/llvm/include/llvm/Support/LockFileManager.h
new file mode 100644
index 0000000..e2fa8eb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/LockFileManager.h
@@ -0,0 +1,74 @@
+//===--- LockFileManager.h - File-level locking utility ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_LOCKFILEMANAGER_H
+#define LLVM_SUPPORT_LOCKFILEMANAGER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/system_error.h"
+#include <utility> // for std::pair
+
+namespace llvm {
+
+/// \brief Class that manages the creation of a lock file to aid
+/// implicit coordination between different processes.
+///
+/// The implicit coordination works by creating a ".lock" file alongside
+/// the file that we're coordinating for, using the atomicity of the file
+/// system to ensure that only a single process can create that ".lock" file.
+/// When the lock file is removed, the owning process has finished the
+/// operation.
+class LockFileManager {
+public:
+ /// \brief Describes the state of a lock file.
+ enum LockFileState {
+ /// \brief The lock file has been created and is owned by this instance
+ /// of the object.
+ LFS_Owned,
+ /// \brief The lock file already exists and is owned by some other
+ /// instance.
+ LFS_Shared,
+ /// \brief An error occurred while trying to create or find the lock
+ /// file.
+ LFS_Error
+ };
+
+private:
+ SmallString<128> LockFileName;
+ SmallString<128> UniqueLockFileName;
+
+ Optional<std::pair<std::string, int> > Owner;
+ Optional<error_code> Error;
+
+ LockFileManager(const LockFileManager &);
+ LockFileManager &operator=(const LockFileManager &);
+
+ static Optional<std::pair<std::string, int> >
+ readLockFile(StringRef LockFileName);
+
+ static bool processStillExecuting(StringRef Hostname, int PID);
+
+public:
+
+ LockFileManager(StringRef FileName);
+ ~LockFileManager();
+
+ /// \brief Determine the state of the lock file.
+ LockFileState getState() const;
+
+ operator LockFileState() const { return getState(); }
+
+ /// \brief For a shared lock, wait until the owner releases the lock.
+ void waitForUnlock();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_LOCKFILEMANAGER_H
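
The intended usage pattern, sketched; buildWithLock is a hypothetical
caller producing the file that the lock guards:

#include "llvm/Support/LockFileManager.h"

bool buildWithLock(llvm::StringRef File) {
  llvm::LockFileManager Lock(File);
  switch (Lock) { // via operator LockFileState()
  case llvm::LockFileManager::LFS_Error:
    return false;
  case llvm::LockFileManager::LFS_Shared:
    Lock.waitForUnlock(); // another process is producing File
    return true;
  case llvm::LockFileManager::LFS_Owned:
    break; // we own the ".lock" file
  }
  // ... produce File here; ~LockFileManager() removes the lock.
  return true;
}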
diff --git a/contrib/llvm/include/llvm/Support/MachO.h b/contrib/llvm/include/llvm/Support/MachO.h
index 5b68586..44a7a79 100644
--- a/contrib/llvm/include/llvm/Support/MachO.h
+++ b/contrib/llvm/include/llvm/Support/MachO.h
@@ -114,6 +114,10 @@ namespace llvm {
LoadCommandVersionMinIPhoneOS = 0x00000025u, // LC_VERSION_MIN_IPHONEOS
LoadCommandFunctionStarts = 0x00000026u, // LC_FUNCTION_STARTS
LoadCommandDyldEnvironment = 0x00000027u, // LC_DYLD_ENVIRONMENT
+ LoadCommandMain = 0x80000028u, // LC_MAIN
+ LoadCommandDataInCode = 0x00000029u, // LC_DATA_IN_CODE
+ LoadCommandSourceVersion = 0x0000002Au, // LC_SOURCE_VERSION
+ LoadCommandCodeSignDRs = 0x0000002Bu, // LC_DYLIB_CODE_SIGN_DRS
// Constant bits for the "flags" field in llvm::MachO::segment_command
SegmentCommandFlagBitHighVM = 0x1u, // SG_HIGHVM
@@ -240,6 +244,9 @@ namespace llvm {
NListSectionNoSection = 0u, // NO_SECT
NListSectionMaxSection = 0xffu, // MAX_SECT
+ NListDescWeakRef = 0x40u,
+ NListDescWeakDef = 0x80u,
+
// Constant values for the "n_type" field in llvm::MachO::nlist and
// llvm::MachO::nlist_64 when "(n_type & NlistMaskStab) != 0"
StabGlobalSymbol = 0x20u, // N_GSYM
diff --git a/contrib/llvm/include/llvm/Support/ManagedStatic.h b/contrib/llvm/include/llvm/Support/ManagedStatic.h
index 53e73ad..4171d1b 100644
--- a/contrib/llvm/include/llvm/Support/ManagedStatic.h
+++ b/contrib/llvm/include/llvm/Support/ManagedStatic.h
@@ -16,6 +16,7 @@
#include "llvm/Support/Atomic.h"
#include "llvm/Support/Threading.h"
+#include "llvm/Support/Valgrind.h"
namespace llvm {
@@ -65,6 +66,7 @@ public:
void* tmp = Ptr;
if (llvm_is_multithreaded()) sys::MemoryFence();
if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>::call);
+ TsanHappensAfter(this);
return *static_cast<C*>(Ptr);
}
@@ -72,6 +74,7 @@ public:
void* tmp = Ptr;
if (llvm_is_multithreaded()) sys::MemoryFence();
if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>::call);
+ TsanHappensAfter(this);
return static_cast<C*>(Ptr);
}
@@ -79,6 +82,7 @@ public:
void* tmp = Ptr;
if (llvm_is_multithreaded()) sys::MemoryFence();
if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>::call);
+ TsanHappensAfter(this);
return *static_cast<C*>(Ptr);
}
@@ -86,6 +90,7 @@ public:
void* tmp = Ptr;
if (llvm_is_multithreaded()) sys::MemoryFence();
if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>::call);
+ TsanHappensAfter(this);
return static_cast<C*>(Ptr);
}
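
For context, the annotated fast path above guards the usual lazy-init
pattern; a sketch of typical client code (Registry is illustrative):

#include "llvm/Support/ManagedStatic.h"
#include <vector>

static llvm::ManagedStatic<std::vector<int> > Registry;

void record(int V) {
  Registry->push_back(V); // constructed on first use, freed by llvm_shutdown()
}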
diff --git a/contrib/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm/include/llvm/Support/MathExtras.h
index 4627557..d085c94 100644
--- a/contrib/llvm/include/llvm/Support/MathExtras.h
+++ b/contrib/llvm/include/llvm/Support/MathExtras.h
@@ -51,6 +51,13 @@ inline bool isInt<32>(int64_t x) {
return static_cast<int32_t>(x) == x;
}
+/// isShiftedInt<N,S> - Checks if a signed integer is an N-bit number shifted
+/// left by S.
+template<unsigned N, unsigned S>
+inline bool isShiftedInt(int64_t x) {
+ return isInt<N+S>(x) && (x % (1<<S) == 0);
+}
+
/// isUInt - Checks if an unsigned integer fits into the given bit width.
template<unsigned N>
inline bool isUInt(uint64_t x) {
@@ -70,6 +77,13 @@ inline bool isUInt<32>(uint64_t x) {
return static_cast<uint32_t>(x) == x;
}
+/// isShiftedUInt<N,S> - Checks if an unsigned integer is an N-bit number shifted
+/// left by S.
+template<unsigned N, unsigned S>
+inline bool isShiftedUInt(uint64_t x) {
+ return isUInt<N+S>(x) && (x % (1<<S) == 0);
+}
+
/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
/// bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
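
A sketch of the new predicates in an encoder-style check; the 8-bit,
4-byte-scaled field is an illustrative choice, not tied to any target:

#include "llvm/Support/MathExtras.h"

static bool fitsScaledImm8(int64_t Offset) {
  // True exactly for multiples of 4 whose scaled value fits in 8 signed
  // bits, e.g. 508 (= 127 << 2) but not 510 (not a multiple of 4).
  return llvm::isShiftedInt<8, 2>(Offset);
}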
diff --git a/contrib/llvm/include/llvm/Support/MemoryObject.h b/contrib/llvm/include/llvm/Support/MemoryObject.h
index dec0f13..b778b08 100644
--- a/contrib/llvm/include/llvm/Support/MemoryObject.h
+++ b/contrib/llvm/include/llvm/Support/MemoryObject.h
@@ -23,19 +23,19 @@ class MemoryObject {
public:
/// Destructor - Override as necessary.
virtual ~MemoryObject();
-
+
/// getBase - Returns the lowest valid address in the region.
///
/// @result - The lowest valid address.
virtual uint64_t getBase() const = 0;
-
+
/// getExtent - Returns the size of the region in bytes. (The region is
- /// contiguous, so the highest valid address of the region
+ /// contiguous, so the highest valid address of the region
/// is getBase() + getExtent() - 1).
///
/// @result - The size of the region.
virtual uint64_t getExtent() const = 0;
-
+
/// readByte - Tries to read a single byte from the region.
///
/// @param address - The address of the byte, in the same space as getBase().
@@ -43,7 +43,7 @@ public:
/// @result - 0 if successful; -1 if not. Failure may be due to a
/// bounds violation or an implementation-specific error.
virtual int readByte(uint64_t address, uint8_t* ptr) const = 0;
-
+
/// readBytes - Tries to read a contiguous range of bytes from the
/// region, up to the end of the region.
/// You should override this function if there is a quicker
@@ -67,4 +67,3 @@ public:
}
#endif
-
diff --git a/contrib/llvm/include/llvm/Support/PathV1.h b/contrib/llvm/include/llvm/Support/PathV1.h
index 45165de..f4bedf9 100644
--- a/contrib/llvm/include/llvm/Support/PathV1.h
+++ b/contrib/llvm/include/llvm/Support/PathV1.h
@@ -131,20 +131,6 @@ namespace sys {
/// @brief Find a library.
static Path FindLibrary(std::string& short_name);
- /// Construct a path to the default LLVM configuration directory. The
- /// implementation must ensure that this is a well-known (same on many
- /// systems) directory in which llvm configuration files exist. For
- /// example, on Unix, the /etc/llvm directory has been selected.
- /// @brief Construct a path to the default LLVM configuration directory
- static Path GetLLVMDefaultConfigDir();
-
- /// Construct a path to the LLVM installed configuration directory. The
- /// implementation must ensure that this refers to the "etc" directory of
- /// the LLVM installation. This is the location where configuration files
- /// will be located for a particular installation of LLVM on a machine.
- /// @brief Construct a path to the LLVM installed configuration directory
- static Path GetLLVMConfigDir();
-
/// Construct a path to the current user's home directory. The
/// implementation must use an operating system specific mechanism for
/// determining the user's home directory. For example, the environment
diff --git a/contrib/llvm/include/llvm/Support/PatternMatch.h b/contrib/llvm/include/llvm/Support/PatternMatch.h
index f0fb516..221fa8b 100644
--- a/contrib/llvm/include/llvm/Support/PatternMatch.h
+++ b/contrib/llvm/include/llvm/Support/PatternMatch.h
@@ -31,6 +31,7 @@
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
+#include "llvm/Operator.h"
namespace llvm {
namespace PatternMatch {
@@ -97,12 +98,19 @@ struct apint_match {
Res = &CI->getValue();
return true;
}
+ // FIXME: Remove this.
if (ConstantVector *CV = dyn_cast<ConstantVector>(V))
if (ConstantInt *CI =
dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) {
Res = &CI->getValue();
return true;
}
+ if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
+ if (ConstantInt *CI =
+ dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
return false;
}
};
@@ -143,9 +151,13 @@ struct cst_pred_ty : public Predicate {
bool match(ITy *V) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
return this->isValue(CI->getValue());
+ // FIXME: Remove this.
if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
return this->isValue(CI->getValue());
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
+ if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
+ return this->isValue(CI->getValue());
return false;
}
};
@@ -163,12 +175,22 @@ struct api_pred_ty : public Predicate {
Res = &CI->getValue();
return true;
}
+
+ // FIXME: remove.
if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
if (this->isValue(CI->getValue())) {
Res = &CI->getValue();
return true;
}
+
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
+ if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
+ if (this->isValue(CI->getValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+
return false;
}
};
@@ -441,6 +463,26 @@ m_IDiv(const LHS &L, const RHS &R) {
}
//===----------------------------------------------------------------------===//
+// Class that matches exact binary ops.
+//
+template<typename SubPattern_t>
+struct Exact_match {
+ SubPattern_t SubPattern;
+
+ Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
+
+ template<typename OpTy>
+ bool match(OpTy *V) {
+ if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(V))
+ return PEO->isExact() && SubPattern.match(V);
+ return false;
+ }
+};
+
+template<typename T>
+inline Exact_match<T> m_Exact(const T &SubPattern) { return SubPattern; }
+
+//===----------------------------------------------------------------------===//
// Matchers for CmpInst classes
//
@@ -529,10 +571,8 @@ struct CastClass_match {
template<typename OpTy>
bool match(OpTy *V) {
- if (CastInst *I = dyn_cast<CastInst>(V))
- return I->getOpcode() == Opcode && Op.match(I->getOperand(0));
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- return CE->getOpcode() == Opcode && Op.match(CE->getOperand(0));
+ if (Operator *O = dyn_cast<Operator>(V))
+ return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
return false;
}
};
@@ -585,21 +625,18 @@ struct not_match {
template<typename OpTy>
bool match(OpTy *V) {
- if (Instruction *I = dyn_cast<Instruction>(V))
- if (I->getOpcode() == Instruction::Xor)
- return matchIfNot(I->getOperand(0), I->getOperand(1));
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- if (CE->getOpcode() == Instruction::Xor)
- return matchIfNot(CE->getOperand(0), CE->getOperand(1));
+ if (Operator *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::Xor)
+ return matchIfNot(O->getOperand(0), O->getOperand(1));
return false;
}
private:
bool matchIfNot(Value *LHS, Value *RHS) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
- return CI->isAllOnesValue() && L.match(LHS);
- if (ConstantVector *CV = dyn_cast<ConstantVector>(RHS))
- return CV->isAllOnesValue() && L.match(LHS);
- return false;
+ return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) ||
+ // FIXME: Remove CV.
+ isa<ConstantVector>(RHS)) &&
+ cast<Constant>(RHS)->isAllOnesValue() &&
+ L.match(LHS);
}
};
@@ -615,19 +652,16 @@ struct neg_match {
template<typename OpTy>
bool match(OpTy *V) {
- if (Instruction *I = dyn_cast<Instruction>(V))
- if (I->getOpcode() == Instruction::Sub)
- return matchIfNeg(I->getOperand(0), I->getOperand(1));
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- if (CE->getOpcode() == Instruction::Sub)
- return matchIfNeg(CE->getOperand(0), CE->getOperand(1));
+ if (Operator *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::Sub)
+ return matchIfNeg(O->getOperand(0), O->getOperand(1));
return false;
}
private:
bool matchIfNeg(Value *LHS, Value *RHS) {
- if (ConstantInt *C = dyn_cast<ConstantInt>(LHS))
- return C->isZero() && L.match(RHS);
- return false;
+ return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
+ isa<ConstantAggregateZero>(LHS)) &&
+ L.match(RHS);
}
};
@@ -644,12 +678,9 @@ struct fneg_match {
template<typename OpTy>
bool match(OpTy *V) {
- if (Instruction *I = dyn_cast<Instruction>(V))
- if (I->getOpcode() == Instruction::FSub)
- return matchIfFNeg(I->getOperand(0), I->getOperand(1));
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- if (CE->getOpcode() == Instruction::FSub)
- return matchIfFNeg(CE->getOperand(0), CE->getOperand(1));
+ if (Operator *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::FSub)
+ return matchIfFNeg(O->getOperand(0), O->getOperand(1));
return false;
}
private:
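
With the Operator-based rewrite above, the same matcher now fires whether
the 'exact' operation is an Instruction or a ConstantExpr. A sketch using
m_Exact; matchExactLShr is a hypothetical helper:

#include "llvm/Support/PatternMatch.h"

static bool matchExactLShr(llvm::Value *V, llvm::Value *&X,
                           llvm::Value *&Amt) {
  using namespace llvm::PatternMatch;
  // Matches only lshr operations carrying the 'exact' flag.
  return match(V, m_Exact(m_LShr(m_Value(X), m_Value(Amt))));
}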
diff --git a/contrib/llvm/include/llvm/Support/Process.h b/contrib/llvm/include/llvm/Support/Process.h
index 27ef267..3379922 100644
--- a/contrib/llvm/include/llvm/Support/Process.h
+++ b/contrib/llvm/include/llvm/Support/Process.h
@@ -138,9 +138,6 @@ namespace sys {
/// Resets the terminals colors, or returns an escape sequence to do so.
static const char *ResetColor();
-
- /// Change the program working directory to that given by \arg Path.
- static void SetWorkingDirectory(std::string Path);
/// @}
};
}
diff --git a/contrib/llvm/include/llvm/Support/Program.h b/contrib/llvm/include/llvm/Support/Program.h
index a502657..a85f235 100644
--- a/contrib/llvm/include/llvm/Support/Program.h
+++ b/contrib/llvm/include/llvm/Support/Program.h
@@ -17,6 +17,7 @@
#include "llvm/Support/Path.h"
namespace llvm {
+class error_code;
namespace sys {
// TODO: Add operations to communicate with the process, redirect its I/O,
@@ -122,12 +123,12 @@ namespace sys {
/// @brief Construct a Program by finding it by name.
static Path FindProgramByName(const std::string& name);
- // These methods change the specified standard stream (stdin,
- // stdout, or stderr) to binary mode. They return true if an error
- // occurred
- static bool ChangeStdinToBinary();
- static bool ChangeStdoutToBinary();
- static bool ChangeStderrToBinary();
+ // These methods change the specified standard stream (stdin, stdout, or
+ // stderr) to binary mode. They return errc::success if the specified stream
+ // was changed. Otherwise a platform dependent error is returned.
+ static error_code ChangeStdinToBinary();
+ static error_code ChangeStdoutToBinary();
+ static error_code ChangeStderrToBinary();
/// A convenience function equivalent to Program prg; prg.Execute(..);
/// prg.Wait(..);
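
Callers can now surface the platform error instead of a bare failure flag.
A sketch; prepareBinaryStdout is a hypothetical helper:

#include "llvm/Support/Program.h"
#include "llvm/Support/system_error.h"

static bool prepareBinaryStdout(std::string &ErrMsg) {
  if (llvm::error_code EC = llvm::sys::Program::ChangeStdoutToBinary()) {
    ErrMsg = EC.message(); // platform-specific reason
    return false;
  }
  return true;
}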
diff --git a/contrib/llvm/include/llvm/Support/Recycler.h b/contrib/llvm/include/llvm/Support/Recycler.h
index d8f8c78..fa6e189 100644
--- a/contrib/llvm/include/llvm/Support/Recycler.h
+++ b/contrib/llvm/include/llvm/Support/Recycler.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/ilist.h"
#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cassert>
namespace llvm {
@@ -52,7 +53,7 @@ struct ilist_traits<RecyclerStruct> :
static void noteHead(RecyclerStruct*, RecyclerStruct*) {}
static void deleteNode(RecyclerStruct *) {
- assert(0 && "Recycler's ilist_traits shouldn't see a deleteNode call!");
+ llvm_unreachable("Recycler's ilist_traits shouldn't see a deleteNode call!");
}
};
diff --git a/contrib/llvm/include/llvm/Support/SMLoc.h b/contrib/llvm/include/llvm/Support/SMLoc.h
index 02db327..d48bfcc 100644
--- a/contrib/llvm/include/llvm/Support/SMLoc.h
+++ b/contrib/llvm/include/llvm/Support/SMLoc.h
@@ -15,9 +15,11 @@
#ifndef SUPPORT_SMLOC_H
#define SUPPORT_SMLOC_H
+#include <cassert>
+
namespace llvm {
-// SMLoc - Represents a location in source code.
+/// SMLoc - Represents a location in source code.
class SMLoc {
const char *Ptr;
public:
@@ -38,7 +40,23 @@ public:
}
};
-}
+/// SMRange - Represents a range in source code. Note that unlike standard STL
+/// ranges, the locations specified are considered to be *inclusive*. For
+/// example, [X,X] *does* include X; it is not an empty range.
+class SMRange {
+public:
+ SMLoc Start, End;
+
+ SMRange() {}
+ SMRange(SMLoc Start, SMLoc End) : Start(Start), End(End) {
+ assert(Start.isValid() == End.isValid() &&
+ "Start and end should either both be valid or both be invalid!");
+ }
+
+ bool isValid() const { return Start.isValid(); }
+};
+
+} // end namespace llvm
#endif
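
Because SMRange is inclusive at both ends, a single-character token at P is
the non-empty range [P, P]. A sketch (tokenRange is illustrative):

#include "llvm/Support/SMLoc.h"

static llvm::SMRange tokenRange(llvm::SMLoc P) {
  return llvm::SMRange(P, P); // valid and non-empty by convention
}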
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Support/SaveAndRestore.h b/contrib/llvm/include/llvm/Support/SaveAndRestore.h
index f720639..ffa99b9 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Support/SaveAndRestore.h
+++ b/contrib/llvm/include/llvm/Support/SaveAndRestore.h
@@ -12,10 +12,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_SAVERESTORE
-#define LLVM_CLANG_ANALYSIS_SAVERESTORE
+#ifndef LLVM_ADT_SAVERESTORE
+#define LLVM_ADT_SAVERESTORE
-namespace clang {
+namespace llvm {
// SaveAndRestore - A utility class that uses RAII to save and restore
// the value of a variable.
diff --git a/contrib/llvm/include/llvm/Support/SourceMgr.h b/contrib/llvm/include/llvm/Support/SourceMgr.h
index deb8caf..58b8fab 100644
--- a/contrib/llvm/include/llvm/Support/SourceMgr.h
+++ b/contrib/llvm/include/llvm/Support/SourceMgr.h
@@ -17,10 +17,8 @@
#define SUPPORT_SOURCEMGR_H
#include "llvm/Support/SMLoc.h"
-
+#include "llvm/ADT/ArrayRef.h"
#include <string>
-#include <vector>
-#include <cassert>
namespace llvm {
class MemoryBuffer;
@@ -33,10 +31,16 @@ namespace llvm {
/// and handles diagnostic wrangling.
class SourceMgr {
public:
+ enum DiagKind {
+ DK_Error,
+ DK_Warning,
+ DK_Note
+ };
+
/// DiagHandlerTy - Clients that want to handle their own diagnostics in a
/// custom way can register a function pointer+context as a diagnostic
/// handler. It gets called each time PrintMessage is invoked.
- typedef void (*DiagHandlerTy)(const SMDiagnostic&, void *Context);
+ typedef void (*DiagHandlerTy)(const SMDiagnostic &, void *Context);
private:
struct SrcBuffer {
/// Buffer - The memory buffer for the file.
@@ -124,11 +128,8 @@ public:
/// PrintMessage - Emit a message about the specified location with the
/// specified string.
///
- /// @param Type - If non-null, the kind of message (e.g., "error") which is
- /// prefixed to the message.
- /// @param ShowLine - Should the diagnostic show the source line.
- void PrintMessage(SMLoc Loc, const Twine &Msg, const char *Type,
- bool ShowLine = true) const;
+ void PrintMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) const;
/// GetMessage - Return an SMDiagnostic at the specified location with the
@@ -136,10 +137,8 @@ public:
///
/// @param Type - If non-null, the kind of message (e.g., "error") which is
/// prefixed to the message.
- /// @param ShowLine - Should the diagnostic show the source line.
- SMDiagnostic GetMessage(SMLoc Loc,
- const Twine &Msg, const char *Type,
- bool ShowLine = true) const;
+ SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) const;
/// PrintIncludeStack - Prints the names of included files and the line of the
/// file they were included from. A diagnostic handler can use this before
@@ -158,35 +157,38 @@ class SMDiagnostic {
SMLoc Loc;
std::string Filename;
int LineNo, ColumnNo;
+ SourceMgr::DiagKind Kind;
std::string Message, LineContents;
- unsigned ShowLine : 1;
+ std::vector<std::pair<unsigned, unsigned> > Ranges;
public:
// Null diagnostic.
- SMDiagnostic() : SM(0), LineNo(0), ColumnNo(0), ShowLine(0) {}
+ SMDiagnostic()
+ : SM(0), LineNo(0), ColumnNo(0), Kind(SourceMgr::DK_Error) {}
// Diagnostic with no location (e.g. file not found, command line arg error).
- SMDiagnostic(const std::string &filename, const std::string &Msg)
- : SM(0), Filename(filename), LineNo(-1), ColumnNo(-1),
- Message(Msg), ShowLine(false) {}
+ SMDiagnostic(const std::string &filename, SourceMgr::DiagKind Kind,
+ const std::string &Msg)
+ : SM(0), Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Kind),
+ Message(Msg) {}
// Diagnostic with a location.
SMDiagnostic(const SourceMgr &sm, SMLoc L, const std::string &FN,
- int Line, int Col,
+ int Line, int Col, SourceMgr::DiagKind Kind,
const std::string &Msg, const std::string &LineStr,
- bool showline = true)
- : SM(&sm), Loc(L), Filename(FN), LineNo(Line), ColumnNo(Col), Message(Msg),
- LineContents(LineStr), ShowLine(showline) {}
+ ArrayRef<std::pair<unsigned,unsigned> > Ranges);
const SourceMgr *getSourceMgr() const { return SM; }
SMLoc getLoc() const { return Loc; }
const std::string &getFilename() const { return Filename; }
int getLineNo() const { return LineNo; }
int getColumnNo() const { return ColumnNo; }
+ SourceMgr::DiagKind getKind() const { return Kind; }
const std::string &getMessage() const { return Message; }
const std::string &getLineContents() const { return LineContents; }
- bool getShowLine() const { return ShowLine; }
-
- void Print(const char *ProgName, raw_ostream &S) const;
+ const std::vector<std::pair<unsigned, unsigned> > &getRanges() const {
+ return Ranges;
+ }
+ void print(const char *ProgName, raw_ostream &S) const;
};
} // end llvm namespace
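
A sketch of the new kind- and range-aware diagnostic entry point; warnAt is
a hypothetical helper that underlines [Start, End]:

#include "llvm/ADT/Twine.h"
#include "llvm/Support/SourceMgr.h"

static void warnAt(const llvm::SourceMgr &SM, llvm::SMLoc Start,
                   llvm::SMLoc End, const llvm::Twine &Msg) {
  llvm::SMRange Range(Start, End);
  SM.PrintMessage(Start, llvm::SourceMgr::DK_Warning, Msg,
                  llvm::ArrayRef<llvm::SMRange>(Range));
}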
diff --git a/contrib/llvm/include/llvm/Support/StreamableMemoryObject.h b/contrib/llvm/include/llvm/Support/StreamableMemoryObject.h
new file mode 100644
index 0000000..531dbb2
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/StreamableMemoryObject.h
@@ -0,0 +1,181 @@
+//===- StreamableMemoryObject.h - Streamable data interface -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef STREAMABLEMEMORYOBJECT_H_
+#define STREAMABLEMEMORYOBJECT_H_
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/DataStream.h"
+#include <vector>
+
+namespace llvm {
+
+/// StreamableMemoryObject - Interface to data which might be streamed.
+/// Streamability has 2 important implications/restrictions. First, the data
+/// might not yet exist in memory when the request is made. This just means
+/// that readByte/readBytes might have to block or do some work to get it.
+/// More significantly, the exact size of the object might not be known until
+/// it has all been fetched. This means that to return the right result,
+/// getExtent must also wait for all the data to arrive; therefore it should
+/// not be called on objects which are actually streamed (this would defeat
+/// the purpose of streaming). Instead, isValidAddress and isObjectEnd can be
+/// used to test addresses without knowing the exact size of the stream.
+/// Finally, getPointer can be used instead of readBytes to avoid extra copying.
+class StreamableMemoryObject : public MemoryObject {
+ public:
+ /// Destructor - Override as necessary.
+ virtual ~StreamableMemoryObject();
+
+ /// getBase - Returns the lowest valid address in the region.
+ ///
+ /// @result - The lowest valid address.
+ virtual uint64_t getBase() const = 0;
+
+ /// getExtent - Returns the size of the region in bytes. (The region is
+ /// contiguous, so the highest valid address of the region
+ /// is getBase() + getExtent() - 1).
+ /// May block until all bytes in the stream have been read
+ ///
+ /// @result - The size of the region.
+ virtual uint64_t getExtent() const = 0;
+
+ /// readByte - Tries to read a single byte from the region.
+ /// May block until (address - base) bytes have been read
+ /// @param address - The address of the byte, in the same space as getBase().
+ /// @param ptr - A pointer to a byte to be filled in. Must be non-NULL.
+ /// @result - 0 if successful; -1 if not. Failure may be due to a
+ /// bounds violation or an implementation-specific error.
+ virtual int readByte(uint64_t address, uint8_t* ptr) const = 0;
+
+ /// readBytes - Tries to read a contiguous range of bytes from the
+ /// region, up to the end of the region.
+ /// May block until (address - base + size) bytes have
+ /// been read. Additionally, StreamableMemoryObjects will
+ /// not do partial reads - if size bytes cannot be read,
+ /// readBytes will fail.
+ ///
+ /// @param address - The address of the first byte, in the same space as
+ /// getBase().
+ /// @param size - The maximum number of bytes to copy.
+ /// @param buf - A pointer to a buffer to be filled in. Must be non-NULL
+ /// and large enough to hold size bytes.
+ /// @param copied - A pointer to a value that is filled in with the number
+ /// of bytes actually read. May be NULL.
+ /// @result - 0 if successful; -1 if not. Failure may be due to a
+ /// bounds violation or an implementation-specific error.
+ virtual int readBytes(uint64_t address,
+ uint64_t size,
+ uint8_t* buf,
+ uint64_t* copied) const = 0;
+
+ /// getPointer - Ensures that the requested data is in memory, and returns
+ /// a pointer to it. More efficient than using readBytes if the
+ /// data is already in memory.
+ /// May block until (address - base + size) bytes have been read
+ /// @param address - address of the byte, in the same space as getBase()
+ /// @param size - amount of data that must be available on return
+ /// @result - valid pointer to the requested data
+ virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const = 0;
+
+ /// isValidAddress - Returns true if the address is within the object
+ /// (i.e. between base and base + extent - 1 inclusive)
+ /// May block until (address - base) bytes have been read
+ /// @param address - address of the byte, in the same space as getBase()
+ /// @result - true if the address may be read with readByte()
+ virtual bool isValidAddress(uint64_t address) const = 0;
+
+ /// isObjectEnd - Returns true if the address is one past the end of the
+ /// object (i.e. if it is equal to base + extent)
+ /// May block until (address - base) bytes have been read
+ /// @param address - address of the byte, in the same space as getBase()
+ /// @result - true if the address is equal to base + extent
+ virtual bool isObjectEnd(uint64_t address) const = 0;
+};
+
+/// StreamingMemoryObject - interface to data which is actually streamed from
+/// a DataStreamer. In addition to inherited members, it has the
+/// dropLeadingBytes and setKnownObjectSize methods which are not applicable
+/// to non-streamed objects.
+class StreamingMemoryObject : public StreamableMemoryObject {
+public:
+ StreamingMemoryObject(DataStreamer *streamer);
+ virtual uint64_t getBase() const { return 0; }
+ virtual uint64_t getExtent() const;
+ virtual int readByte(uint64_t address, uint8_t* ptr) const;
+ virtual int readBytes(uint64_t address,
+ uint64_t size,
+ uint8_t* buf,
+ uint64_t* copied) const;
+ virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const {
+ // This could be fixed by ensuring the bytes are fetched and making a copy,
+ // requiring that the bitcode size be known, or otherwise ensuring that
+ // the memory doesn't go away/get reallocated, but it's
+ // not currently necessary. Users that need the pointer don't stream.
+ assert(0 && "getPointer in streaming memory objects not allowed");
+ return NULL;
+ }
+ virtual bool isValidAddress(uint64_t address) const;
+ virtual bool isObjectEnd(uint64_t address) const;
+
+ /// Drop s bytes from the front of the stream, pushing the positions of the
+ /// remaining bytes down by s. This is used to skip past the bitcode header,
+ /// since we don't know a priori if it's present, and we can't put bytes
+ /// back into the stream once we've read them.
+ bool dropLeadingBytes(size_t s);
+
+ /// If the data object size is known in advance, many of the operations can
+ /// be made more efficient, so this method should be called before reading
+ /// starts (although it can be called anytime).
+ void setKnownObjectSize(size_t size);
+
+private:
+ const static uint32_t kChunkSize = 4096 * 4;
+ mutable std::vector<unsigned char> Bytes;
+ OwningPtr<DataStreamer> Streamer;
+ mutable size_t BytesRead; // Bytes read from stream
+ size_t BytesSkipped; // Bytes skipped at start of stream (e.g. wrapper/header)
+ mutable size_t ObjectSize; // 0 if unknown, set if wrapper seen or EOF reached
+ mutable bool EOFReached;
+
+ // Fetch enough bytes such that Pos can be read or EOF is reached
+ // (i.e. BytesRead > Pos). Return true if Pos can be read.
+ // Unlike most of the functions in BitcodeReader, returns true on success.
+ // Most of the requests will be small, but we fetch kChunkSize bytes
+ // at a time to avoid making too many potentially expensive GetBytes calls.
+ bool fetchToPos(size_t Pos) const {
+ if (EOFReached) return Pos < ObjectSize;
+ while (Pos >= BytesRead) {
+ Bytes.resize(BytesRead + BytesSkipped + kChunkSize);
+ size_t bytes = Streamer->GetBytes(&Bytes[BytesRead + BytesSkipped],
+ kChunkSize);
+ BytesRead += bytes;
+ if (bytes < kChunkSize) {
+ if (ObjectSize && BytesRead < Pos)
+ assert(0 && "Unexpected short read fetching bitcode");
+ if (BytesRead <= Pos) { // reached EOF/ran out of bytes
+ ObjectSize = BytesRead;
+ EOFReached = true;
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ StreamingMemoryObject(const StreamingMemoryObject&); // DO NOT IMPLEMENT
+ void operator=(const StreamingMemoryObject&); // DO NOT IMPLEMENT
+};
+
+StreamableMemoryObject *getNonStreamedMemoryObject(
+ const unsigned char *Start, const unsigned char *End);
+
+}
+#endif // STREAMABLEMEMORYOBJECT_H_
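A rough usage sketch for the streaming case, assuming getDataFileStreamer from DataStream.h as the source of the DataStreamer (everything outside the class's own members is illustrative):

  std::string Err;
  DataStreamer *DS = getDataFileStreamer("input.bc", &Err); // assumed helper
  if (!DS)
    report_fatal_error(Err);          // error path, for illustration
  StreamingMemoryObject Obj(DS);      // takes ownership via OwningPtr
  // Walk the stream without calling getExtent(), which would force a full
  // fetch; isObjectEnd() detects EOF incrementally instead.
  for (uint64_t Addr = Obj.getBase(); !Obj.isObjectEnd(Addr); ++Addr) {
    uint8_t Byte;
    if (Obj.readByte(Addr, &Byte) != 0)
      break;                          // bounds violation or stream error
    // ... consume Byte ...
  }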
diff --git a/contrib/llvm/include/llvm/Support/TargetRegistry.h b/contrib/llvm/include/llvm/Support/TargetRegistry.h
index 45f249d..8808130 100644
--- a/contrib/llvm/include/llvm/Support/TargetRegistry.h
+++ b/contrib/llvm/include/llvm/Support/TargetRegistry.h
@@ -44,12 +44,14 @@ namespace llvm {
class MCTargetAsmLexer;
class MCTargetAsmParser;
class TargetMachine;
+ class TargetOptions;
class raw_ostream;
class formatted_raw_ostream;
MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
bool isVerboseAsm,
bool useLoc, bool useCFI,
+ bool useDwarfDirectory,
MCInstPrinter *InstPrint,
MCCodeEmitter *CE,
MCAsmBackend *TAB,
@@ -73,7 +75,8 @@ namespace llvm {
StringRef TT);
typedef MCCodeGenInfo *(*MCCodeGenInfoCtorFnTy)(StringRef TT,
Reloc::Model RM,
- CodeModel::Model CM);
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL);
typedef MCInstrInfo *(*MCInstrInfoCtorFnTy)(void);
typedef MCInstrAnalysis *(*MCInstrAnalysisCtorFnTy)(const MCInstrInfo*Info);
typedef MCRegisterInfo *(*MCRegInfoCtorFnTy)(StringRef TT);
@@ -84,8 +87,10 @@ namespace llvm {
StringRef TT,
StringRef CPU,
StringRef Features,
+ const TargetOptions &Options,
Reloc::Model RM,
- CodeModel::Model CM);
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL);
typedef AsmPrinter *(*AsmPrinterCtorTy)(TargetMachine &TM,
MCStreamer &Streamer);
typedef MCAsmBackend *(*MCAsmBackendCtorTy)(const Target &T, StringRef TT);
@@ -99,6 +104,8 @@ namespace llvm {
typedef MCInstPrinter *(*MCInstPrinterCtorTy)(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI);
typedef MCCodeEmitter *(*MCCodeEmitterCtorTy)(const MCInstrInfo &II,
const MCSubtargetInfo &STI,
@@ -116,6 +123,7 @@ namespace llvm {
bool isVerboseAsm,
bool useLoc,
bool useCFI,
+ bool useDwarfDirectory,
MCInstPrinter *InstPrint,
MCCodeEmitter *CE,
MCAsmBackend *TAB,
@@ -143,8 +151,8 @@ namespace llvm {
/// registered.
MCAsmInfoCtorFnTy MCAsmInfoCtorFn;
- /// MCCodeGenInfoCtorFn - Constructor function for this target's MCCodeGenInfo,
- /// if registered.
+ /// MCCodeGenInfoCtorFn - Constructor function for this target's
+ /// MCCodeGenInfo, if registered.
MCCodeGenInfoCtorFnTy MCCodeGenInfoCtorFn;
/// MCInstrInfoCtorFn - Constructor function for this target's MCInstrInfo,
@@ -275,10 +283,11 @@ namespace llvm {
/// createMCCodeGenInfo - Create a MCCodeGenInfo implementation.
///
MCCodeGenInfo *createMCCodeGenInfo(StringRef Triple, Reloc::Model RM,
- CodeModel::Model CM) const {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) const {
if (!MCCodeGenInfoCtorFn)
return 0;
- return MCCodeGenInfoCtorFn(Triple, RM, CM);
+ return MCCodeGenInfoCtorFn(Triple, RM, CM, OL);
}
/// createMCInstrInfo - Create a MCInstrInfo implementation.
@@ -329,12 +338,14 @@ namespace llvm {
/// either the target triple from the module, or the target triple of the
/// host if that does not exist.
TargetMachine *createTargetMachine(StringRef Triple, StringRef CPU,
- StringRef Features,
- Reloc::Model RM = Reloc::Default,
- CodeModel::Model CM = CodeModel::Default) const {
+ StringRef Features, const TargetOptions &Options,
+ Reloc::Model RM = Reloc::Default,
+ CodeModel::Model CM = CodeModel::Default,
+ CodeGenOpt::Level OL = CodeGenOpt::Default) const {
if (!TargetMachineCtorFn)
return 0;
- return TargetMachineCtorFn(*this, Triple, CPU, Features, RM, CM);
+ return TargetMachineCtorFn(*this, Triple, CPU, Features, Options,
+ RM, CM, OL);
}
/// createMCAsmBackend - Create a target specific assembly parser.
@@ -383,10 +394,12 @@ namespace llvm {
MCInstPrinter *createMCInstPrinter(unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) const {
if (!MCInstPrinterCtorFn)
return 0;
- return MCInstPrinterCtorFn(*this, SyntaxVariant, MAI, STI);
+ return MCInstPrinterCtorFn(*this, SyntaxVariant, MAI, MII, MRI, STI);
}
@@ -426,13 +439,14 @@ namespace llvm {
bool isVerboseAsm,
bool useLoc,
bool useCFI,
+ bool useDwarfDirectory,
MCInstPrinter *InstPrint,
MCCodeEmitter *CE,
MCAsmBackend *TAB,
bool ShowInst) const {
// AsmStreamerCtorFn defaults to llvm::createAsmStreamer
return AsmStreamerCtorFn(Ctx, OS, isVerboseAsm, useLoc, useCFI,
- InstPrint, CE, TAB, ShowInst);
+ useDwarfDirectory, InstPrint, CE, TAB, ShowInst);
}
/// @}
@@ -776,7 +790,7 @@ namespace llvm {
/// extern "C" void LLVMInitializeFooTargetInfo() {
/// RegisterTarget<Triple::foo> X(TheFooTarget, "foo", "Foo description");
/// }
- template<Triple::ArchType TargetArchType = Triple::InvalidArch,
+ template<Triple::ArchType TargetArchType = Triple::UnknownArch,
bool HasJIT = false>
struct RegisterTarget {
RegisterTarget(Target &T, const char *Name, const char *Desc) {
@@ -840,8 +854,8 @@ namespace llvm {
TargetRegistry::RegisterMCCodeGenInfo(T, &Allocator);
}
private:
- static MCCodeGenInfo *Allocator(StringRef TT,
- Reloc::Model RM, CodeModel::Model CM) {
+ static MCCodeGenInfo *Allocator(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL) {
return new MCCodeGenInfoImpl();
}
};
@@ -1010,9 +1024,11 @@ namespace llvm {
private:
static TargetMachine *Allocator(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM,
- CodeModel::Model CM) {
- return new TargetMachineImpl(T, TT, CPU, FS, RM, CM);
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
+ return new TargetMachineImpl(T, TT, CPU, FS, Options, RM, CM, OL);
}
};
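The net effect on clients is that target machine creation now threads a TargetOptions object and a CodeGenOpt::Level through the registry. A sketch, with TheTarget assumed to have been obtained already (e.g. via TargetRegistry::lookupTarget):

  TargetOptions Options;                       // default-constructed
  TargetMachine *TM = TheTarget->createTargetMachine(
      "x86_64-unknown-linux-gnu", "generic", "", Options,
      Reloc::PIC_, CodeModel::Default, CodeGenOpt::Aggressive);
  if (!TM) {
    // No TargetMachine constructor registered for this target.
  }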
diff --git a/contrib/llvm/include/llvm/Support/TargetSelect.h b/contrib/llvm/include/llvm/Support/TargetSelect.h
index 83ff68c..a86e953 100644
--- a/contrib/llvm/include/llvm/Support/TargetSelect.h
+++ b/contrib/llvm/include/llvm/Support/TargetSelect.h
@@ -149,6 +149,18 @@ namespace llvm {
#endif
}
+ /// InitializeNativeTargetDisassembler - The main program should call
+ /// this function to initialize the native target disassembler.
+ inline bool InitializeNativeTargetDisassembler() {
+ // If we have a native target, initialize the corresponding disassembler.
+#ifdef LLVM_NATIVE_DISASSEMBLER
+ LLVM_NATIVE_DISASSEMBLER();
+ return false;
+#else
+ return true;
+#endif
+ }
+
}
#endif
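Like the other InitializeNative* helpers in this header, the new function returns false on success and true when no native disassembler was configured, so a caller might guard on it (sketch; the error handling is illustrative):

  InitializeNativeTarget();
  InitializeNativeTargetAsmPrinter();
  if (InitializeNativeTargetDisassembler())
    errs() << "no native disassembler available\n";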
diff --git a/contrib/llvm/include/llvm/Support/Valgrind.h b/contrib/llvm/include/llvm/Support/Valgrind.h
index 7662eaa..e147647 100644
--- a/contrib/llvm/include/llvm/Support/Valgrind.h
+++ b/contrib/llvm/include/llvm/Support/Valgrind.h
@@ -16,8 +16,23 @@
#ifndef LLVM_SYSTEM_VALGRIND_H
#define LLVM_SYSTEM_VALGRIND_H
+#include "llvm/Support/Compiler.h"
+#include "llvm/Config/llvm-config.h"
#include <stddef.h>
+#if LLVM_ENABLE_THREADS != 0 && !defined(NDEBUG)
+// tsan (Thread Sanitizer) is a valgrind-based tool that detects these exact
+// functions by name.
+extern "C" {
+LLVM_ATTRIBUTE_WEAK void AnnotateHappensAfter(const char *file, int line,
+ const volatile void *cv);
+LLVM_ATTRIBUTE_WEAK void AnnotateHappensBefore(const char *file, int line,
+ const volatile void *cv);
+LLVM_ATTRIBUTE_WEAK void AnnotateIgnoreWritesBegin(const char *file, int line);
+LLVM_ATTRIBUTE_WEAK void AnnotateIgnoreWritesEnd(const char *file, int line);
+}
+#endif
+
namespace llvm {
namespace sys {
// True if Valgrind is controlling this process.
@@ -26,6 +41,34 @@ namespace sys {
// Discard valgrind's translation of code in the range [Addr .. Addr + Len).
// Otherwise valgrind may continue to execute the old version of the code.
void ValgrindDiscardTranslations(const void *Addr, size_t Len);
+
+#if LLVM_ENABLE_THREADS != 0 && !defined(NDEBUG)
+ // Thread Sanitizer is a valgrind tool that finds races in code.
+ // See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations .
+
+ // This marker is used to define a happens-before arc. The race detector will
+ // infer an arc from the begin to the end when they share the same pointer
+ // argument.
+ #define TsanHappensBefore(cv) \
+ AnnotateHappensBefore(__FILE__, __LINE__, cv)
+
+ // This marker defines the destination of a happens-before arc.
+ #define TsanHappensAfter(cv) \
+ AnnotateHappensAfter(__FILE__, __LINE__, cv)
+
+ // Ignore any races on writes between here and the next TsanIgnoreWritesEnd.
+ #define TsanIgnoreWritesBegin() \
+ AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+
+ // Resume checking for racy writes.
+ #define TsanIgnoreWritesEnd() \
+ AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+#else
+ #define TsanHappensBefore(cv)
+ #define TsanHappensAfter(cv)
+ #define TsanIgnoreWritesBegin()
+ #define TsanIgnoreWritesEnd()
+#endif
}
}
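A sketch of how the annotations pair up across threads (Data and Ready are illustrative; the shared pointer argument is what ties the two ends of the arc together):

  // Producer thread: publish the data, then mark the source of the arc.
  Data = compute();
  TsanHappensBefore(&Data);
  Ready = true;

  // Consumer thread: mark the destination of the arc before reading, so
  // the race detector treats the accesses to Data as ordered.
  while (!Ready) { /* spin */ }
  TsanHappensAfter(&Data);
  use(Data);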
diff --git a/contrib/llvm/include/llvm/Support/ValueHandle.h b/contrib/llvm/include/llvm/Support/ValueHandle.h
index c0cdc35..b7210b2 100644
--- a/contrib/llvm/include/llvm/Support/ValueHandle.h
+++ b/contrib/llvm/include/llvm/Support/ValueHandle.h
@@ -49,52 +49,61 @@ protected:
Tracking,
Weak
};
-private:
+private:
PointerIntPair<ValueHandleBase**, 2, HandleBaseKind> PrevPair;
ValueHandleBase *Next;
- Value *VP;
+
+ // A subclass may want to store some information along with the value
+ // pointer. Allow this by making the value pointer a pointer-int pair.
+ // The 'setValPtrInt' and 'getValPtrInt' methods below give subclasses
+ // access to it.
+ PointerIntPair<Value*, 2> VP;
explicit ValueHandleBase(const ValueHandleBase&); // DO NOT IMPLEMENT.
public:
explicit ValueHandleBase(HandleBaseKind Kind)
- : PrevPair(0, Kind), Next(0), VP(0) {}
+ : PrevPair(0, Kind), Next(0), VP(0, 0) {}
ValueHandleBase(HandleBaseKind Kind, Value *V)
- : PrevPair(0, Kind), Next(0), VP(V) {
- if (isValid(VP))
+ : PrevPair(0, Kind), Next(0), VP(V, 0) {
+ if (isValid(VP.getPointer()))
AddToUseList();
}
ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS)
: PrevPair(0, Kind), Next(0), VP(RHS.VP) {
- if (isValid(VP))
+ if (isValid(VP.getPointer()))
AddToExistingUseList(RHS.getPrevPtr());
}
~ValueHandleBase() {
- if (isValid(VP))
+ if (isValid(VP.getPointer()))
RemoveFromUseList();
}
Value *operator=(Value *RHS) {
- if (VP == RHS) return RHS;
- if (isValid(VP)) RemoveFromUseList();
- VP = RHS;
- if (isValid(VP)) AddToUseList();
+ if (VP.getPointer() == RHS) return RHS;
+ if (isValid(VP.getPointer())) RemoveFromUseList();
+ VP.setPointer(RHS);
+ if (isValid(VP.getPointer())) AddToUseList();
return RHS;
}
Value *operator=(const ValueHandleBase &RHS) {
- if (VP == RHS.VP) return RHS.VP;
- if (isValid(VP)) RemoveFromUseList();
- VP = RHS.VP;
- if (isValid(VP)) AddToExistingUseList(RHS.getPrevPtr());
- return VP;
+ if (VP.getPointer() == RHS.VP.getPointer()) return RHS.VP.getPointer();
+ if (isValid(VP.getPointer())) RemoveFromUseList();
+ VP.setPointer(RHS.VP.getPointer());
+ if (isValid(VP.getPointer())) AddToExistingUseList(RHS.getPrevPtr());
+ return VP.getPointer();
}
Value *operator->() const { return getValPtr(); }
Value &operator*() const { return *getValPtr(); }
protected:
- Value *getValPtr() const { return VP; }
+ Value *getValPtr() const { return VP.getPointer(); }
+
+ void setValPtrInt(unsigned K) { VP.setInt(K); }
+ unsigned getValPtrInt() const { return VP.getInt(); }
+
static bool isValid(Value *V) {
return V &&
V != DenseMapInfo<Value *>::getEmptyKey() &&
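The point of the change is visible from a subclass's perspective; a hypothetical handle that keeps a small tag in the spare bits might look like:

  // Sketch only: FlaggedVH is not part of this change.
  class FlaggedVH : public ValueHandleBase {
  public:
    FlaggedVH(Value *V, unsigned Flag) : ValueHandleBase(Weak, V) {
      setValPtrInt(Flag);             // Flag must fit in 2 bits
    }
    unsigned getFlag() const { return getValPtrInt(); }
  };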
diff --git a/contrib/llvm/include/llvm/Support/YAMLParser.h b/contrib/llvm/include/llvm/Support/YAMLParser.h
new file mode 100644
index 0000000..b24cacd
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/YAMLParser.h
@@ -0,0 +1,549 @@
+//===--- YAMLParser.h - Simple YAML parser --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a YAML 1.2 parser.
+//
+// See http://www.yaml.org/spec/1.2/spec.html for the full standard.
+//
+// This currently does not implement the following:
+// * Multi-line literal folding.
+// * Tag resolution.
+// * UTF-16.
+// * BOMs anywhere other than the first Unicode scalar value in the file.
+//
+// The most important class here is Stream. This represents a YAML stream with
+// 0, 1, or many documents.
+//
+// SourceMgr sm;
+// StringRef input = getInput();
+// yaml::Stream stream(input, sm);
+//
+// for (yaml::document_iterator di = stream.begin(), de = stream.end();
+// di != de; ++di) {
+// yaml::Node *n = di->getRoot();
+// if (n) {
+// // Do something with n...
+// } else
+// break;
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_YAML_PARSER_H
+#define LLVM_SUPPORT_YAML_PARSER_H
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/SMLoc.h"
+
+#include <limits>
+#include <utility>
+
+namespace llvm {
+class MemoryBuffer;
+class SourceMgr;
+class raw_ostream;
+class Twine;
+
+namespace yaml {
+
+class document_iterator;
+class Document;
+class Node;
+class Scanner;
+struct Token;
+
+/// @brief Dump all the tokens in this stream to OS.
+/// @returns true if there was an error, false otherwise.
+bool dumpTokens(StringRef Input, raw_ostream &);
+
+/// @brief Scans all tokens in input without outputting anything. This is used
+/// for benchmarking the tokenizer.
+/// @returns true if there was an error, false otherwise.
+bool scanTokens(StringRef Input);
+
+/// @brief Escape \a Input for a double quoted scalar.
+std::string escape(StringRef Input);
+
+/// @brief This class represents a YAML stream potentially containing multiple
+/// documents.
+class Stream {
+public:
+ Stream(StringRef Input, SourceMgr &);
+ ~Stream();
+
+ document_iterator begin();
+ document_iterator end();
+ void skip();
+ bool failed();
+ bool validate() {
+ skip();
+ return !failed();
+ }
+
+ void printError(Node *N, const Twine &Msg);
+
+private:
+ OwningPtr<Scanner> scanner;
+ OwningPtr<Document> CurrentDoc;
+
+ friend class Document;
+
+ /// @brief Validate a %YAML x.x directive.
+ void handleYAMLDirective(const Token &);
+};
+
+/// @brief Abstract base class for all Nodes.
+class Node {
+public:
+ enum NodeKind {
+ NK_Null,
+ NK_Scalar,
+ NK_KeyValue,
+ NK_Mapping,
+ NK_Sequence,
+ NK_Alias
+ };
+
+ Node(unsigned int Type, OwningPtr<Document>&, StringRef Anchor);
+
+ /// @brief Get the value of the anchor attached to this node. If it does not
+ /// have one, getAnchor().size() will be 0.
+ StringRef getAnchor() const { return Anchor; }
+
+ SMRange getSourceRange() const { return SourceRange; }
+ void setSourceRange(SMRange SR) { SourceRange = SR; }
+
+ // These functions forward to Document and Scanner.
+ Token &peekNext();
+ Token getNext();
+ Node *parseBlockNode();
+ BumpPtrAllocator &getAllocator();
+ void setError(const Twine &Message, Token &Location) const;
+ bool failed() const;
+
+ virtual void skip() {}
+
+ unsigned int getType() const { return TypeID; }
+ static inline bool classof(const Node *) { return true; }
+
+ void *operator new ( size_t Size
+ , BumpPtrAllocator &Alloc
+ , size_t Alignment = 16) throw() {
+ return Alloc.Allocate(Size, Alignment);
+ }
+
+ void operator delete(void *Ptr, BumpPtrAllocator &Alloc, size_t) throw() {
+ Alloc.Deallocate(Ptr);
+ }
+
+protected:
+ OwningPtr<Document> &Doc;
+ SMRange SourceRange;
+
+ void operator delete(void *) throw() {}
+
+ virtual ~Node() {}
+
+private:
+ unsigned int TypeID;
+ StringRef Anchor;
+};
+
+/// @brief A null value.
+///
+/// Example:
+/// !!null null
+class NullNode : public Node {
+public:
+ NullNode(OwningPtr<Document> &D) : Node(NK_Null, D, StringRef()) {}
+
+ static inline bool classof(const NullNode *) { return true; }
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_Null;
+ }
+};
+
+/// @brief A scalar node is an opaque datum that can be presented as a
+/// series of zero or more Unicode scalar values.
+///
+/// Example:
+/// Adena
+class ScalarNode : public Node {
+public:
+ ScalarNode(OwningPtr<Document> &D, StringRef Anchor, StringRef Val)
+ : Node(NK_Scalar, D, Anchor)
+ , Value(Val) {
+ SMLoc Start = SMLoc::getFromPointer(Val.begin());
+ SMLoc End = SMLoc::getFromPointer(Val.end() - 1);
+ SourceRange = SMRange(Start, End);
+ }
+
+ // Return Value without any escaping or folding or other fun YAML stuff. These
+ // are the exact bytes that are contained in the file (after conversion to
+ // UTF-8).
+ StringRef getRawValue() const { return Value; }
+
+ /// @brief Gets the value of this node as a StringRef.
+ ///
+ /// @param Storage is used to store the content of the returned StringRef iff
+ /// it requires any modification from how it appeared in the source.
+ /// This happens with escaped characters and multi-line literals.
+ StringRef getValue(SmallVectorImpl<char> &Storage) const;
+
+ static inline bool classof(const ScalarNode *) { return true; }
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_Scalar;
+ }
+
+private:
+ StringRef Value;
+
+ StringRef unescapeDoubleQuoted( StringRef UnquotedValue
+ , StringRef::size_type Start
+ , SmallVectorImpl<char> &Storage) const;
+};
+
+/// @brief A key and value pair. While not technically a Node under the YAML
+/// representation graph, it is easier to treat them this way.
+///
+/// TODO: Consider making this not a child of Node.
+///
+/// Example:
+/// Section: .text
+class KeyValueNode : public Node {
+public:
+ KeyValueNode(OwningPtr<Document> &D)
+ : Node(NK_KeyValue, D, StringRef())
+ , Key(0)
+ , Value(0)
+ {}
+
+ /// @brief Parse and return the key.
+ ///
+ /// This may be called multiple times.
+ ///
+ /// @returns The key, or nullptr if failed() == true.
+ Node *getKey();
+
+ /// @brief Parse and return the value.
+ ///
+ /// This may be called multiple times.
+ ///
+ /// @returns The value, or nullptr if failed() == true.
+ Node *getValue();
+
+ virtual void skip() {
+ getKey()->skip();
+ getValue()->skip();
+ }
+
+ static inline bool classof(const KeyValueNode *) { return true; }
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_KeyValue;
+ }
+
+private:
+ Node *Key;
+ Node *Value;
+};
+
+/// @brief This is an iterator abstraction over YAML collections shared by both
+/// sequences and maps.
+///
+/// BaseT must have a ValueT* member named CurrentEntry and a member function
+/// increment() which must set CurrentEntry to 0 to create an end iterator.
+template <class BaseT, class ValueT>
+class basic_collection_iterator
+ : public std::iterator<std::forward_iterator_tag, ValueT> {
+public:
+ basic_collection_iterator() : Base(0) {}
+ basic_collection_iterator(BaseT *B) : Base(B) {}
+
+ ValueT *operator ->() const {
+ assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
+ return Base->CurrentEntry;
+ }
+
+ ValueT &operator *() const {
+ assert(Base && Base->CurrentEntry &&
+ "Attempted to dereference end iterator!");
+ return *Base->CurrentEntry;
+ }
+
+ operator ValueT*() const {
+ assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
+ return Base->CurrentEntry;
+ }
+
+ bool operator !=(const basic_collection_iterator &Other) const {
+ if (Base != Other.Base)
+ return true;
+ return (Base && Other.Base) && Base->CurrentEntry
+ != Other.Base->CurrentEntry;
+ }
+
+ basic_collection_iterator &operator++() {
+ assert(Base && "Attempted to advance iterator past end!");
+ Base->increment();
+ // Create an end iterator.
+ if (Base->CurrentEntry == 0)
+ Base = 0;
+ return *this;
+ }
+
+private:
+ BaseT *Base;
+};
+
+// The following two templates are used for both MappingNode and Sequence Node.
+template <class CollectionType>
+typename CollectionType::iterator begin(CollectionType &C) {
+ assert(C.IsAtBeginning && "You may only iterate over a collection once!");
+ C.IsAtBeginning = false;
+ typename CollectionType::iterator ret(&C);
+ ++ret;
+ return ret;
+}
+
+template <class CollectionType>
+void skip(CollectionType &C) {
+ // TODO: support skipping from the middle of a parsed collection ;/
+ assert((C.IsAtBeginning || C.IsAtEnd) && "Cannot skip mid parse!");
+ if (C.IsAtBeginning)
+ for (typename CollectionType::iterator i = begin(C), e = C.end();
+ i != e; ++i)
+ i->skip();
+}
+
+/// @brief Represents a YAML map created from either a block map or a flow map.
+///
+/// This parses the YAML stream as increment() is called.
+///
+/// Example:
+/// Name: _main
+/// Scope: Global
+class MappingNode : public Node {
+public:
+ enum MappingType {
+ MT_Block,
+ MT_Flow,
+ MT_Inline ///< An inline mapping node is used for "[key: value]".
+ };
+
+ MappingNode(OwningPtr<Document> &D, StringRef Anchor, MappingType MT)
+ : Node(NK_Mapping, D, Anchor)
+ , Type(MT)
+ , IsAtBeginning(true)
+ , IsAtEnd(false)
+ , CurrentEntry(0)
+ {}
+
+ friend class basic_collection_iterator<MappingNode, KeyValueNode>;
+ typedef basic_collection_iterator<MappingNode, KeyValueNode> iterator;
+ template <class T> friend typename T::iterator yaml::begin(T &);
+ template <class T> friend void yaml::skip(T &);
+
+ iterator begin() {
+ return yaml::begin(*this);
+ }
+
+ iterator end() { return iterator(); }
+
+ virtual void skip() {
+ yaml::skip(*this);
+ }
+
+ static inline bool classof(const MappingNode *) { return true; }
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_Mapping;
+ }
+
+private:
+ MappingType Type;
+ bool IsAtBeginning;
+ bool IsAtEnd;
+ KeyValueNode *CurrentEntry;
+
+ void increment();
+};
+
+/// @brief Represents a YAML sequence created from either a block sequence or a
+/// flow sequence.
+///
+/// This parses the YAML stream as increment() is called.
+///
+/// Example:
+/// - Hello
+/// - World
+class SequenceNode : public Node {
+public:
+ enum SequenceType {
+ ST_Block,
+ ST_Flow,
+ // Use for:
+ //
+ // key:
+ // - val1
+ // - val2
+ //
+ // A BlockMappingEntry and BlockEnd are not created in this case.
+ ST_Indentless
+ };
+
+ SequenceNode(OwningPtr<Document> &D, StringRef Anchor, SequenceType ST)
+ : Node(NK_Sequence, D, Anchor)
+ , SeqType(ST)
+ , IsAtBeginning(true)
+ , IsAtEnd(false)
+ , WasPreviousTokenFlowEntry(true) // Start with an imaginary ','.
+ , CurrentEntry(0)
+ {}
+
+ friend class basic_collection_iterator<SequenceNode, Node>;
+ typedef basic_collection_iterator<SequenceNode, Node> iterator;
+ template <class T> friend typename T::iterator yaml::begin(T &);
+ template <class T> friend void yaml::skip(T &);
+
+ void increment();
+
+ iterator begin() {
+ return yaml::begin(*this);
+ }
+
+ iterator end() { return iterator(); }
+
+ virtual void skip() {
+ yaml::skip(*this);
+ }
+
+ static inline bool classof(const SequenceNode *) { return true; }
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_Sequence;
+ }
+
+private:
+ SequenceType SeqType;
+ bool IsAtBeginning;
+ bool IsAtEnd;
+ bool WasPreviousTokenFlowEntry;
+ Node *CurrentEntry;
+};
+
+/// @brief Represents an alias to a Node with an anchor.
+///
+/// Example:
+/// *AnchorName
+class AliasNode : public Node {
+public:
+ AliasNode(OwningPtr<Document> &D, StringRef Val)
+ : Node(NK_Alias, D, StringRef()), Name(Val) {}
+
+ StringRef getName() const { return Name; }
+ Node *getTarget();
+
+ static inline bool classof(const AliasNode *) { return true; }
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_Alias;
+ }
+
+private:
+ StringRef Name;
+};
+
+/// @brief A YAML Stream is a sequence of Documents. A document contains a root
+/// node.
+class Document {
+public:
+ /// @brief Root for parsing a node. Returns a single node.
+ Node *parseBlockNode();
+
+ Document(Stream &ParentStream);
+
+ /// @brief Finish parsing the current document and return true if there are
+ /// more. Return false otherwise.
+ bool skip();
+
+ /// @brief Parse and return the root level node.
+ Node *getRoot() {
+ if (Root)
+ return Root;
+ return Root = parseBlockNode();
+ }
+
+private:
+ friend class Node;
+ friend class document_iterator;
+
+ /// @brief Stream to read tokens from.
+ Stream &stream;
+
+ /// @brief Allocator used for all nodes. All are destroyed without calling
+ /// their destructor when the document is destroyed.
+ BumpPtrAllocator NodeAllocator;
+
+ /// @brief The root node. Used to support skipping a partially parsed
+ /// document.
+ Node *Root;
+
+ Token &peekNext();
+ Token getNext();
+ void setError(const Twine &Message, Token &Location) const;
+ bool failed() const;
+
+ void handleTagDirective(const Token &Tag) {
+ // TODO: Track tags.
+ }
+
+ /// @brief Parse %BLAH directives and return true if any were encountered.
+ bool parseDirectives();
+
+ /// @brief Consume the next token and error if it is not \a TK.
+ bool expectToken(int TK);
+};
+
+/// @brief Iterator abstraction for Documents over a Stream.
+class document_iterator {
+public:
+ document_iterator() : Doc(NullDoc) {}
+ document_iterator(OwningPtr<Document> &D) : Doc(D) {}
+
+ bool operator !=(const document_iterator &Other) {
+ return Doc != Other.Doc;
+ }
+
+ document_iterator operator ++() {
+ if (!Doc->skip()) {
+ Doc.reset(0);
+ } else {
+ Stream &S = Doc->stream;
+ Doc.reset(new Document(S));
+ }
+ return *this;
+ }
+
+ Document &operator *() {
+ return *Doc;
+ }
+
+ OwningPtr<Document> &operator ->() {
+ return Doc;
+ }
+
+private:
+ static OwningPtr<Document> NullDoc;
+ OwningPtr<Document> &Doc;
+};
+
+}
+}
+
+#endif
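Building on the Stream example in the header comment, iterating the key/value pairs of a top-level mapping might look like this (a sketch; Buffer is an assumed StringRef of YAML input and error handling is elided):

  SourceMgr SM;
  yaml::Stream YS(Buffer, SM);
  yaml::Node *Root = YS.begin()->getRoot();
  if (yaml::MappingNode *Map = dyn_cast_or_null<yaml::MappingNode>(Root)) {
    for (yaml::MappingNode::iterator KV = Map->begin(), E = Map->end();
         KV != E; ++KV) {
      SmallString<32> Storage;
      if (yaml::ScalarNode *Key = dyn_cast<yaml::ScalarNode>(KV->getKey()))
        outs() << Key->getValue(Storage) << "\n";
    }
  }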
diff --git a/contrib/llvm/include/llvm/Support/system_error.h b/contrib/llvm/include/llvm/Support/system_error.h
index 2c15b69..af81206 100644
--- a/contrib/llvm/include/llvm/Support/system_error.h
+++ b/contrib/llvm/include/llvm/Support/system_error.h
@@ -470,17 +470,6 @@ template <> struct hash<std::error_code>;
namespace llvm {
-template <class T, T v>
-struct integral_constant {
- typedef T value_type;
- static const value_type value = v;
- typedef integral_constant<T,v> type;
- operator value_type() { return value; }
-};
-
-typedef integral_constant<bool, true> true_type;
-typedef integral_constant<bool, false> false_type;
-
// is_error_code_enum
template <class Tp> struct is_error_code_enum : public false_type {};
@@ -738,6 +727,10 @@ class error_code {
public:
error_code() : _val_(0), _cat_(&system_category()) {}
+ static error_code success() {
+ return error_code();
+ }
+
error_code(int _val, const error_category& _cat)
: _val_(_val), _cat_(&_cat) {}
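The new factory mostly improves readability at return sites; a hypothetical helper using it:

  error_code readConfig(StringRef Path) {     // illustrative function
    // ... on failure: return error_code(errno, generic_category());
    return error_code::success();             // same value as error_code()
  }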
diff --git a/contrib/llvm/include/llvm/Support/type_traits.h b/contrib/llvm/include/llvm/Support/type_traits.h
index 515295b..a3a551f 100644
--- a/contrib/llvm/include/llvm/Support/type_traits.h
+++ b/contrib/llvm/include/llvm/Support/type_traits.h
@@ -17,6 +17,8 @@
#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
#define LLVM_SUPPORT_TYPE_TRAITS_H
+#include "llvm/Support/DataTypes.h"
+#include <cstddef>
#include <utility>
// This is actually the conforming implementation which works with abstract
@@ -64,22 +66,99 @@ struct isPodLike {
// std::pair's are pod-like if their elements are.
template<typename T, typename U>
struct isPodLike<std::pair<T, U> > {
- static const bool value = isPodLike<T>::value & isPodLike<U>::value;
+ static const bool value = isPodLike<T>::value && isPodLike<U>::value;
};
+template <class T, T v>
+struct integral_constant {
+ typedef T value_type;
+ static const value_type value = v;
+ typedef integral_constant<T,v> type;
+ operator value_type() { return value; }
+};
+
+typedef integral_constant<bool, true> true_type;
+typedef integral_constant<bool, false> false_type;
+
/// \brief Metafunction that determines whether the two given types are
/// equivalent.
-template<typename T, typename U>
-struct is_same {
- static const bool value = false;
+template<typename T, typename U> struct is_same : public false_type {};
+template<typename T> struct is_same<T, T> : public true_type {};
+
+/// \brief Metafunction that removes const qualification from a type.
+template <typename T> struct remove_const { typedef T type; };
+template <typename T> struct remove_const<const T> { typedef T type; };
+
+/// \brief Metafunction that removes volatile qualification from a type.
+template <typename T> struct remove_volatile { typedef T type; };
+template <typename T> struct remove_volatile<volatile T> { typedef T type; };
+
+/// \brief Metafunction that removes both const and volatile qualification from
+/// a type.
+template <typename T> struct remove_cv {
+ typedef typename remove_const<typename remove_volatile<T>::type>::type type;
};
-template<typename T>
-struct is_same<T, T> {
- static const bool value = true;
+/// \brief Helper to implement is_integral metafunction.
+template <typename T> struct is_integral_impl : false_type {};
+template <> struct is_integral_impl< bool> : true_type {};
+template <> struct is_integral_impl< char> : true_type {};
+template <> struct is_integral_impl< signed char> : true_type {};
+template <> struct is_integral_impl<unsigned char> : true_type {};
+template <> struct is_integral_impl< wchar_t> : true_type {};
+template <> struct is_integral_impl< short> : true_type {};
+template <> struct is_integral_impl<unsigned short> : true_type {};
+template <> struct is_integral_impl< int> : true_type {};
+template <> struct is_integral_impl<unsigned int> : true_type {};
+template <> struct is_integral_impl< long> : true_type {};
+template <> struct is_integral_impl<unsigned long> : true_type {};
+template <> struct is_integral_impl< long long> : true_type {};
+template <> struct is_integral_impl<unsigned long long> : true_type {};
+
+/// \brief Metafunction that determines whether the given type is an integral
+/// type.
+template <typename T>
+struct is_integral : is_integral_impl<T> {};
+
+/// \brief Metafunction to remove reference from a type.
+template <typename T> struct remove_reference { typedef T type; };
+template <typename T> struct remove_reference<T&> { typedef T type; };
+
+/// \brief Metafunction that determines whether the given type is a pointer
+/// type.
+template <typename T> struct is_pointer : false_type {};
+template <typename T> struct is_pointer<T*> : true_type {};
+template <typename T> struct is_pointer<T* const> : true_type {};
+template <typename T> struct is_pointer<T* volatile> : true_type {};
+template <typename T> struct is_pointer<T* const volatile> : true_type {};
+
+/// \brief Metafunction that determines whether the given type is either an
+/// integral type or an enumeration type.
+///
+/// Note that this accepts potentially more integral types than we whitelist
+/// above for is_integral because it is based on merely being convertible
+/// implicitly to an integral type.
+template <typename T> class is_integral_or_enum {
+ // Provide an overload which can be called with anything implicitly
+ // convertible to an unsigned long long. This should catch integer types and
+ // enumeration types at least. We blacklist classes with conversion operators
+ // below.
+ static double check_int_convertible(unsigned long long);
+ static char check_int_convertible(...);
+
+ typedef typename remove_reference<T>::type UnderlyingT;
+ static UnderlyingT &nonce_instance;
+
+public:
+ enum {
+ value = (!is_class<UnderlyingT>::value && !is_pointer<UnderlyingT>::value &&
+ !is_same<UnderlyingT, float>::value &&
+ !is_same<UnderlyingT, double>::value &&
+ sizeof(char) != sizeof(check_int_convertible(nonce_instance)))
+ };
};
-
+
// enable_if_c - Enable/disable a template based on a metafunction
template<bool Cond, typename T = void>
struct enable_if_c {
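A quick sanity check of the new trait, written as C++03-style compile-time assertions (a negative array size fails to compile if the trait gives the wrong answer; static_assert is deliberately not assumed):

  enum Color { Red, Green };
  typedef char enum_ok [is_integral_or_enum<Color>::value ?  1 : -1];
  typedef char int_ok  [is_integral_or_enum<long>::value  ?  1 : -1];
  typedef char ptr_no  [is_integral_or_enum<int*>::value  ? -1 :  1];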
diff --git a/contrib/llvm/include/llvm/TableGen/Record.h b/contrib/llvm/include/llvm/TableGen/Record.h
index afce760..5e68c10 100644
--- a/contrib/llvm/include/llvm/TableGen/Record.h
+++ b/contrib/llvm/include/llvm/TableGen/Record.h
@@ -20,6 +20,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
@@ -32,7 +33,6 @@ class BitsRecTy;
class IntRecTy;
class StringRecTy;
class ListRecTy;
-class CodeRecTy;
class DagRecTy;
class RecordRecTy;
@@ -43,7 +43,6 @@ class BitInit;
class BitsInit;
class IntInit;
class StringInit;
-class CodeInit;
class ListInit;
class UnOpInit;
class BinOpInit;
@@ -68,6 +67,7 @@ class RecordKeeper;
class RecTy {
ListRecTy *ListTy;
+ virtual void anchor();
public:
RecTy() : ListTy(0) {}
virtual ~RecTy() {}
@@ -99,7 +99,6 @@ public: // These methods should only be called from subclasses of Init
virtual Init *convertValue( TernOpInit *UI) {
return convertValue((TypedInit*)UI);
}
- virtual Init *convertValue( CodeInit *CI) { return 0; }
virtual Init *convertValue(VarBitInit *VB) { return 0; }
virtual Init *convertValue( DefInit *DI) { return 0; }
virtual Init *convertValue( DagInit *DI) { return 0; }
@@ -119,7 +118,6 @@ public: // These methods should only be called by subclasses of RecTy.
virtual bool baseClassOf(const IntRecTy *RHS) const { return false; }
virtual bool baseClassOf(const StringRecTy *RHS) const { return false; }
virtual bool baseClassOf(const ListRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; }
virtual bool baseClassOf(const DagRecTy *RHS) const { return false; }
virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; }
};
@@ -144,7 +142,6 @@ public:
virtual Init *convertValue( IntInit *II);
virtual Init *convertValue(StringInit *SI) { return 0; }
virtual Init *convertValue( ListInit *LI) { return 0; }
- virtual Init *convertValue( CodeInit *CI) { return 0; }
virtual Init *convertValue(VarBitInit *VB) { return (Init*)VB; }
virtual Init *convertValue( DefInit *DI) { return 0; }
virtual Init *convertValue( DagInit *DI) { return 0; }
@@ -165,7 +162,6 @@ public:
virtual bool baseClassOf(const IntRecTy *RHS) const { return true; }
virtual bool baseClassOf(const StringRecTy *RHS) const { return false; }
virtual bool baseClassOf(const ListRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; }
virtual bool baseClassOf(const DagRecTy *RHS) const { return false; }
virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; }
@@ -189,7 +185,6 @@ public:
virtual Init *convertValue( IntInit *II);
virtual Init *convertValue(StringInit *SI) { return 0; }
virtual Init *convertValue( ListInit *LI) { return 0; }
- virtual Init *convertValue( CodeInit *CI) { return 0; }
virtual Init *convertValue(VarBitInit *VB) { return 0; }
virtual Init *convertValue( DefInit *DI) { return 0; }
virtual Init *convertValue( DagInit *DI) { return 0; }
@@ -212,7 +207,6 @@ public:
virtual bool baseClassOf(const IntRecTy *RHS) const { return true; }
virtual bool baseClassOf(const StringRecTy *RHS) const { return false; }
virtual bool baseClassOf(const ListRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; }
virtual bool baseClassOf(const DagRecTy *RHS) const { return false; }
virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; }
@@ -233,7 +227,6 @@ public:
virtual Init *convertValue( IntInit *II) { return (Init*)II; }
virtual Init *convertValue(StringInit *SI) { return 0; }
virtual Init *convertValue( ListInit *LI) { return 0; }
- virtual Init *convertValue( CodeInit *CI) { return 0; }
virtual Init *convertValue(VarBitInit *VB) { return 0; }
virtual Init *convertValue( DefInit *DI) { return 0; }
virtual Init *convertValue( DagInit *DI) { return 0; }
@@ -255,7 +248,6 @@ public:
virtual bool baseClassOf(const IntRecTy *RHS) const { return true; }
virtual bool baseClassOf(const StringRecTy *RHS) const { return false; }
virtual bool baseClassOf(const ListRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; }
virtual bool baseClassOf(const DagRecTy *RHS) const { return false; }
virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; }
@@ -279,7 +271,6 @@ public:
virtual Init *convertValue( BinOpInit *BO);
virtual Init *convertValue( TernOpInit *BO) { return RecTy::convertValue(BO);}
- virtual Init *convertValue( CodeInit *CI) { return 0; }
virtual Init *convertValue(VarBitInit *VB) { return 0; }
virtual Init *convertValue( DefInit *DI) { return 0; }
virtual Init *convertValue( DagInit *DI) { return 0; }
@@ -298,7 +289,6 @@ public:
virtual bool baseClassOf(const IntRecTy *RHS) const { return false; }
virtual bool baseClassOf(const StringRecTy *RHS) const { return true; }
virtual bool baseClassOf(const ListRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; }
virtual bool baseClassOf(const DagRecTy *RHS) const { return false; }
virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; }
};
@@ -322,7 +312,6 @@ public:
virtual Init *convertValue( IntInit *II) { return 0; }
virtual Init *convertValue(StringInit *SI) { return 0; }
virtual Init *convertValue( ListInit *LI);
- virtual Init *convertValue( CodeInit *CI) { return 0; }
virtual Init *convertValue(VarBitInit *VB) { return 0; }
virtual Init *convertValue( DefInit *DI) { return 0; }
virtual Init *convertValue( DagInit *DI) { return 0; }
@@ -346,47 +335,6 @@ public:
virtual bool baseClassOf(const ListRecTy *RHS) const {
return RHS->getElementType()->typeIsConvertibleTo(Ty);
}
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const DagRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; }
-};
-
-/// CodeRecTy - 'code' - Represent an code fragment, function or method.
-///
-class CodeRecTy : public RecTy {
- static CodeRecTy Shared;
- CodeRecTy() {}
-public:
- static CodeRecTy *get() { return &Shared; }
-
- virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
- virtual Init *convertValue( BitInit *BI) { return 0; }
- virtual Init *convertValue( BitsInit *BI) { return 0; }
- virtual Init *convertValue( IntInit *II) { return 0; }
- virtual Init *convertValue(StringInit *SI) { return 0; }
- virtual Init *convertValue( ListInit *LI) { return 0; }
- virtual Init *convertValue( CodeInit *CI) { return (Init*)CI; }
- virtual Init *convertValue(VarBitInit *VB) { return 0; }
- virtual Init *convertValue( DefInit *DI) { return 0; }
- virtual Init *convertValue( DagInit *DI) { return 0; }
- virtual Init *convertValue( UnOpInit *UI) { return RecTy::convertValue(UI);}
- virtual Init *convertValue( BinOpInit *UI) { return RecTy::convertValue(UI);}
- virtual Init *convertValue( TernOpInit *UI) { return RecTy::convertValue(UI);}
- virtual Init *convertValue( TypedInit *TI);
- virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
- virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
-
- std::string getAsString() const { return "code"; }
-
- bool typeIsConvertibleTo(const RecTy *RHS) const {
- return RHS->baseClassOf(this);
- }
- virtual bool baseClassOf(const BitRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const BitsRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const IntRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const StringRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const ListRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return true; }
virtual bool baseClassOf(const DagRecTy *RHS) const { return false; }
virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; }
};
@@ -405,7 +353,6 @@ public:
virtual Init *convertValue( IntInit *II) { return 0; }
virtual Init *convertValue(StringInit *SI) { return 0; }
virtual Init *convertValue( ListInit *LI) { return 0; }
- virtual Init *convertValue( CodeInit *CI) { return 0; }
virtual Init *convertValue(VarBitInit *VB) { return 0; }
virtual Init *convertValue( DefInit *DI) { return 0; }
virtual Init *convertValue( UnOpInit *BO);
@@ -427,7 +374,6 @@ public:
virtual bool baseClassOf(const IntRecTy *RHS) const { return false; }
virtual bool baseClassOf(const StringRecTy *RHS) const { return false; }
virtual bool baseClassOf(const ListRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; }
virtual bool baseClassOf(const DagRecTy *RHS) const { return true; }
virtual bool baseClassOf(const RecordRecTy *RHS) const { return false; }
};
@@ -451,7 +397,6 @@ public:
virtual Init *convertValue( IntInit *II) { return 0; }
virtual Init *convertValue(StringInit *SI) { return 0; }
virtual Init *convertValue( ListInit *LI) { return 0; }
- virtual Init *convertValue( CodeInit *CI) { return 0; }
virtual Init *convertValue(VarBitInit *VB) { return 0; }
virtual Init *convertValue( UnOpInit *UI) { return RecTy::convertValue(UI);}
virtual Init *convertValue( BinOpInit *UI) { return RecTy::convertValue(UI);}
@@ -472,7 +417,6 @@ public:
virtual bool baseClassOf(const IntRecTy *RHS) const { return false; }
virtual bool baseClassOf(const StringRecTy *RHS) const { return false; }
virtual bool baseClassOf(const ListRecTy *RHS) const { return false; }
- virtual bool baseClassOf(const CodeRecTy *RHS) const { return false; }
virtual bool baseClassOf(const DagRecTy *RHS) const { return false; }
virtual bool baseClassOf(const RecordRecTy *RHS) const;
};
@@ -489,6 +433,7 @@ RecTy *resolveTypes(RecTy *T1, RecTy *T2);
class Init {
Init(const Init &); // Do not define.
Init &operator=(const Init &); // Do not define.
+ virtual void anchor();
protected:
Init(void) {}
@@ -617,6 +562,7 @@ class UnsetInit : public Init {
UnsetInit() : Init() {}
UnsetInit(const UnsetInit &); // Do not define.
UnsetInit &operator=(const UnsetInit &Other); // Do not define.
+ virtual void anchor();
public:
static UnsetInit *get();
@@ -638,6 +584,7 @@ class BitInit : public Init {
explicit BitInit(bool V) : Value(V) {}
BitInit(const BitInit &Other); // Do not define.
BitInit &operator=(BitInit &Other); // Do not define.
+ virtual void anchor();
public:
static BitInit *get(bool V);
@@ -725,8 +672,7 @@ public:
///
virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
unsigned Bit) const {
- assert(0 && "Illegal bit reference off int");
- return 0;
+ llvm_unreachable("Illegal bit reference off int");
}
/// resolveListElementReference - This method is used to implement
@@ -734,8 +680,7 @@ public:
/// now, we return the resolved value, otherwise we return null.
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const {
- assert(0 && "Illegal element reference off int");
- return 0;
+ llvm_unreachable("Illegal element reference off int");
}
};
@@ -750,9 +695,10 @@ class StringInit : public TypedInit {
StringInit(const StringInit &Other); // Do not define.
StringInit &operator=(const StringInit &Other); // Do not define.
+ virtual void anchor();
public:
- static StringInit *get(const std::string &V);
+ static StringInit *get(StringRef);
const std::string &getValue() const { return Value; }
@@ -769,8 +715,7 @@ public:
///
virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
unsigned Bit) const {
- assert(0 && "Illegal bit reference off string");
- return 0;
+ llvm_unreachable("Illegal bit reference off string");
}
/// resolveListElementReference - This method is used to implement
@@ -778,31 +723,8 @@ public:
/// now, we return the resolved value, otherwise we return null.
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const {
- assert(0 && "Illegal element reference off string");
- return 0;
- }
-};
-
-/// CodeInit - "[{...}]" - Represent a code fragment.
-///
-class CodeInit : public Init {
- std::string Value;
-
- explicit CodeInit(const std::string &V) : Value(V) {}
-
- CodeInit(const CodeInit &Other); // Do not define.
- CodeInit &operator=(const CodeInit &Other); // Do not define.
-
-public:
- static CodeInit *get(const std::string &V);
-
- const std::string &getValue() const { return Value; }
-
- virtual Init *convertInitializerTo(RecTy *Ty) const {
- return Ty->convertValue(const_cast<CodeInit *>(this));
+ llvm_unreachable("Illegal element reference off string");
}
-
- virtual std::string getAsString() const { return "[{" + Value + "}]"; }
};
/// ListInit - [AL, AH, CL] - Represent a list of defs
@@ -861,8 +783,7 @@ public:
///
virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
unsigned Bit) const {
- assert(0 && "Illegal bit reference off list");
- return 0;
+ llvm_unreachable("Illegal bit reference off list");
}
/// resolveListElementReference - This method is used to implement
@@ -1058,9 +979,11 @@ public:
/// VarInit - 'Opcode' - Represent a reference to an entire variable object.
///
class VarInit : public TypedInit {
- std::string VarName;
+ Init *VarName;
explicit VarInit(const std::string &VN, RecTy *T)
+ : TypedInit(T), VarName(StringInit::get(VN)) {}
+ explicit VarInit(Init *VN, RecTy *T)
: TypedInit(T), VarName(VN) {}
VarInit(const VarInit &Other); // Do not define.
@@ -1074,7 +997,11 @@ public:
return Ty->convertValue(const_cast<VarInit *>(this));
}
- const std::string &getName() const { return VarName; }
+ const std::string &getName() const;
+ Init *getNameInit() const { return VarName; }
+ std::string getNameInitAsString() const {
+ return getNameInit()->getAsUnquotedString();
+ }
virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
unsigned Bit) const;
@@ -1092,7 +1019,7 @@ public:
///
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
- virtual std::string getAsString() const { return VarName; }
+ virtual std::string getAsString() const { return getName(); }
};
@@ -1201,8 +1128,7 @@ public:
///
virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
unsigned Bit) const {
- assert(0 && "Illegal bit reference off def");
- return 0;
+ llvm_unreachable("Illegal bit reference off def");
}
/// resolveListElementReference - This method is used to implement
@@ -1210,8 +1136,7 @@ public:
/// now, we return the resolved value, otherwise we return null.
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const {
- assert(0 && "Illegal element reference off def");
- return 0;
+ llvm_unreachable("Illegal element reference off def");
}
};
@@ -1320,14 +1245,12 @@ public:
virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
unsigned Bit) const {
- assert(0 && "Illegal bit reference off dag");
- return 0;
+ llvm_unreachable("Illegal bit reference off dag");
}
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const {
- assert(0 && "Illegal element reference off dag");
- return 0;
+ llvm_unreachable("Illegal element reference off dag");
}
};
@@ -1345,6 +1268,10 @@ public:
RecordVal(const std::string &N, RecTy *T, unsigned P);
const std::string &getName() const;
+ const Init *getNameInit() const { return Name; }
+ std::string getNameInitAsString() const {
+ return getNameInit()->getAsUnquotedString();
+ }
unsigned getPrefix() const { return Prefix; }
RecTy *getType() const { return Ty; }
@@ -1375,7 +1302,7 @@ class Record {
unsigned ID;
Init *Name;
SMLoc Loc;
- std::vector<std::string> TemplateArgs;
+ std::vector<Init *> TemplateArgs;
std::vector<RecordVal> Values;
std::vector<Record*> SuperClasses;
@@ -1384,13 +1311,21 @@ class Record {
DefInit *TheInit;
+ void init();
void checkName();
public:
// Constructs a record.
explicit Record(const std::string &N, SMLoc loc, RecordKeeper &records) :
- ID(LastID++), Name(StringInit::get(N)), Loc(loc), TrackedRecords(records), TheInit(0) {}
+ ID(LastID++), Name(StringInit::get(N)), Loc(loc), TrackedRecords(records),
+ TheInit(0) {
+ init();
+ }
+ explicit Record(Init *N, SMLoc loc, RecordKeeper &records) :
+ ID(LastID++), Name(N), Loc(loc), TrackedRecords(records), TheInit(0) {
+ init();
+ }
~Record() {}
@@ -1400,6 +1335,13 @@ public:
unsigned getID() const { return ID; }
const std::string &getName() const;
+ Init *getNameInit() const {
+ return Name;
+ }
+ const std::string getNameInitAsString() const {
+ return getNameInit()->getAsUnquotedString();
+ }
+
void setName(Init *Name); // Also updates RecordKeeper.
void setName(const std::string &Name); // Also updates RecordKeeper.
@@ -1408,46 +1350,69 @@ public:
/// get the corresponding DefInit.
DefInit *getDefInit();
- const std::vector<std::string> &getTemplateArgs() const {
+ const std::vector<Init *> &getTemplateArgs() const {
return TemplateArgs;
}
const std::vector<RecordVal> &getValues() const { return Values; }
const std::vector<Record*> &getSuperClasses() const { return SuperClasses; }
- bool isTemplateArg(StringRef Name) const {
+ bool isTemplateArg(Init *Name) const {
for (unsigned i = 0, e = TemplateArgs.size(); i != e; ++i)
if (TemplateArgs[i] == Name) return true;
return false;
}
+ bool isTemplateArg(StringRef Name) const {
+ return isTemplateArg(StringInit::get(Name.str()));
+ }
- const RecordVal *getValue(StringRef Name) const {
+ const RecordVal *getValue(const Init *Name) const {
for (unsigned i = 0, e = Values.size(); i != e; ++i)
- if (Values[i].getName() == Name) return &Values[i];
+ if (Values[i].getNameInit() == Name) return &Values[i];
return 0;
}
- RecordVal *getValue(StringRef Name) {
+ const RecordVal *getValue(StringRef Name) const {
+ return getValue(StringInit::get(Name));
+ }
+ RecordVal *getValue(const Init *Name) {
for (unsigned i = 0, e = Values.size(); i != e; ++i)
- if (Values[i].getName() == Name) return &Values[i];
+ if (Values[i].getNameInit() == Name) return &Values[i];
return 0;
}
+ RecordVal *getValue(StringRef Name) {
+ return getValue(StringInit::get(Name));
+ }
- void addTemplateArg(StringRef Name) {
+ void addTemplateArg(Init *Name) {
assert(!isTemplateArg(Name) && "Template arg already defined!");
TemplateArgs.push_back(Name);
}
+ void addTemplateArg(StringRef Name) {
+ addTemplateArg(StringInit::get(Name.str()));
+ }
void addValue(const RecordVal &RV) {
- assert(getValue(RV.getName()) == 0 && "Value already added!");
+ assert(getValue(RV.getNameInit()) == 0 && "Value already added!");
Values.push_back(RV);
+ if (Values.size() > 1)
+ // Keep NAME at the end of the list. It makes record dumps a
+ // bit prettier and allows TableGen tests to be written more
+ // naturally. Tests can use CHECK-NEXT to look for Record
+ // fields they expect to see after a def. They can't do that if
+ // NAME is the first Record field.
+ std::swap(Values[Values.size() - 2], Values[Values.size() - 1]);
}
- void removeValue(StringRef Name) {
+ void removeValue(Init *Name) {
for (unsigned i = 0, e = Values.size(); i != e; ++i)
- if (Values[i].getName() == Name) {
+ if (Values[i].getNameInit() == Name) {
Values.erase(Values.begin()+i);
return;
}
- assert(0 && "Cannot remove an entry that does not exist!");
+ llvm_unreachable("Cannot remove an entry that does not exist!");
+ }
+
+ void removeValue(StringRef Name) {
+ removeValue(StringInit::get(Name.str()));
}
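
Because StringInit::get interns its argument, the StringRef convenience overloads above reduce every name lookup to an Init pointer comparison. A minimal sketch under that assumption (the record and keeper names are hypothetical):

  RecordKeeper Records;
  Record *R = new Record("MyDef", SMLoc(), Records);
  R->addTemplateArg(StringRef("T"));               // interns "T" as a StringInit
  assert(R->isTemplateArg(StringInit::get("T")));  // same Init*, so this holds
  Records.addDef(R);                               // keyed by getNameInitAsString()
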
bool isSubClassOf(const Record *R) const {
@@ -1459,7 +1424,7 @@ public:
bool isSubClassOf(StringRef Name) const {
for (unsigned i = 0, e = SuperClasses.size(); i != e; ++i)
- if (SuperClasses[i]->getName() == Name)
+ if (SuperClasses[i]->getNameInitAsString() == Name)
return true;
return false;
}
@@ -1553,12 +1518,6 @@ public:
/// the value is not the right type.
///
DagInit *getValueAsDag(StringRef FieldName) const;
-
- /// getValueAsCode - This method looks up the specified field and returns
- /// its value as the string data in a CodeInit, throwing an exception if the
- /// field does not exist or if the value is not a code object.
- ///
- std::string getValueAsCode(StringRef FieldName) const;
};
raw_ostream &operator<<(raw_ostream &OS, const Record &R);
@@ -1576,6 +1535,7 @@ struct MultiClass {
class RecordKeeper {
std::map<std::string, Record*> Classes, Defs;
+
public:
~RecordKeeper() {
for (std::map<std::string, Record*>::iterator I = Classes.begin(),
@@ -1598,12 +1558,12 @@ public:
return I == Defs.end() ? 0 : I->second;
}
void addClass(Record *R) {
- assert(getClass(R->getName()) == 0 && "Class already exists!");
- Classes.insert(std::make_pair(R->getName(), R));
+ assert(getClass(R->getNameInitAsString()) == 0 && "Class already exists!");
+ Classes.insert(std::make_pair(R->getNameInitAsString(), R));
}
void addDef(Record *R) {
- assert(getDef(R->getName()) == 0 && "Def already exists!");
- Defs.insert(std::make_pair(R->getName(), R));
+ assert(getDef(R->getNameInitAsString()) == 0 && "Def already exists!");
+ Defs.insert(std::make_pair(R->getNameInitAsString(), R));
}
/// removeClass - Remove, but do not delete, the specified record.
@@ -1650,6 +1610,16 @@ struct LessRecordFieldName {
raw_ostream &operator<<(raw_ostream &OS, const RecordKeeper &RK);
+/// QualifyName - Return an Init with a qualifier prefix referring
+/// to CurRec's name.
+Init *QualifyName(Record &CurRec, MultiClass *CurMultiClass,
+ Init *Name, const std::string &Scoper);
+
+/// QualifyName - Return an Init with a qualifier prefix referring
+/// to CurRec's name.
+Init *QualifyName(Record &CurRec, MultiClass *CurMultiClass,
+ const std::string &Name, const std::string &Scoper);
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/TableGen/TableGenAction.h b/contrib/llvm/include/llvm/TableGen/TableGenAction.h
index 9f1c23c..733ae62 100644
--- a/contrib/llvm/include/llvm/TableGen/TableGenAction.h
+++ b/contrib/llvm/include/llvm/TableGen/TableGenAction.h
@@ -21,6 +21,7 @@ class raw_ostream;
class RecordKeeper;
class TableGenAction {
+ virtual void anchor();
public:
virtual ~TableGenAction() {}
diff --git a/contrib/llvm/include/llvm/TableGen/TableGenBackend.h b/contrib/llvm/include/llvm/TableGen/TableGenBackend.h
index 853f92e..3ebcd92 100644
--- a/contrib/llvm/include/llvm/TableGen/TableGenBackend.h
+++ b/contrib/llvm/include/llvm/TableGen/TableGenBackend.h
@@ -16,7 +16,6 @@
#define LLVM_TABLEGEN_TABLEGENBACKEND_H
#include "llvm/Support/raw_ostream.h"
-#include <string>
namespace llvm {
@@ -24,6 +23,7 @@ class Record;
class RecordKeeper;
struct TableGenBackend {
+ virtual void anchor();
virtual ~TableGenBackend() {}
// run - All TableGen backends should implement the run method, which should
@@ -34,7 +34,7 @@ struct TableGenBackend {
public: // Useful helper routines...
/// EmitSourceFileHeader - Output a LLVM style file header to the specified
/// ostream.
- void EmitSourceFileHeader(const std::string &Desc, raw_ostream &OS) const;
+ void EmitSourceFileHeader(StringRef Desc, raw_ostream &OS) const;
};
diff --git a/contrib/llvm/include/llvm/Target/Mangler.h b/contrib/llvm/include/llvm/Target/Mangler.h
index c1c118b..d5e165e 100644
--- a/contrib/llvm/include/llvm/Target/Mangler.h
+++ b/contrib/llvm/include/llvm/Target/Mangler.h
@@ -17,11 +17,9 @@
#include "llvm/ADT/DenseMap.h"
namespace llvm {
-class StringRef;
class Twine;
-class Value;
class GlobalValue;
-template <typename T> class SmallVectorImpl;
+template <typename T> class SmallVectorImpl;
class MCContext;
class MCSymbol;
class TargetData;
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
index aa9a4f5..fa1ec55 100644
--- a/contrib/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -22,8 +22,12 @@ include "llvm/Intrinsics.td"
class RegisterClass; // Forward def
// SubRegIndex - Use instances of SubRegIndex to identify subregisters.
-class SubRegIndex {
+class SubRegIndex<list<SubRegIndex> comps = []> {
string Namespace = "";
+
+ // ComposedOf - A list of two SubRegIndex instances, [A, B].
+ // This indicates that this SubRegIndex is the result of composing A and B.
+ list<SubRegIndex> ComposedOf = comps;
}
// RegAltNameIndex - The alternate name set to use for register operands of
@@ -83,9 +87,15 @@ class Register<string n, list<string> altNames = []> {
// CostPerUse - Additional cost of instructions using this register compared
// to other registers in its class. The register allocator will try to
// minimize the number of instructions using a register with a CostPerUse.
- // This is used by the x86-64 and ARM Thumb targets where some registers
+ // This is used by the x86-64 and ARM Thumb targets where some registers
// require larger instruction encodings.
int CostPerUse = 0;
+
+ // CoveredBySubRegs - When this bit is set, the value of this register is
+ // completely determined by the value of its sub-registers. For example, the
+ // x86 register AX is covered by its sub-registers AL and AH, but EAX is not
+ // covered by its sub-register AX.
+ bit CoveredBySubRegs = 0;
}
// RegisterWithSubRegs - This can be used to define instances of Register which
@@ -194,12 +204,15 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
//
// (decimate GPR, 2) - Pick every N'th element, starting with the first.
//
+// (interleave A, B, ...) - Interleave the elements from each argument list.
+//
// All of these operators work on ordered sets, not lists. That means
// duplicates are removed from sub-expressions.
// Set operators. The rest is defined in TargetSelectionDAG.td.
def sequence;
def decimate;
+def interleave;
// RegisterTuples - Automatically generate super-registers by forming tuples of
// sub-registers. This is useful for modeling register sequence constraints
@@ -356,6 +369,15 @@ class Instruction {
// associated with them. Once we've migrated all of them over to true
// pseudo-instructions that are lowered to real instructions prior to
// the printer/emitter, we can remove this attribute and just use isPseudo.
+ //
+ // The intended use is:
+ // isPseudo: Does not have encoding information and should be expanded,
+ // at the latest, during lowering to MCInst.
+ //
+ // isCodeGenOnly: Does have encoding information and can go through to the
+ // CodeEmitter unchanged, but duplicates a canonical instruction
+ // definition's encoding and should be ignored when constructing the
+ // assembler match tables.
bit isCodeGenOnly = 0;
// Is this instruction a pseudo instruction for use by the assembler parser.
@@ -414,7 +436,7 @@ class Predicate<string cond> {
/// NoHonorSignDependentRounding - This predicate is true if support for
/// sign-dependent-rounding is not enabled.
def NoHonorSignDependentRounding
- : Predicate<"!HonorSignDependentRoundingFPMath()">;
+ : Predicate<"!TM.Options.HonorSignDependentRoundingFPMath()">;
class Requires<list<Predicate> preds> {
list<Predicate> Predicates = preds;
@@ -679,6 +701,11 @@ def COPY : Instruction {
let neverHasSideEffects = 1;
let isAsCheapAsAMove = 1;
}
+def BUNDLE : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
+ let AsmString = "BUNDLE";
+}
}
//===----------------------------------------------------------------------===//
@@ -698,7 +725,15 @@ class AsmParser {
// function of the AsmParser class to call on every matched instruction.
// This can be used to perform target specific instruction post-processing.
string AsmParserInstCleanup = "";
+}
+def DefaultAsmParser : AsmParser;
+//===----------------------------------------------------------------------===//
+// AsmParserVariant - Subtargets can have multiple different assembly parsers
+// (e.g. AT&T vs Intel syntax on X86 for example). This class can be
+// implemented by targets to describe such variants.
+//
+class AsmParserVariant {
// Variant - AsmParsers can be of multiple different variants. Variants are
// used to support targets that need to parse multiple formats for the
// assembly language.
@@ -715,7 +750,7 @@ class AsmParser {
// purposes of matching.
string RegisterPrefix = "";
}
-def DefaultAsmParser : AsmParser;
+def DefaultAsmParserVariant : AsmParserVariant;
/// AssemblerPredicate - This is a Predicate that can be used when the assembler
/// matches instructions and aliases.
@@ -724,7 +759,20 @@ class AssemblerPredicate<string cond> {
string AssemblerCondString = cond;
}
-
+/// TokenAlias - This class allows targets to define assembler token
+/// operand aliases. That is, a token literal operand which is equivalent
+/// to another, canonical, token literal. For example, ARM allows:
+/// vmov.u32 s4, #0 -> vmov.i32, #0
+/// 'u32' is a more specific designator for the 32-bit integer type specifier
+/// and is legal for any instruction which accepts 'i32' as a datatype suffix.
+/// def : TokenAlias<".u32", ".i32">;
+///
+/// This works by marking the match class of 'From' as a subclass of the
+/// match class of 'To'.
+class TokenAlias<string From, string To> {
+ string FromToken = From;
+ string ToToken = To;
+}
/// MnemonicAlias - This class allows targets to define assembler mnemonic
/// aliases. This should be used when all forms of one mnemonic are accepted
@@ -813,6 +861,10 @@ class Target {
// AssemblyParsers - The AsmParser instances available for this target.
list<AsmParser> AssemblyParsers = [DefaultAsmParser];
+ /// AssemblyParserVariants - The AsmParserVariant instances available for
+ /// this target.
+ list<AsmParserVariant> AssemblyParserVariants = [DefaultAsmParserVariant];
+
// AssemblyWriters - The AsmWriter instances available for this target.
list<AsmWriter> AssemblyWriters = [DefaultAsmWriter];
}
diff --git a/contrib/llvm/include/llvm/Target/TargetCallingConv.h b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
index 275957e..a6251e7 100644
--- a/contrib/llvm/include/llvm/Target/TargetCallingConv.h
+++ b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
@@ -14,6 +14,10 @@
#ifndef LLVM_TARGET_TARGETCALLINGCONV_H
#define LLVM_TARGET_TARGETCALLINGCONV_H
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+#include <string>
+
namespace llvm {
namespace ISD {
diff --git a/contrib/llvm/include/llvm/Target/TargetCallingConv.td b/contrib/llvm/include/llvm/Target/TargetCallingConv.td
index 6da3ba1..a53ed29 100644
--- a/contrib/llvm/include/llvm/Target/TargetCallingConv.td
+++ b/contrib/llvm/include/llvm/Target/TargetCallingConv.td
@@ -133,3 +133,14 @@ class CCDelegateTo<CallingConv cc> : CCAction {
class CallingConv<list<CCAction> actions> {
list<CCAction> Actions = actions;
}
+
+/// CalleeSavedRegs - A list of callee saved registers for a given calling
+/// convention. The order of registers is used by PrologEpilogInsertion when
+/// allocating stack slots for saved registers.
+///
+/// For each CalleeSavedRegs def, TableGen will emit a FOO_SaveList array for
+/// returning from getCalleeSavedRegs(), and a FOO_RegMask bit mask suitable for
+/// returning from getCallPreservedMask().
+class CalleeSavedRegs<dag saves> {
+ dag SaveList = saves;
+}
diff --git a/contrib/llvm/include/llvm/Target/TargetData.h b/contrib/llvm/include/llvm/Target/TargetData.h
index 26fd187..d116f39 100644
--- a/contrib/llvm/include/llvm/Target/TargetData.h
+++ b/contrib/llvm/include/llvm/Target/TargetData.h
@@ -44,7 +44,7 @@ enum AlignTypeEnum {
AGGREGATE_ALIGN = 'a', ///< Aggregate alignment
STACK_ALIGN = 's' ///< Stack objects alignment
};
-
+
/// Target alignment element.
///
/// Stores the alignment data associated with a given alignment type (pointer,
@@ -80,7 +80,7 @@ private:
unsigned StackNaturalAlign; ///< Stack natural alignment
SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
-
+
/// Alignments- Where the primitive type alignment data is stored.
///
/// @sa init().
@@ -88,7 +88,7 @@ private:
/// pointers vs. 64-bit pointers by extending TargetAlignment, but for now,
/// we don't.
SmallVector<TargetAlignElem, 16> Alignments;
-
+
/// InvalidAlignmentElem - This member is a signal that a requested alignment
/// type and bit width were not found in the SmallVector.
static const TargetAlignElem InvalidAlignmentElem;
@@ -112,19 +112,30 @@ private:
return &align != &InvalidAlignmentElem;
}
+ /// Initialise a TargetData object with default values, and ensure that the
+ /// target data pass is registered.
+ void init();
+
public:
/// Default ctor.
///
/// @note This has to exist, because this is a pass, but it should never be
/// used.
TargetData();
-
+
/// Constructs a TargetData from a specification string. See init().
explicit TargetData(StringRef TargetDescription)
: ImmutablePass(ID) {
- init(TargetDescription);
+ std::string errMsg = parseSpecifier(TargetDescription, this);
+ assert(errMsg == "" && "Invalid target data layout string.");
+ (void)errMsg;
}
+ /// Parses a target data specification string. Returns an error message
+ /// if the string is malformed, or the empty string on success. Optionally
+ /// initialises a TargetData object if passed a non-null pointer.
+ static std::string parseSpecifier(StringRef TargetDescription, TargetData* td = 0);
+
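
For example, the new static entry point can validate a layout string before any pass object exists; a hedged sketch (the layout string and error text are illustrative only):

  std::string Err = TargetData::parseSpecifier("e-p:64:64:64-i64:64:64");
  if (!Err.empty())
    report_fatal_error("bad datalayout: " + Err);  // malformed specifier
  TargetData TD("e-p:64:64:64-i64:64:64");         // ctor asserts on the same parse
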
/// Initialize target data from properties stored in the module.
explicit TargetData(const Module *M);
@@ -141,9 +152,6 @@ public:
~TargetData(); // Not virtual, do not subclass this class
- //! Parse a target data layout string and initialize TargetData alignments.
- void init(StringRef TargetDescription);
-
/// Target endianness...
bool isLittleEndian() const { return LittleEndian; }
bool isBigEndian() const { return !LittleEndian; }
@@ -152,7 +160,7 @@ public:
/// TargetData. This representation is in the same format accepted by the
/// string constructor above.
std::string getStringRepresentation() const;
-
+
/// isLegalInteger - This function returns true if the specified type is
/// known to be a native integer type supported by the CPU. For example,
/// i64 is not native on most 32-bit CPUs and i37 is not native on any known
@@ -166,7 +174,7 @@ public:
return true;
return false;
}
-
+
bool isIllegalInteger(unsigned Width) const {
return !isLegalInteger(Width);
}
@@ -251,11 +259,11 @@ public:
/// getABITypeAlignment - Return the minimum ABI-required alignment for the
/// specified type.
unsigned getABITypeAlignment(Type *Ty) const;
-
+
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
-
+
/// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
/// for the specified type when it is part of a call frame.
@@ -305,7 +313,7 @@ public:
assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!");
return (Val + (Alignment-1)) & ~UIntTy(Alignment-1);
}
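
As a quick check of the mask arithmetic in RoundUpAlignment: rounding Val = 13 up to Alignment = 8 computes (13 + 7) & ~7 = 20 & ~7 = 16.
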
-
+
static char ID; // Pass identification, replacement for typeid
};
diff --git a/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h b/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
index b97f3e2..114295e 100644
--- a/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetELFWriterInfo.h
@@ -15,9 +15,6 @@
#define LLVM_TARGET_TARGETELFWRITERINFO_H
namespace llvm {
- class Function;
- class TargetData;
- class TargetMachine;
//===--------------------------------------------------------------------===//
// TargetELFWriterInfo
diff --git a/contrib/llvm/include/llvm/Target/TargetFrameLowering.h b/contrib/llvm/include/llvm/Target/TargetFrameLowering.h
index 4c759b2..d56db7b 100644
--- a/contrib/llvm/include/llvm/Target/TargetFrameLowering.h
+++ b/contrib/llvm/include/llvm/Target/TargetFrameLowering.h
@@ -15,8 +15,6 @@
#define LLVM_TARGET_TARGETFRAMELOWERING_H
#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/MC/MCDwarf.h"
-#include "llvm/ADT/ArrayRef.h"
#include <utility>
#include <vector>
@@ -24,8 +22,6 @@
namespace llvm {
class CalleeSavedInfo;
class MachineFunction;
- class MachineBasicBlock;
- class MachineMove;
class RegScavenger;
/// Information about stack frame layout on the target. It holds the direction
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
index 07f614d..d1e380c 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -15,6 +15,7 @@
#define LLVM_TARGET_TARGETINSTRINFO_H
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineFunction.h"
namespace llvm {
@@ -278,8 +279,7 @@ public:
/// This is only invoked in cases where AnalyzeBranch returns success. It
/// returns the number of instructions that were removed.
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
- assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
- return 0;
+ llvm_unreachable("Target didn't implement TargetInstrInfo::RemoveBranch!");
}
/// InsertBranch - Insert branch code into the end of the specified
@@ -296,8 +296,7 @@ public:
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const {
- assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
- return 0;
+ llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!");
}
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
@@ -353,12 +352,28 @@ public:
return false;
}
+ /// isProfitableToUnpredicate - Return true if it's profitable to unpredicate
+ /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
+ /// exclusive predicates.
+ /// e.g.
+ /// subeq r0, r1, #1
+ /// addne r0, r1, #1
+ /// =>
+ /// sub r0, r1, #1
+ /// addne r0, r1, #1
+ ///
+ /// This may be profitable if conditional instructions are always executed.
+ virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+ MachineBasicBlock &FMBB) const {
+ return false;
+ }
+
/// copyPhysReg - Emit instructions to copy a pair of physical registers.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
- assert(0 && "Target didn't implement TargetInstrInfo::copyPhysReg!");
+ llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
}
/// storeRegToStackSlot - Store the specified register of the given register
@@ -371,7 +386,8 @@ public:
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
+ llvm_unreachable("Target didn't implement "
+ "TargetInstrInfo::storeRegToStackSlot!");
}
/// loadRegFromStackSlot - Load the specified register of the given register
@@ -383,7 +399,8 @@ public:
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
+ llvm_unreachable("Target didn't implement "
+ "TargetInstrInfo::loadRegFromStackSlot!");
}
/// expandPostRAPseudo - This function is called for all pseudo instructions
@@ -535,7 +552,7 @@ public:
/// isUnpredicatedTerminator - Returns true if the instruction is a
/// terminator instruction that has not been predicated.
- virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
+ virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const = 0;
/// PredicateInstruction - Convert the instruction into a predicated
/// instruction. It returns true if the operation was successful.
@@ -646,7 +663,16 @@ public:
virtual int getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
- SDNode *UseNode, unsigned UseIdx) const;
+ SDNode *UseNode, unsigned UseIdx) const = 0;
+
+ /// getOutputLatency - Compute and return the output dependency latency of
+ /// a given pair of defs which both target the same register. This is usually
+ /// one.
+ virtual unsigned getOutputLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *DepMI) const {
+ return 1;
+ }
/// getInstrLatency - Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
@@ -656,7 +682,7 @@ public:
unsigned *PredCost = 0) const;
virtual int getInstrLatency(const InstrItineraryData *ItinData,
- SDNode *Node) const;
+ SDNode *Node) const = 0;
/// isHighLatencyDef - Return true if this opcode has high latency to its
/// result.
@@ -718,6 +744,80 @@ public:
///
virtual void setExecutionDomain(MachineInstr *MI, unsigned Domain) const {}
+
+ /// getPartialRegUpdateClearance - Returns the preferred minimum clearance
+ /// before an instruction with an unwanted partial register update.
+ ///
+ /// Some instructions only write part of a register, and implicitly need to
+ /// read the other parts of the register. This may cause unwanted stalls
+ /// preventing otherwise unrelated instructions from executing in parallel in
+ /// an out-of-order CPU.
+ ///
+ /// For example, the x86 instruction cvtsi2ss writes its result to bits
+ /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
+ /// the instruction needs to wait for the old value of the register to become
+ /// available:
+ ///
+ /// addps %xmm1, %xmm0
+ /// movaps %xmm0, (%rax)
+ /// cvtsi2ss %rbx, %xmm0
+ ///
+ /// In the code above, the cvtsi2ss instruction needs to wait for the addps
+ /// instruction before it can issue, even though the high bits of %xmm0
+ /// probably aren't needed.
+ ///
+ /// This hook returns the preferred clearance before MI, measured in
+ /// instructions. Other defs of MI's operand OpNum are avoided in the last N
+ /// instructions before MI. It should only return a positive value for
+ /// unwanted dependencies. If the old bits of the defined register have
+ /// useful values, or if MI is determined to otherwise read the dependency,
+ /// the hook should return 0.
+ ///
+ /// The unwanted dependency may be handled by:
+ ///
+ /// 1. Allocating the same register for an MI def and use. That makes the
+ /// unwanted dependency identical to a required dependency.
+ ///
+ /// 2. Allocating a register for the def that has no defs in the previous N
+ /// instructions.
+ ///
+ /// 3. Calling breakPartialRegDependency() with the same arguments. This
+ /// allows the target to insert a dependency breaking instruction.
+ ///
+ virtual unsigned
+ getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ // The default implementation returns 0 for no partial register dependency.
+ return 0;
+ }
+
+ /// breakPartialRegDependency - Insert a dependency-breaking instruction
+ /// before MI to eliminate an unwanted dependency on OpNum.
+ ///
+ /// If it wasn't possible to avoid a def in the last N instructions before MI
+ /// (see getPartialRegUpdateClearance), this hook will be called to break the
+ /// unwanted dependency.
+ ///
+ /// On x86, an xorps instruction can be used as a dependency breaker:
+ ///
+ /// addps %xmm1, %xmm0
+ /// movaps %xmm0, (%rax)
+ /// xorps %xmm0, %xmm0
+ /// cvtsi2ss %rbx, %xmm0
+ ///
+ /// An <imp-kill> operand should be added to MI if an instruction was
+ /// inserted. This ties the instructions together in the post-ra scheduler.
+ ///
+ virtual void
+ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {}
+
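
A hypothetical target override wiring the two hooks together (the target namespace, opcode, and clearance value are invented for illustration):

  unsigned MyInstrInfo::getPartialRegUpdateClearance(
      const MachineInstr *MI, unsigned OpNum,
      const TargetRegisterInfo *TRI) const {
    if (MI->getOpcode() == My::CVTSI2SSrr)  // a known partial-write instruction
      return 16;                            // ask for 16 instructions of clearance
    return 0;                               // no unwanted dependency otherwise
  }
  void MyInstrInfo::breakPartialRegDependency(
      MachineBasicBlock::iterator MI, unsigned OpNum,
      const TargetRegisterInfo *TRI) const {
    // Insert a zeroing idiom (e.g. xorps on x86) on MI's OpNum register here,
    // then add the <imp-kill> operand as described above.
  }
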
+ /// Create machine specific model for scheduling.
+ virtual DFAPacketizer*
+ CreateTargetScheduleState(const TargetMachine*, const ScheduleDAG*) const {
+ return NULL;
+ }
+
private:
int CallFrameSetupOpcode, CallFrameDestroyOpcode;
};
@@ -746,6 +846,7 @@ public:
virtual bool hasStoreToStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
+ virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
virtual bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
virtual void reMaterialize(MachineBasicBlock &MBB,
@@ -761,6 +862,13 @@ public:
virtual bool isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
+ using TargetInstrInfo::getOperandLatency;
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const;
+ using TargetInstrInfo::getInstrLatency;
+ virtual int getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const;
bool usePreRAHazardRecognizer() const;
diff --git a/contrib/llvm/include/llvm/Target/TargetJITInfo.h b/contrib/llvm/include/llvm/Target/TargetJITInfo.h
index b198eb6..044afd9 100644
--- a/contrib/llvm/include/llvm/Target/TargetJITInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetJITInfo.h
@@ -17,9 +17,9 @@
#ifndef LLVM_TARGET_TARGETJITINFO_H
#define LLVM_TARGET_TARGETJITINFO_H
-#include <cassert>
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/DataTypes.h"
+#include <cassert>
namespace llvm {
class Function;
@@ -30,6 +30,7 @@ namespace llvm {
/// TargetJITInfo - Target specific information required by the Just-In-Time
/// code generator.
class TargetJITInfo {
+ virtual void anchor();
public:
virtual ~TargetJITInfo() {}
@@ -45,8 +46,8 @@ namespace llvm {
/// ptr.
virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
JITCodeEmitter &JCE) {
- assert(0 && "This target doesn't implement emitGlobalValueIndirectSym!");
- return 0;
+ llvm_unreachable("This target doesn't implement "
+ "emitGlobalValueIndirectSym!");
}
/// Records the required size and alignment for a call stub in bytes.
@@ -57,8 +58,6 @@ namespace llvm {
/// Returns the maximum size and alignment for a call stub on this target.
virtual StubLayout getStubLayout() {
llvm_unreachable("This target doesn't implement getStubLayout!");
- StubLayout Result = {0, 0};
- return Result;
}
/// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
@@ -68,15 +67,13 @@ namespace llvm {
/// aligned from the address the JCE was set up to emit at.
virtual void *emitFunctionStub(const Function* F, void *Target,
JITCodeEmitter &JCE) {
- assert(0 && "This target doesn't implement emitFunctionStub!");
- return 0;
+ llvm_unreachable("This target doesn't implement emitFunctionStub!");
}
/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
/// specific basic block.
virtual uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase) {
- assert(0 && "This target doesn't implement getPICJumpTableEntry!");
- return 0;
+ llvm_unreachable("This target doesn't implement getPICJumpTableEntry!");
}
/// LazyResolverFn - This typedef is used to represent the function that
@@ -97,8 +94,7 @@ namespace llvm {
/// function, and giving the JIT the target function used to do the lazy
/// resolving.
virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn) {
- assert(0 && "Not implemented for this target!");
- return 0;
+ llvm_unreachable("Not implemented for this target!");
}
/// relocate - Before the JIT can run a block of code that has been emitted,
@@ -114,8 +110,7 @@ namespace llvm {
/// handling thread local variables. This method returns a value only
/// meaningful to the target.
virtual char* allocateThreadLocalMemory(size_t size) {
- assert(0 && "This target does not implement thread local storage!");
- return 0;
+ llvm_unreachable("This target does not implement thread local storage!");
}
/// needsGOT - Allows a target to specify that it would like the
diff --git a/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h b/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
index 02a1a3c..70e26bf 100644
--- a/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetLibraryInfo.h
@@ -11,33 +11,204 @@
#define LLVM_TARGET_TARGETLIBRARYINFO_H
#include "llvm/Pass.h"
+#include "llvm/ADT/DenseMap.h"
namespace llvm {
class Triple;
namespace LibFunc {
enum Func {
- /// void *memset(void *b, int c, size_t len);
- memset,
-
- // void *memcpy(void *s1, const void *s2, size_t n);
+ /// double acos(double x);
+ acos,
+ /// long double acosl(long double x);
+ acosl,
+ /// float acosf(float x);
+ acosf,
+ /// double asin(double x);
+ asin,
+ /// long double asinl(long double x);
+ asinl,
+ /// float asinf(float x);
+ asinf,
+ /// double atan(double x);
+ atan,
+ /// long double atanl(long double x);
+ atanl,
+ /// float atanf(float x);
+ atanf,
+ /// double atan2(double y, double x);
+ atan2,
+ /// long double atan2l(long double y, long double x);
+ atan2l,
+ /// float atan2f(float y, float x);
+ atan2f,
+ /// double ceil(double x);
+ ceil,
+ /// long double ceill(long double x);
+ ceill,
+ /// float ceilf(float x);
+ ceilf,
+ /// double copysign(double x, double y);
+ copysign,
+ /// float copysignf(float x, float y);
+ copysignf,
+ /// long double copysignl(long double x, long double y);
+ copysignl,
+ /// double cos(double x);
+ cos,
+ /// long double cosl(long double x);
+ cosl,
+ /// float cosf(float x);
+ cosf,
+ /// double cosh(double x);
+ cosh,
+ /// long double coshl(long double x);
+ coshl,
+ /// float coshf(float x);
+ coshf,
+ /// double exp(double x);
+ exp,
+ /// long double expl(long double x);
+ expl,
+ /// float expf(float x);
+ expf,
+ /// double exp2(double x);
+ exp2,
+ /// long double exp2l(long double x);
+ exp2l,
+ /// float exp2f(float x);
+ exp2f,
+ /// double expm1(double x);
+ expm1,
+ /// long double expm1l(long double x);
+ expm1l,
+ /// float expm1f(float x);
+ expm1f,
+ /// double fabs(double x);
+ fabs,
+ /// long double fabsl(long double x);
+ fabsl,
+ /// float fabsf(float x);
+ fabsf,
+ /// double floor(double x);
+ floor,
+ /// long double floorl(long double x);
+ floorl,
+ /// float floorf(float x);
+ floorf,
+ /// int fiprintf(FILE *stream, const char *format, ...);
+ fiprintf,
+ /// double fmod(double x, double y);
+ fmod,
+ /// long double fmodl(long double x, long double y);
+ fmodl,
+ /// float fmodf(float x, float y);
+ fmodf,
+ /// int fputs(const char *s, FILE *stream);
+ fputs,
+ /// size_t fwrite(const void *ptr, size_t size, size_t nitems,
+ /// FILE *stream);
+ fwrite,
+ /// int iprintf(const char *format, ...);
+ iprintf,
+ /// double log(double x);
+ log,
+ /// long double logl(long double x);
+ logl,
+ /// float logf(float x);
+ logf,
+ /// double log2(double x);
+ log2,
+ /// long double log2l(long double x);
+ log2l,
+ /// float log2f(float x);
+ log2f,
+ /// double log10(double x);
+ log10,
+ /// long double log10l(long double x);
+ log10l,
+ /// float log10f(float x);
+ log10f,
+ /// double log1p(double x);
+ log1p,
+ /// long double log1pl(long double x);
+ log1pl,
+ /// float log1pf(float x);
+ log1pf,
+ /// void *memcpy(void *s1, const void *s2, size_t n);
memcpy,
-
- // void *memmove(void *s1, const void *s2, size_t n);
+ /// void *memmove(void *s1, const void *s2, size_t n);
memmove,
-
+ /// void *memset(void *b, int c, size_t len);
+ memset,
/// void memset_pattern16(void *b, const void *pattern16, size_t len);
memset_pattern16,
-
- /// int iprintf(const char *format, ...);
- iprintf,
-
+ /// double nearbyint(double x);
+ nearbyint,
+ /// float nearbyintf(float x);
+ nearbyintf,
+ /// long double nearbyintl(long double x);
+ nearbyintl,
+ /// double pow(double x, double y);
+ pow,
+ /// float powf(float x, float y);
+ powf,
+ /// long double powl(long double x, long double y);
+ powl,
+ /// double rint(double x);
+ rint,
+ /// float rintf(float x);
+ rintf,
+ /// long double rintl(long double x);
+ rintl,
+ /// double sin(double x);
+ sin,
+ /// long double sinl(long double x);
+ sinl,
+ /// float sinf(float x);
+ sinf,
+ /// double sinh(double x);
+ sinh,
+ /// long double sinhl(long double x);
+ sinhl,
+ /// float sinhf(float x);
+ sinhf,
/// int siprintf(char *str, const char *format, ...);
siprintf,
-
- /// int fiprintf(FILE *stream, const char *format, ...);
- fiprintf,
-
+ /// double sqrt(double x);
+ sqrt,
+ /// long double sqrtl(long double x);
+ sqrtl,
+ /// float sqrtf(float x);
+ sqrtf,
+ /// double tan(double x);
+ tan,
+ /// long double tanl(long double x);
+ tanl,
+ /// float tanf(float x);
+ tanf,
+ /// double tanh(double x);
+ tanh,
+ /// long double tanhl(long double x);
+ tanhl,
+ /// float tanhf(float x);
+ tanhf,
+ /// double trunc(double x);
+ trunc,
+ /// float truncf(float x);
+ truncf,
+ /// long double truncl(long double x);
+ truncl,
+ /// int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ cxa_atexit,
+ /// void __cxa_guard_abort(guard_t *guard);
+ /// guard_t is int64_t in Itanium ABI or int32_t on ARM eabi.
+ cxa_guard_abort,
+ /// int __cxa_guard_acquire(guard_t *guard);
+ cxa_guard_acquire,
+ /// void __cxa_guard_release(guard_t *guard);
+ cxa_guard_release,
+
NumLibFuncs
};
}
@@ -46,7 +217,24 @@ namespace llvm {
/// library functions are available for the current target, and allows a
/// frontend to disable optimizations through -fno-builtin etc.
class TargetLibraryInfo : public ImmutablePass {
- unsigned char AvailableArray[(LibFunc::NumLibFuncs+7)/8];
+ virtual void anchor();
+ unsigned char AvailableArray[(LibFunc::NumLibFuncs+3)/4];
+ llvm::DenseMap<unsigned, std::string> CustomNames;
+ static const char* StandardNames[LibFunc::NumLibFuncs];
+
+ enum AvailabilityState {
+ StandardName = 3, // (memset to all ones)
+ CustomName = 1,
+ Unavailable = 0 // (memset to all zeros)
+ };
+ void setState(LibFunc::Func F, AvailabilityState State) {
+ AvailableArray[F/4] &= ~(3 << 2*(F&3));
+ AvailableArray[F/4] |= State << 2*(F&3);
+ }
+ AvailabilityState getState(LibFunc::Func F) const {
+ return static_cast<AvailabilityState>((AvailableArray[F/4] >> 2*(F&3)) & 3);
+ }
+
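
A standalone model of the two-bit packing that setState/getState implement; the array now stores four availability states per byte rather than eight booleans (the names here are illustrative, not part of the class):

  enum { NumFuncs = 8 };                        // stand-in for LibFunc::NumLibFuncs
  static unsigned char Avail[(NumFuncs + 3)/4]; // two bits per function
  static void set(unsigned F, unsigned S) {     // S: 0=Unavailable, 1=Custom, 3=Standard
    Avail[F/4] &= ~(3 << 2*(F&3));              // clear this function's two-bit slot
    Avail[F/4] |= S << 2*(F&3);                 // store the new state
  }
  static unsigned get(unsigned F) { return (Avail[F/4] >> 2*(F&3)) & 3; }
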
public:
static char ID;
TargetLibraryInfo();
@@ -56,19 +244,39 @@ public:
/// has - This function is used by optimizations that want to match on or form
/// a given library function.
bool has(LibFunc::Func F) const {
- return (AvailableArray[F/8] & (1 << (F&7))) != 0;
+ return getState(F) != Unavailable;
+ }
+
+ StringRef getName(LibFunc::Func F) const {
+ AvailabilityState State = getState(F);
+ if (State == Unavailable)
+ return StringRef();
+ if (State == StandardName)
+ return StandardNames[F];
+ assert(State == CustomName);
+ return CustomNames.find(F)->second;
}
/// setUnavailable - this can be used by whatever sets up TargetLibraryInfo to
/// ban use of specific library functions.
void setUnavailable(LibFunc::Func F) {
- AvailableArray[F/8] &= ~(1 << (F&7));
+ setState(F, Unavailable);
}
void setAvailable(LibFunc::Func F) {
- AvailableArray[F/8] |= 1 << (F&7);
+ setState(F, StandardName);
}
-
+
+ void setAvailableWithName(LibFunc::Func F, StringRef Name) {
+ if (StandardNames[F] != Name) {
+ setState(F, CustomName);
+ CustomNames[F] = Name;
+ assert(CustomNames.find(F) != CustomNames.end());
+ } else {
+ setState(F, StandardName);
+ }
+ }
+
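
Putting the availability hooks together; a hedged usage sketch (the renamed symbol is hypothetical):

  TargetLibraryInfo TLI;                          // starts with all standard names
  TLI.setUnavailable(LibFunc::memset_pattern16);  // e.g. when not targeting Darwin
  TLI.setAvailableWithName(LibFunc::fwrite, "fwrite_unlocked");
  if (TLI.has(LibFunc::fwrite)) {
    StringRef N = TLI.getName(LibFunc::fwrite);   // yields "fwrite_unlocked"
    (void)N;
  }
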
/// disableAllFunctions - This disables all builtins, which is used for
/// options like -fno-builtin.
void disableAllFunctions();
diff --git a/contrib/llvm/include/llvm/Target/TargetLowering.h b/contrib/llvm/include/llvm/Target/TargetLowering.h
index 013e70a..720c9df 100644
--- a/contrib/llvm/include/llvm/Target/TargetLowering.h
+++ b/contrib/llvm/include/llvm/Target/TargetLowering.h
@@ -25,7 +25,6 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/Support/DebugLoc.h"
@@ -36,41 +35,34 @@
#include <vector>
namespace llvm {
- class AllocaInst;
- class APFloat;
class CallInst;
class CCState;
- class Function;
class FastISel;
class FunctionLoweringInfo;
class ImmutableCallSite;
+ class IntrinsicInst;
class MachineBasicBlock;
class MachineFunction;
- class MachineFrameInfo;
class MachineInstr;
class MachineJumpTableInfo;
class MCContext;
class MCExpr;
- class SDNode;
- class SDValue;
- class SelectionDAG;
template<typename T> class SmallVectorImpl;
class TargetData;
- class TargetMachine;
class TargetRegisterClass;
class TargetLoweringObjectFile;
class Value;
- // FIXME: should this be here?
- namespace TLSModel {
- enum Model {
- GeneralDynamic,
- LocalDynamic,
- InitialExec,
- LocalExec
+ namespace Sched {
+ enum Preference {
+ None, // No preference
+ Source, // Follow source order.
+ RegPressure, // Scheduling for lowest register pressure.
+ Hybrid, // Scheduling for both latency and register pressure.
+ ILP, // Scheduling for ILP in low register pressure mode.
+ VLIW // Scheduling for VLIW targets.
};
}
- TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc);
//===----------------------------------------------------------------------===//
@@ -94,7 +86,7 @@ public:
Custom // Use the LowerOperation hook to implement custom lowering.
};
- /// LegalizeAction - This enum indicates whether a types are legal for a
+ /// LegalizeTypeAction - This enum indicates whether types are legal for a
/// target, and if not, what action should be used to make them valid.
enum LegalizeTypeAction {
TypeLegal, // The target natively supports this type.
@@ -115,8 +107,6 @@ public:
static ISD::NodeType getExtendForContent(BooleanContent Content) {
switch (Content) {
- default:
- assert(false && "Unknown BooleanContent!");
case UndefinedBooleanContent:
// Extend by adding rubbish bits.
return ISD::ANY_EXTEND;
@@ -127,6 +117,7 @@ public:
// Extend by copying the sign bit.
return ISD::SIGN_EXTEND;
}
+ llvm_unreachable("Invalid content kind");
}
/// NOTE: The constructor takes ownership of TLOF.
@@ -199,9 +190,9 @@ public:
/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
- virtual TargetRegisterClass *getRegClassFor(EVT VT) const {
+ virtual const TargetRegisterClass *getRegClassFor(EVT VT) const {
assert(VT.isSimple() && "getRegClassFor called on illegal type!");
- TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
+ const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
assert(RC && "This value type is not natively supported!");
return RC;
}
@@ -292,11 +283,9 @@ public:
VT = getTypeToTransformTo(Context, VT);
break;
default:
- assert(false && "Type is not legal nor is it to be expanded!");
- return VT;
+ llvm_unreachable("Type is not legal nor is it to be expanded!");
}
}
- return VT;
}
/// getVectorTypeBreakdown - Vector types are broken down into some number of
@@ -520,8 +509,19 @@ public:
/// AllowUnknown is true, this will return MVT::Other for types with no EVT
/// counterpart (e.g. structs), otherwise it will assert.
EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
- EVT VT = EVT::getEVT(Ty, AllowUnknown);
- return VT == MVT::iPTR ? PointerTy : VT;
+ // Lower scalar pointers to native pointer types.
+ if (Ty->isPointerTy()) return PointerTy;
+
+ if (Ty->isVectorTy()) {
+ VectorType *VTy = cast<VectorType>(Ty);
+ Type *Elm = VTy->getElementType();
+ // Lower vectors of pointers to native pointer types.
+ if (Elm->isPointerTy())
+ Elm = EVT(PointerTy).getTypeForEVT(Ty->getContext());
+ return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
+ VTy->getNumElements());
+ }
+ return EVT::getEVT(Ty, AllowUnknown);
}
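
Concretely, on a target whose PointerTy is MVT::i64, the branch above maps an IR <4 x i32*> vector type to v4i64, and a scalar i8* simply to i64.
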
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
@@ -554,8 +554,7 @@ public:
if (VT.isInteger()) {
return getRegisterType(Context, getTypeToTransformTo(Context, VT));
}
- assert(0 && "Unsupported extended type!");
- return EVT(MVT::Other); // Not reached
+ llvm_unreachable("Unsupported extended type!");
}
/// getNumRegisters - Return the number of registers that this ValueType will
@@ -580,8 +579,7 @@ public:
unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
return (BitWidth + RegWidth - 1) / RegWidth;
}
- assert(0 && "Unsupported extended type!");
- return 0; // Not reached
+ llvm_unreachable("Unsupported extended type!");
}
/// ShouldShrinkFPConstant - If true, then instruction selection should
@@ -646,7 +644,7 @@ public:
/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
/// means there isn't a need to check it against alignment requirement,
/// probably because the source does not need to be loaded. If
- /// 'NonScalarIntSafe' is true, that means it's safe to return a
+ /// 'IsZeroVal' is true, that means it's safe to return a
/// non-scalar-integer type, e.g. empty string source, constant, or loaded
/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
@@ -654,7 +652,7 @@ public:
/// target-independent logic.
virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
- bool /*NonScalarIntSafe*/,
+ bool /*IsZeroVal*/,
bool /*MemcpyStrSrc*/,
MachineFunction &/*MF*/) const {
return MVT::Other;
@@ -679,10 +677,10 @@ public:
return StackPointerRegisterToSaveRestore;
}
- /// getExceptionAddressRegister - If a physical register, this returns
+ /// getExceptionPointerRegister - If a physical register, this returns
/// the register that receives the exception address on entry to a landing
/// pad.
- unsigned getExceptionAddressRegister() const {
+ unsigned getExceptionPointerRegister() const {
return ExceptionPointerRegister;
}
@@ -772,8 +770,7 @@ public:
LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
MCContext &/*Ctx*/) const {
- assert(0 && "Need to implement this hook if target has custom JTIs");
- return 0;
+ llvm_unreachable("Need to implement this hook if target has custom JTIs");
}
/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
@@ -865,7 +862,6 @@ public:
/// Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -1035,7 +1031,7 @@ protected:
/// addRegisterClass - Add the specified register class as an available
/// regclass for the specified value type. This indicates the selector can
/// handle values of that class natively.
- void addRegisterClass(EVT VT, TargetRegisterClass *RC) {
+ void addRegisterClass(EVT VT, const TargetRegisterClass *RC) {
assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
AvailableRegClasses.push_back(std::make_pair(VT, RC));
RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
@@ -1141,26 +1137,28 @@ protected:
JumpBufAlignment = Align;
}
- /// setMinFunctionAlignment - Set the target's minimum function alignment.
+ /// setMinFunctionAlignment - Set the target's minimum function alignment (in
+ /// log2(bytes))
void setMinFunctionAlignment(unsigned Align) {
MinFunctionAlignment = Align;
}
/// setPrefFunctionAlignment - Set the target's preferred function alignment.
/// This should be set if there is a performance benefit to
- /// higher-than-minimum alignment
+ /// higher-than-minimum alignment (in log2(bytes))
void setPrefFunctionAlignment(unsigned Align) {
PrefFunctionAlignment = Align;
}
/// setPrefLoopAlignment - Set the target's preferred loop alignment. Default
/// alignment is zero, it means the target does not care about loop alignment.
+ /// The alignment is specified in log2(bytes).
void setPrefLoopAlignment(unsigned Align) {
PrefLoopAlignment = Align;
}
/// setMinStackArgumentAlignment - Set the minimum stack alignment of an
- /// argument.
+ /// argument (in log2(bytes)).
void setMinStackArgumentAlignment(unsigned Align) {
MinStackArgumentAlignment = Align;
}
@@ -1196,8 +1194,7 @@ public:
const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
SmallVectorImpl<SDValue> &/*InVals*/) const {
- assert(0 && "Not Implemented");
- return SDValue(); // this is here to silence compiler errors
+ llvm_unreachable("Not Implemented");
}
/// LowerCallTo - This function lowers an abstract call to a function into an
@@ -1224,7 +1221,8 @@ public:
LowerCallTo(SDValue Chain, Type *RetTy, bool RetSExt, bool RetZExt,
bool isVarArg, bool isInreg, unsigned NumFixedArgs,
CallingConv::ID CallConv, bool isTailCall,
- bool isReturnValueUsed, SDValue Callee, ArgListTy &Args,
+ bool doesNotRet, bool isReturnValueUsed,
+ SDValue Callee, ArgListTy &Args,
SelectionDAG &DAG, DebugLoc dl) const;
/// LowerCall - This hook must be implemented to lower calls into the
@@ -1236,14 +1234,13 @@ public:
virtual SDValue
LowerCall(SDValue /*Chain*/, SDValue /*Callee*/,
CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
- bool &/*isTailCall*/,
+ bool /*doesNotRet*/, bool &/*isTailCall*/,
const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
const SmallVectorImpl<SDValue> &/*OutVals*/,
const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
SmallVectorImpl<SDValue> &/*InVals*/) const {
- assert(0 && "Not Implemented");
- return SDValue(); // this is here to silence compiler errors
+ llvm_unreachable("Not Implemented");
}
/// HandleByVal - Target-specific cleanup for formal ByVal parameters.
@@ -1273,14 +1270,15 @@ public:
const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
const SmallVectorImpl<SDValue> &/*OutVals*/,
DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
- assert(0 && "Not Implemented");
- return SDValue(); // this is here to silence compiler errors
+ llvm_unreachable("Not Implemented");
}
/// isUsedByReturnOnly - Return true if the result of the specified node is used
- /// by a return node only. This is used to determine whether it is possible
+ /// by a return node only. It also computes and returns the input chain for the
+ /// tail call.
+ /// This is used to determine whether it is possible
/// to codegen a libcall as tail call at legalization time.
- virtual bool isUsedByReturnOnly(SDNode *) const {
+ virtual bool isUsedByReturnOnly(SDNode *, SDValue &Chain) const {
return false;
}
@@ -1339,7 +1337,7 @@ public:
virtual void ReplaceNodeResults(SDNode * /*N*/,
SmallVectorImpl<SDValue> &/*Results*/,
SelectionDAG &/*DAG*/) const {
- assert(0 && "ReplaceNodeResults not implemented for this target!");
+ llvm_unreachable("ReplaceNodeResults not implemented for this target!");
}
/// getTargetNodeName() - This method returns the name of a target specific
@@ -1531,6 +1529,17 @@ public:
AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
+ /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
+ /// same BB as Load/Store instructions reading the address. This allows as
+ /// much computation as possible to be done in the address mode for that
+ /// operand. This hook also lets targets indicate whether the same sinking
+ /// should be done for intrinsics which load/store.
+ virtual bool GetAddrModeArguments(IntrinsicInst *I,
+ SmallVectorImpl<Value*> &Ops,
+ Type *&AccessTy) const {
+ return false;
+ }
+
/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
@@ -1581,6 +1590,18 @@ public:
return false;
}
+ /// isFNegFree - Return true if an fneg operation is free to the point where
+ /// it is never worthwhile to replace it with a bitwise operation.
+ virtual bool isFNegFree(EVT) const {
+ return false;
+ }
+
+ /// isFAbsFree - Return true if an fabs operation is free to the point where
+ /// it is never worthwhile to replace it with a bitwise operation.
+ virtual bool isFAbsFree(EVT) const {
+ return false;
+ }
+
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
/// from i32 to i8 but not from i32 to i16.
@@ -1593,9 +1614,9 @@ public:
//
SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
SelectionDAG &DAG) const;
- SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
+ SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
std::vector<SDNode*>* Created) const;
- SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
+ SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
std::vector<SDNode*>* Created) const;
@@ -1753,7 +1774,7 @@ private:
/// RegClassForVT - This indicates the default register class to use for
/// each ValueType the target supports natively.
- TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
+ const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
@@ -1925,12 +1946,9 @@ private:
// Vectors with illegal element types are expanded.
EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
return LegalizeKind(TypeSplitVector, NVT);
-
- assert(false && "Unable to handle this kind of vector type");
- return LegalizeKind(TypeLegal, VT);
}
- std::vector<std::pair<EVT, TargetRegisterClass*> > AvailableRegClasses;
+ std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;
/// TargetDAGCombineArray - Targets can specify ISD nodes that they would
/// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
diff --git a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
index 7d06cec..d631f58 100644
--- a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
@@ -15,18 +15,17 @@
#ifndef LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
#define LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
-#include "llvm/ADT/StringRef.h"
+#include "llvm/Module.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/SectionKind.h"
+#include "llvm/ADT/ArrayRef.h"
namespace llvm {
class MachineModuleInfo;
class Mangler;
- class MCAsmInfo;
class MCContext;
class MCExpr;
class MCSection;
- class MCSectionMachO;
class MCSymbol;
class MCStreamer;
class GlobalValue;
@@ -53,7 +52,13 @@ public:
virtual void emitPersonalityValue(MCStreamer &Streamer,
const TargetMachine &TM,
const MCSymbol *Sym) const;
-
+
+ /// emitModuleFlags - Emit the module flags that the platform cares about.
+ virtual void emitModuleFlags(MCStreamer &,
+ ArrayRef<Module::ModuleFlagEntry>,
+ Mangler *, const TargetMachine &) const {
+ }
+
/// shouldEmitUsedDirectiveFor - This hook allows targets to selectively
/// decide not to emit the UsedDirective for some symbols in llvm.used.
/// FIXME: REMOVE this (rdar://7071300)
@@ -86,9 +91,7 @@ public:
const TargetMachine &TM) const {
return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM);
}
-
-
-
+
/// getExplicitSectionGlobal - Targets should implement this method to assign
/// a section to globals with an explicit section specified. The
/// implementation of this method can assume that GV->hasSection() is true.
@@ -121,7 +124,18 @@ public:
const MCExpr *
getExprForDwarfReference(const MCSymbol *Sym, unsigned Encoding,
MCStreamer &Streamer) const;
-
+
+ virtual const MCSection *
+ getStaticCtorSection(unsigned Priority = 65535) const {
+ (void)Priority;
+ return StaticCtorSection;
+ }
+ virtual const MCSection *
+ getStaticDtorSection(unsigned Priority = 65535) const {
+ (void)Priority;
+ return StaticDtorSection;
+ }
+
protected:
virtual const MCSection *
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
diff --git a/contrib/llvm/include/llvm/Target/TargetMachine.h b/contrib/llvm/include/llvm/Target/TargetMachine.h
index 8a8d142..1a05604 100644
--- a/contrib/llvm/include/llvm/Target/TargetMachine.h
+++ b/contrib/llvm/include/llvm/Target/TargetMachine.h
@@ -14,7 +14,8 @@
#ifndef LLVM_TARGET_TARGETMACHINE_H
#define LLVM_TARGET_TARGETMACHINE_H
-#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <string>
@@ -23,11 +24,10 @@ namespace llvm {
class InstrItineraryData;
class JITCodeEmitter;
+class GlobalValue;
class MCAsmInfo;
class MCCodeGenInfo;
class MCContext;
-class Pass;
-class PassManager;
class PassManagerBase;
class Target;
class TargetData;
@@ -37,32 +37,13 @@ class TargetInstrInfo;
class TargetIntrinsicInfo;
class TargetJITInfo;
class TargetLowering;
+class TargetPassConfig;
class TargetRegisterInfo;
class TargetSelectionDAGInfo;
class TargetSubtargetInfo;
class formatted_raw_ostream;
class raw_ostream;
-// Code generation optimization level.
-namespace CodeGenOpt {
- enum Level {
- None, // -O0
- Less, // -O1
- Default, // -O2, -Os
- Aggressive // -O3
- };
-}
-
-namespace Sched {
- enum Preference {
- None, // No preference
- Latency, // Scheduling for shortest total latency.
- RegPressure, // Scheduling for lowest register pressure.
- Hybrid, // Scheduling for both latency and register pressure.
- ILP // Scheduling for ILP in low register pressure mode.
- };
-}
-
//===----------------------------------------------------------------------===//
///
/// TargetMachine - Primary interface to the complete machine description for
@@ -74,7 +55,7 @@ class TargetMachine {
void operator=(const TargetMachine &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
TargetMachine(const Target &T, StringRef TargetTriple,
- StringRef CPU, StringRef FS);
+ StringRef CPU, StringRef FS, const TargetOptions &Options);
/// getSubtargetImpl - virtual method implemented by subclasses that returns
/// a reference to that target's TargetSubtargetInfo-derived member variable.
@@ -101,6 +82,7 @@ protected: // Can only create subclasses.
unsigned MCSaveTempLabels : 1;
unsigned MCUseLoc : 1;
unsigned MCUseCFI : 1;
+ unsigned MCUseDwarfDirectory : 1;
public:
virtual ~TargetMachine();
@@ -111,6 +93,8 @@ public:
const StringRef getTargetCPU() const { return TargetCPU; }
const StringRef getTargetFeatureString() const { return TargetFS; }
+ TargetOptions Options;
+
// Interfaces to the major aspects of target machine information:
// -- Instruction opcode and operand information
// -- Pipelines and scheduling information
@@ -196,6 +180,14 @@ public:
/// setMCUseCFI - Set whether we should use dwarf's .cfi_* directives.
void setMCUseCFI(bool Value) { MCUseCFI = Value; }
+ /// hasMCUseDwarfDirectory - Check whether we should use .file directives with
+ /// explicit directories.
+ bool hasMCUseDwarfDirectory() const { return MCUseDwarfDirectory; }
+
+ /// setMCUseDwarfDirectory - Set whether we should use .file directives
+ /// with explicit directories.
+ void setMCUseDwarfDirectory(bool Value) { MCUseDwarfDirectory = Value; }
+
/// getRelocationModel - Returns the code generation relocation model. The
/// choices are static, PIC, and dynamic-no-pic, and target default.
Reloc::Model getRelocationModel() const;
@@ -204,6 +196,18 @@ public:
/// medium, large, and target default.
CodeModel::Model getCodeModel() const;
+ /// getTLSModel - Returns the TLS model which should be used for the given
+ /// global variable.
+ TLSModel::Model getTLSModel(const GlobalValue *GV) const;
+
+ /// getOptLevel - Returns the optimization level: None, Less,
+ /// Default, or Aggressive.
+ CodeGenOpt::Level getOptLevel() const;
+
+ void setFastISel(bool Enable) { Options.EnableFastISel = Enable; }
+
+ bool shouldPrintMachineCode() const { return Options.PrintMachineCode; }
+
/// getAsmVerbosityDefault - Returns the default value of asm verbosity.
///
static bool getAsmVerbosityDefault();
@@ -236,10 +240,6 @@ public:
CGFT_Null // Do not emit any output.
};
- /// getEnableTailMergeDefault - the default setting for -enable-tail-merge
- /// on this target. User flag overrides.
- virtual bool getEnableTailMergeDefault() const { return true; }
-
/// addPassesToEmitFile - Add passes to the specified pass manager to get the
/// specified file emitted. Typically this will involve several steps of code
/// generation. This method should return true if emission of this file type
@@ -247,8 +247,7 @@ public:
virtual bool addPassesToEmitFile(PassManagerBase &,
formatted_raw_ostream &,
CodeGenFileType,
- CodeGenOpt::Level,
- bool = true) {
+ bool /*DisableVerify*/ = true) {
return true;
}
@@ -260,8 +259,7 @@ public:
///
virtual bool addPassesToEmitMachineCode(PassManagerBase &,
JITCodeEmitter &,
- CodeGenOpt::Level,
- bool = true) {
+ bool /*DisableVerify*/ = true) {
return true;
}
@@ -273,8 +271,7 @@ public:
virtual bool addPassesToEmitMC(PassManagerBase &,
MCContext *&,
raw_ostream &,
- CodeGenOpt::Level,
- bool = true) {
+ bool /*DisableVerify*/ = true) {
return true;
}
};
@@ -285,25 +282,21 @@ public:
class LLVMTargetMachine : public TargetMachine {
protected: // Can only create subclasses.
LLVMTargetMachine(const Target &T, StringRef TargetTriple,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
-
-private:
- /// addCommonCodeGenPasses - Add standard LLVM codegen passes used for
- /// both emitting to assembly files or machine code output.
- ///
- bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level,
- bool DisableVerify, MCContext *&OutCtx);
+ StringRef CPU, StringRef FS, TargetOptions Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
public:
+ /// createPassConfig - Create a pass configuration object to be used by
+ /// addPassToEmitX methods for generating a pipeline of CodeGen passes.
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
/// addPassesToEmitFile - Add passes to the specified pass manager to get the
/// specified file emitted. Typically this will involve several steps of code
- /// generation. If OptLevel is None, the code generator should emit code as
- /// fast as possible, though the generated code may be less efficient.
+ /// generation.
virtual bool addPassesToEmitFile(PassManagerBase &PM,
formatted_raw_ostream &Out,
CodeGenFileType FileType,
- CodeGenOpt::Level,
bool DisableVerify = true);
/// addPassesToEmitMachineCode - Add passes to the specified pass manager to
@@ -314,7 +307,6 @@ public:
///
virtual bool addPassesToEmitMachineCode(PassManagerBase &PM,
JITCodeEmitter &MCE,
- CodeGenOpt::Level,
bool DisableVerify = true);
/// addPassesToEmitMC - Add passes to the specified pass manager to get
@@ -325,65 +317,15 @@ public:
virtual bool addPassesToEmitMC(PassManagerBase &PM,
MCContext *&Ctx,
raw_ostream &OS,
- CodeGenOpt::Level OptLevel,
bool DisableVerify = true);
- /// Target-Independent Code Generator Pass Configuration Options.
-
- /// addPreISelPasses - This method should add any "last minute" LLVM->LLVM
- /// passes (which are run just before instruction selector).
- virtual bool addPreISel(PassManagerBase &, CodeGenOpt::Level) {
- return true;
- }
-
- /// addInstSelector - This method should install an instruction selector pass,
- /// which converts from LLVM code to machine instructions.
- virtual bool addInstSelector(PassManagerBase &, CodeGenOpt::Level) {
- return true;
- }
-
- /// addPreRegAlloc - This method may be implemented by targets that want to
- /// run passes immediately before register allocation. This should return
- /// true if -print-machineinstrs should print after these passes.
- virtual bool addPreRegAlloc(PassManagerBase &, CodeGenOpt::Level) {
- return false;
- }
-
- /// addPostRegAlloc - This method may be implemented by targets that want
- /// to run passes after register allocation but before prolog-epilog
- /// insertion. This should return true if -print-machineinstrs should print
- /// after these passes.
- virtual bool addPostRegAlloc(PassManagerBase &, CodeGenOpt::Level) {
- return false;
- }
-
- /// addPreSched2 - This method may be implemented by targets that want to
- /// run passes after prolog-epilog insertion and before the second instruction
- /// scheduling pass. This should return true if -print-machineinstrs should
- /// print after these passes.
- virtual bool addPreSched2(PassManagerBase &, CodeGenOpt::Level) {
- return false;
- }
-
- /// addPreEmitPass - This pass may be implemented by targets that want to run
- /// passes immediately before machine code is emitted. This should return
- /// true if -print-machineinstrs should print out the code after the passes.
- virtual bool addPreEmitPass(PassManagerBase &, CodeGenOpt::Level) {
- return false;
- }
-
-
/// addCodeEmitter - This pass should be overridden by the target to add a
/// code emitter, if supported. If this is not supported, 'true' should be
/// returned.
- virtual bool addCodeEmitter(PassManagerBase &, CodeGenOpt::Level,
+ virtual bool addCodeEmitter(PassManagerBase &,
JITCodeEmitter &) {
return true;
}
-
- /// getEnableTailMergeDefault - the default setting for -enable-tail-merge
- /// on this target. User flag overrides.
- virtual bool getEnableTailMergeDefault() const { return true; }
};
} // End llvm namespace
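
A minimal sketch of how driver code adapts to this interface change, assuming a TargetMachine and Module constructed elsewhere (the helper name is illustrative, not part of the patch). The key point is that the CodeGenOpt::Level is now fixed when the TargetMachine is built, so it no longer appears in the emission call:

    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Support/FormattedStream.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    // The optimization level travels with the TargetMachine, so
    // addPassesToEmitFile no longer takes a CodeGenOpt::Level argument.
    bool emitAssembly(TargetMachine *TM, Module *M, formatted_raw_ostream &OS) {
      PassManager PM;
      if (TM->addPassesToEmitFile(PM, OS, TargetMachine::CGFT_AssemblyFile))
        return true;  // this target cannot emit assembly
      PM.run(*M);
      return false;
    }
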
diff --git a/contrib/llvm/include/llvm/Target/TargetOpcodes.h b/contrib/llvm/include/llvm/Target/TargetOpcodes.h
index 37f7b2f..f0b181e 100644
--- a/contrib/llvm/include/llvm/Target/TargetOpcodes.h
+++ b/contrib/llvm/include/llvm/Target/TargetOpcodes.h
@@ -82,7 +82,12 @@ namespace TargetOpcode {
/// COPY - Target-independent register copy. This instruction can also be
/// used to copy between subregisters of virtual registers.
- COPY = 13
+ COPY = 13,
+
+ /// BUNDLE - This instruction represents an instruction bundle. Instructions
+ /// that immediately follow a BUNDLE instruction and are marked with the
+ /// 'InsideBundle' flag are inside the bundle.
+ BUNDLE
};
} // end namespace TargetOpcode
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Target/TargetOptions.h b/contrib/llvm/include/llvm/Target/TargetOptions.h
index e07e8c1..12a2757 100644
--- a/contrib/llvm/include/llvm/Target/TargetOptions.h
+++ b/contrib/llvm/include/llvm/Target/TargetOptions.h
@@ -15,151 +15,177 @@
#ifndef LLVM_TARGET_TARGETOPTIONS_H
#define LLVM_TARGET_TARGETOPTIONS_H
+#include <string>
+
namespace llvm {
class MachineFunction;
+ class StringRef;
// Possible float ABI settings. Used with FloatABIType in TargetOptions.h.
namespace FloatABI {
enum ABIType {
- Default, // Target-specific (either soft of hard depending on triple, etc).
+ Default, // Target-specific (either soft or hard depending on triple, etc).
Soft, // Soft float.
Hard // Hard float.
};
}
-
- /// PrintMachineCode - This flag is enabled when the -print-machineinstrs
- /// option is specified on the command line, and should enable debugging
- /// output from the code generator.
- extern bool PrintMachineCode;
-
- /// NoFramePointerElim - This flag is enabled when the -disable-fp-elim is
- /// specified on the command line. If the target supports the frame pointer
- /// elimination optimization, this option should disable it.
- extern bool NoFramePointerElim;
-
- /// NoFramePointerElimNonLeaf - This flag is enabled when the
- /// -disable-non-leaf-fp-elim is specified on the command line. If the target
- /// supports the frame pointer elimination optimization, this option should
- /// disable it for non-leaf functions.
- extern bool NoFramePointerElimNonLeaf;
-
- /// DisableFramePointerElim - This returns true if frame pointer elimination
- /// optimization should be disabled for the given machine function.
- extern bool DisableFramePointerElim(const MachineFunction &MF);
-
- /// LessPreciseFPMAD - This flag is enabled when the
- /// -enable-fp-mad is specified on the command line. When this flag is off
- /// (the default), the code generator is not allowed to generate mad
- /// (multiply add) if the result is "less precise" than doing those operations
- /// individually.
- extern bool LessPreciseFPMADOption;
- extern bool LessPreciseFPMAD();
-
- /// NoExcessFPPrecision - This flag is enabled when the
- /// -disable-excess-fp-precision flag is specified on the command line. When
- /// this flag is off (the default), the code generator is allowed to produce
- /// results that are "more precise" than IEEE allows. This includes use of
- /// FMA-like operations and use of the X86 FP registers without rounding all
- /// over the place.
- extern bool NoExcessFPPrecision;
-
- /// UnsafeFPMath - This flag is enabled when the
- /// -enable-unsafe-fp-math flag is specified on the command line. When
- /// this flag is off (the default), the code generator is not allowed to
- /// produce results that are "less precise" than IEEE allows. This includes
- /// use of X86 instructions like FSIN and FCOS instead of libcalls.
- /// UnsafeFPMath implies LessPreciseFPMAD.
- extern bool UnsafeFPMath;
-
- /// NoInfsFPMath - This flag is enabled when the
- /// -enable-no-infs-fp-math flag is specified on the command line. When
- /// this flag is off (the default), the code generator is not allowed to
- /// assume the FP arithmetic arguments and results are never +-Infs.
- extern bool NoInfsFPMath;
-
- /// NoNaNsFPMath - This flag is enabled when the
- /// -enable-no-nans-fp-math flag is specified on the command line. When
- /// this flag is off (the default), the code generator is not allowed to
- /// assume the FP arithmetic arguments and results are never NaNs.
- extern bool NoNaNsFPMath;
-
- /// HonorSignDependentRoundingFPMath - This returns true when the
- /// -enable-sign-dependent-rounding-fp-math is specified. If this returns
- /// false (the default), the code generator is allowed to assume that the
- /// rounding behavior is the default (round-to-zero for all floating point to
- /// integer conversions, and round-to-nearest for all other arithmetic
- /// truncations). If this is enabled (set to true), the code generator must
- /// assume that the rounding mode may dynamically change.
- extern bool HonorSignDependentRoundingFPMathOption;
- extern bool HonorSignDependentRoundingFPMath();
-
- /// UseSoftFloat - This flag is enabled when the -soft-float flag is specified
- /// on the command line. When this flag is on, the code generator will
- /// generate libcalls to the software floating point library instead of
- /// target FP instructions.
- extern bool UseSoftFloat;
-
- /// FloatABIType - This setting is set by -float-abi=xxx option is specfied
- /// on the command line. This setting may either be Default, Soft, or Hard.
- /// Default selects the target's default behavior. Soft selects the ABI for
- /// UseSoftFloat, but does not inidcate that FP hardware may not be used.
- /// Such a combination is unfortunately popular (e.g. arm-apple-darwin).
- /// Hard presumes that the normal FP ABI is used.
- extern FloatABI::ABIType FloatABIType;
-
- /// NoZerosInBSS - By default some codegens place zero-initialized data to
- /// .bss section. This flag disables such behaviour (necessary, e.g. for
- /// crt*.o compiling).
- extern bool NoZerosInBSS;
-
- /// JITExceptionHandling - This flag indicates that the JIT should emit
- /// exception handling information.
- extern bool JITExceptionHandling;
-
- /// JITEmitDebugInfo - This flag indicates that the JIT should try to emit
- /// debug information and notify a debugger about it.
- extern bool JITEmitDebugInfo;
-
- /// JITEmitDebugInfoToDisk - This flag indicates that the JIT should write
- /// the object files generated by the JITEmitDebugInfo flag to disk. This
- /// flag is hidden and is only for debugging the debug info.
- extern bool JITEmitDebugInfoToDisk;
-
- /// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is
- /// specified on the commandline. When the flag is on, participating targets
- /// will perform tail call optimization on all calls which use the fastcc
- /// calling convention and which satisfy certain target-independent
- /// criteria (being at the end of a function, having the same return type
- /// as their parent function, etc.), using an alternate ABI if necessary.
- extern bool GuaranteedTailCallOpt;
-
- /// StackAlignmentOverride - Override default stack alignment for target.
- extern unsigned StackAlignmentOverride;
-
- /// RealignStack - This flag indicates whether the stack should be
- /// automatically realigned, if needed.
- extern bool RealignStack;
-
- /// DisableJumpTables - This flag indicates jump tables should not be
- /// generated.
- extern bool DisableJumpTables;
-
- /// EnableFastISel - This flag enables fast-path instruction selection
- /// which trades away generated code quality in favor of reducing
- /// compile time.
- extern bool EnableFastISel;
-
- /// StrongPHIElim - This flag enables more aggressive PHI elimination
- /// wth earlier copy coalescing.
- extern bool StrongPHIElim;
-
- /// getTrapFunctionName - If this returns a non-empty string, this means isel
- /// should lower Intrinsic::trap to a call to the specified function name
- /// instead of an ISD::TRAP node.
- extern StringRef getTrapFunctionName();
-
- extern bool EnableSegmentedStacks;
+ class TargetOptions {
+ public:
+ TargetOptions()
+ : PrintMachineCode(false), NoFramePointerElim(false),
+ NoFramePointerElimNonLeaf(false), LessPreciseFPMADOption(false),
+ NoExcessFPPrecision(false), UnsafeFPMath(false), NoInfsFPMath(false),
+ NoNaNsFPMath(false), HonorSignDependentRoundingFPMathOption(false),
+ UseSoftFloat(false), NoZerosInBSS(false), JITExceptionHandling(false),
+ JITEmitDebugInfo(false), JITEmitDebugInfoToDisk(false),
+ GuaranteedTailCallOpt(false), DisableTailCalls(false),
+ StackAlignmentOverride(0), RealignStack(true),
+ DisableJumpTables(false), EnableFastISel(false),
+ PositionIndependentExecutable(false), EnableSegmentedStacks(false),
+ TrapFuncName(""), FloatABIType(FloatABI::Default)
+ {}
+
+ /// PrintMachineCode - This flag is enabled when the -print-machineinstrs
+ /// option is specified on the command line, and should enable debugging
+ /// output from the code generator.
+ unsigned PrintMachineCode : 1;
+
+ /// NoFramePointerElim - This flag is enabled when the -disable-fp-elim is
+ /// specified on the command line. If the target supports the frame pointer
+ /// elimination optimization, this option should disable it.
+ unsigned NoFramePointerElim : 1;
+
+ /// NoFramePointerElimNonLeaf - This flag is enabled when the
+ /// -disable-non-leaf-fp-elim is specified on the command line. If the
+ /// target supports the frame pointer elimination optimization, this option
+ /// should disable it for non-leaf functions.
+ unsigned NoFramePointerElimNonLeaf : 1;
+
+ /// DisableFramePointerElim - This returns true if frame pointer elimination
+ /// optimization should be disabled for the given machine function.
+ bool DisableFramePointerElim(const MachineFunction &MF) const;
+
+ /// LessPreciseFPMAD - This flag is enabled when the
+ /// -enable-fp-mad is specified on the command line. When this flag is off
+ /// (the default), the code generator is not allowed to generate mad
+ /// (multiply add) if the result is "less precise" than doing those
+ /// operations individually.
+ unsigned LessPreciseFPMADOption : 1;
+ bool LessPreciseFPMAD() const;
+
+ /// NoExcessFPPrecision - This flag is enabled when the
+ /// -disable-excess-fp-precision flag is specified on the command line.
+ /// When this flag is off (the default), the code generator is allowed to
+ /// produce results that are "more precise" than IEEE allows. This includes
+ /// use of FMA-like operations and use of the X86 FP registers without
+ /// rounding all over the place.
+ unsigned NoExcessFPPrecision : 1;
+
+ /// UnsafeFPMath - This flag is enabled when the
+ /// -enable-unsafe-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// produce results that are "less precise" than IEEE allows. This includes
+ /// use of X86 instructions like FSIN and FCOS instead of libcalls.
+ /// UnsafeFPMath implies LessPreciseFPMAD.
+ unsigned UnsafeFPMath : 1;
+
+ /// NoInfsFPMath - This flag is enabled when the
+ /// -enable-no-infs-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// assume the FP arithmetic arguments and results are never +-Infs.
+ unsigned NoInfsFPMath : 1;
+
+ /// NoNaNsFPMath - This flag is enabled when the
+ /// -enable-no-nans-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// assume the FP arithmetic arguments and results are never NaNs.
+ unsigned NoNaNsFPMath : 1;
+
+ /// HonorSignDependentRoundingFPMath - This returns true when the
+ /// -enable-sign-dependent-rounding-fp-math is specified. If this returns
+ /// false (the default), the code generator is allowed to assume that the
+ /// rounding behavior is the default (round-to-zero for all floating point
+ /// to integer conversions, and round-to-nearest for all other arithmetic
+ /// truncations). If this is enabled (set to true), the code generator must
+ /// assume that the rounding mode may dynamically change.
+ unsigned HonorSignDependentRoundingFPMathOption : 1;
+ bool HonorSignDependentRoundingFPMath() const;
+
+ /// UseSoftFloat - This flag is enabled when the -soft-float flag is
+ /// specified on the command line. When this flag is on, the code generator
+ /// will generate libcalls to the software floating point library instead of
+ /// target FP instructions.
+ unsigned UseSoftFloat : 1;
+
+ /// NoZerosInBSS - By default some codegens place zero-initialized data in
+ /// the .bss section. This flag disables such behaviour (necessary, e.g. for
+ /// crt*.o compiling).
+ unsigned NoZerosInBSS : 1;
+
+ /// JITExceptionHandling - This flag indicates that the JIT should emit
+ /// exception handling information.
+ unsigned JITExceptionHandling : 1;
+
+ /// JITEmitDebugInfo - This flag indicates that the JIT should try to emit
+ /// debug information and notify a debugger about it.
+ unsigned JITEmitDebugInfo : 1;
+
+ /// JITEmitDebugInfoToDisk - This flag indicates that the JIT should write
+ /// the object files generated by the JITEmitDebugInfo flag to disk. This
+ /// flag is hidden and is only for debugging the debug info.
+ unsigned JITEmitDebugInfoToDisk : 1;
+
+ /// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is
+ /// specified on the command line. When the flag is on, participating targets
+ /// will perform tail call optimization on all calls which use the fastcc
+ /// calling convention and which satisfy certain target-independent
+ /// criteria (being at the end of a function, having the same return type
+ /// as their parent function, etc.), using an alternate ABI if necessary.
+ unsigned GuaranteedTailCallOpt : 1;
+
+ /// DisableTailCalls - This flag controls whether we will use tail calls.
+ /// Disabling them may be useful to maintain a correct call stack.
+ unsigned DisableTailCalls : 1;
+
+ /// StackAlignmentOverride - Override default stack alignment for target.
+ unsigned StackAlignmentOverride;
+
+ /// RealignStack - This flag indicates whether the stack should be
+ /// automatically realigned, if needed.
+ unsigned RealignStack : 1;
+
+ /// DisableJumpTables - This flag indicates jump tables should not be
+ /// generated.
+ unsigned DisableJumpTables : 1;
+
+ /// EnableFastISel - This flag enables fast-path instruction selection
+ /// which trades away generated code quality in favor of reducing
+ /// compile time.
+ unsigned EnableFastISel : 1;
+
+ /// PositionIndependentExecutable - This flag indicates whether the code
+ /// will eventually be linked into a single executable, despite the PIC
+ /// relocation model being in use. Its value is undefined (and irrelevant)
+ /// if the relocation model is anything other than PIC.
+ unsigned PositionIndependentExecutable : 1;
+
+ unsigned EnableSegmentedStacks : 1;
+
+ /// getTrapFunctionName - If this returns a non-empty string, this means
+ /// isel should lower Intrinsic::trap to a call to the specified function
+ /// name instead of an ISD::TRAP node.
+ std::string TrapFuncName;
+ StringRef getTrapFunctionName() const;
+
+ /// FloatABIType - This setting is set when the -float-abi=xxx option is
+ /// specified on the command line. This setting may either be Default, Soft,
+ /// or Hard.
+ /// Default selects the target's default behavior. Soft selects the ABI for
+ /// UseSoftFloat, but does not indicate that FP hardware may not be used.
+ /// Such a combination is unfortunately popular (e.g. arm-apple-darwin).
+ /// Hard presumes that the normal FP ABI is used.
+ FloatABI::ABIType FloatABIType;
+ };
} // End llvm namespace
#endif
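
A minimal usage sketch of the new class, using only what the header above declares: the former global flags become per-instance members that a front end fills in before constructing a TargetMachine.

    #include "llvm/Target/TargetOptions.h"
    using namespace llvm;

    // Every flag defaults to off in the constructor; set only the equivalents
    // of the old -disable-fp-elim, -fast-isel and -float-abi=soft globals.
    TargetOptions makeOptions() {
      TargetOptions Opts;
      Opts.NoFramePointerElim = 1;
      Opts.EnableFastISel = 1;
      Opts.FloatABIType = FloatABI::Soft;
      return Opts;
    }
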
diff --git a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
index 682aa50..7d8a46b 100644
--- a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CallingConv.h"
#include <cassert>
#include <functional>
@@ -33,25 +34,18 @@ class raw_ostream;
class TargetRegisterClass {
public:
- typedef const unsigned* iterator;
- typedef const unsigned* const_iterator;
- typedef const EVT* vt_iterator;
+ typedef const uint16_t* iterator;
+ typedef const uint16_t* const_iterator;
+ typedef const MVT::SimpleValueType* vt_iterator;
typedef const TargetRegisterClass* const * sc_iterator;
-private:
+
+ // Instance variables filled by tablegen, do not use!
const MCRegisterClass *MC;
const vt_iterator VTs;
const unsigned *SubClassMask;
const sc_iterator SuperClasses;
const sc_iterator SuperRegClasses;
-public:
- TargetRegisterClass(const MCRegisterClass *MC, const EVT *vts,
- const unsigned *subcm,
- const TargetRegisterClass * const *supcs,
- const TargetRegisterClass * const *superregcs)
- : MC(MC), VTs(vts), SubClassMask(subcm), SuperClasses(supcs),
- SuperRegClasses(superregcs) {}
-
- virtual ~TargetRegisterClass() {} // Allow subclasses
+ ArrayRef<uint16_t> (*OrderFunc)(const MachineFunction&);
/// getID() - Return the register class ID number.
///
@@ -108,7 +102,7 @@ public:
///
bool hasType(EVT vt) const {
for(int i = 0; VTs[i] != MVT::Other; ++i)
- if (VTs[i] == vt)
+ if (EVT(VTs[i]) == vt)
return true;
return false;
}
@@ -165,7 +159,7 @@ public:
/// getSubClassMask - Returns a bit vector of subclasses, including this one.
/// The vector is indexed by class IDs, see hasSubClassEq() above for how to
/// use it.
- const unsigned *getSubClassMask() const {
+ const uint32_t *getSubClassMask() const {
return SubClassMask;
}
@@ -196,9 +190,8 @@ public:
///
/// By default, this method returns all registers in the class.
///
- virtual
- ArrayRef<unsigned> getRawAllocationOrder(const MachineFunction &MF) const {
- return makeArrayRef(begin(), getNumRegs());
+ ArrayRef<uint16_t> getRawAllocationOrder(const MachineFunction &MF) const {
+ return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
}
};
@@ -209,6 +202,13 @@ struct TargetRegisterInfoDesc {
bool inAllocatableClass; // Register belongs to an allocatable regclass.
};
+/// Each TargetRegisterClass has a per-register weight and a weight limit,
+/// which must be less than the limits of its pressure sets.
+struct RegClassWeight {
+ unsigned RegWeight;
+ unsigned WeightLimit;
+};
+
/// TargetRegisterInfo base class - We assume that the target defines a static
/// array of TargetRegisterDesc objects that represent all of the machine
/// registers that the target has. As such, we simply have to track a pointer
@@ -332,7 +332,7 @@ public:
if (regA == regB) return true;
if (isVirtualRegister(regA) || isVirtualRegister(regB))
return false;
- for (const unsigned *regList = getOverlaps(regA)+1; *regList; ++regList) {
+ for (const uint16_t *regList = getOverlaps(regA)+1; *regList; ++regList) {
if (*regList == regB) return true;
}
return false;
@@ -347,7 +347,7 @@ public:
/// isSuperRegister - Returns true if regB is a super-register of regA.
///
bool isSuperRegister(unsigned regA, unsigned regB) const {
- for (const unsigned *regList = getSuperRegisters(regA); *regList;++regList){
+ for (const uint16_t *regList = getSuperRegisters(regA); *regList;++regList){
if (*regList == regB) return true;
}
return false;
@@ -356,10 +356,33 @@ public:
/// getCalleeSavedRegs - Return a null-terminated list of all of the
/// callee saved registers on this target. The register should be in the
/// order of desired callee-save stack frame offset. The first register is
- /// closed to the incoming stack pointer if stack grows down, and vice versa.
- virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF = 0)
+ /// closest to the incoming stack pointer if stack grows down, and vice versa.
+ ///
+ virtual const uint16_t* getCalleeSavedRegs(const MachineFunction *MF = 0)
const = 0;
+ /// getCallPreservedMask - Return a mask of call-preserved registers for the
+ /// given calling convention on the current sub-target. The mask should
+ /// include all call-preserved aliases. This is used by the register
+ /// allocator to determine which registers can be live across a call.
+ ///
+ /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
+ /// A set bit indicates that all bits of the corresponding register are
+ /// preserved across the function call. The bit mask is expected to be
+ /// sub-register complete, i.e. if A is preserved, so are all its
+ /// sub-registers.
+ ///
+ /// Bits are numbered from the LSB, so the bit for physical register Reg can
+ /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
+ ///
+ /// A NULL pointer means that no register mask will be used, and call
+ /// instructions should use implicit-def operands to indicate call clobbered
+ /// registers.
+ ///
+ virtual const uint32_t *getCallPreservedMask(CallingConv::ID) const {
+ // The default mask clobbers everything. All targets should override.
+ return 0;
+ }
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses
@@ -367,24 +390,11 @@ public:
/// used by register scavenger to determine what registers are free.
virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
- /// getSubReg - Returns the physical register number of sub-register "Index"
- /// for physical register RegNo. Return zero if the sub-register does not
- /// exist.
- virtual unsigned getSubReg(unsigned RegNo, unsigned Index) const = 0;
-
- /// getSubRegIndex - For a given register pair, return the sub-register index
- /// if the second register is a sub-register of the first. Return zero
- /// otherwise.
- virtual unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const = 0;
-
/// getMatchingSuperReg - Return a super-register of the specified register
/// Reg so its sub-register of index SubIdx is Reg.
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
const TargetRegisterClass *RC) const {
- for (const unsigned *SRs = getSuperRegisters(Reg); unsigned SR = *SRs;++SRs)
- if (Reg == getSubReg(SR, SubIdx) && RC->contains(SR))
- return SR;
- return 0;
+ return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
}
/// canCombineSubRegIndices - Given a register class and a list of
@@ -402,11 +412,11 @@ public:
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
/// specified sub-register index which is in the specified register class B.
+ ///
+ /// TableGen will synthesize missing A sub-classes.
virtual const TargetRegisterClass *
getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B, unsigned Idx) const {
- return 0;
- }
+ const TargetRegisterClass *B, unsigned Idx) const =0;
/// getSubClassWithSubReg - Returns the largest legal sub-class of RC that
/// supports the sub-register index Idx.
@@ -419,6 +429,7 @@ public:
/// supported by the full GR32 register class in 64-bit mode, but only by the
/// GR32_ABCD register class in 32-bit mode.
///
+ /// TableGen will synthesize missing RC sub-classes.
virtual const TargetRegisterClass *
getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const =0;
@@ -469,8 +480,7 @@ public:
/// values. If a target supports multiple different pointer register classes,
/// kind specifies which one is indicated.
virtual const TargetRegisterClass *getPointerRegClass(unsigned Kind=0) const {
- assert(0 && "Target didn't implement getPointerRegClass!");
- return 0; // Must return a value in order to compile with VS 2005
+ llvm_unreachable("Target didn't implement getPointerRegClass!");
}
/// getCrossCopyRegClass - Returns a legal register class to copy a register
@@ -497,18 +507,37 @@ public:
/// getRegPressureLimit - Return the register pressure "high water mark" for
/// the specific register class. The scheduler is in high register pressure
/// mode (for the specific register class) if it goes over the limit.
+ ///
+ /// Note: this is the old register pressure model that relies on a manually
+ /// specified representative register class per value type.
virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
return 0;
}
+ /// Get the weight in units of pressure for this register class.
+ virtual const RegClassWeight &getRegClassWeight(
+ const TargetRegisterClass *RC) const = 0;
+
+ /// Get the number of dimensions of register pressure.
+ virtual unsigned getNumRegPressureSets() const = 0;
+
+ /// Get the register unit pressure limit for this dimension.
+ /// This limit must be adjusted dynamically for reserved registers.
+ virtual unsigned getRegPressureSetLimit(unsigned Idx) const = 0;
+
+ /// Get the dimensions of register pressure impacted by this register class.
+ /// Returns a -1 terminated array of pressure set IDs.
+ virtual const int *getRegClassPressureSets(
+ const TargetRegisterClass *RC) const = 0;
+
/// getRawAllocationOrder - Returns the register allocation order for a
/// specified register class with a target-dependent hint. The returned list
/// may contain reserved registers that cannot be allocated.
///
/// Register allocators need only call this function to resolve
/// target-dependent hints, but it should work without hinting as well.
- virtual ArrayRef<unsigned>
+ virtual ArrayRef<uint16_t>
getRawAllocationOrder(const TargetRegisterClass *RC,
unsigned HintType, unsigned HintReg,
const MachineFunction &MF) const {
@@ -607,22 +636,22 @@ public:
virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB,
unsigned BaseReg, int FrameIdx,
int64_t Offset) const {
- assert(0 && "materializeFrameBaseRegister does not exist on this target");
+ llvm_unreachable("materializeFrameBaseRegister does not exist on this "
+ "target");
}
/// resolveFrameIndex - Resolve a frame index operand of an instruction
/// to reference the indicated base register plus offset instead.
virtual void resolveFrameIndex(MachineBasicBlock::iterator I,
unsigned BaseReg, int64_t Offset) const {
- assert(0 && "resolveFrameIndex does not exist on this target");
+ llvm_unreachable("resolveFrameIndex does not exist on this target");
}
/// isFrameOffsetLegal - Determine whether a given offset immediate is
/// encodable to resolve a frame index.
virtual bool isFrameOffsetLegal(const MachineInstr *MI,
int64_t Offset) const {
- assert(0 && "isFrameOffsetLegal does not exist on this target");
- return false; // Must return a value in order to compile with VS 2005
+ llvm_unreachable("isFrameOffsetLegal does not exist on this target");
}
/// eliminateCallFramePseudoInstr - This method is called during prolog/epilog
@@ -636,7 +665,8 @@ public:
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
- assert(0 && "Call Frame Pseudo Instructions do not exist on this target!");
+ llvm_unreachable("Call Frame Pseudo Instructions do not exist on this "
+ "target!");
}
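
The register-mask convention documented at getCallPreservedMask() reduces to a one-line bit test; a standalone sketch, assuming only the layout described in that comment:

    #include <stdint.h>

    // Reg is preserved across the call iff its bit is set; a NULL mask means
    // the call clobbers everything (the default above).
    static bool isCallPreserved(const uint32_t *Mask, unsigned Reg) {
      return Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);
    }
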
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
index 612635e..f55cf0e 100644
--- a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -352,6 +352,8 @@ def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>;
def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>;
def cttz : SDNode<"ISD::CTTZ" , SDTIntUnaryOp>;
def ctpop : SDNode<"ISD::CTPOP" , SDTIntUnaryOp>;
+def ctlz_zero_undef : SDNode<"ISD::CTLZ_ZERO_UNDEF", SDTIntUnaryOp>;
+def cttz_zero_undef : SDNode<"ISD::CTTZ_ZERO_UNDEF", SDTIntUnaryOp>;
def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
@@ -655,6 +657,51 @@ def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
+def extloadvi1 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1;
+}]>;
+def extloadvi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
+}]>;
+def extloadvi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
+}]>;
+def extloadvi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
+def extloadvf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::f32;
+}]>;
+def extloadvf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::f64;
+}]>;
+
+def sextloadvi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1;
+}]>;
+def sextloadvi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
+}]>;
+def sextloadvi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
+}]>;
+def sextloadvi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
+
+def zextloadvi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1;
+}]>;
+def zextloadvi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
+}]>;
+def zextloadvi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
+}]>;
+def zextloadvi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
+
// store fragments.
def unindexedstore : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
diff --git a/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h b/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h
index 9556c7a..fc23b2c 100644
--- a/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h
@@ -15,7 +15,7 @@
#define LLVM_TARGET_TARGETSUBTARGETINFO_H
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/CodeGen.h"
namespace llvm {
@@ -39,7 +39,7 @@ public:
// AntiDepBreakMode - Type of anti-dependence breaking that should
// be performed before post-RA scheduling.
typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
- typedef SmallVectorImpl<TargetRegisterClass*> RegClassVector;
+ typedef SmallVectorImpl<const TargetRegisterClass*> RegClassVector;
virtual ~TargetSubtargetInfo();
diff --git a/contrib/llvm/include/llvm/Transforms/IPO.h b/contrib/llvm/include/llvm/Transforms/IPO.h
index f9d7f9e..18176e8 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO.h
@@ -94,6 +94,7 @@ Pass *createFunctionInliningPass(int Threshold);
/// createAlwaysInlinerPass - Return a new pass object that inlines only
/// functions that are marked as "always_inline".
Pass *createAlwaysInlinerPass();
+Pass *createAlwaysInlinerPass(bool InsertLifetime);
//===----------------------------------------------------------------------===//
/// createPruneEHPass - Return a new pass object which transforms invoke
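
A short sketch of the new overload in use (the flag value chosen here is illustrative):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"
    using namespace llvm;

    // Build the always-inliner without @llvm.lifetime insertion.
    void addAlwaysInliner(PassManagerBase &PM) {
      PM.add(createAlwaysInlinerPass(/*InsertLifetime=*/false));
    }
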
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h b/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h
index 3ac4c59..7c3cfc8 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h
@@ -31,7 +31,7 @@ namespace llvm {
///
struct Inliner : public CallGraphSCCPass {
explicit Inliner(char &ID);
- explicit Inliner(char &ID, int Threshold);
+ explicit Inliner(char &ID, int Threshold, bool InsertLifetime);
/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph. If the derived class implements this method, it should
@@ -65,28 +65,21 @@ struct Inliner : public CallGraphSCCPass {
///
virtual InlineCost getInlineCost(CallSite CS) = 0;
- // getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
- // higher threshold to determine if the function call should be inlined.
+ /// removeDeadFunctions - Remove dead functions.
///
- virtual float getInlineFudgeFactor(CallSite CS) = 0;
+ /// This also includes a hack in the form of the 'AlwaysInlineOnly' flag
+ /// which restricts it to deleting functions with an 'AlwaysInline'
+ /// attribute. This is useful for the InlineAlways pass that only wants to
+ /// deal with that subset of the functions.
+ bool removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly = false);
- /// resetCachedCostInfo - erase any cached cost data from the derived class.
- /// If the derived class has no such data this can be empty.
- ///
- virtual void resetCachedCostInfo(Function* Caller) = 0;
-
- /// growCachedCostInfo - update the cached cost info for Caller after Callee
- /// has been inlined.
- virtual void growCachedCostInfo(Function *Caller, Function *Callee) = 0;
-
- /// removeDeadFunctions - Remove dead functions that are not included in
- /// DNR (Do Not Remove) list.
- bool removeDeadFunctions(CallGraph &CG,
- SmallPtrSet<const Function *, 16> *DNR = NULL);
private:
// InlineThreshold - Cache the value here for easy access.
unsigned InlineThreshold;
+ // InsertLifetime - Insert @llvm.lifetime intrinsics.
+ bool InsertLifetime;
+
/// shouldInline - Return true if the inliner should attempt to
/// inline at the given CallSite.
bool shouldInline(CallSite CS);
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h b/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
index cc74e7f..47ce902 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -60,6 +60,10 @@ public:
/// out of the frontend.
EP_EarlyAsPossible,
+ /// EP_ModuleOptimizerEarly - This extension point allows adding passes
+ /// just before the main module-level optimization passes.
+ EP_ModuleOptimizerEarly,
+
/// EP_LoopOptimizerEnd - This extension point allows adding loop passes to
/// the end of the loop optimizer.
EP_LoopOptimizerEnd,
@@ -67,7 +71,16 @@ public:
/// EP_ScalarOptimizerLate - This extension point allows adding optimization
/// passes after most of the main optimizations, but before the last
/// cleanup-ish optimizations.
- EP_ScalarOptimizerLate
+ EP_ScalarOptimizerLate,
+
+ /// EP_OptimizerLast - This extension point allows adding passes that
+ /// run after everything else.
+ EP_OptimizerLast,
+
+ /// EP_EnabledOnOptLevel0 - This extension point allows adding passes that
+ /// should not be disabled by the -O0 optimization level. The passes will be
+ /// inserted after the inlining pass.
+ EP_EnabledOnOptLevel0
};
/// The Optimization Level - Specify the basic optimization level.
@@ -90,6 +103,7 @@ public:
bool DisableSimplifyLibCalls;
bool DisableUnitAtATime;
bool DisableUnrollLoops;
+ bool Vectorize;
private:
/// ExtensionList - This is list of all of the extensions that are registered.
@@ -117,8 +131,9 @@ public:
/// populateModulePassManager - This sets up the primary pass manager.
void populateModulePassManager(PassManagerBase &MPM);
void populateLTOPassManager(PassManagerBase &PM, bool Internalize,
- bool RunInliner);
+ bool RunInliner, bool DisableGVNLoadPRE = false);
};
+
/// Registers a function for adding a standard set of passes. This should be
/// used by optimizer plugins to allow all front ends to transparently use
/// them. Create a static instance of this class in your plugin, providing a
@@ -129,5 +144,6 @@ struct RegisterStandardPasses {
PassManagerBuilder::addGlobalExtension(Ty, Fn);
}
};
+
} // end namespace llvm
#endif
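
A sketch of hooking one of the new extension points from a plugin; createMyPass is hypothetical, while the registration mechanics come from the header above.

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO/PassManagerBuilder.h"
    using namespace llvm;

    Pass *createMyPass();  // hypothetical factory, defined elsewhere

    static void addMyLatePass(const PassManagerBuilder &, PassManagerBase &PM) {
      PM.add(createMyPass());  // runs after everything else
    }
    static RegisterStandardPasses
      RegMyPass(PassManagerBuilder::EP_OptimizerLast, addMyLatePass);
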
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation.h b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
index 8d55231..bbf3a69 100644
--- a/contrib/llvm/include/llvm/Transforms/Instrumentation.h
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
@@ -17,6 +17,7 @@
namespace llvm {
class ModulePass;
+class FunctionPass;
// Insert edge profiling instrumentation
ModulePass *createEdgeProfilerPass();
@@ -29,7 +30,13 @@ ModulePass *createPathProfilerPass();
// Insert GCOV profiling instrumentation
ModulePass *createGCOVProfilerPass(bool EmitNotes = true, bool EmitData = true,
- bool Use402Format = false);
+ bool Use402Format = false,
+ bool UseExtraChecksum = false);
+
+// Insert AddressSanitizer (address sanity checking) instrumentation
+ModulePass *createAddressSanitizerPass();
+// Insert ThreadSanitizer (race detection) instrumentation
+FunctionPass *createThreadSanitizerPass();
} // End llvm namespace
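
A minimal wiring sketch for the two new sanitizer passes, with the pass managers assumed to be set up by the caller:

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/Instrumentation.h"
    using namespace llvm;

    void addSanitizers(PassManager &MPM, FunctionPassManager &FPM) {
      MPM.add(createAddressSanitizerPass());  // module-level, address checks
      FPM.add(createThreadSanitizerPass());   // per-function, race detection
    }
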
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar.h b/contrib/llvm/include/llvm/Transforms/Scalar.h
index b1536f9..7f055d4 100644
--- a/contrib/llvm/include/llvm/Transforms/Scalar.h
+++ b/contrib/llvm/include/llvm/Transforms/Scalar.h
@@ -112,6 +112,8 @@ Pass *createLICMPass();
//
Pass *createLoopStrengthReducePass(const TargetLowering *TLI = 0);
+Pass *createGlobalMergePass(const TargetLowering *TLI = 0);
+
//===----------------------------------------------------------------------===//
//
// LoopUnswitch - This pass is a simple loop unswitching pass.
@@ -307,12 +309,6 @@ extern char &InstructionNamerID;
//===----------------------------------------------------------------------===//
//
-// GEPSplitter - Split complex GEPs into simple ones
-//
-FunctionPass *createGEPSplitterPass();
-
-//===----------------------------------------------------------------------===//
-//
// Sink - Code Sinking
//
FunctionPass *createSinkingPass();
@@ -331,6 +327,12 @@ Pass *createCorrelatedValuePropagationPass();
//===----------------------------------------------------------------------===//
//
+// ObjCARCAPElim - ObjC ARC autorelease pool elimination.
+//
+Pass *createObjCARCAPElimPass();
+
+//===----------------------------------------------------------------------===//
+//
// ObjCARCExpand - ObjC ARC preliminary simplifications.
//
Pass *createObjCARCExpandPass();
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 6fcd160..867b9e4 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -173,9 +173,8 @@ BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P);
/// complicated to handle the case where one of the edges being split
/// is an exit of a loop with other exits).
///
-BasicBlock *SplitBlockPredecessors(BasicBlock *BB, BasicBlock *const *Preds,
- unsigned NumPreds, const char *Suffix,
- Pass *P = 0);
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock*> Preds,
+ const char *Suffix, Pass *P = 0);
/// SplitLandingPadPredecessors - This method transforms the landing pad,
/// OrigBB, by introducing two new basic blocks into the function. One of those
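
Under the new signature, callers hand over an ArrayRef rather than a pointer/length pair; a sketch that splits off all predecessors of a block:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/CFG.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    using namespace llvm;

    // A SmallVector converts implicitly to ArrayRef<BasicBlock*>.
    BasicBlock *splitAllPreds(BasicBlock *BB, Pass *P) {
      SmallVector<BasicBlock*, 8> Preds(pred_begin(BB), pred_end(BB));
      return SplitBlockPredecessors(BB, Preds, ".split", P);
    }
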
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BasicInliner.h b/contrib/llvm/include/llvm/Transforms/Utils/BasicInliner.h
deleted file mode 100644
index 4bca6b8..0000000
--- a/contrib/llvm/include/llvm/Transforms/Utils/BasicInliner.h
+++ /dev/null
@@ -1,55 +0,0 @@
-//===- BasicInliner.h - Basic function level inliner ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a simple function based inliner that does not use
-// call graph information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BASICINLINER_H
-#define BASICINLINER_H
-
-#include "llvm/Analysis/InlineCost.h"
-
-namespace llvm {
-
- class Function;
- class TargetData;
- struct BasicInlinerImpl;
-
- /// BasicInliner - BasicInliner provides function level inlining interface.
- /// Clients provide list of functions which are inline without using
- /// module level call graph information. Note that the BasicInliner is
- /// free to delete a function if it is inlined into all call sites.
- class BasicInliner {
- public:
-
- explicit BasicInliner(TargetData *T = NULL);
- ~BasicInliner();
-
- /// addFunction - Add function into the list of functions to process.
- /// All functions must be inserted using this interface before invoking
- /// inlineFunctions().
- void addFunction(Function *F);
-
- /// neverInlineFunction - Sometimes a function is never to be inlined
- /// because of one or other reason.
- void neverInlineFunction(Function *F);
-
- /// inlineFuctions - Walk all call sites in all functions supplied by
- /// client. Inline as many call sites as possible. Delete completely
- /// inlined functions.
- void inlineFunctions();
-
- private:
- BasicInlinerImpl *Impl;
- };
-}
-
-#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
index e825938..17cd58eb 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -20,6 +20,7 @@
namespace llvm {
class Value;
class TargetData;
+ class TargetLibraryInfo;
/// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
Value *CastToCStr(Value *V, IRBuilder<> &B);
@@ -68,7 +69,7 @@ namespace llvm {
/// 'Op' and returns one value with the same type. If 'Op' is a long double,
/// 'l' is added as the suffix of name, if 'Op' is a float, we add a 'f'
/// suffix.
- Value *EmitUnaryFloatFnCall(Value *Op, const char *Name, IRBuilder<> &B,
+ Value *EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
const AttrListPtr &Attrs);
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
@@ -86,12 +87,13 @@ namespace llvm {
/// EmitFPutS - Emit a call to the puts function. Str is required to be a
/// pointer and File is a pointer to FILE.
- void EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD);
+ void EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI);
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
void EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
- const TargetData *TD);
+ const TargetData *TD, const TargetLibraryInfo *TLI);
/// SimplifyFortifiedLibCalls - Helper class for folding checked library
/// calls (e.g. __strcpy_chk) into their unchecked counterparts.
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
index 674c2d0..b7b5d29 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -56,21 +56,13 @@ struct ClonedCodeInfo {
/// call instruction.
bool ContainsCalls;
- /// ContainsUnwinds - This is set to true if the cloned code contains an
- /// unwind instruction.
- bool ContainsUnwinds;
-
/// ContainsDynamicAllocas - This is set to true if the cloned code contains
/// a 'dynamic' alloca. Dynamic allocas are allocas that are either not in
/// the entry block or they are in the entry block but are not a constant
/// size.
bool ContainsDynamicAllocas;
- ClonedCodeInfo() {
- ContainsCalls = false;
- ContainsUnwinds = false;
- ContainsDynamicAllocas = false;
- }
+ ClonedCodeInfo() : ContainsCalls(false), ContainsDynamicAllocas(false) {}
};
@@ -134,8 +126,8 @@ inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
/// Clone OldFunc into NewFunc, transforming the old arguments into references
/// to VMap values. Note that if NewFunc already has basic blocks, the ones
/// cloned into it will be added to the end of the function. This function
-/// fills in a list of return instructions, and can optionally append the
-/// specified suffix to all values cloned.
+/// fills in a list of return instructions, and can optionally remap types
+/// and/or append the specified suffix to all values cloned.
///
/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
/// mappings.
@@ -145,7 +137,8 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
- ClonedCodeInfo *CodeInfo = 0);
+ ClonedCodeInfo *CodeInfo = 0,
+ ValueMapTypeRemapper *TypeMapper = 0);
/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
@@ -204,9 +197,9 @@ public:
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
///
-bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI);
-bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI);
-bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI);
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI, bool InsertLifetime = true);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI, bool InsertLifetime = true);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI, bool InsertLifetime = true);
} // End llvm namespace
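
A sketch of the new parameter in use, suppressing lifetime markers when inlining a call site:

    #include "llvm/Support/CallSite.h"
    #include "llvm/Transforms/Utils/Cloning.h"
    using namespace llvm;

    // With InsertLifetime=false, no @llvm.lifetime intrinsics are emitted
    // for the callee's static allocas.
    bool inlineWithoutLifetime(CallSite CS) {
      InlineFunctionInfo IFI;
      return InlineFunction(CS, IFI, /*InsertLifetime=*/false);
    }
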
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h b/contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
new file mode 100644
index 0000000..7ad7bdd
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
@@ -0,0 +1,66 @@
+//===-- CmpInstAnalysis.h - Utils to help fold compare insts ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file holds routines to help analyse compare instructions
+// and fold them into constants or other compare instructions
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H
+#define LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H
+
+#include "llvm/InstrTypes.h"
+
+namespace llvm {
+ class ICmpInst;
+ class Value;
+
+ /// getICmpCode - Encode an icmp predicate into a three-bit mask. These bits
+ /// are carefully arranged to allow folding of expressions such as:
+ ///
+ /// (A < B) | (A > B) --> (A != B)
+ ///
+ /// Note that this is only valid if the first and second predicates have the
+ /// same sign. It is illegal to do: (A u< B) | (A s> B)
+ ///
+ /// Three bits are used to represent the condition, as follows:
+ /// 0 A > B
+ /// 1 A == B
+ /// 2 A < B
+ ///
+ /// <=> Value Definition
+ /// 000 0 Always false
+ /// 001 1 A > B
+ /// 010 2 A == B
+ /// 011 3 A >= B
+ /// 100 4 A < B
+ /// 101 5 A != B
+ /// 110 6 A <= B
+ /// 111 7 Always true
+ ///
+ unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false);
+
+ /// getICmpValue - This is the complement of getICmpCode, which turns an
+ /// opcode and two operands into either a constant true or false, or the
+ /// predicate for a new ICmp instruction. The sign is passed in to determine
+ /// which kind of predicate to use in the new icmp instruction.
+ /// A non-NULL return value will be a true or false constant.
+ /// A NULL return means a new ICmp is needed; the predicate for it is
+ /// output in NewICmpPred.
+ Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
+ CmpInst::Predicate &NewICmpPred);
+
+ /// PredicatesFoldable - Return true if both predicates match sign or if at
+ /// least one of them is an equality comparison (which is signless).
+ bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2);
+
+} // end namespace llvm
+
+#endif
+
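
The encoding table above can be exercised without any LLVM types at all; a standalone check of the folding example it gives:

    #include <cassert>

    int main() {
      // Bit 0: A > B, bit 1: A == B, bit 2: A < B, per the table above.
      const unsigned GT = 1, EQ = 2, LT = 4;
      assert((LT | GT) == 5);        // (A < B) | (A > B)  ->  A != B
      assert((LT | EQ) == 6);        // (A < B) | (A == B) ->  A <= B
      assert((GT | EQ | LT) == 7);   // always true
      return 0;
    }
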
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
new file mode 100644
index 0000000..2c0ec9b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -0,0 +1,33 @@
+//===-- ModuleUtils.h - Functions to manipulate Modules ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on Modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MODULE_UTILS_H
+#define LLVM_TRANSFORMS_UTILS_MODULE_UTILS_H
+
+namespace llvm {
+
+class Module;
+class Function;
+
+/// Append F to the list of global ctors of module M with the given Priority.
+/// This wraps the function in the appropriate structure and stores it
+/// alongside the other global constructors. For details see
+/// http://llvm.org/docs/LangRef.html#intg_global_ctors
+void appendToGlobalCtors(Module &M, Function *F, int Priority);
+
+/// Same as appendToGlobalCtors(), but for global dtors.
+void appendToGlobalDtors(Module &M, Function *F, int Priority);
+
+} // End llvm namespace
+
+#endif // LLVM_TRANSFORMS_UTILS_MODULE_UTILS_H
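
A usage sketch; the function being registered is assumed to exist already and to take no arguments, as llvm.global_ctors entries expect:

    #include "llvm/Module.h"
    #include "llvm/Transforms/Utils/ModuleUtils.h"
    using namespace llvm;

    // Register InitFn (a void() function created elsewhere) as a module
    // constructor; 65535 is the conventional default priority.
    void registerModuleInit(Module &M, Function *InitFn) {
      appendToGlobalCtors(M, InitFn, /*Priority=*/65535);
    }
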
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
index 064e550..4c82149 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -14,16 +14,18 @@
#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
#define LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+#include "llvm/ADT/StringRef.h"
+
namespace llvm {
- class Value;
class BasicBlock;
- class Use;
- class PHINode;
+ class Instruction;
+ class LoadInst;
template<typename T> class SmallVectorImpl;
template<typename T> class SSAUpdaterTraits;
- class DbgDeclareInst;
- class DIBuilder;
- class BumpPtrAllocator;
+ class PHINode;
+ class Type;
+ class Use;
+ class Value;
/// SSAUpdater - This class updates SSA form for a set of values defined in
/// multiple blocks. This is used when code duplication or another unstructured
@@ -137,12 +139,7 @@ public:
/// passed into the run method). Clients should implement this with a more
/// efficient version if possible.
virtual bool isInstInList(Instruction *I,
- const SmallVectorImpl<Instruction*> &Insts) const {
- for (unsigned i = 0, e = Insts.size(); i != e; ++i)
- if (Insts[i] == I)
- return true;
- return false;
- }
+ const SmallVectorImpl<Instruction*> &Insts) const;
/// doExtraRewritesBeforeFinalDeletion - This hook is invoked after all the
/// stores are found and inserted as available values, but
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
index 5a03d22..a9adbd7 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -15,8 +15,16 @@
#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ValueHandle.h"
+
namespace llvm {
+class CastInst;
+class PHINode;
template<typename T> class SSAUpdaterTraits;
template<typename UpdaterT>
@@ -372,7 +380,7 @@ public:
if (!SomePHI)
break;
if (CheckIfPHIMatches(SomePHI)) {
- RecordMatchingPHI(SomePHI);
+ RecordMatchingPHIs(BlockList);
break;
}
// Match failed: clear all the PHITag values.
@@ -429,38 +437,17 @@ public:
return true;
}
- /// RecordMatchingPHI - For a PHI node that matches, record it and its input
- /// PHIs in both the BBMap and the AvailableVals mapping.
- void RecordMatchingPHI(PhiT *PHI) {
- SmallVector<PhiT*, 20> WorkList;
- WorkList.push_back(PHI);
-
- // Record this PHI.
- BlkT *BB = PHI->getParent();
- ValT PHIVal = Traits::GetPHIValue(PHI);
- (*AvailableVals)[BB] = PHIVal;
- BBMap[BB]->AvailableVal = PHIVal;
-
- while (!WorkList.empty()) {
- PHI = WorkList.pop_back_val();
-
- // Iterate through the PHI's incoming values.
- for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
- E = Traits::PHI_end(PHI); I != E; ++I) {
- ValT IncomingVal = I.getIncomingValue();
- PhiT *IncomingPHI = Traits::ValueIsPHI(IncomingVal, Updater);
- if (!IncomingPHI) continue;
- BB = IncomingPHI->getParent();
- BBInfo *Info = BBMap[BB];
- if (!Info || Info->AvailableVal)
- continue;
-
- // Record the PHI and add it to the worklist.
- (*AvailableVals)[BB] = IncomingVal;
- Info->AvailableVal = IncomingVal;
- WorkList.push_back(IncomingPHI);
+ /// RecordMatchingPHIs - For each PHI node that matches, record it in both
+ /// the BBMap and the AvailableVals mapping.
+ void RecordMatchingPHIs(BlockListTy *BlockList) {
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I)
+ if (PhiT *PHI = (*I)->PHITag) {
+ BlkT *BB = PHI->getParent();
+ ValT PHIVal = Traits::GetPHIValue(PHI);
+ (*AvailableVals)[BB] = PHIVal;
+ BBMap[BB]->AvailableVal = PHIVal;
}
- }
}
};
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
index 524cf5a..2632d18 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -17,21 +17,23 @@
#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ValueHandle.h"
namespace llvm {
extern cl::opt<bool> DisableIVRewrite;
+class CastInst;
+class IVUsers;
class Loop;
-class LoopInfo;
-class DominatorTree;
-class ScalarEvolution;
class LPPassManager;
-class IVUsers;
+class PHINode;
+class ScalarEvolution;
/// Interface for visiting interesting IV users that are recognized but not
/// simplified by this utility.
class IVVisitor {
+ virtual void anchor();
public:
virtual ~IVVisitor() {}
virtual void visitCast(CastInst *Cast) = 0;
@@ -47,12 +49,6 @@ bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, LPPassManager *LPM,
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, LPPassManager *LPM,
SmallVectorImpl<WeakVH> &Dead);
-/// simplifyIVUsers - Simplify instructions recorded by the IVUsers pass.
-/// This is a legacy implementation to reproduce the behavior of the
-/// IndVarSimplify pass prior to DisableIVRewrite.
-bool simplifyIVUsers(IVUsers *IU, ScalarEvolution *SE, LPPassManager *LPM,
- SmallVectorImpl<WeakVH> &Dead);
-
} // namespace llvm
#endif
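A sketch of the IVVisitor hook as it looks after this change; the counter class is hypothetical. Assuming the 3.1-era API, an instance would be passed as the trailing IVVisitor* argument of simplifyUsersOfIV so the caller hears about casts the simplifier recognizes but cannot remove:

#include "llvm/Instructions.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;

// Count IV casts that are recognized but not simplified away.
struct CastCounter : public IVVisitor {
  unsigned NumCasts;
  CastCounter() : NumCasts(0) {}
  virtual void visitCast(CastInst *Cast) { ++NumCasts; }
};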
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h b/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
index 7212a8c..f175e83 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -22,9 +22,12 @@ class Loop;
class LoopInfo;
class LPPassManager;
-bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
+bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool AllowRuntime,
unsigned TripMultiple, LoopInfo* LI, LPPassManager* LPM);
+bool UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
+ LPPassManager* LPM);
+
}
#endif
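A hedged call sketch for the extended signature, assuming the caller runs inside a loop pass; the values are illustrative, with TripCount 0 and TripMultiple 1 meaning "not known statically":

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
using namespace llvm;

static bool unrollBy4(Loop *L, LoopInfo *LI, LPPassManager *LPM) {
  // AllowRuntime=true lets UnrollLoop fall back to emitting a runtime
  // prologue (UnrollRuntimeLoopProlog) when the trip count is unknown.
  return UnrollLoop(L, /*Count*/ 4, /*TripCount*/ 0, /*AllowRuntime*/ true,
                    /*TripMultiple*/ 1, LI, LPM);
}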
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h b/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
index 0384656..8594707 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
@@ -20,7 +20,7 @@
namespace llvm {
class Value;
class Instruction;
- typedef ValueMap<const Value *, TrackingVH<Value> > ValueToValueMapTy;
+ typedef ValueMap<const Value *, WeakVH> ValueToValueMapTy;
/// ValueMapTypeRemapper - This is a class that can be implemented by clients
/// to remap types when cloning constants and instructions.
diff --git a/contrib/llvm/include/llvm/Transforms/Vectorize.h b/contrib/llvm/include/llvm/Transforms/Vectorize.h
new file mode 100644
index 0000000..7701ceb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Vectorize.h
@@ -0,0 +1,106 @@
+//===-- Vectorize.h - Vectorization Transformations -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Vectorize transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_H
+#define LLVM_TRANSFORMS_VECTORIZE_H
+
+namespace llvm {
+class BasicBlock;
+class BasicBlockPass;
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize configuration.
+struct VectorizeConfig {
+ //===--------------------------------------------------------------------===//
+ // Target architecture related parameters
+
+ /// @brief The size of the native vector registers.
+ unsigned VectorBits;
+
+ /// @brief Vectorize integer values.
+ bool VectorizeInts;
+
+ /// @brief Vectorize floating-point values.
+ bool VectorizeFloats;
+
+ /// @brief Vectorize casting (conversion) operations.
+ bool VectorizeCasts;
+
+ /// @brief Vectorize floating-point math intrinsics.
+ bool VectorizeMath;
+
+ /// @brief Vectorize the fused-multiply-add intrinsic.
+ bool VectorizeFMA;
+
+ /// @brief Vectorize loads and stores.
+ bool VectorizeMemOps;
+
+ /// @brief Only generate aligned loads and stores.
+ bool AlignedOnly;
+
+ //===--------------------------------------------------------------------===//
+ // Misc parameters
+
+ /// @brief The required chain depth for vectorization.
+ unsigned ReqChainDepth;
+
+ /// @brief The maximum search distance for instruction pairs.
+ unsigned SearchLimit;
+
+ /// @brief The maximum number of candidate pairs with which to use a full
+ /// cycle check.
+ unsigned MaxCandPairsForCycleCheck;
+
+ /// @brief Replicating one element to a pair breaks the chain.
+ bool SplatBreaksChain;
+
+ /// @brief The maximum number of pairable instructions per group.
+ unsigned MaxInsts;
+
+ /// @brief The maximum number of pairing iterations.
+ unsigned MaxIter;
+
+ /// @brief Don't boost the chain-depth contribution of loads and stores.
+ bool NoMemOpBoost;
+
+ /// @brief Use a fast instruction dependency analysis.
+ bool FastDep;
+
+ /// @brief Initialize the VectorizeConfig from command line options.
+ VectorizeConfig();
+};
+
+//===----------------------------------------------------------------------===//
+//
+// BBVectorize - A basic-block vectorization pass.
+//
+BasicBlockPass *
+createBBVectorizePass(const VectorizeConfig &C = VectorizeConfig());
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize the BasicBlock.
+///
+/// @param BB The BasicBlock to be vectorized
+/// @param P The currently running pass; it should require AliasAnalysis and
+///          ScalarEvolution. After the vectorization, AliasAnalysis,
+///          ScalarEvolution and CFG are preserved.
+///
+/// @return True if the BB is changed, false otherwise.
+///
+bool vectorizeBasicBlock(Pass *P, BasicBlock &BB,
+ const VectorizeConfig &C = VectorizeConfig());
+
+} // End llvm namespace
+
+#endif
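A sketch of how a client might tune the pass, assuming a 3.1-era PassManager; VectorBits=256 is an illustrative AVX-sized choice, and the remaining fields keep the command-line defaults set by the constructor:

#include "llvm/PassManager.h"
#include "llvm/Transforms/Vectorize.h"
using namespace llvm;

static void addVectorizePass(PassManager &PM) {
  VectorizeConfig C;   // fields start at the cl::opt defaults
  C.VectorBits = 256;  // target has 256-bit vector registers (assumption)
  PM.add(createBBVectorizePass(C));
}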
diff --git a/contrib/llvm/include/llvm/Type.h b/contrib/llvm/include/llvm/Type.h
index 43b7dc5..185258d 100644
--- a/contrib/llvm/include/llvm/Type.h
+++ b/contrib/llvm/include/llvm/Type.h
@@ -16,6 +16,7 @@
#define LLVM_TYPE_H
#include "llvm/Support/Casting.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -25,6 +26,7 @@ class raw_ostream;
class Module;
class LLVMContext;
class LLVMContextImpl;
+class StringRef;
template<class GraphType> struct GraphTraits;
/// The instances of the Type class are immutable: once they are created,
@@ -47,23 +49,24 @@ public:
enum TypeID {
// PrimitiveTypes - make sure LastPrimitiveTyID stays up to date.
VoidTyID = 0, ///< 0: type with no size
- FloatTyID, ///< 1: 32-bit floating point type
- DoubleTyID, ///< 2: 64-bit floating point type
- X86_FP80TyID, ///< 3: 80-bit floating point type (X87)
- FP128TyID, ///< 4: 128-bit floating point type (112-bit mantissa)
- PPC_FP128TyID, ///< 5: 128-bit floating point type (two 64-bits, PowerPC)
- LabelTyID, ///< 6: Labels
- MetadataTyID, ///< 7: Metadata
- X86_MMXTyID, ///< 8: MMX vectors (64 bits, X86 specific)
+ HalfTyID, ///< 1: 16-bit floating point type
+ FloatTyID, ///< 2: 32-bit floating point type
+ DoubleTyID, ///< 3: 64-bit floating point type
+ X86_FP80TyID, ///< 4: 80-bit floating point type (X87)
+ FP128TyID, ///< 5: 128-bit floating point type (112-bit mantissa)
+ PPC_FP128TyID, ///< 6: 128-bit floating point type (two 64-bits, PowerPC)
+ LabelTyID, ///< 7: Labels
+ MetadataTyID, ///< 8: Metadata
+ X86_MMXTyID, ///< 9: MMX vectors (64 bits, X86 specific)
// Derived types... see DerivedTypes.h file.
// Make sure FirstDerivedTyID stays up to date!
- IntegerTyID, ///< 9: Arbitrary bit width integers
- FunctionTyID, ///< 10: Functions
- StructTyID, ///< 11: Structures
- ArrayTyID, ///< 12: Arrays
- PointerTyID, ///< 13: Pointers
- VectorTyID, ///< 14: SIMD 'packed' format, or other vector type
+ IntegerTyID, ///< 10: Arbitrary bit width integers
+ FunctionTyID, ///< 11: Functions
+ StructTyID, ///< 12: Structures
+ ArrayTyID, ///< 13: Arrays
+ PointerTyID, ///< 14: Pointers
+ VectorTyID, ///< 15: SIMD 'packed' format, or other vector type
NumTypeIDs, // Must remain as last defined ID
LastPrimitiveTyID = X86_MMXTyID,
@@ -74,21 +77,32 @@ private:
/// Context - This refers to the LLVMContext in which this type was uniqued.
LLVMContext &Context;
- TypeID ID : 8; // The current base type of this type.
- unsigned SubclassData : 24; // Space for subclasses to store data
+ // Due to Ubuntu GCC bug 910363:
+ // https://bugs.launchpad.net/ubuntu/+source/gcc-4.5/+bug/910363
+ // Bitpack ID and SubclassData manually.
+  // Note: TypeID : low 8 bits; SubclassData : high 24 bits.
+ uint32_t IDAndSubclassData;
protected:
friend class LLVMContextImpl;
explicit Type(LLVMContext &C, TypeID tid)
- : Context(C), ID(tid), SubclassData(0),
- NumContainedTys(0), ContainedTys(0) {}
+ : Context(C), IDAndSubclassData(0),
+ NumContainedTys(0), ContainedTys(0) {
+ setTypeID(tid);
+ }
~Type() {}
-
- unsigned getSubclassData() const { return SubclassData; }
+
+ void setTypeID(TypeID ID) {
+ IDAndSubclassData = (ID & 0xFF) | (IDAndSubclassData & 0xFFFFFF00);
+ assert(getTypeID() == ID && "TypeID data too large for field");
+ }
+
+ unsigned getSubclassData() const { return IDAndSubclassData >> 8; }
+
void setSubclassData(unsigned val) {
- SubclassData = val;
+ IDAndSubclassData = (IDAndSubclassData & 0xFF) | (val << 8);
// Ensure we don't have any accidental truncation.
- assert(SubclassData == val && "Subclass data too large for field");
+ assert(getSubclassData() == val && "Subclass data too large for field");
}
/// NumContainedTys - Keeps track of how many Type*'s there are in the
@@ -116,49 +130,54 @@ public:
/// getTypeID - Return the type id for the type. This will return one
/// of the TypeID enum elements defined above.
///
- TypeID getTypeID() const { return ID; }
+ TypeID getTypeID() const { return (TypeID)(IDAndSubclassData & 0xFF); }
/// isVoidTy - Return true if this is 'void'.
- bool isVoidTy() const { return ID == VoidTyID; }
+ bool isVoidTy() const { return getTypeID() == VoidTyID; }
+
+ /// isHalfTy - Return true if this is 'half', a 16-bit IEEE fp type.
+ bool isHalfTy() const { return getTypeID() == HalfTyID; }
/// isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type.
- bool isFloatTy() const { return ID == FloatTyID; }
+ bool isFloatTy() const { return getTypeID() == FloatTyID; }
/// isDoubleTy - Return true if this is 'double', a 64-bit IEEE fp type.
- bool isDoubleTy() const { return ID == DoubleTyID; }
+ bool isDoubleTy() const { return getTypeID() == DoubleTyID; }
/// isX86_FP80Ty - Return true if this is x86 long double.
- bool isX86_FP80Ty() const { return ID == X86_FP80TyID; }
+ bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }
/// isFP128Ty - Return true if this is 'fp128'.
- bool isFP128Ty() const { return ID == FP128TyID; }
+ bool isFP128Ty() const { return getTypeID() == FP128TyID; }
/// isPPC_FP128Ty - Return true if this is powerpc long double.
- bool isPPC_FP128Ty() const { return ID == PPC_FP128TyID; }
+ bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
  /// isFloatingPointTy - Return true if this is one of the six floating point
/// types
bool isFloatingPointTy() const {
- return ID == FloatTyID || ID == DoubleTyID ||
- ID == X86_FP80TyID || ID == FP128TyID || ID == PPC_FP128TyID;
+ return getTypeID() == HalfTyID || getTypeID() == FloatTyID ||
+ getTypeID() == DoubleTyID ||
+ getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
+ getTypeID() == PPC_FP128TyID;
}
/// isX86_MMXTy - Return true if this is X86 MMX.
- bool isX86_MMXTy() const { return ID == X86_MMXTyID; }
+ bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }
/// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP.
///
bool isFPOrFPVectorTy() const;
/// isLabelTy - Return true if this is 'label'.
- bool isLabelTy() const { return ID == LabelTyID; }
+ bool isLabelTy() const { return getTypeID() == LabelTyID; }
/// isMetadataTy - Return true if this is 'metadata'.
- bool isMetadataTy() const { return ID == MetadataTyID; }
+ bool isMetadataTy() const { return getTypeID() == MetadataTyID; }
/// isIntegerTy - True if this is an instance of IntegerType.
///
- bool isIntegerTy() const { return ID == IntegerTyID; }
+ bool isIntegerTy() const { return getTypeID() == IntegerTyID; }
/// isIntegerTy - Return true if this is an IntegerType of the given width.
bool isIntegerTy(unsigned Bitwidth) const;
@@ -170,23 +189,23 @@ public:
/// isFunctionTy - True if this is an instance of FunctionType.
///
- bool isFunctionTy() const { return ID == FunctionTyID; }
+ bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
/// isStructTy - True if this is an instance of StructType.
///
- bool isStructTy() const { return ID == StructTyID; }
+ bool isStructTy() const { return getTypeID() == StructTyID; }
/// isArrayTy - True if this is an instance of ArrayType.
///
- bool isArrayTy() const { return ID == ArrayTyID; }
+ bool isArrayTy() const { return getTypeID() == ArrayTyID; }
/// isPointerTy - True if this is an instance of PointerType.
///
- bool isPointerTy() const { return ID == PointerTyID; }
+ bool isPointerTy() const { return getTypeID() == PointerTyID; }
/// isVectorTy - True if this is an instance of VectorType.
///
- bool isVectorTy() const { return ID == VectorTyID; }
+ bool isVectorTy() const { return getTypeID() == VectorTyID; }
/// canLosslesslyBitCastTo - Return true if this type could be converted
/// with a lossless BitCast to type 'Ty'. For example, i8* to i32*. BitCasts
@@ -202,14 +221,14 @@ public:
/// Here are some useful little methods to query what type derived types are
/// Note that all other types can just compare to see if this == Type::xxxTy;
///
- bool isPrimitiveType() const { return ID <= LastPrimitiveTyID; }
- bool isDerivedType() const { return ID >= FirstDerivedTyID; }
+ bool isPrimitiveType() const { return getTypeID() <= LastPrimitiveTyID; }
+ bool isDerivedType() const { return getTypeID() >= FirstDerivedTyID; }
/// isFirstClassType - Return true if the type is "first class", meaning it
/// is a valid type for a Value.
///
bool isFirstClassType() const {
- return ID != FunctionTyID && ID != VoidTyID;
+ return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
}
/// isSingleValueType - Return true if the type is a valid type for a
@@ -217,8 +236,9 @@ public:
/// and array types.
///
bool isSingleValueType() const {
- return (ID != VoidTyID && isPrimitiveType()) ||
- ID == IntegerTyID || ID == PointerTyID || ID == VectorTyID;
+ return (getTypeID() != VoidTyID && isPrimitiveType()) ||
+ getTypeID() == IntegerTyID || getTypeID() == PointerTyID ||
+ getTypeID() == VectorTyID;
}
/// isAggregateType - Return true if the type is an aggregate type. This
@@ -227,7 +247,7 @@ public:
/// does not include vector types.
///
bool isAggregateType() const {
- return ID == StructTyID || ID == ArrayTyID;
+ return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
}
/// isSized - Return true if it makes sense to take the size of this type. To
@@ -236,12 +256,14 @@ public:
///
bool isSized() const {
// If it's a primitive, it is always sized.
- if (ID == IntegerTyID || isFloatingPointTy() || ID == PointerTyID ||
- ID == X86_MMXTyID)
+ if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
+ getTypeID() == PointerTyID ||
+ getTypeID() == X86_MMXTyID)
return true;
// If it is not something that can have a size (e.g. a function or label),
// it doesn't have a size.
- if (ID != StructTyID && ID != ArrayTyID && ID != VectorTyID)
+ if (getTypeID() != StructTyID && getTypeID() != ArrayTyID &&
+ getTypeID() != VectorTyID)
return false;
// Otherwise we have to try harder to decide.
return isSizedDerivedType();
@@ -294,6 +316,34 @@ public:
unsigned getNumContainedTypes() const { return NumContainedTys; }
//===--------------------------------------------------------------------===//
+ // Helper methods corresponding to subclass methods. This forces a cast to
+ // the specified subclass and calls its accessor. "getVectorNumElements" (for
+ // example) is shorthand for cast<VectorType>(Ty)->getNumElements(). This is
+  // only intended to cover the core methods that are frequently used; helper
+ // methods should not be added here.
+
+ unsigned getIntegerBitWidth() const;
+
+ Type *getFunctionParamType(unsigned i) const;
+ unsigned getFunctionNumParams() const;
+ bool isFunctionVarArg() const;
+
+ StringRef getStructName() const;
+ unsigned getStructNumElements() const;
+ Type *getStructElementType(unsigned N) const;
+
+ Type *getSequentialElementType() const;
+
+ uint64_t getArrayNumElements() const;
+ Type *getArrayElementType() const { return getSequentialElementType(); }
+
+ unsigned getVectorNumElements() const;
+ Type *getVectorElementType() const { return getSequentialElementType(); }
+
+ unsigned getPointerAddressSpace() const;
+ Type *getPointerElementType() const { return getSequentialElementType(); }
+
+ //===--------------------------------------------------------------------===//
// Static members exported by the Type class itself. Useful for getting
// instances of Type.
//
@@ -306,6 +356,7 @@ public:
//
static Type *getVoidTy(LLVMContext &C);
static Type *getLabelTy(LLVMContext &C);
+ static Type *getHalfTy(LLVMContext &C);
static Type *getFloatTy(LLVMContext &C);
static Type *getDoubleTy(LLVMContext &C);
static Type *getMetadataTy(LLVMContext &C);
@@ -324,6 +375,7 @@ public:
// Convenience methods for getting pointer types with one of the above builtin
// types as pointee.
//
+ static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
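The masking in setTypeID/setSubclassData is easy to verify in isolation. A standalone sketch of the packing, mirroring the expressions above (plain C++, not LLVM code; the values are illustrative):

#include <cassert>
#include <stdint.h>

int main() {
  uint32_t Packed = 0;
  const unsigned ID = 11, Sub = 0x123456;        // e.g. FunctionTyID + payload
  Packed = (ID & 0xFF) | (Packed & 0xFFFFFF00);  // setTypeID
  Packed = (Packed & 0xFF) | (Sub << 8);         // setSubclassData
  assert((Packed & 0xFF) == ID);                 // getTypeID
  assert((Packed >> 8) == Sub);                  // getSubclassData
  return 0;
}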
diff --git a/contrib/llvm/include/llvm/User.h b/contrib/llvm/include/llvm/User.h
index 62bc9f0..c52f32f 100644
--- a/contrib/llvm/include/llvm/User.h
+++ b/contrib/llvm/include/llvm/User.h
@@ -19,6 +19,7 @@
#ifndef LLVM_USER_H
#define LLVM_USER_H
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Value.h"
namespace llvm {
@@ -34,6 +35,7 @@ class User : public Value {
void *operator new(size_t); // Do not implement
template <unsigned>
friend struct HungoffOperandTraits;
+ virtual void anchor();
protected:
/// OperandList - This is a pointer to the array of Uses for this User.
/// For nodes of fixed arity (e.g. a binary operator) this array will live
@@ -64,11 +66,11 @@ public:
void operator delete(void *Usr);
/// placement delete - required by std, but never called.
void operator delete(void*, unsigned) {
- assert(0 && "Constructor throws?");
+ llvm_unreachable("Constructor throws?");
}
/// placement delete - required by std, but never called.
void operator delete(void*, unsigned, bool) {
- assert(0 && "Constructor throws?");
+ llvm_unreachable("Constructor throws?");
}
protected:
template <int Idx, typename U> static Use &OpFrom(const U *that) {
diff --git a/contrib/llvm/include/llvm/Value.h b/contrib/llvm/include/llvm/Value.h
index a71e2fd..a82ac45 100644
--- a/contrib/llvm/include/llvm/Value.h
+++ b/contrib/llvm/include/llvm/Value.h
@@ -15,9 +15,7 @@
#define LLVM_VALUE_H
#include "llvm/Use.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
-#include <string>
namespace llvm {
@@ -32,8 +30,6 @@ class GlobalAlias;
class InlineAsm;
class ValueSymbolTable;
template<typename ValueTy> class StringMapEntry;
-template <typename ValueTy = Value>
-class AssertingVH;
typedef StringMapEntry<Value*> ValueName;
class raw_ostream;
class AssemblyAnnotationWriter;
@@ -42,6 +38,7 @@ class LLVMContext;
class Twine;
class MDNode;
class Type;
+class StringRef;
//===----------------------------------------------------------------------===//
// Value Class
@@ -110,26 +107,16 @@ public:
/// All values hold a context through their type.
LLVMContext &getContext() const;
- // All values can potentially be named...
- bool hasName() const { return Name != 0; }
+ // All values can potentially be named.
+ bool hasName() const { return Name != 0 && SubclassID != MDStringVal; }
ValueName *getValueName() const { return Name; }
+ void setValueName(ValueName *VN) { Name = VN; }
/// getName() - Return a constant reference to the value's name. This is cheap
/// and guaranteed to return the same reference as long as the value is not
/// modified.
- ///
- /// This is currently guaranteed to return a StringRef for which data() points
- /// to a valid null terminated string. The use of StringRef.data() is
- /// deprecated here, however, and clients should not rely on it. If such
- /// behavior is needed, clients should use expensive getNameStr(), or switch
- /// to an interface that does not depend on null termination.
StringRef getName() const;
- /// getNameStr() - Return the name of the specified value, *constructing a
- /// string* to hold it. This is guaranteed to construct a string and is very
- /// expensive, clients should use getName() unless necessary.
- std::string getNameStr() const;
-
/// setName() - Change the name of the value, choosing a new unique name if
/// the provided name is taken.
///
@@ -205,6 +192,8 @@ public:
BlockAddressVal, // This is an instance of BlockAddress
ConstantExprVal, // This is an instance of ConstantExpr
ConstantAggregateZeroVal, // This is an instance of ConstantAggregateZero
+ ConstantDataArrayVal, // This is an instance of ConstantDataArray
+ ConstantDataVectorVal, // This is an instance of ConstantDataVector
ConstantIntVal, // This is an instance of ConstantInt
ConstantFPVal, // This is an instance of ConstantFP
ConstantArrayVal, // This is an instance of ConstantArray
@@ -273,14 +262,32 @@ public:
return true; // Values are always values.
}
- /// stripPointerCasts - This method strips off any unneeded pointer
- /// casts from the specified value, returning the original uncasted value.
- /// Note that the returned value has pointer type if the specified value does.
+ /// stripPointerCasts - This method strips off any unneeded pointer casts and
+ /// all-zero GEPs from the specified value, returning the original uncasted
+ /// value. If this is called on a non-pointer value, it returns 'this'.
Value *stripPointerCasts();
const Value *stripPointerCasts() const {
return const_cast<Value*>(this)->stripPointerCasts();
}
+  /// stripInBoundsConstantOffsets - This method strips off unneeded pointer
+  /// casts and all-constant GEPs from the specified value, returning the
+  /// original pointer value. If this is called on a non-pointer value, it
+  /// returns 'this'.
+ Value *stripInBoundsConstantOffsets();
+ const Value *stripInBoundsConstantOffsets() const {
+ return const_cast<Value*>(this)->stripInBoundsConstantOffsets();
+ }
+
+ /// stripInBoundsOffsets - This method strips off unneeded pointer casts and
+  /// any in-bounds offsets from the specified value, returning the original
+ /// pointer value. If this is called on a non-pointer value, it returns
+ /// 'this'.
+ Value *stripInBoundsOffsets();
+ const Value *stripInBoundsOffsets() const {
+ return const_cast<Value*>(this)->stripInBoundsOffsets();
+ }
+
/// isDereferenceablePointer - Test if this value is always a pointer to
/// allocated and suitably aligned memory for a simple load or store.
bool isDereferenceablePointer() const;
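A hedged sketch of a typical client of the strip family: because each method returns the original value when nothing can be peeled, a cheap "same value after peeling casts and zero GEPs" check is a one-liner (the helper name is hypothetical):

#include "llvm/Value.h"
using namespace llvm;

// Conservative check: true only when both pointers peel down to the same
// value once no-op casts and all-zero GEPs are stripped.
static bool triviallySameValue(Value *A, Value *B) {
  return A->stripPointerCasts() == B->stripPointerCasts();
}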
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
index bd132c0..95c834b 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -440,3 +440,19 @@ bool llvm::isIdentifiedObject(const Value *V) {
return A->hasNoAliasAttr() || A->hasByValAttr();
return false;
}
+
+/// isKnownNonNull - Return true if we know that the specified value is never
+/// null.
+bool llvm::isKnownNonNull(const Value *V) {
+ // Alloca never returns null, malloc might.
+ if (isa<AllocaInst>(V)) return true;
+
+ // A byval argument is never null.
+ if (const Argument *A = dyn_cast<Argument>(V))
+ return A->hasByValAttr();
+
+ // Global values are not null unless extern weak.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+ return !GV->hasExternalWeakLinkage();
+ return false;
+}
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
index d947220..9f219f5 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisCounter.cpp
@@ -127,9 +127,8 @@ AliasAnalysis::AliasResult
AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
- const char *AliasString;
+ const char *AliasString = 0;
switch (R) {
- default: llvm_unreachable("Unknown alias type!");
case NoAlias: No++; AliasString = "No alias"; break;
case MayAlias: May++; AliasString = "May alias"; break;
case PartialAlias: Partial++; AliasString = "Partial alias"; break;
@@ -154,9 +153,8 @@ AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
const Location &Loc) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
- const char *MRString;
+ const char *MRString = 0;
switch (R) {
- default: llvm_unreachable("Unknown mod/ref type!");
case NoModRef: NoMR++; MRString = "NoModRef"; break;
case Ref: JustRef++; MRString = "JustRef"; break;
case Mod: JustMod++; MRString = "JustMod"; break;
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index 37271b9..ac72983 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -193,8 +193,6 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::MustAlias:
PrintResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent());
++MustAlias; break;
- default:
- errs() << "Unknown alias query result!\n";
}
}
}
@@ -223,8 +221,6 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, I, *V, F.getParent());
++ModRef; break;
- default:
- errs() << "Unknown alias query result!\n";
}
}
}
diff --git a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
index 3fcd3b5..f80e2fb 100644
--- a/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/contrib/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -189,7 +189,9 @@ bool AliasSet::aliasesUnknownInst(Instruction *Inst, AliasAnalysis &AA) const {
}
for (iterator I = begin(), E = end(); I != E; ++I)
- if (AA.getModRefInfo(Inst, I.getPointer(), I.getSize()) !=
+ if (AA.getModRefInfo(Inst, AliasAnalysis::Location(I.getPointer(),
+ I.getSize(),
+ I.getTBAAInfo())) !=
AliasAnalysis::NoModRef)
return true;
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index af400ba..20ecfd2 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -42,22 +42,6 @@ using namespace llvm;
// Useful predicates
//===----------------------------------------------------------------------===//
-/// isKnownNonNull - Return true if we know that the specified value is never
-/// null.
-static bool isKnownNonNull(const Value *V) {
- // Alloca never returns null, malloc might.
- if (isa<AllocaInst>(V)) return true;
-
- // A byval argument is never null.
- if (const Argument *A = dyn_cast<Argument>(V))
- return A->hasByValAttr();
-
- // Global values are not null unless extern weak.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
- return !GV->hasExternalWeakLinkage();
- return false;
-}
-
/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
@@ -100,42 +84,59 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const TargetData &TD) {
+static uint64_t getObjectSize(const Value *V, const TargetData &TD,
+ bool RoundToAlign = false) {
Type *AccessTy;
+ unsigned Align;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
if (!GV->hasDefinitiveInitializer())
return AliasAnalysis::UnknownSize;
AccessTy = GV->getType()->getElementType();
+ Align = GV->getAlignment();
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
if (!AI->isArrayAllocation())
AccessTy = AI->getType()->getElementType();
else
return AliasAnalysis::UnknownSize;
+ Align = AI->getAlignment();
} else if (const CallInst* CI = extractMallocCall(V)) {
- if (!isArrayMalloc(V, &TD))
+ if (!RoundToAlign && !isArrayMalloc(V, &TD))
// The size is the argument to the malloc call.
if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
return C->getZExtValue();
return AliasAnalysis::UnknownSize;
} else if (const Argument *A = dyn_cast<Argument>(V)) {
- if (A->hasByValAttr())
+ if (A->hasByValAttr()) {
AccessTy = cast<PointerType>(A->getType())->getElementType();
- else
+ Align = A->getParamAlignment();
+ } else {
return AliasAnalysis::UnknownSize;
+ }
} else {
return AliasAnalysis::UnknownSize;
}
-
- if (AccessTy->isSized())
- return TD.getTypeAllocSize(AccessTy);
- return AliasAnalysis::UnknownSize;
+
+ if (!AccessTy->isSized())
+ return AliasAnalysis::UnknownSize;
+
+ uint64_t Size = TD.getTypeAllocSize(AccessTy);
+ // If there is an explicitly specified alignment, and we need to
+ // take alignment into account, round up the size. (If the alignment
+ // is implicit, getTypeAllocSize is sufficient.)
+ if (RoundToAlign && Align)
+ Size = RoundUpToAlignment(Size, Align);
+
+ return Size;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
const TargetData &TD) {
- uint64_t ObjectSize = getObjectSize(V, TD);
+ // This function needs to use the aligned object size because we allow
+ // reads a bit past the end given sufficient alignment.
+ uint64_t ObjectSize = getObjectSize(V, TD, /*RoundToAlign*/true);
+
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
@@ -706,8 +707,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
// pointer were passed to arguments that were neither of these, then it
// couldn't be no-capture.
if (!(*CI)->getType()->isPointerTy() ||
- (!CS.paramHasAttr(ArgNo+1, Attribute::NoCapture) &&
- !CS.paramHasAttr(ArgNo+1, Attribute::ByVal)))
+ (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
continue;
// If this is a no-capture pointer argument, see if we can tell that it
@@ -978,10 +978,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
//
// TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
// practical effect of this is protecting TBAA in the case of dynamic
- // indices into arrays of unions. An alternative way to solve this would
- // be to have clang emit extra metadata for unions and/or union accesses.
- // A union-specific solution wouldn't handle the problem for malloc'd
- // memory however.
+ // indices into arrays of unions or malloc'd memory.
return PartialAlias;
}
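The RoundToAlign path leans on the RoundUpToAlignment helper; a standalone sketch of the arithmetic, assuming a 10-byte object with 8-byte alignment, whose 6 padding bytes isObjectSmallerThan now treats as safely readable:

#include <cassert>
#include <stdint.h>

static uint64_t roundUpToAlignment(uint64_t Size, uint64_t Align) {
  return (Size + Align - 1) / Align * Align;  // same formula as LLVM's helper
}

int main() {
  assert(roundUpToAlignment(10, 8) == 16);  // padded size used for the check
  assert(roundUpToAlignment(16, 8) == 16);  // already aligned: unchanged
  return 0;
}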
diff --git a/contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp b/contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp
index d16665f..8a660f7 100644
--- a/contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp
+++ b/contrib/llvm/lib/Analysis/BlockFrequencyInfo.cpp
@@ -58,6 +58,6 @@ void BlockFrequencyInfo::print(raw_ostream &O, const Module *) const {
/// that we should not rely on the value itself, but only on the comparison to
/// the other block frequencies. We do this to avoid the use of floating point.
///
-BlockFrequency BlockFrequencyInfo::getBlockFreq(BasicBlock *BB) const {
+BlockFrequency BlockFrequencyInfo::getBlockFreq(const BasicBlock *BB) const {
return BFI->getBlockFreq(BB);
}
diff --git a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index bde3b76..2730ce6 100644
--- a/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -12,11 +12,14 @@
//===----------------------------------------------------------------------===//
#include "llvm/Constants.h"
+#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
@@ -29,121 +32,118 @@ INITIALIZE_PASS_END(BranchProbabilityInfo, "branch-prob",
char BranchProbabilityInfo::ID = 0;
-namespace {
-// Please note that BranchProbabilityAnalysis is not a FunctionPass.
-// It is created by BranchProbabilityInfo (which is a FunctionPass), which
-// provides a clear interface. Thanks to that, all heuristics and other
-// private methods are hidden in the .cpp file.
-class BranchProbabilityAnalysis {
-
- typedef std::pair<const BasicBlock *, const BasicBlock *> Edge;
-
- DenseMap<Edge, uint32_t> *Weights;
-
- BranchProbabilityInfo *BP;
-
- LoopInfo *LI;
-
-
- // Weights are for internal use only. They are used by heuristics to help to
- // estimate edges' probability. Example:
- //
- // Using "Loop Branch Heuristics" we predict weights of edges for the
- // block BB2.
- // ...
- // |
- // V
- // BB1<-+
- // | |
- // | | (Weight = 124)
- // V |
- // BB2--+
- // |
- // | (Weight = 4)
- // V
- // BB3
- //
- // Probability of the edge BB2->BB1 = 124 / (124 + 4) = 0.96875
- // Probability of the edge BB2->BB3 = 4 / (124 + 4) = 0.03125
-
- static const uint32_t LBH_TAKEN_WEIGHT = 124;
- static const uint32_t LBH_NONTAKEN_WEIGHT = 4;
-
- static const uint32_t RH_TAKEN_WEIGHT = 24;
- static const uint32_t RH_NONTAKEN_WEIGHT = 8;
-
- static const uint32_t PH_TAKEN_WEIGHT = 20;
- static const uint32_t PH_NONTAKEN_WEIGHT = 12;
-
- static const uint32_t ZH_TAKEN_WEIGHT = 20;
- static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
-
- // Standard weight value. Used when none of the heuristics set weight for
- // the edge.
- static const uint32_t NORMAL_WEIGHT = 16;
-
- // Minimum weight of an edge. Please note, that weight is NEVER 0.
- static const uint32_t MIN_WEIGHT = 1;
-
- // Return TRUE if BB leads directly to a Return Instruction.
- static bool isReturningBlock(BasicBlock *BB) {
- SmallPtrSet<BasicBlock *, 8> Visited;
-
- while (true) {
- TerminatorInst *TI = BB->getTerminator();
- if (isa<ReturnInst>(TI))
- return true;
-
- if (TI->getNumSuccessors() > 1)
- break;
-
- // It is unreachable block which we can consider as a return instruction.
- if (TI->getNumSuccessors() == 0)
- return true;
-
- Visited.insert(BB);
- BB = TI->getSuccessor(0);
+// Weights are for internal use only. They are used by heuristics to help
+// estimate edges' probability. Example:
+//
+// Using "Loop Branch Heuristics" we predict weights of edges for the
+// block BB2.
+// ...
+// |
+// V
+// BB1<-+
+// | |
+// | | (Weight = 124)
+// V |
+// BB2--+
+// |
+// | (Weight = 4)
+// V
+// BB3
+//
+// Probability of the edge BB2->BB1 = 124 / (124 + 4) = 0.96875
+// Probability of the edge BB2->BB3 = 4 / (124 + 4) = 0.03125
+static const uint32_t LBH_TAKEN_WEIGHT = 124;
+static const uint32_t LBH_NONTAKEN_WEIGHT = 4;
+
+/// \brief Unreachable-terminating branch taken weight.
+///
+/// This is the weight for a branch being taken to a block that terminates
+/// (eventually) in unreachable. These are predicted as unlikely as possible.
+static const uint32_t UR_TAKEN_WEIGHT = 1;
+
+/// \brief Unreachable-terminating branch not-taken weight.
+///
+/// This is the weight for a branch not being taken toward a block that
+/// terminates (eventually) in unreachable. Such a branch is essentially never
+/// taken. Set the weight to an absurdly high value so that nested loops don't
+/// easily subsume it.
+static const uint32_t UR_NONTAKEN_WEIGHT = 1024*1024 - 1;
+
+static const uint32_t PH_TAKEN_WEIGHT = 20;
+static const uint32_t PH_NONTAKEN_WEIGHT = 12;
+
+static const uint32_t ZH_TAKEN_WEIGHT = 20;
+static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
+
+static const uint32_t FPH_TAKEN_WEIGHT = 20;
+static const uint32_t FPH_NONTAKEN_WEIGHT = 12;
+
+// Standard weight value. Used when none of the heuristics set weight for
+// the edge.
+static const uint32_t NORMAL_WEIGHT = 16;
+
+// Minimum weight of an edge. Please note, that weight is NEVER 0.
+static const uint32_t MIN_WEIGHT = 1;
+
+static uint32_t getMaxWeightFor(BasicBlock *BB) {
+ return UINT32_MAX / BB->getTerminator()->getNumSuccessors();
+}
- // Stop if cycle is detected.
- if (Visited.count(BB))
- return false;
- }
+/// \brief Calculate edge weights for successors that lead to unreachable.
+///
+/// Predict a successor which necessarily leads to an
+/// unreachable-terminated block as extremely unlikely.
+bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) {
+ TerminatorInst *TI = BB->getTerminator();
+ if (TI->getNumSuccessors() == 0) {
+ if (isa<UnreachableInst>(TI))
+ PostDominatedByUnreachable.insert(BB);
return false;
}
- uint32_t getMaxWeightFor(BasicBlock *BB) const {
- return UINT32_MAX / BB->getTerminator()->getNumSuccessors();
- }
+ SmallPtrSet<BasicBlock *, 4> UnreachableEdges;
+ SmallPtrSet<BasicBlock *, 4> ReachableEdges;
-public:
- BranchProbabilityAnalysis(DenseMap<Edge, uint32_t> *W,
- BranchProbabilityInfo *BP, LoopInfo *LI)
- : Weights(W), BP(BP), LI(LI) {
+ for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
+ if (PostDominatedByUnreachable.count(*I))
+ UnreachableEdges.insert(*I);
+ else
+ ReachableEdges.insert(*I);
}
- // Metadata Weights
- bool calcMetadataWeights(BasicBlock *BB);
+ // If all successors are in the set of blocks post-dominated by unreachable,
+ // this block is too.
+ if (UnreachableEdges.size() == TI->getNumSuccessors())
+ PostDominatedByUnreachable.insert(BB);
- // Return Heuristics
- bool calcReturnHeuristics(BasicBlock *BB);
-
- // Pointer Heuristics
- bool calcPointerHeuristics(BasicBlock *BB);
-
- // Loop Branch Heuristics
- bool calcLoopBranchHeuristics(BasicBlock *BB);
+ // Skip probabilities if this block has a single successor or if all were
+ // reachable.
+ if (TI->getNumSuccessors() == 1 || UnreachableEdges.empty())
+ return false;
- // Zero Heurestics
- bool calcZeroHeuristics(BasicBlock *BB);
+ uint32_t UnreachableWeight =
+ std::max(UR_TAKEN_WEIGHT / UnreachableEdges.size(), MIN_WEIGHT);
+ for (SmallPtrSet<BasicBlock *, 4>::iterator I = UnreachableEdges.begin(),
+ E = UnreachableEdges.end();
+ I != E; ++I)
+ setEdgeWeight(BB, *I, UnreachableWeight);
+
+ if (ReachableEdges.empty())
+ return true;
+ uint32_t ReachableWeight =
+ std::max(UR_NONTAKEN_WEIGHT / ReachableEdges.size(), NORMAL_WEIGHT);
+ for (SmallPtrSet<BasicBlock *, 4>::iterator I = ReachableEdges.begin(),
+ E = ReachableEdges.end();
+ I != E; ++I)
+ setEdgeWeight(BB, *I, ReachableWeight);
- bool runOnFunction(Function &F);
-};
-} // end anonymous namespace
+ return true;
+}
// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing.
-bool BranchProbabilityAnalysis::calcMetadataWeights(BasicBlock *BB) {
+bool BranchProbabilityInfo::calcMetadataWeights(BasicBlock *BB) {
TerminatorInst *TI = BB->getTerminator();
if (TI->getNumSuccessors() == 1)
return false;
@@ -174,54 +174,14 @@ bool BranchProbabilityAnalysis::calcMetadataWeights(BasicBlock *BB) {
}
assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- BP->setEdgeWeight(BB, TI->getSuccessor(i), Weights[i]);
+ setEdgeWeight(BB, TI->getSuccessor(i), Weights[i]);
return true;
}
-// Calculate Edge Weights using "Return Heuristics". Predict a successor which
-// leads directly to Return Instruction will not be taken.
-bool BranchProbabilityAnalysis::calcReturnHeuristics(BasicBlock *BB){
- if (BB->getTerminator()->getNumSuccessors() == 1)
- return false;
-
- SmallPtrSet<BasicBlock *, 4> ReturningEdges;
- SmallPtrSet<BasicBlock *, 4> StayEdges;
-
- for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
- BasicBlock *Succ = *I;
- if (isReturningBlock(Succ))
- ReturningEdges.insert(Succ);
- else
- StayEdges.insert(Succ);
- }
-
- if (uint32_t numStayEdges = StayEdges.size()) {
- uint32_t stayWeight = RH_TAKEN_WEIGHT / numStayEdges;
- if (stayWeight < NORMAL_WEIGHT)
- stayWeight = NORMAL_WEIGHT;
-
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = StayEdges.begin(),
- E = StayEdges.end(); I != E; ++I)
- BP->setEdgeWeight(BB, *I, stayWeight);
- }
-
- if (uint32_t numRetEdges = ReturningEdges.size()) {
- uint32_t retWeight = RH_NONTAKEN_WEIGHT / numRetEdges;
- if (retWeight < MIN_WEIGHT)
- retWeight = MIN_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = ReturningEdges.begin(),
- E = ReturningEdges.end(); I != E; ++I) {
- BP->setEdgeWeight(BB, *I, retWeight);
- }
- }
-
- return ReturningEdges.size() > 0;
-}
-
// Calculate Edge Weights using "Pointer Heuristics". Predict that a comparison
// between two pointers, or between a pointer and NULL, will fail.
-bool BranchProbabilityAnalysis::calcPointerHeuristics(BasicBlock *BB) {
+bool BranchProbabilityInfo::calcPointerHeuristics(BasicBlock *BB) {
BranchInst * BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isConditional())
return false;
@@ -249,16 +209,14 @@ bool BranchProbabilityAnalysis::calcPointerHeuristics(BasicBlock *BB) {
if (!isProb)
std::swap(Taken, NonTaken);
- BP->setEdgeWeight(BB, Taken, PH_TAKEN_WEIGHT);
- BP->setEdgeWeight(BB, NonTaken, PH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, Taken, PH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTaken, PH_NONTAKEN_WEIGHT);
return true;
}
// Calculate Edge Weights using "Loop Branch Heuristics". Predict backedges
// as taken, exiting edges as not-taken.
-bool BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
- uint32_t numSuccs = BB->getTerminator()->getNumSuccessors();
-
+bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
Loop *L = LI->getLoopFor(BB);
if (!L)
return false;
@@ -267,17 +225,13 @@ bool BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
SmallPtrSet<BasicBlock *, 8> ExitingEdges;
SmallPtrSet<BasicBlock *, 8> InEdges; // Edges from header to the loop.
- bool isHeader = BB == L->getHeader();
-
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
- BasicBlock *Succ = *I;
- Loop *SuccL = LI->getLoopFor(Succ);
- if (SuccL != L)
- ExitingEdges.insert(Succ);
- else if (Succ == L->getHeader())
- BackEdges.insert(Succ);
- else if (isHeader)
- InEdges.insert(Succ);
+ if (!L->contains(*I))
+ ExitingEdges.insert(*I);
+ else if (L->getHeader() == *I)
+ BackEdges.insert(*I);
+ else
+ InEdges.insert(*I);
}
if (uint32_t numBackEdges = BackEdges.size()) {
@@ -288,7 +242,7 @@ bool BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
for (SmallPtrSet<BasicBlock *, 8>::iterator EI = BackEdges.begin(),
EE = BackEdges.end(); EI != EE; ++EI) {
BasicBlock *Back = *EI;
- BP->setEdgeWeight(BB, Back, backWeight);
+ setEdgeWeight(BB, Back, backWeight);
}
}
@@ -300,27 +254,26 @@ bool BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
for (SmallPtrSet<BasicBlock *, 8>::iterator EI = InEdges.begin(),
EE = InEdges.end(); EI != EE; ++EI) {
BasicBlock *Back = *EI;
- BP->setEdgeWeight(BB, Back, inWeight);
+ setEdgeWeight(BB, Back, inWeight);
}
}
- uint32_t numExitingEdges = ExitingEdges.size();
- if (uint32_t numNonExitingEdges = numSuccs - numExitingEdges) {
- uint32_t exitWeight = LBH_NONTAKEN_WEIGHT / numNonExitingEdges;
+ if (uint32_t numExitingEdges = ExitingEdges.size()) {
+ uint32_t exitWeight = LBH_NONTAKEN_WEIGHT / numExitingEdges;
if (exitWeight < MIN_WEIGHT)
exitWeight = MIN_WEIGHT;
for (SmallPtrSet<BasicBlock *, 8>::iterator EI = ExitingEdges.begin(),
EE = ExitingEdges.end(); EI != EE; ++EI) {
BasicBlock *Exiting = *EI;
- BP->setEdgeWeight(BB, Exiting, exitWeight);
+ setEdgeWeight(BB, Exiting, exitWeight);
}
}
return true;
}
-bool BranchProbabilityAnalysis::calcZeroHeuristics(BasicBlock *BB) {
+bool BranchProbabilityInfo::calcZeroHeuristics(BasicBlock *BB) {
BranchInst * BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isConditional())
return false;
@@ -375,45 +328,94 @@ bool BranchProbabilityAnalysis::calcZeroHeuristics(BasicBlock *BB) {
if (!isProb)
std::swap(Taken, NonTaken);
- BP->setEdgeWeight(BB, Taken, ZH_TAKEN_WEIGHT);
- BP->setEdgeWeight(BB, NonTaken, ZH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, Taken, ZH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTaken, ZH_NONTAKEN_WEIGHT);
return true;
}
+bool BranchProbabilityInfo::calcFloatingPointHeuristics(BasicBlock *BB) {
+ BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
+ if (!BI || !BI->isConditional())
+ return false;
-bool BranchProbabilityAnalysis::runOnFunction(Function &F) {
-
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
- BasicBlock *BB = I++;
-
- if (calcMetadataWeights(BB))
- continue;
+ Value *Cond = BI->getCondition();
+ FCmpInst *FCmp = dyn_cast<FCmpInst>(Cond);
+ if (!FCmp)
+ return false;
- if (calcLoopBranchHeuristics(BB))
- continue;
+ bool isProb;
+ if (FCmp->isEquality()) {
+ // f1 == f2 -> Unlikely
+ // f1 != f2 -> Likely
+ isProb = !FCmp->isTrueWhenEqual();
+ } else if (FCmp->getPredicate() == FCmpInst::FCMP_ORD) {
+ // !isnan -> Likely
+ isProb = true;
+ } else if (FCmp->getPredicate() == FCmpInst::FCMP_UNO) {
+ // isnan -> Unlikely
+ isProb = false;
+ } else {
+ return false;
+ }
- if (calcReturnHeuristics(BB))
- continue;
+ BasicBlock *Taken = BI->getSuccessor(0);
+ BasicBlock *NonTaken = BI->getSuccessor(1);
- if (calcPointerHeuristics(BB))
- continue;
+ if (!isProb)
+ std::swap(Taken, NonTaken);
- calcZeroHeuristics(BB);
- }
+ setEdgeWeight(BB, Taken, FPH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTaken, FPH_NONTAKEN_WEIGHT);
- return false;
+ return true;
}
void BranchProbabilityInfo::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<LoopInfo>();
- AU.setPreservesAll();
+ AU.addRequired<LoopInfo>();
+ AU.setPreservesAll();
}
bool BranchProbabilityInfo::runOnFunction(Function &F) {
- LoopInfo &LI = getAnalysis<LoopInfo>();
- BranchProbabilityAnalysis BPA(&Weights, this, &LI);
- return BPA.runOnFunction(F);
+ LastF = &F; // Store the last function we ran on for printing.
+ LI = &getAnalysis<LoopInfo>();
+ assert(PostDominatedByUnreachable.empty());
+
+ // Walk the basic blocks in post-order so that we can build up state about
+ // the successors of a block iteratively.
+ for (po_iterator<BasicBlock *> I = po_begin(&F.getEntryBlock()),
+ E = po_end(&F.getEntryBlock());
+ I != E; ++I) {
+ DEBUG(dbgs() << "Computing probabilities for " << I->getName() << "\n");
+ if (calcUnreachableHeuristics(*I))
+ continue;
+ if (calcMetadataWeights(*I))
+ continue;
+ if (calcLoopBranchHeuristics(*I))
+ continue;
+ if (calcPointerHeuristics(*I))
+ continue;
+ if (calcZeroHeuristics(*I))
+ continue;
+ calcFloatingPointHeuristics(*I);
+ }
+
+ PostDominatedByUnreachable.clear();
+ return false;
+}
+
+void BranchProbabilityInfo::print(raw_ostream &OS, const Module *) const {
+ OS << "---- Branch Probabilities ----\n";
+ // We print the probabilities from the last function the analysis ran over,
+ // or the function it is currently running over.
+ assert(LastF && "Cannot print prior to running over a function");
+ for (Function::const_iterator BI = LastF->begin(), BE = LastF->end();
+ BI != BE; ++BI) {
+ for (succ_const_iterator SI = succ_begin(BI), SE = succ_end(BI);
+ SI != SE; ++SI) {
+ printEdgeProbability(OS << " ", BI, *SI);
+ }
+ }
}
uint32_t BranchProbabilityInfo::getSumForBlock(const BasicBlock *BB) const {
@@ -434,12 +436,8 @@ uint32_t BranchProbabilityInfo::getSumForBlock(const BasicBlock *BB) const {
bool BranchProbabilityInfo::
isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
- uint32_t Weight = getEdgeWeight(Src, Dst);
- uint32_t Sum = getSumForBlock(Src);
-
- // FIXME: Implement BranchProbability::compare then change this code to
- // compare this BranchProbability against a static "hot" BranchProbability.
- return (uint64_t)Weight * 5 > (uint64_t)Sum * 4;
+ // FIXME: Compare against a static "hot" BranchProbability.
+ return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
}
BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
@@ -461,8 +459,8 @@ BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
}
}
- // FIXME: Use BranchProbability::compare.
- if ((uint64_t)MaxWeight * 5 > (uint64_t)Sum * 4)
+ // Hot probability is at least 4/5 = 80%
+ if (BranchProbability(MaxWeight, Sum) > BranchProbability(4, 5))
return MaxSucc;
return 0;
@@ -483,8 +481,8 @@ getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
void BranchProbabilityInfo::
setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst, uint32_t Weight) {
Weights[std::make_pair(Src, Dst)] = Weight;
- DEBUG(dbgs() << "set edge " << Src->getNameStr() << " -> "
- << Dst->getNameStr() << " weight to " << Weight
+ DEBUG(dbgs() << "set edge " << Src->getName() << " -> "
+ << Dst->getName() << " weight to " << Weight
<< (isEdgeHot(Src, Dst) ? " [is HOT now]\n" : "\n"));
}
@@ -499,11 +497,12 @@ getEdgeProbability(const BasicBlock *Src, const BasicBlock *Dst) const {
}
raw_ostream &
-BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS, BasicBlock *Src,
- BasicBlock *Dst) const {
+BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS,
+ const BasicBlock *Src,
+ const BasicBlock *Dst) const {
const BranchProbability Prob = getEdgeProbability(Src, Dst);
- OS << "edge " << Src->getNameStr() << " -> " << Dst->getNameStr()
+ OS << "edge " << Src->getName() << " -> " << Dst->getName()
<< " probability is " << Prob
<< (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");
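The unreachable weights are chosen so the reachable edge dominates even deeply nested loop weights. A standalone check of that arithmetic against the isEdgeHot criterion (probability > 4/5) from above:

#include <cassert>
#include <stdint.h>

int main() {
  const uint32_t UR_TAKEN = 1, UR_NONTAKEN = 1024 * 1024 - 1;
  const uint64_t Sum = (uint64_t)UR_TAKEN + UR_NONTAKEN;  // 1048576
  // P(unreachable edge) = 1/1048576, roughly 0.0001%; the reachable edge
  // clears the 4/5 "hot" threshold with room to spare.
  assert((uint64_t)UR_NONTAKEN * 5 > Sum * 4);
  return 0;
}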
diff --git a/contrib/llvm/lib/Analysis/CFGPrinter.cpp b/contrib/llvm/lib/Analysis/CFGPrinter.cpp
index 7bb063f..7685400 100644
--- a/contrib/llvm/lib/Analysis/CFGPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/CFGPrinter.cpp
@@ -77,7 +77,7 @@ namespace {
}
virtual bool runOnFunction(Function &F) {
- std::string Filename = "cfg." + F.getNameStr() + ".dot";
+ std::string Filename = "cfg." + F.getName().str() + ".dot";
errs() << "Writing '" << Filename << "'...";
std::string ErrorInfo;
@@ -111,7 +111,7 @@ namespace {
}
virtual bool runOnFunction(Function &F) {
- std::string Filename = "cfg." + F.getNameStr() + ".dot";
+ std::string Filename = "cfg." + F.getName().str() + ".dot";
errs() << "Writing '" << Filename << "'...";
std::string ErrorInfo;
@@ -143,7 +143,7 @@ INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only",
/// being a 'dot' and 'gv' program in your path.
///
void Function::viewCFG() const {
- ViewGraph(this, "cfg" + getNameStr());
+ ViewGraph(this, "cfg" + getName());
}
/// viewCFGOnly - This function is meant for use from the debugger. It works
@@ -152,7 +152,7 @@ void Function::viewCFG() const {
/// his can make the graph smaller.
///
void Function::viewCFGOnly() const {
- ViewGraph(this, "cfg" + getNameStr(), true);
+ ViewGraph(this, "cfg" + getName(), true);
}
FunctionPass *llvm::createCFGPrinterPass () {
diff --git a/contrib/llvm/lib/Analysis/CaptureTracking.cpp b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
index b2c27d1..dd33eeb 100644
--- a/contrib/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/contrib/llvm/lib/Analysis/CaptureTracking.cpp
@@ -16,25 +16,35 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/CaptureTracking.h"
-#include "llvm/Constants.h"
-#include "llvm/Instructions.h"
-#include "llvm/Value.h"
-#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/Analysis/CaptureTracking.h"
using namespace llvm;
-/// As its comment mentions, PointerMayBeCaptured can be expensive.
-/// However, it's not easy for BasicAA to cache the result, because
-/// it's an ImmutablePass. To work around this, bound queries at a
-/// fixed number of uses.
-///
-/// TODO: Write a new FunctionPass AliasAnalysis so that it can keep
-/// a cache. Then we can move the code from BasicAliasAnalysis into
-/// that path, and remove this threshold.
-static int const Threshold = 20;
+CaptureTracker::~CaptureTracker() {}
+
+namespace {
+ struct SimpleCaptureTracker : public CaptureTracker {
+ explicit SimpleCaptureTracker(bool ReturnCaptures)
+ : ReturnCaptures(ReturnCaptures), Captured(false) {}
+
+ void tooManyUses() { Captured = true; }
+
+ bool shouldExplore(Use *U) { return true; }
+
+ bool captured(Use *U) {
+ if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
+ return false;
+
+ Captured = true;
+ return true;
+ }
+
+ bool ReturnCaptures;
+
+ bool Captured;
+ };
+}
/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
@@ -45,6 +55,26 @@ static int const Threshold = 20;
/// counts as capturing it or not.
bool llvm::PointerMayBeCaptured(const Value *V,
bool ReturnCaptures, bool StoreCaptures) {
+ assert(!isa<GlobalValue>(V) &&
+ "It doesn't make sense to ask whether a global is captured.");
+
+ // TODO: If StoreCaptures is not true, we could do Fancy analysis
+ // to determine whether this store is not actually an escape point.
+ // In that case, BasicAliasAnalysis should be updated as well to
+ // take advantage of this.
+ (void)StoreCaptures;
+
+ SimpleCaptureTracker SCT(ReturnCaptures);
+ PointerMayBeCaptured(V, &SCT);
+ return SCT.Captured;
+}
+
+/// TODO: Write a new FunctionPass AliasAnalysis so that it can keep
+/// a cache. Then we can move the code from BasicAliasAnalysis into
+/// that path, and remove this threshold.
+static int const Threshold = 20;
+
+void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
assert(V->getType()->isPointerTy() && "Capture is for pointers only!");
SmallVector<Use*, Threshold> Worklist;
SmallSet<Use*, Threshold> Visited;
@@ -55,9 +85,10 @@ bool llvm::PointerMayBeCaptured(const Value *V,
// If there are lots of uses, conservatively say that the value
// is captured to avoid taking too much compile time.
if (Count++ >= Threshold)
- return true;
+ return Tracker->tooManyUses();
Use *U = &UI.getUse();
+ if (!Tracker->shouldExplore(U)) continue;
Visited.insert(U);
Worklist.push_back(U);
}
@@ -86,11 +117,10 @@ bool llvm::PointerMayBeCaptured(const Value *V,
// (think of self-referential objects).
CallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
for (CallSite::arg_iterator A = B; A != E; ++A)
- if (A->get() == V && !CS.paramHasAttr(A - B + 1, Attribute::NoCapture))
+ if (A->get() == V && !CS.doesNotCapture(A - B))
// The parameter is not marked 'nocapture' - captured.
- return true;
- // Only passed via 'nocapture' arguments, or is the called function - not
- // captured.
+ if (Tracker->captured(U))
+ return;
break;
}
case Instruction::Load:
@@ -99,18 +129,11 @@ bool llvm::PointerMayBeCaptured(const Value *V,
case Instruction::VAArg:
// "va-arg" from a pointer does not cause it to be captured.
break;
- case Instruction::Ret:
- if (ReturnCaptures)
- return true;
- break;
case Instruction::Store:
if (V == I->getOperand(0))
// Stored the pointer - conservatively assume it may be captured.
- // TODO: If StoreCaptures is not true, we could do Fancy analysis
- // to determine whether this store is not actually an escape point.
- // In that case, BasicAliasAnalysis should be updated as well to
- // take advantage of this.
- return true;
+ if (Tracker->captured(U))
+ return;
// Storing to the pointee does not cause the pointer to be captured.
break;
case Instruction::BitCast:
@@ -122,7 +145,8 @@ bool llvm::PointerMayBeCaptured(const Value *V,
UI != UE; ++UI) {
Use *U = &UI.getUse();
if (Visited.insert(U))
- Worklist.push_back(U);
+ if (Tracker->shouldExplore(U))
+ Worklist.push_back(U);
}
break;
case Instruction::ICmp:
@@ -136,13 +160,16 @@ bool llvm::PointerMayBeCaptured(const Value *V,
break;
// Otherwise, be conservative. There are crazy ways to capture pointers
// using comparisons.
- return true;
+ if (Tracker->captured(U))
+ return;
+ break;
default:
// Something else - be conservative and say it is captured.
- return true;
+ if (Tracker->captured(U))
+ return;
+ break;
}
}
- // All uses examined - not captured.
- return false;
+ // All uses examined.
}
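The rewrite above turns PointerMayBeCaptured into a callback-driven walk: clients implement CaptureTracker and the traversal reports into it. A sketch of a hypothetical client, written against the interface shown in this hunk (tooManyUses, shouldExplore, captured), that records the first capturing use rather than a plain boolean:

    namespace {
      // Hypothetical tracker, for illustration only: remembers which Use
      // captured the pointer, treating a too-large use list conservatively.
      struct FirstCaptureTracker : public llvm::CaptureTracker {
        FirstCaptureTracker() : FirstCapture(0), Conservative(false) {}

        void tooManyUses() { Conservative = true; }       // gave up: assume captured
        bool shouldExplore(llvm::Use *U) { return true; } // visit every use
        bool captured(llvm::Use *U) {
          FirstCapture = U;   // remember the offending use
          return true;        // returning true stops the traversal
        }

        llvm::Use *FirstCapture;
        bool Conservative;
      };
    }
    // Usage: FirstCaptureTracker FCT; llvm::PointerMayBeCaptured(V, &FCT);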
diff --git a/contrib/llvm/lib/Analysis/CodeMetrics.cpp b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
new file mode 100644
index 0000000..316e7bc9
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/CodeMetrics.cpp
@@ -0,0 +1,184 @@
+//===- CodeMetrics.cpp - Code cost measurements ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements code cost measurement utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CodeMetrics.h"
+#include "llvm/Function.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Target/TargetData.h"
+
+using namespace llvm;
+
+/// callIsSmall - If a call is likely to lower to a single target instruction,
+/// or is otherwise deemed small, return true.
+/// TODO: Perhaps handle calls like memcpy, strcpy, etc.?
+bool llvm::callIsSmall(const Function *F) {
+ if (!F) return false;
+
+ if (F->hasLocalLinkage()) return false;
+
+ if (!F->hasName()) return false;
+
+ StringRef Name = F->getName();
+
+ // These will all likely lower to a single selection DAG node.
+ if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
+ Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
+ Name == "sin" || Name == "sinf" || Name == "sinl" ||
+ Name == "cos" || Name == "cosf" || Name == "cosl" ||
+ Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" )
+ return true;
+
+ // These are all likely to be optimized into something smaller.
+ if (Name == "pow" || Name == "powf" || Name == "powl" ||
+ Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
+ Name == "floor" || Name == "floorf" || Name == "ceil" ||
+ Name == "round" || Name == "ffs" || Name == "ffsl" ||
+ Name == "abs" || Name == "labs" || Name == "llabs")
+ return true;
+
+ return false;
+}
+
+bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
+ if (isa<PHINode>(I))
+ return true;
+
+ // If a GEP has all constant indices, it will probably be folded with
+ // a load/store.
+ if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
+ return GEP->hasAllConstantIndices();
+
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default:
+ return false;
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::objectsize:
+ case Intrinsic::ptr_annotation:
+ case Intrinsic::var_annotation:
+ // These intrinsics don't count as size.
+ return true;
+ }
+ }
+
+ if (const CastInst *CI = dyn_cast<CastInst>(I)) {
+ // Noop casts, including ptr <-> int, don't count.
+ if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) || isa<PtrToIntInst>(CI))
+ return true;
+ // trunc to a native type is free (assuming the target has compare and
+ // shift-right of the same width).
+ if (TD && isa<TruncInst>(CI) &&
+ TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
+ return true;
+ // Result of a cmp instruction is often extended (to be used by other
+ // cmp instructions, logical or return instructions). These are usually
+ // nop on most sane targets.
+ if (isa<CmpInst>(CI->getOperand(0)))
+ return true;
+ }
+
+ return false;
+}
+
+/// analyzeBasicBlock - Fill in the current structure with information gleaned
+/// from the specified block.
+void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
+ const TargetData *TD) {
+ ++NumBlocks;
+ unsigned NumInstsBeforeThisBB = NumInsts;
+ for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
+ II != E; ++II) {
+ if (isInstructionFree(II, TD))
+ continue;
+
+ // Special handling for calls.
+ if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
+ ImmutableCallSite CS(cast<Instruction>(II));
+
+ if (const Function *F = CS.getCalledFunction()) {
+ // If a function is both internal and has a single use, then it is
+ // extremely likely to get inlined in the future (it was probably
+ // exposed by an interleaved devirtualization pass).
+ if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
+ ++NumInlineCandidates;
+
+ // If this call is to the function itself, then the function is recursive.
+ // Inlining it into other functions is a bad idea, because this is
+ // basically just a form of loop peeling, and our metrics aren't useful
+ // for that case.
+ if (F == BB->getParent())
+ isRecursive = true;
+ }
+
+ if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
+ // Each argument to a call takes on average one instruction to set up.
+ NumInsts += CS.arg_size();
+
+ // We don't want inline asm to count as a call - that would prevent loop
+ // unrolling. The argument setup cost is still real, though.
+ if (!isa<InlineAsm>(CS.getCalledValue()))
+ ++NumCalls;
+ }
+ }
+
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
+ if (!AI->isStaticAlloca())
+ this->usesDynamicAlloca = true;
+ }
+
+ if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
+ ++NumVectorInsts;
+
+ ++NumInsts;
+ }
+
+ if (isa<ReturnInst>(BB->getTerminator()))
+ ++NumRets;
+
+ // We never want to inline functions that contain an indirectbr. Doing so
+ // would be incorrect because all the blockaddresses (in static global
+ // initializers, for example) would still refer to the original function,
+ // so the indirect jump would leave the inlined copy and land back in the
+ // original function, which is undefined behavior.
+ // FIXME: This logic isn't really right; we can safely inline functions
+ // with indirectbr's as long as no other function or global references the
+ // blockaddress of a block within the current function. And as a QOI issue,
+ // if someone is using a blockaddress without an indirectbr, and that
+ // reference somehow ends up in another function or global, we probably
+ // don't want to inline this function.
+ if (isa<IndirectBrInst>(BB->getTerminator()))
+ containsIndirectBr = true;
+
+ // Remember NumInsts for this BB.
+ NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
+}
+
+void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
+ // If this function contains a call that "returns twice" (e.g., setjmp or
+ // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
+ // This is a hack because we depend on the user marking their local variables
+ // as volatile if they are live across a setjmp call, and they probably
+ // won't do this in callers.
+ exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
+ !F->hasFnAttr(Attribute::ReturnsTwice);
+
+ // Look at the size of the callee.
+ for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
+ analyzeBasicBlock(&*BB, TD);
+}
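A sketch of how a client such as the inliner might drive this new file's interface; the Function *F, the optional TargetData *TD, and the threshold of 200 are assumptions for illustration:

    llvm::CodeMetrics Metrics;
    Metrics.analyzeFunction(F, TD);

    // These flags are set by the analysis above and typically veto inlining.
    if (Metrics.isRecursive || Metrics.containsIndirectBr ||
        Metrics.exposesReturnsTwice || Metrics.usesDynamicAlloca)
      return false;

    // NumInsts already folds in per-call argument setup costs, so a crude
    // size test (200 here is an arbitrary illustrative bound) is just:
    return Metrics.NumInsts < 200;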
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index df79849..7a0a4e1e 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -26,6 +26,7 @@
#include "llvm/Operator.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/ErrorHandling.h"
@@ -51,6 +52,42 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
if (C->isAllOnesValue() && !DestTy->isX86_MMXTy())
return Constant::getAllOnesValue(DestTy);
+ // Handle a vector->integer cast.
+ if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
+ ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
+ if (CDV == 0)
+ return ConstantExpr::getBitCast(C, DestTy);
+
+ unsigned NumSrcElts = CDV->getType()->getNumElements();
+
+ Type *SrcEltTy = CDV->getType()->getElementType();
+
+ // If the vector is a vector of floating point, convert it to vector of int
+ // to simplify things.
+ if (SrcEltTy->isFloatingPointTy()) {
+ unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
+ Type *SrcIVTy =
+ VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
+ // Ask VMCore to do the conversion now that #elts line up.
+ C = ConstantExpr::getBitCast(C, SrcIVTy);
+ CDV = cast<ConstantDataVector>(C);
+ }
+
+ // Now that we know that the input value is a vector of integers, just shift
+ // and insert them into our result.
+ unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
+ APInt Result(IT->getBitWidth(), 0);
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ Result <<= BitShift;
+ if (TD.isLittleEndian())
+ Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
+ else
+ Result |= CDV->getElementAsInteger(i);
+ }
+
+ return ConstantInt::get(IT, Result);
+ }
+
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
@@ -64,17 +101,16 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
// If this is a bitcast from constant vector -> vector, fold it.
- ConstantVector *CV = dyn_cast<ConstantVector>(C);
- if (CV == 0)
+ if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
- unsigned NumSrcElt = CV->getNumOperands();
+ unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
- Type *SrcEltTy = CV->getType()->getElementType();
+ Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
@@ -94,7 +130,6 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
- if (!C) return ConstantExpr::getBitCast(C, DestTy);
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
@@ -108,8 +143,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask VMCore to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
- CV = dyn_cast<ConstantVector>(C);
- if (!CV) // If VMCore wasn't able to fold it, bail out.
+ // If VMCore wasn't able to fold it, bail out.
+ if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
+ !isa<ConstantDataVector>(C))
return C;
}
@@ -131,7 +167,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Constant *Elt = Zero;
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
- Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++));
+ Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
@@ -148,28 +184,29 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
Result.push_back(Elt);
}
- } else {
- // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
- unsigned Ratio = NumDstElt/NumSrcElt;
- unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+ return ConstantVector::get(Result);
+ }
+
+ // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
+ unsigned Ratio = NumDstElt/NumSrcElt;
+ unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
+
+ // Loop over each source value, expanding into multiple results.
+ for (unsigned i = 0; i != NumSrcElt; ++i) {
+ Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
+ if (!Src) // Reject constantexpr elements.
+ return ConstantExpr::getBitCast(C, DestTy);
- // Loop over each source value, expanding into multiple results.
- for (unsigned i = 0; i != NumSrcElt; ++i) {
- Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i));
- if (!Src) // Reject constantexpr elements.
- return ConstantExpr::getBitCast(C, DestTy);
+ unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
+ for (unsigned j = 0; j != Ratio; ++j) {
+ // Shift the piece of the value into the right place, depending on
+ // endianness.
+ Constant *Elt = ConstantExpr::getLShr(Src,
+ ConstantInt::get(Src->getType(), ShiftAmt));
+ ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
- unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
- for (unsigned j = 0; j != Ratio; ++j) {
- // Shift the piece of the value into the right place, depending on
- // endianness.
- Constant *Elt = ConstantExpr::getLShr(Src,
- ConstantInt::get(Src->getType(), ShiftAmt));
- ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
- // Truncate and remember this piece.
- Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
- }
+ // Truncate and remember this piece.
+ Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
@@ -272,7 +309,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return false;
}
-
+
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = TD.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
@@ -310,12 +347,20 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
// not reached.
}
- if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
- uint64_t EltSize = TD.getTypeAllocSize(CA->getType()->getElementType());
+ if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
+ isa<ConstantDataSequential>(C)) {
+ Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
+ uint64_t EltSize = TD.getTypeAllocSize(EltTy);
uint64_t Index = ByteOffset / EltSize;
uint64_t Offset = ByteOffset - Index * EltSize;
- for (; Index != CA->getType()->getNumElements(); ++Index) {
- if (!ReadDataFromGlobal(CA->getOperand(Index), Offset, CurPtr,
+ uint64_t NumElts;
+ if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
+ NumElts = AT->getNumElements();
+ else
+ NumElts = cast<VectorType>(C->getType())->getNumElements();
+
+ for (; Index != NumElts; ++Index) {
+ if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
BytesLeft, TD))
return false;
if (EltSize >= BytesLeft)
@@ -327,30 +372,12 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return true;
}
-
- if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
- uint64_t EltSize = TD.getTypeAllocSize(CV->getType()->getElementType());
- uint64_t Index = ByteOffset / EltSize;
- uint64_t Offset = ByteOffset - Index * EltSize;
- for (; Index != CV->getType()->getNumElements(); ++Index) {
- if (!ReadDataFromGlobal(CV->getOperand(Index), Offset, CurPtr,
- BytesLeft, TD))
- return false;
- if (EltSize >= BytesLeft)
- return true;
- Offset = 0;
- BytesLeft -= EltSize;
- CurPtr += EltSize;
- }
- return true;
- }
-
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
- return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
- BytesLeft, TD);
+ return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+ BytesLeft, TD);
}
// Otherwise, unknown initializer type.
@@ -445,9 +472,9 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
- std::string Str;
- if (TD && GetConstantStringInfo(CE, Str) && !Str.empty()) {
- unsigned StrLen = Str.length();
+ StringRef Str;
+ if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) {
+ unsigned StrLen = Str.size();
Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
// Replace load with immediate integer if the result is an integer or fp
@@ -542,8 +569,8 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
- Type *ResultTy,
- const TargetData *TD) {
+ Type *ResultTy, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
if (!TD) return 0;
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
@@ -568,7 +595,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Constant *C =
ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
return C;
}
@@ -576,10 +603,11 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
- Type *ResultTy,
- const TargetData *TD) {
+ Type *ResultTy, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
Constant *Ptr = Ops[0];
- if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
+ if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
+ !Ptr->getType()->isPointerTy())
return 0;
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
@@ -602,7 +630,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
Res = ConstantExpr::getSub(Res, CE->getOperand(1));
Res = ConstantExpr::getIntToPtr(Res, ResultTy);
if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
- Res = ConstantFoldConstantExpression(ResCE, TD);
+ Res = ConstantFoldConstantExpression(ResCE, TD, TLI);
return Res;
}
}
@@ -729,7 +757,9 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
-Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
+Constant *llvm::ConstantFoldInstruction(Instruction *I,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
Constant *CommonValue = 0;
@@ -765,7 +795,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
- TD);
+ TD, TLI);
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
@@ -781,28 +811,29 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
cast<Constant>(EVI->getAggregateOperand()),
EVI->getIndices());
- return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD);
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI);
}
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData. If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
SmallVector<Constant*, 8> Ops;
for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
i != e; ++i) {
Constant *NewC = cast<Constant>(*i);
// Recursively fold the ConstantExpr's operands.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC))
- NewC = ConstantFoldConstantExpression(NewCE, TD);
+ NewC = ConstantFoldConstantExpression(NewCE, TD, TLI);
Ops.push_back(NewC);
}
if (CE->isCompare())
return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
- TD);
- return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD);
+ TD, TLI);
+ return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI);
}
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -817,7 +848,8 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
@@ -830,11 +862,11 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
switch (Opcode) {
default: return 0;
case Instruction::ICmp:
- case Instruction::FCmp: assert(0 && "Invalid for compares");
+ case Instruction::FCmp: llvm_unreachable("Invalid for compares");
case Instruction::Call:
if (Function *F = dyn_cast<Function>(Ops.back()))
if (canConstantFoldCallTo(F))
- return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1));
+ return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
return 0;
case Instruction::PtrToInt:
// If the input is a inttoptr, eliminate the pair. This requires knowing
@@ -888,9 +920,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
- if (Constant *C = CastGEPIndices(Ops, DestTy, TD))
+ if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI))
return C;
- if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD))
+ if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
return C;
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
@@ -903,7 +935,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Ops0, Constant *Ops1,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
// fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
@@ -920,7 +953,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
IntPtrTy, false);
Constant *Null = Constant::getNullValue(C->getType());
- return ConstantFoldCompareInstOperands(Predicate, C, Null, TD);
+ return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
// Only do this transformation if the int is intptrty in size, otherwise
@@ -929,7 +962,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
CE0->getType() == IntPtrTy) {
Constant *C = CE0->getOperand(0);
Constant *Null = Constant::getNullValue(C->getType());
- return ConstantFoldCompareInstOperands(Predicate, C, Null, TD);
+ return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
}
@@ -944,7 +977,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
IntPtrTy, false);
Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
IntPtrTy, false);
- return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD);
+ return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
}
// Only do this transformation if the int is intptrty in size, otherwise
@@ -953,7 +986,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
CE0->getType() == IntPtrTy &&
CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
- CE1->getOperand(0), TD);
+ CE1->getOperand(0), TD, TLI);
}
}
@@ -962,13 +995,15 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
Constant *LHS =
- ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,TD);
+ ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
+ TD, TLI);
Constant *RHS =
- ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,TD);
+ ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
+ TD, TLI);
unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
- return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD);
+ return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
}
}
@@ -981,56 +1016,30 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
/// constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
- if (CE->getOperand(1) != Constant::getNullValue(CE->getOperand(1)->getType()))
+ if (!CE->getOperand(1)->isNullValue())
return 0; // Do not allow stepping over the value!
-
+
// Loop over all of the operands, tracking down which value we are
- // addressing...
- gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
- for (++I; I != E; ++I)
- if (StructType *STy = dyn_cast<StructType>(*I)) {
- ConstantInt *CU = cast<ConstantInt>(I.getOperand());
- assert(CU->getZExtValue() < STy->getNumElements() &&
- "Struct index out of range!");
- unsigned El = (unsigned)CU->getZExtValue();
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
- C = CS->getOperand(El);
- } else if (isa<ConstantAggregateZero>(C)) {
- C = Constant::getNullValue(STy->getElementType(El));
- } else if (isa<UndefValue>(C)) {
- C = UndefValue::get(STy->getElementType(El));
- } else {
- return 0;
- }
- } else if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
- if (ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
- if (CI->getZExtValue() >= ATy->getNumElements())
- return 0;
- if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
- C = CA->getOperand(CI->getZExtValue());
- else if (isa<ConstantAggregateZero>(C))
- C = Constant::getNullValue(ATy->getElementType());
- else if (isa<UndefValue>(C))
- C = UndefValue::get(ATy->getElementType());
- else
- return 0;
- } else if (VectorType *VTy = dyn_cast<VectorType>(*I)) {
- if (CI->getZExtValue() >= VTy->getNumElements())
- return 0;
- if (ConstantVector *CP = dyn_cast<ConstantVector>(C))
- C = CP->getOperand(CI->getZExtValue());
- else if (isa<ConstantAggregateZero>(C))
- C = Constant::getNullValue(VTy->getElementType());
- else if (isa<UndefValue>(C))
- C = UndefValue::get(VTy->getElementType());
- else
- return 0;
- } else {
- return 0;
- }
- } else {
- return 0;
- }
+ // addressing.
+ for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
+ C = C->getAggregateElement(CE->getOperand(i));
+ if (C == 0) return 0;
+ }
+ return C;
+}
+
+/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
+/// indices (with an *implied* zero pointer index that is not in the list),
+/// return the constant value being addressed by a virtual load, or null if
+/// something is funny and we can't decide.
+Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
+ ArrayRef<Constant*> Indices) {
+ // Loop over all of the operands, tracking down which value we are
+ // addressing.
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
+ C = C->getAggregateElement(Indices[i]);
+ if (C == 0) return 0;
+ }
return C;
}
@@ -1045,6 +1054,7 @@ bool
llvm::canConstantFoldCallTo(const Function *F) {
switch (F->getIntrinsicID()) {
case Intrinsic::sqrt:
+ case Intrinsic::pow:
case Intrinsic::powi:
case Intrinsic::bswap:
case Intrinsic::ctpop:
@@ -1115,7 +1125,6 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
if (Ty->isDoubleTy())
return ConstantFP::get(Ty->getContext(), APFloat(V));
llvm_unreachable("Can only constant fold float/double");
- return 0; // dummy return to suppress warning
}
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
@@ -1132,7 +1141,6 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
if (Ty->isDoubleTy())
return ConstantFP::get(Ty->getContext(), APFloat(V));
llvm_unreachable("Can only constant fold float/double");
- return 0; // dummy return to suppress warning
}
/// ConstantFoldConvertToInt - Attempt to fold an SSE floating point to integer
@@ -1143,11 +1151,8 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
/// available for the result. Returns null if the conversion cannot be
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
-static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
- Type *Ty) {
- assert(Op && "Called with NULL operand");
- APFloat Val(Op->getValueAPF());
-
+static Constant *ConstantFoldConvertToInt(const APFloat &Val,
+ bool roundTowardZero, Type *Ty) {
// All of these conversion intrinsics form an integer of at most 64bits.
unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
assert(ResultWidth <= 64 &&
@@ -1168,7 +1173,8 @@ static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *
-llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
+llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
+ const TargetLibraryInfo *TLI) {
if (!F->hasName()) return 0;
StringRef Name = F->getName();
@@ -1183,6 +1189,8 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
}
+ if (!TLI)
+ return 0;
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
@@ -1201,43 +1209,43 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
Op->getValueAPF().convertToDouble();
switch (Name[0]) {
case 'a':
- if (Name == "acos")
+ if (Name == "acos" && TLI->has(LibFunc::acos))
return ConstantFoldFP(acos, V, Ty);
- else if (Name == "asin")
+ else if (Name == "asin" && TLI->has(LibFunc::asin))
return ConstantFoldFP(asin, V, Ty);
- else if (Name == "atan")
+ else if (Name == "atan" && TLI->has(LibFunc::atan))
return ConstantFoldFP(atan, V, Ty);
break;
case 'c':
- if (Name == "ceil")
+ if (Name == "ceil" && TLI->has(LibFunc::ceil))
return ConstantFoldFP(ceil, V, Ty);
- else if (Name == "cos")
+ else if (Name == "cos" && TLI->has(LibFunc::cos))
return ConstantFoldFP(cos, V, Ty);
- else if (Name == "cosh")
+ else if (Name == "cosh" && TLI->has(LibFunc::cosh))
return ConstantFoldFP(cosh, V, Ty);
- else if (Name == "cosf")
+ else if (Name == "cosf" && TLI->has(LibFunc::cosf))
return ConstantFoldFP(cos, V, Ty);
break;
case 'e':
- if (Name == "exp")
+ if (Name == "exp" && TLI->has(LibFunc::exp))
return ConstantFoldFP(exp, V, Ty);
- if (Name == "exp2") {
+ if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
}
break;
case 'f':
- if (Name == "fabs")
+ if (Name == "fabs" && TLI->has(LibFunc::fabs))
return ConstantFoldFP(fabs, V, Ty);
- else if (Name == "floor")
+ else if (Name == "floor" && TLI->has(LibFunc::floor))
return ConstantFoldFP(floor, V, Ty);
break;
case 'l':
- if (Name == "log" && V > 0)
+ if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
return ConstantFoldFP(log, V, Ty);
- else if (Name == "log10" && V > 0)
+ else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
return ConstantFoldFP(log10, V, Ty);
else if (F->getIntrinsicID() == Intrinsic::sqrt &&
(Ty->isFloatTy() || Ty->isDoubleTy())) {
@@ -1248,21 +1256,21 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
}
break;
case 's':
- if (Name == "sin")
+ if (Name == "sin" && TLI->has(LibFunc::sin))
return ConstantFoldFP(sin, V, Ty);
- else if (Name == "sinh")
+ else if (Name == "sinh" && TLI->has(LibFunc::sinh))
return ConstantFoldFP(sinh, V, Ty);
- else if (Name == "sqrt" && V >= 0)
+ else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
return ConstantFoldFP(sqrt, V, Ty);
- else if (Name == "sqrtf" && V >= 0)
+ else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
return ConstantFoldFP(sqrt, V, Ty);
- else if (Name == "sinf")
+ else if (Name == "sinf" && TLI->has(LibFunc::sinf))
return ConstantFoldFP(sin, V, Ty);
break;
case 't':
- if (Name == "tan")
+ if (Name == "tan" && TLI->has(LibFunc::tan))
return ConstantFoldFP(tan, V, Ty);
- else if (Name == "tanh")
+ else if (Name == "tanh" && TLI->has(LibFunc::tanh))
return ConstantFoldFP(tanh, V, Ty);
break;
default:
@@ -1277,10 +1285,6 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
case Intrinsic::ctpop:
return ConstantInt::get(Ty, Op->getValue().countPopulation());
- case Intrinsic::cttz:
- return ConstantInt::get(Ty, Op->getValue().countTrailingZeros());
- case Intrinsic::ctlz:
- return ConstantInt::get(Ty, Op->getValue().countLeadingZeros());
case Intrinsic::convert_from_fp16: {
APFloat Val(Op->getValue());
@@ -1300,24 +1304,31 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
}
}
- if (ConstantVector *Op = dyn_cast<ConstantVector>(Operands[0])) {
+ // Support ConstantVector in case we have an Undef in the top.
+ if (isa<ConstantVector>(Operands[0]) ||
+ isa<ConstantDataVector>(Operands[0])) {
+ Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
default: break;
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse2_cvtsd2si:
case Intrinsic::x86_sse2_cvtsd2si64:
- if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
- return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/false, Ty);
+ if (ConstantFP *FPOp =
+ dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ /*roundTowardZero=*/false, Ty);
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse_cvttss2si64:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse2_cvttsd2si64:
- if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
- return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/true, Ty);
+ if (ConstantFP *FPOp =
+ dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ /*roundTowardZero=*/true, Ty);
}
}
-
+
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
@@ -1337,16 +1348,21 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return 0;
-
+
double Op2V = Ty->isFloatTy() ?
(double)Op2->getValueAPF().convertToFloat():
Op2->getValueAPF().convertToDouble();
- if (Name == "pow")
+ if (F->getIntrinsicID() == Intrinsic::pow) {
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
- if (Name == "fmod")
+ }
+ if (!TLI)
+ return 0;
+ if (Name == "pow" && TLI->has(LibFunc::pow))
+ return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
+ if (Name == "fmod" && TLI->has(LibFunc::fmod))
return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
- if (Name == "atan2")
+ if (Name == "atan2" && TLI->has(LibFunc::atan2))
return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
} else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy())
@@ -1361,7 +1377,6 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
return 0;
}
-
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (F->getIntrinsicID()) {
@@ -1375,7 +1390,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
APInt Res;
bool Overflow;
switch (F->getIntrinsicID()) {
- default: assert(0 && "Invalid case");
+ default: llvm_unreachable("Invalid case");
case Intrinsic::sadd_with_overflow:
Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
break;
@@ -1401,6 +1416,14 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands) {
};
return ConstantStruct::get(cast<StructType>(F->getReturnType()), Ops);
}
+ case Intrinsic::cttz:
+ // FIXME: This should check for Op2 == 1, and become unreachable if
+ // Op1 == 0.
+ return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
+ case Intrinsic::ctlz:
+ // FIXME: This should check for Op2 == 1, and become unreachable if
+ // Op1 == 0.
+ return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
}
}
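The new vector-to-integer path in FoldBitCast is easiest to see on a concrete value. A worked example, little endian, for bitcast <4 x i8> <i8 1, i8 2, i8 3, i8 4> to i32:

    // BitShift = 8; the loop runs i = 0..3, shifting Result left each time
    // and OR-ing in element (NumSrcElts-i-1), i.e. elements 3, 2, 1, 0:
    //   Result = (((4 << 8 | 3) << 8 | 2) << 8) | 1 = 0x04030201
    // That matches memory: element 0 (0x01) sits at the lowest address, and
    // the bytes 01 02 03 04 read back as the little-endian i32 0x04030201.
    // On a big-endian target the loop takes elements in order 0..3 instead,
    // producing 0x01020304.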
diff --git a/contrib/llvm/lib/Analysis/DIBuilder.cpp b/contrib/llvm/lib/Analysis/DIBuilder.cpp
index bfa429d..85913b1 100644
--- a/contrib/llvm/lib/Analysis/DIBuilder.cpp
+++ b/contrib/llvm/lib/Analysis/DIBuilder.cpp
@@ -17,6 +17,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
using namespace llvm;
@@ -76,10 +77,11 @@ void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename,
StringRef Directory, StringRef Producer,
bool isOptimized, StringRef Flags,
unsigned RunTimeVer) {
- assert (Lang <= dwarf::DW_LANG_D && Lang >= dwarf::DW_LANG_C89
- && "Invalid Language tag");
- assert (!Filename.empty()
- && "Unable to create compile unit without filename");
+ assert(((Lang <= dwarf::DW_LANG_Python && Lang >= dwarf::DW_LANG_C89) ||
+ (Lang <= dwarf::DW_LANG_hi_user && Lang >= dwarf::DW_LANG_lo_user)) &&
+ "Invalid Language tag");
+ assert(!Filename.empty() &&
+ "Unable to create compile unit without filename");
Value *TElts[] = { GetTagConstant(VMContext, DW_TAG_base_type) };
TempEnumTypes = MDNode::getTemporary(VMContext, TElts);
Value *THElts[] = { TempEnumTypes };
@@ -189,7 +191,7 @@ DIType DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits,
return DIType(MDNode::get(VMContext, Elts));
}
-/// createQaulifiedType - Create debugging information entry for a qualified
+/// createQualifiedType - Create debugging information entry for a qualified
/// type, e.g. 'const int'.
DIType DIBuilder::createQualifiedType(unsigned Tag, DIType FromTy) {
// Qualified types are encoded in DIDerivedType format.
@@ -358,13 +360,58 @@ DIType DIBuilder::createObjCIVar(StringRef Name,
return DIType(MDNode::get(VMContext, Elts));
}
+/// createObjCIVar - Create debugging information entry for Objective-C
+/// instance variable.
+DIType DIBuilder::createObjCIVar(StringRef Name,
+ DIFile File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType Ty, MDNode *PropertyNode) {
+ // TAG_member is encoded in DIDerivedType format.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_member),
+ getNonCompileUnitScope(File),
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ Ty,
+ PropertyNode
+ };
+ return DIType(MDNode::get(VMContext, Elts));
+}
+
+/// createObjCProperty - Create debugging information entry for Objective-C
+/// property.
+DIObjCProperty DIBuilder::createObjCProperty(StringRef Name,
+ DIFile File, unsigned LineNumber,
+ StringRef GetterName,
+ StringRef SetterName,
+ unsigned PropertyAttributes,
+ DIType Ty) {
+ Value *Elts[] = {
+ GetTagConstant(VMContext, dwarf::DW_TAG_APPLE_property),
+ MDString::get(VMContext, Name),
+ File,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ MDString::get(VMContext, GetterName),
+ MDString::get(VMContext, SetterName),
+ ConstantInt::get(Type::getInt32Ty(VMContext), PropertyAttributes),
+ Ty
+ };
+ return DIObjCProperty(MDNode::get(VMContext, Elts));
+}
+
/// createClassType - Create debugging information entry for a class.
DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name,
DIFile File, unsigned LineNumber,
uint64_t SizeInBits, uint64_t AlignInBits,
uint64_t OffsetInBits, unsigned Flags,
DIType DerivedFrom, DIArray Elements,
- MDNode *VTableHoder, MDNode *TemplateParams) {
+ MDNode *VTableHolder, MDNode *TemplateParams) {
// TAG_class_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_class_type),
@@ -379,7 +426,7 @@ DIType DIBuilder::createClassType(DIDescriptor Context, StringRef Name,
DerivedFrom,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- VTableHoder,
+ VTableHolder,
TemplateParams
};
return DIType(MDNode::get(VMContext, Elts));
@@ -440,7 +487,7 @@ DIType DIBuilder::createStructType(DIDescriptor Context, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -465,7 +512,7 @@ DIType DIBuilder::createUnionType(DIDescriptor Scope, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -484,9 +531,9 @@ DIType DIBuilder::createSubroutineType(DIFile File, DIArray ParameterTypes) {
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt64Ty(VMContext), 0),
ConstantInt::get(Type::getInt64Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
ParameterTypes,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -500,7 +547,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name,
DIFile File, unsigned LineNumber,
uint64_t SizeInBits,
uint64_t AlignInBits,
- DIArray Elements) {
+ DIArray Elements) {
// TAG_enumeration_type is encoded in DICompositeType format.
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type),
@@ -512,7 +559,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name,
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -628,6 +675,31 @@ DIType DIBuilder::createTemporaryType(DIFile F) {
return DIType(Node);
}
+/// createForwardDecl - Create a temporary forward-declared type that
+/// can be RAUW'd if the full type is seen.
+DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIFile F,
+ unsigned Line, unsigned RuntimeLang) {
+ // Create a temporary MDNode.
+ Value *Elts[] = {
+ GetTagConstant(VMContext, Tag),
+ NULL, // TheCU
+ MDString::get(VMContext, Name),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Line),
+ // To ease the transition, include sizes etc. of 0.
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0),
+ ConstantInt::get(Type::getInt32Ty(VMContext),
+ DIDescriptor::FlagFwdDecl),
+ NULL,
+ DIArray(),
+ ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang)
+ };
+ MDNode *Node = MDNode::getTemporary(VMContext, Elts);
+ return DIType(Node);
+}
+
/// getOrCreateArray - Get a DIArray, create one if required.
DIArray DIBuilder::getOrCreateArray(ArrayRef<Value *> Elements) {
if (Elements.empty()) {
@@ -738,7 +810,7 @@ DIVariable DIBuilder::createComplexVariable(unsigned Tag, DIDescriptor Scope,
Elts.push_back(MDString::get(VMContext, Name));
Elts.push_back(F);
Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext),
- (LineNo | (ArgNo << 24))));
+ (LineNo | (ArgNo << 24))));
Elts.push_back(Ty);
Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
@@ -754,6 +826,7 @@ DISubprogram DIBuilder::createFunction(DIDescriptor Context,
DIFile File, unsigned LineNo,
DIType Ty,
bool isLocalToUnit, bool isDefinition,
+ unsigned ScopeLine,
unsigned Flags, bool isOptimized,
Function *Fn,
MDNode *TParams,
@@ -777,13 +850,14 @@ DISubprogram DIBuilder::createFunction(DIDescriptor Context,
ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
ConstantInt::get(Type::getInt32Ty(VMContext), 0),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ NULL,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
Fn,
TParams,
Decl,
- THolder
+ THolder,
+ ConstantInt::get(Type::getInt32Ty(VMContext), ScopeLine)
};
MDNode *Node = MDNode::get(VMContext, Elts);
@@ -831,7 +905,9 @@ DISubprogram DIBuilder::createMethod(DIDescriptor Context,
Fn,
TParam,
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
- THolder
+ THolder,
+ // FIXME: Do we want to use a different scope line?
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo)
};
MDNode *Node = MDNode::get(VMContext, Elts);
return DISubprogram(Node);
@@ -854,7 +930,7 @@ DINameSpace DIBuilder::createNameSpace(DIDescriptor Scope, StringRef Name,
/// createLexicalBlockFile - This creates a new MDNode that encapsulates
/// an existing scope with a new filename.
DILexicalBlockFile DIBuilder::createLexicalBlockFile(DIDescriptor Scope,
- DIFile File) {
+ DIFile File) {
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_lexical_block),
Scope,
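A sketch of the new forward-declaration entry point added above; DIB and File are assumed to be a DIBuilder and DIFile set up earlier in the usual way:

    // Emit a placeholder for a type whose definition has not been seen yet.
    llvm::DIType FwdTy =
        DIB.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, "MyStruct",
                              File, /*Line=*/42, /*RuntimeLang=*/0);
    // The node is created with MDNode::getTemporary and FlagFwdDecl, so it
    // can be RAUW'd with the full composite type if the definition appears.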
diff --git a/contrib/llvm/lib/Analysis/DebugInfo.cpp b/contrib/llvm/lib/Analysis/DebugInfo.cpp
index 44457d3..f61a8f3 100644
--- a/contrib/llvm/lib/Analysis/DebugInfo.cpp
+++ b/contrib/llvm/lib/Analysis/DebugInfo.cpp
@@ -68,7 +68,7 @@ uint64_t DIDescriptor::getUInt64Field(unsigned Elt) const {
return 0;
if (Elt < DbgNode->getNumOperands())
- if (ConstantInt *CI = dyn_cast<ConstantInt>(DbgNode->getOperand(Elt)))
+ if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(DbgNode->getOperand(Elt)))
return CI->getZExtValue();
return 0;
@@ -289,6 +289,10 @@ bool DIDescriptor::isEnumerator() const {
return DbgNode && getTag() == dwarf::DW_TAG_enumerator;
}
+/// isObjCProperty - Return true if the specified tag is DW_TAG_APPLE_property.
+bool DIDescriptor::isObjCProperty() const {
+ return DbgNode && getTag() == dwarf::DW_TAG_APPLE_property;
+}
//===----------------------------------------------------------------------===//
// Simple Descriptor Constructors and other Methods
//===----------------------------------------------------------------------===//
@@ -373,6 +377,19 @@ bool DICompileUnit::Verify() const {
return true;
}
+/// Verify - Verify that an ObjC property is well formed.
+bool DIObjCProperty::Verify() const {
+ if (!DbgNode)
+ return false;
+ unsigned Tag = getTag();
+ if (Tag != dwarf::DW_TAG_APPLE_property) return false;
+ DIType Ty = getType();
+ if (!Ty.Verify()) return false;
+
+ // Don't worry about the rest of the strings for now.
+ return true;
+}
+
/// Verify - Verify that a type descriptor is well formed.
bool DIType::Verify() const {
if (!DbgNode)
@@ -482,6 +499,7 @@ bool DINameSpace::Verify() const {
/// return base type size.
uint64_t DIDerivedType::getOriginalTypeSize() const {
unsigned Tag = getTag();
+
if (Tag == dwarf::DW_TAG_member || Tag == dwarf::DW_TAG_typedef ||
Tag == dwarf::DW_TAG_const_type || Tag == dwarf::DW_TAG_volatile_type ||
Tag == dwarf::DW_TAG_restrict_type) {
@@ -490,7 +508,13 @@ uint64_t DIDerivedType::getOriginalTypeSize() const {
// approach.
if (!BaseType.isValid())
return getSizeInBits();
- if (BaseType.isDerivedType())
+ // If this is a derived type, go ahead and get the base type, unless it's
+ // a reference, in which case it's just the size of the field. Pointer types
+ // have no need of this since they're a different kind of qualification
+ // on the type.
+ if (BaseType.getTag() == dwarf::DW_TAG_reference_type)
+ return getSizeInBits();
+ else if (BaseType.isDerivedType())
return DIDerivedType(BaseType).getOriginalTypeSize();
else
return BaseType.getSizeInBits();
@@ -499,6 +523,13 @@ uint64_t DIDerivedType::getOriginalTypeSize() const {
return getSizeInBits();
}
+/// getObjCProperty - Return property node, if this ivar is associated with one.
+MDNode *DIDerivedType::getObjCProperty() const {
+ if (getVersion() <= LLVMDebugVersion11 || DbgNode->getNumOperands() <= 10)
+ return NULL;
+ return dyn_cast_or_null<MDNode>(DbgNode->getOperand(10));
+}
+
/// isInlinedFnArgument - Return true if this variable provides debugging
/// information for an inlined function's arguments.
bool DIVariable::isInlinedFnArgument(const Function *CurFn) {
@@ -565,8 +596,7 @@ StringRef DIScope::getFilename() const {
return DIType(DbgNode).getFilename();
if (isFile())
return DIFile(DbgNode).getFilename();
- assert(0 && "Invalid DIScope!");
- return StringRef();
+ llvm_unreachable("Invalid DIScope!");
}
StringRef DIScope::getDirectory() const {
@@ -586,8 +616,7 @@ StringRef DIScope::getDirectory() const {
return DIType(DbgNode).getDirectory();
if (isFile())
return DIFile(DbgNode).getDirectory();
- assert(0 && "Invalid DIScope!");
- return StringRef();
+ llvm_unreachable("Invalid DIScope!");
}
DIArray DICompileUnit::getEnumTypes() const {
@@ -632,6 +661,32 @@ DIArray DICompileUnit::getGlobalVariables() const {
}
//===----------------------------------------------------------------------===//
+// DIDescriptor: vtable anchors for all descriptors.
+//===----------------------------------------------------------------------===//
+
+void DIScope::anchor() { }
+
+void DICompileUnit::anchor() { }
+
+void DIFile::anchor() { }
+
+void DIType::anchor() { }
+
+void DIBasicType::anchor() { }
+
+void DIDerivedType::anchor() { }
+
+void DICompositeType::anchor() { }
+
+void DISubprogram::anchor() { }
+
+void DILexicalBlock::anchor() { }
+
+void DINameSpace::anchor() { }
+
+void DILexicalBlockFile::anchor() { }
+
+//===----------------------------------------------------------------------===//
// DIDescriptor: dump routines for all descriptors.
//===----------------------------------------------------------------------===//
@@ -679,8 +734,13 @@ void DIType::print(raw_ostream &OS) const {
if (isBasicType())
DIBasicType(DbgNode).print(OS);
- else if (isDerivedType())
- DIDerivedType(DbgNode).print(OS);
+ else if (isDerivedType()) {
+ DIDerivedType DTy = DIDerivedType(DbgNode);
+ DTy.print(OS);
+ DICompositeType CTy = getDICompositeType(DTy);
+ if (CTy.Verify())
+ CTy.print(OS);
+ }
else if (isCompositeType())
DICompositeType(DbgNode).print(OS);
else {
@@ -698,7 +758,9 @@ void DIBasicType::print(raw_ostream &OS) const {
/// print - Print derived type.
void DIDerivedType::print(raw_ostream &OS) const {
- OS << "\n\t Derived From: "; getTypeDerivedFrom().print(OS);
+ OS << "\n\t Derived From: ";
+ getTypeDerivedFrom().print(OS);
+ OS << "\n\t";
}
/// print - Print composite type.
@@ -725,6 +787,9 @@ void DISubprogram::print(raw_ostream &OS) const {
if (isDefinition())
OS << " [def] ";
+ if (getScopeLineNumber() != getLineNumber())
+ OS << " [Scope: " << getScopeLineNumber() << "] ";
+
OS << "\n";
}
@@ -927,9 +992,30 @@ DIVariable llvm::cleanseInlinedVariable(MDNode *DV, LLVMContext &VMContext) {
/// processModule - Process entire module and collect debug info.
void DebugInfoFinder::processModule(Module &M) {
- if (NamedMDNode *CU_Nodes = M.getNamedMetadata("llvm.dbg.cu"))
- for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i)
- addCompileUnit(DICompileUnit(CU_Nodes->getOperand(i)));
+ if (NamedMDNode *CU_Nodes = M.getNamedMetadata("llvm.dbg.cu")) {
+ for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
+ DICompileUnit CU(CU_Nodes->getOperand(i));
+ addCompileUnit(CU);
+ if (CU.getVersion() > LLVMDebugVersion10) {
+ DIArray GVs = CU.getGlobalVariables();
+ for (unsigned i = 0, e = GVs.getNumElements(); i != e; ++i) {
+ DIGlobalVariable DIG(GVs.getElement(i));
+ if (addGlobalVariable(DIG))
+ processType(DIG.getType());
+ }
+ DIArray SPs = CU.getSubprograms();
+ for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i)
+ processSubprogram(DISubprogram(SPs.getElement(i)));
+ DIArray EnumTypes = CU.getEnumTypes();
+ for (unsigned i = 0, e = EnumTypes.getNumElements(); i != e; ++i)
+ processType(DIType(EnumTypes.getElement(i)));
+ DIArray RetainedTypes = CU.getRetainedTypes();
+ for (unsigned i = 0, e = RetainedTypes.getNumElements(); i != e; ++i)
+ processType(DIType(RetainedTypes.getElement(i)));
+ return;
+ }
+ }
+ }
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
for (Function::iterator FI = (*I).begin(), FE = (*I).end(); FI != FE; ++FI)
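The processModule change above prefers the llvm.dbg.cu named metadata when the debug info version is new enough. A sketch of a typical consumer, assuming the 3.x-era DebugInfoFinder iterator API and a Module &M:

    llvm::DebugInfoFinder Finder;
    Finder.processModule(M);

    // Walk every subprogram the finder collected, whichever path filled it.
    for (llvm::DebugInfoFinder::iterator I = Finder.subprogram_begin(),
                                         E = Finder.subprogram_end();
         I != E; ++I) {
      llvm::DISubprogram SP(*I);
      llvm::errs() << SP.getName() << " at line " << SP.getLineNumber() << "\n";
    }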
diff --git a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
index 6de4e1e..1604576 100644
--- a/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
+++ b/contrib/llvm/lib/Analysis/DominanceFrontier.cpp
@@ -35,6 +35,8 @@ namespace {
};
}
+void DominanceFrontier::anchor() { }
+
const DominanceFrontier::DomSetType &
DominanceFrontier::calculate(const DominatorTree &DT,
const DomTreeNode *Node) {
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
index 2e79eab..0df3e8a 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
@@ -127,16 +127,9 @@ private:
}
}
- // Loop over all of the users of the function, looking for non-call uses.
- for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I){
- User *U = *I;
- if ((!isa<CallInst>(U) && !isa<InvokeInst>(U))
- || !CallSite(cast<Instruction>(U)).isCallee(I)) {
- // Not a call, or being used as a parameter rather than as the callee.
- ExternalCallingNode->addCalledFunction(CallSite(), Node);
- break;
- }
- }
+ // If this function has its address taken, anything could call it.
+ if (F->hasAddressTaken())
+ ExternalCallingNode->addCalledFunction(CallSite(), Node);
// If this function is not defined in this translation unit, it could call
// anything.
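For context, hasAddressTaken() is the conservative test the simplified hunk relies on. A plain C++ sketch of the distinction it draws (illustrative names only, not part of the patch):

    static void callee() {}
    void (*FnPtr)();
    void direct()   { callee(); }       // plain call: the address is not taken
    void indirect() { FnPtr = callee; } // the address escapes; after this the
                                        // ExternalCallingNode must assume any
                                        // caller can reach 'callee'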
diff --git a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index b226d66..c1d8e3e 100644
--- a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -21,6 +21,7 @@
#include "llvm/Instructions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/MemoryBuiltins.h"
@@ -467,6 +468,11 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
} else if (isMalloc(&cast<Instruction>(*II)) ||
isFreeCall(&cast<Instruction>(*II))) {
FunctionEffect |= ModRef;
+ } else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
+ // The callgraph doesn't include intrinsic calls.
+ Function *Callee = Intrinsic->getCalledFunction();
+ ModRefBehavior Behaviour = AliasAnalysis::getModRefBehavior(Callee);
+ FunctionEffect |= (Behaviour & ModRef);
}
if ((FunctionEffect & Mod) == 0)
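The masking in the new intrinsic branch works because the mod/ref bits form a small lattice. A freestanding sketch of the accumulation; the enum values mirror AliasAnalysis::ModRefResult as I recall it (an assumption to verify against the header):

    enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Mod | Ref };

    int main() {
      int FunctionEffect = NoModRef;
      int Behaviour = Ref;                    // e.g. a read-only intrinsic
      FunctionEffect |= (Behaviour & ModRef); // keep only the mod/ref bits
      return FunctionEffect == Ref ? 0 : 1;   // still "onlyReadsMemory"-safe
    }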
diff --git a/contrib/llvm/lib/Analysis/IVUsers.cpp b/contrib/llvm/lib/Analysis/IVUsers.cpp
index d0ca892..b80966b 100644
--- a/contrib/llvm/lib/Analysis/IVUsers.cpp
+++ b/contrib/llvm/lib/Analysis/IVUsers.cpp
@@ -79,10 +79,44 @@ static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
return false;
}
-/// AddUsersIfInteresting - Inspect the specified instruction. If it is a
+/// Return true if all loop headers that dominate this block are in simplified
+/// form.
+static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
+ const LoopInfo *LI,
+ SmallPtrSet<Loop*,16> &SimpleLoopNests) {
+ Loop *NearestLoop = 0;
+ for (DomTreeNode *Rung = DT->getNode(BB);
+ Rung; Rung = Rung->getIDom()) {
+ BasicBlock *DomBB = Rung->getBlock();
+ Loop *DomLoop = LI->getLoopFor(DomBB);
+ if (DomLoop && DomLoop->getHeader() == DomBB) {
+ // If the domtree walk reaches a loop with no preheader, return false.
+ if (!DomLoop->isLoopSimplifyForm())
+ return false;
+ // If we have already checked this loop nest, stop checking.
+ if (SimpleLoopNests.count(DomLoop))
+ break;
+ // If we have not already checked this loop nest, remember the loop
+ // header nearest to BB. The nearest loop may not contain BB.
+ if (!NearestLoop)
+ NearestLoop = DomLoop;
+ }
+ }
+ if (NearestLoop)
+ SimpleLoopNests.insert(NearestLoop);
+ return true;
+}
+
+/// AddUsersImpl - Inspect the specified instruction. If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true. Otherwise, return false.
-bool IVUsers::AddUsersIfInteresting(Instruction *I) {
+bool IVUsers::AddUsersImpl(Instruction *I,
+ SmallPtrSet<Loop*,16> &SimpleLoopNests) {
+ // Add this IV user to the Processed set before returning false to ensure that
+ // all IV users are members of the set. See IVUsers::isIVUserOrOperand.
+ if (!Processed.insert(I))
+ return true; // Instruction already handled.
+
if (!SE->isSCEVable(I->getType()))
return false; // Void and FP expressions cannot be reduced.
@@ -93,9 +127,6 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
return false;
- if (!Processed.insert(I))
- return true; // Instruction already handled.
-
// Get the symbolic expression for this instruction.
const SCEV *ISE = SE->getSCEV(I);
@@ -115,6 +146,18 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
if (isa<PHINode>(User) && Processed.count(User))
continue;
+ // Only consider IVUsers that are dominated by simplified loop
+ // headers. Otherwise, SCEVExpander will crash.
+ BasicBlock *UseBB = User->getParent();
+ // A phi's use is live out of its predecessor block.
+ if (PHINode *PHI = dyn_cast<PHINode>(User)) {
+ unsigned OperandNo = UI.getOperandNo();
+ unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
+ UseBB = PHI->getIncomingBlock(ValNo);
+ }
+ if (!isSimplifiedLoopNest(UseBB, DT, LI, SimpleLoopNests))
+ return false;
+
// Descend recursively, but not into PHI nodes outside the current loop.
// It's important to see the entire expression outside the loop to get
// choices that depend on addressing mode use right, although we won't
@@ -124,12 +167,12 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
bool AddUserToIVUsers = false;
if (LI->getLoopFor(User->getParent()) != L) {
if (isa<PHINode>(User) || Processed.count(User) ||
- !AddUsersIfInteresting(User)) {
+ !AddUsersImpl(User, SimpleLoopNests)) {
DEBUG(dbgs() << "FOUND USER in other loop: " << *User << '\n'
<< " OF SCEV: " << *ISE << '\n');
AddUserToIVUsers = true;
}
- } else if (Processed.count(User) || !AddUsersIfInteresting(User)) {
+ } else if (Processed.count(User) || !AddUsersImpl(User, SimpleLoopNests)) {
DEBUG(dbgs() << "FOUND USER: " << *User << '\n'
<< " OF SCEV: " << *ISE << '\n');
AddUserToIVUsers = true;
@@ -153,6 +196,15 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
return true;
}
+bool IVUsers::AddUsersIfInteresting(Instruction *I) {
+ // SCEVExpander can only handle users that are dominated by simplified loop
+ // entries. Keep track of all loops that are only dominated by other simple
+ // loops so we don't traverse the domtree for each user.
+ SmallPtrSet<Loop*,16> SimpleLoopNests;
+
+ return AddUsersImpl(I, SimpleLoopNests);
+}
+
IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
IVUses.push_back(new IVStrideUse(this, User, Operand));
return IVUses.back();
@@ -268,6 +320,7 @@ void IVStrideUse::transformToPostInc(const Loop *L) {
void IVStrideUse::deleted() {
// Remove this user from the list.
+ Parent->Processed.erase(this->getUser());
Parent->IVUses.erase(this);
// this now dangles!
}
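The domtree walk in isSimplifiedLoopNest above memoizes whole loop nests so repeated queries stay cheap. A generic sketch of that pattern with stand-in types (not the LLVM Loop/DomTree classes):

    #include <set>

    struct Node { Node *Parent; bool Simplified; };

    static bool ancestorsSimplified(Node *N, std::set<Node *> &Checked) {
      Node *Nearest = 0;
      for (; N; N = N->Parent) {
        if (!N->Simplified)
          return false;            // one bad ancestor poisons the block
        if (Checked.count(N))
          break;                   // this chain was validated earlier
        if (!Nearest)
          Nearest = N;             // remember the lowest unchecked ancestor
      }
      if (Nearest)
        Checked.insert(Nearest);   // later queries can stop here
      return true;
    }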
diff --git a/contrib/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm/lib/Analysis/InlineCost.cpp
index e12e322..3e3d2ab 100644
--- a/contrib/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm/lib/Analysis/InlineCost.cpp
@@ -11,645 +11,1012 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/CallSite.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/InstVisitor.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Operator.h"
+#include "llvm/GlobalAlias.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
using namespace llvm;
-/// callIsSmall - If a call is likely to lower to a single target instruction,
-/// or is otherwise deemed small return true.
-/// TODO: Perhaps calls like memcpy, strcpy, etc?
-bool llvm::callIsSmall(const Function *F) {
- if (!F) return false;
+STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
+
+namespace {
+
+class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
+ typedef InstVisitor<CallAnalyzer, bool> Base;
+ friend class InstVisitor<CallAnalyzer, bool>;
+
+ // TargetData if available, or null.
+ const TargetData *const TD;
+
+ // The called function.
+ Function &F;
+
+ int Threshold;
+ int Cost;
+ const bool AlwaysInline;
+
+ bool IsRecursive;
+ bool ExposesReturnsTwice;
+ bool HasDynamicAlloca;
+ unsigned NumInstructions, NumVectorInstructions;
+ int FiftyPercentVectorBonus, TenPercentVectorBonus;
+ int VectorBonus;
+
+ // While we walk the potentially-inlined instructions, we build up and
+ // maintain a mapping of simplified values specific to this callsite. The
+ // idea is to propagate any special information we have about arguments to
+ // this call through the inlinable section of the function, and account for
+ // likely simplifications post-inlining. The most important aspect we track
+ // is CFG altering simplifications -- when we prove a basic block dead, that
+ // can cause dramatic shifts in the cost of inlining a function.
+ DenseMap<Value *, Constant *> SimplifiedValues;
+
+ // Keep track of the values which map back (through function arguments) to
+ // allocas on the caller stack which could be simplified through SROA.
+ DenseMap<Value *, Value *> SROAArgValues;
+
+ // The mapping of caller Alloca values to their accumulated cost savings. If
+ // we have to disable SROA for one of the allocas, this tells us how much
+ // cost must be added.
+ DenseMap<Value *, int> SROAArgCosts;
+
+ // Keep track of values which map to a pointer base and constant offset.
+ DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;
+
+ // Custom simplification helper routines.
+ bool isAllocaDerivedArg(Value *V);
+ bool lookupSROAArgAndCost(Value *V, Value *&Arg,
+ DenseMap<Value *, int>::iterator &CostIt);
+ void disableSROA(DenseMap<Value *, int>::iterator CostIt);
+ void disableSROA(Value *V);
+ void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
+ int InstructionCost);
+ bool handleSROACandidate(bool IsSROAValid,
+ DenseMap<Value *, int>::iterator CostIt,
+ int InstructionCost);
+ bool isGEPOffsetConstant(GetElementPtrInst &GEP);
+ bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
+ ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
+
+ // Custom analysis routines.
+ bool analyzeBlock(BasicBlock *BB);
+
+ // Disable several entry points to the visitor so we don't accidentally use
+ // them by declaring but not defining them here.
+ void visit(Module *); void visit(Module &);
+ void visit(Function *); void visit(Function &);
+ void visit(BasicBlock *); void visit(BasicBlock &);
+
+ // Provide base case for our instruction visit.
+ bool visitInstruction(Instruction &I);
+
+ // Our visit overrides.
+ bool visitAlloca(AllocaInst &I);
+ bool visitPHI(PHINode &I);
+ bool visitGetElementPtr(GetElementPtrInst &I);
+ bool visitBitCast(BitCastInst &I);
+ bool visitPtrToInt(PtrToIntInst &I);
+ bool visitIntToPtr(IntToPtrInst &I);
+ bool visitCastInst(CastInst &I);
+ bool visitUnaryInstruction(UnaryInstruction &I);
+ bool visitICmp(ICmpInst &I);
+ bool visitSub(BinaryOperator &I);
+ bool visitBinaryOperator(BinaryOperator &I);
+ bool visitLoad(LoadInst &I);
+ bool visitStore(StoreInst &I);
+ bool visitCallSite(CallSite CS);
+
+public:
+ CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold)
+ : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
+ AlwaysInline(F.hasFnAttr(Attribute::AlwaysInline)),
+ IsRecursive(false), ExposesReturnsTwice(false), HasDynamicAlloca(false),
+ NumInstructions(0), NumVectorInstructions(0),
+ FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
+ NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
+ NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
+ NumInstructionsSimplified(0), SROACostSavings(0), SROACostSavingsLost(0) {
+ }
- if (F->hasLocalLinkage()) return false;
+ bool analyzeCall(CallSite CS);
- if (!F->hasName()) return false;
+ int getThreshold() { return Threshold; }
+ int getCost() { return Cost; }
- StringRef Name = F->getName();
+ // Keep a bunch of stats about the cost savings found so we can print them
+ // out when debugging.
+ unsigned NumConstantArgs;
+ unsigned NumConstantOffsetPtrArgs;
+ unsigned NumAllocaArgs;
+ unsigned NumConstantPtrCmps;
+ unsigned NumConstantPtrDiffs;
+ unsigned NumInstructionsSimplified;
+ unsigned SROACostSavings;
+ unsigned SROACostSavingsLost;
- // These will all likely lower to a single selection DAG node.
- if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
- Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
- Name == "sin" || Name == "sinf" || Name == "sinl" ||
- Name == "cos" || Name == "cosf" || Name == "cosl" ||
- Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" )
- return true;
+ void dump();
+};
- // These are all likely to be optimized into something smaller.
- if (Name == "pow" || Name == "powf" || Name == "powl" ||
- Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
- Name == "floor" || Name == "floorf" || Name == "ceil" ||
- Name == "round" || Name == "ffs" || Name == "ffsl" ||
- Name == "abs" || Name == "labs" || Name == "llabs")
- return true;
+} // namespace
- return false;
+/// \brief Test whether the given value is an Alloca-derived function argument.
+bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
+ return SROAArgValues.count(V);
}
-/// analyzeBasicBlock - Fill in the current structure with information gleaned
-/// from the specified block.
-void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
- const TargetData *TD) {
- ++NumBlocks;
- unsigned NumInstsBeforeThisBB = NumInsts;
- for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
- II != E; ++II) {
- if (isa<PHINode>(II)) continue; // PHI nodes don't count.
-
- // Special handling for calls.
- if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
- if (isa<DbgInfoIntrinsic>(II))
- continue; // Debug intrinsics don't count as size.
-
- ImmutableCallSite CS(cast<Instruction>(II));
-
- if (const Function *F = CS.getCalledFunction()) {
- // If a function is both internal and has a single use, then it is
- // extremely likely to get inlined in the future (it was probably
- // exposed by an interleaved devirtualization pass).
- if (F->hasInternalLinkage() && F->hasOneUse())
- ++NumInlineCandidates;
-
- // If this call is to function itself, then the function is recursive.
- // Inlining it into other functions is a bad idea, because this is
- // basically just a form of loop peeling, and our metrics aren't useful
- // for that case.
- if (F == BB->getParent())
- isRecursive = true;
- }
+/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
+/// Returns false if V does not map to a SROA-candidate.
+bool CallAnalyzer::lookupSROAArgAndCost(
+ Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
+ if (SROAArgValues.empty() || SROAArgCosts.empty())
+ return false;
- if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
- // Each argument to a call takes on average one instruction to set up.
- NumInsts += CS.arg_size();
+ DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
+ if (ArgIt == SROAArgValues.end())
+ return false;
- // We don't want inline asm to count as a call - that would prevent loop
- // unrolling. The argument setup cost is still real, though.
- if (!isa<InlineAsm>(CS.getCalledValue()))
- ++NumCalls;
- }
- }
+ Arg = ArgIt->second;
+ CostIt = SROAArgCosts.find(Arg);
+ return CostIt != SROAArgCosts.end();
+}
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
- if (!AI->isStaticAlloca())
- this->usesDynamicAlloca = true;
- }
+/// \brief Disable SROA for the candidate marked by this cost iterator.
+///
+/// This marks the candidate as no longer viable for SROA, and adds the cost
+/// savings associated with it back into the inline cost measurement.
+void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
+ // If we're no longer able to perform SROA we need to undo its cost savings
+ // and prevent subsequent analysis.
+ Cost += CostIt->second;
+ SROACostSavings -= CostIt->second;
+ SROACostSavingsLost += CostIt->second;
+ SROAArgCosts.erase(CostIt);
+}
- if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
- ++NumVectorInsts;
+/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
+void CallAnalyzer::disableSROA(Value *V) {
+ Value *SROAArg;
+ DenseMap<Value *, int>::iterator CostIt;
+ if (lookupSROAArgAndCost(V, SROAArg, CostIt))
+ disableSROA(CostIt);
+}
- if (const CastInst *CI = dyn_cast<CastInst>(II)) {
- // Noop casts, including ptr <-> int, don't count.
- if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
- isa<PtrToIntInst>(CI))
- continue;
- // trunc to a native type is free (assuming the target has compare and
- // shift-right of the same width).
- if (isa<TruncInst>(CI) && TD &&
- TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
- continue;
- // Result of a cmp instruction is often extended (to be used by other
- // cmp instructions, logical or return instructions). These are usually
- // nop on most sane targets.
- if (isa<CmpInst>(CI->getOperand(0)))
- continue;
- } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(II)){
- // If a GEP has all constant indices, it will probably be folded with
- // a load/store.
- if (GEPI->hasAllConstantIndices())
- continue;
+/// \brief Accumulate the given cost for a particular SROA candidate.
+void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
+ int InstructionCost) {
+ CostIt->second += InstructionCost;
+ SROACostSavings += InstructionCost;
+}
+
+/// \brief Helper for the common pattern of handling a SROA candidate.
+/// Either accumulates the cost savings if the SROA remains valid, or disables
+/// SROA for the candidate.
+bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
+ DenseMap<Value *, int>::iterator CostIt,
+ int InstructionCost) {
+ if (IsSROAValid) {
+ accumulateSROACost(CostIt, InstructionCost);
+ return true;
+ }
+
+ disableSROA(CostIt);
+ return false;
+}
+
+/// \brief Check whether a GEP's indices are all constant.
+///
+/// Respects any simplified values known during the analysis of this callsite.
+bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
+ for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
+ if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
+ return false;
+
+ return true;
+}
+
+/// \brief Accumulate a constant GEP offset into an APInt if possible.
+///
+/// Returns false if unable to compute the offset for any reason. Respects any
+/// simplified values known during the analysis of this callsite.
+bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
+ if (!TD)
+ return false;
+
+ unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ assert(IntPtrWidth == Offset.getBitWidth());
+
+ for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
+ OpC = dyn_cast<ConstantInt>(SimpleOp);
+ if (!OpC)
+ return false;
+ if (OpC->isZero()) continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = TD->getStructLayout(STy);
+ Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
+ continue;
}
- ++NumInsts;
+ APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
+ Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
}
+ return true;
+}
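The struct/array arithmetic in accumulateGEPOffset matches the usual byte-offset computation. A worked example with illustrative types; the layout numbers assume a typical LP64 target:

    #include <cstddef>
    #include <cstdio>

    struct S { int A; double B; };  // offsetof(S, B) == 8 after padding (typ.)

    int main() {
      // Mirrors one GEP step per index: array index * alloc size, then the
      // field offset from the struct layout.
      size_t Offset = 1 * sizeof(S) + offsetof(S, B);  // 16 + 8 == 24 (typ.)
      std::printf("%zu\n", Offset);
      return 0;
    }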
- if (isa<ReturnInst>(BB->getTerminator()))
- ++NumRets;
+bool CallAnalyzer::visitAlloca(AllocaInst &I) {
+ // FIXME: Check whether inlining will turn a dynamic alloca into a static
+ // alloca, and handle that case.
- // We never want to inline functions that contain an indirectbr. This is
- // incorrect because all the blockaddress's (in static global initializers
- // for example) would be referring to the original function, and this indirect
- // jump would jump from the inlined copy of the function into the original
- // function which is extremely undefined behavior.
- if (isa<IndirectBrInst>(BB->getTerminator()))
- containsIndirectBr = true;
+ // We will happily inline static alloca instructions or dynamic alloca
+ // instructions in always-inline situations.
+ if (AlwaysInline || I.isStaticAlloca())
+ return Base::visitAlloca(I);
- // Remember NumInsts for this BB.
- NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
+ // FIXME: This is overly conservative. Dynamic allocas are inefficient for
+ // a variety of reasons, and so we would like to not inline them into
+ // functions which don't currently have a dynamic alloca. This simply
+ // disables inlining altogether in the presence of a dynamic alloca.
+ HasDynamicAlloca = true;
+ return false;
}
-// CountCodeReductionForConstant - Figure out an approximation for how many
-// instructions will be constant folded if the specified value is constant.
-//
-unsigned CodeMetrics::CountCodeReductionForConstant(Value *V) {
- unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- User *U = *UI;
- if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
- // We will be able to eliminate all but one of the successors.
- const TerminatorInst &TI = cast<TerminatorInst>(*U);
- const unsigned NumSucc = TI.getNumSuccessors();
- unsigned Instrs = 0;
- for (unsigned I = 0; I != NumSucc; ++I)
- Instrs += NumBBInsts[TI.getSuccessor(I)];
- // We don't know which blocks will be eliminated, so use the average size.
- Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
- } else {
- // Figure out if this instruction will be removed due to simple constant
- // propagation.
- Instruction &Inst = cast<Instruction>(*U);
-
- // We can't constant propagate instructions which have effects or
- // read memory.
- //
- // FIXME: It would be nice to capture the fact that a load from a
- // pointer-to-constant-global is actually a *really* good thing to zap.
- // Unfortunately, we don't know the pointer that may get propagated here,
- // so we can't make this decision.
- if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
- isa<AllocaInst>(Inst))
- continue;
+bool CallAnalyzer::visitPHI(PHINode &I) {
+ // FIXME: We should potentially be tracking values through phi nodes,
+ // especially when they collapse to a single value due to deleted CFG edges
+ // during inlining.
- bool AllOperandsConstant = true;
- for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
- if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
- AllOperandsConstant = false;
- break;
- }
+ // FIXME: We need to propagate SROA *disabling* through phi nodes, even
+ // though we don't want to propagate its bonuses. The idea is to disable
+ // SROA if it *might* be used in an inappropriate manner.
- if (AllOperandsConstant) {
- // We will get to remove this instruction...
- Reduction += InlineConstants::InstrCost;
+ // Phi nodes are always zero-cost.
+ return true;
+}
- // And any other instructions that use it which become constants
- // themselves.
- Reduction += CountCodeReductionForConstant(&Inst);
+bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
+ Value *SROAArg;
+ DenseMap<Value *, int>::iterator CostIt;
+ bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
+ SROAArg, CostIt);
+
+ // Try to fold GEPs of constant-offset call site argument pointers. This
+ // requires target data and inbounds GEPs.
+ if (TD && I.isInBounds()) {
+ // Check if we have a base + offset for the pointer.
+ Value *Ptr = I.getPointerOperand();
+ std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
+ if (BaseAndOffset.first) {
+ // Check if the offset of this GEP is constant, and if so accumulate it
+ // into Offset.
+ if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
+ // Non-constant GEPs aren't folded, and disable SROA.
+ if (SROACandidate)
+ disableSROA(CostIt);
+ return false;
}
+
+ // Add the result as a new mapping to Base + Offset.
+ ConstantOffsetPtrs[&I] = BaseAndOffset;
+
+ // Also handle SROA candidates here, we already know that the GEP is
+ // all-constant indexed.
+ if (SROACandidate)
+ SROAArgValues[&I] = SROAArg;
+
+ return true;
}
}
- return Reduction;
-}
-// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
-// the function will be if it is inlined into a context where an argument
-// becomes an alloca.
-//
-unsigned CodeMetrics::CountCodeReductionForAlloca(Value *V) {
- if (!V->getType()->isPointerTy()) return 0; // Not a pointer
- unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- Instruction *I = cast<Instruction>(*UI);
- if (isa<LoadInst>(I) || isa<StoreInst>(I))
- Reduction += InlineConstants::InstrCost;
- else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
- // If the GEP has variable indices, we won't be able to do much with it.
- if (GEP->hasAllConstantIndices())
- Reduction += CountCodeReductionForAlloca(GEP);
- } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
- // Track pointer through bitcasts.
- Reduction += CountCodeReductionForAlloca(BCI);
- } else {
- // If there is some other strange instruction, we're not going to be able
- // to do much if we inline this.
- return 0;
- }
+ if (isGEPOffsetConstant(I)) {
+ if (SROACandidate)
+ SROAArgValues[&I] = SROAArg;
+
+ // Constant GEPs are modeled as free.
+ return true;
}
- return Reduction;
+ // Variable GEPs will require math and will disable SROA.
+ if (SROACandidate)
+ disableSROA(CostIt);
+ return false;
}
-/// analyzeFunction - Fill in the current structure with information gleaned
-/// from the specified function.
-void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
- // If this function contains a call to setjmp or _setjmp, never inline
- // it. This is a hack because we depend on the user marking their local
- // variables as volatile if they are live across a setjmp call, and they
- // probably won't do this in callers.
- if (F->callsFunctionThatReturnsTwice())
- callsSetJmp = true;
-
- // Look at the size of the callee.
- for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- analyzeBasicBlock(&*BB, TD);
-}
+bool CallAnalyzer::visitBitCast(BitCastInst &I) {
+ // Propagate constants through bitcasts.
+ if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
+ if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
+ SimplifiedValues[&I] = C;
+ return true;
+ }
-/// analyzeFunction - Fill in the current structure with information gleaned
-/// from the specified function.
-void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F,
- const TargetData *TD) {
- Metrics.analyzeFunction(F, TD);
-
- // A function with exactly one return has it removed during the inlining
- // process (see InlineFunction), so don't count it.
- // FIXME: This knowledge should really be encoded outside of FunctionInfo.
- if (Metrics.NumRets==1)
- --Metrics.NumInsts;
-
- // Check out all of the arguments to the function, figuring out how much
- // code can be eliminated if one of the arguments is a constant.
- ArgumentWeights.reserve(F->arg_size());
- for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
- ArgumentWeights.push_back(ArgInfo(Metrics.CountCodeReductionForConstant(I),
- Metrics.CountCodeReductionForAlloca(I)));
+ // Track base/offsets through casts
+ std::pair<Value *, APInt> BaseAndOffset
+ = ConstantOffsetPtrs.lookup(I.getOperand(0));
+ // Casts don't change the offset, just wrap it up.
+ if (BaseAndOffset.first)
+ ConstantOffsetPtrs[&I] = BaseAndOffset;
+
+ // Also look for SROA candidates here.
+ Value *SROAArg;
+ DenseMap<Value *, int>::iterator CostIt;
+ if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
+ SROAArgValues[&I] = SROAArg;
+
+ // Bitcasts are always zero cost.
+ return true;
}
-/// NeverInline - returns true if the function should never be inlined into
-/// any caller
-bool InlineCostAnalyzer::FunctionInfo::NeverInline() {
- return (Metrics.callsSetJmp || Metrics.isRecursive ||
- Metrics.containsIndirectBr);
+bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
+ // Propagate constants through ptrtoint.
+ if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
+ if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
+ SimplifiedValues[&I] = C;
+ return true;
+ }
+
+ // Track base/offset pairs when converted to a plain integer provided the
+ // integer is large enough to represent the pointer.
+ unsigned IntegerSize = I.getType()->getScalarSizeInBits();
+ if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
+ std::pair<Value *, APInt> BaseAndOffset
+ = ConstantOffsetPtrs.lookup(I.getOperand(0));
+ if (BaseAndOffset.first)
+ ConstantOffsetPtrs[&I] = BaseAndOffset;
+ }
+
+ // This is really weird. Technically, ptrtoint will disable SROA. However,
+ // unless that ptrtoint is *used* somewhere in the live basic blocks after
+ // inlining, it will be nuked, and SROA should proceed. All of the uses which
+ // would block SROA would also block SROA if applied directly to a pointer,
+ // and so we can just add the integer in here. The only places where SROA is
+ // preserved either cannot fire on an integer, or won't in-and-of themselves
+ // disable SROA (ext) w/o some later use that we would see and disable.
+ Value *SROAArg;
+ DenseMap<Value *, int>::iterator CostIt;
+ if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
+ SROAArgValues[&I] = SROAArg;
+
+ // A ptrtoint cast is free so long as the result is large enough to store the
+ // pointer, and a legal integer type.
+ return TD && TD->isLegalInteger(IntegerSize) &&
+ IntegerSize >= TD->getPointerSizeInBits();
}
-// getSpecializationBonus - The heuristic used to determine the per-call
-// performance boost for using a specialization of Callee with argument
-// specializedArgNo replaced by a constant.
-int InlineCostAnalyzer::getSpecializationBonus(Function *Callee,
- SmallVectorImpl<unsigned> &SpecializedArgNos)
-{
- if (Callee->mayBeOverridden())
- return 0;
- int Bonus = 0;
- // If this function uses the coldcc calling convention, prefer not to
- // specialize it.
- if (Callee->getCallingConv() == CallingConv::Cold)
- Bonus -= InlineConstants::ColdccPenalty;
-
- // Get information about the callee.
- FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
-
- // If we haven't calculated this information yet, do so now.
- if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee, TD);
-
- unsigned ArgNo = 0;
- unsigned i = 0;
- for (Function::arg_iterator I = Callee->arg_begin(), E = Callee->arg_end();
- I != E; ++I, ++ArgNo)
- if (ArgNo == SpecializedArgNos[i]) {
- ++i;
- Bonus += CountBonusForConstant(I);
+bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
+ // Propagate constants through inttoptr.
+ if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
+ if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
+ SimplifiedValues[&I] = C;
+ return true;
}
- // Calls usually take a long time, so they make the specialization gain
- // smaller.
- Bonus -= CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
+ // Track base/offset pairs when round-tripped through a pointer without
+ // modifications provided the integer is not too large.
+ Value *Op = I.getOperand(0);
+ unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
+ if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
+ std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
+ if (BaseAndOffset.first)
+ ConstantOffsetPtrs[&I] = BaseAndOffset;
+ }
+
+ // "Propagate" SROA here in the same manner as we do for ptrtoint above.
+ Value *SROAArg;
+ DenseMap<Value *, int>::iterator CostIt;
+ if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
+ SROAArgValues[&I] = SROAArg;
- return Bonus;
+ // An inttoptr cast is free so long as the input is a legal integer type
+ // which doesn't contain values outside the range of a pointer.
+ return TD && TD->isLegalInteger(IntegerSize) &&
+ IntegerSize <= TD->getPointerSizeInBits();
}
-// ConstantFunctionBonus - Figure out how much of a bonus we can get for
-// possibly devirtualizing a function. We'll subtract the size of the function
-// we may wish to inline from the indirect call bonus providing a limit on
-// growth. Leave an upper limit of 0 for the bonus - we don't want to penalize
-// inlining because we decide we don't want to give a bonus for
-// devirtualizing.
-int InlineCostAnalyzer::ConstantFunctionBonus(CallSite CS, Constant *C) {
+bool CallAnalyzer::visitCastInst(CastInst &I) {
+ // Propagate constants through casts.
+ if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
+ if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
+ SimplifiedValues[&I] = C;
+ return true;
+ }
- // This could just be NULL.
- if (!C) return 0;
+ // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
+ disableSROA(I.getOperand(0));
- Function *F = dyn_cast<Function>(C);
- if (!F) return 0;
+ // No-op casts don't have any cost.
+ if (I.isLosslessCast())
+ return true;
- int Bonus = InlineConstants::IndirectCallBonus + getInlineSize(CS, F);
- return (Bonus > 0) ? 0 : Bonus;
+ // trunc to a native type is free (assuming the target has compare and
+ // shift-right of the same width).
+ if (TD && isa<TruncInst>(I) &&
+ TD->isLegalInteger(TD->getTypeSizeInBits(I.getType())))
+ return true;
+
+ // Result of a cmp instruction is often extended (to be used by other
+ // cmp instructions, logical or return instructions). These are usually
+ // no-ops on most sane targets.
+ if (isa<CmpInst>(I.getOperand(0)))
+ return true;
+
+ // Assume the rest of the casts require work.
+ return false;
}
-// CountBonusForConstant - Figure out an approximation for how much per-call
-// performance boost we can expect if the specified value is constant.
-int InlineCostAnalyzer::CountBonusForConstant(Value *V, Constant *C) {
- unsigned Bonus = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- User *U = *UI;
- if (CallInst *CI = dyn_cast<CallInst>(U)) {
- // Turning an indirect call into a direct call is a BIG win
- if (CI->getCalledValue() == V)
- Bonus += ConstantFunctionBonus(CallSite(CI), C);
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
- // Turning an indirect call into a direct call is a BIG win
- if (II->getCalledValue() == V)
- Bonus += ConstantFunctionBonus(CallSite(II), C);
+bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
+ Value *Operand = I.getOperand(0);
+ Constant *Ops[1] = { dyn_cast<Constant>(Operand) };
+ if (Ops[0] || (Ops[0] = SimplifiedValues.lookup(Operand)))
+ if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
+ Ops, TD)) {
+ SimplifiedValues[&I] = C;
+ return true;
}
- // FIXME: Eliminating conditional branches and switches should
- // also yield a per-call performance boost.
- else {
- // Figure out the bonuses that wll accrue due to simple constant
- // propagation.
- Instruction &Inst = cast<Instruction>(*U);
-
- // We can't constant propagate instructions which have effects or
- // read memory.
- //
- // FIXME: It would be nice to capture the fact that a load from a
- // pointer-to-constant-global is actually a *really* good thing to zap.
- // Unfortunately, we don't know the pointer that may get propagated here,
- // so we can't make this decision.
- if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
- isa<AllocaInst>(Inst))
- continue;
- bool AllOperandsConstant = true;
- for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
- if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
- AllOperandsConstant = false;
- break;
- }
+ // Disable any SROA on the argument to arbitrary unary operators.
+ disableSROA(Operand);
- if (AllOperandsConstant)
- Bonus += CountBonusForConstant(&Inst);
+ return false;
+}
+
+bool CallAnalyzer::visitICmp(ICmpInst &I) {
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ // First try to handle simplified comparisons.
+ if (!isa<Constant>(LHS))
+ if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
+ LHS = SimpleLHS;
+ if (!isa<Constant>(RHS))
+ if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
+ RHS = SimpleRHS;
+ if (Constant *CLHS = dyn_cast<Constant>(LHS))
+ if (Constant *CRHS = dyn_cast<Constant>(RHS))
+ if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
+ SimplifiedValues[&I] = C;
+ return true;
+ }
+
+ // Otherwise look for a comparison between constant offset pointers with
+ // a common base.
+ Value *LHSBase, *RHSBase;
+ APInt LHSOffset, RHSOffset;
+ llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
+ if (LHSBase) {
+ llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
+ if (RHSBase && LHSBase == RHSBase) {
+ // We have common bases, fold the icmp to a constant based on the
+ // offsets.
+ Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
+ Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
+ if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
+ SimplifiedValues[&I] = C;
+ ++NumConstantPtrCmps;
+ return true;
+ }
}
}
- return Bonus;
-}
+ // If the comparison is an equality comparison with null, we can simplify it
+ // for any alloca-derived argument.
+ if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
+ if (isAllocaDerivedArg(I.getOperand(0))) {
+ // We can actually predict the result of comparisons between an
+ // alloca-derived value and null. Note that this fires regardless of
+ // SROA firing.
+ bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
+ SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
+ : ConstantInt::getFalse(I.getType());
+ return true;
+ }
+
+ // Finally check for SROA candidates in comparisons.
+ Value *SROAArg;
+ DenseMap<Value *, int>::iterator CostIt;
+ if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
+ if (isa<ConstantPointerNull>(I.getOperand(1))) {
+ accumulateSROACost(CostIt, InlineConstants::InstrCost);
+ return true;
+ }
-int InlineCostAnalyzer::getInlineSize(CallSite CS, Function *Callee) {
- // Get information about the callee.
- FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
-
- // If we haven't calculated this information yet, do so now.
- if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee, TD);
-
- // InlineCost - This value measures how good of an inline candidate this call
- // site is to inline. A lower inline cost make is more likely for the call to
- // be inlined. This value may go negative.
- //
- int InlineCost = 0;
-
- // Compute any size reductions we can expect due to arguments being passed into
- // the function.
- //
- unsigned ArgNo = 0;
- CallSite::arg_iterator I = CS.arg_begin();
- for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
- FI != FE; ++I, ++FI, ++ArgNo) {
-
- // If an alloca is passed in, inlining this function is likely to allow
- // significant future optimization possibilities (like scalar promotion, and
- // scalarization), so encourage the inlining of the function.
- //
- if (isa<AllocaInst>(I))
- InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;
-
- // If this is a constant being passed into the function, use the argument
- // weights calculated for the callee to determine how much will be folded
- // away with this information.
- else if (isa<Constant>(I))
- InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
+ disableSROA(CostIt);
}
- // Each argument passed in has a cost at both the caller and the callee
- // sides. Measurements show that each argument costs about the same as an
- // instruction.
- InlineCost -= (CS.arg_size() * InlineConstants::InstrCost);
+ return false;
+}
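The common-base branch of visitICmp folds a pointer comparison down to its two constant offsets. The same reasoning in plain C++ (illustrative only):

    #include <cstdio>

    int main() {
      int A[4];
      int *P = &A[1], *Q = &A[3];
      // Same base 'A', offsets 4 and 12 bytes: the compare is decided by the
      // offsets alone, which is exactly what the folded icmp encodes.
      std::printf("%d\n", P < Q);  // prints 1
      return 0;
    }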
- // Now that we have considered all of the factors that make the call site more
- // likely to be inlined, look at factors that make us not want to inline it.
+bool CallAnalyzer::visitSub(BinaryOperator &I) {
+ // Try to handle a special case: we can fold computing the difference of two
+ // constant-related pointers.
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ Value *LHSBase, *RHSBase;
+ APInt LHSOffset, RHSOffset;
+ llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
+ if (LHSBase) {
+ llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
+ if (RHSBase && LHSBase == RHSBase) {
+ // We have common bases, fold the subtract to a constant based on the
+ // offsets.
+ Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
+ Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
+ if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
+ SimplifiedValues[&I] = C;
+ ++NumConstantPtrDiffs;
+ return true;
+ }
+ }
+ }
- // Calls usually take a long time, so they make the inlining gain smaller.
- InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
+ // Otherwise, fall back to the generic logic for simplifying and handling
+ // instructions.
+ return Base::visitSub(I);
+}
- // Look at the size of the callee. Each instruction counts as 5.
- InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
+bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ if (!isa<Constant>(LHS))
+ if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
+ LHS = SimpleLHS;
+ if (!isa<Constant>(RHS))
+ if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
+ RHS = SimpleRHS;
+ Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
+ if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
+ SimplifiedValues[&I] = C;
+ return true;
+ }
- return InlineCost;
-}
+ // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
+ disableSROA(LHS);
+ disableSROA(RHS);
-int InlineCostAnalyzer::getInlineBonuses(CallSite CS, Function *Callee) {
- // Get information about the callee.
- FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
-
- // If we haven't calculated this information yet, do so now.
- if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee, TD);
-
- bool isDirectCall = CS.getCalledFunction() == Callee;
- Instruction *TheCall = CS.getInstruction();
- int Bonus = 0;
-
- // If there is only one call of the function, and it has internal linkage,
- // make it almost guaranteed to be inlined.
- //
- if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
- Bonus += InlineConstants::LastCallToStaticBonus;
-
- // If the instruction after the call, or if the normal destination of the
- // invoke is an unreachable instruction, the function is noreturn. As such,
- // there is little point in inlining this.
- if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
- if (isa<UnreachableInst>(II->getNormalDest()->begin()))
- Bonus += InlineConstants::NoreturnPenalty;
- } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
- Bonus += InlineConstants::NoreturnPenalty;
-
- // If this function uses the coldcc calling convention, prefer not to inline
- // it.
- if (Callee->getCallingConv() == CallingConv::Cold)
- Bonus += InlineConstants::ColdccPenalty;
-
- // Add to the inline quality for properties that make the call valuable to
- // inline. This includes factors that indicate that the result of inlining
- // the function will be optimizable. Currently this just looks at arguments
- // passed into the function.
- //
- CallSite::arg_iterator I = CS.arg_begin();
- for (Function::arg_iterator FI = Callee->arg_begin(), FE = Callee->arg_end();
- FI != FE; ++I, ++FI)
- // Compute any constant bonus due to inlining we want to give here.
- if (isa<Constant>(I))
- Bonus += CountBonusForConstant(FI, cast<Constant>(I));
-
- return Bonus;
+ return false;
}
-// getInlineCost - The heuristic used to determine if we should inline the
-// function call or not.
-//
-InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
- SmallPtrSet<const Function*, 16> &NeverInline) {
- return getInlineCost(CS, CS.getCalledFunction(), NeverInline);
-}
+bool CallAnalyzer::visitLoad(LoadInst &I) {
+ Value *SROAArg;
+ DenseMap<Value *, int>::iterator CostIt;
+ if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
+ if (I.isSimple()) {
+ accumulateSROACost(CostIt, InlineConstants::InstrCost);
+ return true;
+ }
-InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
- Function *Callee,
- SmallPtrSet<const Function*, 16> &NeverInline) {
- Instruction *TheCall = CS.getInstruction();
- Function *Caller = TheCall->getParent()->getParent();
+ disableSROA(CostIt);
+ }
- // Don't inline functions which can be redefined at link-time to mean
- // something else. Don't inline functions marked noinline or call sites
- // marked noinline.
- if (Callee->mayBeOverridden() ||
- Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee) ||
- CS.isNoInline())
- return llvm::InlineCost::getNever();
+ return false;
+}
- // Get information about the callee.
- FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+bool CallAnalyzer::visitStore(StoreInst &I) {
+ Value *SROAArg;
+ DenseMap<Value *, int>::iterator CostIt;
+ if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
+ if (I.isSimple()) {
+ accumulateSROACost(CostIt, InlineConstants::InstrCost);
+ return true;
+ }
- // If we haven't calculated this information yet, do so now.
- if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee, TD);
+ disableSROA(CostIt);
+ }
- // If we should never inline this, return a huge cost.
- if (CalleeFI->NeverInline())
- return InlineCost::getNever();
+ return false;
+}
- // FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
- // could move this up and avoid computing the FunctionInfo for
- // things we are going to just return always inline for. This
- // requires handling setjmp somewhere else, however.
- if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
- return InlineCost::getAlways();
+bool CallAnalyzer::visitCallSite(CallSite CS) {
+ if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
+ !F.hasFnAttr(Attribute::ReturnsTwice)) {
+ // This aborts the entire analysis.
+ ExposesReturnsTwice = true;
+ return false;
+ }
- if (CalleeFI->Metrics.usesDynamicAlloca) {
- // Get information about the caller.
- FunctionInfo &CallerFI = CachedFunctionInfo[Caller];
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+ switch (II->getIntrinsicID()) {
+ default:
+ return Base::visitCallSite(CS);
+
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::objectsize:
+ case Intrinsic::ptr_annotation:
+ case Intrinsic::var_annotation:
+ // SROA can usually chew through these intrinsics and they have no cost
+ // so don't pay the price of analyzing them in detail.
+ return true;
+ }
+ }
+
+ if (Function *F = CS.getCalledFunction()) {
+ if (F == CS.getInstruction()->getParent()->getParent()) {
+ // This flag will fully abort the analysis, so don't bother with anything
+ // else.
+ IsRecursive = true;
+ return false;
+ }
- // If we haven't calculated this information yet, do so now.
- if (CallerFI.Metrics.NumBlocks == 0) {
- CallerFI.analyzeFunction(Caller, TD);
+ if (!callIsSmall(F)) {
+ // We account for the average 1 instruction per call argument setup
+ // here.
+ Cost += CS.arg_size() * InlineConstants::InstrCost;
- // Recompute the CalleeFI pointer, getting Caller could have invalidated
- // it.
- CalleeFI = &CachedFunctionInfo[Callee];
+ // Everything other than inline ASM will also have a significant cost
+ // merely from making the call.
+ if (!isa<InlineAsm>(CS.getCalledValue()))
+ Cost += InlineConstants::CallPenalty;
}
- // Don't inline a callee with dynamic alloca into a caller without them.
- // Functions containing dynamic alloca's are inefficient in various ways;
- // don't create more inefficiency.
- if (!CallerFI.Metrics.usesDynamicAlloca)
- return InlineCost::getNever();
+ return Base::visitCallSite(CS);
}
- // InlineCost - This value measures how good of an inline candidate this call
- // site is to inline. A lower inline cost make is more likely for the call to
- // be inlined. This value may go negative due to the fact that bonuses
- // are negative numbers.
- //
- int InlineCost = getInlineSize(CS, Callee) + getInlineBonuses(CS, Callee);
- return llvm::InlineCost::get(InlineCost);
+ // Otherwise we're in a very special case -- an indirect function call. See
+ // if we can be particularly clever about this.
+ Value *Callee = CS.getCalledValue();
+
+ // First, pay the price of the argument setup. We account for the average
+ // 1 instruction per call argument setup here.
+ Cost += CS.arg_size() * InlineConstants::InstrCost;
+
+ // Next, check if this happens to be an indirect function call to a known
+ // function in this inline context. If not, we've done all we can.
+ Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
+ if (!F)
+ return Base::visitCallSite(CS);
+
+ // If we have a constant that we are calling as a function, we can peer
+ // through it and see the function target. This happens not infrequently
+ // during devirtualization and so we want to give it a hefty bonus for
+ // inlining, but cap that bonus in the event that inlining wouldn't pan
+ // out. Pretend to inline the function, with a custom threshold.
+ CallAnalyzer CA(TD, *F, InlineConstants::IndirectCallThreshold);
+ if (CA.analyzeCall(CS)) {
+ // We were able to inline the indirect call! Subtract the cost from the
+ // bonus we want to apply, but don't go below zero.
+ Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
+ }
+
+ return Base::visitCallSite(CS);
}
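The devirtualization bonus at the end of visitCallSite is capped so a costly target cannot push the bonus negative. A sketch of the arithmetic; the concrete value of InlineConstants::IndirectCallThreshold is not shown in this patch, so the number below is illustrative:

    #include <algorithm>

    static int applyIndirectBonus(int Cost, int Threshold, int CalleeCost) {
      // Matches: Cost -= std::max(0, Threshold - CA.getCost());
      return Cost - std::max(0, Threshold - CalleeCost);
    }

    // applyIndirectBonus(100, 750, 200) == -450: a cheap, known target makes
    // the indirect call site very attractive. A target costing >= 750 earns
    // no bonus at all.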
-// getSpecializationCost - The heuristic used to determine the code-size
-// impact of creating a specialized version of Callee with argument
-// SpecializedArgNo replaced by a constant.
-InlineCost InlineCostAnalyzer::getSpecializationCost(Function *Callee,
- SmallVectorImpl<unsigned> &SpecializedArgNos)
-{
- // Don't specialize functions which can be redefined at link-time to mean
- // something else.
- if (Callee->mayBeOverridden())
- return llvm::InlineCost::getNever();
+bool CallAnalyzer::visitInstruction(Instruction &I) {
+ // We found something we don't understand or can't handle. Mark any SROA-able
+ // values in the operand list as no longer viable.
+ for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
+ disableSROA(*OI);
+
+ return false;
+}
- // Get information about the callee.
- FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
- // If we haven't calculated this information yet, do so now.
- if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee, TD);
+/// \brief Analyze a basic block for its contribution to the inline cost.
+///
+/// This method walks the analyzer over every instruction in the given basic
+/// block and accounts for their cost during inlining at this callsite. It
+/// aborts early if the threshold has been exceeded or an impossible to inline
+/// construct has been detected. It returns false if inlining is no longer
+/// viable, and true if inlining remains viable.
+bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
+ for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
+ I != E; ++I) {
+ ++NumInstructions;
+ if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
+ ++NumVectorInstructions;
+
+ // If the instruction simplified to a constant, there is no cost to this
+ // instruction. Visit the instructions using our InstVisitor to account for
+ // all of the per-instruction logic. The visit tree returns true if we
+ // consumed the instruction in any way, and false if the instruction's base
+ // cost should count against inlining.
+ if (Base::visit(I))
+ ++NumInstructionsSimplified;
+ else
+ Cost += InlineConstants::InstrCost;
+
+ // If visiting this instruction detected an uninlinable pattern, abort.
+ if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
+ return false;
+
+ if (NumVectorInstructions > NumInstructions/2)
+ VectorBonus = FiftyPercentVectorBonus;
+ else if (NumVectorInstructions > NumInstructions/10)
+ VectorBonus = TenPercentVectorBonus;
+ else
+ VectorBonus = 0;
+
+ // Check if we've passed the threshold so we don't spin in huge basic
+ // blocks that will never inline.
+ if (!AlwaysInline && Cost > (Threshold + VectorBonus))
+ return false;
+ }
- int Cost = 0;
+ return true;
+}
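The early-exit test in analyzeBlock folds the vector bonus into the budget. A compact restatement; the threshold value in the usage comment is illustrative, not the pass default:

    static bool overBudget(int Cost, int Threshold,
                           unsigned NumVec, unsigned NumInsts) {
      int Bonus = 0;
      if (NumVec > NumInsts / 2)
        Bonus = Threshold;       // FiftyPercentVectorBonus
      else if (NumVec > NumInsts / 10)
        Bonus = Threshold / 2;   // TenPercentVectorBonus
      return Cost > Threshold + Bonus;
    }

    // e.g. overBudget(300, 225, 6, 10) is false: 300 <= 225 + 225, so a
    // vector-dense block gets twice the headroom before the walk aborts.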
- // Look at the original size of the callee. Each instruction counts as 5.
- Cost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;
+/// \brief Compute the base pointer and cumulative constant offsets for V.
+///
+/// This strips all constant offsets off of V, leaving it the base pointer, and
+/// accumulates the total constant offset applied in the returned constant. It
+/// returns 0 if V is not a pointer, and returns the constant '0' if there are
+/// no constant offsets applied.
+ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
+ if (!TD || !V->getType()->isPointerTy())
+ return 0;
- // Offset that with the amount of code that can be constant-folded
- // away with the given arguments replaced by constants.
- for (SmallVectorImpl<unsigned>::iterator an = SpecializedArgNos.begin(),
- ae = SpecializedArgNos.end(); an != ae; ++an)
- Cost -= CalleeFI->ArgumentWeights[*an].ConstantWeight;
+ unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ APInt Offset = APInt::getNullValue(IntPtrWidth);
+
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<Value *, 4> Visited;
+ Visited.insert(V);
+ do {
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
+ return 0;
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (GA->mayBeOverridden())
+ break;
+ V = GA->getAliasee();
+ } else {
+ break;
+ }
+ assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ } while (Visited.insert(V));
- return llvm::InlineCost::get(Cost);
+ Type *IntPtrTy = TD->getIntPtrType(V->getContext());
+ return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
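A freestanding analog of the stripping loop above, with stand-in node kinds instead of GEPOperator/BitCast/GlobalAlias (an assumption-level sketch, not the LLVM types):

    #include <cstdint>

    struct Expr {
      enum Kind { BasePtr, Cast, ConstGEP } K;
      Expr *Op;
      int64_t Bytes;  // only meaningful for ConstGEP
    };

    static Expr *stripConstantOffsets(Expr *E, int64_t &Offset) {
      Offset = 0;
      while (E) {
        if (E->K == Expr::ConstGEP) { Offset += E->Bytes; E = E->Op; }
        else if (E->K == Expr::Cast) E = E->Op;  // casts keep the offset intact
        else break;                              // reached the base pointer
      }
      return E;
    }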
-// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
-// higher threshold to determine if the function call should be inlined.
-float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
- Function *Callee = CS.getCalledFunction();
-
- // Get information about the callee.
- FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
-
- // If we haven't calculated this information yet, do so now.
- if (CalleeFI.Metrics.NumBlocks == 0)
- CalleeFI.analyzeFunction(Callee, TD);
-
- float Factor = 1.0f;
- // Single BB functions are often written to be inlined.
- if (CalleeFI.Metrics.NumBlocks == 1)
- Factor += 0.5f;
-
- // Be more aggressive if the function contains a good chunk (if it mades up
- // at least 10% of the instructions) of vector instructions.
- if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/2)
- Factor += 2.0f;
- else if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/10)
- Factor += 1.5f;
- return Factor;
-}
+/// \brief Analyze a call site for potential inlining.
+///
+/// Returns true if inlining this call is viable, and false if it is not
+/// viable. It computes the cost and adjusts the threshold based on numerous
+/// factors and heuristics. If this method returns false but the computed cost
+/// is below the computed threshold, then inlining was forcibly disabled by
+/// some artifact of the routine.
+bool CallAnalyzer::analyzeCall(CallSite CS) {
+ ++NumCallsAnalyzed;
+
+ // Track whether the post-inlining function would have more than one basic
+ // block. A single basic block is often intended for inlining. Balloon the
+ // threshold by 50% until we pass the single-BB phase.
+ bool SingleBB = true;
+ int SingleBBBonus = Threshold / 2;
+ Threshold += SingleBBBonus;
+
+ // Unless we are always-inlining, perform some tweaks to the cost and
+ // threshold based on the direct callsite information.
+ if (!AlwaysInline) {
+ // We want to more aggressively inline vector-dense kernels, so up the
+ // threshold, and we'll lower it if the % of vector instructions gets too
+ // low.
+ assert(NumInstructions == 0);
+ assert(NumVectorInstructions == 0);
+ FiftyPercentVectorBonus = Threshold;
+ TenPercentVectorBonus = Threshold / 2;
+
+ // Subtract off one instruction per call argument as those will be free after
+ // inlining.
+ Cost -= CS.arg_size() * InlineConstants::InstrCost;
+
+ // If there is only one call of the function, and it has internal linkage,
+ // the cost of inlining it drops dramatically.
+ if (F.hasLocalLinkage() && F.hasOneUse() && &F == CS.getCalledFunction())
+ Cost += InlineConstants::LastCallToStaticBonus;
+
+    // If the instruction after the call, or the normal destination of the
+    // invoke, is an unreachable instruction, the function is noreturn. As
+    // such, there is little point in inlining this unless there is literally
+    // zero cost.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
+ if (isa<UnreachableInst>(II->getNormalDest()->begin()))
+ Threshold = 1;
+ } else if (isa<UnreachableInst>(++BasicBlock::iterator(CS.getInstruction())))
+ Threshold = 1;
+
+ // If this function uses the coldcc calling convention, prefer not to inline
+ // it.
+ if (F.getCallingConv() == CallingConv::Cold)
+ Cost += InlineConstants::ColdccPenalty;
+
+ // Check if we're done. This can happen due to bonuses and penalties.
+ if (Cost > Threshold)
+ return false;
+ }
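
// Worked example of the bookkeeping above, with constants assumed purely
// for illustration (the real values live in InlineConstants and may
// differ): starting from Threshold = 225 and InstrCost = 5, a direct call
// with three arguments begins as
//   Threshold: 225 + 225/2  (single-BB bonus)        = 337
//   Cost:        0 - 3 * 5  (arguments become free)   = -15
// and if this is also the last call to an internal function, the large
// negative LastCallToStaticBonus pushes Cost far below any plausible
// threshold, so inlining becomes all but guaranteed.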
-/// growCachedCostInfo - update the cached cost info for Caller after Callee has
-/// been inlined.
-void
-InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
- CodeMetrics &CallerMetrics = CachedFunctionInfo[Caller].Metrics;
+ if (F.empty())
+ return true;
- // For small functions we prefer to recalculate the cost for better accuracy.
- if (CallerMetrics.NumBlocks < 10 && CallerMetrics.NumInsts < 1000) {
- resetCachedCostInfo(Caller);
- return;
+ // Track whether we've seen a return instruction. The first return
+ // instruction is free, as at least one will usually disappear in inlining.
+ bool HasReturn = false;
+
+ // Populate our simplified values by mapping from function arguments to call
+ // arguments with known important simplifications.
+ CallSite::arg_iterator CAI = CS.arg_begin();
+ for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
+ FAI != FAE; ++FAI, ++CAI) {
+ assert(CAI != CS.arg_end());
+ if (Constant *C = dyn_cast<Constant>(CAI))
+ SimplifiedValues[FAI] = C;
+
+ Value *PtrArg = *CAI;
+ if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
+ ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());
+
+ // We can SROA any pointer arguments derived from alloca instructions.
+ if (isa<AllocaInst>(PtrArg)) {
+ SROAArgValues[FAI] = PtrArg;
+ SROAArgCosts[PtrArg] = 0;
+ }
+ }
}
+ NumConstantArgs = SimplifiedValues.size();
+ NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
+ NumAllocaArgs = SROAArgValues.size();
+
+ // The worklist of live basic blocks in the callee *after* inlining. We avoid
+ // adding basic blocks of the callee which can be proven to be dead for this
+ // particular call site in order to get more accurate cost estimates. This
+ // requires a somewhat heavyweight iteration pattern: we need to walk the
+ // basic blocks in a breadth-first order as we insert live successors. To
+  // accomplish this, we use a small-size optimized SetVector; we favor
+  // small sizes because we exit as soon as the cost crosses the threshold.
+ typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
+ SmallPtrSet<BasicBlock *, 16> > BBSetVector;
+ BBSetVector BBWorklist;
+ BBWorklist.insert(&F.getEntryBlock());
+  // Note that we *must not* cache the size: this loop grows the worklist.
+ for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
+ // Bail out the moment we cross the threshold. This means we'll under-count
+ // the cost, but only when undercounting doesn't matter.
+ if (!AlwaysInline && Cost > (Threshold + VectorBonus))
+ break;
+
+ BasicBlock *BB = BBWorklist[Idx];
+ if (BB->empty())
+ continue;
+
+ // Handle the terminator cost here where we can track returns and other
+ // function-wide constructs.
+ TerminatorInst *TI = BB->getTerminator();
+
+ // We never want to inline functions that contain an indirectbr. This is
+    // incorrect because all the blockaddresses (in static global initializers,
+    // for example) would still refer to the original function, and the indirect
+    // jump would branch from the inlined copy of the function into the original
+    // function, which is extremely undefined behavior.
+ // FIXME: This logic isn't really right; we can safely inline functions
+ // with indirectbr's as long as no other function or global references the
+ // blockaddress of a block within the current function. And as a QOI issue,
+ // if someone is using a blockaddress without an indirectbr, and that
+ // reference somehow ends up in another function or global, we probably
+ // don't want to inline this function.
+ if (isa<IndirectBrInst>(TI))
+ return false;
+
+ if (!HasReturn && isa<ReturnInst>(TI))
+ HasReturn = true;
+ else
+ Cost += InlineConstants::InstrCost;
+
+ // Analyze the cost of this block. If we blow through the threshold, this
+    // returns false, and we can bail out.
+ if (!analyzeBlock(BB)) {
+ if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
+ return false;
+ break;
+ }
- // For large functions, we can save a lot of computation time by skipping
- // recalculations.
- if (CallerMetrics.NumCalls > 0)
- --CallerMetrics.NumCalls;
+    // Add in the live successors by first checking whether we have a terminator
+ // that may be simplified based on the values simplified by this call.
+ if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
+ if (BI->isConditional()) {
+ Value *Cond = BI->getCondition();
+ if (ConstantInt *SimpleCond
+ = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
+ BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
+ continue;
+ }
+ }
+ } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
+ Value *Cond = SI->getCondition();
+ if (ConstantInt *SimpleCond
+ = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
+ BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
+ continue;
+ }
+ }
- if (Callee == 0) return;
+ // If we're unable to select a particular successor, just count all of
+ // them.
+ for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize; ++TIdx)
+ BBWorklist.insert(TI->getSuccessor(TIdx));
+
+    // If we had any successors at this point, then post-inlining is likely to
+ // have them as well. Note that we assume any basic blocks which existed
+ // due to branches or switches which folded above will also fold after
+ // inlining.
+ if (SingleBB && TI->getNumSuccessors() > 1) {
+ // Take off the bonus we applied to the threshold.
+ Threshold -= SingleBBBonus;
+ SingleBB = false;
+ }
+ }
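
// The growing-worklist idiom above in miniature, as plain C++ (Block is a
// hypothetical stand-in for BasicBlock; a sketch, not part of the patch):
// the vector half yields a stable breadth-first visit order, the set half
// deduplicates successors, and size() is deliberately re-read on every
// iteration because the loop body grows the list.

#include <unordered_set>
#include <vector>

struct Block { std::vector<Block *> Succs; };

static unsigned countReachable(Block *Entry) {
  std::vector<Block *> Worklist;
  std::unordered_set<Block *> Seen;
  Worklist.push_back(Entry);
  Seen.insert(Entry);
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx)
    for (Block *S : Worklist[Idx]->Succs)
      if (Seen.insert(S).second)  // enqueue each block only once
        Worklist.push_back(S);
  return Worklist.size();
}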
- CodeMetrics &CalleeMetrics = CachedFunctionInfo[Callee].Metrics;
+ Threshold += VectorBonus;
- // If we don't have metrics for the callee, don't recalculate them just to
- // update an approximation in the caller. Instead, just recalculate the
- // caller info from scratch.
- if (CalleeMetrics.NumBlocks == 0) {
- resetCachedCostInfo(Caller);
- return;
- }
+ return AlwaysInline || Cost < Threshold;
+}
- // Since CalleeMetrics were already calculated, we know that the CallerMetrics
- // reference isn't invalidated: both were in the DenseMap.
- CallerMetrics.usesDynamicAlloca |= CalleeMetrics.usesDynamicAlloca;
-
- // FIXME: If any of these three are true for the callee, the callee was
- // not inlined into the caller, so I think they're redundant here.
- CallerMetrics.callsSetJmp |= CalleeMetrics.callsSetJmp;
- CallerMetrics.isRecursive |= CalleeMetrics.isRecursive;
- CallerMetrics.containsIndirectBr |= CalleeMetrics.containsIndirectBr;
-
- CallerMetrics.NumInsts += CalleeMetrics.NumInsts;
- CallerMetrics.NumBlocks += CalleeMetrics.NumBlocks;
- CallerMetrics.NumCalls += CalleeMetrics.NumCalls;
- CallerMetrics.NumVectorInsts += CalleeMetrics.NumVectorInsts;
- CallerMetrics.NumRets += CalleeMetrics.NumRets;
-
- // analyzeBasicBlock counts each function argument as an inst.
- if (CallerMetrics.NumInsts >= Callee->arg_size())
- CallerMetrics.NumInsts -= Callee->arg_size();
- else
- CallerMetrics.NumInsts = 0;
-
- // We are not updating the argument weights. We have already determined that
- // Caller is a fairly large function, so we accept the loss of precision.
+/// \brief Dump stats about this call's analysis.
+void CallAnalyzer::dump() {
+#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
+ DEBUG_PRINT_STAT(NumConstantArgs);
+ DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
+ DEBUG_PRINT_STAT(NumAllocaArgs);
+ DEBUG_PRINT_STAT(NumConstantPtrCmps);
+ DEBUG_PRINT_STAT(NumConstantPtrDiffs);
+ DEBUG_PRINT_STAT(NumInstructionsSimplified);
+ DEBUG_PRINT_STAT(SROACostSavings);
+ DEBUG_PRINT_STAT(SROACostSavingsLost);
+#undef DEBUG_PRINT_STAT
}
-/// clear - empty the cache of inline costs
-void InlineCostAnalyzer::clear() {
- CachedFunctionInfo.clear();
+InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
+ return getInlineCost(CS, CS.getCalledFunction(), Threshold);
+}
+
+InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
+ int Threshold) {
+ // Don't inline functions which can be redefined at link-time to mean
+ // something else. Don't inline functions marked noinline or call sites
+ // marked noinline.
+ if (!Callee || Callee->mayBeOverridden() ||
+ Callee->hasFnAttr(Attribute::NoInline) || CS.isNoInline())
+ return llvm::InlineCost::getNever();
+
+ DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() << "...\n");
+
+ CallAnalyzer CA(TD, *Callee, Threshold);
+ bool ShouldInline = CA.analyzeCall(CS);
+
+ DEBUG(CA.dump());
+
+ // Check if there was a reason to force inlining or no inlining.
+ if (!ShouldInline && CA.getCost() < CA.getThreshold())
+ return InlineCost::getNever();
+ if (ShouldInline && CA.getCost() >= CA.getThreshold())
+ return InlineCost::getAlways();
+
+ return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
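
// Sketch of how a caller typically consumes the result (the accessor
// names are assumptions based on the InlineCost interface this patch
// targets, not verbatim from it):
//
//   InlineCost IC = ICA.getInlineCost(CS, 225);
//   if (IC.isAlways()) {
//     // inlining was forced, e.g. a zero-cost or mandatory case
//   } else if (IC.isNever()) {
//     // inlining was forbidden, e.g. indirectbr or noinline
//   } else if (IC.getCost() < 225) {
//     // the plain cost/threshold tradeoff favors inlining
//   }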
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index 131cc97..16e7a72 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -18,13 +18,17 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "instsimplify"
+#include "llvm/GlobalAlias.h"
#include "llvm/Operator.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/ConstantRange.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetData.h"
@@ -37,23 +41,28 @@ STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
-static Value *SimplifyAndInst(Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
-static Value *SimplifyBinOp(unsigned, Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
-static Value *SimplifyCmpInst(unsigned, Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
-static Value *SimplifyOrInst(Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
-static Value *SimplifyXorInst(Value *, Value *, const TargetData *,
- const DominatorTree *, unsigned);
+struct Query {
+ const TargetData *TD;
+ const TargetLibraryInfo *TLI;
+ const DominatorTree *DT;
+
+ Query(const TargetData *td, const TargetLibraryInfo *tli,
+        const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
+};
+
+static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
+static Value *SimplifyBinOp(unsigned, Value *, Value *, const Query &,
+ unsigned);
+static Value *SimplifyCmpInst(unsigned, Value *, Value *, const Query &,
+ unsigned);
+static Value *SimplifyOrInst(Value *, Value *, const Query &, unsigned);
+static Value *SimplifyXorInst(Value *, Value *, const Query &, unsigned);
+static Value *SimplifyTruncInst(Value *, Type *, const Query &, unsigned);
/// getFalse - For a boolean type, or a vector of boolean type, return false, or
/// a vector with every element false, as appropriate for the type.
static Constant *getFalse(Type *Ty) {
- assert((Ty->isIntegerTy(1) ||
- (Ty->isVectorTy() &&
- cast<VectorType>(Ty)->getElementType()->isIntegerTy(1))) &&
+ assert(Ty->getScalarType()->isIntegerTy(1) &&
"Expected i1 type or a vector of i1!");
return Constant::getNullValue(Ty);
}
@@ -61,13 +70,25 @@ static Constant *getFalse(Type *Ty) {
/// getTrue - For a boolean type, or a vector of boolean type, return true, or
/// a vector with every element true, as appropriate for the type.
static Constant *getTrue(Type *Ty) {
- assert((Ty->isIntegerTy(1) ||
- (Ty->isVectorTy() &&
- cast<VectorType>(Ty)->getElementType()->isIntegerTy(1))) &&
+ assert(Ty->getScalarType()->isIntegerTy(1) &&
"Expected i1 type or a vector of i1!");
return Constant::getAllOnesValue(Ty);
}
+/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
+static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
+ Value *RHS) {
+ CmpInst *Cmp = dyn_cast<CmpInst>(V);
+ if (!Cmp)
+ return false;
+ CmpInst::Predicate CPred = Cmp->getPredicate();
+ Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
+ if (CPred == Pred && CLHS == LHS && CRHS == RHS)
+ return true;
+ return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
+ CRHS == LHS;
+}
+
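// Example: isSameCompare(Cond, ICmpInst::ICMP_SLT, X, Y) accepts both
// "icmp slt X, Y" and the operand-swapped form "icmp sgt Y, X", since
// getSwappedPredicate(ICMP_SLT) == ICMP_SGT.
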
/// ValueDominatesPHI - Does the given value dominate the specified phi node?
static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
Instruction *I = dyn_cast<Instruction>(V);
@@ -75,9 +96,20 @@ static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
// Arguments and constants dominate all instructions.
return true;
+ // If we are processing instructions (and/or basic blocks) that have not been
+ // fully added to a function, the parent nodes may still be null. Simply
+ // return the conservative answer in these cases.
+ if (!I->getParent() || !P->getParent() || !I->getParent()->getParent())
+ return false;
+
// If we have a DominatorTree then do a precise test.
- if (DT)
+ if (DT) {
+ if (!DT->isReachableFromEntry(P->getParent()))
+ return true;
+ if (!DT->isReachableFromEntry(I->getParent()))
+ return false;
return DT->dominates(I, P);
+ }
// Otherwise, if the instruction is in the entry block, and is not an invoke,
// then it obviously dominates all phi nodes.
@@ -94,8 +126,8 @@ static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- unsigned OpcToExpand, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+ unsigned OpcToExpand, const Query &Q,
+ unsigned MaxRecurse) {
Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
@@ -107,8 +139,8 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
// It does! Try turning it into "(A op C) op' (B op C)".
Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
// Do "A op C" and "B op C" both simplify?
- if (Value *L = SimplifyBinOp(Opcode, A, C, TD, DT, MaxRecurse))
- if (Value *R = SimplifyBinOp(Opcode, B, C, TD, DT, MaxRecurse)) {
+ if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
+ if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
// They do! Return "L op' R" if it simplifies or is already available.
// If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
@@ -117,8 +149,7 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
return LHS;
}
// Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, TD, DT,
- MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
++NumExpand;
return V;
}
@@ -131,8 +162,8 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
// It does! Try turning it into "(A op B) op' (A op C)".
Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
// Do "A op B" and "A op C" both simplify?
- if (Value *L = SimplifyBinOp(Opcode, A, B, TD, DT, MaxRecurse))
- if (Value *R = SimplifyBinOp(Opcode, A, C, TD, DT, MaxRecurse)) {
+ if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
+ if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
// They do! Return "L op' R" if it simplifies or is already available.
// If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
@@ -141,8 +172,7 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
return RHS;
}
// Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, TD, DT,
- MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
++NumExpand;
return V;
}
@@ -157,8 +187,8 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
/// OpCodeToExtract is Mul then this tries to turn "(A*B)+(A*C)" into "A*(B+C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- unsigned OpcToExtract, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+ unsigned OpcToExtract, const Query &Q,
+ unsigned MaxRecurse) {
Instruction::BinaryOps OpcodeToExtract = (Instruction::BinaryOps)OpcToExtract;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
@@ -182,7 +212,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Value *DD = A == C ? D : C;
// Form "A op' (B op DD)" if it simplifies completely.
// Does "B op DD" simplify?
- if (Value *V = SimplifyBinOp(Opcode, B, DD, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, B, DD, Q, MaxRecurse)) {
// It does! Return "A op' V" if it simplifies or is already available.
// If V equals B then "A op' V" is just the LHS. If V equals DD then
// "A op' V" is just the RHS.
@@ -191,7 +221,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
return V == B ? LHS : RHS;
}
// Otherwise return "A op' V" if it simplifies.
- if (Value *W = SimplifyBinOp(OpcodeToExtract, A, V, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(OpcodeToExtract, A, V, Q, MaxRecurse)) {
++NumFactor;
return W;
}
@@ -205,7 +235,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Value *CC = B == D ? C : D;
// Form "(A op CC) op' B" if it simplifies completely..
// Does "A op CC" simplify?
- if (Value *V = SimplifyBinOp(Opcode, A, CC, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, A, CC, Q, MaxRecurse)) {
// It does! Return "V op' B" if it simplifies or is already available.
// If V equals A then "V op' B" is just the LHS. If V equals CC then
// "V op' B" is just the RHS.
@@ -214,7 +244,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
return V == A ? LHS : RHS;
}
// Otherwise return "V op' B" if it simplifies.
- if (Value *W = SimplifyBinOp(OpcodeToExtract, V, B, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(OpcodeToExtract, V, B, Q, MaxRecurse)) {
++NumFactor;
return W;
}
@@ -227,9 +257,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
/// SimplifyAssociativeBinOp - Generic simplifications for associative binary
/// operations. Returns the simpler value, or null if none was found.
static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
- const TargetData *TD,
- const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
Instruction::BinaryOps Opcode = (Instruction::BinaryOps)Opc;
assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
@@ -247,12 +275,12 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
Value *C = RHS;
// Does "B op C" simplify?
- if (Value *V = SimplifyBinOp(Opcode, B, C, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
// It does! Return "A op V" if it simplifies or is already available.
// If V equals B then "A op V" is just the LHS.
if (V == B) return LHS;
// Otherwise return "A op V" if it simplifies.
- if (Value *W = SimplifyBinOp(Opcode, A, V, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
++NumReassoc;
return W;
}
@@ -266,12 +294,12 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
Value *C = Op1->getOperand(1);
// Does "A op B" simplify?
- if (Value *V = SimplifyBinOp(Opcode, A, B, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
// It does! Return "V op C" if it simplifies or is already available.
// If V equals B then "V op C" is just the RHS.
if (V == B) return RHS;
// Otherwise return "V op C" if it simplifies.
- if (Value *W = SimplifyBinOp(Opcode, V, C, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
++NumReassoc;
return W;
}
@@ -289,12 +317,12 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
Value *C = RHS;
// Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
// It does! Return "V op B" if it simplifies or is already available.
// If V equals A then "V op B" is just the LHS.
if (V == A) return LHS;
// Otherwise return "V op B" if it simplifies.
- if (Value *W = SimplifyBinOp(Opcode, V, B, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
++NumReassoc;
return W;
}
@@ -308,12 +336,12 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
Value *C = Op1->getOperand(1);
// Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD, DT, MaxRecurse)) {
+ if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
// It does! Return "B op V" if it simplifies or is already available.
// If V equals C then "B op V" is just the RHS.
if (V == C) return RHS;
// Otherwise return "B op V" if it simplifies.
- if (Value *W = SimplifyBinOp(Opcode, B, V, TD, DT, MaxRecurse)) {
+ if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
++NumReassoc;
return W;
}
@@ -328,9 +356,7 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
/// evaluating it on both branches of the select results in the same value.
/// Returns the common value if so, otherwise returns null.
static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD,
- const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return 0;
@@ -347,11 +373,11 @@ static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
Value *TV;
Value *FV;
if (SI == LHS) {
- TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, TD, DT, MaxRecurse);
- FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, TD, DT, MaxRecurse);
+ TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
+ FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
} else {
- TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), TD, DT, MaxRecurse);
- FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), TD, DT, MaxRecurse);
+ TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
+ FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
}
// If they simplified to the same value, then return the common value.
@@ -402,8 +428,7 @@ static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
/// result in the same value. Returns the common value if so, otherwise returns
/// null.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
- Value *RHS, const TargetData *TD,
- const DominatorTree *DT,
+ Value *RHS, const Query &Q,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
@@ -416,40 +441,67 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
}
assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
SelectInst *SI = cast<SelectInst>(LHS);
+ Value *Cond = SI->getCondition();
+ Value *TV = SI->getTrueValue();
+ Value *FV = SI->getFalseValue();
// Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
// Does "cmp TV, RHS" simplify?
- if (Value *TCmp = SimplifyCmpInst(Pred, SI->getTrueValue(), RHS, TD, DT,
- MaxRecurse)) {
- // It does! Does "cmp FV, RHS" simplify?
- if (Value *FCmp = SimplifyCmpInst(Pred, SI->getFalseValue(), RHS, TD, DT,
- MaxRecurse)) {
- // It does! If they simplified to the same value, then use it as the
- // result of the original comparison.
- if (TCmp == FCmp)
- return TCmp;
- Value *Cond = SI->getCondition();
- // If the false value simplified to false, then the result of the compare
- // is equal to "Cond && TCmp". This also catches the case when the false
- // value simplified to false and the true value to true, returning "Cond".
- if (match(FCmp, m_Zero()))
- if (Value *V = SimplifyAndInst(Cond, TCmp, TD, DT, MaxRecurse))
- return V;
- // If the true value simplified to true, then the result of the compare
- // is equal to "Cond || FCmp".
- if (match(TCmp, m_One()))
- if (Value *V = SimplifyOrInst(Cond, FCmp, TD, DT, MaxRecurse))
- return V;
- // Finally, if the false value simplified to true and the true value to
- // false, then the result of the compare is equal to "!Cond".
- if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
- if (Value *V =
- SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
- TD, DT, MaxRecurse))
- return V;
- }
+ Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
+ if (TCmp == Cond) {
+ // It not only simplified, it simplified to the select condition. Replace
+ // it with 'true'.
+ TCmp = getTrue(Cond->getType());
+ } else if (!TCmp) {
+ // It didn't simplify. However if "cmp TV, RHS" is equal to the select
+ // condition then we can replace it with 'true'. Otherwise give up.
+ if (!isSameCompare(Cond, Pred, TV, RHS))
+ return 0;
+ TCmp = getTrue(Cond->getType());
+ }
+
+ // Does "cmp FV, RHS" simplify?
+ Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
+ if (FCmp == Cond) {
+ // It not only simplified, it simplified to the select condition. Replace
+ // it with 'false'.
+ FCmp = getFalse(Cond->getType());
+ } else if (!FCmp) {
+ // It didn't simplify. However if "cmp FV, RHS" is equal to the select
+ // condition then we can replace it with 'false'. Otherwise give up.
+ if (!isSameCompare(Cond, Pred, FV, RHS))
+ return 0;
+ FCmp = getFalse(Cond->getType());
}
+ // If both sides simplified to the same value, then use it as the result of
+ // the original comparison.
+ if (TCmp == FCmp)
+ return TCmp;
+
+ // The remaining cases only make sense if the select condition has the same
+ // type as the result of the comparison, so bail out if this is not so.
+ if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
+ return 0;
+ // If the false value simplified to false, then the result of the compare
+ // is equal to "Cond && TCmp". This also catches the case when the false
+ // value simplified to false and the true value to true, returning "Cond".
+ if (match(FCmp, m_Zero()))
+ if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
+ return V;
+ // If the true value simplified to true, then the result of the compare
+ // is equal to "Cond || FCmp".
+ if (match(TCmp, m_One()))
+ if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
+ return V;
+ // Finally, if the false value simplified to true and the true value to
+ // false, then the result of the compare is equal to "!Cond".
+ if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
+ if (Value *V =
+ SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
+ Q, MaxRecurse))
+ return V;
+
return 0;
}
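
// Recap of the folds above, writing C for the select condition:
//   FCmp == false                  =>  result == (C && TCmp)
//   TCmp == true                   =>  result == (C || FCmp)
//   TCmp == false && FCmp == true  =>  result == !C
// For instance, "icmp eq (select C, X, Y), X" with X and Y known unequal
// gives TCmp = true and FCmp = false, so the whole compare folds to C.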
@@ -458,8 +510,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
/// it on the incoming phi values yields the same result for every value. If so
/// returns the common value, otherwise returns null.
static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return 0;
@@ -468,13 +519,13 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
if (isa<PHINode>(LHS)) {
PI = cast<PHINode>(LHS);
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
- if (!ValueDominatesPHI(RHS, PI, DT))
+ if (!ValueDominatesPHI(RHS, PI, Q.DT))
return 0;
} else {
assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
PI = cast<PHINode>(RHS);
// Bail out if LHS and the phi may be mutually interdependent due to a loop.
- if (!ValueDominatesPHI(LHS, PI, DT))
+ if (!ValueDominatesPHI(LHS, PI, Q.DT))
return 0;
}
@@ -485,8 +536,8 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PI) continue;
Value *V = PI == LHS ?
- SimplifyBinOp(Opcode, Incoming, RHS, TD, DT, MaxRecurse) :
- SimplifyBinOp(Opcode, LHS, Incoming, TD, DT, MaxRecurse);
+ SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
+ SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
// If the operation failed to simplify, or simplified to a different value
// to previously, then give up.
if (!V || (CommonValue && V != CommonValue))
@@ -502,8 +553,7 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
/// incoming phi values yields the same result every time. If so returns the
/// common result, otherwise returns null.
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return 0;
@@ -517,7 +567,7 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
PHINode *PI = cast<PHINode>(LHS);
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
- if (!ValueDominatesPHI(RHS, PI, DT))
+ if (!ValueDominatesPHI(RHS, PI, Q.DT))
return 0;
// Evaluate the BinOp on the incoming phi values.
@@ -526,7 +576,7 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
Value *Incoming = PI->getIncomingValue(i);
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PI) continue;
- Value *V = SimplifyCmpInst(Pred, Incoming, RHS, TD, DT, MaxRecurse);
+ Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
// If the operation failed to simplify, or simplified to a different value
// to previously, then give up.
if (!V || (CommonValue && V != CommonValue))
@@ -540,13 +590,12 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
- return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(),
- Ops, TD);
+ return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(), Ops,
+ Q.TD, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -576,17 +625,17 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// i1 add -> xor.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
- if (Value *V = SimplifyXorInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
return V;
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, TD, DT,
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
MaxRecurse))
return V;
// Mul distributes over Add. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::Add, Op0, Op1, Instruction::Mul,
- TD, DT, MaxRecurse))
+ Q, MaxRecurse))
return V;
// Threading Add over selects and phi nodes is pointless, so don't bother.
@@ -602,20 +651,116 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ RecursionLimit);
+}
+
+/// \brief Accumulate the constant integer offset a GEP represents.
+///
+/// Given a getelementptr instruction/constantexpr, accumulate the constant
+/// offset from the base pointer into the provided APInt 'Offset'. Returns true
+/// if the GEP has all-constant indices. Returns false if any non-constant
+/// index is encountered, leaving the 'Offset' in an undefined state. The
+/// 'Offset' APInt must be the bitwidth of the target's pointer size.
+static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP,
+ APInt &Offset) {
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ assert(IntPtrWidth == Offset.getBitWidth());
+
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end(); I != E;
+ ++I, ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(*I);
+ if (!OpC) return false;
+ if (OpC->isZero()) continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = TD.getStructLayout(STy);
+ Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
+ continue;
+ }
+
+ APInt TypeSize(IntPtrWidth, TD.getTypeAllocSize(GTI.getIndexedType()));
+ Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
+ }
+ return true;
+}
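
// Worked example: for
//   getelementptr inbounds {i32, i32}* %p, i64 1, i32 1
// on a target with 64-bit pointers, the i64 index contributes
// 1 * getTypeAllocSize({i32, i32}) = 8 bytes and the field index
// contributes getElementOffset(1) = 4, so Offset accumulates to 12.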
+
+/// \brief Compute the base pointer and cumulative constant offsets for V.
+///
+/// This strips all constant offsets off of V, leaving it the base pointer, and
+/// accumulates the total constant offset applied in the returned constant. It
+/// returns 0 if V is not a pointer, and returns the constant '0' if there are
+/// no constant offsets applied.
+static Constant *stripAndComputeConstantOffsets(const TargetData &TD,
+ Value *&V) {
+ if (!V->getType()->isPointerTy())
+ return 0;
+
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ APInt Offset = APInt::getNullValue(IntPtrWidth);
+
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<Value *, 4> Visited;
+ Visited.insert(V);
+ do {
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ if (!GEP->isInBounds() || !accumulateGEPOffset(TD, GEP, Offset))
+ break;
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (GA->mayBeOverridden())
+ break;
+ V = GA->getAliasee();
+ } else {
+ break;
+ }
+ assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ } while (Visited.insert(V));
+
+ Type *IntPtrTy = TD.getIntPtrType(V->getContext());
+ return ConstantInt::get(IntPtrTy, Offset);
+}
+
+/// \brief Compute the constant difference between two pointer values.
+/// If the difference is not a constant, returns zero.
+static Constant *computePointerDifference(const TargetData &TD,
+ Value *LHS, Value *RHS) {
+ Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
+ if (!LHSOffset)
+ return 0;
+ Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
+ if (!RHSOffset)
+ return 0;
+
+ // If LHS and RHS are not related via constant offsets to the same base
+ // value, there is nothing we can do here.
+ if (LHS != RHS)
+ return 0;
+
+ // Otherwise, the difference of LHS - RHS can be computed as:
+ // LHS - RHS
+ // = (LHSOffset + Base) - (RHSOffset + Base)
+ // = LHSOffset - RHSOffset
+ return ConstantExpr::getSub(LHSOffset, RHSOffset);
}
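
// Example: with
//   %l = getelementptr inbounds i8* %base, i64 12
//   %r = getelementptr inbounds i8* %base, i64 4
// both values strip down to %base with LHSOffset = 12 and RHSOffset = 4,
// so the difference folds to the constant 8.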
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0))
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(),
- Ops, TD);
+ Ops, Q.TD, Q.TLI);
}
// X - undef -> undef
@@ -643,19 +788,17 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Value *Y = 0, *Z = Op1;
if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
// See if "V === Y - Z" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
// It does! Now see if "X + V" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Add, X, V, TD, DT,
- MaxRecurse-1)) {
+ if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
// See if "V === X - Z" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
// It does! Now see if "Y + V" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, TD, DT,
- MaxRecurse-1)) {
+ if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
@@ -667,19 +810,17 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
X = Op0;
if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
// See if "V === X - Y" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
// It does! Now see if "V - Z" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, TD, DT,
- MaxRecurse-1)) {
+ if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
// See if "V === X - Z" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
// It does! Now see if "V - Y" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, TD, DT,
- MaxRecurse-1)) {
+ if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
@@ -691,23 +832,39 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Z = Op0;
if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
// See if "V === Z - X" simplifies.
- if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
// It does! Now see if "V + Y" simplifies.
- if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, TD, DT,
- MaxRecurse-1)) {
+ if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
+ // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
+ if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
+ match(Op1, m_Trunc(m_Value(Y))))
+ if (X->getType() == Y->getType())
+ // See if "V === X - Y" simplifies.
+ if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
+ // It does! Now see if "trunc V" simplifies.
+ if (Value *W = SimplifyTruncInst(V, Op0->getType(), Q, MaxRecurse-1))
+ // It does, return the simplified "trunc V".
+ return W;
+
+ // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
+ if (Q.TD && match(Op0, m_PtrToInt(m_Value(X))) &&
+ match(Op1, m_PtrToInt(m_Value(Y))))
+ if (Constant *Result = computePointerDifference(*Q.TD, X, Y))
+ return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
+
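// This is the fold that lets, e.g.,
//   sub (ptrtoint i8* %l to i64), (ptrtoint i8* %r to i64)
// collapse to a constant whenever %l and %r are inbounds GEPs off the
// same base pointer, as in the computePointerDifference example above.
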
// Mul distributes over Sub. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::Sub, Op0, Op1, Instruction::Mul,
- TD, DT, MaxRecurse))
+ Q, MaxRecurse))
return V;
// i1 sub -> xor.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
- if (Value *V = SimplifyXorInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
return V;
// Threading Sub over selects and phi nodes is pointless, so don't bother.
@@ -723,19 +880,21 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ RecursionLimit);
}
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(),
- Ops, TD);
+ Ops, Q.TD, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -755,40 +914,37 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
return Op0;
// (X / Y) * Y -> X if the division is exact.
- Value *X = 0, *Y = 0;
- if ((match(Op0, m_IDiv(m_Value(X), m_Value(Y))) && Y == Op1) || // (X / Y) * Y
- (match(Op1, m_IDiv(m_Value(X), m_Value(Y))) && Y == Op0)) { // Y * (X / Y)
- BinaryOperator *Div = cast<BinaryOperator>(Y == Op1 ? Op0 : Op1);
- if (Div->isExact())
- return X;
- }
+ Value *X = 0;
+ if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
+ match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
+ return X;
// i1 mul -> and.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
- if (Value *V = SimplifyAndInst(Op0, Op1, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
return V;
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, TD, DT,
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
MaxRecurse))
return V;
// Mul distributes over Add. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
- TD, DT, MaxRecurse))
+ Q, MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
MaxRecurse))
return V;
@@ -796,19 +952,19 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
}
Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyMulInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
/// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
}
}
@@ -842,7 +998,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
Value *X = 0, *Y = 0;
if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
- BinaryOperator *Mul = cast<BinaryOperator>(Op0);
+ OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
// If the Mul knows it does not overflow, then we are good to go.
if ((isSigned && Mul->hasNoSignedWrap()) ||
(!isSigned && Mul->hasNoUnsignedWrap()))
@@ -861,13 +1017,13 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
return 0;
@@ -875,36 +1031,38 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
- if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, TD, DT, MaxRecurse))
+static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySDivInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
- if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, TD, DT, MaxRecurse))
+static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyUDivInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
-static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *,
- const DominatorTree *, unsigned) {
+static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned) {
// undef / X -> undef (the undef could be a snan).
if (match(Op0, m_Undef()))
return Op0;
@@ -917,19 +1075,19 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *,
}
Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFDivInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
/// SimplifyRem - Given operands for an SRem or URem, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
}
}
@@ -964,13 +1122,13 @@ static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
return 0;
@@ -978,36 +1136,38 @@ static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
- if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, TD, DT, MaxRecurse))
+static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySRemInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
- if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, TD, DT, MaxRecurse))
+static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned MaxRecurse) {
+ if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyURemInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
-static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *,
- const DominatorTree *, unsigned) {
+static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
+ unsigned) {
// undef % X -> undef (the undef could be a snan).
if (match(Op0, m_Undef()))
return Op0;
@@ -1020,19 +1180,19 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *,
}
Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFRemInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
/// SimplifyShift - Given operands for an Shl, LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, TD);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
}
}
@@ -1057,13 +1217,13 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
return 0;
@@ -1072,9 +1232,8 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
/// SimplifyShlInst - Given operands for an Shl, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
- if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, TD, DT, MaxRecurse))
+ const Query &Q, unsigned MaxRecurse) {
+ if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
return V;
// undef << X -> 0
@@ -1083,23 +1242,23 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// (X >> A) << A -> X
Value *X;
- if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1))) &&
- cast<PossiblyExactOperator>(Op0)->isExact())
+ if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
return X;
return 0;
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ RecursionLimit);
}
/// SimplifyLShrInst - Given operands for an LShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
- if (Value *V = SimplifyShift(Instruction::LShr, Op0, Op1, TD, DT, MaxRecurse))
+ const Query &Q, unsigned MaxRecurse) {
+ if (Value *V = SimplifyShift(Instruction::LShr, Op0, Op1, Q, MaxRecurse))
return V;
// undef >>l X -> 0
@@ -1116,16 +1275,18 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyLShrInst(Op0, Op1, isExact, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
+ RecursionLimit);
}
/// SimplifyAShrInst - Given operands for an AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
- if (Value *V = SimplifyShift(Instruction::AShr, Op0, Op1, TD, DT, MaxRecurse))
+ const Query &Q, unsigned MaxRecurse) {
+ if (Value *V = SimplifyShift(Instruction::AShr, Op0, Op1, Q, MaxRecurse))
return V;
// all ones >>a X -> all ones
@@ -1146,19 +1307,22 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyAShrInst(Op0, Op1, isExact, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
+ RecursionLimit);
}
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
- Ops, TD);
+ Ops, Q.TD, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -1197,37 +1361,46 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
(A == Op0 || B == Op0))
return Op0;
+ // A & (-A) = A if A is a power of two or zero.
+ if (match(Op0, m_Neg(m_Specific(Op1))) ||
+ match(Op1, m_Neg(m_Specific(Op0)))) {
+ if (isPowerOfTwo(Op0, Q.TD, /*OrZero*/true))
+ return Op0;
+ if (isPowerOfTwo(Op1, Q.TD, /*OrZero*/true))
+ return Op1;
+ }
+
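// Illustrative aside, a minimal standalone sketch of the bit identity the
// fold above relies on (plain C++ with hypothetical values, not LLVM code):
//
//   #include <cassert>
//   #include <cstdint>
//   int main() {
//     uint32_t A = 8;                // power of two: -A keeps only the
//     assert((A & (0u - A)) == A);   // lowest set bit, so A & -A == A
//     uint32_t B = 12;               // not a power of two:
//     assert((B & (0u - B)) == 4u);  // A & -A isolates the lowest bit only
//   }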
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, TD, DT,
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
MaxRecurse))
return V;
// And distributes over Or. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
- TD, DT, MaxRecurse))
+ Q, MaxRecurse))
return V;
// And distributes over Xor. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
- TD, DT, MaxRecurse))
+ Q, MaxRecurse))
return V;
// Or distributes over And. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::And, Op0, Op1, Instruction::Or,
- TD, DT, MaxRecurse))
+ Q, MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
MaxRecurse))
return V;
@@ -1235,19 +1408,20 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
}
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAndInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(),
- Ops, TD);
+ Ops, Q.TD, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -1297,51 +1471,51 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
return Constant::getAllOnesValue(Op0->getType());
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, TD, DT,
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
MaxRecurse))
return V;
// Or distributes over And. Try some generic simplifications based on this.
- if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And,
- TD, DT, MaxRecurse))
+ if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
+ MaxRecurse))
return V;
// And distributes over Or. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::Or, Op0, Op1, Instruction::And,
- TD, DT, MaxRecurse))
+ Q, MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
- if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, TD, DT,
+ if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
- if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, TD, DT,
- MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyOrInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
-static Value *SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
- const DominatorTree *DT, unsigned MaxRecurse) {
+static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
+ unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(),
- Ops, TD);
+ Ops, Q.TD, Q.TLI);
}
// Canonicalize the constant to the RHS.
@@ -1366,13 +1540,13 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
return Constant::getAllOnesValue(Op0->getType());
// Try some generic simplifications for associative operations.
- if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, TD, DT,
+ if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
MaxRecurse))
return V;
// And distributes over Xor. Try some generic simplifications based on this.
if (Value *V = FactorizeBinOp(Instruction::Xor, Op0, Op1, Instruction::And,
- TD, DT, MaxRecurse))
+ Q, MaxRecurse))
return V;
// Threading Xor over selects and phi nodes is pointless, so don't bother.
@@ -1388,8 +1562,9 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
}
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyXorInst(Op0, Op1, TD, DT, RecursionLimit);
+ return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
}
static Type *GetCompareTy(Value *Op) {
@@ -1416,17 +1591,56 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
return 0;
}
+static Constant *computePointerICmp(const TargetData &TD,
+ CmpInst::Predicate Pred,
+ Value *LHS, Value *RHS) {
+ // We can only fold certain predicates on pointer comparisons.
+ switch (Pred) {
+ default:
+ return 0;
+
+ // Equality comparisons are easy to fold.
+ case CmpInst::ICMP_EQ:
+ case CmpInst::ICMP_NE:
+ break;
+
+ // We can only handle unsigned relational comparisons because 'inbounds' on
+ // a GEP only protects against unsigned wrapping.
+ case CmpInst::ICMP_UGT:
+ case CmpInst::ICMP_UGE:
+ case CmpInst::ICMP_ULT:
+ case CmpInst::ICMP_ULE:
+ // However, we have to switch them to their signed variants to handle
+ // negative indices from the base pointer.
+ Pred = ICmpInst::getSignedPredicate(Pred);
+ break;
+ }
+
+ Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
+ if (!LHSOffset)
+ return 0;
+ Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
+ if (!RHSOffset)
+ return 0;
+
+ // If LHS and RHS are not related via constant offsets to the same base
+ // value, there is nothing we can do here.
+ if (LHS != RHS)
+ return 0;
+
+ return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
+}
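// Illustrative aside, assuming both operands strip down to the same base
// pointer %p: 'icmp ult (gep inbounds %p, 4), (gep inbounds %p, 8)' becomes
// a comparison of the constant offsets, with ult rewritten to slt first so
// that a negative index off the base still compares correctly; 4 s< 8 then
// constant-folds to true.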
+
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD, Q.TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
@@ -1443,8 +1657,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
// Special case logic when the operands have i1 type.
- if (OpTy->isIntegerTy(1) || (OpTy->isVectorTy() &&
- cast<VectorType>(OpTy)->getElementType()->isIntegerTy(1))) {
+ if (OpTy->getScalarType()->isIntegerTy(1)) {
switch (Pred) {
default: break;
case ICmpInst::ICMP_EQ:
@@ -1480,63 +1693,101 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
}
- // icmp <alloca*>, <global/alloca*/null> - Different stack variables have
- // different addresses, and what's more the address of a stack variable is
- // never null or equal to the address of a global. Note that generalizing
- // to the case where LHS is a global variable address or null is pointless,
- // since if both LHS and RHS are constants then we already constant folded
- // the compare, and if only one of them is then we moved it to RHS already.
- if (isa<AllocaInst>(LHS) && (isa<GlobalValue>(RHS) || isa<AllocaInst>(RHS) ||
- isa<ConstantPointerNull>(RHS)))
- // We already know that LHS != RHS.
- return ConstantInt::get(ITy, CmpInst::isFalseWhenEqual(Pred));
+ // icmp <object*>, <object*/null> - Different identified objects have
+ // different addresses (unless null), and what's more the address of an
+ // identified local is never equal to another argument (again, barring null).
+ // Note that generalizing to the case where LHS is a global variable address
+ // or null is pointless, since if both LHS and RHS are constants then we
+ // already constant folded the compare, and if only one of them is then we
+ // moved it to RHS already.
+ Value *LHSPtr = LHS->stripPointerCasts();
+ Value *RHSPtr = RHS->stripPointerCasts();
+ if (LHSPtr == RHSPtr)
+ return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
+
+ // Be more aggressive about stripping pointer adjustments when checking a
+ // comparison of an alloca address to another object. We can rip off all
+ // inbounds GEP operations, even if they are variable.
+ LHSPtr = LHSPtr->stripInBoundsOffsets();
+ if (llvm::isIdentifiedObject(LHSPtr)) {
+ RHSPtr = RHSPtr->stripInBoundsOffsets();
+ if (llvm::isKnownNonNull(LHSPtr) || llvm::isKnownNonNull(RHSPtr)) {
+ // If both sides are different identified objects, they aren't equal
+ // unless they're null.
+ if (LHSPtr != RHSPtr && llvm::isIdentifiedObject(RHSPtr) &&
+ Pred == CmpInst::ICMP_EQ)
+ return ConstantInt::get(ITy, false);
+
+ // A local identified object (alloca or noalias call) can't equal any
+ // incoming argument, unless they're both null.
+ if (isa<Instruction>(LHSPtr) && isa<Argument>(RHSPtr) &&
+ Pred == CmpInst::ICMP_EQ)
+ return ConstantInt::get(ITy, false);
+ }
+
+ // Assume that the constant null is on the right.
+ if (llvm::isKnownNonNull(LHSPtr) && isa<ConstantPointerNull>(RHSPtr)) {
+ if (Pred == CmpInst::ICMP_EQ)
+ return ConstantInt::get(ITy, false);
+ else if (Pred == CmpInst::ICMP_NE)
+ return ConstantInt::get(ITy, true);
+ }
+ } else if (isa<Argument>(LHSPtr)) {
+ RHSPtr = RHSPtr->stripInBoundsOffsets();
+ // An alloca can't be equal to an argument.
+ if (isa<AllocaInst>(RHSPtr)) {
+ if (Pred == CmpInst::ICMP_EQ)
+ return ConstantInt::get(ITy, false);
+ else if (Pred == CmpInst::ICMP_NE)
+ return ConstantInt::get(ITy, true);
+ }
+ }
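// Illustrative aside (hypothetical IR): given
//   %a = alloca i32               ; identified local object
//   define i1 @f(i32* %p) ...     ; %p is an incoming argument
// 'icmp eq i32* %a, %p' folds to false here, since the alloca is known
// non-null and a fresh stack object cannot be a caller-supplied pointer.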
// If we are comparing with zero then try hard since this is a common case.
if (match(RHS, m_Zero())) {
bool LHSKnownNonNegative, LHSKnownNegative;
switch (Pred) {
- default:
- assert(false && "Unknown ICmp predicate!");
+ default: llvm_unreachable("Unknown ICmp predicate!");
case ICmpInst::ICMP_ULT:
return getFalse(ITy);
case ICmpInst::ICMP_UGE:
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (isKnownNonZero(LHS, TD))
+ if (isKnownNonZero(LHS, Q.TD))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
- if (isKnownNonZero(LHS, TD))
+ if (isKnownNonZero(LHS, Q.TD))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
if (LHSKnownNegative)
return getTrue(ITy);
if (LHSKnownNonNegative)
return getFalse(ITy);
break;
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
if (LHSKnownNegative)
return getTrue(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, TD))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
return getFalse(ITy);
break;
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
if (LHSKnownNegative)
return getFalse(ITy);
if (LHSKnownNonNegative)
return getTrue(ITy);
break;
case ICmpInst::ICMP_SGT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
if (LHSKnownNegative)
return getFalse(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, TD))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
return getTrue(ITy);
break;
}
@@ -1564,6 +1815,9 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// 'srem x, CI2' produces (-|CI2|, |CI2|).
Upper = CI2->getValue().abs();
Lower = (-Upper) + 1;
+ } else if (match(LHS, m_UDiv(m_ConstantInt(CI2), m_Value()))) {
+ // 'udiv CI2, x' produces [0, CI2].
+ Upper = CI2->getValue() + 1;
} else if (match(LHS, m_UDiv(m_Value(), m_ConstantInt(CI2)))) {
// 'udiv x, CI2' produces [0, UINT_MAX / CI2].
APInt NegOne = APInt::getAllOnesValue(Width);
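// Illustrative aside, a worked instance of these ranges: 'udiv 7, %x'
// always yields a value in [0, 7], so 'icmp ugt (udiv 7, %x), 7' folds to
// false, while for 32-bit operands 'udiv %x, 7' lies in
// [0, 0xFFFFFFFF / 7] = [0, 613566756] as computed below.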
@@ -1616,19 +1870,19 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
- if (MaxRecurse && TD && isa<PtrToIntInst>(LI) &&
- TD->getPointerSizeInBits() == DstTy->getPrimitiveSizeInBits()) {
+ if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
+ Q.TD->getPointerSizeInBits() == DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
ConstantExpr::getIntToPtr(RHSC, SrcTy),
- TD, DT, MaxRecurse-1))
+ Q, MaxRecurse-1))
return V;
} else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
if (RI->getOperand(0)->getType() == SrcTy)
// Compare without the cast.
if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
- TD, DT, MaxRecurse-1))
+ Q, MaxRecurse-1))
return V;
}
}
@@ -1640,7 +1894,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
// Compare X and Y. Note that signed predicates become unsigned.
if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
- SrcOp, RI->getOperand(0), TD, DT,
+ SrcOp, RI->getOperand(0), Q,
MaxRecurse-1))
return V;
}
@@ -1656,15 +1910,14 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// also a case of comparing two zero-extended values.
if (RExt == CI && MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
- SrcOp, Trunc, TD, DT, MaxRecurse-1))
+ SrcOp, Trunc, Q, MaxRecurse-1))
return V;
// Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
// there. Use this to work out the result of the comparison.
if (RExt != CI) {
switch (Pred) {
- default:
- assert(false && "Unknown ICmp predicate!");
+ default: llvm_unreachable("Unknown ICmp predicate!");
// LHS <u RHS.
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_UGT:
@@ -1701,7 +1954,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
// Compare X and Y. Note that the predicate does not change.
if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
- TD, DT, MaxRecurse-1))
+ Q, MaxRecurse-1))
return V;
}
// Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
@@ -1715,16 +1968,14 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// If the re-extended constant didn't change then this is effectively
// also a case of comparing two sign-extended values.
if (RExt == CI && MaxRecurse)
- if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, TD, DT,
- MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
return V;
// Otherwise the upper bits of LHS are all equal, while RHS has varying
// bits there. Use this to work out the result of the comparison.
if (RExt != CI) {
switch (Pred) {
- default:
- assert(false && "Unknown ICmp predicate!");
+ default: llvm_unreachable("Unknown ICmp predicate!");
case ICmpInst::ICMP_EQ:
return ConstantInt::getFalse(CI->getContext());
case ICmpInst::ICMP_NE:
@@ -1751,7 +2002,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
Constant::getNullValue(SrcTy),
- TD, DT, MaxRecurse-1))
+ Q, MaxRecurse-1))
return V;
break;
case ICmpInst::ICMP_ULT:
@@ -1760,7 +2011,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
Constant::getNullValue(SrcTy),
- TD, DT, MaxRecurse-1))
+ Q, MaxRecurse-1))
return V;
break;
}
@@ -1794,14 +2045,14 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if ((A == RHS || B == RHS) && NoLHSWrapProblem)
if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
Constant::getNullValue(RHS->getType()),
- TD, DT, MaxRecurse-1))
+ Q, MaxRecurse-1))
return V;
// icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
if ((C == LHS || D == LHS) && NoRHSWrapProblem)
if (Value *V = SimplifyICmpInst(Pred,
Constant::getNullValue(LHS->getType()),
- C == LHS ? D : C, TD, DT, MaxRecurse-1))
+ C == LHS ? D : C, Q, MaxRecurse-1))
return V;
// icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
@@ -1810,7 +2061,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Determine Y and Z in the form icmp (X+Y), (X+Z).
Value *Y = (A == C || A == D) ? B : A;
Value *Z = (C == A || C == B) ? D : C;
- if (Value *V = SimplifyICmpInst(Pred, Y, Z, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse-1))
return V;
}
}
@@ -1822,7 +2073,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, TD);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
if (!KnownNonNegative)
break;
// fall-through
@@ -1832,7 +2083,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return getFalse(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, TD);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
if (!KnownNonNegative)
break;
// fall-through
@@ -1849,7 +2100,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, TD);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
if (!KnownNonNegative)
break;
// fall-through
@@ -1859,7 +2110,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return getTrue(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, TD);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
if (!KnownNonNegative)
break;
// fall-through
@@ -1870,6 +2121,15 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
}
+ // x udiv y <=u x.
+ if (LBO && match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
+ // icmp pred (X /u Y), X
+ if (Pred == ICmpInst::ICMP_UGT)
+ return getFalse(ITy);
+ if (Pred == ICmpInst::ICMP_ULE)
+ return getTrue(ITy);
+ }
+
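// Illustrative aside: unsigned division never makes the dividend larger
// (X /u Y <=u X for any Y; Y == 0 is undefined anyway), e.g.
// 100 /u 3 = 33 <=u 100, which is why ugt folds to false and ule to true.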
if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
LBO->getOperand(1) == RBO->getOperand(1)) {
switch (LBO->getOpcode()) {
@@ -1884,7 +2144,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (!LBO->isExact() || !RBO->isExact())
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
- RBO->getOperand(0), TD, DT, MaxRecurse-1))
+ RBO->getOperand(0), Q, MaxRecurse-1))
return V;
break;
case Instruction::Shl: {
@@ -1895,7 +2155,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (!NSW && ICmpInst::isSigned(Pred))
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
- RBO->getOperand(0), TD, DT, MaxRecurse-1))
+ RBO->getOperand(0), Q, MaxRecurse-1))
return V;
break;
}
@@ -1949,7 +2209,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return V;
// Otherwise, see if "A EqP B" simplifies.
if (MaxRecurse)
- if (Value *V = SimplifyICmpInst(EqP, A, B, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse-1))
return V;
break;
case CmpInst::ICMP_NE:
@@ -1963,7 +2223,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return V;
// Otherwise, see if "A InvEqP B" simplifies.
if (MaxRecurse)
- if (Value *V = SimplifyICmpInst(InvEqP, A, B, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse-1))
return V;
break;
}
@@ -2019,7 +2279,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return V;
// Otherwise, see if "A EqP B" simplifies.
if (MaxRecurse)
- if (Value *V = SimplifyICmpInst(EqP, A, B, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse-1))
return V;
break;
case CmpInst::ICMP_NE:
@@ -2033,7 +2293,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return V;
// Otherwise, see if "A InvEqP B" simplifies.
if (MaxRecurse)
- if (Value *V = SimplifyICmpInst(InvEqP, A, B, TD, DT, MaxRecurse-1))
+ if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse-1))
return V;
break;
}
@@ -2090,37 +2350,66 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return getFalse(ITy);
}
+ // Simplify comparisons of related pointers using a powerful, recursive
+ // GEP walk when we have target data available.
+ if (Q.TD && LHS->getType()->isPointerTy() && RHS->getType()->isPointerTy())
+ if (Constant *C = computePointerICmp(*Q.TD, Pred, LHS, RHS))
+ return C;
+
+ if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
+ if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
+ if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
+ GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
+ (ICmpInst::isEquality(Pred) ||
+ (GLHS->isInBounds() && GRHS->isInBounds() &&
+ Pred == ICmpInst::getSignedPredicate(Pred)))) {
+ // The bases are equal and the indices are constant. Build a constant
+ // expression GEP with the same indices and a null base pointer to see
+ // what constant folding can make out of it.
+ Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
+ SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
+ Constant *NewLHS = ConstantExpr::getGetElementPtr(Null, IndicesLHS);
+
+ SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
+ Constant *NewRHS = ConstantExpr::getGetElementPtr(Null, IndicesRHS);
+ return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
+ }
+ }
+ }
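// Illustrative aside (hypothetical IR with a shared base %p):
//   %l = getelementptr inbounds i32* %p, i64 1
//   %r = getelementptr inbounds i32* %p, i64 2
// Rebasing both GEPs onto a null pointer produces two constant expressions
// with the same relative order, so 'icmp slt %l, %r' (a signed predicate,
// as the guard above requires for the inbounds case) folds to true.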
+
// If the comparison is with the result of a select instruction, check whether
// comparing with either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
- if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
return V;
// If the comparison is with the result of a phi instruction, check whether
// doing the compare with each incoming phi value yields a common result.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
- if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyICmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ RecursionLimit);
}
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, TD);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD, Q.TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
@@ -2188,27 +2477,31 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// If the comparison is with the result of a select instruction, check whether
// comparing with either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
- if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
return V;
// If the comparison is with the result of a phi instruction, check whether
// doing the compare with each incoming phi value yields a common result.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
- if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
return 0;
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyFCmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ RecursionLimit);
}
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
-Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
- const TargetData *TD, const DominatorTree *) {
+static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
+ Value *FalseVal, const Query &Q,
+ unsigned MaxRecurse) {
// select true, X, Y -> X
// select false, X, Y -> Y
if (ConstantInt *CB = dyn_cast<ConstantInt>(CondVal))
@@ -2231,12 +2524,22 @@ Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
return 0;
}
+Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
+ RecursionLimit);
+}
+
/// SimplifyGEPInst - Given operands for a GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops,
- const TargetData *TD, const DominatorTree *) {
+static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
// The type of the GEP pointer operand.
- PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());
+ PointerType *PtrTy = dyn_cast<PointerType>(Ops[0]->getType());
+ // If the pointer operand is not a pointer, it is a vector of pointers; give up.
+ if (!PtrTy)
+ return 0;
// getelementptr P -> P.
if (Ops.size() == 1)
@@ -2255,9 +2558,9 @@ Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops,
if (C->isZero())
return Ops[0];
// getelementptr P, N -> P if P points to a type of zero size.
- if (TD) {
+ if (Q.TD) {
Type *Ty = PtrTy->getElementType();
- if (Ty->isSized() && TD->getTypeAllocSize(Ty) == 0)
+ if (Ty->isSized() && Q.TD->getTypeAllocSize(Ty) == 0)
return Ops[0];
}
}
@@ -2270,12 +2573,17 @@ Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops,
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
+}
+
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
/// can fold the result. If not, this returns null.
-Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
- ArrayRef<unsigned> Idxs,
- const TargetData *,
- const DominatorTree *) {
+static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
+ ArrayRef<unsigned> Idxs, const Query &Q,
+ unsigned) {
if (Constant *CAgg = dyn_cast<Constant>(Agg))
if (Constant *CVal = dyn_cast<Constant>(Val))
return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
@@ -2300,8 +2608,17 @@ Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
return 0;
}
+Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
+ ArrayRef<unsigned> Idxs,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
+ RecursionLimit);
+}
+
/// SimplifyPHINode - See if we can fold the given phi. If not, returns null.
-static Value *SimplifyPHINode(PHINode *PN, const DominatorTree *DT) {
+static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
// If all of the PHI's incoming values are the same then replace the PHI node
// with the common value.
Value *CommonValue = 0;
@@ -2329,67 +2646,77 @@ static Value *SimplifyPHINode(PHINode *PN, const DominatorTree *DT) {
// instruction, we cannot return X as the result of the PHI node unless it
// dominates the PHI block.
if (HasUndefInput)
- return ValueDominatesPHI(CommonValue, PN, DT) ? CommonValue : 0;
+ return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : 0;
return CommonValue;
}
+static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
+ if (Constant *C = dyn_cast<Constant>(Op))
+ return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.TD, Q.TLI);
+
+ return 0;
+}
+
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
+}
//=== Helper functions for higher up the class hierarchy.
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
switch (Opcode) {
case Instruction::Add:
return SimplifyAddInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
- TD, DT, MaxRecurse);
+ Q, MaxRecurse);
case Instruction::Sub:
return SimplifySubInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
- TD, DT, MaxRecurse);
- case Instruction::Mul: return SimplifyMulInst (LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::FDiv: return SimplifyFDivInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::SRem: return SimplifySRemInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::URem: return SimplifyURemInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::FRem: return SimplifyFRemInst(LHS, RHS, TD, DT, MaxRecurse);
+ Q, MaxRecurse);
+ case Instruction::Mul: return SimplifyMulInst (LHS, RHS, Q, MaxRecurse);
+ case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
+ case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
+ case Instruction::FDiv: return SimplifyFDivInst(LHS, RHS, Q, MaxRecurse);
+ case Instruction::SRem: return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
+ case Instruction::URem: return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
+ case Instruction::FRem: return SimplifyFRemInst(LHS, RHS, Q, MaxRecurse);
case Instruction::Shl:
return SimplifyShlInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
- TD, DT, MaxRecurse);
+ Q, MaxRecurse);
case Instruction::LShr:
- return SimplifyLShrInst(LHS, RHS, /*isExact*/false, TD, DT, MaxRecurse);
+ return SimplifyLShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse);
case Instruction::AShr:
- return SimplifyAShrInst(LHS, RHS, /*isExact*/false, TD, DT, MaxRecurse);
- case Instruction::And: return SimplifyAndInst(LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::Or: return SimplifyOrInst (LHS, RHS, TD, DT, MaxRecurse);
- case Instruction::Xor: return SimplifyXorInst(LHS, RHS, TD, DT, MaxRecurse);
+ return SimplifyAShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse);
+ case Instruction::And: return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
+ case Instruction::Or: return SimplifyOrInst (LHS, RHS, Q, MaxRecurse);
+ case Instruction::Xor: return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
default:
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
Constant *COps[] = {CLHS, CRHS};
- return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, TD);
+ return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.TD,
+ Q.TLI);
}
// If the operation is associative, try some generic simplifications.
if (Instruction::isAssociative(Opcode))
- if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, TD, DT,
- MaxRecurse))
+ if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
- // If the operation is with the result of a select instruction, check whether
+ // If the operation is with the result of a select instruction check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
- if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, TD, DT,
- MaxRecurse))
+ if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
- if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, TD, DT, MaxRecurse))
+ if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
return 0;
@@ -2397,119 +2724,136 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyBinOp(Opcode, LHS, RHS, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
}
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result.
static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT,
- unsigned MaxRecurse) {
+ const Query &Q, unsigned MaxRecurse) {
if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
- return SimplifyICmpInst(Predicate, LHS, RHS, TD, DT, MaxRecurse);
- return SimplifyFCmpInst(Predicate, LHS, RHS, TD, DT, MaxRecurse);
+ return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
+ return SimplifyFCmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const DominatorTree *DT) {
- return ::SimplifyCmpInst(Predicate, LHS, RHS, TD, DT, RecursionLimit);
+ const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ RecursionLimit);
+}
+
+static Value *SimplifyCallInst(CallInst *CI, const Query &) {
+ // call undef -> undef
+ if (isa<UndefValue>(CI->getCalledValue()))
+ return UndefValue::get(CI->getType());
+
+ return 0;
}
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
+ const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
Value *Result;
switch (I->getOpcode()) {
default:
- Result = ConstantFoldInstruction(I, TD);
+ Result = ConstantFoldInstruction(I, TD, TLI);
break;
case Instruction::Add:
Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::Sub:
Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::Mul:
- Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::SDiv:
- Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::UDiv:
- Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::FDiv:
- Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::SRem:
- Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::URem:
- Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::FRem:
- Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::Shl:
Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::LShr:
Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::AShr:
Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, DT);
+ TD, TLI, DT);
break;
case Instruction::And:
- Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::Or:
- Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::Xor:
- Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, DT);
+ Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::ICmp:
Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, DT);
+ I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::FCmp:
Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, DT);
+ I->getOperand(0), I->getOperand(1), TD, TLI, DT);
break;
case Instruction::Select:
Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
- I->getOperand(2), TD, DT);
+ I->getOperand(2), TD, TLI, DT);
break;
case Instruction::GetElementPtr: {
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
- Result = SimplifyGEPInst(Ops, TD, DT);
+ Result = SimplifyGEPInst(Ops, TD, TLI, DT);
break;
}
case Instruction::InsertValue: {
InsertValueInst *IV = cast<InsertValueInst>(I);
Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
IV->getInsertedValueOperand(),
- IV->getIndices(), TD, DT);
+ IV->getIndices(), TD, TLI, DT);
break;
}
case Instruction::PHI:
- Result = SimplifyPHINode(cast<PHINode>(I), DT);
+ Result = SimplifyPHINode(cast<PHINode>(I), Query (TD, TLI, DT));
+ break;
+ case Instruction::Call:
+ Result = SimplifyCallInst(cast<CallInst>(I), Query (TD, TLI, DT));
+ break;
+ case Instruction::Trunc:
+ Result = SimplifyTruncInst(I->getOperand(0), I->getType(), TD, TLI, DT);
break;
}
@@ -2519,57 +2863,84 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
return Result == I ? UndefValue::get(I->getType()) : Result;
}
-/// ReplaceAndSimplifyAllUses - Perform From->replaceAllUsesWith(To) and then
-/// delete the From instruction. In addition to a basic RAUW, this does a
-/// recursive simplification of the newly formed instructions. This catches
-/// things where one simplification exposes other opportunities. This only
-/// simplifies and deletes scalar operations, it does not change the CFG.
+/// \brief Implementation of recursive simplification through an instruction's
+/// uses.
///
-void llvm::ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
- const TargetData *TD,
- const DominatorTree *DT) {
- assert(From != To && "ReplaceAndSimplifyAllUses(X,X) is not valid!");
-
- // FromHandle/ToHandle - This keeps a WeakVH on the from/to values so that
- // we can know if it gets deleted out from under us or replaced in a
- // recursive simplification.
- WeakVH FromHandle(From);
- WeakVH ToHandle(To);
-
- while (!From->use_empty()) {
- // Update the instruction to use the new value.
- Use &TheUse = From->use_begin().getUse();
- Instruction *User = cast<Instruction>(TheUse.getUser());
- TheUse = To;
-
- // Check to see if the instruction can be folded due to the operand
- // replacement. For example changing (or X, Y) into (or X, -1) can replace
- // the 'or' with -1.
- Value *SimplifiedVal;
- {
- // Sanity check to make sure 'User' doesn't dangle across
- // SimplifyInstruction.
- AssertingVH<> UserHandle(User);
-
- SimplifiedVal = SimplifyInstruction(User, TD, DT);
- if (SimplifiedVal == 0) continue;
- }
+/// This is the common implementation of the recursive simplification routines.
+/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
+/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
+/// instructions to process and attempt to simplify it using
+/// InstructionSimplify.
+///
+/// This routine returns 'true' only when *it* simplifies something. The passed
+/// in simplified value does not count toward this.
+static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ bool Simplified = false;
+ SmallSetVector<Instruction *, 8> Worklist;
+
+ // If we have an explicit value to collapse to, do that round of the
+ // simplification loop by hand initially.
+ if (SimpleV) {
+ for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
+ ++UI)
+ if (*UI != I)
+ Worklist.insert(cast<Instruction>(*UI));
+
+ // Replace the instruction with its simplified value.
+ I->replaceAllUsesWith(SimpleV);
+
+ // Gracefully handle edge cases where the instruction is not wired into any
+ // parent block.
+ if (I->getParent())
+ I->eraseFromParent();
+ } else {
+ Worklist.insert(I);
+ }
- // Recursively simplify this user to the new value.
- ReplaceAndSimplifyAllUses(User, SimplifiedVal, TD, DT);
- From = dyn_cast_or_null<Instruction>((Value*)FromHandle);
- To = ToHandle;
+ // Note that we must re-test the size each iteration, as the worklist can grow.
+ for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
+ I = Worklist[Idx];
+
+ // See if this instruction simplifies.
+ SimpleV = SimplifyInstruction(I, TD, TLI, DT);
+ if (!SimpleV)
+ continue;
- assert(ToHandle && "To value deleted by recursive simplification?");
+ Simplified = true;
- // If the recursive simplification ended up revisiting and deleting
- // 'From' then we're done.
- if (From == 0)
- return;
+ // Stash away all the uses of the old instruction so we can check them for
+ // recursive simplifications after a RAUW. This is cheaper than checking all
+ // uses of the replacement value on the recursive step in most cases.
+ for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
+ ++UI)
+ Worklist.insert(cast<Instruction>(*UI));
+
+ // Replace the instruction with its simplified value.
+ I->replaceAllUsesWith(SimpleV);
+
+ // Gracefully handle edge cases where the instruction is not wired into any
+ // parent block.
+ if (I->getParent())
+ I->eraseFromParent();
}
+ return Simplified;
+}
- // If 'From' has value handles referring to it, do a real RAUW to update them.
- From->replaceAllUsesWith(To);
+bool llvm::recursivelySimplifyInstruction(Instruction *I,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
+}
- From->eraseFromParent();
+bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT) {
+ assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
+ assert(SimpleV && "Must provide a simplified value.");
+ return replaceAndRecursivelySimplifyImpl(I, SimpleV, TD, TLI, DT);
}
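// Illustrative aside, a minimal usage sketch of the new entry points
// (findEquivalentValue is a hypothetical helper standing in for whatever
// analysis proved the equivalence):
//
//   if (Value *V = findEquivalentValue(I))
//     replaceAndRecursivelySimplify(I, V, TD, TLI, DT);
//   // I is RAUW'd to V and erased unconditionally; the bool result is true
//   // only when some transitive user simplified beyond the forced RAUW.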
diff --git a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
index f80595c..5ca2746 100644
--- a/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -20,20 +20,25 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include <map>
#include <stack>
using namespace llvm;
+using namespace PatternMatch;
char LazyValueInfo::ID = 0;
-INITIALIZE_PASS(LazyValueInfo, "lazy-value-info",
+INITIALIZE_PASS_BEGIN(LazyValueInfo, "lazy-value-info",
+ "Lazy Value Information Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(LazyValueInfo, "lazy-value-info",
"Lazy Value Information Analysis", false, true)
namespace llvm {
@@ -61,10 +66,10 @@ class LVILatticeVal {
constant,
/// notconstant - This Value is known to not have the specified value.
notconstant,
-
+
/// constantrange - The Value falls within this range.
constantrange,
-
+
/// overdefined - This value is not known to be constant, and we know that
/// it has a value.
overdefined
@@ -207,7 +212,7 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData for smarter constant folding.
+ // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getConstant(),
@@ -233,7 +238,7 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData for smarter constant folding.
+ // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getNotConstant(),
@@ -305,50 +310,6 @@ namespace {
};
}
-namespace llvm {
- template<>
- struct DenseMapInfo<LVIValueHandle> {
- typedef DenseMapInfo<Value*> PointerInfo;
- static inline LVIValueHandle getEmptyKey() {
- return LVIValueHandle(PointerInfo::getEmptyKey(),
- static_cast<LazyValueInfoCache*>(0));
- }
- static inline LVIValueHandle getTombstoneKey() {
- return LVIValueHandle(PointerInfo::getTombstoneKey(),
- static_cast<LazyValueInfoCache*>(0));
- }
- static unsigned getHashValue(const LVIValueHandle &Val) {
- return PointerInfo::getHashValue(Val);
- }
- static bool isEqual(const LVIValueHandle &LHS, const LVIValueHandle &RHS) {
- return LHS == RHS;
- }
- };
-
- template<>
- struct DenseMapInfo<std::pair<AssertingVH<BasicBlock>, Value*> > {
- typedef std::pair<AssertingVH<BasicBlock>, Value*> PairTy;
- typedef DenseMapInfo<AssertingVH<BasicBlock> > APointerInfo;
- typedef DenseMapInfo<Value*> BPointerInfo;
- static inline PairTy getEmptyKey() {
- return std::make_pair(APointerInfo::getEmptyKey(),
- BPointerInfo::getEmptyKey());
- }
- static inline PairTy getTombstoneKey() {
- return std::make_pair(APointerInfo::getTombstoneKey(),
- BPointerInfo::getTombstoneKey());
- }
- static unsigned getHashValue( const PairTy &Val) {
- return APointerInfo::getHashValue(Val.first) ^
- BPointerInfo::getHashValue(Val.second);
- }
- static bool isEqual(const PairTy &LHS, const PairTy &RHS) {
- return APointerInfo::isEqual(LHS.first, RHS.first) &&
- BPointerInfo::isEqual(LHS.second, RHS.second);
- }
- };
-}
-
namespace {
/// LazyValueInfoCache - This is the cache kept by LazyValueInfo which
/// maintains information about queries across the clients' queries.
@@ -360,14 +321,18 @@ namespace {
/// ValueCache - This is all of the cached information for all values,
/// mapped from Value* to key information.
- DenseMap<LVIValueHandle, ValueCacheEntryTy> ValueCache;
+ std::map<LVIValueHandle, ValueCacheEntryTy> ValueCache;
/// OverDefinedCache - This tracks, on a per-block basis, the set of
/// values that are over-defined at the end of that block. This is required
/// for cache updating.
typedef std::pair<AssertingVH<BasicBlock>, Value*> OverDefinedPairTy;
DenseSet<OverDefinedPairTy> OverDefinedCache;
-
+
+ /// SeenBlocks - Keep track of all blocks that we have ever seen, so we
+ /// don't spend time removing unused blocks from our caches.
+ DenseSet<AssertingVH<BasicBlock> > SeenBlocks;
+
/// BlockValueStack - This stack holds the state of the value solver
/// during a query. It basically emulates the callstack of the naive
/// recursive value lookup process.
@@ -438,6 +403,7 @@ namespace {
/// clear - Empty the cache.
void clear() {
+ SeenBlocks.clear();
ValueCache.clear();
OverDefinedCache.clear();
}
@@ -466,6 +432,12 @@ void LVIValueHandle::deleted() {
}
void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
+ // Shortcut if we have never seen this block.
+ DenseSet<AssertingVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
+ if (I == SeenBlocks.end())
+ return;
+ SeenBlocks.erase(I);
+
SmallVector<OverDefinedPairTy, 4> ToErase;
for (DenseSet<OverDefinedPairTy>::iterator I = OverDefinedCache.begin(),
E = OverDefinedCache.end(); I != E; ++I) {
@@ -477,7 +449,7 @@ void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
E = ToErase.end(); I != E; ++I)
OverDefinedCache.erase(*I);
- for (DenseMap<LVIValueHandle, ValueCacheEntryTy>::iterator
+ for (std::map<LVIValueHandle, ValueCacheEntryTy>::iterator
I = ValueCache.begin(), E = ValueCache.end(); I != E; ++I)
I->second.erase(BB);
}
@@ -505,6 +477,7 @@ LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) {
if (Constant *VC = dyn_cast<Constant>(Val))
return LVILatticeVal::get(VC);
+ SeenBlocks.insert(BB);
return lookup(Val)[BB];
}
@@ -513,6 +486,7 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
return true;
ValueCacheEntryTy &Cache = lookup(Val);
+ SeenBlocks.insert(BB);
LVILatticeVal &BBLV = Cache[BB];
// OverDefinedCacheUpdater is a helper object that will update
@@ -823,9 +797,8 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
// If the condition of the branch is an equality comparison, we may be
// able to infer the value.
ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition());
- if (ICI && ICI->getOperand(0) == Val &&
- isa<Constant>(ICI->getOperand(1))) {
- if (ICI->isEquality()) {
+ if (ICI && isa<Constant>(ICI->getOperand(1))) {
+ if (ICI->isEquality() && ICI->getOperand(0) == Val) {
// We know that V has the RHS constant if this is a true SETEQ or
// false SETNE.
if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ))
@@ -835,12 +808,23 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
return true;
}
- if (ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
+ // Recognize the range checking idiom that InstCombine produces.
+ // (X-C1) u< C2 --> [C1, C1+C2)
+ ConstantInt *NegOffset = 0;
+ if (ICI->getPredicate() == ICmpInst::ICMP_ULT)
+ match(ICI->getOperand(0), m_Add(m_Specific(Val),
+ m_ConstantInt(NegOffset)));
+
+ ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1));
+ if (CI && (ICI->getOperand(0) == Val || NegOffset)) {
// Calculate the range of values that would satisfy the comparison.
ConstantRange CmpRange(CI->getValue(), CI->getValue()+1);
ConstantRange TrueValues =
ConstantRange::makeICmpRegion(ICI->getPredicate(), CmpRange);
+ if (NegOffset) // Apply the offset from above.
+ TrueValues = TrueValues.subtract(NegOffset->getValue());
+
// If we're interested in the false dest, invert the condition.
if (!isTrueDest) TrueValues = TrueValues.inverse();
@@ -882,10 +866,11 @@ bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
// BBFrom to BBTo.
unsigned NumEdges = 0;
ConstantInt *EdgeVal = 0;
- for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) {
- if (SI->getSuccessor(i) != BBTo) continue;
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
+ if (i.getCaseSuccessor() != BBTo) continue;
if (NumEdges++) break;
- EdgeVal = SI->getCaseValue(i);
+ EdgeVal = i.getCaseValue();
}
assert(EdgeVal && "Missing successor?");
if (NumEdges == 1) {
@@ -1007,12 +992,19 @@ static LazyValueInfoCache &getCache(void *&PImpl) {
bool LazyValueInfo::runOnFunction(Function &F) {
if (PImpl)
getCache(PImpl).clear();
-
+
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
+
// Fully lazy.
return false;
}
+void LazyValueInfo::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<TargetLibraryInfo>();
+}
+
void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
@@ -1061,7 +1053,8 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
// If we know the value is a constant, evaluate the conditional.
Constant *Res = 0;
if (Result.isConstant()) {
- Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD);
+ Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD,
+ TLI);
if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
return ResCI->isZero() ? False : True;
return Unknown;
@@ -1102,13 +1095,15 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
if (Pred == ICmpInst::ICMP_EQ) {
// !C1 == C -> false iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
- Result.getNotConstant(), C, TD);
+ Result.getNotConstant(), C, TD,
+ TLI);
if (Res->isNullValue())
return False;
} else if (Pred == ICmpInst::ICMP_NE) {
// !C1 != C -> true iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
- Result.getNotConstant(), C, TD);
+ Result.getNotConstant(), C, TD,
+ TLI);
if (Res->isNullValue())
return True;
}
diff --git a/contrib/llvm/lib/Analysis/Lint.cpp b/contrib/llvm/lib/Analysis/Lint.cpp
index 38d677d..83bdf52 100644
--- a/contrib/llvm/lib/Analysis/Lint.cpp
+++ b/contrib/llvm/lib/Analysis/Lint.cpp
@@ -44,6 +44,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/IntrinsicInst.h"
@@ -103,6 +104,7 @@ namespace {
AliasAnalysis *AA;
DominatorTree *DT;
TargetData *TD;
+ TargetLibraryInfo *TLI;
std::string Messages;
raw_string_ostream MessagesStr;
@@ -117,6 +119,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<AliasAnalysis>();
+ AU.addRequired<TargetLibraryInfo>();
AU.addRequired<DominatorTree>();
}
virtual void print(raw_ostream &O, const Module *M) const {}
@@ -149,6 +152,7 @@ namespace {
char Lint::ID = 0;
INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
false, true)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
@@ -174,6 +178,7 @@ bool Lint::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
visit(F);
dbgs() << MessagesStr.str();
Messages.clear();
@@ -411,9 +416,8 @@ void Lint::visitMemoryReference(Instruction &I,
if (Align != 0) {
unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
- APInt Mask = APInt::getAllOnesValue(BitWidth),
- KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(Ptr, Mask, KnownZero, KnownOne, TD);
+ APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ ComputeMaskedBits(Ptr, KnownZero, KnownOne, TD);
Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
"Undefined behavior: Memory reference address is misaligned", &I);
}
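
The Assert1 above flags an access as misaligned when any of the low log2(Align) bits of the address are known to be one. The same bit test on plain integers, as a standalone sketch (hypothetical values, not the APInt API):

    #include <cassert>
    #include <cstdint>

    // True if an address with the given known-one bits is provably
    // misaligned with respect to Align (a power of two).
    static bool provablyMisaligned(uint64_t KnownOne, uint64_t Align) {
      uint64_t LowMask = Align - 1;       // the low log2(Align) bits
      return (KnownOne & LowMask) != 0;   // any known-one low bit => misaligned
    }

    int main() {
      // Analysis proved bit 1 of the pointer is one (addr = ...10 in binary).
      assert(provablyMisaligned(/*KnownOne=*/0x2, /*Align=*/4));
      // Nothing known about the low bits: cannot prove misalignment.
      assert(!provablyMisaligned(/*KnownOne=*/0x0, /*Align=*/8));
      return 0;
    }
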
@@ -471,9 +475,8 @@ static bool isZero(Value *V, TargetData *TD) {
if (isa<UndefValue>(V)) return true;
unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
- APInt Mask = APInt::getAllOnesValue(BitWidth),
- KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
+ APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ ComputeMaskedBits(V, KnownZero, KnownOne, TD);
return KnownZero.isAllOnesValue();
}
@@ -614,10 +617,10 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
- if (Value *W = SimplifyInstruction(Inst, TD, DT))
+ if (Value *W = SimplifyInstruction(Inst, TD, TLI, DT))
return findValueImpl(W, OffsetOk, Visited);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- if (Value *W = ConstantFoldConstantExpression(CE, TD))
+ if (Value *W = ConstantFoldConstantExpression(CE, TD, TLI))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
}
diff --git a/contrib/llvm/lib/Analysis/Loads.cpp b/contrib/llvm/lib/Analysis/Loads.cpp
index 0e6bcbf..873a275 100644
--- a/contrib/llvm/lib/Analysis/Loads.cpp
+++ b/contrib/llvm/lib/Analysis/Loads.cpp
@@ -17,6 +17,7 @@
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
using namespace llvm;
@@ -160,10 +161,15 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
/// it is set to 0, it will scan the whole block. You can also optionally
/// specify an alias analysis implementation, which makes this more precise.
+///
+/// If TBAATag is non-null and a load or store is found, the TBAA tag from the
+/// load or store is recorded there. If there is no TBAA tag or if no access
+/// is found, it is left unmodified.
Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
BasicBlock::iterator &ScanFrom,
unsigned MaxInstsToScan,
- AliasAnalysis *AA) {
+ AliasAnalysis *AA,
+ MDNode **TBAATag) {
if (MaxInstsToScan == 0) MaxInstsToScan = ~0U;
// If we're using alias analysis to disambiguate get the size of *Ptr.
@@ -191,15 +197,19 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// (This is true even if the load is volatile or atomic, although
// those cases are unlikely.)
if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
- if (AreEquivalentAddressValues(LI->getOperand(0), Ptr))
+ if (AreEquivalentAddressValues(LI->getOperand(0), Ptr)) {
+ if (TBAATag) *TBAATag = LI->getMetadata(LLVMContext::MD_tbaa);
return LI;
+ }
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// If this is a store through Ptr, the value is available!
// (This is true even if the store is volatile or atomic, although
// those cases are unlikely.)
- if (AreEquivalentAddressValues(SI->getOperand(1), Ptr))
+ if (AreEquivalentAddressValues(SI->getOperand(1), Ptr)) {
+ if (TBAATag) *TBAATag = SI->getMetadata(LLVMContext::MD_tbaa);
return SI->getOperand(0);
+ }
// If Ptr is an alloca and this is a store to a different alloca, ignore
// the store. This is a trivial form of alias analysis that is important
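
The contract documented above (record the tag only when the caller passed a slot and the access carried one, otherwise leave it untouched) is the usual optional out-parameter pattern. A tiny standalone sketch of that contract (made-up types, not the LLVM ones):

    #include <cassert>

    struct Metadata { int id; };

    // Record the tag only when OutTag is non-null and the hit carries one;
    // otherwise the caller's pointer is left unmodified.
    static int findAvailable(Metadata *Tag, Metadata **OutTag) {
      if (OutTag && Tag)
        *OutTag = Tag;
      return 42; // stands in for the available value
    }

    int main() {
      Metadata M = { 7 };
      Metadata *Tag = 0;
      findAvailable(&M, &Tag);   // caller asked for the tag
      assert(Tag == &M);
      Metadata *Unset = 0;
      findAvailable(0, &Unset);  // no tag on the access: pointer unmodified
      assert(Unset == 0);
      findAvailable(&M, 0);      // caller doesn't care: pass null
      return 0;
    }
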
diff --git a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
index 3997ac4..463269d 100644
--- a/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
@@ -91,8 +91,6 @@ static Value *GetPointerOperand(Value *I) {
if (StoreInst *i = dyn_cast<StoreInst>(I))
return i->getPointerOperand();
llvm_unreachable("Value is no load or store instruction!");
- // Never reached.
- return 0;
}
static AliasAnalysis::AliasResult UnderlyingObjectsAlias(AliasAnalysis *AA,
diff --git a/contrib/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm/lib/Analysis/LoopInfo.cpp
index 85aacca..f7a60a1 100644
--- a/contrib/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LoopInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopIterator.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
@@ -95,7 +96,7 @@ bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
// Test if the value is already loop-invariant.
if (isLoopInvariant(I))
return true;
- if (!I->isSafeToSpeculativelyExecute())
+ if (!isSafeToSpeculativelyExecute(I))
return false;
if (I->mayReadFromMemory())
return false;
@@ -165,99 +166,6 @@ PHINode *Loop::getCanonicalInductionVariable() const {
return 0;
}
-/// getTripCount - Return a loop-invariant LLVM value indicating the number of
-/// times the loop will be executed. Note that this means that the backedge
-/// of the loop executes N-1 times. If the trip-count cannot be determined,
-/// this returns null.
-///
-/// The IndVarSimplify pass transforms loops to have a form that this
-/// function easily understands.
-///
-Value *Loop::getTripCount() const {
- // Canonical loops will end with a 'cmp ne I, V', where I is the incremented
- // canonical induction variable and V is the trip count of the loop.
- PHINode *IV = getCanonicalInductionVariable();
- if (IV == 0 || IV->getNumIncomingValues() != 2) return 0;
-
- bool P0InLoop = contains(IV->getIncomingBlock(0));
- Value *Inc = IV->getIncomingValue(!P0InLoop);
- BasicBlock *BackedgeBlock = IV->getIncomingBlock(!P0InLoop);
-
- if (BranchInst *BI = dyn_cast<BranchInst>(BackedgeBlock->getTerminator()))
- if (BI->isConditional()) {
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition())) {
- if (ICI->getOperand(0) == Inc) {
- if (BI->getSuccessor(0) == getHeader()) {
- if (ICI->getPredicate() == ICmpInst::ICMP_NE)
- return ICI->getOperand(1);
- } else if (ICI->getPredicate() == ICmpInst::ICMP_EQ) {
- return ICI->getOperand(1);
- }
- }
- }
- }
-
- return 0;
-}
-
-/// getSmallConstantTripCount - Returns the trip count of this loop as a
-/// normal unsigned value, if possible. Returns 0 if the trip count is unknown
-/// or not constant. Will also return 0 if the trip count is very large
-/// (>= 2^32)
-unsigned Loop::getSmallConstantTripCount() const {
- Value* TripCount = this->getTripCount();
- if (TripCount) {
- if (ConstantInt *TripCountC = dyn_cast<ConstantInt>(TripCount)) {
- // Guard against huge trip counts.
- if (TripCountC->getValue().getActiveBits() <= 32) {
- return (unsigned)TripCountC->getZExtValue();
- }
- }
- }
- return 0;
-}
-
-/// getSmallConstantTripMultiple - Returns the largest constant divisor of the
-/// trip count of this loop as a normal unsigned value, if possible. This
-/// means that the actual trip count is always a multiple of the returned
-/// value (don't forget the trip count could very well be zero as well!).
-///
-/// Returns 1 if the trip count is unknown or not guaranteed to be the
-/// multiple of a constant (which is also the case if the trip count is simply
-/// constant, use getSmallConstantTripCount for that case), Will also return 1
-/// if the trip count is very large (>= 2^32).
-unsigned Loop::getSmallConstantTripMultiple() const {
- Value* TripCount = this->getTripCount();
- // This will hold the ConstantInt result, if any
- ConstantInt *Result = NULL;
- if (TripCount) {
- // See if the trip count is constant itself
- Result = dyn_cast<ConstantInt>(TripCount);
- // if not, see if it is a multiplication
- if (!Result)
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TripCount)) {
- switch (BO->getOpcode()) {
- case BinaryOperator::Mul:
- Result = dyn_cast<ConstantInt>(BO->getOperand(1));
- break;
- case BinaryOperator::Shl:
- if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1)))
- if (CI->getValue().getActiveBits() <= 5)
- return 1u << CI->getZExtValue();
- break;
- default:
- break;
- }
- }
- }
- // Guard against huge trip counts.
- if (Result && Result->getValue().getActiveBits() <= 32) {
- return (unsigned)Result->getZExtValue();
- } else {
- return 1;
- }
-}
-
/// isLCSSAForm - Return true if the Loop is in LCSSA form
bool Loop::isLCSSAForm(DominatorTree &DT) const {
// Sort the blocks vector so that we can use binary search to do quick
@@ -297,6 +205,17 @@ bool Loop::isLoopSimplifyForm() const {
return getLoopPreheader() && getLoopLatch() && hasDedicatedExits();
}
+/// isSafeToClone - Return true if the loop body is safe to clone in practice.
+/// Routines that reform the loop CFG and split edges often fail on indirectbr.
+bool Loop::isSafeToClone() const {
+ // Return false if any loop blocks contain indirectbrs.
+ for (Loop::block_iterator I = block_begin(), E = block_end(); I != E; ++I) {
+ if (isa<IndirectBrInst>((*I)->getTerminator()))
+ return false;
+ }
+ return true;
+}
+
/// hasDedicatedExits - Return true if no exit block for the loop
/// has a predecessor that is outside the loop.
bool Loop::hasDedicatedExits() const {
@@ -477,21 +396,19 @@ void UnloopUpdater::updateBlockParents() {
/// removeBlocksFromAncestors - Remove unloop's blocks from all ancestors below
/// their new parents.
void UnloopUpdater::removeBlocksFromAncestors() {
- // Remove unloop's blocks from all ancestors below their new parents.
+ // Remove all unloop's blocks (including those in nested subloops) from
+ // ancestors below the new parent loop.
for (Loop::block_iterator BI = Unloop->block_begin(),
BE = Unloop->block_end(); BI != BE; ++BI) {
- Loop *NewParent = LI->getLoopFor(*BI);
- // If this block is an immediate subloop, remove all blocks (including
- // nested subloops) from ancestors below the new parent loop.
- // Otherwise, if this block is in a nested subloop, skip it.
- if (SubloopParents.count(NewParent))
- NewParent = SubloopParents[NewParent];
- else if (Unloop->contains(NewParent))
- continue;
-
+ Loop *OuterParent = LI->getLoopFor(*BI);
+ if (Unloop->contains(OuterParent)) {
+ while (OuterParent->getParentLoop() != Unloop)
+ OuterParent = OuterParent->getParentLoop();
+ OuterParent = SubloopParents[OuterParent];
+ }
// Remove blocks from former Ancestors except Unloop itself which will be
// deleted.
- for (Loop *OldParent = Unloop->getParentLoop(); OldParent != NewParent;
+ for (Loop *OldParent = Unloop->getParentLoop(); OldParent != OuterParent;
OldParent = OldParent->getParentLoop()) {
assert(OldParent && "new loop is not an ancestor of the original");
OldParent->removeBlockFromLoop(*BI);
diff --git a/contrib/llvm/lib/Analysis/LoopPass.cpp b/contrib/llvm/lib/Analysis/LoopPass.cpp
index 5ba1f40..aba700a 100644
--- a/contrib/llvm/lib/Analysis/LoopPass.cpp
+++ b/contrib/llvm/lib/Analysis/LoopPass.cpp
@@ -14,10 +14,8 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopPass.h"
-#include "llvm/DebugInfoProbe.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Timer.h"
using namespace llvm;
@@ -54,20 +52,6 @@ char PrintLoopPass::ID = 0;
}
//===----------------------------------------------------------------------===//
-// DebugInfoProbe
-
-static DebugInfoProbeInfo *TheDebugProbe;
-static void createDebugInfoProbe() {
- if (TheDebugProbe) return;
-
- // Constructed the first time this is called. This guarantees that the
- // object will be constructed, if -enable-debug-info-probe is set,
- // before static globals, thus it will be destroyed before them.
- static ManagedStatic<DebugInfoProbeInfo> DIP;
- TheDebugProbe = &*DIP;
-}
-
-//===----------------------------------------------------------------------===//
// LPPassManager
//
@@ -195,7 +179,6 @@ void LPPassManager::getAnalysisUsage(AnalysisUsage &Info) const {
bool LPPassManager::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfo>();
bool Changed = false;
- createDebugInfoProbe();
// Collect inherited analysis from Module level pass manager.
populateInheritedAnalysis(TPM->activeStack);
@@ -227,21 +210,19 @@ bool LPPassManager::runOnFunction(Function &F) {
// Run all passes on the current Loop.
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
LoopPass *P = getContainedPass(Index);
+
dumpPassInfo(P, EXECUTION_MSG, ON_LOOP_MSG,
CurrentLoop->getHeader()->getName());
dumpRequiredSet(P);
initializeAnalysisImpl(P);
- if (TheDebugProbe)
- TheDebugProbe->initialize(P, F);
+
{
PassManagerPrettyStackEntry X(P, *CurrentLoop->getHeader());
TimeRegion PassTimer(getPassTimer(P));
Changed |= P->runOnLoop(CurrentLoop, *this);
}
- if (TheDebugProbe)
- TheDebugProbe->finalize(P, F);
if (Changed)
dumpPassInfo(P, MODIFICATION_MSG, ON_LOOP_MSG,
diff --git a/contrib/llvm/lib/Analysis/MemDepPrinter.cpp b/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
index fde07ea..22414b3 100644
--- a/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/MemDepPrinter.cpp
@@ -130,7 +130,7 @@ bool MemDepPrinter::runOnFunction(Function &F) {
AliasAnalysis::Location Loc = AA.getLocation(LI);
MDA.getNonLocalPointerDependency(Loc, true, LI->getParent(), NLDI);
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- if (!LI->isUnordered()) {
+ if (!SI->isUnordered()) {
// FIXME: Handle atomic/volatile stores.
Deps[Inst].insert(std::make_pair(getInstTypePair(0, Unknown),
static_cast<BasicBlock *>(0)));
diff --git a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
index 8d451c4..b145650 100644
--- a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -48,10 +48,10 @@ static bool isMallocCall(const CallInst *CI) {
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
  // attribute exists.
FunctionType *FTy = Callee->getFunctionType();
- if (FTy->getNumParams() != 1)
- return false;
- return FTy->getParamType(0)->isIntegerTy(32) ||
- FTy->getParamType(0)->isIntegerTy(64);
+ return FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
+ FTy->getNumParams() == 1 &&
+ (FTy->getParamType(0)->isIntegerTy(32) ||
+ FTy->getParamType(0)->isIntegerTy(64));
}
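
The tightened check now pins the return type as well as the parameter count and width. A standalone sketch of the same prototype-shape test, with plain stand-ins for the LLVM type queries:

    #include <string>

    struct Proto { std::string Ret; unsigned NumParams; unsigned Param0Bits; };

    // Accept only "i8* malloc(i32)" or "i8* malloc(i64)" shaped prototypes,
    // mirroring the stricter test above.
    static bool looksLikeMalloc(const Proto &P) {
      return P.Ret == "i8*" && P.NumParams == 1 &&
             (P.Param0Bits == 32 || P.Param0Bits == 64);
    }

    int main() {
      Proto Good = { "i8*", 1, 64 };
      Proto Bad  = { "i32", 1, 64 };   // wrong return type: now rejected
      return looksLikeMalloc(Good) && !looksLikeMalloc(Bad) ? 0 : 1;
    }
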
/// extractMallocCall - Returns the corresponding CallInst if the instruction
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 92967c0..3a544f3 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -22,6 +22,7 @@
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
@@ -91,6 +92,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
TD = getAnalysisIfAvailable<TargetData>();
+ DT = getAnalysisIfAvailable<DominatorTree>();
if (PredCache == 0)
PredCache.reset(new PredIteratorCache());
return false;
@@ -321,14 +323,100 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
!TD.fitsInLegalInteger(NewLoadByteSize*8))
return 0;
+ if (LIOffs+NewLoadByteSize > MemLocEnd &&
+ LI->getParent()->getParent()->hasFnAttr(Attribute::AddressSafety)) {
+ // We will be reading past the location accessed by the original program.
+ // While this is safe in a regular build, Address Safety analysis tools
+ // may start reporting false warnings. So, don't do widening.
+ return 0;
+ }
+
// If a load of this width would include all of MemLoc, then we succeed.
if (LIOffs+NewLoadByteSize >= MemLocEnd)
return NewLoadByteSize;
NewLoadByteSize <<= 1;
}
-
- return 0;
+}
+
+namespace {
+ /// Only find pointer captures which happen before the given instruction. Uses
+ /// the dominator tree to determine whether one instruction is before another.
+ struct CapturesBefore : public CaptureTracker {
+ CapturesBefore(const Instruction *I, DominatorTree *DT)
+ : BeforeHere(I), DT(DT), Captured(false) {}
+
+ void tooManyUses() { Captured = true; }
+
+ bool shouldExplore(Use *U) {
+ Instruction *I = cast<Instruction>(U->getUser());
+ BasicBlock *BB = I->getParent();
+ if (BeforeHere != I &&
+ (!DT->isReachableFromEntry(BB) || DT->dominates(BeforeHere, I)))
+ return false;
+ return true;
+ }
+
+ bool captured(Use *U) {
+ Instruction *I = cast<Instruction>(U->getUser());
+ BasicBlock *BB = I->getParent();
+ if (BeforeHere != I &&
+ (!DT->isReachableFromEntry(BB) || DT->dominates(BeforeHere, I)))
+ return false;
+ Captured = true;
+ return true;
+ }
+
+ const Instruction *BeforeHere;
+ DominatorTree *DT;
+
+ bool Captured;
+ };
+}
+
+AliasAnalysis::ModRefResult
+MemoryDependenceAnalysis::getModRefInfo(const Instruction *Inst,
+ const AliasAnalysis::Location &MemLoc) {
+ AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
+ if (MR != AliasAnalysis::ModRef) return MR;
+
+ // FIXME: this is really just shoring-up a deficiency in alias analysis.
+ // BasicAA isn't willing to spend linear time determining whether an alloca
+ // was captured before or after this particular call, while we are. However,
+ // with a smarter AA in place, this test is just wasting compile time.
+ if (!DT) return AliasAnalysis::ModRef;
+ const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
+ if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object))
+ return AliasAnalysis::ModRef;
+ ImmutableCallSite CS(Inst);
+ if (!CS.getInstruction()) return AliasAnalysis::ModRef;
+
+ CapturesBefore CB(Inst, DT);
+ llvm::PointerMayBeCaptured(Object, &CB);
+
+ if (isa<Constant>(Object) || CS.getInstruction() == Object || CB.Captured)
+ return AliasAnalysis::ModRef;
+
+ unsigned ArgNo = 0;
+ for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+ CI != CE; ++CI, ++ArgNo) {
+ // Only look at the no-capture or byval pointer arguments. If this
+ // pointer were passed to arguments that were neither of these, then it
+ // couldn't be no-capture.
+ if (!(*CI)->getType()->isPointerTy() ||
+ (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
+ continue;
+
+ // If this is a no-capture pointer argument, see if we can tell that it
+ // is impossible to alias the pointer we're checking. If not, we have to
+ // assume that the call could touch the pointer, even though it doesn't
+ // escape.
+ if (!AA->isNoAlias(AliasAnalysis::Location(*CI),
+ AliasAnalysis::Location(Object))) {
+ return AliasAnalysis::ModRef;
+ }
+ }
+ return AliasAnalysis::NoModRef;
}
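
CapturesBefore filters the capture search through a dominance test: uses that do not execute strictly before the query instruction are ignored. A toy rendering of that filter, collapsing the dominator tree to a single integer ordering (an oversimplification, purely illustrative):

    #include <cstddef>

    // "Position" stands in for the dominator-tree ordering used above; the
    // real code asks DT->dominates and checks reachability instead.
    struct Use { int Position; bool Captures; };

    static bool capturedBefore(const Use *Uses, std::size_t N, int BeforeHere) {
      for (std::size_t i = 0; i != N; ++i) {
        if (Uses[i].Position >= BeforeHere)
          continue;                 // use is not before the query point: skip
        if (Uses[i].Captures)
          return true;              // a capture strictly before the call site
      }
      return false;
    }

    int main() {
      Use Uses[] = { {5, true}, {12, true} };
      // Query at position 10: only the capture at position 5 counts.
      return capturedBefore(Uses, 2, 10) == true &&
             capturedBefore(Uses, 2, 3) == false ? 0 : 1;
    }
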
/// getPointerDependencyFrom - Return the instruction on which a memory
@@ -478,7 +566,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
- switch (AA->getModRefInfo(Inst, MemLoc)) {
+ switch (getModRefInfo(Inst, MemLoc)) {
case AliasAnalysis::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
diff --git a/contrib/llvm/lib/Analysis/PHITransAddr.cpp b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
index 7e22ddc..38cb1c9 100644
--- a/contrib/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/contrib/llvm/lib/Analysis/PHITransAddr.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/PHITransAddr.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
@@ -27,7 +28,7 @@ static bool CanPHITrans(Instruction *Inst) {
return true;
if (isa<CastInst>(Inst) &&
- Inst->isSafeToSpeculativelyExecute())
+ isSafeToSpeculativelyExecute(Inst))
return true;
if (Inst->getOpcode() == Instruction::Add &&
@@ -73,7 +74,6 @@ static bool VerifySubExpr(Value *Expr,
errs() << *I << '\n';
llvm_unreachable("Either something is missing from InstInputs or "
"CanPHITrans is wrong.");
- return false;
}
// Validate the operands of the instruction.
@@ -100,7 +100,6 @@ bool PHITransAddr::Verify() const {
for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
errs() << " InstInput #" << i << " is " << *InstInputs[i] << "\n";
llvm_unreachable("This is unexpected.");
- return false;
}
// a-ok.
@@ -186,7 +185,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// operands need to be phi translated, and if so, reconstruct it.
if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
- if (!Cast->isSafeToSpeculativelyExecute()) return 0;
+ if (!isSafeToSpeculativelyExecute(Cast)) return 0;
Value *PHIIn = PHITranslateSubExpr(Cast->getOperand(0), CurBB, PredBB, DT);
if (PHIIn == 0) return 0;
if (PHIIn == Cast->getOperand(0))
@@ -228,7 +227,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return GEP;
// Simplify the GEP to handle 'gep x, 0' -> x etc.
- if (Value *V = SimplifyGEPInst(GEPOps, TD, DT)) {
+ if (Value *V = SimplifyGEPInst(GEPOps, TD, TLI, DT)) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
RemoveInstInputs(GEPOps[i], InstInputs);
@@ -284,7 +283,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}
// See if the add simplifies away.
- if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, DT)) {
+ if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, TLI, DT)) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);
@@ -381,7 +380,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
// Handle cast of PHI translatable value.
if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
- if (!Cast->isSafeToSpeculativelyExecute()) return 0;
+ if (!isSafeToSpeculativelyExecute(Cast)) return 0;
Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0),
CurBB, PredBB, DT, NewInsts);
if (OpVal == 0) return 0;
diff --git a/contrib/llvm/lib/Analysis/PathNumbering.cpp b/contrib/llvm/lib/Analysis/PathNumbering.cpp
index 0e3b6e6..80c5222 100644
--- a/contrib/llvm/lib/Analysis/PathNumbering.cpp
+++ b/contrib/llvm/lib/Analysis/PathNumbering.cpp
@@ -386,8 +386,8 @@ void BallLarusDag::buildNode(BLBlockNodeMap& inDag, BLNodeStack& dfsStack) {
}
TerminatorInst* terminator = currentNode->getBlock()->getTerminator();
- if(isa<ReturnInst>(terminator) || isa<UnreachableInst>(terminator)
- || isa<ResumeInst>(terminator) || isa<UnwindInst>(terminator))
+ if(isa<ReturnInst>(terminator) || isa<UnreachableInst>(terminator) ||
+ isa<ResumeInst>(terminator))
addEdge(currentNode, getExit(),0);
currentNode->setColor(BallLarusNode::GRAY);
diff --git a/contrib/llvm/lib/Analysis/PathProfileVerifier.cpp b/contrib/llvm/lib/Analysis/PathProfileVerifier.cpp
index 0ae734e..0fcdfe7 100644
--- a/contrib/llvm/lib/Analysis/PathProfileVerifier.cpp
+++ b/contrib/llvm/lib/Analysis/PathProfileVerifier.cpp
@@ -137,22 +137,22 @@ bool PathProfileVerifier::runOnModule (Module &M) {
BasicBlock* source = nextEdge->getSource();
BasicBlock* target = nextEdge->getTarget();
unsigned duplicateNumber = nextEdge->getDuplicateNumber();
- DEBUG(dbgs () << source->getNameStr() << " --{" << duplicateNumber
- << "}--> " << target->getNameStr());
+ DEBUG(dbgs() << source->getName() << " --{" << duplicateNumber
+ << "}--> " << target->getName());
// Ensure all the referenced edges exist
// TODO: make this a separate function
if( !arrayMap.count(source) ) {
- errs() << " error [" << F->getNameStr() << "()]: source '"
- << source->getNameStr()
+ errs() << " error [" << F->getName() << "()]: source '"
+ << source->getName()
<< "' does not exist in the array map.\n";
} else if( !arrayMap[source].count(target) ) {
- errs() << " error [" << F->getNameStr() << "()]: target '"
- << target->getNameStr()
+ errs() << " error [" << F->getName() << "()]: target '"
+ << target->getName()
<< "' does not exist in the array map.\n";
} else if( !arrayMap[source][target].count(duplicateNumber) ) {
- errs() << " error [" << F->getNameStr() << "()]: edge "
- << source->getNameStr() << " -> " << target->getNameStr()
+ errs() << " error [" << F->getName() << "()]: edge "
+ << source->getName() << " -> " << target->getName()
<< " duplicate number " << duplicateNumber
<< " does not exist in the array map.\n";
} else {
diff --git a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
index b594e2b..63468f8 100644
--- a/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileEstimatorPass.cpp
@@ -332,7 +332,7 @@ bool ProfileEstimatorPass::runOnFunction(Function &F) {
// Clear Minimal Edges.
MinimalWeight.clear();
- DEBUG(dbgs() << "Working on function " << F.getNameStr() << "\n");
+ DEBUG(dbgs() << "Working on function " << F.getName() << "\n");
// Since the entry block is the first one and has no predecessors, the edge
// (0,entry) is inserted with the starting weight of 1.
diff --git a/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp b/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
index 098079b..c4da807 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
@@ -160,7 +160,7 @@ bool LoaderPass::runOnModule(Module &M) {
ReadCount = 0;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration()) continue;
- DEBUG(dbgs()<<"Working on "<<F->getNameStr()<<"\n");
+ DEBUG(dbgs() << "Working on " << F->getName() << "\n");
readEdge(getEdge(0,&F->getEntryBlock()), Counters);
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
TerminatorInst *TI = BB->getTerminator();
@@ -181,7 +181,7 @@ bool LoaderPass::runOnModule(Module &M) {
ReadCount = 0;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration()) continue;
- DEBUG(dbgs()<<"Working on "<<F->getNameStr()<<"\n");
+ DEBUG(dbgs() << "Working on " << F->getName() << "\n");
readEdge(getEdge(0,&F->getEntryBlock()), Counters);
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
TerminatorInst *TI = BB->getTerminator();
diff --git a/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp b/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
index a017518..0cb1588 100644
--- a/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileVerifierPass.cpp
@@ -30,7 +30,7 @@ static cl::opt<bool,false>
ProfileVerifierDisableAssertions("profile-verifier-noassert",
cl::desc("Disable assertions"));
-namespace llvm {
+namespace {
template<class FType, class BType>
class ProfileVerifierPassT : public FunctionPass {
@@ -125,8 +125,8 @@ namespace llvm {
outCount++;
}
}
- dbgs() << "Block " << BB->getNameStr() << " in "
- << BB->getParent()->getNameStr() << ":"
+ dbgs() << "Block " << BB->getName() << " in "
+ << BB->getParent()->getName() << ":"
<< "BBWeight=" << format("%20.20g",BBWeight) << ","
<< "inWeight=" << format("%20.20g",inWeight) << ","
<< "inCount=" << inCount << ","
@@ -143,8 +143,8 @@ namespace llvm {
template<class FType, class BType>
void ProfileVerifierPassT<FType, BType>::debugEntry (DetailedBlockInfo *DI) {
- dbgs() << "TROUBLE: Block " << DI->BB->getNameStr() << " in "
- << DI->BB->getParent()->getNameStr() << ":"
+ dbgs() << "TROUBLE: Block " << DI->BB->getName() << " in "
+ << DI->BB->getParent()->getName() << ":"
<< "BBWeight=" << format("%20.20g",DI->BBWeight) << ","
<< "inWeight=" << format("%20.20g",DI->inWeight) << ","
<< "inCount=" << DI->inCount << ","
@@ -201,13 +201,13 @@ namespace llvm {
double EdgeWeight = PI->getEdgeWeight(E);
if (EdgeWeight == ProfileInfoT<FType, BType>::MissingValue) {
dbgs() << "Edge " << E << " in Function "
- << ProfileInfoT<FType, BType>::getFunction(E)->getNameStr() << ": ";
+ << ProfileInfoT<FType, BType>::getFunction(E)->getName() << ": ";
ASSERTMESSAGE("Edge has missing value");
return 0;
} else {
if (EdgeWeight < 0) {
dbgs() << "Edge " << E << " in Function "
- << ProfileInfoT<FType, BType>::getFunction(E)->getNameStr() << ": ";
+ << ProfileInfoT<FType, BType>::getFunction(E)->getName() << ": ";
ASSERTMESSAGE("Edge has negative value");
}
return EdgeWeight;
@@ -220,8 +220,8 @@ namespace llvm {
DetailedBlockInfo *DI) {
if (Error) {
DEBUG(debugEntry(DI));
- dbgs() << "Block " << DI->BB->getNameStr() << " in Function "
- << DI->BB->getParent()->getNameStr() << ": ";
+ dbgs() << "Block " << DI->BB->getName() << " in Function "
+ << DI->BB->getParent()->getName() << ": ";
ASSERTMESSAGE(Message);
}
return;
diff --git a/contrib/llvm/lib/Analysis/RegionInfo.cpp b/contrib/llvm/lib/Analysis/RegionInfo.cpp
index 52753cb..b507b1e 100644
--- a/contrib/llvm/lib/Analysis/RegionInfo.cpp
+++ b/contrib/llvm/lib/Analysis/RegionInfo.cpp
@@ -186,18 +186,16 @@ std::string Region::getNameStr() const {
raw_string_ostream OS(entryName);
WriteAsOperand(OS, getEntry(), false);
- entryName = OS.str();
} else
- entryName = getEntry()->getNameStr();
+ entryName = getEntry()->getName();
if (getExit()) {
if (getExit()->getName().empty()) {
raw_string_ostream OS(exitName);
WriteAsOperand(OS, getExit(), false);
- exitName = OS.str();
} else
- exitName = getExit()->getNameStr();
+ exitName = getExit()->getName();
} else
exitName = "<Function Return>";
@@ -652,7 +650,7 @@ void RegionInfo::buildRegionsTree(DomTreeNode *N, Region *region) {
// This basic block is a start block of a region. It is already in the
// BBtoRegion relation. Only the child basic blocks have to be updated.
if (it != BBtoRegion.end()) {
- Region *newRegion = it->second;;
+ Region *newRegion = it->second;
region->addSubRegion(getTopMostParent(newRegion));
region = newRegion;
} else {
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index e0ac56c..1d55642 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -74,6 +74,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
@@ -108,6 +109,7 @@ INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;
@@ -188,6 +190,14 @@ void SCEV::print(raw_ostream &OS) const {
OS << OpStr;
}
OS << ")";
+ switch (NAry->getSCEVType()) {
+ case scAddExpr:
+ case scMulExpr:
+ if (NAry->getNoWrapFlags(FlagNUW))
+ OS << "<nuw>";
+ if (NAry->getNoWrapFlags(FlagNSW))
+ OS << "<nsw>";
+ }
return;
}
case scUDivExpr: {
@@ -249,11 +259,9 @@ Type *SCEV::getType() const {
return cast<SCEVUnknown>(this)->getType();
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return 0;
- default: break;
+ default:
+ llvm_unreachable("Unknown SCEV kind!");
}
- llvm_unreachable("Unknown SCEV kind!");
- return 0;
}
bool SCEV::isZero() const {
@@ -274,6 +282,20 @@ bool SCEV::isAllOnesValue() const {
return false;
}
+/// isNonConstantNegative - Return true if the specified scev is negated, but
+/// not a constant.
+bool SCEV::isNonConstantNegative() const {
+ const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
+ if (!Mul) return false;
+
+ // If there is a constant factor, it will be first.
+ const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
+ if (!SC) return false;
+
+ // Return true if the value is negative, this matches things like (-42 * V).
+ return SC->getValue()->getValue().isNegative();
+}
+
SCEVCouldNotCompute::SCEVCouldNotCompute() :
SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
@@ -587,11 +609,8 @@ namespace {
}
default:
- break;
+ llvm_unreachable("Unknown SCEV kind!");
}
-
- llvm_unreachable("Unknown SCEV kind!");
- return 0;
}
};
}
@@ -2581,7 +2600,7 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
@@ -2590,7 +2609,7 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
Constant *C = ConstantExpr::getAlignOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
@@ -2607,7 +2626,7 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
@@ -2617,7 +2636,7 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
Constant *FieldNo) {
Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
@@ -3108,7 +3127,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = SimplifyInstruction(PN, TD, DT))
+ if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
@@ -3168,7 +3187,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
// Add the total offset from all the GEP indices to the base.
return getAddExpr(BaseS, TotalOffset,
- isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap);
+ isInBounds ? SCEV::FlagNUW : SCEV::FlagAnyWrap);
}
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
@@ -3242,9 +3261,8 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
unsigned BitWidth = getTypeSizeInBits(U->getType());
- APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
+ ComputeMaskedBits(U->getValue(), Zeros, Ones);
return Zeros.countTrailingOnes();
}
@@ -3382,9 +3400,8 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
- APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
- ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
+ ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
if (Ones == ~Zeros + 1)
return setUnsignedRange(U, ConservativeResult);
return setUnsignedRange(U,
@@ -3584,6 +3601,12 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// because it leads to N-1 getAddExpr calls for N ultimate operands.
// Instead, gather up all the operands and make a single getAddExpr call.
// LLVM IR canonical form means we need only traverse the left operands.
+ //
+ // Don't apply this instruction's NSW or NUW flags to the new
+ // expression. The instruction may be guarded by control flow that the
+ // no-wrap behavior depends on. Non-control-equivalent instructions can be
+ // mapped to the same SCEV expression, and it would be incorrect to transfer
+ // NSW/NUW semantics to those operations.
SmallVector<const SCEV *, 4> AddOps;
AddOps.push_back(getSCEV(U->getOperand(1)));
for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
@@ -3598,16 +3621,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
AddOps.push_back(Op1);
}
AddOps.push_back(getSCEV(U->getOperand(0)));
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
- OverflowingBinaryOperator *OBO = cast<OverflowingBinaryOperator>(V);
- if (OBO->hasNoSignedWrap())
- setFlags(Flags, SCEV::FlagNSW);
- if (OBO->hasNoUnsignedWrap())
- setFlags(Flags, SCEV::FlagNUW);
- return getAddExpr(AddOps, Flags);
+ return getAddExpr(AddOps);
}
case Instruction::Mul: {
- // See the Add code above.
+ // Don't transfer NSW/NUW for the same reason as AddExpr.
SmallVector<const SCEV *, 4> MulOps;
MulOps.push_back(getSCEV(U->getOperand(1)));
for (Value *Op = U->getOperand(0);
@@ -3641,9 +3658,8 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// knew about to reconstruct a low-bits mask value.
unsigned LZ = A.countLeadingZeros();
unsigned BitWidth = A.getBitWidth();
- APInt AllOnes = APInt::getAllOnesValue(BitWidth);
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
+ ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);
APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
@@ -3915,13 +3931,19 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
//
/// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
-/// normal unsigned value, if possible. Returns 0 if the trip count is unknown
-/// or not constant. Will also return 0 if the maximum trip count is very large
-/// (>= 2^32)
-unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
- BasicBlock *ExitBlock) {
+/// normal unsigned value. Returns 0 if the trip count is unknown or not
+/// constant. Will also return 0 if the maximum trip count is very large (>=
+/// 2^32).
+///
+/// This "trip count" assumes that control exits via ExitingBlock. More
+/// precisely, it is the number of times that control may reach ExitingBlock
+/// before taking the branch. For loops with multiple exits, it may not be the
+/// number of times that the loop header executes because the loop may exit
+/// prematurely via another branch.
+unsigned ScalarEvolution::
+getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock) {
const SCEVConstant *ExitCount =
- dyn_cast<SCEVConstant>(getExitCount(L, ExitBlock));
+ dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
if (!ExitCount)
return 0;
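
The per-exit reading of "trip count" is easy to see by brute force: in a loop with two exit tests, the counts of how often control reaches each test differ. A small simulation (plain C++, made-up loop):

    #include <cstdio>

    int main() {
      // for (i = 0; ; ++i) { if (i == 3) exit A; if (i == 7) exit B; }
      unsigned ReachedA = 0, ReachedB = 0;
      for (unsigned i = 0; ; ++i) {
        ++ReachedA;              // control reaches exit test A
        if (i == 3) break;       // exit A taken: test B not reached this time
        ++ReachedB;              // control reaches exit test B
        if (i == 7) break;
      }
      // Trip count w.r.t. exit A is 4 (i = 0,1,2,3); test B is reached only
      // 3 times because the loop leaves through A first.
      std::printf("A=%u B=%u\n", ReachedA, ReachedB);  // prints A=4 B=3
      return 0;
    }
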
@@ -3944,9 +3966,12 @@ unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
/// multiple of a constant (which is also the case if the trip count is simply
/// constant, use getSmallConstantTripCount for that case). Will also return 1
/// if the trip count is very large (>= 2^32).
-unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
- BasicBlock *ExitBlock) {
- const SCEV *ExitCount = getExitCount(L, ExitBlock);
+///
+/// As explained in the comments for getSmallConstantTripCount, this assumes
+/// that control exits the loop via ExitingBlock.
+unsigned ScalarEvolution::
+getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock) {
+ const SCEV *ExitCount = getExitCount(L, ExitingBlock);
if (ExitCount == getCouldNotCompute())
return 1;
@@ -4153,13 +4178,19 @@ void ScalarEvolution::forgetValue(Value *V) {
}
/// getExact - Get the exact loop backedge taken count considering all loop
-/// exits. If all exits are computable, this is the minimum computed count.
+/// exits. A computable result can only be returned for loops with a single exit.
+/// Returning the minimum taken count among all exits is incorrect because one
+/// of the loop's exit limits may have been skipped. HowFarToZero assumes that
+/// the limit of each loop test is never skipped. This is a valid assumption as
+/// long as the loop exits via that test. For precise results, it is the
+/// caller's responsibility to specify the relevant loop exit using
+/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
// If any exits were not computable, the loop is not computable.
if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
- // We need at least one computable exit.
+ // We need exactly one computable exit.
if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
@@ -4171,8 +4202,8 @@ ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
if (!BECount)
BECount = ENT->ExactNotTaken;
- else
- BECount = SE->getUMinFromMismatchedTypes(BECount, ENT->ExactNotTaken);
+ else if (BECount != ENT->ExactNotTaken)
+ return SE->getCouldNotCompute();
}
assert(BECount && "Invalid not taken count for loop exit");
return BECount;
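
The rewritten loop above only returns an exact count when every computable exit agrees; any disagreement, or an uncomputable exit, yields getCouldNotCompute. A compact sketch of that policy, with -1 standing in for "could not compute":

    #include <cassert>
    #include <cstddef>

    // Per-exit backedge-taken counts. Mirrors the new getExact policy: all
    // computable exits must agree, otherwise the exact count is unknown.
    static long exactCount(const long *Counts, std::size_t N) {
      long BECount = -1;
      for (std::size_t i = 0; i != N; ++i) {
        if (Counts[i] < 0) return -1;             // uncomputable exit: give up
        if (BECount < 0) BECount = Counts[i];     // first computable exit
        else if (BECount != Counts[i]) return -1; // disagreement: give up
      }
      return BECount;
    }

    int main() {
      long Agree[]  = { 8, 8 };
      long Differ[] = { 8, 3 };  // taking min(8,3) here would be the old bug
      assert(exactCount(Agree, 2) == 8);
      assert(exactCount(Differ, 2) == -1);
      return 0;
    }
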
@@ -4253,8 +4284,15 @@ ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
if (MaxBECount == getCouldNotCompute())
MaxBECount = EL.Max;
- else if (EL.Max != getCouldNotCompute())
- MaxBECount = getUMinFromMismatchedTypes(MaxBECount, EL.Max);
+ else if (EL.Max != getCouldNotCompute()) {
+ // We cannot take the "min" MaxBECount, because non-unit stride loops may
+ // skip some loop tests. Taking the max over the exits is sufficiently
+ // conservative. TODO: We could do better by taking into consideration
+ // that (1) the loop has unit stride, (2) the last loop test is
+ // less-than/greater-than, and (3) any loop test is less-than/greater-than AND
+ // falls through some constant number of times less than the other tests.
+ MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, EL.Max);
+ }
}
return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
@@ -4539,40 +4577,6 @@ EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
return cast<SCEVConstant>(Val)->getValue();
}
-/// GetAddressedElementFromGlobal - Given a global variable with an initializer
-/// and a GEP expression (missing the pointer index) indexing into it, return
-/// the addressed element of the initializer or null if the index expression is
-/// invalid.
-static Constant *
-GetAddressedElementFromGlobal(GlobalVariable *GV,
- const std::vector<ConstantInt*> &Indices) {
- Constant *Init = GV->getInitializer();
- for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- uint64_t Idx = Indices[i]->getZExtValue();
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
- assert(Idx < CS->getNumOperands() && "Bad struct index!");
- Init = cast<Constant>(CS->getOperand(Idx));
- } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
- if (Idx >= CA->getNumOperands()) return 0; // Bogus program
- Init = cast<Constant>(CA->getOperand(Idx));
- } else if (isa<ConstantAggregateZero>(Init)) {
- if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
- assert(Idx < STy->getNumElements() && "Bad struct index!");
- Init = Constant::getNullValue(STy->getElementType(Idx));
- } else if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
- if (Idx >= ATy->getNumElements()) return 0; // Bogus program
- Init = Constant::getNullValue(ATy->getElementType());
- } else {
- llvm_unreachable("Unknown constant aggregate type!");
- }
- return 0;
- } else {
- return 0; // Unknown initializer type
- }
- }
- return Init;
-}
-
/// ComputeLoadConstantCompareExitLimit - Given an exit condition of
/// 'icmp op load X, cst', try to see if we can compute the backedge
/// execution count.
@@ -4600,7 +4604,7 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
// Okay, we allow one non-constant index into the GEP instruction.
Value *VarIdx = 0;
- std::vector<ConstantInt*> Indexes;
+ std::vector<Constant*> Indexes;
unsigned VarIdxNum = 0;
for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
@@ -4612,6 +4616,10 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
Indexes.push_back(0);
}
+ // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
+ if (!VarIdx)
+ return getCouldNotCompute();
+
// Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
// Check to see if X is a loop variant variable value now.
const SCEV *Idx = getSCEV(VarIdx);
@@ -4634,7 +4642,8 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
// Form the GEP offset.
Indexes[VarIdxNum] = Val;
- Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
+ Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
+ Indexes);
if (Result == 0) break; // Cannot compute!
// Evaluate the condition for this iteration.
@@ -4658,7 +4667,8 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
/// specified type, assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
- isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
+ isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
+ isa<LoadInst>(I))
return true;
if (const CallInst *CI = dyn_cast<CallInst>(I))
@@ -4748,16 +4758,23 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
+ Instruction *I = dyn_cast<Instruction>(V);
+ if (!I) return 0;
- Instruction *I = cast<Instruction>(V);
if (Constant *C = Vals.lookup(I)) return C;
- assert(!isa<PHINode>(I) && "loop header phis should be mapped to constant");
- assert(canConstantEvolve(I, L) && "cannot evaluate expression in this loop");
- (void)L;
+ // An instruction inside the loop depends on a value outside the loop that we
+ // weren't given a mapping for, or a value such as a call inside the loop.
+ if (!canConstantEvolve(I, L)) return 0;
+
+ // An unmapped PHI can be due to a branch or another loop inside this loop,
+ // or due to this not being the initial iteration through a loop where we
+ // couldn't compute the evolution of this particular PHI last time.
+ if (isa<PHINode>(I)) return 0;
std::vector<Constant*> Operands(I->getNumOperands());
@@ -4768,16 +4785,21 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
if (!Operands[i]) return 0;
continue;
}
- Constant *C = EvaluateExpression(Operand, L, Vals, TD);
+ Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
Vals[Operand] = C;
if (!C) return 0;
Operands[i] = C;
}
- if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+ if (CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
- Operands[1], TD);
- return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD);
+ Operands[1], TD, TLI);
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (!LI->isVolatile())
+ return ConstantFoldLoadFromConstPtr(Operands[0], TD);
+ }
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
+ TLI);
}
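
EvaluateExpression is a memoized recursive fold: consult the Vals map, recurse over operands, fold, and return null for anything unmapped (the PHI and out-of-loop cases above). A toy version over a two-operator expression tree (made-up types, plain C++):

    #include <map>

    // Leaves are constants (Op == 0) or unknowns; interior nodes combine
    // their children with '+' or '*'.
    struct Expr {
      char Op;     // 0 = leaf
      int Leaf;    // leaf payload, valid when Op == 0
      bool Known;  // false models an unmapped value
      Expr *L, *R;
    };

    static bool evaluate(Expr *E, std::map<Expr*, int> &Vals, int &Out) {
      std::map<Expr*, int>::iterator I = Vals.find(E);
      if (I != Vals.end()) { Out = I->second; return true; }  // memo hit
      if (E->Op == 0) {
        if (!E->Known) return false;  // no mapping: give up, like returning 0
        Out = E->Leaf;
      } else {
        int A, B;
        if (!evaluate(E->L, Vals, A) || !evaluate(E->R, Vals, B)) return false;
        Out = (E->Op == '+') ? A + B : A * B;
      }
      Vals[E] = Out;                  // memoize for later iterations
      return true;
    }

    int main() {
      Expr C1 = {0, 2, true, 0, 0}, C2 = {0, 3, true, 0, 0};
      Expr Sum = {'+', 0, true, &C1, &C2};
      std::map<Expr*, int> Vals;
      int Out = 0;
      return evaluate(&Sum, Vals, Out) && Out == 5 ? 0 : 1;
    }
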
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
@@ -4798,23 +4820,26 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
- // FIXME: Nick's fix for PR11034 will seed constants for multiple header phis.
DenseMap<Instruction *, Constant *> CurrentIterVals;
+ BasicBlock *Header = L->getHeader();
+ assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
// Since the loop is canonicalized, the PHI node must have two entries. One
// entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
- Constant *StartCST =
- dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
- if (StartCST == 0)
- return RetVal = 0; // Must be a constant.
- CurrentIterVals[PN] = StartCST;
+ PHINode *PHI = 0;
+ for (BasicBlock::iterator I = Header->begin();
+ (PHI = dyn_cast<PHINode>(I)); ++I) {
+ Constant *StartCST =
+ dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
+ if (StartCST == 0) continue;
+ CurrentIterVals[PHI] = StartCST;
+ }
+ if (!CurrentIterVals.count(PN))
+ return RetVal = 0;
Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
- if (getConstantEvolvingPHI(BEValue, L) != PN &&
- !isa<Constant>(BEValue))
- return RetVal = 0; // Not derived from same PHI.
// Execute the loop symbolically to determine the exit value.
if (BEs.getActiveBits() >= 32)
@@ -4826,15 +4851,46 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
if (IterationNum == NumIterations)
return RetVal = CurrentIterVals[PN]; // Got exit value!
- // Compute the value of the PHI node for the next iteration.
+ // Compute the value of the PHIs for the next iteration.
// EvaluateExpression adds non-phi values to the CurrentIterVals map.
- Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD);
- if (NextPHI == CurrentIterVals[PN])
- return RetVal = NextPHI; // Stopped evolving!
+ DenseMap<Instruction *, Constant *> NextIterVals;
+ Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
+ TLI);
if (NextPHI == 0)
return 0; // Couldn't evaluate!
- DenseMap<Instruction *, Constant *> NextIterVals;
NextIterVals[PN] = NextPHI;
+
+ bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
+
+ // Also evaluate the other PHI nodes. However, we don't get to stop if we
+ // cease to be able to evaluate one of them or if they stop evolving,
+ // because that doesn't necessarily prevent us from computing PN.
+ SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
+ for (DenseMap<Instruction *, Constant *>::const_iterator
+ I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I) {
+ PHINode *PHI = dyn_cast<PHINode>(I->first);
+ if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
+ PHIsToCompute.push_back(std::make_pair(PHI, I->second));
+ }
+ // We use two distinct loops because EvaluateExpression may invalidate any
+ // iterators into CurrentIterVals.
+ for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
+ I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
+ PHINode *PHI = I->first;
+ Constant *&NextPHI = NextIterVals[PHI];
+ if (!NextPHI) { // Not already computed.
+ Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+ }
+ if (NextPHI != I->second)
+ StoppedEvolving = false;
+ }
+
+ // If all entries in CurrentIterVals == NextIterVals then we can stop
+ // iterating, the loop can't continue to change.
+ if (StoppedEvolving)
+ return RetVal = CurrentIterVals[PN];
+
CurrentIterVals.swap(NextIterVals);
}
}
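
The "two distinct loops" comment above is the snapshot-then-mutate discipline: collect the keys first, then insert into the map while processing the snapshot, since DenseMap may rehash and invalidate live iterators. A standalone illustration (std::map would actually tolerate the insertion, unlike DenseMap, but the discipline is the same):

    #include <cassert>
    #include <map>
    #include <vector>

    int main() {
      std::map<int, int> CurrentIterVals;
      CurrentIterVals[1] = 10;
      CurrentIterVals[2] = 20;

      // Pass 1: snapshot the keys we intend to process.
      std::vector<int> ToCompute;
      for (std::map<int, int>::const_iterator I = CurrentIterVals.begin(),
             E = CurrentIterVals.end(); I != E; ++I)
        ToCompute.push_back(I->first);

      // Pass 2: now it is safe to insert while processing, because the
      // insertions cannot invalidate anything in ToCompute.
      for (std::vector<int>::const_iterator I = ToCompute.begin(),
             E = ToCompute.end(); I != E; ++I)
        CurrentIterVals[*I + 100] = *I;

      assert(CurrentIterVals.size() == 4);
      return 0;
    }
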
@@ -4844,9 +4900,9 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
/// try to evaluate a few iterations of the loop until the exit
/// condition gets a value of ExitWhen (true or false). If we cannot
/// evaluate the trip count of the loop, return getCouldNotCompute().
-const SCEV * ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
- Value *Cond,
- bool ExitWhen) {
+const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
+ Value *Cond,
+ bool ExitWhen) {
PHINode *PN = getConstantEvolvingPHI(Cond, L);
if (PN == 0) return getCouldNotCompute();
@@ -4854,29 +4910,33 @@ const SCEV * ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
// That's the only form we support here.
if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
+ DenseMap<Instruction *, Constant *> CurrentIterVals;
+ BasicBlock *Header = L->getHeader();
+ assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
+
// One entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
- Constant *StartCST =
- dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
- if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
-
- Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
- if (getConstantEvolvingPHI(BEValue, L) != PN &&
- !isa<Constant>(BEValue))
- return getCouldNotCompute(); // Not derived from same PHI.
+ PHINode *PHI = 0;
+ for (BasicBlock::iterator I = Header->begin();
+ (PHI = dyn_cast<PHINode>(I)); ++I) {
+ Constant *StartCST =
+ dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
+ if (StartCST == 0) continue;
+ CurrentIterVals[PHI] = StartCST;
+ }
+ if (!CurrentIterVals.count(PN))
+ return getCouldNotCompute();
// Okay, we find a PHI node that defines the trip count of this loop. Execute
// the loop symbolically to determine when the condition gets a value of
// "ExitWhen".
- unsigned IterationNum = 0;
+
unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
- for (Constant *PHIVal = StartCST;
- IterationNum != MaxIterations; ++IterationNum) {
- DenseMap<Instruction *, Constant *> PHIValMap;
- PHIValMap[PN] = PHIVal;
+ for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
ConstantInt *CondVal =
- dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, PHIValMap, TD));
+ dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
+ TD, TLI));
// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
@@ -4886,11 +4946,29 @@ const SCEV * ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
return getConstant(Type::getInt32Ty(getContext()), IterationNum);
}
- // Compute the value of the PHI node for the next iteration.
- Constant *NextPHI = EvaluateExpression(BEValue, L, PHIValMap, TD);
- if (NextPHI == 0 || NextPHI == PHIVal)
- return getCouldNotCompute();// Couldn't evaluate or not making progress...
- PHIVal = NextPHI;
+ // Update all the PHI nodes for the next iteration.
+ DenseMap<Instruction *, Constant *> NextIterVals;
+
+ // Create a list of which PHIs we need to compute. We want to do this before
+ // calling EvaluateExpression on them because that may invalidate iterators
+ // into CurrentIterVals.
+ SmallVector<PHINode *, 8> PHIsToCompute;
+ for (DenseMap<Instruction *, Constant *>::const_iterator
+ I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
+ PHINode *PHI = dyn_cast<PHINode>(I->first);
+ if (!PHI || PHI->getParent() != Header) continue;
+ PHIsToCompute.push_back(PHI);
+ }
+ for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
+ E = PHIsToCompute.end(); I != E; ++I) {
+ PHINode *PHI = *I;
+ Constant *&NextPHI = NextIterVals[PHI];
+ if (NextPHI) continue; // Already computed!
+
+ Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
+ NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+ }
+ CurrentIterVals.swap(NextIterVals);
}
// Too many iterations were needed to evaluate.
@@ -4921,6 +4999,98 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
return C;
}
+/// This builds up a Constant using the ConstantExpr interface. That way, we
+/// will return Constants for objects which aren't represented by a
+/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
+/// Returns NULL if the SCEV isn't representable as a Constant.
+static Constant *BuildConstantFromSCEV(const SCEV *V) {
+ switch (V->getSCEVType()) {
+ default: // TODO: smax, umax.
+ case scCouldNotCompute:
+ case scAddRecExpr:
+ break;
+ case scConstant:
+ return cast<SCEVConstant>(V)->getValue();
+ case scUnknown:
+ return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
+ case scSignExtend: {
+ const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
+ if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
+ return ConstantExpr::getSExt(CastOp, SS->getType());
+ break;
+ }
+ case scZeroExtend: {
+ const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
+ if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
+ return ConstantExpr::getZExt(CastOp, SZ->getType());
+ break;
+ }
+ case scTruncate: {
+ const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
+ if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
+ return ConstantExpr::getTrunc(CastOp, ST->getType());
+ break;
+ }
+ case scAddExpr: {
+ const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
+ if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
+ if (C->getType()->isPointerTy())
+ C = ConstantExpr::getBitCast(C, Type::getInt8PtrTy(C->getContext()));
+ for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
+ Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
+ if (!C2) return 0;
+
+ // First pointer!
+ if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
+ std::swap(C, C2);
+ // The offsets have been converted to bytes. We can add bytes to an
+ // i8* by GEP with the byte count in the first index.
+ C = ConstantExpr::getBitCast(C,Type::getInt8PtrTy(C->getContext()));
+ }
+
+ // Don't bother trying to sum two pointers. We probably can't
+ // statically compute a load that results from it anyway.
+ if (C2->getType()->isPointerTy())
+ return 0;
+
+ if (C->getType()->isPointerTy()) {
+ if (cast<PointerType>(C->getType())->getElementType()->isStructTy())
+ C2 = ConstantExpr::getIntegerCast(
+ C2, Type::getInt32Ty(C->getContext()), true);
+ C = ConstantExpr::getGetElementPtr(C, C2);
+ } else
+ C = ConstantExpr::getAdd(C, C2);
+ }
+ return C;
+ }
+ break;
+ }
+ case scMulExpr: {
+ const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
+ if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
+ // Don't bother with pointers at all.
+ if (C->getType()->isPointerTy()) return 0;
+ for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
+ Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
+ if (!C2 || C2->getType()->isPointerTy()) return 0;
+ C = ConstantExpr::getMul(C, C2);
+ }
+ return C;
+ }
+ break;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
+ if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
+ if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
+ if (LHS->getType() == RHS->getType())
+ return ConstantExpr::getUDiv(LHS, RHS);
+ break;
+ }
+ }
+ return 0;
+}
+
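To make the scAddExpr case above concrete: an add of a global's address and a constant byte offset folds to a GEP constant expression over i8*. The sketch below is hedged against the 3.x-era C++ API used elsewhere in this diff; foldGlobalPlusOffset is a hypothetical helper, not part of ScalarEvolution:

    #include "llvm/Constants.h"      // 3.x-era header layout assumed
    #include "llvm/DerivedTypes.h"
    #include "llvm/GlobalVariable.h"
    using namespace llvm;

    // Fold "@g + Off" the way the scAddExpr case does: bitcast the pointer
    // operand to i8* and emit a byte-offset GEP constant expression.
    static Constant *foldGlobalPlusOffset(GlobalVariable *G, uint64_t Off) {
      LLVMContext &Ctx = G->getContext();
      Constant *C = ConstantExpr::getBitCast(G, Type::getInt8PtrTy(Ctx));
      Constant *Idx = ConstantInt::get(Type::getInt64Ty(Ctx), Off);
      return ConstantExpr::getGetElementPtr(C, Idx); // i8 GEP == byte offset
    }
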
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
if (isa<SCEVConstant>(V)) return V;
@@ -4973,11 +5143,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
const SCEV *OpV = getSCEVAtScope(OrigV, L);
MadeImprovement |= OrigV != OpV;
- Constant *C = 0;
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
- C = SC->getValue();
- if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
- C = dyn_cast<Constant>(SU->getValue());
+ Constant *C = BuildConstantFromSCEV(OpV);
if (!C) return V;
if (C->getType() != Op->getType())
C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
@@ -4992,10 +5158,14 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
Constant *C = 0;
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- Operands[0], Operands[1], TD);
- else
+ Operands[0], Operands[1], TD,
+ TLI);
+ else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (!LI->isVolatile())
+ C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
+ } else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- Operands, TD);
+ Operands, TD, TLI);
if (!C) return V;
return getSCEV(C);
}
@@ -5113,7 +5283,6 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
}
llvm_unreachable("Unknown SCEV type!");
- return 0;
}
/// getSCEVAtScope - This is a convenience function which does
@@ -5350,10 +5519,10 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
// behavior. Loops must exhibit defined behavior until a wrapped value is
// actually used. So the trip count computed by udiv could be smaller than the
// number of well-defined iterations.
- if (AddRec->getNoWrapFlags(SCEV::FlagNW))
+ if (AddRec->getNoWrapFlags(SCEV::FlagNW)) {
// FIXME: We really want an "isexact" bit for udiv.
return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
-
+ }
// Then, try to solve the above equation provided that Start is constant.
if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
@@ -5744,7 +5913,6 @@ ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
switch (Pred) {
default:
llvm_unreachable("Unexpected ICmpInst::Predicate value!");
- break;
case ICmpInst::ICMP_SGT:
Pred = ICmpInst::ICMP_SLT;
std::swap(LHS, RHS);
@@ -6089,8 +6257,9 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
return getCouldNotCompute();
// Check to see if we have a flag which makes analysis easy.
- bool NoWrap = isSigned ? AddRec->getNoWrapFlags(SCEV::FlagNSW) :
- AddRec->getNoWrapFlags(SCEV::FlagNUW);
+ bool NoWrap = isSigned ?
+ AddRec->getNoWrapFlags((SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNW)) :
+ AddRec->getNoWrapFlags((SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNW));
if (AddRec->isAffine()) {
unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
@@ -6381,6 +6550,7 @@ bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
return false;
}
@@ -6417,6 +6587,7 @@ void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequiredTransitive<LoopInfo>();
AU.addRequiredTransitive<DominatorTree>();
+ AU.addRequired<TargetLibraryInfo>();
}
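
The two additions above are the standard PassManager pairing: an analysis fetched with getAnalysis<T>() in runOnFunction must be declared in getAnalysisUsage. A minimal hedged sketch of the same pattern for a hypothetical pass (DemoPass is illustrative only):

    #include "llvm/Pass.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    namespace {
    // Hypothetical pass showing the require/consume pairing added above.
    struct DemoPass : public FunctionPass {
      static char ID;
      DemoPass() : FunctionPass(ID) {}

      virtual void getAnalysisUsage(AnalysisUsage &AU) const {
        AU.setPreservesAll();
        AU.addRequired<TargetLibraryInfo>(); // declare the dependency...
      }

      virtual bool runOnFunction(Function &F) {
        // ...then it is legal to consume it here.
        TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
        (void)TLI; (void)F;
        return false;
      }
    };
    }
    char DemoPass::ID = 0;
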
bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
@@ -6592,11 +6763,8 @@ ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
return LoopInvariant;
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return LoopVariant;
- default: break;
+ default: llvm_unreachable("Unknown SCEV kind!");
}
- llvm_unreachable("Unknown SCEV kind!");
- return LoopVariant;
}
bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
@@ -6678,11 +6846,9 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
return ProperlyDominatesBlock;
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return DoesNotDominateBlock;
- default: break;
+ default:
+ llvm_unreachable("Unknown SCEV kind!");
}
- llvm_unreachable("Unknown SCEV kind!");
- return DoesNotDominateBlock;
}
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
@@ -6728,11 +6894,9 @@ bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
return false;
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return false;
- default: break;
+ default:
+ llvm_unreachable("Unknown SCEV kind!");
}
- llvm_unreachable("Unknown SCEV kind!");
- return false;
}
void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 47f0f32..69507be 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -19,6 +19,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
@@ -30,6 +31,19 @@ using namespace llvm;
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP) {
+ // This function must be called with the builder having a valid insertion
+ // point. It doesn't need to be the actual IP where the uses of the returned
+ // cast will be added, but it must dominate such IP.
+ // We use this precondition to produce a cast that will dominate all its
+ // uses. In particular, this is crucial for the case where the builder's
+ // insertion point *is* the point where we were asked to put the cast.
+ // Since we don't know that the builder's insertion point is actually
+ // where the uses will be added (only that it dominates them), we are
+ // not allowed to move it.
+ BasicBlock::iterator BIP = Builder.GetInsertPoint();
+
+ Instruction *Ret = NULL;
+
// Check to see if there is already a cast!
for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
UI != E; ++UI) {
@@ -37,27 +51,35 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
if (U->getType() == Ty)
if (CastInst *CI = dyn_cast<CastInst>(U))
if (CI->getOpcode() == Op) {
- // If the cast isn't where we want it, fix it.
- if (BasicBlock::iterator(CI) != IP) {
+ // If the cast isn't where we want it, create a new cast at IP.
+ // Likewise, do not reuse a cast at BIP because it must dominate
+ // instructions that might be inserted before BIP.
+ if (BasicBlock::iterator(CI) != IP || BIP == IP) {
// Create a new cast, and leave the old cast in place in case
// it is being used as an insert point. Clear its operand
// so that it doesn't hold anything live.
- Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
- NewCI->takeName(CI);
- CI->replaceAllUsesWith(NewCI);
+ Ret = CastInst::Create(Op, V, Ty, "", IP);
+ Ret->takeName(CI);
+ CI->replaceAllUsesWith(Ret);
CI->setOperand(0, UndefValue::get(V->getType()));
- rememberInstruction(NewCI);
- return NewCI;
+ break;
}
- rememberInstruction(CI);
- return CI;
+ Ret = CI;
+ break;
}
}
// Create a new cast.
- Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
- rememberInstruction(I);
- return I;
+ if (!Ret)
+ Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);
+
+ // We assert at the end of the function since IP might point to an
+ // instruction with different dominance properties than a cast
+ // (an invoke, for example) and might not dominate BIP (but the cast does).
+ assert(SE.DT->dominates(Ret, BIP));
+
+ rememberInstruction(Ret);
+ return Ret;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
@@ -73,9 +95,14 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
"InsertNoopCastOfTo cannot change sizes!");
// Short-circuit unnecessary bitcasts.
- if (Op == Instruction::BitCast && V->getType() == Ty)
- return V;
-
+ if (Op == Instruction::BitCast) {
+ if (V->getType() == Ty)
+ return V;
+ if (CastInst *CI = dyn_cast<CastInst>(V)) {
+ if (CI->getOperand(0)->getType() == Ty)
+ return CI->getOperand(0);
+ }
+ }
// Short-circuit unnecessary inttoptr<->ptrtoint casts.
if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
@@ -115,8 +142,7 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
BasicBlock::iterator IP = I; ++IP;
if (InvokeInst *II = dyn_cast<InvokeInst>(I))
IP = II->getNormalDest()->begin();
- while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP) ||
- isa<LandingPadInst>(IP))
+ while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
++IP;
return ReuseOrCreateCast(I, Ty, Op, IP);
}
@@ -492,6 +518,9 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
V = InsertNoopCastOfTo(V,
Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
+ assert(!isa<Instruction>(V) ||
+ SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
+
// Expand the operands for a plain byte offset.
Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
@@ -588,20 +617,6 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
return expand(SE.getAddExpr(Ops));
}
-/// isNonConstantNegative - Return true if the specified scev is negated, but
-/// not a constant.
-static bool isNonConstantNegative(const SCEV *F) {
- const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(F);
- if (!Mul) return false;
-
- // If there is a constant factor, it will be first.
- const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
- if (!SC) return false;
-
- // Return true if the value is negative, this matches things like (-42 * V).
- return SC->getValue()->getValue().isNegative();
-}
-
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
@@ -655,7 +670,6 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
return RelevantLoops[D] = Result;
}
llvm_unreachable("Unexpected SCEV type!");
- return 0;
}
namespace {
@@ -680,10 +694,10 @@ public:
// If one operand is a non-constant negative and the other is not,
// put the non-constant negative on the right so that a sub can
// be used instead of a negate and add.
- if (isNonConstantNegative(LHS.second)) {
- if (!isNonConstantNegative(RHS.second))
+ if (LHS.second->isNonConstantNegative()) {
+ if (!RHS.second->isNonConstantNegative())
return false;
- } else if (isNonConstantNegative(RHS.second))
+ } else if (RHS.second->isNonConstantNegative())
return true;
// Otherwise they are equivalent according to this comparison.
@@ -744,7 +758,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
for (++I; I != E && I->first == CurLoop; ++I)
NewOps.push_back(I->second);
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
- } else if (isNonConstantNegative(Op)) {
+ } else if (Op->isNonConstantNegative()) {
// Instead of doing a negate and add, just do a subtract.
Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
Sum = InsertNoopCastOfTo(Sum, Ty);
@@ -875,58 +889,138 @@ bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
return isNormalAddRecExprPHI(PN, IncV, L);
}
-/// Determine if this cyclic phi is in a form that would have been generated by
-/// LSR. We don't care if the phi was actually expanded in this pass, as long
-/// as it is in a low-cost form, for example, no implied multiplication. This
-/// should match any patterns generated by getAddRecExprPHILiterally and
-/// expandAddtoGEP.
-bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
- const Loop *L) {
+/// getIVIncOperand returns an induction variable increment's induction
+/// variable operand.
+///
+/// If allowScale is set, any type of GEP is allowed as long as the nonIV
+/// operands dominate InsertPos.
+///
+/// If allowScale is not set, ensure that a GEP increment conforms to one of the
+/// simple patterns generated by getAddRecExprPHILiterally and
+/// expandAddToGEP. If the pattern isn't recognized, return NULL.
+Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
+ Instruction *InsertPos,
+ bool allowScale) {
+ if (IncV == InsertPos)
+ return NULL;
+
switch (IncV->getOpcode()) {
+ default:
+ return NULL;
// Check for a simple Add/Sub or GEP of a loop invariant step.
case Instruction::Add:
- case Instruction::Sub:
- return IncV->getOperand(0) == PN
- && L->isLoopInvariant(IncV->getOperand(1));
+ case Instruction::Sub: {
+ Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
+ if (!OInst || SE.DT->dominates(OInst, InsertPos))
+ return dyn_cast<Instruction>(IncV->getOperand(0));
+ return NULL;
+ }
case Instruction::BitCast:
- IncV = dyn_cast<GetElementPtrInst>(IncV->getOperand(0));
- if (!IncV)
- return false;
- // fall-thru to GEP handling
- case Instruction::GetElementPtr: {
- // This must be a pointer addition of constants (pretty) or some number of
- // address-size elements (ugly).
+ return dyn_cast<Instruction>(IncV->getOperand(0));
+ case Instruction::GetElementPtr:
for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
I != E; ++I) {
if (isa<Constant>(*I))
continue;
- // ugly geps have 2 operands.
- // i1* is used by the expander to represent an address-size element.
+ if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
+ if (!SE.DT->dominates(OInst, InsertPos))
+ return NULL;
+ }
+ if (allowScale) {
+ // allow any kind of GEP as long as it can be hoisted.
+ continue;
+ }
+ // This must be a pointer addition of constants (pretty), which is already
+ // handled, or some number of address-size elements (ugly). Ugly geps
+ // have 2 operands. i1* is used by the expander to represent an
+ // address-size element.
if (IncV->getNumOperands() != 2)
- return false;
+ return NULL;
unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
&& IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
- return false;
- // Ensure the operands dominate the insertion point. I don't know of a
- // case when this would not be true, so this is somewhat untested.
- if (L == IVIncInsertLoop) {
- for (User::op_iterator OI = IncV->op_begin()+1,
- OE = IncV->op_end(); OI != OE; ++OI)
- if (Instruction *OInst = dyn_cast<Instruction>(OI))
- if (!SE.DT->dominates(OInst, IVIncInsertPos))
- return false;
- }
+ return NULL;
break;
}
- IncV = dyn_cast<Instruction>(IncV->getOperand(0));
- if (IncV && IncV->getOpcode() == Instruction::BitCast)
- IncV = dyn_cast<Instruction>(IncV->getOperand(0));
- return IncV == PN;
+ return dyn_cast<Instruction>(IncV->getOperand(0));
}
- default:
+}
+
+/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
+/// it available to other uses in this loop. Recursively hoist any operands,
+/// until we reach a value that dominates InsertPos.
+bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
+ if (SE.DT->dominates(IncV, InsertPos))
+ return true;
+
+ // InsertPos must itself dominate IncV so that IncV's new position satisfies
+ // its existing users.
+ if (!SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
return false;
+
+ // Check that the chain of IV operands leading back to Phi can be hoisted.
+ SmallVector<Instruction*, 4> IVIncs;
+ for(;;) {
+ Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
+ if (!Oper)
+ return false;
+ // IncV is safe to hoist.
+ IVIncs.push_back(IncV);
+ IncV = Oper;
+ if (SE.DT->dominates(IncV, InsertPos))
+ break;
+ }
+ for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
+ E = IVIncs.rend(); I != E; ++I) {
+ (*I)->moveBefore(InsertPos);
+ }
+ return true;
+}
+
+/// Determine if this cyclic phi is in a form that would have been generated by
+/// LSR. We don't care if the phi was actually expanded in this pass, as long
+/// as it is in a low-cost form, for example, no implied multiplication. This
+/// should match any patterns generated by getAddRecExprPHILiterally and
+/// expandAddToGEP.
+bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
+ const Loop *L) {
+ for(Instruction *IVOper = IncV;
+ (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
+ /*allowScale=*/false));) {
+ if (IVOper == PN)
+ return true;
}
+ return false;
+}
+
+/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
+/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
+/// need to materialize IV increments elsewhere to handle difficult situations.
+Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
+ Type *ExpandTy, Type *IntTy,
+ bool useSubtract) {
+ Value *IncV;
+ // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
+ if (ExpandTy->isPointerTy()) {
+ PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
+ // If the step isn't constant, don't use an implicitly scaled GEP, because
+ // that would require a multiply inside the loop.
+ if (!isa<ConstantInt>(StepV))
+ GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
+ GEPPtrTy->getAddressSpace());
+ const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
+ IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
+ if (IncV->getType() != PN->getType()) {
+ IncV = Builder.CreateBitCast(IncV, PN->getType());
+ rememberInstruction(IncV);
+ }
+ } else {
+ IncV = useSubtract ?
+ Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
+ Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
+ rememberInstruction(IncV);
+ }
+ return IncV;
}
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
@@ -956,26 +1050,28 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
if (LSRMode) {
if (!isExpandedAddRecExprPHI(PN, IncV, L))
continue;
+ if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
+ continue;
}
else {
if (!isNormalAddRecExprPHI(PN, IncV, L))
continue;
+ if (L == IVIncInsertLoop)
+ do {
+ if (SE.DT->dominates(IncV, IVIncInsertPos))
+ break;
+ // Make sure the increment is where we want it. But don't move it
+ // down past a potential existing post-inc user.
+ IncV->moveBefore(IVIncInsertPos);
+ IVIncInsertPos = IncV;
+ IncV = cast<Instruction>(IncV->getOperand(0));
+ } while (IncV != PN);
}
// Ok, the add recurrence looks usable.
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
// Remember the increment.
rememberInstruction(IncV);
- if (L == IVIncInsertLoop)
- do {
- if (SE.DT->dominates(IncV, IVIncInsertPos))
- break;
- // Make sure the increment is where we want it. But don't move it
- // down past a potential existing post-inc user.
- IncV->moveBefore(IVIncInsertPos);
- IVIncInsertPos = IncV;
- IncV = cast<Instruction>(IncV->getOperand(0));
- } while (IncV != PN);
return PN;
}
}
@@ -984,6 +1080,16 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+ // Another AddRec may need to be recursively expanded below. For example, if
+ // this AddRec is quadratic, the StepV may itself be an AddRec in this
+ // loop. Remove this loop from the PostIncLoops set before expanding such
+ // AddRecs. Otherwise, we cannot find a valid position for the step
+ // (i.e. StepV can never dominate its loop header). Ideally, we could do
+ // SavedPostIncLoops.swap(PostIncLoops), but we generally have a single element,
+ // so it's not worth implementing SmallPtrSet::swap.
+ PostIncLoopSet SavedPostIncLoops = PostIncLoops;
+ PostIncLoops.clear();
+
// Expand code for the start value.
Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
L->getHeader()->begin());
@@ -993,16 +1099,16 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
L->getHeader()));
- // Expand code for the step value. Insert instructions right before the
- // terminator corresponding to the back-edge. Do this before creating the PHI
- // so that PHI reuse code doesn't see an incomplete PHI. If the stride is
- // negative, insert a sub instead of an add for the increment (unless it's a
- // constant, because subtracts of constants are canonicalized to adds).
+ // Expand code for the step value. Do this before creating the PHI so that PHI
+ // reuse code doesn't see an incomplete PHI.
const SCEV *Step = Normalized->getStepRecurrence(SE);
- bool isPointer = ExpandTy->isPointerTy();
- bool isNegative = !isPointer && isNonConstantNegative(Step);
- if (isNegative)
+ // If the stride is negative, insert a sub instead of an add for the increment
+ // (unless it's a constant, because subtracts of constants are canonicalized
+ // to adds).
+ bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
+ if (useSubtract)
Step = SE.getNegativeSCEV(Step);
+ // Expand the step somewhere that dominates the loop header.
Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
// Create the PHI.
@@ -1023,33 +1129,14 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
continue;
}
- // Create a step value and add it to the PHI. If IVIncInsertLoop is
- // non-null and equal to the addrec's loop, insert the instructions
- // at IVIncInsertPos.
+ // Create a step value and add it to the PHI.
+ // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
+ // instructions at IVIncInsertPos.
Instruction *InsertPos = L == IVIncInsertLoop ?
IVIncInsertPos : Pred->getTerminator();
Builder.SetInsertPoint(InsertPos);
- Value *IncV;
- // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
- if (isPointer) {
- PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
- // If the step isn't constant, don't use an implicitly scaled GEP, because
- // that would require a multiply inside the loop.
- if (!isa<ConstantInt>(StepV))
- GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
- GEPPtrTy->getAddressSpace());
- const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
- IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
- if (IncV->getType() != PN->getType()) {
- IncV = Builder.CreateBitCast(IncV, PN->getType());
- rememberInstruction(IncV);
- }
- } else {
- IncV = isNegative ?
- Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
- Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
- rememberInstruction(IncV);
- }
+ Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
+
PN->addIncoming(IncV, Pred);
}
@@ -1057,6 +1144,10 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+ // After expanding subexpressions, restore the PostIncLoops set so the caller
+ // can ensure that IVIncrement dominates the current uses.
+ PostIncLoops = SavedPostIncLoops;
+
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
@@ -1124,10 +1215,31 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// For an expansion to use the postinc form, the client must call
// expandCodeFor with an InsertPoint that is either outside the PostIncLoop
// or dominated by IVIncInsertPos.
- assert((!isa<Instruction>(Result) ||
- SE.DT->dominates(cast<Instruction>(Result),
- Builder.GetInsertPoint())) &&
- "postinc expansion does not dominate use");
+ if (isa<Instruction>(Result)
+ && !SE.DT->dominates(cast<Instruction>(Result),
+ Builder.GetInsertPoint())) {
+ // The induction variable's postinc expansion does not dominate this use.
+ // IVUsers tries to prevent this case, so it is rare. However, it can
+ // happen when an IVUser outside the loop is not dominated by the latch
+ // block. Adjusting IVIncInsertPos before expansion begins cannot handle
+ // all cases. Consider a phi outide whose operand is replaced during
+ // expansion with the value of the postinc user. Without fundamentally
+ // changing the way postinc users are tracked, the only remedy is
+ // inserting an extra IV increment. StepV might fold into PostLoopOffset,
+ // but hopefully expandCodeFor handles that.
+ bool useSubtract =
+ !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
+ if (useSubtract)
+ Step = SE.getNegativeSCEV(Step);
+ // Expand the step somewhere that dominates the loop header.
+ BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+ BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+ Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+ // Restore the insertion point to the place where the caller has
+ // determined dominates all uses.
+ restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+ Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
+ }
}
// Re-apply any non-loop-dominating scale.
@@ -1363,10 +1475,7 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
- Instruction *I) {
- BasicBlock::iterator IP = I;
- while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
- ++IP;
+ Instruction *IP) {
Builder.SetInsertPoint(IP->getParent(), IP);
return expandCodeFor(SH, Ty);
}
@@ -1392,14 +1501,23 @@ Value *SCEVExpander::expand(const SCEV *S) {
if (!L) break;
if (BasicBlock *Preheader = L->getLoopPreheader())
InsertPt = Preheader->getTerminator();
+ else {
+ // LSR sets the insertion point for AddRec start/step values to the
+ // block start to simplify value reuse, even though it's an invalid
+ // position. SCEVExpander must correct for this in all cases.
+ InsertPt = L->getHeader()->getFirstInsertionPt();
+ }
} else {
// If the SCEV is computable at this level, insert it into the header
// after the PHIs (and after any other instructions that we've inserted
// there) so that it is guaranteed to dominate any user inside the loop.
if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
InsertPt = L->getHeader()->getFirstInsertionPt();
- while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
+ while (InsertPt != Builder.GetInsertPoint()
+ && (isInsertedInstruction(InsertPt)
+ || isa<DbgInfoIntrinsic>(InsertPt))) {
InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
+ }
break;
}
@@ -1434,23 +1552,9 @@ void SCEVExpander::rememberInstruction(Value *I) {
InsertedPostIncValues.insert(I);
else
InsertedValues.insert(I);
-
- // If we just claimed an existing instruction and that instruction had
- // been the insert point, adjust the insert point forward so that
- // subsequently inserted code will be dominated.
- if (Builder.GetInsertPoint() == I) {
- BasicBlock::iterator It = cast<Instruction>(I);
- do { ++It; } while (isInsertedInstruction(It) ||
- isa<DbgInfoIntrinsic>(It));
- Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
- }
}
void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
- // If we acquired more instructions since the old insert point was saved,
- // advance past them.
- while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;
-
Builder.SetInsertPoint(BB, I);
}
@@ -1478,40 +1582,13 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
return V;
}
-/// hoistStep - Attempt to hoist an IV increment above a potential use.
-///
-/// To successfully hoist, two criteria must be met:
-/// - IncV operands dominate InsertPos and
-/// - InsertPos dominates IncV
-///
-/// Meeting the second condition means that we don't need to check all of IncV's
-/// existing uses (it's moving up in the domtree).
-///
-/// This does not yet recursively hoist the operands, although that would
-/// not be difficult.
-///
-/// This does not require a SCEVExpander instance and could be replaced by a
-/// general code-insertion helper.
-bool SCEVExpander::hoistStep(Instruction *IncV, Instruction *InsertPos,
- const DominatorTree *DT) {
- if (DT->dominates(IncV, InsertPos))
- return true;
-
- if (!DT->dominates(InsertPos->getParent(), IncV->getParent()))
- return false;
-
- if (IncV->mayHaveSideEffects())
- return false;
-
- // Attempt to hoist IncV
- for (User::op_iterator OI = IncV->op_begin(), OE = IncV->op_end();
- OI != OE; ++OI) {
- Instruction *OInst = dyn_cast<Instruction>(OI);
- if (OInst && !DT->dominates(OInst, InsertPos))
- return false;
- }
- IncV->moveBefore(InsertPos);
- return true;
+/// Sort values by integer width for replaceCongruentIVs.
+static bool width_descending(Value *lhs, Value *rhs) {
+ // Put pointers at the front, ahead of the integer phis (which sort by
+ // decreasing width), and make sure pointer < pointer = false.
+ if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
+ return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
+ return rhs->getType()->getPrimitiveSizeInBits()
+ < lhs->getType()->getPrimitiveSizeInBits();
}
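
A standalone sanity check of the ordering this comparator imposes, over simplified stand-in types rather than LLVM's Type: pointers group ahead of the integer phis, integers sort by decreasing width, and pointer-vs-pointer compares false both ways, keeping the ordering strict-weak. The net effect is that Phis.back() ends up being the narrowest integer phi, which is what replaceCongruentIVs relies on below:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Simplified stand-in for a phi's type: an integer width or a pointer.
    struct Ty { bool IsInt; unsigned Bits; };

    // Mirror of width_descending over the stand-in type.
    static bool widthDescending(const Ty &L, const Ty &R) {
      if (!L.IsInt || !R.IsInt)
        return R.IsInt && !L.IsInt; // pointer vs pointer compares false
      return R.Bits < L.Bits;       // wider integers sort first
    }

    int main() {
      std::vector<Ty> Phis = {{true, 32}, {false, 0}, {true, 64}};
      std::sort(Phis.begin(), Phis.end(), widthDescending);
      // Pointer first, then i64, then i32; back() is the narrowest integer.
      assert(!Phis[0].IsInt && Phis[1].Bits == 64 && Phis[2].Bits == 32);
      return 0;
    }
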
/// replaceCongruentIVs - Check for congruent phis in this loop header and
@@ -1521,23 +1598,45 @@ bool SCEVExpander::hoistStep(Instruction *IncV, Instruction *InsertPos,
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
- SmallVectorImpl<WeakVH> &DeadInsts) {
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ const TargetLowering *TLI) {
+ // Find integer phis in order of increasing width.
+ SmallVector<PHINode*, 8> Phis;
+ for (BasicBlock::iterator I = L->getHeader()->begin();
+ PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
+ Phis.push_back(Phi);
+ }
+ if (TLI)
+ std::sort(Phis.begin(), Phis.end(), width_descending);
+
unsigned NumElim = 0;
DenseMap<const SCEV *, PHINode *> ExprToIVMap;
- for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
- PHINode *Phi = cast<PHINode>(I);
+ // Process phis from wide to narrow. Map wide phis to their truncation
+ // so narrow phis can reuse them.
+ for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
+ PEnd = Phis.end(); PIter != PEnd; ++PIter) {
+ PHINode *Phi = *PIter;
+
if (!SE.isSCEVable(Phi->getType()))
continue;
PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
if (!OrigPhiRef) {
OrigPhiRef = Phi;
+ if (Phi->getType()->isIntegerTy() && TLI
+ && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
+ // This phi can be freely truncated to the narrowest phi type. Map the
+ // truncated expression to it so it will be reused for narrow types.
+ const SCEV *TruncExpr =
+ SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
+ ExprToIVMap[TruncExpr] = Phi;
+ }
continue;
}
- // If one phi derives from the other via GEPs, types may differ.
- // We could consider adding a bitcast here to handle it.
- if (OrigPhiRef->getType() != Phi->getType())
+ // Replacing a pointer phi with an integer phi or vice-versa doesn't make
+ // sense.
+ if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
continue;
if (BasicBlock *LatchBlock = L->getLoopLatch()) {
@@ -1546,32 +1645,56 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
Instruction *IsomorphicInc =
cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
- // If this phi is more canonical, swap it with the original.
- if (!isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)
- && isExpandedAddRecExprPHI(Phi, IsomorphicInc, L)) {
+ // If this phi has the same width but is more canonical, replace the
+ // original with it. As part of the "more canonical" determination,
+ // respect a prior decision to use an IV chain.
+ if (OrigPhiRef->getType() == Phi->getType()
+ && !(ChainedPhis.count(Phi)
+ || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
+ && (ChainedPhis.count(Phi)
+ || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
std::swap(OrigPhiRef, Phi);
std::swap(OrigInc, IsomorphicInc);
}
// Replacing the congruent phi is sufficient because acyclic redundancy
// elimination, CSE/GVN, should handle the rest. However, once SCEV proves
// that a phi is congruent, it's often the head of an IV user cycle that
- // is isomorphic with the original phi. So it's worth eagerly cleaning up
- // the common case of a single IV increment.
- if (OrigInc != IsomorphicInc &&
- OrigInc->getType() == IsomorphicInc->getType() &&
- SE.getSCEV(OrigInc) == SE.getSCEV(IsomorphicInc) &&
- hoistStep(OrigInc, IsomorphicInc, DT)) {
+ // is isomorphic with the original phi. It's worth eagerly cleaning up the
+ // common case of a single IV increment so that DeleteDeadPHIs can remove
+ // cycles that had postinc uses.
+ const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
+ IsomorphicInc->getType());
+ if (OrigInc != IsomorphicInc
+ && TruncExpr == SE.getSCEV(IsomorphicInc)
+ && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
+ || hoistIVInc(OrigInc, IsomorphicInc))) {
DEBUG_WITH_TYPE(DebugType, dbgs()
<< "INDVARS: Eliminated congruent iv.inc: "
<< *IsomorphicInc << '\n');
- IsomorphicInc->replaceAllUsesWith(OrigInc);
+ Value *NewInc = OrigInc;
+ if (OrigInc->getType() != IsomorphicInc->getType()) {
+ Instruction *IP = isa<PHINode>(OrigInc)
+ ? (Instruction*)L->getHeader()->getFirstInsertionPt()
+ : OrigInc->getNextNode();
+ IRBuilder<> Builder(IP);
+ Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
+ NewInc = Builder.
+ CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
+ }
+ IsomorphicInc->replaceAllUsesWith(NewInc);
DeadInsts.push_back(IsomorphicInc);
}
}
DEBUG_WITH_TYPE(DebugType, dbgs()
<< "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
++NumElim;
- Phi->replaceAllUsesWith(OrigPhiRef);
+ Value *NewIV = OrigPhiRef;
+ if (OrigPhiRef->getType() != Phi->getType()) {
+ IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
+ Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
+ NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
+ }
+ Phi->replaceAllUsesWith(NewIV);
DeadInsts.push_back(Phi);
}
return NumElim;
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
index c66ecd6..dd2ed4f 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -118,7 +118,6 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
// Conservatively use AnyWrap until/unless we need FlagNW.
const SCEV *Result = SE.getAddRecExpr(Operands, L, SCEV::FlagAnyWrap);
switch (Kind) {
- default: llvm_unreachable("Unexpected transform name!");
case NormalizeAutodetect:
if (IVUseShouldUsePostIncValue(User, OperandValToReplace, L, &DT)) {
const SCEV *TransformedStep =
@@ -191,7 +190,6 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
}
llvm_unreachable("Unexpected SCEV kind!");
- return 0;
}
/// Manage recursive transformation across an expression DAG. Revisiting
diff --git a/contrib/llvm/lib/Analysis/SparsePropagation.cpp b/contrib/llvm/lib/Analysis/SparsePropagation.cpp
index d8c207b..c819666 100644
--- a/contrib/llvm/lib/Analysis/SparsePropagation.cpp
+++ b/contrib/llvm/lib/Analysis/SparsePropagation.cpp
@@ -194,8 +194,8 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
Succs.assign(TI.getNumSuccessors(), true);
return;
}
-
- Succs[SI.findCaseValue(cast<ConstantInt>(C))] = true;
+ SwitchInst::CaseIt Case = SI.findCaseValue(cast<ConstantInt>(C));
+ Succs[Case.getSuccessorIndex()] = true;
}
@@ -327,13 +327,13 @@ void SparseSolver::Solve(Function &F) {
}
void SparseSolver::Print(Function &F, raw_ostream &OS) const {
- OS << "\nFUNCTION: " << F.getNameStr() << "\n";
+ OS << "\nFUNCTION: " << F.getName() << "\n";
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (!BBExecutable.count(BB))
OS << "INFEASIBLE: ";
OS << "\t";
if (BB->hasName())
- OS << BB->getNameStr() << ":\n";
+ OS << BB->getName() << ":\n";
else
OS << "; anon bb\n";
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
diff --git a/contrib/llvm/lib/Analysis/Trace.cpp b/contrib/llvm/lib/Analysis/Trace.cpp
index 68a39cd..ff5010b 100644
--- a/contrib/llvm/lib/Analysis/Trace.cpp
+++ b/contrib/llvm/lib/Analysis/Trace.cpp
@@ -34,7 +34,7 @@ Module *Trace::getModule() const {
///
void Trace::print(raw_ostream &O) const {
Function *F = getFunction();
- O << "; Trace from function " << F->getNameStr() << ", blocks:\n";
+ O << "; Trace from function " << F->getName() << ", blocks:\n";
for (const_iterator i = begin(), e = end(); i != e; ++i) {
O << "; ";
WriteAsOperand(O, *i, true, getModule());
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index 4d94f61..a430f62 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -20,8 +20,10 @@
#include "llvm/GlobalAlias.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
#include "llvm/Operator.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
@@ -41,10 +43,176 @@ static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
return TD ? TD->getPointerSizeInBits() : 0;
}
-/// ComputeMaskedBits - Determine which of the bits specified in Mask are
-/// known to be either zero or one and return them in the KnownZero/KnownOne
-/// bit sets. This code only analyzes bits in Mask, in order to short-circuit
-/// processing.
+static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
+ APInt &KnownZero, APInt &KnownOne,
+ APInt &KnownZero2, APInt &KnownOne2,
+ const TargetData *TD, unsigned Depth) {
+ if (!Add) {
+ if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
+ // We know that the top bits of C-X are clear if X contains less bits
+ // than C (i.e. no wrap-around can happen). For example, 20-X is
+ // positive if we can prove that X is >= 0 and < 16.
+ if (!CLHS->getValue().isNegative()) {
+ unsigned BitWidth = KnownZero.getBitWidth();
+ unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
+ // NLZ can't be BitWidth with no sign bit
+ APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
+ llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
+
+ // If all of the MaskV bits are known to be zero, then we know the
+ // output top bits are zero, because we now know that the output is
+ // from [0-C].
+ if ((KnownZero2 & MaskV) == MaskV) {
+ unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
+ // Top bits known zero.
+ KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
+ }
+ }
+ }
+ }
+
+ unsigned BitWidth = KnownZero.getBitWidth();
+
+ // If one of the operands has trailing zeros, then the bits that the
+ // other operand has in those bit positions will be preserved in the
+ // result. For an add, this works with either operand. For a subtract,
+ // this only works if the known zeros are in the right operand.
+ APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
+ llvm::ComputeMaskedBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
+ assert((LHSKnownZero & LHSKnownOne) == 0 &&
+ "Bits known to be one AND zero?");
+ unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();
+
+ llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();
+
+ // Determine which operand has more trailing zeros, and use that
+ // many bits from the other operand.
+ if (LHSKnownZeroOut > RHSKnownZeroOut) {
+ if (Add) {
+ APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
+ KnownZero |= KnownZero2 & Mask;
+ KnownOne |= KnownOne2 & Mask;
+ } else {
+ // If the known zeros are in the left operand for a subtract,
+ // fall back to the minimum known zeros in both operands.
+ KnownZero |= APInt::getLowBitsSet(BitWidth,
+ std::min(LHSKnownZeroOut,
+ RHSKnownZeroOut));
+ }
+ } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
+ APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
+ KnownZero |= LHSKnownZero & Mask;
+ KnownOne |= LHSKnownOne & Mask;
+ }
+
+ // Are we still trying to solve for the sign bit?
+ if (!KnownZero.isNegative() && !KnownOne.isNegative()) {
+ if (NSW) {
+ if (Add) {
+ // Adding two positive numbers can't wrap into negative
+ if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
+ KnownZero |= APInt::getSignBit(BitWidth);
+ // and adding two negative numbers can't wrap into positive.
+ else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
+ KnownOne |= APInt::getSignBit(BitWidth);
+ } else {
+ // Subtracting a negative number from a positive one can't wrap
+ if (LHSKnownZero.isNegative() && KnownOne2.isNegative())
+ KnownZero |= APInt::getSignBit(BitWidth);
+ // neither can subtracting a positive number from a negative one.
+ else if (LHSKnownOne.isNegative() && KnownZero2.isNegative())
+ KnownOne |= APInt::getSignBit(BitWidth);
+ }
+ }
+ }
+}
+
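A standalone illustration (plain integers rather than APInt) of the trailing-zero rule ComputeMaskedBitsAddSub uses: if one addend has N known trailing zero bits, no carry can propagate into the low N bits, so the sum's low N bits are exactly the other addend's:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t a = 0x1230; // low 4 bits known zero
      uint32_t b = 0x1234; // arbitrary
      uint32_t sum = a + b;
      // No carry can enter the low 4 bits, so they come straight from b.
      assert((sum & 0xF) == (b & 0xF));
      return 0;
    }
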
+static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
+ APInt &KnownZero, APInt &KnownOne,
+ APInt &KnownZero2, APInt &KnownOne2,
+ const TargetData *TD, unsigned Depth) {
+ unsigned BitWidth = KnownZero.getBitWidth();
+ ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
+ ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
+ bool isKnownNegative = false;
+ bool isKnownNonNegative = false;
+ // If the multiplication is known not to overflow, compute the sign bit.
+ if (NSW) {
+ if (Op0 == Op1) {
+ // The product of a number with itself is non-negative.
+ isKnownNonNegative = true;
+ } else {
+ bool isKnownNonNegativeOp1 = KnownZero.isNegative();
+ bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
+ bool isKnownNegativeOp1 = KnownOne.isNegative();
+ bool isKnownNegativeOp0 = KnownOne2.isNegative();
+ // The product of two numbers with the same sign is non-negative.
+ isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
+ (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
+ // The product of a negative number and a non-negative number is either
+ // negative or zero.
+ if (!isKnownNonNegative)
+ isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
+ isKnownNonZero(Op0, TD, Depth)) ||
+ (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
+ isKnownNonZero(Op1, TD, Depth));
+ }
+ }
+
+ // If low bits are zero in either operand, output low known-0 bits.
+ // Also compute a conservative estimate for high known-0 bits.
+ // More trickiness is possible, but this is sufficient for the
+ // interesting case of alignment computation.
+ KnownOne.clearAllBits();
+ unsigned TrailZ = KnownZero.countTrailingOnes() +
+ KnownZero2.countTrailingOnes();
+ unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
+ KnownZero2.countLeadingOnes(),
+ BitWidth) - BitWidth;
+
+ TrailZ = std::min(TrailZ, BitWidth);
+ LeadZ = std::min(LeadZ, BitWidth);
+ KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
+ APInt::getHighBitsSet(BitWidth, LeadZ);
+
+ // Only make use of no-wrap flags if we failed to compute the sign bit
+ // directly. This matters if the multiplication always overflows, in
+ // which case we prefer to follow the result of the direct computation,
+ // though as the program is invoking undefined behaviour we can choose
+ // whatever we like here.
+ if (isKnownNonNegative && !KnownOne.isNegative())
+ KnownZero.setBit(BitWidth - 1);
+ else if (isKnownNegative && !KnownZero.isNegative())
+ KnownOne.setBit(BitWidth - 1);
+}
+
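Similarly for the multiply case: the product of a value with x trailing zeros and a value with y trailing zeros has at least x + y trailing zeros, which is the alignment-style fact ComputeMaskedBitsMul's TrailZ computation encodes. A plain-integer check:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t a = 0x8;   // 3 trailing zeros
      uint32_t b = 0x14;  // 2 trailing zeros
      uint32_t p = a * b; // 0xA0: at least 3 + 2 = 5 trailing zeros
      assert((p & 0x1F) == 0);
      return 0;
    }
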
+void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
+ unsigned BitWidth = KnownZero.getBitWidth();
+ unsigned NumRanges = Ranges.getNumOperands() / 2;
+ assert(NumRanges >= 1);
+
+ // Use the high end of the ranges to find leading zeros.
+ unsigned MinLeadingZeros = BitWidth;
+ for (unsigned i = 0; i < NumRanges; ++i) {
+ ConstantInt *Lower = cast<ConstantInt>(Ranges.getOperand(2*i + 0));
+ ConstantInt *Upper = cast<ConstantInt>(Ranges.getOperand(2*i + 1));
+ ConstantRange Range(Lower->getValue(), Upper->getValue());
+ if (Range.isWrappedSet())
+ MinLeadingZeros = 0; // A wrapped range covers -1, which has no leading zeros.
+ unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
+ MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
+ }
+
+ KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
+}
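
For computeMaskedBitsLoad above: every value in a half-open !range interval [Lower, Upper) is at most Upper - 1, so the loaded value has at least as many leading zero bits as Upper - 1. A plain-integer sketch (uses the GCC/Clang __builtin_clz intrinsic):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t upper = 100;        // metadata range [0, 100)
      uint32_t maxVal = upper - 1; // 99 == 0x63, fits in 7 bits
      int leadingZeros = __builtin_clz(maxVal);
      assert(leadingZeros == 25);  // so 25 high bits are known zero
      return 0;
    }
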
+/// ComputeMaskedBits - Determine which of the bits are known to be either zero
+/// or one and return them in the KnownZero/KnownOne bit sets.
+///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
@@ -54,67 +222,75 @@ static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
-/// where V is a vector, the mask, known zero, and known one values are the
+/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
-void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
- APInt &KnownZero, APInt &KnownOne,
+void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
const TargetData *TD, unsigned Depth) {
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
- unsigned BitWidth = Mask.getBitWidth();
- assert((V->getType()->isIntOrIntVectorTy() || V->getType()->isPointerTy())
- && "Not integer or pointer type!");
+ unsigned BitWidth = KnownZero.getBitWidth();
+
+ assert((V->getType()->isIntOrIntVectorTy() ||
+ V->getType()->getScalarType()->isPointerTy()) &&
+ "Not integer or pointer type!");
assert((!TD ||
TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
(!V->getType()->isIntOrIntVectorTy() ||
V->getType()->getScalarSizeInBits() == BitWidth) &&
- KnownZero.getBitWidth() == BitWidth &&
+ KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
"V, Mask, KnownOne and KnownZero should have same BitWidth");
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
// We know all of the bits for a constant!
- KnownOne = CI->getValue() & Mask;
- KnownZero = ~KnownOne & Mask;
+ KnownOne = CI->getValue();
+ KnownZero = ~KnownOne;
return;
}
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) ||
isa<ConstantAggregateZero>(V)) {
KnownOne.clearAllBits();
- KnownZero = Mask;
+ KnownZero = APInt::getAllOnesValue(BitWidth);
return;
}
// Handle a constant vector by taking the intersection of the known bits of
- // each element.
- if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
+ // each element. There is no real need to handle ConstantVector here, because
+ // we don't handle undef in any particularly useful way.
+ if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
+ // We know that CDS must be a vector of integers. Take the intersection of
+ // each element.
KnownZero.setAllBits(); KnownOne.setAllBits();
- for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
- APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
- ComputeMaskedBits(CV->getOperand(i), Mask, KnownZero2, KnownOne2,
- TD, Depth);
- KnownZero &= KnownZero2;
- KnownOne &= KnownOne2;
+ APInt Elt(KnownZero.getBitWidth(), 0);
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ Elt = CDS->getElementAsInteger(i);
+ KnownZero &= ~Elt;
+ KnownOne &= Elt;
}
return;
}
+
// The address of an aligned GlobalValue has trailing zeros.
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
unsigned Align = GV->getAlignment();
- if (Align == 0 && TD && GV->getType()->getElementType()->isSized()) {
- Type *ObjectType = GV->getType()->getElementType();
- // If the object is defined in the current Module, we'll be giving
- // it the preferred alignment. Otherwise, we have to assume that it
- // may only have the minimum ABI alignment.
- if (!GV->isDeclaration() && !GV->mayBeOverridden())
- Align = TD->getPrefTypeAlignment(ObjectType);
- else
- Align = TD->getABITypeAlignment(ObjectType);
+ if (Align == 0 && TD) {
+ if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
+ Type *ObjectType = GVar->getType()->getElementType();
+ if (ObjectType->isSized()) {
+ // If the object is defined in the current Module, we'll be giving
+ // it the preferred alignment. Otherwise, we have to assume that it
+ // may only have the minimum ABI alignment.
+ if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
+ Align = TD->getPreferredAlignment(GVar);
+ else
+ Align = TD->getABITypeAlignment(ObjectType);
+ }
+ }
}
if (Align > 0)
- KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
- CountTrailingZeros_32(Align));
+ KnownZero = APInt::getLowBitsSet(BitWidth,
+ CountTrailingZeros_32(Align));
else
KnownZero.clearAllBits();
KnownOne.clearAllBits();
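
The GlobalValue case above turns alignment into known-zero low bits: an address aligned to A = 2^k bytes has its low k bits clear, which is exactly what CountTrailingZeros_32(Align) feeds into getLowBitsSet. A plain-integer check (again using a GCC/Clang builtin):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t align = 16;                         // 2^4-byte alignment
      int knownZeroLowBits = __builtin_ctz(align); // 4
      uint32_t addr = 7 * align;                   // any multiple of 16
      assert((addr & ((1u << knownZeroLowBits) - 1)) == 0);
      return 0;
    }
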
@@ -126,8 +302,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (GA->mayBeOverridden()) {
KnownZero.clearAllBits(); KnownOne.clearAllBits();
} else {
- ComputeMaskedBits(GA->getAliasee(), Mask, KnownZero, KnownOne,
- TD, Depth+1);
+ ComputeMaskedBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
}
return;
}
@@ -136,15 +311,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// Get alignment information off byval arguments if specified in the IR.
if (A->hasByValAttr())
if (unsigned Align = A->getParamAlignment())
- KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
- CountTrailingZeros_32(Align));
+ KnownZero = APInt::getLowBitsSet(BitWidth,
+ CountTrailingZeros_32(Align));
return;
}
// Start out not knowing anything.
KnownZero.clearAllBits(); KnownOne.clearAllBits();
- if (Depth == MaxDepth || Mask == 0)
+ if (Depth == MaxDepth)
return; // Limit search depth.
Operator *I = dyn_cast<Operator>(V);
@@ -153,12 +328,14 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
switch (I->getOpcode()) {
default: break;
+ case Instruction::Load:
+ if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
+ computeMaskedBitsLoad(*MD, KnownZero);
+ return;
case Instruction::And: {
// If either the LHS or the RHS are Zero, the result is zero.
- ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
- APInt Mask2(Mask & ~KnownZero);
- ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -169,10 +346,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
return;
}
case Instruction::Or: {
- ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
- APInt Mask2(Mask & ~KnownOne);
- ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -183,9 +358,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
return;
}
case Instruction::Xor: {
- ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
- ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -197,55 +371,32 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
return;
}
case Instruction::Mul: {
- APInt Mask2 = APInt::getAllOnesValue(BitWidth);
- ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, TD,Depth+1);
- ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
- Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
- // If low bits are zero in either operand, output low known-0 bits.
- // Also compute a conservative estimate for high known-0 bits.
- // More trickiness is possible, but this is sufficient for the
- // interesting case of alignment computation.
- KnownOne.clearAllBits();
- unsigned TrailZ = KnownZero.countTrailingOnes() +
- KnownZero2.countTrailingOnes();
- unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
- KnownZero2.countLeadingOnes(),
- BitWidth) - BitWidth;
-
- TrailZ = std::min(TrailZ, BitWidth);
- LeadZ = std::min(LeadZ, BitWidth);
- KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
- APInt::getHighBitsSet(BitWidth, LeadZ);
- KnownZero &= Mask;
- return;
+ bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ ComputeMaskedBitsMul(I->getOperand(0), I->getOperand(1), NSW,
+ KnownZero, KnownOne, KnownZero2, KnownOne2, TD, Depth);
+ break;
}
case Instruction::UDiv: {
// For the purposes of computing leading zeros we can conservatively
// treat a udiv as a logical right shift by the power of 2 known to
// be less than the denominator.
- APInt AllOnes = APInt::getAllOnesValue(BitWidth);
- ComputeMaskedBits(I->getOperand(0),
- AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
unsigned LeadZ = KnownZero2.countLeadingOnes();
KnownOne2.clearAllBits();
KnownZero2.clearAllBits();
- ComputeMaskedBits(I->getOperand(1),
- AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
LeadZ = std::min(BitWidth,
LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
- KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
+ KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
return;
}
case Instruction::Select:
- ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, TD, Depth+1);
- ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, TD,
+ ComputeMaskedBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -278,11 +429,9 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
else
SrcBitWidth = SrcTy->getScalarSizeInBits();
- APInt MaskIn = Mask.zextOrTrunc(SrcBitWidth);
KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
- ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
KnownZero = KnownZero.zextOrTrunc(BitWidth);
KnownOne = KnownOne.zextOrTrunc(BitWidth);
// Any top bits are known to be zero.
@@ -296,8 +445,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// TODO: For now, not handling conversions like:
// (bitcast i64 %x to <2 x i32>)
!I->getType()->isVectorTy()) {
- ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
return;
}
break;
@@ -306,11 +454,9 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
- APInt MaskIn = Mask.trunc(SrcBitWidth);
KnownZero = KnownZero.trunc(SrcBitWidth);
KnownOne = KnownOne.trunc(SrcBitWidth);
- ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
@@ -327,9 +473,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
- APInt Mask2(Mask.lshr(ShiftAmt));
- ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero <<= ShiftAmt;
KnownOne <<= ShiftAmt;
@@ -344,9 +488,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
// Unsigned shift right.
- APInt Mask2(Mask.shl(ShiftAmt));
- ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero,KnownOne, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero,KnownOne, TD, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
@@ -362,9 +504,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
// Signed shift right.
- APInt Mask2(Mask.shl(ShiftAmt));
- ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
@@ -378,100 +518,25 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
break;
case Instruction::Sub: {
- if (ConstantInt *CLHS = dyn_cast<ConstantInt>(I->getOperand(0))) {
- // We know that the top bits of C-X are clear if X contains less bits
- // than C (i.e. no wrap-around can happen). For example, 20-X is
- // positive if we can prove that X is >= 0 and < 16.
- if (!CLHS->getValue().isNegative()) {
- unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
- // NLZ can't be BitWidth with no sign bit
- APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
- ComputeMaskedBits(I->getOperand(1), MaskV, KnownZero2, KnownOne2,
- TD, Depth+1);
-
- // If all of the MaskV bits are known to be zero, then we know the
- // output top bits are zero, because we now know that the output is
- // from [0-C].
- if ((KnownZero2 & MaskV) == MaskV) {
- unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
- // Top bits known zero.
- KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
- }
- }
- }
+ bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ ComputeMaskedBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
+ KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
+ Depth);
+ break;
}
- // fall through
case Instruction::Add: {
- // If one of the operands has trailing zeros, then the bits that the
- // other operand has in those bit positions will be preserved in the
- // result. For an add, this works with either operand. For a subtract,
- // this only works if the known zeros are in the right operand.
- APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- APInt Mask2 = APInt::getLowBitsSet(BitWidth,
- BitWidth - Mask.countLeadingZeros());
- ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne, TD,
- Depth+1);
- assert((LHSKnownZero & LHSKnownOne) == 0 &&
- "Bits known to be one AND zero?");
- unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();
-
- ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero2, KnownOne2, TD,
- Depth+1);
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
- unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();
-
- // Determine which operand has more trailing zeros, and use that
- // many bits from the other operand.
- if (LHSKnownZeroOut > RHSKnownZeroOut) {
- if (I->getOpcode() == Instruction::Add) {
- APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
- KnownZero |= KnownZero2 & Mask;
- KnownOne |= KnownOne2 & Mask;
- } else {
- // If the known zeros are in the left operand for a subtract,
- // fall back to the minimum known zeros in both operands.
- KnownZero |= APInt::getLowBitsSet(BitWidth,
- std::min(LHSKnownZeroOut,
- RHSKnownZeroOut));
- }
- } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
- APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
- KnownZero |= LHSKnownZero & Mask;
- KnownOne |= LHSKnownOne & Mask;
- }
-
- // Are we still trying to solve for the sign bit?
- if (Mask.isNegative() && !KnownZero.isNegative() && !KnownOne.isNegative()){
- OverflowingBinaryOperator *OBO = cast<OverflowingBinaryOperator>(I);
- if (OBO->hasNoSignedWrap()) {
- if (I->getOpcode() == Instruction::Add) {
- // Adding two positive numbers can't wrap into negative
- if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
- KnownZero |= APInt::getSignBit(BitWidth);
- // and adding two negative numbers can't wrap into positive.
- else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
- KnownOne |= APInt::getSignBit(BitWidth);
- } else {
- // Subtracting a negative number from a positive one can't wrap
- if (LHSKnownZero.isNegative() && KnownOne2.isNegative())
- KnownZero |= APInt::getSignBit(BitWidth);
- // neither can subtracting a positive number from a negative one.
- else if (LHSKnownOne.isNegative() && KnownZero2.isNegative())
- KnownOne |= APInt::getSignBit(BitWidth);
- }
- }
- }
-
- return;
+ bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ ComputeMaskedBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
+ KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
+ Depth);
+ break;
}
case Instruction::SRem:
if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
APInt RA = Rem->getValue().abs();
if (RA.isPowerOf2()) {
APInt LowBits = RA - 1;
- APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
- ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
// The low bits of the first operand are unchanged by the srem.
KnownZero = KnownZero2 & LowBits;
@@ -487,19 +552,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
KnownOne |= ~LowBits;
- KnownZero &= Mask;
- KnownOne &= Mask;
-
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
}
// The sign bit is the LHS's sign bit, except when the result of the
// remainder is zero.
- if (Mask.isNegative() && KnownZero.isNonNegative()) {
- APInt Mask2 = APInt::getSignBit(BitWidth);
+ if (KnownZero.isNonNegative()) {
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne, TD,
+ ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
Depth+1);
// If it's known zero, our sign bit is also zero.
if (LHSKnownZero.isNegative())
@@ -512,27 +573,24 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
APInt RA = Rem->getValue();
if (RA.isPowerOf2()) {
APInt LowBits = (RA - 1);
- APInt Mask2 = LowBits & Mask;
- KnownZero |= ~LowBits & Mask;
- ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
+ ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD,
Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ KnownZero |= ~LowBits;
+ KnownOne &= LowBits;
break;
}
}
// Since the result is less than or equal to either operand, any leading
// zero bits in either operand must also exist in the result.
- APInt AllOnes = APInt::getAllOnesValue(BitWidth);
- ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
- TD, Depth+1);
- ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
- TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
KnownOne.clearAllBits();
- KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
+ KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
break;
}
@@ -543,17 +601,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
Align = TD->getABITypeAlignment(AI->getType()->getElementType());
if (Align > 0)
- KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
- CountTrailingZeros_32(Align));
+ KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
break;
}
case Instruction::GetElementPtr: {
// Analyze all of the subscripts of this getelementptr instruction
// to determine if we can prove known low zero bits.
- APInt LocalMask = APInt::getAllOnesValue(BitWidth);
APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
- ComputeMaskedBits(I->getOperand(0), LocalMask,
- LocalKnownZero, LocalKnownOne, TD, Depth+1);
+ ComputeMaskedBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
+ Depth+1);
unsigned TrailZ = LocalKnownZero.countTrailingOnes();
gep_type_iterator GTI = gep_type_begin(I);
@@ -573,17 +629,15 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (!IndexedTy->isSized()) return;
unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
- LocalMask = APInt::getAllOnesValue(GEPOpiBits);
LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
- ComputeMaskedBits(Index, LocalMask,
- LocalKnownZero, LocalKnownOne, TD, Depth+1);
+ ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
TrailZ = std::min(TrailZ,
unsigned(CountTrailingZeros_64(TypeSize) +
LocalKnownZero.countTrailingOnes()));
}
}
- KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
+ KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
break;
}
case Instruction::PHI: {
@@ -618,17 +672,13 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
break;
// Ok, we have a PHI of the form L op= R. Check for low
// zero bits.
- APInt Mask2 = APInt::getAllOnesValue(BitWidth);
- ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
- Mask2 = APInt::getLowBitsSet(BitWidth,
- KnownZero2.countTrailingOnes());
+ ComputeMaskedBits(R, KnownZero2, KnownOne2, TD, Depth+1);
// We need to take the minimum number of known bits
APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
- ComputeMaskedBits(L, Mask2, KnownZero3, KnownOne3, TD, Depth+1);
+ ComputeMaskedBits(L, KnownZero3, KnownOne3, TD, Depth+1);
- KnownZero = Mask &
- APInt::getLowBitsSet(BitWidth,
+ KnownZero = APInt::getLowBitsSet(BitWidth,
std::min(KnownZero2.countTrailingOnes(),
KnownZero3.countTrailingOnes()));
break;
@@ -657,8 +707,8 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
KnownOne2 = APInt(BitWidth, 0);
// Recurse, but cap the recursion to one level, because we don't
// want to waste time spinning around in loops.
- ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
- KnownZero2, KnownOne2, TD, MaxDepth-1);
+ ComputeMaskedBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
+ MaxDepth-1);
KnownZero &= KnownZero2;
KnownOne &= KnownOne2;
// If all bits have been ruled out, there's no need to check
@@ -673,10 +723,17 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
default: break;
- case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz: {
unsigned LowBits = Log2_32(BitWidth)+1;
+ // If this call is undefined for 0, the result will be less than 2^n.
+ if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
+ LowBits -= 1;
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+ break;
+ }
+ case Intrinsic::ctpop: {
+ unsigned LowBits = Log2_32(BitWidth)+1;
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
break;
}
@@ -687,6 +744,34 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
}
break;
+ case Instruction::ExtractValue:
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
+ ExtractValueInst *EVI = cast<ExtractValueInst>(I);
+ if (EVI->getNumIndices() != 1) break;
+ if (EVI->getIndices()[0] == 0) {
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::sadd_with_overflow:
+ ComputeMaskedBitsAddSub(true, II->getArgOperand(0),
+ II->getArgOperand(1), false, KnownZero,
+ KnownOne, KnownZero2, KnownOne2, TD, Depth);
+ break;
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ ComputeMaskedBitsAddSub(false, II->getArgOperand(0),
+ II->getArgOperand(1), false, KnownZero,
+ KnownOne, KnownZero2, KnownOne2, TD, Depth);
+ break;
+ case Intrinsic::umul_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ ComputeMaskedBitsMul(II->getArgOperand(0), II->getArgOperand(1),
+ false, KnownZero, KnownOne,
+ KnownZero2, KnownOne2, TD, Depth);
+ break;
+ }
+ }
+ }
}
}
@@ -702,8 +787,7 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
}
APInt ZeroBits(BitWidth, 0);
APInt OneBits(BitWidth, 0);
- ComputeMaskedBits(V, APInt::getSignBit(BitWidth), ZeroBits, OneBits, TD,
- Depth);
+ ComputeMaskedBits(V, ZeroBits, OneBits, TD, Depth);
KnownOne = OneBits[BitWidth - 1];
KnownZero = ZeroBits[BitWidth - 1];
}
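
The isPowerOfTwo() change below threads a new OrZero flag through every recursive call, distinguishing "is a power of two" from "is a power of two or zero" (shifts and and-masks preserve only the latter). A sketch of the two call forms; the wrapper names are made up, and Depth is assumed to keep a default argument as in the old declaration:

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    static bool isStrictPowerOfTwo(Value *V, const TargetData *TD) {
      return isPowerOfTwo(V, TD, /*OrZero=*/false);  // zero not allowed
    }
    static bool isPowerOfTwoOrZero(Value *V, const TargetData *TD) {
      return isPowerOfTwo(V, TD, /*OrZero=*/true);   // e.g. (1 << x) & m
    }
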
@@ -712,10 +796,15 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
-bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, unsigned Depth) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
- return CI->getValue().isPowerOf2();
- // TODO: Handle vector constants.
+bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
+ unsigned Depth) {
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ if (C->isNullValue())
+ return OrZero;
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ return CI->getValue().isPowerOf2();
+ // TODO: Handle vector constants.
+ }
// 1 << X is clearly a power of two if the one is not shifted off the end. If
// it is shifted off the end then the result is undefined.
@@ -731,21 +820,36 @@ bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, unsigned Depth) {
if (Depth++ == MaxDepth)
return false;
+ Value *X = 0, *Y = 0;
+ // A shift of a power of two is a power of two or zero.
+ if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
+ match(V, m_Shr(m_Value(X), m_Value()))))
+ return isPowerOfTwo(X, TD, /*OrZero*/true, Depth);
+
if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
- return isPowerOfTwo(ZI->getOperand(0), TD, Depth);
+ return isPowerOfTwo(ZI->getOperand(0), TD, OrZero, Depth);
if (SelectInst *SI = dyn_cast<SelectInst>(V))
- return isPowerOfTwo(SI->getTrueValue(), TD, Depth) &&
- isPowerOfTwo(SI->getFalseValue(), TD, Depth);
+ return isPowerOfTwo(SI->getTrueValue(), TD, OrZero, Depth) &&
+ isPowerOfTwo(SI->getFalseValue(), TD, OrZero, Depth);
+
+ if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
+ // A power of two and'd with anything is a power of two or zero.
+ if (isPowerOfTwo(X, TD, /*OrZero*/true, Depth) ||
+ isPowerOfTwo(Y, TD, /*OrZero*/true, Depth))
+ return true;
+ // X & (-X) is always a power of two or zero.
+ if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
+ return true;
+ return false;
+ }
// An exact divide or right shift can only shift off zero bits, so the result
// is a power of two only if the first operand is a power of two and not
// copying a sign bit (sdiv int_min, 2).
- if (match(V, m_LShr(m_Value(), m_Value())) ||
- match(V, m_UDiv(m_Value(), m_Value()))) {
- PossiblyExactOperator *PEO = cast<PossiblyExactOperator>(V);
- if (PEO->isExact())
- return isPowerOfTwo(PEO->getOperand(0), TD, Depth);
+ if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
+ match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
+ return isPowerOfTwo(cast<Operator>(V)->getOperand(0), TD, OrZero, Depth);
}
return false;
@@ -767,7 +871,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
}
// The remaining tests are all recursive, so bail out if we hit the limit.
- if (Depth++ == MaxDepth)
+ if (Depth++ >= MaxDepth)
return false;
unsigned BitWidth = getBitWidth(V->getType(), TD);
@@ -785,13 +889,13 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
// if the lowest bit is shifted off the end.
if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
// shl nuw can't remove any non-zero bits.
- BinaryOperator *BO = cast<BinaryOperator>(V);
+ OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
if (BO->hasNoUnsignedWrap())
return isKnownNonZero(X, TD, Depth);
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(X, APInt(BitWidth, 1), KnownZero, KnownOne, TD, Depth);
+ ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
if (KnownOne[0])
return true;
}
@@ -799,7 +903,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
// defined if the sign bit is shifted off the end.
else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
// shr exact can only shift out zero bits.
- BinaryOperator *BO = cast<BinaryOperator>(V);
+ PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
if (BO->isExact())
return isKnownNonZero(X, TD, Depth);
@@ -809,10 +913,8 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
return true;
}
// div exact can only produce a zero if the dividend is zero.
- else if (match(V, m_IDiv(m_Value(X), m_Value()))) {
- BinaryOperator *BO = cast<BinaryOperator>(V);
- if (BO->isExact())
- return isKnownNonZero(X, TD, Depth);
+ else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
+ return isKnownNonZero(X, TD, Depth);
}
// X + Y.
else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
@@ -835,20 +937,29 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
APInt Mask = APInt::getSignedMaxValue(BitWidth);
// The sign bit of X is set. If some other bit is set then X is not equal
// to INT_MIN.
- ComputeMaskedBits(X, Mask, KnownZero, KnownOne, TD, Depth);
+ ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
if ((KnownOne & Mask) != 0)
return true;
// The sign bit of Y is set. If some other bit is set then Y is not equal
// to INT_MIN.
- ComputeMaskedBits(Y, Mask, KnownZero, KnownOne, TD, Depth);
+ ComputeMaskedBits(Y, KnownZero, KnownOne, TD, Depth);
if ((KnownOne & Mask) != 0)
return true;
}
// The sum of a non-negative number and a power of two is not zero.
- if (XKnownNonNegative && isPowerOfTwo(Y, TD, Depth))
+ if (XKnownNonNegative && isPowerOfTwo(Y, TD, /*OrZero*/false, Depth))
return true;
- if (YKnownNonNegative && isPowerOfTwo(X, TD, Depth))
+ if (YKnownNonNegative && isPowerOfTwo(X, TD, /*OrZero*/false, Depth))
+ return true;
+ }
+ // X * Y.
+ else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
+ OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
+ // If X and Y are non-zero then so is X * Y as long as the multiplication
+ // does not overflow.
+ if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
+ isKnownNonZero(X, TD, Depth) && isKnownNonZero(Y, TD, Depth))
return true;
}
// (C ? X : Y) != 0 if X != 0 and Y != 0.
@@ -861,8 +972,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
if (!BitWidth) return false;
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(V, APInt::getAllOnesValue(BitWidth), KnownZero, KnownOne,
- TD, Depth);
+ ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
return KnownOne != 0;
}
@@ -878,7 +988,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
const TargetData *TD, unsigned Depth) {
APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
- ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
+ ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
return (KnownZero & Mask) == Mask;
}
@@ -917,30 +1027,28 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
- case Instruction::AShr:
+ case Instruction::AShr: {
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
- // ashr X, C -> adds C sign bits.
- if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
- Tmp += C->getZExtValue();
+ // ashr X, C -> adds C sign bits. Vectors too.
+ const APInt *ShAmt;
+ if (match(U->getOperand(1), m_APInt(ShAmt))) {
+ Tmp += ShAmt->getZExtValue();
if (Tmp > TyBits) Tmp = TyBits;
}
- // vector ashr X, <C, C, C, C> -> adds C sign bits
- if (ConstantVector *C = dyn_cast<ConstantVector>(U->getOperand(1))) {
- if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
- Tmp += CI->getZExtValue();
- if (Tmp > TyBits) Tmp = TyBits;
- }
- }
return Tmp;
- case Instruction::Shl:
- if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
+ }
+ case Instruction::Shl: {
+ const APInt *ShAmt;
+ if (match(U->getOperand(1), m_APInt(ShAmt))) {
// shl destroys sign bits.
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
- if (C->getZExtValue() >= TyBits || // Bad shift.
- C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
- return Tmp - C->getZExtValue();
+ Tmp2 = ShAmt->getZExtValue();
+ if (Tmp2 >= TyBits || // Bad shift.
+ Tmp2 >= Tmp) break; // Shifted all sign bits out.
+ return Tmp - Tmp2;
}
break;
+ }
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: // NOT is handled here.
@@ -971,13 +1079,11 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
if (CRHS->isAllOnesValue()) {
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
- APInt Mask = APInt::getAllOnesValue(TyBits);
- ComputeMaskedBits(U->getOperand(0), Mask, KnownZero, KnownOne, TD,
- Depth+1);
+ ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
- if ((KnownZero | APInt(TyBits, 1)) == Mask)
+ if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
return TyBits;
// If we are subtracting one from a positive number, there is no carry
@@ -998,12 +1104,10 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
if (CLHS->isNullValue()) {
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
- APInt Mask = APInt::getAllOnesValue(TyBits);
- ComputeMaskedBits(U->getOperand(1), Mask, KnownZero, KnownOne,
- TD, Depth+1);
+ ComputeMaskedBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
- if ((KnownZero | APInt(TyBits, 1)) == Mask)
+ if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
return TyBits;
// If the input is known to be positive (the sign bit is known clear),
@@ -1045,8 +1149,8 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
// Finally, if we can prove that the top bits of the result are 0's or 1's,
// use this information.
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
- APInt Mask = APInt::getAllOnesValue(TyBits);
- ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
+ APInt Mask;
+ ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
if (KnownZero.isNegative()) { // sign bit is 0
Mask = KnownZero;
@@ -1282,23 +1386,21 @@ Value *llvm::isBytewiseValue(Value *V) {
}
}
- // A ConstantArray is splatable if all its members are equal and also
- // splatable.
- if (ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
- if (CA->getNumOperands() == 0)
- return 0;
-
- Value *Val = isBytewiseValue(CA->getOperand(0));
+ // A ConstantDataArray/Vector is splatable if all its members are equal and
+ // also splatable.
+ if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
+ Value *Elt = CA->getElementAsConstant(0);
+ Value *Val = isBytewiseValue(Elt);
if (!Val)
return 0;
- for (unsigned I = 1, E = CA->getNumOperands(); I != E; ++I)
- if (CA->getOperand(I-1) != CA->getOperand(I))
+ for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
+ if (CA->getElementAsConstant(I) != Elt)
return 0;
return Val;
}
-
+
// Conceptually, we could handle things like:
// %a = zext i8 %X to i16
// %b = shl i16 %a, 8
@@ -1395,50 +1497,44 @@ static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
Instruction *InsertBefore) {
// Nothing to index? Just return V then (this is useful at the end of our
- // recursion)
+ // recursion).
if (idx_range.empty())
return V;
- // We have indices, so V should have an indexable type
- assert((V->getType()->isStructTy() || V->getType()->isArrayTy())
- && "Not looking at a struct or array?");
- assert(ExtractValueInst::getIndexedType(V->getType(), idx_range)
- && "Invalid indices for type?");
- CompositeType *PTy = cast<CompositeType>(V->getType());
-
- if (isa<UndefValue>(V))
- return UndefValue::get(ExtractValueInst::getIndexedType(PTy,
- idx_range));
- else if (isa<ConstantAggregateZero>(V))
- return Constant::getNullValue(ExtractValueInst::getIndexedType(PTy,
- idx_range));
- else if (Constant *C = dyn_cast<Constant>(V)) {
- if (isa<ConstantArray>(C) || isa<ConstantStruct>(C))
- // Recursively process this constant
- return FindInsertedValue(C->getOperand(idx_range[0]), idx_range.slice(1),
- InsertBefore);
- } else if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
+ // We have indices, so V should have an indexable type.
+ assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
+ "Not looking at a struct or array?");
+ assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
+ "Invalid indices for type?");
+
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ C = C->getAggregateElement(idx_range[0]);
+ if (C == 0) return 0;
+ return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
+ }
+
+ if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
// Loop the indices for the insertvalue instruction in parallel with the
// requested indices
const unsigned *req_idx = idx_range.begin();
for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
i != e; ++i, ++req_idx) {
if (req_idx == idx_range.end()) {
- if (InsertBefore)
- // The requested index identifies a part of a nested aggregate. Handle
- // this specially. For example,
- // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
- // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
- // %C = extractvalue {i32, { i32, i32 } } %B, 1
- // This can be changed into
- // %A = insertvalue {i32, i32 } undef, i32 10, 0
- // %C = insertvalue {i32, i32 } %A, i32 11, 1
- // which allows the unused 0,0 element from the nested struct to be
- // removed.
- return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
- InsertBefore);
- else
- // We can't handle this without inserting insertvalues
+ // We can't handle this without inserting insertvalues
+ if (!InsertBefore)
return 0;
+
+ // The requested index identifies a part of a nested aggregate. Handle
+ // this specially. For example,
+ // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
+ // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
+ // %C = extractvalue {i32, { i32, i32 } } %B, 1
+ // This can be changed into
+ // %A = insertvalue {i32, i32 } undef, i32 10, 0
+ // %C = insertvalue {i32, i32 } %A, i32 11, 1
+ // which allows the unused 0,0 element from the nested struct to be
+ // removed.
+ return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
+ InsertBefore);
}
      // This insertvalue inserts something other than what we are looking for.
@@ -1454,7 +1550,9 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
return FindInsertedValue(I->getInsertedValueOperand(),
makeArrayRef(req_idx, idx_range.end()),
InsertBefore);
- } else if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
+ }
+
+ if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
// something else, we can extract from that something else directly instead.
// However, we will need to chain I's indices with the requested indices.
@@ -1486,7 +1584,8 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
const TargetData &TD) {
Operator *PtrOp = dyn_cast<Operator>(Ptr);
- if (PtrOp == 0) return Ptr;
+ if (PtrOp == 0 || Ptr->getType()->isVectorTy())
+ return Ptr;
// Just look through bitcasts.
if (PtrOp->getOpcode() == Instruction::BitCast)
@@ -1521,34 +1620,19 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
}
-/// GetConstantStringInfo - This function computes the length of a
+/// getConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false.
-bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
- uint64_t Offset,
- bool StopAtNul) {
- // If V is NULL then return false;
- if (V == NULL) return false;
-
- // Look through bitcast instructions.
- if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
- return GetConstantStringInfo(BCI->getOperand(0), Str, Offset, StopAtNul);
-
- // If the value is not a GEP instruction nor a constant expression with a
- // GEP instruction, then return false because ConstantArray can't occur
- // any other way
- const User *GEP = 0;
- if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
- GEP = GEPI;
- } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- if (CE->getOpcode() == Instruction::BitCast)
- return GetConstantStringInfo(CE->getOperand(0), Str, Offset, StopAtNul);
- if (CE->getOpcode() != Instruction::GetElementPtr)
- return false;
- GEP = CE;
- }
+bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
+ uint64_t Offset, bool TrimAtNul) {
+ assert(V);
+
+ // Look through bitcast instructions and geps.
+ V = V->stripPointerCasts();
- if (GEP) {
+  // If the value is a GEP instruction or constant expression, treat it as an
+ // offset.
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Make sure the GEP has exactly three arguments.
if (GEP->getNumOperands() != 3)
return false;
@@ -1573,51 +1657,48 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
StartIdx = CI->getZExtValue();
else
return false;
- return GetConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset,
- StopAtNul);
+ return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset);
}
-
+
// The GEP instruction, constant or instruction, must reference a global
// variable that is a constant and is initialized. The referenced constant
// initializer is the array that we'll use for optimization.
- const GlobalVariable* GV = dyn_cast<GlobalVariable>(V);
+ const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
return false;
- const Constant *GlobalInit = GV->getInitializer();
-
- // Handle the ConstantAggregateZero case
- if (isa<ConstantAggregateZero>(GlobalInit)) {
+
+ // Handle the all-zeros case
+ if (GV->getInitializer()->isNullValue()) {
// This is a degenerate case. The initializer is constant zero so the
// length of the string must be zero.
- Str.clear();
+ Str = "";
return true;
}
// Must be a Constant Array
- const ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
- if (Array == 0 || !Array->getType()->getElementType()->isIntegerTy(8))
+ const ConstantDataArray *Array =
+ dyn_cast<ConstantDataArray>(GV->getInitializer());
+ if (Array == 0 || !Array->isString())
return false;
// Get the number of elements in the array
- uint64_t NumElts = Array->getType()->getNumElements();
-
+ uint64_t NumElts = Array->getType()->getArrayNumElements();
+
+ // Start out with the entire array in the StringRef.
+ Str = Array->getAsString();
+
if (Offset > NumElts)
return false;
- // Traverse the constant array from 'Offset' which is the place the GEP refers
- // to in the array.
- Str.reserve(NumElts-Offset);
- for (unsigned i = Offset; i != NumElts; ++i) {
- const Constant *Elt = Array->getOperand(i);
- const ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
- if (!CI) // This array isn't suitable, non-int initializer.
- return false;
- if (StopAtNul && CI->isZero())
- return true; // we found end of string, success!
- Str += (char)CI->getZExtValue();
- }
+ // Skip over 'offset' bytes.
+ Str = Str.substr(Offset);
- // The array isn't null terminated, but maybe this is a memcpy, not a strcpy.
+ if (TrimAtNul) {
+ // Trim off the \0 and anything after it. If the array is not nul
+    // terminated, we just return the whole string.  The client may know
+ // some other way that the string is length-bound.
+ Str = Str.substr(0, Str.find('\0'));
+ }
return true;
}
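
A hypothetical caller of the new StringRef-based getConstantStringInfo(); the default arguments are assumed to be Offset = 0 and TrimAtNul = true, matching the behavior shown above:

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/ADT/StringRef.h"
    using namespace llvm;

    // Returns the constant C string V points to, or an empty StringRef.
    static StringRef constantCString(const Value *V) {
      StringRef Str;
      if (!getConstantStringInfo(V, Str))
        return StringRef();
      return Str;  // already trimmed at the first '\0' by default
    }
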
@@ -1629,8 +1710,7 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
// Look through noop bitcast instructions.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
- return GetStringLengthH(BCI->getOperand(0), PHIs);
+ V = V->stripPointerCasts();
// If this is a PHI node, there are two cases: either we have already seen it
// or we haven't.
@@ -1666,75 +1746,13 @@ static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
if (Len1 != Len2) return 0;
return Len1;
}
-
- // If the value is not a GEP instruction nor a constant expression with a
- // GEP instruction, then return unknown.
- User *GEP = 0;
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
- GEP = GEPI;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- if (CE->getOpcode() != Instruction::GetElementPtr)
- return 0;
- GEP = CE;
- } else {
- return 0;
- }
-
- // Make sure the GEP has exactly three arguments.
- if (GEP->getNumOperands() != 3)
- return 0;
-
- // Check to make sure that the first operand of the GEP is an integer and
- // has value 0 so that we are sure we're indexing into the initializer.
- if (ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
- if (!Idx->isZero())
- return 0;
- } else
- return 0;
-
- // If the second index isn't a ConstantInt, then this is a variable index
- // into the array. If this occurs, we can't say anything meaningful about
- // the string.
- uint64_t StartIdx = 0;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
- StartIdx = CI->getZExtValue();
- else
- return 0;
-
- // The GEP instruction, constant or instruction, must reference a global
- // variable that is a constant and is initialized. The referenced constant
- // initializer is the array that we'll use for optimization.
- GlobalVariable* GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
- if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
- GV->mayBeOverridden())
+
+ // Otherwise, see if we can read the string.
+ StringRef StrData;
+ if (!getConstantStringInfo(V, StrData))
return 0;
- Constant *GlobalInit = GV->getInitializer();
-
- // Handle the ConstantAggregateZero case, which is a degenerate case. The
- // initializer is constant zero so the length of the string must be zero.
- if (isa<ConstantAggregateZero>(GlobalInit))
- return 1; // Len = 0 offset by 1.
-
- // Must be a Constant Array
- ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
- if (!Array || !Array->getType()->getElementType()->isIntegerTy(8))
- return false;
-
- // Get the number of elements in the array
- uint64_t NumElts = Array->getType()->getNumElements();
-
- // Traverse the constant array from StartIdx (derived above) which is
- // the place the GEP refers to in the array.
- for (unsigned i = StartIdx; i != NumElts; ++i) {
- Constant *Elt = Array->getOperand(i);
- ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
- if (!CI) // This array isn't suitable, non-int initializer.
- return 0;
- if (CI->isZero())
- return i-StartIdx+1; // We found end of string, success!
- }
- return 0; // The array isn't null terminated, conservatively return 'unknown'.
+ return StrData.size()+1;
}
/// GetStringLength - If we can compute the length of the string pointed to by
@@ -1793,3 +1811,94 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
}
return true;
}
+
+bool llvm::isSafeToSpeculativelyExecute(const Value *V,
+ const TargetData *TD) {
+ const Operator *Inst = dyn_cast<Operator>(V);
+ if (!Inst)
+ return false;
+
+ for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
+ if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
+ if (C->canTrap())
+ return false;
+
+ switch (Inst->getOpcode()) {
+ default:
+ return true;
+ case Instruction::UDiv:
+ case Instruction::URem:
+    // x / y is undefined if y == 0, but calculations like x / 3 are safe.
+ return isKnownNonZero(Inst->getOperand(1), TD);
+ case Instruction::SDiv:
+ case Instruction::SRem: {
+ Value *Op = Inst->getOperand(1);
+ // x / y is undefined if y == 0
+ if (!isKnownNonZero(Op, TD))
+ return false;
+ // x / y might be undefined if y == -1
+ unsigned BitWidth = getBitWidth(Op->getType(), TD);
+ if (BitWidth == 0)
+ return false;
+ APInt KnownZero(BitWidth, 0);
+ APInt KnownOne(BitWidth, 0);
+ ComputeMaskedBits(Op, KnownZero, KnownOne, TD);
+ return !!KnownZero;
+ }
+ case Instruction::Load: {
+ const LoadInst *LI = cast<LoadInst>(Inst);
+ if (!LI->isUnordered())
+ return false;
+ return LI->getPointerOperand()->isDereferenceablePointer();
+ }
+ case Instruction::Call: {
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+ switch (II->getIntrinsicID()) {
+ // These synthetic intrinsics have no side-effects, and just mark
+ // information about their operands.
+ // FIXME: There are other no-op synthetic instructions that potentially
+ // should be considered at least *safe* to speculate...
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ return true;
+
+ case Intrinsic::bswap:
+ case Intrinsic::ctlz:
+ case Intrinsic::ctpop:
+ case Intrinsic::cttz:
+ case Intrinsic::objectsize:
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ case Intrinsic::usub_with_overflow:
+ return true;
+ // TODO: some fp intrinsics are marked as having the same error handling
+ // as libm. They're safe to speculate when they won't error.
+ // TODO: are convert_{from,to}_fp16 safe?
+ // TODO: can we list target-specific intrinsics here?
+ default: break;
+ }
+ }
+ return false; // The called function could have undefined behavior or
+ // side-effects, even if marked readnone nounwind.
+ }
+ case Instruction::VAArg:
+ case Instruction::Alloca:
+ case Instruction::Invoke:
+ case Instruction::PHI:
+ case Instruction::Store:
+ case Instruction::Ret:
+ case Instruction::Br:
+ case Instruction::IndirectBr:
+ case Instruction::Switch:
+ case Instruction::Unreachable:
+ case Instruction::Fence:
+ case Instruction::LandingPad:
+ case Instruction::AtomicRMW:
+ case Instruction::AtomicCmpXchg:
+ case Instruction::Resume:
+ return false; // Misc instructions which have effects
+ }
+}
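
Taken together, these ValueTracking hunks drop the Mask parameter from ComputeMaskedBits(); callers that only care about certain bits now mask the result themselves. A minimal sketch of an adapted call site (the helper name is invented; the signatures follow the hunks above):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    // Returns true if the low NumBits of V are known to be zero.
    static bool lowBitsAreZero(Value *V, unsigned NumBits, const TargetData *TD) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      // Old API: ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
      ComputeMaskedBits(V, KnownZero, KnownOne, TD);
      APInt Mask = APInt::getLowBitsSet(BitWidth, NumBits);
      return (KnownZero & Mask) == Mask;
    }
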
diff --git a/contrib/llvm/lib/Archive/ArchiveReader.cpp b/contrib/llvm/lib/Archive/ArchiveReader.cpp
index eef6fe0..68873e2 100644
--- a/contrib/llvm/lib/Archive/ArchiveReader.cpp
+++ b/contrib/llvm/lib/Archive/ArchiveReader.cpp
@@ -12,9 +12,11 @@
//===----------------------------------------------------------------------===//
#include "ArchiveInternals.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Module.h"
+#include <cstdio>
#include <cstdlib>
#include <memory>
using namespace llvm;
@@ -504,7 +506,7 @@ Archive::findModuleDefiningSymbol(const std::string& symbol,
// Modules that define those symbols.
bool
Archive::findModulesDefiningSymbols(std::set<std::string>& symbols,
- std::set<Module*>& result,
+ SmallVectorImpl<Module*>& result,
std::string* error) {
if (!mapfile || !base) {
if (error)
@@ -569,21 +571,26 @@ Archive::findModulesDefiningSymbols(std::set<std::string>& symbols,
// At this point we have a valid symbol table (one way or another) so we
// just use it to quickly find the symbols requested.
+ SmallPtrSet<Module*, 16> Added;
for (std::set<std::string>::iterator I=symbols.begin(),
- E=symbols.end(); I != E;) {
+ Next = I,
+ E=symbols.end(); I != E; I = Next) {
+ // Increment Next before we invalidate it.
+ ++Next;
+
// See if this symbol exists
Module* m = findModuleDefiningSymbol(*I,error);
- if (m) {
- // The symbol exists, insert the Module into our result, duplicates will
- // be ignored.
- result.insert(m);
-
- // Remove the symbol now that its been resolved, being careful to
- // post-increment the iterator.
- symbols.erase(I++);
- } else {
- ++I;
- }
+ if (!m)
+ continue;
+ bool NewMember = Added.insert(m);
+ if (!NewMember)
+ continue;
+
+ // The symbol exists, insert the Module into our result.
+ result.push_back(m);
+
+    // Remove the symbol now that it's been resolved.
+ symbols.erase(I);
}
return true;
}
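
The loop rewrite above uses the classic erase-safe iteration idiom: advance a second iterator before any erase so the loop variable is never invalidated. Reduced to standard containers (the resolved() predicate is a stand-in):

    #include <set>
    #include <string>

    static bool resolved(const std::string &S) { return !S.empty(); }

    static void pruneResolved(std::set<std::string> &symbols) {
      for (std::set<std::string>::iterator I = symbols.begin(), Next = I,
                                           E = symbols.end(); I != E; I = Next) {
        ++Next;               // increment Next before we invalidate I
        if (resolved(*I))
          symbols.erase(I);   // erase() only invalidates the erased node
      }
    }
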
diff --git a/contrib/llvm/lib/Archive/ArchiveWriter.cpp b/contrib/llvm/lib/Archive/ArchiveWriter.cpp
index 8fcc7aa..9ef2943 100644
--- a/contrib/llvm/lib/Archive/ArchiveWriter.cpp
+++ b/contrib/llvm/lib/Archive/ArchiveWriter.cpp
@@ -182,11 +182,11 @@ Archive::addFileBefore(const sys::Path& filePath, iterator where,
if (hasSlash || filePath.str().length() > 15)
flags |= ArchiveMember::HasLongFilenameFlag;
- sys::LLVMFileType type;
+ sys::fs::file_magic type;
if (sys::fs::identify_magic(mbr->path.str(), type))
- type = sys::Unknown_FileType;
+ type = sys::fs::file_magic::unknown;
switch (type) {
- case sys::Bitcode_FileType:
+ case sys::fs::file_magic::bitcode:
flags |= ArchiveMember::BitcodeFlag;
break;
default:
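
The identify_magic() migration in this hunk follows the pattern sketched below; only the unknown and bitcode enumerators visible in the patch are assumed, and the wrapper is illustrative:

    #include "llvm/Support/FileSystem.h"
    #include <string>
    using namespace llvm;

    static bool isBitcodeFile(const std::string &Path) {
      sys::fs::file_magic type;
      if (sys::fs::identify_magic(Path, type))  // non-zero error_code
        type = sys::fs::file_magic::unknown;    // treat errors as unknown
      return type == sys::fs::file_magic::bitcode;
    }
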
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.cpp b/contrib/llvm/lib/AsmParser/LLLexer.cpp
index d0dd986..8818168 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.cpp
+++ b/contrib/llvm/lib/AsmParser/LLLexer.cpp
@@ -29,7 +29,7 @@
using namespace llvm;
bool LLLexer::Error(LocTy ErrorLoc, const Twine &Msg) const {
- ErrorInfo = SM.GetMessage(ErrorLoc, Msg, "error");
+ ErrorInfo = SM.GetMessage(ErrorLoc, SourceMgr::DK_Error, Msg);
return true;
}
@@ -55,18 +55,22 @@ uint64_t LLLexer::atoull(const char *Buffer, const char *End) {
return Result;
}
+static char parseHexChar(char C) {
+ if (C >= '0' && C <= '9')
+ return C-'0';
+ if (C >= 'A' && C <= 'F')
+ return C-'A'+10;
+ if (C >= 'a' && C <= 'f')
+ return C-'a'+10;
+ return 0;
+}
+
uint64_t LLLexer::HexIntToVal(const char *Buffer, const char *End) {
uint64_t Result = 0;
for (; Buffer != End; ++Buffer) {
uint64_t OldRes = Result;
Result *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Result += C-'0';
- else if (C >= 'A' && C <= 'F')
- Result += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Result += C-'a'+10;
+ Result += parseHexChar(*Buffer);
if (Result < OldRes) { // Uh, oh, overflow detected!!!
Error("constant bigger than 64 bits detected!");
@@ -82,24 +86,12 @@ void LLLexer::HexToIntPair(const char *Buffer, const char *End,
for (int i=0; i<16; i++, Buffer++) {
assert(Buffer != End);
Pair[0] *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Pair[0] += C-'0';
- else if (C >= 'A' && C <= 'F')
- Pair[0] += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Pair[0] += C-'a'+10;
+ Pair[0] += parseHexChar(*Buffer);
}
Pair[1] = 0;
for (int i=0; i<16 && Buffer != End; i++, Buffer++) {
Pair[1] *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Pair[1] += C-'0';
- else if (C >= 'A' && C <= 'F')
- Pair[1] += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Pair[1] += C-'a'+10;
+ Pair[1] += parseHexChar(*Buffer);
}
if (Buffer != End)
Error("constant bigger than 128 bits detected!");
@@ -113,24 +105,12 @@ void LLLexer::FP80HexToIntPair(const char *Buffer, const char *End,
for (int i=0; i<4 && Buffer != End; i++, Buffer++) {
assert(Buffer != End);
Pair[1] *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Pair[1] += C-'0';
- else if (C >= 'A' && C <= 'F')
- Pair[1] += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Pair[1] += C-'a'+10;
+ Pair[1] += parseHexChar(*Buffer);
}
Pair[0] = 0;
for (int i=0; i<16; i++, Buffer++) {
Pair[0] *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Pair[0] += C-'0';
- else if (C >= 'A' && C <= 'F')
- Pair[0] += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Pair[0] += C-'a'+10;
+ Pair[0] += parseHexChar(*Buffer);
}
if (Buffer != End)
Error("constant bigger than 128 bits detected!");
@@ -149,9 +129,7 @@ static void UnEscapeLexed(std::string &Str) {
*BOut++ = '\\'; // Two \ becomes one
BIn += 2;
} else if (BIn < EndBuffer-2 && isxdigit(BIn[1]) && isxdigit(BIn[2])) {
- char Tmp = BIn[3]; BIn[3] = 0; // Terminate string
- *BOut = (char)strtol(BIn+1, 0, 16); // Convert to number
- BIn[3] = Tmp; // Restore character
+ *BOut = parseHexChar(BIn[1]) * 16 + parseHexChar(BIn[2]);
BIn += 3; // Skip over handled chars
++BOut;
} else {
@@ -503,6 +481,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(tail);
KEYWORD(target);
KEYWORD(triple);
+ KEYWORD(unwind);
KEYWORD(deplibs);
KEYWORD(datalayout);
KEYWORD(volatile);
@@ -570,6 +549,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(noimplicitfloat);
KEYWORD(naked);
KEYWORD(nonlazybind);
+ KEYWORD(address_safety);
KEYWORD(type);
KEYWORD(opaque);
@@ -596,6 +576,7 @@ lltok::Kind LLLexer::LexIdentifier() {
if (Len == strlen(STR) && !memcmp(StartChar, STR, strlen(STR))) { \
TyVal = LLVMTY; return lltok::Type; }
TYPEKEYWORD("void", Type::getVoidTy(Context));
+ TYPEKEYWORD("half", Type::getHalfTy(Context));
TYPEKEYWORD("float", Type::getFloatTy(Context));
TYPEKEYWORD("double", Type::getDoubleTy(Context));
TYPEKEYWORD("x86_fp80", Type::getX86_FP80Ty(Context));
@@ -642,7 +623,6 @@ lltok::Kind LLLexer::LexIdentifier() {
INSTKEYWORD(indirectbr, IndirectBr);
INSTKEYWORD(invoke, Invoke);
INSTKEYWORD(resume, Resume);
- INSTKEYWORD(unwind, Unwind);
INSTKEYWORD(unreachable, Unreachable);
INSTKEYWORD(alloca, Alloca);
@@ -715,7 +695,7 @@ lltok::Kind LLLexer::Lex0x() {
if (Kind == 'J') {
// HexFPConstant - Floating point constant represented in IEEE format as a
// hexadecimal number for when exponential notation is not precise enough.
- // Float and double only.
+ // Half, Float, and double only.
APFloatVal = APFloat(BitsToDouble(HexIntToVal(TokStart+2, CurPtr)));
return lltok::APFloat;
}
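
The parseHexChar() factoring above replaces three copies of the same character ladder. A stand-alone sketch of the helper plus the overflow-checked accumulation it feeds, mirroring HexIntToVal:

    #include <cstdint>

    static char parseHexChar(char C) {
      if (C >= '0' && C <= '9') return C - '0';
      if (C >= 'A' && C <= 'F') return C - 'A' + 10;
      if (C >= 'a' && C <= 'f') return C - 'a' + 10;
      return 0;                                  // non-hex digits read as 0
    }

    // Accumulate hex digits into a 64-bit value, flagging wraparound.
    static uint64_t hexToVal(const char *Buf, const char *End, bool &Overflow) {
      uint64_t Result = 0;
      Overflow = false;
      for (; Buf != End; ++Buf) {
        uint64_t Old = Result;
        Result = Result * 16 + parseHexChar(*Buf);
        if (Result < Old)
          Overflow = true;                       // wrapped past 64 bits
      }
      return Result;
    }
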
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.h b/contrib/llvm/lib/AsmParser/LLLexer.h
index 33b9135..09aea5b 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.h
+++ b/contrib/llvm/lib/AsmParser/LLLexer.h
@@ -42,7 +42,6 @@ namespace llvm {
APFloat APFloatVal;
APSInt APSIntVal;
- std::string TheError;
public:
explicit LLLexer(MemoryBuffer *StartBuf, SourceMgr &SM, SMDiagnostic &,
LLVMContext &C);
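
The LLParser hunks that follow replace raw unsigned attribute words with the typed Attributes bitmask. A minimal sketch of the new usage, assuming only the Attribute enumerators named in the hunks:

    #include "llvm/Attributes.h"
    using namespace llvm;

    // Build a function-attribute set the way the parser now does.
    static Attributes buildFnAttrs(bool Naked, bool AddressSafety) {
      Attributes Attrs = Attribute::None;       // was: unsigned Attrs = 0;
      if (Naked)
        Attrs |= Attribute::Naked;
      if (AddressSafety)
        Attrs |= Attribute::AddressSafety;      // keyword added in this patch
      return Attrs;
    }
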
diff --git a/contrib/llvm/lib/AsmParser/LLParser.cpp b/contrib/llvm/lib/AsmParser/LLParser.cpp
index cafaab0..068be3d 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.cpp
+++ b/contrib/llvm/lib/AsmParser/LLParser.cpp
@@ -120,11 +120,6 @@ bool LLParser::ValidateEndOfModule() {
for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; )
UpgradeCallsToIntrinsic(FI++); // must be post-increment, as we remove
- // Upgrade to new EH scheme. N.B. This will go away in 3.1.
- UpgradeExceptionHandling(M);
-
- // Check debug info intrinsics.
- CheckDebugInfoIntrinsics(M);
return false;
}
@@ -879,7 +874,7 @@ bool LLParser::ParseOptionalAddrSpace(unsigned &AddrSpace) {
/// ParseOptionalAttrs - Parse a potentially empty attribute list. AttrKind
/// indicates what kind of attribute list this is: 0: function arg, 1: result,
/// 2: function attr.
-bool LLParser::ParseOptionalAttrs(unsigned &Attrs, unsigned AttrKind) {
+bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) {
Attrs = Attribute::None;
LocTy AttrLoc = Lex.getLoc();
@@ -924,6 +919,7 @@ bool LLParser::ParseOptionalAttrs(unsigned &Attrs, unsigned AttrKind) {
case lltok::kw_noimplicitfloat: Attrs |= Attribute::NoImplicitFloat; break;
case lltok::kw_naked: Attrs |= Attribute::Naked; break;
case lltok::kw_nonlazybind: Attrs |= Attribute::NonLazyBind; break;
+ case lltok::kw_address_safety: Attrs |= Attribute::AddressSafety; break;
case lltok::kw_alignstack: {
unsigned Alignment;
@@ -1047,13 +1043,11 @@ bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
case lltok::kw_cc: {
unsigned ArbitraryCC;
Lex.Lex();
- if (ParseUInt32(ArbitraryCC)) {
+ if (ParseUInt32(ArbitraryCC))
return true;
- } else
- CC = static_cast<CallingConv::ID>(ArbitraryCC);
- return false;
+ CC = static_cast<CallingConv::ID>(ArbitraryCC);
+ return false;
}
- break;
}
Lex.Lex();
@@ -1069,7 +1063,7 @@ bool LLParser::ParseInstructionMetadata(Instruction *Inst,
return TokError("expected metadata after comma");
std::string Name = Lex.getStrVal();
- unsigned MDK = M->getMDKindID(Name.c_str());
+ unsigned MDK = M->getMDKindID(Name);
Lex.Lex();
MDNode *Node;
@@ -1358,8 +1352,8 @@ bool LLParser::ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
// Parse the argument.
LocTy ArgLoc;
Type *ArgTy = 0;
- unsigned ArgAttrs1 = Attribute::None;
- unsigned ArgAttrs2 = Attribute::None;
+ Attributes ArgAttrs1;
+ Attributes ArgAttrs2;
Value *V;
if (ParseType(ArgTy, ArgLoc))
return true;
@@ -1399,7 +1393,7 @@ bool LLParser::ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList,
} else {
LocTy TypeLoc = Lex.getLoc();
Type *ArgTy = 0;
- unsigned Attrs;
+ Attributes Attrs;
std::string Name;
if (ParseType(ArgTy) ||
@@ -1466,7 +1460,7 @@ bool LLParser::ParseFunctionType(Type *&Result) {
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
if (!ArgList[i].Name.empty())
return Error(ArgList[i].Loc, "argument name invalid in function type");
- if (ArgList[i].Attrs != 0)
+ if (ArgList[i].Attrs)
return Error(ArgList[i].Loc,
"argument attributes invalid in function type");
}
@@ -1612,7 +1606,8 @@ bool LLParser::ParseArrayVectorType(Type *&Result, bool isVector) {
if ((unsigned)Size != Size)
return Error(SizeLoc, "size too large for vector");
if (!VectorType::isValidElementType(EltTy))
- return Error(TypeLoc, "vector element type must be fp or integer");
+ return Error(TypeLoc,
+ "vector element type must be fp, integer or a pointer to these types");
Result = VectorType::get(EltTy, unsigned(Size));
} else {
if (!ArrayType::isValidElementType(EltTy))
@@ -1971,9 +1966,10 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
return Error(ID.Loc, "constant vector must not be empty");
if (!Elts[0]->getType()->isIntegerTy() &&
- !Elts[0]->getType()->isFloatingPointTy())
+ !Elts[0]->getType()->isFloatingPointTy() &&
+ !Elts[0]->getType()->isPointerTy())
return Error(FirstEltLoc,
- "vector elements must have integer or floating point type");
+ "vector elements must have integer, pointer or floating point type");
// Verify that all the vector elements have the same type.
for (unsigned i = 1, e = Elts.size(); i != e; ++i)
@@ -2022,7 +2018,8 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
}
case lltok::kw_c: // c "foo"
Lex.Lex();
- ID.ConstantVal = ConstantArray::get(Context, Lex.getStrVal(), false);
+ ID.ConstantVal = ConstantDataArray::getString(Context, Lex.getStrVal(),
+ false);
if (ParseToken(lltok::StringConstant, "expected string")) return true;
ID.Kind = ValID::t_Constant;
return false;
@@ -2165,7 +2162,7 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
} else {
assert(Opc == Instruction::ICmp && "Unexpected opcode for CmpInst!");
if (!Val0->getType()->isIntOrIntVectorTy() &&
- !Val0->getType()->isPointerTy())
+ !Val0->getType()->getScalarType()->isPointerTy())
return Error(ID.Loc, "icmp requires pointer or integer operands");
ID.ConstantVal = ConstantExpr::getICmp(Pred, Val0, Val1);
}
@@ -2299,7 +2296,8 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
return true;
if (Opc == Instruction::GetElementPtr) {
- if (Elts.size() == 0 || !Elts[0]->getType()->isPointerTy())
+ if (Elts.size() == 0 ||
+ !Elts[0]->getType()->getScalarType()->isPointerTy())
return Error(ID.Loc, "getelementptr requires pointer operand");
ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end());
@@ -2440,7 +2438,6 @@ bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
return Error(ID.Loc, "functions are not values, refer to them as pointers");
switch (ID.Kind) {
- default: llvm_unreachable("Unknown ValID!");
case ValID::t_LocalID:
if (!PFS) return Error(ID.Loc, "invalid use of function-local name");
V = PFS->GetVal(ID.UIntVal, Ty, ID.Loc);
@@ -2485,13 +2482,16 @@ bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
!ConstantFP::isValueValidForType(Ty, ID.APFloatVal))
return Error(ID.Loc, "floating point constant invalid for type");
- // The lexer has no type info, so builds all float and double FP constants
- // as double. Fix this here. Long double does not need this.
- if (&ID.APFloatVal.getSemantics() == &APFloat::IEEEdouble &&
- Ty->isFloatTy()) {
+ // The lexer has no type info, so builds all half, float, and double FP
+ // constants as double. Fix this here. Long double does not need this.
+ if (&ID.APFloatVal.getSemantics() == &APFloat::IEEEdouble) {
bool Ignored;
- ID.APFloatVal.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven,
- &Ignored);
+ if (Ty->isHalfTy())
+ ID.APFloatVal.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven,
+ &Ignored);
+ else if (Ty->isFloatTy())
+ ID.APFloatVal.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven,
+ &Ignored);
}
V = ConstantFP::get(Context, ID.APFloatVal);
@@ -2549,6 +2549,7 @@ bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
return Error(ID.Loc, "constant expression type mismatch");
return false;
}
+ llvm_unreachable("Invalid ValID");
}
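
[Editor's sketch] The fix-up a few hunks above narrows double-built FP constants with APFloat::convert. A hedged, self-contained restatement of that one call, using the same rounding mode; narrowTo is an illustrative helper, not parser code:

    #include "llvm/ADT/APFloat.h"
    using namespace llvm;

    // Sketch: narrow a double-semantics APFloat to half or single precision,
    // mirroring the typed-constant fix-up performed by the parser above.
    static void narrowTo(APFloat &V, bool ToHalf) {
      bool Ignored;
      V.convert(ToHalf ? APFloat::IEEEhalf : APFloat::IEEEsingle,
                APFloat::rmNearestTiesToEven, &Ignored);
    }
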
bool LLParser::ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS) {
@@ -2585,7 +2586,8 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
LocTy LinkageLoc = Lex.getLoc();
unsigned Linkage;
- unsigned Visibility, RetAttrs;
+ unsigned Visibility;
+ Attributes RetAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc = Lex.getLoc();
@@ -2649,7 +2651,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
SmallVector<ArgInfo, 8> ArgList;
bool isVarArg;
- unsigned FuncAttrs;
+ Attributes FuncAttrs;
std::string Section;
unsigned Alignment;
std::string GC;
@@ -2835,7 +2837,7 @@ bool LLParser::ParseBasicBlock(PerFunctionState &PFS) {
}
switch (ParseInstruction(Inst, BB, PFS)) {
- default: assert(0 && "Unknown ParseInstruction result!");
+ default: llvm_unreachable("Unknown ParseInstruction result!");
case InstError: return true;
case InstNormal:
BB->getInstList().push_back(Inst);
@@ -2881,7 +2883,6 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
switch (Token) {
default: return Error(Loc, "expected instruction opcode");
// Terminator Instructions.
- case lltok::kw_unwind: Inst = new UnwindInst(Context); return false;
case lltok::kw_unreachable: Inst = new UnreachableInst(Context); return false;
case lltok::kw_ret: return ParseRet(Inst, BB, PFS);
case lltok::kw_br: return ParseBr(Inst, PFS);
@@ -2953,19 +2954,11 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_tail: return ParseCall(Inst, PFS, true);
// Memory.
case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
- case lltok::kw_load: return ParseLoad(Inst, PFS, false);
- case lltok::kw_store: return ParseStore(Inst, PFS, false);
+ case lltok::kw_load: return ParseLoad(Inst, PFS);
+ case lltok::kw_store: return ParseStore(Inst, PFS);
case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS);
case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS);
case lltok::kw_fence: return ParseFence(Inst, PFS);
- case lltok::kw_volatile:
- // For compatibility; canonical location is after load
- if (EatIfPresent(lltok::kw_load))
- return ParseLoad(Inst, PFS, true);
- else if (EatIfPresent(lltok::kw_store))
- return ParseStore(Inst, PFS, true);
- else
- return TokError("expected 'load' or 'store'");
case lltok::kw_getelementptr: return ParseGetElementPtr(Inst, PFS);
case lltok::kw_extractvalue: return ParseExtractValue(Inst, PFS);
case lltok::kw_insertvalue: return ParseInsertValue(Inst, PFS);
@@ -3169,7 +3162,7 @@ bool LLParser::ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS) {
/// OptionalAttrs 'to' TypeAndValue 'unwind' TypeAndValue
bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
LocTy CallLoc = Lex.getLoc();
- unsigned RetAttrs, FnAttrs;
+ Attributes RetAttrs, FnAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc;
@@ -3342,7 +3335,7 @@ bool LLParser::ParseCompare(Instruction *&Inst, PerFunctionState &PFS,
} else {
assert(Opc == Instruction::ICmp && "Unknown opcode for CmpInst!");
if (!LHS->getType()->isIntOrIntVectorTy() &&
- !LHS->getType()->isPointerTy())
+ !LHS->getType()->getScalarType()->isPointerTy())
return Error(Loc, "icmp requires integer operands");
Inst = new ICmpInst(CmpInst::Predicate(Pred), LHS, RHS);
}
@@ -3462,7 +3455,7 @@ bool LLParser::ParseShuffleVector(Instruction *&Inst, PerFunctionState &PFS) {
return true;
if (!ShuffleVectorInst::isValidOperands(Op0, Op1, Op2))
- return Error(Loc, "invalid extractelement operands");
+ return Error(Loc, "invalid shufflevector operands");
Inst = new ShuffleVectorInst(Op0, Op1, Op2);
return false;
@@ -3568,7 +3561,7 @@ bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) {
/// ParameterList OptionalAttrs
bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
bool isTail) {
- unsigned RetAttrs, FnAttrs;
+ Attributes RetAttrs, FnAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc;
@@ -3689,10 +3682,7 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
/// ::= 'load' 'volatile'? TypeAndValue (',' 'align' i32)?
/// ::= 'load' 'atomic' 'volatile'? TypeAndValue
/// 'singlethread'? AtomicOrdering (',' 'align' i32)?
-/// Compatibility:
-/// ::= 'volatile' 'load' TypeAndValue (',' 'align' i32)?
-int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
+int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
Value *Val; LocTy Loc;
unsigned Alignment = 0;
bool AteExtraComma = false;
@@ -3701,15 +3691,12 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
SynchronizationScope Scope = CrossThread;
if (Lex.getKind() == lltok::kw_atomic) {
- if (isVolatile)
- return TokError("mixing atomic with old volatile placement");
isAtomic = true;
Lex.Lex();
}
+ bool isVolatile = false;
if (Lex.getKind() == lltok::kw_volatile) {
- if (isVolatile)
- return TokError("duplicate volatile before and after store");
isVolatile = true;
Lex.Lex();
}
@@ -3736,10 +3723,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
/// ::= 'store' 'volatile'? TypeAndValue ',' TypeAndValue (',' 'align' i32)?
/// ::= 'store' 'atomic' 'volatile'? TypeAndValue ',' TypeAndValue
/// 'singlethread'? AtomicOrdering (',' 'align' i32)?
-/// Compatibility:
-/// ::= 'volatile' 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
-int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
+int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
Value *Val, *Ptr; LocTy Loc, PtrLoc;
unsigned Alignment = 0;
bool AteExtraComma = false;
@@ -3748,15 +3732,12 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
SynchronizationScope Scope = CrossThread;
if (Lex.getKind() == lltok::kw_atomic) {
- if (isVolatile)
- return TokError("mixing atomic with old volatile placement");
isAtomic = true;
Lex.Lex();
}
+ bool isVolatile = false;
if (Lex.getKind() == lltok::kw_volatile) {
- if (isVolatile)
- return TokError("duplicate volatile before and after store");
isVolatile = true;
Lex.Lex();
}
@@ -3902,13 +3883,15 @@ int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
/// ParseGetElementPtr
/// ::= 'getelementptr' 'inbounds'? TypeAndValue (',' TypeAndValue)*
int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
- Value *Ptr, *Val; LocTy Loc, EltLoc;
+ Value *Ptr = 0;
+ Value *Val = 0;
+ LocTy Loc, EltLoc;
bool InBounds = EatIfPresent(lltok::kw_inbounds);
if (ParseTypeAndValue(Ptr, Loc, PFS)) return true;
- if (!Ptr->getType()->isPointerTy())
+ if (!Ptr->getType()->getScalarType()->isPointerTy())
return Error(Loc, "base of getelementptr must be a pointer");
SmallVector<Value*, 16> Indices;
@@ -3919,11 +3902,23 @@ int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
break;
}
if (ParseTypeAndValue(Val, EltLoc, PFS)) return true;
- if (!Val->getType()->isIntegerTy())
+ if (!Val->getType()->getScalarType()->isIntegerTy())
return Error(EltLoc, "getelementptr index must be an integer");
+ if (Val->getType()->isVectorTy() != Ptr->getType()->isVectorTy())
+ return Error(EltLoc, "getelementptr index type missmatch");
+ if (Val->getType()->isVectorTy()) {
+ unsigned ValNumEl = cast<VectorType>(Val->getType())->getNumElements();
+ unsigned PtrNumEl = cast<VectorType>(Ptr->getType())->getNumElements();
+ if (ValNumEl != PtrNumEl)
+ return Error(EltLoc,
+ "getelementptr vector index has a wrong number of elements");
+ }
Indices.push_back(Val);
}
+ if (Val && Val->getType()->isVectorTy() && Indices.size() != 1)
+ return Error(EltLoc, "vector getelementptrs must have a single index");
+
if (!GetElementPtrInst::getIndexedType(Ptr->getType(), Indices))
return Error(Loc, "invalid getelementptr indices");
Inst = GetElementPtrInst::Create(Ptr, Indices);
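
[Editor's sketch] The ParseGetElementPtr hunk above gathers the new operand rules for scalar and vector GEPs. As a hedged restatement, not the parser's actual code, the checks reduce to roughly the following (isValidGEPOperands is an illustrative name, not an LLVM API):

    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Sketch: the operand checks now applied to 'getelementptr'.
    static bool isValidGEPOperands(Type *PtrTy, Type *IdxTy) {
      // The base may be a pointer or a vector of pointers.
      if (!PtrTy->getScalarType()->isPointerTy())
        return false;
      // Each index may be an integer or a vector of integers.
      if (!IdxTy->getScalarType()->isIntegerTy())
        return false;
      // Base and index must agree on scalar vs. vector form...
      if (PtrTy->isVectorTy() != IdxTy->isVectorTy())
        return false;
      // ...and vector operands must have matching element counts.
      if (PtrTy->isVectorTy() &&
          cast<VectorType>(PtrTy)->getNumElements() !=
              cast<VectorType>(IdxTy)->getNumElements())
        return false;
      return true;
    }
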
diff --git a/contrib/llvm/lib/AsmParser/LLParser.h b/contrib/llvm/lib/AsmParser/LLParser.h
index cbc3c23..dda8808 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.h
+++ b/contrib/llvm/lib/AsmParser/LLParser.h
@@ -15,6 +15,7 @@
#define LLVM_ASMPARSER_LLPARSER_H
#include "LLLexer.h"
+#include "llvm/Attributes.h"
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
@@ -171,7 +172,7 @@ namespace llvm {
return ParseUInt32(Val);
}
bool ParseOptionalAddrSpace(unsigned &AddrSpace);
- bool ParseOptionalAttrs(unsigned &Attrs, unsigned AttrKind);
+ bool ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind);
bool ParseOptionalLinkage(unsigned &Linkage, bool &HasLinkage);
bool ParseOptionalLinkage(unsigned &Linkage) {
bool HasLinkage; return ParseOptionalLinkage(Linkage, HasLinkage);
@@ -304,8 +305,8 @@ namespace llvm {
struct ParamInfo {
LocTy Loc;
Value *V;
- unsigned Attrs;
- ParamInfo(LocTy loc, Value *v, unsigned attrs)
+ Attributes Attrs;
+ ParamInfo(LocTy loc, Value *v, Attributes attrs)
: Loc(loc), V(v), Attrs(attrs) {}
};
bool ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
@@ -325,9 +326,9 @@ namespace llvm {
struct ArgInfo {
LocTy Loc;
Type *Ty;
- unsigned Attrs;
+ Attributes Attrs;
std::string Name;
- ArgInfo(LocTy L, Type *ty, unsigned Attr, const std::string &N)
+ ArgInfo(LocTy L, Type *ty, Attributes Attr, const std::string &N)
: Loc(L), Ty(ty), Attrs(Attr), Name(N) {}
};
bool ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList, bool &isVarArg);
@@ -363,8 +364,8 @@ namespace llvm {
bool ParseLandingPad(Instruction *&I, PerFunctionState &PFS);
bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
- int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
- int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+ int ParseLoad(Instruction *&I, PerFunctionState &PFS);
+ int ParseStore(Instruction *&I, PerFunctionState &PFS);
int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS);
int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS);
int ParseFence(Instruction *&I, PerFunctionState &PFS);
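
[Editor's sketch] Across this header, attribute words change from raw unsigned to the Attributes class. A minimal sketch of the shape such a type needs to keep the call sites in this diff compiling; AttrWord is illustrative, the real definition lives in llvm/Attributes.h:

    #include <stdint.h>

    // Sketch: a strongly typed attribute word. Unlike plain 'unsigned' it
    // cannot be mixed silently with unrelated integers, yet still supports
    // the |=, truth-test, !=, and Raw() uses seen throughout this diff.
    class AttrWord {
      uint64_t Bits;
    public:
      AttrWord() : Bits(0) {}                     // default: no attributes set
      explicit AttrWord(uint64_t B) : Bits(B) {}  // explicit: no accidental ints
      AttrWord &operator|=(AttrWord A) { Bits |= A.Bits; return *this; }
      bool operator!=(AttrWord A) const { return Bits != A.Bits; }
      operator bool() const { return Bits != 0; } // enables 'if (Attrs)'
      uint64_t Raw() const { return Bits; }       // escape hatch for bitcode I/O
    };
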
diff --git a/contrib/llvm/lib/AsmParser/LLToken.h b/contrib/llvm/lib/AsmParser/LLToken.h
index 8f16772..adf5d4f 100644
--- a/contrib/llvm/lib/AsmParser/LLToken.h
+++ b/contrib/llvm/lib/AsmParser/LLToken.h
@@ -50,6 +50,7 @@ namespace lltok {
kw_tail,
kw_target,
kw_triple,
+ kw_unwind,
kw_deplibs,
kw_datalayout,
kw_volatile,
@@ -102,6 +103,7 @@ namespace lltok {
kw_noimplicitfloat,
kw_naked,
kw_nonlazybind,
+ kw_address_safety,
kw_type,
kw_opaque,
@@ -126,7 +128,7 @@ namespace lltok {
kw_landingpad, kw_personality, kw_cleanup, kw_catch, kw_filter,
- kw_ret, kw_br, kw_switch, kw_indirectbr, kw_invoke, kw_unwind, kw_resume,
+ kw_ret, kw_br, kw_switch, kw_indirectbr, kw_invoke, kw_resume,
kw_unreachable,
kw_alloca, kw_load, kw_store, kw_fence, kw_cmpxchg, kw_atomicrmw,
diff --git a/contrib/llvm/lib/AsmParser/Parser.cpp b/contrib/llvm/lib/AsmParser/Parser.cpp
index 59fb471..21b7fd4 100644
--- a/contrib/llvm/lib/AsmParser/Parser.cpp
+++ b/contrib/llvm/lib/AsmParser/Parser.cpp
@@ -44,7 +44,7 @@ Module *llvm::ParseAssemblyFile(const std::string &Filename, SMDiagnostic &Err,
LLVMContext &Context) {
OwningPtr<MemoryBuffer> File;
if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), File)) {
- Err = SMDiagnostic(Filename,
+ Err = SMDiagnostic(Filename, SourceMgr::DK_Error,
"Could not open input file: " + ec.message());
return 0;
}
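
[Editor's sketch] The Parser.cpp hunk tracks an SMDiagnostic constructor that now takes an explicit severity from SourceMgr. A hedged sketch of the same construction in isolation (makeOpenError is an illustrative helper):

    #include "llvm/Support/SourceMgr.h"
    #include <string>
    using namespace llvm;

    // Sketch: build a file-level error diagnostic with an explicit kind,
    // matching the call site above.
    static SMDiagnostic makeOpenError(const std::string &Filename,
                                      const std::string &Reason) {
      return SMDiagnostic(Filename, SourceMgr::DK_Error,
                          "Could not open input file: " + Reason);
    }
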
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 46565f3..e399040 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -22,11 +22,19 @@
#include "llvm/AutoUpgrade.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataStream.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/OperandTraits.h"
using namespace llvm;
+void BitcodeReader::materializeForwardReferencedFunctions() {
+ while (!BlockAddrFwdRefs.empty()) {
+ Function *F = BlockAddrFwdRefs.begin()->first;
+ F->Materialize();
+ }
+}
+
void BitcodeReader::FreeState() {
if (BufferOwned)
delete Buffer;
@@ -394,7 +402,7 @@ Type *BitcodeReader::getTypeByID(unsigned ID) {
// The type table size is always specified correctly.
if (ID >= TypeList.size())
return 0;
-
+
if (Type *Ty = TypeList[ID])
return Ty;
@@ -403,14 +411,6 @@ Type *BitcodeReader::getTypeByID(unsigned ID) {
return TypeList[ID] = StructType::create(Context);
}
-/// FIXME: Remove in LLVM 3.1, only used by ParseOldTypeTable.
-Type *BitcodeReader::getTypeByIDOrNull(unsigned ID) {
- if (ID >= TypeList.size())
- TypeList.resize(ID+1);
-
- return TypeList[ID];
-}
-
//===----------------------------------------------------------------------===//
// Functions for parsing blocks from the bitcode file
@@ -462,8 +462,8 @@ bool BitcodeReader::ParseAttributeBlock() {
// If Function attributes are using index 0 then transfer them
// to index ~0. Index 0 is used for return value attributes but used to be
// used for function attributes.
- Attributes RetAttribute = Attribute::None;
- Attributes FnAttribute = Attribute::None;
+ Attributes RetAttribute;
+ Attributes FnAttribute;
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
// FIXME: remove in LLVM 3.0
// The alignment is stored as a 16-bit raw value from bits 31--16.
@@ -473,23 +473,24 @@ bool BitcodeReader::ParseAttributeBlock() {
if (Alignment && !isPowerOf2_32(Alignment))
return Error("Alignment is not a power of two.");
- Attributes ReconstitutedAttr = Record[i+1] & 0xffff;
+ Attributes ReconstitutedAttr(Record[i+1] & 0xffff);
if (Alignment)
ReconstitutedAttr |= Attribute::constructAlignmentFromInt(Alignment);
- ReconstitutedAttr |= (Record[i+1] & (0xffffull << 32)) >> 11;
- Record[i+1] = ReconstitutedAttr;
+ ReconstitutedAttr |=
+ Attributes((Record[i+1] & (0xffffull << 32)) >> 11);
+ Record[i+1] = ReconstitutedAttr.Raw();
if (Record[i] == 0)
- RetAttribute = Record[i+1];
+ RetAttribute = ReconstitutedAttr;
else if (Record[i] == ~0U)
- FnAttribute = Record[i+1];
+ FnAttribute = ReconstitutedAttr;
}
- unsigned OldRetAttrs = (Attribute::NoUnwind|Attribute::NoReturn|
+ Attributes OldRetAttrs = (Attribute::NoUnwind|Attribute::NoReturn|
Attribute::ReadOnly|Attribute::ReadNone);
if (FnAttribute == Attribute::None && RetAttribute != Attribute::None &&
- (RetAttribute & OldRetAttrs) != 0) {
+ (RetAttribute & OldRetAttrs)) {
if (FnAttribute == Attribute::None) { // add a slot so they get added.
Record.push_back(~0U);
Record.push_back(0);
@@ -506,8 +507,9 @@ bool BitcodeReader::ParseAttributeBlock() {
} else if (Record[i] == ~0U) {
if (FnAttribute != Attribute::None)
Attrs.push_back(AttributeWithIndex::get(~0U, FnAttribute));
- } else if (Record[i+1] != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(Record[i], Record[i+1]));
+ } else if (Attributes(Record[i+1]) != Attribute::None)
+ Attrs.push_back(AttributeWithIndex::get(Record[i],
+ Attributes(Record[i+1])));
}
MAttributes.push_back(AttrListPtr::get(Attrs.begin(), Attrs.end()));
@@ -521,7 +523,7 @@ bool BitcodeReader::ParseAttributeBlock() {
bool BitcodeReader::ParseTypeTable() {
if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_NEW))
return Error("Malformed block record");
-
+
return ParseTypeTableBody();
}
@@ -533,7 +535,7 @@ bool BitcodeReader::ParseTypeTableBody() {
unsigned NumRecords = 0;
SmallString<64> TypeName;
-
+
// Read all the records for this type table.
while (1) {
unsigned Code = Stream.ReadCode();
@@ -573,6 +575,9 @@ bool BitcodeReader::ParseTypeTableBody() {
case bitc::TYPE_CODE_VOID: // VOID
ResultTy = Type::getVoidTy(Context);
break;
+ case bitc::TYPE_CODE_HALF: // HALF
+ ResultTy = Type::getHalfTy(Context);
+ break;
case bitc::TYPE_CODE_FLOAT: // FLOAT
ResultTy = Type::getFloatTy(Context);
break;
@@ -615,12 +620,12 @@ bool BitcodeReader::ParseTypeTableBody() {
ResultTy = PointerType::get(ResultTy, AddressSpace);
break;
}
- case bitc::TYPE_CODE_FUNCTION: {
+ case bitc::TYPE_CODE_FUNCTION_OLD: {
// FIXME: attrid is dead, remove it in LLVM 3.0
// FUNCTION: [vararg, attrid, retty, paramty x N]
if (Record.size() < 3)
return Error("Invalid FUNCTION type record");
- std::vector<Type*> ArgTys;
+ SmallVector<Type*, 8> ArgTys;
for (unsigned i = 3, e = Record.size(); i != e; ++i) {
if (Type *T = getTypeByID(Record[i]))
ArgTys.push_back(T);
@@ -635,10 +640,29 @@ bool BitcodeReader::ParseTypeTableBody() {
ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
break;
}
+ case bitc::TYPE_CODE_FUNCTION: {
+ // FUNCTION: [vararg, retty, paramty x N]
+ if (Record.size() < 2)
+ return Error("Invalid FUNCTION type record");
+ SmallVector<Type*, 8> ArgTys;
+ for (unsigned i = 2, e = Record.size(); i != e; ++i) {
+ if (Type *T = getTypeByID(Record[i]))
+ ArgTys.push_back(T);
+ else
+ break;
+ }
+
+ ResultTy = getTypeByID(Record[1]);
+ if (ResultTy == 0 || ArgTys.size() < Record.size()-2)
+ return Error("invalid type in function type");
+
+ ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
+ break;
+ }
case bitc::TYPE_CODE_STRUCT_ANON: { // STRUCT: [ispacked, eltty x N]
if (Record.size() < 1)
return Error("Invalid STRUCT type record");
- std::vector<Type*> EltTys;
+ SmallVector<Type*, 8> EltTys;
for (unsigned i = 1, e = Record.size(); i != e; ++i) {
if (Type *T = getTypeByID(Record[i]))
EltTys.push_back(T);
@@ -728,247 +752,6 @@ bool BitcodeReader::ParseTypeTableBody() {
}
}
-// FIXME: Remove in LLVM 3.1
-bool BitcodeReader::ParseOldTypeTable() {
- if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_OLD))
- return Error("Malformed block record");
-
- if (!TypeList.empty())
- return Error("Multiple TYPE_BLOCKs found!");
-
-
- // While horrible, we have no good ordering of types in the bc file. Just
- // iteratively parse types out of the bc file in multiple passes until we get
- // them all. Do this by saving a cursor for the start of the type block.
- BitstreamCursor StartOfTypeBlockCursor(Stream);
-
- unsigned NumTypesRead = 0;
-
- SmallVector<uint64_t, 64> Record;
-RestartScan:
- unsigned NextTypeID = 0;
- bool ReadAnyTypes = false;
-
- // Read all the records for this type table.
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (NextTypeID != TypeList.size())
- return Error("Invalid type forward reference in TYPE_BLOCK_ID_OLD");
-
- // If we haven't read all of the types yet, iterate again.
- if (NumTypesRead != TypeList.size()) {
- // If we didn't successfully read any types in this pass, then we must
- // have an unhandled forward reference.
- if (!ReadAnyTypes)
- return Error("Obsolete bitcode contains unhandled recursive type");
-
- Stream = StartOfTypeBlockCursor;
- goto RestartScan;
- }
-
- if (Stream.ReadBlockEnd())
- return Error("Error at end of type table block");
- return false;
- }
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- // No known subblocks, always skip them.
- Stream.ReadSubBlockID();
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- Record.clear();
- Type *ResultTy = 0;
- switch (Stream.ReadRecord(Code, Record)) {
- default: return Error("unknown type in type table");
- case bitc::TYPE_CODE_NUMENTRY: // TYPE_CODE_NUMENTRY: [numentries]
- // TYPE_CODE_NUMENTRY contains a count of the number of types in the
- // type list. This allows us to reserve space.
- if (Record.size() < 1)
- return Error("Invalid TYPE_CODE_NUMENTRY record");
- TypeList.resize(Record[0]);
- continue;
- case bitc::TYPE_CODE_VOID: // VOID
- ResultTy = Type::getVoidTy(Context);
- break;
- case bitc::TYPE_CODE_FLOAT: // FLOAT
- ResultTy = Type::getFloatTy(Context);
- break;
- case bitc::TYPE_CODE_DOUBLE: // DOUBLE
- ResultTy = Type::getDoubleTy(Context);
- break;
- case bitc::TYPE_CODE_X86_FP80: // X86_FP80
- ResultTy = Type::getX86_FP80Ty(Context);
- break;
- case bitc::TYPE_CODE_FP128: // FP128
- ResultTy = Type::getFP128Ty(Context);
- break;
- case bitc::TYPE_CODE_PPC_FP128: // PPC_FP128
- ResultTy = Type::getPPC_FP128Ty(Context);
- break;
- case bitc::TYPE_CODE_LABEL: // LABEL
- ResultTy = Type::getLabelTy(Context);
- break;
- case bitc::TYPE_CODE_METADATA: // METADATA
- ResultTy = Type::getMetadataTy(Context);
- break;
- case bitc::TYPE_CODE_X86_MMX: // X86_MMX
- ResultTy = Type::getX86_MMXTy(Context);
- break;
- case bitc::TYPE_CODE_INTEGER: // INTEGER: [width]
- if (Record.size() < 1)
- return Error("Invalid Integer type record");
- ResultTy = IntegerType::get(Context, Record[0]);
- break;
- case bitc::TYPE_CODE_OPAQUE: // OPAQUE
- if (NextTypeID < TypeList.size() && TypeList[NextTypeID] == 0)
- ResultTy = StructType::create(Context);
- break;
- case bitc::TYPE_CODE_STRUCT_OLD: {// STRUCT_OLD
- if (NextTypeID >= TypeList.size()) break;
- // If we already read it, don't reprocess.
- if (TypeList[NextTypeID] &&
- !cast<StructType>(TypeList[NextTypeID])->isOpaque())
- break;
-
- // Set a type.
- if (TypeList[NextTypeID] == 0)
- TypeList[NextTypeID] = StructType::create(Context);
-
- std::vector<Type*> EltTys;
- for (unsigned i = 1, e = Record.size(); i != e; ++i) {
- if (Type *Elt = getTypeByIDOrNull(Record[i]))
- EltTys.push_back(Elt);
- else
- break;
- }
-
- if (EltTys.size() != Record.size()-1)
- break; // Not all elements are ready.
-
- cast<StructType>(TypeList[NextTypeID])->setBody(EltTys, Record[0]);
- ResultTy = TypeList[NextTypeID];
- TypeList[NextTypeID] = 0;
- break;
- }
- case bitc::TYPE_CODE_POINTER: { // POINTER: [pointee type] or
- // [pointee type, address space]
- if (Record.size() < 1)
- return Error("Invalid POINTER type record");
- unsigned AddressSpace = 0;
- if (Record.size() == 2)
- AddressSpace = Record[1];
- if ((ResultTy = getTypeByIDOrNull(Record[0])))
- ResultTy = PointerType::get(ResultTy, AddressSpace);
- break;
- }
- case bitc::TYPE_CODE_FUNCTION: {
- // FIXME: attrid is dead, remove it in LLVM 3.0
- // FUNCTION: [vararg, attrid, retty, paramty x N]
- if (Record.size() < 3)
- return Error("Invalid FUNCTION type record");
- std::vector<Type*> ArgTys;
- for (unsigned i = 3, e = Record.size(); i != e; ++i) {
- if (Type *Elt = getTypeByIDOrNull(Record[i]))
- ArgTys.push_back(Elt);
- else
- break;
- }
- if (ArgTys.size()+3 != Record.size())
- break; // Something was null.
- if ((ResultTy = getTypeByIDOrNull(Record[2])))
- ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
- break;
- }
- case bitc::TYPE_CODE_ARRAY: // ARRAY: [numelts, eltty]
- if (Record.size() < 2)
- return Error("Invalid ARRAY type record");
- if ((ResultTy = getTypeByIDOrNull(Record[1])))
- ResultTy = ArrayType::get(ResultTy, Record[0]);
- break;
- case bitc::TYPE_CODE_VECTOR: // VECTOR: [numelts, eltty]
- if (Record.size() < 2)
- return Error("Invalid VECTOR type record");
- if ((ResultTy = getTypeByIDOrNull(Record[1])))
- ResultTy = VectorType::get(ResultTy, Record[0]);
- break;
- }
-
- if (NextTypeID >= TypeList.size())
- return Error("invalid TYPE table");
-
- if (ResultTy && TypeList[NextTypeID] == 0) {
- ++NumTypesRead;
- ReadAnyTypes = true;
-
- TypeList[NextTypeID] = ResultTy;
- }
-
- ++NextTypeID;
- }
-}
-
-
-bool BitcodeReader::ParseOldTypeSymbolTable() {
- if (Stream.EnterSubBlock(bitc::TYPE_SYMTAB_BLOCK_ID_OLD))
- return Error("Malformed block record");
-
- SmallVector<uint64_t, 64> Record;
-
- // Read all the records for this type table.
- std::string TypeName;
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (Stream.ReadBlockEnd())
- return Error("Error at end of type symbol table block");
- return false;
- }
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- // No known subblocks, always skip them.
- Stream.ReadSubBlockID();
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- Record.clear();
- switch (Stream.ReadRecord(Code, Record)) {
- default: // Default behavior: unknown type.
- break;
- case bitc::TST_CODE_ENTRY: // TST_ENTRY: [typeid, namechar x N]
- if (ConvertToString(Record, 1, TypeName))
- return Error("Invalid TST_ENTRY record");
- unsigned TypeID = Record[0];
- if (TypeID >= TypeList.size())
- return Error("Invalid Type ID in TST_ENTRY record");
-
- // Only apply the type name to a struct type with no name.
- if (StructType *STy = dyn_cast<StructType>(TypeList[TypeID]))
- if (!STy->isLiteral() && !STy->hasName())
- STy->setName(TypeName);
- TypeName.clear();
- break;
- }
- }
-}
-
bool BitcodeReader::ParseValueSymbolTable() {
if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
return Error("Malformed block record");
@@ -1262,7 +1045,9 @@ bool BitcodeReader::ParseConstants() {
case bitc::CST_CODE_FLOAT: { // FLOAT: [fpval]
if (Record.empty())
return Error("Invalid FLOAT record");
- if (CurTy->isFloatTy())
+ if (CurTy->isHalfTy())
+ V = ConstantFP::get(Context, APFloat(APInt(16, (uint16_t)Record[0])));
+ else if (CurTy->isFloatTy())
V = ConstantFP::get(Context, APFloat(APInt(32, (uint32_t)Record[0])));
else if (CurTy->isDoubleTy())
V = ConstantFP::get(Context, APFloat(APInt(64, Record[0])));
@@ -1286,7 +1071,7 @@ bool BitcodeReader::ParseConstants() {
return Error("Invalid CST_AGGREGATE record");
unsigned Size = Record.size();
- std::vector<Constant*> Elts;
+ SmallVector<Constant*, 16> Elts;
if (StructType *STy = dyn_cast<StructType>(CurTy)) {
for (unsigned i = 0; i != Size; ++i)
@@ -1308,35 +1093,78 @@ bool BitcodeReader::ParseConstants() {
}
break;
}
- case bitc::CST_CODE_STRING: { // STRING: [values]
+ case bitc::CST_CODE_STRING: // STRING: [values]
+ case bitc::CST_CODE_CSTRING: { // CSTRING: [values]
if (Record.empty())
- return Error("Invalid CST_AGGREGATE record");
-
- ArrayType *ATy = cast<ArrayType>(CurTy);
- Type *EltTy = ATy->getElementType();
+ return Error("Invalid CST_STRING record");
unsigned Size = Record.size();
- std::vector<Constant*> Elts;
+ SmallString<16> Elts;
for (unsigned i = 0; i != Size; ++i)
- Elts.push_back(ConstantInt::get(EltTy, Record[i]));
- V = ConstantArray::get(ATy, Elts);
+ Elts.push_back(Record[i]);
+ V = ConstantDataArray::getString(Context, Elts,
+ BitCode == bitc::CST_CODE_CSTRING);
break;
}
- case bitc::CST_CODE_CSTRING: { // CSTRING: [values]
+ case bitc::CST_CODE_DATA: {// DATA: [n x value]
if (Record.empty())
- return Error("Invalid CST_AGGREGATE record");
-
- ArrayType *ATy = cast<ArrayType>(CurTy);
- Type *EltTy = ATy->getElementType();
-
+ return Error("Invalid CST_DATA record");
+
+ Type *EltTy = cast<SequentialType>(CurTy)->getElementType();
unsigned Size = Record.size();
- std::vector<Constant*> Elts;
- for (unsigned i = 0; i != Size; ++i)
- Elts.push_back(ConstantInt::get(EltTy, Record[i]));
- Elts.push_back(Constant::getNullValue(EltTy));
- V = ConstantArray::get(ATy, Elts);
+
+ if (EltTy->isIntegerTy(8)) {
+ SmallVector<uint8_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isIntegerTy(16)) {
+ SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isIntegerTy(32)) {
+ SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isIntegerTy(64)) {
+ SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isFloatTy()) {
+ SmallVector<float, 16> Elts;
+ for (unsigned i = 0; i != Size; ++i) {
+ union { uint32_t I; float F; };
+ I = Record[i];
+ Elts.push_back(F);
+ }
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isDoubleTy()) {
+ SmallVector<double, 16> Elts;
+ for (unsigned i = 0; i != Size; ++i) {
+ union { uint64_t I; double F; };
+ I = Record[i];
+ Elts.push_back(F);
+ }
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else {
+ return Error("Unknown element type in CE_DATA");
+ }
break;
}
+
case bitc::CST_CODE_CE_BINOP: { // CE_BINOP: [opcode, opval, opval]
if (Record.size() < 3) return Error("Invalid CE_BINOP record");
int Opc = GetDecodedBinaryOpcode(Record[0], CurTy);
@@ -1517,6 +1345,50 @@ bool BitcodeReader::ParseConstants() {
return false;
}
+bool BitcodeReader::ParseUseLists() {
+ if (Stream.EnterSubBlock(bitc::USELIST_BLOCK_ID))
+ return Error("Malformed block record");
+
+ SmallVector<uint64_t, 64> Record;
+
+ // Read all the records.
+ while (1) {
+ unsigned Code = Stream.ReadCode();
+ if (Code == bitc::END_BLOCK) {
+ if (Stream.ReadBlockEnd())
+ return Error("Error at end of use-list table block");
+ return false;
+ }
+
+ if (Code == bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ Stream.ReadSubBlockID();
+ if (Stream.SkipBlock())
+ return Error("Malformed block record");
+ continue;
+ }
+
+ if (Code == bitc::DEFINE_ABBREV) {
+ Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read a use list record.
+ Record.clear();
+ switch (Stream.ReadRecord(Code, Record)) {
+ default: // Default behavior: unknown type.
+ break;
+ case bitc::USELIST_CODE_ENTRY: { // USELIST_CODE_ENTRY: TBD.
+ unsigned RecordLength = Record.size();
+ if (RecordLength < 1)
+ return Error ("Invalid UseList reader!");
+ UseListRecords.push_back(Record);
+ break;
+ }
+ }
+ }
+}
+
/// RememberAndSkipFunctionBody - When we see the block for a function body,
/// remember where it is and then skip it. This lets us lazily deserialize the
/// functions.
@@ -1538,8 +1410,36 @@ bool BitcodeReader::RememberAndSkipFunctionBody() {
return false;
}
-bool BitcodeReader::ParseModule() {
- if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+bool BitcodeReader::GlobalCleanup() {
+ // Patch the initializers for globals and aliases up.
+ ResolveGlobalAndAliasInits();
+ if (!GlobalInits.empty() || !AliasInits.empty())
+ return Error("Malformed global initializer set");
+
+ // Look for intrinsic functions which need to be upgraded at some point
+ for (Module::iterator FI = TheModule->begin(), FE = TheModule->end();
+ FI != FE; ++FI) {
+ Function *NewFn;
+ if (UpgradeIntrinsicFunction(FI, NewFn))
+ UpgradedIntrinsics.push_back(std::make_pair(FI, NewFn));
+ }
+
+ // Look for global variables which need to be renamed.
+ for (Module::global_iterator
+ GI = TheModule->global_begin(), GE = TheModule->global_end();
+ GI != GE; ++GI)
+ UpgradeGlobalVariable(GI);
+ // Force deallocation of memory for these vectors to favor clients that
+ // want lazy deserialization.
+ std::vector<std::pair<GlobalVariable*, unsigned> >().swap(GlobalInits);
+ std::vector<std::pair<GlobalAlias*, unsigned> >().swap(AliasInits);
+ return false;
+}
+
+bool BitcodeReader::ParseModule(bool Resume) {
+ if (Resume)
+ Stream.JumpToBit(NextUnreadBit);
+ else if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
return Error("Malformed block record");
SmallVector<uint64_t, 64> Record;
@@ -1553,33 +1453,7 @@ bool BitcodeReader::ParseModule() {
if (Stream.ReadBlockEnd())
return Error("Error at end of module block");
- // Patch the initializers for globals and aliases up.
- ResolveGlobalAndAliasInits();
- if (!GlobalInits.empty() || !AliasInits.empty())
- return Error("Malformed global initializer set");
- if (!FunctionsWithBodies.empty())
- return Error("Too few function bodies found");
-
- // Look for intrinsic functions which need to be upgraded at some point
- for (Module::iterator FI = TheModule->begin(), FE = TheModule->end();
- FI != FE; ++FI) {
- Function* NewFn;
- if (UpgradeIntrinsicFunction(FI, NewFn))
- UpgradedIntrinsics.push_back(std::make_pair(FI, NewFn));
- }
-
- // Look for global variables which need to be renamed.
- for (Module::global_iterator
- GI = TheModule->global_begin(), GE = TheModule->global_end();
- GI != GE; ++GI)
- UpgradeGlobalVariable(GI);
-
- // Force deallocation of memory for these vectors to favor the client that
- // want lazy deserialization.
- std::vector<std::pair<GlobalVariable*, unsigned> >().swap(GlobalInits);
- std::vector<std::pair<GlobalAlias*, unsigned> >().swap(AliasInits);
- std::vector<Function*>().swap(FunctionsWithBodies);
- return false;
+ return GlobalCleanup();
}
if (Code == bitc::ENTER_SUBBLOCK) {
@@ -1600,17 +1474,10 @@ bool BitcodeReader::ParseModule() {
if (ParseTypeTable())
return true;
break;
- case bitc::TYPE_BLOCK_ID_OLD:
- if (ParseOldTypeTable())
- return true;
- break;
- case bitc::TYPE_SYMTAB_BLOCK_ID_OLD:
- if (ParseOldTypeSymbolTable())
- return true;
- break;
case bitc::VALUE_SYMTAB_BLOCK_ID:
if (ParseValueSymbolTable())
return true;
+ SeenValueSymbolTable = true;
break;
case bitc::CONSTANTS_BLOCK_ID:
if (ParseConstants() || ResolveGlobalAndAliasInits())
@@ -1623,13 +1490,29 @@ bool BitcodeReader::ParseModule() {
case bitc::FUNCTION_BLOCK_ID:
// If this is the first function body we've seen, reverse the
// FunctionsWithBodies list.
- if (!HasReversedFunctionsWithBodies) {
+ if (!SeenFirstFunctionBody) {
std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end());
- HasReversedFunctionsWithBodies = true;
+ if (GlobalCleanup())
+ return true;
+ SeenFirstFunctionBody = true;
}
if (RememberAndSkipFunctionBody())
return true;
+ // For streaming bitcode, suspend parsing when we reach the function
+ // bodies. Subsequent materialization calls will resume it when
+ // necessary. For streaming, the function bodies must be at the end of
+ // the bitcode. If the bitcode file is old, the symbol table will be
+ // at the end instead and will not have been seen yet. In this case,
+ // just finish the parse now.
+ if (LazyStreamer && SeenValueSymbolTable) {
+ NextUnreadBit = Stream.GetCurrentBitNo();
+ return false;
+ }
+ break;
+ case bitc::USELIST_BLOCK_ID:
+ if (ParseUseLists())
+ return true;
break;
}
continue;
@@ -1784,8 +1667,10 @@ bool BitcodeReader::ParseModule() {
// If this is a function with a body, remember the prototype we are
// creating now, so that we can match up the body with them later.
- if (!isProto)
+ if (!isProto) {
FunctionsWithBodies.push_back(Func);
+ if (LazyStreamer) DeferredFunctionInfo[Func] = 0;
+ }
break;
}
// ALIAS: [alias type, aliasee val#, linkage]
@@ -1824,24 +1709,7 @@ bool BitcodeReader::ParseModule() {
bool BitcodeReader::ParseBitcodeInto(Module *M) {
TheModule = 0;
- unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart();
- unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
-
- if (Buffer->getBufferSize() & 3) {
- if (!isRawBitcode(BufPtr, BufEnd) && !isBitcodeWrapper(BufPtr, BufEnd))
- return Error("Invalid bitcode signature");
- else
- return Error("Bitcode stream should be a multiple of 4 bytes in length");
- }
-
- // If we have a wrapper header, parse it and ignore the non-bc file contents.
- // The magic number is 0x0B17C0DE stored in little endian.
- if (isBitcodeWrapper(BufPtr, BufEnd))
- if (SkipBitcodeWrapperHeader(BufPtr, BufEnd))
- return Error("Invalid bitcode wrapper header");
-
- StreamFile.init(BufPtr, BufEnd);
- Stream.init(StreamFile);
+ if (InitStream()) return true;
// Sniff for the signature.
if (Stream.Read(8) != 'B' ||
@@ -1883,8 +1751,9 @@ bool BitcodeReader::ParseBitcodeInto(Module *M) {
if (TheModule)
return Error("Multiple MODULE_BLOCKs in same stream");
TheModule = M;
- if (ParseModule())
+ if (ParseModule(false))
return true;
+ if (LazyStreamer) return false;
break;
default:
if (Stream.SkipBlock())
@@ -1952,20 +1821,7 @@ bool BitcodeReader::ParseModuleTriple(std::string &Triple) {
}
bool BitcodeReader::ParseTriple(std::string &Triple) {
- if (Buffer->getBufferSize() & 3)
- return Error("Bitcode stream should be a multiple of 4 bytes in length");
-
- unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart();
- unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
-
- // If we have a wrapper header, parse it and ignore the non-bc file contents.
- // The magic number is 0x0B17C0DE stored in little endian.
- if (isBitcodeWrapper(BufPtr, BufEnd))
- if (SkipBitcodeWrapperHeader(BufPtr, BufEnd))
- return Error("Invalid bitcode wrapper header");
-
- StreamFile.init(BufPtr, BufEnd);
- Stream.init(StreamFile);
+ if (InitStream()) return true;
// Sniff for the signature.
if (Stream.Read(8) != 'B' ||
@@ -2517,10 +2373,6 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
InstructionList.push_back(I);
break;
}
- case bitc::FUNC_CODE_INST_UNWIND: // UNWIND
- I = new UnwindInst(Context);
- InstructionList.push_back(I);
- break;
case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE
I = new UnreachableInst(Context);
InstructionList.push_back(I);
@@ -2845,6 +2697,19 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
return false;
}
+/// FindFunctionInStream - Find the function body in the bitcode stream
+bool BitcodeReader::FindFunctionInStream(Function *F,
+ DenseMap<Function*, uint64_t>::iterator DeferredFunctionInfoIterator) {
+ while (DeferredFunctionInfoIterator->second == 0) {
+ if (Stream.AtEndOfStream())
+ return Error("Could not find Function in stream");
+ // ParseModule will parse the next body in the stream and set its
+ // position in the DeferredFunctionInfo map.
+ if (ParseModule(true)) return true;
+ }
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// GVMaterializer implementation
//===----------------------------------------------------------------------===//
@@ -2865,6 +2730,10 @@ bool BitcodeReader::Materialize(GlobalValue *GV, std::string *ErrInfo) {
DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F);
assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!");
+ // If its position is recorded as 0, its body is somewhere in the stream
+ // but we haven't seen it yet.
+ if (DFII->second == 0)
+ if (LazyStreamer && FindFunctionInStream(F, DFII)) return true;
// Move the bit stream to the saved position of the deferred function body.
Stream.JumpToBit(DFII->second);
@@ -2920,6 +2789,12 @@ bool BitcodeReader::MaterializeModule(Module *M, std::string *ErrInfo) {
Materialize(F, ErrInfo))
return true;
+ // At this point, if there are any function bodies, the current bit is
+ // pointing to the END_BLOCK record after them. Now make sure the rest
+ // of the bits in the module have been read.
+ if (NextUnreadBit)
+ ParseModule(true);
+
// Upgrade any intrinsic calls that slipped through (should not happen!) and
// delete the old functions to clean up. We can't do this unless the entire
// module is materialized because there could always be another function body
@@ -2939,15 +2814,60 @@ bool BitcodeReader::MaterializeModule(Module *M, std::string *ErrInfo) {
}
std::vector<std::pair<Function*, Function*> >().swap(UpgradedIntrinsics);
- // Upgrade to new EH scheme. N.B. This will go away in 3.1.
- UpgradeExceptionHandling(M);
+ return false;
+}
+
+bool BitcodeReader::InitStream() {
+ if (LazyStreamer) return InitLazyStream();
+ return InitStreamFromBuffer();
+}
+
+bool BitcodeReader::InitStreamFromBuffer() {
+ const unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart();
+ const unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
+
+ if (Buffer->getBufferSize() & 3) {
+ if (!isRawBitcode(BufPtr, BufEnd) && !isBitcodeWrapper(BufPtr, BufEnd))
+ return Error("Invalid bitcode signature");
+ else
+ return Error("Bitcode stream should be a multiple of 4 bytes in length");
+ }
+
+ // If we have a wrapper header, parse it and ignore the non-bc file contents.
+ // The magic number is 0x0B17C0DE stored in little endian.
+ if (isBitcodeWrapper(BufPtr, BufEnd))
+ if (SkipBitcodeWrapperHeader(BufPtr, BufEnd, true))
+ return Error("Invalid bitcode wrapper header");
- // Check debug info intrinsics.
- CheckDebugInfoIntrinsics(TheModule);
+ StreamFile.reset(new BitstreamReader(BufPtr, BufEnd));
+ Stream.init(*StreamFile);
return false;
}
+bool BitcodeReader::InitLazyStream() {
+ // Check and strip off the bitcode wrapper; BitstreamReader expects never to
+ // see it.
+ StreamingMemoryObject *Bytes = new StreamingMemoryObject(LazyStreamer);
+ StreamFile.reset(new BitstreamReader(Bytes));
+ Stream.init(*StreamFile);
+
+ unsigned char buf[16];
+ if (Bytes->readBytes(0, 16, buf, NULL) == -1)
+ return Error("Bitcode stream must be at least 16 bytes in length");
+
+ if (!isBitcode(buf, buf + 16))
+ return Error("Invalid bitcode signature");
+
+ if (isBitcodeWrapper(buf, buf + 4)) {
+ const unsigned char *bitcodeStart = buf;
+ const unsigned char *bitcodeEnd = buf + 16;
+ SkipBitcodeWrapperHeader(bitcodeStart, bitcodeEnd, false);
+ Bytes->dropLeadingBytes(bitcodeStart - buf);
+ Bytes->setKnownObjectSize(bitcodeEnd - bitcodeStart);
+ }
+ return false;
+}
//===----------------------------------------------------------------------===//
// External interface
@@ -2970,6 +2890,27 @@ Module *llvm::getLazyBitcodeModule(MemoryBuffer *Buffer,
}
// Have the BitcodeReader dtor delete 'Buffer'.
R->setBufferOwned(true);
+
+ R->materializeForwardReferencedFunctions();
+
+ return M;
+}
+
+
+Module *llvm::getStreamedBitcodeModule(const std::string &name,
+ DataStreamer *streamer,
+ LLVMContext &Context,
+ std::string *ErrMsg) {
+ Module *M = new Module(name, Context);
+ BitcodeReader *R = new BitcodeReader(streamer, Context);
+ M->setMaterializer(R);
+ if (R->ParseBitcodeInto(M)) {
+ if (ErrMsg)
+ *ErrMsg = R->getErrorString();
+ delete M; // Also deletes R.
+ return 0;
+ }
+ R->setBufferOwned(false); // no buffer to delete
return M;
}
@@ -2990,6 +2931,9 @@ Module *llvm::ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext& Context,
return 0;
}
+ // TODO: Restore the use-lists to the in-memory state when the bitcode was
+ // written. We must defer until the Module has been fully materialized.
+
return M;
}
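
[Editor's sketch] getStreamedBitcodeModule above is the public entry point for the lazy-streaming path. A hedged usage sketch; the caller supplies some DataStreamer implementation (none is defined in this diff), and error handling follows the ErrMsg convention of the surrounding code:

    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Bitcode/ReaderWriter.h"
    #include "llvm/Support/DataStream.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Sketch: build a lazily materialized Module from a byte stream. The
    // reader suspends at the function bodies and resumes them on demand.
    static Module *loadStreamed(DataStreamer *Streamer) {
      std::string Err;
      Module *M = getStreamedBitcodeModule("stream.bc", Streamer,
                                           getGlobalContext(), &Err);
      if (M == 0)
        errs() << "bitcode error: " << Err << "\n";
      return M;
    }
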
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
index 6e6118c..e7c4e94 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.h
@@ -126,8 +126,11 @@ class BitcodeReader : public GVMaterializer {
Module *TheModule;
MemoryBuffer *Buffer;
bool BufferOwned;
- BitstreamReader StreamFile;
+ OwningPtr<BitstreamReader> StreamFile;
BitstreamCursor Stream;
+ DataStreamer *LazyStreamer;
+ uint64_t NextUnreadBit;
+ bool SeenValueSymbolTable;
const char *ErrorString;
@@ -135,6 +138,7 @@ class BitcodeReader : public GVMaterializer {
BitcodeReaderValueList ValueList;
BitcodeReaderMDValueList MDValueList;
SmallVector<Instruction *, 64> InstructionList;
+ SmallVector<SmallVector<uint64_t, 64>, 64> UseListRecords;
std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInits;
std::vector<std::pair<GlobalAlias*, unsigned> > AliasInits;
@@ -160,9 +164,10 @@ class BitcodeReader : public GVMaterializer {
// Map the bitcode's custom MDKind ID to the Module's MDKind ID.
DenseMap<unsigned, unsigned> MDKindMap;
- // After the module header has been read, the FunctionsWithBodies list is
- // reversed. This keeps track of whether we've done this yet.
- bool HasReversedFunctionsWithBodies;
+ // Several operations happen after the module header has been read, but
+ // before function bodies are processed. This keeps track of whether
+ // we've done this yet.
+ bool SeenFirstFunctionBody;
/// DeferredFunctionInfo - When function bodies are initially scanned, this
/// map contains info about where to find deferred function body in the
@@ -177,13 +182,22 @@ class BitcodeReader : public GVMaterializer {
public:
explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C)
: Context(C), TheModule(0), Buffer(buffer), BufferOwned(false),
- ErrorString(0), ValueList(C), MDValueList(C) {
- HasReversedFunctionsWithBodies = false;
+ LazyStreamer(0), NextUnreadBit(0), SeenValueSymbolTable(false),
+ ErrorString(0), ValueList(C), MDValueList(C),
+ SeenFirstFunctionBody(false) {
+ }
+ explicit BitcodeReader(DataStreamer *streamer, LLVMContext &C)
+ : Context(C), TheModule(0), Buffer(0), BufferOwned(false),
+ LazyStreamer(streamer), NextUnreadBit(0), SeenValueSymbolTable(false),
+ ErrorString(0), ValueList(C), MDValueList(C),
+ SeenFirstFunctionBody(false) {
}
~BitcodeReader() {
FreeState();
}
-
+
+ void materializeForwardReferencedFunctions();
+
void FreeState();
/// setBufferOwned - If this is true, the reader will destroy the MemoryBuffer
@@ -211,7 +225,6 @@ public:
bool ParseTriple(std::string &Triple);
private:
Type *getTypeByID(unsigned ID);
- Type *getTypeByIDOrNull(unsigned ID);
Value *getFnValueByID(unsigned ID, Type *Ty) {
if (Ty && Ty->isMetadataTy())
return MDValueList.getValueFwdRef(ID);
@@ -256,21 +269,26 @@ private:
}
- bool ParseModule();
+ bool ParseModule(bool Resume);
bool ParseAttributeBlock();
bool ParseTypeTable();
- bool ParseOldTypeTable(); // FIXME: Remove in LLVM 3.1
bool ParseTypeTableBody();
- bool ParseOldTypeSymbolTable(); // FIXME: Remove in LLVM 3.1
bool ParseValueSymbolTable();
bool ParseConstants();
bool RememberAndSkipFunctionBody();
bool ParseFunctionBody(Function *F);
+ bool GlobalCleanup();
bool ResolveGlobalAndAliasInits();
bool ParseMetadata();
bool ParseMetadataAttachment();
bool ParseModuleTriple(std::string &Triple);
+ bool ParseUseLists();
+ bool InitStream();
+ bool InitStreamFromBuffer();
+ bool InitLazyStream();
+ bool FindFunctionInStream(Function *F,
+ DenseMap<Function*, uint64_t>::iterator DeferredFunctionInfoIterator);
};
} // End llvm namespace
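
[Editor's sketch] One detail shared by the reader hunks above and the writer hunks below: raw IEEE bits travel through an anonymous union rather than a pointer cast. A self-contained sketch of that round trip (helper names are illustrative):

    #include <stdint.h>

    // Sketch: bit-level encode/decode of a 32-bit float, as in the
    // CST_CODE_DATA handling; the union sidesteps pointer-aliasing casts.
    static uint32_t encodeFloatBits(float F) {
      union { float F; uint32_t I; } U;
      U.F = F;
      return U.I;
    }
    static float decodeFloatBits(uint64_t Word) {
      union { uint32_t I; float F; } U;
      U.I = (uint32_t)Word;
      return U.F;
    }
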
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 5b3d969..b25d2e9 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -23,6 +23,7 @@
#include "llvm/Operator.h"
#include "llvm/ValueSymbolTable.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -31,6 +32,12 @@
#include <map>
using namespace llvm;
+static cl::opt<bool>
+EnablePreserveUseListOrdering("enable-bc-uselist-preserve",
+ cl::desc("Turn on experimental support for "
+ "use-list order preservation."),
+ cl::init(false), cl::Hidden);
+
/// These are manifest constants used by the bitcode writer. They do not need to
/// be kept in sync with the reader, but need to be consistent within this file.
enum {
@@ -119,7 +126,6 @@ static unsigned GetEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
static unsigned GetEncodedOrdering(AtomicOrdering Ordering) {
switch (Ordering) {
- default: llvm_unreachable("Unknown atomic ordering");
case NotAtomic: return bitc::ORDERING_NOTATOMIC;
case Unordered: return bitc::ORDERING_UNORDERED;
case Monotonic: return bitc::ORDERING_MONOTONIC;
@@ -128,14 +134,15 @@ static unsigned GetEncodedOrdering(AtomicOrdering Ordering) {
case AcquireRelease: return bitc::ORDERING_ACQREL;
case SequentiallyConsistent: return bitc::ORDERING_SEQCST;
}
+ llvm_unreachable("Invalid ordering");
}
static unsigned GetEncodedSynchScope(SynchronizationScope SynchScope) {
switch (SynchScope) {
- default: llvm_unreachable("Unknown synchronization scope");
case SingleThread: return bitc::SYNCHSCOPE_SINGLETHREAD;
case CrossThread: return bitc::SYNCHSCOPE_CROSSTHREAD;
}
+ llvm_unreachable("Invalid synch scope");
}
static void WriteStringRecord(unsigned Code, StringRef Str,
@@ -172,10 +179,11 @@ static void WriteAttributeTable(const ValueEnumerator &VE,
// Store the alignment in the bitcode as a 16-bit raw value instead of a
// 5-bit log2 encoded value. Shift the bits above the alignment up by
// 11 bits.
- uint64_t FauxAttr = PAWI.Attrs & 0xffff;
+ uint64_t FauxAttr = PAWI.Attrs.Raw() & 0xffff;
if (PAWI.Attrs & Attribute::Alignment)
- FauxAttr |= (1ull<<16)<<(((PAWI.Attrs & Attribute::Alignment)-1) >> 16);
- FauxAttr |= (PAWI.Attrs & (0x3FFull << 21)) << 11;
+ FauxAttr |= (1ull<<16)<<
+ (((PAWI.Attrs & Attribute::Alignment).Raw()-1) >> 16);
+ FauxAttr |= (PAWI.Attrs.Raw() & (0x3FFull << 21)) << 11;
Record.push_back(FauxAttr);
}
@@ -194,11 +202,12 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
Stream.EnterSubblock(bitc::TYPE_BLOCK_ID_NEW, 4 /*count from # abbrevs */);
SmallVector<uint64_t, 64> TypeVals;
+ uint64_t NumBits = Log2_32_Ceil(VE.getTypes().size()+1);
+
// Abbrev for TYPE_CODE_POINTER.
BitCodeAbbrev *Abbv = new BitCodeAbbrev();
Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_POINTER));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
Abbv->Add(BitCodeAbbrevOp(0)); // Addrspace = 0
unsigned PtrAbbrev = Stream.EmitAbbrev(Abbv);
@@ -206,10 +215,9 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
Abbv = new BitCodeAbbrev();
Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_FUNCTION));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isvararg
- Abbv->Add(BitCodeAbbrevOp(0)); // FIXME: DEAD value, remove in LLVM 3.0
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+
unsigned FunctionAbbrev = Stream.EmitAbbrev(Abbv);
// Abbrev for TYPE_CODE_STRUCT_ANON.
@@ -217,8 +225,8 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_ANON));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+
unsigned StructAnonAbbrev = Stream.EmitAbbrev(Abbv);
// Abbrev for TYPE_CODE_STRUCT_NAME.
@@ -233,16 +241,16 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAMED));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+
unsigned StructNamedAbbrev = Stream.EmitAbbrev(Abbv);
// Abbrev for TYPE_CODE_ARRAY.
Abbv = new BitCodeAbbrev();
Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_ARRAY));
Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // size
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+
unsigned ArrayAbbrev = Stream.EmitAbbrev(Abbv);
// Emit an entry count so the reader can reserve space.
@@ -259,6 +267,7 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
switch (T->getTypeID()) {
default: llvm_unreachable("Unknown type!");
case Type::VoidTyID: Code = bitc::TYPE_CODE_VOID; break;
+ case Type::HalfTyID: Code = bitc::TYPE_CODE_HALF; break;
case Type::FloatTyID: Code = bitc::TYPE_CODE_FLOAT; break;
case Type::DoubleTyID: Code = bitc::TYPE_CODE_DOUBLE; break;
case Type::X86_FP80TyID: Code = bitc::TYPE_CODE_X86_FP80; break;
@@ -284,10 +293,9 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
}
case Type::FunctionTyID: {
FunctionType *FT = cast<FunctionType>(T);
- // FUNCTION: [isvararg, attrid, retty, paramty x N]
+ // FUNCTION: [isvararg, retty, paramty x N]
Code = bitc::TYPE_CODE_FUNCTION;
TypeVals.push_back(FT->isVarArg());
- TypeVals.push_back(0); // FIXME: DEAD: remove in llvm 3.0
TypeVals.push_back(VE.getTypeID(FT->getReturnType()));
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
TypeVals.push_back(VE.getTypeID(FT->getParamType(i)));
@@ -350,7 +358,6 @@ static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
static unsigned getEncodedLinkage(const GlobalValue *GV) {
switch (GV->getLinkage()) {
- default: llvm_unreachable("Invalid linkage!");
case GlobalValue::ExternalLinkage: return 0;
case GlobalValue::WeakAnyLinkage: return 1;
case GlobalValue::AppendingLinkage: return 2;
@@ -368,15 +375,16 @@ static unsigned getEncodedLinkage(const GlobalValue *GV) {
case GlobalValue::LinkerPrivateWeakLinkage: return 14;
case GlobalValue::LinkerPrivateWeakDefAutoLinkage: return 15;
}
+ llvm_unreachable("Invalid linkage");
}
static unsigned getEncodedVisibility(const GlobalValue *GV) {
switch (GV->getVisibility()) {
- default: llvm_unreachable("Invalid visibility!");
case GlobalValue::DefaultVisibility: return 0;
case GlobalValue::HiddenVisibility: return 1;
case GlobalValue::ProtectedVisibility: return 2;
}
+ llvm_unreachable("Invalid visibility");
}
// Emit top-level description of module, including target triple, inline asm,
@@ -499,8 +507,8 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
// Emit the function proto information.
for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
- // FUNCTION: [type, callingconv, isproto, paramattr,
- // linkage, alignment, section, visibility, gc, unnamed_addr]
+ // FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment,
+ // section, visibility, gc, unnamed_addr]
Vals.push_back(VE.getTypeID(F->getType()));
Vals.push_back(F->getCallingConv());
Vals.push_back(F->isDeclaration());
@@ -520,6 +528,7 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
// Emit the alias information.
for (Module::const_alias_iterator AI = M->alias_begin(), E = M->alias_end();
AI != E; ++AI) {
+ // ALIAS: [alias type, aliasee val#, linkage, visibility]
Vals.push_back(VE.getTypeID(AI->getType()));
Vals.push_back(VE.getValueID(AI->getAliasee()));
Vals.push_back(getEncodedLinkage(AI));
@@ -819,7 +828,7 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
} else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
Code = bitc::CST_CODE_FLOAT;
Type *Ty = CFP->getType();
- if (Ty->isFloatTy() || Ty->isDoubleTy()) {
+ if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) {
Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
} else if (Ty->isX86_FP80Ty()) {
// api needed to prevent premature destruction
@@ -836,34 +845,56 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal,
} else {
assert (0 && "Unknown FP type!");
}
- } else if (isa<ConstantArray>(C) && cast<ConstantArray>(C)->isString()) {
- const ConstantArray *CA = cast<ConstantArray>(C);
+ } else if (isa<ConstantDataSequential>(C) &&
+ cast<ConstantDataSequential>(C)->isString()) {
+ const ConstantDataSequential *Str = cast<ConstantDataSequential>(C);
// Emit constant strings specially.
- unsigned NumOps = CA->getNumOperands();
+ unsigned NumElts = Str->getNumElements();
// If this is a null-terminated string, use the denser CSTRING encoding.
- if (CA->getOperand(NumOps-1)->isNullValue()) {
+ if (Str->isCString()) {
Code = bitc::CST_CODE_CSTRING;
- --NumOps; // Don't encode the null, which isn't allowed by char6.
+ --NumElts; // Don't encode the null, which isn't allowed by char6.
} else {
Code = bitc::CST_CODE_STRING;
AbbrevToUse = String8Abbrev;
}
bool isCStr7 = Code == bitc::CST_CODE_CSTRING;
bool isCStrChar6 = Code == bitc::CST_CODE_CSTRING;
- for (unsigned i = 0; i != NumOps; ++i) {
- unsigned char V = cast<ConstantInt>(CA->getOperand(i))->getZExtValue();
+ for (unsigned i = 0; i != NumElts; ++i) {
+ unsigned char V = Str->getElementAsInteger(i);
Record.push_back(V);
isCStr7 &= (V & 128) == 0;
if (isCStrChar6)
isCStrChar6 = BitCodeAbbrevOp::isChar6(V);
}
-
+
if (isCStrChar6)
AbbrevToUse = CString6Abbrev;
else if (isCStr7)
AbbrevToUse = CString7Abbrev;
- } else if (isa<ConstantArray>(C) || isa<ConstantStruct>(V) ||
- isa<ConstantVector>(V)) {
+ } else if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(C)) {
+ Code = bitc::CST_CODE_DATA;
+ Type *EltTy = CDS->getType()->getElementType();
+ if (isa<IntegerType>(EltTy)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
+ Record.push_back(CDS->getElementAsInteger(i));
+ } else if (EltTy->isFloatTy()) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ union { float F; uint32_t I; };
+ F = CDS->getElementAsFloat(i);
+ Record.push_back(I);
+ }
+ } else {
+ assert(EltTy->isDoubleTy() && "Unknown ConstantData element type");
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ union { double F; uint64_t I; };
+ F = CDS->getElementAsDouble(i);
+ Record.push_back(I);
+ }
+ }
+ } else if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
+ isa<ConstantVector>(C)) {
Code = bitc::CST_CODE_AGGREGATE;
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
Record.push_back(VE.getValueID(C->getOperand(i)));
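The string path above picks the densest abbreviation that can hold every byte: char6 (six bits, covering [a-zA-Z0-9._]), then 7-bit, then plain 8-bit. A standalone sketch of that width selection, mirroring the BitCodeAbbrevOp::isChar6 predicate:

#include <string>

// Mirrors BitCodeAbbrevOp::isChar6: [a-zA-Z0-9._] fit in six bits.
static bool isChar6(unsigned char V) {
  return (V >= 'a' && V <= 'z') || (V >= 'A' && V <= 'Z') ||
         (V >= '0' && V <= '9') || V == '.' || V == '_';
}

// Returns 6, 7, or 8: the narrowest per-character width for Str.
static unsigned charWidthFor(const std::string &Str) {
  bool Is6 = true, Is7 = true;
  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
    unsigned char V = Str[i];
    Is6 &= isChar6(V);
    Is7 &= (V & 128) == 0;
  }
  return Is6 ? 6 : (Is7 ? 7 : 8);
}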
@@ -1105,10 +1136,18 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
}
break;
case Instruction::Switch:
- Code = bitc::FUNC_CODE_INST_SWITCH;
- Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i)));
+ {
+ Code = bitc::FUNC_CODE_INST_SWITCH;
+ SwitchInst &SI = cast<SwitchInst>(I);
+ Vals.push_back(VE.getTypeID(SI.getCondition()->getType()));
+ Vals.push_back(VE.getValueID(SI.getCondition()));
+ Vals.push_back(VE.getValueID(SI.getDefaultDest()));
+ for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
+ i != e; ++i) {
+ Vals.push_back(VE.getValueID(i.getCaseValue()));
+ Vals.push_back(VE.getValueID(i.getCaseSuccessor()));
+ }
+ }
break;
case Instruction::IndirectBr:
Code = bitc::FUNC_CODE_INST_INDIRECTBR;
@@ -1146,9 +1185,6 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
Code = bitc::FUNC_CODE_INST_RESUME;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
break;
- case Instruction::Unwind:
- Code = bitc::FUNC_CODE_INST_UNWIND;
- break;
case Instruction::Unreachable:
Code = bitc::FUNC_CODE_INST_UNREACHABLE;
AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV;
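The rewritten Switch case now emits an explicit record: the condition's type and value, the default destination, then one (case value, successor) pair per case. A sketch of the decode loop a reader of this layout might use (names hypothetical, assumes a well-formed record):

#include <cstdint>
#include <utility>
#include <vector>

// Record layout: [condty, condval, defaultbb, (caseval, casebb)*]
struct SwitchRecord {
  uint64_t CondTy, CondVal, DefaultBB;
  std::vector<std::pair<uint64_t, uint64_t> > Cases; // (value ID, block ID)
};

static SwitchRecord decodeSwitch(const std::vector<uint64_t> &Vals) {
  SwitchRecord R = { Vals[0], Vals[1], Vals[2],
                     std::vector<std::pair<uint64_t, uint64_t> >() };
  for (size_t i = 3; i + 1 < Vals.size(); i += 2)
    R.Cases.push_back(std::make_pair(Vals[i], Vals[i + 1]));
  return R;
}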
@@ -1573,6 +1609,102 @@ static void WriteBlockInfo(const ValueEnumerator &VE, BitstreamWriter &Stream) {
Stream.ExitBlock();
}
+// Sort the Users based on the order in which the reader parses the bitcode
+// file.
+static bool bitcodereader_order(const User *lhs, const User *rhs) {
+ // TODO: Implement.
+ return true;
+}
+
+static void WriteUseList(const Value *V, const ValueEnumerator &VE,
+ BitstreamWriter &Stream) {
+
+ // One or zero uses can't get out of order.
+ if (V->use_empty() || V->hasNUses(1))
+ return;
+
+ // Make a copy of the in-memory use-list for sorting.
+ unsigned UseListSize = std::distance(V->use_begin(), V->use_end());
+ SmallVector<const User*, 8> UseList;
+ UseList.reserve(UseListSize);
+ for (Value::const_use_iterator I = V->use_begin(), E = V->use_end();
+ I != E; ++I) {
+ const User *U = *I;
+ UseList.push_back(U);
+ }
+
+ // Sort the copy based on the order read by the BitcodeReader.
+ std::sort(UseList.begin(), UseList.end(), bitcodereader_order);
+
+ // TODO: Generate a diff between the BitcodeWriter in-memory use-list and the
+ // sorted list (i.e., the expected BitcodeReader in-memory use-list).
+
+ // TODO: Emit the USELIST_CODE_ENTRYs.
+}
+
+static void WriteFunctionUseList(const Function *F, ValueEnumerator &VE,
+ BitstreamWriter &Stream) {
+ VE.incorporateFunction(*F);
+
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI)
+ WriteUseList(AI, VE, Stream);
+ for (Function::const_iterator BB = F->begin(), FE = F->end(); BB != FE;
+ ++BB) {
+ WriteUseList(BB, VE, Stream);
+ for (BasicBlock::const_iterator II = BB->begin(), IE = BB->end(); II != IE;
+ ++II) {
+ WriteUseList(II, VE, Stream);
+ for (User::const_op_iterator OI = II->op_begin(), E = II->op_end();
+ OI != E; ++OI) {
+ if ((isa<Constant>(*OI) && !isa<GlobalValue>(*OI)) ||
+ isa<InlineAsm>(*OI))
+ WriteUseList(*OI, VE, Stream);
+ }
+ }
+ }
+ VE.purgeFunction();
+}
+
+// Emit use-lists.
+static void WriteModuleUseLists(const Module *M, ValueEnumerator &VE,
+ BitstreamWriter &Stream) {
+ Stream.EnterSubblock(bitc::USELIST_BLOCK_ID, 3);
+
+ // XXX: this modifies the module, but in a way that should never change the
+ // behavior of any pass or codegen in LLVM. The problem is that GVs may
+ // contain entries in the use_list that do not exist in the Module and are
+ // not stored in the .bc file.
+ for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
+ I != E; ++I)
+ I->removeDeadConstantUsers();
+
+ // Write the global variables.
+ for (Module::const_global_iterator GI = M->global_begin(),
+ GE = M->global_end(); GI != GE; ++GI) {
+ WriteUseList(GI, VE, Stream);
+
+ // Write the global variable initializers.
+ if (GI->hasInitializer())
+ WriteUseList(GI->getInitializer(), VE, Stream);
+ }
+
+ // Write the functions.
+ for (Module::const_iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI) {
+ WriteUseList(FI, VE, Stream);
+ if (!FI->isDeclaration())
+ WriteFunctionUseList(FI, VE, Stream);
+ }
+
+ // Write the aliases.
+ for (Module::const_alias_iterator AI = M->alias_begin(), AE = M->alias_end();
+ AI != AE; ++AI) {
+ WriteUseList(AI, VE, Stream);
+ WriteUseList(AI->getAliasee(), VE, Stream);
+ }
+
+ Stream.ExitBlock();
+}
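Note that bitcodereader_order above is a stub: a comparator that always returns true is not a strict weak ordering, so std::sort would have undefined behavior once this code path does real work. A valid comparator would presumably rank users by a precomputed parse index; a hypothetical sketch:

#include "llvm/ADT/DenseMap.h"
#include "llvm/User.h"
using namespace llvm;

// Hypothetical: ParseOrder maps each User to the index at which the
// BitcodeReader would materialize it. Comparing indices is a genuine
// strict weak ordering, unlike the always-true placeholder.
static bool bitcodeReaderOrder(
    const DenseMap<const User *, unsigned> &ParseOrder,
    const User *LHS, const User *RHS) {
  return ParseOrder.lookup(LHS) < ParseOrder.lookup(RHS);
}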
/// WriteModule - Emit the specified module to the bitstream.
static void WriteModule(const Module *M, BitstreamWriter &Stream) {
@@ -1607,17 +1739,21 @@ static void WriteModule(const Module *M, BitstreamWriter &Stream) {
// Emit metadata.
WriteModuleMetadata(M, VE, Stream);
- // Emit function bodies.
- for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F)
- if (!F->isDeclaration())
- WriteFunction(*F, VE, Stream);
-
// Emit metadata.
WriteModuleMetadataStore(M, Stream);
// Emit names for globals/functions etc.
WriteValueSymbolTable(M->getValueSymbolTable(), VE, Stream);
+ // Emit use-lists.
+ if (EnablePreserveUseListOrdering)
+ WriteModuleUseLists(M, VE, Stream);
+
+ // Emit function bodies.
+ for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F)
+ if (!F->isDeclaration())
+ WriteFunction(*F, VE, Stream);
+
Stream.ExitBlock();
}
@@ -1639,7 +1775,17 @@ enum {
DarwinBCHeaderSize = 5*4
};
-static void EmitDarwinBCHeader(BitstreamWriter &Stream, const Triple &TT) {
+static void WriteInt32ToBuffer(uint32_t Value, SmallVectorImpl<char> &Buffer,
+ uint32_t &Position) {
+ Buffer[Position + 0] = (unsigned char) (Value >> 0);
+ Buffer[Position + 1] = (unsigned char) (Value >> 8);
+ Buffer[Position + 2] = (unsigned char) (Value >> 16);
+ Buffer[Position + 3] = (unsigned char) (Value >> 24);
+ Position += 4;
+}
+
+static void EmitDarwinBCHeaderAndTrailer(SmallVectorImpl<char> &Buffer,
+ const Triple &TT) {
unsigned CPUType = ~0U;
// Match x86_64-*, i[3-9]86-*, powerpc-*, powerpc64-*, arm-*, thumb-*,
@@ -1666,63 +1812,55 @@ static void EmitDarwinBCHeader(BitstreamWriter &Stream, const Triple &TT) {
CPUType = DARWIN_CPU_TYPE_ARM;
// Traditional Bitcode starts after header.
+ assert(Buffer.size() >= DarwinBCHeaderSize &&
+ "Expected header size to be reserved");
unsigned BCOffset = DarwinBCHeaderSize;
+ unsigned BCSize = Buffer.size()-DarwinBCHeaderSize;
- Stream.Emit(0x0B17C0DE, 32);
- Stream.Emit(0 , 32); // Version.
- Stream.Emit(BCOffset , 32);
- Stream.Emit(0 , 32); // Filled in later.
- Stream.Emit(CPUType , 32);
-}
-
-/// EmitDarwinBCTrailer - Emit the darwin epilog after the bitcode file and
-/// finalize the header.
-static void EmitDarwinBCTrailer(BitstreamWriter &Stream, unsigned BufferSize) {
- // Update the size field in the header.
- Stream.BackpatchWord(DarwinBCSizeFieldOffset, BufferSize-DarwinBCHeaderSize);
+ // Write the magic and version.
+ unsigned Position = 0;
+ WriteInt32ToBuffer(0x0B17C0DE , Buffer, Position);
+ WriteInt32ToBuffer(0 , Buffer, Position); // Version.
+ WriteInt32ToBuffer(BCOffset , Buffer, Position);
+ WriteInt32ToBuffer(BCSize , Buffer, Position);
+ WriteInt32ToBuffer(CPUType , Buffer, Position);
// If the file is not a multiple of 16 bytes, insert dummy padding.
- while (BufferSize & 15) {
- Stream.Emit(0, 8);
- ++BufferSize;
- }
+ while (Buffer.size() & 15)
+ Buffer.push_back(0);
}
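For reference, the Darwin wrapper assembled above is five little-endian 32-bit fields (20 bytes), followed by the raw bitcode and zero padding up to a 16-byte multiple. A standalone emitter of just the header, with the same field order:

#include <cstdint>
#include <vector>

static void appendLE32(std::vector<uint8_t> &Out, uint32_t V) {
  for (int Shift = 0; Shift < 32; Shift += 8)
    Out.push_back(uint8_t(V >> Shift)); // little-endian byte order
}

static void appendDarwinWrapperHeader(std::vector<uint8_t> &Out,
                                      uint32_t BCSize, uint32_t CPUType) {
  const uint32_t HeaderSize = 20;      // five 32-bit fields
  appendLE32(Out, 0x0B17C0DE);         // magic
  appendLE32(Out, 0);                  // version
  appendLE32(Out, HeaderSize);         // offset of the bitcode proper
  appendLE32(Out, BCSize);             // size of the bitcode proper
  appendLE32(Out, CPUType);            // Mach-O CPU type
}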
-
/// WriteBitcodeToFile - Write the specified module to the specified output
/// stream.
void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out) {
- std::vector<unsigned char> Buffer;
- BitstreamWriter Stream(Buffer);
-
+ SmallVector<char, 1024> Buffer;
Buffer.reserve(256*1024);
- WriteBitcodeToStream( M, Stream );
-
- // Write the generated bitstream to "Out".
- Out.write((char*)&Buffer.front(), Buffer.size());
-}
-
-/// WriteBitcodeToStream - Write the specified module to the specified output
-/// stream.
-void llvm::WriteBitcodeToStream(const Module *M, BitstreamWriter &Stream) {
- // If this is darwin or another generic macho target, emit a file header and
- // trailer if needed.
+ // If this is darwin or another generic macho target, reserve space for the
+ // header.
Triple TT(M->getTargetTriple());
if (TT.isOSDarwin())
- EmitDarwinBCHeader(Stream, TT);
-
- // Emit the file header.
- Stream.Emit((unsigned)'B', 8);
- Stream.Emit((unsigned)'C', 8);
- Stream.Emit(0x0, 4);
- Stream.Emit(0xC, 4);
- Stream.Emit(0xE, 4);
- Stream.Emit(0xD, 4);
-
- // Emit the module.
- WriteModule(M, Stream);
+ Buffer.insert(Buffer.begin(), DarwinBCHeaderSize, 0);
+
+ // Emit the module into the buffer.
+ {
+ BitstreamWriter Stream(Buffer);
+
+ // Emit the file header.
+ Stream.Emit((unsigned)'B', 8);
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit(0x0, 4);
+ Stream.Emit(0xC, 4);
+ Stream.Emit(0xE, 4);
+ Stream.Emit(0xD, 4);
+
+ // Emit the module.
+ WriteModule(M, Stream);
+ }
if (TT.isOSDarwin())
- EmitDarwinBCTrailer(Stream, Stream.getBuffer().size());
+ EmitDarwinBCHeaderAndTrailer(Buffer, TT);
+
+ // Write the generated bitstream to "Out".
+ Out.write((char*)&Buffer.front(), Buffer.size());
}
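Callers of the reworked entry point are unaffected; any raw_ostream still works. A usage sketch against the LLVM 3.x API of this import (error handling kept minimal):

#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Module.h"
#include "llvm/Support/raw_ostream.h"

// Writes M to Path; returns false if the file could not be opened.
static bool writeModule(const llvm::Module &M, const char *Path) {
  std::string Err;
  llvm::raw_fd_ostream OS(Path, Err, llvm::raw_fd_ostream::F_Binary);
  if (!Err.empty())
    return false;
  llvm::WriteBitcodeToFile(&M, OS);
  return true;
}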
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index 9ae9905..1ed9004 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -19,6 +19,8 @@
#include "llvm/Module.h"
#include "llvm/ValueSymbolTable.h"
#include "llvm/Instructions.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
@@ -107,7 +109,6 @@ ValueEnumerator::ValueEnumerator(const Module *M) {
OptimizeConstants(FirstConstant, Values.size());
}
-
unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const {
InstructionMapType::const_iterator I = InstructionMap.find(Inst);
assert(I != InstructionMap.end() && "Instruction is not mapped!");
@@ -130,6 +131,43 @@ unsigned ValueEnumerator::getValueID(const Value *V) const {
return I->second-1;
}
+void ValueEnumerator::dump() const {
+ print(dbgs(), ValueMap, "Default");
+ dbgs() << '\n';
+ print(dbgs(), MDValueMap, "MetaData");
+ dbgs() << '\n';
+}
+
+void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
+ const char *Name) const {
+
+ OS << "Map Name: " << Name << "\n";
+ OS << "Size: " << Map.size() << "\n";
+ for (ValueMapType::const_iterator I = Map.begin(),
+ E = Map.end(); I != E; ++I) {
+
+ const Value *V = I->first;
+ if (V->hasName())
+ OS << "Value: " << V->getName();
+ else
+ OS << "Value: [null]\n";
+ V->dump();
+
+ OS << " Uses(" << std::distance(V->use_begin(),V->use_end()) << "):";
+ for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
+ UI != UE; ++UI) {
+ if (UI != V->use_begin())
+ OS << ",";
+ if((*UI)->hasName())
+ OS << " " << (*UI)->getName();
+ else
+ OS << " [null]";
+
+ }
+ OS << "\n\n";
+ }
+}
+
// Optimize constant ordering.
namespace {
struct CstSortPredicate {
@@ -283,10 +321,6 @@ void ValueEnumerator::EnumerateValue(const Value *V) {
if (const Constant *C = dyn_cast<Constant>(V)) {
if (isa<GlobalValue>(C)) {
// Initializers for globals are handled explicitly elsewhere.
- } else if (isa<ConstantArray>(C) && cast<ConstantArray>(C)->isString()) {
- // Do not enumerate the initializers for an array of simple characters.
- // The initializers just pollute the value table, and we emit the strings
- // specially.
} else if (C->getNumOperands()) {
// If a constant has operands, enumerate them. This makes sure that if a
// constant has uses (for example an array of const ints), that they are
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
index b6fc920..a6ca536 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
@@ -32,6 +32,7 @@ class NamedMDNode;
class AttrListPtr;
class ValueSymbolTable;
class MDSymbolTable;
+class raw_ostream;
class ValueEnumerator {
public:
@@ -83,6 +84,9 @@ private:
public:
ValueEnumerator(const Module *M);
+ void dump() const;
+ void print(raw_ostream &OS, const ValueMapType &Map, const char *Name) const;
+
unsigned getValueID(const Value *V) const;
unsigned getTypeID(Type *T) const {
diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 25842a7..822a564 100644
--- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -148,7 +148,7 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
assert(State == NULL);
State = new AggressiveAntiDepState(TRI->getNumRegs(), BB);
- bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());
+ bool IsReturnBlock = (!BB->empty() && BB->back().isReturn());
std::vector<unsigned> &KillIndices = State->GetKillIndices();
std::vector<unsigned> &DefIndices = State->GetDefIndices();
@@ -157,7 +157,7 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
E = MRI.liveout_end(); I != E; ++I) {
- for (const unsigned *Alias = TRI->getOverlaps(*I);
+ for (const uint16_t *Alias = TRI->getOverlaps(*I);
unsigned Reg = *Alias; ++Alias) {
State->UnionGroups(Reg, 0);
KillIndices[Reg] = BB->size();
@@ -173,7 +173,7 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
SE = BB->succ_end(); SI != SE; ++SI)
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- for (const unsigned *Alias = TRI->getOverlaps(*I);
+ for (const uint16_t *Alias = TRI->getOverlaps(*I);
unsigned Reg = *Alias; ++Alias) {
State->UnionGroups(Reg, 0);
KillIndices[Reg] = BB->size();
@@ -186,10 +186,10 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
// callee-saved register that is not saved in the prolog.
const MachineFrameInfo *MFI = MF.getFrameInfo();
BitVector Pristine = MFI->getPristineRegs(BB);
- for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
+ for (const uint16_t *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
unsigned Reg = *I;
if (!IsReturnBlock && !Pristine.test(Reg)) continue;
- for (const unsigned *Alias = TRI->getOverlaps(Reg);
+ for (const uint16_t *Alias = TRI->getOverlaps(Reg);
unsigned AliasReg = *Alias; ++Alias) {
State->UnionGroups(AliasReg, 0);
KillIndices[AliasReg] = BB->size();
@@ -265,7 +265,7 @@ void AggressiveAntiDepBreaker::GetPassthruRegs(MachineInstr *MI,
IsImplicitDefUse(MI, MO)) {
const unsigned Reg = MO.getReg();
PassthruRegs.insert(Reg);
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
PassthruRegs.insert(*Subreg);
}
@@ -333,7 +333,7 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
}
// Repeat for subregisters.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
unsigned SubregReg = *Subreg;
if (!State->IsLive(SubregReg)) {
@@ -384,7 +384,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
// defined in a call must not be changed (ABI).
- if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+ if (MI->isCall() || MI->hasExtraDefRegAllocReq() ||
TII->isPredicated(MI)) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
@@ -392,7 +392,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
// Any aliased registers that are live at this point are completely or
// partially defined here, so group those aliases with Reg.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
if (State->IsLive(AliasReg)) {
State->UnionGroups(Reg, AliasReg);
@@ -423,7 +423,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
continue;
// Update def for Reg and aliases.
- for (const unsigned *Alias = TRI->getOverlaps(Reg);
+ for (const uint16_t *Alias = TRI->getOverlaps(Reg);
unsigned AliasReg = *Alias; ++Alias)
DefIndices[AliasReg] = Count;
}
@@ -451,8 +451,8 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// instruction which may not be executed. The second R6 def may or may not
// re-define R6 so it's not safe to change it since the last R6 use cannot be
// changed.
- bool Special = MI->getDesc().isCall() ||
- MI->getDesc().hasExtraSrcRegAllocReq() ||
+ bool Special = MI->isCall() ||
+ MI->hasExtraSrcRegAllocReq() ||
TII->isPredicated(MI);
// Scan the register uses for this instruction and update
@@ -678,7 +678,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
goto next_super_reg;
} else {
bool found = false;
- for (const unsigned *Alias = TRI->getAliasSet(NewReg);
+ for (const uint16_t *Alias = TRI->getAliasSet(NewReg);
*Alias; ++Alias) {
unsigned AliasReg = *Alias;
if (State->IsLive(AliasReg) ||
@@ -780,6 +780,9 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
I != E; --Count) {
MachineInstr *MI = --I;
+ if (MI->isDebugValue())
+ continue;
+
DEBUG(dbgs() << "Anti: ");
DEBUG(MI->dump());
diff --git a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
index 1005f10..87f6431 100644
--- a/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
+++ b/contrib/llvm/lib/CodeGen/AllocationOrder.cpp
@@ -41,7 +41,7 @@ AllocationOrder::AllocationOrder(unsigned VirtReg,
if (HintPair.first) {
const TargetRegisterInfo &TRI = VRM.getTargetRegInfo();
// The remaining allocation order may depend on the hint.
- ArrayRef<unsigned> Order =
+ ArrayRef<uint16_t> Order =
TRI.getRawAllocationOrder(RC, HintPair.first, Hint,
VRM.getMachineFunction());
if (Order.empty())
diff --git a/contrib/llvm/lib/CodeGen/AllocationOrder.h b/contrib/llvm/lib/CodeGen/AllocationOrder.h
index d1e48a1..0ce7e0c 100644
--- a/contrib/llvm/lib/CodeGen/AllocationOrder.h
+++ b/contrib/llvm/lib/CodeGen/AllocationOrder.h
@@ -34,8 +34,7 @@ public:
/// AllocationOrder - Create a new AllocationOrder for VirtReg.
/// @param VirtReg Virtual register to allocate for.
/// @param VRM Virtual register map for function.
- /// @param ReservedRegs Set of reserved registers as returned by
- /// TargetRegisterInfo::getReservedRegs().
+ /// @param RegClassInfo Information about reserved and allocatable registers.
AllocationOrder(unsigned VirtReg,
const VirtRegMap &VRM,
const RegisterClassInfo &RegClassInfo);
diff --git a/contrib/llvm/lib/CodeGen/Analysis.cpp b/contrib/llvm/lib/CodeGen/Analysis.cpp
index fafc010..00874d4 100644
--- a/contrib/llvm/lib/CodeGen/Analysis.cpp
+++ b/contrib/llvm/lib/CodeGen/Analysis.cpp
@@ -1,4 +1,4 @@
-//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities --*- C++ ------*-===//
+//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Analysis.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
@@ -149,33 +150,37 @@ llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
- ISD::CondCode FPC, FOC;
switch (Pred) {
- case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
- case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
- case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
- case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
- case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
- case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
- case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
- case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break;
- case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break;
- case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
- case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
- case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
- case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
- case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
- case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
- case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
- default:
- llvm_unreachable("Invalid FCmp predicate opcode!");
- FOC = FPC = ISD::SETFALSE;
- break;
+ case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
+ case FCmpInst::FCMP_OEQ: return ISD::SETOEQ;
+ case FCmpInst::FCMP_OGT: return ISD::SETOGT;
+ case FCmpInst::FCMP_OGE: return ISD::SETOGE;
+ case FCmpInst::FCMP_OLT: return ISD::SETOLT;
+ case FCmpInst::FCMP_OLE: return ISD::SETOLE;
+ case FCmpInst::FCMP_ONE: return ISD::SETONE;
+ case FCmpInst::FCMP_ORD: return ISD::SETO;
+ case FCmpInst::FCMP_UNO: return ISD::SETUO;
+ case FCmpInst::FCMP_UEQ: return ISD::SETUEQ;
+ case FCmpInst::FCMP_UGT: return ISD::SETUGT;
+ case FCmpInst::FCMP_UGE: return ISD::SETUGE;
+ case FCmpInst::FCMP_ULT: return ISD::SETULT;
+ case FCmpInst::FCMP_ULE: return ISD::SETULE;
+ case FCmpInst::FCMP_UNE: return ISD::SETUNE;
+ case FCmpInst::FCMP_TRUE: return ISD::SETTRUE;
+ default: llvm_unreachable("Invalid FCmp predicate opcode!");
+ }
+}
+
+ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
+ switch (CC) {
+ case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
+ case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
+ case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
+ case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
+ case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
+ case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
+ default: return CC;
}
- if (NoNaNsFPMath)
- return FOC;
- else
- return FPC;
}
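The old code chose between the ordered and unordered condition codes inside getFCmpCondCode using the global NoNaNsFPMath flag; the split lets each caller consult its own target options instead. A hypothetical helper showing how the two functions combine:

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Instructions.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

// Hypothetical: select the ISD condition code for Pred, relaxing the
// ordered/unordered distinction when the target promises no NaNs.
static ISD::CondCode getCondCodeFor(FCmpInst::Predicate Pred,
                                    const TargetMachine &TM) {
  ISD::CondCode CC = getFCmpCondCode(Pred);
  if (TM.Options.NoNaNsFPMath)
    CC = getFCmpCodeWithoutNaN(CC); // e.g. SETOEQ and SETUEQ both become SETEQ
  return CC;
}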
/// getICmpCondCode - Return the ISD condition code corresponding to
@@ -195,7 +200,6 @@ ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
case ICmpInst::ICMP_UGT: return ISD::SETUGT;
default:
llvm_unreachable("Invalid ICmp predicate opcode!");
- return ISD::SETNE;
}
}
@@ -221,12 +225,13 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
// longjmp on x86), it can end up causing miscompilation that has not
// been fully understood.
if (!Ret &&
- (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
+ (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
+ !isa<UnreachableInst>(Term))) return false;
// If I will have a chain, make sure no other instruction that will have a
// chain interposes between I and the return.
if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
- !I->isSafeToSpeculativelyExecute())
+ !isSafeToSpeculativelyExecute(I))
for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
--BBI) {
if (&*BBI == I)
@@ -235,7 +240,7 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
if (isa<DbgInfoIntrinsic>(BBI))
continue;
if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
- !BBI->isSafeToSpeculativelyExecute())
+ !isSafeToSpeculativelyExecute(BBI))
return false;
}
@@ -250,7 +255,7 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
// Conservatively require the attributes of the call to match those of
// the return. Ignore noalias because it doesn't affect the call sequence.
const Function *F = ExitBB->getParent();
- unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
+ Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
return false;
@@ -285,12 +290,12 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
}
bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
- const TargetLowering &TLI) {
+ SDValue &Chain, const TargetLowering &TLI) {
const Function *F = DAG.getMachineFunction().getFunction();
// Conservatively require the attributes of the call to match those of
// the return. Ignore noalias because it doesn't affect the call sequence.
- unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
+ Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
if (CallerRetAttr & ~Attribute::NoAlias)
return false;
@@ -299,5 +304,5 @@ bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
return false;
// Check if the only use is a function return node.
- return TLI.isUsedByReturnOnly(Node);
+ return TLI.isUsedByReturnOnly(Node, Chain);
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
index 3f23873..b60fda8 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
@@ -29,6 +29,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/ADT/SmallString.h"
@@ -36,6 +37,12 @@
#include "llvm/ADT/Twine.h"
using namespace llvm;
+cl::opt<bool>
+EnableARMEHABIDescriptors("arm-enable-ehabi-descriptors", cl::Hidden,
+ cl::desc("Generate ARM EHABI tables with unwinding descriptors"),
+ cl::init(false));
+
+
ARMException::ARMException(AsmPrinter *A)
: DwarfException(A),
shouldEmitTable(false), shouldEmitMoves(false), shouldEmitTableModule(false)
@@ -72,13 +79,15 @@ void ARMException::EndFunction() {
Asm->OutStreamer.EmitPersonality(PerSym);
}
- // Map all labels and get rid of any dead landing pads.
- MMI->TidyLandingPads();
+ if (EnableARMEHABIDescriptors) {
+ // Map all labels and get rid of any dead landing pads.
+ MMI->TidyLandingPads();
- Asm->OutStreamer.EmitHandlerData();
+ Asm->OutStreamer.EmitHandlerData();
- // Emit actual exception table
- EmitExceptionTable();
+ // Emit actual exception table
+ EmitExceptionTable();
+ }
}
Asm->OutStreamer.EmitFnEnd();
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 1999f36..b0b2ff4 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -100,6 +100,7 @@ AsmPrinter::AsmPrinter(TargetMachine &tm, MCStreamer &Streamer)
OutStreamer(Streamer),
LastMI(0), LastFn(0), Counter(~0U), SetCounter(0) {
DD = 0; DE = 0; MMI = 0; LI = 0;
+ CurrentFnSym = CurrentFnSymForSize = 0;
GCMetadataPrinters = 0;
VerboseAsm = Streamer.isVerboseAsm();
}
@@ -613,6 +614,10 @@ bool AsmPrinter::needsSEHMoves() {
MF->getFunction()->needsUnwindTableEntry();
}
+bool AsmPrinter::needsRelocationsForDwarfStringPool() const {
+ return MAI->doesDwarfUseRelocationsForStringPool();
+}
+
void AsmPrinter::emitPrologLabel(const MachineInstr &MI) {
MCSymbol *Label = MI.getOperand(0).getMCSymbol();
@@ -732,6 +737,18 @@ void AsmPrinter::EmitFunctionBody() {
OutStreamer.EmitRawText(StringRef("\tnop\n"));
}
+ const Function *F = MF->getFunction();
+ for (Function::const_iterator i = F->begin(), e = F->end(); i != e; ++i) {
+ const BasicBlock *BB = i;
+ if (!BB->hasAddressTaken())
+ continue;
+ MCSymbol *Sym = GetBlockAddressSymbol(BB);
+ if (Sym->isDefined())
+ continue;
+ OutStreamer.AddComment("Address of block that was removed by CodeGen");
+ OutStreamer.EmitLabel(Sym);
+ }
+
// Emit target-specific gunk after the function body.
EmitFunctionBodyEnd();
@@ -745,7 +762,8 @@ void AsmPrinter::EmitFunctionBody() {
const MCExpr *SizeExp =
MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create(FnEndLabel, OutContext),
- MCSymbolRefExpr::Create(CurrentFnSym, OutContext),
+ MCSymbolRefExpr::Create(CurrentFnSymForSize,
+ OutContext),
OutContext);
OutStreamer.EmitELFSize(CurrentFnSym, SizeExp);
}
@@ -780,7 +798,7 @@ void AsmPrinter::EmitDwarfRegOp(const MachineLocation &MLoc) const {
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
int Reg = TRI->getDwarfRegNum(MLoc.getReg(), false);
- for (const unsigned *SR = TRI->getSuperRegisters(MLoc.getReg());
+ for (const uint16_t *SR = TRI->getSuperRegisters(MLoc.getReg());
*SR && Reg < 0; ++SR) {
Reg = TRI->getDwarfRegNum(*SR, false);
// FIXME: Get the bit range this register uses of the superregister
@@ -841,6 +859,12 @@ bool AsmPrinter::doFinalization(Module &M) {
EmitVisibility(Name, V, false);
}
+ // Emit module flags.
+ SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
+ M.getModuleFlagsMetadata(ModuleFlags);
+ if (!ModuleFlags.empty())
+ getObjFileLowering().emitModuleFlags(OutStreamer, ModuleFlags, Mang, TM);
+
// Finalize debug and EH information.
if (DE) {
{
@@ -929,6 +953,7 @@ void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
this->MF = &MF;
// Get the function symbol.
CurrentFnSym = Mang->getSymbol(MF.getFunction());
+ CurrentFnSymForSize = CurrentFnSym;
if (isVerbose())
LI = &getAnalysis<MachineLoopInfo>();
@@ -1120,7 +1145,7 @@ void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MCExpr *Value = 0;
switch (MJTI->getEntryKind()) {
case MachineJumpTableInfo::EK_Inline:
- llvm_unreachable("Cannot emit EK_Inline jump table entry"); break;
+ llvm_unreachable("Cannot emit EK_Inline jump table entry");
case MachineJumpTableInfo::EK_Custom32:
Value = TM.getTargetLowering()->LowerCustomJumpTableEntry(MJTI, MBB, UID,
OutContext);
@@ -1139,6 +1164,15 @@ void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
return;
}
+ case MachineJumpTableInfo::EK_GPRel64BlockAddress: {
+ // EK_GPRel64BlockAddress - Each entry is the address of a block, encoded
+ // with a relocation as gp-relative, e.g.:
+ // .gpdword LBB123
+ MCSymbol *MBBSym = MBB->getSymbol();
+ OutStreamer.EmitGPRel64Value(MCSymbolRefExpr::Create(MBBSym, OutContext));
+ return;
+ }
+
case MachineJumpTableInfo::EK_LabelDifference32: {
// EK_LabelDifference32 - Each entry is the address of the block minus
// the address of the jump table. This is used for PIC jump tables where
@@ -1191,12 +1225,8 @@ bool AsmPrinter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) {
assert(GV->hasInitializer() && "Not a special LLVM global!");
- const TargetData *TD = TM.getTargetData();
- unsigned Align = Log2_32(TD->getPointerPrefAlignment());
if (GV->getName() == "llvm.global_ctors") {
- OutStreamer.SwitchSection(getObjFileLowering().getStaticCtorSection());
- EmitAlignment(Align);
- EmitXXStructorList(GV->getInitializer());
+ EmitXXStructorList(GV->getInitializer(), /* isCtor */ true);
if (TM.getRelocationModel() == Reloc::Static &&
MAI->hasStaticCtorDtorReferenceInStaticMode()) {
@@ -1208,9 +1238,7 @@ bool AsmPrinter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) {
}
if (GV->getName() == "llvm.global_dtors") {
- OutStreamer.SwitchSection(getObjFileLowering().getStaticDtorSection());
- EmitAlignment(Align);
- EmitXXStructorList(GV->getInitializer());
+ EmitXXStructorList(GV->getInitializer(), /* isCtor */ false);
if (TM.getRelocationModel() == Reloc::Static &&
MAI->hasStaticCtorDtorReferenceInStaticMode()) {
@@ -1240,7 +1268,7 @@ void AsmPrinter::EmitLLVMUsedList(const Constant *List) {
}
}
-typedef std::pair<int, Constant*> Structor;
+typedef std::pair<unsigned, Constant*> Structor;
static bool priority_order(const Structor& lhs, const Structor& rhs) {
return lhs.first < rhs.first;
@@ -1248,7 +1276,7 @@ static bool priority_order(const Structor& lhs, const Structor& rhs) {
/// EmitXXStructorList - Emit the ctor or dtor list taking into account the init
/// priority.
-void AsmPrinter::EmitXXStructorList(const Constant *List) {
+void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) {
// Should be an array of '{ int, void ()* }' structs. The first value is the
// init priority.
if (!isa<ConstantArray>(List)) return;
@@ -1274,19 +1302,20 @@ void AsmPrinter::EmitXXStructorList(const Constant *List) {
CS->getOperand(1)));
}
- // Emit the function pointers in reverse priority order.
- switch (MAI->getStructorOutputOrder()) {
- case Structors::None:
- break;
- case Structors::PriorityOrder:
- std::sort(Structors.begin(), Structors.end(), priority_order);
- break;
- case Structors::ReversePriorityOrder:
- std::sort(Structors.rbegin(), Structors.rend(), priority_order);
- break;
+ // Emit the function pointers in the target-specific order
+ const TargetData *TD = TM.getTargetData();
+ unsigned Align = Log2_32(TD->getPointerPrefAlignment());
+ std::stable_sort(Structors.begin(), Structors.end(), priority_order);
+ for (unsigned i = 0, e = Structors.size(); i != e; ++i) {
+ const MCSection *OutputSection =
+ (isCtor ?
+ getObjFileLowering().getStaticCtorSection(Structors[i].first) :
+ getObjFileLowering().getStaticDtorSection(Structors[i].first));
+ OutStreamer.SwitchSection(OutputSection);
+ if (OutStreamer.getCurrentSection() != OutStreamer.getPreviousSection())
+ EmitAlignment(Align);
+ EmitXXStructor(Structors[i].second);
}
- for (unsigned i = 0, e = Structors.size(); i != e; ++i)
- EmitGlobalConstant(Structors[i].second);
}
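With one output section per priority, a stable sort groups the structors by priority while preserving source order among equal priorities, which matters for init_priority semantics. The sorting core, reduced to a standalone sketch:

#include <algorithm>
#include <utility>
#include <vector>

typedef std::pair<unsigned, const char *> Structor; // (priority, name)

static bool priorityOrder(const Structor &L, const Structor &R) {
  return L.first < R.first;
}

// stable_sort keeps the relative order of equal keys, so two ctors with
// the same priority still run in their original link order.
static void sortStructors(std::vector<Structor> &S) {
  std::stable_sort(S.begin(), S.end(), priorityOrder);
}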
//===--------------------------------------------------------------------===//
@@ -1423,7 +1452,6 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
if (CE == 0) {
llvm_unreachable("Unknown constant value to lower!");
- return MCConstantExpr::Create(0, Ctx);
}
switch (CE->getOpcode()) {
@@ -1445,7 +1473,6 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
!AP.MF ? 0 : AP.MF->getFunction()->getParent());
report_fatal_error(OS.str());
}
- return MCConstantExpr::Create(0, Ctx);
case Instruction::GetElementPtr: {
const TargetData &TD = *AP.TM.getTargetData();
// Generate a symbolic expression for the byte address
@@ -1543,6 +1570,19 @@ static void EmitGlobalConstantImpl(const Constant *C, unsigned AddrSpace,
/// isRepeatedByteSequence - Determine whether the given value is
/// composed of a repeated sequence of identical bytes and return the
/// byte value. If it is not a repeated sequence, return -1.
+static int isRepeatedByteSequence(const ConstantDataSequential *V) {
+ StringRef Data = V->getRawDataValues();
+ assert(!Data.empty() && "Empty aggregates should be CAZ node");
+ char C = Data[0];
+ for (unsigned i = 1, e = Data.size(); i != e; ++i)
+ if (Data[i] != C) return -1;
+ return static_cast<uint8_t>(C); // Ensure 255 is not returned as -1.
+}
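getRawDataValues exposes the packed bytes of a ConstantDataSequential, so a single linear scan decides whether the whole initializer can collapse into one .fill directive. The same scan over a plain buffer:

#include <cstddef>

// Standalone version of the scan: returns the repeated byte value,
// or -1 if the buffer mixes byte values (or is empty).
static int repeatedByte(const char *Data, size_t Size) {
  if (Size == 0)
    return -1;
  char C = Data[0];
  for (size_t i = 1; i != Size; ++i)
    if (Data[i] != C)
      return -1;
  return static_cast<unsigned char>(C); // keep 255 from reading as -1
}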
+
+
+/// isRepeatedByteSequence - Determine whether the given value is
+/// composed of a repeated sequence of identical bytes and return the
+/// byte value. If it is not a repeated sequence, return -1.
static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
@@ -1568,8 +1608,7 @@ static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) {
if (const ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
// Make sure all array elements are sequences of the same repeated
// byte.
- if (CA->getNumOperands() == 0) return -1;
-
+ assert(CA->getNumOperands() != 0 && "Should be a CAZ");
int Byte = isRepeatedByteSequence(CA->getOperand(0), TM);
if (Byte == -1) return -1;
@@ -1580,37 +1619,92 @@ static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) {
}
return Byte;
}
+
+ if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V))
+ return isRepeatedByteSequence(CDS);
return -1;
}
-static void EmitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace,
- AsmPrinter &AP) {
- if (AddrSpace != 0 || !CA->isString()) {
- // Not a string. Print the values in successive locations.
-
- // See if we can aggregate some values. Make sure it can be
- // represented as a series of bytes of the constant value.
- int Value = isRepeatedByteSequence(CA, AP.TM);
-
- if (Value != -1) {
- uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CA->getType());
- AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
+static void EmitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
+ unsigned AddrSpace,AsmPrinter &AP){
+
+ // See if we can aggregate this into a .fill, if so, emit it as such.
+ int Value = isRepeatedByteSequence(CDS, AP.TM);
+ if (Value != -1) {
+ uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CDS->getType());
+ // Don't emit a 1-byte object as a .fill.
+ if (Bytes > 1)
+ return AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
+ }
+
+ // If this can be emitted with .ascii/.asciz, emit it as such.
+ if (CDS->isString())
+ return AP.OutStreamer.EmitBytes(CDS->getAsString(), AddrSpace);
+
+ // Otherwise, emit the values in successive locations.
+ unsigned ElementByteSize = CDS->getElementByteSize();
+ if (isa<IntegerType>(CDS->getElementType())) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ if (AP.isVerbose())
+ AP.OutStreamer.GetCommentOS() << format("0x%" PRIx64 "\n",
+ CDS->getElementAsInteger(i));
+ AP.OutStreamer.EmitIntValue(CDS->getElementAsInteger(i),
+ ElementByteSize, AddrSpace);
+ }
+ } else if (ElementByteSize == 4) {
+ // FP Constants are printed as integer constants to avoid losing
+ // precision.
+ assert(CDS->getElementType()->isFloatTy());
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ union {
+ float F;
+ uint32_t I;
+ };
+
+ F = CDS->getElementAsFloat(i);
+ if (AP.isVerbose())
+ AP.OutStreamer.GetCommentOS() << "float " << F << '\n';
+ AP.OutStreamer.EmitIntValue(I, 4, AddrSpace);
}
- else {
- for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i)
- EmitGlobalConstantImpl(CA->getOperand(i), AddrSpace, AP);
+ } else {
+ assert(CDS->getElementType()->isDoubleTy());
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ union {
+ double F;
+ uint64_t I;
+ };
+
+ F = CDS->getElementAsDouble(i);
+ if (AP.isVerbose())
+ AP.OutStreamer.GetCommentOS() << "double " << F << '\n';
+ AP.OutStreamer.EmitIntValue(I, 8, AddrSpace);
}
- return;
}
- // Otherwise, it can be emitted as .ascii.
- SmallVector<char, 128> TmpVec;
- TmpVec.reserve(CA->getNumOperands());
- for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i)
- TmpVec.push_back(cast<ConstantInt>(CA->getOperand(i))->getZExtValue());
+ const TargetData &TD = *AP.TM.getTargetData();
+ unsigned Size = TD.getTypeAllocSize(CDS->getType());
+ unsigned EmittedSize = TD.getTypeAllocSize(CDS->getType()->getElementType()) *
+ CDS->getNumElements();
+ if (unsigned Padding = Size - EmittedSize)
+ AP.OutStreamer.EmitZeros(Padding, AddrSpace);
- AP.OutStreamer.EmitBytes(StringRef(TmpVec.data(), TmpVec.size()), AddrSpace);
+}
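The anonymous union reinterprets a float's bits as an integer so FP constants can be emitted exactly, with no rounding through a value conversion. The memcpy formulation below is the strictly portable C++ equivalent; compilers fold it to the same code:

#include <cstdint>
#include <cstring>

// Bit-exact view of a float as its IEEE-754 encoding.
static uint32_t floatBits(float F) {
  uint32_t I;
  std::memcpy(&I, &F, sizeof(I)); // well-defined, unlike a reinterpret_cast
  return I;
}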
+
+static void EmitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace,
+ AsmPrinter &AP) {
+ // See if we can aggregate some values. Make sure it can be
+ // represented as a series of bytes of the constant value.
+ int Value = isRepeatedByteSequence(CA, AP.TM);
+
+ if (Value != -1) {
+ uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CA->getType());
+ AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
+ }
+ else {
+ for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i)
+ EmitGlobalConstantImpl(CA->getOperand(i), AddrSpace, AP);
+ }
}
static void EmitGlobalConstantVector(const ConstantVector *CV,
@@ -1656,29 +1750,44 @@ static void EmitGlobalConstantStruct(const ConstantStruct *CS,
static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
AsmPrinter &AP) {
- // FP Constants are printed as integer constants to avoid losing
- // precision.
- if (CFP->getType()->isDoubleTy()) {
+ if (CFP->getType()->isHalfTy()) {
if (AP.isVerbose()) {
- double Val = CFP->getValueAPF().convertToDouble();
- AP.OutStreamer.GetCommentOS() << "double " << Val << '\n';
+ SmallString<10> Str;
+ CFP->getValueAPF().toString(Str);
+ AP.OutStreamer.GetCommentOS() << "half " << Str << '\n';
}
-
uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
- AP.OutStreamer.EmitIntValue(Val, 8, AddrSpace);
+ AP.OutStreamer.EmitIntValue(Val, 2, AddrSpace);
return;
}
if (CFP->getType()->isFloatTy()) {
if (AP.isVerbose()) {
float Val = CFP->getValueAPF().convertToFloat();
- AP.OutStreamer.GetCommentOS() << "float " << Val << '\n';
+ uint64_t IntVal = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
+ AP.OutStreamer.GetCommentOS() << "float " << Val << '\n'
+ << " (" << format("0x%x", IntVal) << ")\n";
}
uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
AP.OutStreamer.EmitIntValue(Val, 4, AddrSpace);
return;
}
+ // FP Constants are printed as integer constants to avoid losing
+ // precision.
+ if (CFP->getType()->isDoubleTy()) {
+ if (AP.isVerbose()) {
+ double Val = CFP->getValueAPF().convertToDouble();
+ uint64_t IntVal = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
+ AP.OutStreamer.GetCommentOS() << "double " << Val << '\n'
+ << " (" << format("0x%lx", IntVal) << ")\n";
+ }
+
+ uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
+ AP.OutStreamer.EmitIntValue(Val, 8, AddrSpace);
+ return;
+ }
+
if (CFP->getType()->isX86_FP80Ty()) {
// all long double variants are printed as hex
// API needed to prevent premature destruction
@@ -1742,20 +1851,20 @@ static void EmitGlobalConstantLargeInt(const ConstantInt *CI,
static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
AsmPrinter &AP) {
- if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV)) {
- uint64_t Size = AP.TM.getTargetData()->getTypeAllocSize(CV->getType());
+ const TargetData *TD = AP.TM.getTargetData();
+ uint64_t Size = TD->getTypeAllocSize(CV->getType());
+ if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
return AP.OutStreamer.EmitZeros(Size, AddrSpace);
- }
if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
- unsigned Size = AP.TM.getTargetData()->getTypeAllocSize(CV->getType());
switch (Size) {
case 1:
case 2:
case 4:
case 8:
if (AP.isVerbose())
- AP.OutStreamer.GetCommentOS() << format("0x%llx\n", CI->getZExtValue());
+ AP.OutStreamer.GetCommentOS() << format("0x%" PRIx64 "\n",
+ CI->getZExtValue());
AP.OutStreamer.EmitIntValue(CI->getZExtValue(), Size, AddrSpace);
return;
default:
@@ -1764,29 +1873,45 @@ static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
}
}
- if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV))
- return EmitGlobalConstantArray(CVA, AddrSpace, AP);
-
- if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV))
- return EmitGlobalConstantStruct(CVS, AddrSpace, AP);
-
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV))
return EmitGlobalConstantFP(CFP, AddrSpace, AP);
if (isa<ConstantPointerNull>(CV)) {
- unsigned Size = AP.TM.getTargetData()->getTypeAllocSize(CV->getType());
AP.OutStreamer.EmitIntValue(0, Size, AddrSpace);
return;
}
+ if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(CV))
+ return EmitGlobalConstantDataSequential(CDS, AddrSpace, AP);
+
+ if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV))
+ return EmitGlobalConstantArray(CVA, AddrSpace, AP);
+
+ if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV))
+ return EmitGlobalConstantStruct(CVS, AddrSpace, AP);
+
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+ // Look through bitcasts, which might not be able to be MCExpr'ized (e.g. of
+ // vectors).
+ if (CE->getOpcode() == Instruction::BitCast)
+ return EmitGlobalConstantImpl(CE->getOperand(0), AddrSpace, AP);
+
+ if (Size > 8) {
+ // If the constant expression's size is greater than 64-bits, then we have
+ // to emit the value in chunks. Try to constant fold the value and emit it
+ // that way.
+ Constant *New = ConstantFoldConstantExpression(CE, TD);
+ if (New && New != CE)
+ return EmitGlobalConstantImpl(New, AddrSpace, AP);
+ }
+ }
+
if (const ConstantVector *V = dyn_cast<ConstantVector>(CV))
return EmitGlobalConstantVector(V, AddrSpace, AP);
-
+
// Otherwise, it must be a ConstantExpr. Lower it to an MCExpr, then emit it
// thread the streamer with EmitValue.
- AP.OutStreamer.EmitValue(LowerConstant(CV, AP),
- AP.TM.getTargetData()->getTypeAllocSize(CV->getType()),
- AddrSpace);
+ AP.OutStreamer.EmitValue(LowerConstant(CV, AP), Size, AddrSpace);
}
/// EmitGlobalConstant - Print a general LLVM constant to the .s file.
@@ -1953,7 +2078,7 @@ static void EmitBasicBlockLoopComments(const MachineBasicBlock &MBB,
void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock *MBB) const {
// Emit an alignment directive for this block, if needed.
if (unsigned Align = MBB->getAlignment())
- EmitAlignment(Log2_32(Align));
+ EmitAlignment(Align);
// If the block has its address taken, emit any labels that were used to
// reference the block. It is possible that there is more than one label
@@ -1970,27 +2095,22 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock *MBB) const {
OutStreamer.EmitLabel(Syms[i]);
}
+ // Print some verbose block comments.
+ if (isVerbose()) {
+ if (const BasicBlock *BB = MBB->getBasicBlock())
+ if (BB->hasName())
+ OutStreamer.AddComment("%" + BB->getName());
+ EmitBasicBlockLoopComments(*MBB, LI, *this);
+ }
+
// Print the main label for the block.
if (MBB->pred_empty() || isBlockOnlyReachableByFallthrough(MBB)) {
if (isVerbose() && OutStreamer.hasRawTextSupport()) {
- if (const BasicBlock *BB = MBB->getBasicBlock())
- if (BB->hasName())
- OutStreamer.AddComment("%" + BB->getName());
-
- EmitBasicBlockLoopComments(*MBB, LI, *this);
-
// NOTE: Want this comment at start of line, don't emit with AddComment.
OutStreamer.EmitRawText(Twine(MAI->getCommentString()) + " BB#" +
Twine(MBB->getNumber()) + ":");
}
} else {
- if (isVerbose()) {
- if (const BasicBlock *BB = MBB->getBasicBlock())
- if (BB->hasName())
- OutStreamer.AddComment("%" + BB->getName());
- EmitBasicBlockLoopComments(*MBB, LI, *this);
- }
-
OutStreamer.EmitLabel(MBB->getSymbol());
}
}
@@ -2048,7 +2168,7 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
MachineInstr &MI = *II;
// If it is not a simple branch, we are in a table somewhere.
- if (!MI.getDesc().isBranch() || MI.getDesc().isIndirectBranch())
+ if (!MI.isBranch() || MI.isIndirectBranch())
return false;
// If we are the operands of one of the branches, this is not
@@ -2090,6 +2210,4 @@ GCMetadataPrinter *AsmPrinter::GetOrCreateGCPrinter(GCStrategy *S) {
}
report_fatal_error("no GCMetadataPrinter registered for GC: " + Twine(Name));
- return 0;
}
-
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index 4d6c281..90d511c 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -25,6 +25,7 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -35,23 +36,8 @@ using namespace llvm;
void AsmPrinter::EmitSLEB128(int Value, const char *Desc) const {
if (isVerbose() && Desc)
OutStreamer.AddComment(Desc);
-
- if (MAI->hasLEB128()) {
- OutStreamer.EmitSLEB128IntValue(Value);
- return;
- }
- // If we don't have .sleb128, emit as .bytes.
- int Sign = Value >> (8 * sizeof(Value) - 1);
- bool IsMore;
-
- do {
- unsigned char Byte = static_cast<unsigned char>(Value & 0x7f);
- Value >>= 7;
- IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
- if (IsMore) Byte |= 0x80;
- OutStreamer.EmitIntValue(Byte, 1, /*addrspace*/0);
- } while (IsMore);
+ OutStreamer.EmitSLEB128IntValue(Value);
}
/// EmitULEB128 - emit the specified unsigned leb128 value.
@@ -60,25 +46,7 @@ void AsmPrinter::EmitULEB128(unsigned Value, const char *Desc,
if (isVerbose() && Desc)
OutStreamer.AddComment(Desc);
- // FIXME: Should we add a PadTo option to the streamer?
- if (MAI->hasLEB128() && PadTo == 0) {
- OutStreamer.EmitULEB128IntValue(Value);
- return;
- }
-
- // If we don't have .uleb128 or we want to emit padding, emit as .bytes.
- do {
- unsigned char Byte = static_cast<unsigned char>(Value & 0x7f);
- Value >>= 7;
- if (Value || PadTo != 0) Byte |= 0x80;
- OutStreamer.EmitIntValue(Byte, 1, /*addrspace*/0);
- } while (Value);
-
- if (PadTo) {
- if (PadTo > 1)
- OutStreamer.EmitFill(PadTo - 1, 0x80/*fillval*/, 0/*addrspace*/);
- OutStreamer.EmitFill(1, 0/*fillval*/, 0/*addrspace*/);
- }
+ OutStreamer.EmitULEB128IntValue(Value, 0/*addrspace*/, PadTo);
}
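EmitULEB128 now defers unconditionally to the streamer, which performs the loop the deleted fallback spelled out: seven payload bits per byte, high bit set while more bytes follow. A minimal standalone encoder, for reference:

#include <cstdint>
#include <vector>

static void encodeULEB128(uint64_t Value, std::vector<uint8_t> &Out) {
  do {
    uint8_t Byte = Value & 0x7f; // low seven bits
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80;              // continuation bit
    Out.push_back(Byte);
  } while (Value != 0);
}
// e.g. 624485 encodes as 0xE5 0x8E 0x26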
/// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
@@ -143,7 +111,7 @@ unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const {
return 0;
switch (Encoding & 0x07) {
- default: assert(0 && "Invalid encoded value.");
+ default: llvm_unreachable("Invalid encoded value.");
case dwarf::DW_EH_PE_absptr: return TM.getTargetData()->getPointerSize();
case dwarf::DW_EH_PE_udata2: return 2;
case dwarf::DW_EH_PE_udata4: return 4;
@@ -177,9 +145,8 @@ void AsmPrinter::EmitReference(const GlobalValue *GV, unsigned Encoding)const{
void AsmPrinter::EmitSectionOffset(const MCSymbol *Label,
const MCSymbol *SectionLabel) const {
// On COFF targets, we have to emit the special .secrel32 directive.
- if (const char *SecOffDir = MAI->getDwarfSectionOffsetDirective()) {
- // FIXME: MCize.
- OutStreamer.EmitRawText(SecOffDir + Twine(Label->getName()));
+ if (MAI->getDwarfSectionOffsetDirective()) {
+ OutStreamer.EmitCOFFSecRel32(Label);
return;
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index 8eda889..d605854 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -326,7 +326,11 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1;
}
- if (OpNo >= MI->getNumOperands()) {
+ // We may have location metadata attached to the end of the
+ // instruction, and should not see metadata at any other point
+ // while processing; treat any other position as an error.
+ if (OpNo >= MI->getNumOperands() ||
+ MI->getOperand(OpNo).isMetadata()) {
Error = true;
} else {
unsigned OpFlags = MI->getOperand(OpNo).getImm();
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
index 9c1ce76..3776848 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -112,15 +112,6 @@ DIE::~DIE() {
delete Children[i];
}
-/// addSiblingOffset - Add a sibling offset field to the front of the DIE.
-///
-DIEValue *DIE::addSiblingOffset(BumpPtrAllocator &A) {
- DIEInteger *DI = new (A) DIEInteger(0);
- Values.insert(Values.begin(), DI);
- Abbrev.AddFirstAttribute(dwarf::DW_AT_sibling, dwarf::DW_FORM_ref4);
- return DI;
-}
-
#ifndef NDEBUG
void DIE::print(raw_ostream &O, unsigned IncIndent) {
IndentCount += IncIndent;
@@ -174,6 +165,7 @@ void DIE::dump() {
}
#endif
+void DIEValue::anchor() { }
#ifndef NDEBUG
void DIEValue::dump() {
@@ -223,33 +215,14 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const {
case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
case dwarf::DW_FORM_addr: return AP->getTargetData().getPointerSize();
- default: llvm_unreachable("DIE Value form not supported yet"); break;
+ default: llvm_unreachable("DIE Value form not supported yet");
}
- return 0;
}
#ifndef NDEBUG
void DIEInteger::print(raw_ostream &O) {
- O << "Int: " << (int64_t)Integer
- << format(" 0x%llx", (unsigned long long)Integer);
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIEString Implementation
-//===----------------------------------------------------------------------===//
-
-/// EmitValue - Emit string value.
-///
-void DIEString::EmitValue(AsmPrinter *AP, unsigned Form) const {
- AP->OutStreamer.EmitBytes(Str, /*addrspace*/0);
- // Emit nul terminator.
- AP->OutStreamer.EmitIntValue(0, 1, /*addrspace*/0);
-}
-
-#ifndef NDEBUG
-void DIEString::print(raw_ostream &O) {
- O << "Str: \"" << Str << "\"";
+ O << "Int: " << (int64_t)Integer << " 0x";
+ O.write_hex(Integer);
}
#endif
@@ -267,6 +240,7 @@ void DIELabel::EmitValue(AsmPrinter *AP, unsigned Form) const {
///
unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
+ if (Form == dwarf::DW_FORM_strp) return 4;
return AP->getTargetData().getPointerSize();
}
@@ -290,6 +264,7 @@ void DIEDelta::EmitValue(AsmPrinter *AP, unsigned Form) const {
///
unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
+ if (Form == dwarf::DW_FORM_strp) return 4;
return AP->getTargetData().getPointerSize();
}
@@ -335,7 +310,7 @@ unsigned DIEBlock::ComputeSize(AsmPrinter *AP) {
///
void DIEBlock::EmitValue(AsmPrinter *Asm, unsigned Form) const {
switch (Form) {
- default: assert(0 && "Improper form for block"); break;
+ default: llvm_unreachable("Improper form for block");
case dwarf::DW_FORM_block1: Asm->EmitInt8(Size); break;
case dwarf::DW_FORM_block2: Asm->EmitInt16(Size); break;
case dwarf::DW_FORM_block4: Asm->EmitInt32(Size); break;
@@ -355,9 +330,8 @@ unsigned DIEBlock::SizeOf(AsmPrinter *AP, unsigned Form) const {
case dwarf::DW_FORM_block2: return Size + sizeof(int16_t);
case dwarf::DW_FORM_block4: return Size + sizeof(int32_t);
case dwarf::DW_FORM_block: return Size + MCAsmInfo::getULEB128Size(Size);
- default: llvm_unreachable("Improper form for block"); break;
+ default: llvm_unreachable("Improper form for block");
}
- return 0;
}
#ifndef NDEBUG
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
index 7d61f1e..f93ea1b 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.h
@@ -31,17 +31,17 @@ namespace llvm {
class DIEAbbrevData {
/// Attribute - Dwarf attribute code.
///
- unsigned Attribute;
+ uint16_t Attribute;
/// Form - Dwarf form code.
///
- unsigned Form;
+ uint16_t Form;
public:
- DIEAbbrevData(unsigned A, unsigned F) : Attribute(A), Form(F) {}
+ DIEAbbrevData(uint16_t A, uint16_t F) : Attribute(A), Form(F) {}
// Accessors.
- unsigned getAttribute() const { return Attribute; }
- unsigned getForm() const { return Form; }
+ uint16_t getAttribute() const { return Attribute; }
+ uint16_t getForm() const { return Form; }
/// Profile - Used to gather unique data for the abbreviation folding set.
///
@@ -54,41 +54,41 @@ namespace llvm {
class DIEAbbrev : public FoldingSetNode {
/// Tag - Dwarf tag code.
///
- unsigned Tag;
+ uint16_t Tag;
- /// Unique number for node.
+ /// ChildrenFlag - Dwarf children flag.
///
- unsigned Number;
+ uint16_t ChildrenFlag;
- /// ChildrenFlag - Dwarf children flag.
+ /// Unique number for node.
///
- unsigned ChildrenFlag;
+ unsigned Number;
/// Data - Raw data bytes for abbreviation.
///
SmallVector<DIEAbbrevData, 8> Data;
public:
- DIEAbbrev(unsigned T, unsigned C) : Tag(T), ChildrenFlag(C), Data() {}
+ DIEAbbrev(uint16_t T, uint16_t C) : Tag(T), ChildrenFlag(C), Data() {}
// Accessors.
- unsigned getTag() const { return Tag; }
+ uint16_t getTag() const { return Tag; }
unsigned getNumber() const { return Number; }
- unsigned getChildrenFlag() const { return ChildrenFlag; }
+ uint16_t getChildrenFlag() const { return ChildrenFlag; }
const SmallVector<DIEAbbrevData, 8> &getData() const { return Data; }
- void setTag(unsigned T) { Tag = T; }
- void setChildrenFlag(unsigned CF) { ChildrenFlag = CF; }
+ void setTag(uint16_t T) { Tag = T; }
+ void setChildrenFlag(uint16_t CF) { ChildrenFlag = CF; }
void setNumber(unsigned N) { Number = N; }
/// AddAttribute - Adds another set of attribute information to the
/// abbreviation.
- void AddAttribute(unsigned Attribute, unsigned Form) {
+ void AddAttribute(uint16_t Attribute, uint16_t Form) {
Data.push_back(DIEAbbrevData(Attribute, Form));
}
/// AddFirstAttribute - Adds a set of attribute information to the front
/// of the abbreviation.
- void AddFirstAttribute(unsigned Attribute, unsigned Form) {
+ void AddFirstAttribute(uint16_t Attribute, uint16_t Form) {
Data.insert(Data.begin(), DIEAbbrevData(Attribute, Form));
}
@@ -113,10 +113,6 @@ namespace llvm {
class DIE {
protected:
- /// Abbrev - Buffer for constructing abbreviation.
- ///
- DIEAbbrev Abbrev;
-
/// Offset - Offset in debug info section.
///
unsigned Offset;
@@ -125,6 +121,10 @@ namespace llvm {
///
unsigned Size;
+ /// Abbrev - Buffer for constructing abbreviation.
+ ///
+ DIEAbbrev Abbrev;
+
/// Children DIEs.
///
std::vector<DIE *> Children;
@@ -139,8 +139,8 @@ namespace llvm {
mutable unsigned IndentCount;
public:
explicit DIE(unsigned Tag)
- : Abbrev(Tag, dwarf::DW_CHILDREN_no), Offset(0),
- Size(0), Parent (0), IndentCount(0) {}
+ : Offset(0), Size(0), Abbrev(Tag, dwarf::DW_CHILDREN_no), Parent(0),
+ IndentCount(0) {}
virtual ~DIE();
// Accessors.
@@ -163,16 +163,6 @@ namespace llvm {
Values.push_back(Value);
}
- /// SiblingOffset - Return the offset of the debug information entry's
- /// sibling.
- unsigned getSiblingOffset() const { return Offset + Size; }
-
- /// addSiblingOffset - Add a sibling offset field to the front of the DIE.
- /// The caller is responsible for deleting the return value at or after the
- /// same time it destroys this DIE.
- ///
- DIEValue *addSiblingOffset(BumpPtrAllocator &A);
-
/// addChild - Add a child to the DIE.
///
void addChild(DIE *Child) {
@@ -195,12 +185,12 @@ namespace llvm {
/// DIEValue - A debug information entry value.
///
class DIEValue {
+ virtual void anchor();
public:
enum {
isInteger,
isString,
isLabel,
- isSectionOffset,
isDelta,
isEntry,
isBlock
@@ -276,33 +266,6 @@ namespace llvm {
};
//===--------------------------------------------------------------------===//
- /// DIEString - A string value DIE. This DIE keeps string reference only.
- ///
- class DIEString : public DIEValue {
- const StringRef Str;
- public:
- explicit DIEString(const StringRef S) : DIEValue(isString), Str(S) {}
-
- /// EmitValue - Emit string value.
- ///
- virtual void EmitValue(AsmPrinter *AP, unsigned Form) const;
-
- /// SizeOf - Determine size of string value in bytes.
- ///
- virtual unsigned SizeOf(AsmPrinter *AP, unsigned /*Form*/) const {
- return Str.size() + sizeof(char); // sizeof('\0');
- }
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEString *) { return true; }
- static bool classof(const DIEValue *S) { return S->getType() == isString; }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
- //===--------------------------------------------------------------------===//
/// DIELabel - A label expression DIE.
//
class DIELabel : public DIEValue {
@@ -359,7 +322,7 @@ namespace llvm {
};
//===--------------------------------------------------------------------===//
- /// DIEntry - A pointer to another debug information entry. An instance of
+ /// DIEEntry - A pointer to another debug information entry. An instance of
/// this class can also be used as a proxy for a debug information entry not
/// yet defined (ie. types.)
class DIEEntry : public DIEValue {
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
new file mode 100644
index 0000000..660684d
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
@@ -0,0 +1,287 @@
+//=-- llvm/CodeGen/DwarfAccelTable.cpp - Dwarf Accelerator Tables -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing dwarf accelerator tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DwarfAccelTable.h"
+#include "DwarfDebug.h"
+#include "DIE.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+const char *DwarfAccelTable::Atom::AtomTypeString(enum AtomType AT) {
+ switch (AT) {
+ case eAtomTypeNULL: return "eAtomTypeNULL";
+ case eAtomTypeDIEOffset: return "eAtomTypeDIEOffset";
+ case eAtomTypeCUOffset: return "eAtomTypeCUOffset";
+ case eAtomTypeTag: return "eAtomTypeTag";
+ case eAtomTypeNameFlags: return "eAtomTypeNameFlags";
+ case eAtomTypeTypeFlags: return "eAtomTypeTypeFlags";
+ }
+ llvm_unreachable("invalid AtomType!");
+}
+
+// The general case would need a less hard-coded size for the length of
+// the HeaderData; however, if we're constructing based on a single Atom
+// then we know it will always be: 4 + 4 + 2 + 2.
+DwarfAccelTable::DwarfAccelTable(DwarfAccelTable::Atom atom) :
+ Header(12),
+ HeaderData(atom) {
+}
+
+// The length of the header data is always going to be 4 + 4 + 4*NumAtoms.
+DwarfAccelTable::DwarfAccelTable(std::vector<DwarfAccelTable::Atom> &atomList) :
+ Header(8 + (atomList.size() * 4)),
+ HeaderData(atomList) {
+}
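+// Worked example (illustrative): the header data is die_offset_base (4
+// bytes) plus atom_count (4 bytes) plus atom_count * (type (2) + form (2))
+// bytes, so one atom gives 4 + 4 + 2 + 2 = 12 and N atoms give 8 + 4*N.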
+
+DwarfAccelTable::~DwarfAccelTable() {
+ for (size_t i = 0, e = Data.size(); i < e; ++i)
+ delete Data[i];
+ for (StringMap<DataArray>::iterator
+ EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI)
+ for (DataArray::iterator DI = EI->second.begin(),
+ DE = EI->second.end(); DI != DE; ++DI)
+ delete (*DI);
+}
+
+void DwarfAccelTable::AddName(StringRef Name, DIE* die, char Flags) {
+ // Look up the entry for this string, creating it if it isn't in the map
+ // yet, and append this die's data to it.
+ DataArray &DIEs = Entries[Name];
+ DIEs.push_back(new HashDataContents(die, Flags));
+}
+
+void DwarfAccelTable::ComputeBucketCount(void) {
+ // First get the number of unique hashes.
+ std::vector<uint32_t> uniques(Data.size());
+ for (size_t i = 0, e = Data.size(); i < e; ++i)
+ uniques[i] = Data[i]->HashValue;
+ array_pod_sort(uniques.begin(), uniques.end());
+ std::vector<uint32_t>::iterator p =
+ std::unique(uniques.begin(), uniques.end());
+ uint32_t num = std::distance(uniques.begin(), p);
+
+ // Then compute the bucket size, minimum of 1 bucket.
+ if (num > 1024) Header.bucket_count = num/4;
+ else if (num > 16) Header.bucket_count = num/2;
+ else Header.bucket_count = num > 0 ? num : 1;
+
+ Header.hashes_count = num;
+}
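+// For example (illustrative): 2000 unique hashes give 2000/4 = 500 buckets,
+// 100 give 50, 10 give 10, and 0 gives the one-bucket minimum.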
+
+namespace {
+ // DIESorter - comparison predicate that sorts DIEs by their offset.
+ struct DIESorter {
+ bool operator()(const struct DwarfAccelTable::HashDataContents *A,
+ const struct DwarfAccelTable::HashDataContents *B) const {
+ return A->Die->getOffset() < B->Die->getOffset();
+ }
+ };
+}
+
+void DwarfAccelTable::FinalizeTable(AsmPrinter *Asm, const char *Prefix) {
+ // Create the individual hash data outputs.
+ for (StringMap<DataArray>::iterator
+ EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI) {
+ struct HashData *Entry = new HashData((*EI).getKeyData());
+
+ // Unique the entries.
+ std::stable_sort(EI->second.begin(), EI->second.end(), DIESorter());
+ EI->second.erase(std::unique(EI->second.begin(), EI->second.end()),
+ EI->second.end());
+
+ for (DataArray::const_iterator DI = EI->second.begin(),
+ DE = EI->second.end();
+ DI != DE; ++DI)
+ Entry->addData((*DI));
+ Data.push_back(Entry);
+ }
+
+ // Figure out how many buckets we need, then compute the bucket
+ // contents and the final ordering. We'll emit the hashes and offsets
+ // by doing a walk during the emission phase. We add temporary
+ // symbols to the data so that we can reference them when emitting the
+ // offsets; the symbols themselves are emitted when we emit the data.
+ ComputeBucketCount();
+
+ // Compute bucket contents and final ordering.
+ Buckets.resize(Header.bucket_count);
+ for (size_t i = 0, e = Data.size(); i < e; ++i) {
+ uint32_t bucket = Data[i]->HashValue % Header.bucket_count;
+ Buckets[bucket].push_back(Data[i]);
+ Data[i]->Sym = Asm->GetTempSymbol(Prefix, i);
+ }
+}
+
+// Emits the header for the table via the AsmPrinter.
+void DwarfAccelTable::EmitHeader(AsmPrinter *Asm) {
+ Asm->OutStreamer.AddComment("Header Magic");
+ Asm->EmitInt32(Header.magic);
+ Asm->OutStreamer.AddComment("Header Version");
+ Asm->EmitInt16(Header.version);
+ Asm->OutStreamer.AddComment("Header Hash Function");
+ Asm->EmitInt16(Header.hash_function);
+ Asm->OutStreamer.AddComment("Header Bucket Count");
+ Asm->EmitInt32(Header.bucket_count);
+ Asm->OutStreamer.AddComment("Header Hash Count");
+ Asm->EmitInt32(Header.hashes_count);
+ Asm->OutStreamer.AddComment("Header Data Length");
+ Asm->EmitInt32(Header.header_data_len);
+ Asm->OutStreamer.AddComment("HeaderData Die Offset Base");
+ Asm->EmitInt32(HeaderData.die_offset_base);
+ Asm->OutStreamer.AddComment("HeaderData Atom Count");
+ Asm->EmitInt32(HeaderData.Atoms.size());
+ for (size_t i = 0; i < HeaderData.Atoms.size(); i++) {
+ Atom A = HeaderData.Atoms[i];
+ Asm->OutStreamer.AddComment(Atom::AtomTypeString(A.type));
+ Asm->EmitInt16(A.type);
+ Asm->OutStreamer.AddComment(dwarf::FormEncodingString(A.form));
+ Asm->EmitInt16(A.form);
+ }
+}
+
+// Walk through and emit the buckets for the table. This will look
+// like a list of numbers of how many elements are in each bucket.
+void DwarfAccelTable::EmitBuckets(AsmPrinter *Asm) {
+ unsigned index = 0;
+ for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
+ Asm->OutStreamer.AddComment("Bucket " + Twine(i));
+ if (Buckets[i].size() != 0)
+ Asm->EmitInt32(index);
+ else
+ Asm->EmitInt32(UINT32_MAX);
+ index += Buckets[i].size();
+ }
+}
+
+// Walk through the buckets and emit the individual hashes for each
+// bucket.
+void DwarfAccelTable::EmitHashes(AsmPrinter *Asm) {
+ for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
+ for (HashList::const_iterator HI = Buckets[i].begin(),
+ HE = Buckets[i].end(); HI != HE; ++HI) {
+ Asm->OutStreamer.AddComment("Hash in Bucket " + Twine(i));
+ Asm->EmitInt32((*HI)->HashValue);
+ }
+ }
+}
+
+// Walk through the buckets and emit the individual offsets for each
+// element in each bucket. This is done via a symbol subtraction from the
+// beginning of the section. The non-section symbol will be output later
+// when we emit the actual data.
+void DwarfAccelTable::EmitOffsets(AsmPrinter *Asm, MCSymbol *SecBegin) {
+ for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
+ for (HashList::const_iterator HI = Buckets[i].begin(),
+ HE = Buckets[i].end(); HI != HE; ++HI) {
+ Asm->OutStreamer.AddComment("Offset in Bucket " + Twine(i));
+ MCContext &Context = Asm->OutStreamer.getContext();
+ const MCExpr *Sub =
+ MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create((*HI)->Sym, Context),
+ MCSymbolRefExpr::Create(SecBegin, Context),
+ Context);
+ Asm->OutStreamer.EmitValue(Sub, sizeof(uint32_t), 0);
+ }
+ }
+}
+
+// Walk through the buckets and emit the full data for each element in
+// the bucket. For the string case emit the dies and the various offsets.
+// Terminate each HashData bucket with 0.
+void DwarfAccelTable::EmitData(AsmPrinter *Asm, DwarfDebug *D) {
+ uint64_t PrevHash = UINT64_MAX;
+ for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
+ for (HashList::const_iterator HI = Buckets[i].begin(),
+ HE = Buckets[i].end(); HI != HE; ++HI) {
+ // Remember to emit the label for our offset.
+ Asm->OutStreamer.EmitLabel((*HI)->Sym);
+ Asm->OutStreamer.AddComment((*HI)->Str);
+ Asm->EmitSectionOffset(D->getStringPoolEntry((*HI)->Str),
+ D->getStringPool());
+ Asm->OutStreamer.AddComment("Num DIEs");
+ Asm->EmitInt32((*HI)->Data.size());
+ for (std::vector<struct HashDataContents*>::const_iterator
+ DI = (*HI)->Data.begin(), DE = (*HI)->Data.end();
+ DI != DE; ++DI) {
+ // Emit the DIE offset
+ Asm->EmitInt32((*DI)->Die->getOffset());
+ // If we have multiple Atoms emit that info too.
+ // FIXME: A bit of a hack, we either emit only one atom or all info.
+ if (HeaderData.Atoms.size() > 1) {
+ Asm->EmitInt16((*DI)->Die->getTag());
+ Asm->EmitInt8((*DI)->Flags);
+ }
+ }
+ // Emit a 0 to terminate the data unless we have a hash collision.
+ if (PrevHash != (*HI)->HashValue)
+ Asm->EmitInt32(0);
+ PrevHash = (*HI)->HashValue;
+ }
+ }
+}
+
+// Emit the entire data structure to the output file.
+void DwarfAccelTable::Emit(AsmPrinter *Asm, MCSymbol *SecBegin,
+ DwarfDebug *D) {
+ // Emit the header.
+ EmitHeader(Asm);
+
+ // Emit the buckets.
+ EmitBuckets(Asm);
+
+ // Emit the hashes.
+ EmitHashes(Asm);
+
+ // Emit the offsets.
+ EmitOffsets(Asm, SecBegin);
+
+ // Emit the hash data.
+ EmitData(Asm, D);
+}
+
+#ifndef NDEBUG
+void DwarfAccelTable::print(raw_ostream &O) {
+ Header.print(O);
+ HeaderData.print(O);
+
+ O << "Entries: \n";
+ for (StringMap<DataArray>::const_iterator
+ EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI) {
+ O << "Name: " << EI->getKeyData() << "\n";
+ for (DataArray::const_iterator DI = EI->second.begin(),
+ DE = EI->second.end();
+ DI != DE; ++DI)
+ (*DI)->print(O);
+ }
+
+ O << "Buckets and Hashes: \n";
+ for (size_t i = 0, e = Buckets.size(); i < e; ++i)
+ for (HashList::const_iterator HI = Buckets[i].begin(),
+ HE = Buckets[i].end(); HI != HE; ++HI)
+ (*HI)->print(O);
+
+ O << "Data: \n";
+ for (std::vector<HashData*>::const_iterator
+ DI = Data.begin(), DE = Data.end(); DI != DE; ++DI)
+ (*DI)->print(O);
+}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
new file mode 100644
index 0000000..2278d4c
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
@@ -0,0 +1,290 @@
+//==-- llvm/CodeGen/DwarfAccelTable.h - Dwarf Accelerator Tables -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing dwarf accelerator tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CODEGEN_ASMPRINTER_DWARFACCELTABLE_H__
+#define CODEGEN_ASMPRINTER_DWARFACCELTABLE_H__
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/FormattedStream.h"
+#include "DIE.h"
+#include <vector>
+#include <map>
+
+// Each dwarf accelerator table is an indirect hash table optimized
+// for null lookup rather than access to known data. The tables are output
+// into an on-disk format that looks like this:
+//
+// .-------------.
+// | HEADER |
+// |-------------|
+// | BUCKETS |
+// |-------------|
+// | HASHES |
+// |-------------|
+// | OFFSETS |
+// |-------------|
+// | DATA |
+// `-------------'
+//
+// where the header contains a magic number, version, type of hash function,
+// the number of buckets, total number of hashes, and room for a special
+// struct of data and the length of that struct.
+//
+// The buckets contain an index (e.g. 6) into the hashes array. The hashes
+// section contains all of the 32-bit hash values in contiguous memory, and
+// the offsets contain the offset into the data area for the particular
+// hash.
+//
+// For a lookup example, we could hash a function name and take it modulo the
+// number of buckets giving us our bucket. From there we take the bucket value
+// as an index into the hashes table and look at each successive hash as long
+// as the hash value is still the same modulo result (bucket value) as earlier.
+// If we have a match we look at that same entry in the offsets table and
+// grab the offset in the data for our final match.
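+//
+// A minimal reader-side sketch of that lookup (illustrative only, not
+// part of this writer; it assumes a little-endian in-memory copy of the
+// table, with empty buckets holding UINT32_MAX):
+//
+//   uint32_t Hash = DjbHash(Name);        // same hash the writer used
+//   uint32_t Bucket = Hash % bucket_count;
+//   for (uint32_t i = buckets[Bucket];
+//        i < hashes_count && hashes[i] % bucket_count == Bucket; ++i)
+//     if (hashes[i] == Hash)
+//       return offsets[i];                // offset of the hash data
+//   return NotFound;                      // NotFound is a placeholder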
+
+namespace llvm {
+
+class AsmPrinter;
+class DIE;
+class DwarfDebug;
+
+class DwarfAccelTable {
+
+ enum HashFunctionType {
+ eHashFunctionDJB = 0u
+ };
+
+ static uint32_t HashDJB (StringRef Str) {
+ uint32_t h = 5381;
+ for (unsigned i = 0, e = Str.size(); i != e; ++i)
+ h = ((h << 5) + h) + Str[i];
+ return h;
+ }
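+
+ // The loop body above is the classic Bernstein step h = h*33 + c; e.g.
+ // (illustrative) HashDJB("a") = 5381*33 + 'a' = 177670.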
+
+ // Helper function to compute the number of buckets needed based on
+ // the number of unique hashes.
+ void ComputeBucketCount (void);
+
+ struct TableHeader {
+ uint32_t magic; // 'HASH' magic value to allow endian detection
+ uint16_t version; // Version number.
+ uint16_t hash_function; // The hash function enumeration that was used.
+ uint32_t bucket_count; // The number of buckets in this hash table.
+ uint32_t hashes_count; // The total number of unique hash values
+ // and hash data offsets in this table.
+ uint32_t header_data_len; // The bytes to skip to get to the hash
+ // indexes (buckets) for correct alignment.
+ // Also written to disk is the implementation specific header data.
+
+ static const uint32_t MagicHash = 0x48415348;
+
+ TableHeader (uint32_t data_len) :
+ magic (MagicHash), version (1), hash_function (eHashFunctionDJB),
+ bucket_count (0), hashes_count (0), header_data_len (data_len)
+ {}
+
+#ifndef NDEBUG
+ void print(raw_ostream &O) {
+ O << "Magic: " << format("0x%x", magic) << "\n"
+ << "Version: " << version << "\n"
+ << "Hash Function: " << hash_function << "\n"
+ << "Bucket Count: " << bucket_count << "\n"
+ << "Header Data Length: " << header_data_len << "\n";
+ }
+ void dump() { print(dbgs()); }
+#endif
+ };
+
+public:
+ // The HeaderData describes the form of each set of data. In general this
+ // is as a list of atoms (atom_count) where each atom contains a type
+ // (AtomType type) of data, and an encoding form (form). In the case of
+ // data that is referenced via DW_FORM_ref_* the die_offset_base is
+ // used to describe the offset for all forms in the list of atoms.
+ // This also serves as a public interface of sorts.
+ // When written to disk this will have the form:
+ //
+ // uint32_t die_offset_base
+ // uint32_t atom_count
+ // atom_count Atoms
+ enum AtomType {
+ eAtomTypeNULL = 0u,
+ eAtomTypeDIEOffset = 1u, // DIE offset, check form for encoding
+ eAtomTypeCUOffset = 2u, // DIE offset of the compile unit header that
+ // contains the item in question
+ eAtomTypeTag = 3u, // DW_TAG_xxx value, should be encoded as
+ // DW_FORM_data1 (if no tags exceed 255) or
+ // DW_FORM_data2.
+ eAtomTypeNameFlags = 4u, // Flags from enum NameFlags
+ eAtomTypeTypeFlags = 5u // Flags from enum TypeFlags
+ };
+
+ enum TypeFlags {
+ eTypeFlagClassMask = 0x0000000fu,
+
+ // Always set for C++, only set for ObjC if this is the
+ // @implementation for a class.
+ eTypeFlagClassIsImplementation = ( 1u << 1 )
+ };
+
+ // Make these public so that they can be used as a general interface to
+ // the class.
+ struct Atom {
+ AtomType type; // enum AtomType
+ uint16_t form; // DWARF DW_FORM_ defines
+
+ Atom(AtomType type, uint16_t form) : type(type), form(form) {}
+ static const char * AtomTypeString(enum AtomType);
+#ifndef NDEBUG
+ void print(raw_ostream &O) {
+ O << "Type: " << AtomTypeString(type) << "\n"
+ << "Form: " << dwarf::FormEncodingString(form) << "\n";
+ }
+ void dump() {
+ print(dbgs());
+ }
+#endif
+ };
+
+ private:
+ struct TableHeaderData {
+
+ uint32_t die_offset_base;
+ std::vector<Atom> Atoms;
+
+ TableHeaderData(std::vector<DwarfAccelTable::Atom> &AtomList,
+ uint32_t offset = 0) :
+ die_offset_base(offset) {
+ for (size_t i = 0, e = AtomList.size(); i != e; ++i)
+ Atoms.push_back(AtomList[i]);
+ }
+
+ TableHeaderData(DwarfAccelTable::Atom Atom, uint32_t offset = 0)
+ : die_offset_base(offset) {
+ Atoms.push_back(Atom);
+ }
+
+#ifndef NDEBUG
+ void print (raw_ostream &O) {
+ O << "die_offset_base: " << die_offset_base << "\n";
+ for (size_t i = 0; i < Atoms.size(); i++)
+ Atoms[i].print(O);
+ }
+ void dump() {
+ print(dbgs());
+ }
+#endif
+ };
+
+ // The data itself consists of a str_offset, a count of the DIEs that
+ // hash to this name, and the offsets to those DIEs.
+ // On disk each data section is ended with a 0 KeyType as the end of the
+ // hash chain.
+ // On output this looks like:
+ // uint32_t str_offset
+ // uint32_t hash_data_count
+ // HashData[hash_data_count]
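+ //
+ // For example (illustrative), a name with a single DIE at offset 0x2a,
+ // using the default one-atom layout, is written as four 32-bit words:
+ //   str_offset | 1 | 0x0000002a | 0   (the 0 ends the hash chain)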
+public:
+ struct HashDataContents {
+ DIE *Die; // Offsets
+ char Flags; // Specific flags to output
+
+ HashDataContents(DIE *D, char Flags) :
+ Die(D),
+ Flags(Flags) { }
+ #ifndef NDEBUG
+ void print(raw_ostream &O) const {
+ O << " Offset: " << Die->getOffset() << "\n";
+ O << " Tag: " << dwarf::TagString(Die->getTag()) << "\n";
+ O << " Flags: " << Flags << "\n";
+ }
+ #endif
+ };
+private:
+ struct HashData {
+ StringRef Str;
+ uint32_t HashValue;
+ MCSymbol *Sym;
+ std::vector<struct HashDataContents*> Data; // offsets
+ HashData(StringRef S) : Str(S) {
+ HashValue = DwarfAccelTable::HashDJB(S);
+ }
+ void addData(struct HashDataContents *Datum) { Data.push_back(Datum); }
+ #ifndef NDEBUG
+ void print(raw_ostream &O) {
+ O << "Name: " << Str << "\n";
+ O << " Hash Value: " << format("0x%x", HashValue) << "\n";
+ O << " Symbol: " ;
+ if (Sym) Sym->print(O);
+ else O << "<none>";
+ O << "\n";
+ for (size_t i = 0; i < Data.size(); i++) {
+ O << " Offset: " << Data[i]->Die->getOffset() << "\n";
+ O << " Tag: " << dwarf::TagString(Data[i]->Die->getTag()) << "\n";
+ O << " Flags: " << Data[i]->Flags << "\n";
+ }
+ }
+ void dump() {
+ print(dbgs());
+ }
+ #endif
+ };
+
+ DwarfAccelTable(const DwarfAccelTable&); // DO NOT IMPLEMENT
+ void operator=(const DwarfAccelTable&); // DO NOT IMPLEMENT
+
+ // Internal Functions
+ void EmitHeader(AsmPrinter *);
+ void EmitBuckets(AsmPrinter *);
+ void EmitHashes(AsmPrinter *);
+ void EmitOffsets(AsmPrinter *, MCSymbol *);
+ void EmitData(AsmPrinter *, DwarfDebug *D);
+
+ // Output Variables
+ TableHeader Header;
+ TableHeaderData HeaderData;
+ std::vector<HashData*> Data;
+
+ // String Data
+ typedef std::vector<struct HashDataContents*> DataArray;
+ typedef StringMap<DataArray> StringEntries;
+ StringEntries Entries;
+
+ // Buckets/Hashes/Offsets
+ typedef std::vector<HashData*> HashList;
+ typedef std::vector<HashList> BucketList;
+ BucketList Buckets;
+ HashList Hashes;
+
+ // Public Implementation
+ public:
+ DwarfAccelTable(DwarfAccelTable::Atom);
+ DwarfAccelTable(std::vector<DwarfAccelTable::Atom> &);
+ ~DwarfAccelTable();
+ void AddName(StringRef, DIE*, char = 0);
+ void FinalizeTable(AsmPrinter *, const char *);
+ void Emit(AsmPrinter *, MCSymbol *, DwarfDebug *);
+#ifndef NDEBUG
+ void print(raw_ostream &O);
+ void dump() { print(dbgs()); }
+#endif
+};
+
+}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
index 8ed4f4c..d975f1f 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
@@ -142,12 +142,14 @@ void DwarfCFIException::EndFunction() {
Asm->OutStreamer.EmitCFIEndProc();
+ if (!shouldEmitPersonality)
+ return;
+
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("eh_func_end",
Asm->getFunctionNumber()));
// Map all labels and get rid of any dead landing pads.
MMI->TidyLandingPads();
- if (shouldEmitPersonality)
- EmitExceptionTable();
+ EmitExceptionTable();
}
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 6fe476d..69dc454 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -13,12 +13,14 @@
#define DEBUG_TYPE "dwarfdebug"
+#include "DwarfAccelTable.h"
#include "DwarfCompileUnit.h"
#include "DwarfDebug.h"
#include "llvm/Constants.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/DIBuilder.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameLowering.h"
@@ -30,8 +32,9 @@
using namespace llvm;
/// CompileUnit - Compile unit constructor.
-CompileUnit::CompileUnit(unsigned I, DIE *D, AsmPrinter *A, DwarfDebug *DW)
- : ID(I), CUDie(D), Asm(A), DD(DW), IndexTyDie(0) {
+CompileUnit::CompileUnit(unsigned I, unsigned L, DIE *D, AsmPrinter *A,
+ DwarfDebug *DW)
+ : ID(I), Language(L), CUDie(D), Asm(A), DD(DW), IndexTyDie(0) {
DIEIntegerOne = new (DIEValueAllocator) DIEInteger(1);
}
@@ -67,12 +70,19 @@ void CompileUnit::addSInt(DIE *Die, unsigned Attribute,
Die->addValue(Attribute, Form, Value);
}
-/// addString - Add a string attribute data and value. DIEString only
-/// keeps string reference.
-void CompileUnit::addString(DIE *Die, unsigned Attribute, unsigned Form,
- StringRef String) {
- DIEValue *Value = new (DIEValueAllocator) DIEString(String);
- Die->addValue(Attribute, Form, Value);
+/// addString - Add a string attribute data and value. We always emit a
+/// reference to the string pool instead of immediate strings so that DIEs have
+/// more predictable sizes.
+void CompileUnit::addString(DIE *Die, unsigned Attribute, StringRef String) {
+ MCSymbol *Symb = DD->getStringPoolEntry(String);
+ DIEValue *Value;
+ if (Asm->needsRelocationsForDwarfStringPool())
+ Value = new (DIEValueAllocator) DIELabel(Symb);
+ else {
+ MCSymbol *StringPool = DD->getStringPool();
+ Value = new (DIEValueAllocator) DIEDelta(Symb, StringPool);
+ }
+ Die->addValue(Attribute, dwarf::DW_FORM_strp, Value);
}
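+// Illustratively (symbol names assumed), the two cases differ in whether
+// the DW_FORM_strp offset needs a relocation:
+//   .long Lstring3                ; DIELabel: relocated string address
+//   .long Lstring3-Lsection_str   ; DIEDelta: fixed at assembly time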
/// addLabel - Add a Dwarf label attribute data and value.
@@ -98,7 +108,6 @@ void CompileUnit::addDIEEntry(DIE *Die, unsigned Attribute, unsigned Form,
Die->addValue(Attribute, Form, createDIEEntry(Entry));
}
-
/// addBlock - Add block data.
///
void CompileUnit::addBlock(DIE *Die, unsigned Attribute, unsigned Form,
@@ -135,8 +144,7 @@ void CompileUnit::addSourceLine(DIE *Die, DIGlobalVariable G) {
unsigned Line = G.getLineNumber();
if (Line == 0)
return;
- unsigned FileID = DD->GetOrCreateSourceID(G.getFilename(),
- G.getDirectory());
+ unsigned FileID = DD->GetOrCreateSourceID(G.getFilename(), G.getDirectory());
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -148,14 +156,14 @@ void CompileUnit::addSourceLine(DIE *Die, DISubprogram SP) {
// Verify subprogram.
if (!SP.Verify())
return;
- // If the line number is 0, don't add it.
- if (SP.getLineNumber() == 0)
- return;
+ // If the line number is 0, don't add it.
unsigned Line = SP.getLineNumber();
- if (!SP.getContext().Verify())
+ if (Line == 0)
return;
- unsigned FileID = DD->GetOrCreateSourceID(SP.getFilename(), SP.getDirectory());
+
+ unsigned FileID = DD->GetOrCreateSourceID(SP.getFilename(),
+ SP.getDirectory());
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -169,9 +177,28 @@ void CompileUnit::addSourceLine(DIE *Die, DIType Ty) {
return;
unsigned Line = Ty.getLineNumber();
- if (Line == 0 || !Ty.getContext().Verify())
+ if (Line == 0)
return;
- unsigned FileID = DD->GetOrCreateSourceID(Ty.getFilename(), Ty.getDirectory());
+ unsigned FileID = DD->GetOrCreateSourceID(Ty.getFilename(),
+ Ty.getDirectory());
+ assert(FileID && "Invalid file id");
+ addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
+ addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
+}
+
+/// addSourceLine - Add location information to specified debug information
+/// entry.
+void CompileUnit::addSourceLine(DIE *Die, DIObjCProperty Ty) {
+ // Verify type.
+ if (!Ty.Verify())
+ return;
+
+ unsigned Line = Ty.getLineNumber();
+ if (Line == 0)
+ return;
+ DIFile File = Ty.getFile();
+ unsigned FileID = DD->GetOrCreateSourceID(File.getFilename(),
+ File.getDirectory());
assert(FileID && "Invalid file id");
addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -458,7 +485,7 @@ static bool isTypeSigned(DIType Ty, int *SizeInBits) {
/// addConstantValue - Add constant value entry in variable DIE.
bool CompileUnit::addConstantValue(DIE *Die, const MachineOperand &MO,
DIType Ty) {
- assert (MO.isImm() && "Invalid machine operand!");
+ assert(MO.isImm() && "Invalid machine operand!");
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
int SizeInBits = -1;
bool SignedConstant = isTypeSigned(Ty, &SizeInBits);
@@ -558,8 +585,8 @@ void CompileUnit::addTemplateParams(DIE &Buffer, DIArray TParams) {
Buffer.addChild(getOrCreateTemplateValueParameterDIE(
DITemplateValueParameter(Element)));
}
-
}
+
/// addToContextOwner - Add Die into the list of its context owner's children.
void CompileUnit::addToContextOwner(DIE *Die, DIDescriptor Context) {
if (Context.isType()) {
@@ -598,13 +625,29 @@ DIE *CompileUnit::getOrCreateTypeDIE(const MDNode *TyNode) {
assert(Ty.isDerivedType() && "Unknown kind of DIType");
constructTypeDIE(*TyDIE, DIDerivedType(Ty));
}
-
+ // If this is a named finished type then include it in the list of types
+ // for the accelerator tables.
+ if (!Ty.getName().empty() && !Ty.isForwardDecl()) {
+ bool IsImplementation = 0;
+ if (Ty.isCompositeType()) {
+ DICompositeType CT(Ty);
+ // A runtime language of 0 actually means C/C++, and any non-zero
+ // value is some version of Objective-C/C++.
+ IsImplementation = (CT.getRunTimeLang() == 0) ||
+ CT.isObjcClassComplete();
+ }
+ unsigned Flags = IsImplementation ?
+ DwarfAccelTable::eTypeFlagClassIsImplementation : 0;
+ addAccelType(Ty.getName(), std::make_pair(TyDIE, Flags));
+ }
+
addToContextOwner(TyDIE, Ty.getContext());
return TyDIE;
}
/// addType - Add a new type attribute to the specified entity.
-void CompileUnit::addType(DIE *Entity, DIType Ty) {
+void CompileUnit::addType(DIE *Entity, DIType Ty,
+ unsigned Attribute) {
if (!Ty.Verify())
return;
@@ -612,7 +655,7 @@ void CompileUnit::addType(DIE *Entity, DIType Ty) {
DIEEntry *Entry = getDIEEntry(Ty);
// If it exists then use the existing value.
if (Entry) {
- Entity->addValue(dwarf::DW_AT_type, dwarf::DW_FORM_ref4, Entry);
+ Entity->addValue(Attribute, dwarf::DW_FORM_ref4, Entry);
return;
}
@@ -622,7 +665,7 @@ void CompileUnit::addType(DIE *Entity, DIType Ty) {
// Set up proxy.
Entry = createDIEEntry(Buffer);
insertDIEEntry(Ty, Entry);
- Entity->addValue(dwarf::DW_AT_type, dwarf::DW_FORM_ref4, Entry);
+ Entity->addValue(Attribute, dwarf::DW_FORM_ref4, Entry);
// If this is a complete composite type then include it in the
// list of global types.
@@ -662,7 +705,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DIBasicType BTy) {
StringRef Name = BTy.getName();
// Add name if not anonymous or intermediate type.
if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
+ addString(&Buffer, dwarf::DW_AT_name, Name);
if (BTy.getTag() == dwarf::DW_TAG_unspecified_type) {
Buffer.setTag(dwarf::DW_TAG_unspecified_type);
@@ -671,8 +714,8 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DIBasicType BTy) {
}
Buffer.setTag(dwarf::DW_TAG_base_type);
- addUInt(&Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
- BTy.getEncoding());
+ addUInt(&Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
+ BTy.getEncoding());
uint64_t Size = BTy.getSizeInBits() >> 3;
addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, Size);
@@ -696,10 +739,10 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DIDerivedType DTy) {
// Add name if not anonymous or intermediate type.
if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
+ addString(&Buffer, dwarf::DW_AT_name, Name);
// Add size if non-zero (derived types might be zero-sized.)
- if (Size)
+ if (Size && Tag != dwarf::DW_TAG_pointer_type)
addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, Size);
// Add source line info if available and TyDesc is not a forward declaration.
@@ -755,8 +798,12 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
Buffer.addChild(Arg);
}
}
- // Add prototype flag.
- if (isPrototyped)
+ // Add prototype flag if we're dealing with a C language and the
+ // function has been prototyped.
+ if (isPrototyped &&
+ (Language == dwarf::DW_LANG_C89 ||
+ Language == dwarf::DW_LANG_C99 ||
+ Language == dwarf::DW_LANG_ObjC))
addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
}
break;
@@ -779,13 +826,13 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
DISubprogram SP(Element);
ElemDie = getOrCreateSubprogramDIE(DISubprogram(Element));
if (SP.isProtected())
- addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_protected);
else if (SP.isPrivate())
- addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_private);
else
- addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_public);
if (SP.isExplicit())
addUInt(ElemDie, dwarf::DW_AT_explicit, dwarf::DW_FORM_flag, 1);
@@ -793,15 +840,54 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
else if (Element.isVariable()) {
DIVariable DV(Element);
ElemDie = new DIE(dwarf::DW_TAG_variable);
- addString(ElemDie, dwarf::DW_AT_name, dwarf::DW_FORM_string,
- DV.getName());
+ addString(ElemDie, dwarf::DW_AT_name, DV.getName());
addType(ElemDie, DV.getType());
addUInt(ElemDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
addUInt(ElemDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
addSourceLine(ElemDie, DV);
- } else if (Element.isDerivedType())
- ElemDie = createMemberDIE(DIDerivedType(Element));
- else
+ } else if (Element.isDerivedType()) {
+ DIDerivedType DDTy(Element);
+ if (DDTy.getTag() == dwarf::DW_TAG_friend) {
+ ElemDie = new DIE(dwarf::DW_TAG_friend);
+ addType(ElemDie, DDTy.getTypeDerivedFrom(), dwarf::DW_AT_friend);
+ } else
+ ElemDie = createMemberDIE(DIDerivedType(Element));
+ } else if (Element.isObjCProperty()) {
+ DIObjCProperty Property(Element);
+ ElemDie = new DIE(Property.getTag());
+ StringRef PropertyName = Property.getObjCPropertyName();
+ addString(ElemDie, dwarf::DW_AT_APPLE_property_name, PropertyName);
+ addType(ElemDie, Property.getType());
+ addSourceLine(ElemDie, Property);
+ StringRef GetterName = Property.getObjCPropertyGetterName();
+ if (!GetterName.empty())
+ addString(ElemDie, dwarf::DW_AT_APPLE_property_getter, GetterName);
+ StringRef SetterName = Property.getObjCPropertySetterName();
+ if (!SetterName.empty())
+ addString(ElemDie, dwarf::DW_AT_APPLE_property_setter, SetterName);
+ unsigned PropertyAttributes = 0;
+ if (Property.isReadOnlyObjCProperty())
+ PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_readonly;
+ if (Property.isReadWriteObjCProperty())
+ PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_readwrite;
+ if (Property.isAssignObjCProperty())
+ PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_assign;
+ if (Property.isRetainObjCProperty())
+ PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_retain;
+ if (Property.isCopyObjCProperty())
+ PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_copy;
+ if (Property.isNonAtomicObjCProperty())
+ PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_nonatomic;
+ if (PropertyAttributes)
+ addUInt(ElemDie, dwarf::DW_AT_APPLE_property_attribute, 0,
+ PropertyAttributes);
+
+ DIEEntry *Entry = getDIEEntry(Element);
+ if (!Entry) {
+ Entry = createDIEEntry(ElemDie);
+ insertDIEEntry(Element, Entry);
+ }
+ } else
continue;
Buffer.addChild(ElemDie);
}
@@ -809,11 +895,6 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
if (CTy.isAppleBlockExtension())
addUInt(&Buffer, dwarf::DW_AT_APPLE_block, dwarf::DW_FORM_flag, 1);
- unsigned RLang = CTy.getRunTimeLang();
- if (RLang)
- addUInt(&Buffer, dwarf::DW_AT_APPLE_runtime_class,
- dwarf::DW_FORM_data1, RLang);
-
DICompositeType ContainingType = CTy.getContainingType();
if (DIDescriptor(ContainingType).isCompositeType())
addDIEEntry(&Buffer, dwarf::DW_AT_containing_type, dwarf::DW_FORM_ref4,
@@ -827,7 +908,11 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
addUInt(&Buffer, dwarf::DW_AT_APPLE_objc_complete_type,
dwarf::DW_FORM_flag, 1);
- if (Tag == dwarf::DW_TAG_class_type)
+ // Add template parameters to a class, structure or union types.
+ // FIXME: The support isn't in the metadata for this yet.
+ if (Tag == dwarf::DW_TAG_class_type ||
+ Tag == dwarf::DW_TAG_structure_type ||
+ Tag == dwarf::DW_TAG_union_type)
addTemplateParams(Buffer, CTy.getTemplateParams());
break;
@@ -838,11 +923,11 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
// Add name if not anonymous or intermediate type.
if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
+ addString(&Buffer, dwarf::DW_AT_name, Name);
if (Tag == dwarf::DW_TAG_enumeration_type || Tag == dwarf::DW_TAG_class_type
|| Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type)
- {
+ {
// Add size if non-zero (derived types might be zero-sized.)
if (Size)
addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, Size);
@@ -857,6 +942,12 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
// Add source line info if available.
if (!CTy.isForwardDecl())
addSourceLine(&Buffer, CTy);
+
+ // No harm in adding the runtime language to the declaration.
+ unsigned RLang = CTy.getRunTimeLang();
+ if (RLang)
+ addUInt(&Buffer, dwarf::DW_AT_APPLE_runtime_class,
+ dwarf::DW_FORM_data1, RLang);
}
}
@@ -870,7 +961,7 @@ CompileUnit::getOrCreateTemplateTypeParameterDIE(DITemplateTypeParameter TP) {
ParamDIE = new DIE(dwarf::DW_TAG_template_type_parameter);
addType(ParamDIE, TP.getType());
- addString(ParamDIE, dwarf::DW_AT_name, dwarf::DW_FORM_string, TP.getName());
+ addString(ParamDIE, dwarf::DW_AT_name, TP.getName());
return ParamDIE;
}
@@ -885,7 +976,7 @@ CompileUnit::getOrCreateTemplateValueParameterDIE(DITemplateValueParameter TPV)
ParamDIE = new DIE(dwarf::DW_TAG_template_value_parameter);
addType(ParamDIE, TPV.getType());
if (!TPV.getName().empty())
- addString(ParamDIE, dwarf::DW_AT_name, dwarf::DW_FORM_string, TPV.getName());
+ addString(ParamDIE, dwarf::DW_AT_name, TPV.getName());
addUInt(ParamDIE, dwarf::DW_AT_const_value, dwarf::DW_FORM_udata,
TPV.getValue());
return ParamDIE;
@@ -898,8 +989,11 @@ DIE *CompileUnit::getOrCreateNameSpace(DINameSpace NS) {
return NDie;
NDie = new DIE(dwarf::DW_TAG_namespace);
insertDIE(NS, NDie);
- if (!NS.getName().empty())
- addString(NDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, NS.getName());
+ if (!NS.getName().empty()) {
+ addString(NDie, dwarf::DW_AT_name, NS.getName());
+ addAccelNamespace(NS.getName(), NDie);
+ } else
+ addAccelNamespace("(anonymous namespace)", NDie);
addSourceLine(NDie, NS);
addToContextOwner(NDie, NS.getContext());
return NDie;
@@ -921,6 +1015,12 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
if (SPDie)
return SPDie;
+ DISubprogram SPDecl = SP.getFunctionDeclaration();
+ DIE *DeclDie = NULL;
+ if (SPDecl.isSubprogram()) {
+ DeclDie = getOrCreateSubprogramDIE(SPDecl);
+ }
+
SPDie = new DIE(dwarf::DW_TAG_subprogram);
// DW_TAG_inlined_subroutine may refer to this DIE.
@@ -932,25 +1032,36 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
// Add function template parameters.
addTemplateParams(*SPDie, SP.getTemplateParams());
+ // Unfortunately this code needs to stay here to work around
+ // a bug in older gdbs that requires the linkage name to resolve
+ // multiple template functions.
StringRef LinkageName = SP.getLinkageName();
if (!LinkageName.empty())
- addString(SPDie, dwarf::DW_AT_MIPS_linkage_name,
- dwarf::DW_FORM_string,
- getRealLinkageName(LinkageName));
+ addString(SPDie, dwarf::DW_AT_MIPS_linkage_name,
+ getRealLinkageName(LinkageName));
// If this DIE is going to refer declaration info using AT_specification
// then there is no need to add other attributes.
- if (SP.getFunctionDeclaration().isSubprogram())
+ if (DeclDie) {
+ // Refer function declaration directly.
+ addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4,
+ DeclDie);
+
return SPDie;
+ }
// Constructors and operators for anonymous aggregates do not have names.
if (!SP.getName().empty())
- addString(SPDie, dwarf::DW_AT_name, dwarf::DW_FORM_string,
- SP.getName());
+ addString(SPDie, dwarf::DW_AT_name, SP.getName());
addSourceLine(SPDie, SP);
- if (SP.isPrototyped())
+ // Add the prototype if we have a prototype and we have a C like
+ // language.
+ if (SP.isPrototyped() &&
+ (Language == dwarf::DW_LANG_C89 ||
+ Language == dwarf::DW_LANG_C99 ||
+ Language == dwarf::DW_LANG_ObjC))
addUInt(SPDie, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
// Add Return Type.
@@ -965,7 +1076,7 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
unsigned VK = SP.getVirtuality();
if (VK) {
- addUInt(SPDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_flag, VK);
+ addUInt(SPDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_data1, VK);
DIEBlock *Block = getDIEBlock();
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
addUInt(Block, 0, dwarf::DW_FORM_udata, SP.getVirtualIndex());
@@ -1052,31 +1163,30 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
insertDIE(N, VariableDIE);
// Add name.
- addString(VariableDIE, dwarf::DW_AT_name, dwarf::DW_FORM_string,
- GV.getDisplayName());
+ addString(VariableDIE, dwarf::DW_AT_name, GV.getDisplayName());
StringRef LinkageName = GV.getLinkageName();
bool isGlobalVariable = GV.getGlobal() != NULL;
if (!LinkageName.empty() && isGlobalVariable)
- addString(VariableDIE, dwarf::DW_AT_MIPS_linkage_name,
- dwarf::DW_FORM_string,
- getRealLinkageName(LinkageName));
+ addString(VariableDIE, dwarf::DW_AT_MIPS_linkage_name,
+ getRealLinkageName(LinkageName));
// Add type.
DIType GTy = GV.getType();
addType(VariableDIE, GTy);
// Add scoping info.
- if (!GV.isLocalToUnit()) {
+ if (!GV.isLocalToUnit())
addUInt(VariableDIE, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
- // Expose as global.
- addGlobal(GV.getName(), VariableDIE);
- }
+
// Add line number info.
addSourceLine(VariableDIE, GV);
// Add to context owner.
DIDescriptor GVContext = GV.getContext();
addToContextOwner(VariableDIE, GVContext);
// Add location.
+ bool addToAccelTable = false;
+ DIE *VariableSpecDIE = NULL;
if (isGlobalVariable) {
+ addToAccelTable = true;
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr);
addLabel(Block, 0, dwarf::DW_FORM_udata,
@@ -1086,7 +1196,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
if (GVContext && GV.isDefinition() && !GVContext.isCompileUnit() &&
!GVContext.isFile() && !isSubprogramContext(GVContext)) {
// Create specification DIE.
- DIE *VariableSpecDIE = new DIE(dwarf::DW_TAG_variable);
+ VariableSpecDIE = new DIE(dwarf::DW_TAG_variable);
addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification,
dwarf::DW_FORM_ref4, VariableDIE);
addBlock(VariableSpecDIE, dwarf::DW_AT_location, 0, Block);
@@ -1095,11 +1205,12 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
addDie(VariableSpecDIE);
} else {
addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
- }
+ }
} else if (const ConstantInt *CI =
dyn_cast_or_null<ConstantInt>(GV.getConstant()))
addConstantValue(VariableDIE, CI, GTy.isUnsignedDIType());
else if (const ConstantExpr *CE = getMergedGlobalExpr(N->getOperand(11))) {
+ addToAccelTable = true;
// GV is a merged global.
DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
Value *Ptr = CE->getOperand(0);
@@ -1114,6 +1225,16 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
}
+ if (addToAccelTable) {
+ DIE *AddrDIE = VariableSpecDIE ? VariableSpecDIE : VariableDIE;
+ addAccelName(GV.getName(), AddrDIE);
+
+ // If the linkage name is different than the name, go ahead and output
+ // that as well into the name table.
+ if (GV.getLinkageName() != "" && GV.getName() != GV.getLinkageName())
+ addAccelName(GV.getLinkageName(), AddrDIE);
+ }
+
return;
}
@@ -1121,8 +1242,8 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
void CompileUnit::constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy){
DIE *DW_Subrange = new DIE(dwarf::DW_TAG_subrange_type);
addDIEEntry(DW_Subrange, dwarf::DW_AT_type, dwarf::DW_FORM_ref4, IndexTy);
- int64_t L = SR.getLo();
- int64_t H = SR.getHi();
+ uint64_t L = SR.getLo();
+ uint64_t H = SR.getHi();
// The L value defines the lower bounds which is typically zero for C/C++. The
// H value is the upper bounds. Values are 64 bit. H - L + 1 is the size
@@ -1135,8 +1256,8 @@ void CompileUnit::constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy)
return;
}
if (L)
- addSInt(DW_Subrange, dwarf::DW_AT_lower_bound, 0, L);
- addSInt(DW_Subrange, dwarf::DW_AT_upper_bound, 0, H);
+ addUInt(DW_Subrange, dwarf::DW_AT_lower_bound, 0, L);
+ addUInt(DW_Subrange, dwarf::DW_AT_upper_bound, 0, H);
Buffer.addChild(DW_Subrange);
}
@@ -1175,7 +1296,7 @@ void CompileUnit::constructArrayTypeDIE(DIE &Buffer,
DIE *CompileUnit::constructEnumTypeDIE(DIEnumerator ETy) {
DIE *Enumerator = new DIE(dwarf::DW_TAG_enumerator);
StringRef Name = ETy.getName();
- addString(Enumerator, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
+ addString(Enumerator, dwarf::DW_AT_name, Name);
int64_t Value = ETy.getEnumValue();
addSInt(Enumerator, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata, Value);
return Enumerator;
@@ -1212,8 +1333,7 @@ DIE *CompileUnit::constructVariableDIE(DbgVariable *DV, bool isScopeAbstract) {
addDIEEntry(VariableDie, dwarf::DW_AT_abstract_origin,
dwarf::DW_FORM_ref4, AbsDIE);
else {
- addString(VariableDie, dwarf::DW_AT_name,
- dwarf::DW_FORM_string, Name);
+ addString(VariableDie, dwarf::DW_AT_name, Name);
addSourceLine(VariableDie, DV->getVariable());
addType(VariableDie, DV->getType());
}
@@ -1308,7 +1428,7 @@ DIE *CompileUnit::createMemberDIE(DIDerivedType DT) {
DIE *MemberDie = new DIE(DT.getTag());
StringRef Name = DT.getName();
if (!Name.empty())
- addString(MemberDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
+ addString(MemberDie, dwarf::DW_AT_name, Name);
addType(MemberDie, DT.getTypeDerivedFrom());
@@ -1366,32 +1486,35 @@ DIE *CompileUnit::createMemberDIE(DIDerivedType DT) {
addBlock(MemberDie, dwarf::DW_AT_data_member_location, 0, MemLocationDie);
if (DT.isProtected())
- addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_protected);
else if (DT.isPrivate())
- addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_private);
// Otherwise C++ member and base classes are considered public.
else
- addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+ addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_public);
if (DT.isVirtual())
- addUInt(MemberDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_flag,
+ addUInt(MemberDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_data1,
dwarf::DW_VIRTUALITY_virtual);
// Objective-C properties.
+ if (MDNode *PNode = DT.getObjCProperty())
+ if (DIEEntry *PropertyDie = getDIEEntry(PNode))
+ MemberDie->addValue(dwarf::DW_AT_APPLE_property, dwarf::DW_FORM_ref4,
+ PropertyDie);
+
+ // This is only for backward compatibility.
StringRef PropertyName = DT.getObjCPropertyName();
if (!PropertyName.empty()) {
- addString(MemberDie, dwarf::DW_AT_APPLE_property_name, dwarf::DW_FORM_string,
- PropertyName);
+ addString(MemberDie, dwarf::DW_AT_APPLE_property_name, PropertyName);
StringRef GetterName = DT.getObjCPropertyGetterName();
if (!GetterName.empty())
- addString(MemberDie, dwarf::DW_AT_APPLE_property_getter,
- dwarf::DW_FORM_string, GetterName);
+ addString(MemberDie, dwarf::DW_AT_APPLE_property_getter, GetterName);
StringRef SetterName = DT.getObjCPropertySetterName();
if (!SetterName.empty())
- addString(MemberDie, dwarf::DW_AT_APPLE_property_setter,
- dwarf::DW_FORM_string, SetterName);
+ addString(MemberDie, dwarf::DW_AT_APPLE_property_setter, SetterName);
unsigned PropertyAttributes = 0;
if (DT.isReadOnlyObjCProperty())
PropertyAttributes |= dwarf::DW_APPLE_PROPERTY_readonly;
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
index 7859265..45e407e 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
@@ -29,13 +29,17 @@ class ConstantInt;
class DbgVariable;
//===----------------------------------------------------------------------===//
-/// CompileUnit - This dwarf writer support class manages information associate
+/// CompileUnit - This dwarf writer support class manages information associated
/// with a source file.
class CompileUnit {
/// ID - File identifier for source.
///
unsigned ID;
+ /// Language - The DW_AT_language of the compile unit
+ ///
+ unsigned Language;
+
/// Die - Compile unit debug information entry.
///
const OwningPtr<DIE> CUDie;
@@ -56,14 +60,17 @@ class CompileUnit {
/// descriptors to debug information entries using a DIEEntry proxy.
DenseMap<const MDNode *, DIEEntry *> MDNodeToDIEEntryMap;
- /// Globals - A map of globally visible named entities for this unit.
- ///
- StringMap<DIE*> Globals;
-
/// GlobalTypes - A map of globally visible types for this unit.
///
StringMap<DIE*> GlobalTypes;
+ /// AccelNames - A map of names for the name accelerator table.
+ ///
+ StringMap<std::vector<DIE*> > AccelNames;
+ StringMap<std::vector<DIE*> > AccelObjC;
+ StringMap<std::vector<DIE*> > AccelNamespace;
+ StringMap<std::vector<std::pair<DIE*, unsigned> > > AccelTypes;
+
/// DIEBlocks - A list of all the DIEBlocks in use.
std::vector<DIEBlock *> DIEBlocks;
@@ -73,27 +80,56 @@ class CompileUnit {
DenseMap<DIE *, const MDNode *> ContainingTypeMap;
public:
- CompileUnit(unsigned I, DIE *D, AsmPrinter *A, DwarfDebug *DW);
+ CompileUnit(unsigned I, unsigned L, DIE *D, AsmPrinter *A, DwarfDebug *DW);
~CompileUnit();
// Accessors.
unsigned getID() const { return ID; }
+ unsigned getLanguage() const { return Language; }
DIE* getCUDie() const { return CUDie.get(); }
- const StringMap<DIE*> &getGlobals() const { return Globals; }
const StringMap<DIE*> &getGlobalTypes() const { return GlobalTypes; }
+ const StringMap<std::vector<DIE*> > &getAccelNames() const {
+ return AccelNames;
+ }
+ const StringMap<std::vector<DIE*> > &getAccelObjC() const {
+ return AccelObjC;
+ }
+ const StringMap<std::vector<DIE*> > &getAccelNamespace() const {
+ return AccelNamespace;
+ }
+ const StringMap<std::vector<std::pair<DIE*, unsigned > > >
+ &getAccelTypes() const {
+ return AccelTypes;
+ }
+
/// hasContent - Return true if this compile unit has something to write out.
///
bool hasContent() const { return !CUDie->getChildren().empty(); }
- /// addGlobal - Add a new global entity to the compile unit.
- ///
- void addGlobal(StringRef Name, DIE *Die) { Globals[Name] = Die; }
-
/// addGlobalType - Add a new global type to the compile unit.
///
void addGlobalType(DIType Ty);
+
+ /// addAccelName - Add a new name to the name accelerator table.
+ void addAccelName(StringRef Name, DIE *Die) {
+ std::vector<DIE*> &DIEs = AccelNames[Name];
+ DIEs.push_back(Die);
+ }
+ void addAccelObjC(StringRef Name, DIE *Die) {
+ std::vector<DIE*> &DIEs = AccelObjC[Name];
+ DIEs.push_back(Die);
+ }
+ void addAccelNamespace(StringRef Name, DIE *Die) {
+ std::vector<DIE*> &DIEs = AccelNamespace[Name];
+ DIEs.push_back(Die);
+ }
+ void addAccelType(StringRef Name, std::pair<DIE *, unsigned> Die) {
+ std::vector<std::pair<DIE*, unsigned > > &DIEs = AccelTypes[Name];
+ DIEs.push_back(Die);
+ }
+
/// getDIE - Returns the debug information entry map slot for the
/// specified debug variable.
DIE *getDIE(const MDNode *N) { return MDNodeToDieMap.lookup(N); }
@@ -150,8 +186,7 @@ public:
/// addString - Add a string attribute data and value.
///
- void addString(DIE *Die, unsigned Attribute, unsigned Form,
- const StringRef Str);
+ void addString(DIE *Die, unsigned Attribute, const StringRef Str);
/// addLabel - Add a Dwarf label attribute data and value.
///
@@ -178,6 +213,7 @@ public:
void addSourceLine(DIE *Die, DISubprogram SP);
void addSourceLine(DIE *Die, DIType Ty);
void addSourceLine(DIE *Die, DINameSpace NS);
+ void addSourceLine(DIE *Die, DIObjCProperty Ty);
/// addAddress - Add an address attribute to a die based on the location
/// provided.
@@ -225,8 +261,10 @@ public:
/// addToContextOwner - Add Die into the list of its context owner's children.
void addToContextOwner(DIE *Die, DIDescriptor Context);
- /// addType - Add a new type attribute to the specified entity.
- void addType(DIE *Entity, DIType Ty);
+ /// addType - Add a new type attribute to the specified entity. This takes
+ /// an attribute parameter because DW_AT_friend attributes are also
+ /// type references.
+ void addType(DIE *Entity, DIType Ty, unsigned Attribute = dwarf::DW_AT_type);
/// getOrCreateNameSpace - Create a DIE for DINameSpace.
DIE *getOrCreateNameSpace(DINameSpace NS);
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 1b7e370..cb78878 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -14,10 +14,12 @@
#define DEBUG_TYPE "dwarfdebug"
#include "DwarfDebug.h"
#include "DIE.h"
+#include "DwarfAccelTable.h"
#include "DwarfCompileUnit.h"
#include "llvm/Constants.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -52,6 +54,10 @@ static cl::opt<bool> UnknownLocations("use-unknown-locations", cl::Hidden,
cl::desc("Make an absence of debug location information explicit."),
cl::init(false));
+static cl::opt<bool> DwarfAccelTables("dwarf-accel-tables", cl::Hidden,
+ cl::desc("Output prototype dwarf accelerator tables."),
+ cl::init(false));
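+// (cl::opt registers this as the llc switch "-dwarf-accel-tables", e.g.
+// "llc -dwarf-accel-tables foo.ll", so the prototype output can be
+// forced on for non-Darwin targets without rebuilding.)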
+
namespace {
const char *DWARFGroupName = "DWARF Emission";
const char *DbgTimerName = "DWARF Debug Writer";
@@ -128,6 +134,11 @@ DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
DwarfStrSectionSym = TextSectionSym = 0;
DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0;
FunctionBeginSym = FunctionEndSym = 0;
+
+ // Turn on accelerator tables for Darwin.
+ if (Triple(M->getTargetTriple()).isOSDarwin())
+ DwarfAccelTables = true;
+
{
NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
beginModule(M);
@@ -136,6 +147,22 @@ DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
DwarfDebug::~DwarfDebug() {
}
+/// EmitSectionSym - Switch to the specified MCSection and emit an assembler
+/// temporary label to it if SymbolStem is specified.
+static MCSymbol *EmitSectionSym(AsmPrinter *Asm, const MCSection *Section,
+ const char *SymbolStem = 0) {
+ Asm->OutStreamer.SwitchSection(Section);
+ if (!SymbolStem) return 0;
+
+ MCSymbol *TmpSym = Asm->GetTempSymbol(SymbolStem);
+ Asm->OutStreamer.EmitLabel(TmpSym);
+ return TmpSym;
+}
+
+MCSymbol *DwarfDebug::getStringPool() {
+ return Asm->GetTempSymbol("section_str");
+}
+
MCSymbol *DwarfDebug::getStringPoolEntry(StringRef Str) {
std::pair<MCSymbol*, unsigned> &Entry = StringPool[Str];
if (Entry.first) return Entry.first;
@@ -144,7 +171,6 @@ MCSymbol *DwarfDebug::getStringPoolEntry(StringRef Str) {
return Entry.first = Asm->GetTempSymbol("string", Entry.second);
}
-
/// assignAbbrevNumber - Define a unique number for the abbreviation.
///
void DwarfDebug::assignAbbrevNumber(DIEAbbrev &Abbrev) {
@@ -178,6 +204,63 @@ static StringRef getRealLinkageName(StringRef LinkageName) {
return LinkageName;
}
+static bool isObjCClass(StringRef Name) {
+ return Name.startswith("+") || Name.startswith("-");
+}
+
+static bool hasObjCCategory(StringRef Name) {
+ if (!isObjCClass(Name)) return false;
+
+ size_t pos = Name.find(')');
+ if (pos != std::string::npos) {
+ if (Name[pos+1] != ' ') return false;
+ return true;
+ }
+ return false;
+}
+
+static void getObjCClassCategory(StringRef In, StringRef &Class,
+ StringRef &Category) {
+ if (!hasObjCCategory(In)) {
+ Class = In.slice(In.find('[') + 1, In.find(' '));
+ Category = "";
+ return;
+ }
+
+ Class = In.slice(In.find('[') + 1, In.find('('));
+ Category = In.slice(In.find('[') + 1, In.find(' '));
+ return;
+}
+
+static StringRef getObjCMethodName(StringRef In) {
+ return In.slice(In.find(' ') + 1, In.find(']'));
+}
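A worked example may make the slicing above concrete. Assuming StringRef::slice(Start, End) returns the half-open range [Start, End), a hypothetical selector decomposes like this:

    //   In = "-[NSString(MyCategory) myMethod:]"
    //   getObjCClassCategory(In, Class, Category);
    //     Class    == "NSString"
    //     Category == "NSString(MyCategory)"   // class plus category suffix
    //   getObjCMethodName(In) == "myMethod:"
    //
    //   In = "-[NSString myMethod:]"           // no category present
    //     Class == "NSString", Category == ""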
+
+// Add the various names to the Dwarf accelerator name tables.
+static void addSubprogramNames(CompileUnit *TheCU, DISubprogram SP,
+ DIE* Die) {
+ if (!SP.isDefinition()) return;
+
+ TheCU->addAccelName(SP.getName(), Die);
+
+ // If the linkage name is different from the name, go ahead and output
+ // that as well into the name table.
+ if (SP.getLinkageName() != "" && SP.getName() != SP.getLinkageName())
+ TheCU->addAccelName(SP.getLinkageName(), Die);
+
+ // If this is an Objective-C selector name, add it to the ObjC accelerator
+ // too.
+ if (isObjCClass(SP.getName())) {
+ StringRef Class, Category;
+ getObjCClassCategory(SP.getName(), Class, Category);
+ TheCU->addAccelObjC(Class, Die);
+ if (Category != "")
+ TheCU->addAccelObjC(Category, Die);
+ // Also add the base method name to the name table.
+ TheCU->addAccelName(getObjCMethodName(SP.getName()), Die);
+ }
+}
+
/// updateSubprogramScopeDIE - Find DIE for the given subprogram and
/// attach appropriate DW_AT_low_pc and DW_AT_high_pc attributes.
/// If there are global variables in this scope then create and insert
@@ -190,11 +273,7 @@ DIE *DwarfDebug::updateSubprogramScopeDIE(CompileUnit *SPCU,
DISubprogram SP(SPNode);
DISubprogram SPDecl = SP.getFunctionDeclaration();
- if (SPDecl.isSubprogram())
- // Refer function declaration directly.
- SPCU->addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4,
- SPCU->getOrCreateSubprogramDIE(SPDecl));
- else {
+ if (!SPDecl.isSubprogram()) {
// There is no need to generate a specification DIE for a function
// defined at the compile unit level. If a function is defined inside another
// function then gdb prefers the definition at the top level but does not
@@ -203,7 +282,7 @@ DIE *DwarfDebug::updateSubprogramScopeDIE(CompileUnit *SPCU,
if (SP.isDefinition() && !SP.getContext().isCompileUnit() &&
!SP.getContext().isFile() &&
!isSubprogramContext(SP.getContext())) {
- SPCU-> addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
+ SPCU->addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
// Add arguments.
DICompositeType SPTy = SP.getType();
@@ -241,6 +320,10 @@ DIE *DwarfDebug::updateSubprogramScopeDIE(CompileUnit *SPCU,
MachineLocation Location(RI->getFrameRegister(*Asm->MF));
SPCU->addAddress(SPDie, dwarf::DW_AT_frame_base, Location);
+ // Add the name to the name table; we do this here because we're guaranteed
+ // to have concrete versions of our DW_TAG_subprogram nodes.
+ addSubprogramNames(SPCU, SP, SPDie);
+
return SPDie;
}
@@ -248,7 +331,6 @@ DIE *DwarfDebug::updateSubprogramScopeDIE(CompileUnit *SPCU,
/// for this scope and attach DW_AT_low_pc/DW_AT_high_pc labels.
DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU,
LexicalScope *Scope) {
-
DIE *ScopeDIE = new DIE(dwarf::DW_TAG_lexical_block);
if (Scope->isAbstractScope())
return ScopeDIE;
@@ -294,10 +376,9 @@ DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU,
/// of the function.
DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
LexicalScope *Scope) {
-
const SmallVector<InsnRange, 4> &Ranges = Scope->getRanges();
- assert (Ranges.empty() == false
- && "LexicalScope does not have instruction markers!");
+ assert(Ranges.empty() == false &&
+ "LexicalScope does not have instruction markers!");
if (!Scope->getScopeNode())
return NULL;
@@ -314,8 +395,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
const MCSymbol *EndLabel = getLabelAfterInsn(RI->second);
if (StartLabel == 0 || EndLabel == 0) {
- assert (0 && "Unexpected Start and End labels for a inlined scope!");
- return 0;
+ llvm_unreachable("Unexpected Start and End labels for a inlined scope!");
}
assert(StartLabel->isDefined() &&
"Invalid starting label for an inlined scope!");
@@ -358,16 +438,20 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
I = InlineInfo.find(InlinedSP);
if (I == InlineInfo.end()) {
- InlineInfo[InlinedSP].push_back(std::make_pair(StartLabel,
- ScopeDIE));
+ InlineInfo[InlinedSP].push_back(std::make_pair(StartLabel, ScopeDIE));
InlinedSPNodes.push_back(InlinedSP);
} else
I->second.push_back(std::make_pair(StartLabel, ScopeDIE));
DILocation DL(Scope->getInlinedAt());
- TheCU->addUInt(ScopeDIE, dwarf::DW_AT_call_file, 0, TheCU->getID());
+ TheCU->addUInt(ScopeDIE, dwarf::DW_AT_call_file, 0,
+ GetOrCreateSourceID(DL.getFilename(), DL.getDirectory()));
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_call_line, 0, DL.getLineNumber());
+ // Add the name to the name table; we do this here because we're guaranteed
+ // to have concrete versions of our DW_TAG_inlined_subroutine nodes.
+ addSubprogramNames(TheCU, InlinedSP, ScopeDIE);
+
return ScopeDIE;
}
@@ -376,7 +460,7 @@ DIE *DwarfDebug::constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope) {
if (!Scope || !Scope->getScopeNode())
return NULL;
- SmallVector <DIE *, 8> Children;
+ SmallVector<DIE *, 8> Children;
// Collect arguments for current function.
if (LScopes.isCurrentFunctionScope(Scope))
@@ -426,39 +510,39 @@ DIE *DwarfDebug::constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope) {
ScopeDIE->addChild(*I);
if (DS.isSubprogram())
- TheCU->addPubTypes(DISubprogram(DS));
+ TheCU->addPubTypes(DISubprogram(DS));
- return ScopeDIE;
+ return ScopeDIE;
}
/// GetOrCreateSourceID - Look up the source id with the given directory and
/// source file names. If none currently exists, create a new id and insert it
/// in the SourceIds map. This can update DirectoryNames and SourceFileNames
/// maps as well.
-
unsigned DwarfDebug::GetOrCreateSourceID(StringRef FileName,
StringRef DirName) {
// If FE did not provide a file name, then assume stdin.
if (FileName.empty())
return GetOrCreateSourceID("<stdin>", StringRef());
- // MCStream expects full path name as filename.
- if (!DirName.empty() && !sys::path::is_absolute(FileName)) {
- SmallString<128> FullPathName = DirName;
- sys::path::append(FullPathName, FileName);
- // Here FullPathName will be copied into StringMap by GetOrCreateSourceID.
- return GetOrCreateSourceID(StringRef(FullPathName), StringRef());
- }
+ // TODO: this might not belong here. See if we can factor this better.
+ if (DirName == CompilationDir)
+ DirName = "";
- StringMapEntry<unsigned> &Entry = SourceIdMap.GetOrCreateValue(FileName);
- if (Entry.getValue())
- return Entry.getValue();
+ unsigned SrcId = SourceIdMap.size()+1;
- unsigned SrcId = SourceIdMap.size();
- Entry.setValue(SrcId);
+ // We look up the file/dir pair by concatenating them with a zero byte.
+ SmallString<128> NamePair;
+ NamePair += DirName;
+ NamePair += '\0'; // Zero bytes are not allowed in paths.
+ NamePair += FileName;
+
+ StringMapEntry<unsigned> &Ent = SourceIdMap.GetOrCreateValue(NamePair, SrcId);
+ if (Ent.getValue() != SrcId)
+ return Ent.getValue();
// Print out a .file directive to specify files for .loc directives.
- Asm->OutStreamer.EmitDwarfFileDirective(SrcId, Entry.getKey());
+ Asm->OutStreamer.EmitDwarfFileDirective(SrcId, DirName, FileName);
return SrcId;
}
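The NUL separator is what makes the key unambiguous: plain concatenation would conflate distinct pairs. A short trace, assuming StringMap::GetOrCreateValue returns the existing entry when the key is already present:

    //   ("ab", "c")  ->  key "ab\0c"
    //   ("a", "bc")  ->  key "a\0bc"   // distinct, unlike "abc" == "abc"
    //
    // GetOrCreateValue(NamePair, SrcId) inserts with the tentative id; if
    // the pair was seen before, the entry keeps its old id and the early
    // return above hands that id back, so no duplicate .file directive is
    // emitted.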
@@ -468,39 +552,36 @@ unsigned DwarfDebug::GetOrCreateSourceID(StringRef FileName,
CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) {
DICompileUnit DIUnit(N);
StringRef FN = DIUnit.getFilename();
- StringRef Dir = DIUnit.getDirectory();
- unsigned ID = GetOrCreateSourceID(FN, Dir);
+ CompilationDir = DIUnit.getDirectory();
+ unsigned ID = GetOrCreateSourceID(FN, CompilationDir);
DIE *Die = new DIE(dwarf::DW_TAG_compile_unit);
- CompileUnit *NewCU = new CompileUnit(ID, Die, Asm, this);
- NewCU->addString(Die, dwarf::DW_AT_producer, dwarf::DW_FORM_string,
- DIUnit.getProducer());
+ CompileUnit *NewCU = new CompileUnit(ID, DIUnit.getLanguage(), Die, Asm, this);
+ NewCU->addString(Die, dwarf::DW_AT_producer, DIUnit.getProducer());
NewCU->addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
DIUnit.getLanguage());
- NewCU->addString(Die, dwarf::DW_AT_name, dwarf::DW_FORM_string, FN);
- // Use DW_AT_entry_pc instead of DW_AT_low_pc/DW_AT_high_pc pair. This
- // simplifies debug range entries.
- NewCU->addUInt(Die, dwarf::DW_AT_entry_pc, dwarf::DW_FORM_addr, 0);
+ NewCU->addString(Die, dwarf::DW_AT_name, FN);
+ // 2.17.1 requires that we use DW_AT_low_pc for a single entry point
+ // into an entity.
+ NewCU->addUInt(Die, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr, 0);
// DW_AT_stmt_list is an offset of line number information for this
// compile unit in the debug_line section.
- if(Asm->MAI->doesDwarfRequireRelocationForSectionOffset())
+ if (Asm->MAI->doesDwarfRequireRelocationForSectionOffset())
NewCU->addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4,
Asm->GetTempSymbol("section_line"));
else
NewCU->addUInt(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0);
- if (!Dir.empty())
- NewCU->addString(Die, dwarf::DW_AT_comp_dir, dwarf::DW_FORM_string, Dir);
+ if (!CompilationDir.empty())
+ NewCU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
if (DIUnit.isOptimized())
NewCU->addUInt(Die, dwarf::DW_AT_APPLE_optimized, dwarf::DW_FORM_flag, 1);
StringRef Flags = DIUnit.getFlags();
if (!Flags.empty())
- NewCU->addString(Die, dwarf::DW_AT_APPLE_flags, dwarf::DW_FORM_string,
- Flags);
+ NewCU->addString(Die, dwarf::DW_AT_APPLE_flags, Flags);
- unsigned RVer = DIUnit.getRunTimeVersion();
- if (RVer)
+ if (unsigned RVer = DIUnit.getRunTimeVersion())
NewCU->addUInt(Die, dwarf::DW_AT_APPLE_major_runtime_vers,
dwarf::DW_FORM_data1, RVer);
@@ -513,6 +594,11 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) {
/// constructSubprogramDIE - Construct subprogram DIE.
void DwarfDebug::constructSubprogramDIE(CompileUnit *TheCU,
const MDNode *N) {
+ CompileUnit *&CURef = SPMap[N];
+ if (CURef)
+ return;
+ CURef = TheCU;
+
DISubprogram SP(N);
if (!SP.isDefinition())
// This is a method declaration which will be handled while constructing
@@ -527,10 +613,6 @@ void DwarfDebug::constructSubprogramDIE(CompileUnit *TheCU,
// Add to context owner.
TheCU->addToContextOwner(SubprogramDie, SP.getContext());
- // Expose as global.
- TheCU->addGlobal(SP.getName(), SubprogramDie);
-
- SPMap[N] = TheCU;
return;
}
@@ -676,7 +758,7 @@ void DwarfDebug::endModule() {
// Construct subprogram DIE and add variables DIEs.
CompileUnit *SPCU = CUMap.lookup(TheCU);
- assert (SPCU && "Unable to find Compile Unit!");
+ assert(SPCU && "Unable to find Compile Unit!");
constructSubprogramDIE(SPCU, SP);
DIE *ScopeDIE = SPCU->getDIE(SP);
for (unsigned vi = 0, ve = Variables.getNumElements(); vi != ve; ++vi) {
@@ -697,6 +779,13 @@ void DwarfDebug::endModule() {
DIE *ISP = *AI;
FirstCU->addUInt(ISP, dwarf::DW_AT_inline, 0, dwarf::DW_INL_inlined);
}
+ for (DenseMap<const MDNode *, DIE *>::iterator AI = AbstractSPDies.begin(),
+ AE = AbstractSPDies.end(); AI != AE; ++AI) {
+ DIE *ISP = AI->second;
+ if (InlinedSubprogramDIEs.count(ISP))
+ continue;
+ FirstCU->addUInt(ISP, dwarf::DW_AT_inline, 0, dwarf::DW_INL_inlined);
+ }
// Emit DW_AT_containing_type attribute to connect types with their
// vtable holding type.
@@ -727,9 +816,14 @@ void DwarfDebug::endModule() {
// Corresponding abbreviations into a abbrev section.
emitAbbreviations();
- // Emit info into a debug pubnames section.
- emitDebugPubNames();
-
+ // Emit info into the dwarf accelerator table sections.
+ if (DwarfAccelTables) {
+ emitAccelNames();
+ emitAccelObjC();
+ emitAccelNamespaces();
+ emitAccelTypes();
+ }
+
// Emit info into a debug pubtypes section.
emitDebugPubTypes();
@@ -837,7 +931,7 @@ DwarfDebug::collectVariableInfoFromMMITable(const MachineFunction *MF,
/// isDbgValueInDefinedReg - Return true if debug value, encoded by
/// DBG_VALUE instruction, is in a defined reg.
static bool isDbgValueInDefinedReg(const MachineInstr *MI) {
- assert (MI->isDebugValue() && "Invalid DBG_VALUE machine instruction!");
+ assert(MI->isDebugValue() && "Invalid DBG_VALUE machine instruction!");
return MI->getNumOperands() == 3 &&
MI->getOperand(0).isReg() && MI->getOperand(0).getReg() &&
MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0;
@@ -867,8 +961,7 @@ static DotDebugLocEntry getDebugLocEntry(AsmPrinter *Asm,
if (MI->getOperand(0).isCImm())
return DotDebugLocEntry(FLabel, SLabel, MI->getOperand(0).getCImm());
- assert (0 && "Unexpected 3 operand DBG_VALUE instruction!");
- return DotDebugLocEntry();
+ llvm_unreachable("Unexpected 3 operand DBG_VALUE instruction!");
}
/// collectVariableInfo - Find variables for each lexical scope.
@@ -964,7 +1057,8 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF,
}
// The value is valid until the next DBG_VALUE or clobber.
- DotDebugLocEntries.push_back(getDebugLocEntry(Asm, FLabel, SLabel, Begin));
+ DotDebugLocEntries.push_back(getDebugLocEntry(Asm, FLabel, SLabel,
+ Begin));
}
DotDebugLocEntries.push_back(DotDebugLocEntry());
}
@@ -999,12 +1093,15 @@ void DwarfDebug::beginInstruction(const MachineInstr *MI) {
if (!MI->isDebugValue()) {
DebugLoc DL = MI->getDebugLoc();
if (DL != PrevInstLoc && (!DL.isUnknown() || UnknownLocations)) {
- unsigned Flags = DWARF2_FLAG_IS_STMT;
+ unsigned Flags = 0;
PrevInstLoc = DL;
if (DL == PrologEndLoc) {
Flags |= DWARF2_FLAG_PROLOGUE_END;
PrologEndLoc = DebugLoc();
}
+ if (PrologEndLoc.isUnknown())
+ Flags |= DWARF2_FLAG_IS_STMT;
+
if (!DL.isUnknown()) {
const MDNode *Scope = DL.getScope(Asm->MF->getFunction()->getContext());
recordSourceLine(DL.getLine(), DL.getCol(), Scope, Flags);
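The net effect of the flag change above inverts the old default: rows emitted while the prologue is still open carry no is_stmt flag, and everything from the prologue end onward does. A trace of the assumed control flow:

    //   Flags = 0;
    //   if (DL == PrologEndLoc) {        // first location past the prologue
    //     Flags |= DWARF2_FLAG_PROLOGUE_END;
    //     PrologEndLoc = DebugLoc();     // unknown from this point on
    //   }
    //   if (PrologEndLoc.isUnknown())    // true for the prologue-end row
    //     Flags |= DWARF2_FLAG_IS_STMT;  // and for every row after it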
@@ -1099,12 +1196,19 @@ static MDNode *getScopeNode(DebugLoc DL, const LLVMContext &Ctx) {
}
/// getFnDebugLoc - Walk up the scope chain of given debug loc and find
-/// line number info for the function.
+/// line number info for the function.
static DebugLoc getFnDebugLoc(DebugLoc DL, const LLVMContext &Ctx) {
const MDNode *Scope = getScopeNode(DL, Ctx);
DISubprogram SP = getDISubprogram(Scope);
- if (SP.Verify())
- return DebugLoc::get(SP.getLineNumber(), 0, SP);
+ if (SP.Verify()) {
+ // Check the number of operands, since this compatibility check is
+ // cheap here.
+ if (SP->getNumOperands() > 19)
+ return DebugLoc::get(SP.getScopeLineNumber(), 0, SP);
+ else
+ return DebugLoc::get(SP.getLineNumber(), 0, SP);
+ }
+
return DebugLoc();
}
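The magic number 19 is a layout probe, presumably distinguishing DISubprogram metadata nodes that carry the newer scope-line operand from older ones that do not; counting operands is cheaper than a version check. Sketched under that assumption:

    //   <= 19 operands: legacy node, no scope line recorded
    //       -> DebugLoc::get(SP.getLineNumber(), ...)      // decl line
    //   >  19 operands: an extra operand holds the scope line
    //       -> DebugLoc::get(SP.getScopeLineNumber(), ...) // body's scope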
@@ -1135,7 +1239,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
const MachineInstr *MI = II;
if (MI->isDebugValue()) {
- assert (MI->getNumOperands() > 1 && "Invalid machine instruction!");
+ assert(MI->getNumOperands() > 1 && "Invalid machine instruction!");
// Keep track of user variables.
const MDNode *Var =
@@ -1206,7 +1310,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
if (!MOI->isReg() || !MOI->isDef() || !MOI->getReg())
continue;
- for (const unsigned *AI = TRI->getOverlaps(MOI->getReg());
+ for (const uint16_t *AI = TRI->getOverlaps(MOI->getReg());
unsigned Reg = *AI; ++AI) {
const MDNode *Var = LiveUserVar[Reg];
if (!Var)
@@ -1277,7 +1381,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
MF->getFunction()->getContext());
recordSourceLine(FnStartDL.getLine(), FnStartDL.getCol(),
FnStartDL.getScope(MF->getFunction()->getContext()),
- DWARF2_FLAG_IS_STMT);
+ 0);
}
}
@@ -1303,7 +1407,7 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
LexicalScope *FnScope = LScopes.getCurrentFunctionScope();
CompileUnit *TheCU = SPMap.lookup(FnScope->getScopeNode());
- assert (TheCU && "Unable to find compile unit!");
+ assert(TheCU && "Unable to find compile unit!");
// Construct abstract scopes.
ArrayRef<LexicalScope *> AList = LScopes.getAbstractScopesList();
@@ -1327,7 +1431,7 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
DIE *CurFnDIE = constructScopeDIE(TheCU, FnScope);
- if (!DisableFramePointerElim(*MF))
+ if (!MF->getTarget().Options.DisableFramePointerElim(*MF))
TheCU->addUInt(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr,
dwarf::DW_FORM_flag, 1);
@@ -1380,7 +1484,7 @@ void DwarfDebug::recordSourceLine(unsigned Line, unsigned Col, const MDNode *S,
Fn = DB.getFilename();
Dir = DB.getDirectory();
} else
- assert(0 && "Unexpected scope info");
+ llvm_unreachable("Unexpected scope info");
Src = GetOrCreateSourceID(Fn, Dir);
}
@@ -1398,10 +1502,6 @@ DwarfDebug::computeSizeAndOffset(DIE *Die, unsigned Offset, bool Last) {
// Get the children.
const std::vector<DIE *> &Children = Die->getChildren();
- // If not last sibling and has children then add sibling offset attribute.
- if (!Last && !Children.empty())
- Die->addSiblingOffset(DIEValueAllocator);
-
// Record the abbreviation.
assignAbbrevNumber(Die->getAbbrev());
@@ -1454,18 +1554,6 @@ void DwarfDebug::computeSizeAndOffsets() {
}
}
-/// EmitSectionSym - Switch to the specified MCSection and emit an assembler
-/// temporary label to it if SymbolStem is specified.
-static MCSymbol *EmitSectionSym(AsmPrinter *Asm, const MCSection *Section,
- const char *SymbolStem = 0) {
- Asm->OutStreamer.SwitchSection(Section);
- if (!SymbolStem) return 0;
-
- MCSymbol *TmpSym = Asm->GetTempSymbol(SymbolStem);
- Asm->OutStreamer.EmitLabel(TmpSym);
- return TmpSym;
-}
-
/// EmitSectionLabels - Emit initial Dwarf sections with a label at
/// the start of each one.
void DwarfDebug::EmitSectionLabels() {
@@ -1483,7 +1571,6 @@ void DwarfDebug::EmitSectionLabels() {
EmitSectionSym(Asm, TLOF.getDwarfLineSection(), "section_line");
EmitSectionSym(Asm, TLOF.getDwarfLocSection());
- EmitSectionSym(Asm, TLOF.getDwarfPubNamesSection());
EmitSectionSym(Asm, TLOF.getDwarfPubTypesSection());
DwarfStrSectionSym =
EmitSectionSym(Asm, TLOF.getDwarfStrSection(), "section_str");
@@ -1525,9 +1612,6 @@ void DwarfDebug::emitDIE(DIE *Die) {
Asm->OutStreamer.AddComment(dwarf::AttributeString(Attr));
switch (Attr) {
- case dwarf::DW_AT_sibling:
- Asm->EmitInt32(Die->getSiblingOffset());
- break;
case dwarf::DW_AT_abstract_origin: {
DIEEntry *E = cast<DIEEntry>(Values[i]);
DIE *Origin = E->getEntry();
@@ -1539,7 +1623,7 @@ void DwarfDebug::emitDIE(DIE *Die) {
// DW_AT_range Value encodes offset in debug_range section.
DIEInteger *V = cast<DIEInteger>(Values[i]);
- if (Asm->MAI->doesDwarfUsesLabelOffsetForRanges()) {
+ if (Asm->MAI->doesDwarfUseLabelOffsetForRanges()) {
Asm->EmitLabelPlusOffset(DwarfDebugRangeSectionSym,
V->getValue(),
4);
@@ -1678,62 +1762,133 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
Asm->EmitInt8(1);
}
-/// emitDebugPubNames - Emit visible names into a debug pubnames section.
-///
-void DwarfDebug::emitDebugPubNames() {
+/// emitAccelNames - Emit visible names into a hashed accelerator table
+/// section.
+void DwarfDebug::emitAccelNames() {
+ DwarfAccelTable AT(DwarfAccelTable::Atom(DwarfAccelTable::eAtomTypeDIEOffset,
+ dwarf::DW_FORM_data4));
for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
E = CUMap.end(); I != E; ++I) {
CompileUnit *TheCU = I->second;
- // Start the dwarf pubnames section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfPubNamesSection());
+ const StringMap<std::vector<DIE*> > &Names = TheCU->getAccelNames();
+ for (StringMap<std::vector<DIE*> >::const_iterator
+ GI = Names.begin(), GE = Names.end(); GI != GE; ++GI) {
+ const char *Name = GI->getKeyData();
+ const std::vector<DIE *> &Entities = GI->second;
+ for (std::vector<DIE *>::const_iterator DI = Entities.begin(),
+ DE = Entities.end(); DI != DE; ++DI)
+ AT.AddName(Name, (*DI));
+ }
+ }
- Asm->OutStreamer.AddComment("Length of Public Names Info");
- Asm->EmitLabelDifference(
- Asm->GetTempSymbol("pubnames_end", TheCU->getID()),
- Asm->GetTempSymbol("pubnames_begin", TheCU->getID()), 4);
+ AT.FinalizeTable(Asm, "Names");
+ Asm->OutStreamer.SwitchSection(
+ Asm->getObjFileLowering().getDwarfAccelNamesSection());
+ MCSymbol *SectionBegin = Asm->GetTempSymbol("names_begin");
+ Asm->OutStreamer.EmitLabel(SectionBegin);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("pubnames_begin",
- TheCU->getID()));
+ // Emit the full data.
+ AT.Emit(Asm, SectionBegin, this);
+}
- Asm->OutStreamer.AddComment("DWARF Version");
- Asm->EmitInt16(dwarf::DWARF_VERSION);
+/// emitAccelObjC - Emit objective C classes and categories into a hashed
+/// accelerator table section.
+void DwarfDebug::emitAccelObjC() {
+ DwarfAccelTable AT(DwarfAccelTable::Atom(DwarfAccelTable::eAtomTypeDIEOffset,
+ dwarf::DW_FORM_data4));
+ for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
+ E = CUMap.end(); I != E; ++I) {
+ CompileUnit *TheCU = I->second;
+ const StringMap<std::vector<DIE*> > &Names = TheCU->getAccelObjC();
+ for (StringMap<std::vector<DIE*> >::const_iterator
+ GI = Names.begin(), GE = Names.end(); GI != GE; ++GI) {
+ const char *Name = GI->getKeyData();
+ const std::vector<DIE *> &Entities = GI->second;
+ for (std::vector<DIE *>::const_iterator DI = Entities.begin(),
+ DE = Entities.end(); DI != DE; ++DI)
+ AT.AddName(Name, (*DI));
+ }
+ }
- Asm->OutStreamer.AddComment("Offset of Compilation Unit Info");
- Asm->EmitSectionOffset(Asm->GetTempSymbol("info_begin", TheCU->getID()),
- DwarfInfoSectionSym);
+ AT.FinalizeTable(Asm, "ObjC");
+ Asm->OutStreamer.SwitchSection(Asm->getObjFileLowering()
+ .getDwarfAccelObjCSection());
+ MCSymbol *SectionBegin = Asm->GetTempSymbol("objc_begin");
+ Asm->OutStreamer.EmitLabel(SectionBegin);
- Asm->OutStreamer.AddComment("Compilation Unit Length");
- Asm->EmitLabelDifference(Asm->GetTempSymbol("info_end", TheCU->getID()),
- Asm->GetTempSymbol("info_begin", TheCU->getID()),
- 4);
+ // Emit the full data.
+ AT.Emit(Asm, SectionBegin, this);
+}
- const StringMap<DIE*> &Globals = TheCU->getGlobals();
- for (StringMap<DIE*>::const_iterator
- GI = Globals.begin(), GE = Globals.end(); GI != GE; ++GI) {
+/// emitAccelNamespaces - Emit namespace dies into a hashed accelerator
+/// table.
+void DwarfDebug::emitAccelNamespaces() {
+ DwarfAccelTable AT(DwarfAccelTable::Atom(DwarfAccelTable::eAtomTypeDIEOffset,
+ dwarf::DW_FORM_data4));
+ for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
+ E = CUMap.end(); I != E; ++I) {
+ CompileUnit *TheCU = I->second;
+ const StringMap<std::vector<DIE*> > &Names = TheCU->getAccelNamespace();
+ for (StringMap<std::vector<DIE*> >::const_iterator
+ GI = Names.begin(), GE = Names.end(); GI != GE; ++GI) {
const char *Name = GI->getKeyData();
- DIE *Entity = GI->second;
+ const std::vector<DIE *> &Entities = GI->second;
+ for (std::vector<DIE *>::const_iterator DI = Entities.begin(),
+ DE = Entities.end(); DI != DE; ++DI)
+ AT.AddName(Name, (*DI));
+ }
+ }
- Asm->OutStreamer.AddComment("DIE offset");
- Asm->EmitInt32(Entity->getOffset());
+ AT.FinalizeTable(Asm, "namespac");
+ Asm->OutStreamer.SwitchSection(Asm->getObjFileLowering()
+ .getDwarfAccelNamespaceSection());
+ MCSymbol *SectionBegin = Asm->GetTempSymbol("namespac_begin");
+ Asm->OutStreamer.EmitLabel(SectionBegin);
- if (Asm->isVerbose())
- Asm->OutStreamer.AddComment("External Name");
- Asm->OutStreamer.EmitBytes(StringRef(Name, strlen(Name)+1), 0);
- }
+ // Emit the full data.
+ AT.Emit(Asm, SectionBegin, this);
+}
- Asm->OutStreamer.AddComment("End Mark");
- Asm->EmitInt32(0);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("pubnames_end",
- TheCU->getID()));
+/// emitAccelTypes() - Emit type dies into a hashed accelerator table.
+void DwarfDebug::emitAccelTypes() {
+ std::vector<DwarfAccelTable::Atom> Atoms;
+ Atoms.push_back(DwarfAccelTable::Atom(DwarfAccelTable::eAtomTypeDIEOffset,
+ dwarf::DW_FORM_data4));
+ Atoms.push_back(DwarfAccelTable::Atom(DwarfAccelTable::eAtomTypeTag,
+ dwarf::DW_FORM_data2));
+ Atoms.push_back(DwarfAccelTable::Atom(DwarfAccelTable::eAtomTypeTypeFlags,
+ dwarf::DW_FORM_data1));
+ DwarfAccelTable AT(Atoms);
+ for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
+ E = CUMap.end(); I != E; ++I) {
+ CompileUnit *TheCU = I->second;
+ const StringMap<std::vector<std::pair<DIE*, unsigned > > > &Names
+ = TheCU->getAccelTypes();
+ for (StringMap<std::vector<std::pair<DIE*, unsigned> > >::const_iterator
+ GI = Names.begin(), GE = Names.end(); GI != GE; ++GI) {
+ const char *Name = GI->getKeyData();
+ const std::vector<std::pair<DIE *, unsigned> > &Entities = GI->second;
+ for (std::vector<std::pair<DIE *, unsigned> >::const_iterator DI
+ = Entities.begin(), DE = Entities.end(); DI != DE; ++DI)
+ AT.AddName(Name, (*DI).first, (*DI).second);
+ }
}
+
+ AT.FinalizeTable(Asm, "types");
+ Asm->OutStreamer.SwitchSection(Asm->getObjFileLowering()
+ .getDwarfAccelTypesSection());
+ MCSymbol *SectionBegin = Asm->GetTempSymbol("types_begin");
+ Asm->OutStreamer.EmitLabel(SectionBegin);
+
+ // Emit the full data.
+ AT.Emit(Asm, SectionBegin, this);
}
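The three DIE-only emitters above repeat the same per-CU collection loop verbatim; a hypothetical helper (not part of this patch) capturing that shape:

    static void addAccelEntries(DwarfAccelTable &AT,
                                const StringMap<std::vector<DIE*> > &Names) {
      for (StringMap<std::vector<DIE*> >::const_iterator GI = Names.begin(),
             GE = Names.end(); GI != GE; ++GI)
        for (std::vector<DIE*>::const_iterator DI = GI->second.begin(),
               DE = GI->second.end(); DI != DE; ++DI)
          AT.AddName(GI->getKeyData(), *DI);   // one table entry per DIE
    }

emitAccelTypes cannot share it as-is, since its values are {DIE, tag} pairs fed to the three-atom AddName overload.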
void DwarfDebug::emitDebugPubTypes() {
for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
E = CUMap.end(); I != E; ++I) {
CompileUnit *TheCU = I->second;
- // Start the dwarf pubnames section.
+ // Start the dwarf pubtypes section.
Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfPubTypesSection());
Asm->OutStreamer.AddComment("Length of Public Types Info");
@@ -1766,6 +1921,7 @@ void DwarfDebug::emitDebugPubTypes() {
Asm->EmitInt32(Entity->getOffset());
if (Asm->isVerbose()) Asm->OutStreamer.AddComment("External Name");
+ // Emit the name with a terminating null byte.
Asm->OutStreamer.EmitBytes(StringRef(Name, GI->getKeyLength()+1), 0);
}
@@ -1801,8 +1957,10 @@ void DwarfDebug::emitDebugStr() {
// Emit a label for reference from debug information entries.
Asm->OutStreamer.EmitLabel(Entries[i].second->getValue().first);
- // Emit the string itself.
- Asm->OutStreamer.EmitBytes(Entries[i].second->getKey(), 0/*addrspace*/);
+ // Emit the string itself with a terminating null byte.
+ Asm->OutStreamer.EmitBytes(StringRef(Entries[i].second->getKeyData(),
+ Entries[i].second->getKeyLength()+1),
+ 0/*addrspace*/);
}
}
@@ -1958,7 +2116,7 @@ void DwarfDebug::emitDebugMacInfo() {
/// __debug_info section, and the low_pc is the starting address for the
/// inlining instance.
void DwarfDebug::emitDebugInlineInfo() {
- if (!Asm->MAI->doesDwarfUsesInlineInfoSection())
+ if (!Asm->MAI->doesDwarfUseInlineInfoSection())
return;
if (!FirstCU)
@@ -1990,10 +2148,9 @@ void DwarfDebug::emitDebugInlineInfo() {
StringRef Name = SP.getName();
Asm->OutStreamer.AddComment("MIPS linkage name");
- if (LName.empty()) {
- Asm->OutStreamer.EmitBytes(Name, 0);
- Asm->OutStreamer.EmitIntValue(0, 1, 0); // nul terminator.
- } else
+ if (LName.empty())
+ Asm->EmitSectionOffset(getStringPoolEntry(Name), DwarfStrSectionSym);
+ else
Asm->EmitSectionOffset(getStringPoolEntry(getRealLinkageName(LName)),
DwarfStrSectionSym);
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index 35653be..83f30f5 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -30,7 +30,8 @@
namespace llvm {
class CompileUnit;
-class DbgConcreteScope;
+class ConstantInt;
+class ConstantFP;
class DbgVariable;
class MachineFrameInfo;
class MachineModuleInfo;
@@ -207,8 +208,8 @@ class DwarfDebug {
///
std::vector<DIEAbbrev *> Abbreviations;
- /// SourceIdMap - Source id map, i.e. pair of directory id and source file
- /// id mapped to a unique id.
+ /// SourceIdMap - Source id map, i.e. pair of source filename and directory,
+ /// separated by a zero byte, mapped to a unique id.
StringMap<unsigned> SourceIdMap;
/// StringPool - A String->Symbol mapping of strings used by indirect
@@ -216,8 +217,6 @@ class DwarfDebug {
StringMap<std::pair<MCSymbol*, unsigned> > StringPool;
unsigned NextStringPoolNumber;
- MCSymbol *getStringPoolEntry(StringRef Str);
-
/// SectionMap - Provides a unique id per text section.
///
UniqueVector<const MCSection*> SectionMap;
@@ -239,12 +238,12 @@ class DwarfDebug {
/// DotDebugLocEntries - Collection of DotDebugLocEntry.
SmallVector<DotDebugLocEntry, 4> DotDebugLocEntries;
- /// InliendSubprogramDIEs - Collection of subprgram DIEs that are marked
+ /// InlinedSubprogramDIEs - Collection of subprogram DIEs that are marked
/// (at the end of the module) as DW_AT_inline.
SmallPtrSet<DIE *, 4> InlinedSubprogramDIEs;
/// InlineInfo - Keep track of inlined functions and their location. This
- /// information is used to populate debug_inlined section.
+ /// information is used to populate the debug_inlined section.
typedef std::pair<const MCSymbol *, DIE *> InlineInfoLabels;
DenseMap<const MDNode *, SmallVector<InlineInfoLabels, 4> > InlineInfo;
SmallVector<const MDNode *, 4> InlinedSPNodes;
@@ -304,6 +303,10 @@ class DwarfDebug {
MCSymbol *DwarfDebugLocSectionSym;
MCSymbol *FunctionBeginSym, *FunctionEndSym;
+ // As an optimization, there is no need to emit an entry in the directory
+ // table for the same directory as DW_AT_comp_dir.
+ StringRef CompilationDir;
+
private:
/// assignAbbrevNumber - Define a unique number for the abbreviation.
@@ -340,7 +343,7 @@ private:
/// the start of each one.
void EmitSectionLabels();
- /// emitDIE - Recusively Emits a debug information entry.
+ /// emitDIE - Recursively emits a debug information entry.
///
void emitDIE(DIE *Die);
@@ -365,10 +368,22 @@ private:
///
void emitEndOfLineMatrix(unsigned SectionEnd);
- /// emitDebugPubNames - Emit visible names into a debug pubnames section.
- ///
- void emitDebugPubNames();
+ /// emitAccelNames - Emit visible names into a hashed accelerator table
+ /// section.
+ void emitAccelNames();
+
+ /// emitAccelObjC - Emit objective C classes and categories into a hashed
+ /// accelerator table section.
+ void emitAccelObjC();
+
+ /// emitAccelNamespaces - Emit namespace dies into a hashed accelerator
+ /// table.
+ void emitAccelNamespaces();
+ /// emitAccelTypes() - Emit type dies into a hashed accelerator table.
+ ///
+ void emitAccelTypes();
+
/// emitDebugPubTypes - Emit visible types into a debug pubtypes section.
///
void emitDebugPubTypes();
@@ -407,10 +422,10 @@ private:
/// 3. an unsigned LEB128 number indicating the number of distinct inlining
/// instances for the function.
///
- /// The rest of the entry consists of a {die_offset, low_pc} pair for each
+ /// The rest of the entry consists of a {die_offset, low_pc} pair for each
/// inlined instance; the die_offset points to the inlined_subroutine die in
- /// the __debug_info section, and the low_pc is the starting address for the
- /// inlining instance.
+ /// the __debug_info section, and the low_pc is the starting address for the
+ /// inlining instance.
void emitDebugInlineInfo();
/// constructCompileUnit - Create new CompileUnit for the given
@@ -426,8 +441,8 @@ private:
void recordSourceLine(unsigned Line, unsigned Col, const MDNode *Scope,
unsigned Flags);
- /// identifyScopeMarkers() - Indentify instructions that are marking
- /// beginning of or end of a scope.
+ /// identifyScopeMarkers() - Identify instructions that are marking the
+ /// beginning or end of a scope.
void identifyScopeMarkers();
/// addCurrentFnArgument - If Var is a current function argument then add
@@ -472,7 +487,7 @@ public:
void collectInfoFromNamedMDNodes(Module *M);
/// collectLegacyDebugInfo - Collect debug info using DebugInfoFinder.
- /// FIXME - Remove this when dragon-egg and llvm-gcc switch to DIBuilder.
+ /// FIXME - Remove this when DragonEgg switches to DIBuilder.
bool collectLegacyDebugInfo(Module *M);
/// beginModule - Emit all Dwarf sections that should come prior to the
@@ -504,6 +519,13 @@ public:
/// createSubprogramDIE - Create new DIE using SP.
DIE *createSubprogramDIE(DISubprogram SP);
+
+ /// getStringPool - returns the symbol marking the start of the string pool.
+ MCSymbol *getStringPool();
+
+ /// getStringPoolEntry - returns an entry into the string pool with the given
+ /// string text.
+ MCSymbol *getStringPoolEntry(StringRef Str);
};
} // End of namespace llvm
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
index 18b726b..70cc2e5 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
@@ -31,6 +31,7 @@
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -184,7 +185,7 @@ ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
/// CallToNoUnwindFunction - Return `true' if this is a call to a function
/// marked `nounwind'. Return `false' otherwise.
bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
- assert(MI->getDesc().isCall() && "This should be a call instruction!");
+ assert(MI->isCall() && "This should be a call instruction!");
bool MarkedNoUnwind = false;
bool SawFunc = false;
@@ -243,7 +244,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
MI != E; ++MI) {
if (!MI->isLabel()) {
- if (MI->getDesc().isCall())
+ if (MI->isCall())
SawPotentiallyThrowing |= !CallToNoUnwindFunction(MI);
continue;
}
@@ -529,10 +530,8 @@ void DwarfException::EmitExceptionTable() {
// Offset of the landing pad, counted in 16-byte bundles relative to the
// @LPStart address.
if (VerboseAsm) {
- Asm->OutStreamer.AddComment(Twine(">> Call Site ") +
- llvm::utostr(idx) + " <<");
- Asm->OutStreamer.AddComment(Twine(" On exception at call site ") +
- llvm::utostr(idx));
+ Asm->OutStreamer.AddComment(">> Call Site " + Twine(idx) + " <<");
+ Asm->OutStreamer.AddComment(" On exception at call site "+Twine(idx));
}
Asm->EmitULEB128(idx);
@@ -543,8 +542,8 @@ void DwarfException::EmitExceptionTable() {
if (S.Action == 0)
Asm->OutStreamer.AddComment(" Action: cleanup");
else
- Asm->OutStreamer.AddComment(Twine(" Action: ") +
- llvm::utostr((S.Action - 1) / 2 + 1));
+ Asm->OutStreamer.AddComment(" Action: " +
+ Twine((S.Action - 1) / 2 + 1));
}
Asm->EmitULEB128(S.Action);
}
@@ -596,8 +595,7 @@ void DwarfException::EmitExceptionTable() {
// number of 16-byte bundles. The first call site is counted relative to
// the start of the procedure fragment.
if (VerboseAsm)
- Asm->OutStreamer.AddComment(Twine(">> Call Site ") +
- llvm::utostr(++Entry) + " <<");
+ Asm->OutStreamer.AddComment(">> Call Site " + Twine(++Entry) + " <<");
Asm->EmitLabelDifference(BeginLabel, EHFuncBeginSym, 4);
if (VerboseAsm)
Asm->OutStreamer.AddComment(Twine(" Call between ") +
@@ -625,8 +623,8 @@ void DwarfException::EmitExceptionTable() {
if (S.Action == 0)
Asm->OutStreamer.AddComment(" On action: cleanup");
else
- Asm->OutStreamer.AddComment(Twine(" On action: ") +
- llvm::utostr((S.Action - 1) / 2 + 1));
+ Asm->OutStreamer.AddComment(" On action: " +
+ Twine((S.Action - 1) / 2 + 1));
}
Asm->EmitULEB128(S.Action);
}
@@ -640,8 +638,7 @@ void DwarfException::EmitExceptionTable() {
if (VerboseAsm) {
// Emit comments that decode the action table.
- Asm->OutStreamer.AddComment(Twine(">> Action Record ") +
- llvm::utostr(++Entry) + " <<");
+ Asm->OutStreamer.AddComment(">> Action Record " + Twine(++Entry) + " <<");
}
// Type Filter
@@ -650,11 +647,11 @@ void DwarfException::EmitExceptionTable() {
// type of the catch clauses or the types in the exception specification.
if (VerboseAsm) {
if (Action.ValueForTypeID > 0)
- Asm->OutStreamer.AddComment(Twine(" Catch TypeInfo ") +
- llvm::itostr(Action.ValueForTypeID));
+ Asm->OutStreamer.AddComment(" Catch TypeInfo " +
+ Twine(Action.ValueForTypeID));
else if (Action.ValueForTypeID < 0)
- Asm->OutStreamer.AddComment(Twine(" Filter TypeInfo ") +
- llvm::itostr(Action.ValueForTypeID));
+ Asm->OutStreamer.AddComment(" Filter TypeInfo " +
+ Twine(Action.ValueForTypeID));
else
Asm->OutStreamer.AddComment(" Cleanup");
}
@@ -669,8 +666,7 @@ void DwarfException::EmitExceptionTable() {
Asm->OutStreamer.AddComment(" No further actions");
} else {
unsigned NextAction = Entry + (Action.NextAction + 1) / 2;
- Asm->OutStreamer.AddComment(Twine(" Continue to action ") +
- llvm::utostr(NextAction));
+ Asm->OutStreamer.AddComment(" Continue to action "+Twine(NextAction));
}
}
Asm->EmitSLEB128(Action.NextAction);
@@ -687,7 +683,7 @@ void DwarfException::EmitExceptionTable() {
I = TypeInfos.rbegin(), E = TypeInfos.rend(); I != E; ++I) {
const GlobalVariable *GV = *I;
if (VerboseAsm)
- Asm->OutStreamer.AddComment(Twine("TypeInfo ") + llvm::utostr(Entry--));
+ Asm->OutStreamer.AddComment("TypeInfo " + Twine(Entry--));
if (GV)
Asm->EmitReference(GV, TTypeEncoding);
else
@@ -707,7 +703,7 @@ void DwarfException::EmitExceptionTable() {
if (VerboseAsm) {
--Entry;
if (TypeID != 0)
- Asm->OutStreamer.AddComment(Twine("FilterInfo ") + llvm::itostr(Entry));
+ Asm->OutStreamer.AddComment("FilterInfo " + Twine(Entry));
}
Asm->EmitULEB128(TypeID);
@@ -719,17 +715,17 @@ void DwarfException::EmitExceptionTable() {
/// EndModule - Emit all exception information that should come after the
/// content.
void DwarfException::EndModule() {
- assert(0 && "Should be implemented");
+ llvm_unreachable("Should be implemented");
}
/// BeginFunction - Gather pre-function exception information. Assumes it's
/// being emitted immediately after the function entry point.
void DwarfException::BeginFunction(const MachineFunction *MF) {
- assert(0 && "Should be implemented");
+ llvm_unreachable("Should be implemented");
}
/// EndFunction - Gather and emit post-function exception information.
///
void DwarfException::EndFunction() {
- assert(0 && "Should be implemented");
+ llvm_unreachable("Should be implemented");
}
diff --git a/contrib/llvm/lib/CodeGen/BranchFolding.cpp b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
index 75288b0..ef1d2ba 100644
--- a/contrib/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
@@ -23,6 +23,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -61,29 +62,33 @@ TailMergeSize("tail-merge-size",
namespace {
/// BranchFolderPass - Wrap branch folder in a machine function pass.
- class BranchFolderPass : public MachineFunctionPass,
- public BranchFolder {
+ class BranchFolderPass : public MachineFunctionPass {
public:
static char ID;
- explicit BranchFolderPass(bool defaultEnableTailMerge)
- : MachineFunctionPass(ID), BranchFolder(defaultEnableTailMerge, true) {}
+ explicit BranchFolderPass(): MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
- virtual const char *getPassName() const { return "Control Flow Optimizer"; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetPassConfig>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
};
}
char BranchFolderPass::ID = 0;
+char &llvm::BranchFolderPassID = BranchFolderPass::ID;
-FunctionPass *llvm::createBranchFoldingPass(bool DefaultEnableTailMerge) {
- return new BranchFolderPass(DefaultEnableTailMerge);
-}
+INITIALIZE_PASS(BranchFolderPass, "branch-folder",
+ "Control Flow Optimizer", false, false)
bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
- return OptimizeFunction(MF,
- MF.getTarget().getInstrInfo(),
- MF.getTarget().getRegisterInfo(),
- getAnalysisIfAvailable<MachineModuleInfo>());
+ TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
+ BranchFolder Folder(PassConfig->getEnableTailMerge(), /*CommonHoist=*/true);
+ return Folder.OptimizeFunction(MF,
+ MF.getTarget().getInstrInfo(),
+ MF.getTarget().getRegisterInfo(),
+ getAnalysisIfAvailable<MachineModuleInfo>());
}
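For context, the old factory call sites would now schedule the pass by its exported ID instead; a hedged sketch, assuming TargetPassConfig's ID-based addPass interface:

    //   Before: PM.add(createBranchFoldingPass(EnableTailMerge));
    //   After (inside a TargetPassConfig subclass):
    //     addPass(&llvm::BranchFolderPassID);
    // The tail-merge decision moves out of the constructor and is read from
    // TargetPassConfig::getEnableTailMerge() at runOnMachineFunction time.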
@@ -132,7 +137,7 @@ bool BranchFolder::OptimizeImpDefsBlock(MachineBasicBlock *MBB) {
break;
unsigned Reg = I->getOperand(0).getReg();
ImpDefRegs.insert(Reg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs)
ImpDefRegs.insert(SubReg);
++I;
@@ -179,8 +184,14 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
TII = tii;
TRI = tri;
MMI = mmi;
+ RS = NULL;
- RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : NULL;
+ // Use a RegScavenger to help update liveness when required.
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ if (MRI.tracksLiveness() && TRI->requiresRegisterScavenging(MF))
+ RS = new RegScavenger();
+ else
+ MRI.invalidateLiveness();
// Fix CFG. The later algorithms expect it to be right.
bool MadeChange = false;
@@ -208,7 +219,7 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
delete RS;
return MadeChange;
}
-
+
// Walk the function to find jump tables that are live.
BitVector JTIsLive(JTI->getJumpTables().size());
for (MachineFunction::iterator BB = MF.begin(), E = MF.end();
@@ -432,10 +443,9 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
for (; I != E; ++I) {
if (I->isDebugValue())
continue;
- const MCInstrDesc &MCID = I->getDesc();
- if (MCID.isCall())
+ if (I->isCall())
Time += 10;
- else if (MCID.mayLoad() || MCID.mayStore())
+ else if (I->mayLoad() || I->mayStore())
Time += 2;
else
++Time;
@@ -484,8 +494,9 @@ BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
// an object with itself.
#ifndef _GLIBCXX_DEBUG
llvm_unreachable("Predecessor appears twice");
-#endif
+#else
return false;
+#endif
}
}
@@ -502,7 +513,7 @@ static unsigned CountTerminators(MachineBasicBlock *MBB,
break;
}
--I;
- if (!I->getDesc().isTerminator()) break;
+ if (!I->isTerminator()) break;
++NumTerms;
}
return NumTerms;
@@ -550,8 +561,8 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1,
// heuristics.
unsigned EffectiveTailLen = CommonTailLen;
if (SuccBB && MBB1 != PredBB && MBB2 != PredBB &&
- !MBB1->back().getDesc().isBarrier() &&
- !MBB2->back().getDesc().isBarrier())
+ !MBB1->back().isBarrier() &&
+ !MBB2->back().isBarrier())
++EffectiveTailLen;
// Check if the common tail is long enough to be worthwhile.
@@ -870,6 +881,9 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// Visit each predecessor only once.
if (!UniquePreds.insert(PBB))
continue;
+ // Skip blocks which may jump to a landing pad. Can't tail merge these.
+ if (PBB->getLandingPadSuccessor())
+ continue;
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*PBB, TBB, FBB, Cond, true)) {
@@ -924,8 +938,9 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
if (MergePotentials.size() >= 2)
MadeChange |= TryTailMergeBlocks(IBB, PredBB);
// Reinsert an unconditional branch if needed.
- // The 1 below can occur as a result of removing blocks in TryTailMergeBlocks.
- PredBB = prior(I); // this may have been changed in TryTailMergeBlocks
+ // The 1 below can occur as a result of removing blocks in
+ // TryTailMergeBlocks.
+ PredBB = prior(I); // this may have been changed in TryTailMergeBlocks
if (MergePotentials.size() == 1 &&
MergePotentials.begin()->getBlock() != PredBB)
FixTail(MergePotentials.begin()->getBlock(), IBB, TII);
@@ -980,7 +995,7 @@ static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) {
if (!MBBI->isDebugValue())
break;
}
- return (MBBI->getDesc().isBranch());
+ return (MBBI->isBranch());
}
/// IsBetterFallthrough - Return true if it would be clearly better to
@@ -1008,7 +1023,23 @@ static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
MachineBasicBlock::iterator MBB2I = --MBB2->end();
while (MBB2I->isDebugValue())
--MBB2I;
- return MBB2I->getDesc().isCall() && !MBB1I->getDesc().isCall();
+ return MBB2I->isCall() && !MBB1I->isCall();
+}
+
+/// getBranchDebugLoc - Find and return, if any, the DebugLoc of the branch
+/// instructions on the block. Always use the DebugLoc of the first
+/// branching instruction found unless it's absent, in which case use the
+/// DebugLoc of the second if present.
+static DebugLoc getBranchDebugLoc(MachineBasicBlock &MBB) {
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin())
+ return DebugLoc();
+ --I;
+ while (I->isDebugValue() && I != MBB.begin())
+ --I;
+ if (I->isBranch())
+ return I->getDebugLoc();
+ return DebugLoc();
}
/// OptimizeBlock - Analyze and optimize control flow related to the specified
@@ -1016,7 +1047,6 @@ static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
bool MadeChange = false;
MachineFunction &MF = *MBB->getParent();
- DebugLoc dl; // FIXME: this is nowhere
ReoptimizeBlock:
MachineFunction::iterator FallThrough = MBB;
@@ -1065,6 +1095,7 @@ ReoptimizeBlock:
// destination, remove the branch, replacing it with an unconditional one or
// a fall-through.
if (PriorTBB && PriorTBB == PriorFBB) {
+ DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
PriorCond.clear();
if (PriorTBB != MBB)
@@ -1091,7 +1122,7 @@ ReoptimizeBlock:
MachineBasicBlock::iterator PrevBBIter = PrevBB.end();
--PrevBBIter;
MachineBasicBlock::iterator MBBIter = MBB->begin();
- // Check if DBG_VALUE at the end of PrevBB is identical to the
+ // Check if DBG_VALUE at the end of PrevBB is identical to the
// DBG_VALUE at the beginning of MBB.
while (PrevBBIter != PrevBB.begin() && MBBIter != MBB->end()
&& PrevBBIter->isDebugValue() && MBBIter->isDebugValue()) {
@@ -1103,7 +1134,7 @@ ReoptimizeBlock:
}
}
PrevBB.splice(PrevBB.end(), MBB, MBB->begin(), MBB->end());
- PrevBB.removeSuccessor(PrevBB.succ_begin());;
+ PrevBB.removeSuccessor(PrevBB.succ_begin());
assert(PrevBB.succ_empty());
PrevBB.transferSuccessors(MBB);
MadeChange = true;
@@ -1122,6 +1153,7 @@ ReoptimizeBlock:
// If the prior block branches somewhere else on the condition and here if
// the condition is false, remove the uncond second branch.
if (PriorFBB == MBB) {
+ DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
MadeChange = true;
@@ -1135,6 +1167,7 @@ ReoptimizeBlock:
if (PriorTBB == MBB) {
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
if (!TII->ReverseBranchCondition(NewPriorCond)) {
+ DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond, dl);
MadeChange = true;
@@ -1172,6 +1205,7 @@ ReoptimizeBlock:
DEBUG(dbgs() << "\nMoving MBB: " << *MBB
<< "To make fallthrough to: " << *PriorTBB << "\n");
+ DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond, dl);
@@ -1201,6 +1235,7 @@ ReoptimizeBlock:
if (CurTBB && CurFBB && CurFBB == MBB && CurTBB != MBB) {
SmallVector<MachineOperand, 4> NewCond(CurCond);
if (!TII->ReverseBranchCondition(NewCond)) {
+ DebugLoc dl = getBranchDebugLoc(*MBB);
TII->RemoveBranch(*MBB);
TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
MadeChange = true;
@@ -1214,6 +1249,7 @@ ReoptimizeBlock:
if (CurTBB && CurCond.empty() && CurFBB == 0 &&
IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
!MBB->hasAddressTaken()) {
+ DebugLoc dl = getBranchDebugLoc(*MBB);
// This block may contain just an unconditional branch. Because there can
// be 'non-branch terminators' in the block, try removing the branch and
// then seeing if the block is empty.
@@ -1256,8 +1292,9 @@ ReoptimizeBlock:
assert(PriorFBB == 0 && "Machine CFG out of date!");
PriorFBB = MBB;
}
+ DebugLoc pdl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, dl);
+ TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, pdl);
}
// Iterate through all the predecessors, revectoring each in-turn.
@@ -1281,9 +1318,10 @@ ReoptimizeBlock:
bool NewCurUnAnalyzable = TII->AnalyzeBranch(*PMBB, NewCurTBB,
NewCurFBB, NewCurCond, true);
if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
+ DebugLoc pdl = getBranchDebugLoc(*PMBB);
TII->RemoveBranch(*PMBB);
NewCurCond.clear();
- TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond, dl);
+ TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond, pdl);
MadeChange = true;
++NumBranchOpts;
PMBB->CorrectExtraCFGEdges(NewCurTBB, 0, false);
@@ -1343,7 +1381,7 @@ ReoptimizeBlock:
if (CurFallsThru) {
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
CurCond.clear();
- TII->InsertBranch(*MBB, NextBB, 0, CurCond, dl);
+ TII->InsertBranch(*MBB, NextBB, 0, CurCond, DebugLoc());
}
MBB->moveAfter(PredBB);
MadeChange = true;
@@ -1446,7 +1484,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
continue;
if (MO.isUse()) {
Uses.insert(Reg);
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS)
Uses.insert(*AS);
} else if (!MO.isDead())
// Don't try to hoist code in the rare case the terminator defines a
@@ -1469,6 +1507,9 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
bool IsDef = false;
for (unsigned i = 0, e = PI->getNumOperands(); !IsDef && i != e; ++i) {
const MachineOperand &MO = PI->getOperand(i);
+ // If PI has a regmask operand, it is probably a call. Separate away.
+ if (MO.isRegMask())
+ return Loc;
if (!MO.isReg() || MO.isUse())
continue;
unsigned Reg = MO.getReg();
@@ -1505,16 +1546,16 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
continue;
if (MO.isUse()) {
Uses.insert(Reg);
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS)
Uses.insert(*AS);
} else {
if (Uses.count(Reg)) {
Uses.erase(Reg);
- for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
Uses.erase(*SR); // Use getSubRegisters to be conservative
}
Defs.insert(Reg);
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS)
Defs.insert(*AS);
}
}
@@ -1581,6 +1622,11 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
bool IsSafe = true;
for (unsigned i = 0, e = TIB->getNumOperands(); i != e; ++i) {
MachineOperand &MO = TIB->getOperand(i);
+ // Don't attempt to hoist instructions with register masks.
+ if (MO.isRegMask()) {
+ IsSafe = false;
+ break;
+ }
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
@@ -1615,6 +1661,11 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
IsSafe = false;
break;
}
+
+ if (MO.isKill() && Uses.count(Reg))
+ // Kills a register that's read by the instruction at the point of
+ // insertion. Remove the kill marker.
+ MO.setIsKill(false);
}
}
if (!IsSafe)
@@ -1632,7 +1683,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
unsigned Reg = MO.getReg();
if (!Reg || !LocalDefsSet.count(Reg))
continue;
- for (const unsigned *OR = TRI->getOverlaps(Reg); *OR; ++OR)
+ for (const uint16_t *OR = TRI->getOverlaps(Reg); *OR; ++OR)
LocalDefsSet.erase(*OR);
}
@@ -1645,11 +1696,11 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!Reg)
continue;
LocalDefs.push_back(Reg);
- for (const unsigned *OR = TRI->getOverlaps(Reg); *OR; ++OR)
+ for (const uint16_t *OR = TRI->getOverlaps(Reg); *OR; ++OR)
LocalDefsSet.insert(*OR);
}
- HasDups = true;;
+ HasDups = true;
++TIB;
++FIB;
}
diff --git a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
index 14eb054..2b7dfdb 100644
--- a/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -58,7 +58,7 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
- for (const unsigned *Alias = TRI.getOverlaps(Reg);
+ for (const uint16_t *Alias = TRI.getOverlaps(Reg);
unsigned Reg = *Alias; ++Alias)
UsedRegs[Reg/32] |= 1 << (Reg&31);
}
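A quick worked example of the packing: UsedRegs is an array of 32-bit words, one bit per physical register. For Reg = 37:

    //   word index: 37 / 32 = 1
    //   bit index:  37 & 31 = 5
    //   UsedRegs[1] |= 1 << 5;   // register 37 marked; the surrounding
    //                            // loop does the same for each overlapping
    //                            // alias from getOverlaps()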
diff --git a/contrib/llvm/lib/CodeGen/CodeGen.cpp b/contrib/llvm/lib/CodeGen/CodeGen.cpp
index 424535b..a81bb5c 100644
--- a/contrib/llvm/lib/CodeGen/CodeGen.cpp
+++ b/contrib/llvm/lib/CodeGen/CodeGen.cpp
@@ -19,36 +19,49 @@ using namespace llvm;
/// initializeCodeGen - Initialize all passes linked into the CodeGen library.
void llvm::initializeCodeGen(PassRegistry &Registry) {
+ initializeBranchFolderPassPass(Registry);
initializeCalculateSpillWeightsPass(Registry);
+ initializeCodePlacementOptPass(Registry);
initializeDeadMachineInstructionElimPass(Registry);
+ initializeExpandPostRAPass(Registry);
+ initializeExpandISelPseudosPass(Registry);
+ initializeFinalizeMachineBundlesPass(Registry);
+ initializeGCMachineCodeAnalysisPass(Registry);
initializeGCModuleInfoPass(Registry);
initializeIfConverterPass(Registry);
initializeLiveDebugVariablesPass(Registry);
initializeLiveIntervalsPass(Registry);
initializeLiveStacksPass(Registry);
initializeLiveVariablesPass(Registry);
+ initializeLocalStackSlotPassPass(Registry);
initializeMachineBlockFrequencyInfoPass(Registry);
+ initializeMachineBlockPlacementPass(Registry);
+ initializeMachineBlockPlacementStatsPass(Registry);
+ initializeMachineCopyPropagationPass(Registry);
initializeMachineCSEPass(Registry);
initializeMachineDominatorTreePass(Registry);
initializeMachineLICMPass(Registry);
initializeMachineLoopInfoPass(Registry);
initializeMachineModuleInfoPass(Registry);
+ initializeMachineSchedulerPass(Registry);
initializeMachineSinkingPass(Registry);
initializeMachineVerifierPassPass(Registry);
initializeOptimizePHIsPass(Registry);
initializePHIEliminationPass(Registry);
initializePeepholeOptimizerPass(Registry);
+ initializePostRASchedulerPass(Registry);
initializeProcessImplicitDefsPass(Registry);
initializePEIPass(Registry);
- initializeRALinScanPass(Registry);
initializeRegisterCoalescerPass(Registry);
initializeRenderMachineFunctionPass(Registry);
initializeSlotIndexesPass(Registry);
- initializeLoopSplitterPass(Registry);
initializeStackProtectorPass(Registry);
initializeStackSlotColoringPass(Registry);
initializeStrongPHIEliminationPass(Registry);
+ initializeTailDuplicatePassPass(Registry);
+ initializeTargetPassConfigPass(Registry);
initializeTwoAddressInstructionPassPass(Registry);
+ initializeUnpackMachineBundlesPass(Registry);
initializeUnreachableBlockElimPass(Registry);
initializeUnreachableMachineBlockElimPass(Registry);
initializeVirtRegMapPass(Registry);
diff --git a/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp b/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
index 270c337..c13c05e 100644
--- a/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
+++ b/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
@@ -39,9 +39,6 @@ namespace {
CodePlacementOpt() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
- virtual const char *getPassName() const {
- return "Code Placement Optimizer";
- }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineLoopInfo>();
@@ -69,9 +66,9 @@ namespace {
char CodePlacementOpt::ID = 0;
} // end anonymous namespace
-FunctionPass *llvm::createCodePlacementOptPass() {
- return new CodePlacementOpt();
-}
+char &llvm::CodePlacementOptID = CodePlacementOpt::ID;
+INITIALIZE_PASS(CodePlacementOpt, "code-placement",
+ "Code Placement Optimizer", false, false)
/// HasFallthrough - Test whether the given branch has a fallthrough, either as
/// a plain fallthrough or as a fallthrough case of a conditional branch.
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 84c4d59..bad5010 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -35,7 +35,8 @@ CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo &RCI) :
RegClassInfo(RCI),
Classes(TRI->getNumRegs(), static_cast<const TargetRegisterClass *>(0)),
KillIndices(TRI->getNumRegs(), 0),
- DefIndices(TRI->getNumRegs(), 0) {}
+ DefIndices(TRI->getNumRegs(), 0),
+ KeepRegs(TRI->getNumRegs(), false) {}
CriticalAntiDepBreaker::~CriticalAntiDepBreaker() {
}
@@ -52,9 +53,9 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
}
// Clear "do not change" set.
- KeepRegs.clear();
+ KeepRegs.reset();
- bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());
+ bool IsReturnBlock = (BBSize != 0 && BB->back().isReturn());
// Determine the live-out physregs for this block.
if (IsReturnBlock) {
@@ -63,14 +64,14 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
E = MRI.liveout_end(); I != E; ++I) {
unsigned Reg = *I;
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
+ KillIndices[Reg] = BBSize;
DefIndices[Reg] = ~0u;
// Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
+ KillIndices[AliasReg] = BBSize;
DefIndices[AliasReg] = ~0u;
}
}
@@ -85,14 +86,14 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
+ KillIndices[Reg] = BBSize;
DefIndices[Reg] = ~0u;
// Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
+ KillIndices[AliasReg] = BBSize;
DefIndices[AliasReg] = ~0u;
}
}
@@ -102,18 +103,18 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
// callee-saved register that is not saved in the prolog.
const MachineFrameInfo *MFI = MF.getFrameInfo();
BitVector Pristine = MFI->getPristineRegs(BB);
- for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
+ for (const uint16_t *I = TRI->getCalleeSavedRegs(&MF); *I; ++I) {
unsigned Reg = *I;
if (!IsReturnBlock && !Pristine.test(Reg)) continue;
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
+ KillIndices[Reg] = BBSize;
DefIndices[Reg] = ~0u;
// Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
+ KillIndices[AliasReg] = BBSize;
DefIndices[AliasReg] = ~0u;
}
}
@@ -121,7 +122,7 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
void CriticalAntiDepBreaker::FinishBlock() {
RegRefs.clear();
- KeepRegs.clear();
+ KeepRegs.reset();
}
void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
@@ -193,8 +194,8 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
// instruction which may not be executed. The second R6 def may or may not
// re-define R6 so it's not safe to change it since the last R6 use cannot be
// changed.
- bool Special = MI->getDesc().isCall() ||
- MI->getDesc().hasExtraSrcRegAllocReq() ||
+ bool Special = MI->isCall() ||
+ MI->hasExtraSrcRegAllocReq() ||
TII->isPredicated(MI);
// Scan the register operands for this instruction and update
@@ -217,7 +218,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
// Now check for aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
// If an alias of the reg is used during the live range, give up.
// Note that this allows us to skip checking if AntiDepReg
// overlaps with any of the aliases, among other things.
@@ -233,10 +234,11 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
RegRefs.insert(std::make_pair(Reg, &MO));
if (MO.isUse() && Special) {
- if (KeepRegs.insert(Reg)) {
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ if (!KeepRegs.test(Reg)) {
+ KeepRegs.set(Reg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg)
- KeepRegs.insert(*Subreg);
+ KeepRegs.set(*Subreg);
}
}
}
@@ -253,6 +255,17 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// address updates.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
+
+ if (MO.isRegMask())
+ for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i)
+ if (MO.clobbersPhysReg(i)) {
+ DefIndices[i] = Count;
+ KillIndices[i] = ~0u;
+ KeepRegs.reset(i);
+ Classes[i] = 0;
+ RegRefs.erase(i);
+ }
+
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
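Aside: the regmask handling added in this hunk relies on the call-clobber operand encoding. A hedged sketch of the semantics (the helper is invented; MachineOperand::clobbersPhysReg is the query used above):

#include "llvm/CodeGen/MachineOperand.h"
// A regmask operand stores the registers *preserved* across the instruction
// as a bit vector; any register whose bit is clear is clobbered.
static bool demoIsClobbered(const llvm::MachineOperand &MO, unsigned PhysReg) {
  return MO.isRegMask() && MO.clobbersPhysReg(PhysReg); // true => clobbered
}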
@@ -265,21 +278,21 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
assert(((KillIndices[Reg] == ~0u) !=
(DefIndices[Reg] == ~0u)) &&
"Kill and Def maps aren't consistent for Reg!");
- KeepRegs.erase(Reg);
+ KeepRegs.reset(Reg);
Classes[Reg] = 0;
RegRefs.erase(Reg);
// Repeat, for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
unsigned SubregReg = *Subreg;
DefIndices[SubregReg] = Count;
KillIndices[SubregReg] = ~0u;
- KeepRegs.erase(SubregReg);
+ KeepRegs.reset(SubregReg);
Classes[SubregReg] = 0;
RegRefs.erase(SubregReg);
}
// Conservatively mark super-registers as unusable.
- for (const unsigned *Super = TRI->getSuperRegisters(Reg);
+ for (const uint16_t *Super = TRI->getSuperRegisters(Reg);
*Super; ++Super) {
unsigned SuperReg = *Super;
Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
@@ -315,7 +328,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
"Kill and Def maps aren't consistent for Reg!");
}
// Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
if (KillIndices[AliasReg] == ~0u) {
KillIndices[AliasReg] = Count;
@@ -355,6 +368,9 @@ CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin,
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &CheckOper = MI->getOperand(i);
+ if (CheckOper.isRegMask() && CheckOper.clobbersPhysReg(NewReg))
+ return true;
+
if (!CheckOper.isReg() || !CheckOper.isDef() ||
CheckOper.getReg() != NewReg)
continue;
@@ -427,6 +443,8 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// Keep a map of the MachineInstr*'s back to the SUnit representing them.
// This is used for updating debug information.
+ //
+ // FIXME: Replace this with the existing map in ScheduleDAGInstrs::MISUnitMap
DenseMap<MachineInstr*,const SUnit*> MISUnitMap;
// Find the node at the bottom of the critical path.
@@ -535,7 +553,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
if (!RegClassInfo.isAllocatable(AntiDepReg))
// Don't break anti-dependencies on non-allocatable registers.
AntiDepReg = 0;
- else if (KeepRegs.count(AntiDepReg))
+ else if (KeepRegs.test(AntiDepReg))
// Don't break anti-dependencies if a use down below requires
// this exact register.
AntiDepReg = 0;
@@ -572,7 +590,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
// defined in a call must not be changed (ABI).
- if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+ if (MI->isCall() || MI->hasExtraDefRegAllocReq() ||
TII->isPredicated(MI))
// If this instruction's defs have special allocation requirement, don't
// break this anti-dependency.
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
index 0710780..7746259 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -24,7 +24,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/SmallSet.h"
#include <map>
namespace llvm {
@@ -66,7 +65,7 @@ class TargetRegisterInfo;
/// KeepRegs - A set of registers which are live and cannot be changed to
/// break anti-dependencies.
- SmallSet<unsigned, 4> KeepRegs;
+ BitVector KeepRegs;
public:
CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo&);
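Aside: the SmallSet-to-BitVector switch trades a small hash set for a dense bit set sized to the physreg universe, which the .cpp hunks above index directly. A minimal mapping of the old operations to the new ones (the demo function is invented):

#include "llvm/ADT/BitVector.h"
static void demoKeepRegsMigration(unsigned NumRegs, unsigned Reg) {
  llvm::BitVector KeepRegs(NumRegs); // was: SmallSet<unsigned, 4> KeepRegs;
  KeepRegs.set(Reg);                 // was: KeepRegs.insert(Reg);
  (void)KeepRegs.test(Reg);          // was: KeepRegs.count(Reg);
  KeepRegs.reset(Reg);               // was: KeepRegs.erase(Reg);
  KeepRegs.reset();                  // was: KeepRegs.clear();
}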
diff --git a/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp b/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp
new file mode 100644
index 0000000..bfbe779
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/DFAPacketizer.cpp
@@ -0,0 +1,223 @@
+//=- llvm/CodeGen/DFAPacketizer.cpp - DFA Packetizer for VLIW -*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This class implements a deterministic finite automaton (DFA) based
+// packetizing mechanism for VLIW architectures. It provides APIs to
+// determine whether there exists a legal mapping of instructions to
+// functional unit assignments in a packet. The DFA is auto-generated from
+// the target's Schedule.td file.
+//
+// A DFA consists of 3 major elements: states, inputs, and transitions. For
+// the packetizing mechanism, the input is the set of instruction classes for
+// a target. The state models all possible combinations of functional unit
+// consumption for a given set of instructions in a packet. A transition
+// models the addition of an instruction to a packet. In the DFA constructed
+// by this class, if an instruction can be added to a packet, then a valid
+// transition exists from the corresponding state. Invalid transitions
+// indicate that the instruction cannot be added to the current packet.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
+using namespace llvm;
+
+DFAPacketizer::DFAPacketizer(const InstrItineraryData *I, const int (*SIT)[2],
+ const unsigned *SET):
+ InstrItins(I), CurrentState(0), DFAStateInputTable(SIT),
+ DFAStateEntryTable(SET) {}
+
+
+//
+// ReadTable - Read the DFA transition table and update CachedTable.
+//
+// Format of the transition tables:
+// DFAStateInputTable[][2] = pairs of <Input, Transition> for all valid
+// transitions
+// DFAStateEntryTable[i] = Index of the first entry in DFAStateInputTable
+// for the ith state
+//
+void DFAPacketizer::ReadTable(unsigned int state) {
+ unsigned ThisState = DFAStateEntryTable[state];
+ unsigned NextStateInTable = DFAStateEntryTable[state+1];
+ // Early exit in case CachedTable already contains this
+ // state's transitions.
+ if (CachedTable.count(UnsignPair(state,
+ DFAStateInputTable[ThisState][0])))
+ return;
+
+ for (unsigned i = ThisState; i < NextStateInTable; i++)
+ CachedTable[UnsignPair(state, DFAStateInputTable[i][0])] =
+ DFAStateInputTable[i][1];
+}
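Aside: a toy instance of the two tables in the format ReadTable expects; the values are invented, as real tables are generated from the target's Schedule.td:

// Pairs of <functional-unit input, next state>, grouped by current state.
static const int ToyStateInputTable[][2] = {
  {0x1, 1}, {0x2, 2}, // state 0: two legal transitions
  {0x2, 3},           // state 1: one legal transition
};
// ToyStateEntryTable[s] is the index of state s's first pair above;
// ToyStateEntryTable[s+1] is one past its last pair.
static const unsigned ToyStateEntryTable[] = { 0, 2, 3 };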
+
+
+// canReserveResources - Check if the resources occupied by a MCInstrDesc
+// are available in the current state.
+bool DFAPacketizer::canReserveResources(const llvm::MCInstrDesc *MID) {
+ unsigned InsnClass = MID->getSchedClass();
+ const llvm::InstrStage *IS = InstrItins->beginStage(InsnClass);
+ unsigned FuncUnits = IS->getUnits();
+ UnsignPair StateTrans = UnsignPair(CurrentState, FuncUnits);
+ ReadTable(CurrentState);
+ return (CachedTable.count(StateTrans) != 0);
+}
+
+
+// reserveResources - Reserve the resources occupied by a MCInstrDesc and
+// change the current state to reflect that change.
+void DFAPacketizer::reserveResources(const llvm::MCInstrDesc *MID) {
+ unsigned InsnClass = MID->getSchedClass();
+ const llvm::InstrStage *IS = InstrItins->beginStage(InsnClass);
+ unsigned FuncUnits = IS->getUnits();
+ UnsignPair StateTrans = UnsignPair(CurrentState, FuncUnits);
+ ReadTable(CurrentState);
+ assert(CachedTable.count(StateTrans) != 0);
+ CurrentState = CachedTable[StateTrans];
+}
+
+
+// canReserveResources - Check if the resources occupied by a machine
+// instruction are available in the current state.
+bool DFAPacketizer::canReserveResources(llvm::MachineInstr *MI) {
+ const llvm::MCInstrDesc &MID = MI->getDesc();
+ return canReserveResources(&MID);
+}
+
+// reserveResources - Reserve the resources occupied by a machine
+// instruction and change the current state to reflect that change.
+void DFAPacketizer::reserveResources(llvm::MachineInstr *MI) {
+ const llvm::MCInstrDesc &MID = MI->getDesc();
+ reserveResources(&MID);
+}
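Aside: a minimal usage sketch of the tracker's public API (the wrapper is invented; clearResources is the reset that endPacket performs below):

static void demoTryAddToPacket(llvm::DFAPacketizer *RT,
                               llvm::MachineInstr *MI) {
  if (RT->canReserveResources(MI)) // legal FU assignment in current state?
    RT->reserveResources(MI);      // commit: advance the DFA state
  else
    RT->clearResources();          // no room: end packet, back to start state
}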
+
+namespace llvm {
+// DefaultVLIWScheduler - This class extends ScheduleDAGInstrs and overrides
+// the schedule() method to build the dependence graph.
+class DefaultVLIWScheduler : public ScheduleDAGInstrs {
+public:
+ DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
+ MachineDominatorTree &MDT, bool IsPostRA);
+ // Schedule - Actual scheduling work.
+ void schedule();
+};
+}
+
+DefaultVLIWScheduler::DefaultVLIWScheduler(
+ MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
+ bool IsPostRA) :
+ ScheduleDAGInstrs(MF, MLI, MDT, IsPostRA) {
+}
+
+void DefaultVLIWScheduler::schedule() {
+ // Build the scheduling graph.
+ buildSchedGraph(0);
+}
+
+// VLIWPacketizerList Ctor
+VLIWPacketizerList::VLIWPacketizerList(
+ MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
+ bool IsPostRA) : TM(MF.getTarget()), MF(MF) {
+ TII = TM.getInstrInfo();
+ ResourceTracker = TII->CreateTargetScheduleState(&TM, 0);
+ VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
+}
+
+// VLIWPacketizerList Dtor
+VLIWPacketizerList::~VLIWPacketizerList() {
+ if (VLIWScheduler)
+ delete VLIWScheduler;
+
+ if (ResourceTracker)
+ delete ResourceTracker;
+}
+
+// endPacket - End the current packet, bundle packet instructions and reset
+// DFA state.
+void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
+ MachineInstr *MI) {
+ if (CurrentPacketMIs.size() > 1) {
+ MachineInstr *MIFirst = CurrentPacketMIs.front();
+ finalizeBundle(*MBB, MIFirst, MI);
+ }
+ CurrentPacketMIs.clear();
+ ResourceTracker->clearResources();
+}
+
+// PacketizeMIs - Bundle machine instructions into packets.
+void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator BeginItr,
+ MachineBasicBlock::iterator EndItr) {
+ assert(VLIWScheduler && "VLIW Scheduler is not initialized!");
+ VLIWScheduler->enterRegion(MBB, BeginItr, EndItr, MBB->size());
+ VLIWScheduler->schedule();
+ VLIWScheduler->exitRegion();
+
+ // Generate MI -> SU map.
+ //std::map <MachineInstr*, SUnit*> MIToSUnit;
+ MIToSUnit.clear();
+ for (unsigned i = 0, e = VLIWScheduler->SUnits.size(); i != e; ++i) {
+ SUnit *SU = &VLIWScheduler->SUnits[i];
+ MIToSUnit[SU->getInstr()] = SU;
+ }
+
+ // The main packetizer loop.
+ for (; BeginItr != EndItr; ++BeginItr) {
+ MachineInstr *MI = BeginItr;
+
+ this->initPacketizerState();
+
+ // End the current packet if needed.
+ if (this->isSoloInstruction(MI)) {
+ endPacket(MBB, MI);
+ continue;
+ }
+
+ // Ignore pseudo instructions.
+ if (this->ignorePseudoInstruction(MI, MBB))
+ continue;
+
+ SUnit *SUI = MIToSUnit[MI];
+ assert(SUI && "Missing SUnit Info!");
+
+ // Ask DFA if machine resource is available for MI.
+ bool ResourceAvail = ResourceTracker->canReserveResources(MI);
+ if (ResourceAvail) {
+ // Dependency check for MI with instructions in CurrentPacketMIs.
+ for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
+ VE = CurrentPacketMIs.end(); VI != VE; ++VI) {
+ MachineInstr *MJ = *VI;
+ SUnit *SUJ = MIToSUnit[MJ];
+ assert(SUJ && "Missing SUnit Info!");
+
+ // Is it legal to packetize SUI and SUJ together?
+ if (!this->isLegalToPacketizeTogether(SUI, SUJ)) {
+ // Allow packetization if dependency can be pruned.
+ if (!this->isLegalToPruneDependencies(SUI, SUJ)) {
+ // End the packet if dependency cannot be pruned.
+ endPacket(MBB, MI);
+ break;
+ } // !isLegalToPruneDependencies.
+ } // !isLegalToPacketizeTogether.
+ } // For all instructions in CurrentPacketMIs.
+ } else {
+ // End the packet if resource is not available.
+ endPacket(MBB, MI);
+ }
+
+ // Add MI to the current packet.
+ BeginItr = this->addToPacket(MI);
+ } // For all instructions in BB.
+
+ // End any packet left behind.
+ endPacket(MBB, EndItr);
+}
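Aside: targets drive PacketizeMIs by subclassing VLIWPacketizerList and overriding the hooks it calls. A hedged sketch (the class and hook bodies are invented; only the hook names come from this file):

class ToyPacketizer : public llvm::VLIWPacketizerList {
public:
  ToyPacketizer(llvm::MachineFunction &MF, llvm::MachineLoopInfo &MLI,
                llvm::MachineDominatorTree &MDT)
    : VLIWPacketizerList(MF, MLI, MDT, /*IsPostRA=*/true) {}
  // Keep inline asm in a packet of its own.
  virtual bool isSoloInstruction(llvm::MachineInstr *MI) {
    return MI->isInlineAsm();
  }
  // Refuse to packetize SUI with an earlier instruction SUJ it depends on.
  virtual bool isLegalToPacketizeTogether(llvm::SUnit *SUI, llvm::SUnit *SUJ) {
    return !SUJ->isSucc(SUI);
  }
};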
diff --git a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index 6de6c0c..aa10d1d 100644
--- a/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/contrib/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -28,11 +28,12 @@ STATISTIC(NumDeletes, "Number of dead instructions deleted");
namespace {
class DeadMachineInstructionElim : public MachineFunctionPass {
virtual bool runOnMachineFunction(MachineFunction &MF);
-
+
const TargetRegisterInfo *TRI;
const MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
BitVector LivePhysRegs;
+ BitVector ReservedRegs;
public:
static char ID; // Pass identification, replacement for typeid
@@ -45,14 +46,11 @@ namespace {
};
}
char DeadMachineInstructionElim::ID = 0;
+char &llvm::DeadMachineInstructionElimID = DeadMachineInstructionElim::ID;
INITIALIZE_PASS(DeadMachineInstructionElim, "dead-mi-elimination",
"Remove dead machine instructions", false, false)
-FunctionPass *llvm::createDeadMachineInstructionElimPass() {
- return new DeadMachineInstructionElim();
-}
-
bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
// Technically speaking inline asm without side effects and no defs can still
// be deleted. But there is so much bad inline asm code out there, we should
@@ -70,10 +68,14 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef()) {
unsigned Reg = MO.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg) ?
- LivePhysRegs[Reg] : !MRI->use_nodbg_empty(Reg)) {
- // This def has a non-debug use. Don't delete the instruction!
- return false;
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ // Don't delete live physreg defs, or any reserved register defs.
+ if (LivePhysRegs.test(Reg) || ReservedRegs.test(Reg))
+ return false;
+ } else {
+ if (!MRI->use_nodbg_empty(Reg))
+ // This def has a non-debug use. Don't delete the instruction!
+ return false;
}
}
}
@@ -89,7 +91,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getTarget().getInstrInfo();
// Treat reserved registers as always live.
- BitVector ReservedRegs = TRI->getReservedRegs(MF);
+ ReservedRegs = TRI->getReservedRegs(MF);
// Loop over all instructions in all blocks, from bottom to top, so that it's
// more likely that chains of dependent but ultimately dead instructions will
@@ -102,7 +104,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
LivePhysRegs = ReservedRegs;
// Also add any explicit live-out physregs for this block.
- if (!MBB->empty() && MBB->back().getDesc().isReturn())
+ if (!MBB->empty() && MBB->back().isReturn())
for (MachineRegisterInfo::liveout_iterator LOI = MRI->liveout_begin(),
LOE = MRI->liveout_end(); LOI != LOE; ++LOI) {
unsigned Reg = *LOI;
@@ -169,10 +171,13 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
// Check the subreg set, not the alias set, because a def
// of a super-register may still be partially live after
// this def.
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
*SubRegs; ++SubRegs)
LivePhysRegs.reset(*SubRegs);
}
+ } else if (MO.isRegMask()) {
+ // Register mask of preserved registers. All clobbers are dead.
+ LivePhysRegs.clearBitsNotInMask(MO.getRegMask());
}
}
// Record the physreg uses, after the defs, in case a physreg is
@@ -183,7 +188,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
LivePhysRegs.set(Reg);
- for (const unsigned *AliasSet = TRI->getAliasSet(Reg);
+ for (const uint16_t *AliasSet = TRI->getAliasSet(Reg);
*AliasSet; ++AliasSet)
LivePhysRegs.set(*AliasSet);
}
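Aside: clearBitsNotInMask, used in the hunk above, keeps only the bits whose mask bit is set, so every physreg the call does not preserve dies in one step. A one-line demo (the wrapper is invented):

#include "llvm/ADT/BitVector.h"
static void demoRegMaskKill(llvm::BitVector &LivePhysRegs,
                            const uint32_t *PreservedMask) {
  LivePhysRegs.clearBitsNotInMask(PreservedMask); // live &= preserved set
}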
diff --git a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
index ed9e409..944dd4f 100644
--- a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
@@ -28,98 +28,34 @@
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
-STATISTIC(NumLandingPadsSplit, "Number of landing pads split");
-STATISTIC(NumUnwindsLowered, "Number of unwind instructions lowered");
-STATISTIC(NumResumesLowered, "Number of eh.resume calls lowered");
-STATISTIC(NumExceptionValuesMoved, "Number of eh.exception calls moved");
+STATISTIC(NumResumesLowered, "Number of resume calls lowered");
namespace {
class DwarfEHPrepare : public FunctionPass {
const TargetMachine *TM;
const TargetLowering *TLI;
- // The eh.exception intrinsic.
- Function *ExceptionValueIntrinsic;
-
- // The eh.selector intrinsic.
- Function *SelectorIntrinsic;
-
- // _Unwind_Resume_or_Rethrow or _Unwind_SjLj_Resume call.
- Constant *URoR;
-
- // The EH language-specific catch-all type.
- GlobalVariable *EHCatchAllValue;
-
- // _Unwind_Resume or the target equivalent.
+ // RewindFunction - _Unwind_Resume or the target equivalent.
Constant *RewindFunction;
- // We both use and preserve dominator info.
- DominatorTree *DT;
-
- // The function we are running on.
- Function *F;
-
- // The landing pads for this function.
- typedef SmallPtrSet<BasicBlock*, 8> BBSet;
- BBSet LandingPads;
-
- bool InsertUnwindResumeCalls();
-
- bool NormalizeLandingPads();
- bool LowerUnwindsAndResumes();
- bool MoveExceptionValueCalls();
-
- Instruction *CreateExceptionValueCall(BasicBlock *BB);
-
- /// CleanupSelectors - Any remaining eh.selector intrinsic calls which still
- /// use the "llvm.eh.catch.all.value" call need to convert to using its
- /// initializer instead.
- bool CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);
-
- bool HasCatchAllInSelector(IntrinsicInst *);
+ bool InsertUnwindResumeCalls(Function &Fn);
+ Instruction *GetExceptionObject(ResumeInst *RI);
- /// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
- void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
- SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels);
-
- /// FindAllURoRInvokes - Find all URoR invokes in the function.
- void FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes);
-
- /// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" or
- /// "_Unwind_SjLj_Resume" calls. The "unwind" part of these invokes jump to
- /// a landing pad within the current function. This is a candidate to merge
- /// the selector associated with the URoR invoke with the one from the
- /// URoR's landing pad.
- bool HandleURoRInvokes();
-
- /// FindSelectorAndURoR - Find the eh.selector call and URoR call associated
- /// with the eh.exception call. This recursively looks past instructions
- /// which don't change the EH pointer value, like casts or PHI nodes.
- bool FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
- SmallPtrSet<IntrinsicInst*, 8> &SelCalls,
- SmallPtrSet<PHINode*, 32> &SeenPHIs);
-
public:
static char ID; // Pass identification, replacement for typeid.
DwarfEHPrepare(const TargetMachine *tm) :
FunctionPass(ID), TM(tm), TLI(TM->getTargetLowering()),
- ExceptionValueIntrinsic(0), SelectorIntrinsic(0),
- URoR(0), EHCatchAllValue(0), RewindFunction(0) {
+ RewindFunction(0) {
initializeDominatorTreePass(*PassRegistry::getPassRegistry());
}
virtual bool runOnFunction(Function &Fn);
- // getAnalysisUsage - We need the dominator tree for handling URoR.
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
- AU.addPreserved<DominatorTree>();
- }
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const { }
const char *getPassName() const {
return "Exception handling preparation";
}
-
};
} // end anonymous namespace
@@ -129,543 +65,52 @@ FunctionPass *llvm::createDwarfEHPass(const TargetMachine *tm) {
return new DwarfEHPrepare(tm);
}
-/// HasCatchAllInSelector - Return true if the intrinsic instruction has a
-/// catch-all.
-bool DwarfEHPrepare::HasCatchAllInSelector(IntrinsicInst *II) {
- if (!EHCatchAllValue) return false;
-
- unsigned ArgIdx = II->getNumArgOperands() - 1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(II->getArgOperand(ArgIdx));
- return GV == EHCatchAllValue;
-}
-
-/// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
-void DwarfEHPrepare::
-FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
- SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels) {
- for (Value::use_iterator
- I = SelectorIntrinsic->use_begin(),
- E = SelectorIntrinsic->use_end(); I != E; ++I) {
- IntrinsicInst *II = cast<IntrinsicInst>(*I);
-
- if (II->getParent()->getParent() != F)
- continue;
-
- if (!HasCatchAllInSelector(II))
- Sels.insert(II);
- else
- CatchAllSels.insert(II);
- }
-}
-
-/// FindAllURoRInvokes - Find all URoR invokes in the function.
-void DwarfEHPrepare::
-FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes) {
- for (Value::use_iterator
- I = URoR->use_begin(),
- E = URoR->use_end(); I != E; ++I) {
- if (InvokeInst *II = dyn_cast<InvokeInst>(*I))
- URoRInvokes.insert(II);
- }
-}
-
-/// CleanupSelectors - Any remaining eh.selector intrinsic calls which still use
-/// the "llvm.eh.catch.all.value" call need to convert to using its
-/// initializer instead.
-bool DwarfEHPrepare::CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
- if (!EHCatchAllValue) return false;
-
- if (!SelectorIntrinsic) {
- SelectorIntrinsic =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
- if (!SelectorIntrinsic) return false;
- }
-
- bool Changed = false;
- for (SmallPtrSet<IntrinsicInst*, 32>::iterator
- I = Sels.begin(), E = Sels.end(); I != E; ++I) {
- IntrinsicInst *Sel = *I;
-
- // Index of the "llvm.eh.catch.all.value" variable.
- unsigned OpIdx = Sel->getNumArgOperands() - 1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getArgOperand(OpIdx));
- if (GV != EHCatchAllValue) continue;
- Sel->setArgOperand(OpIdx, EHCatchAllValue->getInitializer());
- Changed = true;
- }
-
- return Changed;
-}
-
-/// FindSelectorAndURoR - Find the eh.selector call associated with the
-/// eh.exception call. And indicate if there is a URoR "invoke" associated with
-/// the eh.exception call. This recursively looks past instructions which don't
-/// change the EH pointer value, like casts or PHI nodes.
-bool
-DwarfEHPrepare::FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
- SmallPtrSet<IntrinsicInst*, 8> &SelCalls,
- SmallPtrSet<PHINode*, 32> &SeenPHIs) {
- bool Changed = false;
-
- for (Value::use_iterator
- I = Inst->use_begin(), E = Inst->use_end(); I != E; ++I) {
- Instruction *II = dyn_cast<Instruction>(*I);
- if (!II || II->getParent()->getParent() != F) continue;
-
- if (IntrinsicInst *Sel = dyn_cast<IntrinsicInst>(II)) {
- if (Sel->getIntrinsicID() == Intrinsic::eh_selector)
- SelCalls.insert(Sel);
- } else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(II)) {
- if (Invoke->getCalledFunction() == URoR)
- URoRInvoke = true;
- } else if (CastInst *CI = dyn_cast<CastInst>(II)) {
- Changed |= FindSelectorAndURoR(CI, URoRInvoke, SelCalls, SeenPHIs);
- } else if (PHINode *PN = dyn_cast<PHINode>(II)) {
- if (SeenPHIs.insert(PN))
- // Don't process a PHI node more than once.
- Changed |= FindSelectorAndURoR(PN, URoRInvoke, SelCalls, SeenPHIs);
- }
- }
-
- return Changed;
-}
-
-/// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" or
-/// "_Unwind_SjLj_Resume" calls. The "unwind" part of these invokes jump to a
-/// landing pad within the current function. This is a candidate to merge the
-/// selector associated with the URoR invoke with the one from the URoR's
-/// landing pad.
-bool DwarfEHPrepare::HandleURoRInvokes() {
- if (!EHCatchAllValue) {
- EHCatchAllValue =
- F->getParent()->getNamedGlobal("llvm.eh.catch.all.value");
- if (!EHCatchAllValue) return false;
- }
-
- if (!SelectorIntrinsic) {
- SelectorIntrinsic =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
- if (!SelectorIntrinsic) return false;
- }
-
- SmallPtrSet<IntrinsicInst*, 32> Sels;
- SmallPtrSet<IntrinsicInst*, 32> CatchAllSels;
- FindAllCleanupSelectors(Sels, CatchAllSels);
-
- if (!URoR) {
- URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
- if (!URoR) return CleanupSelectors(CatchAllSels);
- }
-
- SmallPtrSet<InvokeInst*, 32> URoRInvokes;
- FindAllURoRInvokes(URoRInvokes);
-
- SmallPtrSet<IntrinsicInst*, 32> SelsToConvert;
-
- for (SmallPtrSet<IntrinsicInst*, 32>::iterator
- SI = Sels.begin(), SE = Sels.end(); SI != SE; ++SI) {
- const BasicBlock *SelBB = (*SI)->getParent();
- for (SmallPtrSet<InvokeInst*, 32>::iterator
- UI = URoRInvokes.begin(), UE = URoRInvokes.end(); UI != UE; ++UI) {
- const BasicBlock *URoRBB = (*UI)->getParent();
- if (DT->dominates(SelBB, URoRBB)) {
- SelsToConvert.insert(*SI);
- break;
+/// GetExceptionObject - Return the exception object from the value passed into
+/// the 'resume' instruction (typically an aggregate). Clean up any dead
+/// instructions, including the 'resume' instruction.
+Instruction *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
+ Value *V = RI->getOperand(0);
+ Instruction *ExnObj = 0;
+ InsertValueInst *SelIVI = dyn_cast<InsertValueInst>(V);
+ LoadInst *SelLoad = 0;
+ InsertValueInst *ExcIVI = 0;
+ bool EraseIVIs = false;
+
+ if (SelIVI) {
+ if (SelIVI->getNumIndices() == 1 && *SelIVI->idx_begin() == 1) {
+ ExcIVI = dyn_cast<InsertValueInst>(SelIVI->getOperand(0));
+ if (ExcIVI && isa<UndefValue>(ExcIVI->getOperand(0)) &&
+ ExcIVI->getNumIndices() == 1 && *ExcIVI->idx_begin() == 0) {
+ ExnObj = cast<Instruction>(ExcIVI->getOperand(1));
+ SelLoad = dyn_cast<LoadInst>(SelIVI->getOperand(1));
+ EraseIVIs = true;
}
}
}
- bool Changed = false;
-
- if (Sels.size() != SelsToConvert.size()) {
- // If we haven't been able to convert all of the clean-up selectors, then
- // loop through the slow way to see if they still need to be converted.
- if (!ExceptionValueIntrinsic) {
- ExceptionValueIntrinsic =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_exception);
- if (!ExceptionValueIntrinsic)
- return CleanupSelectors(CatchAllSels);
- }
-
- for (Value::use_iterator
- I = ExceptionValueIntrinsic->use_begin(),
- E = ExceptionValueIntrinsic->use_end(); I != E; ++I) {
- IntrinsicInst *EHPtr = dyn_cast<IntrinsicInst>(*I);
- if (!EHPtr || EHPtr->getParent()->getParent() != F) continue;
-
- bool URoRInvoke = false;
- SmallPtrSet<IntrinsicInst*, 8> SelCalls;
- SmallPtrSet<PHINode*, 32> SeenPHIs;
- Changed |= FindSelectorAndURoR(EHPtr, URoRInvoke, SelCalls, SeenPHIs);
-
- if (URoRInvoke) {
- // This EH pointer is being used by an invoke of an URoR instruction and
- // an eh.selector intrinsic call. If the eh.selector is a 'clean-up', we
- // need to convert it to a 'catch-all'.
- for (SmallPtrSet<IntrinsicInst*, 8>::iterator
- SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
- if (!HasCatchAllInSelector(*SI))
- SelsToConvert.insert(*SI);
- }
- }
- }
-
- if (!SelsToConvert.empty()) {
- // Convert all clean-up eh.selectors, which are associated with "invokes" of
- // URoR calls, into catch-all eh.selectors.
- Changed = true;
-
- for (SmallPtrSet<IntrinsicInst*, 8>::iterator
- SI = SelsToConvert.begin(), SE = SelsToConvert.end();
- SI != SE; ++SI) {
- IntrinsicInst *II = *SI;
-
- // Use the exception object pointer and the personality function
- // from the original selector.
- CallSite CS(II);
- IntrinsicInst::op_iterator I = CS.arg_begin();
- IntrinsicInst::op_iterator E = CS.arg_end();
- IntrinsicInst::op_iterator B = prior(E);
-
- // Exclude last argument if it is an integer.
- if (isa<ConstantInt>(B)) E = B;
+ if (!ExnObj)
+ ExnObj = ExtractValueInst::Create(RI->getOperand(0), 0, "exn.obj", RI);
- // Add exception object pointer (front).
- // Add personality function (next).
- // Add in any filter IDs (rest).
- SmallVector<Value*, 8> Args(I, E);
+ RI->eraseFromParent();
- Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.
-
- CallInst *NewSelector =
- CallInst::Create(SelectorIntrinsic, Args, "eh.sel.catch.all", II);
-
- NewSelector->setTailCall(II->isTailCall());
- NewSelector->setAttributes(II->getAttributes());
- NewSelector->setCallingConv(II->getCallingConv());
-
- II->replaceAllUsesWith(NewSelector);
- II->eraseFromParent();
- }
+ if (EraseIVIs) {
+ if (SelIVI->getNumUses() == 0)
+ SelIVI->eraseFromParent();
+ if (ExcIVI->getNumUses() == 0)
+ ExcIVI->eraseFromParent();
+ if (SelLoad && SelLoad->getNumUses() == 0)
+ SelLoad->eraseFromParent();
}
- Changed |= CleanupSelectors(CatchAllSels);
- return Changed;
-}
-
-/// NormalizeLandingPads - Normalize and discover landing pads, noting them
-/// in the LandingPads set. A landing pad is normal if the only CFG edges
-/// that end at it are unwind edges from invoke instructions. If we inlined
-/// through an invoke we could have a normal branch from the previous
-/// unwind block through to the landing pad for the original invoke.
-/// Abnormal landing pads are fixed up by redirecting all unwind edges to
-/// a new basic block which falls through to the original.
-bool DwarfEHPrepare::NormalizeLandingPads() {
- bool Changed = false;
-
- const MCAsmInfo *MAI = TM->getMCAsmInfo();
- bool usingSjLjEH = MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
-
- for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
- TerminatorInst *TI = I->getTerminator();
- if (!isa<InvokeInst>(TI))
- continue;
- BasicBlock *LPad = TI->getSuccessor(1);
- // Skip landing pads that have already been normalized.
- if (LandingPads.count(LPad))
- continue;
-
- // Check that only invoke unwind edges end at the landing pad.
- bool OnlyUnwoundTo = true;
- bool SwitchOK = usingSjLjEH;
- for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad);
- PI != PE; ++PI) {
- TerminatorInst *PT = (*PI)->getTerminator();
- // The SjLj dispatch block uses a switch instruction. This is effectively
- // an unwind edge, so we can disregard it here. There will only ever
- // be one dispatch, however, so if there are multiple switches, one
- // of them truly is a normal edge, not an unwind edge.
- if (SwitchOK && isa<SwitchInst>(PT)) {
- SwitchOK = false;
- continue;
- }
- if (!isa<InvokeInst>(PT) || LPad == PT->getSuccessor(0)) {
- OnlyUnwoundTo = false;
- break;
- }
- }
-
- if (OnlyUnwoundTo) {
- // Only unwind edges lead to the landing pad. Remember the landing pad.
- LandingPads.insert(LPad);
- continue;
- }
-
- // At least one normal edge ends at the landing pad. Redirect the unwind
- // edges to a new basic block which falls through into this one.
-
- // Create the new basic block.
- BasicBlock *NewBB = BasicBlock::Create(F->getContext(),
- LPad->getName() + "_unwind_edge");
-
- // Insert it into the function right before the original landing pad.
- LPad->getParent()->getBasicBlockList().insert(LPad, NewBB);
-
- // Redirect unwind edges from the original landing pad to NewBB.
- for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad); PI != PE; ) {
- TerminatorInst *PT = (*PI++)->getTerminator();
- if (isa<InvokeInst>(PT) && PT->getSuccessor(1) == LPad)
- // Unwind to the new block.
- PT->setSuccessor(1, NewBB);
- }
-
- // If there are any PHI nodes in LPad, we need to update them so that they
- // merge incoming values from NewBB instead.
- for (BasicBlock::iterator II = LPad->begin(); isa<PHINode>(II); ++II) {
- PHINode *PN = cast<PHINode>(II);
- pred_iterator PB = pred_begin(NewBB), PE = pred_end(NewBB);
-
- // Check to see if all of the values coming in via unwind edges are the
- // same. If so, we don't need to create a new PHI node.
- Value *InVal = PN->getIncomingValueForBlock(*PB);
- for (pred_iterator PI = PB; PI != PE; ++PI) {
- if (PI != PB && InVal != PN->getIncomingValueForBlock(*PI)) {
- InVal = 0;
- break;
- }
- }
-
- if (InVal == 0) {
- // Different unwind edges have different values. Create a new PHI node
- // in NewBB.
- PHINode *NewPN = PHINode::Create(PN->getType(),
- PN->getNumIncomingValues(),
- PN->getName()+".unwind", NewBB);
- // Add an entry for each unwind edge, using the value from the old PHI.
- for (pred_iterator PI = PB; PI != PE; ++PI)
- NewPN->addIncoming(PN->getIncomingValueForBlock(*PI), *PI);
-
- // Now use this new PHI as the common incoming value for NewBB in PN.
- InVal = NewPN;
- }
-
- // Revector exactly one entry in the PHI node to come from NewBB
- // and delete all other entries that come from unwind edges. If
- // there are both normal and unwind edges from the same predecessor,
- // this leaves an entry for the normal edge.
- for (pred_iterator PI = PB; PI != PE; ++PI)
- PN->removeIncomingValue(*PI);
- PN->addIncoming(InVal, NewBB);
- }
-
- // Add a fallthrough from NewBB to the original landing pad.
- BranchInst::Create(LPad, NewBB);
-
- // Now update DominatorTree analysis information.
- DT->splitBlock(NewBB);
-
- // Remember the newly constructed landing pad. The original landing pad
- // LPad is no longer a landing pad now that all unwind edges have been
- // revectored to NewBB.
- LandingPads.insert(NewBB);
- ++NumLandingPadsSplit;
- Changed = true;
- }
-
- return Changed;
-}
-
-/// LowerUnwinds - Turn unwind instructions into calls to _Unwind_Resume,
-/// rethrowing any previously caught exception. This will crash horribly
-/// at runtime if there is no such exception: using unwind to throw a new
-/// exception is currently not supported.
-bool DwarfEHPrepare::LowerUnwindsAndResumes() {
- SmallVector<Instruction*, 16> ResumeInsts;
-
- for (Function::iterator fi = F->begin(), fe = F->end(); fi != fe; ++fi) {
- for (BasicBlock::iterator bi = fi->begin(), be = fi->end(); bi != be; ++bi){
- if (isa<UnwindInst>(bi))
- ResumeInsts.push_back(bi);
- else if (CallInst *call = dyn_cast<CallInst>(bi))
- if (Function *fn = dyn_cast<Function>(call->getCalledValue()))
- if (fn->getName() == "llvm.eh.resume")
- ResumeInsts.push_back(bi);
- }
- }
-
- if (ResumeInsts.empty()) return false;
-
- // Find the rewind function if we didn't already.
- if (!RewindFunction) {
- LLVMContext &Ctx = ResumeInsts[0]->getContext();
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
- Type::getInt8PtrTy(Ctx), false);
- const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
- RewindFunction = F->getParent()->getOrInsertFunction(RewindName, FTy);
- }
-
- bool Changed = false;
-
- for (SmallVectorImpl<Instruction*>::iterator
- I = ResumeInsts.begin(), E = ResumeInsts.end(); I != E; ++I) {
- Instruction *RI = *I;
-
- // Replace the resuming instruction with a call to _Unwind_Resume (or the
- // appropriate target equivalent).
-
- llvm::Value *ExnValue;
- if (isa<UnwindInst>(RI))
- ExnValue = CreateExceptionValueCall(RI->getParent());
- else
- ExnValue = cast<CallInst>(RI)->getArgOperand(0);
-
- // Create the call...
- CallInst *CI = CallInst::Create(RewindFunction, ExnValue, "", RI);
- CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
-
- // ...followed by an UnreachableInst, if it was an unwind.
- // Calls to llvm.eh.resume are typically already followed by this.
- if (isa<UnwindInst>(RI))
- new UnreachableInst(RI->getContext(), RI);
-
- if (isa<UnwindInst>(RI))
- ++NumUnwindsLowered;
- else
- ++NumResumesLowered;
-
- // Nuke the resume instruction.
- RI->eraseFromParent();
-
- Changed = true;
- }
-
- return Changed;
-}
-
-/// MoveExceptionValueCalls - Ensure that eh.exception is only ever called from
-/// landing pads by replacing calls outside of landing pads with direct use of
-/// a register holding the appropriate value; this requires adding calls inside
-/// all landing pads to initialize the register. Also, move eh.exception calls
-/// inside landing pads to the start of the landing pad (optional, but may make
-/// things simpler for later passes).
-bool DwarfEHPrepare::MoveExceptionValueCalls() {
- // If the eh.exception intrinsic is not declared in the module then there is
- // nothing to do. Speed up compilation by checking for this common case.
- if (!ExceptionValueIntrinsic &&
- !F->getParent()->getFunction(Intrinsic::getName(Intrinsic::eh_exception)))
- return false;
-
- bool Changed = false;
-
- // Move calls to eh.exception that are inside a landing pad to the start of
- // the landing pad.
- for (BBSet::const_iterator LI = LandingPads.begin(), LE = LandingPads.end();
- LI != LE; ++LI) {
- BasicBlock *LP = *LI;
- for (BasicBlock::iterator II = LP->getFirstNonPHIOrDbg(), IE = LP->end();
- II != IE;)
- if (EHExceptionInst *EI = dyn_cast<EHExceptionInst>(II++)) {
- // Found a call to eh.exception.
- if (!EI->use_empty()) {
- // If there is already a call to eh.exception at the start of the
- // landing pad, then get hold of it; otherwise create such a call.
- Value *CallAtStart = CreateExceptionValueCall(LP);
-
- // If the call was at the start of a landing pad then leave it alone.
- if (EI == CallAtStart)
- continue;
- EI->replaceAllUsesWith(CallAtStart);
- }
- EI->eraseFromParent();
- ++NumExceptionValuesMoved;
- Changed = true;
- }
- }
-
- // Look for calls to eh.exception that are not in a landing pad. If one is
- // found, then a register that holds the exception value will be created in
- // each landing pad, and the SSAUpdater will be used to compute the values
- // returned by eh.exception calls outside of landing pads.
- SSAUpdater SSA;
-
- // Remember where we found the eh.exception call, to avoid rescanning earlier
- // basic blocks which we already know contain no eh.exception calls.
- bool FoundCallOutsideLandingPad = false;
- Function::iterator BB = F->begin();
- for (Function::iterator BE = F->end(); BB != BE; ++BB) {
- // Skip over landing pads.
- if (LandingPads.count(BB))
- continue;
-
- for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
- II != IE; ++II)
- if (isa<EHExceptionInst>(II)) {
- SSA.Initialize(II->getType(), II->getName());
- FoundCallOutsideLandingPad = true;
- break;
- }
-
- if (FoundCallOutsideLandingPad)
- break;
- }
-
- // If all calls to eh.exception are in landing pads then we are done.
- if (!FoundCallOutsideLandingPad)
- return Changed;
-
- // Add a call to eh.exception at the start of each landing pad, and tell the
- // SSAUpdater that this is the value produced by the landing pad.
- for (BBSet::iterator LI = LandingPads.begin(), LE = LandingPads.end();
- LI != LE; ++LI)
- SSA.AddAvailableValue(*LI, CreateExceptionValueCall(*LI));
-
- // Now turn all calls to eh.exception that are not in a landing pad into a use
- // of the appropriate register.
- for (Function::iterator BE = F->end(); BB != BE; ++BB) {
- // Skip over landing pads.
- if (LandingPads.count(BB))
- continue;
-
- for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
- II != IE;)
- if (EHExceptionInst *EI = dyn_cast<EHExceptionInst>(II++)) {
- // Found a call to eh.exception, replace it with the value from any
- // upstream landing pad(s).
- EI->replaceAllUsesWith(SSA.GetValueAtEndOfBlock(BB));
- EI->eraseFromParent();
- ++NumExceptionValuesMoved;
- }
- }
-
- return true;
-}
-
-/// CreateExceptionValueCall - Insert a call to the eh.exception intrinsic at
-/// the start of the basic block (unless there already is one, in which case
-/// the existing call is returned).
-Instruction *DwarfEHPrepare::CreateExceptionValueCall(BasicBlock *BB) {
- Instruction *Start = BB->getFirstNonPHIOrDbg();
- // Is this a call to eh.exception?
- if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Start))
- if (CI->getIntrinsicID() == Intrinsic::eh_exception)
- // Reuse the existing call.
- return Start;
-
- // Find the eh.exception intrinsic if we didn't already.
- if (!ExceptionValueIntrinsic)
- ExceptionValueIntrinsic = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::eh_exception);
-
- // Create the call.
- return CallInst::Create(ExceptionValueIntrinsic, "eh.value.call", Start);
+ return ExnObj;
}
/// InsertUnwindResumeCalls - Convert the ResumeInsts that are still present
/// into calls to the appropriate _Unwind_Resume function.
-bool DwarfEHPrepare::InsertUnwindResumeCalls() {
+bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
bool UsesNewEH = false;
SmallVector<ResumeInst*, 16> Resumes;
- for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
+ for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
TerminatorInst *TI = I->getTerminator();
if (ResumeInst *RI = dyn_cast<ResumeInst>(TI))
Resumes.push_back(RI);
@@ -682,27 +127,45 @@ bool DwarfEHPrepare::InsertUnwindResumeCalls() {
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
Type::getInt8PtrTy(Ctx), false);
const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
- RewindFunction = F->getParent()->getOrInsertFunction(RewindName, FTy);
+ RewindFunction = Fn.getParent()->getOrInsertFunction(RewindName, FTy);
}
// Create the basic block where the _Unwind_Resume call will live.
- LLVMContext &Ctx = F->getContext();
- BasicBlock *UnwindBB = BasicBlock::Create(Ctx, "unwind_resume", F);
- PHINode *PN = PHINode::Create(Type::getInt8PtrTy(Ctx), Resumes.size(),
+ LLVMContext &Ctx = Fn.getContext();
+ unsigned ResumesSize = Resumes.size();
+
+ if (ResumesSize == 1) {
+ // Instead of creating a new BB and PHI node, just append the call to
+ // _Unwind_Resume to the end of the single resume block.
+ ResumeInst *RI = Resumes.front();
+ BasicBlock *UnwindBB = RI->getParent();
+ Instruction *ExnObj = GetExceptionObject(RI);
+
+ // Call the _Unwind_Resume function.
+ CallInst *CI = CallInst::Create(RewindFunction, ExnObj, "", UnwindBB);
+ CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
+
+ // We never expect _Unwind_Resume to return.
+ new UnreachableInst(Ctx, UnwindBB);
+ return true;
+ }
+
+ BasicBlock *UnwindBB = BasicBlock::Create(Ctx, "unwind_resume", &Fn);
+ PHINode *PN = PHINode::Create(Type::getInt8PtrTy(Ctx), ResumesSize,
"exn.obj", UnwindBB);
// Extract the exception object from the ResumeInst and add it to the PHI node
// that feeds the _Unwind_Resume call.
- BasicBlock *UnwindBBDom = Resumes[0]->getParent();
for (SmallVectorImpl<ResumeInst*>::iterator
I = Resumes.begin(), E = Resumes.end(); I != E; ++I) {
ResumeInst *RI = *I;
- BranchInst::Create(UnwindBB, RI->getParent());
- ExtractValueInst *ExnObj = ExtractValueInst::Create(RI->getOperand(0),
- 0, "exn.obj", RI);
- PN->addIncoming(ExnObj, RI->getParent());
- UnwindBBDom = DT->findNearestCommonDominator(RI->getParent(), UnwindBBDom);
- RI->eraseFromParent();
+ BasicBlock *Parent = RI->getParent();
+ BranchInst::Create(UnwindBB, Parent);
+
+ Instruction *ExnObj = GetExceptionObject(RI);
+ PN->addIncoming(ExnObj, Parent);
+
+ ++NumResumesLowered;
}
// Call the function.
@@ -711,40 +174,10 @@ bool DwarfEHPrepare::InsertUnwindResumeCalls() {
// We never expect _Unwind_Resume to return.
new UnreachableInst(Ctx, UnwindBB);
-
- // Now update DominatorTree analysis information.
- DT->addNewBlock(UnwindBB, UnwindBBDom);
return true;
}
bool DwarfEHPrepare::runOnFunction(Function &Fn) {
- bool Changed = false;
-
- // Initialize internal state.
- DT = &getAnalysis<DominatorTree>(); // FIXME: We won't need this with the new EH.
- F = &Fn;
-
- if (InsertUnwindResumeCalls()) {
- // FIXME: The rest of this function can go once the new EH is done.
- LandingPads.clear();
- return true;
- }
-
- // Ensure that only unwind edges end at landing pads (a landing pad is a
- // basic block where an invoke unwind edge ends).
- Changed |= NormalizeLandingPads();
-
- // Turn unwind instructions and eh.resume calls into libcalls.
- Changed |= LowerUnwindsAndResumes();
-
- // TODO: Move eh.selector calls to landing pads and combine them.
-
- // Move eh.exception calls to landing pads.
- Changed |= MoveExceptionValueCalls();
-
- Changed |= HandleURoRInvokes();
-
- LandingPads.clear();
-
+ bool Changed = InsertUnwindResumeCalls(Fn);
return Changed;
}
diff --git a/contrib/llvm/lib/CodeGen/ELF.h b/contrib/llvm/lib/CodeGen/ELF.h
deleted file mode 100644
index 5b63468..0000000
--- a/contrib/llvm/lib/CodeGen/ELF.h
+++ /dev/null
@@ -1,227 +0,0 @@
-//===-- lib/CodeGen/ELF.h - ELF constants and data structures ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This header contains common, non-processor-specific data structures and
-// constants for the ELF file format.
-//
-// The details of the ELF32 bits in this file are largely based on the Tool
-// Interface Standard (TIS) Executable and Linking Format (ELF) Specification
-// Version 1.2, May 1995. The ELF64 is based on HP/Intel definition of the
-// ELF-64 object file format document, Version 1.5 Draft 2 May 27, 1998
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CODEGEN_ELF_H
-#define CODEGEN_ELF_H
-
-#include "llvm/CodeGen/BinaryObject.h"
-#include "llvm/CodeGen/MachineRelocation.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
- class GlobalValue;
-
- /// ELFSym - This struct contains information about each symbol that is
- /// added to logical symbol table for the module. This is eventually
- /// turned into a real symbol table in the file.
- struct ELFSym {
-
- // ELF symbols are related to llvm ones by being one of the two llvm
- // types; for the other ones (section, file, func) a null pointer is
- // assumed by default.
- union {
- const GlobalValue *GV; // If this is a pointer to a GV
- const char *Ext; // If this is a pointer to a named symbol
- } Source;
-
- // Describes from which source type this ELF symbol comes from,
- // they can be GlobalValue, ExternalSymbol or neither.
- enum {
- isGV, // The Source.GV field is valid.
- isExtSym, // The Source.ExtSym field is valid.
- isOther // Not a GlobalValue or External Symbol
- };
- unsigned SourceType;
-
- bool isGlobalValue() const { return SourceType == isGV; }
- bool isExternalSym() const { return SourceType == isExtSym; }
-
- // getGlobalValue - If this is a global value which originated the
- // elf symbol, return a reference to it.
- const GlobalValue *getGlobalValue() const {
- assert(SourceType == isGV && "This is not a global value");
- return Source.GV;
- }
-
- // getExternalSym - If this is an external symbol which originated the
- // elf symbol, return a reference to it.
- const char *getExternalSymbol() const {
- assert(SourceType == isExtSym && "This is not an external symbol");
- return Source.Ext;
- }
-
- // getGV - From a global value return an elf symbol to represent it
- static ELFSym *getGV(const GlobalValue *GV, unsigned Bind,
- unsigned Type, unsigned Visibility) {
- ELFSym *Sym = new ELFSym();
- Sym->Source.GV = GV;
- Sym->setBind(Bind);
- Sym->setType(Type);
- Sym->setVisibility(Visibility);
- Sym->SourceType = isGV;
- return Sym;
- }
-
- // getExtSym - Create and return an elf symbol to represent an
- // external symbol
- static ELFSym *getExtSym(const char *Ext) {
- ELFSym *Sym = new ELFSym();
- Sym->Source.Ext = Ext;
- Sym->setBind(ELF::STB_GLOBAL);
- Sym->setType(ELF::STT_NOTYPE);
- Sym->setVisibility(ELF::STV_DEFAULT);
- Sym->SourceType = isExtSym;
- return Sym;
- }
-
- // getSectionSym - Returns an elf symbol to represent an elf section
- static ELFSym *getSectionSym() {
- ELFSym *Sym = new ELFSym();
- Sym->setBind(ELF::STB_LOCAL);
- Sym->setType(ELF::STT_SECTION);
- Sym->setVisibility(ELF::STV_DEFAULT);
- Sym->SourceType = isOther;
- return Sym;
- }
-
- // getFileSym - Returns an elf symbol to represent the module identifier
- static ELFSym *getFileSym() {
- ELFSym *Sym = new ELFSym();
- Sym->setBind(ELF::STB_LOCAL);
- Sym->setType(ELF::STT_FILE);
- Sym->setVisibility(ELF::STV_DEFAULT);
- Sym->SectionIdx = 0xfff1; // ELFSection::SHN_ABS;
- Sym->SourceType = isOther;
- return Sym;
- }
-
- // getUndefGV - Returns an STT_NOTYPE symbol
- static ELFSym *getUndefGV(const GlobalValue *GV, unsigned Bind) {
- ELFSym *Sym = new ELFSym();
- Sym->Source.GV = GV;
- Sym->setBind(Bind);
- Sym->setType(ELF::STT_NOTYPE);
- Sym->setVisibility(ELF::STV_DEFAULT);
- Sym->SectionIdx = 0; //ELFSection::SHN_UNDEF;
- Sym->SourceType = isGV;
- return Sym;
- }
-
- // ELF specific fields
- unsigned NameIdx; // Index in .strtab of name, once emitted.
- uint64_t Value;
- unsigned Size;
- uint8_t Info;
- uint8_t Other;
- unsigned short SectionIdx;
-
- // Symbol index into the Symbol table
- unsigned SymTabIdx;
-
- ELFSym() : SourceType(isOther), NameIdx(0), Value(0),
- Size(0), Info(0), Other(ELF::STV_DEFAULT), SectionIdx(0),
- SymTabIdx(0) {}
-
- unsigned getBind() const { return (Info >> 4) & 0xf; }
- unsigned getType() const { return Info & 0xf; }
- bool isLocalBind() const { return getBind() == ELF::STB_LOCAL; }
- bool isFileType() const { return getType() == ELF::STT_FILE; }
-
- void setBind(unsigned X) {
- assert(X == (X & 0xF) && "Bind value out of range!");
- Info = (Info & 0x0F) | (X << 4);
- }
-
- void setType(unsigned X) {
- assert(X == (X & 0xF) && "Type value out of range!");
- Info = (Info & 0xF0) | X;
- }
-
- void setVisibility(unsigned V) {
- assert(V == (V & 0x3) && "Visibility value out of range!");
- Other = V;
- }
- };
-
- /// ELFSection - This struct contains information about each section that is
- /// emitted to the file. This is eventually turned into the section header
- /// table at the end of the file.
- class ELFSection : public BinaryObject {
- public:
- // ELF specific fields
- unsigned NameIdx; // sh_name - .shstrtab idx of name, once emitted.
- unsigned Type; // sh_type - Section contents & semantics
- unsigned Flags; // sh_flags - Section flags.
- uint64_t Addr; // sh_addr - The mem addr this section is in.
- unsigned Offset; // sh_offset - Offset from the file start
- unsigned Size; // sh_size - The section size.
- unsigned Link; // sh_link - Section header table index link.
- unsigned Info; // sh_info - Auxiliary information.
- unsigned Align; // sh_addralign - Alignment of section.
- unsigned EntSize; // sh_entsize - Size of entries in the section
-
- /// SectionIdx - The number of the section in the Section Table.
- unsigned short SectionIdx;
-
- /// Sym - The symbol to represent this section if it has one.
- ELFSym *Sym;
-
- /// getSymIndex - Returns the symbol table index of the symbol
- /// representing this section.
- unsigned getSymbolTableIndex() const {
- assert(Sym && "section not present in the symbol table");
- return Sym->SymTabIdx;
- }
-
- ELFSection(const std::string &name, bool isLittleEndian, bool is64Bit)
- : BinaryObject(name, isLittleEndian, is64Bit), Type(0), Flags(0), Addr(0),
- Offset(0), Size(0), Link(0), Info(0), Align(0), EntSize(0), Sym(0) {}
- };
-
- /// ELFRelocation - This class contains all the information necessary to
- /// to generate any 32-bit or 64-bit ELF relocation entry.
- class ELFRelocation {
- uint64_t r_offset; // offset in the section of the object this applies to
- uint32_t r_symidx; // symbol table index of the symbol to use
- uint32_t r_type; // machine specific relocation type
- int64_t r_add; // explicit relocation addend
- bool r_rela; // if true then the addend is part of the entry
- // otherwise the addend is at the location specified
- // by r_offset
- public:
- uint64_t getInfo(bool is64Bit) const {
- if (is64Bit)
- return ((uint64_t)r_symidx << 32) + ((uint64_t)r_type & 0xFFFFFFFFL);
- else
- return (r_symidx << 8) + (r_type & 0xFFL);
- }
-
- uint64_t getOffset() const { return r_offset; }
- int64_t getAddend() const { return r_add; }
-
- ELFRelocation(uint64_t off, uint32_t sym, uint32_t type,
- bool rela = true, int64_t addend = 0) :
- r_offset(off), r_symidx(sym), r_type(type),
- r_add(addend), r_rela(rela) {}
- };
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp b/contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp
deleted file mode 100644
index 660424c3..0000000
--- a/contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp
+++ /dev/null
@@ -1,205 +0,0 @@
-//===-- lib/CodeGen/ELFCodeEmitter.cpp ------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "elfce"
-
-#include "ELF.h"
-#include "ELFWriter.h"
-#include "ELFCodeEmitter.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/BinaryObject.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/MachineRelocation.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetELFWriterInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-//===----------------------------------------------------------------------===//
-// ELFCodeEmitter Implementation
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-
-/// startFunction - This callback is invoked when a new machine function is
-/// about to be emitted.
-void ELFCodeEmitter::startFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "processing function: "
- << MF.getFunction()->getName() << "\n");
-
- // Get the ELF Section that this function belongs in.
- ES = &EW.getTextSection(MF.getFunction());
-
- // Set the desired binary object to be used by the code emitters
- setBinaryObject(ES);
-
- // Get the function alignment in bytes
- unsigned Align = (1 << MF.getAlignment());
-
- // The function must start on its required alignment
- ES->emitAlignment(Align);
-
- // Update the section alignment if needed.
- ES->Align = std::max(ES->Align, Align);
-
- // Record the function start offset
- FnStartOff = ES->getCurrentPCOffset();
-
- // Emit constant pool and jump tables to their appropriate sections.
- // They need to be emitted before the function because on some targets
- // the latter may reference a JT or CP entry address.
- emitConstantPool(MF.getConstantPool());
- if (MF.getJumpTableInfo())
- emitJumpTables(MF.getJumpTableInfo());
-}
-
-/// finishFunction - This callback is invoked after the function is completely
-/// finished.
-bool ELFCodeEmitter::finishFunction(MachineFunction &MF) {
- // Add a symbol to represent the function.
- const Function *F = MF.getFunction();
- ELFSym *FnSym = ELFSym::getGV(F, EW.getGlobalELFBinding(F), ELF::STT_FUNC,
- EW.getGlobalELFVisibility(F));
- FnSym->SectionIdx = ES->SectionIdx;
- FnSym->Size = ES->getCurrentPCOffset()-FnStartOff;
- EW.AddPendingGlobalSymbol(F, true);
-
- // Offset from start of Section
- FnSym->Value = FnStartOff;
-
- if (!F->hasPrivateLinkage())
- EW.SymbolList.push_back(FnSym);
-
- // Patch up Jump Table Section relocations to use the real MBBs offsets
- // now that the MBB label offsets inside the function are known.
- if (MF.getJumpTableInfo()) {
- ELFSection &JTSection = EW.getJumpTableSection();
- for (std::vector<MachineRelocation>::iterator MRI = JTRelocations.begin(),
- MRE = JTRelocations.end(); MRI != MRE; ++MRI) {
- MachineRelocation &MR = *MRI;
- uintptr_t MBBOffset = getMachineBasicBlockAddress(MR.getBasicBlock());
- MR.setResultPointer((void*)MBBOffset);
- MR.setConstantVal(ES->SectionIdx);
- JTSection.addRelocation(MR);
- }
- }
-
- // If we have emitted any relocations to function-specific objects such as
- // basic blocks, constant pool entries, or jump tables, record their
- // addresses now so that we can rewrite them with the correct addresses later
- for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
- MachineRelocation &MR = Relocations[i];
- intptr_t Addr;
- if (MR.isGlobalValue()) {
- EW.AddPendingGlobalSymbol(MR.getGlobalValue());
- } else if (MR.isExternalSymbol()) {
- EW.AddPendingExternalSymbol(MR.getExternalSymbol());
- } else if (MR.isBasicBlock()) {
- Addr = getMachineBasicBlockAddress(MR.getBasicBlock());
- MR.setConstantVal(ES->SectionIdx);
- MR.setResultPointer((void*)Addr);
- } else if (MR.isConstantPoolIndex()) {
- Addr = getConstantPoolEntryAddress(MR.getConstantPoolIndex());
- MR.setConstantVal(CPSections[MR.getConstantPoolIndex()]);
- MR.setResultPointer((void*)Addr);
- } else if (MR.isJumpTableIndex()) {
- ELFSection &JTSection = EW.getJumpTableSection();
- Addr = getJumpTableEntryAddress(MR.getJumpTableIndex());
- MR.setConstantVal(JTSection.SectionIdx);
- MR.setResultPointer((void*)Addr);
- } else {
- llvm_unreachable("Unhandled relocation type");
- }
- ES->addRelocation(MR);
- }
-
- // Clear per-function data structures.
- JTRelocations.clear();
- Relocations.clear();
- CPLocations.clear();
- CPSections.clear();
- JTLocations.clear();
- MBBLocations.clear();
- return false;
-}
-
-/// emitConstantPool - For each constant pool entry, figure out which section
-/// the constant should live in and emit the constant
-void ELFCodeEmitter::emitConstantPool(MachineConstantPool *MCP) {
- const std::vector<MachineConstantPoolEntry> &CP = MCP->getConstants();
- if (CP.empty()) return;
-
- // TODO: handle PIC codegen
- assert(TM.getRelocationModel() != Reloc::PIC_ &&
- "PIC codegen not yet handled for elf constant pools!");
-
- for (unsigned i = 0, e = CP.size(); i != e; ++i) {
- MachineConstantPoolEntry CPE = CP[i];
-
- // Record the constant pool location and the section index
- ELFSection &CstPool = EW.getConstantPoolSection(CPE);
- CPLocations.push_back(CstPool.size());
- CPSections.push_back(CstPool.SectionIdx);
-
- if (CPE.isMachineConstantPoolEntry())
- assert(0 && "CPE.isMachineConstantPoolEntry not supported yet");
-
- // Emit the constant to constant pool section
- EW.EmitGlobalConstant(CPE.Val.ConstVal, CstPool);
- }
-}
-
-/// emitJumpTables - Emit all the jump tables for a given jump table info
-/// record to the appropriate section.
-void ELFCodeEmitter::emitJumpTables(MachineJumpTableInfo *MJTI) {
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- if (JT.empty()) return;
-
- // FIXME: handle PIC codegen
- assert(TM.getRelocationModel() != Reloc::PIC_ &&
- "PIC codegen not yet handled for elf jump tables!");
-
- const TargetELFWriterInfo *TEW = TM.getELFWriterInfo();
- unsigned EntrySize = 4; //MJTI->getEntrySize();
-
- // Get the ELF Section to emit the jump table
- ELFSection &JTSection = EW.getJumpTableSection();
-
- // For each JT, record its offset from the start of the section
- for (unsigned i = 0, e = JT.size(); i != e; ++i) {
- const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
-
- // Record JT 'i' offset in the JT section
- JTLocations.push_back(JTSection.size());
-
- // Each MBB entry in the Jump table section has a relocation entry
- // against the current text section.
- for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) {
- unsigned MachineRelTy = TEW->getAbsoluteLabelMachineRelTy();
- MachineRelocation MR =
- MachineRelocation::getBB(JTSection.size(), MachineRelTy, MBBs[mi]);
-
- // Add the relocation to the Jump Table section
- JTRelocations.push_back(MR);
-
- // Output placeholder for MBB in the JT section
- for (unsigned s=0; s < EntrySize; ++s)
- JTSection.emitByte(0);
- }
- }
-}
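-
-// Layout sketch (illustrative, assuming the fixed 4-byte entry size used
-// above): a jump table over blocks {A, B, C} contributes twelve zero bytes
-// to the JT section plus three relocations, one per placeholder; they are
-// resolved to the real MBB offsets later in finishFunction().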
-
-} // end namespace llvm
diff --git a/contrib/llvm/lib/CodeGen/ELFCodeEmitter.h b/contrib/llvm/lib/CodeGen/ELFCodeEmitter.h
deleted file mode 100644
index 8671c67..0000000
--- a/contrib/llvm/lib/CodeGen/ELFCodeEmitter.h
+++ /dev/null
@@ -1,78 +0,0 @@
-//===-- lib/CodeGen/ELFCodeEmitter.h ----------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ELFCODEEMITTER_H
-#define ELFCODEEMITTER_H
-
-#include "llvm/CodeGen/ObjectCodeEmitter.h"
-#include <vector>
-
-namespace llvm {
- class ELFWriter;
- class ELFSection;
-
- /// ELFCodeEmitter - This class is used by the ELFWriter to
- /// emit the code for functions to the ELF file.
- class ELFCodeEmitter : public ObjectCodeEmitter {
- ELFWriter &EW;
-
- /// Target machine description
- TargetMachine &TM;
-
- /// Section containing code for functions
- ELFSection *ES;
-
- /// Relocations - Record relocations needed by the current function
- std::vector<MachineRelocation> Relocations;
-
- /// JTRelocations - Record relocations needed by the jump table
- /// section.
- std::vector<MachineRelocation> JTRelocations;
-
- /// FnStartOff - Function offset from the beginning of ELFSection 'ES'
- uintptr_t FnStartOff;
- public:
- explicit ELFCodeEmitter(ELFWriter &ew) : EW(ew), TM(EW.TM) {}
-
- /// addRelocation - Register new relocations for this function
- void addRelocation(const MachineRelocation &MR) {
- Relocations.push_back(MR);
- }
-
- /// emitConstantPool - For each constant pool entry, figure out which
- /// section the constant should live in and emit data to it
- void emitConstantPool(MachineConstantPool *MCP);
-
- /// emitJumpTables - Emit all the jump tables for a given jump table
- /// info and record them to the appropriate section.
- void emitJumpTables(MachineJumpTableInfo *MJTI);
-
- void startFunction(MachineFunction &F);
- bool finishFunction(MachineFunction &F);
-
- /// emitLabel - Emits a label
- virtual void emitLabel(MCSymbol *Label) {
- assert(0 && "emitLabel not implemented");
- }
-
- /// getLabelAddress - Return the address of the specified LabelID,
- /// only usable after the LabelID has been emitted.
- virtual uintptr_t getLabelAddress(MCSymbol *Label) const {
- assert(0 && "getLabelAddress not implemented");
- return 0;
- }
-
- virtual void setModuleInfo(llvm::MachineModuleInfo* MMI) {}
-
-}; // end class ELFCodeEmitter
-
-} // end namespace llvm
-
-#endif
-
diff --git a/contrib/llvm/lib/CodeGen/ELFWriter.cpp b/contrib/llvm/lib/CodeGen/ELFWriter.cpp
deleted file mode 100644
index f2c2185..0000000
--- a/contrib/llvm/lib/CodeGen/ELFWriter.cpp
+++ /dev/null
@@ -1,1105 +0,0 @@
-//===-- ELFWriter.cpp - Target-independent ELF Writer code ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the target-independent ELF writer, which writes out
-// the ELF file in the following order:
-//
-// #1. ELF Header
-// #2. '.text' section
-// #3. '.data' section
-// #4. '.bss' section (conceptual position in file)
-// ...
-// #X. '.shstrtab' section
-// #Y. Section Table
-//
-// The entries in the section table are laid out as:
-// #0. Null entry [required]
-// #1. ".text" entry - the program code
-// #2. ".data" entry - global variables with initializers. [ if needed ]
-// #3. ".bss" entry - global variables without initializers. [ if needed ]
-// ...
-// #N. ".shstrtab" entry - String table for the section names.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "elfwriter"
-#include "ELF.h"
-#include "ELFWriter.h"
-#include "ELFCodeEmitter.h"
-#include "llvm/Constants.h"
-#include "llvm/Module.h"
-#include "llvm/PassManager.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/CodeGen/BinaryObject.h"
-#include "llvm/CodeGen/MachineCodeEmitter.h"
-#include "llvm/CodeGen/ObjectCodeEmitter.h"
-#include "llvm/CodeGen/MachineCodeEmitter.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSectionELF.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetELFWriterInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SmallString.h"
-using namespace llvm;
-
-char ELFWriter::ID = 0;
-
-//===----------------------------------------------------------------------===//
-// ELFWriter Implementation
-//===----------------------------------------------------------------------===//
-
-ELFWriter::ELFWriter(raw_ostream &o, TargetMachine &tm)
- : MachineFunctionPass(ID), O(o), TM(tm),
- OutContext(*new MCContext(*TM.getMCAsmInfo(), *TM.getRegisterInfo(),
- &TM.getTargetLowering()->getObjFileLowering())),
- TLOF(TM.getTargetLowering()->getObjFileLowering()),
- is64Bit(TM.getTargetData()->getPointerSizeInBits() == 64),
- isLittleEndian(TM.getTargetData()->isLittleEndian()),
- ElfHdr(isLittleEndian, is64Bit) {
-
- MAI = TM.getMCAsmInfo();
- TEW = TM.getELFWriterInfo();
-
- // Create the object code emitter object for this target.
- ElfCE = new ELFCodeEmitter(*this);
-
- // Initial number of sections
- NumSections = 0;
-}
-
-ELFWriter::~ELFWriter() {
- delete ElfCE;
- delete &OutContext;
-
- while(!SymbolList.empty()) {
- delete SymbolList.back();
- SymbolList.pop_back();
- }
-
- while(!PrivateSyms.empty()) {
- delete PrivateSyms.back();
- PrivateSyms.pop_back();
- }
-
- while(!SectionList.empty()) {
- delete SectionList.back();
- SectionList.pop_back();
- }
-
- // Release the name mangler object.
- delete Mang; Mang = 0;
-}
-
-// doInitialization - Emit the file header and all of the global variables for
-// the module to the ELF file.
-bool ELFWriter::doInitialization(Module &M) {
- // Initialize TargetLoweringObjectFile.
- const_cast<TargetLoweringObjectFile&>(TLOF).Initialize(OutContext, TM);
-
- Mang = new Mangler(OutContext, *TM.getTargetData());
-
- // ELF Header
- // ----------
- // Fields e_shnum and e_shstrndx are only known after all sections have
- // been emitted. Their locations in the output buffer are recorded so
- // they can be patched up later.
- //
- // Note
- // ----
- // The emitWord method behaves differently for ELF32 and ELF64, writing
- // 4 bytes in the former and 8 in the latter for *_off and *_addr ELF types
-
- ElfHdr.emitByte(0x7f); // e_ident[EI_MAG0]
- ElfHdr.emitByte('E'); // e_ident[EI_MAG1]
- ElfHdr.emitByte('L'); // e_ident[EI_MAG2]
- ElfHdr.emitByte('F'); // e_ident[EI_MAG3]
-
- ElfHdr.emitByte(TEW->getEIClass()); // e_ident[EI_CLASS]
- ElfHdr.emitByte(TEW->getEIData()); // e_ident[EI_DATA]
- ElfHdr.emitByte(ELF::EV_CURRENT); // e_ident[EI_VERSION]
- ElfHdr.emitAlignment(16); // e_ident[EI_NIDENT-EI_PAD]
-
- ElfHdr.emitWord16(ELF::ET_REL); // e_type
- ElfHdr.emitWord16(TEW->getEMachine()); // e_machine = target
- ElfHdr.emitWord32(ELF::EV_CURRENT); // e_version
- ElfHdr.emitWord(0); // e_entry, no entry point in .o file
- ElfHdr.emitWord(0); // e_phoff, no program header for .o
- ELFHdr_e_shoff_Offset = ElfHdr.size();
- ElfHdr.emitWord(0); // e_shoff = sec hdr table off in bytes
- ElfHdr.emitWord32(TEW->getEFlags()); // e_flags = whatever the target wants
- ElfHdr.emitWord16(TEW->getHdrSize()); // e_ehsize = ELF header size
- ElfHdr.emitWord16(0); // e_phentsize = prog header entry size
- ElfHdr.emitWord16(0); // e_phnum = # prog header entries = 0
-
- // e_shentsize = Section header entry size
- ElfHdr.emitWord16(TEW->getSHdrSize());
-
- // e_shnum = # of section header ents
- ELFHdr_e_shnum_Offset = ElfHdr.size();
- ElfHdr.emitWord16(0); // Placeholder
-
- // e_shstrndx = Section # of '.shstrtab'
- ELFHdr_e_shstrndx_Offset = ElfHdr.size();
- ElfHdr.emitWord16(0); // Placeholder
-
- // Add the null section, which is required to be first in the file.
- getNullSection();
-
- // The first entry in the symtab is the null symbol and the second
- // is a local symbol containing the module/file name
- SymbolList.push_back(new ELFSym());
- SymbolList.push_back(ELFSym::getFileSym());
-
- return false;
-}
-
- // AddPendingGlobalSymbol - Add a global to be processed and to
- // the global symbol lookup; use a zero index because the table
- // index will be determined later.
-void ELFWriter::AddPendingGlobalSymbol(const GlobalValue *GV,
- bool AddToLookup /* = false */) {
- PendingGlobals.insert(GV);
- if (AddToLookup)
- GblSymLookup[GV] = 0;
-}
-
- // AddPendingExternalSymbol - Add the external to be processed
- // and to the external symbol lookup; use a zero index because
- // the symbol table index will be determined later.
-void ELFWriter::AddPendingExternalSymbol(const char *External) {
- PendingExternals.insert(External);
- ExtSymLookup[External] = 0;
-}
-
-ELFSection &ELFWriter::getDataSection() {
- const MCSectionELF *Data = (const MCSectionELF *)TLOF.getDataSection();
- return getSection(Data->getSectionName(), Data->getType(),
- Data->getFlags(), 4);
-}
-
-ELFSection &ELFWriter::getBSSSection() {
- const MCSectionELF *BSS = (const MCSectionELF *)TLOF.getBSSSection();
- return getSection(BSS->getSectionName(), BSS->getType(), BSS->getFlags(), 4);
-}
-
-// getCtorSection - Get the static constructor section
-ELFSection &ELFWriter::getCtorSection() {
- const MCSectionELF *Ctor = (const MCSectionELF *)TLOF.getStaticCtorSection();
- return getSection(Ctor->getSectionName(), Ctor->getType(), Ctor->getFlags());
-}
-
-// getDtorSection - Get the static destructor section
-ELFSection &ELFWriter::getDtorSection() {
- const MCSectionELF *Dtor = (const MCSectionELF *)TLOF.getStaticDtorSection();
- return getSection(Dtor->getSectionName(), Dtor->getType(), Dtor->getFlags());
-}
-
-// getTextSection - Get the text section for the specified function
-ELFSection &ELFWriter::getTextSection(const Function *F) {
- const MCSectionELF *Text =
- (const MCSectionELF *)TLOF.SectionForGlobal(F, Mang, TM);
- return getSection(Text->getSectionName(), Text->getType(), Text->getFlags());
-}
-
-// getJumpTableSection - Get a read only section for constants when
-// emitting jump tables. TODO: add PIC support
-ELFSection &ELFWriter::getJumpTableSection() {
- const MCSectionELF *JT =
- (const MCSectionELF *)TLOF.getSectionForConstant(SectionKind::getReadOnly());
- return getSection(JT->getSectionName(), JT->getType(), JT->getFlags(),
- TM.getTargetData()->getPointerABIAlignment());
-}
-
-// getConstantPoolSection - Get a constant pool section based on the machine
-// constant pool entry type and relocation info.
-ELFSection &ELFWriter::getConstantPoolSection(MachineConstantPoolEntry &CPE) {
- SectionKind Kind;
- switch (CPE.getRelocationInfo()) {
- default: llvm_unreachable("Unknown section kind");
- case 2: Kind = SectionKind::getReadOnlyWithRel(); break;
- case 1:
- Kind = SectionKind::getReadOnlyWithRelLocal();
- break;
- case 0:
- switch (TM.getTargetData()->getTypeAllocSize(CPE.getType())) {
- case 4: Kind = SectionKind::getMergeableConst4(); break;
- case 8: Kind = SectionKind::getMergeableConst8(); break;
- case 16: Kind = SectionKind::getMergeableConst16(); break;
- default: Kind = SectionKind::getMergeableConst(); break;
- }
- }
-
- const MCSectionELF *CPSect =
- (const MCSectionELF *)TLOF.getSectionForConstant(Kind);
- return getSection(CPSect->getSectionName(), CPSect->getType(),
- CPSect->getFlags(), CPE.getAlignment());
-}
-
- // getRelocSection - Return the relocation section of section 'S'. The
- // returned section holds entries with addends when the target uses SHT_RELA.
-ELFSection &ELFWriter::getRelocSection(ELFSection &S) {
- unsigned SectionType = TEW->hasRelocationAddend() ?
- ELF::SHT_RELA : ELF::SHT_REL;
-
- std::string SectionName(".rel");
- if (TEW->hasRelocationAddend())
- SectionName.append("a");
- SectionName.append(S.getName());
-
- return getSection(SectionName, SectionType, 0, TEW->getPrefELFAlignment());
-}
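-
-// Example (illustrative): for a section named ".text" the code above
-// produces ".rela.text" when the target stores addends in the entry
-// (SHT_RELA) and ".rel.text" otherwise (SHT_REL), the conventional ELF
-// naming scheme.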
-
-// getGlobalELFVisibility - Returns the ELF specific visibility type
-unsigned ELFWriter::getGlobalELFVisibility(const GlobalValue *GV) {
- switch (GV->getVisibility()) {
- default:
- llvm_unreachable("unknown visibility type");
- case GlobalValue::DefaultVisibility:
- return ELF::STV_DEFAULT;
- case GlobalValue::HiddenVisibility:
- return ELF::STV_HIDDEN;
- case GlobalValue::ProtectedVisibility:
- return ELF::STV_PROTECTED;
- }
- return 0;
-}
-
-// getGlobalELFBinding - Returns the ELF specific binding type
-unsigned ELFWriter::getGlobalELFBinding(const GlobalValue *GV) {
- if (GV->hasInternalLinkage())
- return ELF::STB_LOCAL;
-
- if (GV->isWeakForLinker() && !GV->hasCommonLinkage())
- return ELF::STB_WEAK;
-
- return ELF::STB_GLOBAL;
-}
-
-// getGlobalELFType - Returns the ELF specific type for a global
-unsigned ELFWriter::getGlobalELFType(const GlobalValue *GV) {
- if (GV->isDeclaration())
- return ELF::STT_NOTYPE;
-
- if (isa<Function>(GV))
- return ELF::STT_FUNC;
-
- return ELF::STT_OBJECT;
-}
-
-// IsELFUndefSym - True if the global value must be marked as a symbol
-// which points to a SHN_UNDEF section. This means that the symbol has
- // no definition in the module.
-static bool IsELFUndefSym(const GlobalValue *GV) {
- return GV->isDeclaration() || (isa<Function>(GV));
-}
-
- // AddToSymbolList - Update the symbol lookup and, if the symbol is
- // private, add it to the PrivateSyms list, otherwise to SymbolList.
-void ELFWriter::AddToSymbolList(ELFSym *GblSym) {
- assert(GblSym->isGlobalValue() && "Symbol must be a global value");
-
- const GlobalValue *GV = GblSym->getGlobalValue();
- if (GV->hasPrivateLinkage()) {
- // For private symbols, keep track of the index inside
- // the private list since it will never go to the symbol
- // table and won't be patched up later.
- PrivateSyms.push_back(GblSym);
- GblSymLookup[GV] = PrivateSyms.size()-1;
- } else {
- // Non-private symbols are left with zero indices until
- // they are patched up during symbol table emission
- // (where the indices are created).
- SymbolList.push_back(GblSym);
- GblSymLookup[GV] = 0;
- }
-}
-
- /// HasCommonSymbols - True if this section holds common symbols; this is
- /// indicated in the ELF object file by a symbol with the SHN_COMMON section
- /// header index.
-static bool HasCommonSymbols(const MCSectionELF &S) {
- // FIXME: this is wrong, a common symbol can be in .data for example.
- if (StringRef(S.getSectionName()).startswith(".gnu.linkonce."))
- return true;
-
- return false;
-}
-
-
-// EmitGlobal - Choose the right section for global and emit it
-void ELFWriter::EmitGlobal(const GlobalValue *GV) {
-
- // Check if the referenced symbol is already emitted
- if (GblSymLookup.find(GV) != GblSymLookup.end())
- return;
-
- // Handle ELF Bind, Visibility and Type for the current symbol
- unsigned SymBind = getGlobalELFBinding(GV);
- unsigned SymType = getGlobalELFType(GV);
- bool IsUndefSym = IsELFUndefSym(GV);
-
- ELFSym *GblSym = IsUndefSym ? ELFSym::getUndefGV(GV, SymBind)
- : ELFSym::getGV(GV, SymBind, SymType, getGlobalELFVisibility(GV));
-
- if (!IsUndefSym) {
- assert(isa<GlobalVariable>(GV) && "GV not a global variable!");
- const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
-
- // Handle special llvm globals
- if (EmitSpecialLLVMGlobal(GVar))
- return;
-
- // Get the ELF section where this global belongs from TLOF
- const MCSectionELF *S =
- (const MCSectionELF *)TLOF.SectionForGlobal(GV, Mang, TM);
- ELFSection &ES =
- getSection(S->getSectionName(), S->getType(), S->getFlags());
- SectionKind Kind = S->getKind();
-
- // The symbol alignment should update the section alignment if needed
- const TargetData *TD = TM.getTargetData();
- unsigned Align = TD->getPreferredAlignment(GVar);
- unsigned Size = TD->getTypeAllocSize(GVar->getInitializer()->getType());
- GblSym->Size = Size;
-
- if (HasCommonSymbols(*S)) { // Symbol must go to a common section
- GblSym->SectionIdx = ELF::SHN_COMMON;
-
- // A new linkonce section is created for each global in the
- // common section, the default alignment is 1 and the symbol
- // value contains its alignment.
- ES.Align = 1;
- GblSym->Value = Align;
-
- } else if (Kind.isBSS() || Kind.isThreadBSS()) { // Symbol goes to BSS.
- GblSym->SectionIdx = ES.SectionIdx;
-
- // Round the size up to the alignment so the next object can
- // start at the right offset in the section
- if (Align) ES.Size = (ES.Size + Align-1) & ~(Align-1);
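- // e.g. ES.Size == 10 with Align == 8 rounds up to 16; the usual
- // power-of-two round-up (Align is assumed to be a power of two).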
- ES.Align = std::max(ES.Align, Align);
-
- // GblSym->Value should contain the virtual offset inside the section.
- // Virtual because the BSS space is not allocated on ELF objects
- GblSym->Value = ES.Size;
- ES.Size += Size;
-
- } else { // The symbol must go to some kind of data section
- GblSym->SectionIdx = ES.SectionIdx;
-
- // GblSym->Value should contain the symbol offset inside the section,
- // and all symbols should start on their required alignment boundary
- ES.Align = std::max(ES.Align, Align);
- ES.emitAlignment(Align);
- GblSym->Value = ES.size();
-
- // Emit the global to the data section 'ES'
- EmitGlobalConstant(GVar->getInitializer(), ES);
- }
- }
-
- AddToSymbolList(GblSym);
-}
-
-void ELFWriter::EmitGlobalConstantStruct(const ConstantStruct *CVS,
- ELFSection &GblS) {
-
- // Print the fields in successive locations. Pad to align if needed!
- const TargetData *TD = TM.getTargetData();
- unsigned Size = TD->getTypeAllocSize(CVS->getType());
- const StructLayout *cvsLayout = TD->getStructLayout(CVS->getType());
- uint64_t sizeSoFar = 0;
- for (unsigned i = 0, e = CVS->getNumOperands(); i != e; ++i) {
- const Constant* field = CVS->getOperand(i);
-
- // Check if padding is needed and insert one or more 0s.
- uint64_t fieldSize = TD->getTypeAllocSize(field->getType());
- uint64_t padSize = ((i == e-1 ? Size : cvsLayout->getElementOffset(i+1))
- - cvsLayout->getElementOffset(i)) - fieldSize;
- sizeSoFar += fieldSize + padSize;
-
- // Now print the actual field value.
- EmitGlobalConstant(field, GblS);
-
- // Insert padding - this may include padding to increase the size of the
- // current field up to the ABI size (if the struct is not packed) as well
- // as padding to ensure that the next field starts at the right offset.
- GblS.emitZeros(padSize);
- }
- assert(sizeSoFar == cvsLayout->getSizeInBytes() &&
- "Layout of constant struct may be incorrect!");
-}
-
-void ELFWriter::EmitGlobalConstant(const Constant *CV, ELFSection &GblS) {
- const TargetData *TD = TM.getTargetData();
- unsigned Size = TD->getTypeAllocSize(CV->getType());
-
- if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV)) {
- for (unsigned i = 0, e = CVA->getNumOperands(); i != e; ++i)
- EmitGlobalConstant(CVA->getOperand(i), GblS);
- return;
- } else if (isa<ConstantAggregateZero>(CV)) {
- GblS.emitZeros(Size);
- return;
- } else if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV)) {
- EmitGlobalConstantStruct(CVS, GblS);
- return;
- } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
- APInt Val = CFP->getValueAPF().bitcastToAPInt();
- if (CFP->getType()->isDoubleTy())
- GblS.emitWord64(Val.getZExtValue());
- else if (CFP->getType()->isFloatTy())
- GblS.emitWord32(Val.getZExtValue());
- else if (CFP->getType()->isX86_FP80Ty()) {
- unsigned PadSize = TD->getTypeAllocSize(CFP->getType())-
- TD->getTypeStoreSize(CFP->getType());
- GblS.emitWordFP80(Val.getRawData(), PadSize);
- } else if (CFP->getType()->isPPC_FP128Ty())
- llvm_unreachable("PPC_FP128Ty global emission not implemented");
- return;
- } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
- if (Size == 1)
- GblS.emitByte(CI->getZExtValue());
- else if (Size == 2)
- GblS.emitWord16(CI->getZExtValue());
- else if (Size == 4)
- GblS.emitWord32(CI->getZExtValue());
- else
- EmitGlobalConstantLargeInt(CI, GblS);
- return;
- } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
- VectorType *PTy = CP->getType();
- for (unsigned I = 0, E = PTy->getNumElements(); I < E; ++I)
- EmitGlobalConstant(CP->getOperand(I), GblS);
- return;
- } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
- // Resolve a constant expression which returns a (Constant, Offset)
- // pair. If 'Res.first' is a GlobalValue, emit a relocation with
- // the offset 'Res.second', otherwise emit a global constant as is
- // always done for non-constant-expression types.
- CstExprResTy Res = ResolveConstantExpr(CE);
- const Constant *Op = Res.first;
-
- if (isa<GlobalValue>(Op))
- EmitGlobalDataRelocation(cast<const GlobalValue>(Op),
- TD->getTypeAllocSize(Op->getType()),
- GblS, Res.second);
- else
- EmitGlobalConstant(Op, GblS);
-
- return;
- } else if (CV->getType()->getTypeID() == Type::PointerTyID) {
- // Fill the data entry with zeros or emit a relocation entry
- if (isa<ConstantPointerNull>(CV))
- GblS.emitZeros(Size);
- else
- EmitGlobalDataRelocation(cast<const GlobalValue>(CV),
- Size, GblS);
- return;
- } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
- // This is a constant address for a global variable or function and
- // therefore must be referenced using a relocation entry.
- EmitGlobalDataRelocation(GV, Size, GblS);
- return;
- }
-
- std::string msg;
- raw_string_ostream ErrorMsg(msg);
- ErrorMsg << "Constant unimp for type: " << *CV->getType();
- report_fatal_error(ErrorMsg.str());
-}
-
- // ResolveConstantExpr - Resolve the constant expression until it stops
- // yielding other constant expressions.
-CstExprResTy ELFWriter::ResolveConstantExpr(const Constant *CV) {
- const TargetData *TD = TM.getTargetData();
-
- // There isn't a constant expression nested inside another anymore
- if (!isa<ConstantExpr>(CV))
- return std::make_pair(CV, 0);
-
- const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
- switch (CE->getOpcode()) {
- case Instruction::BitCast:
- return ResolveConstantExpr(CE->getOperand(0));
-
- case Instruction::GetElementPtr: {
- const Constant *ptrVal = CE->getOperand(0);
- SmallVector<Value*, 8> idxVec(CE->op_begin()+1, CE->op_end());
- int64_t Offset = TD->getIndexedOffset(ptrVal->getType(), idxVec);
- return std::make_pair(ptrVal, Offset);
- }
- case Instruction::IntToPtr: {
- Constant *Op = CE->getOperand(0);
- Op = ConstantExpr::getIntegerCast(Op, TD->getIntPtrType(CV->getContext()),
- false/*ZExt*/);
- return ResolveConstantExpr(Op);
- }
- case Instruction::PtrToInt: {
- Constant *Op = CE->getOperand(0);
- Type *Ty = CE->getType();
-
- // We can emit the pointer value into this slot if the slot is an
- // integer slot with size greater than or equal to the size of the pointer.
- if (TD->getTypeAllocSize(Ty) == TD->getTypeAllocSize(Op->getType()))
- return ResolveConstantExpr(Op);
-
- llvm_unreachable("Integer size less then pointer size");
- }
- case Instruction::Add:
- case Instruction::Sub: {
- // Only handle cases where there's a constant expression with GlobalValue
- // as first operand and ConstantInt as second, which are the cases we can
- // solve directly using a relocation entry. GlobalValue=Op0, CstInt=Op1
- // 1) Instruction::Add => (global) + CstInt
- // 2) Instruction::Sub => (global) + -CstInt
- const Constant *Op0 = CE->getOperand(0);
- const Constant *Op1 = CE->getOperand(1);
- assert(isa<ConstantInt>(Op1) && "Op1 must be a ConstantInt");
-
- CstExprResTy Res = ResolveConstantExpr(Op0);
- assert(isa<GlobalValue>(Res.first) && "Op0 must be a GlobalValue");
-
- const APInt &RHS = cast<ConstantInt>(Op1)->getValue();
- switch (CE->getOpcode()) {
- case Instruction::Add:
- return std::make_pair(Res.first, RHS.getSExtValue());
- case Instruction::Sub:
- return std::make_pair(Res.first, (-RHS).getSExtValue());
- }
- }
- }
-
- report_fatal_error(CE->getOpcodeName() +
- StringRef(": Unsupported ConstantExpr type"));
-
- return std::make_pair(CV, 0); // silence warning
-}
-
-void ELFWriter::EmitGlobalDataRelocation(const GlobalValue *GV, unsigned Size,
- ELFSection &GblS, int64_t Offset) {
- // Create the relocation entry for the global value
- MachineRelocation MR =
- MachineRelocation::getGV(GblS.getCurrentPCOffset(),
- TEW->getAbsoluteLabelMachineRelTy(),
- const_cast<GlobalValue*>(GV),
- Offset);
-
- // Fill the data entry with zeros
- GblS.emitZeros(Size);
-
- // Add the relocation entry for the current data section
- GblS.addRelocation(MR);
-}
-
-void ELFWriter::EmitGlobalConstantLargeInt(const ConstantInt *CI,
- ELFSection &S) {
- const TargetData *TD = TM.getTargetData();
- unsigned BitWidth = CI->getBitWidth();
- assert(isPowerOf2_32(BitWidth) &&
- "Non-power-of-2-sized integers not handled!");
-
- const uint64_t *RawData = CI->getValue().getRawData();
- uint64_t Val = 0;
- for (unsigned i = 0, e = BitWidth / 64; i != e; ++i) {
- Val = (TD->isBigEndian()) ? RawData[e - i - 1] : RawData[i];
- S.emitWord64(Val);
- }
-}
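-
-// Worked example (illustrative): a 128-bit integer occupies two uint64_t
-// words in the APInt, with RawData[0] least significant; a little-endian
-// target emits RawData[0] then RawData[1], while a big-endian target emits
-// RawData[1] first so the most significant word comes first in the file.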
-
-/// EmitSpecialLLVMGlobal - Check to see if the specified global is a
-/// special global used by LLVM. If so, emit it and return true, otherwise
-/// do nothing and return false.
-bool ELFWriter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) {
- if (GV->getName() == "llvm.used")
- llvm_unreachable("not implemented yet");
-
- // Ignore debug and non-emitted data. This handles llvm.compiler.used.
- if (GV->getSection() == "llvm.metadata" ||
- GV->hasAvailableExternallyLinkage())
- return true;
-
- if (!GV->hasAppendingLinkage()) return false;
-
- assert(GV->hasInitializer() && "Not a special LLVM global!");
-
- const TargetData *TD = TM.getTargetData();
- unsigned Align = TD->getPointerPrefAlignment();
- if (GV->getName() == "llvm.global_ctors") {
- ELFSection &Ctor = getCtorSection();
- Ctor.emitAlignment(Align);
- EmitXXStructorList(GV->getInitializer(), Ctor);
- return true;
- }
-
- if (GV->getName() == "llvm.global_dtors") {
- ELFSection &Dtor = getDtorSection();
- Dtor.emitAlignment(Align);
- EmitXXStructorList(GV->getInitializer(), Dtor);
- return true;
- }
-
- return false;
-}
-
-/// EmitXXStructorList - Emit the ctor or dtor list. This just emits out the
-/// function pointers, ignoring the init priority.
-void ELFWriter::EmitXXStructorList(const Constant *List, ELFSection &Xtor) {
- // Should be an array of '{ i32, void ()* }' structs. The first value is the
- // init priority, which we ignore.
- if (List->isNullValue()) return;
- const ConstantArray *InitList = cast<ConstantArray>(List);
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
- if (InitList->getOperand(i)->isNullValue())
- continue;
- ConstantStruct *CS = cast<ConstantStruct>(InitList->getOperand(i));
-
- if (CS->getOperand(1)->isNullValue())
- continue;
-
- // Emit the function pointer.
- EmitGlobalConstant(CS->getOperand(1), Xtor);
- }
-}
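-
-// Example (illustrative): given
-//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
-//     [{ i32, void ()* } { i32 65535, void ()* @init }]
-// only the @init pointer is emitted (as a data relocation); the priority
-// value 65535 is dropped.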
-
-bool ELFWriter::runOnMachineFunction(MachineFunction &MF) {
- // Nothing to do here, this is all done through the ElfCE object above.
- return false;
-}
-
-/// doFinalization - Now that the module has been completely processed, emit
-/// the ELF file to 'O'.
-bool ELFWriter::doFinalization(Module &M) {
- // Emit .data section placeholder
- getDataSection();
-
- // Emit .bss section placeholder
- getBSSSection();
-
- // Build and emit data, bss and "common" sections.
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- EmitGlobal(I);
-
- // Emit all pending globals
- for (PendingGblsIter I = PendingGlobals.begin(), E = PendingGlobals.end();
- I != E; ++I)
- EmitGlobal(*I);
-
- // Emit all pending externals
- for (PendingExtsIter I = PendingExternals.begin(), E = PendingExternals.end();
- I != E; ++I)
- SymbolList.push_back(ELFSym::getExtSym(*I));
-
- // Emit a symbol for each section created until now; skip the null section
- for (unsigned i = 1, e = SectionList.size(); i < e; ++i) {
- ELFSection &ES = *SectionList[i];
- ELFSym *SectionSym = ELFSym::getSectionSym();
- SectionSym->SectionIdx = ES.SectionIdx;
- SymbolList.push_back(SectionSym);
- ES.Sym = SymbolList.back();
- }
-
- // Emit string table
- EmitStringTable(M.getModuleIdentifier());
-
- // Emit the symbol table now, if non-empty.
- EmitSymbolTable();
-
- // Emit the relocation sections.
- EmitRelocations();
-
- // Emit the sections string table.
- EmitSectionTableStringTable();
-
- // Dump the sections and section table to the .o file.
- OutputSectionsAndSectionTable();
-
- return false;
-}
-
- // RelocateField - Patch the relocatable field at 'Offset' in 'BO'
- // using a 'Value' of known 'Size'
-void ELFWriter::RelocateField(BinaryObject &BO, uint32_t Offset,
- int64_t Value, unsigned Size) {
- if (Size == 32)
- BO.fixWord32(Value, Offset);
- else if (Size == 64)
- BO.fixWord64(Value, Offset);
- else
- llvm_unreachable("don't know howto patch relocatable field");
-}
-
-/// EmitRelocations - Emit relocations
-void ELFWriter::EmitRelocations() {
-
- // True if the target uses the relocation entry to hold the addend,
- // otherwise the addend is written directly to the relocatable field.
- bool HasRelA = TEW->hasRelocationAddend();
-
- // Create Relocation sections for each section which needs it.
- for (unsigned i=0, e=SectionList.size(); i != e; ++i) {
- ELFSection &S = *SectionList[i];
-
- // This section does not have relocations
- if (!S.hasRelocations()) continue;
- ELFSection &RelSec = getRelocSection(S);
-
- // 'Link' - Section hdr idx of the associated symbol table
- // 'Info' - Section hdr idx of the section to which the relocation applies
- ELFSection &SymTab = getSymbolTableSection();
- RelSec.Link = SymTab.SectionIdx;
- RelSec.Info = S.SectionIdx;
- RelSec.EntSize = TEW->getRelocationEntrySize();
-
- // Get the relocations from Section
- std::vector<MachineRelocation> Relos = S.getRelocations();
- for (std::vector<MachineRelocation>::iterator MRI = Relos.begin(),
- MRE = Relos.end(); MRI != MRE; ++MRI) {
- MachineRelocation &MR = *MRI;
-
- // Relocatable field offset from the section start
- unsigned RelOffset = MR.getMachineCodeOffset();
-
- // Symbol index in the symbol table
- unsigned SymIdx = 0;
-
- // Target specific relocation field type and size
- unsigned RelType = TEW->getRelocationType(MR.getRelocationType());
- unsigned RelTySize = TEW->getRelocationTySize(RelType);
- int64_t Addend = 0;
-
- // There are several machine relocation types, and each one of
- // them needs a different approach to retrieve the symbol table index.
- if (MR.isGlobalValue()) {
- const GlobalValue *G = MR.getGlobalValue();
- int64_t GlobalOffset = MR.getConstantVal();
- SymIdx = GblSymLookup[G];
- if (G->hasPrivateLinkage()) {
- // If the target uses a section offset in the relocation:
- // SymIdx + Addend = section sym for global + section offset
- unsigned SectionIdx = PrivateSyms[SymIdx]->SectionIdx;
- Addend = PrivateSyms[SymIdx]->Value + GlobalOffset;
- SymIdx = SectionList[SectionIdx]->getSymbolTableIndex();
- } else {
- Addend = TEW->getDefaultAddendForRelTy(RelType, GlobalOffset);
- }
- } else if (MR.isExternalSymbol()) {
- const char *ExtSym = MR.getExternalSymbol();
- SymIdx = ExtSymLookup[ExtSym];
- Addend = TEW->getDefaultAddendForRelTy(RelType);
- } else {
- // Get the symbol index for the section symbol
- unsigned SectionIdx = MR.getConstantVal();
- SymIdx = SectionList[SectionIdx]->getSymbolTableIndex();
-
- // The symbol offset inside the section
- int64_t SymOffset = (int64_t)MR.getResultPointer();
-
- // For pc relative relocations where symbols are defined in the same
- // section they are referenced, ignore the relocation entry and patch
- // the relocatable field with the symbol offset directly.
- if (S.SectionIdx == SectionIdx && TEW->isPCRelativeRel(RelType)) {
- int64_t Value = TEW->computeRelocation(SymOffset, RelOffset, RelType);
- RelocateField(S, RelOffset, Value, RelTySize);
- continue;
- }
-
- Addend = TEW->getDefaultAddendForRelTy(RelType, SymOffset);
- }
-
- // On targets that do not store the addend in the relocation entry, the
- // relocatable field itself must be patched to contain the addend;
- // otherwise write zeros to make sure there is no garbage there
- RelocateField(S, RelOffset, HasRelA ? 0 : Addend, RelTySize);
-
- // Get the relocation entry and emit to the relocation section
- ELFRelocation Rel(RelOffset, SymIdx, RelType, HasRelA, Addend);
- EmitRelocation(RelSec, Rel, HasRelA);
- }
- }
-}
-
- /// EmitRelocation - Write relocation 'Rel' to the relocation section 'RelSec'
-void ELFWriter::EmitRelocation(BinaryObject &RelSec, ELFRelocation &Rel,
- bool HasRelA) {
- RelSec.emitWord(Rel.getOffset());
- RelSec.emitWord(Rel.getInfo(is64Bit));
- if (HasRelA)
- RelSec.emitWord(Rel.getAddend());
-}
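-
-// For reference, the words above follow the Elf{32,64}_Rel[a] record
-// layout { r_offset, r_info [, r_addend] }; emitWord writes 4 or 8 bytes
-// per field depending on the object's class, and the addend word is
-// present only for SHT_RELA targets.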
-
-/// EmitSymbol - Write symbol 'Sym' to the symbol table 'SymbolTable'
-void ELFWriter::EmitSymbol(BinaryObject &SymbolTable, ELFSym &Sym) {
- if (is64Bit) {
- SymbolTable.emitWord32(Sym.NameIdx);
- SymbolTable.emitByte(Sym.Info);
- SymbolTable.emitByte(Sym.Other);
- SymbolTable.emitWord16(Sym.SectionIdx);
- SymbolTable.emitWord64(Sym.Value);
- SymbolTable.emitWord64(Sym.Size);
- } else {
- SymbolTable.emitWord32(Sym.NameIdx);
- SymbolTable.emitWord32(Sym.Value);
- SymbolTable.emitWord32(Sym.Size);
- SymbolTable.emitByte(Sym.Info);
- SymbolTable.emitByte(Sym.Other);
- SymbolTable.emitWord16(Sym.SectionIdx);
- }
-}
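-
-// The two branches above follow the standard Elf64_Sym and Elf32_Sym field
-// orders; for reference, a sketch of the 64-bit record:
-//   { Word st_name; uchar st_info; uchar st_other;
-//     Half st_shndx; Addr st_value; Xword st_size; }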
-
- /// EmitSectionHeader - Write the header of section 'SHdr' to the 'SHdrTab'
- /// Section Header Table
-void ELFWriter::EmitSectionHeader(BinaryObject &SHdrTab,
- const ELFSection &SHdr) {
- SHdrTab.emitWord32(SHdr.NameIdx);
- SHdrTab.emitWord32(SHdr.Type);
- if (is64Bit) {
- SHdrTab.emitWord64(SHdr.Flags);
- SHdrTab.emitWord(SHdr.Addr);
- SHdrTab.emitWord(SHdr.Offset);
- SHdrTab.emitWord64(SHdr.Size);
- SHdrTab.emitWord32(SHdr.Link);
- SHdrTab.emitWord32(SHdr.Info);
- SHdrTab.emitWord64(SHdr.Align);
- SHdrTab.emitWord64(SHdr.EntSize);
- } else {
- SHdrTab.emitWord32(SHdr.Flags);
- SHdrTab.emitWord(SHdr.Addr);
- SHdrTab.emitWord(SHdr.Offset);
- SHdrTab.emitWord32(SHdr.Size);
- SHdrTab.emitWord32(SHdr.Link);
- SHdrTab.emitWord32(SHdr.Info);
- SHdrTab.emitWord32(SHdr.Align);
- SHdrTab.emitWord32(SHdr.EntSize);
- }
-}
-
-/// EmitStringTable - If the current symbol table is non-empty, emit the string
-/// table for it
-void ELFWriter::EmitStringTable(const std::string &ModuleName) {
- if (!SymbolList.size()) return; // Empty symbol table.
- ELFSection &StrTab = getStringTableSection();
-
- // The string table starts with a mandatory null byte.
- StrTab.emitByte(0);
-
- // Walk on the symbol list and write symbol names into the string table.
- unsigned Index = 1;
- for (ELFSymIter I=SymbolList.begin(), E=SymbolList.end(); I != E; ++I) {
- ELFSym &Sym = *(*I);
-
- std::string Name;
- if (Sym.isGlobalValue()) {
- SmallString<40> NameStr;
- Mang->getNameWithPrefix(NameStr, Sym.getGlobalValue(), false);
- Name.append(NameStr.begin(), NameStr.end());
- } else if (Sym.isExternalSym())
- Name.append(Sym.getExternalSymbol());
- else if (Sym.isFileType())
- Name.append(ModuleName);
-
- if (Name.empty()) {
- Sym.NameIdx = 0;
- } else {
- Sym.NameIdx = Index;
- StrTab.emitString(Name);
-
- // Keep track of the number of bytes emitted to this section.
- Index += Name.size()+1;
- }
- }
- assert(Index == StrTab.size());
- StrTab.Size = Index;
-}
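-
-// Layout sketch (illustrative): after emitting symbols named "foo" and
-// "bar" the .strtab bytes are "\0foo\0bar\0", giving NameIdx 1 for "foo"
-// and 5 for "bar"; each index is the byte offset of the name's first
-// character in the table.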
-
- // SortSymbols - In the symbol table, local symbols must come before
- // all symbols with non-local bindings. The return value is
- // the position of the first non-local symbol.
-unsigned ELFWriter::SortSymbols() {
- unsigned FirstNonLocalSymbol;
- std::vector<ELFSym*> LocalSyms, OtherSyms;
-
- for (ELFSymIter I=SymbolList.begin(), E=SymbolList.end(); I != E; ++I) {
- if ((*I)->isLocalBind())
- LocalSyms.push_back(*I);
- else
- OtherSyms.push_back(*I);
- }
- SymbolList.clear();
- FirstNonLocalSymbol = LocalSyms.size();
-
- for (unsigned i = 0; i < FirstNonLocalSymbol; ++i)
- SymbolList.push_back(LocalSyms[i]);
-
- for (ELFSymIter I=OtherSyms.begin(), E=OtherSyms.end(); I != E; ++I)
- SymbolList.push_back(*I);
-
- LocalSyms.clear();
- OtherSyms.clear();
-
- return FirstNonLocalSymbol;
-}
-
-/// EmitSymbolTable - Emit the symbol table itself.
-void ELFWriter::EmitSymbolTable() {
- if (!SymbolList.size()) return; // Empty symbol table.
-
- // Now that we have emitted the string table and know the offset into the
- // string table of each symbol, emit the symbol table itself.
- ELFSection &SymTab = getSymbolTableSection();
- SymTab.Align = TEW->getPrefELFAlignment();
-
- // Section Index of .strtab.
- SymTab.Link = getStringTableSection().SectionIdx;
-
- // Size of each symtab entry.
- SymTab.EntSize = TEW->getSymTabEntrySize();
-
- // Reorder the symbol table with local symbols first!
- unsigned FirstNonLocalSymbol = SortSymbols();
-
- // Emit all the symbols to the symbol table.
- for (unsigned i = 0, e = SymbolList.size(); i < e; ++i) {
- ELFSym &Sym = *SymbolList[i];
-
- // Emit symbol to the symbol table
- EmitSymbol(SymTab, Sym);
-
- // Record the symbol table index for each symbol
- if (Sym.isGlobalValue())
- GblSymLookup[Sym.getGlobalValue()] = i;
- else if (Sym.isExternalSym())
- ExtSymLookup[Sym.getExternalSymbol()] = i;
-
- // Keep track of the symbol index into the symbol table
- Sym.SymTabIdx = i;
- }
-
- // One greater than the symbol table index of the last local symbol
- SymTab.Info = FirstNonLocalSymbol;
- SymTab.Size = SymTab.size();
-}
-
-/// EmitSectionTableStringTable - This method adds and emits a section for the
-/// ELF Section Table string table: the string table that holds all of the
-/// section names.
-void ELFWriter::EmitSectionTableStringTable() {
- // First step: add the section for the string table to the list of sections:
- ELFSection &SHStrTab = getSectionHeaderStringTableSection();
-
- // Now that we know which section number is the .shstrtab section, update the
- // e_shstrndx entry in the ELF header.
- ElfHdr.fixWord16(SHStrTab.SectionIdx, ELFHdr_e_shstrndx_Offset);
-
- // Set the NameIdx of each section in the string table and emit the bytes for
- // the string table.
- unsigned Index = 0;
-
- for (ELFSectionIter I=SectionList.begin(), E=SectionList.end(); I != E; ++I) {
- ELFSection &S = *(*I);
- // Set the index into the table. Note if we have lots of entries with
- // common suffixes, we could memoize them here if we cared.
- S.NameIdx = Index;
- SHStrTab.emitString(S.getName());
-
- // Keep track of the number of bytes emitted to this section.
- Index += S.getName().size()+1;
- }
-
- // Set the size of .shstrtab now that we know what it is.
- assert(Index == SHStrTab.size());
- SHStrTab.Size = Index;
-}
-
-/// OutputSectionsAndSectionTable - Now that we have constructed the file header
-/// and all of the sections, emit these to the ostream destination and emit the
-/// SectionTable.
-void ELFWriter::OutputSectionsAndSectionTable() {
- // Pass #1: Compute the file offset for each section.
- size_t FileOff = ElfHdr.size(); // File header first.
-
- // Adjust alignment of all sections if needed; skip the null section.
- for (unsigned i=1, e=SectionList.size(); i < e; ++i) {
- ELFSection &ES = *SectionList[i];
- if (!ES.size()) {
- ES.Offset = FileOff;
- continue;
- }
-
- // Update Section size
- if (!ES.Size)
- ES.Size = ES.size();
-
- // Align FileOff to whatever the alignment restrictions of the section are.
- if (ES.Align)
- FileOff = (FileOff+ES.Align-1) & ~(ES.Align-1);
-
- ES.Offset = FileOff;
- FileOff += ES.Size;
- }
-
- // Align Section Header.
- unsigned TableAlign = TEW->getPrefELFAlignment();
- FileOff = (FileOff+TableAlign-1) & ~(TableAlign-1);
-
- // Now that we know where all of the sections will be emitted, set the e_shnum
- // entry in the ELF header.
- ElfHdr.fixWord16(NumSections, ELFHdr_e_shnum_Offset);
-
- // Now that we know the offset in the file of the section table, update the
- // e_shoff address in the ELF header.
- ElfHdr.fixWord(FileOff, ELFHdr_e_shoff_Offset);
-
- // Now that we know all of the data in the file header, emit it and all of the
- // sections!
- O.write((char *)&ElfHdr.getData()[0], ElfHdr.size());
- FileOff = ElfHdr.size();
-
- // Section Header Table blob
- BinaryObject SHdrTable(isLittleEndian, is64Bit);
-
- // Emit all of the sections to the file and build the section header table.
- for (ELFSectionIter I=SectionList.begin(), E=SectionList.end(); I != E; ++I) {
- ELFSection &S = *(*I);
- DEBUG(dbgs() << "SectionIdx: " << S.SectionIdx << ", Name: " << S.getName()
- << ", Size: " << S.Size << ", Offset: " << S.Offset
- << ", SectionData Size: " << S.size() << "\n");
-
- // Align FileOff to whatever the alignment restrictions of the section are.
- if (S.size()) {
- if (S.Align) {
- for (size_t NewFileOff = (FileOff+S.Align-1) & ~(S.Align-1);
- FileOff != NewFileOff; ++FileOff)
- O << (char)0xAB;
- }
- O.write((char *)&S.getData()[0], S.Size);
- FileOff += S.Size;
- }
-
- EmitSectionHeader(SHdrTable, S);
- }
-
- // Align output for the section table.
- for (size_t NewFileOff = (FileOff+TableAlign-1) & ~(TableAlign-1);
- FileOff != NewFileOff; ++FileOff)
- O << (char)0xAB;
-
- // Emit the section table itself.
- O.write((char *)&SHdrTable.getData()[0], SHdrTable.size());
-}
diff --git a/contrib/llvm/lib/CodeGen/ELFWriter.h b/contrib/llvm/lib/CodeGen/ELFWriter.h
deleted file mode 100644
index 6f7fbac..0000000
--- a/contrib/llvm/lib/CodeGen/ELFWriter.h
+++ /dev/null
@@ -1,251 +0,0 @@
-//===-- ELFWriter.h - Target-independent ELF writer support -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the ELFWriter class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ELFWRITER_H
-#define ELFWRITER_H
-
-#include "llvm/ADT/SetVector.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include <map>
-
-namespace llvm {
- class BinaryObject;
- class Constant;
- class ConstantInt;
- class ConstantStruct;
- class ELFCodeEmitter;
- class ELFRelocation;
- class ELFSection;
- struct ELFSym;
- class GlobalVariable;
- class JITDebugRegisterer;
- class Mangler;
- class MachineCodeEmitter;
- class MachineConstantPoolEntry;
- class ObjectCodeEmitter;
- class MCAsmInfo;
- class TargetELFWriterInfo;
- class TargetLoweringObjectFile;
- class raw_ostream;
- class SectionKind;
- class MCContext;
- class TargetMachine;
-
- typedef std::vector<ELFSym*>::iterator ELFSymIter;
- typedef std::vector<ELFSection*>::iterator ELFSectionIter;
- typedef SetVector<const GlobalValue*>::const_iterator PendingGblsIter;
- typedef SetVector<const char *>::const_iterator PendingExtsIter;
- typedef std::pair<const Constant *, int64_t> CstExprResTy;
-
- /// ELFWriter - This class implements the common target-independent code for
- /// writing ELF files. Targets should derive a class from this to
- /// parameterize the output format.
- ///
- class ELFWriter : public MachineFunctionPass {
- friend class ELFCodeEmitter;
- friend class JITDebugRegisterer;
- public:
- static char ID;
-
- /// Return the ELFCodeEmitter as an instance of ObjectCodeEmitter
- ObjectCodeEmitter *getObjectCodeEmitter() {
- return reinterpret_cast<ObjectCodeEmitter*>(ElfCE);
- }
-
- ELFWriter(raw_ostream &O, TargetMachine &TM);
- ~ELFWriter();
-
- protected:
- /// Output stream to send the resultant object file to.
- raw_ostream &O;
-
- /// Target machine description.
- TargetMachine &TM;
-
- /// Context object for machine code objects.
- MCContext &OutContext;
-
- /// Target Elf Writer description.
- const TargetELFWriterInfo *TEW;
-
- /// Mang - The object used to perform name mangling for this module.
- Mangler *Mang;
-
- /// ElfCE - The ELFCodeEmitter object that we are exposing to emit machine
- /// code for functions to the .o file.
- ELFCodeEmitter *ElfCE;
-
- /// TLOF - Target Lowering Object File, provide section names for globals
- /// and other object file specific stuff
- const TargetLoweringObjectFile &TLOF;
-
- /// MAI - Target Asm Info, provide information about section names for
- /// globals and other target specific stuff.
- const MCAsmInfo *MAI;
-
- //===------------------------------------------------------------------===//
- // Properties inferred automatically from the target machine.
- //===------------------------------------------------------------------===//
-
- /// is64Bit/isLittleEndian - This information is inferred from the target
- /// machine directly, indicating whether to emit a 32- or 64-bit ELF file.
- bool is64Bit, isLittleEndian;
-
- /// doInitialization - Emit the file header and all of the global variables
- /// for the module to the ELF file.
- bool doInitialization(Module &M);
- bool runOnMachineFunction(MachineFunction &MF);
-
- /// doFinalization - Now that the module has been completely processed, emit
- /// the ELF file to 'O'.
- bool doFinalization(Module &M);
-
- private:
- /// Blob containing the Elf header
- BinaryObject ElfHdr;
-
- /// SectionList - This is the list of sections that we have emitted to the
- /// file. Once the file has been completely built, the section header table
- /// is constructed from this info.
- std::vector<ELFSection*> SectionList;
- unsigned NumSections; // Always = SectionList.size()
-
- /// SectionLookup - This is a mapping from section name to section number in
- /// the SectionList. Used to quickly gather the Section Index from MAI names
- std::map<std::string, ELFSection*> SectionLookup;
-
- /// PendingGlobals - Globals not processed as symbols yet.
- SetVector<const GlobalValue*> PendingGlobals;
-
- /// GblSymLookup - This is a mapping from global value to a symbol index
- /// in the symbol table or private symbols list. This is useful since reloc
- /// symbol references must be quickly mapped to their indices on the lists.
- std::map<const GlobalValue*, uint32_t> GblSymLookup;
-
- /// PendingExternals - Externals not processed as symbols yet.
- SetVector<const char *> PendingExternals;
-
- /// ExtSymLookup - This is a mapping from externals to a symbol index
- /// in the symbol table list. This is useful since reloc symbol references
- /// must be quickly mapped to their symbol table indices.
- std::map<const char *, uint32_t> ExtSymLookup;
-
- /// SymbolList - This is the list of symbols emitted to the symbol table.
- /// When the SymbolList is finally built, local symbols must be placed in
- /// the beginning while non-locals at the end.
- std::vector<ELFSym*> SymbolList;
-
- /// PrivateSyms - Record private symbols; symbols recorded here must never
- /// appear in the SymbolList.
- std::vector<ELFSym*> PrivateSyms;
-
- /// getSection - Return the section with the specified name, creating a new
- /// section if one does not already exist.
- ELFSection &getSection(const std::string &Name, unsigned Type,
- unsigned Flags = 0, unsigned Align = 0) {
- ELFSection *&SN = SectionLookup[Name];
- if (SN) return *SN;
-
- SectionList.push_back(new ELFSection(Name, isLittleEndian, is64Bit));
- SN = SectionList.back();
- SN->SectionIdx = NumSections++;
- SN->Type = Type;
- SN->Flags = Flags;
- SN->Link = ELF::SHN_UNDEF;
- SN->Align = Align;
- return *SN;
- }
-
- ELFSection &getNonExecStackSection() {
- return getSection(".note.GNU-stack", ELF::SHT_PROGBITS, 0, 1);
- }
-
- ELFSection &getSymbolTableSection() {
- return getSection(".symtab", ELF::SHT_SYMTAB, 0);
- }
-
- ELFSection &getStringTableSection() {
- return getSection(".strtab", ELF::SHT_STRTAB, 0, 1);
- }
-
- ELFSection &getSectionHeaderStringTableSection() {
- return getSection(".shstrtab", ELF::SHT_STRTAB, 0, 1);
- }
-
- ELFSection &getNullSection() {
- return getSection("", ELF::SHT_NULL, 0);
- }
-
- ELFSection &getDataSection();
- ELFSection &getBSSSection();
- ELFSection &getCtorSection();
- ELFSection &getDtorSection();
- ELFSection &getJumpTableSection();
- ELFSection &getConstantPoolSection(MachineConstantPoolEntry &CPE);
- ELFSection &getTextSection(const Function *F);
- ELFSection &getRelocSection(ELFSection &S);
-
- // Helpers for obtaining ELF specific info.
- unsigned getGlobalELFBinding(const GlobalValue *GV);
- unsigned getGlobalELFType(const GlobalValue *GV);
- unsigned getGlobalELFVisibility(const GlobalValue *GV);
-
- // AddPendingGlobalSymbol - Add a global to be processed and to
- // the global symbol lookup; use a zero index because the table
- // index will be determined later.
- void AddPendingGlobalSymbol(const GlobalValue *GV,
- bool AddToLookup = false);
-
- // AddPendingExternalSymbol - Add the external to be processed
- // and to the external symbol lookup; use a zero index because
- // the symbol table index will be determined later.
- void AddPendingExternalSymbol(const char *External);
-
- // AddToSymbolList - Update the symbol lookup and, if the symbol is
- // private, add it to the PrivateSyms list, otherwise to SymbolList.
- void AddToSymbolList(ELFSym *GblSym);
-
- // As we complete the ELF file, we need to update fields in the ELF header
- // (e.g. the location of the section table). These members keep track of
- // the offset in ELFHeader of these various pieces to update and other
- // locations in the file.
- unsigned ELFHdr_e_shoff_Offset; // e_shoff in ELF header.
- unsigned ELFHdr_e_shstrndx_Offset; // e_shstrndx in ELF header.
- unsigned ELFHdr_e_shnum_Offset; // e_shnum in ELF header.
-
- private:
- void EmitGlobal(const GlobalValue *GV);
- void EmitGlobalConstant(const Constant *C, ELFSection &GblS);
- void EmitGlobalConstantStruct(const ConstantStruct *CVS,
- ELFSection &GblS);
- void EmitGlobalConstantLargeInt(const ConstantInt *CI, ELFSection &S);
- void EmitGlobalDataRelocation(const GlobalValue *GV, unsigned Size,
- ELFSection &GblS, int64_t Offset = 0);
- bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
- void EmitXXStructorList(const Constant *List, ELFSection &Xtor);
- void EmitRelocations();
- void EmitRelocation(BinaryObject &RelSec, ELFRelocation &Rel, bool HasRelA);
- void EmitSectionHeader(BinaryObject &SHdrTab, const ELFSection &SHdr);
- void EmitSectionTableStringTable();
- void EmitSymbol(BinaryObject &SymbolTable, ELFSym &Sym);
- void EmitSymbolTable();
- void EmitStringTable(const std::string &ModuleName);
- void OutputSectionsAndSectionTable();
- void RelocateField(BinaryObject &BO, uint32_t Offset, int64_t Value,
- unsigned Size);
- unsigned SortSymbols();
- CstExprResTy ResolveConstantExpr(const Constant *CV);
- };
-}
-
-#endif
diff --git a/contrib/llvm/lib/CodeGen/EdgeBundles.cpp b/contrib/llvm/lib/CodeGen/EdgeBundles.cpp
index a7aba89..3bb0465 100644
--- a/contrib/llvm/lib/CodeGen/EdgeBundles.cpp
+++ b/contrib/llvm/lib/CodeGen/EdgeBundles.cpp
@@ -77,7 +77,7 @@ void EdgeBundles::view() const {
/// Specialize WriteGraph, the standard implementation won't work.
raw_ostream &llvm::WriteGraph(raw_ostream &O, const EdgeBundles &G,
bool ShortNames,
- const std::string &Title) {
+ const Twine &Title) {
const MachineFunction *MF = G.getMachineFunction();
O << "digraph {\n";
diff --git a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
index 01dccdb..a48c540 100644
--- a/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/contrib/llvm/lib/CodeGen/ExecutionDepsFix.cpp
@@ -26,7 +26,7 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -45,7 +45,7 @@ using namespace llvm;
/// DomainValue for each register, but it may contain multiple execution
/// domains. A register value is initially created in a single execution
/// domain, but if we were forced to pay the penalty of a domain crossing, we
-/// keep track of the fact the the register is now available in multiple
+/// keep track of the fact that the register is now available in multiple
/// domains.
namespace {
struct DomainValue {
@@ -57,8 +57,10 @@ struct DomainValue {
// domains where the register is available for free.
unsigned AvailableDomains;
- // Position of the last defining instruction.
- unsigned Dist;
+ // Pointer to the next DomainValue in a chain. When two DomainValues are
+ // merged, Victim.Next is set to point to Victor, so old DomainValue
+ // references can be updated by following the chain.
+ DomainValue *Next;
// Twiddleable instructions using or defining these registers.
SmallVector<MachineInstr*, 8> Instrs;
@@ -92,16 +94,33 @@ struct DomainValue {
return CountTrailingZeros_32(AvailableDomains);
}
- DomainValue() { clear(); }
+ DomainValue() : Refs(0) { clear(); }
+ // Clear this DomainValue and point to next which has all its data.
void clear() {
- Refs = AvailableDomains = Dist = 0;
+ AvailableDomains = 0;
+ Next = 0;
Instrs.clear();
}
};
}
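The Victim.Next chain above behaves like a small union-find with reference counting: a merged-away DomainValue forwards to its survivor, and stale references are retargeted lazily. A minimal standalone sketch of the idiom (plain C++; names are illustrative, not the LLVM API):

#include <cassert>
#include <cstdio>

// Illustrative stand-in for DomainValue: a refcounted node whose Next
// pointer forwards to the surviving node after a merge.
struct Node {
  unsigned Refs = 0;
  Node *Next = nullptr;
};

Node *retain(Node *N) { if (N) ++N->Refs; return N; }

void release(Node *N) {
  while (N) {
    assert(N->Refs && "over-release");
    if (--N->Refs)
      return;
    Node *Next = N->Next;  // a dead node also drops its chain reference
    N->Next = nullptr;
    N = Next;
  }
}

// Follow the chain to the live node and repoint the reference, mirroring
// the path compression in ExeDepsFix::resolve.
Node *resolve(Node *&Ref) {
  Node *N = Ref;
  if (!N || !N->Next)
    return N;
  do N = N->Next; while (N->Next);
  retain(N);
  release(Ref);
  Ref = N;
  return N;
}

int main() {
  Node A, B;
  Node *RefB = retain(&B);  // e.g. a live-out map entry still pointing at B
  retain(&A);               // A is live in the current block
  B.Next = retain(&A);      // merge(A, B): B now forwards to A
  assert(resolve(RefB) == &A && RefB == &A);
  std::printf("survivor refcount: %u\n", A.Refs);  // prints 2
  return 0;
}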
namespace {
+/// LiveReg - Information about a live register.
+struct LiveReg {
+ /// Value currently in this register, or NULL when no value is being tracked.
+ /// This counts as a DomainValue reference.
+ DomainValue *Value;
+
+ /// Instruction that defined this register, relative to the beginning of the
+ /// current basic block. When a LiveReg is used to represent a live-out
+ /// register, this value is relative to the end of the basic block, so it
+ /// will be a negative number.
+ int Def;
+};
+} // anonymous namespace
+
+namespace {
class ExeDepsFix : public MachineFunctionPass {
static char ID;
SpecificBumpPtrAllocator<DomainValue> Allocator;
@@ -111,13 +130,19 @@ class ExeDepsFix : public MachineFunctionPass {
MachineFunction *MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
- MachineBasicBlock *MBB;
std::vector<int> AliasMap;
const unsigned NumRegs;
- DomainValue **LiveRegs;
- typedef DenseMap<MachineBasicBlock*,DomainValue**> LiveOutMap;
+ LiveReg *LiveRegs;
+ typedef DenseMap<MachineBasicBlock*, LiveReg*> LiveOutMap;
LiveOutMap LiveOuts;
- unsigned Distance;
+
+ /// Current instruction number.
+ /// The first instruction in each basic block is 0.
+ int CurInstr;
+
+ /// True when the current block has a predecessor that hasn't been visited
+ /// yet.
+ bool SeenUnknownBackEdge;
public:
ExeDepsFix(const TargetRegisterClass *rc)
@@ -131,26 +156,33 @@ public:
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual const char *getPassName() const {
- return "SSE execution domain fixup";
+ return "Execution dependency fix";
}
private:
// Register mapping.
- int RegIndex(unsigned Reg);
+ int regIndex(unsigned Reg);
// DomainValue allocation.
- DomainValue *Alloc(int domain = -1);
- void Recycle(DomainValue*);
+ DomainValue *alloc(int domain = -1);
+ DomainValue *retain(DomainValue *DV) {
+ if (DV) ++DV->Refs;
+ return DV;
+ }
+ void release(DomainValue*);
+ DomainValue *resolve(DomainValue*&);
// LiveRegs manipulations.
- void SetLiveReg(int rx, DomainValue *DV);
- void Kill(int rx);
- void Force(int rx, unsigned domain);
- void Collapse(DomainValue *dv, unsigned domain);
- bool Merge(DomainValue *A, DomainValue *B);
-
- void enterBasicBlock();
- void visitGenericInstr(MachineInstr*);
+ void setLiveReg(int rx, DomainValue *DV);
+ void kill(int rx);
+ void force(int rx, unsigned domain);
+ void collapse(DomainValue *dv, unsigned domain);
+ bool merge(DomainValue *A, DomainValue *B);
+
+ void enterBasicBlock(MachineBasicBlock*);
+ void leaveBasicBlock(MachineBasicBlock*);
+ void visitInstr(MachineInstr*);
+ void processDefs(MachineInstr*, bool Kill);
void visitSoftInstr(MachineInstr*, unsigned mask);
void visitHardInstr(MachineInstr*, unsigned domain);
};
@@ -160,83 +192,108 @@ char ExeDepsFix::ID = 0;
/// Translate TRI register number to an index into our smaller tables of
/// interesting registers. Return -1 for boring registers.
-int ExeDepsFix::RegIndex(unsigned Reg) {
+int ExeDepsFix::regIndex(unsigned Reg) {
assert(Reg < AliasMap.size() && "Invalid register");
return AliasMap[Reg];
}
-DomainValue *ExeDepsFix::Alloc(int domain) {
+DomainValue *ExeDepsFix::alloc(int domain) {
DomainValue *dv = Avail.empty() ?
new(Allocator.Allocate()) DomainValue :
Avail.pop_back_val();
- dv->Dist = Distance;
if (domain >= 0)
dv->addDomain(domain);
+ assert(dv->Refs == 0 && "Reference count wasn't cleared");
+ assert(!dv->Next && "Chained DomainValue shouldn't have been recycled");
return dv;
}
-void ExeDepsFix::Recycle(DomainValue *dv) {
- assert(dv && "Cannot recycle NULL");
- dv->clear();
- Avail.push_back(dv);
+/// release - Release a reference to DV. When the last reference is released,
+/// collapse if needed.
+void ExeDepsFix::release(DomainValue *DV) {
+ while (DV) {
+ assert(DV->Refs && "Bad DomainValue");
+ if (--DV->Refs)
+ return;
+
+ // There are no more DV references. Collapse any contained instructions.
+ if (DV->AvailableDomains && !DV->isCollapsed())
+ collapse(DV, DV->getFirstDomain());
+
+ DomainValue *Next = DV->Next;
+ DV->clear();
+ Avail.push_back(DV);
+ // Also release the next DomainValue in the chain.
+ DV = Next;
+ }
+}
+
+/// resolve - Follow the chain of dead DomainValues until a live DomainValue is
+/// reached. Update the referenced pointer when necessary.
+DomainValue *ExeDepsFix::resolve(DomainValue *&DVRef) {
+ DomainValue *DV = DVRef;
+ if (!DV || !DV->Next)
+ return DV;
+
+ // DV has a chain. Find the end.
+ do DV = DV->Next;
+ while (DV->Next);
+
+ // Update DVRef to point to DV.
+ retain(DV);
+ release(DVRef);
+ DVRef = DV;
+ return DV;
}
/// Set LiveRegs[rx] = dv, updating reference counts.
-void ExeDepsFix::SetLiveReg(int rx, DomainValue *dv) {
+void ExeDepsFix::setLiveReg(int rx, DomainValue *dv) {
assert(unsigned(rx) < NumRegs && "Invalid index");
- if (!LiveRegs) {
- LiveRegs = new DomainValue*[NumRegs];
- std::fill(LiveRegs, LiveRegs+NumRegs, (DomainValue*)0);
- }
+ assert(LiveRegs && "Must enter basic block first.");
- if (LiveRegs[rx] == dv)
+ if (LiveRegs[rx].Value == dv)
return;
- if (LiveRegs[rx]) {
- assert(LiveRegs[rx]->Refs && "Bad refcount");
- if (--LiveRegs[rx]->Refs == 0) Recycle(LiveRegs[rx]);
- }
- LiveRegs[rx] = dv;
- if (dv) ++dv->Refs;
+ if (LiveRegs[rx].Value)
+ release(LiveRegs[rx].Value);
+ LiveRegs[rx].Value = retain(dv);
}
// Kill register rx, recycle or collapse any DomainValue.
-void ExeDepsFix::Kill(int rx) {
+void ExeDepsFix::kill(int rx) {
assert(unsigned(rx) < NumRegs && "Invalid index");
- if (!LiveRegs || !LiveRegs[rx]) return;
-
- // Before killing the last reference to an open DomainValue, collapse it to
- // the first available domain.
- if (LiveRegs[rx]->Refs == 1 && !LiveRegs[rx]->isCollapsed())
- Collapse(LiveRegs[rx], LiveRegs[rx]->getFirstDomain());
- else
- SetLiveReg(rx, 0);
+ assert(LiveRegs && "Must enter basic block first.");
+ if (!LiveRegs[rx].Value)
+ return;
+
+ release(LiveRegs[rx].Value);
+ LiveRegs[rx].Value = 0;
}
/// Force register rx into domain.
-void ExeDepsFix::Force(int rx, unsigned domain) {
+void ExeDepsFix::force(int rx, unsigned domain) {
assert(unsigned(rx) < NumRegs && "Invalid index");
- DomainValue *dv;
- if (LiveRegs && (dv = LiveRegs[rx])) {
+ assert(LiveRegs && "Must enter basic block first.");
+ if (DomainValue *dv = LiveRegs[rx].Value) {
if (dv->isCollapsed())
dv->addDomain(domain);
else if (dv->hasDomain(domain))
- Collapse(dv, domain);
+ collapse(dv, domain);
else {
// This is an incompatible open DomainValue. Collapse it to whatever and
// force the new value into domain. This costs a domain crossing.
- Collapse(dv, dv->getFirstDomain());
- assert(LiveRegs[rx] && "Not live after collapse?");
- LiveRegs[rx]->addDomain(domain);
+ collapse(dv, dv->getFirstDomain());
+ assert(LiveRegs[rx].Value && "Not live after collapse?");
+ LiveRegs[rx].Value->addDomain(domain);
}
} else {
// Set up basic collapsed DomainValue.
- SetLiveReg(rx, Alloc(domain));
+ setLiveReg(rx, alloc(domain));
}
}
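For reference, AvailableDomains is a plain bitmask, so the cases in force() reduce to bit tests. A self-contained sketch, assuming an x86-SSE-style domain numbering (integer = 0, single FP = 1, double FP = 2) purely for illustration:

#include <cassert>
#include <cstdint>

// Execution domains as a bitmask, as in DomainValue::AvailableDomains.
// The domain numbering here is an assumption for illustration only.
constexpr uint32_t IntDom = 1u << 0, SingleFP = 1u << 1, DoubleFP = 1u << 2;

constexpr bool hasDomain(uint32_t Avail, unsigned d) { return Avail & (1u << d); }
constexpr uint32_t commonDomains(uint32_t A, uint32_t B) { return A & B; }

// Lowest set bit index, like CountTrailingZeros_32 in getFirstDomain().
inline unsigned firstDomain(uint32_t Avail) {
  unsigned i = 0;
  while (!(Avail & (1u << i)))
    ++i;
  return i;
}

int main() {
  uint32_t dv = SingleFP | DoubleFP;       // open value, legal in both FP domains
  assert(hasDomain(dv, 2));                // free to use in the double domain
  assert(commonDomains(dv, IntDom) == 0);  // an integer use costs a crossing
  assert(firstDomain(dv) == 1);            // collapse target chosen by force()
  return 0;
}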
/// Collapse open DomainValue into given domain. If there are multiple
/// registers using dv, they each get a unique collapsed DomainValue.
-void ExeDepsFix::Collapse(DomainValue *dv, unsigned domain) {
+void ExeDepsFix::collapse(DomainValue *dv, unsigned domain) {
assert(dv->hasDomain(domain) && "Cannot collapse");
// Collapse all the instructions.
@@ -247,13 +304,13 @@ void ExeDepsFix::Collapse(DomainValue *dv, unsigned domain) {
// If there are multiple users, give them new, unique DomainValues.
if (LiveRegs && dv->Refs > 1)
for (unsigned rx = 0; rx != NumRegs; ++rx)
- if (LiveRegs[rx] == dv)
- SetLiveReg(rx, Alloc(domain));
+ if (LiveRegs[rx].Value == dv)
+ setLiveReg(rx, alloc(domain));
}
/// Merge - All instructions and registers in B are moved to A, and B is
/// released.
-bool ExeDepsFix::Merge(DomainValue *A, DomainValue *B) {
+bool ExeDepsFix::merge(DomainValue *A, DomainValue *B) {
assert(!A->isCollapsed() && "Cannot merge into collapsed");
assert(!B->isCollapsed() && "Cannot merge from collapsed");
if (A == B)
@@ -263,47 +320,188 @@ bool ExeDepsFix::Merge(DomainValue *A, DomainValue *B) {
if (!common)
return false;
A->AvailableDomains = common;
- A->Dist = std::max(A->Dist, B->Dist);
A->Instrs.append(B->Instrs.begin(), B->Instrs.end());
+
+ // Clear the old DomainValue so we won't try to swizzle instructions twice.
+ B->clear();
+ // All uses of B are referred to A.
+ B->Next = retain(A);
+
for (unsigned rx = 0; rx != NumRegs; ++rx)
- if (LiveRegs[rx] == B)
- SetLiveReg(rx, A);
+ if (LiveRegs[rx].Value == B)
+ setLiveReg(rx, A);
return true;
}
-void ExeDepsFix::enterBasicBlock() {
- // Try to coalesce live-out registers from predecessors.
- for (MachineBasicBlock::livein_iterator i = MBB->livein_begin(),
+// enterBasicBlock - Set up LiveRegs by merging predecessor live-out values.
+void ExeDepsFix::enterBasicBlock(MachineBasicBlock *MBB) {
+ // Detect back-edges from predecessors we haven't processed yet.
+ SeenUnknownBackEdge = false;
+
+ // Reset instruction counter in each basic block.
+ CurInstr = 0;
+
+ // Set up LiveRegs to represent registers entering MBB.
+ if (!LiveRegs)
+ LiveRegs = new LiveReg[NumRegs];
+
+ // Default values are 'nothing happened a long time ago'.
+ for (unsigned rx = 0; rx != NumRegs; ++rx) {
+ LiveRegs[rx].Value = 0;
+ LiveRegs[rx].Def = -(1 << 20);
+ }
+
+ // This is the entry block.
+ if (MBB->pred_empty()) {
+ for (MachineBasicBlock::livein_iterator i = MBB->livein_begin(),
e = MBB->livein_end(); i != e; ++i) {
- int rx = RegIndex(*i);
- if (rx < 0) continue;
- for (MachineBasicBlock::const_pred_iterator pi = MBB->pred_begin(),
- pe = MBB->pred_end(); pi != pe; ++pi) {
- LiveOutMap::const_iterator fi = LiveOuts.find(*pi);
- if (fi == LiveOuts.end()) continue;
- DomainValue *pdv = fi->second[rx];
- if (!pdv) continue;
- if (!LiveRegs || !LiveRegs[rx]) {
- SetLiveReg(rx, pdv);
+ int rx = regIndex(*i);
+ if (rx < 0)
+ continue;
+ // Treat function live-ins as if they were defined just before the first
+ // instruction. Usually, function arguments are set up immediately
+ // before the call.
+ LiveRegs[rx].Def = -1;
+ }
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": entry\n");
+ return;
+ }
+
+ // Try to coalesce live-out registers from predecessors.
+ for (MachineBasicBlock::const_pred_iterator pi = MBB->pred_begin(),
+ pe = MBB->pred_end(); pi != pe; ++pi) {
+ LiveOutMap::const_iterator fi = LiveOuts.find(*pi);
+ if (fi == LiveOuts.end()) {
+ SeenUnknownBackEdge = true;
+ continue;
+ }
+ assert(fi->second && "Can't have NULL entries");
+
+ for (unsigned rx = 0; rx != NumRegs; ++rx) {
+ // Use the most recent predecessor def for each register.
+ LiveRegs[rx].Def = std::max(LiveRegs[rx].Def, fi->second[rx].Def);
+
+ DomainValue *pdv = resolve(fi->second[rx].Value);
+ if (!pdv)
+ continue;
+ if (!LiveRegs[rx].Value) {
+ setLiveReg(rx, pdv);
continue;
}
// We have a live DomainValue from more than one predecessor.
- if (LiveRegs[rx]->isCollapsed()) {
+ if (LiveRegs[rx].Value->isCollapsed()) {
// We are already collapsed, but predecessor is not. Force it.
- unsigned domain = LiveRegs[rx]->getFirstDomain();
- if (!pdv->isCollapsed() && pdv->hasDomain(domain))
- Collapse(pdv, domain);
+ unsigned Domain = LiveRegs[rx].Value->getFirstDomain();
+ if (!pdv->isCollapsed() && pdv->hasDomain(Domain))
+ collapse(pdv, Domain);
continue;
}
// Currently open, merge in predecessor.
if (!pdv->isCollapsed())
- Merge(LiveRegs[rx], pdv);
+ merge(LiveRegs[rx].Value, pdv);
else
- Force(rx, pdv->getFirstDomain());
+ force(rx, pdv->getFirstDomain());
+ }
+ }
+ DEBUG(dbgs() << "BB#" << MBB->getNumber()
+ << (SeenUnknownBackEdge ? ": incomplete\n" : ": all preds known\n"));
+}
+
+void ExeDepsFix::leaveBasicBlock(MachineBasicBlock *MBB) {
+ assert(LiveRegs && "Must enter basic block first.");
+ // Save live registers at end of MBB - used by enterBasicBlock().
+ // Also use LiveOuts as a visited set to detect back-edges.
+ bool First = LiveOuts.insert(std::make_pair(MBB, LiveRegs)).second;
+
+ if (First) {
+ // LiveRegs was inserted in LiveOuts. Adjust all defs to be relative to
+ // the end of this block instead of the beginning.
+ for (unsigned i = 0, e = NumRegs; i != e; ++i)
+ LiveRegs[i].Def -= CurInstr;
+ } else {
+ // Insertion failed, this must be the second pass.
+ // Release all the DomainValues instead of keeping them.
+ for (unsigned i = 0, e = NumRegs; i != e; ++i)
+ release(LiveRegs[i].Value);
+ delete[] LiveRegs;
+ }
+ LiveRegs = 0;
+}
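Storing live-out Def values relative to the block end is what lets a successor compute clearance without knowing the predecessor's length. A hedged numeric sketch of that bookkeeping:

#include <cassert>

int main() {
  // Predecessor block: register defined at instruction 7 of 10.
  int Def = 7, CurInstr = 10;
  Def -= CurInstr;  // leaveBasicBlock(): Def is now relative to the block end
  assert(Def == -3);

  // Successor block restarts CurInstr at 0 (enterBasicBlock()).
  int SuccInstr = 5;
  unsigned Clearance = SuccInstr - Def;  // instructions since the last write
  assert(Clearance == 8);
  return 0;
}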
+
+void ExeDepsFix::visitInstr(MachineInstr *MI) {
+ if (MI->isDebugValue())
+ return;
+
+ // Update instructions with explicit execution domains.
+ std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(MI);
+ if (DomP.first) {
+ if (DomP.second)
+ visitSoftInstr(MI, DomP.second);
+ else
+ visitHardInstr(MI, DomP.first);
+ }
+
+ // Process defs to track register ages, and kill values clobbered by generic
+ // instructions.
+ processDefs(MI, !DomP.first);
+}
+
+// Update def-ages for registers defined by MI.
+// If Kill is set, also kill off DomainValues clobbered by the defs.
+void ExeDepsFix::processDefs(MachineInstr *MI, bool Kill) {
+ assert(!MI->isDebugValue() && "Won't process debug values");
+ const MCInstrDesc &MCID = MI->getDesc();
+ for (unsigned i = 0,
+ e = MI->isVariadic() ? MI->getNumOperands() : MCID.getNumDefs();
+ i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ if (MO.isImplicit())
+ break;
+ if (MO.isUse())
+ continue;
+ int rx = regIndex(MO.getReg());
+ if (rx < 0)
+ continue;
+
+ // This instruction explicitly defines rx.
+ DEBUG(dbgs() << TRI->getName(RC->getRegister(rx)) << ":\t" << CurInstr
+ << '\t' << *MI);
+
+ // How many instructions since rx was last written?
+ unsigned Clearance = CurInstr - LiveRegs[rx].Def;
+ LiveRegs[rx].Def = CurInstr;
+
+ // Kill off domains redefined by generic instructions.
+ if (Kill)
+ kill(rx);
+
+ // Verify clearance before partial register updates.
+ unsigned Pref = TII->getPartialRegUpdateClearance(MI, i, TRI);
+ if (!Pref)
+ continue;
+ DEBUG(dbgs() << "Clearance: " << Clearance << ", want " << Pref);
+ if (Pref > Clearance) {
+ DEBUG(dbgs() << ": Break dependency.\n");
+ TII->breakPartialRegDependency(MI, i, TRI);
+ continue;
+ }
+
+ // The current clearance seems OK, but we may be ignoring a def from a
+ // back-edge.
+ if (!SeenUnknownBackEdge || Pref <= unsigned(CurInstr)) {
+ DEBUG(dbgs() << ": OK.\n");
+ continue;
}
+
+ // A def from an unprocessed back-edge may make us break this dependency.
+ DEBUG(dbgs() << ": Wait for back-edge to resolve.\n");
}
+
+ ++CurInstr;
}
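The core decision above reduces to comparing the target's preferred clearance against the observed one; the unknown-back-edge case only defers that decision to the second pass. A minimal sketch, with the Pref values made up for illustration:

#include <cstdio>

// Mirrors the core processDefs() decision: break the false dependency only
// when the target's preferred clearance exceeds the observed one.
bool shouldBreakDep(unsigned Clearance, unsigned Pref) {
  if (!Pref)
    return false;           // target has no preference for this operand
  return Pref > Clearance;  // too few instructions since the last write
}

int main() {
  std::printf("%d\n", shouldBreakDep(8, 16));   // 1: insert dependency breaker
  std::printf("%d\n", shouldBreakDep(64, 16));  // 0: clearance is sufficient
  return 0;
}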
// A hard instruction only works in one domain. All input registers will be
@@ -314,19 +512,19 @@ void ExeDepsFix::visitHardInstr(MachineInstr *mi, unsigned domain) {
e = mi->getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &mo = mi->getOperand(i);
if (!mo.isReg()) continue;
- int rx = RegIndex(mo.getReg());
+ int rx = regIndex(mo.getReg());
if (rx < 0) continue;
- Force(rx, domain);
+ force(rx, domain);
}
// Kill all defs and force them.
for (unsigned i = 0, e = mi->getDesc().getNumDefs(); i != e; ++i) {
MachineOperand &mo = mi->getOperand(i);
if (!mo.isReg()) continue;
- int rx = RegIndex(mo.getReg());
+ int rx = regIndex(mo.getReg());
if (rx < 0) continue;
- Kill(rx);
- Force(rx, domain);
+ kill(rx);
+ force(rx, domain);
}
}
@@ -343,9 +541,9 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
e = mi->getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &mo = mi->getOperand(i);
if (!mo.isReg()) continue;
- int rx = RegIndex(mo.getReg());
+ int rx = regIndex(mo.getReg());
if (rx < 0) continue;
- if (DomainValue *dv = LiveRegs[rx]) {
+ if (DomainValue *dv = LiveRegs[rx].Value) {
// Bitmask of domains that dv and available have in common.
unsigned common = dv->getCommonDomains(available);
// Is it possible to use this collapsed register for free?
@@ -360,7 +558,7 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
else
// Open DomainValue is not compatible with instruction. It is useless
// now.
- Kill(rx);
+ kill(rx);
}
}
@@ -374,94 +572,89 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
// Kill off any remaining uses that don't match available, and build a list of
// incoming DomainValues that we want to merge.
- SmallVector<DomainValue*,4> doms;
+ SmallVector<LiveReg, 4> Regs;
for (SmallVector<int, 4>::iterator i=used.begin(), e=used.end(); i!=e; ++i) {
int rx = *i;
- DomainValue *dv = LiveRegs[rx];
+ const LiveReg &LR = LiveRegs[rx];
// This useless DomainValue could have been missed above.
- if (!dv->getCommonDomains(available)) {
- Kill(*i);
+ if (!LR.Value->getCommonDomains(available)) {
+ kill(rx);
continue;
}
- // sorted, uniqued insert.
- bool inserted = false;
- for (SmallVector<DomainValue*,4>::iterator i = doms.begin(), e = doms.end();
- i != e && !inserted; ++i) {
- if (dv == *i)
- inserted = true;
- else if (dv->Dist < (*i)->Dist) {
- inserted = true;
- doms.insert(i, dv);
+ // Sorted insertion.
+ bool Inserted = false;
+ for (SmallVector<LiveReg, 4>::iterator i = Regs.begin(), e = Regs.end();
+ i != e && !Inserted; ++i) {
+ if (LR.Def < i->Def) {
+ Inserted = true;
+ Regs.insert(i, LR);
}
}
- if (!inserted)
- doms.push_back(dv);
+ if (!Inserted)
+ Regs.push_back(LR);
}
// Regs are now sorted in order of appearance. Try to merge them all, giving
// priority to the latest ones.
DomainValue *dv = 0;
- while (!doms.empty()) {
+ while (!Regs.empty()) {
if (!dv) {
- dv = doms.pop_back_val();
+ dv = Regs.pop_back_val().Value;
+ // Force the first dv to match the current instruction.
+ dv->AvailableDomains = dv->getCommonDomains(available);
+ assert(dv->AvailableDomains && "Domain should have been filtered");
continue;
}
- DomainValue *latest = doms.pop_back_val();
- if (Merge(dv, latest)) continue;
+ DomainValue *Latest = Regs.pop_back_val().Value;
+ // Skip already merged values.
+ if (Latest == dv || Latest->Next)
+ continue;
+ if (merge(dv, Latest))
+ continue;
// If latest didn't merge, it is useless now. Kill all registers using it.
for (SmallVector<int,4>::iterator i=used.begin(), e=used.end(); i != e; ++i)
- if (LiveRegs[*i] == latest)
- Kill(*i);
+ if (LiveRegs[*i].Value == Latest)
+ kill(*i);
}
// dv is the DomainValue we are going to use for this instruction.
- if (!dv)
- dv = Alloc();
- dv->Dist = Distance;
- dv->AvailableDomains = available;
+ if (!dv) {
+ dv = alloc();
+ dv->AvailableDomains = available;
+ }
dv->Instrs.push_back(mi);
// Finally set all defs and non-collapsed uses to dv.
for (unsigned i = 0, e = mi->getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &mo = mi->getOperand(i);
if (!mo.isReg()) continue;
- int rx = RegIndex(mo.getReg());
+ int rx = regIndex(mo.getReg());
if (rx < 0) continue;
- if (!LiveRegs || !LiveRegs[rx] || (mo.isDef() && LiveRegs[rx]!=dv)) {
- Kill(rx);
- SetLiveReg(rx, dv);
+ if (!LiveRegs[rx].Value || (mo.isDef() && LiveRegs[rx].Value != dv)) {
+ kill(rx);
+ setLiveReg(rx, dv);
}
}
}
-void ExeDepsFix::visitGenericInstr(MachineInstr *mi) {
- // Process explicit defs, kill any relevant registers redefined.
- for (unsigned i = 0, e = mi->getDesc().getNumDefs(); i != e; ++i) {
- MachineOperand &mo = mi->getOperand(i);
- if (!mo.isReg()) continue;
- int rx = RegIndex(mo.getReg());
- if (rx < 0) continue;
- Kill(rx);
- }
-}
-
bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
TII = MF->getTarget().getInstrInfo();
TRI = MF->getTarget().getRegisterInfo();
- MBB = 0;
LiveRegs = 0;
- Distance = 0;
assert(NumRegs == RC->getNumRegs() && "Bad regclass");
+ DEBUG(dbgs() << "********** FIX EXECUTION DEPENDENCIES: "
+ << RC->getName() << " **********\n");
+
// If no relevant registers are used in the function, we can skip it
// completely.
bool anyregs = false;
for (TargetRegisterClass::const_iterator I = RC->begin(), E = RC->end();
I != E; ++I)
- if (MF->getRegInfo().isPhysRegUsed(*I)) {
+ if (MF->getRegInfo().isPhysRegOrOverlapUsed(*I)) {
anyregs = true;
break;
}
@@ -473,43 +666,48 @@ bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
// or -1.
AliasMap.resize(TRI->getNumRegs(), -1);
for (unsigned i = 0, e = RC->getNumRegs(); i != e; ++i)
- for (const unsigned *AI = TRI->getOverlaps(RC->getRegister(i)); *AI; ++AI)
+ for (const uint16_t *AI = TRI->getOverlaps(RC->getRegister(i)); *AI; ++AI)
AliasMap[*AI] = i;
}
MachineBasicBlock *Entry = MF->begin();
- SmallPtrSet<MachineBasicBlock*, 16> Visited;
- for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*, 16> >
- DFI = df_ext_begin(Entry, Visited), DFE = df_ext_end(Entry, Visited);
- DFI != DFE; ++DFI) {
- MBB = *DFI;
- enterBasicBlock();
+ ReversePostOrderTraversal<MachineBasicBlock*> RPOT(Entry);
+ SmallVector<MachineBasicBlock*, 16> Loops;
+ for (ReversePostOrderTraversal<MachineBasicBlock*>::rpo_iterator
+ MBBI = RPOT.begin(), MBBE = RPOT.end(); MBBI != MBBE; ++MBBI) {
+ MachineBasicBlock *MBB = *MBBI;
+ enterBasicBlock(MBB);
+ if (SeenUnknownBackEdge)
+ Loops.push_back(MBB);
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
- ++I) {
- MachineInstr *mi = I;
- if (mi->isDebugValue()) continue;
- ++Distance;
- std::pair<uint16_t, uint16_t> domp = TII->getExecutionDomain(mi);
- if (domp.first)
- if (domp.second)
- visitSoftInstr(mi, domp.second);
- else
- visitHardInstr(mi, domp.first);
- else if (LiveRegs)
- visitGenericInstr(mi);
- }
+ ++I)
+ visitInstr(I);
+ leaveBasicBlock(MBB);
+ }
- // Save live registers at end of MBB - used by enterBasicBlock().
- if (LiveRegs)
- LiveOuts.insert(std::make_pair(MBB, LiveRegs));
- LiveRegs = 0;
+ // Visit all the loop blocks again in order to merge DomainValues from
+ // back-edges.
+ for (unsigned i = 0, e = Loops.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = Loops[i];
+ enterBasicBlock(MBB);
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ ++I)
+ if (!I->isDebugValue())
+ processDefs(I, false);
+ leaveBasicBlock(MBB);
}
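The reverse post-order walk guarantees that every predecessor except a back-edge source is processed first, which is exactly why the blocks flagged by SeenUnknownBackEdge get this second visit. A toy CFG illustrating the ordering:

#include <algorithm>
#include <cstdio>
#include <vector>

// Toy CFG: 0 -> 1 -> 2, 2 -> 1 (back-edge), 2 -> 3.
std::vector<int> Succ[4] = {{1}, {2}, {1, 3}, {}};
std::vector<int> PostOrder;
bool Visited[4] = {};

void dfs(int BB) {
  Visited[BB] = true;
  for (int S : Succ[BB])
    if (!Visited[S])
      dfs(S);
  PostOrder.push_back(BB);
}

int main() {
  dfs(0);
  std::reverse(PostOrder.begin(), PostOrder.end());
  for (int BB : PostOrder)
    std::printf("%d ", BB);  // prints: 0 1 2 3
  std::printf("\n");
  // Block 1 is visited before its predecessor 2, so live-outs along the
  // back-edge 2->1 are unknown on the first pass -- hence the Loops revisit.
  return 0;
}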
- // Clear the LiveOuts vectors. Should we also collapse any remaining
- // DomainValues?
- for (LiveOutMap::const_iterator i = LiveOuts.begin(), e = LiveOuts.end();
- i != e; ++i)
- delete[] i->second;
+ // Clear the LiveOuts vectors and collapse any remaining DomainValues.
+ for (ReversePostOrderTraversal<MachineBasicBlock*>::rpo_iterator
+ MBBI = RPOT.begin(), MBBE = RPOT.end(); MBBI != MBBE; ++MBBI) {
+ LiveOutMap::const_iterator FI = LiveOuts.find(*MBBI);
+ if (FI == LiveOuts.end() || !FI->second)
+ continue;
+ for (unsigned i = 0, e = NumRegs; i != e; ++i)
+ if (FI->second[i].Value)
+ release(FI->second[i].Value);
+ delete[] FI->second;
+ }
LiveOuts.clear();
Avail.clear();
Allocator.DestroyAll();
diff --git a/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp b/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp
index a67140e..2c4a935 100644
--- a/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp
+++ b/contrib/llvm/lib/CodeGen/ExpandISelPseudos.cpp
@@ -32,10 +32,6 @@ namespace {
private:
virtual bool runOnMachineFunction(MachineFunction &MF);
- const char *getPassName() const {
- return "Expand ISel Pseudo-instructions";
- }
-
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -43,12 +39,9 @@ namespace {
} // end anonymous namespace
char ExpandISelPseudos::ID = 0;
+char &llvm::ExpandISelPseudosID = ExpandISelPseudos::ID;
INITIALIZE_PASS(ExpandISelPseudos, "expand-isel-pseudos",
- "Expand CodeGen Pseudo-instructions", false, false)
-
-FunctionPass *llvm::createExpandISelPseudosPass() {
- return new ExpandISelPseudos();
-}
+ "Expand ISel Pseudo-instructions", false, false)
bool ExpandISelPseudos::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
@@ -62,8 +55,7 @@ bool ExpandISelPseudos::runOnMachineFunction(MachineFunction &MF) {
MachineInstr *MI = MBBI++;
// If MI is a pseudo, expand it.
- const MCInstrDesc &MCID = MI->getDesc();
- if (MCID.usesCustomInsertionHook()) {
+ if (MI->usesCustomInsertionHook()) {
Changed = true;
MachineBasicBlock *NewMBB =
TLI->EmitInstrWithCustomInserter(MI, MBB);
diff --git a/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp b/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
index e2a14a8..b14afc2 100644
--- a/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/contrib/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -36,10 +36,6 @@ public:
static char ID; // Pass identification, replacement for typeid
ExpandPostRA() : MachineFunctionPass(ID) {}
- const char *getPassName() const {
- return "Post-RA pseudo instruction expansion pass";
- }
-
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addPreservedID(MachineLoopInfoID);
@@ -61,10 +57,10 @@ private:
} // end anonymous namespace
char ExpandPostRA::ID = 0;
+char &llvm::ExpandPostRAPseudosID = ExpandPostRA::ID;
-FunctionPass *llvm::createExpandPostRAPseudosPass() {
- return new ExpandPostRA();
-}
+INITIALIZE_PASS(ExpandPostRA, "postrapseudos",
+ "Post-RA pseudo instruction expansion pass", false, false)
/// TransferDeadFlag - MI is a pseudo-instruction with DstReg dead,
/// and the lowered replacement instructions immediately precede it.
@@ -207,7 +203,7 @@ bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
++mi;
// Only expand pseudos.
- if (!MI->getDesc().isPseudo())
+ if (!MI->isPseudo())
continue;
// Give targets a chance to expand even standard pseudos.
diff --git a/contrib/llvm/lib/CodeGen/GCMetadata.cpp b/contrib/llvm/lib/CodeGen/GCMetadata.cpp
index d757cf4..1caf8c2 100644
--- a/contrib/llvm/lib/CodeGen/GCMetadata.cpp
+++ b/contrib/llvm/lib/CodeGen/GCMetadata.cpp
@@ -143,12 +143,12 @@ void Printer::getAnalysisUsage(AnalysisUsage &AU) const {
static const char *DescKind(GC::PointKind Kind) {
switch (Kind) {
- default: llvm_unreachable("Unknown GC point kind");
case GC::Loop: return "loop";
case GC::Return: return "return";
case GC::PreCall: return "pre-call";
case GC::PostCall: return "post-call";
}
+ llvm_unreachable("Invalid point kind");
}
bool Printer::runOnFunction(Function &F) {
@@ -156,12 +156,12 @@ bool Printer::runOnFunction(Function &F) {
GCFunctionInfo *FD = &getAnalysis<GCModuleInfo>().getFunctionInfo(F);
- OS << "GC roots for " << FD->getFunction().getNameStr() << ":\n";
+ OS << "GC roots for " << FD->getFunction().getName() << ":\n";
for (GCFunctionInfo::roots_iterator RI = FD->roots_begin(),
RE = FD->roots_end(); RI != RE; ++RI)
OS << "\t" << RI->Num << "\t" << RI->StackOffset << "[sp]\n";
- OS << "GC safe points for " << FD->getFunction().getNameStr() << ":\n";
+ OS << "GC safe points for " << FD->getFunction().getName() << ":\n";
for (GCFunctionInfo::iterator PI = FD->begin(),
PE = FD->end(); PI != PE; ++PI) {
diff --git a/contrib/llvm/lib/CodeGen/GCStrategy.cpp b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
index 766c6ee..506b5cf 100644
--- a/contrib/llvm/lib/CodeGen/GCStrategy.cpp
+++ b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
@@ -10,8 +10,8 @@
// This file implements target- and collector-independent garbage collection
// infrastructure.
//
-// MachineCodeAnalysis identifies the GC safe points in the machine code. Roots
-// are identified in SelectionDAGISel.
+// GCMachineCodeAnalysis identifies the GC safe points in the machine code.
+// Roots are identified in SelectionDAGISel.
//
//===----------------------------------------------------------------------===//
@@ -35,9 +35,9 @@
using namespace llvm;
namespace {
-
+
/// LowerIntrinsics - This pass rewrites calls to the llvm.gcread or
- /// llvm.gcwrite intrinsics, replacing them with simple loads and stores as
+ /// llvm.gcwrite intrinsics, replacing them with simple loads and stores as
/// directed by the GCStrategy. It also performs automatic root initialization
/// and custom intrinsic lowering.
class LowerIntrinsics : public FunctionPass {
@@ -47,47 +47,46 @@ namespace {
bool PerformDefaultLowering(Function &F, GCStrategy &Coll);
static bool InsertRootInitializers(Function &F,
AllocaInst **Roots, unsigned Count);
-
+
public:
static char ID;
-
+
LowerIntrinsics();
const char *getPassName() const;
void getAnalysisUsage(AnalysisUsage &AU) const;
-
+
bool doInitialization(Module &M);
bool runOnFunction(Function &F);
};
-
-
- /// MachineCodeAnalysis - This is a target-independent pass over the machine
+
+
+ /// GCMachineCodeAnalysis - This is a target-independent pass over the machine
/// function representation to identify safe points for the garbage collector
/// in the machine code. It inserts labels at safe points and populates a
/// GCMetadata record for each function.
- class MachineCodeAnalysis : public MachineFunctionPass {
+ class GCMachineCodeAnalysis : public MachineFunctionPass {
const TargetMachine *TM;
GCFunctionInfo *FI;
MachineModuleInfo *MMI;
const TargetInstrInfo *TII;
-
+
void FindSafePoints(MachineFunction &MF);
void VisitCallPoint(MachineBasicBlock::iterator MI);
- MCSymbol *InsertLabel(MachineBasicBlock &MBB,
+ MCSymbol *InsertLabel(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
DebugLoc DL) const;
-
+
void FindStackOffsets(MachineFunction &MF);
-
+
public:
static char ID;
-
- MachineCodeAnalysis();
- const char *getPassName() const;
+
+ GCMachineCodeAnalysis();
void getAnalysisUsage(AnalysisUsage &AU) const;
-
+
bool runOnMachineFunction(MachineFunction &MF);
};
-
+
}
// -----------------------------------------------------------------------------
@@ -97,6 +96,7 @@ GCStrategy::GCStrategy() :
CustomReadBarriers(false),
CustomWriteBarriers(false),
CustomRoots(false),
+ CustomSafePoints(false),
InitRoots(true),
UsesMetadata(false)
{}
@@ -104,18 +104,24 @@ GCStrategy::GCStrategy() :
GCStrategy::~GCStrategy() {
for (iterator I = begin(), E = end(); I != E; ++I)
delete *I;
-
+
Functions.clear();
}
-
+
bool GCStrategy::initializeCustomLowering(Module &M) { return false; }
-
+
bool GCStrategy::performCustomLowering(Function &F) {
dbgs() << "gc " << getName() << " must override performCustomLowering.\n";
+ llvm_unreachable("must override performCustomLowering");
+}
+
+
+bool GCStrategy::findCustomSafePoints(GCFunctionInfo& FI, MachineFunction &F) {
+ dbgs() << "gc " << getName() << " must override findCustomSafePoints.\n";
llvm_unreachable(0);
- return 0;
}
+
GCFunctionInfo *GCStrategy::insertFunctionInfo(const Function &F) {
GCFunctionInfo *FI = new GCFunctionInfo(F, *this);
Functions.push_back(FI);
@@ -132,7 +138,7 @@ INITIALIZE_PASS_END(LowerIntrinsics, "gc-lowering", "GC Lowering", false, false)
FunctionPass *llvm::createGCLoweringPass() {
return new LowerIntrinsics();
}
-
+
char LowerIntrinsics::ID = 0;
LowerIntrinsics::LowerIntrinsics()
@@ -143,7 +149,7 @@ LowerIntrinsics::LowerIntrinsics()
const char *LowerIntrinsics::getPassName() const {
return "Lower Garbage Collection Instructions";
}
-
+
void LowerIntrinsics::getAnalysisUsage(AnalysisUsage &AU) const {
FunctionPass::getAnalysisUsage(AU);
AU.addRequired<GCModuleInfo>();
@@ -161,22 +167,22 @@ bool LowerIntrinsics::doInitialization(Module &M) {
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
if (!I->isDeclaration() && I->hasGC())
MI->getFunctionInfo(*I); // Instantiate the GC strategy.
-
+
bool MadeChange = false;
for (GCModuleInfo::iterator I = MI->begin(), E = MI->end(); I != E; ++I)
if (NeedsCustomLoweringPass(**I))
if ((*I)->initializeCustomLowering(M))
MadeChange = true;
-
+
return MadeChange;
}
-bool LowerIntrinsics::InsertRootInitializers(Function &F, AllocaInst **Roots,
+bool LowerIntrinsics::InsertRootInitializers(Function &F, AllocaInst **Roots,
unsigned Count) {
// Scroll past alloca instructions.
BasicBlock::iterator IP = F.getEntryBlock().begin();
while (isa<AllocaInst>(IP)) ++IP;
-
+
// Search for initializers in the initial BB.
SmallPtrSet<AllocaInst*,16> InitedRoots;
for (; !CouldBecomeSafePoint(IP); ++IP)
@@ -184,10 +190,10 @@ bool LowerIntrinsics::InsertRootInitializers(Function &F, AllocaInst **Roots,
if (AllocaInst *AI =
dyn_cast<AllocaInst>(SI->getOperand(1)->stripPointerCasts()))
InitedRoots.insert(AI);
-
+
// Add root initializers.
bool MadeChange = false;
-
+
for (AllocaInst **I = Roots, **E = Roots + Count; I != E; ++I)
if (!InitedRoots.count(*I)) {
StoreInst* SI = new StoreInst(ConstantPointerNull::get(cast<PointerType>(
@@ -196,7 +202,7 @@ bool LowerIntrinsics::InsertRootInitializers(Function &F, AllocaInst **Roots,
SI->insertAfter(*I);
MadeChange = true;
}
-
+
return MadeChange;
}
@@ -220,26 +226,26 @@ bool LowerIntrinsics::NeedsCustomLoweringPass(const GCStrategy &C) {
bool LowerIntrinsics::CouldBecomeSafePoint(Instruction *I) {
// The natural definition of instructions which could introduce safe points
// are:
- //
+ //
// - call, invoke (AfterCall, BeforeCall)
// - phis (Loops)
// - invoke, ret, unwind (Exit)
- //
+ //
// However, instructions as seemingly innocuous as arithmetic can become
// libcalls upon lowering (e.g., div i64 on a 32-bit platform), so instead
// it is necessary to take a conservative approach.
-
+
if (isa<AllocaInst>(I) || isa<GetElementPtrInst>(I) ||
isa<StoreInst>(I) || isa<LoadInst>(I))
return false;
-
+
// llvm.gcroot is safe because it doesn't do anything at runtime.
if (CallInst *CI = dyn_cast<CallInst>(I))
if (Function *F = CI->getCalledFunction())
if (unsigned IID = F->getIntrinsicID())
if (IID == Intrinsic::gcroot)
return false;
-
+
return true;
}
@@ -249,15 +255,15 @@ bool LowerIntrinsics::runOnFunction(Function &F) {
// Quick exit for functions that do not use GC.
if (!F.hasGC())
return false;
-
+
GCFunctionInfo &FI = getAnalysis<GCModuleInfo>().getFunctionInfo(F);
GCStrategy &S = FI.getStrategy();
-
+
bool MadeChange = false;
-
+
if (NeedsDefaultLoweringPass(S))
MadeChange |= PerformDefaultLowering(F, S);
-
+
bool UseCustomLoweringPass = NeedsCustomLoweringPass(S);
if (UseCustomLoweringPass)
MadeChange |= S.performCustomLowering(F);
@@ -275,9 +281,9 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
bool LowerWr = !S.customWriteBarrier();
bool LowerRd = !S.customReadBarrier();
bool InitRoots = S.initializeRoots();
-
+
SmallVector<AllocaInst*, 32> Roots;
-
+
bool MadeChange = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
@@ -313,104 +319,104 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
default:
continue;
}
-
+
MadeChange = true;
}
}
}
-
+
if (Roots.size())
MadeChange |= InsertRootInitializers(F, Roots.begin(), Roots.size());
-
+
return MadeChange;
}
// -----------------------------------------------------------------------------
-FunctionPass *llvm::createGCMachineCodeAnalysisPass() {
- return new MachineCodeAnalysis();
-}
+char GCMachineCodeAnalysis::ID = 0;
+char &llvm::GCMachineCodeAnalysisID = GCMachineCodeAnalysis::ID;
-char MachineCodeAnalysis::ID = 0;
+INITIALIZE_PASS(GCMachineCodeAnalysis, "gc-analysis",
+ "Analyze Machine Code For Garbage Collection", false, false)
-MachineCodeAnalysis::MachineCodeAnalysis()
+GCMachineCodeAnalysis::GCMachineCodeAnalysis()
: MachineFunctionPass(ID) {}
-const char *MachineCodeAnalysis::getPassName() const {
- return "Analyze Machine Code For Garbage Collection";
-}
-
-void MachineCodeAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+void GCMachineCodeAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
AU.setPreservesAll();
AU.addRequired<MachineModuleInfo>();
AU.addRequired<GCModuleInfo>();
}
-MCSymbol *MachineCodeAnalysis::InsertLabel(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- DebugLoc DL) const {
+MCSymbol *GCMachineCodeAnalysis::InsertLabel(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ DebugLoc DL) const {
MCSymbol *Label = MBB.getParent()->getContext().CreateTempSymbol();
BuildMI(MBB, MI, DL, TII->get(TargetOpcode::GC_LABEL)).addSym(Label);
return Label;
}
-void MachineCodeAnalysis::VisitCallPoint(MachineBasicBlock::iterator CI) {
+void GCMachineCodeAnalysis::VisitCallPoint(MachineBasicBlock::iterator CI) {
// Find the return address (next instruction), too, so as to bracket the call
// instruction.
- MachineBasicBlock::iterator RAI = CI;
- ++RAI;
-
+ MachineBasicBlock::iterator RAI = CI;
+ ++RAI;
+
if (FI->getStrategy().needsSafePoint(GC::PreCall)) {
MCSymbol* Label = InsertLabel(*CI->getParent(), CI, CI->getDebugLoc());
FI->addSafePoint(GC::PreCall, Label, CI->getDebugLoc());
}
-
+
if (FI->getStrategy().needsSafePoint(GC::PostCall)) {
MCSymbol* Label = InsertLabel(*CI->getParent(), RAI, CI->getDebugLoc());
FI->addSafePoint(GC::PostCall, Label, CI->getDebugLoc());
}
}
-void MachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
+void GCMachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
for (MachineFunction::iterator BBI = MF.begin(),
BBE = MF.end(); BBI != BBE; ++BBI)
for (MachineBasicBlock::iterator MI = BBI->begin(),
ME = BBI->end(); MI != ME; ++MI)
- if (MI->getDesc().isCall())
+ if (MI->isCall())
VisitCallPoint(MI);
}
-void MachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) {
+void GCMachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) {
const TargetFrameLowering *TFI = TM->getFrameLowering();
assert(TFI && "TargetRegisterInfo not available!");
-
+
for (GCFunctionInfo::roots_iterator RI = FI->roots_begin(),
RE = FI->roots_end(); RI != RE; ++RI)
RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num);
}
-bool MachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
+bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
// Quick exit for functions that do not use GC.
if (!MF.getFunction()->hasGC())
return false;
-
+
FI = &getAnalysis<GCModuleInfo>().getFunctionInfo(*MF.getFunction());
if (!FI->getStrategy().needsSafePoints())
return false;
-
+
TM = &MF.getTarget();
MMI = &getAnalysis<MachineModuleInfo>();
TII = TM->getInstrInfo();
-
+
// Find the size of the stack frame.
FI->setFrameSize(MF.getFrameInfo()->getStackSize());
-
+
// Find all safe points.
- FindSafePoints(MF);
-
+ if (FI->getStrategy().customSafePoints()) {
+ FI->getStrategy().findCustomSafePoints(*FI, MF);
+ } else {
+ FindSafePoints(MF);
+ }
+
// Find the stack offsets for all roots.
FindStackOffsets(MF);
-
+
return false;
}
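The new CustomSafePoints flag follows the existing capability-flag-plus-virtual-hook pattern of the other Custom* members. A standalone sketch of that dispatch (class and names are illustrative, not the LLVM API):

#include <cstdio>

// Capability flag plus virtual hook, as GCStrategy does with
// CustomSafePoints / findCustomSafePoints.
struct Strategy {
  bool CustomSafePoints = false;
  virtual ~Strategy() = default;
  virtual void findCustomSafePoints() {
    std::printf("error: strategy must override findCustomSafePoints\n");
  }
};

struct ShadowStackLike : Strategy {
  ShadowStackLike() { CustomSafePoints = true; }  // opt in to custom placement
  void findCustomSafePoints() override { std::printf("custom safe points\n"); }
};

// The generic pass dispatches on the flag, like runOnMachineFunction above.
void runPass(Strategy &S) {
  if (S.CustomSafePoints)
    S.findCustomSafePoints();
  else
    std::printf("default FindSafePoints: bracket every call\n");
}

int main() {
  Strategy Default;
  ShadowStackLike SS;
  runPass(Default);  // default FindSafePoints: bracket every call
  runPass(SS);       // custom safe points
  return 0;
}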
diff --git a/contrib/llvm/lib/CodeGen/IfConversion.cpp b/contrib/llvm/lib/CodeGen/IfConversion.cpp
index ce7ed29..75ae5b9 100644
--- a/contrib/llvm/lib/CodeGen/IfConversion.cpp
+++ b/contrib/llvm/lib/CodeGen/IfConversion.cpp
@@ -62,6 +62,7 @@ STATISTIC(NumTriangleFRev, "Number of triangle (F/R) if-conversions performed");
STATISTIC(NumDiamonds, "Number of diamond if-conversions performed");
STATISTIC(NumIfConvBBs, "Number of if-converted blocks");
STATISTIC(NumDupBBs, "Number of duplicated blocks");
+STATISTIC(NumUnpred, "Number of true blocks of diamonds unpredicated");
namespace {
class IfConverter : public MachineFunctionPass {
@@ -169,7 +170,6 @@ namespace {
}
virtual bool runOnMachineFunction(MachineFunction &MF);
- virtual const char *getPassName() const { return "If Converter"; }
private:
bool ReverseBranchCondition(BBInfo &BBI);
@@ -195,7 +195,8 @@ namespace {
void PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
SmallVectorImpl<MachineOperand> &Cond,
- SmallSet<unsigned, 4> &Redefs);
+ SmallSet<unsigned, 4> &Redefs,
+ SmallSet<unsigned, 4> *LaterRedefs = 0);
void CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
SmallSet<unsigned, 4> &Redefs,
@@ -251,12 +252,12 @@ namespace {
char IfConverter::ID = 0;
}
+char &llvm::IfConverterID = IfConverter::ID;
+
INITIALIZE_PASS_BEGIN(IfConverter, "if-converter", "If Converter", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_END(IfConverter, "if-converter", "If Converter", false, false)
-FunctionPass *llvm::createIfConverterPass() { return new IfConverter(); }
-
bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
TLI = MF.getTarget().getTargetLowering();
TII = MF.getTarget().getInstrInfo();
@@ -313,8 +314,7 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
bool RetVal = false;
switch (Kind) {
- default: assert(false && "Unexpected!");
- break;
+ default: llvm_unreachable("Unexpected!");
case ICSimple:
case ICSimpleFalse: {
bool isFalse = Kind == ICSimpleFalse;
@@ -573,12 +573,12 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
// blocks, move the end iterators up past any branch instructions.
while (TIE != TIB) {
--TIE;
- if (!TIE->getDesc().isBranch())
+ if (!TIE->isBranch())
break;
}
while (FIE != FIB) {
--FIE;
- if (!FIE->getDesc().isBranch())
+ if (!FIE->isBranch())
break;
}
@@ -651,12 +651,11 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
if (I->isDebugValue())
continue;
- const MCInstrDesc &MCID = I->getDesc();
- if (MCID.isNotDuplicable())
+ if (I->isNotDuplicable())
BBI.CannotBeCopied = true;
bool isPredicated = TII->isPredicated(I);
- bool isCondBr = BBI.IsBrAnalyzable && MCID.isConditionalBranch();
+ bool isCondBr = BBI.IsBrAnalyzable && I->isConditionalBranch();
if (!isCondBr) {
if (!isPredicated) {
@@ -963,7 +962,7 @@ static void InitPredRedefs(MachineBasicBlock *BB, SmallSet<unsigned,4> &Redefs,
E = BB->livein_end(); I != E; ++I) {
unsigned Reg = *I;
Redefs.insert(Reg);
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg)
Redefs.insert(*Subreg);
}
@@ -984,7 +983,7 @@ static void UpdatePredRedefs(MachineInstr *MI, SmallSet<unsigned,4> &Redefs,
Defs.push_back(Reg);
else if (MO.isKill()) {
Redefs.erase(Reg);
- for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
Redefs.erase(*SR);
}
}
@@ -997,7 +996,7 @@ static void UpdatePredRedefs(MachineInstr *MI, SmallSet<unsigned,4> &Redefs,
true/*IsImp*/,false/*IsKill*/));
} else {
Redefs.insert(Reg);
- for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
Redefs.insert(*SR);
}
}
@@ -1035,7 +1034,7 @@ bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
if (Kind == ICSimpleFalse)
if (TII->ReverseBranchCondition(Cond))
- assert(false && "Unable to reverse branch condition!");
+ llvm_unreachable("Unable to reverse branch condition!");
// Initialize liveins to the first BB. These are potentially redefined by
// predicated instructions.
@@ -1108,7 +1107,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
if (Kind == ICTriangleFalse || Kind == ICTriangleFRev)
if (TII->ReverseBranchCondition(Cond))
- assert(false && "Unable to reverse branch condition!");
+ llvm_unreachable("Unable to reverse branch condition!");
if (Kind == ICTriangleRev || Kind == ICTriangleFRev) {
if (ReverseBranchCondition(*CvtBBI)) {
@@ -1155,7 +1154,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
SmallVector<MachineOperand, 4> RevCond(CvtBBI->BrCond.begin(),
CvtBBI->BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
- assert(false && "Unable to reverse branch condition!");
+ llvm_unreachable("Unable to reverse branch condition!");
TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond, dl);
BBI.BB->addSuccessor(CvtBBI->FalseBB);
}
@@ -1227,7 +1226,7 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
BBInfo *BBI2 = &FalseBBI;
SmallVector<MachineOperand, 4> RevCond(BBI.BrCond.begin(), BBI.BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
- assert(false && "Unable to reverse branch condition!");
+ llvm_unreachable("Unable to reverse branch condition!");
SmallVector<MachineOperand, 4> *Cond1 = &BBI.BrCond;
SmallVector<MachineOperand, 4> *Cond2 = &RevCond;
@@ -1281,7 +1280,7 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
BBI.BB->splice(BBI.BB->end(), BBI1->BB, BBI1->BB->begin(), DI1);
BBI2->BB->erase(BBI2->BB->begin(), DI2);
- // Predicate the 'true' block after removing its branch.
+ // Remove branch from 'true' block and remove duplicated instructions.
BBI1->NonPredSize -= TII->RemoveBranch(*BBI1->BB);
DI1 = BBI1->BB->end();
for (unsigned i = 0; i != NumDups2; ) {
@@ -1294,9 +1293,8 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
++i;
}
BBI1->BB->erase(DI1, BBI1->BB->end());
- PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1, Redefs);
- // Predicate the 'false' block.
+ // Remove 'false' block branch and find the last instruction to predicate.
BBI2->NonPredSize -= TII->RemoveBranch(*BBI2->BB);
DI2 = BBI2->BB->end();
while (NumDups2 != 0) {
@@ -1308,6 +1306,55 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
if (!DI2->isDebugValue())
--NumDups2;
}
+
+ // Remember which registers would later be defined by the false block.
+ // This allows us not to predicate instructions in the true block that would
+ // later be re-defined. That is, rather than
+ // subeq r0, r1, #1
+ // addne r0, r1, #1
+ // generate:
+ // sub r0, r1, #1
+ // addne r0, r1, #1
+ SmallSet<unsigned, 4> RedefsByFalse;
+ SmallSet<unsigned, 4> ExtUses;
+ if (TII->isProfitableToUnpredicate(*BBI1->BB, *BBI2->BB)) {
+ for (MachineBasicBlock::iterator FI = BBI2->BB->begin(); FI != DI2; ++FI) {
+ if (FI->isDebugValue())
+ continue;
+ SmallVector<unsigned, 4> Defs;
+ for (unsigned i = 0, e = FI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = FI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ if (MO.isDef()) {
+ Defs.push_back(Reg);
+ } else if (!RedefsByFalse.count(Reg)) {
+ // These are defined before control flow reaches the 'false' instructions.
+ // They cannot be modified by the 'true' instructions.
+ ExtUses.insert(Reg);
+ for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ ExtUses.insert(*SR);
+ }
+ }
+
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ unsigned Reg = Defs[i];
+ if (!ExtUses.count(Reg)) {
+ RedefsByFalse.insert(Reg);
+ for (const uint16_t *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ RedefsByFalse.insert(*SR);
+ }
+ }
+ }
+ }
+
+ // Predicate the 'true' block.
+ PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1, Redefs, &RedefsByFalse);
+
+ // Predicate the 'false' block.
PredicateBlock(*BBI2, DI2, *Cond2, Redefs);
// Merge the true block into the entry of the diamond.
@@ -1319,7 +1366,7 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
// fold the tail block in as well. Otherwise, unless it falls through to the
// tail, add an unconditional branch to it.
if (TailBB) {
- BBInfo TailBBI = BBAnalysis[TailBB->getNumber()];
+ BBInfo &TailBBI = BBAnalysis[TailBB->getNumber()];
bool CanMergeTail = !TailBBI.HasFallThrough;
// There may still be a fall-through edge from BBI1 or BBI2 to TailBB;
// check if there are any other predecessors besides those.
@@ -1356,15 +1403,49 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
return true;
}
+static bool MaySpeculate(const MachineInstr *MI,
+ SmallSet<unsigned, 4> &LaterRedefs,
+ const TargetInstrInfo *TII) {
+ bool SawStore = true;
+ if (!MI->isSafeToMove(TII, 0, SawStore))
+ return false;
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ if (MO.isDef() && !LaterRedefs.count(Reg))
+ return false;
+ }
+
+ return true;
+}
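MaySpeculate's rule, stripped down: a true-side instruction may stay unpredicated only if it is safe to execute unconditionally and every register it defines is clobbered by the false side anyway. A self-contained sketch, with registers reduced to integers:

#include <cstdio>
#include <set>
#include <vector>

// An instruction is reduced to the set of registers it defines. It may stay
// unpredicated when it is safe to execute on either path and all of its defs
// are redefined by the 'false' block anyway, as in MaySpeculate().
bool maySpeculate(const std::vector<int> &Defs, bool SafeToMove,
                  const std::set<int> &LaterRedefs) {
  if (!SafeToMove)
    return false;  // side effects, memory ops, etc. must stay predicated
  for (int Reg : Defs)
    if (!LaterRedefs.count(Reg))
      return false;
  return true;
}

int main() {
  std::set<int> RedefsByFalse = {0};  // the false block redefines r0
  // The subeq/addne example above: the true-side def of r0 is clobbered on
  // the other path, so it can run unconditionally.
  std::printf("%d\n", maySpeculate({0}, true, RedefsByFalse));  // 1
  std::printf("%d\n", maySpeculate({2}, true, RedefsByFalse));  // 0
  return 0;
}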
+
/// PredicateBlock - Predicate instructions from the start of the block to the
/// specified end with the specified condition.
void IfConverter::PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
SmallVectorImpl<MachineOperand> &Cond,
- SmallSet<unsigned, 4> &Redefs) {
+ SmallSet<unsigned, 4> &Redefs,
+ SmallSet<unsigned, 4> *LaterRedefs) {
+ bool AnyUnpred = false;
+ bool MaySpec = LaterRedefs != 0;
for (MachineBasicBlock::iterator I = BBI.BB->begin(); I != E; ++I) {
if (I->isDebugValue() || TII->isPredicated(I))
continue;
+ // It may be possible not to predicate an instruction if it's the 'true'
+ // side of a diamond and the 'false' side may re-define the instruction's
+ // defs.
+ if (MaySpec && MaySpeculate(I, *LaterRedefs, TII)) {
+ AnyUnpred = true;
+ continue;
+ }
+ // If any instruction is predicated, then every instruction after it must
+ // be predicated.
+ MaySpec = false;
if (!TII->PredicateInstruction(I, Cond)) {
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
@@ -1383,6 +1464,8 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
BBI.NonPredSize = 0;
++NumIfConvBBs;
+ if (AnyUnpred)
+ ++NumUnpred;
}
/// CopyAndPredicateBlock - Copy and predicate instructions from source BB to
@@ -1395,9 +1478,8 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
E = FromBBI.BB->end(); I != E; ++I) {
- const MCInstrDesc &MCID = I->getDesc();
// Do not copy the end of the block branches.
- if (IgnoreBr && MCID.isBranch())
+ if (IgnoreBr && I->isBranch())
break;
MachineInstr *MI = MF.CloneMachineInstr(I);
diff --git a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
index 726af46..d5ea666 100644
--- a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -14,14 +14,15 @@
#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
-#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
@@ -173,8 +174,7 @@ private:
void reMaterializeAll();
bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
- bool foldMemoryOperand(MachineBasicBlock::iterator MI,
- const SmallVectorImpl<unsigned> &Ops,
+ bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
MachineInstr *LoadMI = 0);
void insertReload(LiveInterval &NewLI, SlotIndex,
MachineBasicBlock::iterator MI);
@@ -578,7 +578,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
if (isSibling(SrcReg)) {
LiveInterval &SrcLI = LIS.getInterval(SrcReg);
- LiveRange *SrcLR = SrcLI.getLiveRangeContaining(VNI->def.getUseIndex());
+ LiveRange *SrcLR = SrcLI.getLiveRangeContaining(VNI->def.getRegSlot(true));
assert(SrcLR && "Copy from non-existing value");
// Check if this COPY kills its source.
SVI->second.KillsSource = (SrcLR->end == VNI->def);
@@ -644,16 +644,18 @@ void InlineSpiller::analyzeSiblingValues() {
if (VNI->isUnused())
continue;
MachineInstr *DefMI = 0;
+ if (!VNI->isPHIDef()) {
+ DefMI = LIS.getInstructionFromIndex(VNI->def);
+ assert(DefMI && "No defining instruction");
+ }
// Check possible sibling copies.
- if (VNI->isPHIDef() || VNI->getCopy()) {
+ if (VNI->isPHIDef() || DefMI->isCopy()) {
VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
assert(OrigVNI && "Def outside original live range");
if (OrigVNI->def != VNI->def)
DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
}
- if (!DefMI && !VNI->isPHIDef())
- DefMI = LIS.getInstructionFromIndex(VNI->def);
- if (DefMI && Edit->checkRematerializable(VNI, DefMI, TII, AA)) {
+ if (DefMI && Edit->checkRematerializable(VNI, DefMI, AA)) {
DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
<< VNI->def << " may remat from " << *DefMI);
}
@@ -665,8 +667,8 @@ void InlineSpiller::analyzeSiblingValues() {
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
- VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex());
- assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy");
+ VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
+ assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
SibValueMap::iterator I = SibValues.find(VNI);
if (I == SibValues.end())
return false;
@@ -726,7 +728,6 @@ bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
MRI.getRegClass(SVI.SpillReg), &TRI);
--MII; // Point to store instruction.
LIS.InsertMachineInstrInMaps(MII);
- VRM.addSpillSlotUse(StackSlot, MII);
DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);
++NumSpills;
@@ -760,7 +761,7 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
// Find all spills and copies of VNI.
for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
MachineInstr *MI = UI.skipInstruction();) {
- if (!MI->isCopy() && !MI->getDesc().mayStore())
+ if (!MI->isCopy() && !MI->mayStore())
continue;
SlotIndex Idx = LIS.getInstructionIndex(MI);
if (LI->getVNInfoAt(Idx) != VNI)
@@ -770,9 +771,9 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
if (isSibling(DstReg)) {
LiveInterval &DstLI = LIS.getInterval(DstReg);
- VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex());
+ VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
assert(DstVNI && "Missing defined value");
- assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot");
+ assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
WorkList.push_back(std::make_pair(&DstLI, DstVNI));
}
continue;
@@ -811,7 +812,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
- VNInfo *PVNI = LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
+ VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(*PI));
if (PVNI)
WorkList.push_back(std::make_pair(LI, PVNI));
}
@@ -824,7 +825,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
continue;
LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
- VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getUseIndex());
+ VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
assert(SnipVNI && "Snippet undefined before copy");
WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
} while (!WorkList.empty());
@@ -833,7 +834,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
MachineBasicBlock::iterator MI) {
- SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex();
+ SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
if (!ParentVNI) {
@@ -855,7 +856,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
if (SibI != SibValues.end())
RM.OrigMI = SibI->second.DefMI;
- if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) {
+ if (!Edit->canRematerializeAt(RM, UseIdx, false)) {
markValueUsed(&VirtReg, ParentVNI);
DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
return false;
@@ -863,42 +864,37 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
// If the instruction also writes VirtReg.reg, it had better not require the
// same register for uses and defs.
- bool Reads, Writes;
- SmallVector<unsigned, 8> Ops;
- tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
- if (Writes) {
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(Ops[i]);
- if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
- markValueUsed(&VirtReg, ParentVNI);
- DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
- return false;
- }
- }
+ SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
+ MIBundleOperands::RegInfo RI =
+ MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
+ if (RI.Tied) {
+ markValueUsed(&VirtReg, ParentVNI);
+ DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
+ return false;
}
// Before rematerializing into a register for a single instruction, try to
// fold a load into the instruction. That avoids allocating a new register.
- if (RM.OrigMI->getDesc().canFoldAsLoad() &&
- foldMemoryOperand(MI, Ops, RM.OrigMI)) {
+ if (RM.OrigMI->canFoldAsLoad() &&
+ foldMemoryOperand(Ops, RM.OrigMI)) {
Edit->markRematerialized(RM.ParentVNI);
++NumFoldedLoads;
return true;
}
// Allocate a new register for the remat.
- LiveInterval &NewLI = Edit->createFrom(Original, LIS, VRM);
+ LiveInterval &NewLI = Edit->createFrom(Original);
NewLI.markNotSpillable();
// Finally we can rematerialize OrigMI before MI.
SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
- LIS, TII, TRI);
+ TRI);
DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
<< *LIS.getInstructionFromIndex(DefIdx));
// Replace operands
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(Ops[i]);
+ MachineOperand &MO = MI->getOperand(Ops[i].second);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
MO.setReg(NewLI.reg);
MO.setIsKill();
@@ -906,8 +902,8 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
}
DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);
- VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator());
- NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
+ VNInfo *DefVNI = NewLI.getNextValue(DefIdx, LIS.getVNInfoAllocator());
+ NewLI.addRange(LiveRange(DefIdx, UseIdx.getRegSlot(), DefVNI));
DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
++NumRemats;
return true;
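The rewrite above drops readsWritesVirtualRegister() in favor of the bundle-aware MIBundleOperands interface, so each affected operand is now addressed as an (instruction, operand index) pair rather than a bare index into MI. A sketch of consuming such a result, under the same assumptions as the patch:

    SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
    MIBundleOperands::RegInfo RI =
      MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
    // RI.Reads / RI.Writes / RI.Tied summarize the whole bundle.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      // Ops[i].first can differ from MI when MI heads a bundle.
      MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
      (void)MO; // rewrite MO here
    }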
@@ -917,7 +913,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
// analyzeSiblingValues has already tested all relevant defining instructions.
- if (!Edit->anyRematerializable(LIS, TII, AA))
+ if (!Edit->anyRematerializable(AA))
return;
UsedValues.clear();
@@ -929,7 +925,7 @@ void InlineSpiller::reMaterializeAll() {
LiveInterval &LI = LIS.getInterval(Reg);
for (MachineRegisterInfo::use_nodbg_iterator
RI = MRI.use_nodbg_begin(Reg);
- MachineInstr *MI = RI.skipInstruction();)
+ MachineInstr *MI = RI.skipBundle();)
anyRemat |= reMaterializeFor(LI, MI);
}
if (!anyRemat)
@@ -958,7 +954,7 @@ void InlineSpiller::reMaterializeAll() {
if (DeadDefs.empty())
return;
DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
- Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
+ Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
// Get rid of deleted and empty intervals.
for (unsigned i = RegsToSpill.size(); i != 0; --i) {
@@ -970,7 +966,7 @@ void InlineSpiller::reMaterializeAll() {
LiveInterval &LI = LIS.getInterval(Reg);
if (!LI.empty())
continue;
- Edit->eraseVirtReg(Reg, LIS);
+ Edit->eraseVirtReg(Reg);
RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
}
DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
@@ -1008,23 +1004,35 @@ bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
return true;
}
-/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
-/// @param MI Instruction using or defining the current register.
-/// @param Ops Operand indices from readsWritesVirtualRegister().
+/// foldMemoryOperand - Try folding stack slot references in Ops into their
+/// instructions.
+///
+/// @param Ops Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
-/// @return True on success, and MI will be erased.
-bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr *LoadMI) {
+/// @return True on success.
+bool InlineSpiller::
+foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
+ MachineInstr *LoadMI) {
+ if (Ops.empty())
+ return false;
+ // Don't attempt folding in bundles.
+ MachineInstr *MI = Ops.front().first;
+ if (Ops.back().first != MI || MI->isBundled())
+ return false;
+
bool WasCopy = MI->isCopy();
+ unsigned ImpReg = 0;
+
// TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
// operands.
SmallVector<unsigned, 8> FoldOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- unsigned Idx = Ops[i];
+ unsigned Idx = Ops[i].second;
MachineOperand &MO = MI->getOperand(Idx);
- if (MO.isImplicit())
+ if (MO.isImplicit()) {
+ ImpReg = MO.getReg();
continue;
+ }
// FIXME: Teach targets to deal with subregs.
if (MO.getSubReg())
return false;
@@ -1042,13 +1050,24 @@ bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
if (!FoldMI)
return false;
LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
- if (!LoadMI)
- VRM.addSpillSlotUse(StackSlot, FoldMI);
MI->eraseFromParent();
- DEBUG(dbgs() << "\tfolded: " << *FoldMI);
+
+ // TII.foldMemoryOperand may have left some implicit operands on the
+ // instruction. Strip them.
+ if (ImpReg)
+ for (unsigned i = FoldMI->getNumOperands(); i; --i) {
+ MachineOperand &MO = FoldMI->getOperand(i - 1);
+ if (!MO.isReg() || !MO.isImplicit())
+ break;
+ if (MO.getReg() == ImpReg)
+ FoldMI->RemoveOperand(i - 1);
+ }
+
+ DEBUG(dbgs() << "\tfolded: " << LIS.getInstructionIndex(FoldMI) << '\t'
+ << *FoldMI);
if (!WasCopy)
++NumFolded;
- else if (Ops.front() == 0)
+ else if (Ops.front().second == 0)
++NumSpills;
else
++NumReloads;
@@ -1063,11 +1082,9 @@ void InlineSpiller::insertReload(LiveInterval &NewLI,
TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot,
MRI.getRegClass(NewLI.reg), &TRI);
--MI; // Point to load instruction.
- SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
- VRM.addSpillSlotUse(StackSlot, MI);
+ SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getRegSlot();
DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
- VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
- LIS.getVNInfoAllocator());
+ VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, LIS.getVNInfoAllocator());
NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
++NumReloads;
}
@@ -1079,10 +1096,9 @@ void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot,
MRI.getRegClass(NewLI.reg), &TRI);
--MI; // Point to store instruction.
- SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
- VRM.addSpillSlotUse(StackSlot, MI);
+ SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getRegSlot();
DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
- VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
+ VNInfo *StoreVNI = NewLI.getNextValue(Idx, LIS.getVNInfoAllocator());
NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
++NumSpills;
}
@@ -1093,8 +1109,8 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
LiveInterval &OldLI = LIS.getInterval(Reg);
// Iterate over instructions using Reg.
- for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
- MachineInstr *MI = RI.skipInstruction();) {
+ for (MachineRegisterInfo::reg_iterator RegI = MRI.reg_begin(Reg);
+ MachineInstr *MI = RegI.skipBundle();) {
// Debug values are not allowed to affect codegen.
if (MI->isDebugValue()) {
@@ -1123,14 +1139,14 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
continue;
// Analyze instruction.
- bool Reads, Writes;
- SmallVector<unsigned, 8> Ops;
- tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);
+ SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
+ MIBundleOperands::RegInfo RI =
+ MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
// Find the slot index where this instruction reads and writes OldLI.
// This is usually the def slot, except for tied early clobbers.
- SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
- if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getUseIndex()))
+ SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
+ if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
if (SlotIndex::isSameInstr(Idx, VNI->def))
Idx = VNI->def;
@@ -1143,7 +1159,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
SnippetCopies.insert(MI);
continue;
}
- if (Writes) {
+ if (RI.Writes) {
// Hoist the spill of a sib-reg copy.
if (hoistSpill(OldLI, MI)) {
// This COPY is now dead, the value is already in the stack slot.
@@ -1160,24 +1176,24 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
}
// Attempt to fold memory ops.
- if (foldMemoryOperand(MI, Ops))
+ if (foldMemoryOperand(Ops))
continue;
// Allocate interval around instruction.
// FIXME: Infer regclass from instruction alone.
- LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
+ LiveInterval &NewLI = Edit->createFrom(Reg);
NewLI.markNotSpillable();
- if (Reads)
+ if (RI.Reads)
insertReload(NewLI, Idx, MI);
// Rewrite instruction operands.
bool hasLiveDef = false;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(Ops[i]);
+ MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
MO.setReg(NewLI.reg);
if (MO.isUse()) {
- if (!MI->isRegTiedToDefOperand(Ops[i]))
+ if (!Ops[i].first->isRegTiedToDefOperand(Ops[i].second))
MO.setIsKill();
} else {
if (!MO.isDead())
@@ -1187,15 +1203,15 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);
// FIXME: Use a second vreg if instruction has no tied ops.
- if (Writes) {
- if (hasLiveDef)
- insertSpill(NewLI, OldLI, Idx, MI);
- else {
- // This instruction defines a dead value. We don't need to spill it,
- // but do create a live range for the dead value.
- VNInfo *VNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
- NewLI.addRange(LiveRange(Idx, Idx.getNextSlot(), VNI));
- }
+ if (RI.Writes) {
+ if (hasLiveDef)
+ insertSpill(NewLI, OldLI, Idx, MI);
+ else {
+ // This instruction defines a dead value. We don't need to spill it,
+ // but do create a live range for the dead value.
+ VNInfo *VNI = NewLI.getNextValue(Idx, LIS.getVNInfoAllocator());
+ NewLI.addRange(LiveRange(Idx, Idx.getDeadSlot(), VNI));
+ }
}
DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
@@ -1208,7 +1224,7 @@ void InlineSpiller::spillAll() {
if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
StackSlot = VRM.assignVirt2StackSlot(Original);
StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
- StackInt->getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator());
+ StackInt->getNextValue(SlotIndex(), LSS.getVNInfoAllocator());
} else
StackInt = &LSS.getInterval(StackSlot);
@@ -1228,7 +1244,7 @@ void InlineSpiller::spillAll() {
// Hoisted spills may cause dead code.
if (!DeadDefs.empty()) {
DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
- Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
+ Edit->eliminateDeadDefs(DeadDefs, RegsToSpill);
}
// Finally delete the SnippetCopies.
@@ -1237,7 +1253,6 @@ void InlineSpiller::spillAll() {
MachineInstr *MI = RI.skipInstruction();) {
assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
// FIXME: Do this with a LiveRangeEdit callback.
- VRM.RemoveMachineInstrFromMaps(MI);
LIS.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
}
@@ -1245,7 +1260,7 @@ void InlineSpiller::spillAll() {
// Delete all spilled registers.
for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
- Edit->eraseVirtReg(RegsToSpill[i], LIS);
+ Edit->eraseVirtReg(RegsToSpill[i]);
}
void InlineSpiller::spill(LiveRangeEdit &edit) {
@@ -1274,5 +1289,5 @@ void InlineSpiller::spill(LiveRangeEdit &edit) {
if (!RegsToSpill.empty())
spillAll();
- Edit->calculateRegClassAndHint(MF, LIS, Loops);
+ Edit->calculateRegClassAndHint(MF, Loops);
}
diff --git a/contrib/llvm/lib/CodeGen/InterferenceCache.cpp b/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
index 29b47bd..8368b58 100644
--- a/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
+++ b/contrib/llvm/lib/CodeGen/InterferenceCache.cpp
@@ -1,4 +1,4 @@
-//===-- InterferenceCache.h - Caching per-block interference ---*- C++ -*--===//
+//===-- InterferenceCache.cpp - Caching per-block interference ---------*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,6 +15,7 @@
#include "InterferenceCache.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;
@@ -24,13 +25,14 @@ InterferenceCache::BlockInterference InterferenceCache::Cursor::NoInterference;
void InterferenceCache::init(MachineFunction *mf,
LiveIntervalUnion *liuarray,
SlotIndexes *indexes,
+ LiveIntervals *lis,
const TargetRegisterInfo *tri) {
MF = mf;
LIUArray = liuarray;
TRI = tri;
PhysRegEntries.assign(TRI->getNumRegs(), 0);
for (unsigned i = 0; i != CacheEntries; ++i)
- Entries[i].clear(mf, indexes);
+ Entries[i].clear(mf, indexes, lis);
}
InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
@@ -78,7 +80,7 @@ void InterferenceCache::Entry::reset(unsigned physReg,
PhysReg = physReg;
Blocks.resize(MF->getNumBlockIDs());
Aliases.clear();
- for (const unsigned *AS = TRI->getOverlaps(PhysReg); *AS; ++AS) {
+ for (const uint16_t *AS = TRI->getOverlaps(PhysReg); *AS; ++AS) {
LiveIntervalUnion *LIU = LIUArray + *AS;
Aliases.push_back(std::make_pair(LIU, LIU->getTag()));
}
@@ -94,7 +96,7 @@ void InterferenceCache::Entry::reset(unsigned physReg,
bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
const TargetRegisterInfo *TRI) {
unsigned i = 0, e = Aliases.size();
- for (const unsigned *AS = TRI->getOverlaps(PhysReg); *AS; ++AS, ++i) {
+ for (const uint16_t *AS = TRI->getOverlaps(PhysReg); *AS; ++AS, ++i) {
LiveIntervalUnion *LIU = LIUArray + *AS;
if (i == e || Aliases[i].first != LIU)
return false;
@@ -121,6 +123,8 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
MachineFunction::const_iterator MFI = MF->getBlockNumbered(MBBNum);
BlockInterference *BI = &Blocks[MBBNum];
+ ArrayRef<SlotIndex> RegMaskSlots;
+ ArrayRef<const uint32_t*> RegMaskBits;
for (;;) {
BI->Tag = Tag;
BI->First = BI->Last = SlotIndex();
@@ -137,6 +141,18 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
BI->First = StartI;
}
+ // Also check for register mask interference.
+ RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
+ RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
+ SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
+ for (unsigned i = 0, e = RegMaskSlots.size();
+ i != e && RegMaskSlots[i] < Limit; ++i)
+ if (MachineOperand::clobbersPhysReg(RegMaskBits[i], PhysReg)) {
+ // Register mask i clobbers PhysReg before the LIU interference.
+ BI->First = RegMaskSlots[i];
+ break;
+ }
+
PrevPos = Stop;
if (BI->First.isValid())
break;
@@ -166,4 +182,15 @@ void InterferenceCache::Entry::update(unsigned MBBNum) {
if (Backup)
++I;
}
+
+ // Also check for register mask interference.
+ SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
+ for (unsigned i = RegMaskSlots.size();
+ i && RegMaskSlots[i-1].getDeadSlot() > Limit; --i)
+ if (MachineOperand::clobbersPhysReg(RegMaskBits[i-1], PhysReg)) {
+ // Register mask i-1 clobbers PhysReg after the LIU interference.
+ // Model the regmask clobber as a dead def.
+ BI->Last = RegMaskSlots[i-1].getDeadSlot();
+ break;
+ }
}
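Both update() hunks above consult register mask operands. A regmask encodes the registers preserved across a call site as a bit vector, so a clobber shows up as a cleared bit; this sketch mirrors what MachineOperand::clobbersPhysReg is assumed to compute:

    // Illustrative only: one bit per physical register, set = preserved.
    static bool clobbersPhysRegSketch(const uint32_t *Mask, unsigned PhysReg) {
      return !(Mask[PhysReg / 32] & (1u << (PhysReg % 32)));
    }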
diff --git a/contrib/llvm/lib/CodeGen/InterferenceCache.h b/contrib/llvm/lib/CodeGen/InterferenceCache.h
index 4df0a9e..485a325 100644
--- a/contrib/llvm/lib/CodeGen/InterferenceCache.h
+++ b/contrib/llvm/lib/CodeGen/InterferenceCache.h
@@ -18,10 +18,11 @@
namespace llvm {
+class LiveIntervals;
+
class InterferenceCache {
const TargetRegisterInfo *TRI;
LiveIntervalUnion *LIUArray;
- SlotIndexes *Indexes;
MachineFunction *MF;
/// BlockInterference - information about the interference in a single basic
@@ -52,6 +53,9 @@ class InterferenceCache {
/// Indexes - Mapping block numbers to SlotIndex ranges.
SlotIndexes *Indexes;
+ /// LIS - Used for accessing register mask interference maps.
+ LiveIntervals *LIS;
+
/// PrevPos - The previous position the iterators were moved to.
SlotIndex PrevPos;
@@ -71,13 +75,14 @@ class InterferenceCache {
void update(unsigned MBBNum);
public:
- Entry() : PhysReg(0), Tag(0), RefCount(0), Indexes(0) {}
+ Entry() : PhysReg(0), Tag(0), RefCount(0), Indexes(0), LIS(0) {}
- void clear(MachineFunction *mf, SlotIndexes *indexes) {
+ void clear(MachineFunction *mf, SlotIndexes *indexes, LiveIntervals *lis) {
assert(!hasRefs() && "Cannot clear cache entry with references");
PhysReg = 0;
MF = mf;
Indexes = indexes;
+ LIS = lis;
}
unsigned getPhysReg() const { return PhysReg; }
@@ -124,10 +129,10 @@ class InterferenceCache {
Entry *get(unsigned PhysReg);
public:
- InterferenceCache() : TRI(0), LIUArray(0), Indexes(0), MF(0), RoundRobin(0) {}
+ InterferenceCache() : TRI(0), LIUArray(0), MF(0), RoundRobin(0) {}
/// init - Prepare cache for a new function.
- void init(MachineFunction*, LiveIntervalUnion*, SlotIndexes*,
+ void init(MachineFunction*, LiveIntervalUnion*, SlotIndexes*, LiveIntervals*,
const TargetRegisterInfo *);
/// getMaxCursors - Return the maximum number of concurrent cursors that can
diff --git a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 0f92c2d..a9ca42f 100644
--- a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -448,11 +448,6 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::dbg_declare:
break; // Simply strip out debugging intrinsics
- case Intrinsic::eh_exception:
- case Intrinsic::eh_selector:
- CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
- break;
-
case Intrinsic::eh_typeid_for:
// Return something different to eh_selector.
CI->replaceAllUsesWith(ConstantInt::get(CI->getType(), 1));
diff --git a/contrib/llvm/lib/CodeGen/JITCodeEmitter.cpp b/contrib/llvm/lib/CodeGen/JITCodeEmitter.cpp
new file mode 100644
index 0000000..96a5389
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/JITCodeEmitter.cpp
@@ -0,0 +1,14 @@
+//===-- llvm/CodeGen/JITCodeEmitter.cpp - Code emission --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/JITCodeEmitter.h"
+
+using namespace llvm;
+
+void JITCodeEmitter::anchor() { }
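This new file exists solely to give JITCodeEmitter an out-of-line virtual method; the same idiom appears below for LexicalScope. With one non-inline virtual member, the compiler emits the class's vtable in a single object file instead of weakly in every translation unit that includes the header. A sketch of the pattern, with illustrative names:

    class Emitter {
      virtual void anchor();   // declared in the header, never defined inline
    public:
      virtual ~Emitter() {}
    };
    void Emitter::anchor() {}  // one definition pins the vtable to this file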
diff --git a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index 187147a..a1f479a 100644
--- a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -11,82 +11,42 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Scalar.h"
#include "llvm/PassManager.h"
-#include "llvm/Analysis/Passes.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/GCStrategy.h"
-#include "llvm/CodeGen/Passes.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
-namespace llvm {
- bool EnableFastISel;
-}
+// Enable or disable FastISel. Both options are needed, because
+// FastISel is enabled by default with -fast, and we wish to be
+// able to enable or disable fast-isel independently from -O0.
+static cl::opt<cl::boolOrDefault>
+EnableFastISelOption("fast-isel", cl::Hidden,
+ cl::desc("Enable the \"fast\" instruction selector"));
-static cl::opt<bool> DisablePostRA("disable-post-ra", cl::Hidden,
- cl::desc("Disable Post Regalloc"));
-static cl::opt<bool> DisableBranchFold("disable-branch-fold", cl::Hidden,
- cl::desc("Disable branch folding"));
-static cl::opt<bool> DisableTailDuplicate("disable-tail-duplicate", cl::Hidden,
- cl::desc("Disable tail duplication"));
-static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden,
- cl::desc("Disable pre-register allocation tail duplication"));
-static cl::opt<bool> DisableCodePlace("disable-code-place", cl::Hidden,
- cl::desc("Disable code placement"));
-static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
- cl::desc("Disable Stack Slot Coloring"));
-static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden,
- cl::desc("Disable Machine Dead Code Elimination"));
-static cl::opt<bool> DisableMachineLICM("disable-machine-licm", cl::Hidden,
- cl::desc("Disable Machine LICM"));
-static cl::opt<bool> DisableMachineCSE("disable-machine-cse", cl::Hidden,
- cl::desc("Disable Machine Common Subexpression Elimination"));
-static cl::opt<bool> DisablePostRAMachineLICM("disable-postra-machine-licm",
- cl::Hidden,
- cl::desc("Disable Machine LICM"));
-static cl::opt<bool> DisableMachineSink("disable-machine-sink", cl::Hidden,
- cl::desc("Disable Machine Sinking"));
-static cl::opt<bool> DisableLSR("disable-lsr", cl::Hidden,
- cl::desc("Disable Loop Strength Reduction Pass"));
-static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
- cl::desc("Disable Codegen Prepare"));
-static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
- cl::desc("Print LLVM IR produced by the loop-reduce pass"));
-static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
- cl::desc("Print LLVM IR input to isel pass"));
-static cl::opt<bool> PrintGCInfo("print-gc", cl::Hidden,
- cl::desc("Dump garbage collector data"));
static cl::opt<bool> ShowMCEncoding("show-mc-encoding", cl::Hidden,
cl::desc("Show encoding in .s output"));
static cl::opt<bool> ShowMCInst("show-mc-inst", cl::Hidden,
cl::desc("Show instruction structure in .s output"));
-static cl::opt<bool> EnableMCLogging("enable-mc-api-logging", cl::Hidden,
- cl::desc("Enable MC API logging"));
-static cl::opt<bool> VerifyMachineCode("verify-machineinstrs", cl::Hidden,
- cl::desc("Verify generated machine code"),
- cl::init(getenv("LLVM_VERIFY_MACHINEINSTRS")!=NULL));
static cl::opt<cl::boolOrDefault>
AsmVerbose("asm-verbose", cl::desc("Add comments to directives."),
@@ -94,25 +54,20 @@ AsmVerbose("asm-verbose", cl::desc("Add comments to directives."),
static bool getVerboseAsm() {
switch (AsmVerbose) {
- default:
case cl::BOU_UNSET: return TargetMachine::getAsmVerbosityDefault();
case cl::BOU_TRUE: return true;
case cl::BOU_FALSE: return false;
}
+ llvm_unreachable("Invalid verbose asm state");
}
-// Enable or disable FastISel. Both options are needed, because
-// FastISel is enabled by default with -fast, and we wish to be
-// able to enable or disable fast-isel independently from -O0.
-static cl::opt<cl::boolOrDefault>
-EnableFastISelOption("fast-isel", cl::Hidden,
- cl::desc("Enable the \"fast\" instruction selector"));
-
LLVMTargetMachine::LLVMTargetMachine(const Target &T, StringRef Triple,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : TargetMachine(T, Triple, CPU, FS) {
- CodeGenInfo = T.createMCCodeGenInfo(Triple, RM, CM);
+ TargetOptions Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : TargetMachine(T, Triple, CPU, FS, Options) {
+ CodeGenInfo = T.createMCCodeGenInfo(Triple, RM, CM, OL);
AsmInfo = T.createMCAsmInfo(Triple);
// TargetSelect.h moved to a different directory between LLVM 2.9 and 3.0,
// and if the old one gets included then MCAsmInfo will be NULL and
@@ -123,16 +78,88 @@ LLVMTargetMachine::LLVMTargetMachine(const Target &T, StringRef Triple,
"and that InitializeAllTargetMCs() is being invoked!");
}
+/// Turn exception handling constructs into something the code generators can
+/// handle.
+static void addPassesToHandleExceptions(TargetMachine *TM,
+ PassManagerBase &PM) {
+ switch (TM->getMCAsmInfo()->getExceptionHandlingType()) {
+ case ExceptionHandling::SjLj:
+ // SjLj piggy-backs on dwarf for this bit. The cleanups done apply to both.
+ // Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
+ // catch info can get misplaced when a selector ends up more than one block
+ // removed from the parent invoke(s). This could happen when a landing
+ // pad is shared by multiple invokes and is also a target of a normal
+ // edge from elsewhere.
+ PM.add(createSjLjEHPreparePass(TM->getTargetLowering()));
+ // FALLTHROUGH
+ case ExceptionHandling::DwarfCFI:
+ case ExceptionHandling::ARM:
+ case ExceptionHandling::Win64:
+ PM.add(createDwarfEHPass(TM));
+ break;
+ case ExceptionHandling::None:
+ PM.add(createLowerInvokePass(TM->getTargetLowering()));
+
+ // The lower invoke pass may create unreachable code. Remove it.
+ PM.add(createUnreachableBlockEliminationPass());
+ break;
+ }
+}
+
+/// addPassesToX helper drives creation and initialization of TargetPassConfig.
+static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
+ PassManagerBase &PM,
+ bool DisableVerify) {
+ // Targets may override createPassConfig to provide a target-specific subclass.
+ TargetPassConfig *PassConfig = TM->createPassConfig(PM);
+
+ // Set PassConfig options provided by TargetMachine.
+ PassConfig->setDisableVerify(DisableVerify);
+
+ PM.add(PassConfig);
+
+ PassConfig->addIRPasses();
+
+ addPassesToHandleExceptions(TM, PM);
+
+ PassConfig->addISelPrepare();
+
+ // Install a MachineModuleInfo class, which is an immutable pass that holds
+ // all the per-module stuff we're generating, including MCContext.
+ MachineModuleInfo *MMI =
+ new MachineModuleInfo(*TM->getMCAsmInfo(), *TM->getRegisterInfo(),
+ &TM->getTargetLowering()->getObjFileLowering());
+ PM.add(MMI);
+ MCContext *Context = &MMI->getContext(); // Return the MCContext by-ref.
+
+ // Set up a MachineFunction for the rest of CodeGen to work on.
+ PM.add(new MachineFunctionAnalysis(*TM));
+
+ // Enable FastISel with -fast, but allow that to be overridden.
+ if (EnableFastISelOption == cl::BOU_TRUE ||
+ (TM->getOptLevel() == CodeGenOpt::None &&
+ EnableFastISelOption != cl::BOU_FALSE))
+ TM->setFastISel(true);
+
+ // Ask the target for an isel.
+ if (PassConfig->addInstSelector())
+ return NULL;
+
+ PassConfig->addMachinePasses();
+
+ PassConfig->setInitialized();
+
+ return Context;
+}
+
bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
formatted_raw_ostream &Out,
CodeGenFileType FileType,
- CodeGenOpt::Level OptLevel,
bool DisableVerify) {
// Add common CodeGen passes.
- MCContext *Context = 0;
- if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify, Context))
+ MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify);
+ if (!Context)
return true;
- assert(Context != 0 && "Failed to get MCContext");
if (hasMCSaveTempLabels())
Context->setAllowTemporaryLabels(false);
@@ -142,10 +169,11 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
OwningPtr<MCStreamer> AsmStreamer;
switch (FileType) {
- default: return true;
case CGFT_AssemblyFile: {
MCInstPrinter *InstPrinter =
- getTarget().createMCInstPrinter(MAI.getAssemblerDialect(), MAI, STI);
+ getTarget().createMCInstPrinter(MAI.getAssemblerDialect(), MAI,
+ *getInstrInfo(),
+ Context->getRegisterInfo(), STI);
// Create a code emitter if asked to show the encoding.
MCCodeEmitter *MCE = 0;
@@ -160,6 +188,7 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
getVerboseAsm(),
hasMCUseLoc(),
hasMCUseCFI(),
+ hasMCUseDwarfDirectory(),
InstPrinter,
MCE, MAB,
ShowMCInst);
@@ -189,9 +218,6 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
break;
}
- if (EnableMCLogging)
- AsmStreamer.reset(createLoggingStreamer(AsmStreamer.take(), errs()));
-
// Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
FunctionPass *Printer = getTarget().createAsmPrinter(*this, *AsmStreamer);
if (Printer == 0)
@@ -214,14 +240,13 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
///
bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM,
JITCodeEmitter &JCE,
- CodeGenOpt::Level OptLevel,
bool DisableVerify) {
// Add common CodeGen passes.
- MCContext *Ctx = 0;
- if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify, Ctx))
+ MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify);
+ if (!Context)
return true;
- addCodeEmitter(PM, OptLevel, JCE);
+ addCodeEmitter(PM, JCE);
PM.add(createGCInfoDeleter());
return false; // success!
@@ -235,10 +260,10 @@ bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM,
bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
MCContext *&Ctx,
raw_ostream &Out,
- CodeGenOpt::Level OptLevel,
bool DisableVerify) {
// Add common CodeGen passes.
- if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify, Ctx))
+ Ctx = addPassesToGenerateCode(this, PM, DisableVerify);
+ if (!Ctx)
return true;
if (hasMCSaveTempLabels())
@@ -247,7 +272,8 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
- MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(),STI, *Ctx);
+ MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(),STI,
+ *Ctx);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple());
if (MCE == 0 || MAB == 0)
return true;
@@ -271,227 +297,3 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
return false; // success!
}
-
-static void printNoVerify(PassManagerBase &PM, const char *Banner) {
- if (PrintMachineCode)
- PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
-}
-
-static void printAndVerify(PassManagerBase &PM,
- const char *Banner) {
- if (PrintMachineCode)
- PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
-
- if (VerifyMachineCode)
- PM.add(createMachineVerifierPass(Banner));
-}
-
-/// addCommonCodeGenPasses - Add standard LLVM codegen passes used for both
-/// emitting to assembly files or machine code output.
-///
-bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
- bool DisableVerify,
- MCContext *&OutContext) {
- // Standard LLVM-Level Passes.
-
- // Basic AliasAnalysis support.
- // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
- // BasicAliasAnalysis wins if they disagree. This is intended to help
- // support "obvious" type-punning idioms.
- PM.add(createTypeBasedAliasAnalysisPass());
- PM.add(createBasicAliasAnalysisPass());
-
- // Before running any passes, run the verifier to determine if the input
- // coming from the front-end and/or optimizer is valid.
- if (!DisableVerify)
- PM.add(createVerifierPass());
-
- // Run loop strength reduction before anything else.
- if (OptLevel != CodeGenOpt::None && !DisableLSR) {
- PM.add(createLoopStrengthReducePass(getTargetLowering()));
- if (PrintLSR)
- PM.add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
- }
-
- PM.add(createGCLoweringPass());
-
- // Make sure that no unreachable blocks are instruction selected.
- PM.add(createUnreachableBlockEliminationPass());
-
- // Turn exception handling constructs into something the code generators can
- // handle.
- switch (getMCAsmInfo()->getExceptionHandlingType()) {
- case ExceptionHandling::SjLj:
- // SjLj piggy-backs on dwarf for this bit. The cleanups done apply to both
- // Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
- // catch info can get misplaced when a selector ends up more than one block
- // removed from the parent invoke(s). This could happen when a landing
- // pad is shared by multiple invokes and is also a target of a normal
- // edge from elsewhere.
- PM.add(createSjLjEHPass(getTargetLowering()));
- // FALLTHROUGH
- case ExceptionHandling::DwarfCFI:
- case ExceptionHandling::ARM:
- case ExceptionHandling::Win64:
- PM.add(createDwarfEHPass(this));
- break;
- case ExceptionHandling::None:
- PM.add(createLowerInvokePass(getTargetLowering()));
-
- // The lower invoke pass may create unreachable code. Remove it.
- PM.add(createUnreachableBlockEliminationPass());
- break;
- }
-
- if (OptLevel != CodeGenOpt::None && !DisableCGP)
- PM.add(createCodeGenPreparePass(getTargetLowering()));
-
- PM.add(createStackProtectorPass(getTargetLowering()));
-
- addPreISel(PM, OptLevel);
-
- if (PrintISelInput)
- PM.add(createPrintFunctionPass("\n\n"
- "*** Final LLVM Code input to ISel ***\n",
- &dbgs()));
-
- // All passes which modify the LLVM IR are now complete; run the verifier
- // to ensure that the IR is valid.
- if (!DisableVerify)
- PM.add(createVerifierPass());
-
- // Standard Lower-Level Passes.
-
- // Install a MachineModuleInfo class, which is an immutable pass that holds
- // all the per-module stuff we're generating, including MCContext.
- MachineModuleInfo *MMI = new MachineModuleInfo(*getMCAsmInfo(),
- *getRegisterInfo(),
- &getTargetLowering()->getObjFileLowering());
- PM.add(MMI);
- OutContext = &MMI->getContext(); // Return the MCContext specifically by-ref.
-
- // Set up a MachineFunction for the rest of CodeGen to work on.
- PM.add(new MachineFunctionAnalysis(*this, OptLevel));
-
- // Enable FastISel with -fast, but allow that to be overridden.
- if (EnableFastISelOption == cl::BOU_TRUE ||
- (OptLevel == CodeGenOpt::None && EnableFastISelOption != cl::BOU_FALSE))
- EnableFastISel = true;
-
- // Ask the target for an isel.
- if (addInstSelector(PM, OptLevel))
- return true;
-
- // Print the instruction selected machine code...
- printAndVerify(PM, "After Instruction Selection");
-
- // Expand pseudo-instructions emitted by ISel.
- PM.add(createExpandISelPseudosPass());
-
- // Pre-ra tail duplication.
- if (OptLevel != CodeGenOpt::None && !DisableEarlyTailDup) {
- PM.add(createTailDuplicatePass(true));
- printAndVerify(PM, "After Pre-RegAlloc TailDuplicate");
- }
-
- // Optimize PHIs before DCE: removing dead PHI cycles may make more
- // instructions dead.
- if (OptLevel != CodeGenOpt::None)
- PM.add(createOptimizePHIsPass());
-
- // If the target requests it, assign local variables to stack slots relative
- // to one another and simplify frame index references where possible.
- PM.add(createLocalStackSlotAllocationPass());
-
- if (OptLevel != CodeGenOpt::None) {
- // With optimization, dead code should already be eliminated. However
- // there is one known exception: lowered code for arguments that are only
- // used by tail calls, where the tail calls reuse the incoming stack
- // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
- if (!DisableMachineDCE)
- PM.add(createDeadMachineInstructionElimPass());
- printAndVerify(PM, "After codegen DCE pass");
-
- if (!DisableMachineLICM)
- PM.add(createMachineLICMPass());
- if (!DisableMachineCSE)
- PM.add(createMachineCSEPass());
- if (!DisableMachineSink)
- PM.add(createMachineSinkingPass());
- printAndVerify(PM, "After Machine LICM, CSE and Sinking passes");
-
- PM.add(createPeepholeOptimizerPass());
- printAndVerify(PM, "After codegen peephole optimization pass");
- }
-
- // Run pre-ra passes.
- if (addPreRegAlloc(PM, OptLevel))
- printAndVerify(PM, "After PreRegAlloc passes");
-
- // Perform register allocation.
- PM.add(createRegisterAllocator(OptLevel));
- printAndVerify(PM, "After Register Allocation");
-
- // Perform stack slot coloring and post-ra machine LICM.
- if (OptLevel != CodeGenOpt::None) {
- // FIXME: Re-enable coloring with register when it's capable of adding
- // kill markers.
- if (!DisableSSC)
- PM.add(createStackSlotColoringPass(false));
-
- // Run post-ra machine LICM to hoist reloads / remats.
- if (!DisablePostRAMachineLICM)
- PM.add(createMachineLICMPass(false));
-
- printAndVerify(PM, "After StackSlotColoring and postra Machine LICM");
- }
-
- // Run post-ra passes.
- if (addPostRegAlloc(PM, OptLevel))
- printAndVerify(PM, "After PostRegAlloc passes");
-
- PM.add(createExpandPostRAPseudosPass());
- printAndVerify(PM, "After ExpandPostRAPseudos");
-
- // Insert prolog/epilog code. Eliminate abstract frame index references...
- PM.add(createPrologEpilogCodeInserter());
- printAndVerify(PM, "After PrologEpilogCodeInserter");
-
- // Run pre-sched2 passes.
- if (addPreSched2(PM, OptLevel))
- printAndVerify(PM, "After PreSched2 passes");
-
- // Second pass scheduler.
- if (OptLevel != CodeGenOpt::None && !DisablePostRA) {
- PM.add(createPostRAScheduler(OptLevel));
- printAndVerify(PM, "After PostRAScheduler");
- }
-
- // Branch folding must be run after regalloc and prolog/epilog insertion.
- if (OptLevel != CodeGenOpt::None && !DisableBranchFold) {
- PM.add(createBranchFoldingPass(getEnableTailMergeDefault()));
- printNoVerify(PM, "After BranchFolding");
- }
-
- // Tail duplication.
- if (OptLevel != CodeGenOpt::None && !DisableTailDuplicate) {
- PM.add(createTailDuplicatePass(false));
- printNoVerify(PM, "After TailDuplicate");
- }
-
- PM.add(createGCMachineCodeAnalysisPass());
-
- if (PrintGCInfo)
- PM.add(createGCInfoPrinter(dbgs()));
-
- if (OptLevel != CodeGenOpt::None && !DisableCodePlace) {
- PM.add(createCodePlacementOptPass());
- printNoVerify(PM, "After CodePlacementOpt");
- }
-
- if (addPreEmitPass(PM, OptLevel))
- printNoVerify(PM, "After PreEmit passes");
-
- return false;
-}
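The addCommonCodeGenPasses monolith removed above is superseded by addPassesToGenerateCode plus the target-overridable TargetPassConfig, and the OptLevel parameter moves from each call into the TargetMachine itself. A sketch of client code against the refactored entry point, assuming an already-constructed LLVMTargetMachine *TM, an output stream, and a module M (names are illustrative only):

    PassManager PM;
    formatted_raw_ostream FOS(OutStream);
    if (TM->addPassesToEmitFile(PM, FOS, TargetMachine::CGFT_ObjectFile,
                                /*DisableVerify=*/true))
      report_fatal_error("target does not support object file emission");
    PM.run(M); // runs IR passes, isel, and the machine pipeline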
diff --git a/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp b/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
index 0eb009d..deab05a 100644
--- a/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
+++ b/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
@@ -46,7 +46,7 @@ bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
// Finally, just to provide a stable ordering, use the node number as a
// deciding factor.
- return LHSNum < RHSNum;
+ return RHSNum < LHSNum;
}
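The one-character fix above matters because latency_sort feeds a priority queue: the queue pops the element that compares greatest, so returning RHSNum < LHSNum makes the lower node number come out first, giving the intended stable tie-break. The inversion is the same trick as using std::greater to turn std::priority_queue into a min-heap:

    #include <functional>
    #include <queue>
    #include <vector>
    // Illustrative only: a reversed comparison turns a max-queue into a
    // min-queue.
    std::priority_queue<unsigned, std::vector<unsigned>,
                        std::greater<unsigned> > MinQ;
    // MinQ.top() is now the smallest element, because the comparator reports
    // the reversed order -- the same effect "return RHSNum < LHSNum" has here.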
@@ -84,11 +84,11 @@ void LatencyPriorityQueue::push(SUnit *SU) {
}
-// ScheduledNode - As nodes are scheduled, we look to see if there are any
+// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
-void LatencyPriorityQueue::ScheduledNode(SUnit *SU) {
+void LatencyPriorityQueue::scheduledNode(SUnit *SU) {
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
AdjustPriorityOfUnscheduledPreds(I->getSUnit());
diff --git a/contrib/llvm/lib/CodeGen/LexicalScopes.cpp b/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
index a12e1a3..f1abcbb 100644
--- a/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
+++ b/contrib/llvm/lib/CodeGen/LexicalScopes.cpp
@@ -311,6 +311,8 @@ bool LexicalScopes::dominates(DebugLoc DL, MachineBasicBlock *MBB) {
return Result;
}
+void LexicalScope::anchor() { }
+
/// dump - Print data structures.
void LexicalScope::dump() const {
#ifndef NDEBUG
diff --git a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
index 3dfe4c0..2187833 100644
--- a/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveDebugVariables.cpp
@@ -226,7 +226,7 @@ public:
LiveInterval *LI, const VNInfo *VNI,
SmallVectorImpl<SlotIndex> *Kills,
LiveIntervals &LIS, MachineDominatorTree &MDT,
- UserValueScopes &UVS);
+ UserValueScopes &UVS);
/// addDefsFromCopies - The value in LI/LocNo may be copies to other
/// registers. Determine if any of the copies are available at the kill
@@ -468,7 +468,7 @@ bool LDVImpl::collectDebugValues(MachineFunction &mf) {
// DBG_VALUE has no slot index, use the previous instruction instead.
SlotIndex Idx = MBBI == MBB->begin() ?
LIS->getMBBStartIdx(MBB) :
- LIS->getInstructionIndex(llvm::prior(MBBI)).getDefIndex();
+ LIS->getInstructionIndex(llvm::prior(MBBI)).getRegSlot();
// Handle consecutive DBG_VALUE instructions with the same slot index.
do {
if (handleDebugValue(MBBI, Idx)) {
@@ -486,7 +486,7 @@ void UserValue::extendDef(SlotIndex Idx, unsigned LocNo,
LiveInterval *LI, const VNInfo *VNI,
SmallVectorImpl<SlotIndex> *Kills,
LiveIntervals &LIS, MachineDominatorTree &MDT,
- UserValueScopes &UVS) {
+ UserValueScopes &UVS) {
SmallVector<SlotIndex, 16> Todo;
Todo.push_back(Idx);
do {
@@ -575,15 +575,15 @@ UserValue::addDefsFromCopies(LiveInterval *LI, unsigned LocNo,
// Is LocNo extended to reach this copy? If not, another def may be blocking
// it, or we are looking at a wrong value of LI.
SlotIndex Idx = LIS.getInstructionIndex(MI);
- LocMap::iterator I = locInts.find(Idx.getUseIndex());
+ LocMap::iterator I = locInts.find(Idx.getRegSlot(true));
if (!I.valid() || I.value() != LocNo)
continue;
if (!LIS.hasInterval(DstReg))
continue;
LiveInterval *DstLI = &LIS.getInterval(DstReg);
- const VNInfo *DstVNI = DstLI->getVNInfoAt(Idx.getDefIndex());
- assert(DstVNI && DstVNI->def == Idx.getDefIndex() && "Bad copy value");
+ const VNInfo *DstVNI = DstLI->getVNInfoAt(Idx.getRegSlot());
+ assert(DstVNI && DstVNI->def == Idx.getRegSlot() && "Bad copy value");
CopyValues.push_back(std::make_pair(DstLI, DstVNI));
}
@@ -620,7 +620,7 @@ void
UserValue::computeIntervals(MachineRegisterInfo &MRI,
LiveIntervals &LIS,
MachineDominatorTree &MDT,
- UserValueScopes &UVS) {
+ UserValueScopes &UVS) {
SmallVector<std::pair<SlotIndex, unsigned>, 16> Defs;
// Collect all defs to be extended (Skipping undefs).
@@ -841,7 +841,7 @@ bool
UserValue::splitRegister(unsigned OldReg, ArrayRef<LiveInterval*> NewRegs) {
bool DidChange = false;
// Split locations referring to OldReg. Iterate backwards so splitLocation can
- // safely erase unuused locations.
+ // safely erase unused locations.
for (unsigned i = locations.size(); i ; --i) {
unsigned LocNo = i-1;
const MachineOperand *Loc = &locations[LocNo];
@@ -889,8 +889,7 @@ UserValue::rewriteLocations(VirtRegMap &VRM, const TargetRegisterInfo &TRI) {
// index is no longer available. That means the user value is in a
// non-existent sub-register, and %noreg is exactly what we want.
Loc.substPhysReg(VRM.getPhys(VirtReg), TRI);
- } else if (VRM.getStackSlot(VirtReg) != VirtRegMap::NO_STACK_SLOT &&
- VRM.isSpillSlotUsed(VRM.getStackSlot(VirtReg))) {
+ } else if (VRM.getStackSlot(VirtReg) != VirtRegMap::NO_STACK_SLOT) {
// FIXME: Translate SubIdx to a stackslot offset.
Loc = MachineOperand::CreateFI(VRM.getStackSlot(VirtReg));
} else {
@@ -921,8 +920,8 @@ findInsertLocation(MachineBasicBlock *MBB, SlotIndex Idx,
}
// Don't insert anything after the first terminator, though.
- return MI->getDesc().isTerminator() ? MBB->getFirstTerminator() :
- llvm::next(MachineBasicBlock::iterator(MI));
+ return MI->isTerminator() ? MBB->getFirstTerminator() :
+ llvm::next(MachineBasicBlock::iterator(MI));
}
DebugLoc UserValue::findDebugLoc() {
diff --git a/contrib/llvm/lib/CodeGen/LiveInterval.cpp b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
index b69945a..ac18843 100644
--- a/contrib/llvm/lib/CodeGen/LiveInterval.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
@@ -381,37 +381,40 @@ void LiveInterval::join(LiveInterval &Other,
for (unsigned i = 0; i != NumVals; ++i) {
unsigned LHSValID = LHSValNoAssignments[i];
if (i != LHSValID ||
- (NewVNInfo[LHSValID] && NewVNInfo[LHSValID] != getValNumInfo(i)))
+ (NewVNInfo[LHSValID] && NewVNInfo[LHSValID] != getValNumInfo(i))) {
MustMapCurValNos = true;
+ break;
+ }
}
// If we have to apply a mapping to our base interval assignment, rewrite it
// now.
if (MustMapCurValNos) {
// Map the first live range.
+
iterator OutIt = begin();
OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
- ++OutIt;
- for (iterator I = OutIt, E = end(); I != E; ++I) {
- OutIt->valno = NewVNInfo[LHSValNoAssignments[I->valno->id]];
+ for (iterator I = next(OutIt), E = end(); I != E; ++I) {
+ VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
+ assert(nextValNo != 0 && "Huh?");
// If this live range has the same value # as its immediate predecessor,
// and if they are neighbors, remove one LiveRange. This happens when we
- // have [0,3:0)[4,7:1) and map 0/1 onto the same value #.
- if (OutIt->valno == (OutIt-1)->valno && (OutIt-1)->end == OutIt->start) {
- (OutIt-1)->end = OutIt->end;
+ // have [0,4:0)[4,7:1) and map 0/1 onto the same value #.
+ if (OutIt->valno == nextValNo && OutIt->end == I->start) {
+ OutIt->end = I->end;
} else {
- if (I != OutIt) {
+ // Didn't merge. Move OutIt to the next interval.
+ ++OutIt;
+ OutIt->valno = nextValNo;
+ if (OutIt != I) {
OutIt->start = I->start;
OutIt->end = I->end;
}
-
- // Didn't merge, on to the next one.
- ++OutIt;
}
}
-
// If we merge some live ranges, chop off the end.
+ ++OutIt;
ranges.erase(OutIt, end());
}
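The rewritten loop above fixes the merge so that OutIt only advances when a range is kept. In miniature, with illustrative types, the algorithm is the classic in-place coalescing of adjacent ranges that share a value number:

    struct Range { unsigned start, end, valno; };
    // Sketch only; assumes Rs is non-empty.
    static void mergeAdjacent(std::vector<Range> &Rs) {
      std::vector<Range>::iterator Out = Rs.begin();
      for (std::vector<Range>::iterator I = Out + 1, E = Rs.end(); I != E; ++I) {
        if (Out->valno == I->valno && Out->end == I->start)
          Out->end = I->end;  // fuse [a,b) [b,c) into [a,c)
        else
          *++Out = *I;        // keep as a separate range
      }
      Rs.erase(++Out, Rs.end()); // chop off the merged-away tail
    }

So [0,4:0)[4,7:1) with both value numbers mapped onto value 0 collapses to the single range [0,7:0), matching the comment in the hunk.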
@@ -639,8 +642,6 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
OS << "-phidef";
if (vni->hasPHIKill())
OS << "-phikill";
- if (vni->hasRedefByEC())
- OS << "-ec";
}
}
}
@@ -680,15 +681,14 @@ unsigned ConnectedVNInfoEqClasses::Classify(const LiveInterval *LI) {
// Connect to values live out of predecessors.
for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI)
- if (const VNInfo *PVNI =
- LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot()))
+ if (const VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(*PI)))
EqClass.join(VNI->id, PVNI->id);
} else {
// Normal value defined by an instruction. Check for two-addr redef.
// FIXME: This could be coincidental. Should we really check for a tied
// operand constraint?
// Note that VNI->def may be a use slot for an early clobber def.
- if (const VNInfo *UVNI = LI->getVNInfoAt(VNI->def.getPrevSlot()))
+ if (const VNInfo *UVNI = LI->getVNInfoBefore(VNI->def))
EqClass.join(VNI->id, UVNI->id);
}
}
@@ -716,7 +716,7 @@ void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[],
continue;
// DBG_VALUE instructions should have been eliminated earlier.
SlotIndex Idx = LIS.getInstructionIndex(MI);
- Idx = MO.isUse() ? Idx.getUseIndex() : Idx.getDefIndex();
+ Idx = Idx.getRegSlot(MO.isUse());
const VNInfo *VNI = LI.getVNInfoAt(Idx);
assert(VNI && "Interval not live at use.");
MO.setReg(LIV[getEqClass(VNI)]->reg);
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index b1e202a..3ade660 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -15,31 +15,22 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "liveintervals"
+#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
@@ -52,19 +43,14 @@ static cl::opt<bool> DisableReMat("disable-rematerialization",
cl::init(false), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
-STATISTIC(numFolds , "Number of loads/stores folded into instructions");
-STATISTIC(numSplits , "Number of intervals split");
char LiveIntervals::ID = 0;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
"Live Interval Analysis", false, false)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
-INITIALIZE_PASS_DEPENDENCY(PHIElimination)
-INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
-INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
"Live Interval Analysis", false, false)
@@ -74,18 +60,8 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<AliasAnalysis>();
AU.addRequired<LiveVariables>();
AU.addPreserved<LiveVariables>();
- AU.addRequired<MachineLoopInfo>();
- AU.addPreserved<MachineLoopInfo>();
+ AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
-
- if (!StrongPHIElim) {
- AU.addPreservedID(PHIEliminationID);
- AU.addRequiredID(PHIEliminationID);
- }
-
- AU.addRequiredID(TwoAddressInstructionPassID);
- AU.addPreserved<ProcessImplicitDefs>();
- AU.addRequired<ProcessImplicitDefs>();
AU.addPreserved<SlotIndexes>();
AU.addRequiredTransitive<SlotIndexes>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -98,14 +74,12 @@ void LiveIntervals::releaseMemory() {
delete I->second;
r2iMap_.clear();
+ RegMaskSlots.clear();
+ RegMaskBits.clear();
+ RegMaskBlocks.clear();
// Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
VNInfoAllocator.Reset();
- while (!CloneMIs.empty()) {
- MachineInstr *MI = CloneMIs.back();
- CloneMIs.pop_back();
- mf_->DeleteMachineInstr(MI);
- }
}
/// runOnMachineFunction - Register allocate the whole function
@@ -120,6 +94,7 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
lv_ = &getAnalysis<LiveVariables>();
indexes_ = &getAnalysis<SlotIndexes>();
allocatableRegs_ = tri_->getAllocatableSet(fn);
+ reservedRegs_ = tri_->getReservedRegs(fn);
computeIntervals();
@@ -132,10 +107,21 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
OS << "********** INTERVALS **********\n";
- for (const_iterator I = begin(), E = end(); I != E; ++I) {
- I->second->print(OS, tri_);
- OS << "\n";
- }
+
+ // Dump the physregs.
+ for (unsigned Reg = 1, RegE = tri_->getNumRegs(); Reg != RegE; ++Reg)
+ if (const LiveInterval *LI = r2iMap_.lookup(Reg)) {
+ LI->print(OS, tri_);
+ OS << '\n';
+ }
+
+ // Dump the virtregs.
+ for (unsigned Reg = 0, RegE = mri_->getNumVirtRegs(); Reg != RegE; ++Reg)
+ if (const LiveInterval *LI =
+ r2iMap_.lookup(TargetRegisterInfo::index2VirtReg(Reg))) {
+ LI->print(OS, tri_);
+ OS << '\n';
+ }
printInstrs(OS);
}
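The new print loops walk physical and virtual registers separately because the two live in disjoint number spaces. The mapping assumed by index2VirtReg is a tag bit in the MSB, sketched here with illustrative names:

    static unsigned index2VirtRegSketch(unsigned Index) {
      return Index | (1u << 31); // virtual register numbers have the top bit set
    }
    static bool isVirtualRegSketch(unsigned Reg) {
      return int(Reg) < 0;       // top bit set <=> virtual
    }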
@@ -149,103 +135,6 @@ void LiveIntervals::dumpInstrs() const {
printInstrs(dbgs());
}
-bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
- VirtRegMap &vrm, unsigned reg) {
- // We don't handle fancy stuff crossing basic block boundaries
- if (li.ranges.size() != 1)
- return true;
- const LiveRange &range = li.ranges.front();
- SlotIndex idx = range.start.getBaseIndex();
- SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
-
- // Skip deleted instructions
- MachineInstr *firstMI = getInstructionFromIndex(idx);
- while (!firstMI && idx != end) {
- idx = idx.getNextIndex();
- firstMI = getInstructionFromIndex(idx);
- }
- if (!firstMI)
- return false;
-
- // Find last instruction in range
- SlotIndex lastIdx = end.getPrevIndex();
- MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
- while (!lastMI && lastIdx != idx) {
- lastIdx = lastIdx.getPrevIndex();
- lastMI = getInstructionFromIndex(lastIdx);
- }
- if (!lastMI)
- return false;
-
- // Range cannot cross basic block boundaries or terminators
- MachineBasicBlock *MBB = firstMI->getParent();
- if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
- return true;
-
- MachineBasicBlock::const_iterator E = lastMI;
- ++E;
- for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
- const MachineInstr &MI = *I;
-
- // Allow copies to and from li.reg
- if (MI.isCopy())
- if (MI.getOperand(0).getReg() == li.reg ||
- MI.getOperand(1).getReg() == li.reg)
- continue;
-
- // Check for operands using reg
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand& mop = MI.getOperand(i);
- if (!mop.isReg())
- continue;
- unsigned PhysReg = mop.getReg();
- if (PhysReg == 0 || PhysReg == li.reg)
- continue;
- if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
- if (!vrm.hasPhys(PhysReg))
- continue;
- PhysReg = vrm.getPhys(PhysReg);
- }
- if (PhysReg && tri_->regsOverlap(PhysReg, reg))
- return true;
- }
- }
-
- // No conflicts found.
- return false;
-}
-
-bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
- SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
- for (LiveInterval::Ranges::const_iterator
- I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
- for (SlotIndex index = I->start.getBaseIndex(),
- end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
- index != end;
- index = index.getNextIndex()) {
- MachineInstr *MI = getInstructionFromIndex(index);
- if (!MI)
- continue; // skip deleted instructions
-
- if (JoinedCopies.count(MI))
- continue;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg())
- continue;
- unsigned PhysReg = MO.getReg();
- if (PhysReg == 0 || PhysReg == Reg ||
- TargetRegisterInfo::isVirtualRegister(PhysReg))
- continue;
- if (tri_->regsOverlap(Reg, PhysReg))
- return true;
- }
- }
- }
-
- return false;
-}
-
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
unsigned Reg = MI.getOperand(MOIdx).getReg();
@@ -271,9 +160,9 @@ bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
if (!MO.getSubReg() || MO.isEarlyClobber())
return false;
- SlotIndex RedefIndex = MIIdx.getDefIndex();
+ SlotIndex RedefIndex = MIIdx.getRegSlot();
const LiveRange *OldLR =
- interval.getLiveRangeContaining(RedefIndex.getUseIndex());
+ interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
if (DefMI != 0) {
return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
@@ -296,34 +185,13 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
if (interval.empty()) {
// Get the Idx of the defining instructions.
- SlotIndex defIndex = MIIdx.getDefIndex();
- // Earlyclobbers move back one, so that they overlap the live range
- // of inputs.
- if (MO.isEarlyClobber())
- defIndex = MIIdx.getUseIndex();
-
- // Make sure the first definition is not a partial redefinition. Add an
- // <imp-def> of the full register.
- // FIXME: LiveIntervals shouldn't modify the code like this. Whoever
- // created the machine instruction should annotate it with <undef> flags
- // as needed. Then we can simply assert here. The REG_SEQUENCE lowering
- // is the main suspect.
- if (MO.getSubReg()) {
- mi->addRegisterDefined(interval.reg);
- // Mark all defs of interval.reg on this instruction as reading <undef>.
- for (unsigned i = MOIdx, e = mi->getNumOperands(); i != e; ++i) {
- MachineOperand &MO2 = mi->getOperand(i);
- if (MO2.isReg() && MO2.getReg() == interval.reg && MO2.getSubReg())
- MO2.setIsUndef();
- }
- }
+ SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
- MachineInstr *CopyMI = NULL;
- if (mi->isCopyLike()) {
- CopyMI = mi;
- }
+ // Make sure the first definition is not a partial redefinition.
+ assert(!MO.readsReg() && "First def cannot also read virtual register "
+ "missing <undef> flag?");
- VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
+ VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
assert(ValNo->id == 0 && "First value in interval is not 0?");
// Loop over all of the blocks that the vreg is defined in. There are
@@ -334,9 +202,9 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// FIXME: what about dead vars?
SlotIndex killIdx;
if (vi.Kills[0] != mi)
- killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
+ killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot();
else
- killIdx = defIndex.getStoreIndex();
+ killIdx = defIndex.getDeadSlot();
// If the kill happens after the definition, we have an intra-block
// live range.
@@ -384,14 +252,14 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
MachineInstr *Kill = vi.Kills[i];
SlotIndex Start = getMBBStartIdx(Kill->getParent());
- SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
+ SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot();
// Create interval with one of a NEW value number. Note that this value
// number isn't actually defined by an instruction, weird huh? :)
if (PHIJoin) {
assert(getInstructionFromIndex(Start) == 0 &&
"PHI def index points at actual instruction.");
- ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
+ ValNo = interval.getNextValue(Start, VNInfoAllocator);
ValNo->setIsPHIDef(true);
}
LiveRange LR(Start, killIdx, ValNo);
@@ -422,14 +290,12 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// are actually two values in the live interval. Because of this we
// need to take the LiveRegion that defines this register and split it
// into two values.
- SlotIndex RedefIndex = MIIdx.getDefIndex();
- if (MO.isEarlyClobber())
- RedefIndex = MIIdx.getUseIndex();
+ SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
const LiveRange *OldLR =
- interval.getLiveRangeContaining(RedefIndex.getUseIndex());
+ interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
VNInfo *OldValNo = OldLR->valno;
- SlotIndex DefIndex = OldValNo->def.getDefIndex();
+ SlotIndex DefIndex = OldValNo->def.getRegSlot();
// Delete the previous value, which should be short and continuous,
// because the 2-addr copy must be in the same MBB as the redef.
@@ -440,12 +306,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
// Value#0 is now defined by the 2-addr instruction.
- OldValNo->def = RedefIndex;
- OldValNo->setCopy(0);
-
- // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
- if (PartReDef && mi->isCopyLike())
- OldValNo->setCopy(&*mi);
+ OldValNo->def = RedefIndex;
// Add the new live interval which replaces the range for the input copy.
LiveRange LR(DefIndex, RedefIndex, ValNo);
@@ -455,7 +316,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// If this redefinition is dead, we need to add a dummy unit live
// range covering the def slot.
if (MO.isDead())
- interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
+ interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(),
OldValNo));
DEBUG({
@@ -467,15 +328,11 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// live until the end of the block. We've already taken care of the
// rest of the live range.
- SlotIndex defIndex = MIIdx.getDefIndex();
+ SlotIndex defIndex = MIIdx.getRegSlot();
if (MO.isEarlyClobber())
- defIndex = MIIdx.getUseIndex();
+ defIndex = MIIdx.getRegSlot(true);
- VNInfo *ValNo;
- MachineInstr *CopyMI = NULL;
- if (mi->isCopyLike())
- CopyMI = mi;
- ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
+ VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
SlotIndex killIndex = getMBBEndIdx(mbb);
LiveRange LR(defIndex, killIndex, ValNo);
@@ -490,21 +347,26 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
DEBUG(dbgs() << '\n');
}
+static bool isRegLiveIntoSuccessor(const MachineBasicBlock *MBB, unsigned Reg) {
+ for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end();
+ SI != SE; ++SI) {
+ const MachineBasicBlock* succ = *SI;
+ if (succ->isLiveIn(Reg))
+ return true;
+ }
+ return false;
+}
+
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator mi,
SlotIndex MIIdx,
MachineOperand& MO,
- LiveInterval &interval,
- MachineInstr *CopyMI) {
- // A physical register cannot be live across basic block, so its
- // lifetime must end somewhere in its defining basic block.
+ LiveInterval &interval) {
DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
SlotIndex baseIndex = MIIdx;
- SlotIndex start = baseIndex.getDefIndex();
- // Earlyclobbers move back one.
- if (MO.isEarlyClobber())
- start = MIIdx.getUseIndex();
+ SlotIndex start = baseIndex.getRegSlot(MO.isEarlyClobber());
SlotIndex end = start;
// If it is not used after definition, it is considered dead at
@@ -514,7 +376,7 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
// advance below compensates.
if (MO.isDead()) {
DEBUG(dbgs() << " dead");
- end = start.getStoreIndex();
+ end = start.getDeadSlot();
goto exit;
}
@@ -531,21 +393,21 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(dbgs() << " killed");
- end = baseIndex.getDefIndex();
+ end = baseIndex.getRegSlot();
goto exit;
} else {
int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
if (DefIdx != -1) {
if (mi->isRegTiedToUseOperand(DefIdx)) {
// Two-address instruction.
- end = baseIndex.getDefIndex();
+ end = baseIndex.getRegSlot(mi->getOperand(DefIdx).isEarlyClobber());
} else {
// Another instruction redefines the register before it is ever read.
// Then the register is essentially dead at the instruction that
// defines it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
DEBUG(dbgs() << " dead");
- end = start.getStoreIndex();
+ end = start.getDeadSlot();
}
goto exit;
}
@@ -554,12 +416,19 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
baseIndex = baseIndex.getNextIndex();
}
- // The only case we should have a dead physreg here without a killing or
- // instruction where we know it's dead is if it is live-in to the function
- // and never used. Another possible case is the implicit use of the
- // physical register has been deleted by two-address pass.
- end = start.getStoreIndex();
+ // If we get here the register *should* be live out.
+ assert(!isAllocatable(interval.reg) && "Physregs shouldn't be live out!");
+ // FIXME: We need saner rules for reserved regs.
+ if (isReserved(interval.reg)) {
+ end = start.getDeadSlot();
+ } else {
+ // Unreserved, unallocatable registers like EFLAGS can be live across basic
+ // block boundaries.
+ assert(isRegLiveIntoSuccessor(MBB, interval.reg) &&
+ "Unreserved reg not live-out?");
+ end = getMBBEndIdx(MBB);
+ }
exit:
assert(start < end && "did not find end of interval?");
@@ -567,9 +436,7 @@ exit:
VNInfo *ValNo = interval.getVNInfoAt(start);
bool Extend = ValNo != 0;
if (!Extend)
- ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
- if (Extend && MO.isEarlyClobber())
- ValNo->setHasRedefByEC(true);
+ ValNo = interval.getNextValue(start, VNInfoAllocator);
LiveRange LR(start, end, ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR << '\n');
@@ -583,18 +450,20 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
getOrCreateInterval(MO.getReg()));
- else {
- MachineInstr *CopyMI = NULL;
- if (MI->isCopyLike())
- CopyMI = MI;
+ else
handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
- getOrCreateInterval(MO.getReg()), CopyMI);
- }
+ getOrCreateInterval(MO.getReg()));
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
SlotIndex MIIdx,
- LiveInterval &interval, bool isAlias) {
+ LiveInterval &interval) {
+ assert(TargetRegisterInfo::isPhysicalRegister(interval.reg) &&
+ "Only physical registers can be live in.");
+ assert((!isAllocatable(interval.reg) || MBB == &MBB->getParent()->front() ||
+ MBB->isLandingPad()) &&
+ "Allocatable live-ins only valid for entry blocks and landing pads.");
+
DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
// Look for kills, if it reaches a def before it's killed, then it shouldn't
@@ -621,16 +490,16 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
while (mi != E) {
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(dbgs() << " killed");
- end = baseIndex.getDefIndex();
+ end = baseIndex.getRegSlot();
SeenDefUse = true;
break;
- } else if (mi->definesRegister(interval.reg, tri_)) {
+ } else if (mi->modifiesRegister(interval.reg, tri_)) {
// Another instruction redefines the register before it is ever read.
// Then the register is essentially dead at the instruction that defines
// it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
DEBUG(dbgs() << " dead");
- end = start.getStoreIndex();
+ end = start.getDeadSlot();
SeenDefUse = true;
break;
}
@@ -644,10 +513,16 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
// Live-in register might not be used at all.
if (!SeenDefUse) {
- if (isAlias) {
+ if (isAllocatable(interval.reg) ||
+ !isRegLiveIntoSuccessor(MBB, interval.reg)) {
+ // Allocatable registers are never live through.
+ // Non-allocatable registers that aren't live into any successors also
+ // aren't live through.
DEBUG(dbgs() << " dead");
- end = MIIdx.getStoreIndex();
+ return;
} else {
+ // If we get here the register is non-allocatable and live into some
+ // successor. We'll conservatively assume it's live-through.
DEBUG(dbgs() << " live through");
end = getMBBEndIdx(MBB);
}
@@ -656,8 +531,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
SlotIndex defIdx = getMBBStartIdx(MBB);
assert(getInstructionFromIndex(defIdx) == 0 &&
"PHI def index points at actual instruction.");
- VNInfo *vni =
- interval.getNextValue(defIdx, 0, VNInfoAllocator);
+ VNInfo *vni = interval.getNextValue(defIdx, VNInfoAllocator);
vni->setIsPHIDef(true);
LiveRange LR(start, end, vni);
@@ -674,10 +548,14 @@ void LiveIntervals::computeIntervals() {
<< "********** Function: "
<< ((Value*)mf_->getFunction())->getName() << '\n');
+ RegMaskBlocks.resize(mf_->getNumBlockIDs());
+
SmallVector<unsigned, 8> UndefUses;
for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
+ RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size();
+
if (MBB->empty())
continue;
@@ -690,11 +568,6 @@ void LiveIntervals::computeIntervals() {
for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
LE = MBB->livein_end(); LI != LE; ++LI) {
handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
- // Multiple live-ins can alias the same register.
- for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
- if (!hasInterval(*AS))
- handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
- true);
}
// Skip over empty initial indices.
@@ -706,10 +579,20 @@ void LiveIntervals::computeIntervals() {
DEBUG(dbgs() << MIIndex << "\t" << *MI);
if (MI->isDebugValue())
continue;
+ assert(indexes_->getInstructionFromIndex(MIIndex) == MI &&
+ "Lost SlotIndex synchronization");
// Handle defs.
for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
MachineOperand &MO = MI->getOperand(i);
+
+ // Collect register masks.
+ if (MO.isRegMask()) {
+ RegMaskSlots.push_back(MIIndex.getRegSlot());
+ RegMaskBits.push_back(MO.getRegMask());
+ continue;
+ }
+
if (!MO.isReg() || !MO.getReg())
continue;
@@ -723,6 +606,10 @@ void LiveIntervals::computeIntervals() {
// Move to the next instr slot.
MIIndex = indexes_->getNextNonNullIndex(MIIndex);
}
+
+ // Compute the number of register mask instructions in this block.
+ std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
+ RMB.second = RegMaskSlots.size() - RMB.first;
}
// Create empty intervals for registers defined by implicit_def's (except
@@ -754,7 +641,7 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead) {
DEBUG(dbgs() << "Shrink: " << *li << '\n');
assert(TargetRegisterInfo::isVirtualRegister(li->reg)
- && "Can't only shrink physical registers");
+ && "Can only shrink virtual registers");
// Find all the values used, including PHI kills.
SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;
@@ -766,8 +653,10 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
MachineInstr *UseMI = I.skipInstruction();) {
if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
continue;
- SlotIndex Idx = getInstructionIndex(UseMI).getUseIndex();
- VNInfo *VNI = li->getVNInfoAt(Idx);
+ SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
+ // Note: This intentionally picks up the wrong VNI in case of an EC redef.
+ // See below.
+ VNInfo *VNI = li->getVNInfoBefore(Idx);
if (!VNI) {
// This shouldn't happen: readsVirtualRegister returns true, but there is
// no live value. It is likely caused by a target getting <undef> flags
@@ -777,11 +666,12 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
<< *li << '\n');
continue;
}
- if (VNI->def == Idx) {
- // Special case: An early-clobber tied operand reads and writes the
- // register one slot early.
- Idx = Idx.getPrevSlot();
- VNI = li->getVNInfoAt(Idx);
+ // Special case: An early-clobber tied operand reads and writes the
+ // register one slot early. The getVNInfoBefore call above would have
+ // picked up the value defined by UseMI. Adjust the kill slot and value.
+ if (SlotIndex::isSameInstr(VNI->def, Idx)) {
+ Idx = VNI->def;
+ VNI = li->getVNInfoBefore(Idx);
assert(VNI && "Early-clobber tied value not available");
}
WorkList.push_back(std::make_pair(Idx, VNI));
@@ -794,14 +684,7 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
VNInfo *VNI = *I;
if (VNI->isUnused())
continue;
- NewLI.addRange(LiveRange(VNI->def, VNI->def.getNextSlot(), VNI));
-
- // A use tied to an early-clobber def ends at the load slot and isn't caught
- // above. Catch it here instead. This probably only ever happens for inline
- // assembly.
- if (VNI->def.isUse())
- if (VNInfo *UVNI = li->getVNInfoAt(VNI->def.getLoadIndex()))
- WorkList.push_back(std::make_pair(VNI->def.getLoadIndex(), UVNI));
+ NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
}
// Keep track of the PHIs that are in use.
@@ -812,11 +695,11 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
SlotIndex Idx = WorkList.back().first;
VNInfo *VNI = WorkList.back().second;
WorkList.pop_back();
- const MachineBasicBlock *MBB = getMBBFromIndex(Idx);
+ const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
SlotIndex BlockStart = getMBBStartIdx(MBB);
// Extend the live range for VNI to be live at Idx.
- if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx.getNextSlot())) {
+ if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
(void)ExtVNI;
assert(ExtVNI == VNI && "Unexpected existing value number");
// Is this a PHIDef we haven't seen before?
@@ -827,9 +710,9 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
PE = MBB->pred_end(); PI != PE; ++PI) {
if (!LiveOut.insert(*PI))
continue;
- SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
+ SlotIndex Stop = getMBBEndIdx(*PI);
// A predecessor is not required to have a live-out value for a PHI.
- if (VNInfo *PVNI = li->getVNInfoAt(Stop))
+ if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
WorkList.push_back(std::make_pair(Stop, PVNI));
}
continue;
@@ -837,15 +720,16 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
// VNI is live-in to MBB.
DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
- NewLI.addRange(LiveRange(BlockStart, Idx.getNextSlot(), VNI));
+ NewLI.addRange(LiveRange(BlockStart, Idx, VNI));
// Make sure VNI is live-out from the predecessors.
for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
if (!LiveOut.insert(*PI))
continue;
- SlotIndex Stop = getMBBEndIdx(*PI).getPrevSlot();
- assert(li->getVNInfoAt(Stop) == VNI && "Wrong value out of predecessor");
+ SlotIndex Stop = getMBBEndIdx(*PI);
+ assert(li->getVNInfoBefore(Stop) == VNI &&
+ "Wrong value out of predecessor");
WorkList.push_back(std::make_pair(Stop, VNI));
}
}
@@ -859,7 +743,7 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
continue;
LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
assert(LII != NewLI.end() && "Missing live range for PHI");
- if (LII->end != VNI->def.getNextSlot())
+ if (LII->end != VNI->def.getDeadSlot())
continue;
if (VNI->isPHIDef()) {
// This is a dead PHI. Remove it.
@@ -890,28 +774,6 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li,
// Register allocator hooks.
//
-MachineBasicBlock::iterator
-LiveIntervals::getLastSplitPoint(const LiveInterval &li,
- MachineBasicBlock *mbb) const {
- const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
-
- // If li is not live into a landing pad, we can insert spill code before the
- // first terminator.
- if (!lpad || !isLiveInToMBB(li, lpad))
- return mbb->getFirstTerminator();
-
- // When there is a landing pad, spill code must go before the call instruction
- // that can throw.
- MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
- while (I != B) {
- --I;
- if (I->getDesc().isCall())
- return I;
- }
- // The block contains no calls that can throw, so use the first terminator.
- return mbb->getFirstTerminator();
-}
-
void LiveIntervals::addKillFlags() {
for (iterator I = begin(), E = end(); I != E; ++I) {
unsigned Reg = I->first;
@@ -924,8 +786,8 @@ void LiveIntervals::addKillFlags() {
// Every instruction that kills Reg corresponds to a live range end point.
for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
++RI) {
- // A LOAD index indicates an MBB edge.
- if (RI->end.isLoad())
+ // A block index indicates an MBB edge.
+ if (RI->end.isBlock())
continue;
MachineInstr *MI = getInstructionFromIndex(RI->end);
if (!MI)
@@ -949,16 +811,10 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
if (Reg == 0 || Reg == li.reg)
continue;
- if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
- !allocatableRegs_[Reg])
+ if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isAllocatable(Reg))
continue;
- // FIXME: For now, only remat MI with at most one register operand.
- assert(!RegOp &&
- "Can't rematerialize instruction with multiple register operand!");
RegOp = MO.getReg();
-#ifndef NDEBUG
- break;
-#endif
+ break; // Found vreg operand - leave the loop.
}
return RegOp;
}
@@ -1011,14 +867,6 @@ LiveIntervals::isReMaterializable(const LiveInterval &li,
return true;
}
-/// isReMaterializable - Returns true if the definition MI of the specified
-/// val# of the specified interval is re-materializable.
-bool LiveIntervals::isReMaterializable(const LiveInterval &li,
- const VNInfo *ValNo, MachineInstr *MI) {
- bool Dummy2;
- return isReMaterializable(li, ValNo, MI, 0, Dummy2);
-}
-
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool
@@ -1044,1141 +892,653 @@ LiveIntervals::isReMaterializable(const LiveInterval &li,
return true;
}
-/// FilterFoldedOps - Filter out two-address use operands. Return
-/// true if it finds any issue with the operands that ought to prevent
-/// folding.
-static bool FilterFoldedOps(MachineInstr *MI,
- SmallVector<unsigned, 2> &Ops,
- unsigned &MRInfo,
- SmallVector<unsigned, 2> &FoldOps) {
- MRInfo = 0;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
- unsigned OpIdx = Ops[i];
- MachineOperand &MO = MI->getOperand(OpIdx);
- // FIXME: fold subreg use.
- if (MO.getSubReg())
- return true;
- if (MO.isDef())
- MRInfo |= (unsigned)VirtRegMap::isMod;
- else {
- // Filter out two-address use operand(s).
- if (MI->isRegTiedToDefOperand(OpIdx)) {
- MRInfo = VirtRegMap::isModRef;
- continue;
- }
- MRInfo |= (unsigned)VirtRegMap::isRef;
- }
- FoldOps.push_back(OpIdx);
- }
- return false;
+MachineBasicBlock*
+LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
+ // A local live range must be fully contained inside the block, meaning it is
+ // defined and killed at instructions, not at block boundaries. It is not
+ // live in or out of any block.
+ //
+ // It is technically possible to have a PHI-defined live range identical to a
+ // single block, but we are going to return NULL in that case.
+
+ SlotIndex Start = LI.beginIndex();
+ if (Start.isBlock())
+ return NULL;
+
+ SlotIndex Stop = LI.endIndex();
+ if (Stop.isBlock())
+ return NULL;
+
+ // getMBBFromIndex doesn't need to search the MBB table when both indexes
+ // belong to proper instructions.
+ MachineBasicBlock *MBB1 = indexes_->getMBBFromIndex(Start);
+ MachineBasicBlock *MBB2 = indexes_->getMBBFromIndex(Stop);
+ return MBB1 == MBB2 ? MBB1 : NULL;
}
+float
+LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
+ // Limit the loop depth ridiculousness.
+ if (loopDepth > 200)
+ loopDepth = 200;
-/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
-/// slot / to reg or any rematerialized load into ith operand of specified
-/// MI. If it is successul, MI is updated with the newly created MI and
-/// returns true.
-bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
- VirtRegMap &vrm, MachineInstr *DefMI,
- SlotIndex InstrIdx,
- SmallVector<unsigned, 2> &Ops,
- bool isSS, int Slot, unsigned Reg) {
- // If it is an implicit def instruction, just delete it.
- if (MI->isImplicitDef()) {
- RemoveMachineInstrFromMaps(MI);
- vrm.RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
- ++numFolds;
- return true;
- }
+ // The loop depth is used to roughly estimate the number of times the
+ // instruction is executed. Something like 10^d is simple, but will quickly
+ // overflow a float. This expression behaves like 10^d for small d, but is
+ // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
+ // headroom before overflow.
+ // By the way, powf() might be unavailable here. For consistency, we take
+ // pow(double, double) instead.
+ float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
- // Filter the list of operand indexes that are to be folded. Abort if
- // any operand will prevent folding.
- unsigned MRInfo = 0;
- SmallVector<unsigned, 2> FoldOps;
- if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
- return false;
+ return (isDef + isUse) * lc;
+}
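[Plugging small depths into lc = (1 + 100/(d+10))^d shows the tempering the comment describes; values below are rounded:

    d = 0   ->  1.0        // weight is just isDef + isUse
    d = 1   ->  ~10.1      // close to 10^1
    d = 2   ->  ~87        // a bit under 10^2
    d = 200 ->  ~6.7e33    // well below FLT_MAX (~3.4e38)
]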
- // The only time it's safe to fold into a two address instruction is when
- // it's folding reload and spill from / into a spill stack slot.
- if (DefMI && (MRInfo & VirtRegMap::isMod))
- return false;
+LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
+ MachineInstr* startInst) {
+ LiveInterval& Interval = getOrCreateInterval(reg);
+ VNInfo* VN = Interval.getNextValue(
+ SlotIndex(getInstructionIndex(startInst).getRegSlot()),
+ getVNInfoAllocator());
+ VN->setHasPHIKill(true);
+ LiveRange LR(
+ SlotIndex(getInstructionIndex(startInst).getRegSlot()),
+ getMBBEndIdx(startInst->getParent()), VN);
+ Interval.addRange(LR);
- MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
- : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
- if (fmi) {
- // Remember this instruction uses the spill slot.
- if (isSS) vrm.addSpillSlotUse(Slot, fmi);
-
- // Attempt to fold the memory reference into the instruction. If
- // we can do this, we don't need to insert spill code.
- if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
- vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
- vrm.transferSpillPts(MI, fmi);
- vrm.transferRestorePts(MI, fmi);
- vrm.transferEmergencySpills(MI, fmi);
- ReplaceMachineInstrInMaps(MI, fmi);
- MI->eraseFromParent();
- MI = fmi;
- ++numFolds;
- return true;
- }
- return false;
+ return LR;
}
-/// canFoldMemoryOperand - Returns true if the specified load / store
-/// folding is possible.
-bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
- SmallVector<unsigned, 2> &Ops,
- bool ReMat) const {
- // Filter the list of operand indexes that are to be folded. Abort if
- // any operand will prevent folding.
- unsigned MRInfo = 0;
- SmallVector<unsigned, 2> FoldOps;
- if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
+
+//===----------------------------------------------------------------------===//
+// Register mask functions
+//===----------------------------------------------------------------------===//
+
+bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
+ BitVector &UsableRegs) {
+ if (LI.empty())
return false;
+ LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();
+
+ // Use smaller arrays for local live ranges.
+ ArrayRef<SlotIndex> Slots;
+ ArrayRef<const uint32_t*> Bits;
+ if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
+ Slots = getRegMaskSlotsInBlock(MBB->getNumber());
+ Bits = getRegMaskBitsInBlock(MBB->getNumber());
+ } else {
+ Slots = getRegMaskSlots();
+ Bits = getRegMaskBits();
+ }
- // It's only legal to remat for a use, not a def.
- if (ReMat && (MRInfo & VirtRegMap::isMod))
+ // We are going to enumerate all the register mask slots contained in LI.
+ // Start with a binary search of RegMaskSlots to find a starting point.
+ ArrayRef<SlotIndex>::iterator SlotI =
+ std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
+ ArrayRef<SlotIndex>::iterator SlotE = Slots.end();
+
+ // No slots in range, LI begins after the last call.
+ if (SlotI == SlotE)
return false;
- return tii_->canFoldMemoryOperand(MI, FoldOps);
+ bool Found = false;
+ for (;;) {
+ assert(*SlotI >= LiveI->start);
+ // Loop over all slots overlapping this segment.
+ while (*SlotI < LiveI->end) {
+ // *SlotI overlaps LI. Collect mask bits.
+ if (!Found) {
+ // This is the first overlap. Initialize UsableRegs to all ones.
+ UsableRegs.clear();
+ UsableRegs.resize(tri_->getNumRegs(), true);
+ Found = true;
+ }
+ // Remove usable registers clobbered by this mask.
+ UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
+ if (++SlotI == SlotE)
+ return Found;
+ }
+ // *SlotI is beyond the current LI segment.
+ LiveI = LI.advanceTo(LiveI, *SlotI);
+ if (LiveI == LiveE)
+ return Found;
+ // Advance SlotI until it overlaps.
+ while (*SlotI < LiveI->start)
+ if (++SlotI == SlotE)
+ return Found;
+ }
}
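[The loop above is a merge walk over two sorted sequences: the regmask slot array and the live segments of LI. Stripped of the UsableRegs bookkeeping, the shape is roughly the following sketch, not the exact control flow:

    // While both sequences have elements left:
    //   if (*SlotI < LiveI->end)  -> slot overlaps segment: apply mask, ++SlotI
    //   else                      -> LiveI = LI.advanceTo(LiveI, *SlotI)
    // Either iterator reaching its end terminates with the masks seen so far.
]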
-bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
- LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
+//===----------------------------------------------------------------------===//
+// HMEditor class.
+//===----------------------------------------------------------------------===//
- MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
+// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
+class LiveIntervals::HMEditor {
+private:
+ LiveIntervals& LIS;
+ const MachineRegisterInfo& MRI;
+ const TargetRegisterInfo& TRI;
+ SlotIndex NewIdx;
+
+ typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
+ typedef DenseSet<IntRangePair> RangeSet;
+
+ struct RegRanges {
+ LiveRange* Use;
+ LiveRange* EC;
+ LiveRange* Dead;
+ LiveRange* Def;
+ RegRanges() : Use(0), EC(0), Dead(0), Def(0) {}
+ };
+ typedef DenseMap<unsigned, RegRanges> BundleRanges;
+
+public:
+ HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
+ const TargetRegisterInfo& TRI, SlotIndex NewIdx)
+ : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}
+
+ // Update intervals for all operands of MI from OldIdx to NewIdx.
+ // This assumes that MI used to be at OldIdx, and now resides at
+ // NewIdx.
+ void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) {
+ assert(NewIdx != OldIdx && "No-op move? That's a bit strange.");
+
+ // Collect the operands.
+ RangeSet Entering, Internal, Exiting;
+ bool hasRegMaskOp = false;
+ collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
+
+ // To keep the LiveRanges valid within an interval, move the ranges closest
+ // to the destination first. This prevents ranges from overlapping, so that
+ // APIs like removeRange still work.
+ if (NewIdx < OldIdx) {
+ moveAllEnteringFrom(OldIdx, Entering);
+ moveAllInternalFrom(OldIdx, Internal);
+ moveAllExitingFrom(OldIdx, Exiting);
+ }
+ else {
+ moveAllExitingFrom(OldIdx, Exiting);
+ moveAllInternalFrom(OldIdx, Internal);
+ moveAllEnteringFrom(OldIdx, Entering);
+ }
- if (mbb == 0)
- return false;
+ if (hasRegMaskOp)
+ updateRegMaskSlots(OldIdx);
- for (++itr; itr != li.ranges.end(); ++itr) {
- MachineBasicBlock *mbb2 =
- indexes_->getMBBCoveringRange(itr->start, itr->end);
+#ifndef NDEBUG
+ LIValidator validator;
+ std::for_each(Entering.begin(), Entering.end(), validator);
+ std::for_each(Internal.begin(), Internal.end(), validator);
+ std::for_each(Exiting.begin(), Exiting.end(), validator);
+ assert(validator.rangesOk() && "moveAllRangesFrom broke liveness.");
+#endif
- if (mbb2 != mbb)
- return false;
}
- return true;
-}
+ // Update intervals for all operands of MI to refer to BundleStart's
+ // SlotIndex.
+ void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) {
+ if (MI == BundleStart)
+ return; // Bundling instr with itself - nothing to do.
+
+ SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI);
+ assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI &&
+ "SlotIndex <-> Instruction mapping broken for MI");
+
+ // Collect all ranges already in the bundle.
+ MachineBasicBlock::instr_iterator BII(BundleStart);
+ RangeSet Entering, Internal, Exiting;
+ bool hasRegMaskOp = false;
+ collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
+ assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
+ for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) {
+ if (&*BII == MI)
+ continue;
+ collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
+ assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
+ }
-/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
-/// interval on to-be re-materialized operands of MI) with new register.
-void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
- MachineInstr *MI, unsigned NewVReg,
- VirtRegMap &vrm) {
- // There is an implicit use. That means one of the other operand is
- // being remat'ed and the remat'ed instruction has li.reg as an
- // use operand. Make sure we rewrite that as well.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
- if (!vrm.isReMaterialized(Reg))
- continue;
- MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
- MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
- if (UseMO)
- UseMO->setReg(NewVReg);
- }
-}
+ BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);
-/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
-/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
-bool LiveIntervals::
-rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
- bool TrySplit, SlotIndex index, SlotIndex end,
- MachineInstr *MI,
- MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
- unsigned Slot, int LdSlot,
- bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
- VirtRegMap &vrm,
- const TargetRegisterClass* rc,
- SmallVector<int, 4> &ReMatIds,
- const MachineLoopInfo *loopInfo,
- unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
- DenseMap<unsigned,unsigned> &MBBVRegsMap,
- std::vector<LiveInterval*> &NewLIs) {
- bool CanFold = false;
- RestartInstruction:
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand& mop = MI->getOperand(i);
- if (!mop.isReg())
- continue;
- unsigned Reg = mop.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
- if (Reg != li.reg)
- continue;
+ collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
+ assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
- bool TryFold = !DefIsReMat;
- bool FoldSS = true; // Default behavior unless it's a remat.
- int FoldSlot = Slot;
- if (DefIsReMat) {
- // If this is the rematerializable definition MI itself and
- // all of its uses are rematerialized, simply delete it.
- if (MI == ReMatOrigDefMI && CanDelete) {
- DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
- << *MI << '\n');
- RemoveMachineInstrFromMaps(MI);
- vrm.RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
- break;
- }
+ DEBUG(dbgs() << "Entering: " << Entering.size() << "\n");
+ DEBUG(dbgs() << "Internal: " << Internal.size() << "\n");
+ DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n");
- // If def for this use can't be rematerialized, then try folding.
- // If def is rematerializable and it's a load, also try folding.
- TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
- if (isLoad) {
- // Try fold loads (from stack slot, constant pool, etc.) into uses.
- FoldSS = isLoadSS;
- FoldSlot = LdSlot;
- }
- }
+ moveAllEnteringFromInto(OldIdx, Entering, BR);
+ moveAllInternalFromInto(OldIdx, Internal, BR);
+ moveAllExitingFromInto(OldIdx, Exiting, BR);
- // Scan all of the operands of this instruction rewriting operands
- // to use NewVReg instead of li.reg as appropriate. We do this for
- // two reasons:
- //
- // 1. If the instr reads the same spilled vreg multiple times, we
- // want to reuse the NewVReg.
- // 2. If the instr is a two-addr instruction, we are required to
- // keep the src/dst regs pinned.
- //
- // Keep track of whether we replace a use and/or def so that we can
- // create the spill interval with the appropriate range.
- SmallVector<unsigned, 2> Ops;
- tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
-
- // Create a new virtual register for the spill interval.
- // Create the new register now so we can map the fold instruction
- // to the new register so when it is unfolded we get the correct
- // answer.
- bool CreatedNewVReg = false;
- if (NewVReg == 0) {
- NewVReg = mri_->createVirtualRegister(rc);
- vrm.grow();
- CreatedNewVReg = true;
-
- // The new virtual register should get the same allocation hints as the
- // old one.
- std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
- if (Hint.first || Hint.second)
- mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
- }
- if (!TryFold)
- CanFold = false;
- else {
- // Do not fold load / store here if we are splitting. We'll find an
- // optimal point to insert a load / store later.
- if (!TrySplit) {
- if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
- Ops, FoldSS, FoldSlot, NewVReg)) {
- // Folding the load/store can completely change the instruction in
- // unpredictable ways, rescan it from the beginning.
-
- if (FoldSS) {
- // We need to give the new vreg the same stack slot as the
- // spilled interval.
- vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
- }
-
- HasUse = false;
- HasDef = false;
- CanFold = false;
- if (isNotInMIMap(MI))
- break;
- goto RestartInstruction;
- }
- } else {
- // We'll try to fold it later if it's profitable.
- CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
- }
- }
+#ifndef NDEBUG
+ LIValidator validator;
+ std::for_each(Entering.begin(), Entering.end(), validator);
+ std::for_each(Internal.begin(), Internal.end(), validator);
+ std::for_each(Exiting.begin(), Exiting.end(), validator);
+ assert(validator.rangesOk() && "moveAllRangesInto broke liveness.");
+#endif
+ }
- mop.setReg(NewVReg);
- if (mop.isImplicit())
- rewriteImplicitOps(li, MI, NewVReg, vrm);
-
- // Reuse NewVReg for other reads.
- bool HasEarlyClobber = false;
- for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
- MachineOperand &mopj = MI->getOperand(Ops[j]);
- mopj.setReg(NewVReg);
- if (mopj.isImplicit())
- rewriteImplicitOps(li, MI, NewVReg, vrm);
- if (mopj.isEarlyClobber())
- HasEarlyClobber = true;
- }
+private:
- if (CreatedNewVReg) {
- if (DefIsReMat) {
- vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
- if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
- // Each valnum may have its own remat id.
- ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
- } else {
- vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
- }
- if (!CanDelete || (HasUse && HasDef)) {
- // If this is a two-addr instruction then its use operands are
- // rematerializable but its def is not. It should be assigned a
- // stack slot.
- vrm.assignVirt2StackSlot(NewVReg, Slot);
- }
- } else {
- vrm.assignVirt2StackSlot(NewVReg, Slot);
+#ifndef NDEBUG
+ class LIValidator {
+ private:
+ DenseSet<const LiveInterval*> Checked, Bogus;
+ public:
+ void operator()(const IntRangePair& P) {
+ const LiveInterval* LI = P.first;
+ if (Checked.count(LI))
+ return;
+ Checked.insert(LI);
+ if (LI->empty())
+ return;
+ SlotIndex LastEnd = LI->begin()->start;
+ for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
+ LRI != LRE; ++LRI) {
+ const LiveRange& LR = *LRI;
+ if (LastEnd > LR.start || LR.start >= LR.end)
+ Bogus.insert(LI);
+ LastEnd = LR.end;
}
- } else if (HasUse && HasDef &&
- vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
- // If this interval hasn't been assigned a stack slot (because earlier
- // def is a deleted remat def), do it now.
- assert(Slot != VirtRegMap::NO_STACK_SLOT);
- vrm.assignVirt2StackSlot(NewVReg, Slot);
}
- // Re-matting an instruction with virtual register use. Add the
- // register as an implicit use on the use MI.
- if (DefIsReMat && ImpUse)
- MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
-
- // Create a new register interval for this spill / remat.
- LiveInterval &nI = getOrCreateInterval(NewVReg);
- if (CreatedNewVReg) {
- NewLIs.push_back(&nI);
- MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
- if (TrySplit)
- vrm.setIsSplitFromReg(NewVReg, li.reg);
+ bool rangesOk() const {
+ return Bogus.empty();
}
+ };
+#endif
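[LIValidator flags an interval as bogus when LastEnd > LR.start or LR.start >= LR.end, i.e. segments must be non-empty, sorted, and at most touching. Two examples it would reject:

    [8, 8)              // empty: start >= end
    [4, 12), [10, 20)   // overlapping: previous end > next start

whereas the touching pair [4, 12), [12, 20) passes.]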
- if (HasUse) {
- if (CreatedNewVReg) {
- LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
- nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
- DEBUG(dbgs() << " +" << LR);
- nI.addRange(LR);
- } else {
- // Extend the split live interval to this def / use.
- SlotIndex End = index.getDefIndex();
- LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
- nI.getValNumInfo(nI.getNumValNums()-1));
- DEBUG(dbgs() << " +" << LR);
- nI.addRange(LR);
+ // Collect IntRangePairs for all operands of MI that may need fixing.
+ // Treats MI's index as OldIdx (regardless of what it is in SlotIndexes'
+ // maps).
+ void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
+ RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
+ hasRegMaskOp = false;
+ for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end();
+ MOI != MOE; ++MOI) {
+ const MachineOperand& MO = *MOI;
+
+ if (MO.isRegMask()) {
+ hasRegMaskOp = true;
+ continue;
}
- }
- if (HasDef) {
- // An early clobber starts at the use slot, except for an early clobber
- // tied to a use operand (yes, that is a thing).
- LiveRange LR(HasEarlyClobber && !HasUse ?
- index.getUseIndex() : index.getDefIndex(),
- index.getStoreIndex(),
- nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
- DEBUG(dbgs() << " +" << LR);
- nI.addRange(LR);
- }
- DEBUG({
- dbgs() << "\t\t\t\tAdded new interval: ";
- nI.print(dbgs(), tri_);
- dbgs() << '\n';
- });
- }
- return CanFold;
-}
-bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
- const VNInfo *VNI,
- MachineBasicBlock *MBB,
- SlotIndex Idx) const {
- return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
-}
+ if (!MO.isReg() || MO.getReg() == 0)
+ continue;
-/// RewriteInfo - Keep track of machine instrs that will be rewritten
-/// during spilling.
-namespace {
- struct RewriteInfo {
- SlotIndex Index;
- MachineInstr *MI;
- RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
- };
+ unsigned Reg = MO.getReg();
- struct RewriteInfoCompare {
- bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
- return LHS.Index < RHS.Index;
- }
- };
-}
+ // TODO: Currently we're skipping uses that are reserved or have no
+ // interval, but we're not updating their kills. This should be
+ // fixed.
+ if (!LIS.hasInterval(Reg) ||
+ (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
+ continue;
-void LiveIntervals::
-rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
- LiveInterval::Ranges::const_iterator &I,
- MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
- unsigned Slot, int LdSlot,
- bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
- VirtRegMap &vrm,
- const TargetRegisterClass* rc,
- SmallVector<int, 4> &ReMatIds,
- const MachineLoopInfo *loopInfo,
- BitVector &SpillMBBs,
- DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
- BitVector &RestoreMBBs,
- DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
- DenseMap<unsigned,unsigned> &MBBVRegsMap,
- std::vector<LiveInterval*> &NewLIs) {
- bool AllCanFold = true;
- unsigned NewVReg = 0;
- SlotIndex start = I->start.getBaseIndex();
- SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
-
- // First collect all the def / use in this live range that will be rewritten.
- // Make sure they are sorted according to instruction index.
- std::vector<RewriteInfo> RewriteMIs;
- for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
- re = mri_->reg_end(); ri != re; ) {
- MachineInstr *MI = &*ri;
- MachineOperand &O = ri.getOperand();
- ++ri;
- if (MI->isDebugValue()) {
- // Modify DBG_VALUE now that the value is in a spill slot.
- if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
- uint64_t Offset = MI->getOperand(1).getImm();
- const MDNode *MDPtr = MI->getOperand(2).getMetadata();
- DebugLoc DL = MI->getDebugLoc();
- int FI = isLoadSS ? LdSlot : (int)Slot;
- if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
- Offset, MDPtr, DL)) {
- DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
- ReplaceMachineInstrInMaps(MI, NewDV);
- MachineBasicBlock *MBB = MI->getParent();
- MBB->insert(MBB->erase(MI), NewDV);
- continue;
+ LiveInterval* LI = &LIS.getInterval(Reg);
+
+ if (MO.readsReg()) {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
+ if (LR != 0)
+ Entering.insert(std::make_pair(LI, LR));
+ }
+ if (MO.isDef()) {
+ if (MO.isEarlyClobber()) {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot(true));
+ assert(LR != 0 && "No EC range?");
+ if (LR->end > OldIdx.getDeadSlot())
+ Exiting.insert(std::make_pair(LI, LR));
+ else
+ Internal.insert(std::make_pair(LI, LR));
+ } else if (MO.isDead()) {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
+ assert(LR != 0 && "No dead-def range?");
+ Internal.insert(std::make_pair(LI, LR));
+ } else {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getDeadSlot());
+ assert(LR && LR->end > OldIdx.getDeadSlot() &&
+ "Non-dead-def should have live range exiting.");
+ Exiting.insert(std::make_pair(LI, LR));
}
}
-
- DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
- RemoveMachineInstrFromMaps(MI);
- vrm.RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
- continue;
}
- assert(!(O.isImplicit() && O.isUse()) &&
- "Spilling register that's used as implicit use?");
- SlotIndex index = getInstructionIndex(MI);
- if (index < start || index >= end)
- continue;
-
- if (O.isUndef())
- // Must be defined by an implicit def. It should not be spilled. Note,
- // this is for correctness reason. e.g.
- // 8 %reg1024<def> = IMPLICIT_DEF
- // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
- // The live range [12, 14) are not part of the r1024 live interval since
- // it's defined by an implicit def. It will not conflicts with live
- // interval of r1025. Now suppose both registers are spilled, you can
- // easily see a situation where both registers are reloaded before
- // the INSERT_SUBREG and both target registers that would overlap.
- continue;
- RewriteMIs.push_back(RewriteInfo(index, MI));
}
- std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
-
- unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
- // Now rewrite the defs and uses.
- for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
- RewriteInfo &rwi = RewriteMIs[i];
- ++i;
- SlotIndex index = rwi.Index;
- MachineInstr *MI = rwi.MI;
- // If MI def and/or use the same register multiple times, then there
- // are multiple entries.
- while (i != e && RewriteMIs[i].MI == MI) {
- assert(RewriteMIs[i].Index == index);
- ++i;
- }
- MachineBasicBlock *MBB = MI->getParent();
- if (ImpUse && MI != ReMatDefMI) {
- // Re-matting an instruction with virtual register use. Prevent interval
- // from being spilled.
- getInterval(ImpUse).markNotSpillable();
- }
+ // Collect IntRangePairs for all operands of MI that may need fixing.
+ void collectRangesInBundle(MachineInstr* MI, RangeSet& Entering,
+ RangeSet& Exiting, SlotIndex MIStartIdx,
+ SlotIndex MIEndIdx) {
+ for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end();
+ MOI != MOE; ++MOI) {
+ const MachineOperand& MO = *MOI;
+ assert(!MO.isRegMask() && "Can't have RegMasks in bundles.");
+ if (!MO.isReg() || MO.getReg() == 0)
+ continue;
- unsigned MBBId = MBB->getNumber();
- unsigned ThisVReg = 0;
- if (TrySplit) {
- DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
- if (NVI != MBBVRegsMap.end()) {
- ThisVReg = NVI->second;
- // One common case:
- // x = use
- // ...
- // ...
- // def = ...
- // = use
- // It's better to start a new interval to avoid artificially
- // extend the new interval.
- if (MI->readsWritesVirtualRegister(li.reg) ==
- std::make_pair(false,true)) {
- MBBVRegsMap.erase(MBB->getNumber());
- ThisVReg = 0;
- }
- }
- }
+ unsigned Reg = MO.getReg();
+
+ // TODO: Currently we're skipping uses that are reserved or have no
+ // interval, but we're not updating their kills. This should be
+ // fixed.
+ if (!LIS.hasInterval(Reg) ||
+ (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
+ continue;
- bool IsNew = ThisVReg == 0;
- if (IsNew) {
- // This ends the previous live interval. If all of its def / use
- // can be folded, give it a low spill weight.
- if (NewVReg && TrySplit && AllCanFold) {
- LiveInterval &nI = getOrCreateInterval(NewVReg);
- nI.weight /= 10.0F;
+ LiveInterval* LI = &LIS.getInterval(Reg);
+
+ if (MO.readsReg()) {
+ LiveRange* LR = LI->getLiveRangeContaining(MIStartIdx);
+ if (LR != 0)
+ Entering.insert(std::make_pair(LI, LR));
+ }
+ if (MO.isDef()) {
+ assert(!MO.isEarlyClobber() && "Early clobbers not allowed in bundles.");
+ assert(!MO.isDead() && "Dead-defs not allowed in bundles.");
+ LiveRange* LR = LI->getLiveRangeContaining(MIEndIdx.getDeadSlot());
+ assert(LR != 0 && "Internal ranges not allowed in bundles.");
+ Exiting.insert(std::make_pair(LI, LR));
}
- AllCanFold = true;
}
- NewVReg = ThisVReg;
-
- bool HasDef = false;
- bool HasUse = false;
- bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
- index, end, MI, ReMatOrigDefMI, ReMatDefMI,
- Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
- CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
- ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
- if (!HasDef && !HasUse)
- continue;
+ }
- AllCanFold &= CanFold;
+ BundleRanges createBundleRanges(RangeSet& Entering, RangeSet& Internal,
+ RangeSet& Exiting) {
+ BundleRanges BR;
- // Update weight of spill interval.
- LiveInterval &nI = getOrCreateInterval(NewVReg);
- if (!TrySplit) {
- // The spill weight is now infinity as it cannot be spilled again.
- nI.markNotSpillable();
- continue;
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI) {
+ LiveInterval* LI = EI->first;
+ LiveRange* LR = EI->second;
+ BR[LI->reg].Use = LR;
}
- // Keep track of the last def and first use in each MBB.
- if (HasDef) {
- if (MI != ReMatOrigDefMI || !CanDelete) {
- bool HasKill = false;
- if (!HasUse)
- HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
- else {
- // If this is a two-address code, then this index starts a new VNInfo.
- const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
- if (VNI)
- HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
- }
- DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
- SpillIdxes.find(MBBId);
- if (!HasKill) {
- if (SII == SpillIdxes.end()) {
- std::vector<SRInfo> S;
- S.push_back(SRInfo(index, NewVReg, true));
- SpillIdxes.insert(std::make_pair(MBBId, S));
- } else if (SII->second.back().vreg != NewVReg) {
- SII->second.push_back(SRInfo(index, NewVReg, true));
- } else if (index > SII->second.back().index) {
- // If there is an earlier def and this is a two-address
- // instruction, then it's not possible to fold the store (which
- // would also fold the load).
- SRInfo &Info = SII->second.back();
- Info.index = index;
- Info.canFold = !HasUse;
- }
- SpillMBBs.set(MBBId);
- } else if (SII != SpillIdxes.end() &&
- SII->second.back().vreg == NewVReg &&
- index > SII->second.back().index) {
- // There is an earlier def that's not killed (must be two-address).
- // The spill is no longer needed.
- SII->second.pop_back();
- if (SII->second.empty()) {
- SpillIdxes.erase(MBBId);
- SpillMBBs.reset(MBBId);
- }
- }
+ for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+ II != IE; ++II) {
+ LiveInterval* LI = II->first;
+ LiveRange* LR = II->second;
+ if (LR->end.isDead()) {
+ BR[LI->reg].Dead = LR;
+ } else {
+ BR[LI->reg].EC = LR;
}
}
- if (HasUse) {
- DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
- SpillIdxes.find(MBBId);
- if (SII != SpillIdxes.end() &&
- SII->second.back().vreg == NewVReg &&
- index > SII->second.back().index)
- // Use(s) following the last def, it's not safe to fold the spill.
- SII->second.back().canFold = false;
- DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
- RestoreIdxes.find(MBBId);
- if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
- // If we are splitting live intervals, only fold if it's the first
- // use and there isn't another use later in the MBB.
- RII->second.back().canFold = false;
- else if (IsNew) {
- // Only need a reload if there isn't an earlier def / use.
- if (RII == RestoreIdxes.end()) {
- std::vector<SRInfo> Infos;
- Infos.push_back(SRInfo(index, NewVReg, true));
- RestoreIdxes.insert(std::make_pair(MBBId, Infos));
- } else {
- RII->second.push_back(SRInfo(index, NewVReg, true));
- }
- RestoreMBBs.set(MBBId);
- }
+ for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+ EI != EE; ++EI) {
+ LiveInterval* LI = EI->first;
+ LiveRange* LR = EI->second;
+ BR[LI->reg].Def = LR;
}
- // Update spill weight.
- unsigned loopDepth = loopInfo->getLoopDepth(MBB);
- nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
+ return BR;
}
- if (NewVReg && TrySplit && AllCanFold) {
- // If all of its def / use can be folded, give it a low spill weight.
- LiveInterval &nI = getOrCreateInterval(NewVReg);
- nI.weight /= 10.0F;
+ void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
+ MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
+ if (!OldKillMI->killsRegister(reg))
+ return; // Bail out if we don't have kill flags on the old register.
+ MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
+ assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
+ assert(!NewKillMI->killsRegister(reg) && "New kill instr is already a kill.");
+ OldKillMI->clearRegisterKills(reg, &TRI);
+ NewKillMI->addRegisterKilled(reg, &TRI);
}
-}
-bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
- unsigned vr, BitVector &RestoreMBBs,
- DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
- if (!RestoreMBBs[Id])
- return false;
- std::vector<SRInfo> &Restores = RestoreIdxes[Id];
- for (unsigned i = 0, e = Restores.size(); i != e; ++i)
- if (Restores[i].index == index &&
- Restores[i].vreg == vr &&
- Restores[i].canFold)
- return true;
- return false;
-}
-
-void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
- unsigned vr, BitVector &RestoreMBBs,
- DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
- if (!RestoreMBBs[Id])
- return;
- std::vector<SRInfo> &Restores = RestoreIdxes[Id];
- for (unsigned i = 0, e = Restores.size(); i != e; ++i)
- if (Restores[i].index == index && Restores[i].vreg)
- Restores[i].index = SlotIndex();
-}
+ void updateRegMaskSlots(SlotIndex OldIdx) {
+ SmallVectorImpl<SlotIndex>::iterator RI =
+ std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
+ OldIdx);
+ assert(*RI == OldIdx && "No RegMask at OldIdx.");
+ *RI = NewIdx;
+ assert(*prior(RI) < *RI && *RI < *next(RI) &&
+ "RegSlots out of order. Did you move one call across another?");
+ }
-/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
-/// spilled and create empty intervals for their uses.
-void
-LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
- const TargetRegisterClass* rc,
- std::vector<LiveInterval*> &NewLIs) {
- for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
- re = mri_->reg_end(); ri != re; ) {
- MachineOperand &O = ri.getOperand();
- MachineInstr *MI = &*ri;
- ++ri;
- if (MI->isDebugValue()) {
- // Remove debug info for now.
- O.setReg(0U);
- DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
- continue;
- }
- if (O.isDef()) {
- assert(MI->isImplicitDef() &&
- "Register def was not rewritten?");
- RemoveMachineInstrFromMaps(MI);
- vrm.RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
- } else {
- // This must be an use of an implicit_def so it's not part of the live
- // interval. Create a new empty live interval for it.
- // FIXME: Can we simply erase some of the instructions? e.g. Stores?
- unsigned NewVReg = mri_->createVirtualRegister(rc);
- vrm.grow();
- vrm.setIsImplicitlyDefined(NewVReg);
- NewLIs.push_back(&getOrCreateInterval(NewVReg));
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == li.reg) {
- MO.setReg(NewVReg);
- MO.setIsUndef();
- }
- }
+ // Return the last use of reg between NewIdx and OldIdx.
+ SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
+ SlotIndex LastUse = NewIdx;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI.use_nodbg_begin(Reg),
+ UE = MRI.use_nodbg_end();
+ UI != UE; UI.skipInstruction()) {
+ const MachineInstr* MI = &*UI;
+ SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
+ if (InstSlot > LastUse && InstSlot < OldIdx)
+ LastUse = InstSlot;
}
+ return LastUse;
}
-}
-
-float
-LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
- // Limit the loop depth ridiculousness.
- if (loopDepth > 200)
- loopDepth = 200;
-
- // The loop depth is used to roughly estimate the number of times the
- // instruction is executed. Something like 10^d is simple, but will quickly
- // overflow a float. This expression behaves like 10^d for small d, but is
- // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
- // headroom before overflow.
- // By the way, powf() might be unavailable here. For consistency,
- // We may take pow(double,double).
- float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
-
- return (isDef + isUse) * lc;
-}
-static void normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
- for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
- NewLIs[i]->weight =
- normalizeSpillWeight(NewLIs[i]->weight, NewLIs[i]->getSize());
-}
+ void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ bool LiveThrough = LR->end > OldIdx.getRegSlot();
+ if (LiveThrough)
+ return;
+ SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
+ if (LastUse != NewIdx)
+ moveKillFlags(LI->reg, NewIdx, LastUse);
+ LR->end = LastUse.getRegSlot();
+ }
-std::vector<LiveInterval*> LiveIntervals::
-addIntervalsForSpills(const LiveInterval &li,
- const SmallVectorImpl<LiveInterval*> *SpillIs,
- const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
- assert(li.isSpillable() && "attempt to spill already spilled interval!");
-
- DEBUG({
- dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
- li.print(dbgs(), tri_);
- dbgs() << '\n';
- });
-
- // Each bit specify whether a spill is required in the MBB.
- BitVector SpillMBBs(mf_->getNumBlockIDs());
- DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
- BitVector RestoreMBBs(mf_->getNumBlockIDs());
- DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
- DenseMap<unsigned,unsigned> MBBVRegsMap;
- std::vector<LiveInterval*> NewLIs;
- const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
-
- unsigned NumValNums = li.getNumValNums();
- SmallVector<MachineInstr*, 4> ReMatDefs;
- ReMatDefs.resize(NumValNums, NULL);
- SmallVector<MachineInstr*, 4> ReMatOrigDefs;
- ReMatOrigDefs.resize(NumValNums, NULL);
- SmallVector<int, 4> ReMatIds;
- ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
- BitVector ReMatDelete(NumValNums);
- unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
-
- // Spilling a split live interval. It cannot be split any further. Also,
- // it's also guaranteed to be a single val# / range interval.
- if (vrm.getPreSplitReg(li.reg)) {
- vrm.setIsSplitFromReg(li.reg, 0);
- // Unset the split kill marker on the last use.
- SlotIndex KillIdx = vrm.getKillPoint(li.reg);
- if (KillIdx != SlotIndex()) {
- MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
- assert(KillMI && "Last use disappeared?");
- int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
- assert(KillOp != -1 && "Last use disappeared?");
- KillMI->getOperand(KillOp).setIsKill(false);
- }
- vrm.removeKillPoint(li.reg);
- bool DefIsReMat = vrm.isReMaterialized(li.reg);
- Slot = vrm.getStackSlot(li.reg);
- assert(Slot != VirtRegMap::MAX_STACK_SLOT);
- MachineInstr *ReMatDefMI = DefIsReMat ?
- vrm.getReMaterializedMI(li.reg) : NULL;
- int LdSlot = 0;
- bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
- bool isLoad = isLoadSS ||
- (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
- bool IsFirstRange = true;
- for (LiveInterval::Ranges::const_iterator
- I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
- // If this is a split live interval with multiple ranges, it means there
- // are two-address instructions that re-defined the value. Only the
- // first def can be rematerialized!
- if (IsFirstRange) {
- // Note ReMatOrigDefMI has already been deleted.
- rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
- Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
- false, vrm, rc, ReMatIds, loopInfo,
- SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
- MBBVRegsMap, NewLIs);
- } else {
- rewriteInstructionsForSpills(li, false, I, NULL, 0,
- Slot, 0, false, false, false,
- false, vrm, rc, ReMatIds, loopInfo,
- SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
- MBBVRegsMap, NewLIs);
+ void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ // Extend the LiveRange if NewIdx is past the end.
+ if (NewIdx > LR->end) {
+ // Move kill flags if OldIdx was not originally the end
+ // (otherwise LR->end points to an invalid slot).
+ if (LR->end.getRegSlot() != OldIdx.getRegSlot()) {
+ assert(LR->end > OldIdx && "LiveRange does not cover original slot");
+ moveKillFlags(LI->reg, LR->end, NewIdx);
}
- IsFirstRange = false;
+ LR->end = NewIdx.getRegSlot();
}
-
- handleSpilledImpDefs(li, vrm, rc, NewLIs);
- normalizeSpillWeights(NewLIs);
- return NewLIs;
}
- bool TrySplit = !intervalIsInOneMBB(li);
- if (TrySplit)
- ++numSplits;
- bool NeedStackSlot = false;
- for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
- i != e; ++i) {
- const VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (VNI->isUnused())
- continue; // Dead val#.
- // Is the def for the val# rematerializable?
- MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
- bool dummy;
- if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
- // Remember how to remat the def of this val#.
- ReMatOrigDefs[VN] = ReMatDefMI;
- // Original def may be modified so we have to make a copy here.
- MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
- CloneMIs.push_back(Clone);
- ReMatDefs[VN] = Clone;
-
- bool CanDelete = true;
- if (VNI->hasPHIKill()) {
- // A kill is a phi node, not all of its uses can be rematerialized.
- // It must not be deleted.
- CanDelete = false;
- // Need a stack slot if there is any live range where uses cannot be
- // rematerialized.
- NeedStackSlot = true;
- }
- if (CanDelete)
- ReMatDelete.set(VN);
+ void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
+ bool GoingUp = NewIdx < OldIdx;
+
+ if (GoingUp) {
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI)
+ moveEnteringUpFrom(OldIdx, *EI);
} else {
- // Need a stack slot if there is any live range where uses cannot be
- // rematerialized.
- NeedStackSlot = true;
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI)
+ moveEnteringDownFrom(OldIdx, *EI);
}
}
- // One stack slot per live interval.
- if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
- if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
- Slot = vrm.assignVirt2StackSlot(li.reg);
-
- // This case only occurs when the prealloc splitter has already assigned
- // a stack slot to this vreg.
- else
- Slot = vrm.getStackSlot(li.reg);
+ void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
+ LR->end <= OldIdx.getDeadSlot() &&
+ "Range should be internal to OldIdx.");
+ LiveRange Tmp(*LR);
+ Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
+ Tmp.valno->def = Tmp.start;
+ Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
+ LI->removeRange(*LR);
+ LI->addRange(Tmp);
}
- // Create new intervals and rewrite defs and uses.
- for (LiveInterval::Ranges::const_iterator
- I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
- MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
- MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
- bool DefIsReMat = ReMatDefMI != NULL;
- bool CanDelete = ReMatDelete[I->valno->id];
- int LdSlot = 0;
- bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
- bool isLoad = isLoadSS ||
- (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
- rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
- Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
- CanDelete, vrm, rc, ReMatIds, loopInfo,
- SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
- MBBVRegsMap, NewLIs);
+ void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
+ for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+ II != IE; ++II)
+ moveInternalFrom(OldIdx, *II);
}
- // Insert spills / restores if we are splitting.
- if (!TrySplit) {
- handleSpilledImpDefs(li, vrm, rc, NewLIs);
- normalizeSpillWeights(NewLIs);
- return NewLIs;
+ void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
+ LiveRange* LR = P.second;
+ assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
+ "Range should start in OldIdx.");
+ assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
+ SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
+ LR->start = NewStart;
+ LR->valno->def = NewStart;
}
- SmallPtrSet<LiveInterval*, 4> AddedKill;
- SmallVector<unsigned, 2> Ops;
- if (NeedStackSlot) {
- int Id = SpillMBBs.find_first();
- while (Id != -1) {
- std::vector<SRInfo> &spills = SpillIdxes[Id];
- for (unsigned i = 0, e = spills.size(); i != e; ++i) {
- SlotIndex index = spills[i].index;
- unsigned VReg = spills[i].vreg;
- LiveInterval &nI = getOrCreateInterval(VReg);
- bool isReMat = vrm.isReMaterialized(VReg);
- MachineInstr *MI = getInstructionFromIndex(index);
- bool CanFold = false;
- bool FoundUse = false;
- Ops.clear();
- if (spills[i].canFold) {
- CanFold = true;
- for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
- MachineOperand &MO = MI->getOperand(j);
- if (!MO.isReg() || MO.getReg() != VReg)
- continue;
-
- Ops.push_back(j);
- if (MO.isDef())
- continue;
- if (isReMat ||
- (!FoundUse && !alsoFoldARestore(Id, index, VReg,
- RestoreMBBs, RestoreIdxes))) {
- // MI has two-address uses of the same register. If the use
- // isn't the first and only use in the BB, then we can't fold
- // it. FIXME: Move this to rewriteInstructionsForSpills.
- CanFold = false;
- break;
- }
- FoundUse = true;
- }
- }
- // Fold the store into the def if possible.
- bool Folded = false;
- if (CanFold && !Ops.empty()) {
- if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
- Folded = true;
- if (FoundUse) {
- // Also folded uses, do not issue a load.
- eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
- nI.removeRange(index.getLoadIndex(), index.getDefIndex());
- }
- nI.removeRange(index.getDefIndex(), index.getStoreIndex());
- }
- }
-
- // Otherwise tell the spiller to issue a spill.
- if (!Folded) {
- LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
- bool isKill = LR->end == index.getStoreIndex();
- if (!MI->registerDefIsDead(nI.reg))
- // No need to spill a dead def.
- vrm.addSpillPoint(VReg, isKill, MI);
- if (isKill)
- AddedKill.insert(&nI);
- }
- }
- Id = SpillMBBs.find_next(Id);
- }
+ void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
+ for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+ EI != EE; ++EI)
+ moveExitingFrom(OldIdx, *EI);
}
- int Id = RestoreMBBs.find_first();
- while (Id != -1) {
- std::vector<SRInfo> &restores = RestoreIdxes[Id];
- for (unsigned i = 0, e = restores.size(); i != e; ++i) {
- SlotIndex index = restores[i].index;
- if (index == SlotIndex())
- continue;
- unsigned VReg = restores[i].vreg;
- LiveInterval &nI = getOrCreateInterval(VReg);
- bool isReMat = vrm.isReMaterialized(VReg);
- MachineInstr *MI = getInstructionFromIndex(index);
- bool CanFold = false;
- Ops.clear();
- if (restores[i].canFold) {
- CanFold = true;
- for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
- MachineOperand &MO = MI->getOperand(j);
- if (!MO.isReg() || MO.getReg() != VReg)
- continue;
-
- if (MO.isDef()) {
- // If this restore were to be folded, it would have been folded
- // already.
- CanFold = false;
- break;
- }
- Ops.push_back(j);
- }
- }
+ void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P,
+ BundleRanges& BR) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ bool LiveThrough = LR->end > OldIdx.getRegSlot();
+ if (LiveThrough) {
+ assert((LR->start < NewIdx || BR[LI->reg].Def == LR) &&
+ "Def in bundle should be def range.");
+ assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
+ "If bundle has use for this reg it should be LR.");
+ BR[LI->reg].Use = LR;
+ return;
+ }
- // Fold the load into the use if possible.
- bool Folded = false;
- if (CanFold && !Ops.empty()) {
- if (!isReMat)
- Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
- else {
- MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
- int LdSlot = 0;
- bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
- // If the rematerializable def is a load, also try to fold it.
- if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
- Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
- Ops, isLoadSS, LdSlot, VReg);
- if (!Folded) {
- unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
- if (ImpUse) {
- // Re-matting an instruction with virtual register use. Add the
- // register as an implicit use on the use MI and mark the register
- // interval as unspillable.
- LiveInterval &ImpLi = getInterval(ImpUse);
- ImpLi.markNotSpillable();
- MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
- }
- }
- }
- }
- // If folding is not possible / failed, then tell the spiller to issue a
- // load / rematerialization for us.
- if (Folded)
- nI.removeRange(index.getLoadIndex(), index.getDefIndex());
- else
- vrm.addRestorePoint(VReg, MI);
+ SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
+ moveKillFlags(LI->reg, OldIdx, LastUse);
+
+ if (LR->start < NewIdx) {
+ // Becoming a new entering range.
+ assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 &&
+ "Bundle shouldn't be re-defining reg mid-range.");
+ assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
+ "Bundle shouldn't have different use range for same reg.");
+ LR->end = LastUse.getRegSlot();
+ BR[LI->reg].Use = LR;
+ } else {
+ // Becoming a new Dead-def.
+ assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) &&
+ "Live range starting at unexpected slot.");
+ assert(BR[LI->reg].Def == LR && "Reg should have def range.");
+ assert(BR[LI->reg].Dead == 0 &&
+ "Can't have def and dead def of same reg in a bundle.");
+ LR->end = LastUse.getDeadSlot();
+ BR[LI->reg].Dead = BR[LI->reg].Def;
+ BR[LI->reg].Def = 0;
}
- Id = RestoreMBBs.find_next(Id);
}
- // Finalize intervals: add kills, finalize spill weights, and filter out
- // dead intervals.
- std::vector<LiveInterval*> RetNewLIs;
- for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
- LiveInterval *LI = NewLIs[i];
- if (!LI->empty()) {
- if (!AddedKill.count(LI)) {
- LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
- SlotIndex LastUseIdx = LR->end.getBaseIndex();
- MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
- int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
- assert(UseIdx != -1);
- if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
- LastUse->getOperand(UseIdx).setIsKill();
- vrm.addKillPoint(LI->reg, LastUseIdx);
- }
- }
- RetNewLIs.push_back(LI);
+ void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P,
+ BundleRanges& BR) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ if (NewIdx > LR->end) {
+ // Range extended to bundle. Add to bundle uses.
+ // Note: Currently adds kill flags to bundle start.
+ assert(BR[LI->reg].Use == 0 &&
+ "Bundle already has use range for reg.");
+ moveKillFlags(LI->reg, LR->end, NewIdx);
+ LR->end = NewIdx.getRegSlot();
+ BR[LI->reg].Use = LR;
+ } else {
+ assert(BR[LI->reg].Use != 0 &&
+ "Bundle should already have a use range for reg.");
}
}
- handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
- normalizeSpillWeights(RetNewLIs);
- return RetNewLIs;
-}
-
-/// hasAllocatableSuperReg - Return true if the specified physical register has
-/// any super register that's allocatable.
-bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
- for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
- if (allocatableRegs_[*AS] && hasInterval(*AS))
- return true;
- return false;
-}
+ void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering,
+ BundleRanges& BR) {
+ bool GoingUp = NewIdx < OldIdx;
-/// getRepresentativeReg - Find the largest super register of the specified
-/// physical register.
-unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
- // Find the largest super-register that is allocatable.
- unsigned BestReg = Reg;
- for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
- unsigned SuperReg = *AS;
- if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
- BestReg = SuperReg;
- break;
+ if (GoingUp) {
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI)
+ moveEnteringUpFromInto(OldIdx, *EI, BR);
+ } else {
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI)
+ moveEnteringDownFromInto(OldIdx, *EI, BR);
}
}
- return BestReg;
-}
-/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
-/// specified interval that conflicts with the specified physical register.
-unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
- unsigned PhysReg) const {
- unsigned NumConflicts = 0;
- const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
- for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
- E = mri_->reg_end(); I != E; ++I) {
- MachineOperand &O = I.getOperand();
- MachineInstr *MI = O.getParent();
- if (MI->isDebugValue())
- continue;
- SlotIndex Index = getInstructionIndex(MI);
- if (pli.liveAt(Index))
- ++NumConflicts;
+ void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P,
+ BundleRanges& BR) {
+ // TODO: Sane rules for moving ranges into bundles.
}
- return NumConflicts;
-}
-/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
-/// around all defs and uses of the specified interval. Return true if it
-/// was able to cut its interval.
-bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
- unsigned PhysReg, VirtRegMap &vrm) {
- unsigned SpillReg = getRepresentativeReg(PhysReg);
-
- DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
- << " represented by " << tri_->getName(SpillReg) << '\n');
-
- for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
- // If there are registers which alias PhysReg, but which are not a
- // sub-register of the chosen representative super register. Assert
- // since we can't handle it yet.
- assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
- tri_->isSuperRegister(*AS, SpillReg));
-
- bool Cut = false;
- SmallVector<unsigned, 4> PRegs;
- if (hasInterval(SpillReg))
- PRegs.push_back(SpillReg);
- for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
- if (hasInterval(*SR))
- PRegs.push_back(*SR);
-
- DEBUG({
- dbgs() << "Trying to spill:";
- for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
- dbgs() << ' ' << tri_->getName(PRegs[i]);
- dbgs() << '\n';
- });
-
- SmallPtrSet<MachineInstr*, 8> SeenMIs;
- for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
- E = mri_->reg_end(); I != E; ++I) {
- MachineOperand &O = I.getOperand();
- MachineInstr *MI = O.getParent();
- if (MI->isDebugValue() || SeenMIs.count(MI))
- continue;
- SeenMIs.insert(MI);
- SlotIndex Index = getInstructionIndex(MI);
- bool LiveReg = false;
- for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
- unsigned PReg = PRegs[i];
- LiveInterval &pli = getInterval(PReg);
- if (!pli.liveAt(Index))
- continue;
- LiveReg = true;
- SlotIndex StartIdx = Index.getLoadIndex();
- SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
- if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Ran out of registers during register allocation!";
- if (MI->isInlineAsm()) {
- Msg << "\nPlease check your inline asm statement for invalid "
- << "constraints:\n";
- MI->print(Msg, tm_);
- }
- report_fatal_error(Msg.str());
+ void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal,
+ BundleRanges& BR) {
+ for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+ II != IE; ++II)
+ moveInternalFromInto(OldIdx, *II, BR);
+ }
+
+ void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P,
+ BundleRanges& BR) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+
+ assert(LR->start.isRegister() &&
+ "Don't know how to merge exiting ECs into bundles yet.");
+
+ if (LR->end > NewIdx.getDeadSlot()) {
+ // This range is becoming an exiting range on the bundle.
+ // If there was an old dead-def of this reg, delete it.
+ if (BR[LI->reg].Dead != 0) {
+ LI->removeRange(*BR[LI->reg].Dead);
+ BR[LI->reg].Dead = 0;
+ }
+ assert(BR[LI->reg].Def == 0 &&
+ "Can't have two defs for the same variable exiting a bundle.");
+ LR->start = NewIdx.getRegSlot();
+ LR->valno->def = LR->start;
+ BR[LI->reg].Def = LR;
+ } else {
+ // This range is becoming internal to the bundle.
+ assert(LR->end == NewIdx.getRegSlot() &&
+ "Can't bundle def whose kill is before the bundle");
+ if (BR[LI->reg].Dead || BR[LI->reg].Def) {
+ // Already have a def for this. Just delete range.
+ LI->removeRange(*LR);
+ } else {
+ // Make range dead, record.
+ LR->end = NewIdx.getDeadSlot();
+ BR[LI->reg].Dead = LR;
+ assert(BR[LI->reg].Use == LR &&
+ "Range becoming dead should currently be use.");
}
- pli.removeRange(StartIdx, EndIdx);
- LiveReg = true;
+ // In both cases the range is no longer a use on the bundle.
+ BR[LI->reg].Use = 0;
}
- if (!LiveReg)
- continue;
- DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
- vrm.addEmergencySpill(SpillReg, MI);
- Cut = true;
}
- return Cut;
-}
-LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
- MachineInstr* startInst) {
- LiveInterval& Interval = getOrCreateInterval(reg);
- VNInfo* VN = Interval.getNextValue(
- SlotIndex(getInstructionIndex(startInst).getDefIndex()),
- startInst, getVNInfoAllocator());
- VN->setHasPHIKill(true);
- LiveRange LR(
- SlotIndex(getInstructionIndex(startInst).getDefIndex()),
- getMBBEndIdx(startInst->getParent()), VN);
- Interval.addRange(LR);
+ void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting,
+ BundleRanges& BR) {
+ for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+ EI != EE; ++EI)
+ moveExitingFromInto(OldIdx, *EI, BR);
+ }
- return LR;
+};
+
+void LiveIntervals::handleMove(MachineInstr* MI) {
+ SlotIndex OldIndex = indexes_->getInstructionIndex(MI);
+ indexes_->removeMachineInstrFromMaps(MI);
+ SlotIndex NewIndex = MI->isInsideBundle() ?
+ indexes_->getInstructionIndex(MI) :
+ indexes_->insertMachineInstrInMaps(MI);
+ assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
+ OldIndex < getMBBEndIdx(MI->getParent()) &&
+ "Cannot handle moves across basic block boundaries.");
+ assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
+
+ HMEditor HME(*this, *mri_, *tri_, NewIndex);
+ HME.moveAllRangesFrom(MI, OldIndex);
}
+void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart) {
+ SlotIndex NewIndex = indexes_->getInstructionIndex(BundleStart);
+ HMEditor HME(*this, *mri_, *tri_, NewIndex);
+ HME.moveAllRangesInto(MI, BundleStart);
+}
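
For context, a minimal sketch of how a caller might drive the new handleMove hook after reordering an instruction; the helper function and its caller are hypothetical, not part of this commit:

    // Hypothetical helper: move MI before InsertPt within its own block, then
    // ask LiveIntervals to repair the affected slot indexes, live ranges, and
    // kill flags. handleMove asserts the move stays inside one basic block.
    static void moveAndRepair(LiveIntervals &LIS, MachineInstr *MI,
                              MachineBasicBlock::iterator InsertPt) {
      MachineBasicBlock *MBB = MI->getParent();
      MBB->splice(InsertPt, MBB, MI); // Physically reorder the instruction.
      LIS.handleMove(MI);             // Rebuild the affected live intervals.
    }
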
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
index 110fe1e..60a6880 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.cpp
@@ -21,6 +21,8 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include <algorithm>
+
using namespace llvm;
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
index 5d64d28..dbf5ac1 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalUnion.h
@@ -20,8 +20,6 @@
#include "llvm/ADT/IntervalMap.h"
#include "llvm/CodeGen/LiveInterval.h"
-#include <algorithm>
-
namespace llvm {
class MachineLoopRange;
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp b/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
index a7d5af5..d8ab791 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRangeCalc.cpp
@@ -65,7 +65,7 @@ void LiveRangeCalc::extend(LiveInterval *LI,
assert(DomTree && "Missing dominator tree");
MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill.getPrevSlot());
- assert(Kill && "No MBB at Kill");
+ assert(KillMBB && "No MBB at Kill");
// Is there a def in the same MBB we can extend?
if (LI->extendInBlock(Indexes->getMBBStartIdx(KillMBB), Kill))
@@ -237,7 +237,7 @@ void LiveRangeCalc::updateSSA(SlotIndexes *Indexes,
assert(Alloc && "Need VNInfo allocator to create PHI-defs");
SlotIndex Start, End;
tie(Start, End) = Indexes->getMBBRange(MBB);
- VNInfo *VNI = I->LI->getNextValue(Start, 0, *Alloc);
+ VNInfo *VNI = I->LI->getNextValue(Start, *Alloc);
VNI->setIsPHIDef(true);
I->Value = VNI;
// This block is done, we know the final value.
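
A hedged before/after sketch of the getNextValue change in this hunk: VNInfo no longer records a defining or copy instruction, so the middle argument disappears (variable names are illustrative):

    // Before: the second argument was the defining/copy MachineInstr (often 0):
    //   VNInfo *VNI = LI->getNextValue(Start, 0, *Alloc);
    // After: only the defining slot index and the VNInfo allocator remain.
    VNInfo *VNI = LI->getNextValue(Start, *Alloc);
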
diff --git a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
index b23f851..695f536 100644
--- a/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -1,4 +1,4 @@
-//===--- LiveRangeEdit.cpp - Basic tools for editing a register live range --===//
+//===-- LiveRangeEdit.cpp - Basic tools for editing a register live range -===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,12 +12,12 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "regalloc"
-#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
@@ -29,13 +29,14 @@ STATISTIC(NumDCEDeleted, "Number of instructions deleted by DCE");
STATISTIC(NumDCEFoldedLoads, "Number of single use loads folded after DCE");
STATISTIC(NumFracRanges, "Number of live ranges fractured by DCE");
-LiveInterval &LiveRangeEdit::createFrom(unsigned OldReg,
- LiveIntervals &LIS,
- VirtRegMap &VRM) {
- MachineRegisterInfo &MRI = VRM.getRegInfo();
+void LiveRangeEdit::Delegate::anchor() { }
+
+LiveInterval &LiveRangeEdit::createFrom(unsigned OldReg) {
unsigned VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
- VRM.grow();
- VRM.setIsSplitFromReg(VReg, VRM.getOriginal(OldReg));
+ if (VRM) {
+ VRM->grow();
+ VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg));
+ }
LiveInterval &LI = LIS.getOrCreateInterval(VReg);
newRegs_.push_back(&LI);
return LI;
@@ -43,37 +44,32 @@ LiveInterval &LiveRangeEdit::createFrom(unsigned OldReg,
bool LiveRangeEdit::checkRematerializable(VNInfo *VNI,
const MachineInstr *DefMI,
- const TargetInstrInfo &tii,
AliasAnalysis *aa) {
assert(DefMI && "Missing instruction");
scannedRemattable_ = true;
- if (!tii.isTriviallyReMaterializable(DefMI, aa))
+ if (!TII.isTriviallyReMaterializable(DefMI, aa))
return false;
remattable_.insert(VNI);
return true;
}
-void LiveRangeEdit::scanRemattable(LiveIntervals &lis,
- const TargetInstrInfo &tii,
- AliasAnalysis *aa) {
+void LiveRangeEdit::scanRemattable(AliasAnalysis *aa) {
for (LiveInterval::vni_iterator I = parent_.vni_begin(),
E = parent_.vni_end(); I != E; ++I) {
VNInfo *VNI = *I;
if (VNI->isUnused())
continue;
- MachineInstr *DefMI = lis.getInstructionFromIndex(VNI->def);
+ MachineInstr *DefMI = LIS.getInstructionFromIndex(VNI->def);
if (!DefMI)
continue;
- checkRematerializable(VNI, DefMI, tii, aa);
+ checkRematerializable(VNI, DefMI, aa);
}
scannedRemattable_ = true;
}
-bool LiveRangeEdit::anyRematerializable(LiveIntervals &lis,
- const TargetInstrInfo &tii,
- AliasAnalysis *aa) {
+bool LiveRangeEdit::anyRematerializable(AliasAnalysis *aa) {
if (!scannedRemattable_)
- scanRemattable(lis, tii, aa);
+ scanRemattable(aa);
return !remattable_.empty();
}
@@ -81,24 +77,18 @@ bool LiveRangeEdit::anyRematerializable(LiveIntervals &lis,
/// OrigIdx are also available with the same value at UseIdx.
bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
SlotIndex OrigIdx,
- SlotIndex UseIdx,
- LiveIntervals &lis) {
- OrigIdx = OrigIdx.getUseIndex();
- UseIdx = UseIdx.getUseIndex();
+ SlotIndex UseIdx) {
+ OrigIdx = OrigIdx.getRegSlot(true);
+ UseIdx = UseIdx.getRegSlot(true);
for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = OrigMI->getOperand(i);
if (!MO.isReg() || !MO.getReg() || MO.isDef())
continue;
// Reserved registers are OK.
- if (MO.isUndef() || !lis.hasInterval(MO.getReg()))
+ if (MO.isUndef() || !LIS.hasInterval(MO.getReg()))
continue;
- // We cannot depend on virtual registers in uselessRegs_.
- if (uselessRegs_)
- for (unsigned ui = 0, ue = uselessRegs_->size(); ui != ue; ++ui)
- if ((*uselessRegs_)[ui]->reg == MO.getReg())
- return false;
- LiveInterval &li = lis.getInterval(MO.getReg());
+ LiveInterval &li = LIS.getInterval(MO.getReg());
const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
if (!OVNI)
continue;
@@ -110,8 +100,7 @@ bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
bool LiveRangeEdit::canRematerializeAt(Remat &RM,
SlotIndex UseIdx,
- bool cheapAsAMove,
- LiveIntervals &lis) {
+ bool cheapAsAMove) {
assert(scannedRemattable_ && "Call anyRematerializable first");
// Use scanRemattable info.
@@ -121,19 +110,19 @@ bool LiveRangeEdit::canRematerializeAt(Remat &RM,
// No defining instruction provided.
SlotIndex DefIdx;
if (RM.OrigMI)
- DefIdx = lis.getInstructionIndex(RM.OrigMI);
+ DefIdx = LIS.getInstructionIndex(RM.OrigMI);
else {
DefIdx = RM.ParentVNI->def;
- RM.OrigMI = lis.getInstructionFromIndex(DefIdx);
+ RM.OrigMI = LIS.getInstructionFromIndex(DefIdx);
assert(RM.OrigMI && "No defining instruction for remattable value");
}
// If only cheap remats were requested, bail out early.
- if (cheapAsAMove && !RM.OrigMI->getDesc().isAsCheapAsAMove())
+ if (cheapAsAMove && !RM.OrigMI->isAsCheapAsAMove())
return false;
// Verify that all used registers are available with the same values.
- if (!allUsesAvailableAt(RM.OrigMI, DefIdx, UseIdx, lis))
+ if (!allUsesAvailableAt(RM.OrigMI, DefIdx, UseIdx))
return false;
return true;
@@ -143,27 +132,22 @@ SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg,
const Remat &RM,
- LiveIntervals &lis,
- const TargetInstrInfo &tii,
const TargetRegisterInfo &tri,
bool Late) {
assert(RM.OrigMI && "Invalid remat");
- tii.reMaterialize(MBB, MI, DestReg, 0, RM.OrigMI, tri);
+ TII.reMaterialize(MBB, MI, DestReg, 0, RM.OrigMI, tri);
rematted_.insert(RM.ParentVNI);
- return lis.getSlotIndexes()->insertMachineInstrInMaps(--MI, Late)
- .getDefIndex();
+ return LIS.getSlotIndexes()->insertMachineInstrInMaps(--MI, Late)
+ .getRegSlot();
}
-void LiveRangeEdit::eraseVirtReg(unsigned Reg, LiveIntervals &LIS) {
+void LiveRangeEdit::eraseVirtReg(unsigned Reg) {
if (delegate_ && delegate_->LRE_CanEraseVirtReg(Reg))
LIS.removeInterval(Reg);
}
bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
- SmallVectorImpl<MachineInstr*> &Dead,
- MachineRegisterInfo &MRI,
- LiveIntervals &LIS,
- const TargetInstrInfo &TII) {
+ SmallVectorImpl<MachineInstr*> &Dead) {
MachineInstr *DefMI = 0, *UseMI = 0;
// Check that there is a single def and a single use.
@@ -174,7 +158,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
if (MO.isDef()) {
if (DefMI && DefMI != MI)
return false;
- if (!MI->getDesc().canFoldAsLoad())
+ if (!MI->canFoldAsLoad())
return false;
DefMI = MI;
} else if (!MO.isUndef()) {
@@ -209,19 +193,17 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
}
void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
- LiveIntervals &LIS, VirtRegMap &VRM,
- const TargetInstrInfo &TII) {
+ ArrayRef<unsigned> RegsBeingSpilled) {
SetVector<LiveInterval*,
SmallVector<LiveInterval*, 8>,
SmallPtrSet<LiveInterval*, 8> > ToShrink;
- MachineRegisterInfo &MRI = VRM.getRegInfo();
for (;;) {
// Erase all dead defs.
while (!Dead.empty()) {
MachineInstr *MI = Dead.pop_back_val();
assert(MI->allDefsAreDead() && "Def isn't really dead");
- SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
+ SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
// Never delete inline asm.
if (MI->isInlineAsm()) {
@@ -265,7 +247,7 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
LI.removeValNo(VNI);
if (LI.empty()) {
ToShrink.remove(&LI);
- eraseVirtReg(Reg, LIS);
+ eraseVirtReg(Reg);
}
}
}
@@ -284,12 +266,26 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
// Shrink just one live interval. Then delete new dead defs.
LiveInterval *LI = ToShrink.back();
ToShrink.pop_back();
- if (foldAsLoad(LI, Dead, MRI, LIS, TII))
+ if (foldAsLoad(LI, Dead))
continue;
if (delegate_)
delegate_->LRE_WillShrinkVirtReg(LI->reg);
if (!LIS.shrinkToUses(LI, &Dead))
continue;
+
+ // Don't create new intervals for a register being spilled.
+ // The new intervals would have to be spilled anyway, so it's not worth it.
+ // Also, they currently aren't spilled, so creating them and not spilling
+ // them results in incorrect code.
+ bool BeingSpilled = false;
+ for (unsigned i = 0, e = RegsBeingSpilled.size(); i != e; ++i) {
+ if (LI->reg == RegsBeingSpilled[i]) {
+ BeingSpilled = true;
+ break;
+ }
+ }
+
+ if (BeingSpilled) continue;
// LI may have been separated, create new intervals.
LI->RenumberValues(LIS);
@@ -298,16 +294,16 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
if (NumComp <= 1)
continue;
++NumFracRanges;
- bool IsOriginal = VRM.getOriginal(LI->reg) == LI->reg;
+ bool IsOriginal = VRM && VRM->getOriginal(LI->reg) == LI->reg;
DEBUG(dbgs() << NumComp << " components: " << *LI << '\n');
SmallVector<LiveInterval*, 8> Dups(1, LI);
for (unsigned i = 1; i != NumComp; ++i) {
- Dups.push_back(&createFrom(LI->reg, LIS, VRM));
+ Dups.push_back(&createFrom(LI->reg));
// If LI is an original interval that hasn't been split yet, make the new
// intervals their own originals instead of referring to LI. The original
// interval must contain all the split products, and LI doesn't.
if (IsOriginal)
- VRM.setIsSplitFromReg(Dups.back()->reg, 0);
+ VRM->setIsSplitFromReg(Dups.back()->reg, 0);
if (delegate_)
delegate_->LRE_DidCloneVirtReg(Dups.back()->reg, LI->reg);
}
@@ -316,10 +312,8 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
}
void LiveRangeEdit::calculateRegClassAndHint(MachineFunction &MF,
- LiveIntervals &LIS,
const MachineLoopInfo &Loops) {
VirtRegAuxInfo VRAI(MF, LIS, Loops);
- MachineRegisterInfo &MRI = MF.getRegInfo();
for (iterator I = begin(), E = end(); I != E; ++I) {
LiveInterval &LI = **I;
if (MRI.recomputeRegClass(LI.reg, MF.getTarget()))
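
Taken together, these hunks move LiveRangeEdit's collaborators (LIS, MRI, TII, and an optional VRM) from per-call parameters into members bound at construction. A rough sketch of the resulting call pattern; the constructor shape shown is an approximation, not verbatim from the header:

    // Approximate post-refactor usage: dependencies are supplied once.
    LiveRangeEdit LRE(ParentLI, NewRegs, MF, LIS, &VRM);
    if (LRE.anyRematerializable(AA))     // was anyRematerializable(LIS, TII, AA)
      /* ...try rematerialization before spilling... */;
    LRE.eliminateDeadDefs(Dead, RegsBeingSpilled); // was (Dead, LIS, VRM, TII)
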
diff --git a/contrib/llvm/lib/CodeGen/LiveVariables.cpp b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
index 2ca90f9..5a0d97d 100644
--- a/contrib/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
@@ -14,7 +14,7 @@
// the instruction, but are never used after the instruction (i.e., they are
// killed).
//
-// This class computes live variables using are sparse implementation based on
+// This class computes live variables using a sparse implementation based on
// the machine code SSA form. This class computes live variable information for
// each virtual and _register allocatable_ physical register in a function. It
// uses the dominance properties of SSA form to efficiently compute live
@@ -33,6 +33,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
@@ -41,6 +42,7 @@
using namespace llvm;
char LiveVariables::ID = 0;
+char &llvm::LiveVariablesID = LiveVariables::ID;
INITIALIZE_PASS_BEGIN(LiveVariables, "livevars",
"Live Variable Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(UnreachableMachineBlockElim)
@@ -90,7 +92,7 @@ void LiveVariables::MarkVirtRegAliveInBlock(VarInfo& VRInfo,
MachineBasicBlock *MBB,
std::vector<MachineBasicBlock*> &WorkList) {
unsigned BBNum = MBB->getNumber();
-
+
// Check to see if this basic block is one of the killing blocks. If so,
// remove it.
for (unsigned i = 0, e = VRInfo.Kills.size(); i != e; ++i)
@@ -98,7 +100,7 @@ void LiveVariables::MarkVirtRegAliveInBlock(VarInfo& VRInfo,
VRInfo.Kills.erase(VRInfo.Kills.begin()+i); // Erase entry
break;
}
-
+
if (MBB == DefBlock) return; // Terminate recursion
if (VRInfo.AliveBlocks.test(BBNum))
@@ -107,6 +109,7 @@ void LiveVariables::MarkVirtRegAliveInBlock(VarInfo& VRInfo,
// Mark the variable known alive in this bb
VRInfo.AliveBlocks.set(BBNum);
+ assert(MBB != &MF->front() && "Can't find reaching def for virtreg");
WorkList.insert(WorkList.end(), MBB->pred_rbegin(), MBB->pred_rend());
}
@@ -130,7 +133,6 @@ void LiveVariables::HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB,
unsigned BBNum = MBB->getNumber();
VarInfo& VRInfo = getVarInfo(reg);
- VRInfo.NumUses++;
// Check to see if this basic block is already a kill block.
if (!VRInfo.Kills.empty() && VRInfo.Kills.back()->getParent() == MBB) {
@@ -190,7 +192,7 @@ MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg,
unsigned LastDefReg = 0;
unsigned LastDefDist = 0;
MachineInstr *LastDef = NULL;
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
MachineInstr *Def = PhysRegDef[SubReg];
if (!Def)
@@ -214,7 +216,7 @@ MachineInstr *LiveVariables::FindLastPartialDef(unsigned Reg,
unsigned DefReg = MO.getReg();
if (TRI->isSubRegister(Reg, DefReg)) {
PartDefRegs.insert(DefReg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(DefReg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(DefReg);
unsigned SubReg = *SubRegs; ++SubRegs)
PartDefRegs.insert(SubReg);
}
@@ -245,7 +247,7 @@ void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr *MI) {
true/*IsImp*/));
PhysRegDef[Reg] = LastPartialDef;
SmallSet<unsigned, 8> Processed;
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
if (Processed.count(SubReg))
continue;
@@ -257,20 +259,19 @@ void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr *MI) {
false/*IsDef*/,
true/*IsImp*/));
PhysRegDef[SubReg] = LastPartialDef;
- for (const unsigned *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
+ for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
Processed.insert(*SS);
}
}
- }
- else if (LastDef && !PhysRegUse[Reg] &&
- !LastDef->findRegisterDefOperand(Reg))
+ } else if (LastDef && !PhysRegUse[Reg] &&
+ !LastDef->findRegisterDefOperand(Reg))
// Last def defines the super register, add an implicit def of reg.
- LastDef->addOperand(MachineOperand::CreateReg(Reg,
- true/*IsDef*/, true/*IsImp*/));
+ LastDef->addOperand(MachineOperand::CreateReg(Reg, true/*IsDef*/,
+ true/*IsImp*/));
// Remember this use.
PhysRegUse[Reg] = MI;
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs)
PhysRegUse[SubReg] = MI;
}
@@ -286,7 +287,7 @@ MachineInstr *LiveVariables::FindLastRefOrPartRef(unsigned Reg) {
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
unsigned LastPartDefDist = 0;
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
MachineInstr *Def = PhysRegDef[SubReg];
if (Def && Def != LastDef) {
@@ -331,11 +332,11 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
// Or whole register is defined, but only partly used.
// AX<dead> = AL<imp-def>
// = AL<kill>
- // AX =
+ // AX =
MachineInstr *LastPartDef = 0;
unsigned LastPartDefDist = 0;
SmallSet<unsigned, 8> PartUses;
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
MachineInstr *Def = PhysRegDef[SubReg];
if (Def && Def != LastDef) {
@@ -350,7 +351,7 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
}
if (MachineInstr *Use = PhysRegUse[SubReg]) {
PartUses.insert(SubReg);
- for (const unsigned *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
+ for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
PartUses.insert(*SS);
unsigned Dist = DistanceMap[Use];
if (Dist > LastRefOrPartRefDist) {
@@ -366,7 +367,7 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
// EAX<dead> = op AL<imp-def>
// That is, EAX def is dead but AL def extends pass it.
PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
if (!PartUses.count(SubReg))
continue;
@@ -387,11 +388,11 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
else {
LastRefOrPartRef->addRegisterKilled(SubReg, TRI, true);
PhysRegUse[SubReg] = LastRefOrPartRef;
- for (const unsigned *SSRegs = TRI->getSubRegisters(SubReg);
+ for (const uint16_t *SSRegs = TRI->getSubRegisters(SubReg);
unsigned SSReg = *SSRegs; ++SSRegs)
PhysRegUse[SSReg] = LastRefOrPartRef;
}
- for (const unsigned *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
+ for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
PartUses.erase(*SS);
}
} else if (LastRefOrPartRef == PhysRegDef[Reg] && LastRefOrPartRef != MI) {
@@ -419,16 +420,37 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
return true;
}
+void LiveVariables::HandleRegMask(const MachineOperand &MO) {
+ // Call HandlePhysRegKill() for all live registers clobbered by Mask.
+ // Clobbered registers are always dead, so there is no need to use
+ // HandlePhysRegDef().
+ for (unsigned Reg = 1, NumRegs = TRI->getNumRegs(); Reg != NumRegs; ++Reg) {
+ // Skip dead regs.
+ if (!PhysRegDef[Reg] && !PhysRegUse[Reg])
+ continue;
+ // Skip mask-preserved regs.
+ if (!MO.clobbersPhysReg(Reg))
+ continue;
+ // Kill the largest clobbered super-register.
+ // This avoids needless implicit operands.
+ unsigned Super = Reg;
+ for (const uint16_t *SR = TRI->getSuperRegisters(Reg); *SR; ++SR)
+ if ((PhysRegDef[*SR] || PhysRegUse[*SR]) && MO.clobbersPhysReg(*SR))
+ Super = *SR;
+ HandlePhysRegKill(Super, 0);
+ }
+}
+
void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
SmallVector<unsigned, 4> &Defs) {
// What parts of the register are previously defined?
SmallSet<unsigned, 32> Live;
if (PhysRegDef[Reg] || PhysRegUse[Reg]) {
Live.insert(Reg);
- for (const unsigned *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
+ for (const uint16_t *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
Live.insert(*SS);
} else {
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
// If a register isn't itself defined, but all parts that make up of it
// are defined, then consider it also defined.
@@ -440,7 +462,7 @@ void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
continue;
if (PhysRegDef[SubReg] || PhysRegUse[SubReg]) {
Live.insert(SubReg);
- for (const unsigned *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
+ for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
Live.insert(*SS);
}
}
@@ -450,7 +472,7 @@ void LiveVariables::HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
// is referenced.
HandlePhysRegKill(Reg, MI);
// Only some of the sub-registers are used.
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
if (!Live.count(SubReg))
// Skip if this sub-register isn't defined.
@@ -469,7 +491,7 @@ void LiveVariables::UpdatePhysRegDefs(MachineInstr *MI,
Defs.pop_back();
PhysRegDef[Reg] = MI;
PhysRegUse[Reg] = NULL;
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
PhysRegDef[SubReg] = MI;
PhysRegUse[SubReg] = NULL;
@@ -492,6 +514,12 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
std::fill(PhysRegUse, PhysRegUse + NumRegs, (MachineInstr*)0);
PHIJoins.clear();
+ // FIXME: LiveIntervals will be updated to remove its dependence on
+ // LiveVariables to improve compilation time and eliminate bizarre pass
+ // dependencies. Until then, we can't change much in -O0.
+ if (!MRI->isSSA())
+ report_fatal_error("regalloc=... not currently supported with -O0");
+
analyzePHINodes(mf);
// Calculate live variable information in depth first order on the CFG of the
@@ -536,8 +564,13 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
// Clear kill and dead markers. LV will recompute them.
SmallVector<unsigned, 4> UseRegs;
SmallVector<unsigned, 4> DefRegs;
+ SmallVector<unsigned, 1> RegMasks;
for (unsigned i = 0; i != NumOperandsToProcess; ++i) {
MachineOperand &MO = MI->getOperand(i);
+ if (MO.isRegMask()) {
+ RegMasks.push_back(i);
+ continue;
+ }
if (!MO.isReg() || MO.getReg() == 0)
continue;
unsigned MOReg = MO.getReg();
@@ -559,6 +592,10 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
HandlePhysRegUse(MOReg, MI);
}
+ // Process all masked registers. (Call clobbers).
+ for (unsigned i = 0, e = RegMasks.size(); i != e; ++i)
+ HandleRegMask(MI->getOperand(RegMasks[i]));
+
// Process all defs.
for (unsigned i = 0, e = DefRegs.size(); i != e; ++i) {
unsigned MOReg = DefRegs[i];
@@ -590,8 +627,8 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
// them. The tail callee need not take the same registers as input
// that it produces as output, and there are dependencies for its input
// registers elsewhere.
- if (!MBB->empty() && MBB->back().getDesc().isReturn()
- && !MBB->back().getDesc().isCall()) {
+ if (!MBB->empty() && MBB->back().isReturn()
+ && !MBB->back().isCall()) {
MachineInstr *Ret = &MBB->back();
for (MachineRegisterInfo::liveout_iterator
@@ -607,10 +644,27 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
}
}
+ // MachineCSE may CSE instructions which write to non-allocatable physical
+ // registers across MBBs. Remember if any reserved register is liveout.
+ SmallSet<unsigned, 4> LiveOuts;
+ for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI) {
+ MachineBasicBlock *SuccMBB = *SI;
+ if (SuccMBB->isLandingPad())
+ continue;
+ for (MachineBasicBlock::livein_iterator LI = SuccMBB->livein_begin(),
+ LE = SuccMBB->livein_end(); LI != LE; ++LI) {
+ unsigned LReg = *LI;
+ if (!TRI->isInAllocatableClass(LReg))
+ // Ignore other live-ins, e.g. those that are live into landing pads.
+ LiveOuts.insert(LReg);
+ }
+ }
+
// Loop over PhysRegDef / PhysRegUse, killing any registers that are
// available at the end of the basic block.
for (unsigned i = 0; i != NumRegs; ++i)
- if (PhysRegDef[i] || PhysRegUse[i])
+ if ((PhysRegDef[i] || PhysRegUse[i]) && !LiveOuts.count(i))
HandlePhysRegDef(i, 0, Defs);
std::fill(PhysRegDef, PhysRegDef + NumRegs, (MachineInstr*)0);
@@ -754,7 +808,7 @@ void LiveVariables::addNewBlock(MachineBasicBlock *BB,
const unsigned NumNew = BB->getNumber();
// All registers used by PHI nodes in SuccBB must be live through BB.
- for (MachineBasicBlock::const_iterator BBI = SuccBB->begin(),
+ for (MachineBasicBlock::iterator BBI = SuccBB->begin(),
BBE = SuccBB->end(); BBI != BBE && BBI->isPHI(); ++BBI)
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2)
if (BBI->getOperand(i+1).getMBB() == BB)
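
One addition above deserves a note: HandleRegMask models call clobbers through regmask operands. A small hedged illustration of the operand's semantics; the helper function is hypothetical, not from this commit:

    // A regmask operand's set bits name the registers PRESERVED across the
    // call, so clobbersPhysReg(Reg) is true for anything outside that set.
    static bool survivesCall(const MachineOperand &MO, unsigned Reg) {
      assert(MO.isRegMask() && "Expected a regmask operand");
      return !MO.clobbersPhysReg(Reg); // Preserved exactly when not clobbered.
    }
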
diff --git a/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp b/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
index 1318d62..238bf52 100644
--- a/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
+++ b/contrib/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -71,19 +71,15 @@ namespace {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
- const char *getPassName() const {
- return "Local Stack Slot Allocation";
- }
private:
};
} // end anonymous namespace
char LocalStackSlotPass::ID = 0;
-
-FunctionPass *llvm::createLocalStackSlotAllocationPass() {
- return new LocalStackSlotPass();
-}
+char &llvm::LocalStackSlotAllocationID = LocalStackSlotPass::ID;
+INITIALIZE_PASS(LocalStackSlotPass, "localstackalloc",
+ "Local Stack Slot Allocation", false, false)
bool LocalStackSlotPass::runOnMachineFunction(MachineFunction &MF) {
MachineFrameInfo *MFI = MF.getFrameInfo();
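
The LocalStackSlotAllocation change follows a broader migration in this import: passes expose a char &ID and register via INITIALIZE_PASS instead of a create* factory. Approximately, client code shifts as sketched below; the addPass call site is an assumption about the TargetPassConfig-era idiom, not code from this diff:

    // Old: construct the pass directly through the (now removed) factory.
    //   PM.add(createLocalStackSlotAllocationPass());
    // New: schedule it by its exported pass ID, e.g. from TargetPassConfig.
    addPass(&LocalStackSlotAllocationID);
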
diff --git a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 4c5fe4c..6c8a107 100644
--- a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -73,7 +73,8 @@ void ilist_traits<MachineBasicBlock>::addNodeToList(MachineBasicBlock *N) {
// Make sure the instructions have their operands in the reginfo lists.
MachineRegisterInfo &RegInfo = MF.getRegInfo();
- for (MachineBasicBlock::iterator I = N->begin(), E = N->end(); I != E; ++I)
+ for (MachineBasicBlock::instr_iterator
+ I = N->instr_begin(), E = N->instr_end(); I != E; ++I)
I->AddRegOperandsToUseLists(RegInfo);
LeakDetector::removeGarbageObject(N);
@@ -120,8 +121,8 @@ void ilist_traits<MachineInstr>::removeNodeFromList(MachineInstr *N) {
/// lists.
void ilist_traits<MachineInstr>::
transferNodesFromList(ilist_traits<MachineInstr> &fromList,
- MachineBasicBlock::iterator first,
- MachineBasicBlock::iterator last) {
+ ilist_iterator<MachineInstr> first,
+ ilist_iterator<MachineInstr> last) {
assert(Parent->getParent() == fromList.Parent->getParent() &&
"MachineInstr parent mismatch!");
@@ -140,33 +141,75 @@ void ilist_traits<MachineInstr>::deleteNode(MachineInstr* MI) {
}
MachineBasicBlock::iterator MachineBasicBlock::getFirstNonPHI() {
- iterator I = begin();
- while (I != end() && I->isPHI())
+ instr_iterator I = instr_begin(), E = instr_end();
+ while (I != E && I->isPHI())
++I;
+ assert(!I->isInsideBundle() && "First non-phi MI cannot be inside a bundle!");
return I;
}
MachineBasicBlock::iterator
MachineBasicBlock::SkipPHIsAndLabels(MachineBasicBlock::iterator I) {
- while (I != end() && (I->isPHI() || I->isLabel() || I->isDebugValue()))
+ iterator E = end();
+ while (I != E && (I->isPHI() || I->isLabel() || I->isDebugValue()))
++I;
+ // FIXME: This needs to change if we wish to bundle labels / dbg_values
+ // inside the bundle.
+ assert(!I->isInsideBundle() &&
+ "First non-phi / non-label instruction is inside a bundle!");
return I;
}
MachineBasicBlock::iterator MachineBasicBlock::getFirstTerminator() {
- iterator I = end();
- while (I != begin() && ((--I)->getDesc().isTerminator() || I->isDebugValue()))
+ iterator B = begin(), E = end(), I = E;
+ while (I != B && ((--I)->isTerminator() || I->isDebugValue()))
; /*noop */
- while (I != end() && !I->getDesc().isTerminator())
+ while (I != E && !I->isTerminator())
+ ++I;
+ return I;
+}
+
+MachineBasicBlock::const_iterator
+MachineBasicBlock::getFirstTerminator() const {
+ const_iterator B = begin(), E = end(), I = E;
+ while (I != B && ((--I)->isTerminator() || I->isDebugValue()))
+ ; /*noop */
+ while (I != E && !I->isTerminator())
+ ++I;
+ return I;
+}
+
+MachineBasicBlock::instr_iterator MachineBasicBlock::getFirstInstrTerminator() {
+ instr_iterator B = instr_begin(), E = instr_end(), I = E;
+ while (I != B && ((--I)->isTerminator() || I->isDebugValue()))
+ ; /*noop */
+ while (I != E && !I->isTerminator())
++I;
return I;
}
MachineBasicBlock::iterator MachineBasicBlock::getLastNonDebugInstr() {
- iterator B = begin(), I = end();
+ // Skip over end-of-block dbg_value instructions.
+ instr_iterator B = instr_begin(), I = instr_end();
+ while (I != B) {
+ --I;
+ // Return instruction that starts a bundle.
+ if (I->isDebugValue() || I->isInsideBundle())
+ continue;
+ return I;
+ }
+ // The block is all debug values.
+ return end();
+}
+
+MachineBasicBlock::const_iterator
+MachineBasicBlock::getLastNonDebugInstr() const {
+ // Skip over end-of-block dbg_value instructions.
+ const_instr_iterator B = instr_begin(), I = instr_end();
while (I != B) {
--I;
- if (I->isDebugValue())
+ // Return instruction that starts a bundle.
+ if (I->isDebugValue() || I->isInsideBundle())
continue;
return I;
}
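
Several hunks in this file hinge on the new bundle-aware iterator split. A hedged sketch of the distinction (the visitor functions are hypothetical):

    // 'iterator' steps over whole bundles; 'instr_iterator' visits every
    // MachineInstr, including those for which isInsideBundle() is true.
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I)
      visitTopLevel(*I);  // one visit per bundle header or lone instruction
    for (MachineBasicBlock::instr_iterator I = MBB->instr_begin(),
           E = MBB->instr_end(); I != E; ++I)
      visitEvery(*I);     // one visit per instruction, bundled or not
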
@@ -195,6 +238,18 @@ StringRef MachineBasicBlock::getName() const {
return "(null)";
}
+/// Return a hopefully unique identifier for this block.
+std::string MachineBasicBlock::getFullName() const {
+ std::string Name;
+ if (getParent())
+ Name = (getParent()->getFunction()->getName() + ":").str();
+ if (getBasicBlock())
+ Name += getBasicBlock()->getName();
+ else
+ Name += (Twine("BB") + Twine(getNumber())).str();
+ return Name;
+}
+
void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
const MachineFunction *MF = getParent();
if (!MF) {
@@ -203,8 +258,6 @@ void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
return;
}
- if (Alignment) { OS << "Alignment " << Alignment << "\n"; }
-
if (Indexes)
OS << Indexes->getMBBStartIdx(this) << '\t';
@@ -218,6 +271,12 @@ void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
}
if (isLandingPad()) { OS << Comma << "EH LANDING PAD"; Comma = ", "; }
if (hasAddressTaken()) { OS << Comma << "ADDRESS TAKEN"; Comma = ", "; }
+ if (Alignment) {
+ OS << Comma << "Align " << Alignment << " (" << (1u << Alignment)
+ << " bytes)";
+ Comma = ", ";
+ }
+
OS << '\n';
const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
@@ -237,13 +296,15 @@ void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
OS << '\n';
}
- for (const_iterator I = begin(); I != end(); ++I) {
+ for (const_instr_iterator I = instr_begin(); I != instr_end(); ++I) {
if (Indexes) {
if (Indexes->hasIndex(I))
OS << Indexes->getInstructionIndex(I);
OS << '\t';
}
OS << '\t';
+ if (I->isInsideBundle())
+ OS << " * ";
I->print(OS, &getParent()->getTarget());
}
@@ -260,8 +321,8 @@ void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
void MachineBasicBlock::removeLiveIn(unsigned Reg) {
std::vector<unsigned>::iterator I =
std::find(LiveIns.begin(), LiveIns.end(), Reg);
- assert(I != LiveIns.end() && "Not a live in!");
- LiveIns.erase(I);
+ if (I != LiveIns.end())
+ LiveIns.erase(I);
}
bool MachineBasicBlock::isLiveIn(unsigned Reg) const {
@@ -297,8 +358,22 @@ void MachineBasicBlock::updateTerminator() {
TII->RemoveBranch(*this);
} else {
// The block has an unconditional fallthrough. If its successor is not
- // its layout successor, insert a branch.
- TBB = *succ_begin();
+ // its layout successor, insert a branch. First we have to locate the
+ // only non-landing-pad successor, as that is the fallthrough block.
+ for (succ_iterator SI = succ_begin(), SE = succ_end(); SI != SE; ++SI) {
+ if ((*SI)->isLandingPad())
+ continue;
+ assert(!TBB && "Found more than one non-landing-pad successor!");
+ TBB = *SI;
+ }
+
+ // If there is no non-landing-pad successor, the block has no
+ // fall-through edges to be concerned with.
+ if (!TBB)
+ return;
+
+ // Finally update the unconditional successor to be reached via a branch
+ // if it would not be reached by fallthrough.
if (!isLayoutSuccessor(TBB))
TII->InsertBranch(*this, TBB, 0, Cond, dl);
}
@@ -435,8 +510,8 @@ MachineBasicBlock::transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB) {
fromMBB->removeSuccessor(Succ);
// Fix up any PHI nodes in the successor.
- for (MachineBasicBlock::iterator MI = Succ->begin(), ME = Succ->end();
- MI != ME && MI->isPHI(); ++MI)
+ for (MachineBasicBlock::instr_iterator MI = Succ->instr_begin(),
+ ME = Succ->instr_end(); MI != ME && MI->isPHI(); ++MI)
for (unsigned i = 2, e = MI->getNumOperands()+1; i != e; i += 2) {
MachineOperand &MO = MI->getOperand(i);
if (MO.getMBB() == fromMBB)
@@ -473,13 +548,10 @@ bool MachineBasicBlock::canFallThrough() {
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond)) {
// If we couldn't analyze the branch, examine the last instruction.
// If the block doesn't end in a known control barrier, assume fallthrough
- // is possible. The isPredicable check is needed because this code can be
+ // is possible. The isPredicated check is needed because this code can be
// called during IfConversion, where an instruction which is normally a
- // Barrier is predicated and thus no longer an actual control barrier. This
- // is over-conservative though, because if an instruction isn't actually
- // predicated we could still treat it like a barrier.
- return empty() || !back().getDesc().isBarrier() ||
- back().getDesc().isPredicable();
+ // Barrier is predicated and thus no longer an actual control barrier.
+ return empty() || !back().isBarrier() || TII->isPredicated(&back());
}
// If there is no branch, control always falls through.
@@ -538,14 +610,16 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
// Collect a list of virtual registers killed by the terminators.
SmallVector<unsigned, 4> KilledRegs;
if (LV)
- for (iterator I = getFirstTerminator(), E = end(); I != E; ++I) {
+ for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
+ I != E; ++I) {
MachineInstr *MI = I;
for (MachineInstr::mop_iterator OI = MI->operands_begin(),
OE = MI->operands_end(); OI != OE; ++OI) {
- if (!OI->isReg() || !OI->isUse() || !OI->isKill() || OI->isUndef())
+ if (!OI->isReg() || OI->getReg() == 0 ||
+ !OI->isUse() || !OI->isKill() || OI->isUndef())
continue;
unsigned Reg = OI->getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg) &&
+ if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
LV->getVarInfo(Reg).removeKill(MI)) {
KilledRegs.push_back(Reg);
DEBUG(dbgs() << "Removing terminator kill: " << *MI);
@@ -565,7 +639,8 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
}
// Fix PHI nodes in Succ so they refer to NMBB instead of this
- for (MachineBasicBlock::iterator i = Succ->begin(), e = Succ->end();
+ for (MachineBasicBlock::instr_iterator
+ i = Succ->instr_begin(),e = Succ->instr_end();
i != e && i->isPHI(); ++i)
for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
if (i->getOperand(ni+1).getMBB() == this)
@@ -577,14 +652,16 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
NMBB->addLiveIn(*I);
// Update LiveVariables.
+ const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
if (LV) {
// Restore kills of virtual registers that were killed by the terminators.
while (!KilledRegs.empty()) {
unsigned Reg = KilledRegs.pop_back_val();
- for (iterator I = end(), E = begin(); I != E;) {
- if (!(--I)->addRegisterKilled(Reg, NULL, /* addIfNotFound= */ false))
+ for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
+ if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
continue;
- LV->getVarInfo(Reg).Kills.push_back(I);
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ LV->getVarInfo(Reg).Kills.push_back(I);
DEBUG(dbgs() << "Restored terminator kill: " << *I);
break;
}
@@ -650,6 +727,42 @@ MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
return NMBB;
}
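+/// erase - Remove the instruction at 'I' from this block, returning an
+/// iterator to the instruction after it. If 'I' is a bundle header, the
+/// entire bundle is erased with it.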
+MachineBasicBlock::iterator
+MachineBasicBlock::erase(MachineBasicBlock::iterator I) {
+ if (I->isBundle()) {
+ MachineBasicBlock::iterator E = llvm::next(I);
+ return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
+ }
+
+ return Insts.erase(I.getInstrIterator());
+}
+
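+/// remove - Unlink the instruction 'I' from this block without deleting it.
+/// If 'I' is a bundle header, the instructions bundled with it are unlinked
+/// as well before 'I' itself is removed and returned.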
+MachineInstr *MachineBasicBlock::remove(MachineInstr *I) {
+ if (I->isBundle()) {
+ instr_iterator MII = llvm::next(I);
+ iterator E = end();
+ while (MII != E && MII->isInsideBundle()) {
+ MachineInstr *MI = &*MII++;
+ Insts.remove(MI);
+ }
+ }
+
+ return Insts.remove(I);
+}
+
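+/// splice - Move the instruction at 'From' in 'Other' so that it is inserted
+/// before 'where' in this block. If 'From' is a bundle header, the whole
+/// bundle is moved.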
+void MachineBasicBlock::splice(MachineBasicBlock::iterator where,
+ MachineBasicBlock *Other,
+ MachineBasicBlock::iterator From) {
+ if (From->isBundle()) {
+ MachineBasicBlock::iterator To = llvm::next(From);
+ Insts.splice(where.getInstrIterator(), Other->Insts,
+ From.getInstrIterator(), To.getInstrIterator());
+ return;
+ }
+
+ Insts.splice(where.getInstrIterator(), Other->Insts, From.getInstrIterator());
+}
+
/// removeFromParent - This method unlinks 'this' from the containing function,
/// and returns it, but does not delete it.
MachineBasicBlock *MachineBasicBlock::removeFromParent() {
@@ -673,10 +786,10 @@ void MachineBasicBlock::ReplaceUsesOfBlockWith(MachineBasicBlock *Old,
MachineBasicBlock *New) {
assert(Old != New && "Cannot replace self with self!");
- MachineBasicBlock::iterator I = end();
- while (I != begin()) {
+ MachineBasicBlock::instr_iterator I = instr_end();
+ while (I != instr_begin()) {
--I;
- if (!I->getDesc().isTerminator()) break;
+ if (!I->isTerminator()) break;
// Scan the operands of this machine instruction, replacing any uses of Old
// with New.
@@ -755,27 +868,27 @@ bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA,
/// findDebugLoc - find the next valid DebugLoc starting at MBBI, skipping
/// any DBG_VALUE instructions. Return UnknownLoc if there is none.
DebugLoc
-MachineBasicBlock::findDebugLoc(MachineBasicBlock::iterator &MBBI) {
+MachineBasicBlock::findDebugLoc(instr_iterator MBBI) {
DebugLoc DL;
- MachineBasicBlock::iterator E = end();
- if (MBBI != E) {
- // Skip debug declarations, we don't want a DebugLoc from them.
- MachineBasicBlock::iterator MBBI2 = MBBI;
- while (MBBI2 != E && MBBI2->isDebugValue())
- MBBI2++;
- if (MBBI2 != E)
- DL = MBBI2->getDebugLoc();
- }
+ instr_iterator E = instr_end();
+ if (MBBI == E)
+ return DL;
+
+ // Skip debug declarations, we don't want a DebugLoc from them.
+ while (MBBI != E && MBBI->isDebugValue())
+ MBBI++;
+ if (MBBI != E)
+ DL = MBBI->getDebugLoc();
return DL;
}
/// getSuccWeight - Return weight of the edge from this block to MBB.
///
-uint32_t MachineBasicBlock::getSuccWeight(MachineBasicBlock *succ) {
+uint32_t MachineBasicBlock::getSuccWeight(const MachineBasicBlock *succ) const {
if (Weights.empty())
return 0;
- succ_iterator I = std::find(Successors.begin(), Successors.end(), succ);
+ const_succ_iterator I = std::find(Successors.begin(), Successors.end(), succ);
return *getWeightIterator(I);
}
@@ -789,6 +902,16 @@ getWeightIterator(MachineBasicBlock::succ_iterator I) {
return Weights.begin() + index;
}
+/// getWeightIterator - Return the weight iterator corresponding to the I
+/// successor iterator.
+MachineBasicBlock::const_weight_iterator MachineBasicBlock::
+getWeightIterator(MachineBasicBlock::const_succ_iterator I) const {
+ assert(Weights.size() == Successors.size() && "Async weight list!");
+ const size_t index = std::distance(Successors.begin(), I);
+ assert(index < Weights.size() && "Not a current successor!");
+ return Weights.begin() + index;
+}
+
void llvm::WriteAsOperand(raw_ostream &OS, const MachineBasicBlock *MBB,
bool t) {
OS << "BB#" << MBB->getNumber();
diff --git a/contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp b/contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp
index b92cda9..a079d6e 100644
--- a/contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBlockFrequencyInfo.cpp
@@ -56,6 +56,6 @@ bool MachineBlockFrequencyInfo::runOnMachineFunction(MachineFunction &F) {
/// the other block frequencies. We do this to avoid using floating point.
///
BlockFrequency MachineBlockFrequencyInfo::
-getBlockFreq(MachineBasicBlock *MBB) const {
+getBlockFreq(const MachineBasicBlock *MBB) const {
return MBFI->getBlockFreq(MBB);
}
diff --git a/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
new file mode 100644
index 0000000..22d7212
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -0,0 +1,1001 @@
+//===-- MachineBlockPlacement.cpp - Basic Block Code Layout optimization --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements basic block placement transformations using the CFG
+// structure and branch probability estimates.
+//
+// The pass strives to preserve the structure of the CFG (that is, retain
+// a topological ordering of basic blocks) in the absence of a *strong* signal
+// to the contrary from probabilities. However, within the CFG structure, it
+// attempts to choose an ordering which favors placing more likely sequences of
+// blocks adjacent to each other.
+//
+// The algorithm works from the inner-most loop within a function outward, and
+// at each stage walks through the basic blocks, trying to coalesce them into
+// sequential chains where allowed by the CFG (or demanded by heavy
+// probabilities). Finally, it walks the blocks in topological order, and the
+// first time it reaches a chain of basic blocks, it schedules them in the
+// function in-order.
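+//
+// For illustration: given a diamond CFG A -> {B: 90%, C: 10%}, B -> D,
+// C -> D, the hot chain [A, B, D] is formed first and C is placed after it,
+// so the frequent path falls through and only the cold path takes a branch.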
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "block-placement2"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
+#include <algorithm>
+using namespace llvm;
+
+STATISTIC(NumCondBranches, "Number of conditional branches");
+STATISTIC(NumUncondBranches, "Number of unconditional branches");
+STATISTIC(CondBranchTakenFreq,
+ "Potential frequency of taking conditional branches");
+STATISTIC(UncondBranchTakenFreq,
+ "Potential frequency of taking unconditional branches");
+
+namespace {
+class BlockChain;
+/// \brief Type for our function-wide basic block -> block chain mapping.
+typedef DenseMap<MachineBasicBlock *, BlockChain *> BlockToChainMapType;
+}
+
+namespace {
+/// \brief A chain of blocks which will be laid out contiguously.
+///
+/// This is the data structure representing a chain of consecutive blocks that
+/// are profitable to layout together in order to maximize fallthrough
+/// probabilities. We also can use a block chain to represent a sequence of
+/// basic blocks which have some external (correctness) requirement for
+/// sequential layout.
+///
+/// Eventually, the block chains will form a directed graph over the function.
+/// We provide an SCC-supporting-iterator in order to quickly build and walk the
+/// SCCs of block chains within a function.
+///
+/// The block chains also have support for calculating and caching probability
+/// information related to the chain itself versus other chains. This is used
+/// for ranking during the final layout of block chains.
+class BlockChain {
+ /// \brief The sequence of blocks belonging to this chain.
+ ///
+ /// This is the sequence of blocks for a particular chain. These will be laid
+ /// out in-order within the function.
+ SmallVector<MachineBasicBlock *, 4> Blocks;
+
+ /// \brief A handle to the function-wide basic block to block chain mapping.
+ ///
+ /// This is retained in each block chain to simplify the computation of child
+ /// block chains for SCC-formation and iteration. We store the edges to child
+ /// basic blocks, and map them back to their associated chains using this
+ /// structure.
+ BlockToChainMapType &BlockToChain;
+
+public:
+ /// \brief Construct a new BlockChain.
+ ///
+ /// This builds a new block chain representing a single basic block in the
+ /// function. It also registers itself as the chain that block participates
+ /// in with the BlockToChain mapping.
+ BlockChain(BlockToChainMapType &BlockToChain, MachineBasicBlock *BB)
+ : Blocks(1, BB), BlockToChain(BlockToChain), LoopPredecessors(0) {
+ assert(BB && "Cannot create a chain with a null basic block");
+ BlockToChain[BB] = this;
+ }
+
+ /// \brief Iterator over blocks within the chain.
+ typedef SmallVectorImpl<MachineBasicBlock *>::const_iterator iterator;
+
+ /// \brief Beginning of blocks within the chain.
+ iterator begin() const { return Blocks.begin(); }
+
+ /// \brief End of blocks within the chain.
+ iterator end() const { return Blocks.end(); }
+
+ /// \brief Merge a block chain into this one.
+ ///
+ /// This routine merges a block chain into this one. It takes care of forming
+ /// a contiguous sequence of basic blocks, updating the edge list, and
+ /// updating the block -> chain mapping. It does not free or tear down the
+ /// old chain, but the old chain's block list is no longer valid.
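+  ///
+  /// For example (illustrative): if this chain is [A] and Chain is [B, C],
+  /// merge(B, Chain) leaves this chain as [A, B, C] and remaps B and C to it;
+  /// the old Chain object remains allocated but must not be used again.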
+ void merge(MachineBasicBlock *BB, BlockChain *Chain) {
+ assert(BB);
+ assert(!Blocks.empty());
+
+ // Fast path in case we don't have a chain already.
+ if (!Chain) {
+ assert(!BlockToChain[BB]);
+ Blocks.push_back(BB);
+ BlockToChain[BB] = this;
+ return;
+ }
+
+ assert(BB == *Chain->begin());
+ assert(Chain->begin() != Chain->end());
+
+ // Update the incoming blocks to point to this chain, and add them to the
+ // chain structure.
+ for (BlockChain::iterator BI = Chain->begin(), BE = Chain->end();
+ BI != BE; ++BI) {
+ Blocks.push_back(*BI);
+ assert(BlockToChain[*BI] == Chain && "Incoming blocks not in chain");
+ BlockToChain[*BI] = this;
+ }
+ }
+
+#ifndef NDEBUG
+ /// \brief Dump the blocks in this chain.
+ void dump() LLVM_ATTRIBUTE_USED {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ (*I)->dump();
+ }
+#endif // NDEBUG
+
+ /// \brief Count of predecessors within the loop currently being processed.
+ ///
+ /// This count is updated at each loop we process to represent the number of
+ /// in-loop predecessors of this chain.
+ unsigned LoopPredecessors;
+};
+}
+
+namespace {
+class MachineBlockPlacement : public MachineFunctionPass {
+ /// \brief A typedef for a block filter set.
+ typedef SmallPtrSet<MachineBasicBlock *, 16> BlockFilterSet;
+
+ /// \brief A handle to the branch probability pass.
+ const MachineBranchProbabilityInfo *MBPI;
+
+ /// \brief A handle to the function-wide block frequency pass.
+ const MachineBlockFrequencyInfo *MBFI;
+
+ /// \brief A handle to the loop info.
+ const MachineLoopInfo *MLI;
+
+ /// \brief A handle to the target's instruction info.
+ const TargetInstrInfo *TII;
+
+ /// \brief A handle to the target's lowering info.
+ const TargetLowering *TLI;
+
+ /// \brief Allocator and owner of BlockChain structures.
+ ///
+ /// We build BlockChains lazily by merging together high probability BB
+  /// sequences according to the "Algo2" in the paper mentioned at the top of
+ /// the file. To reduce malloc traffic, we allocate them using this slab-like
+ /// allocator, and destroy them after the pass completes.
+ SpecificBumpPtrAllocator<BlockChain> ChainAllocator;
+
+ /// \brief Function wide BasicBlock to BlockChain mapping.
+ ///
+ /// This mapping allows efficiently moving from any given basic block to the
+ /// BlockChain it participates in, if any. We use it to, among other things,
+ /// allow implicitly defining edges between chains as the existing edges
+ /// between basic blocks.
+ DenseMap<MachineBasicBlock *, BlockChain *> BlockToChain;
+
+ void markChainSuccessors(BlockChain &Chain,
+ MachineBasicBlock *LoopHeaderBB,
+ SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
+ const BlockFilterSet *BlockFilter = 0);
+ MachineBasicBlock *selectBestSuccessor(MachineBasicBlock *BB,
+ BlockChain &Chain,
+ const BlockFilterSet *BlockFilter);
+ MachineBasicBlock *selectBestCandidateBlock(
+ BlockChain &Chain, SmallVectorImpl<MachineBasicBlock *> &WorkList,
+ const BlockFilterSet *BlockFilter);
+ MachineBasicBlock *getFirstUnplacedBlock(
+ MachineFunction &F,
+ const BlockChain &PlacedChain,
+ MachineFunction::iterator &PrevUnplacedBlockIt,
+ const BlockFilterSet *BlockFilter);
+ void buildChain(MachineBasicBlock *BB, BlockChain &Chain,
+ SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
+ const BlockFilterSet *BlockFilter = 0);
+ MachineBasicBlock *findBestLoopTop(MachineFunction &F,
+ MachineLoop &L,
+ const BlockFilterSet &LoopBlockSet);
+ void buildLoopChains(MachineFunction &F, MachineLoop &L);
+ void buildCFGChains(MachineFunction &F);
+ void AlignLoops(MachineFunction &F);
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ MachineBlockPlacement() : MachineFunctionPass(ID) {
+ initializeMachineBlockPlacementPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &F);
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addRequired<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+}
+
+char MachineBlockPlacement::ID = 0;
+char &llvm::MachineBlockPlacementID = MachineBlockPlacement::ID;
+INITIALIZE_PASS_BEGIN(MachineBlockPlacement, "block-placement2",
+ "Branch Probability Basic Block Placement", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(MachineBlockPlacement, "block-placement2",
+ "Branch Probability Basic Block Placement", false, false)
+
+#ifndef NDEBUG
+/// \brief Helper to print the name of a MBB.
+///
+/// Only used by debug logging.
+static std::string getBlockName(MachineBasicBlock *BB) {
+ std::string Result;
+ raw_string_ostream OS(Result);
+ OS << "BB#" << BB->getNumber()
+ << " (derived from LLVM BB '" << BB->getName() << "')";
+ OS.flush();
+ return Result;
+}
+
+/// \brief Helper to print the number of a MBB.
+///
+/// Only used by debug logging.
+static std::string getBlockNum(MachineBasicBlock *BB) {
+ std::string Result;
+ raw_string_ostream OS(Result);
+ OS << "BB#" << BB->getNumber();
+ OS.flush();
+ return Result;
+}
+#endif
+
+/// \brief Mark a chain's successors as having one fewer preds.
+///
+/// When a chain is being merged into the "placed" chain, this routine will
+/// quickly walk the successors of each block in the chain and mark them as
+/// having one fewer active predecessor. It also adds any successors of this
+/// chain which reach the zero-predecessor state to the worklist passed in.
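+///
+/// For example (illustrative): if block D has two un-placed in-loop
+/// predecessors B and C, D's chain starts with LoopPredecessors == 2; placing
+/// the chains holding B and C drops the count to 0, at which point the head
+/// of D's chain is added to the worklist.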
+void MachineBlockPlacement::markChainSuccessors(
+ BlockChain &Chain,
+ MachineBasicBlock *LoopHeaderBB,
+ SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
+ const BlockFilterSet *BlockFilter) {
+ // Walk all the blocks in this chain, marking their successors as having
+ // a predecessor placed.
+ for (BlockChain::iterator CBI = Chain.begin(), CBE = Chain.end();
+ CBI != CBE; ++CBI) {
+ // Add any successors for which this is the only un-placed in-loop
+ // predecessor to the worklist as a viable candidate for CFG-neutral
+ // placement. No subsequent placement of this block will violate the CFG
+ // shape, so we get to use heuristics to choose a favorable placement.
+ for (MachineBasicBlock::succ_iterator SI = (*CBI)->succ_begin(),
+ SE = (*CBI)->succ_end();
+ SI != SE; ++SI) {
+ if (BlockFilter && !BlockFilter->count(*SI))
+ continue;
+ BlockChain &SuccChain = *BlockToChain[*SI];
+ // Disregard edges within a fixed chain, or edges to the loop header.
+ if (&Chain == &SuccChain || *SI == LoopHeaderBB)
+ continue;
+
+ // This is a cross-chain edge that is within the loop, so decrement the
+ // loop predecessor count of the destination chain.
+ if (SuccChain.LoopPredecessors > 0 && --SuccChain.LoopPredecessors == 0)
+ BlockWorkList.push_back(*SuccChain.begin());
+ }
+ }
+}
+
+/// \brief Select the best successor for a block.
+///
+/// This looks across all successors of a particular block and attempts to
+/// select the "best" one to be the layout successor. It only considers direct
+/// successors which also pass the block filter. It will attempt to avoid
+/// breaking CFG structure, but will cave and break such structures in the
+/// case of very hot successor edges.
+///
+/// \returns The best successor block found, or null if none are viable.
+MachineBasicBlock *MachineBlockPlacement::selectBestSuccessor(
+ MachineBasicBlock *BB, BlockChain &Chain,
+ const BlockFilterSet *BlockFilter) {
+ const BranchProbability HotProb(4, 5); // 80%
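+  // Illustrative example: with successor weights {9, 1}, the hot successor
+  // has probability 9/10 > HotProb, so it may be placed next even when that
+  // breaks the CFG-neutral (topological) ordering, subject to the predecessor
+  // check below.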
+
+ MachineBasicBlock *BestSucc = 0;
+ // FIXME: Due to the performance of the probability and weight routines in
+ // the MBPI analysis, we manually compute probabilities using the edge
+ // weights. This is suboptimal as it means that the somewhat subtle
+ // definition of edge weight semantics is encoded here as well. We should
+  // improve the MBPI interface to efficiently support query patterns such as
+ // this.
+ uint32_t BestWeight = 0;
+ uint32_t WeightScale = 0;
+ uint32_t SumWeight = MBPI->getSumForBlock(BB, WeightScale);
+ DEBUG(dbgs() << "Attempting merge from: " << getBlockName(BB) << "\n");
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ SE = BB->succ_end();
+ SI != SE; ++SI) {
+ if (BlockFilter && !BlockFilter->count(*SI))
+ continue;
+ BlockChain &SuccChain = *BlockToChain[*SI];
+ if (&SuccChain == &Chain) {
+ DEBUG(dbgs() << " " << getBlockName(*SI) << " -> Already merged!\n");
+ continue;
+ }
+ if (*SI != *SuccChain.begin()) {
+ DEBUG(dbgs() << " " << getBlockName(*SI) << " -> Mid chain!\n");
+ continue;
+ }
+
+ uint32_t SuccWeight = MBPI->getEdgeWeight(BB, *SI);
+ BranchProbability SuccProb(SuccWeight / WeightScale, SumWeight);
+
+ // Only consider successors which are either "hot", or wouldn't violate
+ // any CFG constraints.
+ if (SuccChain.LoopPredecessors != 0) {
+ if (SuccProb < HotProb) {
+ DEBUG(dbgs() << " " << getBlockName(*SI) << " -> CFG conflict\n");
+ continue;
+ }
+
+ // Make sure that a hot successor doesn't have a globally more important
+ // predecessor.
+ BlockFrequency CandidateEdgeFreq
+ = MBFI->getBlockFreq(BB) * SuccProb * HotProb.getCompl();
+ bool BadCFGConflict = false;
+ for (MachineBasicBlock::pred_iterator PI = (*SI)->pred_begin(),
+ PE = (*SI)->pred_end();
+ PI != PE; ++PI) {
+ if (*PI == *SI || (BlockFilter && !BlockFilter->count(*PI)) ||
+ BlockToChain[*PI] == &Chain)
+ continue;
+ BlockFrequency PredEdgeFreq
+ = MBFI->getBlockFreq(*PI) * MBPI->getEdgeProbability(*PI, *SI);
+ if (PredEdgeFreq >= CandidateEdgeFreq) {
+ BadCFGConflict = true;
+ break;
+ }
+ }
+ if (BadCFGConflict) {
+ DEBUG(dbgs() << " " << getBlockName(*SI)
+ << " -> non-cold CFG conflict\n");
+ continue;
+ }
+ }
+
+ DEBUG(dbgs() << " " << getBlockName(*SI) << " -> " << SuccProb
+ << " (prob)"
+ << (SuccChain.LoopPredecessors != 0 ? " (CFG break)" : "")
+ << "\n");
+ if (BestSucc && BestWeight >= SuccWeight)
+ continue;
+ BestSucc = *SI;
+ BestWeight = SuccWeight;
+ }
+ return BestSucc;
+}
+
+namespace {
+/// \brief Predicate struct to detect blocks already placed.
+class IsBlockPlaced {
+ const BlockChain &PlacedChain;
+ const BlockToChainMapType &BlockToChain;
+
+public:
+ IsBlockPlaced(const BlockChain &PlacedChain,
+ const BlockToChainMapType &BlockToChain)
+ : PlacedChain(PlacedChain), BlockToChain(BlockToChain) {}
+
+ bool operator()(MachineBasicBlock *BB) const {
+ return BlockToChain.lookup(BB) == &PlacedChain;
+ }
+};
+}
+
+/// \brief Select the best block from a worklist.
+///
+/// This looks through the provided worklist as a list of candidate basic
+/// blocks and selects the most profitable one to place. The definition of
+/// profitable only really makes sense in the context of a loop. This returns
+/// the most frequently visited block in the worklist, which in the case of
+/// a loop, is the one most desirable to be physically close to the rest of the
+/// loop body in order to improve icache behavior.
+///
+/// \returns The best block found, or null if none are viable.
+MachineBasicBlock *MachineBlockPlacement::selectBestCandidateBlock(
+ BlockChain &Chain, SmallVectorImpl<MachineBasicBlock *> &WorkList,
+ const BlockFilterSet *BlockFilter) {
+ // Once we need to walk the worklist looking for a candidate, cleanup the
+ // worklist of already placed entries.
+ // FIXME: If this shows up on profiles, it could be folded (at the cost of
+ // some code complexity) into the loop below.
+ WorkList.erase(std::remove_if(WorkList.begin(), WorkList.end(),
+ IsBlockPlaced(Chain, BlockToChain)),
+ WorkList.end());
+
+ MachineBasicBlock *BestBlock = 0;
+ BlockFrequency BestFreq;
+ for (SmallVectorImpl<MachineBasicBlock *>::iterator WBI = WorkList.begin(),
+ WBE = WorkList.end();
+ WBI != WBE; ++WBI) {
+ BlockChain &SuccChain = *BlockToChain[*WBI];
+ if (&SuccChain == &Chain) {
+ DEBUG(dbgs() << " " << getBlockName(*WBI)
+ << " -> Already merged!\n");
+ continue;
+ }
+ assert(SuccChain.LoopPredecessors == 0 && "Found CFG-violating block");
+
+ BlockFrequency CandidateFreq = MBFI->getBlockFreq(*WBI);
+ DEBUG(dbgs() << " " << getBlockName(*WBI) << " -> " << CandidateFreq
+ << " (freq)\n");
+ if (BestBlock && BestFreq >= CandidateFreq)
+ continue;
+ BestBlock = *WBI;
+ BestFreq = CandidateFreq;
+ }
+ return BestBlock;
+}
+
+/// \brief Retrieve the first unplaced basic block.
+///
+/// This routine is called when we are unable to use the CFG to walk through
+/// all of the basic blocks and form a chain due to unnatural loops in the CFG.
+/// We walk through the function's blocks in order, starting from the
+/// PrevUnplacedBlockIt. We update this iterator on each call to avoid
+/// re-scanning the entire sequence on repeated calls to this routine.
+MachineBasicBlock *MachineBlockPlacement::getFirstUnplacedBlock(
+ MachineFunction &F, const BlockChain &PlacedChain,
+ MachineFunction::iterator &PrevUnplacedBlockIt,
+ const BlockFilterSet *BlockFilter) {
+ for (MachineFunction::iterator I = PrevUnplacedBlockIt, E = F.end(); I != E;
+ ++I) {
+ if (BlockFilter && !BlockFilter->count(I))
+ continue;
+ if (BlockToChain[I] != &PlacedChain) {
+ PrevUnplacedBlockIt = I;
+ // Now select the head of the chain to which the unplaced block belongs
+ // as the block to place. This will force the entire chain to be placed,
+ // and satisfies the requirements of merging chains.
+ return *BlockToChain[I]->begin();
+ }
+ }
+ return 0;
+}
+
+void MachineBlockPlacement::buildChain(
+ MachineBasicBlock *BB,
+ BlockChain &Chain,
+ SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
+ const BlockFilterSet *BlockFilter) {
+ assert(BB);
+ assert(BlockToChain[BB] == &Chain);
+ MachineFunction &F = *BB->getParent();
+ MachineFunction::iterator PrevUnplacedBlockIt = F.begin();
+
+ MachineBasicBlock *LoopHeaderBB = BB;
+ markChainSuccessors(Chain, LoopHeaderBB, BlockWorkList, BlockFilter);
+ BB = *llvm::prior(Chain.end());
+ for (;;) {
+ assert(BB);
+ assert(BlockToChain[BB] == &Chain);
+ assert(*llvm::prior(Chain.end()) == BB);
+ MachineBasicBlock *BestSucc = 0;
+
+ // Look for the best viable successor if there is one to place immediately
+ // after this block.
+ BestSucc = selectBestSuccessor(BB, Chain, BlockFilter);
+
+ // If an immediate successor isn't available, look for the best viable
+ // block among those we've identified as not violating the loop's CFG at
+ // this point. This won't be a fallthrough, but it will increase locality.
+ if (!BestSucc)
+ BestSucc = selectBestCandidateBlock(Chain, BlockWorkList, BlockFilter);
+
+ if (!BestSucc) {
+ BestSucc = getFirstUnplacedBlock(F, Chain, PrevUnplacedBlockIt,
+ BlockFilter);
+ if (!BestSucc)
+ break;
+
+ DEBUG(dbgs() << "Unnatural loop CFG detected, forcibly merging the "
+ "layout successor until the CFG reduces\n");
+ }
+
+    // Place this block, updating the data structures to reflect its placement.
+ BlockChain &SuccChain = *BlockToChain[BestSucc];
+ // Zero out LoopPredecessors for the successor we're about to merge in case
+ // we selected a successor that didn't fit naturally into the CFG.
+ SuccChain.LoopPredecessors = 0;
+ DEBUG(dbgs() << "Merging from " << getBlockNum(BB)
+ << " to " << getBlockNum(BestSucc) << "\n");
+ markChainSuccessors(SuccChain, LoopHeaderBB, BlockWorkList, BlockFilter);
+ Chain.merge(BestSucc, &SuccChain);
+ BB = *llvm::prior(Chain.end());
+ }
+
+ DEBUG(dbgs() << "Finished forming chain for header block "
+ << getBlockNum(*Chain.begin()) << "\n");
+}
+
+/// \brief Find the best loop top block for layout.
+///
+/// This routine implements the logic to analyze the loop looking for the best
+/// block to layout at the top of the loop. Typically this is done to maximize
+/// fallthrough opportunities.
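+///
+/// For example (illustrative): in a two-block loop {H, B} where the header H
+/// has a hot exit edge H -> X and B branches back to H, choosing B as the
+/// layout top yields the order B, H, letting the loop exit fall through from
+/// H into X instead of taking a branch.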
+MachineBasicBlock *
+MachineBlockPlacement::findBestLoopTop(MachineFunction &F,
+ MachineLoop &L,
+ const BlockFilterSet &LoopBlockSet) {
+ // We don't want to layout the loop linearly in all cases. If the loop header
+ // is just a normal basic block in the loop, we want to look for what block
+ // within the loop is the best one to layout at the top. However, if the loop
+// header has been pre-merged into a chain due to predecessors not having
+ // analyzable branches, *and* the predecessor it is merged with is *not* part
+ // of the loop, rotating the header into the middle of the loop will create
+ // a non-contiguous range of blocks which is Very Bad. So start with the
+ // header and only rotate if safe.
+ BlockChain &HeaderChain = *BlockToChain[L.getHeader()];
+ if (!LoopBlockSet.count(*HeaderChain.begin()))
+ return L.getHeader();
+
+ BlockFrequency BestExitEdgeFreq;
+ MachineBasicBlock *ExitingBB = 0;
+ MachineBasicBlock *LoopingBB = 0;
+ // If there are exits to outer loops, loop rotation can severely limit
+  // fallthrough opportunities unless it selects such an exit. Keep a set of
+ // blocks where rotating to exit with that block will reach an outer loop.
+ SmallPtrSet<MachineBasicBlock *, 4> BlocksExitingToOuterLoop;
+
+ DEBUG(dbgs() << "Finding best loop exit for: "
+ << getBlockName(L.getHeader()) << "\n");
+ for (MachineLoop::block_iterator I = L.block_begin(),
+ E = L.block_end();
+ I != E; ++I) {
+ BlockChain &Chain = *BlockToChain[*I];
+ // Ensure that this block is at the end of a chain; otherwise it could be
+ // mid-way through an inner loop or a successor of an analyzable branch.
+ if (*I != *llvm::prior(Chain.end()))
+ continue;
+
+ // Now walk the successors. We need to establish whether this has a viable
+ // exiting successor and whether it has a viable non-exiting successor.
+ // We store the old exiting state and restore it if a viable looping
+ // successor isn't found.
+ MachineBasicBlock *OldExitingBB = ExitingBB;
+ BlockFrequency OldBestExitEdgeFreq = BestExitEdgeFreq;
+ // We also compute and store the best looping successor for use in layout.
+ MachineBasicBlock *BestLoopSucc = 0;
+ // FIXME: Due to the performance of the probability and weight routines in
+ // the MBPI analysis, we use the internal weights. This is only valid
+ // because it is purely a ranking function, we don't care about anything
+ // but the relative values.
+ uint32_t BestLoopSuccWeight = 0;
+ // FIXME: We also manually compute the probabilities to avoid quadratic
+ // behavior.
+ uint32_t WeightScale = 0;
+ uint32_t SumWeight = MBPI->getSumForBlock(*I, WeightScale);
+ for (MachineBasicBlock::succ_iterator SI = (*I)->succ_begin(),
+ SE = (*I)->succ_end();
+ SI != SE; ++SI) {
+ if ((*SI)->isLandingPad())
+ continue;
+ if (*SI == *I)
+ continue;
+ BlockChain &SuccChain = *BlockToChain[*SI];
+ // Don't split chains, either this chain or the successor's chain.
+ if (&Chain == &SuccChain || *SI != *SuccChain.begin()) {
+ DEBUG(dbgs() << " " << (LoopBlockSet.count(*SI) ? "looping: "
+ : "exiting: ")
+ << getBlockName(*I) << " -> "
+ << getBlockName(*SI) << " (chain conflict)\n");
+ continue;
+ }
+
+ uint32_t SuccWeight = MBPI->getEdgeWeight(*I, *SI);
+ if (LoopBlockSet.count(*SI)) {
+ DEBUG(dbgs() << " looping: " << getBlockName(*I) << " -> "
+ << getBlockName(*SI) << " (" << SuccWeight << ")\n");
+ if (BestLoopSucc && BestLoopSuccWeight >= SuccWeight)
+ continue;
+
+ BestLoopSucc = *SI;
+ BestLoopSuccWeight = SuccWeight;
+ continue;
+ }
+
+ BranchProbability SuccProb(SuccWeight / WeightScale, SumWeight);
+ BlockFrequency ExitEdgeFreq = MBFI->getBlockFreq(*I) * SuccProb;
+ DEBUG(dbgs() << " exiting: " << getBlockName(*I) << " -> "
+ << getBlockName(*SI) << " (" << ExitEdgeFreq << ")\n");
+ // Note that we slightly bias this toward an existing layout successor to
+ // retain incoming order in the absence of better information.
+ // FIXME: Should we bias this more strongly? It's pretty weak.
+ if (!ExitingBB || ExitEdgeFreq > BestExitEdgeFreq ||
+ ((*I)->isLayoutSuccessor(*SI) &&
+ !(ExitEdgeFreq < BestExitEdgeFreq))) {
+ BestExitEdgeFreq = ExitEdgeFreq;
+ ExitingBB = *I;
+ }
+
+ if (MachineLoop *ExitLoop = MLI->getLoopFor(*SI))
+ if (ExitLoop->contains(&L))
+ BlocksExitingToOuterLoop.insert(*I);
+ }
+
+ // Restore the old exiting state, no viable looping successor was found.
+ if (!BestLoopSucc) {
+ ExitingBB = OldExitingBB;
+ BestExitEdgeFreq = OldBestExitEdgeFreq;
+ continue;
+ }
+
+ // If this was best exiting block thus far, also record the looping block.
+ if (ExitingBB == *I)
+ LoopingBB = BestLoopSucc;
+ }
+  // Without a candidate exiting block or with only a single block in the
+ // loop, just use the loop header to layout the loop.
+ if (!ExitingBB || L.getNumBlocks() == 1)
+ return L.getHeader();
+
+ // Also, if we have exit blocks which lead to outer loops but didn't select
+ // one of them as the exiting block we are rotating toward, disable loop
+ // rotation altogether.
+ if (!BlocksExitingToOuterLoop.empty() &&
+ !BlocksExitingToOuterLoop.count(ExitingBB))
+ return L.getHeader();
+
+ assert(LoopingBB && "All successors of a loop block are exit blocks!");
+ DEBUG(dbgs() << " Best exiting block: " << getBlockName(ExitingBB) << "\n");
+ DEBUG(dbgs() << " Best top block: " << getBlockName(LoopingBB) << "\n");
+ return LoopingBB;
+}
+
+/// \brief Forms basic block chains from the natural loop structures.
+///
+/// These chains are designed to preserve the existing *structure* of the code
+/// as much as possible. We can then stitch the chains together in a way which
+/// both preserves the topological structure and minimizes taken conditional
+/// branches.
+void MachineBlockPlacement::buildLoopChains(MachineFunction &F,
+ MachineLoop &L) {
+ // First recurse through any nested loops, building chains for those inner
+ // loops.
+ for (MachineLoop::iterator LI = L.begin(), LE = L.end(); LI != LE; ++LI)
+ buildLoopChains(F, **LI);
+
+ SmallVector<MachineBasicBlock *, 16> BlockWorkList;
+ BlockFilterSet LoopBlockSet(L.block_begin(), L.block_end());
+
+ MachineBasicBlock *LayoutTop = findBestLoopTop(F, L, LoopBlockSet);
+ BlockChain &LoopChain = *BlockToChain[LayoutTop];
+
+ // FIXME: This is a really lame way of walking the chains in the loop: we
+ // walk the blocks, and use a set to prevent visiting a particular chain
+ // twice.
+ SmallPtrSet<BlockChain *, 4> UpdatedPreds;
+ assert(LoopChain.LoopPredecessors == 0);
+ UpdatedPreds.insert(&LoopChain);
+ for (MachineLoop::block_iterator BI = L.block_begin(),
+ BE = L.block_end();
+ BI != BE; ++BI) {
+ BlockChain &Chain = *BlockToChain[*BI];
+ if (!UpdatedPreds.insert(&Chain))
+ continue;
+
+ assert(Chain.LoopPredecessors == 0);
+ for (BlockChain::iterator BCI = Chain.begin(), BCE = Chain.end();
+ BCI != BCE; ++BCI) {
+ assert(BlockToChain[*BCI] == &Chain);
+ for (MachineBasicBlock::pred_iterator PI = (*BCI)->pred_begin(),
+ PE = (*BCI)->pred_end();
+ PI != PE; ++PI) {
+ if (BlockToChain[*PI] == &Chain || !LoopBlockSet.count(*PI))
+ continue;
+ ++Chain.LoopPredecessors;
+ }
+ }
+
+ if (Chain.LoopPredecessors == 0)
+ BlockWorkList.push_back(*Chain.begin());
+ }
+
+ buildChain(LayoutTop, LoopChain, BlockWorkList, &LoopBlockSet);
+
+ DEBUG({
+ // Crash at the end so we get all of the debugging output first.
+ bool BadLoop = false;
+ if (LoopChain.LoopPredecessors) {
+ BadLoop = true;
+ dbgs() << "Loop chain contains a block without its preds placed!\n"
+ << " Loop header: " << getBlockName(*L.block_begin()) << "\n"
+ << " Chain header: " << getBlockName(*LoopChain.begin()) << "\n";
+ }
+ for (BlockChain::iterator BCI = LoopChain.begin(), BCE = LoopChain.end();
+ BCI != BCE; ++BCI)
+ if (!LoopBlockSet.erase(*BCI)) {
+ // We don't mark the loop as bad here because there are real situations
+ // where this can occur. For example, with an unanalyzable fallthrough
+ // from a loop block to a non-loop block or vice versa.
+ dbgs() << "Loop chain contains a block not contained by the loop!\n"
+ << " Loop header: " << getBlockName(*L.block_begin()) << "\n"
+ << " Chain header: " << getBlockName(*LoopChain.begin()) << "\n"
+ << " Bad block: " << getBlockName(*BCI) << "\n";
+ }
+
+ if (!LoopBlockSet.empty()) {
+ BadLoop = true;
+ for (BlockFilterSet::iterator LBI = LoopBlockSet.begin(),
+ LBE = LoopBlockSet.end();
+ LBI != LBE; ++LBI)
+ dbgs() << "Loop contains blocks never placed into a chain!\n"
+ << " Loop header: " << getBlockName(*L.block_begin()) << "\n"
+ << " Chain header: " << getBlockName(*LoopChain.begin()) << "\n"
+ << " Bad block: " << getBlockName(*LBI) << "\n";
+ }
+ assert(!BadLoop && "Detected problems with the placement of this loop.");
+ });
+}
+
+void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
+ // Ensure that every BB in the function has an associated chain to simplify
+ // the assumptions of the remaining algorithm.
+ SmallVector<MachineOperand, 4> Cond; // For AnalyzeBranch.
+ for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
+ MachineBasicBlock *BB = FI;
+ BlockChain *Chain
+ = new (ChainAllocator.Allocate()) BlockChain(BlockToChain, BB);
+ // Also, merge any blocks which we cannot reason about and must preserve
+ // the exact fallthrough behavior for.
+ for (;;) {
+ Cond.clear();
+ MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
+ if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond) || !FI->canFallThrough())
+ break;
+
+ MachineFunction::iterator NextFI(llvm::next(FI));
+ MachineBasicBlock *NextBB = NextFI;
+ // Ensure that the layout successor is a viable block, as we know that
+ // fallthrough is a possibility.
+ assert(NextFI != FE && "Can't fallthrough past the last block.");
+ DEBUG(dbgs() << "Pre-merging due to unanalyzable fallthrough: "
+ << getBlockName(BB) << " -> " << getBlockName(NextBB)
+ << "\n");
+ Chain->merge(NextBB, 0);
+ FI = NextFI;
+ BB = NextBB;
+ }
+ }
+
+ // Build any loop-based chains.
+ for (MachineLoopInfo::iterator LI = MLI->begin(), LE = MLI->end(); LI != LE;
+ ++LI)
+ buildLoopChains(F, **LI);
+
+ SmallVector<MachineBasicBlock *, 16> BlockWorkList;
+
+ SmallPtrSet<BlockChain *, 4> UpdatedPreds;
+ for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
+ MachineBasicBlock *BB = &*FI;
+ BlockChain &Chain = *BlockToChain[BB];
+ if (!UpdatedPreds.insert(&Chain))
+ continue;
+
+ assert(Chain.LoopPredecessors == 0);
+ for (BlockChain::iterator BCI = Chain.begin(), BCE = Chain.end();
+ BCI != BCE; ++BCI) {
+ assert(BlockToChain[*BCI] == &Chain);
+ for (MachineBasicBlock::pred_iterator PI = (*BCI)->pred_begin(),
+ PE = (*BCI)->pred_end();
+ PI != PE; ++PI) {
+ if (BlockToChain[*PI] == &Chain)
+ continue;
+ ++Chain.LoopPredecessors;
+ }
+ }
+
+ if (Chain.LoopPredecessors == 0)
+ BlockWorkList.push_back(*Chain.begin());
+ }
+
+ BlockChain &FunctionChain = *BlockToChain[&F.front()];
+ buildChain(&F.front(), FunctionChain, BlockWorkList);
+
+ typedef SmallPtrSet<MachineBasicBlock *, 16> FunctionBlockSetType;
+ DEBUG({
+ // Crash at the end so we get all of the debugging output first.
+ bool BadFunc = false;
+ FunctionBlockSetType FunctionBlockSet;
+ for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
+ FunctionBlockSet.insert(FI);
+
+ for (BlockChain::iterator BCI = FunctionChain.begin(),
+ BCE = FunctionChain.end();
+ BCI != BCE; ++BCI)
+ if (!FunctionBlockSet.erase(*BCI)) {
+ BadFunc = true;
+ dbgs() << "Function chain contains a block not in the function!\n"
+ << " Bad block: " << getBlockName(*BCI) << "\n";
+ }
+
+ if (!FunctionBlockSet.empty()) {
+ BadFunc = true;
+ for (FunctionBlockSetType::iterator FBI = FunctionBlockSet.begin(),
+ FBE = FunctionBlockSet.end();
+ FBI != FBE; ++FBI)
+ dbgs() << "Function contains blocks never placed into a chain!\n"
+ << " Bad block: " << getBlockName(*FBI) << "\n";
+ }
+ assert(!BadFunc && "Detected problems with the block placement.");
+ });
+
+ // Splice the blocks into place.
+ MachineFunction::iterator InsertPos = F.begin();
+ for (BlockChain::iterator BI = FunctionChain.begin(),
+ BE = FunctionChain.end();
+ BI != BE; ++BI) {
+ DEBUG(dbgs() << (BI == FunctionChain.begin() ? "Placing chain "
+ : " ... ")
+ << getBlockName(*BI) << "\n");
+ if (InsertPos != MachineFunction::iterator(*BI))
+ F.splice(InsertPos, *BI);
+ else
+ ++InsertPos;
+
+ // Update the terminator of the previous block.
+ if (BI == FunctionChain.begin())
+ continue;
+ MachineBasicBlock *PrevBB = llvm::prior(MachineFunction::iterator(*BI));
+
+    // FIXME: It would be awesome if updateTerminator would just return rather
+    // than assert when the branch cannot be analyzed in order to remove this
+    // boilerplate.
+ Cond.clear();
+ MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
+ if (!TII->AnalyzeBranch(*PrevBB, TBB, FBB, Cond))
+ PrevBB->updateTerminator();
+ }
+
+ // Fixup the last block.
+ Cond.clear();
+ MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
+ if (!TII->AnalyzeBranch(F.back(), TBB, FBB, Cond))
+ F.back().updateTerminator();
+}
+
+/// \brief Recursive helper to align a loop and any nested loops.
+static void AlignLoop(MachineFunction &F, MachineLoop *L, unsigned Align) {
+ // Recurse through nested loops.
+ for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I)
+ AlignLoop(F, *I, Align);
+
+ L->getTopBlock()->setAlignment(Align);
+}
+
+/// \brief Align loop headers to target preferred alignments.
+void MachineBlockPlacement::AlignLoops(MachineFunction &F) {
+ if (F.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ return;
+
+ unsigned Align = TLI->getPrefLoopAlignment();
+ if (!Align)
+ return; // Don't care about loop alignment.
+
+ for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); I != E; ++I)
+ AlignLoop(F, *I, Align);
+}
+
+bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) {
+ // Check for single-block functions and skip them.
+ if (llvm::next(F.begin()) == F.end())
+ return false;
+
+ MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
+ MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
+ MLI = &getAnalysis<MachineLoopInfo>();
+ TII = F.getTarget().getInstrInfo();
+ TLI = F.getTarget().getTargetLowering();
+ assert(BlockToChain.empty());
+
+ buildCFGChains(F);
+ AlignLoops(F);
+
+ BlockToChain.clear();
+ ChainAllocator.DestroyAll();
+
+ // We always return true as we have no way to track whether the final order
+ // differs from the original order.
+ return true;
+}
+
+namespace {
+/// \brief A pass to compute block placement statistics.
+///
+/// A separate pass to compute interesting statistics for evaluating block
+/// placement. This is separate from the actual placement pass so that they can
+/// be computed in the absence of any placement transformations or when using
+/// alternative placement strategies.
+class MachineBlockPlacementStats : public MachineFunctionPass {
+ /// \brief A handle to the branch probability pass.
+ const MachineBranchProbabilityInfo *MBPI;
+
+ /// \brief A handle to the function-wide block frequency pass.
+ const MachineBlockFrequencyInfo *MBFI;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ MachineBlockPlacementStats() : MachineFunctionPass(ID) {
+ initializeMachineBlockPlacementStatsPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &F);
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineBranchProbabilityInfo>();
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+}
+
+char MachineBlockPlacementStats::ID = 0;
+char &llvm::MachineBlockPlacementStatsID = MachineBlockPlacementStats::ID;
+INITIALIZE_PASS_BEGIN(MachineBlockPlacementStats, "block-placement-stats",
+ "Basic Block Placement Stats", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineBlockFrequencyInfo)
+INITIALIZE_PASS_END(MachineBlockPlacementStats, "block-placement-stats",
+ "Basic Block Placement Stats", false, false)
+
+bool MachineBlockPlacementStats::runOnMachineFunction(MachineFunction &F) {
+ // Check for single-block functions and skip them.
+ if (llvm::next(F.begin()) == F.end())
+ return false;
+
+ MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
+ MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
+
+ for (MachineFunction::iterator I = F.begin(), E = F.end(); I != E; ++I) {
+ BlockFrequency BlockFreq = MBFI->getBlockFreq(I);
+ Statistic &NumBranches = (I->succ_size() > 1) ? NumCondBranches
+ : NumUncondBranches;
+ Statistic &BranchTakenFreq = (I->succ_size() > 1) ? CondBranchTakenFreq
+ : UncondBranchTakenFreq;
+ for (MachineBasicBlock::succ_iterator SI = I->succ_begin(),
+ SE = I->succ_end();
+ SI != SE; ++SI) {
+ // Skip if this successor is a fallthrough.
+ if (I->isLayoutSuccessor(*SI))
+ continue;
+
+ BlockFrequency EdgeFreq = BlockFreq * MBPI->getEdgeProbability(I, *SI);
+ ++NumBranches;
+ BranchTakenFreq += EdgeFreq.getFrequency();
+ }
+ }
+
+ return false;
+}
+
diff --git a/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp b/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
index c13fa6b..0cc1af0 100644
--- a/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
@@ -26,26 +26,43 @@ INITIALIZE_PASS_END(MachineBranchProbabilityInfo, "machine-branch-prob",
char MachineBranchProbabilityInfo::ID = 0;
-uint32_t MachineBranchProbabilityInfo::
-getSumForBlock(MachineBasicBlock *MBB) const {
- uint32_t Sum = 0;
+void MachineBranchProbabilityInfo::anchor() { }
+uint32_t MachineBranchProbabilityInfo::
+getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const {
+  // First we compute the sum with 64 bits of precision, ensuring that it
+  // cannot overflow by bounding the number of weights considered. Hopefully
+  // no one actually needs 2^32 successors.
+ assert(MBB->succ_size() < UINT32_MAX);
+ uint64_t Sum = 0;
+ Scale = 1;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
- MachineBasicBlock *Succ = *I;
- uint32_t Weight = getEdgeWeight(MBB, Succ);
- uint32_t PrevSum = Sum;
-
+ uint32_t Weight = getEdgeWeight(MBB, *I);
Sum += Weight;
- assert(Sum > PrevSum); (void) PrevSum;
}
+ // If the computed sum fits in 32-bits, we're done.
+ if (Sum <= UINT32_MAX)
+ return Sum;
+
+ // Otherwise, compute the scale necessary to cause the weights to fit, and
+ // re-sum with that scale applied.
+ assert((Sum / UINT32_MAX) < UINT32_MAX);
+ Scale = (Sum / UINT32_MAX) + 1;
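+  // For example, two edges of weight 3,000,000,000 give Sum == 6,000,000,000;
+  // Scale becomes (6,000,000,000 / UINT32_MAX) + 1 == 2, and the rescaled sum
+  // of 3,000,000,000 fits in 32 bits.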
+ Sum = 0;
+ for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
+ E = MBB->succ_end(); I != E; ++I) {
+ uint32_t Weight = getEdgeWeight(MBB, *I);
+ Sum += Weight / Scale;
+ }
+ assert(Sum <= UINT32_MAX);
return Sum;
}
uint32_t
-MachineBranchProbabilityInfo::getEdgeWeight(MachineBasicBlock *Src,
- MachineBasicBlock *Dst) const {
+MachineBranchProbabilityInfo::getEdgeWeight(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const {
uint32_t Weight = Src->getSuccWeight(Dst);
if (!Weight)
return DEFAULT_WEIGHT;
@@ -55,37 +72,24 @@ MachineBranchProbabilityInfo::getEdgeWeight(MachineBasicBlock *Src,
bool MachineBranchProbabilityInfo::isEdgeHot(MachineBasicBlock *Src,
MachineBasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
- uint32_t Weight = getEdgeWeight(Src, Dst);
- uint32_t Sum = getSumForBlock(Src);
-
- // FIXME: Implement BranchProbability::compare then change this code to
- // compare this BranchProbability against a static "hot" BranchProbability.
- return (uint64_t)Weight * 5 > (uint64_t)Sum * 4;
+ // FIXME: Compare against a static "hot" BranchProbability.
+ return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
}
MachineBasicBlock *
MachineBranchProbabilityInfo::getHotSucc(MachineBasicBlock *MBB) const {
- uint32_t Sum = 0;
uint32_t MaxWeight = 0;
MachineBasicBlock *MaxSucc = 0;
-
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
- MachineBasicBlock *Succ = *I;
- uint32_t Weight = getEdgeWeight(MBB, Succ);
- uint32_t PrevSum = Sum;
-
- Sum += Weight;
- assert(Sum > PrevSum); (void) PrevSum;
-
+ uint32_t Weight = getEdgeWeight(MBB, *I);
if (Weight > MaxWeight) {
MaxWeight = Weight;
- MaxSucc = Succ;
+ MaxSucc = *I;
}
}
- // FIXME: Use BranchProbability::compare.
- if ((uint64_t)MaxWeight * 5 >= (uint64_t)Sum * 4)
+ if (getEdgeProbability(MBB, MaxSucc) >= BranchProbability(4, 5))
return MaxSucc;
return 0;
@@ -94,8 +98,9 @@ MachineBranchProbabilityInfo::getHotSucc(MachineBasicBlock *MBB) const {
BranchProbability
MachineBranchProbabilityInfo::getEdgeProbability(MachineBasicBlock *Src,
MachineBasicBlock *Dst) const {
- uint32_t N = getEdgeWeight(Src, Dst);
- uint32_t D = getSumForBlock(Src);
+ uint32_t Scale = 1;
+ uint32_t D = getSumForBlock(Src, Scale);
+ uint32_t N = getEdgeWeight(Src, Dst) / Scale;
return BranchProbability(N, D);
}
diff --git a/contrib/llvm/lib/CodeGen/MachineCSE.cpp b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
index 7eda8c1..a63688e 100644
--- a/contrib/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
@@ -26,13 +26,14 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
-
using namespace llvm;
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs, "Number of common subexpression eliminated");
STATISTIC(NumPhysCSEs,
"Number of physreg referencing common subexpr eliminated");
+STATISTIC(NumCrossBBCSEs,
+ "Number of cross-MBB physreg referencing CS eliminated");
STATISTIC(NumCommutes, "Number of copies coalesced after commuting");
namespace {
@@ -49,7 +50,7 @@ namespace {
}
virtual bool runOnMachineFunction(MachineFunction &MF);
-
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -62,6 +63,8 @@ namespace {
virtual void releaseMemory() {
ScopeMap.clear();
Exps.clear();
+ AllocatableRegs.clear();
+ ReservedRegs.clear();
}
private:
@@ -75,6 +78,8 @@ namespace {
ScopedHTType VNT;
SmallVector<MachineInstr*, 64> Exps;
unsigned CurrVN;
+ BitVector AllocatableRegs;
+ BitVector ReservedRegs;
bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
bool isPhysDefTriviallyDead(unsigned Reg,
@@ -82,9 +87,12 @@ namespace {
MachineBasicBlock::const_iterator E) const ;
bool hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
- SmallSet<unsigned,8> &PhysRefs) const;
+ SmallSet<unsigned,8> &PhysRefs,
+ SmallVector<unsigned,2> &PhysDefs) const;
bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
- SmallSet<unsigned,8> &PhysRefs) const;
+ SmallSet<unsigned,8> &PhysRefs,
+ SmallVector<unsigned,2> &PhysDefs,
+ bool &NonLocal) const;
bool isCSECandidate(MachineInstr *MI);
bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
MachineInstr *CSMI, MachineInstr *MI);
@@ -99,6 +107,7 @@ namespace {
} // end anonymous namespace
char MachineCSE::ID = 0;
+char &llvm::MachineCSEID = MachineCSE::ID;
INITIALIZE_PASS_BEGIN(MachineCSE, "machine-cse",
"Machine Common Subexpression Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
@@ -106,8 +115,6 @@ INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineCSE, "machine-cse",
"Machine Common Subexpression Elimination", false, false)
-FunctionPass *llvm::createMachineCSEPass() { return new MachineCSE(); }
-
bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB) {
bool Changed = false;
@@ -163,6 +170,8 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
bool SeenDef = false;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = I->getOperand(i);
+ if (MO.isRegMask() && MO.clobbersPhysReg(Reg))
+ SeenDef = true;
if (!MO.isReg() || !MO.getReg())
continue;
if (!TRI->regsOverlap(MO.getReg(), Reg))
@@ -173,7 +182,7 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
SeenDef = true;
}
if (SeenDef)
- // See a def of Reg (or an alias) before encountering any use, it's
+ // See a def of Reg (or an alias) before encountering any use, it's
// trivially dead.
return true;
@@ -189,7 +198,8 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
/// instruction does not use a physical register.
bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
- SmallSet<unsigned,8> &PhysRefs) const {
+ SmallSet<unsigned,8> &PhysRefs,
+                                  SmallVector<unsigned,2> &PhysDefs) const {
MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
@@ -207,7 +217,9 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
(MO.isDead() || isPhysDefTriviallyDead(Reg, I, MBB->end())))
continue;
PhysRefs.insert(Reg);
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
+ if (MO.isDef())
+ PhysDefs.push_back(Reg);
+ for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
PhysRefs.insert(*Alias);
}
@@ -215,25 +227,56 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
}
bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
- SmallSet<unsigned,8> &PhysRefs) const {
+ SmallSet<unsigned,8> &PhysRefs,
+ SmallVector<unsigned,2> &PhysDefs,
+ bool &NonLocal) const {
// For now conservatively returns false if the common subexpression is
- // not in the same basic block as the given instruction.
- MachineBasicBlock *MBB = MI->getParent();
- if (CSMI->getParent() != MBB)
- return false;
+ // not in the same basic block as the given instruction. The only exception
+ // is if the common subexpression is in the sole predecessor block.
+ const MachineBasicBlock *MBB = MI->getParent();
+ const MachineBasicBlock *CSMBB = CSMI->getParent();
+
+ bool CrossMBB = false;
+ if (CSMBB != MBB) {
+ if (MBB->pred_size() != 1 || *MBB->pred_begin() != CSMBB)
+ return false;
+
+ for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
+ if (AllocatableRegs.test(PhysDefs[i]) || ReservedRegs.test(PhysDefs[i]))
+ // Avoid extending live range of physical registers if they are
+        // allocatable or reserved.
+ return false;
+ }
+ CrossMBB = true;
+ }
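+  // At this point, either CSMI and MI share a block, or CSMBB is MBB's sole
+  // predecessor and no physreg def involved is allocatable or reserved, so
+  // the scan below may safely continue across the block boundary.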
MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
MachineBasicBlock::const_iterator E = MI;
+ MachineBasicBlock::const_iterator EE = CSMBB->end();
unsigned LookAheadLeft = LookAheadLimit;
while (LookAheadLeft) {
// Skip over dbg_value's.
- while (I != E && I->isDebugValue())
+ while (I != E && I != EE && I->isDebugValue())
++I;
+ if (I == EE) {
+ assert(CrossMBB && "Reaching end-of-MBB without finding MI?");
+ (void)CrossMBB;
+ CrossMBB = false;
+ NonLocal = true;
+ I = MBB->begin();
+ EE = MBB->end();
+ continue;
+ }
+
if (I == E)
return true;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = I->getOperand(i);
+ // RegMasks go on instructions like calls that clobber lots of physregs.
+ // Don't attempt to CSE across such an instruction.
+ if (MO.isRegMask())
+ return false;
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
@@ -260,12 +303,11 @@ bool MachineCSE::isCSECandidate(MachineInstr *MI) {
return false;
// Ignore stuff that we obviously can't move.
- const MCInstrDesc &MCID = MI->getDesc();
- if (MCID.mayStore() || MCID.isCall() || MCID.isTerminator() ||
+ if (MI->mayStore() || MI->isCall() || MI->isTerminator() ||
MI->hasUnmodeledSideEffects())
return false;
- if (MCID.mayLoad()) {
+ if (MI->mayLoad()) {
// Okay, this instruction does a load. As a refinement, we allow the target
// to decide whether the loaded value is actually a constant. If so, we can
// actually use it as a load.
@@ -287,7 +329,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
// Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
// an immediate predecessor. We don't want to increase register pressure and
// end up causing other computation to be spilled.
- if (MI->getDesc().isAsCheapAsAMove()) {
+ if (MI->isAsCheapAsAMove()) {
MachineBasicBlock *CSBB = CSMI->getParent();
MachineBasicBlock *BB = MI->getParent();
if (CSBB != BB && !CSBB->isSuccessor(BB))
@@ -376,7 +418,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// Commute commutable instructions.
bool Commuted = false;
- if (!FoundCSE && MI->getDesc().isCommutable()) {
+ if (!FoundCSE && MI->isCommutable()) {
MachineInstr *NewMI = TII->commuteInstruction(MI);
if (NewMI) {
Commuted = true;
@@ -394,16 +436,18 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// If the instruction defines physical registers and the values *may* be
// used, then it's not safe to replace it with a common subexpression.
// It's also not safe if the instruction uses physical registers.
+ bool CrossMBBPhysDef = false;
SmallSet<unsigned,8> PhysRefs;
- if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs)) {
+ SmallVector<unsigned, 2> PhysDefs;
+ if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs, PhysDefs)) {
FoundCSE = false;
- // ... Unless the CS is local and it also defines the physical register
- // which is not clobbered in between and the physical register uses
- // were not clobbered.
+ // ... Unless the CS is local or is in the sole predecessor block
+ // and it also defines the physical register which is not clobbered
+ // in between and the physical register uses were not clobbered.
unsigned CSVN = VNT.lookup(MI);
MachineInstr *CSMI = Exps[CSVN];
- if (PhysRegDefsReach(CSMI, MI, PhysRefs))
+ if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
FoundCSE = true;
}
@@ -458,6 +502,18 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
MRI->replaceRegWith(CSEPairs[i].first, CSEPairs[i].second);
MRI->clearKillFlags(CSEPairs[i].second);
}
+
+ if (CrossMBBPhysDef) {
+ // Add physical register defs now coming in from a predecessor to MBB
+ // livein list.
+ while (!PhysDefs.empty()) {
+ unsigned LiveIn = PhysDefs.pop_back_val();
+ if (!MBB->isLiveIn(LiveIn))
+ MBB->addLiveIn(LiveIn);
+ }
+ ++NumCrossBBCSEs;
+ }
+
MI->eraseFromParent();
++NumCSEs;
if (!PhysRefs.empty())
@@ -542,5 +598,7 @@ bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<MachineDominatorTree>();
+ AllocatableRegs = TRI->getAllocatableSet(MF);
+ ReservedRegs = TRI->getReservedRegs(MF);
return PerformCSE(DT->getRootNode());
}
diff --git a/contrib/llvm/lib/CodeGen/MachineCodeEmitter.cpp b/contrib/llvm/lib/CodeGen/MachineCodeEmitter.cpp
new file mode 100644
index 0000000..81b4978
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineCodeEmitter.cpp
@@ -0,0 +1,14 @@
+//===-- llvm/CodeGen/MachineCodeEmitter.cpp - Code emission -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineCodeEmitter.h"
+
+using namespace llvm;
+
+void MachineCodeEmitter::anchor() { }
diff --git a/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
new file mode 100644
index 0000000..9730eaa
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -0,0 +1,340 @@
+//===- MachineCopyPropagation.cpp - Machine Copy Propagation Pass ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is an extremely simple MachineInstr-level copy propagation pass.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "codegen-cp"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Pass.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+STATISTIC(NumDeletes, "Number of dead copies deleted");
+
+namespace {
+ class MachineCopyPropagation : public MachineFunctionPass {
+ const TargetRegisterInfo *TRI;
+ BitVector ReservedRegs;
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ MachineCopyPropagation() : MachineFunctionPass(ID) {
+ initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ private:
+ typedef SmallVector<unsigned, 4> DestList;
+ typedef DenseMap<unsigned, DestList> SourceMap;
+
+ void SourceNoLongerAvailable(unsigned Reg,
+ SourceMap &SrcMap,
+ DenseMap<unsigned, MachineInstr*> &AvailCopyMap);
+ bool CopyPropagateBlock(MachineBasicBlock &MBB);
+ };
+}
+char MachineCopyPropagation::ID = 0;
+char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
+
+INITIALIZE_PASS(MachineCopyPropagation, "machine-cp",
+ "Machine Copy Propagation Pass", false, false)
+
+void
+MachineCopyPropagation::SourceNoLongerAvailable(unsigned Reg,
+ SourceMap &SrcMap,
+ DenseMap<unsigned, MachineInstr*> &AvailCopyMap) {
+ SourceMap::iterator SI = SrcMap.find(Reg);
+ if (SI != SrcMap.end()) {
+ const DestList& Defs = SI->second;
+ for (DestList::const_iterator I = Defs.begin(), E = Defs.end();
+ I != E; ++I) {
+ unsigned MappedDef = *I;
+ // Source of copy is no longer available for propagation.
+ if (AvailCopyMap.erase(MappedDef)) {
+ for (const uint16_t *SR = TRI->getSubRegisters(MappedDef); *SR; ++SR)
+ AvailCopyMap.erase(*SR);
+ }
+ }
+ }
+ for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
+ SI = SrcMap.find(*AS);
+ if (SI != SrcMap.end()) {
+ const DestList& Defs = SI->second;
+ for (DestList::const_iterator I = Defs.begin(), E = Defs.end();
+ I != E; ++I) {
+ unsigned MappedDef = *I;
+ if (AvailCopyMap.erase(MappedDef)) {
+ for (const uint16_t *SR = TRI->getSubRegisters(MappedDef); *SR; ++SR)
+ AvailCopyMap.erase(*SR);
+ }
+ }
+ }
+ }
+}
+
+static bool NoInterveningSideEffect(const MachineInstr *CopyMI,
+ const MachineInstr *MI) {
+ const MachineBasicBlock *MBB = CopyMI->getParent();
+ if (MI->getParent() != MBB)
+ return false;
+ MachineBasicBlock::const_iterator I = CopyMI;
+ MachineBasicBlock::const_iterator E = MBB->end();
+ MachineBasicBlock::const_iterator E2 = MI;
+
+ ++I;
+ while (I != E && I != E2) {
+ if (I->hasUnmodeledSideEffects() || I->isCall() ||
+ I->isTerminator())
+ return false;
+ ++I;
+ }
+ return true;
+}
+
+/// isNopCopy - Return true if the specified copy is really a nop. That is,
+/// if the source of the copy is the same as the definition of the copy that
+/// supplied the source. If the source of the copy is a sub-register, then the
+/// sub-indices must also match. e.g.
+/// ecx = mov eax
+/// al = mov cl
+/// But not
+/// ecx = mov eax
+/// al = mov ch
+static bool isNopCopy(MachineInstr *CopyMI, unsigned Def, unsigned Src,
+ const TargetRegisterInfo *TRI) {
+ unsigned SrcSrc = CopyMI->getOperand(1).getReg();
+ if (Def == SrcSrc)
+ return true;
+ if (TRI->isSubRegister(SrcSrc, Def)) {
+ unsigned SrcDef = CopyMI->getOperand(0).getReg();
+ unsigned SubIdx = TRI->getSubRegIndex(SrcSrc, Def);
+ if (!SubIdx)
+ return false;
+ return SubIdx == TRI->getSubRegIndex(SrcDef, Src);
+ }
+
+ return false;
+}
+
+bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
+ SmallSetVector<MachineInstr*, 8> MaybeDeadCopies; // Candidates for deletion
+ DenseMap<unsigned, MachineInstr*> AvailCopyMap; // Def -> available copies map
+ DenseMap<unsigned, MachineInstr*> CopyMap; // Def -> copies map
+ SourceMap SrcMap; // Src -> Def map
+
+ bool Changed = false;
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ) {
+ MachineInstr *MI = &*I;
+ ++I;
+
+ if (MI->isCopy()) {
+ unsigned Def = MI->getOperand(0).getReg();
+ unsigned Src = MI->getOperand(1).getReg();
+
+ if (TargetRegisterInfo::isVirtualRegister(Def) ||
+ TargetRegisterInfo::isVirtualRegister(Src))
+ report_fatal_error("MachineCopyPropagation should be run after"
+ " register allocation!");
+
+ DenseMap<unsigned, MachineInstr*>::iterator CI = AvailCopyMap.find(Src);
+ if (CI != AvailCopyMap.end()) {
+ MachineInstr *CopyMI = CI->second;
+ if (!ReservedRegs.test(Def) &&
+ (!ReservedRegs.test(Src) || NoInterveningSideEffect(CopyMI, MI)) &&
+ isNopCopy(CopyMI, Def, Src, TRI)) {
+ // The two copies cancel out and the source of the first copy
+ // hasn't been overridden, eliminate the second one. e.g.
+ // %ECX<def> = COPY %EAX<kill>
+ // ... nothing clobbered EAX.
+ // %EAX<def> = COPY %ECX
+ // =>
+ // %ECX<def> = COPY %EAX
+ //
+ // Also avoid eliminating a copy from reserved registers unless the
+ // definition is proven not clobbered. e.g.
+ // %RSP<def> = COPY %RAX
+ // CALL
+ // %RAX<def> = COPY %RSP
+
+ // Clear any kills of Def between CopyMI and MI. This extends the
+ // live range.
+ for (MachineBasicBlock::iterator I = CopyMI, E = MI; I != E; ++I)
+ I->clearRegisterKills(Def, TRI);
+
+ MI->eraseFromParent();
+ Changed = true;
+ ++NumDeletes;
+ continue;
+ }
+ }
+
+ // If Src is defined by a previous copy, it cannot be eliminated.
+ CI = CopyMap.find(Src);
+ if (CI != CopyMap.end())
+ MaybeDeadCopies.remove(CI->second);
+ for (const uint16_t *AS = TRI->getAliasSet(Src); *AS; ++AS) {
+ CI = CopyMap.find(*AS);
+ if (CI != CopyMap.end())
+ MaybeDeadCopies.remove(CI->second);
+ }
+
+ // Copy is now a candidate for deletion.
+ MaybeDeadCopies.insert(MI);
+
+      // If 'Def' was previously the source of another copy, then that earlier
+      // copy's source is no longer available. e.g.
+ // %xmm9<def> = copy %xmm2
+ // ...
+ // %xmm2<def> = copy %xmm0
+ // ...
+ // %xmm2<def> = copy %xmm9
+ SourceNoLongerAvailable(Def, SrcMap, AvailCopyMap);
+
+ // Remember Def is defined by the copy.
+ // ... Make sure to clear the def maps of aliases first.
+ for (const uint16_t *AS = TRI->getAliasSet(Def); *AS; ++AS) {
+ CopyMap.erase(*AS);
+ AvailCopyMap.erase(*AS);
+ }
+ CopyMap[Def] = MI;
+ AvailCopyMap[Def] = MI;
+ for (const uint16_t *SR = TRI->getSubRegisters(Def); *SR; ++SR) {
+ CopyMap[*SR] = MI;
+ AvailCopyMap[*SR] = MI;
+ }
+
+      // Remember the source that's copied to Def. Once the source is
+      // clobbered, it's no longer available for copy propagation.
+ if (std::find(SrcMap[Src].begin(), SrcMap[Src].end(), Def) ==
+ SrcMap[Src].end()) {
+ SrcMap[Src].push_back(Def);
+ }
+
+ continue;
+ }
+
+ // Not a copy.
+ SmallVector<unsigned, 2> Defs;
+ int RegMaskOpNum = -1;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isRegMask())
+ RegMaskOpNum = i;
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ report_fatal_error("MachineCopyPropagation should be run after"
+ " register allocation!");
+
+ if (MO.isDef()) {
+ Defs.push_back(Reg);
+ continue;
+ }
+
+ // If 'Reg' is defined by a copy, the copy is no longer a candidate
+ // for elimination.
+ DenseMap<unsigned, MachineInstr*>::iterator CI = CopyMap.find(Reg);
+ if (CI != CopyMap.end())
+ MaybeDeadCopies.remove(CI->second);
+ for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
+ CI = CopyMap.find(*AS);
+ if (CI != CopyMap.end())
+ MaybeDeadCopies.remove(CI->second);
+ }
+ }
+
+ // The instruction has a register mask operand which means that it clobbers
+ // a large set of registers. It is possible to use the register mask to
+ // prune the available copies, but treat it like a basic block boundary for
+ // now.
+ if (RegMaskOpNum >= 0) {
+ // Erase any MaybeDeadCopies whose destination register is clobbered.
+ const MachineOperand &MaskMO = MI->getOperand(RegMaskOpNum);
+ for (SmallSetVector<MachineInstr*, 8>::iterator
+ DI = MaybeDeadCopies.begin(), DE = MaybeDeadCopies.end();
+ DI != DE; ++DI) {
+ unsigned Reg = (*DI)->getOperand(0).getReg();
+ if (ReservedRegs.test(Reg) || !MaskMO.clobbersPhysReg(Reg))
+ continue;
+ (*DI)->eraseFromParent();
+ Changed = true;
+ ++NumDeletes;
+ }
+
+ // Clear all data structures as if we were beginning a new basic block.
+ MaybeDeadCopies.clear();
+ AvailCopyMap.clear();
+ CopyMap.clear();
+ SrcMap.clear();
+ continue;
+ }
+
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ unsigned Reg = Defs[i];
+
+ // No longer defined by a copy.
+ CopyMap.erase(Reg);
+ AvailCopyMap.erase(Reg);
+ for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
+ CopyMap.erase(*AS);
+ AvailCopyMap.erase(*AS);
+ }
+
+ // If 'Reg' is previously source of a copy, it is no longer available for
+ // copy propagation.
+ SourceNoLongerAvailable(Reg, SrcMap, AvailCopyMap);
+ }
+ }
+
+ // If MBB doesn't have successors, delete the copies whose defs are not used.
+  // If MBB does have successors, then conservatively assume the defs are live-out
+ // since we don't want to trust live-in lists.
+ if (MBB.succ_empty()) {
+ for (SmallSetVector<MachineInstr*, 8>::iterator
+ DI = MaybeDeadCopies.begin(), DE = MaybeDeadCopies.end();
+ DI != DE; ++DI) {
+ if (!ReservedRegs.test((*DI)->getOperand(0).getReg())) {
+ (*DI)->eraseFromParent();
+ Changed = true;
+ ++NumDeletes;
+ }
+ }
+ }
+
+ return Changed;
+}
+
+bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
+ bool Changed = false;
+
+ TRI = MF.getTarget().getRegisterInfo();
+ ReservedRegs = TRI->getReservedRegs(MF);
+
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
+ Changed |= CopyPropagateBlock(*I);
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
index 20066a0..d8c2f6a 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
@@ -13,12 +13,9 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/DerivedTypes.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Function.h"
-#include "llvm/Instructions.h"
-#include "llvm/Config/config.h"
#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -28,6 +25,7 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
@@ -197,9 +195,10 @@ MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
MachineMemOperand *
MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
uint64_t s, unsigned base_alignment,
- const MDNode *TBAAInfo) {
+ const MDNode *TBAAInfo,
+ const MDNode *Ranges) {
return new (Allocator) MachineMemOperand(PtrInfo, f, s, base_alignment,
- TBAAInfo);
+ TBAAInfo, Ranges);
}
MachineMemOperand *
@@ -286,7 +285,13 @@ void MachineFunction::dump() const {
}
void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
- OS << "# Machine code for function " << Fn->getName() << ":\n";
+ OS << "# Machine code for function " << Fn->getName() << ": ";
+ if (RegInfo) {
+ OS << (RegInfo->isSSA() ? "SSA" : "Post SSA");
+ if (!RegInfo->tracksLiveness())
+ OS << ", not tracking liveness";
+ }
+ OS << '\n';
// Print Frame Information
FrameInfo->print(*this, OS);
@@ -335,7 +340,7 @@ namespace llvm {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const MachineFunction *F) {
- return "CFG for '" + F->getFunction()->getNameStr() + "' function";
+ return "CFG for '" + F->getFunction()->getName().str() + "' function";
}
std::string getNodeLabel(const MachineBasicBlock *Node,
@@ -368,7 +373,7 @@ namespace llvm {
void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
- ViewGraph(this, "mf" + getFunction()->getNameStr());
+ ViewGraph(this, "mf" + getFunction()->getName());
#else
errs() << "MachineFunction::viewCFG is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
@@ -378,7 +383,7 @@ void MachineFunction::viewCFG() const
void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
- ViewGraph(this, "mf" + getFunction()->getNameStr(), true);
+ ViewGraph(this, "mf" + getFunction()->getName(), true);
#else
errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
@@ -464,7 +469,7 @@ MachineFrameInfo::getPristineRegs(const MachineBasicBlock *MBB) const {
if (!isCalleeSavedInfoValid())
return BV;
- for (const unsigned *CSR = TRI->getCalleeSavedRegs(MF); CSR && *CSR; ++CSR)
+ for (const uint16_t *CSR = TRI->getCalleeSavedRegs(MF); CSR && *CSR; ++CSR)
BV.set(*CSR);
// The entry MBB always has all CSRs pristine.
@@ -532,6 +537,8 @@ unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const {
switch (getEntryKind()) {
case MachineJumpTableInfo::EK_BlockAddress:
return TD.getPointerSize();
+ case MachineJumpTableInfo::EK_GPRel64BlockAddress:
+ return 8;
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32:
case MachineJumpTableInfo::EK_Custom32:
@@ -539,8 +546,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const {
case MachineJumpTableInfo::EK_Inline:
return 0;
}
- assert(0 && "Unknown jump table encoding!");
- return ~0;
+ llvm_unreachable("Unknown jump table encoding!");
}
/// getEntryAlignment - Return the alignment of each entry in the jump table.
@@ -551,6 +557,8 @@ unsigned MachineJumpTableInfo::getEntryAlignment(const TargetData &TD) const {
switch (getEntryKind()) {
case MachineJumpTableInfo::EK_BlockAddress:
return TD.getPointerABIAlignment();
+ case MachineJumpTableInfo::EK_GPRel64BlockAddress:
+ return TD.getABIIntegerTypeAlignment(64);
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32:
case MachineJumpTableInfo::EK_Custom32:
@@ -558,8 +566,7 @@ unsigned MachineJumpTableInfo::getEntryAlignment(const TargetData &TD) const {
case MachineJumpTableInfo::EK_Inline:
return 1;
}
- assert(0 && "Unknown jump table encoding!");
- return ~0;
+ llvm_unreachable("Unknown jump table encoding!");
}
/// createJumpTableIndex - Create a new jump table entry in the jump table info.
@@ -619,6 +626,8 @@ void MachineJumpTableInfo::dump() const { print(dbgs()); }
// MachineConstantPool implementation
//===----------------------------------------------------------------------===//
+void MachineConstantPoolValue::anchor() { }
+
Type *MachineConstantPoolEntry::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
@@ -653,35 +662,37 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
// reject them.
if (A->getType() == B->getType()) return false;
+ // We can't handle structs or arrays.
+ if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
+ isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
+ return false;
+
// For now, only support constants with the same size.
- if (TD->getTypeStoreSize(A->getType()) != TD->getTypeStoreSize(B->getType()))
+ uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
+ if (StoreSize != TD->getTypeStoreSize(B->getType()) ||
+ StoreSize > 128)
return false;
- // If a floating-point value and an integer value have the same encoding,
- // they can share a constant-pool entry.
- if (const ConstantFP *AFP = dyn_cast<ConstantFP>(A))
- if (const ConstantInt *BI = dyn_cast<ConstantInt>(B))
- return AFP->getValueAPF().bitcastToAPInt() == BI->getValue();
- if (const ConstantFP *BFP = dyn_cast<ConstantFP>(B))
- if (const ConstantInt *AI = dyn_cast<ConstantInt>(A))
- return BFP->getValueAPF().bitcastToAPInt() == AI->getValue();
-
- // Two vectors can share an entry if each pair of corresponding
- // elements could.
- if (const ConstantVector *AV = dyn_cast<ConstantVector>(A))
- if (const ConstantVector *BV = dyn_cast<ConstantVector>(B)) {
- if (AV->getType()->getNumElements() != BV->getType()->getNumElements())
- return false;
- for (unsigned i = 0, e = AV->getType()->getNumElements(); i != e; ++i)
- if (!CanShareConstantPoolEntry(AV->getOperand(i),
- BV->getOperand(i), TD))
- return false;
- return true;
- }
-
- // TODO: Handle other cases.
-
- return false;
+ Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
+
+  // Try constant folding a bitcast of both constants to an integer. If we
+ // get two identical ConstantInt's, then we are good to share them. We use
+ // the constant folding APIs to do this so that we get the benefit of
+ // TargetData.
+ if (isa<PointerType>(A->getType()))
+ A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
+ const_cast<Constant*>(A), TD);
+ else if (A->getType() != IntTy)
+ A = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
+ const_cast<Constant*>(A), TD);
+ if (isa<PointerType>(B->getType()))
+ B = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
+ const_cast<Constant*>(B), TD);
+ else if (B->getType() != IntTy)
+ B = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
+ const_cast<Constant*>(B), TD);
+
+ return A == B;
}
/// getConstantPoolIndex - Create a new entry in the constant pool or return
diff --git a/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp b/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
index 054c750..35591e1 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
@@ -19,9 +19,8 @@ using namespace llvm;
char MachineFunctionAnalysis::ID = 0;
-MachineFunctionAnalysis::MachineFunctionAnalysis(const TargetMachine &tm,
- CodeGenOpt::Level OL) :
- FunctionPass(ID), TM(tm), OptLevel(OL), MF(0) {
+MachineFunctionAnalysis::MachineFunctionAnalysis(const TargetMachine &tm) :
+ FunctionPass(ID), TM(tm), MF(0) {
initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
}
diff --git a/contrib/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
index a240667..e553a04 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
@@ -40,6 +40,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -178,6 +179,7 @@ void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp,
IsKill = isKill;
IsDead = isDead;
IsUndef = isUndef;
+ IsInternalRead = false;
IsEarlyClobber = false;
IsDebug = isDebug;
SubReg = 0;
@@ -191,7 +193,6 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
return false;
switch (getType()) {
- default: llvm_unreachable("Unrecognized operand type");
case MachineOperand::MO_Register:
return getReg() == Other.getReg() && isDef() == Other.isDef() &&
getSubReg() == Other.getSubReg();
@@ -216,11 +217,14 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
getOffset() == Other.getOffset();
case MachineOperand::MO_BlockAddress:
return getBlockAddress() == Other.getBlockAddress();
+ case MO_RegisterMask:
+ return getRegMask() == Other.getRegMask();
case MachineOperand::MO_MCSymbol:
return getMCSymbol() == Other.getMCSymbol();
case MachineOperand::MO_Metadata:
return getMetadata() == Other.getMetadata();
}
+ llvm_unreachable("Invalid machine operand type");
}
/// print - Print the specified machine operand.
@@ -240,7 +244,7 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << PrintReg(getReg(), TRI, getSubReg());
if (isDef() || isKill() || isDead() || isImplicit() || isUndef() ||
- isEarlyClobber()) {
+ isInternalRead() || isEarlyClobber()) {
OS << '<';
bool NeedComma = false;
if (isDef()) {
@@ -256,14 +260,26 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
NeedComma = true;
}
- if (isKill() || isDead() || isUndef()) {
+ if (isKill() || isDead() || isUndef() || isInternalRead()) {
if (NeedComma) OS << ',';
- if (isKill()) OS << "kill";
- if (isDead()) OS << "dead";
+ NeedComma = false;
+ if (isKill()) {
+ OS << "kill";
+ NeedComma = true;
+ }
+ if (isDead()) {
+ OS << "dead";
+ NeedComma = true;
+ }
if (isUndef()) {
- if (isKill() || isDead())
- OS << ',';
+ if (NeedComma) OS << ',';
OS << "undef";
+ NeedComma = true;
+ }
+ if (isInternalRead()) {
+ if (NeedComma) OS << ',';
+ OS << "internal";
+ NeedComma = true;
}
}
OS << '>';
@@ -311,6 +327,9 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
WriteAsOperand(OS, getBlockAddress(), /*PrintType=*/false);
OS << '>';
break;
+ case MachineOperand::MO_RegisterMask:
+ OS << "<regmask>";
+ break;
case MachineOperand::MO_Metadata:
OS << '<';
WriteAsOperand(OS, getMetadata(), /*PrintType=*/false);
@@ -319,8 +338,6 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
case MachineOperand::MO_MCSymbol:
OS << "<MCSym=" << *getMCSymbol() << '>';
break;
- default:
- llvm_unreachable("Unrecognized operand type");
}
if (unsigned TF = getTargetFlags())
@@ -364,10 +381,11 @@ MachinePointerInfo MachinePointerInfo::getStack(int64_t Offset) {
MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, unsigned f,
uint64_t s, unsigned int a,
- const MDNode *TBAAInfo)
+ const MDNode *TBAAInfo,
+ const MDNode *Ranges)
: PtrInfo(ptrinfo), Size(s),
Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)),
- TBAAInfo(TBAAInfo) {
+ TBAAInfo(TBAAInfo), Ranges(Ranges) {
assert((PtrInfo.V == 0 || isa<PointerType>(PtrInfo.V->getType())) &&
"invalid pointer value");
assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
@@ -465,7 +483,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
/// MCID NULL and no operands.
MachineInstr::MachineInstr()
: MCID(0), Flags(0), AsmPrinterFlags(0),
- MemRefs(0), MemRefsEnd(0),
+ NumMemRefs(0), MemRefs(0),
Parent(0) {
// Make sure that we get added to a machine basicblock
LeakDetector::addGarbageObject(this);
@@ -473,10 +491,10 @@ MachineInstr::MachineInstr()
void MachineInstr::addImplicitDefUseOperands() {
if (MCID->ImplicitDefs)
- for (const unsigned *ImpDefs = MCID->ImplicitDefs; *ImpDefs; ++ImpDefs)
+ for (const uint16_t *ImpDefs = MCID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
addOperand(MachineOperand::CreateReg(*ImpDefs, true, true));
if (MCID->ImplicitUses)
- for (const unsigned *ImpUses = MCID->ImplicitUses; *ImpUses; ++ImpUses)
+ for (const uint16_t *ImpUses = MCID->getImplicitUses(); *ImpUses; ++ImpUses)
addOperand(MachineOperand::CreateReg(*ImpUses, false, true));
}
@@ -485,7 +503,7 @@ void MachineInstr::addImplicitDefUseOperands() {
/// the MCInstrDesc.
MachineInstr::MachineInstr(const MCInstrDesc &tid, bool NoImp)
: MCID(&tid), Flags(0), AsmPrinterFlags(0),
- MemRefs(0), MemRefsEnd(0), Parent(0) {
+ NumMemRefs(0), MemRefs(0), Parent(0) {
unsigned NumImplicitOps = 0;
if (!NoImp)
NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
@@ -500,7 +518,7 @@ MachineInstr::MachineInstr(const MCInstrDesc &tid, bool NoImp)
MachineInstr::MachineInstr(const MCInstrDesc &tid, const DebugLoc dl,
bool NoImp)
: MCID(&tid), Flags(0), AsmPrinterFlags(0),
- MemRefs(0), MemRefsEnd(0), Parent(0), debugLoc(dl) {
+ NumMemRefs(0), MemRefs(0), Parent(0), debugLoc(dl) {
unsigned NumImplicitOps = 0;
if (!NoImp)
NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
@@ -516,7 +534,7 @@ MachineInstr::MachineInstr(const MCInstrDesc &tid, const DebugLoc dl,
/// basic block.
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &tid)
: MCID(&tid), Flags(0), AsmPrinterFlags(0),
- MemRefs(0), MemRefsEnd(0), Parent(0) {
+ NumMemRefs(0), MemRefs(0), Parent(0) {
assert(MBB && "Cannot use inserting ctor with null basic block!");
unsigned NumImplicitOps =
MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
@@ -532,7 +550,7 @@ MachineInstr::MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &tid)
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
const MCInstrDesc &tid)
: MCID(&tid), Flags(0), AsmPrinterFlags(0),
- MemRefs(0), MemRefsEnd(0), Parent(0), debugLoc(dl) {
+ NumMemRefs(0), MemRefs(0), Parent(0), debugLoc(dl) {
assert(MBB && "Cannot use inserting ctor with null basic block!");
unsigned NumImplicitOps =
MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
@@ -547,7 +565,7 @@ MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
///
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
: MCID(&MI.getDesc()), Flags(0), AsmPrinterFlags(0),
- MemRefs(MI.MemRefs), MemRefsEnd(MI.MemRefsEnd),
+ NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
Parent(0), debugLoc(MI.getDebugLoc()) {
Operands.reserve(MI.getNumOperands());
@@ -722,17 +740,33 @@ void MachineInstr::RemoveOperand(unsigned OpNo) {
void MachineInstr::addMemOperand(MachineFunction &MF,
MachineMemOperand *MO) {
mmo_iterator OldMemRefs = MemRefs;
- mmo_iterator OldMemRefsEnd = MemRefsEnd;
+ uint16_t OldNumMemRefs = NumMemRefs;
- size_t NewNum = (MemRefsEnd - MemRefs) + 1;
+ uint16_t NewNum = NumMemRefs + 1;
mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
- mmo_iterator NewMemRefsEnd = NewMemRefs + NewNum;
- std::copy(OldMemRefs, OldMemRefsEnd, NewMemRefs);
+ std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
NewMemRefs[NewNum - 1] = MO;
MemRefs = NewMemRefs;
- MemRefsEnd = NewMemRefsEnd;
+ NumMemRefs = NewNum;
+}
+
+bool MachineInstr::hasPropertyInBundle(unsigned Mask, QueryType Type) const {
+ const MachineBasicBlock *MBB = getParent();
+ MachineBasicBlock::const_instr_iterator MII = *this; ++MII;
+ while (MII != MBB->end() && MII->isInsideBundle()) {
+ if (MII->getDesc().getFlags() & Mask) {
+ if (Type == AnyInBundle)
+ return true;
+ } else {
+ if (Type == AllInBundle)
+ return false;
+ }
+ ++MII;
+ }
+
+ return Type == AllInBundle;
}
bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
@@ -743,6 +777,19 @@ bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
Other->getNumOperands() != getNumOperands())
return false;
+ if (isBundle()) {
+ // Both instructions are bundles, compare MIs inside the bundle.
+ MachineBasicBlock::const_instr_iterator I1 = *this;
+ MachineBasicBlock::const_instr_iterator E1 = getParent()->instr_end();
+ MachineBasicBlock::const_instr_iterator I2 = *Other;
+ MachineBasicBlock::const_instr_iterator E2= Other->getParent()->instr_end();
+ while (++I1 != E1 && I1->isInsideBundle()) {
+ ++I2;
+ if (I2 == E2 || !I2->isInsideBundle() || !I1->isIdenticalTo(I2, Check))
+ return false;
+ }
+ }
+
// Check operands to make sure they match.
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
@@ -789,6 +836,18 @@ bool MachineInstr::isIdenticalTo(const MachineInstr *Other,
/// block, and returns it, but does not delete it.
MachineInstr *MachineInstr::removeFromParent() {
assert(getParent() && "Not embedded in a basic block!");
+
+ // If it's a bundle then remove the MIs inside the bundle as well.
+ if (isBundle()) {
+ MachineBasicBlock *MBB = getParent();
+ MachineBasicBlock::instr_iterator MII = *this; ++MII;
+ MachineBasicBlock::instr_iterator E = MBB->instr_end();
+ while (MII != E && MII->isInsideBundle()) {
+ MachineInstr *MI = &*MII;
+ ++MII;
+ MBB->remove(MI);
+ }
+ }
getParent()->remove(this);
return this;
}
@@ -798,6 +857,17 @@ MachineInstr *MachineInstr::removeFromParent() {
/// block, and deletes it.
void MachineInstr::eraseFromParent() {
assert(getParent() && "Not embedded in a basic block!");
+ // If it's a bundle then remove the MIs inside the bundle as well.
+ if (isBundle()) {
+ MachineBasicBlock *MBB = getParent();
+ MachineBasicBlock::instr_iterator MII = *this; ++MII;
+ MachineBasicBlock::instr_iterator E = MBB->instr_end();
+ while (MII != E && MII->isInsideBundle()) {
+ MachineInstr *MI = &*MII;
+ ++MII;
+ MBB->erase(MI);
+ }
+ }
getParent()->erase(this);
}
@@ -817,6 +887,16 @@ unsigned MachineInstr::getNumExplicitOperands() const {
return NumOperands;
}
+/// isBundled - Return true if this instruction is part of a bundle. This is
+/// true if either it or the instruction following it is marked "InsideBundle".
+bool MachineInstr::isBundled() const {
+ if (isInsideBundle())
+ return true;
+ MachineBasicBlock::const_instr_iterator nextMI = this;
+ ++nextMI;
+ return nextMI != Parent->instr_end() && nextMI->isInsideBundle();
+}
+
bool MachineInstr::isStackAligningInlineAsm() const {
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
@@ -887,6 +967,20 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx,
return NULL;
}
+/// getBundleSize - Return the number of instructions inside the MI bundle.
+unsigned MachineInstr::getBundleSize() const {
+ assert(isBundle() && "Expecting a bundle");
+
+ MachineBasicBlock::const_instr_iterator I = *this;
+ unsigned Size = 0;
+ while ((++I)->isInsideBundle()) {
+ ++Size;
+ }
+ assert(Size > 1 && "Malformed bundle");
+
+ return Size;
+}
+
/// findRegisterUseOperandIdx() - Returns the MachineOperand that is a use of
/// the specific register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
@@ -948,6 +1042,10 @@ MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
+ // Accept regmask operands when Overlap is set.
+ // Ignore them when looking for a specific def operand (Overlap == false).
+ if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
+ return i;
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
@@ -1118,6 +1216,8 @@ void MachineInstr::copyKillDeadInfo(const MachineInstr *MI) {
/// copyPredicates - Copies predicate operand(s) from MI.
void MachineInstr::copyPredicates(const MachineInstr *MI) {
+ assert(!isBundle() && "MachineInstr::copyPredicates() can't handle bundles");
+
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isPredicable())
return;
@@ -1159,13 +1259,13 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
AliasAnalysis *AA,
bool &SawStore) const {
// Ignore stuff that we obviously can't move.
- if (MCID->mayStore() || MCID->isCall()) {
+ if (mayStore() || isCall()) {
SawStore = true;
return false;
}
if (isLabel() || isDebugValue() ||
- MCID->isTerminator() || hasUnmodeledSideEffects())
+ isTerminator() || hasUnmodeledSideEffects())
return false;
// See if this instruction does a load. If so, we have to guarantee that the
@@ -1173,7 +1273,7 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII,
// destination. The check for isInvariantLoad gives the target the chance to
// classify the load as always returning a constant, e.g. a constant pool
// load.
- if (MCID->mayLoad() && !isInvariantLoad(AA))
+ if (mayLoad() && !isInvariantLoad(AA))
// Otherwise, this is a real load. If there is a store between the load and
// end of block, or if the load is volatile, we can't move it.
return !SawStore && !hasVolatileMemoryRef();
@@ -1213,9 +1313,9 @@ bool MachineInstr::isSafeToReMat(const TargetInstrInfo *TII,
/// have no volatile memory references.
bool MachineInstr::hasVolatileMemoryRef() const {
// An instruction known never to access memory won't have a volatile access.
- if (!MCID->mayStore() &&
- !MCID->mayLoad() &&
- !MCID->isCall() &&
+ if (!mayStore() &&
+ !mayLoad() &&
+ !isCall() &&
!hasUnmodeledSideEffects())
return false;
@@ -1239,7 +1339,7 @@ bool MachineInstr::hasVolatileMemoryRef() const {
/// *all* loads the instruction does are invariant (if it does multiple loads).
bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
// If the instruction doesn't load at all, it isn't an invariant load.
- if (!MCID->mayLoad())
+ if (!mayLoad())
return false;
// If the instruction has lost its memoperands, conservatively assume that
@@ -1253,6 +1353,7 @@ bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
E = memoperands_end(); I != E; ++I) {
if ((*I)->isVolatile()) return false;
if ((*I)->isStore()) return false;
+ if ((*I)->isInvariant()) return true;
if (const Value *V = (*I)->getValue()) {
// A load from a constant PseudoSourceValue is invariant.
@@ -1291,7 +1392,7 @@ unsigned MachineInstr::isConstantValuePHI() const {
}
bool MachineInstr::hasUnmodeledSideEffects() const {
- if (getDesc().hasUnmodeledSideEffects())
+ if (hasProperty(MCID::UnmodeledSideEffects))
return true;
if (isInlineAsm()) {
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
@@ -1384,7 +1485,10 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << " = ";
// Print the opcode name.
- OS << getDesc().getName();
+ if (TM && TM->getInstrInfo())
+ OS << TM->getInstrInfo()->getName(getOpcode());
+ else
+ OS << "UNKNOWN";
// Print the rest of the operands.
bool OmittedAnyCallClobbers = false;
@@ -1419,14 +1523,14 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
// call instructions much less noisy on targets where calls clobber lots
// of registers. Don't rely on MO.isDead() because we may be called before
// LiveVariables is run, or we may be looking at a non-allocatable reg.
- if (MF && getDesc().isCall() &&
+ if (MF && isCall() &&
MO.isReg() && MO.isImplicit() && MO.isDef()) {
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
const MachineRegisterInfo &MRI = MF->getRegInfo();
if (MRI.use_empty(Reg) && !MRI.isLiveOut(Reg)) {
bool HasAliasLive = false;
- for (const unsigned *Alias = TM->getRegisterInfo()->getAliasSet(Reg);
+ for (const uint16_t *Alias = TM->getRegisterInfo()->getAliasSet(Reg);
unsigned AliasReg = *Alias; ++Alias)
if (!MRI.use_empty(AliasReg) || MRI.isLiveOut(AliasReg)) {
HasAliasLive = true;
@@ -1617,6 +1721,20 @@ bool MachineInstr::addRegisterKilled(unsigned IncomingReg,
return Found;
}
+void MachineInstr::clearRegisterKills(unsigned Reg,
+ const TargetRegisterInfo *RegInfo) {
+ if (!TargetRegisterInfo::isPhysicalRegister(Reg))
+ RegInfo = 0;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || !MO.isUse() || !MO.isKill())
+ continue;
+ unsigned OpReg = MO.getReg();
+ if (OpReg == Reg || (RegInfo && RegInfo->isSuperRegister(Reg, OpReg)))
+ MO.setIsKill(false);
+ }
+}
+
bool MachineInstr::addRegisterDead(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo,
bool AddIfNotFound) {
@@ -1689,16 +1807,21 @@ void MachineInstr::addRegisterDefined(unsigned IncomingReg,
true /*IsImp*/));
}
-void MachineInstr::setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+void MachineInstr::setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
const TargetRegisterInfo &TRI) {
+ bool HasRegMask = false;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
MachineOperand &MO = getOperand(i);
+ if (MO.isRegMask()) {
+ HasRegMask = true;
+ continue;
+ }
if (!MO.isReg() || !MO.isDef()) continue;
unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
+ if (!TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
bool Dead = true;
- for (SmallVectorImpl<unsigned>::const_iterator I = UsedRegs.begin(),
- E = UsedRegs.end(); I != E; ++I)
+ for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
+ I != E; ++I)
if (TRI.regsOverlap(*I, Reg)) {
Dead = false;
break;
@@ -1706,53 +1829,66 @@ void MachineInstr::setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRe
// If there are no uses, including partial uses, the def is dead.
if (Dead) MO.setIsDead();
}
+
+ // This is a call with a register mask operand.
+ // Mask clobbers are always dead, so add defs for the non-dead defines.
+ if (HasRegMask)
+ for (ArrayRef<unsigned>::iterator I = UsedRegs.begin(), E = UsedRegs.end();
+ I != E; ++I)
+ addRegisterDefined(*I, &TRI);
}
unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
- unsigned Hash = MI->getOpcode() * 37;
+ // Build up a buffer of hash code components.
+ //
+ // FIXME: This is a total hack. We should have a hash_value overload for
+ // MachineOperand, but currently that doesn't work because there are many
+ // different ideas of "equality" and thus different sets of information that
+ // contribute to the hash code. This one happens to want to take a specific
+ // subset. And it's still not clear that this routine uses the *correct*
+ // subset of information when computing the hash code. The goal is to use the
+ // same inputs for the hash code here that MachineInstr::isIdenticalTo uses to
+ // test for equality when passed the 'IgnoreVRegDefs' filter flag. It would
+ // be very useful to factor the selection of relevant inputs out of the two
+ // functions and into a common routine, but it's not clear how that can be
+ // done.
+ SmallVector<size_t, 8> HashComponents;
+ HashComponents.reserve(MI->getNumOperands() + 1);
+ HashComponents.push_back(MI->getOpcode());
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- uint64_t Key = (uint64_t)MO.getType() << 32;
switch (MO.getType()) {
default: break;
case MachineOperand::MO_Register:
if (MO.isDef() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue; // Skip virtual register defs.
- Key |= MO.getReg();
+ HashComponents.push_back(hash_combine(MO.getType(), MO.getReg()));
break;
case MachineOperand::MO_Immediate:
- Key |= MO.getImm();
+ HashComponents.push_back(hash_combine(MO.getType(), MO.getImm()));
break;
case MachineOperand::MO_FrameIndex:
case MachineOperand::MO_ConstantPoolIndex:
case MachineOperand::MO_JumpTableIndex:
- Key |= MO.getIndex();
+ HashComponents.push_back(hash_combine(MO.getType(), MO.getIndex()));
break;
case MachineOperand::MO_MachineBasicBlock:
- Key |= DenseMapInfo<void*>::getHashValue(MO.getMBB());
+ HashComponents.push_back(hash_combine(MO.getType(), MO.getMBB()));
break;
case MachineOperand::MO_GlobalAddress:
- Key |= DenseMapInfo<void*>::getHashValue(MO.getGlobal());
+ HashComponents.push_back(hash_combine(MO.getType(), MO.getGlobal()));
break;
case MachineOperand::MO_BlockAddress:
- Key |= DenseMapInfo<void*>::getHashValue(MO.getBlockAddress());
+ HashComponents.push_back(hash_combine(MO.getType(),
+ MO.getBlockAddress()));
break;
case MachineOperand::MO_MCSymbol:
- Key |= DenseMapInfo<void*>::getHashValue(MO.getMCSymbol());
+ HashComponents.push_back(hash_combine(MO.getType(), MO.getMCSymbol()));
break;
}
- Key += ~(Key << 32);
- Key ^= (Key >> 22);
- Key += ~(Key << 13);
- Key ^= (Key >> 8);
- Key += (Key << 3);
- Key ^= (Key >> 15);
- Key += ~(Key << 27);
- Key ^= (Key >> 31);
- Hash = (unsigned)Key + Hash * 37;
- }
- return Hash;
+ }
+ return hash_combine_range(HashComponents.begin(), HashComponents.end());
}
void MachineInstr::emitError(StringRef Msg) const {
diff --git a/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp b/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
new file mode 100644
index 0000000..73489a7
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineInstrBundle.cpp
@@ -0,0 +1,278 @@
+//===-- lib/CodeGen/MachineInstrBundle.cpp --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace llvm;
+
+namespace {
+ class UnpackMachineBundles : public MachineFunctionPass {
+ public:
+ static char ID; // Pass identification
+ UnpackMachineBundles() : MachineFunctionPass(ID) {
+ initializeUnpackMachineBundlesPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+ };
+} // end anonymous namespace
+
+char UnpackMachineBundles::ID = 0;
+char &llvm::UnpackMachineBundlesID = UnpackMachineBundles::ID;
+INITIALIZE_PASS(UnpackMachineBundles, "unpack-mi-bundles",
+ "Unpack machine instruction bundles", false, false)
+
+bool UnpackMachineBundles::runOnMachineFunction(MachineFunction &MF) {
+ bool Changed = false;
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
+ MachineBasicBlock *MBB = &*I;
+
+ for (MachineBasicBlock::instr_iterator MII = MBB->instr_begin(),
+ MIE = MBB->instr_end(); MII != MIE; ) {
+ MachineInstr *MI = &*MII;
+
+ // Remove BUNDLE instruction and the InsideBundle flags from bundled
+ // instructions.
+ if (MI->isBundle()) {
+ while (++MII != MIE && MII->isInsideBundle()) {
+ MII->setIsInsideBundle(false);
+ for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MII->getOperand(i);
+ if (MO.isReg() && MO.isInternalRead())
+ MO.setIsInternalRead(false);
+ }
+ }
+ MI->eraseFromParent();
+
+ Changed = true;
+ continue;
+ }
+
+ ++MII;
+ }
+ }
+
+ return Changed;
+}
+
+
+namespace {
+ class FinalizeMachineBundles : public MachineFunctionPass {
+ public:
+ static char ID; // Pass identification
+ FinalizeMachineBundles() : MachineFunctionPass(ID) {
+ initializeFinalizeMachineBundlesPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+ };
+} // end anonymous namespace
+
+char FinalizeMachineBundles::ID = 0;
+char &llvm::FinalizeMachineBundlesID = FinalizeMachineBundles::ID;
+INITIALIZE_PASS(FinalizeMachineBundles, "finalize-mi-bundles",
+ "Finalize machine instruction bundles", false, false)
+
+bool FinalizeMachineBundles::runOnMachineFunction(MachineFunction &MF) {
+ return llvm::finalizeBundles(MF);
+}
+
+
+/// finalizeBundle - Finalize a machine instruction bundle which includes
+/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
+/// This routine adds a BUNDLE instruction to represent the bundle, it adds
+/// IsInternalRead markers to MachineOperands which are defined inside the
+/// bundle, and it copies externally visible defs and uses to the BUNDLE
+/// instruction.
+void llvm::finalizeBundle(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator FirstMI,
+ MachineBasicBlock::instr_iterator LastMI) {
+ assert(FirstMI != LastMI && "Empty bundle?");
+
+ const TargetMachine &TM = MBB.getParent()->getTarget();
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+
+ MachineInstrBuilder MIB = BuildMI(MBB, FirstMI, FirstMI->getDebugLoc(),
+ TII->get(TargetOpcode::BUNDLE));
+
+ SmallVector<unsigned, 8> LocalDefs;
+ SmallSet<unsigned, 8> LocalDefSet;
+ SmallSet<unsigned, 8> DeadDefSet;
+ SmallSet<unsigned, 8> KilledDefSet;
+ SmallVector<unsigned, 8> ExternUses;
+ SmallSet<unsigned, 8> ExternUseSet;
+ SmallSet<unsigned, 8> KilledUseSet;
+ SmallSet<unsigned, 8> UndefUseSet;
+ SmallVector<MachineOperand*, 4> Defs;
+ for (; FirstMI != LastMI; ++FirstMI) {
+ for (unsigned i = 0, e = FirstMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = FirstMI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ if (MO.isDef()) {
+ Defs.push_back(&MO);
+ continue;
+ }
+
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ if (LocalDefSet.count(Reg)) {
+ MO.setIsInternalRead();
+ if (MO.isKill())
+ // Internal def is now killed.
+ KilledDefSet.insert(Reg);
+ } else {
+ if (ExternUseSet.insert(Reg)) {
+ ExternUses.push_back(Reg);
+ if (MO.isUndef())
+ UndefUseSet.insert(Reg);
+ }
+ if (MO.isKill())
+ // External def is now killed.
+ KilledUseSet.insert(Reg);
+ }
+ }
+
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ MachineOperand &MO = *Defs[i];
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+
+ if (LocalDefSet.insert(Reg)) {
+ LocalDefs.push_back(Reg);
+ if (MO.isDead()) {
+ DeadDefSet.insert(Reg);
+ }
+ } else {
+ // Re-defined inside the bundle, it's no longer killed.
+ KilledDefSet.erase(Reg);
+ if (!MO.isDead())
+ // Previously defined but dead.
+ DeadDefSet.erase(Reg);
+ }
+
+ if (!MO.isDead()) {
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
+ unsigned SubReg = *SubRegs; ++SubRegs) {
+ if (LocalDefSet.insert(SubReg))
+ LocalDefs.push_back(SubReg);
+ }
+ }
+ }
+
+ FirstMI->setIsInsideBundle();
+ Defs.clear();
+ }
+
+ SmallSet<unsigned, 8> Added;
+ for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
+ unsigned Reg = LocalDefs[i];
+ if (Added.insert(Reg)) {
+ // If it's not live beyond end of the bundle, mark it dead.
+ bool isDead = DeadDefSet.count(Reg) || KilledDefSet.count(Reg);
+ MIB.addReg(Reg, getDefRegState(true) | getDeadRegState(isDead) |
+ getImplRegState(true));
+ }
+ }
+
+ for (unsigned i = 0, e = ExternUses.size(); i != e; ++i) {
+ unsigned Reg = ExternUses[i];
+ bool isKill = KilledUseSet.count(Reg);
+ bool isUndef = UndefUseSet.count(Reg);
+ MIB.addReg(Reg, getKillRegState(isKill) | getUndefRegState(isUndef) |
+ getImplRegState(true));
+ }
+}
+
+/// finalizeBundle - Same functionality as the previous finalizeBundle except
+/// the last instruction in the bundle is not provided as an input. This is
+/// used in cases where bundles are pre-determined by marking instructions
+/// with the 'InsideBundle' marker. It returns the MBB instruction iterator that
+/// points to the end of the bundle.
+MachineBasicBlock::instr_iterator
+llvm::finalizeBundle(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator FirstMI) {
+ MachineBasicBlock::instr_iterator E = MBB.instr_end();
+ MachineBasicBlock::instr_iterator LastMI = llvm::next(FirstMI);
+ while (LastMI != E && LastMI->isInsideBundle())
+ ++LastMI;
+ finalizeBundle(MBB, FirstMI, LastMI);
+ return LastMI;
+}
+
+/// finalizeBundles - Finalize instruction bundles in the specified
+/// MachineFunction. Return true if any bundles are finalized.
+bool llvm::finalizeBundles(MachineFunction &MF) {
+ bool Changed = false;
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
+ MachineBasicBlock &MBB = *I;
+
+ MachineBasicBlock::instr_iterator MII = MBB.instr_begin();
+ assert(!MII->isInsideBundle() &&
+ "First instr cannot be inside bundle before finalization!");
+
+ MachineBasicBlock::instr_iterator MIE = MBB.instr_end();
+ if (MII == MIE)
+ continue;
+ for (++MII; MII != MIE; ) {
+ if (!MII->isInsideBundle())
+ ++MII;
+ else {
+ MII = finalizeBundle(MBB, llvm::prior(MII));
+ Changed = true;
+ }
+ }
+ }
+
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
+// MachineOperand iterator
+//===----------------------------------------------------------------------===//
+
+MachineOperandIteratorBase::RegInfo
+MachineOperandIteratorBase::analyzeVirtReg(unsigned Reg,
+ SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops) {
+ RegInfo RI = { false, false, false };
+  for (; isValid(); ++*this) {
+ MachineOperand &MO = deref();
+ if (!MO.isReg() || MO.getReg() != Reg)
+ continue;
+
+ // Remember each (MI, OpNo) that refers to Reg.
+ if (Ops)
+ Ops->push_back(std::make_pair(MO.getParent(), getOperandNo()));
+
+ // Both defs and uses can read virtual registers.
+ if (MO.readsReg()) {
+ RI.Reads = true;
+ if (MO.isDef())
+ RI.Tied = true;
+ }
+
+ // Only defs can write.
+ if (MO.isDef())
+ RI.Writes = true;
+ else if (!RI.Tied && MO.getParent()->isRegTiedToDefOperand(getOperandNo()))
+ RI.Tied = true;
+ }
+ return RI;
+}
diff --git a/contrib/llvm/lib/CodeGen/MachineLICM.cpp b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
index a1f80d5..8c562cc 100644
--- a/contrib/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
@@ -45,7 +45,7 @@ using namespace llvm;
static cl::opt<bool>
AvoidSpeculation("avoid-speculation",
cl::desc("MachineLICM should avoid speculation"),
- cl::init(false), cl::Hidden);
+ cl::init(true), cl::Hidden);
STATISTIC(NumHoisted,
"Number of machine instructions hoisted out of loops");
@@ -60,8 +60,6 @@ STATISTIC(NumPostRAHoisted,
namespace {
class MachineLICM : public MachineFunctionPass {
- bool PreRegAlloc;
-
const TargetMachine *TM;
const TargetInstrInfo *TII;
const TargetLowering *TLI;
@@ -69,6 +67,7 @@ namespace {
const MachineFrameInfo *MFI;
MachineRegisterInfo *MRI;
const InstrItineraryData *InstrItins;
+ bool PreRegAlloc;
// Various analyses that we use...
AliasAnalysis *AA; // Alias analysis info.
@@ -81,7 +80,13 @@ namespace {
MachineLoop *CurLoop; // The current loop we are working on.
MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
- BitVector AllocatableSet;
+ // Exit blocks for CurLoop.
+ SmallVector<MachineBasicBlock*, 8> ExitBlocks;
+
+ bool isExitBlock(const MachineBasicBlock *MBB) const {
+ return std::find(ExitBlocks.begin(), ExitBlocks.end(), MBB) !=
+ ExitBlocks.end();
+ }
// Track 'estimated' register pressure.
SmallSet<unsigned, 32> RegSeen;
@@ -122,8 +127,6 @@ namespace {
virtual bool runOnMachineFunction(MachineFunction &MF);
- const char *getPassName() const { return "Machine Instruction LICM"; }
-
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineLoopInfo>();
AU.addRequired<MachineDominatorTree>();
@@ -165,7 +168,9 @@ namespace {
/// ProcessMI - Examine the instruction as a potential LICM candidate. Also
/// gather register def and frame object update information.
- void ProcessMI(MachineInstr *MI, unsigned *PhysRegDefs,
+ void ProcessMI(MachineInstr *MI,
+ BitVector &PhysRegDefs,
+ BitVector &PhysRegClobbers,
SmallSet<int, 32> &StoredFIs,
SmallVector<CandidateInfo, 32> &Candidates);
@@ -182,12 +187,12 @@ namespace {
/// invariant. I.e., all virtual register operands are defined outside of
/// the loop, physical registers aren't accessed (explicitly or implicitly),
/// and the instruction is hoistable.
- ///
+ ///
bool IsLoopInvariantInst(MachineInstr &I);
- /// HasAnyPHIUse - Return true if the specified register is used by any
- /// phi node.
- bool HasAnyPHIUse(unsigned Reg) const;
+ /// HasLoopPHIUse - Return true if the specified instruction is used by any
+ /// phi node in the current loop.
+ bool HasLoopPHIUse(const MachineInstr *MI) const;
/// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
/// and a use in the current loop, return true if the target considered
@@ -200,7 +205,7 @@ namespace {
/// CanCauseHighRegPressure - Visit BBs from header to current BB,
/// check if hoisting an instruction of the given cost matrix can cause high
/// register pressure.
- bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);
+ bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost, bool Cheap);
/// UpdateBackTraceRegPressure - Traverse the back trace from header to
/// the current block and update their register pressures to reflect the
@@ -215,13 +220,25 @@ namespace {
/// If not then a load from this mbb may not be safe to hoist.
bool IsGuaranteedToExecute(MachineBasicBlock *BB);
- /// HoistRegion - Walk the specified region of the CFG (defined by all
- /// blocks dominated by the specified block, and that are in the current
- /// loop) in depth first order w.r.t the DominatorTree. This allows us to
- /// visit definitions before uses, allowing us to hoist a loop body in one
- /// pass without iteration.
+ void EnterScope(MachineBasicBlock *MBB);
+
+ void ExitScope(MachineBasicBlock *MBB);
+
+  /// ExitScopeIfDone - Destroy the scope for the MBB that corresponds to the
+  /// given dominator tree node if it's a leaf or all of its children are done.
+  /// Walk up the dominator tree to destroy ancestors which are now done.
+ void ExitScopeIfDone(MachineDomTreeNode *Node,
+ DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
+ DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);
+
+ /// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
+ /// blocks dominated by the specified header block, and that are in the
+ /// current loop) in depth first order w.r.t the DominatorTree. This allows
+ /// us to visit definitions before uses, allowing us to hoist a loop body in
+ /// one pass without iteration.
///
- void HoistRegion(MachineDomTreeNode *N, bool IsHeader = false);
+ void HoistOutOfLoop(MachineDomTreeNode *LoopHeaderNode);
+ void HoistRegion(MachineDomTreeNode *N, bool IsHeader);
/// getRegisterClassIDAndCost - For a given MI, register, and the operand
/// index, return the ID and cost of its representative register class by
@@ -278,6 +295,7 @@ namespace {
} // end anonymous namespace
char MachineLICM::ID = 0;
+char &llvm::MachineLICMID = MachineLICM::ID;
INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
"Machine Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
@@ -286,10 +304,6 @@ INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineLICM, "machinelicm",
"Machine Loop Invariant Code Motion", false, false)
-FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
- return new MachineLICM(PreRegAlloc);
-}
-
/// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
/// loop that has a unique predecessor.
static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
@@ -305,12 +319,6 @@ static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
}
bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
- if (PreRegAlloc)
- DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
- else
- DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
- DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
-
Changed = FirstInLoop = false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
@@ -319,7 +327,14 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
MFI = MF.getFrameInfo();
MRI = &MF.getRegInfo();
InstrItins = TM->getInstrItineraryData();
- AllocatableSet = TRI->getAllocatableSet(MF);
+
+ PreRegAlloc = MRI->isSSA();
+
+ if (PreRegAlloc)
+ DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
+ else
+ DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
+ DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
if (PreRegAlloc) {
// Estimate register pressure during pre-regalloc pass.
@@ -341,6 +356,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
while (!Worklist.empty()) {
CurLoop = Worklist.pop_back_val();
CurPreheader = 0;
+ ExitBlocks.clear();
// If this is done before regalloc, only visit outer-most preheader-sporting
// loops.
@@ -349,6 +365,8 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
continue;
}
+ CurLoop->getExitBlocks(ExitBlocks);
+
if (!PreRegAlloc)
HoistRegionPostRA();
else {
@@ -356,7 +374,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
// being hoisted.
MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
FirstInLoop = true;
- HoistRegion(N, true);
+ HoistOutOfLoop(N);
CSEMap.clear();
}
}
@@ -383,7 +401,8 @@ static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
/// ProcessMI - Examine the instruction as a potential LICM candidate. Also
/// gather register def and frame object update information.
void MachineLICM::ProcessMI(MachineInstr *MI,
- unsigned *PhysRegDefs,
+ BitVector &PhysRegDefs,
+ BitVector &PhysRegClobbers,
SmallSet<int, 32> &StoredFIs,
SmallVector<CandidateInfo, 32> &Candidates) {
bool RuledOut = false;
@@ -402,6 +421,13 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
continue;
}
+ // We can't hoist an instruction defining a physreg that is clobbered in
+ // the loop.
+ if (MO.isRegMask()) {
+ PhysRegClobbers.setBitsNotInMask(MO.getRegMask());
+ continue;
+ }
+
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
@@ -411,7 +437,7 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
"Not expecting virtual register!");
if (!MO.isDef()) {
- if (Reg && PhysRegDefs[Reg])
+ if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
// If it's using a non-loop-invariant register, then it's obviously not
// safe to hoist.
HasNonInvariantUse = true;
@@ -419,9 +445,8 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
}
if (MO.isImplicit()) {
- ++PhysRegDefs[Reg];
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- ++PhysRegDefs[*AS];
+ for (const uint16_t *AS = TRI->getOverlaps(Reg); *AS; ++AS)
+ PhysRegClobbers.set(*AS);
if (!MO.isDead())
// Non-dead implicit def? This cannot be hoisted.
RuledOut = true;
@@ -438,14 +463,17 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
Def = Reg;
// If we have already seen another instruction that defines the same
- // register, then this is not safe.
- if (++PhysRegDefs[Reg] > 1)
- // MI defined register is seen defined by another instruction in
- // the loop, it cannot be a LICM candidate.
- RuledOut = true;
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- if (++PhysRegDefs[*AS] > 1)
+ // register, then this is not safe. A second def is indicated by setting
+ // a bit in PhysRegClobbers.
+ for (const uint16_t *AS = TRI->getOverlaps(Reg); *AS; ++AS) {
+ if (PhysRegDefs.test(*AS))
+ PhysRegClobbers.set(*AS);
+ if (PhysRegClobbers.test(*AS))
+ // MI defined register is seen defined by another instruction in
+ // the loop, it cannot be a LICM candidate.
RuledOut = true;
+ PhysRegDefs.set(*AS);
+ }
}
// Only consider reloads for now and remats which do not have register
@@ -461,9 +489,13 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
/// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
/// invariants out to the preheader.
void MachineLICM::HoistRegionPostRA() {
+ MachineBasicBlock *Preheader = getCurPreheader();
+ if (!Preheader)
+ return;
+
unsigned NumRegs = TRI->getNumRegs();
- unsigned *PhysRegDefs = new unsigned[NumRegs];
- std::fill(PhysRegDefs, PhysRegDefs + NumRegs, 0);
+ BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
+ BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.
SmallVector<CandidateInfo, 32> Candidates;
SmallSet<int, 32> StoredFIs;
@@ -485,16 +517,31 @@ void MachineLICM::HoistRegionPostRA() {
for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
E = BB->livein_end(); I != E; ++I) {
unsigned Reg = *I;
- ++PhysRegDefs[Reg];
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- ++PhysRegDefs[*AS];
+ for (const uint16_t *AS = TRI->getOverlaps(Reg); *AS; ++AS)
+ PhysRegDefs.set(*AS);
}
SpeculationState = SpeculateUnknown;
for (MachineBasicBlock::iterator
MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
MachineInstr *MI = &*MII;
- ProcessMI(MI, PhysRegDefs, StoredFIs, Candidates);
+ ProcessMI(MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
+ }
+ }
+
+ // Gather the registers read / clobbered by the terminator.
+ BitVector TermRegs(NumRegs);
+ MachineBasicBlock::iterator TI = Preheader->getFirstTerminator();
+ if (TI != Preheader->end()) {
+ for (unsigned i = 0, e = TI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = TI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ for (const uint16_t *AS = TRI->getOverlaps(Reg); *AS; ++AS)
+ TermRegs.set(*AS);
}
}
@@ -503,19 +550,25 @@ void MachineLICM::HoistRegionPostRA() {
// instruction in the loop.
// 2. If the candidate is a load from stack slot (always true for now),
// check if the slot is stored anywhere in the loop.
+ // 3. Make sure the candidate's def does not clobber any register read by
+ // the terminator, and that the def itself is not clobbered by the
+ // terminator.
for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
if (Candidates[i].FI != INT_MIN &&
StoredFIs.count(Candidates[i].FI))
continue;
- if (PhysRegDefs[Candidates[i].Def] == 1) {
+ unsigned Def = Candidates[i].Def;
+ if (!PhysRegClobbers.test(Def) && !TermRegs.test(Def)) {
bool Safe = true;
MachineInstr *MI = Candidates[i].MI;
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
const MachineOperand &MO = MI->getOperand(j);
if (!MO.isReg() || MO.isDef() || !MO.getReg())
continue;
- if (PhysRegDefs[MO.getReg()]) {
+ unsigned Reg = MO.getReg();
+ if (PhysRegDefs.test(Reg) ||
+ PhysRegClobbers.test(Reg)) {
// If it's using a non-loop-invariant register, then it's obviously
// not safe to hoist.
Safe = false;
@@ -526,8 +579,6 @@ void MachineLICM::HoistRegionPostRA() {
HoistPostRA(MI, Candidates[i].Def);
}
}
-
- delete[] PhysRegDefs;
}
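The bookkeeping above replaces the old per-register def counters with a pair of bit vectors: PhysRegDefs marks registers defined at least once in the loop, and PhysRegClobbers marks registers defined more than once or hit by a regmask. A minimal standalone sketch of the same saturating two-level counter, using std::bitset in place of llvm::BitVector (the 256-register bound is an illustrative assumption):

    #include <bitset>

    constexpr unsigned NumRegs = 256;      // illustrative bound
    std::bitset<NumRegs> DefinedOnce;      // defined at least once
    std::bitset<NumRegs> DefinedAgain;     // defined two or more times

    // Record one def of Reg. The pair of bitsets saturates at "two or
    // more", which is all the hoisting legality check needs to know.
    void recordDef(unsigned Reg) {
      if (DefinedOnce.test(Reg))
        DefinedAgain.set(Reg);             // second def: treat as a clobber
      DefinedOnce.set(Reg);
    }

    // A candidate is only hoistable if its def register was defined exactly
    // once in the loop (cf. the PhysRegClobbers test above).
    bool definedExactlyOnce(unsigned Reg) {
      return DefinedOnce.test(Reg) && !DefinedAgain.test(Reg);
    }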
/// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
@@ -556,26 +607,17 @@ void MachineLICM::AddToLiveIns(unsigned Reg) {
/// dirty work.
void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
MachineBasicBlock *Preheader = getCurPreheader();
- if (!Preheader) return;
// Now move the instructions to the predecessor, inserting it before any
// terminator instructions.
- DEBUG({
- dbgs() << "Hoisting " << *MI;
- if (Preheader->getBasicBlock())
- dbgs() << " to MachineBasicBlock "
- << Preheader->getName();
- if (MI->getParent()->getBasicBlock())
- dbgs() << " from MachineBasicBlock "
- << MI->getParent()->getName();
- dbgs() << "\n";
- });
+ DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
+ << MI->getParent()->getNumber() << ": " << *MI);
// Splice the instruction to the preheader.
MachineBasicBlock *MBB = MI->getParent();
Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
- // Add register to livein list to all the BBs in the current loop since a
+ // Add register to livein list to all the BBs in the current loop since a
// loop invariant must be kept live throughout the whole loop. This is
// important to ensure later passes do not scavenge the def register.
AddToLiveIns(Def);
@@ -589,7 +631,7 @@ void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
if (SpeculationState != SpeculateUnknown)
return SpeculationState == SpeculateFalse;
-
+
if (BB != CurLoop->getHeader()) {
// Check loop exiting blocks.
SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
@@ -605,57 +647,126 @@ bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
return true;
}
-/// HoistRegion - Walk the specified region of the CFG (defined by all blocks
-/// dominated by the specified block, and that are in the current loop) in depth
-/// first order w.r.t the DominatorTree. This allows us to visit definitions
-/// before uses, allowing us to hoist a loop body in one pass without iteration.
-///
-void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
- assert(N != 0 && "Null dominator tree node?");
- MachineBasicBlock *BB = N->getBlock();
+void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
- // If the header of the loop containing this basic block is a landing pad,
- // then don't try to hoist instructions out of this loop.
- const MachineLoop *ML = MLI->getLoopFor(BB);
- if (ML && ML->getHeader()->isLandingPad()) return;
+ // Remember livein register pressure.
+ BackTrace.push_back(RegPressure);
+}
- // If this subregion is not in the top level loop at all, exit.
- if (!CurLoop->contains(BB)) return;
+void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
+ BackTrace.pop_back();
+}
- MachineBasicBlock *Preheader = getCurPreheader();
- if (!Preheader)
+/// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
+/// dominator tree node if it's a leaf or all of its children are done. Walk
+/// up the dominator tree to destroy ancestors which are now done.
+void MachineLICM::ExitScopeIfDone(MachineDomTreeNode *Node,
+ DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
+ DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
+ if (OpenChildren[Node])
return;
- if (IsHeader) {
+ // Pop scope.
+ ExitScope(Node->getBlock());
+
+ // Now traverse upwards to pop ancestors whose children are all done.
+ while (MachineDomTreeNode *Parent = ParentMap[Node]) {
+ unsigned Left = --OpenChildren[Parent];
+ if (Left != 0)
+ break;
+ ExitScope(Parent->getBlock());
+ Node = Parent;
+ }
+}
+
+/// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
+/// blocks dominated by the specified header block, and that are in the
+/// current loop) in depth first order w.r.t the DominatorTree. This allows
+/// us to visit definitions before uses, allowing us to hoist a loop body in
+/// one pass without iteration.
+///
+void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
+ SmallVector<MachineDomTreeNode*, 32> Scopes;
+ SmallVector<MachineDomTreeNode*, 8> WorkList;
+ DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
+ DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
+
+ // Perform a DFS walk to determine the order of visit.
+ WorkList.push_back(HeaderN);
+ do {
+ MachineDomTreeNode *Node = WorkList.pop_back_val();
+ assert(Node != 0 && "Null dominator tree node?");
+ MachineBasicBlock *BB = Node->getBlock();
+
+ // If the header of the loop containing this basic block is a landing pad,
+ // then don't try to hoist instructions out of this loop.
+ const MachineLoop *ML = MLI->getLoopFor(BB);
+ if (ML && ML->getHeader()->isLandingPad())
+ continue;
+
+ // If this subregion is not in the top level loop at all, exit.
+ if (!CurLoop->contains(BB))
+ continue;
+
+ Scopes.push_back(Node);
+ const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
+ unsigned NumChildren = Children.size();
+
+ // Don't hoist things out of a large switch statement. This often causes
+ // code to be hoisted that wasn't going to be executed, and increases
+ // register pressure in a situation where it's likely to matter.
+ if (BB->succ_size() >= 25)
+ NumChildren = 0;
+
+ OpenChildren[Node] = NumChildren;
+ // Add children in reverse order so that the next node popped off the
+ // worklist is the first child of this node. This means we ultimately
+ // traverse the DOM tree in exactly the same order as if we'd recursed.
+ for (int i = (int)NumChildren-1; i >= 0; --i) {
+ MachineDomTreeNode *Child = Children[i];
+ ParentMap[Child] = Node;
+ WorkList.push_back(Child);
+ }
+ } while (!WorkList.empty());
+
+ if (Scopes.size() != 0) {
+ MachineBasicBlock *Preheader = getCurPreheader();
+ if (!Preheader)
+ return;
+
// Compute registers which are livein into the loop headers.
RegSeen.clear();
BackTrace.clear();
InitRegPressure(Preheader);
}
- // Remember livein register pressure.
- BackTrace.push_back(RegPressure);
+ // Now perform LICM.
+ for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
+ MachineDomTreeNode *Node = Scopes[i];
+ MachineBasicBlock *MBB = Node->getBlock();
- SpeculationState = SpeculateUnknown;
- for (MachineBasicBlock::iterator
- MII = BB->begin(), E = BB->end(); MII != E; ) {
- MachineBasicBlock::iterator NextMII = MII; ++NextMII;
- MachineInstr *MI = &*MII;
- if (!Hoist(MI, Preheader))
- UpdateRegPressure(MI);
- MII = NextMII;
- }
+ MachineBasicBlock *Preheader = getCurPreheader();
+ if (!Preheader)
+ continue;
- // Don't hoist things out of a large switch statement. This often causes
- // code to be hoisted that wasn't going to be executed, and increases
- // register pressure in a situation where it's likely to matter.
- if (BB->succ_size() < 25) {
- const std::vector<MachineDomTreeNode*> &Children = N->getChildren();
- for (unsigned I = 0, E = Children.size(); I != E; ++I)
- HoistRegion(Children[I]);
- }
+ EnterScope(MBB);
- BackTrace.pop_back();
+ // Process the block
+ SpeculationState = SpeculateUnknown;
+ for (MachineBasicBlock::iterator
+ MII = MBB->begin(), E = MBB->end(); MII != E; ) {
+ MachineBasicBlock::iterator NextMII = MII; ++NextMII;
+ MachineInstr *MI = &*MII;
+ if (!Hoist(MI, Preheader))
+ UpdateRegPressure(MI);
+ MII = NextMII;
+ }
+
+ // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
+ ExitScopeIfDone(Node, OpenChildren, ParentMap);
+ }
}
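HoistOutOfLoop above replaces the recursive HoistRegion walk with two phases: a preorder worklist DFS that records the visit order in Scopes, and ParentMap/OpenChildren bookkeeping so each scope is popped exactly when its subtree is finished. A compact sketch of the same pattern on a generic tree; Node, enterScope, and exitScope are stand-ins for the MachineLICM types, not LLVM API:

    #include <map>
    #include <vector>

    struct Node { std::vector<Node*> Kids; };

    void enterScope(Node*) { /* push per-scope state, cf. EnterScope */ }
    void exitScope(Node*)  { /* pop per-scope state, cf. ExitScope */ }

    void walk(Node *Root) {
      std::vector<Node*> Scopes, Work{Root};
      std::map<Node*, Node*> Parent;
      std::map<Node*, unsigned> Open;

      // Phase 1: iterative preorder DFS. Children go on the worklist in
      // reverse so the first child is popped next, matching recursion.
      while (!Work.empty()) {
        Node *N = Work.back(); Work.pop_back();
        Scopes.push_back(N);
        Open[N] = N->Kids.size();
        for (auto I = N->Kids.rbegin(), E = N->Kids.rend(); I != E; ++I) {
          Parent[*I] = N;
          Work.push_back(*I);
        }
      }

      // Phase 2: process in preorder, closing a scope as soon as its whole
      // subtree is done and propagating completion upward (ExitScopeIfDone).
      for (Node *N : Scopes) {
        enterScope(N);
        // ... process N ...
        for (Node *Cur = N; Cur && Open[Cur] == 0;) {
          exitScope(Cur);
          Node *P = Parent.count(Cur) ? Parent[Cur] : nullptr;
          if (P) --Open[P];
          Cur = P;
        }
      }
    }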
static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
@@ -670,7 +781,7 @@ MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
unsigned &RCId, unsigned &RCCost) const {
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
EVT VT = *RC->vt_begin();
- if (VT == MVT::untyped) {
+ if (VT == MVT::Untyped) {
RCId = RC->getID();
RCCost = 1;
} else {
@@ -678,7 +789,7 @@ MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
RCCost = TLI->getRepRegClassCostFor(VT);
}
}
-
+
/// InitRegPressure - Find all virtual register references that are liveout of
/// the preheader to initialize the starting "register pressure". Note this
/// does not count live through (livein but not used) registers.
@@ -762,6 +873,21 @@ void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
}
}
+/// isLoadFromGOTOrConstantPool - Return true if this machine instruction
+/// loads from the global offset table or the constant pool.
+static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
+ assert (MI.mayLoad() && "Expected MI that loads!");
+ for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
+ E = MI.memoperands_end(); I != E; ++I) {
+ if (const Value *V = (*I)->getValue()) {
+ if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
+ if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
+ return true;
+ }
+ }
+ return false;
+}
+
/// IsLICMCandidate - Returns true if the instruction may be a suitable
/// candidate for LICM. e.g. If the instruction is a call, then it's obviously
/// not safe to hoist it.
@@ -773,9 +899,12 @@ bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
// If it is load then check if it is guaranteed to execute by making sure that
// it dominates all exiting blocks. If it doesn't, then there is a path out of
- // the loop which does not execute this load, so we can't hoist it.
+ // the loop which does not execute this load, so we can't hoist it. Loads
+ // from constant memory are not always safe to speculate, for example an
+ // indexed load from a jump table.
// Stores and side effects are already checked by isSafeToMove.
- if (I.getDesc().mayLoad() && !IsGuaranteedToExecute(I.getParent()))
+ if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
+ !IsGuaranteedToExecute(I.getParent()))
return false;
return true;
@@ -785,7 +914,7 @@ bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
/// invariant. I.e., all virtual register operands are defined outside of the
/// loop, physical registers aren't accessed explicitly, and there are no side
/// effects that aren't captured by the operands or other flags.
-///
+///
bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
if (!IsLICMCandidate(I))
return false;
@@ -806,18 +935,8 @@ bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
- if (!MRI->def_empty(Reg))
- return false;
- if (AllocatableSet.test(Reg))
+ if (!MRI->isConstantPhysReg(Reg, *I.getParent()->getParent()))
return false;
- // Check for a def among the register's aliases too.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- if (!MRI->def_empty(AliasReg))
- return false;
- if (AllocatableSet.test(AliasReg))
- return false;
- }
// Otherwise it's safe to move.
continue;
} else if (!MO.isDead()) {
@@ -847,22 +966,40 @@ bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
}
-/// HasAnyPHIUse - Return true if the specified register is used by any
-/// phi node.
-bool MachineLICM::HasAnyPHIUse(unsigned Reg) const {
- for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
- UE = MRI->use_end(); UI != UE; ++UI) {
- MachineInstr *UseMI = &*UI;
- if (UseMI->isPHI())
- return true;
- // Look pass copies as well.
- if (UseMI->isCopy()) {
- unsigned Def = UseMI->getOperand(0).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Def) &&
- HasAnyPHIUse(Def))
- return true;
+/// HasLoopPHIUse - Return true if the specified instruction is used by a
+/// phi node and hoisting it could cause a copy to be inserted.
+bool MachineLICM::HasLoopPHIUse(const MachineInstr *MI) const {
+ SmallVector<const MachineInstr*, 8> Work(1, MI);
+ do {
+ MI = Work.pop_back_val();
+ for (ConstMIOperands MO(MI); MO.isValid(); ++MO) {
+ if (!MO->isReg() || !MO->isDef())
+ continue;
+ unsigned Reg = MO->getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+ for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
+ UE = MRI->use_end(); UI != UE; ++UI) {
+ MachineInstr *UseMI = &*UI;
+ // A PHI may cause a copy to be inserted.
+ if (UseMI->isPHI()) {
+ // A PHI inside the loop causes a copy because the live range of Reg is
+ // extended across the PHI.
+ if (CurLoop->contains(UseMI))
+ return true;
+ // A PHI in an exit block can cause a copy to be inserted if the PHI
+ // has multiple predecessors in the loop with different values.
+ // For now, approximate by rejecting all exit blocks.
+ if (isExitBlock(UseMI->getParent()))
+ return true;
+ continue;
+ }
+ // Look past copies as well.
+ if (UseMI->isCopy() && CurLoop->contains(UseMI))
+ Work.push_back(UseMI);
+ }
}
- }
+ } while (!Work.empty());
return false;
}
@@ -903,7 +1040,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
/// IsCheapInstruction - Return true if the instruction is marked "cheap" or
/// the operand latency between its def and a use is one or less.
bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
- if (MI.getDesc().isAsCheapAsAMove() || MI.isCopyLike())
+ if (MI.isAsCheapAsAMove() || MI.isCopyLike())
return true;
if (!InstrItins || InstrItins->isEmpty())
return false;
@@ -930,16 +1067,25 @@ bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
/// CanCauseHighRegPressure - Visit BBs from header to current BB, check
/// if hoisting an instruction of the given cost matrix can cause high
/// register pressure.
-bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
+bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost,
+ bool CheapInstr) {
for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
CI != CE; ++CI) {
- if (CI->second <= 0)
+ if (CI->second <= 0)
continue;
unsigned RCId = CI->first;
+ unsigned Limit = RegLimit[RCId];
+ int Cost = CI->second;
+
+ // Don't hoist cheap instructions if they would increase register pressure,
+ // even if we're under the limit.
+ if (CheapInstr)
+ return true;
+
for (unsigned i = BackTrace.size(); i != 0; --i) {
SmallVector<unsigned, 8> &RP = BackTrace[i-1];
- if (RP[RCId] + CI->second >= RegLimit[RCId])
+ if (RP[RCId] + Cost >= Limit)
return true;
}
}
@@ -999,87 +1145,95 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
if (MI.isImplicitDef())
return true;
- // If the instruction is cheap, only hoist if it is re-materilizable. LICM
- // will increase register pressure. It's probably not worth it if the
- // instruction is cheap.
- // Also hoist loads from constant memory, e.g. load from stubs, GOT. Hoisting
- // these tend to help performance in low register pressure situation. The
- // trade off is it may cause spill in high pressure situation. It will end up
- // adding a store in the loop preheader. But the reload is no more expensive.
- // The side benefit is these loads are frequently CSE'ed.
- if (IsCheapInstruction(MI)) {
- if (!TII->isTriviallyReMaterializable(&MI, AA))
- return false;
- } else {
- // Estimate register pressure to determine whether to LICM the instruction.
- // In low register pressure situation, we can be more aggressive about
- // hoisting. Also, favors hoisting long latency instructions even in
- // moderately high pressure situation.
- // FIXME: If there are long latency loop-invariant instructions inside the
- // loop at this point, why didn't the optimizer's LICM hoist them?
- DenseMap<unsigned, int> Cost;
- for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.isImplicit())
- continue;
- unsigned Reg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
+ // Besides removing computation from the loop, hoisting an instruction has
+ // these effects:
+ //
+ // - The value defined by the instruction becomes live across the entire
+ // loop. This increases register pressure in the loop.
+ //
+ // - If the value is used by a PHI in the loop, a copy will be required for
+ // lowering the PHI after extending the live range.
+ //
+ // - When hoisting the last use of a value in the loop, that value no longer
+ // needs to be live in the loop. This lowers register pressure in the loop.
+
+ bool CheapInstr = IsCheapInstruction(MI);
+ bool CreatesCopy = HasLoopPHIUse(&MI);
+
+ // Don't hoist a cheap instruction if it would create a copy in the loop.
+ if (CheapInstr && CreatesCopy) {
+ DEBUG(dbgs() << "Won't hoist cheap instr with loop PHI use: " << MI);
+ return false;
+ }
- unsigned RCId, RCCost;
- getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
- if (MO.isDef()) {
- if (HasHighOperandLatency(MI, i, Reg)) {
- ++NumHighLatency;
- return true;
- }
+ // Rematerializable instructions should always be hoisted since the register
+ // allocator can just pull them down again when needed.
+ if (TII->isTriviallyReMaterializable(&MI, AA))
+ return true;
+
+ // Estimate register pressure to determine whether to LICM the instruction.
+ // In low register pressure situation, we can be more aggressive about
+ // hoisting. Also, favors hoisting long latency instructions even in
+ // moderately high pressure situation.
+ // Cheap instructions will only be hoisted if they don't increase register
+ // pressure at all.
+ // FIXME: If there are long latency loop-invariant instructions inside the
+ // loop at this point, why didn't the optimizer's LICM hoist them?
+ DenseMap<unsigned, int> Cost;
+ for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
+ if (!MO.isReg() || MO.isImplicit())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
- DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
- if (CI != Cost.end())
- CI->second += RCCost;
- else
- Cost.insert(std::make_pair(RCId, RCCost));
- } else if (isOperandKill(MO, MRI)) {
- // Is a virtual register use is a kill, hoisting it out of the loop
- // may actually reduce register pressure or be register pressure
- // neutral.
- DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
- if (CI != Cost.end())
- CI->second -= RCCost;
- else
- Cost.insert(std::make_pair(RCId, -RCCost));
+ unsigned RCId, RCCost;
+ getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
+ if (MO.isDef()) {
+ if (HasHighOperandLatency(MI, i, Reg)) {
+ DEBUG(dbgs() << "Hoist High Latency: " << MI);
+ ++NumHighLatency;
+ return true;
}
+ Cost[RCId] += RCCost;
+ } else if (isOperandKill(MO, MRI)) {
+ // If a virtual register use is a kill, hoisting it out of the loop
+ // may actually reduce register pressure or be register pressure
+ // neutral.
+ Cost[RCId] -= RCCost;
}
+ }
- // Visit BBs from header to current BB, if hoisting this doesn't cause
- // high register pressure, then it's safe to proceed.
- if (!CanCauseHighRegPressure(Cost)) {
- ++NumLowRP;
- return true;
- }
+ // Visit BBs from header to current BB, if hoisting this doesn't cause
+ // high register pressure, then it's safe to proceed.
+ if (!CanCauseHighRegPressure(Cost, CheapInstr)) {
+ DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI);
+ ++NumLowRP;
+ return true;
+ }
- // Do not "speculate" in high register pressure situation. If an
- // instruction is not guaranteed to be executed in the loop, it's best to be
- // conservative.
- if (AvoidSpeculation &&
- (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI)))
- return false;
+ // Don't risk increasing register pressure if it would create copies.
+ if (CreatesCopy) {
+ DEBUG(dbgs() << "Won't hoist instr with loop PHI use: " << MI);
+ return false;
+ }
- // High register pressure situation, only hoist if the instruction is going to
- // be remat'ed.
- if (!TII->isTriviallyReMaterializable(&MI, AA) &&
- !MI.isInvariantLoad(AA))
- return false;
+ // Do not "speculate" in a high register pressure situation. If an
+ // instruction is not guaranteed to be executed in the loop, it's best to be
+ // conservative.
+ if (AvoidSpeculation &&
+ (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI))) {
+ DEBUG(dbgs() << "Won't speculate: " << MI);
+ return false;
}
- // If result(s) of this instruction is used by PHIs outside of the loop, then
- // don't hoist it if the instruction because it will introduce an extra copy.
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || !MO.isDef())
- continue;
- if (HasAnyPHIUse(MO.getReg()))
- return false;
+ // High register pressure situation: only hoist if the instruction is going
+ // to be remat'ed.
+ if (!TII->isTriviallyReMaterializable(&MI, AA) &&
+ !MI.isInvariantLoad(AA)) {
+ DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
+ return false;
}
return true;
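Taken together, the rewritten IsProfitableToHoist applies a fixed ladder of checks. A condensed restatement of that control flow; the predicates are placeholder stubs standing in for the MachineLICM queries, so this is a paraphrase of the function above, not a separate implementation:

    // Placeholder predicates; each mirrors a query used by the real code.
    static bool isImplicitDef()             { return false; }
    static bool isCheapInstruction()        { return false; }
    static bool hasLoopPHIUse()             { return false; }
    static bool isTriviallyRemat()          { return false; }
    static bool causesHighRegPressure(bool) { return true;  }
    static bool avoidSpeculation()          { return true;  }
    static bool guaranteedToExecute()       { return false; }
    static bool mayCSE()                    { return false; }
    static bool isInvariantLoad()           { return false; }

    static bool isProfitableToHoist() {
      if (isImplicitDef())               return true;  // always free
      bool Cheap  = isCheapInstruction();
      bool Copies = hasLoopPHIUse();
      if (Cheap && Copies)               return false; // copy eats the win
      if (isTriviallyRemat())            return true;  // RA can pull it back
      if (!causesHighRegPressure(Cheap)) return true;  // pressure is fine
      if (Copies)                        return false; // pressure plus a copy
      if (avoidSpeculation() && !guaranteedToExecute() && !mayCSE())
                                         return false; // don't speculate
      if (!isTriviallyRemat() && !isInvariantLoad())
                                         return false; // spill risk, no remat
      return true;
    }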
@@ -1087,7 +1241,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
// Don't unfold simple loads.
- if (MI->getDesc().canFoldAsLoad())
+ if (MI->canFoldAsLoad())
return 0;
// If not, we may be able to unfold a load and hoist that.
@@ -1123,8 +1277,9 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
assert(NewMIs.size() == 2 &&
"Unfolded a load into multiple instructions!");
MachineBasicBlock *MBB = MI->getParent();
- MBB->insert(MI, NewMIs[0]);
- MBB->insert(MI, NewMIs[1]);
+ MachineBasicBlock::iterator Pos = MI;
+ MBB->insert(Pos, NewMIs[0]);
+ MBB->insert(Pos, NewMIs[1]);
// If unfolding produced a load that wasn't loop-invariant or profitable to
// hoist, discard the new instructions and bail.
if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
@@ -1180,6 +1335,7 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
// Replace virtual registers defined by MI by their counterparts defined
// by Dup.
+ SmallVector<unsigned, 2> Defs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
@@ -1190,11 +1346,33 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
"Instructions with different phys regs are not identical!");
if (MO.isReg() && MO.isDef() &&
- !TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
- MRI->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
- MRI->clearKillFlags(Dup->getOperand(i).getReg());
+ !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ Defs.push_back(i);
+ }
+
+ SmallVector<const TargetRegisterClass*, 2> OrigRCs;
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ unsigned Idx = Defs[i];
+ unsigned Reg = MI->getOperand(Idx).getReg();
+ unsigned DupReg = Dup->getOperand(Idx).getReg();
+ OrigRCs.push_back(MRI->getRegClass(DupReg));
+
+ if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
+ // Restore old RCs if more than one defs.
+ for (unsigned j = 0; j != i; ++j)
+ MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
+ return false;
}
}
+
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ unsigned Idx = Defs[i];
+ unsigned Reg = MI->getOperand(Idx).getReg();
+ unsigned DupReg = Dup->getOperand(Idx).getReg();
+ MRI->replaceRegWith(Reg, DupReg);
+ MRI->clearKillFlags(DupReg);
+ }
+
MI->eraseFromParent();
++NumCSEed;
return true;
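The EliminateCSE change above is a transactional pattern: first attempt to constrain every duplicate def's register class, rolling back the already-applied constraints if any step fails, and only then perform the irreversible replaceRegWith rewrites. A generic sketch of the same try-all-then-commit idiom; the Item type and callbacks are stand-ins, not the MachineRegisterInfo API:

    #include <functional>
    #include <vector>

    struct Item { int Cls; };

    // Constrain each item in turn; on the first failure restore everything
    // already touched and report failure. Commit only after all succeed.
    bool constrainAllOrRollback(std::vector<Item*> &Items, int NewCls,
                                std::function<bool(Item*, int)> Constrain,
                                std::function<void(Item*)> Commit) {
      std::vector<int> Orig;
      for (size_t i = 0; i != Items.size(); ++i) {
        Orig.push_back(Items[i]->Cls);
        if (!Constrain(Items[i], NewCls)) {
          for (size_t j = 0; j != i; ++j)   // undo partial progress
            Items[j]->Cls = Orig[j];
          return false;
        }
      }
      for (Item *I : Items)                 // all constraints held: commit
        Commit(I);
      return true;
    }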
diff --git a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
index 80c4854..ea98b23 100644
--- a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
@@ -257,7 +257,7 @@ MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI,
: ImmutablePass(ID), Context(MAI, MRI, MOFI),
ObjFileMMI(0), CompactUnwindEncoding(0), CurCallSite(0), CallsEHReturn(0),
CallsUnwindInit(0), DbgInfoAvailable(false),
- CallsExternalVAFunctionWithFloatingPointArguments(false) {
+ UsesVAFloatArgument(false) {
initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
// Always emit some info, by default "no personality" info.
Personalities.push_back(NULL);
@@ -268,9 +268,9 @@ MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI,
MachineModuleInfo::MachineModuleInfo()
: ImmutablePass(ID),
Context(*(MCAsmInfo*)0, *(MCRegisterInfo*)0, (MCObjectFileInfo*)0) {
- assert(0 && "This MachineModuleInfo constructor should never be called, MMI "
- "should always be explicitly constructed by LLVMTargetMachine");
- abort();
+ llvm_unreachable("This MachineModuleInfo constructor should never be called, "
+ "MMI should always be explicitly constructed by "
+ "LLVMTargetMachine");
}
MachineModuleInfo::~MachineModuleInfo() {
@@ -503,8 +503,7 @@ void MachineModuleInfo::TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap) {
/// indexes.
void MachineModuleInfo::setCallSiteLandingPad(MCSymbol *Sym,
ArrayRef<unsigned> Sites) {
- for (unsigned I = 0, E = Sites.size(); I != E; ++I)
- LPadToCallSiteMap[Sym].push_back(Sites[I]);
+ LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
@@ -541,8 +540,7 @@ try_next:;
// Add the new filter.
int FilterID = -(1 + FilterIds.size());
FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
- for (unsigned I = 0, N = TyIds.size(); I != N; ++I)
- FilterIds.push_back(TyIds[I]);
+ FilterIds.insert(FilterIds.end(), TyIds.begin(), TyIds.end());
FilterEnds.push_back(FilterIds.size());
FilterIds.push_back(0); // terminator
return FilterID;
@@ -561,13 +559,13 @@ unsigned MachineModuleInfo::getPersonalityIndex() const {
const Function* Personality = NULL;
// Scan landing pads. If there is at least one non-NULL personality - use it.
- for (unsigned i = 0; i != LandingPads.size(); ++i)
+ for (unsigned i = 0, e = LandingPads.size(); i != e; ++i)
if (LandingPads[i].Personality) {
Personality = LandingPads[i].Personality;
break;
}
- for (unsigned i = 0; i < Personalities.size(); ++i) {
+ for (unsigned i = 0, e = Personalities.size(); i < e; ++i) {
if (Personalities[i] == Personality)
return i;
}
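Two of the MachineModuleInfo hunks above replace element-by-element push_back loops with a single range append/insert, which copies the whole range in one step and grows the vector at most once. The same pattern with standard containers:

    #include <vector>

    void appendRange(std::vector<int> &Dst, const std::vector<int> &Src) {
      // One range insert instead of a push_back loop.
      Dst.insert(Dst.end(), Src.begin(), Src.end());
    }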
diff --git a/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp b/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
index 9f4ef12..58e067b 100644
--- a/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
+++ b/contrib/llvm/lib/CodeGen/MachinePassRegistry.cpp
@@ -16,6 +16,7 @@
using namespace llvm;
+void MachinePassRegistryListener::anchor() { }
/// Add - Adds a function pass to the registration list.
///
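The one-line MachinePassRegistry change adds an out-of-line definition for a virtual method so the compiler emits the class's vtable in a single object file rather than weakly in every translation unit that uses the class (the usual fix for -Wweak-vtables). The idiom in isolation:

    // In the header: a class whose other virtual methods are all inline.
    struct Listener {
      virtual ~Listener() {}
      virtual void anchor();   // declared, deliberately not defined inline
    };

    // In exactly one .cpp file: the out-of-line body pins the vtable there.
    void Listener::anchor() {}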
diff --git a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index 266ebf6..7ea1517 100644
--- a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -18,11 +18,12 @@
using namespace llvm;
MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI)
- : TRI(&TRI), IsSSA(true) {
+ : TRI(&TRI), IsSSA(true), TracksLiveness(true) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
UsedPhysRegs.resize(TRI.getNumRegs());
-
+ UsedPhysRegMask.resize(TRI.getNumRegs());
+
// Create the physreg use/def lists.
PhysRegUseDefLists = new MachineOperand*[TRI.getNumRegs()];
memset(PhysRegUseDefLists, 0, sizeof(MachineOperand*)*TRI.getNumRegs());
@@ -30,9 +31,7 @@ MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI)
MachineRegisterInfo::~MachineRegisterInfo() {
#ifndef NDEBUG
- for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i)
- assert(VRegInfo[TargetRegisterInfo::index2VirtReg(i)].second == 0 &&
- "Vreg use list non-empty still?");
+ clearVirtRegs();
for (unsigned i = 0, e = UsedPhysRegs.size(); i != e; ++i)
assert(!PhysRegUseDefLists[i] &&
"PhysRegUseDefLists has entries after all instructions are deleted");
@@ -76,12 +75,14 @@ MachineRegisterInfo::recomputeRegClass(unsigned Reg, const TargetMachine &TM) {
// Accumulate constraints from all uses.
for (reg_nodbg_iterator I = reg_nodbg_begin(Reg), E = reg_nodbg_end(); I != E;
++I) {
- // TRI doesn't have accurate enough information to model this yet.
- if (I.getOperand().getSubReg())
- return false;
const TargetRegisterClass *OpRC =
I->getRegClassConstraint(I.getOperandNo(), TII, TRI);
- if (OpRC)
+ if (unsigned SubIdx = I.getOperand().getSubReg()) {
+ if (OpRC)
+ NewRC = TRI->getMatchingSuperRegClass(NewRC, OpRC, SubIdx);
+ else
+ NewRC = TRI->getSubClassWithSubReg(NewRC, SubIdx);
+ } else if (OpRC)
NewRC = TRI->getCommonSubClass(NewRC, OpRC);
if (!NewRC || NewRC == OldRC)
return false;
@@ -115,6 +116,16 @@ MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){
return Reg;
}
+/// clearVirtRegs - Remove all virtual registers (after physreg assignment).
+void MachineRegisterInfo::clearVirtRegs() {
+#ifndef NDEBUG
+ for (unsigned i = 0, e = getNumVirtRegs(); i != e; ++i)
+ assert(VRegInfo[TargetRegisterInfo::index2VirtReg(i)].second == 0 &&
+ "Vreg use list non-empty still?");
+#endif
+ VRegInfo.clear();
+}
+
/// HandleVRegListReallocation - We just added a virtual register to the
/// VRegInfo info list and it reallocated. Update the use/def lists info
/// pointers.
@@ -150,9 +161,8 @@ void MachineRegisterInfo::replaceRegWith(unsigned FromReg, unsigned ToReg) {
/// form, so there should only be one definition.
MachineInstr *MachineRegisterInfo::getVRegDef(unsigned Reg) const {
// Since we are in SSA form, we can use the first definition.
- if (!def_empty(Reg))
- return &*def_begin(Reg);
- return 0;
+ def_iterator I = def_begin(Reg);
+ return !I.atEnd() ? &*I : 0;
}
bool MachineRegisterInfo::hasOneUse(unsigned RegNo) const {
@@ -242,18 +252,31 @@ MachineRegisterInfo::EmitLiveInCopies(MachineBasicBlock *EntryMBB,
}
}
-void MachineRegisterInfo::closePhysRegsUsed(const TargetRegisterInfo &TRI) {
- for (int i = UsedPhysRegs.find_first(); i >= 0;
- i = UsedPhysRegs.find_next(i))
- for (const unsigned *SS = TRI.getSubRegisters(i);
- unsigned SubReg = *SS; ++SS)
- if (SubReg > unsigned(i))
- UsedPhysRegs.set(SubReg);
-}
-
#ifndef NDEBUG
void MachineRegisterInfo::dumpUses(unsigned Reg) const {
for (use_iterator I = use_begin(Reg), E = use_end(); I != E; ++I)
I.getOperand().getParent()->dump();
}
#endif
+
+void MachineRegisterInfo::freezeReservedRegs(const MachineFunction &MF) {
+ ReservedRegs = TRI->getReservedRegs(MF);
+}
+
+bool MachineRegisterInfo::isConstantPhysReg(unsigned PhysReg,
+ const MachineFunction &MF) const {
+ assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
+
+ // Check if any overlapping register is modified.
+ for (const uint16_t *R = TRI->getOverlaps(PhysReg); *R; ++R)
+ if (!def_empty(*R))
+ return false;
+
+ // Check if any overlapping register is allocatable so it may be used later.
+ if (AllocatableRegs.empty())
+ AllocatableRegs = TRI->getAllocatableSet(MF);
+ for (const uint16_t *R = TRI->getOverlaps(PhysReg); *R; ++R)
+ if (AllocatableRegs.test(*R))
+ return false;
+ return true;
+}
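The new isConstantPhysReg gives IsLoopInvariantInst (in the MachineLICM diff above) a single query in place of the old def_empty/AllocatableSet walk over a register and its aliases. A self-contained model of its semantics; the bitsets and the Overlaps list are stand-ins for MachineRegisterInfo state and TRI->getOverlaps():

    #include <bitset>
    #include <vector>

    constexpr unsigned NumRegs = 256;   // illustrative bound
    std::bitset<NumRegs> HasDef;        // some def exists for the register
    std::bitset<NumRegs> Allocatable;   // may be handed out by the allocator

    // A physreg read is "constant" only if nothing overlapping it is ever
    // written and nothing overlapping it can be allocated later.
    bool isConstantPhysReg(const std::vector<unsigned> &Overlaps) {
      for (unsigned R : Overlaps)
        if (HasDef.test(R) || Allocatable.test(R))
          return false;
      return true;
    }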
diff --git a/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp b/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
index 84d6df2..070a557 100644
--- a/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSSAUpdater.cpp
@@ -81,7 +81,7 @@ unsigned LookForIdenticalPHI(MachineBasicBlock *BB,
if (BB->empty())
return 0;
- MachineBasicBlock::iterator I = BB->front();
+ MachineBasicBlock::iterator I = BB->begin();
if (!I->isPHI())
return 0;
@@ -182,7 +182,7 @@ unsigned MachineSSAUpdater::GetValueInMiddleOfBlock(MachineBasicBlock *BB) {
return DupPHI;
// Otherwise, we do need a PHI: insert one now.
- MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->front();
+ MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->begin();
MachineInstr *InsertedPHI = InsertNewDef(TargetOpcode::PHI, BB,
Loc, VRC, MRI, TII);
@@ -214,7 +214,6 @@ MachineBasicBlock *findCorrespondingPred(const MachineInstr *MI,
}
llvm_unreachable("MachineOperand::getParent() failure?");
- return 0;
}
/// RewriteUse - Rewrite a use of the symbolic value. This handles PHI nodes,
@@ -311,7 +310,7 @@ public:
/// Add it into the specified block and return the register.
static unsigned CreateEmptyPHI(MachineBasicBlock *BB, unsigned NumPreds,
MachineSSAUpdater *Updater) {
- MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->front();
+ MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->begin();
MachineInstr *PHI = InsertNewDef(TargetOpcode::PHI, BB, Loc,
Updater->VRC, Updater->MRI,
Updater->TII);
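The three MachineSSAUpdater fixes above all replace BB->front() with BB->begin(): front() yields a reference to the first instruction, and turning it into an iterator only works through ilist's implicit conversions, while begin() is the iterator directly. With a standard container the same confusion is a type error, which makes the distinction easy to see:

    #include <list>

    void frontVsBegin() {
      std::list<int> L = {1, 2, 3};
      std::list<int>::iterator I = L.begin();    // an iterator; can advance
      int &F = L.front();                        // a reference to an element
      // std::list<int>::iterator J = L.front(); // error: int isn't an iterator
      ++I;
      (void)F;
    }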
diff --git a/contrib/llvm/lib/CodeGen/MachineScheduler.cpp b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
new file mode 100644
index 0000000..1d3241b
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -0,0 +1,614 @@
+//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MachineScheduler schedules machine instructions after phi elimination. It
+// preserves LiveIntervals so it can be invoked before register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "misched"
+
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineScheduler.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/PriorityQueue.h"
+
+#include <queue>
+
+using namespace llvm;
+
+static cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
+ cl::desc("Force top-down list scheduling"));
+static cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
+ cl::desc("Force bottom-up list scheduling"));
+
+#ifndef NDEBUG
+static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
+ cl::desc("Pop up a window to show MISched dags after they are processed"));
+
+static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
+ cl::desc("Stop scheduling after N instructions"), cl::init(~0U));
+#else
+static bool ViewMISchedDAGs = false;
+#endif // NDEBUG
+
+//===----------------------------------------------------------------------===//
+// Machine Instruction Scheduling Pass and Registry
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// MachineScheduler runs after coalescing and before register allocation.
+class MachineScheduler : public MachineSchedContext,
+ public MachineFunctionPass {
+public:
+ MachineScheduler();
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ virtual void releaseMemory() {}
+
+ virtual bool runOnMachineFunction(MachineFunction&);
+
+ virtual void print(raw_ostream &O, const Module* = 0) const;
+
+ static char ID; // Class identification, replacement for typeinfo
+};
+} // namespace
+
+char MachineScheduler::ID = 0;
+
+char &llvm::MachineSchedulerID = MachineScheduler::ID;
+
+INITIALIZE_PASS_BEGIN(MachineScheduler, "misched",
+ "Machine Instruction Scheduler", false, false)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(MachineScheduler, "misched",
+ "Machine Instruction Scheduler", false, false)
+
+MachineScheduler::MachineScheduler()
+: MachineFunctionPass(ID) {
+ initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
+}
+
+void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequiredID(MachineDominatorsID);
+ AU.addRequired<MachineLoopInfo>();
+ AU.addRequired<AliasAnalysis>();
+ AU.addRequired<TargetPassConfig>();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<LiveIntervals>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+MachinePassRegistry MachineSchedRegistry::Registry;
+
+/// A dummy default scheduler factory indicates whether the scheduler
+/// is overridden on the command line.
+static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
+ return 0;
+}
+
+/// MachineSchedOpt allows command line selection of the scheduler.
+static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
+ RegisterPassParser<MachineSchedRegistry> >
+MachineSchedOpt("misched",
+ cl::init(&useDefaultMachineSched), cl::Hidden,
+ cl::desc("Machine instruction scheduler to use"));
+
+static MachineSchedRegistry
+DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
+ useDefaultMachineSched);
+
+/// Forward declare the standard machine scheduler. This will be used as the
+/// default scheduler if the target does not set a default.
+static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C);
+
+/// Top-level MachineScheduler pass driver.
+///
+/// Visit blocks in function order. Divide each block into scheduling regions
+/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
+/// consistent with the DAG builder, which traverses the interior of the
+/// scheduling regions bottom-up.
+///
+/// This design avoids exposing scheduling boundaries to the DAG builder,
+/// simplifying the DAG builder's support for "special" target instructions.
+/// At the same time the design allows target schedulers to operate across
+/// scheduling boundaries, for example to bundle the boundary instructions
+/// without reordering them. This creates complexity, because the target
+/// scheduler must update the RegionBegin and RegionEnd positions cached by
+/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
+/// design would be to split blocks at scheduling boundaries, but LLVM has a
+/// general bias against block splitting purely for implementation simplicity.
+bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
+ // Initialize the context of the pass.
+ MF = &mf;
+ MLI = &getAnalysis<MachineLoopInfo>();
+ MDT = &getAnalysis<MachineDominatorTree>();
+ PassConfig = &getAnalysis<TargetPassConfig>();
+ AA = &getAnalysis<AliasAnalysis>();
+
+ LIS = &getAnalysis<LiveIntervals>();
+ const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+
+ // Select the scheduler, or set the default.
+ MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
+ if (Ctor == useDefaultMachineSched) {
+ // Get the default scheduler set by the target.
+ Ctor = MachineSchedRegistry::getDefault();
+ if (!Ctor) {
+ Ctor = createConvergingSched;
+ MachineSchedRegistry::setDefault(Ctor);
+ }
+ }
+ // Instantiate the selected scheduler.
+ OwningPtr<ScheduleDAGInstrs> Scheduler(Ctor(this));
+
+ // Visit all machine basic blocks.
+ for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
+ MBB != MBBEnd; ++MBB) {
+
+ Scheduler->startBlock(MBB);
+
+ // Break the block into scheduling regions [I, RegionEnd), and schedule each
+ // region as soon as it is discovered. RegionEnd points to the scheduling
+ // boundary at the bottom of the region. The DAG does not include RegionEnd,
+ // but the region does (i.e. the next RegionEnd is above the previous
+ // RegionBegin). If the current block has no terminator then RegionEnd ==
+ // MBB->end() for the bottom region.
+ //
+ // The Scheduler may insert instructions during either schedule() or
+ // exitRegion(), even for empty regions. So the local iterators 'I' and
+ // 'RegionEnd' are invalid across these calls.
+ unsigned RemainingCount = MBB->size();
+ for(MachineBasicBlock::iterator RegionEnd = MBB->end();
+ RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) {
+ // Avoid decrementing RegionEnd for blocks with no terminator.
+ if (RegionEnd != MBB->end()
+ || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) {
+ --RegionEnd;
+ // Count the boundary instruction.
+ --RemainingCount;
+ }
+
+ // The next region starts above the previous region. Look backward in the
+ // instruction stream until we find the nearest boundary.
+ MachineBasicBlock::iterator I = RegionEnd;
+ for(;I != MBB->begin(); --I, --RemainingCount) {
+ if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF))
+ break;
+ }
+ // Notify the scheduler of the region, even if we may skip scheduling
+ // it. Perhaps it still needs to be bundled.
+ Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount);
+
+ // Skip empty scheduling regions (0 or 1 schedulable instructions).
+ if (I == RegionEnd || I == llvm::prior(RegionEnd)) {
+ // Close the current region. Bundle the terminator if needed.
+ // This invalidates 'RegionEnd' and 'I'.
+ Scheduler->exitRegion();
+ continue;
+ }
+ DEBUG(dbgs() << "MachineScheduling " << MF->getFunction()->getName()
+ << ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: ";
+ if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
+ else dbgs() << "End";
+ dbgs() << " Remaining: " << RemainingCount << "\n");
+
+ // Schedule a region: possibly reorder instructions.
+ // This invalidates 'RegionEnd' and 'I'.
+ Scheduler->schedule();
+
+ // Close the current region.
+ Scheduler->exitRegion();
+
+ // Scheduling has invalidated the current iterator 'I'. Ask the
+ // scheduler for the top of its scheduled region.
+ RegionEnd = Scheduler->begin();
+ }
+ assert(RemainingCount == 0 && "Instruction count mismatch!");
+ Scheduler->finishBlock();
+ }
+ Scheduler->finalizeSchedule();
+ DEBUG(LIS->print(dbgs()));
+ return true;
+}
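The region loop in runOnMachineFunction carves each block bottom-up, ending a region at every scheduling boundary and skipping the boundary instruction itself. A simplified standalone model of that partitioning (RemainingCount is omitted; empty regions show up as zero-length spans, much as the real loop still calls enterRegion/exitRegion for them; isBoundary is an arbitrary stand-in predicate):

    #include <cstdio>
    #include <vector>

    static bool isBoundary(int Instr) { return Instr % 10 == 0; } // illustrative

    // Walk [0, N) bottom-up, emitting maximal regions that contain no
    // boundary instruction; boundaries delimit regions but join none.
    static void splitRegions(const std::vector<int> &Block) {
      int End = Block.size();
      while (End != 0) {
        if (isBoundary(Block[End - 1])) {  // skip the boundary instruction
          --End;
          if (End == 0) break;
        }
        int Begin = End;
        while (Begin != 0 && !isBoundary(Block[Begin - 1]))
          --Begin;
        std::printf("region [%d, %d)\n", Begin, End);
        End = Begin;
      }
    }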
+
+void MachineScheduler::print(raw_ostream &O, const Module* m) const {
+ // unimplemented
+}
+
+//===----------------------------------------------------------------------===//
+// MachineSchedStrategy - Interface to a machine scheduling algorithm.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ScheduleDAGMI;
+
+/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected
+/// scheduling algorithm.
+///
+/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it
+/// in ScheduleDAGInstrs.h
+class MachineSchedStrategy {
+public:
+ virtual ~MachineSchedStrategy() {}
+
+ /// Initialize the strategy after building the DAG for a new region.
+ virtual void initialize(ScheduleDAGMI *DAG) = 0;
+
+ /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
+ /// schedule the node at the top of the unscheduled region. Otherwise it will
+ /// be scheduled at the bottom.
+ virtual SUnit *pickNode(bool &IsTopNode) = 0;
+
+ /// When all predecessor dependencies have been resolved, free this node for
+ /// top-down scheduling.
+ virtual void releaseTopNode(SUnit *SU) = 0;
+ /// When all successor dependencies have been resolved, free this node for
+ /// bottom-up scheduling.
+ virtual void releaseBottomNode(SUnit *SU) = 0;
+};
+} // namespace
+
+//===----------------------------------------------------------------------===//
+// ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals
+// preservation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules
+/// machine instructions while updating LiveIntervals.
+class ScheduleDAGMI : public ScheduleDAGInstrs {
+ AliasAnalysis *AA;
+ MachineSchedStrategy *SchedImpl;
+
+ /// The top of the unscheduled zone.
+ MachineBasicBlock::iterator CurrentTop;
+
+ /// The bottom of the unscheduled zone.
+ MachineBasicBlock::iterator CurrentBottom;
+
+ /// The number of instructions scheduled so far. Used to cut off the
+ /// scheduler at the point determined by misched-cutoff.
+ unsigned NumInstrsScheduled;
+public:
+ ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S):
+ ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS),
+ AA(C->AA), SchedImpl(S), CurrentTop(), CurrentBottom(),
+ NumInstrsScheduled(0) {}
+
+ ~ScheduleDAGMI() {
+ delete SchedImpl;
+ }
+
+ MachineBasicBlock::iterator top() const { return CurrentTop; }
+ MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
+
+ /// Implement ScheduleDAGInstrs interface.
+ void schedule();
+
+protected:
+ void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
+ bool checkSchedLimit();
+
+ void releaseSucc(SUnit *SU, SDep *SuccEdge);
+ void releaseSuccessors(SUnit *SU);
+ void releasePred(SUnit *SU, SDep *PredEdge);
+ void releasePredecessors(SUnit *SU);
+};
+} // namespace
+
+/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
+/// NumPredsLeft reaches zero, release the successor node.
+void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
+ SUnit *SuccSU = SuccEdge->getSUnit();
+
+#ifndef NDEBUG
+ if (SuccSU->NumPredsLeft == 0) {
+ dbgs() << "*** Scheduling failed! ***\n";
+ SuccSU->dump(this);
+ dbgs() << " has been released too many times!\n";
+ llvm_unreachable(0);
+ }
+#endif
+ --SuccSU->NumPredsLeft;
+ if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
+ SchedImpl->releaseTopNode(SuccSU);
+}
+
+/// releaseSuccessors - Call releaseSucc on each of SU's successors.
+void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ releaseSucc(SU, &*I);
+ }
+}
+
+/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
+/// NumSuccsLeft reaches zero, release the predecessor node.
+void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
+ SUnit *PredSU = PredEdge->getSUnit();
+
+#ifndef NDEBUG
+ if (PredSU->NumSuccsLeft == 0) {
+ dbgs() << "*** Scheduling failed! ***\n";
+ PredSU->dump(this);
+ dbgs() << " has been released too many times!\n";
+ llvm_unreachable(0);
+ }
+#endif
+ --PredSU->NumSuccsLeft;
+ if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
+ SchedImpl->releaseBottomNode(PredSU);
+}
+
+/// releasePredecessors - Call releasePred on each of SU's predecessors.
+void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ releasePred(SU, &*I);
+ }
+}
+
+void ScheduleDAGMI::moveInstruction(MachineInstr *MI,
+ MachineBasicBlock::iterator InsertPos) {
+ // Fix RegionBegin if the first instruction moves down.
+ if (&*RegionBegin == MI)
+ RegionBegin = llvm::next(RegionBegin);
+ BB->splice(InsertPos, BB, MI);
+ LIS->handleMove(MI);
+ // Fix RegionBegin if another instruction moves above the first instruction.
+ if (RegionBegin == InsertPos)
+ RegionBegin = MI;
+}
+
+bool ScheduleDAGMI::checkSchedLimit() {
+#ifndef NDEBUG
+ if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
+ CurrentTop = CurrentBottom;
+ return false;
+ }
+ ++NumInstrsScheduled;
+#endif
+ return true;
+}
+
+/// schedule - Called back from MachineScheduler::runOnMachineFunction
+/// after setting up the current scheduling region.
+void ScheduleDAGMI::schedule() {
+ buildSchedGraph(AA);
+
+ DEBUG(dbgs() << "********** MI Scheduling **********\n");
+ DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
+ SUnits[su].dumpAll(this));
+
+ if (ViewMISchedDAGs) viewGraph();
+
+ SchedImpl->initialize(this);
+
+ // Release edges from the special Entry node or to the special Exit node.
+ releaseSuccessors(&EntrySU);
+ releasePredecessors(&ExitSU);
+
+ // Release all DAG roots for scheduling.
+ for (std::vector<SUnit>::iterator I = SUnits.begin(), E = SUnits.end();
+ I != E; ++I) {
+ // A SUnit is ready to top schedule if it has no predecessors.
+ if (I->Preds.empty())
+ SchedImpl->releaseTopNode(&(*I));
+ // A SUnit is ready to bottom schedule if it has no successors.
+ if (I->Succs.empty())
+ SchedImpl->releaseBottomNode(&(*I));
+ }
+
+ CurrentTop = RegionBegin;
+ CurrentBottom = RegionEnd;
+ bool IsTopNode = false;
+ while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
+ DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
+ << " Scheduling Instruction:\n"; SU->dump(this));
+ if (!checkSchedLimit())
+ break;
+
+ // Move the instruction to its new location in the instruction stream.
+ MachineInstr *MI = SU->getInstr();
+
+ if (IsTopNode) {
+ assert(SU->isTopReady() && "node still has unscheduled dependencies");
+ if (&*CurrentTop == MI)
+ ++CurrentTop;
+ else
+ moveInstruction(MI, CurrentTop);
+ // Release dependent instructions for scheduling.
+ releaseSuccessors(SU);
+ }
+ else {
+ assert(SU->isBottomReady() && "node still has unscheduled dependencies");
+ if (&*llvm::prior(CurrentBottom) == MI)
+ --CurrentBottom;
+ else {
+ if (&*CurrentTop == MI)
+ CurrentTop = llvm::next(CurrentTop);
+ moveInstruction(MI, CurrentBottom);
+ CurrentBottom = MI;
+ }
+ // Release dependent instructions for scheduling.
+ releasePredecessors(SU);
+ }
+ SU->isScheduled = true;
+ }
+ assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
+}
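schedule() shrinks the region from both ends: a node picked with IsTopNode set is placed at CurrentTop, otherwise above CurrentBottom, until the two cursors meet. A toy model of the converging zone (the strict alternation stands in for whatever the strategy's pickNode decides):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    static void converge(std::vector<int> &Zone) {
      std::size_t Top = 0, Bottom = Zone.size();
      bool FromTop = true;              // stand-in for IsTopNode
      while (Top != Bottom) {
        if (FromTop)
          ++Top;                        // instruction fixed at the top end
        else
          --Bottom;                     // instruction fixed at the bottom end
        FromTop = !FromTop;
      }
      assert(Top == Bottom && "Nonempty unscheduled zone.");
    }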
+
+//===----------------------------------------------------------------------===//
+// ConvergingScheduler - Implementation of the standard MachineSchedStrategy.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance
+/// the schedule.
+class ConvergingScheduler : public MachineSchedStrategy {
+ ScheduleDAGMI *DAG;
+
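+ // Number of SUnits currently ready at each end of the unscheduled zone.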
+ unsigned NumTopReady;
+ unsigned NumBottomReady;
+
+public:
+ virtual void initialize(ScheduleDAGMI *dag) {
+ DAG = dag;
+
+ assert((!ForceTopDown || !ForceBottomUp) &&
+ "-misched-topdown incompatible with -misched-bottomup");
+ }
+
+ virtual SUnit *pickNode(bool &IsTopNode) {
+ if (DAG->top() == DAG->bottom())
+ return NULL;
+
+ // As an initial placeholder heuristic, schedule in the direction that has
+ // the fewest choices.
+ SUnit *SU;
+ if (ForceTopDown || (!ForceBottomUp && NumTopReady <= NumBottomReady)) {
+ SU = DAG->getSUnit(DAG->top());
+ IsTopNode = true;
+ }
+ else {
+ SU = DAG->getSUnit(llvm::prior(DAG->bottom()));
+ IsTopNode = false;
+ }
+ if (SU->isTopReady()) {
+ assert(NumTopReady > 0 && "bad ready count");
+ --NumTopReady;
+ }
+ if (SU->isBottomReady()) {
+ assert(NumBottomReady > 0 && "bad ready count");
+ --NumBottomReady;
+ }
+ return SU;
+ }
+
+ virtual void releaseTopNode(SUnit *SU) {
+ ++NumTopReady;
+ }
+ virtual void releaseBottomNode(SUnit *SU) {
+ ++NumBottomReady;
+ }
+};
+} // namespace
+
+/// Create the standard converging machine scheduler. This will be used as the
+/// default scheduler if the target does not set a default.
+static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
+ assert((!ForceTopDown || !ForceBottomUp) &&
+ "-misched-topdown incompatible with -misched-bottomup");
+ return new ScheduleDAGMI(C, new ConvergingScheduler());
+}
+static MachineSchedRegistry
+ConvergingSchedRegistry("converge", "Standard converging scheduler.",
+ createConvergingSched);
+
+//===----------------------------------------------------------------------===//
+// Machine Instruction Shuffler for Correctness Testing
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+namespace {
+/// Apply a less-than relation on the node order, which corresponds to the
+/// instruction order prior to scheduling. IsReverse implements greater-than.
+template<bool IsReverse>
+struct SUnitOrder {
+ bool operator()(SUnit *A, SUnit *B) const {
+ if (IsReverse)
+ return A->NodeNum > B->NodeNum;
+ else
+ return A->NodeNum < B->NodeNum;
+ }
+};
+
+/// Reorder instructions as much as possible.
+class InstructionShuffler : public MachineSchedStrategy {
+ bool IsAlternating;
+ bool IsTopDown;
+
+ // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
+ // gives nodes with a higher number higher priority, causing the latest
+ // instructions to be scheduled first.
+ PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
+ TopQ;
+ // When scheduling bottom-up, use greater-than as the queue priority.
+ PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
+ BottomQ;
+public:
+ InstructionShuffler(bool alternate, bool topdown)
+ : IsAlternating(alternate), IsTopDown(topdown) {}
+
+ virtual void initialize(ScheduleDAGMI *) {
+ TopQ.clear();
+ BottomQ.clear();
+ }
+
+ /// Implement MachineSchedStrategy interface.
+ /// -----------------------------------------
+
+ virtual SUnit *pickNode(bool &IsTopNode) {
+ SUnit *SU;
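+ // Discard stale entries: a node may already have been scheduled from the
+ // other queue when alternating directions.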
+ if (IsTopDown) {
+ do {
+ if (TopQ.empty()) return NULL;
+ SU = TopQ.top();
+ TopQ.pop();
+ } while (SU->isScheduled);
+ IsTopNode = true;
+ }
+ else {
+ do {
+ if (BottomQ.empty()) return NULL;
+ SU = BottomQ.top();
+ BottomQ.pop();
+ } while (SU->isScheduled);
+ IsTopNode = false;
+ }
+ if (IsAlternating)
+ IsTopDown = !IsTopDown;
+ return SU;
+ }
+
+ virtual void releaseTopNode(SUnit *SU) {
+ TopQ.push(SU);
+ }
+ virtual void releaseBottomNode(SUnit *SU) {
+ BottomQ.push(SU);
+ }
+};
+} // namespace
+
+static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
+ bool Alternate = !ForceTopDown && !ForceBottomUp;
+ bool TopDown = !ForceBottomUp;
+ assert((TopDown || !ForceTopDown) &&
+ "-misched-topdown incompatible with -misched-bottomup");
+ return new ScheduleDAGMI(C, new InstructionShuffler(Alternate, TopDown));
+}
+static MachineSchedRegistry ShufflerRegistry(
+ "shuffle", "Shuffle machine instructions alternating directions",
+ createInstructionShuffler);
+#endif // !NDEBUG
diff --git a/contrib/llvm/lib/CodeGen/MachineSink.cpp b/contrib/llvm/lib/CodeGen/MachineSink.cpp
index 29cfb49..1ce546b 100644
--- a/contrib/llvm/lib/CodeGen/MachineSink.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSink.cpp
@@ -32,7 +32,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-static cl::opt<bool>
+static cl::opt<bool>
SplitEdges("machine-sink-split",
cl::desc("Split critical edges during machine sinking"),
cl::init(true), cl::Hidden);
@@ -90,12 +90,19 @@ namespace {
bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
MachineBasicBlock *DefMBB,
bool &BreakPHIEdge, bool &LocalUse) const;
+ MachineBasicBlock *FindSuccToSinkTo(MachineInstr *MI, MachineBasicBlock *MBB,
+ bool &BreakPHIEdge);
+ bool isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ MachineBasicBlock *SuccToSinkTo);
+
bool PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB);
};
} // end anonymous namespace
char MachineSinking::ID = 0;
+char &llvm::MachineSinkingID = MachineSinking::ID;
INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
"Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
@@ -104,8 +111,6 @@ INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineSinking, "machine-sink",
"Machine code sinking", false, false)
-FunctionPass *llvm::createMachineSinkingPass() { return new MachineSinking(); }
-
bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB) {
if (!MI->isCopy())
@@ -147,14 +152,10 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Only makes sense for vregs");
+ // Ignore debug uses because debug info doesn't affect the code.
if (MRI->use_nodbg_empty(Reg))
return true;
- // Ignoring debug uses is necessary so debug info doesn't affect the code.
- // This may leave a referencing dbg_value in the original block, before
- // the definition of the vreg. Dwarf generator handles this although the
- // user might not get the right info at runtime.
-
// BreakPHIEdge is true if all the uses are in the successor MBB being sunken
// into and they are all PHI nodes. In this case, machine-sink must break
// the critical edge first. e.g.
@@ -291,7 +292,7 @@ bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
if (!CEBCandidates.insert(std::make_pair(From, To)))
return true;
- if (!MI->isCopy() && !MI->getDesc().isAsCheapAsAMove())
+ if (!MI->isCopy() && !MI->isAsCheapAsAMove())
return true;
// MI is cheap, we probably don't want to break the critical edge for it.
@@ -382,9 +383,9 @@ static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
}
-/// collectDebgValues - Scan instructions following MI and collect any
+/// collectDebugValues - Scan instructions following MI and collect any
/// matching DBG_VALUEs.
-static void collectDebugValues(MachineInstr *MI,
+static void collectDebugValues(MachineInstr *MI,
SmallVector<MachineInstr *, 2> & DbgValues) {
DbgValues.clear();
if (!MI->getOperand(0).isReg())
@@ -401,35 +402,76 @@ static void collectDebugValues(MachineInstr *MI,
}
}
-/// SinkInstruction - Determine whether it is safe to sink the specified machine
-/// instruction out of its current block into a successor.
-bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
- // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
- // be close to the source to make it easier to coalesce.
- if (AvoidsSinking(MI, MRI))
+/// isPostDominatedBy - Return true if A is post dominated by B.
+static bool isPostDominatedBy(MachineBasicBlock *A, MachineBasicBlock *B) {
+
+ // FIXME - Use real post dominator.
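+ // Approximates a triangle: A has exactly two successors, and the one that
+ // is not B has B as its only successor.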
+ if (A->succ_size() != 2)
+ return false;
+ MachineBasicBlock::succ_iterator I = A->succ_begin();
+ if (B == *I)
+ ++I;
+ MachineBasicBlock *OtherSuccBlock = *I;
+ if (OtherSuccBlock->succ_size() != 1 ||
+ *(OtherSuccBlock->succ_begin()) != B)
return false;
- // Check if it's safe to move the instruction.
- if (!MI->isSafeToMove(TII, AA, SawStore))
+ return true;
+}
+
+/// isProfitableToSinkTo - Return true if it is profitable to sink MI
+/// into SuccToSinkTo.
+bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ MachineBasicBlock *SuccToSinkTo) {
+ assert (MI && "Invalid MachineInstr!");
+ assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");
+
+ if (MBB == SuccToSinkTo)
return false;
- // FIXME: This should include support for sinking instructions within the
- // block they are currently in to shorten the live ranges. We often get
- // instructions sunk into the top of a large block, but it would be better to
- // also sink them down before their first use in the block. This xform has to
- // be careful not to *increase* register pressure though, e.g. sinking
- // "x = y + z" down if it kills y and z would increase the live ranges of y
- // and z and only shrink the live range of x.
+ // It is profitable if SuccToSinkTo does not post-dominate the current block.
+ if (!isPostDominatedBy(MBB, SuccToSinkTo))
+ return true;
+
+ // Check whether the only uses in the post-dominated block are PHI nodes.
+ bool NonPHIUse = false;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
+ I != E; ++I) {
+ MachineInstr *UseInst = &*I;
+ MachineBasicBlock *UseBlock = UseInst->getParent();
+ if (UseBlock == SuccToSinkTo && !UseInst->isPHI())
+ NonPHIUse = true;
+ }
+ if (!NonPHIUse)
+ return true;
+
+ // If SuccToSinkTo post-dominates the current block, sinking may still be
+ // profitable if MI can be profitably sunk into another block in a later round.
+ bool BreakPHIEdge = false;
+ // FIXME - If finding the successor is expensive at compile time, cache the results.
+ if (MachineBasicBlock *MBB2 = FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge))
+ return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2);
+
+ // If SuccToSinkTo is the final destination and it post-dominates the current
+ // block, then it is not profitable to sink MI into SuccToSinkTo.
+ return false;
+}
+
+/// FindSuccToSinkTo - Find a successor to sink this instruction to.
+MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ bool &BreakPHIEdge) {
+
+ assert (MI && "Invalid MachineInstr!");
+ assert (MBB && "Invalid MachineBasicBlock!");
// Loop over all the operands of the specified instruction. If there is
// anything we can't handle, bail out.
- MachineBasicBlock *ParentBlock = MI->getParent();
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
MachineBasicBlock *SuccToSinkTo = 0;
-
- bool BreakPHIEdge = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue; // Ignore non-register operands.
@@ -442,24 +484,11 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
- if (!MRI->def_empty(Reg))
- return false;
-
- if (AllocatableSet.test(Reg))
- return false;
-
- // Check for a def among the register's aliases too.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- if (!MRI->def_empty(AliasReg))
- return false;
-
- if (AllocatableSet.test(AliasReg))
- return false;
- }
+ if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
+ return NULL;
} else if (!MO.isDead()) {
// A def that isn't dead. We can't move it.
- return false;
+ return NULL;
}
} else {
// Virtual register uses are always safe to sink.
@@ -467,7 +496,7 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// If it's not safe to move defs of the register class, then abort.
if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
- return false;
+ return NULL;
// FIXME: This picks a successor to sink into based on having one
// successor that dominates all the uses. However, there are cases where
@@ -488,48 +517,79 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// If a previous operand picked a block to sink to, then this operand
// must be sinkable to the same block.
bool LocalUse = false;
- if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, ParentBlock,
+ if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
BreakPHIEdge, LocalUse))
- return false;
+ return NULL;
continue;
}
// Otherwise, we should look at all the successors and decide which one
// we should sink to.
- for (MachineBasicBlock::succ_iterator SI = ParentBlock->succ_begin(),
- E = ParentBlock->succ_end(); SI != E; ++SI) {
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ E = MBB->succ_end(); SI != E; ++SI) {
+ MachineBasicBlock *SuccBlock = *SI;
bool LocalUse = false;
- if (AllUsesDominatedByBlock(Reg, *SI, ParentBlock,
+ if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
BreakPHIEdge, LocalUse)) {
- SuccToSinkTo = *SI;
+ SuccToSinkTo = SuccBlock;
break;
}
if (LocalUse)
// Def is used locally, it's never safe to move this def.
- return false;
+ return NULL;
}
// If we couldn't find a block to sink to, ignore this instruction.
if (SuccToSinkTo == 0)
- return false;
+ return NULL;
+ else if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo))
+ return NULL;
}
}
- // If there are no outputs, it must have side-effects.
- if (SuccToSinkTo == 0)
- return false;
+ // It is not possible to sink an instruction into its own block. This can
+ // happen with loops.
+ if (MBB == SuccToSinkTo)
+ return NULL;
// It's not safe to sink instructions to EH landing pad. Control flow into
// landing pad is implicitly defined.
- if (SuccToSinkTo->isLandingPad())
+ if (SuccToSinkTo && SuccToSinkTo->isLandingPad())
+ return NULL;
+
+ return SuccToSinkTo;
+}
+
+/// SinkInstruction - Determine whether it is safe to sink the specified machine
+/// instruction out of its current block into a successor.
+bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
+ // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
+ // be close to the source to make it easier to coalesce.
+ if (AvoidsSinking(MI, MRI))
return false;
- // It is not possible to sink an instruction into its own block. This can
- // happen with loops.
- if (MI->getParent() == SuccToSinkTo)
+ // Check if it's safe to move the instruction.
+ if (!MI->isSafeToMove(TII, AA, SawStore))
return false;
+ // FIXME: This should include support for sinking instructions within the
+ // block they are currently in to shorten the live ranges. We often get
+ // instructions sunk into the top of a large block, but it would be better to
+ // also sink them down before their first use in the block. This xform has to
+ // be careful not to *increase* register pressure though, e.g. sinking
+ // "x = y + z" down if it kills y and z would increase the live ranges of y
+ // and z and only shrink the live range of x.
+
+ bool BreakPHIEdge = false;
+ MachineBasicBlock *ParentBlock = MI->getParent();
+ MachineBasicBlock *SuccToSinkTo = FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge);
+
+ // If there are no outputs, it must have side-effects.
+ if (SuccToSinkTo == 0)
+ return false;
+
+
// If the instruction to move defines a dead physical register which is live
// when leaving the basic block, don't move it because it could turn into a
// "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
index 26847d3..74ba94d 100644
--- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -28,6 +28,7 @@
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
@@ -69,14 +70,17 @@ namespace {
unsigned foundErrors;
typedef SmallVector<unsigned, 16> RegVector;
+ typedef SmallVector<const uint32_t*, 4> RegMaskVector;
typedef DenseSet<unsigned> RegSet;
typedef DenseMap<unsigned, const MachineInstr*> RegMap;
const MachineInstr *FirstTerminator;
BitVector regsReserved;
+ BitVector regsAllocatable;
RegSet regsLive;
RegVector regsDefined, regsDead, regsKilled;
+ RegMaskVector regMasks;
RegSet regsLiveInButUnused;
SlotIndex lastIndex;
@@ -85,7 +89,7 @@ namespace {
void addRegWithSubRegs(RegVector &RV, unsigned Reg) {
RV.push_back(Reg);
if (TargetRegisterInfo::isPhysicalRegister(Reg))
- for (const unsigned *R = TRI->getSubRegisters(Reg); *R; R++)
+ for (const uint16_t *R = TRI->getSubRegisters(Reg); *R; R++)
RV.push_back(*R);
}
@@ -175,6 +179,10 @@ namespace {
return Reg < regsReserved.size() && regsReserved.test(Reg);
}
+ bool isAllocatable(unsigned Reg) {
+ return Reg < regsAllocatable.size() && regsAllocatable.test(Reg);
+ }
+
// Analysis information if available
LiveVariables *LiveVars;
LiveIntervals *LiveInts;
@@ -194,6 +202,7 @@ namespace {
void report(const char *msg, const MachineInstr *MI);
void report(const char *msg, const MachineOperand *MO, unsigned MONum);
+ void checkLiveness(const MachineOperand *MO, unsigned MONum);
void markReachable(const MachineBasicBlock *MBB);
void calcRegsPassed();
void checkPHIOps(const MachineBasicBlock *MBB);
@@ -279,13 +288,17 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
for (MachineFunction::const_iterator MFI = MF.begin(), MFE = MF.end();
MFI!=MFE; ++MFI) {
visitMachineBasicBlockBefore(MFI);
- for (MachineBasicBlock::const_iterator MBBI = MFI->begin(),
- MBBE = MFI->end(); MBBI != MBBE; ++MBBI) {
+ for (MachineBasicBlock::const_instr_iterator MBBI = MFI->instr_begin(),
+ MBBE = MFI->instr_end(); MBBI != MBBE; ++MBBI) {
if (MBBI->getParent() != MFI) {
report("Bad instruction parent pointer", MFI);
*OS << "Instruction: " << *MBBI;
continue;
}
+ // Skip BUNDLE instruction for now. FIXME: We should add code to verify
+ // the BUNDLEs specifically.
+ if (MBBI->isBundle())
+ continue;
visitMachineInstrBefore(MBBI);
for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I)
visitMachineOperand(&MBBI->getOperand(I), I);
@@ -305,6 +318,7 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
regsDefined.clear();
regsDead.clear();
regsKilled.clear();
+ regMasks.clear();
regsLiveInButUnused.clear();
MBBInfoMap.clear();
@@ -320,7 +334,7 @@ void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
MF->print(*OS, Indexes);
}
*OS << "*** Bad machine code: " << msg << " ***\n"
- << "- function: " << MF->getFunction()->getNameStr() << "\n";
+ << "- function: " << MF->getFunction()->getName() << "\n";
}
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
@@ -370,12 +384,15 @@ void MachineVerifier::visitMachineFunctionBefore() {
// A sub-register of a reserved register is also reserved
for (int Reg = regsReserved.find_first(); Reg>=0;
Reg = regsReserved.find_next(Reg)) {
- for (const unsigned *Sub = TRI->getSubRegisters(Reg); *Sub; ++Sub) {
+ for (const uint16_t *Sub = TRI->getSubRegisters(Reg); *Sub; ++Sub) {
// FIXME: This should probably be:
// assert(regsReserved.test(*Sub) && "Non-reserved sub-register");
regsReserved.set(*Sub);
}
}
+
+ regsAllocatable = TRI->getAllocatableSet(*MF);
+
markReachable(&MF->front());
}
@@ -393,6 +410,20 @@ void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
FirstTerminator = 0;
+ if (MRI->isSSA()) {
+ // If this block has allocatable physical registers live-in, check that
+ // it is an entry block or landing pad.
+ for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
+ LE = MBB->livein_end();
+ LI != LE; ++LI) {
+ unsigned reg = *LI;
+ if (isAllocatable(reg) && !MBB->isLandingPad() &&
+ MBB != MBB->getParent()->begin()) {
+ report("MBB has allocable live-in, but isn't entry or landing-pad.", MBB);
+ }
+ }
+ }
+
// Count the number of landing pad successors.
SmallPtrSet<MachineBasicBlock*, 4> LandingPadSuccs;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
@@ -435,7 +466,7 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB exits via unconditional fall-through but its successor "
"differs from its CFG successor!", MBB);
}
- if (!MBB->empty() && MBB->back().getDesc().isBarrier() &&
+ if (!MBB->empty() && MBB->back().isBarrier() &&
!TII->isPredicated(&MBB->back())) {
report("MBB exits via unconditional fall-through but ends with a "
"barrier instruction!", MBB);
@@ -456,10 +487,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via unconditional branch but doesn't contain "
"any instructions!", MBB);
- } else if (!MBB->back().getDesc().isBarrier()) {
+ } else if (!MBB->back().isBarrier()) {
report("MBB exits via unconditional branch but doesn't end with a "
"barrier instruction!", MBB);
- } else if (!MBB->back().getDesc().isTerminator()) {
+ } else if (!MBB->back().isTerminator()) {
report("MBB exits via unconditional branch but the branch isn't a "
"terminator instruction!", MBB);
}
@@ -479,10 +510,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via conditional branch/fall-through but doesn't "
"contain any instructions!", MBB);
- } else if (MBB->back().getDesc().isBarrier()) {
+ } else if (MBB->back().isBarrier()) {
report("MBB exits via conditional branch/fall-through but ends with a "
"barrier instruction!", MBB);
- } else if (!MBB->back().getDesc().isTerminator()) {
+ } else if (!MBB->back().isTerminator()) {
report("MBB exits via conditional branch/fall-through but the branch "
"isn't a terminator instruction!", MBB);
}
@@ -499,10 +530,10 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
if (MBB->empty()) {
report("MBB exits via conditional branch/branch but doesn't "
"contain any instructions!", MBB);
- } else if (!MBB->back().getDesc().isBarrier()) {
+ } else if (!MBB->back().isBarrier()) {
report("MBB exits via conditional branch/branch but doesn't end with a "
"barrier instruction!", MBB);
- } else if (!MBB->back().getDesc().isTerminator()) {
+ } else if (!MBB->back().isTerminator()) {
report("MBB exits via conditional branch/branch but the branch "
"isn't a terminator instruction!", MBB);
}
@@ -523,7 +554,7 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
continue;
}
regsLive.insert(*I);
- for (const unsigned *R = TRI->getSubRegisters(*I); *R; R++)
+ for (const uint16_t *R = TRI->getSubRegisters(*I); *R; R++)
regsLive.insert(*R);
}
regsLiveInButUnused = regsLive;
@@ -533,7 +564,7 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
BitVector PR = MFI->getPristineRegs(MBB);
for (int I = PR.find_first(); I>0; I = PR.find_next(I)) {
regsLive.insert(I);
- for (const unsigned *R = TRI->getSubRegisters(I); *R; R++)
+ for (const uint16_t *R = TRI->getSubRegisters(I); *R; R++)
regsLive.insert(*R);
}
@@ -555,19 +586,22 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
// Check the MachineMemOperands for basic consistency.
for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
E = MI->memoperands_end(); I != E; ++I) {
- if ((*I)->isLoad() && !MCID.mayLoad())
+ if ((*I)->isLoad() && !MI->mayLoad())
report("Missing mayLoad flag", MI);
- if ((*I)->isStore() && !MCID.mayStore())
+ if ((*I)->isStore() && !MI->mayStore())
report("Missing mayStore flag", MI);
}
// Debug values must not have a slot index.
- // Other instructions must have one.
+ // Other instructions must have one, unless they are inside a bundle.
if (LiveInts) {
bool mapped = !LiveInts->isNotInMIMap(MI);
if (MI->isDebugValue()) {
if (mapped)
report("Debug instruction has a slot index", MI);
+ } else if (MI->isInsideBundle()) {
+ if (mapped)
+ report("Instruction inside bundle has a slot index", MI);
} else {
if (!mapped)
report("Missing slot index", MI);
@@ -575,7 +609,9 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
}
// Ensure non-terminators don't follow terminators.
- if (MCID.isTerminator()) {
+ // Ignore predicated terminators formed by if conversion.
+ // FIXME: If conversion shouldn't need to violate this rule.
+ if (MI->isTerminator() && !TII->isPredicated(MI)) {
if (!FirstTerminator)
FirstTerminator = MI;
} else if (FirstTerminator) {
@@ -606,7 +642,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
// Don't check if it's the last operand in a variadic instruction. See,
// e.g., LDM_RET in the arm back end.
if (MO->isReg() &&
- !(MCID.isVariadic() && MONum == MCID.getNumOperands()-1)) {
+ !(MI->isVariadic() && MONum == MCID.getNumOperands()-1)) {
if (MO->isDef() && !MCOI.isOptionalDef())
report("Explicit operand marked as def", MO, MONum);
if (MO->isImplicit())
@@ -614,7 +650,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
} else {
// ARM adds %reg0 operands to indicate predicates. We'll allow that.
- if (MO->isReg() && !MO->isImplicit() && !MCID.isVariadic() && MO->getReg())
+ if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
report("Extra explicit operand on non-variadic instruction", MO, MONum);
}
@@ -623,112 +659,9 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
const unsigned Reg = MO->getReg();
if (!Reg)
return;
+ if (MRI->tracksLiveness() && !MI->isDebugValue())
+ checkLiveness(MO, MONum);
- // Check Live Variables.
- if (MI->isDebugValue()) {
- // Liveness checks are not valid for debug values.
- } else if (MO->isUse() && !MO->isUndef()) {
- regsLiveInButUnused.erase(Reg);
-
- bool isKill = false;
- unsigned defIdx;
- if (MI->isRegTiedToDefOperand(MONum, &defIdx)) {
- // A two-addr use counts as a kill if use and def are the same.
- unsigned DefReg = MI->getOperand(defIdx).getReg();
- if (Reg == DefReg)
- isKill = true;
- else if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- report("Two-address instruction operands must be identical",
- MO, MONum);
- }
- } else
- isKill = MO->isKill();
-
- if (isKill)
- addRegWithSubRegs(regsKilled, Reg);
-
- // Check that LiveVars knows this kill.
- if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
- MO->isKill()) {
- LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
- if (std::find(VI.Kills.begin(),
- VI.Kills.end(), MI) == VI.Kills.end())
- report("Kill missing from LiveVariables", MO, MONum);
- }
-
- // Check LiveInts liveness and kill.
- if (TargetRegisterInfo::isVirtualRegister(Reg) &&
- LiveInts && !LiveInts->isNotInMIMap(MI)) {
- SlotIndex UseIdx = LiveInts->getInstructionIndex(MI).getUseIndex();
- if (LiveInts->hasInterval(Reg)) {
- const LiveInterval &LI = LiveInts->getInterval(Reg);
- if (!LI.liveAt(UseIdx)) {
- report("No live range at use", MO, MONum);
- *OS << UseIdx << " is not live in " << LI << '\n';
- }
- // Check for extra kill flags.
- // Note that we allow missing kill flags for now.
- if (MO->isKill() && !LI.killedAt(UseIdx.getDefIndex())) {
- report("Live range continues after kill flag", MO, MONum);
- *OS << "Live range: " << LI << '\n';
- }
- } else {
- report("Virtual register has no Live interval", MO, MONum);
- }
- }
-
- // Use of a dead register.
- if (!regsLive.count(Reg)) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- // Reserved registers may be used even when 'dead'.
- if (!isReserved(Reg))
- report("Using an undefined physical register", MO, MONum);
- } else {
- BBInfo &MInfo = MBBInfoMap[MI->getParent()];
- // We don't know which virtual registers are live in, so only complain
- // if vreg was killed in this MBB. Otherwise keep track of vregs that
- // must be live in. PHI instructions are handled separately.
- if (MInfo.regsKilled.count(Reg))
- report("Using a killed virtual register", MO, MONum);
- else if (!MI->isPHI())
- MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
- }
- }
- } else if (MO->isDef()) {
- // Register defined.
- // TODO: verify that earlyclobber ops are not used.
- if (MO->isDead())
- addRegWithSubRegs(regsDead, Reg);
- else
- addRegWithSubRegs(regsDefined, Reg);
-
- // Verify SSA form.
- if (MRI->isSSA() && TargetRegisterInfo::isVirtualRegister(Reg) &&
- llvm::next(MRI->def_begin(Reg)) != MRI->def_end())
- report("Multiple virtual register defs in SSA form", MO, MONum);
-
- // Check LiveInts for a live range, but only for virtual registers.
- if (LiveInts && TargetRegisterInfo::isVirtualRegister(Reg) &&
- !LiveInts->isNotInMIMap(MI)) {
- SlotIndex DefIdx = LiveInts->getInstructionIndex(MI).getDefIndex();
- if (LiveInts->hasInterval(Reg)) {
- const LiveInterval &LI = LiveInts->getInterval(Reg);
- if (const VNInfo *VNI = LI.getVNInfoAt(DefIdx)) {
- assert(VNI && "NULL valno is not allowed");
- if (VNI->def != DefIdx && !MO->isEarlyClobber()) {
- report("Inconsistent valno->def", MO, MONum);
- *OS << "Valno " << VNI->id << " is not defined at "
- << DefIdx << " in " << LI << '\n';
- }
- } else {
- report("No live range at def", MO, MONum);
- *OS << DefIdx << " is not live in " << LI << '\n';
- }
- } else {
- report("Virtual register has no Live interval", MO, MONum);
- }
- }
- }
// Check register classes.
if (MONum < MCID.getNumOperands() && !MO->isImplicit()) {
@@ -790,6 +723,10 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
break;
}
+ case MachineOperand::MO_RegisterMask:
+ regMasks.push_back(MO->getRegMask());
+ break;
+
case MachineOperand::MO_MachineBasicBlock:
if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
report("PHI operand is not in the CFG", MO, MONum);
@@ -800,11 +737,11 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
LiveInts && !LiveInts->isNotInMIMap(MI)) {
LiveInterval &LI = LiveStks->getInterval(MO->getIndex());
SlotIndex Idx = LiveInts->getInstructionIndex(MI);
- if (MCID.mayLoad() && !LI.liveAt(Idx.getUseIndex())) {
+ if (MI->mayLoad() && !LI.liveAt(Idx.getRegSlot(true))) {
report("Instruction loads from dead spill slot", MO, MONum);
*OS << "Live stack: " << LI << '\n';
}
- if (MCID.mayStore() && !LI.liveAt(Idx.getDefIndex())) {
+ if (MI->mayStore() && !LI.liveAt(Idx.getRegSlot())) {
report("Instruction stores to dead spill slot", MO, MONum);
*OS << "Live stack: " << LI << '\n';
}
@@ -816,10 +753,127 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
}
}
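+/// checkLiveness - Verify liveness for a register operand: reads must be of
+/// live registers and defs are recorded. Called from visitMachineOperand only
+/// when the register info is tracking liveness.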
+void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
+ const MachineInstr *MI = MO->getParent();
+ const unsigned Reg = MO->getReg();
+
+ // Both use and def operands can read a register.
+ if (MO->readsReg()) {
+ regsLiveInButUnused.erase(Reg);
+
+ bool isKill = false;
+ unsigned defIdx;
+ if (MI->isRegTiedToDefOperand(MONum, &defIdx)) {
+ // A two-addr use counts as a kill if use and def are the same.
+ unsigned DefReg = MI->getOperand(defIdx).getReg();
+ if (Reg == DefReg)
+ isKill = true;
+ else if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ report("Two-address instruction operands must be identical", MO, MONum);
+ }
+ } else
+ isKill = MO->isKill();
+
+ if (isKill)
+ addRegWithSubRegs(regsKilled, Reg);
+
+ // Check that LiveVars knows this kill.
+ if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
+ MO->isKill()) {
+ LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
+ if (std::find(VI.Kills.begin(), VI.Kills.end(), MI) == VI.Kills.end())
+ report("Kill missing from LiveVariables", MO, MONum);
+ }
+
+ // Check LiveInts liveness and kill.
+ if (TargetRegisterInfo::isVirtualRegister(Reg) &&
+ LiveInts && !LiveInts->isNotInMIMap(MI)) {
+ SlotIndex UseIdx = LiveInts->getInstructionIndex(MI).getRegSlot(true);
+ if (LiveInts->hasInterval(Reg)) {
+ const LiveInterval &LI = LiveInts->getInterval(Reg);
+ if (!LI.liveAt(UseIdx)) {
+ report("No live range at use", MO, MONum);
+ *OS << UseIdx << " is not live in " << LI << '\n';
+ }
+ // Check for extra kill flags.
+ // Note that we allow missing kill flags for now.
+ if (MO->isKill() && !LI.killedAt(UseIdx.getRegSlot())) {
+ report("Live range continues after kill flag", MO, MONum);
+ *OS << "Live range: " << LI << '\n';
+ }
+ } else {
+ report("Virtual register has no Live interval", MO, MONum);
+ }
+ }
+
+ // Use of a dead register.
+ if (!regsLive.count(Reg)) {
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ // Reserved registers may be used even when 'dead'.
+ if (!isReserved(Reg))
+ report("Using an undefined physical register", MO, MONum);
+ } else {
+ BBInfo &MInfo = MBBInfoMap[MI->getParent()];
+ // We don't know which virtual registers are live in, so only complain
+ // if vreg was killed in this MBB. Otherwise keep track of vregs that
+ // must be live in. PHI instructions are handled separately.
+ if (MInfo.regsKilled.count(Reg))
+ report("Using a killed virtual register", MO, MONum);
+ else if (!MI->isPHI())
+ MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
+ }
+ }
+ }
+
+ if (MO->isDef()) {
+ // Register defined.
+ // TODO: verify that earlyclobber ops are not used.
+ if (MO->isDead())
+ addRegWithSubRegs(regsDead, Reg);
+ else
+ addRegWithSubRegs(regsDefined, Reg);
+
+ // Verify SSA form.
+ if (MRI->isSSA() && TargetRegisterInfo::isVirtualRegister(Reg) &&
+ llvm::next(MRI->def_begin(Reg)) != MRI->def_end())
+ report("Multiple virtual register defs in SSA form", MO, MONum);
+
+ // Check LiveInts for a live range, but only for virtual registers.
+ if (LiveInts && TargetRegisterInfo::isVirtualRegister(Reg) &&
+ !LiveInts->isNotInMIMap(MI)) {
+ SlotIndex DefIdx = LiveInts->getInstructionIndex(MI).getRegSlot();
+ if (LiveInts->hasInterval(Reg)) {
+ const LiveInterval &LI = LiveInts->getInterval(Reg);
+ if (const VNInfo *VNI = LI.getVNInfoAt(DefIdx)) {
+ assert(VNI && "NULL valno is not allowed");
+ if (VNI->def != DefIdx && !MO->isEarlyClobber()) {
+ report("Inconsistent valno->def", MO, MONum);
+ *OS << "Valno " << VNI->id << " is not defined at "
+ << DefIdx << " in " << LI << '\n';
+ }
+ } else {
+ report("No live range at def", MO, MONum);
+ *OS << DefIdx << " is not live in " << LI << '\n';
+ }
+ } else {
+ report("Virtual register has no Live interval", MO, MONum);
+ }
+ }
+ }
+}
+
void MachineVerifier::visitMachineInstrAfter(const MachineInstr *MI) {
BBInfo &MInfo = MBBInfoMap[MI->getParent()];
set_union(MInfo.regsKilled, regsKilled);
set_subtract(regsLive, regsKilled); regsKilled.clear();
+ // Kill any masked registers.
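+ // A register mask operand preserves only the registers whose bits are set;
+ // any live physreg clobbered by a mask is marked dead below.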
+ while (!regMasks.empty()) {
+ const uint32_t *Mask = regMasks.pop_back_val();
+ for (RegSet::iterator I = regsLive.begin(), E = regsLive.end(); I != E; ++I)
+ if (TargetRegisterInfo::isPhysicalRegister(*I) &&
+ MachineOperand::clobbersPhysReg(Mask, *I))
+ regsDead.push_back(*I);
+ }
set_subtract(regsLive, regsDead); regsDead.clear();
set_union(regsLive, regsDefined); regsDefined.clear();
@@ -855,7 +909,7 @@ MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
void MachineVerifier::calcRegsPassed() {
// First push live-out regs to successors' vregsPassed. Remember the MBBs that
// have any vregsPassed.
- DenseSet<const MachineBasicBlock*> todo;
+ SmallPtrSet<const MachineBasicBlock*, 8> todo;
for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
MFI != MFE; ++MFI) {
const MachineBasicBlock &MBB(*MFI);
@@ -892,7 +946,7 @@ void MachineVerifier::calcRegsPassed() {
// similar to calcRegsPassed, only backwards.
void MachineVerifier::calcRegsRequired() {
// First push live-in regs to predecessors' vregsRequired.
- DenseSet<const MachineBasicBlock*> todo;
+ SmallPtrSet<const MachineBasicBlock*, 8> todo;
for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
MFI != MFE; ++MFI) {
const MachineBasicBlock &MBB(*MFI);
@@ -925,9 +979,10 @@ void MachineVerifier::calcRegsRequired() {
// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock *MBB) {
+ SmallPtrSet<const MachineBasicBlock*, 8> seen;
for (MachineBasicBlock::const_iterator BBI = MBB->begin(), BBE = MBB->end();
BBI != BBE && BBI->isPHI(); ++BBI) {
- DenseSet<const MachineBasicBlock*> seen;
+ seen.clear();
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
unsigned Reg = BBI->getOperand(i).getReg();
@@ -968,8 +1023,17 @@ void MachineVerifier::visitMachineFunctionAfter() {
}
// Now check liveness info if available
- if (LiveVars || LiveInts)
- calcRegsRequired();
+ calcRegsRequired();
+
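+ // In SSA form, a vreg required live-in to the entry block indicates a use
+ // that is not dominated by its def.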
+ if (MRI->isSSA() && !MF->empty()) {
+ BBInfo &MInfo = MBBInfoMap[&MF->front()];
+ for (RegSet::iterator
+ I = MInfo.vregsRequired.begin(), E = MInfo.vregsRequired.end(); I != E;
+ ++I)
+ report("Virtual register def doesn't dominate all uses.",
+ MRI->getVRegDef(*I));
+ }
+
if (LiveVars)
verifyLiveVariables();
if (LiveInts)
@@ -1065,33 +1129,43 @@ void MachineVerifier::verifyLiveIntervals() {
report("No instruction at def index", MF);
*OS << "Valno #" << VNI->id << " is defined at " << VNI->def
<< " in " << LI << '\n';
- } else if (!MI->modifiesRegister(LI.reg, TRI)) {
- report("Defining instruction does not modify register", MI);
- *OS << "Valno #" << VNI->id << " in " << LI << '\n';
+ continue;
}
+ bool hasDef = false;
bool isEarlyClobber = false;
- if (MI) {
- for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
- MOE = MI->operands_end(); MOI != MOE; ++MOI) {
- if (MOI->isReg() && MOI->getReg() == LI.reg && MOI->isDef() &&
- MOI->isEarlyClobber()) {
- isEarlyClobber = true;
- break;
- }
+ for (ConstMIBundleOperands MOI(MI); MOI.isValid(); ++MOI) {
+ if (!MOI->isReg() || !MOI->isDef())
+ continue;
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
+ if (MOI->getReg() != LI.reg)
+ continue;
+ } else {
+ if (!TargetRegisterInfo::isPhysicalRegister(MOI->getReg()) ||
+ !TRI->regsOverlap(LI.reg, MOI->getReg()))
+ continue;
}
+ hasDef = true;
+ if (MOI->isEarlyClobber())
+ isEarlyClobber = true;
+ }
+
+ if (!hasDef) {
+ report("Defining instruction does not modify register", MI);
+ *OS << "Valno #" << VNI->id << " in " << LI << '\n';
}
// Early clobber defs begin at USE slots, but other defs must begin at
// DEF slots.
if (isEarlyClobber) {
- if (!VNI->def.isUse()) {
- report("Early clobber def must be at a USE slot", MF);
+ if (!VNI->def.isEarlyClobber()) {
+ report("Early clobber def must be at an early-clobber slot", MF);
*OS << "Valno #" << VNI->id << " is defined at " << VNI->def
<< " in " << LI << '\n';
}
- } else if (!VNI->def.isDef()) {
- report("Non-PHI, non-early clobber def must be at a DEF slot", MF);
+ } else if (!VNI->def.isRegister()) {
+ report("Non-PHI, non-early clobber def must be at a register slot",
+ MF);
*OS << "Valno #" << VNI->id << " is defined at " << VNI->def
<< " in " << LI << '\n';
}
@@ -1137,32 +1211,76 @@ void MachineVerifier::verifyLiveIntervals() {
*OS << " in " << LI << '\n';
continue;
}
- if (I->end != LiveInts->getMBBEndIdx(EndMBB)) {
- // The live segment is ending inside EndMBB
- const MachineInstr *MI =
- LiveInts->getInstructionFromIndex(I->end.getPrevSlot());
- if (!MI) {
- report("Live segment doesn't end at a valid instruction", EndMBB);
+
+ // No more checks for live-out segments.
+ if (I->end == LiveInts->getMBBEndIdx(EndMBB))
+ continue;
+
+ // The live segment is ending inside EndMBB
+ const MachineInstr *MI =
+ LiveInts->getInstructionFromIndex(I->end.getPrevSlot());
+ if (!MI) {
+ report("Live segment doesn't end at a valid instruction", EndMBB);
I->print(*OS);
*OS << " in " << LI << '\n' << "Basic block starts at "
- << MBBStartIdx << '\n';
- } else if (TargetRegisterInfo::isVirtualRegister(LI.reg) &&
- !MI->readsVirtualRegister(LI.reg)) {
- // A live range can end with either a redefinition, a kill flag on a
- // use, or a dead flag on a def.
- // FIXME: Should we check for each of these?
- bool hasDeadDef = false;
- for (MachineInstr::const_mop_iterator MOI = MI->operands_begin(),
- MOE = MI->operands_end(); MOI != MOE; ++MOI) {
- if (MOI->isReg() && MOI->getReg() == LI.reg && MOI->isDef() && MOI->isDead()) {
- hasDeadDef = true;
- break;
- }
- }
+ << MBBStartIdx << '\n';
+ continue;
+ }
+
+ // The block slot must refer to a basic block boundary.
+ if (I->end.isBlock()) {
+ report("Live segment ends at B slot of an instruction", MI);
+ I->print(*OS);
+ *OS << " in " << LI << '\n';
+ }
+
+ if (I->end.isDead()) {
+ // Segment ends on the dead slot.
+ // That means there must be a dead def.
+ if (!SlotIndex::isSameInstr(I->start, I->end)) {
+ report("Live segment ending at dead slot spans instructions", MI);
+ I->print(*OS);
+ *OS << " in " << LI << '\n';
+ }
+ }
+
+ // A live segment can only end at an early-clobber slot if it is being
+ // redefined by an early-clobber def.
+ if (I->end.isEarlyClobber()) {
+ if (I+1 == E || (I+1)->start != I->end) {
+ report("Live segment ending at early clobber slot must be "
+ "redefined by an EC def in the same instruction", MI);
+ I->print(*OS);
+ *OS << " in " << LI << '\n';
+ }
+ }
+
+ // The following checks only apply to virtual registers. Physreg liveness
+ // is too weird to check.
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
+ // A live range can end with either a redefinition, a kill flag on a
+ // use, or a dead flag on a def.
+ bool hasRead = false;
+ bool hasDeadDef = false;
+ for (ConstMIBundleOperands MOI(MI); MOI.isValid(); ++MOI) {
+ if (!MOI->isReg() || MOI->getReg() != LI.reg)
+ continue;
+ if (MOI->readsReg())
+ hasRead = true;
+ if (MOI->isDef() && MOI->isDead())
+ hasDeadDef = true;
+ }
+ if (I->end.isDead()) {
if (!hasDeadDef) {
- report("Instruction killing live segment neither defines nor reads "
- "register", MI);
+ report("Instruction doesn't have a dead def operand", MI);
+ I->print(*OS);
+ *OS << " in " << LI << '\n';
+ }
+ } else {
+ if (!hasRead) {
+ report("Instruction ending live range doesn't read the register",
+ MI);
I->print(*OS);
*OS << " in " << LI << '\n';
}
@@ -1192,8 +1310,8 @@ void MachineVerifier::verifyLiveIntervals() {
// Check that VNI is live-out of all predecessors.
for (MachineBasicBlock::const_pred_iterator PI = MFI->pred_begin(),
PE = MFI->pred_end(); PI != PE; ++PI) {
- SlotIndex PEnd = LiveInts->getMBBEndIdx(*PI).getPrevSlot();
- const VNInfo *PVNI = LI.getVNInfoAt(PEnd);
+ SlotIndex PEnd = LiveInts->getMBBEndIdx(*PI);
+ const VNInfo *PVNI = LI.getVNInfoBefore(PEnd);
if (VNI->isPHIDef() && VNI->def == LiveInts->getMBBStartIdx(MFI))
continue;
@@ -1201,7 +1319,7 @@ void MachineVerifier::verifyLiveIntervals() {
if (!PVNI) {
report("Register not marked live out of predecessor", *PI);
*OS << "Valno #" << VNI->id << " live into BB#" << MFI->getNumber()
- << '@' << LiveInts->getMBBStartIdx(MFI) << ", not live at "
+ << '@' << LiveInts->getMBBStartIdx(MFI) << ", not live before "
<< PEnd << " in " << LI << '\n';
continue;
}
diff --git a/contrib/llvm/lib/CodeGen/ObjectCodeEmitter.cpp b/contrib/llvm/lib/CodeGen/ObjectCodeEmitter.cpp
deleted file mode 100644
index cf05275..0000000
--- a/contrib/llvm/lib/CodeGen/ObjectCodeEmitter.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//===-- llvm/CodeGen/ObjectCodeEmitter.cpp -------------------- -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CodeGen/BinaryObject.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineRelocation.h"
-#include "llvm/CodeGen/ObjectCodeEmitter.h"
-
-//===----------------------------------------------------------------------===//
-// ObjectCodeEmitter Implementation
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-
-ObjectCodeEmitter::ObjectCodeEmitter() : BO(0) {}
-ObjectCodeEmitter::ObjectCodeEmitter(BinaryObject *bo) : BO(bo) {}
-ObjectCodeEmitter::~ObjectCodeEmitter() {}
-
-/// setBinaryObject - set the BinaryObject we are writting to
-void ObjectCodeEmitter::setBinaryObject(BinaryObject *bo) { BO = bo; }
-
-/// emitByte - This callback is invoked when a byte needs to be
-/// written to the data stream, without buffer overflow testing.
-void ObjectCodeEmitter::emitByte(uint8_t B) {
- BO->emitByte(B);
-}
-
-/// emitWordLE - This callback is invoked when a 32-bit word needs to be
-/// written to the data stream in little-endian format.
-void ObjectCodeEmitter::emitWordLE(uint32_t W) {
- BO->emitWordLE(W);
-}
-
-/// emitWordBE - This callback is invoked when a 32-bit word needs to be
-/// written to the data stream in big-endian format.
-void ObjectCodeEmitter::emitWordBE(uint32_t W) {
- BO->emitWordBE(W);
-}
-
-/// emitDWordLE - This callback is invoked when a 64-bit word needs to be
-/// written to the data stream in little-endian format.
-void ObjectCodeEmitter::emitDWordLE(uint64_t W) {
- BO->emitDWordLE(W);
-}
-
-/// emitDWordBE - This callback is invoked when a 64-bit word needs to be
-/// written to the data stream in big-endian format.
-void ObjectCodeEmitter::emitDWordBE(uint64_t W) {
- BO->emitDWordBE(W);
-}
-
-/// emitAlignment - Align 'BO' to the necessary alignment boundary.
-void ObjectCodeEmitter::emitAlignment(unsigned Alignment /* 0 */,
- uint8_t fill /* 0 */) {
- BO->emitAlignment(Alignment, fill);
-}
-
-/// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be
-/// written to the data stream.
-void ObjectCodeEmitter::emitULEB128Bytes(uint64_t Value) {
- BO->emitULEB128Bytes(Value);
-}
-
-/// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be
-/// written to the data stream.
-void ObjectCodeEmitter::emitSLEB128Bytes(uint64_t Value) {
- BO->emitSLEB128Bytes(Value);
-}
-
-/// emitString - This callback is invoked when a String needs to be
-/// written to the data stream.
-void ObjectCodeEmitter::emitString(const std::string &String) {
- BO->emitString(String);
-}
-
-/// getCurrentPCValue - This returns the address that the next emitted byte
-/// will be output to.
-uintptr_t ObjectCodeEmitter::getCurrentPCValue() const {
- return BO->getCurrentPCOffset();
-}
-
-/// getCurrentPCOffset - Return the offset from the start of the emitted
-/// buffer that we are currently writing to.
-uintptr_t ObjectCodeEmitter::getCurrentPCOffset() const {
- return BO->getCurrentPCOffset();
-}
-
-/// addRelocation - Whenever a relocatable address is needed, it should be
-/// noted with this interface.
-void ObjectCodeEmitter::addRelocation(const MachineRelocation& relocation) {
- BO->addRelocation(relocation);
-}
-
-/// StartMachineBasicBlock - This should be called by the target when a new
-/// basic block is about to be emitted. This way the MCE knows where the
-/// start of the block is, and can implement getMachineBasicBlockAddress.
-void ObjectCodeEmitter::StartMachineBasicBlock(MachineBasicBlock *MBB) {
- if (MBBLocations.size() <= (unsigned)MBB->getNumber())
- MBBLocations.resize((MBB->getNumber()+1)*2);
- MBBLocations[MBB->getNumber()] = getCurrentPCOffset();
-}
-
-/// getMachineBasicBlockAddress - Return the address of the specified
-/// MachineBasicBlock, only usable after the label for the MBB has been
-/// emitted.
-uintptr_t
-ObjectCodeEmitter::getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
- assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
- MBBLocations[MBB->getNumber()] && "MBB not emitted!");
- return MBBLocations[MBB->getNumber()];
-}
-
-/// getJumpTableEntryAddress - Return the address of the jump table with index
-/// 'Index' in the function that last called initJumpTableInfo.
-uintptr_t ObjectCodeEmitter::getJumpTableEntryAddress(unsigned Index) const {
- assert(JTLocations.size() > Index && "JT not emitted!");
- return JTLocations[Index];
-}
-
-/// getConstantPoolEntryAddress - Return the address of the 'Index' entry in
-/// the constant pool that was last emitted with the emitConstantPool method.
-uintptr_t ObjectCodeEmitter::getConstantPoolEntryAddress(unsigned Index) const {
- assert(CPLocations.size() > Index && "CP not emitted!");
- return CPLocations[Index];
-}
-
-/// getConstantPoolEntrySection - Return the section of the 'Index' entry in
-/// the constant pool that was last emitted with the emitConstantPool method.
-uintptr_t ObjectCodeEmitter::getConstantPoolEntrySection(unsigned Index) const {
- assert(CPSections.size() > Index && "CP not emitted!");
- return CPSections[Index];
-}
-
-} // end namespace llvm
-
diff --git a/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp b/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
index c05be13..6da313e 100644
--- a/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
+++ b/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
@@ -56,11 +56,10 @@ namespace {
}
char OptimizePHIs::ID = 0;
+char &llvm::OptimizePHIsID = OptimizePHIs::ID;
INITIALIZE_PASS(OptimizePHIs, "opt-phis",
"Optimize machine instruction PHIs", false, false)
-FunctionPass *llvm::createOptimizePHIsPass() { return new OptimizePHIs(); }
-
bool OptimizePHIs::runOnMachineFunction(MachineFunction &Fn) {
MRI = &Fn.getRegInfo();
TII = Fn.getTarget().getInstrInfo();
@@ -165,7 +164,11 @@ bool OptimizePHIs::OptimizeBB(MachineBasicBlock &MBB) {
InstrSet PHIsInCycle;
if (IsSingleValuePHICycle(MI, SingleValReg, PHIsInCycle) &&
SingleValReg != 0) {
- MRI->replaceRegWith(MI->getOperand(0).getReg(), SingleValReg);
+ unsigned OldReg = MI->getOperand(0).getReg();
+ if (!MRI->constrainRegClass(SingleValReg, MRI->getRegClass(OldReg)))
+ continue;
+
+ MRI->replaceRegWith(OldReg, SingleValReg);
MI->eraseFromParent();
++NumPHICycles;
Changed = true;
diff --git a/contrib/llvm/lib/CodeGen/PHIElimination.cpp b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
index 6994aa5..0ed4c34 100644
--- a/contrib/llvm/lib/CodeGen/PHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
@@ -92,11 +92,15 @@ STATISTIC(NumCriticalEdgesSplit, "Number of critical edges split");
STATISTIC(NumReused, "Number of reused lowered phis");
char PHIElimination::ID = 0;
-INITIALIZE_PASS(PHIElimination, "phi-node-elimination",
- "Eliminate PHI nodes for register allocation", false, false)
-
char& llvm::PHIEliminationID = PHIElimination::ID;
+INITIALIZE_PASS_BEGIN(PHIElimination, "phi-node-elimination",
+ "Eliminate PHI nodes for register allocation",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveVariables)
+INITIALIZE_PASS_END(PHIElimination, "phi-node-elimination",
+ "Eliminate PHI nodes for register allocation", false, false)
+
void PHIElimination::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<LiveVariables>();
AU.addPreserved<MachineDominatorTree>();
@@ -241,7 +245,6 @@ void PHIElimination::LowerAtomicPHINode(
LiveVariables::VarInfo &VI = LV->getVarInfo(IncomingReg);
// Increment use count of the newly created virtual register.
- VI.NumUses++;
LV->setPHIJoin(IncomingReg);
// When we are reusing the incoming register, it may already have been
@@ -410,7 +413,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
return false; // Quick exit for basic blocks without PHIs.
bool Changed = false;
- for (MachineBasicBlock::const_iterator BBI = MBB.begin(), BBE = MBB.end();
+ for (MachineBasicBlock::iterator BBI = MBB.begin(), BBE = MBB.end();
BBI != BBE && BBI->isPHI(); ++BBI) {
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
unsigned Reg = BBI->getOperand(i).getReg();
diff --git a/contrib/llvm/lib/CodeGen/Passes.cpp b/contrib/llvm/lib/CodeGen/Passes.cpp
index 315aedd..53d1fcf 100644
--- a/contrib/llvm/lib/CodeGen/Passes.cpp
+++ b/contrib/llvm/lib/CodeGen/Passes.cpp
@@ -12,62 +12,617 @@
//
//===---------------------------------------------------------------------===//
-#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/PassManager.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
+static cl::opt<bool> DisablePostRA("disable-post-ra", cl::Hidden,
+ cl::desc("Disable Post Regalloc"));
+static cl::opt<bool> DisableBranchFold("disable-branch-fold", cl::Hidden,
+ cl::desc("Disable branch folding"));
+static cl::opt<bool> DisableTailDuplicate("disable-tail-duplicate", cl::Hidden,
+ cl::desc("Disable tail duplication"));
+static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden,
+ cl::desc("Disable pre-register allocation tail duplication"));
+static cl::opt<bool> EnableBlockPlacement("enable-block-placement",
+ cl::Hidden, cl::desc("Enable probability-driven block placement"));
+static cl::opt<bool> EnableBlockPlacementStats("enable-block-placement-stats",
+ cl::Hidden, cl::desc("Collect probability-driven block placement stats"));
+static cl::opt<bool> DisableCodePlace("disable-code-place", cl::Hidden,
+ cl::desc("Disable code placement"));
+static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
+ cl::desc("Disable Stack Slot Coloring"));
+static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden,
+ cl::desc("Disable Machine Dead Code Elimination"));
+static cl::opt<bool> DisableMachineLICM("disable-machine-licm", cl::Hidden,
+ cl::desc("Disable Machine LICM"));
+static cl::opt<bool> DisableMachineCSE("disable-machine-cse", cl::Hidden,
+ cl::desc("Disable Machine Common Subexpression Elimination"));
+static cl::opt<cl::boolOrDefault>
+OptimizeRegAlloc("optimize-regalloc", cl::Hidden,
+ cl::desc("Enable optimized register allocation compilation path."));
+static cl::opt<cl::boolOrDefault>
+EnableMachineSched("enable-misched", cl::Hidden,
+ cl::desc("Enable the machine instruction scheduling pass."));
+static cl::opt<bool> EnableStrongPHIElim("strong-phi-elim", cl::Hidden,
+ cl::desc("Use strong PHI elimination."));
+static cl::opt<bool> DisablePostRAMachineLICM("disable-postra-machine-licm",
+ cl::Hidden,
+ cl::desc("Disable Machine LICM"));
+static cl::opt<bool> DisableMachineSink("disable-machine-sink", cl::Hidden,
+ cl::desc("Disable Machine Sinking"));
+static cl::opt<bool> DisableLSR("disable-lsr", cl::Hidden,
+ cl::desc("Disable Loop Strength Reduction Pass"));
+static cl::opt<bool> DisableCGP("disable-cgp", cl::Hidden,
+ cl::desc("Disable Codegen Prepare"));
+static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
+ cl::desc("Disable Copy Propagation pass"));
+static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
+ cl::desc("Print LLVM IR produced by the loop-reduce pass"));
+static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
+ cl::desc("Print LLVM IR input to isel pass"));
+static cl::opt<bool> PrintGCInfo("print-gc", cl::Hidden,
+ cl::desc("Dump garbage collector data"));
+static cl::opt<bool> VerifyMachineCode("verify-machineinstrs", cl::Hidden,
+ cl::desc("Verify generated machine code"),
+ cl::init(getenv("LLVM_VERIFY_MACHINEINSTRS")!=NULL));
+
+/// Allow standard passes to be disabled by command line options. This supports
+/// simple binary flags that either suppress the pass or do nothing.
+/// i.e. -disable-mypass=false has no effect.
+/// These should be converted to boolOrDefault in order to use applyOverride.
+static AnalysisID applyDisable(AnalysisID ID, bool Override) {
+ if (Override)
+ return &NoPassID;
+ return ID;
+}
+
+/// Allow Pass selection to be overridden by command line options. This supports
+/// flags with ternary conditions. TargetID is passed through by default. The
+/// pass is suppressed when the option is false. When the option is true, the
+/// StandardID is selected if the target provides no default.
+static AnalysisID applyOverride(AnalysisID TargetID, cl::boolOrDefault Override,
+ AnalysisID StandardID) {
+ switch (Override) {
+ case cl::BOU_UNSET:
+ return TargetID;
+ case cl::BOU_TRUE:
+ if (TargetID != &NoPassID)
+ return TargetID;
+ if (StandardID == &NoPassID)
+ report_fatal_error("Target cannot enable pass");
+ return StandardID;
+ case cl::BOU_FALSE:
+ return &NoPassID;
+ }
+ llvm_unreachable("Invalid command line option state");
+}
+
+/// Allow standard passes to be disabled by the command line, regardless of who
+/// is adding the pass.
+///
+/// StandardID is the pass identified in the standard pass pipeline and provided
+/// to addPass(). It may be a target-specific ID in the case that the target
+/// directly adds its own pass, but in that case we harmlessly fall through.
+///
+/// TargetID is the pass that the target has configured to override StandardID.
+///
+/// StandardID may be a pseudo ID. In that case TargetID is the name of the real
+/// pass to run. This allows multiple options to control a single pass depending
+/// on where in the pipeline that pass is added.
+static AnalysisID overridePass(AnalysisID StandardID, AnalysisID TargetID) {
+ if (StandardID == &PostRASchedulerID)
+ return applyDisable(TargetID, DisablePostRA);
+
+ if (StandardID == &BranchFolderPassID)
+ return applyDisable(TargetID, DisableBranchFold);
+
+ if (StandardID == &TailDuplicateID)
+ return applyDisable(TargetID, DisableTailDuplicate);
+
+ if (StandardID == &TargetPassConfig::EarlyTailDuplicateID)
+ return applyDisable(TargetID, DisableEarlyTailDup);
+
+ if (StandardID == &MachineBlockPlacementID)
+ return applyDisable(TargetID, DisableCodePlace);
+
+ if (StandardID == &CodePlacementOptID)
+ return applyDisable(TargetID, DisableCodePlace);
+
+ if (StandardID == &StackSlotColoringID)
+ return applyDisable(TargetID, DisableSSC);
+
+ if (StandardID == &DeadMachineInstructionElimID)
+ return applyDisable(TargetID, DisableMachineDCE);
+
+ if (StandardID == &MachineLICMID)
+ return applyDisable(TargetID, DisableMachineLICM);
+
+ if (StandardID == &MachineCSEID)
+ return applyDisable(TargetID, DisableMachineCSE);
+
+ if (StandardID == &MachineSchedulerID)
+ return applyOverride(TargetID, EnableMachineSched, StandardID);
+
+ if (StandardID == &TargetPassConfig::PostRAMachineLICMID)
+ return applyDisable(TargetID, DisablePostRAMachineLICM);
+
+ if (StandardID == &MachineSinkingID)
+ return applyDisable(TargetID, DisableMachineSink);
+
+ if (StandardID == &MachineCopyPropagationID)
+ return applyDisable(TargetID, DisableCopyProp);
+
+ return TargetID;
+}
+
//===---------------------------------------------------------------------===//
+/// TargetPassConfig
+//===---------------------------------------------------------------------===//
+
+INITIALIZE_PASS(TargetPassConfig, "targetpassconfig",
+ "Target Pass Configuration", false, false)
+char TargetPassConfig::ID = 0;
+
+static char NoPassIDAnchor = 0;
+char &llvm::NoPassID = NoPassIDAnchor;
+
+// Pseudo Pass IDs.
+char TargetPassConfig::EarlyTailDuplicateID = 0;
+char TargetPassConfig::PostRAMachineLICMID = 0;
+
+namespace llvm {
+class PassConfigImpl {
+public:
+ // List of passes explicitly substituted by this target. Normally this is
+ // empty, but it is a convenient way to suppress or replace specific passes
+ // that are part of a standard pass pipeline without overriding the entire
+ // pipeline. This mechanism allows target options to inherit a standard pass's
+ // user interface. For example, a target may disable a standard pass by
+ // default by substituting NoPass, and the user may still enable that standard
+ // pass with an explicit command line option.
+ DenseMap<AnalysisID,AnalysisID> TargetPasses;
+};
+} // namespace llvm
+
+// Out of line virtual method.
+TargetPassConfig::~TargetPassConfig() {
+ delete Impl;
+}
+
+// Out of line constructor provides default values for pass options and
+// registers all common codegen passes.
+TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
+ : ImmutablePass(ID), TM(tm), PM(pm), Impl(0), Initialized(false),
+ DisableVerify(false),
+ EnableTailMerge(true) {
+
+ Impl = new PassConfigImpl();
+
+ // Register all target independent codegen passes to activate their PassIDs,
+ // including this pass itself.
+ initializeCodeGen(*PassRegistry::getPassRegistry());
+
+ // Substitute Pseudo Pass IDs for real ones.
+ substitutePass(EarlyTailDuplicateID, TailDuplicateID);
+ substitutePass(PostRAMachineLICMID, MachineLICMID);
+
+ // Temporarily disable experimental passes.
+ substitutePass(MachineSchedulerID, NoPassID);
+}
+
+/// createPassConfig - Create a pass configuration object to be used by
+/// addPassToEmitX methods for generating a pipeline of CodeGen passes.
+///
+/// Targets may override this to extend TargetPassConfig.
+TargetPassConfig *LLVMTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new TargetPassConfig(this, PM);
+}
+
+TargetPassConfig::TargetPassConfig()
+ : ImmutablePass(ID), PM(*(PassManagerBase*)0) {
+ llvm_unreachable("TargetPassConfig should not be constructed on-the-fly");
+}
+
+// Helper to verify the analysis is really immutable.
+void TargetPassConfig::setOpt(bool &Opt, bool Val) {
+ assert(!Initialized && "PassConfig is immutable");
+ Opt = Val;
+}
+
+void TargetPassConfig::substitutePass(char &StandardID, char &TargetID) {
+ Impl->TargetPasses[&StandardID] = &TargetID;
+}
+
+AnalysisID TargetPassConfig::getPassSubstitution(AnalysisID ID) const {
+ DenseMap<AnalysisID, AnalysisID>::const_iterator
+ I = Impl->TargetPasses.find(ID);
+ if (I == Impl->TargetPasses.end())
+ return ID;
+ return I->second;
+}
+
+/// Add a CodeGen pass at this point in the pipeline after checking for target
+/// and command line overrides.
+AnalysisID TargetPassConfig::addPass(char &ID) {
+ assert(!Initialized && "PassConfig is immutable");
+
+ AnalysisID TargetID = getPassSubstitution(&ID);
+ AnalysisID FinalID = overridePass(&ID, TargetID);
+ if (FinalID == &NoPassID)
+ return FinalID;
+
+ Pass *P = Pass::createPass(FinalID);
+ if (!P)
+ llvm_unreachable("Pass ID not registered");
+ PM.add(P);
+ return FinalID;
+}
+
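The net effect of substitutePass, getPassSubstitution, and overridePass can be modeled in a few lines of standalone C++ (hypothetical pass names, std::map standing in for the DenseMap): target substitution applies first, then the command-line override, then the surviving pass is scheduled.

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, std::string> TargetPasses;   // Impl->TargetPasses
      TargetPasses["machine-scheduler"] = "no-pass";     // ctor: substitutePass

      auto addPass = [&](const std::string &StandardID, bool EnableFlag) {
        std::string ID = StandardID;
        auto I = TargetPasses.find(ID);                  // getPassSubstitution
        if (I != TargetPasses.end())
          ID = I->second;
        if (EnableFlag && ID == "no-pass")               // applyOverride, BOU_TRUE
          ID = StandardID;
        if (ID == "no-pass") {
          std::cout << StandardID << ": skipped\n";
          return;
        }
        std::cout << ID << ": added to pipeline\n";      // PM.add(P)
      };

      addPass("machine-scheduler", false); // default: substituted away, skipped
      addPass("machine-scheduler", true);  // -enable-misched: re-enabled
      return 0;
    }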
+void TargetPassConfig::printAndVerify(const char *Banner) const {
+ if (TM->shouldPrintMachineCode())
+ PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
+
+ if (VerifyMachineCode)
+ PM.add(createMachineVerifierPass(Banner));
+}
+
+/// Add common target configurable passes that perform LLVM IR to IR transforms
+/// following machine independent optimization.
+void TargetPassConfig::addIRPasses() {
+ // Basic AliasAnalysis support.
+ // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
+ // BasicAliasAnalysis wins if they disagree. This is intended to help
+ // support "obvious" type-punning idioms.
+ PM.add(createTypeBasedAliasAnalysisPass());
+ PM.add(createBasicAliasAnalysisPass());
+
+ // Before running any passes, run the verifier to determine if the input
+ // coming from the front-end and/or optimizer is valid.
+ if (!DisableVerify)
+ PM.add(createVerifierPass());
+
+ // Run loop strength reduction before anything else.
+ if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
+ PM.add(createLoopStrengthReducePass(getTargetLowering()));
+ if (PrintLSR)
+ PM.add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
+ }
+
+ PM.add(createGCLoweringPass());
+
+ // Make sure that no unreachable blocks are instruction selected.
+ PM.add(createUnreachableBlockEliminationPass());
+}
+
+/// Add common passes that perform LLVM IR to IR transforms in preparation for
+/// instruction selection.
+void TargetPassConfig::addISelPrepare() {
+ if (getOptLevel() != CodeGenOpt::None && !DisableCGP)
+ PM.add(createCodeGenPreparePass(getTargetLowering()));
+
+ PM.add(createStackProtectorPass(getTargetLowering()));
+
+ addPreISel();
+
+ if (PrintISelInput)
+ PM.add(createPrintFunctionPass("\n\n"
+ "*** Final LLVM Code input to ISel ***\n",
+ &dbgs()));
+
+ // All passes which modify the LLVM IR are now complete; run the verifier
+ // to ensure that the IR is valid.
+ if (!DisableVerify)
+ PM.add(createVerifierPass());
+}
+
+/// Add the complete set of target-independent postISel code generator passes.
///
-/// RegisterRegAlloc class - Track the registration of register allocators.
+/// This can be read as the standard order of major LLVM CodeGen stages. Stages
+/// with nontrivial configuration or multiple passes are broken out below in
+/// add%Stage routines.
///
+/// Any TargetPassConfig::addXX routine may be overridden by the Target. The
+/// addPre/Post methods with empty header implementations allow injecting
+/// target-specific fixups just before or after major stages. Additionally,
+/// targets have the flexibility to change pass order within a stage by
+/// overriding the default implementation of add%Stage routines below. Each
+/// technique has maintainability tradeoffs because alternate pass orders are
+/// not well supported. addPre/Post works better if the target pass is easily
+/// tied to a common pass. But if it has subtle dependencies on multiple passes,
+/// the target should override the stage instead.
+///
+/// TODO: We could use a single addPre/Post(ID) hook to allow pass injection
+/// before/after any target-independent pass. But it's currently overkill.
+void TargetPassConfig::addMachinePasses() {
+ // Print the instruction selected machine code...
+ printAndVerify("After Instruction Selection");
+
+ // Expand pseudo-instructions emitted by ISel.
+ addPass(ExpandISelPseudosID);
+
+ // Add passes that optimize machine instructions in SSA form.
+ if (getOptLevel() != CodeGenOpt::None) {
+ addMachineSSAOptimization();
+ }
+ else {
+ // If the target requests it, assign local variables to stack slots relative
+ // to one another and simplify frame index references where possible.
+ addPass(LocalStackSlotAllocationID);
+ }
+
+ // Run pre-ra passes.
+ if (addPreRegAlloc())
+ printAndVerify("After PreRegAlloc passes");
+
+ // Run register allocation and passes that are tightly coupled with it,
+ // including phi elimination and scheduling.
+ if (getOptimizeRegAlloc())
+ addOptimizedRegAlloc(createRegAllocPass(true));
+ else
+ addFastRegAlloc(createRegAllocPass(false));
+
+ // Run post-ra passes.
+ if (addPostRegAlloc())
+ printAndVerify("After PostRegAlloc passes");
+
+ // Insert prolog/epilog code. Eliminate abstract frame index references...
+ addPass(PrologEpilogCodeInserterID);
+ printAndVerify("After PrologEpilogCodeInserter");
+
+ // Add passes that optimize machine instructions after register allocation.
+ if (getOptLevel() != CodeGenOpt::None)
+ addMachineLateOptimization();
+
+ // Expand pseudo instructions before second scheduling pass.
+ addPass(ExpandPostRAPseudosID);
+ printAndVerify("After ExpandPostRAPseudos");
+
+ // Run pre-sched2 passes.
+ if (addPreSched2())
+ printAndVerify("After PreSched2 passes");
+
+ // Second pass scheduler.
+ if (getOptLevel() != CodeGenOpt::None) {
+ addPass(PostRASchedulerID);
+ printAndVerify("After PostRAScheduler");
+ }
+
+ // GC
+ addPass(GCMachineCodeAnalysisID);
+ if (PrintGCInfo)
+ PM.add(createGCInfoPrinter(dbgs()));
+
+ // Basic block placement.
+ if (getOptLevel() != CodeGenOpt::None)
+ addBlockPlacement();
+
+ if (addPreEmitPass())
+ printAndVerify("After PreEmit passes");
+}
+
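As a sketch of the extension points just described (the target name and pass IDs are hypothetical; the hook signatures are the ones used in this diff), a target would typically subclass TargetPassConfig like so:

    // Hypothetical target configuration (illustrative sketch only).
    extern char MyTargetLICMID;       // hypothetical pass IDs, for illustration
    extern char MyTargetFixupPassID;

    class MyTargetPassConfig : public TargetPassConfig {
    public:
      MyTargetPassConfig(TargetMachine *TM, PassManagerBase &PM)
        : TargetPassConfig(TM, PM) {
        // Replace a standard pass by default; the standard pass's command line
        // flags still apply to the substitute via overridePass.
        substitutePass(MachineLICMID, MyTargetLICMID);
      }
      virtual bool addPreRegAlloc() {
        addPass(MyTargetFixupPassID);
        return true;  // tell addMachinePasses to printAndVerify afterwards
      }
    };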
+/// Add passes that optimize machine instructions in SSA form.
+void TargetPassConfig::addMachineSSAOptimization() {
+ // Pre-ra tail duplication.
+ if (addPass(EarlyTailDuplicateID) != &NoPassID)
+ printAndVerify("After Pre-RegAlloc TailDuplicate");
+
+ // Optimize PHIs before DCE: removing dead PHI cycles may make more
+ // instructions dead.
+ addPass(OptimizePHIsID);
+
+ // If the target requests it, assign local variables to stack slots relative
+ // to one another and simplify frame index references where possible.
+ addPass(LocalStackSlotAllocationID);
+
+ // With optimization, dead code should already be eliminated. However
+ // there is one known exception: lowered code for arguments that are only
+ // used by tail calls, where the tail calls reuse the incoming stack
+ // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+ addPass(DeadMachineInstructionElimID);
+ printAndVerify("After codegen DCE pass");
+
+ addPass(MachineLICMID);
+ addPass(MachineCSEID);
+ addPass(MachineSinkingID);
+ printAndVerify("After Machine LICM, CSE and Sinking passes");
+
+ addPass(PeepholeOptimizerID);
+ printAndVerify("After codegen peephole optimization pass");
+}
+
+//===---------------------------------------------------------------------===//
+/// Register Allocation Pass Configuration
//===---------------------------------------------------------------------===//
+
+bool TargetPassConfig::getOptimizeRegAlloc() const {
+ switch (OptimizeRegAlloc) {
+ case cl::BOU_UNSET: return getOptLevel() != CodeGenOpt::None;
+ case cl::BOU_TRUE: return true;
+ case cl::BOU_FALSE: return false;
+ }
+ llvm_unreachable("Invalid optimize-regalloc state");
+}
+
+/// RegisterRegAlloc's global Registry tracks allocator registration.
MachinePassRegistry RegisterRegAlloc::Registry;
-static FunctionPass *createDefaultRegisterAllocator() { return 0; }
+/// A dummy default pass factory indicates whether the register allocator is
+/// overridden on the command line.
+static FunctionPass *useDefaultRegisterAllocator() { return 0; }
static RegisterRegAlloc
defaultRegAlloc("default",
"pick register allocator based on -O option",
- createDefaultRegisterAllocator);
+ useDefaultRegisterAllocator);
-//===---------------------------------------------------------------------===//
-///
-/// RegAlloc command line options.
-///
-//===---------------------------------------------------------------------===//
+/// -regalloc=... command line option.
static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
RegisterPassParser<RegisterRegAlloc> >
RegAlloc("regalloc",
- cl::init(&createDefaultRegisterAllocator),
+ cl::init(&useDefaultRegisterAllocator),
cl::desc("Register allocator to use"));
-//===---------------------------------------------------------------------===//
+/// Instantiate the default register allocator pass for this target for either
+/// the optimized or unoptimized allocation path. This will be added to the pass
+/// manager by addFastRegAlloc in the unoptimized case or addOptimizedRegAlloc
+/// in the optimized case.
///
-/// createRegisterAllocator - choose the appropriate register allocator.
+/// A target that uses the standard regalloc pass order for fast or optimized
+/// allocation may still override this for per-target regalloc
+/// selection. But -regalloc=... always takes precedence.
+FunctionPass *TargetPassConfig::createTargetRegisterAllocator(bool Optimized) {
+ if (Optimized)
+ return createGreedyRegisterAllocator();
+ else
+ return createFastRegisterAllocator();
+}
+
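A target wanting different allocators overrides this hook; continuing the hypothetical subclass from the earlier sketch (the basic allocator exists alongside fast and greedy in this tree, but the choice is purely illustrative):

    // Hypothetical per-target override. An explicit -regalloc=... still wins,
    // because createRegAllocPass below consults the registry before calling this.
    FunctionPass *MyTargetPassConfig::createTargetRegisterAllocator(bool Optimized) {
      return Optimized ? createBasicRegisterAllocator()
                       : createFastRegisterAllocator();
    }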
+/// Find and instantiate the register allocation pass requested by this target
+/// at the current optimization level. Different register allocators are
+/// defined as separate passes because they may require different analysis.
///
-//===---------------------------------------------------------------------===//
-FunctionPass *llvm::createRegisterAllocator(CodeGenOpt::Level OptLevel) {
+/// This helper ensures that the -regalloc= option is always available,
+/// even for targets that override the default allocator.
+///
+/// FIXME: When MachinePassRegistry registers pass IDs instead of function ptrs,
+/// this can be folded into addPass.
+FunctionPass *TargetPassConfig::createRegAllocPass(bool Optimized) {
RegisterRegAlloc::FunctionPassCtor Ctor = RegisterRegAlloc::getDefault();
+ // Initialize the global default.
if (!Ctor) {
Ctor = RegAlloc;
RegisterRegAlloc::setDefault(RegAlloc);
}
+ if (Ctor != useDefaultRegisterAllocator)
+ return Ctor();
- // This forces linking of the linear scan register allocator,
- // so -regalloc=linearscan still works in clang.
- if (Ctor == createLinearScanRegisterAllocator)
- return createLinearScanRegisterAllocator();
+ // With no -regalloc= override, ask the target for a regalloc pass.
+ return createTargetRegisterAllocator(Optimized);
+}
- if (Ctor != createDefaultRegisterAllocator)
- return Ctor();
+/// Add the minimum set of target-independent passes that are required for
+/// register allocation. No coalescing or scheduling.
+void TargetPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
+ addPass(PHIEliminationID);
+ addPass(TwoAddressInstructionPassID);
- // When the 'default' allocator is requested, pick one based on OptLevel.
- switch (OptLevel) {
- case CodeGenOpt::None:
- return createFastRegisterAllocator();
- default:
- return createGreedyRegisterAllocator();
+ PM.add(RegAllocPass);
+ printAndVerify("After Register Allocation");
+}
+
+/// Add standard target-independent passes that are tightly coupled with
+/// optimized register allocation, including coalescing, machine instruction
+/// scheduling, and register allocation itself.
+void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
+ // LiveVariables currently requires pure SSA form.
+ //
+ // FIXME: Once TwoAddressInstruction pass no longer uses kill flags,
+ // LiveVariables can be removed completely, and LiveIntervals can be directly
+ // computed. (We still either need to regenerate kill flags after regalloc, or
+ // preferably fix the scavenger to not depend on them).
+ addPass(LiveVariablesID);
+
+ // Add passes that move from transformed SSA into conventional SSA. This is a
+ // "copy coalescing" problem.
+ //
+ if (!EnableStrongPHIElim) {
+ // Edge splitting is smarter with machine loop info.
+ addPass(MachineLoopInfoID);
+ addPass(PHIEliminationID);
+ }
+ addPass(TwoAddressInstructionPassID);
+
+ // FIXME: Either remove this pass completely, or fix it so that it works on
+ // SSA form. We could modify LiveIntervals to be independent of this pass, but
+ // it would be even better to simply eliminate *all* IMPLICIT_DEFs before
+ // leaving SSA.
+ addPass(ProcessImplicitDefsID);
+
+ if (EnableStrongPHIElim)
+ addPass(StrongPHIEliminationID);
+
+ addPass(RegisterCoalescerID);
+
+ // PreRA instruction scheduling.
+ if (addPass(MachineSchedulerID) != &NoPassID)
+ printAndVerify("After Machine Scheduling");
+
+ // Add the selected register allocation pass.
+ PM.add(RegAllocPass);
+ printAndVerify("After Register Allocation");
+
+ // FinalizeRegAlloc is convenient until MachineInstrBundles is more mature,
+ // but eventually, all users of it should probably be moved to addPostRA and
+ // it can go away. Currently, it's the intended place for targets to run
+ // FinalizeMachineBundles, because passes other than MachineScheduling and
+ // RegAlloc itself may not be aware of bundles.
+ if (addFinalizeRegAlloc())
+ printAndVerify("After RegAlloc finalization");
+
+ // Perform stack slot coloring and post-ra machine LICM.
+ //
+ // FIXME: Re-enable coloring with register when it's capable of adding
+ // kill markers.
+ addPass(StackSlotColoringID);
+
+ // Run post-ra machine LICM to hoist reloads / remats.
+ //
+ // FIXME: can this move into MachineLateOptimization?
+ addPass(PostRAMachineLICMID);
+
+ printAndVerify("After StackSlotColoring and postra Machine LICM");
+}
+
+//===---------------------------------------------------------------------===//
+/// Post RegAlloc Pass Configuration
+//===---------------------------------------------------------------------===//
+
+/// Add passes that optimize machine instructions after register allocation.
+void TargetPassConfig::addMachineLateOptimization() {
+ // Branch folding must be run after regalloc and prolog/epilog insertion.
+ if (addPass(BranchFolderPassID) != &NoPassID)
+ printAndVerify("After BranchFolding");
+
+ // Tail duplication.
+ if (addPass(TailDuplicateID) != &NoPassID)
+ printAndVerify("After TailDuplicate");
+
+ // Copy propagation.
+ if (addPass(MachineCopyPropagationID) != &NoPassID)
+ printAndVerify("After copy propagation pass");
+}
+
+/// Add standard basic block placement passes.
+void TargetPassConfig::addBlockPlacement() {
+ AnalysisID ID = &NoPassID;
+ if (EnableBlockPlacement) {
+ // MachineBlockPlacement is an experimental pass which is disabled by
+ // default currently. Eventually it should subsume CodePlacementOpt, so
+ // when enabled, the other is disabled.
+ ID = addPass(MachineBlockPlacementID);
+ } else {
+ ID = addPass(CodePlacementOptID);
+ }
+ if (ID != &NoPassID) {
+ // Run a separate pass to collect block placement statistics.
+ if (EnableBlockPlacementStats)
+ addPass(MachineBlockPlacementStatsID);
+
+ printAndVerify("After machine block placement.");
}
}
diff --git a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index bbc7ce2..9c5c029 100644
--- a/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/contrib/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -39,7 +39,7 @@
// =>
// v1 = bitcast v0
// = v0
-//
+//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "peephole-opt"
@@ -68,7 +68,7 @@ DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
STATISTIC(NumReuse, "Number of extension results reused");
STATISTIC(NumBitcasts, "Number of bitcasts eliminated");
STATISTIC(NumCmps, "Number of compares eliminated");
-STATISTIC(NumImmFold, "Number of move immediate foled");
+STATISTIC(NumImmFold, "Number of move immediate folded");
namespace {
class PeepholeOptimizer : public MachineFunctionPass {
@@ -109,22 +109,19 @@ namespace {
}
char PeepholeOptimizer::ID = 0;
+char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
"Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
"Peephole Optimizations", false, false)
-FunctionPass *llvm::createPeepholeOptimizerPass() {
- return new PeepholeOptimizer();
-}
-
/// OptimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
-///
+///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
@@ -134,7 +131,7 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
unsigned SrcReg, DstReg, SubIdx;
if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
return false;
-
+
if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
TargetRegisterInfo::isPhysicalRegister(SrcReg))
return false;
@@ -240,6 +237,10 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
if (PHIBBs.count(UseMBB))
continue;
+ // About to add uses of DstReg, clear DstReg's kill flags.
+ if (!Changed)
+ MRI->clearKillFlags(DstReg);
+
unsigned NewVR = MRI->createVirtualRegister(RC);
BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
TII->get(TargetOpcode::COPY), NewVR)
@@ -292,7 +293,7 @@ bool PeepholeOptimizer::OptimizeBitcastInstr(MachineInstr *MI,
assert(Def && Src && "Malformed bitcast instruction!");
MachineInstr *DefMI = MRI->getVRegDef(Src);
- if (!DefMI || !DefMI->getDesc().isBitcast())
+ if (!DefMI || !DefMI->isBitcast())
return false;
unsigned SrcSrc = 0;
@@ -353,7 +354,7 @@ bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
SmallSet<unsigned, 4> &ImmDefRegs,
DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.isMoveImmediate())
+ if (!MI->isMoveImmediate())
return false;
if (MCID.getNumDefs() != 1)
return false;
@@ -363,7 +364,7 @@ bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
ImmDefRegs.insert(Reg);
return true;
}
-
+
return false;
}
@@ -395,7 +396,7 @@ bool PeepholeOptimizer::FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
if (DisablePeephole)
return false;
-
+
TM = &MF.getTarget();
TII = TM->getInstrInfo();
MRI = &MF.getRegInfo();
@@ -408,7 +409,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
DenseMap<unsigned, MachineInstr*> ImmDefMIs;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
MachineBasicBlock *MBB = &*I;
-
+
bool SeenMoveImm = false;
LocalMIs.clear();
ImmDefRegs.clear();
@@ -428,17 +429,15 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
continue;
}
- const MCInstrDesc &MCID = MI->getDesc();
-
- if (MCID.isBitcast()) {
+ if (MI->isBitcast()) {
if (OptimizeBitcastInstr(MI, MBB)) {
// MI is deleted.
LocalMIs.erase(MI);
Changed = true;
MII = First ? I->begin() : llvm::next(PMII);
continue;
- }
- } else if (MCID.isCompare()) {
+ }
+ } else if (MI->isCompare()) {
if (OptimizeCmpInstr(MI, MBB)) {
// MI is deleted.
LocalMIs.erase(MI);
diff --git a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
index c73e877..24d3e5a 100644
--- a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -23,7 +23,6 @@
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "RegisterClassInfo.h"
-#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
@@ -32,6 +31,7 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
@@ -45,7 +45,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
-#include <set>
using namespace llvm;
STATISTIC(NumNoops, "Number of noops inserted");
@@ -82,16 +81,15 @@ namespace {
AliasAnalysis *AA;
const TargetInstrInfo *TII;
RegisterClassInfo RegClassInfo;
- CodeGenOpt::Level OptLevel;
public:
static char ID;
- PostRAScheduler(CodeGenOpt::Level ol) :
- MachineFunctionPass(ID), OptLevel(ol) {}
+ PostRAScheduler() : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
+ AU.addRequired<TargetPassConfig>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
@@ -99,10 +97,6 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
- const char *getPassName() const {
- return "Post RA top-down list latency scheduler";
- }
-
bool runOnMachineFunction(MachineFunction &Fn);
};
char PostRAScheduler::ID = 0;
@@ -130,36 +124,49 @@ namespace {
/// AA - AliasAnalysis for making memory reference queries.
AliasAnalysis *AA;
- /// KillIndices - The index of the most recent kill (proceding bottom-up),
- /// or ~0u if the register is not live.
- std::vector<unsigned> KillIndices;
+ /// LiveRegs - true if the register is live.
+ BitVector LiveRegs;
+
+ /// The schedule. Null SUnit*'s represent noop instructions.
+ std::vector<SUnit*> Sequence;
public:
SchedulePostRATDList(
MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
AliasAnalysis *AA, const RegisterClassInfo&,
TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
- SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);
+ SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);
~SchedulePostRATDList();
- /// StartBlock - Initialize register live-range state for scheduling in
+ /// startBlock - Initialize register live-range state for scheduling in
/// this block.
///
- void StartBlock(MachineBasicBlock *BB);
+ void startBlock(MachineBasicBlock *BB);
+
+ /// Initialize the scheduler state for the next scheduling region.
+ virtual void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned endcount);
+
+ /// Notify that the scheduler has finished scheduling the current region.
+ virtual void exitRegion();
/// Schedule - Schedule the instruction range using list scheduling.
///
- void Schedule();
+ void schedule();
+
+ void EmitSchedule();
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void Observe(MachineInstr *MI, unsigned Count);
- /// FinishBlock - Clean up register live-range state.
+ /// finishBlock - Clean up register live-range state.
///
- void FinishBlock();
+ void finishBlock();
/// FixupKills - Fix register kill flags that have been made
/// invalid due to scheduling
@@ -177,16 +184,23 @@ namespace {
// adjustments may be made to the instruction if necessary. Return
// true if the operand has been deleted, false if not.
bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
+
+ void dumpSchedule() const;
};
}
+char &llvm::PostRASchedulerID = PostRAScheduler::ID;
+
+INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
+ "Post RA top-down list latency scheduler", false, false)
+
SchedulePostRATDList::SchedulePostRATDList(
MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
AliasAnalysis *AA, const RegisterClassInfo &RCI,
TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
- SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
- : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits), AA(AA),
- KillIndices(TRI->getNumRegs())
+ SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
+ : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), Topo(SUnits), AA(AA),
+ LiveRegs(TRI->getNumRegs())
{
const TargetMachine &TM = MF.getTarget();
const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
@@ -204,16 +218,48 @@ SchedulePostRATDList::~SchedulePostRATDList() {
delete AntiDepBreak;
}
+/// Initialize state associated with the next scheduling region.
+void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned endcount) {
+ ScheduleDAGInstrs::enterRegion(bb, begin, end, endcount);
+ Sequence.clear();
+}
+
+/// Print the schedule before exiting the region.
+void SchedulePostRATDList::exitRegion() {
+ DEBUG({
+ dbgs() << "*** Final schedule ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
+ ScheduleDAGInstrs::exitRegion();
+}
+
+/// dumpSchedule - dump the scheduled Sequence.
+void SchedulePostRATDList::dumpSchedule() const {
+ for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
+ if (SUnit *SU = Sequence[i])
+ SU->dump(this);
+ else
+ dbgs() << "**** NOOP ****\n";
+ }
+}
+
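The new region-oriented interface implies a fixed call order, sketched below. This mirrors the driver in runOnMachineFunction further down and is not additional API; Begin, End, and Count are the region bounds the driver computes.

    // Per basic block:
    Scheduler.startBlock(MBB);                      // init live-range state
    //   per scheduling region within the block:
    Scheduler.enterRegion(MBB, Begin, End, Count);  // reset Sequence + region state
    Scheduler.schedule();                           // build DAG, list-schedule
    Scheduler.exitRegion();                         // debug-dump final schedule
    Scheduler.EmitSchedule();                       // splice MIs back in order
    Scheduler.finishBlock();                        // clean up live-range state
    Scheduler.FixupKills(MBB);                      // repair kill flags post-sched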
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
TII = Fn.getTarget().getInstrInfo();
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
+ TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
+
RegClassInfo.runOnMachineFunction(Fn);
// Check for explicit enable/disable of post-ra scheduling.
- TargetSubtargetInfo::AntiDepBreakMode AntiDepMode = TargetSubtargetInfo::ANTIDEP_NONE;
- SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
+ TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
+ TargetSubtargetInfo::ANTIDEP_NONE;
+ SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
if (EnablePostRAScheduler.getPosition() > 0) {
if (!EnablePostRAScheduler)
return false;
@@ -221,7 +267,8 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
// Check that post-RA scheduling is enabled for this target.
// This may upgrade the AntiDepMode.
const TargetSubtargetInfo &ST = Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
- if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
+ if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
+ CriticalPathRCs))
return false;
}
@@ -248,13 +295,13 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
static int bbcnt = 0;
if (bbcnt++ % DebugDiv != DebugMod)
continue;
- dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
- ":BB#" << MBB->getNumber() << " ***\n";
+ dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getName()
+ << ":BB#" << MBB->getNumber() << " ***\n";
}
#endif
// Initialize register live-range state for scheduling in this block.
- Scheduler.StartBlock(MBB);
+ Scheduler.startBlock(MBB);
// Schedule each sequence of instructions not interrupted by a label
// or anything else that effectively needs to shut down scheduling.
@@ -262,8 +309,13 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
unsigned Count = MBB->size(), CurrentCount = Count;
for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
MachineInstr *MI = llvm::prior(I);
- if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
- Scheduler.Run(MBB, I, Current, CurrentCount);
+ // Calls are not scheduling boundaries before register allocation, but
+ // post-ra we don't gain anything by scheduling across calls since we
+ // don't need to worry about register pressure.
+ if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
+ Scheduler.enterRegion(MBB, I, Current, CurrentCount);
+ Scheduler.schedule();
+ Scheduler.exitRegion();
Scheduler.EmitSchedule();
Current = MI;
CurrentCount = Count - 1;
@@ -271,15 +323,19 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
}
I = MI;
--Count;
+ if (MI->isBundle())
+ Count -= MI->getBundleSize();
}
assert(Count == 0 && "Instruction count mismatch!");
assert((MBB->begin() == Current || CurrentCount != 0) &&
"Instruction count mismatch!");
- Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
+ Scheduler.enterRegion(MBB, MBB->begin(), Current, CurrentCount);
+ Scheduler.schedule();
+ Scheduler.exitRegion();
Scheduler.EmitSchedule();
// Clean up register live-range state.
- Scheduler.FinishBlock();
+ Scheduler.finishBlock();
// Update register kills
Scheduler.FixupKills(MBB);
@@ -291,9 +347,9 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
-void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
+void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
// Call the superclass.
- ScheduleDAGInstrs::StartBlock(BB);
+ ScheduleDAGInstrs::startBlock(BB);
// Reset the hazard recognizer and anti-dep breaker.
HazardRec->Reset();
@@ -303,14 +359,14 @@ void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
/// Schedule - Schedule the instruction range using list scheduling.
///
-void SchedulePostRATDList::Schedule() {
+void SchedulePostRATDList::schedule() {
// Build the scheduling graph.
- BuildSchedGraph(AA);
+ buildSchedGraph(AA);
if (AntiDepBreak != NULL) {
unsigned Broken =
- AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
- InsertPosIndex, DbgValues);
+ AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
+ EndIndex, DbgValues);
if (Broken != 0) {
// We made changes. Update the dependency graph.
@@ -319,11 +375,8 @@ void SchedulePostRATDList::Schedule() {
// the def's anti-dependence *and* output-dependence edges due to
// that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register.
- SUnits.clear();
- Sequence.clear();
- EntrySU = SUnit();
- ExitSU = SUnit();
- BuildSchedGraph(AA);
+ ScheduleDAG::clearDAG();
+ buildSchedGraph(AA);
NumFixedAnti += Broken;
}
@@ -343,38 +396,36 @@ void SchedulePostRATDList::Schedule() {
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
if (AntiDepBreak != NULL)
- AntiDepBreak->Observe(MI, Count, InsertPosIndex);
+ AntiDepBreak->Observe(MI, Count, EndIndex);
}
/// FinishBlock - Clean up register live-range state.
///
-void SchedulePostRATDList::FinishBlock() {
+void SchedulePostRATDList::finishBlock() {
if (AntiDepBreak != NULL)
AntiDepBreak->FinishBlock();
// Call the superclass.
- ScheduleDAGInstrs::FinishBlock();
+ ScheduleDAGInstrs::finishBlock();
}
/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
- // Initialize the indices to indicate that no registers are live.
- for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
- KillIndices[i] = ~0u;
+ // Start with no live registers.
+ LiveRegs.reset();
// Determine the live-out physregs for this block.
- if (!BB->empty() && BB->back().getDesc().isReturn()) {
+ if (!BB->empty() && BB->back().isReturn()) {
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
E = MRI.liveout_end(); I != E; ++I) {
unsigned Reg = *I;
- KillIndices[Reg] = BB->size();
+ LiveRegs.set(Reg);
// Repeat, for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- KillIndices[*Subreg] = BB->size();
- }
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ LiveRegs.set(*Subreg);
}
}
else {
@@ -384,12 +435,11 @@ void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
- KillIndices[Reg] = BB->size();
+ LiveRegs.set(Reg);
// Repeat, for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- KillIndices[*Subreg] = BB->size();
- }
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ LiveRegs.set(*Subreg);
}
}
}
@@ -404,7 +454,7 @@ bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
}
// If MO itself is live, clear the kill flag...
- if (KillIndices[MO.getReg()] != ~0u) {
+ if (LiveRegs.test(MO.getReg())) {
MO.setIsKill(false);
return false;
}
@@ -414,9 +464,9 @@ bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
MO.setIsKill(false);
bool AllDead = true;
const unsigned SuperReg = MO.getReg();
- for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(SuperReg);
*Subreg; ++Subreg) {
- if (KillIndices[*Subreg] != ~0u) {
+ if (LiveRegs.test(*Subreg)) {
MI->addOperand(MachineOperand::CreateReg(*Subreg,
true /*IsDef*/,
true /*IsImp*/,
@@ -437,7 +487,7 @@ bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
- std::set<unsigned> killedRegs;
+ BitVector killedRegs(TRI->getNumRegs());
BitVector ReservedRegs = TRI->getReservedRegs(MF);
StartBlockForKills(MBB);
@@ -455,6 +505,8 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
// are completely defined.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
+ if (MO.isRegMask())
+ LiveRegs.clearBitsNotInMask(MO.getRegMask());
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
@@ -462,19 +514,18 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
// Ignore two-addr defs.
if (MI->isRegTiedToUseOperand(i)) continue;
- KillIndices[Reg] = ~0u;
+ LiveRegs.reset(Reg);
// Repeat for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- KillIndices[*Subreg] = ~0u;
- }
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ LiveRegs.reset(*Subreg);
}
// Examine all used registers and set/clear kill flag. When a
// register is used multiple times we only set the kill flag on
// the first use.
- killedRegs.clear();
+ killedRegs.reset();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse()) continue;
@@ -482,12 +533,12 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
bool kill = false;
- if (killedRegs.find(Reg) == killedRegs.end()) {
+ if (!killedRegs.test(Reg)) {
kill = true;
// A register is not killed if any subregs are live...
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
- if (KillIndices[*Subreg] != ~0u) {
+ if (LiveRegs.test(*Subreg)) {
kill = false;
break;
}
@@ -496,7 +547,7 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
// If subreg is not live, then register is killed if it became
// live in this instruction
if (kill)
- kill = (KillIndices[Reg] == ~0u);
+ kill = !LiveRegs.test(Reg);
}
if (MO.isKill() != kill) {
@@ -506,7 +557,7 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
DEBUG(MI->dump());
}
- killedRegs.insert(Reg);
+ killedRegs.set(Reg);
}
// Mark any used register (that is not using undef) and subregs as
@@ -517,12 +568,11 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
unsigned Reg = MO.getReg();
if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
- KillIndices[Reg] = Count;
+ LiveRegs.set(Reg);
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- KillIndices[*Subreg] = Count;
- }
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ LiveRegs.set(*Subreg);
}
}
}
@@ -585,7 +635,7 @@ void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
ReleaseSuccessors(SU);
SU->isScheduled = true;
- AvailableQueue.ScheduledNode(SU);
+ AvailableQueue.scheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
@@ -699,14 +749,46 @@ void SchedulePostRATDList::ListScheduleTopDown() {
}
#ifndef NDEBUG
- VerifySchedule(/*isBottomUp=*/false);
-#endif
+ unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
+ unsigned Noops = 0;
+ for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
+ if (!Sequence[i])
+ ++Noops;
+ assert(Sequence.size() - Noops == ScheduledNodes &&
+ "The number of nodes scheduled doesn't match the expected number!");
+#endif // NDEBUG
}
-//===----------------------------------------------------------------------===//
-// Public Constructor Functions
-//===----------------------------------------------------------------------===//
+// EmitSchedule - Emit the machine code in scheduled order.
+void SchedulePostRATDList::EmitSchedule() {
+ RegionBegin = RegionEnd;
+
+ // If first instruction was a DBG_VALUE then put it back.
+ if (FirstDbgValue)
+ BB->splice(RegionEnd, BB, FirstDbgValue);
+
+ // Then re-insert them according to the given schedule.
+ for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
+ if (SUnit *SU = Sequence[i])
+ BB->splice(RegionEnd, BB, SU->getInstr());
+ else
+ // Null SUnit* is a noop.
+ TII->insertNoop(*BB, RegionEnd);
+
+ // Update the Begin iterator, as the first instruction in the block
+ // may have been scheduled later.
+ if (i == 0)
+ RegionBegin = prior(RegionEnd);
+ }
-FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
- return new PostRAScheduler(OptLevel);
+ // Reinsert any remaining debug_values.
+ for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
+ DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
+ std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
+ MachineInstr *DbgValue = P.first;
+ MachineBasicBlock::iterator OrigPrivMI = P.second;
+ BB->splice(++OrigPrivMI, BB, DbgValue);
+ }
+ DbgValues.clear();
+ FirstDbgValue = NULL;
}
diff --git a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
index b1d8c97..1ad3479 100644
--- a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -26,6 +26,8 @@
using namespace llvm;
char ProcessImplicitDefs::ID = 0;
+char &llvm::ProcessImplicitDefsID = ProcessImplicitDefs::ID;
+
INITIALIZE_PASS_BEGIN(ProcessImplicitDefs, "processimpdefs",
"Process Implicit Definitions", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
@@ -36,7 +38,6 @@ void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<LiveVariables>();
- AU.addRequired<LiveVariables>();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
AU.addPreservedID(TwoAddressInstructionPassID);
@@ -50,10 +51,10 @@ ProcessImplicitDefs::CanTurnIntoImplicitDef(MachineInstr *MI,
SmallSet<unsigned, 8> &ImpDefRegs) {
switch(OpIdx) {
case 1:
- return MI->isCopy() && (MI->getOperand(0).getSubReg() == 0 ||
+ return MI->isCopy() && (!MI->getOperand(0).readsReg() ||
ImpDefRegs.count(MI->getOperand(0).getReg()));
case 2:
- return MI->isSubregToReg() && (MI->getOperand(0).getSubReg() == 0 ||
+ return MI->isSubregToReg() && (!MI->getOperand(0).readsReg() ||
ImpDefRegs.count(MI->getOperand(0).getReg()));
default: return false;
}
@@ -66,7 +67,7 @@ static bool isUndefCopy(MachineInstr *MI, unsigned Reg,
MachineOperand &MO1 = MI->getOperand(1);
if (MO1.getReg() != Reg)
return false;
- if (!MO0.getSubReg() || ImpDefRegs.count(MO0.getReg()))
+ if (!MO0.readsReg() || ImpDefRegs.count(MO0.getReg()))
return true;
return false;
}
@@ -87,7 +88,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
TII = fn.getTarget().getInstrInfo();
TRI = fn.getTarget().getRegisterInfo();
MRI = &fn.getRegInfo();
- LV = &getAnalysis<LiveVariables>();
+ LV = getAnalysisIfAvailable<LiveVariables>();
SmallSet<unsigned, 8> ImpDefRegs;
SmallVector<MachineInstr*, 8> ImpDefMIs;
@@ -105,23 +106,24 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
MachineInstr *MI = &*I;
++I;
if (MI->isImplicitDef()) {
- if (MI->getOperand(0).getSubReg())
+ ImpDefMIs.push_back(MI);
+ // Is this a sub-register read-modify-write?
+ if (MI->getOperand(0).readsReg())
continue;
unsigned Reg = MI->getOperand(0).getReg();
ImpDefRegs.insert(Reg);
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- for (const unsigned *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
+ for (const uint16_t *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
ImpDefRegs.insert(*SS);
}
- ImpDefMIs.push_back(MI);
continue;
}
// Eliminate %reg1032:sub<def> = COPY undef.
- if (MI->isCopy() && MI->getOperand(0).getSubReg()) {
+ if (MI->isCopy() && MI->getOperand(0).readsReg()) {
MachineOperand &MO = MI->getOperand(1);
if (MO.isUndef() || ImpDefRegs.count(MO.getReg())) {
- if (MO.isKill()) {
+ if (LV && MO.isKill()) {
LiveVariables::VarInfo& vi = LV->getVarInfo(MO.getReg());
vi.removeKill(MI);
}
@@ -140,7 +142,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
bool ChangedToImpDef = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg() || (MO.isDef() && !MO.getSubReg()) || MO.isUndef())
+ if (!MO.isReg() || !MO.readsReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
@@ -155,8 +157,10 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
MI->RemoveOperand(j);
if (isKill) {
ImpDefRegs.erase(Reg);
- LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
- vi.removeKill(MI);
+ if (LV) {
+ LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
+ vi.removeKill(MI);
+ }
}
ChangedToImpDef = true;
Changed = true;
@@ -172,10 +176,10 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
continue;
}
if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
- // Make sure other uses of
+ // Make sure other reads of Reg are also marked <undef>.
for (unsigned j = i+1; j != e; ++j) {
MachineOperand &MOJ = MI->getOperand(j);
- if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
+ if (MOJ.isReg() && MOJ.getReg() == Reg && MOJ.readsReg())
MOJ.setIsUndef();
}
ImpDefRegs.erase(Reg);
@@ -265,7 +269,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
}
// Update LiveVariables varinfo if the instruction is a kill.
- if (isKill) {
+ if (LV && isKill) {
LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
vi.removeKill(RMI);
}
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 32c9325..458915e 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -45,24 +45,22 @@
using namespace llvm;
char PEI::ID = 0;
+char &llvm::PrologEpilogCodeInserterID = PEI::ID;
INITIALIZE_PASS_BEGIN(PEI, "prologepilog",
"Prologue/Epilogue Insertion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(PEI, "prologepilog",
- "Prologue/Epilogue Insertion", false, false)
+ "Prologue/Epilogue Insertion & Frame Finalization",
+ false, false)
STATISTIC(NumVirtualFrameRegs, "Number of virtual frame regs encountered");
STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
STATISTIC(NumBytesStackSpace,
"Number of bytes used for stack in all functions");
-/// createPrologEpilogCodeInserter - This function returns a pass that inserts
-/// prolog and epilog code, and eliminates abstract frame references.
-///
-FunctionPass *llvm::createPrologEpilogCodeInserter() { return new PEI(); }
-
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
///
@@ -71,6 +69,8 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
+ assert(!Fn.getRegInfo().getNumVirtRegs() && "Regalloc must assign all vregs");
+
RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : NULL;
FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
@@ -125,6 +125,9 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging)
scavengeFrameVirtualRegs(Fn);
+ // Clear any vregs created by virtual scavenging.
+ Fn.getRegInfo().clearVirtRegs();
+
delete RS;
clearAllSets();
return true;
@@ -207,7 +210,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
MachineFrameInfo *MFI = Fn.getFrameInfo();
// Get the callee saved register list...
- const unsigned *CSRegs = RegInfo->getCalleeSavedRegs(&Fn);
+ const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&Fn);
// These are used to keep track the callee-save area. Initialize them.
MinCSFrameIndex = INT_MAX;
@@ -224,17 +227,9 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
std::vector<CalleeSavedInfo> CSI;
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
- if (Fn.getRegInfo().isPhysRegUsed(Reg)) {
+ if (Fn.getRegInfo().isPhysRegOrOverlapUsed(Reg)) {
// If the reg is modified, save it!
CSI.push_back(CalleeSavedInfo(Reg));
- } else {
- for (const unsigned *AliasSet = RegInfo->getAliasSet(Reg);
- *AliasSet; ++AliasSet) { // Check alias registers too.
- if (Fn.getRegInfo().isPhysRegUsed(*AliasSet)) {
- CSI.push_back(CalleeSavedInfo(Reg));
- break;
- }
- }
}
}
@@ -332,7 +327,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Skip over all terminator instructions, which are part of the return
// sequence.
MachineBasicBlock::iterator I2 = I;
- while (I2 != MBB->begin() && (--I2)->getDesc().isTerminator())
+ while (I2 != MBB->begin() && (--I2)->isTerminator())
I = I2;
bool AtStart = I == MBB->begin();
@@ -426,11 +421,11 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Skip over all terminator instructions, which are part of the
// return sequence.
- if (! I->getDesc().isTerminator()) {
+ if (! I->isTerminator()) {
++I;
} else {
MachineBasicBlock::iterator I2 = I;
- while (I2 != MBB->begin() && (--I2)->getDesc().isTerminator())
+ while (I2 != MBB->begin() && (--I2)->isTerminator())
I = I2;
}
}
@@ -698,7 +693,7 @@ void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
// Add epilogue to restore the callee-save registers in each exiting block
for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
// If last instruction is a return instruction, add an epilogue
- if (!I->empty() && I->back().getDesc().isReturn())
+ if (!I->empty() && I->back().isReturn())
TFI.emitEpilogue(Fn, *I);
}
@@ -706,7 +701,7 @@ void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
// we've been asked for it. This, when linked with a runtime with support
// for segmented stacks (libgcc is one), will result in allocating stack
// space in small chunks instead of one large contiguous block.
- if (EnableSegmentedStacks)
+ if (Fn.getTarget().Options.EnableSegmentedStacks)
TFI.adjustForSegmentedStacks(Fn);
}
@@ -813,6 +808,10 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
/// scavengeFrameVirtualRegs - Replace all frame index virtual registers
/// with physical registers. Use the register scavenger to find an
/// appropriate register to use.
+///
+/// FIXME: Iterating over the instruction stream is unnecessary. We can simply
+/// iterate over the vreg use list, which at this point only contains machine
+/// operands for which eliminateFrameIndex needs a new scratch reg.
void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
// Run through the instructions and find any virtual registers.
for (MachineFunction::iterator BB = Fn.begin(),
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
index e239159..0d140a9 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.h
@@ -40,10 +40,6 @@ namespace llvm {
initializePEIPass(*PassRegistry::getPassRegistry());
}
- const char *getPassName() const {
- return "Prolog/Epilog Insertion & Frame Finalization";
- }
-
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
diff --git a/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp b/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
index 73b66d8..49599b3 100644
--- a/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
+++ b/contrib/llvm/lib/CodeGen/PseudoSourceValue.cpp
@@ -87,7 +87,6 @@ bool PseudoSourceValue::isConstant(const MachineFrameInfo *) const {
this == getJumpTable())
return true;
llvm_unreachable("Unknown PseudoSourceValue!");
- return false;
}
bool PseudoSourceValue::isAliased(const MachineFrameInfo *MFI) const {
@@ -97,7 +96,6 @@ bool PseudoSourceValue::isAliased(const MachineFrameInfo *MFI) const {
this == getJumpTable())
return false;
llvm_unreachable("Unknown PseudoSourceValue!");
- return true;
}
bool PseudoSourceValue::mayAlias(const MachineFrameInfo *MFI) const {
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBase.cpp b/contrib/llvm/lib/CodeGen/RegAllocBase.cpp
new file mode 100644
index 0000000..b00eceb
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/RegAllocBase.cpp
@@ -0,0 +1,280 @@
+//===-- RegAllocBase.cpp - Register Allocator Base Class ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegAllocBase class which provides common functionality
+// for LiveIntervalUnion-based register allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "regalloc"
+#include "RegAllocBase.h"
+#include "Spiller.h"
+#include "VirtRegMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#ifndef NDEBUG
+#include "llvm/ADT/SparseBitVector.h"
+#endif
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Timer.h"
+
+using namespace llvm;
+
+STATISTIC(NumAssigned , "Number of registers assigned");
+STATISTIC(NumUnassigned , "Number of registers unassigned");
+STATISTIC(NumNewQueued , "Number of new live ranges queued");
+
+// Temporary verification option until we can put verification inside
+// MachineVerifier.
+static cl::opt<bool, true>
+VerifyRegAlloc("verify-regalloc", cl::location(RegAllocBase::VerifyEnabled),
+ cl::desc("Verify during register allocation"));
+
+const char *RegAllocBase::TimerGroupName = "Register Allocation";
+bool RegAllocBase::VerifyEnabled = false;
+
+#ifndef NDEBUG
+// Verify each LiveIntervalUnion.
+void RegAllocBase::verify() {
+ LiveVirtRegBitSet VisitedVRegs;
+ OwningArrayPtr<LiveVirtRegBitSet>
+ unionVRegs(new LiveVirtRegBitSet[PhysReg2LiveUnion.numRegs()]);
+
+ // Verify disjoint unions.
+ for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
+ DEBUG(PhysReg2LiveUnion[PhysReg].print(dbgs(), TRI));
+ LiveVirtRegBitSet &VRegs = unionVRegs[PhysReg];
+ PhysReg2LiveUnion[PhysReg].verify(VRegs);
+ // Union + intersection test could be done efficiently in one pass, but
+ // don't add a method to SparseBitVector unless we really need it.
+ assert(!VisitedVRegs.intersects(VRegs) && "vreg in multiple unions");
+ VisitedVRegs |= VRegs;
+ }
+
+ // Verify vreg coverage.
+ for (LiveIntervals::iterator liItr = LIS->begin(), liEnd = LIS->end();
+ liItr != liEnd; ++liItr) {
+ unsigned reg = liItr->first;
+ if (TargetRegisterInfo::isPhysicalRegister(reg)) continue;
+ if (!VRM->hasPhys(reg)) continue; // spilled?
+ unsigned PhysReg = VRM->getPhys(reg);
+ if (!unionVRegs[PhysReg].test(reg)) {
+ dbgs() << "LiveVirtReg " << reg << " not in union " <<
+ TRI->getName(PhysReg) << "\n";
+ llvm_unreachable("unallocated live vreg");
+ }
+ }
+ // FIXME: I'm not sure how to verify spilled intervals.
+}
+#endif //!NDEBUG
+
+//===----------------------------------------------------------------------===//
+// RegAllocBase Implementation
+//===----------------------------------------------------------------------===//
+
+// Instantiate a LiveIntervalUnion for each physical register.
+void RegAllocBase::LiveUnionArray::init(LiveIntervalUnion::Allocator &allocator,
+ unsigned NRegs) {
+ NumRegs = NRegs;
+ Array =
+ static_cast<LiveIntervalUnion*>(malloc(sizeof(LiveIntervalUnion)*NRegs));
+ for (unsigned r = 0; r != NRegs; ++r)
+ new(Array + r) LiveIntervalUnion(r, allocator);
+}
+
+void RegAllocBase::init(VirtRegMap &vrm, LiveIntervals &lis) {
+ NamedRegionTimer T("Initialize", TimerGroupName, TimePassesIsEnabled);
+ TRI = &vrm.getTargetRegInfo();
+ MRI = &vrm.getRegInfo();
+ VRM = &vrm;
+ LIS = &lis;
+ MRI->freezeReservedRegs(vrm.getMachineFunction());
+ RegClassInfo.runOnMachineFunction(vrm.getMachineFunction());
+
+ const unsigned NumRegs = TRI->getNumRegs();
+ if (NumRegs != PhysReg2LiveUnion.numRegs()) {
+ PhysReg2LiveUnion.init(UnionAllocator, NumRegs);
+ // Cache an interference query for each physical reg.
+ Queries.reset(new LiveIntervalUnion::Query[PhysReg2LiveUnion.numRegs()]);
+ }
+}
+
+void RegAllocBase::LiveUnionArray::clear() {
+ if (!Array)
+ return;
+ for (unsigned r = 0; r != NumRegs; ++r)
+ Array[r].~LiveIntervalUnion();
+ free(Array);
+ NumRegs = 0;
+ Array = 0;
+}
+
+void RegAllocBase::releaseMemory() {
+ for (unsigned r = 0, e = PhysReg2LiveUnion.numRegs(); r != e; ++r)
+ PhysReg2LiveUnion[r].clear();
+}
+
+// Visit all the live registers. If they are already assigned to a physical
+// register, unify them with the corresponding LiveIntervalUnion; otherwise push
+// them on the priority queue for later assignment.
+void RegAllocBase::seedLiveRegs() {
+ NamedRegionTimer T("Seed Live Regs", TimerGroupName, TimePassesIsEnabled);
+ for (LiveIntervals::iterator I = LIS->begin(), E = LIS->end(); I != E; ++I) {
+ unsigned RegNum = I->first;
+ LiveInterval &VirtReg = *I->second;
+ if (TargetRegisterInfo::isPhysicalRegister(RegNum))
+ PhysReg2LiveUnion[RegNum].unify(VirtReg);
+ else
+ enqueue(&VirtReg);
+ }
+}
+
+void RegAllocBase::assign(LiveInterval &VirtReg, unsigned PhysReg) {
+ DEBUG(dbgs() << "assigning " << PrintReg(VirtReg.reg, TRI)
+ << " to " << PrintReg(PhysReg, TRI) << '\n');
+ assert(!VRM->hasPhys(VirtReg.reg) && "Duplicate VirtReg assignment");
+ VRM->assignVirt2Phys(VirtReg.reg, PhysReg);
+ MRI->setPhysRegUsed(PhysReg);
+ PhysReg2LiveUnion[PhysReg].unify(VirtReg);
+ ++NumAssigned;
+}
+
+void RegAllocBase::unassign(LiveInterval &VirtReg, unsigned PhysReg) {
+ DEBUG(dbgs() << "unassigning " << PrintReg(VirtReg.reg, TRI)
+ << " from " << PrintReg(PhysReg, TRI) << '\n');
+ assert(VRM->getPhys(VirtReg.reg) == PhysReg && "Inconsistent unassign");
+ PhysReg2LiveUnion[PhysReg].extract(VirtReg);
+ VRM->clearVirt(VirtReg.reg);
+ ++NumUnassigned;
+}
+
+// Top-level driver to manage the queue of unassigned VirtRegs and call the
+// selectOrSplit implementation.
+void RegAllocBase::allocatePhysRegs() {
+ seedLiveRegs();
+
+ // Continue assigning vregs one at a time to available physical registers.
+ while (LiveInterval *VirtReg = dequeue()) {
+ assert(!VRM->hasPhys(VirtReg->reg) && "Register already assigned");
+
+ // Unused registers can appear when the spiller coalesces snippets.
+ if (MRI->reg_nodbg_empty(VirtReg->reg)) {
+ DEBUG(dbgs() << "Dropping unused " << *VirtReg << '\n');
+ LIS->removeInterval(VirtReg->reg);
+ continue;
+ }
+
+ // Invalidate all interference queries; live ranges could have changed.
+ invalidateVirtRegs();
+
+ // selectOrSplit requests the allocator to return an available physical
+ // register if possible and populate a list of new live intervals that
+ // result from splitting.
+ DEBUG(dbgs() << "\nselectOrSplit "
+ << MRI->getRegClass(VirtReg->reg)->getName()
+ << ':' << *VirtReg << '\n');
+ typedef SmallVector<LiveInterval*, 4> VirtRegVec;
+ VirtRegVec SplitVRegs;
+ unsigned AvailablePhysReg = selectOrSplit(*VirtReg, SplitVRegs);
+
+ if (AvailablePhysReg == ~0u) {
+ // selectOrSplit failed to find a register!
+ const char *Msg = "ran out of registers during register allocation";
+ // Probably caused by an inline asm.
+ MachineInstr *MI;
+ for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(VirtReg->reg);
+ (MI = I.skipInstruction());)
+ if (MI->isInlineAsm())
+ break;
+ if (MI)
+ MI->emitError(Msg);
+ else
+ report_fatal_error(Msg);
+ // Keep going after reporting the error.
+ VRM->assignVirt2Phys(VirtReg->reg,
+ RegClassInfo.getOrder(MRI->getRegClass(VirtReg->reg)).front());
+ continue;
+ }
+
+ if (AvailablePhysReg)
+ assign(*VirtReg, AvailablePhysReg);
+
+ for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
+ I != E; ++I) {
+ LiveInterval *SplitVirtReg = *I;
+ assert(!VRM->hasPhys(SplitVirtReg->reg) && "Register already assigned");
+ if (MRI->reg_nodbg_empty(SplitVirtReg->reg)) {
+ DEBUG(dbgs() << "not queueing unused " << *SplitVirtReg << '\n');
+ LIS->removeInterval(SplitVirtReg->reg);
+ continue;
+ }
+ DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
+ assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
+ "expect split value in virtual register");
+ enqueue(SplitVirtReg);
+ ++NumNewQueued;
+ }
+ }
+}
+
+// Check if this live virtual register interferes with PhysReg or any of its
+// aliases. Return the first interfering register found, or 0 if there is no
+// interference.
+unsigned RegAllocBase::checkPhysRegInterference(LiveInterval &VirtReg,
+ unsigned PhysReg) {
+ for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI)
+ if (query(VirtReg, *AliasI).checkInterference())
+ return *AliasI;
+ return 0;
+}
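+
+// getOverlaps(PhysReg) returns a zero-terminated list that begins with
+// PhysReg itself, so the loop above queries the register and each of its
+// aliases in a single pass.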
+
+// Add newly allocated physical registers to the MBB live-in sets.
+void RegAllocBase::addMBBLiveIns(MachineFunction *MF) {
+ NamedRegionTimer T("MBB Live Ins", TimerGroupName, TimePassesIsEnabled);
+ SlotIndexes *Indexes = LIS->getSlotIndexes();
+ if (MF->size() <= 1)
+ return;
+
+ LiveIntervalUnion::SegmentIter SI;
+ for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
+ LiveIntervalUnion &LiveUnion = PhysReg2LiveUnion[PhysReg];
+ if (LiveUnion.empty())
+ continue;
+ DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " live-in:");
+ MachineFunction::iterator MBB = llvm::next(MF->begin());
+ MachineFunction::iterator MFE = MF->end();
+ SlotIndex Start, Stop;
+ tie(Start, Stop) = Indexes->getMBBRange(MBB);
+ SI.setMap(LiveUnion.getMap());
+ SI.find(Start);
+ while (SI.valid()) {
+ if (SI.start() <= Start) {
+ if (!MBB->isLiveIn(PhysReg))
+ MBB->addLiveIn(PhysReg);
+ DEBUG(dbgs() << "\tBB#" << MBB->getNumber() << ':'
+ << PrintReg(SI.value()->reg, TRI));
+ } else if (SI.start() > Stop)
+ MBB = Indexes->getMBBFromIndex(SI.start().getPrevIndex());
+ if (++MBB == MFE)
+ break;
+ tie(Start, Stop) = Indexes->getMBBRange(MBB);
+ SI.advanceTo(Start);
+ }
+ DEBUG(dbgs() << '\n');
+ }
+}
+
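allocatePhysRegs() drives a selectOrSplit() hook that concrete allocators override: return a physical register to assign, 0 to defer the interval (after pushing any split products onto SplitVRegs), or ~0u when allocation is impossible. A minimal conforming override might look like this (hypothetical MyRA class; only the RegAllocBase members shown above are assumed):

    unsigned MyRA::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &SplitVRegs) {
      // Take the first register in allocation order whose LiveIntervalUnion
      // (and those of its aliases) shows no interference with VirtReg.
      ArrayRef<unsigned> Order =
        RegClassInfo.getOrder(MRI->getRegClass(VirtReg.reg));
      for (unsigned i = 0, e = Order.size(); i != e; ++i)
        if (!checkPhysRegInterference(VirtReg, Order[i]))
          return Order[i];
      // A real allocator would evict, split, or spill here, appending any
      // new intervals to SplitVRegs; returning ~0u takes the error path in
      // allocatePhysRegs().
      return ~0u;
    }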
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBase.h b/contrib/llvm/lib/CodeGen/RegAllocBase.h
index 03164211..072fe2b 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBase.h
+++ b/contrib/llvm/lib/CodeGen/RegAllocBase.h
@@ -49,11 +49,6 @@ class VirtRegMap;
class LiveIntervals;
class Spiller;
-// Forward declare a priority queue of live virtual registers. If an
-// implementation needs to prioritize by anything other than spill weight, then
-// this will become an abstract base class with virtual calls to push/get.
-class LiveVirtRegQueue;
-
/// RegAllocBase provides the register allocation driver and interface that can
/// be extended to add interesting heuristics.
///
@@ -67,7 +62,6 @@ class RegAllocBase {
// registers may have changed.
unsigned UserTag;
-protected:
// Array of LiveIntervalUnions indexed by physical register.
class LiveUnionArray {
unsigned NumRegs;
@@ -88,17 +82,19 @@ protected:
}
};
- const TargetRegisterInfo *TRI;
- MachineRegisterInfo *MRI;
- VirtRegMap *VRM;
- LiveIntervals *LIS;
- RegisterClassInfo RegClassInfo;
LiveUnionArray PhysReg2LiveUnion;
// Current queries, one per physreg. They must be reinitialized each time we
// query on a new live virtual register.
OwningArrayPtr<LiveIntervalUnion::Query> Queries;
+protected:
+ const TargetRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+ VirtRegMap *VRM;
+ LiveIntervals *LIS;
+ RegisterClassInfo RegClassInfo;
+
RegAllocBase(): UserTag(0), TRI(0), MRI(0), VRM(0), LIS(0) {}
virtual ~RegAllocBase() {}
@@ -115,16 +111,17 @@ protected:
return Queries[PhysReg];
}
+ // Get direct access to the underlying LiveIntervalUnion for PhysReg.
+ LiveIntervalUnion &getLiveUnion(unsigned PhysReg) {
+ return PhysReg2LiveUnion[PhysReg];
+ }
+
// Invalidate all cached information about virtual registers - live ranges may
// have changed.
void invalidateVirtRegs() { ++UserTag; }
// The top-level driver. The output is a VirtRegMap that is updated with
// physical register assignments.
- //
- // If an implementation wants to override the LiveInterval comparator, we
- // should modify this interface to allow passing in an instance derived from
- // LiveVirtRegQueue.
void allocatePhysRegs();
// Get a temporary reference to a Spiller instance.
@@ -160,12 +157,6 @@ protected:
/// allocation is making progress.
void unassign(LiveInterval &VirtReg, unsigned PhysReg);
- // Helper for spilling all live virtual registers currently unified under preg
- // that interfere with the most recently queried lvr. Return true if spilling
- // was successful, and append any new spilled/split intervals to splitLVRs.
- bool spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
- SmallVectorImpl<LiveInterval*> &SplitVRegs);
-
/// addMBBLiveIns - Add physreg liveins to basic blocks.
void addMBBLiveIns(MachineFunction *);
@@ -183,9 +174,6 @@ public:
private:
void seedLiveRegs();
-
- void spillReg(LiveInterval &VirtReg, unsigned PhysReg,
- SmallVectorImpl<LiveInterval*> &SplitVRegs);
};
} // end namespace llvm
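The header leaves enqueue()/dequeue() to subclasses so each allocator picks its own priority policy. A sketch of the spill-weight policy RABasic adopts below, assuming its priority_queue member Queue and a CompSpillWeight comparator ordered by ascending LiveInterval::weight:

    void enqueue(LiveInterval *LI) { Queue.push(LI); }

    LiveInterval *dequeue() {
      if (Queue.empty())
        return 0;
      // With weights compared ascending, top() is the heaviest interval,
      // so expensive-to-spill ranges get registers first.
      LiveInterval *LI = Queue.top();
      Queue.pop();
      return LI;
    }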
diff --git a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
index 5496d69..77ee314 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocBasic.cpp
@@ -1,4 +1,4 @@
-//===-- RegAllocBasic.cpp - basic register allocator ----------------------===//
+//===-- RegAllocBasic.cpp - Basic Register Allocator ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,18 +15,15 @@
#define DEBUG_TYPE "regalloc"
#include "RegAllocBase.h"
#include "LiveDebugVariables.h"
-#include "LiveIntervalUnion.h"
-#include "LiveRangeEdit.h"
#include "RenderMachineFunction.h"
#include "Spiller.h"
#include "VirtRegMap.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -37,35 +34,17 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#ifndef NDEBUG
-#include "llvm/ADT/SparseBitVector.h"
-#endif
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Timer.h"
#include <cstdlib>
#include <queue>
using namespace llvm;
-STATISTIC(NumAssigned , "Number of registers assigned");
-STATISTIC(NumUnassigned , "Number of registers unassigned");
-STATISTIC(NumNewQueued , "Number of new live ranges queued");
-
static RegisterRegAlloc basicRegAlloc("basic", "basic register allocator",
createBasicRegisterAllocator);
-// Temporary verification option until we can put verification inside
-// MachineVerifier.
-static cl::opt<bool, true>
-VerifyRegAlloc("verify-regalloc", cl::location(RegAllocBase::VerifyEnabled),
- cl::desc("Verify during register allocation"));
-
-const char *RegAllocBase::TimerGroupName = "Register Allocation";
-bool RegAllocBase::VerifyEnabled = false;
-
namespace {
struct CompSpillWeight {
bool operator()(LiveInterval *A, LiveInterval *B) const {
@@ -93,6 +72,11 @@ class RABasic : public MachineFunctionPass, public RegAllocBase
std::auto_ptr<Spiller> SpillerInstance;
std::priority_queue<LiveInterval*, std::vector<LiveInterval*>,
CompSpillWeight> Queue;
+
+ // Scratch space. Allocated here to avoid repeated malloc calls in
+ // selectOrSplit().
+ BitVector UsableRegs;
+
public:
RABasic();
@@ -128,6 +112,15 @@ public:
/// Perform register allocation.
virtual bool runOnMachineFunction(MachineFunction &mf);
+ // Helper for spilling all live virtual registers currently unified under
+ // PhysReg that interfere with VirtReg. Return true if spilling was
+ // successful, and append any new spilled/split intervals to SplitVRegs.
+ bool spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs);
+
+ void spillReg(LiveInterval &VirtReg, unsigned PhysReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs);
+
static char ID;
};
@@ -139,8 +132,8 @@ RABasic::RABasic(): MachineFunctionPass(ID) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
- initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
+ initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
@@ -157,9 +150,6 @@ void RABasic::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addPreserved<LiveDebugVariables>();
- if (StrongPHIElim)
- AU.addRequiredID(StrongPHIEliminationID);
- AU.addRequiredTransitiveID(RegisterCoalescerPassID);
AU.addRequired<CalculateSpillWeights>();
AU.addRequired<LiveStacks>();
AU.addPreserved<LiveStacks>();
@@ -178,204 +168,10 @@ void RABasic::releaseMemory() {
RegAllocBase::releaseMemory();
}
-#ifndef NDEBUG
-// Verify each LiveIntervalUnion.
-void RegAllocBase::verify() {
- LiveVirtRegBitSet VisitedVRegs;
- OwningArrayPtr<LiveVirtRegBitSet>
- unionVRegs(new LiveVirtRegBitSet[PhysReg2LiveUnion.numRegs()]);
-
- // Verify disjoint unions.
- for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
- DEBUG(PhysReg2LiveUnion[PhysReg].print(dbgs(), TRI));
- LiveVirtRegBitSet &VRegs = unionVRegs[PhysReg];
- PhysReg2LiveUnion[PhysReg].verify(VRegs);
- // Union + intersection test could be done efficiently in one pass, but
- // don't add a method to SparseBitVector unless we really need it.
- assert(!VisitedVRegs.intersects(VRegs) && "vreg in multiple unions");
- VisitedVRegs |= VRegs;
- }
-
- // Verify vreg coverage.
- for (LiveIntervals::iterator liItr = LIS->begin(), liEnd = LIS->end();
- liItr != liEnd; ++liItr) {
- unsigned reg = liItr->first;
- if (TargetRegisterInfo::isPhysicalRegister(reg)) continue;
- if (!VRM->hasPhys(reg)) continue; // spilled?
- unsigned PhysReg = VRM->getPhys(reg);
- if (!unionVRegs[PhysReg].test(reg)) {
- dbgs() << "LiveVirtReg " << reg << " not in union " <<
- TRI->getName(PhysReg) << "\n";
- llvm_unreachable("unallocated live vreg");
- }
- }
- // FIXME: I'm not sure how to verify spilled intervals.
-}
-#endif //!NDEBUG
-
-//===----------------------------------------------------------------------===//
-// RegAllocBase Implementation
-//===----------------------------------------------------------------------===//
-
-// Instantiate a LiveIntervalUnion for each physical register.
-void RegAllocBase::LiveUnionArray::init(LiveIntervalUnion::Allocator &allocator,
- unsigned NRegs) {
- NumRegs = NRegs;
- Array =
- static_cast<LiveIntervalUnion*>(malloc(sizeof(LiveIntervalUnion)*NRegs));
- for (unsigned r = 0; r != NRegs; ++r)
- new(Array + r) LiveIntervalUnion(r, allocator);
-}
-
-void RegAllocBase::init(VirtRegMap &vrm, LiveIntervals &lis) {
- NamedRegionTimer T("Initialize", TimerGroupName, TimePassesIsEnabled);
- TRI = &vrm.getTargetRegInfo();
- MRI = &vrm.getRegInfo();
- VRM = &vrm;
- LIS = &lis;
- RegClassInfo.runOnMachineFunction(vrm.getMachineFunction());
-
- const unsigned NumRegs = TRI->getNumRegs();
- if (NumRegs != PhysReg2LiveUnion.numRegs()) {
- PhysReg2LiveUnion.init(UnionAllocator, NumRegs);
- // Cache an interferece query for each physical reg
- Queries.reset(new LiveIntervalUnion::Query[PhysReg2LiveUnion.numRegs()]);
- }
-}
-
-void RegAllocBase::LiveUnionArray::clear() {
- if (!Array)
- return;
- for (unsigned r = 0; r != NumRegs; ++r)
- Array[r].~LiveIntervalUnion();
- free(Array);
- NumRegs = 0;
- Array = 0;
-}
-
-void RegAllocBase::releaseMemory() {
- for (unsigned r = 0, e = PhysReg2LiveUnion.numRegs(); r != e; ++r)
- PhysReg2LiveUnion[r].clear();
-}
-
-// Visit all the live registers. If they are already assigned to a physical
-// register, unify them with the corresponding LiveIntervalUnion, otherwise push
-// them on the priority queue for later assignment.
-void RegAllocBase::seedLiveRegs() {
- NamedRegionTimer T("Seed Live Regs", TimerGroupName, TimePassesIsEnabled);
- for (LiveIntervals::iterator I = LIS->begin(), E = LIS->end(); I != E; ++I) {
- unsigned RegNum = I->first;
- LiveInterval &VirtReg = *I->second;
- if (TargetRegisterInfo::isPhysicalRegister(RegNum))
- PhysReg2LiveUnion[RegNum].unify(VirtReg);
- else
- enqueue(&VirtReg);
- }
-}
-
-void RegAllocBase::assign(LiveInterval &VirtReg, unsigned PhysReg) {
- DEBUG(dbgs() << "assigning " << PrintReg(VirtReg.reg, TRI)
- << " to " << PrintReg(PhysReg, TRI) << '\n');
- assert(!VRM->hasPhys(VirtReg.reg) && "Duplicate VirtReg assignment");
- VRM->assignVirt2Phys(VirtReg.reg, PhysReg);
- MRI->setPhysRegUsed(PhysReg);
- PhysReg2LiveUnion[PhysReg].unify(VirtReg);
- ++NumAssigned;
-}
-
-void RegAllocBase::unassign(LiveInterval &VirtReg, unsigned PhysReg) {
- DEBUG(dbgs() << "unassigning " << PrintReg(VirtReg.reg, TRI)
- << " from " << PrintReg(PhysReg, TRI) << '\n');
- assert(VRM->getPhys(VirtReg.reg) == PhysReg && "Inconsistent unassign");
- PhysReg2LiveUnion[PhysReg].extract(VirtReg);
- VRM->clearVirt(VirtReg.reg);
- ++NumUnassigned;
-}
-
-// Top-level driver to manage the queue of unassigned VirtRegs and call the
-// selectOrSplit implementation.
-void RegAllocBase::allocatePhysRegs() {
- seedLiveRegs();
-
- // Continue assigning vregs one at a time to available physical registers.
- while (LiveInterval *VirtReg = dequeue()) {
- assert(!VRM->hasPhys(VirtReg->reg) && "Register already assigned");
-
- // Unused registers can appear when the spiller coalesces snippets.
- if (MRI->reg_nodbg_empty(VirtReg->reg)) {
- DEBUG(dbgs() << "Dropping unused " << *VirtReg << '\n');
- LIS->removeInterval(VirtReg->reg);
- continue;
- }
-
- // Invalidate all interference queries, live ranges could have changed.
- invalidateVirtRegs();
-
- // selectOrSplit requests the allocator to return an available physical
- // register if possible and populate a list of new live intervals that
- // result from splitting.
- DEBUG(dbgs() << "\nselectOrSplit "
- << MRI->getRegClass(VirtReg->reg)->getName()
- << ':' << *VirtReg << '\n');
- typedef SmallVector<LiveInterval*, 4> VirtRegVec;
- VirtRegVec SplitVRegs;
- unsigned AvailablePhysReg = selectOrSplit(*VirtReg, SplitVRegs);
-
- if (AvailablePhysReg == ~0u) {
- // selectOrSplit failed to find a register!
- const char *Msg = "ran out of registers during register allocation";
- // Probably caused by an inline asm.
- MachineInstr *MI;
- for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(VirtReg->reg);
- (MI = I.skipInstruction());)
- if (MI->isInlineAsm())
- break;
- if (MI)
- MI->emitError(Msg);
- else
- report_fatal_error(Msg);
- // Keep going after reporting the error.
- VRM->assignVirt2Phys(VirtReg->reg,
- RegClassInfo.getOrder(MRI->getRegClass(VirtReg->reg)).front());
- continue;
- }
-
- if (AvailablePhysReg)
- assign(*VirtReg, AvailablePhysReg);
-
- for (VirtRegVec::iterator I = SplitVRegs.begin(), E = SplitVRegs.end();
- I != E; ++I) {
- LiveInterval *SplitVirtReg = *I;
- assert(!VRM->hasPhys(SplitVirtReg->reg) && "Register already assigned");
- if (MRI->reg_nodbg_empty(SplitVirtReg->reg)) {
- DEBUG(dbgs() << "not queueing unused " << *SplitVirtReg << '\n');
- LIS->removeInterval(SplitVirtReg->reg);
- continue;
- }
- DEBUG(dbgs() << "queuing new interval: " << *SplitVirtReg << "\n");
- assert(TargetRegisterInfo::isVirtualRegister(SplitVirtReg->reg) &&
- "expect split value in virtual register");
- enqueue(SplitVirtReg);
- ++NumNewQueued;
- }
- }
-}
-
-// Check if this live virtual register interferes with a physical register. If
-// not, then check for interference on each register that aliases with the
-// physical register. Return the interfering register.
-unsigned RegAllocBase::checkPhysRegInterference(LiveInterval &VirtReg,
- unsigned PhysReg) {
- for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI)
- if (query(VirtReg, *AliasI).checkInterference())
- return *AliasI;
- return 0;
-}
-
-// Helper for spillInteferences() that spills all interfering vregs currently
+// Helper for spillInterferences() that spills all interfering vregs currently
// assigned to this physical register.
-void RegAllocBase::spillReg(LiveInterval& VirtReg, unsigned PhysReg,
- SmallVectorImpl<LiveInterval*> &SplitVRegs) {
+void RABasic::spillReg(LiveInterval& VirtReg, unsigned PhysReg,
+ SmallVectorImpl<LiveInterval*> &SplitVRegs) {
LiveIntervalUnion::Query &Q = query(VirtReg, PhysReg);
assert(Q.seenAllInterferences() && "need collectInterferences()");
const SmallVectorImpl<LiveInterval*> &PendingSpills = Q.interferingVRegs();
@@ -391,7 +187,7 @@ void RegAllocBase::spillReg(LiveInterval& VirtReg, unsigned PhysReg,
unassign(SpilledVReg, PhysReg);
// Spill the extracted interval.
- LiveRangeEdit LRE(SpilledVReg, SplitVRegs, 0, &PendingSpills);
+ LiveRangeEdit LRE(SpilledVReg, SplitVRegs, *MF, *LIS, VRM);
spiller().spill(LRE);
}
// After extracting segments, the query's results are invalid. But keep the
@@ -402,14 +198,13 @@ void RegAllocBase::spillReg(LiveInterval& VirtReg, unsigned PhysReg,
// Spill or split all live virtual registers currently unified under PhysReg
// that interfere with VirtReg. The newly spilled or split live intervals are
// returned by appending them to SplitVRegs.
-bool
-RegAllocBase::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
+bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
SmallVectorImpl<LiveInterval*> &SplitVRegs) {
// Record each interference and determine if all are spillable before mutating
// either the union or live intervals.
unsigned NumInterferences = 0;
// Collect interferences assigned to any alias of the physical register.
- for (const unsigned *asI = TRI->getOverlaps(PhysReg); *asI; ++asI) {
+ for (const uint16_t *asI = TRI->getOverlaps(PhysReg); *asI; ++asI) {
LiveIntervalUnion::Query &QAlias = query(VirtReg, *asI);
NumInterferences += QAlias.collectInterferingVRegs();
if (QAlias.seenUnspillableVReg()) {
@@ -421,52 +216,11 @@ RegAllocBase::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
assert(NumInterferences > 0 && "expect interference");
// Spill each interfering vreg allocated to PhysReg or an alias.
- for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI)
+ for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI)
spillReg(VirtReg, *AliasI, SplitVRegs);
return true;
}
-// Add newly allocated physical registers to the MBB live in sets.
-void RegAllocBase::addMBBLiveIns(MachineFunction *MF) {
- NamedRegionTimer T("MBB Live Ins", TimerGroupName, TimePassesIsEnabled);
- SlotIndexes *Indexes = LIS->getSlotIndexes();
- if (MF->size() <= 1)
- return;
-
- LiveIntervalUnion::SegmentIter SI;
- for (unsigned PhysReg = 0; PhysReg < PhysReg2LiveUnion.numRegs(); ++PhysReg) {
- LiveIntervalUnion &LiveUnion = PhysReg2LiveUnion[PhysReg];
- if (LiveUnion.empty())
- continue;
- DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " live-in:");
- MachineFunction::iterator MBB = llvm::next(MF->begin());
- MachineFunction::iterator MFE = MF->end();
- SlotIndex Start, Stop;
- tie(Start, Stop) = Indexes->getMBBRange(MBB);
- SI.setMap(LiveUnion.getMap());
- SI.find(Start);
- while (SI.valid()) {
- if (SI.start() <= Start) {
- if (!MBB->isLiveIn(PhysReg))
- MBB->addLiveIn(PhysReg);
- DEBUG(dbgs() << "\tBB#" << MBB->getNumber() << ':'
- << PrintReg(SI.value()->reg, TRI));
- } else if (SI.start() > Stop)
- MBB = Indexes->getMBBFromIndex(SI.start().getPrevIndex());
- if (++MBB == MFE)
- break;
- tie(Start, Stop) = Indexes->getMBBRange(MBB);
- SI.advanceTo(Start);
- }
- DEBUG(dbgs() << '\n');
- }
-}
-
-
-//===----------------------------------------------------------------------===//
-// RABasic Implementation
-//===----------------------------------------------------------------------===//
-
// Driver for the register assignment and splitting heuristics.
// Manages iteration over the LiveIntervalUnions.
//
@@ -481,6 +235,10 @@ void RegAllocBase::addMBBLiveIns(MachineFunction *MF) {
// selectOrSplit().
unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<LiveInterval*> &SplitVRegs) {
+ // Check for register mask interference. When live ranges cross calls, the
+ // set of usable registers is reduced to the callee-saved ones.
+ bool CrossRegMasks = LIS->checkRegMaskInterference(VirtReg, UsableRegs);
+
// Populate a list of physical register spill candidates.
SmallVector<unsigned, 8> PhysRegSpillCands;
@@ -491,6 +249,11 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
++I) {
unsigned PhysReg = *I;
+ // If PhysReg is clobbered by a register mask, it isn't useful for
+ // allocation or spilling.
+ if (CrossRegMasks && !UsableRegs.test(PhysReg))
+ continue;
+
// Check interference and, as a side effect, initialize queries for this
// VirtReg and its aliases.
unsigned interfReg = checkPhysRegInterference(VirtReg, PhysReg);
@@ -498,9 +261,9 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
// Found an available register.
return PhysReg;
}
- Queries[interfReg].collectInterferingVRegs(1);
- LiveInterval *interferingVirtReg =
- Queries[interfReg].interferingVRegs().front();
+ LiveIntervalUnion::Query &IntfQ = query(VirtReg, interfReg);
+ IntfQ.collectInterferingVRegs(1);
+ LiveInterval *interferingVirtReg = IntfQ.interferingVRegs().front();
// The current VirtReg must either be spillable, or one of its interferences
// must have less spill weight.
@@ -524,7 +287,7 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
DEBUG(dbgs() << "spilling: " << VirtReg << '\n');
if (!VirtReg.isSpillable())
return ~0u;
- LiveRangeEdit LRE(VirtReg, SplitVRegs);
+ LiveRangeEdit LRE(VirtReg, SplitVRegs, *MF, *LIS, VRM);
spiller().spill(LRE);
// The live virtual register requesting allocation was spilled, so tell
@@ -579,7 +342,10 @@ bool RABasic::runOnMachineFunction(MachineFunction &mf) {
// Write out new DBG_VALUE instructions.
getAnalysis<LiveDebugVariables>().emitDebugValues(VRM);
- // The pass output is in VirtRegMap. Release all the transient data.
+ // All machine operands and other references to virtual registers have been
+ // replaced. Remove the virtual registers and release all the transient data.
+ VRM->clearAllVirt();
+ MRI->clearVirtRegs();
releaseMemory();
return true;
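The UsableRegs logic added to selectOrSplit() above follows a reusable pattern: query LiveIntervals once per virtual register for the set of physical registers that survive the register masks (typically calls) its live range crosses, then filter candidates against that set. A condensed sketch (hypothetical free function using the same API calls as the hunk above):

    static unsigned firstMaskSafeReg(LiveIntervals &LIS, LiveInterval &VirtReg,
                                     ArrayRef<unsigned> Order) {
      BitVector UsableRegs;
      // Fills UsableRegs and returns true only if the range crosses a mask.
      bool CrossMask = LIS.checkRegMaskInterference(VirtReg, UsableRegs);
      for (unsigned i = 0, e = Order.size(); i != e; ++i) {
        unsigned PhysReg = Order[i];
        if (CrossMask && !UsableRegs.test(PhysReg))
          continue; // clobbered by a call the live range crosses
        return PhysReg; // mask-safe; interference still needs checking
      }
      return 0; // no mask-safe candidate in this order
    }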
diff --git a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
index b36a445..e09b7f8 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -32,6 +32,7 @@
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
@@ -49,10 +50,7 @@ namespace {
public:
static char ID;
RAFast() : MachineFunctionPass(ID), StackSlotForVirtReg(-1),
- isBulkSpilling(false) {
- initializePHIEliminationPass(*PassRegistry::getPassRegistry());
- initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
- }
+ isBulkSpilling(false) {}
private:
const TargetMachine *TM;
MachineFunction *MF;
@@ -71,16 +69,20 @@ namespace {
// Everything we know about a live virtual register.
struct LiveReg {
MachineInstr *LastUse; // Last instr to use reg.
+ unsigned VirtReg; // Virtual register number.
unsigned PhysReg; // Currently held here.
unsigned short LastOpNum; // OpNum on LastUse.
bool Dirty; // Register needs spill.
- LiveReg(unsigned p=0) : LastUse(0), PhysReg(p), LastOpNum(0),
- Dirty(false) {}
+ explicit LiveReg(unsigned v)
+ : LastUse(0), VirtReg(v), PhysReg(0), LastOpNum(0), Dirty(false) {}
+
+ unsigned getSparseSetKey() const {
+ return TargetRegisterInfo::virtReg2Index(VirtReg);
+ }
};
- typedef DenseMap<unsigned, LiveReg> LiveRegMap;
- typedef LiveRegMap::value_type LiveRegEntry;
+ typedef SparseSet<LiveReg> LiveRegMap;
// LiveVirtRegs - This map contains entries for each virtual register
// that is currently available in a physical register.
@@ -137,8 +139,6 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
- AU.addRequiredID(PHIEliminationID);
- AU.addRequiredID(TwoAddressInstructionPassID);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -159,14 +159,23 @@ namespace {
void usePhysReg(MachineOperand&);
void definePhysReg(MachineInstr *MI, unsigned PhysReg, RegState NewState);
unsigned calcSpillCost(unsigned PhysReg) const;
- void assignVirtToPhysReg(LiveRegEntry &LRE, unsigned PhysReg);
- void allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint);
+ void assignVirtToPhysReg(LiveReg&, unsigned PhysReg);
+ LiveRegMap::iterator findLiveVirtReg(unsigned VirtReg) {
+ return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
+ }
+ LiveRegMap::const_iterator findLiveVirtReg(unsigned VirtReg) const {
+ return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
+ }
+ LiveRegMap::iterator assignVirtToPhysReg(unsigned VReg, unsigned PhysReg);
+ LiveRegMap::iterator allocVirtReg(MachineInstr *MI, LiveRegMap::iterator,
+ unsigned Hint);
LiveRegMap::iterator defineVirtReg(MachineInstr *MI, unsigned OpNum,
unsigned VirtReg, unsigned Hint);
LiveRegMap::iterator reloadVirtReg(MachineInstr *MI, unsigned OpNum,
unsigned VirtReg, unsigned Hint);
void spillAll(MachineInstr *MI);
bool setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg);
+ void addRetOperands(MachineBasicBlock *MBB);
};
char RAFast::ID = 0;
}
@@ -222,10 +231,10 @@ void RAFast::addKillFlag(const LiveReg &LR) {
/// killVirtReg - Mark virtreg as no longer available.
void RAFast::killVirtReg(LiveRegMap::iterator LRI) {
- addKillFlag(LRI->second);
- const LiveReg &LR = LRI->second;
- assert(PhysRegState[LR.PhysReg] == LRI->first && "Broken RegState mapping");
- PhysRegState[LR.PhysReg] = regFree;
+ addKillFlag(*LRI);
+ assert(PhysRegState[LRI->PhysReg] == LRI->VirtReg &&
+ "Broken RegState mapping");
+ PhysRegState[LRI->PhysReg] = regFree;
// Erase from LiveVirtRegs unless we're spilling in bulk.
if (!isBulkSpilling)
LiveVirtRegs.erase(LRI);
@@ -235,7 +244,7 @@ void RAFast::killVirtReg(LiveRegMap::iterator LRI) {
void RAFast::killVirtReg(unsigned VirtReg) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"killVirtReg needs a virtual register");
- LiveRegMap::iterator LRI = LiveVirtRegs.find(VirtReg);
+ LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
if (LRI != LiveVirtRegs.end())
killVirtReg(LRI);
}
@@ -245,7 +254,7 @@ void RAFast::killVirtReg(unsigned VirtReg) {
void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg) {
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Spilling a physical register is illegal!");
- LiveRegMap::iterator LRI = LiveVirtRegs.find(VirtReg);
+ LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
assert(LRI != LiveVirtRegs.end() && "Spilling unmapped virtual register");
spillVirtReg(MI, LRI);
}
@@ -253,18 +262,18 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg) {
/// spillVirtReg - Do the actual work of spilling.
void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
LiveRegMap::iterator LRI) {
- LiveReg &LR = LRI->second;
- assert(PhysRegState[LR.PhysReg] == LRI->first && "Broken RegState mapping");
+ LiveReg &LR = *LRI;
+ assert(PhysRegState[LR.PhysReg] == LRI->VirtReg && "Broken RegState mapping");
if (LR.Dirty) {
// If this physreg is used by the instruction, we want to kill it on the
// instruction, not on the spill.
bool SpillKill = LR.LastUse != MI;
LR.Dirty = false;
- DEBUG(dbgs() << "Spilling " << PrintReg(LRI->first, TRI)
+ DEBUG(dbgs() << "Spilling " << PrintReg(LRI->VirtReg, TRI)
<< " in " << PrintReg(LR.PhysReg, TRI));
- const TargetRegisterClass *RC = MRI->getRegClass(LRI->first);
- int FI = getStackSpaceFor(LRI->first, RC);
+ const TargetRegisterClass *RC = MRI->getRegClass(LRI->VirtReg);
+ int FI = getStackSpaceFor(LRI->VirtReg, RC);
DEBUG(dbgs() << " to stack slot #" << FI << "\n");
TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, RC, TRI);
++NumStores; // Update statistics
@@ -272,7 +281,8 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
// If this register is used by DBG_VALUE then insert new DBG_VALUE to
// identify spilled location as the place to find corresponding variable's
// value.
- SmallVector<MachineInstr *, 4> &LRIDbgValues = LiveDbgValueMap[LRI->first];
+ SmallVector<MachineInstr *, 4> &LRIDbgValues =
+ LiveDbgValueMap[LRI->VirtReg];
for (unsigned li = 0, le = LRIDbgValues.size(); li != le; ++li) {
MachineInstr *DBG = LRIDbgValues[li];
const MDNode *MDPtr =
@@ -295,8 +305,9 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV);
}
}
- // Now this register is spilled there is should not be any DBG_VALUE pointing
- // to this register because they are all pointing to spilled value now.
+ // Now that this register is spilled, there should not be any DBG_VALUE
+ // pointing to this register because they all now point to the spilled
+ // value.
LRIDbgValues.clear();
if (SpillKill)
LR.LastUse = 0; // Don't kill register again
@@ -343,7 +354,7 @@ void RAFast::usePhysReg(MachineOperand &MO) {
}
// Maybe a superregister is reserved?
- for (const unsigned *AS = TRI->getAliasSet(PhysReg);
+ for (const uint16_t *AS = TRI->getAliasSet(PhysReg);
unsigned Alias = *AS; ++AS) {
switch (PhysRegState[Alias]) {
case regDisabled:
@@ -397,7 +408,7 @@ void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg,
// This is a disabled register, disable all aliases.
PhysRegState[PhysReg] = NewState;
- for (const unsigned *AS = TRI->getAliasSet(PhysReg);
+ for (const uint16_t *AS = TRI->getAliasSet(PhysReg);
unsigned Alias = *AS; ++AS) {
switch (unsigned VirtReg = PhysRegState[Alias]) {
case regDisabled:
@@ -435,14 +446,17 @@ unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
DEBUG(dbgs() << PrintReg(VirtReg, TRI) << " corresponding "
<< PrintReg(PhysReg, TRI) << " is reserved already.\n");
return spillImpossible;
- default:
- return LiveVirtRegs.lookup(VirtReg).Dirty ? spillDirty : spillClean;
+ default: {
+ LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
+ assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
+ return I->Dirty ? spillDirty : spillClean;
+ }
}
// This is a disabled register, add up cost of aliases.
DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is disabled.\n");
unsigned Cost = 0;
- for (const unsigned *AS = TRI->getAliasSet(PhysReg);
+ for (const uint16_t *AS = TRI->getAliasSet(PhysReg);
unsigned Alias = *AS; ++AS) {
if (UsedInInstr.test(Alias))
return spillImpossible;
@@ -454,10 +468,13 @@ unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
break;
case regReserved:
return spillImpossible;
- default:
- Cost += LiveVirtRegs.lookup(VirtReg).Dirty ? spillDirty : spillClean;
+ default: {
+ LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
+ assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
+ Cost += I->Dirty ? spillDirty : spillClean;
break;
}
+ }
}
return Cost;
}
@@ -467,17 +484,27 @@ unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
/// that PhysReg is the proper container for VirtReg now. The physical
/// register must not be used for anything else when this is called.
///
-void RAFast::assignVirtToPhysReg(LiveRegEntry &LRE, unsigned PhysReg) {
- DEBUG(dbgs() << "Assigning " << PrintReg(LRE.first, TRI) << " to "
+void RAFast::assignVirtToPhysReg(LiveReg &LR, unsigned PhysReg) {
+ DEBUG(dbgs() << "Assigning " << PrintReg(LR.VirtReg, TRI) << " to "
<< PrintReg(PhysReg, TRI) << "\n");
- PhysRegState[PhysReg] = LRE.first;
- assert(!LRE.second.PhysReg && "Already assigned a physreg");
- LRE.second.PhysReg = PhysReg;
+ PhysRegState[PhysReg] = LR.VirtReg;
+ assert(!LR.PhysReg && "Already assigned a physreg");
+ LR.PhysReg = PhysReg;
+}
+
+RAFast::LiveRegMap::iterator
+RAFast::assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg) {
+ LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
+ assert(LRI != LiveVirtRegs.end() && "VirtReg disappeared");
+ assignVirtToPhysReg(*LRI, PhysReg);
+ return LRI;
}
/// allocVirtReg - Allocate a physical register for VirtReg.
-void RAFast::allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint) {
- const unsigned VirtReg = LRE.first;
+RAFast::LiveRegMap::iterator RAFast::allocVirtReg(MachineInstr *MI,
+ LiveRegMap::iterator LRI,
+ unsigned Hint) {
+ const unsigned VirtReg = LRI->VirtReg;
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
"Can only allocate virtual registers");
@@ -496,7 +523,9 @@ void RAFast::allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint) {
if (Cost < spillDirty) {
if (Cost)
definePhysReg(MI, Hint, regFree);
- return assignVirtToPhysReg(LRE, Hint);
+ // definePhysReg may kill virtual registers and modify LiveVirtRegs.
+ // That invalidates LRI, so run a new lookup for VirtReg.
+ return assignVirtToPhysReg(VirtReg, Hint);
}
}
@@ -505,8 +534,10 @@ void RAFast::allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint) {
// First try to find a completely free register.
for (ArrayRef<unsigned>::iterator I = AO.begin(), E = AO.end(); I != E; ++I) {
unsigned PhysReg = *I;
- if (PhysRegState[PhysReg] == regFree && !UsedInInstr.test(PhysReg))
- return assignVirtToPhysReg(LRE, PhysReg);
+ if (PhysRegState[PhysReg] == regFree && !UsedInInstr.test(PhysReg)) {
+ assignVirtToPhysReg(*LRI, PhysReg);
+ return LRI;
+ }
}
DEBUG(dbgs() << "Allocating " << PrintReg(VirtReg) << " from "
@@ -519,21 +550,25 @@ void RAFast::allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint) {
DEBUG(dbgs() << "\tCost: " << Cost << "\n");
DEBUG(dbgs() << "\tBestCost: " << BestCost << "\n");
// Cost is 0 when all aliases are already disabled.
- if (Cost == 0)
- return assignVirtToPhysReg(LRE, *I);
+ if (Cost == 0) {
+ assignVirtToPhysReg(*LRI, *I);
+ return LRI;
+ }
if (Cost < BestCost)
BestReg = *I, BestCost = Cost;
}
if (BestReg) {
definePhysReg(MI, BestReg, regFree);
- return assignVirtToPhysReg(LRE, BestReg);
+ // definePhysReg may kill virtual registers and modify LiveVirtRegs.
+ // That invalidates LRI, so run a new lookup for VirtReg.
+ return assignVirtToPhysReg(VirtReg, BestReg);
}
// Nothing we can do. Report an error and keep going with a bad allocation.
MI->emitError("ran out of registers during register allocation");
definePhysReg(MI, *AO.begin(), regFree);
- assignVirtToPhysReg(LRE, *AO.begin());
+ return assignVirtToPhysReg(VirtReg, *AO.begin());
}
/// defineVirtReg - Allocate a register for VirtReg and mark it as dirty.
@@ -544,8 +579,7 @@ RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum,
"Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
- tie(LRI, New) = LiveVirtRegs.insert(std::make_pair(VirtReg, LiveReg()));
- LiveReg &LR = LRI->second;
+ tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
if (New) {
// If there is no hint, peek at the only use of this register.
if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
@@ -555,18 +589,18 @@ RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum,
if (UseMI.isCopyLike())
Hint = UseMI.getOperand(0).getReg();
}
- allocVirtReg(MI, *LRI, Hint);
- } else if (LR.LastUse) {
+ LRI = allocVirtReg(MI, LRI, Hint);
+ } else if (LRI->LastUse) {
// Redefining a live register - kill at the last use, unless it is this
// instruction defining VirtReg multiple times.
- if (LR.LastUse != MI || LR.LastUse->getOperand(LR.LastOpNum).isUse())
- addKillFlag(LR);
+ if (LRI->LastUse != MI || LRI->LastUse->getOperand(LRI->LastOpNum).isUse())
+ addKillFlag(*LRI);
}
- assert(LR.PhysReg && "Register not assigned");
- LR.LastUse = MI;
- LR.LastOpNum = OpNum;
- LR.Dirty = true;
- UsedInInstr.set(LR.PhysReg);
+ assert(LRI->PhysReg && "Register not assigned");
+ LRI->LastUse = MI;
+ LRI->LastOpNum = OpNum;
+ LRI->Dirty = true;
+ UsedInInstr.set(LRI->PhysReg);
return LRI;
}
@@ -578,18 +612,17 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
"Not a virtual register");
LiveRegMap::iterator LRI;
bool New;
- tie(LRI, New) = LiveVirtRegs.insert(std::make_pair(VirtReg, LiveReg()));
- LiveReg &LR = LRI->second;
+ tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
MachineOperand &MO = MI->getOperand(OpNum);
if (New) {
- allocVirtReg(MI, *LRI, Hint);
+ LRI = allocVirtReg(MI, LRI, Hint);
const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
int FrameIndex = getStackSpaceFor(VirtReg, RC);
DEBUG(dbgs() << "Reloading " << PrintReg(VirtReg, TRI) << " into "
- << PrintReg(LR.PhysReg, TRI) << "\n");
- TII->loadRegFromStackSlot(*MBB, MI, LR.PhysReg, FrameIndex, RC, TRI);
+ << PrintReg(LRI->PhysReg, TRI) << "\n");
+ TII->loadRegFromStackSlot(*MBB, MI, LRI->PhysReg, FrameIndex, RC, TRI);
++NumLoads;
- } else if (LR.Dirty) {
+ } else if (LRI->Dirty) {
if (isLastUseOfLocalReg(MO)) {
DEBUG(dbgs() << "Killing last use: " << MO << "\n");
if (MO.isUse())
@@ -614,10 +647,10 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
MO.setIsDead(false);
}
- assert(LR.PhysReg && "Register not assigned");
- LR.LastUse = MI;
- LR.LastOpNum = OpNum;
- UsedInInstr.set(LR.PhysReg);
+ assert(LRI->PhysReg && "Register not assigned");
+ LRI->LastUse = MI;
+ LRI->LastOpNum = OpNum;
+ UsedInInstr.set(LRI->PhysReg);
return LRI;
}
@@ -674,7 +707,7 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
UsedInInstr.set(Reg);
if (ThroughRegs.count(PhysRegState[Reg]))
definePhysReg(MI, Reg, regFree);
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
+ for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
UsedInInstr.set(*AS);
if (ThroughRegs.count(PhysRegState[*AS]))
definePhysReg(MI, *AS, regFree);
@@ -682,7 +715,7 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
}
SmallVector<unsigned, 8> PartialDefs;
- DEBUG(dbgs() << "Allocating tied uses and early clobbers.\n");
+ DEBUG(dbgs() << "Allocating tied uses.\n");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
@@ -694,7 +727,7 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
DEBUG(dbgs() << "Operand " << i << "("<< MO << ") is tied to operand "
<< DefIdx << ".\n");
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
- unsigned PhysReg = LRI->second.PhysReg;
+ unsigned PhysReg = LRI->PhysReg;
setPhysReg(MI, i, PhysReg);
// Note: we don't update the def operand yet. That would cause the normal
// def-scan to attempt spilling.
@@ -703,16 +736,25 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
// Reload the register, but don't assign to the operand just yet.
// That would confuse the later phys-def processing pass.
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
- PartialDefs.push_back(LRI->second.PhysReg);
- } else if (MO.isEarlyClobber()) {
- // Note: defineVirtReg may invalidate MO.
- LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
- unsigned PhysReg = LRI->second.PhysReg;
- if (setPhysReg(MI, i, PhysReg))
- VirtDead.push_back(Reg);
+ PartialDefs.push_back(LRI->PhysReg);
}
}
+ DEBUG(dbgs() << "Allocating early clobbers.\n");
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
+ if (!MO.isEarlyClobber())
+ continue;
+ // Note: defineVirtReg may invalidate MO.
+ LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
+ unsigned PhysReg = LRI->PhysReg;
+ if (setPhysReg(MI, i, PhysReg))
+ VirtDead.push_back(Reg);
+ }
+
// Restore UsedInInstr to a state usable for allocating normal virtual uses.
UsedInInstr.reset();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -730,32 +772,66 @@ void RAFast::handleThroughOperands(MachineInstr *MI,
UsedInInstr.set(PartialDefs[i]);
}
-void RAFast::AllocateBasicBlock() {
- DEBUG(dbgs() << "\nAllocating " << *MBB);
+/// addRetOperands - ensure that a return instruction has an operand for each
+/// value live out of the function.
+///
+/// Things marked both call and return are tail calls; do not do this for them.
+/// The tail callee need not take the same registers as input that it produces
+/// as output, and there are dependencies for its input registers elsewhere.
+///
+/// FIXME: This should be done as part of instruction selection, and this helper
+/// should be deleted. Until then, we use custom logic here to create the proper
+/// operand under all circumstances. We can't use addRegisterKilled because that
+/// doesn't make sense for undefined values. We can't simply avoid calling it
+/// for undefined values, because we must ensure that the operand always exists.
+void RAFast::addRetOperands(MachineBasicBlock *MBB) {
+ if (MBB->empty() || !MBB->back().isReturn() || MBB->back().isCall())
+ return;
+
+ MachineInstr *MI = &MBB->back();
+
+ for (MachineRegisterInfo::liveout_iterator
+ I = MBB->getParent()->getRegInfo().liveout_begin(),
+ E = MBB->getParent()->getRegInfo().liveout_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
+ "Cannot have a live-out virtual register.");
+
+ bool hasDef = PhysRegState[Reg] == regReserved;
+
+ // Check if this register already has an operand.
+ bool Found = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse())
+ continue;
+
+ unsigned OperReg = MO.getReg();
+ if (!TargetRegisterInfo::isPhysicalRegister(OperReg))
+ continue;
- // FIXME: This should probably be added by instruction selection instead?
- // If the last instruction in the block is a return, make sure to mark it as
- // using all of the live-out values in the function. Things marked both call
- // and return are tail calls; do not do this for them. The tail callee need
- // not take the same registers as input that it produces as output, and there
- // are dependencies for its input registers elsewhere.
- if (!MBB->empty() && MBB->back().getDesc().isReturn() &&
- !MBB->back().getDesc().isCall()) {
- MachineInstr *Ret = &MBB->back();
-
- for (MachineRegisterInfo::liveout_iterator
- I = MF->getRegInfo().liveout_begin(),
- E = MF->getRegInfo().liveout_end(); I != E; ++I) {
- assert(TargetRegisterInfo::isPhysicalRegister(*I) &&
- "Cannot have a live-out virtual register.");
-
- // Add live-out registers as implicit uses.
- Ret->addRegisterKilled(*I, TRI, true);
+ if (OperReg == Reg || TRI->isSuperRegister(OperReg, Reg)) {
+ // If the ret already has an operand for this physreg or a superset,
+ // don't duplicate it. Set the kill flag if the value is defined.
+ if (hasDef && !MO.isKill())
+ MO.setIsKill();
+ Found = true;
+ break;
+ }
}
+ if (!Found)
+ MI->addOperand(MachineOperand::CreateReg(Reg,
+ false /*IsDef*/,
+ true /*IsImp*/,
+ hasDef/*IsKill*/));
}
+}
+
+void RAFast::AllocateBasicBlock() {
+ DEBUG(dbgs() << "\nAllocating " << *MBB);
PhysRegState.assign(TRI->getNumRegs(), regDisabled);
- assert(LiveVirtRegs.empty() && "Mapping not cleared form last block?");
+ assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
MachineBasicBlock::iterator MII = MBB->begin();
@@ -783,25 +859,26 @@ void RAFast::AllocateBasicBlock() {
case regReserved:
dbgs() << "*";
break;
- default:
+ default: {
dbgs() << '=' << PrintReg(PhysRegState[Reg]);
- if (LiveVirtRegs[PhysRegState[Reg]].Dirty)
+ LiveRegMap::iterator I = findLiveVirtReg(PhysRegState[Reg]);
+ assert(I != LiveVirtRegs.end() && "Missing VirtReg entry");
+ if (I->Dirty)
dbgs() << "*";
- assert(LiveVirtRegs[PhysRegState[Reg]].PhysReg == Reg &&
- "Bad inverse map");
+ assert(I->PhysReg == Reg && "Bad inverse map");
break;
}
+ }
}
dbgs() << '\n';
// Check that LiveVirtRegs is the inverse.
for (LiveRegMap::iterator i = LiveVirtRegs.begin(),
e = LiveVirtRegs.end(); i != e; ++i) {
- assert(TargetRegisterInfo::isVirtualRegister(i->first) &&
+ assert(TargetRegisterInfo::isVirtualRegister(i->VirtReg) &&
"Bad map key");
- assert(TargetRegisterInfo::isPhysicalRegister(i->second.PhysReg) &&
+ assert(TargetRegisterInfo::isPhysicalRegister(i->PhysReg) &&
"Bad map value");
- assert(PhysRegState[i->second.PhysReg] == i->first &&
- "Bad inverse map");
+ assert(PhysRegState[i->PhysReg] == i->VirtReg && "Bad inverse map");
}
});
@@ -815,10 +892,9 @@ void RAFast::AllocateBasicBlock() {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
- LiveDbgValueMap[Reg].push_back(MI);
- LiveRegMap::iterator LRI = LiveVirtRegs.find(Reg);
+ LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
if (LRI != LiveVirtRegs.end())
- setPhysReg(MI, i, LRI->second.PhysReg);
+ setPhysReg(MI, i, LRI->PhysReg);
else {
int SS = StackSlotForVirtReg[Reg];
if (SS == -1) {
@@ -849,6 +925,7 @@ void RAFast::AllocateBasicBlock() {
}
}
}
+ LiveDbgValueMap[Reg].push_back(MI);
}
}
// Next instruction.
@@ -932,7 +1009,7 @@ void RAFast::AllocateBasicBlock() {
if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
if (MO.isUse()) {
LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, CopyDst);
- unsigned PhysReg = LRI->second.PhysReg;
+ unsigned PhysReg = LRI->PhysReg;
CopySrc = (CopySrc == Reg || CopySrc == PhysReg) ? PhysReg : 0;
if (setPhysReg(MI, i, PhysReg))
killVirtReg(LRI);
@@ -953,13 +1030,13 @@ void RAFast::AllocateBasicBlock() {
// Look for physreg defs and tied uses.
if (!MO.isDef() && !MI->isRegTiedToDefOperand(i)) continue;
UsedInInstr.set(Reg);
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ for (const uint16_t *AS = TRI->getAliasSet(Reg); *AS; ++AS)
UsedInInstr.set(*AS);
}
}
unsigned DefOpEnd = MI->getNumOperands();
- if (MCID.isCall()) {
+ if (MI->isCall()) {
// Spill all virtregs before a call. This serves two purposes: 1. If an
// exception is thrown, the landing pad is going to expect to find
// registers in their spill slots, and 2. we don't have to wade through
@@ -988,7 +1065,7 @@ void RAFast::AllocateBasicBlock() {
continue;
}
LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, CopySrc);
- unsigned PhysReg = LRI->second.PhysReg;
+ unsigned PhysReg = LRI->PhysReg;
if (setPhysReg(MI, i, PhysReg)) {
VirtDead.push_back(Reg);
CopyDst = 0; // cancel coalescing;
@@ -1024,6 +1101,9 @@ void RAFast::AllocateBasicBlock() {
MBB->erase(Coalesced[i]);
NumCopies += Coalesced.size();
+ // addRetOperands must run after we've seen all defs in this block.
+ addRetOperands(MBB);
+
DEBUG(MBB->dump());
}
@@ -1038,12 +1118,16 @@ bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
TM = &Fn.getTarget();
TRI = TM->getRegisterInfo();
TII = TM->getInstrInfo();
+ MRI->freezeReservedRegs(Fn);
RegClassInfo.runOnMachineFunction(Fn);
UsedInInstr.resize(TRI->getNumRegs());
+ assert(!MRI->isSSA() && "regalloc requires leaving SSA");
+
// initialize the virtual->physical register map to have a 'null'
// mapping for all virtual registers
StackSlotForVirtReg.resize(MRI->getNumVirtRegs());
+ LiveVirtRegs.setUniverse(MRI->getNumVirtRegs());
// Loop over all of the basic blocks, eliminating virtual register references
for (MachineFunction::iterator MBBi = Fn.begin(), MBBe = Fn.end();
@@ -1052,16 +1136,17 @@ bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
AllocateBasicBlock();
}
- // Make sure the set of used physregs is closed under subreg operations.
- MRI->closePhysRegsUsed(*TRI);
-
// Add the clobber lists for all the instructions we skipped earlier.
for (SmallPtrSet<const MCInstrDesc*, 4>::const_iterator
I = SkippedInstrs.begin(), E = SkippedInstrs.end(); I != E; ++I)
- if (const unsigned *Defs = (*I)->getImplicitDefs())
+ if (const uint16_t *Defs = (*I)->getImplicitDefs())
while (*Defs)
MRI->setPhysRegUsed(*Defs++);
+ // All machine operands and other references to virtual registers have been
+ // replaced. Remove the virtual registers.
+ MRI->clearVirtRegs();
+
SkippedInstrs.clear();
StackSlotForVirtReg.clear();
LiveDbgValueMap.clear();
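Two invariants make the DenseMap-to-SparseSet switch above work (my reading of the hunks): SparseSet keys must be small dense unsigneds, which is why LiveReg gained getSparseSetKey() returning virtReg2Index(VirtReg), and the key universe must be declared before any insertion, which is why runOnMachineFunction() now calls LiveVirtRegs.setUniverse(MRI->getNumVirtRegs()). A standalone sketch of the same usage pattern (hypothetical Entry type):

    #include "llvm/ADT/SparseSet.h"
    using namespace llvm;

    struct Entry {
      unsigned Key;     // dense index, e.g. virtReg2Index(VirtReg)
      unsigned Payload;
      explicit Entry(unsigned K) : Key(K), Payload(0) {}
      unsigned getSparseSetKey() const { return Key; }
    };

    void demo() {
      SparseSet<Entry> Set;
      Set.setUniverse(1024);          // must precede any insert or find
      Set.insert(Entry(7));           // amortized O(1), no hashing
      SparseSet<Entry>::iterator I = Set.find(7); // O(1) lookup by key
      if (I != Set.end())
        Set.erase(I);                 // O(1); may reorder remaining entries
    }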
diff --git a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
index f54a2c8..3f2a617 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -16,7 +16,6 @@
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
-#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
@@ -29,6 +28,7 @@
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -168,6 +168,19 @@ class RAGreedy : public MachineFunctionPass,
}
};
+ // Register mask interference. The current VirtReg is checked for register
+ // mask interference on entry to selectOrSplit(). If there is no
+ // interference, UsableRegs is left empty. If there is interference,
+ // UsableRegs has a bit mask of registers that can be used without register
+ // mask interference.
+ BitVector UsableRegs;
+
+ /// clobberedByRegMask - Returns true if PhysReg is not directly usable
+ /// because of register mask clobbers.
+ bool clobberedByRegMask(unsigned PhysReg) const {
+ return !UsableRegs.empty() && !UsableRegs.test(PhysReg);
+ }
+
// splitting state.
std::auto_ptr<SplitAnalysis> SA;
std::auto_ptr<SplitEditor> SE;
@@ -248,7 +261,6 @@ public:
static char ID;
private:
- void LRE_WillEraseInstruction(MachineInstr*);
bool LRE_CanEraseVirtReg(unsigned);
void LRE_WillShrinkVirtReg(unsigned);
void LRE_DidCloneVirtReg(unsigned, unsigned);
@@ -308,8 +320,8 @@ RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
- initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
+ initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
@@ -328,9 +340,6 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveDebugVariables>();
AU.addPreserved<LiveDebugVariables>();
- if (StrongPHIElim)
- AU.addRequiredID(StrongPHIEliminationID);
- AU.addRequiredTransitiveID(RegisterCoalescerPassID);
AU.addRequired<CalculateSpillWeights>();
AU.addRequired<LiveStacks>();
AU.addPreserved<LiveStacks>();
@@ -350,11 +359,6 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
// LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//
-void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
- // LRE itself will remove from SlotIndexes and parent basic block.
- VRM->RemoveMachineInstrFromMaps(MI);
-}
-
bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
unassign(LIS->getInterval(VirtReg), PhysReg);
@@ -424,13 +428,13 @@ void RAGreedy::enqueue(LiveInterval *LI) {
Prio |= (1u << 30);
}
- Queue.push(std::make_pair(Prio, Reg));
+ Queue.push(std::make_pair(Prio, ~Reg));
}
LiveInterval *RAGreedy::dequeue() {
if (Queue.empty())
return 0;
- LiveInterval *LI = &LIS->getInterval(Queue.top().second);
+ LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
Queue.pop();
return LI;
}
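// [Editor's sketch, not part of this patch] Why enqueue() now stores ~Reg:
// std::priority_queue pops the lexicographically largest pair, so with equal
// priorities the highest-numbered vreg used to come out first. Complementing
// the register flips that tie-break to ascending order, and dequeue() undoes
// it with a second complement. A self-contained demonstration:
#include <cstdio>
#include <queue>
#include <utility>
int main() {
  std::priority_queue<std::pair<unsigned, unsigned> > Q;
  for (unsigned Reg = 1; Reg <= 3; ++Reg)
    Q.push(std::make_pair(7u, ~Reg));     // three vregs, same priority
  while (!Q.empty()) {
    std::printf("%u\n", ~Q.top().second); // prints 1, 2, 3
    Q.pop();
  }
  return 0;
}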
@@ -446,9 +450,12 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
Order.rewind();
unsigned PhysReg;
- while ((PhysReg = Order.next()))
+ while ((PhysReg = Order.next())) {
+ if (clobberedByRegMask(PhysReg))
+ continue;
if (!checkPhysRegInterference(VirtReg, PhysReg))
break;
+ }
if (!PhysReg || Order.isHint(PhysReg))
return PhysReg;
@@ -457,7 +464,7 @@ unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
// If we missed a simple hint, try to cheaply evict interference from the
// preferred register.
if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
- if (Order.isHint(Hint)) {
+ if (Order.isHint(Hint) && !clobberedByRegMask(Hint)) {
DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
EvictionCost MaxCost(1);
if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
@@ -532,7 +539,7 @@ bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
Cascade = NextCascade;
EvictionCost Cost;
- for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
+ for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
// If there are 10 or more interferences, chances are one is heavier.
if (Q.collectInterferingVRegs(10) >= 10)
@@ -590,7 +597,7 @@ void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
<< " interference: Cascade " << Cascade << '\n');
- for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
+ for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
assert(Q.seenAllInterferences() && "Didn't check all interferences.");
for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
@@ -629,6 +636,8 @@ unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
Order.rewind();
while (unsigned PhysReg = Order.next()) {
+ if (clobberedByRegMask(PhysReg))
+ continue;
if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
continue;
// The first use of a callee-saved register in a function has cost 1.
@@ -1118,6 +1127,8 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
}
--NumCands;
GlobalCand[Worst] = GlobalCand[NumCands];
+ if (BestCand == NumCands)
+ BestCand = Worst;
}
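// [Editor's sketch, not part of this patch] The two added lines patch up the
// swap-with-last erase idiom: when GlobalCand[NumCands] is moved down into
// slot Worst, an index that still said NumCands would point at a dead slot.
// The same pattern in isolation (names are illustrative):
#include <vector>
static void eraseBySwap(std::vector<int> &V, unsigned Worst,
                        unsigned &Tracked) {
  unsigned Last = V.size() - 1;
  V[Worst] = V[Last];     // move the last element into the vacated slot
  V.pop_back();
  if (Tracked == Last)    // the tracked element is the one that moved
    Tracked = Worst;
}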
if (GlobalCand.size() <= NumCands)
@@ -1172,7 +1183,7 @@ unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
return 0;
// Prepare split editor.
- LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
+ LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit, SplitSpillMode);
// Assign all edge bundles to the preferred candidate, or NoCand.
@@ -1220,7 +1231,7 @@ unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
unsigned Reg = VirtReg.reg;
bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
- LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
+ LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit, SplitSpillMode);
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
for (unsigned i = 0; i != UseBlocks.size(); ++i) {
@@ -1268,7 +1279,7 @@ void RAGreedy::calcGapWeights(unsigned PhysReg,
SmallVectorImpl<float> &GapWeight) {
assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
- const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
+ ArrayRef<SlotIndex> Uses = SA->getUseSlots();
const unsigned NumGaps = Uses.size()-1;
// Start and end points for the interference check.
@@ -1280,7 +1291,7 @@ void RAGreedy::calcGapWeights(unsigned PhysReg,
GapWeight.assign(NumGaps, 0.0f);
// Add interference from each overlapping register.
- for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
+ for (const uint16_t *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
.checkInterference())
continue;
@@ -1292,7 +1303,7 @@ void RAGreedy::calcGapWeights(unsigned PhysReg,
// surrounding the instruction. The exception is interference before
// StartIdx and after StopIdx.
//
- LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
+ LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx);
for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
// Skip the gaps before IntI.
while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
@@ -1329,7 +1340,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// that the interval is continuous from FirstInstr to LastInstr. We should
// make sure that we don't do anything illegal to such an interval, though.
- const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
+ ArrayRef<SlotIndex> Uses = SA->getUseSlots();
if (Uses.size() <= 2)
return 0;
const unsigned NumGaps = Uses.size()-1;
@@ -1337,10 +1348,40 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
DEBUG({
dbgs() << "tryLocalSplit: ";
for (unsigned i = 0, e = Uses.size(); i != e; ++i)
- dbgs() << ' ' << SA->UseSlots[i];
+ dbgs() << ' ' << Uses[i];
dbgs() << '\n';
});
+ // If VirtReg is live across any register mask operands, compute a list of
+ // gaps with register masks.
+ SmallVector<unsigned, 8> RegMaskGaps;
+ if (!UsableRegs.empty()) {
+ // Get regmask slots for the whole block.
+ ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
+ DEBUG(dbgs() << RMS.size() << " regmasks in block:");
+ // Constrain to VirtReg's live range.
+ unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
+ Uses.front().getRegSlot()) - RMS.begin();
+ unsigned re = RMS.size();
+ for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
+ // Look for Uses[i] <= RMS <= Uses[i+1].
+ assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
+ if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
+ continue;
+ // Skip a regmask on the same instruction as the last use. It doesn't
+ // overlap the live range.
+ if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
+ break;
+ DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
+ RegMaskGaps.push_back(i);
+ // Advance ri to the next gap. A regmask on one of the uses counts in
+ // both gaps.
+ while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
+ ++ri;
+ }
+ DEBUG(dbgs() << '\n');
+ }
+
// Since we allow local split results to be split again, there is a risk of
// creating infinite loops. It is tempting to require that the new live
// ranges have fewer instructions than the original. That would guarantee
@@ -1375,6 +1416,11 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
// order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
calcGapWeights(PhysReg, GapWeight);
+ // Remove any gaps with regmask clobbers.
+ if (clobberedByRegMask(PhysReg))
+ for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
+ GapWeight[RegMaskGaps[i]] = HUGE_VALF;
+
// Try to find the best sequence of gaps to close.
// The new spill weight must be larger than any gap interference.
@@ -1466,7 +1512,7 @@ unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
<< '-' << Uses[BestAfter] << ", " << BestDiff
<< ", " << (BestAfter - BestBefore + 1) << " instrs\n");
- LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
+ LiveRangeEdit LREdit(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
SE->reset(LREdit);
SE->openIntv();
@@ -1553,6 +1599,11 @@ unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<LiveInterval*> &NewVRegs) {
+ // Check if VirtReg is live across any calls.
+ UsableRegs.clear();
+ if (LIS->checkRegMaskInterference(VirtReg, UsableRegs))
+ DEBUG(dbgs() << "Live across regmasks.\n");
+
// First try assigning a free register.
AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
@@ -1593,7 +1644,7 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
// Finally spill VirtReg itself.
NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
- LiveRangeEdit LRE(VirtReg, NewVRegs, this);
+ LiveRangeEdit LRE(VirtReg, NewVRegs, *MF, *LIS, VRM, this);
spiller().spill(LRE);
setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
@@ -1628,7 +1679,7 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
ExtraRegInfo.clear();
ExtraRegInfo.resize(MRI->getNumVirtRegs());
NextCascade = 1;
- IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
+ IntfCache.init(MF, &getLiveUnion(0), Indexes, LIS, TRI);
GlobalCand.resize(32); // This will grow as needed.
allocatePhysRegs();
@@ -1647,7 +1698,10 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
DebugVars->emitDebugValues(VRM);
}
- // The pass output is in VirtRegMap. Release all the transient data.
+ // All machine operands and other references to virtual registers have been
+ // replaced. Remove the virtual registers and release all the transient data.
+ VRM->clearAllVirt();
+ MRI->clearVirtRegs();
releaseMemory();
return true;
diff --git a/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp b/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp
deleted file mode 100644
index ce3fb90..0000000
--- a/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp
+++ /dev/null
@@ -1,1543 +0,0 @@
-//===-- RegAllocLinearScan.cpp - Linear Scan register allocator -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements a linear scan register allocator.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "regalloc"
-#include "LiveDebugVariables.h"
-#include "LiveRangeEdit.h"
-#include "VirtRegMap.h"
-#include "VirtRegRewriter.h"
-#include "RegisterClassInfo.h"
-#include "Spiller.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/CalcSpillWeights.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/ADT/EquivalenceClasses.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
-#include <queue>
-#include <memory>
-#include <cmath>
-
-using namespace llvm;
-
-STATISTIC(NumIters , "Number of iterations performed");
-STATISTIC(NumBacktracks, "Number of times we had to backtrack");
-STATISTIC(NumCoalesce, "Number of copies coalesced");
-STATISTIC(NumDowngrade, "Number of registers downgraded");
-
-static cl::opt<bool>
-NewHeuristic("new-spilling-heuristic",
- cl::desc("Use new spilling heuristic"),
- cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
-TrivCoalesceEnds("trivial-coalesce-ends",
- cl::desc("Attempt trivial coalescing of interval ends"),
- cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
-AvoidWAWHazard("avoid-waw-hazard",
- cl::desc("Avoid write-write hazards for some register classes"),
- cl::init(false), cl::Hidden);
-
-static RegisterRegAlloc
-linearscanRegAlloc("linearscan", "linear scan register allocator",
- createLinearScanRegisterAllocator);
-
-namespace {
- // When we allocate a register, add it to a fixed-size queue of
- // registers to skip in subsequent allocations. This trades a small
- // amount of register pressure and increased spills for flexibility in
- // the post-pass scheduler.
- //
- // Note that the number of registers used for reloading spills
- // will be one greater than the value of this option.
- //
- // One big limitation of this is that it doesn't differentiate between
- // different register classes. So on x86-64, if there is xmm register
- // pressure, it can cause fewer GPRs to be held in the queue.
- static cl::opt<unsigned>
- NumRecentlyUsedRegs("linearscan-skip-count",
- cl::desc("Number of registers for linearscan to remember"
- "to skip."),
- cl::init(0),
- cl::Hidden);
-
- struct RALinScan : public MachineFunctionPass {
- static char ID;
- RALinScan() : MachineFunctionPass(ID) {
- initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
- initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
- initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
- initializeRegisterCoalescerPass(
- *PassRegistry::getPassRegistry());
- initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
- initializeLiveStacksPass(*PassRegistry::getPassRegistry());
- initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
- initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
- initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
- initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
-
- // Initialize the queue to record recently-used registers.
- if (NumRecentlyUsedRegs > 0)
- RecentRegs.resize(NumRecentlyUsedRegs, 0);
- RecentNext = RecentRegs.begin();
- avoidWAW_ = 0;
- }
-
- typedef std::pair<LiveInterval*, LiveInterval::iterator> IntervalPtr;
- typedef SmallVector<IntervalPtr, 32> IntervalPtrs;
- private:
- /// RelatedRegClasses - This structure is built the first time a function is
- /// compiled, and keeps track of which register classes have registers that
- /// belong to multiple classes or have aliases that are in other classes.
- EquivalenceClasses<const TargetRegisterClass*> RelatedRegClasses;
- DenseMap<unsigned, const TargetRegisterClass*> OneClassForEachPhysReg;
-
- // NextReloadMap - For each register in the map, it maps to another
- // register that is defined by a reload from the same stack slot, where
- // both reloads are in the same basic block.
- DenseMap<unsigned, unsigned> NextReloadMap;
-
- // DowngradedRegs - A set of registers which are being "downgraded", i.e.
- // un-favored for allocation.
- SmallSet<unsigned, 8> DowngradedRegs;
-
- // DowngradeMap - A map from virtual registers to physical registers being
- // downgraded for the virtual registers.
- DenseMap<unsigned, unsigned> DowngradeMap;
-
- MachineFunction* mf_;
- MachineRegisterInfo* mri_;
- const TargetMachine* tm_;
- const TargetRegisterInfo* tri_;
- const TargetInstrInfo* tii_;
- BitVector allocatableRegs_;
- BitVector reservedRegs_;
- LiveIntervals* li_;
- MachineLoopInfo *loopInfo;
- RegisterClassInfo RegClassInfo;
-
- /// handled_ - Intervals are added to the handled_ set in the order of their
- /// start value. This is used for backtracking.
- std::vector<LiveInterval*> handled_;
-
- /// fixed_ - Intervals that correspond to machine registers.
- ///
- IntervalPtrs fixed_;
-
- /// active_ - Intervals that are currently being processed, and which have a
- /// live range active for the current point.
- IntervalPtrs active_;
-
- /// inactive_ - Intervals that are currently being processed, but which have
- /// a hole at the current point.
- IntervalPtrs inactive_;
-
- typedef std::priority_queue<LiveInterval*,
- SmallVector<LiveInterval*, 64>,
- greater_ptr<LiveInterval> > IntervalHeap;
- IntervalHeap unhandled_;
-
- /// regUse_ - Tracks register usage.
- SmallVector<unsigned, 32> regUse_;
- SmallVector<unsigned, 32> regUseBackUp_;
-
- /// vrm_ - Tracks register assignments.
- VirtRegMap* vrm_;
-
- std::auto_ptr<VirtRegRewriter> rewriter_;
-
- std::auto_ptr<Spiller> spiller_;
-
- // The queue of recently-used registers.
- SmallVector<unsigned, 4> RecentRegs;
- SmallVector<unsigned, 4>::iterator RecentNext;
-
- // Last write-after-write register written.
- unsigned avoidWAW_;
-
- // Record that we just picked this register.
- void recordRecentlyUsed(unsigned reg) {
- assert(reg != 0 && "Recently used register is NOREG!");
- if (!RecentRegs.empty()) {
- *RecentNext++ = reg;
- if (RecentNext == RecentRegs.end())
- RecentNext = RecentRegs.begin();
- }
- }
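// [Editor's sketch, not part of this patch] RecentRegs is a fixed-size ring
// buffer: the cursor wraps around, so the vector always holds the last N
// picks and isRecentlyUsed() below is a linear scan over at most N entries.
// The same structure, self-contained (names are illustrative):
#include <algorithm>
#include <vector>
struct RecentRing {
  std::vector<unsigned> Regs;            // fixed capacity, zero-filled
  std::vector<unsigned>::iterator Next;
  explicit RecentRing(unsigned N) : Regs(N, 0), Next(Regs.begin()) {}
  void record(unsigned Reg) {
    if (Regs.empty())
      return;
    *Next++ = Reg;                       // overwrite the oldest slot
    if (Next == Regs.end())
      Next = Regs.begin();               // wrap the cursor
  }
  bool contains(unsigned Reg) const {
    return std::find(Regs.begin(), Regs.end(), Reg) != Regs.end();
  }
};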
-
- public:
- virtual const char* getPassName() const {
- return "Linear Scan Register Allocator";
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequired<AliasAnalysis>();
- AU.addPreserved<AliasAnalysis>();
- AU.addRequired<LiveIntervals>();
- AU.addPreserved<SlotIndexes>();
- if (StrongPHIElim)
- AU.addRequiredID(StrongPHIEliminationID);
- // Make sure PassManager knows which analyses to make available
- // to coalescing and which analyses coalescing invalidates.
- AU.addRequiredTransitiveID(RegisterCoalescerPassID);
- AU.addRequired<CalculateSpillWeights>();
- AU.addRequiredID(LiveStacksID);
- AU.addPreservedID(LiveStacksID);
- AU.addRequired<MachineLoopInfo>();
- AU.addPreserved<MachineLoopInfo>();
- AU.addRequired<VirtRegMap>();
- AU.addPreserved<VirtRegMap>();
- AU.addRequired<LiveDebugVariables>();
- AU.addPreserved<LiveDebugVariables>();
- AU.addRequiredID(MachineDominatorsID);
- AU.addPreservedID(MachineDominatorsID);
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- /// runOnMachineFunction - register allocate the whole function
- bool runOnMachineFunction(MachineFunction&);
-
- // Determine if we skip this register due to its being recently used.
- bool isRecentlyUsed(unsigned reg) const {
- return reg == avoidWAW_ ||
- std::find(RecentRegs.begin(), RecentRegs.end(), reg) != RecentRegs.end();
- }
-
- private:
- /// linearScan - the linear scan algorithm
- void linearScan();
-
- /// initIntervalSets - initialize the interval sets.
- ///
- void initIntervalSets();
-
- /// processActiveIntervals - expire old intervals and move non-overlapping
- /// ones to the inactive list.
- void processActiveIntervals(SlotIndex CurPoint);
-
- /// processInactiveIntervals - expire old intervals and move overlapping
- /// ones to the active list.
- void processInactiveIntervals(SlotIndex CurPoint);
-
- /// hasNextReloadInterval - Return the next live interval that's being
- /// defined by a reload from the same SS as the specified one.
- LiveInterval *hasNextReloadInterval(LiveInterval *cur);
-
- /// DowngradeRegister - Downgrade a register for allocation.
- void DowngradeRegister(LiveInterval *li, unsigned Reg);
-
- /// UpgradeRegister - Upgrade a register for allocation.
- void UpgradeRegister(unsigned Reg);
-
- /// assignRegOrStackSlotAtInterval - assign a register if one
- /// is available, or spill.
- void assignRegOrStackSlotAtInterval(LiveInterval* cur);
-
- void updateSpillWeights(std::vector<float> &Weights,
- unsigned reg, float weight,
- const TargetRegisterClass *RC);
-
- /// findIntervalsToSpill - Determine the intervals to spill for the
- /// specified interval. It's passed the physical registers whose spill
- /// weight is the lowest among all the registers whose live intervals
- /// conflict with the interval.
- void findIntervalsToSpill(LiveInterval *cur,
- std::vector<std::pair<unsigned,float> > &Candidates,
- unsigned NumCands,
- SmallVector<LiveInterval*, 8> &SpillIntervals);
-
- /// attemptTrivialCoalescing - If a simple interval is defined by a copy,
- /// try to allocate the definition to the same register as the source,
- /// if the register is not defined during the life time of the interval.
- /// This eliminates a copy, and is used to coalesce copies which were not
- /// coalesced away before allocation either due to dest and src being in
- /// different register classes or because the coalescer was overly
- /// conservative.
- unsigned attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg);
-
- ///
- /// Register usage / availability tracking helpers.
- ///
-
- void initRegUses() {
- regUse_.resize(tri_->getNumRegs(), 0);
- regUseBackUp_.resize(tri_->getNumRegs(), 0);
- }
-
- void finalizeRegUses() {
-#ifndef NDEBUG
- // Verify all the registers are "freed".
- bool Error = false;
- for (unsigned i = 0, e = tri_->getNumRegs(); i != e; ++i) {
- if (regUse_[i] != 0) {
- dbgs() << tri_->getName(i) << " is still in use!\n";
- Error = true;
- }
- }
- if (Error)
- llvm_unreachable(0);
-#endif
- regUse_.clear();
- regUseBackUp_.clear();
- }
-
- void addRegUse(unsigned physReg) {
- assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
- "should be physical register!");
- ++regUse_[physReg];
- for (const unsigned* as = tri_->getAliasSet(physReg); *as; ++as)
- ++regUse_[*as];
- }
-
- void delRegUse(unsigned physReg) {
- assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
- "should be physical register!");
- assert(regUse_[physReg] != 0);
- --regUse_[physReg];
- for (const unsigned* as = tri_->getAliasSet(physReg); *as; ++as) {
- assert(regUse_[*as] != 0);
- --regUse_[*as];
- }
- }
-
- bool isRegAvail(unsigned physReg) const {
- assert(TargetRegisterInfo::isPhysicalRegister(physReg) &&
- "should be physical register!");
- return regUse_[physReg] == 0;
- }
-
- void backUpRegUses() {
- regUseBackUp_ = regUse_;
- }
-
- void restoreRegUses() {
- regUse_ = regUseBackUp_;
- }
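// [Editor's sketch, not part of this patch] backUpRegUses()/restoreRegUses()
// give assignRegOrStackSlotAtInterval() a one-level transaction: snapshot the
// use counts, add speculative interference from the fixed list, query, then
// roll everything back. Reduced to its essentials (names are illustrative):
#include <vector>
struct RegUseProbe {
  std::vector<unsigned> Use, Saved;
  explicit RegUseProbe(unsigned NumRegs) : Use(NumRegs, 0) {}
  void backUp() { Saved = Use; }          // snapshot before speculating
  void restore() { Use = Saved; }         // discard speculative counts
  void addUse(unsigned R) { ++Use[R]; }
  bool isAvail(unsigned R) const { return Use[R] == 0; }
};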
-
- ///
- /// Register handling helpers.
- ///
-
- /// getFreePhysReg - return a free physical register for this virtual
- /// register interval if we have one, otherwise return 0.
- unsigned getFreePhysReg(LiveInterval* cur);
- unsigned getFreePhysReg(LiveInterval* cur,
- const TargetRegisterClass *RC,
- unsigned MaxInactiveCount,
- SmallVector<unsigned, 256> &inactiveCounts,
- bool SkipDGRegs);
-
- /// getFirstNonReservedPhysReg - return the first non-reserved physical
- /// register in the register class.
- unsigned getFirstNonReservedPhysReg(const TargetRegisterClass *RC) {
- ArrayRef<unsigned> O = RegClassInfo.getOrder(RC);
- assert(!O.empty() && "All registers reserved?!");
- return O.front();
- }
-
- void ComputeRelatedRegClasses();
-
- template <typename ItTy>
- void printIntervals(const char* const str, ItTy i, ItTy e) const {
- DEBUG({
- if (str)
- dbgs() << str << " intervals:\n";
-
- for (; i != e; ++i) {
- dbgs() << '\t' << *i->first << " -> ";
-
- unsigned reg = i->first->reg;
- if (TargetRegisterInfo::isVirtualRegister(reg))
- reg = vrm_->getPhys(reg);
-
- dbgs() << tri_->getName(reg) << '\n';
- }
- });
- }
- };
- char RALinScan::ID = 0;
-}
-
-INITIALIZE_PASS_BEGIN(RALinScan, "linearscan-regalloc",
- "Linear Scan Register Allocator", false, false)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
-INITIALIZE_PASS_DEPENDENCY(CalculateSpillWeights)
-INITIALIZE_PASS_DEPENDENCY(LiveStacks)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
-INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
-INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
-INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_END(RALinScan, "linearscan-regalloc",
- "Linear Scan Register Allocator", false, false)
-
-void RALinScan::ComputeRelatedRegClasses() {
- // First pass, add all reg classes to the union, and determine at least one
- // reg class that each register is in.
- bool HasAliases = false;
- for (TargetRegisterInfo::regclass_iterator RCI = tri_->regclass_begin(),
- E = tri_->regclass_end(); RCI != E; ++RCI) {
- RelatedRegClasses.insert(*RCI);
- for (TargetRegisterClass::iterator I = (*RCI)->begin(), E = (*RCI)->end();
- I != E; ++I) {
- HasAliases = HasAliases || *tri_->getAliasSet(*I) != 0;
-
- const TargetRegisterClass *&PRC = OneClassForEachPhysReg[*I];
- if (PRC) {
- // Already processed this register. Just make sure we know that
- // multiple register classes share a register.
- RelatedRegClasses.unionSets(PRC, *RCI);
- } else {
- PRC = *RCI;
- }
- }
- }
-
- // Second pass, now that we know conservatively what register classes each reg
- // belongs to, add info about aliases. We don't need to do this for targets
- // without register aliases.
- if (HasAliases)
- for (DenseMap<unsigned, const TargetRegisterClass*>::iterator
- I = OneClassForEachPhysReg.begin(), E = OneClassForEachPhysReg.end();
- I != E; ++I)
- for (const unsigned *AS = tri_->getAliasSet(I->first); *AS; ++AS) {
- const TargetRegisterClass *AliasClass =
- OneClassForEachPhysReg.lookup(*AS);
- if (AliasClass)
- RelatedRegClasses.unionSets(I->second, AliasClass);
- }
-}
-
-/// attemptTrivialCoalescing - If a simple interval is defined by a copy, try
-/// to allocate the definition to the same register as the source register if
-/// that register is not defined during the live time of the interval. If the
-/// interval is killed by a copy, try to use the destination register. This
-/// eliminates a copy. It is used to coalesce copies which were not coalesced
-/// away before allocation, either because dest and src were in different
-/// register classes or because the coalescer was overly conservative.
-unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
- unsigned Preference = vrm_->getRegAllocPref(cur.reg);
- if ((Preference && Preference == Reg) || !cur.containsOneValue())
- return Reg;
-
- // We cannot handle complicated live ranges. Simple linear stuff only.
- if (cur.ranges.size() != 1)
- return Reg;
-
- const LiveRange &range = cur.ranges.front();
-
- VNInfo *vni = range.valno;
- if (vni->isUnused() || !vni->def.isValid())
- return Reg;
-
- unsigned CandReg;
- {
- MachineInstr *CopyMI;
- if ((CopyMI = li_->getInstructionFromIndex(vni->def)) && CopyMI->isCopy())
- // Defined by a copy, try to extend SrcReg forward
- CandReg = CopyMI->getOperand(1).getReg();
- else if (TrivCoalesceEnds &&
- (CopyMI = li_->getInstructionFromIndex(range.end.getBaseIndex())) &&
- CopyMI->isCopy() && cur.reg == CopyMI->getOperand(1).getReg())
- // Only used by a copy, try to extend DstReg backwards
- CandReg = CopyMI->getOperand(0).getReg();
- else
- return Reg;
-
- // If the target of the copy is a sub-register then don't coalesce.
- if (CopyMI->getOperand(0).getSubReg())
- return Reg;
- }
-
- if (TargetRegisterInfo::isVirtualRegister(CandReg)) {
- if (!vrm_->isAssignedReg(CandReg))
- return Reg;
- CandReg = vrm_->getPhys(CandReg);
- }
- if (Reg == CandReg)
- return Reg;
-
- const TargetRegisterClass *RC = mri_->getRegClass(cur.reg);
- if (!RC->contains(CandReg))
- return Reg;
-
- if (li_->conflictsWithPhysReg(cur, *vrm_, CandReg))
- return Reg;
-
- // Try to coalesce.
- DEBUG(dbgs() << "Coalescing: " << cur << " -> " << tri_->getName(CandReg)
- << '\n');
- vrm_->clearVirt(cur.reg);
- vrm_->assignVirt2Phys(cur.reg, CandReg);
-
- ++NumCoalesce;
- return CandReg;
-}
-
-bool RALinScan::runOnMachineFunction(MachineFunction &fn) {
- mf_ = &fn;
- mri_ = &fn.getRegInfo();
- tm_ = &fn.getTarget();
- tri_ = tm_->getRegisterInfo();
- tii_ = tm_->getInstrInfo();
- allocatableRegs_ = tri_->getAllocatableSet(fn);
- reservedRegs_ = tri_->getReservedRegs(fn);
- li_ = &getAnalysis<LiveIntervals>();
- loopInfo = &getAnalysis<MachineLoopInfo>();
- RegClassInfo.runOnMachineFunction(fn);
-
- // We don't run the coalescer here because we have no reason to
- // interact with it. If the coalescer requires interaction, it
- // won't do anything. If it doesn't require interaction, we assume
- // it was run as a separate pass.
-
- // If this is the first function compiled, compute the related reg classes.
- if (RelatedRegClasses.empty())
- ComputeRelatedRegClasses();
-
- // Also resize register usage trackers.
- initRegUses();
-
- vrm_ = &getAnalysis<VirtRegMap>();
- if (!rewriter_.get()) rewriter_.reset(createVirtRegRewriter());
-
- spiller_.reset(createSpiller(*this, *mf_, *vrm_));
-
- initIntervalSets();
-
- linearScan();
-
- // Rewrite spill code and update the PhysRegsUsed set.
- rewriter_->runOnMachineFunction(*mf_, *vrm_, li_);
-
- // Write out new DBG_VALUE instructions.
- getAnalysis<LiveDebugVariables>().emitDebugValues(vrm_);
-
- assert(unhandled_.empty() && "Unhandled live intervals remain!");
-
- finalizeRegUses();
-
- fixed_.clear();
- active_.clear();
- inactive_.clear();
- handled_.clear();
- NextReloadMap.clear();
- DowngradedRegs.clear();
- DowngradeMap.clear();
- spiller_.reset(0);
-
- return true;
-}
-
-/// initIntervalSets - initialize the interval sets.
-///
-void RALinScan::initIntervalSets()
-{
- assert(unhandled_.empty() && fixed_.empty() &&
- active_.empty() && inactive_.empty() &&
- "interval sets should be empty on initialization");
-
- handled_.reserve(li_->getNumIntervals());
-
- for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
- if (TargetRegisterInfo::isPhysicalRegister(i->second->reg)) {
- if (!i->second->empty() && allocatableRegs_.test(i->second->reg)) {
- mri_->setPhysRegUsed(i->second->reg);
- fixed_.push_back(std::make_pair(i->second, i->second->begin()));
- }
- } else {
- if (i->second->empty()) {
- assignRegOrStackSlotAtInterval(i->second);
- }
- else
- unhandled_.push(i->second);
- }
- }
-}
-
-void RALinScan::linearScan() {
- // linear scan algorithm
- DEBUG({
- dbgs() << "********** LINEAR SCAN **********\n"
- << "********** Function: "
- << mf_->getFunction()->getName() << '\n';
- printIntervals("fixed", fixed_.begin(), fixed_.end());
- });
-
- while (!unhandled_.empty()) {
- // pick the interval with the earliest start point
- LiveInterval* cur = unhandled_.top();
- unhandled_.pop();
- ++NumIters;
- DEBUG(dbgs() << "\n*** CURRENT ***: " << *cur << '\n');
-
- assert(!cur->empty() && "Empty interval in unhandled set.");
-
- processActiveIntervals(cur->beginIndex());
- processInactiveIntervals(cur->beginIndex());
-
- assert(TargetRegisterInfo::isVirtualRegister(cur->reg) &&
- "Can only allocate virtual registers!");
-
- // Allocating a virtual register. Try to find a free
- // physical register or spill an interval (possibly this one) in order to
- // assign it one.
- assignRegOrStackSlotAtInterval(cur);
-
- DEBUG({
- printIntervals("active", active_.begin(), active_.end());
- printIntervals("inactive", inactive_.begin(), inactive_.end());
- });
- }
-
- // Expire any remaining active intervals
- while (!active_.empty()) {
- IntervalPtr &IP = active_.back();
- unsigned reg = IP.first->reg;
- DEBUG(dbgs() << "\tinterval " << *IP.first << " expired\n");
- assert(TargetRegisterInfo::isVirtualRegister(reg) &&
- "Can only allocate virtual registers!");
- reg = vrm_->getPhys(reg);
- delRegUse(reg);
- active_.pop_back();
- }
-
- // Expire any remaining inactive intervals
- DEBUG({
- for (IntervalPtrs::reverse_iterator
- i = inactive_.rbegin(); i != inactive_.rend(); ++i)
- dbgs() << "\tinterval " << *i->first << " expired\n";
- });
- inactive_.clear();
-
- // Add live-ins to every BB except for entry. Also perform trivial coalescing.
- MachineFunction::iterator EntryMBB = mf_->begin();
- SmallVector<MachineBasicBlock*, 8> LiveInMBBs;
- for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
- LiveInterval &cur = *i->second;
- unsigned Reg = 0;
- bool isPhys = TargetRegisterInfo::isPhysicalRegister(cur.reg);
- if (isPhys)
- Reg = cur.reg;
- else if (vrm_->isAssignedReg(cur.reg))
- Reg = attemptTrivialCoalescing(cur, vrm_->getPhys(cur.reg));
- if (!Reg)
- continue;
- // Ignore split live intervals.
- if (!isPhys && vrm_->getPreSplitReg(cur.reg))
- continue;
-
- for (LiveInterval::Ranges::const_iterator I = cur.begin(), E = cur.end();
- I != E; ++I) {
- const LiveRange &LR = *I;
- if (li_->findLiveInMBBs(LR.start, LR.end, LiveInMBBs)) {
- for (unsigned i = 0, e = LiveInMBBs.size(); i != e; ++i)
- if (LiveInMBBs[i] != EntryMBB) {
- assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
- "Adding a virtual register to livein set?");
- LiveInMBBs[i]->addLiveIn(Reg);
- }
- LiveInMBBs.clear();
- }
- }
- }
-
- DEBUG(dbgs() << *vrm_);
-
- // Look for physical registers that end up not being allocated even though
- // the register allocator had to spill other registers in its register class.
- if (!vrm_->FindUnusedRegisters(li_))
- return;
-}
-
-/// processActiveIntervals - expire old intervals and move non-overlapping ones
-/// to the inactive list.
-void RALinScan::processActiveIntervals(SlotIndex CurPoint)
-{
- DEBUG(dbgs() << "\tprocessing active intervals:\n");
-
- for (unsigned i = 0, e = active_.size(); i != e; ++i) {
- LiveInterval *Interval = active_[i].first;
- LiveInterval::iterator IntervalPos = active_[i].second;
- unsigned reg = Interval->reg;
-
- IntervalPos = Interval->advanceTo(IntervalPos, CurPoint);
-
- if (IntervalPos == Interval->end()) { // Remove expired intervals.
- DEBUG(dbgs() << "\t\tinterval " << *Interval << " expired\n");
- assert(TargetRegisterInfo::isVirtualRegister(reg) &&
- "Can only allocate virtual registers!");
- reg = vrm_->getPhys(reg);
- delRegUse(reg);
-
- // Pop off the end of the list.
- active_[i] = active_.back();
- active_.pop_back();
- --i; --e;
-
- } else if (IntervalPos->start > CurPoint) {
- // Move inactive intervals to inactive list.
- DEBUG(dbgs() << "\t\tinterval " << *Interval << " inactive\n");
- assert(TargetRegisterInfo::isVirtualRegister(reg) &&
- "Can only allocate virtual registers!");
- reg = vrm_->getPhys(reg);
- delRegUse(reg);
- // add to inactive.
- inactive_.push_back(std::make_pair(Interval, IntervalPos));
-
- // Pop off the end of the list.
- active_[i] = active_.back();
- active_.pop_back();
- --i; --e;
- } else {
- // Otherwise, just update the iterator position.
- active_[i].second = IntervalPos;
- }
- }
-}
-
-/// processInactiveIntervals - expire old intervals and move overlapping
-/// ones to the active list.
-void RALinScan::processInactiveIntervals(SlotIndex CurPoint)
-{
- DEBUG(dbgs() << "\tprocessing inactive intervals:\n");
-
- for (unsigned i = 0, e = inactive_.size(); i != e; ++i) {
- LiveInterval *Interval = inactive_[i].first;
- LiveInterval::iterator IntervalPos = inactive_[i].second;
- unsigned reg = Interval->reg;
-
- IntervalPos = Interval->advanceTo(IntervalPos, CurPoint);
-
- if (IntervalPos == Interval->end()) { // remove expired intervals.
- DEBUG(dbgs() << "\t\tinterval " << *Interval << " expired\n");
-
- // Pop off the end of the list.
- inactive_[i] = inactive_.back();
- inactive_.pop_back();
- --i; --e;
- } else if (IntervalPos->start <= CurPoint) {
- // move re-activated intervals in active list
- DEBUG(dbgs() << "\t\tinterval " << *Interval << " active\n");
- assert(TargetRegisterInfo::isVirtualRegister(reg) &&
- "Can only allocate virtual registers!");
- reg = vrm_->getPhys(reg);
- addRegUse(reg);
- // add to active
- active_.push_back(std::make_pair(Interval, IntervalPos));
-
- // Pop off the end of the list.
- inactive_[i] = inactive_.back();
- inactive_.pop_back();
- --i; --e;
- } else {
- // Otherwise, just update the iterator position.
- inactive_[i].second = IntervalPos;
- }
- }
-}
-
-/// updateSpillWeights - add the given weight to the spill weights of the
-/// specified physical register and its aliases.
-void RALinScan::updateSpillWeights(std::vector<float> &Weights,
- unsigned reg, float weight,
- const TargetRegisterClass *RC) {
- SmallSet<unsigned, 4> Processed;
- SmallSet<unsigned, 4> SuperAdded;
- SmallVector<unsigned, 4> Supers;
- Weights[reg] += weight;
- Processed.insert(reg);
- for (const unsigned* as = tri_->getAliasSet(reg); *as; ++as) {
- Weights[*as] += weight;
- Processed.insert(*as);
- if (tri_->isSubRegister(*as, reg) &&
- SuperAdded.insert(*as) &&
- RC->contains(*as)) {
- Supers.push_back(*as);
- }
- }
-
- // If the alias is a super-register and the super-register is in the
- // register class we are trying to allocate, then add the weight to all
- // sub-registers of the super-register even if they are not aliases.
- // e.g. allocating for GR32, bh is not used, updating bl spill weight.
- // bl should get the same spill weight otherwise it will be chosen
- // as a spill candidate since spilling bh doesn't make ebx available.
- for (unsigned i = 0, e = Supers.size(); i != e; ++i) {
- for (const unsigned *sr = tri_->getSubRegisters(Supers[i]); *sr; ++sr)
- if (!Processed.count(*sr))
- Weights[*sr] += weight;
- }
-}
-
-static
-RALinScan::IntervalPtrs::iterator
-FindIntervalInVector(RALinScan::IntervalPtrs &IP, LiveInterval *LI) {
- for (RALinScan::IntervalPtrs::iterator I = IP.begin(), E = IP.end();
- I != E; ++I)
- if (I->first == LI) return I;
- return IP.end();
-}
-
-static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V,
- SlotIndex Point){
- for (unsigned i = 0, e = V.size(); i != e; ++i) {
- RALinScan::IntervalPtr &IP = V[i];
- LiveInterval::iterator I = std::upper_bound(IP.first->begin(),
- IP.second, Point);
- if (I != IP.first->begin()) --I;
- IP.second = I;
- }
-}
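// [Editor's sketch, not part of this patch] Rewinding a cached iterator to a
// rollback point is a binary search over the sorted ranges, stepping back one
// element so the cursor lands on the range covering (or just preceding) the
// point. The same idea over a plain sorted vector (illustrative only):
#include <algorithm>
#include <vector>
static std::vector<int>::iterator
rewindTo(std::vector<int> &V, std::vector<int>::iterator Cur, int Point) {
  std::vector<int>::iterator I = std::upper_bound(V.begin(), Cur, Point);
  if (I != V.begin())
    --I;                 // step back onto the element at or before Point
  return I;
}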
-
-/// getConflictWeight - Return the number of conflicts between cur
-/// live interval and the defs and uses of Reg, weighted by loop depths.
-static
-float getConflictWeight(LiveInterval *cur, unsigned Reg, LiveIntervals *li_,
- MachineRegisterInfo *mri_,
- MachineLoopInfo *loopInfo) {
- float Conflicts = 0;
- for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(Reg),
- E = mri_->reg_end(); I != E; ++I) {
- MachineInstr *MI = &*I;
- if (cur->liveAt(li_->getInstructionIndex(MI))) {
- unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
- Conflicts += std::pow(10.0f, (float)loopDepth);
- }
- }
- return Conflicts;
-}
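// [Editor's sketch, not part of this patch] The 10^depth weighting above is
// steep by design: one conflicting use inside a doubly nested loop (depth 2)
// costs 100, so it outweighs dozens of conflicts in straight-line code
// (depth 0, cost 1 each). The core computation in isolation:
#include <cmath>
static float conflictWeightSketch(const unsigned *LoopDepths, unsigned N) {
  float Conflicts = 0;
  for (unsigned i = 0; i != N; ++i)
    Conflicts += std::pow(10.0f, (float)LoopDepths[i]);
  return Conflicts;
}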
-
-/// findIntervalsToSpill - Determine the intervals to spill for the
-/// specified interval. It's passed the physical registers whose spill
-/// weight is the lowest among all the registers whose live intervals
-/// conflict with the interval.
-void RALinScan::findIntervalsToSpill(LiveInterval *cur,
- std::vector<std::pair<unsigned,float> > &Candidates,
- unsigned NumCands,
- SmallVector<LiveInterval*, 8> &SpillIntervals) {
- // We have figured out the *best* register to spill. But there are other
- // registers that are pretty good as well (spill weight within 3%). Spill
- // the one that has fewest defs and uses that conflict with cur.
- float Conflicts[3] = { 0.0f, 0.0f, 0.0f };
- SmallVector<LiveInterval*, 8> SLIs[3];
-
- DEBUG({
- dbgs() << "\tConsidering " << NumCands << " candidates: ";
- for (unsigned i = 0; i != NumCands; ++i)
- dbgs() << tri_->getName(Candidates[i].first) << " ";
- dbgs() << "\n";
- });
-
- // Calculate the number of conflicts of each candidate.
- for (IntervalPtrs::iterator i = active_.begin(); i != active_.end(); ++i) {
- unsigned Reg = i->first->reg;
- unsigned PhysReg = vrm_->getPhys(Reg);
- if (!cur->overlapsFrom(*i->first, i->second))
- continue;
- for (unsigned j = 0; j < NumCands; ++j) {
- unsigned Candidate = Candidates[j].first;
- if (tri_->regsOverlap(PhysReg, Candidate)) {
- if (NumCands > 1)
- Conflicts[j] += getConflictWeight(cur, Reg, li_, mri_, loopInfo);
- SLIs[j].push_back(i->first);
- }
- }
- }
-
- for (IntervalPtrs::iterator i = inactive_.begin(); i != inactive_.end(); ++i){
- unsigned Reg = i->first->reg;
- unsigned PhysReg = vrm_->getPhys(Reg);
- if (!cur->overlapsFrom(*i->first, i->second-1))
- continue;
- for (unsigned j = 0; j < NumCands; ++j) {
- unsigned Candidate = Candidates[j].first;
- if (tri_->regsOverlap(PhysReg, Candidate)) {
- if (NumCands > 1)
- Conflicts[j] += getConflictWeight(cur, Reg, li_, mri_, loopInfo);
- SLIs[j].push_back(i->first);
- }
- }
- }
-
- // Which is the best candidate?
- unsigned BestCandidate = 0;
- float MinConflicts = Conflicts[0];
- for (unsigned i = 1; i != NumCands; ++i) {
- if (Conflicts[i] < MinConflicts) {
- BestCandidate = i;
- MinConflicts = Conflicts[i];
- }
- }
-
- std::copy(SLIs[BestCandidate].begin(), SLIs[BestCandidate].end(),
- std::back_inserter(SpillIntervals));
-}
-
-namespace {
- struct WeightCompare {
- private:
- const RALinScan &Allocator;
-
- public:
- WeightCompare(const RALinScan &Alloc) : Allocator(Alloc) {}
-
- typedef std::pair<unsigned, float> RegWeightPair;
- bool operator()(const RegWeightPair &LHS, const RegWeightPair &RHS) const {
- return LHS.second < RHS.second && !Allocator.isRecentlyUsed(LHS.first);
- }
- };
-}
-
-static bool weightsAreClose(float w1, float w2) {
- if (!NewHeuristic)
- return false;
-
- float diff = w1 - w2;
- if (diff <= 0.02f) // Within 0.02f
- return true;
- return (diff / w2) <= 0.05f; // Within 5%.
-}
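// [Editor's sketch, not part of this patch] weightsAreClose() accepts a
// candidate within 0.02 absolute weight or within 5% relative weight. For
// example, w1 = 1.04 and w2 = 1.00 fail the absolute test (diff = 0.04) but
// pass the relative one (0.04 / 1.00 = 4%). The two tests in isolation:
static bool weightsAreCloseSketch(float w1, float w2) {
  float diff = w1 - w2;
  if (diff <= 0.02f)            // within 0.02 absolute
    return true;
  return (diff / w2) <= 0.05f;  // within 5% relative
}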
-
-LiveInterval *RALinScan::hasNextReloadInterval(LiveInterval *cur) {
- DenseMap<unsigned, unsigned>::iterator I = NextReloadMap.find(cur->reg);
- if (I == NextReloadMap.end())
- return 0;
- return &li_->getInterval(I->second);
-}
-
-void RALinScan::DowngradeRegister(LiveInterval *li, unsigned Reg) {
- for (const unsigned *AS = tri_->getOverlaps(Reg); *AS; ++AS) {
- bool isNew = DowngradedRegs.insert(*AS);
- (void)isNew; // Silence compiler warning.
- assert(isNew && "Multiple reloads holding the same register?");
- DowngradeMap.insert(std::make_pair(li->reg, *AS));
- }
- ++NumDowngrade;
-}
-
-void RALinScan::UpgradeRegister(unsigned Reg) {
- if (Reg) {
- DowngradedRegs.erase(Reg);
- for (const unsigned *AS = tri_->getAliasSet(Reg); *AS; ++AS)
- DowngradedRegs.erase(*AS);
- }
-}
-
-namespace {
- struct LISorter {
- bool operator()(LiveInterval* A, LiveInterval* B) {
- return A->beginIndex() < B->beginIndex();
- }
- };
-}
-
-/// assignRegOrStackSlotAtInterval - assign a register if one is available, or
-/// spill.
-void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
- const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
- DEBUG(dbgs() << "\tallocating current interval from "
- << RC->getName() << ": ");
-
- // This is an implicitly defined live interval, just assign any register.
- if (cur->empty()) {
- unsigned physReg = vrm_->getRegAllocPref(cur->reg);
- if (!physReg)
- physReg = getFirstNonReservedPhysReg(RC);
- DEBUG(dbgs() << tri_->getName(physReg) << '\n');
- // Note the register is not really in use.
- vrm_->assignVirt2Phys(cur->reg, physReg);
- return;
- }
-
- backUpRegUses();
-
- std::vector<std::pair<unsigned, float> > SpillWeightsToAdd;
- SlotIndex StartPosition = cur->beginIndex();
- const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);
-
- // If start of this live interval is defined by a move instruction and its
- // source is assigned a physical register that is compatible with the target
- // register class, then we should try to assign it the same register.
- // This can happen when the move is from a larger register class to a smaller
- // one, e.g. X86::mov32to32_. These move instructions are not coalescable.
- if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
- VNInfo *vni = cur->begin()->valno;
- if (!vni->isUnused() && vni->def.isValid()) {
- MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
- if (CopyMI && CopyMI->isCopy()) {
- unsigned DstSubReg = CopyMI->getOperand(0).getSubReg();
- unsigned SrcReg = CopyMI->getOperand(1).getReg();
- unsigned SrcSubReg = CopyMI->getOperand(1).getSubReg();
- unsigned Reg = 0;
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
- Reg = SrcReg;
- else if (vrm_->isAssignedReg(SrcReg))
- Reg = vrm_->getPhys(SrcReg);
- if (Reg) {
- if (SrcSubReg)
- Reg = tri_->getSubReg(Reg, SrcSubReg);
- if (DstSubReg)
- Reg = tri_->getMatchingSuperReg(Reg, DstSubReg, RC);
- if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
- mri_->setRegAllocationHint(cur->reg, 0, Reg);
- }
- }
- }
- }
-
- // For every interval in inactive we overlap with, mark the
- // register as not free and update spill weights.
- for (IntervalPtrs::const_iterator i = inactive_.begin(),
- e = inactive_.end(); i != e; ++i) {
- unsigned Reg = i->first->reg;
- assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
- "Can only allocate virtual registers!");
- const TargetRegisterClass *RegRC = mri_->getRegClass(Reg);
- // If this is not in a related reg class to the register we're allocating,
- // don't check it.
- if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
- cur->overlapsFrom(*i->first, i->second-1)) {
- Reg = vrm_->getPhys(Reg);
- addRegUse(Reg);
- SpillWeightsToAdd.push_back(std::make_pair(Reg, i->first->weight));
- }
- }
-
- // Speculatively check to see if we can get a register right now. If not,
- // we know we won't be able to by adding more constraints. If so, we can
- // check to see if it is valid. Doing an exhaustive search of the fixed_ list
- // is very bad (it contains all callee clobbered registers for any functions
- // with a call), so we want to avoid doing that if possible.
- unsigned physReg = getFreePhysReg(cur);
- unsigned BestPhysReg = physReg;
- if (physReg) {
- // We got a register. However, if it's in the fixed_ list, we might
- // conflict with it. Check to see if we conflict with it or any of its
- // aliases.
- SmallSet<unsigned, 8> RegAliases;
- for (const unsigned *AS = tri_->getAliasSet(physReg); *AS; ++AS)
- RegAliases.insert(*AS);
-
- bool ConflictsWithFixed = false;
- for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
- IntervalPtr &IP = fixed_[i];
- if (physReg == IP.first->reg || RegAliases.count(IP.first->reg)) {
- // Okay, this reg is on the fixed list. Check to see if we actually
- // conflict.
- LiveInterval *I = IP.first;
- if (I->endIndex() > StartPosition) {
- LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
- IP.second = II;
- if (II != I->begin() && II->start > StartPosition)
- --II;
- if (cur->overlapsFrom(*I, II)) {
- ConflictsWithFixed = true;
- break;
- }
- }
- }
- }
-
- // Okay, the register picked by our speculative getFreePhysReg call turned
- // out to be in use. Actually add all of the conflicting fixed registers to
- // regUse_ so we can do an accurate query.
- if (ConflictsWithFixed) {
- // For every interval in fixed we overlap with, mark the register as not
- // free and update spill weights.
- for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
- IntervalPtr &IP = fixed_[i];
- LiveInterval *I = IP.first;
-
- const TargetRegisterClass *RegRC = OneClassForEachPhysReg[I->reg];
- if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
- I->endIndex() > StartPosition) {
- LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
- IP.second = II;
- if (II != I->begin() && II->start > StartPosition)
- --II;
- if (cur->overlapsFrom(*I, II)) {
- unsigned reg = I->reg;
- addRegUse(reg);
- SpillWeightsToAdd.push_back(std::make_pair(reg, I->weight));
- }
- }
- }
-
- // Using the newly updated regUse_ object, which includes conflicts in the
- // future, see if there are any registers available.
- physReg = getFreePhysReg(cur);
- }
- }
-
- // Restore the physical register tracker, removing information about the
- // future.
- restoreRegUses();
-
- // If we find a free register, we are done: assign this virtual to
- // the free physical register and add this interval to the active
- // list.
- if (physReg) {
- DEBUG(dbgs() << tri_->getName(physReg) << '\n');
- assert(RC->contains(physReg) && "Invalid candidate");
- vrm_->assignVirt2Phys(cur->reg, physReg);
- addRegUse(physReg);
- active_.push_back(std::make_pair(cur, cur->begin()));
- handled_.push_back(cur);
-
- // Remember physReg for avoiding a write-after-write hazard in the next
- // instruction.
- if (AvoidWAWHazard &&
- tri_->avoidWriteAfterWrite(mri_->getRegClass(cur->reg)))
- avoidWAW_ = physReg;
-
- // "Upgrade" the physical register since it has been allocated.
- UpgradeRegister(physReg);
- if (LiveInterval *NextReloadLI = hasNextReloadInterval(cur)) {
- // "Downgrade" physReg to try to keep physReg from being allocated until
- // the next reload from the same SS is allocated.
- mri_->setRegAllocationHint(NextReloadLI->reg, 0, physReg);
- DowngradeRegister(cur, physReg);
- }
- return;
- }
- DEBUG(dbgs() << "no free registers\n");
-
- // Compile the spill weights into an array that is better for scanning.
- std::vector<float> SpillWeights(tri_->getNumRegs(), 0.0f);
- for (std::vector<std::pair<unsigned, float> >::iterator
- I = SpillWeightsToAdd.begin(), E = SpillWeightsToAdd.end(); I != E; ++I)
- updateSpillWeights(SpillWeights, I->first, I->second, RC);
-
- // for each interval in active, update spill weights.
- for (IntervalPtrs::const_iterator i = active_.begin(), e = active_.end();
- i != e; ++i) {
- unsigned reg = i->first->reg;
- assert(TargetRegisterInfo::isVirtualRegister(reg) &&
- "Can only allocate virtual registers!");
- reg = vrm_->getPhys(reg);
- updateSpillWeights(SpillWeights, reg, i->first->weight, RC);
- }
-
- DEBUG(dbgs() << "\tassigning stack slot at interval "<< *cur << ":\n");
-
- // Find a register to spill.
- float minWeight = HUGE_VALF;
- unsigned minReg = 0;
-
- bool Found = false;
- std::vector<std::pair<unsigned,float> > RegsWeights;
- ArrayRef<unsigned> Order = RegClassInfo.getOrder(RC);
- if (!minReg || SpillWeights[minReg] == HUGE_VALF)
- for (unsigned i = 0; i != Order.size(); ++i) {
- unsigned reg = Order[i];
- float regWeight = SpillWeights[reg];
- // Skip recently allocated registers and reserved registers.
- if (minWeight > regWeight && !isRecentlyUsed(reg))
- Found = true;
- RegsWeights.push_back(std::make_pair(reg, regWeight));
- }
-
- // If we didn't find a register that is spillable, try aliases?
- if (!Found) {
- for (unsigned i = 0; i != Order.size(); ++i) {
- unsigned reg = Order[i];
- // No need to worry if the alias register size < the regsize of RC.
- // We are going to spill all registers that alias it anyway.
- for (const unsigned* as = tri_->getAliasSet(reg); *as; ++as)
- RegsWeights.push_back(std::make_pair(*as, SpillWeights[*as]));
- }
- }
-
- // Sort all potential spill candidates by weight.
- std::sort(RegsWeights.begin(), RegsWeights.end(), WeightCompare(*this));
- minReg = RegsWeights[0].first;
- minWeight = RegsWeights[0].second;
- if (minWeight == HUGE_VALF) {
- // All registers must have inf weight. Just grab one!
- minReg = BestPhysReg ? BestPhysReg : getFirstNonReservedPhysReg(RC);
- if (cur->weight == HUGE_VALF ||
- li_->getApproximateInstructionCount(*cur) == 0) {
- // Spill a physical register around defs and uses.
- if (li_->spillPhysRegAroundRegDefsUses(*cur, minReg, *vrm_)) {
- // spillPhysRegAroundRegDefsUses may have invalidated iterator stored
- // in fixed_. Reset them.
- for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
- IntervalPtr &IP = fixed_[i];
- LiveInterval *I = IP.first;
- if (I->reg == minReg || tri_->isSubRegister(minReg, I->reg))
- IP.second = I->advanceTo(I->begin(), StartPosition);
- }
-
- DowngradedRegs.clear();
- assignRegOrStackSlotAtInterval(cur);
- } else {
- assert(false && "Ran out of registers during register allocation!");
- report_fatal_error("Ran out of registers during register allocation!");
- }
- return;
- }
- }
-
- // Find up to 3 registers to consider as spill candidates.
- unsigned LastCandidate = RegsWeights.size() >= 3 ? 3 : 1;
- while (LastCandidate > 1) {
- if (weightsAreClose(RegsWeights[LastCandidate-1].second, minWeight))
- break;
- --LastCandidate;
- }
-
- DEBUG({
- dbgs() << "\t\tregister(s) with min weight(s): ";
-
- for (unsigned i = 0; i != LastCandidate; ++i)
- dbgs() << tri_->getName(RegsWeights[i].first)
- << " (" << RegsWeights[i].second << ")\n";
- });
-
- // If the current interval has the minimum weight, we need to spill it,
- // add any newly created intervals back to unhandled, and restart
- // linear scan.
- if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
- DEBUG(dbgs() << "\t\t\tspilling(c): " << *cur << '\n');
- SmallVector<LiveInterval*, 8> added;
- LiveRangeEdit LRE(*cur, added);
- spiller_->spill(LRE);
-
- std::sort(added.begin(), added.end(), LISorter());
- if (added.empty())
- return; // Early exit if all spills were folded.
-
- // Merge added with unhandled. Note that we have already sorted
- // intervals returned by addIntervalsForSpills by their starting
- // point.
- // This also updates the NextReloadMap. That is, it adds a mapping from a
- // register defined by a reload from SS to the next reload from SS in the
- // same basic block.
- MachineBasicBlock *LastReloadMBB = 0;
- LiveInterval *LastReload = 0;
- int LastReloadSS = VirtRegMap::NO_STACK_SLOT;
- for (unsigned i = 0, e = added.size(); i != e; ++i) {
- LiveInterval *ReloadLi = added[i];
- if (ReloadLi->weight == HUGE_VALF &&
- li_->getApproximateInstructionCount(*ReloadLi) == 0) {
- SlotIndex ReloadIdx = ReloadLi->beginIndex();
- MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
- int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
- if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
- // Last reload of same SS is in the same MBB. We want to try to
- // allocate both reloads the same register and make sure the reg
- // isn't clobbered in between if at all possible.
- assert(LastReload->beginIndex() < ReloadIdx);
- NextReloadMap.insert(std::make_pair(LastReload->reg, ReloadLi->reg));
- }
- LastReloadMBB = ReloadMBB;
- LastReload = ReloadLi;
- LastReloadSS = ReloadSS;
- }
- unhandled_.push(ReloadLi);
- }
- return;
- }
-
- ++NumBacktracks;
-
- // Push the current interval back to unhandled since we are going
- // to re-run at least this iteration. Since we didn't modify it, it
- // should go back at the front of the list.
- unhandled_.push(cur);
-
- assert(TargetRegisterInfo::isPhysicalRegister(minReg) &&
- "did not choose a register to spill?");
-
- // We spill all intervals aliasing the register with
- // minimum weight, rollback to the interval with the earliest
- // start point and let the linear scan algorithm run again
- SmallVector<LiveInterval*, 8> spillIs;
-
- // Determine which intervals have to be spilled.
- findIntervalsToSpill(cur, RegsWeights, LastCandidate, spillIs);
-
- // Set of spilled vregs (used later to rollback properly)
- SmallSet<unsigned, 8> spilled;
-
- // The earliest start of a spilled interval indicates up to where
- // in handled we need to roll back.
- assert(!spillIs.empty() && "No spill intervals?");
- SlotIndex earliestStart = spillIs[0]->beginIndex();
-
- // Spill live intervals of virtual regs mapped to the physical register we
- // want to clear (and its aliases). We only spill those that overlap with the
- // current interval, as the rest do not affect its allocation. We also keep
- // track of the earliest start of all spilled live intervals since this will
- // mark our rollback point.
- SmallVector<LiveInterval*, 8> added;
- while (!spillIs.empty()) {
- LiveInterval *sli = spillIs.back();
- spillIs.pop_back();
- DEBUG(dbgs() << "\t\t\tspilling(a): " << *sli << '\n');
- if (sli->beginIndex() < earliestStart)
- earliestStart = sli->beginIndex();
- LiveRangeEdit LRE(*sli, added, 0, &spillIs);
- spiller_->spill(LRE);
- spilled.insert(sli->reg);
- }
-
- // Include any added intervals in earliestStart.
- for (unsigned i = 0, e = added.size(); i != e; ++i) {
- SlotIndex SI = added[i]->beginIndex();
- if (SI < earliestStart)
- earliestStart = SI;
- }
-
- DEBUG(dbgs() << "\t\trolling back to: " << earliestStart << '\n');
-
- // Scan handled in reverse order up to the earliest start of a
- // spilled live interval and undo each one, restoring the state of
- // unhandled.
- while (!handled_.empty()) {
- LiveInterval* i = handled_.back();
- // If this interval starts before earliestStart, we are done.
- if (!i->empty() && i->beginIndex() < earliestStart)
- break;
- DEBUG(dbgs() << "\t\t\tundo changes for: " << *i << '\n');
- handled_.pop_back();
-
- // When undoing a live interval allocation we must know if it is active or
- // inactive to properly update regUse_ and the VirtRegMap.
- IntervalPtrs::iterator it;
- if ((it = FindIntervalInVector(active_, i)) != active_.end()) {
- active_.erase(it);
- assert(!TargetRegisterInfo::isPhysicalRegister(i->reg));
- if (!spilled.count(i->reg))
- unhandled_.push(i);
- delRegUse(vrm_->getPhys(i->reg));
- vrm_->clearVirt(i->reg);
- } else if ((it = FindIntervalInVector(inactive_, i)) != inactive_.end()) {
- inactive_.erase(it);
- assert(!TargetRegisterInfo::isPhysicalRegister(i->reg));
- if (!spilled.count(i->reg))
- unhandled_.push(i);
- vrm_->clearVirt(i->reg);
- } else {
- assert(TargetRegisterInfo::isVirtualRegister(i->reg) &&
- "Can only allocate virtual registers!");
- vrm_->clearVirt(i->reg);
- unhandled_.push(i);
- }
-
- DenseMap<unsigned, unsigned>::iterator ii = DowngradeMap.find(i->reg);
- if (ii == DowngradeMap.end())
- // If the interval has a preference, it must be defined by a copy. Clear the
- // preference now since the source interval allocation may have been
- // undone as well.
- mri_->setRegAllocationHint(i->reg, 0, 0);
- else {
- UpgradeRegister(ii->second);
- }
- }
-
- // Rewind the iterators in the active, inactive, and fixed lists back to the
- // point we reverted to.
- RevertVectorIteratorsTo(active_, earliestStart);
- RevertVectorIteratorsTo(inactive_, earliestStart);
- RevertVectorIteratorsTo(fixed_, earliestStart);
-
- // Scan the rest and undo each interval that expired after earliestStart and
- // insert it in active (the next iteration of the algorithm will
- // put it in inactive if required)
- for (unsigned i = 0, e = handled_.size(); i != e; ++i) {
- LiveInterval *HI = handled_[i];
- if (!HI->expiredAt(earliestStart) &&
- HI->expiredAt(cur->beginIndex())) {
- DEBUG(dbgs() << "\t\t\tundo changes for: " << *HI << '\n');
- active_.push_back(std::make_pair(HI, HI->begin()));
- assert(!TargetRegisterInfo::isPhysicalRegister(HI->reg));
- addRegUse(vrm_->getPhys(HI->reg));
- }
- }
-
- // Merge added with unhandled.
- // This also updates the NextReloadMap. That is, it adds a mapping from a
- // register defined by a reload from SS to the next reload from SS in the
- // same basic block.
- MachineBasicBlock *LastReloadMBB = 0;
- LiveInterval *LastReload = 0;
- int LastReloadSS = VirtRegMap::NO_STACK_SLOT;
- std::sort(added.begin(), added.end(), LISorter());
- for (unsigned i = 0, e = added.size(); i != e; ++i) {
- LiveInterval *ReloadLi = added[i];
- if (ReloadLi->weight == HUGE_VALF &&
- li_->getApproximateInstructionCount(*ReloadLi) == 0) {
- SlotIndex ReloadIdx = ReloadLi->beginIndex();
- MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
- int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
- if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
- // Last reload of same SS is in the same MBB. We want to try to
- // allocate both reloads the same register and make sure the reg
- // isn't clobbered in between if at all possible.
- assert(LastReload->beginIndex() < ReloadIdx);
- NextReloadMap.insert(std::make_pair(LastReload->reg, ReloadLi->reg));
- }
- LastReloadMBB = ReloadMBB;
- LastReload = ReloadLi;
- LastReloadSS = ReloadSS;
- }
- unhandled_.push(ReloadLi);
- }
-}
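
[Editor's note: the loop above (and its duplicate at the top of this hunk) chains consecutive reloads of the same stack slot within one basic block, so the allocator can try to give both reloads the same register. A toy version of that chaining, assuming reloads arrive sorted by start index; Reload and chainReloads are invented stand-ins, not LLVM types:

#include <map>
#include <vector>

struct Reload { unsigned vreg; int mbb; int stackSlot; };

// Map each reload's vreg to the next reload of the same slot in the same
// block - a simplified NextReloadMap as built by the deleted loop.
std::map<unsigned, unsigned>
chainReloads(const std::vector<Reload> &sorted) {
  std::map<unsigned, unsigned> next;
  const Reload *last = nullptr;
  for (const Reload &r : sorted) {
    if (last && last->mbb == r.mbb && last->stackSlot == r.stackSlot)
      next[last->vreg] = r.vreg;
    last = &r;
  }
  return next;
}
]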
-
-unsigned RALinScan::getFreePhysReg(LiveInterval* cur,
- const TargetRegisterClass *RC,
- unsigned MaxInactiveCount,
- SmallVector<unsigned, 256> &inactiveCounts,
- bool SkipDGRegs) {
- unsigned FreeReg = 0;
- unsigned FreeRegInactiveCount = 0;
-
- std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(cur->reg);
- // Resolve second part of the hint (if possible) given the current allocation.
- unsigned physReg = Hint.second;
- if (TargetRegisterInfo::isVirtualRegister(physReg) && vrm_->hasPhys(physReg))
- physReg = vrm_->getPhys(physReg);
-
- ArrayRef<unsigned> Order;
- if (Hint.first)
- Order = tri_->getRawAllocationOrder(RC, Hint.first, physReg, *mf_);
- else
- Order = RegClassInfo.getOrder(RC);
-
- assert(!Order.empty() && "No allocatable register in this register class!");
-
- // Scan for the first available register.
- for (unsigned i = 0; i != Order.size(); ++i) {
- unsigned Reg = Order[i];
- // Ignore "downgraded" registers.
- if (SkipDGRegs && DowngradedRegs.count(Reg))
- continue;
- // Skip reserved registers.
- if (reservedRegs_.test(Reg))
- continue;
- // Skip recently allocated registers.
- if (isRegAvail(Reg) && (!SkipDGRegs || !isRecentlyUsed(Reg))) {
- FreeReg = Reg;
- if (FreeReg < inactiveCounts.size())
- FreeRegInactiveCount = inactiveCounts[FreeReg];
- else
- FreeRegInactiveCount = 0;
- break;
- }
- }
-
- // If there are no free regs, or if this reg has the max inactive count,
- // return this register.
- if (FreeReg == 0 || FreeRegInactiveCount == MaxInactiveCount) {
- // Remember what register we picked so we can skip it next time.
- if (FreeReg != 0) recordRecentlyUsed(FreeReg);
- return FreeReg;
- }
-
- // Continue scanning the registers, looking for the one with the highest
- // inactive count. Alkis found that this reduced register pressure very
- // slightly on X86 (in rev 1.94 of this file), though this should probably be
- // reevaluated now.
- for (unsigned i = 0; i != Order.size(); ++i) {
- unsigned Reg = Order[i];
- // Ignore "downgraded" registers.
- if (SkipDGRegs && DowngradedRegs.count(Reg))
- continue;
- // Skip reserved registers.
- if (reservedRegs_.test(Reg))
- continue;
- if (isRegAvail(Reg) && Reg < inactiveCounts.size() &&
- FreeRegInactiveCount < inactiveCounts[Reg] &&
- (!SkipDGRegs || !isRecentlyUsed(Reg))) {
- FreeReg = Reg;
- FreeRegInactiveCount = inactiveCounts[Reg];
- if (FreeRegInactiveCount == MaxInactiveCount)
- break; // We found the one with the max inactive count.
- }
- }
-
- // Remember what register we picked so we can skip it next time.
- recordRecentlyUsed(FreeReg);
-
- return FreeReg;
-}
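
[Editor's note: the deleted scan above works in two passes: first it takes the first available register in allocation order, then it keeps looking for a free register with the maximum inactive count, a heuristic observed to reduce register pressure slightly. A compressed single-pass sketch with toy predicates, not the original interfaces:

#include <vector>

// Toy stand-ins: 'avail' says a register is free right now,
// 'inactiveCount' counts inactive intervals already assigned to it.
unsigned pickFreeReg(const std::vector<unsigned> &order,
                     const std::vector<bool> &avail,
                     const std::vector<unsigned> &inactiveCount) {
  unsigned best = 0, bestCount = 0;
  for (unsigned reg : order) {
    if (!avail[reg])
      continue;
    unsigned c = reg < inactiveCount.size() ? inactiveCount[reg] : 0;
    if (!best || c > bestCount) { // prefer the max inactive count
      best = reg;
      bestCount = c;
    }
  }
  return best;                    // 0 means no free register
}
]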
-
-/// getFreePhysReg - return a free physical register for this virtual register
-/// interval if we have one, otherwise return 0.
-unsigned RALinScan::getFreePhysReg(LiveInterval *cur) {
- SmallVector<unsigned, 256> inactiveCounts;
- unsigned MaxInactiveCount = 0;
-
- const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
- const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);
-
- for (IntervalPtrs::iterator i = inactive_.begin(), e = inactive_.end();
- i != e; ++i) {
- unsigned reg = i->first->reg;
- assert(TargetRegisterInfo::isVirtualRegister(reg) &&
- "Can only allocate virtual registers!");
-
- // If this is not in a related reg class to the register we're allocating,
- // don't check it.
- const TargetRegisterClass *RegRC = mri_->getRegClass(reg);
- if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader) {
- reg = vrm_->getPhys(reg);
- if (inactiveCounts.size() <= reg)
- inactiveCounts.resize(reg+1);
- ++inactiveCounts[reg];
- MaxInactiveCount = std::max(MaxInactiveCount, inactiveCounts[reg]);
- }
- }
-
- // If copy coalescer has assigned a "preferred" register, check if it's
- // available first.
- unsigned Preference = vrm_->getRegAllocPref(cur->reg);
- if (Preference) {
- DEBUG(dbgs() << "(preferred: " << tri_->getName(Preference) << ") ");
- if (isRegAvail(Preference) &&
- RC->contains(Preference))
- return Preference;
- }
-
- unsigned FreeReg = getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts,
- true);
- if (FreeReg)
- return FreeReg;
- return getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts, false);
-}
-
-FunctionPass* llvm::createLinearScanRegisterAllocator() {
- return new RALinScan();
-}
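
[Editor's note: the RALinScan backtracking deleted in this file spills every interval aliasing the chosen physreg, then unwinds the handled list back to the earliest start of any spilled range. A minimal self-contained sketch of that rollback idea, with simplified types that are not LLVM's:

#include <vector>

// Toy stand-ins for LiveInterval and the allocator's work lists.
struct Interval { unsigned reg; int begin; bool spilled; };

// Pop everything allocated at or after 'earliestStart' off 'handled'
// and requeue it, mirroring the undo loop deleted above.
void rollbackTo(std::vector<Interval> &handled,
                std::vector<Interval> &unhandled, int earliestStart) {
  while (!handled.empty()) {
    Interval i = handled.back();
    if (i.begin < earliestStart)
      break;                  // older allocations stay valid
    handled.pop_back();
    if (!i.spilled)
      unhandled.push_back(i); // re-run allocation for this interval
  }
}
]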
diff --git a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
index 0d2cf2d..a284614 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
@@ -32,14 +32,17 @@
#define DEBUG_TYPE "regalloc"
#include "RenderMachineFunction.h"
-#include "Splitter.h"
+#include "Spiller.h"
#include "VirtRegMap.h"
-#include "VirtRegRewriter.h"
#include "RegisterCoalescer.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/RegAllocPBQP.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -54,6 +57,7 @@
#include <limits>
#include <memory>
#include <set>
+#include <sstream>
#include <vector>
using namespace llvm;
@@ -67,10 +71,12 @@ pbqpCoalescing("pbqp-coalescing",
cl::desc("Attempt coalescing during PBQP register allocation."),
cl::init(false), cl::Hidden);
+#ifndef NDEBUG
static cl::opt<bool>
-pbqpPreSplitting("pbqp-pre-splitting",
- cl::desc("Pre-split before PBQP register allocation."),
- cl::init(false), cl::Hidden);
+pbqpDumpGraphs("pbqp-dump-graphs",
+ cl::desc("Dump graphs for each function/round in the compilation unit."),
+ cl::init(false), cl::Hidden);
+#endif
namespace {
@@ -88,11 +94,9 @@ public:
: MachineFunctionPass(ID), builder(b), customPassID(cPassID) {
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
- initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
- initializeLoopSplitterPass(*PassRegistry::getPassRegistry());
initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
}
@@ -132,6 +136,7 @@ private:
MachineRegisterInfo *mri;
RenderMachineFunction *rmf;
+ std::auto_ptr<Spiller> spiller;
LiveIntervals *lis;
LiveStacks *lss;
VirtRegMap *vrm;
@@ -141,10 +146,6 @@ private:
/// \brief Finds the initial set of vreg intervals to allocate.
void findVRegIntervalsToAlloc();
- /// \brief Adds a stack interval if the given live interval has been
- /// spilled. Used to support stack slot coloring.
- void addStackInterval(const LiveInterval *spilled,MachineRegisterInfo* mri);
-
/// \brief Given a solved PBQP problem maps this solution back to a register
/// assignment.
bool mapPBQPToRegAlloc(const PBQPRAProblem &problem,
@@ -170,7 +171,7 @@ PBQP::Graph::NodeItr PBQPRAProblem::getNodeForVReg(unsigned vreg) const {
VReg2Node::const_iterator nodeItr = vreg2Node.find(vreg);
assert(nodeItr != vreg2Node.end() && "No node for vreg.");
return nodeItr->second;
-
+
}
const PBQPRAProblem::AllowedSet&
@@ -195,9 +196,9 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
const RegSet &vregs) {
typedef std::vector<const LiveInterval*> LIVector;
-
+ ArrayRef<SlotIndex> regMaskSlots = lis->getRegMaskSlots();
MachineRegisterInfo *mri = &mf->getRegInfo();
- const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();
+ const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();
std::auto_ptr<PBQPRAProblem> p(new PBQPRAProblem());
PBQP::Graph &g = p->getGraph();
@@ -214,7 +215,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
BitVector reservedRegs = tri->getReservedRegs(*mf);
- // Iterate over vregs.
+ // Iterate over vregs.
for (RegSet::const_iterator vregItr = vregs.begin(), vregEnd = vregs.end();
vregItr != vregEnd; ++vregItr) {
unsigned vreg = *vregItr;
@@ -224,7 +225,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
// Compute an initial allowed set for the current vreg.
typedef std::vector<unsigned> VRAllowed;
VRAllowed vrAllowed;
- ArrayRef<unsigned> rawOrder = trc->getRawAllocationOrder(*mf);
+ ArrayRef<uint16_t> rawOrder = trc->getRawAllocationOrder(*mf);
for (unsigned i = 0; i != rawOrder.size(); ++i) {
unsigned preg = rawOrder[i];
if (!reservedRegs.test(preg)) {
@@ -232,7 +233,9 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
}
}
- // Remove any physical registers which overlap.
+ RegSet overlappingPRegs;
+
+ // Record physical registers whose ranges overlap.
for (RegSet::const_iterator pregItr = pregs.begin(),
pregEnd = pregs.end();
pregItr != pregEnd; ++pregItr) {
@@ -243,9 +246,41 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
continue;
}
- if (!vregLI->overlaps(*pregLI)) {
- continue;
+ if (vregLI->overlaps(*pregLI))
+ overlappingPRegs.insert(preg);
+ }
+
+ // Record any overlaps with regmask operands.
+ BitVector regMaskOverlaps(tri->getNumRegs());
+ for (ArrayRef<SlotIndex>::iterator rmItr = regMaskSlots.begin(),
+ rmEnd = regMaskSlots.end();
+ rmItr != rmEnd; ++rmItr) {
+ SlotIndex rmIdx = *rmItr;
+ if (vregLI->liveAt(rmIdx)) {
+ MachineInstr *rmMI = lis->getInstructionFromIndex(rmIdx);
+ const uint32_t* regMask = 0;
+ for (MachineInstr::mop_iterator mopItr = rmMI->operands_begin(),
+ mopEnd = rmMI->operands_end();
+ mopItr != mopEnd; ++mopItr) {
+ if (mopItr->isRegMask()) {
+ regMask = mopItr->getRegMask();
+ break;
+ }
+ }
+ assert(regMask != 0 && "Couldn't find register mask.");
+ regMaskOverlaps.setBitsNotInMask(regMask);
}
+ }
+
+ for (unsigned preg = 0; preg < tri->getNumRegs(); ++preg) {
+ if (regMaskOverlaps.test(preg))
+ overlappingPRegs.insert(preg);
+ }
+
+ for (RegSet::const_iterator pregItr = overlappingPRegs.begin(),
+ pregEnd = overlappingPRegs.end();
+ pregItr != pregEnd; ++pregItr) {
+ unsigned preg = *pregItr;
// Remove the register from the allowed set.
VRAllowed::iterator eraseItr =
@@ -256,7 +291,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
}
// Also remove any aliases.
- const unsigned *aliasItr = tri->getAliasSet(preg);
+ const uint16_t *aliasItr = tri->getAliasSet(preg);
if (aliasItr != 0) {
for (; *aliasItr != 0; ++aliasItr) {
VRAllowed::iterator eraseItr =
@@ -270,7 +305,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
}
// Construct the node.
- PBQP::Graph::NodeItr node =
+ PBQP::Graph::NodeItr node =
g.addNode(PBQP::Vector(vrAllowed.size() + 1, 0));
// Record the mapping and allowed set in the problem.
@@ -371,7 +406,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build(
const float copyFactor = 0.5; // Cost of copy relative to load. Current
// value plucked randomly out of the air.
-
+
PBQP::PBQPNum cBenefit =
copyFactor * LiveIntervals::getSpillWeight(false, true,
loopInfo->getLoopDepth(mbb));
@@ -382,7 +417,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build(
}
const PBQPRAProblem::AllowedSet &allowed = p->getAllowedSet(src);
- unsigned pregOpt = 0;
+ unsigned pregOpt = 0;
while (pregOpt < allowed.size() && allowed[pregOpt] != dst) {
++pregOpt;
}
@@ -407,7 +442,7 @@ std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build(
std::swap(allowed1, allowed2);
}
}
-
+
addVirtRegCoalesce(g.getEdgeCosts(edge), *allowed1, *allowed2,
cBenefit);
}
@@ -439,27 +474,29 @@ void PBQPBuilderWithCoalescing::addVirtRegCoalesce(
if (preg1 == preg2) {
costMat[i + 1][j + 1] += -benefit;
- }
+ }
}
}
}
void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
+ au.setPreservesCFG();
+ au.addRequired<AliasAnalysis>();
+ au.addPreserved<AliasAnalysis>();
au.addRequired<SlotIndexes>();
au.addPreserved<SlotIndexes>();
au.addRequired<LiveIntervals>();
//au.addRequiredID(SplitCriticalEdgesID);
- au.addRequiredID(RegisterCoalescerPassID);
if (customPassID)
au.addRequiredID(*customPassID);
au.addRequired<CalculateSpillWeights>();
au.addRequired<LiveStacks>();
au.addPreserved<LiveStacks>();
+ au.addRequired<MachineDominatorTree>();
+ au.addPreserved<MachineDominatorTree>();
au.addRequired<MachineLoopInfo>();
au.addPreserved<MachineLoopInfo>();
- if (pbqpPreSplitting)
- au.addRequired<LoopSplitter>();
au.addRequired<VirtRegMap>();
au.addRequired<RenderMachineFunction>();
MachineFunctionPass::getAnalysisUsage(au);
@@ -488,29 +525,6 @@ void RegAllocPBQP::findVRegIntervalsToAlloc() {
}
}
-void RegAllocPBQP::addStackInterval(const LiveInterval *spilled,
- MachineRegisterInfo* mri) {
- int stackSlot = vrm->getStackSlot(spilled->reg);
-
- if (stackSlot == VirtRegMap::NO_STACK_SLOT) {
- return;
- }
-
- const TargetRegisterClass *RC = mri->getRegClass(spilled->reg);
- LiveInterval &stackInterval = lss->getOrCreateInterval(stackSlot, RC);
-
- VNInfo *vni;
- if (stackInterval.getNumValNums() != 0) {
- vni = stackInterval.getValNumInfo(0);
- } else {
- vni = stackInterval.getNextValue(
- SlotIndex(), 0, lss->getVNInfoAllocator());
- }
-
- LiveInterval &rhsInterval = lis->getInterval(spilled->reg);
- stackInterval.MergeRangesInAsValue(rhsInterval, vni);
-}
-
bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
const PBQP::Solution &solution) {
// Set to true if we have any spills
@@ -529,28 +543,22 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
unsigned alloc = solution.getSelection(node);
if (problem.isPRegOption(vreg, alloc)) {
- unsigned preg = problem.getPRegForOption(vreg, alloc);
+ unsigned preg = problem.getPRegForOption(vreg, alloc);
DEBUG(dbgs() << "VREG " << vreg << " -> " << tri->getName(preg) << "\n");
assert(preg != 0 && "Invalid preg selected.");
- vrm->assignVirt2Phys(vreg, preg);
+ vrm->assignVirt2Phys(vreg, preg);
} else if (problem.isSpillOption(vreg, alloc)) {
vregsToAlloc.erase(vreg);
- const LiveInterval* spillInterval = &lis->getInterval(vreg);
- double oldWeight = spillInterval->weight;
- rmf->rememberUseDefs(spillInterval);
- std::vector<LiveInterval*> newSpills =
- lis->addIntervalsForSpills(*spillInterval, 0, loopInfo, *vrm);
- addStackInterval(spillInterval, mri);
- rmf->rememberSpills(spillInterval, newSpills);
-
- (void) oldWeight;
+ SmallVector<LiveInterval*, 8> newSpills;
+ LiveRangeEdit LRE(lis->getInterval(vreg), newSpills, *mf, *lis, vrm);
+ spiller->spill(LRE);
+
DEBUG(dbgs() << "VREG " << vreg << " -> SPILLED (Cost: "
- << oldWeight << ", New vregs: ");
+ << LRE.getParent().weight << ", New vregs: ");
// Copy any newly inserted live intervals into the list of regs to
// allocate.
- for (std::vector<LiveInterval*>::const_iterator
- itr = newSpills.begin(), end = newSpills.end();
+ for (LiveRangeEdit::iterator itr = LRE.begin(), end = LRE.end();
itr != end; ++itr) {
assert(!(*itr)->empty() && "Empty spill range.");
DEBUG(dbgs() << (*itr)->reg << " ");
@@ -560,9 +568,9 @@ bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
DEBUG(dbgs() << ")\n");
// We need another round if spill intervals were added.
- anotherRoundNeeded |= !newSpills.empty();
+ anotherRoundNeeded |= !LRE.empty();
} else {
- assert(false && "Unknown allocation option.");
+ llvm_unreachable("Unknown allocation option.");
}
}
@@ -642,7 +650,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
tm = &mf->getTarget();
tri = tm->getRegisterInfo();
tii = tm->getInstrInfo();
- mri = &mf->getRegInfo();
+ mri = &mf->getRegInfo();
lis = &getAnalysis<LiveIntervals>();
lss = &getAnalysis<LiveStacks>();
@@ -650,7 +658,9 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
rmf = &getAnalysis<RenderMachineFunction>();
vrm = &getAnalysis<VirtRegMap>();
+ spiller.reset(createInlineSpiller(*this, MF, *vrm));
+ mri->freezeReservedRegs(MF);
DEBUG(dbgs() << "PBQP Register Allocating for " << mf->getFunction()->getName() << "\n");
@@ -666,6 +676,12 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
// Find the vreg intervals in need of allocation.
findVRegIntervalsToAlloc();
+ const Function* func = mf->getFunction();
+ std::string fqn =
+ func->getParent()->getModuleIdentifier() + "." +
+ func->getName().str();
+ (void)fqn;
+
// If there are non-empty intervals allocate them using pbqp.
if (!vregsToAlloc.empty()) {
@@ -677,6 +693,20 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
std::auto_ptr<PBQPRAProblem> problem =
builder->build(mf, lis, loopInfo, vregsToAlloc);
+
+#ifndef NDEBUG
+ if (pbqpDumpGraphs) {
+ std::ostringstream rs;
+ rs << round;
+ std::string graphFileName(fqn + "." + rs.str() + ".pbqpgraph");
+ std::string tmp;
+ raw_fd_ostream os(graphFileName.c_str(), tmp);
+ DEBUG(dbgs() << "Dumping graph for round " << round << " to \""
+ << graphFileName << "\"\n");
+ problem->getGraph().dump(os);
+ }
+#endif
+
PBQP::Solution solution =
PBQP::HeuristicSolver<PBQP::Heuristics::Briggs>::solve(
problem->getGraph());
@@ -698,9 +728,12 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *vrm << "\n");
// Run rewriter
- std::auto_ptr<VirtRegRewriter> rewriter(createVirtRegRewriter());
+ vrm->rewrite(lis->getSlotIndexes());
- rewriter->runOnMachineFunction(*mf, *vrm, lis);
+ // All machine operands and other references to virtual registers have been
+ // replaced. Remove the virtual registers.
+ vrm->clearAllVirt();
+ mri->clearVirtRegs();
return true;
}
diff --git a/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp b/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
index 786d279..17165fa 100644
--- a/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterClassInfo.cpp
@@ -18,12 +18,16 @@
#include "RegisterClassInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetMachine.h"
-
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+static cl::opt<unsigned>
+StressRA("stress-regalloc", cl::Hidden, cl::init(0), cl::value_desc("N"),
+ cl::desc("Limit all regclasses to N registers"));
+
RegisterClassInfo::RegisterClassInfo() : Tag(0), MF(0), TRI(0), CalleeSaved(0)
{}
@@ -39,14 +43,14 @@ void RegisterClassInfo::runOnMachineFunction(const MachineFunction &mf) {
}
// Does this MF have different CSRs?
- const unsigned *CSR = TRI->getCalleeSavedRegs(MF);
+ const uint16_t *CSR = TRI->getCalleeSavedRegs(MF);
if (Update || CSR != CalleeSaved) {
// Build a CSRNum map. Every CSR alias gets an entry pointing to the last
// overlapping CSR.
CSRNum.clear();
CSRNum.resize(TRI->getNumRegs(), 0);
for (unsigned N = 0; unsigned Reg = CSR[N]; ++N)
- for (const unsigned *AS = TRI->getOverlaps(Reg);
+ for (const uint16_t *AS = TRI->getOverlaps(Reg);
unsigned Alias = *AS; ++AS)
CSRNum[Alias] = N + 1; // 0 means no CSR, 1 means CalleeSaved[0], ...
Update = true;
@@ -81,7 +85,7 @@ void RegisterClassInfo::compute(const TargetRegisterClass *RC) const {
// FIXME: Once targets reserve registers instead of removing them from the
// allocation order, we can simply use begin/end here.
- ArrayRef<unsigned> RawOrder = RC->getRawAllocationOrder(*MF);
+ ArrayRef<uint16_t> RawOrder = RC->getRawAllocationOrder(*MF);
for (unsigned i = 0; i != RawOrder.size(); ++i) {
unsigned PhysReg = RawOrder[i];
// Remove reserved registers from the allocation order.
@@ -99,6 +103,10 @@ void RegisterClassInfo::compute(const TargetRegisterClass *RC) const {
// CSR aliases go after the volatile registers, preserve the target's order.
std::copy(CSRAlias.begin(), CSRAlias.end(), &RCI.Order[N]);
+ // Register allocator stress test. Clip register class to N registers.
+ if (StressRA && RCI.NumRegs > StressRA)
+ RCI.NumRegs = StressRA;
+
// Check if RC is a proper sub-class.
if (const TargetRegisterClass *Super = TRI->getLargestLegalSuperClass(RC))
if (Super != RC && getNumAllocatableRegs(Super) > RCI.NumRegs)
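
[Editor's note: the new option added above clips each class's allocation order after reserved registers are filtered out; invoking the backend with something like -stress-regalloc=4 would then limit every class to four registers, exercising spill paths on small tests. A toy version of the clipping, with invented names:

#include <vector>

// Hypothetical stand-in: 'order' is the computed allocation order for a
// register class; 'stressLimit' plays the role of -stress-regalloc's N.
void clipAllocationOrder(std::vector<unsigned> &order, unsigned stressLimit) {
  if (stressLimit && order.size() > stressLimit)
    order.resize(stressLimit); // pretend the class only has N registers
}
]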
diff --git a/contrib/llvm/lib/CodeGen/RegisterClassInfo.h b/contrib/llvm/lib/CodeGen/RegisterClassInfo.h
index 2c14070..400e1f4 100644
--- a/contrib/llvm/lib/CodeGen/RegisterClassInfo.h
+++ b/contrib/llvm/lib/CodeGen/RegisterClassInfo.h
@@ -49,7 +49,7 @@ class RegisterClassInfo {
// Callee saved registers of last MF. Assumed to be valid until the next
// runOnFunction() call.
- const unsigned *CalleeSaved;
+ const uint16_t *CalleeSaved;
// Map register number to CalleeSaved index + 1;
SmallVector<uint8_t, 4> CSRNum;
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 9b414d6..75f88ca 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "regcoalescing"
+#define DEBUG_TYPE "regalloc"
#include "RegisterCoalescer.h"
#include "LiveDebugVariables.h"
#include "RegisterClassInfo.h"
@@ -169,10 +169,6 @@ namespace {
/// it as well.
bool RemoveDeadDef(LiveInterval &li, MachineInstr *DefMI);
- /// RemoveCopyFlag - If DstReg is no longer defined by CopyMI, clear the
- /// VNInfo copy flag for DstReg and all aliases.
- void RemoveCopyFlag(unsigned DstReg, const MachineInstr *CopyMI);
-
/// markAsJoined - Remember that CopyMI has already been joined.
void markAsJoined(MachineInstr *CopyMI);
@@ -197,7 +193,7 @@ namespace {
};
} /// end anonymous namespace
-char &llvm::RegisterCoalescerPassID = RegisterCoalescer::ID;
+char &llvm::RegisterCoalescerID = RegisterCoalescer::ID;
INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing",
"Simple Register Coalescing", false, false)
@@ -205,9 +201,6 @@ INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
-INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
-INITIALIZE_PASS_DEPENDENCY(PHIElimination)
-INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing",
"Simple Register Coalescing", false, false)
@@ -379,9 +372,6 @@ void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addPreservedID(MachineDominatorsID);
- AU.addPreservedID(StrongPHIEliminationID);
- AU.addPreservedID(PHIEliminationID);
- AU.addPreservedID(TwoAddressInstructionPassID);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -423,7 +413,7 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
LiveInterval &IntB =
LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
- SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getDefIndex();
+ SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot();
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
@@ -434,40 +424,19 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
// Get the location that B is defined at. Two options: either this value has
// an unknown definition point or it is defined at CopyIdx. If unknown, we
// can't process it.
- if (!BValNo->isDefByCopy()) return false;
- assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
+ if (BValNo->def != CopyIdx) return false;
// AValNo is the value number in A that defines the copy, A3 in the example.
- SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
+ SlotIndex CopyUseIdx = CopyIdx.getRegSlot(true);
LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
// The live range might not exist after fun with physreg coalescing.
if (ALR == IntA.end()) return false;
VNInfo *AValNo = ALR->valno;
- // If it's re-defined by an early clobber somewhere in the live range, then
- // it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
- // See PR3149:
- // 172 %ECX<def> = MOV32rr %reg1039<kill>
- // 180 INLINEASM <es:subl $5,$1
- // sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9,
- // %EAX<kill>,
- // 36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
- // 188 %EAX<def> = MOV32rr %EAX<kill>
- // 196 %ECX<def> = MOV32rr %ECX<kill>
- // 204 %ECX<def> = MOV32rr %ECX<kill>
- // 212 %EAX<def> = MOV32rr %EAX<kill>
- // 220 %EAX<def> = MOV32rr %EAX
- // 228 %reg1039<def> = MOV32rr %ECX<kill>
- // The early clobber operand ties ECX input to the ECX def.
- //
- // The live interval of ECX is represented as this:
- // %reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47)
- // The coalescer has no idea there was a def in the middle of [174,230].
- if (AValNo->hasRedefByEC())
- return false;
// If AValNo is defined as a copy from IntB, we can potentially process this.
// Get the instruction that defines this value number.
- if (!CP.isCoalescable(AValNo->getCopy()))
+ MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def);
+ if (!CP.isCoalescable(ACopyMI))
return false;
// Get the LiveRange in IntB that this value number starts with.
@@ -492,7 +461,7 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
// of its aliases is overlapping the live interval of the virtual register.
// If so, do not coalesce.
if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
- for (const unsigned *AS = TRI->getAliasSet(IntB.reg); *AS; ++AS)
+ for (const uint16_t *AS = TRI->getAliasSet(IntB.reg); *AS; ++AS)
if (LIS->hasInterval(*AS) && IntA.overlaps(LIS->getInterval(*AS))) {
DEBUG({
dbgs() << "\t\tInterfere with alias ";
@@ -511,8 +480,7 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
// We are about to delete CopyMI, so need to remove it as the 'instruction
// that defines this value #'. Update the valnum with the new defining
// instruction #.
- BValNo->def = FillerStart;
- BValNo->setCopy(0);
+ BValNo->def = FillerStart;
// Okay, we can merge them. We need to insert a new liverange:
// [ValLR.end, BLR.begin) of either value number, then we merge the
@@ -522,12 +490,12 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
// If the IntB live range is assigned to a physical register, and if that
// physreg has sub-registers, update their live intervals as well.
if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
- for (const unsigned *SR = TRI->getSubRegisters(IntB.reg); *SR; ++SR) {
+ for (const uint16_t *SR = TRI->getSubRegisters(IntB.reg); *SR; ++SR) {
if (!LIS->hasInterval(*SR))
continue;
LiveInterval &SRLI = LIS->getInterval(*SR);
SRLI.addRange(LiveRange(FillerStart, FillerEnd,
- SRLI.getNextValue(FillerStart, 0,
+ SRLI.getNextValue(FillerStart,
LIS->getVNInfoAllocator())));
}
}
@@ -554,9 +522,11 @@ bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
ValLREndInst->getOperand(UIdx).setIsKill(false);
}
- // If the copy instruction was killing the destination register before the
- // merge, find the last use and trim the live range. That will also add the
- // isKill marker.
+ // Rewrite the copy. If the copy instruction was killing the destination
+ // register before the merge, find the last use and trim the live range. That
+ // will also add the isKill marker.
+ CopyMI->substituteRegister(IntA.reg, IntB.reg, CP.getSubIdx(),
+ *TRI);
if (ALR->end == CopyIdx)
LIS->shrinkToUses(&IntA);
@@ -625,7 +595,7 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
if (!LIS->hasInterval(CP.getDstReg()))
return false;
- SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getDefIndex();
+ SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot();
LiveInterval &IntA =
LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
@@ -635,13 +605,13 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
- if (!BValNo || !BValNo->isDefByCopy())
+ if (!BValNo || BValNo->def != CopyIdx)
return false;
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
// AValNo is the value number in A that defines the copy, A3 in the example.
- VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getUseIndex());
+ VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getRegSlot(true));
assert(AValNo && "COPY source not live");
// If other defs can reach uses of this def, then it's not safe to perform
@@ -651,8 +621,7 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def);
if (!DefMI)
return false;
- const MCInstrDesc &MCID = DefMI->getDesc();
- if (!MCID.isCommutable())
+ if (!DefMI->isCommutable())
return false;
// If DefMI is a two-address instruction then commuting it will change the
// destination register.
@@ -684,7 +653,7 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
// Abort if the aliases of IntB.reg have values that are not simply the
// clobbers from the superreg.
if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
- for (const unsigned *AS = TRI->getAliasSet(IntB.reg); *AS; ++AS)
+ for (const uint16_t *AS = TRI->getAliasSet(IntB.reg); *AS; ++AS)
if (LIS->hasInterval(*AS) &&
HasOtherReachingDefs(IntA, LIS->getInterval(*AS), AValNo, 0))
return false;
@@ -718,7 +687,8 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
return false;
if (NewMI != DefMI) {
LIS->ReplaceMachineInstrInMaps(DefMI, NewMI);
- MBB->insert(DefMI, NewMI);
+ MachineBasicBlock::iterator Pos = DefMI;
+ MBB->insert(Pos, NewMI);
MBB->erase(DefMI);
}
unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false);
@@ -747,7 +717,7 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
UseMO.setReg(NewReg);
continue;
}
- SlotIndex UseIdx = LIS->getInstructionIndex(UseMI).getUseIndex();
+ SlotIndex UseIdx = LIS->getInstructionIndex(UseMI).getRegSlot(true);
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
if (ULR == IntA.end() || ULR->valno != AValNo)
continue;
@@ -765,7 +735,7 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
// This copy will become a noop. If it's defining a new val#, merge it into
// BValNo.
- SlotIndex DefIdx = UseIdx.getDefIndex();
+ SlotIndex DefIdx = UseIdx.getRegSlot();
VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
if (!DVNI)
continue;
@@ -779,7 +749,6 @@ bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
// is updated.
VNInfo *ValNo = BValNo;
ValNo->def = AValNo->def;
- ValNo->setCopy(0);
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
AI != AE; ++AI) {
if (AI->valno != AValNo) continue;
@@ -799,7 +768,7 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
bool preserveSrcInt,
unsigned DstReg,
MachineInstr *CopyMI) {
- SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getUseIndex();
+ SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot(true);
LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
assert(SrcLR != SrcInt.end() && "Live range not found!");
VNInfo *ValNo = SrcLR->valno;
@@ -809,14 +778,14 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
if (!DefMI)
return false;
assert(DefMI && "Defining instruction disappeared");
- const MCInstrDesc &MCID = DefMI->getDesc();
- if (!MCID.isAsCheapAsAMove())
+ if (!DefMI->isAsCheapAsAMove())
return false;
if (!TII->isTriviallyReMaterializable(DefMI, AA))
return false;
bool SawStore = false;
if (!DefMI->isSafeToMove(TII, AA, SawStore))
return false;
+ const MCInstrDesc &MCID = DefMI->getDesc();
if (MCID.getNumDefs() != 1)
return false;
if (!DefMI->isImplicitDef()) {
@@ -831,27 +800,52 @@ bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
return false;
}
- RemoveCopyFlag(DstReg, CopyMI);
-
MachineBasicBlock *MBB = CopyMI->getParent();
MachineBasicBlock::iterator MII =
llvm::next(MachineBasicBlock::iterator(CopyMI));
TII->reMaterialize(*MBB, MII, DstReg, 0, DefMI, *TRI);
MachineInstr *NewMI = prior(MII);
+ // NewMI may have dead implicit defs (e.g. EFLAGS for MOV<bits>r0 on X86).
+ // We need to remember these so we can add intervals once we insert
+ // NewMI into SlotIndexes.
+ SmallVector<unsigned, 4> NewMIImplDefs;
+ for (unsigned i = NewMI->getDesc().getNumOperands(),
+ e = NewMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = NewMI->getOperand(i);
+ if (MO.isReg()) {
+ assert(MO.isDef() && MO.isImplicit() && MO.isDead() &&
+ TargetRegisterInfo::isPhysicalRegister(MO.getReg()));
+ NewMIImplDefs.push_back(MO.getReg());
+ }
+ }
+
// CopyMI may have implicit operands, transfer them over to the newly
// rematerialized instruction. And update implicit def interval valnos.
for (unsigned i = CopyMI->getDesc().getNumOperands(),
e = CopyMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = CopyMI->getOperand(i);
- if (MO.isReg() && MO.isImplicit())
- NewMI->addOperand(MO);
- if (MO.isDef())
- RemoveCopyFlag(MO.getReg(), CopyMI);
+ if (MO.isReg()) {
+ assert(MO.isImplicit() && "No explicit operands after implicit operands.");
+ // Discard VReg implicit defs.
+ if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
+ NewMI->addOperand(MO);
+ }
+ }
}
- NewMI->copyImplicitOps(CopyMI);
LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI);
+
+ SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
+ for (unsigned i = 0, e = NewMIImplDefs.size(); i != e; ++i) {
+ unsigned reg = NewMIImplDefs[i];
+ LiveInterval &li = LIS->getInterval(reg);
+ VNInfo *DeadDefVN = li.getNextValue(NewMIIdx.getRegSlot(),
+ LIS->getVNInfoAllocator());
+ LiveRange lr(NewMIIdx.getRegSlot(), NewMIIdx.getDeadSlot(), DeadDefVN);
+ li.addRange(lr);
+ }
+
CopyMI->eraseFromParent();
ReMatCopies.insert(CopyMI);
ReMatDefs.insert(DefMI);
@@ -887,7 +881,7 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI,
DstInt = SrcInt;
SrcInt = 0;
- VNInfo *DeadVNI = DstInt->getVNInfoAt(Idx.getDefIndex());
+ VNInfo *DeadVNI = DstInt->getVNInfoAt(Idx.getRegSlot());
assert(DeadVNI && "No value defined in DstInt");
DstInt->removeValNo(DeadVNI);
@@ -941,13 +935,10 @@ RegisterCoalescer::UpdateRegDefsUses(const CoalescerPair &CP) {
SmallVector<unsigned,8> Ops;
bool Reads, Writes;
tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
- bool Kills = false, Deads = false;
// Replace SrcReg with DstReg in all UseMI operands.
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = UseMI->getOperand(Ops[i]);
- Kills |= MO.isKill();
- Deads |= MO.isDead();
// Make sure we don't create read-modify-write defs accidentally. We
// assume here that a SrcReg def cannot be joined into a live DstReg. If
@@ -967,19 +958,6 @@ RegisterCoalescer::UpdateRegDefsUses(const CoalescerPair &CP) {
if (JoinedCopies.count(UseMI))
continue;
- if (SubIdx) {
- // If UseMI was a simple SrcReg def, make sure we didn't turn it into a
- // read-modify-write of DstReg.
- if (Deads)
- UseMI->addRegisterDead(DstReg, TRI);
- else if (!Reads && Writes)
- UseMI->addRegisterDefined(DstReg, TRI);
-
- // Kill flags apply to the whole physical register.
- if (DstIsPhys && Kills)
- UseMI->addRegisterKilled(DstReg, TRI);
- }
-
DEBUG({
dbgs() << "\t\tupdated: ";
if (!UseMI->isDebugValue())
@@ -996,7 +974,7 @@ static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *LIS,
const TargetRegisterInfo *TRI) {
if (li.empty()) {
if (TargetRegisterInfo::isPhysicalRegister(li.reg))
- for (const unsigned* SR = TRI->getSubRegisters(li.reg); *SR; ++SR) {
+ for (const uint16_t* SR = TRI->getSubRegisters(li.reg); *SR; ++SR) {
if (!LIS->hasInterval(*SR))
continue;
LiveInterval &sli = LIS->getInterval(*SR);
@@ -1013,7 +991,7 @@ static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *LIS,
/// the val# it defines. If the live interval becomes empty, remove it as well.
bool RegisterCoalescer::RemoveDeadDef(LiveInterval &li,
MachineInstr *DefMI) {
- SlotIndex DefIdx = LIS->getInstructionIndex(DefMI).getDefIndex();
+ SlotIndex DefIdx = LIS->getInstructionIndex(DefMI).getRegSlot();
LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
if (DefIdx != MLR->valno->def)
return false;
@@ -1021,27 +999,6 @@ bool RegisterCoalescer::RemoveDeadDef(LiveInterval &li,
return removeIntervalIfEmpty(li, LIS, TRI);
}
-void RegisterCoalescer::RemoveCopyFlag(unsigned DstReg,
- const MachineInstr *CopyMI) {
- SlotIndex DefIdx = LIS->getInstructionIndex(CopyMI).getDefIndex();
- if (LIS->hasInterval(DstReg)) {
- LiveInterval &LI = LIS->getInterval(DstReg);
- if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
- if (LR->valno->def == DefIdx)
- LR->valno->setCopy(0);
- }
- if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
- return;
- for (const unsigned* AS = TRI->getAliasSet(DstReg); *AS; ++AS) {
- if (!LIS->hasInterval(*AS))
- continue;
- LiveInterval &LI = LIS->getInterval(*AS);
- if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
- if (LR->valno->def == DefIdx)
- LR->valno->setCopy(0);
- }
-}
-
/// shouldJoinPhys - Return true if a copy involving a physreg should be joined.
/// We need to be careful about coalescing a source physical register with a
/// virtual register. Once the coalescing is done, it cannot be broken and these
@@ -1279,7 +1236,7 @@ bool RegisterCoalescer::JoinCopy(MachineInstr *CopyMI, bool &Again) {
}
}
- // SrcReg is guarateed to be the register whose live interval that is
+ // SrcReg is guaranteed to be the register whose live interval that is
// being merged.
LIS->removeInterval(CP.getSrcReg());
@@ -1368,9 +1325,9 @@ static bool RegistersDefinedFromSameValue(LiveIntervals &li,
// FIXME: This is very conservative. For example, we don't handle
// physical registers.
- MachineInstr *MI = VNI->getCopy();
+ MachineInstr *MI = li.getInstructionFromIndex(VNI->def);
- if (!MI->isFullCopy() || CP.isPartial() || CP.isPhys())
+ if (!MI || !MI->isFullCopy() || CP.isPartial() || CP.isPhys())
return false;
unsigned Dst = MI->getOperand(0).getReg();
@@ -1388,11 +1345,9 @@ static bool RegistersDefinedFromSameValue(LiveIntervals &li,
assert(Dst == A);
VNInfo *Other = LR->valno;
- if (!Other->isDefByCopy())
- return false;
- const MachineInstr *OtherMI = Other->getCopy();
+ const MachineInstr *OtherMI = li.getInstructionFromIndex(Other->def);
- if (!OtherMI->isFullCopy())
+ if (!OtherMI || !OtherMI->isFullCopy())
return false;
unsigned OtherDst = OtherMI->getOperand(0).getReg();
@@ -1431,7 +1386,44 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
// than the full interference check below. We allow overlapping live ranges
// only when one is a copy of the other.
if (CP.isPhys()) {
- for (const unsigned *AS = TRI->getAliasSet(CP.getDstReg()); *AS; ++AS){
+ // Optimization for reserved registers like ESP.
+ // We can only merge with a reserved physreg if RHS has a single value that
+ // is a copy of CP.DstReg(). The live range of the reserved register will
+ // look like a set of dead defs - we don't properly track the live range of
+ // reserved registers.
+ if (RegClassInfo.isReserved(CP.getDstReg())) {
+ assert(CP.isFlipped() && RHS.containsOneValue() &&
+ "Invalid join with reserved register");
+ // Deny any overlapping intervals. This depends on all the reserved
+ // register live ranges to look like dead defs.
+ for (const uint16_t *AS = TRI->getOverlaps(CP.getDstReg()); *AS; ++AS) {
+ if (!LIS->hasInterval(*AS)) {
+ // Make sure at least DstReg itself exists before attempting a join.
+ if (*AS == CP.getDstReg())
+ LIS->getOrCreateInterval(CP.getDstReg());
+ continue;
+ }
+ if (RHS.overlaps(LIS->getInterval(*AS))) {
+ DEBUG(dbgs() << "\t\tInterference: " << PrintReg(*AS, TRI) << '\n');
+ return false;
+ }
+ }
+ // Skip any value computations, we are not adding new values to the
+ // reserved register. Also skip merging the live ranges, the reserved
+ // register live range doesn't need to be accurate as long as all the
+ // defs are there.
+ return true;
+ }
+
+ // Check if a register mask clobbers DstReg.
+ BitVector UsableRegs;
+ if (LIS->checkRegMaskInterference(RHS, UsableRegs) &&
+ !UsableRegs.test(CP.getDstReg())) {
+ DEBUG(dbgs() << "\t\tRegister mask interference.\n");
+ return false;
+ }
+
+ for (const uint16_t *AS = TRI->getAliasSet(CP.getDstReg()); *AS; ++AS){
if (!LIS->hasInterval(*AS))
continue;
const LiveInterval &LHS = LIS->getInterval(*AS);
@@ -1485,12 +1477,12 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
i != e; ++i) {
VNInfo *VNI = *i;
- if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy?
+ if (VNI->isUnused() || VNI->isPHIDef())
+ continue;
+ MachineInstr *MI = LIS->getInstructionFromIndex(VNI->def);
+ assert(MI && "Missing def");
+ if (!MI->isCopyLike()) // Src not defined by a copy?
continue;
-
- // Never join with a register that has EarlyClobber redefs.
- if (VNI->hasRedefByEC())
- return false;
// Figure out the value # from the RHS.
LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
@@ -1499,7 +1491,6 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
// DstReg is known to be a register in the LHS interval. If the src is
// from the RHS interval, we can use its value #.
- MachineInstr *MI = VNI->getCopy();
if (!CP.isCoalescable(MI) &&
!RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, lr, DupCopies))
continue;
@@ -1512,12 +1503,12 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
i != e; ++i) {
VNInfo *VNI = *i;
- if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy?
+ if (VNI->isUnused() || VNI->isPHIDef())
+ continue;
+ MachineInstr *MI = LIS->getInstructionFromIndex(VNI->def);
+ assert(MI && "Missing def");
+ if (!MI->isCopyLike()) // Src not defined by a copy?
continue;
-
- // Never join with a register that has EarlyClobber redefs.
- if (VNI->hasRedefByEC())
- return false;
// Figure out the value # from the LHS.
LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
@@ -1526,7 +1517,6 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
// DstReg is known to be a register in the RHS interval. If the src is
// from the LHS interval, we can use its value #.
- MachineInstr *MI = VNI->getCopy();
if (!CP.isCoalescable(MI) &&
!RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, lr, DupCopies))
continue;
@@ -1600,10 +1590,6 @@ bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
if (LHSValNoAssignments[I->valno->id] !=
RHSValNoAssignments[J->valno->id])
return false;
- // If it's re-defined by an early clobber somewhere in the live range,
- // then conservatively abort coalescing.
- if (NewVNInfo[LHSValNoAssignments[I->valno->id]]->hasRedefByEC())
- return false;
}
if (I->end < J->end)
@@ -1905,8 +1891,8 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
unsigned Reg = MO.getReg();
if (!Reg)
continue;
+ DeadDefs.push_back(Reg);
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- DeadDefs.push_back(Reg);
// Remat may also enable register class inflation.
if (RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)))
InflateRegs.push_back(Reg);
@@ -1936,7 +1922,7 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
// Check for now unnecessary kill flags.
if (LIS->isNotInMIMap(MI)) continue;
- SlotIndex DefIdx = LIS->getInstructionIndex(MI).getDefIndex();
+ SlotIndex DefIdx = LIS->getInstructionIndex(MI).getRegSlot();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isKill()) continue;
@@ -1950,7 +1936,7 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
// remain alive.
if (!TargetRegisterInfo::isPhysicalRegister(reg))
continue;
- for (const unsigned *SR = TRI->getSubRegisters(reg);
+ for (const uint16_t *SR = TRI->getSubRegisters(reg);
unsigned S = *SR; ++SR)
if (LIS->hasInterval(S) && LIS->getInterval(S).liveAt(DefIdx))
MI->addRegisterDefined(S, TRI);
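
[Editor's note: the reserved-register fast path added in this file hinges on cheap overlap tests between live ranges. A sketch of such a test on sorted half-open [start, end) segments, roughly what LiveInterval::overlaps does underneath (simplified, not LLVM's implementation):

#include <cstddef>
#include <utility>
#include <vector>

using Seg = std::pair<int, int>; // half-open [start, end) segment

// Two-pointer sweep over sorted, non-overlapping segment lists.
bool overlaps(const std::vector<Seg> &a, const std::vector<Seg> &b) {
  std::size_t i = 0, j = 0;
  while (i != a.size() && j != b.size()) {
    if (a[i].second <= b[j].first)
      ++i;          // a's segment ends before b's begins
    else if (b[j].second <= a[i].first)
      ++j;          // b's segment ends before a's begins
    else
      return true;  // segments intersect
  }
  return false;
}
]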
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.h b/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
index 472c483..310b933 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.h
@@ -1,4 +1,4 @@
-//===-- RegisterCoalescer.h - Register Coalescing Interface ------*- C++ -*-===//
+//===-- RegisterCoalescer.h - Register Coalescing Interface -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains the abstract interface for register coalescers,
+// This file contains the abstract interface for register coalescers,
// allowing them to interact with and query register allocators.
//
//===----------------------------------------------------------------------===//
@@ -47,7 +47,7 @@ namespace llvm {
/// CrossClass - True when both regs are virtual, and newRC is constrained.
bool CrossClass;
- /// Flipped - True when DstReg and SrcReg are reversed from the oriignal
+ /// Flipped - True when DstReg and SrcReg are reversed from the original
/// copy instruction.
bool Flipped;
diff --git a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
index ca02aa1..03bd82e 100644
--- a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -37,7 +37,7 @@ using namespace llvm;
void RegScavenger::setUsed(unsigned Reg) {
RegsAvailable.reset(Reg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs)
RegsAvailable.reset(SubReg);
}
@@ -45,7 +45,7 @@ void RegScavenger::setUsed(unsigned Reg) {
bool RegScavenger::isAliasUsed(unsigned Reg) const {
if (isUsed(Reg))
return true;
- for (const unsigned *R = TRI->getAliasSet(Reg); *R; ++R)
+ for (const uint16_t *R = TRI->getAliasSet(Reg); *R; ++R)
if (isUsed(*R))
return true;
return false;
@@ -59,9 +59,6 @@ void RegScavenger::initRegState() {
// All registers started out unused.
RegsAvailable.set();
- // Reserved registers are always used.
- RegsAvailable ^= ReservedRegs;
-
if (!MBB)
return;
@@ -86,17 +83,24 @@ void RegScavenger::enterBasicBlock(MachineBasicBlock *mbb) {
assert((NumPhysRegs == 0 || NumPhysRegs == TRI->getNumRegs()) &&
"Target changed?");
+ // It is not possible to use the register scavenger after late optimization
+ // passes that don't preserve accurate liveness information.
+ assert(MRI->tracksLiveness() &&
+ "Cannot use register scavenger with inaccurate liveness");
+
// Self-initialize.
if (!MBB) {
NumPhysRegs = TRI->getNumRegs();
RegsAvailable.resize(NumPhysRegs);
+ KillRegs.resize(NumPhysRegs);
+ DefRegs.resize(NumPhysRegs);
// Create reserved registers bitvector.
ReservedRegs = TRI->getReservedRegs(MF);
// Create callee-saved registers bitvector.
CalleeSavedRegs.resize(NumPhysRegs);
- const unsigned *CSRegs = TRI->getCalleeSavedRegs();
+ const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF);
if (CSRegs != NULL)
for (unsigned i = 0; CSRegs[i]; ++i)
CalleeSavedRegs.set(CSRegs[i]);
@@ -110,13 +114,7 @@ void RegScavenger::enterBasicBlock(MachineBasicBlock *mbb) {
void RegScavenger::addRegWithSubRegs(BitVector &BV, unsigned Reg) {
BV.set(Reg);
- for (const unsigned *R = TRI->getSubRegisters(Reg); *R; R++)
- BV.set(*R);
-}
-
-void RegScavenger::addRegWithAliases(BitVector &BV, unsigned Reg) {
- BV.set(Reg);
- for (const unsigned *R = TRI->getAliasSet(Reg); *R; R++)
+ for (const uint16_t *R = TRI->getSubRegisters(Reg); *R; R++)
BV.set(*R);
}
@@ -148,12 +146,12 @@ void RegScavenger::forward() {
// predicated, conservatively assume "kill" markers do not actually kill the
// register. Similarly ignores "dead" markers.
bool isPred = TII->isPredicated(MI);
- BitVector EarlyClobberRegs(NumPhysRegs);
- BitVector KillRegs(NumPhysRegs);
- BitVector DefRegs(NumPhysRegs);
- BitVector DeadRegs(NumPhysRegs);
+ KillRegs.reset();
+ DefRegs.reset();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isRegMask())
+ (isPred ? DefRegs : KillRegs).setBitsNotInMask(MO.getRegMask());
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
@@ -164,21 +162,19 @@ void RegScavenger::forward() {
// Ignore undef uses.
if (MO.isUndef())
continue;
- // Two-address operands implicitly kill.
- if (!isPred && (MO.isKill() || MI->isRegTiedToDefOperand(i)))
+ if (!isPred && MO.isKill())
addRegWithSubRegs(KillRegs, Reg);
} else {
assert(MO.isDef());
if (!isPred && MO.isDead())
- addRegWithSubRegs(DeadRegs, Reg);
+ addRegWithSubRegs(KillRegs, Reg);
else
addRegWithSubRegs(DefRegs, Reg);
- if (MO.isEarlyClobber())
- addRegWithAliases(EarlyClobberRegs, Reg);
}
}
// Verify uses and defs.
+#ifndef NDEBUG
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
@@ -199,17 +195,18 @@ void RegScavenger::forward() {
// Ideally we would like a way to model this, but leaving the
// insert_subreg around causes both correctness and performance issues.
bool SubUsed = false;
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
+ for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs)
if (isUsed(SubReg)) {
SubUsed = true;
break;
}
- assert(SubUsed && "Using an undefined register!");
+ if (!SubUsed) {
+ MBB->getParent()->verify(NULL, "In Register Scavenger");
+ llvm_unreachable("Using an undefined register!");
+ }
(void)SubUsed;
}
- assert((!EarlyClobberRegs.test(Reg) || MI->isRegTiedToDefOperand(i)) &&
- "Using an early clobbered register!");
} else {
assert(MO.isDef());
#if 0
@@ -221,18 +218,20 @@ void RegScavenger::forward() {
#endif
}
}
+#endif // NDEBUG
// Commit the changes.
setUnused(KillRegs);
- setUnused(DeadRegs);
setUsed(DefRegs);
}
void RegScavenger::getRegsUsed(BitVector &used, bool includeReserved) {
+ used = RegsAvailable;
+ used.flip();
if (includeReserved)
- used = ~RegsAvailable;
+ used |= ReservedRegs;
else
- used = ~RegsAvailable & ~ReservedRegs;
+ used.reset(ReservedRegs);
}
unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
@@ -286,6 +285,8 @@ unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
// Remove any candidates touched by instruction.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isRegMask())
+ Candidates.clearBitsNotInMask(MO.getRegMask());
if (!MO.isReg() || MO.isUndef() || !MO.getReg())
continue;
if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
@@ -296,7 +297,7 @@ unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
continue;
}
Candidates.reset(MO.getReg());
- for (const unsigned *R = TRI->getAliasSet(MO.getReg()); *R; R++)
+ for (const uint16_t *R = TRI->getAliasSet(MO.getReg()); *R; R++)
Candidates.reset(*R);
}
// If we're not in a virtual reg's live range, this is a valid
@@ -347,9 +348,9 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
// RegsAvailable, as RegsAvailable does not take aliases into account.
// That's what getRegsAvailable() is for.
BitVector Available = getRegsAvailable(RC);
-
- if ((Candidates & Available).any())
- Candidates &= Available;
+ Available &= Candidates;
+ if (Available.any())
+ Candidates = Available;
// Find the register whose use is furthest away.
MachineBasicBlock::iterator UseMI;
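
[Editor's note: the scavenger change above folds regmask operands into the same kill/def bookkeeping as explicit operands: registers not preserved by the mask die at the call, or, for predicated instructions, are conservatively treated as defined. A small sketch of that update step using std::vector<bool> in place of BitVector:

#include <cstddef>
#include <cstdint>
#include <vector>

// Set every bit whose register is NOT preserved by the mask - a plain
// analogue of the BitVector::setBitsNotInMask call used in the patch.
void setBitsNotInMask(std::vector<bool> &bv, const uint32_t *mask) {
  for (std::size_t r = 0, e = bv.size(); r != e; ++r)
    if (!(mask[r / 32] & (1u << (r % 32))))
      bv[r] = true;
}

// Per-instruction update: masked-out regs are killed, unless the
// instruction is predicated, in which case assume they may be defined.
void applyRegMask(std::vector<bool> &killRegs, std::vector<bool> &defRegs,
                  const uint32_t *mask, bool isPredicated) {
  setBitsNotInMask(isPredicated ? defRegs : killRegs, mask);
}
]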
diff --git a/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp b/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp
index 8b02ec4..6020908 100644
--- a/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/RenderMachineFunction.cpp
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/RenderMachineFunction.cpp - MF->HTML -----s-----------===//
+//===-- llvm/CodeGen/RenderMachineFunction.cpp - MF->HTML -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -560,12 +560,13 @@ namespace llvm {
// For uses/defs recorded use/def indexes override current liveness and
// instruction operands (Only for the interval which records the indexes).
- if (i.isUse() || i.isDef()) {
+ // FIXME: This is all wrong, uses and defs share the same slots.
+ if (i.isEarlyClobber() || i.isRegister()) {
UseDefs::const_iterator udItr = useDefs.find(li);
if (udItr != useDefs.end()) {
const SlotSet &slotSet = udItr->second;
if (slotSet.count(i)) {
- if (i.isUse()) {
+ if (i.isEarlyClobber()) {
return Used;
}
// else
@@ -586,9 +587,9 @@ namespace llvm {
return AliveStack;
}
} else {
- if (i.isDef() && mi->definesRegister(li->reg, tri)) {
+ if (i.isRegister() && mi->definesRegister(li->reg, tri)) {
return Defined;
- } else if (i.isUse() && mi->readsRegister(li->reg)) {
+ } else if (i.isEarlyClobber() && mi->readsRegister(li->reg)) {
return Used;
} else {
if (vrm == 0 ||
@@ -804,7 +805,7 @@ namespace llvm {
os << indent + s(2) << "<tr height=6ex>\n";
// Render the code column.
- if (i.isLoad()) {
+ if (i.isBlock()) {
MachineBasicBlock *mbb = sis->getMBBFromIndex(i);
mi = sis->getInstructionFromIndex(i);
@@ -823,7 +824,7 @@ namespace llvm {
}
os << indent + s(4) << "</td>\n";
} else {
- i = i.getStoreIndex(); // <- Will be incremented to the next index.
+ i = i.getDeadSlot(); // <- Will be incremented to the next index.
continue;
}
}
@@ -952,10 +953,10 @@ namespace llvm {
rItr != rEnd; ++rItr) {
const MachineInstr *mi = &*rItr;
if (mi->readsRegister(li->reg)) {
- useDefs[li].insert(lis->getInstructionIndex(mi).getUseIndex());
+ useDefs[li].insert(lis->getInstructionIndex(mi).getRegSlot(true));
}
if (mi->definesRegister(li->reg)) {
- useDefs[li].insert(lis->getInstructionIndex(mi).getDefIndex());
+ useDefs[li].insert(lis->getInstructionIndex(mi).getRegSlot());
}
}
}
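
[Editor's note: the isLoad/isUse/isDef to isBlock/isEarlyClobber/isRegister renames in this file track the SlotIndex overhaul in this import, where each instruction position carries four slots (Block, EarlyClobber, Register, Dead) instead of load/use/def/store. A toy model of the encoding, not LLVM's actual class:

// Toy model: an index is (instruction number * 4 + slot kind).
enum Slot { Block = 0, EarlyClobber = 1, Reg = 2, Dead = 3 };

struct SlotIndex {
  unsigned value; // instr * 4 + slot
  Slot slot() const { return Slot(value & 3); }
  bool isBlock() const { return slot() == Block; }
  // Reads are queried at the early-clobber slot (getRegSlot(true));
  // writes at the register slot (getRegSlot()).
  SlotIndex getRegSlot(bool ec = false) const {
    return SlotIndex{(value & ~3u) | (ec ? EarlyClobber : Reg)};
  }
  SlotIndex getDeadSlot() const { return SlotIndex{(value & ~3u) | Dead}; }
};
]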
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
index 1e9b5c8..8fd6426 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -31,6 +31,8 @@ static cl::opt<bool> StressSchedOpt(
cl::desc("Stress test instruction scheduling"));
#endif
+void SchedulingPriorityQueue::anchor() { }
+
ScheduleDAG::ScheduleDAG(MachineFunction &mf)
: TM(mf.getTarget()),
TII(TM.getInstrInfo()),
@@ -44,42 +46,17 @@ ScheduleDAG::ScheduleDAG(MachineFunction &mf)
ScheduleDAG::~ScheduleDAG() {}
-/// getInstrDesc helper to handle SDNodes.
-const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
- if (!Node || !Node->isMachineOpcode()) return NULL;
- return &TII->get(Node->getMachineOpcode());
-}
-
-/// dump - dump the schedule.
-void ScheduleDAG::dumpSchedule() const {
- for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
- if (SUnit *SU = Sequence[i])
- SU->dump(this);
- else
- dbgs() << "**** NOOP ****\n";
- }
-}
-
-
-/// Run - perform scheduling.
-///
-void ScheduleDAG::Run(MachineBasicBlock *bb,
- MachineBasicBlock::iterator insertPos) {
- BB = bb;
- InsertPos = insertPos;
-
+/// Clear the DAG state (e.g. between scheduling regions).
+void ScheduleDAG::clearDAG() {
SUnits.clear();
- Sequence.clear();
EntrySU = SUnit();
ExitSU = SUnit();
+}
- Schedule();
-
- DEBUG({
- dbgs() << "*** Final schedule ***\n";
- dumpSchedule();
- dbgs() << '\n';
- });
+/// getInstrDesc helper to handle SDNodes.
+const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
+ if (!Node || !Node->isMachineOpcode()) return NULL;
+ return &TII->get(Node->getMachineOpcode());
}
/// addPred - This adds the specified edge as a pred of the current node if
@@ -313,13 +290,12 @@ void SUnit::dumpAll(const ScheduleDAG *G) const {
case SDep::Output: dbgs() << "out "; break;
case SDep::Order: dbgs() << "ch "; break;
}
- dbgs() << "#";
- dbgs() << I->getSUnit() << " - SU(" << I->getSUnit()->NodeNum << ")";
+ dbgs() << "SU(" << I->getSUnit()->NodeNum << ")";
if (I->isArtificial())
dbgs() << " *";
dbgs() << ": Latency=" << I->getLatency();
if (I->isAssignedRegDep())
- dbgs() << " Reg=" << G->TRI->getName(I->getReg());
+ dbgs() << " Reg=" << PrintReg(I->getReg(), G->TRI);
dbgs() << "\n";
}
}
@@ -334,8 +310,7 @@ void SUnit::dumpAll(const ScheduleDAG *G) const {
case SDep::Output: dbgs() << "out "; break;
case SDep::Order: dbgs() << "ch "; break;
}
- dbgs() << "#";
- dbgs() << I->getSUnit() << " - SU(" << I->getSUnit()->NodeNum << ")";
+ dbgs() << "SU(" << I->getSUnit()->NodeNum << ")";
if (I->isArtificial())
dbgs() << " *";
dbgs() << ": Latency=" << I->getLatency();
@@ -346,13 +321,12 @@ void SUnit::dumpAll(const ScheduleDAG *G) const {
}
#ifndef NDEBUG
-/// VerifySchedule - Verify that all SUnits were scheduled and that
-/// their state is consistent.
+/// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
+/// their state is consistent. Return the number of scheduled nodes.
///
-void ScheduleDAG::VerifySchedule(bool isBottomUp) {
+unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
bool AnyNotSched = false;
unsigned DeadNodes = 0;
- unsigned Noops = 0;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
if (!SUnits[i].isScheduled) {
if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
@@ -393,12 +367,8 @@ void ScheduleDAG::VerifySchedule(bool isBottomUp) {
}
}
}
- for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
- if (!Sequence[i])
- ++Noops;
assert(!AnyNotSched);
- assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
- "The number of nodes scheduled doesn't match the expected number!");
+ return SUnits.size() - DeadNodes;
}
#endif
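With the Sequence and noop accounting moved out of ScheduleDAG, a driver that
still tracks its own emitted sequence could re-create the old assertion along
these lines (Sequence and NumNoops are hypothetical caller-side names, not
part of the new interface):

    #ifndef NDEBUG
      unsigned Scheduled = VerifyScheduledDAG(/*isBottomUp=*/true);
      assert(Sequence.size() - NumNoops == Scheduled &&
             "The number of nodes scheduled doesn't match the expected number!");
    #endif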
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
deleted file mode 100644
index f8b1bc7..0000000
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-//===---- ScheduleDAGEmit.cpp - Emit routines for the ScheduleDAG class ---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This implements the Emit routines for the ScheduleDAG class, which creates
-// MachineInstrs according to the computed schedule.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "pre-RA-sched"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/MathExtras.h"
-using namespace llvm;
-
-void ScheduleDAG::EmitNoop() {
- TII->insertNoop(*BB, InsertPos);
-}
-
-void ScheduleDAG::EmitPhysRegCopy(SUnit *SU,
- DenseMap<SUnit*, unsigned> &VRBaseMap) {
- for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl()) continue; // ignore chain preds
- if (I->getSUnit()->CopyDstRC) {
- // Copy to physical register.
- DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->getSUnit());
- assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
- // Find the destination physical register.
- unsigned Reg = 0;
- for (SUnit::const_succ_iterator II = SU->Succs.begin(),
- EE = SU->Succs.end(); II != EE; ++II) {
- if (II->isCtrl()) continue; // ignore chain preds
- if (II->getReg()) {
- Reg = II->getReg();
- break;
- }
- }
- BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
- .addReg(VRI->second);
- } else {
- // Copy from physical register.
- assert(I->getReg() && "Unknown physical register!");
- unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
- bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
- (void)isNew; // Silence compiler warning.
- assert(isNew && "Node emitted out of order - early");
- BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
- .addReg(I->getReg());
- }
- break;
- }
-}
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index 34b8ab0..6be1ab7 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -13,14 +13,15 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sched-instrs"
-#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -33,25 +34,17 @@ using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo &mli,
- const MachineDominatorTree &mdt)
+ const MachineDominatorTree &mdt,
+ bool IsPostRAFlag,
+ LiveIntervals *lis)
: ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
- InstrItins(mf.getTarget().getInstrItineraryData()),
- Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()),
- LoopRegs(MLI, MDT), FirstDbgValue(0) {
+ InstrItins(mf.getTarget().getInstrItineraryData()), LIS(lis),
+ IsPostRA(IsPostRAFlag), UnitLatencies(false), LoopRegs(MLI, MDT),
+ FirstDbgValue(0) {
+ assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
DbgValues.clear();
-}
-
-/// Run - perform scheduling.
-///
-void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
- MachineBasicBlock::iterator begin,
- MachineBasicBlock::iterator end,
- unsigned endcount) {
- BB = bb;
- Begin = begin;
- InsertPosIndex = endcount;
-
- ScheduleDAG::Run(bb, end);
+ assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
+ "Virtual registers must be removed prior to PostRA scheduling");
}
/// getUnderlyingObjectFromInt - This is the function that does the work of
@@ -133,19 +126,58 @@ static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
return 0;
}
-void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
+void ScheduleDAGInstrs::startBlock(MachineBasicBlock *BB) {
LoopRegs.Deps.clear();
if (MachineLoop *ML = MLI.getLoopFor(BB))
- if (BB == ML->getLoopLatch()) {
- MachineBasicBlock *Header = ML->getHeader();
- for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
- E = Header->livein_end(); I != E; ++I)
- LoopLiveInRegs.insert(*I);
+ if (BB == ML->getLoopLatch())
LoopRegs.VisitLoop(ML);
- }
}
-/// AddSchedBarrierDeps - Add dependencies from instructions in the current
+void ScheduleDAGInstrs::finishBlock() {
+ // Nothing to do.
+}
+
+/// Initialize the map with the number of registers.
+void Reg2SUnitsMap::setRegLimit(unsigned Limit) {
+ PhysRegSet.setUniverse(Limit);
+ SUnits.resize(Limit);
+}
+
+/// Clear the map without deallocating storage.
+void Reg2SUnitsMap::clear() {
+ for (const_iterator I = reg_begin(), E = reg_end(); I != E; ++I) {
+ SUnits[*I].clear();
+ }
+ PhysRegSet.clear();
+}
+
+/// Initialize the DAG and common scheduler state for the current scheduling
+/// region. This does not actually create the DAG, only clears it. The
+/// scheduling driver may call BuildSchedGraph multiple times per scheduling
+/// region.
+void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned endcount) {
+ BB = bb;
+ RegionBegin = begin;
+ RegionEnd = end;
+ EndIndex = endcount;
+ MISUnitMap.clear();
+
+ // Check to see if the scheduler cares about latencies.
+ UnitLatencies = forceUnitLatencies();
+
+ ScheduleDAG::clearDAG();
+}
+
+/// Close the current scheduling region. Don't clear any state in case the
+/// driver wants to refer to the previous scheduling region.
+void ScheduleDAGInstrs::exitRegion() {
+ // Nothing to do.
+}
+
+/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
@@ -153,11 +185,11 @@ void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers are
/// used by instructions in the fallthrough block.
-void ScheduleDAGInstrs::AddSchedBarrierDeps() {
- MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
+void ScheduleDAGInstrs::addSchedBarrierDeps() {
+ MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : 0;
ExitSU.setInstr(ExitMI);
bool AllDepKnown = ExitMI &&
- (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
+ (ExitMI->isCall() || ExitMI->isBarrier());
if (ExitMI && AllDepKnown) {
// If it's a call or a barrier, add dependencies on the defs and uses of
// instruction.
@@ -167,29 +199,313 @@ void ScheduleDAGInstrs::AddSchedBarrierDeps() {
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
- Uses[Reg].push_back(&ExitSU);
+ if (TRI->isPhysicalRegister(Reg))
+ Uses[Reg].push_back(&ExitSU);
+ else {
+ assert(!IsPostRA && "Virtual register encountered after regalloc.");
+ addVRegUseDeps(&ExitSU, i);
+ }
}
} else {
// For others, e.g. fallthrough, conditional branch, assume the exit
// uses all the registers that are livein to the successor blocks.
- SmallSet<unsigned, 8> Seen;
+ assert(Uses.empty() && "Uses in set before adding deps?");
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
- if (Seen.insert(Reg))
+ if (!Uses.contains(Reg))
Uses[Reg].push_back(&ExitSU);
}
}
}
-void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
- // We'll be allocating one SUnit for each instruction, plus one for
- // the region exit node.
+/// MO is an operand of SU's instruction that defines a physical register. Add
+/// data dependencies from SU to any uses of the physical register.
+void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
+ const MachineOperand &MO) {
+ assert(MO.isDef() && "expect physreg def");
+
+ // Ask the target if address-backscheduling is desirable, and if so how much.
+ const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+ unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
+ unsigned DataLatency = SU->Latency;
+
+ for (const uint16_t *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
+ if (!Uses.contains(*Alias))
+ continue;
+ std::vector<SUnit*> &UseList = Uses[*Alias];
+ for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
+ SUnit *UseSU = UseList[i];
+ if (UseSU == SU)
+ continue;
+ unsigned LDataLatency = DataLatency;
+ // Optionally add in a special extra latency for nodes that
+ // feed addresses.
+ // TODO: Perhaps we should get rid of
+ // SpecialAddressLatency and just move this into
+ // adjustSchedDependency for the targets that care about it.
+ if (SpecialAddressLatency != 0 && !UnitLatencies &&
+ UseSU != &ExitSU) {
+ MachineInstr *UseMI = UseSU->getInstr();
+ const MCInstrDesc &UseMCID = UseMI->getDesc();
+ int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
+ assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
+ if (RegUseIndex >= 0 &&
+ (UseMI->mayLoad() || UseMI->mayStore()) &&
+ (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
+ UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
+ LDataLatency += SpecialAddressLatency;
+ }
+ // Adjust the dependence latency using operand def/use
+ // information (if any), and then allow the target to
+ // perform its own adjustments.
+ const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
+ if (!UnitLatencies) {
+ computeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
+ ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
+ }
+ UseSU->addPred(dep);
+ }
+ }
+}
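// A minimal sketch of the overlap walk above, with made-up register numbers
// rather than real target tables: getOverlaps() yields a zero-terminated
// list covering the register itself plus everything aliasing it, hence the
// *Alias loop condition.
static const uint16_t FakeOverlaps[] = { 5, 17, 42, 0 }; // reg, aliases, 0
static unsigned countOverlaps(const uint16_t *List) {
  unsigned N = 0;
  for (const uint16_t *Alias = List; *Alias; ++Alias)
    ++N; // a real walk would consult Uses[*Alias] here
  return N; // FakeOverlaps yields 3
}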
+
+/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
+/// this SUnit to following instructions in the same scheduling region that
+/// depend the physical register referenced at OperIdx.
+void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
+ const MachineInstr *MI = SU->getInstr();
+ const MachineOperand &MO = MI->getOperand(OperIdx);
+
+ // Optionally add output and anti dependencies. For anti
+ // dependencies we use a latency of 0 because for a multi-issue
+ // target we want to allow the defining instruction to issue
+ // in the same cycle as the using instruction.
+ // TODO: Using a latency of 1 here for output dependencies assumes
+ // there's no cost for reusing registers.
+ SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
+ for (const uint16_t *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
+ if (!Defs.contains(*Alias))
+ continue;
+ std::vector<SUnit *> &DefList = Defs[*Alias];
+ for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
+ SUnit *DefSU = DefList[i];
+ if (DefSU == &ExitSU)
+ continue;
+ if (DefSU != SU &&
+ (Kind != SDep::Output || !MO.isDead() ||
+ !DefSU->getInstr()->registerDefIsDead(*Alias))) {
+ if (Kind == SDep::Anti)
+ DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
+ else {
+ unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
+ DefSU->getInstr());
+ DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
+ }
+ }
+ }
+ }
+
+ if (!MO.isDef()) {
+ // Either insert a new Reg2SUnits entry with an empty SUnits list, or
+ // retrieve the existing SUnits list for this register's uses.
+ // Push this SUnit on the use list.
+ Uses[MO.getReg()].push_back(SU);
+ }
+ else {
+ addPhysRegDataDeps(SU, MO);
+
+ // Either insert a new Reg2SUnits entry with an empty SUnits list, or
+ // retrieve the existing SUnits list for this register's defs.
+ std::vector<SUnit *> &DefList = Defs[MO.getReg()];
+
+ // If a def is going to wrap back around to the top of the loop,
+ // backschedule it.
+ if (!UnitLatencies && DefList.empty()) {
+ LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(MO.getReg());
+ if (I != LoopRegs.Deps.end()) {
+ const MachineOperand *UseMO = I->second.first;
+ unsigned Count = I->second.second;
+ const MachineInstr *UseMI = UseMO->getParent();
+ unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
+ const MCInstrDesc &UseMCID = UseMI->getDesc();
+ const TargetSubtargetInfo &ST =
+ TM.getSubtarget<TargetSubtargetInfo>();
+ unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
+ // TODO: If we knew the total depth of the region here, we could
+ // handle the case where the whole loop is inside the region but
+ // is large enough that the isScheduleHigh trick isn't needed.
+ if (UseMOIdx < UseMCID.getNumOperands()) {
+ // Currently, we only support scheduling regions consisting of
+ // single basic blocks. Check to see if the instruction is in
+ // the same region by checking to see if it has the same parent.
+ if (UseMI->getParent() != MI->getParent()) {
+ unsigned Latency = SU->Latency;
+ if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
+ Latency += SpecialAddressLatency;
+ // This is a wild guess as to the portion of the latency which
+ // will be overlapped by work done outside the current
+ // scheduling region.
+ Latency -= std::min(Latency, Count);
+ // Add the artificial edge.
+ ExitSU.addPred(SDep(SU, SDep::Order, Latency,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false,
+ /*isArtificial=*/true));
+ } else if (SpecialAddressLatency > 0 &&
+ UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
+ // The entire loop body is within the current scheduling region
+ // and the latency of this operation is assumed to be greater
+ // than the latency of the loop.
+ // TODO: Recursively mark data-edge predecessors as
+ // isScheduleHigh too.
+ SU->isScheduleHigh = true;
+ }
+ }
+ LoopRegs.Deps.erase(I);
+ }
+ }
+
+ // clear this register's use list
+ if (Uses.contains(MO.getReg()))
+ Uses[MO.getReg()].clear();
+
+ if (!MO.isDead())
+ DefList.clear();
+
+ // Calls will not be reordered because of chain dependencies (see
+ // below). Since call operands are dead, calls may continue to be added
+ // to the DefList making dependence checking quadratic in the size of
+ // the block. Instead, we leave only one call at the back of the
+ // DefList.
+ if (SU->isCall) {
+ while (!DefList.empty() && DefList.back()->isCall)
+ DefList.pop_back();
+ }
+ // Defs are pushed in the order they are visited and never reordered.
+ DefList.push_back(SU);
+ }
+}
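// A self-contained sketch of the call-pruning rule above (stand-in struct,
// not LLVM's SUnit): keeping at most one trailing call in the def list keeps
// dependence checking linear in blocks that are mostly calls.
#include <vector>
struct TinyUnit { bool isCall; };
static void pushDef(std::vector<TinyUnit*> &DefList, TinyUnit *SU) {
  if (SU->isCall)
    while (!DefList.empty() && DefList.back()->isCall)
      DefList.pop_back(); // drop redundant trailing calls
  DefList.push_back(SU);  // defs otherwise keep their visit order
}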
+
+/// addVRegDefDeps - Add register output and data dependencies from this SUnit
+/// to instructions that occur later in the same scheduling region if they read
+/// from or write to the virtual register defined at OperIdx.
+///
+/// TODO: Hoist loop induction variable increments. This has to be
+/// reevaluated. Generally, IV scheduling should be done before coalescing.
+void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
+ const MachineInstr *MI = SU->getInstr();
+ unsigned Reg = MI->getOperand(OperIdx).getReg();
+
+ // SSA defs do not have output/anti dependencies.
+ // The current operand is a def, so we have at least one.
+ if (llvm::next(MRI.def_begin(Reg)) == MRI.def_end())
+ return;
+
+ // Add output dependence to the next nearest def of this vreg.
+ //
+ // Unless this definition is dead, the output dependence should be
+ // transitively redundant with antidependencies from this definition's
+ // uses. We're conservative for now until we have a way to guarantee the uses
+ // are not eliminated sometime during scheduling. The output dependence edge
+ // is also useful if output latency exceeds def-use latency.
+ VReg2SUnitMap::iterator DefI = findVRegDef(Reg);
+ if (DefI == VRegDefs.end())
+ VRegDefs.insert(VReg2SUnit(Reg, SU));
+ else {
+ SUnit *DefSU = DefI->SU;
+ if (DefSU != SU && DefSU != &ExitSU) {
+ unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx,
+ DefSU->getInstr());
+ DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg));
+ }
+ DefI->SU = SU;
+ }
+}
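// The single-definition test above is the usual "exactly one element" idiom
// on a forward range; generically (std::next shown where the code uses
// llvm::next, its pre-C++11 equivalent):
#include <iterator>
template <class It> static bool hasSingleElement(It B, It E) {
  return B != E && std::next(B) == E;
}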
+
+/// addVRegUseDeps - Add a register data dependency if the instruction that
+/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
+/// register antidependency from this SUnit to instructions that occur later in
+/// the same scheduling region if they write the virtual register.
+///
+/// TODO: Handle ExitSU "uses" properly.
+void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
+ MachineInstr *MI = SU->getInstr();
+ unsigned Reg = MI->getOperand(OperIdx).getReg();
+
+ // Lookup this operand's reaching definition.
+ assert(LIS && "vreg dependencies requires LiveIntervals");
+ SlotIndex UseIdx = LIS->getInstructionIndex(MI).getRegSlot();
+ LiveInterval *LI = &LIS->getInterval(Reg);
+ VNInfo *VNI = LI->getVNInfoBefore(UseIdx);
+ // VNI will be valid because MachineOperand::readsReg() is checked by caller.
+ MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
+ // Phis and other noninstructions (after coalescing) have a NULL Def.
+ if (Def) {
+ SUnit *DefSU = getSUnit(Def);
+ if (DefSU) {
+ // The reaching Def lives within this scheduling region.
+ // Create a data dependence.
+ //
+ // TODO: Handle "special" address latencies cleanly.
+ const SDep &dep = SDep(DefSU, SDep::Data, DefSU->Latency, Reg);
+ if (!UnitLatencies) {
+ // Adjust the dependence latency using operand def/use information, then
+ // allow the target to perform its own adjustments.
+ computeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
+ const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+ ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
+ }
+ SU->addPred(dep);
+ }
+ }
+
+ // Add antidependence to the following def of the vreg it uses.
+ VReg2SUnitMap::iterator DefI = findVRegDef(Reg);
+ if (DefI != VRegDefs.end() && DefI->SU != SU)
+ DefI->SU->addPred(SDep(SU, SDep::Anti, 0, Reg));
+}
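// A toy model of the reaching-definition lookup above, with sorted ints
// standing in for SlotIndexes rather than LLVM's LiveInterval API: the value
// live before a use comes from the last def strictly below the use's slot.
#include <algorithm>
#include <vector>
static int reachingDefIdx(const std::vector<int> &DefSlots, int UseSlot) {
  std::vector<int>::const_iterator I =
      std::lower_bound(DefSlots.begin(), DefSlots.end(), UseSlot);
  if (I == DefSlots.begin())
    return -1;                          // no def reaches this use
  return int(I - DefSlots.begin()) - 1; // index of the reaching def
}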
+
+/// Create an SUnit for each real instruction, numbered in top-down topological
+/// order. The instruction order A < B implies that no edge exists from B to A.
+///
+/// Map each real instruction to its SUnit.
+///
+/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
+/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
+/// instead of pointers.
+///
+/// MachineScheduler relies on initSUnits numbering the nodes by their order in
+/// the original instruction list.
+void ScheduleDAGInstrs::initSUnits() {
+ // We'll be allocating one SUnit for each real instruction in the region,
+ // which is contained within a basic block.
SUnits.reserve(BB->size());
+ for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
+ MachineInstr *MI = I;
+ if (MI->isDebugValue())
+ continue;
+
+ SUnit *SU = newSUnit(MI);
+ MISUnitMap[MI] = SU;
+
+ SU->isCall = MI->isCall();
+ SU->isCommutable = MI->isCommutable();
+
+ // Assign the Latency field of SU using target-provided information.
+ if (UnitLatencies)
+ SU->Latency = 1;
+ else
+ computeLatency(SU);
+ }
+}
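// Why the reserve() above matters: schedulers hold raw SUnit pointers, so
// the vector must never reallocate once DAG building begins. A standalone
// illustration with a stand-in struct, not LLVM's SUnit:
#include <cassert>
#include <vector>
struct TinySU { unsigned NodeNum; };
static void stablePointers(unsigned N) {
  std::vector<TinySU> SUnits;
  SUnits.reserve(N);              // fix capacity before any push_back
  TinySU *First = 0;
  for (unsigned i = 0; i != N; ++i) {
    SUnits.push_back(TinySU());
    SUnits.back().NodeNum = i;    // numbered in instruction order
    if (!First) First = &SUnits[0];
  }
  assert(N == 0 || First == &SUnits[0]); // no reallocation occurred
}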
+
+void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
+ // Create an SUnit for each real instruction.
+ initSUnits();
+
// We build scheduling units by walking a block's instruction list from bottom
// to top.
@@ -203,29 +519,29 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
- // Check to see if the scheduler cares about latencies.
- bool UnitLatencies = ForceUnitLatencies();
-
- // Ask the target if address-backscheduling is desirable, and if so how much.
- const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
- unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
-
// Remove any stale debug info; sometimes BuildSchedGraph is called again
// without emitting the info from the previous call.
DbgValues.clear();
FirstDbgValue = NULL;
+ assert(Defs.empty() && Uses.empty() &&
+ "Only BuildGraph should update Defs/Uses");
+ Defs.setRegLimit(TRI->getNumRegs());
+ Uses.setRegLimit(TRI->getNumRegs());
+
+ assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
+ // FIXME: Allow SparseSet to reserve space for the creation of virtual
+ // registers during scheduling. Don't artificially inflate the Universe
+ // because we want to assert that vregs are not created during DAG building.
+ VRegDefs.setUniverse(MRI.getNumVirtRegs());
+
// Model data dependencies between instructions being scheduled and the
// ExitSU.
- AddSchedBarrierDeps();
-
- for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
- assert(Defs[i].empty() && "Only BuildGraph should push/pop Defs");
- }
+ addSchedBarrierDeps();
// Walk the list of instructions, from bottom moving up.
MachineInstr *PrevMI = NULL;
- for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
+ for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
MII != MIE; --MII) {
MachineInstr *MI = prior(MII);
if (MI && PrevMI) {
@@ -238,19 +554,11 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
continue;
}
- const MCInstrDesc &MCID = MI->getDesc();
- assert(!MCID.isTerminator() && !MI->isLabel() &&
+ assert(!MI->isTerminator() && !MI->isLabel() &&
"Cannot schedule terminators or labels!");
- // Create the SUnit for this MI.
- SUnit *SU = NewSUnit(MI);
- SU->isCall = MCID.isCall();
- SU->isCommutable = MCID.isCommutable();
- // Assign the Latency field of SU using target-provided information.
- if (UnitLatencies)
- SU->Latency = 1;
- else
- ComputeLatency(SU);
+ SUnit *SU = MISUnitMap[MI];
+ assert(SU && "No SUnit mapped to this MI");
// Add register-based dependencies (data, anti, and output).
for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
@@ -259,152 +567,14 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
-
- std::vector<SUnit *> &UseList = Uses[Reg];
- // Defs are push in the order they are visited and never reordered.
- std::vector<SUnit *> &DefList = Defs[Reg];
- // Optionally add output and anti dependencies. For anti
- // dependencies we use a latency of 0 because for a multi-issue
- // target we want to allow the defining instruction to issue
- // in the same cycle as the using instruction.
- // TODO: Using a latency of 1 here for output dependencies assumes
- // there's no cost for reusing registers.
- SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
- unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
- for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
- SUnit *DefSU = DefList[i];
- if (DefSU == &ExitSU)
- continue;
- if (DefSU != SU &&
- (Kind != SDep::Output || !MO.isDead() ||
- !DefSU->getInstr()->registerDefIsDead(Reg)))
- DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
- }
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- std::vector<SUnit *> &MemDefList = Defs[*Alias];
- for (unsigned i = 0, e = MemDefList.size(); i != e; ++i) {
- SUnit *DefSU = MemDefList[i];
- if (DefSU == &ExitSU)
- continue;
- if (DefSU != SU &&
- (Kind != SDep::Output || !MO.isDead() ||
- !DefSU->getInstr()->registerDefIsDead(*Alias)))
- DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
- }
- }
-
- if (MO.isDef()) {
- // Add any data dependencies.
- unsigned DataLatency = SU->Latency;
- for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
- SUnit *UseSU = UseList[i];
- if (UseSU == SU)
- continue;
- unsigned LDataLatency = DataLatency;
- // Optionally add in a special extra latency for nodes that
- // feed addresses.
- // TODO: Do this for register aliases too.
- // TODO: Perhaps we should get rid of
- // SpecialAddressLatency and just move this into
- // adjustSchedDependency for the targets that care about it.
- if (SpecialAddressLatency != 0 && !UnitLatencies &&
- UseSU != &ExitSU) {
- MachineInstr *UseMI = UseSU->getInstr();
- const MCInstrDesc &UseMCID = UseMI->getDesc();
- int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
- assert(RegUseIndex >= 0 && "UseMI doesn's use register!");
- if (RegUseIndex >= 0 &&
- (UseMCID.mayLoad() || UseMCID.mayStore()) &&
- (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
- UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
- LDataLatency += SpecialAddressLatency;
- }
- // Adjust the dependence latency using operand def/use
- // information (if any), and then allow the target to
- // perform its own adjustments.
- const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
- if (!UnitLatencies) {
- ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
- ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
- }
- UseSU->addPred(dep);
- }
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- std::vector<SUnit *> &UseList = Uses[*Alias];
- for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
- SUnit *UseSU = UseList[i];
- if (UseSU == SU)
- continue;
- const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
- if (!UnitLatencies) {
- ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
- ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
- }
- UseSU->addPred(dep);
- }
- }
-
- // If a def is going to wrap back around to the top of the loop,
- // backschedule it.
- if (!UnitLatencies && DefList.empty()) {
- LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
- if (I != LoopRegs.Deps.end()) {
- const MachineOperand *UseMO = I->second.first;
- unsigned Count = I->second.second;
- const MachineInstr *UseMI = UseMO->getParent();
- unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
- const MCInstrDesc &UseMCID = UseMI->getDesc();
- // TODO: If we knew the total depth of the region here, we could
- // handle the case where the whole loop is inside the region but
- // is large enough that the isScheduleHigh trick isn't needed.
- if (UseMOIdx < UseMCID.getNumOperands()) {
- // Currently, we only support scheduling regions consisting of
- // single basic blocks. Check to see if the instruction is in
- // the same region by checking to see if it has the same parent.
- if (UseMI->getParent() != MI->getParent()) {
- unsigned Latency = SU->Latency;
- if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
- Latency += SpecialAddressLatency;
- // This is a wild guess as to the portion of the latency which
- // will be overlapped by work done outside the current
- // scheduling region.
- Latency -= std::min(Latency, Count);
- // Add the artificial edge.
- ExitSU.addPred(SDep(SU, SDep::Order, Latency,
- /*Reg=*/0, /*isNormalMemory=*/false,
- /*isMustAlias=*/false,
- /*isArtificial=*/true));
- } else if (SpecialAddressLatency > 0 &&
- UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
- // The entire loop body is within the current scheduling region
- // and the latency of this operation is assumed to be greater
- // than the latency of the loop.
- // TODO: Recursively mark data-edge predecessors as
- // isScheduleHigh too.
- SU->isScheduleHigh = true;
- }
- }
- LoopRegs.Deps.erase(I);
- }
- }
-
- UseList.clear();
- if (!MO.isDead())
- DefList.clear();
-
- // Calls will not be reordered because of chain dependencies (see
- // below). Since call operands are dead, calls may continue to be added
- // to the DefList making dependence checking quadratic in the size of
- // the block. Instead, we leave only one call at the back of the
- // DefList.
- if (SU->isCall) {
- while (!DefList.empty() && DefList.back()->isCall)
- DefList.pop_back();
- }
- DefList.push_back(SU);
- } else {
- UseList.push_back(SU);
+ if (TRI->isPhysicalRegister(Reg))
+ addPhysRegDeps(SU, j);
+ else {
+ assert(!IsPostRA && "Virtual register encountered!");
+ if (MO.isDef())
+ addVRegDefDeps(SU, j);
+ else if (MO.readsReg()) // ignore undef operands
+ addVRegUseDeps(SU, j);
}
}
@@ -419,9 +589,9 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
unsigned TrueMemOrderLatency = 0;
- if (MCID.isCall() || MI->hasUnmodeledSideEffects() ||
+ if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
(MI->hasVolatileMemoryRef() &&
- (!MCID.mayLoad() || !MI->isInvariantLoad(AA)))) {
+ (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) {
// Be conservative with these and add dependencies on all memory
// references, even those that are known to not alias.
for (std::map<const Value *, SUnit *>::iterator I =
@@ -460,7 +630,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
PendingLoads.clear();
AliasMemDefs.clear();
AliasMemUses.clear();
- } else if (MCID.mayStore()) {
+ } else if (MI->mayStore()) {
bool MayAlias = true;
TrueMemOrderLatency = STORE_LOAD_LATENCY;
if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
@@ -516,7 +686,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
/*Reg=*/0, /*isNormalMemory=*/false,
/*isMustAlias=*/false,
/*isArtificial=*/true));
- } else if (MCID.mayLoad()) {
+ } else if (MI->mayLoad()) {
bool MayAlias = true;
TrueMemOrderLatency = 0;
if (MI->isInvariantLoad(AA)) {
@@ -558,32 +728,27 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
if (PrevMI)
FirstDbgValue = PrevMI;
- for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
- Defs[i].clear();
- Uses[i].clear();
- }
+ Defs.clear();
+ Uses.clear();
+ VRegDefs.clear();
PendingLoads.clear();
}
-void ScheduleDAGInstrs::FinishBlock() {
- // Nothing to do.
-}
-
-void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
+void ScheduleDAGInstrs::computeLatency(SUnit *SU) {
// Compute the latency for the node.
if (!InstrItins || InstrItins->isEmpty()) {
SU->Latency = 1;
// Simplistic target-independent heuristic: assume that loads take
// extra time.
- if (SU->getInstr()->getDesc().mayLoad())
+ if (SU->getInstr()->mayLoad())
SU->Latency += 2;
} else {
SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
}
}
-void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
+void ScheduleDAGInstrs::computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const {
if (!InstrItins || InstrItins->isEmpty())
return;
@@ -608,7 +773,9 @@ void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
// %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
// What we want is to compute latency between def of %D6/%D7 and use of
// %Q3 instead.
- DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
+ unsigned Op2 = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
+ if (DefMI->getOperand(Op2).isReg())
+ DefIdx = Op2;
}
MachineInstr *UseMI = Use->getInstr();
  // For all uses of the register, calculate the maximum latency
@@ -656,43 +823,8 @@ std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
return oss.str();
}
-// EmitSchedule - Emit the machine code in scheduled order.
-MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
- // For MachineInstr-based scheduling, we're rescheduling the instructions in
- // the block, so start by removing them from the block.
- while (Begin != InsertPos) {
- MachineBasicBlock::iterator I = Begin;
- ++Begin;
- BB->remove(I);
- }
-
- // If first instruction was a DBG_VALUE then put it back.
- if (FirstDbgValue)
- BB->insert(InsertPos, FirstDbgValue);
-
- // Then re-insert them according to the given schedule.
- for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
- if (SUnit *SU = Sequence[i])
- BB->insert(InsertPos, SU->getInstr());
- else
- // Null SUnit* is a noop.
- EmitNoop();
- }
-
- // Update the Begin iterator, as the first instruction in the block
- // may have been scheduled later.
- if (!Sequence.empty())
- Begin = Sequence[0]->getInstr();
-
- // Reinsert any remaining debug_values.
- for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
- DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
- std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
- MachineInstr *DbgValue = P.first;
- MachineInstr *OrigPrivMI = P.second;
- BB->insertAfter(OrigPrivMI, DbgValue);
- }
- DbgValues.clear();
- FirstDbgValue = NULL;
- return BB;
+/// Return the basic block label. It is not necessarily unique because a block
+/// contains multiple scheduling regions. But it is fine for visualization.
+std::string ScheduleDAGInstrs::getDAGName() const {
+ return "dag." + BB->getFullName();
}
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h
deleted file mode 100644
index 666bdf5..0000000
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h
+++ /dev/null
@@ -1,212 +0,0 @@
-//==- ScheduleDAGInstrs.h - MachineInstr Scheduling --------------*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the ScheduleDAGInstrs class, which implements
-// scheduling for a MachineInstr-based dependency graph.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SCHEDULEDAGINSTRS_H
-#define SCHEDULEDAGINSTRS_H
-
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/ADT/SmallSet.h"
-#include <map>
-
-namespace llvm {
- class MachineLoopInfo;
- class MachineDominatorTree;
-
- /// LoopDependencies - This class analyzes loop-oriented register
- /// dependencies, which are used to guide scheduling decisions.
- /// For example, loop induction variable increments should be
- /// scheduled as soon as possible after the variable's last use.
- ///
- class LLVM_LIBRARY_VISIBILITY LoopDependencies {
- const MachineLoopInfo &MLI;
- const MachineDominatorTree &MDT;
-
- public:
- typedef std::map<unsigned, std::pair<const MachineOperand *, unsigned> >
- LoopDeps;
- LoopDeps Deps;
-
- LoopDependencies(const MachineLoopInfo &mli,
- const MachineDominatorTree &mdt) :
- MLI(mli), MDT(mdt) {}
-
- /// VisitLoop - Clear out any previous state and analyze the given loop.
- ///
- void VisitLoop(const MachineLoop *Loop) {
- assert(Deps.empty() && "stale loop dependencies");
-
- MachineBasicBlock *Header = Loop->getHeader();
- SmallSet<unsigned, 8> LoopLiveIns;
- for (MachineBasicBlock::livein_iterator LI = Header->livein_begin(),
- LE = Header->livein_end(); LI != LE; ++LI)
- LoopLiveIns.insert(*LI);
-
- const MachineDomTreeNode *Node = MDT.getNode(Header);
- const MachineBasicBlock *MBB = Node->getBlock();
- assert(Loop->contains(MBB) &&
- "Loop does not contain header!");
- VisitRegion(Node, MBB, Loop, LoopLiveIns);
- }
-
- private:
- void VisitRegion(const MachineDomTreeNode *Node,
- const MachineBasicBlock *MBB,
- const MachineLoop *Loop,
- const SmallSet<unsigned, 8> &LoopLiveIns) {
- unsigned Count = 0;
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- const MachineInstr *MI = I;
- if (MI->isDebugValue())
- continue;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isUse())
- continue;
- unsigned MOReg = MO.getReg();
- if (LoopLiveIns.count(MOReg))
- Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
- }
- ++Count; // Not every iteration due to dbg_value above.
- }
-
- const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
- for (std::vector<MachineDomTreeNode*>::const_iterator I =
- Children.begin(), E = Children.end(); I != E; ++I) {
- const MachineDomTreeNode *ChildNode = *I;
- MachineBasicBlock *ChildBlock = ChildNode->getBlock();
- if (Loop->contains(ChildBlock))
- VisitRegion(ChildNode, ChildBlock, Loop, LoopLiveIns);
- }
- }
- };
-
- /// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
- /// MachineInstrs.
- class LLVM_LIBRARY_VISIBILITY ScheduleDAGInstrs : public ScheduleDAG {
- const MachineLoopInfo &MLI;
- const MachineDominatorTree &MDT;
- const MachineFrameInfo *MFI;
- const InstrItineraryData *InstrItins;
-
- /// Defs, Uses - Remember where defs and uses of each physical register
- /// are as we iterate upward through the instructions. This is allocated
- /// here instead of inside BuildSchedGraph to avoid the need for it to be
- /// initialized and destructed for each block.
- std::vector<std::vector<SUnit *> > Defs;
- std::vector<std::vector<SUnit *> > Uses;
-
- /// PendingLoads - Remember where unknown loads are after the most recent
- /// unknown store, as we iterate. As with Defs and Uses, this is here
- /// to minimize construction/destruction.
- std::vector<SUnit *> PendingLoads;
-
- /// LoopRegs - Track which registers are used for loop-carried dependencies.
- ///
- LoopDependencies LoopRegs;
-
- /// LoopLiveInRegs - Track which regs are live into a loop, to help guide
- /// back-edge-aware scheduling.
- ///
- SmallSet<unsigned, 8> LoopLiveInRegs;
-
- protected:
-
- /// DbgValues - Remember instruction that preceeds DBG_VALUE.
- typedef std::vector<std::pair<MachineInstr *, MachineInstr *> >
- DbgValueVector;
- DbgValueVector DbgValues;
- MachineInstr *FirstDbgValue;
-
- public:
- MachineBasicBlock::iterator Begin; // The beginning of the range to
- // be scheduled. The range extends
- // to InsertPos.
- unsigned InsertPosIndex; // The index in BB of InsertPos.
-
- explicit ScheduleDAGInstrs(MachineFunction &mf,
- const MachineLoopInfo &mli,
- const MachineDominatorTree &mdt);
-
- virtual ~ScheduleDAGInstrs() {}
-
- /// NewSUnit - Creates a new SUnit and return a ptr to it.
- ///
- SUnit *NewSUnit(MachineInstr *MI) {
-#ifndef NDEBUG
- const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0];
-#endif
- SUnits.push_back(SUnit(MI, (unsigned)SUnits.size()));
- assert((Addr == 0 || Addr == &SUnits[0]) &&
- "SUnits std::vector reallocated on the fly!");
- SUnits.back().OrigNode = &SUnits.back();
- return &SUnits.back();
- }
-
- /// Run - perform scheduling.
- ///
- void Run(MachineBasicBlock *bb,
- MachineBasicBlock::iterator begin,
- MachineBasicBlock::iterator end,
- unsigned endindex);
-
- /// BuildSchedGraph - Build SUnits from the MachineBasicBlock that we are
- /// input.
- virtual void BuildSchedGraph(AliasAnalysis *AA);
-
- /// AddSchedBarrierDeps - Add dependencies from instructions in the current
- /// list of instructions being scheduled to scheduling barrier. We want to
- /// make sure instructions which define registers that are either used by
- /// the terminator or are live-out are properly scheduled. This is
- /// especially important when the definition latency of the return value(s)
- /// are too high to be hidden by the branch or when the liveout registers
- /// used by instructions in the fallthrough block.
- void AddSchedBarrierDeps();
-
- /// ComputeLatency - Compute node latency.
- ///
- virtual void ComputeLatency(SUnit *SU);
-
- /// ComputeOperandLatency - Override dependence edge latency using
- /// operand use/def information
- ///
- virtual void ComputeOperandLatency(SUnit *Def, SUnit *Use,
- SDep& dep) const;
-
- virtual MachineBasicBlock *EmitSchedule();
-
- /// StartBlock - Prepare to perform scheduling in the given block.
- ///
- virtual void StartBlock(MachineBasicBlock *BB);
-
- /// Schedule - Order nodes according to selected style, filling
- /// in the Sequence member.
- ///
- virtual void Schedule() = 0;
-
- /// FinishBlock - Clean up after scheduling in the given block.
- ///
- virtual void FinishBlock();
-
- virtual void dumpNode(const SUnit *SU) const;
-
- virtual std::string getGraphNodeLabel(const SUnit *SU) const;
- };
-}
-
-#endif
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
index 4b55a22..38feee9 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
@@ -25,7 +25,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Config/config.h"
#include <fstream>
using namespace llvm;
@@ -42,12 +41,12 @@ namespace llvm {
static bool renderGraphFromBottomUp() {
return true;
}
-
+
static bool hasNodeAddressLabel(const SUnit *Node,
const ScheduleDAG *Graph) {
return true;
}
-
+
/// If you want to override the dot attributes printed for a particular
/// edge, override this method.
static std::string getEdgeAttributes(const SUnit *Node,
@@ -59,7 +58,7 @@ namespace llvm {
return "color=blue,style=dashed";
return "";
}
-
+
std::string getNodeLabel(const SUnit *Node, const ScheduleDAG *Graph);
static std::string getNodeAttributes(const SUnit *N,
@@ -82,18 +81,17 @@ std::string DOTGraphTraits<ScheduleDAG*>::getNodeLabel(const SUnit *SU,
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
-void ScheduleDAG::viewGraph() {
-// This code is only for debugging!
+void ScheduleDAG::viewGraph(const Twine &Name, const Twine &Title) {
+ // This code is only for debugging!
#ifndef NDEBUG
- if (BB->getBasicBlock())
- ViewGraph(this, "dag." + MF.getFunction()->getNameStr(), false,
- "Scheduling-Units Graph for " + MF.getFunction()->getNameStr() +
- ":" + BB->getBasicBlock()->getNameStr());
- else
- ViewGraph(this, "dag." + MF.getFunction()->getNameStr(), false,
- "Scheduling-Units Graph for " + MF.getFunction()->getNameStr());
+ ViewGraph(this, Name, false, Title);
#else
errs() << "ScheduleDAG::viewGraph is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
+
+/// Out-of-line implementation with no arguments is handy for gdb.
+void ScheduleDAG::viewGraph() {
+ viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
+}
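The zero-argument overload exists so the graph can be popped up from a
debugger without building Twine arguments; in a debug build with Graphviz
installed, something like the following gdb call is the intended use (the
exact expression depends on what is in scope, so treat it as a sketch):

    (gdb) call DAG->viewGraph()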
diff --git a/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
index b80c01e..3d22035 100644
--- a/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
+++ b/contrib/llvm/lib/CodeGen/ScoreboardHazardRecognizer.cpp
@@ -140,8 +140,6 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
unsigned freeUnits = IS->getUnits();
switch (IS->getReservationKind()) {
- default:
- assert(0 && "Invalid FU reservation");
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[StageCycle];
@@ -194,8 +192,6 @@ void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
unsigned freeUnits = IS->getUnits();
switch (IS->getReservationKind()) {
- default:
- assert(0 && "Invalid FU reservation");
case InstrStage::Required:
// Required FUs conflict with both reserved and required ones
freeUnits &= ~ReservedScoreboard[cycle + i];
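The deleted default cases rely on the switch covering every reservation kind;
a sketch of the resulting pattern with a toy enum (not the real InstrStage
interface). With no default label, -Wswitch can flag any enumerator added
later instead of hitting a runtime assert:

    enum ReservationKind { Required, Reserved };
    unsigned freeAfterConflicts(ReservationKind K, unsigned Req, unsigned Res) {
      unsigned Free = ~0u;
      switch (K) {
      case Required:   // conflicts with reserved and required units
        Free &= ~Res;
        // fall through
      case Reserved:   // conflicts with required units
        Free &= ~Req;
      }
      return Free;
    }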
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 7b87868..d1b998f 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -22,7 +22,6 @@
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
@@ -64,7 +63,24 @@ namespace {
bool LegalTypes;
// Worklist of all of the nodes that need to be simplified.
- std::vector<SDNode*> WorkList;
+ //
+ // This has the semantics that when adding to the worklist,
+ // the item added must be next to be processed. It should
+ // also only appear once. The naive approach to this takes
+ // linear time.
+ //
+ // To reduce the insert/remove time to logarithmic, we use
+ // a set and a vector to maintain our worklist.
+ //
+ // The set contains the items on the worklist, but does not
+ // maintain the order they should be visited.
+ //
+ // The vector maintains the order nodes should be visited, but may
+ // contain duplicate or removed nodes. When choosing a node to
+ // visit, we pop off the order stack until we find an item that is
+ // also in the contents set. All operations are O(log N).
+ SmallPtrSet<SDNode*, 64> WorkListContents;
+ SmallVector<SDNode*, 64> WorkListOrder;
// AA - Used for DAG load/store alias analysis.
AliasAnalysis &AA;
@@ -84,18 +100,17 @@ namespace {
SDValue visit(SDNode *N);
public:
- /// AddToWorkList - Add to the work list making sure it's instance is at the
- /// the back (next to be processed.)
+ /// AddToWorkList - Add to the work list making sure its instance is at the
+ /// back (next to be processed).
void AddToWorkList(SDNode *N) {
- removeFromWorkList(N);
- WorkList.push_back(N);
+ WorkListContents.insert(N);
+ WorkListOrder.push_back(N);
}
/// removeFromWorkList - remove all instances of N from the worklist.
///
void removeFromWorkList(SDNode *N) {
- WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), N),
- WorkList.end());
+ WorkListContents.erase(N);
}
SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
@@ -159,7 +174,9 @@ namespace {
SDValue visitADD(SDNode *N);
SDValue visitSUB(SDNode *N);
SDValue visitADDC(SDNode *N);
+ SDValue visitSUBC(SDNode *N);
SDValue visitADDE(SDNode *N);
+ SDValue visitSUBE(SDNode *N);
SDValue visitMUL(SDNode *N);
SDValue visitSDIV(SDNode *N);
SDValue visitUDIV(SDNode *N);
@@ -181,7 +198,9 @@ namespace {
SDValue visitSRA(SDNode *N);
SDValue visitSRL(SDNode *N);
SDValue visitCTLZ(SDNode *N);
+ SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTTZ(SDNode *N);
+ SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTPOP(SDNode *N);
SDValue visitSELECT(SDNode *N);
SDValue visitSELECT_CC(SDNode *N);
@@ -279,7 +298,7 @@ namespace {
public:
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
- : DAG(D), TLI(D.getTargetLoweringInfo()), Level(Unrestricted),
+ : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {}
/// Run - runs the dag combiner on all nodes in the work list
@@ -362,6 +381,8 @@ CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
/// specified expression for the same cost as the expression itself, or 2 if we
/// can compute the negated form more cheaply than the expression itself.
static char isNegatibleForFree(SDValue Op, bool LegalOperations,
+ const TargetLowering &TLI,
+ const TargetOptions *Options,
unsigned Depth = 0) {
// No compile time optimizations on this type.
if (Op.getValueType() == MVT::ppcf128)
@@ -384,34 +405,44 @@ static char isNegatibleForFree(SDValue Op, bool LegalOperations,
return LegalOperations ? 0 : 1;
case ISD::FADD:
// FIXME: determine better conditions for this xform.
- if (!UnsafeFPMath) return 0;
+ if (!Options->UnsafeFPMath) return 0;
+
+ // After operation legalization, it might not be legal to create new FSUBs.
+ if (LegalOperations &&
+ !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType()))
+ return 0;
// fold (fsub (fadd A, B)) -> (fsub (fneg A), B)
- if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
+ if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
+ Options, Depth + 1))
return V;
// fold (fneg (fadd A, B)) -> (fsub (fneg B), A)
- return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1);
+ return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
+ Depth + 1);
case ISD::FSUB:
// We can't turn -(A-B) into B-A when we honor signed zeros.
- if (!UnsafeFPMath) return 0;
+ if (!Options->UnsafeFPMath) return 0;
// fold (fneg (fsub A, B)) -> (fsub B, A)
return 1;
case ISD::FMUL:
case ISD::FDIV:
- if (HonorSignDependentRoundingFPMath()) return 0;
+ if (Options->HonorSignDependentRoundingFPMath()) return 0;
// fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y))
- if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
+ if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI,
+ Options, Depth + 1))
return V;
- return isNegatibleForFree(Op.getOperand(1), LegalOperations, Depth+1);
+ return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options,
+ Depth + 1);
case ISD::FP_EXTEND:
case ISD::FP_ROUND:
case ISD::FSIN:
- return isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1);
+ return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options,
+ Depth + 1);
}
}
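The mechanical part of this hunk threads state that used to be global: a
miniature of the same refactor shape (illustrative names, not DAGCombiner's):

    struct Opts { bool UnsafeFPMath; }; // stand-in for TargetOptions

    static char isNegatibleSketch(unsigned Depth, const Opts *Options) {
      if (!Options->UnsafeFPMath)       // was: if (!UnsafeFPMath), a global
        return 0;
      if (Depth > 6)                    // a recursion cap, as in the original
        return 0;
      return isNegatibleSketch(Depth + 1, Options); // Options rides along
    }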
@@ -435,10 +466,12 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
}
case ISD::FADD:
// FIXME: determine better conditions for this xform.
- assert(UnsafeFPMath);
+ assert(DAG.getTarget().Options.UnsafeFPMath);
// fold (fneg (fadd A, B)) -> (fsub (fneg A), B)
- if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
+ if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
+ DAG.getTargetLoweringInfo(),
+ &DAG.getTarget().Options, Depth+1))
return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, Depth+1),
@@ -450,7 +483,7 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
Op.getOperand(0));
case ISD::FSUB:
// We can't turn -(A-B) into B-A when we honor signed zeros.
- assert(UnsafeFPMath);
+ assert(DAG.getTarget().Options.UnsafeFPMath);
// fold (fneg (fsub 0, B)) -> B
if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
@@ -463,10 +496,12 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
case ISD::FMUL:
case ISD::FDIV:
- assert(!HonorSignDependentRoundingFPMath());
+ assert(!DAG.getTarget().Options.HonorSignDependentRoundingFPMath());
// fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
- if (isNegatibleForFree(Op.getOperand(0), LegalOperations, Depth+1))
+ if (isNegatibleForFree(Op.getOperand(0), LegalOperations,
+ DAG.getTargetLoweringInfo(),
+ &DAG.getTarget().Options, Depth+1))
return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(),
GetNegatedExpression(Op.getOperand(0), DAG,
LegalOperations, Depth+1),
@@ -944,14 +979,13 @@ bool DAGCombiner::PromoteLoad(SDValue Op) {
void DAGCombiner::Run(CombineLevel AtLevel) {
// set the instance variables, so that the various visit routines may use it.
Level = AtLevel;
- LegalOperations = Level >= NoIllegalOperations;
- LegalTypes = Level >= NoIllegalTypes;
+ LegalOperations = Level >= AfterLegalizeVectorOps;
+ LegalTypes = Level >= AfterLegalizeTypes;
// Add all the dag nodes to the worklist.
- WorkList.reserve(DAG.allnodes_size());
for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
E = DAG.allnodes_end(); I != E; ++I)
- WorkList.push_back(I);
+ AddToWorkList(I);
// Create a dummy node (which is not added to allnodes), that adds a reference
// to the root node, preventing it from being deleted, and tracking any
@@ -962,11 +996,17 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
// done. Set it to null to avoid confusion.
DAG.setRoot(SDValue());
- // while the worklist isn't empty, inspect the node on the end of it and
+ // while the worklist isn't empty, find a node and
// try and combine it.
- while (!WorkList.empty()) {
- SDNode *N = WorkList.back();
- WorkList.pop_back();
+ while (!WorkListContents.empty()) {
+ SDNode *N;
+ // The WorkListOrder holds the SDNodes in order, but it may contain duplicates.
+ // In order to avoid a linear scan, we use a set (O(log N)) to hold what the
+ // worklist *should* contain, and check that the node we want to visit
+ // should actually be visited.
+ do {
+ N = WorkListOrder.pop_back_val();
+ } while (!WorkListContents.erase(N));
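// A self-contained model of this two-structure worklist, with std containers
// standing in for SmallPtrSet/SmallVector: membership lives in the set, visit
// order in the vector, and popping skips entries whose erase() finds nothing,
// i.e. stale duplicates.
#include <unordered_set>
#include <vector>
struct Worklist {
  std::unordered_set<const void*> Contents;
  std::vector<const void*> Order;
  void add(const void *N) { Contents.insert(N); Order.push_back(N); }
  void remove(const void *N) { Contents.erase(N); } // no vector scan
  bool empty() const { return Contents.empty(); }
  const void *pop() { // precondition: !empty()
    const void *N;
    do {
      N = Order.back();
      Order.pop_back();
    } while (!Contents.erase(N)); // skip stale entries
    return N;
  }
};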
// If N has no uses, it is dead. Make sure to revisit all N's operands once
// N is deleted from the DAG, since they too may now be dead or may have a
@@ -1050,7 +1090,9 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::ADD: return visitADD(N);
case ISD::SUB: return visitSUB(N);
case ISD::ADDC: return visitADDC(N);
+ case ISD::SUBC: return visitSUBC(N);
case ISD::ADDE: return visitADDE(N);
+ case ISD::SUBE: return visitSUBE(N);
case ISD::MUL: return visitMUL(N);
case ISD::SDIV: return visitSDIV(N);
case ISD::UDIV: return visitUDIV(N);
@@ -1071,7 +1113,9 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::SRA: return visitSRA(N);
case ISD::SRL: return visitSRL(N);
case ISD::CTLZ: return visitCTLZ(N);
+ case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
case ISD::CTTZ: return visitCTTZ(N);
+ case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N);
case ISD::CTPOP: return visitCTPOP(N);
case ISD::SELECT: return visitSELECT(N);
case ISD::SELECT_CC: return visitSELECT_CC(N);
@@ -1408,16 +1452,14 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (VT.isInteger() && !VT.isVector()) {
APInt LHSZero, LHSOne;
APInt RHSZero, RHSOne;
- APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
- DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
+ DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);
if (LHSZero.getBoolValue()) {
- DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
+ DAG.ComputeMaskedBits(N1, RHSZero, RHSOne);
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
// If all possibly-set bits on the RHS are clear on the LHS, return an OR.
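      // For example, (add (and a, 0xF0), (and b, 0x0F)) can be rewritten as
      // (or (and a, 0xF0), (and b, 0x0F)): disjoint set bits cannot carry.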
- if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
- (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
+ if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1);
}
}
@@ -1486,8 +1528,8 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
EVT VT = N0.getValueType();
// If the flag result is dead, turn this into an ADD.
- if (N->hasNUsesOfValue(0, 1))
- return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0),
+ if (!N->hasAnyUseOfValue(1))
+ return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE,
N->getDebugLoc(), MVT::Glue));
@@ -1503,16 +1545,14 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
// fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
APInt LHSZero, LHSOne;
APInt RHSZero, RHSOne;
- APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
- DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
+ DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);
if (LHSZero.getBoolValue()) {
- DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
+ DAG.ComputeMaskedBits(N1, RHSZero, RHSOne);
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
// If all possibly-set bits on the RHS are clear on the LHS, return an OR.
- if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
- (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
+ if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE,
N->getDebugLoc(), MVT::Glue));
@@ -1535,7 +1575,7 @@ SDValue DAGCombiner::visitADDE(SDNode *N) {
// fold (adde x, y, false) -> (addc x, y)
if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
- return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0);
+ return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N0, N1);
return SDValue();
}
@@ -1645,6 +1685,51 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
return SDValue();
}
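+// visitSUBC folds subtract-with-borrow nodes. SUBC produces two results: the
+// difference (value 0) and a borrow-out flag (value 1); whenever the flag
+// result is provably unused or known false, we can fall back to cheaper nodes.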
+SDValue DAGCombiner::visitSUBC(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
+ ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ EVT VT = N0.getValueType();
+
+  // If the flag result is dead, turn this into a SUB.
+ if (!N->hasAnyUseOfValue(1))
+ return CombineTo(N, DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, N1),
+ DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(),
+ MVT::Glue));
+
+ // fold (subc x, x) -> 0 + no borrow
+ if (N0 == N1)
+ return CombineTo(N, DAG.getConstant(0, VT),
+ DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(),
+ MVT::Glue));
+
+ // fold (subc x, 0) -> x + no borrow
+ if (N1C && N1C->isNullValue())
+ return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(),
+ MVT::Glue));
+
+ // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow
+ if (N0C && N0C->isAllOnesValue())
+ return CombineTo(N, DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0),
+ DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(),
+ MVT::Glue));
+
+ return SDValue();
+}
+
+SDValue DAGCombiner::visitSUBE(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue CarryIn = N->getOperand(2);
+
+ // fold (sube x, y, false) -> (subc x, y)
+ if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
+ return DAG.getNode(ISD::SUBC, N->getDebugLoc(), N->getVTList(), N0, N1);
+
+ return SDValue();
+}
+
SDValue DAGCombiner::visitMUL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -1756,7 +1841,7 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
if (N0C && N1C && !N1C->isNullValue())
return DAG.FoldConstantArithmetic(ISD::SDIV, VT, N0C, N1C);
// fold (sdiv X, 1) -> X
- if (N1C && N1C->getSExtValue() == 1LL)
+ if (N1C && N1C->getAPIntValue() == 1LL)
return N0;
// fold (sdiv X, -1) -> 0-X
if (N1C && N1C->isAllOnesValue())
@@ -1770,17 +1855,15 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
N0, N1);
}
// fold (sdiv X, pow2) -> simple ops after legalize
- if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap() &&
- (isPowerOf2_64(N1C->getSExtValue()) ||
- isPowerOf2_64(-N1C->getSExtValue()))) {
+ if (N1C && !N1C->isNullValue() &&
+ (N1C->getAPIntValue().isPowerOf2() ||
+ (-N1C->getAPIntValue()).isPowerOf2())) {
// If dividing by powers of two is cheap, then don't perform the following
// fold.
if (TLI.isPow2DivCheap())
return SDValue();
- int64_t pow2 = N1C->getSExtValue();
- int64_t abs2 = pow2 > 0 ? pow2 : -pow2;
- unsigned lg2 = Log2_64(abs2);
+ unsigned lg2 = N1C->getAPIntValue().countTrailingZeros();
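+    // For example, for a divisor of 16 (0b10000) lg2 is 4; for -16 the two's
+    // complement pattern ...11110000 has the same four trailing zeros, so one
+    // computation handles both signs.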
// Splat the sign bit into the register
SDValue SGN = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
@@ -1800,7 +1883,7 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
// If we're dividing by a positive value, we're done. Otherwise, we must
// negate the result.
- if (pow2 > 0)
+ if (N1C->getAPIntValue().isNonNegative())
return SRA;
AddToWorkList(SRA.getNode());
@@ -1810,8 +1893,7 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
// if integer divide is expensive and we satisfy the requirements, emit an
// alternate sequence.
- if (N1C && (N1C->getSExtValue() < -1 || N1C->getSExtValue() > 1) &&
- !TLI.isIntDivCheap()) {
+ if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) {
SDValue Op = BuildSDIV(N);
if (Op.getNode()) return Op;
}
@@ -2250,6 +2332,67 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
ORNode, N0.getOperand(1));
}
+ // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
+ // Only perform this optimization after type legalization and before
+  // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
+ // adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
+ // we don't want to undo this promotion.
+ // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
+ // on scalars.
+ if ((N0.getOpcode() == ISD::BITCAST || N0.getOpcode() == ISD::SCALAR_TO_VECTOR)
+ && Level == AfterLegalizeVectorOps) {
+ SDValue In0 = N0.getOperand(0);
+ SDValue In1 = N1.getOperand(0);
+ EVT In0Ty = In0.getValueType();
+ EVT In1Ty = In1.getValueType();
+    // Fold only if both incoming values are integers and the original types match.
+ if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
+ SDValue Op = DAG.getNode(N->getOpcode(), N->getDebugLoc(), In0Ty, In0, In1);
+ SDValue BC = DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, Op);
+ AddToWorkList(Op.getNode());
+ return BC;
+ }
+ }
+
+ // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
+ // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
+ // If both shuffles use the same mask, and both shuffle within a single
+ // vector, then it is worthwhile to move the swizzle after the operation.
+ // The type-legalizer generates this pattern when loading illegal
+ // vector types from memory. In many cases this allows additional shuffle
+ // optimizations.
+ if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
+ N0.getOperand(1).getOpcode() == ISD::UNDEF &&
+ N1.getOperand(1).getOpcode() == ISD::UNDEF) {
+ ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0);
+ ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1);
+
+    assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() &&
+ "Inputs to shuffles are not the same type");
+
+ unsigned NumElts = VT.getVectorNumElements();
+
+ // Check that both shuffles use the same mask. The masks are known to be of
+ // the same length because the result vector type is the same.
+ bool SameMask = true;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int Idx0 = SVN0->getMaskElt(i);
+ int Idx1 = SVN1->getMaskElt(i);
+ if (Idx0 != Idx1) {
+ SameMask = false;
+ break;
+ }
+ }
+
+ if (SameMask) {
+ SDValue Op = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT,
+ N0.getOperand(0), N1.getOperand(0));
+ AddToWorkList(Op.getNode());
+ return DAG.getVectorShuffle(VT, N->getDebugLoc(), Op,
+ DAG.getUNDEF(VT), &SVN0->getMask()[0]);
+ }
+ }
+
return SDValue();
}
@@ -2312,6 +2455,88 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
+  // Similarly, fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
+ // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must
+ // already be zero by virtue of the width of the base type of the load.
+ //
+  // The 'X' node here can either be nothing or an extract_vector_elt to catch
+ // more cases.
+ if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ N0.getOperand(0).getOpcode() == ISD::LOAD) ||
+ N0.getOpcode() == ISD::LOAD) {
+ LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ?
+ N0 : N0.getOperand(0) );
+
+    // Get the constant (if applicable) the zeroth operand is being ANDed with.
+ // This can be a pure constant or a vector splat, in which case we treat the
+ // vector as a scalar and use the splat value.
+ APInt Constant = APInt::getNullValue(1);
+ if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
+ Constant = C->getAPIntValue();
+ } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) {
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef,
+ SplatBitSize, HasAnyUndefs);
+ if (IsSplat) {
+ // Undef bits can contribute to a possible optimisation if set, so
+ // set them.
+ SplatValue |= SplatUndef;
+
+ // The splat value may be something like "0x00FFFFFF", which means 0 for
+ // the first vector value and FF for the rest, repeating. We need a mask
+ // that will apply equally to all members of the vector, so AND all the
+ // lanes of the constant together.
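+        // For example, ANDing the i8 lanes of 0x00FFFFFF gives
+        // 0xFF & 0xFF & 0xFF & 0x00 = 0x00, the only mask value that is
+        // safe to assume for every element.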
+ EVT VT = Vector->getValueType(0);
+ unsigned BitWidth = VT.getVectorElementType().getSizeInBits();
+ Constant = APInt::getAllOnesValue(BitWidth);
+ for (unsigned i = 0, n = VT.getVectorNumElements(); i < n; ++i)
+ Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth);
+ }
+ }
+
+ // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is
+ // actually legal and isn't going to get expanded, else this is a false
+ // optimisation.
+ bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD,
+ Load->getMemoryVT());
+
+ // Resize the constant to the same size as the original memory access before
+ // extension. If it is still the AllOnesValue then this AND is completely
+ // unneeded.
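+    // For example, in (and (extload i8 -> i32, p), 0xFF), truncating 0xFF to
+    // the 8-bit memory type leaves all-ones, so the AND adds nothing beyond
+    // what a zero-extending load already guarantees.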
+ Constant =
+ Constant.zextOrTrunc(Load->getMemoryVT().getScalarType().getSizeInBits());
+
+ bool B;
+ switch (Load->getExtensionType()) {
+ default: B = false; break;
+ case ISD::EXTLOAD: B = CanZextLoadProfitably; break;
+ case ISD::ZEXTLOAD:
+ case ISD::NON_EXTLOAD: B = true; break;
+ }
+
+ if (B && Constant.isAllOnesValue()) {
+ // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to
+ // preserve semantics once we get rid of the AND.
+ SDValue NewLoad(Load, 0);
+ if (Load->getExtensionType() == ISD::EXTLOAD) {
+ NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD,
+ Load->getValueType(0), Load->getDebugLoc(),
+ Load->getChain(), Load->getBasePtr(),
+ Load->getOffset(), Load->getMemoryVT(),
+ Load->getMemOperand());
+ // Replace uses of the EXTLOAD with the new ZEXTLOAD.
+ CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
+ }
+
+ // Fold the AND away, taking care not to fold to the old load node if we
+ // replaced it.
+ CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0);
+
+ return SDValue(N, 0); // Return N so it doesn't get rechecked!
+ }
+ }
// fold (and (setcc x), (setcc y)) -> (setcc (and x, y))
if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
@@ -3323,7 +3548,9 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1), MASK) or
// (and (srl x, (sub c1, c2), MASK)
- if (N1C && N0.getOpcode() == ISD::SRL &&
+ // Only fold this if the inner shift has no other uses -- if it does, folding
+ // this will increase the total number of instructions.
+ if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
N0.getOperand(1).getOpcode() == ISD::Constant) {
uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
if (c1 < VT.getSizeInBits()) {
@@ -3603,8 +3830,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (N1C && N0.getOpcode() == ISD::CTLZ &&
N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
APInt KnownZero, KnownOne;
- APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
- DAG.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne);
+ DAG.ComputeMaskedBits(N0.getOperand(0), KnownZero, KnownOne);
// If any of the input bits are KnownOne, then the input couldn't be all
// zeros, thus the result of the srl will always be zero.
@@ -3612,7 +3838,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
// If all of the bits input the to ctlz node are known to be zero, then
// the result of the ctlz is "32" and the result of the shift is one.
- APInt UnknownBits = ~KnownZero & Mask;
+ APInt UnknownBits = ~KnownZero;
if (UnknownBits == 0) return DAG.getConstant(1, VT);
// Otherwise, check to see if there is exactly one bit input to the ctlz.
@@ -3713,6 +3939,16 @@ SDValue DAGCombiner::visitCTLZ(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ // fold (ctlz_zero_undef c1) -> c2
+ if (isa<ConstantSDNode>(N0))
+ return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, N->getDebugLoc(), VT, N0);
+ return SDValue();
+}
+
SDValue DAGCombiner::visitCTTZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -3723,6 +3959,16 @@ SDValue DAGCombiner::visitCTTZ(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ // fold (cttz_zero_undef c1) -> c2
+ if (isa<ConstantSDNode>(N0))
+ return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, N->getDebugLoc(), VT, N0);
+ return SDValue();
+}
+
SDValue DAGCombiner::visitCTPOP(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -4108,12 +4354,17 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
// Only do this before legalize for now.
if (VT.isVector() && !LegalOperations) {
EVT N0VT = N0.getOperand(0).getValueType();
- // We know that the # elements of the results is the same as the
- // # elements of the compare (and the # elements of the compare result
- // for that matter). Check to see that they are the same size. If so,
- // we know that the element size of the sext'd result matches the
- // element size of the compare operands.
- if (VT.getSizeInBits() == N0VT.getSizeInBits())
+ // On some architectures (such as SSE/NEON/etc) the SETCC result type is
+ // of the same size as the compared operands. Only optimize sext(setcc())
+ // if this is the case.
+ EVT SVT = TLI.getSetCCResultType(N0VT);
+
+ // We know that the # elements of the results is the same as the
+ // # elements of the compare (and the # elements of the compare result
+ // for that matter). Check to see that they are the same size. If so,
+ // we know that the element size of the sext'd result matches the
+ // element size of the compare operands.
+ if (VT.getSizeInBits() == SVT.getSizeInBits())
return DAG.getSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get());
@@ -4127,11 +4378,13 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
EVT MatchingVectorType =
EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
N0VT.getVectorNumElements());
- SDValue VsetCC =
- DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
- N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get());
- return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
+
+ if (SVT == MatchingVectorType) {
+ SDValue VsetCC = DAG.getSetCC(N->getDebugLoc(), MatchingVectorType,
+ N0.getOperand(0), N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
+ }
}
}
@@ -4162,6 +4415,44 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
return SDValue();
}
+// isTruncateOf - If N is a truncate of some other value, return true and
+// record the value being truncated in Op and which of Op's bits are zero in
+// KnownZero.
+// This function computes KnownZero to avoid a duplicated call to
+// ComputeMaskedBits in the caller.
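+// Besides an actual TRUNCATE, (setcc x, 0, ne) with an i1 result is treated
+// as a truncate of x when all bits of x other than bit 0 are known zero,
+// since the comparison then simply extracts x's low bit.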
+static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
+ APInt &KnownZero) {
+ APInt KnownOne;
+ if (N->getOpcode() == ISD::TRUNCATE) {
+ Op = N->getOperand(0);
+ DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
+ return true;
+ }
+
+ if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 ||
+ cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE)
+ return false;
+
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ assert(Op0.getValueType() == Op1.getValueType());
+
+ ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
+ ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
+ if (COp0 && COp0->isNullValue())
+ Op = Op1;
+ else if (COp1 && COp1->isNullValue())
+ Op = Op0;
+ else
+ return false;
+
+ DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
+
+ if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue())
+ return false;
+
+ return true;
+}
+
SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -4175,6 +4466,30 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT,
N0.getOperand(0));
+ // fold (zext (truncate x)) -> (zext x) or
+ // (zext (truncate x)) -> (truncate x)
+ // This is valid when the truncated bits of x are already zero.
+ // FIXME: We should extend this to work for vectors too.
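+  // For example, (zext i64 (trunc i32 (and x:i64, 0xFFFF))) folds to
+  // (and x, 0xFFFF): the bits removed by the truncate are already known
+  // zero, so the truncate/extend pair is a no-op.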
+ SDValue Op;
+ APInt KnownZero;
+ if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) {
+ APInt TruncatedBits =
+ (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ?
+ APInt(Op.getValueSizeInBits(), 0) :
+ APInt::getBitsSet(Op.getValueSizeInBits(),
+ N0.getValueSizeInBits(),
+ std::min(Op.getValueSizeInBits(),
+ VT.getSizeInBits()));
+ if (TruncatedBits == (KnownZero & TruncatedBits)) {
+ if (VT.bitsGT(Op.getValueType()))
+ return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, Op);
+ if (VT.bitsLT(Op.getValueType()))
+ return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
+
+ return Op;
+ }
+ }
+
// fold (zext (truncate (load x))) -> (zext (smaller load x))
// fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
if (N0.getOpcode() == ISD::TRUNCATE) {
@@ -4567,6 +4882,16 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
switch (V.getOpcode()) {
default: break;
+ case ISD::Constant: {
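+  // Clear any constant bits outside the demanded mask; e.g. if only the low
+  // 8 bits of 0x1234 are demanded, the constant can shrink to 0x34.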
+ const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
+ assert(CV != 0 && "Const value should be ConstSDNode.");
+ const APInt &CVal = CV->getAPIntValue();
+ APInt NewVal = CVal & Mask;
+ if (NewVal != CVal) {
+ return DAG.getConstant(NewVal, V.getValueType());
+ }
+ break;
+ }
case ISD::OR:
case ISD::XOR:
// If the LHS or RHS don't contribute bits to the or, drop them.
@@ -4705,7 +5030,8 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
if (ExtType == ISD::NON_EXTLOAD)
Load = DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff),
- LN0->isVolatile(), LN0->isNonTemporal(), NewAlign);
+ LN0->isVolatile(), LN0->isNonTemporal(),
+ LN0->isInvariant(), NewAlign);
else
Load = DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(),NewPtr,
LN0->getPointerInfo().getWithOffset(PtrOff),
@@ -4844,6 +5170,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
+ bool isLE = TLI.isLittleEndian();
// noop truncate
if (N0.getValueType() == N->getValueType(0))
@@ -4871,6 +5198,44 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
return N0.getOperand(0);
}
+ // Fold extract-and-trunc into a narrow extract. For example:
+ // i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
+ // i32 y = TRUNCATE(i64 x)
+ // -- becomes --
+ // v16i8 b = BITCAST (v2i64 val)
+ // i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
+ //
+ // Note: We only run this optimization after type legalization (which often
+  // creates this pattern) and before operation legalization, after which
+ // we need to be more careful about the vector instructions that we generate.
+ if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ LegalTypes && !LegalOperations && N0->hasOneUse()) {
+
+ EVT VecTy = N0.getOperand(0).getValueType();
+ EVT ExTy = N0.getValueType();
+ EVT TrTy = N->getValueType(0);
+
+ unsigned NumElem = VecTy.getVectorNumElements();
+ unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
+
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
+ assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
+
+ SDValue EltNo = N0->getOperand(1);
+ if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
+ int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+
+ int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
+
+ SDValue V = DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
+ NVT, N0.getOperand(0));
+
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
+ N->getDebugLoc(), TrTy, V,
+ DAG.getConstant(Index, MVT::i32));
+ }
+ }
+
// See if we can simplify the input to this truncate through knowledge that
// only the low bits are being used.
// For example "trunc (or (shl x, 8), y)" // -> trunc y
@@ -4934,7 +5299,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
return DAG.getLoad(VT, N->getDebugLoc(), LD1->getChain(),
LD1->getBasePtr(), LD1->getPointerInfo(),
- false, false, Align);
+ false, false, false, Align);
}
return SDValue();
@@ -5004,7 +5369,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
SDValue Load = DAG.getLoad(VT, N->getDebugLoc(), LN0->getChain(),
LN0->getBasePtr(), LN0->getPointerInfo(),
LN0->isVolatile(), LN0->isNonTemporal(),
- OrigAlign);
+ LN0->isInvariant(), OrigAlign);
AddToWorkList(N);
CombineTo(N0.getNode(),
DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
@@ -5017,7 +5382,8 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
// fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
// fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
// This often reduces constant pool loads.
- if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) &&
+ if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(VT)) ||
+ (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(VT))) &&
N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT,
N0.getOperand(0));
@@ -5247,20 +5613,24 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
if (N0CFP && !N1CFP)
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N0);
// fold (fadd A, 0) -> A
- if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
+ if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
+ N1CFP->getValueAPF().isZero())
return N0;
// fold (fadd A, (fneg B)) -> (fsub A, B)
- if (isNegatibleForFree(N1, LegalOperations) == 2)
+ if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
+ isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0,
GetNegatedExpression(N1, DAG, LegalOperations));
// fold (fadd (fneg A), B) -> (fsub B, A)
- if (isNegatibleForFree(N0, LegalOperations) == 2)
+ if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
+ isNegatibleForFree(N0, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1,
GetNegatedExpression(N0, DAG, LegalOperations));
// If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
- if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FADD &&
- N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
+ if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
+ N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() &&
+ isa<ConstantFPSDNode>(N0.getOperand(1)))
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(0),
DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
N0.getOperand(1), N1));
@@ -5285,20 +5655,39 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
if (N0CFP && N1CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1);
// fold (fsub A, 0) -> A
- if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ N1CFP && N1CFP->getValueAPF().isZero())
return N0;
// fold (fsub 0, B) -> -B
- if (UnsafeFPMath && N0CFP && N0CFP->getValueAPF().isZero()) {
- if (isNegatibleForFree(N1, LegalOperations))
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ N0CFP && N0CFP->getValueAPF().isZero()) {
+ if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options))
return GetNegatedExpression(N1, DAG, LegalOperations);
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N1);
}
// fold (fsub A, (fneg B)) -> (fadd A, B)
- if (isNegatibleForFree(N1, LegalOperations))
+ if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options))
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0,
GetNegatedExpression(N1, DAG, LegalOperations));
+ // If 'unsafe math' is enabled, fold
+ // (fsub x, (fadd x, y)) -> (fneg y) &
+ // (fsub x, (fadd y, x)) -> (fneg y)
+ if (DAG.getTarget().Options.UnsafeFPMath) {
+ if (N1.getOpcode() == ISD::FADD) {
+ SDValue N10 = N1->getOperand(0);
+ SDValue N11 = N1->getOperand(1);
+
+ if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI,
+ &DAG.getTarget().Options))
+ return GetNegatedExpression(N11, DAG, LegalOperations);
+ else if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI,
+ &DAG.getTarget().Options))
+ return GetNegatedExpression(N10, DAG, LegalOperations);
+ }
+ }
+
return SDValue();
}
@@ -5308,6 +5697,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// fold vector ops
if (VT.isVector()) {
@@ -5322,10 +5712,12 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
if (N0CFP && !N1CFP)
return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N1, N0);
// fold (fmul A, 0) -> 0
- if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ N1CFP && N1CFP->getValueAPF().isZero())
return N1;
// fold (fmul A, 0) -> 0, vector edition.
- if (UnsafeFPMath && ISD::isBuildVectorAllZeros(N1.getNode()))
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ ISD::isBuildVectorAllZeros(N1.getNode()))
return N1;
// fold (fmul X, 2.0) -> (fadd X, X)
if (N1CFP && N1CFP->isExactlyValue(+2.0))
@@ -5336,8 +5728,10 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N0);
// fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
- if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) {
- if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) {
+ if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI,
+ &DAG.getTarget().Options)) {
+ if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI,
+ &DAG.getTarget().Options)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
if (LHSNeg == 2 || RHSNeg == 2)
@@ -5348,7 +5742,8 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
}
// If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
- if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FMUL &&
+ if (DAG.getTarget().Options.UnsafeFPMath &&
+ N1CFP && N0.getOpcode() == ISD::FMUL &&
N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0.getOperand(0),
DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
@@ -5363,6 +5758,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// fold vector ops
if (VT.isVector()) {
@@ -5374,10 +5770,30 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
if (N0CFP && N1CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1);
+ // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
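+  // For example, (fdiv x, 4.0) becomes (fmul x, 0.25) with an exact (opOK)
+  // reciprocal, and (fdiv x, 3.0) becomes an fmul by the rounded value
+  // 0.333..., which the opInexact case permits under unsafe-fp-math.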
+ if (N1CFP && VT != MVT::ppcf128 && DAG.getTarget().Options.UnsafeFPMath) {
+ // Compute the reciprocal 1.0 / c2.
+ APFloat N1APF = N1CFP->getValueAPF();
+ APFloat Recip(N1APF.getSemantics(), 1); // 1.0
+ APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
+ // Only do the transform if the reciprocal is a legal fp immediate that
+ // isn't too nasty (eg NaN, denormal, ...).
+ if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty
+ (!LegalOperations ||
+ // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
+ // backend)... we should handle this gracefully after Legalize.
+ // TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) ||
+ TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) ||
+ TLI.isFPImmLegal(Recip, VT)))
+ return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0,
+ DAG.getConstantFP(Recip, VT));
+ }
// (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
- if (char LHSNeg = isNegatibleForFree(N0, LegalOperations)) {
- if (char RHSNeg = isNegatibleForFree(N1, LegalOperations)) {
+ if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI,
+ &DAG.getTarget().Options)) {
+ if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI,
+ &DAG.getTarget().Options)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
if (LHSNeg == 2 || RHSNeg == 2)
@@ -5463,7 +5879,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
// fold (sint_to_fp c1) -> c1fp
if (N0C && OpVT != MVT::ppcf128 &&
// ...but only if the target supports immediate floating-point values
- (Level == llvm::Unrestricted ||
+ (!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
@@ -5488,7 +5904,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
// fold (uint_to_fp c1) -> c1fp
if (N0C && OpVT != MVT::ppcf128 &&
// ...but only if the target supports immediate floating-point values
- (Level == llvm::Unrestricted ||
+ (!LegalOperations ||
TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
@@ -5630,12 +6046,13 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- if (isNegatibleForFree(N0, LegalOperations))
+ if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(),
+ &DAG.getTarget().Options))
return GetNegatedExpression(N0, DAG, LegalOperations);
// Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
// constant pool values.
- if (N0.getOpcode() == ISD::BITCAST &&
+ if (!TLI.isFNegFree(VT) && N0.getOpcode() == ISD::BITCAST &&
!VT.isVector() &&
N0.getNode()->hasOneUse() &&
N0.getOperand(0).getValueType().isInteger()) {
@@ -5671,7 +6088,8 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
// Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
// constant pool values.
- if (N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
+ if (!TLI.isFAbsFree(VT) &&
+ N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
N0.getOperand(0).getValueType().isInteger() &&
!N0.getOperand(0).getValueType().isVector()) {
SDValue Int = N0.getOperand(0);
@@ -5860,6 +6278,47 @@ SDValue DAGCombiner::visitBR_CC(SDNode *N) {
return SDValue();
}
+/// canFoldInAddressingMode - Return true if 'Use' is a load or a store that
+/// uses N as its base pointer and that N may be folded into the load / store
+/// addressing mode.
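+/// For example, if N is (add %reg, 4) and Use is a load from N, a target
+/// with a [reg + imm] addressing mode can fold the add into the load for
+/// free, so the pre-/post-indexed transformation is not profitable.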
+static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
+ SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ EVT VT;
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
+ if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
+ return false;
+ VT = Use->getValueType(0);
+ } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
+ if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
+ return false;
+ VT = ST->getValue().getValueType();
+ } else
+ return false;
+
+ TargetLowering::AddrMode AM;
+ if (N->getOpcode() == ISD::ADD) {
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (Offset)
+ // [reg +/- imm]
+ AM.BaseOffs = Offset->getSExtValue();
+ else
+ // [reg +/- reg]
+ AM.Scale = 1;
+ } else if (N->getOpcode() == ISD::SUB) {
+ ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (Offset)
+ // [reg +/- imm]
+ AM.BaseOffs = -Offset->getSExtValue();
+ else
+ // [reg +/- reg]
+ AM.Scale = 1;
+ } else
+ return false;
+
+ return TLI.isLegalAddressingMode(AM, VT.getTypeForEVT(*DAG.getContext()));
+}
+
/// CombineToPreIndexedLoadStore - Try turning a load / store into a
/// pre-indexed load / store when the base pointer is an add or subtract
/// and it has other uses besides the load / store. After the
@@ -5867,7 +6326,7 @@ SDValue DAGCombiner::visitBR_CC(SDNode *N) {
/// the add / subtract in and all of its other uses are redirected to the
/// new load / store.
bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
- if (!LegalOperations)
+ if (Level < AfterLegalizeDAG)
return false;
bool isLoad = true;
@@ -5946,10 +6405,9 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
if (N->hasPredecessorHelper(Use, Visited, Worklist))
return false;
- if (!((Use->getOpcode() == ISD::LOAD &&
- cast<LoadSDNode>(Use)->getBasePtr() == Ptr) ||
- (Use->getOpcode() == ISD::STORE &&
- cast<StoreSDNode>(Use)->getBasePtr() == Ptr)))
+ // If Ptr may be folded in addressing mode of other use, then it's
+ // not profitable to do this transformation.
+ if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
RealUse = true;
}
@@ -5999,7 +6457,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
/// load / store effectively and all of its uses are redirected to the
/// new load / store.
bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
- if (!LegalOperations)
+ if (Level < AfterLegalizeDAG)
return false;
bool isLoad = true;
@@ -6046,7 +6504,8 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
continue;
// Try turning it into a post-indexed load / store except when
- // 1) All uses are load / store ops that use it as base ptr.
+    //  1) All uses are load / store ops that use it as base ptr (and
+    //     it may be folded into the addressing mode).
// 2) Op must be independent of N, i.e. Op is neither a predecessor
// nor a successor of N. Otherwise, if Op is folded that would
// create a cycle.
@@ -6069,10 +6528,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
for (SDNode::use_iterator III = Use->use_begin(),
EEE = Use->use_end(); III != EEE; ++III) {
SDNode *UseUse = *III;
- if (!((UseUse->getOpcode() == ISD::LOAD &&
- cast<LoadSDNode>(UseUse)->getBasePtr().getNode() == Use) ||
- (UseUse->getOpcode() == ISD::STORE &&
- cast<StoreSDNode>(UseUse)->getBasePtr().getNode() == Use)))
+ if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
RealUse = true;
}
@@ -6139,7 +6595,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
if (!LD->isVolatile()) {
if (N->getValueType(1) == MVT::Other) {
// Unindexed loads.
- if (N->hasNUsesOfValue(0, 0)) {
+ if (!N->hasAnyUseOfValue(0)) {
// It's not safe to use the two value CombineTo variant here. e.g.
// v1, chain2 = load chain1, loc
// v2, chain3 = load chain2, loc
@@ -6164,7 +6620,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
} else {
// Indexed loads.
assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
- if (N->hasNUsesOfValue(0, 0) && N->hasNUsesOfValue(0, 1)) {
+ if (!N->hasAnyUseOfValue(0) && !N->hasAnyUseOfValue(1)) {
SDValue Undef = DAG.getUNDEF(N->getValueType(0));
DEBUG(dbgs() << "\nReplacing.7 ";
N->dump(&DAG);
@@ -6222,7 +6678,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
ReplLoad = DAG.getLoad(N->getValueType(0), LD->getDebugLoc(),
BetterChain, Ptr, LD->getPointerInfo(),
LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
+ LD->isInvariant(), LD->getAlignment());
} else {
ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(),
LD->getValueType(0),
@@ -6486,7 +6942,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
LD->getChain(), NewPtr,
LD->getPointerInfo().getWithOffset(PtrOff),
LD->isVolatile(), LD->isNonTemporal(),
- NewAlign);
+ LD->isInvariant(), NewAlign);
SDValue NewVal = DAG.getNode(Opc, Value.getDebugLoc(), NewVT, NewLD,
DAG.getConstant(NewImm, NewVT));
SDValue NewST = DAG.getStore(Chain, N->getDebugLoc(),
@@ -6546,7 +7002,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
SDValue NewLD = DAG.getLoad(IntVT, Value.getDebugLoc(),
LD->getChain(), LD->getBasePtr(),
LD->getPointerInfo(),
- false, false, LDAlign);
+ false, false, false, LDAlign);
SDValue NewST = DAG.getStore(NewLD.getValue(1), N->getDebugLoc(),
NewLD, ST->getBasePtr(),
@@ -6823,13 +7279,14 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// (vextract (scalar_to_vector val, 0) -> val
SDValue InVec = N->getOperand(0);
+ EVT VT = InVec.getValueType();
+ EVT NVT = N->getValueType(0);
if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
// Check if the result type doesn't match the inserted element type. A
// SCALAR_TO_VECTOR may truncate the inserted element and the
// EXTRACT_VECTOR_ELT may widen the extracted vector.
SDValue InOp = InVec.getOperand(0);
- EVT NVT = N->getValueType(0);
if (InOp.getValueType() != NVT) {
assert(InOp.getValueType().isInteger() && NVT.isInteger());
return DAG.getSExtOrTrunc(InOp, InVec.getDebugLoc(), NVT);
@@ -6837,6 +7294,38 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
return InOp;
}
+ SDValue EltNo = N->getOperand(1);
+ bool ConstEltNo = isa<ConstantSDNode>(EltNo);
+
+ // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
+ // We only perform this optimization before the op legalization phase because
+ // we may introduce new vector instructions which are not backed by TD patterns.
+  // For example, on AVX, extracting an element from a wide vector without
+  // using extract_subvector would be such an instruction.
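+  // Concretely, (extract_elt (shuffle a, b, <3,7,1,5>), 1) reads mask
+  // element 7, which lives in the second input, so it becomes
+  // (extract_elt b, 3).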
+ if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
+ && ConstEltNo && !LegalOperations) {
+ int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ int NumElem = VT.getVectorNumElements();
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
+ // Find the new index to extract from.
+ int OrigElt = SVOp->getMaskElt(Elt);
+
+ // Extracting an undef index is undef.
+ if (OrigElt == -1)
+ return DAG.getUNDEF(NVT);
+
+    // Select the right input vector to extract from.
+ if (OrigElt < NumElem) {
+ InVec = InVec->getOperand(0);
+ } else {
+ InVec = InVec->getOperand(1);
+ OrigElt -= NumElem;
+ }
+
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, N->getDebugLoc(), NVT,
+ InVec, DAG.getConstant(OrigElt, MVT::i32));
+ }
+
// Perform only after legalization to ensure build_vector / vector_shuffle
// optimizations have already been done.
if (!LegalOperations) return SDValue();
@@ -6844,17 +7333,24 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
// (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
// (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
- SDValue EltNo = N->getOperand(1);
- if (isa<ConstantSDNode>(EltNo)) {
+ if (ConstEltNo) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
bool NewLoad = false;
bool BCNumEltsChanged = false;
- EVT VT = InVec.getValueType();
EVT ExtVT = VT.getVectorElementType();
EVT LVT = ExtVT;
+    // If the result of the load has to be truncated, then it's not necessarily
+ // profitable.
+ if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT))
+ return SDValue();
+
if (InVec.getOpcode() == ISD::BITCAST) {
+ // Don't duplicate a load with other uses.
+ if (!InVec.hasOneUse())
+ return SDValue();
+
EVT BCVT = InVec.getOperand(0).getValueType();
if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
return SDValue();
@@ -6872,12 +7368,20 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
} else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
InVec.getOperand(0).getValueType() == ExtVT &&
ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
+ // Don't duplicate a load with other uses.
+ if (!InVec.hasOneUse())
+ return SDValue();
+
LN0 = cast<LoadSDNode>(InVec.getOperand(0));
} else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
// (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
// =>
// (load $addr+1*size)
+ // Don't duplicate a load with other uses.
+ if (!InVec.hasOneUse())
+ return SDValue();
+
// If the bit convert changed the number of elements, it is unsafe
// to examine the mask.
if (BCNumEltsChanged)
@@ -6888,14 +7392,21 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
- if (InVec.getOpcode() == ISD::BITCAST)
+ if (InVec.getOpcode() == ISD::BITCAST) {
+ // Don't duplicate a load with other uses.
+ if (!InVec.hasOneUse())
+ return SDValue();
+
InVec = InVec.getOperand(0);
+ }
if (ISD::isNormalLoad(InVec.getNode())) {
LN0 = cast<LoadSDNode>(InVec);
Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
}
}
+ // Make sure we found a non-volatile load and the extractelement is
+ // the only use.
if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
return SDValue();
@@ -6929,9 +7440,45 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
DAG.getConstant(PtrOff, PtrType));
}
- return DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
- LN0->getPointerInfo().getWithOffset(PtrOff),
- LN0->isVolatile(), LN0->isNonTemporal(), Align);
+ // The replacement we need to do here is a little tricky: we need to
+ // replace an extractelement of a load with a load.
+ // Use ReplaceAllUsesOfValuesWith to do the replacement.
+    // Note that this replacement assumes that the extractelement is the only
+ // use of the load; that's okay because we don't want to perform this
+ // transformation in other cases anyway.
+ SDValue Load;
+ SDValue Chain;
+ if (NVT.bitsGT(LVT)) {
+ // If the result type of vextract is wider than the load, then issue an
+ // extending load instead.
+ ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, LVT)
+ ? ISD::ZEXTLOAD : ISD::EXTLOAD;
+ Load = DAG.getExtLoad(ExtType, N->getDebugLoc(), NVT, LN0->getChain(),
+ NewPtr, LN0->getPointerInfo().getWithOffset(PtrOff),
+ LVT, LN0->isVolatile(), LN0->isNonTemporal(),Align);
+ Chain = Load.getValue(1);
+ } else {
+ Load = DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
+ LN0->getPointerInfo().getWithOffset(PtrOff),
+ LN0->isVolatile(), LN0->isNonTemporal(),
+ LN0->isInvariant(), Align);
+ Chain = Load.getValue(1);
+ if (NVT.bitsLT(LVT))
+ Load = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), NVT, Load);
+ else
+ Load = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), NVT, Load);
+ }
+ WorkListRemover DeadNodes(*this);
+ SDValue From[] = { SDValue(N, 0), SDValue(LN0,1) };
+ SDValue To[] = { Load, Chain };
+ DAG.ReplaceAllUsesOfValuesWith(From, To, 2, &DeadNodes);
+    // Since we're explicitly calling ReplaceAllUses, add the new node to the
+ // worklist explicitly as well.
+ AddToWorkList(Load.getNode());
+ AddUsersToWorkList(Load.getNode()); // Add users too
+ // Make sure to revisit this node to clean it up; it will usually be dead.
+ AddToWorkList(N);
+ return SDValue(N, 0);
}
return SDValue();
@@ -6939,11 +7486,122 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
unsigned NumInScalars = N->getNumOperands();
+ DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0);
+ // Check to see if this is a BUILD_VECTOR of a bunch of values
+ // which come from any_extend or zero_extend nodes. If so, we can create
+ // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
+ // optimizations. We do not handle sign-extend because we can't fill the sign
+ // using shuffles.
+ EVT SourceType = MVT::Other;
+ bool AllAnyExt = true;
+ bool AllUndef = true;
+ for (unsigned i = 0; i != NumInScalars; ++i) {
+ SDValue In = N->getOperand(i);
+ // Ignore undef inputs.
+ if (In.getOpcode() == ISD::UNDEF) continue;
+ AllUndef = false;
+
+ bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
+ bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
+
+ // Abort if the element is not an extension.
+ if (!ZeroExt && !AnyExt) {
+ SourceType = MVT::Other;
+ break;
+ }
+
+ // The input is a ZeroExt or AnyExt. Check the original type.
+ EVT InTy = In.getOperand(0).getValueType();
+
+ // Check that all of the widened source types are the same.
+ if (SourceType == MVT::Other)
+ // First time.
+ SourceType = InTy;
+ else if (InTy != SourceType) {
+      // Multiple incoming types. Abort.
+ SourceType = MVT::Other;
+ break;
+ }
+
+ // Check if all of the extends are ANY_EXTENDs.
+ AllAnyExt &= AnyExt;
+ }
+
+ if (AllUndef)
+ return DAG.getUNDEF(VT);
+
+ // In order to have valid types, all of the inputs must be extended from the
+ // same source type and all of the inputs must be any or zero extend.
+ // Scalar sizes must be a power of two.
+ EVT OutScalarTy = N->getValueType(0).getScalarType();
+ bool ValidTypes = SourceType != MVT::Other &&
+ isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
+ isPowerOf2_32(SourceType.getSizeInBits());
+
+ // We perform this optimization post type-legalization because
+ // the type-legalizer often scalarizes integer-promoted vectors.
+ // Performing this optimization before may create bit-casts which
+ // will be type-legalized to complex code sequences.
+ // We perform this optimization only before the operation legalizer because we
+ // may introduce illegal operations.
+ // Create a new simpler BUILD_VECTOR sequence which other optimizations can
+ // turn into a single shuffle instruction.
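+  // For example, (v4i32 build_vector (zext i16 a), ..., (zext i16 d))
+  // becomes a v8i16 build_vector (a, 0, b, 0, c, 0, d, 0) (in little-endian
+  // lane order) followed by a bitcast back to v4i32.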
+ if ((Level == AfterLegalizeVectorOps || Level == AfterLegalizeTypes) &&
+ ValidTypes) {
+ bool isLE = TLI.isLittleEndian();
+ unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
+ assert(ElemRatio > 1 && "Invalid element size ratio");
+ SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
+ DAG.getConstant(0, SourceType);
+
+ unsigned NewBVElems = ElemRatio * N->getValueType(0).getVectorNumElements();
+ SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
+
+ // Populate the new build_vector
+ for (unsigned i=0; i < N->getNumOperands(); ++i) {
+ SDValue Cast = N->getOperand(i);
+ assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
+ Cast.getOpcode() == ISD::ZERO_EXTEND ||
+ Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode");
+ SDValue In;
+ if (Cast.getOpcode() == ISD::UNDEF)
+ In = DAG.getUNDEF(SourceType);
+ else
+ In = Cast->getOperand(0);
+ unsigned Index = isLE ? (i * ElemRatio) :
+ (i * ElemRatio + (ElemRatio - 1));
+
+ assert(Index < Ops.size() && "Invalid index");
+ Ops[Index] = In;
+ }
+
+ // The type of the new BUILD_VECTOR node.
+ EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
+ assert(VecVT.getSizeInBits() == N->getValueType(0).getSizeInBits() &&
+ "Invalid vector size");
+ // Check if the new vector type is legal.
+ if (!isTypeLegal(VecVT)) return SDValue();
+
+ // Make the new BUILD_VECTOR.
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
+ VecVT, &Ops[0], Ops.size());
+
+ // The new BUILD_VECTOR node has the potential to be further optimized.
+ AddToWorkList(BV.getNode());
+ // Bitcast to the desired type.
+ return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), BV);
+ }
// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
// operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
// at most two distinct vectors, turn this into a shuffle node.
+
+ // May only combine to shuffle after legalize if shuffle is legal.
+ if (LegalOperations &&
+ !TLI.isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))
+ return SDValue();
+
SDValue VecIn1, VecIn2;
for (unsigned i = 0; i != NumInScalars; ++i) {
// Ignore undef inputs.
@@ -6957,15 +7615,8 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
break;
}
- // If the input vector type disagrees with the result of the build_vector,
- // we can't make a shuffle.
+ // We allow up to two distinct input vectors.
SDValue ExtractedFromVec = N->getOperand(i).getOperand(0);
- if (ExtractedFromVec.getValueType() != VT) {
- VecIn1 = VecIn2 = SDValue(0, 0);
- break;
- }
-
- // Otherwise, remember this. We allow up to two distinct input vectors.
if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
continue;
@@ -6980,7 +7631,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
}
}
- // If everything is good, we can make a shuffle operation.
+ // If everything is good, we can make a shuffle operation.
if (VecIn1.getNode()) {
SmallVector<int, 8> Mask;
for (unsigned i = 0; i != NumInScalars; ++i) {
@@ -7006,14 +7657,39 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
Mask.push_back(Idx+NumInScalars);
}
- // Add count and size info.
+ // We can't generate a shuffle node with mismatched input and output types.
+ // Attempt to transform a single input vector to the correct type.
+ if ((VT != VecIn1.getValueType())) {
+    // We don't support shuffling between two values of different types.
+ if (VecIn2.getNode() != 0)
+ return SDValue();
+
+ // We only support widening of vectors which are half the size of the
+    // output registers, for example XMM->YMM widening on X86 with AVX.
+ if (VecIn1.getValueType().getSizeInBits()*2 != VT.getSizeInBits())
+ return SDValue();
+
+ // Widen the input vector by adding undef values.
+ VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
+ VecIn1, DAG.getUNDEF(VecIn1.getValueType()));
+ }
+
+ // If VecIn2 is unused then change it to undef.
+ VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
+
+ // Check that we were able to transform all incoming values to the same type.
+ if (VecIn2.getValueType() != VecIn1.getValueType() ||
+ VecIn1.getValueType() != VT)
+ return SDValue();
+
+ // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
if (!isTypeLegal(VT))
return SDValue();
// Return the new VECTOR_SHUFFLE node.
SDValue Ops[2];
Ops[0] = VecIn1;
- Ops[1] = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
+ Ops[1] = VecIn2;
return DAG.getVectorShuffle(VT, N->getDebugLoc(), Ops[0], Ops[1], &Mask[0]);
}
@@ -7045,19 +7721,23 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
if (NVT != SmallVT || NVT.getSizeInBits()*2 != BigVT.getSizeInBits())
return SDValue();
- // Combine:
- // (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
- // Into:
- // indicies are equal => V1
- // otherwise => (extract_subvec V1, ExtIdx)
- //
- SDValue InsIdx = N->getOperand(1);
- SDValue ExtIdx = V->getOperand(2);
+    // Only handle cases where both indices are constants with the same type.
+ ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));
- if (InsIdx == ExtIdx)
- return V->getOperand(1);
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, N->getDebugLoc(), NVT,
- V->getOperand(0), N->getOperand(1));
+ if (InsIdx && ExtIdx &&
+ InsIdx->getValueType(0).getSizeInBits() <= 64 &&
+ ExtIdx->getValueType(0).getSizeInBits() <= 64) {
+ // Combine:
+ // (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
+ // Into:
+ // indices are equal => V1
+ // otherwise => (extract_subvec V1, ExtIdx)
+ if (InsIdx->getZExtValue() == ExtIdx->getZExtValue())
+ return V->getOperand(1);
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, N->getDebugLoc(), NVT,
+ V->getOperand(0), N->getOperand(1));
+ }
}
return SDValue();
@@ -7068,15 +7748,63 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
unsigned NumElts = VT.getVectorNumElements();
SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
+ assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");
+
+ // Canonicalize shuffle undef, undef -> undef
+ if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
+ return DAG.getUNDEF(VT);
+
+ ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
- assert(N0.getValueType().getVectorNumElements() == NumElts &&
- "Vector shuffle must be normalized in DAG");
+ // Canonicalize shuffle v, v -> v, undef
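+  // For example, shuffle v, v, <0,5,2,7> reads only the first input (mask
+  // element 5 is element 1 of the identical second copy), so it becomes
+  // shuffle v, undef, <0,1,2,3>.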
+ if (N0 == N1) {
+ SmallVector<int, 8> NewMask;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int Idx = SVN->getMaskElt(i);
+ if (Idx >= (int)NumElts) Idx -= NumElts;
+ NewMask.push_back(Idx);
+ }
+ return DAG.getVectorShuffle(VT, N->getDebugLoc(), N0, DAG.getUNDEF(VT),
+ &NewMask[0]);
+ }
+
+ // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
+ if (N0.getOpcode() == ISD::UNDEF) {
+ SmallVector<int, 8> NewMask;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int Idx = SVN->getMaskElt(i);
+ if (Idx >= 0) {
+ if (Idx < (int)NumElts)
+ Idx += NumElts;
+ else
+ Idx -= NumElts;
+ }
+ NewMask.push_back(Idx);
+ }
+ return DAG.getVectorShuffle(VT, N->getDebugLoc(), N1, DAG.getUNDEF(VT),
+ &NewMask[0]);
+ }
- // FIXME: implement canonicalizations from DAG.getVectorShuffle()
+  // Remove references to the RHS if it is undef.
+ if (N1.getOpcode() == ISD::UNDEF) {
+ bool Changed = false;
+ SmallVector<int, 8> NewMask;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int Idx = SVN->getMaskElt(i);
+ if (Idx >= (int)NumElts) {
+ Idx = -1;
+ Changed = true;
+ }
+ NewMask.push_back(Idx);
+ }
+ if (Changed)
+ return DAG.getVectorShuffle(VT, N->getDebugLoc(), N0, N1, &NewMask[0]);
+ }
// If it is a splat, check if the argument vector is another splat or a
// build_vector with all scalar elements the same.
- ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
SDNode *V = N0.getNode();
@@ -7115,6 +7843,40 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
return N0;
}
}
+
+ // If this shuffle node is simply a swizzle of another shuffle node,
+ // and it reverses the swizzle of the previous shuffle then we can
+ // optimize shuffle(shuffle(x, undef), undef) -> x.
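+  // For example, shuffle(shuffle(x, undef, <1,0,3,2>), undef, <1,0,3,2>)
+  // swaps each pair of lanes twice, so every element ends up back in its
+  // original position and the expression folds to x.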
+ if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
+ N1.getOpcode() == ISD::UNDEF) {
+
+ ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);
+
+ // Shuffle nodes can only reverse shuffles with a single non-undef value.
+ if (N0.getOperand(1).getOpcode() != ISD::UNDEF)
+ return SDValue();
+
+ // The incoming shuffle must be of the same type as the result of the
+ // current shuffle.
+ assert(OtherSV->getOperand(0).getValueType() == VT &&
+ "Shuffle types don't match");
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int Idx = SVN->getMaskElt(i);
+ assert(Idx < (int)NumElts && "Index references undef operand");
+ // Next, this index comes from the first value, which is the incoming
+ // shuffle. Adopt the incoming index.
+ if (Idx >= 0)
+ Idx = OtherSV->getMaskElt(Idx);
+
+ // The combined shuffle must map each index to itself.
+ if (Idx >= 0 && (unsigned)Idx != i)
+ return SDValue();
+ }
+
+ return OtherSV->getOperand(0);
+ }
+
return SDValue();
}
@@ -7190,7 +7952,8 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
SDValue Elt = RHS.getOperand(i);
if (!isa<ConstantSDNode>(Elt))
return SDValue();
- else if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
+
+ if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
Indices.push_back(i);
else if (cast<ConstantSDNode>(Elt)->isNullValue())
Indices.push_back(NumElts);
@@ -7261,8 +8024,19 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
}
EVT VT = LHSOp.getValueType();
- assert(RHSOp.getValueType() == VT &&
- "SimplifyVBinOp with different BUILD_VECTOR element types");
+ EVT RVT = RHSOp.getValueType();
+ if (RVT != VT) {
+ // Integer BUILD_VECTOR operands may have types larger than the element
+ // size (e.g., when the element type is not legal). Prior to type
+ // legalization, the types may not match between the two BUILD_VECTORS.
+ // Truncate one of the operands to make them match.
+ if (RVT.getSizeInBits() > VT.getSizeInBits()) {
+ RHSOp = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, RHSOp);
+ } else {
+ LHSOp = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), RVT, LHSOp);
+ VT = RVT;
+ }
+ }
SDValue FoldOp = DAG.getNode(N->getOpcode(), LHS.getDebugLoc(), VT,
LHSOp, RHSOp);
if (FoldOp.getOpcode() != ISD::UNDEF &&
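
To see why the truncation direction matters: both scalar operands must share one type before the constant fold, and narrowing the wider operand loses nothing because the extra bits were never part of the element value. A rough model in plain C++, with ScalarOp as a hypothetical stand-in for an SDValue:

#include <cstdint>

struct ScalarOp { unsigned Bits; uint64_t Val; };

static ScalarOp truncTo(ScalarOp Op, unsigned Bits) {
  if (Bits < 64)
    Op.Val &= (1ULL << Bits) - 1;    // models ISD::TRUNCATE
  Op.Bits = Bits;
  return Op;
}

static void reconcile(ScalarOp &LHS, ScalarOp &RHS) {
  if (LHS.Bits == RHS.Bits)
    return;                          // types already match
  if (RHS.Bits > LHS.Bits)
    RHS = truncTo(RHS, LHS.Bits);    // truncate the wider operand
  else
    LHS = truncTo(LHS, RHS.Bits);
}
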
@@ -7374,8 +8148,8 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
if ((LLD->hasAnyUseOfValue(1) &&
(LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
- (LLD->hasAnyUseOfValue(1) &&
- (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))))
+ (RLD->hasAnyUseOfValue(1) &&
+ (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
return false;
Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
@@ -7393,7 +8167,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
// FIXME: Discards pointer info.
LLD->getChain(), Addr, MachinePointerInfo(),
LLD->isVolatile(), LLD->isNonTemporal(),
- LLD->getAlignment());
+ LLD->isInvariant(), LLD->getAlignment());
} else {
Load = DAG.getExtLoad(LLD->getExtensionType() == ISD::EXTLOAD ?
RLD->getExtensionType() : LLD->getExtensionType(),
@@ -7509,7 +8283,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
AddToWorkList(CPIdx.getNode());
return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(), false,
- false, Alignment);
+ false, false, Alignment);
}
}
@@ -7517,8 +8291,6 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
// Check to see if we can perform the "gzip trick", transforming
// (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A)
if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
- N0.getValueType().isInteger() &&
- N2.getValueType().isInteger() &&
(N1C->isNullValue() || // (a < 0) ? b : 0
(N1C->getAPIntValue() == 1 && N0 == N2))) { // (a < 1) ? a : 0
EVT XType = N0.getValueType();
@@ -7720,7 +8492,7 @@ SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
std::vector<SDNode*> Built;
- SDValue S = TLI.BuildSDIV(N, DAG, &Built);
+ SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, &Built);
for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
ii != ee; ++ii)
@@ -7734,7 +8506,7 @@ SDValue DAGCombiner::BuildSDIV(SDNode *N) {
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
std::vector<SDNode*> Built;
- SDValue S = TLI.BuildUDIV(N, DAG, &Built);
+ SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, &Built);
for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end();
ii != ee; ++ii)
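
For context, TLI.BuildSDIV/BuildUDIV replace a division by a constant with a multiply by a precomputed reciprocal plus shifts. A minimal sketch of the kind of code the UDIV path produces, using the classic Hacker's Delight constants for unsigned 32-bit division by 10 (textbook values, not taken from this diff):

#include <cassert>
#include <cstdint>

// Multiply by ceil(2^35 / 10) = 0xCCCCCCCD, keep the 64-bit product, and
// shift right by 35: exact for every 32-bit n, with no divide instruction.
static uint32_t udiv10(uint32_t n) {
  return (uint32_t)(((uint64_t)n * 0xCCCCCCCDu) >> 35);
}

int main() {
  const uint32_t Samples[] = {0, 1, 9, 10, 99, 1000000007u, 0xFFFFFFFFu};
  for (uint32_t n : Samples)
    assert(udiv10(n) == n / 10);
  return 0;
}
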
@@ -7856,30 +8628,20 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
/// FindAliasInfo - Extracts the relevant alias information from the memory
/// node. Returns true if the operand was a load.
bool DAGCombiner::FindAliasInfo(SDNode *N,
- SDValue &Ptr, int64_t &Size,
- const Value *&SrcValue,
- int &SrcValueOffset,
- unsigned &SrcValueAlign,
- const MDNode *&TBAAInfo) const {
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- Ptr = LD->getBasePtr();
- Size = LD->getMemoryVT().getSizeInBits() >> 3;
- SrcValue = LD->getSrcValue();
- SrcValueOffset = LD->getSrcValueOffset();
- SrcValueAlign = LD->getOriginalAlignment();
- TBAAInfo = LD->getTBAAInfo();
- return true;
- }
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- Ptr = ST->getBasePtr();
- Size = ST->getMemoryVT().getSizeInBits() >> 3;
- SrcValue = ST->getSrcValue();
- SrcValueOffset = ST->getSrcValueOffset();
- SrcValueAlign = ST->getOriginalAlignment();
- TBAAInfo = ST->getTBAAInfo();
- return false;
- }
- llvm_unreachable("FindAliasInfo expected a memory operand");
+ SDValue &Ptr, int64_t &Size,
+ const Value *&SrcValue,
+ int &SrcValueOffset,
+ unsigned &SrcValueAlign,
+ const MDNode *&TBAAInfo) const {
+ LSBaseSDNode *LS = cast<LSBaseSDNode>(N);
+
+ Ptr = LS->getBasePtr();
+ Size = LS->getMemoryVT().getSizeInBits() >> 3;
+ SrcValue = LS->getSrcValue();
+ SrcValueOffset = LS->getSrcValueOffset();
+ SrcValueAlign = LS->getOriginalAlignment();
+ TBAAInfo = LS->getTBAAInfo();
+ return isa<LoadSDNode>(LS);
}
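
The rewrite above works because LoadSDNode and StoreSDNode share the LSBaseSDNode base class, so the duplicated field extraction collapses into one path and only the load/store distinction survives as the return value. A toy illustration of the pattern, using dynamic_cast in place of LLVM's isa<>:

#include <cassert>

// Stand-ins for LSBaseSDNode and its two subclasses.
struct MemNode { virtual ~MemNode() {} int BasePtr; };
struct LoadNode : MemNode {};
struct StoreNode : MemNode {};

// One extraction path for the fields common to loads and stores; the bool
// result still reports which kind we saw, as FindAliasInfo's callers expect.
bool findAliasInfo(MemNode *N, int &Ptr) {
  Ptr = N->BasePtr;
  return dynamic_cast<LoadNode *>(N) != 0;
}

int main() {
  LoadNode L;
  L.BasePtr = 42;
  int Ptr = 0;
  assert(findAliasInfo(&L, Ptr) && Ptr == 42);
  return 0;
}
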
/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index e8f8c73..0c1ac69 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -39,6 +39,7 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "isel"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
@@ -58,8 +59,15 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
+#include "llvm/ADT/Statistic.h"
using namespace llvm;
+STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
+ "target-independent selector");
+STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
+ "target-specific selector");
+STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
+
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
@@ -96,6 +104,11 @@ bool FastISel::hasTrivialKill(const Value *V) const {
!hasTrivialKill(Cast->getOperand(0)))
return false;
+ // GEPs with all zero indices are trivially coalesced by fast-isel.
+ if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
+ if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
+ return false;
+
// Only instructions with a single use in the same basic block are considered
// to have trivial kills.
return I->hasOneUse() &&
@@ -123,15 +136,8 @@ unsigned FastISel::getRegForValue(const Value *V) {
return 0;
}
- // Look up the value to see if we already have a register for it. We
- // cache values defined by Instructions across blocks, and other values
- // only locally. This is because Instructions already have the SSA
- // def-dominates-use requirement enforced.
- DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
- if (I != FuncInfo.ValueMap.end())
- return I->second;
-
- unsigned Reg = LocalValueMap[V];
+ // Look up the value to see if we already have a register for it.
+ unsigned Reg = lookUpRegForValue(V);
if (Reg != 0)
return Reg;
@@ -186,7 +192,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
uint32_t IntBitWidth = IntVT.getSizeInBits();
bool isExact;
(void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
- APFloat::rmTowardZero, &isExact);
+ APFloat::rmTowardZero, &isExact);
if (isExact) {
APInt IntVal(IntBitWidth, x);
@@ -297,6 +303,18 @@ void FastISel::recomputeInsertPt() {
++FuncInfo.InsertPt;
}
+void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator E) {
+ assert (I && E && std::distance(I, E) > 0 && "Invalid iterator!");
+ while (I != E) {
+ MachineInstr *Dead = &*I;
+ ++I;
+ Dead->eraseFromParent();
+ ++NumFastIselDead;
+ }
+ recomputeInsertPt();
+}
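
removeDeadCode relies on the standard erase-while-iterating idiom: advance the iterator past an element before erasing it, since erasure invalidates iterators to the erased element. A hedged sketch of the same idiom on a std::list:

#include <list>

// Erase the half-open range [I, E) from L. Mirrors removeDeadCode: step the
// iterator off the element first, because erasing invalidates iterators to
// the erased element (but to nothing else in a linked list).
void eraseRange(std::list<int> &L, std::list<int>::iterator I,
                std::list<int>::iterator E) {
  while (I != E) {
    std::list<int>::iterator Dead = I;
    ++I;
    L.erase(Dead);
  }
}
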
+
FastISel::SavePoint FastISel::enterLocalValueArea() {
MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
DebugLoc OldDL = DL;
@@ -377,6 +395,13 @@ bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
ISDOpcode = ISD::SRA;
}
+ // Transform "urem x, pow2" -> "and x, pow2-1".
+ if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
+ isPowerOf2_64(Imm)) {
+ --Imm;
+ ISDOpcode = ISD::AND;
+ }
+
unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
Op0IsKill, Imm, VT.getSimpleVT());
if (ResultReg == 0) return false;
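
The urem rewrite is the usual power-of-two strength reduction: the remainder of x by 2^k is just the low k bits of x. A self-contained check (the isPowerOf2_64 test corresponds to the Imm & (Imm - 1) trick below):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Imm = 8;                       // divisor; must be a power of two
  assert(Imm != 0 && (Imm & (Imm - 1)) == 0);
  for (uint64_t x = 0; x < 100; ++x)
    assert(x % Imm == (x & (Imm - 1)));   // urem x, 8  ==  and x, 7
  return 0;
}
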
@@ -427,6 +452,11 @@ bool FastISel::SelectGetElementPtr(const User *I) {
bool NIsKill = hasTrivialKill(I->getOperand(0));
+ // Keep a running tab of the total offset to coalesce multiple N = N + Offset
+ // into a single N = N + TotalOffset.
+ uint64_t TotalOffs = 0;
+ // FIXME: What's a good SWAG number for MaxOffs?
+ uint64_t MaxOffs = 2048;
Type *Ty = I->getOperand(0)->getType();
MVT VT = TLI.getPointerTy();
for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
@@ -436,14 +466,15 @@ bool FastISel::SelectGetElementPtr(const User *I) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
- uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
- // FIXME: This can be optimized by combining the add with a
- // subsequent one.
- N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
- if (N == 0)
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
- NIsKill = true;
+ TotalOffs += TD.getStructLayout(StTy)->getElementOffset(Field);
+ if (TotalOffs >= MaxOffs) {
+ N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+ if (N == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+ NIsKill = true;
+ TotalOffs = 0;
+ }
}
Ty = StTy->getElementType(Field);
} else {
@@ -452,14 +483,26 @@ bool FastISel::SelectGetElementPtr(const User *I) {
// If this is a constant subscript, handle it quickly.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue;
- uint64_t Offs =
+ // N = N + Offset
+ TotalOffs +=
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
- N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
+ if (TotalOffs >= MaxOffs) {
+ N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+ if (N == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+ NIsKill = true;
+ TotalOffs = 0;
+ }
+ continue;
+ }
+ if (TotalOffs) {
+ N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (N == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
NIsKill = true;
- continue;
+ TotalOffs = 0;
}
// N = N + Idx * ElementSize;
@@ -484,6 +527,12 @@ bool FastISel::SelectGetElementPtr(const User *I) {
return false;
}
}
+ if (TotalOffs) {
+ N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+ if (N == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+ }
// We successfully emitted code for the given LLVM Instruction.
UpdateValueMap(I, N);
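
The effect of the TotalOffs changes is to batch a run of constant GEP offsets into one running total and emit a single add, flushing early only when the total passes MaxOffs so the immediate stays comfortably encodable. A standalone sketch of that bookkeeping (Emitter and EmittedAdds are hypothetical names):

#include <cstdint>
#include <vector>

struct Emitter {
  std::vector<uint64_t> EmittedAdds;  // one entry per emitted ADD
  uint64_t TotalOffs;
  static const uint64_t MaxOffs = 2048;

  Emitter() : TotalOffs(0) {}

  void addConstantOffset(uint64_t Offs) {
    TotalOffs += Offs;                // coalesce instead of emitting now
    if (TotalOffs >= MaxOffs)
      flush();                        // immediate getting large: emit it
  }
  void flush() {                      // the trailing flush in the diff
    if (TotalOffs) {
      EmittedAdds.push_back(TotalOffs);
      TotalOffs = 0;
    }
  }
};
// Offsets 4, 8 and 12 now cost one add of 24 rather than three adds.
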
@@ -512,21 +561,32 @@ bool FastISel::SelectCall(const User *I) {
return true;
}
+ MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
+ ComputeUsesVAFloatArgument(*Call, &MMI);
+
const Function *F = Call->getCalledFunction();
if (!F) return false;
// Handle selected intrinsic function calls.
switch (F->getIntrinsicID()) {
default: break;
+ // At -O0 we don't care about the lifetime intrinsics.
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ return true;
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
if (!DIVariable(DI->getVariable()).Verify() ||
- !FuncInfo.MF->getMMI().hasDebugInfo())
+ !FuncInfo.MF->getMMI().hasDebugInfo()) {
+ DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
+ }
const Value *Address = DI->getAddress();
- if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
+ if (!Address || isa<UndefValue>(Address)) {
+ DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
return true;
+ }
unsigned Reg = 0;
unsigned Offset = 0;
@@ -534,16 +594,36 @@ bool FastISel::SelectCall(const User *I) {
// Some arguments' frame index is recorded during argument lowering.
Offset = FuncInfo.getArgumentFrameIndex(Arg);
if (Offset)
- Reg = TRI.getFrameRegister(*FuncInfo.MF);
+ Reg = TRI.getFrameRegister(*FuncInfo.MF);
}
if (!Reg)
- Reg = getRegForValue(Address);
+ Reg = lookUpRegForValue(Address);
+
+ // If we have a VLA that has a "use" in a metadata node that's then used
+ // here but it has no other uses, then we have a problem. E.g.,
+ //
+ // int foo (const int *x) {
+ // char a[*x];
+ // return 0;
+ // }
+ //
+ // If we assign 'a' a vreg and fast isel later on has to use the selection
+ // DAG isel, it will want to copy the value to the vreg. However, there are
+ // no uses, which goes counter to what selection DAG isel expects.
+ if (!Reg && !Address->use_empty() && isa<Instruction>(Address) &&
+ (!isa<AllocaInst>(Address) ||
+ !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
+ Reg = FuncInfo.InitializeRegForValue(Address);
if (Reg)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(TargetOpcode::DBG_VALUE))
.addReg(Reg, RegState::Debug).addImm(Offset)
.addMetadata(DI->getVariable());
+ else
+ // We can't yet handle anything else here because it would require
+ // generating code, thus altering codegen because of debug info.
+ DEBUG(dbgs() << "Dropping debug info for " << DI);
return true;
}
case Intrinsic::dbg_value: {
@@ -581,60 +661,6 @@ bool FastISel::SelectCall(const User *I) {
}
return true;
}
- case Intrinsic::eh_exception: {
- EVT VT = TLI.getValueType(Call->getType());
- if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
- break;
-
- assert(FuncInfo.MBB->isLandingPad() &&
- "Call to eh.exception not in landing pad!");
- unsigned Reg = TLI.getExceptionAddressRegister();
- const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
- unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(Reg);
- UpdateValueMap(Call, ResultReg);
- return true;
- }
- case Intrinsic::eh_selector: {
- EVT VT = TLI.getValueType(Call->getType());
- if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
- break;
- if (FuncInfo.MBB->isLandingPad())
- AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
- else {
-#ifndef NDEBUG
- FuncInfo.CatchInfoLost.insert(Call);
-#endif
- // FIXME: Mark exception selector register as live in. Hack for PR1508.
- unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) FuncInfo.MBB->addLiveIn(Reg);
- }
-
- unsigned Reg = TLI.getExceptionSelectorRegister();
- EVT SrcVT = TLI.getPointerTy();
- const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
- unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(Reg);
-
- bool ResultRegIsKill = hasTrivialKill(Call);
-
- // Cast the register to the type of the selector.
- if (SrcVT.bitsGT(MVT::i32))
- ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
- ResultReg, ResultRegIsKill);
- else if (SrcVT.bitsLT(MVT::i32))
- ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
- ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
- if (ResultReg == 0)
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
-
- UpdateValueMap(Call, ResultReg);
-
- return true;
- }
case Intrinsic::objectsize: {
ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
unsigned long long Res = CI->isZero() ? -1ULL : 0;
@@ -726,8 +752,8 @@ bool FastISel::SelectBitCast(const User *I) {
// First, try to perform the bitcast by inserting a reg-reg copy.
unsigned ResultReg = 0;
if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
- TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
- TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
+ const TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
+ const TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
// Don't attempt a cross-class copy. It will likely fail.
if (SrcClass == DstClass) {
ResultReg = createResultReg(DstClass);
@@ -758,17 +784,33 @@ FastISel::SelectInstruction(const Instruction *I) {
DL = I->getDebugLoc();
+ MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;
+
// First, try doing target-independent selection.
if (SelectOperator(I, I->getOpcode())) {
+ ++NumFastIselSuccessIndependent;
DL = DebugLoc();
return true;
}
+ // Remove dead code. However, ignore call instructions since we've flushed
+ // the local value map and recomputed the insert point.
+ if (!isa<CallInst>(I)) {
+ recomputeInsertPt();
+ if (SavedInsertPt != FuncInfo.InsertPt)
+ removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
+ }
// Next, try calling the target to attempt to handle the instruction.
+ SavedInsertPt = FuncInfo.InsertPt;
if (TargetSelectInstruction(I)) {
+ ++NumFastIselSuccessTarget;
DL = DebugLoc();
return true;
}
+ // Check for dead code and remove as necessary.
+ recomputeInsertPt();
+ if (SavedInsertPt != FuncInfo.InsertPt)
+ removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
DL = DebugLoc();
return false;
@@ -779,8 +821,11 @@ FastISel::SelectInstruction(const Instruction *I) {
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
- if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
- // The unconditional fall-through case, which needs no instructions.
+
+ if (FuncInfo.MBB->getBasicBlock()->size() > 1 && FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
+ // For more accurate line information, if this is the only instruction in
+ // the block then emit the branch; otherwise we have the unconditional
+ // fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
@@ -1354,8 +1399,8 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
// exactly one register for each non-void instruction.
EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
- // Promote MVT::i1.
- if (VT == MVT::i1)
+ // Handle integer promotions, though, because they're common and easy.
+ if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
else {
FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
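
The widened PHI check admits any small integer type the target can promote, not just i1. Roughly, the mapping looks like the sketch below, assuming a target whose smallest legal integer register is 32 bits (the real width comes from TLI.getTypeToTransformTo):

// Hypothetical helper: map a small integer width to the width the target
// promotes it to; 0 means "not handled, fall back to SelectionDAG".
unsigned promotePHIWidth(unsigned Bits) {
  switch (Bits) {
  case 1: case 8: case 16:
    return 32;           // assumed legal width; TLI supplies the real one
  case 32: case 64:
    return Bits;         // already legal as-is
  default:
    return 0;            // odd width: bail out, as the diff does
  }
}
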
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index b052740..8dde919 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "function-lowering-info"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
@@ -68,7 +69,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
GetReturnInfo(Fn->getReturnType(),
Fn->getAttributes().getRetAttributes(), Outs, TLI);
CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), *MF,
- Fn->isVarArg(),
+ Fn->isVarArg(),
Outs, Fn->getContext());
// Initialize the mapping of values to registers. This is only set up for
@@ -92,14 +93,16 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
// candidate. I.e., it would trigger the creation of a stack protector.
bool MayNeedSP =
(AI->isArrayAllocation() ||
- (TySize > 8 && isa<ArrayType>(Ty) &&
+ (TySize >= 8 && isa<ArrayType>(Ty) &&
cast<ArrayType>(Ty)->getElementType()->isIntegerTy(8)));
StaticAllocaMap[AI] =
- MF->getFrameInfo()->CreateStackObject(TySize, Align, false, MayNeedSP);
+ MF->getFrameInfo()->CreateStackObject(TySize, Align, false,
+ MayNeedSP);
}
for (; BB != EB; ++BB)
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
+ I != E; ++I) {
// Mark values used outside their block as exported, by allocating
// a virtual register for them.
if (isUsedOutsideOfDefiningBlock(I))
@@ -355,7 +358,7 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
/// argument. This overrides previous frame index entry for this argument,
/// if any.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
- int FI) {
+ int FI) {
ByValArgFrameIndexMap[A] = FI;
}
@@ -367,10 +370,34 @@ int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
ByValArgFrameIndexMap.find(A);
if (I != ByValArgFrameIndexMap.end())
return I->second;
- DEBUG(dbgs() << "Argument does not have assigned frame index!");
+ DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
return 0;
}
+/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
+/// being passed to this variadic function, and set the MachineModuleInfo's
+/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
+/// reference to _fltused on Windows, which will link in MSVCRT's
+/// floating-point support.
+void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
+ MachineModuleInfo *MMI)
+{
+ FunctionType *FT = cast<FunctionType>(
+ I.getCalledValue()->getType()->getContainedType(0));
+ if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
+ for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
+ Type* T = I.getArgOperand(i)->getType();
+ for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
+ i != e; ++i) {
+ if (i->isFloatingPointTy()) {
+ MMI->setUsesVAFloatArgument(true);
+ return;
+ }
+ }
+ }
+ }
+}
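
In other words: a call through a vararg function type whose arguments contain any floating-point component, even nested inside an aggregate, sets the flag, and the Windows backends then emit the _fltused reference. A standalone model of the walk (Ty and the recursion are illustrative; the real code uses po_iterator over llvm::Type and assumes the type tree it walks is finite):

#include <vector>

struct Ty { bool IsFloat; std::vector<Ty *> Contained; };

bool containsFloat(const Ty *T) {     // models the post-order type walk
  if (T->IsFloat)
    return true;
  for (const Ty *Sub : T->Contained)
    if (containsFloat(Sub))
      return true;
  return false;
}

bool usesVAFloatArgument(bool CalleeIsVarArg,
                         const std::vector<const Ty *> &Args) {
  if (!CalleeIsVarArg)
    return false;                     // only variadic calls matter
  for (const Ty *A : Args)
    if (containsFloat(A))
      return true;                    // will pull in _fltused on Windows
  return false;
}
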
+
/// AddCatchInfo - Extract the personality and type infos from an eh.selector
/// call, and add them to the specified machine basic block.
void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
@@ -425,34 +452,6 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
}
}
-void llvm::CopyCatchInfo(const BasicBlock *SuccBB, const BasicBlock *LPad,
- MachineModuleInfo *MMI, FunctionLoweringInfo &FLI) {
- SmallPtrSet<const BasicBlock*, 4> Visited;
-
- // The 'eh.selector' call may not be in the direct successor of a basic block,
- // but could be several successors deeper. If we don't find it, try going one
- // level further. <rdar://problem/8824861>
- while (Visited.insert(SuccBB)) {
- for (BasicBlock::const_iterator I = SuccBB->begin(), E = --SuccBB->end();
- I != E; ++I)
- if (const EHSelectorInst *EHSel = dyn_cast<EHSelectorInst>(I)) {
- // Apply the catch info to LPad.
- AddCatchInfo(*EHSel, MMI, FLI.MBBMap[LPad]);
-#ifndef NDEBUG
- if (!FLI.MBBMap[SuccBB]->isLandingPad())
- FLI.CatchInfoFound.insert(EHSel);
-#endif
- return;
- }
-
- const BranchInst *Br = dyn_cast<BranchInst>(SuccBB->getTerminator());
- if (Br && Br->isUnconditional())
- SuccBB = Br->getSuccessor(0);
- else
- break;
- }
-}
-
/// AddLandingPadInfo - Extract the exception handling information from the
/// landingpad instruction and add them to the specified machine module info.
void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 2ff66f8..1467d88 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -294,7 +294,7 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
const TargetRegisterClass *DstRC = 0;
if (IIOpNum < II->getNumOperands())
DstRC = TII->getRegClass(*II, IIOpNum, TRI);
- assert((DstRC || (MCID.isVariadic() && IIOpNum >= MCID.getNumOperands())) &&
+ assert((DstRC || (MI->isVariadic() && IIOpNum >= MCID.getNumOperands())) &&
"Don't have operand info for this instruction!");
if (DstRC && !MRI->constrainRegClass(VReg, DstRC, MinRCSize)) {
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
@@ -351,6 +351,8 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
MI->addOperand(MachineOperand::CreateFPImm(CFP));
} else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
+ } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateRegMask(RM->getRegMask()));
} else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateGA(TGA->getGlobal(), TGA->getOffset(),
TGA->getTargetFlags()));
@@ -574,14 +576,19 @@ void InstrEmitter::EmitRegSequence(SDNode *Node,
for (unsigned i = 1; i != NumOps; ++i) {
SDValue Op = Node->getOperand(i);
if ((i & 1) == 0) {
- unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
- unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
- const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
- const TargetRegisterClass *SRC =
+ RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
+ // Skip physical registers as they don't have a vreg to get and we'll
+ // insert copies for them in TwoAddressInstructionPass anyway.
+ if (!R || !TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
+ unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
+ unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
+ const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
+ const TargetRegisterClass *SRC =
TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
- if (SRC && SRC != RC) {
- MRI->setRegClass(NewVReg, SRC);
- RC = SRC;
+ if (SRC && SRC != RC) {
+ MRI->setRegClass(NewVReg, SRC);
+ RC = SRC;
+ }
}
}
AddOperand(MI, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
@@ -700,33 +707,6 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
// Create the new machine instruction.
MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), II);
- // The MachineInstr constructor adds implicit-def operands. Scan through
- // these to determine which are dead.
- if (MI->getNumOperands() != 0 &&
- Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
- // First, collect all used registers.
- SmallVector<unsigned, 8> UsedRegs;
- for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser())
- if (F->getOpcode() == ISD::CopyFromReg)
- UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
- else {
- // Collect declared implicit uses.
- const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
- UsedRegs.append(MCID.getImplicitUses(),
- MCID.getImplicitUses() + MCID.getNumImplicitUses());
- // In addition to declared implicit uses, we must also check for
- // direct RegisterSDNode operands.
- for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
- if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
- unsigned Reg = R->getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
- UsedRegs.push_back(Reg);
- }
- }
- // Then mark unused registers as dead.
- MI->setPhysRegsDeadExcept(UsedRegs, *TRI);
- }
-
// Add result register values for things that are defined by this
// instruction.
if (NumResults)
@@ -751,30 +731,63 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
// hook knows where in the block to insert the replacement code.
MBB->insert(InsertPos, MI);
+ // The MachineInstr may also define physregs instead of virtregs. These
+ // physreg values can reach other instructions in different ways:
+ //
+ // 1. When there is a use of a Node value beyond the explicitly defined
+ // virtual registers, we emit a CopyFromReg for one of the implicitly
+ // defined physregs. This only happens when HasPhysRegOuts is true.
+ //
+ // 2. A CopyFromReg reading a physreg may be glued to this instruction.
+ //
+ // 3. A glued instruction may implicitly use a physreg.
+ //
+ // 4. A glued instruction may use a RegisterSDNode operand.
+ //
+ // Collect all the used physreg defs, and make sure that any unused physreg
+ // defs are marked as dead.
+ SmallVector<unsigned, 8> UsedRegs;
+
// Additional results must be physical register defs.
if (HasPhysRegOuts) {
for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
- if (Node->hasAnyUseOfValue(i))
- EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
- // If there are no uses, mark the register as dead now, so that
- // MachineLICM/Sink can see that it's dead. Don't do this if the
- // node has a Glue value, for the benefit of targets still using
- // Glue for values in physregs.
- else if (Node->getValueType(Node->getNumValues()-1) != MVT::Glue)
- MI->addRegisterDead(Reg, TRI);
+ if (!Node->hasAnyUseOfValue(i))
+ continue;
+ // This implicitly defined physreg has a use.
+ UsedRegs.push_back(Reg);
+ EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
}
}
- // If the instruction has implicit defs and the node doesn't, mark the
- // implicit def as dead. If the node has any glue outputs, we don't do this
- // because we don't know what implicit defs are being used by glued nodes.
- if (Node->getValueType(Node->getNumValues()-1) != MVT::Glue)
- if (const unsigned *IDList = II.getImplicitDefs()) {
- for (unsigned i = NumResults, e = II.getNumDefs()+II.getNumImplicitDefs();
- i != e; ++i)
- MI->addRegisterDead(IDList[i-II.getNumDefs()], TRI);
+ // Scan the glue chain for any used physregs.
+ if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
+ for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
+ if (F->getOpcode() == ISD::CopyFromReg) {
+ UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
+ continue;
+ } else if (F->getOpcode() == ISD::CopyToReg) {
+ // Skip CopyToReg nodes that are internal to the glue chain.
+ continue;
+ }
+ // Collect declared implicit uses.
+ const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
+ UsedRegs.append(MCID.getImplicitUses(),
+ MCID.getImplicitUses() + MCID.getNumImplicitUses());
+ // In addition to declared implicit uses, we must also check for
+ // direct RegisterSDNode operands.
+ for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
+ if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
+ unsigned Reg = R->getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ UsedRegs.push_back(Reg);
+ }
}
+ }
+
+ // Finally mark unused registers as dead.
+ if (!UsedRegs.empty() || II.getImplicitDefs())
+ MI->setPhysRegsDeadExcept(UsedRegs, *TRI);
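
The net effect is a set-difference: every implicitly defined physreg that never shows up in UsedRegs gets a dead flag. Sketched with plain containers and hypothetical names:

#include <set>
#include <vector>

std::vector<unsigned> deadDefs(const std::vector<unsigned> &Defs,
                               const std::vector<unsigned> &UsedRegs) {
  std::set<unsigned> Used(UsedRegs.begin(), UsedRegs.end());
  std::vector<unsigned> Dead;
  for (unsigned R : Defs)
    if (!Used.count(R))
      Dead.push_back(R);   // no reader anywhere: safe to mark dead
  return Dead;
}
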
// Run post-isel target hook to adjust this instruction if needed.
#ifdef NDEBUG
@@ -794,10 +807,8 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
Node->dump();
#endif
llvm_unreachable("This target-independent node should have been selected!");
- break;
case ISD::EntryToken:
llvm_unreachable("EntryToken should have been excluded from the schedule!");
- break;
case ISD::MERGE_VALUES:
case ISD::TokenFactor: // fall thru
break;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 63255ae..a96a997 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -46,37 +46,18 @@ using namespace llvm;
/// will attempt merge setcc and brc instructions into brcc's.
///
namespace {
-class SelectionDAGLegalize {
+class SelectionDAGLegalize : public SelectionDAG::DAGUpdateListener {
const TargetMachine &TM;
const TargetLowering &TLI;
SelectionDAG &DAG;
- // Libcall insertion helpers.
-
- /// LastCALLSEQ_END - This keeps track of the CALLSEQ_END node that has been
- /// legalized. We use this to ensure that calls are properly serialized
- /// against each other, including inserted libcalls.
- SDValue LastCALLSEQ_END;
-
- /// IsLegalizingCall - This member is used *only* for purposes of providing
- /// helpful assertions that a libcall isn't created while another call is
- /// being legalized (which could lead to non-serialized call sequences).
- bool IsLegalizingCall;
-
- /// LegalizedNodes - For nodes that are of legal width, and that have more
- /// than one use, this map indicates what regularized operand to use. This
- /// allows us to avoid legalizing the same thing more than once.
- DenseMap<SDValue, SDValue> LegalizedNodes;
+ /// LegalizePosition - The iterator for walking through the node list.
+ SelectionDAG::allnodes_iterator LegalizePosition;
- void AddLegalizedOperand(SDValue From, SDValue To) {
- LegalizedNodes.insert(std::make_pair(From, To));
- // If someone requests legalization of the new node, return itself.
- if (From != To)
- LegalizedNodes.insert(std::make_pair(To, To));
+ /// LegalizedNodes - The set of nodes which have already been legalized.
+ SmallPtrSet<SDNode *, 16> LegalizedNodes;
- // Transfer SDDbgValues.
- DAG.TransferDbgValues(From, To);
- }
+ // Libcall insertion helpers.
public:
explicit SelectionDAGLegalize(SelectionDAG &DAG);
@@ -84,9 +65,8 @@ public:
void LegalizeDAG();
private:
- /// LegalizeOp - Return a legal replacement for the given operation, with
- /// all legal operands.
- SDValue LegalizeOp(SDValue O);
+ /// LegalizeOp - Legalizes the given operation.
+ void LegalizeOp(SDNode *Node);
SDValue OptimizeFloatStore(StoreSDNode *ST);
@@ -105,10 +85,7 @@ private:
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
SDValue N1, SDValue N2,
- SmallVectorImpl<int> &Mask) const;
-
- bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
- SmallPtrSet<SDNode*, 32> &NodesLeadingTo);
+ ArrayRef<int> Mask) const;
void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
DebugLoc dl);
@@ -150,10 +127,46 @@ private:
SDValue ExpandInsertToVectorThroughStack(SDValue Op);
SDValue ExpandVectorBuildThroughStack(SDNode* Node);
+ SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP);
+
std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
- void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
- void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
+ void ExpandNode(SDNode *Node);
+ void PromoteNode(SDNode *Node);
+
+ void ForgetNode(SDNode *N) {
+ LegalizedNodes.erase(N);
+ if (LegalizePosition == SelectionDAG::allnodes_iterator(N))
+ ++LegalizePosition;
+ }
+
+public:
+ // DAGUpdateListener implementation.
+ virtual void NodeDeleted(SDNode *N, SDNode *E) {
+ ForgetNode(N);
+ }
+ virtual void NodeUpdated(SDNode *N) {}
+
+ // Node replacement helpers
+ void ReplacedNode(SDNode *N) {
+ if (N->use_empty()) {
+ DAG.RemoveDeadNode(N, this);
+ } else {
+ ForgetNode(N);
+ }
+ }
+ void ReplaceNode(SDNode *Old, SDNode *New) {
+ DAG.ReplaceAllUsesWith(Old, New, this);
+ ReplacedNode(Old);
+ }
+ void ReplaceNode(SDValue Old, SDValue New) {
+ DAG.ReplaceAllUsesWith(Old, New, this);
+ ReplacedNode(Old.getNode());
+ }
+ void ReplaceNode(SDNode *Old, const SDValue *New) {
+ DAG.ReplaceAllUsesWith(Old, New, this);
+ ReplacedNode(Old);
+ }
};
}
@@ -164,7 +177,7 @@ private:
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
SDValue N1, SDValue N2,
- SmallVectorImpl<int> &Mask) const {
+ ArrayRef<int> Mask) const {
unsigned NumMaskElts = VT.getVectorNumElements();
unsigned NumDestElts = NVT.getVectorNumElements();
unsigned NumEltsGrowth = NumDestElts / NumMaskElts;
@@ -195,145 +208,37 @@ SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag)
}
void SelectionDAGLegalize::LegalizeDAG() {
- LastCALLSEQ_END = DAG.getEntryNode();
- IsLegalizingCall = false;
-
- // The legalize process is inherently a bottom-up recursive process (users
- // legalize their uses before themselves). Given infinite stack space, we
- // could just start legalizing on the root and traverse the whole graph. In
- // practice however, this causes us to run out of stack space on large basic
- // blocks. To avoid this problem, compute an ordering of the nodes where each
- // node is only legalized after all of its operands are legalized.
DAG.AssignTopologicalOrder();
- for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
- E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I)
- LegalizeOp(SDValue(I, 0));
- // Finally, it's possible the root changed. Get the new root.
- SDValue OldRoot = DAG.getRoot();
- assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
- DAG.setRoot(LegalizedNodes[OldRoot]);
-
- LegalizedNodes.clear();
-
- // Remove dead nodes now.
- DAG.RemoveDeadNodes();
-}
-
-
-/// FindCallEndFromCallStart - Given a chained node that is part of a call
-/// sequence, find the CALLSEQ_END node that terminates the call sequence.
-static SDNode *FindCallEndFromCallStart(SDNode *Node, int depth = 0) {
- // Nested CALLSEQ_START/END constructs aren't yet legal,
- // but we can DTRT and handle them correctly here.
- if (Node->getOpcode() == ISD::CALLSEQ_START)
- depth++;
- else if (Node->getOpcode() == ISD::CALLSEQ_END) {
- depth--;
- if (depth == 0)
- return Node;
- }
- if (Node->use_empty())
- return 0; // No CallSeqEnd
-
- // The chain is usually at the end.
- SDValue TheChain(Node, Node->getNumValues()-1);
- if (TheChain.getValueType() != MVT::Other) {
- // Sometimes it's at the beginning.
- TheChain = SDValue(Node, 0);
- if (TheChain.getValueType() != MVT::Other) {
- // Otherwise, hunt for it.
- for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i)
- if (Node->getValueType(i) == MVT::Other) {
- TheChain = SDValue(Node, i);
- break;
- }
-
- // Otherwise, we walked into a node without a chain.
- if (TheChain.getValueType() != MVT::Other)
- return 0;
+ // Visit all the nodes. We start in topological order, so that we see
+ // nodes with their original operands intact. Legalization can produce
+ // new nodes which may themselves need to be legalized. Iterate until all
+ // nodes have been legalized.
+ for (;;) {
+ bool AnyLegalized = false;
+ for (LegalizePosition = DAG.allnodes_end();
+ LegalizePosition != DAG.allnodes_begin(); ) {
+ --LegalizePosition;
+
+ SDNode *N = LegalizePosition;
+ if (LegalizedNodes.insert(N)) {
+ AnyLegalized = true;
+ LegalizeOp(N);
+ }
}
- }
-
- for (SDNode::use_iterator UI = Node->use_begin(),
- E = Node->use_end(); UI != E; ++UI) {
-
- // Make sure to only follow users of our token chain.
- SDNode *User = *UI;
- for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
- if (User->getOperand(i) == TheChain)
- if (SDNode *Result = FindCallEndFromCallStart(User, depth))
- return Result;
- }
- return 0;
-}
-
-/// FindCallStartFromCallEnd - Given a chained node that is part of a call
-/// sequence, find the CALLSEQ_START node that initiates the call sequence.
-static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
- int nested = 0;
- assert(Node && "Didn't find callseq_start for a call??");
- while (Node->getOpcode() != ISD::CALLSEQ_START || nested) {
- Node = Node->getOperand(0).getNode();
- assert(Node->getOperand(0).getValueType() == MVT::Other &&
- "Node doesn't have a token chain argument!");
- switch (Node->getOpcode()) {
- default:
+ if (!AnyLegalized)
break;
- case ISD::CALLSEQ_START:
- if (!nested)
- return Node;
- nested--;
- break;
- case ISD::CALLSEQ_END:
- nested++;
- break;
- }
- }
- return 0;
-}
-
-/// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
-/// see if any uses can reach Dest. If no dest operands can get to dest,
-/// legalize them, legalize ourself, and return false, otherwise, return true.
-///
-/// Keep track of the nodes we find that actually do lead to Dest in
-/// NodesLeadingTo. This avoids retraversing them an exponential number of
-/// times.
-///
-bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
- SmallPtrSet<SDNode*, 32> &NodesLeadingTo) {
- if (N == Dest) return true; // N certainly leads to Dest :)
-
- // If we've already processed this node and it does lead to Dest, there is no
- // need to reprocess it.
- if (NodesLeadingTo.count(N)) return true;
-
- // If the first result of this node has been already legalized, then it cannot
- // reach N.
- if (LegalizedNodes.count(SDValue(N, 0))) return false;
-
- // Okay, this node has not already been legalized. Check and legalize all
- // operands. If none lead to Dest, then we can legalize this node.
- bool OperandsLeadToDest = false;
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- OperandsLeadToDest |= // If an operand leads to Dest, so do we.
- LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
- NodesLeadingTo);
- if (OperandsLeadToDest) {
- NodesLeadingTo.insert(N);
- return true;
}
- // Okay, this node looks safe, legalize it and return false.
- LegalizeOp(SDValue(N, 0));
- return false;
+ // Remove dead nodes now.
+ DAG.RemoveDeadNodes();
}
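
Structurally, the new LegalizeDAG replaces recursion-plus-memoization with repeated reverse-order sweeps to a fixed point. A skeleton of that control flow on a std::list, chosen because, like the DAG's node list, appending does not invalidate live iterators, so nodes created mid-sweep are simply caught by the next sweep:

#include <list>
#include <set>

// Hypothetical driver: Nodes stands in for the DAG's node list, Legalized
// for the SmallPtrSet, and legalizeOne may append new (unlegalized) nodes.
void legalizeAll(std::list<int> &Nodes, std::set<int> &Legalized,
                 void (*legalizeOne)(std::list<int> &, int)) {
  for (;;) {
    bool AnyLegalized = false;
    for (std::list<int>::reverse_iterator I = Nodes.rbegin(),
                                          E = Nodes.rend(); I != E; ++I) {
      if (Legalized.insert(*I).second) {  // first visit of this node
        AnyLegalized = true;
        legalizeOne(Nodes, *I);           // may push_back new work
      }
    }
    if (!AnyLegalized)
      break;                              // a full sweep changed nothing
  }
}
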
/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
-static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
- SelectionDAG &DAG, const TargetLowering &TLI) {
+SDValue
+SelectionDAGLegalize::ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP) {
bool Extend = false;
DebugLoc dl = CFP->getDebugLoc();
@@ -369,20 +274,27 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
- if (Extend)
- return DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
- DAG.getEntryNode(),
- CPIdx, MachinePointerInfo::getConstantPool(),
- VT, false, false, Alignment);
- return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(), false, false,
- Alignment);
+ if (Extend) {
+ SDValue Result =
+ DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
+ DAG.getEntryNode(),
+ CPIdx, MachinePointerInfo::getConstantPool(),
+ VT, false, false, Alignment);
+ return Result;
+ }
+ SDValue Result =
+ DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
+ MachinePointerInfo::getConstantPool(), false, false, false,
+ Alignment);
+ return Result;
}
/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
-static
-SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
- const TargetLowering &TLI) {
+static void ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
+ const TargetLowering &TLI,
+ SelectionDAGLegalize *DAGLegalize) {
+ assert(ST->getAddressingMode() == ISD::UNINDEXED &&
+ "unaligned indexed stores not implemented!");
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
SDValue Val = ST->getValue();
@@ -397,8 +309,10 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// same size, then a (misaligned) int store.
// FIXME: Does not handle truncating floating point stores!
SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
- return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
- ST->isVolatile(), ST->isNonTemporal(), Alignment);
+ Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
+ ST->isVolatile(), ST->isNonTemporal(), Alignment);
+ DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
+ return;
}
// Do a (aligned) store to a stack slot, then copy from the stack slot
// to the final destination using (unaligned) integer loads and stores.
@@ -427,7 +341,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// Load one integer register's worth from the stack slot.
SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
// Store it to the final location. Remember the store.
Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
ST->getPointerInfo().getWithOffset(Offset),
@@ -458,8 +372,11 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
ST->isNonTemporal(),
MinAlign(ST->getAlignment(), Offset)));
// The order of the stores doesn't matter - say it with a TokenFactor.
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
- Stores.size());
+ SDValue Result =
+ DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
+ Stores.size());
+ DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
+ return;
}
assert(ST->getMemoryVT().isInteger() &&
!ST->getMemoryVT().isVector() &&
@@ -488,13 +405,18 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
Alignment);
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
+ SDValue Result =
+ DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
+ DAGLegalize->ReplaceNode(SDValue(ST, 0), Result);
}
/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
-static
-SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
- const TargetLowering &TLI) {
+static void
+ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
+ const TargetLowering &TLI,
+ SDValue &ValResult, SDValue &ChainResult) {
+ assert(LD->getAddressingMode() == ISD::UNINDEXED &&
+ "unaligned indexed loads not implemented!");
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
EVT VT = LD->getValueType(0);
@@ -507,13 +429,15 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
// then bitconvert to floating point or vector.
SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
LD->isVolatile(),
- LD->isNonTemporal(), LD->getAlignment());
+ LD->isNonTemporal(),
+ LD->isInvariant(), LD->getAlignment());
SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
if (VT.isFloatingPoint() && LoadedVT != VT)
Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);
- SDValue Ops[] = { Result, Chain };
- return DAG.getMergeValues(Ops, 2, dl);
+ ValResult = Result;
+ ChainResult = Chain;
+ return;
}
// Copy the value to a (aligned) stack slot using (unaligned) integer
@@ -537,6 +461,7 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
LD->getPointerInfo().getWithOffset(Offset),
LD->isVolatile(), LD->isNonTemporal(),
+ LD->isInvariant(),
MinAlign(LD->getAlignment(), Offset));
// Follow the load with a store to the stack slot. Remember the store.
Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
@@ -572,8 +497,9 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
MachinePointerInfo(), LoadedVT, false, false, 0);
// Callers expect a MERGE_VALUES node.
- SDValue Ops[] = { Load, TF };
- return DAG.getMergeValues(Ops, 2, dl);
+ ValResult = Load;
+ ChainResult = TF;
+ return;
}
assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
"Unaligned load of unsupported type.");
@@ -626,8 +552,8 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
- SDValue Ops[] = { Result, TF };
- return DAG.getMergeValues(Ops, 2, dl);
+ ValResult = Result;
+ ChainResult = TF;
}
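
Both expansion paths serve the same goal: when a wide value sits at an address the hardware cannot access directly, reassemble it from smaller, sufficiently aligned accesses. The byte-level version of the idea in plain C++ (little-endian merge shown; the DAG expansion selects the target's byte order):

#include <cstdint>

// Load a 32-bit value from an arbitrarily aligned pointer. Byte loads are
// always sufficiently aligned, so widen each byte and merge the pieces.
uint32_t loadUnaligned32(const unsigned char *P) {
  return (uint32_t)P[0] | ((uint32_t)P[1] << 8) |
         ((uint32_t)P[2] << 16) | ((uint32_t)P[3] << 24);
}
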
/// PerformInsertVectorEltInMemory - Some target cannot handle a variable
@@ -672,7 +598,8 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
false, false, 0);
// Load the updated vector.
return DAG.getLoad(VT, dl, Ch, StackPtr,
- MachinePointerInfo::getFixedStack(SPFI), false, false, 0);
+ MachinePointerInfo::getFixedStack(SPFI), false, false,
+ false, 0);
}
@@ -763,11 +690,10 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
/// LegalizeOp - Return a legal replacement for the given operation, with
/// all legal operands.
-SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
- if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
- return Op;
+void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
+ if (Node->getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
+ return;
- SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
@@ -782,13 +708,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
"Unexpected illegal type!");
- // Note that LegalizeOp may be reentered even from single-use nodes, which
- // means that we always must cache transformed nodes.
- DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
- if (I != LegalizedNodes.end()) return I->second;
-
SDValue Tmp1, Tmp2, Tmp3, Tmp4;
- SDValue Result = Op;
bool isCustom = false;
// Figure out the correct action; the way to query this varies by opcode
@@ -798,10 +718,15 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_VOID:
- case ISD::VAARG:
case ISD::STACKSAVE:
Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
break;
+ case ISD::VAARG:
+ Action = TLI.getOperationAction(Node->getOpcode(),
+ Node->getValueType(0));
+ if (Action != TargetLowering::Promote)
+ Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
+ break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::EXTRACT_VECTOR_ELT:
@@ -865,7 +790,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::FRAME_TO_ARGS_OFFSET:
case ISD::EH_SJLJ_SETJMP:
case ISD::EH_SJLJ_LONGJMP:
- case ISD::EH_SJLJ_DISPATCHSETUP:
// These operations lie about being legal: when they claim to be legal,
// they should actually be expanded.
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
@@ -882,17 +806,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (Action == TargetLowering::Legal)
Action = TargetLowering::Custom;
break;
- case ISD::BUILD_VECTOR:
- // A weird case: legalization for BUILD_VECTOR never legalizes the
- // operands!
- // FIXME: This really sucks... changing it isn't semantically incorrect,
- // but it massively pessimizes the code for floating-point BUILD_VECTORs
- // because ConstantFP operands get legalized into constant pool loads
- // before the BUILD_VECTOR code can see them. It doesn't usually bite,
- // though, because BUILD_VECTORS usually get lowered into other nodes
- // which get legalized properly.
- SimpleFinishLegalizing = false;
- break;
default:
if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
Action = TargetLowering::Legal;
@@ -903,22 +816,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
}
if (SimpleFinishLegalizing) {
- SmallVector<SDValue, 8> Ops, ResultVals;
+ SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
- Ops.push_back(LegalizeOp(Node->getOperand(i)));
+ Ops.push_back(Node->getOperand(i));
switch (Node->getOpcode()) {
default: break;
- case ISD::BR:
- case ISD::BRIND:
- case ISD::BR_JT:
- case ISD::BR_CC:
- case ISD::BRCOND:
- // Branches tweak the chain to include LastCALLSEQ_END
- Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
- LastCALLSEQ_END);
- Ops[0] = LegalizeOp(Ops[0]);
- LastCALLSEQ_END = DAG.getEntryNode();
- break;
case ISD::SHL:
case ISD::SRL:
case ISD::SRA:
@@ -926,57 +828,66 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::ROTR:
// Legalizing shifts/rotates requires adjusting the shift amount
// to the appropriate width.
- if (!Ops[1].getValueType().isVector())
- Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(),
- Ops[1]));
+ if (!Ops[1].getValueType().isVector()) {
+ SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[1]);
+ HandleSDNode Handle(SAO);
+ LegalizeOp(SAO.getNode());
+ Ops[1] = Handle.getValue();
+ }
break;
case ISD::SRL_PARTS:
case ISD::SRA_PARTS:
case ISD::SHL_PARTS:
// Legalizing shifts/rotates requires adjusting the shift amount
// to the appropriate width.
- if (!Ops[2].getValueType().isVector())
- Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(),
- Ops[2]));
+ if (!Ops[2].getValueType().isVector()) {
+ SDValue SAO = DAG.getShiftAmountOperand(Ops[0].getValueType(), Ops[2]);
+ HandleSDNode Handle(SAO);
+ LegalizeOp(SAO.getNode());
+ Ops[2] = Handle.getValue();
+ }
break;
}
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(),
- Ops.size()), 0);
+ SDNode *NewNode = DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());
+ if (NewNode != Node) {
+ DAG.ReplaceAllUsesWith(Node, NewNode, this);
+ for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
+ DAG.TransferDbgValues(SDValue(Node, i), SDValue(NewNode, i));
+ ReplacedNode(Node);
+ Node = NewNode;
+ }
switch (Action) {
case TargetLowering::Legal:
- for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
- ResultVals.push_back(Result.getValue(i));
- break;
+ return;
case TargetLowering::Custom:
// FIXME: The handling for custom lowering with multiple results is
// a complete mess.
- Tmp1 = TLI.LowerOperation(Result, DAG);
+ Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Tmp1.getNode()) {
+ SmallVector<SDValue, 8> ResultVals;
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
if (e == 1)
ResultVals.push_back(Tmp1);
else
ResultVals.push_back(Tmp1.getValue(i));
}
- break;
+ if (Tmp1.getNode() != Node || Tmp1.getResNo() != 0) {
+ DAG.ReplaceAllUsesWith(Node, ResultVals.data(), this);
+ for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
+ DAG.TransferDbgValues(SDValue(Node, i), ResultVals[i]);
+ ReplacedNode(Node);
+ }
+ return;
}
// FALL THROUGH
case TargetLowering::Expand:
- ExpandNode(Result.getNode(), ResultVals);
- break;
+ ExpandNode(Node);
+ return;
case TargetLowering::Promote:
- PromoteNode(Result.getNode(), ResultVals);
- break;
- }
- if (!ResultVals.empty()) {
- for (unsigned i = 0, e = ResultVals.size(); i != e; ++i) {
- if (ResultVals[i] != SDValue(Node, i))
- ResultVals[i] = LegalizeOp(ResultVals[i]);
- AddLegalizedOperand(SDValue(Node, i), ResultVals[i]);
- }
- return ResultVals[Op.getResNo()];
+ PromoteNode(Node);
+ return;
}
}
@@ -987,160 +898,24 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Node->dump( &DAG);
dbgs() << "\n";
#endif
- assert(0 && "Do not know how to legalize this operator!");
+ llvm_unreachable("Do not know how to legalize this operator!");
- case ISD::SRA:
- case ISD::SRL:
- case ISD::SHL: {
- // Scalarize vector SRA/SRL/SHL.
- EVT VT = Node->getValueType(0);
- assert(VT.isVector() && "Unable to legalize non-vector shift");
- assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal");
- unsigned NumElem = VT.getVectorNumElements();
-
- SmallVector<SDValue, 8> Scalars;
- for (unsigned Idx = 0; Idx < NumElem; Idx++) {
- SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- VT.getScalarType(),
- Node->getOperand(0), DAG.getIntPtrConstant(Idx));
- SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- VT.getScalarType(),
- Node->getOperand(1), DAG.getIntPtrConstant(Idx));
- Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
- VT.getScalarType(), Ex, Sh));
- }
- Result = DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
- &Scalars[0], Scalars.size());
- break;
- }
-
- case ISD::BUILD_VECTOR:
- switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
- default: assert(0 && "This action is not supported yet!");
- case TargetLowering::Custom:
- Tmp3 = TLI.LowerOperation(Result, DAG);
- if (Tmp3.getNode()) {
- Result = Tmp3;
- break;
- }
- // FALLTHROUGH
- case TargetLowering::Expand:
- Result = ExpandBUILD_VECTOR(Result.getNode());
- break;
- }
- break;
- case ISD::CALLSEQ_START: {
- SDNode *CallEnd = FindCallEndFromCallStart(Node);
-
- // Recursively Legalize all of the inputs of the call end that do not lead
- // to this call start. This ensures that any libcalls that need be inserted
- // are inserted *before* the CALLSEQ_START.
- {SmallPtrSet<SDNode*, 32> NodesLeadingTo;
- for (unsigned i = 0, e = CallEnd->getNumOperands(); i != e; ++i)
- LegalizeAllNodesNotLeadingTo(CallEnd->getOperand(i).getNode(), Node,
- NodesLeadingTo);
- }
-
- // Now that we have legalized all of the inputs (which may have inserted
- // libcalls), create the new CALLSEQ_START node.
- Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
-
- // Merge in the last call to ensure that this call starts after the last
- // call ended.
- if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
- Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- Tmp1, LastCALLSEQ_END);
- Tmp1 = LegalizeOp(Tmp1);
- }
-
- // Do not try to legalize the target-specific arguments (#1+).
- if (Tmp1 != Node->getOperand(0)) {
- SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
- Ops[0] = Tmp1;
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0],
- Ops.size()), Result.getResNo());
- }
-
- // Remember that the CALLSEQ_START is legalized.
- AddLegalizedOperand(Op.getValue(0), Result);
- if (Node->getNumValues() == 2) // If this has a flag result, remember it.
- AddLegalizedOperand(Op.getValue(1), Result.getValue(1));
-
- // Now that the callseq_start and all of the non-call nodes above this call
- // sequence have been legalized, legalize the call itself. During this
- // process, no libcalls can/will be inserted, guaranteeing that no calls
- // can overlap.
- assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!");
- // Note that we are selecting this call!
- LastCALLSEQ_END = SDValue(CallEnd, 0);
- IsLegalizingCall = true;
-
- // Legalize the call, starting from the CALLSEQ_END.
- LegalizeOp(LastCALLSEQ_END);
- assert(!IsLegalizingCall && "CALLSEQ_END should have cleared this!");
- return Result;
- }
+ case ISD::CALLSEQ_START:
case ISD::CALLSEQ_END:
- // If the CALLSEQ_START node hasn't been legalized first, legalize it. This
- // will cause this node to be legalized as well as handling libcalls right.
- if (LastCALLSEQ_END.getNode() != Node) {
- LegalizeOp(SDValue(FindCallStartFromCallEnd(Node), 0));
- DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
- assert(I != LegalizedNodes.end() &&
- "Legalizing the call start should have legalized this node!");
- return I->second;
- }
-
- // Otherwise, the call start has been legalized and everything is going
- // according to plan. Just legalize ourselves normally here.
- Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
- // Do not try to legalize the target-specific arguments (#1+), except for
- // an optional flag input.
- if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Glue){
- if (Tmp1 != Node->getOperand(0)) {
- SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
- Ops[0] = Tmp1;
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
- &Ops[0], Ops.size()),
- Result.getResNo());
- }
- } else {
- Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
- if (Tmp1 != Node->getOperand(0) ||
- Tmp2 != Node->getOperand(Node->getNumOperands()-1)) {
- SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
- Ops[0] = Tmp1;
- Ops.back() = Tmp2;
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
- &Ops[0], Ops.size()),
- Result.getResNo());
- }
- }
- assert(IsLegalizingCall && "Call sequence imbalance between start/end?");
- // This finishes up call legalization.
- IsLegalizingCall = false;
-
- // If the CALLSEQ_END node has a flag, remember that we legalized it.
- AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0));
- if (Node->getNumValues() == 2)
- AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1));
- return Result.getValue(Op.getResNo());
+ break;
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(Node);
- Tmp1 = LegalizeOp(LD->getChain()); // Legalize the chain.
- Tmp2 = LegalizeOp(LD->getBasePtr()); // Legalize the base pointer.
+ Tmp1 = LD->getChain(); // Legalize the chain.
+ Tmp2 = LD->getBasePtr(); // Legalize the base pointer.
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD) {
EVT VT = Node->getValueType(0);
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
- Tmp1, Tmp2, LD->getOffset()),
- Result.getResNo());
- Tmp3 = Result.getValue(0);
- Tmp4 = Result.getValue(1);
+ Tmp3 = SDValue(Node, 0);
+ Tmp4 = SDValue(Node, 1);
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned load and the target doesn't support it,
// expand it.
@@ -1148,20 +923,16 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
- DAG, TLI);
- Tmp3 = Result.getOperand(0);
- Tmp4 = Result.getOperand(1);
- Tmp3 = LegalizeOp(Tmp3);
- Tmp4 = LegalizeOp(Tmp4);
+ ExpandUnalignedLoad(cast<LoadSDNode>(Node),
+ DAG, TLI, Tmp3, Tmp4);
}
}
break;
case TargetLowering::Custom:
Tmp1 = TLI.LowerOperation(Tmp3, DAG);
if (Tmp1.getNode()) {
- Tmp3 = LegalizeOp(Tmp1);
- Tmp4 = LegalizeOp(Tmp1.getValue(1));
+ Tmp3 = Tmp1;
+ Tmp4 = Tmp1.getValue(1);
}
break;
case TargetLowering::Promote: {
@@ -1172,17 +943,19 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
- Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1));
- Tmp4 = LegalizeOp(Tmp1.getValue(1));
+ LD->isInvariant(), LD->getAlignment());
+ Tmp3 = DAG.getNode(ISD::BITCAST, dl, VT, Tmp1);
+ Tmp4 = Tmp1.getValue(1);
break;
}
}
- // Since loads produce two values, make sure to remember that we
- // legalized both of them.
- AddLegalizedOperand(SDValue(Node, 0), Tmp3);
- AddLegalizedOperand(SDValue(Node, 1), Tmp4);
- return Op.getResNo() ? Tmp4 : Tmp3;
+ if (Tmp4.getNode() != Node) {
+ assert(Tmp3.getNode() != Node && "Load must be completely replaced");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp3);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp4);
+ ReplacedNode(Node);
+ }
+ return;
}
EVT SrcVT = LD->getMemoryVT();
@@ -1213,9 +986,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
ISD::LoadExtType NewExtType =
ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
- Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
- Tmp1, Tmp2, LD->getPointerInfo(),
- NVT, isVolatile, isNonTemporal, Alignment);
+ SDValue Result =
+ DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
+ Tmp1, Tmp2, LD->getPointerInfo(),
+ NVT, isVolatile, isNonTemporal, Alignment);
Ch = Result.getValue(1); // The chain.
@@ -1230,8 +1004,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Result.getValueType(), Result,
DAG.getValueType(SrcVT));
- Tmp1 = LegalizeOp(Result);
- Tmp2 = LegalizeOp(Ch);
+ Tmp1 = Result;
+ Tmp2 = Ch;
} else if (SrcWidth & (SrcWidth - 1)) {
// If not loading a power-of-2 number of bits, expand as two loads.
assert(!SrcVT.isVector() && "Unsupported extload!");
@@ -1274,7 +1048,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
TLI.getShiftAmountTy(Hi.getValueType())));
// Join the hi and lo parts.
- Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
+ Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
} else {
// Big endian - avoid unaligned loads.
// EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
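// A minimal C sketch of that split (illustrative only; load16_be and
// load8 are hypothetical helpers, not DAG APIs):
//   uint32_t Hi = load16_be(p);     // EXTLOAD:i16 at offset 0
//   uint32_t Lo = load8(p + 2);     // ZEXTLOAD:i8 at offset 2
//   uint32_t V  = (Hi << 8) | Lo;   // join the hi and lo parts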
@@ -1304,29 +1078,25 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
TLI.getShiftAmountTy(Hi.getValueType())));
// Join the hi and lo parts.
- Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
+ Tmp1 = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
}
- Tmp1 = LegalizeOp(Result);
- Tmp2 = LegalizeOp(Ch);
+ Tmp2 = Ch;
} else {
switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Custom:
isCustom = true;
// FALLTHROUGH
case TargetLowering::Legal:
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
- Tmp1, Tmp2, LD->getOffset()),
- Result.getResNo());
- Tmp1 = Result.getValue(0);
- Tmp2 = Result.getValue(1);
+ Tmp1 = SDValue(Node, 0);
+ Tmp2 = SDValue(Node, 1);
if (isCustom) {
- Tmp3 = TLI.LowerOperation(Result, DAG);
+ Tmp3 = TLI.LowerOperation(SDValue(Node, 0), DAG);
if (Tmp3.getNode()) {
- Tmp1 = LegalizeOp(Tmp3);
- Tmp2 = LegalizeOp(Tmp3.getValue(1));
+ Tmp1 = Tmp3;
+ Tmp2 = Tmp3.getValue(1);
}
} else {
// If this is an unaligned load and the target doesn't support it,
@@ -1337,12 +1107,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
unsigned ABIAlignment =
TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
- DAG, TLI);
- Tmp1 = Result.getOperand(0);
- Tmp2 = Result.getOperand(1);
- Tmp1 = LegalizeOp(Tmp1);
- Tmp2 = LegalizeOp(Tmp2);
+ ExpandUnalignedLoad(cast<LoadSDNode>(Node),
+ DAG, TLI, Tmp1, Tmp2);
}
}
}
@@ -1352,7 +1118,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2,
LD->getPointerInfo(),
LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
+ LD->isInvariant(), LD->getAlignment());
unsigned ExtendOp;
switch (ExtType) {
case ISD::EXTLOAD:
@@ -1363,95 +1129,13 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
default: llvm_unreachable("Unexpected extend load type!");
}
- Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
- Tmp1 = LegalizeOp(Result); // Relegalize new nodes.
- Tmp2 = LegalizeOp(Load.getValue(1));
+ Tmp1 = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
+ Tmp2 = Load.getValue(1);
break;
}
- // If this is a promoted vector load, and the vector element types are
- // legal, then scalarize it.
- if (ExtType == ISD::EXTLOAD && SrcVT.isVector() &&
- TLI.isTypeLegal(Node->getValueType(0).getScalarType())) {
- SmallVector<SDValue, 8> LoadVals;
- SmallVector<SDValue, 8> LoadChains;
- unsigned NumElem = SrcVT.getVectorNumElements();
- unsigned Stride = SrcVT.getScalarType().getSizeInBits()/8;
-
- for (unsigned Idx=0; Idx<NumElem; Idx++) {
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(Stride));
- SDValue ScalarLoad = DAG.getExtLoad(ISD::EXTLOAD, dl,
- Node->getValueType(0).getScalarType(),
- Tmp1, Tmp2, LD->getPointerInfo().getWithOffset(Idx * Stride),
- SrcVT.getScalarType(),
- LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
-
- LoadVals.push_back(ScalarLoad.getValue(0));
- LoadChains.push_back(ScalarLoad.getValue(1));
- }
- Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &LoadChains[0], LoadChains.size());
- SDValue ValRes = DAG.getNode(ISD::BUILD_VECTOR, dl,
- Node->getValueType(0), &LoadVals[0], LoadVals.size());
-
- Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes.
- Tmp2 = LegalizeOp(Result.getValue(0)); // Relegalize new nodes.
- break;
- }
-
- // If this is a promoted vector load, and the vector element types are
- // illegal, create the promoted vector from bitcasted segments.
- if (ExtType == ISD::EXTLOAD && SrcVT.isVector()) {
- EVT MemElemTy = Node->getValueType(0).getScalarType();
- EVT SrcSclrTy = SrcVT.getScalarType();
- unsigned SizeRatio =
- (MemElemTy.getSizeInBits() / SrcSclrTy.getSizeInBits());
-
- SmallVector<SDValue, 8> LoadVals;
- SmallVector<SDValue, 8> LoadChains;
- unsigned NumElem = SrcVT.getVectorNumElements();
- unsigned Stride = SrcVT.getScalarType().getSizeInBits()/8;
-
- for (unsigned Idx=0; Idx<NumElem; Idx++) {
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(Stride));
- SDValue ScalarLoad = DAG.getExtLoad(ISD::EXTLOAD, dl,
- SrcVT.getScalarType(),
- Tmp1, Tmp2, LD->getPointerInfo().getWithOffset(Idx * Stride),
- SrcVT.getScalarType(),
- LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
- if (TLI.isBigEndian()) {
- // MSB (which is garbage, comes first)
- LoadVals.push_back(ScalarLoad.getValue(0));
- for (unsigned i = 0; i<SizeRatio-1; ++i)
- LoadVals.push_back(DAG.getUNDEF(SrcVT.getScalarType()));
- } else {
- // LSB (which is data, comes first)
- for (unsigned i = 0; i<SizeRatio-1; ++i)
- LoadVals.push_back(DAG.getUNDEF(SrcVT.getScalarType()));
- LoadVals.push_back(ScalarLoad.getValue(0));
- }
- LoadChains.push_back(ScalarLoad.getValue(1));
- }
-
- Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &LoadChains[0], LoadChains.size());
- EVT TempWideVector = EVT::getVectorVT(*DAG.getContext(),
- SrcVT.getScalarType(), NumElem*SizeRatio);
- SDValue ValRes = DAG.getNode(ISD::BUILD_VECTOR, dl,
- TempWideVector, &LoadVals[0], LoadVals.size());
-
- // Cast to the correct type
- ValRes = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), ValRes);
-
- Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes.
- Tmp2 = LegalizeOp(Result.getValue(0)); // Relegalize new nodes.
- break;
-
- }
+ assert(!SrcVT.isVector() &&
+ "Vector Loads are handled in LegalizeVectorOps");
// FIXME: This does not work for vectors on most targets. Sign- and
// zero-extend operations are currently folded into extending loads,
@@ -1461,10 +1145,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
"EXTLOAD should always be supported!");
// Turn the unsupported load into an EXTLOAD followed by an explicit
// zero/sign extend inreg.
- Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
- Tmp1, Tmp2, LD->getPointerInfo(), SrcVT,
- LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
+ SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
+ Tmp1, Tmp2, LD->getPointerInfo(), SrcVT,
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->getAlignment());
SDValue ValRes;
if (ExtType == ISD::SEXTLOAD)
ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
@@ -1472,42 +1156,41 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Result, DAG.getValueType(SrcVT));
else
ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
- Tmp1 = LegalizeOp(ValRes); // Relegalize new nodes.
- Tmp2 = LegalizeOp(Result.getValue(1)); // Relegalize new nodes.
+ Tmp1 = ValRes;
+ Tmp2 = Result.getValue(1);
break;
}
}
// Since loads produce two values, make sure to remember that we legalized
// both of them.
- AddLegalizedOperand(SDValue(Node, 0), Tmp1);
- AddLegalizedOperand(SDValue(Node, 1), Tmp2);
- return Op.getResNo() ? Tmp2 : Tmp1;
+ if (Tmp2.getNode() != Node) {
+ assert(Tmp1.getNode() != Node && "Load must be completely replaced");
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp1);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Tmp2);
+ ReplacedNode(Node);
+ }
+ break;
}
case ISD::STORE: {
StoreSDNode *ST = cast<StoreSDNode>(Node);
- Tmp1 = LegalizeOp(ST->getChain()); // Legalize the chain.
- Tmp2 = LegalizeOp(ST->getBasePtr()); // Legalize the pointer.
+ Tmp1 = ST->getChain();
+ Tmp2 = ST->getBasePtr();
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
if (!ST->isTruncatingStore()) {
if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
- Result = SDValue(OptStore, 0);
+ ReplaceNode(ST, OptStore);
break;
}
{
- Tmp3 = LegalizeOp(ST->getValue());
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
- Tmp1, Tmp3, Tmp2,
- ST->getOffset()),
- Result.getResNo());
-
+ Tmp3 = ST->getValue();
EVT VT = Tmp3.getValueType();
switch (TLI.getOperationAction(ISD::STORE, VT)) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned store and the target doesn't support it,
// expand it.
@@ -1515,27 +1198,31 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
- Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
- DAG, TLI);
+ ExpandUnalignedStore(cast<StoreSDNode>(Node),
+ DAG, TLI, this);
}
break;
case TargetLowering::Custom:
- Tmp1 = TLI.LowerOperation(Result, DAG);
- if (Tmp1.getNode()) Result = Tmp1;
+ Tmp1 = TLI.LowerOperation(SDValue(Node, 0), DAG);
+ if (Tmp1.getNode())
+ ReplaceNode(SDValue(Node, 0), Tmp1);
break;
- case TargetLowering::Promote:
+ case TargetLowering::Promote: {
assert(VT.isVector() && "Unknown legal promote case!");
Tmp3 = DAG.getNode(ISD::BITCAST, dl,
TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
- Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
- ST->getPointerInfo(), isVolatile,
- isNonTemporal, Alignment);
+ SDValue Result =
+ DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
+ ST->getPointerInfo(), isVolatile,
+ isNonTemporal, Alignment);
+ ReplaceNode(SDValue(Node, 0), Result);
break;
}
+ }
break;
}
} else {
- Tmp3 = LegalizeOp(ST->getValue());
+ Tmp3 = ST->getValue();
EVT StVT = ST->getMemoryVT();
unsigned StWidth = StVT.getSizeInBits();
@@ -1547,8 +1234,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
StVT.getStoreSizeInBits());
Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
- Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
- NVT, isVolatile, isNonTemporal, Alignment);
+ SDValue Result =
+ DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ NVT, isVolatile, isNonTemporal, Alignment);
+ ReplaceNode(SDValue(Node, 0), Result);
} else if (StWidth & (StWidth - 1)) {
// If not storing a power-of-2 number of bits, expand as two stores.
assert(!StVT.isVector() && "Unsupported truncstore!");
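// A hedged C sketch of that split for an i24 truncating store on a
// little-endian target (store16/store8 are hypothetical helpers):
//   store16(p, (uint16_t)Val);             // low 16 bits at offset 0
//   store8(p + 2, (uint8_t)(Val >> 16));   // high 8 bits at offset 2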
@@ -1602,17 +1291,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
}
// The order of the stores doesn't matter.
- Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
+ SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
+ ReplaceNode(SDValue(Node, 0), Result);
} else {
- if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
- Tmp2 != ST->getBasePtr())
- Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
- Tmp1, Tmp3, Tmp2,
- ST->getOffset()),
- Result.getResNo());
-
switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned store and the target doesn't support it,
// expand it.
@@ -1620,120 +1303,24 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
- Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
- DAG, TLI);
+ ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
}
break;
case TargetLowering::Custom:
- Result = TLI.LowerOperation(Result, DAG);
+ ReplaceNode(SDValue(Node, 0),
+ TLI.LowerOperation(SDValue(Node, 0), DAG));
break;
case TargetLowering::Expand:
-
- EVT WideScalarVT = Tmp3.getValueType().getScalarType();
- EVT NarrowScalarVT = StVT.getScalarType();
-
- if (StVT.isVector()) {
- unsigned NumElem = StVT.getVectorNumElements();
- // The type of the data we want to save
- EVT RegVT = Tmp3.getValueType();
- EVT RegSclVT = RegVT.getScalarType();
- // The type of data as saved in memory.
- EVT MemSclVT = StVT.getScalarType();
-
- bool RegScalarLegal = TLI.isTypeLegal(RegSclVT);
- bool MemScalarLegal = TLI.isTypeLegal(MemSclVT);
-
- // We need to expand this store. If the register element type
- // is legal then we can scalarize the vector and use
- // truncating stores.
- if (RegScalarLegal) {
- // Cast floats into integers
- unsigned ScalarSize = MemSclVT.getSizeInBits();
- EVT EltVT = EVT::getIntegerVT(*DAG.getContext(), ScalarSize);
-
- // Round odd types to the next pow of two.
- if (!isPowerOf2_32(ScalarSize))
- ScalarSize = NextPowerOf2(ScalarSize);
-
- // Store Stride in bytes
- unsigned Stride = ScalarSize/8;
- // Extract each of the elements from the original vector
- // and save them into memory individually.
- SmallVector<SDValue, 8> Stores;
- for (unsigned Idx = 0; Idx < NumElem; Idx++) {
- SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- RegSclVT, Tmp3, DAG.getIntPtrConstant(Idx));
-
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(Stride));
-
- // This scalar TruncStore may be illegal, but we legalize it
- // later.
- SDValue Store = DAG.getTruncStore(Tmp1, dl, Ex, Tmp2,
- ST->getPointerInfo().getWithOffset(Idx*Stride), MemSclVT,
- isVolatile, isNonTemporal, Alignment);
-
- Stores.push_back(Store);
- }
-
- Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &Stores[0], Stores.size());
- break;
- }
-
- // The scalar register type is illegal.
- // For example saving <2 x i64> -> <2 x i32> on a x86.
- // In here we bitcast the value into a vector of smaller parts and
- // save it using smaller scalars.
- if (!RegScalarLegal && MemScalarLegal) {
- // Store Stride in bytes
- unsigned Stride = MemSclVT.getSizeInBits()/8;
-
- unsigned SizeRatio =
- (RegSclVT.getSizeInBits() / MemSclVT.getSizeInBits());
-
- EVT CastValueVT = EVT::getVectorVT(*DAG.getContext(),
- MemSclVT,
- SizeRatio * NumElem);
-
- // Cast the wide elem vector to wider vec with smaller elem type.
- // Example <2 x i64> -> <4 x i32>
- Tmp3 = DAG.getNode(ISD::BITCAST, dl, CastValueVT, Tmp3);
-
- SmallVector<SDValue, 8> Stores;
- for (unsigned Idx=0; Idx < NumElem * SizeRatio; Idx++) {
- // Extract the Ith element.
- SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- NarrowScalarVT, Tmp3, DAG.getIntPtrConstant(Idx));
- // Bump pointer.
- Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
- DAG.getIntPtrConstant(Stride));
-
- // Store if this element is:
- // - First element on big endian, or
- // - Last element on little endian
- if (( TLI.isBigEndian() && (Idx % SizeRatio == 0)) ||
- ((!TLI.isBigEndian() && (Idx % SizeRatio == SizeRatio-1)))) {
- SDValue Store = DAG.getStore(Tmp1, dl, Ex, Tmp2,
- ST->getPointerInfo().getWithOffset(Idx*Stride),
- isVolatile, isNonTemporal, Alignment);
- Stores.push_back(Store);
- }
- }
- Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &Stores[0], Stores.size());
- break;
- }
-
- assert(false && "Unable to legalize the vector trunc store!");
- }// is vector
-
+ assert(!StVT.isVector() &&
+ "Vector Stores are handled in LegalizeVectorOps");
// TRUNCSTORE:i16 i32 -> STORE i16
assert(TLI.isTypeLegal(StVT) && "Do not know how to expand this store!");
Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3);
- Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
- isVolatile, isNonTemporal, Alignment);
+ SDValue Result =
+ DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ isVolatile, isNonTemporal, Alignment);
+ ReplaceNode(SDValue(Node, 0), Result);
break;
}
}
@@ -1741,17 +1328,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break;
}
}
- assert(Result.getValueType() == Op.getValueType() &&
- "Bad legalization!");
-
- // Make sure that the generated code is itself legal.
- if (Result != Op)
- Result = LegalizeOp(Result);
-
- // Note that LegalizeOp may be reentered even from single-use nodes, which
- // means that we always must cache transformed nodes.
- AddLegalizedOperand(Op, Result);
- return Result;
}
SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
@@ -1778,7 +1354,7 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
if (Op.getValueType().isVector())
return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr,MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
MachinePointerInfo(),
Vec.getValueType().getVectorElementType(),
@@ -1826,7 +1402,7 @@ SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
// Finally, load the updated vector.
return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, PtrInfo,
- false, false, 0);
+ false, false, false, 0);
}
SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
@@ -1876,7 +1452,8 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
StoreChain = DAG.getEntryNode();
// Result is a load from the stack slot.
- return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo, false, false, 0);
+ return DAG.getLoad(VT, dl, StoreChain, FIPtr, PtrInfo,
+ false, false, false, 0);
}
SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
@@ -1905,7 +1482,7 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
assert(FloatVT.isByteSized() && "Unsupported floating point type!");
// Load out a legal integer with the same sign bit as the float.
SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
} else { // Little endian
SDValue LoadPtr = StackPtr;
// The float may be wider than the integer we are going to load. Advance
@@ -1916,7 +1493,7 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
LoadPtr, DAG.getIntPtrConstant(ByteOffset));
// Load a legal integer containing the sign bit.
SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
// Move the sign bit to the top bit of the loaded integer.
unsigned BitShift = LoadTy.getSizeInBits() -
(FloatVT.getSizeInBits() - 8 * ByteOffset);
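// Net effect, sketched in C for an f32-sized sign source (assumes
// IEEE-754 layout; bit_cast is illustrative, not an API used here):
//   uint32_t Bits = bit_cast<uint32_t>(SignSrc);
//   bool Neg = (Bits >> 31) & 1;              // the isolated sign bit
//   Result = Neg ? -fabs(Mag) : fabs(Mag);    // copysign semantics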
@@ -1984,7 +1561,7 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
EVT OpVT = LHS.getValueType();
ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
switch (TLI.getCondCodeAction(CCCode, OpVT)) {
- default: assert(0 && "Unknown condition code action!");
+ default: llvm_unreachable("Unknown condition code action!");
case TargetLowering::Legal:
// Nothing to do.
break;
@@ -1992,7 +1569,7 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
unsigned Opc = 0;
switch (CCCode) {
- default: assert(0 && "Don't know how to expand this condition!");
+ default: llvm_unreachable("Don't know how to expand this condition!");
case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break;
case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break;
case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break;
@@ -2058,7 +1635,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
// Result is a load from the stack slot.
if (SlotSize == DestSize)
return DAG.getLoad(DestVT, dl, Store, FIPtr, PtrInfo,
- false, false, DestAlign);
+ false, false, false, DestAlign);
assert(SlotSize < DestSize && "Unknown extension!");
return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr,
@@ -2081,7 +1658,7 @@ SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
false, false, 0);
return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr,
MachinePointerInfo::getFixedStack(SPFI),
- false, false, 0);
+ false, false, false, 0);
}
@@ -2127,7 +1704,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
// If all elements are constants, create a load from the constant pool.
if (isConstant) {
- std::vector<Constant*> CV;
+ SmallVector<Constant*, 16> CV;
for (unsigned i = 0, e = NumElems; i != e; ++i) {
if (ConstantFPSDNode *V =
dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) {
@@ -2155,7 +1732,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
- false, false, Alignment);
+ false, false, false, Alignment);
}
if (!MoreThanTwoValues) {
@@ -2190,12 +1767,6 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
// and leave the Hi part unset.
SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
bool isSigned) {
- assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
- // The input chain to this libcall is the entry node of the function.
- // Legalizing the call will automatically add the previous call to the
- // dependence.
- SDValue InChain = DAG.getEntryNode();
-
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
@@ -2209,26 +1780,31 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- // Splice the libcall in wherever FindInputOutputChains tells us to.
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ // By default, the input chain to this libcall is the entry node of the
+ // function. If the libcall is going to be emitted as a tail call then
+ // TLI.isUsedByReturnOnly will change it to the right chain if the return
+ // node which is being folded has a non-entry input chain.
+ SDValue InChain = DAG.getEntryNode();
+
// isTailCall may be true since the callee does not reference caller stack
// frame. Check if it's in the right position.
- bool isTailCall = isInTailCallPosition(DAG, Node, TLI);
+ SDValue TCChain = InChain;
+ bool isTailCall = isInTailCallPosition(DAG, Node, TCChain, TLI);
+ if (isTailCall)
+ InChain = TCChain;
+
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), isTailCall,
- /*isReturnValueUsed=*/true,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, Node->getDebugLoc());
if (!CallInfo.second.getNode())
// It's a tailcall, return the chain (which is the DAG root).
return DAG.getRoot();
- // Legalize the call sequence, starting with the chain. This will advance
- // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
- // was added by LowerCallTo (guaranteeing proper serialization of calls).
- LegalizeOp(CallInfo.second);
return CallInfo.first;
}
@@ -2254,15 +1830,10 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
- false, 0, TLI.getLibcallCallingConv(LC), false,
- /*isReturnValueUsed=*/true,
+ false, 0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, dl);
- // Legalize the call sequence, starting with the chain. This will advance
- // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
- // was added by LowerCallTo (guaranteeing proper serialization of calls).
- LegalizeOp(CallInfo.second);
-
return CallInfo.first;
}
@@ -2272,7 +1843,6 @@ std::pair<SDValue, SDValue>
SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
SDNode *Node,
bool isSigned) {
- assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
SDValue InChain = Node->getOperand(0);
TargetLowering::ArgListTy Args;
@@ -2289,18 +1859,13 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- // Splice the libcall in wherever FindInputOutputChains tells us to.
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
- /*isReturnValueUsed=*/true,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, Node->getDebugLoc());
- // Legalize the call sequence, starting with the chain. This will advance
- // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
- // was added by LowerCallTo (guaranteeing proper serialization of calls).
- LegalizeOp(CallInfo.second);
return CallInfo;
}
@@ -2311,7 +1876,7 @@ SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
RTLIB::Libcall Call_PPCF128) {
RTLIB::Libcall LC;
switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
- default: assert(0 && "Unexpected request for libcall!");
+ default: llvm_unreachable("Unexpected request for libcall!");
case MVT::f32: LC = Call_F32; break;
case MVT::f64: LC = Call_F64; break;
case MVT::f80: LC = Call_F80; break;
@@ -2328,7 +1893,7 @@ SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
RTLIB::Libcall Call_I128) {
RTLIB::Libcall LC;
switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
- default: assert(0 && "Unexpected request for libcall!");
+ default: llvm_unreachable("Unexpected request for libcall!");
case MVT::i8: LC = Call_I8; break;
case MVT::i16: LC = Call_I16; break;
case MVT::i32: LC = Call_I32; break;
@@ -2343,7 +1908,7 @@ static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
const TargetLowering &TLI) {
RTLIB::Libcall LC;
switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
- default: assert(0 && "Unexpected request for libcall!");
+ default: llvm_unreachable("Unexpected request for libcall!");
case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
@@ -2388,7 +1953,7 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
RTLIB::Libcall LC;
switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
- default: assert(0 && "Unexpected request for libcall!");
+ default: llvm_unreachable("Unexpected request for libcall!");
case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
@@ -2426,21 +1991,16 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- // Splice the libcall in wherever FindInputOutputChains tells us to.
DebugLoc dl = Node->getDebugLoc();
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
- /*isReturnValueUsed=*/true, Callee, Args, DAG, dl);
-
- // Legalize the call sequence, starting with the chain. This will advance
- // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that
- // was added by LowerCallTo (guaranteeing proper serialization of calls).
- LegalizeOp(CallInfo.second);
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
+ Callee, Args, DAG, dl);
// Remainder is loaded back from the stack frame.
- SDValue Rem = DAG.getLoad(RetVT, dl, LastCALLSEQ_END, FIPtr,
- MachinePointerInfo(), false, false, 0);
+ SDValue Rem = DAG.getLoad(RetVT, dl, CallInfo.second, FIPtr,
+ MachinePointerInfo(), false, false, false, 0);
Results.push_back(CallInfo.first);
Results.push_back(Rem);
}
@@ -2489,7 +2049,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
false, false, 0);
// load the constructed double
SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
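// This is the classic exponent-0x433 trick: the two stored words form a
// double whose mantissa holds the 32-bit integer, and subtracting the bias
// recovers its value. Hedged C sketch for the unsigned case:
//   uint64_t Bits = 0x4330000000000000ULL | (uint64_t)X;     // 2^52 + X
//   double D = bit_cast<double>(Bits) - 4503599627370496.0;  // minus 2^52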
// FP constant to bias correct the final result
SDValue Bias = DAG.getConstantFP(isSigned ?
BitsToDouble(0x4330000080000000ULL) :
@@ -2611,7 +2171,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
// offset depending on the data type.
uint64_t FF;
switch (Op0.getValueType().getSimpleVT().SimpleTy) {
- default: assert(0 && "Unsupported integer type!");
+ default: llvm_unreachable("Unsupported integer type!");
case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
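// Sketch of why the fudge factor works: converting unsigned X through a
// signed path is off by exactly 2^N when the sign bit is set, so 2^N (as
// a float) is added back afterwards; e.g. for i32:
//   double D = (double)(int32_t)X;           // wrong by 2^32 if X >= 2^31
//   if ((int32_t)X < 0) D += 4294967296.0;   // 2^32, i.e. FF = 0x4F800000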
@@ -2629,13 +2189,15 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
if (DestVT == MVT::f32)
FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
- false, false, Alignment);
+ false, false, false, Alignment);
else {
- FudgeInReg =
- LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
- DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(),
- MVT::f32, false, false, Alignment));
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
+ DAG.getEntryNode(), CPIdx,
+ MachinePointerInfo::getConstantPool(),
+ MVT::f32, false, false, Alignment);
+ HandleSDNode Handle(Load);
+ LegalizeOp(Load.getNode());
+ FudgeInReg = Handle.getValue();
}
return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
@@ -2731,7 +2293,7 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
EVT SHVT = TLI.getShiftAmountTy(VT);
SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
switch (VT.getSimpleVT().SimpleTy) {
- default: assert(0 && "Unhandled Expand type in BSWAP!");
+ default: llvm_unreachable("Unhandled Expand type in BSWAP!");
case MVT::i16:
Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
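// The i16 case in plain C, for reference:
//   uint16_t bswap16(uint16_t X) { return (X << 8) | (X >> 8); }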
@@ -2788,7 +2350,7 @@ static APInt SplatByte(unsigned NumBits, uint8_t ByteVal) {
SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
DebugLoc dl) {
switch (Opc) {
- default: assert(0 && "Cannot expand this yet!");
+ default: llvm_unreachable("Cannot expand this yet!");
case ISD::CTPOP: {
EVT VT = Op.getValueType();
EVT ShVT = TLI.getShiftAmountTy(VT);
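// The CTPOP expansion is the usual SWAR reduction built from SplatByte
// masks; a hedged C sketch of that pattern for 32 bits:
//   V = V - ((V >> 1) & 0x55555555);
//   V = (V & 0x33333333) + ((V >> 2) & 0x33333333);
//   V = (V + (V >> 4)) & 0x0F0F0F0F;
//   return (V * 0x01010101) >> 24;   // sum the per-byte counts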
@@ -2831,6 +2393,9 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
return Op;
}
+ case ISD::CTLZ_ZERO_UNDEF:
+ // This trivially expands to CTLZ.
+ return DAG.getNode(ISD::CTLZ, dl, Op.getValueType(), Op);
case ISD::CTLZ: {
// for now, we do this:
// x = x | (x >> 1);
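// ...continuing the recipe: smear the highest set bit all the way right,
// then count the leading zeros as ones in the complement. C sketch:
//   X |= X >> 1; X |= X >> 2; X |= X >> 4; X |= X >> 8; X |= X >> 16;
//   return popcount(~X);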
@@ -2852,6 +2417,9 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
Op = DAG.getNOT(dl, Op, VT);
return DAG.getNode(ISD::CTPOP, dl, VT, Op);
}
+ case ISD::CTTZ_ZERO_UNDEF:
+ // This trivially expands to CTTZ.
+ return DAG.getNode(ISD::CTTZ, dl, Op.getValueType(), Op);
case ISD::CTTZ: {
// for now, we use: { return popcount(~x & (x - 1)); }
// unless the target has ctlz but not ctpop, in which case we use:
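// C sketch of the popcount form (popcount is illustrative):
//   return popcount(~X & (X - 1));  // ones strictly below the lowest set bit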
@@ -2881,7 +2449,6 @@ std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
switch (Opc) {
default:
llvm_unreachable("Unhandled atomic intrinsic Expand!");
- break;
case ISD::ATOMIC_SWAP:
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type for atomic!");
@@ -2959,14 +2526,16 @@ std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
return ExpandChainLibCall(LC, Node, false);
}
-void SelectionDAGLegalize::ExpandNode(SDNode *Node,
- SmallVectorImpl<SDValue> &Results) {
+void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
+ SmallVector<SDValue, 8> Results;
DebugLoc dl = Node->getDebugLoc();
SDValue Tmp1, Tmp2, Tmp3, Tmp4;
switch (Node->getOpcode()) {
case ISD::CTPOP:
case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTTZ:
+ case ISD::CTTZ_ZERO_UNDEF:
Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl);
Results.push_back(Tmp1);
break;
@@ -2986,7 +2555,6 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::PREFETCH:
case ISD::VAEND:
case ISD::EH_SJLJ_LONGJMP:
- case ISD::EH_SJLJ_DISPATCHSETUP:
// If the target didn't expand these, there's nothing to do, so just
// preserve the chain and be done.
Results.push_back(Node->getOperand(0));
@@ -3006,7 +2574,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
false, false, false, false, 0, CallingConv::C,
/*isTailCall=*/false,
- /*isReturnValueUsed=*/true,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__sync_synchronize",
TLI.getPointerTy()),
Args, DAG, dl);
@@ -3083,7 +2651,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
false, false, false, false, 0, CallingConv::C,
/*isTailCall=*/false,
- /*isReturnValueUsed=*/true,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("abort", TLI.getPointerTy()),
Args, DAG, dl);
Results.push_back(CallResult.second);
@@ -3166,7 +2734,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
unsigned Align = Node->getConstantOperandVal(3);
SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
- MachinePointerInfo(V), false, false, 0);
+ MachinePointerInfo(V),
+ false, false, false, 0);
SDValue VAList = VAListLoad;
if (Align > TLI.getMinStackArgumentAlignment()) {
@@ -3191,7 +2760,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
MachinePointerInfo(V), false, false, 0);
// Load the actual argument out of the pointer VAList
Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
- false, false, 0));
+ false, false, false, 0));
Results.push_back(Results[0].getValue(1));
break;
}
@@ -3202,7 +2771,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
Node->getOperand(2), MachinePointerInfo(VS),
- false, false, 0);
+ false, false, false, 0);
Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
MachinePointerInfo(VD), false, false, 0);
Results.push_back(Tmp1);
@@ -3236,15 +2805,57 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Node->getOperand(2), dl));
break;
case ISD::VECTOR_SHUFFLE: {
- SmallVector<int, 8> Mask;
- cast<ShuffleVectorSDNode>(Node)->getMask(Mask);
+ SmallVector<int, 32> NewMask;
+ ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
EVT VT = Node->getValueType(0);
EVT EltVT = VT.getVectorElementType();
- if (!TLI.isTypeLegal(EltVT))
- EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
+ SDValue Op0 = Node->getOperand(0);
+ SDValue Op1 = Node->getOperand(1);
+ if (!TLI.isTypeLegal(EltVT)) {
+
+ EVT NewEltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
+
+ // BUILD_VECTOR operands are allowed to be wider than the element type.
+ // But if NewEltVT is smaller than EltVT, the BUILD_VECTOR does not accept it.
+ if (NewEltVT.bitsLT(EltVT)) {
+
+ // Convert shuffle node.
+ // If original node was v4i64 and the new EltVT is i32,
+ // cast operands to v8i32 and re-build the mask.
+
+ // Calculate new VT, the size of the new VT should be equal to original.
+ EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT,
+ VT.getSizeInBits()/NewEltVT.getSizeInBits());
+ assert(NewVT.bitsEq(VT));
+
+ // cast operands to new VT
+ Op0 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op0);
+ Op1 = DAG.getNode(ISD::BITCAST, dl, NewVT, Op1);
+
+ // Convert the shuffle mask
+ unsigned int factor = NewVT.getVectorNumElements()/VT.getVectorNumElements();
+
+ // EltVT gets smaller
+ assert(factor > 0);
+
+ for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
+ if (Mask[i] < 0) {
+ for (unsigned fi = 0; fi < factor; ++fi)
+ NewMask.push_back(Mask[i]);
+ }
+ else {
+ for (unsigned fi = 0; fi < factor; ++fi)
+ NewMask.push_back(Mask[i]*factor+fi);
+ }
+ }
+ Mask = NewMask;
+ VT = NewVT;
+ }
+ EltVT = NewEltVT;
+ }
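+  // Worked example of the conversion above: a v4i64 mask <3, 1, -1, 0>
+  // recast over v8i32 (factor == 2) becomes <6,7, 2,3, -1,-1, 0,1>.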
unsigned NumElems = VT.getVectorNumElements();
- SmallVector<SDValue, 8> Ops;
+ SmallVector<SDValue, 16> Ops;
for (unsigned i = 0; i != NumElems; ++i) {
if (Mask[i] < 0) {
Ops.push_back(DAG.getUNDEF(EltVT));
@@ -3253,14 +2864,17 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
unsigned Idx = Mask[i];
if (Idx < NumElems)
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
- Node->getOperand(0),
+ Op0,
DAG.getIntPtrConstant(Idx)));
else
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
- Node->getOperand(1),
+ Op1,
DAG.getIntPtrConstant(Idx - NumElems)));
}
+
Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
+ // We may have changed the BUILD_VECTOR type. Cast it back to the Node type.
+ Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), Tmp1);
Results.push_back(Tmp1);
break;
}
@@ -3408,10 +3022,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
// Check to see if this FP immediate is already legal.
// If this is a legal constant, turn it into a TargetConstantFP node.
- if (TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
- Results.push_back(SDValue(Node, 0));
- else
- Results.push_back(ExpandConstantFP(CFP, true, DAG, TLI));
+ if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
+ Results.push_back(ExpandConstantFP(CFP, true));
break;
}
case ISD::EHSELECTION: {
@@ -3423,13 +3035,23 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break;
}
case ISD::EXCEPTIONADDR: {
- unsigned Reg = TLI.getExceptionAddressRegister();
+ unsigned Reg = TLI.getExceptionPointerRegister();
assert(Reg && "Can't expand to unknown register!");
Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
Node->getValueType(0)));
Results.push_back(Results[0].getValue(1));
break;
}
+ case ISD::FSUB: {
+ EVT VT = Node->getValueType(0);
+ assert(TLI.isOperationLegalOrCustom(ISD::FADD, VT) &&
+ TLI.isOperationLegalOrCustom(ISD::FNEG, VT) &&
+ "Don't know how to expand this FP subtraction!");
+ Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1));
+ Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1);
+ Results.push_back(Tmp1);
+ break;
+ }
case ISD::SUB: {
EVT VT = Node->getValueType(0);
assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
@@ -3657,6 +3279,10 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
DAG.getIntPtrConstant(0));
TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
DAG.getIntPtrConstant(1));
+ // Ret is a node with an illegal type. Because such things are not
+ // generally permitted during this phase of legalization, delete the
+ // node. The above EXTRACT_ELEMENT nodes should have been folded.
+ DAG.DeleteNode(Ret.getNode());
}
if (isSigned) {
@@ -3797,7 +3423,6 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()),
Tmp2, Tmp3, Tmp4, dl);
- LastCALLSEQ_END = DAG.getEntryNode();
assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!");
Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
@@ -3807,6 +3432,35 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Results.push_back(Tmp1);
break;
}
+ case ISD::BUILD_VECTOR:
+ Results.push_back(ExpandBUILD_VECTOR(Node));
+ break;
+ case ISD::SRA:
+ case ISD::SRL:
+ case ISD::SHL: {
+ // Scalarize vector SRA/SRL/SHL.
+ EVT VT = Node->getValueType(0);
+ assert(VT.isVector() && "Unable to legalize non-vector shift");
+ assert(TLI.isTypeLegal(VT.getScalarType())&& "Element type must be legal");
+ unsigned NumElem = VT.getVectorNumElements();
+
+ SmallVector<SDValue, 8> Scalars;
+ for (unsigned Idx = 0; Idx < NumElem; Idx++) {
+ SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+ VT.getScalarType(),
+ Node->getOperand(0), DAG.getIntPtrConstant(Idx));
+ SDValue Sh = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+ VT.getScalarType(),
+ Node->getOperand(1), DAG.getIntPtrConstant(Idx));
+ Scalars.push_back(DAG.getNode(Node->getOpcode(), dl,
+ VT.getScalarType(), Ex, Sh));
+ }
+ SDValue Result =
+ DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
+ &Scalars[0], Scalars.size());
+ ReplaceNode(SDValue(Node, 0), Result);
+ break;
+ }
case ISD::GLOBAL_OFFSET_TABLE:
case ISD::GlobalAddress:
case ISD::GlobalTLSAddress:
@@ -3817,13 +3471,16 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_VOID:
// FIXME: Custom lowering for these operations shouldn't return null!
- for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
- Results.push_back(SDValue(Node, i));
break;
}
+
+ // Replace the original node with the legalized result.
+ if (!Results.empty())
+ ReplaceNode(Node, Results.data());
}
-void SelectionDAGLegalize::PromoteNode(SDNode *Node,
- SmallVectorImpl<SDValue> &Results) {
+
+void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
+ SmallVector<SDValue, 8> Results;
EVT OVT = Node->getValueType(0);
if (Node->getOpcode() == ISD::UINT_TO_FP ||
Node->getOpcode() == ISD::SINT_TO_FP ||
@@ -3835,20 +3492,24 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
SDValue Tmp1, Tmp2, Tmp3;
switch (Node->getOpcode()) {
case ISD::CTTZ:
+ case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTPOP:
// Zero extend the argument.
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
- // Perform the larger operation.
+ // Perform the larger operation. For CTPOP and CTTZ_ZERO_UNDEF, this is
+ // already the correct result.
Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
if (Node->getOpcode() == ISD::CTTZ) {
- //if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
+ // FIXME: This should set a bit in the zero extended value instead.
Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
ISD::SETEQ);
Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
- } else if (Node->getOpcode() == ISD::CTLZ) {
+ } else if (Node->getOpcode() == ISD::CTLZ ||
+ Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
// Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
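// e.g. promoting CTLZ from i8 to i32: the zero-extension contributes 24
// extra leading zeros, so ctlz8(X) == ctlz32(zext(X)) - 24.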
Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
DAG.getConstant(NVT.getSizeInBits() -
@@ -3877,6 +3538,33 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
Node->getOpcode() == ISD::SINT_TO_FP, dl);
Results.push_back(Tmp1);
break;
+ case ISD::VAARG: {
+ SDValue Chain = Node->getOperand(0); // Get the chain.
+ SDValue Ptr = Node->getOperand(1); // Get the pointer.
+
+ unsigned TruncOp;
+ if (OVT.isVector()) {
+ TruncOp = ISD::BITCAST;
+ } else {
+ assert(OVT.isInteger()
+ && "VAARG promotion is supported only for vectors or integer types");
+ TruncOp = ISD::TRUNCATE;
+ }
+
+ // Perform the larger operation, then convert back
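+    // This mirrors C's default argument promotions: the vararg travels in
+    // a wider slot, so it is read at NVT and converted back down to OVT.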
+ Tmp1 = DAG.getVAArg(NVT, dl, Chain, Ptr, Node->getOperand(2),
+ Node->getConstantOperandVal(3));
+ Chain = Tmp1.getValue(1);
+
+ Tmp2 = DAG.getNode(TruncOp, dl, OVT, Tmp1);
+
+ // Modified the chain result - switch anything that used the old chain to
+ // use the new one.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 0), Tmp2);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), Chain);
+ ReplacedNode(Node);
+ break;
+ }
case ISD::AND:
case ISD::OR:
case ISD::XOR: {
@@ -3924,8 +3612,7 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
break;
}
case ISD::VECTOR_SHUFFLE: {
- SmallVector<int, 8> Mask;
- cast<ShuffleVectorSDNode>(Node)->getMask(Mask);
+ ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Node)->getMask();
// Cast the two input vectors.
Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
@@ -3950,7 +3637,31 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
Tmp1, Tmp2, Node->getOperand(2)));
break;
}
+ case ISD::FDIV:
+ case ISD::FREM:
+ case ISD::FPOW: {
+ Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
+ Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(1));
+ Tmp3 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
+ Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
+ Tmp3, DAG.getIntPtrConstant(0)));
+ break;
}
+ case ISD::FLOG2:
+ case ISD::FEXP2:
+ case ISD::FLOG:
+ case ISD::FEXP: {
+ Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(0));
+ Tmp2 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
+ Results.push_back(DAG.getNode(ISD::FP_ROUND, dl, OVT,
+ Tmp2, DAG.getIntPtrConstant(0)));
+ break;
+ }
+ }
+
+ // Replace the original node with the legalized result.
+ if (!Results.empty())
+ ReplaceNode(Node, Results.data());
}
// SelectionDAG::Legalize - This is the entry point for the file.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 7c1cc69..e393896 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -479,8 +479,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
if (L->getExtensionType() == ISD::NON_EXTLOAD) {
NewL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
NVT, dl, L->getChain(), L->getBasePtr(), L->getOffset(),
- L->getPointerInfo(), NVT,
- L->isVolatile(), L->isNonTemporal(), L->getAlignment());
+ L->getPointerInfo(), NVT, L->isVolatile(),
+ L->isNonTemporal(), false, L->getAlignment());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), NewL.getValue(1));
@@ -492,7 +492,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
L->getMemoryVT(), dl, L->getChain(),
L->getBasePtr(), L->getOffset(), L->getPointerInfo(),
L->getMemoryVT(), L->isVolatile(),
- L->isNonTemporal(), L->getAlignment());
+ L->isNonTemporal(), false, L->getAlignment());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), NewL.getValue(1));
@@ -672,7 +672,7 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
case ISD::SETUEQ:
LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 : RTLIB::OEQ_F64;
break;
- default: assert(false && "Do not know how to soften this setcc!");
+ default: llvm_unreachable("Do not know how to soften this setcc!");
}
}
@@ -1212,7 +1212,7 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
switch (SrcVT.getSimpleVT().SimpleTy) {
default:
- assert(false && "Unsupported UINT_TO_FP!");
+ llvm_unreachable("Unsupported UINT_TO_FP!");
case MVT::i32:
Parts = TwoE32;
break;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index a5c4c2d..95ddb1e 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -20,7 +20,6 @@
#include "LegalizeTypes.h"
#include "llvm/DerivedTypes.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -57,8 +56,10 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::Constant: Res = PromoteIntRes_Constant(N); break;
case ISD::CONVERT_RNDSAT:
Res = PromoteIntRes_CONVERT_RNDSAT(N); break;
+ case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTLZ: Res = PromoteIntRes_CTLZ(N); break;
case ISD::CTPOP: Res = PromoteIntRes_CTPOP(N); break;
+ case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTTZ: Res = PromoteIntRes_CTTZ(N); break;
case ISD::EXTRACT_VECTOR_ELT:
Res = PromoteIntRes_EXTRACT_VECTOR_ELT(N); break;
@@ -211,13 +212,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
switch (getTypeAction(InVT)) {
- default:
- assert(false && "Unknown type action!");
- break;
case TargetLowering::TypeLegal:
break;
case TargetLowering::TypePromoteInteger:
- if (NOutVT.bitsEq(NInVT))
+ if (NOutVT.bitsEq(NInVT) && !NOutVT.isVector() && !NInVT.isVector())
// The input promotes to the same size. Convert the promoted value.
return DAG.getNode(ISD::BITCAST, dl, NOutVT, GetPromotedInteger(InOp));
break;
@@ -251,9 +249,11 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
return DAG.getNode(ISD::BITCAST, dl, NOutVT, InOp);
}
case TargetLowering::TypeWidenVector:
- if (OutVT.bitsEq(NInVT))
- // The input is widened to the same size. Convert to the widened value.
- return DAG.getNode(ISD::BITCAST, dl, OutVT, GetWidenedVector(InOp));
+ // The input is widened to the same size. Convert to the widened value.
+ // Make sure that the outgoing value is not a vector, because this would
+ // make us bitcast between two vectors which are legalized in different ways.
+ if (NOutVT.bitsEq(NInVT) && !NOutVT.isVector())
+ return DAG.getNode(ISD::BITCAST, dl, NOutVT, GetWidenedVector(InOp));
}
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
@@ -312,7 +312,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
EVT OVT = N->getValueType(0);
EVT NVT = Op.getValueType();
- Op = DAG.getNode(ISD::CTLZ, dl, NVT, Op);
+ Op = DAG.getNode(N->getOpcode(), dl, NVT, Op);
// Subtract off the extra leading bits in the bigger type.
return DAG.getNode(ISD::SUB, dl, NVT, Op,
DAG.getConstant(NVT.getSizeInBits() -
@@ -330,13 +330,15 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) {
EVT OVT = N->getValueType(0);
EVT NVT = Op.getValueType();
DebugLoc dl = N->getDebugLoc();
- // The count is the same in the promoted type except if the original
- // value was zero. This can be handled by setting the bit just off
- // the top of the original type.
- APInt TopBit(NVT.getSizeInBits(), 0);
- TopBit.setBit(OVT.getSizeInBits());
- Op = DAG.getNode(ISD::OR, dl, NVT, Op, DAG.getConstant(TopBit, NVT));
- return DAG.getNode(ISD::CTTZ, dl, NVT, Op);
+ if (N->getOpcode() == ISD::CTTZ) {
+ // The count is the same in the promoted type except if the original
+ // value was zero. This can be handled by setting the bit just off
+ // the top of the original type.
+ APInt TopBit(NVT.getSizeInBits(), 0);
+ TopBit.setBit(OVT.getSizeInBits());
+ Op = DAG.getNode(ISD::OR, dl, NVT, Op, DAG.getConstant(TopBit, NVT));
+ }
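+  // Sketch of the guard bit for i8 promoted to i32: cttz8(X) becomes
+  // cttz32(X | 0x100), so X == 0 now yields 8 instead of 32.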
+ return DAG.getNode(N->getOpcode(), dl, NVT, Op);
}
SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N) {
@@ -486,7 +488,11 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SELECT(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntRes_VSELECT(SDNode *N) {
- SDValue Mask = GetPromotedInteger(N->getOperand(0));
+ SDValue Mask = N->getOperand(0);
+ EVT OpTy = N->getOperand(1).getValueType();
+
+ // Promote all the way up to the canonical SetCC type.
+ Mask = PromoteTargetBoolean(Mask, TLI.getSetCCResultType(OpTy));
SDValue LHS = GetPromotedInteger(N->getOperand(1));
SDValue RHS = GetPromotedInteger(N->getOperand(2));
return DAG.getNode(ISD::VSELECT, N->getDebugLoc(),
@@ -1098,8 +1104,10 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::AssertZext: ExpandIntRes_AssertZext(N, Lo, Hi); break;
case ISD::BSWAP: ExpandIntRes_BSWAP(N, Lo, Hi); break;
case ISD::Constant: ExpandIntRes_Constant(N, Lo, Hi); break;
+ case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTLZ: ExpandIntRes_CTLZ(N, Lo, Hi); break;
case ISD::CTPOP: ExpandIntRes_CTPOP(N, Lo, Hi); break;
+ case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTTZ: ExpandIntRes_CTTZ(N, Lo, Hi); break;
case ISD::FP_TO_SINT: ExpandIntRes_FP_TO_SINT(N, Lo, Hi); break;
case ISD::FP_TO_UINT: ExpandIntRes_FP_TO_UINT(N, Lo, Hi); break;
@@ -1171,7 +1179,6 @@ std::pair <SDValue, SDValue> DAGTypeLegalizer::ExpandAtomic(SDNode *Node) {
switch (Opc) {
default:
llvm_unreachable("Unhandled atomic intrinsic Expand!");
- break;
case ISD::ATOMIC_SWAP:
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type for atomic!");
@@ -1355,7 +1362,7 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
APInt HighBitMask = APInt::getHighBitsSet(ShBits, ShBits - Log2_32(NVTBits));
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(N->getOperand(1), HighBitMask, KnownZero, KnownOne);
+ DAG.ComputeMaskedBits(N->getOperand(1), KnownZero, KnownOne);
// If we don't know anything about the high bits, exit.
if (((KnownZero|KnownOne) & HighBitMask) == 0)
@@ -1390,15 +1397,15 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
}
}
-#if 0
- // FIXME: This code is broken for shifts with a zero amount!
// If we know that all of the high bits of the shift amount are zero, then we
// can do this as a couple of simple shifts.
if ((KnownZero & HighBitMask) == HighBitMask) {
- // Compute 32-amt.
- SDValue Amt2 = DAG.getNode(ISD::SUB, ShTy,
- DAG.getConstant(NVTBits, ShTy),
- Amt);
+ // Calculate 31-x. 31 is used instead of 32 to avoid creating an undefined
+ // shift if x is zero. We can use XOR here because x is known to be smaller
+ // than 32.
+ SDValue Amt2 = DAG.getNode(ISD::XOR, dl, ShTy, Amt,
+ DAG.getConstant(NVTBits-1, ShTy));
+
unsigned Op1, Op2;
switch (N->getOpcode()) {
default: llvm_unreachable("Unknown shift");
@@ -1407,13 +1414,23 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
case ISD::SRA: Op1 = ISD::SRL; Op2 = ISD::SHL; break;
}
- Lo = DAG.getNode(N->getOpcode(), NVT, InL, Amt);
- Hi = DAG.getNode(ISD::OR, NVT,
- DAG.getNode(Op1, NVT, InH, Amt),
- DAG.getNode(Op2, NVT, InL, Amt2));
+ // When shifting right the arithmetic for Lo and Hi is swapped.
+ if (N->getOpcode() != ISD::SHL)
+ std::swap(InL, InH);
+
+ // Use a little trick to get the bits that move from Lo to Hi. First
+ // shift by one bit.
+ SDValue Sh1 = DAG.getNode(Op2, dl, NVT, InL, DAG.getConstant(1, ShTy));
+ // Then compute the remaining shift with amount-1.
+ SDValue Sh2 = DAG.getNode(Op2, dl, NVT, Sh1, Amt2);
+
+ Lo = DAG.getNode(N->getOpcode(), dl, NVT, InL, Amt);
+ Hi = DAG.getNode(ISD::OR, dl, NVT, DAG.getNode(Op1, dl, NVT, InH, Amt),Sh2);
+
+ if (N->getOpcode() != ISD::SHL)
+ std::swap(Hi, Lo);
return true;
}
-#endif
return false;
}
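A minimal sketch of the re-enabled expansion for a 64-bit SHL split into two 32-bit halves, assuming (as the known-bits check above established) that the shift amount is below 32. The XOR plus the extra one-bit shift avoid ever shifting by a full 32 bits, which would be undefined:

#include <cstdint>

uint64_t shl64_sketch(uint32_t Lo, uint32_t Hi, unsigned Amt) { // Amt < 32
  unsigned Amt2 = Amt ^ 31;            // == 31 - Amt, because Amt < 32
  uint32_t Sh1 = Lo >> 1;              // move one bit first...
  uint32_t Sh2 = Sh1 >> Amt2;          // ...then the remaining 31 - Amt
  uint32_t NewLo = Lo << Amt;
  uint32_t NewHi = (Hi << Amt) | Sh2;  // bits carried from Lo into Hi
  return ((uint64_t)NewHi << 32) | NewLo;
}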
@@ -1493,8 +1510,6 @@ ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
Hi = DAG.getNode(ISD::SELECT, dl, NVT, isShort, HiS, HiL);
return true;
}
-
- return false;
}
void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
@@ -1702,8 +1717,8 @@ void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N,
SDValue HiNotZero = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), Hi,
DAG.getConstant(0, NVT), ISD::SETNE);
- SDValue LoLZ = DAG.getNode(ISD::CTLZ, dl, NVT, Lo);
- SDValue HiLZ = DAG.getNode(ISD::CTLZ, dl, NVT, Hi);
+ SDValue LoLZ = DAG.getNode(N->getOpcode(), dl, NVT, Lo);
+ SDValue HiLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, NVT, Hi);
Lo = DAG.getNode(ISD::SELECT, dl, NVT, HiNotZero, HiLZ,
DAG.getNode(ISD::ADD, dl, NVT, LoLZ,
@@ -1732,8 +1747,8 @@ void DAGTypeLegalizer::ExpandIntRes_CTTZ(SDNode *N,
SDValue LoNotZero = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), Lo,
DAG.getConstant(0, NVT), ISD::SETNE);
- SDValue LoLZ = DAG.getNode(ISD::CTTZ, dl, NVT, Lo);
- SDValue HiLZ = DAG.getNode(ISD::CTTZ, dl, NVT, Hi);
+ SDValue LoLZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, NVT, Lo);
+ SDValue HiLZ = DAG.getNode(N->getOpcode(), dl, NVT, Hi);
Lo = DAG.getNode(ISD::SELECT, dl, NVT, LoNotZero, LoLZ,
DAG.getNode(ISD::ADD, dl, NVT, HiLZ,
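On plain integers the expanded count reads roughly as below (a sketch using the GCC/Clang __builtin_ctz intrinsic; the branch on Lo mirrors the SETCC/SELECT pair above):

#include <cstdint>

unsigned cttz64_sketch(uint32_t Lo, uint32_t Hi) {
  if (Lo != 0)
    return (unsigned)__builtin_ctz(Lo); // Lo != 0, so ZERO_UNDEF is safe
  // The DAG form applies the original opcode to Hi, so for plain CTTZ a
  // fully zero input still yields the defined answer 32 + 32 = 64.
  return 32 + (Hi != 0 ? (unsigned)__builtin_ctz(Hi) : 32u);
}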
@@ -1778,6 +1793,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
unsigned Alignment = N->getAlignment();
bool isVolatile = N->isVolatile();
bool isNonTemporal = N->isNonTemporal();
+ bool isInvariant = N->isInvariant();
DebugLoc dl = N->getDebugLoc();
assert(NVT.isByteSized() && "Expanded type not byte sized!");
@@ -1808,7 +1824,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
} else if (TLI.isLittleEndian()) {
// Little-endian - low bits are at low addresses.
Lo = DAG.getLoad(NVT, dl, Ch, Ptr, N->getPointerInfo(),
- isVolatile, isNonTemporal, Alignment);
+ isVolatile, isNonTemporal, isInvariant, Alignment);
unsigned ExcessBits =
N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits();
@@ -2305,12 +2321,14 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
SDValue Func = DAG.getExternalSymbol(TLI.getLibcallName(LC), PtrVT);
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(Chain, RetTy, true, false, false, false,
- 0, TLI.getLibcallCallingConv(LC), false,
- true, Func, Args, DAG, dl);
+ 0, TLI.getLibcallCallingConv(LC),
+ /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
+ Func, Args, DAG, dl);
SplitInteger(CallInfo.first, Lo, Hi);
SDValue Temp2 = DAG.getLoad(PtrVT, dl, CallInfo.second, Temp,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
SDValue Ofl = DAG.getSetCC(dl, N->getValueType(1), Temp2,
DAG.getConstant(0, PtrVT),
ISD::SETNE);
@@ -2781,7 +2799,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
else if (SrcVT == MVT::i128)
FF = APInt(32, F32TwoE128);
else
- assert(false && "Unsupported UINT_TO_FP!");
+ llvm_unreachable("Unsupported UINT_TO_FP!");
// Check whether the sign bit is set.
SDValue Lo, Hi;
@@ -2926,38 +2944,28 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SCALAR_TO_VECTOR(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntRes_CONCAT_VECTORS(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
- SDValue Op0 = N->getOperand(1);
- SDValue Op1 = N->getOperand(1);
- assert(Op0.getValueType() == Op1.getValueType() &&
- "Invalid input vector types");
-
EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
assert(NOutVT.isVector() && "This type must be promoted to a vector type");
+ EVT InElemTy = OutVT.getVectorElementType();
EVT OutElemTy = NOutVT.getVectorElementType();
- unsigned NumElem0 = Op0.getValueType().getVectorNumElements();
- unsigned NumElem1 = Op1.getValueType().getVectorNumElements();
+ unsigned NumElem = N->getOperand(0).getValueType().getVectorNumElements();
unsigned NumOutElem = NOutVT.getVectorNumElements();
- assert(NumElem0 + NumElem1 == NumOutElem &&
- "Invalid number of incoming elements");
+ unsigned NumOperands = N->getNumOperands();
+ assert(NumElem * NumOperands == NumOutElem &&
+ "Unexpected number of elements");
// Take the elements from each input vector in turn.
SmallVector<SDValue, 8> Ops(NumOutElem);
- for (unsigned i = 0; i < NumElem0; ++i) {
- SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- Op0.getValueType().getScalarType(), Op0,
- DAG.getIntPtrConstant(i));
- Ops[i] = DAG.getNode(ISD::ANY_EXTEND, dl, OutElemTy, Ext);
- }
-
- // Take the elements from the second vector
- for (unsigned i = 0; i < NumElem1; ++i) {
- SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- Op1.getValueType().getScalarType(), Op1,
- DAG.getIntPtrConstant(i));
- Ops[i + NumElem0] = DAG.getNode(ISD::ANY_EXTEND, dl, OutElemTy, Ext);
+ for (unsigned i = 0; i < NumOperands; ++i) {
+ SDValue Op = N->getOperand(i);
+ for (unsigned j = 0; j < NumElem; ++j) {
+ SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+ InElemTy, Op, DAG.getIntPtrConstant(j));
+ Ops[i * NumElem + j] = DAG.getNode(ISD::ANY_EXTEND, dl, OutElemTy, Ext);
+ }
}
return DAG.getNode(ISD::BUILD_VECTOR, dl, NOutVT, &Ops[0], Ops.size());
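The flattened indexing places element j of operand i at position i * NumElem + j. A sketch for a hypothetical concat of two <2 x i8> operands promoted to <4 x i16> (plain sign extension stands in for ANY_EXTEND here):

#include <cstdint>

void concat2x2(const int8_t Op0[2], const int8_t Op1[2], int16_t Out[4]) {
  const int8_t *Ops[2] = {Op0, Op1};
  for (unsigned i = 0; i < 2; ++i)         // NumOperands
    for (unsigned j = 0; j < 2; ++j)       // NumElem per operand
      Out[i * 2 + j] = (int16_t)Ops[i][j]; // extract, then extend
}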
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index a4bb577..439aa4d 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -222,8 +222,6 @@ bool DAGTypeLegalizer::run() {
for (unsigned i = 0, NumResults = N->getNumValues(); i < NumResults; ++i) {
EVT ResultVT = N->getValueType(i);
switch (getTypeAction(ResultVT)) {
- default:
- assert(false && "Unknown action!");
case TargetLowering::TypeLegal:
break;
// The following calls must take care of *all* of the node's results,
@@ -275,8 +273,6 @@ ScanOperands:
EVT OpVT = N->getOperand(i).getValueType();
switch (getTypeAction(OpVT)) {
- default:
- assert(false && "Unknown action!");
case TargetLowering::TypeLegal:
continue;
// The following calls must either replace all of the node's results
@@ -752,7 +748,11 @@ void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) {
}
void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) {
- assert(Result.getValueType() == Op.getValueType().getVectorElementType() &&
+ // Note that in some cases a vector operand may be wider than the vector
+ // element type. For example, a BUILD_VECTOR of type <1 x i1> may have a
+ // constant i8 operand.
+ assert(Result.getValueType().getSizeInBits() >=
+ Op.getValueType().getVectorElementType().getSizeInBits() &&
"Invalid type for scalarized vector");
AnalyzeNewValue(Result);
@@ -889,7 +889,7 @@ SDValue DAGTypeLegalizer::CreateStackStoreLoad(SDValue Op,
MachinePointerInfo(), false, false, 0);
// Result is a load from the stack slot.
return DAG.getLoad(DestVT, dl, Store, StackPtr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
}
/// CustomLowerNode - Replace the node's results with custom code provided
@@ -1056,8 +1056,9 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
- false, 0, TLI.getLibcallCallingConv(LC), false,
- /*isReturnValueUsed=*/true,
+ false, 0, TLI.getLibcallCallingConv(LC),
+ /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, dl);
return CallInfo.first;
}
@@ -1084,12 +1085,11 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- // Splice the libcall in wherever FindInputOutputChains tells us to.
Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
- /*isReturnValueUsed=*/true,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, Node->getDebugLoc());
return CallInfo;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index abacdac..e866445 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -521,6 +521,7 @@ private:
SDValue ScalarizeVecRes_LOAD(LoadSDNode *N);
SDValue ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N);
SDValue ScalarizeVecRes_SIGN_EXTEND_INREG(SDNode *N);
+ SDValue ScalarizeVecRes_VSELECT(SDNode *N);
SDValue ScalarizeVecRes_SELECT(SDNode *N);
SDValue ScalarizeVecRes_SELECT_CC(SDNode *N);
SDValue ScalarizeVecRes_SETCC(SDNode *N);
@@ -633,6 +634,7 @@ private:
SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue WidenVecOp_STORE(SDNode* N);
+ SDValue WidenVecOp_SETCC(SDNode* N);
SDValue WidenVecOp_Convert(SDNode *N);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 8e7e498..a8ff7c6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -21,7 +21,6 @@
#include "LegalizeTypes.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -46,8 +45,6 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Handle some special cases efficiently.
switch (getTypeAction(InVT)) {
- default:
- assert(false && "Unknown type action!");
case TargetLowering::TypeLegal:
case TargetLowering::TypePromoteInteger:
break;
@@ -130,7 +127,8 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
false, false, 0);
// Load the first half from the stack slot.
- Lo = DAG.getLoad(NOutVT, dl, Store, StackPtr, PtrInfo, false, false, 0);
+ Lo = DAG.getLoad(NOutVT, dl, Store, StackPtr, PtrInfo,
+ false, false, false, 0);
// Increment the pointer to the other half.
unsigned IncrementSize = NOutVT.getSizeInBits() / 8;
@@ -140,7 +138,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Load the second half from the stack slot.
Hi = DAG.getLoad(NOutVT, dl, Store, StackPtr,
PtrInfo.getWithOffset(IncrementSize), false,
- false, MinAlign(Alignment, IncrementSize));
+ false, false, MinAlign(Alignment, IncrementSize));
// Handle endianness of the load.
if (TLI.isBigEndian())
@@ -212,11 +210,12 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
unsigned Alignment = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
+ bool isInvariant = LD->isInvariant();
assert(NVT.isByteSized() && "Expanded type not byte sized!");
Lo = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getPointerInfo(),
- isVolatile, isNonTemporal, Alignment);
+ isVolatile, isNonTemporal, isInvariant, Alignment);
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits() / 8;
@@ -224,7 +223,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
DAG.getIntPtrConstant(IncrementSize));
Hi = DAG.getLoad(NVT, dl, Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
- isVolatile, isNonTemporal,
+ isVolatile, isNonTemporal, isInvariant,
MinAlign(Alignment, IncrementSize));
// Build a factor node to remember that this load is independent of the
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index f815b00..3ae8345 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -64,6 +64,8 @@ class VectorLegalizer {
// Implement vselect in terms of XOR, AND, OR when blend is not supported
// by the target.
SDValue ExpandVSELECT(SDValue Op);
+ SDValue ExpandLoad(SDValue Op);
+ SDValue ExpandStore(SDValue Op);
SDValue ExpandFNEG(SDValue Op);
// Implements vector promotion; this is essentially just bitcasting the
// operands to a different type and bitcasting the result back to the
@@ -124,6 +126,33 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
SDValue Result =
SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops.data(), Ops.size()), 0);
+ if (Op.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD) {
+ if (TLI.isLoadExtLegal(LD->getExtensionType(), LD->getMemoryVT()))
+ return TranslateLegalizeResults(Op, Result);
+ Changed = true;
+ return LegalizeOp(ExpandLoad(Op));
+ }
+ } else if (Op.getOpcode() == ISD::STORE) {
+ StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
+ EVT StVT = ST->getMemoryVT();
+ EVT ValVT = ST->getValue().getValueType();
+ if (StVT.isVector() && ST->isTruncatingStore())
+ switch (TLI.getTruncStoreAction(ValVT, StVT)) {
+ default: llvm_unreachable("This action is not supported yet!");
+ case TargetLowering::Legal:
+ return TranslateLegalizeResults(Op, Result);
+ case TargetLowering::Custom:
+ Changed = true;
+ return LegalizeOp(TLI.LowerOperation(Result, DAG));
+ case TargetLowering::Expand:
+ Changed = true;
+ return LegalizeOp(ExpandStore(Op));
+ }
+ }
+
bool HasVectorValue = false;
for (SDNode::value_iterator J = Node->value_begin(), E = Node->value_end();
J != E;
@@ -156,8 +185,10 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR:
- case ISD::CTTZ:
case ISD::CTLZ:
+ case ISD::CTTZ:
+ case ISD::CTLZ_ZERO_UNDEF:
+ case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTPOP:
case ISD::SELECT:
case ISD::VSELECT:
@@ -262,6 +293,97 @@ SDValue VectorLegalizer::PromoteVectorOp(SDValue Op) {
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
+
+SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
+ DebugLoc dl = Op.getDebugLoc();
+ LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
+ SDValue Chain = LD->getChain();
+ SDValue BasePTR = LD->getBasePtr();
+ EVT SrcVT = LD->getMemoryVT();
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+
+ SmallVector<SDValue, 8> LoadVals;
+ SmallVector<SDValue, 8> LoadChains;
+ unsigned NumElem = SrcVT.getVectorNumElements();
+ unsigned Stride = SrcVT.getScalarType().getSizeInBits()/8;
+
+ for (unsigned Idx=0; Idx<NumElem; Idx++) {
+ SDValue ScalarLoad = DAG.getExtLoad(ExtType, dl,
+ Op.getNode()->getValueType(0).getScalarType(),
+ Chain, BasePTR, LD->getPointerInfo().getWithOffset(Idx * Stride),
+ SrcVT.getScalarType(),
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->getAlignment());
+
+ BasePTR = DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR,
+ DAG.getIntPtrConstant(Stride));
+
+ LoadVals.push_back(ScalarLoad.getValue(0));
+ LoadChains.push_back(ScalarLoad.getValue(1));
+ }
+
+ SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ &LoadChains[0], LoadChains.size());
+ SDValue Value = DAG.getNode(ISD::BUILD_VECTOR, dl,
+ Op.getNode()->getValueType(0), &LoadVals[0], LoadVals.size());
+
+ AddLegalizedOperand(Op.getValue(0), Value);
+ AddLegalizedOperand(Op.getValue(1), NewChain);
+
+ return (Op.getResNo() ? NewChain : Value);
+}
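A sketch of what the per-element loop produces for a hypothetical sign-extending <4 x i8> to <4 x i32> vector load, expressed on plain arrays:

#include <cstdint>

void loadV4I8SExt(const int8_t *Mem, int32_t Out[4]) {
  for (unsigned Idx = 0; Idx < 4; ++Idx)
    Out[Idx] = (int32_t)Mem[Idx]; // one scalar extload per element, the
                                  // pointer advancing by the 1-byte stride
}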
+
+SDValue VectorLegalizer::ExpandStore(SDValue Op) {
+ DebugLoc dl = Op.getDebugLoc();
+ StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
+ SDValue Chain = ST->getChain();
+ SDValue BasePTR = ST->getBasePtr();
+ SDValue Value = ST->getValue();
+ EVT StVT = ST->getMemoryVT();
+
+ unsigned Alignment = ST->getAlignment();
+ bool isVolatile = ST->isVolatile();
+ bool isNonTemporal = ST->isNonTemporal();
+
+ unsigned NumElem = StVT.getVectorNumElements();
+ // The type of the data as it lives in registers.
+ EVT RegVT = Value.getValueType();
+ EVT RegSclVT = RegVT.getScalarType();
+ // The type of the data as saved in memory.
+ EVT MemSclVT = StVT.getScalarType();
+
+ // The width in bits of each element as stored in memory.
+ unsigned ScalarSize = MemSclVT.getSizeInBits();
+
+ // Round odd widths up to the next power of two.
+ if (!isPowerOf2_32(ScalarSize))
+ ScalarSize = NextPowerOf2(ScalarSize);
+
+ // The store stride in bytes.
+ unsigned Stride = ScalarSize/8;
+ // Extract each of the elements from the original vector
+ // and save them into memory individually.
+ SmallVector<SDValue, 8> Stores;
+ for (unsigned Idx = 0; Idx < NumElem; Idx++) {
+ SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+ RegSclVT, Value, DAG.getIntPtrConstant(Idx));
+
+ // This scalar TruncStore may be illegal, but we legalize it later.
+ SDValue Store = DAG.getTruncStore(Chain, dl, Ex, BasePTR,
+ ST->getPointerInfo().getWithOffset(Idx*Stride), MemSclVT,
+ isVolatile, isNonTemporal, Alignment);
+
+ BasePTR = DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR,
+ DAG.getIntPtrConstant(Stride));
+
+ Stores.push_back(Store);
+ }
+ SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ &Stores[0], Stores.size());
+ AddLegalizedOperand(Op, TF);
+ return TF;
+}
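Likewise, a sketch of the scalarized form of a hypothetical <4 x i32> to <4 x i16> truncating store:

#include <cstdint>

void storeV4I32TruncI16(uint16_t *Mem, const uint32_t Val[4]) {
  for (unsigned Idx = 0; Idx < 4; ++Idx)
    Mem[Idx] = (uint16_t)Val[Idx]; // extract, then truncating scalar store,
                                   // the pointer advancing by 2 bytes
}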
+
SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
// Implement VSELECT in terms of XOR, AND, OR
// on platforms which do not support blend natively.
@@ -274,10 +396,12 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
// If we can't even use the basic vector operations of
// AND, OR, XOR, we will have to scalarize the op.
- if (!TLI.isOperationLegalOrCustom(ISD::AND, VT) ||
- !TLI.isOperationLegalOrCustom(ISD::XOR, VT) ||
- !TLI.isOperationLegalOrCustom(ISD::OR, VT))
- return DAG.UnrollVectorOp(Op.getNode());
+ // Notice that the operation may be 'promoted', which means that it is
+ // 'bitcast' to another type for which the operation is handled.
+ if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand)
+ return DAG.UnrollVectorOp(Op.getNode());
assert(VT.getSizeInBits() == Op.getOperand(1).getValueType().getSizeInBits()
&& "Invalid mask size");
@@ -301,9 +425,9 @@ SDValue VectorLegalizer::ExpandUINT_TO_FLOAT(SDValue Op) {
DebugLoc DL = Op.getDebugLoc();
// Make sure that the SINT_TO_FP and SRL instructions are available.
- if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, VT) ||
- !TLI.isOperationLegalOrCustom(ISD::SRL, VT))
- return DAG.UnrollVectorOp(Op.getNode());
+ if (TLI.getOperationAction(ISD::SINT_TO_FP, VT) == TargetLowering::Expand ||
+ TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand)
+ return DAG.UnrollVectorOp(Op.getNode());
EVT SVT = VT.getScalarType();
assert((SVT.getSizeInBits() == 64 || SVT.getSizeInBits() == 32) &&
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 107a42b..5f23f01 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -21,7 +21,6 @@
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -59,6 +58,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::LOAD: R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N));break;
case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break;
case ISD::SIGN_EXTEND_INREG: R = ScalarizeVecRes_InregOp(N); break;
+ case ISD::VSELECT: R = ScalarizeVecRes_VSELECT(N); break;
case ISD::SELECT: R = ScalarizeVecRes_SELECT(N); break;
case ISD::SELECT_CC: R = ScalarizeVecRes_SELECT_CC(N); break;
case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
@@ -194,7 +194,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
N->getPointerInfo(),
N->getMemoryVT().getVectorElementType(),
N->isVolatile(), N->isNonTemporal(),
- N->getOriginalAlignment());
+ N->isInvariant(), N->getOriginalAlignment());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
@@ -227,6 +227,37 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
return InOp;
}
+SDValue DAGTypeLegalizer::ScalarizeVecRes_VSELECT(SDNode *N) {
+ SDValue Cond = GetScalarizedVector(N->getOperand(0));
+ SDValue LHS = GetScalarizedVector(N->getOperand(1));
+ TargetLowering::BooleanContent ScalarBool = TLI.getBooleanContents(false);
+ TargetLowering::BooleanContent VecBool = TLI.getBooleanContents(true);
+ if (ScalarBool != VecBool) {
+ EVT CondVT = Cond.getValueType();
+ switch (ScalarBool) {
+ case TargetLowering::UndefinedBooleanContent:
+ break;
+ case TargetLowering::ZeroOrOneBooleanContent:
+ assert(VecBool == TargetLowering::UndefinedBooleanContent ||
+ VecBool == TargetLowering::ZeroOrNegativeOneBooleanContent);
+ // Vector read from all ones, scalar expects a single 1 so mask.
+ Cond = DAG.getNode(ISD::AND, N->getDebugLoc(), CondVT,
+ Cond, DAG.getConstant(1, CondVT));
+ break;
+ case TargetLowering::ZeroOrNegativeOneBooleanContent:
+ assert(VecBool == TargetLowering::UndefinedBooleanContent ||
+ VecBool == TargetLowering::ZeroOrOneBooleanContent);
+ // Vector reads from a one, scalar from all ones so sign extend.
+ Cond = DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), CondVT,
+ Cond, DAG.getValueType(MVT::i1));
+ break;
+ }
+ }
+ return DAG.getNode(ISD::SELECT, N->getDebugLoc(),
+ LHS.getValueType(), Cond, LHS,
+ GetScalarizedVector(N->getOperand(2)));
+}
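A sketch of the two mask conversions on a plain 32-bit lane (names are illustrative; the encodings follow the BooleanContent values handled above):

#include <cstdint>

// Vector lane encodes true as all ones; the scalar expects 0/1, so mask.
int32_t allOnesToZeroOrOne(int32_t Cond) { return Cond & 1; }

// Vector lane encodes true as bit 0; the scalar expects 0/-1, so
// sign-extend from the i1: -(x & 1) maps 1 to -1 and 0 to 0.
int32_t zeroOrOneToAllOnes(int32_t Cond) { return -(Cond & 1); }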
+
SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) {
SDValue LHS = GetScalarizedVector(N->getOperand(1));
return DAG.getNode(ISD::SELECT, N->getDebugLoc(),
@@ -405,6 +436,10 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
N->dump(&DAG);
dbgs() << "\n");
SDValue Lo, Hi;
+
+ // See if the target wants to custom expand this node.
+ if (CustomLowerNode(N, N->getValueType(ResNo), true))
+ return;
switch (N->getOpcode()) {
default:
@@ -442,8 +477,10 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::ANY_EXTEND:
case ISD::CONVERT_RNDSAT:
case ISD::CTLZ:
- case ISD::CTPOP:
case ISD::CTTZ:
+ case ISD::CTLZ_ZERO_UNDEF:
+ case ISD::CTTZ_ZERO_UNDEF:
+ case ISD::CTPOP:
case ISD::FABS:
case ISD::FCEIL:
case ISD::FCOS:
@@ -677,7 +714,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Load the Lo part from the stack slot.
Lo = DAG.getLoad(Lo.getValueType(), dl, Store, StackPtr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
// Increment the pointer to the other part.
unsigned IncrementSize = Lo.getValueType().getSizeInBits() / 8;
@@ -686,7 +723,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Load the Hi part from the stack slot.
Hi = DAG.getLoad(Hi.getValueType(), dl, Store, StackPtr, MachinePointerInfo(),
- false, false, MinAlign(Alignment, IncrementSize));
+ false, false, false, MinAlign(Alignment, IncrementSize));
}
void DAGTypeLegalizer::SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo,
@@ -713,20 +750,21 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
unsigned Alignment = LD->getOriginalAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
+ bool isInvariant = LD->isInvariant();
EVT LoMemVT, HiMemVT;
GetSplitDestVTs(MemoryVT, LoMemVT, HiMemVT);
Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
LD->getPointerInfo(), LoMemVT, isVolatile, isNonTemporal,
- Alignment);
+ isInvariant, Alignment);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset,
LD->getPointerInfo().getWithOffset(IncrementSize),
- HiMemVT, isVolatile, isNonTemporal, Alignment);
+ HiMemVT, isVolatile, isNonTemporal, isInvariant, Alignment);
// Build a factor node to remember that this load is independent of the
// other one.
@@ -773,46 +811,18 @@ void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
DebugLoc dl = N->getDebugLoc();
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
- // Split the input.
+ // If the input also splits, handle it directly for a compile time speedup.
+ // Otherwise split it by hand.
EVT InVT = N->getOperand(0).getValueType();
- switch (getTypeAction(InVT)) {
- default: llvm_unreachable("Unexpected type action!");
- case TargetLowering::TypeLegal: {
+ if (getTypeAction(InVT) == TargetLowering::TypeSplitVector) {
+ GetSplitVector(N->getOperand(0), Lo, Hi);
+ } else {
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
LoVT.getVectorNumElements());
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, N->getOperand(0),
DAG.getIntPtrConstant(0));
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, N->getOperand(0),
DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
- break;
- }
- case TargetLowering::TypePromoteInteger: {
- SDValue InOp = GetPromotedInteger(N->getOperand(0));
- EVT InNVT = EVT::getVectorVT(*DAG.getContext(),
- InOp.getValueType().getVectorElementType(),
- LoVT.getVectorNumElements());
- Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
- DAG.getIntPtrConstant(0));
- Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
- DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
- break;
- }
- case TargetLowering::TypeSplitVector:
- GetSplitVector(N->getOperand(0), Lo, Hi);
- break;
- case TargetLowering::TypeWidenVector: {
- // If the result needs to be split and the input needs to be widened,
- // the two types must have different lengths. Use the widened result
- // and extract from it to do the split.
- SDValue InOp = GetWidenedVector(N->getOperand(0));
- EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
- LoVT.getVectorNumElements());
- Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
- DAG.getIntPtrConstant(0));
- Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
- DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
- break;
- }
}
if (N->getOpcode() == ISD::FP_ROUND) {
@@ -1239,6 +1249,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
case ISD::SCALAR_TO_VECTOR: Res = WidenVecRes_SCALAR_TO_VECTOR(N); break;
case ISD::SIGN_EXTEND_INREG: Res = WidenVecRes_InregOp(N); break;
+ case ISD::VSELECT:
case ISD::SELECT: Res = WidenVecRes_SELECT(N); break;
case ISD::SELECT_CC: Res = WidenVecRes_SELECT_CC(N); break;
case ISD::SETCC: Res = WidenVecRes_SETCC(N); break;
@@ -1590,12 +1601,15 @@ SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
switch (getTypeAction(InVT)) {
- default:
- assert(false && "Unknown type action!");
- break;
case TargetLowering::TypeLegal:
break;
case TargetLowering::TypePromoteInteger:
+ // If the incoming type is a vector that is being promoted, then
+ // we know that the elements are arranged differently and that we
+ // must perform the conversion using a stack slot.
+ if (InVT.isVector())
+ break;
+
// If the InOp is promoted to the same size, convert it. Otherwise,
// fall out of the switch and widen the promoted input.
InOp = GetPromotedInteger(InOp);
@@ -1928,7 +1942,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
SDValue InOp1 = GetWidenedVector(N->getOperand(1));
SDValue InOp2 = GetWidenedVector(N->getOperand(2));
assert(InOp1.getValueType() == WidenVT && InOp2.getValueType() == WidenVT);
- return DAG.getNode(ISD::SELECT, N->getDebugLoc(),
+ return DAG.getNode(N->getOpcode(), N->getDebugLoc(),
WidenVT, Cond1, InOp1, InOp2);
}
@@ -2032,6 +2046,7 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned ResNo) {
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::STORE: Res = WidenVecOp_STORE(N); break;
+ case ISD::SETCC: Res = WidenVecOp_SETCC(N); break;
case ISD::FP_EXTEND:
case ISD::FP_TO_SINT:
@@ -2165,6 +2180,32 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
MVT::Other,&StChain[0],StChain.size());
}
+SDValue DAGTypeLegalizer::WidenVecOp_SETCC(SDNode *N) {
+ SDValue InOp0 = GetWidenedVector(N->getOperand(0));
+ SDValue InOp1 = GetWidenedVector(N->getOperand(1));
+ DebugLoc dl = N->getDebugLoc();
+
+ // WARNING: In this code we widen the compare instruction with garbage.
+ // This garbage may contain denormal floats, which may be slow. Is this a
+ // real concern? Should we zero the unused lanes if this is a float compare?
+
+ // Get a new SETCC node to compare the newly widened operands.
+ // Only the lanes of the original type carry meaningful results.
+ EVT SVT = TLI.getSetCCResultType(InOp0.getValueType());
+ SDValue WideSETCC = DAG.getNode(ISD::SETCC, N->getDebugLoc(),
+ SVT, InOp0, InOp1, N->getOperand(2));
+
+ // Extract the needed results from the result vector.
+ EVT ResVT = EVT::getVectorVT(*DAG.getContext(),
+ SVT.getVectorElementType(),
+ N->getValueType(0).getVectorNumElements());
+ SDValue CC = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
+ ResVT, WideSETCC, DAG.getIntPtrConstant(0));
+
+ return PromoteTargetBoolean(CC, N->getValueType(0));
+}
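On plain arrays the node sequence looks roughly like this (a hypothetical <3 x i32> compare widened to a legal <4 x i32>; the padded lane is computed on garbage and then dropped by the subvector extract):

#include <cstdint>

void setccWidenedSketch(const int32_t A[4], const int32_t B[4],
                        int32_t CC[3]) {
  int32_t Wide[4];
  for (unsigned i = 0; i < 4; ++i)
    Wide[i] = (A[i] == B[i]) ? -1 : 0; // lane 3 compares widened garbage
  for (unsigned i = 0; i < 3; ++i)
    CC[i] = Wide[i];                   // keep only the original lanes
}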
+
+
//===----------------------------------------------------------------------===//
// Vector Widening Utilities
//===----------------------------------------------------------------------===//
@@ -2276,6 +2317,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
unsigned Align = LD->getAlignment();
bool isVolatile = LD->isVolatile();
bool isNonTemporal = LD->isNonTemporal();
+ bool isInvariant = LD->isInvariant();
int LdWidth = LdVT.getSizeInBits();
int WidthDiff = WidenWidth - LdWidth; // Difference
@@ -2285,7 +2327,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
EVT NewVT = FindMemType(DAG, TLI, LdWidth, WidenVT, LdAlign, WidthDiff);
int NewVTWidth = NewVT.getSizeInBits();
SDValue LdOp = DAG.getLoad(NewVT, dl, Chain, BasePtr, LD->getPointerInfo(),
- isVolatile, isNonTemporal, Align);
+ isVolatile, isNonTemporal, isInvariant, Align);
LdChain.push_back(LdOp.getValue(1));
// Check if we can load the element with one instruction
@@ -2323,18 +2365,37 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
DAG.getIntPtrConstant(Increment));
+ SDValue L;
if (LdWidth < NewVTWidth) {
// Our current type we are using is too large, find a better size
NewVT = FindMemType(DAG, TLI, LdWidth, WidenVT, LdAlign, WidthDiff);
NewVTWidth = NewVT.getSizeInBits();
- }
-
- SDValue LdOp = DAG.getLoad(NewVT, dl, Chain, BasePtr,
+ L = DAG.getLoad(NewVT, dl, Chain, BasePtr,
LD->getPointerInfo().getWithOffset(Offset),
isVolatile,
- isNonTemporal, MinAlign(Align, Increment));
- LdChain.push_back(LdOp.getValue(1));
- LdOps.push_back(LdOp);
+ isNonTemporal, isInvariant,
+ MinAlign(Align, Increment));
+ LdChain.push_back(L.getValue(1));
+ if (L->getValueType(0).isVector()) {
+ SmallVector<SDValue, 16> Loads;
+ Loads.push_back(L);
+ unsigned size = L->getValueSizeInBits(0);
+ while (size < LdOp->getValueSizeInBits(0)) {
+ Loads.push_back(DAG.getUNDEF(L->getValueType(0)));
+ size += L->getValueSizeInBits(0);
+ }
+ L = DAG.getNode(ISD::CONCAT_VECTORS, dl, LdOp->getValueType(0),
+ &Loads[0], Loads.size());
+ }
+ } else {
+ L = DAG.getLoad(NewVT, dl, Chain, BasePtr,
+ LD->getPointerInfo().getWithOffset(Offset), isVolatile,
+ isNonTemporal, isInvariant, MinAlign(Align, Increment));
+ LdChain.push_back(L.getValue(1));
+ }
+
+ LdOps.push_back(L);
+
LdWidth -= NewVTWidth;
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
new file mode 100644
index 0000000..ff0136e
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
@@ -0,0 +1,657 @@
+//===- ResourcePriorityQueue.cpp - A DFA-oriented priority queue -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ResourcePriorityQueue class, which is a
+// SchedulingPriorityQueue that prioritizes instructions using DFA state to
+// reduce the length of the critical path through the basic block
+// on VLIW platforms.
+// The scheduler is basically a top-down adaptable list scheduler with DFA
+// resource tracking added to the cost function.
+// The DFA is queried as a state machine to model "packets/bundles" during
+// scheduling. Currently packets/bundles are discarded at the end of
+// scheduling, affecting only the order of instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "scheduler"
+#include "llvm/CodeGen/ResourcePriorityQueue.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetLowering.h"
+
+using namespace llvm;
+
+static cl::opt<bool> DisableDFASched("disable-dfa-sched", cl::Hidden,
+ cl::ZeroOrMore, cl::init(false),
+ cl::desc("Disable use of DFA during scheduling"));
+
+static cl::opt<signed> RegPressureThreshold(
+ "dfa-sched-reg-pressure-threshold", cl::Hidden, cl::ZeroOrMore, cl::init(5),
+ cl::desc("Track reg pressure and switch priority to in-depth"));
+
+
+ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS) :
+ Picker(this),
+ InstrItins(IS->getTargetLowering().getTargetMachine().getInstrItineraryData())
+{
+ TII = IS->getTargetLowering().getTargetMachine().getInstrInfo();
+ TRI = IS->getTargetLowering().getTargetMachine().getRegisterInfo();
+ TLI = &IS->getTargetLowering();
+
+ const TargetMachine &tm = (*IS->MF).getTarget();
+ ResourcesModel = tm.getInstrInfo()->CreateTargetScheduleState(&tm,NULL);
+ // This hard requirement could be relaxed, but for now
+ // do not let it proceed.
+ assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");
+
+ unsigned NumRC = TRI->getNumRegClasses();
+ RegLimit.resize(NumRC);
+ RegPressure.resize(NumRC);
+ std::fill(RegLimit.begin(), RegLimit.end(), 0);
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I)
+ RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, *IS->MF);
+
+ ParallelLiveRanges = 0;
+ HorizontalVerticalBalance = 0;
+}
+
+unsigned
+ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
+ unsigned NumberDeps = 0;
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+
+ SUnit *PredSU = I->getSUnit();
+ const SDNode *ScegN = PredSU->getNode();
+
+ if (!ScegN)
+ continue;
+
+ // If the value comes in via CopyFromReg, it is probably
+ // live into the BB from outside.
+ switch (ScegN->getOpcode()) {
+ default: break;
+ case ISD::TokenFactor: break;
+ case ISD::CopyFromReg: NumberDeps++; break;
+ case ISD::CopyToReg: break;
+ case ISD::INLINEASM: break;
+ }
+ if (!ScegN->isMachineOpcode())
+ continue;
+
+ for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
+ EVT VT = ScegN->getValueType(i);
+ if (TLI->isTypeLegal(VT)
+ && (TLI->getRegClassFor(VT)->getID() == RCId)) {
+ NumberDeps++;
+ break;
+ }
+ }
+ }
+ return NumberDeps;
+}
+
+unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
+ unsigned RCId) {
+ unsigned NumberDeps = 0;
+ for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+
+ SUnit *SuccSU = I->getSUnit();
+ const SDNode *ScegN = SuccSU->getNode();
+ if (!ScegN)
+ continue;
+
+ // If the value is passed to CopyToReg, it is probably
+ // live outside the BB.
+ switch (ScegN->getOpcode()) {
+ default: break;
+ case ISD::TokenFactor: break;
+ case ISD::CopyFromReg: break;
+ case ISD::CopyToReg: NumberDeps++; break;
+ case ISD::INLINEASM: break;
+ }
+ if (!ScegN->isMachineOpcode())
+ continue;
+
+ for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
+ const SDValue &Op = ScegN->getOperand(i);
+ EVT VT = Op.getNode()->getValueType(Op.getResNo());
+ if (TLI->isTypeLegal(VT)
+ && (TLI->getRegClassFor(VT)->getID() == RCId)) {
+ NumberDeps++;
+ break;
+ }
+ }
+ }
+ return NumberDeps;
+}
+
+static unsigned numberCtrlDepsInSU(SUnit *SU) {
+ unsigned NumberDeps = 0;
+ for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I)
+ if (I->isCtrl())
+ NumberDeps++;
+
+ return NumberDeps;
+}
+
+static unsigned numberCtrlPredInSU(SUnit *SU) {
+ unsigned NumberDeps = 0;
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I)
+ if (I->isCtrl())
+ NumberDeps++;
+
+ return NumberDeps;
+}
+
+///
+/// Initialize nodes.
+///
+void ResourcePriorityQueue::initNodes(std::vector<SUnit> &sunits) {
+ SUnits = &sunits;
+ NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+
+ for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
+ SUnit *SU = &(*SUnits)[i];
+ initNumRegDefsLeft(SU);
+ SU->NodeQueueId = 0;
+ }
+}
+
+/// This heuristic is used if DFA scheduling is not desired
+/// for some VLIW platform.
+bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
+ // The isScheduleHigh flag allows nodes with wraparound dependencies that
+ // cannot easily be modeled as edges with latencies to be scheduled as
+ // soon as possible in a top-down schedule.
+ if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
+ return false;
+
+ if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
+ return true;
+
+ unsigned LHSNum = LHS->NodeNum;
+ unsigned RHSNum = RHS->NodeNum;
+
+ // The most important heuristic is scheduling the critical path.
+ unsigned LHSLatency = PQ->getLatency(LHSNum);
+ unsigned RHSLatency = PQ->getLatency(RHSNum);
+ if (LHSLatency < RHSLatency) return true;
+ if (LHSLatency > RHSLatency) return false;
+
+ // After that, if two nodes have identical latencies, look to see if one will
+ // unblock more other nodes than the other.
+ unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
+ unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
+ if (LHSBlocked < RHSBlocked) return true;
+ if (LHSBlocked > RHSBlocked) return false;
+
+ // Finally, just to provide a stable ordering, use the node number as a
+ // deciding factor.
+ return LHSNum < RHSNum;
+}
+
+
+/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
+/// of SU, return it, otherwise return null.
+SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
+ SUnit *OnlyAvailablePred = 0;
+ for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ SUnit &Pred = *I->getSUnit();
+ if (!Pred.isScheduled) {
+ // We found an available, but not scheduled, predecessor. If it's the
+ // only one we have found, keep track of it... otherwise give up.
+ if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
+ return 0;
+ OnlyAvailablePred = &Pred;
+ }
+ }
+ return OnlyAvailablePred;
+}
+
+void ResourcePriorityQueue::push(SUnit *SU) {
+ // Look at all of the successors of this node. Count the number of nodes that
+ // this node is the sole unscheduled node for.
+ unsigned NumNodesBlocking = 0;
+ for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I)
+ if (getSingleUnscheduledPred(I->getSUnit()) == SU)
+ ++NumNodesBlocking;
+
+ NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
+ Queue.push_back(SU);
+}
+
+/// Check if scheduling of this SU is possible
+/// in the current packet.
+bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
+ if (!SU || !SU->getNode())
+ return false;
+
+ // If this is a compound instruction,
+ // it is likely to be a call. Do not delay it.
+ if (SU->getNode()->getGluedNode())
+ return true;
+
+ // First see if the pipeline could receive this instruction
+ // in the current cycle.
+ if (SU->getNode()->isMachineOpcode())
+ switch (SU->getNode()->getMachineOpcode()) {
+ default:
+ if (!ResourcesModel->canReserveResources(&TII->get(
+ SU->getNode()->getMachineOpcode())))
+ return false;
+ case TargetOpcode::EXTRACT_SUBREG:
+ case TargetOpcode::INSERT_SUBREG:
+ case TargetOpcode::SUBREG_TO_REG:
+ case TargetOpcode::REG_SEQUENCE:
+ case TargetOpcode::IMPLICIT_DEF:
+ break;
+ }
+
+ // Now check that there are no other dependencies
+ // on instructions already in the packet.
+ for (unsigned i = 0, e = Packet.size(); i != e; ++i)
+ for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
+ E = Packet[i]->Succs.end(); I != E; ++I) {
+ // Since we do not add pseudos to packets, might as well
+ // ignore order deps.
+ if (I->isCtrl())
+ continue;
+
+ if (I->getSUnit() == SU)
+ return false;
+ }
+
+ return true;
+}
+
+/// Keep track of available resources.
+void ResourcePriorityQueue::reserveResources(SUnit *SU) {
+ // If this SU does not fit in the packet
+ // start a new one.
+ if (!isResourceAvailable(SU) || SU->getNode()->getGluedNode()) {
+ ResourcesModel->clearResources();
+ Packet.clear();
+ }
+
+ if (SU->getNode() && SU->getNode()->isMachineOpcode()) {
+ switch (SU->getNode()->getMachineOpcode()) {
+ default:
+ ResourcesModel->reserveResources(&TII->get(
+ SU->getNode()->getMachineOpcode()));
+ break;
+ case TargetOpcode::EXTRACT_SUBREG:
+ case TargetOpcode::INSERT_SUBREG:
+ case TargetOpcode::SUBREG_TO_REG:
+ case TargetOpcode::REG_SEQUENCE:
+ case TargetOpcode::IMPLICIT_DEF:
+ break;
+ }
+ Packet.push_back(SU);
+ }
+ // Forcefully end packet for PseudoOps.
+ else {
+ ResourcesModel->clearResources();
+ Packet.clear();
+ }
+
+ // If packet is now full, reset the state so in the next cycle
+ // we start fresh.
+ if (Packet.size() >= InstrItins->IssueWidth) {
+ ResourcesModel->clearResources();
+ Packet.clear();
+ }
+}
+
+signed ResourcePriorityQueue::rawRegPressureDelta(SUnit *SU, unsigned RCId) {
+ signed RegBalance = 0;
+
+ if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
+ return RegBalance;
+
+ // Gen estimate.
+ for (unsigned i = 0, e = SU->getNode()->getNumValues(); i != e; ++i) {
+ EVT VT = SU->getNode()->getValueType(i);
+ if (TLI->isTypeLegal(VT)
+ && TLI->getRegClassFor(VT)
+ && TLI->getRegClassFor(VT)->getID() == RCId)
+ RegBalance += numberRCValSuccInSU(SU, RCId);
+ }
+ // Kill estimate.
+ for (unsigned i = 0, e = SU->getNode()->getNumOperands(); i != e; ++i) {
+ const SDValue &Op = SU->getNode()->getOperand(i);
+ EVT VT = Op.getNode()->getValueType(Op.getResNo());
+ if (isa<ConstantSDNode>(Op.getNode()))
+ continue;
+
+ if (TLI->isTypeLegal(VT) && TLI->getRegClassFor(VT)
+ && TLI->getRegClassFor(VT)->getID() == RCId)
+ RegBalance -= numberRCValPredInSU(SU, RCId);
+ }
+ return RegBalance;
+}
+
+/// Estimates change in reg pressure from this SU.
+/// This is achieved by trivial tracking of defined
+/// and used vregs in dependent instructions.
+/// The RawPressure flag makes this function ignore
+/// existing reg file sizes and report the raw def/use
+/// balance.
+signed ResourcePriorityQueue::regPressureDelta(SUnit *SU, bool RawPressure) {
+ signed RegBalance = 0;
+
+ if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
+ return RegBalance;
+
+ if (RawPressure) {
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I) {
+ const TargetRegisterClass *RC = *I;
+ RegBalance += rawRegPressureDelta(SU, RC->getID());
+ }
+ }
+ else {
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I) {
+ const TargetRegisterClass *RC = *I;
+ if ((RegPressure[RC->getID()] +
+ rawRegPressureDelta(SU, RC->getID()) > 0) &&
+ (RegPressure[RC->getID()] +
+ rawRegPressureDelta(SU, RC->getID()) >= RegLimit[RC->getID()]))
+ RegBalance += rawRegPressureDelta(SU, RC->getID());
+ }
+ }
+
+ return RegBalance;
+}
+
+// Constants used to denote relative importance of
+// heuristic components for cost computation.
+static const unsigned PriorityOne = 200;
+static const unsigned PriorityTwo = 100;
+static const unsigned PriorityThree = 50;
+static const unsigned PriorityFour = 15;
+static const unsigned PriorityFive = 5;
+static const unsigned ScaleOne = 20;
+static const unsigned ScaleTwo = 10;
+static const unsigned ScaleThree = 5;
+static const unsigned FactorOne = 2;
+
+/// Returns a single number reflecting the benefit of scheduling SU
+/// in the current cycle.
+signed ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
+ // Initial trivial priority.
+ signed ResCount = 1;
+
+ // Do not waste time on a node that is already scheduled.
+ if (SU->isScheduled)
+ return ResCount;
+
+ // Forced priority is high.
+ if (SU->isScheduleHigh)
+ ResCount += PriorityOne;
+
+ // Adaptable scheduling: a small but very
+ // parallel region, where reg pressure is an issue.
+ if (HorizontalVerticalBalance > RegPressureThreshold) {
+ // Critical path first
+ ResCount += (SU->getHeight() * ScaleTwo);
+ // If resources are available for it, multiply the
+ // chance of scheduling.
+ if (isResourceAvailable(SU))
+ ResCount <<= FactorOne;
+
+ // Consider change to reg pressure from scheduling
+ // this SU.
+ ResCount -= (regPressureDelta(SU,true) * ScaleOne);
+ }
+ // Default heuristic, greedy and
+ // critical path driven.
+ else {
+ // Critical path first.
+ ResCount += (SU->getHeight() * ScaleTwo);
+ // Now see how many instructions are blocked by this SU.
+ ResCount += (NumNodesSolelyBlocking[SU->NodeNum] * ScaleTwo);
+ // If resources are available for it, multiply the
+ // chance of scheduling.
+ if (isResourceAvailable(SU))
+ ResCount <<= FactorOne;
+
+ ResCount -= (regPressureDelta(SU) * ScaleTwo);
+ }
+
+ // These are platform-specific things.
+ // They will need to go into the back end
+ // and be accessed from here via a hook.
+ for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
+ if (N->isMachineOpcode()) {
+ const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
+ if (TID.isCall())
+ ResCount += (PriorityThree + (ScaleThree*N->getNumValues()));
+ }
+ else
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::TokenFactor:
+ case ISD::CopyFromReg:
+ case ISD::CopyToReg:
+ ResCount += PriorityFive;
+ break;
+
+ case ISD::INLINEASM:
+ ResCount += PriorityFour;
+ break;
+ }
+ }
+ return ResCount;
+}
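A worked example of the default branch, for a hypothetical node of height 3 that solely blocks two other nodes, with resources available and a raw pressure delta of +1. Note that FactorOne is used as a shift amount, so it scales the count by 4, not 2:

// Illustration only; the *_Ex names avoid clashing with the file's constants.
constexpr int ScaleTwoEx = 10, FactorOneEx = 2;
constexpr int ResCountEx =
    (((1 + 3 * ScaleTwoEx + 2 * ScaleTwoEx) << FactorOneEx) - 1 * ScaleTwoEx);
static_assert(ResCountEx == 194, "51 << 2 == 204, minus 10 == 194");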
+
+
+/// Main resource tracking point.
+void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
+ // Use NULL entry as an event marker to reset
+ // the DFA state.
+ if (!SU) {
+ ResourcesModel->clearResources();
+ Packet.clear();
+ return;
+ }
+
+ const SDNode *ScegN = SU->getNode();
+ // Update reg pressure tracking.
+ // First update current node.
+ if (ScegN->isMachineOpcode()) {
+ // Estimate generated regs.
+ for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
+ EVT VT = ScegN->getValueType(i);
+
+ if (TLI->isTypeLegal(VT)) {
+ const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
+ if (RC)
+ RegPressure[RC->getID()] += numberRCValSuccInSU(SU, RC->getID());
+ }
+ }
+ // Estimate killed regs.
+ for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
+ const SDValue &Op = ScegN->getOperand(i);
+ EVT VT = Op.getNode()->getValueType(Op.getResNo());
+
+ if (TLI->isTypeLegal(VT)) {
+ const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
+ if (RC) {
+ if (RegPressure[RC->getID()] >
+ (numberRCValPredInSU(SU, RC->getID())))
+ RegPressure[RC->getID()] -= numberRCValPredInSU(SU, RC->getID());
+ else RegPressure[RC->getID()] = 0;
+ }
+ }
+ }
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl() || (I->getSUnit()->NumRegDefsLeft == 0))
+ continue;
+ --I->getSUnit()->NumRegDefsLeft;
+ }
+ }
+
+ // Reserve resources for this SU.
+ reserveResources(SU);
+
+ // Adjust number of parallel live ranges.
+ // The heuristic is simple - a node with no data successors reduces
+ // the number of live ranges. All others increase it.
+ unsigned NumberNonControlDeps = 0;
+
+ for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ adjustPriorityOfUnscheduledPreds(I->getSUnit());
+ if (!I->isCtrl())
+ NumberNonControlDeps++;
+ }
+
+ if (!NumberNonControlDeps) {
+ if (ParallelLiveRanges >= SU->NumPreds)
+ ParallelLiveRanges -= SU->NumPreds;
+ else
+ ParallelLiveRanges = 0;
+
+ }
+ else
+ ParallelLiveRanges += SU->NumRegDefsLeft;
+
+ // Track parallel live chains.
+ HorizontalVerticalBalance += (SU->Succs.size() - numberCtrlDepsInSU(SU));
+ HorizontalVerticalBalance -= (SU->Preds.size() - numberCtrlPredInSU(SU));
+}
+
+void ResourcePriorityQueue::initNumRegDefsLeft(SUnit *SU) {
+ unsigned NodeNumDefs = 0;
+ for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
+ if (N->isMachineOpcode()) {
+ const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
+ // No register need be allocated for this.
+ if (N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
+ NodeNumDefs = 0;
+ break;
+ }
+ NodeNumDefs = std::min(N->getNumValues(), TID.getNumDefs());
+ }
+ else
+ switch(N->getOpcode()) {
+ default: break;
+ case ISD::CopyFromReg:
+ NodeNumDefs++;
+ break;
+ case ISD::INLINEASM:
+ NodeNumDefs++;
+ break;
+ }
+
+ SU->NumRegDefsLeft = NodeNumDefs;
+}
+
+/// adjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
+/// scheduled. If SU is not itself available, then there is at least one
+/// predecessor node that has not been scheduled yet. If SU has exactly ONE
+/// unscheduled predecessor, we want to increase its priority: scheduling it
+/// will make this node available, so it is better than some other node of
+/// the same priority that will not make a node available.
+void ResourcePriorityQueue::adjustPriorityOfUnscheduledPreds(SUnit *SU) {
+ if (SU->isAvailable) return; // All preds scheduled.
+
+ SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
+ if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable)
+ return;
+
+ // Okay, we found a single predecessor that is available, but not scheduled.
+ // Since it is available, it must be in the priority queue. First remove it.
+ remove(OnlyAvailablePred);
+
+ // Reinsert the node into the priority queue, which recomputes its
+ // NumNodesSolelyBlocking value.
+ push(OnlyAvailablePred);
+}
+
+
+/// Main access point - returns the next instruction
+/// to be placed in the scheduling sequence.
+SUnit *ResourcePriorityQueue::pop() {
+ if (empty())
+ return 0;
+
+ std::vector<SUnit *>::iterator Best = Queue.begin();
+ if (!DisableDFASched) {
+ signed BestCost = SUSchedulingCost(*Best);
+ for (std::vector<SUnit *>::iterator I = Queue.begin(),
+ E = Queue.end(); I != E; ++I) {
+ if (*I == *Best)
+ continue;
+
+ if (SUSchedulingCost(*I) > BestCost) {
+ BestCost = SUSchedulingCost(*I);
+ Best = I;
+ }
+ }
+ }
+ // Use default TD scheduling mechanism.
+ else {
+ for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
+ E = Queue.end(); I != E; ++I)
+ if (Picker(*Best, *I))
+ Best = I;
+ }
+
+ SUnit *V = *Best;
+ if (Best != prior(Queue.end()))
+ std::swap(*Best, Queue.back());
+
+ Queue.pop_back();
+
+ return V;
+}
+
+
+void ResourcePriorityQueue::remove(SUnit *SU) {
+ assert(!Queue.empty() && "Queue is empty!");
+ std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
+ if (I != prior(Queue.end()))
+ std::swap(*I, Queue.back());
+
+ Queue.pop_back();
+}
+
+
+#ifdef NDEBUG
+void ResourcePriorityQueue::dump(ScheduleDAG *DAG) const {}
+#else
+void ResourcePriorityQueue::dump(ScheduleDAG *DAG) const {
+ ResourcePriorityQueue q = *this;
+ while (!q.empty()) {
+ SUnit *su = q.pop();
+ dbgs() << "Height " << su->getHeight() << ": ";
+ su->dump(DAG);
+ }
+}
+#endif
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index b275c63..24da432 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -43,7 +43,7 @@ namespace {
SmallVector<SUnit *, 16> Queue;
bool empty() const { return Queue.empty(); }
-
+
void push(SUnit *U) {
Queue.push_back(U);
}
@@ -101,8 +101,8 @@ private:
bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
void ListScheduleBottomUp();
- /// ForceUnitLatencies - The fast scheduler doesn't care about real latencies.
- bool ForceUnitLatencies() const { return true; }
+ /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
+ bool forceUnitLatencies() const { return true; }
};
} // end anonymous namespace
@@ -112,7 +112,7 @@ void ScheduleDAGFast::Schedule() {
DEBUG(dbgs() << "********** List Scheduling **********\n");
NumLiveRegs = 0;
- LiveRegDefs.resize(TRI->getNumRegs(), NULL);
+ LiveRegDefs.resize(TRI->getNumRegs(), NULL);
LiveRegCycles.resize(TRI->getNumRegs(), 0);
// Build the scheduling graph.
@@ -159,7 +159,7 @@ void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
ReleasePred(SU, &*I);
if (I->isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
- // expensive to copy the register. Make sure nothing that can
+ // expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
if (!LiveRegDefs[I->getReg()]) {
@@ -245,10 +245,10 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
SDValue(LoadNode, 1));
- SUnit *NewSU = NewSUnit(N);
+ SUnit *NewSU = newSUnit(N);
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NewSU->NodeNum);
-
+
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
@@ -268,7 +268,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
LoadSU = &SUnits[LoadNode->getNodeId()];
isNewLoad = false;
} else {
- LoadSU = NewSUnit(LoadNode);
+ LoadSU = newSUnit(LoadNode);
LoadNode->setNodeId(LoadSU->NodeNum);
}
@@ -329,7 +329,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
D.setSUnit(LoadSU);
AddPred(SuccDep, D);
}
- }
+ }
if (isNewLoad) {
AddPred(NewSU, SDep(LoadSU, SDep::Order, LoadSU->Latency));
}
@@ -381,11 +381,11 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
SmallVector<SUnit*, 2> &Copies) {
- SUnit *CopyFromSU = NewSUnit(static_cast<SDNode *>(NULL));
+ SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(NULL));
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
- SUnit *CopyToSU = NewSUnit(static_cast<SDNode *>(NULL));
+ SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(NULL));
CopyToSU->CopySrcRC = DestRC;
CopyToSU->CopyDstRC = SrcRC;
@@ -425,7 +425,7 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
unsigned NumRes = MCID.getNumDefs();
- for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
+ for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
if (Reg == *ImpDef)
break;
++NumRes;
@@ -447,7 +447,7 @@ static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
Added = true;
}
}
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
+ for (const uint16_t *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
if (RegAdded.insert(*Alias)) {
LRegs.push_back(*Alias);
@@ -508,7 +508,7 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
if (!MCID.ImplicitDefs)
continue;
- for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg) {
+ for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
}
@@ -630,7 +630,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
std::reverse(Sequence.begin(), Sequence.end());
#ifndef NDEBUG
- VerifySchedule(/*isBottomUp=*/true);
+ VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}
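
The unsigned-to-uint16_t changes in this file (and below in ScheduleDAGRRList.cpp) reflect that MC-level register numbers are stored as 16-bit entries in zero-terminated tables such as MCInstrDesc::getImplicitDefs(). A hedged sketch of the walking pattern, using an invented table rather than any real target's implicit-def list:

    #include <cstdint>
    #include <iostream>

    // A zero-terminated list of physical register numbers, in the style of
    // MCInstrDesc::getImplicitDefs(). Register 0 is never a real register,
    // so it doubles as the terminator. These numbers are illustrative only.
    static const uint16_t ImplicitDefs[] = { 17, 42, 0 };

    int main() {
      // Count entries before Reg in the list -- the same zero-terminated
      // walk getPhysicalRegisterVT() above uses (there, NumRes starts at
      // the explicit-def count rather than at zero).
      unsigned Reg = 42, NumRes = 0;
      for (const uint16_t *ImpDef = ImplicitDefs; *ImpDef; ++ImpDef) {
        if (Reg == *ImpDef)
          break;
        ++NumRes;
      }
      std::cout << "register " << Reg << " is implicit def #" << NumRes << "\n";
    }
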
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index e757def..2cb5d37 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -45,10 +45,6 @@ static RegisterScheduler
"Bottom-up register reduction list scheduling",
createBURRListDAGScheduler);
static RegisterScheduler
- tdrListrDAGScheduler("list-tdrr",
- "Top-down register reduction list scheduling",
- createTDRRListDAGScheduler);
-static RegisterScheduler
sourceListDAGScheduler("source",
"Similar to list-burr but schedules in source "
"order when possible",
@@ -93,6 +89,9 @@ static cl::opt<bool> DisableSchedCriticalPath(
static cl::opt<bool> DisableSchedHeight(
"disable-sched-height", cl::Hidden, cl::init(false),
cl::desc("Disable scheduled-height priority in sched=list-ilp"));
+static cl::opt<bool> Disable2AddrHack(
+ "disable-2addr-hack", cl::Hidden, cl::init(true),
+ cl::desc("Disable scheduler's two-address hack"));
static cl::opt<int> MaxReorderWindow(
"max-sched-reorder", cl::Hidden, cl::init(6),
@@ -103,17 +102,6 @@ static cl::opt<unsigned> AvgIPC(
"sched-avg-ipc", cl::Hidden, cl::init(1),
cl::desc("Average inst/cycle whan no target itinerary exists."));
-#ifndef NDEBUG
-namespace {
- // For sched=list-ilp, Count the number of times each factor comes into play.
- enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
- FactStatic, FactOther, NumFactors };
-}
-static const char *FactorName[NumFactors] =
-{"PressureDiff", "RegUses", "Stall", "Height", "Depth","Static", "Other"};
-static int FactorCount[NumFactors];
-#endif //!NDEBUG
-
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
@@ -121,10 +109,6 @@ namespace {
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
- /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
- /// it is top-down.
- bool isBottomUp;
-
/// NeedLatency - True if the scheduler will make use of latency information.
///
bool NeedLatency;
@@ -162,11 +146,15 @@ private:
/// and similar queries.
ScheduleDAGTopologicalSort Topo;
+ // Hack to keep track of the inverse of FindCallSeqStart without more crazy
+ // DAG crawling.
+ DenseMap<SUnit*, SUnit*> CallSeqEndForStart;
+
public:
ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
SchedulingPriorityQueue *availqueue,
CodeGenOpt::Level OptLevel)
- : ScheduleDAGSDNodes(mf), isBottomUp(availqueue->isBottomUp()),
+ : ScheduleDAGSDNodes(mf),
NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
Topo(SUnits) {
@@ -221,8 +209,6 @@ private:
void ReleasePred(SUnit *SU, const SDep *PredEdge);
void ReleasePredecessors(SUnit *SU);
- void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
- void ReleaseSuccessors(SUnit *SU);
void ReleasePending();
void AdvanceToCycle(unsigned NextCycle);
void AdvancePastStalls(SUnit *SU);
@@ -242,15 +228,11 @@ private:
SUnit *PickNodeToScheduleBottomUp();
void ListScheduleBottomUp();
- void ScheduleNodeTopDown(SUnit*);
- void ListScheduleTopDown();
-
-
/// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
/// Updates the topological ordering if required.
SUnit *CreateNewSUnit(SDNode *N) {
unsigned NumSUnits = SUnits.size();
- SUnit *NewNode = NewSUnit(N);
+ SUnit *NewNode = newSUnit(N);
// Update the topological ordering.
if (NewNode->NodeNum >= NumSUnits)
Topo.InitDAGTopologicalSorting();
@@ -268,9 +250,9 @@ private:
return NewNode;
}
- /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
+ /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
/// need actual latency information but the hybrid scheduler does.
- bool ForceUnitLatencies() const {
+ bool forceUnitLatencies() const {
return !NeedLatency;
}
};
@@ -278,7 +260,7 @@ private:
/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
-/// but for untyped values (MVT::untyped) it means inspecting the node's
+/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
const TargetLowering *TLI,
@@ -289,7 +271,7 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
// Special handling for untyped values. These values can only come from
// the expansion of custom DAG-to-DAG patterns.
- if (VT == MVT::untyped) {
+ if (VT == MVT::Untyped) {
const SDNode *Node = RegDefPos.GetNode();
unsigned Opcode = Node->getMachineOpcode();
@@ -319,18 +301,16 @@ void ScheduleDAGRRList::Schedule() {
DEBUG(dbgs()
<< "********** List Scheduling BB#" << BB->getNumber()
<< " '" << BB->getName() << "' **********\n");
-#ifndef NDEBUG
- for (int i = 0; i < NumFactors; ++i) {
- FactorCount[i] = 0;
- }
-#endif //!NDEBUG
CurCycle = 0;
IssueCount = 0;
MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
NumLiveRegs = 0;
- LiveRegDefs.resize(TRI->getNumRegs(), NULL);
- LiveRegGens.resize(TRI->getNumRegs(), NULL);
+ // Allocate slots for each physical register, plus one for a special register
+ // to track the virtual resource of a calling sequence.
+ LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
+ LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);
+ CallSeqEndForStart.clear();
// Build the scheduling graph.
BuildSchedGraph(NULL);
@@ -343,18 +323,16 @@ void ScheduleDAGRRList::Schedule() {
HazardRec->Reset();
- // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
- if (isBottomUp)
- ListScheduleBottomUp();
- else
- ListScheduleTopDown();
+ // Execute the actual scheduling loop.
+ ListScheduleBottomUp();
-#ifndef NDEBUG
- for (int i = 0; i < NumFactors; ++i) {
- DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
- }
-#endif // !NDEBUG
AvailableQueue->releaseState();
+
+ DEBUG({
+ dbgs() << "*** Final schedule ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
}
//===----------------------------------------------------------------------===//
@@ -376,7 +354,7 @@ void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
#endif
--PredSU->NumSuccsLeft;
- if (!ForceUnitLatencies()) {
+ if (!forceUnitLatencies()) {
// Updating predecessor's height. This is now the cycle when the
// predecessor can be scheduled without causing a pipeline stall.
PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
@@ -403,6 +381,109 @@ void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
}
}
+/// IsChainDependent - Test if Outer is reachable from Inner through
+/// chain dependencies.
+static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
+ unsigned NestLevel,
+ const TargetInstrInfo *TII) {
+ SDNode *N = Outer;
+ for (;;) {
+ if (N == Inner)
+ return true;
+ // For a TokenFactor, examine each operand. There may be multiple ways
+ // to get to the CALLSEQ_BEGIN, but we need to find the path with the
+ // most nesting in order to ensure that we find the corresponding match.
+ if (N->getOpcode() == ISD::TokenFactor) {
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
+ return true;
+ return false;
+ }
+ // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
+ if (N->isMachineOpcode()) {
+ if (N->getMachineOpcode() ==
+ (unsigned)TII->getCallFrameDestroyOpcode()) {
+ ++NestLevel;
+ } else if (N->getMachineOpcode() ==
+ (unsigned)TII->getCallFrameSetupOpcode()) {
+ if (NestLevel == 0)
+ return false;
+ --NestLevel;
+ }
+ }
+ // Otherwise, find the chain and continue climbing.
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ if (N->getOperand(i).getValueType() == MVT::Other) {
+ N = N->getOperand(i).getNode();
+ goto found_chain_operand;
+ }
+ return false;
+ found_chain_operand:;
+ if (N->getOpcode() == ISD::EntryToken)
+ return false;
+ }
+}
+
+/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
+/// the corresponding (lowered) CALLSEQ_BEGIN node.
+///
+/// NestLevel and MaxNest are used in recursion to indicate the current level
+/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
+/// level seen so far.
+///
+/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
+/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
+static SDNode *
+FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
+ const TargetInstrInfo *TII) {
+ for (;;) {
+ // For a TokenFactor, examine each operand. There may be multiple ways
+ // to get to the CALLSEQ_BEGIN, but we need to find the path with the
+ // most nesting in order to ensure that we find the corresponding match.
+ if (N->getOpcode() == ISD::TokenFactor) {
+ SDNode *Best = 0;
+ unsigned BestMaxNest = MaxNest;
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ unsigned MyNestLevel = NestLevel;
+ unsigned MyMaxNest = MaxNest;
+ if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
+ MyNestLevel, MyMaxNest, TII))
+ if (!Best || (MyMaxNest > BestMaxNest)) {
+ Best = New;
+ BestMaxNest = MyMaxNest;
+ }
+ }
+ assert(Best);
+ MaxNest = BestMaxNest;
+ return Best;
+ }
+ // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
+ if (N->isMachineOpcode()) {
+ if (N->getMachineOpcode() ==
+ (unsigned)TII->getCallFrameDestroyOpcode()) {
+ ++NestLevel;
+ MaxNest = std::max(MaxNest, NestLevel);
+ } else if (N->getMachineOpcode() ==
+ (unsigned)TII->getCallFrameSetupOpcode()) {
+ assert(NestLevel != 0);
+ --NestLevel;
+ if (NestLevel == 0)
+ return N;
+ }
+ }
+ // Otherwise, find the chain and continue climbing.
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ if (N->getOperand(i).getValueType() == MVT::Other) {
+ N = N->getOperand(i).getNode();
+ goto found_chain_operand;
+ }
+ return 0;
+ found_chain_operand:;
+ if (N->getOpcode() == ISD::EntryToken)
+ return 0;
+ }
+}
+
/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively create one large live range
@@ -440,6 +521,27 @@ void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
}
}
}
+
+ // If we're scheduling a lowered CALLSEQ_END, find the corresponding
+ // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
+ // these nodes, to prevent other calls from being interscheduled with them.
+ unsigned CallResource = TRI->getNumRegs();
+ if (!LiveRegDefs[CallResource])
+ for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
+ if (Node->isMachineOpcode() &&
+ Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
+ unsigned NestLevel = 0;
+ unsigned MaxNest = 0;
+ SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);
+
+ SUnit *Def = &SUnits[N->getNodeId()];
+ CallSeqEndForStart[Def] = SU;
+
+ ++NumLiveRegs;
+ LiveRegDefs[CallResource] = Def;
+ LiveRegGens[CallResource] = SU;
+ break;
+ }
}
/// Check to see if any of the pending instructions are ready to issue. If
@@ -457,8 +559,7 @@ void ScheduleDAGRRList::ReleasePending() {
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
- unsigned ReadyCycle =
- isBottomUp ? PendingQueue[i]->getHeight() : PendingQueue[i]->getDepth();
+ unsigned ReadyCycle = PendingQueue[i]->getHeight();
if (ReadyCycle < MinAvailableCycle)
MinAvailableCycle = ReadyCycle;
@@ -487,10 +588,7 @@ void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
}
else {
for (; CurCycle != NextCycle; ++CurCycle) {
- if (isBottomUp)
- HazardRec->RecedeCycle();
- else
- HazardRec->AdvanceCycle();
+ HazardRec->RecedeCycle();
}
}
// FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
@@ -511,7 +609,7 @@ void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
// currently need to treat these nodes like real instructions.
// if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;
- unsigned ReadyCycle = isBottomUp ? SU->getHeight() : SU->getDepth();
+ unsigned ReadyCycle = SU->getHeight();
// Bump CurCycle to account for latency. We assume the latency of other
// available instructions may be hidden by the stall (not a full pipe stall).
@@ -522,7 +620,7 @@ void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
// Calls are scheduled in their preceding cycle, so don't conflict with
// hazards from instructions after the call. EmitNode will reset the
// scoreboard state before emitting the call.
- if (isBottomUp && SU->isCall)
+ if (SU->isCall)
return;
// FIXME: For resource conflicts in very long non-pipelined stages, we
@@ -530,7 +628,7 @@ void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
int Stalls = 0;
while (true) {
ScheduleHazardRecognizer::HazardType HT =
- HazardRec->getHazardType(SU, isBottomUp ? -Stalls : Stalls);
+ HazardRec->getHazardType(SU, -Stalls);
if (HT == ScheduleHazardRecognizer::NoHazard)
break;
@@ -568,17 +666,13 @@ void ScheduleDAGRRList::EmitNode(SUnit *SU) {
HazardRec->Reset();
return;
}
- if (isBottomUp && SU->isCall) {
+ if (SU->isCall) {
// Calls are scheduled with their preceding instructions. For bottom-up
// scheduling, clear the pipeline state before emitting.
HazardRec->Reset();
}
HazardRec->EmitInstruction(SU);
-
- if (!isBottomUp && SU->isCall) {
- HazardRec->Reset();
- }
}
static void resetVRegCycle(SUnit *SU);
@@ -607,7 +701,7 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
Sequence.push_back(SU);
- AvailableQueue->ScheduledNode(SU);
+ AvailableQueue->scheduledNode(SU);
// If HazardRec is disabled, and each inst counts as one cycle, then
// advance CurCycle before ReleasePredecessors to avoid useless pushes to
@@ -630,6 +724,20 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
LiveRegGens[I->getReg()] = NULL;
}
}
+ // Release the special call resource dependence, if this is the beginning
+ // of a call.
+ unsigned CallResource = TRI->getNumRegs();
+ if (LiveRegDefs[CallResource] == SU)
+ for (const SDNode *SUNode = SU->getNode(); SUNode;
+ SUNode = SUNode->getGluedNode()) {
+ if (SUNode->isMachineOpcode() &&
+ SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
+ assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
+ --NumLiveRegs;
+ LiveRegDefs[CallResource] = NULL;
+ LiveRegGens[CallResource] = NULL;
+ }
+ }
resetVRegCycle(SU);
@@ -686,15 +794,41 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
}
}
+ // Reclaim the special call resource dependence, if this is the beginning
+ // of a call.
+ unsigned CallResource = TRI->getNumRegs();
+ for (const SDNode *SUNode = SU->getNode(); SUNode;
+ SUNode = SUNode->getGluedNode()) {
+ if (SUNode->isMachineOpcode() &&
+ SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
+ ++NumLiveRegs;
+ LiveRegDefs[CallResource] = SU;
+ LiveRegGens[CallResource] = CallSeqEndForStart[SU];
+ }
+ }
+
+ // Release the special call resource dependence, if this is the end
+ // of a call.
+ if (LiveRegGens[CallResource] == SU)
+ for (const SDNode *SUNode = SU->getNode(); SUNode;
+ SUNode = SUNode->getGluedNode()) {
+ if (SUNode->isMachineOpcode() &&
+ SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
+ assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
+ --NumLiveRegs;
+ LiveRegDefs[CallResource] = NULL;
+ LiveRegGens[CallResource] = NULL;
+ }
+ }
+
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isAssignedRegDep()) {
+ if (!LiveRegDefs[I->getReg()])
+ ++NumLiveRegs;
// This becomes the nearest def. Note that an earlier def may still be
// pending if this is a two-address node.
LiveRegDefs[I->getReg()] = SU;
- if (!LiveRegDefs[I->getReg()]) {
- ++NumLiveRegs;
- }
if (LiveRegGens[I->getReg()] == NULL ||
I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
LiveRegGens[I->getReg()] = I->getSUnit();
@@ -714,7 +848,7 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
else {
AvailableQueue->push(SU);
}
- AvailableQueue->UnscheduledNode(SU);
+ AvailableQueue->unscheduledNode(SU);
}
/// After backtracking, the hazard checker needs to be restored to a state
@@ -805,6 +939,11 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
return NULL;
+  // Unfolding an x86 DEC64m operation results in a store, dec, load
+  // sequence, which can't be handled here, so quit.
+ if (NewNodes.size() == 3)
+ return NULL;
+
DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
@@ -830,7 +969,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
LoadNode->setNodeId(LoadSU->NodeNum);
InitNumRegDefsLeft(LoadSU);
- ComputeLatency(LoadSU);
+ computeLatency(LoadSU);
}
SUnit *NewSU = CreateNewSUnit(N);
@@ -848,7 +987,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
NewSU->isCommutable = true;
InitNumRegDefsLeft(NewSU);
- ComputeLatency(NewSU);
+ computeLatency(NewSU);
// Record all the edges to and from the old SU, by category.
SmallVector<SDep, 4> ChainPreds;
@@ -1027,7 +1166,7 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
unsigned NumRes = MCID.getNumDefs();
- for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
+ for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
if (Reg == *ImpDef)
break;
++NumRes;
@@ -1042,7 +1181,7 @@ static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
SmallSet<unsigned, 4> &RegAdded,
SmallVector<unsigned, 4> &LRegs,
const TargetRegisterInfo *TRI) {
- for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {
+ for (const uint16_t *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {
// Check if Ref is live.
if (!LiveRegDefs[*AliasI]) continue;
@@ -1057,6 +1196,31 @@ static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
}
}
+/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
+/// by RegMask, and add them to LRegs.
+static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
+ std::vector<SUnit*> &LiveRegDefs,
+ SmallSet<unsigned, 4> &RegAdded,
+ SmallVector<unsigned, 4> &LRegs) {
+ // Look at all live registers. Skip Reg0 and the special CallResource.
+ for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
+ if (!LiveRegDefs[i]) continue;
+ if (LiveRegDefs[i] == SU) continue;
+ if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
+ if (RegAdded.insert(i))
+ LRegs.push_back(i);
+ }
+}
+
+/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
+static const uint32_t *getNodeRegMask(const SDNode *N) {
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ if (const RegisterMaskSDNode *Op =
+ dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
+ return Op->getRegMask();
+ return NULL;
+}
+
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
@@ -1108,10 +1272,27 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
if (!Node->isMachineOpcode())
continue;
+ // If we're in the middle of scheduling a call, don't begin scheduling
+ // another call. Also, don't allow any physical registers to be live across
+ // the call.
+ if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
+ // Check the special calling-sequence resource.
+ unsigned CallResource = TRI->getNumRegs();
+ if (LiveRegDefs[CallResource]) {
+ SDNode *Gen = LiveRegGens[CallResource]->getNode();
+ while (SDNode *Glued = Gen->getGluedNode())
+ Gen = Glued;
+ if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
+ LRegs.push_back(CallResource);
+ }
+ }
+ if (const uint32_t *RegMask = getNodeRegMask(Node))
+ CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);
+
const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
if (!MCID.ImplicitDefs)
continue;
- for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg)
+ for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
@@ -1300,99 +1481,10 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
std::reverse(Sequence.begin(), Sequence.end());
#ifndef NDEBUG
- VerifySchedule(isBottomUp);
-#endif
-}
-
-//===----------------------------------------------------------------------===//
-// Top-Down Scheduling
-//===----------------------------------------------------------------------===//
-
-/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
-/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
-void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
- SUnit *SuccSU = SuccEdge->getSUnit();
-
-#ifndef NDEBUG
- if (SuccSU->NumPredsLeft == 0) {
- dbgs() << "*** Scheduling failed! ***\n";
- SuccSU->dump(this);
- dbgs() << " has been released too many times!\n";
- llvm_unreachable(0);
- }
-#endif
- --SuccSU->NumPredsLeft;
-
- // If all the node's predecessors are scheduled, this node is ready
- // to be scheduled. Ignore the special ExitSU node.
- if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
- SuccSU->isAvailable = true;
- AvailableQueue->push(SuccSU);
- }
-}
-
-void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
- // Top down: release successors
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- assert(!I->isAssignedRegDep() &&
- "The list-tdrr scheduler doesn't yet support physreg dependencies!");
-
- ReleaseSucc(SU, &*I);
- }
-}
-
-/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
-/// count of its successors. If a successor pending count is zero, add it to
-/// the Available queue.
-void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU) {
- DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
- DEBUG(SU->dump(this));
-
- assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
- SU->setDepthToAtLeast(CurCycle);
- Sequence.push_back(SU);
-
- ReleaseSuccessors(SU);
- SU->isScheduled = true;
- AvailableQueue->ScheduledNode(SU);
-}
-
-/// ListScheduleTopDown - The main loop of list scheduling for top-down
-/// schedulers.
-void ScheduleDAGRRList::ListScheduleTopDown() {
- AvailableQueue->setCurCycle(CurCycle);
-
- // Release any successors of the special Entry node.
- ReleaseSuccessors(&EntrySU);
-
- // All leaves to Available queue.
- for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
- // It is available if it has no predecessors.
- if (SUnits[i].Preds.empty()) {
- AvailableQueue->push(&SUnits[i]);
- SUnits[i].isAvailable = true;
- }
- }
-
- // While Available queue is not empty, grab the node with the highest
- // priority. If it is not ready put it back. Schedule the node.
- Sequence.reserve(SUnits.size());
- while (!AvailableQueue->empty()) {
- SUnit *CurSU = AvailableQueue->pop();
-
- if (CurSU)
- ScheduleNodeTopDown(CurSU);
- ++CurCycle;
- AvailableQueue->setCurCycle(CurCycle);
- }
-
-#ifndef NDEBUG
- VerifySchedule(isBottomUp);
+ VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}
-
//===----------------------------------------------------------------------===//
// RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
@@ -1437,21 +1529,6 @@ struct bu_ls_rr_sort : public queue_sort {
bool operator()(SUnit* left, SUnit* right) const;
};
-// td_ls_rr_sort - Priority function for top down register pressure reduction
-// scheduler.
-struct td_ls_rr_sort : public queue_sort {
- enum {
- IsBottomUp = false,
- HasReadyFilter = false
- };
-
- RegReductionPQBase *SPQ;
- td_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
- td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
-
- bool operator()(const SUnit* left, const SUnit* right) const;
-};
-
// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
enum {
@@ -1510,6 +1587,7 @@ protected:
std::vector<SUnit*> Queue;
unsigned CurQueueId;
bool TracksRegPressure;
+ bool SrcOrder;
// SUnits - The SUnits for the current graph.
std::vector<SUnit> *SUnits;
@@ -1535,11 +1613,12 @@ public:
RegReductionPQBase(MachineFunction &mf,
bool hasReadyFilter,
bool tracksrp,
+ bool srcorder,
const TargetInstrInfo *tii,
const TargetRegisterInfo *tri,
const TargetLowering *tli)
: SchedulingPriorityQueue(hasReadyFilter),
- CurQueueId(0), TracksRegPressure(tracksrp),
+ CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
if (TracksRegPressure) {
unsigned NumRC = TRI->getNumRegClasses();
@@ -1610,9 +1689,9 @@ public:
int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
- void ScheduledNode(SUnit *SU);
+ void scheduledNode(SUnit *SU);
- void UnscheduledNode(SUnit *SU);
+ void unscheduledNode(SUnit *SU);
protected:
bool canClobber(const SUnit *SU, const SUnit *Op);
@@ -1654,10 +1733,12 @@ class RegReductionPriorityQueue : public RegReductionPQBase {
public:
RegReductionPriorityQueue(MachineFunction &mf,
bool tracksrp,
+ bool srcorder,
const TargetInstrInfo *tii,
const TargetRegisterInfo *tri,
const TargetLowering *tli)
- : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
+ : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
+ tii, tri, tli),
Picker(this) {}
bool isBottomUp() const { return SF::IsBottomUp; }
@@ -1680,10 +1761,7 @@ public:
SF DumpPicker = Picker;
while (!DumpQueue.empty()) {
SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
- if (isBottomUp())
- dbgs() << "Height " << SU->getHeight() << ": ";
- else
- dbgs() << "Depth " << SU->getDepth() << ": ";
+ dbgs() << "Height " << SU->getHeight() << ": ";
SU->dump(DAG);
}
}
@@ -1692,9 +1770,6 @@ public:
typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;
-typedef RegReductionPriorityQueue<td_ls_rr_sort>
-TDRegReductionPriorityQueue;
-
typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;
@@ -1919,7 +1994,7 @@ int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
return PDiff;
}
-void RegReductionPQBase::ScheduledNode(SUnit *SU) {
+void RegReductionPQBase::scheduledNode(SUnit *SU) {
if (!TracksRegPressure)
return;
@@ -1988,7 +2063,7 @@ void RegReductionPQBase::ScheduledNode(SUnit *SU) {
dumpRegPressure();
}
-void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
+void RegReductionPQBase::unscheduledNode(SUnit *SU) {
if (!TracksRegPressure)
return;
@@ -2235,37 +2310,29 @@ static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
int LHeight = (int)left->getHeight() + LPenalty;
int RHeight = (int)right->getHeight() + RPenalty;
- bool LStall = (!checkPref || left->SchedulingPref == Sched::Latency) &&
+ bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
BUHasStall(left, LHeight, SPQ);
- bool RStall = (!checkPref || right->SchedulingPref == Sched::Latency) &&
+ bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
BUHasStall(right, RHeight, SPQ);
// If scheduling one of the node will cause a pipeline stall, delay it.
// If scheduling either one of the node will cause a pipeline stall, sort
// them according to their height.
if (LStall) {
- if (!RStall) {
- DEBUG(++FactorCount[FactStall]);
+ if (!RStall)
return 1;
- }
- if (LHeight != RHeight) {
- DEBUG(++FactorCount[FactStall]);
+ if (LHeight != RHeight)
return LHeight > RHeight ? 1 : -1;
- }
- } else if (RStall) {
- DEBUG(++FactorCount[FactStall]);
+ } else if (RStall)
return -1;
- }
// If either node is scheduling for latency, sort them by height/depth
// and latency.
- if (!checkPref || (left->SchedulingPref == Sched::Latency ||
- right->SchedulingPref == Sched::Latency)) {
+ if (!checkPref || (left->SchedulingPref == Sched::ILP ||
+ right->SchedulingPref == Sched::ILP)) {
if (DisableSchedCycles) {
- if (LHeight != RHeight) {
- DEBUG(++FactorCount[FactHeight]);
+ if (LHeight != RHeight)
return LHeight > RHeight ? 1 : -1;
- }
}
else {
// If neither instruction stalls (!LStall && !RStall) then
@@ -2274,17 +2341,14 @@ static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
int LDepth = left->getDepth() - LPenalty;
int RDepth = right->getDepth() - RPenalty;
if (LDepth != RDepth) {
- DEBUG(++FactorCount[FactDepth]);
DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
<< ") depth " << LDepth << " vs SU (" << right->NodeNum
<< ") depth " << RDepth << "\n");
return LDepth < RDepth ? 1 : -1;
}
}
- if (left->Latency != right->Latency) {
- DEBUG(++FactorCount[FactOther]);
+ if (left->Latency != right->Latency)
return left->Latency > right->Latency ? 1 : -1;
- }
}
return 0;
}
@@ -2298,7 +2362,6 @@ static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
bool LHasPhysReg = left->hasPhysRegDefs;
bool RHasPhysReg = right->hasPhysRegDefs;
if (LHasPhysReg != RHasPhysReg) {
- DEBUG(++FactorCount[FactRegUses]);
#ifndef NDEBUG
const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
#endif
@@ -2324,10 +2387,8 @@ static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
}
- if (LPriority != RPriority) {
- DEBUG(++FactorCount[FactStatic]);
+ if (LPriority != RPriority)
return LPriority > RPriority;
- }
// One or both of the nodes are calls and their sethi-ullman numbers are the
// same, then keep source order.
@@ -2360,18 +2421,14 @@ static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
// This creates more short live intervals.
unsigned LDist = closestSucc(left);
unsigned RDist = closestSucc(right);
- if (LDist != RDist) {
- DEBUG(++FactorCount[FactOther]);
+ if (LDist != RDist)
return LDist < RDist;
- }
// How many registers becomes live when the node is scheduled.
unsigned LScratch = calcMaxScratches(left);
unsigned RScratch = calcMaxScratches(right);
- if (LScratch != RScratch) {
- DEBUG(++FactorCount[FactOther]);
+ if (LScratch != RScratch)
return LScratch > RScratch;
- }
// Comparing latency against a call makes little sense unless the node
// is register pressure-neutral.
@@ -2386,20 +2443,15 @@ static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
return result > 0;
}
else {
- if (left->getHeight() != right->getHeight()) {
- DEBUG(++FactorCount[FactHeight]);
+ if (left->getHeight() != right->getHeight())
return left->getHeight() > right->getHeight();
- }
- if (left->getDepth() != right->getDepth()) {
- DEBUG(++FactorCount[FactDepth]);
+ if (left->getDepth() != right->getDepth())
return left->getDepth() < right->getDepth();
- }
}
assert(left->NodeQueueId && right->NodeQueueId &&
"NodeQueueId cannot be zero");
- DEBUG(++FactorCount[FactOther]);
return (left->NodeQueueId > right->NodeQueueId);
}
@@ -2459,13 +2511,11 @@ bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
// Avoid causing spills. If register pressure is high, schedule for
// register pressure reduction.
if (LHigh && !RHigh) {
- DEBUG(++FactorCount[FactPressureDiff]);
DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
<< right->NodeNum << ")\n");
return true;
}
else if (!LHigh && RHigh) {
- DEBUG(++FactorCount[FactPressureDiff]);
DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
<< left->NodeNum << ")\n");
return false;
@@ -2529,7 +2579,6 @@ bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
}
if (!DisableSchedRegPressure && LPDiff != RPDiff) {
- DEBUG(++FactorCount[FactPressureDiff]);
DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
<< " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
return LPDiff > RPDiff;
@@ -2538,7 +2587,6 @@ bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
bool LReduce = canEnableCoalescing(left);
bool RReduce = canEnableCoalescing(right);
- DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
if (LReduce && !RReduce) return false;
if (RReduce && !LReduce) return true;
}
@@ -2546,17 +2594,14 @@ bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
<< " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
- DEBUG(++FactorCount[FactRegUses]);
return LLiveUses < RLiveUses;
}
if (!DisableSchedStalls) {
bool LStall = BUHasStall(left, left->getHeight(), SPQ);
bool RStall = BUHasStall(right, right->getHeight(), SPQ);
- if (LStall != RStall) {
- DEBUG(++FactorCount[FactHeight]);
+ if (LStall != RStall)
return left->getHeight() > right->getHeight();
- }
}
if (!DisableSchedCriticalPath) {
@@ -2565,17 +2610,14 @@ bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
<< left->getDepth() << " != SU(" << right->NodeNum << "): "
<< right->getDepth() << "\n");
- DEBUG(++FactorCount[FactDepth]);
return left->getDepth() < right->getDepth();
}
}
if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
int spread = (int)left->getHeight() - (int)right->getHeight();
- if (std::abs(spread) > MaxReorderWindow) {
- DEBUG(++FactorCount[FactHeight]);
+ if (std::abs(spread) > MaxReorderWindow)
return left->getHeight() > right->getHeight();
- }
}
return BURRSort(left, right, SPQ);
@@ -2584,9 +2626,10 @@ bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
SUnits = &sunits;
// Add pseudo dependency edges for two-address nodes.
- AddPseudoTwoAddrDeps();
+ if (!Disable2AddrHack)
+ AddPseudoTwoAddrDeps();
// Reroute edges to nodes with multiple uses.
- if (!TracksRegPressure)
+ if (!TracksRegPressure && !SrcOrder)
PrescheduleNodesWithMultipleUses();
// Calculate node priorities.
CalculateSethiUllmanNumbers();
@@ -2628,9 +2671,10 @@ static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
ScheduleDAGRRList *scheduleDAG,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) {
- const unsigned *ImpDefs
+ const uint16_t *ImpDefs
= TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
- if(!ImpDefs)
+ const uint32_t *RegMask = getNodeRegMask(SU->getNode());
+ if(!ImpDefs && !RegMask)
return false;
for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
@@ -2641,14 +2685,18 @@ static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
if (!PI->isAssignedRegDep())
continue;
- for (const unsigned *ImpDef = ImpDefs; *ImpDef; ++ImpDef) {
- // Return true if SU clobbers this physical register use and the
- // definition of the register reaches from DepSU. IsReachable queries a
- // topological forward sort of the DAG (following the successors).
- if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
- scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
- return true;
- }
+ if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
+ scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
+ return true;
+
+ if (ImpDefs)
+ for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
+ // Return true if SU clobbers this physical register use and the
+ // definition of the register reaches from DepSU. IsReachable queries
+ // a topological forward sort of the DAG (following the successors).
+ if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
+ scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
+ return true;
}
}
return false;
@@ -2661,16 +2709,17 @@ static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
const TargetRegisterInfo *TRI) {
SDNode *N = SuccSU->getNode();
unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
- const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
+ const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
assert(ImpDefs && "Caller should check hasPhysRegDefs");
for (const SDNode *SUNode = SU->getNode(); SUNode;
SUNode = SUNode->getGluedNode()) {
if (!SUNode->isMachineOpcode())
continue;
- const unsigned *SUImpDefs =
+ const uint16_t *SUImpDefs =
TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
- if (!SUImpDefs)
- return false;
+ const uint32_t *SURegMask = getNodeRegMask(SUNode);
+ if (!SUImpDefs && !SURegMask)
+ continue;
for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT == MVT::Glue || VT == MVT::Other)
@@ -2678,6 +2727,10 @@ static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
if (!N->hasAnyUseOfValue(i))
continue;
unsigned Reg = ImpDefs[i - NumDefs];
+ if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
+ return true;
+ if (!SUImpDefs)
+ continue;
for (;*SUImpDefs; ++SUImpDefs) {
unsigned SUReg = *SUImpDefs;
if (TRI->regsOverlap(Reg, SUReg))
@@ -2887,69 +2940,6 @@ void RegReductionPQBase::AddPseudoTwoAddrDeps() {
}
}
-/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
-/// predecessors of the successors of the SUnit SU. Stop when the provided
-/// limit is exceeded.
-static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
- unsigned Limit) {
- unsigned Sum = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- const SUnit *SuccSU = I->getSUnit();
- for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
- EE = SuccSU->Preds.end(); II != EE; ++II) {
- SUnit *PredSU = II->getSUnit();
- if (!PredSU->isScheduled)
- if (++Sum > Limit)
- return Sum;
- }
- }
- return Sum;
-}
-
-
-// Top down
-bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
- if (int res = checkSpecialNodes(left, right))
- return res < 0;
-
- unsigned LPriority = SPQ->getNodePriority(left);
- unsigned RPriority = SPQ->getNodePriority(right);
- bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
- bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
- bool LIsFloater = LIsTarget && left->NumPreds == 0;
- bool RIsFloater = RIsTarget && right->NumPreds == 0;
- unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
- unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
-
- if (left->NumSuccs == 0 && right->NumSuccs != 0)
- return false;
- else if (left->NumSuccs != 0 && right->NumSuccs == 0)
- return true;
-
- if (LIsFloater)
- LBonus -= 2;
- if (RIsFloater)
- RBonus -= 2;
- if (left->NumSuccs == 1)
- LBonus += 2;
- if (right->NumSuccs == 1)
- RBonus += 2;
-
- if (LPriority+LBonus != RPriority+RBonus)
- return LPriority+LBonus < RPriority+RBonus;
-
- if (left->getDepth() != right->getDepth())
- return left->getDepth() < right->getDepth();
-
- if (left->NumSuccsLeft != right->NumSuccsLeft)
- return left->NumSuccsLeft > right->NumSuccsLeft;
-
- assert(left->NodeQueueId && right->NodeQueueId &&
- "NodeQueueId cannot be zero");
- return (left->NodeQueueId > right->NodeQueueId);
-}
-
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
@@ -2962,21 +2952,7 @@ llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
BURegReductionPriorityQueue *PQ =
- new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
- ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
- PQ->setScheduleDAG(SD);
- return SD;
-}
-
-llvm::ScheduleDAGSDNodes *
-llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
- CodeGenOpt::Level OptLevel) {
- const TargetMachine &TM = IS->TM;
- const TargetInstrInfo *TII = TM.getInstrInfo();
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
-
- TDRegReductionPriorityQueue *PQ =
- new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
+ new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, 0);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
@@ -2990,7 +2966,7 @@ llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
SrcRegReductionPriorityQueue *PQ =
- new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
+ new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, 0);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
@@ -3005,7 +2981,7 @@ llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
const TargetLowering *TLI = &IS->getTargetLowering();
HybridBURRPriorityQueue *PQ =
- new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
+ new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
PQ->setScheduleDAG(SD);
@@ -3021,7 +2997,7 @@ llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
const TargetLowering *TLI = &IS->getTargetLowering();
ILPBURRPriorityQueue *PQ =
- new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
+ new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
PQ->setScheduleDAG(SD);
return SD;
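
The call-sequence machinery added above (IsChainDependent, FindCallSeqStart, and the extra LiveRegDefs slot) hinges on one idea: lowered CALLSEQ_BEGIN/CALLSEQ_END pairs nest like brackets, and walking bottom-up from an END, the BEGIN that brings the nesting level back to zero is the match. A simplified sketch over a linear sequence -- no TokenFactor branching, invented token names:

    #include <cassert>
    #include <iostream>

    enum Tok { BEGIN, END, OTHER };

    // Walk backwards from an END at position Pos and return the index of
    // the matching BEGIN, mirroring the nesting logic in FindCallSeqStart
    // above. Returns -1 if the sequence is malformed.
    int findCallSeqStart(const Tok *Seq, int Pos) {
      assert(Seq[Pos] == END && "must start at a call-sequence end");
      unsigned NestLevel = 0;
      for (int i = Pos; i >= 0; --i) {
        if (Seq[i] == END)
          ++NestLevel;             // another (possibly nested) call ends here
        else if (Seq[i] == BEGIN) {
          --NestLevel;             // a call begins; does it match ours?
          if (NestLevel == 0)
            return i;
        }
      }
      return -1;
    }

    int main() {
      //              0      1      2      3    4      5
      Tok Seq[] = { BEGIN, OTHER, BEGIN, END, OTHER, END };
      std::cout << findCallSeqStart(Seq, 5) << "\n"; // prints 0
    }
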
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 71f07d6..69dd813 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -17,6 +17,8 @@
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -44,20 +46,26 @@ static cl::opt<int> HighLatencyCycles(
"instructions take for targets with no itinerary"));
ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
- : ScheduleDAG(mf),
+ : ScheduleDAG(mf), BB(0), DAG(0),
InstrItins(mf.getTarget().getInstrItineraryData()) {}
/// Run - perform scheduling.
///
-void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
- MachineBasicBlock::iterator insertPos) {
+void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb) {
+ BB = bb;
DAG = dag;
- ScheduleDAG::Run(bb, insertPos);
+
+ // Clear the scheduler's SUnit DAG.
+ ScheduleDAG::clearDAG();
+ Sequence.clear();
+
+ // Invoke the target's selection of scheduler.
+ Schedule();
}
/// newSUnit - Creates a new SUnit and returns a ptr to it.
///
-SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
+SUnit *ScheduleDAGSDNodes::newSUnit(SDNode *N) {
#ifndef NDEBUG
const SUnit *Addr = 0;
if (!SUnits.empty())
@@ -79,7 +87,7 @@ SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
}
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
- SUnit *SU = NewSUnit(Old->getNode());
+ SUnit *SU = newSUnit(Old->getNode());
SU->OrigNode = Old->OrigNode;
SU->Latency = Old->Latency;
SU->isVRegCycle = Old->isVRegCycle;
@@ -302,7 +310,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
// If this node has already been processed, stop now.
if (NI->getNodeId() != -1) continue;
- SUnit *NodeSUnit = NewSUnit(NI);
+ SUnit *NodeSUnit = newSUnit(NI);
// See if anything is glued to this node, if so, add them to glued
// nodes. Nodes can have at most one glue input and one glue output. Glue
@@ -360,7 +368,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
InitNumRegDefsLeft(NodeSUnit);
// Assign the Latency field of NodeSUnit using target-provided information.
- ComputeLatency(NodeSUnit);
+ computeLatency(NodeSUnit);
}
// Find all call operands.
@@ -382,7 +390,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
// Check to see if the scheduler cares about latencies.
- bool UnitLatencies = ForceUnitLatencies();
+ bool UnitLatencies = forceUnitLatencies();
// Pass 2: add the preds, succs, etc.
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
@@ -448,7 +456,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
OpLatency, PhysReg);
if (!isChain && !UnitLatencies) {
- ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
+ computeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
}
@@ -541,7 +549,7 @@ void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
}
}
-void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
+void ScheduleDAGSDNodes::computeLatency(SUnit *SU) {
SDNode *N = SU->getNode();
// TokenFactor operands are considered zero latency, and some schedulers
@@ -553,7 +561,7 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
}
// Check to see if the scheduler cares about latencies.
- if (ForceUnitLatencies()) {
+ if (forceUnitLatencies()) {
SU->Latency = 1;
return;
}
@@ -575,10 +583,10 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
SU->Latency += TII->getInstrLatency(InstrItins, N);
}
-void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
+void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const{
// Check to see if the scheduler cares about latencies.
- if (ForceUnitLatencies())
+ if (forceUnitLatencies())
return;
if (dep.getKind() != SDep::Data)
@@ -621,6 +629,30 @@ void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
}
}
+void ScheduleDAGSDNodes::dumpSchedule() const {
+ for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
+ if (SUnit *SU = Sequence[i])
+ SU->dump(this);
+ else
+ dbgs() << "**** NOOP ****\n";
+ }
+}
+
+#ifndef NDEBUG
+/// VerifyScheduledSequence - Verify that all SUnits were scheduled and that
+/// their state is consistent with the nodes listed in Sequence.
+///
+void ScheduleDAGSDNodes::VerifyScheduledSequence(bool isBottomUp) {
+ unsigned ScheduledNodes = ScheduleDAG::VerifyScheduledDAG(isBottomUp);
+ unsigned Noops = 0;
+ for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
+ if (!Sequence[i])
+ ++Noops;
+ assert(Sequence.size() - Noops == ScheduledNodes &&
+ "The number of nodes scheduled doesn't match the expected number!");
+}
+#endif // NDEBUG
+
namespace {
struct OrderSorter {
bool operator()(const std::pair<unsigned, MachineInstr*> &A,
@@ -686,9 +718,48 @@ static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}
+void ScheduleDAGSDNodes::
+EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
+ MachineBasicBlock::iterator InsertPos) {
+ for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl()) continue; // ignore chain preds
+ if (I->getSUnit()->CopyDstRC) {
+ // Copy to physical register.
+ DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->getSUnit());
+ assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
+ // Find the destination physical register.
+ unsigned Reg = 0;
+ for (SUnit::const_succ_iterator II = SU->Succs.begin(),
+ EE = SU->Succs.end(); II != EE; ++II) {
+        if (II->isCtrl()) continue;  // ignore chain succs
+ if (II->getReg()) {
+ Reg = II->getReg();
+ break;
+ }
+ }
+ BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
+ .addReg(VRI->second);
+ } else {
+ // Copy from physical register.
+ assert(I->getReg() && "Unknown physical register!");
+ unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
+ bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
+ (void)isNew; // Silence compiler warning.
+ assert(isNew && "Node emitted out of order - early");
+ BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
+ .addReg(I->getReg());
+ }
+ break;
+ }
+}
-/// EmitSchedule - Emit the machine code in scheduled order.
-MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
+/// EmitSchedule - Emit the machine code in scheduled order. Return the new
+/// InsertPos and MachineBasicBlock that contains this insertion
+/// point. ScheduleDAGSDNodes holds a BB pointer for convenience, but this does
+/// not necessarily refer to the returned BB. The emitter may split blocks.
+MachineBasicBlock *ScheduleDAGSDNodes::
+EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
InstrEmitter Emitter(BB, InsertPos);
DenseMap<SDValue, unsigned> VRBaseMap;
DenseMap<SUnit*, unsigned> CopyVRBaseMap;
@@ -711,7 +782,7 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
SUnit *SU = Sequence[i];
if (!SU) {
// Null SUnit* is a noop.
- EmitNoop();
+ TII->insertNoop(*Emitter.getBlock(), InsertPos);
continue;
}
@@ -719,7 +790,7 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
// SDNode and any glued SDNodes and append them to the block.
if (!SU->getNode()) {
// Emit a copy.
- EmitPhysRegCopy(SU, CopyVRBaseMap);
+ EmitPhysRegCopy(SU, CopyVRBaseMap, InsertPos);
continue;
}
@@ -784,19 +855,24 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
}
// Add trailing DbgValue's before the terminator. FIXME: May want to add
// some of them before one or more conditional branches?
+ SmallVector<MachineInstr*, 8> DbgMIs;
while (DI != DE) {
- MachineBasicBlock *InsertBB = Emitter.getBlock();
- MachineBasicBlock::iterator Pos= Emitter.getBlock()->getFirstTerminator();
- if (!(*DI)->isInvalidated()) {
- MachineInstr *DbgMI= Emitter.EmitDbgValue(*DI, VRBaseMap);
- if (DbgMI)
- InsertBB->insert(Pos, DbgMI);
- }
+ if (!(*DI)->isInvalidated())
+ if (MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap))
+ DbgMIs.push_back(DbgMI);
++DI;
}
+
+ MachineBasicBlock *InsertBB = Emitter.getBlock();
+ MachineBasicBlock::iterator Pos = InsertBB->getFirstTerminator();
+ InsertBB->insert(Pos, DbgMIs.begin(), DbgMIs.end());
}
- BB = Emitter.getBlock();
InsertPos = Emitter.getInsertPos();
- return BB;
+ return Emitter.getBlock();
+}
+
+/// Return the DAG name, based on the basic block label.
+std::string ScheduleDAGSDNodes::getDAGName() const {
+ return "sunit-dag." + BB->getFullName();
}
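
The new VerifyScheduledSequence above boils down to a single invariant: entries in Sequence are either real SUnits or null noop slots, and the count of non-null entries must equal what the base verifier reported. A standalone sketch of that check (toy SUnit, not the LLVM type):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct SUnit { int NodeNum; };

    // Null entries stand for noops, as in ScheduleDAGSDNodes::Sequence.
    void verifyScheduledSequence(const std::vector<SUnit*> &Sequence,
                                 unsigned ScheduledNodes) {
      unsigned Noops = 0;
      for (std::size_t i = 0, e = Sequence.size(); i != e; ++i)
        if (!Sequence[i])
          ++Noops;
      assert(Sequence.size() - Noops == ScheduledNodes &&
             "The number of nodes scheduled doesn't match the expected number!");
      (void)Noops; // silence unused warning in NDEBUG builds
    }

    int main() {
      SUnit A = {0}, B = {1};
      std::vector<SUnit*> Seq;
      Seq.push_back(&A);
      Seq.push_back(0);   // a noop slot
      Seq.push_back(&B);
      verifyScheduledSequence(Seq, 2); // passes: 3 entries, 1 noop, 2 nodes
    }
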
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 9c27b2e..75940ec 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -35,17 +35,20 @@ namespace llvm {
///
class ScheduleDAGSDNodes : public ScheduleDAG {
public:
+ MachineBasicBlock *BB;
SelectionDAG *DAG; // DAG of the current basic block
const InstrItineraryData *InstrItins;
+ /// The schedule. Null SUnit*'s represent noop instructions.
+ std::vector<SUnit*> Sequence;
+
explicit ScheduleDAGSDNodes(MachineFunction &mf);
virtual ~ScheduleDAGSDNodes() {}
/// Run - perform scheduling.
///
- void Run(SelectionDAG *dag, MachineBasicBlock *bb,
- MachineBasicBlock::iterator insertPos);
+ void Run(SelectionDAG *dag, MachineBasicBlock *bb);
/// isPassiveNode - Return true if the node is a non-scheduled leaf.
///
@@ -53,6 +56,7 @@ namespace llvm {
if (isa<ConstantSDNode>(Node)) return true;
if (isa<ConstantFPSDNode>(Node)) return true;
if (isa<RegisterSDNode>(Node)) return true;
+ if (isa<RegisterMaskSDNode>(Node)) return true;
if (isa<GlobalAddressSDNode>(Node)) return true;
if (isa<BasicBlockSDNode>(Node)) return true;
if (isa<FrameIndexSDNode>(Node)) return true;
@@ -67,7 +71,7 @@ namespace llvm {
/// newSUnit - Creates a new SUnit and returns a ptr to it.
///
- SUnit *NewSUnit(SDNode *N);
+ SUnit *newSUnit(SDNode *N);
/// Clone - Creates a clone of the specified SUnit. It does not copy the
/// predecessors / successors info nor the temporary scheduling states.
@@ -78,7 +82,7 @@ namespace llvm {
/// are input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged together nodes with a single SUnit.
- virtual void BuildSchedGraph(AliasAnalysis *AA);
+ void BuildSchedGraph(AliasAnalysis *AA);
/// InitVRegCycleFlag - Set isVRegCycle if this node's single use is
/// CopyToReg and its only active data operands are CopyFromReg within a
@@ -90,30 +94,41 @@ namespace llvm {
///
void InitNumRegDefsLeft(SUnit *SU);
- /// ComputeLatency - Compute node latency.
+ /// computeLatency - Compute node latency.
///
- virtual void ComputeLatency(SUnit *SU);
+ virtual void computeLatency(SUnit *SU);
- /// ComputeOperandLatency - Override dependence edge latency using
+ /// computeOperandLatency - Override dependence edge latency using
/// operand use/def information
///
- virtual void ComputeOperandLatency(SUnit *Def, SUnit *Use,
+ virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const { }
- virtual void ComputeOperandLatency(SDNode *Def, SDNode *Use,
+ virtual void computeOperandLatency(SDNode *Def, SDNode *Use,
unsigned OpIdx, SDep& dep) const;
- virtual MachineBasicBlock *EmitSchedule();
-
/// Schedule - Order nodes according to selected style, filling
/// in the Sequence member.
///
virtual void Schedule() = 0;
+ /// VerifyScheduledSequence - Verify that all SUnits are scheduled and
+ /// consistent with the Sequence of scheduled instructions.
+ void VerifyScheduledSequence(bool isBottomUp);
+
+ /// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock
+ /// according to the order specified in Sequence.
+ ///
+ MachineBasicBlock *EmitSchedule(MachineBasicBlock::iterator &InsertPos);
+
virtual void dumpNode(const SUnit *SU) const;
+ void dumpSchedule() const;
+
virtual std::string getGraphNodeLabel(const SUnit *SU) const;
+ virtual std::string getDAGName() const;
+
virtual void getCustomGraphFeatures(GraphWriter<ScheduleDAG*> &GW) const;
/// RegDefIter - In place iteration over the values defined by an
@@ -159,6 +174,9 @@ namespace llvm {
/// BuildSchedUnits, AddSchedEdges - Helper functions for BuildSchedGraph.
void BuildSchedUnits();
void AddSchedEdges();
+
+ void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
+ MachineBasicBlock::iterator InsertPos);
};
}
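// Editor's note: a minimal caller sketch for the reworked interface above,
// assuming illustrative names (IS, CurDAG, BB, InsertPos) that are not part
// of this patch. Run() no longer takes an insertion point; the caller now
// hands one to EmitSchedule() after scheduling completes.
ScheduleDAGSDNodes *Scheduler = createVLIWDAGScheduler(IS, CodeGenOpt::Default);
Scheduler->Run(CurDAG, BB);                        // fills Sequence for BB
MachineBasicBlock::iterator InsertPos = BB->end();
BB = Scheduler->EmitSchedule(InsertPos);           // emits the MachineInstrs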
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
index 430283d..c851291 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
@@ -1,4 +1,4 @@
-//===---- ScheduleDAGList.cpp - Implement a list scheduler for isel DAG ---===//
+//===- ScheduleDAGVLIW.cpp - SelectionDAG list scheduler for VLIW -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -31,6 +31,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/ResourcePriorityQueue.h"
#include <climits>
using namespace llvm;
@@ -38,15 +39,15 @@ STATISTIC(NumNoops , "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
static RegisterScheduler
- tdListDAGScheduler("list-td", "Top-down list scheduler",
- createTDListDAGScheduler);
+ VLIWScheduler("vliw-td", "VLIW scheduler",
+ createVLIWDAGScheduler);
namespace {
//===----------------------------------------------------------------------===//
-/// ScheduleDAGList - The actual list scheduler implementation. This supports
-/// top-down scheduling.
+/// ScheduleDAGVLIW - The actual DFA list scheduler implementation. This
+/// supports top-down scheduling.
///
-class ScheduleDAGList : public ScheduleDAGSDNodes {
+class ScheduleDAGVLIW : public ScheduleDAGSDNodes {
private:
/// AvailableQueue - The priority queue to use for the available SUnits.
///
@@ -61,16 +62,20 @@ private:
/// HazardRec - The hazard recognizer to use.
ScheduleHazardRecognizer *HazardRec;
+ /// AA - AliasAnalysis for making memory reference queries.
+ AliasAnalysis *AA;
+
public:
- ScheduleDAGList(MachineFunction &mf,
+ ScheduleDAGVLIW(MachineFunction &mf,
+ AliasAnalysis *aa,
SchedulingPriorityQueue *availqueue)
- : ScheduleDAGSDNodes(mf), AvailableQueue(availqueue) {
+ : ScheduleDAGSDNodes(mf), AvailableQueue(availqueue), AA(aa) {
const TargetMachine &tm = mf.getTarget();
HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
}
- ~ScheduleDAGList() {
+ ~ScheduleDAGVLIW() {
delete HazardRec;
delete AvailableQueue;
}
@@ -78,23 +83,25 @@ public:
void Schedule();
private:
- void ReleaseSucc(SUnit *SU, const SDep &D);
- void ReleaseSuccessors(SUnit *SU);
- void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
- void ListScheduleTopDown();
+ void releaseSucc(SUnit *SU, const SDep &D);
+ void releaseSuccessors(SUnit *SU);
+ void scheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
+ void listScheduleTopDown();
};
} // end anonymous namespace
/// Schedule - Schedule the DAG using list scheduling.
-void ScheduleDAGList::Schedule() {
- DEBUG(dbgs() << "********** List Scheduling **********\n");
+void ScheduleDAGVLIW::Schedule() {
+ DEBUG(dbgs()
+ << "********** List Scheduling BB#" << BB->getNumber()
+ << " '" << BB->getName() << "' **********\n");
// Build the scheduling graph.
- BuildSchedGraph(NULL);
+ BuildSchedGraph(AA);
AvailableQueue->initNodes(SUnits);
- ListScheduleTopDown();
+ listScheduleTopDown();
AvailableQueue->releaseState();
}
@@ -103,9 +110,9 @@ void ScheduleDAGList::Schedule() {
// Top-Down Scheduling
//===----------------------------------------------------------------------===//
-/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
+/// releaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
-void ScheduleDAGList::ReleaseSucc(SUnit *SU, const SDep &D) {
+void ScheduleDAGVLIW::releaseSucc(SUnit *SU, const SDep &D) {
SUnit *SuccSU = D.getSUnit();
#ifndef NDEBUG
@@ -122,25 +129,26 @@ void ScheduleDAGList::ReleaseSucc(SUnit *SU, const SDep &D) {
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
- if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
+ if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
PendingQueue.push_back(SuccSU);
+ }
}
-void ScheduleDAGList::ReleaseSuccessors(SUnit *SU) {
+void ScheduleDAGVLIW::releaseSuccessors(SUnit *SU) {
// Top down: release successors.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
assert(!I->isAssignedRegDep() &&
"The list-td scheduler doesn't yet support physreg dependencies!");
- ReleaseSucc(SU, *I);
+ releaseSucc(SU, *I);
}
}
-/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
+/// scheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
-void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
+void ScheduleDAGVLIW::scheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
@@ -148,20 +156,20 @@ void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
- ReleaseSuccessors(SU);
+ releaseSuccessors(SU);
SU->isScheduled = true;
- AvailableQueue->ScheduledNode(SU);
+ AvailableQueue->scheduledNode(SU);
}
-/// ListScheduleTopDown - The main loop of list scheduling for top-down
+/// listScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
-void ScheduleDAGList::ListScheduleTopDown() {
+void ScheduleDAGVLIW::listScheduleTopDown() {
unsigned CurCycle = 0;
// Release any successors of the special Entry node.
- ReleaseSuccessors(&EntrySU);
+ releaseSuccessors(&EntrySU);
- // All leaves to Available queue.
+ // All leaves to AvailableQueue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.
if (SUnits[i].Preds.empty()) {
@@ -170,7 +178,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
}
}
- // While Available queue is not empty, grab the node with the highest
+ // While AvailableQueue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
std::vector<SUnit*> NotReady;
Sequence.reserve(SUnits.size());
@@ -184,7 +192,8 @@ void ScheduleDAGList::ListScheduleTopDown() {
PendingQueue[i] = PendingQueue.back();
PendingQueue.pop_back();
--i; --e;
- } else {
+ }
+ else {
assert(PendingQueue[i]->getDepth() > CurCycle && "Negative latency?");
}
}
@@ -192,6 +201,8 @@ void ScheduleDAGList::ListScheduleTopDown() {
// If there are no instructions available, don't try to issue anything, and
// don't advance the hazard recognizer.
if (AvailableQueue->empty()) {
+ // Reset DFA state.
+ AvailableQueue->scheduledNode(0);
++CurCycle;
continue;
}
@@ -223,7 +234,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
// If we found a node to schedule, do it now.
if (FoundSUnit) {
- ScheduleNodeTopDown(FoundSUnit, CurCycle);
+ scheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
// If this is a pseudo-op node, we don't want to increment the current
@@ -250,7 +261,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
}
#ifndef NDEBUG
- VerifySchedule(/*isBottomUp=*/false);
+ VerifyScheduledSequence(/*isBottomUp=*/false);
#endif
}
@@ -258,8 +269,8 @@ void ScheduleDAGList::ListScheduleTopDown() {
// Public Constructor Functions
//===----------------------------------------------------------------------===//
-/// createTDListDAGScheduler - This creates a top-down list scheduler.
+/// createVLIWDAGScheduler - This creates a top-down, DFA-driven VLIW list
+/// scheduler.
ScheduleDAGSDNodes *
-llvm::createTDListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
- return new ScheduleDAGList(*IS->MF, new LatencyPriorityQueue());
+llvm::createVLIWDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ return new ScheduleDAGVLIW(*IS->MF, IS->AA, new ResourcePriorityQueue(IS));
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 20bea8e..92671d1 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -28,7 +28,6 @@
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
@@ -63,6 +62,7 @@ static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
static const fltSemantics *EVTToAPFloatSemantics(EVT VT) {
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown FP format");
+ case MVT::f16: return &APFloat::IEEEhalf;
case MVT::f32: return &APFloat::IEEEsingle;
case MVT::f64: return &APFloat::IEEEdouble;
case MVT::f80: return &APFloat::x87DoubleExtended;
@@ -125,20 +125,29 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) {
if (i == e) return false;
// Do not accept build_vectors that aren't all constants or which have non-~0
- // elements.
+ // elements. We have to be a bit careful here, as the type of the constant
+ // may not be the same as the type of the vector elements due to type
+ // legalization (the elements are promoted to a legal type for the target and
+ // a vector of a type may be legal when the base element type is not).
+ // We only want to check enough bits to cover the vector elements, because
+ // we care if the resultant vector is all ones, not whether the individual
+ // constants are.
SDValue NotZero = N->getOperand(i);
+ unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
if (isa<ConstantSDNode>(NotZero)) {
- if (!cast<ConstantSDNode>(NotZero)->isAllOnesValue())
+ if (cast<ConstantSDNode>(NotZero)->getAPIntValue().countTrailingOnes() <
+ EltSize)
return false;
} else if (isa<ConstantFPSDNode>(NotZero)) {
- if (!cast<ConstantFPSDNode>(NotZero)->getValueAPF().
- bitcastToAPInt().isAllOnesValue())
+ if (cast<ConstantFPSDNode>(NotZero)->getValueAPF()
+ .bitcastToAPInt().countTrailingOnes() < EltSize)
return false;
} else
return false;
// Okay, we have at least one ~0 value, check to see if the rest match or are
- // undefs.
+ // undefs. Even with the above element type twiddling, this should be OK, as
+ // the same type legalization should have applied to all the elements.
for (++i; i != e; ++i)
if (N->getOperand(i) != NotZero &&
N->getOperand(i).getOpcode() != ISD::UNDEF)
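// Editor's note, a worked instance of the element-size check above
// (illustrative only): after type legalization a <4 x i16> all-ones
// build_vector may carry i32 constants of value 0xFFFF. isAllOnesValue() on
// the i32 is false, but all 16 bits the i16 element keeps are ones, which is
// exactly what the countTrailingOnes() test accepts:
APInt C(32, 0xFFFF);                      // promoted element constant
unsigned EltSize = 16;                    // i16 vector element width
bool TreatAsAllOnes = C.countTrailingOnes() >= EltSize;   // true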
@@ -384,7 +393,9 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
case ISD::Register:
ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
break;
-
+ case ISD::RegisterMask:
+ ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
+ break;
case ISD::SRCVALUE:
ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
break;
@@ -475,7 +486,7 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
- bool isNonTemporal) {
+ bool isNonTemporal, bool isInvariant) {
assert((ConvType & 3) == ConvType &&
"ConvType may not require more than 2 bits!");
assert((AM & 7) == AM &&
@@ -483,7 +494,8 @@ encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
return ConvType |
(AM << 2) |
(isVolatile << 5) |
- (isNonTemporal << 6);
+ (isNonTemporal << 6) |
+ (isInvariant << 7);
}
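// Editor's note: the resulting SubclassData layout, a reference sketch
// derived from the shifts above rather than a documented ABI:
//   bits 0-1: ConvType (2 bits)    bits 2-4: MemIndexedMode (3 bits)
//   bit 5: isVolatile    bit 6: isNonTemporal    bit 7: isInvariant (new)
// Decoding is the usual shift-and-mask, e.g. for a hypothetical Flags value:
bool IsInvariantFlag = (Flags >> 7) & 1;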
//===----------------------------------------------------------------------===//
@@ -564,6 +576,12 @@ void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
void SelectionDAG::RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener){
SmallVector<SDNode*, 16> DeadNodes(1, N);
+
+ // Create a dummy node that adds a reference to the root node, preventing
+ // it from being deleted. (This matters if the root is an operand of the
+ // dead node.)
+ HandleSDNode Dummy(getRoot());
+
RemoveDeadNodes(DeadNodes, UpdateListener);
}
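// Editor's note on the dummy handle above (illustrative): HandleSDNode holds
// a use of its operand, so the root gains one extra use for Dummy's lifetime
// and cannot be deleted while N's subgraph is torn down, even when the root
// is an operand of the dead node.
HandleSDNode Dummy(getRoot());    // root use-count +1 until Dummy dies
// ... RemoveDeadNodes(DeadNodes, UpdateListener) runs here ...
SDValue Root = Dummy.getValue();  // still a valid node at this point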
@@ -834,9 +852,9 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
}
// EntryNode could meaningfully have debug info if we can find it...
-SelectionDAG::SelectionDAG(const TargetMachine &tm)
+SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
: TM(tm), TLI(*tm.getTargetLowering()), TSI(*tm.getSelectionDAGInfo()),
- EntryNode(ISD::EntryToken, DebugLoc(), getVTList(MVT::Other)),
+ OptLevel(OL), EntryNode(ISD::EntryToken, DebugLoc(), getVTList(MVT::Other)),
Root(getEntryNode()), Ordering(0) {
AllNodes.push_back(&EntryNode);
Ordering = new SDNodeOrdering();
@@ -1025,16 +1043,14 @@ SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
return getConstantFP(APFloat((float)Val), VT, isTarget);
else if (EltVT==MVT::f64)
return getConstantFP(APFloat(Val), VT, isTarget);
- else if (EltVT==MVT::f80 || EltVT==MVT::f128) {
+ else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::f16) {
bool ignored;
APFloat apf = APFloat(Val);
apf.convert(*EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
&ignored);
return getConstantFP(apf, VT, isTarget);
- } else {
- assert(0 && "Unsupported type in getConstantFP");
- return SDValue();
- }
+ } else
+ llvm_unreachable("Unsupported type in getConstantFP");
}
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL,
@@ -1369,6 +1385,20 @@ SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
return SDValue(N, 0);
}
+SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
+ FoldingSetNodeID ID;
+ AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
+ ID.AddPointer(RegMask);
+ void *IP = 0;
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ return SDValue(E, 0);
+
+ SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
+ CSEMap.InsertNode(N, IP);
+ AllNodes.push_back(N);
+ return SDValue(N, 0);
+}
+
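// Editor's note, a hedged usage sketch (TRI, CallConv, Ops, DAG and the
// getCallPreservedMask helper are assumptions for illustration, not part of
// this patch): call lowering can attach a single RegisterMask operand that
// describes the call-preserved registers, instead of materializing one
// register dependency per clobbered register. Masks are uniqued by pointer,
// so two RegisterMask nodes are CSE'd only when they share a mask table.
const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); // assumed helper
Ops.push_back(DAG.getRegisterMask(Mask));                   // extra call operand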
SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
FoldingSetNodeID ID;
SDValue Ops[] = { Root };
@@ -1598,7 +1628,7 @@ bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
unsigned Depth) const {
APInt KnownZero, KnownOne;
- ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
+ ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
return (KnownZero & Mask) == Mask;
}
@@ -1607,15 +1637,12 @@ bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
/// known to be either zero or one and return them in the KnownZero/KnownOne
-/// bitsets. This code only analyzes bits in Mask, in order to short-circuit
-/// processing.
+/// bitsets. The demanded-bits Mask parameter is gone; bits are now analyzed
+/// across the full value width.
-void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
- APInt &KnownZero, APInt &KnownOne,
- unsigned Depth) const {
- unsigned BitWidth = Mask.getBitWidth();
- assert(BitWidth == Op.getValueType().getScalarType().getSizeInBits() &&
- "Mask size mismatches value type size!");
+void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
+ APInt &KnownOne, unsigned Depth) const {
+ unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
- if (Depth == 6 || Mask == 0)
+ if (Depth == 6)
return; // Limit search depth.
APInt KnownZero2, KnownOne2;
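// Editor's note on using the new signature (DAG and BitWidth are assumed
// caller-side names): callers that used to pass a demanded-bits mask now
// compute full-width known bits and apply their mask afterwards, exactly as
// MaskedValueIsZero does above.
APInt KnownZero, KnownOne;
DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);    // Depth defaults to 0
APInt Low8 = APInt::getLowBitsSet(BitWidth, 8);
bool LowByteZero = (KnownZero & Low8) == Low8;     // low byte provably zero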
@@ -1623,14 +1650,13 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
switch (Op.getOpcode()) {
case ISD::Constant:
// We know all of the bits for a constant!
- KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & Mask;
- KnownZero = ~KnownOne & Mask;
+ KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
+ KnownZero = ~KnownOne;
return;
case ISD::AND:
// If either the LHS or the RHS are Zero, the result is zero.
- ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownZero,
- KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -1640,9 +1666,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
KnownZero |= KnownZero2;
return;
case ISD::OR:
- ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(0), Mask & ~KnownOne,
- KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -1652,8 +1677,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
KnownOne |= KnownOne2;
return;
case ISD::XOR: {
- ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -1665,9 +1690,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
return;
}
case ISD::MUL: {
- APInt Mask2 = APInt::getAllOnesValue(BitWidth);
- ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -1686,33 +1710,29 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
LeadZ = std::min(LeadZ, BitWidth);
KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
APInt::getHighBitsSet(BitWidth, LeadZ);
- KnownZero &= Mask;
return;
}
case ISD::UDIV: {
// For the purposes of computing leading zeros we can conservatively
// treat a udiv as a logical right shift by the power of 2 known to
// be less than the denominator.
- APInt AllOnes = APInt::getAllOnesValue(BitWidth);
- ComputeMaskedBits(Op.getOperand(0),
- AllOnes, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
unsigned LeadZ = KnownZero2.countLeadingOnes();
KnownOne2.clearAllBits();
KnownZero2.clearAllBits();
- ComputeMaskedBits(Op.getOperand(1),
- AllOnes, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
LeadZ = std::min(BitWidth,
LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
- KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
+ KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
return;
}
case ISD::SELECT:
- ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -1721,8 +1741,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
KnownZero &= KnownZero2;
return;
case ISD::SELECT_CC:
- ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
- ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -1754,8 +1774,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
if (ShAmt >= BitWidth)
return;
- ComputeMaskedBits(Op.getOperand(0), Mask.lshr(ShAmt),
- KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero <<= ShAmt;
KnownOne <<= ShAmt;
@@ -1772,13 +1791,12 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
if (ShAmt >= BitWidth)
return;
- ComputeMaskedBits(Op.getOperand(0), (Mask << ShAmt),
- KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
- APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
+ APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
KnownZero |= HighBits; // High bits known zero.
}
return;
@@ -1790,15 +1808,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
if (ShAmt >= BitWidth)
return;
- APInt InDemandedMask = (Mask << ShAmt);
// If any of the demanded bits are produced by the sign extension, we also
// demand the input sign bit.
- APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt) & Mask;
- if (HighBits.getBoolValue())
- InDemandedMask |= APInt::getSignBit(BitWidth);
+ APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
- ComputeMaskedBits(Op.getOperand(0), InDemandedMask, KnownZero, KnownOne,
- Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.lshr(ShAmt);
KnownOne = KnownOne.lshr(ShAmt);
@@ -1820,10 +1834,10 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
// Sign extension. Compute the demanded bits in the result that are not
// present in the input.
- APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits) & Mask;
+ APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
APInt InSignBit = APInt::getSignBit(EBits);
- APInt InputDemandedBits = Mask & APInt::getLowBitsSet(BitWidth, EBits);
+ APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
// If the sign extended bits are demanded, we know that the sign
// bit is demanded.
@@ -1831,8 +1845,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
if (NewBits.getBoolValue())
InputDemandedBits |= InSignBit;
- ComputeMaskedBits(Op.getOperand(0), InputDemandedBits,
- KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ KnownOne &= InputDemandedBits;
+ KnownZero &= InputDemandedBits;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
@@ -1850,7 +1865,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
return;
}
case ISD::CTTZ:
+ case ISD::CTTZ_ZERO_UNDEF:
case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTPOP: {
unsigned LowBits = Log2_32(BitWidth)+1;
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
@@ -1858,22 +1875,23 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
return;
}
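// Editor's note, a worked instance of the bound above (illustrative): with
// BitWidth == 32 the result of cttz/ctlz/ctpop is at most 32, which fits in
// Log2_32(32)+1 == 6 bits, so the top 26 bits are known zero:
unsigned LowBits32 = Log2_32(32) + 1;                    // == 6
APInt KZ = APInt::getHighBitsSet(32, 32 - LowBits32);    // bits 6..31 set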
case ISD::LOAD: {
+ LoadSDNode *LD = cast<LoadSDNode>(Op);
if (ISD::isZEXTLoad(Op.getNode())) {
- LoadSDNode *LD = cast<LoadSDNode>(Op);
EVT VT = LD->getMemoryVT();
unsigned MemBits = VT.getScalarType().getSizeInBits();
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
+ } else if (const MDNode *Ranges = LD->getRanges()) {
+ computeMaskedBitsLoad(*Ranges, KnownZero);
}
return;
}
case ISD::ZERO_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
- APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
- APInt InMask = Mask.trunc(InBits);
+ APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
- ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
KnownZero |= NewBits;
@@ -1883,17 +1901,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
APInt InSignBit = APInt::getSignBit(InBits);
- APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
- APInt InMask = Mask.trunc(InBits);
-
- // If any of the sign extended bits are demanded, we know that the sign
- // bit is demanded. Temporarily set this bit in the mask for our callee.
- if (NewBits.getBoolValue())
- InMask |= InSignBit;
+ APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
- ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
// Note if the sign bit is known to be zero or one.
bool SignBitKnownZero = KnownZero.isNegative();
@@ -1901,13 +1913,6 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
assert(!(SignBitKnownZero && SignBitKnownOne) &&
"Sign bit can't be known to be both zero and one!");
- // If the sign bit wasn't actually demanded by our caller, we don't
- // want it set in the KnownZero and KnownOne result values. Reset the
- // mask and reapply it to the result values.
- InMask = Mask.trunc(InBits);
- KnownZero &= InMask;
- KnownOne &= InMask;
-
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
@@ -1921,10 +1926,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
case ISD::ANY_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
- APInt InMask = Mask.trunc(InBits);
KnownZero = KnownZero.trunc(InBits);
KnownOne = KnownOne.trunc(InBits);
- ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
return;
@@ -1932,10 +1936,9 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
case ISD::TRUNCATE: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarType().getSizeInBits();
- APInt InMask = Mask.zext(InBits);
KnownZero = KnownZero.zext(InBits);
KnownOne = KnownOne.zext(InBits);
- ComputeMaskedBits(Op.getOperand(0), InMask, KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.trunc(BitWidth);
KnownOne = KnownOne.trunc(BitWidth);
@@ -1944,9 +1947,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
case ISD::AssertZext: {
EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
- ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
- KnownOne, Depth+1);
- KnownZero |= (~InMask) & Mask;
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ KnownZero |= (~InMask);
return;
}
case ISD::FGETSIGN:
@@ -1963,8 +1965,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
// NLZ can't be BitWidth with no sign bit
APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
- ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero2, KnownOne2,
- Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
// If all of the MaskV bits are known to be zero, then we know the
// output top bits are zero, because we now know that the output is
@@ -1972,7 +1973,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
if ((KnownZero2 & MaskV) == MaskV) {
unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
// Top bits known zero.
- KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
+ KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
}
}
}
@@ -1983,13 +1984,11 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
// Output known-0 bits are known if clear or set in both the low clear bits
// common to both LHS & RHS. For example, 8+(X<<3) is known to have the
// low 3 bits clear.
- APInt Mask2 = APInt::getLowBitsSet(BitWidth,
- BitWidth - Mask.countLeadingZeros());
- ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
- ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero2, KnownOne2, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
KnownZeroOut = std::min(KnownZeroOut,
KnownZero2.countTrailingOnes());
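// Editor's note, a concrete instance of the comment above (illustrative):
// in 8 + (X << 3), the constant 8 ends in 0b000, so its KnownZero has three
// trailing ones, and X << 3 likewise has its low 3 bits known zero. Thus
// KnownZeroOut == min(3, 3) and no carry can ever reach bits 0-2:
APInt SumKnownZero = APInt::getLowBitsSet(BitWidth, 3);  // low 3 bits clear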
@@ -2013,7 +2012,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
if (RA.isPowerOf2()) {
APInt LowBits = RA - 1;
APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
- ComputeMaskedBits(Op.getOperand(0), Mask2,KnownZero2,KnownOne2,Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
// The low bits of the first operand are unchanged by the srem.
KnownZero = KnownZero2 & LowBits;
@@ -2028,10 +2027,6 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
// the upper bits are all one.
if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
KnownOne |= ~LowBits;
-
- KnownZero &= Mask;
- KnownOne &= Mask;
-
assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
}
}
@@ -2041,9 +2036,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
const APInt &RA = Rem->getAPIntValue();
if (RA.isPowerOf2()) {
APInt LowBits = (RA - 1);
- APInt Mask2 = LowBits & Mask;
- KnownZero |= ~LowBits & Mask;
- ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero, KnownOne,Depth+1);
+ KnownZero |= ~LowBits;
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne,Depth+1);
assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
break;
}
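// Editor's note, an example for the power-of-2 urem case above
// (illustrative): for X urem 8, LowBits == 7 (0b111), so the result is known
// to be < 8 before the recursive call, which may then refine bits 0-2 from
// X itself (e.g. X known to be a multiple of 4 leaves only 0 or 4 possible):
APInt LowBits7(BitWidth, 7);
APInt KZ = ~LowBits7;            // everything above bit 2 is known zero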
@@ -2051,16 +2045,13 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
// Since the result is less than or equal to either operand, any leading
// zero bits in either operand must also exist in the result.
- APInt AllOnes = APInt::getAllOnesValue(BitWidth);
- ComputeMaskedBits(Op.getOperand(0), AllOnes, KnownZero, KnownOne,
- Depth+1);
- ComputeMaskedBits(Op.getOperand(1), AllOnes, KnownZero2, KnownOne2,
- Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
KnownZero2.countLeadingOnes());
KnownOne.clearAllBits();
- KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
+ KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
return;
}
case ISD::FrameIndex:
@@ -2080,8 +2071,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
// Allow the target to implement this method for its nodes.
- TLI.computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne, *this,
- Depth);
+ TLI.computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
return;
}
}
@@ -2205,12 +2195,11 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
if (CRHS->isAllOnesValue()) {
APInt KnownZero, KnownOne;
- APInt Mask = APInt::getAllOnesValue(VTBits);
- ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
- if ((KnownZero | APInt(VTBits, 1)) == Mask)
+ if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
return VTBits;
// If we are subtracting one from a positive number, there is no carry
@@ -2221,8 +2210,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
if (Tmp2 == 1) return 1;
- return std::min(Tmp, Tmp2)-1;
- break;
+ return std::min(Tmp, Tmp2)-1;
case ISD::SUB:
Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
@@ -2232,11 +2220,10 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
if (CLHS->isNullValue()) {
APInt KnownZero, KnownOne;
- APInt Mask = APInt::getAllOnesValue(VTBits);
- ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
+ ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
- if ((KnownZero | APInt(VTBits, 1)) == Mask)
+ if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
return VTBits;
// If the input is known to be positive (the sign bit is known clear),
@@ -2251,8 +2238,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
// is, at worst, one more bit than the inputs.
Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
if (Tmp == 1) return 1; // Early out.
- return std::min(Tmp, Tmp2)-1;
- break;
+ return std::min(Tmp, Tmp2)-1;
case ISD::TRUNCATE:
// FIXME: it's tricky to do anything useful for this, but it is an important
// case for targets like X86.
@@ -2286,9 +2272,9 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
// Finally, if we can prove that the top bits of the result are 0's or 1's,
// use this information.
APInt KnownZero, KnownOne;
- APInt Mask = APInt::getAllOnesValue(VTBits);
- ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
+ ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
+ APInt Mask;
if (KnownZero.isNegative()) { // sign bit is 0
Mask = KnownZero;
} else if (KnownOne.isNegative()) { // sign bit is 1;
@@ -2328,7 +2314,7 @@ bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
// If we're told that NaNs won't happen, assume they won't.
- if (NoNaNsFPMath)
+ if (getTarget().Options.NoNaNsFPMath)
return true;
// If the value is a constant, we can obviously see if it is a NaN or not.
@@ -2423,8 +2409,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
case ISD::CTPOP:
return getConstant(Val.countPopulation(), VT);
case ISD::CTLZ:
+ case ISD::CTLZ_ZERO_UNDEF:
return getConstant(Val.countLeadingZeros(), VT);
case ISD::CTTZ:
+ case ISD::CTTZ_ZERO_UNDEF:
return getConstant(Val.countTrailingZeros(), VT);
}
}
@@ -2440,7 +2428,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
case ISD::FABS:
V.clearSign();
return getConstantFP(V, VT);
- case ISD::FP_ROUND:
case ISD::FP_EXTEND: {
bool ignored;
// This can return overflow, underflow, or inexact; we don't care.
@@ -2561,17 +2548,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
"Vector element count mismatch!");
if (OpOpcode == ISD::TRUNCATE)
return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
- else if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
- OpOpcode == ISD::ANY_EXTEND) {
+ if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
+ OpOpcode == ISD::ANY_EXTEND) {
// If the source is smaller than the dest, we still need an extend.
if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
.bitsLT(VT.getScalarType()))
return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
- else if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
+ if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
- else
- return Operand.getNode()->getOperand(0);
+ return Operand.getNode()->getOperand(0);
}
+ if (OpOpcode == ISD::UNDEF)
+ return getUNDEF(VT);
break;
case ISD::BITCAST:
// Basic sanity checking.
@@ -2601,7 +2589,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
break;
case ISD::FNEG:
// -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
- if (UnsafeFPMath && OpOpcode == ISD::FSUB)
+ if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
Operand.getNode()->getOperand(0));
if (OpOpcode == ISD::FNEG) // --X -> X
@@ -2736,7 +2724,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::FMUL:
case ISD::FDIV:
case ISD::FREM:
- if (UnsafeFPMath) {
+ if (getTarget().Options.UnsafeFPMath) {
if (Opcode == ISD::FADD) {
// 0+x --> x
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
@@ -3005,6 +2993,16 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
default: break;
}
}
+
+ if (Opcode == ISD::FP_ROUND) {
+ APFloat V = N1CFP->getValueAPF(); // make copy
+ bool ignored;
+ // This can return overflow, underflow, or inexact; we don't care.
+ // FIXME need to be more flexible about rounding mode.
+ (void)V.convert(*EVTToAPFloatSemantics(VT),
+ APFloat::rmNearestTiesToEven, &ignored);
+ return getConstantFP(V, VT);
+ }
}
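// Editor's note on the FP_ROUND folding above (illustrative): constant
// rounding now folds at node-creation time and the opStatus is deliberately
// ignored, so an f64 value too large for f32 quietly becomes +Inf:
APFloat V(1.0e200);                          // representable in f64 only
bool ignored;
V.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &ignored);
// V is now +Inf in single precision; getConstantFP(V, VT) wraps the result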
// Canonicalize an UNDEF to the RHS, even over a constant.
@@ -3059,7 +3057,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::FMUL:
case ISD::FDIV:
case ISD::FREM:
- if (UnsafeFPMath)
+ if (getTarget().Options.UnsafeFPMath)
return N2;
break;
case ISD::MUL:
@@ -3133,16 +3131,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::SELECT:
if (N1C) {
if (N1C->getZExtValue())
- return N2; // select true, X, Y -> X
- else
- return N3; // select false, X, Y -> Y
+ return N2; // select true, X, Y -> X
+ return N3; // select false, X, Y -> Y
}
if (N2 == N3) return N2; // select C, X, X -> X
break;
case ISD::VECTOR_SHUFFLE:
llvm_unreachable("should use getVectorShuffle constructor!");
- break;
case ISD::INSERT_SUBVECTOR: {
SDValue Index = N3;
if (VT.isSimple() && N1.getValueType().isSimple()
@@ -3275,8 +3271,7 @@ static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
/// used when a memcpy is turned into a memset when the source is a constant
/// string ptr.
static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
- const TargetLowering &TLI,
- std::string &Str, unsigned Offset) {
+ const TargetLowering &TLI, StringRef Str) {
// Handle vector with all elements zero.
if (Str.empty()) {
if (VT.isInteger())
@@ -3294,15 +3289,18 @@ static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
}
assert(!VT.isVector() && "Can't handle vector type here!");
- unsigned NumBits = VT.getSizeInBits();
- unsigned MSB = NumBits / 8;
+ unsigned NumVTBytes = VT.getSizeInBits() / 8;
+ unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
+
uint64_t Val = 0;
- if (TLI.isLittleEndian())
- Offset = Offset + MSB - 1;
- for (unsigned i = 0; i != MSB; ++i) {
- Val = (Val << 8) | (unsigned char)Str[Offset];
- Offset += TLI.isLittleEndian() ? -1 : 1;
+ if (TLI.isLittleEndian()) {
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Val |= (uint64_t)(unsigned char)Str[i] << i*8;
+ } else {
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
}
+
return DAG.getConstant(Val, VT);
}
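// Editor's note, a byte-order sketch for the rewritten loops above
// (illustrative): packing "abcd" (0x61 0x62 0x63 0x64) into a 32-bit value
// yields
//   little-endian: Val == 0x64636261   (Str[0] lands in the low byte)
//   big-endian:    Val == 0x61626364   (Str[0] lands in the high byte)
// and when Str is shorter than the value type the untouched bytes stay zero,
// since Val starts at 0 (high bytes on LE, low bytes on BE).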
@@ -3317,7 +3315,7 @@ static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset,
/// isMemSrcFromString - Returns true if memcpy source is a string constant.
///
-static bool isMemSrcFromString(SDValue Src, std::string &Str) {
+static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
unsigned SrcDelta = 0;
GlobalAddressSDNode *G = NULL;
if (Src.getOpcode() == ISD::GlobalAddress)
@@ -3331,11 +3329,7 @@ static bool isMemSrcFromString(SDValue Src, std::string &Str) {
if (!G)
return false;
- const GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
- if (GV && GetConstantStringInfo(GV, Str, SrcDelta, false))
- return true;
-
- return false;
+ return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
}
/// FindOptimalMemOpLowering - Determines the optimal series of memory ops
@@ -3345,7 +3339,7 @@ static bool isMemSrcFromString(SDValue Src, std::string &Str) {
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
unsigned Limit, uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
- bool NonScalarIntSafe,
+ bool IsZeroVal,
bool MemcpyStrSrc,
SelectionDAG &DAG,
const TargetLowering &TLI) {
@@ -3359,7 +3353,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
// not need to be loaded.
EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
- NonScalarIntSafe, MemcpyStrSrc,
+ IsZeroVal, MemcpyStrSrc,
DAG.getMachineFunction());
if (VT == MVT::Other) {
@@ -3438,7 +3432,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
unsigned SrcAlign = DAG.InferPtrAlignment(Src);
if (Align > SrcAlign)
SrcAlign = Align;
- std::string Str;
+ StringRef Str;
bool CopyFromStr = isMemSrcFromString(Src, Str);
bool isZeroStr = CopyFromStr && Str.empty();
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
@@ -3475,7 +3469,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
// We only handle zero vectors here.
// FIXME: Handle other cases where store of vector immediate is done in
// a single instruction.
- Value = getMemsetStringVal(VT, dl, DAG, TLI, Str, SrcOff);
+ Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
DstPtrInfo.getWithOffset(DstOff), isVol,
@@ -3562,7 +3556,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Value = DAG.getLoad(VT, dl, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
SrcPtrInfo.getWithOffset(SrcOff), isVol,
- false, SrcAlign);
+ false, false, SrcAlign);
LoadValues.push_back(Value);
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
@@ -3606,11 +3600,11 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
- bool NonScalarIntSafe =
+ bool IsZeroVal =
isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
Size, (DstAlignCanChange ? 0 : Align), 0,
- NonScalarIntSafe, false, DAG, TLI))
+ IsZeroVal, false, DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
@@ -3717,8 +3711,9 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
std::pair<SDValue,SDValue> CallResult =
TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
- TLI.getLibcallCallingConv(RTLIB::MEMCPY), false,
- /*isReturnValueUsed=*/false,
+ TLI.getLibcallCallingConv(RTLIB::MEMCPY),
+ /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
TLI.getPointerTy()),
Args, *this, dl);
@@ -3769,8 +3764,9 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
std::pair<SDValue,SDValue> CallResult =
TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
- TLI.getLibcallCallingConv(RTLIB::MEMMOVE), false,
- /*isReturnValueUsed=*/false,
+ TLI.getLibcallCallingConv(RTLIB::MEMMOVE),
+ /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
TLI.getPointerTy()),
Args, *this, dl);
@@ -3829,8 +3825,9 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
std::pair<SDValue,SDValue> CallResult =
TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
false, false, false, false, 0,
- TLI.getLibcallCallingConv(RTLIB::MEMSET), false,
- /*isReturnValueUsed=*/false,
+ TLI.getLibcallCallingConv(RTLIB::MEMSET),
+ /*isTailCall=*/false,
+ /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
TLI.getPointerTy()),
Args, *this, dl);
@@ -4138,8 +4135,9 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, DebugLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT,
- bool isVolatile, bool isNonTemporal,
- unsigned Alignment, const MDNode *TBAAInfo) {
+ bool isVolatile, bool isNonTemporal, bool isInvariant,
+ unsigned Alignment, const MDNode *TBAAInfo,
+ const MDNode *Ranges) {
assert(Chain.getValueType() == MVT::Other &&
"Invalid chain type");
if (Alignment == 0) // Ensure that codegen never sees alignment 0
@@ -4150,6 +4148,8 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal;
+ if (isInvariant)
+ Flags |= MachineMemOperand::MOInvariant;
// If we don't have a PtrInfo, infer the trivial frame index case to simplify
// clients.
@@ -4159,7 +4159,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
- TBAAInfo);
+ TBAAInfo, Ranges);
return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}
@@ -4196,7 +4196,8 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
ID.AddInteger(MemVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
- MMO->isNonTemporal()));
+ MMO->isNonTemporal(),
+ MMO->isInvariant()));
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<LoadSDNode>(E)->refineAlignment(MMO);
@@ -4213,10 +4214,13 @@ SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo,
bool isVolatile, bool isNonTemporal,
- unsigned Alignment, const MDNode *TBAAInfo) {
+ bool isInvariant, unsigned Alignment,
+ const MDNode *TBAAInfo,
+ const MDNode *Ranges) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
- PtrInfo, VT, isVolatile, isNonTemporal, Alignment, TBAAInfo);
+ PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
+ TBAAInfo, Ranges);
}
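// Editor's note, a hedged usage sketch of the extended getLoad above (the
// constant-pool scenario and the Align variable are assumptions): an
// invariant, non-volatile load without range metadata would be created as
SDValue L = DAG.getLoad(VT, dl, Chain, Ptr,
                        MachinePointerInfo::getConstantPool(),
                        /*isVolatile=*/false, /*isNonTemporal=*/false,
                        /*isInvariant=*/true, Align,
                        /*TBAAInfo=*/0, /*Ranges=*/0);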
SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
@@ -4226,7 +4230,7 @@ SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
unsigned Alignment, const MDNode *TBAAInfo) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
- PtrInfo, MemVT, isVolatile, isNonTemporal, Alignment,
+ PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
TBAAInfo);
}
@@ -4239,8 +4243,8 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
"Load is already a indexed load!");
return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
LD->getChain(), Base, Offset, LD->getPointerInfo(),
- LD->getMemoryVT(),
- LD->isVolatile(), LD->isNonTemporal(), LD->getAlignment());
+ LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
+ false, LD->getAlignment());
}
SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
@@ -4282,7 +4286,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
ID.AddInteger(VT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
- MMO->isNonTemporal()));
+ MMO->isNonTemporal(), MMO->isInvariant()));
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
@@ -4349,7 +4353,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
ID.AddInteger(SVT.getRawBits());
ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
- MMO->isNonTemporal()));
+ MMO->isNonTemporal(), MMO->isInvariant()));
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
cast<StoreSDNode>(E)->refineAlignment(MMO);
@@ -4903,6 +4907,20 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
return N;
}
+/// UpdadeDebugLocOnMergedSDNode - If the opt level is -O0 then it throws away
+/// the line number information on the merged node since it is not possible to
+/// preserve the information that the operation is associated with multiple
+/// lines. This will make the debugger work better at -O0, where there is a
+/// higher probability of having other instructions associated with that line.
+///
+SDNode *SelectionDAG::UpdadeDebugLocOnMergedSDNode(SDNode *N, DebugLoc OLoc) {
+ DebugLoc NLoc = N->getDebugLoc();
+ if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) && (OLoc != NLoc)) {
+ N->setDebugLoc(DebugLoc());
+ }
+ return N;
+}
+
/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
@@ -4924,7 +4942,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
- return ON;
+ return UpdadeDebugLocOnMergedSDNode(ON, N->getDebugLoc());
}
if (!RemoveNodeFromCSEMaps(N))
@@ -5128,8 +5146,9 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
FoldingSetNodeID ID;
AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
- return cast<MachineSDNode>(E);
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
+ return cast<MachineSDNode>(UpdadeDebugLocOnMergedSDNode(E, DL));
+ }
}
// Allocate a new MachineSDNode.
@@ -5290,6 +5309,10 @@ void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To,
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User, &Listener);
}
+
+ // If we just RAUW'd the root, take note.
+ if (FromN == getRoot())
+ setRoot(To);
}
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
@@ -5335,6 +5358,10 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To,
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User, &Listener);
}
+
+ // If we just RAUW'd the root, take note.
+ if (From == getRoot().getNode())
+ setRoot(SDValue(To, getRoot().getResNo()));
}
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
@@ -5373,6 +5400,10 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From,
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User, &Listener);
}
+
+ // If we just RAUW'd the root, take note.
+ if (From == getRoot().getNode())
+ setRoot(SDValue(To[getRoot().getResNo()]));
}
/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
@@ -5431,6 +5462,10 @@ void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
// already exists there, recursively merge the results together.
AddModifiedNodeToCSEMaps(User, &Listener);
}
+
+ // If we just RAUW'd the root, take note.
+ if (From == getRoot())
+ setRoot(To);
}
namespace {
@@ -5657,7 +5692,7 @@ MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
MachineMemOperand *mmo)
: SDNode(Opc, dl, VTs), MemoryVT(memvt), MMO(mmo) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
- MMO->isNonTemporal());
+ MMO->isNonTemporal(), MMO->isInvariant());
assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
assert(isNonTemporal() == MMO->isNonTemporal() &&
"Non-temporal encoding error!");
@@ -5670,7 +5705,7 @@ MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
: SDNode(Opc, dl, VTs, Ops, NumOps),
MemoryVT(memvt), MMO(mmo) {
SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
- MMO->isNonTemporal());
+ MMO->isNonTemporal(), MMO->isInvariant());
assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}
@@ -5846,565 +5881,6 @@ uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}
-std::string SDNode::getOperationName(const SelectionDAG *G) const {
- switch (getOpcode()) {
- default:
- if (getOpcode() < ISD::BUILTIN_OP_END)
- return "<<Unknown DAG Node>>";
- if (isMachineOpcode()) {
- if (G)
- if (const TargetInstrInfo *TII = G->getTarget().getInstrInfo())
- if (getMachineOpcode() < TII->getNumOpcodes())
- return TII->get(getMachineOpcode()).getName();
- return "<<Unknown Machine Node #" + utostr(getOpcode()) + ">>";
- }
- if (G) {
- const TargetLowering &TLI = G->getTargetLoweringInfo();
- const char *Name = TLI.getTargetNodeName(getOpcode());
- if (Name) return Name;
- return "<<Unknown Target Node #" + utostr(getOpcode()) + ">>";
- }
- return "<<Unknown Node #" + utostr(getOpcode()) + ">>";
-
-#ifndef NDEBUG
- case ISD::DELETED_NODE:
- return "<<Deleted Node!>>";
-#endif
- case ISD::PREFETCH: return "Prefetch";
- case ISD::MEMBARRIER: return "MemBarrier";
- case ISD::ATOMIC_FENCE: return "AtomicFence";
- case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap";
- case ISD::ATOMIC_SWAP: return "AtomicSwap";
- case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd";
- case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub";
- case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd";
- case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr";
- case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor";
- case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand";
- case ISD::ATOMIC_LOAD_MIN: return "AtomicLoadMin";
- case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax";
- case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
- case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
- case ISD::ATOMIC_LOAD: return "AtomicLoad";
- case ISD::ATOMIC_STORE: return "AtomicStore";
- case ISD::PCMARKER: return "PCMarker";
- case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
- case ISD::SRCVALUE: return "SrcValue";
- case ISD::MDNODE_SDNODE: return "MDNode";
- case ISD::EntryToken: return "EntryToken";
- case ISD::TokenFactor: return "TokenFactor";
- case ISD::AssertSext: return "AssertSext";
- case ISD::AssertZext: return "AssertZext";
-
- case ISD::BasicBlock: return "BasicBlock";
- case ISD::VALUETYPE: return "ValueType";
- case ISD::Register: return "Register";
-
- case ISD::Constant: return "Constant";
- case ISD::ConstantFP: return "ConstantFP";
- case ISD::GlobalAddress: return "GlobalAddress";
- case ISD::GlobalTLSAddress: return "GlobalTLSAddress";
- case ISD::FrameIndex: return "FrameIndex";
- case ISD::JumpTable: return "JumpTable";
- case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE";
- case ISD::RETURNADDR: return "RETURNADDR";
- case ISD::FRAMEADDR: return "FRAMEADDR";
- case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
- case ISD::EXCEPTIONADDR: return "EXCEPTIONADDR";
- case ISD::LSDAADDR: return "LSDAADDR";
- case ISD::EHSELECTION: return "EHSELECTION";
- case ISD::EH_RETURN: return "EH_RETURN";
- case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
- case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
- case ISD::EH_SJLJ_DISPATCHSETUP: return "EH_SJLJ_DISPATCHSETUP";
- case ISD::ConstantPool: return "ConstantPool";
- case ISD::ExternalSymbol: return "ExternalSymbol";
- case ISD::BlockAddress: return "BlockAddress";
- case ISD::INTRINSIC_WO_CHAIN:
- case ISD::INTRINSIC_VOID:
- case ISD::INTRINSIC_W_CHAIN: {
- unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
- unsigned IID = cast<ConstantSDNode>(getOperand(OpNo))->getZExtValue();
- if (IID < Intrinsic::num_intrinsics)
- return Intrinsic::getName((Intrinsic::ID)IID);
- else if (const TargetIntrinsicInfo *TII = G->getTarget().getIntrinsicInfo())
- return TII->getName(IID);
- llvm_unreachable("Invalid intrinsic ID");
- }
-
- case ISD::BUILD_VECTOR: return "BUILD_VECTOR";
- case ISD::TargetConstant: return "TargetConstant";
- case ISD::TargetConstantFP:return "TargetConstantFP";
- case ISD::TargetGlobalAddress: return "TargetGlobalAddress";
- case ISD::TargetGlobalTLSAddress: return "TargetGlobalTLSAddress";
- case ISD::TargetFrameIndex: return "TargetFrameIndex";
- case ISD::TargetJumpTable: return "TargetJumpTable";
- case ISD::TargetConstantPool: return "TargetConstantPool";
- case ISD::TargetExternalSymbol: return "TargetExternalSymbol";
- case ISD::TargetBlockAddress: return "TargetBlockAddress";
-
- case ISD::CopyToReg: return "CopyToReg";
- case ISD::CopyFromReg: return "CopyFromReg";
- case ISD::UNDEF: return "undef";
- case ISD::MERGE_VALUES: return "merge_values";
- case ISD::INLINEASM: return "inlineasm";
- case ISD::EH_LABEL: return "eh_label";
- case ISD::HANDLENODE: return "handlenode";
-
- // Unary operators
- case ISD::FABS: return "fabs";
- case ISD::FNEG: return "fneg";
- case ISD::FSQRT: return "fsqrt";
- case ISD::FSIN: return "fsin";
- case ISD::FCOS: return "fcos";
- case ISD::FTRUNC: return "ftrunc";
- case ISD::FFLOOR: return "ffloor";
- case ISD::FCEIL: return "fceil";
- case ISD::FRINT: return "frint";
- case ISD::FNEARBYINT: return "fnearbyint";
- case ISD::FEXP: return "fexp";
- case ISD::FEXP2: return "fexp2";
- case ISD::FLOG: return "flog";
- case ISD::FLOG2: return "flog2";
- case ISD::FLOG10: return "flog10";
-
- // Binary operators
- case ISD::ADD: return "add";
- case ISD::SUB: return "sub";
- case ISD::MUL: return "mul";
- case ISD::MULHU: return "mulhu";
- case ISD::MULHS: return "mulhs";
- case ISD::SDIV: return "sdiv";
- case ISD::UDIV: return "udiv";
- case ISD::SREM: return "srem";
- case ISD::UREM: return "urem";
- case ISD::SMUL_LOHI: return "smul_lohi";
- case ISD::UMUL_LOHI: return "umul_lohi";
- case ISD::SDIVREM: return "sdivrem";
- case ISD::UDIVREM: return "udivrem";
- case ISD::AND: return "and";
- case ISD::OR: return "or";
- case ISD::XOR: return "xor";
- case ISD::SHL: return "shl";
- case ISD::SRA: return "sra";
- case ISD::SRL: return "srl";
- case ISD::ROTL: return "rotl";
- case ISD::ROTR: return "rotr";
- case ISD::FADD: return "fadd";
- case ISD::FSUB: return "fsub";
- case ISD::FMUL: return "fmul";
- case ISD::FDIV: return "fdiv";
- case ISD::FMA: return "fma";
- case ISD::FREM: return "frem";
- case ISD::FCOPYSIGN: return "fcopysign";
- case ISD::FGETSIGN: return "fgetsign";
- case ISD::FPOW: return "fpow";
-
- case ISD::FPOWI: return "fpowi";
- case ISD::SETCC: return "setcc";
- case ISD::SELECT: return "select";
- case ISD::VSELECT: return "vselect";
- case ISD::SELECT_CC: return "select_cc";
- case ISD::INSERT_VECTOR_ELT: return "insert_vector_elt";
- case ISD::EXTRACT_VECTOR_ELT: return "extract_vector_elt";
- case ISD::CONCAT_VECTORS: return "concat_vectors";
- case ISD::INSERT_SUBVECTOR: return "insert_subvector";
- case ISD::EXTRACT_SUBVECTOR: return "extract_subvector";
- case ISD::SCALAR_TO_VECTOR: return "scalar_to_vector";
- case ISD::VECTOR_SHUFFLE: return "vector_shuffle";
- case ISD::CARRY_FALSE: return "carry_false";
- case ISD::ADDC: return "addc";
- case ISD::ADDE: return "adde";
- case ISD::SADDO: return "saddo";
- case ISD::UADDO: return "uaddo";
- case ISD::SSUBO: return "ssubo";
- case ISD::USUBO: return "usubo";
- case ISD::SMULO: return "smulo";
- case ISD::UMULO: return "umulo";
- case ISD::SUBC: return "subc";
- case ISD::SUBE: return "sube";
- case ISD::SHL_PARTS: return "shl_parts";
- case ISD::SRA_PARTS: return "sra_parts";
- case ISD::SRL_PARTS: return "srl_parts";
-
- // Conversion operators.
- case ISD::SIGN_EXTEND: return "sign_extend";
- case ISD::ZERO_EXTEND: return "zero_extend";
- case ISD::ANY_EXTEND: return "any_extend";
- case ISD::SIGN_EXTEND_INREG: return "sign_extend_inreg";
- case ISD::TRUNCATE: return "truncate";
- case ISD::FP_ROUND: return "fp_round";
- case ISD::FLT_ROUNDS_: return "flt_rounds";
- case ISD::FP_ROUND_INREG: return "fp_round_inreg";
- case ISD::FP_EXTEND: return "fp_extend";
-
- case ISD::SINT_TO_FP: return "sint_to_fp";
- case ISD::UINT_TO_FP: return "uint_to_fp";
- case ISD::FP_TO_SINT: return "fp_to_sint";
- case ISD::FP_TO_UINT: return "fp_to_uint";
- case ISD::BITCAST: return "bitcast";
- case ISD::FP16_TO_FP32: return "fp16_to_fp32";
- case ISD::FP32_TO_FP16: return "fp32_to_fp16";
-
- case ISD::CONVERT_RNDSAT: {
- switch (cast<CvtRndSatSDNode>(this)->getCvtCode()) {
- default: llvm_unreachable("Unknown cvt code!");
- case ISD::CVT_FF: return "cvt_ff";
- case ISD::CVT_FS: return "cvt_fs";
- case ISD::CVT_FU: return "cvt_fu";
- case ISD::CVT_SF: return "cvt_sf";
- case ISD::CVT_UF: return "cvt_uf";
- case ISD::CVT_SS: return "cvt_ss";
- case ISD::CVT_SU: return "cvt_su";
- case ISD::CVT_US: return "cvt_us";
- case ISD::CVT_UU: return "cvt_uu";
- }
- }
-
- // Control flow instructions
- case ISD::BR: return "br";
- case ISD::BRIND: return "brind";
- case ISD::BR_JT: return "br_jt";
- case ISD::BRCOND: return "brcond";
- case ISD::BR_CC: return "br_cc";
- case ISD::CALLSEQ_START: return "callseq_start";
- case ISD::CALLSEQ_END: return "callseq_end";
-
- // Other operators
- case ISD::LOAD: return "load";
- case ISD::STORE: return "store";
- case ISD::VAARG: return "vaarg";
- case ISD::VACOPY: return "vacopy";
- case ISD::VAEND: return "vaend";
- case ISD::VASTART: return "vastart";
- case ISD::DYNAMIC_STACKALLOC: return "dynamic_stackalloc";
- case ISD::EXTRACT_ELEMENT: return "extract_element";
- case ISD::BUILD_PAIR: return "build_pair";
- case ISD::STACKSAVE: return "stacksave";
- case ISD::STACKRESTORE: return "stackrestore";
- case ISD::TRAP: return "trap";
-
- // Bit manipulation
- case ISD::BSWAP: return "bswap";
- case ISD::CTPOP: return "ctpop";
- case ISD::CTTZ: return "cttz";
- case ISD::CTLZ: return "ctlz";
-
- // Trampolines
- case ISD::INIT_TRAMPOLINE: return "init_trampoline";
- case ISD::ADJUST_TRAMPOLINE: return "adjust_trampoline";
-
- case ISD::CONDCODE:
- switch (cast<CondCodeSDNode>(this)->get()) {
- default: llvm_unreachable("Unknown setcc condition!");
- case ISD::SETOEQ: return "setoeq";
- case ISD::SETOGT: return "setogt";
- case ISD::SETOGE: return "setoge";
- case ISD::SETOLT: return "setolt";
- case ISD::SETOLE: return "setole";
- case ISD::SETONE: return "setone";
-
- case ISD::SETO: return "seto";
- case ISD::SETUO: return "setuo";
- case ISD::SETUEQ: return "setue";
- case ISD::SETUGT: return "setugt";
- case ISD::SETUGE: return "setuge";
- case ISD::SETULT: return "setult";
- case ISD::SETULE: return "setule";
- case ISD::SETUNE: return "setune";
-
- case ISD::SETEQ: return "seteq";
- case ISD::SETGT: return "setgt";
- case ISD::SETGE: return "setge";
- case ISD::SETLT: return "setlt";
- case ISD::SETLE: return "setle";
- case ISD::SETNE: return "setne";
- }
- }
-}
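
A reading aid for the condition-code names above: 'o' means ordered (neither operand is NaN), 'u' means unordered (either operand may be NaN), and the bare seteq/setgt/... forms are the integer comparisons, where NaN ordering does not apply. A minimal C++ sketch of the two floating-point "equal" flavors:

// IEEE == is already an ordered comparison: false whenever a NaN is present.
bool setoeq(double a, double b) { return a == b; }
// "Not less and not greater" is equal-or-unordered: true on NaN input.
bool setueq(double a, double b) { return !(a < b) && !(a > b); }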
-
-const char *SDNode::getIndexedModeName(ISD::MemIndexedMode AM) {
- switch (AM) {
- default:
- return "";
- case ISD::PRE_INC:
- return "<pre-inc>";
- case ISD::PRE_DEC:
- return "<pre-dec>";
- case ISD::POST_INC:
- return "<post-inc>";
- case ISD::POST_DEC:
- return "<post-dec>";
- }
-}
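
The four indexed modes describe when the base pointer is updated relative to the memory access. A rough C++ analogy, illustrative only and not the DAG representation:

// <pre-inc>  : advance the pointer, then load from the new address.
int preInc(int *&p)  { p += 1; return *p; }
// <post-inc> : load from the original address, then advance the pointer.
int postInc(int *&p) { int v = *p; p += 1; return v; }
// <pre-dec> and <post-dec> are the same shapes with p -= 1.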
-
-std::string ISD::ArgFlagsTy::getArgFlagsString() {
- std::string S = "< ";
-
- if (isZExt())
- S += "zext ";
- if (isSExt())
- S += "sext ";
- if (isInReg())
- S += "inreg ";
- if (isSRet())
- S += "sret ";
- if (isByVal())
- S += "byval ";
- if (isNest())
- S += "nest ";
- if (getByValAlign())
- S += "byval-align:" + utostr(getByValAlign()) + " ";
- if (getOrigAlign())
- S += "orig-align:" + utostr(getOrigAlign()) + " ";
- if (getByValSize())
- S += "byval-size:" + utostr(getByValSize()) + " ";
- return S + ">";
-}
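
For illustration, the flag string built above renders like this (a hypothetical usage sketch; the setByVal*/setByValAlign setter names are assumed from this LLVM version's ArgFlagsTy, not shown in the diff):

ISD::ArgFlagsTy Flags;
Flags.setByVal();
Flags.setByValAlign(8);
Flags.setByValSize(16);
// Flags.getArgFlagsString() == "< byval byval-align:8 byval-size:16 >"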
-
-void SDNode::dump() const { dump(0); }
-void SDNode::dump(const SelectionDAG *G) const {
- print(dbgs(), G);
- dbgs() << '\n';
-}
-
-void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
- OS << (void*)this << ": ";
-
- for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
- if (i) OS << ",";
- if (getValueType(i) == MVT::Other)
- OS << "ch";
- else
- OS << getValueType(i).getEVTString();
- }
- OS << " = " << getOperationName(G);
-}
-
-void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
- if (const MachineSDNode *MN = dyn_cast<MachineSDNode>(this)) {
- if (!MN->memoperands_empty()) {
- OS << "<";
- OS << "Mem:";
- for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
- e = MN->memoperands_end(); i != e; ++i) {
- OS << **i;
- if (llvm::next(i) != e)
- OS << " ";
- }
- OS << ">";
- }
- } else if (const ShuffleVectorSDNode *SVN =
- dyn_cast<ShuffleVectorSDNode>(this)) {
- OS << "<";
- for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
- int Idx = SVN->getMaskElt(i);
- if (i) OS << ",";
- if (Idx < 0)
- OS << "u";
- else
- OS << Idx;
- }
- OS << ">";
- } else if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
- OS << '<' << CSDN->getAPIntValue() << '>';
- } else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
- if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEsingle)
- OS << '<' << CSDN->getValueAPF().convertToFloat() << '>';
- else if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEdouble)
- OS << '<' << CSDN->getValueAPF().convertToDouble() << '>';
- else {
- OS << "<APFloat(";
- CSDN->getValueAPF().bitcastToAPInt().dump();
- OS << ")>";
- }
- } else if (const GlobalAddressSDNode *GADN =
- dyn_cast<GlobalAddressSDNode>(this)) {
- int64_t offset = GADN->getOffset();
- OS << '<';
- WriteAsOperand(OS, GADN->getGlobal());
- OS << '>';
- if (offset > 0)
- OS << " + " << offset;
- else
- OS << " " << offset;
- if (unsigned int TF = GADN->getTargetFlags())
- OS << " [TF=" << TF << ']';
- } else if (const FrameIndexSDNode *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
- OS << "<" << FIDN->getIndex() << ">";
- } else if (const JumpTableSDNode *JTDN = dyn_cast<JumpTableSDNode>(this)) {
- OS << "<" << JTDN->getIndex() << ">";
- if (unsigned int TF = JTDN->getTargetFlags())
- OS << " [TF=" << TF << ']';
- } else if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(this)){
- int offset = CP->getOffset();
- if (CP->isMachineConstantPoolEntry())
- OS << "<" << *CP->getMachineCPVal() << ">";
- else
- OS << "<" << *CP->getConstVal() << ">";
- if (offset > 0)
- OS << " + " << offset;
- else
- OS << " " << offset;
- if (unsigned int TF = CP->getTargetFlags())
- OS << " [TF=" << TF << ']';
- } else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
- OS << "<";
- const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock();
- if (LBB)
- OS << LBB->getName() << " ";
- OS << (const void*)BBDN->getBasicBlock() << ">";
- } else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
- OS << ' ' << PrintReg(R->getReg(), G ? G->getTarget().getRegisterInfo() :0);
- } else if (const ExternalSymbolSDNode *ES =
- dyn_cast<ExternalSymbolSDNode>(this)) {
- OS << "'" << ES->getSymbol() << "'";
- if (unsigned int TF = ES->getTargetFlags())
- OS << " [TF=" << TF << ']';
- } else if (const SrcValueSDNode *M = dyn_cast<SrcValueSDNode>(this)) {
- if (M->getValue())
- OS << "<" << M->getValue() << ">";
- else
- OS << "<null>";
- } else if (const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(this)) {
- if (MD->getMD())
- OS << "<" << MD->getMD() << ">";
- else
- OS << "<null>";
- } else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
- OS << ":" << N->getVT().getEVTString();
- }
- else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
- OS << "<" << *LD->getMemOperand();
-
- bool doExt = true;
- switch (LD->getExtensionType()) {
- default: doExt = false; break;
- case ISD::EXTLOAD: OS << ", anyext"; break;
- case ISD::SEXTLOAD: OS << ", sext"; break;
- case ISD::ZEXTLOAD: OS << ", zext"; break;
- }
- if (doExt)
- OS << " from " << LD->getMemoryVT().getEVTString();
-
- const char *AM = getIndexedModeName(LD->getAddressingMode());
- if (*AM)
- OS << ", " << AM;
-
- OS << ">";
- } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
- OS << "<" << *ST->getMemOperand();
-
- if (ST->isTruncatingStore())
- OS << ", trunc to " << ST->getMemoryVT().getEVTString();
-
- const char *AM = getIndexedModeName(ST->getAddressingMode());
- if (*AM)
- OS << ", " << AM;
-
- OS << ">";
- } else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
- OS << "<" << *M->getMemOperand() << ">";
- } else if (const BlockAddressSDNode *BA =
- dyn_cast<BlockAddressSDNode>(this)) {
- OS << "<";
- WriteAsOperand(OS, BA->getBlockAddress()->getFunction(), false);
- OS << ", ";
- WriteAsOperand(OS, BA->getBlockAddress()->getBasicBlock(), false);
- OS << ">";
- if (unsigned int TF = BA->getTargetFlags())
- OS << " [TF=" << TF << ']';
- }
-
- if (G)
- if (unsigned Order = G->GetOrdering(this))
- OS << " [ORD=" << Order << ']';
-
- if (getNodeId() != -1)
- OS << " [ID=" << getNodeId() << ']';
-
- DebugLoc dl = getDebugLoc();
- if (G && !dl.isUnknown()) {
- DIScope
- Scope(dl.getScope(G->getMachineFunction().getFunction()->getContext()));
- OS << " dbg:";
- // Omit the directory, since it's usually long and uninteresting.
- if (Scope.Verify())
- OS << Scope.getFilename();
- else
- OS << "<unknown>";
- OS << ':' << dl.getLine();
- if (dl.getCol() != 0)
- OS << ':' << dl.getCol();
- }
-}
-
-void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
- print_types(OS, G);
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
- if (i) OS << ", "; else OS << " ";
- OS << (void*)getOperand(i).getNode();
- if (unsigned RN = getOperand(i).getResNo())
- OS << ":" << RN;
- }
- print_details(OS, G);
-}
-
-static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
- const SelectionDAG *G, unsigned depth,
- unsigned indent) {
- if (depth == 0)
- return;
-
- OS.indent(indent);
-
- N->print(OS, G);
-
- if (depth < 1)
- return;
-
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- // Don't follow chain operands.
- if (N->getOperand(i).getValueType() == MVT::Other)
- continue;
- OS << '\n';
- printrWithDepthHelper(OS, N->getOperand(i).getNode(), G, depth-1, indent+2);
- }
-}
-
-void SDNode::printrWithDepth(raw_ostream &OS, const SelectionDAG *G,
- unsigned depth) const {
- printrWithDepthHelper(OS, this, G, depth, 0);
-}
-
-void SDNode::printrFull(raw_ostream &OS, const SelectionDAG *G) const {
- // Don't print impossibly deep things.
- printrWithDepth(OS, G, 10);
-}
-
-void SDNode::dumprWithDepth(const SelectionDAG *G, unsigned depth) const {
- printrWithDepth(dbgs(), G, depth);
-}
-
-void SDNode::dumprFull(const SelectionDAG *G) const {
- // Don't print impossibly deep things.
- dumprWithDepth(G, 10);
-}
-
-static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- if (N->getOperand(i).getNode()->hasOneUse())
- DumpNodes(N->getOperand(i).getNode(), indent+2, G);
- else
- dbgs() << "\n" << std::string(indent+2, ' ')
- << (void*)N->getOperand(i).getNode() << ": <multiple use>";
-
-
- dbgs() << "\n";
- dbgs().indent(indent);
- N->dump(G);
-}
-
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
assert(N->getNumValues() == 1 &&
"Can't unroll a vector with multiple results!");
@@ -6527,20 +6003,14 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
const GlobalValue *GV;
int64_t GVOffset = 0;
if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
- // If GV has specified alignment, then use it. Otherwise, use the preferred
- // alignment.
- unsigned Align = GV->getAlignment();
- if (!Align) {
- if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
- if (GVar->hasInitializer()) {
- const TargetData *TD = TLI.getTargetData();
- Align = TD->getPreferredAlignment(GVar);
- }
- }
- if (!Align)
- Align = TLI.getTargetData()->getABITypeAlignment(GV->getType());
- }
- return MinAlign(Align, GVOffset);
+ unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
+ APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
+ llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
+ TLI.getTargetData());
+ unsigned AlignBits = KnownZero.countTrailingOnes();
+ unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
+ if (Align)
+ return MinAlign(Align, GVOffset);
}
// If this is a direct reference to a stack slot, use information about the
@@ -6566,74 +6036,6 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
return 0;
}
-void SelectionDAG::dump() const {
- dbgs() << "SelectionDAG has " << AllNodes.size() << " nodes:";
-
- for (allnodes_const_iterator I = allnodes_begin(), E = allnodes_end();
- I != E; ++I) {
- const SDNode *N = I;
- if (!N->hasOneUse() && N != getRoot().getNode())
- DumpNodes(N, 2, this);
- }
-
- if (getRoot().getNode()) DumpNodes(getRoot().getNode(), 2, this);
-
- dbgs() << "\n\n";
-}
-
-void SDNode::printr(raw_ostream &OS, const SelectionDAG *G) const {
- print_types(OS, G);
- print_details(OS, G);
-}
-
-typedef SmallPtrSet<const SDNode *, 128> VisitedSDNodeSet;
-static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
- const SelectionDAG *G, VisitedSDNodeSet &once) {
- if (!once.insert(N)) // If we've been here before, return now.
- return;
-
- // Dump the current SDNode, but don't end the line yet.
- OS << std::string(indent, ' ');
- N->printr(OS, G);
-
- // Having printed this SDNode, walk the children:
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- const SDNode *child = N->getOperand(i).getNode();
-
- if (i) OS << ",";
- OS << " ";
-
- if (child->getNumOperands() == 0) {
- // This child has no grandchildren; print it inline right here.
- child->printr(OS, G);
- once.insert(child);
- } else { // Just the address. FIXME: also print the child's opcode.
- OS << (void*)child;
- if (unsigned RN = N->getOperand(i).getResNo())
- OS << ":" << RN;
- }
- }
-
- OS << "\n";
-
- // Dump children that have grandchildren on their own line(s).
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- const SDNode *child = N->getOperand(i).getNode();
- DumpNodesr(OS, child, indent+2, G, once);
- }
-}
-
-void SDNode::dumpr() const {
- VisitedSDNodeSet once;
- DumpNodesr(dbgs(), this, 0, 0, once);
-}
-
-void SDNode::dumpr(const SelectionDAG *G) const {
- VisitedSDNodeSet once;
- DumpNodesr(dbgs(), this, 0, G, once);
-}
-
-
// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
return getGlobal()->getType()->getAddressSpace();
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 095b400..94cb958 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -41,13 +41,13 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
@@ -197,7 +197,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
// FP_ROUND's are always exact here.
if (ValueVT.bitsLT(Val.getValueType()))
return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
- DAG.getIntPtrConstant(1));
+ DAG.getTargetConstant(1, TLI.getPointerTy()));
return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
}
@@ -206,7 +206,6 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
llvm_unreachable("Unknown mismatch!");
- return SDValue();
}
/// getCopyFromParts - Create a value that contains the specified legal parts
@@ -353,10 +352,13 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
assert(NumParts == 1 && "Do not know what to promote to!");
Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
} else {
- assert(PartVT.isInteger() && ValueVT.isInteger() &&
+ assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
+ ValueVT.isInteger() &&
"Unknown mismatch!");
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
+ if (PartVT == MVT::x86mmx)
+ Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
}
} else if (PartBits == ValueVT.getSizeInBits()) {
// Different types of the same size.
@@ -364,10 +366,13 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
// If the parts cover less bits than value has, truncate the value.
- assert(PartVT.isInteger() && ValueVT.isInteger() &&
+ assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
+ ValueVT.isInteger() &&
"Unknown mismatch!");
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
+ if (PartVT == MVT::x86mmx)
+ Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
}
// The value may have changed - recompute ValueVT.
@@ -813,9 +818,11 @@ void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
}
}
-void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
+void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
+ const TargetLibraryInfo *li) {
AA = &aa;
GFI = gfi;
+ LibInfo = li;
TD = DAG.getTarget().getTargetData();
LPadToCallSiteMap.clear();
}
@@ -964,7 +971,7 @@ void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
DAG.AddDbgValue(SDV, Val.getNode(), false);
}
} else
- DEBUG(dbgs() << "Dropping debug info for " << DI);
+ DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
DanglingDebugInfoMap[V] = DanglingDebugInfo();
}
}
@@ -1054,6 +1061,23 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
return DAG.getMergeValues(&Constants[0], Constants.size(),
getCurDebugLoc());
}
+
+ if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(C)) {
+ SmallVector<SDValue, 4> Ops;
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
+ // Add each leaf value from the operand to the Ops list
+ // to form a flattened list of all the values.
+ for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
+ Ops.push_back(SDValue(Val, i));
+ }
+
+ if (isa<ArrayType>(CDS->getType()))
+ return DAG.getMergeValues(&Ops[0], Ops.size(), getCurDebugLoc());
+ return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
+ VT, &Ops[0], Ops.size());
+ }
if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
@@ -1088,9 +1112,9 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
// Now that we know the number and type of the elements, get that number of
// elements into the Ops array based on what kind of constant it is.
SmallVector<SDValue, 16> Ops;
- if (const ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
for (unsigned i = 0; i != NumElements; ++i)
- Ops.push_back(getValue(CP->getOperand(i)));
+ Ops.push_back(getValue(CV->getOperand(i)));
} else {
assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
EVT EltVT = TLI.getValueType(VecTy->getElementType());
@@ -1126,7 +1150,6 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
}
llvm_unreachable("Can't get register for value!");
- return SDValue();
}
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
@@ -1285,8 +1308,8 @@ bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
}
/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
-uint32_t SelectionDAGBuilder::getEdgeWeight(MachineBasicBlock *Src,
- MachineBasicBlock *Dst) {
+uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const {
BranchProbabilityInfo *BPI = FuncInfo.BPI;
if (!BPI)
return 0;
@@ -1336,6 +1359,8 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
Condition = getICmpCondCode(IC->getPredicate());
} else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
Condition = getFCmpCondCode(FC->getPredicate());
+ if (TM.Options.NoNaNsFPMath)
+ Condition = getFCmpCodeWithoutNaN(Condition);
} else {
Condition = ISD::SETEQ; // silence warning.
llvm_unreachable("Unknown compare instruction");
@@ -1811,8 +1836,8 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
CopyToExportRegsIfNeeded(&I);
// Update successor info
- InvokeMBB->addSuccessor(Return);
- InvokeMBB->addSuccessor(LandingPad);
+ addSuccessorWithWeight(InvokeMBB, Return);
+ addSuccessorWithWeight(InvokeMBB, LandingPad);
// Drop into normal successor.
DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
@@ -1820,9 +1845,6 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
DAG.getBasicBlock(Return)));
}
-void SelectionDAGBuilder::visitUnwind(const UnwindInst &I) {
-}
-
void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
}
@@ -1835,6 +1857,12 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
AddLandingPadInfo(LP, MMI, MBB);
+ // If there aren't registers to copy the values into (e.g., during SjLj
+ // exceptions), then don't bother to create these DAG nodes.
+ if (TLI.getExceptionPointerRegister() == 0 &&
+ TLI.getExceptionSelectorRegister() == 0)
+ return;
+
SmallVector<EVT, 2> ValueVTs;
ComputeValueVTs(TLI, LP.getType(), ValueVTs);
@@ -2003,7 +2031,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
}
static inline bool areJTsAllowed(const TargetLowering &TLI) {
- return !DisableJumpTables &&
+ return !TLI.getTargetMachine().Options.DisableJumpTables &&
(TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}
@@ -2190,7 +2218,7 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
CaseRange LHSR(CR.Range.first, Pivot);
CaseRange RHSR(Pivot, CR.Range.second);
- Constant *C = Pivot->Low;
+ const Constant *C = Pivot->Low;
MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
// We know that we branch to the LHS if the Value being switched on is
@@ -2383,14 +2411,14 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
BranchProbabilityInfo *BPI = FuncInfo.BPI;
// Start with "simple" cases
- for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
- BasicBlock *SuccBB = SI.getSuccessor(i);
+ for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
+ i != e; ++i) {
+ const BasicBlock *SuccBB = i.getCaseSuccessor();
MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
uint32_t ExtraWeight = BPI ? BPI->getEdgeWeight(SI.getParent(), SuccBB) : 0;
- Cases.push_back(Case(SI.getSuccessorValue(i),
- SI.getSuccessorValue(i),
+ Cases.push_back(Case(i.getCaseValue(), i.getCaseValue(),
SMBB, ExtraWeight));
}
std::sort(Cases.begin(), Cases.end(), CaseCmp());
@@ -2457,7 +2485,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
// If there is only the default destination, branch to it if it is not the
// next basic block. Otherwise, just fall through.
- if (SI.getNumCases() == 1) {
+ if (!SI.getNumCases()) {
// Update machine-CFG edges.
// If this is not a fall-through branch, emit the branch.
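
The guard changes because the SwitchInst API stopped counting the default destination as a case. An illustrative comparison, for the assumed IR below:

// switch i32 %x, label %def [ i32 1, label %a ]
//   old API: SI.getNumCases() == 2   // the default occupied slot 0
//   new API: SI.getNumCases() == 1   // explicit cases only
// So "default destination only" used to be getNumCases() == 1 and is now:
if (!SI.getNumCases()) {
  // branch straight to the default destination (or fall through)
}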
@@ -2626,6 +2654,8 @@ void SelectionDAGBuilder::visitFCmp(const User &I) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Condition = getFCmpCondCode(predicate);
+ if (TM.Options.NoNaNsFPMath)
+ Condition = getFCmpCodeWithoutNaN(Condition);
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
}
@@ -2685,11 +2715,12 @@ void SelectionDAGBuilder::visitFPTrunc(const User &I) {
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
- DestVT, N, DAG.getIntPtrConstant(0)));
+ DestVT, N,
+ DAG.getTargetConstant(0, TLI.getPointerTy())));
}
void SelectionDAGBuilder::visitFPExt(const User &I){
- // FPTrunc is never a no-op cast, no need to check
+ // FPExt is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
@@ -2772,33 +2803,25 @@ void SelectionDAGBuilder::visitExtractElement(const User &I) {
TLI.getValueType(I.getType()), InVec, InIdx));
}
-// Utility for visitShuffleVector - Returns true if the mask is mask starting
-// from SIndx and increasing to the element length (undefs are allowed).
-static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
- unsigned MaskNumElts = Mask.size();
- for (unsigned i = 0; i != MaskNumElts; ++i)
- if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
+// Utility for visitShuffleVector - Return true if every element in Mask,
+// beginning at position Pos and ending at Pos+Size-1, falls within the
+// specified sequential range [Low, Low+Size), or is undef.
+static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
+ unsigned Pos, unsigned Size, int Low) {
+ for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
+ if (Mask[i] >= 0 && Mask[i] != Low)
return false;
return true;
}
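
A quick check of the helper's contract, as a standalone sketch that re-implements it over std::vector (undef lanes are encoded as -1 and always match):

#include <vector>
#include <cassert>

static bool isSequentialInRange(const std::vector<int> &Mask,
                                unsigned Pos, unsigned Size, int Low) {
  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
    if (Mask[i] >= 0 && Mask[i] != Low)  // -1 (undef) never fails the test
      return false;
  return true;
}

int main() {
  // Shuffle of two 4-element vectors; lane 2 is undef.
  std::vector<int> M = {0, 1, -1, 3, 4, 5, 6, 7};
  assert(isSequentialInRange(M, 0, 4, 0)); // low half comes from Src1
  assert(isSequentialInRange(M, 4, 4, 4)); // high half from Src2 -> concat
  // A mask of {4,5,6,7,0,1,2,3} would hit the swapped concat(Src2, Src1)
  // case handled in visitShuffleVector below.
}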
void SelectionDAGBuilder::visitShuffleVector(const User &I) {
- SmallVector<int, 8> Mask;
SDValue Src1 = getValue(I.getOperand(0));
SDValue Src2 = getValue(I.getOperand(1));
- // Convert the ConstantVector mask operand into an array of ints, with -1
- // representing undef values.
- SmallVector<Constant*, 8> MaskElts;
- cast<Constant>(I.getOperand(2))->getVectorElements(MaskElts);
- unsigned MaskNumElts = MaskElts.size();
- for (unsigned i = 0; i != MaskNumElts; ++i) {
- if (isa<UndefValue>(MaskElts[i]))
- Mask.push_back(-1);
- else
- Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
- }
-
+ SmallVector<int, 8> Mask;
+ ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
+ unsigned MaskNumElts = Mask.size();
+
EVT VT = TLI.getValueType(I.getType());
EVT SrcVT = Src1.getValueType();
unsigned SrcNumElts = SrcVT.getVectorNumElements();
@@ -2814,11 +2837,23 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
// Mask is longer than the source vectors and is a multiple of the source
// vectors. We can use concatenate vector to make the mask and vectors
// lengths match.
- if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
- // The shuffle is concatenating two vectors together.
- setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
- VT, Src1, Src2));
- return;
+ if (SrcNumElts*2 == MaskNumElts) {
+ // First check for Src1 in low and Src2 in high
+ if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
+ isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
+ // The shuffle is concatenating two vectors together.
+ setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
+ VT, Src1, Src2));
+ return;
+ }
+ // Then check for Src2 in low and Src1 in high
+ if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
+ isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
+ // The shuffle is concatenating two vectors together.
+ setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
+ VT, Src2, Src1));
+ return;
+ }
}
// Pad both vectors with undefs to make them the same length as the mask.
@@ -2843,10 +2878,9 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
SmallVector<int, 8> MappedOps;
for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
- if (Idx < (int)SrcNumElts)
- MappedOps.push_back(Idx);
- else
- MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
+ if (Idx >= (int)SrcNumElts)
+ Idx -= SrcNumElts - MaskNumElts;
+ MappedOps.push_back(Idx);
}
setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
@@ -2858,13 +2892,13 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
// Analyze the access pattern of the vector to see if we can extract
// two subvectors and do the shuffle. The analysis is done by calculating
// the range of elements the mask access on both vectors.
- int MinRange[2] = { static_cast<int>(SrcNumElts+1),
- static_cast<int>(SrcNumElts+1)};
+ int MinRange[2] = { static_cast<int>(SrcNumElts),
+ static_cast<int>(SrcNumElts)};
int MaxRange[2] = {-1, -1};
for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
- int Input = 0;
+ unsigned Input = 0;
if (Idx < 0)
continue;
@@ -2880,35 +2914,31 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
// Check if the access is smaller than the vector size and can we find
// a reasonable extract index.
- int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Can not
- // Extract.
+ int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Cannot
+ // extract.
int StartIdx[2]; // StartIdx to extract from
- for (int Input=0; Input < 2; ++Input) {
- if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
+ for (unsigned Input = 0; Input < 2; ++Input) {
+ if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
RangeUse[Input] = 0; // Unused
StartIdx[Input] = 0;
- } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
- // Fits within range but we should see if we can find a good
- // start index that is a multiple of the mask length.
- if (MaxRange[Input] < (int)MaskNumElts) {
- RangeUse[Input] = 1; // Extract from beginning of the vector
- StartIdx[Input] = 0;
- } else {
- StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
- if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
- StartIdx[Input] + MaskNumElts <= SrcNumElts)
- RangeUse[Input] = 1; // Extract from a multiple of the mask length.
- }
+ continue;
}
+
+ // Find a good start index that is a multiple of the mask length. Then
+ // see if the rest of the elements are in range.
+ StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
+ if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
+ StartIdx[Input] + MaskNumElts <= SrcNumElts)
+ RangeUse[Input] = 1; // Extract from a multiple of the mask length.
}
if (RangeUse[0] == 0 && RangeUse[1] == 0) {
setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
return;
}
- else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
+ if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
// Extract appropriate subvector and generate a vector shuffle
- for (int Input=0; Input < 2; ++Input) {
+ for (unsigned Input = 0; Input < 2; ++Input) {
SDValue &Src = Input == 0 ? Src1 : Src2;
if (RangeUse[Input] == 0)
Src = DAG.getUNDEF(VT);
@@ -2921,12 +2951,13 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
SmallVector<int, 8> MappedOps;
for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
- if (Idx < 0)
- MappedOps.push_back(Idx);
- else if (Idx < (int)SrcNumElts)
- MappedOps.push_back(Idx - StartIdx[0]);
- else
- MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
+ if (Idx >= 0) {
+ if (Idx < (int)SrcNumElts)
+ Idx -= StartIdx[0];
+ else
+ Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
+ }
+ MappedOps.push_back(Idx);
}
setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
@@ -2942,22 +2973,20 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
EVT PtrVT = TLI.getPointerTy();
SmallVector<SDValue,8> Ops;
for (unsigned i = 0; i != MaskNumElts; ++i) {
- if (Mask[i] < 0) {
- Ops.push_back(DAG.getUNDEF(EltVT));
- } else {
- int Idx = Mask[i];
- SDValue Res;
+ int Idx = Mask[i];
+ SDValue Res;
- if (Idx < (int)SrcNumElts)
- Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
- EltVT, Src1, DAG.getConstant(Idx, PtrVT));
- else
- Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
- EltVT, Src2,
- DAG.getConstant(Idx - SrcNumElts, PtrVT));
+ if (Idx < 0) {
+ Res = DAG.getUNDEF(EltVT);
+ } else {
+ SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
+ if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
- Ops.push_back(Res);
+ Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
+ EltVT, Src, DAG.getConstant(Idx, PtrVT));
}
+
+ Ops.push_back(Res);
}
setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
@@ -3042,7 +3071,9 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
SDValue N = getValue(I.getOperand(0));
- Type *Ty = I.getOperand(0)->getType();
+ // Note that the pointer operand may be a vector of pointers. Take the scalar
+ // element which holds a pointer.
+ Type *Ty = I.getOperand(0)->getType()->getScalarType();
for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
@@ -3096,7 +3127,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
unsigned Amt = ElementSize.logBase2();
IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
N.getValueType(), IdxN,
- DAG.getConstant(Amt, TLI.getPointerTy()));
+ DAG.getConstant(Amt, IdxN.getValueType()));
} else {
SDValue Scale = DAG.getConstant(ElementSize, TLI.getPointerTy());
IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
@@ -3175,8 +3206,10 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata("nontemporal") != 0;
+ bool isInvariant = I.getMetadata("invariant.load") != 0;
unsigned Alignment = I.getAlignment();
const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
+ const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
@@ -3224,7 +3257,8 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
DAG.getConstant(Offsets[i], PtrVT));
SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
- isNonTemporal, Alignment, TBAAInfo);
+ isNonTemporal, isInvariant, Alignment, TBAAInfo,
+ Ranges);
Values[i] = L;
Chains[ChainI] = L.getValue(1);
@@ -3358,7 +3392,7 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
DebugLoc dl = getCurDebugLoc();
ISD::NodeType NT;
switch (I.getOperation()) {
- default: llvm_unreachable("Unknown atomicrmw operation"); return;
+ default: llvm_unreachable("Unknown atomicrmw operation");
case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
@@ -3496,24 +3530,16 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
// Add the intrinsic ID as an integer operand if it's not a target intrinsic.
if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
Info.opc == ISD::INTRINSIC_W_CHAIN)
- Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
+ Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI.getPointerTy()));
// Add all operands of the call to the operand list.
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
SDValue Op = getValue(I.getArgOperand(i));
- assert(TLI.isTypeLegal(Op.getValueType()) &&
- "Intrinsic uses a non-legal type?");
Ops.push_back(Op);
}
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I.getType(), ValueVTs);
-#ifndef NDEBUG
- for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
- assert(TLI.isTypeLegal(ValueVTs[Val]) &&
- "Intrinsic uses a non-legal type?");
- }
-#endif // NDEBUG
if (HasChain)
ValueVTs.push_back(MVT::Other);
@@ -3556,6 +3582,12 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}
setValue(&I, Result);
+ } else {
+ // Assign order to result here. If the intrinsic does not produce a result,
+ // it won't be mapped to an SDNode and visit() will not assign it an order
+ // number.
+ ++SDNodeOrder;
+ AssignOrderingToNode(Result.getNode());
}
}
@@ -3597,17 +3629,6 @@ getF32Constant(SelectionDAG &DAG, unsigned Flt) {
return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
}
-// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
-const char *
-SelectionDAGBuilder::implVisitAluOverflow(const CallInst &I, ISD::NodeType Op) {
- SDValue Op1 = getValue(I.getArgOperand(0));
- SDValue Op2 = getValue(I.getArgOperand(1));
-
- SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
- setValue(&I, DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2));
- return 0;
-}
-
/// visitExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
void
@@ -4367,9 +4388,8 @@ static unsigned getTruncatedArgReg(const SDValue &N) {
const SDValue &CFR = Ext.getOperand(0);
if (CFR.getOpcode() == ISD::CopyFromReg)
return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
- else
- if (CFR.getOpcode() == ISD::TRUNCATE)
- return getTruncatedArgReg(CFR);
+ if (CFR.getOpcode() == ISD::TRUNCATE)
+ return getTruncatedArgReg(CFR);
}
return 0;
}
@@ -4398,7 +4418,7 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
// Some arguments' frame index is recorded during argument lowering.
Offset = FuncInfo.getArgumentFrameIndex(Arg);
if (Offset)
- Reg = TRI->getFrameRegister(MF);
+ Reg = TRI->getFrameRegister(MF);
if (!Reg && N.getNode()) {
if (N.getOpcode() == ISD::CopyFromReg)
@@ -4473,9 +4493,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::setjmp:
- return "_setjmp"+!TLI.usesUnderscoreSetJmp();
+ return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
case Intrinsic::longjmp:
- return "_longjmp"+!TLI.usesUnderscoreLongJmp();
+ return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
case Intrinsic::memcpy: {
// Assert for address < 256 since we support only user defined address
// spaces.
@@ -4531,8 +4551,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
MDNode *Variable = DI.getVariable();
const Value *Address = DI.getAddress();
- if (!Address || !DIVariable(Variable).Verify())
+ if (!Address || !DIVariable(Variable).Verify()) {
+ DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return 0;
+ }
// Build an entry in DbgOrdering. Debug info input nodes get an SDNodeOrder
// but do not always have a corresponding SDNode built. The SDNodeOrder
@@ -4543,7 +4565,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
// Check if address has undef value.
if (isa<UndefValue>(Address) ||
(Address->use_empty() && !isa<Argument>(Address))) {
- DEBUG(dbgs() << "Dropping debug info for " << DI);
+ DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
return 0;
}
@@ -4553,11 +4575,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
N = UnusedArgNodeMap[Address];
SDDbgValue *SDV;
if (N.getNode()) {
- // Parameters are handled specially.
- bool isParameter =
- DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable;
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
Address = BCI->getOperand(0);
+ // Parameters are handled specially.
+ bool isParameter =
+ (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable ||
+ isa<Argument>(Address));
+
const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
if (isParameter && !AI) {
@@ -4577,7 +4601,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
0, dl, SDNodeOrder);
else {
// Can't do anything with other non-AI cases yet.
- DEBUG(dbgs() << "Dropping debug info for " << DI);
+ DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
+ DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
+ DEBUG(Address->dump());
return 0;
}
DAG.AddDbgValue(SDV, N.getNode(), isParameter);
@@ -4599,7 +4625,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
}
}
- DEBUG(dbgs() << "Dropping debug info for " << DI);
+ DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
return 0;
@@ -4645,7 +4671,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
} else {
// We may expand this to cover more cases. One case where we have no
// data available is an unreferenced parameter.
- DEBUG(dbgs() << "Dropping debug info for " << DI);
+ DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
@@ -4654,8 +4680,11 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
V = BCI->getOperand(0);
const AllocaInst *AI = dyn_cast<AllocaInst>(V);
// Don't handle byval struct arguments or VLAs, for example.
- if (!AI)
+ if (!AI) {
+ DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
+ DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
return 0;
+ }
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI == FuncInfo.StaticAllocaMap.end())
@@ -4667,43 +4696,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc());
return 0;
}
- case Intrinsic::eh_exception: {
- // Insert the EXCEPTIONADDR instruction.
- assert(FuncInfo.MBB->isLandingPad() &&
- "Call to eh.exception not in landing pad!");
- SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
- SDValue Ops[1];
- Ops[0] = DAG.getRoot();
- SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
- setValue(&I, Op);
- DAG.setRoot(Op.getValue(1));
- return 0;
- }
-
- case Intrinsic::eh_selector: {
- MachineBasicBlock *CallMBB = FuncInfo.MBB;
- MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
- if (CallMBB->isLandingPad())
- AddCatchInfo(I, &MMI, CallMBB);
- else {
-#ifndef NDEBUG
- FuncInfo.CatchInfoLost.insert(&I);
-#endif
- // FIXME: Mark exception selector register as live in. Hack for PR1508.
- unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) FuncInfo.MBB->addLiveIn(Reg);
- }
-
- // Insert the EHSELECTION instruction.
- SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
- SDValue Ops[2];
- Ops[0] = getValue(I.getArgOperand(0));
- Ops[1] = getRoot();
- SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
- DAG.setRoot(Op.getValue(1));
- setValue(&I, DAG.getSExtOrTrunc(Op, dl, MVT::i32));
- return 0;
- }
case Intrinsic::eh_typeid_for: {
// Find the type id for the given typeinfo.
@@ -4775,11 +4767,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
getRoot(), getValue(I.getArgOperand(0))));
return 0;
}
- case Intrinsic::eh_sjlj_dispatch_setup: {
- DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other,
- getRoot(), getValue(I.getArgOperand(0))));
- return 0;
- }
case Intrinsic::x86_mmx_pslli_w:
case Intrinsic::x86_mmx_pslli_d:
@@ -4841,6 +4828,22 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
setValue(&I, Res);
return 0;
}
+ case Intrinsic::x86_avx_vinsertf128_pd_256:
+ case Intrinsic::x86_avx_vinsertf128_ps_256:
+ case Intrinsic::x86_avx_vinsertf128_si_256:
+ case Intrinsic::x86_avx2_vinserti128: {
+ DebugLoc dl = getCurDebugLoc();
+ EVT DestVT = TLI.getValueType(I.getType());
+ EVT ElVT = TLI.getValueType(I.getArgOperand(1)->getType());
+ uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) *
+ ElVT.getVectorNumElements();
+ Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, DestVT,
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)),
+ DAG.getConstant(Idx, MVT::i32));
+ setValue(&I, Res);
+ return 0;
+ }
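
Only bit 0 of the vinsertf128/vinserti128 immediate is honored, and it is scaled by the element count of the 128-bit source subvector to get an element index for INSERT_SUBVECTOR. A hypothetical helper mirroring that arithmetic:

#include <cstdint>

// e.g. <4 x double> destination, <2 x double> source (NumSubElts == 2):
//   imm 0 -> element index 0 (low 128-bit lane)
//   imm 1 -> element index 2 (high 128-bit lane)
unsigned insertElementIndex(uint64_t Imm, unsigned NumSubElts) {
  return static_cast<unsigned>((Imm & 1) * NumSubElts);
}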
case Intrinsic::convertff:
case Intrinsic::convertfsi:
case Intrinsic::convertfui:
@@ -4852,6 +4855,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::convertuu: {
ISD::CvtCode Code = ISD::CVT_INVALID;
switch (Intrinsic) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::convertff: Code = ISD::CVT_FF; break;
case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
@@ -4946,14 +4950,18 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0;
case Intrinsic::cttz: {
SDValue Arg = getValue(I.getArgOperand(0));
+ ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
EVT Ty = Arg.getValueType();
- setValue(&I, DAG.getNode(ISD::CTTZ, dl, Ty, Arg));
+ setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
+ dl, Ty, Arg));
return 0;
}
case Intrinsic::ctlz: {
SDValue Arg = getValue(I.getArgOperand(0));
+ ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
EVT Ty = Arg.getValueType();
- setValue(&I, DAG.getNode(ISD::CTLZ, dl, Ty, Arg));
+ setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
+ dl, Ty, Arg));
return 0;
}
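
The second intrinsic operand now selects between the fully-defined node and the _ZERO_UNDEF variant, which permits instructions whose result is undefined for a zero input (x86 BSF/BSR being the classic case). In C terms, using the GCC/Clang builtins:

// __builtin_ctz is itself undefined for 0, matching CTTZ_ZERO_UNDEF;
// plain CTTZ must be defined everywhere, like the guarded form.
unsigned cttzDefined(unsigned x)     { return x ? __builtin_ctz(x) : 32; } // ISD::CTTZ
unsigned cttzUndefAtZero(unsigned x) { return __builtin_ctz(x); }          // ISD::CTTZ_ZERO_UNDEF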
case Intrinsic::ctpop: {
@@ -5052,7 +5060,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::gcread:
case Intrinsic::gcwrite:
llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
- return 0;
case Intrinsic::flt_rounds:
setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
return 0;
@@ -5064,7 +5071,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::trap: {
- StringRef TrapFuncName = getTrapFunctionName();
+ StringRef TrapFuncName = TM.Options.getTrapFunctionName();
if (TrapFuncName.empty()) {
DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()));
return 0;
@@ -5073,25 +5080,36 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
std::pair<SDValue, SDValue> Result =
TLI.LowerCallTo(getRoot(), I.getType(),
false, false, false, false, 0, CallingConv::C,
- /*isTailCall=*/false, /*isReturnValueUsed=*/true,
+ /*isTailCall=*/false,
+ /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol(TrapFuncName.data(), TLI.getPointerTy()),
Args, DAG, getCurDebugLoc());
DAG.setRoot(Result.second);
return 0;
}
case Intrinsic::uadd_with_overflow:
- return implVisitAluOverflow(I, ISD::UADDO);
case Intrinsic::sadd_with_overflow:
- return implVisitAluOverflow(I, ISD::SADDO);
case Intrinsic::usub_with_overflow:
- return implVisitAluOverflow(I, ISD::USUBO);
case Intrinsic::ssub_with_overflow:
- return implVisitAluOverflow(I, ISD::SSUBO);
case Intrinsic::umul_with_overflow:
- return implVisitAluOverflow(I, ISD::UMULO);
- case Intrinsic::smul_with_overflow:
- return implVisitAluOverflow(I, ISD::SMULO);
+ case Intrinsic::smul_with_overflow: {
+ ISD::NodeType Op;
+ switch (Intrinsic) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
+ case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
+ case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
+ case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
+ case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
+ case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
+ }
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
+ setValue(&I, DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2));
+ return 0;
+ }
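
Each of these nodes produces two results: the arithmetic value and an i1 overflow flag, which is the MVT::i1 in the VT list above. The unsigned-add case behaves like this illustrative sketch:

#include <cstdint>
#include <utility>

// Semantics of ISD::UADDO / llvm.uadd.with.overflow.i32.
std::pair<uint32_t, bool> uaddo(uint32_t a, uint32_t b) {
  uint32_t sum = a + b;   // wraps modulo 2^32
  return {sum, sum < a};  // overflow iff the sum wrapped around
}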
case Intrinsic::prefetch: {
SDValue Ops[5];
unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
@@ -5226,7 +5244,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// If there's a possibility that fast-isel has already selected some amount
// of the current basic block, don't emit a tail call.
- if (isTailCall && EnableFastISel)
+ if (isTailCall && TM.Options.EnableFastISel)
isTailCall = false;
std::pair<SDValue,SDValue> Result =
@@ -5236,6 +5254,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
CS.getCallingConv(),
isTailCall,
+ CS.doesNotReturn(),
!CS.getInstruction()->use_empty(),
Callee, Args, DAG, getCurDebugLoc());
assert((isTailCall || Result.second.getNode()) &&
@@ -5264,7 +5283,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
SDValue L = DAG.getLoad(Outs[i].VT, getCurDebugLoc(), Result.second,
Add,
MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]),
- false, false, 1);
+ false, false, false, 1);
Values[i] = L;
Chains[i] = L.getValue(1);
}
@@ -5375,7 +5394,8 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurDebugLoc(), Root,
Ptr, MachinePointerInfo(PtrVal),
false /*volatile*/,
- false /*nontemporal*/, 1 /* align=1 */);
+ false /*nontemporal*/,
+ false /*isinvariant*/, 1 /* align=1 */);
if (!ConstantMemory)
Builder.PendingLoads.push_back(LoadVal.getValue(1));
@@ -5470,23 +5490,8 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
return;
}
- // See if any floating point values are being passed to this function. This is
- // used to emit an undefined reference to fltused on Windows.
- FunctionType *FT =
- cast<FunctionType>(I.getCalledValue()->getType()->getContainedType(0));
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
- if (FT->isVarArg() &&
- !MMI.callsExternalVAFunctionWithFloatingPointArguments()) {
- for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
- Type* T = I.getArgOperand(i)->getType();
- for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
- i != e; ++i) {
- if (!i->isFloatingPointTy()) continue;
- MMI.setCallsExternalVAFunctionWithFloatingPointArguments(true);
- break;
- }
- }
- }
+ ComputeUsesVAFloatArgument(I, &MMI);
const char *RenameFn = 0;
if (Function *F = I.getCalledFunction()) {
@@ -5509,7 +5514,9 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
// can't be a library call.
if (!F->hasLocalLinkage() && F->hasName()) {
StringRef Name = F->getName();
- if (Name == "copysign" || Name == "copysignf" || Name == "copysignl") {
+ if ((LibInfo->has(LibFunc::copysign) && Name == "copysign") ||
+ (LibInfo->has(LibFunc::copysignf) && Name == "copysignf") ||
+ (LibInfo->has(LibFunc::copysignl) && Name == "copysignl")) {
if (I.getNumArgOperands() == 2 && // Basic sanity checks.
I.getArgOperand(0)->getType()->isFloatingPointTy() &&
I.getType() == I.getArgOperand(0)->getType() &&
@@ -5520,7 +5527,9 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
LHS.getValueType(), LHS, RHS));
return;
}
- } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
+ } else if ((LibInfo->has(LibFunc::fabs) && Name == "fabs") ||
+ (LibInfo->has(LibFunc::fabsf) && Name == "fabsf") ||
+ (LibInfo->has(LibFunc::fabsl) && Name == "fabsl")) {
if (I.getNumArgOperands() == 1 && // Basic sanity checks.
I.getArgOperand(0)->getType()->isFloatingPointTy() &&
I.getType() == I.getArgOperand(0)->getType()) {
@@ -5529,7 +5538,9 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
Tmp.getValueType(), Tmp));
return;
}
- } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
+ } else if ((LibInfo->has(LibFunc::sin) && Name == "sin") ||
+ (LibInfo->has(LibFunc::sinf) && Name == "sinf") ||
+ (LibInfo->has(LibFunc::sinl) && Name == "sinl")) {
if (I.getNumArgOperands() == 1 && // Basic sanity checks.
I.getArgOperand(0)->getType()->isFloatingPointTy() &&
I.getType() == I.getArgOperand(0)->getType() &&
@@ -5539,7 +5550,9 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
Tmp.getValueType(), Tmp));
return;
}
- } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
+ } else if ((LibInfo->has(LibFunc::cos) && Name == "cos") ||
+ (LibInfo->has(LibFunc::cosf) && Name == "cosf") ||
+ (LibInfo->has(LibFunc::cosl) && Name == "cosl")) {
if (I.getNumArgOperands() == 1 && // Basic sanity checks.
I.getArgOperand(0)->getType()->isFloatingPointTy() &&
I.getType() == I.getArgOperand(0)->getType() &&
@@ -5549,7 +5562,9 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
Tmp.getValueType(), Tmp));
return;
}
- } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
+ } else if ((LibInfo->has(LibFunc::sqrt) && Name == "sqrt") ||
+ (LibInfo->has(LibFunc::sqrtf) && Name == "sqrtf") ||
+ (LibInfo->has(LibFunc::sqrtl) && Name == "sqrtl")) {
if (I.getNumArgOperands() == 1 && // Basic sanity checks.
I.getArgOperand(0)->getType()->isFloatingPointTy() &&
I.getType() == I.getArgOperand(0)->getType() &&
@@ -5559,6 +5574,85 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
Tmp.getValueType(), Tmp));
return;
}
+ } else if ((LibInfo->has(LibFunc::floor) && Name == "floor") ||
+ (LibInfo->has(LibFunc::floorf) && Name == "floorf") ||
+ (LibInfo->has(LibFunc::floorl) && Name == "floorl")) {
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(ISD::FFLOOR, getCurDebugLoc(),
+ Tmp.getValueType(), Tmp));
+ return;
+ }
+ } else if ((LibInfo->has(LibFunc::nearbyint) && Name == "nearbyint") ||
+ (LibInfo->has(LibFunc::nearbyintf) && Name == "nearbyintf") ||
+ (LibInfo->has(LibFunc::nearbyintl) && Name == "nearbyintl")) {
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(ISD::FNEARBYINT, getCurDebugLoc(),
+ Tmp.getValueType(), Tmp));
+ return;
+ }
+ } else if ((LibInfo->has(LibFunc::ceil) && Name == "ceil") ||
+ (LibInfo->has(LibFunc::ceilf) && Name == "ceilf") ||
+ (LibInfo->has(LibFunc::ceill) && Name == "ceill")) {
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(ISD::FCEIL, getCurDebugLoc(),
+ Tmp.getValueType(), Tmp));
+ return;
+ }
+ } else if ((LibInfo->has(LibFunc::rint) && Name == "rint") ||
+ (LibInfo->has(LibFunc::rintf) && Name == "rintf") ||
+ (LibInfo->has(LibFunc::rintl) && Name == "rintl")) {
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(ISD::FRINT, getCurDebugLoc(),
+ Tmp.getValueType(), Tmp));
+ return;
+ }
+ } else if ((LibInfo->has(LibFunc::trunc) && Name == "trunc") ||
+ (LibInfo->has(LibFunc::truncf) && Name == "truncf") ||
+ (LibInfo->has(LibFunc::truncl) && Name == "truncl")) {
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(ISD::FTRUNC, getCurDebugLoc(),
+ Tmp.getValueType(), Tmp));
+ return;
+ }
+ } else if ((LibInfo->has(LibFunc::log2) && Name == "log2") ||
+ (LibInfo->has(LibFunc::log2f) && Name == "log2f") ||
+ (LibInfo->has(LibFunc::log2l) && Name == "log2l")) {
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
+ I.onlyReadsMemory()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(ISD::FLOG2, getCurDebugLoc(),
+ Tmp.getValueType(), Tmp));
+ return;
+ }
+ } else if ((LibInfo->has(LibFunc::exp2) && Name == "exp2") ||
+ (LibInfo->has(LibFunc::exp2f) && Name == "exp2f") ||
+ (LibInfo->has(LibFunc::exp2l) && Name == "exp2l")) {
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
+ I.onlyReadsMemory()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
+ setValue(&I, DAG.getNode(ISD::FEXP2, getCurDebugLoc(),
+ Tmp.getValueType(), Tmp));
+ return;
+ }
} else if (Name == "memcmp") {
if (visitMemCmpCall(I))
return;
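Every name check in this hunk gains the same guard: the string match only fires when TargetLibraryInfo reports the routine available, so a call named "sin" is no longer assumed to be the C library sin when builtins are disabled or the target lacks the function. Reduced to its shape, with a hypothetical helper name (the real code inlines these tests per name):

  static bool isAvailableLibCall(const TargetLibraryInfo *LibInfo,
                                 StringRef Name) {
    return (LibInfo->has(LibFunc::sin)  && Name == "sin")  ||
           (LibInfo->has(LibFunc::sinf) && Name == "sinf") ||
           (LibInfo->has(LibFunc::sinl) && Name == "sinl");
  }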
@@ -5596,22 +5690,6 @@ public:
: TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
}
- /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
- /// busy in OutputRegs/InputRegs.
- void MarkAllocatedRegs(bool isOutReg, bool isInReg,
- std::set<unsigned> &OutputRegs,
- std::set<unsigned> &InputRegs,
- const TargetRegisterInfo &TRI) const {
- if (isOutReg) {
- for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
- MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
- }
- if (isInReg) {
- for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
- MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
- }
- }
-
/// getCallOperandValEVT - Return the EVT of the Value* that this operand
/// corresponds to. If there is no Value* for this operand, it returns
/// MVT::Other.
@@ -5659,18 +5737,6 @@ public:
return TLI.getValueType(OpTy, true);
}
-
-private:
- /// MarkRegAndAliases - Mark the specified register and all aliases in the
- /// specified set.
- static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
- const TargetRegisterInfo &TRI) {
- assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
- Regs.insert(Reg);
- if (const unsigned *Aliases = TRI.getAliasSet(Reg))
- for (; *Aliases; ++Aliases)
- Regs.insert(*Aliases);
- }
};
typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
@@ -5684,39 +5750,13 @@ typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
/// allocation. This produces generally horrible, but correct, code.
///
/// OpInfo describes the operand.
-/// Input and OutputRegs are the set of already allocated physical registers.
///
static void GetRegistersForValue(SelectionDAG &DAG,
const TargetLowering &TLI,
DebugLoc DL,
- SDISelAsmOperandInfo &OpInfo,
- std::set<unsigned> &OutputRegs,
- std::set<unsigned> &InputRegs) {
+ SDISelAsmOperandInfo &OpInfo) {
LLVMContext &Context = *DAG.getContext();
- // Compute whether this value requires an input register, an output register,
- // or both.
- bool isOutReg = false;
- bool isInReg = false;
- switch (OpInfo.Type) {
- case InlineAsm::isOutput:
- isOutReg = true;
-
- // If there is an input constraint that matches this, we need to reserve
- // the input register so no other inputs allocate to it.
- isInReg = OpInfo.hasMatchingInput();
- break;
- case InlineAsm::isInput:
- isInReg = true;
- isOutReg = false;
- break;
- case InlineAsm::isClobber:
- isOutReg = true;
- isInReg = true;
- break;
- }
-
-
MachineFunction &MF = DAG.getMachineFunction();
SmallVector<unsigned, 4> Regs;
@@ -5790,8 +5830,6 @@ static void GetRegistersForValue(SelectionDAG &DAG,
}
OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
- const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
- OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
return;
}
@@ -5822,8 +5860,6 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
/// ConstraintOperands - Information about all of the constraints.
SDISelAsmOperandInfoVector ConstraintOperands;
- std::set<unsigned> OutputRegs, InputRegs;
-
TargetLowering::AsmOperandInfoVector
TargetConstraints = TLI.ParseConstraints(CS);
@@ -5956,7 +5992,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// constant pool entry to get its address.
const Value *OpVal = OpInfo.CallOperandVal;
if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
- isa<ConstantVector>(OpVal)) {
+ isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
TLI.getPointerTy());
} else {
@@ -5985,8 +6021,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// If this constraint is for a specific register, allocate it before
// anything else.
if (OpInfo.ConstraintType == TargetLowering::C_Register)
- GetRegistersForValue(DAG, TLI, getCurDebugLoc(), OpInfo, OutputRegs,
- InputRegs);
+ GetRegistersForValue(DAG, TLI, getCurDebugLoc(), OpInfo);
}
// Second pass - Loop over all of the operands, assigning virtual or physregs
@@ -5997,8 +6032,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// C_Register operands have already been allocated, Other/Memory don't need
// to be.
if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
- GetRegistersForValue(DAG, TLI, getCurDebugLoc(), OpInfo, OutputRegs,
- InputRegs);
+ GetRegistersForValue(DAG, TLI, getCurDebugLoc(), OpInfo);
}
// AsmNodeOperands - The operands for the ISD::INLINEASM node.
@@ -6052,9 +6086,13 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Copy the output from the appropriate register. Find a register that
// we can use.
- if (OpInfo.AssignedRegs.Regs.empty())
- report_fatal_error("Couldn't allocate output reg for constraint '" +
- Twine(OpInfo.ConstraintCode) + "'!");
+ if (OpInfo.AssignedRegs.Regs.empty()) {
+ LLVMContext &Ctx = *DAG.getContext();
+ Ctx.emitError(CS.getInstruction(),
+ "couldn't allocate output register for constraint '" +
+ Twine(OpInfo.ConstraintCode) + "'");
+ break;
+ }
// If this is an indirect operand, store through the pointer after the
// asm.
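This and the two hunks that follow swap report_fatal_error, which aborts the whole process, for the same recoverable shape. A sketch of the pattern, using only what the hunks themselves show:

  if (OpInfo.AssignedRegs.Regs.empty()) {
    // Attach the diagnostic to the offending call site and keep lowering;
    // the context decides how (and whether) compilation ultimately stops.
    LLVMContext &Ctx = *DAG.getContext();
    Ctx.emitError(CS.getInstruction(),
                  "couldn't allocate output register for constraint '" +
                  Twine(OpInfo.ConstraintCode) + "'");
    break;  // skip this operand instead of killing llc outright
  }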
@@ -6154,9 +6192,13 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
std::vector<SDValue> Ops;
TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
Ops, DAG);
- if (Ops.empty())
- report_fatal_error("Invalid operand for inline asm constraint '" +
- Twine(OpInfo.ConstraintCode) + "'!");
+ if (Ops.empty()) {
+ LLVMContext &Ctx = *DAG.getContext();
+ Ctx.emitError(CS.getInstruction(),
+ "invalid operand for inline asm constraint '" +
+ Twine(OpInfo.ConstraintCode) + "'");
+ break;
+ }
// Add information to the INLINEASM node to know about this input.
unsigned ResOpType =
@@ -6187,9 +6229,13 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
"Don't know how to handle indirect register inputs yet!");
// Copy the input into the appropriate registers.
- if (OpInfo.AssignedRegs.Regs.empty())
- report_fatal_error("Couldn't allocate input reg for constraint '" +
- Twine(OpInfo.ConstraintCode) + "'!");
+ if (OpInfo.AssignedRegs.Regs.empty()) {
+ LLVMContext &Ctx = *DAG.getContext();
+ Ctx.emitError(CS.getInstruction(),
+ "couldn't allocate input reg for constraint '" +
+ Twine(OpInfo.ConstraintCode) + "'");
+ break;
+ }
OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
Chain, &Flag);
@@ -6327,7 +6373,7 @@ TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
bool RetSExt, bool RetZExt, bool isVarArg,
bool isInreg, unsigned NumFixedArgs,
CallingConv::ID CallConv, bool isTailCall,
- bool isReturnValueUsed,
+ bool doesNotRet, bool isReturnValueUsed,
SDValue Callee,
ArgListTy &Args, SelectionDAG &DAG,
DebugLoc dl) const {
@@ -6424,7 +6470,7 @@ TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
}
SmallVector<SDValue, 4> InVals;
- Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
+ Chain = LowerCall(Chain, Callee, CallConv, isVarArg, doesNotRet, isTailCall,
Outs, OutVals, Ins, dl, DAG, InVals);
// Verify that the target's LowerCall behaved as expected.
@@ -6493,7 +6539,6 @@ void TargetLowering::LowerOperationWrapper(SDNode *N,
SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
llvm_unreachable("LowerOperation not implemented for this target!");
- return SDValue();
}
void
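The deleted `return SDValue();` was dead code: llvm_unreachable expands to a noreturn call, roughly as follows (a paraphrase of llvm/Support/ErrorHandling.h of this vintage, not a verbatim quote):

  LLVM_ATTRIBUTE_NORETURN void llvm_unreachable_internal(const char *msg = 0,
                                                         const char *file = 0,
                                                         unsigned line = 0);
  #define llvm_unreachable(msg) \
    ::llvm::llvm_unreachable_internal(msg, __FILE__, __LINE__)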
@@ -6515,10 +6560,10 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
-static bool isOnlyUsedInEntryBlock(const Argument *A) {
+static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
// With FastISel active, we may be splitting blocks, so force creation
// of virtual registers for all non-dead arguments.
- if (EnableFastISel)
+ if (FastISel)
return A->use_empty();
const BasicBlock *Entry = A->getParent()->begin();
@@ -6708,7 +6753,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
SDB->getCurDebugLoc());
SDB->setValue(I, Res);
- if (!EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
+ if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
if (LoadSDNode *LNode =
dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
if (FrameIndexSDNode *FI =
@@ -6718,7 +6763,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
// If this argument is live outside of the entry block, insert a copy from
// wherever we got it to the vreg that other BB's will reference it as.
- if (!EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
+ if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
// If we can, though, try to skip creating an unnecessary vreg.
// FIXME: This isn't very clean... it would be nice to make this more
// general. It's also subtly incompatible with the hacks FastISel
@@ -6729,7 +6774,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
continue;
}
}
- if (!isOnlyUsedInEntryBlock(I)) {
+ if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) {
FuncInfo->InitializeRegForValue(I);
SDB->CopyToExportRegsIfNeeded(I);
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 0a21ca3..8393b41 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -67,11 +67,11 @@ class SIToFPInst;
class StoreInst;
class SwitchInst;
class TargetData;
+class TargetLibraryInfo;
class TargetLowering;
class TruncInst;
class UIToFPInst;
class UnreachableInst;
-class UnwindInst;
class VAArgInst;
class ZExtInst;
@@ -129,13 +129,13 @@ private:
/// Case - A struct to record the Value for a switch case, and the
/// case's target basic block.
struct Case {
- Constant* Low;
- Constant* High;
+ const Constant *Low;
+ const Constant *High;
MachineBasicBlock* BB;
uint32_t ExtraWeight;
Case() : Low(0), High(0), BB(0), ExtraWeight(0) { }
- Case(Constant* low, Constant* high, MachineBasicBlock* bb,
+ Case(const Constant *low, const Constant *high, MachineBasicBlock *bb,
uint32_t extraweight) : Low(low), High(high), BB(bb),
ExtraWeight(extraweight) { }
@@ -294,6 +294,7 @@ public:
SelectionDAG &DAG;
const TargetData *TD;
AliasAnalysis *AA;
+ const TargetLibraryInfo *LibInfo;
/// SwitchCases - Vector of CaseBlock structures used to communicate
/// SwitchInst code generation information.
@@ -338,7 +339,8 @@ public:
HasTailCall(false), Context(dag.getContext()) {
}
- void init(GCFunctionInfo *gfi, AliasAnalysis &aa);
+ void init(GCFunctionInfo *gfi, AliasAnalysis &aa,
+ const TargetLibraryInfo *li);
/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
@@ -451,7 +453,8 @@ private:
MachineBasicBlock* Default,
MachineBasicBlock *SwitchBB);
- uint32_t getEdgeWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst);
+ uint32_t getEdgeWeight(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const;
void addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
uint32_t Weight = 0);
public:
@@ -471,7 +474,6 @@ private:
// These all get lowered before this pass.
void visitInvoke(const InvokeInst &I);
void visitResume(const ResumeInst &I);
- void visitUnwind(const UnwindInst &I);
void visitBinary(const User &I, unsigned OpCode);
void visitShift(const User &I, unsigned Opcode);
@@ -554,8 +556,6 @@ private:
void visitUserOp2(const Instruction &I) {
llvm_unreachable("UserOp2 should not exist at instruction selection time!");
}
-
- const char *implVisitAluOverflow(const CallInst &I, ISD::NodeType Op);
void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
new file mode 100644
index 0000000..f981afb
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -0,0 +1,631 @@
+//===-- SelectionDAGDumper.cpp - Implement SelectionDAG::dump() -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements the SelectionDAG::dump method and friends.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ScheduleDAGSDNodes.h"
+#include "llvm/Function.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetIntrinsicInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace llvm;
+
+std::string SDNode::getOperationName(const SelectionDAG *G) const {
+ switch (getOpcode()) {
+ default:
+ if (getOpcode() < ISD::BUILTIN_OP_END)
+ return "<<Unknown DAG Node>>";
+ if (isMachineOpcode()) {
+ if (G)
+ if (const TargetInstrInfo *TII = G->getTarget().getInstrInfo())
+ if (getMachineOpcode() < TII->getNumOpcodes())
+ return TII->getName(getMachineOpcode());
+ return "<<Unknown Machine Node #" + utostr(getOpcode()) + ">>";
+ }
+ if (G) {
+ const TargetLowering &TLI = G->getTargetLoweringInfo();
+ const char *Name = TLI.getTargetNodeName(getOpcode());
+ if (Name) return Name;
+ return "<<Unknown Target Node #" + utostr(getOpcode()) + ">>";
+ }
+ return "<<Unknown Node #" + utostr(getOpcode()) + ">>";
+
+#ifndef NDEBUG
+ case ISD::DELETED_NODE: return "<<Deleted Node!>>";
+#endif
+ case ISD::PREFETCH: return "Prefetch";
+ case ISD::MEMBARRIER: return "MemBarrier";
+ case ISD::ATOMIC_FENCE: return "AtomicFence";
+ case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap";
+ case ISD::ATOMIC_SWAP: return "AtomicSwap";
+ case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd";
+ case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub";
+ case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd";
+ case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr";
+ case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor";
+ case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand";
+ case ISD::ATOMIC_LOAD_MIN: return "AtomicLoadMin";
+ case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax";
+ case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
+ case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
+ case ISD::ATOMIC_LOAD: return "AtomicLoad";
+ case ISD::ATOMIC_STORE: return "AtomicStore";
+ case ISD::PCMARKER: return "PCMarker";
+ case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
+ case ISD::SRCVALUE: return "SrcValue";
+ case ISD::MDNODE_SDNODE: return "MDNode";
+ case ISD::EntryToken: return "EntryToken";
+ case ISD::TokenFactor: return "TokenFactor";
+ case ISD::AssertSext: return "AssertSext";
+ case ISD::AssertZext: return "AssertZext";
+
+ case ISD::BasicBlock: return "BasicBlock";
+ case ISD::VALUETYPE: return "ValueType";
+ case ISD::Register: return "Register";
+ case ISD::RegisterMask: return "RegisterMask";
+ case ISD::Constant: return "Constant";
+ case ISD::ConstantFP: return "ConstantFP";
+ case ISD::GlobalAddress: return "GlobalAddress";
+ case ISD::GlobalTLSAddress: return "GlobalTLSAddress";
+ case ISD::FrameIndex: return "FrameIndex";
+ case ISD::JumpTable: return "JumpTable";
+ case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE";
+ case ISD::RETURNADDR: return "RETURNADDR";
+ case ISD::FRAMEADDR: return "FRAMEADDR";
+ case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
+ case ISD::EXCEPTIONADDR: return "EXCEPTIONADDR";
+ case ISD::LSDAADDR: return "LSDAADDR";
+ case ISD::EHSELECTION: return "EHSELECTION";
+ case ISD::EH_RETURN: return "EH_RETURN";
+ case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
+ case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
+ case ISD::ConstantPool: return "ConstantPool";
+ case ISD::ExternalSymbol: return "ExternalSymbol";
+ case ISD::BlockAddress: return "BlockAddress";
+ case ISD::INTRINSIC_WO_CHAIN:
+ case ISD::INTRINSIC_VOID:
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
+ unsigned IID = cast<ConstantSDNode>(getOperand(OpNo))->getZExtValue();
+ if (IID < Intrinsic::num_intrinsics)
+ return Intrinsic::getName((Intrinsic::ID)IID);
+ else if (const TargetIntrinsicInfo *TII = G->getTarget().getIntrinsicInfo())
+ return TII->getName(IID);
+ llvm_unreachable("Invalid intrinsic ID");
+ }
+
+ case ISD::BUILD_VECTOR: return "BUILD_VECTOR";
+ case ISD::TargetConstant: return "TargetConstant";
+ case ISD::TargetConstantFP: return "TargetConstantFP";
+ case ISD::TargetGlobalAddress: return "TargetGlobalAddress";
+ case ISD::TargetGlobalTLSAddress: return "TargetGlobalTLSAddress";
+ case ISD::TargetFrameIndex: return "TargetFrameIndex";
+ case ISD::TargetJumpTable: return "TargetJumpTable";
+ case ISD::TargetConstantPool: return "TargetConstantPool";
+ case ISD::TargetExternalSymbol: return "TargetExternalSymbol";
+ case ISD::TargetBlockAddress: return "TargetBlockAddress";
+
+ case ISD::CopyToReg: return "CopyToReg";
+ case ISD::CopyFromReg: return "CopyFromReg";
+ case ISD::UNDEF: return "undef";
+ case ISD::MERGE_VALUES: return "merge_values";
+ case ISD::INLINEASM: return "inlineasm";
+ case ISD::EH_LABEL: return "eh_label";
+ case ISD::HANDLENODE: return "handlenode";
+
+ // Unary operators
+ case ISD::FABS: return "fabs";
+ case ISD::FNEG: return "fneg";
+ case ISD::FSQRT: return "fsqrt";
+ case ISD::FSIN: return "fsin";
+ case ISD::FCOS: return "fcos";
+ case ISD::FTRUNC: return "ftrunc";
+ case ISD::FFLOOR: return "ffloor";
+ case ISD::FCEIL: return "fceil";
+ case ISD::FRINT: return "frint";
+ case ISD::FNEARBYINT: return "fnearbyint";
+ case ISD::FEXP: return "fexp";
+ case ISD::FEXP2: return "fexp2";
+ case ISD::FLOG: return "flog";
+ case ISD::FLOG2: return "flog2";
+ case ISD::FLOG10: return "flog10";
+
+ // Binary operators
+ case ISD::ADD: return "add";
+ case ISD::SUB: return "sub";
+ case ISD::MUL: return "mul";
+ case ISD::MULHU: return "mulhu";
+ case ISD::MULHS: return "mulhs";
+ case ISD::SDIV: return "sdiv";
+ case ISD::UDIV: return "udiv";
+ case ISD::SREM: return "srem";
+ case ISD::UREM: return "urem";
+ case ISD::SMUL_LOHI: return "smul_lohi";
+ case ISD::UMUL_LOHI: return "umul_lohi";
+ case ISD::SDIVREM: return "sdivrem";
+ case ISD::UDIVREM: return "udivrem";
+ case ISD::AND: return "and";
+ case ISD::OR: return "or";
+ case ISD::XOR: return "xor";
+ case ISD::SHL: return "shl";
+ case ISD::SRA: return "sra";
+ case ISD::SRL: return "srl";
+ case ISD::ROTL: return "rotl";
+ case ISD::ROTR: return "rotr";
+ case ISD::FADD: return "fadd";
+ case ISD::FSUB: return "fsub";
+ case ISD::FMUL: return "fmul";
+ case ISD::FDIV: return "fdiv";
+ case ISD::FMA: return "fma";
+ case ISD::FREM: return "frem";
+ case ISD::FCOPYSIGN: return "fcopysign";
+ case ISD::FGETSIGN: return "fgetsign";
+ case ISD::FPOW: return "fpow";
+
+ case ISD::FPOWI: return "fpowi";
+ case ISD::SETCC: return "setcc";
+ case ISD::SELECT: return "select";
+ case ISD::VSELECT: return "vselect";
+ case ISD::SELECT_CC: return "select_cc";
+ case ISD::INSERT_VECTOR_ELT: return "insert_vector_elt";
+ case ISD::EXTRACT_VECTOR_ELT: return "extract_vector_elt";
+ case ISD::CONCAT_VECTORS: return "concat_vectors";
+ case ISD::INSERT_SUBVECTOR: return "insert_subvector";
+ case ISD::EXTRACT_SUBVECTOR: return "extract_subvector";
+ case ISD::SCALAR_TO_VECTOR: return "scalar_to_vector";
+ case ISD::VECTOR_SHUFFLE: return "vector_shuffle";
+ case ISD::CARRY_FALSE: return "carry_false";
+ case ISD::ADDC: return "addc";
+ case ISD::ADDE: return "adde";
+ case ISD::SADDO: return "saddo";
+ case ISD::UADDO: return "uaddo";
+ case ISD::SSUBO: return "ssubo";
+ case ISD::USUBO: return "usubo";
+ case ISD::SMULO: return "smulo";
+ case ISD::UMULO: return "umulo";
+ case ISD::SUBC: return "subc";
+ case ISD::SUBE: return "sube";
+ case ISD::SHL_PARTS: return "shl_parts";
+ case ISD::SRA_PARTS: return "sra_parts";
+ case ISD::SRL_PARTS: return "srl_parts";
+
+ // Conversion operators.
+ case ISD::SIGN_EXTEND: return "sign_extend";
+ case ISD::ZERO_EXTEND: return "zero_extend";
+ case ISD::ANY_EXTEND: return "any_extend";
+ case ISD::SIGN_EXTEND_INREG: return "sign_extend_inreg";
+ case ISD::TRUNCATE: return "truncate";
+ case ISD::FP_ROUND: return "fp_round";
+ case ISD::FLT_ROUNDS_: return "flt_rounds";
+ case ISD::FP_ROUND_INREG: return "fp_round_inreg";
+ case ISD::FP_EXTEND: return "fp_extend";
+
+ case ISD::SINT_TO_FP: return "sint_to_fp";
+ case ISD::UINT_TO_FP: return "uint_to_fp";
+ case ISD::FP_TO_SINT: return "fp_to_sint";
+ case ISD::FP_TO_UINT: return "fp_to_uint";
+ case ISD::BITCAST: return "bitcast";
+ case ISD::FP16_TO_FP32: return "fp16_to_fp32";
+ case ISD::FP32_TO_FP16: return "fp32_to_fp16";
+
+ case ISD::CONVERT_RNDSAT: {
+ switch (cast<CvtRndSatSDNode>(this)->getCvtCode()) {
+ default: llvm_unreachable("Unknown cvt code!");
+ case ISD::CVT_FF: return "cvt_ff";
+ case ISD::CVT_FS: return "cvt_fs";
+ case ISD::CVT_FU: return "cvt_fu";
+ case ISD::CVT_SF: return "cvt_sf";
+ case ISD::CVT_UF: return "cvt_uf";
+ case ISD::CVT_SS: return "cvt_ss";
+ case ISD::CVT_SU: return "cvt_su";
+ case ISD::CVT_US: return "cvt_us";
+ case ISD::CVT_UU: return "cvt_uu";
+ }
+ }
+
+ // Control flow instructions
+ case ISD::BR: return "br";
+ case ISD::BRIND: return "brind";
+ case ISD::BR_JT: return "br_jt";
+ case ISD::BRCOND: return "brcond";
+ case ISD::BR_CC: return "br_cc";
+ case ISD::CALLSEQ_START: return "callseq_start";
+ case ISD::CALLSEQ_END: return "callseq_end";
+
+ // Other operators
+ case ISD::LOAD: return "load";
+ case ISD::STORE: return "store";
+ case ISD::VAARG: return "vaarg";
+ case ISD::VACOPY: return "vacopy";
+ case ISD::VAEND: return "vaend";
+ case ISD::VASTART: return "vastart";
+ case ISD::DYNAMIC_STACKALLOC: return "dynamic_stackalloc";
+ case ISD::EXTRACT_ELEMENT: return "extract_element";
+ case ISD::BUILD_PAIR: return "build_pair";
+ case ISD::STACKSAVE: return "stacksave";
+ case ISD::STACKRESTORE: return "stackrestore";
+ case ISD::TRAP: return "trap";
+
+ // Bit manipulation
+ case ISD::BSWAP: return "bswap";
+ case ISD::CTPOP: return "ctpop";
+ case ISD::CTTZ: return "cttz";
+ case ISD::CTTZ_ZERO_UNDEF: return "cttz_zero_undef";
+ case ISD::CTLZ: return "ctlz";
+ case ISD::CTLZ_ZERO_UNDEF: return "ctlz_zero_undef";
+
+ // Trampolines
+ case ISD::INIT_TRAMPOLINE: return "init_trampoline";
+ case ISD::ADJUST_TRAMPOLINE: return "adjust_trampoline";
+
+ case ISD::CONDCODE:
+ switch (cast<CondCodeSDNode>(this)->get()) {
+ default: llvm_unreachable("Unknown setcc condition!");
+ case ISD::SETOEQ: return "setoeq";
+ case ISD::SETOGT: return "setogt";
+ case ISD::SETOGE: return "setoge";
+ case ISD::SETOLT: return "setolt";
+ case ISD::SETOLE: return "setole";
+ case ISD::SETONE: return "setone";
+
+ case ISD::SETO: return "seto";
+ case ISD::SETUO: return "setuo";
+ case ISD::SETUEQ: return "setueq";
+ case ISD::SETUGT: return "setugt";
+ case ISD::SETUGE: return "setuge";
+ case ISD::SETULT: return "setult";
+ case ISD::SETULE: return "setule";
+ case ISD::SETUNE: return "setune";
+
+ case ISD::SETEQ: return "seteq";
+ case ISD::SETGT: return "setgt";
+ case ISD::SETGE: return "setge";
+ case ISD::SETLT: return "setlt";
+ case ISD::SETLE: return "setle";
+ case ISD::SETNE: return "setne";
+
+ case ISD::SETTRUE: return "settrue";
+ case ISD::SETTRUE2: return "settrue2";
+ case ISD::SETFALSE: return "setfalse";
+ case ISD::SETFALSE2: return "setfalse2";
+ }
+ }
+}
+
+const char *SDNode::getIndexedModeName(ISD::MemIndexedMode AM) {
+ switch (AM) {
+ default: return "";
+ case ISD::PRE_INC: return "<pre-inc>";
+ case ISD::PRE_DEC: return "<pre-dec>";
+ case ISD::POST_INC: return "<post-inc>";
+ case ISD::POST_DEC: return "<post-dec>";
+ }
+}
+
+void SDNode::dump() const { dump(0); }
+void SDNode::dump(const SelectionDAG *G) const {
+ print(dbgs(), G);
+ dbgs() << '\n';
+}
+
+void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
+ OS << (void*)this << ": ";
+
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
+ if (i) OS << ",";
+ if (getValueType(i) == MVT::Other)
+ OS << "ch";
+ else
+ OS << getValueType(i).getEVTString();
+ }
+ OS << " = " << getOperationName(G);
+}
+
+void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
+ if (const MachineSDNode *MN = dyn_cast<MachineSDNode>(this)) {
+ if (!MN->memoperands_empty()) {
+ OS << "<";
+ OS << "Mem:";
+ for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
+ e = MN->memoperands_end(); i != e; ++i) {
+ OS << **i;
+ if (llvm::next(i) != e)
+ OS << " ";
+ }
+ OS << ">";
+ }
+ } else if (const ShuffleVectorSDNode *SVN =
+ dyn_cast<ShuffleVectorSDNode>(this)) {
+ OS << "<";
+ for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
+ int Idx = SVN->getMaskElt(i);
+ if (i) OS << ",";
+ if (Idx < 0)
+ OS << "u";
+ else
+ OS << Idx;
+ }
+ OS << ">";
+ } else if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
+ OS << '<' << CSDN->getAPIntValue() << '>';
+ } else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
+ if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEsingle)
+ OS << '<' << CSDN->getValueAPF().convertToFloat() << '>';
+ else if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEdouble)
+ OS << '<' << CSDN->getValueAPF().convertToDouble() << '>';
+ else {
+ OS << "<APFloat(";
+ CSDN->getValueAPF().bitcastToAPInt().dump();
+ OS << ")>";
+ }
+ } else if (const GlobalAddressSDNode *GADN =
+ dyn_cast<GlobalAddressSDNode>(this)) {
+ int64_t offset = GADN->getOffset();
+ OS << '<';
+ WriteAsOperand(OS, GADN->getGlobal());
+ OS << '>';
+ if (offset > 0)
+ OS << " + " << offset;
+ else
+ OS << " " << offset;
+ if (unsigned int TF = GADN->getTargetFlags())
+ OS << " [TF=" << TF << ']';
+ } else if (const FrameIndexSDNode *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
+ OS << "<" << FIDN->getIndex() << ">";
+ } else if (const JumpTableSDNode *JTDN = dyn_cast<JumpTableSDNode>(this)) {
+ OS << "<" << JTDN->getIndex() << ">";
+ if (unsigned int TF = JTDN->getTargetFlags())
+ OS << " [TF=" << TF << ']';
+ } else if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(this)){
+ int offset = CP->getOffset();
+ if (CP->isMachineConstantPoolEntry())
+ OS << "<" << *CP->getMachineCPVal() << ">";
+ else
+ OS << "<" << *CP->getConstVal() << ">";
+ if (offset > 0)
+ OS << " + " << offset;
+ else
+ OS << " " << offset;
+ if (unsigned int TF = CP->getTargetFlags())
+ OS << " [TF=" << TF << ']';
+ } else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
+ OS << "<";
+ const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock();
+ if (LBB)
+ OS << LBB->getName() << " ";
+ OS << (const void*)BBDN->getBasicBlock() << ">";
+ } else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
+ OS << ' ' << PrintReg(R->getReg(), G ? G->getTarget().getRegisterInfo() :0);
+ } else if (const ExternalSymbolSDNode *ES =
+ dyn_cast<ExternalSymbolSDNode>(this)) {
+ OS << "'" << ES->getSymbol() << "'";
+ if (unsigned int TF = ES->getTargetFlags())
+ OS << " [TF=" << TF << ']';
+ } else if (const SrcValueSDNode *M = dyn_cast<SrcValueSDNode>(this)) {
+ if (M->getValue())
+ OS << "<" << M->getValue() << ">";
+ else
+ OS << "<null>";
+ } else if (const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(this)) {
+ if (MD->getMD())
+ OS << "<" << MD->getMD() << ">";
+ else
+ OS << "<null>";
+ } else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
+ OS << ":" << N->getVT().getEVTString();
+ }
+ else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
+ OS << "<" << *LD->getMemOperand();
+
+ bool doExt = true;
+ switch (LD->getExtensionType()) {
+ default: doExt = false; break;
+ case ISD::EXTLOAD: OS << ", anyext"; break;
+ case ISD::SEXTLOAD: OS << ", sext"; break;
+ case ISD::ZEXTLOAD: OS << ", zext"; break;
+ }
+ if (doExt)
+ OS << " from " << LD->getMemoryVT().getEVTString();
+
+ const char *AM = getIndexedModeName(LD->getAddressingMode());
+ if (*AM)
+ OS << ", " << AM;
+
+ OS << ">";
+ } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
+ OS << "<" << *ST->getMemOperand();
+
+ if (ST->isTruncatingStore())
+ OS << ", trunc to " << ST->getMemoryVT().getEVTString();
+
+ const char *AM = getIndexedModeName(ST->getAddressingMode());
+ if (*AM)
+ OS << ", " << AM;
+
+ OS << ">";
+ } else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
+ OS << "<" << *M->getMemOperand() << ">";
+ } else if (const BlockAddressSDNode *BA =
+ dyn_cast<BlockAddressSDNode>(this)) {
+ OS << "<";
+ WriteAsOperand(OS, BA->getBlockAddress()->getFunction(), false);
+ OS << ", ";
+ WriteAsOperand(OS, BA->getBlockAddress()->getBasicBlock(), false);
+ OS << ">";
+ if (unsigned int TF = BA->getTargetFlags())
+ OS << " [TF=" << TF << ']';
+ }
+
+ if (G)
+ if (unsigned Order = G->GetOrdering(this))
+ OS << " [ORD=" << Order << ']';
+
+ if (getNodeId() != -1)
+ OS << " [ID=" << getNodeId() << ']';
+
+ DebugLoc dl = getDebugLoc();
+ if (G && !dl.isUnknown()) {
+ DIScope
+ Scope(dl.getScope(G->getMachineFunction().getFunction()->getContext()));
+ OS << " dbg:";
+ // Omit the directory, since it's usually long and uninteresting.
+ if (Scope.Verify())
+ OS << Scope.getFilename();
+ else
+ OS << "<unknown>";
+ OS << ':' << dl.getLine();
+ if (dl.getCol() != 0)
+ OS << ':' << dl.getCol();
+ }
+}
+
+static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ if (N->getOperand(i).getNode()->hasOneUse())
+ DumpNodes(N->getOperand(i).getNode(), indent+2, G);
+ else
+ dbgs() << "\n" << std::string(indent+2, ' ')
+ << (void*)N->getOperand(i).getNode() << ": <multiple use>";
+
+ dbgs() << '\n';
+ dbgs().indent(indent);
+ N->dump(G);
+}
+
+void SelectionDAG::dump() const {
+ dbgs() << "SelectionDAG has " << AllNodes.size() << " nodes:";
+
+ for (allnodes_const_iterator I = allnodes_begin(), E = allnodes_end();
+ I != E; ++I) {
+ const SDNode *N = I;
+ if (!N->hasOneUse() && N != getRoot().getNode())
+ DumpNodes(N, 2, this);
+ }
+
+ if (getRoot().getNode()) DumpNodes(getRoot().getNode(), 2, this);
+ dbgs() << "\n\n";
+}
+
+void SDNode::printr(raw_ostream &OS, const SelectionDAG *G) const {
+ print_types(OS, G);
+ print_details(OS, G);
+}
+
+typedef SmallPtrSet<const SDNode *, 128> VisitedSDNodeSet;
+static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
+ const SelectionDAG *G, VisitedSDNodeSet &once) {
+ if (!once.insert(N)) // If we've been here before, return now.
+ return;
+
+ // Dump the current SDNode, but don't end the line yet.
+ OS.indent(indent);
+ N->printr(OS, G);
+
+ // Having printed this SDNode, walk the children:
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ const SDNode *child = N->getOperand(i).getNode();
+
+ if (i) OS << ",";
+ OS << " ";
+
+ if (child->getNumOperands() == 0) {
+ // This child has no grandchildren; print it inline right here.
+ child->printr(OS, G);
+ once.insert(child);
+ } else { // Just the address. FIXME: also print the child's opcode.
+ OS << (void*)child;
+ if (unsigned RN = N->getOperand(i).getResNo())
+ OS << ":" << RN;
+ }
+ }
+
+ OS << "\n";
+
+ // Dump children that have grandchildren on their own line(s).
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ const SDNode *child = N->getOperand(i).getNode();
+ DumpNodesr(OS, child, indent+2, G, once);
+ }
+}
+
+void SDNode::dumpr() const {
+ VisitedSDNodeSet once;
+ DumpNodesr(dbgs(), this, 0, 0, once);
+}
+
+void SDNode::dumpr(const SelectionDAG *G) const {
+ VisitedSDNodeSet once;
+ DumpNodesr(dbgs(), this, 0, G, once);
+}
+
+static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
+ const SelectionDAG *G, unsigned depth,
+ unsigned indent) {
+ if (depth == 0)
+ return;
+
+ OS.indent(indent);
+
+ N->print(OS, G);
+
+ if (depth < 1)
+ return;
+
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ // Don't follow chain operands.
+ if (N->getOperand(i).getValueType() == MVT::Other)
+ continue;
+ OS << '\n';
+ printrWithDepthHelper(OS, N->getOperand(i).getNode(), G, depth-1, indent+2);
+ }
+}
+
+void SDNode::printrWithDepth(raw_ostream &OS, const SelectionDAG *G,
+ unsigned depth) const {
+ printrWithDepthHelper(OS, this, G, depth, 0);
+}
+
+void SDNode::printrFull(raw_ostream &OS, const SelectionDAG *G) const {
+ // Don't print impossibly deep things.
+ printrWithDepth(OS, G, 10);
+}
+
+void SDNode::dumprWithDepth(const SelectionDAG *G, unsigned depth) const {
+ printrWithDepth(dbgs(), G, depth);
+}
+
+void SDNode::dumprFull(const SelectionDAG *G) const {
+ // Don't print impossibly deep things.
+ dumprWithDepth(G, 10);
+}
+
+void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
+ print_types(OS, G);
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ if (i) OS << ", "; else OS << " ";
+ OS << (void*)getOperand(i).getNode();
+ if (unsigned RN = getOperand(i).getResNo())
+ OS << ":" << RN;
+ }
+ print_details(OS, G);
+}
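Everything in this new file is debug output; a hypothetical snippet showing how the entry points defined above are meant to be called, say from a target's instruction-selection hook while debugging:

  #include "llvm/CodeGen/SelectionDAG.h"
  #include "llvm/Support/Debug.h"
  using namespace llvm;

  static void inspect(SDNode *N, const SelectionDAG &DAG) {
    N->dump(&DAG);                        // one node: values, opcode, operands
    N->dumpr(&DAG);                       // the node plus its operand tree
    N->printrWithDepth(dbgs(), &DAG, 3);  // same, but cut off at depth 3
    DAG.dump();                           // every node, root printed last
  }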
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 68b9146..605509b 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -41,6 +41,7 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -61,6 +62,80 @@ STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
+#ifndef NDEBUG
+static cl::opt<bool>
+EnableFastISelVerbose2("fast-isel-verbose2", cl::Hidden,
+ cl::desc("Enable extra verbose messages in the \"fast\" "
+ "instruction selector"));
+ // Terminators
+STATISTIC(NumFastIselFailRet,"Fast isel fails on Ret");
+STATISTIC(NumFastIselFailBr,"Fast isel fails on Br");
+STATISTIC(NumFastIselFailSwitch,"Fast isel fails on Switch");
+STATISTIC(NumFastIselFailIndirectBr,"Fast isel fails on IndirectBr");
+STATISTIC(NumFastIselFailInvoke,"Fast isel fails on Invoke");
+STATISTIC(NumFastIselFailResume,"Fast isel fails on Resume");
+STATISTIC(NumFastIselFailUnreachable,"Fast isel fails on Unreachable");
+
+ // Standard binary operators...
+STATISTIC(NumFastIselFailAdd,"Fast isel fails on Add");
+STATISTIC(NumFastIselFailFAdd,"Fast isel fails on FAdd");
+STATISTIC(NumFastIselFailSub,"Fast isel fails on Sub");
+STATISTIC(NumFastIselFailFSub,"Fast isel fails on FSub");
+STATISTIC(NumFastIselFailMul,"Fast isel fails on Mul");
+STATISTIC(NumFastIselFailFMul,"Fast isel fails on FMul");
+STATISTIC(NumFastIselFailUDiv,"Fast isel fails on UDiv");
+STATISTIC(NumFastIselFailSDiv,"Fast isel fails on SDiv");
+STATISTIC(NumFastIselFailFDiv,"Fast isel fails on FDiv");
+STATISTIC(NumFastIselFailURem,"Fast isel fails on URem");
+STATISTIC(NumFastIselFailSRem,"Fast isel fails on SRem");
+STATISTIC(NumFastIselFailFRem,"Fast isel fails on FRem");
+
+ // Logical operators...
+STATISTIC(NumFastIselFailAnd,"Fast isel fails on And");
+STATISTIC(NumFastIselFailOr,"Fast isel fails on Or");
+STATISTIC(NumFastIselFailXor,"Fast isel fails on Xor");
+
+ // Memory instructions...
+STATISTIC(NumFastIselFailAlloca,"Fast isel fails on Alloca");
+STATISTIC(NumFastIselFailLoad,"Fast isel fails on Load");
+STATISTIC(NumFastIselFailStore,"Fast isel fails on Store");
+STATISTIC(NumFastIselFailAtomicCmpXchg,"Fast isel fails on AtomicCmpXchg");
+STATISTIC(NumFastIselFailAtomicRMW,"Fast isel fails on AtomicRWM");
+STATISTIC(NumFastIselFailFence,"Fast isel fails on Frence");
+STATISTIC(NumFastIselFailGetElementPtr,"Fast isel fails on GetElementPtr");
+
+ // Convert instructions...
+STATISTIC(NumFastIselFailTrunc,"Fast isel fails on Trunc");
+STATISTIC(NumFastIselFailZExt,"Fast isel fails on ZExt");
+STATISTIC(NumFastIselFailSExt,"Fast isel fails on SExt");
+STATISTIC(NumFastIselFailFPTrunc,"Fast isel fails on FPTrunc");
+STATISTIC(NumFastIselFailFPExt,"Fast isel fails on FPExt");
+STATISTIC(NumFastIselFailFPToUI,"Fast isel fails on FPToUI");
+STATISTIC(NumFastIselFailFPToSI,"Fast isel fails on FPToSI");
+STATISTIC(NumFastIselFailUIToFP,"Fast isel fails on UIToFP");
+STATISTIC(NumFastIselFailSIToFP,"Fast isel fails on SIToFP");
+STATISTIC(NumFastIselFailIntToPtr,"Fast isel fails on IntToPtr");
+STATISTIC(NumFastIselFailPtrToInt,"Fast isel fails on PtrToInt");
+STATISTIC(NumFastIselFailBitCast,"Fast isel fails on BitCast");
+
+ // Other instructions...
+STATISTIC(NumFastIselFailICmp,"Fast isel fails on ICmp");
+STATISTIC(NumFastIselFailFCmp,"Fast isel fails on FCmp");
+STATISTIC(NumFastIselFailPHI,"Fast isel fails on PHI");
+STATISTIC(NumFastIselFailSelect,"Fast isel fails on Select");
+STATISTIC(NumFastIselFailCall,"Fast isel fails on Call");
+STATISTIC(NumFastIselFailShl,"Fast isel fails on Shl");
+STATISTIC(NumFastIselFailLShr,"Fast isel fails on LShr");
+STATISTIC(NumFastIselFailAShr,"Fast isel fails on AShr");
+STATISTIC(NumFastIselFailVAArg,"Fast isel fails on VAArg");
+STATISTIC(NumFastIselFailExtractElement,"Fast isel fails on ExtractElement");
+STATISTIC(NumFastIselFailInsertElement,"Fast isel fails on InsertElement");
+STATISTIC(NumFastIselFailShuffleVector,"Fast isel fails on ShuffleVector");
+STATISTIC(NumFastIselFailExtractValue,"Fast isel fails on ExtractValue");
+STATISTIC(NumFastIselFailInsertValue,"Fast isel fails on InsertValue");
+STATISTIC(NumFastIselFailLandingPad,"Fast isel fails on LandingPad");
+#endif
+
static cl::opt<bool>
EnableFastISelVerbose("fast-isel-verbose", cl::Hidden,
cl::desc("Enable verbose messages in the \"fast\" "
@@ -142,14 +217,15 @@ namespace llvm {
CodeGenOpt::Level OptLevel) {
const TargetLowering &TLI = IS->getTargetLowering();
- if (OptLevel == CodeGenOpt::None)
+ if (OptLevel == CodeGenOpt::None ||
+ TLI.getSchedulingPreference() == Sched::Source)
return createSourceListDAGScheduler(IS, OptLevel);
- if (TLI.getSchedulingPreference() == Sched::Latency)
- return createTDListDAGScheduler(IS, OptLevel);
if (TLI.getSchedulingPreference() == Sched::RegPressure)
return createBURRListDAGScheduler(IS, OptLevel);
if (TLI.getSchedulingPreference() == Sched::Hybrid)
return createHybridListDAGScheduler(IS, OptLevel);
+ if (TLI.getSchedulingPreference() == Sched::VLIW)
+ return createVLIWDAGScheduler(IS, OptLevel);
assert(TLI.getSchedulingPreference() == Sched::ILP &&
"Unknown sched type!");
return createILPListDAGScheduler(IS, OptLevel);
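The dispatch now reads: -O0 and an explicit Sched::Source preference share the source-order scheduler, the old top-down latency scheduler is gone, and Sched::VLIW is new. The same logic in switch form (a sketch; the real code is the if-chain above):

  switch (TLI.getSchedulingPreference()) {
  case Sched::Source:      // also forced whenever OptLevel == CodeGenOpt::None
    return createSourceListDAGScheduler(IS, OptLevel);
  case Sched::RegPressure: return createBURRListDAGScheduler(IS, OptLevel);
  case Sched::Hybrid:      return createHybridListDAGScheduler(IS, OptLevel);
  case Sched::VLIW:        return createVLIWDAGScheduler(IS, OptLevel);
  case Sched::ILP:         return createILPListDAGScheduler(IS, OptLevel);
  }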
@@ -174,12 +250,11 @@ TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
"TargetLowering::EmitInstrWithCustomInserter!";
#endif
llvm_unreachable(0);
- return 0;
}
void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
SDNode *Node) const {
- assert(!MI->getDesc().hasPostISelHook() &&
+ assert(!MI->hasPostISelHook() &&
"If a target marks an instruction with 'hasPostISelHook', "
"it must implement TargetLowering::AdjustInstrPostInstrSelection!");
}
@@ -188,11 +263,13 @@ void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
// SelectionDAGISel code
//===----------------------------------------------------------------------===//
+void SelectionDAGISel::ISelUpdater::anchor() { }
+
SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm,
CodeGenOpt::Level OL) :
MachineFunctionPass(ID), TM(tm), TLI(*tm.getTargetLowering()),
FuncInfo(new FunctionLoweringInfo(TLI)),
- CurDAG(new SelectionDAG(tm)),
+ CurDAG(new SelectionDAG(tm, OL)),
SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
GFI(),
OptLevel(OL),
@@ -200,6 +277,7 @@ SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm,
initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
initializeAliasAnalysisAnalysisGroup(*PassRegistry::getPassRegistry());
initializeBranchProbabilityInfoPass(*PassRegistry::getPassRegistry());
+ initializeTargetLibraryInfoPass(*PassRegistry::getPassRegistry());
}
SelectionDAGISel::~SelectionDAGISel() {
@@ -213,6 +291,7 @@ void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<AliasAnalysis>();
AU.addRequired<GCModuleInfo>();
AU.addPreserved<GCModuleInfo>();
+ AU.addRequired<TargetLibraryInfo>();
if (UseMBPI && OptLevel != CodeGenOpt::None)
AU.addRequired<BranchProbabilityInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -258,9 +337,9 @@ static void SplitCriticalSideEffectEdges(Function &Fn, Pass *SDISel) {
bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Do some sanity-checking on the command-line options.
- assert((!EnableFastISelVerbose || EnableFastISel) &&
+ assert((!EnableFastISelVerbose || TM.Options.EnableFastISel) &&
"-fast-isel-verbose requires -fast-isel");
- assert((!EnableFastISelAbort || EnableFastISel) &&
+ assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
"-fast-isel-abort requires -fast-isel");
const Function &Fn = *mf.getFunction();
@@ -270,6 +349,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
RegInfo = &MF->getRegInfo();
AA = &getAnalysis<AliasAnalysis>();
+ LibInfo = &getAnalysis<TargetLibraryInfo>();
GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : 0;
DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
@@ -284,7 +364,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
else
FuncInfo->BPI = 0;
- SDB->init(GFI, *AA);
+ SDB->init(GFI, *AA, LibInfo);
SelectAllBasicBlocks(Fn);
@@ -348,7 +428,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
TII.get(TargetOpcode::DBG_VALUE))
.addReg(CopyUseMI->getOperand(0).getReg(), RegState::Debug)
.addImm(Offset).addMetadata(Variable);
- EntryMBB->insertAfter(CopyUseMI, NewMI);
+ MachineBasicBlock::iterator Pos = CopyUseMI;
+ EntryMBB->insertAfter(Pos, NewMI);
}
}
}
@@ -374,7 +455,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
}
// Determine if there is a call to setjmp in the machine function.
- MF->setCallsSetJmp(Fn.callsFunctionThatReturnsTwice());
+ MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
// Replace forward-declared registers with the registers containing
// the desired value.
@@ -427,7 +508,6 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
Worklist.push_back(CurDAG->getRoot().getNode());
- APInt Mask;
APInt KnownZero;
APInt KnownOne;
@@ -458,8 +538,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
continue;
unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
- Mask = APInt::getAllOnesValue(SrcVT.getSizeInBits());
- CurDAG->ComputeMaskedBits(Src, Mask, KnownZero, KnownOne);
+ CurDAG->ComputeMaskedBits(Src, KnownZero, KnownOne);
FuncInfo->AddLiveOutRegInfo(DestReg, NumSignBits, KnownZero, KnownOne);
} while (!Worklist.empty());
}
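ComputeMaskedBits loses its mask parameter here and again in CheckOrMask below; callers no longer pre-build an all-ones APInt. Before and after:

  APInt KnownZero, KnownOne;
  // old: APInt Mask = APInt::getAllOnesValue(SrcVT.getSizeInBits());
  //      CurDAG->ComputeMaskedBits(Src, Mask, KnownZero, KnownOne);
  CurDAG->ComputeMaskedBits(Src, KnownZero, KnownOne);  // all bits implied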
@@ -478,8 +557,8 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
#endif
{
BlockNumber = FuncInfo->MBB->getNumber();
- BlockName = MF->getFunction()->getNameStr() + ":" +
- FuncInfo->MBB->getBasicBlock()->getNameStr();
+ BlockName = MF->getFunction()->getName().str() + ":" +
+ FuncInfo->MBB->getBasicBlock()->getName().str();
}
DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
@@ -489,7 +568,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
// Run the DAG combiner in pre-legalize mode.
{
NamedRegionTimer T("DAG Combining 1", GroupName, TimePassesIsEnabled);
- CurDAG->Combine(Unrestricted, *AA, OptLevel);
+ CurDAG->Combine(BeforeLegalizeTypes, *AA, OptLevel);
}
DEBUG(dbgs() << "Optimized lowered selection DAG: BB#" << BlockNumber
@@ -517,7 +596,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
{
NamedRegionTimer T("DAG Combining after legalize types", GroupName,
TimePassesIsEnabled);
- CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
+ CurDAG->Combine(AfterLegalizeTypes, *AA, OptLevel);
}
DEBUG(dbgs() << "Optimized type-legalized selection DAG: BB#" << BlockNumber
@@ -542,7 +621,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
{
NamedRegionTimer T("DAG Combining after legalize vectors", GroupName,
TimePassesIsEnabled);
- CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
+ CurDAG->Combine(AfterLegalizeVectorOps, *AA, OptLevel);
}
DEBUG(dbgs() << "Optimized vector-legalized selection DAG: BB#"
@@ -564,7 +643,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
// Run the DAG combiner in post-legalize mode.
{
NamedRegionTimer T("DAG Combining 2", GroupName, TimePassesIsEnabled);
- CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
+ CurDAG->Combine(AfterLegalizeDAG, *AA, OptLevel);
}
DEBUG(dbgs() << "Optimized legalized selection DAG: BB#" << BlockNumber
@@ -592,7 +671,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
{
NamedRegionTimer T("Instruction Scheduling", GroupName,
TimePassesIsEnabled);
- Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt);
+ Scheduler->Run(CurDAG, FuncInfo->MBB);
}
if (ViewSUnitDAGs) Scheduler->viewGraph();
@@ -603,8 +682,9 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
{
NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
- LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule();
- FuncInfo->InsertPt = Scheduler->InsertPos;
+ // FuncInfo->InsertPt is passed by reference and set to the end of the
+ // scheduled instructions.
+ LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(FuncInfo->InsertPt);
}
// If the block was split, make sure we update any references that are used to
@@ -693,43 +773,18 @@ void SelectionDAGISel::PrepareEHLandingPad() {
// Assign the call site to the landing pad's begin label.
MF->getMMI().setCallSiteLandingPad(Label, SDB->LPadToCallSiteMap[MBB]);
-
+
const MCInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
.addSym(Label);
// Mark exception register as live in.
- unsigned Reg = TLI.getExceptionAddressRegister();
+ unsigned Reg = TLI.getExceptionPointerRegister();
if (Reg) MBB->addLiveIn(Reg);
// Mark exception selector register as live in.
Reg = TLI.getExceptionSelectorRegister();
if (Reg) MBB->addLiveIn(Reg);
-
- // FIXME: Hack around an exception handling flaw (PR1508): the personality
- // function and list of typeids logically belong to the invoke (or, if you
- // like, the basic block containing the invoke), and need to be associated
- // with it in the dwarf exception handling tables. Currently however the
- // information is provided by an intrinsic (eh.selector) that can be moved
- // to unexpected places by the optimizers: if the unwind edge is critical,
- // then breaking it can result in the intrinsics being in the successor of
- // the landing pad, not the landing pad itself. This results
- // in exceptions not being caught because no typeids are associated with
- // the invoke. This may not be the only way things can go wrong, but it
- // is the only way we try to work around for the moment.
- const BasicBlock *LLVMBB = MBB->getBasicBlock();
- const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
-
- if (Br && Br->isUnconditional()) { // Critical edge?
- BasicBlock::const_iterator I, E;
- for (I = LLVMBB->begin(), E = --LLVMBB->end(); I != E; ++I)
- if (isa<EHSelectorInst>(I))
- break;
-
- if (I == E)
- // No catch info found - try to extract some from the successor.
- CopyCatchInfo(Br->getSuccessor(0), LLVMBB, &MF->getMMI(), *FuncInfo);
- }
}
/// TryToFoldFastISelLoad - We're checking to see if we can fold the specified
@@ -822,10 +877,90 @@ static bool isFoldedOrDeadInstruction(const Instruction *I,
!FuncInfo->isExportedInst(I); // Exported instrs must be computed.
}
+#ifndef NDEBUG
+// Collect per-instruction statistics for fast-isel misses. Only the
+// instruction that causes the bail is accounted for; instructions higher in
+// the block are not. Thus the per-instruction stats will not sum to what
+// NumFastIselFailures reports.
+static void collectFailStats(const Instruction *I) {
+ switch (I->getOpcode()) {
+ default: assert(0 && "<Invalid operator>");
+
+ // Terminators
+ case Instruction::Ret: NumFastIselFailRet++; return;
+ case Instruction::Br: NumFastIselFailBr++; return;
+ case Instruction::Switch: NumFastIselFailSwitch++; return;
+ case Instruction::IndirectBr: NumFastIselFailIndirectBr++; return;
+ case Instruction::Invoke: NumFastIselFailInvoke++; return;
+ case Instruction::Resume: NumFastIselFailResume++; return;
+ case Instruction::Unreachable: NumFastIselFailUnreachable++; return;
+
+ // Standard binary operators...
+ case Instruction::Add: NumFastIselFailAdd++; return;
+ case Instruction::FAdd: NumFastIselFailFAdd++; return;
+ case Instruction::Sub: NumFastIselFailSub++; return;
+ case Instruction::FSub: NumFastIselFailFSub++; return;
+ case Instruction::Mul: NumFastIselFailMul++; return;
+ case Instruction::FMul: NumFastIselFailFMul++; return;
+ case Instruction::UDiv: NumFastIselFailUDiv++; return;
+ case Instruction::SDiv: NumFastIselFailSDiv++; return;
+ case Instruction::FDiv: NumFastIselFailFDiv++; return;
+ case Instruction::URem: NumFastIselFailURem++; return;
+ case Instruction::SRem: NumFastIselFailSRem++; return;
+ case Instruction::FRem: NumFastIselFailFRem++; return;
+
+ // Logical operators...
+ case Instruction::And: NumFastIselFailAnd++; return;
+ case Instruction::Or: NumFastIselFailOr++; return;
+ case Instruction::Xor: NumFastIselFailXor++; return;
+
+ // Memory instructions...
+ case Instruction::Alloca: NumFastIselFailAlloca++; return;
+ case Instruction::Load: NumFastIselFailLoad++; return;
+ case Instruction::Store: NumFastIselFailStore++; return;
+ case Instruction::AtomicCmpXchg: NumFastIselFailAtomicCmpXchg++; return;
+ case Instruction::AtomicRMW: NumFastIselFailAtomicRMW++; return;
+ case Instruction::Fence: NumFastIselFailFence++; return;
+ case Instruction::GetElementPtr: NumFastIselFailGetElementPtr++; return;
+
+ // Convert instructions...
+ case Instruction::Trunc: NumFastIselFailTrunc++; return;
+ case Instruction::ZExt: NumFastIselFailZExt++; return;
+ case Instruction::SExt: NumFastIselFailSExt++; return;
+ case Instruction::FPTrunc: NumFastIselFailFPTrunc++; return;
+ case Instruction::FPExt: NumFastIselFailFPExt++; return;
+ case Instruction::FPToUI: NumFastIselFailFPToUI++; return;
+ case Instruction::FPToSI: NumFastIselFailFPToSI++; return;
+ case Instruction::UIToFP: NumFastIselFailUIToFP++; return;
+ case Instruction::SIToFP: NumFastIselFailSIToFP++; return;
+ case Instruction::IntToPtr: NumFastIselFailIntToPtr++; return;
+ case Instruction::PtrToInt: NumFastIselFailPtrToInt++; return;
+ case Instruction::BitCast: NumFastIselFailBitCast++; return;
+
+ // Other instructions...
+ case Instruction::ICmp: NumFastIselFailICmp++; return;
+ case Instruction::FCmp: NumFastIselFailFCmp++; return;
+ case Instruction::PHI: NumFastIselFailPHI++; return;
+ case Instruction::Select: NumFastIselFailSelect++; return;
+ case Instruction::Call: NumFastIselFailCall++; return;
+ case Instruction::Shl: NumFastIselFailShl++; return;
+ case Instruction::LShr: NumFastIselFailLShr++; return;
+ case Instruction::AShr: NumFastIselFailAShr++; return;
+ case Instruction::VAArg: NumFastIselFailVAArg++; return;
+ case Instruction::ExtractElement: NumFastIselFailExtractElement++; return;
+ case Instruction::InsertElement: NumFastIselFailInsertElement++; return;
+ case Instruction::ShuffleVector: NumFastIselFailShuffleVector++; return;
+ case Instruction::ExtractValue: NumFastIselFailExtractValue++; return;
+ case Instruction::InsertValue: NumFastIselFailInsertValue++; return;
+ case Instruction::LandingPad: NumFastIselFailLandingPad++; return;
+ }
+}
+#endif
+
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = 0;
- if (EnableFastISel)
+ if (TM.Options.EnableFastISel)
FastIS = TLI.createFastISel(*FuncInfo);
// Iterate over all basic blocks in the function.
@@ -894,13 +1029,16 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FastIS->setLastLocalValue(0);
}
+ unsigned NumFastIselRemaining = std::distance(Begin, End);
// Do FastISel on as many instructions as possible.
for (; BI != Begin; --BI) {
const Instruction *Inst = llvm::prior(BI);
// If we no longer require this instruction, skip it.
- if (isFoldedOrDeadInstruction(Inst, FuncInfo))
+ if (isFoldedOrDeadInstruction(Inst, FuncInfo)) {
+ --NumFastIselRemaining;
continue;
+ }
// Bottom-up: reset the insert pos at the top, after any local-value
// instructions.
@@ -908,6 +1046,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Try to select the instruction with FastISel.
if (FastIS->SelectInstruction(Inst)) {
+ --NumFastIselRemaining;
++NumFastIselSuccess;
// If fast isel succeeded, skip over all the folded instructions, and
// then see if there is a load right before the selected instructions.
@@ -920,15 +1059,23 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
}
if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
BeforeInst->hasOneUse() &&
- TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), Inst, FastIS))
+ TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), Inst, FastIS)) {
// If we succeeded, don't re-select the load.
BI = llvm::next(BasicBlock::const_iterator(BeforeInst));
+ --NumFastIselRemaining;
+ ++NumFastIselSuccess;
+ }
continue;
}
+#ifndef NDEBUG
+ if (EnableFastISelVerbose2)
+ collectFailStats(Inst);
+#endif
+
// Then handle certain instructions as single-LLVM-Instruction blocks.
if (isa<CallInst>(Inst)) {
- ++NumFastIselFailures;
+
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel missed call: ";
Inst->dump();
@@ -943,24 +1090,30 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
bool HadTailCall = false;
SelectBasicBlock(Inst, BI, HadTailCall);
+ // Recompute NumFastIselRemaining as Selection DAG instruction
+ // selection may have handled the call, input args, etc.
+ unsigned RemainingNow = std::distance(Begin, BI);
+ NumFastIselFailures += NumFastIselRemaining - RemainingNow;
+
// If the call was emitted as a tail call, we're done with the block.
if (HadTailCall) {
--BI;
break;
}
+ NumFastIselRemaining = RemainingNow;
continue;
}
if (isa<TerminatorInst>(Inst) && !isa<BranchInst>(Inst)) {
// Don't abort, and use a different message for terminator misses.
- ++NumFastIselFailures;
+ NumFastIselFailures += NumFastIselRemaining;
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel missed terminator: ";
Inst->dump();
}
} else {
- ++NumFastIselFailures;
+ NumFastIselFailures += NumFastIselRemaining;
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel miss: ";
Inst->dump();
@@ -1289,7 +1442,7 @@ bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
APInt NeededMask = DesiredMask & ~ActualMask;
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);
+ CurDAG->ComputeMaskedBits(LHS, KnownZero, KnownOne);
// If all the missing bits in the or are already known to be set, match!
if ((NeededMask & KnownOne) == NeededMask)
@@ -2025,6 +2178,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case ISD::EntryToken: // These nodes remain the same.
case ISD::BasicBlock:
case ISD::Register:
+ case ISD::RegisterMask:
//case ISD::VALUETYPE:
//case ISD::CONDCODE:
case ISD::HANDLENODE:
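
The NumFastIselRemaining accounting introduced above charges fast-isel with every instruction the DAG selector has to pick up after a miss, rather than one failure per missed call. A worked example of that arithmetic in plain C++ (the instruction counts are invented; only the counter updates mirror the patch):

    #include <iostream>

    int main() {
      // A basic block with 10 instructions, selected bottom-up.
      unsigned NumFastIselRemaining = 10; // std::distance(Begin, End)
      unsigned NumFastIselSuccess = 0, NumFastIselFailures = 0;

      // Fast-isel handles the bottom three instructions one at a time.
      for (int i = 0; i < 3; ++i) { --NumFastIselRemaining; ++NumFastIselSuccess; }

      // It then misses a call. SelectBasicBlock takes over and consumes the
      // call plus its four argument-setup instructions, leaving three above.
      unsigned RemainingNow = 3;                                  // std::distance(Begin, BI)
      NumFastIselFailures += NumFastIselRemaining - RemainingNow; // 7 - 3 = 4 misses
      NumFastIselRemaining = RemainingNow;

      std::cout << NumFastIselSuccess << " selected fast, "
                << NumFastIselFailures << " fell back\n"; // 3 selected fast, 4 fell back
      return 0;
    }
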
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index cd1647b..6cde05a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -19,7 +19,6 @@
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -28,7 +27,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Config/config.h"
using namespace llvm;
namespace llvm {
@@ -148,7 +146,7 @@ std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node,
void SelectionDAG::viewGraph(const std::string &Title) {
// This code is only for debugging!
#ifndef NDEBUG
- ViewGraph(this, "dag." + getMachineFunction().getFunction()->getNameStr(),
+ ViewGraph(this, "dag." + getMachineFunction().getFunction()->getName(),
false, Title);
#else
errs() << "SelectionDAG::viewGraph is only available in debug builds on "
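
The getNameStr() to getName() change is part of the tree-wide switch from returning names as freshly allocated std::string copies to returning a non-owning StringRef (ViewGraph then builds the "dag." label lazily through Twine). A sketch of the difference, using std::string_view as a stand-in for StringRef; the C++17 type is an assumption of the sketch, not of the tree:

    #include <iostream>
    #include <string>
    #include <string_view>

    // getName()-style accessor: no allocation, just a view of stored bytes.
    std::string_view getName(const std::string &Storage) { return Storage; }

    int main() {
      std::string FnName = "main";
      // Materialize "dag.<name>" only where the full label is needed.
      std::string Label = "dag." + std::string(getName(FnName));
      std::cout << Label << "\n"; // dag.main
      return 0;
    }
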
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 907d8d9..09a2b1f 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -36,31 +36,9 @@ using namespace llvm;
-/// - the promotion of vector elements. This feature is disabled by default
-/// and only enabled using this flag.
+/// - the promotion of vector elements. This feature is enabled by default
+/// and can be disabled using this flag.
static cl::opt<bool>
-AllowPromoteIntElem("promote-elements", cl::Hidden,
+AllowPromoteIntElem("promote-elements", cl::Hidden, cl::init(true),
cl::desc("Allow promotion of integer vector element types"));
-namespace llvm {
-TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc) {
- bool isLocal = GV->hasLocalLinkage();
- bool isDeclaration = GV->isDeclaration();
- // FIXME: what should we do for protected and internal visibility?
- // For variables, is internal different from hidden?
- bool isHidden = GV->hasHiddenVisibility();
-
- if (reloc == Reloc::PIC_) {
- if (isLocal || isHidden)
- return TLSModel::LocalDynamic;
- else
- return TLSModel::GeneralDynamic;
- } else {
- if (!isDeclaration || isHidden)
- return TLSModel::LocalExec;
- else
- return TLSModel::InitialExec;
- }
-}
-}
-
/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names) {
@@ -572,21 +550,42 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
// ConstantFP nodes default to expand. Targets can either change this to
// Legal, in which case all fp constants are legal, or use isFPImmLegal()
// to optimize expansions for certain constants.
+ setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
// These library functions default to expand.
- setOperationAction(ISD::FLOG , MVT::f64, Expand);
- setOperationAction(ISD::FLOG2, MVT::f64, Expand);
- setOperationAction(ISD::FLOG10,MVT::f64, Expand);
- setOperationAction(ISD::FEXP , MVT::f64, Expand);
- setOperationAction(ISD::FEXP2, MVT::f64, Expand);
- setOperationAction(ISD::FLOG , MVT::f32, Expand);
- setOperationAction(ISD::FLOG2, MVT::f32, Expand);
- setOperationAction(ISD::FLOG10,MVT::f32, Expand);
- setOperationAction(ISD::FEXP , MVT::f32, Expand);
- setOperationAction(ISD::FEXP2, MVT::f32, Expand);
+ setOperationAction(ISD::FLOG , MVT::f16, Expand);
+ setOperationAction(ISD::FLOG2, MVT::f16, Expand);
+ setOperationAction(ISD::FLOG10, MVT::f16, Expand);
+ setOperationAction(ISD::FEXP , MVT::f16, Expand);
+ setOperationAction(ISD::FEXP2, MVT::f16, Expand);
+ setOperationAction(ISD::FFLOOR, MVT::f16, Expand);
+ setOperationAction(ISD::FNEARBYINT, MVT::f16, Expand);
+ setOperationAction(ISD::FCEIL, MVT::f16, Expand);
+ setOperationAction(ISD::FRINT, MVT::f16, Expand);
+ setOperationAction(ISD::FTRUNC, MVT::f16, Expand);
+ setOperationAction(ISD::FLOG , MVT::f32, Expand);
+ setOperationAction(ISD::FLOG2, MVT::f32, Expand);
+ setOperationAction(ISD::FLOG10, MVT::f32, Expand);
+ setOperationAction(ISD::FEXP , MVT::f32, Expand);
+ setOperationAction(ISD::FEXP2, MVT::f32, Expand);
+ setOperationAction(ISD::FFLOOR, MVT::f32, Expand);
+ setOperationAction(ISD::FNEARBYINT, MVT::f32, Expand);
+ setOperationAction(ISD::FCEIL, MVT::f32, Expand);
+ setOperationAction(ISD::FRINT, MVT::f32, Expand);
+ setOperationAction(ISD::FTRUNC, MVT::f32, Expand);
+ setOperationAction(ISD::FLOG , MVT::f64, Expand);
+ setOperationAction(ISD::FLOG2, MVT::f64, Expand);
+ setOperationAction(ISD::FLOG10, MVT::f64, Expand);
+ setOperationAction(ISD::FEXP , MVT::f64, Expand);
+ setOperationAction(ISD::FEXP2, MVT::f64, Expand);
+ setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
+ setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
+ setOperationAction(ISD::FCEIL, MVT::f64, Expand);
+ setOperationAction(ISD::FRINT, MVT::f64, Expand);
+ setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
// Default ISD::TRAP to expand (which turns it into abort).
setOperationAction(ISD::TRAP, MVT::Other, Expand);
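
The enlarged block above extends the constructor's defaults to MVT::f16 and adds the floor/nearbyint/ceil/rint/trunc family for every FP type. Underneath, setOperationAction fills a per-(opcode, type) table that legalization later consults and that target constructors may overwrite; a minimal plain-C++ sketch of that mechanism, with invented enum values rather than LLVM's:

    #include <cstdint>

    enum Action : uint8_t { Legal = 0, Promote, Expand, Custom };
    enum SimpleVT : uint8_t { f16, f32, f64, f80, NumVTs };
    enum OpKind : uint8_t { FLOG, FEXP, FFLOOR, FCEIL, NumOps };

    static Action OpActions[NumOps][NumVTs]; // zero-initialized: all Legal

    static void setOperationAction(OpKind Op, SimpleVT VT, Action A) {
      OpActions[Op][VT] = A;
    }

    int main() {
      // Defaults set by the common constructor; a target may override later.
      setOperationAction(FFLOOR, f16, Expand);
      setOperationAction(FFLOOR, f32, Expand);
      return OpActions[FFLOOR][f16] == Expand ? 0 : 1;
    }
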
@@ -610,7 +609,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
ExceptionSelectorRegister = 0;
BooleanContents = UndefinedBooleanContent;
BooleanVectorContents = UndefinedBooleanContent;
- SchedPreferenceInfo = Sched::Latency;
+ SchedPreferenceInfo = Sched::ILP;
JumpBufSize = 0;
JumpBufAlignment = 0;
MinFunctionAlignment = 0;
@@ -1080,8 +1079,12 @@ unsigned TargetLowering::getJumpTableEncoding() const {
SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
// If our PIC model is GP relative, use the global offset table as the base.
- if (getJumpTableEncoding() == MachineJumpTableInfo::EK_GPRel32BlockAddress)
+ unsigned JTEncoding = getJumpTableEncoding();
+
+ if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
+ (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy());
+
return Table;
}
@@ -1223,7 +1226,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (Depth != 0) {
// If not at the root, Just compute the KnownZero/KnownOne bits to
// simplify things downstream.
- TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
+ TLO.DAG.ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
return false;
}
// If this is the root being simplified, allow it to have multiple uses,
@@ -1242,8 +1245,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
switch (Op.getOpcode()) {
case ISD::Constant:
// We know all of the bits for a constant!
- KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask;
- KnownZero = ~KnownOne & NewMask;
+ KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
+ KnownZero = ~KnownOne;
return false; // Don't fall through, will infinitely loop.
case ISD::AND:
// If the RHS is a constant, check to see if the LHS would be zero without
@@ -1253,8 +1256,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
APInt LHSZero, LHSOne;
// Do not increment Depth here; that can cause an infinite loop.
- TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
- LHSZero, LHSOne, Depth);
+ TLO.DAG.ComputeMaskedBits(Op.getOperand(0), LHSZero, LHSOne, Depth);
// If the LHS already has zeros where RHSC does, this and is dead.
if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
@@ -1473,9 +1475,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
SDValue InnerOp = InOp.getNode()->getOperand(0);
EVT InnerVT = InnerOp.getValueType();
- if ((APInt::getHighBitsSet(BitWidth,
- BitWidth - InnerVT.getSizeInBits()) &
- DemandedMask) == 0 &&
+ unsigned InnerBits = InnerVT.getSizeInBits();
+ if (ShAmt < InnerBits && NewMask.lshr(InnerBits) == 0 &&
isTypeDesirableForOp(ISD::SHL, InnerVT)) {
EVT ShTy = getShiftAmountTy(InnerVT);
if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
@@ -1545,7 +1546,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// always convert this into a logical shr, even if the shift amount is
// variable. The low bit of the shift cannot be an input sign bit unless
// the shift amount is >= the size of the datatype, which is undefined.
- if (DemandedMask == 1)
+ if (NewMask == 1)
return TLO.CombineTo(Op,
TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
Op.getOperand(0), Op.getOperand(1)));
@@ -1588,23 +1589,40 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
}
break;
case ISD::SIGN_EXTEND_INREG: {
- EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+
+ APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
+ // If we only care about the highest bit, don't bother shifting right.
+ if (MsbMask == DemandedMask) {
+ unsigned ShAmt = ExVT.getScalarType().getSizeInBits();
+ SDValue InOp = Op.getOperand(0);
+
+ // Compute the correct shift amount type, which must be getShiftAmountTy
+ // for scalar types after legalization.
+ EVT ShiftAmtTy = Op.getValueType();
+ if (TLO.LegalTypes() && !ShiftAmtTy.isVector())
+ ShiftAmtTy = getShiftAmountTy(ShiftAmtTy);
+
+ SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, ShiftAmtTy);
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl,
+ Op.getValueType(), InOp, ShiftAmt));
+ }
// Sign extension. Compute the demanded bits in the result that are not
// present in the input.
APInt NewBits =
APInt::getHighBitsSet(BitWidth,
- BitWidth - EVT.getScalarType().getSizeInBits());
+ BitWidth - ExVT.getScalarType().getSizeInBits());
// If none of the extended bits are demanded, eliminate the sextinreg.
if ((NewBits & NewMask) == 0)
return TLO.CombineTo(Op, Op.getOperand(0));
APInt InSignBit =
- APInt::getSignBit(EVT.getScalarType().getSizeInBits()).zext(BitWidth);
+ APInt::getSignBit(ExVT.getScalarType().getSizeInBits()).zext(BitWidth);
APInt InputDemandedBits =
APInt::getLowBitsSet(BitWidth,
- EVT.getScalarType().getSizeInBits()) &
+ ExVT.getScalarType().getSizeInBits()) &
NewMask;
// Since the sign extended bits are demanded, we know that the sign
@@ -1622,7 +1640,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// If the input sign bit is known zero, convert this into a zero extension.
if (KnownZero.intersects(InSignBit))
return TLO.CombineTo(Op,
- TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,EVT));
+ TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,ExVT));
if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
KnownOne |= NewBits;
@@ -1688,11 +1706,11 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// If the sign bit is known one, the top bits match.
if (KnownOne.intersects(InSignBit)) {
- KnownOne |= NewBits;
- KnownZero &= ~NewBits;
+ KnownOne |= NewBits;
+ assert((KnownZero & NewBits) == 0);
} else { // Otherwise, top bits aren't known.
- KnownOne &= ~NewBits;
- KnownZero &= ~NewBits;
+ assert((KnownOne & NewBits) == 0);
+ assert((KnownZero & NewBits) == 0);
}
break;
}
@@ -1783,7 +1801,9 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
case ISD::BITCAST:
// If this is an FP->Int bitcast and if the sign bit is the only
// thing demanded, turn this into a FGETSIGN.
- if (!Op.getOperand(0).getValueType().isVector() &&
+ if (!TLO.LegalOperations() &&
+ !Op.getValueType().isVector() &&
+ !Op.getOperand(0).getValueType().isVector() &&
NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) &&
Op.getOperand(0).getValueType().isFloatingPoint()) {
bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType());
@@ -1824,7 +1844,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// FALL THROUGH
default:
// Just use ComputeMaskedBits to compute output bits.
- TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth);
+ TLO.DAG.ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
break;
}
@@ -1840,7 +1860,6 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -1851,7 +1870,7 @@ void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
Op.getOpcode() == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
+ KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
}
/// ComputeNumSignBitsForTargetNode - This method can be implemented by
@@ -1895,9 +1914,8 @@ static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
// Fall back to ComputeMaskedBits to catch other known cases.
EVT OpVT = Val.getValueType();
unsigned BitWidth = OpVT.getScalarType().getSizeInBits();
- APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(Val, Mask, KnownZero, KnownOne);
+ DAG.ComputeMaskedBits(Val, KnownZero, KnownOne);
return (KnownZero.countPopulation() == BitWidth - 1) &&
(KnownOne.countPopulation() == 1);
}
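
With the Mask parameter dropped, ComputeMaskedBits always reports KnownZero and KnownOne across the full bit width, and the power-of-two test above reads: all but one bit proven zero, and the remaining bit proven one. A concrete check of that predicate in plain C++:

    #include <bitset>
    #include <cstdint>
    #include <iostream>

    int main() {
      const unsigned BitWidth = 8;
      // Known-bits facts for some value: bit 3 proven 1, all others proven 0.
      uint8_t KnownZero = 0xF7; // 0b11110111
      uint8_t KnownOne  = 0x08; // 0b00001000
      bool ExactlyOneBitSet =
          std::bitset<8>(KnownZero).count() == BitWidth - 1 &&
          std::bitset<8>(KnownOne).count() == 1;
      std::cout << ExactlyOneBitSet << "\n"; // 1
      return 0;
    }
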
@@ -2060,7 +2078,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
Lod->getPointerInfo().getWithOffset(bestOffset),
- false, false, NewAlign);
+ false, false, false, NewAlign);
return DAG.getSetCC(dl, VT,
DAG.getNode(ISD::AND, dl, newVT, NewLoad,
DAG.getConstant(bestMask.trunc(bestWidth),
@@ -2393,8 +2411,15 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (N0 == N1) {
// We can always fold X == X for integer setcc's.
- if (N0.getValueType().isInteger())
- return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
+ if (N0.getValueType().isInteger()) {
+ switch (getBooleanContents(N0.getValueType().isVector())) {
+ case UndefinedBooleanContent:
+ case ZeroOrOneBooleanContent:
+ return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
+ case ZeroOrNegativeOneBooleanContent:
+ return DAG.getConstant(ISD::isTrueWhenEqual(Cond) ? -1 : 0, VT);
+ }
+ }
unsigned UOF = ISD::getUnorderedFlavor(Cond);
if (UOF == 2) // FP operators that are undefined on NaNs.
return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
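
The X == X fold now materializes "true" according to the target's boolean contents: the constant 1 for ZeroOrOne targets, all-ones for ZeroOrNegativeOne targets, the latter being the usual encoding of SIMD compare results. Both encodings side by side in plain C++:

    #include <cstdint>
    #include <iostream>

    int main() {
      bool Result = true; // ISD::isTrueWhenEqual(Cond) for an equality setcc
      int32_t ZeroOrOne    = Result ? 1 : 0;  // ZeroOrOneBooleanContent
      int32_t ZeroOrNegOne = Result ? -1 : 0; // ZeroOrNegativeOneBooleanContent
      std::cout << ZeroOrOne << " 0x" << std::hex
                << (uint32_t)ZeroOrNegOne << "\n"; // 1 0xffffffff
      return 0;
    }
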
@@ -2428,6 +2453,10 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
}
}
+ // If RHS is a legal immediate value for a compare instruction, we need
+ // to be careful about increasing register pressure needlessly.
+ bool LegalRHSImm = false;
+
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
// Turn (X+C1) == C2 --> X == C2-C1
@@ -2462,25 +2491,33 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
Cond);
}
}
+
+ // Could RHSC fold directly into a compare?
+ if (RHSC->getValueType(0).getSizeInBits() <= 64)
+ LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue());
}
// Simplify (X+Z) == X --> Z == 0
- if (N0.getOperand(0) == N1)
- return DAG.getSetCC(dl, VT, N0.getOperand(1),
- DAG.getConstant(0, N0.getValueType()), Cond);
- if (N0.getOperand(1) == N1) {
- if (DAG.isCommutativeBinOp(N0.getOpcode()))
- return DAG.getSetCC(dl, VT, N0.getOperand(0),
- DAG.getConstant(0, N0.getValueType()), Cond);
- else if (N0.getNode()->hasOneUse()) {
- assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
- // (Z-X) == X --> Z == X<<1
- SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(),
- N1,
+ // Don't do this if X is an immediate that can fold into a cmp
+ // instruction and X+Z has other uses. It could be an induction variable
+ // chain, and the transform would increase register pressure.
+ if (!LegalRHSImm || N0.getNode()->hasOneUse()) {
+ if (N0.getOperand(0) == N1)
+ return DAG.getSetCC(dl, VT, N0.getOperand(1),
+ DAG.getConstant(0, N0.getValueType()), Cond);
+ if (N0.getOperand(1) == N1) {
+ if (DAG.isCommutativeBinOp(N0.getOpcode()))
+ return DAG.getSetCC(dl, VT, N0.getOperand(0),
+ DAG.getConstant(0, N0.getValueType()), Cond);
+ else if (N0.getNode()->hasOneUse()) {
+ assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
+ // (Z-X) == X --> Z == X<<1
+ SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N1,
DAG.getConstant(1, getShiftAmountTy(N1.getValueType())));
- if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(SH.getNode());
- return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
+ if (!DCI.isCalledByLegalizer())
+ DCI.AddToWorklist(SH.getNode());
+ return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
+ }
}
}
}
@@ -2984,7 +3021,6 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
/// is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
switch (CT) {
- default: llvm_unreachable("Unknown constraint type!");
case TargetLowering::C_Other:
case TargetLowering::C_Unknown:
return 0;
@@ -2995,6 +3031,7 @@ static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
case TargetLowering::C_Memory:
return 3;
}
+ llvm_unreachable("Invalid constraint type");
}
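
Moving llvm_unreachable from a default: label to after the switch leaves the switch fully covered, so the compiler can warn when a new ConstraintType enumerator is added but not handled, while invalid runtime values still trap. The shape of the pattern with a generic enum (not the real constraint types) in plain C++:

    #include <cstdlib>

    enum Color { Red, Green, Blue };

    unsigned rank(Color C) {
      switch (C) { // no default: lets -Wswitch flag missing enumerators
      case Red:   return 0;
      case Green: return 1;
      case Blue:  return 2;
      }
      std::abort(); // stand-in for llvm_unreachable("Invalid color")
    }

    int main() { return rank(Green) == 1 ? 0 : 1; }
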
/// Examine constraint type and operand type and determine a weight value.
@@ -3242,8 +3279,9 @@ SDValue TargetLowering::BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
-SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
- std::vector<SDNode*>* Created) const {
+SDValue TargetLowering::
+BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
+ std::vector<SDNode*>* Created) const {
EVT VT = N->getValueType(0);
DebugLoc dl= N->getDebugLoc();
@@ -3258,10 +3296,12 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
// Multiply the numerator (operand 0) by the magic value
// FIXME: We should support doing a MUL in a wider type
SDValue Q;
- if (isOperationLegalOrCustom(ISD::MULHS, VT))
+ if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) :
+ isOperationLegalOrCustom(ISD::MULHS, VT))
Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
DAG.getConstant(magics.m, VT));
- else if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
+ else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) :
+ isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
N->getOperand(0),
DAG.getConstant(magics.m, VT)).getNode(), 1);
@@ -3299,8 +3339,9 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number. See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
-SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
- std::vector<SDNode*>* Created) const {
+SDValue TargetLowering::
+BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
+ std::vector<SDNode*>* Created) const {
EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
@@ -3332,9 +3373,11 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
// Multiply the numerator (operand 0) by the magic value
// FIXME: We should support doing a MUL in a wider type
- if (isOperationLegalOrCustom(ISD::MULHU, VT))
+ if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) :
+ isOperationLegalOrCustom(ISD::MULHU, VT))
Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, VT));
- else if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
+ else if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) :
+ isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
DAG.getConstant(magics.m, VT)).getNode(), 1);
else
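
BuildSDIV and BuildUDIV turn division by a constant into a high multiply by a precomputed magic value plus shifts; the new IsAfterLegalization flag tightens the legality query from isOperationLegalOrCustom to isOperationLegal because nodes created after legalization will not be legalized again. The unsigned transform, verified in plain C++ for divisor 5, whose 32-bit magic is ceil(2^34 / 5) = 0xCCCCCCCD:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint64_t x = 0; x < 1000000; ++x) {
        // 32x32->64 multiply, then keep the high bits: exactly x / 5.
        uint32_t q = (uint32_t)((x * 0xCCCCCCCDull) >> 34);
        assert(q == (uint32_t)(x / 5));
      }
      return 0;
    }
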
diff --git a/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp b/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
index 2609256..0016047 100644
--- a/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
+++ b/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
@@ -116,8 +116,7 @@ namespace {
// Branches and invokes do not escape, only unwind, resume, and return
// do.
TerminatorInst *TI = CurBB->getTerminator();
- if (!isa<UnwindInst>(TI) && !isa<ReturnInst>(TI) &&
- !isa<ResumeInst>(TI))
+ if (!isa<ReturnInst>(TI) && !isa<ResumeInst>(TI))
continue;
Builder.SetInsertPoint(TI->getParent(), TI);
diff --git a/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp b/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
index 160f38f..21ae2f5 100644
--- a/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
+++ b/contrib/llvm/lib/CodeGen/ShrinkWrapping.cpp
@@ -93,6 +93,7 @@ void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
}
AU.addPreserved<MachineLoopInfo>();
AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<TargetPassConfig>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -124,7 +125,7 @@ MachineLoop* PEI::getTopLevelLoopParent(MachineLoop *LP) {
}
bool PEI::isReturnBlock(MachineBasicBlock* MBB) {
- return (MBB && !MBB->empty() && MBB->back().getDesc().isReturn());
+ return (MBB && !MBB->empty() && MBB->back().isReturn());
}
// Initialize shrink wrapping DFA sets, called before iterations.
@@ -158,7 +159,7 @@ void PEI::initShrinkWrappingInfo() {
// via --shrink-wrap-func=<funcname>.
#ifndef NDEBUG
if (ShrinkWrapFunc != "") {
- std::string MFName = MF->getFunction()->getNameStr();
+ std::string MFName = MF->getFunction()->getName().str();
ShrinkWrapThisFunction = (MFName == ShrinkWrapFunc);
}
#endif
@@ -1045,7 +1046,7 @@ std::string PEI::getBasicBlockName(const MachineBasicBlock* MBB) {
return "";
if (MBB->getBasicBlock())
- return MBB->getBasicBlock()->getNameStr();
+ return MBB->getBasicBlock()->getName().str();
std::ostringstream name;
name << "_MBB_" << MBB->getNumber();
diff --git a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index ded2459d..9a86f32 100644
--- a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -1,4 +1,4 @@
-//===- SjLjEHPass.cpp - Eliminate Invoke & Unwind instructions -----------===//
+//===- SjLjEHPrepare.cpp - Eliminate Invoke & Unwind instructions ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -29,21 +29,20 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/IRBuilder.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;
-static cl::opt<bool> DisableOldSjLjEH("disable-old-sjlj-eh", cl::Hidden,
- cl::desc("Disable the old SjLj EH preparation pass"));
-
STATISTIC(NumInvokes, "Number of invokes replaced");
-STATISTIC(NumUnwinds, "Number of unwinds replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");
namespace {
- class SjLjEHPass : public FunctionPass {
+ class SjLjEHPrepare : public FunctionPass {
const TargetLowering *TLI;
Type *FunctionContextTy;
Constant *RegisterFn;
@@ -54,16 +53,12 @@ namespace {
Constant *StackRestoreFn;
Constant *LSDAAddrFn;
Value *PersonalityFn;
- Constant *SelectorFn;
- Constant *ExceptionFn;
Constant *CallSiteFn;
- Constant *DispatchSetupFn;
Constant *FuncCtxFn;
- Value *CallSite;
- DenseMap<InvokeInst*, BasicBlock*> LPadSuccMap;
+ AllocaInst *FuncCtx;
public:
static char ID; // Pass identification, replacement for typeid
- explicit SjLjEHPass(const TargetLowering *tli = NULL)
+ explicit SjLjEHPrepare(const TargetLowering *tli = NULL)
: FunctionPass(ID), TLI(tli) { }
bool doInitialization(Module &M);
bool runOnFunction(Function &F);
@@ -75,28 +70,24 @@ namespace {
private:
bool setupEntryBlockAndCallSites(Function &F);
+ void substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
+ Value *SelVal);
Value *setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads);
void lowerIncomingArguments(Function &F);
void lowerAcrossUnwindEdges(Function &F, ArrayRef<InvokeInst*> Invokes);
-
- void insertCallSiteStore(Instruction *I, int Number, Value *CallSite);
- void markInvokeCallSite(InvokeInst *II, int InvokeNo, Value *CallSite,
- SwitchInst *CatchSwitch);
- void splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes);
- void splitLandingPad(InvokeInst *II);
- bool insertSjLjEHSupport(Function &F);
+ void insertCallSiteStore(Instruction *I, int Number);
};
} // end anonymous namespace
-char SjLjEHPass::ID = 0;
+char SjLjEHPrepare::ID = 0;
-// Public Interface To the SjLjEHPass pass.
-FunctionPass *llvm::createSjLjEHPass(const TargetLowering *TLI) {
- return new SjLjEHPass(TLI);
+// Public interface to the SjLjEHPrepare pass.
+FunctionPass *llvm::createSjLjEHPreparePass(const TargetLowering *TLI) {
+ return new SjLjEHPrepare(TLI);
}
// doInitialization - Set up declarations and types needed to process
// exceptions.
-bool SjLjEHPass::doInitialization(Module &M) {
+bool SjLjEHPrepare::doInitialization(Module &M) {
// Build the function context structure.
// builtin_setjmp uses a five word jbuf
Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
@@ -123,11 +114,7 @@ bool SjLjEHPass::doInitialization(Module &M) {
StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
BuiltinSetjmpFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_setjmp);
LSDAAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_lsda);
- SelectorFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_selector);
- ExceptionFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_exception);
CallSiteFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_callsite);
- DispatchSetupFn
- = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_dispatch_setup);
FuncCtxFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_functioncontext);
PersonalityFn = 0;
@@ -136,583 +123,67 @@ bool SjLjEHPass::doInitialization(Module &M) {
/// insertCallSiteStore - Insert a store of the call-site value to the
/// function context
-void SjLjEHPass::insertCallSiteStore(Instruction *I, int Number,
- Value *CallSite) {
- ConstantInt *CallSiteNoC = ConstantInt::get(Type::getInt32Ty(I->getContext()),
- Number);
- // Insert a store of the call-site number
- new StoreInst(CallSiteNoC, CallSite, true, I); // volatile
-}
-
-/// splitLandingPad - Split a landing pad. This takes considerable care because
-/// of PHIs and other nasties. The problem is that the jump table needs to jump
-/// to the landing pad block. However, the landing pad block can be jumped to
-/// only by an invoke instruction. So we clone the landingpad instruction into
-/// its own basic block, have the invoke jump to there. The landingpad
-/// instruction's basic block's successor is now the target for the jump table.
-///
-/// But because of PHI nodes, we need to create another basic block for the jump
-/// table to jump to. This is definitely a hack, because the values for the PHI
-/// nodes may not be defined on the edge from the jump table. But that's okay,
-/// because the jump table is simply a construct to mimic what is happening in
-/// the CFG. So the values are mysteriously there, even though there is no value
-/// for the PHI from the jump table's edge (hence calling this a hack).
-void SjLjEHPass::splitLandingPad(InvokeInst *II) {
- SmallVector<BasicBlock*, 2> NewBBs;
- SplitLandingPadPredecessors(II->getUnwindDest(), II->getParent(),
- ".1", ".2", this, NewBBs);
-
- // Create an empty block so that the jump table has something to jump to
- // which doesn't have any PHI nodes.
- BasicBlock *LPad = NewBBs[0];
- BasicBlock *Succ = *succ_begin(LPad);
- BasicBlock *JumpTo = BasicBlock::Create(II->getContext(), "jt.land",
- LPad->getParent(), Succ);
- LPad->getTerminator()->eraseFromParent();
- BranchInst::Create(JumpTo, LPad);
- BranchInst::Create(Succ, JumpTo);
- LPadSuccMap[II] = JumpTo;
-
- for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
- PHINode *PN = cast<PHINode>(I);
- Value *Val = PN->removeIncomingValue(LPad, false);
- PN->addIncoming(Val, JumpTo);
- }
-}
-
-/// markInvokeCallSite - Insert code to mark the call_site for this invoke
-void SjLjEHPass::markInvokeCallSite(InvokeInst *II, int InvokeNo,
- Value *CallSite,
- SwitchInst *CatchSwitch) {
- ConstantInt *CallSiteNoC= ConstantInt::get(Type::getInt32Ty(II->getContext()),
- InvokeNo);
- // The runtime comes back to the dispatcher with the call_site - 1 in
- // the context. Odd, but there it is.
- ConstantInt *SwitchValC = ConstantInt::get(Type::getInt32Ty(II->getContext()),
- InvokeNo - 1);
-
- // If the unwind edge has phi nodes, split the edge.
- if (isa<PHINode>(II->getUnwindDest()->begin())) {
- // FIXME: New EH - This if-condition will be always true in the new scheme.
- if (II->getUnwindDest()->isLandingPad())
- splitLandingPad(II);
- else
- SplitCriticalEdge(II, 1, this);
-
- // If there are any phi nodes left, they must have a single predecessor.
- while (PHINode *PN = dyn_cast<PHINode>(II->getUnwindDest()->begin())) {
- PN->replaceAllUsesWith(PN->getIncomingValue(0));
- PN->eraseFromParent();
- }
- }
+void SjLjEHPrepare::insertCallSiteStore(Instruction *I, int Number) {
+ IRBuilder<> Builder(I);
- // Insert the store of the call site value
- insertCallSiteStore(II, InvokeNo, CallSite);
-
- // Record the call site value for the back end so it stays associated with
- // the invoke.
- CallInst::Create(CallSiteFn, CallSiteNoC, "", II);
-
- // Add a switch case to our unwind block.
- if (BasicBlock *SuccBB = LPadSuccMap[II]) {
- CatchSwitch->addCase(SwitchValC, SuccBB);
- } else {
- CatchSwitch->addCase(SwitchValC, II->getUnwindDest());
- }
+ // Get a reference to the call_site field.
+ Type *Int32Ty = Type::getInt32Ty(I->getContext());
+ Value *Zero = ConstantInt::get(Int32Ty, 0);
+ Value *One = ConstantInt::get(Int32Ty, 1);
+ Value *Idxs[2] = { Zero, One };
+ Value *CallSite = Builder.CreateGEP(FuncCtx, Idxs, "call_site");
- // We still want this to look like an invoke so we emit the LSDA properly,
- // so we don't transform the invoke into a call here.
+ // Insert a store of the call-site number
+ ConstantInt *CallSiteNoC = ConstantInt::get(Type::getInt32Ty(I->getContext()),
+ Number);
+ Builder.CreateStore(CallSiteNoC, CallSite, true/*volatile*/);
}
/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
-static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
- if (!LiveBBs.insert(BB).second) return; // already been here.
+static void MarkBlocksLiveIn(BasicBlock *BB,
+ SmallPtrSet<BasicBlock*, 64> &LiveBBs) {
+ if (!LiveBBs.insert(BB)) return; // already been here.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
MarkBlocksLiveIn(*PI, LiveBBs);
}
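
MarkBlocksLiveIn is a recursive flood fill backwards over predecessor edges; switching LiveBBs to SmallPtrSet keeps the visited set allocation-free at typical sizes. The same walk over a toy CFG in plain C++, with std::unordered_set standing in for SmallPtrSet and invented block numbering:

    #include <unordered_set>
    #include <vector>

    using Block = int;

    void markLiveIn(Block BB, const std::vector<std::vector<Block>> &Preds,
                    std::unordered_set<Block> &LiveBBs) {
      if (!LiveBBs.insert(BB).second) return; // already been here
      for (Block P : Preds[BB])
        markLiveIn(P, Preds, LiveBBs);
    }

    int main() {
      // Blocks 0 -> 1 -> 2, stored as predecessor lists.
      std::vector<std::vector<Block>> Preds = { {}, {0}, {1} };
      std::unordered_set<Block> LiveBBs;
      markLiveIn(2, Preds, LiveBBs); // marks 2, then 1, then 0
      return LiveBBs.size() == 3 ? 0 : 1;
    }
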
-/// splitLiveRangesAcrossInvokes - Each value that is live across an unwind edge
-/// we spill into a stack location, guaranteeing that there is nothing live
-/// across the unwind edge. This process also splits all critical edges
-/// coming out of invoke's.
-/// FIXME: Move this function to a common utility file (Local.cpp?) so
-/// both SjLj and LowerInvoke can use it.
-void SjLjEHPass::
-splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
- // First step, split all critical edges from invoke instructions.
- for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
- InvokeInst *II = Invokes[i];
- SplitCriticalEdge(II, 0, this);
-
- // FIXME: New EH - This if-condition will be always true in the new scheme.
- if (II->getUnwindDest()->isLandingPad())
- splitLandingPad(II);
- else
- SplitCriticalEdge(II, 1, this);
-
- assert(!isa<PHINode>(II->getNormalDest()) &&
- !isa<PHINode>(II->getUnwindDest()) &&
- "Critical edge splitting left single entry phi nodes?");
- }
-
- Function *F = Invokes.back()->getParent()->getParent();
-
- // To avoid having to handle incoming arguments specially, we lower each arg
- // to a copy instruction in the entry block. This ensures that the argument
- // value itself cannot be live across the entry block.
- BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
- while (isa<AllocaInst>(AfterAllocaInsertPt) &&
- isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
- ++AfterAllocaInsertPt;
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
- AI != E; ++AI) {
- Type *Ty = AI->getType();
- // Aggregate types can't be cast, but are legal argument types, so we have
- // to handle them differently. We use an extract/insert pair as a
- // lightweight method to achieve the same goal.
- if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
- Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
- Instruction *NI = InsertValueInst::Create(AI, EI, 0);
- NI->insertAfter(EI);
- AI->replaceAllUsesWith(NI);
- // Set the operand of the instructions back to the AllocaInst.
- EI->setOperand(0, AI);
- NI->setOperand(0, AI);
- } else {
- // This is always a no-op cast because we're casting AI to AI->getType()
- // so src and destination types are identical. BitCast is the only
- // possibility.
- CastInst *NC = new BitCastInst(
- AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
- AI->replaceAllUsesWith(NC);
- // Set the operand of the cast instruction back to the AllocaInst.
- // Normally it's forbidden to replace a CastInst's operand because it
- // could cause the opcode to reflect an illegal conversion. However,
- // we're replacing it here with the same value it was constructed with.
- // We do this because the above replaceAllUsesWith() clobbered the
- // operand, but we want this one to remain.
- NC->setOperand(0, AI);
- }
- }
-
- // Finally, scan the code looking for instructions with bad live ranges.
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
- // Ignore obvious cases we don't have to handle. In particular, most
- // instructions either have no uses or only have a single use inside the
- // current block. Ignore them quickly.
- Instruction *Inst = II;
- if (Inst->use_empty()) continue;
- if (Inst->hasOneUse() &&
- cast<Instruction>(Inst->use_back())->getParent() == BB &&
- !isa<PHINode>(Inst->use_back())) continue;
-
- // If this is an alloca in the entry block, it's not a real register
- // value.
- if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
- if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
- continue;
-
- // Avoid iterator invalidation by copying users to a temporary vector.
- SmallVector<Instruction*,16> Users;
- for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
- if (User->getParent() != BB || isa<PHINode>(User))
- Users.push_back(User);
- }
-
- // Find all of the blocks that this value is live in.
- std::set<BasicBlock*> LiveBBs;
- LiveBBs.insert(Inst->getParent());
- while (!Users.empty()) {
- Instruction *U = Users.back();
- Users.pop_back();
-
- if (!isa<PHINode>(U)) {
- MarkBlocksLiveIn(U->getParent(), LiveBBs);
- } else {
- // Uses for a PHI node occur in their predecessor block.
- PHINode *PN = cast<PHINode>(U);
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingValue(i) == Inst)
- MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
- }
- }
-
- // Now that we know all of the blocks that this thing is live in, see if
- // it includes any of the unwind locations.
- bool NeedsSpill = false;
- for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
- BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
- if (UnwindBlock != BB && LiveBBs.count(UnwindBlock))
- NeedsSpill = true;
- }
-
- // If we decided we need a spill, do it.
- // FIXME: Spilling this way is overkill, as it forces all uses of
- // the value to be reloaded from the stack slot, even those that aren't
- // in the unwind blocks. We should be more selective.
- if (NeedsSpill) {
- ++NumSpilled;
- DemoteRegToStack(*Inst, true);
- }
- }
-}
-
-/// CreateLandingPadLoad - Load the exception handling values and insert them
-/// into a structure.
-static Instruction *CreateLandingPadLoad(Function &F, Value *ExnAddr,
- Value *SelAddr,
- BasicBlock::iterator InsertPt) {
- Value *Exn = new LoadInst(ExnAddr, "exn", false,
- InsertPt);
- Type *Ty = Type::getInt8PtrTy(F.getContext());
- Exn = CastInst::Create(Instruction::IntToPtr, Exn, Ty, "", InsertPt);
- Value *Sel = new LoadInst(SelAddr, "sel", false, InsertPt);
-
- Ty = StructType::get(Exn->getType(), Sel->getType(), NULL);
- InsertValueInst *LPadVal = InsertValueInst::Create(llvm::UndefValue::get(Ty),
- Exn, 0,
- "lpad.val", InsertPt);
- return InsertValueInst::Create(LPadVal, Sel, 1, "lpad.val", InsertPt);
-}
-
-/// ReplaceLandingPadVal - Replace the landingpad instruction's value with a
-/// load from the stored values (via CreateLandingPadLoad). This looks through
-/// PHI nodes, and removes them if they are dead.
-static void ReplaceLandingPadVal(Function &F, Instruction *Inst, Value *ExnAddr,
- Value *SelAddr) {
- if (Inst->use_empty()) return;
-
- while (!Inst->use_empty()) {
- Instruction *I = cast<Instruction>(Inst->use_back());
-
- if (PHINode *PN = dyn_cast<PHINode>(I)) {
- ReplaceLandingPadVal(F, PN, ExnAddr, SelAddr);
- if (PN->use_empty()) PN->eraseFromParent();
- continue;
- }
-
- I->replaceUsesOfWith(Inst, CreateLandingPadLoad(F, ExnAddr, SelAddr, I));
- }
-}
-
-bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
- SmallVector<ReturnInst*,16> Returns;
- SmallVector<UnwindInst*,16> Unwinds;
- SmallVector<InvokeInst*,16> Invokes;
-
- // Look through the terminators of the basic blocks to find invokes, returns
- // and unwinds.
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
- // Remember all return instructions in case we insert an invoke into this
- // function.
- Returns.push_back(RI);
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
- Invokes.push_back(II);
- } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- Unwinds.push_back(UI);
- }
- }
-
- NumInvokes += Invokes.size();
- NumUnwinds += Unwinds.size();
-
- // If we don't have any invokes, there's nothing to do.
- if (Invokes.empty()) return false;
-
- // Find the eh.selector.*, eh.exception and alloca calls.
- //
- // Remember any allocas() that aren't in the entry block, as the
- // jmpbuf saved SP will need to be updated for them.
- //
- // We'll use the first eh.selector to determine the right personality
- // function to use. For SJLJ, we always use the same personality for the
- // whole function, not on a per-selector basis.
- // FIXME: That's a bit ugly. Better way?
- SmallVector<CallInst*,16> EH_Selectors;
- SmallVector<CallInst*,16> EH_Exceptions;
- SmallVector<Instruction*,16> JmpbufUpdatePoints;
-
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- // Note: Skip the entry block since there's nothing there that interests
- // us. eh.selector and eh.exception shouldn't ever be there, and we
- // want to disregard any allocas that are there.
- //
- // FIXME: This is awkward. The new EH scheme won't need to skip the entry
- // block.
- if (BB == F.begin()) {
- if (InvokeInst *II = dyn_cast<InvokeInst>(F.begin()->getTerminator())) {
- // FIXME: This will be always non-NULL in the new EH.
- if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
- if (!PersonalityFn) PersonalityFn = LPI->getPersonalityFn();
- }
-
- continue;
- }
-
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
- if (CI->getCalledFunction() == SelectorFn) {
- if (!PersonalityFn) PersonalityFn = CI->getArgOperand(1);
- EH_Selectors.push_back(CI);
- } else if (CI->getCalledFunction() == ExceptionFn) {
- EH_Exceptions.push_back(CI);
- } else if (CI->getCalledFunction() == StackRestoreFn) {
- JmpbufUpdatePoints.push_back(CI);
- }
- } else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
- JmpbufUpdatePoints.push_back(AI);
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(I)) {
- // FIXME: This will be always non-NULL in the new EH.
- if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
- if (!PersonalityFn) PersonalityFn = LPI->getPersonalityFn();
- }
- }
- }
-
- // If we don't have any eh.selector calls, we can't determine the personality
- // function. Without a personality function, we can't process exceptions.
- if (!PersonalityFn) return false;
-
- // We have invokes, so we need to add register/unregister calls to get this
- // function onto the global unwind stack.
- //
- // First thing we need to do is scan the whole function for values that are
- // live across unwind edges. Each value that is live across an unwind edge we
- // spill into a stack location, guaranteeing that there is nothing live across
- // the unwind edge. This process also splits all critical edges coming out of
- // invoke's.
- splitLiveRangesAcrossInvokes(Invokes);
-
-
- SmallVector<LandingPadInst*, 16> LandingPads;
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
- // FIXME: This will be always non-NULL in the new EH.
- if (LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst())
- LandingPads.push_back(LPI);
+/// substituteLPadValues - Substitute the values returned by the landingpad
+/// instruction with those returned by the personality function.
+void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
+ Value *SelVal) {
+ SmallVector<Value*, 8> UseWorkList(LPI->use_begin(), LPI->use_end());
+ while (!UseWorkList.empty()) {
+ Value *Val = UseWorkList.pop_back_val();
+ ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val);
+ if (!EVI) continue;
+ if (EVI->getNumIndices() != 1) continue;
+ if (*EVI->idx_begin() == 0)
+ EVI->replaceAllUsesWith(ExnVal);
+ else if (*EVI->idx_begin() == 1)
+ EVI->replaceAllUsesWith(SelVal);
+ if (EVI->getNumUses() == 0)
+ EVI->eraseFromParent();
}
+ if (LPI->getNumUses() == 0) return;
- BasicBlock *EntryBB = F.begin();
- // Create an alloca for the incoming jump buffer ptr and the new jump buffer
- // that needs to be restored on all exits from the function. This is an
- // alloca because the value needs to be added to the global context list.
- unsigned Align = 4; // FIXME: Should be a TLI check?
- AllocaInst *FunctionContext =
- new AllocaInst(FunctionContextTy, 0, Align,
- "fcn_context", F.begin()->begin());
-
- Value *Idxs[2];
- Type *Int32Ty = Type::getInt32Ty(F.getContext());
- Value *Zero = ConstantInt::get(Int32Ty, 0);
- // We need to also keep around a reference to the call_site field
- Idxs[0] = Zero;
- Idxs[1] = ConstantInt::get(Int32Ty, 1);
- CallSite = GetElementPtrInst::Create(FunctionContext, Idxs, "call_site",
- EntryBB->getTerminator());
-
- // The exception selector comes back in context->data[1]
- Idxs[1] = ConstantInt::get(Int32Ty, 2);
- Value *FCData = GetElementPtrInst::Create(FunctionContext, Idxs, "fc_data",
- EntryBB->getTerminator());
- Idxs[1] = ConstantInt::get(Int32Ty, 1);
- Value *SelectorAddr = GetElementPtrInst::Create(FCData, Idxs,
- "exc_selector_gep",
- EntryBB->getTerminator());
- // The exception value comes back in context->data[0]
- Idxs[1] = Zero;
- Value *ExceptionAddr = GetElementPtrInst::Create(FCData, Idxs,
- "exception_gep",
- EntryBB->getTerminator());
-
- // The result of the eh.selector call will be replaced with a a reference to
- // the selector value returned in the function context. We leave the selector
- // itself so the EH analysis later can use it.
- for (int i = 0, e = EH_Selectors.size(); i < e; ++i) {
- CallInst *I = EH_Selectors[i];
- Value *SelectorVal = new LoadInst(SelectorAddr, "select_val", true, I);
- I->replaceAllUsesWith(SelectorVal);
- }
-
- // eh.exception calls are replaced with references to the proper location in
- // the context. Unlike eh.selector, the eh.exception calls are removed
- // entirely.
- for (int i = 0, e = EH_Exceptions.size(); i < e; ++i) {
- CallInst *I = EH_Exceptions[i];
- // Possible for there to be duplicates, so check to make sure the
- // instruction hasn't already been removed.
- if (!I->getParent()) continue;
- Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
- Type *Ty = Type::getInt8PtrTy(F.getContext());
- Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);
-
- I->replaceAllUsesWith(Val);
- I->eraseFromParent();
- }
-
- for (unsigned i = 0, e = LandingPads.size(); i != e; ++i)
- ReplaceLandingPadVal(F, LandingPads[i], ExceptionAddr, SelectorAddr);
-
- // The entry block changes to have the eh.sjlj.setjmp, with a conditional
- // branch to a dispatch block for non-zero returns. If we return normally,
- // we're not handling an exception and just register the function context and
- // continue.
-
- // Create the dispatch block. The dispatch block is basically a big switch
- // statement that goes to all of the invoke landing pads.
- BasicBlock *DispatchBlock =
- BasicBlock::Create(F.getContext(), "eh.sjlj.setjmp.catch", &F);
-
- // Insert a load of the callsite in the dispatch block, and a switch on its
- // value. By default, we issue a trap statement.
- BasicBlock *TrapBlock =
- BasicBlock::Create(F.getContext(), "trapbb", &F);
- CallInst::Create(Intrinsic::getDeclaration(F.getParent(), Intrinsic::trap),
- "", TrapBlock);
- new UnreachableInst(F.getContext(), TrapBlock);
-
- Value *DispatchLoad = new LoadInst(CallSite, "invoke.num", true,
- DispatchBlock);
- SwitchInst *DispatchSwitch =
- SwitchInst::Create(DispatchLoad, TrapBlock, Invokes.size(),
- DispatchBlock);
- // Split the entry block to insert the conditional branch for the setjmp.
- BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
- "eh.sjlj.setjmp.cont");
-
- // Populate the Function Context
- // 1. LSDA address
- // 2. Personality function address
- // 3. jmpbuf (save SP, FP and call eh.sjlj.setjmp)
-
- // LSDA address
- Idxs[0] = Zero;
- Idxs[1] = ConstantInt::get(Int32Ty, 4);
- Value *LSDAFieldPtr =
- GetElementPtrInst::Create(FunctionContext, Idxs, "lsda_gep",
- EntryBB->getTerminator());
- Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
- EntryBB->getTerminator());
- new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());
-
- Idxs[1] = ConstantInt::get(Int32Ty, 3);
- Value *PersonalityFieldPtr =
- GetElementPtrInst::Create(FunctionContext, Idxs, "lsda_gep",
- EntryBB->getTerminator());
- new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
- EntryBB->getTerminator());
-
- // Save the frame pointer.
- Idxs[1] = ConstantInt::get(Int32Ty, 5);
- Value *JBufPtr
- = GetElementPtrInst::Create(FunctionContext, Idxs, "jbuf_gep",
- EntryBB->getTerminator());
- Idxs[1] = ConstantInt::get(Int32Ty, 0);
- Value *FramePtr =
- GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_fp_gep",
- EntryBB->getTerminator());
-
- Value *Val = CallInst::Create(FrameAddrFn,
- ConstantInt::get(Int32Ty, 0),
- "fp",
- EntryBB->getTerminator());
- new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());
-
- // Save the stack pointer.
- Idxs[1] = ConstantInt::get(Int32Ty, 2);
- Value *StackPtr =
- GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_sp_gep",
- EntryBB->getTerminator());
-
- Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
- new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());
-
- // Call the setjmp instrinsic. It fills in the rest of the jmpbuf.
- Value *SetjmpArg =
- CastInst::Create(Instruction::BitCast, JBufPtr,
- Type::getInt8PtrTy(F.getContext()), "",
- EntryBB->getTerminator());
- Value *DispatchVal = CallInst::Create(BuiltinSetjmpFn, SetjmpArg,
- "",
- EntryBB->getTerminator());
-
- // Add a call to dispatch_setup after the setjmp call. This is expanded to any
- // target-specific setup that needs to be done.
- CallInst::Create(DispatchSetupFn, DispatchVal, "", EntryBB->getTerminator());
+ // There are still some uses of LPI. Construct an aggregate with the exception
+ // values and replace the LPI with that aggregate.
+ Type *LPadType = LPI->getType();
+ Value *LPadVal = UndefValue::get(LPadType);
+ IRBuilder<>
+ Builder(llvm::next(BasicBlock::iterator(cast<Instruction>(SelVal))));
+ LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
+ LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");
- // check the return value of the setjmp. non-zero goes to dispatcher.
- Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
- ICmpInst::ICMP_EQ, DispatchVal, Zero,
- "notunwind");
- // Nuke the uncond branch.
- EntryBB->getTerminator()->eraseFromParent();
-
- // Put in a new condbranch in its place.
- BranchInst::Create(ContBlock, DispatchBlock, IsNormal, EntryBB);
-
- // Register the function context and make sure it's known to not throw
- CallInst *Register =
- CallInst::Create(RegisterFn, FunctionContext, "",
- ContBlock->getTerminator());
- Register->setDoesNotThrow();
-
- // At this point, we are all set up, update the invoke instructions to mark
- // their call_site values, and fill in the dispatch switch accordingly.
- for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
- markInvokeCallSite(Invokes[i], i+1, CallSite, DispatchSwitch);
-
- // Mark call instructions that aren't nounwind as no-action (call_site ==
- // -1). Skip the entry block, as prior to then, no function context has been
- // created for this function and any unexpected exceptions thrown will go
- // directly to the caller's context, which is what we want anyway, so no need
- // to do anything here.
- for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
- for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
- // Ignore calls to the EH builtins (eh.selector, eh.exception)
- Constant *Callee = CI->getCalledFunction();
- if (Callee != SelectorFn && Callee != ExceptionFn
- && !CI->doesNotThrow())
- insertCallSiteStore(CI, -1, CallSite);
- } else if (ResumeInst *RI = dyn_cast<ResumeInst>(I)) {
- insertCallSiteStore(RI, -1, CallSite);
- }
- }
-
- // Replace all unwinds with a branch to the unwind handler.
- // ??? Should this ever happen with sjlj exceptions?
- for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
- BranchInst::Create(TrapBlock, Unwinds[i]);
- Unwinds[i]->eraseFromParent();
- }
-
- // Following any allocas not in the entry block, update the saved SP in the
- // jmpbuf to the new value.
- for (unsigned i = 0, e = JmpbufUpdatePoints.size(); i != e; ++i) {
- Instruction *AI = JmpbufUpdatePoints[i];
- Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
- StackAddr->insertAfter(AI);
- Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
- StoreStackAddr->insertAfter(StackAddr);
- }
-
- // Finally, for any returns from this function, if this function contains an
- // invoke, add a call to unregister the function context.
- for (unsigned i = 0, e = Returns.size(); i != e; ++i)
- CallInst::Create(UnregisterFn, FunctionContext, "", Returns[i]);
-
- return true;
+ LPI->replaceAllUsesWith(LPadVal);
}
/// setupFunctionContext - Allocate the function context on the stack and fill
/// it with all of the data that we know at this point.
-Value *SjLjEHPass::
+Value *SjLjEHPrepare::
setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads) {
BasicBlock *EntryBB = F.begin();
@@ -721,56 +192,42 @@ setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads) {
// because the value needs to be added to the global context list.
unsigned Align =
TLI->getTargetData()->getPrefTypeAlignment(FunctionContextTy);
- AllocaInst *FuncCtx =
+ FuncCtx =
new AllocaInst(FunctionContextTy, 0, Align, "fn_context", EntryBB->begin());
// Fill in the function context structure.
- Value *Idxs[2];
Type *Int32Ty = Type::getInt32Ty(F.getContext());
Value *Zero = ConstantInt::get(Int32Ty, 0);
Value *One = ConstantInt::get(Int32Ty, 1);
+ Value *Two = ConstantInt::get(Int32Ty, 2);
+ Value *Three = ConstantInt::get(Int32Ty, 3);
+ Value *Four = ConstantInt::get(Int32Ty, 4);
- // Keep around a reference to the call_site field.
- Idxs[0] = Zero;
- Idxs[1] = One;
- CallSite = GetElementPtrInst::Create(FuncCtx, Idxs, "call_site",
- EntryBB->getTerminator());
-
- // Reference the __data field.
- Idxs[1] = ConstantInt::get(Int32Ty, 2);
- Value *FCData = GetElementPtrInst::Create(FuncCtx, Idxs, "__data",
- EntryBB->getTerminator());
-
- // The exception value comes back in context->__data[0].
- Idxs[1] = Zero;
- Value *ExceptionAddr = GetElementPtrInst::Create(FCData, Idxs,
- "exception_gep",
- EntryBB->getTerminator());
-
- // The exception selector comes back in context->__data[1].
- Idxs[1] = One;
- Value *SelectorAddr = GetElementPtrInst::Create(FCData, Idxs,
- "exn_selector_gep",
- EntryBB->getTerminator());
+ Value *Idxs[2] = { Zero, 0 };
for (unsigned I = 0, E = LPads.size(); I != E; ++I) {
LandingPadInst *LPI = LPads[I];
IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt());
+ // Reference the __data field.
+ Idxs[1] = Two;
+ Value *FCData = Builder.CreateGEP(FuncCtx, Idxs, "__data");
+
+ // The exception values come back in context->__data[0].
+ Idxs[1] = Zero;
+ Value *ExceptionAddr = Builder.CreateGEP(FCData, Idxs, "exception_gep");
Value *ExnVal = Builder.CreateLoad(ExceptionAddr, true, "exn_val");
ExnVal = Builder.CreateIntToPtr(ExnVal, Type::getInt8PtrTy(F.getContext()));
- Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val");
- Type *LPadType = LPI->getType();
- Value *LPadVal = UndefValue::get(LPadType);
- LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
- LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");
+ Idxs[1] = One;
+ Value *SelectorAddr = Builder.CreateGEP(FCData, Idxs, "exn_selector_gep");
+ Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val");
- LPI->replaceAllUsesWith(LPadVal);
+ substituteLPadValues(LPI, ExnVal, SelVal);
}
// Personality function
- Idxs[1] = ConstantInt::get(Int32Ty, 3);
+ Idxs[1] = Three;
if (!PersonalityFn)
PersonalityFn = LPads[0]->getPersonalityFn();
Value *PersonalityFieldPtr =
@@ -780,11 +237,11 @@ setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads) {
EntryBB->getTerminator());
// LSDA address
- Idxs[1] = ConstantInt::get(Int32Ty, 4);
- Value *LSDAFieldPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "lsda_gep",
- EntryBB->getTerminator());
Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr",
EntryBB->getTerminator());
+ Idxs[1] = Four;
+ Value *LSDAFieldPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "lsda_gep",
+ EntryBB->getTerminator());
new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator());
return FuncCtx;
@@ -794,7 +251,7 @@ setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads) {
/// specially, we lower each arg to a copy instruction in the entry block. This
/// ensures that the argument value itself cannot be live out of the entry
/// block.
-void SjLjEHPass::lowerIncomingArguments(Function &F) {
+void SjLjEHPrepare::lowerIncomingArguments(Function &F) {
BasicBlock::iterator AfterAllocaInsPt = F.begin()->begin();
while (isa<AllocaInst>(AfterAllocaInsPt) &&
isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsPt)->getArraySize()))
@@ -838,8 +295,8 @@ void SjLjEHPass::lowerIncomingArguments(Function &F) {
/// lowerAcrossUnwindEdges - Find all variables which are alive across an unwind
/// edge and spill them.
-void SjLjEHPass::lowerAcrossUnwindEdges(Function &F,
- ArrayRef<InvokeInst*> Invokes) {
+void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
+ ArrayRef<InvokeInst*> Invokes) {
// Finally, scan the code looking for instructions with bad live ranges.
for (Function::iterator
BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
@@ -870,7 +327,7 @@ void SjLjEHPass::lowerAcrossUnwindEdges(Function &F,
}
// Find all of the blocks that this value is live in.
- std::set<BasicBlock*> LiveBBs;
+ SmallPtrSet<BasicBlock*, 64> LiveBBs;
LiveBBs.insert(Inst->getParent());
while (!Users.empty()) {
Instruction *U = Users.back();
@@ -893,7 +350,10 @@ void SjLjEHPass::lowerAcrossUnwindEdges(Function &F,
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
+ DEBUG(dbgs() << "SJLJ Spill: " << *Inst << " around "
+ << UnwindBlock->getName() << "\n");
NeedsSpill = true;
+ break;
}
}
@@ -902,36 +362,60 @@ void SjLjEHPass::lowerAcrossUnwindEdges(Function &F,
// the value to be reloaded from the stack slot, even those that aren't
// in the unwind blocks. We should be more selective.
if (NeedsSpill) {
- ++NumSpilled;
DemoteRegToStack(*Inst, true);
+ ++NumSpilled;
}
}
}
+
+ // Go through the landing pads and remove any PHIs there.
+ for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
+ BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
+ LandingPadInst *LPI = UnwindBlock->getLandingPadInst();
+
+ // Place PHIs into a set to avoid invalidating the iterator.
+ SmallPtrSet<PHINode*, 8> PHIsToDemote;
+ for (BasicBlock::iterator
+ PN = UnwindBlock->begin(); isa<PHINode>(PN); ++PN)
+ PHIsToDemote.insert(cast<PHINode>(PN));
+ if (PHIsToDemote.empty()) continue;
+
+ // Demote the PHIs to the stack.
+ for (SmallPtrSet<PHINode*, 8>::iterator
+ I = PHIsToDemote.begin(), E = PHIsToDemote.end(); I != E; ++I)
+ DemotePHIToStack(*I);
+
+ // Move the landingpad instruction back to the top of the landing pad block.
+ LPI->moveBefore(UnwindBlock->begin());
+ }
}
/// setupEntryBlockAndCallSites - Setup the entry block by creating and filling
/// the function context and marking the call sites with the appropriate
/// values. These values are used by the DWARF EH emitter.
-bool SjLjEHPass::setupEntryBlockAndCallSites(Function &F) {
+bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) {
SmallVector<ReturnInst*, 16> Returns;
SmallVector<InvokeInst*, 16> Invokes;
- SmallVector<LandingPadInst*, 16> LPads;
+ SmallSetVector<LandingPadInst*, 16> LPads;
// Look through the terminators of the basic blocks to find invokes.
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
Invokes.push_back(II);
- LPads.push_back(II->getUnwindDest()->getLandingPadInst());
+ LPads.insert(II->getUnwindDest()->getLandingPadInst());
} else if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
Returns.push_back(RI);
}
if (Invokes.empty()) return false;
+ NumInvokes += Invokes.size();
+
lowerIncomingArguments(F);
lowerAcrossUnwindEdges(F, Invokes);
- Value *FuncCtx = setupFunctionContext(F, LPads);
+ Value *FuncCtx =
+ setupFunctionContext(F, makeArrayRef(LPads.begin(), LPads.end()));
BasicBlock *EntryBB = F.begin();
Type *Int32Ty = Type::getInt32Ty(F.getContext());
@@ -979,7 +463,7 @@ bool SjLjEHPass::setupEntryBlockAndCallSites(Function &F) {
// At this point, we are all set up, update the invoke instructions to mark
// their call_site values.
for (unsigned I = 0, E = Invokes.size(); I != E; ++I) {
- insertCallSiteStore(Invokes[I], I + 1, CallSite);
+ insertCallSiteStore(Invokes[I], I + 1);
ConstantInt *CallSiteNum =
ConstantInt::get(Type::getInt32Ty(F.getContext()), I + 1);
@@ -998,9 +482,9 @@ bool SjLjEHPass::setupEntryBlockAndCallSites(Function &F) {
for (BasicBlock::iterator I = BB->begin(), end = BB->end(); I != end; ++I)
if (CallInst *CI = dyn_cast<CallInst>(I)) {
if (!CI->doesNotThrow())
- insertCallSiteStore(CI, -1, CallSite);
+ insertCallSiteStore(CI, -1);
} else if (ResumeInst *RI = dyn_cast<ResumeInst>(I)) {
- insertCallSiteStore(RI, -1, CallSite);
+ insertCallSiteStore(RI, -1);
}
// Register the function context and make sure it's known to not throw
@@ -1008,6 +492,25 @@ bool SjLjEHPass::setupEntryBlockAndCallSites(Function &F) {
EntryBB->getTerminator());
Register->setDoesNotThrow();
+ // Following any allocas not in the entry block, update the saved SP in the
+ // jmpbuf to the new value.
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
+ if (BB == F.begin())
+ continue;
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
+ if (CallInst *CI = dyn_cast<CallInst>(I)) {
+ if (CI->getCalledFunction() != StackRestoreFn)
+ continue;
+ } else if (!isa<AllocaInst>(I)) {
+ continue;
+ }
+ Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
+ StackAddr->insertAfter(I);
+ Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
+ StoreStackAddr->insertAfter(StackAddr);
+ }
+ }
+
// Finally, for any returns from this function, if this function contains an
// invoke, add a call to unregister the function context.
for (unsigned I = 0, E = Returns.size(); I != E; ++I)
@@ -1016,11 +519,7 @@ bool SjLjEHPass::setupEntryBlockAndCallSites(Function &F) {
return true;
}
-bool SjLjEHPass::runOnFunction(Function &F) {
- bool Res = false;
- if (!DisableOldSjLjEH)
- Res = insertSjLjEHSupport(F);
- else
- Res = setupEntryBlockAndCallSites(F);
+bool SjLjEHPrepare::runOnFunction(Function &F) {
+ bool Res = setupEntryBlockAndCallSites(F);
return Res;
}
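
The call-site numbering above is the core of the builtin-setjmp scheme: every potentially-throwing site records an index into the function context so the personality routine can dispatch to the right landing pad, with -1 marking sites that may throw but have no local handler. A minimal sketch of what insertCallSiteStore presumably reduces to, assuming 3.1-era LLVM headers; storeCallSiteNum and the explicit CallSiteSlot parameter are illustrative only (the pass itself caches the call_site GEP as a member):

    // Hypothetical helper: record a call-site number in the function
    // context. The store is volatile so it cannot be sunk or deleted
    // across the setjmp re-entry point.
    static void storeCallSiteNum(Instruction *I, int Number,
                                 Value *CallSiteSlot) {
      Type *Int32Ty = Type::getInt32Ty(I->getContext());
      Value *Num = ConstantInt::get(Int32Ty, Number);
      new StoreInst(Num, CallSiteSlot, /*isVolatile=*/true, I);
    }
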
diff --git a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
index ca79caf..c5bd3a3 100644
--- a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
+++ b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
@@ -76,7 +76,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
MachineBasicBlock *mbb = &*mbbItr;
// Insert an index for the MBB start.
- SlotIndex blockStartIndex(back(), SlotIndex::LOAD);
+ SlotIndex blockStartIndex(back(), SlotIndex::Slot_Block);
for (MachineBasicBlock::iterator miItr = mbb->begin(), miEnd = mbb->end();
miItr != miEnd; ++miItr) {
@@ -88,7 +88,8 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
push_back(createEntry(mi, index += SlotIndex::InstrDist));
// Save this base index in the maps.
- mi2iMap.insert(std::make_pair(mi, SlotIndex(back(), SlotIndex::LOAD)));
+ mi2iMap.insert(std::make_pair(mi, SlotIndex(back(),
+ SlotIndex::Slot_Block)));
++functionSize;
}
@@ -97,14 +98,15 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
push_back(createEntry(0, index += SlotIndex::InstrDist));
MBBRanges[mbb->getNumber()].first = blockStartIndex;
- MBBRanges[mbb->getNumber()].second = SlotIndex(back(), SlotIndex::LOAD);
+ MBBRanges[mbb->getNumber()].second = SlotIndex(back(),
+ SlotIndex::Slot_Block);
idx2MBBMap.push_back(IdxMBBPair(blockStartIndex, mbb));
}
// Sort the Idx2MBBMap
std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
- DEBUG(dump());
+ DEBUG(mf->print(dbgs(), this));
// And we're done!
return false;
@@ -166,7 +168,7 @@ void SlotIndexes::dump() const {
// Print a SlotIndex to a raw_ostream.
void SlotIndex::print(raw_ostream &os) const {
if (isValid())
- os << entry().getIndex() << "LudS"[getSlot()];
+ os << entry().getIndex() << "Berd"[getSlot()];
else
os << "invalid";
}
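
The LOAD → Slot_Block renaming is part of the slot-index overhaul: each instruction index carries four sub-slots, and the print string "Berd" spells out their one-letter codes. Roughly, per the contemporaneous SlotIndexes.h (reproduced here as a reference, not part of this patch):

    enum Slot {
      Slot_Block,        // 'B': block boundary, live-in values
      Slot_EarlyClobber, // 'e': early-clobber defs and redefs
      Slot_Register,     // 'r': ordinary register uses and defs
      Slot_Dead          // 'd': dead defs; dead ranges end here
    };
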
diff --git a/contrib/llvm/lib/CodeGen/Spiller.cpp b/contrib/llvm/lib/CodeGen/Spiller.cpp
index b6bbcd7..4cd22eb 100644
--- a/contrib/llvm/lib/CodeGen/Spiller.cpp
+++ b/contrib/llvm/lib/CodeGen/Spiller.cpp
@@ -11,8 +11,8 @@
#include "Spiller.h"
#include "VirtRegMap.h"
-#include "LiveRangeEdit.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -29,7 +29,7 @@
using namespace llvm;
namespace {
- enum SpillerName { trivial, standard, inline_ };
+ enum SpillerName { trivial, inline_ };
}
static cl::opt<SpillerName>
@@ -37,10 +37,9 @@ spillerOpt("spiller",
cl::desc("Spiller to use: (default: standard)"),
cl::Prefix,
cl::values(clEnumVal(trivial, "trivial spiller"),
- clEnumVal(standard, "default spiller"),
clEnumValN(inline_, "inline", "inline spiller"),
clEnumValEnd),
- cl::init(standard));
+ cl::init(trivial));
// Spiller virtual destructor implementation.
Spiller::~Spiller() {}
@@ -73,8 +72,9 @@ protected:
/// Add spill ranges for every use/def of the live interval, inserting loads
/// immediately before each use, and stores after each def. No folding or
/// remat is attempted.
- void trivialSpillEverywhere(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &newIntervals) {
+ void trivialSpillEverywhere(LiveRangeEdit& LRE) {
+ LiveInterval* li = &LRE.getParent();
+
DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
assert(li->weight != HUGE_VALF &&
@@ -116,17 +116,14 @@ protected:
}
// Create a new vreg & interval for this instr.
- unsigned newVReg = mri->createVirtualRegister(trc);
- vrm->grow();
- vrm->assignVirt2StackSlot(newVReg, ss);
- LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
+ LiveInterval *newLI = &LRE.create();
newLI->weight = HUGE_VALF;
// Update the reg operands & kill flags.
for (unsigned i = 0; i < indices.size(); ++i) {
unsigned mopIdx = indices[i];
MachineOperand &mop = mi->getOperand(mopIdx);
- mop.setReg(newVReg);
+ mop.setReg(newLI->reg);
if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
mop.setIsKill(true);
}
@@ -136,33 +133,29 @@ protected:
// Insert reload if necessary.
MachineBasicBlock::iterator miItr(mi);
if (hasUse) {
- tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
+ tii->loadRegFromStackSlot(*mi->getParent(), miItr, newLI->reg, ss, trc,
tri);
MachineInstr *loadInstr(prior(miItr));
SlotIndex loadIndex =
- lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
- vrm->addSpillSlotUse(ss, loadInstr);
+ lis->InsertMachineInstrInMaps(loadInstr).getRegSlot();
SlotIndex endIndex = loadIndex.getNextIndex();
VNInfo *loadVNI =
- newLI->getNextValue(loadIndex, 0, lis->getVNInfoAllocator());
+ newLI->getNextValue(loadIndex, lis->getVNInfoAllocator());
newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
}
// Insert store if necessary.
if (hasDef) {
- tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
+ tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newLI->reg,
true, ss, trc, tri);
MachineInstr *storeInstr(llvm::next(miItr));
SlotIndex storeIndex =
- lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
- vrm->addSpillSlotUse(ss, storeInstr);
+ lis->InsertMachineInstrInMaps(storeInstr).getRegSlot();
SlotIndex beginIndex = storeIndex.getPrevIndex();
VNInfo *storeVNI =
- newLI->getNextValue(beginIndex, 0, lis->getVNInfoAllocator());
+ newLI->getNextValue(beginIndex, lis->getVNInfoAllocator());
newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
}
-
- newIntervals.push_back(newLI);
}
}
};
@@ -182,60 +175,20 @@ public:
void spill(LiveRangeEdit &LRE) {
// Ignore spillIs - we don't use it.
- trivialSpillEverywhere(&LRE.getParent(), *LRE.getNewVRegs());
+ trivialSpillEverywhere(LRE);
}
};
} // end anonymous namespace
-namespace {
-
-/// Falls back on LiveIntervals::addIntervalsForSpills.
-class StandardSpiller : public Spiller {
-protected:
- MachineFunction *mf;
- LiveIntervals *lis;
- LiveStacks *lss;
- MachineLoopInfo *loopInfo;
- VirtRegMap *vrm;
-public:
- StandardSpiller(MachineFunctionPass &pass, MachineFunction &mf,
- VirtRegMap &vrm)
- : mf(&mf),
- lis(&pass.getAnalysis<LiveIntervals>()),
- lss(&pass.getAnalysis<LiveStacks>()),
- loopInfo(pass.getAnalysisIfAvailable<MachineLoopInfo>()),
- vrm(&vrm) {}
-
- /// Falls back on LiveIntervals::addIntervalsForSpills.
- void spill(LiveRangeEdit &LRE) {
- std::vector<LiveInterval*> added =
- lis->addIntervalsForSpills(LRE.getParent(), LRE.getUselessVRegs(),
- loopInfo, *vrm);
- LRE.getNewVRegs()->insert(LRE.getNewVRegs()->end(),
- added.begin(), added.end());
-
- // Update LiveStacks.
- int SS = vrm->getStackSlot(LRE.getReg());
- if (SS == VirtRegMap::NO_STACK_SLOT)
- return;
- const TargetRegisterClass *RC = mf->getRegInfo().getRegClass(LRE.getReg());
- LiveInterval &SI = lss->getOrCreateInterval(SS, RC);
- if (!SI.hasAtLeastOneValue())
- SI.getNextValue(SlotIndex(), 0, lss->getVNInfoAllocator());
- SI.MergeRangesInAsValue(LRE.getParent(), SI.getValNumInfo(0));
- }
-};
-
-} // end anonymous namespace
+void Spiller::anchor() { }
llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
MachineFunction &mf,
VirtRegMap &vrm) {
switch (spillerOpt) {
- default: assert(0 && "unknown spiller");
case trivial: return new TrivialSpiller(pass, mf, vrm);
- case standard: return new StandardSpiller(pass, mf, vrm);
case inline_: return createInlineSpiller(pass, mf, vrm);
}
+ llvm_unreachable("Invalid spiller optimization");
}
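
Replacing the assert(0) default with a trailing llvm_unreachable is the covered-switch idiom: with no default case the compiler can warn when a new SpillerName enumerator goes unhandled, and the unreachable marker placates -Wreturn-type on the impossible fall-through. A self-contained sketch, with plain abort() standing in for llvm_unreachable:

    #include <cstdlib>

    enum SpillerName { trivial, inline_ };

    int pickSpiller(SpillerName Opt) {
      switch (Opt) {           // fully covered, intentionally no default
      case trivial: return 0;
      case inline_: return 1;
      }
      abort();                 // unreachable if every enumerator is handled
    }
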
diff --git a/contrib/llvm/lib/CodeGen/Spiller.h b/contrib/llvm/lib/CodeGen/Spiller.h
index 41f1727..b7d5bea 100644
--- a/contrib/llvm/lib/CodeGen/Spiller.h
+++ b/contrib/llvm/lib/CodeGen/Spiller.h
@@ -22,6 +22,7 @@ namespace llvm {
/// Implementations are utility classes which insert spill or remat code on
/// demand.
class Spiller {
+ virtual void anchor();
public:
virtual ~Spiller() = 0;
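
The new anchor() member is the usual weak-vtable fix: an abstract class with only inline or pure virtual functions would otherwise have its vtable emitted in every translation unit that uses it. One out-of-line virtual definition pins the vtable and RTTI to a single object file. A condensed illustration (SpillerLike is a made-up name):

    class SpillerLike {
      virtual void anchor();      // declared, but defined out of line...
    public:
      virtual ~SpillerLike() {}
    };

    void SpillerLike::anchor() {} // ...in exactly one .cpp, which then
                                  // becomes the vtable's unique home
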
diff --git a/contrib/llvm/lib/CodeGen/SplitKit.cpp b/contrib/llvm/lib/CodeGen/SplitKit.cpp
index 6362780..9959f74 100644
--- a/contrib/llvm/lib/CodeGen/SplitKit.cpp
+++ b/contrib/llvm/lib/CodeGen/SplitKit.cpp
@@ -14,10 +14,10 @@
#define DEBUG_TYPE "regalloc"
#include "SplitKit.h"
-#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
@@ -62,13 +62,14 @@ SlotIndex SplitAnalysis::computeLastSplitPoint(unsigned Num) {
const MachineBasicBlock *MBB = MF.getBlockNumbered(Num);
const MachineBasicBlock *LPad = MBB->getLandingPadSuccessor();
std::pair<SlotIndex, SlotIndex> &LSP = LastSplitPoint[Num];
+ SlotIndex MBBEnd = LIS.getMBBEndIdx(MBB);
// Compute split points on the first call. The pair is independent of the
// current live interval.
if (!LSP.first.isValid()) {
MachineBasicBlock::const_iterator FirstTerm = MBB->getFirstTerminator();
if (FirstTerm == MBB->end())
- LSP.first = LIS.getMBBEndIdx(MBB);
+ LSP.first = MBBEnd;
else
LSP.first = LIS.getInstructionIndex(FirstTerm);
@@ -80,7 +81,7 @@ SlotIndex SplitAnalysis::computeLastSplitPoint(unsigned Num) {
for (MachineBasicBlock::const_iterator I = MBB->end(), E = MBB->begin();
I != E;) {
--I;
- if (I->getDesc().isCall()) {
+ if (I->isCall()) {
LSP.second = LIS.getInstructionIndex(I);
break;
}
@@ -89,10 +90,32 @@ SlotIndex SplitAnalysis::computeLastSplitPoint(unsigned Num) {
// If CurLI is live into a landing pad successor, move the last split point
// back to the call that may throw.
- if (LPad && LSP.second.isValid() && LIS.isLiveInToMBB(*CurLI, LPad))
- return LSP.second;
- else
+ if (!LPad || !LSP.second || !LIS.isLiveInToMBB(*CurLI, LPad))
+ return LSP.first;
+
+ // Find the value leaving MBB.
+ const VNInfo *VNI = CurLI->getVNInfoBefore(MBBEnd);
+ if (!VNI)
+ return LSP.first;
+
+ // If the value leaving MBB was defined after the call in MBB, it can't
+ // really be live-in to the landing pad. This can happen if the landing pad
+ // has a PHI, and this register is undef on the exceptional edge.
+ // <rdar://problem/10664933>
+ if (!SlotIndex::isEarlierInstr(VNI->def, LSP.second) && VNI->def < MBBEnd)
return LSP.first;
+
+ // Value is properly live-in to the landing pad.
+ // Only allow splits before the call.
+ return LSP.second;
+}
+
+MachineBasicBlock::iterator
+SplitAnalysis::getLastSplitPointIter(MachineBasicBlock *MBB) {
+ SlotIndex LSP = getLastSplitPoint(MBB->getNumber());
+ if (LSP == LIS.getMBBEndIdx(MBB))
+ return MBB->end();
+ return LIS.getInstructionFromIndex(LSP);
}
/// analyzeUses - Count instructions, basic blocks, and loops using CurLI.
@@ -112,7 +135,7 @@ void SplitAnalysis::analyzeUses() {
I = MRI.use_nodbg_begin(CurLI->reg), E = MRI.use_nodbg_end(); I != E;
++I)
if (!I.getOperand().isUndef())
- UseSlots.push_back(LIS.getInstructionIndex(&*I).getDefIndex());
+ UseSlots.push_back(LIS.getInstructionIndex(&*I).getRegSlot());
array_pod_sort(UseSlots.begin(), UseSlots.end());
@@ -328,7 +351,7 @@ void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) {
// We don't need an AliasAnalysis since we will only be performing
// cheap-as-a-copy remats anyway.
- Edit->anyRematerializable(LIS, TII, 0);
+ Edit->anyRematerializable(0);
}
void SplitEditor::dump() const {
@@ -351,7 +374,7 @@ VNInfo *SplitEditor::defValue(unsigned RegIdx,
LiveInterval *LI = Edit->get(RegIdx);
// Create a new value.
- VNInfo *VNI = LI->getNextValue(Idx, 0, LIS.getVNInfoAllocator());
+ VNInfo *VNI = LI->getNextValue(Idx, LIS.getVNInfoAllocator());
// Use insert for lookup, so we can add missing values with a second lookup.
std::pair<ValueMap::iterator, bool> InsP =
@@ -366,14 +389,14 @@ VNInfo *SplitEditor::defValue(unsigned RegIdx,
// If the previous value was a simple mapping, add liveness for it now.
if (VNInfo *OldVNI = InsP.first->second.getPointer()) {
SlotIndex Def = OldVNI->def;
- LI->addRange(LiveRange(Def, Def.getNextSlot(), OldVNI));
+ LI->addRange(LiveRange(Def, Def.getDeadSlot(), OldVNI));
// No longer a simple mapping. Switch to a complex, non-forced mapping.
InsP.first->second = ValueForcePair();
}
// This is a complex mapping, add liveness for VNI
SlotIndex Def = VNI->def;
- LI->addRange(LiveRange(Def, Def.getNextSlot(), VNI));
+ LI->addRange(LiveRange(Def, Def.getDeadSlot(), VNI));
return VNI;
}
@@ -393,7 +416,7 @@ void SplitEditor::forceRecompute(unsigned RegIdx, const VNInfo *ParentVNI) {
// This was previously a single mapping. Make sure the old def is represented
// by a trivial live range.
SlotIndex Def = VNI->def;
- Edit->get(RegIdx)->addRange(LiveRange(Def, Def.getNextSlot(), VNI));
+ Edit->get(RegIdx)->addRange(LiveRange(Def, Def.getDeadSlot(), VNI));
// Mark as complex mapped, forced.
VFP = ValueForcePair(0, true);
}
@@ -413,33 +436,31 @@ VNInfo *SplitEditor::defFromParent(unsigned RegIdx,
// Attempt cheap-as-a-copy rematerialization.
LiveRangeEdit::Remat RM(ParentVNI);
- if (Edit->canRematerializeAt(RM, UseIdx, true, LIS)) {
- Def = Edit->rematerializeAt(MBB, I, LI->reg, RM, LIS, TII, TRI, Late);
+ if (Edit->canRematerializeAt(RM, UseIdx, true)) {
+ Def = Edit->rematerializeAt(MBB, I, LI->reg, RM, TRI, Late);
++NumRemats;
} else {
// Can't remat, just insert a copy from parent.
CopyMI = BuildMI(MBB, I, DebugLoc(), TII.get(TargetOpcode::COPY), LI->reg)
.addReg(Edit->getReg());
Def = LIS.getSlotIndexes()->insertMachineInstrInMaps(CopyMI, Late)
- .getDefIndex();
+ .getRegSlot();
++NumCopies;
}
// Define the value in Reg.
- VNInfo *VNI = defValue(RegIdx, ParentVNI, Def);
- VNI->setCopy(CopyMI);
- return VNI;
+ return defValue(RegIdx, ParentVNI, Def);
}
/// Create a new virtual register and live interval.
unsigned SplitEditor::openIntv() {
// Create the complement as index 0.
if (Edit->empty())
- Edit->create(LIS, VRM);
+ Edit->create();
// Create the open interval.
OpenIdx = Edit->size();
- Edit->create(LIS, VRM);
+ Edit->create();
return OpenIdx;
}
@@ -497,7 +518,7 @@ SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) {
}
DEBUG(dbgs() << ": valno " << ParentVNI->id);
VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Last, MBB,
- LIS.getLastSplitPoint(Edit->getParent(), &MBB));
+ SA.getLastSplitPointIter(&MBB));
RegAssign.insert(VNI->def, End, OpenIdx);
DEBUG(dump());
return VNI->def;
@@ -586,7 +607,7 @@ SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
void SplitEditor::overlapIntv(SlotIndex Start, SlotIndex End) {
assert(OpenIdx && "openIntv not called before overlapIntv");
const VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start);
- assert(ParentVNI == Edit->getParent().getVNInfoAt(End.getPrevSlot()) &&
+ assert(ParentVNI == Edit->getParent().getVNInfoBefore(End) &&
"Parent changes value in extended range");
assert(LIS.getMBBFromIndex(Start) == LIS.getMBBFromIndex(End) &&
"Range cannot span basic blocks");
@@ -640,7 +661,7 @@ void SplitEditor::removeBackCopies(SmallVectorImpl<VNInfo*> &Copies) {
DEBUG(dbgs() << " cannot find simple kill of RegIdx " << RegIdx << '\n');
forceRecompute(RegIdx, Edit->getParent().getVNInfoAt(Def));
} else {
- SlotIndex Kill = LIS.getInstructionIndex(MBBI).getDefIndex();
+ SlotIndex Kill = LIS.getInstructionIndex(MBBI).getRegSlot();
DEBUG(dbgs() << " move kill to " << Kill << '\t' << *MBBI);
AssignI.setStop(Kill);
}
@@ -780,7 +801,7 @@ void SplitEditor::hoistCopiesForSize() {
SlotIndex Last = LIS.getMBBEndIdx(Dom.first).getPrevSlot();
Dom.second =
defFromParent(0, ParentVNI, Last, *Dom.first,
- LIS.getLastSplitPoint(Edit->getParent(), Dom.first))->def;
+ SA.getLastSplitPointIter(Dom.first))->def;
}
// Remove redundant back-copies that are now known to be dominated by another
@@ -958,7 +979,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
// use the same register as the def, so just do that always.
SlotIndex Idx = LIS.getInstructionIndex(MI);
if (MO.isDef() || MO.isUndef())
- Idx = MO.isEarlyClobber() ? Idx.getUseIndex() : Idx.getDefIndex();
+ Idx = Idx.getRegSlot(MO.isEarlyClobber());
// Rewrite to the mapped register at Idx.
unsigned RegIdx = RegAssign.lookup(Idx);
@@ -981,7 +1002,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
if (!Edit->getParent().liveAt(Idx))
continue;
} else
- Idx = Idx.getUseIndex();
+ Idx = Idx.getRegSlot(true);
getLRCalc(RegIdx).extend(LI, Idx.getNextSlot(), LIS.getSlotIndexes(),
&MDT, &LIS.getVNInfoAllocator());
@@ -994,8 +1015,8 @@ void SplitEditor::deleteRematVictims() {
LiveInterval *LI = *I;
for (LiveInterval::const_iterator LII = LI->begin(), LIE = LI->end();
LII != LIE; ++LII) {
- // Dead defs end at the store slot.
- if (LII->end != LII->valno->def.getNextSlot())
+ // Dead defs end at the dead slot.
+ if (LII->end != LII->valno->def.getDeadSlot())
continue;
MachineInstr *MI = LIS.getInstructionFromIndex(LII->valno->def);
assert(MI && "Missing instruction for dead def");
@@ -1012,7 +1033,7 @@ void SplitEditor::deleteRematVictims() {
if (Dead.empty())
return;
- Edit->eliminateDeadDefs(Dead, LIS, VRM, TII);
+ Edit->eliminateDeadDefs(Dead);
}
void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
@@ -1030,7 +1051,6 @@ void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
unsigned RegIdx = RegAssign.lookup(ParentVNI->def);
VNInfo *VNI = defValue(RegIdx, ParentVNI, ParentVNI->def);
VNI->setIsPHIDef(ParentVNI->isPHIDef());
- VNI->setCopy(ParentVNI->getCopy());
// Force rematted values to be recomputed everywhere.
// The new live ranges may be truncated.
@@ -1049,7 +1069,6 @@ void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
break;
case SM_Speed:
llvm_unreachable("Spill mode 'speed' not implemented yet");
- break;
}
// Transfer the simply mapped values, check if any are skipped.
@@ -1089,7 +1108,7 @@ void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
SmallVector<LiveInterval*, 8> dups;
dups.push_back(li);
for (unsigned j = 1; j != NumComp; ++j)
- dups.push_back(&Edit->create(LIS, VRM));
+ dups.push_back(&Edit->create());
ConEQ.Distribute(&dups[0], MRI);
// The new intervals all map back to i.
if (LRMap)
@@ -1097,7 +1116,7 @@ void SplitEditor::finish(SmallVectorImpl<unsigned> *LRMap) {
}
// Calculate spill weight and allocation hints for new intervals.
- Edit->calculateRegClassAndHint(VRM.getMachineFunction(), LIS, SA.Loops);
+ Edit->calculateRegClassAndHint(VRM.getMachineFunction(), SA.Loops);
assert(!LRMap || LRMap->size() == Edit->size());
}
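
The landing-pad handling in computeLastSplitPoint deserves a gloss: a copy inserted after the throwing call would never execute on the exceptional path, so a value that is live into the landing pad forces the split point back to the call, unless the value leaving the block is only defined after that call (the undef-on-exceptional-edge PHI case the rdar comment describes). Boiled down to plain integers as hypothetical stand-ins for the SlotIndex queries:

    // FirstTerm/LastCall are positions within the block; the two flags
    // summarize the LiveIntervals checks done by the real code.
    unsigned lastSplitPoint(unsigned FirstTerm, unsigned LastCall,
                            bool LPadLiveIn, bool DefAfterCall) {
      if (!LPadLiveIn || DefAfterCall)
        return FirstTerm;  // no exceptional constraint: split at terminator
      return LastCall;     // value reaches the landing pad: split no later
                           // than the call that may throw
    }
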
diff --git a/contrib/llvm/lib/CodeGen/SplitKit.h b/contrib/llvm/lib/CodeGen/SplitKit.h
index d8fc212..4005a3d 100644
--- a/contrib/llvm/lib/CodeGen/SplitKit.h
+++ b/contrib/llvm/lib/CodeGen/SplitKit.h
@@ -46,9 +46,6 @@ public:
const MachineLoopInfo &Loops;
const TargetInstrInfo &TII;
- // Sorted slot indexes of using instructions.
- SmallVector<SlotIndex, 8> UseSlots;
-
/// Additional information about basic blocks where the current variable is
/// live. Such a block will look like one of these templates:
///
@@ -85,6 +82,9 @@ private:
// Current live interval.
const LiveInterval *CurLI;
+ // Sorted slot indexes of using instructions.
+ SmallVector<SlotIndex, 8> UseSlots;
+
/// LastSplitPoint - Last legal split point in each basic block in the current
/// function. The first entry is the first terminator, the second entry is the
/// last valid split point for a variable that is live in to a landing pad
@@ -135,7 +135,7 @@ public:
/// getParent - Return the last analyzed interval.
const LiveInterval &getParent() const { return *CurLI; }
- /// getLastSplitPoint - Return that base index of the last valid split point
+ /// getLastSplitPoint - Return the base index of the last valid split point
/// in the basic block numbered Num.
SlotIndex getLastSplitPoint(unsigned Num) {
// Inline the common simple case.
@@ -145,6 +145,9 @@ public:
return computeLastSplitPoint(Num);
}
+ /// getLastSplitPointIter - Returns the last split point as an iterator.
+ MachineBasicBlock::iterator getLastSplitPointIter(MachineBasicBlock*);
+
/// isOriginalEndpoint - Return true if the original live range was killed or
/// (re-)defined at Idx. Idx should be the 'def' slot for a normal kill/def,
/// and 'use' for an early-clobber def.
@@ -152,6 +155,10 @@ public:
/// splitting.
bool isOriginalEndpoint(SlotIndex Idx) const;
+ /// getUseSlots - Return an array of SlotIndexes of instructions using CurLI.
+ /// This includes both use and def operands, at most one entry per instruction.
+ ArrayRef<SlotIndex> getUseSlots() const { return UseSlots; }
+
/// getUseBlocks - Return an array of BlockInfo objects for the basic blocks
/// where CurLI has uses.
ArrayRef<BlockInfo> getUseBlocks() const { return UseBlocks; }
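
Moving UseSlots behind getUseSlots() follows the read-only ArrayRef accessor pattern: callers get a cheap view of the sorted buffer and cannot resize it, and the view stays valid only while the analysis owns the vector. A hypothetical consumer, assuming llvm/Support/Debug.h is available:

    void dumpUseSlots(const SplitAnalysis &SA) {
      ArrayRef<SlotIndex> Slots = SA.getUseSlots();
      for (unsigned i = 0, e = Slots.size(); i != e; ++i)
        dbgs() << Slots[i] << ' ';   // already sorted, one per instruction
      dbgs() << '\n';
    }
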
diff --git a/contrib/llvm/lib/CodeGen/Splitter.cpp b/contrib/llvm/lib/CodeGen/Splitter.cpp
deleted file mode 100644
index 77973b7..0000000
--- a/contrib/llvm/lib/CodeGen/Splitter.cpp
+++ /dev/null
@@ -1,827 +0,0 @@
-//===-- llvm/CodeGen/Splitter.cpp - Splitter -----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "loopsplitter"
-
-#include "Splitter.h"
-
-#include "llvm/Module.h"
-#include "llvm/CodeGen/CalcSpillWeights.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/LiveStackAnalysis.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/SlotIndexes.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-using namespace llvm;
-
-char LoopSplitter::ID = 0;
-INITIALIZE_PASS_BEGIN(LoopSplitter, "loop-splitting",
- "Split virtual regists across loop boundaries.", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
-INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_END(LoopSplitter, "loop-splitting",
- "Split virtual regists across loop boundaries.", false, false)
-
-namespace llvm {
-
- class StartSlotComparator {
- public:
- StartSlotComparator(LiveIntervals &lis) : lis(lis) {}
- bool operator()(const MachineBasicBlock *mbb1,
- const MachineBasicBlock *mbb2) const {
- return lis.getMBBStartIdx(mbb1) < lis.getMBBStartIdx(mbb2);
- }
- private:
- LiveIntervals &lis;
- };
-
- class LoopSplit {
- public:
- LoopSplit(LoopSplitter &ls, LiveInterval &li, MachineLoop &loop)
- : ls(ls), li(li), loop(loop), valid(true), inSplit(false), newLI(0) {
- assert(TargetRegisterInfo::isVirtualRegister(li.reg) &&
- "Cannot split physical registers.");
- }
-
- LiveInterval& getLI() const { return li; }
-
- MachineLoop& getLoop() const { return loop; }
-
- bool isValid() const { return valid; }
-
- bool isWorthwhile() const { return valid && (inSplit || !outSplits.empty()); }
-
- void invalidate() { valid = false; }
-
- void splitIncoming() { inSplit = true; }
-
- void splitOutgoing(MachineLoop::Edge &edge) { outSplits.insert(edge); }
-
- void addLoopInstr(MachineInstr *i) { loopInstrs.push_back(i); }
-
- void apply() {
- assert(valid && "Attempt to apply invalid split.");
- applyIncoming();
- applyOutgoing();
- copyRanges();
- renameInside();
- }
-
- private:
- LoopSplitter &ls;
- LiveInterval &li;
- MachineLoop &loop;
- bool valid, inSplit;
- std::set<MachineLoop::Edge> outSplits;
- std::vector<MachineInstr*> loopInstrs;
-
- LiveInterval *newLI;
- std::map<VNInfo*, VNInfo*> vniMap;
-
- LiveInterval* getNewLI() {
- if (newLI == 0) {
- const TargetRegisterClass *trc = ls.mri->getRegClass(li.reg);
- unsigned vreg = ls.mri->createVirtualRegister(trc);
- newLI = &ls.lis->getOrCreateInterval(vreg);
- }
- return newLI;
- }
-
- VNInfo* getNewVNI(VNInfo *oldVNI) {
- VNInfo *newVNI = vniMap[oldVNI];
-
- if (newVNI == 0) {
- newVNI = getNewLI()->createValueCopy(oldVNI,
- ls.lis->getVNInfoAllocator());
- vniMap[oldVNI] = newVNI;
- }
-
- return newVNI;
- }
-
- void applyIncoming() {
- if (!inSplit) {
- return;
- }
-
- MachineBasicBlock *preHeader = loop.getLoopPreheader();
- if (preHeader == 0) {
- assert(ls.canInsertPreHeader(loop) &&
- "Can't insert required preheader.");
- preHeader = &ls.insertPreHeader(loop);
- }
-
- LiveRange *preHeaderRange =
- ls.lis->findExitingRange(li, preHeader);
- assert(preHeaderRange != 0 && "Range not live into preheader.");
-
- // Insert the new copy.
- MachineInstr *copy = BuildMI(*preHeader,
- preHeader->getFirstTerminator(),
- DebugLoc(),
- ls.tii->get(TargetOpcode::COPY))
- .addReg(getNewLI()->reg, RegState::Define)
- .addReg(li.reg, RegState::Kill);
-
- ls.lis->InsertMachineInstrInMaps(copy);
-
- SlotIndex copyDefIdx = ls.lis->getInstructionIndex(copy).getDefIndex();
-
- VNInfo *newVal = getNewVNI(preHeaderRange->valno);
- newVal->def = copyDefIdx;
- newVal->setCopy(copy);
- li.removeRange(copyDefIdx, ls.lis->getMBBEndIdx(preHeader), true);
-
- getNewLI()->addRange(LiveRange(copyDefIdx,
- ls.lis->getMBBEndIdx(preHeader),
- newVal));
- }
-
- void applyOutgoing() {
-
- for (std::set<MachineLoop::Edge>::iterator osItr = outSplits.begin(),
- osEnd = outSplits.end();
- osItr != osEnd; ++osItr) {
- MachineLoop::Edge edge = *osItr;
- MachineBasicBlock *outBlock = edge.second;
- if (ls.isCriticalEdge(edge)) {
- assert(ls.canSplitEdge(edge) && "Unsplittable critical edge.");
- outBlock = &ls.splitEdge(edge, loop);
- }
- LiveRange *outRange = ls.lis->findEnteringRange(li, outBlock);
- assert(outRange != 0 && "No exiting range?");
-
- MachineInstr *copy = BuildMI(*outBlock, outBlock->begin(),
- DebugLoc(),
- ls.tii->get(TargetOpcode::COPY))
- .addReg(li.reg, RegState::Define)
- .addReg(getNewLI()->reg, RegState::Kill);
-
- ls.lis->InsertMachineInstrInMaps(copy);
-
- SlotIndex copyDefIdx = ls.lis->getInstructionIndex(copy).getDefIndex();
-
- // Blow away output range definition.
- outRange->valno->def = ls.lis->getInvalidIndex();
- li.removeRange(ls.lis->getMBBStartIdx(outBlock), copyDefIdx);
-
- SlotIndex newDefIdx = ls.lis->getMBBStartIdx(outBlock);
- assert(ls.lis->getInstructionFromIndex(newDefIdx) == 0 &&
- "PHI def index points at actual instruction.");
- VNInfo *newVal =
- getNewLI()->getNextValue(newDefIdx, 0, ls.lis->getVNInfoAllocator());
-
- getNewLI()->addRange(LiveRange(ls.lis->getMBBStartIdx(outBlock),
- copyDefIdx, newVal));
-
- }
- }
-
- void copyRange(LiveRange &lr) {
- std::pair<bool, LoopSplitter::SlotPair> lsr =
- ls.getLoopSubRange(lr, loop);
-
- if (!lsr.first)
- return;
-
- LiveRange loopRange(lsr.second.first, lsr.second.second,
- getNewVNI(lr.valno));
-
- li.removeRange(loopRange.start, loopRange.end, true);
-
- getNewLI()->addRange(loopRange);
- }
-
- void copyRanges() {
- for (std::vector<MachineInstr*>::iterator iItr = loopInstrs.begin(),
- iEnd = loopInstrs.end();
- iItr != iEnd; ++iItr) {
- MachineInstr &instr = **iItr;
- SlotIndex instrIdx = ls.lis->getInstructionIndex(&instr);
- if (instr.modifiesRegister(li.reg, 0)) {
- LiveRange *defRange =
- li.getLiveRangeContaining(instrIdx.getDefIndex());
- if (defRange != 0) // May have caught this already.
- copyRange(*defRange);
- }
- if (instr.readsRegister(li.reg, 0)) {
- LiveRange *useRange =
- li.getLiveRangeContaining(instrIdx.getUseIndex());
- if (useRange != 0) { // May have caught this already.
- copyRange(*useRange);
- }
- }
- }
-
- for (MachineLoop::block_iterator bbItr = loop.block_begin(),
- bbEnd = loop.block_end();
- bbItr != bbEnd; ++bbItr) {
- MachineBasicBlock &loopBlock = **bbItr;
- LiveRange *enteringRange =
- ls.lis->findEnteringRange(li, &loopBlock);
- if (enteringRange != 0) {
- copyRange(*enteringRange);
- }
- }
- }
-
- void renameInside() {
- for (std::vector<MachineInstr*>::iterator iItr = loopInstrs.begin(),
- iEnd = loopInstrs.end();
- iItr != iEnd; ++iItr) {
- MachineInstr &instr = **iItr;
- for (unsigned i = 0; i < instr.getNumOperands(); ++i) {
- MachineOperand &mop = instr.getOperand(i);
- if (mop.isReg() && mop.getReg() == li.reg) {
- mop.setReg(getNewLI()->reg);
- }
- }
- }
- }
-
- };
-
- void LoopSplitter::getAnalysisUsage(AnalysisUsage &au) const {
- au.addRequired<MachineDominatorTree>();
- au.addPreserved<MachineDominatorTree>();
- au.addRequired<MachineLoopInfo>();
- au.addPreserved<MachineLoopInfo>();
- au.addPreservedID(RegisterCoalescerPassID);
- au.addPreserved<CalculateSpillWeights>();
- au.addPreserved<LiveStacks>();
- au.addRequired<SlotIndexes>();
- au.addPreserved<SlotIndexes>();
- au.addRequired<LiveIntervals>();
- au.addPreserved<LiveIntervals>();
- MachineFunctionPass::getAnalysisUsage(au);
- }
-
- bool LoopSplitter::runOnMachineFunction(MachineFunction &fn) {
-
- mf = &fn;
- mri = &mf->getRegInfo();
- tii = mf->getTarget().getInstrInfo();
- tri = mf->getTarget().getRegisterInfo();
- sis = &getAnalysis<SlotIndexes>();
- lis = &getAnalysis<LiveIntervals>();
- mli = &getAnalysis<MachineLoopInfo>();
- mdt = &getAnalysis<MachineDominatorTree>();
-
- fqn = mf->getFunction()->getParent()->getModuleIdentifier() + "." +
- mf->getFunction()->getName().str();
-
- dbgs() << "Splitting " << mf->getFunction()->getName() << ".";
-
- dumpOddTerminators();
-
-// dbgs() << "----------------------------------------\n";
-// lis->dump();
-// dbgs() << "----------------------------------------\n";
-
-// std::deque<MachineLoop*> loops;
-// std::copy(mli->begin(), mli->end(), std::back_inserter(loops));
-// dbgs() << "Loops:\n";
-// while (!loops.empty()) {
-// MachineLoop &loop = *loops.front();
-// loops.pop_front();
-// std::copy(loop.begin(), loop.end(), std::back_inserter(loops));
-
-// dumpLoopInfo(loop);
-// }
-
- //lis->dump();
- //exit(0);
-
- // Setup initial intervals.
- for (LiveIntervals::iterator liItr = lis->begin(), liEnd = lis->end();
- liItr != liEnd; ++liItr) {
- LiveInterval *li = liItr->second;
-
- if (TargetRegisterInfo::isVirtualRegister(li->reg) &&
- !lis->intervalIsInOneMBB(*li)) {
- intervals.push_back(li);
- }
- }
-
- processIntervals();
-
- intervals.clear();
-
-// dbgs() << "----------------------------------------\n";
-// lis->dump();
-// dbgs() << "----------------------------------------\n";
-
- dumpOddTerminators();
-
- //exit(1);
-
- return false;
- }
-
- void LoopSplitter::releaseMemory() {
- fqn.clear();
- intervals.clear();
- loopRangeMap.clear();
- }
-
- void LoopSplitter::dumpOddTerminators() {
- for (MachineFunction::iterator bbItr = mf->begin(), bbEnd = mf->end();
- bbItr != bbEnd; ++bbItr) {
- MachineBasicBlock *mbb = &*bbItr;
- MachineBasicBlock *a = 0, *b = 0;
- SmallVector<MachineOperand, 4> c;
- if (tii->AnalyzeBranch(*mbb, a, b, c)) {
- dbgs() << "MBB#" << mbb->getNumber() << " has multiway terminator.\n";
- dbgs() << " Terminators:\n";
- for (MachineBasicBlock::iterator iItr = mbb->begin(), iEnd = mbb->end();
- iItr != iEnd; ++iItr) {
- MachineInstr *instr= &*iItr;
- dbgs() << " " << *instr << "";
- }
- dbgs() << "\n Listed successors: [ ";
- for (MachineBasicBlock::succ_iterator sItr = mbb->succ_begin(), sEnd = mbb->succ_end();
- sItr != sEnd; ++sItr) {
- MachineBasicBlock *succMBB = *sItr;
- dbgs() << succMBB->getNumber() << " ";
- }
- dbgs() << "]\n\n";
- }
- }
- }
-
- void LoopSplitter::dumpLoopInfo(MachineLoop &loop) {
- MachineBasicBlock &headerBlock = *loop.getHeader();
- typedef SmallVector<MachineLoop::Edge, 8> ExitEdgesList;
- ExitEdgesList exitEdges;
- loop.getExitEdges(exitEdges);
-
- dbgs() << " Header: BB#" << headerBlock.getNumber() << ", Contains: [ ";
- for (std::vector<MachineBasicBlock*>::const_iterator
- subBlockItr = loop.getBlocks().begin(),
- subBlockEnd = loop.getBlocks().end();
- subBlockItr != subBlockEnd; ++subBlockItr) {
- MachineBasicBlock &subBlock = **subBlockItr;
- dbgs() << "BB#" << subBlock.getNumber() << " ";
- }
- dbgs() << "], Exit edges: [ ";
- for (ExitEdgesList::iterator exitEdgeItr = exitEdges.begin(),
- exitEdgeEnd = exitEdges.end();
- exitEdgeItr != exitEdgeEnd; ++exitEdgeItr) {
- MachineLoop::Edge &exitEdge = *exitEdgeItr;
- dbgs() << "(MBB#" << exitEdge.first->getNumber()
- << ", MBB#" << exitEdge.second->getNumber() << ") ";
- }
- dbgs() << "], Sub-Loop Headers: [ ";
- for (MachineLoop::iterator subLoopItr = loop.begin(),
- subLoopEnd = loop.end();
- subLoopItr != subLoopEnd; ++subLoopItr) {
- MachineLoop &subLoop = **subLoopItr;
- MachineBasicBlock &subLoopBlock = *subLoop.getHeader();
- dbgs() << "BB#" << subLoopBlock.getNumber() << " ";
- }
- dbgs() << "]\n";
- }
-
- void LoopSplitter::updateTerminators(MachineBasicBlock &mbb) {
- mbb.updateTerminator();
-
- for (MachineBasicBlock::iterator miItr = mbb.begin(), miEnd = mbb.end();
- miItr != miEnd; ++miItr) {
- if (lis->isNotInMIMap(miItr)) {
- lis->InsertMachineInstrInMaps(miItr);
- }
- }
- }
-
- bool LoopSplitter::canInsertPreHeader(MachineLoop &loop) {
- MachineBasicBlock *header = loop.getHeader();
- MachineBasicBlock *a = 0, *b = 0;
- SmallVector<MachineOperand, 4> c;
-
- for (MachineBasicBlock::pred_iterator pbItr = header->pred_begin(),
- pbEnd = header->pred_end();
- pbItr != pbEnd; ++pbItr) {
- MachineBasicBlock *predBlock = *pbItr;
- if (!!tii->AnalyzeBranch(*predBlock, a, b, c)) {
- return false;
- }
- }
-
- MachineFunction::iterator headerItr(header);
- if (headerItr == mf->begin())
- return true;
- MachineBasicBlock *headerLayoutPred = llvm::prior(headerItr);
- assert(headerLayoutPred != 0 && "Header should have layout pred.");
-
- return (!tii->AnalyzeBranch(*headerLayoutPred, a, b, c));
- }
-
- MachineBasicBlock& LoopSplitter::insertPreHeader(MachineLoop &loop) {
- assert(loop.getLoopPreheader() == 0 && "Loop already has preheader.");
-
- MachineBasicBlock &header = *loop.getHeader();
-
- // Save the preds - we'll need to update them once we insert the preheader.
- typedef std::set<MachineBasicBlock*> HeaderPreds;
- HeaderPreds headerPreds;
-
- for (MachineBasicBlock::pred_iterator predItr = header.pred_begin(),
- predEnd = header.pred_end();
- predItr != predEnd; ++predItr) {
- if (!loop.contains(*predItr))
- headerPreds.insert(*predItr);
- }
-
- assert(!headerPreds.empty() && "No predecessors for header?");
-
- //dbgs() << fqn << " MBB#" << header.getNumber() << " inserting preheader...";
-
- MachineBasicBlock *preHeader =
- mf->CreateMachineBasicBlock(header.getBasicBlock());
-
- assert(preHeader != 0 && "Failed to create pre-header.");
-
- mf->insert(header, preHeader);
-
- for (HeaderPreds::iterator hpItr = headerPreds.begin(),
- hpEnd = headerPreds.end();
- hpItr != hpEnd; ++hpItr) {
- assert(*hpItr != 0 && "How'd a null predecessor get into this set?");
- MachineBasicBlock &hp = **hpItr;
- hp.ReplaceUsesOfBlockWith(&header, preHeader);
- }
- preHeader->addSuccessor(&header);
-
- MachineBasicBlock *oldLayoutPred =
- llvm::prior(MachineFunction::iterator(preHeader));
- if (oldLayoutPred != 0) {
- updateTerminators(*oldLayoutPred);
- }
-
- lis->InsertMBBInMaps(preHeader);
-
- if (MachineLoop *parentLoop = loop.getParentLoop()) {
- assert(parentLoop->getHeader() != loop.getHeader() &&
- "Parent loop has same header?");
- parentLoop->addBasicBlockToLoop(preHeader, mli->getBase());
-
- // Invalidate all parent loop ranges.
- while (parentLoop != 0) {
- loopRangeMap.erase(parentLoop);
- parentLoop = parentLoop->getParentLoop();
- }
- }
-
- for (LiveIntervals::iterator liItr = lis->begin(),
- liEnd = lis->end();
- liItr != liEnd; ++liItr) {
- LiveInterval &li = *liItr->second;
-
- // Is this safe for physregs?
- // TargetRegisterInfo::isPhysicalRegister(li.reg) ||
- if (!lis->isLiveInToMBB(li, &header))
- continue;
-
- if (lis->isLiveInToMBB(li, preHeader)) {
- assert(lis->isLiveOutOfMBB(li, preHeader) &&
- "Range terminates in newly added preheader?");
- continue;
- }
-
- bool insertRange = false;
-
- for (MachineBasicBlock::pred_iterator predItr = preHeader->pred_begin(),
- predEnd = preHeader->pred_end();
- predItr != predEnd; ++predItr) {
- MachineBasicBlock *predMBB = *predItr;
- if (lis->isLiveOutOfMBB(li, predMBB)) {
- insertRange = true;
- break;
- }
- }
-
- if (!insertRange)
- continue;
-
- SlotIndex newDefIdx = lis->getMBBStartIdx(preHeader);
- assert(lis->getInstructionFromIndex(newDefIdx) == 0 &&
- "PHI def index points at actual instruction.");
- VNInfo *newVal = li.getNextValue(newDefIdx, 0, lis->getVNInfoAllocator());
- li.addRange(LiveRange(lis->getMBBStartIdx(preHeader),
- lis->getMBBEndIdx(preHeader),
- newVal));
- }
-
-
- //dbgs() << "Dumping SlotIndexes:\n";
- //sis->dump();
-
- //dbgs() << "done. (Added MBB#" << preHeader->getNumber() << ")\n";
-
- return *preHeader;
- }
-
- bool LoopSplitter::isCriticalEdge(MachineLoop::Edge &edge) {
- assert(edge.first->succ_size() > 1 && "Non-sensical edge.");
- if (edge.second->pred_size() > 1)
- return true;
- return false;
- }
-
- bool LoopSplitter::canSplitEdge(MachineLoop::Edge &edge) {
- MachineFunction::iterator outBlockItr(edge.second);
- if (outBlockItr == mf->begin())
- return true;
- MachineBasicBlock *outBlockLayoutPred = llvm::prior(outBlockItr);
- assert(outBlockLayoutPred != 0 && "Should have a layout pred if out!=begin.");
- MachineBasicBlock *a = 0, *b = 0;
- SmallVector<MachineOperand, 4> c;
- return (!tii->AnalyzeBranch(*outBlockLayoutPred, a, b, c) &&
- !tii->AnalyzeBranch(*edge.first, a, b, c));
- }
-
- MachineBasicBlock& LoopSplitter::splitEdge(MachineLoop::Edge &edge,
- MachineLoop &loop) {
-
- MachineBasicBlock &inBlock = *edge.first;
- MachineBasicBlock &outBlock = *edge.second;
-
- assert((inBlock.succ_size() > 1) && (outBlock.pred_size() > 1) &&
- "Splitting non-critical edge?");
-
- //dbgs() << fqn << " Splitting edge (MBB#" << inBlock.getNumber()
- // << " -> MBB#" << outBlock.getNumber() << ")...";
-
- MachineBasicBlock *splitBlock =
- mf->CreateMachineBasicBlock();
-
- assert(splitBlock != 0 && "Failed to create split block.");
-
- mf->insert(&outBlock, splitBlock);
-
- inBlock.ReplaceUsesOfBlockWith(&outBlock, splitBlock);
- splitBlock->addSuccessor(&outBlock);
-
- MachineBasicBlock *oldLayoutPred =
- llvm::prior(MachineFunction::iterator(splitBlock));
- if (oldLayoutPred != 0) {
- updateTerminators(*oldLayoutPred);
- }
-
- lis->InsertMBBInMaps(splitBlock);
-
- loopRangeMap.erase(&loop);
-
- MachineLoop *splitParentLoop = loop.getParentLoop();
- while (splitParentLoop != 0 &&
- !splitParentLoop->contains(&outBlock)) {
- splitParentLoop = splitParentLoop->getParentLoop();
- }
-
- if (splitParentLoop != 0) {
- assert(splitParentLoop->contains(&loop) &&
- "Split-block parent doesn't contain original loop?");
- splitParentLoop->addBasicBlockToLoop(splitBlock, mli->getBase());
-
- // Invalidate all parent loop ranges.
- while (splitParentLoop != 0) {
- loopRangeMap.erase(splitParentLoop);
- splitParentLoop = splitParentLoop->getParentLoop();
- }
- }
-
-
- for (LiveIntervals::iterator liItr = lis->begin(),
- liEnd = lis->end();
- liItr != liEnd; ++liItr) {
- LiveInterval &li = *liItr->second;
- bool intersects = lis->isLiveOutOfMBB(li, &inBlock) &&
- lis->isLiveInToMBB(li, &outBlock);
- if (lis->isLiveInToMBB(li, splitBlock)) {
- if (!intersects) {
- li.removeRange(lis->getMBBStartIdx(splitBlock),
- lis->getMBBEndIdx(splitBlock), true);
- }
- } else if (intersects) {
- SlotIndex newDefIdx = lis->getMBBStartIdx(splitBlock);
- assert(lis->getInstructionFromIndex(newDefIdx) == 0 &&
- "PHI def index points at actual instruction.");
- VNInfo *newVal = li.getNextValue(newDefIdx, 0,
- lis->getVNInfoAllocator());
- li.addRange(LiveRange(lis->getMBBStartIdx(splitBlock),
- lis->getMBBEndIdx(splitBlock),
- newVal));
- }
- }
-
- //dbgs() << "done. (Added MBB#" << splitBlock->getNumber() << ")\n";
-
- return *splitBlock;
- }
-
- LoopSplitter::LoopRanges& LoopSplitter::getLoopRanges(MachineLoop &loop) {
- typedef std::set<MachineBasicBlock*, StartSlotComparator> LoopMBBSet;
- LoopRangeMap::iterator lrItr = loopRangeMap.find(&loop);
- if (lrItr == loopRangeMap.end()) {
- LoopMBBSet loopMBBs((StartSlotComparator(*lis)));
- std::copy(loop.block_begin(), loop.block_end(),
- std::inserter(loopMBBs, loopMBBs.begin()));
-
- assert(!loopMBBs.empty() && "No blocks in loop?");
-
- LoopRanges &loopRanges = loopRangeMap[&loop];
- assert(loopRanges.empty() && "Loop encountered but not processed?");
- SlotIndex oldEnd = lis->getMBBEndIdx(*loopMBBs.begin());
- loopRanges.push_back(
- std::make_pair(lis->getMBBStartIdx(*loopMBBs.begin()),
- lis->getInvalidIndex()));
- for (LoopMBBSet::iterator curBlockItr = llvm::next(loopMBBs.begin()),
- curBlockEnd = loopMBBs.end();
- curBlockItr != curBlockEnd; ++curBlockItr) {
- SlotIndex newStart = lis->getMBBStartIdx(*curBlockItr);
- if (newStart != oldEnd) {
- loopRanges.back().second = oldEnd;
- loopRanges.push_back(std::make_pair(newStart,
- lis->getInvalidIndex()));
- }
- oldEnd = lis->getMBBEndIdx(*curBlockItr);
- }
-
- loopRanges.back().second =
- lis->getMBBEndIdx(*llvm::prior(loopMBBs.end()));
-
- return loopRanges;
- }
- return lrItr->second;
- }
-
- std::pair<bool, LoopSplitter::SlotPair> LoopSplitter::getLoopSubRange(
- const LiveRange &lr,
- MachineLoop &loop) {
- LoopRanges &loopRanges = getLoopRanges(loop);
- LoopRanges::iterator lrItr = loopRanges.begin(),
- lrEnd = loopRanges.end();
- while (lrItr != lrEnd && lr.start >= lrItr->second) {
- ++lrItr;
- }
-
- if (lrItr == lrEnd) {
- SlotIndex invalid = lis->getInvalidIndex();
- return std::make_pair(false, SlotPair(invalid, invalid));
- }
-
- SlotIndex srStart(lr.start < lrItr->first ? lrItr->first : lr.start);
- SlotIndex srEnd(lr.end > lrItr->second ? lrItr->second : lr.end);
-
- return std::make_pair(true, SlotPair(srStart, srEnd));
- }
-
- void LoopSplitter::dumpLoopRanges(MachineLoop &loop) {
- LoopRanges &loopRanges = getLoopRanges(loop);
- dbgs() << "For loop MBB#" << loop.getHeader()->getNumber() << ", subranges are: [ ";
- for (LoopRanges::iterator lrItr = loopRanges.begin(), lrEnd = loopRanges.end();
- lrItr != lrEnd; ++lrItr) {
- dbgs() << "[" << lrItr->first << ", " << lrItr->second << ") ";
- }
- dbgs() << "]\n";
- }
-
- void LoopSplitter::processHeader(LoopSplit &split) {
- MachineBasicBlock &header = *split.getLoop().getHeader();
- //dbgs() << " Processing loop header BB#" << header.getNumber() << "\n";
-
- if (!lis->isLiveInToMBB(split.getLI(), &header))
- return; // Not live in, but nothing wrong so far.
-
- MachineBasicBlock *preHeader = split.getLoop().getLoopPreheader();
- if (!preHeader) {
-
- if (!canInsertPreHeader(split.getLoop())) {
- split.invalidate();
- return; // Couldn't insert a pre-header. Bail on this interval.
- }
-
- for (MachineBasicBlock::pred_iterator predItr = header.pred_begin(),
- predEnd = header.pred_end();
- predItr != predEnd; ++predItr) {
- if (lis->isLiveOutOfMBB(split.getLI(), *predItr)) {
- split.splitIncoming();
- break;
- }
- }
- } else if (lis->isLiveOutOfMBB(split.getLI(), preHeader)) {
- split.splitIncoming();
- }
- }
-
- void LoopSplitter::processLoopExits(LoopSplit &split) {
- typedef SmallVector<MachineLoop::Edge, 8> ExitEdgesList;
- ExitEdgesList exitEdges;
- split.getLoop().getExitEdges(exitEdges);
-
- //dbgs() << " Processing loop exits:\n";
-
- for (ExitEdgesList::iterator exitEdgeItr = exitEdges.begin(),
- exitEdgeEnd = exitEdges.end();
- exitEdgeItr != exitEdgeEnd; ++exitEdgeItr) {
- MachineLoop::Edge exitEdge = *exitEdgeItr;
-
- LiveRange *outRange =
- split.getLI().getLiveRangeContaining(lis->getMBBStartIdx(exitEdge.second));
-
- if (outRange != 0) {
- if (isCriticalEdge(exitEdge) && !canSplitEdge(exitEdge)) {
- split.invalidate();
- return;
- }
-
- split.splitOutgoing(exitEdge);
- }
- }
- }
-
- void LoopSplitter::processLoopUses(LoopSplit &split) {
- std::set<MachineInstr*> processed;
-
- for (MachineRegisterInfo::reg_iterator
- rItr = mri->reg_begin(split.getLI().reg),
- rEnd = mri->reg_end();
- rItr != rEnd; ++rItr) {
- MachineInstr &instr = *rItr;
- if (split.getLoop().contains(&instr) && processed.count(&instr) == 0) {
- split.addLoopInstr(&instr);
- processed.insert(&instr);
- }
- }
-
- //dbgs() << " Rewriting reg" << li.reg << " to reg" << newLI->reg
- // << " in blocks [ ";
- //dbgs() << "]\n";
- }
-
- bool LoopSplitter::splitOverLoop(LiveInterval &li, MachineLoop &loop) {
- assert(TargetRegisterInfo::isVirtualRegister(li.reg) &&
- "Attempt to split physical register.");
-
- LoopSplit split(*this, li, loop);
- processHeader(split);
- if (split.isValid())
- processLoopExits(split);
- if (split.isValid())
- processLoopUses(split);
- if (split.isValid() /* && split.isWorthwhile() */) {
- split.apply();
- DEBUG(dbgs() << "Success.\n");
- return true;
- }
- DEBUG(dbgs() << "Failed.\n");
- return false;
- }
-
- void LoopSplitter::processInterval(LiveInterval &li) {
- std::deque<MachineLoop*> loops;
- std::copy(mli->begin(), mli->end(), std::back_inserter(loops));
-
- while (!loops.empty()) {
- MachineLoop &loop = *loops.front();
- loops.pop_front();
- DEBUG(
- dbgs() << fqn << " reg" << li.reg << " " << li.weight << " BB#"
- << loop.getHeader()->getNumber() << " ";
- );
- if (!splitOverLoop(li, loop)) {
- // Couldn't split over outer loop, schedule sub-loops to be checked.
- std::copy(loop.begin(), loop.end(), std::back_inserter(loops));
- }
- }
- }
-
- void LoopSplitter::processIntervals() {
- while (!intervals.empty()) {
- LiveInterval &li = *intervals.front();
- intervals.pop_front();
-
- assert(!lis->intervalIsInOneMBB(li) &&
- "Single interval in process worklist.");
-
- processInterval(li);
- }
- }
-
-}
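
For reference, the deleted LoopSplitter's edge handling rested on one definition: an edge is critical when its source has several successors and its destination several predecessors, so no fix-up code can be placed in either endpoint and a fresh block must be inserted on the edge. Reduced to its shape:

    // Mirrors the deleted isCriticalEdge(): true when copy/split code
    // needs its own block between source and destination.
    bool isCriticalEdge(unsigned SrcSuccCount, unsigned DstPredCount) {
      return SrcSuccCount > 1 && DstPredCount > 1;
    }
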
diff --git a/contrib/llvm/lib/CodeGen/Splitter.h b/contrib/llvm/lib/CodeGen/Splitter.h
deleted file mode 100644
index 9fb1b8b..0000000
--- a/contrib/llvm/lib/CodeGen/Splitter.h
+++ /dev/null
@@ -1,101 +0,0 @@
-//===-- llvm/CodeGen/Splitter.h - Splitter -*- C++ -*----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_SPLITTER_H
-#define LLVM_CODEGEN_SPLITTER_H
-
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/SlotIndexes.h"
-
-#include <deque>
-#include <map>
-#include <string>
-#include <vector>
-
-namespace llvm {
-
- class LiveInterval;
- class LiveIntervals;
- struct LiveRange;
- class LoopSplit;
- class MachineDominatorTree;
- class MachineRegisterInfo;
- class SlotIndexes;
- class TargetInstrInfo;
- class VNInfo;
-
- class LoopSplitter : public MachineFunctionPass {
- friend class LoopSplit;
- public:
- static char ID;
-
- LoopSplitter() : MachineFunctionPass(ID) {
- initializeLoopSplitterPass(*PassRegistry::getPassRegistry());
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &au) const;
-
- virtual bool runOnMachineFunction(MachineFunction &fn);
-
- virtual void releaseMemory();
-
-
- private:
-
- MachineFunction *mf;
- LiveIntervals *lis;
- MachineLoopInfo *mli;
- MachineRegisterInfo *mri;
- MachineDominatorTree *mdt;
- SlotIndexes *sis;
- const TargetInstrInfo *tii;
- const TargetRegisterInfo *tri;
-
- std::string fqn;
- std::deque<LiveInterval*> intervals;
-
- typedef std::pair<SlotIndex, SlotIndex> SlotPair;
- typedef std::vector<SlotPair> LoopRanges;
- typedef std::map<MachineLoop*, LoopRanges> LoopRangeMap;
- LoopRangeMap loopRangeMap;
-
- void dumpLoopInfo(MachineLoop &loop);
-
- void dumpOddTerminators();
-
- void updateTerminators(MachineBasicBlock &mbb);
-
- bool canInsertPreHeader(MachineLoop &loop);
- MachineBasicBlock& insertPreHeader(MachineLoop &loop);
-
- bool isCriticalEdge(MachineLoop::Edge &edge);
- bool canSplitEdge(MachineLoop::Edge &edge);
- MachineBasicBlock& splitEdge(MachineLoop::Edge &edge, MachineLoop &loop);
-
- LoopRanges& getLoopRanges(MachineLoop &loop);
- std::pair<bool, SlotPair> getLoopSubRange(const LiveRange &lr,
- MachineLoop &loop);
-
- void dumpLoopRanges(MachineLoop &loop);
-
- void processHeader(LoopSplit &split);
- void processLoopExits(LoopSplit &split);
- void processLoopUses(LoopSplit &split);
-
- bool splitOverLoop(LiveInterval &li, MachineLoop &loop);
-
- void processInterval(LiveInterval &li);
-
- void processIntervals();
- };
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/CodeGen/StackProtector.cpp b/contrib/llvm/lib/CodeGen/StackProtector.cpp
index 1f0e5a2..43a6ad8 100644
--- a/contrib/llvm/lib/CodeGen/StackProtector.cpp
+++ b/contrib/llvm/lib/CodeGen/StackProtector.cpp
@@ -123,16 +123,11 @@ bool StackProtector::RequiresStackProtector() const {
// protectors.
return true;
- if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
- // We apparently only care about character arrays.
- if (!AT->getElementType()->isIntegerTy(8))
- continue;
-
+ if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType()))
// If an array has more than SSPBufferSize bytes of allocated space,
// then we emit stack protectors.
if (SSPBufferSize <= TD->getTypeAllocSize(AT))
return true;
- }
}
}
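
The StackProtector change widens the heuristic from i8 (character) arrays to any array type of at least SSPBufferSize allocated bytes. The predicate, stripped of the IR plumbing into a hypothetical standalone form:

    // True when an alloca'd array is large enough to request a stack
    // protector under the new rule; the element type no longer matters.
    bool needsProtector(unsigned long long AllocBytes,
                        unsigned SSPBufferSize) {
      return AllocBytes >= SSPBufferSize;
    }
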
diff --git a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
index 57cbe1ba..1e940b1 100644
--- a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "stackcoloring"
-#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/Passes.h"
@@ -40,29 +39,17 @@ DisableSharing("no-stack-slot-sharing",
cl::init(false), cl::Hidden,
cl::desc("Suppress slot sharing during stack coloring"));
-static cl::opt<bool>
-ColorWithRegsOpt("color-ss-with-regs",
- cl::init(false), cl::Hidden,
- cl::desc("Color stack slots with free registers"));
-
-
static cl::opt<int> DCELimit("ssc-dce-limit", cl::init(-1), cl::Hidden);
STATISTIC(NumEliminated, "Number of stack slots eliminated due to coloring");
-STATISTIC(NumRegRepl, "Number of stack slot refs replaced with reg refs");
-STATISTIC(NumLoadElim, "Number of loads eliminated");
-STATISTIC(NumStoreElim, "Number of stores eliminated");
STATISTIC(NumDead, "Number of trivially dead stack accesses eliminated");
namespace {
class StackSlotColoring : public MachineFunctionPass {
bool ColorWithRegs;
LiveStacks* LS;
- VirtRegMap* VRM;
MachineFrameInfo *MFI;
- MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
- const TargetRegisterInfo *TRI;
const MachineLoopInfo *loopInfo;
// SSIntervals - Spill slot intervals.
@@ -98,18 +85,12 @@ namespace {
MachineFunctionPass(ID), ColorWithRegs(false), NextColor(-1) {
initializeStackSlotColoringPass(*PassRegistry::getPassRegistry());
}
- StackSlotColoring(bool RegColor) :
- MachineFunctionPass(ID), ColorWithRegs(RegColor), NextColor(-1) {
- initializeStackSlotColoringPass(*PassRegistry::getPassRegistry());
- }
-
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveStacks>();
- AU.addRequired<VirtRegMap>();
- AU.addPreserved<VirtRegMap>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addPreservedID(MachineDominatorsID);
@@ -117,9 +98,6 @@ namespace {
}
virtual bool runOnMachineFunction(MachineFunction &MF);
- virtual const char* getPassName() const {
- return "Stack Slot Coloring";
- }
private:
void InitializeSlots();
@@ -127,41 +105,23 @@ namespace {
bool OverlapWithAssignments(LiveInterval *li, int Color) const;
int ColorSlot(LiveInterval *li);
bool ColorSlots(MachineFunction &MF);
- bool ColorSlotsWithFreeRegs(SmallVector<int, 16> &SlotMapping,
- SmallVector<SmallVector<int, 4>, 16> &RevMap,
- BitVector &SlotIsReg);
void RewriteInstruction(MachineInstr *MI, int OldFI, int NewFI,
MachineFunction &MF);
- bool PropagateBackward(MachineBasicBlock::iterator MII,
- MachineBasicBlock *MBB,
- unsigned OldReg, unsigned NewReg);
- bool PropagateForward(MachineBasicBlock::iterator MII,
- MachineBasicBlock *MBB,
- unsigned OldReg, unsigned NewReg);
- void UnfoldAndRewriteInstruction(MachineInstr *MI, int OldFI,
- unsigned Reg, const TargetRegisterClass *RC,
- SmallSet<unsigned, 4> &Defs,
- MachineFunction &MF);
- bool AllMemRefsCanBeUnfolded(int SS);
bool RemoveDeadStores(MachineBasicBlock* MBB);
};
} // end anonymous namespace
char StackSlotColoring::ID = 0;
+char &llvm::StackSlotColoringID = StackSlotColoring::ID;
INITIALIZE_PASS_BEGIN(StackSlotColoring, "stack-slot-coloring",
"Stack Slot Coloring", false, false)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
-INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(StackSlotColoring, "stack-slot-coloring",
"Stack Slot Coloring", false, false)
-FunctionPass *llvm::createStackSlotColoringPass(bool RegColor) {
- return new StackSlotColoring(RegColor);
-}
-
namespace {
// IntervalSorter - Comparison predicate that sort live intervals by
// their weight.
@@ -248,79 +208,6 @@ StackSlotColoring::OverlapWithAssignments(LiveInterval *li, int Color) const {
return false;
}
-/// ColorSlotsWithFreeRegs - If there are any free registers available, try
-/// replacing spill slot references with registers instead.
-bool
-StackSlotColoring::ColorSlotsWithFreeRegs(SmallVector<int, 16> &SlotMapping,
- SmallVector<SmallVector<int, 4>, 16> &RevMap,
- BitVector &SlotIsReg) {
- if (!(ColorWithRegs || ColorWithRegsOpt) || !VRM->HasUnusedRegisters())
- return false;
-
- bool Changed = false;
- DEBUG(dbgs() << "Assigning unused registers to spill slots:\n");
- for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
- LiveInterval *li = SSIntervals[i];
- int SS = TargetRegisterInfo::stackSlot2Index(li->reg);
- if (!UsedColors[SS] || li->weight < 20)
- // If the weight is < 20, i.e. two references in a loop with depth 1,
- // don't bother with it.
- continue;
-
- // These slots are allowed to share the same registers.
- bool AllColored = true;
- SmallVector<unsigned, 4> ColoredRegs;
- for (unsigned j = 0, ee = RevMap[SS].size(); j != ee; ++j) {
- int RSS = RevMap[SS][j];
- const TargetRegisterClass *RC = LS->getIntervalRegClass(RSS);
- // If it's not colored to another stack slot, try coloring it
- // to a "free" register.
- if (!RC) {
- AllColored = false;
- continue;
- }
- unsigned Reg = VRM->getFirstUnusedRegister(RC);
- if (!Reg) {
- AllColored = false;
- continue;
- }
- if (!AllMemRefsCanBeUnfolded(RSS)) {
- AllColored = false;
- continue;
- } else {
- DEBUG(dbgs() << "Assigning fi#" << RSS << " to "
- << TRI->getName(Reg) << '\n');
- ColoredRegs.push_back(Reg);
- SlotMapping[RSS] = Reg;
- SlotIsReg.set(RSS);
- Changed = true;
- }
- }
-
- // Register and its sub-registers are no longer free.
- while (!ColoredRegs.empty()) {
- unsigned Reg = ColoredRegs.back();
- ColoredRegs.pop_back();
- VRM->setRegisterUsed(Reg);
- // If reg is a callee-saved register, it will have to be spilled in
- // the prologue.
- MRI->setPhysRegUsed(Reg);
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
- VRM->setRegisterUsed(*AS);
- MRI->setPhysRegUsed(*AS);
- }
- }
- // This spill slot is dead after the rewrites
- if (AllColored) {
- MFI->RemoveStackObject(SS);
- ++NumEliminated;
- }
- }
- DEBUG(dbgs() << '\n');
-
- return Changed;
-}
-
/// ColorSlot - Assign a "color" (stack slot) to the specified stack slot.
///
int StackSlotColoring::ColorSlot(LiveInterval *li) {
@@ -372,7 +259,6 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
SmallVector<int, 16> SlotMapping(NumObjs, -1);
SmallVector<float, 16> SlotWeights(NumObjs, 0.0);
SmallVector<SmallVector<int, 4>, 16> RevMap(NumObjs);
- BitVector SlotIsReg(NumObjs);
BitVector UsedColors(NumObjs);
DEBUG(dbgs() << "Color spill slot intervals:\n");
@@ -404,31 +290,19 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
DEBUG(dbgs() << '\n');
#endif
- // Can we "color" a stack slot with a unused register?
- Changed |= ColorSlotsWithFreeRegs(SlotMapping, RevMap, SlotIsReg);
-
if (!Changed)
return false;
// Rewrite all MO_FrameIndex operands.
SmallVector<SmallSet<unsigned, 4>, 4> NewDefs(MF.getNumBlockIDs());
for (unsigned SS = 0, SE = SSRefs.size(); SS != SE; ++SS) {
- bool isReg = SlotIsReg[SS];
int NewFI = SlotMapping[SS];
- if (NewFI == -1 || (NewFI == (int)SS && !isReg))
+ if (NewFI == -1 || (NewFI == (int)SS))
continue;
- const TargetRegisterClass *RC = LS->getIntervalRegClass(SS);
SmallVector<MachineInstr*, 8> &RefMIs = SSRefs[SS];
for (unsigned i = 0, e = RefMIs.size(); i != e; ++i)
- if (!isReg)
- RewriteInstruction(RefMIs[i], SS, NewFI, MF);
- else {
- // Rewrite to use a register instead.
- unsigned MBBId = RefMIs[i]->getParent()->getNumber();
- SmallSet<unsigned, 4> &Defs = NewDefs[MBBId];
- UnfoldAndRewriteInstruction(RefMIs[i], SS, NewFI, RC, Defs, MF);
- }
+ RewriteInstruction(RefMIs[i], SS, NewFI, MF);
}
// Delete unused stack slots.
@@ -441,28 +315,6 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
return true;
}
-/// AllMemRefsCanBeUnfolded - Return true if all references of the specified
-/// spill slot index can be unfolded.
-bool StackSlotColoring::AllMemRefsCanBeUnfolded(int SS) {
- SmallVector<MachineInstr*, 8> &RefMIs = SSRefs[SS];
- for (unsigned i = 0, e = RefMIs.size(); i != e; ++i) {
- MachineInstr *MI = RefMIs[i];
- if (TII->isLoadFromStackSlot(MI, SS) ||
- TII->isStoreToStackSlot(MI, SS))
- // Restore and spill will become copies.
- return true;
- if (!TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(), false, false))
- return false;
- for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
- MachineOperand &MO = MI->getOperand(j);
- if (MO.isFI() && MO.getIndex() != SS)
- // If it uses another frameindex, we can't currently unfold it.
- return false;
- }
- }
- return true;
-}
-
/// RewriteInstruction - Rewrite specified instruction by replacing references
/// to old frame index with new one.
void StackSlotColoring::RewriteInstruction(MachineInstr *MI, int OldFI,
@@ -489,179 +341,6 @@ void StackSlotColoring::RewriteInstruction(MachineInstr *MI, int OldFI,
(*I)->setValue(NewSV);
}
-/// PropagateBackward - Traverse backward and look for the definition of
-/// OldReg. If it can successfully update all of the references with NewReg,
-/// do so and return true.
-bool StackSlotColoring::PropagateBackward(MachineBasicBlock::iterator MII,
- MachineBasicBlock *MBB,
- unsigned OldReg, unsigned NewReg) {
- if (MII == MBB->begin())
- return false;
-
- SmallVector<MachineOperand*, 4> Uses;
- SmallVector<MachineOperand*, 4> Refs;
- while (--MII != MBB->begin()) {
- bool FoundDef = false; // Not counting 2address def.
-
- Uses.clear();
- const MCInstrDesc &MCID = MII->getDesc();
- for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MII->getOperand(i);
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0)
- continue;
- if (Reg == OldReg) {
- if (MO.isImplicit())
- return false;
-
- // Abort if the use is actually a sub-register def. We don't have enough
- // information to figure out if it is really legal.
- if (MO.getSubReg() || MII->isSubregToReg())
- return false;
-
- const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
- if (RC && !RC->contains(NewReg))
- return false;
-
- if (MO.isUse()) {
- Uses.push_back(&MO);
- } else {
- Refs.push_back(&MO);
- if (!MII->isRegTiedToUseOperand(i))
- FoundDef = true;
- }
- } else if (TRI->regsOverlap(Reg, NewReg)) {
- return false;
- } else if (TRI->regsOverlap(Reg, OldReg)) {
- if (!MO.isUse() || !MO.isKill())
- return false;
- }
- }
-
- if (FoundDef) {
- // Found non-two-address def. Stop here.
- for (unsigned i = 0, e = Refs.size(); i != e; ++i)
- Refs[i]->setReg(NewReg);
- return true;
- }
-
- // Two-address uses must be updated as well.
- for (unsigned i = 0, e = Uses.size(); i != e; ++i)
- Refs.push_back(Uses[i]);
- }
- return false;
-}
-
-/// PropagateForward - Traverse forward and look for the kill of OldReg. If
-/// it can successfully update all of the uses with NewReg, do so and
-/// return true.
-bool StackSlotColoring::PropagateForward(MachineBasicBlock::iterator MII,
- MachineBasicBlock *MBB,
- unsigned OldReg, unsigned NewReg) {
- if (MII == MBB->end())
- return false;
-
- SmallVector<MachineOperand*, 4> Uses;
- while (++MII != MBB->end()) {
- bool FoundKill = false;
- const MCInstrDesc &MCID = MII->getDesc();
- for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MII->getOperand(i);
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0)
- continue;
- if (Reg == OldReg) {
- if (MO.isDef() || MO.isImplicit())
- return false;
-
- // Abort if the use is actually a sub-register use. We don't have enough
- // information to figure out if it is really legal.
- if (MO.getSubReg())
- return false;
-
- const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
- if (RC && !RC->contains(NewReg))
- return false;
- if (MO.isKill())
- FoundKill = true;
-
- Uses.push_back(&MO);
- } else if (TRI->regsOverlap(Reg, NewReg) ||
- TRI->regsOverlap(Reg, OldReg))
- return false;
- }
- if (FoundKill) {
- for (unsigned i = 0, e = Uses.size(); i != e; ++i)
- Uses[i]->setReg(NewReg);
- return true;
- }
- }
- return false;
-}
-
-/// UnfoldAndRewriteInstruction - Rewrite specified instruction by unfolding
-/// folded memory references and replacing those references with register
-/// references instead.
-void
-StackSlotColoring::UnfoldAndRewriteInstruction(MachineInstr *MI, int OldFI,
- unsigned Reg,
- const TargetRegisterClass *RC,
- SmallSet<unsigned, 4> &Defs,
- MachineFunction &MF) {
- MachineBasicBlock *MBB = MI->getParent();
- if (unsigned DstReg = TII->isLoadFromStackSlot(MI, OldFI)) {
- if (PropagateForward(MI, MBB, DstReg, Reg)) {
- DEBUG(dbgs() << "Eliminated load: ");
- DEBUG(MI->dump());
- ++NumLoadElim;
- } else {
- BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY),
- DstReg).addReg(Reg);
- ++NumRegRepl;
- }
-
- if (!Defs.count(Reg)) {
- // If this is the first use of Reg in this MBB and it wasn't previously
- // defined in MBB, add it to livein.
- MBB->addLiveIn(Reg);
- Defs.insert(Reg);
- }
- } else if (unsigned SrcReg = TII->isStoreToStackSlot(MI, OldFI)) {
- if (MI->killsRegister(SrcReg) && PropagateBackward(MI, MBB, SrcReg, Reg)) {
- DEBUG(dbgs() << "Eliminated store: ");
- DEBUG(MI->dump());
- ++NumStoreElim;
- } else {
- BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY), Reg)
- .addReg(SrcReg);
- ++NumRegRepl;
- }
-
- // Remember reg has been defined in MBB.
- Defs.insert(Reg);
- } else {
- SmallVector<MachineInstr*, 4> NewMIs;
- bool Success = TII->unfoldMemoryOperand(MF, MI, Reg, false, false, NewMIs);
- (void)Success; // Silence compiler warning.
- assert(Success && "Failed to unfold!");
- MachineInstr *NewMI = NewMIs[0];
- MBB->insert(MI, NewMI);
- ++NumRegRepl;
-
- if (NewMI->readsRegister(Reg)) {
- if (!Defs.count(Reg))
- // If this is the first use of Reg in this MBB and it wasn't previously
- // defined in MBB, add it to livein.
- MBB->addLiveIn(Reg);
- Defs.insert(Reg);
- }
- }
- MBB->erase(MI);
-}
/// RemoveDeadStores - Scan through a basic block and look for loads followed
/// by stores. If they're both using the same stack slot, then the store is
@@ -679,33 +358,33 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
I != E; ++I) {
if (DCELimit != -1 && (int)NumDead >= DCELimit)
break;
-
+
MachineBasicBlock::iterator NextMI = llvm::next(I);
if (NextMI == MBB->end()) continue;
-
+
int FirstSS, SecondSS;
unsigned LoadReg = 0;
unsigned StoreReg = 0;
if (!(LoadReg = TII->isLoadFromStackSlot(I, FirstSS))) continue;
if (!(StoreReg = TII->isStoreToStackSlot(NextMI, SecondSS))) continue;
if (FirstSS != SecondSS || LoadReg != StoreReg || FirstSS == -1) continue;
-
+
++NumDead;
changed = true;
-
+
if (NextMI->findRegisterUseOperandIdx(LoadReg, true, 0) != -1) {
++NumDead;
toErase.push_back(I);
}
-
+
toErase.push_back(NextMI);
++I;
}
-
+
for (SmallVector<MachineInstr*, 4>::iterator I = toErase.begin(),
E = toErase.end(); I != E; ++I)
(*I)->eraseFromParent();
-
+
return changed;
}
@@ -713,32 +392,27 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
DEBUG({
dbgs() << "********** Stack Slot Coloring **********\n"
- << "********** Function: "
+ << "********** Function: "
<< MF.getFunction()->getName() << '\n';
});
MFI = MF.getFrameInfo();
- MRI = &MF.getRegInfo();
TII = MF.getTarget().getInstrInfo();
- TRI = MF.getTarget().getRegisterInfo();
LS = &getAnalysis<LiveStacks>();
- VRM = &getAnalysis<VirtRegMap>();
loopInfo = &getAnalysis<MachineLoopInfo>();
bool Changed = false;
unsigned NumSlots = LS->getNumIntervals();
- if (NumSlots < 2) {
- if (NumSlots == 0 || !VRM->HasUnusedRegisters())
- // Nothing to do!
- return false;
- }
+ if (NumSlots == 0)
+ // Nothing to do!
+ return false;
// If there are calls to setjmp or sigsetjmp, don't perform stack slot
// coloring. The stack could be modified before the longjmp is executed,
// resulting in the wrong value being used afterwards. (See
// <rdar://problem/8007500>.)
- if (MF.callsSetJmp())
+ if (MF.exposesReturnsTwice())
return false;
// Gather spill slot references
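
[Editor's note] The final hunk above renames MF.callsSetJmp() to MF.exposesReturnsTwice(), matching the returns_twice function attribute. A minimal sketch (not from the patch) of the hazard the early exit guards against, in caller terms:

    // Sketch: why slot sharing is unsafe around a "returns twice" call.
    #include <csetjmp>
    void g();                    // assumed to longjmp(Env, 1) back into f()
    static std::jmp_buf Env;

    int f() {
      int A = 1;                 // spilled to stack slot S1
      if (setjmp(Env) == 0) {    // the call that returns twice
        int B = 2;               // if B were colored into S1, it would
        (void)B;                 //   clobber A before the longjmp returns
        g();
      }
      return A;                  // must still observe 1 after the longjmp
    }
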
diff --git a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
index 260cc0e..c6fdc73 100644
--- a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
@@ -228,7 +228,6 @@ static MachineOperand *findLastUse(MachineBasicBlock *MBB, unsigned Reg) {
return &MO;
}
}
- return NULL;
}
bool StrongPHIElimination::runOnMachineFunction(MachineFunction &MF) {
@@ -390,12 +389,10 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &MF) {
MachineOperand *LastUse = findLastUse(MBB, SrcReg);
assert(LastUse);
SlotIndex LastUseIndex = LI->getInstructionIndex(LastUse->getParent());
- SrcLI.removeRange(LastUseIndex.getDefIndex(), LI->getMBBEndIdx(MBB));
+ SrcLI.removeRange(LastUseIndex.getRegSlot(), LI->getMBBEndIdx(MBB));
LastUse->setIsKill(true);
}
- LI->renumber();
-
Allocator.Reset();
RegNodeMap.clear();
PHISrcDefs.clear();
@@ -745,7 +742,7 @@ void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
// Set the phi-def flag for the VN at this PHI.
SlotIndex PHIIndex = LI->getInstructionIndex(PHI);
- VNInfo *DestVNI = DestLI.getVNInfoAt(PHIIndex.getDefIndex());
+ VNInfo *DestVNI = DestLI.getVNInfoAt(PHIIndex.getRegSlot());
assert(DestVNI);
DestVNI->setIsPHIDef(true);
@@ -756,7 +753,7 @@ void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
SlotIndex MBBStartIndex = LI->getMBBStartIdx(MBB);
DestVNI->def = MBBStartIndex;
DestLI.addRange(LiveRange(MBBStartIndex,
- PHIIndex.getDefIndex(),
+ PHIIndex.getRegSlot(),
DestVNI));
return;
}
@@ -779,22 +776,21 @@ void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
SlotIndex MBBStartIndex = LI->getMBBStartIdx(MBB);
SlotIndex DestCopyIndex = LI->getInstructionIndex(CopyInstr);
VNInfo *CopyVNI = CopyLI.getNextValue(MBBStartIndex,
- CopyInstr,
LI->getVNInfoAllocator());
CopyVNI->setIsPHIDef(true);
CopyLI.addRange(LiveRange(MBBStartIndex,
- DestCopyIndex.getDefIndex(),
+ DestCopyIndex.getRegSlot(),
CopyVNI));
// Adjust DestReg's live interval to adjust for its new definition at
// CopyInstr.
LiveInterval &DestLI = LI->getOrCreateInterval(DestReg);
SlotIndex PHIIndex = LI->getInstructionIndex(PHI);
- DestLI.removeRange(PHIIndex.getDefIndex(), DestCopyIndex.getDefIndex());
+ DestLI.removeRange(PHIIndex.getRegSlot(), DestCopyIndex.getRegSlot());
- VNInfo *DestVNI = DestLI.getVNInfoAt(DestCopyIndex.getDefIndex());
+ VNInfo *DestVNI = DestLI.getVNInfoAt(DestCopyIndex.getRegSlot());
assert(DestVNI);
- DestVNI->def = DestCopyIndex.getDefIndex();
+ DestVNI->def = DestCopyIndex.getRegSlot();
InsertedDestCopies[CopyReg] = CopyInstr;
}
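
[Editor's note] The StrongPHIElimination hunks track a LiveIntervals API rename: SlotIndex::getDefIndex() is now getRegSlot(), and getNextValue() no longer takes the defining instruction. A minimal sketch of the renamed call, assuming this release's headers; defSlotOf is a hypothetical helper:

    // Sketch only: same slot, new spelling.
    SlotIndex defSlotOf(LiveIntervals *LIS, MachineInstr *MI) {
      SlotIndex Idx = LIS->getInstructionIndex(MI);
      return Idx.getRegSlot();   // previously spelled Idx.getDefIndex()
    }
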
diff --git a/contrib/llvm/lib/CodeGen/TailDuplication.cpp b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
index 3a6211a..8ebfbca 100644
--- a/contrib/llvm/lib/CodeGen/TailDuplication.cpp
+++ b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
@@ -56,10 +56,10 @@ typedef std::vector<std::pair<MachineBasicBlock*,unsigned> > AvailableValsTy;
namespace {
/// TailDuplicatePass - Perform tail duplication.
class TailDuplicatePass : public MachineFunctionPass {
- bool PreRegAlloc;
const TargetInstrInfo *TII;
MachineModuleInfo *MMI;
MachineRegisterInfo *MRI;
+ bool PreRegAlloc;
// SSAUpdateVRs - A list of virtual registers for which to update SSA form.
SmallVector<unsigned, 16> SSAUpdateVRs;
@@ -70,11 +70,10 @@ namespace {
public:
static char ID;
- explicit TailDuplicatePass(bool PreRA) :
- MachineFunctionPass(ID), PreRegAlloc(PreRA) {}
+ explicit TailDuplicatePass() :
+ MachineFunctionPass(ID), PreRegAlloc(false) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
- virtual const char *getPassName() const { return "Tail Duplication"; }
private:
void AddSSAUpdateEntry(unsigned OrigReg, unsigned NewReg,
@@ -118,14 +117,16 @@ namespace {
char TailDuplicatePass::ID = 0;
}
-FunctionPass *llvm::createTailDuplicatePass(bool PreRegAlloc) {
- return new TailDuplicatePass(PreRegAlloc);
-}
+char &llvm::TailDuplicateID = TailDuplicatePass::ID;
+
+INITIALIZE_PASS(TailDuplicatePass, "tailduplication", "Tail Duplication",
+ false, false)
bool TailDuplicatePass::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getTarget().getInstrInfo();
MRI = &MF.getRegInfo();
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
+ PreRegAlloc = MRI->isSSA();
bool MadeChange = false;
while (TailDuplicateBlocks(MF))
@@ -432,7 +433,7 @@ void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
MO.setReg(VI->second);
}
}
- PredBB->insert(PredBB->end(), NewMI);
+ PredBB->insert(PredBB->instr_end(), NewMI);
}
/// UpdateSuccessorsPHIs - After FromBB is tail duplicated into its predecessor
@@ -553,7 +554,7 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
bool HasIndirectbr = false;
if (!TailBB.empty())
- HasIndirectbr = TailBB.back().getDesc().isIndirectBranch();
+ HasIndirectbr = TailBB.back().isIndirectBranch();
if (HasIndirectbr && PreRegAlloc)
MaxDuplicateCount = 20;
@@ -561,22 +562,21 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
// Check the instructions in the block to determine whether tail-duplication
// is invalid or unlikely to be profitable.
unsigned InstrCount = 0;
- for (MachineBasicBlock::const_iterator I = TailBB.begin(); I != TailBB.end();
- ++I) {
+ for (MachineBasicBlock::iterator I = TailBB.begin(); I != TailBB.end(); ++I) {
// Non-duplicable things shouldn't be tail-duplicated.
- if (I->getDesc().isNotDuplicable())
+ if (I->isNotDuplicable())
return false;
// Do not duplicate 'return' instructions if this is a pre-regalloc run.
// A return may expand into a lot more instructions (e.g. reload of callee
// saved registers) after PEI.
- if (PreRegAlloc && I->getDesc().isReturn())
+ if (PreRegAlloc && I->isReturn())
return false;
// Avoid duplicating calls before register allocation. Calls present a
// barrier to register allocation so duplicating them may end up increasing
// spills.
- if (PreRegAlloc && I->getDesc().isCall())
+ if (PreRegAlloc && I->isCall())
return false;
if (!I->isPHI() && !I->isDebugValue())
@@ -611,7 +611,7 @@ TailDuplicatePass::isSimpleBB(MachineBasicBlock *TailBB) {
++I;
if (I == E)
return true;
- return I->getDesc().isUnconditionalBranch();
+ return I->isUnconditionalBranch();
}
static bool
@@ -778,8 +778,10 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
// Clone the contents of TailBB into PredBB.
DenseMap<unsigned, unsigned> LocalVRMap;
SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
- MachineBasicBlock::iterator I = TailBB->begin();
- while (I != TailBB->end()) {
+ // Use instr_iterator here to properly handle bundles, e.g.
+ // ARM Thumb2 IT block.
+ MachineBasicBlock::instr_iterator I = TailBB->instr_begin();
+ while (I != TailBB->instr_end()) {
MachineInstr *MI = &*I;
++I;
if (MI->isPHI()) {
@@ -824,7 +826,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
SmallVector<MachineOperand, 4> PriorCond;
// This has to check PrevBB->succ_size() because EH edges are ignored by
// AnalyzeBranch.
- if (PrevBB->succ_size() == 1 &&
+ if (PrevBB->succ_size() == 1 &&
!TII->AnalyzeBranch(*PrevBB, PriorTBB, PriorFBB, PriorCond, true) &&
PriorCond.empty() && !PriorTBB && TailBB->pred_size() == 1 &&
!TailBB->hasAddressTaken()) {
@@ -849,6 +851,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
// Replace def of virtual registers with new registers, and update
// uses with PHI source register or the new registers.
MachineInstr *MI = &*I++;
+ assert(!MI->isBundle() && "Not expecting bundles before regalloc!");
DuplicateInstruction(MI, TailBB, PrevBB, MF, LocalVRMap, UsedByPhi);
MI->eraseFromParent();
}
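
[Editor's note] Like StackSlotColoring above, TailDuplication loses its create*Pass() factory: it is registered via INITIALIZE_PASS and scheduled through TailDuplicateID, and it derives PreRegAlloc from MRI->isSSA() instead of a constructor flag. A sketch of the by-ID scheduling idiom, assuming a TargetPassConfig-style addPass(char &ID) hook in this release; scheduleTailDup is hypothetical:

    // Sketch only: scheduling by pass ID instead of by factory.
    // Old:  PM.add(createTailDuplicatePass(/*PreRegAlloc=*/true));
    // New:  one ID serves both placements, since the pass now reads
    //       MRI->isSSA() itself to decide pre- vs. post-RA behavior.
    void scheduleTailDup(TargetPassConfig &PC) {   // hypothetical hook
      PC.addPass(TailDuplicateID);   // extern char &TailDuplicateID;
    }
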
diff --git a/contrib/llvm/lib/Target/TargetFrameLowering.cpp b/contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
index 122f869..cadb878 100644
--- a/contrib/llvm/lib/Target/TargetFrameLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp
@@ -1,4 +1,4 @@
-//===----- TargetFrameLowering.cpp - Implement target frame interface ------==//
+//===----- TargetFrameLoweringImpl.cpp - Implement target frame interface --==//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
index f32678f..2beb928 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -77,6 +78,9 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
unsigned Reg1 = MI->getOperand(Idx1).getReg();
unsigned Reg2 = MI->getOperand(Idx2).getReg();
+ unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
+ unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
+ unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
bool Reg1IsKill = MI->getOperand(Idx1).isKill();
bool Reg2IsKill = MI->getOperand(Idx2).isKill();
// If destination is tied to either of the commuted source register, then
@@ -85,10 +89,12 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
Reg2IsKill = false;
Reg0 = Reg2;
+ SubReg0 = SubReg2;
} else if (HasDef && Reg0 == Reg2 &&
MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
Reg1IsKill = false;
Reg0 = Reg1;
+ SubReg0 = SubReg1;
}
if (NewMI) {
@@ -97,19 +103,23 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
MachineFunction &MF = *MI->getParent()->getParent();
if (HasDef)
return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
- .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
- .addReg(Reg2, getKillRegState(Reg2IsKill))
- .addReg(Reg1, getKillRegState(Reg2IsKill));
+ .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead), SubReg0)
+ .addReg(Reg2, getKillRegState(Reg2IsKill), SubReg2)
+ .addReg(Reg1, getKillRegState(Reg1IsKill), SubReg1);
else
return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
- .addReg(Reg2, getKillRegState(Reg2IsKill))
- .addReg(Reg1, getKillRegState(Reg2IsKill));
+ .addReg(Reg2, getKillRegState(Reg2IsKill), SubReg2)
+ .addReg(Reg1, getKillRegState(Reg1IsKill), SubReg1);
}
- if (HasDef)
+ if (HasDef) {
MI->getOperand(0).setReg(Reg0);
+ MI->getOperand(0).setSubReg(SubReg0);
+ }
MI->getOperand(Idx2).setReg(Reg1);
MI->getOperand(Idx1).setReg(Reg2);
+ MI->getOperand(Idx2).setSubReg(SubReg1);
+ MI->getOperand(Idx1).setSubReg(SubReg2);
MI->getOperand(Idx2).setIsKill(Reg1IsKill);
MI->getOperand(Idx1).setIsKill(Reg2IsKill);
return MI;
@@ -121,6 +131,9 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
+ assert(!MI->isBundle() &&
+ "TargetInstrInfoImpl::findCommutedOpIndices() can't handle bundles");
+
const MCInstrDesc &MCID = MI->getDesc();
if (!MCID.isCommutable())
return false;
@@ -136,11 +149,28 @@ bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
}
+bool
+TargetInstrInfoImpl::isUnpredicatedTerminator(const MachineInstr *MI) const {
+ if (!MI->isTerminator()) return false;
+
+ // Conditional branch is a special case.
+ if (MI->isBranch() && !MI->isBarrier())
+ return true;
+ if (!MI->isPredicable())
+ return true;
+ return !isPredicated(MI);
+}
+
+
bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const {
bool MadeChange = false;
+
+ assert(!MI->isBundle() &&
+ "TargetInstrInfoImpl::PredicateInstruction() can't handle bundles");
+
const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.isPredicable())
+ if (!MI->isPredicable())
return false;
for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -218,7 +248,7 @@ TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
MachineFunction &MF) const {
- assert(!Orig->getDesc().isNotDuplicable() &&
+ assert(!Orig->isNotDuplicable() &&
"Instruction cannot be duplicated");
return MF.CloneMachineInstr(Orig);
}
@@ -288,16 +318,15 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
- NewMI->getDesc().mayStore()) &&
+ NewMI->mayStore()) &&
"Folded a def to a non-store!");
assert((!(Flags & MachineMemOperand::MOLoad) ||
- NewMI->getDesc().mayLoad()) &&
+ NewMI->mayLoad()) &&
"Folded a use to a non-load!");
const MachineFrameInfo &MFI = *MF.getFrameInfo();
assert(MFI.getObjectOffset(FI) != -1);
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
Flags, MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
NewMI->addMemOperand(MF, MMO);
@@ -332,7 +361,7 @@ MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
- assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
+ assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
@@ -360,7 +389,6 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
const MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetMachine &TM = MF.getTarget();
const TargetInstrInfo &TII = *TM.getInstrInfo();
- const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
// Remat clients assume operand 0 is the defined register.
if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
@@ -383,10 +411,8 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
return true;
- const MCInstrDesc &MCID = MI->getDesc();
-
// Avoid instructions obviously unsafe for remat.
- if (MCID.isNotDuplicable() || MCID.mayStore() ||
+ if (MI->isNotDuplicable() || MI->mayStore() ||
MI->hasUnmodeledSideEffects())
return false;
@@ -396,7 +422,7 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
return false;
// Avoid instructions which load from potentially varying memory.
- if (MCID.mayLoad() && !MI->isInvariantLoad(AA))
+ if (MI->mayLoad() && !MI->isInvariantLoad(AA))
return false;
// If any of the registers accessed are non-constant, conservatively assume
@@ -414,19 +440,8 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
- if (!MRI.def_empty(Reg))
- return false;
- BitVector AllocatableRegs = TRI.getAllocatableSet(MF, 0);
- if (AllocatableRegs.test(Reg))
+ if (!MRI.isConstantPhysReg(Reg, MF))
return false;
- // Check for a def among the register's aliases too.
- for (const unsigned *Alias = TRI.getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- if (!MRI.def_empty(AliasReg))
- return false;
- if (AllocatableRegs.test(AliasReg))
- return false;
- }
} else {
// A physreg def. We can't remat it.
return false;
@@ -457,7 +472,7 @@ bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const{
// Terminators and labels can't be scheduled around.
- if (MI->getDesc().isTerminator() || MI->isLabel())
+ if (MI->isTerminator() || MI->isLabel())
return true;
// Don't attempt to schedule around any instruction that defines
@@ -493,3 +508,32 @@ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
return (ScheduleHazardRecognizer *)
new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
+
+int
+TargetInstrInfoImpl::getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const {
+ if (!ItinData || ItinData->isEmpty())
+ return -1;
+
+ if (!DefNode->isMachineOpcode())
+ return -1;
+
+ unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
+ if (!UseNode->isMachineOpcode())
+ return ItinData->getOperandCycle(DefClass, DefIdx);
+ unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
+ return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
+}
+
+int TargetInstrInfoImpl::getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *N) const {
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ if (!N->isMachineOpcode())
+ return 1;
+
+ return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
+}
+
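
[Editor's note] The two SDNode-based latency hooks added above get default implementations: getOperandLatency() returns -1 when no itinerary data is available, and getInstrLatency() falls back to 1. A sketch of how a scheduler might consume them, assuming TII and Itins come from the current target; latencyOrDefault is a hypothetical wrapper:

    // Sketch only: normalize the "unknown latency" sentinel.
    int latencyOrDefault(const TargetInstrInfo *TII,
                         const InstrItineraryData *Itins,
                         SDNode *Def, unsigned DefIdx,
                         SDNode *Use, unsigned UseIdx) {
      int Lat = TII->getOperandLatency(Itins, Def, DefIdx, Use, UseIdx);
      return Lat >= 0 ? Lat : 1;   // -1 means "no itinerary data"
    }
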
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 3848f4d..9925185 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -17,6 +17,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
+#include "llvm/Module.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
@@ -53,11 +54,9 @@ TargetLoweringObjectFileELF::getCFIPersonalitySymbol(const GlobalValue *GV,
report_fatal_error("We do not support this DWARF encoding yet!");
case dwarf::DW_EH_PE_absptr:
return Mang->getSymbol(GV);
- break;
case dwarf::DW_EH_PE_pcrel: {
return getContext().GetOrCreateSymbol(StringRef("DW.ref.") +
Mang->getSymbol(GV)->getName());
- break;
}
}
}
@@ -78,14 +77,14 @@ void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer,
Flags,
SectionKind::getDataRel(),
0, Label->getName());
+ unsigned Size = TM.getTargetData()->getPointerSize();
Streamer.SwitchSection(Sec);
- Streamer.EmitValueToAlignment(8);
+ Streamer.EmitValueToAlignment(TM.getTargetData()->getPointerABIAlignment());
Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject);
- const MCExpr *E = MCConstantExpr::Create(8, getContext());
+ const MCExpr *E = MCConstantExpr::Create(Size, getContext());
Streamer.EmitELFSize(Label, E);
Streamer.EmitLabel(Label);
- unsigned Size = TM.getTargetData()->getPointerSize();
Streamer.EmitSymbolValue(Sym, Size);
}
@@ -189,6 +188,7 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
static const char *getSectionPrefixForGlobal(SectionKind Kind) {
if (Kind.isText()) return ".text.";
if (Kind.isReadOnly()) return ".rodata.";
+ if (Kind.isBSS()) return ".bss.";
if (Kind.isThreadData()) return ".tdata.";
if (Kind.isThreadBSS()) return ".tbss.";
@@ -217,7 +217,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
// If this global is linkonce/weak and the target handles this by emitting it
// into a 'uniqued' section name, create and return the section now.
if ((GV->isWeakForLinker() || EmitUniquedSection) &&
- !Kind.isCommon() && !Kind.isBSS()) {
+ !Kind.isCommon()) {
const char *Prefix;
Prefix = getSectionPrefixForGlobal(Kind);
@@ -342,10 +342,92 @@ getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
getExprForDwarfGlobalReference(GV, Mang, MMI, Encoding, Streamer);
}
+const MCSection *
+TargetLoweringObjectFileELF::getStaticCtorSection(unsigned Priority) const {
+ // The default scheme is .ctor / .dtor, so we have to invert the priority
+ // numbering.
+ if (Priority == 65535)
+ return StaticCtorSection;
+
+ std::string Name = std::string(".ctors.") + utostr(65535 - Priority);
+ return getContext().getELFSection(Name, ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
+ SectionKind::getDataRel());
+}
+
+const MCSection *
+TargetLoweringObjectFileELF::getStaticDtorSection(unsigned Priority) const {
+ // The default scheme is .ctor / .dtor, so we have to invert the priority
+ // numbering.
+ if (Priority == 65535)
+ return StaticDtorSection;
+
+ std::string Name = std::string(".dtors.") + utostr(65535 - Priority);
+ return getContext().getELFSection(Name, ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC |ELF::SHF_WRITE,
+ SectionKind::getDataRel());
+}
+
//===----------------------------------------------------------------------===//
// MachO
//===----------------------------------------------------------------------===//
+/// emitModuleFlags - Emit the module flags that specify the garbage collection
+/// information.
+void TargetLoweringObjectFileMachO::
+emitModuleFlags(MCStreamer &Streamer,
+ ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
+ Mangler *Mang, const TargetMachine &TM) const {
+ unsigned VersionVal = 0;
+ unsigned GCFlags = 0;
+ StringRef SectionVal;
+
+ for (ArrayRef<Module::ModuleFlagEntry>::iterator
+ i = ModuleFlags.begin(), e = ModuleFlags.end(); i != e; ++i) {
+ const Module::ModuleFlagEntry &MFE = *i;
+
+ // Ignore flags with 'Require' behavior.
+ if (MFE.Behavior == Module::Require)
+ continue;
+
+ StringRef Key = MFE.Key->getString();
+ Value *Val = MFE.Val;
+
+ if (Key == "Objective-C Image Info Version")
+ VersionVal = cast<ConstantInt>(Val)->getZExtValue();
+ else if (Key == "Objective-C Garbage Collection" ||
+ Key == "Objective-C GC Only")
+ GCFlags |= cast<ConstantInt>(Val)->getZExtValue();
+ else if (Key == "Objective-C Image Info Section")
+ SectionVal = cast<MDString>(Val)->getString();
+ }
+
+ // The section is mandatory. If we don't have it, then we don't have GC info.
+ if (SectionVal.empty()) return;
+
+ StringRef Segment, Section;
+ unsigned TAA = 0, StubSize = 0;
+ bool TAAParsed;
+ std::string ErrorCode =
+ MCSectionMachO::ParseSectionSpecifier(SectionVal, Segment, Section,
+ TAA, TAAParsed, StubSize);
+ if (!ErrorCode.empty())
+ // If invalid, report the error with report_fatal_error.
+ report_fatal_error("Invalid section specifier '" + Section + "': " +
+ ErrorCode + ".");
+
+ // Get the section.
+ const MCSectionMachO *S =
+ getContext().getMachOSection(Segment, Section, TAA, StubSize,
+ SectionKind::getDataNoRel());
+ Streamer.SwitchSection(S);
+ Streamer.EmitLabel(getContext().
+ GetOrCreateSymbol(StringRef("L_OBJC_IMAGE_INFO")));
+ Streamer.EmitIntValue(VersionVal, 4);
+ Streamer.EmitIntValue(GCFlags, 4);
+ Streamer.AddBlankLine();
+}
+
const MCSection *TargetLoweringObjectFileMachO::
getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const {
@@ -358,11 +440,9 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
TAA, TAAParsed, StubSize);
if (!ErrorCode.empty()) {
// If invalid, report the error with report_fatal_error.
- report_fatal_error("Global variable '" + GV->getNameStr() +
- "' has an invalid section specifier '" + GV->getSection()+
- "': " + ErrorCode + ".");
- // Fall back to dropping it into the data section.
- return DataSection;
+ report_fatal_error("Global variable '" + GV->getName() +
+ "' has an invalid section specifier '" +
+ GV->getSection() + "': " + ErrorCode + ".");
}
// Get the section.
@@ -379,9 +459,9 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
// to reject it here.
if (S->getTypeAndAttributes() != TAA || S->getStubSize() != StubSize) {
// If invalid, report the error with report_fatal_error.
- report_fatal_error("Global variable '" + GV->getNameStr() +
- "' section type or attributes does not match previous"
- " section specifier");
+ report_fatal_error("Global variable '" + GV->getName() +
+ "' section type or attributes does not match previous"
+ " section specifier");
}
return S;
@@ -536,9 +616,7 @@ getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
// Add information about the stub reference to MachOMMI so that the stub
// gets emitted by the asmprinter.
MCSymbol *SSym = getContext().GetOrCreateSymbol(Name.str());
- MachineModuleInfoImpl::StubValueTy &StubSym =
- GV->hasHiddenVisibility() ? MachOMMI.getHiddenGVStubEntry(SSym) :
- MachOMMI.getGVStubEntry(SSym);
+ MachineModuleInfoImpl::StubValueTy &StubSym = MachOMMI.getGVStubEntry(SSym);
if (StubSym.getPointer() == 0) {
MCSymbol *Sym = Mang->getSymbol(GV);
StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
@@ -568,6 +646,11 @@ getCOFFSectionFlags(SectionKind K) {
COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_MEM_WRITE;
+ else if (K.isThreadLocal())
+ Flags |=
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE;
else if (K.isReadOnly())
Flags |=
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
@@ -594,6 +677,8 @@ static const char *getCOFFSectionPrefixForUniqueGlobal(SectionKind Kind) {
return ".text$";
if (Kind.isBSS ())
return ".bss$";
+ if (Kind.isThreadLocal())
+ return ".tls$";
if (Kind.isWriteable())
return ".data$";
return ".rdata$";
@@ -603,7 +688,6 @@ static const char *getCOFFSectionPrefixForUniqueGlobal(SectionKind Kind) {
const MCSection *TargetLoweringObjectFileCOFF::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const {
- assert(!Kind.isThreadLocal() && "Doesn't support TLS");
// If this global is linkonce/weak and the target handles this by emitting it
// into a 'uniqued' section name, create and return the section now.
@@ -624,6 +708,9 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
if (Kind.isText())
return getTextSection();
+ if (Kind.isThreadLocal())
+ return getTLSDataSection();
+
return getDataSection();
}
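
[Editor's note] The new TargetLoweringObjectFileMachO::emitModuleFlags() reads the Objective-C image-info module flags named in the code above. A sketch of IR-building code that produces flags it would consume; the Module::addModuleFlag calls are this release's API, while the helper name, the Error behavior, and the section string are illustrative assumptions:

    // Sketch only: flag keys match those scanned by emitModuleFlags().
    void addObjCImageInfo(Module &M) {   // hypothetical helper
      LLVMContext &Ctx = M.getContext();
      M.addModuleFlag(Module::Error, "Objective-C Image Info Version", 0);
      M.addModuleFlag(Module::Error, "Objective-C Image Info Section",
                      MDString::get(Ctx, "__DATA, __objc_imageinfo, "
                                         "regular, no_dead_strip"));
    }
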
diff --git a/contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp b/contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp
new file mode 100644
index 0000000..0f59d01
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/TargetOptionsImpl.cpp
@@ -0,0 +1,52 @@
+//===-- TargetOptionsImpl.cpp - Options that apply to all targets ----------==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the methods in the TargetOptions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+/// DisableFramePointerElim - This returns true if frame pointer elimination
+/// optimization should be disabled for the given machine function.
+bool TargetOptions::DisableFramePointerElim(const MachineFunction &MF) const {
+ // Check to see if we should eliminate non-leaf frame pointers and then
+ // check to see if we should eliminate all frame pointers.
+ if (NoFramePointerElimNonLeaf && !NoFramePointerElim) {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return MFI->hasCalls();
+ }
+
+ return NoFramePointerElim;
+}
+
+/// LessPreciseFPMAD - This returns true when the -enable-fp-mad option
+/// is specified on the command line. When this flag is off (default), the
+/// code generator is not allowed to generate mad (multiply-add) if the
+/// result is "less precise" than doing those operations individually.
+bool TargetOptions::LessPreciseFPMAD() const {
+ return UnsafeFPMath || LessPreciseFPMADOption;
+}
+
+/// HonorSignDependentRoundingFPMath - Return true if the codegen must assume
+/// that the rounding mode of the FPU can change from its default.
+bool TargetOptions::HonorSignDependentRoundingFPMath() const {
+ return !UnsafeFPMath && HonorSignDependentRoundingFPMathOption;
+}
+
+/// getTrapFunctionName - If this returns a non-empty string, this means isel
+/// should lower Intrinsic::trap to a call to the specified function name
+/// instead of an ISD::TRAP node.
+StringRef TargetOptions::getTrapFunctionName() const {
+ return TrapFuncName;
+}
+
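
[Editor's note] The new file moves several TargetOptions predicates out of line. A short usage sketch, assuming a configured TargetMachine whose public Options member holds the TargetOptions in this release; keepFramePointer is a hypothetical caller:

    // Sketch only: querying the out-of-line predicate.
    bool keepFramePointer(const TargetMachine &TM,
                          const MachineFunction &MF) {
      // True either globally or, with -disable-non-leaf-fp-elim,
      // for any function that makes calls.
      return TM.Options.DisableFramePointerElim(MF);
    }
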
diff --git a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index d879378..c30b133 100644
--- a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -36,6 +36,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -56,14 +57,18 @@ STATISTIC(NumConvertedTo3Addr, "Number of instructions promoted to 3-address");
STATISTIC(Num3AddrSunk, "Number of 3-address instructions sunk");
STATISTIC(NumReMats, "Number of instructions re-materialized");
STATISTIC(NumDeletes, "Number of dead instructions deleted");
+STATISTIC(NumReSchedUps, "Number of instructions re-scheduled up");
+STATISTIC(NumReSchedDowns, "Number of instructions re-scheduled down");
namespace {
class TwoAddressInstructionPass : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
+ const InstrItineraryData *InstrItins;
MachineRegisterInfo *MRI;
LiveVariables *LV;
AliasAnalysis *AA;
+ CodeGenOpt::Level OptLevel;
// DistanceMap - Keep track the distance of a MI from the start of the
// current basic block.
@@ -120,6 +125,18 @@ namespace {
MachineBasicBlock::iterator &nmi,
MachineFunction::iterator &mbbi, unsigned Dist);
+ bool isDefTooClose(unsigned Reg, unsigned Dist,
+ MachineInstr *MI, MachineBasicBlock *MBB);
+
+ bool RescheduleMIBelowKill(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned Reg);
+ bool RescheduleKillAboveMI(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned Reg);
+
bool TryInstructionTransform(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
MachineFunction::iterator &mbbi,
@@ -152,7 +169,6 @@ namespace {
AU.addPreserved<LiveVariables>();
AU.addPreservedID(MachineLoopInfoID);
AU.addPreservedID(MachineDominatorsID);
- AU.addPreservedID(PHIEliminationID);
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -225,12 +241,12 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
// appropriate location, we can try to sink the current instruction
// past it.
if (!KillMI || KillMI->getParent() != MBB || KillMI == MI ||
- KillMI->getDesc().isTerminator())
+ KillMI->isTerminator())
return false;
// If any of the definitions are used by another instruction between the
// position and the kill use, then it's not safe to sink it.
- //
+ //
// FIXME: This can be sped up if there is an easy way to query whether an
// instruction is before or after another instruction. Then we can use
// MachineRegisterInfo def / use instead.
@@ -273,7 +289,7 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
KillMO->setIsKill(false);
KillMO = MI->findRegisterUseOperand(SavedReg, false, TRI);
KillMO->setIsKill(true);
-
+
if (LV)
LV->replaceKillInstruction(SavedReg, KillMI, MI);
@@ -319,7 +335,7 @@ TwoAddressInstructionPass::isProfitableToReMat(unsigned Reg,
continue; // Current use.
OtherUse = true;
// There is at least one other use in the MBB that will clobber the
- // register.
+ // register.
if (isTwoAddrUse(UseMI, Reg))
return true;
}
@@ -467,6 +483,32 @@ static bool isTwoAddrUse(MachineInstr &MI, unsigned Reg, unsigned &DstReg) {
return false;
}
+/// findLocalKill - Look for an instruction below MI in the MBB that kills the
+/// specified register. Returns null if there is any other use of Reg between
+/// the instructions.
+static
+MachineInstr *findLocalKill(unsigned Reg, MachineBasicBlock *MBB,
+ MachineInstr *MI, MachineRegisterInfo *MRI,
+ DenseMap<MachineInstr*, unsigned> &DistanceMap) {
+ MachineInstr *KillMI = 0;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(Reg),
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
+ MachineInstr *UseMI = &*UI;
+ if (UseMI == MI || UseMI->getParent() != MBB)
+ continue;
+ if (DistanceMap.count(UseMI))
+ continue;
+ if (!UI.getOperand().isKill())
+ return 0;
+ if (KillMI)
+ return 0; // -O0 kill markers cannot be trusted?
+ KillMI = UseMI;
+ }
+
+ return KillMI;
+}
+
/// findOnlyInterestingUse - Given a register, if it has a single in-basic
/// block use, return the use instruction if it's a copy or a two-address use.
static
@@ -528,6 +570,9 @@ bool
TwoAddressInstructionPass::isProfitableToCommute(unsigned regB, unsigned regC,
MachineInstr *MI, MachineBasicBlock *MBB,
unsigned Dist) {
+ if (OptLevel == CodeGenOpt::None)
+ return false;
+
// Determine if it's profitable to commute this two address instruction. In
// general, we want no uses between this instruction and the definition of
// the two-address register.
@@ -544,7 +589,7 @@ TwoAddressInstructionPass::isProfitableToCommute(unsigned regB, unsigned regC,
// %reg1029<def> = MOV8rr %reg1028
// %reg1029<def> = SHR8ri %reg1029, 7, %EFLAGS<imp-def,dead>
// insert => %reg1030<def> = MOV8rr %reg1029
- // %reg1030<def> = ADD8rr %reg1029<kill>, %reg1028<kill>, %EFLAGS<imp-def,dead>
+ // %reg1030<def> = ADD8rr %reg1029<kill>, %reg1028<kill>, %EFLAGS<imp-def,dead>
if (!MI->killsRegister(regC))
return false;
@@ -770,10 +815,9 @@ void TwoAddressInstructionPass::ProcessCopy(MachineInstr *MI,
static bool isSafeToDelete(MachineInstr *MI,
const TargetInstrInfo *TII,
SmallVector<unsigned, 4> &Kills) {
- const MCInstrDesc &MCID = MI->getDesc();
- if (MCID.mayStore() || MCID.isCall())
+ if (MI->mayStore() || MI->isCall())
return false;
- if (MCID.isTerminator() || MI->hasUnmodeledSideEffects())
+ if (MI->isTerminator() || MI->hasUnmodeledSideEffects())
return false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -852,28 +896,316 @@ TwoAddressInstructionPass::DeleteUnusedInstr(MachineBasicBlock::iterator &mi,
return true;
}
+/// RescheduleMIBelowKill - If there is another local instruction that reads
+/// 'Reg' and it kills 'Reg', consider moving the instruction below the kill
+/// instruction in order to eliminate the need for the copy.
+bool
+TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned Reg) {
+ MachineInstr *MI = &*mi;
+ DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
+ if (DI == DistanceMap.end())
+ // Must have been created from an unfolded load. Don't waste time trying this.
+ return false;
+
+ MachineInstr *KillMI = findLocalKill(Reg, MBB, mi, MRI, DistanceMap);
+ if (!KillMI || KillMI->isCopy() || KillMI->isCopyLike())
+ // Don't mess with copies, they may be coalesced later.
+ return false;
+
+ if (KillMI->hasUnmodeledSideEffects() || KillMI->isCall() ||
+ KillMI->isBranch() || KillMI->isTerminator())
+ // Don't move past calls, etc.
+ return false;
+
+ unsigned DstReg;
+ if (isTwoAddrUse(*KillMI, Reg, DstReg))
+ return false;
+
+ bool SeenStore = true;
+ if (!MI->isSafeToMove(TII, AA, SeenStore))
+ return false;
+
+ if (TII->getInstrLatency(InstrItins, MI) > 1)
+ // FIXME: Needs more sophisticated heuristics.
+ return false;
+
+ SmallSet<unsigned, 2> Uses;
+ SmallSet<unsigned, 2> Kills;
+ SmallSet<unsigned, 2> Defs;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (!MOReg)
+ continue;
+ if (MO.isDef())
+ Defs.insert(MOReg);
+ else {
+ Uses.insert(MOReg);
+ if (MO.isKill() && MOReg != Reg)
+ Kills.insert(MOReg);
+ }
+ }
+
+ // Move the copies connected to MI down as well.
+ MachineBasicBlock::iterator From = MI;
+ MachineBasicBlock::iterator To = llvm::next(From);
+ while (To->isCopy() && Defs.count(To->getOperand(1).getReg())) {
+ Defs.insert(To->getOperand(0).getReg());
+ ++To;
+ }
+
+ // Check that the reschedule will not break dependencies.
+ unsigned NumVisited = 0;
+ MachineBasicBlock::iterator KillPos = KillMI;
+ ++KillPos;
+ for (MachineBasicBlock::iterator I = To; I != KillPos; ++I) {
+ MachineInstr *OtherMI = I;
+ // DBG_VALUE cannot be counted against the limit.
+ if (OtherMI->isDebugValue())
+ continue;
+ if (NumVisited > 10) // FIXME: Arbitrary limit to reduce compile time cost.
+ return false;
+ ++NumVisited;
+ if (OtherMI->hasUnmodeledSideEffects() || OtherMI->isCall() ||
+ OtherMI->isBranch() || OtherMI->isTerminator())
+ // Don't move past calls, etc.
+ return false;
+ for (unsigned i = 0, e = OtherMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = OtherMI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (!MOReg)
+ continue;
+ if (MO.isDef()) {
+ if (Uses.count(MOReg))
+ // Physical register use would be clobbered.
+ return false;
+ if (!MO.isDead() && Defs.count(MOReg))
+ // May clobber a physical register def.
+ // FIXME: This may be too conservative. It's ok if the instruction
+ // is sunk completely below the use.
+ return false;
+ } else {
+ if (Defs.count(MOReg))
+ return false;
+ if (MOReg != Reg &&
+ ((MO.isKill() && Uses.count(MOReg)) || Kills.count(MOReg)))
+ // Don't want to extend other live ranges and update kills.
+ return false;
+ }
+ }
+ }
+
+ // Move debug info as well.
+ while (From != MBB->begin() && llvm::prior(From)->isDebugValue())
+ --From;
+
+ // Copies following MI may have been moved as well.
+ nmi = To;
+ MBB->splice(KillPos, MBB, From, To);
+ DistanceMap.erase(DI);
+
+ if (LV) {
+ // Update live variables
+ LV->removeVirtualRegisterKilled(Reg, KillMI);
+ LV->addVirtualRegisterKilled(Reg, MI);
+ } else {
+ for (unsigned i = 0, e = KillMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = KillMI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg)
+ continue;
+ MO.setIsKill(false);
+ }
+ MI->addRegisterKilled(Reg, 0);
+ }
+
+ return true;
+}
+
+/// isDefTooClose - Return true if the re-scheduling will put the given
+/// instruction too close to the defs of its register dependencies.
+bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist,
+ MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(Reg),
+ DE = MRI->def_end(); DI != DE; ++DI) {
+ MachineInstr *DefMI = &*DI;
+ if (DefMI->getParent() != MBB || DefMI->isCopy() || DefMI->isCopyLike())
+ continue;
+ if (DefMI == MI)
+ return true; // MI is defining something KillMI uses
+ DenseMap<MachineInstr*, unsigned>::iterator DDI = DistanceMap.find(DefMI);
+ if (DDI == DistanceMap.end())
+ return true; // Below MI
+ unsigned DefDist = DDI->second;
+ assert(Dist > DefDist && "Visited def already?");
+ if (TII->getInstrLatency(InstrItins, DefMI) > (int)(Dist - DefDist))
+ return true;
+ }
+ return false;
+}
+
+/// RescheduleKillAboveMI - If there is another local instruction that reads
+/// 'Reg' and it kills 'Reg', consider moving the kill instruction above the
+/// current two-address instruction in order to eliminate the need for the
+/// copy.
+bool
+TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator &mi,
+ MachineBasicBlock::iterator &nmi,
+ unsigned Reg) {
+ MachineInstr *MI = &*mi;
+ DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(MI);
+ if (DI == DistanceMap.end())
+ // Must have been created from an unfolded load. Don't waste time trying this.
+ return false;
+
+ MachineInstr *KillMI = findLocalKill(Reg, MBB, mi, MRI, DistanceMap);
+ if (!KillMI || KillMI->isCopy() || KillMI->isCopyLike())
+ // Don't mess with copies, they may be coalesced later.
+ return false;
+
+ unsigned DstReg;
+ if (isTwoAddrUse(*KillMI, Reg, DstReg))
+ return false;
+
+ bool SeenStore = true;
+ if (!KillMI->isSafeToMove(TII, AA, SeenStore))
+ return false;
+
+ SmallSet<unsigned, 2> Uses;
+ SmallSet<unsigned, 2> Kills;
+ SmallSet<unsigned, 2> Defs;
+ SmallSet<unsigned, 2> LiveDefs;
+ for (unsigned i = 0, e = KillMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = KillMI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (MO.isUse()) {
+ if (!MOReg)
+ continue;
+ if (isDefTooClose(MOReg, DI->second, MI, MBB))
+ return false;
+ Uses.insert(MOReg);
+ if (MO.isKill() && MOReg != Reg)
+ Kills.insert(MOReg);
+ } else if (TargetRegisterInfo::isPhysicalRegister(MOReg)) {
+ Defs.insert(MOReg);
+ if (!MO.isDead())
+ LiveDefs.insert(MOReg);
+ }
+ }
+
+ // Check that the reschedule will not break dependencies.
+ unsigned NumVisited = 0;
+ MachineBasicBlock::iterator KillPos = KillMI;
+ for (MachineBasicBlock::iterator I = mi; I != KillPos; ++I) {
+ MachineInstr *OtherMI = I;
+ // DBG_VALUE cannot be counted against the limit.
+ if (OtherMI->isDebugValue())
+ continue;
+ if (NumVisited > 10) // FIXME: Arbitrary limit to reduce compile time cost.
+ return false;
+ ++NumVisited;
+ if (OtherMI->hasUnmodeledSideEffects() || OtherMI->isCall() ||
+ OtherMI->isBranch() || OtherMI->isTerminator())
+ // Don't move past calls, etc.
+ return false;
+ SmallVector<unsigned, 2> OtherDefs;
+ for (unsigned i = 0, e = OtherMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = OtherMI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned MOReg = MO.getReg();
+ if (!MOReg)
+ continue;
+ if (MO.isUse()) {
+ if (Defs.count(MOReg))
+ // Moving KillMI can clobber the physical register if the def has
+ // not been seen.
+ return false;
+ if (Kills.count(MOReg))
+ // Don't want to extend other live ranges and update kills.
+ return false;
+ } else {
+ OtherDefs.push_back(MOReg);
+ }
+ }
+
+ for (unsigned i = 0, e = OtherDefs.size(); i != e; ++i) {
+ unsigned MOReg = OtherDefs[i];
+ if (Uses.count(MOReg))
+ return false;
+ if (TargetRegisterInfo::isPhysicalRegister(MOReg) &&
+ LiveDefs.count(MOReg))
+ return false;
+ // Physical register def is seen.
+ Defs.erase(MOReg);
+ }
+ }
+
+ // Move the old kill above MI; don't forget to move debug info as well.
+ MachineBasicBlock::iterator InsertPos = mi;
+ while (InsertPos != MBB->begin() && llvm::prior(InsertPos)->isDebugValue())
+ --InsertPos;
+ MachineBasicBlock::iterator From = KillMI;
+ MachineBasicBlock::iterator To = llvm::next(From);
+ while (llvm::prior(From)->isDebugValue())
+ --From;
+ MBB->splice(InsertPos, MBB, From, To);
+
+ nmi = llvm::prior(InsertPos); // Backtrack so we process the moved instr.
+ DistanceMap.erase(DI);
+
+ if (LV) {
+ // Update live variables
+ LV->removeVirtualRegisterKilled(Reg, KillMI);
+ LV->addVirtualRegisterKilled(Reg, MI);
+ } else {
+ for (unsigned i = 0, e = KillMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = KillMI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg)
+ continue;
+ MO.setIsKill(false);
+ }
+ MI->addRegisterKilled(Reg, 0);
+ }
+ return true;
+}
+
/// TryInstructionTransform - For the case where an instruction has a single
/// pair of tied register operands, attempt some transformations that may
/// either eliminate the tied operands or improve the opportunities for
-/// coalescing away the register copy. Returns true if the tied operands
-/// are eliminated altogether.
+/// coalescing away the register copy. Returns true if no copy needs to be
+/// inserted to untie mi's operands (either because they were untied, or
+/// because mi was rescheduled and will be visited again later).
bool TwoAddressInstructionPass::
TryInstructionTransform(MachineBasicBlock::iterator &mi,
MachineBasicBlock::iterator &nmi,
MachineFunction::iterator &mbbi,
unsigned SrcIdx, unsigned DstIdx, unsigned Dist,
SmallPtrSet<MachineInstr*, 8> &Processed) {
- const MCInstrDesc &MCID = mi->getDesc();
- unsigned regA = mi->getOperand(DstIdx).getReg();
- unsigned regB = mi->getOperand(SrcIdx).getReg();
+ if (OptLevel == CodeGenOpt::None)
+ return false;
+
+ MachineInstr &MI = *mi;
+ unsigned regA = MI.getOperand(DstIdx).getReg();
+ unsigned regB = MI.getOperand(SrcIdx).getReg();
assert(TargetRegisterInfo::isVirtualRegister(regB) &&
"cannot make instruction into two-address form");
// If regA is dead and the instruction can be deleted, just delete
// it so it doesn't clobber regB.
- bool regBKilled = isKilled(*mi, regB, MRI, TII);
- if (!regBKilled && mi->getOperand(DstIdx).isDead() &&
+ bool regBKilled = isKilled(MI, regB, MRI, TII);
+ if (!regBKilled && MI.getOperand(DstIdx).isDead() &&
DeleteUnusedInstr(mi, nmi, mbbi, Dist)) {
++NumDeletes;
return true; // Done with this instruction.
@@ -885,20 +1217,20 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
unsigned regCIdx = ~0U;
bool TryCommute = false;
bool AggressiveCommute = false;
- if (MCID.isCommutable() && mi->getNumOperands() >= 3 &&
- TII->findCommutedOpIndices(mi, SrcOp1, SrcOp2)) {
+ if (MI.isCommutable() && MI.getNumOperands() >= 3 &&
+ TII->findCommutedOpIndices(&MI, SrcOp1, SrcOp2)) {
if (SrcIdx == SrcOp1)
regCIdx = SrcOp2;
else if (SrcIdx == SrcOp2)
regCIdx = SrcOp1;
if (regCIdx != ~0U) {
- regC = mi->getOperand(regCIdx).getReg();
- if (!regBKilled && isKilled(*mi, regC, MRI, TII))
+ regC = MI.getOperand(regCIdx).getReg();
+ if (!regBKilled && isKilled(MI, regC, MRI, TII))
// If C dies but B does not, swap the B and C operands.
// This makes the live ranges of A and C joinable.
TryCommute = true;
- else if (isProfitableToCommute(regB, regC, mi, mbbi, Dist)) {
+ else if (isProfitableToCommute(regB, regC, &MI, mbbi, Dist)) {
TryCommute = true;
AggressiveCommute = true;
}
@@ -913,10 +1245,17 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
return false;
}
+ // If there is one more use of regB later in the same MBB, consider
+ // re-scheduling this MI below it.
+ if (RescheduleMIBelowKill(mbbi, mi, nmi, regB)) {
+ ++NumReSchedDowns;
+ return true;
+ }
+
if (TargetRegisterInfo::isVirtualRegister(regA))
ScanUses(regA, &*mbbi, Processed);
- if (MCID.isConvertibleTo3Addr()) {
+ if (MI.isConvertibleTo3Addr()) {
// This instruction is potentially convertible to a true
// three-address instruction. Check if it is profitable.
if (!regBKilled || isProfitableToConv3Addr(regA, regB)) {
@@ -928,6 +1267,13 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
}
}
+ // If there is one more use of regB later in the same MBB, consider
+ // re-scheduling it before this MI if it's legal.
+ if (RescheduleKillAboveMI(mbbi, mi, nmi, regB)) {
+ ++NumReSchedUps;
+ return true;
+ }
+
// If this is an instruction with a load folded into it, try unfolding
// the load, e.g. avoid this:
// movq %rdx, %rcx
@@ -936,11 +1282,11 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// movq (%rax), %rcx
// addq %rdx, %rcx
// because it's preferable to schedule a load than a register copy.
- if (MCID.mayLoad() && !regBKilled) {
+ if (MI.mayLoad() && !regBKilled) {
// Determine if a load can be unfolded.
unsigned LoadRegIndex;
unsigned NewOpc =
- TII->getOpcodeAfterMemoryUnfold(mi->getOpcode(),
+ TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
/*UnfoldLoad=*/true,
/*UnfoldStore=*/false,
&LoadRegIndex);
@@ -950,12 +1296,12 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
MachineFunction &MF = *mbbi->getParent();
// Unfold the load.
- DEBUG(dbgs() << "2addr: UNFOLDING: " << *mi);
+ DEBUG(dbgs() << "2addr: UNFOLDING: " << MI);
const TargetRegisterClass *RC =
TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI);
unsigned Reg = MRI->createVirtualRegister(RC);
SmallVector<MachineInstr *, 2> NewMIs;
- if (!TII->unfoldMemoryOperand(MF, mi, Reg,
+ if (!TII->unfoldMemoryOperand(MF, &MI, Reg,
/*UnfoldLoad=*/true,/*UnfoldStore=*/false,
NewMIs)) {
DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
@@ -986,21 +1332,21 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
// Success, or at least we made an improvement. Keep the unfolded
// instructions and discard the original.
if (LV) {
- for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = mi->getOperand(i);
- if (MO.isReg() &&
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ if (MO.isReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (MO.isUse()) {
if (MO.isKill()) {
if (NewMIs[0]->killsRegister(MO.getReg()))
- LV->replaceKillInstruction(MO.getReg(), mi, NewMIs[0]);
+ LV->replaceKillInstruction(MO.getReg(), &MI, NewMIs[0]);
else {
assert(NewMIs[1]->killsRegister(MO.getReg()) &&
"Kill missing after load unfold!");
- LV->replaceKillInstruction(MO.getReg(), mi, NewMIs[1]);
+ LV->replaceKillInstruction(MO.getReg(), &MI, NewMIs[1]);
}
}
- } else if (LV->removeVirtualRegisterDead(MO.getReg(), mi)) {
+ } else if (LV->removeVirtualRegisterDead(MO.getReg(), &MI)) {
if (NewMIs[1]->registerDefIsDead(MO.getReg()))
LV->addVirtualRegisterDead(MO.getReg(), NewMIs[1]);
else {
@@ -1013,7 +1359,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
}
LV->addVirtualRegisterKilled(Reg, NewMIs[1]);
}
- mi->eraseFromParent();
+ MI.eraseFromParent();
mi = NewMIs[1];
if (TransformSuccess)
return true;
@@ -1035,18 +1381,19 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
/// runOnMachineFunction - Reduce two-address instructions to two operands.
///
bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "Machine Function\n");
const TargetMachine &TM = MF.getTarget();
MRI = &MF.getRegInfo();
TII = TM.getInstrInfo();
TRI = TM.getRegisterInfo();
+ InstrItins = TM.getInstrItineraryData();
LV = getAnalysisIfAvailable<LiveVariables>();
AA = &getAnalysis<AliasAnalysis>();
+ OptLevel = TM.getOptLevel();
bool MadeChange = false;
DEBUG(dbgs() << "********** REWRITING TWO-ADDR INSTRS **********\n");
- DEBUG(dbgs() << "********** Function: "
+ DEBUG(dbgs() << "********** Function: "
<< MF.getFunction()->getName() << '\n');
// This pass takes the function out of SSA form.
@@ -1177,7 +1524,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
// If it's safe and profitable, remat the definition instead of
// copying it.
if (DefMI &&
- DefMI->getDesc().isAsCheapAsAMove() &&
+ DefMI->isAsCheapAsAMove() &&
DefMI->isSafeToReMat(TII, AA, regB) &&
isProfitableToReMat(regB, rc, mi, DefMI, mbbi, Dist)){
DEBUG(dbgs() << "2addr: REMATTING : " << *DefMI << "\n");
@@ -1248,19 +1595,19 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
MadeChange = true;
DEBUG(dbgs() << "\t\trewrite to:\t" << *mi);
- }
- // Rewrite INSERT_SUBREG as COPY now that we no longer need SSA form.
- if (mi->isInsertSubreg()) {
- // From %reg = INSERT_SUBREG %reg, %subreg, subidx
- // To %reg:subidx = COPY %subreg
- unsigned SubIdx = mi->getOperand(3).getImm();
- mi->RemoveOperand(3);
- assert(mi->getOperand(0).getSubReg() == 0 && "Unexpected subreg idx");
- mi->getOperand(0).setSubReg(SubIdx);
- mi->RemoveOperand(1);
- mi->setDesc(TII->get(TargetOpcode::COPY));
- DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
+ // Rewrite INSERT_SUBREG as COPY now that we no longer need SSA form.
+ if (mi->isInsertSubreg()) {
+ // From %reg = INSERT_SUBREG %reg, %subreg, subidx
+ // To %reg:subidx = COPY %subreg
+ unsigned SubIdx = mi->getOperand(3).getImm();
+ mi->RemoveOperand(3);
+ assert(mi->getOperand(0).getSubReg() == 0 && "Unexpected subreg idx");
+ mi->getOperand(0).setSubReg(SubIdx);
+ mi->RemoveOperand(1);
+ mi->setDesc(TII->get(TargetOpcode::COPY));
+ DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
+ }
}
// Clear TiedOperands here instead of at the top of the loop
@@ -1298,6 +1645,36 @@ static void UpdateRegSequenceSrcs(unsigned SrcReg,
}
}
+// Find the first def of Reg, assuming they are all in the same basic block.
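+// No instruction numbering is available here, so collect every def in a set
+// and expand a window from whichever def the use chain yields last: A walks
+// up and B walks down, erasing defs as they are passed, and the last def
+// erased while walking up is the first in block order. E.g., with defs at
+// block positions 3, 7 and 9, starting at 7, A erases the def at 3 and B
+// the defs at 7 and 9, so the def at position 3 is returned as First.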
+static MachineInstr *findFirstDef(unsigned Reg, MachineRegisterInfo *MRI) {
+ SmallPtrSet<MachineInstr*, 8> Defs;
+ MachineInstr *First = 0;
+ for (MachineRegisterInfo::def_iterator RI = MRI->def_begin(Reg);
+ MachineInstr *MI = RI.skipInstruction(); Defs.insert(MI))
+ First = MI;
+ if (!First)
+ return 0;
+
+ MachineBasicBlock *MBB = First->getParent();
+ MachineBasicBlock::iterator A = First, B = First;
+ bool Moving;
+ do {
+ Moving = false;
+ if (A != MBB->begin()) {
+ Moving = true;
+ --A;
+ if (Defs.erase(A)) First = A;
+ }
+ if (B != MBB->end()) {
+ Defs.erase(B);
+ ++B;
+ Moving = true;
+ }
+ } while (Moving && !Defs.empty());
+ assert(Defs.empty() && "Instructions outside basic block!");
+ return First;
+}
+
/// CoalesceExtSubRegs - If a number of sources of the REG_SEQUENCE are
/// EXTRACT_SUBREG from the same register and to the same virtual register
/// with different sub-register indices, attempt to combine the
@@ -1380,8 +1757,10 @@ TwoAddressInstructionPass::CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs,
CanCoalesce = false;
break;
}
- // Keep track of one of the uses.
- SomeMI = UseMI;
+ // Keep track of one of the uses. Preferably the first one that has a
+ // <def,undef> flag.
+ if (!SomeMI || UseMI->getOperand(0).isUndef())
+ SomeMI = UseMI;
}
if (!CanCoalesce)
continue;
@@ -1390,7 +1769,9 @@ TwoAddressInstructionPass::CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs,
MachineInstr *CopyMI = BuildMI(*SomeMI->getParent(), SomeMI,
SomeMI->getDebugLoc(),
TII->get(TargetOpcode::COPY))
- .addReg(DstReg, RegState::Define, NewDstSubIdx)
+ .addReg(DstReg, RegState::Define |
+ getUndefRegState(SomeMI->getOperand(0).isUndef()),
+ NewDstSubIdx)
.addReg(SrcReg, 0, NewSrcSubIdx);
// Remove all the old extract instructions.
@@ -1452,26 +1833,30 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
SmallSet<unsigned, 4> Seen;
for (unsigned i = 1, e = MI->getNumOperands(); i < e; i += 2) {
unsigned SrcReg = MI->getOperand(i).getReg();
+ unsigned SrcSubIdx = MI->getOperand(i).getSubReg();
unsigned SubIdx = MI->getOperand(i+1).getImm();
- if (MI->getOperand(i).getSubReg() ||
- TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
- DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << *MI);
- llvm_unreachable(0);
+ // A DefMI of NULL means the value does not have a vreg in this block,
+ // i.e., it's a physical register or a subreg.
+ // In either case we force a copy to be generated.
+ MachineInstr *DefMI = NULL;
+ if (!MI->getOperand(i).getSubReg() &&
+ !TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ DefMI = MRI->getVRegDef(SrcReg);
}
- MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
- if (DefMI->isImplicitDef()) {
+ if (DefMI && DefMI->isImplicitDef()) {
DefMI->eraseFromParent();
continue;
}
IsImpDef = false;
// Remember COPY sources. These might be candidate for coalescing.
- if (DefMI->isCopy() && DefMI->getOperand(1).getSubReg())
+ if (DefMI && DefMI->isCopy() && DefMI->getOperand(1).getSubReg())
RealSrcs.push_back(DefMI->getOperand(1).getReg());
bool isKill = MI->getOperand(i).isKill();
- if (!Seen.insert(SrcReg) || MI->getParent() != DefMI->getParent() ||
+ if (!DefMI || !Seen.insert(SrcReg) ||
+ MI->getParent() != DefMI->getParent() ||
!isKill || HasOtherRegSequenceUses(SrcReg, MI, MRI) ||
!TRI->getMatchingSuperRegClass(MRI->getRegClass(DstReg),
MRI->getRegClass(SrcReg), SubIdx)) {
@@ -1504,9 +1889,9 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
MachineInstr *CopyMI = BuildMI(*MI->getParent(), InsertLoc,
MI->getDebugLoc(), TII->get(TargetOpcode::COPY))
.addReg(DstReg, RegState::Define, SubIdx)
- .addReg(SrcReg, getKillRegState(isKill));
+ .addReg(SrcReg, getKillRegState(isKill), SrcSubIdx);
MI->getOperand(i).setReg(0);
- if (LV && isKill)
+ if (LV && isKill && !TargetRegisterInfo::isPhysicalRegister(SrcReg))
LV->replaceKillInstruction(SrcReg, MI, CopyMI);
DEBUG(dbgs() << "Inserted: " << *CopyMI);
}
@@ -1519,11 +1904,27 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
UpdateRegSequenceSrcs(SrcReg, DstReg, SubIdx, MRI, *TRI);
}
+ // Set <def,undef> flags on the first DstReg def in the basic block.
+ // It marks the beginning of the live range. All the other defs are
+ // read-modify-write.
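+ // E.g., with two partial defs of %reg1024 lowered from a REG_SEQUENCE,
+ // only the first becomes a <def,undef>; the second still reads the
+ // remaining lanes, so it stays a plain (read-modify-write) def.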
+ if (MachineInstr *Def = findFirstDef(DstReg, MRI)) {
+ for (unsigned i = 0, e = Def->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = Def->getOperand(i);
+ if (MO.isReg() && MO.isDef() && MO.getReg() == DstReg)
+ MO.setIsUndef();
+ }
+ // Make sure there is a full non-subreg imp-def operand on the
+ // instruction. This shouldn't be necessary, but it seems that at least
+ // RAFast requires it.
+ Def->addRegisterDefined(DstReg, TRI);
+ DEBUG(dbgs() << "First def: " << *Def);
+ }
+
if (IsImpDef) {
DEBUG(dbgs() << "Turned: " << *MI << " into an IMPLICIT_DEF");
MI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
- MI->RemoveOperand(j);
+ MI->RemoveOperand(j);
} else {
DEBUG(dbgs() << "Eliminated: " << *MI);
MI->eraseFromParent();
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
index 8a1cdc0..3bab93b 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.cpp
@@ -16,10 +16,9 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "virtregmap"
+#define DEBUG_TYPE "regalloc"
#include "VirtRegMap.h"
#include "llvm/Function.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -32,12 +31,8 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;
@@ -58,34 +53,11 @@ bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
TRI = mf.getTarget().getRegisterInfo();
MF = &mf;
- ReMatId = MAX_STACK_SLOT+1;
- LowSpillSlot = HighSpillSlot = NO_STACK_SLOT;
-
Virt2PhysMap.clear();
Virt2StackSlotMap.clear();
- Virt2ReMatIdMap.clear();
Virt2SplitMap.clear();
- Virt2SplitKillMap.clear();
- ReMatMap.clear();
- ImplicitDefed.clear();
- SpillSlotToUsesMap.clear();
- MI2VirtMap.clear();
- SpillPt2VirtMap.clear();
- RestorePt2VirtMap.clear();
- EmergencySpillMap.clear();
- EmergencySpillSlots.clear();
-
- SpillSlotToUsesMap.resize(8);
- ImplicitDefed.resize(MF->getRegInfo().getNumVirtRegs());
-
- allocatableRCRegs.clear();
- for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
- E = TRI->regclass_end(); I != E; ++I)
- allocatableRCRegs.insert(std::make_pair(*I,
- TRI->getAllocatableSet(mf, *I)));
grow();
-
return false;
}
@@ -93,24 +65,12 @@ void VirtRegMap::grow() {
unsigned NumRegs = MF->getRegInfo().getNumVirtRegs();
Virt2PhysMap.resize(NumRegs);
Virt2StackSlotMap.resize(NumRegs);
- Virt2ReMatIdMap.resize(NumRegs);
Virt2SplitMap.resize(NumRegs);
- Virt2SplitKillMap.resize(NumRegs);
- ReMatMap.resize(NumRegs);
- ImplicitDefed.resize(NumRegs);
}
unsigned VirtRegMap::createSpillSlot(const TargetRegisterClass *RC) {
int SS = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
RC->getAlignment());
- if (LowSpillSlot == NO_STACK_SLOT)
- LowSpillSlot = SS;
- if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
- HighSpillSlot = SS;
- assert(SS >= LowSpillSlot && "Unexpected low spill slot");
- unsigned Idx = SS-LowSpillSlot;
- while (Idx >= SpillSlotToUsesMap.size())
- SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
++NumSpillSlots;
return SS;
}
@@ -144,118 +104,6 @@ void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
Virt2StackSlotMap[virtReg] = SS;
}
-int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
- assert(TargetRegisterInfo::isVirtualRegister(virtReg));
- assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
- "attempt to assign re-mat id to already spilled register");
- Virt2ReMatIdMap[virtReg] = ReMatId;
- return ReMatId++;
-}
-
-void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
- assert(TargetRegisterInfo::isVirtualRegister(virtReg));
- assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
- "attempt to assign re-mat id to already spilled register");
- Virt2ReMatIdMap[virtReg] = id;
-}
-
-int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
- std::map<const TargetRegisterClass*, int>::iterator I =
- EmergencySpillSlots.find(RC);
- if (I != EmergencySpillSlots.end())
- return I->second;
- return EmergencySpillSlots[RC] = createSpillSlot(RC);
-}
-
-void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
- if (!MF->getFrameInfo()->isFixedObjectIndex(FI)) {
- // If FI < LowSpillSlot, this stack reference was produced by
- // instruction selection and is not a spill
- if (FI >= LowSpillSlot) {
- assert(FI >= 0 && "Spill slot index should not be negative!");
- assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
- && "Invalid spill slot");
- SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
- }
- }
-}
-
-void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
- MachineInstr *NewMI, ModRef MRInfo) {
- // Move previous memory references folded to new instruction.
- MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
- for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
- E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
- MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
- MI2VirtMap.erase(I++);
- }
-
- // add new memory reference
- MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
-}
-
-void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
- MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
- MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
-}
-
-void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isFI())
- continue;
- int FI = MO.getIndex();
- if (MF->getFrameInfo()->isFixedObjectIndex(FI))
- continue;
- // This stack reference was produced by instruction selection and
- // is not a spill
- if (FI < LowSpillSlot)
- continue;
- assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
- && "Invalid spill slot");
- SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
- }
- MI2VirtMap.erase(MI);
- SpillPt2VirtMap.erase(MI);
- RestorePt2VirtMap.erase(MI);
- EmergencySpillMap.erase(MI);
-}
-
-/// FindUnusedRegisters - Gather a list of allocatable registers that
-/// have not been allocated to any virtual register.
-bool VirtRegMap::FindUnusedRegisters(LiveIntervals* LIs) {
- unsigned NumRegs = TRI->getNumRegs();
- UnusedRegs.reset();
- UnusedRegs.resize(NumRegs);
-
- BitVector Used(NumRegs);
- for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
- unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
- if (Virt2PhysMap[Reg] != (unsigned)VirtRegMap::NO_PHYS_REG)
- Used.set(Virt2PhysMap[Reg]);
- }
-
- BitVector Allocatable = TRI->getAllocatableSet(*MF);
- bool AnyUnused = false;
- for (unsigned Reg = 1; Reg < NumRegs; ++Reg) {
- if (Allocatable[Reg] && !Used[Reg] && !LIs->hasInterval(Reg)) {
- bool ReallyUnused = true;
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
- if (Used[*AS] || LIs->hasInterval(*AS)) {
- ReallyUnused = false;
- break;
- }
- }
- if (ReallyUnused) {
- AnyUnused = true;
- UnusedRegs.set(Reg);
- }
- }
- }
-
- return AnyUnused;
-}
-
void VirtRegMap::rewrite(SlotIndexes *Indexes) {
DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n"
<< "********** Function: "
@@ -264,23 +112,32 @@ void VirtRegMap::rewrite(SlotIndexes *Indexes) {
SmallVector<unsigned, 8> SuperDeads;
SmallVector<unsigned, 8> SuperDefs;
SmallVector<unsigned, 8> SuperKills;
+#ifndef NDEBUG
+ BitVector Reserved = TRI->getReservedRegs(*MF);
+#endif
for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end();
MBBI != MBBE; ++MBBI) {
DEBUG(MBBI->print(dbgs(), Indexes));
- for (MachineBasicBlock::iterator MII = MBBI->begin(), MIE = MBBI->end();
- MII != MIE;) {
+ for (MachineBasicBlock::instr_iterator
+ MII = MBBI->instr_begin(), MIE = MBBI->instr_end(); MII != MIE;) {
MachineInstr *MI = MII;
++MII;
for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
MOE = MI->operands_end(); MOI != MOE; ++MOI) {
MachineOperand &MO = *MOI;
+
+ // Make sure MRI knows about registers clobbered by regmasks.
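+ // A register mask, typically found on calls, preserves exactly the
+ // registers whose bits are set and clobbers everything else.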
+ if (MO.isRegMask())
+ MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
+
if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue;
unsigned VirtReg = MO.getReg();
unsigned PhysReg = getPhys(VirtReg);
assert(PhysReg != NO_PHYS_REG && "Instruction uses unmapped VirtReg");
+ assert(!Reserved.test(PhysReg) && "Reserved register assignment");
// Preserve semantics of sub-register operands.
if (MO.getSubReg()) {
@@ -332,7 +189,6 @@ void VirtRegMap::rewrite(SlotIndexes *Indexes) {
++NumIdCopies;
if (MI->getNumOperands() == 2) {
DEBUG(dbgs() << "Deleting identity copy.\n");
- RemoveMachineInstrFromMaps(MI);
if (Indexes)
Indexes->removeMachineInstrFromMaps(MI);
// It's safe to erase MI because MII has already been incremented.
diff --git a/contrib/llvm/lib/CodeGen/VirtRegMap.h b/contrib/llvm/lib/CodeGen/VirtRegMap.h
index 03abff3..8cac311 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegMap.h
+++ b/contrib/llvm/lib/CodeGen/VirtRegMap.h
@@ -18,22 +18,14 @@
#define LLVM_CODEGEN_VIRTREGMAP_H
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include <map>
namespace llvm {
- class LiveIntervals;
class MachineInstr;
class MachineFunction;
class MachineRegisterInfo;
class TargetInstrInfo;
- class TargetRegisterInfo;
class raw_ostream;
class SlotIndexes;
@@ -45,18 +37,12 @@ namespace llvm {
MAX_STACK_SLOT = (1L << 18)-1
};
- enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };
- typedef std::multimap<MachineInstr*,
- std::pair<unsigned, ModRef> > MI2VirtMapTy;
-
private:
MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineFunction *MF;
- DenseMap<const TargetRegisterClass*, BitVector> allocatableRCRegs;
-
/// Virt2PhysMap - This is a virtual to physical register
/// mapping. Each virtual register is required to have an entry in
/// it; even spilled virtual registers (the register mapped to a
@@ -70,71 +56,10 @@ namespace llvm {
/// at.
IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
- /// Virt2ReMatIdMap - This is virtual register to rematerialization id
- /// mapping. Each spilled virtual register that should be remat'd has an
- /// entry in it which corresponds to the remat id.
- IndexedMap<int, VirtReg2IndexFunctor> Virt2ReMatIdMap;
-
/// Virt2SplitMap - This is virtual register to splitted virtual register
/// mapping.
IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;
- /// Virt2SplitKillMap - This is splitted virtual register to its last use
- /// (kill) index mapping.
- IndexedMap<SlotIndex, VirtReg2IndexFunctor> Virt2SplitKillMap;
-
- /// ReMatMap - This is virtual register to re-materialized instruction
- /// mapping. Each virtual register whose definition is going to be
- /// re-materialized has an entry in it.
- IndexedMap<MachineInstr*, VirtReg2IndexFunctor> ReMatMap;
-
- /// MI2VirtMap - This is MachineInstr to virtual register
- /// mapping. In the case of memory spill code being folded into
- /// instructions, we need to know which virtual register was
- /// read/written by this instruction.
- MI2VirtMapTy MI2VirtMap;
-
- /// SpillPt2VirtMap - This records the virtual registers which should
- /// be spilled right after the MachineInstr due to live interval
- /// splitting.
- std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >
- SpillPt2VirtMap;
-
- /// RestorePt2VirtMap - This records the virtual registers which should
- /// be restored right before the MachineInstr due to live interval
- /// splitting.
- std::map<MachineInstr*, std::vector<unsigned> > RestorePt2VirtMap;
-
- /// EmergencySpillMap - This records the physical registers that should
- /// be spilled / restored around the MachineInstr since the register
- /// allocator has run out of registers.
- std::map<MachineInstr*, std::vector<unsigned> > EmergencySpillMap;
-
- /// EmergencySpillSlots - This records emergency spill slots used to
- /// spill physical registers when the register allocator runs out of
- /// registers. Ideally only one stack slot is used per function per
- /// register class.
- std::map<const TargetRegisterClass*, int> EmergencySpillSlots;
-
- /// ReMatId - Instead of assigning a stack slot to a to-be-rematerialized
- /// virtual register, a unique id is assigned. This keeps track of
- /// the highest id used so far. Note, this starts at (1<<18) to avoid
- /// conflicts with stack slot numbers.
- int ReMatId;
-
- /// LowSpillSlot, HighSpillSlot - Lowest and highest spill slot indexes.
- int LowSpillSlot, HighSpillSlot;
-
- /// SpillSlotToUsesMap - Records uses for each register spill slot.
- SmallVector<SmallPtrSet<MachineInstr*, 4>, 8> SpillSlotToUsesMap;
-
- /// ImplicitDefed - One bit for each virtual register. If set it indicates
- /// the register is implicitly defined.
- BitVector ImplicitDefed;
-
- /// UnusedRegs - A list of physical registers that have not been used.
- BitVector UnusedRegs;
-
/// createSpillSlot - Allocate a spill slot for RC from MFI.
unsigned createSpillSlot(const TargetRegisterClass *RC);
@@ -144,11 +69,7 @@ namespace llvm {
public:
static char ID;
VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
- Virt2StackSlotMap(NO_STACK_SLOT),
- Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
- Virt2SplitKillMap(SlotIndex()), ReMatMap(NULL),
- ReMatId(MAX_STACK_SLOT+1),
- LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) { }
+ Virt2StackSlotMap(NO_STACK_SLOT), Virt2SplitMap(0) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -235,8 +156,7 @@ namespace llvm {
/// @brief returns true if the specified virtual register is not
/// mapped to a stack slot or rematerialized.
bool isAssignedReg(unsigned virtReg) const {
- if (getStackSlot(virtReg) == NO_STACK_SLOT &&
- getReMatId(virtReg) == NO_STACK_SLOT)
+ if (getStackSlot(virtReg) == NO_STACK_SLOT)
return true;
// Split register can be assigned a physical register as well as a
// stack slot or remat id.
@@ -250,13 +170,6 @@ namespace llvm {
return Virt2StackSlotMap[virtReg];
}
- /// @brief returns the rematerialization id mapped to the specified virtual
- /// register
- int getReMatId(unsigned virtReg) const {
- assert(TargetRegisterInfo::isVirtualRegister(virtReg));
- return Virt2ReMatIdMap[virtReg];
- }
-
/// @brief create a mapping for the specifed virtual register to
/// the next available stack slot
int assignVirt2StackSlot(unsigned virtReg);
@@ -264,250 +177,6 @@ namespace llvm {
/// the specified stack slot
void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
- /// @brief assign a unique re-materialization id to the specified
- /// virtual register.
- int assignVirtReMatId(unsigned virtReg);
- /// @brief assign a unique re-materialization id to the specified
- /// virtual register.
- void assignVirtReMatId(unsigned virtReg, int id);
-
- /// @brief returns true if the specified virtual register is being
- /// re-materialized.
- bool isReMaterialized(unsigned virtReg) const {
- return ReMatMap[virtReg] != NULL;
- }
-
- /// @brief returns the original machine instruction being re-issued
- /// to re-materialize the specified virtual register.
- MachineInstr *getReMaterializedMI(unsigned virtReg) const {
- return ReMatMap[virtReg];
- }
-
- /// @brief records the specified virtual register will be
- /// re-materialized and the original instruction which will be re-issued
- /// for this purpose. If parameter all is true, then all uses of the
- /// registers are rematerialized and it's safe to delete the definition.
- void setVirtIsReMaterialized(unsigned virtReg, MachineInstr *def) {
- ReMatMap[virtReg] = def;
- }
-
- /// @brief record the last use (kill) of a split virtual register.
- void addKillPoint(unsigned virtReg, SlotIndex index) {
- Virt2SplitKillMap[virtReg] = index;
- }
-
- SlotIndex getKillPoint(unsigned virtReg) const {
- return Virt2SplitKillMap[virtReg];
- }
-
- /// @brief remove the last use (kill) of a split virtual register.
- void removeKillPoint(unsigned virtReg) {
- Virt2SplitKillMap[virtReg] = SlotIndex();
- }
-
- /// @brief returns true if the specified MachineInstr is a spill point.
- bool isSpillPt(MachineInstr *Pt) const {
- return SpillPt2VirtMap.find(Pt) != SpillPt2VirtMap.end();
- }
-
- /// @brief returns the virtual registers that should be spilled due to
- /// splitting right after the specified MachineInstr.
- std::vector<std::pair<unsigned,bool> > &getSpillPtSpills(MachineInstr *Pt) {
- return SpillPt2VirtMap[Pt];
- }
-
- /// @brief records the specified MachineInstr as a spill point for virtReg.
- void addSpillPoint(unsigned virtReg, bool isKill, MachineInstr *Pt) {
- std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >::iterator
- I = SpillPt2VirtMap.find(Pt);
- if (I != SpillPt2VirtMap.end())
- I->second.push_back(std::make_pair(virtReg, isKill));
- else {
- std::vector<std::pair<unsigned,bool> > Virts;
- Virts.push_back(std::make_pair(virtReg, isKill));
- SpillPt2VirtMap.insert(std::make_pair(Pt, Virts));
- }
- }
-
- /// @brief - transfer spill point information from one instruction to
- /// another.
- void transferSpillPts(MachineInstr *Old, MachineInstr *New) {
- std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >::iterator
- I = SpillPt2VirtMap.find(Old);
- if (I == SpillPt2VirtMap.end())
- return;
- while (!I->second.empty()) {
- unsigned virtReg = I->second.back().first;
- bool isKill = I->second.back().second;
- I->second.pop_back();
- addSpillPoint(virtReg, isKill, New);
- }
- SpillPt2VirtMap.erase(I);
- }
-
- /// @brief returns true if the specified MachineInstr is a restore point.
- bool isRestorePt(MachineInstr *Pt) const {
- return RestorePt2VirtMap.find(Pt) != RestorePt2VirtMap.end();
- }
-
- /// @brief returns the virtual registers that should be restored due to
- /// splitting right after the specified MachineInstr.
- std::vector<unsigned> &getRestorePtRestores(MachineInstr *Pt) {
- return RestorePt2VirtMap[Pt];
- }
-
- /// @brief records the specified MachineInstr as a restore point for virtReg.
- void addRestorePoint(unsigned virtReg, MachineInstr *Pt) {
- std::map<MachineInstr*, std::vector<unsigned> >::iterator I =
- RestorePt2VirtMap.find(Pt);
- if (I != RestorePt2VirtMap.end())
- I->second.push_back(virtReg);
- else {
- std::vector<unsigned> Virts;
- Virts.push_back(virtReg);
- RestorePt2VirtMap.insert(std::make_pair(Pt, Virts));
- }
- }
-
- /// @brief - transfer restore point information from one instruction to
- /// another.
- void transferRestorePts(MachineInstr *Old, MachineInstr *New) {
- std::map<MachineInstr*, std::vector<unsigned> >::iterator I =
- RestorePt2VirtMap.find(Old);
- if (I == RestorePt2VirtMap.end())
- return;
- while (!I->second.empty()) {
- unsigned virtReg = I->second.back();
- I->second.pop_back();
- addRestorePoint(virtReg, New);
- }
- RestorePt2VirtMap.erase(I);
- }
-
- /// @brief records that the specified physical register must be spilled
- /// around the specified machine instr.
- void addEmergencySpill(unsigned PhysReg, MachineInstr *MI) {
- if (EmergencySpillMap.find(MI) != EmergencySpillMap.end())
- EmergencySpillMap[MI].push_back(PhysReg);
- else {
- std::vector<unsigned> PhysRegs;
- PhysRegs.push_back(PhysReg);
- EmergencySpillMap.insert(std::make_pair(MI, PhysRegs));
- }
- }
-
- /// @brief returns true if one or more physical registers must be spilled
- /// around the specified instruction.
- bool hasEmergencySpills(MachineInstr *MI) const {
- return EmergencySpillMap.find(MI) != EmergencySpillMap.end();
- }
-
- /// @brief returns the physical registers to be spilled and restored around
- /// the instruction.
- std::vector<unsigned> &getEmergencySpills(MachineInstr *MI) {
- return EmergencySpillMap[MI];
- }
-
- /// @brief - transfer emergency spill information from one instruction to
- /// another.
- void transferEmergencySpills(MachineInstr *Old, MachineInstr *New) {
- std::map<MachineInstr*,std::vector<unsigned> >::iterator I =
- EmergencySpillMap.find(Old);
- if (I == EmergencySpillMap.end())
- return;
- while (!I->second.empty()) {
- unsigned virtReg = I->second.back();
- I->second.pop_back();
- addEmergencySpill(virtReg, New);
- }
- EmergencySpillMap.erase(I);
- }
-
- /// @brief return or get an emergency spill slot for the register class.
- int getEmergencySpillSlot(const TargetRegisterClass *RC);
-
- /// @brief Return lowest spill slot index.
- int getLowSpillSlot() const {
- return LowSpillSlot;
- }
-
- /// @brief Return highest spill slot index.
- int getHighSpillSlot() const {
- return HighSpillSlot;
- }
-
- /// @brief Records a spill slot use.
- void addSpillSlotUse(int FrameIndex, MachineInstr *MI);
-
- /// @brief Returns true if spill slot has been used.
- bool isSpillSlotUsed(int FrameIndex) const {
- assert(FrameIndex >= 0 && "Spill slot index should not be negative!");
- return !SpillSlotToUsesMap[FrameIndex-LowSpillSlot].empty();
- }
-
- /// @brief Mark the specified register as being implicitly defined.
- void setIsImplicitlyDefined(unsigned VirtReg) {
- ImplicitDefed.set(TargetRegisterInfo::virtReg2Index(VirtReg));
- }
-
- /// @brief Returns true if the virtual register is implicitly defined.
- bool isImplicitlyDefined(unsigned VirtReg) const {
- return ImplicitDefed[TargetRegisterInfo::virtReg2Index(VirtReg)];
- }
-
- /// @brief Updates information about the specified virtual register's value
- /// folded into newMI machine instruction.
- void virtFolded(unsigned VirtReg, MachineInstr *OldMI, MachineInstr *NewMI,
- ModRef MRInfo);
-
- /// @brief Updates information about the specified virtual register's value
- /// folded into the specified machine instruction.
- void virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo);
-
- /// @brief returns the virtual registers' values folded in memory
- /// operands of this instruction
- std::pair<MI2VirtMapTy::const_iterator, MI2VirtMapTy::const_iterator>
- getFoldedVirts(MachineInstr* MI) const {
- return MI2VirtMap.equal_range(MI);
- }
-
- /// RemoveMachineInstrFromMaps - MI is being erased, remove it from the
- /// folded instruction map and spill point map.
- void RemoveMachineInstrFromMaps(MachineInstr *MI);
-
- /// FindUnusedRegisters - Gather a list of allocatable registers that
- /// have not been allocated to any virtual register.
- bool FindUnusedRegisters(LiveIntervals* LIs);
-
- /// HasUnusedRegisters - Return true if there are any allocatable registers
- /// that have not been allocated to any virtual register.
- bool HasUnusedRegisters() const {
- return !UnusedRegs.none();
- }
-
- /// setRegisterUsed - Remember the physical register is now used.
- void setRegisterUsed(unsigned Reg) {
- UnusedRegs.reset(Reg);
- }
-
- /// isRegisterUnused - Return true if the physical register has not been
- /// used.
- bool isRegisterUnused(unsigned Reg) const {
- return UnusedRegs[Reg];
- }
-
- /// getFirstUnusedRegister - Return the first physical register that has not
- /// been used.
- unsigned getFirstUnusedRegister(const TargetRegisterClass *RC) {
- int Reg = UnusedRegs.find_first();
- while (Reg != -1) {
- if (allocatableRCRegs[RC][Reg])
- return (unsigned)Reg;
- Reg = UnusedRegs.find_next(Reg);
- }
- return 0;
- }
-
/// rewrite - Rewrite all instructions in MF to use only physical registers
/// by mapping all virtual register operands to their assigned physical
/// registers.
diff --git a/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp b/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp
deleted file mode 100644
index a5ec797..0000000
--- a/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp
+++ /dev/null
@@ -1,2633 +0,0 @@
-//===-- llvm/CodeGen/Rewriter.cpp - Rewriter -----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "virtregrewriter"
-#include "VirtRegRewriter.h"
-#include "VirtRegMap.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-STATISTIC(NumDSE , "Number of dead stores elided");
-STATISTIC(NumDSS , "Number of dead spill slots removed");
-STATISTIC(NumCommutes, "Number of instructions commuted");
-STATISTIC(NumDRM , "Number of re-materializable defs elided");
-STATISTIC(NumStores , "Number of stores added");
-STATISTIC(NumPSpills , "Number of physical register spills");
-STATISTIC(NumOmitted , "Number of reloads omitted");
-STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
-STATISTIC(NumCopified, "Number of available reloads turned into copies");
-STATISTIC(NumReMats , "Number of re-materialization");
-STATISTIC(NumLoads , "Number of loads added");
-STATISTIC(NumReused , "Number of values reused");
-STATISTIC(NumDCE , "Number of copies elided");
-STATISTIC(NumSUnfold , "Number of stores unfolded");
-STATISTIC(NumModRefUnfold, "Number of modref unfolded");
-
-namespace {
- enum RewriterName { local, trivial };
-}
-
-static cl::opt<RewriterName>
-RewriterOpt("rewriter",
- cl::desc("Rewriter to use (default=local)"),
- cl::Prefix,
- cl::values(clEnumVal(local, "local rewriter"),
- clEnumVal(trivial, "trivial rewriter"),
- clEnumValEnd),
- cl::init(local));
-
-static cl::opt<bool>
-ScheduleSpills("schedule-spills",
- cl::desc("Schedule spill code"),
- cl::init(false));
-
-VirtRegRewriter::~VirtRegRewriter() {}
-
-/// substitutePhysReg - Replace virtual register in MachineOperand with a
-/// physical register. Do the right thing with the sub-register index.
-/// Note that operands may be added, so the MO reference is no longer valid.
-static void substitutePhysReg(MachineOperand &MO, unsigned Reg,
- const TargetRegisterInfo &TRI) {
- if (MO.getSubReg()) {
- MO.substPhysReg(Reg, TRI);
-
- // Any kill flags apply to the full virtual register, so they also apply to
- // the full physical register.
- // We assume that partial defs have already been decorated with a super-reg
- // <imp-def> operand by LiveIntervals.
- MachineInstr &MI = *MO.getParent();
- if (MO.isUse() && !MO.isUndef() &&
- (MO.isKill() || MI.isRegTiedToDefOperand(&MO-&MI.getOperand(0))))
- MI.addRegisterKilled(Reg, &TRI, /*AddIfNotFound=*/ true);
- } else {
- MO.setReg(Reg);
- }
-}
-
-namespace {
-
-/// This class is intended for use with the new spilling framework only. It
-/// rewrites vreg def/uses to use the assigned preg, but does not insert any
-/// spill code.
-struct TrivialRewriter : public VirtRegRewriter {
-
- bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
- LiveIntervals* LIs) {
- DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
- DEBUG(dbgs() << "********** Function: "
- << MF.getFunction()->getName() << '\n');
- DEBUG(dbgs() << "**** Machine Instrs"
- << "(NOTE! Does not include spills and reloads!) ****\n");
- DEBUG(MF.dump());
-
- MachineRegisterInfo *mri = &MF.getRegInfo();
- const TargetRegisterInfo *tri = MF.getTarget().getRegisterInfo();
-
- bool changed = false;
-
- for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
- liItr != liEnd; ++liItr) {
-
- const LiveInterval *li = liItr->second;
- unsigned reg = li->reg;
-
- if (TargetRegisterInfo::isPhysicalRegister(reg)) {
- if (!li->empty())
- mri->setPhysRegUsed(reg);
- }
- else {
- if (!VRM.hasPhys(reg))
- continue;
- unsigned pReg = VRM.getPhys(reg);
- mri->setPhysRegUsed(pReg);
- // Copy the register use-list before traversing it.
- SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
- for (MachineRegisterInfo::reg_iterator I = mri->reg_begin(reg),
- E = mri->reg_end(); I != E; ++I)
- reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
- for (unsigned N=0; N != reglist.size(); ++N)
- substitutePhysReg(reglist[N].first->getOperand(reglist[N].second),
- pReg, *tri);
- changed |= !reglist.empty();
- }
- }
-
- DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
- DEBUG(MF.dump());
-
- return changed;
- }
-
-};
-
-}
-
-// ************************************************************************ //
-
-namespace {
-
-/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
-/// from top down, keep track of which spill slots or remat are available in
-/// each register.
-///
-/// Note that not all physregs are created equal here. In particular, some
-/// physregs are reloads that we are allowed to clobber or ignore at any time.
-/// Other physregs are values that the register allocated program is using
-/// that we cannot CHANGE, but we can read if we like. We keep track of this
-/// on a per-stack-slot / remat id basis as the low bit in the value of the
-/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
- /// this bit and addAvailable sets it if CanClobber is true.
-class AvailableSpills {
- const TargetRegisterInfo *TRI;
- const TargetInstrInfo *TII;
-
- // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
- // or remat'ed virtual register values that are still available, due to
- // being loaded or stored to, but not invalidated yet.
- std::map<int, unsigned> SpillSlotsOrReMatsAvailable;
-
- // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
- // indicating which stack slot values are currently held by a physreg. This
- // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
- // physreg is modified.
- std::multimap<unsigned, int> PhysRegsAvailable;
-
- void disallowClobberPhysRegOnly(unsigned PhysReg);
-
- void ClobberPhysRegOnly(unsigned PhysReg);
-public:
- AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
- : TRI(tri), TII(tii) {
- }
-
- /// clear - Reset the state.
- void clear() {
- SpillSlotsOrReMatsAvailable.clear();
- PhysRegsAvailable.clear();
- }
-
- const TargetRegisterInfo *getRegInfo() const { return TRI; }
-
- /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
- /// available in a physical register, return that PhysReg, otherwise
- /// return 0.
- unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
- std::map<int, unsigned>::const_iterator I =
- SpillSlotsOrReMatsAvailable.find(Slot);
- if (I != SpillSlotsOrReMatsAvailable.end()) {
- return I->second >> 1; // Remove the CanClobber bit.
- }
- return 0;
- }
-
- /// addAvailable - Mark that the specified stack slot / remat is available
- /// in the specified physreg. If CanClobber is true, the physreg can be
- /// modified at any time without changing the semantics of the program.
- void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
- // If this stack slot is thought to be available in some other physreg,
- // remove its record.
- ModifyStackSlotOrReMat(SlotOrReMat);
-
- PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
- SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) |
- (unsigned)CanClobber;
-
- if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Remembering RM#"
- << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Remembering SS#" << SlotOrReMat);
- DEBUG(dbgs() << " in physreg " << TRI->getName(Reg)
- << (CanClobber ? " canclobber" : "") << "\n");
- }
-
- /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
- /// the value of the specified stackslot register if it desires. The
- /// specified stack slot must be available in a physreg for this query to
- /// make sense.
- bool canClobberPhysRegForSS(int SlotOrReMat) const {
- assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
- "Value not available!");
- return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
- }
-
- /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
- /// physical register where values for some stack slot(s) might be
- /// available.
- bool canClobberPhysReg(unsigned PhysReg) const {
- std::multimap<unsigned, int>::const_iterator I =
- PhysRegsAvailable.lower_bound(PhysReg);
- while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
- int SlotOrReMat = I->second;
- I++;
- if (!canClobberPhysRegForSS(SlotOrReMat))
- return false;
- }
- return true;
- }
-
- /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
- /// stackslot register. The register is still available but is no longer
- /// allowed to be modified.
- void disallowClobberPhysReg(unsigned PhysReg);
-
- /// ClobberPhysReg - This is called when the specified physreg changes
- /// value. We use this to invalidate any info about stuff that lives in
- /// it and any of its aliases.
- void ClobberPhysReg(unsigned PhysReg);
-
- /// ModifyStackSlotOrReMat - This method is called when the value in a stack
- /// slot changes. This removes information about which register the
- /// previous value for this slot lives in (as the previous value is dead
- /// now).
- void ModifyStackSlotOrReMat(int SlotOrReMat);
-
- /// ClobberSharingStackSlots - When a register mapped to a stack slot changes,
- /// other stack slots sharing the same register are no longer valid.
- void ClobberSharingStackSlots(int StackSlot);
-
- /// AddAvailableRegsToLiveIn - Availability information is being kept coming
- /// into the specified MBB. Add available physical registers as potential
- /// live-in's. If they are reused in the MBB, they will be added to the
- /// live-in set to make the register scavenger and the post-allocation
- /// scheduler happy.
- void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps);
-};
-
-}
-
-// ************************************************************************ //
-
-// Given a location where a reload of a spilled register or a remat of
-// a constant is to be inserted, attempt to find a safe location to
-// insert the load at an earlier point in the basic-block, to hide
-// latency of the load and to avoid address-generation interlock
-// issues.
-static MachineBasicBlock::iterator
-ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
- MachineBasicBlock::iterator const Begin,
- unsigned PhysReg,
- const TargetRegisterInfo *TRI,
- bool DoReMat,
- int SSorRMId,
- const TargetInstrInfo *TII,
- const MachineFunction &MF)
-{
- if (!ScheduleSpills)
- return InsertLoc;
-
- // Spill backscheduling is of primary interest to addresses, so
- // don't do anything if the register isn't in the register class
- // used for pointers.
-
- const TargetLowering *TL = MF.getTarget().getTargetLowering();
-
- if (!TL->isTypeLegal(TL->getPointerTy()))
- // Believe it or not, this is true on 16-bit targets like PIC16.
- return InsertLoc;
-
- const TargetRegisterClass *ptrRegClass =
- TL->getRegClassFor(TL->getPointerTy());
- if (!ptrRegClass->contains(PhysReg))
- return InsertLoc;
-
- // Scan upwards through the preceding instructions. If an instruction doesn't
- // reference the stack slot or the register we're loading, we can
- // backschedule the reload up past it.
- MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
- while (NewInsertLoc != Begin) {
- MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
- for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
- MachineOperand &Op = Prev->getOperand(i);
- if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
- goto stop;
- }
- if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
- Prev->findRegisterDefOperand(PhysReg))
- goto stop;
- for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
- if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
- Prev->findRegisterDefOperand(*Alias))
- goto stop;
- NewInsertLoc = Prev;
- }
-stop:;
-
- // If we made it to the beginning of the block, turn around and move back
- // down just past any existing reloads. They're likely to be reloads/remats
- // for instructions earlier than what our current reload/remat is for, so
- // they should be scheduled earlier.
- if (NewInsertLoc == Begin) {
- int FrameIdx;
- while (InsertLoc != NewInsertLoc &&
- (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
- TII->isTriviallyReMaterializable(NewInsertLoc)))
- ++NewInsertLoc;
- }
-
- return NewInsertLoc;
-}
-
-namespace {
-
-// ReusedOp - For each reused operand, we keep track of a bit of information,
-// in case we need to rollback upon processing a new operand. See comments
-// below.
-struct ReusedOp {
- // The MachineInstr operand that reused an available value.
- unsigned Operand;
-
- // StackSlotOrReMat - The spill slot or remat id of the value being reused.
- unsigned StackSlotOrReMat;
-
- // PhysRegReused - The physical register the value was available in.
- unsigned PhysRegReused;
-
- // AssignedPhysReg - The physreg that was assigned for use by the reload.
- unsigned AssignedPhysReg;
-
- // VirtReg - The virtual register itself.
- unsigned VirtReg;
-
- ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
- unsigned vreg)
- : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
- AssignedPhysReg(apr), VirtReg(vreg) {}
-};
-
-/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
-/// is reused instead of reloaded.
-class ReuseInfo {
- MachineInstr &MI;
- std::vector<ReusedOp> Reuses;
- BitVector PhysRegsClobbered;
-public:
- ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
- PhysRegsClobbered.resize(tri->getNumRegs());
- }
-
- bool hasReuses() const {
- return !Reuses.empty();
- }
-
- /// addReuse - If we choose to reuse a virtual register that is already
- /// available instead of reloading it, remember that we did so.
- void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
- unsigned PhysRegReused, unsigned AssignedPhysReg,
- unsigned VirtReg) {
- // If the reload is to the assigned register anyway, no undo will be
- // required.
- if (PhysRegReused == AssignedPhysReg) return;
-
- // Otherwise, remember this.
- Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
- AssignedPhysReg, VirtReg));
- }
-
- void markClobbered(unsigned PhysReg) {
- PhysRegsClobbered.set(PhysReg);
- }
-
- bool isClobbered(unsigned PhysReg) const {
- return PhysRegsClobbered.test(PhysReg);
- }
-
- /// GetRegForReload - We are about to emit a reload into PhysReg. If there
- /// is some other operand that is using the specified register, either pick
- /// a new register to use, or evict the previous reload and use this reg.
- unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
- MachineFunction &MF, MachineInstr *MI,
- AvailableSpills &Spills,
- std::vector<MachineInstr*> &MaybeDeadStores,
- SmallSet<unsigned, 8> &Rejected,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- VirtRegMap &VRM);
-
- /// GetRegForReload - Helper for the above GetRegForReload(). Add a
- /// 'Rejected' set to remember which registers have been considered and
- /// rejected for the reload. This avoids infinite looping in cases like
- /// this:
- /// t1 := op t2, t3
- /// t2 <- assigned r0 for use by the reload but ended up reuse r1
- /// t3 <- assigned r1 for use by the reload but ended up reuse r0
- /// t1 <- desires r1
- /// sees r1 is taken by t2, tries t2's reload register r0
- /// sees r0 is taken by t3, tries t3's reload register r1
- /// sees r1 is taken by t2, tries t2's reload register r0 ...
- unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
- AvailableSpills &Spills,
- std::vector<MachineInstr*> &MaybeDeadStores,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- VirtRegMap &VRM) {
- SmallSet<unsigned, 8> Rejected;
- MachineFunction &MF = *MI->getParent()->getParent();
- const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
- return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
- Rejected, RegKills, KillOps, VRM);
- }
-};
-
-}
-
-// ****************** //
-// Utility Functions //
-// ****************** //
-
-/// findSinglePredSuccessor - Return via reference a vector of machine basic
-/// blocks each of which is a successor of the specified BB and has no other
-/// predecessor.
-static void findSinglePredSuccessor(MachineBasicBlock *MBB,
- SmallVectorImpl<MachineBasicBlock *> &Succs){
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI) {
- MachineBasicBlock *SuccMBB = *SI;
- if (SuccMBB->pred_size() == 1)
- Succs.push_back(SuccMBB);
- }
-}
-
-/// ResurrectConfirmedKill - Helper for ResurrectKill. This register is killed
-/// but not re-defined and it's being reused. Remove the kill flag for the
-/// register and unset the kill's marker and last kill operand.
-static void ResurrectConfirmedKill(unsigned Reg, const TargetRegisterInfo* TRI,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- DEBUG(dbgs() << "Resurrect " << TRI->getName(Reg) << "\n");
-
- MachineOperand *KillOp = KillOps[Reg];
- KillOp->setIsKill(false);
- // KillOps[Reg] might be a def of a super-register.
- unsigned KReg = KillOp->getReg();
- if (!RegKills[KReg])
- return;
-
- assert(KillOps[KReg]->getParent() == KillOp->getParent() &&
- "invalid superreg kill flags");
- KillOps[KReg] = NULL;
- RegKills.reset(KReg);
-
- // If it's a def of a super-register, its other sub-registers are no
- // longer killed as well.
- for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
- DEBUG(dbgs() << " Resurrect subreg " << TRI->getName(*SR) << "\n");
-
- assert(KillOps[*SR]->getParent() == KillOp->getParent() &&
- "invalid subreg kill flags");
- KillOps[*SR] = NULL;
- RegKills.reset(*SR);
- }
-}
-
-/// ResurrectKill - Invalidate kill info associated with a previous MI. An
-/// optimization may have decided that it's safe to reuse a previously killed
-/// register. If we fail to erase the invalid kill flags, then the register
-/// scavenger may later clobber the register used by this MI. Note that this
-/// must be done even if this MI is being deleted! Consider:
-///
-/// USE $r1 (vreg1) <kill>
-/// ...
-/// $r1(vreg3) = COPY $r1 (vreg2)
-///
-/// RegAlloc has smartly assigned all three vregs to the same physreg. Initially
-/// vreg1's only use is a kill. The rewriter doesn't know it should be live
-/// until it rewrites vreg2. At that point it sees that the copy is dead and
-/// deletes it. However, deleting the copy implicitly forwards liveness of $r1
-/// (it's copy coalescing). We must resurrect $r1 by removing the kill flag at
-/// vreg1 before deleting the copy.
-static void ResurrectKill(MachineInstr &MI, unsigned Reg,
- const TargetRegisterInfo* TRI, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
- ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
- return;
- }
- // No previous kill for this reg. Check for subreg kills as well.
- // d4 =
- // store d4, fi#0
- // ...
- // = s8<kill>
- // ...
- // = d4 <avoiding reload>
- for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
- unsigned SReg = *SR;
- if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI)
- ResurrectConfirmedKill(SReg, TRI, RegKills, KillOps);
- }
-}
-
-/// InvalidateKills - MI is going to be deleted. If any of its operands are
-/// marked kill, then invalidate the information.
-static void InvalidateKills(MachineInstr &MI,
- const TargetRegisterInfo* TRI,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- SmallVector<unsigned, 2> *KillRegs = NULL) {
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
- continue;
- unsigned Reg = MO.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg))
- continue;
- if (KillRegs)
- KillRegs->push_back(Reg);
- assert(Reg < KillOps.size());
- if (KillOps[Reg] == &MO) {
- // This operand was the kill, now no longer.
- KillOps[Reg] = NULL;
- RegKills.reset(Reg);
- for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
- if (RegKills[*SR]) {
- assert(KillOps[*SR] == &MO && "bad subreg kill flags");
- KillOps[*SR] = NULL;
- RegKills.reset(*SR);
- }
- }
- }
- else {
- // This operand may have reused a previously killed reg. Keep it live in
- // case it continues to be used after erasing this instruction.
- ResurrectKill(MI, Reg, TRI, RegKills, KillOps);
- }
- }
-}
-
-/// InvalidateRegDef - If the def operand of the specified def MI is now dead
-/// (since its spill instruction is removed), mark it isDead. Also checks if
-/// the def MI has other definition operands that are not dead, and returns
-/// that via the HasLiveDef reference.
-static bool InvalidateRegDef(MachineBasicBlock::iterator I,
- MachineInstr &NewDef, unsigned Reg,
- bool &HasLiveDef,
- const TargetRegisterInfo *TRI) {
- // Due to remat, it's possible this reg isn't being reused. That is,
- // the def of this reg (by prev MI) is now dead.
- MachineInstr *DefMI = I;
- MachineOperand *DefOp = NULL;
- for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = DefMI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || !MO.isKill() || MO.isUndef())
- continue;
- if (MO.getReg() == Reg)
- DefOp = &MO;
- else if (!MO.isDead())
- HasLiveDef = true;
- }
- if (!DefOp)
- return false;
-
- bool FoundUse = false, Done = false;
- MachineBasicBlock::iterator E = &NewDef;
- ++I; ++E;
- for (; !Done && I != E; ++I) {
- MachineInstr *NMI = I;
- for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
- MachineOperand &MO = NMI->getOperand(j);
- if (!MO.isReg() || MO.getReg() == 0 ||
- (MO.getReg() != Reg && !TRI->isSubRegister(Reg, MO.getReg())))
- continue;
- if (MO.isUse())
- FoundUse = true;
- Done = true; // Stop after scanning all the operands of this MI.
- }
- }
- if (!FoundUse) {
- // Def is dead!
- DefOp->setIsDead();
- return true;
- }
- return false;
-}
-
-/// UpdateKills - Track and update kill info. If an MI reads a register that is
-/// marked kill, then it must be due to register reuse. Transfer the kill info
-/// over.
-static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- // These do not affect kill info at all.
- if (MI.isDebugValue())
- return;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || !MO.isUse() || MO.isUndef())
- continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0)
- continue;
-
- // This operand may have reused a previously killed reg. Keep it live.
- ResurrectKill(MI, Reg, TRI, RegKills, KillOps);
-
- if (MO.isKill()) {
- RegKills.set(Reg);
- KillOps[Reg] = &MO;
- for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
- RegKills.set(*SR);
- KillOps[*SR] = &MO;
- }
- }
- }
-
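- // Second pass: a def by MI ends any pending kill of the defined register
- // and of its sub- and super-registers.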
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || !MO.getReg() || !MO.isDef())
- continue;
- unsigned Reg = MO.getReg();
- RegKills.reset(Reg);
- KillOps[Reg] = NULL;
- // It also defines (or partially defines) its aliases.
- for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
- RegKills.reset(*SR);
- KillOps[*SR] = NULL;
- }
- for (const unsigned *SR = TRI->getSuperRegisters(Reg); *SR; ++SR) {
- RegKills.reset(*SR);
- KillOps[*SR] = NULL;
- }
- }
-}
-
-/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
-///
-static void ReMaterialize(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MII,
- unsigned DestReg, unsigned Reg,
- const TargetInstrInfo *TII,
- const TargetRegisterInfo *TRI,
- VirtRegMap &VRM) {
- MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
-#ifndef NDEBUG
- const MCInstrDesc &MCID = ReMatDefMI->getDesc();
- assert(MCID.getNumDefs() == 1 &&
- "Don't know how to remat instructions that define > 1 values!");
-#endif
- TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
- MachineInstr *NewMI = prior(MII);
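- // The rematerialized instruction may still refer to virtual registers;
- // rewrite each such operand to its assigned physreg.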
- for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = NewMI->getOperand(i);
- if (!MO.isReg() || MO.getReg() == 0)
- continue;
- unsigned VirtReg = MO.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
- continue;
- assert(MO.isUse());
- unsigned Phys = VRM.getPhys(VirtReg);
- assert(Phys && "Virtual register is not assigned a register?");
- substitutePhysReg(MO, Phys, *TRI);
- }
- ++NumReMats;
-}
-
-/// findSuperReg - Find the super-register in the given register class whose
-/// SubIdx sub-register is SubReg.
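-/// For example, on x86 the GR32 register whose 16-bit sub-register is AX
-/// is EAX.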
-static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
- unsigned SubIdx, const TargetRegisterInfo *TRI) {
- for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
- I != E; ++I) {
- unsigned Reg = *I;
- if (TRI->getSubReg(Reg, SubIdx) == SubReg)
- return Reg;
- }
- return 0;
-}
-
-// ******************************** //
-// Available Spills Implementation //
-// ******************************** //
-
-/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
-/// stackslot register. The register is still available but is no longer
-/// allowed to be modified.
-void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
- std::multimap<unsigned, int>::iterator I =
- PhysRegsAvailable.lower_bound(PhysReg);
- while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
- int SlotOrReMat = I->second;
- I++;
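- // Bit 0 of the map value is the CanClobber flag; the remaining bits hold
- // the physreg, so clearing bit 0 keeps the value available but read-only.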
- assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
- "Bidirectional map mismatch!");
- SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
- DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
- << " copied, it is available for use but can no longer be modified\n");
- }
-}
-
-/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
-/// stackslot register and its aliases. The register and its aliases may
-/// still be available but are no longer allowed to be modified.
-void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
- for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
- disallowClobberPhysRegOnly(*AS);
- disallowClobberPhysRegOnly(PhysReg);
-}
-
-/// ClobberPhysRegOnly - This is called when the specified physreg changes
-/// value. We use this to invalidate any info about stuff we think lives in it.
-void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
- std::multimap<unsigned, int>::iterator I =
- PhysRegsAvailable.lower_bound(PhysReg);
- while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
- int SlotOrReMat = I->second;
- PhysRegsAvailable.erase(I++);
- assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
- "Bidirectional map mismatch!");
- SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
- DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
- << " clobbered, invalidating ");
- if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 <<"\n");
- else
- DEBUG(dbgs() << "SS#" << SlotOrReMat << "\n");
- }
-}
-
-/// ClobberPhysReg - This is called when the specified physreg changes
-/// value. We use this to invalidate any info about stuff we think lives in
-/// it and any of its aliases.
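-/// For example, on x86 clobbering EAX also invalidates any value cached in
-/// AX, AH, or AL.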
-void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
- for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
- ClobberPhysRegOnly(*AS);
- ClobberPhysRegOnly(PhysReg);
-}
-
-/// AddAvailableRegsToLiveIn - Availability information is being kept coming
-/// into the specified MBB. Add available physical registers as potential
-/// live-ins. If they are reused in the MBB, they will be added to the
-/// live-in set to keep the register scavenger and post-allocation scheduler
-/// happy.
-void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- std::set<unsigned> NotAvailable;
- for (std::multimap<unsigned, int>::iterator
- I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
- I != E; ++I) {
- unsigned Reg = I->first;
- const TargetRegisterClass* RC = TRI->getMinimalPhysRegClass(Reg);
- // FIXME: A temporary workaround. We can't reuse an available value if
- // it's not safe to move defs of the virtual register's class, e.g. the
- // X86::RFP* register classes. Do not add it as a live-in.
- if (!TII->isSafeToMoveRegClassDefs(RC))
- // This is no longer available.
- NotAvailable.insert(Reg);
- else {
- MBB.addLiveIn(Reg);
- if (RegKills[Reg])
- ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
- }
-
- // Skip over the same register.
- std::multimap<unsigned, int>::iterator NI = llvm::next(I);
- while (NI != E && NI->first == Reg) {
- ++I;
- ++NI;
- }
- }
-
- for (std::set<unsigned>::iterator I = NotAvailable.begin(),
- E = NotAvailable.end(); I != E; ++I) {
- ClobberPhysReg(*I);
- for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
- *SubRegs; ++SubRegs)
- ClobberPhysReg(*SubRegs);
- }
-}
-
-/// ModifyStackSlotOrReMat - This method is called when the value in a stack
-/// slot changes. This removes information about which register the previous
-/// value for this slot lives in (as the previous value is dead now).
-void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
- std::map<int, unsigned>::iterator It =
- SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
- if (It == SpillSlotsOrReMatsAvailable.end()) return;
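- // Strip the CanClobber bit to recover the physreg holding this value.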
- unsigned Reg = It->second >> 1;
- SpillSlotsOrReMatsAvailable.erase(It);
-
- // This register may hold the value of multiple stack slots; only remove this
- // stack slot from the set of values the register contains.
- std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
- for (; ; ++I) {
- assert(I != PhysRegsAvailable.end() && I->first == Reg &&
- "Map inverse broken!");
- if (I->second == SlotOrReMat) break;
- }
- PhysRegsAvailable.erase(I);
-}
-
-void AvailableSpills::ClobberSharingStackSlots(int StackSlot) {
- std::map<int, unsigned>::iterator It =
- SpillSlotsOrReMatsAvailable.find(StackSlot);
- if (It == SpillSlotsOrReMatsAvailable.end()) return;
- unsigned Reg = It->second >> 1;
-
- // Erase entries in PhysRegsAvailable for other stack slots.
- std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
- while (I != PhysRegsAvailable.end() && I->first == Reg) {
- std::multimap<unsigned, int>::iterator NextI = llvm::next(I);
- if (I->second != StackSlot) {
- DEBUG(dbgs() << "Clobbered sharing SS#" << I->second << " in "
- << PrintReg(Reg, TRI) << '\n');
- SpillSlotsOrReMatsAvailable.erase(I->second);
- PhysRegsAvailable.erase(I);
- }
- I = NextI;
- }
-}
-
-// ************************** //
-// Reuse Info Implementation //
-// ************************** //
-
-/// GetRegForReload - We are about to emit a reload into PhysReg. If there
-/// is some other operand that is using the specified register, either pick
-/// a new register to use, or evict the previous reload and use this reg.
-unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
- unsigned PhysReg,
- MachineFunction &MF,
- MachineInstr *MI, AvailableSpills &Spills,
- std::vector<MachineInstr*> &MaybeDeadStores,
- SmallSet<unsigned, 8> &Rejected,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- VirtRegMap &VRM) {
- const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
- const TargetRegisterInfo *TRI = Spills.getRegInfo();
-
- if (Reuses.empty()) return PhysReg; // This is most often empty.
-
- for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
- ReusedOp &Op = Reuses[ro];
- // If we find some other reuse that was supposed to use this register
- // exactly for its reload, we can change this reload to use ITS reload
- // register. That is, unless its reload register has already been
- // considered and subsequently rejected because it has also been reused
- // by another operand.
- if (Op.PhysRegReused == PhysReg &&
- Rejected.count(Op.AssignedPhysReg) == 0 &&
- RC->contains(Op.AssignedPhysReg)) {
- // Yup, use the reload register that we didn't use before.
- unsigned NewReg = Op.AssignedPhysReg;
- Rejected.insert(PhysReg);
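- // PhysReg joins the Rejected set so deeper recursion cannot hand it
- // back, which guarantees the cycle described above terminates.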
- return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
- Rejected, RegKills, KillOps, VRM);
- } else {
- // Otherwise, we might also have a problem if a previously reused
- // value aliases the new register. If so, codegen the previous reload
- // and use this one.
- unsigned PRRU = Op.PhysRegReused;
- if (TRI->regsOverlap(PRRU, PhysReg)) {
- // Okay, we found out that an alias of a reused register
- // was used. This isn't good because it means we have
- // to undo a previous reuse.
- MachineBasicBlock *MBB = MI->getParent();
- const TargetRegisterClass *AliasRC =
- MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);
-
- // Copy Op out of the vector and remove it, we're going to insert an
- // explicit load for it.
- ReusedOp NewOp = Op;
- Reuses.erase(Reuses.begin()+ro);
-
- // MI may be using only a sub-register of PhysRegUsed.
- unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
- unsigned SubIdx = 0;
- assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
- "A reuse cannot be a virtual register");
- if (PRRU != RealPhysRegUsed) {
- // What was the sub-register index?
- SubIdx = TRI->getSubRegIndex(PRRU, RealPhysRegUsed);
- assert(SubIdx &&
- "Operand physreg is not a sub-register of PhysRegUsed");
- }
-
- // Ok, we're going to try to reload the assigned physreg into the
- // slot that we were supposed to in the first place. However, that
- // register could hold a reuse. Check to see if it conflicts or
- // would prefer us to use a different register.
- unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
- MF, MI, Spills, MaybeDeadStores,
- Rejected, RegKills, KillOps, VRM);
-
- bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
- int SSorRMId = DoReMat
- ? VRM.getReMatId(NewOp.VirtReg) : (int) NewOp.StackSlotOrReMat;
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
- DoReMat, SSorRMId, TII, MF);
-
- if (DoReMat) {
- ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
- TRI, VRM);
- } else {
- TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
- NewOp.StackSlotOrReMat, AliasRC, TRI);
- MachineInstr *LoadMI = prior(InsertLoc);
- VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
- // Any stores to this stack slot are not dead anymore.
- MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
- ++NumLoads;
- }
- Spills.ClobberPhysReg(NewPhysReg);
- Spills.ClobberPhysReg(NewOp.PhysRegReused);
-
- unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) :NewPhysReg;
- MI->getOperand(NewOp.Operand).setReg(RReg);
- MI->getOperand(NewOp.Operand).setSubReg(0);
-
- Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
- UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
- DEBUG(dbgs() << '\t' << *prior(InsertLoc));
-
- DEBUG(dbgs() << "Reuse undone!\n");
- --NumReused;
-
- // Finally, PhysReg is now available, go ahead and use it.
- return PhysReg;
- }
- }
- }
- return PhysReg;
-}
-
-// ************************************************************************ //
-
-/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
-/// stack slot mod/ref. It also checks if it's possible to unfold the
-/// instruction by having it define a specified physical register instead.
-static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
- const TargetInstrInfo *TII,
- const TargetRegisterInfo *TRI,
- VirtRegMap &VRM) {
- if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
- return false;
-
- bool Found = false;
- VirtRegMap::MI2VirtMapTy::const_iterator I, End;
- for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
- unsigned VirtReg = I->second.first;
- VirtRegMap::ModRef MR = I->second.second;
- if (MR & VirtRegMap::isModRef)
- if (VRM.getStackSlot(VirtReg) == SS) {
- Found= TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
- break;
- }
- }
- if (!Found)
- return false;
-
- // Does the instruction use a register that overlaps the scratch register?
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.getReg() == 0)
- continue;
- unsigned Reg = MO.getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- if (!VRM.hasPhys(Reg))
- continue;
- Reg = VRM.getPhys(Reg);
- }
- if (TRI->regsOverlap(PhysReg, Reg))
- return false;
- }
- return true;
-}
-
-/// FindFreeRegister - Find a free register of a given register class by looking
-/// at (at most) the last two machine instructions.
-static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
- MachineBasicBlock &MBB,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
- BitVector &AllocatableRegs) {
- BitVector Defs(TRI->getNumRegs());
- BitVector Uses(TRI->getNumRegs());
- SmallVector<unsigned, 4> LocalUses;
- SmallVector<unsigned, 4> Kills;
-
- // Take a look at 2 instructions at most.
- unsigned Count = 0;
- while (Count < 2) {
- if (MII == MBB.begin())
- break;
- MachineInstr *PrevMI = prior(MII);
- MII = PrevMI;
-
- if (PrevMI->isDebugValue())
- continue; // Skip over dbg_value instructions.
- ++Count;
-
- for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = PrevMI->getOperand(i);
- if (!MO.isReg() || MO.getReg() == 0)
- continue;
- unsigned Reg = MO.getReg();
- if (MO.isDef()) {
- Defs.set(Reg);
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- Defs.set(*AS);
- } else {
- LocalUses.push_back(Reg);
- if (MO.isKill() && AllocatableRegs[Reg])
- Kills.push_back(Reg);
- }
- }
-
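- // A register killed by this instruction and neither defined nor used by
- // the later instructions already scanned is free for reuse.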
- for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
- unsigned Kill = Kills[i];
- if (!Defs[Kill] && !Uses[Kill] &&
- RC->contains(Kill))
- return Kill;
- }
- for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
- unsigned Reg = LocalUses[i];
- Uses.set(Reg);
- for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
- Uses.set(*AS);
- }
- }
-
- return 0;
-}
-
-static
-void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
- const TargetRegisterInfo &TRI) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == VirtReg)
- substitutePhysReg(MO, PhysReg, TRI);
- }
-}
-
-namespace {
-
-struct RefSorter {
- bool operator()(const std::pair<MachineInstr*, int> &A,
- const std::pair<MachineInstr*, int> &B) {
- return A.second < B.second;
- }
-};
-
-// ***************************** //
-// Local Spiller Implementation //
-// ***************************** //
-
-class LocalRewriter : public VirtRegRewriter {
- MachineRegisterInfo *MRI;
- const TargetRegisterInfo *TRI;
- const TargetInstrInfo *TII;
- VirtRegMap *VRM;
- LiveIntervals *LIs;
- BitVector AllocatableRegs;
- DenseMap<MachineInstr*, unsigned> DistanceMap;
- DenseMap<int, SmallVector<MachineInstr*,4> > Slot2DbgValues;
-
- MachineBasicBlock *MBB; // Basic block currently being processed.
-
-public:
-
- bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
- LiveIntervals* LIs);
-
-private:
- void EraseInstr(MachineInstr *MI) {
- VRM->RemoveMachineInstrFromMaps(MI);
- LIs->RemoveMachineInstrFromMaps(MI);
- MI->eraseFromParent();
- }
-
- bool OptimizeByUnfold2(unsigned VirtReg, int SS,
- MachineBasicBlock::iterator &MII,
- std::vector<MachineInstr*> &MaybeDeadStores,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps);
-
- bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
- std::vector<MachineInstr*> &MaybeDeadStores,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps);
-
- bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
- unsigned VirtReg, unsigned SrcReg, int SS,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- const TargetRegisterInfo *TRI);
-
- void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
- int Idx, unsigned PhysReg, int StackSlot,
- const TargetRegisterClass *RC,
- bool isAvailable, MachineInstr *&LastStore,
- AvailableSpills &Spills,
- SmallSet<MachineInstr*, 4> &ReMatDefs,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps);
-
- void TransferDeadness(unsigned Reg, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps);
-
- bool InsertEmergencySpills(MachineInstr *MI);
-
- bool InsertRestores(MachineInstr *MI,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps);
-
- bool InsertSpills(MachineInstr *MI);
-
- void ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
- std::vector<MachineInstr*> &MaybeDeadStores,
- BitVector &RegKills,
- ReuseInfo &ReusedOperands,
- std::vector<MachineOperand*> &KillOps);
-
- void RewriteMBB(LiveIntervals *LIs,
- AvailableSpills &Spills, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps);
-};
-}
-
-bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
- LiveIntervals* lis) {
- MRI = &MF.getRegInfo();
- TRI = MF.getTarget().getRegisterInfo();
- TII = MF.getTarget().getInstrInfo();
- VRM = &vrm;
- LIs = lis;
- AllocatableRegs = TRI->getAllocatableSet(MF);
- DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
- << MF.getFunction()->getName() << "':\n");
- DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
- " reloads!) ****\n");
- DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));
-
- // Spills - Keep track of which spilled values are available in physregs
- // so that we can choose to reuse the physregs instead of emitting
- // reloads. This is usually refreshed per basic block.
- AvailableSpills Spills(TRI, TII);
-
- // Keep track of kill information.
- BitVector RegKills(TRI->getNumRegs());
- std::vector<MachineOperand*> KillOps;
- KillOps.resize(TRI->getNumRegs(), NULL);
-
- // SinglePredSuccs - Successor blocks which have a single predecessor.
- SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
- SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
-
- // Traverse the basic blocks depth first.
- MachineBasicBlock *Entry = MF.begin();
- SmallPtrSet<MachineBasicBlock*,16> Visited;
- for (df_ext_iterator<MachineBasicBlock*,
- SmallPtrSet<MachineBasicBlock*,16> >
- DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
- DFI != E; ++DFI) {
- MBB = *DFI;
- if (!EarlyVisited.count(MBB))
- RewriteMBB(LIs, Spills, RegKills, KillOps);
-
- // If this MBB is the only predecessor of a successor, keep the
- // availability information and visit it next.
- do {
- // Keep visiting single predecessor successor as long as possible.
- SinglePredSuccs.clear();
- findSinglePredSuccessor(MBB, SinglePredSuccs);
- if (SinglePredSuccs.empty())
- MBB = 0;
- else {
- // FIXME: There may be more than one successor, each of which has MBB
- // as its only predecessor.
- MBB = SinglePredSuccs[0];
- if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
- Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
- RewriteMBB(LIs, Spills, RegKills, KillOps);
- }
- }
- } while (MBB);
-
- // Clear the availability info.
- Spills.clear();
- }
-
- DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
- DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));
-
- // Mark unused spill slots.
- MachineFrameInfo *MFI = MF.getFrameInfo();
- int SS = VRM->getLowSpillSlot();
- if (SS != VirtRegMap::NO_STACK_SLOT) {
- for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS) {
- SmallVector<MachineInstr*, 4> &DbgValues = Slot2DbgValues[SS];
- if (!VRM->isSpillSlotUsed(SS)) {
- MFI->RemoveStackObject(SS);
- for (unsigned j = 0, ee = DbgValues.size(); j != ee; ++j) {
- MachineInstr *DVMI = DbgValues[j];
- DEBUG(dbgs() << "Removing debug info referencing FI#" << SS << '\n');
- EraseInstr(DVMI);
- }
- ++NumDSS;
- }
- DbgValues.clear();
- }
- }
- Slot2DbgValues.clear();
-
- return true;
-}
-
-/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
-/// a scratch register is available.
-/// xorq %r12<kill>, %r13
-/// addq %rax, -184(%rbp)
-/// addq %r13, -184(%rbp)
-/// ==>
-/// xorq %r12<kill>, %r13
-/// movq -184(%rbp), %r12
-/// addq %rax, %r12
-/// addq %r13, %r12
-/// movq %r12, -184(%rbp)
-bool LocalRewriter::
-OptimizeByUnfold2(unsigned VirtReg, int SS,
- MachineBasicBlock::iterator &MII,
- std::vector<MachineInstr*> &MaybeDeadStores,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
-
- MachineBasicBlock::iterator NextMII = llvm::next(MII);
- // Skip over dbg_value instructions.
- while (NextMII != MBB->end() && NextMII->isDebugValue())
- NextMII = llvm::next(NextMII);
- if (NextMII == MBB->end())
- return false;
-
- if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
- return false;
-
- // Now let's see if the last couple of instructions happen to have freed up
- // a register.
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
- unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
- if (!PhysReg)
- return false;
-
- MachineFunction &MF = *MBB->getParent();
- TRI = MF.getTarget().getRegisterInfo();
- MachineInstr &MI = *MII;
- if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
- return false;
-
- // If the next instruction also folds the same SS modref and can be unfolded,
- // then it's worthwhile to issue a load from SS into the free register and
- // then unfold these instructions.
- if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
- return false;
-
- // Back-schedule reloads and remats.
- ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);
-
- // Load from SS to the spare physical register.
- TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC, TRI);
- // This invalidates PhysReg.
- Spills.ClobberPhysReg(PhysReg);
- // Remember it's available.
- Spills.addAvailable(SS, PhysReg);
- MaybeDeadStores[SS] = NULL;
-
- // Unfold current MI.
- SmallVector<MachineInstr*, 4> NewMIs;
- if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
- llvm_unreachable("Unable unfold the load / store folding instruction!");
- assert(NewMIs.size() == 1);
- AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
- VRM->transferRestorePts(&MI, NewMIs[0]);
- MII = MBB->insert(MII, NewMIs[0]);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- EraseInstr(&MI);
- ++NumModRefUnfold;
-
- // Unfold next instructions that fold the same SS.
- do {
- MachineInstr &NextMI = *NextMII;
- NextMII = llvm::next(NextMII);
- NewMIs.clear();
- if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
- llvm_unreachable("Unable unfold the load / store folding instruction!");
- assert(NewMIs.size() == 1);
- AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
- VRM->transferRestorePts(&NextMI, NewMIs[0]);
- MBB->insert(NextMII, NewMIs[0]);
- InvalidateKills(NextMI, TRI, RegKills, KillOps);
- EraseInstr(&NextMI);
- ++NumModRefUnfold;
- // Skip over dbg_value instructions.
- while (NextMII != MBB->end() && NextMII->isDebugValue())
- NextMII = llvm::next(NextMII);
- if (NextMII == MBB->end())
- break;
- } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));
-
- // Store the value back into SS.
- TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC, TRI);
- MachineInstr *StoreMI = prior(NextMII);
- VRM->addSpillSlotUse(SS, StoreMI);
- VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
-
- return true;
-}
-
-/// OptimizeByUnfold - Turn a store folding instruction into a load folding
-/// instruction. e.g.
-/// xorl %edi, %eax
-/// movl %eax, -32(%ebp)
-/// movl -36(%ebp), %eax
-/// orl %eax, -32(%ebp)
-/// ==>
-/// xorl %edi, %eax
-/// orl -36(%ebp), %eax
-/// mov %eax, -32(%ebp)
-/// This enables unfolding optimization for a subsequent instruction which will
-/// also eliminate the newly introduced store instruction.
-bool LocalRewriter::
-OptimizeByUnfold(MachineBasicBlock::iterator &MII,
- std::vector<MachineInstr*> &MaybeDeadStores,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- MachineFunction &MF = *MBB->getParent();
- MachineInstr &MI = *MII;
- unsigned UnfoldedOpc = 0;
- unsigned UnfoldPR = 0;
- unsigned UnfoldVR = 0;
- int FoldedSS = VirtRegMap::NO_STACK_SLOT;
- VirtRegMap::MI2VirtMapTy::const_iterator I, End;
- for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
- // Only transform a MI that folds a single register.
- if (UnfoldedOpc)
- return false;
- UnfoldVR = I->second.first;
- VirtRegMap::ModRef MR = I->second.second;
- // MI2VirtMap can be updated, which invalidates the iterator.
- // Increment the iterator first.
- ++I;
- if (VRM->isAssignedReg(UnfoldVR))
- continue;
- // If this reference is not a use, any previous store is now dead.
- // Otherwise, the store to this stack slot is not dead anymore.
- FoldedSS = VRM->getStackSlot(UnfoldVR);
- MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
- if (DeadStore && (MR & VirtRegMap::isModRef)) {
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
- if (!PhysReg || !DeadStore->readsRegister(PhysReg))
- continue;
- UnfoldPR = PhysReg;
- UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
- false, true);
- }
- }
-
- if (!UnfoldedOpc) {
- if (!UnfoldVR)
- return false;
-
- // Look for other unfolding opportunities.
- return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
- RegKills, KillOps);
- }
-
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
- continue;
- unsigned VirtReg = MO.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
- continue;
- if (VRM->isAssignedReg(VirtReg)) {
- unsigned PhysReg = VRM->getPhys(VirtReg);
- if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
- return false;
- } else if (VRM->isReMaterialized(VirtReg))
- continue;
- int SS = VRM->getStackSlot(VirtReg);
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
- if (PhysReg) {
- if (TRI->regsOverlap(PhysReg, UnfoldPR))
- return false;
- continue;
- }
- if (VRM->hasPhys(VirtReg)) {
- PhysReg = VRM->getPhys(VirtReg);
- if (!TRI->regsOverlap(PhysReg, UnfoldPR))
- continue;
- }
-
- // Ok, we'll need to reload the value into a register which makes
- // it impossible to perform the store unfolding optimization later.
- // Let's see if it is possible to fold the load if the store is
- // unfolded. This allows us to perform the store unfolding
- // optimization.
- SmallVector<MachineInstr*, 4> NewMIs;
- if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
- assert(NewMIs.size() == 1);
- MachineInstr *NewMI = NewMIs.back();
- MBB->insert(MII, NewMI);
- NewMIs.clear();
- int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
- assert(Idx != -1);
- SmallVector<unsigned, 1> Ops;
- Ops.push_back(Idx);
- MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
- NewMI->eraseFromParent();
- if (FoldedMI) {
- VRM->addSpillSlotUse(SS, FoldedMI);
- if (!VRM->hasPhys(UnfoldVR))
- VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
- VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
- MII = FoldedMI;
- InvalidateKills(MI, TRI, RegKills, KillOps);
- EraseInstr(&MI);
- return true;
- }
- }
- }
-
- return false;
-}
-
-/// CommuteChangesDestination - We are looking for r0 = op r1, r2, where
-/// SrcReg is r1 and it is tied to r0. Return true if after
-/// commuting this instruction it will be r0 = op r2, r1.
-static bool CommuteChangesDestination(MachineInstr *DefMI,
- const MCInstrDesc &MCID,
- unsigned SrcReg,
- const TargetInstrInfo *TII,
- unsigned &DstIdx) {
- if (MCID.getNumDefs() != 1 && MCID.getNumOperands() != 3)
- return false;
- if (!DefMI->getOperand(1).isReg() ||
- DefMI->getOperand(1).getReg() != SrcReg)
- return false;
- unsigned DefIdx;
- if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
- return false;
- unsigned SrcIdx1, SrcIdx2;
- if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
- return false;
- if (SrcIdx1 == 1 && SrcIdx2 == 2) {
- DstIdx = 2;
- return true;
- }
- return false;
-}
-
-/// CommuteToFoldReload -
-/// Look for
-/// r1 = load fi#1
-/// r1 = op r1, r2<kill>
-/// store r1, fi#1
-///
-/// If op is commutable and r2 is killed, then we can xform these to
-/// r2 = op r2, fi#1
-/// store r2, fi#1
-bool LocalRewriter::
-CommuteToFoldReload(MachineBasicBlock::iterator &MII,
- unsigned VirtReg, unsigned SrcReg, int SS,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- const TargetRegisterInfo *TRI) {
- if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
- return false;
-
- MachineInstr &MI = *MII;
- MachineBasicBlock::iterator DefMII = prior(MII);
- MachineInstr *DefMI = DefMII;
- const MCInstrDesc &MCID = DefMI->getDesc();
- unsigned NewDstIdx;
- if (DefMII != MBB->begin() &&
- MCID.isCommutable() &&
- CommuteChangesDestination(DefMI, MCID, SrcReg, TII, NewDstIdx)) {
- MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
- unsigned NewReg = NewDstMO.getReg();
- if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
- return false;
- MachineInstr *ReloadMI = prior(DefMII);
- int FrameIdx;
- unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
- if (DestReg != SrcReg || FrameIdx != SS)
- return false;
- int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
- if (UseIdx == -1)
- return false;
- unsigned DefIdx;
- if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
- return false;
- assert(DefMI->getOperand(DefIdx).isReg() &&
- DefMI->getOperand(DefIdx).getReg() == SrcReg);
-
- // Now commute def instruction.
- MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
- if (!CommutedMI)
- return false;
- MBB->insert(MII, CommutedMI);
- SmallVector<unsigned, 1> Ops;
- Ops.push_back(NewDstIdx);
- MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
- // Not needed since foldMemoryOperand returns new MI.
- CommutedMI->eraseFromParent();
- if (!FoldedMI)
- return false;
-
- VRM->addSpillSlotUse(SS, FoldedMI);
- VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
- // Insert new def MI and spill MI.
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
- TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC, TRI);
- MII = prior(MII);
- MachineInstr *StoreMI = MII;
- VRM->addSpillSlotUse(SS, StoreMI);
- VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
- MII = FoldedMI; // Update MII to backtrack.
-
- // Delete all 3 old instructions.
- InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
- EraseInstr(ReloadMI);
- InvalidateKills(*DefMI, TRI, RegKills, KillOps);
- EraseInstr(DefMI);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- EraseInstr(&MI);
-
- // If NewReg was previously holding the value of some SS, it's now clobbered.
- // This has to be done now because it's a physical register. When this
- // instruction is re-visited, it's ignored.
- Spills.ClobberPhysReg(NewReg);
-
- ++NumCommutes;
- return true;
- }
-
- return false;
-}
-
-/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
-/// the last store to the same slot is now dead. If so, remove the last store.
-void LocalRewriter::
-SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
- int Idx, unsigned PhysReg, int StackSlot,
- const TargetRegisterClass *RC,
- bool isAvailable, MachineInstr *&LastStore,
- AvailableSpills &Spills,
- SmallSet<MachineInstr*, 4> &ReMatDefs,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
-
- MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
- TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC,
- TRI);
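- // storeRegToStackSlot may expand into several instructions;
- // prior(oldNextMII) is the last instruction actually inserted.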
- MachineInstr *StoreMI = prior(oldNextMII);
- VRM->addSpillSlotUse(StackSlot, StoreMI);
- DEBUG(dbgs() << "Store:\t" << *StoreMI);
-
- // If there is a dead store to this stack slot, nuke it now.
- if (LastStore) {
- DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
- ++NumDSE;
- SmallVector<unsigned, 2> KillRegs;
- InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
- MachineBasicBlock::iterator PrevMII = LastStore;
- bool CheckDef = PrevMII != MBB->begin();
- if (CheckDef)
- --PrevMII;
- EraseInstr(LastStore);
- if (CheckDef) {
- // Look at defs of killed registers on the store. Mark the defs
- // as dead since the store has been deleted and they aren't
- // being reused.
- for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
- bool HasOtherDef = false;
- if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
- MachineInstr *DeadDef = PrevMII;
- if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
- // FIXME: This assumes a remat def does not have side effects.
- EraseInstr(DeadDef);
- ++NumDRM;
- }
- }
- }
- }
- }
-
- // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
- // the last of multiple instructions is the actual store.
- LastStore = prior(oldNextMII);
-
- // If the stack slot value was previously available in some other
- // register, change it now. Otherwise, make the register available,
- // in PhysReg.
- Spills.ModifyStackSlotOrReMat(StackSlot);
- Spills.ClobberPhysReg(PhysReg);
- Spills.addAvailable(StackSlot, PhysReg, isAvailable);
- ++NumStores;
-}
-
-/// isSafeToDelete - Return true if this instruction doesn't produce any side
-/// effects and all of its defs are dead.
-static bool isSafeToDelete(MachineInstr &MI) {
- const MCInstrDesc &MCID = MI.getDesc();
- if (MCID.mayLoad() || MCID.mayStore() || MCID.isTerminator() ||
- MCID.isCall() || MCID.isBarrier() || MCID.isReturn() ||
- MI.isLabel() || MI.isDebugValue() ||
- MI.hasUnmodeledSideEffects())
- return false;
-
- // Technically speaking inline asm without side effects and no defs can still
- // be deleted. But there is so much bad inline asm code out there, we should
- // let them be.
- if (MI.isInlineAsm())
- return false;
-
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || !MO.getReg())
- continue;
- if (MO.isDef() && !MO.isDead())
- return false;
- if (MO.isUse() && MO.isKill())
- // FIXME: We can't remove kill markers or else the scavenger will assert.
- // An alternative is to add a ADD pseudo instruction to replace kill
- // markers.
- return false;
- }
- return true;
-}
-
-/// TransferDeadness - An identity copy definition is dead and it's being
-/// removed. Find the last def or use and mark it as dead / kill.
-void LocalRewriter::
-TransferDeadness(unsigned Reg, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- SmallPtrSet<MachineInstr*, 4> Seens;
- SmallVector<std::pair<MachineInstr*, int>,8> Refs;
- for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
- RE = MRI->reg_end(); RI != RE; ++RI) {
- MachineInstr *UDMI = &*RI;
- if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
- continue;
- DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
- if (DI == DistanceMap.end())
- continue;
- if (Seens.insert(UDMI))
- Refs.push_back(std::make_pair(UDMI, DI->second));
- }
-
- if (Refs.empty())
- return;
- std::sort(Refs.begin(), Refs.end(), RefSorter());
-
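- // Process references from the most recent (largest distance) backward.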
- while (!Refs.empty()) {
- MachineInstr *LastUDMI = Refs.back().first;
- Refs.pop_back();
-
- MachineOperand *LastUD = NULL;
- for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = LastUDMI->getOperand(i);
- if (!MO.isReg() || MO.getReg() != Reg)
- continue;
- if (!LastUD || (LastUD->isUse() && MO.isDef()))
- LastUD = &MO;
- if (LastUDMI->isRegTiedToDefOperand(i))
- break;
- }
- if (LastUD->isDef()) {
- // If the instruction has no side effects, delete it and propagate
- // backward further. Otherwise, mark it dead and we are done.
- if (!isSafeToDelete(*LastUDMI)) {
- LastUD->setIsDead();
- break;
- }
- EraseInstr(LastUDMI);
- } else {
- LastUD->setIsKill();
- RegKills.set(Reg);
- KillOps[Reg] = LastUD;
- break;
- }
- }
-}
-
-/// InsertEmergencySpills - Insert emergency spills before MI if requested by
-/// VRM. Return true if spills were inserted.
-bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
- if (!VRM->hasEmergencySpills(MI))
- return false;
- MachineBasicBlock::iterator MII = MI;
- SmallSet<int, 4> UsedSS;
- std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
- for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
- unsigned PhysReg = EmSpills[i];
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg);
- assert(RC && "Unable to determine register class!");
- int SS = VRM->getEmergencySpillSlot(RC);
- if (UsedSS.count(SS))
- llvm_unreachable("Need to spill more than one physical registers!");
- UsedSS.insert(SS);
- TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC, TRI);
- MachineInstr *StoreMI = prior(MII);
- VRM->addSpillSlotUse(SS, StoreMI);
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
- TII, *MBB->getParent());
-
- TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC, TRI);
-
- MachineInstr *LoadMI = prior(InsertLoc);
- VRM->addSpillSlotUse(SS, LoadMI);
- ++NumPSpills;
- DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
- }
- return true;
-}
-
-/// InsertRestores - Restore registers before MI if requested by VRM. Return
-/// true if any instructions were inserted.
-bool LocalRewriter::InsertRestores(MachineInstr *MI,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- if (!VRM->isRestorePt(MI))
- return false;
- MachineBasicBlock::iterator MII = MI;
- std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
- for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
- unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
- if (!VRM->getPreSplitReg(VirtReg))
- continue; // Split interval spilled again.
- unsigned Phys = VRM->getPhys(VirtReg);
- MRI->setPhysRegUsed(Phys);
-
- // Check if the value being restored is available. If so, it must be
- // from a predecessor BB that falls through into this BB. We do not
- // expect:
- // BB1:
- // r1 = load fi#1
- // ...
- // = r1<kill>
- // ... # r1 not clobbered
- // ...
- // = load fi#1
- bool DoReMat = VRM->isReMaterialized(VirtReg);
- int SSorRMId = DoReMat
- ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
- unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
- if (InReg == Phys) {
- // If the value is already available in the expected register, save
- // a reload / remat.
- if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Reusing RM#"
- << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
- DEBUG(dbgs() << " from physreg "
- << TRI->getName(InReg) << " for " << PrintReg(VirtReg)
- <<" instead of reloading into physreg "
- << TRI->getName(Phys) << '\n');
-
- // Reusing a physreg may resurrect it. But we expect ProcessUses to update
- // the kill flags for the current instruction after processing it.
-
- ++NumOmitted;
- continue;
- } else if (InReg && InReg != Phys) {
- if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Reusing RM#"
- << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
- DEBUG(dbgs() << " from physreg "
- << TRI->getName(InReg) << " for " << PrintReg(VirtReg)
- <<" by copying it into physreg "
- << TRI->getName(Phys) << '\n');
-
- // If the reloaded / remat value is available in another register,
- // copy it to the desired register.
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
- *MBB->getParent());
- MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI->getDebugLoc(),
- TII->get(TargetOpcode::COPY), Phys)
- .addReg(InReg, RegState::Kill);
-
- // This invalidates Phys.
- Spills.ClobberPhysReg(Phys);
- // Remember it's available.
- Spills.addAvailable(SSorRMId, Phys);
-
- CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- UpdateKills(*CopyMI, TRI, RegKills, KillOps);
-
- DEBUG(dbgs() << '\t' << *CopyMI);
- ++NumCopified;
- continue;
- }
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
- *MBB->getParent());
-
- if (VRM->isReMaterialized(VirtReg)) {
- ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
- } else {
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
- TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC, TRI);
- MachineInstr *LoadMI = prior(InsertLoc);
- VRM->addSpillSlotUse(SSorRMId, LoadMI);
- ++NumLoads;
- DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
- }
-
- // This invalidates Phys.
- Spills.ClobberPhysReg(Phys);
- // Remember it's available.
- Spills.addAvailable(SSorRMId, Phys);
-
- UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
- DEBUG(dbgs() << '\t' << *prior(MII));
- }
- return true;
-}
-
-/// InsertSpills - Insert spills after MI if requested by VRM. Return
-/// true if spills were inserted.
-bool LocalRewriter::InsertSpills(MachineInstr *MI) {
- if (!VRM->isSpillPt(MI))
- return false;
- MachineBasicBlock::iterator MII = MI;
- std::vector<std::pair<unsigned,bool> > &SpillRegs =
- VRM->getSpillPtSpills(MI);
- for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
- unsigned VirtReg = SpillRegs[i].first;
- bool isKill = SpillRegs[i].second;
- if (!VRM->getPreSplitReg(VirtReg))
- continue; // Split interval spilled again.
- const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
- unsigned Phys = VRM->getPhys(VirtReg);
- int StackSlot = VRM->getStackSlot(VirtReg);
- MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
- TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
- RC, TRI);
- MachineInstr *StoreMI = prior(oldNextMII);
- VRM->addSpillSlotUse(StackSlot, StoreMI);
- DEBUG(dbgs() << "Store:\t" << *StoreMI);
- VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
- }
- return true;
-}
-
-
-/// ProcessUses - Process all of MI's spilled operands and all available
-/// operands.
-void LocalRewriter::ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
- std::vector<MachineInstr*> &MaybeDeadStores,
- BitVector &RegKills,
- ReuseInfo &ReusedOperands,
- std::vector<MachineOperand*> &KillOps) {
- // Clear kill info.
- SmallSet<unsigned, 2> KilledMIRegs;
- SmallVector<unsigned, 4> VirtUseOps;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.getReg() == 0)
- continue; // Ignore non-register operands.
-
- unsigned VirtReg = MO.getReg();
-
- if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
- // Ignore physregs for spilling, but remember that it is used by this
- // function.
- MRI->setPhysRegUsed(VirtReg);
- continue;
- }
-
- // We want to process implicit virtual register uses first.
- if (MO.isImplicit())
- // If the virtual register is implicitly defined, emit an implicit_def
- // before it so the scavenger knows it's "defined".
- // FIXME: This is a horrible hack done by the register allocator to
- // remat a definition with a virtual register operand.
- VirtUseOps.insert(VirtUseOps.begin(), i);
- else
- VirtUseOps.push_back(i);
-
- // A partial def causes problems because the same operand both reads and
- // writes the register. This rewriter is designed to rewrite uses and defs
- // separately, so a partial def would already have been rewritten to a
- // physreg by the time we get to processing defs.
- // Add an implicit use operand to model the partial def.
- if (MO.isDef() && MO.getSubReg() && MI.readsVirtualRegister(VirtReg) &&
- MI.findRegisterUseOperandIdx(VirtReg) == -1) {
- VirtUseOps.insert(VirtUseOps.begin(), MI.getNumOperands());
- MI.addOperand(MachineOperand::CreateReg(VirtReg,
- false, // isDef
- true)); // isImplicit
- DEBUG(dbgs() << "Partial redef: " << MI);
- }
- }
-
- // Process all of the spilled uses and all non spilled reg references.
- SmallVector<int, 2> PotentialDeadStoreSlots;
- KilledMIRegs.clear();
- for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
- unsigned i = VirtUseOps[j];
- unsigned VirtReg = MI.getOperand(i).getReg();
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "Not a virtual register?");
-
- unsigned SubIdx = MI.getOperand(i).getSubReg();
- if (VRM->isAssignedReg(VirtReg)) {
- // This virtual register was assigned a physreg!
- unsigned Phys = VRM->getPhys(VirtReg);
- MRI->setPhysRegUsed(Phys);
- if (MI.getOperand(i).isDef())
- ReusedOperands.markClobbered(Phys);
- substitutePhysReg(MI.getOperand(i), Phys, *TRI);
- if (VRM->isImplicitlyDefined(VirtReg))
- // FIXME: Is this needed?
- BuildMI(*MBB, &MI, MI.getDebugLoc(),
- TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
- continue;
- }
-
- // This virtual register is now known to be a spilled value.
- if (!MI.getOperand(i).isUse())
- continue; // Handle defs in the loop below (handle use&def here though)
-
- bool AvoidReload = MI.getOperand(i).isUndef();
- // Check if it is defined by an implicit def. It should not be spilled.
- // Note, this is for correctness reason. e.g.
- // 8 %reg1024<def> = IMPLICIT_DEF
- // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
- // The live range [12, 14) is not part of the r1024 live interval since
- // it's defined by an implicit def. It will not conflict with the live
- // interval of r1025. Now suppose both registers are spilled; you can
- // easily see a situation where both registers are reloaded before
- // the INSERT_SUBREG and both target registers that would overlap.
- bool DoReMat = VRM->isReMaterialized(VirtReg);
- int SSorRMId = DoReMat
- ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
- int ReuseSlot = SSorRMId;
-
- // Check to see if this stack slot is available.
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
-
- // If this is a sub-register use, make sure the reuse register is in the
- // right register class. For example, for x86 not all of the 32-bit
- // registers have accessible sub-registers.
- // Similarly so for EXTRACT_SUBREG. Consider this:
- // EDI = op
- // MOV32_mr fi#1, EDI
- // ...
- // = EXTRACT_SUBREG fi#1
- // fi#1 is available in EDI, but it cannot be reused because it's not in
- // the right register file.
- if (PhysReg && !AvoidReload && SubIdx) {
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
- if (!RC->contains(PhysReg))
- PhysReg = 0;
- }
-
- if (PhysReg && !AvoidReload) {
- // This spilled operand might be part of a two-address operand. If this
- // is the case, then changing it will necessarily require changing the
- // def part of the instruction as well. However, in some cases, we
- // aren't allowed to modify the reused register. If none of these cases
- // apply, reuse it.
- bool CanReuse = true;
- bool isTied = MI.isRegTiedToDefOperand(i);
- if (isTied) {
- // Okay, we have a two address operand. We can reuse this physreg as
- // long as we are allowed to clobber the value and there isn't an
- // earlier def that has already clobbered the physreg.
- CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
- Spills.canClobberPhysReg(PhysReg);
- }
- // If this is an asm, and a PhysReg alias is used elsewhere as an
- // earlyclobber operand, we can't also use it as an input.
- if (MI.isInlineAsm()) {
- for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
- MachineOperand &MOk = MI.getOperand(k);
- if (MOk.isReg() && MOk.isEarlyClobber() &&
- TRI->regsOverlap(MOk.getReg(), PhysReg)) {
- CanReuse = false;
- DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
- << " for " << PrintReg(VirtReg) << ": " << MOk
- << '\n');
- break;
- }
- }
- }
-
- if (CanReuse) {
- // If this stack slot value is already available, reuse it!
- if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Reusing RM#"
- << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
- DEBUG(dbgs() << " from physreg "
- << TRI->getName(PhysReg) << " for " << PrintReg(VirtReg)
- << " instead of reloading into "
- << PrintReg(VRM->getPhys(VirtReg), TRI) << '\n');
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
-
- // Reusing a physreg may resurrect it. But we expect ProcessUses to
- // update the kill flags for the current instr after processing it.
-
- // The only technical detail we have is that we don't know that
- // PhysReg won't be clobbered by a reloaded stack slot that occurs
- // later in the instruction. In particular, consider 'op V1, V2'.
- // If V1 is available in physreg R0, we would choose to reuse it
- // here, instead of reloading it into the register the allocator
- // indicated (say R1). However, V2 might have to be reloaded
- // later, and it might indicate that it needs to live in R0. When
- // this occurs, we need to have information available that
- // indicates it is safe to use R1 for the reload instead of R0.
- //
- // To further complicate matters, we might conflict with an alias,
- // or R0 and R1 might not be compatible with each other. In this
- // case, we actually insert a reload for V1 in R1, ensuring that
- // we can get at R0 or its alias.
- ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
- VRM->getPhys(VirtReg), VirtReg);
- if (isTied)
- // Only mark it clobbered if this is a use&def operand.
- ReusedOperands.markClobbered(PhysReg);
- ++NumReused;
-
- if (MI.getOperand(i).isKill() &&
- ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
-
- // The store of this spilled value is potentially dead, but we
- // won't know for certain until we've confirmed that the re-use
- // above is valid, which means waiting until the other operands
- // are processed. For now we just track the spill slot, we'll
- // remove it after the other operands are processed if valid.
-
- PotentialDeadStoreSlots.push_back(ReuseSlot);
- }
-
- // Mark it isKill if there are no other uses of the same virtual
- // register and it's not a two-address operand. IsKill will be
- // unset if reg is reused.
- if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
- MI.getOperand(i).setIsKill();
- KilledMIRegs.insert(VirtReg);
- }
- continue;
- } // CanReuse
-
- // Otherwise we have a situation where we have a two-address instruction
- // whose mod/ref operand needs to be reloaded. This reload is already
- // available in some register "PhysReg", but if we used PhysReg as the
- // operand to our 2-addr instruction, the instruction would modify
- // PhysReg. This isn't cool if something later uses PhysReg and expects
- // to get its initial value.
- //
- // To avoid this problem, and to avoid doing a load right after a store,
- // we emit a copy from PhysReg into the designated register for this
- // operand.
- //
- // This case also applies to an earlyclobber'd PhysReg.
- unsigned DesignatedReg = VRM->getPhys(VirtReg);
- assert(DesignatedReg && "Must map virtreg to physreg!");
-
- // Note that, if we reused a register for a previous operand, the
- // register we want to reload into might not actually be
- // available. If this occurs, use the register indicated by the
- // reuser.
- if (ReusedOperands.hasReuses())
- DesignatedReg = ReusedOperands.
- GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
- MaybeDeadStores, RegKills, KillOps, *VRM);
-
- // If the mapped designated register is actually the physreg we have
- // incoming, we don't need to insert a dead copy.
- if (DesignatedReg == PhysReg) {
- // If this stack slot value is already available, reuse it!
- if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Reusing RM#"
- << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
- DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
- << " for " << PrintReg(VirtReg)
- << " instead of reloading into same physreg.\n");
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
- ReusedOperands.markClobbered(RReg);
- ++NumReused;
- continue;
- }
-
- MRI->setPhysRegUsed(DesignatedReg);
- ReusedOperands.markClobbered(DesignatedReg);
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
- SSorRMId, TII, *MBB->getParent());
- MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
- TII->get(TargetOpcode::COPY),
- DesignatedReg).addReg(PhysReg);
- CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- UpdateKills(*CopyMI, TRI, RegKills, KillOps);
-
- // This invalidates DesignatedReg.
- Spills.ClobberPhysReg(DesignatedReg);
-
- Spills.addAvailable(ReuseSlot, DesignatedReg);
- unsigned RReg =
- SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
- DEBUG(dbgs() << '\t' << *prior(InsertLoc));
- ++NumReused;
- continue;
- } // if (PhysReg)
-
- // Otherwise, reload it and remember that we have it.
- PhysReg = VRM->getPhys(VirtReg);
- assert(PhysReg && "Must map virtreg to physreg!");
-
- // Note that, if we reused a register for a previous operand, the
- // register we want to reload into might not actually be
- // available. If this occurs, use the register indicated by the
- // reuser.
- if (ReusedOperands.hasReuses())
- PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
- Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
-
- MRI->setPhysRegUsed(PhysReg);
- ReusedOperands.markClobbered(PhysReg);
- if (AvoidReload)
- ++NumAvoided;
- else {
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI, DoReMat,
- SSorRMId, TII, *MBB->getParent());
-
- if (DoReMat) {
- ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
- } else {
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
- TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC,TRI);
- MachineInstr *LoadMI = prior(InsertLoc);
- VRM->addSpillSlotUse(SSorRMId, LoadMI);
- ++NumLoads;
- DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
- }
- // This invalidates PhysReg.
- Spills.ClobberPhysReg(PhysReg);
-
- // Any stores to this stack slot are not dead anymore.
- if (!DoReMat)
- MaybeDeadStores[SSorRMId] = NULL;
- Spills.addAvailable(SSorRMId, PhysReg);
- // Assumes this is the last use. IsKill will be unset if reg is reused
- // unless it's a two-address operand.
- if (!MI.isRegTiedToDefOperand(i) &&
- KilledMIRegs.count(VirtReg) == 0) {
- MI.getOperand(i).setIsKill();
- KilledMIRegs.insert(VirtReg);
- }
-
- UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
- DEBUG(dbgs() << '\t' << *prior(InsertLoc));
- }
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
- }
-
- // Ok - now we can remove stores that have been confirmed dead.
- for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
- // This was the last use and the spilled value is still available
- // for reuse. That means the spill was unnecessary!
- int PDSSlot = PotentialDeadStoreSlots[j];
- MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
- if (DeadStore) {
- DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
- InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
- EraseInstr(DeadStore);
- MaybeDeadStores[PDSSlot] = NULL;
- ++NumDSE;
- }
- }
-}
-
-/// RewriteMBB - Keep track of which spills are available even after the
-/// register allocator is done with them. If possible, avoid reloading vregs.
-void
-LocalRewriter::RewriteMBB(LiveIntervals *LIs,
- AvailableSpills &Spills, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
-
- DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
- << MBB->getName() << "':\n");
-
- MachineFunction &MF = *MBB->getParent();
-
- // MaybeDeadStores - When we need to write a value back into a stack slot,
- // keep track of the inserted store. If the stack slot value is never read
- // (because the value was used from some available register, for example), and
- // subsequently stored to, the original store is dead. This map keeps track
- // of inserted stores that are not used. If we see a subsequent store to the
- // same stack slot, the original store is deleted.
- std::vector<MachineInstr*> MaybeDeadStores;
- MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
-
- // ReMatDefs - These are rematerializable def MIs which are not deleted.
- SmallSet<MachineInstr*, 4> ReMatDefs;
-
- // Keep track of the registers we have already spilled in case there are
- // multiple defs of the same register in MI.
- SmallSet<unsigned, 8> SpilledMIRegs;
-
- RegKills.reset();
- KillOps.clear();
- KillOps.resize(TRI->getNumRegs(), NULL);
-
- DistanceMap.clear();
- for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
- MII != E; ) {
- MachineBasicBlock::iterator NextMII = llvm::next(MII);
-
- if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
- NextMII = llvm::next(MII);
-
- if (InsertEmergencySpills(MII))
- NextMII = llvm::next(MII);
-
- InsertRestores(MII, Spills, RegKills, KillOps);
-
- if (InsertSpills(MII))
- NextMII = llvm::next(MII);
-
- bool Erased = false;
- bool BackTracked = false;
- MachineInstr &MI = *MII;
-
- // Remember DBG_VALUE instructions which reference stack slots.
- if (MI.isDebugValue() && MI.getOperand(0).isFI())
- Slot2DbgValues[MI.getOperand(0).getIndex()].push_back(&MI);
-
- /// ReusedOperands - Keep track of operand reuse in case we need to undo
- /// reuse.
- ReuseInfo ReusedOperands(MI, TRI);
-
- ProcessUses(MI, Spills, MaybeDeadStores, RegKills, ReusedOperands, KillOps);
-
- DEBUG(dbgs() << '\t' << MI);
-
-
- // If we have folded references to memory operands, make sure we clear all
- // physical registers that may contain the value of the spilled virtual
- // register.
-
- // Copy the folded virts to a small vector, we may change MI2VirtMap.
- SmallVector<std::pair<unsigned, VirtRegMap::ModRef>, 4> FoldedVirts;
- // C++0x FTW!
- for (std::pair<VirtRegMap::MI2VirtMapTy::const_iterator,
- VirtRegMap::MI2VirtMapTy::const_iterator> FVRange =
- VRM->getFoldedVirts(&MI);
- FVRange.first != FVRange.second; ++FVRange.first)
- FoldedVirts.push_back(FVRange.first->second);
-
- SmallSet<int, 2> FoldedSS;
- for (unsigned FVI = 0, FVE = FoldedVirts.size(); FVI != FVE; ++FVI) {
- unsigned VirtReg = FoldedVirts[FVI].first;
- VirtRegMap::ModRef MR = FoldedVirts[FVI].second;
- DEBUG(dbgs() << "Folded " << PrintReg(VirtReg) << " MR: " << MR);
-
- int SS = VRM->getStackSlot(VirtReg);
- if (SS == VirtRegMap::NO_STACK_SLOT)
- continue;
- FoldedSS.insert(SS);
- DEBUG(dbgs() << " - StackSlot: " << SS << "\n");
-
- // If this folded instruction is just a use, check to see if it's a
- // straight load from the virt reg slot.
- if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
- int FrameIdx;
- unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
- if (DestReg && FrameIdx == SS) {
- // If this spill slot is available, turn it into a copy (or nothing)
- // instead of leaving it as a load!
- if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
- DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
- if (DestReg != InReg) {
- MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
- MachineInstr *CopyMI = BuildMI(*MBB, &MI, MI.getDebugLoc(),
- TII->get(TargetOpcode::COPY))
- .addReg(DestReg, RegState::Define, DefMO->getSubReg())
- .addReg(InReg, RegState::Kill);
- // Revisit the copy so we make sure to notice the effects of the
- // operation on the destreg (either needing to RA it if it's
- // virtual or needing to clobber any values if it's physical).
- NextMII = CopyMI;
- NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- BackTracked = true;
- } else {
- DEBUG(dbgs() << "Removing now-noop copy: " << MI);
- // InvalidateKills resurrects any prior kill of the copy's source
- // allowing the source reg to be reused in place of the copy.
- Spills.disallowClobberPhysReg(InReg);
- }
-
- InvalidateKills(MI, TRI, RegKills, KillOps);
- EraseInstr(&MI);
- Erased = true;
- goto ProcessNextInst;
- }
- } else {
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
- SmallVector<MachineInstr*, 4> NewMIs;
- if (PhysReg &&
- TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)){
- MBB->insert(MII, NewMIs[0]);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- EraseInstr(&MI);
- Erased = true;
- --NextMII; // backtrack to the unfolded instruction.
- BackTracked = true;
- goto ProcessNextInst;
- }
- }
- }
-
- // If this reference is not a use, any previous store is now dead.
- // Otherwise, the store to this stack slot is not dead anymore.
- MachineInstr* DeadStore = MaybeDeadStores[SS];
- if (DeadStore) {
- bool isDead = !(MR & VirtRegMap::isRef);
- MachineInstr *NewStore = NULL;
- if (MR & VirtRegMap::isModRef) {
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
- SmallVector<MachineInstr*, 4> NewMIs;
- // We can reuse this physreg as long as we are allowed to clobber
- // the value and there isn't an earlier def that has already clobbered
- // the physreg.
- if (PhysReg &&
- !ReusedOperands.isClobbered(PhysReg) &&
- Spills.canClobberPhysReg(PhysReg) &&
- !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
- MachineOperand *KillOpnd =
- DeadStore->findRegisterUseOperand(PhysReg, true);
- // Note, if the store is storing a sub-register, it's possible the
- // super-register is needed below.
- if (KillOpnd && !KillOpnd->getSubReg() &&
- TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
- MBB->insert(MII, NewMIs[0]);
- NewStore = NewMIs[1];
- MBB->insert(MII, NewStore);
- VRM->addSpillSlotUse(SS, NewStore);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- EraseInstr(&MI);
- Erased = true;
- --NextMII;
- --NextMII; // backtrack to the unfolded instruction.
- BackTracked = true;
- isDead = true;
- ++NumSUnfold;
- }
- }
- }
-
- if (isDead) { // Previous store is dead.
- // If we get here, the store is dead, nuke it now.
- DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
- InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
- EraseInstr(DeadStore);
- if (!NewStore)
- ++NumDSE;
- }
-
- MaybeDeadStores[SS] = NULL;
- if (NewStore) {
- // Treat this store as a spill merged into a copy. That makes the
- // stack slot value available.
- VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
- goto ProcessNextInst;
- }
- }
-
- // If the spill slot value is available, and this is a new definition of
- // the value, the value is not available anymore.
- if (MR & VirtRegMap::isMod) {
- // Notice that the value in this stack slot has been modified.
- Spills.ModifyStackSlotOrReMat(SS);
-
- // If this is *just* a mod of the value, check to see if this is just a
- // store to the spill slot (i.e. the spill got merged into the copy). If
- // so, realize that the vreg is available now, and add the store to the
- // MaybeDeadStore info.
- int StackSlot;
- if (!(MR & VirtRegMap::isRef)) {
- if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
- assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- "Src hasn't been allocated yet?");
-
- if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
- Spills, RegKills, KillOps, TRI)) {
- NextMII = llvm::next(MII);
- BackTracked = true;
- goto ProcessNextInst;
- }
-
- // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
- // this as a potentially dead store in case there is a subsequent
- // store into the stack slot without a read from it.
- MaybeDeadStores[StackSlot] = &MI;
-
- // If the stack slot value was previously available in some other
- // register, change it now. Otherwise, make the register
- // available in PhysReg.
- Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
- }
- }
- }
- }
-
- // Process all of the spilled defs.
- SpilledMIRegs.clear();
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!(MO.isReg() && MO.getReg() && MO.isDef()))
- continue;
-
- unsigned VirtReg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
- // Check to see if this is a noop copy. If so, eliminate the
- // instruction before considering the dest reg to be changed.
- // Also check if it's copying from an "undef"; if so, we can't
- // eliminate it, or else the undef marker is lost and it will
- // confuse the scavenger. This is extremely rare.
- if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
- MI.getNumOperands() == 2) {
- ++NumDCE;
- DEBUG(dbgs() << "Removing now-noop copy: " << MI);
- SmallVector<unsigned, 2> KillRegs;
- InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
- if (MO.isDead() && !KillRegs.empty()) {
- // Source register or an implicit super/sub-register use is killed.
- assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
- // Last def is now dead.
- TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
- }
- EraseInstr(&MI);
- Erased = true;
- Spills.disallowClobberPhysReg(VirtReg);
- goto ProcessNextInst;
- }
-
- // If it's not a no-op copy, it clobbers the value in the destreg.
- Spills.ClobberPhysReg(VirtReg);
- ReusedOperands.markClobbered(VirtReg);
-
- // Check to see if this instruction is a load from a stack slot into
- // a register. If so, this provides the stack slot value in the reg.
- int FrameIdx;
- if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
- assert(DestReg == VirtReg && "Unknown load situation!");
-
- // If it is a folded reference, then it's not safe to clobber.
- bool Folded = FoldedSS.count(FrameIdx);
- // Otherwise, if it wasn't available, remember that it is now!
- Spills.addAvailable(FrameIdx, DestReg, !Folded);
- goto ProcessNextInst;
- }
-
- continue;
- }
-
- unsigned SubIdx = MO.getSubReg();
- bool DoReMat = VRM->isReMaterialized(VirtReg);
- if (DoReMat)
- ReMatDefs.insert(&MI);
-
- // The only vregs left are stack slot definitions.
- int StackSlot = VRM->getStackSlot(VirtReg);
- const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
-
- // If this def is part of a two-address operand, make sure to execute
- // the store from the correct physical register.
- unsigned PhysReg;
- unsigned TiedOp;
- if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
- PhysReg = MI.getOperand(TiedOp).getReg();
- if (SubIdx) {
- unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
- assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
- "Can't find corresponding super-register!");
- PhysReg = SuperReg;
- }
- } else {
- PhysReg = VRM->getPhys(VirtReg);
- if (ReusedOperands.isClobbered(PhysReg)) {
- // Another def has taken the assigned physreg. It must have been a
- // use&def which got it due to reuse. Undo the reuse!
- PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
- Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
- }
- }
-
- // If StackSlot is available in a register that also holds other stack
- // slots, clobber those stack slots now.
- Spills.ClobberSharingStackSlots(StackSlot);
-
- assert(PhysReg && "VR not assigned a physical register?");
- MRI->setPhysRegUsed(PhysReg);
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- ReusedOperands.markClobbered(RReg);
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
-
- if (!MO.isDead() && SpilledMIRegs.insert(VirtReg)) {
- MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
- SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
- LastStore, Spills, ReMatDefs, RegKills, KillOps);
- NextMII = llvm::next(MII);
-
- // Check to see if this is a noop copy. If so, eliminate the
- // instruction before considering the dest reg to be changed.
- if (MI.isIdentityCopy()) {
- ++NumDCE;
- DEBUG(dbgs() << "Removing now-noop copy: " << MI);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- EraseInstr(&MI);
- Erased = true;
- UpdateKills(*LastStore, TRI, RegKills, KillOps);
- goto ProcessNextInst;
- }
- }
- }
- ProcessNextInst:
- // Delete dead instructions without side effects.
- if (!Erased && !BackTracked && isSafeToDelete(MI)) {
- InvalidateKills(MI, TRI, RegKills, KillOps);
- EraseInstr(&MI);
- Erased = true;
- }
- if (!Erased)
- DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
- if (!Erased && !BackTracked) {
- for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
- UpdateKills(*II, TRI, RegKills, KillOps);
- }
- MII = NextMII;
- }
-
-}
-
-llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
- switch (RewriterOpt) {
- default: llvm_unreachable("Unreachable!");
- case local:
- return new LocalRewriter();
- case trivial:
- return new TrivialRewriter();
- }
-}
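The factory above switches on a command-line option whose declaration falls outside this hunk. A minimal sketch of how such a cl::opt-driven factory is typically declared — only the `local`/`trivial` enumerators are taken from the code above; the option name and help strings are illustrative assumptions:

    // Sketch only: a cl::opt in the style the deleted factory implies.
    #include "llvm/Support/CommandLine.h"

    namespace {
      enum RewriterName { local, trivial };

      llvm::cl::opt<RewriterName> RewriterOpt(
          "rewriter", llvm::cl::desc("Rewriter to use (default: local)"),
          llvm::cl::values(clEnumVal(local,   "local rewriter"),
                           clEnumVal(trivial, "trivial rewriter"),
                           clEnumValEnd),
          llvm::cl::init(local));
    }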
diff --git a/contrib/llvm/lib/CodeGen/VirtRegRewriter.h b/contrib/llvm/lib/CodeGen/VirtRegRewriter.h
deleted file mode 100644
index 93474e0..0000000
--- a/contrib/llvm/lib/CodeGen/VirtRegRewriter.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- llvm/CodeGen/VirtRegRewriter.h - VirtRegRewriter -*- C++ -*--------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_VIRTREGREWRITER_H
-#define LLVM_CODEGEN_VIRTREGREWRITER_H
-
-namespace llvm {
- class LiveIntervals;
- class MachineFunction;
- class VirtRegMap;
-
- /// VirtRegRewriter interface: Implementations of this interface assign
- /// spilled virtual registers to stack slots, rewriting the code.
- struct VirtRegRewriter {
- virtual ~VirtRegRewriter();
- virtual bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
- LiveIntervals* LIs) = 0;
- };
-
- /// createVirtRegRewriter - Create and return a rewriter object, as specified
- /// on the command line.
- VirtRegRewriter* createVirtRegRewriter();
-
-}
-
-#endif
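Before this header's removal, a client drove spill rewriting through the interface above. A minimal sketch, assuming an already-populated VirtRegMap (the helper name rewriteSpills is hypothetical):

    // Hypothetical call site for the interface deleted above (sketch only).
    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/CodeGen/VirtRegRewriter.h"

    bool rewriteSpills(llvm::MachineFunction &MF, llvm::VirtRegMap &VRM,
                       llvm::LiveIntervals *LIs) {
      llvm::OwningPtr<llvm::VirtRegRewriter> R(llvm::createVirtRegRewriter());
      return R->runOnMachineFunction(MF, VRM, LIs);
    }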
diff --git a/contrib/llvm/lib/DebugInfo/DWARFContext.cpp b/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
index e1ac398..dccadc4 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFContext.cpp
@@ -165,3 +165,5 @@ DILineInfo DWARFContext::getLineInfoForAddress(uint64_t address) {
return DILineInfo(fileName.c_str(), row.Line, row.Column);
}
+
+void DWARFContextInMemory::anchor() { }
diff --git a/contrib/llvm/lib/DebugInfo/DWARFContext.h b/contrib/llvm/lib/DebugInfo/DWARFContext.h
index 746a463..d2e763a 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFContext.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFContext.h
@@ -86,6 +86,7 @@ public:
/// DWARFContext. It assumes all content is available in memory and stores
/// pointers to it.
class DWARFContextInMemory : public DWARFContext {
+ virtual void anchor();
StringRef InfoSection;
StringRef AbbrevSection;
StringRef ARangeSection;
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp
index a11ae3f..6e6c37e 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.cpp
@@ -83,7 +83,7 @@ void DWARFDebugAbbrev::dump(raw_ostream &OS) const {
DWARFAbbreviationDeclarationCollMapConstIter pos;
for (pos = AbbrevCollMap.begin(); pos != AbbrevCollMap.end(); ++pos) {
- OS << format("Abbrev table for offset: 0x%8.8x\n", pos->first);
+ OS << format("Abbrev table for offset: 0x%8.8" PRIx64 "\n", pos->first);
pos->second.dump(OS);
}
}
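This and the following DWARF hunks replace %llx-style conversions with the <inttypes.h> width macros, since uint64_t maps to different printf length modifiers across platforms. The pattern in isolation (the demo function name is illustrative):

    // Portable printing of a 64-bit offset, as applied throughout these
    // dumpers. PRIx64 expands to the correct length modifier per platform.
    #include <cinttypes>
    #include <cstdio>

    void dumpOffset(uint64_t Offset) {
      std::printf("Abbrev table for offset: 0x%8.8" PRIx64 "\n", Offset);
    }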
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h b/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h
index 03189b1..c7c0436 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAbbrev.h
@@ -25,21 +25,21 @@ typedef DWARFAbbreviationDeclarationColl::const_iterator
DWARFAbbreviationDeclarationCollConstIter;
class DWARFAbbreviationDeclarationSet {
- uint64_t Offset;
+ uint32_t Offset;
uint32_t IdxOffset;
std::vector<DWARFAbbreviationDeclaration> Decls;
public:
DWARFAbbreviationDeclarationSet()
: Offset(0), IdxOffset(0) {}
- DWARFAbbreviationDeclarationSet(uint64_t offset, uint32_t idxOffset)
+ DWARFAbbreviationDeclarationSet(uint32_t offset, uint32_t idxOffset)
: Offset(offset), IdxOffset(idxOffset) {}
void clear() {
IdxOffset = 0;
Decls.clear();
}
- uint64_t getOffset() const { return Offset; }
+ uint32_t getOffset() const { return Offset; }
void dump(raw_ostream &OS) const;
bool extract(DataExtractor data, uint32_t* offset_ptr);
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp
index b0c0354..2efbfd1 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugArangeSet.cpp
@@ -122,8 +122,9 @@ void DWARFDebugArangeSet::dump(raw_ostream &OS) const {
const uint32_t hex_width = Header.AddrSize * 2;
for (DescriptorConstIter pos = ArangeDescriptors.begin(),
end = ArangeDescriptors.end(); pos != end; ++pos)
- OS << format("[0x%*.*llx -", hex_width, hex_width, pos->Address)
- << format(" 0x%*.*llx)\n", hex_width, hex_width, pos->getEndAddress());
+ OS << format("[0x%*.*" PRIx64 " -", hex_width, hex_width, pos->Address)
+ << format(" 0x%*.*" PRIx64 ")\n",
+ hex_width, hex_width, pos->getEndAddress());
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
index 576d37d..1788145 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugAranges.cpp
@@ -100,13 +100,14 @@ void DWARFDebugAranges::dump(raw_ostream &OS) const {
const uint32_t num_ranges = getNumRanges();
for (uint32_t i = 0; i < num_ranges; ++i) {
const Range &range = Aranges[i];
- OS << format("0x%8.8x: [0x%8.8llx - 0x%8.8llx)\n", range.Offset,
- (uint64_t)range.LoPC, (uint64_t)range.HiPC());
+ OS << format("0x%8.8x: [0x%8.8" PRIx64 " - 0x%8.8" PRIx64 ")\n",
+ range.Offset, (uint64_t)range.LoPC, (uint64_t)range.HiPC());
}
}
void DWARFDebugAranges::Range::dump(raw_ostream &OS) const {
- OS << format("{0x%8.8x}: [0x%8.8llx - 0x%8.8llx)\n", Offset, LoPC, HiPC());
+ OS << format("{0x%8.8x}: [0x%8.8" PRIx64 " - 0x%8.8" PRIx64 ")\n",
+ Offset, LoPC, HiPC());
}
void DWARFDebugAranges::appendRange(uint32_t offset, uint64_t low_pc,
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
index 1b089ad..236db97 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.cpp
@@ -26,7 +26,7 @@ void DWARFDebugInfoEntryMinimal::dump(raw_ostream &OS,
uint32_t offset = Offset;
if (debug_info_data.isValidOffset(offset)) {
- uint64_t abbrCode = debug_info_data.getULEB128(&offset);
+ uint32_t abbrCode = debug_info_data.getULEB128(&offset);
OS << format("\n0x%8.8x: ", Offset);
if (abbrCode) {
@@ -203,8 +203,6 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu,
AbbrevDecl = NULL;
return true; // NULL debug tag entry
}
-
- return false;
}
bool
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
index aff2e85..37b3bcd 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugInfoEntry.h
@@ -23,7 +23,7 @@ class DWARFFormValue;
/// DWARFDebugInfoEntryMinimal - A DIE with only the minimum required data.
class DWARFDebugInfoEntryMinimal {
/// Offset within the .debug_info of the start of this entry.
- uint64_t Offset;
+ uint32_t Offset;
/// How many to subtract from "this" to get the parent.
/// If zero this die has no parent.
@@ -52,7 +52,7 @@ public:
uint32_t getTag() const { return AbbrevDecl ? AbbrevDecl->getTag() : 0; }
bool isNULL() const { return AbbrevDecl == 0; }
- uint64_t getOffset() const { return Offset; }
+ uint32_t getOffset() const { return Offset; }
uint32_t getNumAttributes() const {
return !isNULL() ? AbbrevDecl->getNumAttributes() : 0;
}
diff --git a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
index fe1ef78..117fa31 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFDebugLine.cpp
@@ -41,8 +41,9 @@ void DWARFDebugLine::Prologue::dump(raw_ostream &OS) const {
"----------------\n";
for (uint32_t i = 0; i < FileNames.size(); ++i) {
const FileNameEntry& fileEntry = FileNames[i];
- OS << format("file_names[%3u] %4u ", i+1, fileEntry.DirIdx)
- << format("0x%8.8x 0x%8.8x ", fileEntry.ModTime, fileEntry.Length)
+ OS << format("file_names[%3u] %4" PRIu64 " ", i+1, fileEntry.DirIdx)
+ << format("0x%8.8" PRIx64 " 0x%8.8" PRIx64 " ",
+ fileEntry.ModTime, fileEntry.Length)
<< fileEntry.Name << '\n';
}
}
@@ -68,7 +69,7 @@ void DWARFDebugLine::Row::reset(bool default_is_stmt) {
}
void DWARFDebugLine::Row::dump(raw_ostream &OS) const {
- OS << format("0x%16.16llx %6u %6u", Address, Line, Column)
+ OS << format("0x%16.16" PRIx64 " %6u %6u", Address, Line, Column)
<< format(" %6u %3u ", File, Isa)
<< (IsStmt ? " is_stmt" : "")
<< (BasicBlock ? " basic_block" : "")
diff --git a/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp b/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
index 705efe5..ee2a3ab 100644
--- a/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
+++ b/contrib/llvm/lib/DebugInfo/DWARFFormValue.cpp
@@ -263,12 +263,12 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const {
bool cu_relative_offset = false;
switch (Form) {
- case DW_FORM_addr: OS << format("0x%016x", uvalue); break;
+ case DW_FORM_addr: OS << format("0x%016" PRIx64, uvalue); break;
case DW_FORM_flag:
- case DW_FORM_data1: OS << format("0x%02x", uvalue); break;
- case DW_FORM_data2: OS << format("0x%04x", uvalue); break;
- case DW_FORM_data4: OS << format("0x%08x", uvalue); break;
- case DW_FORM_data8: OS << format("0x%016x", uvalue); break;
+ case DW_FORM_data1: OS << format("0x%02x", (uint8_t)uvalue); break;
+ case DW_FORM_data2: OS << format("0x%04x", (uint16_t)uvalue); break;
+ case DW_FORM_data4: OS << format("0x%08x", (uint32_t)uvalue); break;
+ case DW_FORM_data8: OS << format("0x%016" PRIx64, uvalue); break;
case DW_FORM_string:
OS << '"';
OS.write_escaped(getAsCString(NULL));
@@ -280,7 +280,7 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const {
case DW_FORM_block4:
if (uvalue > 0) {
switch (Form) {
- case DW_FORM_block: OS << format("<0x%llx> ", uvalue); break;
+ case DW_FORM_block: OS << format("<0x%" PRIx64 "> ", uvalue); break;
case DW_FORM_block1: OS << format("<0x%2.2x> ", (uint8_t)uvalue); break;
case DW_FORM_block2: OS << format("<0x%4.4x> ", (uint16_t)uvalue); break;
case DW_FORM_block4: OS << format("<0x%8.8x> ", (uint32_t)uvalue); break;
@@ -314,7 +314,7 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const {
break;
}
case DW_FORM_ref_addr:
- OS << format("0x%016x", uvalue);
+ OS << format("0x%016" PRIx64, uvalue);
break;
case DW_FORM_ref1:
cu_relative_offset = true;
@@ -330,11 +330,11 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const {
break;
case DW_FORM_ref8:
cu_relative_offset = true;
- OS << format("cu + 0x%8.8llx", uvalue);
+ OS << format("cu + 0x%8.8" PRIx64, uvalue);
break;
case DW_FORM_ref_udata:
cu_relative_offset = true;
- OS << format("cu + 0x%llx", uvalue);
+ OS << format("cu + 0x%" PRIx64, uvalue);
break;
// All DW_FORM_indirect attributes should be resolved prior to calling
@@ -348,7 +348,7 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const {
}
if (cu_relative_offset)
- OS << format(" => {0x%8.8x}", (uvalue + (cu ? cu->getOffset() : 0)));
+ OS << format(" => {0x%8.8" PRIx64 "}", uvalue + (cu ? cu->getOffset() : 0));
}
const char*
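Alongside PRIx64, the DW_FORM_data hunks above narrow uvalue with explicit casts so each %0Nx width matches the printed operand. The same pattern in a standalone sketch:

    // Sketch: matching format widths to explicitly narrowed values.
    #include <cinttypes>
    #include <cstdio>

    void dumpData(uint64_t uvalue) {
      std::printf("0x%02x\n",  (uint8_t)uvalue);    // DW_FORM_data1
      std::printf("0x%04x\n",  (uint16_t)uvalue);   // DW_FORM_data2
      std::printf("0x%08x\n",  (uint32_t)uvalue);   // DW_FORM_data4
      std::printf("0x%016" PRIx64 "\n", uvalue);    // DW_FORM_data8
    }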
diff --git a/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h b/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
new file mode 100644
index 0000000..1c07c94
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/EventListenerCommon.h
@@ -0,0 +1,67 @@
+//===-- EventListenerCommon.h - Shared JITEventListener code ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common functionality for JITEventListener implementations
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef EVENT_LISTENER_COMMON_H
+#define EVENT_LISTENER_COMMON_H
+
+#include "llvm/Metadata.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Path.h"
+
+namespace llvm {
+
+namespace jitprofiling {
+
+class FilenameCache {
+ // Holds the filename of each Scope, so that we can pass a null-terminated
+ // string into oprofile. Use an AssertingVH rather than a ValueMap because we
+ // shouldn't be modifying any MDNodes while this map is alive.
+ DenseMap<AssertingVH<MDNode>, std::string> Filenames;
+ DenseMap<AssertingVH<MDNode>, std::string> Paths;
+
+ public:
+ const char *getFilename(MDNode *Scope) {
+ std::string &Filename = Filenames[Scope];
+ if (Filename.empty()) {
+ DIScope DIScope(Scope);
+ Filename = DIScope.getFilename();
+ }
+ return Filename.c_str();
+ }
+
+ const char *getFullPath(MDNode *Scope) {
+ std::string &P = Paths[Scope];
+ if (P.empty()) {
+ DIScope DIScope(Scope);
+ StringRef DirName = DIScope.getDirectory();
+ StringRef FileName = DIScope.getFilename();
+ SmallString<256> FullPath;
+ if (DirName != "." && DirName != "") {
+ FullPath = DirName;
+ }
+ if (FileName != "") {
+ sys::path::append(FullPath, FileName);
+ }
+ P = FullPath.str();
+ }
+ return P.c_str();
+ }
+};
+
+} // namespace jitprofiling
+
+} // namespace llvm
+
+#endif // EVENT_LISTENER_COMMON_H
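A short sketch of how a JIT listener consumes FilenameCache, mirroring the Intel listener added later in this patch (the helper name pathForLoc is an assumption):

    // Sketch: resolving a cached, null-terminated path for a DebugLoc.
    using namespace llvm;
    using namespace llvm::jitprofiling;

    const char *pathForLoc(FilenameCache &Cache, DebugLoc Loc,
                           LLVMContext &Ctx) {
      if (Loc.isUnknown())
        return "";
      MDNode *Scope = Loc.getScope(Ctx);   // debug scope metadata
      return Cache.getFullPath(Scope);     // cached across calls
    }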
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index 525877b..a744d0c 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -28,6 +28,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Host.h"
+#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include <cmath>
@@ -41,14 +42,12 @@ ExecutionEngine *(*ExecutionEngine::JITCtor)(
Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
bool GVsWithCode,
TargetMachine *TM) = 0;
ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
bool GVsWithCode,
TargetMachine *TM) = 0;
ExecutionEngine *(*ExecutionEngine::InterpCtor)(Module *M,
@@ -308,13 +307,12 @@ void ExecutionEngine::runStaticConstructorsDestructors(Module *module,
// Should be an array of '{ i32, void ()* }' structs. The first value is
// the init priority, which we ignore.
- if (isa<ConstantAggregateZero>(GV->getInitializer()))
+ ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (InitList == 0)
return;
- ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
- if (isa<ConstantAggregateZero>(InitList->getOperand(i)))
- continue;
- ConstantStruct *CS = cast<ConstantStruct>(InitList->getOperand(i));
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
+ if (CS == 0) continue;
Constant *FP = CS->getOperand(1);
if (FP->isNullValue())
@@ -404,14 +402,15 @@ ExecutionEngine *ExecutionEngine::create(Module *M,
std::string *ErrorStr,
CodeGenOpt::Level OptLevel,
bool GVsWithCode) {
- return EngineBuilder(M)
+ EngineBuilder EB = EngineBuilder(M)
.setEngineKind(ForceInterpreter
? EngineKind::Interpreter
: EngineKind::JIT)
.setErrorStr(ErrorStr)
.setOptLevel(OptLevel)
- .setAllocateGVsWithCode(GVsWithCode)
- .create();
+ .setAllocateGVsWithCode(GVsWithCode);
+
+ return EB.create();
}
/// createJIT - This is the factory method for creating a JIT for the current
@@ -420,7 +419,7 @@ ExecutionEngine *ExecutionEngine::create(Module *M,
ExecutionEngine *ExecutionEngine::createJIT(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
+ CodeGenOpt::Level OL,
bool GVsWithCode,
Reloc::Model RM,
CodeModel::Model CMM) {
@@ -432,18 +431,25 @@ ExecutionEngine *ExecutionEngine::createJIT(Module *M,
// Use the defaults for extra parameters. Users can use EngineBuilder to
// set them.
- StringRef MArch = "";
- StringRef MCPU = "";
- SmallVector<std::string, 1> MAttrs;
-
- TargetMachine *TM =
- EngineBuilder::selectTarget(M, MArch, MCPU, MAttrs, RM, CMM, ErrorStr);
+ EngineBuilder EB(M);
+ EB.setEngineKind(EngineKind::JIT);
+ EB.setErrorStr(ErrorStr);
+ EB.setRelocationModel(RM);
+ EB.setCodeModel(CMM);
+ EB.setAllocateGVsWithCode(GVsWithCode);
+ EB.setOptLevel(OL);
+ EB.setJITMemoryManager(JMM);
+
+ // TODO: permit custom TargetOptions here
+ TargetMachine *TM = EB.selectTarget();
if (!TM || (ErrorStr && ErrorStr->length() > 0)) return 0;
- return ExecutionEngine::JITCtor(M, ErrorStr, JMM, OptLevel, GVsWithCode, TM);
+ return ExecutionEngine::JITCtor(M, ErrorStr, JMM, GVsWithCode, TM);
}
-ExecutionEngine *EngineBuilder::create() {
+ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
+ OwningPtr<TargetMachine> TheTM(TM); // Take ownership.
+
// Make sure we can resolve symbols in the program as well. The zero arg
// to the function tells DynamicLibrary to load the program, not a library.
if (sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr))
@@ -464,21 +470,24 @@ ExecutionEngine *EngineBuilder::create() {
// Unless the interpreter was explicitly selected or the JIT is not linked,
// try making a JIT.
- if (WhichEngine & EngineKind::JIT) {
- if (TargetMachine *TM = EngineBuilder::selectTarget(M, MArch, MCPU, MAttrs,
- RelocModel, CMModel,
- ErrorStr)) {
- if (UseMCJIT && ExecutionEngine::MCJITCtor) {
- ExecutionEngine *EE =
- ExecutionEngine::MCJITCtor(M, ErrorStr, JMM, OptLevel,
- AllocateGVsWithCode, TM);
- if (EE) return EE;
- } else if (ExecutionEngine::JITCtor) {
- ExecutionEngine *EE =
- ExecutionEngine::JITCtor(M, ErrorStr, JMM, OptLevel,
- AllocateGVsWithCode, TM);
- if (EE) return EE;
- }
+ if ((WhichEngine & EngineKind::JIT) && TheTM) {
+ Triple TT(M->getTargetTriple());
+ if (!TM->getTarget().hasJIT()) {
+ errs() << "WARNING: This target JIT is not designed for the host"
+ << " you are running. If bad things happen, please choose"
+ << " a different -march switch.\n";
+ }
+
+ if (UseMCJIT && ExecutionEngine::MCJITCtor) {
+ ExecutionEngine *EE =
+ ExecutionEngine::MCJITCtor(M, ErrorStr, JMM,
+ AllocateGVsWithCode, TheTM.take());
+ if (EE) return EE;
+ } else if (ExecutionEngine::JITCtor) {
+ ExecutionEngine *EE =
+ ExecutionEngine::JITCtor(M, ErrorStr, JMM,
+ AllocateGVsWithCode, TheTM.take());
+ if (EE) return EE;
}
}
@@ -944,30 +953,47 @@ void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
DEBUG(Init->dump());
- if (isa<UndefValue>(Init)) {
+ if (isa<UndefValue>(Init))
return;
- } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
+
+ if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
unsigned ElementSize =
getTargetData()->getTypeAllocSize(CP->getType()->getElementType());
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
return;
- } else if (isa<ConstantAggregateZero>(Init)) {
+ }
+
+ if (isa<ConstantAggregateZero>(Init)) {
memset(Addr, 0, (size_t)getTargetData()->getTypeAllocSize(Init->getType()));
return;
- } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
+ }
+
+ if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
unsigned ElementSize =
getTargetData()->getTypeAllocSize(CPA->getType()->getElementType());
for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
return;
- } else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
+ }
+
+ if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
const StructLayout *SL =
getTargetData()->getStructLayout(cast<StructType>(CPS->getType()));
for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
return;
- } else if (Init->getType()->isFirstClassType()) {
+ }
+
+ if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(Init)) {
+ // CDS is already laid out in host memory order.
+ StringRef Data = CDS->getRawDataValues();
+ memcpy(Addr, Data.data(), Data.size());
+ return;
+ }
+
+ if (Init->getType()->isFirstClassType()) {
GenericValue Val = getConstantValue(Init);
StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
return;
@@ -1123,6 +1149,6 @@ void ExecutionEngineState::AddressMapConfig::onDelete(ExecutionEngineState *EES,
void ExecutionEngineState::AddressMapConfig::onRAUW(ExecutionEngineState *,
const GlobalValue *,
const GlobalValue *) {
- assert(false && "The ExecutionEngine doesn't know how to handle a"
- " RAUW on a value it has a global mapping for.");
+ llvm_unreachable("The ExecutionEngine doesn't know how to handle a"
+ " RAUW on a value it has a global mapping for.");
}
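With create() now accepting (and taking ownership of) a TargetMachine, callers can inspect or configure the target before the engine exists. A minimal sketch built only from the calls visible in this hunk (the wrapper name makeJIT is hypothetical):

    // Sketch: the new EngineBuilder::create(TargetMachine*) flow.
    using namespace llvm;

    ExecutionEngine *makeJIT(Module *M, std::string *Err) {
      EngineBuilder EB(M);
      EB.setEngineKind(EngineKind::JIT);
      EB.setErrorStr(Err);

      TargetMachine *TM = EB.selectTarget();  // 0 on failure
      if (!TM)
        return 0;
      return EB.create(TM);  // the builder now owns TM
    }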
diff --git a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
index f8f1f4a..75e680a 100644
--- a/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -76,9 +76,7 @@ double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal) {
return unwrap(GenVal)->DoubleVal;
default:
llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
- break;
}
- return 0; // Not reached
}
void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal) {
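Several hunks in this import replace assert(false) plus a dead return with llvm_unreachable, which documents intent and is marked noreturn, so no placeholder value is needed. The shape of the change (widthFor is an illustrative function, not from the diff):

    // Sketch: llvm_unreachable replacing an unreachable return path.
    #include "llvm/Support/ErrorHandling.h"

    int widthFor(char Kind) {
      switch (Kind) {
      case 'b': return 8;
      case 'w': return 16;
      case 'd': return 32;
      }
      llvm_unreachable("unknown width kind");  // no dead 'return 0'
    }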
diff --git a/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
new file mode 100644
index 0000000..5dfa78f
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp
@@ -0,0 +1,183 @@
+//===-- IntelJITEventListener.cpp - Tell Intel profiler about JITed code --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object to tell Intel(R) VTune(TM)
+// Amplifier XE 2011 about JITted functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+
+#define DEBUG_TYPE "amplifier-jit-event-listener"
+#include "llvm/Function.h"
+#include "llvm/Metadata.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/ExecutionEngine/IntelJITEventsWrapper.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/ValueHandle.h"
+#include "EventListenerCommon.h"
+
+using namespace llvm;
+using namespace llvm::jitprofiling;
+
+namespace {
+
+class IntelJITEventListener : public JITEventListener {
+ typedef DenseMap<void*, unsigned int> MethodIDMap;
+
+ IntelJITEventsWrapper& Wrapper;
+ MethodIDMap MethodIDs;
+ FilenameCache Filenames;
+
+public:
+ IntelJITEventListener(IntelJITEventsWrapper& libraryWrapper)
+ : Wrapper(libraryWrapper) {
+ }
+
+ ~IntelJITEventListener() {
+ }
+
+ virtual void NotifyFunctionEmitted(const Function &F,
+ void *FnStart, size_t FnSize,
+ const EmittedFunctionDetails &Details);
+
+ virtual void NotifyFreeingMachineCode(void *OldPtr);
+};
+
+static LineNumberInfo LineStartToIntelJITFormat(
+ uintptr_t StartAddress,
+ uintptr_t Address,
+ DebugLoc Loc) {
+ LineNumberInfo Result;
+
+ Result.Offset = Address - StartAddress;
+ Result.LineNumber = Loc.getLine();
+
+ return Result;
+}
+
+static iJIT_Method_Load FunctionDescToIntelJITFormat(
+ IntelJITEventsWrapper& Wrapper,
+ const char* FnName,
+ uintptr_t FnStart,
+ size_t FnSize) {
+ iJIT_Method_Load Result;
+ memset(&Result, 0, sizeof(iJIT_Method_Load));
+
+ Result.method_id = Wrapper.iJIT_GetNewMethodID();
+ Result.method_name = const_cast<char*>(FnName);
+ Result.method_load_address = reinterpret_cast<void*>(FnStart);
+ Result.method_size = FnSize;
+
+ Result.class_id = 0;
+ Result.class_file_name = NULL;
+ Result.user_data = NULL;
+ Result.user_data_size = 0;
+ Result.env = iJDE_JittingAPI;
+
+ return Result;
+}
+
+// Adds the just-emitted function to the symbol table.
+void IntelJITEventListener::NotifyFunctionEmitted(
+ const Function &F, void *FnStart, size_t FnSize,
+ const EmittedFunctionDetails &Details) {
+ iJIT_Method_Load FunctionMessage = FunctionDescToIntelJITFormat(Wrapper,
+ F.getName().data(),
+ reinterpret_cast<uint64_t>(FnStart),
+ FnSize);
+
+ std::vector<LineNumberInfo> LineInfo;
+
+ if (!Details.LineStarts.empty()) {
+ // Now convert the line number information from the address/DebugLoc
+ // format in Details to the offset/lineno in Intel JIT API format.
+
+ LineInfo.reserve(Details.LineStarts.size() + 1);
+
+ DebugLoc FirstLoc = Details.LineStarts[0].Loc;
+ assert(!FirstLoc.isUnknown()
+ && "LineStarts should not contain unknown DebugLocs");
+
+ MDNode *FirstLocScope = FirstLoc.getScope(F.getContext());
+ DISubprogram FunctionDI = getDISubprogram(FirstLocScope);
+ if (FunctionDI.Verify()) {
+ FunctionMessage.source_file_name = const_cast<char*>(
+ Filenames.getFullPath(FirstLocScope));
+
+ LineNumberInfo FirstLine;
+ FirstLine.Offset = 0;
+ FirstLine.LineNumber = FunctionDI.getLineNumber();
+ LineInfo.push_back(FirstLine);
+ }
+
+ for (std::vector<EmittedFunctionDetails::LineStart>::const_iterator I =
+ Details.LineStarts.begin(), E = Details.LineStarts.end();
+ I != E; ++I) {
+ // This implementation ignores the DebugLoc filename because the Intel
+ // JIT API does not support multiple source files associated with a single
+ // JIT function.
+ LineInfo.push_back(LineStartToIntelJITFormat(
+ reinterpret_cast<uintptr_t>(FnStart),
+ I->Address,
+ I->Loc));
+
+ // If we have no file name yet for the function, use the filename from
+ // the first instruction that has one.
+ if (FunctionMessage.source_file_name == 0) {
+ MDNode *scope = I->Loc.getScope(
+ Details.MF->getFunction()->getContext());
+ FunctionMessage.source_file_name = const_cast<char*>(
+ Filenames.getFullPath(scope));
+ }
+ }
+
+ FunctionMessage.line_number_size = LineInfo.size();
+ FunctionMessage.line_number_table = &*LineInfo.begin();
+ } else {
+ FunctionMessage.line_number_size = 0;
+ FunctionMessage.line_number_table = 0;
+ }
+
+ Wrapper.iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ &FunctionMessage);
+ MethodIDs[FnStart] = FunctionMessage.method_id;
+}
+
+void IntelJITEventListener::NotifyFreeingMachineCode(void *FnStart) {
+ MethodIDMap::iterator I = MethodIDs.find(FnStart);
+ if (I != MethodIDs.end()) {
+ Wrapper.iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_UNLOAD_START, &I->second);
+ MethodIDs.erase(I);
+ }
+}
+
+} // anonymous namespace.
+
+namespace llvm {
+JITEventListener *JITEventListener::createIntelJITEventListener() {
+ static OwningPtr<IntelJITEventsWrapper> JITProfilingWrapper(
+ new IntelJITEventsWrapper);
+ return new IntelJITEventListener(*JITProfilingWrapper);
+}
+
+// for testing
+JITEventListener *JITEventListener::createIntelJITEventListener(
+ IntelJITEventsWrapper* TestImpl) {
+ return new IntelJITEventListener(*TestImpl);
+}
+
+} // namespace llvm
+
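Wiring the new listener into a JIT uses the existing registration hook on ExecutionEngine. A sketch — attachVTune is a hypothetical helper, and the engine does not take ownership of the listener:

    // Sketch: attaching the Intel listener to a live ExecutionEngine.
    using namespace llvm;

    void attachVTune(ExecutionEngine &EE) {
      JITEventListener *L = JITEventListener::createIntelJITEventListener();
      EE.RegisterJITEventListener(L);  // keep L alive while EE runs
    }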
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 27917da..af47be9 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -625,24 +625,6 @@ void Interpreter::visitReturnInst(ReturnInst &I) {
popStackAndReturnValueToCaller(RetTy, Result);
}
-void Interpreter::visitUnwindInst(UnwindInst &I) {
- // Unwind stack
- Instruction *Inst;
- do {
- ECStack.pop_back();
- if (ECStack.empty())
- report_fatal_error("Empty stack during unwind!");
- Inst = ECStack.back().Caller.getInstruction();
- } while (!(Inst && isa<InvokeInst>(Inst)));
-
- // Return from invoke
- ExecutionContext &InvokingSF = ECStack.back();
- InvokingSF.Caller = CallSite();
-
- // Go to exceptional destination BB of invoke instruction
- SwitchToNewBasicBlock(cast<InvokeInst>(Inst)->getUnwindDest(), InvokingSF);
-}
-
void Interpreter::visitUnreachableInst(UnreachableInst &I) {
report_fatal_error("Program executed an 'unreachable' instruction!");
}
@@ -668,12 +650,10 @@ void Interpreter::visitSwitchInst(SwitchInst &I) {
// Check to see if any of the cases match...
BasicBlock *Dest = 0;
- unsigned NumCases = I.getNumCases();
- // Skip the first item since that's the default case.
- for (unsigned i = 1; i < NumCases; ++i) {
- GenericValue CaseVal = getOperandValue(I.getCaseValue(i), SF);
+ for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
+ GenericValue CaseVal = getOperandValue(i.getCaseValue(), SF);
if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
- Dest = cast<BasicBlock>(I.getSuccessor(i));
+ Dest = cast<BasicBlock>(i.getCaseSuccessor());
break;
}
}
@@ -1253,8 +1233,7 @@ GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
break;
default:
dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
- llvm_unreachable(0);
- return GenericValue();
+ llvm_unreachable("Unhandled ConstantExpr");
}
return Dest;
}
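The switch-lowering hunk above moves from index-based case access (where slot 0 was the default) to the new CaseIt iterators, which skip the default case entirely. The idiom in isolation (destForValue is illustrative):

    // Sketch: the SwitchInst::CaseIt iteration idiom used above.
    using namespace llvm;

    BasicBlock *destForValue(SwitchInst &SI, ConstantInt *V) {
      for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
           i != e; ++i)
        if (i.getCaseValue() == V)       // ConstantInts are uniqued
          return i.getCaseSuccessor();
      return SI.getDefaultDest();
    }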
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index 055875c..7a206eb 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -94,15 +94,16 @@ static ExFunc lookupFunction(const Function *F) {
FunctionType *FT = F->getFunctionType();
for (unsigned i = 0, e = FT->getNumContainedTypes(); i != e; ++i)
ExtName += getTypeID(FT->getContainedType(i));
- ExtName + "_" + F->getNameStr();
+ ExtName += "_" + F->getName().str();
sys::ScopedLock Writer(*FunctionsLock);
ExFunc FnPtr = FuncNames[ExtName];
if (FnPtr == 0)
- FnPtr = FuncNames["lle_X_" + F->getNameStr()];
+ FnPtr = FuncNames["lle_X_" + F->getName().str()];
if (FnPtr == 0) // Try calling a generic function... if it exists...
FnPtr = (ExFunc)(intptr_t)
- sys::DynamicLibrary::SearchForAddressOfSymbol("lle_X_"+F->getNameStr());
+ sys::DynamicLibrary::SearchForAddressOfSymbol("lle_X_" +
+ F->getName().str());
if (FnPtr != 0)
ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
return FnPtr;
@@ -296,14 +297,8 @@ GenericValue Interpreter::callExternalFunction(Function *F,
// Functions "exported" to the running application...
//
-// Visual Studio warns about returning GenericValue in extern "C" linkage
-#ifdef _MSC_VER
- #pragma warning(disable : 4190)
-#endif
-
-extern "C" { // Don't add C++ manglings to llvm mangling :)
-
// void atexit(Function*)
+static
GenericValue lle_X_atexit(FunctionType *FT,
const std::vector<GenericValue> &Args) {
assert(Args.size() == 1);
@@ -314,6 +309,7 @@ GenericValue lle_X_atexit(FunctionType *FT,
}
// void exit(int)
+static
GenericValue lle_X_exit(FunctionType *FT,
const std::vector<GenericValue> &Args) {
TheInterpreter->exitCalled(Args[0]);
@@ -321,6 +317,7 @@ GenericValue lle_X_exit(FunctionType *FT,
}
// void abort(void)
+static
GenericValue lle_X_abort(FunctionType *FT,
const std::vector<GenericValue> &Args) {
//FIXME: should we report or raise here?
@@ -331,6 +328,7 @@ GenericValue lle_X_abort(FunctionType *FT,
// int sprintf(char *, const char *, ...) - a very rough implementation to make
// output useful.
+static
GenericValue lle_X_sprintf(FunctionType *FT,
const std::vector<GenericValue> &Args) {
char *OutputBuffer = (char *)GVTOP(Args[0]);
@@ -408,11 +406,11 @@ GenericValue lle_X_sprintf(FunctionType *FT,
break;
}
}
- return GV;
}
// int printf(const char *, ...) - a very rough implementation to make output
// useful.
+static
GenericValue lle_X_printf(FunctionType *FT,
const std::vector<GenericValue> &Args) {
char Buffer[10000];
@@ -425,6 +423,7 @@ GenericValue lle_X_printf(FunctionType *FT,
}
// int sscanf(const char *format, ...);
+static
GenericValue lle_X_sscanf(FunctionType *FT,
const std::vector<GenericValue> &args) {
assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
@@ -440,6 +439,7 @@ GenericValue lle_X_sscanf(FunctionType *FT,
}
// int scanf(const char *format, ...);
+static
GenericValue lle_X_scanf(FunctionType *FT,
const std::vector<GenericValue> &args) {
assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
@@ -456,6 +456,7 @@ GenericValue lle_X_scanf(FunctionType *FT,
// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
// output useful.
+static
GenericValue lle_X_fprintf(FunctionType *FT,
const std::vector<GenericValue> &Args) {
assert(Args.size() >= 2);
@@ -469,14 +470,6 @@ GenericValue lle_X_fprintf(FunctionType *FT,
return GV;
}
-} // End extern "C"
-
-// Done with externals; turn the warning back on
-#ifdef _MSC_VER
- #pragma warning(default: 4190)
-#endif
-
-
void Interpreter::initializeExternalFunctions() {
sys::ScopedLock Writer(*FunctionsLock);
FuncNames["lle_X_atexit"] = lle_X_atexit;
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
index ee2b459..28c5775 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -115,6 +115,12 @@ public:
virtual GenericValue runFunction(Function *F,
const std::vector<GenericValue> &ArgValues);
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) {
+ // FIXME: not implemented.
+ return 0;
+ }
+
/// recompileAndRelinkFunction - For the interpreter, functions are always
/// up-to-date.
///
@@ -165,7 +171,6 @@ public:
void visitCallSite(CallSite CS);
void visitCallInst(CallInst &I) { visitCallSite (CallSite (&I)); }
void visitInvokeInst(InvokeInst &I) { visitCallSite (CallSite (&I)); }
- void visitUnwindInst(UnwindInst &I);
void visitUnreachableInst(UnreachableInst &I);
void visitShl(BinaryOperator &I);
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp
deleted file mode 100644
index 2251a8e..0000000
--- a/contrib/llvm/lib/ExecutionEngine/JIT/Intercept.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-//===-- Intercept.cpp - System function interception routines -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// If a function call occurs to an external function, the JIT is designed to use
-// the dynamic loader interface to find a function to call. This is useful for
-// calling system calls and library functions that are not available in LLVM.
-// Some system calls, however, need to be handled specially. For this reason,
-// we intercept some of them here and use our own stubs to handle them.
-//
-//===----------------------------------------------------------------------===//
-
-#include "JIT.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Config/config.h"
-using namespace llvm;
-
-// AtExitHandlers - List of functions to call when the program exits,
-// registered with the atexit() library function.
-static std::vector<void (*)()> AtExitHandlers;
-
-/// runAtExitHandlers - Run any functions registered by the program's
-/// calls to atexit(3), which we intercept and store in
-/// AtExitHandlers.
-///
-static void runAtExitHandlers() {
- while (!AtExitHandlers.empty()) {
- void (*Fn)() = AtExitHandlers.back();
- AtExitHandlers.pop_back();
- Fn();
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Function stubs that are invoked instead of certain library calls
-//===----------------------------------------------------------------------===//
-
-// Force the following functions to be linked in to anything that uses the
-// JIT. This is a hack designed to work around the all-too-clever Glibc
-// strategy of making these functions work differently when inlined vs. when
-// not inlined, and hiding their real definitions in a separate archive file
-// that the dynamic linker can't see. For more info, search for
-// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
-#if defined(__linux__)
-#if defined(HAVE_SYS_STAT_H)
-#include <sys/stat.h>
-#endif
-#include <fcntl.h>
-#include <unistd.h>
-/* stat functions are redirected to __xstat with a version number. On x86-64
- * linking with libc_nonshared.a and -Wl,--export-dynamic doesn't make 'stat'
- * available as an exported symbol, so we have to add it explicitly.
- */
-namespace {
-class StatSymbols {
-public:
- StatSymbols() {
- sys::DynamicLibrary::AddSymbol("stat", (void*)(intptr_t)stat);
- sys::DynamicLibrary::AddSymbol("fstat", (void*)(intptr_t)fstat);
- sys::DynamicLibrary::AddSymbol("lstat", (void*)(intptr_t)lstat);
- sys::DynamicLibrary::AddSymbol("stat64", (void*)(intptr_t)stat64);
- sys::DynamicLibrary::AddSymbol("\x1stat64", (void*)(intptr_t)stat64);
- sys::DynamicLibrary::AddSymbol("\x1open64", (void*)(intptr_t)open64);
- sys::DynamicLibrary::AddSymbol("\x1lseek64", (void*)(intptr_t)lseek64);
- sys::DynamicLibrary::AddSymbol("fstat64", (void*)(intptr_t)fstat64);
- sys::DynamicLibrary::AddSymbol("lstat64", (void*)(intptr_t)lstat64);
- sys::DynamicLibrary::AddSymbol("atexit", (void*)(intptr_t)atexit);
- sys::DynamicLibrary::AddSymbol("mknod", (void*)(intptr_t)mknod);
- }
-};
-}
-static StatSymbols initStatSymbols;
-#endif // __linux__
-
-// jit_exit - Used to intercept the "exit" library call.
-static void jit_exit(int Status) {
- runAtExitHandlers(); // Run atexit handlers...
- exit(Status);
-}
-
-// jit_atexit - Used to intercept the "atexit" library call.
-static int jit_atexit(void (*Fn)()) {
- AtExitHandlers.push_back(Fn); // Take note of atexit handler...
- return 0; // Always successful
-}
-
-static int jit_noop() {
- return 0;
-}
-
-//===----------------------------------------------------------------------===//
-//
-/// getPointerToNamedFunction - This method returns the address of the specified
-/// function by using the dynamic loader interface. As such it is only useful
-/// for resolving library symbols, not code generated symbols.
-///
-void *JIT::getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure) {
- if (!isSymbolSearchingDisabled()) {
- // Check to see if this is one of the functions we want to intercept. Note,
- // we cast to intptr_t here to silence a -pedantic warning that complains
- // about casting a function pointer to a normal pointer.
- if (Name == "exit") return (void*)(intptr_t)&jit_exit;
- if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
-
- // We should not invoke parent's ctors/dtors from generated main()!
- // On Mingw and Cygwin, the symbol __main is resolved to
- // callee's(eg. tools/lli) one, to invoke wrong duplicated ctors
- // (and register wrong callee's dtors with atexit(3)).
- // We expect ExecutionEngine::runStaticConstructorsDestructors()
- // is called before ExecutionEngine::runFunctionAsMain() is called.
- if (Name == "__main") return (void*)(intptr_t)&jit_noop;
-
- const char *NameStr = Name.c_str();
- // If this is an asm specifier, skip the sentinal.
- if (NameStr[0] == 1) ++NameStr;
-
- // If it's an external function, look it up in the process image...
- void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
- if (Ptr) return Ptr;
-
- // If it wasn't found and if it starts with an underscore ('_') character,
- // and has an asm specifier, try again without the underscore.
- if (Name[0] == 1 && NameStr[0] == '_') {
- Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
- if (Ptr) return Ptr;
- }
-
- // Darwin/PPC adds $LDBLStub suffixes to various symbols like printf. These
- // are references to hidden visibility symbols that dlsym cannot resolve.
- // If we have one of these, strip off $LDBLStub and try again.
-#if defined(__APPLE__) && defined(__ppc__)
- if (Name.size() > 9 && Name[Name.size()-9] == '$' &&
- memcmp(&Name[Name.size()-8], "LDBLStub", 8) == 0) {
- // First try turning $LDBLStub into $LDBL128. If that fails, strip it off.
- // This mirrors logic in libSystemStubs.a.
- std::string Prefix = std::string(Name.begin(), Name.end()-9);
- if (void *Ptr = getPointerToNamedFunction(Prefix+"$LDBL128", false))
- return Ptr;
- if (void *Ptr = getPointerToNamedFunction(Prefix, false))
- return Ptr;
- }
-#endif
- }
-
- /// If a LazyFunctionCreator is installed, use it to get/create the function.
- if (LazyFunctionCreator)
- if (void *RP = LazyFunctionCreator(Name))
- return RP;
-
- if (AbortOnFailure) {
- report_fatal_error("Program used external function '"+Name+
- "' which could not be resolved!");
- }
- return 0;
-}
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index d773009..a942299 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -23,6 +23,7 @@
#include "llvm/CodeGen/MachineCodeInfo.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetJITInfo.h"
@@ -206,7 +207,6 @@ void DarwinRegisterFrame(void* FrameBegin) {
ExecutionEngine *JIT::createJIT(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
bool GVsWithCode,
TargetMachine *TM) {
// Try to register the program as a source of symbols to resolve against.
@@ -216,7 +216,7 @@ ExecutionEngine *JIT::createJIT(Module *M,
// If the target supports JIT code generation, create the JIT.
if (TargetJITInfo *TJ = TM->getJITInfo()) {
- return new JIT(M, *TM, *TJ, JMM, OptLevel, GVsWithCode);
+ return new JIT(M, *TM, *TJ, JMM, GVsWithCode);
} else {
if (ErrorStr)
*ErrorStr = "target does not support JIT code generation";
@@ -268,9 +268,10 @@ extern "C" {
}
JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
- JITMemoryManager *JMM, CodeGenOpt::Level OptLevel, bool GVsWithCode)
- : ExecutionEngine(M), TM(tm), TJI(tji), AllocateGVsWithCode(GVsWithCode),
- isAlreadyCodeGenerating(false) {
+ JITMemoryManager *jmm, bool GVsWithCode)
+ : ExecutionEngine(M), TM(tm), TJI(tji),
+ JMM(jmm ? jmm : JITMemoryManager::CreateDefaultMemManager()),
+ AllocateGVsWithCode(GVsWithCode), isAlreadyCodeGenerating(false) {
setTargetData(TM.getTargetData());
jitstate = new JITState(M);
@@ -288,7 +289,7 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
// Turn the machine code intermediate representation into bytes in memory that
// may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE, OptLevel)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -323,6 +324,7 @@ JIT::~JIT() {
AllJits->Remove(this);
delete jitstate;
delete JCE;
+ // JMM is owned by JCE, so there is no need to delete JMM here.
delete &TM;
}
@@ -341,7 +343,7 @@ void JIT::addModule(Module *M) {
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -372,7 +374,7 @@ bool JIT::removeModule(Module *M) {
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
+ if (TM.addPassesToEmitMachineCode(PM, *JCE)) {
report_fatal_error("Target does not support machine code emission!");
}
@@ -476,7 +478,6 @@ GenericValue JIT::runFunction(Function *F,
case Type::FP128TyID:
case Type::PPC_FP128TyID:
llvm_unreachable("long double not supported yet");
- return rv;
case Type::PointerTyID:
return PTOGV(((void*(*)())(intptr_t)FPtr)());
}
@@ -708,12 +709,32 @@ void *JIT::getPointerToBasicBlock(BasicBlock *BB) {
if (I != getBasicBlockAddressMap(locked).end()) {
return I->second;
} else {
- assert(0 && "JIT does not have BB address for address-of-label, was"
- " it eliminated by optimizer?");
- return 0;
+ llvm_unreachable("JIT does not have BB address for address-of-label, was"
+ " it eliminated by optimizer?");
}
}
+void *JIT::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure){
+ if (!isSymbolSearchingDisabled()) {
+ void *ptr = JMM->getPointerToNamedFunction(Name, false);
+ if (ptr)
+ return ptr;
+ }
+
+ /// If a LazyFunctionCreator is installed, use it to get/create the function.
+ if (LazyFunctionCreator)
+ if (void *RP = LazyFunctionCreator(Name))
+ return RP;
+
+ if (AbortOnFailure) {
+ report_fatal_error("Program used external function '"+Name+
+ "' which could not be resolved!");
+ }
+ return 0;
+}
+
+
/// getOrEmitGlobalVariable - Return the address of the specified global
/// variable, possibly emitting it to memory if needed. This is used by the
/// Emitter.
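
The new JIT::getPointerToNamedFunction above asks the JITMemoryManager first and only then falls back to any installed LazyFunctionCreator before aborting. A minimal sketch of that fallback hook, assuming a hypothetical host helper (the names here are illustrative, not part of this change):

    #include "llvm/ExecutionEngine/ExecutionEngine.h"
    #include <stdint.h>
    #include <string>

    static int my_helper() { return 42; }      // hypothetical host function

    // Called for any symbol the memory manager could not resolve.
    static void *resolveUnknown(const std::string &Name) {
      if (Name == "my_helper")                 // hypothetical symbol name
        return (void *)(intptr_t)&my_helper;
      return 0;  // let the JIT report the fatal resolution error
    }

    // Given an ExecutionEngine *EE created elsewhere:
    //   EE->InstallLazyFunctionCreator(resolveUnknown);
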
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
index 92dcb0e..2ae155b 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
@@ -58,6 +58,7 @@ class JIT : public ExecutionEngine {
TargetMachine &TM; // The current target we are compiling to
TargetJITInfo &TJI; // The JITInfo for the target we are compiling to
JITCodeEmitter *JCE; // JCE object
+ JITMemoryManager *JMM;
std::vector<JITEventListener*> EventListeners;
/// AllocateGVsWithCode - Some applications require that global variables and
@@ -78,8 +79,7 @@ class JIT : public ExecutionEngine {
JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
- JITMemoryManager *JMM, CodeGenOpt::Level OptLevel,
- bool AllocateGVsWithCode);
+ JITMemoryManager *JMM, bool AllocateGVsWithCode);
public:
~JIT();
@@ -118,15 +118,15 @@ public:
const std::vector<GenericValue> &ArgValues);
/// getPointerToNamedFunction - This method returns the address of the
- /// specified function by using the dlsym function call. As such it is only
+ /// specified function by using the MemoryManager. As such it is only
/// useful for resolving library symbols, not code generated symbols.
///
/// If AbortOnFailure is false and no function with the given name is
/// found, this function silently returns a null pointer. Otherwise,
/// it prints a message to stderr and aborts.
///
- void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true);
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
// CompilationCallback - Invoked the first time that a call site is found,
// which causes lazy compilation of the target function.
@@ -185,7 +185,6 @@ public:
static ExecutionEngine *createJIT(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
bool GVsWithCode,
TargetMachine *TM);
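
With CodeGenOpt::Level dropped from createJIT, the optimization level now travels with the TargetMachine that EngineBuilder constructs. A caller-side sketch, assuming this snapshot's EngineBuilder API:

    #include "llvm/ExecutionEngine/ExecutionEngine.h"
    #include "llvm/Module.h"
    #include <string>
    using namespace llvm;

    ExecutionEngine *buildJIT(Module *M) {
      std::string Err;
      // The opt level is set on the builder and baked into the TargetMachine
      // it creates; createJIT itself no longer takes it.
      ExecutionEngine *EE = EngineBuilder(M)
                                .setErrorStr(&Err)
                                .setEngineKind(EngineKind::JIT)
                                .setOptLevel(CodeGenOpt::Default)
                                .create();
      return EE;  // null on failure; Err then holds the reason
    }
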
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
deleted file mode 100644
index e71c20b..0000000
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
+++ /dev/null
@@ -1,211 +0,0 @@
-//===-- JITDebugRegisterer.cpp - Register debug symbols for JIT -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a JITDebugRegisterer object that is used by the JIT to
-// register debug info with debuggers like GDB.
-//
-//===----------------------------------------------------------------------===//
-
-#include "JITDebugRegisterer.h"
-#include "../../CodeGen/ELF.h"
-#include "../../CodeGen/ELFWriter.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Function.h"
-#include "llvm/Module.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/MutexGuard.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Mutex.h"
-#include <string>
-
-namespace llvm {
-
-// This must be kept in sync with gdb/gdb/jit.h .
-extern "C" {
-
- // Debuggers puts a breakpoint in this function.
- LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() { }
-
- // We put information about the JITed function in this global, which the
- // debugger reads. Make sure to specify the version statically, because the
- // debugger checks the version before we can set it during runtime.
- struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
-
-}
-
-namespace {
-
- /// JITDebugLock - Used to serialize all code registration events, since they
- /// modify global variables.
- sys::Mutex JITDebugLock;
-
-}
-
-JITDebugRegisterer::JITDebugRegisterer(TargetMachine &tm) : TM(tm), FnMap() { }
-
-JITDebugRegisterer::~JITDebugRegisterer() {
- // Free all ELF memory.
- for (RegisteredFunctionsMap::iterator I = FnMap.begin(), E = FnMap.end();
- I != E; ++I) {
- // Call the private method that doesn't update the map so our iterator
- // doesn't break.
- UnregisterFunctionInternal(I);
- }
- FnMap.clear();
-}
-
-std::string JITDebugRegisterer::MakeELF(const Function *F, DebugInfo &I) {
- // Stack allocate an empty module with an empty LLVMContext for the ELFWriter
- // API. We don't use the real module because then the ELFWriter would write
- // out unnecessary GlobalValues during finalization.
- LLVMContext Context;
- Module M("", Context);
-
- // Make a buffer for the ELF in memory.
- std::string Buffer;
- raw_string_ostream O(Buffer);
- ELFWriter EW(O, TM);
- EW.doInitialization(M);
-
- // Copy the binary into the .text section. This isn't necessary, but it's
- // useful to be able to disassemble the ELF by hand.
- ELFSection &Text = EW.getTextSection(const_cast<Function *>(F));
- Text.Addr = (uint64_t)I.FnStart;
- // TODO: We could eliminate this copy if we somehow used a pointer/size pair
- // instead of a vector.
- Text.getData().assign(I.FnStart, I.FnEnd);
-
- // Copy the exception handling call frame information into the .eh_frame
- // section. This allows GDB to get a good stack trace, particularly on
- // linux x86_64. Mark this as a PROGBITS section that needs to be loaded
- // into memory at runtime.
- ELFSection &EH = EW.getSection(".eh_frame", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC);
- // Pointers in the DWARF EH info are all relative to the EH frame start,
- // which is stored here.
- EH.Addr = (uint64_t)I.EhStart;
- // TODO: We could eliminate this copy if we somehow used a pointer/size pair
- // instead of a vector.
- EH.getData().assign(I.EhStart, I.EhEnd);
-
- // Add this single function to the symbol table, so the debugger prints the
- // name instead of '???'. We give the symbol default global visibility.
- ELFSym *FnSym = ELFSym::getGV(F,
- ELF::STB_GLOBAL,
- ELF::STT_FUNC,
- ELF::STV_DEFAULT);
- FnSym->SectionIdx = Text.SectionIdx;
- FnSym->Size = I.FnEnd - I.FnStart;
- FnSym->Value = 0; // Offset from start of section.
- EW.SymbolList.push_back(FnSym);
-
- EW.doFinalization(M);
- O.flush();
-
- // When trying to debug why GDB isn't getting the debug info right, it's
- // awfully helpful to write the object file to disk so that it can be
- // inspected with readelf and objdump.
- if (JITEmitDebugInfoToDisk) {
- std::string Filename;
- raw_string_ostream O2(Filename);
- O2 << "/tmp/llvm_function_" << I.FnStart << "_" << F->getNameStr() << ".o";
- O2.flush();
- std::string Errors;
- raw_fd_ostream O3(Filename.c_str(), Errors);
- O3 << Buffer;
- O3.close();
- }
-
- return Buffer;
-}
-
-void JITDebugRegisterer::RegisterFunction(const Function *F, DebugInfo &I) {
- // TODO: Support non-ELF platforms.
- if (!TM.getELFWriterInfo())
- return;
-
- std::string Buffer = MakeELF(F, I);
-
- jit_code_entry *JITCodeEntry = new jit_code_entry();
- JITCodeEntry->symfile_addr = Buffer.c_str();
- JITCodeEntry->symfile_size = Buffer.size();
-
- // Add a mapping from F to the entry and buffer, so we can delete this
- // info later.
- FnMap[F] = std::make_pair(Buffer, JITCodeEntry);
-
- // Acquire the lock and do the registration.
- {
- MutexGuard locked(JITDebugLock);
- __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
-
- // Insert this entry at the head of the list.
- JITCodeEntry->prev_entry = NULL;
- jit_code_entry *NextEntry = __jit_debug_descriptor.first_entry;
- JITCodeEntry->next_entry = NextEntry;
- if (NextEntry != NULL) {
- NextEntry->prev_entry = JITCodeEntry;
- }
- __jit_debug_descriptor.first_entry = JITCodeEntry;
- __jit_debug_descriptor.relevant_entry = JITCodeEntry;
- __jit_debug_register_code();
- }
-}
-
-void JITDebugRegisterer::UnregisterFunctionInternal(
- RegisteredFunctionsMap::iterator I) {
- jit_code_entry *&JITCodeEntry = I->second.second;
-
- // Acquire the lock and do the unregistration.
- {
- MutexGuard locked(JITDebugLock);
- __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
-
- // Remove the jit_code_entry from the linked list.
- jit_code_entry *PrevEntry = JITCodeEntry->prev_entry;
- jit_code_entry *NextEntry = JITCodeEntry->next_entry;
- if (NextEntry) {
- NextEntry->prev_entry = PrevEntry;
- }
- if (PrevEntry) {
- PrevEntry->next_entry = NextEntry;
- } else {
- assert(__jit_debug_descriptor.first_entry == JITCodeEntry);
- __jit_debug_descriptor.first_entry = NextEntry;
- }
-
- // Tell GDB which entry we removed, and unregister the code.
- __jit_debug_descriptor.relevant_entry = JITCodeEntry;
- __jit_debug_register_code();
- }
-
- delete JITCodeEntry;
- JITCodeEntry = NULL;
-
- // Free the ELF file in memory.
- std::string &Buffer = I->second.first;
- Buffer.clear();
-}
-
-void JITDebugRegisterer::UnregisterFunction(const Function *F) {
- // TODO: Support non-ELF platforms.
- if (!TM.getELFWriterInfo())
- return;
-
- RegisteredFunctionsMap::iterator I = FnMap.find(F);
- if (I == FnMap.end()) return;
- UnregisterFunctionInternal(I);
- FnMap.erase(I);
-}
-
-} // end namespace llvm
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h b/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h
deleted file mode 100644
index dce506b..0000000
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.h
+++ /dev/null
@@ -1,116 +0,0 @@
-//===-- JITDebugRegisterer.h - Register debug symbols for JIT -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a JITDebugRegisterer object that is used by the JIT to
-// register debug info with debuggers like GDB.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_EXECUTION_ENGINE_JIT_DEBUGREGISTERER_H
-#define LLVM_EXECUTION_ENGINE_JIT_DEBUGREGISTERER_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/Support/DataTypes.h"
-#include <string>
-
-// This must be kept in sync with gdb/gdb/jit.h .
-extern "C" {
-
- typedef enum {
- JIT_NOACTION = 0,
- JIT_REGISTER_FN,
- JIT_UNREGISTER_FN
- } jit_actions_t;
-
- struct jit_code_entry {
- struct jit_code_entry *next_entry;
- struct jit_code_entry *prev_entry;
- const char *symfile_addr;
- uint64_t symfile_size;
- };
-
- struct jit_descriptor {
- uint32_t version;
- // This should be jit_actions_t, but we want to be specific about the
- // bit-width.
- uint32_t action_flag;
- struct jit_code_entry *relevant_entry;
- struct jit_code_entry *first_entry;
- };
-
-}
-
-namespace llvm {
-
-class ELFSection;
-class Function;
-class TargetMachine;
-
-
-/// This class encapsulates information we want to send to the debugger.
-///
-struct DebugInfo {
- uint8_t *FnStart;
- uint8_t *FnEnd;
- uint8_t *EhStart;
- uint8_t *EhEnd;
-
- DebugInfo() : FnStart(0), FnEnd(0), EhStart(0), EhEnd(0) {}
-};
-
-typedef DenseMap< const Function*, std::pair<std::string, jit_code_entry*> >
- RegisteredFunctionsMap;
-
-/// This class registers debug info for JITed code with an attached debugger.
-/// Without proper debug info, GDB can't do things like source level debugging
-/// or even produce a proper stack trace on linux-x86_64. To use this class,
-/// whenever a function is JITed, create a DebugInfo struct and pass it to the
-/// RegisterFunction method. The method will then do whatever is necessary to
-/// inform the debugger about the JITed function.
-class JITDebugRegisterer {
-
- TargetMachine &TM;
-
- /// FnMap - A map of functions that have been registered to the associated
- /// temporary files. Used for cleanup.
- RegisteredFunctionsMap FnMap;
-
- /// MakeELF - Builds the ELF file in memory and returns a std::string that
- /// contains the ELF.
- std::string MakeELF(const Function *F, DebugInfo &I);
-
-public:
- JITDebugRegisterer(TargetMachine &tm);
-
- /// ~JITDebugRegisterer - Unregisters all code and frees symbol files.
- ///
- ~JITDebugRegisterer();
-
- /// RegisterFunction - Register debug info for the given function with an
- /// attached debugger. Clients must call UnregisterFunction on all
- /// registered functions before deleting them to free the associated symbol
- /// file and unregister it from the debugger.
- void RegisterFunction(const Function *F, DebugInfo &I);
-
- /// UnregisterFunction - Unregister the debug info for the given function
- /// from the debugger and free associated memory.
- void UnregisterFunction(const Function *F);
-
-private:
- /// UnregisterFunctionInternal - Unregister the debug info for the given
- /// function from the debugger and delete any temporary files. The private
- /// version of this method does not remove the function from FnMap so that it
- /// can be called while iterating over FnMap.
- void UnregisterFunctionInternal(RegisteredFunctionsMap::iterator I);
-
-};
-
-} // end namespace llvm
-
-#endif // LLVM_EXECUTION_ENGINE_JIT_DEBUGREGISTERER_H
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
index 8f84ac7..42a136e 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
@@ -313,7 +313,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
MI != E; ++MI) {
if (!MI->isLabel()) {
- MayThrow |= MI->getDesc().isCall();
+ MayThrow |= MI->isCall();
continue;
}
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index 24020ee..504c8bd 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -14,7 +14,6 @@
#define DEBUG_TYPE "jit"
#include "JIT.h"
-#include "JITDebugRegisterer.h"
#include "JITDwarfEmitter.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Constants.h"
@@ -77,8 +76,8 @@ namespace {
struct NoRAUWValueMapConfig : public ValueMapConfig<ValueTy> {
typedef JITResolverState *ExtraData;
static void onRAUW(JITResolverState *, Value *Old, Value *New) {
- assert(false && "The JIT doesn't know how to handle a"
- " RAUW on a value it has emitted.");
+ llvm_unreachable("The JIT doesn't know how to handle a"
+ " RAUW on a value it has emitted.");
}
};
@@ -324,9 +323,6 @@ namespace {
/// DE - The dwarf emitter for the jit.
OwningPtr<JITDwarfEmitter> DE;
- /// DR - The debug registerer for the jit.
- OwningPtr<JITDebugRegisterer> DR;
-
/// LabelLocations - This vector is a mapping from Label ID's to their
/// address.
DenseMap<MCSymbol*, uintptr_t> LabelLocations;
@@ -362,22 +358,22 @@ namespace {
/// Instance of the JIT
JIT *TheJIT;
+ bool JITExceptionHandling;
+
public:
JITEmitter(JIT &jit, JITMemoryManager *JMM, TargetMachine &TM)
: SizeEstimate(0), Resolver(jit, *this), MMI(0), CurFn(0),
- EmittedFunctions(this), TheJIT(&jit) {
+ EmittedFunctions(this), TheJIT(&jit),
+ JITExceptionHandling(TM.Options.JITExceptionHandling) {
MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
if (jit.getJITInfo().needsGOT()) {
MemMgr->AllocateGOT();
DEBUG(dbgs() << "JIT is managing a GOT\n");
}
- if (JITExceptionHandling || JITEmitDebugInfo) {
+ if (JITExceptionHandling) {
DE.reset(new JITDwarfEmitter(jit));
}
- if (JITEmitDebugInfo) {
- DR.reset(new JITDebugRegisterer(TM));
- }
}
~JITEmitter() {
delete MemMgr;
@@ -968,7 +964,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
}
});
- if (JITExceptionHandling || JITEmitDebugInfo) {
+ if (JITExceptionHandling) {
uintptr_t ActualSize = 0;
SavedBufferBegin = BufferBegin;
SavedBufferEnd = BufferEnd;
@@ -983,7 +979,6 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
EhStart);
MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr,
FrameRegister);
- uint8_t *EhEnd = CurBufferPtr;
BufferBegin = SavedBufferBegin;
BufferEnd = SavedBufferEnd;
CurBufferPtr = SavedCurBufferPtr;
@@ -991,15 +986,6 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
if (JITExceptionHandling) {
TheJIT->RegisterTable(F.getFunction(), FrameRegister);
}
-
- if (JITEmitDebugInfo) {
- DebugInfo I;
- I.FnStart = FnStart;
- I.FnEnd = FnEnd;
- I.EhStart = EhStart;
- I.EhEnd = EhEnd;
- DR->RegisterFunction(F.getFunction(), I);
- }
}
if (MMI)
@@ -1037,17 +1023,13 @@ void JITEmitter::deallocateMemForFunction(const Function *F) {
EmittedFunctions.erase(Emitted);
}
- if(JITExceptionHandling) {
+ if (JITExceptionHandling) {
TheJIT->DeregisterTable(F);
}
-
- if (JITEmitDebugInfo) {
- DR->UnregisterFunction(F);
- }
}
-void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) {
+void *JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) {
if (BufferBegin)
return JITCodeEmitter::allocateSpace(Size, Alignment);
@@ -1059,7 +1041,7 @@ void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) {
return CurBufferPtr;
}
-void* JITEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
+void *JITEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
// Delegate this call through the memory manager.
return MemMgr->allocateGlobal(Size, Alignment);
}
@@ -1179,6 +1161,9 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
}
break;
}
+ case MachineJumpTableInfo::EK_GPRel64BlockAddress:
+ llvm_unreachable(
+ "JT Info emission not implemented for GPRel64BlockAddress yet.");
}
}
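
A pattern repeated throughout this patch: assert(0) and the dead return statements after it are replaced by llvm_unreachable, which is marked noreturn, so the compiler knows control cannot fall through even in NDEBUG builds (assert compiles away and would leave the path reachable). In isolation:

    #include "llvm/Support/ErrorHandling.h"

    int classify(int Kind) {
      switch (Kind) {
      case 0: return 10;
      case 1: return 20;
      }
      // noreturn: no trailing 'return' is needed, and no
      // 'control reaches end of non-void function' warning fires.
      llvm_unreachable("unexpected Kind");
    }
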
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
index eec23ce..2d1775c 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
@@ -23,10 +23,22 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Memory.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Config/config.h"
#include <vector>
#include <cassert>
#include <climits>
#include <cstring>
+
+#if defined(__linux__)
+#if defined(HAVE_SYS_STAT_H)
+#include <sys/stat.h>
+#endif
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+
using namespace llvm;
STATISTIC(NumSlabs, "Number of slabs of memory allocated by the JIT");
@@ -314,6 +326,11 @@ namespace {
/// should allocate a separate slab.
static const size_t DefaultSizeThreshold;
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call.
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
+
void AllocateGOT();
// Testing methods.
@@ -441,6 +458,50 @@ namespace {
return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
}
+ /// allocateCodeSection - Allocate memory for a code section.
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID) {
+ // FIXME: Alignment handling.
+ FreeRangeHeader* candidateBlock = FreeMemoryList;
+ FreeRangeHeader* head = FreeMemoryList;
+ FreeRangeHeader* iter = head->Next;
+
+ uintptr_t largest = candidateBlock->BlockSize;
+
+ // Search for the largest free block.
+ while (iter != head) {
+ if (iter->BlockSize > largest) {
+ largest = iter->BlockSize;
+ candidateBlock = iter;
+ }
+ iter = iter->Next;
+ }
+
+ largest = largest - sizeof(MemoryRangeHeader);
+
+ // If this block isn't big enough for the allocation desired, allocate
+ // another block of memory and add it to the free list.
+ if (largest < Size || largest <= FreeRangeHeader::getMinBlockSize()) {
+ DEBUG(dbgs() << "JIT: Allocating another slab of memory for function.");
+ candidateBlock = allocateNewCodeSlab((size_t)Size);
+ }
+
+ // Select this candidate block for allocation
+ CurBlock = candidateBlock;
+
+ // Allocate the entire memory block.
+ FreeMemoryList = candidateBlock->AllocateBlock();
+ // Release the memory at the end of this block that isn't needed.
+ FreeMemoryList = CurBlock->TrimAllocationToSize(FreeMemoryList, Size);
+ return (uint8_t *)(CurBlock + 1);
+ }
+
+ /// allocateDataSection - Allocate memory for a data section.
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID) {
+ return (uint8_t*)DataAllocator.Allocate(Size, Alignment);
+ }
+
/// startExceptionTable - Use startFunctionBody to allocate memory for the
/// function's exception table.
uint8_t* startExceptionTable(const Function* F, uintptr_t &ActualSize) {
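
The allocateCodeSection added above scans the circular free list for the largest block, then allocates and trims it; if even the largest block (minus its header) cannot hold the request, a fresh slab is allocated first. The scan in isolation, over simplified types (Node is a stand-in for FreeRangeHeader):

    #include <cstddef>

    struct Node { std::size_t Size; Node *Next; };  // stand-in type

    // Walk the circular list once, starting after Head, keeping the largest.
    static Node *pickLargest(Node *Head) {
      Node *Best = Head;
      for (Node *I = Head->Next; I != Head; I = I->Next)
        if (I->Size > Best->Size)
          Best = I;
      return Best;  // caller falls back to a new slab if Best is too small
    }
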
@@ -713,6 +774,139 @@ bool DefaultJITMemoryManager::CheckInvariants(std::string &ErrorStr) {
return true;
}
+//===----------------------------------------------------------------------===//
+// getPointerToNamedFunction() implementation.
+//===----------------------------------------------------------------------===//
+
+// AtExitHandlers - List of functions to call when the program exits,
+// registered with the atexit() library function.
+static std::vector<void (*)()> AtExitHandlers;
+
+/// runAtExitHandlers - Run any functions registered by the program's
+/// calls to atexit(3), which we intercept and store in
+/// AtExitHandlers.
+///
+static void runAtExitHandlers() {
+ while (!AtExitHandlers.empty()) {
+ void (*Fn)() = AtExitHandlers.back();
+ AtExitHandlers.pop_back();
+ Fn();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Function stubs that are invoked instead of certain library calls
+//
+// Force the following functions to be linked in to anything that uses the
+// JIT. This is a hack designed to work around the all-too-clever Glibc
+// strategy of making these functions work differently when inlined vs. when
+// not inlined, and hiding their real definitions in a separate archive file
+// that the dynamic linker can't see. For more info, search for
+// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+#if defined(__linux__)
+/* stat functions are redirecting to __xstat with a version number. On x86-64
+ * linking with libc_nonshared.a and -Wl,--export-dynamic doesn't make 'stat'
+ * available as an exported symbol, so we have to add it explicitly.
+ */
+namespace {
+class StatSymbols {
+public:
+ StatSymbols() {
+ sys::DynamicLibrary::AddSymbol("stat", (void*)(intptr_t)stat);
+ sys::DynamicLibrary::AddSymbol("fstat", (void*)(intptr_t)fstat);
+ sys::DynamicLibrary::AddSymbol("lstat", (void*)(intptr_t)lstat);
+ sys::DynamicLibrary::AddSymbol("stat64", (void*)(intptr_t)stat64);
+ sys::DynamicLibrary::AddSymbol("\x1stat64", (void*)(intptr_t)stat64);
+ sys::DynamicLibrary::AddSymbol("\x1open64", (void*)(intptr_t)open64);
+ sys::DynamicLibrary::AddSymbol("\x1lseek64", (void*)(intptr_t)lseek64);
+ sys::DynamicLibrary::AddSymbol("fstat64", (void*)(intptr_t)fstat64);
+ sys::DynamicLibrary::AddSymbol("lstat64", (void*)(intptr_t)lstat64);
+ sys::DynamicLibrary::AddSymbol("atexit", (void*)(intptr_t)atexit);
+ sys::DynamicLibrary::AddSymbol("mknod", (void*)(intptr_t)mknod);
+ }
+};
+}
+static StatSymbols initStatSymbols;
+#endif // __linux__
+
+// jit_exit - Used to intercept the "exit" library call.
+static void jit_exit(int Status) {
+ runAtExitHandlers(); // Run atexit handlers...
+ exit(Status);
+}
+
+// jit_atexit - Used to intercept the "atexit" library call.
+static int jit_atexit(void (*Fn)()) {
+ AtExitHandlers.push_back(Fn); // Take note of atexit handler...
+ return 0; // Always successful
+}
+
+static int jit_noop() {
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+//
+/// getPointerToNamedFunction - This method returns the address of the specified
+/// function by using the dynamic loader interface. As such it is only useful
+/// for resolving library symbols, not code generated symbols.
+///
+void *DefaultJITMemoryManager::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure) {
+ // Check to see if this is one of the functions we want to intercept. Note,
+ // we cast to intptr_t here to silence a -pedantic warning that complains
+ // about casting a function pointer to a normal pointer.
+ if (Name == "exit") return (void*)(intptr_t)&jit_exit;
+ if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
+
+ // We should not invoke the parent's ctors/dtors from generated main()!
+ // On MinGW and Cygwin, the symbol __main would otherwise resolve to the
+ // caller's (e.g. tools/lli), invoking the wrong, duplicated ctors (and
+ // registering the wrong callee's dtors with atexit(3)).
+ // We expect ExecutionEngine::runStaticConstructorsDestructors() to be
+ // called before ExecutionEngine::runFunctionAsMain().
+ if (Name == "__main") return (void*)(intptr_t)&jit_noop;
+
+ const char *NameStr = Name.c_str();
+ // If this is an asm specifier, skip the sentinel.
+ if (NameStr[0] == 1) ++NameStr;
+
+ // If it's an external function, look it up in the process image...
+ void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+ if (Ptr) return Ptr;
+
+ // If it wasn't found and if it starts with an underscore ('_') character,
+ // try again without the underscore.
+ if (NameStr[0] == '_') {
+ Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
+ if (Ptr) return Ptr;
+ }
+
+ // Darwin/PPC adds $LDBLStub suffixes to various symbols like printf. These
+ // are references to hidden visibility symbols that dlsym cannot resolve.
+ // If we have one of these, strip off $LDBLStub and try again.
+#if defined(__APPLE__) && defined(__ppc__)
+ if (Name.size() > 9 && Name[Name.size()-9] == '$' &&
+ memcmp(&Name[Name.size()-8], "LDBLStub", 8) == 0) {
+ // First try turning $LDBLStub into $LDBL128. If that fails, strip it off.
+ // This mirrors logic in libSystemStubs.a.
+ std::string Prefix = std::string(Name.begin(), Name.end()-9);
+ if (void *Ptr = getPointerToNamedFunction(Prefix+"$LDBL128", false))
+ return Ptr;
+ if (void *Ptr = getPointerToNamedFunction(Prefix, false))
+ return Ptr;
+ }
+#endif
+
+ if (AbortOnFailure) {
+ report_fatal_error("Program used external function '"+Name+
+ "' which could not be resolved!");
+ }
+ return 0;
+}
+
+
+
JITMemoryManager *JITMemoryManager::CreateDefaultMemManager() {
return new DefaultJITMemoryManager();
}
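
Moving getPointerToNamedFunction into the memory manager means symbol lookup can now be interposed at the same layer that manages code memory. A sketch of a delegating wrapper over a simplified interface (ResolverIface is a stand-in, not the full JITMemoryManager contract):

    #include <cstdio>
    #include <string>

    struct ResolverIface {  // stand-in for the relevant slice of the base
      virtual void *getPointerToNamedFunction(const std::string &Name,
                                              bool AbortOnFailure = true) = 0;
      virtual ~ResolverIface() {}
    };

    struct LoggingResolver : ResolverIface {
      ResolverIface &Inner;
      LoggingResolver(ResolverIface &I) : Inner(I) {}
      virtual void *getPointerToNamedFunction(const std::string &Name,
                                              bool AbortOnFailure = true) {
        std::fprintf(stderr, "JIT resolving: %s\n", Name.c_str());
        return Inner.getPointerToNamedFunction(Name, AbortOnFailure);
      }
    };
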
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/Intercept.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/Intercept.cpp
deleted file mode 100644
index f83f428..0000000
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/Intercept.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-//===-- Intercept.cpp - System function interception routines -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// If a function call occurs to an external function, the JIT is designed to use
-// the dynamic loader interface to find a function to call. This is useful for
-// calling system calls and library functions that are not available in LLVM.
-// Some system calls, however, need to be handled specially. For this reason,
-// we intercept some of them here and use our own stubs to handle them.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCJIT.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Config/config.h"
-using namespace llvm;
-
-// AtExitHandlers - List of functions to call when the program exits,
-// registered with the atexit() library function.
-static std::vector<void (*)()> AtExitHandlers;
-
-/// runAtExitHandlers - Run any functions registered by the program's
-/// calls to atexit(3), which we intercept and store in
-/// AtExitHandlers.
-///
-static void runAtExitHandlers() {
- while (!AtExitHandlers.empty()) {
- void (*Fn)() = AtExitHandlers.back();
- AtExitHandlers.pop_back();
- Fn();
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Function stubs that are invoked instead of certain library calls
-//===----------------------------------------------------------------------===//
-
-// Force the following functions to be linked in to anything that uses the
-// JIT. This is a hack designed to work around the all-too-clever Glibc
-// strategy of making these functions work differently when inlined vs. when
-// not inlined, and hiding their real definitions in a separate archive file
-// that the dynamic linker can't see. For more info, search for
-// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
-#if defined(__linux__)
-#if defined(HAVE_SYS_STAT_H)
-#include <sys/stat.h>
-#endif
-#include <fcntl.h>
-#include <unistd.h>
-/* stat functions are redirecting to __xstat with a version number. On x86-64
- * linking with libc_nonshared.a and -Wl,--export-dynamic doesn't make 'stat'
- * available as an exported symbol, so we have to add it explicitly.
- */
-namespace {
-class StatSymbols {
-public:
- StatSymbols() {
- sys::DynamicLibrary::AddSymbol("stat", (void*)(intptr_t)stat);
- sys::DynamicLibrary::AddSymbol("fstat", (void*)(intptr_t)fstat);
- sys::DynamicLibrary::AddSymbol("lstat", (void*)(intptr_t)lstat);
- sys::DynamicLibrary::AddSymbol("stat64", (void*)(intptr_t)stat64);
- sys::DynamicLibrary::AddSymbol("\x1stat64", (void*)(intptr_t)stat64);
- sys::DynamicLibrary::AddSymbol("\x1open64", (void*)(intptr_t)open64);
- sys::DynamicLibrary::AddSymbol("\x1lseek64", (void*)(intptr_t)lseek64);
- sys::DynamicLibrary::AddSymbol("fstat64", (void*)(intptr_t)fstat64);
- sys::DynamicLibrary::AddSymbol("lstat64", (void*)(intptr_t)lstat64);
- sys::DynamicLibrary::AddSymbol("atexit", (void*)(intptr_t)atexit);
- sys::DynamicLibrary::AddSymbol("mknod", (void*)(intptr_t)mknod);
- }
-};
-}
-static StatSymbols initStatSymbols;
-#endif // __linux__
-
-// jit_exit - Used to intercept the "exit" library call.
-static void jit_exit(int Status) {
- runAtExitHandlers(); // Run atexit handlers...
- exit(Status);
-}
-
-// jit_atexit - Used to intercept the "atexit" library call.
-static int jit_atexit(void (*Fn)()) {
- AtExitHandlers.push_back(Fn); // Take note of atexit handler...
- return 0; // Always successful
-}
-
-static int jit_noop() {
- return 0;
-}
-
-//===----------------------------------------------------------------------===//
-//
-/// getPointerToNamedFunction - This method returns the address of the specified
-/// function by using the dynamic loader interface. As such it is only useful
-/// for resolving library symbols, not code generated symbols.
-///
-void *MCJIT::getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure) {
- if (!isSymbolSearchingDisabled()) {
- // Check to see if this is one of the functions we want to intercept. Note,
- // we cast to intptr_t here to silence a -pedantic warning that complains
- // about casting a function pointer to a normal pointer.
- if (Name == "exit") return (void*)(intptr_t)&jit_exit;
- if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
-
- // We should not invoke parent's ctors/dtors from generated main()!
- // On Mingw and Cygwin, the symbol __main is resolved to
- // callee's(eg. tools/lli) one, to invoke wrong duplicated ctors
- // (and register wrong callee's dtors with atexit(3)).
- // We expect ExecutionEngine::runStaticConstructorsDestructors()
- // is called before ExecutionEngine::runFunctionAsMain() is called.
- if (Name == "__main") return (void*)(intptr_t)&jit_noop;
-
- const char *NameStr = Name.c_str();
- // If this is an asm specifier, skip the sentinal.
- if (NameStr[0] == 1) ++NameStr;
-
- // If it's an external function, look it up in the process image...
- void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
- if (Ptr) return Ptr;
-
- // If it wasn't found and if it starts with an underscore ('_') character,
- // and has an asm specifier, try again without the underscore.
- if (Name[0] == 1 && NameStr[0] == '_') {
- Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
- if (Ptr) return Ptr;
- }
-
- // Darwin/PPC adds $LDBLStub suffixes to various symbols like printf. These
- // are references to hidden visibility symbols that dlsym cannot resolve.
- // If we have one of these, strip off $LDBLStub and try again.
-#if defined(__APPLE__) && defined(__ppc__)
- if (Name.size() > 9 && Name[Name.size()-9] == '$' &&
- memcmp(&Name[Name.size()-8], "LDBLStub", 8) == 0) {
- // First try turning $LDBLStub into $LDBL128. If that fails, strip it off.
- // This mirrors logic in libSystemStubs.a.
- std::string Prefix = std::string(Name.begin(), Name.end()-9);
- if (void *Ptr = getPointerToNamedFunction(Prefix+"$LDBL128", false))
- return Ptr;
- if (void *Ptr = getPointerToNamedFunction(Prefix, false))
- return Ptr;
- }
-#endif
- }
-
- /// If a LazyFunctionCreator is installed, use it to get/create the function.
- if (LazyFunctionCreator)
- if (void *RP = LazyFunctionCreator(Name))
- return RP;
-
- if (AbortOnFailure) {
- report_fatal_error("Program used external function '"+Name+
- "' which could not be resolved!");
- }
- return 0;
-}
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 7c8a740..44f89cf 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -36,7 +36,6 @@ extern "C" void LLVMLinkInMCJIT() {
ExecutionEngine *MCJIT::createJIT(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
bool GVsWithCode,
TargetMachine *TM) {
// Try to register the program as a source of symbols to resolve against.
@@ -46,8 +45,7 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
// If the target supports JIT code generation, create the JIT.
if (TargetJITInfo *TJ = TM->getJITInfo())
- return new MCJIT(M, TM, *TJ, new MCJITMemoryManager(JMM, M), OptLevel,
- GVsWithCode);
+ return new MCJIT(M, TM, *TJ, new MCJITMemoryManager(JMM, M), GVsWithCode);
if (ErrorStr)
*ErrorStr = "target does not support JIT code generation";
@@ -55,8 +53,7 @@ ExecutionEngine *MCJIT::createJIT(Module *M,
}
MCJIT::MCJIT(Module *m, TargetMachine *tm, TargetJITInfo &tji,
- RTDyldMemoryManager *MM, CodeGenOpt::Level OptLevel,
- bool AllocateGVsWithCode)
+ RTDyldMemoryManager *MM, bool AllocateGVsWithCode)
: ExecutionEngine(m), TM(tm), MemMgr(MM), M(m), OS(Buffer), Dyld(MM) {
setTargetData(TM->getTargetData());
@@ -64,7 +61,7 @@ MCJIT::MCJIT(Module *m, TargetMachine *tm, TargetJITInfo &tji,
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
- if (TM->addPassesToEmitMC(PM, Ctx, OS, CodeGenOpt::Default, false)) {
+ if (TM->addPassesToEmitMC(PM, Ctx, OS, false)) {
report_fatal_error("Target does not support MC emission!");
}
@@ -77,9 +74,9 @@ MCJIT::MCJIT(Module *m, TargetMachine *tm, TargetJITInfo &tji,
OS.flush();
// Load the object into the dynamic linker.
- // FIXME: It would be nice to avoid making yet another copy.
- MemoryBuffer *MB = MemoryBuffer::getMemBufferCopy(StringRef(Buffer.data(),
- Buffer.size()));
+ MemoryBuffer *MB = MemoryBuffer::getMemBuffer(StringRef(Buffer.data(),
+ Buffer.size()),
+ "", false);
if (Dyld.loadObject(MB))
report_fatal_error(Dyld.getErrorString());
// Resolve any relocations.
@@ -88,11 +85,11 @@ MCJIT::MCJIT(Module *m, TargetMachine *tm, TargetJITInfo &tji,
MCJIT::~MCJIT() {
delete MemMgr;
+ delete TM;
}
void *MCJIT::getPointerToBasicBlock(BasicBlock *BB) {
report_fatal_error("not yet implemented");
- return 0;
}
void *MCJIT::getPointerToFunction(Function *F) {
@@ -211,12 +208,30 @@ GenericValue MCJIT::runFunction(Function *F,
case Type::FP128TyID:
case Type::PPC_FP128TyID:
llvm_unreachable("long double not supported yet");
- return rv;
case Type::PointerTyID:
return PTOGV(((void*(*)())(intptr_t)FPtr)());
}
}
- assert(0 && "Full-featured argument passing not supported yet!");
- return GenericValue();
+ llvm_unreachable("Full-featured argument passing not supported yet!");
+}
+
+void *MCJIT::getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure){
+ if (!isSymbolSearchingDisabled() && MemMgr) {
+ void *ptr = MemMgr->getPointerToNamedFunction(Name, false);
+ if (ptr)
+ return ptr;
+ }
+
+ /// If a LazyFunctionCreator is installed, use it to get/create the function.
+ if (LazyFunctionCreator)
+ if (void *RP = LazyFunctionCreator(Name))
+ return RP;
+
+ if (AbortOnFailure) {
+ report_fatal_error("Program used external function '"+Name+
+ "' which could not be resolved!");
+ }
+ return 0;
}
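
The getMemBufferCopy-to-getMemBuffer switch above removes one full copy of the emitted object: getMemBuffer merely wraps existing bytes (the trailing false disables the null-terminator requirement), so the underlying Buffer storage must stay alive while the dynamic linker uses it. In isolation:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include <cstddef>
    using namespace llvm;

    MemoryBuffer *wrapBytes(const char *Data, size_t Len) {
      // No copy is made; Data must outlive the returned buffer.
      // "jit-object" is an arbitrary illustrative buffer name.
      return MemoryBuffer::getMemBuffer(StringRef(Data, Len),
                                        "jit-object", false);
    }
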
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
index b64c21a..2b3df98 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -24,8 +24,7 @@ namespace llvm {
class MCJIT : public ExecutionEngine {
MCJIT(Module *M, TargetMachine *tm, TargetJITInfo &tji,
- RTDyldMemoryManager *MemMgr, CodeGenOpt::Level OptLevel,
- bool AllocateGVsWithCode);
+ RTDyldMemoryManager *MemMgr, bool AllocateGVsWithCode);
TargetMachine *TM;
MCContext *Ctx;
@@ -66,8 +65,17 @@ public:
/// found, this function silently returns a null pointer. Otherwise,
/// it prints a message to stderr and aborts.
///
- void *getPointerToNamedFunction(const std::string &Name,
- bool AbortOnFailure = true);
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
+
+ /// mapSectionAddress - map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ virtual void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress) {
+ Dyld.mapSectionAddress(LocalAddress, TargetAddress);
+ }
+
/// @}
/// @name (Private) Registration Interfaces
/// @{
@@ -79,7 +87,6 @@ public:
static ExecutionEngine *createJIT(Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
- CodeGenOpt::Level OptLevel,
bool GVsWithCode,
TargetMachine *TM);
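
The new mapSectionAddress hook serves remote-execution setups: the object is linked in the host process, but its sections will run at different addresses in a target process, so the client tells the dynamic linker which target address each locally allocated section corresponds to before relocations are resolved. A hedged sketch (the target address is made up, and this assumes the hook is reachable through the engine pointer in this snapshot):

    #include "llvm/ExecutionEngine/ExecutionEngine.h"
    #include <stdint.h>
    using namespace llvm;

    // Local is a section pointer previously handed out by the memory manager.
    void mapForRemoteTarget(ExecutionEngine *EE, void *Local) {
      const uint64_t TargetAddr = 0x4000;  // illustrative target address
      EE->mapSectionAddress(Local, TargetAddr);
      // Subsequent relocation resolution uses TargetAddr, not Local.
    }
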
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp
new file mode 100644
index 0000000..457fe5e
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp
@@ -0,0 +1,14 @@
+//==-- MCJITMemoryManager.cpp - Definition for the Memory Manager -*-C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCJITMemoryManager.h"
+
+using namespace llvm;
+
+void MCJITMemoryManager::anchor() { }
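
MCJITMemoryManager.cpp exists only to give the class an out-of-line virtual: defining anchor() in exactly one translation unit pins the vtable there, instead of having the compiler emit a weak copy in every file that includes the header. The idiom in general form:

    struct Base {
      virtual void anchor();              // declared in the header
      virtual int value() { return 1; }   // other virtuals may stay inline
    };

    void Base::anchor() {}                // defined once, in one .cpp
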
diff --git a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h
index 40bc031..a68949a 100644
--- a/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h
+++ b/contrib/llvm/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h
@@ -21,45 +21,30 @@ namespace llvm {
// and the RuntimeDyld interface that maps objects, by name, onto their
// matching LLVM IR counterparts in the module(s) being compiled.
class MCJITMemoryManager : public RTDyldMemoryManager {
+ virtual void anchor();
JITMemoryManager *JMM;
// FIXME: Multiple modules.
Module *M;
public:
- MCJITMemoryManager(JITMemoryManager *jmm, Module *m) : JMM(jmm), M(m) {}
+ MCJITMemoryManager(JITMemoryManager *jmm, Module *m) :
+ JMM(jmm ? jmm : JITMemoryManager::CreateDefaultMemManager()), M(m) {}
+ // We own the JMM, so make sure to delete it.
+ ~MCJITMemoryManager() { delete JMM; }
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID) {
+ return JMM->allocateSpace(Size, Alignment);
+ }
- // Allocate ActualSize bytes, or more, for the named function. Return
- // a pointer to the allocated memory and update Size to reflect how much
- // memory was acutally allocated.
- uint8_t *startFunctionBody(const char *Name, uintptr_t &Size) {
- // FIXME: This should really reference the MCAsmInfo to get the global
- // prefix.
- if (Name[0] == '_') ++Name;
- Function *F = M->getFunction(Name);
- // Some ObjC names have a prefixed \01 in the IR. If we failed to find
- // the symbol and it's of the ObjC conventions (starts with "-"), try
- // prepending a \01 and see if we can find it that way.
- if (!F && Name[0] == '-')
- F = M->getFunction((Twine("\1") + Name).str());
- assert(F && "No matching function in JIT IR Module!");
- return JMM->startFunctionBody(F, Size);
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID) {
+ return JMM->allocateSpace(Size, Alignment);
}
- // Mark the end of the function, including how much of the allocated
- // memory was actually used.
- void endFunctionBody(const char *Name, uint8_t *FunctionStart,
- uint8_t *FunctionEnd) {
- // FIXME: This should really reference the MCAsmInfo to get the global
- // prefix.
- if (Name[0] == '_') ++Name;
- Function *F = M->getFunction(Name);
- // Some ObjC names have a prefixed \01 in the IR. If we failed to find
- // the symbol and it's of the ObjC conventions (starts with "-"), try
- // prepending a \01 and see if we can find it that way.
- if (!F && Name[0] == '-')
- F = M->getFunction((Twine("\1") + Name).str());
- assert(F && "No matching function in JIT IR Module!");
- JMM->endFunctionBody(F, FunctionStart, FunctionEnd);
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) {
+ return JMM->getPointerToNamedFunction(Name, AbortOnFailure);
}
};
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
index 9a9ed6d..e6142e3 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp
@@ -7,51 +7,55 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines a JITEventListener object that calls into OProfile to tell
-// it about JITted functions. For now, we only record function names and sizes,
-// but eventually we'll also record line number information.
-//
-// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
-// definition of the interface we're using.
+// This file defines a JITEventListener object that uses OProfileWrapper to tell
+// oprofile about JITted functions, including source line information.
//
//===----------------------------------------------------------------------===//
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+
#define DEBUG_TYPE "oprofile-jit-event-listener"
#include "llvm/Function.h"
-#include "llvm/Metadata.h"
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/OProfileWrapper.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Errno.h"
-#include "llvm/Config/config.h"
-#include <stddef.h>
-using namespace llvm;
+#include "EventListenerCommon.h"
-#if USE_OPROFILE
+#include <dirent.h>
+#include <fcntl.h>
-#include <opagent.h>
+using namespace llvm;
+using namespace llvm::jitprofiling;
namespace {
class OProfileJITEventListener : public JITEventListener {
- op_agent_t Agent;
+ OProfileWrapper& Wrapper;
+
+ void initialize();
+
public:
- OProfileJITEventListener();
+ OProfileJITEventListener(OProfileWrapper& LibraryWrapper)
+ : Wrapper(LibraryWrapper) {
+ initialize();
+ }
+
~OProfileJITEventListener();
virtual void NotifyFunctionEmitted(const Function &F,
- void *FnStart, size_t FnSize,
- const EmittedFunctionDetails &Details);
+ void *FnStart, size_t FnSize,
+ const JITEvent_EmittedFunctionDetails &Details);
+
virtual void NotifyFreeingMachineCode(void *OldPtr);
};
-OProfileJITEventListener::OProfileJITEventListener()
- : Agent(op_open_agent()) {
- if (Agent == NULL) {
+void OProfileJITEventListener::initialize() {
+ if (!Wrapper.op_open_agent()) {
const std::string err_str = sys::StrError();
DEBUG(dbgs() << "Failed to connect to OProfile agent: " << err_str << "\n");
} else {
@@ -60,8 +64,8 @@ OProfileJITEventListener::OProfileJITEventListener()
}
OProfileJITEventListener::~OProfileJITEventListener() {
- if (Agent != NULL) {
- if (op_close_agent(Agent) == -1) {
+ if (Wrapper.isAgentAvailable()) {
+ if (Wrapper.op_close_agent() == -1) {
const std::string err_str = sys::StrError();
DEBUG(dbgs() << "Failed to disconnect from OProfile agent: "
<< err_str << "\n");
@@ -71,22 +75,6 @@ OProfileJITEventListener::~OProfileJITEventListener() {
}
}
-class FilenameCache {
- // Holds the filename of each Scope, so that we can pass a null-terminated
- // string into oprofile. Use an AssertingVH rather than a ValueMap because we
- // shouldn't be modifying any MDNodes while this map is alive.
- DenseMap<AssertingVH<MDNode>, std::string> Filenames;
-
- public:
- const char *getFilename(MDNode *Scope) {
- std::string &Filename = Filenames[Scope];
- if (Filename.empty()) {
- Filename = DIScope(Scope).getFilename();
- }
- return Filename.c_str();
- }
-};
-
static debug_line_info LineStartToOProfileFormat(
const MachineFunction &MF, FilenameCache &Filenames,
uintptr_t Address, DebugLoc Loc) {
@@ -103,9 +91,9 @@ static debug_line_info LineStartToOProfileFormat(
// Adds the just-emitted function to the symbol table.
void OProfileJITEventListener::NotifyFunctionEmitted(
const Function &F, void *FnStart, size_t FnSize,
- const EmittedFunctionDetails &Details) {
+ const JITEvent_EmittedFunctionDetails &Details) {
assert(F.hasName() && FnStart != 0 && "Bad symbol to add");
- if (op_write_native_code(Agent, F.getName().data(),
+ if (Wrapper.op_write_native_code(F.getName().data(),
reinterpret_cast<uint64_t>(FnStart),
FnStart, FnSize) == -1) {
DEBUG(dbgs() << "Failed to tell OProfile about native function "
@@ -151,8 +139,8 @@ void OProfileJITEventListener::NotifyFunctionEmitted(
// line info's address to include the start of the function.
LineInfo[0].vma = reinterpret_cast<uintptr_t>(FnStart);
- if (op_write_debug_line_info(Agent, FnStart,
- LineInfo.size(), &*LineInfo.begin()) == -1) {
+ if (Wrapper.op_write_debug_line_info(FnStart, LineInfo.size(),
+ &*LineInfo.begin()) == -1) {
DEBUG(dbgs()
<< "Failed to tell OProfile about line numbers for native function "
<< F.getName() << " at ["
@@ -164,7 +152,7 @@ void OProfileJITEventListener::NotifyFunctionEmitted(
// Removes the being-deleted function from the symbol table.
void OProfileJITEventListener::NotifyFreeingMachineCode(void *FnStart) {
assert(FnStart && "Invalid function pointer");
- if (op_unload_native_code(Agent, reinterpret_cast<uint64_t>(FnStart)) == -1) {
+ if (Wrapper.op_unload_native_code(reinterpret_cast<uint64_t>(FnStart)) == -1) {
DEBUG(dbgs()
<< "Failed to tell OProfile about unload of native function at "
<< FnStart << "\n");
@@ -174,19 +162,16 @@ void OProfileJITEventListener::NotifyFreeingMachineCode(void *FnStart) {
} // anonymous namespace.
namespace llvm {
-JITEventListener *createOProfileJITEventListener() {
- return new OProfileJITEventListener;
-}
+JITEventListener *JITEventListener::createOProfileJITEventListener() {
+ static OwningPtr<OProfileWrapper> JITProfilingWrapper(new OProfileWrapper);
+ return new OProfileJITEventListener(*JITProfilingWrapper);
}
-#else // USE_OPROFILE
-
-namespace llvm {
-// By defining this to return NULL, we can let clients call it unconditionally,
-// even if they haven't configured with the OProfile libraries.
-JITEventListener *createOProfileJITEventListener() {
- return NULL;
+// For testing: allows injecting an OProfileWrapper test implementation.
+JITEventListener *JITEventListener::createOProfileJITEventListener(
+ OProfileWrapper* TestImpl) {
+ return new OProfileJITEventListener(*TestImpl);
}
-} // namespace llvm
-#endif // USE_OPROFILE
+} // namespace llvm
+
diff --git a/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
new file mode 100644
index 0000000..d67f537
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp
@@ -0,0 +1,263 @@
+//===-- OProfileWrapper.cpp - OProfile JIT API Wrapper implementation -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the interface in OProfileWrapper.h. It is responsible
+// for loading the opagent dynamic library when the first call to an op_
+// function occurs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/OProfileWrapper.h"
+
+#define DEBUG_TYPE "oprofile-wrapper"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/ADT/SmallString.h"
+
+#include <sstream>
+#include <cstring>
+#include <stddef.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+namespace {
+
+// Global mutex to ensure a single thread initializes oprofile agent.
+llvm::sys::Mutex OProfileInitializationMutex;
+
+} // anonymous namespace
+
+namespace llvm {
+
+OProfileWrapper::OProfileWrapper()
+: Agent(0),
+ OpenAgentFunc(0),
+ CloseAgentFunc(0),
+ WriteNativeCodeFunc(0),
+ WriteDebugLineInfoFunc(0),
+ UnloadNativeCodeFunc(0),
+ MajorVersionFunc(0),
+ MinorVersionFunc(0),
+ IsOProfileRunningFunc(0),
+ Initialized(false) {
+}
+
+bool OProfileWrapper::initialize() {
+ using namespace llvm;
+ using namespace llvm::sys;
+
+ MutexGuard Guard(OProfileInitializationMutex);
+
+ if (Initialized)
+ return OpenAgentFunc != 0;
+
+ Initialized = true;
+
+ // If the oprofile daemon is not running, don't load the opagent library
+ if (!isOProfileRunning()) {
+ DEBUG(dbgs() << "OProfile daemon is not detected.\n");
+ return false;
+ }
+
+ std::string error;
+ // Note: LoadLibraryPermanently returns true when the load fails.
+ if (DynamicLibrary::LoadLibraryPermanently("libopagent.so", &error)) {
+ DEBUG(dbgs()
+ << "OProfile connector library libopagent.so could not be loaded: "
+ << error << "\n");
+ }
+
+ // Get the addresses of the opagent functions
+ OpenAgentFunc = (op_open_agent_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_open_agent");
+ CloseAgentFunc = (op_close_agent_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_close_agent");
+ WriteNativeCodeFunc = (op_write_native_code_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_write_native_code");
+ WriteDebugLineInfoFunc = (op_write_debug_line_info_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_write_debug_line_info");
+ UnloadNativeCodeFunc = (op_unload_native_code_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_unload_native_code");
+ MajorVersionFunc = (op_major_version_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_major_version");
+ MinorVersionFunc = (op_minor_version_ptr_t)(intptr_t)
+ DynamicLibrary::SearchForAddressOfSymbol("op_minor_version");
+
+ // If any required function is missing, we can do nothing
+ if (!OpenAgentFunc
+ || !CloseAgentFunc
+ || !WriteNativeCodeFunc
+ || !WriteDebugLineInfoFunc
+ || !UnloadNativeCodeFunc) {
+ OpenAgentFunc = 0;
+ CloseAgentFunc = 0;
+ WriteNativeCodeFunc = 0;
+ WriteDebugLineInfoFunc = 0;
+ UnloadNativeCodeFunc = 0;
+ return false;
+ }
+
+ return true;
+}
+
+bool OProfileWrapper::isOProfileRunning() {
+ if (IsOProfileRunningFunc != 0)
+ return IsOProfileRunningFunc();
+ return checkForOProfileProcEntry();
+}
+
+bool OProfileWrapper::checkForOProfileProcEntry() {
+ DIR* ProcDir;
+
+ ProcDir = opendir("/proc");
+ if (!ProcDir)
+ return false;
+
+ // Walk the /proc tree looking for the oprofile daemon
+ struct dirent* Entry;
+ while (0 != (Entry = readdir(ProcDir))) {
+ if (Entry->d_type == DT_DIR) {
+ // Build a path from the current entry name
+ SmallString<256> CmdLineFName;
+ raw_svector_ostream(CmdLineFName) << "/proc/" << Entry->d_name
+ << "/cmdline";
+
+ // Open the cmdline file
+ int CmdLineFD = open(CmdLineFName.c_str(), O_RDONLY);
+ if (CmdLineFD != -1) {
+ char ExeName[PATH_MAX+1];
+ char* BaseName = 0;
+
+ // Read the cmdline file
+ ssize_t NumRead = read(CmdLineFD, ExeName, PATH_MAX+1);
+ close(CmdLineFD);
+ ssize_t Idx = 0;
+
+ // Find the terminator for the first string
+ while (Idx < NumRead-1 && ExeName[Idx] != 0) {
+ Idx++;
+ }
+
+ // Go back to the last non-null character
+ Idx--;
+
+ // Find the last path separator in the first string
+ while (Idx > 0) {
+ if (ExeName[Idx] == '/') {
+ BaseName = ExeName + Idx + 1;
+ break;
+ }
+ Idx--;
+ }
+
+ // Test this to see if it is the oprofile daemon
+ if (BaseName != 0 && !strcmp("oprofiled", BaseName)) {
+ // If it is, we're done
+ closedir(ProcDir);
+ return true;
+ }
+ }
+ }
+ }
+
+ // We've looked through all the files and didn't find the daemon
+ closedir(ProcDir);
+ return false;
+}
+
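A note on the scan above: /proc/<pid>/cmdline holds the argument vector as NUL-separated strings, which is why the loop finds the first terminator and then walks back to the last '/' before comparing against "oprofiled". A minimal sketch of the same basename extraction, with a hypothetical helper name:

  // Hypothetical helper, not part of this patch: isolate argv[0]'s basename
  // from a raw, NUL-separated cmdline buffer.
  static const char *cmdlineBasename(const char *Buf, ssize_t Len) {
    ssize_t End = 0;
    while (End < Len && Buf[End] != '\0')  // the first argument ends at a NUL
      ++End;
    const char *Base = Buf;
    for (ssize_t i = 0; i < End; ++i)
      if (Buf[i] == '/')
        Base = Buf + i + 1;                // character after the last '/'
    return Base;  // e.g. "/usr/bin/oprofiled" yields "oprofiled"
  }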
+bool OProfileWrapper::op_open_agent() {
+ if (!Initialized)
+ initialize();
+
+ if (OpenAgentFunc != 0) {
+ Agent = OpenAgentFunc();
+ return Agent != 0;
+ }
+
+ return false;
+}
+
+int OProfileWrapper::op_close_agent() {
+ if (!Initialized)
+ initialize();
+
+ int ret = -1;
+ if (Agent && CloseAgentFunc) {
+ ret = CloseAgentFunc(Agent);
+ if (ret == 0) {
+ Agent = 0;
+ }
+ }
+ return ret;
+}
+
+bool OProfileWrapper::isAgentAvailable() {
+ return Agent != 0;
+}
+
+int OProfileWrapper::op_write_native_code(const char* Name,
+ uint64_t Addr,
+ void const* Code,
+ const unsigned int Size) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && WriteNativeCodeFunc)
+ return WriteNativeCodeFunc(Agent, Name, Addr, Code, Size);
+
+ return -1;
+}
+
+int OProfileWrapper::op_write_debug_line_info(
+ void const* Code,
+ size_t NumEntries,
+ struct debug_line_info const* Info) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && WriteDebugLineInfoFunc)
+ return WriteDebugLineInfoFunc(Agent, Code, NumEntries, Info);
+
+ return -1;
+}
+
+int OProfileWrapper::op_major_version() {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && MajorVersionFunc)
+ return MajorVersionFunc();
+
+ return -1;
+}
+
+int OProfileWrapper::op_minor_version() {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && MinorVersionFunc)
+ return MinorVersionFunc();
+
+ return -1;
+}
+
+int OProfileWrapper::op_unload_native_code(uint64_t Addr) {
+ if (!Initialized)
+ initialize();
+
+ if (Agent && UnloadNativeCodeFunc)
+ return UnloadNativeCodeFunc(Agent, Addr);
+
+ return -1;
+}
+
+} // namespace llvm
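Everything in this file follows one lazy-binding pattern: nothing touches libopagent.so until the first op_* call, and each entry point is resolved by name at runtime. A condensed sketch of that pattern using the same LLVM DynamicLibrary API (the library and symbol names here are placeholders, not part of the patch):

  #include "llvm/Support/DynamicLibrary.h"
  #include <stdint.h>
  #include <string>

  typedef int (*example_fn_t)(void);  // placeholder entry-point type

  static example_fn_t bindExampleSymbol() {
    static example_fn_t Fn = 0;
    static bool Tried = false;
    if (Tried)
      return Fn;                      // bind at most once, like initialize()
    Tried = true;
    std::string Err;
    // LoadLibraryPermanently returns true on failure.
    if (llvm::sys::DynamicLibrary::LoadLibraryPermanently("libexample.so", &Err))
      return 0;
    Fn = (example_fn_t)(intptr_t)
        llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("example_entry");
    return Fn;
  }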
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 33dd705..63cec1a 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -1,4 +1,4 @@
-//===-- RuntimeDyld.cpp - Run-time dynamic linker for MC-JIT ------*- C++ -*-===//
+//===-- RuntimeDyld.cpp - Run-time dynamic linker for MC-JIT ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,6 +13,10 @@
#define DEBUG_TYPE "dyld"
#include "RuntimeDyldImpl.h"
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldMachO.h"
+#include "llvm/Support/Path.h"
+
using namespace llvm;
using namespace llvm::object;
@@ -22,35 +26,383 @@ RuntimeDyldImpl::~RuntimeDyldImpl() {}
namespace llvm {
-void RuntimeDyldImpl::extractFunction(StringRef Name, uint8_t *StartAddress,
- uint8_t *EndAddress) {
- // Allocate memory for the function via the memory manager.
- uintptr_t Size = EndAddress - StartAddress + 1;
- uintptr_t AllocSize = Size;
- uint8_t *Mem = MemMgr->startFunctionBody(Name.data(), AllocSize);
- assert(Size >= (uint64_t)(EndAddress - StartAddress + 1) &&
- "Memory manager failed to allocate enough memory!");
- // Copy the function payload into the memory block.
- memcpy(Mem, StartAddress, Size);
- MemMgr->endFunctionBody(Name.data(), Mem, Mem + Size);
- // Remember where we put it.
- Functions[Name] = sys::MemoryBlock(Mem, Size);
- // Default the assigned address for this symbol to wherever this
- // allocated it.
- SymbolTable[Name] = Mem;
- DEBUG(dbgs() << " allocated to [" << Mem << ", " << Mem + Size << "]\n");
-}
+namespace {
+ // Helper for extensive error checking; reports a fatal error on any failure.
+ error_code Check(error_code Err) {
+ if (Err) {
+ report_fatal_error(Err.message());
+ }
+ return Err;
+ }
+} // end anonymous namespace
// Resolve the relocations for all symbols we currently know about.
void RuntimeDyldImpl::resolveRelocations() {
- // Just iterate over the symbols in our symbol table and assign their
- // addresses.
- StringMap<uint8_t*>::iterator i = SymbolTable.begin();
- StringMap<uint8_t*>::iterator e = SymbolTable.end();
- for (;i != e; ++i)
- reassignSymbolAddress(i->getKey(), i->getValue());
+ // First, resolve relocations associated with external symbols.
+ resolveSymbols();
+
+ // Just iterate over the sections we have and resolve all the relocations
+ // in them. Gross overkill, but it gets the job done.
+ for (int i = 0, e = Sections.size(); i != e; ++i) {
+ reassignSectionAddress(i, Sections[i].LoadAddress);
+ }
}
+void RuntimeDyldImpl::mapSectionAddress(void *LocalAddress,
+ uint64_t TargetAddress) {
+ for (unsigned i = 0, e = Sections.size(); i != e; ++i) {
+ if (Sections[i].Address == LocalAddress) {
+ reassignSectionAddress(i, TargetAddress);
+ return;
+ }
+ }
+ llvm_unreachable("Attempting to remap address of unknown section!");
+}
+
+bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
+ // FIXME: ObjectFile doesn't modify the MemoryBuffer;
+ // it should take a const MemoryBuffer* as its parameter.
+ OwningPtr<ObjectFile> obj(ObjectFile::createObjectFile(
+ const_cast<MemoryBuffer*>(InputBuffer)));
+ if (!obj)
+ report_fatal_error("Unable to create object image from memory buffer!");
+
+ Arch = (Triple::ArchType)obj->getArch();
+
+ LocalSymbolMap LocalSymbols; // Functions and data symbols from the
+ // object file.
+ ObjSectionToIDMap LocalSections; // Used sections from the object file
+ CommonSymbolMap CommonSymbols; // Common symbols requiring allocation
+ uint64_t CommonSize = 0;
+
+ error_code err;
+ // Parse symbols
+ DEBUG(dbgs() << "Parse symbols:\n");
+ for (symbol_iterator i = obj->begin_symbols(), e = obj->end_symbols();
+ i != e; i.increment(err)) {
+ Check(err);
+ object::SymbolRef::Type SymType;
+ StringRef Name;
+ Check(i->getType(SymType));
+ Check(i->getName(Name));
+
+ uint32_t flags;
+ Check(i->getFlags(flags));
+
+ bool isCommon = flags & SymbolRef::SF_Common;
+ if (isCommon) {
+ // Add the common symbols to a list. We'll allocate them all below.
+ uint64_t Size = 0;
+ Check(i->getSize(Size));
+ CommonSize += Size;
+ CommonSymbols[*i] = Size;
+ } else {
+ if (SymType == object::SymbolRef::ST_Function ||
+ SymType == object::SymbolRef::ST_Data) {
+ uint64_t FileOffset;
+ StringRef sData;
+ section_iterator si = obj->end_sections();
+ Check(i->getFileOffset(FileOffset));
+ Check(i->getSection(si));
+ if (si == obj->end_sections()) continue;
+ Check(si->getContents(sData));
+ const uint8_t* SymPtr = (const uint8_t*)InputBuffer->getBufferStart() +
+ (uintptr_t)FileOffset;
+ uintptr_t SectOffset = (uintptr_t)(SymPtr - (const uint8_t*)sData.begin());
+ unsigned SectionID =
+ findOrEmitSection(*si,
+ SymType == object::SymbolRef::ST_Function,
+ LocalSections);
+ bool isGlobal = flags & SymbolRef::SF_Global;
+ LocalSymbols[Name.data()] = SymbolLoc(SectionID, SectOffset);
+ DEBUG(dbgs() << "\tFileOffset: " << format("%p", (uintptr_t)FileOffset)
+ << " flags: " << flags
+ << " SID: " << SectionID
+ << " Offset: " << format("%p", SectOffset));
+ if (isGlobal)
+ SymbolTable[Name] = SymbolLoc(SectionID, SectOffset);
+ }
+ }
+ DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name << "\n");
+ }
+
+ // Allocate common symbols
+ if (CommonSize != 0)
+ emitCommonSymbols(CommonSymbols, CommonSize, LocalSymbols);
+
+ // Parse and process relocations
+ DEBUG(dbgs() << "Parse relocations:\n");
+ for (section_iterator si = obj->begin_sections(),
+ se = obj->end_sections(); si != se; si.increment(err)) {
+ Check(err);
+ bool isFirstRelocation = true;
+ unsigned SectionID = 0;
+ StubMap Stubs;
+
+ for (relocation_iterator i = si->begin_relocations(),
+ e = si->end_relocations(); i != e; i.increment(err)) {
+ Check(err);
+
+ // If it's the first relocation in this section, find its SectionID
+ if (isFirstRelocation) {
+ SectionID = findOrEmitSection(*si, true, LocalSections);
+ DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
+ isFirstRelocation = false;
+ }
+
+ ObjRelocationInfo RI;
+ RI.SectionID = SectionID;
+ Check(i->getAdditionalInfo(RI.AdditionalInfo));
+ Check(i->getOffset(RI.Offset));
+ Check(i->getSymbol(RI.Symbol));
+ Check(i->getType(RI.Type));
+
+ DEBUG(dbgs() << "\t\tAddend: " << RI.AdditionalInfo
+ << " Offset: " << format("%p", (uintptr_t)RI.Offset)
+ << " Type: " << (uint32_t)(RI.Type & 0xffffffffL)
+ << "\n");
+ processRelocationRef(RI, *obj, LocalSections, LocalSymbols, Stubs);
+ }
+ }
+ return false;
+}
+
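Note that loadObject() returns false on success, matching the Error() convention used elsewhere in this file. A typical client sequence against the RuntimeDyld API reshaped by this patch might look as follows (MyMemoryManager and ObjBuffer are assumed: a hypothetical RTDyldMemoryManager implementation and an already-populated MemoryBuffer):

  RuntimeDyld Dyld(new MyMemoryManager());      // hypothetical memory manager
  if (Dyld.loadObject(ObjBuffer))               // false means success
    report_fatal_error(Dyld.getErrorString());
  // For remote targets, rebase sections before resolving, e.g.:
  //   Dyld.mapSectionAddress(LocalSectionPtr, RemoteLoadAddress);
  Dyld.resolveRelocations();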
+unsigned RuntimeDyldImpl::emitCommonSymbols(const CommonSymbolMap &Map,
+ uint64_t TotalSize,
+ LocalSymbolMap &LocalSymbols) {
+ // Allocate memory for the section
+ unsigned SectionID = Sections.size();
+ uint8_t *Addr = MemMgr->allocateDataSection(TotalSize, sizeof(void*),
+ SectionID);
+ if (!Addr)
+ report_fatal_error("Unable to allocate memory for common symbols!");
+ uint64_t Offset = 0;
+ Sections.push_back(SectionEntry(Addr, TotalSize, TotalSize, 0));
+ memset(Addr, 0, TotalSize);
+
+ DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
+ << " new addr: " << format("%p", Addr)
+ << " DataSize: " << TotalSize
+ << "\n");
+
+ // Assign the address of each symbol
+ for (CommonSymbolMap::const_iterator it = Map.begin(), itEnd = Map.end();
+ it != itEnd; it++) {
+ uint64_t Size = it->second;
+ StringRef Name;
+ it->first.getName(Name);
+ LocalSymbols[Name.data()] = SymbolLoc(SectionID, Offset);
+ Offset += Size;
+ Addr += Size;
+ }
+
+ return SectionID;
+}
+
+unsigned RuntimeDyldImpl::emitSection(const SectionRef &Section,
+ bool IsCode) {
+
+ unsigned StubBufSize = 0,
+ StubSize = getMaxStubSize();
+ error_code err;
+ if (StubSize > 0) {
+ for (relocation_iterator i = Section.begin_relocations(),
+ e = Section.end_relocations(); i != e; i.increment(err), Check(err))
+ StubBufSize += StubSize;
+ }
+ StringRef data;
+ uint64_t Alignment64;
+ Check(Section.getContents(data));
+ Check(Section.getAlignment(Alignment64));
+
+ unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+ bool IsRequired;
+ bool IsVirtual;
+ bool IsZeroInit;
+ uint64_t DataSize;
+ Check(Section.isRequiredForExecution(IsRequired));
+ Check(Section.isVirtual(IsVirtual));
+ Check(Section.isZeroInit(IsZeroInit));
+ Check(Section.getSize(DataSize));
+
+ unsigned Allocate;
+ unsigned SectionID = Sections.size();
+ uint8_t *Addr;
+ const char *pData = 0;
+
+ // Some sections, such as debug info, don't need to be loaded for execution.
+ // Leave those where they are.
+ if (IsRequired) {
+ Allocate = DataSize + StubBufSize;
+ Addr = IsCode
+ ? MemMgr->allocateCodeSection(Allocate, Alignment, SectionID)
+ : MemMgr->allocateDataSection(Allocate, Alignment, SectionID);
+ if (!Addr)
+ report_fatal_error("Unable to allocate section memory!");
+
+ // Virtual sections have no data in the object image, so leave pData = 0
+ if (!IsVirtual)
+ pData = data.data();
+
+ // Zero-initialize or copy the data from the image
+ if (IsZeroInit || IsVirtual)
+ memset(Addr, 0, DataSize);
+ else
+ memcpy(Addr, pData, DataSize);
+
+ DEBUG(dbgs() << "emitSection SectionID: " << SectionID
+ << " obj addr: " << format("%p", pData)
+ << " new addr: " << format("%p", Addr)
+ << " DataSize: " << DataSize
+ << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate
+ << "\n");
+ }
+ else {
+ // Even if we didn't load the section, we need to record an entry for it
+ // to handle later processing (and by 'handle' I mean don't do anything
+ // with these sections).
+ Allocate = 0;
+ Addr = 0;
+ DEBUG(dbgs() << "emitSection SectionID: " << SectionID
+ << " obj addr: " << format("%p", data.data())
+ << " new addr: 0"
+ << " DataSize: " << DataSize
+ << " StubBufSize: " << StubBufSize
+ << " Allocate: " << Allocate
+ << "\n");
+ }
+
+ Sections.push_back(SectionEntry(Addr, Allocate, DataSize,(uintptr_t)pData));
+ return SectionID;
+}
+
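For required code sections, the allocation above reserves the stub buffer immediately after the section data, and the SectionEntry constructor call records DataSize as the initial StubOffset. Illustrative sizing on ARM, where getMaxStubSize() is 8:

  // A code section with DataSize 0x1000 and 3 relocations reserves
  //   Allocate = 0x1000 + 3 * 8 = 0x1018
  // bytes; Section.StubOffset starts at 0x1000 and advances by 8 per stub.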
+unsigned RuntimeDyldImpl::findOrEmitSection(const SectionRef &Section,
+ bool IsCode,
+ ObjSectionToIDMap &LocalSections) {
+
+ unsigned SectionID = 0;
+ ObjSectionToIDMap::iterator i = LocalSections.find(Section);
+ if (i != LocalSections.end())
+ SectionID = i->second;
+ else {
+ SectionID = emitSection(Section, IsCode);
+ LocalSections[Section] = SectionID;
+ }
+ return SectionID;
+}
+
+void RuntimeDyldImpl::AddRelocation(const RelocationValueRef &Value,
+ unsigned SectionID, uintptr_t Offset,
+ uint32_t RelType) {
+ DEBUG(dbgs() << "AddRelocation SymNamePtr: " << format("%p", Value.SymbolName)
+ << " SID: " << Value.SectionID
+ << " Addend: " << format("%p", Value.Addend)
+ << " Offset: " << format("%p", Offset)
+ << " RelType: " << format("%x", RelType)
+ << "\n");
+
+ if (Value.SymbolName == 0) {
+ Relocations[Value.SectionID].push_back(RelocationEntry(
+ SectionID,
+ Offset,
+ RelType,
+ Value.Addend));
+ } else
+ SymbolRelocations[Value.SymbolName].push_back(RelocationEntry(
+ SectionID,
+ Offset,
+ RelType,
+ Value.Addend));
+}
+
+uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) {
+ // TODO: There is only ARM far stub now. We should add the Thumb stub,
+ // and stubs for branches Thumb - ARM and ARM - Thumb.
+ if (Arch == Triple::arm) {
+ uint32_t *StubAddr = (uint32_t*)Addr;
+ *StubAddr = 0xe51ff004; // ldr pc,<label>
+ return (uint8_t*)++StubAddr;
+ }
+ else
+ return Addr;
+}
+
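The ARM stub emitted above is two words long: the ldr pc, [pc, #-4] instruction (0xe51ff004) loads the very next word into pc, and the returned pointer addresses that second word so a relocation can fill in the branch target. Schematically, with illustrative names:

  // Layout after createStubFunction(StubAddr):
  //   StubAddr + 0: 0xe51ff004        ldr pc, [pc, #-4]
  //   StubAddr + 4: <target address>  patched later via R_ARM_ABS32
  uint32_t *Stub = reinterpret_cast<uint32_t*>(StubAddr);
  Stub[1] = TargetAddress;  // what the follow-up relocation effectively does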
+// Assign an address to a section and resolve all the relocations
+// associated with it.
+void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
+ uint64_t Addr) {
+ // The address to use for relocation resolution need not be the address
+ // of the local section buffer; we may be targeting a remote execution
+ // environment of some sort. Re-apply any relocations referencing this
+ // section with the given address.
+ //
+ // Addr is a uint64_t because we can't assume the pointer width
+ // of the target is the same as that of the host. Just use a generic
+ // "big enough" type.
+ Sections[SectionID].LoadAddress = Addr;
+ DEBUG(dbgs() << "Resolving relocations Section #" << SectionID
+ << "\t" << format("%p", (uint8_t *)Addr)
+ << "\n");
+ resolveRelocationList(Relocations[SectionID], Addr);
+}
+
+void RuntimeDyldImpl::resolveRelocationEntry(const RelocationEntry &RE,
+ uint64_t Value) {
+ // Ignore relocations for sections that were not loaded
+ if (Sections[RE.SectionID].Address != 0) {
+ uint8_t *Target = Sections[RE.SectionID].Address + RE.Offset;
+ DEBUG(dbgs() << "\tSectionID: " << RE.SectionID
+ << " + " << RE.Offset << " (" << format("%p", Target) << ")"
+ << " Data: " << RE.Data
+ << " Addend: " << RE.Addend
+ << "\n");
+
+ resolveRelocation(Target, Sections[RE.SectionID].LoadAddress + RE.Offset,
+ Value, RE.Data, RE.Addend);
+ }
+}
+
+void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
+ uint64_t Value) {
+ for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
+ resolveRelocationEntry(Relocs[i], Value);
+ }
+}
+
+// resolveSymbols - Resolve any relocations to the specified symbols if
+// we know where they live.
+void RuntimeDyldImpl::resolveSymbols() {
+ StringMap<RelocationList>::iterator i = SymbolRelocations.begin(),
+ e = SymbolRelocations.end();
+ for (; i != e; i++) {
+ StringRef Name = i->first();
+ RelocationList &Relocs = i->second;
+ StringMap<SymbolLoc>::const_iterator Loc = SymbolTable.find(Name);
+ if (Loc == SymbolTable.end()) {
+ // This is an external symbol; try to get its address from the
+ // MemoryManager.
+ uint8_t *Addr = (uint8_t*) MemMgr->getPointerToNamedFunction(Name.data(),
+ true);
+ DEBUG(dbgs() << "Resolving relocations Name: " << Name
+ << "\t" << format("%p", Addr)
+ << "\n");
+ resolveRelocationList(Relocs, (uintptr_t)Addr);
+ } else {
+ // Change the relocation to be section relative rather than symbol
+ // relative and move it to the resolved relocation list.
+ DEBUG(dbgs() << "Resolving symbol '" << Name << "'\n");
+ for (int i = 0, e = Relocs.size(); i != e; ++i) {
+ RelocationEntry Entry = Relocs[i];
+ Entry.Addend += Loc->second.second;
+ Relocations[Loc->second.first].push_back(Entry);
+ }
+ Relocs.clear();
+ }
+ }
+}
+
+
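In the resolved branch above, folding the symbol's section offset into the addend is what turns a symbol-relative relocation into a section-relative one. A worked example with hypothetical values:

  // SymbolTable["foo"] == SymbolLoc(2, 0x40); a pending entry in
  // SymbolRelocations["foo"] carries Addend == 8.
  RelocationEntry Entry = Relocs[i];   // Entry.Addend == 8
  Entry.Addend += 0x40;                // now 0x48, section-relative
  Relocations[2].push_back(Entry);
  // reassignSectionAddress(2, Base) later resolves it to Base + 0x48,
  // e.g. via R_X86_64_64: *Target = Value + Addend.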
//===----------------------------------------------------------------------===//
// RuntimeDyld class implementation
RuntimeDyld::RuntimeDyld(RTDyldMemoryManager *mm) {
@@ -64,12 +416,36 @@ RuntimeDyld::~RuntimeDyld() {
bool RuntimeDyld::loadObject(MemoryBuffer *InputBuffer) {
if (!Dyld) {
- if (RuntimeDyldMachO::isKnownFormat(InputBuffer))
- Dyld = new RuntimeDyldMachO(MM);
- else
- report_fatal_error("Unknown object format!");
+ sys::LLVMFileType type = sys::IdentifyFileType(
+ InputBuffer->getBufferStart(),
+ static_cast<unsigned>(InputBuffer->getBufferSize()));
+ switch (type) {
+ case sys::ELF_Relocatable_FileType:
+ case sys::ELF_Executable_FileType:
+ case sys::ELF_SharedObject_FileType:
+ case sys::ELF_Core_FileType:
+ Dyld = new RuntimeDyldELF(MM);
+ break;
+ case sys::Mach_O_Object_FileType:
+ case sys::Mach_O_Executable_FileType:
+ case sys::Mach_O_FixedVirtualMemorySharedLib_FileType:
+ case sys::Mach_O_Core_FileType:
+ case sys::Mach_O_PreloadExecutable_FileType:
+ case sys::Mach_O_DynamicallyLinkedSharedLib_FileType:
+ case sys::Mach_O_DynamicLinker_FileType:
+ case sys::Mach_O_Bundle_FileType:
+ case sys::Mach_O_DynamicallyLinkedSharedLibStub_FileType:
+ case sys::Mach_O_DSYMCompanion_FileType:
+ Dyld = new RuntimeDyldMachO(MM);
+ break;
+ case sys::Unknown_FileType:
+ case sys::Bitcode_FileType:
+ case sys::Archive_FileType:
+ case sys::COFF_FileType:
+ report_fatal_error("Incompatible object format!");
+ }
} else {
- if(!Dyld->isCompatibleFormat(InputBuffer))
+ if (!Dyld->isCompatibleFormat(InputBuffer))
report_fatal_error("Incompatible object format!");
}
@@ -84,8 +460,14 @@ void RuntimeDyld::resolveRelocations() {
Dyld->resolveRelocations();
}
-void RuntimeDyld::reassignSymbolAddress(StringRef Name, uint8_t *Addr) {
- Dyld->reassignSymbolAddress(Name, Addr);
+void RuntimeDyld::reassignSectionAddress(unsigned SectionID,
+ uint64_t Addr) {
+ Dyld->reassignSectionAddress(SectionID, Addr);
+}
+
+void RuntimeDyld::mapSectionAddress(void *LocalAddress,
+ uint64_t TargetAddress) {
+ Dyld->mapSectionAddress(LocalAddress, TargetAddress);
}
StringRef RuntimeDyld::getErrorString() {
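The dispatch above relies on sys::IdentifyFileType's magic-byte sniffing; for the formats RuntimeDyld accepts, that reduces to recognizing the ELF and Mach-O magics. A reduced, stand-alone sketch of the same discrimination (not the LLVM implementation):

  #include <cstring>
  #include <stdint.h>

  enum ObjKind { OK_ELF, OK_MachO, OK_Unknown };

  static ObjKind classify(const char *Buf, size_t Len) {
    if (Len >= 4 && memcmp(Buf, "\177ELF", 4) == 0)
      return OK_ELF;
    if (Len >= 4) {
      uint32_t Magic;
      memcpy(&Magic, Buf, 4);          // 32/64-bit Mach-O, both byte orders
      if (Magic == 0xfeedface || Magic == 0xcefaedfe ||
          Magic == 0xfeedfacf || Magic == 0xcffaedfe)
        return OK_MachO;
    }
    return OK_Unknown;                 // bitcode, archives, COFF, ...
  }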
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
new file mode 100644
index 0000000..57fefee
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -0,0 +1,262 @@
+//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of ELF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "dyld"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/IntervalMap.h"
+#include "RuntimeDyldELF.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/ADT/Triple.h"
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+
+
+void RuntimeDyldELF::resolveX86_64Relocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend) {
+ switch (Type) {
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ case ELF::R_X86_64_64: {
+ uint64_t *Target = (uint64_t*)(LocalAddress);
+ *Target = Value + Addend;
+ break;
+ }
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S: {
+ Value += Addend;
+ // FIXME: Handle the possibility of this assertion failing
+ assert((Type == ELF::R_X86_64_32 && !(Value & 0xFFFFFFFF00000000ULL)) ||
+ (Type == ELF::R_X86_64_32S &&
+ (Value & 0xFFFFFFFF00000000ULL) == 0xFFFFFFFF00000000ULL));
+ uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
+ uint32_t *Target = reinterpret_cast<uint32_t*>(LocalAddress);
+ *Target = TruncatedAddr;
+ break;
+ }
+ case ELF::R_X86_64_PC32: {
+ uint32_t *Placeholder = reinterpret_cast<uint32_t*>(LocalAddress);
+ int64_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
+ assert(RealOffset <= 2147483647LL && RealOffset >= -2147483648LL);
+ int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
+ *Placeholder = TruncOffset;
+ break;
+ }
+ }
+}
+
+void RuntimeDyldELF::resolveX86Relocation(uint8_t *LocalAddress,
+ uint32_t FinalAddress,
+ uint32_t Value,
+ uint32_t Type,
+ int32_t Addend) {
+ switch (Type) {
+ case ELF::R_386_32: {
+ uint32_t *Target = (uint32_t*)(LocalAddress);
+ uint32_t Placeholder = *Target;
+ *Target = Placeholder + Value + Addend;
+ break;
+ }
+ case ELF::R_386_PC32: {
+ uint32_t *Placeholder = reinterpret_cast<uint32_t*>(LocalAddress);
+ uint32_t RealOffset = *Placeholder + Value + Addend - FinalAddress;
+ *Placeholder = RealOffset;
+ break;
+ }
+ default:
+ // There are other relocation types, but it appears these are the
+ // only ones currently used by the LLVM ELF object writer
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolveARMRelocation(uint8_t *LocalAddress,
+ uint32_t FinalAddress,
+ uint32_t Value,
+ uint32_t Type,
+ int32_t Addend) {
+ // TODO: Add Thumb relocations.
+ uint32_t* TargetPtr = (uint32_t*)LocalAddress;
+ Value += Addend;
+
+ DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: " << LocalAddress
+ << " FinalAddress: " << format("%p",FinalAddress)
+ << " Value: " << format("%x",Value)
+ << " Type: " << format("%x",Type)
+ << " Addend: " << format("%x",Addend)
+ << "\n");
+
+ switch(Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+
+ // Just write the 32-bit value to the relocation address
+ case ELF::R_ARM_ABS32 :
+ *TargetPtr = Value;
+ break;
+
+ // Write the low 16 bits of the 32-bit value into the movw instruction;
+ // the top 4 bits of the immediate are shifted into bits 16-19.
+ case ELF::R_ARM_MOVW_ABS_NC :
+ Value = Value & 0xFFFF;
+ *TargetPtr |= Value & 0xFFF;
+ *TargetPtr |= ((Value >> 12) & 0xF) << 16;
+ break;
+
+ // Write the high 16 bits of the 32-bit value into the movt instruction;
+ // the top 4 bits of the immediate are shifted into bits 16-19.
+ case ELF::R_ARM_MOVT_ABS :
+ Value = (Value >> 16) & 0xFFFF;
+ *TargetPtr |= Value & 0xFFF;
+ *TargetPtr |= ((Value >> 12) & 0xF) << 16;
+ break;
+
+ // Write the 24-bit relative value into the branch instruction.
+ case ELF::R_ARM_PC24 : // Fall through.
+ case ELF::R_ARM_CALL : // Fall through.
+ case ELF::R_ARM_JUMP24 :
+ int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
+ RelValue = (RelValue & 0x03FFFFFC) >> 2;
+ *TargetPtr &= 0xFF000000;
+ *TargetPtr |= RelValue;
+ break;
+ }
+}
+
+void RuntimeDyldELF::resolveRelocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend) {
+ switch (Arch) {
+ case Triple::x86_64:
+ resolveX86_64Relocation(LocalAddress, FinalAddress, Value, Type, Addend);
+ break;
+ case Triple::x86:
+ resolveX86Relocation(LocalAddress, (uint32_t)(FinalAddress & 0xffffffffL),
+ (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ case Triple::arm: // Fall through.
+ case Triple::thumb:
+ resolveARMRelocation(LocalAddress, (uint32_t)(FinalAddress & 0xffffffffL),
+ (uint32_t)(Value & 0xffffffffL), Type,
+ (uint32_t)(Addend & 0xffffffffL));
+ break;
+ default: llvm_unreachable("Unsupported CPU type!");
+ }
+}
+
+void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
+ const ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ LocalSymbolMap &Symbols,
+ StubMap &Stubs) {
+
+ uint32_t RelType = (uint32_t)(Rel.Type & 0xffffffffL);
+ intptr_t Addend = (intptr_t)Rel.AdditionalInfo;
+ RelocationValueRef Value;
+ StringRef TargetName;
+ const SymbolRef &Symbol = Rel.Symbol;
+ Symbol.getName(TargetName);
+ DEBUG(dbgs() << "\t\tRelType: " << RelType
+ << " Addend: " << Addend
+ << " TargetName: " << TargetName
+ << "\n");
+ // First, look the symbol up among the object file's own symbols.
+ LocalSymbolMap::iterator lsi = Symbols.find(TargetName.data());
+ if (lsi != Symbols.end()) {
+ Value.SectionID = lsi->second.first;
+ Value.Addend = lsi->second.second;
+ } else {
+ // Then look the symbol up in the global symbol table.
+ StringMap<SymbolLoc>::iterator gsi = SymbolTable.find(TargetName.data());
+ if (gsi != SymbolTable.end()) {
+ Value.SectionID = gsi->second.first;
+ Value.Addend = gsi->second.second;
+ } else {
+ SymbolRef::Type SymType;
+ Symbol.getType(SymType);
+ switch (SymType) {
+ case SymbolRef::ST_Debug: {
+ // TODO: For ELF, SymbolRef::ST_Debug currently maps to STT_SECTION;
+ // this is not obvious and could be changed by other developers. The
+ // best fix is probably to add a new symbol type ST_Section to
+ // SymbolRef and use it.
+ section_iterator si = Obj.end_sections();
+ Symbol.getSection(si);
+ if (si == Obj.end_sections())
+ llvm_unreachable("Symbol section not found, bad object file format!");
+ DEBUG(dbgs() << "\t\tThis is section symbol\n");
+ Value.SectionID = findOrEmitSection((*si), true, ObjSectionToID);
+ Value.Addend = Addend;
+ break;
+ }
+ case SymbolRef::ST_Unknown: {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = Addend;
+ break;
+ }
+ default:
+ llvm_unreachable("Unresolved symbol type!");
+ break;
+ }
+ }
+ }
+ DEBUG(dbgs() << "\t\tRel.SectionID: " << Rel.SectionID
+ << " Rel.Offset: " << Rel.Offset
+ << "\n");
+ if (Arch == Triple::arm &&
+ (RelType == ELF::R_ARM_PC24 ||
+ RelType == ELF::R_ARM_CALL ||
+ RelType == ELF::R_ARM_JUMP24)) {
+ // This is an ARM branch relocation; we need a stub function.
+ DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
+ SectionEntry &Section = Sections[Rel.SectionID];
+ uint8_t *Target = Section.Address + Rel.Offset;
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ resolveRelocation(Target, Section.LoadAddress, (uint64_t)Section.Address +
+ i->second, RelType, 0);
+ DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *StubTargetAddr = createStubFunction(Section.Address +
+ Section.StubOffset);
+ AddRelocation(Value, Rel.SectionID,
+ StubTargetAddr - Section.Address, ELF::R_ARM_ABS32);
+ resolveRelocation(Target, Section.LoadAddress, (uint64_t)Section.Address +
+ Section.StubOffset, RelType, 0);
+ Section.StubOffset += getMaxStubSize();
+ }
+ } else
+ AddRelocation(Value, Rel.SectionID, Rel.Offset, RelType);
+}
+
+bool RuntimeDyldELF::isCompatibleFormat(const MemoryBuffer *InputBuffer) const {
+ StringRef Magic = InputBuffer->getBuffer().slice(0, ELF::EI_NIDENT);
+ return (memcmp(Magic.data(), ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+}
+} // namespace llvm
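The R_X86_64_PC32 case above is the subtle one: the 32-bit field holds a displacement from the fixup site itself, so the pre-encoded placeholder, target address, and addend combine before the site's final address is subtracted. With illustrative numbers:

  // FinalAddress (fixup site)            = 0x401004
  // Value (target section load address)  = 0x402000
  // Addend                               = -4    (typical for call rel32)
  // *Placeholder (pre-encoded offset)    = 0x10
  int64_t RealOffset = 0x10 + 0x402000 + (-4) - 0x401004;  // == 0x1008
  // The 32-bit truncation of 0x1008 is stored back at the fixup site.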
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
new file mode 100644
index 0000000..36566da
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -0,0 +1,62 @@
+//===-- RuntimeDyldELF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIME_DYLD_ELF_H
+#define LLVM_RUNTIME_DYLD_ELF_H
+
+#include "RuntimeDyldImpl.h"
+
+using namespace llvm;
+
+
+namespace llvm {
+class RuntimeDyldELF : public RuntimeDyldImpl {
+protected:
+ void resolveX86_64Relocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend);
+
+ void resolveX86Relocation(uint8_t *LocalAddress,
+ uint32_t FinalAddress,
+ uint32_t Value,
+ uint32_t Type,
+ int32_t Addend);
+
+ void resolveARMRelocation(uint8_t *LocalAddress,
+ uint32_t FinalAddress,
+ uint32_t Value,
+ uint32_t Type,
+ int32_t Addend);
+
+ virtual void resolveRelocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend);
+
+ virtual void processRelocationRef(const ObjRelocationInfo &Rel,
+ const ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ LocalSymbolMap &Symbols, StubMap &Stubs);
+
+public:
+ RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
+
+ bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
index 7190a3c..bf678af 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -1,4 +1,4 @@
-//===-- RuntimeDyldImpl.h - Run-time dynamic linker for MC-JIT ------*- C++ -*-===//
+//===-- RuntimeDyldImpl.h - Run-time dynamic linker for MC-JIT --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,42 +15,128 @@
#define LLVM_RUNTIME_DYLD_IMPL_H
#include "llvm/ExecutionEngine/RuntimeDyld.h"
-#include "llvm/Object/MachOObject.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ExecutionEngine/ExecutionEngine.h"
-#include "llvm/Support/Format.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/system_error.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/Triple.h"
+#include <map>
+#include "llvm/Support/Format.h"
using namespace llvm;
using namespace llvm::object;
namespace llvm {
+
+class SectionEntry {
+public:
+ uint8_t* Address;
+ size_t Size;
+ uint64_t LoadAddress; // For each section, the address it will be
+ // considered to live at for relocations. The same
+ // as the pointer to the above memory block for
+ // hosted JITs.
+ uintptr_t StubOffset; // Used on architectures that need stub
+ // functions for far relocations, like ARM.
+ uintptr_t ObjAddress; // Section address in the object file. Used to
+ // calculate the MachO relocation addend.
+ SectionEntry(uint8_t* address, size_t size, uintptr_t stubOffset,
+ uintptr_t objAddress)
+ : Address(address), Size(size), LoadAddress((uintptr_t)address),
+ StubOffset(stubOffset), ObjAddress(objAddress) {}
+};
+
+class RelocationEntry {
+public:
+ unsigned SectionID; // Section the relocation is contained in.
+ uintptr_t Offset; // Offset into the section for the relocation.
+ uint32_t Data; // Relocation data, including the relocation type
+ // and any other flags and parameters from the
+ // original relocation record.
+ intptr_t Addend; // Addend encoded in the instruction itself, if any,
+ // plus the offset into the source section for
+ // the symbol once the relocation is resolvable.
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t data, int64_t addend)
+ : SectionID(id), Offset(offset), Data(data), Addend(addend) {}
+};
+
+// Raw relocation data from object file
+class ObjRelocationInfo {
+public:
+ unsigned SectionID;
+ uint64_t Offset;
+ SymbolRef Symbol;
+ uint64_t Type;
+ int64_t AdditionalInfo;
+};
+
+class RelocationValueRef {
+public:
+ unsigned SectionID;
+ intptr_t Addend;
+ const char *SymbolName;
+ RelocationValueRef(): SectionID(0), Addend(0), SymbolName(0) {}
+
+ inline bool operator==(const RelocationValueRef &Other) const {
+ return std::memcmp(this, &Other, sizeof(RelocationValueRef)) == 0;
+ }
+ inline bool operator <(const RelocationValueRef &Other) const {
+ return std::memcmp(this, &Other, sizeof(RelocationValueRef)) < 0;
+ }
+};
+
class RuntimeDyldImpl {
protected:
- unsigned CPUType;
- unsigned CPUSubtype;
-
// The MemoryManager to load objects into.
RTDyldMemoryManager *MemMgr;
- // FIXME: This all assumes we're dealing with external symbols for anything
- // explicitly referenced. I.e., we can index by name and things
- // will work out. In practice, this may not be the case, so we
- // should find a way to effectively generalize.
+ // A list of emitted sections.
+ typedef SmallVector<SectionEntry, 64> SectionList;
+ SectionList Sections;
- // For each function, we have a MemoryBlock of it's instruction data.
- StringMap<sys::MemoryBlock> Functions;
+ // Keep a map from each object file section to the SectionID that
+ // was assigned to it.
+ typedef std::map<SectionRef, unsigned> ObjSectionToIDMap;
// Master symbol table. As modules are loaded and external symbols are
- // resolved, their addresses are stored here.
- StringMap<uint8_t*> SymbolTable;
+ // resolved, their addresses are stored here as a SectionID/Offset pair.
+ typedef std::pair<unsigned, uintptr_t> SymbolLoc;
+ StringMap<SymbolLoc> SymbolTable;
+ typedef DenseMap<const char*, SymbolLoc> LocalSymbolMap;
+
+ // Keep a map of common symbols to their sizes
+ typedef std::map<SymbolRef, unsigned> CommonSymbolMap;
+
+ // For each symbol, keep a list of relocations based on it. Anytime
+ // its address is reassigned (e.g., the JIT re-compiled the function),
+ // the relocations get re-resolved.
+ // The symbol (or section) the relocation is sourced from is the Key
+ // in the relocation list where it's stored.
+ typedef SmallVector<RelocationEntry, 64> RelocationList;
+ // Relocations to sections already loaded. Indexed by SectionID which is the
+ // source of the address. The target where the address will be written is
+ // SectionID/Offset in the relocation itself.
+ DenseMap<unsigned, RelocationList> Relocations;
+ // Relocations to external symbols that are not yet resolved.
+ // Indexed by symbol name.
+ StringMap<RelocationList> SymbolRelocations;
+
+ typedef std::map<RelocationValueRef, uintptr_t> StubMap;
+
+ Triple::ArchType Arch;
+
+ inline unsigned getMaxStubSize() {
+ if (Arch == Triple::arm || Arch == Triple::thumb)
+ return 8; // 32-bit instruction and 32-bit address
+ else
+ return 0;
+ }
bool HasError;
std::string ErrorStr;
@@ -62,25 +148,84 @@ protected:
return true;
}
- void extractFunction(StringRef Name, uint8_t *StartAddress,
- uint8_t *EndAddress);
+ uint8_t *getSectionAddress(unsigned SectionID) {
+ return (uint8_t*)Sections[SectionID].Address;
+ }
+ /// \brief Emits a section containing common symbols.
+ /// \return SectionID.
+ unsigned emitCommonSymbols(const CommonSymbolMap &Map,
+ uint64_t TotalSize,
+ LocalSymbolMap &Symbols);
+
+ /// \brief Emits section data from the object file to the MemoryManager.
+ /// \param IsCode If true, allocateCodeSection() is used to emit the
+ /// section; otherwise allocateDataSection() is used.
+ /// \return SectionID.
+ unsigned emitSection(const SectionRef &Section, bool IsCode);
+
+ /// \brief Find Section in LocalSections. If the section is not found, emit
+ /// it and store it in LocalSections.
+ /// \param IsCode If true, allocateCodeSection() is used to emit the
+ /// section; otherwise allocateDataSection() is used.
+ /// \return SectionID.
+ unsigned findOrEmitSection(const SectionRef &Section, bool IsCode,
+ ObjSectionToIDMap &LocalSections);
+
+ /// \brief If Value.SymbolName is NULL, store the relocation in
+ /// Relocations; otherwise store it in SymbolRelocations.
+ void AddRelocation(const RelocationValueRef &Value, unsigned SectionID,
+ uintptr_t Offset, uint32_t RelType);
+
+ /// \brief Emits long jump instruction to Addr.
+ /// \return Pointer to the memory area for emitting target address.
+ uint8_t* createStubFunction(uint8_t *Addr);
+
+ /// \brief Resolves relocations from Relocs list with address from Value.
+ void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
+ void resolveRelocationEntry(const RelocationEntry &RE, uint64_t Value);
+
+ /// \brief An object-file-specific relocation resolver
+ /// \param Address Address to apply the relocation action
+ /// \param Value Target symbol address to apply the relocation action
+ /// \param Type object file specific relocation type
+ /// \param Addend A constant addend used to compute the value to be stored
+ /// into the relocatable field
+ virtual void resolveRelocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend) = 0;
+
+ /// \brief Parses an object file relocation and stores it in Relocations
+ /// or SymbolRelocations; the handling depends on the object file type.
+ virtual void processRelocationRef(const ObjRelocationInfo &Rel,
+ const ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ LocalSymbolMap &Symbols, StubMap &Stubs) = 0;
+
+ void resolveSymbols();
public:
RuntimeDyldImpl(RTDyldMemoryManager *mm) : MemMgr(mm), HasError(false) {}
virtual ~RuntimeDyldImpl();
- virtual bool loadObject(MemoryBuffer *InputBuffer) = 0;
+ bool loadObject(const MemoryBuffer *InputBuffer);
void *getSymbolAddress(StringRef Name) {
// FIXME: Just look up as a function for now. Overly simple of course.
// Work in progress.
- return SymbolTable.lookup(Name);
+ if (SymbolTable.find(Name) == SymbolTable.end())
+ return 0;
+ SymbolLoc Loc = SymbolTable.lookup(Name);
+ return getSectionAddress(Loc.first) + Loc.second;
}
void resolveRelocations();
- virtual void reassignSymbolAddress(StringRef Name, uint8_t *Addr) = 0;
+ void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
+
+ void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress);
// Is the linker in an error state?
bool hasError() { return HasError; }
@@ -92,58 +237,7 @@ public:
StringRef getErrorString() { return ErrorStr; }
virtual bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const = 0;
-};
-
-
-class RuntimeDyldMachO : public RuntimeDyldImpl {
- // For each symbol, keep a list of relocations based on it. Anytime
- // its address is reassigned (the JIT re-compiled the function, e.g.),
- // the relocations get re-resolved.
- struct RelocationEntry {
- std::string Target; // Object this relocation is contained in.
- uint64_t Offset; // Offset into the object for the relocation.
- uint32_t Data; // Second word of the raw macho relocation entry.
- int64_t Addend; // Addend encoded in the instruction itself, if any.
- bool isResolved; // Has this relocation been resolved previously?
-
- RelocationEntry(StringRef t, uint64_t offset, uint32_t data, int64_t addend)
- : Target(t), Offset(offset), Data(data), Addend(addend),
- isResolved(false) {}
- };
- typedef SmallVector<RelocationEntry, 4> RelocationList;
- StringMap<RelocationList> Relocations;
-
- // FIXME: Also keep a map of all the relocations contained in an object. Use
- // this to dynamically answer whether all of the relocations in it have
- // been resolved or not.
-
- bool resolveRelocation(uint8_t *Address, uint8_t *Value, bool isPCRel,
- unsigned Type, unsigned Size);
- bool resolveX86_64Relocation(uintptr_t Address, uintptr_t Value, bool isPCRel,
- unsigned Type, unsigned Size);
- bool resolveARMRelocation(uintptr_t Address, uintptr_t Value, bool isPCRel,
- unsigned Type, unsigned Size);
-
- bool loadSegment32(const MachOObject *Obj,
- const MachOObject::LoadCommandInfo *SegmentLCI,
- const InMemoryStruct<macho::SymtabLoadCommand> &SymtabLC);
- bool loadSegment64(const MachOObject *Obj,
- const MachOObject::LoadCommandInfo *SegmentLCI,
- const InMemoryStruct<macho::SymtabLoadCommand> &SymtabLC);
-
-public:
- RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
-
- bool loadObject(MemoryBuffer *InputBuffer);
-
- void reassignSymbolAddress(StringRef Name, uint8_t *Addr);
-
- static bool isKnownFormat(const MemoryBuffer *InputBuffer);
-
- bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const {
- return isKnownFormat(InputBuffer);
- }
};
} // end namespace llvm
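With symbols now stored as (SectionID, Offset) pairs, getSymbolAddress() above derives a host pointer from the owning section's buffer; for a remote target, the same pair indexes LoadAddress instead. Expanded inside the class, assuming the symbol exists:

  SymbolLoc Loc = SymbolTable.lookup("foo");           // (SectionID, Offset)
  uint8_t *HostAddr = getSectionAddress(Loc.first) + Loc.second;
  uint64_t TargetAddr = Sections[Loc.first].LoadAddress + Loc.second;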
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index 623e9b2..1318b44 100644
--- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -1,4 +1,4 @@
-//===-- RuntimeDyldMachO.cpp - Run-time dynamic linker for MC-JIT ------*- C++ -*-===//
+//===-- RuntimeDyldMachO.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -15,73 +15,147 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/STLExtras.h"
-#include "RuntimeDyldImpl.h"
+#include "RuntimeDyldMachO.h"
using namespace llvm;
using namespace llvm::object;
namespace llvm {
-bool RuntimeDyldMachO::
-resolveRelocation(uint8_t *Address, uint8_t *Value, bool isPCRel,
- unsigned Type, unsigned Size) {
+void RuntimeDyldMachO::resolveRelocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend) {
+ bool isPCRel = (Type >> 24) & 1;
+ unsigned MachoType = (Type >> 28) & 0xf;
+ unsigned Size = 1 << ((Type >> 25) & 3);
+
+ DEBUG(dbgs() << "resolveRelocation LocalAddress: " << format("%p", LocalAddress)
+ << " FinalAddress: " << format("%p", FinalAddress)
+ << " Value: " << format("%p", Value)
+ << " Addend: " << Addend
+ << " isPCRel: " << isPCRel
+ << " MachoType: " << MachoType
+ << " Size: " << Size
+ << "\n");
+
// This just dispatches to the proper target specific routine.
- switch (CPUType) {
- default: assert(0 && "Unsupported CPU type!");
- case mach::CTM_x86_64:
- return resolveX86_64Relocation((uintptr_t)Address, (uintptr_t)Value,
- isPCRel, Type, Size);
- case mach::CTM_ARM:
- return resolveARMRelocation((uintptr_t)Address, (uintptr_t)Value,
- isPCRel, Type, Size);
+ switch (Arch) {
+ default: llvm_unreachable("Unsupported CPU type!");
+ case Triple::x86_64:
+ resolveX86_64Relocation(LocalAddress,
+ FinalAddress,
+ (uintptr_t)Value,
+ isPCRel,
+ MachoType,
+ Size,
+ Addend);
+ break;
+ case Triple::x86:
+ resolveI386Relocation(LocalAddress,
+ FinalAddress,
+ (uintptr_t)Value,
+ isPCRel,
+ Type,
+ Size,
+ Addend);
+ break;
+ case Triple::arm: // Fall through.
+ case Triple::thumb:
+ resolveARMRelocation(LocalAddress,
+ FinalAddress,
+ (uintptr_t)Value,
+ isPCRel,
+ MachoType,
+ Size,
+ Addend);
+ break;
+ }
+}
+
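The bit-slicing above unpacks the packed Mach-O relocation word as this code consumes it: bit 24 is the pcrel flag, bits 25-26 encode log2 of the field length, and bits 28-31 the relocation type. For example:

  // Type word 0x5000002 (illustrative):
  //   (0x5000002 >> 24) & 1        == 1  -> PC-relative
  //   1 << ((0x5000002 >> 25) & 3) == 4  -> 4-byte field
  //   (0x5000002 >> 28) & 0xf      == 0  -> macho::RIT_Vanilla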
+bool RuntimeDyldMachO::
+resolveI386Relocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend) {
+ if (isPCRel)
+ Value -= FinalAddress + 4; // see resolveX86_64Relocation
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case macho::RIT_Vanilla: {
+ uint8_t *p = LocalAddress;
+ uint64_t ValueToWrite = Value + Addend;
+ for (unsigned i = 0; i < Size; ++i) {
+ *p++ = (uint8_t)(ValueToWrite & 0xff);
+ ValueToWrite >>= 8;
+ }
+ return false; // success; don't fall through to the unimplemented cases
+ }
+ case macho::RIT_Difference:
+ case macho::RIT_Generic_LocalDifference:
+ case macho::RIT_Generic_PreboundLazyPointer:
+ return Error("Relocation type not implemented yet!");
}
- llvm_unreachable("");
}
bool RuntimeDyldMachO::
-resolveX86_64Relocation(uintptr_t Address, uintptr_t Value,
- bool isPCRel, unsigned Type,
- unsigned Size) {
+resolveX86_64Relocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend) {
// If the relocation is PC-relative, the value to be encoded is the
// pointer difference.
if (isPCRel)
// FIXME: It seems this value needs to be adjusted by 4 for an effective PC
// address. Is that expected? Only for branches, perhaps?
- Value -= Address + 4;
+ Value -= FinalAddress + 4;
switch(Type) {
default:
llvm_unreachable("Invalid relocation type!");
+ case macho::RIT_X86_64_Signed1:
+ case macho::RIT_X86_64_Signed2:
+ case macho::RIT_X86_64_Signed4:
+ case macho::RIT_X86_64_Signed:
case macho::RIT_X86_64_Unsigned:
case macho::RIT_X86_64_Branch: {
+ Value += Addend;
// Mask in the target value a byte at a time (we don't have an alignment
// guarantee for the target address, so this is safest).
- uint8_t *p = (uint8_t*)Address;
+ uint8_t *p = (uint8_t*)LocalAddress;
for (unsigned i = 0; i < Size; ++i) {
*p++ = (uint8_t)Value;
Value >>= 8;
}
return false;
}
- case macho::RIT_X86_64_Signed:
case macho::RIT_X86_64_GOTLoad:
case macho::RIT_X86_64_GOT:
case macho::RIT_X86_64_Subtractor:
- case macho::RIT_X86_64_Signed1:
- case macho::RIT_X86_64_Signed2:
- case macho::RIT_X86_64_Signed4:
case macho::RIT_X86_64_TLV:
return Error("Relocation type not implemented yet!");
}
- return false;
}
-bool RuntimeDyldMachO::resolveARMRelocation(uintptr_t Address, uintptr_t Value,
- bool isPCRel, unsigned Type,
- unsigned Size) {
+bool RuntimeDyldMachO::
+resolveARMRelocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend) {
// If the relocation is PC-relative, the value to be encoded is the
// pointer difference.
if (isPCRel) {
- Value -= Address;
+ Value -= FinalAddress;
// ARM PCRel relocations have an effective-PC offset of two instructions
// (four bytes in Thumb mode, eight bytes in ARM mode).
// FIXME: For now, assume ARM mode.
@@ -92,10 +166,9 @@ bool RuntimeDyldMachO::resolveARMRelocation(uintptr_t Address, uintptr_t Value,
default:
llvm_unreachable("Invalid relocation type!");
case macho::RIT_Vanilla: {
- llvm_unreachable("Invalid relocation type!");
// Mask in the target value a byte at a time (we don't have an alignment
// guarantee for the target address, so this is safest).
- uint8_t *p = (uint8_t*)Address;
+ uint8_t *p = (uint8_t*)LocalAddress;
for (unsigned i = 0; i < Size; ++i) {
*p++ = (uint8_t)Value;
Value >>= 8;
@@ -105,7 +178,7 @@ bool RuntimeDyldMachO::resolveARMRelocation(uintptr_t Address, uintptr_t Value,
case macho::RIT_ARM_Branch24Bit: {
// Mask the value into the target address. We know instructions are
// 32-bit aligned, so we can do it all at once.
- uint32_t *p = (uint32_t*)Address;
+ uint32_t *p = (uint32_t*)LocalAddress;
// The low two bits of the value are not encoded.
Value >>= 2;
// Mask the value to 24 bits.
@@ -131,388 +204,84 @@ bool RuntimeDyldMachO::resolveARMRelocation(uintptr_t Address, uintptr_t Value,
return false;
}
-bool RuntimeDyldMachO::
-loadSegment32(const MachOObject *Obj,
- const MachOObject::LoadCommandInfo *SegmentLCI,
- const InMemoryStruct<macho::SymtabLoadCommand> &SymtabLC) {
- InMemoryStruct<macho::SegmentLoadCommand> SegmentLC;
- Obj->ReadSegmentLoadCommand(*SegmentLCI, SegmentLC);
- if (!SegmentLC)
- return Error("unable to load segment load command");
-
- for (unsigned SectNum = 0; SectNum != SegmentLC->NumSections; ++SectNum) {
- InMemoryStruct<macho::Section> Sect;
- Obj->ReadSection(*SegmentLCI, SectNum, Sect);
- if (!Sect)
- return Error("unable to load section: '" + Twine(SectNum) + "'");
-
- // FIXME: For the time being, we're only loading text segments.
- if (Sect->Flags != 0x80000400)
- continue;
-
- // Address and names of symbols in the section.
- typedef std::pair<uint64_t, StringRef> SymbolEntry;
- SmallVector<SymbolEntry, 64> Symbols;
- // Index of all the names, in this section or not. Used when we're
- // dealing with relocation entries.
- SmallVector<StringRef, 64> SymbolNames;
- for (unsigned i = 0; i != SymtabLC->NumSymbolTableEntries; ++i) {
- InMemoryStruct<macho::SymbolTableEntry> STE;
- Obj->ReadSymbolTableEntry(SymtabLC->SymbolTableOffset, i, STE);
- if (!STE)
- return Error("unable to read symbol: '" + Twine(i) + "'");
- if (STE->SectionIndex > SegmentLC->NumSections)
- return Error("invalid section index for symbol: '" + Twine(i) + "'");
- // Get the symbol name.
- StringRef Name = Obj->getStringAtIndex(STE->StringIndex);
- SymbolNames.push_back(Name);
-
- // Just skip symbols not defined in this section.
- if ((unsigned)STE->SectionIndex - 1 != SectNum)
- continue;
-
- // FIXME: Check the symbol type and flags.
- if (STE->Type != 0xF) // external, defined in this section.
- continue;
- // Flags == 0x8 marks a thumb function for ARM, which is fine as it
- // doesn't require any special handling here.
- if (STE->Flags != 0x0 && STE->Flags != 0x8)
- continue;
-
- // Remember the symbol.
- Symbols.push_back(SymbolEntry(STE->Value, Name));
-
- DEBUG(dbgs() << "Function sym: '" << Name << "' @ " <<
- (Sect->Address + STE->Value) << "\n");
- }
- // Sort the symbols by address, just in case they didn't come in that way.
- array_pod_sort(Symbols.begin(), Symbols.end());
-
- // If there weren't any functions (odd, but just in case...)
- if (!Symbols.size())
- continue;
-
- // Extract the function data.
- uint8_t *Base = (uint8_t*)Obj->getData(SegmentLC->FileOffset,
- SegmentLC->FileSize).data();
- for (unsigned i = 0, e = Symbols.size() - 1; i != e; ++i) {
- uint64_t StartOffset = Sect->Address + Symbols[i].first;
- uint64_t EndOffset = Symbols[i + 1].first - 1;
- DEBUG(dbgs() << "Extracting function: " << Symbols[i].second
- << " from [" << StartOffset << ", " << EndOffset << "]\n");
- extractFunction(Symbols[i].second, Base + StartOffset, Base + EndOffset);
- }
- // The last symbol we do after since the end address is calculated
- // differently because there is no next symbol to reference.
- uint64_t StartOffset = Symbols[Symbols.size() - 1].first;
- uint64_t EndOffset = Sect->Size - 1;
- DEBUG(dbgs() << "Extracting function: " << Symbols[Symbols.size()-1].second
- << " from [" << StartOffset << ", " << EndOffset << "]\n");
- extractFunction(Symbols[Symbols.size()-1].second,
- Base + StartOffset, Base + EndOffset);
-
- // Now extract the relocation information for each function and process it.
- for (unsigned j = 0; j != Sect->NumRelocationTableEntries; ++j) {
- InMemoryStruct<macho::RelocationEntry> RE;
- Obj->ReadRelocationEntry(Sect->RelocationTableOffset, j, RE);
- if (RE->Word0 & macho::RF_Scattered)
- return Error("NOT YET IMPLEMENTED: scattered relocations.");
- // Word0 of the relocation is the offset into the section where the
- // relocation should be applied. We need to translate that into an
- // offset into a function since that's our atom.
- uint32_t Offset = RE->Word0;
- // Look for the function containing the address. This is used for JIT
- // code, so the number of functions in section is almost always going
- // to be very small (usually just one), so until we have use cases
- // where that's not true, just use a trivial linear search.
- unsigned SymbolNum;
- unsigned NumSymbols = Symbols.size();
- assert(NumSymbols > 0 && Symbols[0].first <= Offset &&
- "No symbol containing relocation!");
- for (SymbolNum = 0; SymbolNum < NumSymbols - 1; ++SymbolNum)
- if (Symbols[SymbolNum + 1].first > Offset)
- break;
- // Adjust the offset to be relative to the symbol.
- Offset -= Symbols[SymbolNum].first;
- // Get the name of the symbol containing the relocation.
- StringRef TargetName = SymbolNames[SymbolNum];
-
- bool isExtern = (RE->Word1 >> 27) & 1;
- // Figure out the source symbol of the relocation. If isExtern is true,
- // this relocation references the symbol table, otherwise it references
- // a section in the same object, numbered from 1 through NumSections
- // (SectionBases is [0, NumSections-1]).
- // FIXME: Some targets (ARM) use internal relocations even for
- // externally visible symbols, if the definition is in the same
- // file as the reference. We need to convert those back to by-name
- // references. We can resolve the address based on the section
- // offset and see if we have a symbol at that address. If we do,
- // use that; otherwise, puke.
- if (!isExtern)
- return Error("Internal relocations not supported.");
- uint32_t SourceNum = RE->Word1 & 0xffffff; // 24-bit value
- StringRef SourceName = SymbolNames[SourceNum];
-
- // FIXME: Get the relocation addend from the target address.
-
- // Now store the relocation information. Associate it with the source
- // symbol.
- Relocations[SourceName].push_back(RelocationEntry(TargetName,
- Offset,
- RE->Word1,
- 0 /*Addend*/));
- DEBUG(dbgs() << "Relocation at '" << TargetName << "' + " << Offset
- << " from '" << SourceName << "(Word1: "
- << format("0x%x", RE->Word1) << ")\n");
- }
- }
- return false;
-}
-
-
-bool RuntimeDyldMachO::
-loadSegment64(const MachOObject *Obj,
- const MachOObject::LoadCommandInfo *SegmentLCI,
- const InMemoryStruct<macho::SymtabLoadCommand> &SymtabLC) {
- InMemoryStruct<macho::Segment64LoadCommand> Segment64LC;
- Obj->ReadSegment64LoadCommand(*SegmentLCI, Segment64LC);
- if (!Segment64LC)
- return Error("unable to load segment load command");
-
- for (unsigned SectNum = 0; SectNum != Segment64LC->NumSections; ++SectNum) {
- InMemoryStruct<macho::Section64> Sect;
- Obj->ReadSection64(*SegmentLCI, SectNum, Sect);
- if (!Sect)
- return Error("unable to load section: '" + Twine(SectNum) + "'");
-
- // FIXME: For the time being, we're only loading text segments.
- if (Sect->Flags != 0x80000400)
- continue;
-
- // Address and names of symbols in the section.
- typedef std::pair<uint64_t, StringRef> SymbolEntry;
- SmallVector<SymbolEntry, 64> Symbols;
- // Index of all the names, in this section or not. Used when we're
- // dealing with relocation entries.
- SmallVector<StringRef, 64> SymbolNames;
- for (unsigned i = 0; i != SymtabLC->NumSymbolTableEntries; ++i) {
- InMemoryStruct<macho::Symbol64TableEntry> STE;
- Obj->ReadSymbol64TableEntry(SymtabLC->SymbolTableOffset, i, STE);
- if (!STE)
- return Error("unable to read symbol: '" + Twine(i) + "'");
- if (STE->SectionIndex > Segment64LC->NumSections)
- return Error("invalid section index for symbol: '" + Twine(i) + "'");
- // Get the symbol name.
- StringRef Name = Obj->getStringAtIndex(STE->StringIndex);
- SymbolNames.push_back(Name);
-
- // Just skip symbols not defined in this section.
- if ((unsigned)STE->SectionIndex - 1 != SectNum)
- continue;
-
- // FIXME: Check the symbol type and flags.
- if (STE->Type != 0xF) // external, defined in this section.
- continue;
- if (STE->Flags != 0x0)
- continue;
-
- // Remember the symbol.
- Symbols.push_back(SymbolEntry(STE->Value, Name));
-
- DEBUG(dbgs() << "Function sym: '" << Name << "' @ " <<
- (Sect->Address + STE->Value) << "\n");
+void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
+ const ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ LocalSymbolMap &Symbols,
+ StubMap &Stubs) {
+
+ uint32_t RelType = (uint32_t) (Rel.Type & 0xffffffffL);
+ RelocationValueRef Value;
+ SectionEntry &Section = Sections[Rel.SectionID];
+ uint8_t *Target = Section.Address + Rel.Offset;
+
+ bool isExtern = (RelType >> 27) & 1;
+ if (isExtern) {
+ StringRef TargetName;
+ const SymbolRef &Symbol = Rel.Symbol;
+ Symbol.getName(TargetName);
+    // First, look for the symbol in the object file's local symbols.
+ LocalSymbolMap::iterator lsi = Symbols.find(TargetName.data());
+ if (lsi != Symbols.end()) {
+ Value.SectionID = lsi->second.first;
+ Value.Addend = lsi->second.second;
+ } else {
+      // Second, look for the symbol in the global symbol table.
+ StringMap<SymbolLoc>::iterator gsi = SymbolTable.find(TargetName.data());
+ if (gsi != SymbolTable.end()) {
+ Value.SectionID = gsi->second.first;
+ Value.Addend = gsi->second.second;
+ } else
+ Value.SymbolName = TargetName.data();
}
- // Sort the symbols by address, just in case they didn't come in that way.
- array_pod_sort(Symbols.begin(), Symbols.end());
-
- // If there weren't any functions (odd, but just in case...)
- if (!Symbols.size())
- continue;
-
- // Extract the function data.
- uint8_t *Base = (uint8_t*)Obj->getData(Segment64LC->FileOffset,
- Segment64LC->FileSize).data();
- for (unsigned i = 0, e = Symbols.size() - 1; i != e; ++i) {
- uint64_t StartOffset = Sect->Address + Symbols[i].first;
- uint64_t EndOffset = Symbols[i + 1].first - 1;
- DEBUG(dbgs() << "Extracting function: " << Symbols[i].second
- << " from [" << StartOffset << ", " << EndOffset << "]\n");
- extractFunction(Symbols[i].second, Base + StartOffset, Base + EndOffset);
+ } else {
+ error_code err;
+ uint8_t sectionIndex = static_cast<uint8_t>(RelType & 0xFF);
+ section_iterator si = Obj.begin_sections(),
+ se = Obj.end_sections();
+ for (uint8_t i = 1; i < sectionIndex; i++) {
+ error_code err;
+ si.increment(err);
+ if (si == se)
+ break;
}
- // The last symbol we do after since the end address is calculated
- // differently because there is no next symbol to reference.
- uint64_t StartOffset = Symbols[Symbols.size() - 1].first;
- uint64_t EndOffset = Sect->Size - 1;
- DEBUG(dbgs() << "Extracting function: " << Symbols[Symbols.size()-1].second
- << " from [" << StartOffset << ", " << EndOffset << "]\n");
- extractFunction(Symbols[Symbols.size()-1].second,
- Base + StartOffset, Base + EndOffset);
-
- // Now extract the relocation information for each function and process it.
- for (unsigned j = 0; j != Sect->NumRelocationTableEntries; ++j) {
- InMemoryStruct<macho::RelocationEntry> RE;
- Obj->ReadRelocationEntry(Sect->RelocationTableOffset, j, RE);
- if (RE->Word0 & macho::RF_Scattered)
- return Error("NOT YET IMPLEMENTED: scattered relocations.");
- // Word0 of the relocation is the offset into the section where the
- // relocation should be applied. We need to translate that into an
- // offset into a function since that's our atom.
- uint32_t Offset = RE->Word0;
- // Look for the function containing the address. This is used for JIT
- // code, so the number of functions in section is almost always going
- // to be very small (usually just one), so until we have use cases
- // where that's not true, just use a trivial linear search.
- unsigned SymbolNum;
- unsigned NumSymbols = Symbols.size();
- assert(NumSymbols > 0 && Symbols[0].first <= Offset &&
- "No symbol containing relocation!");
- for (SymbolNum = 0; SymbolNum < NumSymbols - 1; ++SymbolNum)
- if (Symbols[SymbolNum + 1].first > Offset)
- break;
- // Adjust the offset to be relative to the symbol.
- Offset -= Symbols[SymbolNum].first;
- // Get the name of the symbol containing the relocation.
- StringRef TargetName = SymbolNames[SymbolNum];
-
- bool isExtern = (RE->Word1 >> 27) & 1;
- // Figure out the source symbol of the relocation. If isExtern is true,
- // this relocation references the symbol table, otherwise it references
- // a section in the same object, numbered from 1 through NumSections
- // (SectionBases is [0, NumSections-1]).
- if (!isExtern)
- return Error("Internal relocations not supported.");
- uint32_t SourceNum = RE->Word1 & 0xffffff; // 24-bit value
- StringRef SourceName = SymbolNames[SourceNum];
-
- // FIXME: Get the relocation addend from the target address.
-
- // Now store the relocation information. Associate it with the source
- // symbol.
- Relocations[SourceName].push_back(RelocationEntry(TargetName,
- Offset,
- RE->Word1,
- 0 /*Addend*/));
- DEBUG(dbgs() << "Relocation at '" << TargetName << "' + " << Offset
- << " from '" << SourceName << "(Word1: "
- << format("0x%x", RE->Word1) << ")\n");
+ assert(si != se && "No section containing relocation!");
+ Value.SectionID = findOrEmitSection(*si, true, ObjSectionToID);
+ Value.Addend = *(const intptr_t *)Target;
+ if (Value.Addend) {
+      // The MachO addend is an offset from the current section; we need to
+      // set it as an offset from the destination section.
+ Value.Addend += Section.ObjAddress - Sections[Value.SectionID].ObjAddress;
}
}
- return false;
-}
-
-bool RuntimeDyldMachO::loadObject(MemoryBuffer *InputBuffer) {
- // If the linker is in an error state, don't do anything.
- if (hasError())
- return true;
- // Load the Mach-O wrapper object.
- std::string ErrorStr;
- OwningPtr<MachOObject> Obj(
- MachOObject::LoadFromBuffer(InputBuffer, &ErrorStr));
- if (!Obj)
- return Error("unable to load object: '" + ErrorStr + "'");
-
- // Get the CPU type information from the header.
- const macho::Header &Header = Obj->getHeader();
-
- // FIXME: Error checking that the loaded object is compatible with
- // the system we're running on.
- CPUType = Header.CPUType;
- CPUSubtype = Header.CPUSubtype;
- // Validate that the load commands match what we expect.
- const MachOObject::LoadCommandInfo *SegmentLCI = 0, *SymtabLCI = 0,
- *DysymtabLCI = 0;
- for (unsigned i = 0; i != Header.NumLoadCommands; ++i) {
- const MachOObject::LoadCommandInfo &LCI = Obj->getLoadCommandInfo(i);
- switch (LCI.Command.Type) {
- case macho::LCT_Segment:
- case macho::LCT_Segment64:
- if (SegmentLCI)
- return Error("unexpected input object (multiple segments)");
- SegmentLCI = &LCI;
- break;
- case macho::LCT_Symtab:
- if (SymtabLCI)
- return Error("unexpected input object (multiple symbol tables)");
- SymtabLCI = &LCI;
- break;
- case macho::LCT_Dysymtab:
- if (DysymtabLCI)
- return Error("unexpected input object (multiple symbol tables)");
- DysymtabLCI = &LCI;
- break;
- default:
- return Error("unexpected input object (unexpected load command");
+ if (Arch == Triple::arm && RelType == macho::RIT_ARM_Branch24Bit) {
+    // This is an ARM branch relocation; we need to use a stub function.
+
+    // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end())
+ resolveRelocation(Target, (uint64_t)Target,
+ (uint64_t)Section.Address + i->second,
+ RelType, 0);
+ else {
+ // Create a new stub function.
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *StubTargetAddr = createStubFunction(Section.Address +
+ Section.StubOffset);
+ AddRelocation(Value, Rel.SectionID, StubTargetAddr - Section.Address,
+ macho::RIT_Vanilla);
+ resolveRelocation(Target, (uint64_t)Target,
+ (uint64_t)Section.Address + Section.StubOffset,
+ RelType, 0);
+ Section.StubOffset += getMaxStubSize();
}
- }
-
- if (!SymtabLCI)
- return Error("no symbol table found in object");
- if (!SegmentLCI)
- return Error("no symbol table found in object");
-
- // Read and register the symbol table data.
- InMemoryStruct<macho::SymtabLoadCommand> SymtabLC;
- Obj->ReadSymtabLoadCommand(*SymtabLCI, SymtabLC);
- if (!SymtabLC)
- return Error("unable to load symbol table load command");
- Obj->RegisterStringTable(*SymtabLC);
-
- // Read the dynamic link-edit information, if present (not present in static
- // objects).
- if (DysymtabLCI) {
- InMemoryStruct<macho::DysymtabLoadCommand> DysymtabLC;
- Obj->ReadDysymtabLoadCommand(*DysymtabLCI, DysymtabLC);
- if (!DysymtabLC)
- return Error("unable to load dynamic link-exit load command");
-
- // FIXME: We don't support anything interesting yet.
-// if (DysymtabLC->LocalSymbolsIndex != 0)
-// return Error("NOT YET IMPLEMENTED: local symbol entries");
-// if (DysymtabLC->ExternalSymbolsIndex != 0)
-// return Error("NOT YET IMPLEMENTED: non-external symbol entries");
-// if (DysymtabLC->UndefinedSymbolsIndex != SymtabLC->NumSymbolTableEntries)
-// return Error("NOT YET IMPLEMENTED: undefined symbol entries");
- }
-
- // Load the segment load command.
- if (SegmentLCI->Command.Type == macho::LCT_Segment) {
- if (loadSegment32(Obj.get(), SegmentLCI, SymtabLC))
- return true;
- } else {
- if (loadSegment64(Obj.get(), SegmentLCI, SymtabLC))
- return true;
- }
-
- return false;
+ } else
+ AddRelocation(Value, Rel.SectionID, Rel.Offset, RelType);
}
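The isExtern test above, like the removed reassignSymbolAddress further down, unpacks the packed second word of a classic Mach-O relocation entry (r_symbolnum:24, r_pcrel:1, r_length:2, r_extern:1, r_type:4 bits). A minimal, self-contained sketch of that decoding; the raw word here is a made-up value for illustration:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t Word1 = 0x5D000007;                    // hypothetical raw value
    uint32_t SymbolNum = Word1 & 0xffffff;          // low 24 bits
    bool     IsPCRel   = (Word1 >> 24) & 1;
    unsigned Size      = 1u << ((Word1 >> 25) & 3); // 1, 2, 4, or 8 bytes
    bool     IsExtern  = (Word1 >> 27) & 1;
    unsigned Type      = (Word1 >> 28) & 0xf;
    assert(SymbolNum == 7 && IsPCRel && Size == 4 && IsExtern && Type == 5);
    return 0;
  }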
-// Assign an address to a symbol name and resolve all the relocations
-// associated with it.
-void RuntimeDyldMachO::reassignSymbolAddress(StringRef Name, uint8_t *Addr) {
- // Assign the address in our symbol table.
- SymbolTable[Name] = Addr;
-
- RelocationList &Relocs = Relocations[Name];
- for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
- RelocationEntry &RE = Relocs[i];
- uint8_t *Target = SymbolTable[RE.Target] + RE.Offset;
- bool isPCRel = (RE.Data >> 24) & 1;
- unsigned Type = (RE.Data >> 28) & 0xf;
- unsigned Size = 1 << ((RE.Data >> 25) & 3);
-
- DEBUG(dbgs() << "Resolving relocation at '" << RE.Target
- << "' + " << RE.Offset << " (" << format("%p", Target) << ")"
- << " from '" << Name << " (" << format("%p", Addr) << ")"
- << "(" << (isPCRel ? "pcrel" : "absolute")
- << ", type: " << Type << ", Size: " << Size << ").\n");
-
- resolveRelocation(Target, Addr, isPCRel, Type, Size);
- RE.isResolved = true;
- }
-}
-bool RuntimeDyldMachO::isKnownFormat(const MemoryBuffer *InputBuffer) {
+bool RuntimeDyldMachO::isCompatibleFormat(const MemoryBuffer *InputBuffer) const {
StringRef Magic = InputBuffer->getBuffer().slice(0, 4);
if (Magic == "\xFE\xED\xFA\xCE") return true;
if (Magic == "\xCE\xFA\xED\xFE") return true;
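The two prefixes tested above are the 32-bit Mach-O magic number 0xFEEDFACE in big- and little-endian byte order. A minimal sketch of the same check over a raw buffer; the helper name is hypothetical:

  #include <cstddef>
  #include <cstring>

  static bool looksLikeMachO32(const char *Buf, std::size_t Len) {
    if (Len < 4)
      return false;
    return std::memcmp(Buf, "\xFE\xED\xFA\xCE", 4) == 0 || // big-endian
           std::memcmp(Buf, "\xCE\xFA\xED\xFE", 4) == 0;   // byte-swapped
  }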
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
new file mode 100644
index 0000000..898b851
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -0,0 +1,70 @@
+//===-- RuntimeDyldMachO.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIME_DYLD_MACHO_H
+#define LLVM_RUNTIME_DYLD_MACHO_H
+
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/Object/MachOObject.h"
+#include "llvm/Support/Format.h"
+#include "RuntimeDyldImpl.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+
+namespace llvm {
+class RuntimeDyldMachO : public RuntimeDyldImpl {
+protected:
+ bool resolveI386Relocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend);
+ bool resolveX86_64Relocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend);
+ bool resolveARMRelocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ bool isPCRel,
+ unsigned Type,
+ unsigned Size,
+ int64_t Addend);
+
+ virtual void processRelocationRef(const ObjRelocationInfo &Rel,
+ const ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ LocalSymbolMap &Symbols, StubMap &Stubs);
+
+public:
+ virtual void resolveRelocation(uint8_t *LocalAddress,
+ uint64_t FinalAddress,
+ uint64_t Value,
+ uint32_t Type,
+ int64_t Addend);
+
+ RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
+
+ bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
index 004b865..42364f9 100644
--- a/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/TargetSelect.cpp
@@ -7,9 +7,10 @@
//
//===----------------------------------------------------------------------===//
//
-// This just asks the TargetRegistry for the appropriate JIT to use, and allows
-// the user to specify a specific one on the commandline with -march=x. Clients
-// should initialize targets prior to calling createJIT.
+// This just asks the TargetRegistry for the appropriate target to use, and
+// allows the user to specify a specific one on the commandline with -march=x,
+// -mcpu=y, and -mattr=a,-b,+c. Clients should initialize targets prior to
+// calling selectTarget().
//
//===----------------------------------------------------------------------===//
@@ -21,21 +22,27 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
+
using namespace llvm;
+TargetMachine *EngineBuilder::selectTarget() {
+ StringRef MArch = "";
+ StringRef MCPU = "";
+ SmallVector<std::string, 1> MAttrs;
+ Triple TT(M->getTargetTriple());
+
+ return selectTarget(TT, MArch, MCPU, MAttrs);
+}
+
/// selectTarget - Pick a target either via -march or by guessing the native
/// arch. Add any CPU features specified via -mcpu or -mattr.
-TargetMachine *EngineBuilder::selectTarget(Module *Mod,
+TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
StringRef MArch,
StringRef MCPU,
- const SmallVectorImpl<std::string>& MAttrs,
- Reloc::Model RM,
- CodeModel::Model CM,
- std::string *ErrorStr) {
- Triple TheTriple(Mod->getTargetTriple());
+ const SmallVectorImpl<std::string>& MAttrs) {
+ Triple TheTriple(TargetTriple);
if (TheTriple.getTriple().empty())
- TheTriple.setTriple(sys::getHostTriple());
+ TheTriple.setTriple(sys::getDefaultTargetTriple());
// Adjust the triple to match what the user requested.
const Target *TheTarget = 0;
@@ -55,7 +62,7 @@ TargetMachine *EngineBuilder::selectTarget(Module *Mod,
}
// Adjust the triple to match (if known), otherwise stick with the
- // module/host triple.
+ // requested/host triple.
Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
if (Type != Triple::UnknownArch)
TheTriple.setArch(Type);
@@ -69,12 +76,6 @@ TargetMachine *EngineBuilder::selectTarget(Module *Mod,
}
}
- if (!TheTarget->hasJIT()) {
- errs() << "WARNING: This target JIT is not designed for the host you are"
- << " running. If bad things happen, please choose a different "
- << "-march switch.\n";
- }
-
// Package up features to be passed to target/subtarget
std::string FeaturesStr;
if (!MAttrs.empty()) {
@@ -87,7 +88,9 @@ TargetMachine *EngineBuilder::selectTarget(Module *Mod,
// Allocate a target...
TargetMachine *Target = TheTarget->createTargetMachine(TheTriple.getTriple(),
MCPU, FeaturesStr,
- RM, CM);
+ Options,
+ RelocModel, CMModel,
+ OptLevel);
assert(Target && "Could not allocate target machine!");
return Target;
}
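With the refactor above, callers no longer pass the Module and reloc/code models to selectTarget; those live on the EngineBuilder, and the zero-argument overload picks them up along with the module's triple. A hedged usage sketch, assuming a valid Module pointer and the usual target initialization:

  #include "llvm/Module.h"
  #include "llvm/ExecutionEngine/ExecutionEngine.h"
  #include "llvm/Support/TargetSelect.h"

  llvm::TargetMachine *pickTargetFor(llvm::Module *M) {
    llvm::InitializeNativeTarget();  // clients must initialize targets first
    llvm::EngineBuilder Builder(M);
    return Builder.selectTarget();   // uses M's triple, -march/-mcpu/-mattr
  }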
diff --git a/contrib/llvm/lib/Linker/LinkArchives.cpp b/contrib/llvm/lib/Linker/LinkArchives.cpp
index 2c4ed7f..c16d195 100644
--- a/contrib/llvm/lib/Linker/LinkArchives.cpp
+++ b/contrib/llvm/lib/Linker/LinkArchives.cpp
@@ -16,7 +16,6 @@
#include "llvm/Module.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/Bitcode/Archive.h"
-#include "llvm/Config/config.h"
#include <memory>
#include <set>
using namespace llvm;
@@ -141,7 +140,7 @@ Linker::LinkInArchive(const sys::Path &Filename, bool &is_native) {
// Find the modules we need to link into the target module. Note that arch
// keeps ownership of these modules and may return the same Module* from a
// subsequent call.
- std::set<Module*> Modules;
+ SmallVector<Module*, 16> Modules;
if (!arch->findModulesDefiningSymbols(UndefinedSymbols, Modules, &ErrMsg))
return error("Cannot find symbols in '" + Filename.str() +
"': " + ErrMsg);
@@ -158,7 +157,7 @@ Linker::LinkInArchive(const sys::Path &Filename, bool &is_native) {
UndefinedSymbols.end());
// Loop over all the Modules that we got back from the archive
- for (std::set<Module*>::iterator I=Modules.begin(), E=Modules.end();
+ for (SmallVectorImpl<Module*>::iterator I=Modules.begin(), E=Modules.end();
I != E; ++I) {
// Get the module we must link in.
diff --git a/contrib/llvm/lib/Linker/LinkModules.cpp b/contrib/llvm/lib/Linker/LinkModules.cpp
index 03a962e..765fcc8 100644
--- a/contrib/llvm/lib/Linker/LinkModules.cpp
+++ b/contrib/llvm/lib/Linker/LinkModules.cpp
@@ -16,11 +16,16 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Module.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <cctype>
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -38,11 +43,16 @@ class TypeMapTy : public ValueMapTypeRemapper {
/// case we need to roll back.
SmallVector<Type*, 16> SpeculativeTypes;
- /// DefinitionsToResolve - This is a list of non-opaque structs in the source
- /// module that are mapped to an opaque struct in the destination module.
- SmallVector<StructType*, 16> DefinitionsToResolve;
-public:
+ /// SrcDefinitionsToResolve - This is a list of non-opaque structs in the
+ /// source module that are mapped to an opaque struct in the destination
+ /// module.
+ SmallVector<StructType*, 16> SrcDefinitionsToResolve;
+ /// DstResolvedOpaqueTypes - This is the set of opaque types in the
+  /// destination module that are getting a body from the source module.
+ SmallPtrSet<StructType*, 16> DstResolvedOpaqueTypes;
+
+public:
/// addTypeMapping - Indicate that the specified type in the destination
/// module is conceptually equivalent to the specified type in the source
/// module.
@@ -58,6 +68,18 @@ public:
FunctionType *get(FunctionType *T) {return cast<FunctionType>(get((Type*)T));}
+ /// dump - Dump out the type map for debugging purposes.
+ void dump() const {
+ for (DenseMap<Type*, Type*>::const_iterator
+ I = MappedTypes.begin(), E = MappedTypes.end(); I != E; ++I) {
+ dbgs() << "TypeMap: ";
+ I->first->dump();
+ dbgs() << " => ";
+ I->second->dump();
+ dbgs() << '\n';
+ }
+ }
+
private:
Type *getImpl(Type *T);
/// remapType - Implement the ValueMapTypeRemapper interface.
@@ -118,11 +140,17 @@ bool TypeMapTy::areTypesIsomorphic(Type *DstTy, Type *SrcTy) {
return true;
}
- // Mapping a non-opaque source type to an opaque dest. Keep the dest, but
- // fill it in later. This doesn't need to be speculative.
+ // Mapping a non-opaque source type to an opaque dest. If this is the first
+ // type that we're mapping onto this destination type then we succeed. Keep
+ // the dest, but fill it in later. This doesn't need to be speculative. If
+ // this is the second (different) type that we're trying to map onto the
+ // same opaque type then we fail.
if (cast<StructType>(DstTy)->isOpaque()) {
+ // We can only map one source type onto the opaque destination type.
+ if (!DstResolvedOpaqueTypes.insert(cast<StructType>(DstTy)))
+ return false;
+ SrcDefinitionsToResolve.push_back(SSTy);
Entry = DstTy;
- DefinitionsToResolve.push_back(SSTy);
return true;
}
}
@@ -137,6 +165,7 @@ bool TypeMapTy::areTypesIsomorphic(Type *DstTy, Type *SrcTy) {
if (PointerType *PT = dyn_cast<PointerType>(DstTy)) {
if (PT->getAddressSpace() != cast<PointerType>(SrcTy)->getAddressSpace())
return false;
+
} else if (FunctionType *FT = dyn_cast<FunctionType>(DstTy)) {
if (FT->isVarArg() != cast<FunctionType>(SrcTy)->isVarArg())
return false;
@@ -174,9 +203,9 @@ void TypeMapTy::linkDefinedTypeBodies() {
SmallString<16> TmpName;
// Note that processing entries in this loop (calling 'get') can add new
- // entries to the DefinitionsToResolve vector.
- while (!DefinitionsToResolve.empty()) {
- StructType *SrcSTy = DefinitionsToResolve.pop_back_val();
+ // entries to the SrcDefinitionsToResolve vector.
+ while (!SrcDefinitionsToResolve.empty()) {
+ StructType *SrcSTy = SrcDefinitionsToResolve.pop_back_val();
StructType *DstSTy = cast<StructType>(MappedTypes[SrcSTy]);
// TypeMap is a many-to-one mapping, if there were multiple types that
@@ -204,16 +233,17 @@ void TypeMapTy::linkDefinedTypeBodies() {
TmpName.clear();
}
}
+
+ DstResolvedOpaqueTypes.clear();
}
-
/// get - Return the mapped type to use for the specified input type from the
/// source module.
Type *TypeMapTy::get(Type *Ty) {
Type *Result = getImpl(Ty);
// If this caused a reference to any struct type, resolve it before returning.
- if (!DefinitionsToResolve.empty())
+ if (!SrcDefinitionsToResolve.empty())
linkDefinedTypeBodies();
return Result;
}
@@ -252,7 +282,7 @@ Type *TypeMapTy::getImpl(Type *Ty) {
// Otherwise, rebuild a modified type.
switch (Ty->getTypeID()) {
- default: assert(0 && "unknown derived type to remap");
+ default: llvm_unreachable("unknown derived type to remap");
case Type::ArrayTyID:
return *Entry = ArrayType::get(ElementTypes[0],
cast<ArrayType>(Ty)->getNumElements());
@@ -304,12 +334,12 @@ Type *TypeMapTy::getImpl(Type *Ty) {
// Otherwise we create a new type and resolve its body later. This will be
// resolved by the top level of get().
- DefinitionsToResolve.push_back(STy);
- return *Entry = StructType::create(STy->getContext());
+ SrcDefinitionsToResolve.push_back(STy);
+ StructType *DTy = StructType::create(STy->getContext());
+ DstResolvedOpaqueTypes.insert(DTy);
+ return *Entry = DTy;
}
-
-
//===----------------------------------------------------------------------===//
// ModuleLinker implementation.
//===----------------------------------------------------------------------===//
@@ -341,6 +371,9 @@ namespace {
// Set of items not to link in from source.
SmallPtrSet<const Value*, 16> DoNotLinkFromSource;
+ // Vector of functions to lazily link in.
+ std::vector<Function*> LazilyLinkFunctions;
+
public:
std::string ErrorMsg;
@@ -360,7 +393,9 @@ namespace {
/// getLinkageResult - This analyzes the two global values and determines
/// what the result will look like in the destination module.
bool getLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
- GlobalValue::LinkageTypes &LT, bool &LinkFromSrc);
+ GlobalValue::LinkageTypes &LT,
+ GlobalValue::VisibilityTypes &Vis,
+ bool &LinkFromSrc);
/// getLinkedToGlobal - Given a global in the source module, return the
/// global in the destination module that is being linked to, if any.
@@ -384,11 +419,19 @@ namespace {
}
void computeTypeMapping();
+ bool categorizeModuleFlagNodes(const NamedMDNode *ModFlags,
+ DenseMap<MDString*, MDNode*> &ErrorNode,
+ DenseMap<MDString*, MDNode*> &WarningNode,
+ DenseMap<MDString*, MDNode*> &OverrideNode,
+ DenseMap<MDString*,
+ SmallSetVector<MDNode*, 8> > &RequireNodes,
+ SmallSetVector<MDString*, 16> &SeenIDs);
bool linkAppendingVarProto(GlobalVariable *DstGV, GlobalVariable *SrcGV);
bool linkGlobalProto(GlobalVariable *SrcGV);
bool linkFunctionProto(Function *SrcF);
bool linkAliasProto(GlobalAlias *SrcA);
+ bool linkModuleFlagsMetadata();
void linkAppendingVarInit(const AppendingVarInfo &AVI);
void linkGlobalInits();
@@ -398,8 +441,6 @@ namespace {
};
}
-
-
/// forceRenaming - The LLVM SymbolTable class autorenames globals that conflict
/// in the symbol table. This is good for all clients except for us. Go
/// through the trouble to force this back.
@@ -421,9 +462,9 @@ static void forceRenaming(GlobalValue *GV, StringRef Name) {
}
}
-/// CopyGVAttributes - copy additional attributes (those not needed to construct
+/// copyGVAttributes - copy additional attributes (those not needed to construct
/// a GlobalValue) from the SrcGV to the DestGV.
-static void CopyGVAttributes(GlobalValue *DestGV, const GlobalValue *SrcGV) {
+static void copyGVAttributes(GlobalValue *DestGV, const GlobalValue *SrcGV) {
// Use the maximum alignment, rather than just copying the alignment of SrcGV.
unsigned Alignment = std::max(DestGV->getAlignment(), SrcGV->getAlignment());
DestGV->copyAttributesFrom(SrcGV);
@@ -432,21 +473,33 @@ static void CopyGVAttributes(GlobalValue *DestGV, const GlobalValue *SrcGV) {
forceRenaming(DestGV, SrcGV->getName());
}
+static bool isLessConstraining(GlobalValue::VisibilityTypes a,
+ GlobalValue::VisibilityTypes b) {
+ if (a == GlobalValue::HiddenVisibility)
+ return false;
+ if (b == GlobalValue::HiddenVisibility)
+ return true;
+ if (a == GlobalValue::ProtectedVisibility)
+ return false;
+ if (b == GlobalValue::ProtectedVisibility)
+ return true;
+ return false;
+}
+
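isLessConstraining above orders visibilities as hidden > protected > default, so the merged global keeps the stricter of the two. A standalone sketch of the rule, with a local enum standing in for GlobalValue::VisibilityTypes:

  #include <cassert>

  enum Visibility { Default, Protected, Hidden };

  static bool lessConstraining(Visibility a, Visibility b) {
    if (a == Hidden) return false;
    if (b == Hidden) return true;
    if (a == Protected) return false;
    if (b == Protected) return true;
    return false; // both default: neither is less constraining
  }

  int main() {
    Visibility Src = Default, Dest = Hidden;
    // Mirrors getLinkageResult: keep the more constraining visibility.
    Visibility Vis = lessConstraining(Src, Dest) ? Dest : Src;
    assert(Vis == Hidden);
    return 0;
  }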
/// getLinkageResult - This analyzes the two global values and determines what
/// the result will look like in the destination module. In particular, it
-/// computes the resultant linkage type, computes whether the global in the
-/// source should be copied over to the destination (replacing the existing
-/// one), and computes whether this linkage is an error or not. It also performs
-/// visibility checks: we cannot link together two symbols with different
-/// visibilities.
+/// computes the resultant linkage type and visibility, computes whether the
+/// global in the source should be copied over to the destination (replacing
+/// the existing one), and computes whether this linkage is an error or not.
bool ModuleLinker::getLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
- GlobalValue::LinkageTypes &LT,
+ GlobalValue::LinkageTypes &LT,
+ GlobalValue::VisibilityTypes &Vis,
bool &LinkFromSrc) {
assert(Dest && "Must have two globals being queried");
assert(!Src->hasLocalLinkage() &&
"If Src has internal linkage, Dest shouldn't be set!");
- bool SrcIsDeclaration = Src->isDeclaration();
+ bool SrcIsDeclaration = Src->isDeclaration() && !Src->isMaterializable();
bool DestIsDeclaration = Dest->isDeclaration();
if (SrcIsDeclaration) {
@@ -502,13 +555,10 @@ bool ModuleLinker::getLinkageResult(GlobalValue *Dest, const GlobalValue *Src,
"': symbol multiply defined!");
}
- // Check visibility
- if (Src->getVisibility() != Dest->getVisibility() &&
- !SrcIsDeclaration && !DestIsDeclaration &&
- !Src->hasAvailableExternallyLinkage() &&
- !Dest->hasAvailableExternallyLinkage())
- return emitError("Linking globals named '" + Src->getName() +
- "': symbols have different visibilities!");
+ // Compute the visibility. We follow the rules in the System V Application
+ // Binary Interface.
+ Vis = isLessConstraining(Src->getVisibility(), Dest->getVisibility()) ?
+ Dest->getVisibility() : Src->getVisibility();
return false;
}
@@ -539,7 +589,54 @@ void ModuleLinker::computeTypeMapping() {
if (GlobalValue *DGV = getLinkedToGlobal(I))
TypeMap.addTypeMapping(DGV->getType(), I->getType());
}
-
+
+ // Incorporate types by name, scanning all the types in the source module.
+ // At this point, the destination module may have a type "%foo = { i32 }" for
+ // example. When the source module got loaded into the same LLVMContext, if
+ // it had the same type, it would have been renamed to "%foo.42 = { i32 }".
+ std::vector<StructType*> SrcStructTypes;
+ SrcM->findUsedStructTypes(SrcStructTypes);
+ SmallPtrSet<StructType*, 32> SrcStructTypesSet(SrcStructTypes.begin(),
+ SrcStructTypes.end());
+
+ std::vector<StructType*> DstStructTypes;
+ DstM->findUsedStructTypes(DstStructTypes);
+ SmallPtrSet<StructType*, 32> DstStructTypesSet(DstStructTypes.begin(),
+ DstStructTypes.end());
+
+ for (unsigned i = 0, e = SrcStructTypes.size(); i != e; ++i) {
+ StructType *ST = SrcStructTypes[i];
+ if (!ST->hasName()) continue;
+
+ // Check to see if there is a dot in the name followed by a digit.
+ size_t DotPos = ST->getName().rfind('.');
+ if (DotPos == 0 || DotPos == StringRef::npos ||
+ ST->getName().back() == '.' || !isdigit(ST->getName()[DotPos+1]))
+ continue;
+
+ // Check to see if the destination module has a struct with the prefix name.
+ if (StructType *DST = DstM->getTypeByName(ST->getName().substr(0, DotPos)))
+ // Don't use it if this actually came from the source module. They're in
+ // the same LLVMContext after all. Also don't use it unless the type is
+ // actually used in the destination module. This can happen in situations
+ // like this:
+ //
+ // Module A Module B
+ // -------- --------
+ // %Z = type { %A } %B = type { %C.1 }
+ // %A = type { %B.1, [7 x i8] } %C.1 = type { i8* }
+ // %B.1 = type { %C } %A.2 = type { %B.3, [5 x i8] }
+ // %C = type { i8* } %B.3 = type { %C.1 }
+ //
+ // When we link Module B with Module A, the '%B' in Module B is
+ // used. However, that would then use '%C.1'. But when we process '%C.1',
+ // we prefer to take the '%C' version. So we are then left with both
+ // '%C.1' and '%C' being used for the same types. This leads to some
+ // variables using one type and some using the other.
+ if (!SrcStructTypesSet.count(DST) && DstStructTypesSet.count(DST))
+ TypeMap.addTypeMapping(DST, ST);
+ }
+
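The loop above only maps a source struct named like "foo.42" back to a destination "foo"; a leading dot, a trailing dot, or a non-digit after the last dot disqualifies the name. The same test restated over std::string as a self-contained sketch; the function name is hypothetical:

  #include <cctype>
  #include <string>

  static bool splitRenamedStruct(const std::string &Name,
                                 std::string &Prefix) {
    std::string::size_type DotPos = Name.rfind('.');
    if (DotPos == 0 || DotPos == std::string::npos ||
        Name[Name.size() - 1] == '.' ||
        !isdigit((unsigned char)Name[DotPos + 1]))
      return false;
    Prefix = Name.substr(0, DotPos); // "foo.42" -> "foo"
    return true;
  }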
// Don't bother incorporating aliases, they aren't generally typed well.
// Now that we have discovered all of the type equivalences, get a body for
@@ -590,7 +687,7 @@ bool ModuleLinker::linkAppendingVarProto(GlobalVariable *DstGV,
DstGV->getType()->getAddressSpace());
// Propagate alignment, visibility and section info.
- CopyGVAttributes(NG, DstGV);
+ copyGVAttributes(NG, DstGV);
AppendingVarInfo AVI;
AVI.NewGV = NG;
@@ -615,6 +712,7 @@ bool ModuleLinker::linkAppendingVarProto(GlobalVariable *DstGV,
/// merge them into the dest module.
bool ModuleLinker::linkGlobalProto(GlobalVariable *SGV) {
GlobalValue *DGV = getLinkedToGlobal(SGV);
+ llvm::Optional<GlobalValue::VisibilityTypes> NewVisibility;
if (DGV) {
// Concatenation of appending linkage variables is magic and handled later.
@@ -624,9 +722,11 @@ bool ModuleLinker::linkGlobalProto(GlobalVariable *SGV) {
// Determine whether linkage of these two globals follows the source
// module's definition or the destination module's definition.
GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
+ GlobalValue::VisibilityTypes NV;
bool LinkFromSrc = false;
- if (getLinkageResult(DGV, SGV, NewLinkage, LinkFromSrc))
+ if (getLinkageResult(DGV, SGV, NewLinkage, NV, LinkFromSrc))
return true;
+ NewVisibility = NV;
// If we're not linking from the source, then keep the definition that we
// have.
@@ -636,9 +736,10 @@ bool ModuleLinker::linkGlobalProto(GlobalVariable *SGV) {
if (DGVar->isDeclaration() && SGV->isConstant() && !DGVar->isConstant())
DGVar->setConstant(true);
- // Set calculated linkage.
+ // Set calculated linkage and visibility.
DGV->setLinkage(NewLinkage);
-
+ DGV->setVisibility(*NewVisibility);
+
// Make sure to remember this mapping.
ValueMap[SGV] = ConstantExpr::getBitCast(DGV,TypeMap.get(SGV->getType()));
@@ -660,7 +761,9 @@ bool ModuleLinker::linkGlobalProto(GlobalVariable *SGV) {
SGV->isThreadLocal(),
SGV->getType()->getAddressSpace());
// Propagate alignment, visibility and section info.
- CopyGVAttributes(NewDGV, SGV);
+ copyGVAttributes(NewDGV, SGV);
+ if (NewVisibility)
+ NewDGV->setVisibility(*NewVisibility);
if (DGV) {
DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDGV, DGV->getType()));
@@ -676,17 +779,21 @@ bool ModuleLinker::linkGlobalProto(GlobalVariable *SGV) {
/// destination module if needed, setting up mapping information.
bool ModuleLinker::linkFunctionProto(Function *SF) {
GlobalValue *DGV = getLinkedToGlobal(SF);
+ llvm::Optional<GlobalValue::VisibilityTypes> NewVisibility;
if (DGV) {
GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
bool LinkFromSrc = false;
- if (getLinkageResult(DGV, SF, NewLinkage, LinkFromSrc))
+ GlobalValue::VisibilityTypes NV;
+ if (getLinkageResult(DGV, SF, NewLinkage, NV, LinkFromSrc))
return true;
-
+ NewVisibility = NV;
+
if (!LinkFromSrc) {
// Set calculated linkage
DGV->setLinkage(NewLinkage);
-
+ DGV->setVisibility(*NewVisibility);
+
// Make sure to remember this mapping.
ValueMap[SF] = ConstantExpr::getBitCast(DGV, TypeMap.get(SF->getType()));
@@ -702,12 +809,21 @@ bool ModuleLinker::linkFunctionProto(Function *SF) {
// bring SF over.
Function *NewDF = Function::Create(TypeMap.get(SF->getFunctionType()),
SF->getLinkage(), SF->getName(), DstM);
- CopyGVAttributes(NewDF, SF);
+ copyGVAttributes(NewDF, SF);
+ if (NewVisibility)
+ NewDF->setVisibility(*NewVisibility);
if (DGV) {
// Any uses of DF need to change to NewDF, with cast.
DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDF, DGV->getType()));
DGV->eraseFromParent();
+ } else {
+    // Internal, LO_ODR, or LO linkage - add to the ignore set and link lazily.
+ if (SF->hasLocalLinkage() || SF->hasLinkOnceLinkage() ||
+ SF->hasAvailableExternallyLinkage()) {
+ DoNotLinkFromSource.insert(SF);
+ LazilyLinkFunctions.push_back(SF);
+ }
}
ValueMap[SF] = NewDF;
@@ -718,17 +834,21 @@ bool ModuleLinker::linkFunctionProto(Function *SF) {
/// source module.
bool ModuleLinker::linkAliasProto(GlobalAlias *SGA) {
GlobalValue *DGV = getLinkedToGlobal(SGA);
-
+ llvm::Optional<GlobalValue::VisibilityTypes> NewVisibility;
+
if (DGV) {
GlobalValue::LinkageTypes NewLinkage = GlobalValue::InternalLinkage;
+ GlobalValue::VisibilityTypes NV;
bool LinkFromSrc = false;
- if (getLinkageResult(DGV, SGA, NewLinkage, LinkFromSrc))
+ if (getLinkageResult(DGV, SGA, NewLinkage, NV, LinkFromSrc))
return true;
-
+ NewVisibility = NV;
+
if (!LinkFromSrc) {
// Set calculated linkage.
DGV->setLinkage(NewLinkage);
-
+ DGV->setVisibility(*NewVisibility);
+
// Make sure to remember this mapping.
ValueMap[SGA] = ConstantExpr::getBitCast(DGV,TypeMap.get(SGA->getType()));
@@ -744,7 +864,9 @@ bool ModuleLinker::linkAliasProto(GlobalAlias *SGA) {
GlobalAlias *NewDA = new GlobalAlias(TypeMap.get(SGA->getType()),
SGA->getLinkage(), SGA->getName(),
/*aliasee*/0, DstM);
- CopyGVAttributes(NewDA, SGA);
+ copyGVAttributes(NewDA, SGA);
+ if (NewVisibility)
+ NewDA->setVisibility(*NewVisibility);
if (DGV) {
// Any uses of DGV need to change to NewDA, with cast.
@@ -756,36 +878,27 @@ bool ModuleLinker::linkAliasProto(GlobalAlias *SGA) {
return false;
}
+static void getArrayElements(Constant *C, SmallVectorImpl<Constant*> &Dest) {
+ unsigned NumElements = cast<ArrayType>(C->getType())->getNumElements();
+
+ for (unsigned i = 0; i != NumElements; ++i)
+ Dest.push_back(C->getAggregateElement(i));
+}
+
void ModuleLinker::linkAppendingVarInit(const AppendingVarInfo &AVI) {
// Merge the initializer.
SmallVector<Constant*, 16> Elements;
- if (ConstantArray *I = dyn_cast<ConstantArray>(AVI.DstInit)) {
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- Elements.push_back(I->getOperand(i));
- } else {
- assert(isa<ConstantAggregateZero>(AVI.DstInit));
- ArrayType *DstAT = cast<ArrayType>(AVI.DstInit->getType());
- Type *EltTy = DstAT->getElementType();
- Elements.append(DstAT->getNumElements(), Constant::getNullValue(EltTy));
- }
+ getArrayElements(AVI.DstInit, Elements);
Constant *SrcInit = MapValue(AVI.SrcInit, ValueMap, RF_None, &TypeMap);
- if (const ConstantArray *I = dyn_cast<ConstantArray>(SrcInit)) {
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- Elements.push_back(I->getOperand(i));
- } else {
- assert(isa<ConstantAggregateZero>(SrcInit));
- ArrayType *SrcAT = cast<ArrayType>(SrcInit->getType());
- Type *EltTy = SrcAT->getElementType();
- Elements.append(SrcAT->getNumElements(), Constant::getNullValue(EltTy));
- }
+ getArrayElements(SrcInit, Elements);
+
ArrayType *NewType = cast<ArrayType>(AVI.NewGV->getType()->getElementType());
AVI.NewGV->setInitializer(ConstantArray::get(NewType, Elements));
}
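linkAppendingVarInit above concatenates the two initializers of an appending-linkage global, after getArrayElements flattens either a ConstantArray or a zero initializer into explicit elements. The shape of that merge, as a toy sketch over ints:

  #include <vector>

  static std::vector<int> concatInits(const std::vector<int> &Dst,
                                      const std::vector<int> &Src) {
    std::vector<int> Elements(Dst.begin(), Dst.end());
    Elements.insert(Elements.end(), Src.begin(), Src.end());
    return Elements; // the new initializer has Dst.size() + Src.size() slots
  }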
-
-// linkGlobalInits - Update the initializers in the Dest module now that all
-// globals that may be referenced are in Dest.
+/// linkGlobalInits - Update the initializers in the Dest module now that all
+/// globals that may be referenced are in Dest.
void ModuleLinker::linkGlobalInits() {
// Loop over all of the globals in the src module, mapping them over as we go
for (Module::const_global_iterator I = SrcM->global_begin(),
@@ -802,9 +915,9 @@ void ModuleLinker::linkGlobalInits() {
}
}
-// linkFunctionBody - Copy the source function over into the dest function and
-// fix up references to values. At this point we know that Dest is an external
-// function, and that Src is not.
+/// linkFunctionBody - Copy the source function over into the dest function and
+/// fix up references to values. At this point we know that Dest is an external
+/// function, and that Src is not.
void ModuleLinker::linkFunctionBody(Function *Dst, Function *Src) {
assert(Src && Dst && Dst->isDeclaration() && !Src->isDeclaration());
@@ -833,7 +946,7 @@ void ModuleLinker::linkFunctionBody(Function *Dst, Function *Src) {
} else {
// Clone the body of the function into the dest function.
SmallVector<ReturnInst*, 8> Returns; // Ignore returns.
- CloneFunctionInto(Dst, Src, ValueMap, false, Returns);
+ CloneFunctionInto(Dst, Src, ValueMap, false, Returns, "", NULL, &TypeMap);
}
// There is no need to map the arguments anymore.
@@ -843,7 +956,7 @@ void ModuleLinker::linkFunctionBody(Function *Dst, Function *Src) {
}
-
+/// linkAliasBodies - Insert all of the aliases in Src into the Dest module.
void ModuleLinker::linkAliasBodies() {
for (Module::alias_iterator I = SrcM->alias_begin(), E = SrcM->alias_end();
I != E; ++I) {
@@ -856,11 +969,14 @@ void ModuleLinker::linkAliasBodies() {
}
}
-/// linkNamedMDNodes - Insert all of the named mdnodes in Src into the Dest
+/// linkNamedMDNodes - Insert all of the named MDNodes in Src into the Dest
/// module.
void ModuleLinker::linkNamedMDNodes() {
+ const NamedMDNode *SrcModFlags = SrcM->getModuleFlagsMetadata();
for (Module::const_named_metadata_iterator I = SrcM->named_metadata_begin(),
E = SrcM->named_metadata_end(); I != E; ++I) {
+ // Don't link module flags here. Do them separately.
+ if (&*I == SrcModFlags) continue;
NamedMDNode *DestNMD = DstM->getOrInsertNamedMetadata(I->getName());
// Add Src elements into Dest node.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
@@ -868,10 +984,176 @@ void ModuleLinker::linkNamedMDNodes() {
RF_None, &TypeMap));
}
}
+
+/// categorizeModuleFlagNodes - Categorize the module flags according to their
+/// type: Error, Warning, Override, and Require.
+bool ModuleLinker::
+categorizeModuleFlagNodes(const NamedMDNode *ModFlags,
+ DenseMap<MDString*, MDNode*> &ErrorNode,
+ DenseMap<MDString*, MDNode*> &WarningNode,
+ DenseMap<MDString*, MDNode*> &OverrideNode,
+ DenseMap<MDString*,
+ SmallSetVector<MDNode*, 8> > &RequireNodes,
+ SmallSetVector<MDString*, 16> &SeenIDs) {
+ bool HasErr = false;
+
+ for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
+ MDNode *Op = ModFlags->getOperand(I);
+ assert(Op->getNumOperands() == 3 && "Invalid module flag metadata!");
+ assert(isa<ConstantInt>(Op->getOperand(0)) &&
+ "Module flag's first operand must be an integer!");
+ assert(isa<MDString>(Op->getOperand(1)) &&
+ "Module flag's second operand must be an MDString!");
+
+ ConstantInt *Behavior = cast<ConstantInt>(Op->getOperand(0));
+ MDString *ID = cast<MDString>(Op->getOperand(1));
+ Value *Val = Op->getOperand(2);
+ switch (Behavior->getZExtValue()) {
+ default:
+ assert(false && "Invalid behavior in module flag metadata!");
+ break;
+ case Module::Error: {
+ MDNode *&ErrNode = ErrorNode[ID];
+ if (!ErrNode) ErrNode = Op;
+ if (ErrNode->getOperand(2) != Val)
+ HasErr = emitError("linking module flags '" + ID->getString() +
+ "': IDs have conflicting values");
+ break;
+ }
+ case Module::Warning: {
+ MDNode *&WarnNode = WarningNode[ID];
+ if (!WarnNode) WarnNode = Op;
+ if (WarnNode->getOperand(2) != Val)
+ errs() << "WARNING: linking module flags '" << ID->getString()
+ << "': IDs have conflicting values";
+ break;
+ }
+ case Module::Require: RequireNodes[ID].insert(Op); break;
+ case Module::Override: {
+ MDNode *&OvrNode = OverrideNode[ID];
+ if (!OvrNode) OvrNode = Op;
+ if (OvrNode->getOperand(2) != Val)
+ HasErr = emitError("linking module flags '" + ID->getString() +
+ "': IDs have conflicting override values");
+ break;
+ }
+ }
+
+ SeenIDs.insert(ID);
+ }
+
+ return HasErr;
+}
+
+/// linkModuleFlagsMetadata - Merge the linker flags in Src into the Dest
+/// module.
+bool ModuleLinker::linkModuleFlagsMetadata() {
+ const NamedMDNode *SrcModFlags = SrcM->getModuleFlagsMetadata();
+ if (!SrcModFlags) return false;
+
+ NamedMDNode *DstModFlags = DstM->getOrInsertModuleFlagsMetadata();
+
+ // If the destination module doesn't have module flags yet, then just copy
+ // over the source module's flags.
+ if (DstModFlags->getNumOperands() == 0) {
+ for (unsigned I = 0, E = SrcModFlags->getNumOperands(); I != E; ++I)
+ DstModFlags->addOperand(SrcModFlags->getOperand(I));
+
+ return false;
+ }
+
+ bool HasErr = false;
+
+ // Otherwise, we have to merge them based on their behaviors. First,
+ // categorize all of the nodes in the modules' module flags. If an error or
+ // warning occurs, then emit the appropriate message(s).
+ DenseMap<MDString*, MDNode*> ErrorNode;
+ DenseMap<MDString*, MDNode*> WarningNode;
+ DenseMap<MDString*, MDNode*> OverrideNode;
+ DenseMap<MDString*, SmallSetVector<MDNode*, 8> > RequireNodes;
+ SmallSetVector<MDString*, 16> SeenIDs;
+
+ HasErr |= categorizeModuleFlagNodes(SrcModFlags, ErrorNode, WarningNode,
+ OverrideNode, RequireNodes, SeenIDs);
+ HasErr |= categorizeModuleFlagNodes(DstModFlags, ErrorNode, WarningNode,
+ OverrideNode, RequireNodes, SeenIDs);
+
+ // Check that there isn't both an error and warning node for a flag.
+ for (SmallSetVector<MDString*, 16>::iterator
+ I = SeenIDs.begin(), E = SeenIDs.end(); I != E; ++I) {
+ MDString *ID = *I;
+ if (ErrorNode[ID] && WarningNode[ID])
+ HasErr = emitError("linking module flags '" + ID->getString() +
+ "': IDs have conflicting behaviors");
+ }
+
+ // Early exit if we had an error.
+ if (HasErr) return true;
+
+ // Get the destination's module flags ready for new operands.
+ DstModFlags->dropAllReferences();
+
+ // Add all of the module flags to the destination module.
+ DenseMap<MDString*, SmallVector<MDNode*, 4> > AddedNodes;
+ for (SmallSetVector<MDString*, 16>::iterator
+ I = SeenIDs.begin(), E = SeenIDs.end(); I != E; ++I) {
+ MDString *ID = *I;
+ if (OverrideNode[ID]) {
+ DstModFlags->addOperand(OverrideNode[ID]);
+ AddedNodes[ID].push_back(OverrideNode[ID]);
+ } else if (ErrorNode[ID]) {
+ DstModFlags->addOperand(ErrorNode[ID]);
+ AddedNodes[ID].push_back(ErrorNode[ID]);
+ } else if (WarningNode[ID]) {
+ DstModFlags->addOperand(WarningNode[ID]);
+ AddedNodes[ID].push_back(WarningNode[ID]);
+ }
+
+ for (SmallSetVector<MDNode*, 8>::iterator
+ II = RequireNodes[ID].begin(), IE = RequireNodes[ID].end();
+ II != IE; ++II)
+ DstModFlags->addOperand(*II);
+ }
+
+ // Now check that all of the requirements have been satisfied.
+ for (SmallSetVector<MDString*, 16>::iterator
+ I = SeenIDs.begin(), E = SeenIDs.end(); I != E; ++I) {
+ MDString *ID = *I;
+ SmallSetVector<MDNode*, 8> &Set = RequireNodes[ID];
+
+ for (SmallSetVector<MDNode*, 8>::iterator
+ II = Set.begin(), IE = Set.end(); II != IE; ++II) {
+ MDNode *Node = *II;
+ assert(isa<MDNode>(Node->getOperand(2)) &&
+ "Module flag's third operand must be an MDNode!");
+ MDNode *Val = cast<MDNode>(Node->getOperand(2));
+
+ MDString *ReqID = cast<MDString>(Val->getOperand(0));
+ Value *ReqVal = Val->getOperand(1);
+
+ bool HasValue = false;
+ for (SmallVectorImpl<MDNode*>::iterator
+ RI = AddedNodes[ReqID].begin(), RE = AddedNodes[ReqID].end();
+ RI != RE; ++RI) {
+ MDNode *ReqNode = *RI;
+ if (ReqNode->getOperand(2) == ReqVal) {
+ HasValue = true;
+ break;
+ }
+ }
+
+ if (!HasValue)
+ HasErr = emitError("linking module flags '" + ReqID->getString() +
+ "': does not have the required value");
+ }
+ }
+
+ return HasErr;
+}
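linkModuleFlagsMetadata above merges flags by behavior: Override takes precedence, conflicting Error values fail the link, conflicting Warning values only warn, and Require entries are verified after merging. A simplified, self-contained sketch of that precedence over plain strings; it omits the Require pass and the Override-vs-Override conflict check for brevity:

  #include <cstdio>
  #include <map>
  #include <string>

  enum Behavior { Error = 1, Warning = 2, Require = 3, Override = 4 };
  struct Flag { Behavior B; std::string Value; };

  // Returns true on a hard conflict, mirroring the HasErr result above.
  static bool mergeFlag(std::map<std::string, Flag> &Merged,
                        const std::string &ID, const Flag &F) {
    std::map<std::string, Flag>::iterator It = Merged.find(ID);
    if (It == Merged.end()) { Merged[ID] = F; return false; }
    if (F.B == Override) { It->second = F; return false; } // Override wins
    if (It->second.Value == F.Value) return false;         // values agree
    if (F.B == Error) return true;                         // fatal conflict
    std::fprintf(stderr, "WARNING: module flag '%s' conflicts\n", ID.c_str());
    return false;                                          // Warning only
  }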
bool ModuleLinker::run() {
- assert(DstM && "Null Destination module");
- assert(SrcM && "Null Source Module");
+ assert(DstM && "Null destination module");
+ assert(SrcM && "Null source module");
// Inherit the target data from the source module if the destination module
// doesn't have one already.
@@ -951,7 +1233,6 @@ bool ModuleLinker::run() {
// Link in the function bodies that are defined in the source module into
// DstM.
for (Module::iterator SF = SrcM->begin(), E = SrcM->end(); SF != E; ++SF) {
-
// Skip if not linking from source.
if (DoNotLinkFromSource.count(SF)) continue;
@@ -964,16 +1245,70 @@ bool ModuleLinker::run() {
}
linkFunctionBody(cast<Function>(ValueMap[SF]), SF);
+ SF->Dematerialize();
}
// Resolve all uses of aliases with aliasees.
linkAliasBodies();
- // Remap all of the named mdnoes in Src into the DstM module. We do this
+ // Remap all of the named MDNodes in Src into the DstM module. We do this
// after linking GlobalValues so that MDNodes that reference GlobalValues
// are properly remapped.
linkNamedMDNodes();
+ // Merge the module flags into the DstM module.
+ if (linkModuleFlagsMetadata())
+ return true;
+
+ // Process vector of lazily linked in functions.
+ bool LinkedInAnyFunctions;
+ do {
+ LinkedInAnyFunctions = false;
+
+ for(std::vector<Function*>::iterator I = LazilyLinkFunctions.begin(),
+ E = LazilyLinkFunctions.end(); I != E; ++I) {
+ if (!*I)
+ continue;
+
+ Function *SF = *I;
+ Function *DF = cast<Function>(ValueMap[SF]);
+
+ if (!DF->use_empty()) {
+
+ // Materialize if necessary.
+ if (SF->isDeclaration()) {
+ if (!SF->isMaterializable())
+ continue;
+ if (SF->Materialize(&ErrorMsg))
+ return true;
+ }
+
+ // Link in function body.
+ linkFunctionBody(DF, SF);
+ SF->Dematerialize();
+
+ // "Remove" from vector by setting the element to 0.
+ *I = 0;
+
+ // Set flag to indicate we may have more functions to lazily link in
+ // since we linked in a function.
+ LinkedInAnyFunctions = true;
+ }
+ }
+ } while (LinkedInAnyFunctions);
+
+ // Remove any prototypes of functions that were not actually linked in.
+ for(std::vector<Function*>::iterator I = LazilyLinkFunctions.begin(),
+ E = LazilyLinkFunctions.end(); I != E; ++I) {
+ if (!*I)
+ continue;
+
+ Function *SF = *I;
+ Function *DF = cast<Function>(ValueMap[SF]);
+ if (DF->use_empty())
+ DF->eraseFromParent();
+ }
+
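The do/while above runs lazy linking to a fixed point: linking one body can create uses of other pending functions, so passes repeat until a full sweep links nothing new. A toy restatement with integers standing in for functions:

  #include <cstddef>
  #include <map>
  #include <set>
  #include <vector>

  static std::set<int> linkLazily(std::vector<int> Pending,
                                  std::map<int, std::vector<int> > Callees,
                                  std::set<int> Used) {
    std::set<int> Linked;
    bool LinkedAny;
    do {
      LinkedAny = false;
      for (std::size_t I = 0; I != Pending.size(); ++I) {
        int F = Pending[I];
        if (F < 0 || !Used.count(F))
          continue;                          // no uses yet: keep pending
        Linked.insert(F);                    // "link in the body"
        const std::vector<int> &CS = Callees[F];
        Used.insert(CS.begin(), CS.end());   // its callees are now used
        Pending[I] = -1;                     // tombstone, as the code above
        LinkedAny = true;
      }
    } while (LinkedAny);
    return Linked;  // anything still pending gets its prototype erased
  }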
// Now that all of the types from the source are used, resolve any structs
// copied over to the dest that didn't exist there.
TypeMap.linkDefinedTypeBodies();
@@ -985,11 +1320,11 @@ bool ModuleLinker::run() {
// LinkModules entrypoint.
//===----------------------------------------------------------------------===//
-// LinkModules - This function links two modules together, with the resulting
-// left module modified to be the composite of the two input modules. If an
-// error occurs, true is returned and ErrorMsg (if not null) is set to indicate
-// the problem. Upon failure, the Dest module could be in a modified state, and
-// shouldn't be relied on to be consistent.
+/// LinkModules - This function links two modules together, with the resulting
+/// left module modified to be the composite of the two input modules. If an
+/// error occurs, true is returned and ErrorMsg (if not null) is set to indicate
+/// the problem. Upon failure, the Dest module could be in a modified state,
+/// and shouldn't be relied on to be consistent.
bool Linker::LinkModules(Module *Dest, Module *Src, unsigned Mode,
std::string *ErrorMsg) {
ModuleLinker TheLinker(Dest, Src, Mode);
@@ -997,6 +1332,6 @@ bool Linker::LinkModules(Module *Dest, Module *Src, unsigned Mode,
if (ErrorMsg) *ErrorMsg = TheLinker.ErrorMsg;
return true;
}
-
+
return false;
}
diff --git a/contrib/llvm/lib/Linker/Linker.cpp b/contrib/llvm/lib/Linker/Linker.cpp
index 59fbceb..7c6cf4f 100644
--- a/contrib/llvm/lib/Linker/Linker.cpp
+++ b/contrib/llvm/lib/Linker/Linker.cpp
@@ -17,7 +17,6 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Config/config.h"
#include "llvm/Support/system_error.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/MC/ELFObjectWriter.cpp b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
index 3d16de5..9fc33b6 100644
--- a/contrib/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/ELFObjectWriter.cpp
@@ -11,26 +11,26 @@
//
//===----------------------------------------------------------------------===//
-#include "ELFObjectWriter.h"
+#include "MCELF.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCELFSymbolFlags.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ELF.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/StringSwitch.h"
-
-#include "../Target/X86/MCTargetDesc/X86FixupKinds.h"
-#include "../Target/ARM/MCTargetDesc/ARMFixupKinds.h"
-#include "../Target/PowerPC/MCTargetDesc/PPCFixupKinds.h"
#include <vector>
using namespace llvm;
@@ -38,6 +38,304 @@ using namespace llvm;
#undef DEBUG_TYPE
#define DEBUG_TYPE "reloc-info"
+namespace {
+class ELFObjectWriter : public MCObjectWriter {
+ protected:
+
+ static bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind);
+ static bool RelocNeedsGOT(MCSymbolRefExpr::VariantKind Variant);
+ static uint64_t SymbolValue(MCSymbolData &Data, const MCAsmLayout &Layout);
+ static bool isInSymtab(const MCAssembler &Asm, const MCSymbolData &Data,
+ bool Used, bool Renamed);
+ static bool isLocal(const MCSymbolData &Data, bool isSignature,
+ bool isUsedInReloc);
+ static bool IsELFMetaDataSection(const MCSectionData &SD);
+ static uint64_t DataSectionSize(const MCSectionData &SD);
+ static uint64_t GetSectionFileSize(const MCAsmLayout &Layout,
+ const MCSectionData &SD);
+ static uint64_t GetSectionAddressSize(const MCAsmLayout &Layout,
+ const MCSectionData &SD);
+
+ void WriteDataSectionData(MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCSectionELF &Section);
+
+ /*static bool isFixupKindX86RIPRel(unsigned Kind) {
+ return Kind == X86::reloc_riprel_4byte ||
+ Kind == X86::reloc_riprel_4byte_movq_load;
+ }*/
+
+ /// ELFSymbolData - Helper struct for containing some precomputed
+ /// information on symbols.
+ struct ELFSymbolData {
+ MCSymbolData *SymbolData;
+ uint64_t StringIndex;
+ uint32_t SectionIndex;
+
+ // Support lexicographic sorting.
+ bool operator<(const ELFSymbolData &RHS) const {
+ if (MCELF::GetType(*SymbolData) == ELF::STT_FILE)
+ return true;
+ if (MCELF::GetType(*RHS.SymbolData) == ELF::STT_FILE)
+ return false;
+ return SymbolData->getSymbol().getName() <
+ RHS.SymbolData->getSymbol().getName();
+ }
+ };
+
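The comparator above is meant to place STT_FILE entries first and order everything else by symbol name; note that as written it returns true for either order when both sides are STT_FILE. A tightened strict-weak-ordering sketch of the same intent:

  #include <string>

  struct Sym { bool IsFile; std::string Name; };

  // Strict weak ordering: file symbols first, then lexicographic by name.
  static bool symLess(const Sym &A, const Sym &B) {
    if (A.IsFile != B.IsFile)
      return A.IsFile;
    return A.Name < B.Name;
  }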
+ /// The target specific ELF writer instance.
+ llvm::OwningPtr<MCELFObjectTargetWriter> TargetObjectWriter;
+
+ SmallPtrSet<const MCSymbol *, 16> UsedInReloc;
+ SmallPtrSet<const MCSymbol *, 16> WeakrefUsedInReloc;
+ DenseMap<const MCSymbol *, const MCSymbol *> Renames;
+
+ llvm::DenseMap<const MCSectionData*,
+ std::vector<ELFRelocationEntry> > Relocations;
+ DenseMap<const MCSection*, uint64_t> SectionStringTableIndex;
+
+ /// @}
+ /// @name Symbol Table Data
+ /// @{
+
+ SmallString<256> StringTable;
+ std::vector<ELFSymbolData> LocalSymbolData;
+ std::vector<ELFSymbolData> ExternalSymbolData;
+ std::vector<ELFSymbolData> UndefinedSymbolData;
+
+ /// @}
+
+ bool NeedsGOT;
+
+ bool NeedsSymtabShndx;
+
+ // This holds the symbol table index of the last local symbol.
+ unsigned LastLocalSymbolIndex;
+ // This holds the .strtab section index.
+ unsigned StringTableIndex;
+ // This holds the .symtab section index.
+ unsigned SymbolTableIndex;
+
+ unsigned ShstrtabIndex;
+
+
+ const MCSymbol *SymbolToReloc(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
+
+ // TargetObjectWriter wrappers.
+ const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ return TargetObjectWriter->ExplicitRelSym(Asm, Target, F, Fixup, IsPCRel);
+ }
+
+ bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
+ bool hasRelocationAddend() const {
+ return TargetObjectWriter->hasRelocationAddend();
+ }
+ unsigned getEFlags() const {
+ return TargetObjectWriter->getEFlags();
+ }
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) const {
+ return TargetObjectWriter->GetRelocType(Target, Fixup, IsPCRel,
+ IsRelocWithSymbol, Addend);
+ }
+
+
+ public:
+ ELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_ostream &_OS, bool IsLittleEndian)
+ : MCObjectWriter(_OS, IsLittleEndian),
+ TargetObjectWriter(MOTW),
+ NeedsGOT(false), NeedsSymtabShndx(false){
+ }
+
+ virtual ~ELFObjectWriter();
+
+ void WriteWord(uint64_t W) {
+ if (is64Bit())
+ Write64(W);
+ else
+ Write32(W);
+ }
+
+ void StringLE16(char *buf, uint16_t Value) {
+ buf[0] = char(Value >> 0);
+ buf[1] = char(Value >> 8);
+ }
+
+ void StringLE32(char *buf, uint32_t Value) {
+ StringLE16(buf, uint16_t(Value >> 0));
+ StringLE16(buf + 2, uint16_t(Value >> 16));
+ }
+
+ void StringLE64(char *buf, uint64_t Value) {
+ StringLE32(buf, uint32_t(Value >> 0));
+ StringLE32(buf + 4, uint32_t(Value >> 32));
+ }
+
+ void StringBE16(char *buf ,uint16_t Value) {
+ buf[0] = char(Value >> 8);
+ buf[1] = char(Value >> 0);
+ }
+
+ void StringBE32(char *buf, uint32_t Value) {
+ StringBE16(buf, uint16_t(Value >> 16));
+ StringBE16(buf + 2, uint16_t(Value >> 0));
+ }
+
+ void StringBE64(char *buf, uint64_t Value) {
+ StringBE32(buf, uint32_t(Value >> 32));
+ StringBE32(buf + 4, uint32_t(Value >> 0));
+ }
+
+ void String8(MCDataFragment &F, uint8_t Value) {
+ char buf[1];
+ buf[0] = Value;
+ F.getContents() += StringRef(buf, 1);
+ }
+
+ void String16(MCDataFragment &F, uint16_t Value) {
+ char buf[2];
+ if (isLittleEndian())
+ StringLE16(buf, Value);
+ else
+ StringBE16(buf, Value);
+ F.getContents() += StringRef(buf, 2);
+ }
+
+ void String32(MCDataFragment &F, uint32_t Value) {
+ char buf[4];
+ if (isLittleEndian())
+ StringLE32(buf, Value);
+ else
+ StringBE32(buf, Value);
+ F.getContents() += StringRef(buf, 4);
+ }
+
+ void String64(MCDataFragment &F, uint64_t Value) {
+ char buf[8];
+ if (isLittleEndian())
+ StringLE64(buf, Value);
+ else
+ StringBE64(buf, Value);
+ F.getContents() += StringRef(buf, 8);
+ }
+
+ void WriteHeader(uint64_t SectionDataSize,
+ unsigned NumberOfSections);
+
+ void WriteSymbolEntry(MCDataFragment *SymtabF,
+ MCDataFragment *ShndxF,
+ uint64_t name, uint8_t info,
+ uint64_t value, uint64_t size,
+ uint8_t other, uint32_t shndx,
+ bool Reserved);
+
+ void WriteSymbol(MCDataFragment *SymtabF, MCDataFragment *ShndxF,
+ ELFSymbolData &MSD,
+ const MCAsmLayout &Layout);
+
+ typedef DenseMap<const MCSectionELF*, uint32_t> SectionIndexMapTy;
+ void WriteSymbolTable(MCDataFragment *SymtabF,
+ MCDataFragment *ShndxF,
+ const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const SectionIndexMapTy &SectionIndexMap);
+
+ virtual void RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup,
+ MCValue Target, uint64_t &FixedValue);
+
+ uint64_t getSymbolIndexInSymbolTable(const MCAssembler &Asm,
+ const MCSymbol *S);
+
+ // Map from a group section to the signature symbol
+ typedef DenseMap<const MCSectionELF*, const MCSymbol*> GroupMapTy;
+ // Map from a signature symbol to the group section
+ typedef DenseMap<const MCSymbol*, const MCSectionELF*> RevGroupMapTy;
+ // Map from a section to the section with the relocations
+ typedef DenseMap<const MCSectionELF*, const MCSectionELF*> RelMapTy;
+ // Map from a section to its offset
+ typedef DenseMap<const MCSectionELF*, uint64_t> SectionOffsetMapTy;
+
+ /// ComputeSymbolTable - Compute the symbol table data
+ ///
+ /// \param StringTable [out] - The string table data.
+ /// \param StringIndexMap [out] - Map from symbol names to offsets in the
+ /// string table.
+ void ComputeSymbolTable(MCAssembler &Asm,
+ const SectionIndexMapTy &SectionIndexMap,
+ RevGroupMapTy RevGroupMap,
+ unsigned NumRegularSections);
+
+ void ComputeIndexMap(MCAssembler &Asm,
+ SectionIndexMapTy &SectionIndexMap,
+ const RelMapTy &RelMap);
+
+ void CreateRelocationSections(MCAssembler &Asm, MCAsmLayout &Layout,
+ RelMapTy &RelMap);
+
+ void WriteRelocations(MCAssembler &Asm, MCAsmLayout &Layout,
+ const RelMapTy &RelMap);
+
+ void CreateMetadataSections(MCAssembler &Asm, MCAsmLayout &Layout,
+ SectionIndexMapTy &SectionIndexMap,
+ const RelMapTy &RelMap);
+
+ // Create the sections that show up in the symbol table. Currently
+ // those are the .note.GNU-stack section and the group sections.
+ void CreateIndexedSections(MCAssembler &Asm, MCAsmLayout &Layout,
+ GroupMapTy &GroupMap,
+ RevGroupMapTy &RevGroupMap,
+ SectionIndexMapTy &SectionIndexMap,
+ const RelMapTy &RelMap);
+
+ virtual void ExecutePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout);
+
+ void WriteSectionHeader(MCAssembler &Asm, const GroupMapTy &GroupMap,
+ const MCAsmLayout &Layout,
+ const SectionIndexMapTy &SectionIndexMap,
+ const SectionOffsetMapTy &SectionOffsetMap);
+
+ void ComputeSectionOrder(MCAssembler &Asm,
+ std::vector<const MCSectionELF*> &Sections);
+
+ void WriteSecHdrEntry(uint32_t Name, uint32_t Type, uint64_t Flags,
+ uint64_t Address, uint64_t Offset,
+ uint64_t Size, uint32_t Link, uint32_t Info,
+ uint64_t Alignment, uint64_t EntrySize);
+
+ void WriteRelocationsFragment(const MCAssembler &Asm,
+ MCDataFragment *F,
+ const MCSectionData *SD);
+
+ virtual bool
+ IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+ const MCSymbolData &DataA,
+ const MCFragment &FB,
+ bool InSet,
+ bool IsPCRel) const;
+
+ virtual void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout);
+ void WriteSection(MCAssembler &Asm,
+ const SectionIndexMapTy &SectionIndexMap,
+ uint32_t GroupSymbolIndex,
+ uint64_t Offset, uint64_t Size, uint64_t Alignment,
+ const MCSectionELF &Section);
+ };
+}
+
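
The StringLE*/StringBE* helpers above decompose a value into bytes with explicit shifts, so emission is independent of host byte order. A minimal stand-alone sketch of the same idea (writeLE32/writeBE32 and the test values are hypothetical, not part of the patch):

#include <cassert>
#include <cstdint>

// Pack a 32-bit value explicitly, least-significant byte first (LE)
// or most-significant byte first (BE), regardless of host endianness.
static void writeLE32(char *buf, uint32_t v) {
  buf[0] = char(v >> 0);  buf[1] = char(v >> 8);
  buf[2] = char(v >> 16); buf[3] = char(v >> 24);
}
static void writeBE32(char *buf, uint32_t v) {
  buf[0] = char(v >> 24); buf[1] = char(v >> 16);
  buf[2] = char(v >> 8);  buf[3] = char(v >> 0);
}

int main() {
  char le[4], be[4];
  writeLE32(le, 0x11223344u);
  writeBE32(be, 0x11223344u);
  assert((unsigned char)le[0] == 0x44 && (unsigned char)be[0] == 0x11);
  return 0;
}
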
bool ELFObjectWriter::isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind) {
const MCFixupKindInfo &FKI =
Asm.getBackend().getFixupKindInfo((MCFixupKind) Kind);
@@ -92,11 +390,7 @@ void ELFObjectWriter::WriteHeader(uint64_t SectionDataSize,
Write8(ELF::EV_CURRENT); // e_ident[EI_VERSION]
// e_ident[EI_OSABI]
- switch (TargetObjectWriter->getOSType()) {
- case Triple::FreeBSD: Write8(ELF::ELFOSABI_FREEBSD); break;
- case Triple::Linux: Write8(ELF::ELFOSABI_LINUX); break;
- default: Write8(ELF::ELFOSABI_NONE); break;
- }
+ Write8(TargetObjectWriter->getOSABI());
Write8(0); // e_ident[EI_ABIVERSION]
WriteZeros(ELF::EI_NIDENT - ELF::EI_PAD);
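
For orientation, the values the removed switch used to produce are fixed by the ELF gABI; a sketch of the constants involved (listed here only for reference, the patch now takes the byte from getOSABI()):

#include <cstdint>

// ELF e_ident[EI_OSABI] values relevant to the removed switch.
enum : uint8_t {
  ELFOSABI_NONE    = 0,  // no extensions / System V
  ELFOSABI_LINUX   = 3,  // GNU/Linux
  ELFOSABI_FREEBSD = 9,  // FreeBSD
};
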
@@ -112,7 +406,7 @@ void ELFObjectWriter::WriteHeader(uint64_t SectionDataSize,
sizeof(ELF::Elf32_Ehdr))); // e_shoff = sec hdr table off in bytes
// e_flags = whatever the target wants
- WriteEFlags();
+ Write32(getEFlags());
// e_ehsize = ELF header size
Write16(is64Bit() ? sizeof(ELF::Elf64_Ehdr) : sizeof(ELF::Elf32_Ehdr));
@@ -181,7 +475,7 @@ uint64_t ELFObjectWriter::SymbolValue(MCSymbolData &Data,
if (const MCExpr *Value = Symbol.getVariableValue()) {
int64_t IntValue;
if (Value->EvaluateAsAbsolute(IntValue, Layout))
- return (uint64_t)IntValue;
+ return (uint64_t)IntValue;
}
}
@@ -277,7 +571,7 @@ void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
MCDataFragment *ShndxF,
const MCAssembler &Asm,
const MCAsmLayout &Layout,
- const SectionIndexMapTy &SectionIndexMap) {
+ const SectionIndexMapTy &SectionIndexMap) {
// The string table must be emitted first because we need the index
// into the string table for all the symbol names.
assert(StringTable.size() && "Missing string table");
@@ -306,7 +600,8 @@ void ELFObjectWriter::WriteSymbolTable(MCDataFragment *SymtabF,
Section.getType() == ELF::SHT_SYMTAB_SHNDX)
continue;
WriteSymbolEntry(SymtabF, ShndxF, 0, ELF::STT_SECTION, 0, 0,
- ELF::STV_DEFAULT, SectionIndexMap.lookup(&Section), false);
+ ELF::STV_DEFAULT, SectionIndexMap.lookup(&Section),
+ false);
LastLocalSymbolIndex++;
}
@@ -416,7 +711,7 @@ void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
// Offset of the symbol in the section
int64_t a = Layout.getSymbolOffset(&SDB);
- // Ofeset of the relocation in the section
+ // Offset of the relocation in the section
int64_t b = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
Value += b - a;
}
@@ -445,11 +740,16 @@ void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
FixedValue = Value;
unsigned Type = GetRelocType(Target, Fixup, IsPCRel,
(RelocSymbol != 0), Addend);
+ MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
+ MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+ if (RelocNeedsGOT(Modifier))
+ NeedsGOT = true;
uint64_t RelocOffset = Layout.getFragmentOffset(Fragment) +
Fixup.getOffset();
- adjustFixupOffset(Fixup, RelocOffset);
+ // FIXME: no tests cover this. Is adjustFixupOffset dead code?
+ TargetObjectWriter->adjustFixupOffset(Fixup, RelocOffset);
if (!hasRelocationAddend())
Addend = 0;
@@ -459,7 +759,7 @@ void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
else
assert(isInt<32>(Addend));
- ELFRelocationEntry ERE(RelocOffset, Index, Type, RelocSymbol, Addend);
+ ELFRelocationEntry ERE(RelocOffset, Index, Type, RelocSymbol, Addend, Fixup);
Relocations[Fragment->getParent()].push_back(ERE);
}
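
The new modifier check centralizes what several target writers previously did individually: certain symbol-reference variants imply that a global offset table is needed. A hedged sketch of that predicate (names simplified; the exact variant set is target-dependent):

enum VariantKind { VK_None, VK_GOT, VK_GOTPCREL, VK_GOTTPOFF, VK_TLSGD };

// Illustration only: GOT-relative modifiers force NeedsGOT.
static bool relocNeedsGOT(VariantKind Kind) {
  switch (Kind) {
  case VK_GOT:
  case VK_GOTPCREL:
  case VK_GOTTPOFF:
  case VK_TLSGD:
    return true;
  default:
    return false;
  }
}
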
@@ -745,8 +1045,10 @@ void ELFObjectWriter::WriteRelocationsFragment(const MCAssembler &Asm,
MCDataFragment *F,
const MCSectionData *SD) {
std::vector<ELFRelocationEntry> &Relocs = Relocations[SD];
- // sort by the r_offset just like gnu as does
- array_pod_sort(Relocs.begin(), Relocs.end());
+
+ // Sort the relocation entries. Most targets just sort by r_offset, but some
+ // (e.g., MIPS) have additional constraints.
+ TargetObjectWriter->sortRelocs(Asm, Relocs);
for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
ELFRelocationEntry entry = Relocs[e - i - 1];
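
Note the emission loop walks the vector backwards (Relocs[e - i - 1]); combined with a descending sort key this writes entries in ascending r_offset order, matching GNU as. A small self-contained sketch of that interplay (struct and sort key are illustrative):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry { uint64_t r_offset; };

int main() {
  std::vector<Entry> relocs = {{8}, {0}, {4}};
  // Descending by r_offset, like ELFRelocationEntry::operator<.
  std::sort(relocs.begin(), relocs.end(),
            [](const Entry &a, const Entry &b) { return b.r_offset < a.r_offset; });
  // Reverse iteration then emits 0, 4, 8.
  for (std::size_t i = 0, e = relocs.size(); i != e; ++i)
    std::printf("%llu\n", (unsigned long long)relocs[e - i - 1].r_offset);
  return 0;
}
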
@@ -1053,14 +1355,10 @@ uint64_t ELFObjectWriter::GetSectionAddressSize(const MCAsmLayout &Layout,
void ELFObjectWriter::WriteDataSectionData(MCAssembler &Asm,
const MCAsmLayout &Layout,
const MCSectionELF &Section) {
- uint64_t FileOff = OS.tell();
const MCSectionData &SD = Asm.getOrCreateSectionData(Section);
- uint64_t Padding = OffsetToAlignment(FileOff, SD.getAlignment());
+ uint64_t Padding = OffsetToAlignment(OS.tell(), SD.getAlignment());
WriteZeros(Padding);
- FileOff += Padding;
-
- FileOff += GetSectionFileSize(Layout, SD);
if (IsELFMetaDataSection(SD)) {
for (MCSectionData::const_iterator i = SD.begin(), e = SD.end(); i != e;
@@ -1070,7 +1368,7 @@ void ELFObjectWriter::WriteDataSectionData(MCAssembler &Asm,
WriteBytes(cast<MCDataFragment>(F).getContents().str());
}
} else {
- Asm.WriteSectionData(&SD, Layout);
+ Asm.writeSectionData(&SD, Layout);
}
}
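
The simplification above computes the padding directly from the current stream position instead of tracking FileOff by hand. A behavioral sketch of OffsetToAlignment (the real helper lives in LLVM's MathExtras.h; this reimplementation is for illustration only):

#include <cassert>
#include <cstdint>

// Bytes of padding needed to advance Value to the next multiple of Align.
static uint64_t offsetToAlignment(uint64_t Value, uint64_t Align) {
  assert(Align != 0 && "alignment must be non-zero");
  return (Align - Value % Align) % Align;
}

// e.g. offsetToAlignment(13, 8) == 3, offsetToAlignment(16, 8) == 0.
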
@@ -1226,16 +1524,13 @@ void ELFObjectWriter::WriteObject(MCAssembler &Asm,
for (unsigned i = 0; i < NumRegularSections + 1; ++i)
WriteDataSectionData(Asm, Layout, *Sections[i]);
- FileOff = OS.tell();
- uint64_t Padding = OffsetToAlignment(FileOff, NaturalAlignment);
+ uint64_t Padding = OffsetToAlignment(OS.tell(), NaturalAlignment);
WriteZeros(Padding);
// ... then the section header table ...
WriteSectionHeader(Asm, GroupMap, Layout, SectionIndexMap,
SectionOffsetMap);
- FileOff = OS.tell();
-
// ... and then the remaining sections ...
for (unsigned i = NumRegularSections + 1; i < NumSections; ++i)
WriteDataSectionData(Asm, Layout, *Sections[i]);
@@ -1256,577 +1551,5 @@ ELFObjectWriter::IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
MCObjectWriter *llvm::createELFObjectWriter(MCELFObjectTargetWriter *MOTW,
raw_ostream &OS,
bool IsLittleEndian) {
- switch (MOTW->getEMachine()) {
- case ELF::EM_386:
- case ELF::EM_X86_64:
- return new X86ELFObjectWriter(MOTW, OS, IsLittleEndian); break;
- case ELF::EM_ARM:
- return new ARMELFObjectWriter(MOTW, OS, IsLittleEndian); break;
- case ELF::EM_MBLAZE:
- return new MBlazeELFObjectWriter(MOTW, OS, IsLittleEndian); break;
- case ELF::EM_PPC:
- case ELF::EM_PPC64:
- return new PPCELFObjectWriter(MOTW, OS, IsLittleEndian); break;
- case ELF::EM_MIPS:
- return new MipsELFObjectWriter(MOTW, OS, IsLittleEndian); break;
- default: llvm_unreachable("Unsupported architecture"); break;
- }
-}
-
-
-/// START OF SUBCLASSES for ELFObjectWriter
-//===- ARMELFObjectWriter -------------------------------------------===//
-
-ARMELFObjectWriter::ARMELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian)
- : ELFObjectWriter(MOTW, _OS, IsLittleEndian)
-{}
-
-ARMELFObjectWriter::~ARMELFObjectWriter()
-{}
-
-// FIXME: get the real EABI Version from the Triple.
-void ARMELFObjectWriter::WriteEFlags() {
- Write32(ELF::EF_ARM_EABIMASK & DefaultEABIVersion);
-}
-
-// On ARM, _MergedGlobals and most other symbols are emitted directly,
-// i.e. not as an offset from a section symbol.
-// This code approximates what ARM/gcc does.
-
-STATISTIC(PCRelCount, "Total number of PIC Relocations");
-STATISTIC(NonPCRelCount, "Total number of non-PIC relocations");
-
-const MCSymbol *ARMELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- const MCSymbol &Symbol = Target.getSymA()->getSymbol();
- bool EmitThisSym = false;
-
- const MCSectionELF &Section =
- static_cast<const MCSectionELF&>(Symbol.getSection());
- bool InNormalSection = true;
- unsigned RelocType = 0;
- RelocType = GetRelocTypeInner(Target, Fixup, IsPCRel);
-
- DEBUG(
- const MCSymbolRefExpr::VariantKind Kind = Target.getSymA()->getKind();
- MCSymbolRefExpr::VariantKind Kind2;
- Kind2 = Target.getSymB() ? Target.getSymB()->getKind() :
- MCSymbolRefExpr::VK_None;
- dbgs() << "considering symbol "
- << Section.getSectionName() << "/"
- << Symbol.getName() << "/"
- << " Rel:" << (unsigned)RelocType
- << " Kind: " << (int)Kind << "/" << (int)Kind2
- << " Tmp:"
- << Symbol.isAbsolute() << "/" << Symbol.isDefined() << "/"
- << Symbol.isVariable() << "/" << Symbol.isTemporary()
- << " Counts:" << PCRelCount << "/" << NonPCRelCount << "\n");
-
- if (IsPCRel) { ++PCRelCount;
- switch (RelocType) {
- default:
- // Most relocation types are emitted as explicit symbols
- InNormalSection =
- StringSwitch<bool>(Section.getSectionName())
- .Case(".data.rel.ro.local", false)
- .Case(".data.rel", false)
- .Case(".bss", false)
- .Default(true);
- EmitThisSym = true;
- break;
- case ELF::R_ARM_ABS32:
- // But things get strange with R_ARM_ABS32
- // In this case, most things that go in .rodata show up
- // as section relative relocations
- InNormalSection =
- StringSwitch<bool>(Section.getSectionName())
- .Case(".data.rel.ro.local", false)
- .Case(".data.rel", false)
- .Case(".rodata", false)
- .Case(".bss", false)
- .Default(true);
- EmitThisSym = false;
- break;
- }
- } else {
- NonPCRelCount++;
- InNormalSection =
- StringSwitch<bool>(Section.getSectionName())
- .Case(".data.rel.ro.local", false)
- .Case(".rodata", false)
- .Case(".data.rel", false)
- .Case(".bss", false)
- .Default(true);
-
- switch (RelocType) {
- default: EmitThisSym = true; break;
- case ELF::R_ARM_ABS32: EmitThisSym = false; break;
- }
- }
-
- if (EmitThisSym)
- return &Symbol;
- if (! Symbol.isTemporary() && InNormalSection) {
- return &Symbol;
- }
- return NULL;
-}
-
-// Need to examine the Fixup when determining whether to
-// emit the relocation as an explicit symbol or as a section relative
-// offset
-unsigned ARMELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) {
- MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
-
- unsigned Type = GetRelocTypeInner(Target, Fixup, IsPCRel);
-
- if (RelocNeedsGOT(Modifier))
- NeedsGOT = true;
-
- return Type;
-}
-
-unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
-
- unsigned Type = 0;
- if (IsPCRel) {
- switch ((unsigned)Fixup.getKind()) {
- default: assert(0 && "Unimplemented");
- case FK_Data_4:
- switch (Modifier) {
- default: llvm_unreachable("Unsupported Modifier");
- case MCSymbolRefExpr::VK_None:
- Type = ELF::R_ARM_REL32;
- break;
- case MCSymbolRefExpr::VK_ARM_TLSGD:
- assert(0 && "unimplemented");
- break;
- case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
- Type = ELF::R_ARM_TLS_IE32;
- break;
- }
- break;
- case ARM::fixup_arm_uncondbranch:
- switch (Modifier) {
- case MCSymbolRefExpr::VK_ARM_PLT:
- Type = ELF::R_ARM_PLT32;
- break;
- default:
- Type = ELF::R_ARM_CALL;
- break;
- }
- break;
- case ARM::fixup_arm_condbranch:
- Type = ELF::R_ARM_JUMP24;
- break;
- case ARM::fixup_arm_movt_hi16:
- case ARM::fixup_arm_movt_hi16_pcrel:
- Type = ELF::R_ARM_MOVT_PREL;
- break;
- case ARM::fixup_arm_movw_lo16:
- case ARM::fixup_arm_movw_lo16_pcrel:
- Type = ELF::R_ARM_MOVW_PREL_NC;
- break;
- case ARM::fixup_t2_movt_hi16:
- case ARM::fixup_t2_movt_hi16_pcrel:
- Type = ELF::R_ARM_THM_MOVT_PREL;
- break;
- case ARM::fixup_t2_movw_lo16:
- case ARM::fixup_t2_movw_lo16_pcrel:
- Type = ELF::R_ARM_THM_MOVW_PREL_NC;
- break;
- case ARM::fixup_arm_thumb_bl:
- case ARM::fixup_arm_thumb_blx:
- switch (Modifier) {
- case MCSymbolRefExpr::VK_ARM_PLT:
- Type = ELF::R_ARM_THM_CALL;
- break;
- default:
- Type = ELF::R_ARM_NONE;
- break;
- }
- break;
- }
- } else {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
- case FK_Data_4:
- switch (Modifier) {
- default: llvm_unreachable("Unsupported Modifier"); break;
- case MCSymbolRefExpr::VK_ARM_GOT:
- Type = ELF::R_ARM_GOT_BREL;
- break;
- case MCSymbolRefExpr::VK_ARM_TLSGD:
- Type = ELF::R_ARM_TLS_GD32;
- break;
- case MCSymbolRefExpr::VK_ARM_TPOFF:
- Type = ELF::R_ARM_TLS_LE32;
- break;
- case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
- Type = ELF::R_ARM_TLS_IE32;
- break;
- case MCSymbolRefExpr::VK_None:
- Type = ELF::R_ARM_ABS32;
- break;
- case MCSymbolRefExpr::VK_ARM_GOTOFF:
- Type = ELF::R_ARM_GOTOFF32;
- break;
- }
- break;
- case ARM::fixup_arm_ldst_pcrel_12:
- case ARM::fixup_arm_pcrel_10:
- case ARM::fixup_arm_adr_pcrel_12:
- case ARM::fixup_arm_thumb_bl:
- case ARM::fixup_arm_thumb_cb:
- case ARM::fixup_arm_thumb_cp:
- case ARM::fixup_arm_thumb_br:
- assert(0 && "Unimplemented");
- break;
- case ARM::fixup_arm_uncondbranch:
- Type = ELF::R_ARM_CALL;
- break;
- case ARM::fixup_arm_condbranch:
- Type = ELF::R_ARM_JUMP24;
- break;
- case ARM::fixup_arm_movt_hi16:
- Type = ELF::R_ARM_MOVT_ABS;
- break;
- case ARM::fixup_arm_movw_lo16:
- Type = ELF::R_ARM_MOVW_ABS_NC;
- break;
- case ARM::fixup_t2_movt_hi16:
- Type = ELF::R_ARM_THM_MOVT_ABS;
- break;
- case ARM::fixup_t2_movw_lo16:
- Type = ELF::R_ARM_THM_MOVW_ABS_NC;
- break;
- }
- }
-
- return Type;
-}
-
-//===- PPCELFObjectWriter -------------------------------------------===//
-
-PPCELFObjectWriter::PPCELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian)
- : ELFObjectWriter(MOTW, _OS, IsLittleEndian) {
-}
-
-PPCELFObjectWriter::~PPCELFObjectWriter() {
-}
-
-unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) {
- // determine the type of the relocation
- unsigned Type;
- if (IsPCRel) {
- switch ((unsigned)Fixup.getKind()) {
- default:
- llvm_unreachable("Unimplemented");
- case PPC::fixup_ppc_br24:
- Type = ELF::R_PPC_REL24;
- break;
- case FK_PCRel_4:
- Type = ELF::R_PPC_REL32;
- break;
- }
- } else {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
- case PPC::fixup_ppc_br24:
- Type = ELF::R_PPC_ADDR24;
- break;
- case PPC::fixup_ppc_brcond14:
- Type = ELF::R_PPC_ADDR14_BRTAKEN; // XXX: or BRNTAKEN?
- break;
- case PPC::fixup_ppc_ha16:
- Type = ELF::R_PPC_ADDR16_HA;
- break;
- case PPC::fixup_ppc_lo16:
- Type = ELF::R_PPC_ADDR16_LO;
- break;
- case PPC::fixup_ppc_lo14:
- Type = ELF::R_PPC_ADDR14;
- break;
- case FK_Data_4:
- Type = ELF::R_PPC_ADDR32;
- break;
- case FK_Data_2:
- Type = ELF::R_PPC_ADDR16;
- break;
- }
- }
- return Type;
-}
-
-void
-PPCELFObjectWriter::adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset) {
- switch ((unsigned)Fixup.getKind()) {
- case PPC::fixup_ppc_ha16:
- case PPC::fixup_ppc_lo16:
- RelocOffset += 2;
- break;
- default:
- break;
- }
-}
-
-//===- MBlazeELFObjectWriter -------------------------------------------===//
-
-MBlazeELFObjectWriter::MBlazeELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian)
- : ELFObjectWriter(MOTW, _OS, IsLittleEndian) {
-}
-
-MBlazeELFObjectWriter::~MBlazeELFObjectWriter() {
-}
-
-unsigned MBlazeELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) {
- // determine the type of the relocation
- unsigned Type;
- if (IsPCRel) {
- switch ((unsigned)Fixup.getKind()) {
- default:
- llvm_unreachable("Unimplemented");
- case FK_PCRel_4:
- Type = ELF::R_MICROBLAZE_64_PCREL;
- break;
- case FK_PCRel_2:
- Type = ELF::R_MICROBLAZE_32_PCREL;
- break;
- }
- } else {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
- case FK_Data_4:
- Type = ((IsRelocWithSymbol || Addend !=0)
- ? ELF::R_MICROBLAZE_32
- : ELF::R_MICROBLAZE_64);
- break;
- case FK_Data_2:
- Type = ELF::R_MICROBLAZE_32;
- break;
- }
- }
- return Type;
-}
-
-//===- X86ELFObjectWriter -------------------------------------------===//
-
-
-X86ELFObjectWriter::X86ELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian)
- : ELFObjectWriter(MOTW, _OS, IsLittleEndian)
-{}
-
-X86ELFObjectWriter::~X86ELFObjectWriter()
-{}
-
-unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) {
- // determine the type of the relocation
-
- MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
- unsigned Type;
- if (is64Bit()) {
- if (IsPCRel) {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
-
- case FK_Data_8: Type = ELF::R_X86_64_PC64; break;
- case FK_Data_4: Type = ELF::R_X86_64_PC32; break;
- case FK_Data_2: Type = ELF::R_X86_64_PC16; break;
-
- case FK_PCRel_8:
- assert(Modifier == MCSymbolRefExpr::VK_None);
- Type = ELF::R_X86_64_PC64;
- break;
- case X86::reloc_signed_4byte:
- case X86::reloc_riprel_4byte_movq_load:
- case X86::reloc_riprel_4byte:
- case FK_PCRel_4:
- switch (Modifier) {
- default:
- llvm_unreachable("Unimplemented");
- case MCSymbolRefExpr::VK_None:
- Type = ELF::R_X86_64_PC32;
- break;
- case MCSymbolRefExpr::VK_PLT:
- Type = ELF::R_X86_64_PLT32;
- break;
- case MCSymbolRefExpr::VK_GOTPCREL:
- Type = ELF::R_X86_64_GOTPCREL;
- break;
- case MCSymbolRefExpr::VK_GOTTPOFF:
- Type = ELF::R_X86_64_GOTTPOFF;
- break;
- case MCSymbolRefExpr::VK_TLSGD:
- Type = ELF::R_X86_64_TLSGD;
- break;
- case MCSymbolRefExpr::VK_TLSLD:
- Type = ELF::R_X86_64_TLSLD;
- break;
- }
- break;
- case FK_PCRel_2:
- assert(Modifier == MCSymbolRefExpr::VK_None);
- Type = ELF::R_X86_64_PC16;
- break;
- case FK_PCRel_1:
- assert(Modifier == MCSymbolRefExpr::VK_None);
- Type = ELF::R_X86_64_PC8;
- break;
- }
- } else {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
- case FK_Data_8: Type = ELF::R_X86_64_64; break;
- case X86::reloc_signed_4byte:
- switch (Modifier) {
- default:
- llvm_unreachable("Unimplemented");
- case MCSymbolRefExpr::VK_None:
- Type = ELF::R_X86_64_32S;
- break;
- case MCSymbolRefExpr::VK_GOT:
- Type = ELF::R_X86_64_GOT32;
- break;
- case MCSymbolRefExpr::VK_GOTPCREL:
- Type = ELF::R_X86_64_GOTPCREL;
- break;
- case MCSymbolRefExpr::VK_TPOFF:
- Type = ELF::R_X86_64_TPOFF32;
- break;
- case MCSymbolRefExpr::VK_DTPOFF:
- Type = ELF::R_X86_64_DTPOFF32;
- break;
- }
- break;
- case FK_Data_4:
- Type = ELF::R_X86_64_32;
- break;
- case FK_Data_2: Type = ELF::R_X86_64_16; break;
- case FK_PCRel_1:
- case FK_Data_1: Type = ELF::R_X86_64_8; break;
- }
- }
- } else {
- if (IsPCRel) {
- switch (Modifier) {
- default:
- llvm_unreachable("Unimplemented");
- case MCSymbolRefExpr::VK_None:
- Type = ELF::R_386_PC32;
- break;
- case MCSymbolRefExpr::VK_PLT:
- Type = ELF::R_386_PLT32;
- break;
- }
- } else {
- switch ((unsigned)Fixup.getKind()) {
- default: llvm_unreachable("invalid fixup kind!");
-
- case X86::reloc_global_offset_table:
- Type = ELF::R_386_GOTPC;
- break;
-
- // FIXME: Should we avoid selecting reloc_signed_4byte in 32 bit mode
- // instead?
- case X86::reloc_signed_4byte:
- case FK_PCRel_4:
- case FK_Data_4:
- switch (Modifier) {
- default:
- llvm_unreachable("Unimplemented");
- case MCSymbolRefExpr::VK_None:
- Type = ELF::R_386_32;
- break;
- case MCSymbolRefExpr::VK_GOT:
- Type = ELF::R_386_GOT32;
- break;
- case MCSymbolRefExpr::VK_GOTOFF:
- Type = ELF::R_386_GOTOFF;
- break;
- case MCSymbolRefExpr::VK_TLSGD:
- Type = ELF::R_386_TLS_GD;
- break;
- case MCSymbolRefExpr::VK_TPOFF:
- Type = ELF::R_386_TLS_LE_32;
- break;
- case MCSymbolRefExpr::VK_INDNTPOFF:
- Type = ELF::R_386_TLS_IE;
- break;
- case MCSymbolRefExpr::VK_NTPOFF:
- Type = ELF::R_386_TLS_LE;
- break;
- case MCSymbolRefExpr::VK_GOTNTPOFF:
- Type = ELF::R_386_TLS_GOTIE;
- break;
- case MCSymbolRefExpr::VK_TLSLDM:
- Type = ELF::R_386_TLS_LDM;
- break;
- case MCSymbolRefExpr::VK_DTPOFF:
- Type = ELF::R_386_TLS_LDO_32;
- break;
- case MCSymbolRefExpr::VK_GOTTPOFF:
- Type = ELF::R_386_TLS_IE_32;
- break;
- }
- break;
- case FK_Data_2: Type = ELF::R_386_16; break;
- case FK_PCRel_1:
- case FK_Data_1: Type = ELF::R_386_8; break;
- }
- }
- }
-
- if (RelocNeedsGOT(Modifier))
- NeedsGOT = true;
-
- return Type;
-}
-
-MipsELFObjectWriter::MipsELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian)
- : ELFObjectWriter(MOTW, _OS, IsLittleEndian) {}
-
-MipsELFObjectWriter::~MipsELFObjectWriter() {}
-
-unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
- const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) {
- // tbd
- return 1;
+ return new ELFObjectWriter(MOTW, OS, IsLittleEndian);
}
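
With the target-specific subclasses gone, everything they used to override (GetRelocType, adjustFixupOffset, relocation sorting, e_flags, OSABI) is routed through the injected MCELFObjectTargetWriter instead of a switch on e_machine, so the factory constructs a single generic writer. A simplified sketch of the pattern (names abbreviated; not the actual interface):

// The generic writer delegates target decisions to an injected hook object.
struct TargetHooks {
  virtual ~TargetHooks() {}
  virtual unsigned getRelocType(bool IsPCRel) const = 0;
};

struct Writer {
  TargetHooks &Hooks;  // lifetime owned by the caller in this sketch
  explicit Writer(TargetHooks &H) : Hooks(H) {}
  unsigned relocFor(bool IsPCRel) const { return Hooks.getRelocType(IsPCRel); }
};
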
diff --git a/contrib/llvm/lib/MC/ELFObjectWriter.h b/contrib/llvm/lib/MC/ELFObjectWriter.h
deleted file mode 100644
index 862b085..0000000
--- a/contrib/llvm/lib/MC/ELFObjectWriter.h
+++ /dev/null
@@ -1,446 +0,0 @@
-//===- lib/MC/ELFObjectWriter.h - ELF File Writer -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements ELF object file writer information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_MC_ELFOBJECTWRITER_H
-#define LLVM_MC_ELFOBJECTWRITER_H
-
-#include "MCELF.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCELFSymbolFlags.h"
-#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCSymbol.h"
-
-#include <vector>
-
-namespace llvm {
-
-class MCSection;
-class MCDataFragment;
-class MCSectionELF;
-
-class ELFObjectWriter : public MCObjectWriter {
- protected:
-
- static bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind);
- static bool RelocNeedsGOT(MCSymbolRefExpr::VariantKind Variant);
- static uint64_t SymbolValue(MCSymbolData &Data, const MCAsmLayout &Layout);
- static bool isInSymtab(const MCAssembler &Asm, const MCSymbolData &Data,
- bool Used, bool Renamed);
- static bool isLocal(const MCSymbolData &Data, bool isSignature,
- bool isUsedInReloc);
- static bool IsELFMetaDataSection(const MCSectionData &SD);
- static uint64_t DataSectionSize(const MCSectionData &SD);
- static uint64_t GetSectionFileSize(const MCAsmLayout &Layout,
- const MCSectionData &SD);
- static uint64_t GetSectionAddressSize(const MCAsmLayout &Layout,
- const MCSectionData &SD);
-
- void WriteDataSectionData(MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCSectionELF &Section);
-
- /*static bool isFixupKindX86RIPRel(unsigned Kind) {
- return Kind == X86::reloc_riprel_4byte ||
- Kind == X86::reloc_riprel_4byte_movq_load;
- }*/
-
- /// ELFSymbolData - Helper struct for containing some precomputed
- /// information on symbols.
- struct ELFSymbolData {
- MCSymbolData *SymbolData;
- uint64_t StringIndex;
- uint32_t SectionIndex;
-
- // Support lexicographic sorting.
- bool operator<(const ELFSymbolData &RHS) const {
- if (MCELF::GetType(*SymbolData) == ELF::STT_FILE)
- return true;
- if (MCELF::GetType(*RHS.SymbolData) == ELF::STT_FILE)
- return false;
- return SymbolData->getSymbol().getName() <
- RHS.SymbolData->getSymbol().getName();
- }
- };
-
- /// @name Relocation Data
- /// @{
-
- struct ELFRelocationEntry {
- // Make these big enough for both 32-bit and 64-bit
- uint64_t r_offset;
- int Index;
- unsigned Type;
- const MCSymbol *Symbol;
- uint64_t r_addend;
-
- ELFRelocationEntry()
- : r_offset(0), Index(0), Type(0), Symbol(0), r_addend(0) {}
-
- ELFRelocationEntry(uint64_t RelocOffset, int Idx,
- unsigned RelType, const MCSymbol *Sym,
- uint64_t Addend)
- : r_offset(RelocOffset), Index(Idx), Type(RelType),
- Symbol(Sym), r_addend(Addend) {}
-
- // Support lexicographic sorting.
- bool operator<(const ELFRelocationEntry &RE) const {
- return RE.r_offset < r_offset;
- }
- };
-
- /// The target specific ELF writer instance.
- llvm::OwningPtr<MCELFObjectTargetWriter> TargetObjectWriter;
-
- SmallPtrSet<const MCSymbol *, 16> UsedInReloc;
- SmallPtrSet<const MCSymbol *, 16> WeakrefUsedInReloc;
- DenseMap<const MCSymbol *, const MCSymbol *> Renames;
-
- llvm::DenseMap<const MCSectionData*,
- std::vector<ELFRelocationEntry> > Relocations;
- DenseMap<const MCSection*, uint64_t> SectionStringTableIndex;
-
- /// @}
- /// @name Symbol Table Data
- /// @{
-
- SmallString<256> StringTable;
- std::vector<ELFSymbolData> LocalSymbolData;
- std::vector<ELFSymbolData> ExternalSymbolData;
- std::vector<ELFSymbolData> UndefinedSymbolData;
-
- /// @}
-
- bool NeedsGOT;
-
- bool NeedsSymtabShndx;
-
- // This holds the symbol table index of the last local symbol.
- unsigned LastLocalSymbolIndex;
- // This holds the .strtab section index.
- unsigned StringTableIndex;
- // This holds the .symtab section index.
- unsigned SymbolTableIndex;
-
- unsigned ShstrtabIndex;
-
-
- virtual const MCSymbol *SymbolToReloc(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const;
-
- // For arch-specific emission of explicit reloc symbol
- virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- return NULL;
- }
-
- bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
- bool hasRelocationAddend() const {
- return TargetObjectWriter->hasRelocationAddend();
- }
-
- public:
- ELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS, bool IsLittleEndian)
- : MCObjectWriter(_OS, IsLittleEndian),
- TargetObjectWriter(MOTW),
- NeedsGOT(false), NeedsSymtabShndx(false){
- }
-
- virtual ~ELFObjectWriter();
-
- void WriteWord(uint64_t W) {
- if (is64Bit())
- Write64(W);
- else
- Write32(W);
- }
-
- void StringLE16(char *buf, uint16_t Value) {
- buf[0] = char(Value >> 0);
- buf[1] = char(Value >> 8);
- }
-
- void StringLE32(char *buf, uint32_t Value) {
- StringLE16(buf, uint16_t(Value >> 0));
- StringLE16(buf + 2, uint16_t(Value >> 16));
- }
-
- void StringLE64(char *buf, uint64_t Value) {
- StringLE32(buf, uint32_t(Value >> 0));
- StringLE32(buf + 4, uint32_t(Value >> 32));
- }
-
- void StringBE16(char *buf, uint16_t Value) {
- buf[0] = char(Value >> 8);
- buf[1] = char(Value >> 0);
- }
-
- void StringBE32(char *buf, uint32_t Value) {
- StringBE16(buf, uint16_t(Value >> 16));
- StringBE16(buf + 2, uint16_t(Value >> 0));
- }
-
- void StringBE64(char *buf, uint64_t Value) {
- StringBE32(buf, uint32_t(Value >> 32));
- StringBE32(buf + 4, uint32_t(Value >> 0));
- }
-
- void String8(MCDataFragment &F, uint8_t Value) {
- char buf[1];
- buf[0] = Value;
- F.getContents() += StringRef(buf, 1);
- }
-
- void String16(MCDataFragment &F, uint16_t Value) {
- char buf[2];
- if (isLittleEndian())
- StringLE16(buf, Value);
- else
- StringBE16(buf, Value);
- F.getContents() += StringRef(buf, 2);
- }
-
- void String32(MCDataFragment &F, uint32_t Value) {
- char buf[4];
- if (isLittleEndian())
- StringLE32(buf, Value);
- else
- StringBE32(buf, Value);
- F.getContents() += StringRef(buf, 4);
- }
-
- void String64(MCDataFragment &F, uint64_t Value) {
- char buf[8];
- if (isLittleEndian())
- StringLE64(buf, Value);
- else
- StringBE64(buf, Value);
- F.getContents() += StringRef(buf, 8);
- }
-
- virtual void WriteHeader(uint64_t SectionDataSize, unsigned NumberOfSections);
-
- /// Default e_flags = 0
- virtual void WriteEFlags() { Write32(0); }
-
- virtual void WriteSymbolEntry(MCDataFragment *SymtabF, MCDataFragment *ShndxF,
- uint64_t name, uint8_t info,
- uint64_t value, uint64_t size,
- uint8_t other, uint32_t shndx,
- bool Reserved);
-
- virtual void WriteSymbol(MCDataFragment *SymtabF, MCDataFragment *ShndxF,
- ELFSymbolData &MSD,
- const MCAsmLayout &Layout);
-
- typedef DenseMap<const MCSectionELF*, uint32_t> SectionIndexMapTy;
- virtual void WriteSymbolTable(MCDataFragment *SymtabF, MCDataFragment *ShndxF,
- const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const SectionIndexMapTy &SectionIndexMap);
-
- virtual void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
- const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, uint64_t &FixedValue);
-
- virtual uint64_t getSymbolIndexInSymbolTable(const MCAssembler &Asm,
- const MCSymbol *S);
-
- // Map from a group section to the signature symbol
- typedef DenseMap<const MCSectionELF*, const MCSymbol*> GroupMapTy;
- // Map from a signature symbol to the group section
- typedef DenseMap<const MCSymbol*, const MCSectionELF*> RevGroupMapTy;
- // Map from a section to the section with the relocations
- typedef DenseMap<const MCSectionELF*, const MCSectionELF*> RelMapTy;
- // Map from a section to its offset
- typedef DenseMap<const MCSectionELF*, uint64_t> SectionOffsetMapTy;
-
- /// ComputeSymbolTable - Compute the symbol table data
- ///
- /// \param StringTable [out] - The string table data.
- /// \param StringIndexMap [out] - Map from symbol names to offsets in the
- /// string table.
- virtual void ComputeSymbolTable(MCAssembler &Asm,
- const SectionIndexMapTy &SectionIndexMap,
- RevGroupMapTy RevGroupMap,
- unsigned NumRegularSections);
-
- virtual void ComputeIndexMap(MCAssembler &Asm,
- SectionIndexMapTy &SectionIndexMap,
- const RelMapTy &RelMap);
-
- void CreateRelocationSections(MCAssembler &Asm, MCAsmLayout &Layout,
- RelMapTy &RelMap);
-
- void WriteRelocations(MCAssembler &Asm, MCAsmLayout &Layout,
- const RelMapTy &RelMap);
-
- virtual void CreateMetadataSections(MCAssembler &Asm, MCAsmLayout &Layout,
- SectionIndexMapTy &SectionIndexMap,
- const RelMapTy &RelMap);
-
- // Create the sections that show up in the symbol table. Currently
- // those are the .note.GNU-stack section and the group sections.
- virtual void CreateIndexedSections(MCAssembler &Asm, MCAsmLayout &Layout,
- GroupMapTy &GroupMap,
- RevGroupMapTy &RevGroupMap,
- SectionIndexMapTy &SectionIndexMap,
- const RelMapTy &RelMap);
-
- virtual void ExecutePostLayoutBinding(MCAssembler &Asm,
- const MCAsmLayout &Layout);
-
- void WriteSectionHeader(MCAssembler &Asm, const GroupMapTy &GroupMap,
- const MCAsmLayout &Layout,
- const SectionIndexMapTy &SectionIndexMap,
- const SectionOffsetMapTy &SectionOffsetMap);
-
- void ComputeSectionOrder(MCAssembler &Asm,
- std::vector<const MCSectionELF*> &Sections);
-
- virtual void WriteSecHdrEntry(uint32_t Name, uint32_t Type, uint64_t Flags,
- uint64_t Address, uint64_t Offset,
- uint64_t Size, uint32_t Link, uint32_t Info,
- uint64_t Alignment, uint64_t EntrySize);
-
- virtual void WriteRelocationsFragment(const MCAssembler &Asm,
- MCDataFragment *F,
- const MCSectionData *SD);
-
- virtual bool
- IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
- const MCSymbolData &DataA,
- const MCFragment &FB,
- bool InSet,
- bool IsPCRel) const;
-
- virtual void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout);
- virtual void WriteSection(MCAssembler &Asm,
- const SectionIndexMapTy &SectionIndexMap,
- uint32_t GroupSymbolIndex,
- uint64_t Offset, uint64_t Size, uint64_t Alignment,
- const MCSectionELF &Section);
-
- protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) = 0;
- virtual void adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset) { }
- };
-
- //===- X86ELFObjectWriter -------------------------------------------===//
-
- class X86ELFObjectWriter : public ELFObjectWriter {
- public:
- X86ELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian);
-
- virtual ~X86ELFObjectWriter();
- protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend);
- };
-
-
- //===- ARMELFObjectWriter -------------------------------------------===//
-
- class ARMELFObjectWriter : public ELFObjectWriter {
- public:
- // FIXME: MCAssembler can't yet return the Subtarget.
- enum { DefaultEABIVersion = 0x05000000U };
-
- ARMELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian);
-
- virtual ~ARMELFObjectWriter();
-
- virtual void WriteEFlags();
- protected:
- virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const;
-
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend);
- private:
- unsigned GetRelocTypeInner(const MCValue &Target,
- const MCFixup &Fixup, bool IsPCRel) const;
-
- };
-
- //===- PPCELFObjectWriter -------------------------------------------===//
-
- class PPCELFObjectWriter : public ELFObjectWriter {
- public:
- PPCELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian);
-
- virtual ~PPCELFObjectWriter();
- protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend);
- virtual void adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset);
- };
-
- //===- MBlazeELFObjectWriter -------------------------------------------===//
-
- class MBlazeELFObjectWriter : public ELFObjectWriter {
- public:
- MBlazeELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian);
-
- virtual ~MBlazeELFObjectWriter();
- protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend);
- };
-
- //===- MipsELFObjectWriter -------------------------------------------===//
-
- class MipsELFObjectWriter : public ELFObjectWriter {
- public:
- MipsELFObjectWriter(MCELFObjectTargetWriter *MOTW,
- raw_ostream &_OS,
- bool IsLittleEndian);
-
- virtual ~MipsELFObjectWriter();
- protected:
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend);
- };
-}
-
-#endif
diff --git a/contrib/llvm/lib/MC/MCAsmBackend.cpp b/contrib/llvm/lib/MC/MCAsmBackend.cpp
index 2c150f4..0b2e4ae 100644
--- a/contrib/llvm/lib/MC/MCAsmBackend.cpp
+++ b/contrib/llvm/lib/MC/MCAsmBackend.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCFixupKindInfo.h"
using namespace llvm;
MCAsmBackend::MCAsmBackend()
@@ -21,14 +22,22 @@ MCAsmBackend::~MCAsmBackend() {
const MCFixupKindInfo &
MCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
static const MCFixupKindInfo Builtins[] = {
- { "FK_Data_1", 0, 8, 0 },
- { "FK_Data_2", 0, 16, 0 },
- { "FK_Data_4", 0, 32, 0 },
- { "FK_Data_8", 0, 64, 0 },
- { "FK_PCRel_1", 0, 8, MCFixupKindInfo::FKF_IsPCRel },
+ { "FK_Data_1", 0, 8, 0 },
+ { "FK_Data_2", 0, 16, 0 },
+ { "FK_Data_4", 0, 32, 0 },
+ { "FK_Data_8", 0, 64, 0 },
+ { "FK_PCRel_1", 0, 8, MCFixupKindInfo::FKF_IsPCRel },
{ "FK_PCRel_2", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
{ "FK_PCRel_4", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
- { "FK_PCRel_8", 0, 64, MCFixupKindInfo::FKF_IsPCRel }
+ { "FK_PCRel_8", 0, 64, MCFixupKindInfo::FKF_IsPCRel },
+ { "FK_GPRel_1", 0, 8, 0 },
+ { "FK_GPRel_2", 0, 16, 0 },
+ { "FK_GPRel_4", 0, 32, 0 },
+ { "FK_GPRel_8", 0, 64, 0 },
+ { "FK_SecRel_1", 0, 8, 0 },
+ { "FK_SecRel_2", 0, 16, 0 },
+ { "FK_SecRel_4", 0, 32, 0 },
+ { "FK_SecRel_8", 0, 64, 0 }
};
assert((size_t)Kind <= sizeof(Builtins) / sizeof(Builtins[0]) &&
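
The builtin table is indexed directly by the fixup kind, with each row following the MCFixupKindInfo layout (name, bit offset, bit width, flags). A compact sketch of the lookup (struct simplified; table truncated to the data kinds):

#include <cassert>
#include <cstddef>

struct FixupInfo { const char *Name; unsigned Offset, Bits, Flags; };

static const FixupInfo Builtins[] = {
  {"FK_Data_1", 0,  8, 0},
  {"FK_Data_2", 0, 16, 0},
  {"FK_Data_4", 0, 32, 0},
  {"FK_Data_8", 0, 64, 0},
};

static const FixupInfo &getFixupInfo(std::size_t Kind) {
  assert(Kind < sizeof(Builtins) / sizeof(Builtins[0]) && "Unknown fixup kind");
  return Builtins[Kind];
}
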
diff --git a/contrib/llvm/lib/MC/MCAsmInfo.cpp b/contrib/llvm/lib/MC/MCAsmInfo.cpp
index 95861bc..8286c1d 100644
--- a/contrib/llvm/lib/MC/MCAsmInfo.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfo.cpp
@@ -29,7 +29,6 @@ MCAsmInfo::MCAsmInfo() {
HasSubsectionsViaSymbols = false;
HasMachoZeroFillDirective = false;
HasMachoTBSSDirective = false;
- StructorOutputOrder = Structors::ReversePriorityOrder;
HasStaticCtorDtorReferenceInStaticMode = false;
LinkerRequiresNonEmptyDwarfLines = false;
MaxInstLength = 4;
@@ -50,6 +49,7 @@ MCAsmInfo::MCAsmInfo() {
AllowQuotesInName = false;
AllowNameToStartWithDigit = false;
AllowPeriodsInName = true;
+ AllowUTF8 = true;
ZeroDirective = "\t.zero\t";
AsciiDirective = "\t.ascii\t";
AscizDirective = "\t.asciz\t";
@@ -68,6 +68,7 @@ MCAsmInfo::MCAsmInfo() {
AlignDirective = "\t.align\t";
AlignmentIsInBytes = true;
TextAlignFillValue = 0;
+ GPRel64Directive = 0;
GPRel32Directive = 0;
GlobalDirective = "\t.globl\t";
HasSetDirective = true;
@@ -91,6 +92,7 @@ MCAsmInfo::MCAsmInfo() {
DwarfRequiresRelocationForSectionOffset = true;
DwarfSectionOffsetDirective = 0;
DwarfUsesLabelOffsetForRanges = true;
+ DwarfUsesRelocationsForStringPool = true;
DwarfRegNumForCFI = false;
HasMicrosoftFastStdCallMangling = false;
diff --git a/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp b/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
index 434d910..881d992 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoCOFF.cpp
@@ -13,9 +13,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCAsmInfoCOFF.h"
-#include "llvm/ADT/SmallVector.h"
using namespace llvm;
+void MCAsmInfoCOFF::anchor() { }
+
MCAsmInfoCOFF::MCAsmInfoCOFF() {
GlobalPrefix = "_";
COMMDirectiveAlignmentIsInBytes = false;
@@ -38,3 +39,15 @@ MCAsmInfoCOFF::MCAsmInfoCOFF() {
SupportsDataRegions = false;
}
+
+void MCAsmInfoMicrosoft::anchor() { }
+
+MCAsmInfoMicrosoft::MCAsmInfoMicrosoft() {
+ AllowQuotesInName = true;
+}
+
+void MCAsmInfoGNUCOFF::anchor() { }
+
+MCAsmInfoGNUCOFF::MCAsmInfoGNUCOFF() {
+
+}
diff --git a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
index b20e338..c1e2635 100644
--- a/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
+++ b/contrib/llvm/lib/MC/MCAsmInfoDarwin.cpp
@@ -18,6 +18,8 @@
#include "llvm/MC/MCStreamer.h"
using namespace llvm;
+void MCAsmInfoDarwin::anchor() { }
+
MCAsmInfoDarwin::MCAsmInfoDarwin() {
// Common settings for all Darwin targets.
// Syntax:
@@ -39,7 +41,6 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
ZeroDirective = "\t.space\t"; // ".space N" emits N zeros.
HasMachoZeroFillDirective = true; // Uses .zerofill
HasMachoTBSSDirective = true; // Uses .tbss
- StructorOutputOrder = Structors::PriorityOrder;
HasStaticCtorDtorReferenceInStaticMode = true;
CodeBegin = "L$start$code$";
@@ -57,8 +58,9 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
HiddenVisibilityAttr = MCSA_PrivateExtern;
HiddenDeclarationVisibilityAttr = MCSA_Invalid;
+
// Doesn't support protected visibility.
- ProtectedVisibilityAttr = MCSA_Global;
+ ProtectedVisibilityAttr = MCSA_Invalid;
HasDotTypeDotSizeDirective = false;
HasNoDeadStrip = true;
@@ -66,4 +68,5 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
DwarfRequiresRelocationForSectionOffset = false;
DwarfUsesLabelOffsetForRanges = false;
+ DwarfUsesRelocationsForStringPool = false;
}
diff --git a/contrib/llvm/lib/MC/MCAsmStreamer.cpp b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
index 3fcbb05..11f0f72 100644
--- a/contrib/llvm/lib/MC/MCAsmStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
@@ -29,6 +29,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/PathV2.h"
#include <cctype>
using namespace llvm;
@@ -50,6 +51,7 @@ private:
unsigned ShowInst : 1;
unsigned UseLoc : 1;
unsigned UseCFI : 1;
+ unsigned UseDwarfDirectory : 1;
enum EHSymbolFlags { EHGlobal = 1,
EHWeakDefinition = 1 << 1,
@@ -59,17 +61,21 @@ private:
bool needsSet(const MCExpr *Value);
void EmitRegisterName(int64_t Register);
+ virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
+ virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame);
public:
MCAsmStreamer(MCContext &Context, formatted_raw_ostream &os,
bool isVerboseAsm, bool useLoc, bool useCFI,
+ bool useDwarfDirectory,
MCInstPrinter *printer, MCCodeEmitter *emitter,
MCAsmBackend *asmbackend,
bool showInst)
: MCStreamer(Context), OS(os), MAI(Context.getAsmInfo()),
InstPrinter(printer), Emitter(emitter), AsmBackend(asmbackend),
CommentStream(CommentToEmit), IsVerboseAsm(isVerboseAsm),
- ShowInst(showInst), UseLoc(useLoc), UseCFI(useCFI) {
+ ShowInst(showInst), UseLoc(useLoc), UseCFI(useCFI),
+ UseDwarfDirectory(useDwarfDirectory) {
if (InstPrinter && IsVerboseAsm)
InstPrinter->setCommentStream(CommentStream);
}
@@ -150,6 +156,7 @@ public:
virtual void EmitCOFFSymbolStorageClass(int StorageClass);
virtual void EmitCOFFSymbolType(int Type);
virtual void EndCOFFSymbolDef();
+ virtual void EmitCOFFSecRel32(MCSymbol const *Symbol);
virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment);
@@ -179,6 +186,8 @@ public:
virtual void EmitSLEB128Value(const MCExpr *Value);
+ virtual void EmitGPRel64Value(const MCExpr *Value);
+
virtual void EmitGPRel32Value(const MCExpr *Value);
@@ -192,19 +201,18 @@ public:
virtual void EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit = 0);
- virtual void EmitValueToOffset(const MCExpr *Offset,
+ virtual bool EmitValueToOffset(const MCExpr *Offset,
unsigned char Value = 0);
virtual void EmitFileDirective(StringRef Filename);
- virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename);
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+ StringRef Filename);
virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
unsigned Column, unsigned Flags,
unsigned Isa, unsigned Discriminator,
StringRef FileName);
virtual void EmitCFISections(bool EH, bool Debug);
- virtual void EmitCFIStartProc();
- virtual void EmitCFIEndProc();
virtual void EmitCFIDefCfa(int64_t Register, int64_t Offset);
virtual void EmitCFIDefCfaOffset(int64_t Offset);
virtual void EmitCFIDefCfaRegister(int64_t Register);
@@ -216,6 +224,7 @@ public:
virtual void EmitCFISameValue(int64_t Register);
virtual void EmitCFIRelOffset(int64_t Register, int64_t Offset);
virtual void EmitCFIAdjustCfaOffset(int64_t Adjustment);
+ virtual void EmitCFISignalFrame();
virtual void EmitWin64EHStartProc(const MCSymbol *Symbol);
virtual void EmitWin64EHEndProc();
@@ -249,7 +258,7 @@ public:
/// indicated by the hasRawTextSupport() predicate.
virtual void EmitRawText(StringRef String);
- virtual void Finish();
+ virtual void FinishImpl();
/// @}
};
@@ -334,7 +343,6 @@ void MCAsmStreamer::EmitLabel(MCSymbol *Symbol) {
void MCAsmStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
switch (Flag) {
- default: assert(0 && "Invalid flag!");
case MCAF_SyntaxUnified: OS << "\t.syntax unified"; break;
case MCAF_SubsectionsViaSymbols: OS << ".subsections_via_symbols"; break;
case MCAF_Code16: OS << '\t'<< MAI.getCode16Directive(); break;
@@ -386,7 +394,7 @@ void MCAsmStreamer::EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
MCSymbolAttr Attribute) {
switch (Attribute) {
- case MCSA_Invalid: assert(0 && "Invalid symbol attribute");
+ case MCSA_Invalid: llvm_unreachable("Invalid symbol attribute");
case MCSA_ELF_TypeFunction: /// .type _foo, STT_FUNC # aka @function
case MCSA_ELF_TypeIndFunction: /// .type _foo, STT_GNU_IFUNC
case MCSA_ELF_TypeObject: /// .type _foo, STT_OBJECT # aka @object
@@ -398,7 +406,7 @@ void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
OS << "\t.type\t" << *Symbol << ','
<< ((MAI.getCommentString()[0] != '@') ? '@' : '%');
switch (Attribute) {
- default: assert(0 && "Unknown ELF .type");
+ default: llvm_unreachable("Unknown ELF .type");
case MCSA_ELF_TypeFunction: OS << "function"; break;
case MCSA_ELF_TypeIndFunction: OS << "gnu_indirect_function"; break;
case MCSA_ELF_TypeObject: OS << "object"; break;
@@ -465,6 +473,11 @@ void MCAsmStreamer::EndCOFFSymbolDef() {
EmitEOL();
}
+void MCAsmStreamer::EmitCOFFSecRel32(MCSymbol const *Symbol) {
+ OS << "\t.secrel32\t" << *Symbol << '\n';
+ EmitEOL();
+}
+
void MCAsmStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
assert(MAI.hasDotTypeDotSizeDirective());
OS << "\t.size\t" << *Symbol << ", " << *Value << '\n';
@@ -652,6 +665,12 @@ void MCAsmStreamer::EmitSLEB128Value(const MCExpr *Value) {
EmitEOL();
}
+void MCAsmStreamer::EmitGPRel64Value(const MCExpr *Value) {
+ assert(MAI.getGPRel64Directive() != 0);
+ OS << MAI.getGPRel64Directive() << *Value;
+ EmitEOL();
+}
+
void MCAsmStreamer::EmitGPRel32Value(const MCExpr *Value) {
assert(MAI.getGPRel32Directive() != 0);
OS << MAI.getGPRel32Directive() << *Value;
@@ -733,11 +752,12 @@ void MCAsmStreamer::EmitCodeAlignment(unsigned ByteAlignment,
1, MaxBytesToEmit);
}
-void MCAsmStreamer::EmitValueToOffset(const MCExpr *Offset,
+bool MCAsmStreamer::EmitValueToOffset(const MCExpr *Offset,
unsigned char Value) {
// FIXME: Verify that Offset is associated with the current section.
OS << ".org " << *Offset << ", " << (unsigned) Value;
EmitEOL();
+ return false;
}
@@ -748,13 +768,27 @@ void MCAsmStreamer::EmitFileDirective(StringRef Filename) {
EmitEOL();
}
-bool MCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo, StringRef Filename){
+bool MCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+ StringRef Filename) {
+ if (!UseDwarfDirectory && !Directory.empty()) {
+ if (sys::path::is_absolute(Filename))
+ return EmitDwarfFileDirective(FileNo, "", Filename);
+
+ SmallString<128> FullPathName = Directory;
+ sys::path::append(FullPathName, Filename);
+ return EmitDwarfFileDirective(FileNo, "", FullPathName);
+ }
+
if (UseLoc) {
OS << "\t.file\t" << FileNo << ' ';
+ if (!Directory.empty()) {
+ PrintQuotedString(Directory, OS);
+ OS << ' ';
+ }
PrintQuotedString(Filename, OS);
EmitEOL();
}
- return this->MCStreamer::EmitDwarfFileDirective(FileNo, Filename);
+ return this->MCStreamer::EmitDwarfFileDirective(FileNo, Directory, Filename);
}
void MCAsmStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
@@ -816,21 +850,25 @@ void MCAsmStreamer::EmitCFISections(bool EH, bool Debug) {
EmitEOL();
}
-void MCAsmStreamer::EmitCFIStartProc() {
- MCStreamer::EmitCFIStartProc();
-
- if (!UseCFI)
+void MCAsmStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) {
+ if (!UseCFI) {
+ RecordProcStart(Frame);
return;
+ }
OS << "\t.cfi_startproc";
EmitEOL();
}
-void MCAsmStreamer::EmitCFIEndProc() {
- MCStreamer::EmitCFIEndProc();
-
- if (!UseCFI)
+void MCAsmStreamer::EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
+ if (!UseCFI) {
+ RecordProcEnd(Frame);
return;
+ }
+
+ // Put a dummy non-null value in Frame.End to mark that this frame has been
+ // closed.
+ Frame.End = (MCSymbol *) 1;
OS << "\t.cfi_endproc";
EmitEOL();
@@ -965,6 +1003,16 @@ void MCAsmStreamer::EmitCFIAdjustCfaOffset(int64_t Adjustment) {
EmitEOL();
}
+void MCAsmStreamer::EmitCFISignalFrame() {
+ MCStreamer::EmitCFISignalFrame();
+
+ if (!UseCFI)
+ return;
+
+ OS << "\t.cif_signal_frame";
+ EmitEOL();
+}
+
void MCAsmStreamer::EmitWin64EHStartProc(const MCSymbol *Symbol) {
MCStreamer::EmitWin64EHStartProc(Symbol);
@@ -1260,10 +1308,16 @@ void MCAsmStreamer::EmitRawText(StringRef String) {
EmitEOL();
}
-void MCAsmStreamer::Finish() {
+void MCAsmStreamer::FinishImpl() {
+ // FIXME: This logic is duplicated in MCObjectStreamer.
// Dump out the dwarf file & directory tables and line tables.
+ const MCSymbol *LineSectionSymbol = NULL;
if (getContext().hasDwarfFiles() && !UseLoc)
- MCDwarfFileTable::Emit(this);
+ LineSectionSymbol = MCDwarfFileTable::Emit(this);
+
+ // If we are generating dwarf for assembly source files, dump out the sections.
+ if (getContext().getGenDwarfForAssembly())
+ MCGenDwarfInfo::Emit(this, LineSectionSymbol);
if (!UseCFI)
EmitFrames(false);
@@ -1271,9 +1325,9 @@ void MCAsmStreamer::Finish() {
MCStreamer *llvm::createAsmStreamer(MCContext &Context,
formatted_raw_ostream &OS,
bool isVerboseAsm, bool useLoc,
- bool useCFI, MCInstPrinter *IP,
- MCCodeEmitter *CE, MCAsmBackend *MAB,
- bool ShowInst) {
+ bool useCFI, bool useDwarfDirectory,
+ MCInstPrinter *IP, MCCodeEmitter *CE,
+ MCAsmBackend *MAB, bool ShowInst) {
return new MCAsmStreamer(Context, OS, isVerboseAsm, useLoc, useCFI,
- IP, CE, MAB, ShowInst);
+ useDwarfDirectory, IP, CE, MAB, ShowInst);
}
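
The new EmitDwarfFileDirective overload folds the directory into the file name whenever separate directory operands are not usable: absolute paths pass through untouched, relative ones are joined to the directory up front. A sketch of that decision using std::filesystem in place of llvm::sys::path (behavior assumed equivalent for this purpose):

#include <filesystem>
#include <string>

// Mirror of the folding rule: keep absolute file names as-is, otherwise
// prepend the directory so a single operand carries the full path.
static std::string foldDirectory(const std::string &Dir,
                                 const std::string &File) {
  namespace fs = std::filesystem;
  if (Dir.empty() || fs::path(File).is_absolute())
    return File;
  return (fs::path(Dir) / File).string();
}
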
diff --git a/contrib/llvm/lib/MC/MCAssembler.cpp b/contrib/llvm/lib/MC/MCAssembler.cpp
index 06c8aec..66ba9b8 100644
--- a/contrib/llvm/lib/MC/MCAssembler.cpp
+++ b/contrib/llvm/lib/MC/MCAssembler.cpp
@@ -13,13 +13,13 @@
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
@@ -33,7 +33,7 @@ using namespace llvm;
namespace {
namespace stats {
STATISTIC(EmittedFragments, "Number of emitted assembler fragments");
-STATISTIC(EvaluateFixup, "Number of evaluated fixups");
+STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
@@ -118,7 +118,7 @@ uint64_t MCAsmLayout::getSymbolOffset(const MCSymbolData *SD) const {
if (Target.getSymB() && Target.getSymB()->getSymbol().isUndefined())
report_fatal_error("unable to evaluate offset to undefined symbol '" +
Target.getSymB()->getSymbol().getName() + "'");
-
+
uint64_t Offset = Target.getConstant();
if (Target.getSymA())
Offset += getSymbolOffset(&Assembler.getSymbolData(
@@ -136,7 +136,7 @@ uint64_t MCAsmLayout::getSymbolOffset(const MCSymbolData *SD) const {
uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
// The size is the last fragment's end offset.
const MCFragment &F = SD->getFragmentList().back();
- return getFragmentOffset(&F) + getAssembler().ComputeFragmentSize(*this, F);
+ return getFragmentOffset(&F) + getAssembler().computeFragmentSize(*this, F);
}
uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const {
@@ -237,13 +237,13 @@ const MCSymbolData *MCAssembler::getAtom(const MCSymbolData *SD) const {
return SD->getFragment()->getAtom();
}
-bool MCAssembler::EvaluateFixup(const MCAsmLayout &Layout,
+bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
const MCFixup &Fixup, const MCFragment *DF,
MCValue &Target, uint64_t &Value) const {
- ++stats::EvaluateFixup;
+ ++stats::evaluateFixup;
if (!Fixup.getValue()->EvaluateAsRelocatable(Target, Layout))
- report_fatal_error("expected relocatable expression");
+ getContext().FatalError(Fixup.getLoc(), "expected relocatable expression");
bool IsPCRel = Backend.getFixupKindInfo(
Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel;
@@ -273,13 +273,10 @@ bool MCAssembler::EvaluateFixup(const MCAsmLayout &Layout,
Value = Target.getConstant();
- bool IsThumb = false;
if (const MCSymbolRefExpr *A = Target.getSymA()) {
const MCSymbol &Sym = A->getSymbol().AliasedSymbol();
if (Sym.isDefined())
Value += Layout.getSymbolOffset(&getSymbolData(Sym));
- if (isThumbFunc(&Sym))
- IsThumb = true;
}
if (const MCSymbolRefExpr *B = Target.getSymB()) {
const MCSymbol &Sym = B->getSymbol().AliasedSymbol();
@@ -295,24 +292,22 @@ bool MCAssembler::EvaluateFixup(const MCAsmLayout &Layout,
if (IsPCRel) {
uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();
-
+
// A number of ARM fixups in Thumb mode require that the effective PC
// address be determined as the 32-bit aligned version of the actual offset.
if (ShouldAlignPC) Offset &= ~0x3;
Value -= Offset;
}
- // ARM fixups based from a thumb function address need to have the low
- // bit set. The actual value is always at least 16-bit aligned, so the
- // low bit is normally clear and available for use as an ISA flag for
- // interworking.
- if (IsThumb)
- Value |= 1;
+ // Let the backend adjust the fixup value if necessary, including whether
+ // we need a relocation.
+ Backend.processFixupValue(*this, Layout, Fixup, DF, Target, Value,
+ IsResolved);
return IsResolved;
}
-uint64_t MCAssembler::ComputeFragmentSize(const MCAsmLayout &Layout,
+uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
const MCFragment &F) const {
switch (F.getKind()) {
case MCFragment::FT_Data:
@@ -355,8 +350,7 @@ uint64_t MCAssembler::ComputeFragmentSize(const MCAsmLayout &Layout,
return cast<MCDwarfCallFrameFragment>(F).getContents().size();
}
- assert(0 && "invalid fragment kind");
- return 0;
+ llvm_unreachable("invalid fragment kind");
}
void MCAsmLayout::LayoutFragment(MCFragment *F) {
@@ -374,7 +368,7 @@ void MCAsmLayout::LayoutFragment(MCFragment *F) {
// Compute fragment offset and size.
uint64_t Offset = 0;
if (Prev)
- Offset += Prev->Offset + getAssembler().ComputeFragmentSize(*this, *Prev);
+ Offset += Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
F->Offset = Offset;
LastValidFragment[F->getParent()] = F;
@@ -390,7 +384,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
++stats::EmittedFragments;
// FIXME: Embed in fragments instead?
- uint64_t FragmentSize = Asm.ComputeFragmentSize(Layout, F);
+ uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);
switch (F.getKind()) {
case MCFragment::FT_Align: {
MCAlignFragment &AF = cast<MCAlignFragment>(F);
@@ -412,7 +406,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
// bytes left to fill use the Value and ValueSize to fill the rest.
// If we are aligning with nops, ask the target to emit the right data.
if (AF.hasEmitNops()) {
- if (!Asm.getBackend().WriteNopData(Count, OW))
+ if (!Asm.getBackend().writeNopData(Count, OW))
report_fatal_error("unable to write nop sequence of " +
Twine(Count) + " bytes");
break;
@@ -421,8 +415,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
// Otherwise, write out in multiples of the value size.
for (uint64_t i = 0; i != Count; ++i) {
switch (AF.getValueSize()) {
- default:
- assert(0 && "Invalid size!");
+ default: llvm_unreachable("Invalid size!");
case 1: OW->Write8 (uint8_t (AF.getValue())); break;
case 2: OW->Write16(uint16_t(AF.getValue())); break;
case 4: OW->Write32(uint32_t(AF.getValue())); break;
@@ -446,8 +439,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) {
switch (FF.getValueSize()) {
- default:
- assert(0 && "Invalid size!");
+ default: llvm_unreachable("Invalid size!");
case 1: OW->Write8 (uint8_t (FF.getValue())); break;
case 2: OW->Write16(uint16_t(FF.getValue())); break;
case 4: OW->Write32(uint32_t(FF.getValue())); break;
@@ -493,7 +485,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
assert(OW->getStream().tell() - Start == FragmentSize);
}
-void MCAssembler::WriteSectionData(const MCSectionData *SD,
+void MCAssembler::writeSectionData(const MCSectionData *SD,
const MCAsmLayout &Layout) const {
// Ignore virtual sections.
if (SD->getSection().isVirtualSection()) {
@@ -503,8 +495,7 @@ void MCAssembler::WriteSectionData(const MCSectionData *SD,
for (MCSectionData::const_iterator it = SD->begin(),
ie = SD->end(); it != ie; ++it) {
switch (it->getKind()) {
- default:
- assert(0 && "Invalid fragment in virtual section!");
+ default: llvm_unreachable("Invalid fragment in virtual section!");
case MCFragment::FT_Data: {
// Check that we aren't trying to write a non-zero contents (or fixups)
// into a virtual section. This is to support clients which use standard
@@ -546,13 +537,13 @@ void MCAssembler::WriteSectionData(const MCSectionData *SD,
}
-uint64_t MCAssembler::HandleFixup(const MCAsmLayout &Layout,
+uint64_t MCAssembler::handleFixup(const MCAsmLayout &Layout,
MCFragment &F,
const MCFixup &Fixup) {
// Evaluate the fixup.
MCValue Target;
uint64_t FixedValue;
- if (!EvaluateFixup(Layout, Fixup, &F, Target, FixedValue)) {
+ if (!evaluateFixup(Layout, Fixup, &F, Target, FixedValue)) {
// The fixup was unresolved, we need a relocation. Inform the object
// writer of the relocation, and give it an opportunity to adjust the
// fixup value if need be.
@@ -592,7 +583,7 @@ void MCAssembler::Finish() {
}
// Layout until everything fits.
- while (LayoutOnce(Layout))
+ while (layoutOnce(Layout))
continue;
DEBUG_WITH_TYPE("mc-dump", {
@@ -600,7 +591,7 @@ void MCAssembler::Finish() {
dump(); });
// Finalize the layout, including fragment lowering.
- FinishLayout(Layout);
+ finishLayout(Layout);
DEBUG_WITH_TYPE("mc-dump", {
llvm::errs() << "assembler backend - final-layout\n--\n";
@@ -621,8 +612,8 @@ void MCAssembler::Finish() {
for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(),
ie3 = DF->fixup_end(); it3 != ie3; ++it3) {
MCFixup &Fixup = *it3;
- uint64_t FixedValue = HandleFixup(Layout, *DF, Fixup);
- getBackend().ApplyFixup(Fixup, DF->getContents().data(),
+ uint64_t FixedValue = handleFixup(Layout, *DF, Fixup);
+ getBackend().applyFixup(Fixup, DF->getContents().data(),
DF->getContents().size(), FixedValue);
}
}
@@ -631,8 +622,8 @@ void MCAssembler::Finish() {
for (MCInstFragment::fixup_iterator it3 = IF->fixup_begin(),
ie3 = IF->fixup_end(); it3 != ie3; ++it3) {
MCFixup &Fixup = *it3;
- uint64_t FixedValue = HandleFixup(Layout, *IF, Fixup);
- getBackend().ApplyFixup(Fixup, IF->getCode().data(),
+ uint64_t FixedValue = handleFixup(Layout, *IF, Fixup);
+ getBackend().applyFixup(Fixup, IF->getCode().data(),
IF->getCode().size(), FixedValue);
}
}
@@ -645,8 +636,8 @@ void MCAssembler::Finish() {
stats::ObjectBytes += OS.tell() - StartOffset;
}
-bool MCAssembler::FixupNeedsRelaxation(const MCFixup &Fixup,
- const MCFragment *DF,
+bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
+ const MCInstFragment *DF,
const MCAsmLayout &Layout) const {
if (getRelaxAll())
return true;
@@ -654,34 +645,31 @@ bool MCAssembler::FixupNeedsRelaxation(const MCFixup &Fixup,
// If we cannot resolve the fixup value, it requires relaxation.
MCValue Target;
uint64_t Value;
- if (!EvaluateFixup(Layout, Fixup, DF, Target, Value))
+ if (!evaluateFixup(Layout, Fixup, DF, Target, Value))
return true;
- // Otherwise, relax if the value is too big for a (signed) i8.
- //
- // FIXME: This is target dependent!
- return int64_t(Value) != int64_t(int8_t(Value));
+ return getBackend().fixupNeedsRelaxation(Fixup, Value, DF, Layout);
}
-bool MCAssembler::FragmentNeedsRelaxation(const MCInstFragment *IF,
+bool MCAssembler::fragmentNeedsRelaxation(const MCInstFragment *IF,
const MCAsmLayout &Layout) const {
// If this inst doesn't ever need relaxation, ignore it. This occurs when we
// are intentionally pushing out inst fragments, or because we relaxed a
// previous instruction to one that doesn't need relaxation.
- if (!getBackend().MayNeedRelaxation(IF->getInst()))
+ if (!getBackend().mayNeedRelaxation(IF->getInst()))
return false;
for (MCInstFragment::const_fixup_iterator it = IF->fixup_begin(),
ie = IF->fixup_end(); it != ie; ++it)
- if (FixupNeedsRelaxation(*it, IF, Layout))
+ if (fixupNeedsRelaxation(*it, IF, Layout))
return true;
return false;
}
-bool MCAssembler::RelaxInstruction(MCAsmLayout &Layout,
+bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
MCInstFragment &IF) {
- if (!FragmentNeedsRelaxation(&IF, Layout))
+ if (!fragmentNeedsRelaxation(&IF, Layout))
return false;
++stats::RelaxedInstructions;
@@ -692,7 +680,7 @@ bool MCAssembler::RelaxInstruction(MCAsmLayout &Layout,
// Relax the fragment.
MCInst Relaxed;
- getBackend().RelaxInstruction(IF.getInst(), Relaxed);
+ getBackend().relaxInstruction(IF.getInst(), Relaxed);
// Encode the new instruction.
//
@@ -715,7 +703,7 @@ bool MCAssembler::RelaxInstruction(MCAsmLayout &Layout,
return true;
}
-bool MCAssembler::RelaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
+bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
int64_t Value = 0;
uint64_t OldSize = LF.getContents().size();
bool IsAbs = LF.getValue().EvaluateAsAbsolute(Value, Layout);
@@ -732,8 +720,8 @@ bool MCAssembler::RelaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
return OldSize != LF.getContents().size();
}
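// Editor's note: the sketch below was added by the editor and is not part of
// this commit. It shows the ULEB128 scheme that makes an MCLEBFragment's size
// layout-dependent -- every extra seven bits of magnitude costs one byte --
// which is why relaxLEB() must re-encode the value after each layout pass.
static void encodeULEB128Sketch(uint64_t Value, SmallVectorImpl<char> &Out) {
  do {
    uint8_t Byte = Value & 0x7f;   // low seven bits of the value
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80;                // continuation bit: more groups follow
    Out.push_back(char(Byte));
  } while (Value != 0);
}
// For example, 0x7f encodes as the single byte 0x7f while 0x80 becomes the
// pair 0x80 0x01, so relaxLEB() reports a change exactly when the re-encoded
// size differs from the old getContents().size().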
-bool MCAssembler::RelaxDwarfLineAddr(MCAsmLayout &Layout,
- MCDwarfLineAddrFragment &DF) {
+bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
+ MCDwarfLineAddrFragment &DF) {
int64_t AddrDelta = 0;
uint64_t OldSize = DF.getContents().size();
bool IsAbs = DF.getAddrDelta().EvaluateAsAbsolute(AddrDelta, Layout);
@@ -749,7 +737,7 @@ bool MCAssembler::RelaxDwarfLineAddr(MCAsmLayout &Layout,
return OldSize != Data.size();
}
-bool MCAssembler::RelaxDwarfCallFrameFragment(MCAsmLayout &Layout,
+bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
MCDwarfCallFrameFragment &DF) {
int64_t AddrDelta = 0;
uint64_t OldSize = DF.getContents().size();
@@ -764,7 +752,7 @@ bool MCAssembler::RelaxDwarfCallFrameFragment(MCAsmLayout &Layout,
return OldSize != Data.size();
}
-bool MCAssembler::LayoutSectionOnce(MCAsmLayout &Layout,
+bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout,
MCSectionData &SD) {
MCFragment *FirstInvalidFragment = NULL;
// Scan for fragments that need relaxation.
@@ -776,19 +764,19 @@ bool MCAssembler::LayoutSectionOnce(MCAsmLayout &Layout,
default:
break;
case MCFragment::FT_Inst:
- relaxedFrag = RelaxInstruction(Layout, *cast<MCInstFragment>(it2));
+ relaxedFrag = relaxInstruction(Layout, *cast<MCInstFragment>(it2));
break;
case MCFragment::FT_Dwarf:
- relaxedFrag = RelaxDwarfLineAddr(Layout,
+ relaxedFrag = relaxDwarfLineAddr(Layout,
*cast<MCDwarfLineAddrFragment>(it2));
break;
case MCFragment::FT_DwarfFrame:
relaxedFrag =
- RelaxDwarfCallFrameFragment(Layout,
+ relaxDwarfCallFrameFragment(Layout,
*cast<MCDwarfCallFrameFragment>(it2));
break;
case MCFragment::FT_LEB:
- relaxedFrag = RelaxLEB(Layout, *cast<MCLEBFragment>(it2));
+ relaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(it2));
break;
}
// Update the layout, and remember that we relaxed.
@@ -802,20 +790,20 @@ bool MCAssembler::LayoutSectionOnce(MCAsmLayout &Layout,
return false;
}
-bool MCAssembler::LayoutOnce(MCAsmLayout &Layout) {
+bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
++stats::RelaxationSteps;
bool WasRelaxed = false;
for (iterator it = begin(), ie = end(); it != ie; ++it) {
MCSectionData &SD = *it;
- while(LayoutSectionOnce(Layout, SD))
+ while(layoutSectionOnce(Layout, SD))
WasRelaxed = true;
}
return WasRelaxed;
}
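// Editor's note (added, not part of this commit): MCAssembler::Finish() above
// drives this routine to a fixed point -- `while (layoutOnce(Layout))
// continue;` -- and the iteration terminates because, broadly speaking, each
// relaxation step only grows a fragment toward a bounded worst-case encoding,
// so every pass either relaxes something new or reports no change.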
-void MCAssembler::FinishLayout(MCAsmLayout &Layout) {
+void MCAssembler::finishLayout(MCAsmLayout &Layout) {
// The layout is done. Mark every fragment as valid.
for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
Layout.getFragmentOffset(&*Layout.getSectionOrder()[i]->rbegin());
@@ -975,3 +963,13 @@ void MCAssembler::dump() {
}
OS << "]>\n";
}
+
+// anchors for MC*Fragment vtables
+void MCDataFragment::anchor() { }
+void MCInstFragment::anchor() { }
+void MCAlignFragment::anchor() { }
+void MCFillFragment::anchor() { }
+void MCOrgFragment::anchor() { }
+void MCLEBFragment::anchor() { }
+void MCDwarfLineAddrFragment::anchor() { }
+void MCDwarfCallFrameFragment::anchor() { }
diff --git a/contrib/llvm/lib/MC/MCCodeGenInfo.cpp b/contrib/llvm/lib/MC/MCCodeGenInfo.cpp
index 236e7de..d9dcfd0 100644
--- a/contrib/llvm/lib/MC/MCCodeGenInfo.cpp
+++ b/contrib/llvm/lib/MC/MCCodeGenInfo.cpp
@@ -15,7 +15,9 @@
#include "llvm/MC/MCCodeGenInfo.h"
using namespace llvm;
-void MCCodeGenInfo::InitMCCodeGenInfo(Reloc::Model RM, CodeModel::Model CM) {
+void MCCodeGenInfo::InitMCCodeGenInfo(Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
RelocationModel = RM;
CMModel = CM;
+ OptLevel = OL;
}
diff --git a/contrib/llvm/lib/MC/MCContext.cpp b/contrib/llvm/lib/MC/MCContext.cpp
index 82690ee..d3c4fb1 100644
--- a/contrib/llvm/lib/MC/MCContext.cpp
+++ b/contrib/llvm/lib/MC/MCContext.cpp
@@ -20,6 +20,9 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ELF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/Signals.h"
using namespace llvm;
typedef StringMap<const MCSectionMachO*> MachOUniqueMapTy;
@@ -28,8 +31,8 @@ typedef StringMap<const MCSectionCOFF*> COFFUniqueMapTy;
MCContext::MCContext(const MCAsmInfo &mai, const MCRegisterInfo &mri,
- const MCObjectFileInfo *mofi) :
- MAI(mai), MRI(mri), MOFI(mofi),
+ const MCObjectFileInfo *mofi, const SourceMgr *mgr) :
+ SrcMgr(mgr), MAI(mai), MRI(mri), MOFI(mofi),
Allocator(), Symbols(Allocator), UsedNames(Allocator),
NextUniqueID(0),
CurrentDwarfLoc(0,0,0,DWARF2_FLAG_IS_STMT,0,0),
@@ -43,6 +46,8 @@ MCContext::MCContext(const MCAsmInfo &mai, const MCRegisterInfo &mri,
SecureLogUsed = false;
DwarfLocSeen = false;
+ GenDwarfForAssembly = false;
+ GenDwarfFileNumber = 0;
}
MCContext::~MCContext() {
@@ -248,7 +253,8 @@ const MCSection *MCContext::getCOFFSection(StringRef Section,
/// directory tables. If the file number has already been allocated it is an
/// error: zero is returned and the client reports the error. Otherwise the
/// allocated file number is returned. The file numbers may be in any order.
-unsigned MCContext::GetDwarfFile(StringRef FileName, unsigned FileNumber) {
+unsigned MCContext::GetDwarfFile(StringRef Directory, StringRef FileName,
+ unsigned FileNumber) {
// TODO: a FileNumber of zero says to use the next available file number.
// Note: in GenericAsmParser::ParseDirectiveFile() FileNumber was checked
  // to not be less than one. This needs to be changed to be not less than zero.
@@ -266,19 +272,23 @@ unsigned MCContext::GetDwarfFile(StringRef FileName, unsigned FileNumber) {
// Get the new MCDwarfFile slot for this FileNumber.
MCDwarfFile *&File = MCDwarfFiles[FileNumber];
- // Separate the directory part from the basename of the FileName.
- std::pair<StringRef, StringRef> Slash = FileName.rsplit('/');
+ if (Directory.empty()) {
+ // Separate the directory part from the basename of the FileName.
+ std::pair<StringRef, StringRef> Slash = FileName.rsplit('/');
+ Directory = Slash.second;
+ if (!Directory.empty()) {
+ Directory = Slash.first;
+ FileName = Slash.second;
+ }
+ }
  // Find or make an entry in the MCDwarfDirs vector for this Directory.
- StringRef Name;
- unsigned DirIndex;
// Capture directory name.
- if (Slash.second.empty()) {
- Name = Slash.first;
- DirIndex = 0; // For FileNames with no directories a DirIndex of 0 is used.
+ unsigned DirIndex;
+ if (Directory.empty()) {
+ // For FileNames with no directories a DirIndex of 0 is used.
+ DirIndex = 0;
} else {
- StringRef Directory = Slash.first;
- Name = Slash.second;
DirIndex = 0;
for (unsigned End = MCDwarfDirs.size(); DirIndex < End; DirIndex++) {
if (Directory == MCDwarfDirs[DirIndex])
@@ -291,16 +301,16 @@ unsigned MCContext::GetDwarfFile(StringRef FileName, unsigned FileNumber) {
}
// The DirIndex is one based, as DirIndex of 0 is used for FileNames with
// no directories. MCDwarfDirs[] is unlike MCDwarfFiles[] in that the
- // directory names are stored at MCDwarfDirs[DirIndex-1] where FileNames are
- // stored at MCDwarfFiles[FileNumber].Name .
+  // directory names are stored at MCDwarfDirs[DirIndex-1] whereas FileNames
+  // are stored at MCDwarfFiles[FileNumber].Name.
DirIndex++;
}
// Now make the MCDwarfFile entry and place it in the slot in the MCDwarfFiles
// vector.
- char *Buf = static_cast<char *>(Allocate(Name.size()));
- memcpy(Buf, Name.data(), Name.size());
- File = new (*this) MCDwarfFile(StringRef(Buf, Name.size()), DirIndex);
+ char *Buf = static_cast<char *>(Allocate(FileName.size()));
+ memcpy(Buf, FileName.data(), FileName.size());
+ File = new (*this) MCDwarfFile(StringRef(Buf, FileName.size()), DirIndex);
// return the allocated FileNumber.
return FileNumber;
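// Editor's note (worked illustration added by the editor, not part of this
// commit): with an empty Directory argument, GetDwarfFile("", "includes/foo.s",
// 1) rsplits the name into ("includes", "foo.s"), stores "includes" as
// MCDwarfDirs[0], and records MCDwarfFiles[1] with Name "foo.s" and DirIndex 1;
// a bare "foo.s" would instead get the reserved DirIndex 0.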
@@ -314,3 +324,19 @@ bool MCContext::isValidDwarfFileNumber(unsigned FileNumber) {
return MCDwarfFiles[FileNumber] != 0;
}
+
+void MCContext::FatalError(SMLoc Loc, const Twine &Msg) {
+ // If we have a source manager and a location, use it. Otherwise just
+ // use the generic report_fatal_error().
+ if (!SrcMgr || Loc == SMLoc())
+ report_fatal_error(Msg);
+
+ // Use the source manager to print the message.
+ SrcMgr->PrintMessage(Loc, SourceMgr::DK_Error, Msg);
+
+ // If we reached here, we are failing ungracefully. Run the interrupt handlers
+ // to make sure any special cleanups get done, in particular that we remove
+ // files registered with RemoveFileOnSignal.
+ sys::RunInterruptHandlers();
+ exit(1);
+}
diff --git a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
index 16e66dc..35f675d 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.cpp
@@ -15,10 +15,13 @@
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class Target;
@@ -36,6 +39,12 @@ LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo,
int TagType, LLVMOpInfoCallback GetOpInfo,
LLVMSymbolLookupCallback SymbolLookUp) {
// Initialize targets and assembly printers/parsers.
+  // FIXME: Clients are responsible for initializing the targets, which would
+  // normally be done by calling routines in "llvm-c/Target.h" that are static
+  // inline functions. But the current use of LLVMCreateDisasm() is to load
+  // libLTO dynamically with dlopen() and then look up its symbols with
+  // dlsym(). Since those initialization routines are static inline, they are
+  // not visible to dlsym(), which is why the calls were added back in this
+  // 'C' library API.
llvm::InitializeAllTargetInfos();
llvm::InitializeAllTargetMCs();
llvm::InitializeAllAsmParsers();
@@ -50,6 +59,9 @@ LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo,
const MCAsmInfo *MAI = TheTarget->createMCAsmInfo(TripleName);
assert(MAI && "Unable to create target asm info!");
+ const MCInstrInfo *MII = TheTarget->createMCInstrInfo();
+ assert(MII && "Unable to create target instruction info!");
+
const MCRegisterInfo *MRI = TheTarget->createMCRegInfo(TripleName);
assert(MRI && "Unable to create target register info!");
@@ -73,13 +85,13 @@ LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo,
// Set up the instruction printer.
int AsmPrinterVariant = MAI->getAssemblerDialect();
MCInstPrinter *IP = TheTarget->createMCInstPrinter(AsmPrinterVariant,
- *MAI, *STI);
+ *MAI, *MII, *MRI, *STI);
assert(IP && "Unable to create instruction printer!");
LLVMDisasmContext *DC = new LLVMDisasmContext(TripleName, DisInfo, TagType,
GetOpInfo, SymbolLookUp,
TheTarget, MAI, MRI,
- Ctx, DisAsm, IP);
+ STI, MII, Ctx, DisAsm, IP);
assert(DC && "Allocation failure!");
return DC;
@@ -170,5 +182,5 @@ size_t LLVMDisasmInstruction(LLVMDisasmContextRef DCR, uint8_t *Bytes,
return Size;
}
}
- return 0;
+ llvm_unreachable("Invalid DecodeStatus!");
}
diff --git a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
index 238ff7d..880a31a 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
+++ b/contrib/llvm/lib/MC/MCDisassembler/Disassembler.h
@@ -28,7 +28,9 @@ class MCContext;
class MCAsmInfo;
class MCDisassembler;
class MCInstPrinter;
+class MCInstrInfo;
class MCRegisterInfo;
+class MCSubtargetInfo;
class Target;
//
@@ -61,6 +63,10 @@ private:
llvm::OwningPtr<const llvm::MCAsmInfo> MAI;
// The register information for the target architecture.
llvm::OwningPtr<const llvm::MCRegisterInfo> MRI;
+ // The subtarget information for the target architecture.
+ llvm::OwningPtr<const llvm::MCSubtargetInfo> MSI;
+ // The instruction information for the target architecture.
+ llvm::OwningPtr<const llvm::MCInstrInfo> MII;
// The assembly context for creating symbols and MCExprs.
llvm::OwningPtr<const llvm::MCContext> Ctx;
// The disassembler for the target architecture.
@@ -78,6 +84,8 @@ public:
LLVMSymbolLookupCallback symbolLookUp,
const Target *theTarget, const MCAsmInfo *mAI,
const MCRegisterInfo *mRI,
+ const MCSubtargetInfo *mSI,
+ const MCInstrInfo *mII,
llvm::MCContext *ctx, const MCDisassembler *disAsm,
MCInstPrinter *iP) : TripleName(tripleName),
DisInfo(disInfo), TagType(tagType), GetOpInfo(getOpInfo),
@@ -85,6 +93,8 @@ public:
CommentStream(CommentsToEmit) {
MAI.reset(mAI);
MRI.reset(mRI);
+ MSI.reset(mSI);
+ MII.reset(mII);
Ctx.reset(ctx);
DisAsm.reset(disAsm);
IP.reset(iP);
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
index 83362a2..b2672ca 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.cpp
@@ -22,6 +22,7 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
@@ -34,10 +35,8 @@
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/TargetSelect.h"
using namespace llvm;
-bool EDDisassembler::sInitialized = false;
EDDisassembler::DisassemblerMap_t EDDisassembler::sDisassemblers;
struct TripleMap {
@@ -49,8 +48,7 @@ static struct TripleMap triplemap[] = {
{ Triple::x86, "i386-unknown-unknown" },
{ Triple::x86_64, "x86_64-unknown-unknown" },
{ Triple::arm, "arm-unknown-unknown" },
- { Triple::thumb, "thumb-unknown-unknown" },
- { Triple::InvalidArch, NULL, }
+ { Triple::thumb, "thumb-unknown-unknown" }
};
/// tripleFromArch - Returns the triple string corresponding to a given architecture,
@@ -77,90 +75,69 @@ static const char *tripleFromArch(Triple::ArchType arch) {
static int getLLVMSyntaxVariant(Triple::ArchType arch,
EDDisassembler::AssemblySyntax syntax) {
switch (syntax) {
- default:
- return -1;
// Mappings below from X86AsmPrinter.cpp
case EDDisassembler::kEDAssemblySyntaxX86ATT:
if (arch == Triple::x86 || arch == Triple::x86_64)
return 0;
- else
- return -1;
+ break;
case EDDisassembler::kEDAssemblySyntaxX86Intel:
if (arch == Triple::x86 || arch == Triple::x86_64)
return 1;
- else
- return -1;
+ break;
case EDDisassembler::kEDAssemblySyntaxARMUAL:
if (arch == Triple::arm || arch == Triple::thumb)
return 0;
- else
- return -1;
+ break;
}
-}
-void EDDisassembler::initialize() {
- if (sInitialized)
- return;
-
- sInitialized = true;
-
- InitializeAllTargetInfos();
- InitializeAllTargetMCs();
- InitializeAllAsmParsers();
- InitializeAllDisassemblers();
+ return -1;
}
-#undef BRINGUP_TARGET
-
EDDisassembler *EDDisassembler::getDisassembler(Triple::ArchType arch,
AssemblySyntax syntax) {
+ const char *triple = tripleFromArch(arch);
+ return getDisassembler(StringRef(triple), syntax);
+}
+
+EDDisassembler *EDDisassembler::getDisassembler(StringRef str,
+ AssemblySyntax syntax) {
CPUKey key;
- key.Arch = arch;
+ key.Triple = str.str();
key.Syntax = syntax;
-
+
EDDisassembler::DisassemblerMap_t::iterator i = sDisassemblers.find(key);
-
+
if (i != sDisassemblers.end()) {
- return i->second;
- } else {
- EDDisassembler* sdd = new EDDisassembler(key);
- if (!sdd->valid()) {
- delete sdd;
- return NULL;
- }
-
- sDisassemblers[key] = sdd;
-
- return sdd;
+ return i->second;
}
-
- return NULL;
-}
-EDDisassembler *EDDisassembler::getDisassembler(StringRef str,
- AssemblySyntax syntax) {
- return getDisassembler(Triple(str).getArch(), syntax);
+ EDDisassembler *sdd = new EDDisassembler(key);
+ if (!sdd->valid()) {
+ delete sdd;
+ return NULL;
+ }
+
+ sDisassemblers[key] = sdd;
+
+ return sdd;
}
EDDisassembler::EDDisassembler(CPUKey &key) :
Valid(false),
HasSemantics(false),
ErrorStream(nulls()),
- Key(key) {
- const char *triple = tripleFromArch(key.Arch);
-
- if (!triple)
- return;
+ Key(key),
+ TgtTriple(key.Triple.c_str()) {
- LLVMSyntaxVariant = getLLVMSyntaxVariant(key.Arch, key.Syntax);
+ LLVMSyntaxVariant = getLLVMSyntaxVariant(TgtTriple.getArch(), key.Syntax);
if (LLVMSyntaxVariant < 0)
return;
- std::string tripleString(triple);
+ std::string tripleString(key.Triple);
std::string errorString;
- Tgt = TargetRegistry::lookupTarget(tripleString,
+ Tgt = TargetRegistry::lookupTarget(key.Triple,
errorString);
if (!Tgt)
@@ -189,10 +166,16 @@ EDDisassembler::EDDisassembler(CPUKey &key) :
return;
InstInfos = Disassembler->getEDInfo();
-
+
+ MII.reset(Tgt->createMCInstrInfo());
+
+ if (!MII)
+ return;
+
InstString.reset(new std::string);
InstStream.reset(new raw_string_ostream(*InstString));
- InstPrinter.reset(Tgt->createMCInstPrinter(LLVMSyntaxVariant, *AsmInfo, *STI));
+ InstPrinter.reset(Tgt->createMCInstPrinter(LLVMSyntaxVariant, *AsmInfo,
+ *MII, *MRI, *STI));
if (!InstPrinter)
return;
@@ -279,7 +262,7 @@ void EDDisassembler::initMaps(const MCRegisterInfo &registerInfo) {
RegRMap[registerName] = registerIndex;
}
- switch (Key.Arch) {
+ switch (TgtTriple.getArch()) {
default:
break;
case Triple::x86:
@@ -337,13 +320,9 @@ int EDDisassembler::printInst(std::string &str, MCInst &inst) {
return 0;
}
-static void diag_handler(const SMDiagnostic &diag,
- void *context)
-{
- if (context) {
- EDDisassembler *disassembler = static_cast<EDDisassembler*>(context);
- diag.Print("", disassembler->ErrorStream);
- }
+static void diag_handler(const SMDiagnostic &diag, void *context) {
+ if (context)
+ diag.print("", static_cast<EDDisassembler*>(context)->ErrorStream);
}
int EDDisassembler::parseInst(SmallVectorImpl<MCParsedAsmOperand*> &operands,
@@ -351,7 +330,7 @@ int EDDisassembler::parseInst(SmallVectorImpl<MCParsedAsmOperand*> &operands,
const std::string &str) {
int ret = 0;
- switch (Key.Arch) {
+ switch (TgtTriple.getArch()) {
default:
return -1;
case Triple::x86:
@@ -376,8 +355,7 @@ int EDDisassembler::parseInst(SmallVectorImpl<MCParsedAsmOperand*> &operands,
context, *streamer,
*AsmInfo));
- StringRef triple = tripleFromArch(Key.Arch);
- OwningPtr<MCSubtargetInfo> STI(Tgt->createMCSubtargetInfo(triple, "", ""));
+ OwningPtr<MCSubtargetInfo> STI(Tgt->createMCSubtargetInfo(Key.Triple.c_str(), "", ""));
OwningPtr<MCTargetAsmParser>
TargetParser(Tgt->createMCAsmParser(*STI, *genericParser));
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h
index 38c2203..6f71908 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDDisassembler.h
@@ -25,6 +25,7 @@
#include <map>
#include <set>
+#include <string>
#include <vector>
namespace llvm {
@@ -35,8 +36,9 @@ class MCContext;
class MCAsmInfo;
class MCAsmLexer;
class MCDisassembler;
-class MCInstPrinter;
class MCInst;
+class MCInstPrinter;
+class MCInstrInfo;
class MCParsedAsmOperand;
class MCRegisterInfo;
class MCStreamer;
@@ -74,28 +76,26 @@ struct EDDisassembler {
/// pair
struct CPUKey {
/// The architecture type
- llvm::Triple::ArchType Arch;
+ std::string Triple;
/// The assembly syntax
AssemblySyntax Syntax;
/// operator== - Equality operator
bool operator==(const CPUKey &key) const {
- return (Arch == key.Arch &&
+ return (Triple == key.Triple &&
Syntax == key.Syntax);
}
/// operator< - Less-than operator
bool operator<(const CPUKey &key) const {
- return ((Arch < key.Arch) ||
- ((Arch == key.Arch) && Syntax < (key.Syntax)));
+ return ((Triple < key.Triple) ||
+ ((Triple == key.Triple) && Syntax < (key.Syntax)));
}
};
typedef std::map<CPUKey, EDDisassembler*> DisassemblerMap_t;
- /// True if the disassembler registry has been initialized; false if not
- static bool sInitialized;
/// A map from disassembler specifications to disassemblers. Populated
/// lazily.
static DisassemblerMap_t sDisassemblers;
@@ -116,9 +116,6 @@ struct EDDisassembler {
static EDDisassembler *getDisassembler(llvm::StringRef str,
AssemblySyntax syntax);
- /// initialize - Initializes the disassembler registry and the LLVM backend
- static void initialize();
-
////////////////////////
// Per-object members //
////////////////////////
@@ -131,14 +128,18 @@ struct EDDisassembler {
/// The stream to write errors to
llvm::raw_ostream &ErrorStream;
- /// The architecture/syntax pair for the current architecture
+ /// The triple/syntax pair for the current architecture
CPUKey Key;
+  /// The Triple for the current architecture
+ Triple TgtTriple;
/// The LLVM target corresponding to the disassembler
const llvm::Target *Tgt;
/// The assembly information for the target architecture
llvm::OwningPtr<const llvm::MCAsmInfo> AsmInfo;
/// The subtarget information for the target architecture
llvm::OwningPtr<const llvm::MCSubtargetInfo> STI;
+ // The instruction information for the target architecture.
+ llvm::OwningPtr<const llvm::MCInstrInfo> MII;
// The register information for the target architecture.
llvm::OwningPtr<const llvm::MCRegisterInfo> MRI;
/// The disassembler for the target architecture
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp
new file mode 100644
index 0000000..c658717
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDMain.cpp
@@ -0,0 +1,280 @@
+//===-- EDMain.cpp - LLVM Enhanced Disassembly C API ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the enhanced disassembler's public C API.
+//
+//===----------------------------------------------------------------------===//
+
+#include "EDDisassembler.h"
+#include "EDInst.h"
+#include "EDOperand.h"
+#include "EDToken.h"
+#include "llvm-c/EnhancedDisassembly.h"
+using namespace llvm;
+
+int EDGetDisassembler(EDDisassemblerRef *disassembler,
+ const char *triple,
+ EDAssemblySyntax_t syntax) {
+ EDDisassembler::AssemblySyntax Syntax;
+ switch (syntax) {
+ default: llvm_unreachable("Unknown assembly syntax!");
+ case kEDAssemblySyntaxX86Intel:
+ Syntax = EDDisassembler::kEDAssemblySyntaxX86Intel;
+ break;
+ case kEDAssemblySyntaxX86ATT:
+ Syntax = EDDisassembler::kEDAssemblySyntaxX86ATT;
+ break;
+ case kEDAssemblySyntaxARMUAL:
+ Syntax = EDDisassembler::kEDAssemblySyntaxARMUAL;
+ break;
+ }
+
+ EDDisassemblerRef ret = EDDisassembler::getDisassembler(triple, Syntax);
+
+ if (!ret)
+ return -1;
+ *disassembler = ret;
+ return 0;
+}
+
+int EDGetRegisterName(const char** regName,
+ EDDisassemblerRef disassembler,
+ unsigned regID) {
+ const char *name = ((EDDisassembler*)disassembler)->nameWithRegisterID(regID);
+ if (!name)
+ return -1;
+ *regName = name;
+ return 0;
+}
+
+int EDRegisterIsStackPointer(EDDisassemblerRef disassembler,
+ unsigned regID) {
+ return ((EDDisassembler*)disassembler)->registerIsStackPointer(regID) ? 1 : 0;
+}
+
+int EDRegisterIsProgramCounter(EDDisassemblerRef disassembler,
+ unsigned regID) {
+ return ((EDDisassembler*)disassembler)->registerIsProgramCounter(regID) ? 1:0;
+}
+
+unsigned int EDCreateInsts(EDInstRef *insts,
+ unsigned int count,
+ EDDisassemblerRef disassembler,
+ ::EDByteReaderCallback byteReader,
+ uint64_t address,
+ void *arg) {
+ unsigned int index;
+
+ for (index = 0; index < count; ++index) {
+ EDInst *inst = ((EDDisassembler*)disassembler)->createInst(byteReader,
+ address, arg);
+
+ if (!inst)
+ return index;
+
+ insts[index] = inst;
+ address += inst->byteSize();
+ }
+
+ return count;
+}
+
+void EDReleaseInst(EDInstRef inst) {
+ delete ((EDInst*)inst);
+}
+
+int EDInstByteSize(EDInstRef inst) {
+ return ((EDInst*)inst)->byteSize();
+}
+
+int EDGetInstString(const char **buf,
+ EDInstRef inst) {
+ return ((EDInst*)inst)->getString(*buf);
+}
+
+int EDInstID(unsigned *instID, EDInstRef inst) {
+ *instID = ((EDInst*)inst)->instID();
+ return 0;
+}
+
+int EDInstIsBranch(EDInstRef inst) {
+ return ((EDInst*)inst)->isBranch();
+}
+
+int EDInstIsMove(EDInstRef inst) {
+ return ((EDInst*)inst)->isMove();
+}
+
+int EDBranchTargetID(EDInstRef inst) {
+ return ((EDInst*)inst)->branchTargetID();
+}
+
+int EDMoveSourceID(EDInstRef inst) {
+ return ((EDInst*)inst)->moveSourceID();
+}
+
+int EDMoveTargetID(EDInstRef inst) {
+ return ((EDInst*)inst)->moveTargetID();
+}
+
+int EDNumTokens(EDInstRef inst) {
+ return ((EDInst*)inst)->numTokens();
+}
+
+int EDGetToken(EDTokenRef *token,
+ EDInstRef inst,
+ int index) {
+ return ((EDInst*)inst)->getToken(*(EDToken**)token, index);
+}
+
+int EDGetTokenString(const char **buf,
+ EDTokenRef token) {
+ return ((EDToken*)token)->getString(*buf);
+}
+
+int EDOperandIndexForToken(EDTokenRef token) {
+ return ((EDToken*)token)->operandID();
+}
+
+int EDTokenIsWhitespace(EDTokenRef token) {
+ return ((EDToken*)token)->type() == EDToken::kTokenWhitespace;
+}
+
+int EDTokenIsPunctuation(EDTokenRef token) {
+ return ((EDToken*)token)->type() == EDToken::kTokenPunctuation;
+}
+
+int EDTokenIsOpcode(EDTokenRef token) {
+ return ((EDToken*)token)->type() == EDToken::kTokenOpcode;
+}
+
+int EDTokenIsLiteral(EDTokenRef token) {
+ return ((EDToken*)token)->type() == EDToken::kTokenLiteral;
+}
+
+int EDTokenIsRegister(EDTokenRef token) {
+ return ((EDToken*)token)->type() == EDToken::kTokenRegister;
+}
+
+int EDTokenIsNegativeLiteral(EDTokenRef token) {
+ if (((EDToken*)token)->type() != EDToken::kTokenLiteral)
+ return -1;
+
+ return ((EDToken*)token)->literalSign();
+}
+
+int EDLiteralTokenAbsoluteValue(uint64_t *value, EDTokenRef token) {
+ if (((EDToken*)token)->type() != EDToken::kTokenLiteral)
+ return -1;
+
+ return ((EDToken*)token)->literalAbsoluteValue(*value);
+}
+
+int EDRegisterTokenValue(unsigned *registerID,
+ EDTokenRef token) {
+ if (((EDToken*)token)->type() != EDToken::kTokenRegister)
+ return -1;
+
+ return ((EDToken*)token)->registerID(*registerID);
+}
+
+int EDNumOperands(EDInstRef inst) {
+ return ((EDInst*)inst)->numOperands();
+}
+
+int EDGetOperand(EDOperandRef *operand,
+ EDInstRef inst,
+ int index) {
+ return ((EDInst*)inst)->getOperand(*(EDOperand**)operand, index);
+}
+
+int EDOperandIsRegister(EDOperandRef operand) {
+ return ((EDOperand*)operand)->isRegister();
+}
+
+int EDOperandIsImmediate(EDOperandRef operand) {
+ return ((EDOperand*)operand)->isImmediate();
+}
+
+int EDOperandIsMemory(EDOperandRef operand) {
+ return ((EDOperand*)operand)->isMemory();
+}
+
+int EDRegisterOperandValue(unsigned *value, EDOperandRef operand) {
+ if (!((EDOperand*)operand)->isRegister())
+ return -1;
+ *value = ((EDOperand*)operand)->regVal();
+ return 0;
+}
+
+int EDImmediateOperandValue(uint64_t *value, EDOperandRef operand) {
+ if (!((EDOperand*)operand)->isImmediate())
+ return -1;
+ *value = ((EDOperand*)operand)->immediateVal();
+ return 0;
+}
+
+int EDEvaluateOperand(uint64_t *result, EDOperandRef operand,
+ ::EDRegisterReaderCallback regReader, void *arg) {
+ return ((EDOperand*)operand)->evaluate(*result, regReader, arg);
+}
+
+#ifdef __BLOCKS__
+
+struct ByteReaderWrapper {
+ EDByteBlock_t byteBlock;
+};
+
+static int readerWrapperCallback(uint8_t *byte,
+ uint64_t address,
+ void *arg) {
+ struct ByteReaderWrapper *wrapper = (struct ByteReaderWrapper *)arg;
+ return wrapper->byteBlock(byte, address);
+}
+
+unsigned int EDBlockCreateInsts(EDInstRef *insts,
+ int count,
+ EDDisassemblerRef disassembler,
+ EDByteBlock_t byteBlock,
+ uint64_t address) {
+ struct ByteReaderWrapper wrapper;
+ wrapper.byteBlock = byteBlock;
+
+ return EDCreateInsts(insts,
+ count,
+ disassembler,
+ readerWrapperCallback,
+ address,
+ (void*)&wrapper);
+}
+
+int EDBlockEvaluateOperand(uint64_t *result, EDOperandRef operand,
+ EDRegisterBlock_t regBlock) {
+ return ((EDOperand*)operand)->evaluate(*result, regBlock);
+}
+
+int EDBlockVisitTokens(EDInstRef inst, ::EDTokenVisitor_t visitor) {
+ return ((EDInst*)inst)->visitTokens((llvm::EDTokenVisitor_t)visitor);
+}
+
+#else
+
+extern "C" unsigned int EDBlockCreateInsts() {
+ return 0;
+}
+
+extern "C" int EDBlockEvaluateOperand() {
+ return -1;
+}
+
+extern "C" int EDBlockVisitTokens() {
+ return -1;
+}
+
+#endif
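+
+// Editor's note: an illustrative usage sketch added by the editor, not part
+// of this commit; the triple, buffer, and printf() reporting are assumptions.
+//
+//   static int readByte(uint8_t *byte, uint64_t address, void *arg) {
+//     *byte = ((const uint8_t *)arg)[address];   // serve bytes from a buffer
+//     return 0;
+//   }
+//
+//   void printFirstInst(const uint8_t *code) {
+//     EDDisassemblerRef dis;
+//     if (EDGetDisassembler(&dis, "x86_64-unknown-unknown",
+//                           kEDAssemblySyntaxX86ATT))
+//       return;                                   // unknown triple or syntax
+//     EDInstRef inst;
+//     if (EDCreateInsts(&inst, 1, dis, readByte, 0, (void *)code) != 1)
+//       return;                                   // could not decode one inst
+//     const char *str;
+//     if (!EDGetInstString(&str, inst))           // 0 means success
+//       printf("%s", str);                        // requires <stdio.h>
+//     EDReleaseInst(inst);
+//   }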
diff --git a/contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp b/contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp
index 6a4e56f..48b3746 100644
--- a/contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp
+++ b/contrib/llvm/lib/MC/MCDisassembler/EDOperand.cpp
@@ -30,8 +30,10 @@ EDOperand::EDOperand(const EDDisassembler &disassembler,
MCOpIndex(mcOpIndex) {
unsigned int numMCOperands = 0;
- if (Disassembler.Key.Arch == Triple::x86 ||
- Disassembler.Key.Arch == Triple::x86_64) {
+ Triple::ArchType arch = Disassembler.TgtTriple.getArch();
+
+ if (arch == Triple::x86 ||
+ arch == Triple::x86_64) {
uint8_t operandType = inst.ThisInstInfo->operandTypes[opIndex];
switch (operandType) {
@@ -54,8 +56,8 @@ EDOperand::EDOperand(const EDDisassembler &disassembler,
break;
}
}
- else if (Disassembler.Key.Arch == Triple::arm ||
- Disassembler.Key.Arch == Triple::thumb) {
+ else if (arch == Triple::arm ||
+ arch == Triple::thumb) {
uint8_t operandType = inst.ThisInstInfo->operandTypes[opIndex];
switch (operandType) {
@@ -126,7 +128,9 @@ int EDOperand::evaluate(uint64_t &result,
void *arg) {
uint8_t operandType = Inst.ThisInstInfo->operandTypes[OpIndex];
- switch (Disassembler.Key.Arch) {
+ Triple::ArchType arch = Disassembler.TgtTriple.getArch();
+
+ switch (arch) {
default:
return -1;
case Triple::x86:
@@ -168,7 +172,7 @@ int EDOperand::evaluate(uint64_t &result,
unsigned segmentReg = Inst.Inst->getOperand(MCOpIndex+4).getReg();
- if (segmentReg != 0 && Disassembler.Key.Arch == Triple::x86_64) {
+ if (segmentReg != 0 && arch == Triple::x86_64) {
unsigned fsID = Disassembler.registerIDWithName("FS");
unsigned gsID = Disassembler.registerIDWithName("GS");
@@ -200,7 +204,6 @@ int EDOperand::evaluate(uint64_t &result,
return 0;
}
} // switch (operandType)
- break;
case Triple::arm:
case Triple::thumb:
switch (operandType) {
@@ -236,10 +239,7 @@ int EDOperand::evaluate(uint64_t &result,
return 0;
}
}
- break;
}
-
- return -1;
}
int EDOperand::isRegister() {
diff --git a/contrib/llvm/lib/MC/MCDwarf.cpp b/contrib/llvm/lib/MC/MCDwarf.cpp
index 4658a30..84a34f1 100644
--- a/contrib/llvm/lib/MC/MCDwarf.cpp
+++ b/contrib/llvm/lib/MC/MCDwarf.cpp
@@ -19,10 +19,12 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Config/config.h"
using namespace llvm;
// Given a special op, return the address skip amount (in units of
@@ -207,7 +209,7 @@ static inline void EmitDwarfLineTable(MCStreamer *MCOS,
//
// This emits the Dwarf file and the line tables.
//
-void MCDwarfFileTable::Emit(MCStreamer *MCOS) {
+const MCSymbol *MCDwarfFileTable::Emit(MCStreamer *MCOS) {
MCContext &context = MCOS->getContext();
// Switch to the section where the table will be emitted into.
MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfLineSection());
@@ -320,6 +322,8 @@ void MCDwarfFileTable::Emit(MCStreamer *MCOS) {
// This is the end of the section, so set the value of the symbol at the end
// of this section (that was used in a previous expression).
MCOS->EmitLabel(LineEndSym);
+
+ return LineStartSym;
}
/// Utility function to write the encoding to an object writer.
@@ -372,10 +376,7 @@ void MCDwarfLineAddr::Encode(int64_t LineDelta, uint64_t AddrDelta,
// it with DW_LNS_advance_line.
if (Temp >= DWARF2_LINE_RANGE) {
OS << char(dwarf::DW_LNS_advance_line);
- SmallString<32> Tmp;
- raw_svector_ostream OSE(Tmp);
- MCObjectWriter::EncodeSLEB128(LineDelta, OSE);
- OS << OSE.str();
+ MCObjectWriter::EncodeSLEB128(LineDelta, OS);
LineDelta = 0;
Temp = 0 - DWARF2_LINE_BASE;
@@ -411,10 +412,7 @@ void MCDwarfLineAddr::Encode(int64_t LineDelta, uint64_t AddrDelta,
// Otherwise use DW_LNS_advance_pc.
OS << char(dwarf::DW_LNS_advance_pc);
- SmallString<32> Tmp;
- raw_svector_ostream OSE(Tmp);
- MCObjectWriter::EncodeULEB128(AddrDelta, OSE);
- OS << OSE.str();
+ MCObjectWriter::EncodeULEB128(AddrDelta, OS);
if (NeedCopy)
OS << char(dwarf::DW_LNS_copy);
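// Editor's note (worked example added by the editor, not part of this commit),
// assuming LLVM's usual parameters DWARF2_LINE_BASE = -5, DWARF2_LINE_RANGE =
// 14 and an opcode base of 13: for LineDelta = 2 and AddrDelta = 3, Temp =
// 2 - (-5) = 7 and the special opcode 7 + 14*3 + 13 = 62 advances both the
// line and the address in one byte -- the compact case tried above before
// falling back to DW_LNS_advance_line / DW_LNS_advance_pc.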
@@ -430,6 +428,349 @@ void MCDwarfFile::dump() const {
print(dbgs());
}
+// Utility function to write a tuple for .debug_abbrev.
+static void EmitAbbrev(MCStreamer *MCOS, uint64_t Name, uint64_t Form) {
+ MCOS->EmitULEB128IntValue(Name);
+ MCOS->EmitULEB128IntValue(Form);
+}
+
+// When generating dwarf for assembly source files this emits the data
+// for the .debug_abbrev section, which contains three DIEs.
+static void EmitGenDwarfAbbrev(MCStreamer *MCOS) {
+ MCContext &context = MCOS->getContext();
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfAbbrevSection());
+
+ // DW_TAG_compile_unit DIE abbrev (1).
+ MCOS->EmitULEB128IntValue(1);
+ MCOS->EmitULEB128IntValue(dwarf::DW_TAG_compile_unit);
+ MCOS->EmitIntValue(dwarf::DW_CHILDREN_yes, 1);
+ EmitAbbrev(MCOS, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4);
+ EmitAbbrev(MCOS, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr);
+ EmitAbbrev(MCOS, dwarf::DW_AT_high_pc, dwarf::DW_FORM_addr);
+ EmitAbbrev(MCOS, dwarf::DW_AT_name, dwarf::DW_FORM_string);
+ EmitAbbrev(MCOS, dwarf::DW_AT_comp_dir, dwarf::DW_FORM_string);
+ StringRef DwarfDebugFlags = context.getDwarfDebugFlags();
+ if (!DwarfDebugFlags.empty())
+ EmitAbbrev(MCOS, dwarf::DW_AT_APPLE_flags, dwarf::DW_FORM_string);
+ EmitAbbrev(MCOS, dwarf::DW_AT_producer, dwarf::DW_FORM_string);
+ EmitAbbrev(MCOS, dwarf::DW_AT_language, dwarf::DW_FORM_data2);
+ EmitAbbrev(MCOS, 0, 0);
+
+ // DW_TAG_label DIE abbrev (2).
+ MCOS->EmitULEB128IntValue(2);
+ MCOS->EmitULEB128IntValue(dwarf::DW_TAG_label);
+ MCOS->EmitIntValue(dwarf::DW_CHILDREN_yes, 1);
+ EmitAbbrev(MCOS, dwarf::DW_AT_name, dwarf::DW_FORM_string);
+ EmitAbbrev(MCOS, dwarf::DW_AT_decl_file, dwarf::DW_FORM_data4);
+ EmitAbbrev(MCOS, dwarf::DW_AT_decl_line, dwarf::DW_FORM_data4);
+ EmitAbbrev(MCOS, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr);
+ EmitAbbrev(MCOS, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag);
+ EmitAbbrev(MCOS, 0, 0);
+
+ // DW_TAG_unspecified_parameters DIE abbrev (3).
+ MCOS->EmitULEB128IntValue(3);
+ MCOS->EmitULEB128IntValue(dwarf::DW_TAG_unspecified_parameters);
+ MCOS->EmitIntValue(dwarf::DW_CHILDREN_no, 1);
+ EmitAbbrev(MCOS, 0, 0);
+
+ // Terminate the abbreviations for this compilation unit.
+ MCOS->EmitIntValue(0, 1);
+}
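+
+// Editor's note (added by the editor, not part of this commit): each DIE
+// emitted into .debug_info below begins with the ULEB128 abbrev code declared
+// here -- 1 for the compile unit, 2 for a label, 3 for unspecified
+// parameters -- and then supplies attribute values in exactly the order the
+// EmitAbbrev() tuples above declare them.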
+
+// When generating dwarf for assembly source files this emits the data for the
+// .debug_aranges section, which contains a header and a table of pairs of
+// PointerSize'ed values for the address and size of section(s) with line table
+// entries (just the default .text in our case) and a terminating pair of zeros.
+static void EmitGenDwarfAranges(MCStreamer *MCOS) {
+ MCContext &context = MCOS->getContext();
+
+  // Create a symbol at the end of the section that we are creating the dwarf
+  // debugging info for, to use later in here as part of the expression that
+  // calculates the size of the section for the table.
+ MCOS->SwitchSection(context.getGenDwarfSection());
+ MCSymbol *SectionEndSym = context.CreateTempSymbol();
+ MCOS->EmitLabel(SectionEndSym);
+ context.setGenDwarfSectionEndSym(SectionEndSym);
+
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfARangesSection());
+
+  // This will be the length of the .debug_aranges section; first account for
+  // the size of each item in the header (see below where we emit these items).
+ int Length = 4 + 2 + 4 + 1 + 1;
+
+ // Figure the padding after the header before the table of address and size
+  // pairs whose values are PointerSize'ed.
+ const MCAsmInfo &asmInfo = context.getAsmInfo();
+ int AddrSize = asmInfo.getPointerSize();
+ int Pad = 2 * AddrSize - (Length & (2 * AddrSize - 1));
+ if (Pad == 2 * AddrSize)
+ Pad = 0;
+ Length += Pad;
+
+ // Add the size of the pair of PointerSize'ed values for the address and size
+ // of the one default .text section we have in the table.
+ Length += 2 * AddrSize;
+ // And the pair of terminating zeros.
+ Length += 2 * AddrSize;
+
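+  // Editor's note (worked example added by the editor, not part of this
+  // commit): with AddrSize = 8 the header is 4+2+4+1+1 = 12 bytes and the
+  // table must be 2*AddrSize = 16-byte aligned, so Pad = 16 - (12 & 15) = 4;
+  // adding one address/size pair (16) and the terminating zeros (16) gives
+  // Length = 12 + 4 + 16 + 16 = 48, and the length field below emits 48 - 4.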
+
+ // Emit the header for this section.
+ // The 4 byte length not including the 4 byte value for the length.
+ MCOS->EmitIntValue(Length - 4, 4);
+ // The 2 byte version, which is 2.
+ MCOS->EmitIntValue(2, 2);
+  // The 4 byte offset to the compile unit in the .debug_info from the start
+  // of the .debug_info; it is at the start of that section, so this is zero.
+ MCOS->EmitIntValue(0, 4);
+ // The 1 byte size of an address.
+ MCOS->EmitIntValue(AddrSize, 1);
+  // The 1 byte size of a segment descriptor; we use a value of zero.
+ MCOS->EmitIntValue(0, 1);
+ // Align the header with the padding if needed, before we put out the table.
+ for(int i = 0; i < Pad; i++)
+ MCOS->EmitIntValue(0, 1);
+
+ // Now emit the table of pairs of PointerSize'ed values for the section(s)
+ // address and size, in our case just the one default .text section.
+ const MCExpr *Addr = MCSymbolRefExpr::Create(
+ context.getGenDwarfSectionStartSym(), MCSymbolRefExpr::VK_None, context);
+ const MCExpr *Size = MakeStartMinusEndExpr(*MCOS,
+ *context.getGenDwarfSectionStartSym(), *SectionEndSym, 0);
+ MCOS->EmitAbsValue(Addr, AddrSize);
+ MCOS->EmitAbsValue(Size, AddrSize);
+
+ // And finally the pair of terminating zeros.
+ MCOS->EmitIntValue(0, AddrSize);
+ MCOS->EmitIntValue(0, AddrSize);
+}
+
+// When generating dwarf for assembly source files this emits the data for the
+// .debug_info section, which contains three parts: the header, the
+// compile_unit DIE and a list of label DIEs.
+static void EmitGenDwarfInfo(MCStreamer *MCOS,
+ const MCSymbol *AbbrevSectionSymbol,
+ const MCSymbol *LineSectionSymbol) {
+ MCContext &context = MCOS->getContext();
+
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection());
+
+ // Create a symbol at the start and end of this section used in here for the
+ // expression to calculate the length in the header.
+ MCSymbol *InfoStart = context.CreateTempSymbol();
+ MCOS->EmitLabel(InfoStart);
+ MCSymbol *InfoEnd = context.CreateTempSymbol();
+
+ // First part: the header.
+
+ // The 4 byte total length of the information for this compilation unit, not
+ // including these 4 bytes.
+ const MCExpr *Length = MakeStartMinusEndExpr(*MCOS, *InfoStart, *InfoEnd, 4);
+ MCOS->EmitAbsValue(Length, 4);
+
+ // The 2 byte DWARF version, which is 2.
+ MCOS->EmitIntValue(2, 2);
+
+  // The 4 byte offset to the debug abbrevs from the start of the
+  // .debug_abbrev; it is at the start of that section, so this is zero.
+ if (AbbrevSectionSymbol) {
+ MCOS->EmitSymbolValue(AbbrevSectionSymbol, 4);
+ } else {
+ MCOS->EmitIntValue(0, 4);
+ }
+
+ const MCAsmInfo &asmInfo = context.getAsmInfo();
+ int AddrSize = asmInfo.getPointerSize();
+ // The 1 byte size of an address.
+ MCOS->EmitIntValue(AddrSize, 1);
+
+ // Second part: the compile_unit DIE.
+
+ // The DW_TAG_compile_unit DIE abbrev (1).
+ MCOS->EmitULEB128IntValue(1);
+
+  // DW_AT_stmt_list, a 4 byte offset into the .debug_line section; the line
+  // table is at the start of that section, so this is zero.
+ if (LineSectionSymbol) {
+ MCOS->EmitSymbolValue(LineSectionSymbol, 4);
+ } else {
+ MCOS->EmitIntValue(0, 4);
+ }
+
+ // AT_low_pc, the first address of the default .text section.
+ const MCExpr *Start = MCSymbolRefExpr::Create(
+ context.getGenDwarfSectionStartSym(), MCSymbolRefExpr::VK_None, context);
+ MCOS->EmitAbsValue(Start, AddrSize);
+
+ // AT_high_pc, the last address of the default .text section.
+ const MCExpr *End = MCSymbolRefExpr::Create(
+ context.getGenDwarfSectionEndSym(), MCSymbolRefExpr::VK_None, context);
+ MCOS->EmitAbsValue(End, AddrSize);
+
+ // AT_name, the name of the source file. Reconstruct from the first directory
+ // and file table entries.
+ const std::vector<StringRef> &MCDwarfDirs =
+ context.getMCDwarfDirs();
+ if (MCDwarfDirs.size() > 0) {
+ MCOS->EmitBytes(MCDwarfDirs[0], 0);
+ MCOS->EmitBytes("/", 0);
+ }
+ const std::vector<MCDwarfFile *> &MCDwarfFiles =
+ MCOS->getContext().getMCDwarfFiles();
+ MCOS->EmitBytes(MCDwarfFiles[1]->getName(), 0);
+ MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string.
+
+ // AT_comp_dir, the working directory the assembly was done in.
+ llvm::sys::Path CWD = llvm::sys::Path::GetCurrentDirectory();
+ MCOS->EmitBytes(StringRef(CWD.c_str()), 0);
+ MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string.
+
+ // AT_APPLE_flags, the command line arguments of the assembler tool.
+ StringRef DwarfDebugFlags = context.getDwarfDebugFlags();
+ if (!DwarfDebugFlags.empty()){
+ MCOS->EmitBytes(DwarfDebugFlags, 0);
+ MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string.
+ }
+
+ // AT_producer, the version of the assembler tool.
+ MCOS->EmitBytes(StringRef("llvm-mc (based on LLVM "), 0);
+ MCOS->EmitBytes(StringRef(PACKAGE_VERSION), 0);
+ MCOS->EmitBytes(StringRef(")"), 0);
+ MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string.
+
+  // AT_language, a 2 byte value. We use DW_LANG_Mips_Assembler as the dwarf2
+ // draft has no standard code for assembler.
+ MCOS->EmitIntValue(dwarf::DW_LANG_Mips_Assembler, 2);
+
+ // Third part: the list of label DIEs.
+
+ // Loop on saved info for dwarf labels and create the DIEs for them.
+ const std::vector<const MCGenDwarfLabelEntry *> &Entries =
+ MCOS->getContext().getMCGenDwarfLabelEntries();
+ for (std::vector<const MCGenDwarfLabelEntry *>::const_iterator it =
+ Entries.begin(), ie = Entries.end(); it != ie;
+ ++it) {
+ const MCGenDwarfLabelEntry *Entry = *it;
+
+ // The DW_TAG_label DIE abbrev (2).
+ MCOS->EmitULEB128IntValue(2);
+
+ // AT_name, of the label without any leading underbar.
+ MCOS->EmitBytes(Entry->getName(), 0);
+ MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string.
+
+ // AT_decl_file, index into the file table.
+ MCOS->EmitIntValue(Entry->getFileNumber(), 4);
+
+ // AT_decl_line, source line number.
+ MCOS->EmitIntValue(Entry->getLineNumber(), 4);
+
+ // AT_low_pc, start address of the label.
+ const MCExpr *AT_low_pc = MCSymbolRefExpr::Create(Entry->getLabel(),
+ MCSymbolRefExpr::VK_None, context);
+ MCOS->EmitAbsValue(AT_low_pc, AddrSize);
+
+ // DW_AT_prototyped, a one byte flag value of 0 saying we have no prototype.
+ MCOS->EmitIntValue(0, 1);
+
+ // The DW_TAG_unspecified_parameters DIE abbrev (3).
+ MCOS->EmitULEB128IntValue(3);
+
+ // Add the NULL DIE terminating the DW_TAG_unspecified_parameters DIE's.
+ MCOS->EmitIntValue(0, 1);
+ }
+ // Deallocate the MCGenDwarfLabelEntry classes that saved away the info
+ // for the dwarf labels.
+ for (std::vector<const MCGenDwarfLabelEntry *>::const_iterator it =
+ Entries.begin(), ie = Entries.end(); it != ie;
+ ++it) {
+ const MCGenDwarfLabelEntry *Entry = *it;
+ delete Entry;
+ }
+
+ // Add the NULL DIE terminating the Compile Unit DIE's.
+ MCOS->EmitIntValue(0, 1);
+
+ // Now set the value of the symbol at the end of the info section.
+ MCOS->EmitLabel(InfoEnd);
+}
+
+//
+// When generating dwarf for assembly source files this emits the Dwarf
+// sections.
+//
+void MCGenDwarfInfo::Emit(MCStreamer *MCOS, const MCSymbol *LineSectionSymbol) {
+ // Create the dwarf sections in this order (.debug_line already created).
+ MCContext &context = MCOS->getContext();
+ const MCAsmInfo &AsmInfo = context.getAsmInfo();
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection());
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfAbbrevSection());
+ MCSymbol *AbbrevSectionSymbol;
+ if (AsmInfo.doesDwarfRequireRelocationForSectionOffset()) {
+ AbbrevSectionSymbol = context.CreateTempSymbol();
+ MCOS->EmitLabel(AbbrevSectionSymbol);
+ } else {
+ AbbrevSectionSymbol = NULL;
+ LineSectionSymbol = NULL;
+ }
+ MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfARangesSection());
+
+ // If there are no line table entries then do not emit any section contents.
+ if (context.getMCLineSections().empty())
+ return;
+
+ // Output the data for .debug_aranges section.
+ EmitGenDwarfAranges(MCOS);
+
+ // Output the data for .debug_abbrev section.
+ EmitGenDwarfAbbrev(MCOS);
+
+ // Output the data for .debug_info section.
+ EmitGenDwarfInfo(MCOS, AbbrevSectionSymbol, LineSectionSymbol);
+}
+
+//
+// When generating dwarf for assembly source files this is called when a symbol
+// for a label is created. If this symbol is not a temporary and is in the
+// section that dwarf is being generated for, save the needed info to create
+// a dwarf label.
+//
+void MCGenDwarfLabelEntry::Make(MCSymbol *Symbol, MCStreamer *MCOS,
+ SourceMgr &SrcMgr, SMLoc &Loc) {
+ // We won't create dwarf labels for temporary symbols or symbols not in
+ // the default text.
+ if (Symbol->isTemporary())
+ return;
+ MCContext &context = MCOS->getContext();
+ if (context.getGenDwarfSection() != MCOS->getCurrentSection())
+ return;
+
+ // The dwarf label's name does not have the symbol name's leading
+ // underbar if any.
+ StringRef Name = Symbol->getName();
+ if (Name.startswith("_"))
+ Name = Name.substr(1, Name.size()-1);
+
+ // Get the dwarf file number to be used for the dwarf label.
+ unsigned FileNumber = context.getGenDwarfFileNumber();
+
+  // Finding the line number is the expensive part, which is why we don't just
+  // pass it in: for some symbols we won't create a dwarf label at all.
+ int CurBuffer = SrcMgr.FindBufferContainingLoc(Loc);
+ unsigned LineNumber = SrcMgr.FindLineNumber(Loc, CurBuffer);
+
+ // We create a temporary symbol for use for the AT_high_pc and AT_low_pc
+ // values so that they don't have things like an ARM thumb bit from the
+ // original symbol. So when used they won't get a low bit set after
+ // relocation.
+ MCSymbol *Label = context.CreateTempSymbol();
+ MCOS->EmitLabel(Label);
+
+  // Create an entry for the info and add it to the other entries.
+ MCGenDwarfLabelEntry *Entry =
+ new MCGenDwarfLabelEntry(Name, FileNumber, LineNumber, Label);
+ MCOS->getContext().addMCGenDwarfLabelEntry(Entry);
+}
+
static int getDataAlignmentFactor(MCStreamer &streamer) {
MCContext &context = streamer.getContext();
const MCAsmInfo &asmInfo = context.getAsmInfo();
@@ -445,8 +786,7 @@ static unsigned getSizeForEncoding(MCStreamer &streamer,
MCContext &context = streamer.getContext();
unsigned format = symbolEncoding & 0x0f;
switch (format) {
- default:
- assert(0 && "Unknown Encoding");
+ default: llvm_unreachable("Unknown Encoding");
case dwarf::DW_EH_PE_absptr:
case dwarf::DW_EH_PE_signed:
return context.getAsmInfo().getPointerSize();
@@ -520,6 +860,7 @@ namespace {
const MCSymbol *personality,
unsigned personalityEncoding,
const MCSymbol *lsda,
+ bool IsSignalFrame,
unsigned lsdaEncoding);
MCSymbol *EmitFDE(MCStreamer &streamer,
const MCSymbol &cieStart,
@@ -536,28 +877,40 @@ namespace {
static void EmitEncodingByte(MCStreamer &Streamer, unsigned Encoding,
StringRef Prefix) {
if (Streamer.isVerboseAsm()) {
- const char *EncStr = 0;
+ const char *EncStr;
switch (Encoding) {
- default: EncStr = "<unknown encoding>";
- case dwarf::DW_EH_PE_absptr: EncStr = "absptr";
- case dwarf::DW_EH_PE_omit: EncStr = "omit";
- case dwarf::DW_EH_PE_pcrel: EncStr = "pcrel";
- case dwarf::DW_EH_PE_udata4: EncStr = "udata4";
- case dwarf::DW_EH_PE_udata8: EncStr = "udata8";
- case dwarf::DW_EH_PE_sdata4: EncStr = "sdata4";
- case dwarf::DW_EH_PE_sdata8: EncStr = "sdata8";
- case dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_udata4: EncStr = "pcrel udata4";
- case dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_sdata4: EncStr = "pcrel sdata4";
- case dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_udata8: EncStr = "pcrel udata8";
- case dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_sdata8: EncStr = "pcrel sdata8";
+ default: EncStr = "<unknown encoding>"; break;
+ case dwarf::DW_EH_PE_absptr: EncStr = "absptr"; break;
+ case dwarf::DW_EH_PE_omit: EncStr = "omit"; break;
+ case dwarf::DW_EH_PE_pcrel: EncStr = "pcrel"; break;
+ case dwarf::DW_EH_PE_udata4: EncStr = "udata4"; break;
+ case dwarf::DW_EH_PE_udata8: EncStr = "udata8"; break;
+ case dwarf::DW_EH_PE_sdata4: EncStr = "sdata4"; break;
+ case dwarf::DW_EH_PE_sdata8: EncStr = "sdata8"; break;
+ case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata4:
+ EncStr = "pcrel udata4";
+ break;
+ case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4:
+ EncStr = "pcrel sdata4";
+ break;
+ case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8:
+ EncStr = "pcrel udata8";
+ break;
+ case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata8:
+      EncStr = "pcrel sdata8";
+ break;
case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata4:
EncStr = "indirect pcrel udata4";
+ break;
case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata4:
EncStr = "indirect pcrel sdata4";
+ break;
case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata8:
EncStr = "indirect pcrel udata8";
+ break;
case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata8:
EncStr = "indirect pcrel sdata8";
+ break;
}
Streamer.AddComment(Twine(Prefix) + " = " + EncStr);
@@ -639,11 +992,11 @@ void FrameEmitterImpl::EmitCFIInstruction(MCStreamer &Streamer,
}
return;
}
- case MCCFIInstruction::Remember:
+ case MCCFIInstruction::RememberState:
if (VerboseAsm) Streamer.AddComment("DW_CFA_remember_state");
Streamer.EmitIntValue(dwarf::DW_CFA_remember_state, 1);
return;
- case MCCFIInstruction::Restore:
+ case MCCFIInstruction::RestoreState:
if (VerboseAsm) Streamer.AddComment("DW_CFA_restore_state");
Streamer.EmitIntValue(dwarf::DW_CFA_restore_state, 1);
return;
@@ -655,6 +1008,19 @@ void FrameEmitterImpl::EmitCFIInstruction(MCStreamer &Streamer,
Streamer.EmitULEB128IntValue(Reg);
return;
}
+ case MCCFIInstruction::Restore: {
+ unsigned Reg = Instr.getDestination().getReg();
+ if (VerboseAsm) {
+ Streamer.AddComment("DW_CFA_restore");
+ Streamer.AddComment(Twine("Reg ") + Twine(Reg));
+ }
+ Streamer.EmitIntValue(dwarf::DW_CFA_restore | Reg, 1);
+ return;
+ }
+ case MCCFIInstruction::Escape:
+ if (VerboseAsm) Streamer.AddComment("Escape bytes");
+ Streamer.EmitBytes(Instr.getValues(), 0);
+ return;
}
llvm_unreachable("Unhandled case in switch");
}
@@ -738,8 +1104,8 @@ bool FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer,
// Compact Encoding
Size = getSizeForEncoding(Streamer, dwarf::DW_EH_PE_udata4);
- if (VerboseAsm) Streamer.AddComment(Twine("Compact Unwind Encoding: 0x") +
- Twine(llvm::utohexstr(Encoding)));
+ if (VerboseAsm) Streamer.AddComment("Compact Unwind Encoding: 0x" +
+ Twine::utohexstr(Encoding));
Streamer.EmitIntValue(Encoding, Size);
@@ -766,6 +1132,7 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
const MCSymbol *personality,
unsigned personalityEncoding,
const MCSymbol *lsda,
+ bool IsSignalFrame,
unsigned lsdaEncoding) {
MCContext &context = streamer.getContext();
const MCRegisterInfo &MRI = context.getRegisterInfo();
@@ -808,6 +1175,8 @@ const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
if (lsda)
Augmentation += "L";
Augmentation += "R";
+ if (IsSignalFrame)
+ Augmentation += "S";
streamer.EmitBytes(Augmentation.str(), 0);
}
streamer.EmitIntValue(0, 1);
@@ -967,17 +1336,18 @@ MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
namespace {
struct CIEKey {
- static const CIEKey getEmptyKey() { return CIEKey(0, 0, -1); }
- static const CIEKey getTombstoneKey() { return CIEKey(0, -1, 0); }
+ static const CIEKey getEmptyKey() { return CIEKey(0, 0, -1, false); }
+ static const CIEKey getTombstoneKey() { return CIEKey(0, -1, 0, false); }
CIEKey(const MCSymbol* Personality_, unsigned PersonalityEncoding_,
- unsigned LsdaEncoding_) : Personality(Personality_),
- PersonalityEncoding(PersonalityEncoding_),
- LsdaEncoding(LsdaEncoding_) {
+ unsigned LsdaEncoding_, bool IsSignalFrame_) :
+ Personality(Personality_), PersonalityEncoding(PersonalityEncoding_),
+ LsdaEncoding(LsdaEncoding_), IsSignalFrame(IsSignalFrame_) {
}
const MCSymbol* Personality;
unsigned PersonalityEncoding;
unsigned LsdaEncoding;
+ bool IsSignalFrame;
};
}
@@ -991,17 +1361,17 @@ namespace llvm {
return CIEKey::getTombstoneKey();
}
static unsigned getHashValue(const CIEKey &Key) {
- FoldingSetNodeID ID;
- ID.AddPointer(Key.Personality);
- ID.AddInteger(Key.PersonalityEncoding);
- ID.AddInteger(Key.LsdaEncoding);
- return ID.ComputeHash();
+ return static_cast<unsigned>(hash_combine(Key.Personality,
+ Key.PersonalityEncoding,
+ Key.LsdaEncoding,
+ Key.IsSignalFrame));
}
static bool isEqual(const CIEKey &LHS,
const CIEKey &RHS) {
return LHS.Personality == RHS.Personality &&
LHS.PersonalityEncoding == RHS.PersonalityEncoding &&
- LHS.LsdaEncoding == RHS.LsdaEncoding;
+ LHS.LsdaEncoding == RHS.LsdaEncoding &&
+ LHS.IsSignalFrame == RHS.IsSignalFrame;
}
};
}
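The CIEKey change above swaps a FoldingSetNodeID for LLVM's hash_combine, which hashes each field and mixes the results in a single call. The same pattern for a hypothetical key type (illustrative names, not from this patch):

#include "llvm/ADT/Hashing.h"

struct MyKey {        // hypothetical key type
  const void *Ptr;
  unsigned A;
  bool Flag;
};

static unsigned getHashValue(const MyKey &K) {
  // hash_combine mixes the per-field hashes; truncating the 64-bit hash_code
  // to unsigned matches what DenseMapInfo expects.
  return static_cast<unsigned>(llvm::hash_combine(K.Ptr, K.A, K.Flag));
}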
@@ -1016,12 +1386,10 @@ void MCDwarfFrameEmitter::Emit(MCStreamer &Streamer,
ArrayRef<MCDwarfFrameInfo> FrameArray = Streamer.getFrameInfos();
// Emit the compact unwind info if available.
- // FIXME: This emits both the compact unwind and the old CIE/FDE
- // information. Only one of those is needed.
if (IsEH && MOFI->getCompactUnwindSection())
for (unsigned i = 0, n = Streamer.getNumFrameInfos(); i < n; ++i) {
const MCDwarfFrameInfo &Frame = Streamer.getFrameInfo(i);
- if (!Frame.CompactUnwindEncoding)
+ if (Frame.CompactUnwindEncoding)
Emitter.EmitCompactUnwind(Streamer, Frame);
}
@@ -1039,11 +1407,12 @@ void MCDwarfFrameEmitter::Emit(MCStreamer &Streamer,
for (unsigned i = 0, n = FrameArray.size(); i < n; ++i) {
const MCDwarfFrameInfo &Frame = FrameArray[i];
CIEKey Key(Frame.Personality, Frame.PersonalityEncoding,
- Frame.LsdaEncoding);
+ Frame.LsdaEncoding, Frame.IsSignalFrame);
const MCSymbol *&CIEStart = IsEH ? CIEStarts[Key] : DummyDebugKey;
if (!CIEStart)
CIEStart = &Emitter.EmitCIE(Streamer, Frame.Personality,
Frame.PersonalityEncoding, Frame.Lsda,
+ Frame.IsSignalFrame,
Frame.LsdaEncoding);
FDEEnd = Emitter.EmitFDE(Streamer, *CIEStart, Frame);
diff --git a/contrib/llvm/lib/MC/MCELF.cpp b/contrib/llvm/lib/MC/MCELF.cpp
index dad2e7b..f9f98e0 100644
--- a/contrib/llvm/lib/MC/MCELF.cpp
+++ b/contrib/llvm/lib/MC/MCELF.cpp
@@ -37,7 +37,7 @@ void MCELF::SetType(MCSymbolData &SD, unsigned Type) {
assert(Type == ELF::STT_NOTYPE || Type == ELF::STT_OBJECT ||
Type == ELF::STT_FUNC || Type == ELF::STT_SECTION ||
Type == ELF::STT_FILE || Type == ELF::STT_COMMON ||
- Type == ELF::STT_TLS);
+ Type == ELF::STT_TLS || Type == ELF::STT_GNU_IFUNC);
uint32_t OtherFlags = SD.getFlags() & ~(0xf << ELF_STT_Shift);
SD.setFlags(OtherFlags | (Type << ELF_STT_Shift));
@@ -48,7 +48,7 @@ unsigned MCELF::GetType(const MCSymbolData &SD) {
assert(Type == ELF::STT_NOTYPE || Type == ELF::STT_OBJECT ||
Type == ELF::STT_FUNC || Type == ELF::STT_SECTION ||
Type == ELF::STT_FILE || Type == ELF::STT_COMMON ||
- Type == ELF::STT_TLS);
+ Type == ELF::STT_TLS || Type == ELF::STT_GNU_IFUNC);
return Type;
}
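SetType and GetType above keep the ELF symbol type in a four-bit field of the symbol's flags word, shifted by ELF_STT_Shift. The read-modify-write idiom in isolation (field width and shift value assumed for illustration):

#include <cstdint>

static const unsigned Shift = 0; // stand-in for ELF_STT_Shift

static uint32_t setTypeBits(uint32_t Flags, unsigned Type) {
  uint32_t Other = Flags & ~(uint32_t(0xf) << Shift); // clear the old field
  return Other | (uint32_t(Type) << Shift);           // insert the new value
}

static unsigned getTypeBits(uint32_t Flags) {
  return (Flags >> Shift) & 0xf;
}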
diff --git a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
index 12a02a9..171ab4d 100644
--- a/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
+++ b/contrib/llvm/lib/MC/MCELFObjectTargetWriter.cpp
@@ -7,17 +7,40 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCELFObjectWriter.h"
using namespace llvm;
MCELFObjectTargetWriter::MCELFObjectTargetWriter(bool Is64Bit_,
- Triple::OSType OSType_,
+ uint8_t OSABI_,
uint16_t EMachine_,
bool HasRelocationAddend_)
- : OSType(OSType_), EMachine(EMachine_),
+ : OSABI(OSABI_), EMachine(EMachine_),
HasRelocationAddend(HasRelocationAddend_), Is64Bit(Is64Bit_) {
}
-MCELFObjectTargetWriter::~MCELFObjectTargetWriter() {
+/// Default e_flags = 0
+unsigned MCELFObjectTargetWriter::getEFlags() const {
+ return 0;
+}
+
+const MCSymbol *MCELFObjectTargetWriter::ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ return NULL;
+}
+
+
+void MCELFObjectTargetWriter::adjustFixupOffset(const MCFixup &Fixup,
+ uint64_t &RelocOffset) {
+}
+
+void
+MCELFObjectTargetWriter::sortRelocs(const MCAssembler &Asm,
+ std::vector<ELFRelocationEntry> &Relocs) {
+  // Sort by the r_offset, just like GNU as does.
+ array_pod_sort(Relocs.begin(), Relocs.end());
}
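array_pod_sort is LLVM's qsort-based replacement for std::sort on contiguous POD ranges; called without an explicit comparator it falls back to the element's operator<, so ELFRelocationEntry is presumably ordered by its r_offset field. The same call on a plain vector (hypothetical element type):

#include "llvm/ADT/STLExtras.h"
#include <vector>

struct Entry {           // hypothetical POD relocation record
  unsigned Offset;
  bool operator<(const Entry &RHS) const { return Offset < RHS.Offset; }
};

static void sortByOffset(std::vector<Entry> &Entries) {
  // array_pod_sort is a qsort-based drop-in for std::sort on POD ranges;
  // LLVM prefers it to avoid instantiating std::sort for every type.
  llvm::array_pod_sort(Entries.begin(), Entries.end());
}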
diff --git a/contrib/llvm/lib/MC/MCELFStreamer.cpp b/contrib/llvm/lib/MC/MCELFStreamer.cpp
index 9ada08e..6c4d0e3 100644
--- a/contrib/llvm/lib/MC/MCELFStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCELFStreamer.cpp
@@ -11,13 +11,17 @@
//
//===----------------------------------------------------------------------===//
-#include "MCELFStreamer.h"
#include "MCELF.h"
-#include "llvm/MC/MCStreamer.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCELFSymbolFlags.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
@@ -29,6 +33,123 @@
using namespace llvm;
+namespace {
+class MCELFStreamer : public MCObjectStreamer {
+public:
+ MCELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter)
+ : MCObjectStreamer(Context, TAB, OS, Emitter) {}
+
+ MCELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ MCAssembler *Assembler)
+ : MCObjectStreamer(Context, TAB, OS, Emitter, Assembler) {}
+
+
+ ~MCELFStreamer() {}
+
+ /// @name MCStreamer Interface
+ /// @{
+
+ virtual void InitSections();
+ virtual void ChangeSection(const MCSection *Section);
+ virtual void EmitLabel(MCSymbol *Symbol);
+ virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitThumbFunc(MCSymbol *Func);
+ virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
+ virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
+ virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
+ llvm_unreachable("ELF doesn't support this directive");
+ }
+ virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment);
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
+ llvm_unreachable("ELF doesn't support this directive");
+ }
+
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
+ llvm_unreachable("ELF doesn't support this directive");
+ }
+
+ virtual void EmitCOFFSymbolType(int Type) {
+ llvm_unreachable("ELF doesn't support this directive");
+ }
+
+ virtual void EndCOFFSymbolDef() {
+ llvm_unreachable("ELF doesn't support this directive");
+ }
+
+ virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ SD.setSize(Value);
+ }
+
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment);
+
+ virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
+ unsigned Size = 0, unsigned ByteAlignment = 0) {
+ llvm_unreachable("ELF doesn't support this directive");
+ }
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0) {
+ llvm_unreachable("ELF doesn't support this directive");
+ }
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
+ virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0);
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0);
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace);
+
+ virtual void EmitFileDirective(StringRef Filename);
+
+ virtual void FinishImpl();
+
+private:
+ virtual void EmitInstToFragment(const MCInst &Inst);
+ virtual void EmitInstToData(const MCInst &Inst);
+
+ void fixSymbolsInTLSFixups(const MCExpr *expr);
+
+ struct LocalCommon {
+ MCSymbolData *SD;
+ uint64_t Size;
+ unsigned ByteAlignment;
+ };
+ std::vector<LocalCommon> LocalCommons;
+
+ SmallPtrSet<MCSymbol *, 16> BindingExplicitlySet;
+ /// @}
+ void SetSection(StringRef Section, unsigned Type, unsigned Flags,
+ SectionKind Kind) {
+ SwitchSection(getContext().getELFSection(Section, Type, Flags, Kind));
+ }
+
+ void SetSectionData() {
+ SetSection(".data", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE |ELF::SHF_ALLOC,
+ SectionKind::getDataRel());
+ EmitCodeAlignment(4, 0);
+ }
+ void SetSectionText() {
+ SetSection(".text", ELF::SHT_PROGBITS,
+ ELF::SHF_EXECINSTR |
+ ELF::SHF_ALLOC, SectionKind::getText());
+ EmitCodeAlignment(4, 0);
+ }
+ void SetSectionBss() {
+ SetSection(".bss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC, SectionKind::getBSS());
+ EmitCodeAlignment(4, 0);
+ }
+};
+}
+
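With this move the ELF streamer type lives in an anonymous namespace, so the only way to construct one from outside this file is the createELFStreamer factory at the bottom. The general pattern, sketched with hypothetical names:

struct Widget { virtual ~Widget() {} };

namespace {
// The concrete type is invisible outside this translation unit...
struct ConcreteWidget : Widget {};
}

// ...so clients can only depend on the factory's return type.
Widget *createWidget() { return new ConcreteWidget(); }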
void MCELFStreamer::InitSections() {
// This emulates the behavior of GNU as. This makes it easier
// to compare the output as the major sections are in the same order.
@@ -61,7 +182,7 @@ void MCELFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
return;
}
- assert(0 && "invalid assembler flag!");
+ llvm_unreachable("invalid assembler flag!");
}
void MCELFStreamer::EmitThumbFunc(MCSymbol *Func) {
@@ -130,10 +251,8 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_WeakDefinition:
case MCSA_WeakDefAutoPrivate:
case MCSA_Invalid:
- case MCSA_ELF_TypeIndFunction:
case MCSA_IndirectSymbol:
- assert(0 && "Invalid symbol attribute for ELF!");
- break;
+ llvm_unreachable("Invalid symbol attribute for ELF!");
case MCSA_ELF_TypeGnuUniqueObject:
// Ignore for now.
@@ -162,6 +281,10 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
MCELF::SetType(SD, ELF::STT_FUNC);
break;
+ case MCSA_ELF_TypeIndFunction:
+ MCELF::SetType(SD, ELF::STT_GNU_IFUNC);
+ break;
+
case MCSA_ELF_TypeObject:
MCELF::SetType(SD, ELF::STT_OBJECT);
break;
@@ -205,10 +328,10 @@ void MCELFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
if (MCELF::GetBinding(SD) == ELF_STB_Local) {
const MCSection *Section = getAssembler().getContext().getELFSection(".bss",
- ELF::SHT_NOBITS,
- ELF::SHF_WRITE |
- ELF::SHF_ALLOC,
- SectionKind::getBSS());
+ ELF::SHT_NOBITS,
+ ELF::SHF_WRITE |
+ ELF::SHF_ALLOC,
+ SectionKind::getBSS());
Symbol->setSection(*Section);
struct LocalCommon L = {&SD, Size, ByteAlignment};
@@ -266,6 +389,13 @@ void MCELFStreamer::EmitCodeAlignment(unsigned ByteAlignment,
getCurrentSectionData()->setAlignment(ByteAlignment);
}
+void MCELFStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace) {
+ fixSymbolsInTLSFixups(Value);
+ MCObjectStreamer::EmitValueImpl(Value, Size, AddrSpace);
+}
+
+
// Add a symbol for the file name of this module. This is the second
// entry in the module's symbol table (the first being the null symbol).
void MCELFStreamer::EmitFileDirective(StringRef Filename) {
@@ -308,6 +438,10 @@ void MCELFStreamer::fixSymbolsInTLSFixups(const MCExpr *expr) {
case MCSymbolRefExpr::VK_ARM_TLSGD:
case MCSymbolRefExpr::VK_ARM_TPOFF:
case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
+ case MCSymbolRefExpr::VK_Mips_TLSGD:
+ case MCSymbolRefExpr::VK_Mips_GOTTPREL:
+ case MCSymbolRefExpr::VK_Mips_TPREL_HI:
+ case MCSymbolRefExpr::VK_Mips_TPREL_LO:
break;
}
MCSymbolData &SD = getAssembler().getOrCreateSymbolData(symRef.getSymbol());
@@ -349,7 +483,7 @@ void MCELFStreamer::EmitInstToData(const MCInst &Inst) {
DF->getContents().append(Code.begin(), Code.end());
}
-void MCELFStreamer::Finish() {
+void MCELFStreamer::FinishImpl() {
EmitFrames(true);
for (std::vector<LocalCommon>::const_iterator i = LocalCommons.begin(),
@@ -372,7 +506,7 @@ void MCELFStreamer::Finish() {
SectData.setAlignment(ByteAlignment);
}
- this->MCObjectStreamer::Finish();
+ this->MCObjectStreamer::FinishImpl();
}
MCStreamer *llvm::createELFStreamer(MCContext &Context, MCAsmBackend &MAB,
diff --git a/contrib/llvm/lib/MC/MCELFStreamer.h b/contrib/llvm/lib/MC/MCELFStreamer.h
deleted file mode 100644
index 10bf775..0000000
--- a/contrib/llvm/lib/MC/MCELFStreamer.h
+++ /dev/null
@@ -1,141 +0,0 @@
-//===- lib/MC/MCELFStreamer.h - ELF Object Output -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file assembles .s files and emits ELF .o object files.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_MC_MCELFSTREAMER_H
-#define LLVM_MC_MCELFSTREAMER_H
-
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCObjectStreamer.h"
-#include "llvm/MC/MCSectionELF.h"
-
-namespace llvm {
-
-class MCELFStreamer : public MCObjectStreamer {
-public:
- MCELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter)
- : MCObjectStreamer(Context, TAB, OS, Emitter) {}
-
- MCELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- MCAssembler *Assembler)
- : MCObjectStreamer(Context, TAB, OS, Emitter, Assembler) {}
-
-
- ~MCELFStreamer() {}
-
- /// @name MCStreamer Interface
- /// @{
-
- virtual void InitSections();
- virtual void ChangeSection(const MCSection *Section);
- virtual void EmitLabel(MCSymbol *Symbol);
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
- virtual void EmitThumbFunc(MCSymbol *Func);
- virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
- virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
- virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
- assert(0 && "ELF doesn't support this directive");
- }
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
- virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
- assert(0 && "ELF doesn't support this directive");
- }
-
- virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
- assert(0 && "ELF doesn't support this directive");
- }
-
- virtual void EmitCOFFSymbolType(int Type) {
- assert(0 && "ELF doesn't support this directive");
- }
-
- virtual void EndCOFFSymbolDef() {
- assert(0 && "ELF doesn't support this directive");
- }
-
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
- MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- SD.setSize(Value);
- }
-
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment);
-
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0, unsigned ByteAlignment = 0) {
- assert(0 && "ELF doesn't support this directive");
- }
- virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment = 0) {
- assert(0 && "ELF doesn't support this directive");
- }
- virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
- unsigned ValueSize = 1,
- unsigned MaxBytesToEmit = 0);
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit = 0);
-
- virtual void EmitFileDirective(StringRef Filename);
-
- virtual void Finish();
-
-private:
- virtual void EmitInstToFragment(const MCInst &Inst);
- virtual void EmitInstToData(const MCInst &Inst);
-
- void fixSymbolsInTLSFixups(const MCExpr *expr);
-
- struct LocalCommon {
- MCSymbolData *SD;
- uint64_t Size;
- unsigned ByteAlignment;
- };
- std::vector<LocalCommon> LocalCommons;
-
- SmallPtrSet<MCSymbol *, 16> BindingExplicitlySet;
- /// @}
- void SetSection(StringRef Section, unsigned Type, unsigned Flags,
- SectionKind Kind) {
- SwitchSection(getContext().getELFSection(Section, Type, Flags, Kind));
- }
-
- void SetSectionData() {
- SetSection(".data", ELF::SHT_PROGBITS,
- ELF::SHF_WRITE |ELF::SHF_ALLOC,
- SectionKind::getDataRel());
- EmitCodeAlignment(4, 0);
- }
- void SetSectionText() {
- SetSection(".text", ELF::SHT_PROGBITS,
- ELF::SHF_EXECINSTR |
- ELF::SHF_ALLOC, SectionKind::getText());
- EmitCodeAlignment(4, 0);
- }
- void SetSectionBss() {
- SetSection(".bss", ELF::SHT_NOBITS,
- ELF::SHF_WRITE |
- ELF::SHF_ALLOC, SectionKind::getBSS());
- EmitCodeAlignment(4, 0);
- }
-};
-
-} // end llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/MC/MCExpr.cpp b/contrib/llvm/lib/MC/MCExpr.cpp
index da297fb..7880155 100644
--- a/contrib/llvm/lib/MC/MCExpr.cpp
+++ b/contrib/llvm/lib/MC/MCExpr.cpp
@@ -14,9 +14,11 @@
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -57,7 +59,8 @@ void MCExpr::print(raw_ostream &OS) const {
SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOT ||
SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTOFF ||
SRE.getKind() == MCSymbolRefExpr::VK_ARM_TPOFF ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTTPOFF)
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTTPOFF ||
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET1)
OS << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
else if (SRE.getKind() != MCSymbolRefExpr::VK_None &&
SRE.getKind() != MCSymbolRefExpr::VK_PPC_DARWIN_HA16 &&
@@ -70,7 +73,6 @@ void MCExpr::print(raw_ostream &OS) const {
case MCExpr::Unary: {
const MCUnaryExpr &UE = cast<MCUnaryExpr>(*this);
switch (UE.getOpcode()) {
- default: assert(0 && "Invalid opcode!");
case MCUnaryExpr::LNot: OS << '!'; break;
case MCUnaryExpr::Minus: OS << '-'; break;
case MCUnaryExpr::Not: OS << '~'; break;
@@ -91,7 +93,6 @@ void MCExpr::print(raw_ostream &OS) const {
}
switch (BE.getOpcode()) {
- default: assert(0 && "Invalid opcode!");
case MCBinaryExpr::Add:
// Print "X-42" instead of "X+-42".
if (const MCConstantExpr *RHSC = dyn_cast<MCConstantExpr>(BE.getRHS())) {
@@ -132,7 +133,7 @@ void MCExpr::print(raw_ostream &OS) const {
}
}
- assert(0 && "Invalid expression kind!");
+ llvm_unreachable("Invalid expression kind!");
}
void MCExpr::dump() const {
@@ -171,7 +172,6 @@ const MCSymbolRefExpr *MCSymbolRefExpr::Create(StringRef Name, VariantKind Kind,
StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
switch (Kind) {
- default:
case VK_Invalid: return "<<invalid>>";
case VK_None: return "<<none>>";
@@ -189,18 +189,39 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
case VK_TPOFF: return "TPOFF";
case VK_DTPOFF: return "DTPOFF";
case VK_TLVP: return "TLVP";
+ case VK_SECREL: return "SECREL";
case VK_ARM_PLT: return "(PLT)";
case VK_ARM_GOT: return "(GOT)";
case VK_ARM_GOTOFF: return "(GOTOFF)";
case VK_ARM_TPOFF: return "(tpoff)";
case VK_ARM_GOTTPOFF: return "(gottpoff)";
case VK_ARM_TLSGD: return "(tlsgd)";
+ case VK_ARM_TARGET1: return "(target1)";
case VK_PPC_TOC: return "toc";
case VK_PPC_DARWIN_HA16: return "ha16";
case VK_PPC_DARWIN_LO16: return "lo16";
case VK_PPC_GAS_HA16: return "ha";
case VK_PPC_GAS_LO16: return "l";
+ case VK_Mips_GPREL: return "GPREL";
+ case VK_Mips_GOT_CALL: return "GOT_CALL";
+ case VK_Mips_GOT16: return "GOT16";
+ case VK_Mips_GOT: return "GOT";
+ case VK_Mips_ABS_HI: return "ABS_HI";
+ case VK_Mips_ABS_LO: return "ABS_LO";
+ case VK_Mips_TLSGD: return "TLSGD";
+ case VK_Mips_TLSLDM: return "TLSLDM";
+ case VK_Mips_DTPREL_HI: return "DTPREL_HI";
+ case VK_Mips_DTPREL_LO: return "DTPREL_LO";
+ case VK_Mips_GOTTPREL: return "GOTTPREL";
+ case VK_Mips_TPREL_HI: return "TPREL_HI";
+ case VK_Mips_TPREL_LO: return "TPREL_LO";
+ case VK_Mips_GPOFF_HI: return "GPOFF_HI";
+ case VK_Mips_GPOFF_LO: return "GPOFF_LO";
+ case VK_Mips_GOT_DISP: return "GOT_DISP";
+ case VK_Mips_GOT_PAGE: return "GOT_PAGE";
+ case VK_Mips_GOT_OFST: return "GOT_OFST";
}
+ llvm_unreachable("Invalid variant kind");
}
MCSymbolRefExpr::VariantKind
@@ -337,6 +358,11 @@ static void AttemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
if (Addrs && (&SecA != &SecB))
Addend += (Addrs->lookup(&SecA) - Addrs->lookup(&SecB));
+ // Pointers to Thumb symbols need to have their low-bit set to allow
+ // for interworking.
+ if (Asm->isThumbFunc(&SA))
+ Addend |= 1;
+
// Clear the symbol expr pointers to indicate we have folded these
// operands.
A = B = 0;
@@ -557,8 +583,7 @@ bool MCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
}
}
- assert(0 && "Invalid assembly expression kind!");
- return false;
+ llvm_unreachable("Invalid assembly expression kind!");
}
const MCSection *MCExpr::FindAssociatedSection() const {
@@ -599,6 +624,5 @@ const MCSection *MCExpr::FindAssociatedSection() const {
}
}
- assert(0 && "Invalid assembly expression kind!");
- return 0;
+ llvm_unreachable("Invalid assembly expression kind!");
}
diff --git a/contrib/llvm/lib/MC/MCInst.cpp b/contrib/llvm/lib/MC/MCInst.cpp
index 4cb628b..7bbfd2e 100644
--- a/contrib/llvm/lib/MC/MCInst.cpp
+++ b/contrib/llvm/lib/MC/MCInst.cpp
@@ -25,6 +25,8 @@ void MCOperand::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
OS << "Imm:" << getImm();
else if (isExpr()) {
OS << "Expr:(" << *getExpr() << ")";
+ } else if (isInst()) {
+ OS << "Inst:(" << *getInst() << ")";
} else
OS << "UNDEFINED";
OS << ">";
diff --git a/contrib/llvm/lib/MC/MCInstPrinter.cpp b/contrib/llvm/lib/MC/MCInstPrinter.cpp
index 2317a28..847bcc0 100644
--- a/contrib/llvm/lib/MC/MCInstPrinter.cpp
+++ b/contrib/llvm/lib/MC/MCInstPrinter.cpp
@@ -8,8 +8,10 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -19,11 +21,11 @@ MCInstPrinter::~MCInstPrinter() {
/// getOpcodeName - Return the name of the specified opcode enum (e.g.
/// "MOV32ri"), resolved through the MCInstrInfo.
StringRef MCInstPrinter::getOpcodeName(unsigned Opcode) const {
- return "";
+ return MII.getName(Opcode);
}
void MCInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
- assert(0 && "Target should implement this");
+ llvm_unreachable("Target should implement this");
}
void MCInstPrinter::printAnnotation(raw_ostream &OS, StringRef Annot) {
diff --git a/contrib/llvm/lib/MC/MCLoggingStreamer.cpp b/contrib/llvm/lib/MC/MCLoggingStreamer.cpp
deleted file mode 100644
index 3fe8ac7..0000000
--- a/contrib/llvm/lib/MC/MCLoggingStreamer.cpp
+++ /dev/null
@@ -1,250 +0,0 @@
-//===- lib/MC/MCLoggingStreamer.cpp - API Logging Streamer ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
-
-class MCLoggingStreamer : public MCStreamer {
- llvm::OwningPtr<MCStreamer> Child;
-
- raw_ostream &OS;
-
-public:
- MCLoggingStreamer(MCStreamer *_Child, raw_ostream &_OS)
- : MCStreamer(_Child->getContext()), Child(_Child), OS(_OS) {}
-
- void LogCall(const char *Function) {
- OS << Function << "\n";
- }
-
- void LogCall(const char *Function, const Twine &Message) {
- OS << Function << ": " << Message << "\n";
- }
-
- virtual bool isVerboseAsm() const { return Child->isVerboseAsm(); }
-
- virtual bool hasRawTextSupport() const { return Child->hasRawTextSupport(); }
-
- virtual raw_ostream &GetCommentOS() { return Child->GetCommentOS(); }
-
- virtual void AddComment(const Twine &T) {
- LogCall("AddComment", T);
- return Child->AddComment(T);
- }
-
- virtual void AddBlankLine() {
- LogCall("AddBlankLine");
- return Child->AddBlankLine();
- }
-
- virtual void ChangeSection(const MCSection *Section) {
- LogCall("ChangeSection");
- return Child->ChangeSection(Section);
- }
-
- virtual void InitSections() {
- LogCall("InitSections");
- return Child->InitSections();
- }
-
- virtual void EmitLabel(MCSymbol *Symbol) {
- LogCall("EmitLabel");
- return Child->EmitLabel(Symbol);
- }
-
- virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {
- LogCall("EmitAssemblerFlag");
- return Child->EmitAssemblerFlag(Flag);
- }
-
- virtual void EmitThumbFunc(MCSymbol *Func) {
- LogCall("EmitThumbFunc");
- return Child->EmitThumbFunc(Func);
- }
-
- virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
- LogCall("EmitAssignment");
- return Child->EmitAssignment(Symbol, Value);
- }
-
- virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
- LogCall("EmitWeakReference");
- return Child->EmitWeakReference(Alias, Symbol);
- }
-
- virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
- const MCSymbol *LastLabel,
- const MCSymbol *Label,
- unsigned PointerSize) {
- LogCall("EmitDwarfAdvanceLineAddr");
- return Child->EmitDwarfAdvanceLineAddr(LineDelta, LastLabel, Label,
- PointerSize);
- }
-
- virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) {
- LogCall("EmitSymbolAttribute");
- return Child->EmitSymbolAttribute(Symbol, Attribute);
- }
-
- virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
- LogCall("EmitSymbolDesc");
- return Child->EmitSymbolDesc(Symbol, DescValue);
- }
-
- virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
- LogCall("BeginCOFFSymbolDef");
- return Child->BeginCOFFSymbolDef(Symbol);
- }
-
- virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
- LogCall("EmitCOFFSymbolStorageClass");
- return Child->EmitCOFFSymbolStorageClass(StorageClass);
- }
-
- virtual void EmitCOFFSymbolType(int Type) {
- LogCall("EmitCOFFSymbolType");
- return Child->EmitCOFFSymbolType(Type);
- }
-
- virtual void EndCOFFSymbolDef() {
- LogCall("EndCOFFSymbolDef");
- return Child->EndCOFFSymbolDef();
- }
-
- virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
- LogCall("EmitELFSize");
- return Child->EmitELFSize(Symbol, Value);
- }
-
- virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {
- LogCall("EmitCommonSymbol");
- return Child->EmitCommonSymbol(Symbol, Size, ByteAlignment);
- }
-
- virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
- unsigned ByteAlignment) {
- LogCall("EmitLocalCommonSymbol");
- return Child->EmitLocalCommonSymbol(Symbol, Size, ByteAlignment);
- }
-
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- unsigned Size = 0, unsigned ByteAlignment = 0) {
- LogCall("EmitZerofill");
- return Child->EmitZerofill(Section, Symbol, Size, ByteAlignment);
- }
-
- virtual void EmitTBSSSymbol (const MCSection *Section, MCSymbol *Symbol,
- uint64_t Size, unsigned ByteAlignment = 0) {
- LogCall("EmitTBSSSymbol");
- return Child->EmitTBSSSymbol(Section, Symbol, Size, ByteAlignment);
- }
-
- virtual void EmitBytes(StringRef Data, unsigned AddrSpace) {
- LogCall("EmitBytes");
- return Child->EmitBytes(Data, AddrSpace);
- }
-
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace){
- LogCall("EmitValue");
- return Child->EmitValueImpl(Value, Size, AddrSpace);
- }
-
- virtual void EmitULEB128Value(const MCExpr *Value) {
- LogCall("EmitULEB128Value");
- return Child->EmitULEB128Value(Value);
- }
-
- virtual void EmitSLEB128Value(const MCExpr *Value) {
- LogCall("EmitSLEB128Value");
- return Child->EmitSLEB128Value(Value);
- }
-
- virtual void EmitGPRel32Value(const MCExpr *Value) {
- LogCall("EmitGPRel32Value");
- return Child->EmitGPRel32Value(Value);
- }
-
- virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
- unsigned AddrSpace) {
- LogCall("EmitFill");
- return Child->EmitFill(NumBytes, FillValue, AddrSpace);
- }
-
- virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
- unsigned ValueSize = 1,
- unsigned MaxBytesToEmit = 0) {
- LogCall("EmitValueToAlignment");
- return Child->EmitValueToAlignment(ByteAlignment, Value,
- ValueSize, MaxBytesToEmit);
- }
-
- virtual void EmitCodeAlignment(unsigned ByteAlignment,
- unsigned MaxBytesToEmit = 0) {
- LogCall("EmitCodeAlignment");
- return Child->EmitCodeAlignment(ByteAlignment, MaxBytesToEmit);
- }
-
- virtual void EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value = 0) {
- LogCall("EmitValueToOffset");
- return Child->EmitValueToOffset(Offset, Value);
- }
-
- virtual void EmitFileDirective(StringRef Filename) {
- LogCall("EmitFileDirective", "FileName:" + Filename);
- return Child->EmitFileDirective(Filename);
- }
-
- virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
- LogCall("EmitDwarfFileDirective",
- "FileNo:" + Twine(FileNo) + " Filename:" + Filename);
- return Child->EmitDwarfFileDirective(FileNo, Filename);
- }
-
- virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
- unsigned Column, unsigned Flags,
- unsigned Isa, unsigned Discriminator,
- StringRef FileName) {
- LogCall("EmitDwarfLocDirective",
- "FileNo:" + Twine(FileNo) + " Line:" + Twine(Line) +
- " Column:" + Twine(Column) + " Flags:" + Twine(Flags) +
- " Isa:" + Twine(Isa) + " Discriminator:" + Twine(Discriminator));
- return Child->EmitDwarfLocDirective(FileNo, Line, Column, Flags,
- Isa, Discriminator, FileName);
- }
-
- virtual void EmitInstruction(const MCInst &Inst) {
- LogCall("EmitInstruction");
- return Child->EmitInstruction(Inst);
- }
-
- virtual void EmitRawText(StringRef String) {
- LogCall("EmitRawText", "\"" + String + "\"");
- return Child->EmitRawText(String);
- }
-
- virtual void Finish() {
- LogCall("Finish");
- return Child->Finish();
- }
-
-};
-
-} // end anonymous namespace.
-
-MCStreamer *llvm::createLoggingStreamer(MCStreamer *Child, raw_ostream &OS) {
- return new MCLoggingStreamer(Child, OS);
-}
diff --git a/contrib/llvm/lib/MC/MCMachOStreamer.cpp b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
index aa35815..bc6cf77 100644
--- a/contrib/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
@@ -53,23 +53,23 @@ public:
virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment);
virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
- assert(0 && "macho doesn't support this directive");
+ llvm_unreachable("macho doesn't support this directive");
}
virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
- assert(0 && "macho doesn't support this directive");
+ llvm_unreachable("macho doesn't support this directive");
}
virtual void EmitCOFFSymbolType(int Type) {
- assert(0 && "macho doesn't support this directive");
+ llvm_unreachable("macho doesn't support this directive");
}
virtual void EndCOFFSymbolDef() {
- assert(0 && "macho doesn't support this directive");
+ llvm_unreachable("macho doesn't support this directive");
}
virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
- assert(0 && "macho doesn't support this directive");
+ llvm_unreachable("macho doesn't support this directive");
}
virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) {
- assert(0 && "macho doesn't support this directive");
+ llvm_unreachable("macho doesn't support this directive");
}
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
unsigned Size = 0, unsigned ByteAlignment = 0);
@@ -89,7 +89,7 @@ public:
//report_fatal_error("unsupported directive: '.file'");
}
- virtual void Finish();
+ virtual void FinishImpl();
/// @}
};
@@ -140,7 +140,7 @@ void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
// Let the target do whatever target specific stuff it needs to do.
- getAssembler().getBackend().HandleAssemblerFlag(Flag);
+ getAssembler().getBackend().handleAssemblerFlag(Flag);
// Do any generic stuff we need to do.
switch (Flag) {
case MCAF_SyntaxUnified: return; // no-op here.
@@ -150,14 +150,10 @@ void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
case MCAF_SubsectionsViaSymbols:
getAssembler().setSubsectionsViaSymbols(true);
return;
- default:
- llvm_unreachable("invalid assembler flag!");
}
}
void MCMachOStreamer::EmitThumbFunc(MCSymbol *Symbol) {
- // FIXME: Flag the function ISA as thumb with DW_AT_APPLE_isa.
-
// Remember that the function is a thumb function. Fixup and relocation
// values will need to be adjusted.
getAssembler().setIsThumbFunc(Symbol);
@@ -215,8 +211,7 @@ void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_Protected:
case MCSA_Weak:
case MCSA_Local:
- assert(0 && "Invalid symbol attribute for Mach-O!");
- break;
+ llvm_unreachable("Invalid symbol attribute for Mach-O!");
case MCSA_Global:
SD.setExternal(true);
@@ -377,7 +372,7 @@ void MCMachOStreamer::EmitInstToData(const MCInst &Inst) {
DF->getContents().append(Code.begin(), Code.end());
}
-void MCMachOStreamer::Finish() {
+void MCMachOStreamer::FinishImpl() {
EmitFrames(true);
// We have to set the fragment atom associations so we can relax properly for
@@ -409,7 +404,7 @@ void MCMachOStreamer::Finish() {
}
}
- this->MCObjectStreamer::Finish();
+ this->MCObjectStreamer::FinishImpl();
}
MCStreamer *llvm::createMachOStreamer(MCContext &Context, MCAsmBackend &MAB,
diff --git a/contrib/llvm/lib/MC/MCModule.cpp b/contrib/llvm/lib/MC/MCModule.cpp
index b1d09d9..f563160 100644
--- a/contrib/llvm/lib/MC/MCModule.cpp
+++ b/contrib/llvm/lib/MC/MCModule.cpp
@@ -1,4 +1,4 @@
-//===- lib/MC/MCModule.cpp - MCModule implementation --------------------------===//
+//===- lib/MC/MCModule.cpp - MCModule implementation ----------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/MC/MCNullStreamer.cpp b/contrib/llvm/lib/MC/MCNullStreamer.cpp
index a6c0adb..7ff2d1b 100644
--- a/contrib/llvm/lib/MC/MCNullStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCNullStreamer.cpp
@@ -55,6 +55,7 @@ namespace {
virtual void EmitCOFFSymbolStorageClass(int StorageClass) {}
virtual void EmitCOFFSymbolType(int Type) {}
virtual void EndCOFFSymbolDef() {}
+ virtual void EmitCOFFSecRel32(MCSymbol const *Symbol) {}
virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {}
virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
@@ -79,11 +80,12 @@ namespace {
virtual void EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit = 0) {}
- virtual void EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value = 0) {}
+ virtual bool EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value = 0) { return false; }
virtual void EmitFileDirective(StringRef Filename) {}
- virtual bool EmitDwarfFileDirective(unsigned FileNo,StringRef Filename) {
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+ StringRef Filename) {
return false;
}
virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
@@ -92,7 +94,11 @@ namespace {
StringRef FileName) {}
virtual void EmitInstruction(const MCInst &Inst) {}
- virtual void Finish() {}
+ virtual void FinishImpl() {}
+
+ virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
+ RecordProcEnd(Frame);
+ }
/// @}
};
diff --git a/contrib/llvm/lib/MC/MCObjectFileInfo.cpp b/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
index df8b99d..b22ae33 100644
--- a/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
+++ b/contrib/llvm/lib/MC/MCObjectFileInfo.cpp
@@ -56,8 +56,8 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
TLSThreadInitSection
= Ctx->getMachOSection("__DATA", "__thread_init",
- MCSectionMachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS,
- SectionKind::getDataRel());
+ MCSectionMachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS,
+ SectionKind::getDataRel());
CStringSection // .cstring
= Ctx->getMachOSection("__TEXT", "__cstring",
@@ -152,6 +152,24 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
SectionKind::getReadOnly());
// Debug Information.
+ DwarfAccelNamesSection =
+ Ctx->getMachOSection("__DWARF", "__apple_names",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
+ DwarfAccelObjCSection =
+ Ctx->getMachOSection("__DWARF", "__apple_objc",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
+  // Mach-O section names are limited to 16 characters...
+ DwarfAccelNamespaceSection =
+ Ctx->getMachOSection("__DWARF", "__apple_namespac",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
+ DwarfAccelTypesSection =
+ Ctx->getMachOSection("__DWARF", "__apple_types",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
+
DwarfAbbrevSection =
Ctx->getMachOSection("__DWARF", "__debug_abbrev",
MCSectionMachO::S_ATTR_DEBUG,
@@ -168,10 +186,6 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
Ctx->getMachOSection("__DWARF", "__debug_frame",
MCSectionMachO::S_ATTR_DEBUG,
SectionKind::getMetadata());
- DwarfPubNamesSection =
- Ctx->getMachOSection("__DWARF", "__debug_pubnames",
- MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
DwarfPubTypesSection =
Ctx->getMachOSection("__DWARF", "__debug_pubtypes",
MCSectionMachO::S_ATTR_DEBUG,
@@ -207,8 +221,8 @@ void MCObjectFileInfo::InitMachOMCObjectFileInfo(Triple T) {
void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
if (T.getArch() == Triple::x86) {
PersonalityEncoding = (RelocM == Reloc::PIC_)
- ? dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
- : dwarf::DW_EH_PE_absptr;
+ ? dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
+ : dwarf::DW_EH_PE_absptr;
LSDAEncoding = (RelocM == Reloc::PIC_)
? dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
: dwarf::DW_EH_PE_absptr;
@@ -216,8 +230,8 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
? dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
: dwarf::DW_EH_PE_absptr;
TTypeEncoding = (RelocM == Reloc::PIC_)
- ? dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
- : dwarf::DW_EH_PE_absptr;
+ ? dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4
+ : dwarf::DW_EH_PE_absptr;
} else if (T.getArch() == Triple::x86_64) {
FDECFIEncoding = dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4;
@@ -244,10 +258,22 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
}
}
+  // Solaris requires different flags for .eh_frame than seemingly every
+  // other platform.
+ EHSectionType = ELF::SHT_PROGBITS;
+ EHSectionFlags = ELF::SHF_ALLOC;
+ if (T.getOS() == Triple::Solaris) {
+ if (T.getArch() == Triple::x86_64)
+ EHSectionType = ELF::SHT_X86_64_UNWIND;
+ else
+ EHSectionFlags |= ELF::SHF_WRITE;
+ }
+
+
// ELF
BSSSection =
Ctx->getELFSection(".bss", ELF::SHT_NOBITS,
- ELF::SHF_WRITE |ELF::SHF_ALLOC,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC,
SectionKind::getBSS());
TextSection =
@@ -347,15 +373,13 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
DwarfFrameSection =
Ctx->getELFSection(".debug_frame", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
- DwarfPubNamesSection =
- Ctx->getELFSection(".debug_pubnames", ELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
DwarfPubTypesSection =
Ctx->getELFSection(".debug_pubtypes", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
DwarfStrSection =
- Ctx->getELFSection(".debug_str", ELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ Ctx->getELFSection(".debug_str", ELF::SHT_PROGBITS,
+ ELF::SHF_MERGE | ELF::SHF_STRINGS,
+ SectionKind::getMergeable1ByteCString());
DwarfLocSection =
Ctx->getELFSection(".debug_loc", ELF::SHT_PROGBITS, 0,
SectionKind::getMetadata());
@@ -390,12 +414,22 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
COFF::IMAGE_SCN_MEM_READ,
SectionKind::getReadOnly());
- StaticCtorSection =
- Ctx->getCOFFSection(".ctors",
- COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- COFF::IMAGE_SCN_MEM_READ |
- COFF::IMAGE_SCN_MEM_WRITE,
- SectionKind::getDataRel());
+ if (T.getOS() == Triple::Win32) {
+ StaticCtorSection =
+ Ctx->getCOFFSection(".CRT$XCU",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getReadOnly());
+ } else {
+ StaticCtorSection =
+ Ctx->getCOFFSection(".ctors",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
+ }
+
+
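For background on the Win32 branch above: function pointers placed in the .CRT$XCU section are invoked by the MSVC CRT at startup, between its __xc_a and __xc_z sentinels, which is what makes that section a suitable home for static constructors. A hedged, MSVC-specific sketch of planting such a pointer by hand (hypothetical function names):

#include <cstdio>

static void my_init(void) { std::puts("runs before main"); }

// MSVC extensions: declare the section, then allocate the pointer in it.
#pragma section(".CRT$XCU", read)
__declspec(allocate(".CRT$XCU")) static void (*p_my_init)(void) = my_init;

int main() { return 0; }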
StaticDtorSection =
Ctx->getCOFFSection(".dtors",
COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
@@ -434,11 +468,6 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
COFF::IMAGE_SCN_MEM_DISCARDABLE |
COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
- DwarfPubNamesSection =
- Ctx->getCOFFSection(".debug_pubnames",
- COFF::IMAGE_SCN_MEM_DISCARDABLE |
- COFF::IMAGE_SCN_MEM_READ,
- SectionKind::getMetadata());
DwarfPubTypesSection =
Ctx->getCOFFSection(".debug_pubtypes",
COFF::IMAGE_SCN_MEM_DISCARDABLE |
@@ -488,6 +517,12 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) {
COFF::IMAGE_SCN_MEM_READ |
COFF::IMAGE_SCN_MEM_WRITE,
SectionKind::getDataRel());
+ TLSDataSection =
+ Ctx->getCOFFSection(".tls$",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
}
void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model relocm,
@@ -505,8 +540,12 @@ void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model relocm,
PersonalityEncoding = LSDAEncoding = FDEEncoding = FDECFIEncoding =
TTypeEncoding = dwarf::DW_EH_PE_absptr;
- EHFrameSection = 0; // Created on demand.
- CompactUnwindSection = 0; // Used only by selected targets.
+ EHFrameSection = 0; // Created on demand.
+ CompactUnwindSection = 0; // Used only by selected targets.
+ DwarfAccelNamesSection = 0; // Used only by selected targets.
+ DwarfAccelObjCSection = 0; // Used only by selected targets.
+ DwarfAccelNamespaceSection = 0; // Used only by selected targets.
+ DwarfAccelTypesSection = 0; // Used only by selected targets.
Triple T(TT);
Triple::ArchType Arch = T.getArch();
@@ -541,8 +580,8 @@ void MCObjectFileInfo::InitEHFrameSection() {
SectionKind::getReadOnly());
else if (Env == IsELF)
EHFrameSection =
- Ctx->getELFSection(".eh_frame", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC,
+ Ctx->getELFSection(".eh_frame", EHSectionType,
+ EHSectionFlags,
SectionKind::getDataRel());
else
EHFrameSection =
diff --git a/contrib/llvm/lib/MC/MCObjectStreamer.cpp b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
index a04ae08..bad7cfe 100644
--- a/contrib/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
@@ -7,17 +7,17 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCObjectStreamer.h"
-
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
MCObjectStreamer::MCObjectStreamer(MCContext &Context, MCAsmBackend &TAB,
@@ -105,6 +105,14 @@ void MCObjectStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size,
DF->getContents().resize(DF->getContents().size() + Size, 0);
}
+void MCObjectStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) {
+ RecordProcStart(Frame);
+}
+
+void MCObjectStreamer::EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
+ RecordProcEnd(Frame);
+}
+
void MCObjectStreamer::EmitLabel(MCSymbol *Symbol) {
MCStreamer::EmitLabel(Symbol);
@@ -164,7 +172,7 @@ void MCObjectStreamer::EmitInstruction(const MCInst &Inst) {
MCLineEntry::Make(this, getCurrentSection());
// If this instruction doesn't need relaxation, just emit it as data.
- if (!getAssembler().getBackend().MayNeedRelaxation(Inst)) {
+ if (!getAssembler().getBackend().mayNeedRelaxation(Inst)) {
EmitInstToData(Inst);
return;
}
@@ -173,9 +181,9 @@ void MCObjectStreamer::EmitInstruction(const MCInst &Inst) {
// possible and emit it as data.
if (getAssembler().getRelaxAll()) {
MCInst Relaxed;
- getAssembler().getBackend().RelaxInstruction(Inst, Relaxed);
- while (getAssembler().getBackend().MayNeedRelaxation(Relaxed))
- getAssembler().getBackend().RelaxInstruction(Relaxed, Relaxed);
+ getAssembler().getBackend().relaxInstruction(Inst, Relaxed);
+ while (getAssembler().getBackend().mayNeedRelaxation(Relaxed))
+ getAssembler().getBackend().relaxInstruction(Relaxed, Relaxed);
EmitInstToData(Relaxed);
return;
}
@@ -224,12 +232,12 @@ void MCObjectStreamer::EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
new MCDwarfCallFrameFragment(*AddrDelta, getCurrentSectionData());
}
-void MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value) {
+bool MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value) {
int64_t Res;
if (Offset->EvaluateAsAbsolute(Res, getAssembler())) {
new MCOrgFragment(*Offset, Value, getCurrentSectionData());
- return;
+ return false;
}
MCSymbol *CurrentPos = getContext().CreateTempSymbol();
@@ -241,14 +249,30 @@ void MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset,
MCBinaryExpr::Create(MCBinaryExpr::Sub, Offset, Ref, getContext());
if (!Delta->EvaluateAsAbsolute(Res, getAssembler()))
- report_fatal_error("expected assembly-time absolute expression");
+ return true;
EmitFill(Res, Value, 0);
+ return false;
}
-void MCObjectStreamer::Finish() {
+// Associate a GPRel32 fixup with the data and resize the data area.
+void MCObjectStreamer::EmitGPRel32Value(const MCExpr *Value) {
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ DF->addFixup(MCFixup::Create(DF->getContents().size(),
+ Value,
+ FK_GPRel_4));
+ DF->getContents().resize(DF->getContents().size() + 4, 0);
+}
+
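EmitGPRel32Value above follows the usual object-emission idiom: record a fixup at the current end of the fragment, then reserve zeroed space for the value to be patched later. The idiom in isolation (hypothetical types):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Fixup { size_t Offset; /* kind, expression, ... */ };

static void emitPlaceholder(std::vector<uint8_t> &Bytes,
                            std::vector<Fixup> &Fixups, unsigned Size) {
  Fixup F;
  F.Offset = Bytes.size();              // remember the patch location
  Fixups.push_back(F);
  Bytes.resize(Bytes.size() + Size, 0); // reserve zeroed space for the value
}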
+void MCObjectStreamer::FinishImpl() {
// Dump out the dwarf file & directory tables and line tables.
+ const MCSymbol *LineSectionSymbol = NULL;
if (getContext().hasDwarfFiles())
- MCDwarfFileTable::Emit(this);
+ LineSectionSymbol = MCDwarfFileTable::Emit(this);
+
+  // If we are generating dwarf for assembly source files, dump out the sections.
+ if (getContext().getGenDwarfForAssembly())
+ MCGenDwarfInfo::Emit(this, LineSectionSymbol);
getAssembler().Finish();
}
diff --git a/contrib/llvm/lib/MC/MCObjectWriter.cpp b/contrib/llvm/lib/MC/MCObjectWriter.cpp
index efe9f68..030f247 100644
--- a/contrib/llvm/lib/MC/MCObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MCObjectWriter.cpp
@@ -33,14 +33,22 @@ void MCObjectWriter::EncodeSLEB128(int64_t Value, raw_ostream &OS) {
}
/// Utility function to encode a ULEB128 value.
-void MCObjectWriter::EncodeULEB128(uint64_t Value, raw_ostream &OS) {
+void MCObjectWriter::EncodeULEB128(uint64_t Value, raw_ostream &OS,
+ unsigned Padding) {
do {
uint8_t Byte = Value & 0x7f;
Value >>= 7;
- if (Value != 0)
+ if (Value != 0 || Padding != 0)
Byte |= 0x80; // Mark this byte to show that more bytes will follow.
OS << char(Byte);
} while (Value != 0);
+
+ // Pad with 0x80 and emit a null byte at the end.
+ if (Padding != 0) {
+ for (; Padding != 1; --Padding)
+ OS << '\x80';
+ OS << '\x00';
+ }
}
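The new Padding parameter lets a ULEB128 occupy a fixed width: the continuation bit stays set through the pad bytes and a trailing 0x00 terminates the value, so the padded form decodes to the same number. A standalone sketch of the same algorithm:

#include <cstdint>
#include <string>

// Encode Value as ULEB128; if Padding is nonzero, keep continuation bits set
// and append 0x80 pad bytes plus a final 0x00 terminator.
static std::string encodeULEB128(uint64_t Value, unsigned Padding = 0) {
  std::string Out;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value != 0 || Padding != 0)
      Byte |= 0x80; // more bytes follow
    Out += char(Byte);
  } while (Value != 0);
  if (Padding != 0) {
    for (; Padding != 1; --Padding)
      Out += '\x80';
    Out += '\x00';
  }
  return Out;
}

For example, encodeULEB128(1, 2) yields the bytes 0x81 0x80 0x00, which still decode to 1.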
bool
@@ -60,6 +68,8 @@ MCObjectWriter::IsSymbolRefDifferenceFullyResolved(const MCAssembler &Asm,
const MCSymbolData &DataA = Asm.getSymbolData(SA);
const MCSymbolData &DataB = Asm.getSymbolData(SB);
+ if(!DataA.getFragment() || !DataB.getFragment())
+ return false;
return IsSymbolRefDifferenceFullyResolvedImpl(Asm, DataA,
*DataB.getFragment(),
diff --git a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
index 1648757..2d61cac 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -14,7 +14,6 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -30,6 +29,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
@@ -122,6 +122,9 @@ private:
int64_t CppHashLineNumber;
SMLoc CppHashLoc;
+  /// AssemblerDialect. ~0U means unset; use the value provided by MAI.
+ unsigned AssemblerDialect;
+
public:
AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
const MCAsmInfo &MAI);
@@ -143,9 +146,20 @@ public:
virtual MCAsmLexer &getLexer() { return Lexer; }
virtual MCContext &getContext() { return Ctx; }
virtual MCStreamer &getStreamer() { return Out; }
+ virtual unsigned getAssemblerDialect() {
+ if (AssemblerDialect == ~0U)
+ return MAI.getAssemblerDialect();
+ else
+ return AssemblerDialect;
+ }
+ virtual void setAssemblerDialect(unsigned i) {
+ AssemblerDialect = i;
+ }
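The dialect accessors use ~0U as an "unset" sentinel, deferring to the MCAsmInfo default until setAssemblerDialect is called. The pattern in isolation (hypothetical class):

// Sentinel-based optional setting: ~0U means "not set, use the default".
class DialectSetting {
  unsigned Dialect;
public:
  DialectSetting() : Dialect(~0U) {}
  void set(unsigned D) { Dialect = D; }
  unsigned get(unsigned Default) const {
    return Dialect == ~0U ? Default : Dialect;
  }
};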
- virtual bool Warning(SMLoc L, const Twine &Msg);
- virtual bool Error(SMLoc L, const Twine &Msg);
+ virtual bool Warning(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>());
+ virtual bool Error(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>());
const AsmToken &Lex();
@@ -171,14 +185,17 @@ private:
void HandleMacroExit();
void PrintMacroInstantiations();
- void PrintMessage(SMLoc Loc, const Twine &Msg, const char *Type,
- bool ShowLine = true) const {
- SrcMgr.PrintMessage(Loc, Msg, Type, ShowLine);
+ void PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) const {
+ SrcMgr.PrintMessage(Loc, Kind, Msg, Ranges);
}
static void DiagHandler(const SMDiagnostic &Diag, void *Context);
/// EnterIncludeFile - Enter the specified file. This returns true on failure.
bool EnterIncludeFile(const std::string &Filename);
+ /// ProcessIncbinFile - Process the specified file for the .incbin directive.
+ /// This returns true on failure.
+ bool ProcessIncbinFile(const std::string &Filename);
/// \brief Reset the current lexer position to that given by \arg Loc. The
/// current token is not set; clients should ensure Lex() is called
@@ -225,6 +242,7 @@ private:
bool ParseDirectiveAbort(); // ".abort"
bool ParseDirectiveInclude(); // ".include"
+ bool ParseDirectiveIncbin(); // ".incbin"
bool ParseDirectiveIf(SMLoc DirectiveLoc); // ".if"
// ".ifdef" or ".ifndef", depending on expect_defined
@@ -295,6 +313,12 @@ public:
&GenericAsmParser::ParseDirectiveCFIRestoreState>(".cfi_restore_state");
AddDirectiveHandler<
&GenericAsmParser::ParseDirectiveCFISameValue>(".cfi_same_value");
+ AddDirectiveHandler<
+ &GenericAsmParser::ParseDirectiveCFIRestore>(".cfi_restore");
+ AddDirectiveHandler<
+ &GenericAsmParser::ParseDirectiveCFIEscape>(".cfi_escape");
+ AddDirectiveHandler<
+ &GenericAsmParser::ParseDirectiveCFISignalFrame>(".cfi_signal_frame");
// Macro directives.
AddDirectiveHandler<&GenericAsmParser::ParseDirectiveMacrosOnOff>(
@@ -328,6 +352,9 @@ public:
bool ParseDirectiveCFIRememberState(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveCFIRestoreState(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveCFISameValue(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIRestore(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFIEscape(StringRef, SMLoc DirectiveLoc);
+ bool ParseDirectiveCFISignalFrame(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveMacrosOnOff(StringRef, SMLoc DirectiveLoc);
bool ParseDirectiveMacro(StringRef, SMLoc DirectiveLoc);
@@ -352,7 +379,8 @@ AsmParser::AsmParser(SourceMgr &_SM, MCContext &_Ctx,
MCStreamer &_Out, const MCAsmInfo &_MAI)
: Lexer(_MAI), Ctx(_Ctx), Out(_Out), MAI(_MAI), SrcMgr(_SM),
GenericParser(new GenericAsmParser), PlatformParser(0),
- CurBuffer(0), MacrosEnabled(true), CppHashLineNumber(0) {
+ CurBuffer(0), MacrosEnabled(true), CppHashLineNumber(0),
+ AssemblerDialect(~0U) {
// Save the old handler.
SavedDiagHandler = SrcMgr.getDiagHandler();
SavedDiagContext = SrcMgr.getDiagContext();
@@ -395,21 +423,21 @@ void AsmParser::PrintMacroInstantiations() {
// Print the active macro instantiation stack.
for (std::vector<MacroInstantiation*>::const_reverse_iterator
it = ActiveMacros.rbegin(), ie = ActiveMacros.rend(); it != ie; ++it)
- PrintMessage((*it)->InstantiationLoc, "while in macro instantiation",
- "note");
+ PrintMessage((*it)->InstantiationLoc, SourceMgr::DK_Note,
+ "while in macro instantiation");
}
-bool AsmParser::Warning(SMLoc L, const Twine &Msg) {
+bool AsmParser::Warning(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges) {
if (FatalAssemblerWarnings)
- return Error(L, Msg);
- PrintMessage(L, Msg, "warning");
+ return Error(L, Msg, Ranges);
+ PrintMessage(L, SourceMgr::DK_Warning, Msg, Ranges);
PrintMacroInstantiations();
return false;
}
-bool AsmParser::Error(SMLoc L, const Twine &Msg) {
+bool AsmParser::Error(SMLoc L, const Twine &Msg, ArrayRef<SMRange> Ranges) {
HadError = true;
- PrintMessage(L, Msg, "error");
+ PrintMessage(L, SourceMgr::DK_Error, Msg, Ranges);
PrintMacroInstantiations();
return true;
}
@@ -427,6 +455,21 @@ bool AsmParser::EnterIncludeFile(const std::string &Filename) {
return false;
}
+/// Process the specified .incbin file by searching for it in the include
+/// paths, then just emitting the byte contents of the file to the streamer. This
+/// returns true on failure.
+bool AsmParser::ProcessIncbinFile(const std::string &Filename) {
+ std::string IncludedFile;
+ int NewBuf = SrcMgr.AddIncludeFile(Filename, Lexer.getLoc(), IncludedFile);
+ if (NewBuf == -1)
+ return true;
+
+ // Pick up the bytes from the file and emit them.
+ getStreamer().EmitBytes(SrcMgr.getMemoryBuffer(NewBuf)->getBuffer(),
+ DEFAULT_ADDRSPACE);
+ return false;
+}
+
void AsmParser::JumpToLoc(SMLoc Loc) {
CurBuffer = SrcMgr.FindBufferContainingLoc(Loc);
Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer), Loc.getPointer());
@@ -462,6 +505,17 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
HadError = false;
AsmCond StartingCondState = TheCondState;
+  // If we are generating dwarf for assembly source files, save the initial
+  // text section and generate a .file directive.
+ if (getContext().getGenDwarfForAssembly()) {
+ getContext().setGenDwarfSection(getStreamer().getCurrentSection());
+ MCSymbol *SectionStartSym = getContext().CreateTempSymbol();
+ getStreamer().EmitLabel(SectionStartSym);
+ getContext().setGenDwarfSectionStartSym(SectionStartSym);
+ getStreamer().EmitDwarfFileDirective(getContext().nextGenDwarfFileNumber(),
+ StringRef(), SrcMgr.getMemoryBuffer(CurBuffer)->getBufferIdentifier());
+ }
+
// While we have input, parse each statement.
while (Lexer.isNot(AsmToken::Eof)) {
if (!ParseStatement()) continue;
@@ -501,8 +555,9 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
// FIXME: We would really like to refer back to where the symbol was
// first referenced for a source location. We need to add something
// to track that. Currently, we just point to the end of the file.
- PrintMessage(getLexer().getLoc(), "assembler local symbol '" +
- Sym->getName() + "' not defined", "error", false);
+ PrintMessage(getLexer().getLoc(), SourceMgr::DK_Error,
+ "assembler local symbol '" + Sym->getName() +
+ "' not defined");
}
}
@@ -749,8 +804,7 @@ AsmParser::ApplyModifierToExpr(const MCExpr *E,
}
}
- assert(0 && "Invalid expression kind!");
- return 0;
+ llvm_unreachable("Invalid expression kind!");
}
/// ParseExpression - Parse an expression and return it.
@@ -787,7 +841,6 @@ bool AsmParser::ParseExpression(const MCExpr *&Res, SMLoc &EndLoc) {
if (!ModifiedRes) {
return TokError("invalid modifier '" + getTok().getIdentifier() +
"' (no symbols present)");
- return true;
}
Res = ModifiedRes;
@@ -1036,6 +1089,12 @@ bool AsmParser::ParseStatement() {
// Emit the label.
Out.EmitLabel(Sym);
+ // If we are generating dwarf for assembly source files then gather the
+ // info to make a dwarf label entry for this label if needed.
+ if (getContext().getGenDwarfForAssembly())
+ MCGenDwarfLabelEntry::Make(Sym, &getStreamer(), getSourceManager(),
+ IDLoc);
+
// Consume any end of statement token, if present, to avoid spurious
// AddBlankLine calls().
if (Lexer.is(AsmToken::EndOfStatement)) {
@@ -1163,6 +1222,8 @@ bool AsmParser::ParseStatement() {
return ParseDirectiveAbort();
if (IDVal == ".include")
return ParseDirectiveInclude();
+ if (IDVal == ".incbin")
+ return ParseDirectiveIncbin();
if (IDVal == ".code16")
return TokError(Twine(IDVal) + " not supported yet");
@@ -1205,7 +1266,19 @@ bool AsmParser::ParseStatement() {
}
OS << "]";
- PrintMessage(IDLoc, OS.str(), "note");
+ PrintMessage(IDLoc, SourceMgr::DK_Note, OS.str());
+ }
+
+  // If we are generating dwarf for assembly source files and the current
+  // section is the initial text section, then generate a .loc directive for
+  // the instruction.
+ if (!HadError && getContext().getGenDwarfForAssembly() &&
+      getContext().getGenDwarfSection() == getStreamer().getCurrentSection()) {
+  // Pointers to Thumb symbols need to have their low bit set to allow
+  // for interworking.
+ 0, DWARF2_LINE_DEFAULT_IS_STMT ?
+ DWARF2_FLAG_IS_STMT : 0, 0, 0,
+ StringRef());
}
// If parsing succeeded, match the instruction.
@@ -1294,7 +1367,7 @@ void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
if (Parser->SavedDiagHandler)
Parser->SavedDiagHandler(Diag, Parser->SavedDiagContext);
else
- Diag.Print(0, OS);
+ Diag.print(0, OS);
return;
}
@@ -1309,19 +1382,15 @@ void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) {
int LineNo = Parser->CppHashLineNumber - 1 +
(DiagLocLineNo - CppHashLocLineNo);
- SMDiagnostic NewDiag(*Diag.getSourceMgr(),
- Diag.getLoc(),
- Filename,
- LineNo,
- Diag.getColumnNo(),
- Diag.getMessage(),
- Diag.getLineContents(),
- Diag.getShowLine());
+ SMDiagnostic NewDiag(*Diag.getSourceMgr(), Diag.getLoc(),
+ Filename, LineNo, Diag.getColumnNo(),
+ Diag.getKind(), Diag.getMessage(),
+ Diag.getLineContents(), Diag.getRanges());
if (Parser->SavedDiagHandler)
Parser->SavedDiagHandler(NewDiag, Parser->SavedDiagContext);
else
- NewDiag.Print(0, OS);
+ NewDiag.print(0, OS);
}
bool AsmParser::expandMacro(SmallString<256> &Buf, StringRef Body,
@@ -1458,6 +1527,11 @@ bool AsmParser::HandleMacroEntry(StringRef Name, SMLoc NameLoc,
}
Lex();
}
+ // If there weren't any arguments, erase the token vector so everything
+ // else knows that. Leaving around the vestigal empty token list confuses
+ // things.
+ if (MacroArguments.size() == 1 && MacroArguments.back().empty())
+ MacroArguments.clear();
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.
@@ -1495,23 +1569,27 @@ void AsmParser::HandleMacroExit() {
ActiveMacros.pop_back();
}
-static void MarkUsed(const MCExpr *Value) {
+static bool IsUsedIn(const MCSymbol *Sym, const MCExpr *Value) {
switch (Value->getKind()) {
- case MCExpr::Binary:
- MarkUsed(static_cast<const MCBinaryExpr*>(Value)->getLHS());
- MarkUsed(static_cast<const MCBinaryExpr*>(Value)->getRHS());
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = static_cast<const MCBinaryExpr*>(Value);
+ return IsUsedIn(Sym, BE->getLHS()) || IsUsedIn(Sym, BE->getRHS());
break;
+ }
case MCExpr::Target:
case MCExpr::Constant:
- break;
+ return false;
case MCExpr::SymbolRef: {
- static_cast<const MCSymbolRefExpr*>(Value)->getSymbol().setUsed(true);
- break;
+ const MCSymbol &S = static_cast<const MCSymbolRefExpr*>(Value)->getSymbol();
+ if (S.isVariable())
+ return IsUsedIn(Sym, S.getVariableValue());
+ return &S == Sym;
}
case MCExpr::Unary:
- MarkUsed(static_cast<const MCUnaryExpr*>(Value)->getSubExpr());
- break;
+ return IsUsedIn(Sym, static_cast<const MCUnaryExpr*>(Value)->getSubExpr());
}
+
+ llvm_unreachable("Unknown expr kind!");
}
bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef) {
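
The rewrite above turns a blanket setUsed() mark into a query: does Sym occur
anywhere in Value, chasing variable symbols through their definitions? Reduced
to a toy expression type (illustrative only, not the MCExpr API), the
recursion is:

    #include <string>

    struct Expr {
      enum Kind { Constant, Symbol, Unary, Binary } K;
      std::string Name;                          // set when K == Symbol
      const Expr *LHS = nullptr, *RHS = nullptr; // children of Unary/Binary
    };

    // True if Sym occurs anywhere in E. The real SymbolRef case also
    // recurses into a variable symbol's defining expression.
    static bool IsUsedIn(const std::string &Sym, const Expr *E) {
      switch (E->K) {
      case Expr::Constant: return false;
      case Expr::Symbol:   return E->Name == Sym;
      case Expr::Unary:    return IsUsedIn(Sym, E->LHS);
      case Expr::Binary:   return IsUsedIn(Sym, E->LHS) || IsUsedIn(Sym, E->RHS);
      }
      return false; // unreachable; placates -Wreturn-type
    }

This is what lets ParseAssignment (next hunk) reject "a = a + 1" up front
instead of producing an expression that can never be laid out.
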
@@ -1522,7 +1600,9 @@ bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef) {
if (ParseExpression(Value))
return true;
- MarkUsed(Value);
+ // Note: we don't count b as used in "a = b". This is to allow
+ // a = b
+ // b = c
if (Lexer.isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in assignment");
@@ -1544,8 +1624,12 @@ bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef) {
//
// FIXME: Diagnostics. Note the location of the definition as a label.
// FIXME: Diagnose assignment to protected identifier (e.g., register name).
- if (Sym->isUndefined() && !Sym->isUsed() && !Sym->isVariable())
+ if (IsUsedIn(Sym, Value))
+ return Error(EqualLoc, "Recursive use of '" + Name + "'");
+ else if (Sym->isUndefined() && !Sym->isUsed() && !Sym->isVariable())
; // Allow redefinitions of undefined symbols only used in directives.
+ else if (Sym->isVariable() && !Sym->isUsed() && allow_redef)
+ ; // Allow redefinitions of variables that haven't yet been used.
else if (!Sym->isUndefined() && (!Sym->isVariable() || !allow_redef))
return Error(EqualLoc, "redefinition of '" + Name + "'");
else if (!Sym->isVariable())
@@ -1912,6 +1996,7 @@ bool AsmParser::ParseDirectiveOrg() {
CheckForValidSection();
const MCExpr *Offset;
+ SMLoc Loc = getTok().getLoc();
if (ParseExpression(Offset))
return true;
@@ -1931,9 +2016,11 @@ bool AsmParser::ParseDirectiveOrg() {
Lex();
- // FIXME: Only limited forms of relocatable expressions are accepted here, it
- // has to be relative to the current section.
- getStreamer().EmitValueToOffset(Offset, FillExpr);
+ // Only limited forms of relocatable expressions are accepted here, it
+ // has to be relative to the current section. The streamer will return
+ // 'true' if the expression wasn't evaluatable.
+ if (getStreamer().EmitValueToOffset(Offset, FillExpr))
+ return Error(Loc, "expected assembly-time absolute expression");
return false;
}
@@ -2178,6 +2265,31 @@ bool AsmParser::ParseDirectiveInclude() {
return false;
}
+/// ParseDirectiveIncbin
+/// ::= .incbin "filename"
+bool AsmParser::ParseDirectiveIncbin() {
+ if (getLexer().isNot(AsmToken::String))
+ return TokError("expected string in '.incbin' directive");
+
+ std::string Filename = getTok().getString();
+ SMLoc IncbinLoc = getLexer().getLoc();
+ Lex();
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.incbin' directive");
+
+ // Strip the quotes.
+ Filename = Filename.substr(1, Filename.size()-2);
+
+ // Attempt to process the included file.
+ if (ProcessIncbinFile(Filename)) {
+ Error(IncbinLoc, "Could not find incbin file '" + Filename + "'");
+ return true;
+ }
+
+ return false;
+}
+
/// ParseDirectiveIf
/// ::= .if expression
bool AsmParser::ParseDirectiveIf(SMLoc DirectiveLoc) {
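
.incbin is the classic "splice a binary file into the output" directive.
ProcessIncbinFile itself is outside this hunk; under the obvious reading it
reduces to the following sketch (EmitIncbin is a made-up name and the
semantics are assumed, not taken from the implementation):

    #include <cstdint>
    #include <fstream>
    #include <iterator>
    #include <string>
    #include <vector>

    // Read Filename and append its raw bytes to the current section's data.
    // Returns true on error, matching the parser's convention above.
    static bool EmitIncbin(const std::string &Filename,
                           std::vector<uint8_t> &Section) {
      std::ifstream In(Filename, std::ios::binary);
      if (!In)
        return true;
      Section.insert(Section.end(), std::istreambuf_iterator<char>(In),
                     std::istreambuf_iterator<char>());
      return false;
    }
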
@@ -2305,7 +2417,8 @@ bool AsmParser::ParseDirectiveEndIf(SMLoc DirectiveLoc) {
}
/// ParseDirectiveFile
-/// ::= .file [number] string
+/// ::= .file [number] filename
+/// ::= .file number directory filename
bool GenericAsmParser::ParseDirectiveFile(StringRef, SMLoc DirectiveLoc) {
// FIXME: I'm not sure what this is.
int64_t FileNumber = -1;
@@ -2321,17 +2434,35 @@ bool GenericAsmParser::ParseDirectiveFile(StringRef, SMLoc DirectiveLoc) {
if (getLexer().isNot(AsmToken::String))
return TokError("unexpected token in '.file' directive");
- StringRef Filename = getTok().getString();
- Filename = Filename.substr(1, Filename.size()-2);
+ // Usually the directory and filename together, otherwise just the directory.
+ StringRef Path = getTok().getString();
+ Path = Path.substr(1, Path.size()-2);
Lex();
+ StringRef Directory;
+ StringRef Filename;
+ if (getLexer().is(AsmToken::String)) {
+ if (FileNumber == -1)
+ return TokError("explicit path specified, but no file number");
+ Filename = getTok().getString();
+ Filename = Filename.substr(1, Filename.size()-2);
+ Directory = Path;
+ Lex();
+ } else {
+ Filename = Path;
+ }
+
if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.file' directive");
if (FileNumber == -1)
getStreamer().EmitFileDirective(Filename);
else {
- if (getStreamer().EmitDwarfFileDirective(FileNumber, Filename))
+ if (getContext().getGenDwarfForAssembly() == true)
+ Error(DirectiveLoc, "input can't have .file dwarf directives when -g is "
+ "used to generate dwarf debug info for assembly code");
+
+ if (getStreamer().EmitDwarfFileDirective(FileNumber, Directory, Filename))
Error(FileNumberLoc, "file number already allocated");
}
@@ -2719,6 +2850,56 @@ bool GenericAsmParser::ParseDirectiveCFISameValue(StringRef IDVal,
return false;
}
+/// ParseDirectiveCFIRestore
+/// ::= .cfi_restore register
+bool GenericAsmParser::ParseDirectiveCFIRestore(StringRef IDVal,
+ SMLoc DirectiveLoc) {
+ int64_t Register = 0;
+ if (ParseRegisterOrRegisterNumber(Register, DirectiveLoc))
+ return true;
+
+ getStreamer().EmitCFIRestore(Register);
+
+ return false;
+}
+
+/// ParseDirectiveCFIEscape
+/// ::= .cfi_escape expression[,...]
+bool GenericAsmParser::ParseDirectiveCFIEscape(StringRef IDVal,
+ SMLoc DirectiveLoc) {
+ std::string Values;
+ int64_t CurrValue;
+ if (getParser().ParseAbsoluteExpression(CurrValue))
+ return true;
+
+ Values.push_back((uint8_t)CurrValue);
+
+ while (getLexer().is(AsmToken::Comma)) {
+ Lex();
+
+ if (getParser().ParseAbsoluteExpression(CurrValue))
+ return true;
+
+ Values.push_back((uint8_t)CurrValue);
+ }
+
+ getStreamer().EmitCFIEscape(Values);
+ return false;
+}
+
+/// ParseDirectiveCFISignalFrame
+/// ::= .cfi_signal_frame
+bool GenericAsmParser::ParseDirectiveCFISignalFrame(StringRef Directive,
+ SMLoc DirectiveLoc) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return Error(getLexer().getLoc(),
+ "unexpected token in '" + Directive + "' directive");
+
+ getStreamer().EmitCFISignalFrame();
+
+ return false;
+}
+
/// ParseDirectiveMacrosOnOff
/// ::= .macros_on
/// ::= .macros_off
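
Of the three CFI directives added above, .cfi_escape has the only nontrivial
payload: each comma-separated absolute expression is narrowed to a byte, and
the bytes travel as an opaque string that is later copied verbatim into the
frame's instruction stream. The packing in miniature (toy code; byte values
illustrative):

    #include <cstdint>
    #include <initializer_list>
    #include <string>

    // Mirrors the loop in ParseDirectiveCFIEscape: narrow each value to
    // uint8_t and append it to the raw byte string for EmitCFIEscape.
    static std::string PackCFIEscape(std::initializer_list<int64_t> Vals) {
      std::string Values;
      for (int64_t V : Vals)
        Values.push_back(static_cast<char>(static_cast<uint8_t>(V)));
      return Values;
    }
    // PackCFIEscape({0x2e, 0x10}) yields two raw DWARF CFA bytes.
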
diff --git a/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp
index 185b516..c4cdc3c 100644
--- a/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/COFFAsmParser.cpp
@@ -45,6 +45,7 @@ class COFFAsmParser : public MCAsmParserExtension {
AddDirectiveHandler<&COFFAsmParser::ParseDirectiveScl>(".scl");
AddDirectiveHandler<&COFFAsmParser::ParseDirectiveType>(".type");
AddDirectiveHandler<&COFFAsmParser::ParseDirectiveEndef>(".endef");
+ AddDirectiveHandler<&COFFAsmParser::ParseDirectiveSecRel32>(".secrel32");
// Win64 EH directives.
AddDirectiveHandler<&COFFAsmParser::ParseSEHDirectiveStartProc>(
@@ -102,6 +103,7 @@ class COFFAsmParser : public MCAsmParserExtension {
bool ParseDirectiveScl(StringRef, SMLoc);
bool ParseDirectiveType(StringRef, SMLoc);
bool ParseDirectiveEndef(StringRef, SMLoc);
+ bool ParseDirectiveSecRel32(StringRef, SMLoc);
// Win64 EH directives.
bool ParseSEHDirectiveStartProc(StringRef, SMLoc);
@@ -217,6 +219,21 @@ bool COFFAsmParser::ParseDirectiveEndef(StringRef, SMLoc) {
return false;
}
+bool COFFAsmParser::ParseDirectiveSecRel32(StringRef, SMLoc) {
+ StringRef SymbolID;
+ if (getParser().ParseIdentifier(SymbolID))
+ return true;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in directive");
+
+ MCSymbol *Symbol = getContext().GetOrCreateSymbol(SymbolID);
+
+ Lex();
+ getStreamer().EmitCOFFSecRel32(Symbol);
+ return false;
+}
+
bool COFFAsmParser::ParseSEHDirectiveStartProc(StringRef, SMLoc) {
StringRef SymbolID;
if (getParser().ParseIdentifier(SymbolID))
diff --git a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index d891126..ffc400b 100644
--- a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -476,6 +476,7 @@ bool ELFAsmParser::ParseDirectiveType(StringRef, SMLoc) {
.Case("common", MCSA_ELF_TypeCommon)
.Case("notype", MCSA_ELF_TypeNoType)
.Case("gnu_unique_object", MCSA_ELF_TypeGnuUniqueObject)
+ .Case("gnu_indirect_function", MCSA_ELF_TypeIndFunction)
.Default(MCSA_Invalid);
if (Attr == MCSA_Invalid)
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
index dceece7..3a3ff14 100644
--- a/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
@@ -25,3 +25,7 @@ SMLoc MCAsmLexer::getLoc() const {
SMLoc AsmToken::getLoc() const {
return SMLoc::getFromPointer(Str.data());
}
+
+SMLoc AsmToken::getEndLoc() const {
+ return SMLoc::getFromPointer(Str.data() + Str.size() - 1);
+}
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
index 5239ec7..3a825f0 100644
--- a/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
@@ -33,8 +33,8 @@ const AsmToken &MCAsmParser::getTok() {
return getLexer().getTok();
}
-bool MCAsmParser::TokError(const Twine &Msg) {
- Error(getLexer().getLoc(), Msg);
+bool MCAsmParser::TokError(const Twine &Msg, ArrayRef<SMRange> Ranges) {
+ Error(getLexer().getLoc(), Msg, Ranges);
return true;
}
diff --git a/contrib/llvm/lib/MC/MCPureStreamer.cpp b/contrib/llvm/lib/MC/MCPureStreamer.cpp
index 086c922..a770c97 100644
--- a/contrib/llvm/lib/MC/MCPureStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCPureStreamer.cpp
@@ -46,9 +46,9 @@ public:
unsigned MaxBytesToEmit = 0);
virtual void EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit = 0);
- virtual void EmitValueToOffset(const MCExpr *Offset,
+ virtual bool EmitValueToOffset(const MCExpr *Offset,
unsigned char Value = 0);
- virtual void Finish();
+ virtual void FinishImpl();
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) {
@@ -93,9 +93,9 @@ public:
virtual void EmitFileDirective(StringRef Filename) {
report_fatal_error("unsupported directive in pure streamer");
}
- virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+ StringRef Filename) {
report_fatal_error("unsupported directive in pure streamer");
- return false;
}
/// @}
@@ -184,9 +184,10 @@ void MCPureStreamer::EmitCodeAlignment(unsigned ByteAlignment,
getCurrentSectionData()->setAlignment(ByteAlignment);
}
-void MCPureStreamer::EmitValueToOffset(const MCExpr *Offset,
+bool MCPureStreamer::EmitValueToOffset(const MCExpr *Offset,
unsigned char Value) {
new MCOrgFragment(*Offset, Value, getCurrentSectionData());
+ return false;
}
void MCPureStreamer::EmitInstToFragment(const MCInst &Inst) {
@@ -223,10 +224,10 @@ void MCPureStreamer::EmitInstToData(const MCInst &Inst) {
DF->getContents().append(Code.begin(), Code.end());
}
-void MCPureStreamer::Finish() {
+void MCPureStreamer::FinishImpl() {
// FIXME: Handle DWARF tables?
- this->MCObjectStreamer::Finish();
+ this->MCObjectStreamer::FinishImpl();
}
MCStreamer *llvm::createPureStreamer(MCContext &Context, MCAsmBackend &MAB,
diff --git a/contrib/llvm/lib/MC/MCStreamer.cpp b/contrib/llvm/lib/MC/MCStreamer.cpp
index 3afa22b..43e62ff 100644
--- a/contrib/llvm/lib/MC/MCStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCStreamer.cpp
@@ -16,7 +16,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include <cstdlib>
using namespace llvm;
@@ -94,17 +93,18 @@ void MCStreamer::EmitIntValue(uint64_t Value, unsigned Size,
/// EmitULEB128Value - Special case of EmitULEB128Value that avoids the
/// client having to pass in a MCExpr for constant integers.
-void MCStreamer::EmitULEB128IntValue(uint64_t Value, unsigned AddrSpace) {
- SmallString<32> Tmp;
+void MCStreamer::EmitULEB128IntValue(uint64_t Value, unsigned AddrSpace,
+ unsigned Padding) {
+ SmallString<128> Tmp;
raw_svector_ostream OSE(Tmp);
- MCObjectWriter::EncodeULEB128(Value, OSE);
+ MCObjectWriter::EncodeULEB128(Value, OSE, Padding);
EmitBytes(OSE.str(), AddrSpace);
}
/// EmitSLEB128Value - Special case of EmitSLEB128Value that avoids the
/// client having to pass in a MCExpr for constant integers.
void MCStreamer::EmitSLEB128IntValue(int64_t Value, unsigned AddrSpace) {
- SmallString<32> Tmp;
+ SmallString<128> Tmp;
raw_svector_ostream OSE(Tmp);
MCObjectWriter::EncodeSLEB128(Value, OSE);
EmitBytes(OSE.str(), AddrSpace);
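
The Padding parameter threaded through EmitULEB128IntValue lets a caller force
a fixed-width LEB: filler continuation bytes of 0x80 closed by a final 0x00,
so space can be reserved before the value is final. A self-contained encoder
written to the same convention (a sketch assuming Padding counts the filler
bytes appended after the value; MCObjectWriter::EncodeULEB128 is the
authoritative implementation):

    #include <cstdint>
    #include <vector>

    static std::vector<uint8_t> EncodeULEB128(uint64_t Value,
                                              unsigned Padding = 0) {
      std::vector<uint8_t> Out;
      do {
        uint8_t Byte = Value & 0x7f;
        Value >>= 7;
        if (Value != 0 || Padding != 0)
          Byte |= 0x80; // more bytes follow
        Out.push_back(Byte);
      } while (Value != 0);
      if (Padding != 0) {
        for (; Padding != 1; --Padding)
          Out.push_back(0x80);
        Out.push_back(0x00);
      }
      return Out;
    }
    // EncodeULEB128(1, 3) -> 81 80 80 00: four bytes that still decode as 1.

The SmallString bump from 32 to 128 bytes in the same hunk is just headroom so
padded encodings don't spill to the heap.
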
@@ -128,6 +128,10 @@ void MCStreamer::EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
AddrSpace);
}
+void MCStreamer::EmitGPRel64Value(const MCExpr *Value) {
+ report_fatal_error("unsupported directive in streamer");
+}
+
void MCStreamer::EmitGPRel32Value(const MCExpr *Value) {
report_fatal_error("unsupported directive in streamer");
}
@@ -142,8 +146,9 @@ void MCStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
}
bool MCStreamer::EmitDwarfFileDirective(unsigned FileNo,
+ StringRef Directory,
StringRef Filename) {
- return getContext().GetDwarfFile(Filename, FileNo) == 0;
+ return getContext().GetDwarfFile(Directory, Filename, FileNo) == 0;
}
void MCStreamer::EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
@@ -186,9 +191,8 @@ void MCStreamer::EmitDataRegion() {
if (!MAI.getSupportsDataRegions()) return;
// Generate a unique symbol name.
- MCSymbol *NewSym = Context.GetOrCreateSymbol(
- Twine(MAI.getDataBeginLabelName()) +
- utostr(UniqueDataBeginSuffix++));
+ MCSymbol *NewSym = Context.GetOrCreateSymbol(MAI.getDataBeginLabelName() +
+ Twine(UniqueDataBeginSuffix++));
EmitLabel(NewSym);
RegionIndicator = Data;
@@ -202,9 +206,8 @@ void MCStreamer::EmitCodeRegion() {
if (!MAI.getSupportsDataRegions()) return;
// Generate a unique symbol name.
- MCSymbol *NewSym = Context.GetOrCreateSymbol(
- Twine(MAI.getCodeBeginLabelName()) +
- utostr(UniqueCodeBeginSuffix++));
+ MCSymbol *NewSym = Context.GetOrCreateSymbol(MAI.getCodeBeginLabelName() +
+ Twine(UniqueCodeBeginSuffix++));
EmitLabel(NewSym);
RegionIndicator = Code;
@@ -218,9 +221,9 @@ void MCStreamer::EmitJumpTable8Region() {
if (!MAI.getSupportsDataRegions()) return;
// Generate a unique symbol name.
- MCSymbol *NewSym = Context.GetOrCreateSymbol(
- Twine(MAI.getJumpTable8BeginLabelName()) +
- utostr(UniqueDataBeginSuffix++));
+ MCSymbol *NewSym =
+ Context.GetOrCreateSymbol(MAI.getJumpTable8BeginLabelName() +
+ Twine(UniqueDataBeginSuffix++));
EmitLabel(NewSym);
RegionIndicator = JumpTable8;
@@ -234,9 +237,9 @@ void MCStreamer::EmitJumpTable16Region() {
if (!MAI.getSupportsDataRegions()) return;
// Generate a unique symbol name.
- MCSymbol *NewSym = Context.GetOrCreateSymbol(
- Twine(MAI.getJumpTable16BeginLabelName()) +
- utostr(UniqueDataBeginSuffix++));
+ MCSymbol *NewSym =
+ Context.GetOrCreateSymbol(MAI.getJumpTable16BeginLabelName() +
+ Twine(UniqueDataBeginSuffix++));
EmitLabel(NewSym);
RegionIndicator = JumpTable16;
@@ -251,9 +254,9 @@ void MCStreamer::EmitJumpTable32Region() {
if (!MAI.getSupportsDataRegions()) return;
// Generate a unique symbol name.
- MCSymbol *NewSym = Context.GetOrCreateSymbol(
- Twine(MAI.getJumpTable32BeginLabelName()) +
- utostr(UniqueDataBeginSuffix++));
+ MCSymbol *NewSym =
+ Context.GetOrCreateSymbol(MAI.getJumpTable32BeginLabelName() +
+ Twine(UniqueDataBeginSuffix++));
EmitLabel(NewSym);
RegionIndicator = JumpTable32;
@@ -277,8 +280,17 @@ void MCStreamer::EmitCFIStartProc() {
report_fatal_error("Starting a frame before finishing the previous one!");
MCDwarfFrameInfo Frame;
- Frame.Function = LastSymbol;
+ EmitCFIStartProcImpl(Frame);
+ FrameInfos.push_back(Frame);
+ RegionIndicator = Code;
+}
+
+void MCStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) {
+}
+
+void MCStreamer::RecordProcStart(MCDwarfFrameInfo &Frame) {
+ Frame.Function = LastSymbol;
// If the function is externally visible, we need to create a local
// symbol to avoid relocations.
StringRef Prefix = getContext().getAsmInfo().getPrivateGlobalPrefix();
@@ -288,16 +300,20 @@ void MCStreamer::EmitCFIStartProc() {
Frame.Begin = getContext().CreateTempSymbol();
EmitLabel(Frame.Begin);
}
-
- FrameInfos.push_back(Frame);
- RegionIndicator = Code;
}
void MCStreamer::EmitCFIEndProc() {
EnsureValidFrame();
MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
- CurFrame->End = getContext().CreateTempSymbol();
- EmitLabel(CurFrame->End);
+ EmitCFIEndProcImpl(*CurFrame);
+}
+
+void MCStreamer::EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) {
+}
+
+void MCStreamer::RecordProcEnd(MCDwarfFrameInfo &Frame) {
+ Frame.End = getContext().CreateTempSymbol();
+ EmitLabel(Frame.End);
}
void MCStreamer::EmitCFIDefCfa(int64_t Register, int64_t Offset) {
@@ -386,7 +402,7 @@ void MCStreamer::EmitCFIRememberState() {
MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
MCSymbol *Label = getContext().CreateTempSymbol();
EmitLabel(Label);
- MCCFIInstruction Instruction(MCCFIInstruction::Remember, Label);
+ MCCFIInstruction Instruction(MCCFIInstruction::RememberState, Label);
CurFrame->Instructions.push_back(Instruction);
}
@@ -396,7 +412,7 @@ void MCStreamer::EmitCFIRestoreState() {
MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
MCSymbol *Label = getContext().CreateTempSymbol();
EmitLabel(Label);
- MCCFIInstruction Instruction(MCCFIInstruction::Restore, Label);
+ MCCFIInstruction Instruction(MCCFIInstruction::RestoreState, Label);
CurFrame->Instructions.push_back(Instruction);
}
@@ -409,6 +425,30 @@ void MCStreamer::EmitCFISameValue(int64_t Register) {
CurFrame->Instructions.push_back(Instruction);
}
+void MCStreamer::EmitCFIRestore(int64_t Register) {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCSymbol *Label = getContext().CreateTempSymbol();
+ EmitLabel(Label);
+ MCCFIInstruction Instruction(MCCFIInstruction::Restore, Label, Register);
+ CurFrame->Instructions.push_back(Instruction);
+}
+
+void MCStreamer::EmitCFIEscape(StringRef Values) {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ MCSymbol *Label = getContext().CreateTempSymbol();
+ EmitLabel(Label);
+ MCCFIInstruction Instruction(MCCFIInstruction::Escape, Label, Values);
+ CurFrame->Instructions.push_back(Instruction);
+}
+
+void MCStreamer::EmitCFISignalFrame() {
+ EnsureValidFrame();
+ MCDwarfFrameInfo *CurFrame = getCurrentFrameInfo();
+ CurFrame->IsSignalFrame = true;
+}
+
void MCStreamer::setCurrentW64UnwindInfo(MCWin64EHUnwindInfo *Frame) {
W64UnwindInfos.push_back(Frame);
CurrentW64UnwindInfo = W64UnwindInfos.back();
@@ -559,6 +599,10 @@ void MCStreamer::EmitWin64EHEndProlog() {
EmitLabel(CurFrame->PrologEnd);
}
+void MCStreamer::EmitCOFFSecRel32(MCSymbol const *Symbol) {
+ llvm_unreachable("This file format doesn't support this directive");
+}
+
void MCStreamer::EmitFnStart() {
errs() << "Not implemented yet\n";
abort();
@@ -631,3 +675,10 @@ void MCStreamer::EmitW64Tables() {
MCWin64EHUnwindEmitter::Emit(*this);
}
+
+void MCStreamer::Finish() {
+ if (!FrameInfos.empty() && !FrameInfos.back().End)
+ report_fatal_error("Unfinished frame!");
+
+ FinishImpl();
+}
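
The new Finish()/FinishImpl() pair (with the overrides renamed in
MCPureStreamer and WinCOFFStreamer above) is a textbook template method: the
base class owns the one invariant every streamer must check, no dangling CFI
frame, and defers format-specific teardown to a virtual hook. In miniature
(illustrative types only):

    #include <stdexcept>
    #include <vector>

    struct DwarfFrameInfo { bool HasEnd = false; };

    class StreamerBase {
      std::vector<DwarfFrameInfo> FrameInfos;
    public:
      virtual ~StreamerBase() = default;
      void Finish() {                 // non-virtual; the invariant lives here
        if (!FrameInfos.empty() && !FrameInfos.back().HasEnd)
          throw std::runtime_error("Unfinished frame!");
        FinishImpl();                 // per-format work (COFF, Mach-O, ...)
      }
    protected:
      virtual void FinishImpl() = 0;
    };
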
diff --git a/contrib/llvm/lib/MC/MCSymbol.cpp b/contrib/llvm/lib/MC/MCSymbol.cpp
index c2fad16..e013e77 100644
--- a/contrib/llvm/lib/MC/MCSymbol.cpp
+++ b/contrib/llvm/lib/MC/MCSymbol.cpp
@@ -54,17 +54,14 @@ const MCSymbol &MCSymbol::AliasedSymbol() const {
void MCSymbol::setVariableValue(const MCExpr *Value) {
assert(!IsUsed && "Cannot set a variable that has already been used.");
assert(Value && "Invalid variable value!");
- assert((isUndefined() || (isAbsolute() && isa<MCConstantExpr>(Value))) &&
- "Invalid redefinition!");
this->Value = Value;
// Variables should always be marked as in the same "section" as the value.
const MCSection *Section = Value->FindAssociatedSection();
- if (Section) {
+ if (Section)
setSection(*Section);
- } else {
+ else
setUndefined();
- }
}
void MCSymbol::print(raw_ostream &OS) const {
diff --git a/contrib/llvm/lib/MC/MachObjectWriter.cpp b/contrib/llvm/lib/MC/MachObjectWriter.cpp
index a9219ad..8e4066c 100644
--- a/contrib/llvm/lib/MC/MachObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MachObjectWriter.cpp
@@ -8,13 +8,13 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCMachObjectWriter.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSymbol.h"
@@ -584,9 +584,16 @@ IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
// requires the compiler to use .set to absolutize the differences between
// symbols which the compiler knows to be assembly time constants, so we
// don't need to worry about considering symbol differences fully resolved.
+ //
+ // If the file isn't using sub-sections-via-symbols, we can make the
+ // same assumptions about any symbol that we normally make about
+ // assembler locals.
if (!Asm.getBackend().hasReliableSymbolDifference()) {
- if (!SA.isTemporary() || !SA.isInSection() || &SecA != &SecB)
+ if (!SA.isInSection() || &SecA != &SecB ||
+ (!SA.isTemporary() &&
+ FB.getAtom() != Asm.getSymbolData(SA).getFragment()->getAtom() &&
+ Asm.getSubsectionsViaSymbols()))
return false;
return true;
}
@@ -628,7 +635,7 @@ IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
}
void MachObjectWriter::WriteObject(MCAssembler &Asm,
- const MCAsmLayout &Layout) {
+ const MCAsmLayout &Layout) {
unsigned NumSections = Asm.size();
// The section data starts after the header, the segment load command (and
@@ -731,7 +738,7 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm,
// Write the actual section data.
for (MCAssembler::const_iterator it = Asm.begin(),
ie = Asm.end(); it != ie; ++it) {
- Asm.WriteSectionData(it, Layout);
+ Asm.writeSectionData(it, Layout);
uint64_t Pad = getPaddingSize(it, Layout);
for (unsigned int i = 0; i < Pad; ++i)
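
The loosened check above is dense. Unpacked: the difference A-B is
assembly-time constant only when A is in the same section as the fixup, and,
when subsections-via-symbols is in effect, a non-temporary A must additionally
sit in the same atom. Restated as a pure function over plain bools (field
names illustrative, not the MCAssembler API):

    // Hedged restatement of IsSymbolRefDifferenceFullyResolvedImpl's
    // unreliable-difference branch.
    struct DiffQuery {
      bool SymAInSection;         // SA.isInSection()
      bool SameSection;           // &SecA == &SecB
      bool SymATemporary;         // SA.isTemporary()
      bool SameAtom;              // FB.getAtom() == SA's fragment's atom
      bool SubsectionsViaSymbols; // Asm.getSubsectionsViaSymbols()
    };

    static bool DifferenceFullyResolved(const DiffQuery &Q) {
      if (!Q.SymAInSection || !Q.SameSection)
        return false;
      // Under subsections-via-symbols a non-temporary symbol in another
      // atom can be moved independently, so the difference may change.
      if (!Q.SymATemporary && !Q.SameAtom && Q.SubsectionsViaSymbols)
        return false;
      return true;
    }
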
diff --git a/contrib/llvm/lib/MC/SubtargetFeature.cpp b/contrib/llvm/lib/MC/SubtargetFeature.cpp
index 348cd4c..be41579 100644
--- a/contrib/llvm/lib/MC/SubtargetFeature.cpp
+++ b/contrib/llvm/lib/MC/SubtargetFeature.cpp
@@ -13,8 +13,8 @@
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/StringExtras.h"
#include <algorithm>
#include <cassert>
#include <cctype>
@@ -114,7 +114,7 @@ void SubtargetFeatures::AddFeature(const StringRef String,
// Don't add empty features
if (!String.empty()) {
// Convert to lowercase, prepend flag and add to vector
- Features.push_back(PrependFlag(LowercaseString(String), IsEnabled));
+ Features.push_back(PrependFlag(String.lower(), IsEnabled));
}
}
@@ -154,21 +154,19 @@ static void Help(const SubtargetFeatureKV *CPUTable, size_t CPUTableSize,
// Print the CPU table.
errs() << "Available CPUs for this target:\n\n";
for (size_t i = 0; i != CPUTableSize; i++)
- errs() << " " << CPUTable[i].Key
- << std::string(MaxCPULen - std::strlen(CPUTable[i].Key), ' ')
- << " - " << CPUTable[i].Desc << ".\n";
- errs() << "\n";
-
+ errs() << format(" %-*s - %s.\n",
+ MaxCPULen, CPUTable[i].Key, CPUTable[i].Desc);
+ errs() << '\n';
+
// Print the Feature table.
errs() << "Available features for this target:\n\n";
for (size_t i = 0; i != FeatTableSize; i++)
- errs() << " " << FeatTable[i].Key
- << std::string(MaxFeatLen - std::strlen(FeatTable[i].Key), ' ')
- << " - " << FeatTable[i].Desc << ".\n";
- errs() << "\n";
-
+ errs() << format(" %-*s - %s.\n",
+ MaxFeatLen, FeatTable[i].Key, FeatTable[i].Desc);
+ errs() << '\n';
+
errs() << "Use +feature to enable a feature, or -feature to disable it.\n"
- << "For example, llc -mcpu=mycpu -mattr=+feature1,-feature2\n";
+ "For example, llc -mcpu=mycpu -mattr=+feature1,-feature2\n";
std::exit(1);
}
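
The hand-rolled space padding gives way to format()'s printf-style dynamic
field width: "%-*s" takes the width as an extra int argument and
left-justifies the string into it. The same idiom in plain printf:

    #include <cstdio>

    int main() {
      const int MaxCPULen = 12; // illustrative width
      std::printf("  %-*s - %s.\n", MaxCPULen, "cortex-a9", "ARM Cortex-A9");
      std::printf("  %-*s - %s.\n", MaxCPULen, "generic", "generic CPU");
    }
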
diff --git a/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp b/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
index b15e225..f706cac 100644
--- a/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -22,8 +22,10 @@
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCSectionCOFF.h"
+#include "llvm/MC/MCWinCOFFObjectWriter.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -33,8 +35,6 @@
#include "llvm/Support/TimeValue.h"
-#include "../Target/X86/MCTargetDesc/X86FixupKinds.h"
-
#include <cstdio>
using namespace llvm;
@@ -128,8 +128,9 @@ public:
typedef DenseMap<MCSymbol const *, COFFSymbol *> symbol_map;
typedef DenseMap<MCSection const *, COFFSection *> section_map;
+ llvm::OwningPtr<MCWinCOFFObjectTargetWriter> TargetObjectWriter;
+
// Root level file contents.
- bool Is64Bit;
COFF::header Header;
sections Sections;
symbols Symbols;
@@ -139,7 +140,7 @@ public:
section_map SectionMap;
symbol_map SymbolMap;
- WinCOFFObjectWriter(raw_ostream &OS, bool is64Bit);
+ WinCOFFObjectWriter(MCWinCOFFObjectTargetWriter *MOTW, raw_ostream &OS);
~WinCOFFObjectWriter();
COFFSymbol *createSymbol(StringRef Name);
@@ -281,6 +282,7 @@ StringTable::StringTable() {
// The string table data begins with the length of the entire string table
// including the length header. Allocate space for this header.
Data.resize(4);
+ update_length();
}
size_t StringTable::size() const {
@@ -313,13 +315,13 @@ size_t StringTable::insert(llvm::StringRef String) {
//------------------------------------------------------------------------------
// WinCOFFObjectWriter class implementation
-WinCOFFObjectWriter::WinCOFFObjectWriter(raw_ostream &OS, bool is64Bit)
+WinCOFFObjectWriter::WinCOFFObjectWriter(MCWinCOFFObjectTargetWriter *MOTW,
+ raw_ostream &OS)
: MCObjectWriter(OS, true)
- , Is64Bit(is64Bit) {
+ , TargetObjectWriter(MOTW) {
memset(&Header, 0, sizeof(Header));
- Is64Bit ? Header.Machine = COFF::IMAGE_FILE_MACHINE_AMD64
- : Header.Machine = COFF::IMAGE_FILE_MACHINE_I386;
+ Header.Machine = TargetObjectWriter->getMachine();
}
WinCOFFObjectWriter::~WinCOFFObjectWriter() {
@@ -694,30 +696,13 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
if (CrossSection)
FixupKind = FK_PCRel_4;
- switch (FixupKind) {
- case FK_PCRel_4:
- case X86::reloc_riprel_4byte:
- case X86::reloc_riprel_4byte_movq_load:
- Reloc.Data.Type = Is64Bit ? COFF::IMAGE_REL_AMD64_REL32
- : COFF::IMAGE_REL_I386_REL32;
- // FIXME: Can anyone explain what this does other than adjust for the size
- // of the offset?
+ Reloc.Data.Type = TargetObjectWriter->getRelocType(FixupKind);
+
+ // FIXME: Can anyone explain what this does other than adjust for the size
+ // of the offset?
+ if (Reloc.Data.Type == COFF::IMAGE_REL_AMD64_REL32 ||
+ Reloc.Data.Type == COFF::IMAGE_REL_I386_REL32)
FixedValue += 4;
- break;
- case FK_Data_4:
- case X86::reloc_signed_4byte:
- Reloc.Data.Type = Is64Bit ? COFF::IMAGE_REL_AMD64_ADDR32
- : COFF::IMAGE_REL_I386_DIR32;
- break;
- case FK_Data_8:
- if (Is64Bit)
- Reloc.Data.Type = COFF::IMAGE_REL_AMD64_ADDR64;
- else
- llvm_unreachable("unsupported relocation type");
- break;
- default:
- llvm_unreachable("unsupported relocation type");
- }
coff_section->Relocations.push_back(Reloc);
}
@@ -798,9 +783,22 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
}
if (Sec->Relocations.size() > 0) {
- Sec->Header.NumberOfRelocations = Sec->Relocations.size();
+ bool RelocationsOverflow = Sec->Relocations.size() >= 0xffff;
+
+ if (RelocationsOverflow) {
+ // Signal overflow by setting NumberOfRelocations to max value. Actual
+ // size is found in reloc #0. Microsoft tools understand this.
+ Sec->Header.NumberOfRelocations = 0xffff;
+ } else {
+ Sec->Header.NumberOfRelocations = Sec->Relocations.size();
+ }
Sec->Header.PointerToRelocations = offset;
+ if (RelocationsOverflow) {
+ // Reloc #0 will contain actual count, so make room for it.
+ offset += COFF::RelocationSize;
+ }
+
offset += COFF::RelocationSize * Sec->Relocations.size();
for (relocations::iterator cr = Sec->Relocations.begin(),
@@ -835,8 +833,12 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
MCAssembler::const_iterator j, je;
for (i = Sections.begin(), ie = Sections.end(); i != ie; i++)
- if ((*i)->Number != -1)
+ if ((*i)->Number != -1) {
+ if ((*i)->Relocations.size() >= 0xffff) {
+ (*i)->Header.Characteristics |= COFF::IMAGE_SCN_LNK_NRELOC_OVFL;
+ }
WriteSectionHeader((*i)->Header);
+ }
for (i = Sections.begin(), ie = Sections.end(),
j = Asm.begin(), je = Asm.end();
@@ -849,13 +851,23 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
assert(OS.tell() == (*i)->Header.PointerToRawData &&
"Section::PointerToRawData is insane!");
- Asm.WriteSectionData(j, Layout);
+ Asm.writeSectionData(j, Layout);
}
if ((*i)->Relocations.size() > 0) {
assert(OS.tell() == (*i)->Header.PointerToRelocations &&
"Section::PointerToRelocations is insane!");
+ if ((*i)->Relocations.size() >= 0xffff) {
+ // In case of overflow, write actual relocation count as first
+ // relocation. Including the synthetic reloc itself (+ 1).
+ COFF::relocation r;
+ r.VirtualAddress = (*i)->Relocations.size() + 1;
+ r.SymbolTableIndex = 0;
+ r.Type = 0;
+ WriteRelocation(r);
+ }
+
for (relocations::const_iterator k = (*i)->Relocations.begin(),
ke = (*i)->Relocations.end();
k != ke; k++) {
@@ -877,11 +889,16 @@ void WinCOFFObjectWriter::WriteObject(MCAssembler &Asm,
OS.write((char const *)&Strings.Data.front(), Strings.Data.size());
}
+MCWinCOFFObjectTargetWriter::MCWinCOFFObjectTargetWriter(unsigned Machine_) :
+ Machine(Machine_) {
+}
+
//------------------------------------------------------------------------------
// WinCOFFObjectWriter factory function
namespace llvm {
- MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS, bool is64Bit) {
- return new WinCOFFObjectWriter(OS, is64Bit);
+ MCObjectWriter *createWinCOFFObjectWriter(MCWinCOFFObjectTargetWriter *MOTW,
+ raw_ostream &OS) {
+ return new WinCOFFObjectWriter(MOTW, OS);
}
}
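
Three hunks above cooperate to implement COFF's documented escape hatch for
sections with 0xffff or more relocations: NumberOfRelocations (a 16-bit field)
is pinned at 0xffff, the section gains IMAGE_SCN_LNK_NRELOC_OVFL, and a
synthetic relocation #0 carries the true count, including itself, in its
VirtualAddress. Condensed into one sketch (toy structs; the flag value is from
the COFF spec):

    #include <cstdint>
    #include <vector>

    struct Reloc {
      uint32_t VirtualAddress = 0;
      uint32_t SymbolTableIndex = 0;
      uint16_t Type = 0;
    };

    struct SectionHeader {
      uint16_t NumberOfRelocations = 0;
      uint32_t Characteristics = 0;
    };

    constexpr uint32_t IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000;

    static void LayOutRelocs(SectionHeader &Hdr, std::vector<Reloc> &Stream,
                             const std::vector<Reloc> &Actual) {
      if (Actual.size() >= 0xffff) {
        Hdr.NumberOfRelocations = 0xffff;               // sentinel
        Hdr.Characteristics |= IMAGE_SCN_LNK_NRELOC_OVFL;
        Reloc R;
        R.VirtualAddress = uint32_t(Actual.size() + 1); // count incl. this one
        Stream.push_back(R);                            // synthetic reloc #0
      } else {
        Hdr.NumberOfRelocations = uint16_t(Actual.size());
      }
      Stream.insert(Stream.end(), Actual.begin(), Actual.end());
    }
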
diff --git a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
index 7409daf..67dc649 100644
--- a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
+++ b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
@@ -25,13 +25,13 @@
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCWin64EH.h"
#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+
using namespace llvm;
namespace {
@@ -60,6 +60,7 @@ public:
virtual void EmitCOFFSymbolStorageClass(int StorageClass);
virtual void EmitCOFFSymbolType(int Type);
virtual void EndCOFFSymbolDef();
+ virtual void EmitCOFFSecRel32(MCSymbol const *Symbol);
virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment);
@@ -77,7 +78,7 @@ public:
virtual void EmitFileDirective(StringRef Filename);
virtual void EmitInstruction(const MCInst &Instruction);
virtual void EmitWin64EHHandlerData();
- virtual void Finish();
+ virtual void FinishImpl();
private:
virtual void EmitInstToFragment(const MCInst &Inst) {
@@ -251,7 +252,6 @@ void WinCOFFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
default:
llvm_unreachable("unsupported attribute");
- break;
}
}
@@ -293,6 +293,16 @@ void WinCOFFStreamer::EndCOFFSymbolDef() {
CurSymbol = NULL;
}
+void WinCOFFStreamer::EmitCOFFSecRel32(MCSymbol const *Symbol)
+{
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ DF->addFixup(MCFixup::Create(DF->getContents().size(),
+ MCSymbolRefExpr::Create (Symbol, getContext ()),
+ FK_SecRel_4));
+ DF->getContents().resize(DF->getContents().size() + 4, 0);
+}
+
void WinCOFFStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
llvm_unreachable("not implemented");
}
@@ -389,9 +399,9 @@ void WinCOFFStreamer::EmitWin64EHHandlerData() {
MCWin64EHUnwindEmitter::EmitUnwindInfo(*this, getCurrentW64UnwindInfo());
}
-void WinCOFFStreamer::Finish() {
+void WinCOFFStreamer::FinishImpl() {
EmitW64Tables();
- MCObjectStreamer::Finish();
+ MCObjectStreamer::FinishImpl();
}
namespace llvm
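
EmitCOFFSecRel32 above is the standard object-streamer recipe for any
symbol-valued field: record a fixup at the current end of the data fragment,
then grow the fragment by the fixup's width so the bytes exist to be patched
at layout time. The generic shape (toy types, not the MC classes):

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    struct Fixup { std::size_t Offset; std::string Symbol; }; // FK_SecRel_4-like

    struct DataFragment {
      std::vector<uint8_t> Contents;
      std::vector<Fixup> Fixups;

      void EmitSecRel32(const std::string &Sym) {
        Fixups.push_back({Contents.size(), Sym}); // fixup targets the hole below
        Contents.resize(Contents.size() + 4, 0);  // 4-byte zero placeholder
      }
    };
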
diff --git a/contrib/llvm/lib/Object/Archive.cpp b/contrib/llvm/lib/Object/Archive.cpp
index e2eaff5..c5f15ba 100644
--- a/contrib/llvm/lib/Object/Archive.cpp
+++ b/contrib/llvm/lib/Object/Archive.cpp
@@ -13,14 +13,15 @@
#include "llvm/Object/Archive.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
using namespace llvm;
using namespace object;
-namespace {
-const StringRef Magic = "!<arch>\n";
+static const char *Magic = "!<arch>\n";
+namespace {
struct ArchiveMemberHeader {
char Name[16];
char LastModified[12];
@@ -32,7 +33,11 @@ struct ArchiveMemberHeader {
///! Get the name without looking up long names.
StringRef getName() const {
- char EndCond = Name[0] == '/' ? ' ' : '/';
+ char EndCond;
+ if (Name[0] == '/' || Name[0] == '#')
+ EndCond = ' ';
+ else
+ EndCond = '/';
StringRef::size_type end = StringRef(Name, sizeof(Name)).find(EndCond);
if (end == StringRef::npos)
end = sizeof(Name);
@@ -47,12 +52,30 @@ struct ArchiveMemberHeader {
return ret.getZExtValue();
}
};
+}
-const ArchiveMemberHeader *ToHeader(const char *base) {
+static const ArchiveMemberHeader *ToHeader(const char *base) {
return reinterpret_cast<const ArchiveMemberHeader *>(base);
}
+
+
+static bool isInternalMember(const ArchiveMemberHeader &amh) {
+ const char *internals[] = {
+ "/",
+ "//",
+ "#_LLVM_SYM_TAB_#"
+ };
+
+ StringRef name = amh.getName();
+ for (std::size_t i = 0; i < sizeof(internals) / sizeof(*internals); ++i) {
+ if (name == internals[i])
+ return true;
+ }
+ return false;
}
+void Archive::anchor() { }
+
Archive::Child Archive::Child::getNext() const {
size_t SpaceToSkip = sizeof(ArchiveMemberHeader) +
ToHeader(Data.data())->getSize();
@@ -101,6 +124,11 @@ error_code Archive::Child::getName(StringRef &Result) const {
return object_error::parse_failed;
Result = addr;
return object_error::success;
+ } else if (name.startswith("#1/")) {
+ APInt name_size;
+ name.substr(3).getAsInteger(10, name_size);
+ Result = Data.substr(0, name_size.getZExtValue());
+ return object_error::success;
}
// It's a simple name.
if (name[name.size() - 1] == '/')
@@ -111,14 +139,27 @@ error_code Archive::Child::getName(StringRef &Result) const {
}
uint64_t Archive::Child::getSize() const {
- return ToHeader(Data.data())->getSize();
+ uint64_t size = ToHeader(Data.data())->getSize();
+ // Don't include attached name.
+ StringRef name = ToHeader(Data.data())->getName();
+ if (name.startswith("#1/")) {
+ APInt name_size;
+ name.substr(3).getAsInteger(10, name_size);
+ size -= name_size.getZExtValue();
+ }
+ return size;
}
MemoryBuffer *Archive::Child::getBuffer() const {
StringRef name;
if (getName(name)) return NULL;
- return MemoryBuffer::getMemBuffer(Data.substr(sizeof(ArchiveMemberHeader),
- getSize()),
+ int size = sizeof(ArchiveMemberHeader);
+ if (name.startswith("#1/")) {
+ APInt name_size;
+ name.substr(3).getAsInteger(10, name_size);
+ size += name_size.getZExtValue();
+ }
+ return MemoryBuffer::getMemBuffer(Data.substr(size, getSize()),
name,
false);
}
@@ -133,8 +174,7 @@ error_code Archive::Child::getAsBinary(OwningPtr<Binary> &Result) const {
}
Archive::Archive(MemoryBuffer *source, error_code &ec)
- : Binary(Binary::isArchive, source)
- , StringTable(Child(this, StringRef(0, 0))) {
+ : Binary(Binary::ID_Archive, source) {
// Check for sufficient magic.
if (!source || source->getBufferSize()
< (8 + sizeof(ArchiveMemberHeader) + 2) // Smallest archive.
@@ -143,30 +183,90 @@ Archive::Archive(MemoryBuffer *source, error_code &ec)
return;
}
- // Get the string table. It's the 3rd member.
- child_iterator StrTable = begin_children();
+ // Get the special members.
+ child_iterator i = begin_children(false);
child_iterator e = end_children();
- for (int i = 0; StrTable != e && i < 2; ++StrTable, ++i) {}
- // Check to see if there were 3 members, or the 3rd member wasn't named "//".
- StringRef name;
- if (StrTable != e && !StrTable->getName(name) && name == "//")
- StringTable = StrTable;
+ if (i != e) ++i; // Nobody cares about the first member.
+ if (i != e) {
+ SymbolTable = i;
+ ++i;
+ }
+ if (i != e) {
+ StringTable = i;
+ }
ec = object_error::success;
}
-Archive::child_iterator Archive::begin_children() const {
- const char *Loc = Data->getBufferStart() + Magic.size();
+Archive::child_iterator Archive::begin_children(bool skip_internal) const {
+ const char *Loc = Data->getBufferStart() + strlen(Magic);
size_t Size = sizeof(ArchiveMemberHeader) +
ToHeader(Loc)->getSize();
- return Child(this, StringRef(Loc, Size));
+ Child c(this, StringRef(Loc, Size));
+ // Skip internals at the beginning of an archive.
+ if (skip_internal && isInternalMember(*ToHeader(Loc)))
+ return c.getNext();
+ return c;
}
Archive::child_iterator Archive::end_children() const {
return Child(this, StringRef(0, 0));
}
-namespace llvm {
+error_code Archive::Symbol::getName(StringRef &Result) const {
+ Result =
+ StringRef(Parent->SymbolTable->getBuffer()->getBufferStart() + StringIndex);
+ return object_error::success;
+}
+
+error_code Archive::Symbol::getMember(child_iterator &Result) const {
+ const char *buf = Parent->SymbolTable->getBuffer()->getBufferStart();
+ uint32_t member_count = *reinterpret_cast<const support::ulittle32_t*>(buf);
+ const char *offsets = buf + 4;
+ buf += 4 + (member_count * 4); // Skip offsets.
+ const char *indicies = buf + 4;
+
+ uint16_t offsetindex =
+ *(reinterpret_cast<const support::ulittle16_t*>(indicies)
+ + SymbolIndex);
+
+ uint32_t offset = *(reinterpret_cast<const support::ulittle32_t*>(offsets)
+ + (offsetindex - 1));
-} // end namespace llvm
+ const char *Loc = Parent->getData().begin() + offset;
+ size_t Size = sizeof(ArchiveMemberHeader) +
+ ToHeader(Loc)->getSize();
+ Result = Child(Parent, StringRef(Loc, Size));
+
+ return object_error::success;
+}
+
+Archive::Symbol Archive::Symbol::getNext() const {
+ Symbol t(*this);
+ // Go to one past next null.
+ t.StringIndex =
+ Parent->SymbolTable->getBuffer()->getBuffer().find('\0', t.StringIndex) + 1;
+ ++t.SymbolIndex;
+ return t;
+}
+
+Archive::symbol_iterator Archive::begin_symbols() const {
+ const char *buf = SymbolTable->getBuffer()->getBufferStart();
+ uint32_t member_count = *reinterpret_cast<const support::ulittle32_t*>(buf);
+ buf += 4 + (member_count * 4); // Skip offsets.
+ uint32_t symbol_count = *reinterpret_cast<const support::ulittle32_t*>(buf);
+ buf += 4 + (symbol_count * 2); // Skip indices.
+ uint32_t string_start_offset =
+ buf - SymbolTable->getBuffer()->getBufferStart();
+ return symbol_iterator(Symbol(this, 0, string_start_offset));
+}
+
+Archive::symbol_iterator Archive::end_symbols() const {
+ const char *buf = SymbolTable->getBuffer()->getBufferStart();
+ uint32_t member_count = *reinterpret_cast<const support::ulittle32_t*>(buf);
+ buf += 4 + (member_count * 4); // Skip offsets.
+ uint32_t symbol_count = *reinterpret_cast<const support::ulittle32_t*>(buf);
+ return symbol_iterator(
+ Symbol(this, symbol_count, 0));
+}
diff --git a/contrib/llvm/lib/Object/COFFObjectFile.cpp b/contrib/llvm/lib/Object/COFFObjectFile.cpp
index 750c34d..bd27a56 100644
--- a/contrib/llvm/lib/Object/COFFObjectFile.cpp
+++ b/contrib/llvm/lib/Object/COFFObjectFile.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Object/COFF.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
@@ -98,24 +99,10 @@ error_code COFFObjectFile::getSymbolNext(DataRefImpl Symb,
error_code COFFObjectFile::getSymbolName(DataRefImpl Symb,
StringRef &Result) const {
const coff_symbol *symb = toSymb(Symb);
- // Check for string table entry. First 4 bytes are 0.
- if (symb->Name.Offset.Zeroes == 0) {
- uint32_t Offset = symb->Name.Offset.Offset;
- if (error_code ec = getString(Offset, Result))
- return ec;
- return object_error::success;
- }
-
- if (symb->Name.ShortName[7] == 0)
- // Null terminated, let ::strlen figure out the length.
- Result = StringRef(symb->Name.ShortName);
- else
- // Not null terminated, use all 8 bytes.
- Result = StringRef(symb->Name.ShortName, 8);
- return object_error::success;
+ return getSymbolName(symb, Result);
}
-error_code COFFObjectFile::getSymbolOffset(DataRefImpl Symb,
+error_code COFFObjectFile::getSymbolFileOffset(DataRefImpl Symb,
uint64_t &Result) const {
const coff_symbol *symb = toSymb(Symb);
const coff_section *Section = NULL;
@@ -127,7 +114,7 @@ error_code COFFObjectFile::getSymbolOffset(DataRefImpl Symb,
if (Type == 'U' || Type == 'w')
Result = UnknownAddressOrSize;
else if (Section)
- Result = Section->VirtualAddress + symb->Value;
+ Result = Section->PointerToRawData + symb->Value;
else
Result = symb->Value;
return object_error::success;
@@ -145,23 +132,21 @@ error_code COFFObjectFile::getSymbolAddress(DataRefImpl Symb,
if (Type == 'U' || Type == 'w')
Result = UnknownAddressOrSize;
else if (Section)
- Result = reinterpret_cast<uintptr_t>(base() +
- Section->PointerToRawData +
- symb->Value);
+ Result = Section->VirtualAddress + symb->Value;
else
- Result = reinterpret_cast<uintptr_t>(base() + symb->Value);
+ Result = symb->Value;
return object_error::success;
}
error_code COFFObjectFile::getSymbolType(DataRefImpl Symb,
- SymbolRef::SymbolType &Result) const {
+ SymbolRef::Type &Result) const {
const coff_symbol *symb = toSymb(Symb);
Result = SymbolRef::ST_Other;
if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED) {
- Result = SymbolRef::ST_External;
+ Result = SymbolRef::ST_Unknown;
} else {
- if (symb->Type.ComplexType == COFF::IMAGE_SYM_DTYPE_FUNCTION) {
+ if (symb->getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION) {
Result = SymbolRef::ST_Function;
} else {
char Type;
@@ -175,10 +160,27 @@ error_code COFFObjectFile::getSymbolType(DataRefImpl Symb,
return object_error::success;
}
-error_code COFFObjectFile::isSymbolGlobal(DataRefImpl Symb,
- bool &Result) const {
+error_code COFFObjectFile::getSymbolFlags(DataRefImpl Symb,
+ uint32_t &Result) const {
const coff_symbol *symb = toSymb(Symb);
- Result = (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL);
+ Result = SymbolRef::SF_None;
+
+ // TODO: Correctly set SF_FormatSpecific, SF_ThreadLocal, SF_Common
+
+ if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
+ symb->SectionNumber == COFF::IMAGE_SYM_UNDEFINED)
+ Result |= SymbolRef::SF_Undefined;
+
+ // TODO: This is certainly too restrictive.
+ if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL)
+ Result |= SymbolRef::SF_Global;
+
+ if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL)
+ Result |= SymbolRef::SF_Weak;
+
+ if (symb->SectionNumber == COFF::IMAGE_SYM_ABSOLUTE)
+ Result |= SymbolRef::SF_Absolute;
+
return object_error::success;
}
@@ -233,7 +235,9 @@ error_code COFFObjectFile::getSymbolNMTypeChar(DataRefImpl Symb,
if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL) {
Result = 'w';
return object_error::success; // Don't do ::toupper.
- } else
+ } else if (symb->Value != 0) // Check for common symbols.
+ ret = 'c';
+ else
ret = 'u';
break;
case COFF::IMAGE_SYM_ABSOLUTE:
@@ -269,9 +273,18 @@ error_code COFFObjectFile::getSymbolNMTypeChar(DataRefImpl Symb,
return object_error::success;
}
-error_code COFFObjectFile::isSymbolInternal(DataRefImpl Symb,
- bool &Result) const {
- Result = false;
+error_code COFFObjectFile::getSymbolSection(DataRefImpl Symb,
+ section_iterator &Result) const {
+ const coff_symbol *symb = toSymb(Symb);
+ if (symb->SectionNumber <= COFF::IMAGE_SYM_UNDEFINED)
+ Result = end_sections();
+ else {
+ const coff_section *sec = 0;
+ if (error_code ec = getSection(symb->SectionNumber, sec)) return ec;
+ DataRefImpl Sec;
+ Sec.p = reinterpret_cast<uintptr_t>(sec);
+ Result = section_iterator(SectionRef(Sec, this));
+ }
return object_error::success;
}
@@ -287,24 +300,7 @@ error_code COFFObjectFile::getSectionNext(DataRefImpl Sec,
error_code COFFObjectFile::getSectionName(DataRefImpl Sec,
StringRef &Result) const {
const coff_section *sec = toSec(Sec);
- StringRef name;
- if (sec->Name[7] == 0)
- // Null terminated, let ::strlen figure out the length.
- name = sec->Name;
- else
- // Not null terminated, use all 8 bytes.
- name = StringRef(sec->Name, 8);
-
- // Check for string table entry. First byte is '/'.
- if (name[0] == '/') {
- uint32_t Offset;
- name.substr(1).getAsInteger(10, Offset);
- if (error_code ec = getString(Offset, name))
- return ec;
- }
-
- Result = name;
- return object_error::success;
+ return getSectionName(sec, Result);
}
error_code COFFObjectFile::getSectionAddress(DataRefImpl Sec,
@@ -324,16 +320,10 @@ error_code COFFObjectFile::getSectionSize(DataRefImpl Sec,
error_code COFFObjectFile::getSectionContents(DataRefImpl Sec,
StringRef &Result) const {
const coff_section *sec = toSec(Sec);
- // The only thing that we need to verify is that the contents is contained
- // within the file bounds. We don't need to make sure it doesn't cover other
- // data, as there's nothing that says that is not allowed.
- uintptr_t con_start = uintptr_t(base()) + sec->PointerToRawData;
- uintptr_t con_end = con_start + sec->SizeOfRawData;
- if (con_end >= uintptr_t(Data->getBufferEnd()))
- return object_error::parse_failed;
- Result = StringRef(reinterpret_cast<const char*>(con_start),
- sec->SizeOfRawData);
- return object_error::success;
+ ArrayRef<uint8_t> Res;
+ error_code EC = getSectionContents(sec, Res);
+ Result = StringRef(reinterpret_cast<const char*>(Res.data()), Res.size());
+ return EC;
}
error_code COFFObjectFile::getSectionAlignment(DataRefImpl Sec,
@@ -366,12 +356,33 @@ error_code COFFObjectFile::isSectionBSS(DataRefImpl Sec,
return object_error::success;
}
+error_code COFFObjectFile::isSectionRequiredForExecution(DataRefImpl Sec,
+ bool &Result) const {
+ // FIXME: Unimplemented
+ Result = true;
+ return object_error::success;
+}
+
+error_code COFFObjectFile::isSectionVirtual(DataRefImpl Sec,
+ bool &Result) const {
+ const coff_section *sec = toSec(Sec);
+ Result = sec->Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+ return object_error::success;
+}
+
+error_code COFFObjectFile::isSectionZeroInit(DataRefImpl Sec,
+ bool &Result) const {
+ // FIXME: Unimplemented
+ Result = false;
+ return object_error::success;
+}
+
error_code COFFObjectFile::sectionContainsSymbol(DataRefImpl Sec,
DataRefImpl Symb,
bool &Result) const {
const coff_section *sec = toSec(Sec);
const coff_symbol *symb = toSymb(Symb);
- const coff_section *symb_sec;
+ const coff_section *symb_sec = 0;
if (error_code ec = getSection(symb->SectionNumber, symb_sec)) return ec;
if (symb_sec == sec)
Result = true;
@@ -383,7 +394,6 @@ error_code COFFObjectFile::sectionContainsSymbol(DataRefImpl Sec,
relocation_iterator COFFObjectFile::getSectionRelBegin(DataRefImpl Sec) const {
const coff_section *sec = toSec(Sec);
DataRefImpl ret;
- std::memset(&ret, 0, sizeof(ret));
if (sec->NumberOfRelocations == 0)
ret.p = 0;
else
@@ -395,7 +405,6 @@ relocation_iterator COFFObjectFile::getSectionRelBegin(DataRefImpl Sec) const {
relocation_iterator COFFObjectFile::getSectionRelEnd(DataRefImpl Sec) const {
const coff_section *sec = toSec(Sec);
DataRefImpl ret;
- std::memset(&ret, 0, sizeof(ret));
if (sec->NumberOfRelocations == 0)
ret.p = 0;
else
@@ -408,7 +417,12 @@ relocation_iterator COFFObjectFile::getSectionRelEnd(DataRefImpl Sec) const {
}
COFFObjectFile::COFFObjectFile(MemoryBuffer *Object, error_code &ec)
- : ObjectFile(Binary::isCOFF, Object, ec) {
+ : ObjectFile(Binary::ID_COFF, Object, ec)
+ , Header(0)
+ , SectionTable(0)
+ , SymbolTable(0)
+ , StringTable(0)
+ , StringTableSize(0) {
// Check that we at least have enough room for a header.
if (!checkSize(Data, ec, sizeof(coff_file_header))) return;
@@ -421,7 +435,7 @@ COFFObjectFile::COFFObjectFile(MemoryBuffer *Object, error_code &ec)
// PE/COFF, seek through MS-DOS compatibility stub and 4-byte
// PE signature to find 'normal' COFF header.
if (!checkSize(Data, ec, 0x3c + 8)) return;
- HeaderStart += *reinterpret_cast<const ulittle32_t *>(base() + 0x3c);
+ HeaderStart = *reinterpret_cast<const ulittle16_t *>(base() + 0x3c);
// Check the PE header. ("PE\0\0")
if (std::memcmp(base() + HeaderStart, "PE\0\0", 4) != 0) {
ec = object_error::parse_failed;
@@ -443,28 +457,30 @@ COFFObjectFile::COFFObjectFile(MemoryBuffer *Object, error_code &ec)
Header->NumberOfSections * sizeof(coff_section)))
return;
- SymbolTable =
- reinterpret_cast<const coff_symbol *>(base()
- + Header->PointerToSymbolTable);
- if (!checkAddr(Data, ec, uintptr_t(SymbolTable),
- Header->NumberOfSymbols * sizeof(coff_symbol)))
- return;
+ if (Header->PointerToSymbolTable != 0) {
+ SymbolTable =
+ reinterpret_cast<const coff_symbol *>(base()
+ + Header->PointerToSymbolTable);
+ if (!checkAddr(Data, ec, uintptr_t(SymbolTable),
+ Header->NumberOfSymbols * sizeof(coff_symbol)))
+ return;
- // Find string table.
- StringTable = reinterpret_cast<const char *>(base())
- + Header->PointerToSymbolTable
- + Header->NumberOfSymbols * sizeof(coff_symbol);
- if (!checkAddr(Data, ec, uintptr_t(StringTable), sizeof(ulittle32_t)))
- return;
+ // Find string table.
+ StringTable = reinterpret_cast<const char *>(base())
+ + Header->PointerToSymbolTable
+ + Header->NumberOfSymbols * sizeof(coff_symbol);
+ if (!checkAddr(Data, ec, uintptr_t(StringTable), sizeof(ulittle32_t)))
+ return;
- StringTableSize = *reinterpret_cast<const ulittle32_t *>(StringTable);
- if (!checkAddr(Data, ec, uintptr_t(StringTable), StringTableSize))
- return;
- // Check that the string table is null terminated if it has anything in it.
- if (StringTableSize < 4
- || (StringTableSize > 4 && StringTable[StringTableSize - 1] != 0)) {
- ec = object_error::parse_failed;
- return;
+ StringTableSize = *reinterpret_cast<const ulittle32_t *>(StringTable);
+ if (!checkAddr(Data, ec, uintptr_t(StringTable), StringTableSize))
+ return;
+ // Check that the string table is null terminated if it has anything in it.
+ if (StringTableSize < 4
+ || (StringTableSize > 4 && StringTable[StringTableSize - 1] != 0)) {
+ ec = object_error::parse_failed;
+ return;
+ }
}
ec = object_error::success;
@@ -472,7 +488,6 @@ COFFObjectFile::COFFObjectFile(MemoryBuffer *Object, error_code &ec)
symbol_iterator COFFObjectFile::begin_symbols() const {
DataRefImpl ret;
- std::memset(&ret, 0, sizeof(DataRefImpl));
ret.p = reinterpret_cast<intptr_t>(SymbolTable);
return symbol_iterator(SymbolRef(ret, this));
}
@@ -480,21 +495,44 @@ symbol_iterator COFFObjectFile::begin_symbols() const {
symbol_iterator COFFObjectFile::end_symbols() const {
// The symbol table ends where the string table begins.
DataRefImpl ret;
- std::memset(&ret, 0, sizeof(DataRefImpl));
ret.p = reinterpret_cast<intptr_t>(StringTable);
return symbol_iterator(SymbolRef(ret, this));
}
+symbol_iterator COFFObjectFile::begin_dynamic_symbols() const {
+ // TODO: implement
+ report_fatal_error("Dynamic symbols unimplemented in COFFObjectFile");
+}
+
+symbol_iterator COFFObjectFile::end_dynamic_symbols() const {
+ // TODO: implement
+ report_fatal_error("Dynamic symbols unimplemented in COFFObjectFile");
+}
+
+library_iterator COFFObjectFile::begin_libraries_needed() const {
+ // TODO: implement
+ report_fatal_error("Libraries needed unimplemented in COFFObjectFile");
+}
+
+library_iterator COFFObjectFile::end_libraries_needed() const {
+ // TODO: implement
+ report_fatal_error("Libraries needed unimplemented in COFFObjectFile");
+}
+
+StringRef COFFObjectFile::getLoadName() const {
+ // COFF does not have this field.
+ return "";
+}
+
+
section_iterator COFFObjectFile::begin_sections() const {
DataRefImpl ret;
- std::memset(&ret, 0, sizeof(DataRefImpl));
ret.p = reinterpret_cast<intptr_t>(SectionTable);
return section_iterator(SectionRef(ret, this));
}
section_iterator COFFObjectFile::end_sections() const {
DataRefImpl ret;
- std::memset(&ret, 0, sizeof(DataRefImpl));
ret.p = reinterpret_cast<intptr_t>(SectionTable + Header->NumberOfSections);
return section_iterator(SectionRef(ret, this));
}
@@ -525,6 +563,11 @@ unsigned COFFObjectFile::getArch() const {
}
}
+error_code COFFObjectFile::getHeader(const coff_file_header *&Res) const {
+ Res = Header;
+ return object_error::success;
+}
+
error_code COFFObjectFile::getSection(int32_t index,
const coff_section *&Result) const {
// Check for special index values.
@@ -553,13 +596,69 @@ error_code COFFObjectFile::getString(uint32_t offset,
error_code COFFObjectFile::getSymbol(uint32_t index,
const coff_symbol *&Result) const {
- if (index > 0 && index < Header->NumberOfSymbols)
+ if (index < Header->NumberOfSymbols)
Result = SymbolTable + index;
else
return object_error::parse_failed;
return object_error::success;
}
+error_code COFFObjectFile::getSymbolName(const coff_symbol *symbol,
+ StringRef &Res) const {
+ // Check for string table entry. First 4 bytes are 0.
+ if (symbol->Name.Offset.Zeroes == 0) {
+ uint32_t Offset = symbol->Name.Offset.Offset;
+ if (error_code ec = getString(Offset, Res))
+ return ec;
+ return object_error::success;
+ }
+
+ if (symbol->Name.ShortName[7] == 0)
+ // Null terminated, let ::strlen figure out the length.
+ Res = StringRef(symbol->Name.ShortName);
+ else
+ // Not null terminated, use all 8 bytes.
+ Res = StringRef(symbol->Name.ShortName, 8);
+ return object_error::success;
+}
+
+error_code COFFObjectFile::getSectionName(const coff_section *Sec,
+ StringRef &Res) const {
+ StringRef Name;
+ if (Sec->Name[7] == 0)
+ // Null terminated, let ::strlen figure out the length.
+ Name = Sec->Name;
+ else
+ // Not null terminated, use all 8 bytes.
+ Name = StringRef(Sec->Name, 8);
+
+ // Check for string table entry. First byte is '/'.
+ if (Name[0] == '/') {
+ uint32_t Offset;
+ if (Name.substr(1).getAsInteger(10, Offset))
+ return object_error::parse_failed;
+ if (error_code ec = getString(Offset, Name))
+ return ec;
+ }
+
+ Res = Name;
+ return object_error::success;
+}
+
+error_code COFFObjectFile::getSectionContents(const coff_section *Sec,
+ ArrayRef<uint8_t> &Res) const {
+ // The only thing that we need to verify is that the contents is contained
+ // within the file bounds. We don't need to make sure it doesn't cover other
+ // data, as there's nothing that says that is not allowed.
+ uintptr_t ConStart = uintptr_t(base()) + Sec->PointerToRawData;
+ uintptr_t ConEnd = ConStart + Sec->SizeOfRawData;
+ if (ConEnd > uintptr_t(Data->getBufferEnd()))
+ return object_error::parse_failed;
+ Res = ArrayRef<uint8_t>(reinterpret_cast<const unsigned char*>(ConStart),
+ Sec->SizeOfRawData);
+ return object_error::success;
+}
+
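The containment test above computes both ends as integers before comparing; a
minimal standalone version, with an explicit guard against Start + Size
wrapping (function and parameter names are illustrative):

    #include <cstdint>

    bool rangeWithinBuffer(uintptr_t BufBegin, uintptr_t BufEnd,
                           uintptr_t Start, uint64_t Size) {
      if (Start < BufBegin || Start > BufEnd)
        return false;
      // Compare against the remaining space so Start + Size cannot overflow.
      return Size <= static_cast<uint64_t>(BufEnd - Start);
    }
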
const coff_relocation *COFFObjectFile::toRel(DataRefImpl Rel) const {
return reinterpret_cast<const coff_relocation*>(Rel.p);
}
@@ -575,6 +674,11 @@ error_code COFFObjectFile::getRelocationAddress(DataRefImpl Rel,
Res = toRel(Rel)->VirtualAddress;
return object_error::success;
}
+error_code COFFObjectFile::getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Res) const {
+ Res = toRel(Rel)->VirtualAddress;
+ return object_error::success;
+}
error_code COFFObjectFile::getRelocationSymbol(DataRefImpl Rel,
SymbolRef &Res) const {
const coff_relocation* R = toRel(Rel);
@@ -584,7 +688,7 @@ error_code COFFObjectFile::getRelocationSymbol(DataRefImpl Rel,
return object_error::success;
}
error_code COFFObjectFile::getRelocationType(DataRefImpl Rel,
- uint32_t &Res) const {
+ uint64_t &Res) const {
const coff_relocation* R = toRel(Rel);
Res = R->Type;
return object_error::success;
@@ -658,7 +762,6 @@ error_code COFFObjectFile::getRelocationValueString(DataRefImpl Rel,
const coff_symbol *symb = 0;
if (error_code ec = getSymbol(reloc->SymbolTableIndex, symb)) return ec;
DataRefImpl sym;
- ::memset(&sym, 0, sizeof(sym));
sym.p = reinterpret_cast<uintptr_t>(symb);
StringRef symname;
if (error_code ec = getSymbolName(sym, symname)) return ec;
@@ -666,6 +769,16 @@ error_code COFFObjectFile::getRelocationValueString(DataRefImpl Rel,
return object_error::success;
}
+error_code COFFObjectFile::getLibraryNext(DataRefImpl LibData,
+ LibraryRef &Result) const {
+ report_fatal_error("getLibraryNext not implemented in COFFObjectFile");
+}
+
+error_code COFFObjectFile::getLibraryPath(DataRefImpl LibData,
+ StringRef &Result) const {
+ report_fatal_error("getLibraryPath not implemented in COFFObjectFile");
+}
+
namespace llvm {
ObjectFile *ObjectFile::createCOFFObjectFile(MemoryBuffer *Object) {
diff --git a/contrib/llvm/lib/Object/ELFObjectFile.cpp b/contrib/llvm/lib/Object/ELFObjectFile.cpp
index 257d08c..ab5f810 100644
--- a/contrib/llvm/lib/Object/ELFObjectFile.cpp
+++ b/contrib/llvm/lib/Object/ELFObjectFile.cpp
@@ -7,1405 +7,44 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the ELFObjectFile class.
+// Part of the ELFObjectFile class implementation.
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/Object/ObjectFile.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/Endian.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
-#include <limits>
-#include <utility>
+#include "llvm/Object/ELF.h"
-using namespace llvm;
-using namespace object;
-
-// Templates to choose Elf_Addr and Elf_Off depending on is64Bits.
-namespace {
-template<support::endianness target_endianness>
-struct ELFDataTypeTypedefHelperCommon {
- typedef support::detail::packed_endian_specific_integral
- <uint16_t, target_endianness, support::aligned> Elf_Half;
- typedef support::detail::packed_endian_specific_integral
- <uint32_t, target_endianness, support::aligned> Elf_Word;
- typedef support::detail::packed_endian_specific_integral
- <int32_t, target_endianness, support::aligned> Elf_Sword;
- typedef support::detail::packed_endian_specific_integral
- <uint64_t, target_endianness, support::aligned> Elf_Xword;
- typedef support::detail::packed_endian_specific_integral
- <int64_t, target_endianness, support::aligned> Elf_Sxword;
-};
-}
-
-namespace {
-template<support::endianness target_endianness, bool is64Bits>
-struct ELFDataTypeTypedefHelper;
-
-/// ELF 32bit types.
-template<support::endianness target_endianness>
-struct ELFDataTypeTypedefHelper<target_endianness, false>
- : ELFDataTypeTypedefHelperCommon<target_endianness> {
- typedef support::detail::packed_endian_specific_integral
- <uint32_t, target_endianness, support::aligned> Elf_Addr;
- typedef support::detail::packed_endian_specific_integral
- <uint32_t, target_endianness, support::aligned> Elf_Off;
-};
-
-/// ELF 64bit types.
-template<support::endianness target_endianness>
-struct ELFDataTypeTypedefHelper<target_endianness, true>
- : ELFDataTypeTypedefHelperCommon<target_endianness>{
- typedef support::detail::packed_endian_specific_integral
- <uint64_t, target_endianness, support::aligned> Elf_Addr;
- typedef support::detail::packed_endian_specific_integral
- <uint64_t, target_endianness, support::aligned> Elf_Off;
-};
-}
-
-// I really don't like doing this, but the alternative is copypasta.
-#define LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits) \
-typedef typename \
- ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Addr Elf_Addr; \
-typedef typename \
- ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Off Elf_Off; \
-typedef typename \
- ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Half Elf_Half; \
-typedef typename \
- ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Word Elf_Word; \
-typedef typename \
- ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Sword Elf_Sword; \
-typedef typename \
- ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Xword Elf_Xword; \
-typedef typename \
- ELFDataTypeTypedefHelper<target_endianness, is64Bits>::Elf_Sxword Elf_Sxword;
-
- // Section header.
-namespace {
-template<support::endianness target_endianness, bool is64Bits>
-struct Elf_Shdr_Base;
-
-template<support::endianness target_endianness>
-struct Elf_Shdr_Base<target_endianness, false> {
- LLVM_ELF_IMPORT_TYPES(target_endianness, false)
- Elf_Word sh_name; // Section name (index into string table)
- Elf_Word sh_type; // Section type (SHT_*)
- Elf_Word sh_flags; // Section flags (SHF_*)
- Elf_Addr sh_addr; // Address where section is to be loaded
- Elf_Off sh_offset; // File offset of section data, in bytes
- Elf_Word sh_size; // Size of section, in bytes
- Elf_Word sh_link; // Section type-specific header table index link
- Elf_Word sh_info; // Section type-specific extra information
- Elf_Word sh_addralign;// Section address alignment
- Elf_Word sh_entsize; // Size of records contained within the section
-};
-
-template<support::endianness target_endianness>
-struct Elf_Shdr_Base<target_endianness, true> {
- LLVM_ELF_IMPORT_TYPES(target_endianness, true)
- Elf_Word sh_name; // Section name (index into string table)
- Elf_Word sh_type; // Section type (SHT_*)
- Elf_Xword sh_flags; // Section flags (SHF_*)
- Elf_Addr sh_addr; // Address where section is to be loaded
- Elf_Off sh_offset; // File offset of section data, in bytes
- Elf_Xword sh_size; // Size of section, in bytes
- Elf_Word sh_link; // Section type-specific header table index link
- Elf_Word sh_info; // Section type-specific extra information
- Elf_Xword sh_addralign;// Section address alignment
- Elf_Xword sh_entsize; // Size of records contained within the section
-};
-
-template<support::endianness target_endianness, bool is64Bits>
-struct Elf_Shdr_Impl : Elf_Shdr_Base<target_endianness, is64Bits> {
- using Elf_Shdr_Base<target_endianness, is64Bits>::sh_entsize;
- using Elf_Shdr_Base<target_endianness, is64Bits>::sh_size;
-
- /// @brief Get the number of entities this section contains if it has any.
- unsigned getEntityCount() const {
- if (sh_entsize == 0)
- return 0;
- return sh_size / sh_entsize;
- }
-};
-}
-
-namespace {
-template<support::endianness target_endianness, bool is64Bits>
-struct Elf_Sym_Base;
-
-template<support::endianness target_endianness>
-struct Elf_Sym_Base<target_endianness, false> {
- LLVM_ELF_IMPORT_TYPES(target_endianness, false)
- Elf_Word st_name; // Symbol name (index into string table)
- Elf_Addr st_value; // Value or address associated with the symbol
- Elf_Word st_size; // Size of the symbol
- unsigned char st_info; // Symbol's type and binding attributes
- unsigned char st_other; // Must be zero; reserved
- Elf_Half st_shndx; // Which section (header table index) it's defined in
-};
-
-template<support::endianness target_endianness>
-struct Elf_Sym_Base<target_endianness, true> {
- LLVM_ELF_IMPORT_TYPES(target_endianness, true)
- Elf_Word st_name; // Symbol name (index into string table)
- unsigned char st_info; // Symbol's type and binding attributes
- unsigned char st_other; // Must be zero; reserved
- Elf_Half st_shndx; // Which section (header table index) it's defined in
- Elf_Addr st_value; // Value or address associated with the symbol
- Elf_Xword st_size; // Size of the symbol
-};
-
-template<support::endianness target_endianness, bool is64Bits>
-struct Elf_Sym_Impl : Elf_Sym_Base<target_endianness, is64Bits> {
- using Elf_Sym_Base<target_endianness, is64Bits>::st_info;
-
- // These accessors and mutators correspond to the ELF32_ST_BIND,
- // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
- unsigned char getBinding() const { return st_info >> 4; }
- unsigned char getType() const { return st_info & 0x0f; }
- void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
- void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
- void setBindingAndType(unsigned char b, unsigned char t) {
- st_info = (b << 4) + (t & 0x0f);
- }
-};
-}
-
-namespace {
-template<support::endianness target_endianness, bool is64Bits, bool isRela>
-struct Elf_Rel_Base;
-
-template<support::endianness target_endianness>
-struct Elf_Rel_Base<target_endianness, false, false> {
- LLVM_ELF_IMPORT_TYPES(target_endianness, false)
- Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
- Elf_Word r_info; // Symbol table index and type of relocation to apply
-};
-
-template<support::endianness target_endianness>
-struct Elf_Rel_Base<target_endianness, true, false> {
- LLVM_ELF_IMPORT_TYPES(target_endianness, true)
- Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
- Elf_Xword r_info; // Symbol table index and type of relocation to apply
-};
-
-template<support::endianness target_endianness>
-struct Elf_Rel_Base<target_endianness, false, true> {
- LLVM_ELF_IMPORT_TYPES(target_endianness, false)
- Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
- Elf_Word r_info; // Symbol table index and type of relocation to apply
- Elf_Sword r_addend; // Compute value for relocatable field by adding this
-};
-
-template<support::endianness target_endianness>
-struct Elf_Rel_Base<target_endianness, true, true> {
- LLVM_ELF_IMPORT_TYPES(target_endianness, true)
- Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
- Elf_Xword r_info; // Symbol table index and type of relocation to apply
- Elf_Sxword r_addend; // Compute value for relocatable field by adding this.
-};
-
-template<support::endianness target_endianness, bool is64Bits, bool isRela>
-struct Elf_Rel_Impl;
-
-template<support::endianness target_endianness, bool isRela>
-struct Elf_Rel_Impl<target_endianness, true, isRela>
- : Elf_Rel_Base<target_endianness, true, isRela> {
- using Elf_Rel_Base<target_endianness, true, isRela>::r_info;
- LLVM_ELF_IMPORT_TYPES(target_endianness, true)
-
- // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
- // and ELF64_R_INFO macros defined in the ELF specification:
- uint64_t getSymbol() const { return (r_info >> 32); }
- unsigned char getType() const {
- return (unsigned char) (r_info & 0xffffffffL);
- }
- void setSymbol(uint64_t s) { setSymbolAndType(s, getType()); }
- void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
- void setSymbolAndType(uint64_t s, unsigned char t) {
- r_info = (s << 32) + (t&0xffffffffL);
- }
-};
-
-template<support::endianness target_endianness, bool isRela>
-struct Elf_Rel_Impl<target_endianness, false, isRela>
- : Elf_Rel_Base<target_endianness, false, isRela> {
- using Elf_Rel_Base<target_endianness, false, isRela>::r_info;
- LLVM_ELF_IMPORT_TYPES(target_endianness, false)
-
- // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
- // and ELF32_R_INFO macros defined in the ELF specification:
- uint32_t getSymbol() const { return (r_info >> 8); }
- unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
- void setSymbol(uint32_t s) { setSymbolAndType(s, getType()); }
- void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
- void setSymbolAndType(uint32_t s, unsigned char t) {
- r_info = (s << 8) + t;
- }
-};
+namespace llvm {
-}
+using namespace object;
namespace {
-template<support::endianness target_endianness, bool is64Bits>
-class ELFObjectFile : public ObjectFile {
- LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
-
- typedef Elf_Shdr_Impl<target_endianness, is64Bits> Elf_Shdr;
- typedef Elf_Sym_Impl<target_endianness, is64Bits> Elf_Sym;
- typedef Elf_Rel_Impl<target_endianness, is64Bits, false> Elf_Rel;
- typedef Elf_Rel_Impl<target_endianness, is64Bits, true> Elf_Rela;
-
- struct Elf_Ehdr {
- unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
- Elf_Half e_type; // Type of file (see ET_*)
- Elf_Half e_machine; // Required architecture for this file (see EM_*)
- Elf_Word e_version; // Must be equal to 1
- Elf_Addr e_entry; // Address to jump to in order to start program
- Elf_Off e_phoff; // Program header table's file offset, in bytes
- Elf_Off e_shoff; // Section header table's file offset, in bytes
- Elf_Word e_flags; // Processor-specific flags
- Elf_Half e_ehsize; // Size of ELF header, in bytes
- Elf_Half e_phentsize;// Size of an entry in the program header table
- Elf_Half e_phnum; // Number of entries in the program header table
- Elf_Half e_shentsize;// Size of an entry in the section header table
- Elf_Half e_shnum; // Number of entries in the section header table
- Elf_Half e_shstrndx; // Section header table index of section name
- // string table
- bool checkMagic() const {
- return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
- }
- unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
- unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
- };
-
- typedef SmallVector<const Elf_Shdr*, 1> Sections_t;
- typedef DenseMap<unsigned, unsigned> IndexMap_t;
- typedef DenseMap<const Elf_Shdr*, SmallVector<uint32_t, 1> > RelocMap_t;
-
- const Elf_Ehdr *Header;
- const Elf_Shdr *SectionHeaderTable;
- const Elf_Shdr *dot_shstrtab_sec; // Section header string table.
- const Elf_Shdr *dot_strtab_sec; // Symbol header string table.
- Sections_t SymbolTableSections;
- IndexMap_t SymbolTableSectionsIndexMap;
- DenseMap<const Elf_Sym*, ELF::Elf64_Word> ExtendedSymbolTable;
-
- /// @brief Map sections to an array of relocation sections that reference
- /// them sorted by section index.
- RelocMap_t SectionRelocMap;
-
- /// @brief Get the relocation section that contains \a Rel.
- const Elf_Shdr *getRelSection(DataRefImpl Rel) const {
- return getSection(Rel.w.b);
- }
-
- void validateSymbol(DataRefImpl Symb) const;
- bool isRelocationHasAddend(DataRefImpl Rel) const;
- template<typename T>
- const T *getEntry(uint16_t Section, uint32_t Entry) const;
- template<typename T>
- const T *getEntry(const Elf_Shdr *Section, uint32_t Entry) const;
- const Elf_Sym *getSymbol(DataRefImpl Symb) const;
- const Elf_Shdr *getSection(DataRefImpl index) const;
- const Elf_Shdr *getSection(uint32_t index) const;
- const Elf_Rel *getRel(DataRefImpl Rel) const;
- const Elf_Rela *getRela(DataRefImpl Rela) const;
- const char *getString(uint32_t section, uint32_t offset) const;
- const char *getString(const Elf_Shdr *section, uint32_t offset) const;
- error_code getSymbolName(const Elf_Sym *Symb, StringRef &Res) const;
-
-protected:
- virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const;
- virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const;
- virtual error_code getSymbolOffset(DataRefImpl Symb, uint64_t &Res) const;
- virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const;
- virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const;
- virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const;
- virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const;
- virtual error_code isSymbolGlobal(DataRefImpl Symb, bool &Res) const;
- virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::SymbolType &Res) const;
-
- virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
- virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
- virtual error_code getSectionAddress(DataRefImpl Sec, uint64_t &Res) const;
- virtual error_code getSectionSize(DataRefImpl Sec, uint64_t &Res) const;
- virtual error_code getSectionContents(DataRefImpl Sec, StringRef &Res) const;
- virtual error_code getSectionAlignment(DataRefImpl Sec, uint64_t &Res) const;
- virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const;
- virtual error_code isSectionData(DataRefImpl Sec, bool &Res) const;
- virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const;
- virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
- bool &Result) const;
- virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const;
- virtual relocation_iterator getSectionRelEnd(DataRefImpl Sec) const;
-
- virtual error_code getRelocationNext(DataRefImpl Rel,
- RelocationRef &Res) const;
- virtual error_code getRelocationAddress(DataRefImpl Rel,
- uint64_t &Res) const;
- virtual error_code getRelocationSymbol(DataRefImpl Rel,
- SymbolRef &Res) const;
- virtual error_code getRelocationType(DataRefImpl Rel,
- uint32_t &Res) const;
- virtual error_code getRelocationTypeName(DataRefImpl Rel,
- SmallVectorImpl<char> &Result) const;
- virtual error_code getRelocationAdditionalInfo(DataRefImpl Rel,
- int64_t &Res) const;
- virtual error_code getRelocationValueString(DataRefImpl Rel,
- SmallVectorImpl<char> &Result) const;
-
-public:
- ELFObjectFile(MemoryBuffer *Object, error_code &ec);
- virtual symbol_iterator begin_symbols() const;
- virtual symbol_iterator end_symbols() const;
- virtual section_iterator begin_sections() const;
- virtual section_iterator end_sections() const;
-
- virtual uint8_t getBytesInAddress() const;
- virtual StringRef getFileFormatName() const;
- virtual unsigned getArch() const;
-
- uint64_t getNumSections() const;
- uint64_t getStringTableIndex() const;
- ELF::Elf64_Word getSymbolTableIndex(const Elf_Sym *symb) const;
- const Elf_Shdr *getSection(const Elf_Sym *symb) const;
-};
-} // end namespace
-
-template<support::endianness target_endianness, bool is64Bits>
-void ELFObjectFile<target_endianness, is64Bits>
- ::validateSymbol(DataRefImpl Symb) const {
- const Elf_Sym *symb = getSymbol(Symb);
- const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b];
- // FIXME: We really need to do proper error handling in the case of an invalid
- // input file. Because we don't use exceptions, I think we'll just pass
- // an error object around.
- if (!( symb
- && SymbolTableSection
- && symb >= (const Elf_Sym*)(base()
- + SymbolTableSection->sh_offset)
- && symb < (const Elf_Sym*)(base()
- + SymbolTableSection->sh_offset
- + SymbolTableSection->sh_size)))
- // FIXME: Proper error handling.
- report_fatal_error("Symb must point to a valid symbol!");
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolNext(DataRefImpl Symb,
- SymbolRef &Result) const {
- validateSymbol(Symb);
- const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b];
-
- ++Symb.d.a;
- // Check to see if we are at the end of this symbol table.
- if (Symb.d.a >= SymbolTableSection->getEntityCount()) {
- // We are at the end. If there are other symbol tables, jump to them.
- ++Symb.d.b;
- Symb.d.a = 1; // The 0th symbol in ELF is fake.
- // Otherwise return the terminator.
- if (Symb.d.b >= SymbolTableSections.size()) {
- Symb.d.a = std::numeric_limits<uint32_t>::max();
- Symb.d.b = std::numeric_limits<uint32_t>::max();
- }
- }
-
- Result = SymbolRef(Symb, this);
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolName(DataRefImpl Symb,
- StringRef &Result) const {
- validateSymbol(Symb);
- const Elf_Sym *symb = getSymbol(Symb);
- return getSymbolName(symb, Result);
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-ELF::Elf64_Word ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolTableIndex(const Elf_Sym *symb) const {
- if (symb->st_shndx == ELF::SHN_XINDEX)
- return ExtendedSymbolTable.lookup(symb);
- return symb->st_shndx;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
-ELFObjectFile<target_endianness, is64Bits>
- ::getSection(const Elf_Sym *symb) const {
- if (symb->st_shndx == ELF::SHN_XINDEX)
- return getSection(ExtendedSymbolTable.lookup(symb));
- if (symb->st_shndx >= ELF::SHN_LORESERVE)
- return 0;
- return getSection(symb->st_shndx);
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolOffset(DataRefImpl Symb,
- uint64_t &Result) const {
- validateSymbol(Symb);
- const Elf_Sym *symb = getSymbol(Symb);
- const Elf_Shdr *Section;
- switch (getSymbolTableIndex(symb)) {
- case ELF::SHN_COMMON:
- // Undefined symbols have no address yet.
- case ELF::SHN_UNDEF:
- Result = UnknownAddressOrSize;
- return object_error::success;
- case ELF::SHN_ABS:
- Result = symb->st_value;
- return object_error::success;
- default: Section = getSection(symb);
- }
-
- switch (symb->getType()) {
- case ELF::STT_SECTION:
- Result = Section ? Section->sh_addr : UnknownAddressOrSize;
- return object_error::success;
- case ELF::STT_FUNC:
- case ELF::STT_OBJECT:
- case ELF::STT_NOTYPE:
- Result = symb->st_value;
- return object_error::success;
- default:
- Result = UnknownAddressOrSize;
- return object_error::success;
- }
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolAddress(DataRefImpl Symb,
- uint64_t &Result) const {
- validateSymbol(Symb);
- const Elf_Sym *symb = getSymbol(Symb);
- const Elf_Shdr *Section;
- switch (getSymbolTableIndex(symb)) {
- case ELF::SHN_COMMON: // Fall through.
- // Undefined symbols have no address yet.
- case ELF::SHN_UNDEF:
- Result = UnknownAddressOrSize;
- return object_error::success;
- case ELF::SHN_ABS:
- Result = reinterpret_cast<uintptr_t>(base()+symb->st_value);
- return object_error::success;
- default: Section = getSection(symb);
+ std::pair<unsigned char, unsigned char>
+ getElfArchType(MemoryBuffer *Object) {
+ if (Object->getBufferSize() < ELF::EI_NIDENT)
+ return std::make_pair((uint8_t)ELF::ELFCLASSNONE,(uint8_t)ELF::ELFDATANONE);
+ return std::make_pair( (uint8_t)Object->getBufferStart()[ELF::EI_CLASS]
+ , (uint8_t)Object->getBufferStart()[ELF::EI_DATA]);
}
- const uint8_t* addr = base();
- if (Section)
- addr += Section->sh_offset;
- switch (symb->getType()) {
- case ELF::STT_SECTION:
- Result = reinterpret_cast<uintptr_t>(addr);
- return object_error::success;
- case ELF::STT_FUNC: // Fall through.
- case ELF::STT_OBJECT: // Fall through.
- case ELF::STT_NOTYPE:
- addr += symb->st_value;
- Result = reinterpret_cast<uintptr_t>(addr);
- return object_error::success;
- default:
- Result = UnknownAddressOrSize;
- return object_error::success;
- }
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolSize(DataRefImpl Symb,
- uint64_t &Result) const {
- validateSymbol(Symb);
- const Elf_Sym *symb = getSymbol(Symb);
- if (symb->st_size == 0)
- Result = UnknownAddressOrSize;
- Result = symb->st_size;
- return object_error::success;
}
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolNMTypeChar(DataRefImpl Symb,
- char &Result) const {
- validateSymbol(Symb);
- const Elf_Sym *symb = getSymbol(Symb);
- const Elf_Shdr *Section = getSection(symb);
-
- char ret = '?';
+// Creates an in-memory object file from the given buffer:
+// createELFObjectFile(Buffer)
+ObjectFile *ObjectFile::createELFObjectFile(MemoryBuffer *Object) {
+ std::pair<unsigned char, unsigned char> Ident = getElfArchType(Object);
+ error_code ec;
- if (Section) {
- switch (Section->sh_type) {
- case ELF::SHT_PROGBITS:
- case ELF::SHT_DYNAMIC:
- switch (Section->sh_flags) {
- case (ELF::SHF_ALLOC | ELF::SHF_EXECINSTR):
- ret = 't'; break;
- case (ELF::SHF_ALLOC | ELF::SHF_WRITE):
- ret = 'd'; break;
- case ELF::SHF_ALLOC:
- case (ELF::SHF_ALLOC | ELF::SHF_MERGE):
- case (ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::SHF_STRINGS):
- ret = 'r'; break;
- }
- break;
- case ELF::SHT_NOBITS: ret = 'b';
- }
+ if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB)
+ return new ELFObjectFile<support::little, false>(Object, ec);
+ else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB)
+ return new ELFObjectFile<support::big, false>(Object, ec);
+ else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB)
+ return new ELFObjectFile<support::big, true>(Object, ec);
+  else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB) {
+    return new ELFObjectFile<support::little, true>(Object, ec);
}
- switch (getSymbolTableIndex(symb)) {
- case ELF::SHN_UNDEF:
- if (ret == '?')
- ret = 'U';
- break;
- case ELF::SHN_ABS: ret = 'a'; break;
- case ELF::SHN_COMMON: ret = 'c'; break;
- }
-
- switch (symb->getBinding()) {
- case ELF::STB_GLOBAL: ret = ::toupper(ret); break;
- case ELF::STB_WEAK:
- if (getSymbolTableIndex(symb) == ELF::SHN_UNDEF)
- ret = 'w';
- else
- if (symb->getType() == ELF::STT_OBJECT)
- ret = 'V';
- else
- ret = 'W';
- }
-
- if (ret == '?' && symb->getType() == ELF::STT_SECTION) {
- StringRef name;
- if (error_code ec = getSymbolName(Symb, name))
- return ec;
- Result = StringSwitch<char>(name)
- .StartsWith(".debug", 'N')
- .StartsWith(".note", 'n')
- .Default('?');
- return object_error::success;
- }
-
- Result = ret;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolType(DataRefImpl Symb,
- SymbolRef::SymbolType &Result) const {
- validateSymbol(Symb);
- const Elf_Sym *symb = getSymbol(Symb);
-
- if (getSymbolTableIndex(symb) == ELF::SHN_UNDEF) {
- Result = SymbolRef::ST_External;
- return object_error::success;
- }
-
- switch (symb->getType()) {
- case ELF::STT_FUNC:
- Result = SymbolRef::ST_Function;
- break;
- case ELF::STT_OBJECT:
- Result = SymbolRef::ST_Data;
- break;
- default:
- Result = SymbolRef::ST_Other;
- break;
- }
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::isSymbolGlobal(DataRefImpl Symb,
- bool &Result) const {
- validateSymbol(Symb);
- const Elf_Sym *symb = getSymbol(Symb);
-
- Result = symb->getBinding() == ELF::STB_GLOBAL;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::isSymbolInternal(DataRefImpl Symb,
- bool &Result) const {
- validateSymbol(Symb);
- const Elf_Sym *symb = getSymbol(Symb);
-
- if ( symb->getType() == ELF::STT_FILE
- || symb->getType() == ELF::STT_SECTION)
- Result = true;
- Result = false;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSectionNext(DataRefImpl Sec, SectionRef &Result) const {
- const uint8_t *sec = reinterpret_cast<const uint8_t *>(Sec.p);
- sec += Header->e_shentsize;
- Sec.p = reinterpret_cast<intptr_t>(sec);
- Result = SectionRef(Sec, this);
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSectionName(DataRefImpl Sec,
- StringRef &Result) const {
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- Result = StringRef(getString(dot_shstrtab_sec, sec->sh_name));
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSectionAddress(DataRefImpl Sec,
- uint64_t &Result) const {
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- Result = sec->sh_addr;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSectionSize(DataRefImpl Sec,
- uint64_t &Result) const {
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- Result = sec->sh_size;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSectionContents(DataRefImpl Sec,
- StringRef &Result) const {
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- const char *start = (const char*)base() + sec->sh_offset;
- Result = StringRef(start, sec->sh_size);
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSectionAlignment(DataRefImpl Sec,
- uint64_t &Result) const {
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- Result = sec->sh_addralign;
- return object_error::success;
+ report_fatal_error("Buffer is not an ELF object file!");
}
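
A hedged usage sketch of the factory above, using the OwningPtr and
MemoryBuffer::getFile APIs of this era (error handling is deliberately
minimal, and the helper name is ours):

    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Object/ObjectFile.h"
    #include "llvm/Support/MemoryBuffer.h"

    using namespace llvm;
    using namespace llvm::object;

    // Reads a file and lets the (EI_CLASS, EI_DATA) pair pick one of the
    // four ELFObjectFile instantiations; aborts on non-ELF input as above.
    ObjectFile *openELF(const char *Path) {
      OwningPtr<MemoryBuffer> Buf;
      if (MemoryBuffer::getFile(Path, Buf))
        return 0;                       // could not read the file
      return ObjectFile::createELFObjectFile(Buf.take());
    }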
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::isSectionText(DataRefImpl Sec,
- bool &Result) const {
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- if (sec->sh_flags & ELF::SHF_EXECINSTR)
- Result = true;
- else
- Result = false;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::isSectionData(DataRefImpl Sec,
- bool &Result) const {
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- if (sec->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE)
- && sec->sh_type == ELF::SHT_PROGBITS)
- Result = true;
- else
- Result = false;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::isSectionBSS(DataRefImpl Sec,
- bool &Result) const {
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- if (sec->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE)
- && sec->sh_type == ELF::SHT_NOBITS)
- Result = true;
- else
- Result = false;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::sectionContainsSymbol(DataRefImpl Sec,
- DataRefImpl Symb,
- bool &Result) const {
- // FIXME: Unimplemented.
- Result = false;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-relocation_iterator ELFObjectFile<target_endianness, is64Bits>
- ::getSectionRelBegin(DataRefImpl Sec) const {
- DataRefImpl RelData;
- memset(&RelData, 0, sizeof(RelData));
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- typename RelocMap_t::const_iterator ittr = SectionRelocMap.find(sec);
- if (sec != 0 && ittr != SectionRelocMap.end()) {
- RelData.w.a = getSection(ittr->second[0])->sh_info;
- RelData.w.b = ittr->second[0];
- RelData.w.c = 0;
- }
- return relocation_iterator(RelocationRef(RelData, this));
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-relocation_iterator ELFObjectFile<target_endianness, is64Bits>
- ::getSectionRelEnd(DataRefImpl Sec) const {
- DataRefImpl RelData;
- memset(&RelData, 0, sizeof(RelData));
- const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- typename RelocMap_t::const_iterator ittr = SectionRelocMap.find(sec);
- if (sec != 0 && ittr != SectionRelocMap.end()) {
- // Get the index of the last relocation section for this section.
- std::size_t relocsecindex = ittr->second[ittr->second.size() - 1];
- const Elf_Shdr *relocsec = getSection(relocsecindex);
- RelData.w.a = relocsec->sh_info;
- RelData.w.b = relocsecindex;
- RelData.w.c = relocsec->sh_size / relocsec->sh_entsize;
- }
- return relocation_iterator(RelocationRef(RelData, this));
-}
-
-// Relocations
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getRelocationNext(DataRefImpl Rel,
- RelocationRef &Result) const {
- ++Rel.w.c;
- const Elf_Shdr *relocsec = getSection(Rel.w.b);
- if (Rel.w.c >= (relocsec->sh_size / relocsec->sh_entsize)) {
- // We have reached the end of the relocations for this section. See if there
- // is another relocation section.
- typename RelocMap_t::mapped_type relocseclist =
- SectionRelocMap.lookup(getSection(Rel.w.a));
-
- // Do a binary search for the current reloc section index (which must be
- // present). Then get the next one.
- typename RelocMap_t::mapped_type::const_iterator loc =
- std::lower_bound(relocseclist.begin(), relocseclist.end(), Rel.w.b);
- ++loc;
-
- // If there is no next one, don't do anything. The ++Rel.w.c above sets Rel
- // to the end iterator.
- if (loc != relocseclist.end()) {
- Rel.w.b = *loc;
- Rel.w.a = 0;
- }
- }
- Result = RelocationRef(Rel, this);
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getRelocationSymbol(DataRefImpl Rel,
- SymbolRef &Result) const {
- uint32_t symbolIdx;
- const Elf_Shdr *sec = getSection(Rel.w.b);
- switch (sec->sh_type) {
- default :
- report_fatal_error("Invalid section type in Rel!");
- case ELF::SHT_REL : {
- symbolIdx = getRel(Rel)->getSymbol();
- break;
- }
- case ELF::SHT_RELA : {
- symbolIdx = getRela(Rel)->getSymbol();
- break;
- }
- }
- DataRefImpl SymbolData;
- IndexMap_t::const_iterator it = SymbolTableSectionsIndexMap.find(sec->sh_link);
- if (it == SymbolTableSectionsIndexMap.end())
- report_fatal_error("Relocation symbol table not found!");
- SymbolData.d.a = symbolIdx;
- SymbolData.d.b = it->second;
- Result = SymbolRef(SymbolData, this);
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getRelocationAddress(DataRefImpl Rel,
- uint64_t &Result) const {
- uint64_t offset;
- const Elf_Shdr *sec = getSection(Rel.w.b);
- switch (sec->sh_type) {
- default :
- report_fatal_error("Invalid section type in Rel!");
- case ELF::SHT_REL : {
- offset = getRel(Rel)->r_offset;
- break;
- }
- case ELF::SHT_RELA : {
- offset = getRela(Rel)->r_offset;
- break;
- }
- }
-
- Result = offset;
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getRelocationType(DataRefImpl Rel,
- uint32_t &Result) const {
- const Elf_Shdr *sec = getSection(Rel.w.b);
- switch (sec->sh_type) {
- default :
- report_fatal_error("Invalid section type in Rel!");
- case ELF::SHT_REL : {
- Result = getRel(Rel)->getType();
- break;
- }
- case ELF::SHT_RELA : {
- Result = getRela(Rel)->getType();
- break;
- }
- }
- return object_error::success;
-}
-
-#define LLVM_ELF_SWITCH_RELOC_TYPE_NAME(enum) \
- case ELF::enum: res = #enum; break;
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getRelocationTypeName(DataRefImpl Rel,
- SmallVectorImpl<char> &Result) const {
- const Elf_Shdr *sec = getSection(Rel.w.b);
- uint8_t type;
- StringRef res;
- switch (sec->sh_type) {
- default :
- return object_error::parse_failed;
- case ELF::SHT_REL : {
- type = getRel(Rel)->getType();
- break;
- }
- case ELF::SHT_RELA : {
- type = getRela(Rel)->getType();
- break;
- }
- }
- switch (Header->e_machine) {
- case ELF::EM_X86_64:
- switch (type) {
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_NONE);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_64);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOT32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PLT32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_COPY);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GLOB_DAT);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_JUMP_SLOT);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_RELATIVE);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPCREL);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_32S);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_16);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC16);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_8);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC8);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPMOD64);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPOFF64);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TPOFF64);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSGD);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSLD);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_DTPOFF32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTTPOFF);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TPOFF32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_PC64);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTOFF64);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPC32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_SIZE32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_SIZE64);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_GOTPC32_TLSDESC);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSDESC_CALL);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_X86_64_TLSDESC);
- default:
- res = "Unknown";
- }
- break;
- case ELF::EM_386:
- switch (type) {
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_NONE);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOT32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PLT32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_COPY);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GLOB_DAT);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_JUMP_SLOT);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_RELATIVE);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOTOFF);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_GOTPC);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_32PLT);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_TPOFF);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_IE);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GOTIE);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LE);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_16);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC16);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_8);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_PC8);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_PUSH);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_CALL);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GD_POP);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_PUSH);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_CALL);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDM_POP);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LDO_32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_IE_32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_LE_32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DTPMOD32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DTPOFF32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_TPOFF32);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_GOTDESC);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DESC_CALL);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_TLS_DESC);
- LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_386_IRELATIVE);
- default:
- res = "Unknown";
- }
- break;
- default:
- res = "Unknown";
- }
- Result.append(res.begin(), res.end());
- return object_error::success;
-}
-
-#undef LLVM_ELF_SWITCH_RELOC_TYPE_NAME
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getRelocationAdditionalInfo(DataRefImpl Rel,
- int64_t &Result) const {
- const Elf_Shdr *sec = getSection(Rel.w.b);
- switch (sec->sh_type) {
- default :
- report_fatal_error("Invalid section type in Rel!");
- case ELF::SHT_REL : {
- Result = 0;
- return object_error::success;
- }
- case ELF::SHT_RELA : {
- Result = getRela(Rel)->r_addend;
- return object_error::success;
- }
- }
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getRelocationValueString(DataRefImpl Rel,
- SmallVectorImpl<char> &Result) const {
- const Elf_Shdr *sec = getSection(Rel.w.b);
- uint8_t type;
- StringRef res;
- int64_t addend = 0;
- uint16_t symbol_index = 0;
- switch (sec->sh_type) {
- default :
- return object_error::parse_failed;
- case ELF::SHT_REL : {
- type = getRel(Rel)->getType();
- symbol_index = getRel(Rel)->getSymbol();
- // TODO: Read implicit addend from section data.
- break;
- }
- case ELF::SHT_RELA : {
- type = getRela(Rel)->getType();
- symbol_index = getRela(Rel)->getSymbol();
- addend = getRela(Rel)->r_addend;
- break;
- }
- }
- const Elf_Sym *symb = getEntry<Elf_Sym>(sec->sh_link, symbol_index);
- StringRef symname;
- if (error_code ec = getSymbolName(symb, symname))
- return ec;
- switch (Header->e_machine) {
- case ELF::EM_X86_64:
- switch (type) {
- case ELF::R_X86_64_32S:
- res = symname;
- break;
- case ELF::R_X86_64_PC32: {
- std::string fmtbuf;
- raw_string_ostream fmt(fmtbuf);
- fmt << symname << (addend < 0 ? "" : "+") << addend << "-P";
- fmt.flush();
- Result.append(fmtbuf.begin(), fmtbuf.end());
- }
- break;
- default:
- res = "Unknown";
- }
- break;
- default:
- res = "Unknown";
- }
- if (Result.empty())
- Result.append(res.begin(), res.end());
- return object_error::success;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-ELFObjectFile<target_endianness, is64Bits>::ELFObjectFile(MemoryBuffer *Object
- , error_code &ec)
- : ObjectFile(Binary::isELF, Object, ec)
- , SectionHeaderTable(0)
- , dot_shstrtab_sec(0)
- , dot_strtab_sec(0) {
- Header = reinterpret_cast<const Elf_Ehdr *>(base());
-
- if (Header->e_shoff == 0)
- return;
-
- SectionHeaderTable =
- reinterpret_cast<const Elf_Shdr *>(base() + Header->e_shoff);
- uint64_t SectionTableSize = getNumSections() * Header->e_shentsize;
- if (!( (const uint8_t *)SectionHeaderTable + SectionTableSize
- <= base() + Data->getBufferSize()))
- // FIXME: Proper error handling.
- report_fatal_error("Section table goes past end of file!");
-
-
- // To find the symbol tables we walk the section table to find SHT_SYMTAB.
- const Elf_Shdr* SymbolTableSectionHeaderIndex = 0;
- const Elf_Shdr* sh = reinterpret_cast<const Elf_Shdr*>(SectionHeaderTable);
- for (uint64_t i = 0, e = getNumSections(); i != e; ++i) {
- if (sh->sh_type == ELF::SHT_SYMTAB_SHNDX) {
- if (SymbolTableSectionHeaderIndex)
- // FIXME: Proper error handling.
- report_fatal_error("More than one .symtab_shndx!");
- SymbolTableSectionHeaderIndex = sh;
- }
- if (sh->sh_type == ELF::SHT_SYMTAB) {
- SymbolTableSectionsIndexMap[i] = SymbolTableSections.size();
- SymbolTableSections.push_back(sh);
- }
- if (sh->sh_type == ELF::SHT_REL || sh->sh_type == ELF::SHT_RELA) {
- SectionRelocMap[getSection(sh->sh_info)].push_back(i);
- }
- ++sh;
- }
-
- // Sort section relocation lists by index.
- for (typename RelocMap_t::iterator i = SectionRelocMap.begin(),
- e = SectionRelocMap.end(); i != e; ++i) {
- std::sort(i->second.begin(), i->second.end());
- }
-
- // Get string table sections.
- dot_shstrtab_sec = getSection(getStringTableIndex());
- if (dot_shstrtab_sec) {
- // Verify that the last byte in the string table in a null.
- if (((const char*)base() + dot_shstrtab_sec->sh_offset)
- [dot_shstrtab_sec->sh_size - 1] != 0)
- // FIXME: Proper error handling.
- report_fatal_error("String table must end with a null terminator!");
- }
-
- // Merge this into the above loop.
- for (const char *i = reinterpret_cast<const char *>(SectionHeaderTable),
- *e = i + getNumSections() * Header->e_shentsize;
- i != e; i += Header->e_shentsize) {
- const Elf_Shdr *sh = reinterpret_cast<const Elf_Shdr*>(i);
- if (sh->sh_type == ELF::SHT_STRTAB) {
- StringRef SectionName(getString(dot_shstrtab_sec, sh->sh_name));
- if (SectionName == ".strtab") {
- if (dot_strtab_sec != 0)
- // FIXME: Proper error handling.
- report_fatal_error("Already found section named .strtab!");
- dot_strtab_sec = sh;
- const char *dot_strtab = (const char*)base() + sh->sh_offset;
- if (dot_strtab[sh->sh_size - 1] != 0)
- // FIXME: Proper error handling.
- report_fatal_error("String table must end with a null terminator!");
- }
- }
- }
-
- // Build symbol name side-mapping if there is one.
- if (SymbolTableSectionHeaderIndex) {
- const Elf_Word *ShndxTable = reinterpret_cast<const Elf_Word*>(base() +
- SymbolTableSectionHeaderIndex->sh_offset);
- error_code ec;
- for (symbol_iterator si = begin_symbols(),
- se = end_symbols(); si != se; si.increment(ec)) {
- if (ec)
- report_fatal_error("Fewer extended symbol table entries than symbols!");
- if (*ShndxTable != ELF::SHN_UNDEF)
- ExtendedSymbolTable[getSymbol(si->getRawDataRefImpl())] = *ShndxTable;
- ++ShndxTable;
- }
- }
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-symbol_iterator ELFObjectFile<target_endianness, is64Bits>
- ::begin_symbols() const {
- DataRefImpl SymbolData;
- memset(&SymbolData, 0, sizeof(SymbolData));
- if (SymbolTableSections.size() == 0) {
- SymbolData.d.a = std::numeric_limits<uint32_t>::max();
- SymbolData.d.b = std::numeric_limits<uint32_t>::max();
- } else {
- SymbolData.d.a = 1; // The 0th symbol in ELF is fake.
- SymbolData.d.b = 0;
- }
- return symbol_iterator(SymbolRef(SymbolData, this));
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-symbol_iterator ELFObjectFile<target_endianness, is64Bits>
- ::end_symbols() const {
- DataRefImpl SymbolData;
- memset(&SymbolData, 0, sizeof(SymbolData));
- SymbolData.d.a = std::numeric_limits<uint32_t>::max();
- SymbolData.d.b = std::numeric_limits<uint32_t>::max();
- return symbol_iterator(SymbolRef(SymbolData, this));
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-section_iterator ELFObjectFile<target_endianness, is64Bits>
- ::begin_sections() const {
- DataRefImpl ret;
- memset(&ret, 0, sizeof(DataRefImpl));
- ret.p = reinterpret_cast<intptr_t>(base() + Header->e_shoff);
- return section_iterator(SectionRef(ret, this));
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-section_iterator ELFObjectFile<target_endianness, is64Bits>
- ::end_sections() const {
- DataRefImpl ret;
- memset(&ret, 0, sizeof(DataRefImpl));
- ret.p = reinterpret_cast<intptr_t>(base()
- + Header->e_shoff
- + (Header->e_shentsize*getNumSections()));
- return section_iterator(SectionRef(ret, this));
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-uint8_t ELFObjectFile<target_endianness, is64Bits>::getBytesInAddress() const {
- return is64Bits ? 8 : 4;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-StringRef ELFObjectFile<target_endianness, is64Bits>
- ::getFileFormatName() const {
- switch(Header->e_ident[ELF::EI_CLASS]) {
- case ELF::ELFCLASS32:
- switch(Header->e_machine) {
- case ELF::EM_386:
- return "ELF32-i386";
- case ELF::EM_X86_64:
- return "ELF32-x86-64";
- case ELF::EM_ARM:
- return "ELF32-arm";
- default:
- return "ELF32-unknown";
- }
- case ELF::ELFCLASS64:
- switch(Header->e_machine) {
- case ELF::EM_386:
- return "ELF64-i386";
- case ELF::EM_X86_64:
- return "ELF64-x86-64";
- default:
- return "ELF64-unknown";
- }
- default:
- // FIXME: Proper error handling.
- report_fatal_error("Invalid ELFCLASS!");
- }
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-unsigned ELFObjectFile<target_endianness, is64Bits>::getArch() const {
- switch(Header->e_machine) {
- case ELF::EM_386:
- return Triple::x86;
- case ELF::EM_X86_64:
- return Triple::x86_64;
- case ELF::EM_ARM:
- return Triple::arm;
- default:
- return Triple::UnknownArch;
- }
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-uint64_t ELFObjectFile<target_endianness, is64Bits>::getNumSections() const {
- if (Header->e_shnum == ELF::SHN_UNDEF)
- return SectionHeaderTable->sh_size;
- return Header->e_shnum;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-uint64_t
-ELFObjectFile<target_endianness, is64Bits>::getStringTableIndex() const {
- if (Header->e_shnum == ELF::SHN_UNDEF) {
- if (Header->e_shstrndx == ELF::SHN_HIRESERVE)
- return SectionHeaderTable->sh_link;
- if (Header->e_shstrndx >= getNumSections())
- return 0;
- }
- return Header->e_shstrndx;
-}
-
-
-template<support::endianness target_endianness, bool is64Bits>
-template<typename T>
-inline const T *
-ELFObjectFile<target_endianness, is64Bits>::getEntry(uint16_t Section,
- uint32_t Entry) const {
- return getEntry<T>(getSection(Section), Entry);
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-template<typename T>
-inline const T *
-ELFObjectFile<target_endianness, is64Bits>::getEntry(const Elf_Shdr * Section,
- uint32_t Entry) const {
- return reinterpret_cast<const T *>(
- base()
- + Section->sh_offset
- + (Entry * Section->sh_entsize));
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Sym *
-ELFObjectFile<target_endianness, is64Bits>::getSymbol(DataRefImpl Symb) const {
- return getEntry<Elf_Sym>(SymbolTableSections[Symb.d.b], Symb.d.a);
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Rel *
-ELFObjectFile<target_endianness, is64Bits>::getRel(DataRefImpl Rel) const {
- return getEntry<Elf_Rel>(Rel.w.b, Rel.w.c);
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Rela *
-ELFObjectFile<target_endianness, is64Bits>::getRela(DataRefImpl Rela) const {
- return getEntry<Elf_Rela>(Rela.w.b, Rela.w.c);
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
-ELFObjectFile<target_endianness, is64Bits>::getSection(DataRefImpl Symb) const {
- const Elf_Shdr *sec = getSection(Symb.d.b);
- if (sec->sh_type != ELF::SHT_SYMTAB || sec->sh_type != ELF::SHT_DYNSYM)
- // FIXME: Proper error handling.
- report_fatal_error("Invalid symbol table section!");
- return sec;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Shdr *
-ELFObjectFile<target_endianness, is64Bits>::getSection(uint32_t index) const {
- if (index == 0)
- return 0;
- if (!SectionHeaderTable || index >= getNumSections())
- // FIXME: Proper error handling.
- report_fatal_error("Invalid section index!");
-
- return reinterpret_cast<const Elf_Shdr *>(
- reinterpret_cast<const char *>(SectionHeaderTable)
- + (index * Header->e_shentsize));
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-const char *ELFObjectFile<target_endianness, is64Bits>
- ::getString(uint32_t section,
- ELF::Elf32_Word offset) const {
- return getString(getSection(section), offset);
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-const char *ELFObjectFile<target_endianness, is64Bits>
- ::getString(const Elf_Shdr *section,
- ELF::Elf32_Word offset) const {
- assert(section && section->sh_type == ELF::SHT_STRTAB && "Invalid section!");
- if (offset >= section->sh_size)
- // FIXME: Proper error handling.
- report_fatal_error("Symbol name offset outside of string table!");
- return (const char *)base() + section->sh_offset + offset;
-}
-
-template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>
- ::getSymbolName(const Elf_Sym *symb,
- StringRef &Result) const {
- if (symb->st_name == 0) {
- const Elf_Shdr *section = getSection(symb);
- if (!section)
- Result = "";
- else
- Result = getString(dot_shstrtab_sec, section->sh_name);
- return object_error::success;
- }
-
- // Use the default symbol table name section.
- Result = getString(dot_strtab_sec, symb->st_name);
- return object_error::success;
-}
-
-// EI_CLASS, EI_DATA.
-static std::pair<unsigned char, unsigned char>
-getElfArchType(MemoryBuffer *Object) {
- if (Object->getBufferSize() < ELF::EI_NIDENT)
- return std::make_pair((uint8_t)ELF::ELFCLASSNONE,(uint8_t)ELF::ELFDATANONE);
- return std::make_pair( (uint8_t)Object->getBufferStart()[ELF::EI_CLASS]
- , (uint8_t)Object->getBufferStart()[ELF::EI_DATA]);
-}
-
-namespace llvm {
-
- ObjectFile *ObjectFile::createELFObjectFile(MemoryBuffer *Object) {
- std::pair<unsigned char, unsigned char> Ident = getElfArchType(Object);
- error_code ec;
- if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB)
- return new ELFObjectFile<support::little, false>(Object, ec);
- else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB)
- return new ELFObjectFile<support::big, false>(Object, ec);
- else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB)
- return new ELFObjectFile<support::little, true>(Object, ec);
- else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB)
- return new ELFObjectFile<support::big, true>(Object, ec);
- // FIXME: Proper error handling.
- report_fatal_error("Not an ELF object file!");
- }
-
} // end namespace llvm
diff --git a/contrib/llvm/lib/Object/MachOObject.cpp b/contrib/llvm/lib/Object/MachOObject.cpp
index 9cdac86..b7e5cdc 100644
--- a/contrib/llvm/lib/Object/MachOObject.cpp
+++ b/contrib/llvm/lib/Object/MachOObject.cpp
@@ -10,11 +10,12 @@
#include "llvm/Object/MachOObject.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/Host.h"
-#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/SwapByteOrder.h"
using namespace llvm;
using namespace llvm::object;
@@ -359,25 +360,13 @@ void MachOObject::ReadSymbol64TableEntry(uint64_t SymbolTableOffset,
void MachOObject::ReadULEB128s(uint64_t Index,
SmallVectorImpl<uint64_t> &Out) const {
- const char *ptr = Buffer->getBufferStart() + Index;
+ DataExtractor extractor(Buffer->getBuffer(), true, 0);
+
+ uint32_t offset = Index;
uint64_t data = 0;
- uint64_t delta = 0;
- uint32_t shift = 0;
- while (true) {
- assert(ptr < Buffer->getBufferEnd() && "index out of bounds");
- assert(shift < 64 && "too big for uint64_t");
-
- uint8_t byte = *ptr++;
- delta |= ((byte & 0x7F) << shift);
- shift += 7;
- if (byte < 0x80) {
- if (delta == 0)
- break;
- data += delta;
- Out.push_back(data);
- delta = 0;
- shift = 0;
- }
+ while (uint64_t delta = extractor.getULEB128(&offset)) {
+ data += delta;
+ Out.push_back(data);
}
}
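
The DataExtractor rewrite above keeps the old semantics: the stream is a
zero-terminated list of ULEB128 deltas, expanded into a running sum. A
self-contained sketch of that decoding (no LLVM dependencies assumed):

    #include <cstdint>
    #include <vector>

    static uint64_t readULEB128(const uint8_t *&P, const uint8_t *End) {
      uint64_t Value = 0;
      unsigned Shift = 0;
      while (P != End) {
        uint8_t Byte = *P++;
        Value |= uint64_t(Byte & 0x7f) << Shift;  // 7 payload bits per byte
        if ((Byte & 0x80) == 0)                   // high bit clear: last byte
          break;
        Shift += 7;
      }
      return Value;
    }

    void decodeDeltas(const uint8_t *P, const uint8_t *End,
                      std::vector<uint64_t> &Out) {
      uint64_t Addr = 0;
      while (P != End) {
        uint64_t Delta = readULEB128(P, End);
        if (Delta == 0)
          break;              // a zero delta terminates the list
        Addr += Delta;
        Out.push_back(Addr);
      }
    }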
@@ -393,7 +382,7 @@ void MachOObject::printHeader(raw_ostream &O) const {
O << "('num_load_commands', " << Header.NumLoadCommands << ")\n";
O << "('load_commands_size', " << Header.SizeOfLoadCommands << ")\n";
O << "('flag', " << Header.Flags << ")\n";
-
+
// Print extended header if 64-bit.
if (is64Bit())
O << "('reserved', " << Header64Ext.Reserved << ")\n";
@@ -403,6 +392,6 @@ void MachOObject::print(raw_ostream &O) const {
O << "Header:\n";
printHeader(O);
O << "Load Commands:\n";
-
+
O << "Buffer:\n";
}
diff --git a/contrib/llvm/lib/Object/MachOObjectFile.cpp b/contrib/llvm/lib/Object/MachOObjectFile.cpp
index 507df58..3bcda17 100644
--- a/contrib/llvm/lib/Object/MachOObjectFile.cpp
+++ b/contrib/llvm/lib/Object/MachOObjectFile.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/Object/MachO.h"
#include "llvm/Object/MachOFormat.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cctype>
@@ -29,11 +30,10 @@ namespace object {
MachOObjectFile::MachOObjectFile(MemoryBuffer *Object, MachOObject *MOO,
error_code &ec)
- : ObjectFile(Binary::isMachO, Object, ec),
+ : ObjectFile(Binary::ID_MachO, Object, ec),
MachOObj(MOO),
RegisteredStringTable(std::numeric_limits<uint32_t>::max()) {
DataRefImpl DRI;
- DRI.d.a = DRI.d.b = 0;
moveToNextSection(DRI);
uint32_t LoadCommandCount = MachOObj->getHeader().NumLoadCommands;
while (DRI.d.a < LoadCommandCount) {
@@ -124,23 +124,27 @@ error_code MachOObjectFile::getSymbolName(DataRefImpl DRI,
return object_error::success;
}
-error_code MachOObjectFile::getSymbolOffset(DataRefImpl DRI,
- uint64_t &Result) const {
- uint64_t SectionOffset;
- uint8_t SectionIndex;
+error_code MachOObjectFile::getSymbolFileOffset(DataRefImpl DRI,
+ uint64_t &Result) const {
if (MachOObj->is64Bit()) {
InMemoryStruct<macho::Symbol64TableEntry> Entry;
getSymbol64TableEntry(DRI, Entry);
Result = Entry->Value;
- SectionIndex = Entry->SectionIndex;
+ if (Entry->SectionIndex) {
+ InMemoryStruct<macho::Section64> Section;
+ getSection64(Sections[Entry->SectionIndex-1], Section);
+ Result += Section->Offset - Section->Address;
+ }
} else {
InMemoryStruct<macho::SymbolTableEntry> Entry;
getSymbolTableEntry(DRI, Entry);
Result = Entry->Value;
- SectionIndex = Entry->SectionIndex;
+ if (Entry->SectionIndex) {
+ InMemoryStruct<macho::Section> Section;
+ getSection(Sections[Entry->SectionIndex-1], Section);
+ Result += Section->Offset - Section->Address;
+ }
}
- getSectionAddress(Sections[SectionIndex-1], SectionOffset);
- Result -= SectionOffset;
return object_error::success;
}
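
Editor's note: getSymbolFileOffset (renamed from getSymbolOffset) rebases the symbol's nlist value, a VM address, into a file offset by adding the containing section's file offset and subtracting its VM address; the old code subtracted the section address outright and indexed Sections even for symbols with no section. A sketch of the rebasing arithmetic, with a hypothetical SectionInfo standing in for macho::Section:

    #include <cassert>
    #include <cstdint>

    // The two section fields the patch reads.
    struct SectionInfo {
      uint64_t FileOffset; // Section->Offset: section data's place in the file
      uint64_t VMAddress;  // Section->Address: where the section is mapped
    };

    // Translate a symbol's VM address into a file offset, as
    // getSymbolFileOffset does above.
    static uint64_t symbolFileOffset(uint64_t SymbolVMAddr,
                                     const SectionInfo &S) {
      assert(SymbolVMAddr >= S.VMAddress && "symbol below its section");
      return SymbolVMAddr + S.FileOffset - S.VMAddress;
    }

    int main() {
      SectionInfo Text = {0x1000, 0x100001000ULL};
      // A symbol mapped at 0x100001234 lives at file offset 0x1234.
      return symbolFileOffset(0x100001234ULL, Text) == 0x1234 ? 0 : 1;
    }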
@@ -161,7 +165,74 @@ error_code MachOObjectFile::getSymbolAddress(DataRefImpl DRI,
error_code MachOObjectFile::getSymbolSize(DataRefImpl DRI,
uint64_t &Result) const {
- Result = UnknownAddressOrSize;
+ uint32_t LoadCommandCount = MachOObj->getHeader().NumLoadCommands;
+ uint64_t BeginOffset;
+ uint64_t EndOffset = 0;
+ uint8_t SectionIndex;
+ if (MachOObj->is64Bit()) {
+ InMemoryStruct<macho::Symbol64TableEntry> Entry;
+ getSymbol64TableEntry(DRI, Entry);
+ BeginOffset = Entry->Value;
+ SectionIndex = Entry->SectionIndex;
+ if (!SectionIndex) {
+ uint32_t flags = SymbolRef::SF_None;
+ getSymbolFlags(DRI, flags);
+ if (flags & SymbolRef::SF_Common)
+ Result = Entry->Value;
+ else
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+ // Unfortunately, symbols are unsorted, so we need to walk all
+ // symbols from this load command.
+ DRI.d.b = 0;
+ uint32_t Command = DRI.d.a;
+ while (Command == DRI.d.a) {
+ moveToNextSymbol(DRI);
+ if (DRI.d.a < LoadCommandCount) {
+ getSymbol64TableEntry(DRI, Entry);
+ if (Entry->SectionIndex == SectionIndex && Entry->Value > BeginOffset)
+ if (!EndOffset || Entry->Value < EndOffset)
+ EndOffset = Entry->Value;
+ }
+ DRI.d.b++;
+ }
+ } else {
+ InMemoryStruct<macho::SymbolTableEntry> Entry;
+ getSymbolTableEntry(DRI, Entry);
+ BeginOffset = Entry->Value;
+ SectionIndex = Entry->SectionIndex;
+ if (!SectionIndex) {
+ uint32_t flags = SymbolRef::SF_None;
+ getSymbolFlags(DRI, flags);
+ if (flags & SymbolRef::SF_Common)
+ Result = Entry->Value;
+ else
+ Result = UnknownAddressOrSize;
+ return object_error::success;
+ }
+ // Unfortunately, symbols are unsorted, so we need to walk all
+ // symbols from this load command.
+ DRI.d.b = 0;
+ uint32_t Command = DRI.d.a;
+ while (Command == DRI.d.a) {
+ moveToNextSymbol(DRI);
+ if (DRI.d.a < LoadCommandCount) {
+ getSymbolTableEntry(DRI, Entry);
+ if (Entry->SectionIndex == SectionIndex && Entry->Value > BeginOffset)
+ if (!EndOffset || Entry->Value < EndOffset)
+ EndOffset = Entry->Value;
+ }
+ DRI.d.b++;
+ }
+ }
+ if (!EndOffset) {
+ uint64_t Size;
+ getSectionSize(Sections[SectionIndex-1], Size);
+ getSectionAddress(Sections[SectionIndex-1], EndOffset);
+ EndOffset += Size;
+ }
+ Result = EndOffset - BeginOffset;
return object_error::success;
}
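
Editor's note: Mach-O symbol tables store no size, so the new getSymbolSize infers one — the symbol ends where the next symbol in the same section begins, or at the section end if none follows; common symbols fall back to n_value and everything else to UnknownAddressOrSize. Because the table is unsorted, every entry in the load command is visited. A condensed sketch of the boundary search, over a hypothetical flattened symbol array:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Hypothetical flattened symbol table entry.
    struct Sym { uint8_t Section; uint64_t Value; };

    // Size of Symbols[I], as getSymbolSize computes it above: the smallest
    // later Value in the same section bounds it, else the section end does.
    static uint64_t symbolSize(const std::vector<Sym> &Symbols, size_t I,
                               uint64_t SectionEnd) {
      uint64_t Begin = Symbols[I].Value;
      uint64_t End = 0;
      for (const Sym &S : Symbols) // unsorted: scan every entry
        if (S.Section == Symbols[I].Section && S.Value > Begin)
          End = End ? std::min(End, S.Value) : S.Value;
      return (End ? End : SectionEnd) - Begin;
    }

    int main() {
      std::vector<Sym> T = {{1, 0x30}, {1, 0x10}, {2, 0x18}, {1, 0x20}};
      return symbolSize(T, 1, /*SectionEnd=*/0x40) == 0x10 ? 0 : 1;
    }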
@@ -200,36 +271,69 @@ error_code MachOObjectFile::getSymbolNMTypeChar(DataRefImpl DRI,
return object_error::success;
}
-error_code MachOObjectFile::isSymbolInternal(DataRefImpl DRI,
- bool &Result) const {
+error_code MachOObjectFile::getSymbolFlags(DataRefImpl DRI,
+ uint32_t &Result) const {
+ uint16_t MachOFlags;
+ uint8_t MachOType;
if (MachOObj->is64Bit()) {
InMemoryStruct<macho::Symbol64TableEntry> Entry;
getSymbol64TableEntry(DRI, Entry);
- Result = Entry->Flags & macho::STF_StabsEntryMask;
+ MachOFlags = Entry->Flags;
+ MachOType = Entry->Type;
} else {
InMemoryStruct<macho::SymbolTableEntry> Entry;
getSymbolTableEntry(DRI, Entry);
- Result = Entry->Flags & macho::STF_StabsEntryMask;
+ MachOFlags = Entry->Flags;
+ MachOType = Entry->Type;
}
+
+ // TODO: Correctly set SF_ThreadLocal
+ Result = SymbolRef::SF_None;
+
+ if ((MachOType & MachO::NlistMaskType) == MachO::NListTypeUndefined)
+ Result |= SymbolRef::SF_Undefined;
+
+ if (MachOFlags & macho::STF_StabsEntryMask)
+ Result |= SymbolRef::SF_FormatSpecific;
+
+ if (MachOType & MachO::NlistMaskExternal) {
+ Result |= SymbolRef::SF_Global;
+ if ((MachOType & MachO::NlistMaskType) == MachO::NListTypeUndefined)
+ Result |= SymbolRef::SF_Common;
+ }
+
+ if (MachOFlags & (MachO::NListDescWeakRef | MachO::NListDescWeakDef))
+ Result |= SymbolRef::SF_Weak;
+
+ if ((MachOType & MachO::NlistMaskType) == MachO::NListTypeAbsolute)
+ Result |= SymbolRef::SF_Absolute;
+
return object_error::success;
}
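
Editor's note: getSymbolFlags folds the two boolean queries it replaces (isSymbolInternal, isSymbolGlobal) into one flag word computed from the nlist type and desc fields. A sketch of the same mapping with the numeric Mach-O masks written out — the enum values below are the standard nlist encodings, restated here as assumptions rather than taken from MachOFormat.h:

    #include <cstdint>

    enum : uint8_t {  // nlist n_type masks/values
      N_TYPE = 0x0e, N_EXT = 0x01, N_UNDF = 0x00, N_ABS = 0x02,
    };
    enum : uint32_t { // generic SymbolRef-style flags
      SF_None = 0, SF_Undefined = 1, SF_Global = 2,
      SF_Common = 4, SF_Absolute = 8,
    };

    // One pass over the type byte yields format-independent flags,
    // mirroring getSymbolFlags above.
    static uint32_t flagsFromNlistType(uint8_t Type) {
      uint32_t F = SF_None;
      if ((Type & N_TYPE) == N_UNDF)
        F |= SF_Undefined;
      if (Type & N_EXT) {
        F |= SF_Global;
        if ((Type & N_TYPE) == N_UNDF) // external + undefined => common
          F |= SF_Common;
      }
      if ((Type & N_TYPE) == N_ABS)
        F |= SF_Absolute;
      return F;
    }

    int main() {
      return flagsFromNlistType(N_EXT) ==
                 (SF_Undefined | SF_Global | SF_Common) ? 0 : 1;
    }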
-error_code MachOObjectFile::isSymbolGlobal(DataRefImpl Symb, bool &Res) const {
-
+error_code MachOObjectFile::getSymbolSection(DataRefImpl Symb,
+ section_iterator &Res) const {
+ uint8_t index;
if (MachOObj->is64Bit()) {
InMemoryStruct<macho::Symbol64TableEntry> Entry;
getSymbol64TableEntry(Symb, Entry);
- Res = Entry->Type & MachO::NlistMaskExternal;
+ index = Entry->SectionIndex;
} else {
InMemoryStruct<macho::SymbolTableEntry> Entry;
getSymbolTableEntry(Symb, Entry);
- Res = Entry->Type & MachO::NlistMaskExternal;
+ index = Entry->SectionIndex;
}
+
+ if (index == 0)
+ Res = end_sections();
+ else
+ Res = section_iterator(SectionRef(Sections[index-1], this));
+
return object_error::success;
}
error_code MachOObjectFile::getSymbolType(DataRefImpl Symb,
- SymbolRef::SymbolType &Res) const {
+ SymbolRef::Type &Res) const {
uint8_t n_type;
if (MachOObj->is64Bit()) {
InMemoryStruct<macho::Symbol64TableEntry> Entry;
@@ -243,12 +347,14 @@ error_code MachOObjectFile::getSymbolType(DataRefImpl Symb,
Res = SymbolRef::ST_Other;
// If this is a STAB debugging symbol, we can do nothing more.
- if (n_type & MachO::NlistMaskStab)
+ if (n_type & MachO::NlistMaskStab) {
+ Res = SymbolRef::ST_Debug;
return object_error::success;
+ }
switch (n_type & MachO::NlistMaskType) {
case MachO::NListTypeUndefined :
- Res = SymbolRef::ST_External;
+ Res = SymbolRef::ST_Unknown;
break;
case MachO::NListTypeSection :
Res = SymbolRef::ST_Function;
@@ -261,7 +367,6 @@ error_code MachOObjectFile::getSymbolType(DataRefImpl Symb,
symbol_iterator MachOObjectFile::begin_symbols() const {
// DRI.d.a = segment number; DRI.d.b = symbol index.
DataRefImpl DRI;
- DRI.d.a = DRI.d.b = 0;
moveToNextSymbol(DRI);
return symbol_iterator(SymbolRef(DRI, this));
}
@@ -269,10 +374,33 @@ symbol_iterator MachOObjectFile::begin_symbols() const {
symbol_iterator MachOObjectFile::end_symbols() const {
DataRefImpl DRI;
DRI.d.a = MachOObj->getHeader().NumLoadCommands;
- DRI.d.b = 0;
return symbol_iterator(SymbolRef(DRI, this));
}
+symbol_iterator MachOObjectFile::begin_dynamic_symbols() const {
+ // TODO: implement
+ report_fatal_error("Dynamic symbols unimplemented in MachOObjectFile");
+}
+
+symbol_iterator MachOObjectFile::end_dynamic_symbols() const {
+ // TODO: implement
+ report_fatal_error("Dynamic symbols unimplemented in MachOObjectFile");
+}
+
+library_iterator MachOObjectFile::begin_libraries_needed() const {
+ // TODO: implement
+ report_fatal_error("Needed libraries unimplemented in MachOObjectFile");
+}
+
+library_iterator MachOObjectFile::end_libraries_needed() const {
+ // TODO: implement
+ report_fatal_error("Needed libraries unimplemented in MachOObjectFile");
+}
+
+StringRef MachOObjectFile::getLoadName() const {
+ // TODO: Implement
+ report_fatal_error("get_load_name() unimplemented in MachOObjectFile");
+}
/*===-- Sections ----------------------------------------------------------===*/
@@ -451,12 +579,43 @@ error_code MachOObjectFile::isSectionBSS(DataRefImpl DRI,
return object_error::success;
}
+error_code MachOObjectFile::isSectionRequiredForExecution(DataRefImpl Sec,
+ bool &Result) const {
+ // FIXME: Unimplemented
+ Result = true;
+ return object_error::success;
+}
+
+error_code MachOObjectFile::isSectionVirtual(DataRefImpl Sec,
+ bool &Result) const {
+ // FIXME: Unimplemented
+ Result = false;
+ return object_error::success;
+}
+
+error_code MachOObjectFile::isSectionZeroInit(DataRefImpl DRI,
+ bool &Result) const {
+ if (MachOObj->is64Bit()) {
+ InMemoryStruct<macho::Section64> Sect;
+ getSection64(DRI, Sect);
+ Result = (Sect->Flags & MachO::SectionTypeZeroFill ||
+ Sect->Flags & MachO::SectionTypeZeroFillLarge);
+ } else {
+ InMemoryStruct<macho::Section> Sect;
+ getSection(DRI, Sect);
+ Result = (Sect->Flags & MachO::SectionTypeZeroFill ||
+ Sect->Flags & MachO::SectionTypeZeroFillLarge);
+ }
+
+ return object_error::success;
+}
+
error_code MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec,
DataRefImpl Symb,
bool &Result) const {
- SymbolRef::SymbolType ST;
+ SymbolRef::Type ST;
getSymbolType(Symb, ST);
- if (ST == SymbolRef::ST_External) {
+ if (ST == SymbolRef::ST_Unknown) {
Result = false;
return object_error::success;
}
@@ -483,7 +642,6 @@ error_code MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec,
relocation_iterator MachOObjectFile::getSectionRelBegin(DataRefImpl Sec) const {
DataRefImpl ret;
- ret.d.a = 0;
ret.d.b = getSectionIndex(Sec);
return relocation_iterator(RelocationRef(ret, this));
}
@@ -506,7 +664,6 @@ relocation_iterator MachOObjectFile::getSectionRelEnd(DataRefImpl Sec) const {
section_iterator MachOObjectFile::begin_sections() const {
DataRefImpl DRI;
- DRI.d.a = DRI.d.b = 0;
moveToNextSection(DRI);
return section_iterator(SectionRef(DRI, this));
}
@@ -514,7 +671,6 @@ section_iterator MachOObjectFile::begin_sections() const {
section_iterator MachOObjectFile::end_sections() const {
DataRefImpl DRI;
DRI.d.a = MachOObj->getHeader().NumLoadCommands;
- DRI.d.b = 0;
return section_iterator(SectionRef(DRI, this));
}
@@ -543,19 +699,43 @@ error_code MachOObjectFile::getRelocationNext(DataRefImpl Rel,
}
error_code MachOObjectFile::getRelocationAddress(DataRefImpl Rel,
uint64_t &Res) const {
- const uint8_t* sectAddress = base();
+ const uint8_t* sectAddress = 0;
if (MachOObj->is64Bit()) {
InMemoryStruct<macho::Section64> Sect;
getSection64(Sections[Rel.d.b], Sect);
- sectAddress += Sect->Offset;
+ sectAddress += Sect->Address;
} else {
InMemoryStruct<macho::Section> Sect;
getSection(Sections[Rel.d.b], Sect);
- sectAddress += Sect->Offset;
+ sectAddress += Sect->Address;
}
InMemoryStruct<macho::RelocationEntry> RE;
getRelocation(Rel, RE);
- Res = reinterpret_cast<uintptr_t>(sectAddress + RE->Word0);
+
+ unsigned Arch = getArch();
+ bool isScattered = (Arch != Triple::x86_64) &&
+ (RE->Word0 & macho::RF_Scattered);
+ uint64_t RelAddr = 0;
+ if (isScattered)
+ RelAddr = RE->Word0 & 0xFFFFFF;
+ else
+ RelAddr = RE->Word0;
+
+ Res = reinterpret_cast<uintptr_t>(sectAddress + RelAddr);
+ return object_error::success;
+}
+error_code MachOObjectFile::getRelocationOffset(DataRefImpl Rel,
+ uint64_t &Res) const {
+ InMemoryStruct<macho::RelocationEntry> RE;
+ getRelocation(Rel, RE);
+
+ unsigned Arch = getArch();
+ bool isScattered = (Arch != Triple::x86_64) &&
+ (RE->Word0 & macho::RF_Scattered);
+ if (isScattered)
+ Res = RE->Word0 & 0xFFFFFF;
+ else
+ Res = RE->Word0;
return object_error::success;
}
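
Editor's note: everywhere below, the same scattered-relocation test recurs — on every architecture except x86_64, bit 31 of the first relocation word (macho::RF_Scattered) switches the record layout, moving the address into Word0's low 24 bits and the type/pc-rel bits into Word0's high byte instead of Word1. A sketch of both decodings side by side, over a hypothetical raw two-word entry:

    #include <cstdint>
    #include <cstdio>

    struct RawReloc { uint32_t Word0, Word1; }; // hypothetical raw entry
    const uint32_t RF_Scattered = 0x80000000;   // macho::RF_Scattered

    // Extract the fields getRelocationOffset/getRelocationType read above.
    static void decode(const RawReloc &RE, bool IsX86_64, uint64_t &Addr,
                       unsigned &Type, bool &PCRel) {
      bool Scattered = !IsX86_64 && (RE.Word0 & RF_Scattered);
      if (Scattered) {
        Addr  = RE.Word0 & 0xFFFFFF;   // r_address moves into Word0
        Type  = (RE.Word0 >> 24) & 0xF;
        PCRel = (RE.Word0 >> 30) & 1;
      } else {
        Addr  = RE.Word0;              // plain entries keep it whole
        Type  = (RE.Word1 >> 28) & 0xF;
        PCRel = (RE.Word1 >> 24) & 1;
      }
    }

    int main() {
      RawReloc R = {RF_Scattered | (2u << 24) | 0x001234, 0};
      uint64_t A; unsigned T; bool P;
      decode(R, /*IsX86_64=*/false, A, T, P);
      printf("addr=0x%llx type=%u pcrel=%d\n", (unsigned long long)A, T, P);
      return 0;
    }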
error_code MachOObjectFile::getRelocationSymbol(DataRefImpl Rel,
@@ -566,7 +746,6 @@ error_code MachOObjectFile::getRelocationSymbol(DataRefImpl Rel,
bool isExtern = (RE->Word1 >> 27) & 1;
DataRefImpl Sym;
- Sym.d.a = Sym.d.b = 0;
moveToNextSymbol(Sym);
if (isExtern) {
for (unsigned i = 0; i < SymbolIdx; i++) {
@@ -580,14 +759,112 @@ error_code MachOObjectFile::getRelocationSymbol(DataRefImpl Rel,
return object_error::success;
}
error_code MachOObjectFile::getRelocationType(DataRefImpl Rel,
- uint32_t &Res) const {
+ uint64_t &Res) const {
InMemoryStruct<macho::RelocationEntry> RE;
getRelocation(Rel, RE);
- Res = RE->Word1;
+ Res = RE->Word0;
+ Res <<= 32;
+ Res |= RE->Word1;
return object_error::success;
}
error_code MachOObjectFile::getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const {
+ // TODO: Support scattered relocations.
+ StringRef res;
+ InMemoryStruct<macho::RelocationEntry> RE;
+ getRelocation(Rel, RE);
+
+ unsigned Arch = getArch();
+ bool isScattered = (Arch != Triple::x86_64) &&
+ (RE->Word0 & macho::RF_Scattered);
+
+ unsigned r_type;
+ if (isScattered)
+ r_type = (RE->Word0 >> 24) & 0xF;
+ else
+ r_type = (RE->Word1 >> 28) & 0xF;
+
+ switch (Arch) {
+ case Triple::x86: {
+ const char* Table[] = {
+ "GENERIC_RELOC_VANILLA",
+ "GENERIC_RELOC_PAIR",
+ "GENERIC_RELOC_SECTDIFF",
+ "GENERIC_RELOC_PB_LA_PTR",
+ "GENERIC_RELOC_LOCAL_SECTDIFF",
+ "GENERIC_RELOC_TLV" };
+
+ if (r_type > 5)
+ res = "Unknown";
+ else
+ res = Table[r_type];
+ break;
+ }
+ case Triple::x86_64: {
+ const char* Table[] = {
+ "X86_64_RELOC_UNSIGNED",
+ "X86_64_RELOC_SIGNED",
+ "X86_64_RELOC_BRANCH",
+ "X86_64_RELOC_GOT_LOAD",
+ "X86_64_RELOC_GOT",
+ "X86_64_RELOC_SUBTRACTOR",
+ "X86_64_RELOC_SIGNED_1",
+ "X86_64_RELOC_SIGNED_2",
+ "X86_64_RELOC_SIGNED_4",
+ "X86_64_RELOC_TLV" };
+
+ if (r_type > 9)
+ res = "Unknown";
+ else
+ res = Table[r_type];
+ break;
+ }
+ case Triple::arm: {
+ const char* Table[] = {
+ "ARM_RELOC_VANILLA",
+ "ARM_RELOC_PAIR",
+ "ARM_RELOC_SECTDIFF",
+ "ARM_RELOC_LOCAL_SECTDIFF",
+ "ARM_RELOC_PB_LA_PTR",
+ "ARM_RELOC_BR24",
+ "ARM_THUMB_RELOC_BR22",
+ "ARM_THUMB_32BIT_BRANCH",
+ "ARM_RELOC_HALF",
+ "ARM_RELOC_HALF_SECTDIFF" };
+
+ if (r_type > 9)
+ res = "Unknown";
+ else
+ res = Table[r_type];
+ break;
+ }
+ case Triple::ppc: {
+ const char* Table[] = {
+ "PPC_RELOC_VANILLA",
+ "PPC_RELOC_PAIR",
+ "PPC_RELOC_BR14",
+ "PPC_RELOC_BR24",
+ "PPC_RELOC_HI16",
+ "PPC_RELOC_LO16",
+ "PPC_RELOC_HA16",
+ "PPC_RELOC_LO14",
+ "PPC_RELOC_SECTDIFF",
+ "PPC_RELOC_PB_LA_PTR",
+ "PPC_RELOC_HI16_SECTDIFF",
+ "PPC_RELOC_LO16_SECTDIFF",
+ "PPC_RELOC_HA16_SECTDIFF",
+ "PPC_RELOC_JBSR",
+ "PPC_RELOC_LO14_SECTDIFF",
+ "PPC_RELOC_LOCAL_SECTDIFF" };
+
+ res = Table[r_type];
+ break;
+ }
+ case Triple::UnknownArch:
+ res = "Unknown";
+ break;
+ }
+ Result.append(res.begin(), res.end());
return object_error::success;
}
error_code MachOObjectFile::getRelocationAdditionalInfo(DataRefImpl Rel,
@@ -611,11 +888,356 @@ error_code MachOObjectFile::getRelocationAdditionalInfo(DataRefImpl Rel,
}
return object_error::success;
}
+
+// Helper to advance a section or symbol iterator multiple increments at a time.
+template<class T>
+error_code advance(T &it, size_t Val) {
+ error_code ec;
+ while (Val--) {
+ it.increment(ec);
+ }
+ return ec;
+}
+
+template<class T>
+void advanceTo(T &it, size_t Val) {
+ if (error_code ec = advance(it, Val))
+ report_fatal_error(ec.message());
+}
+
+void MachOObjectFile::printRelocationTargetName(
+ InMemoryStruct<macho::RelocationEntry>& RE,
+ raw_string_ostream &fmt) const {
+ unsigned Arch = getArch();
+ bool isScattered = (Arch != Triple::x86_64) &&
+ (RE->Word0 & macho::RF_Scattered);
+
+ // The target of a scattered relocation is an address. In the interest of
+ // generating pretty output, scan through the symbol table looking for a
+ // symbol that aligns with that address. If we find one, print it.
+ // Otherwise, we just print the hex address of the target.
+ if (isScattered) {
+ uint32_t Val = RE->Word1;
+
+ error_code ec;
+ for (symbol_iterator SI = begin_symbols(), SE = end_symbols(); SI != SE;
+ SI.increment(ec)) {
+ if (ec) report_fatal_error(ec.message());
+
+ uint64_t Addr;
+ StringRef Name;
+
+ if ((ec = SI->getAddress(Addr)))
+ report_fatal_error(ec.message());
+ if (Addr != Val) continue;
+ if ((ec = SI->getName(Name)))
+ report_fatal_error(ec.message());
+ fmt << Name;
+ return;
+ }
+
+ // If we couldn't find a symbol that this relocation refers to, try
+ // to find a section beginning instead.
+ for (section_iterator SI = begin_sections(), SE = end_sections(); SI != SE;
+ SI.increment(ec)) {
+ if (ec) report_fatal_error(ec.message());
+
+ uint64_t Addr;
+ StringRef Name;
+
+ if ((ec = SI->getAddress(Addr)))
+ report_fatal_error(ec.message());
+ if (Addr != Val) continue;
+ if ((ec = SI->getName(Name)))
+ report_fatal_error(ec.message());
+ fmt << Name;
+ return;
+ }
+
+ fmt << format("0x%x", Val);
+ return;
+ }
+
+ StringRef S;
+ bool isExtern = (RE->Word1 >> 27) & 1;
+ uint32_t Val = RE->Word1 & 0xFFFFFF;
+
+ if (isExtern) {
+ symbol_iterator SI = begin_symbols();
+ advanceTo(SI, Val);
+ SI->getName(S);
+ } else {
+ section_iterator SI = begin_sections();
+ advanceTo(SI, Val);
+ SI->getName(S);
+ }
+
+ fmt << S;
+}
+
error_code MachOObjectFile::getRelocationValueString(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const {
+ InMemoryStruct<macho::RelocationEntry> RE;
+ getRelocation(Rel, RE);
+
+ unsigned Arch = getArch();
+ bool isScattered = (Arch != Triple::x86_64) &&
+ (RE->Word0 & macho::RF_Scattered);
+
+ std::string fmtbuf;
+ raw_string_ostream fmt(fmtbuf);
+
+ unsigned Type;
+ if (isScattered)
+ Type = (RE->Word0 >> 24) & 0xF;
+ else
+ Type = (RE->Word1 >> 28) & 0xF;
+
+ bool isPCRel;
+ if (isScattered)
+ isPCRel = ((RE->Word0 >> 30) & 1);
+ else
+ isPCRel = ((RE->Word1 >> 24) & 1);
+
+ // Determine any addends that should be displayed with the relocation.
+ // These require decoding the relocation type, which is triple-specific.
+
+ // X86_64 has entirely custom relocation types.
+ if (Arch == Triple::x86_64) {
+ bool isPCRel = ((RE->Word1 >> 24) & 1);
+
+ switch (Type) {
+ case macho::RIT_X86_64_GOTLoad: // X86_64_RELOC_GOT_LOAD
+ case macho::RIT_X86_64_GOT: { // X86_64_RELOC_GOT
+ printRelocationTargetName(RE, fmt);
+ fmt << "@GOT";
+ if (isPCRel) fmt << "PCREL";
+ break;
+ }
+ case macho::RIT_X86_64_Subtractor: { // X86_64_RELOC_SUBTRACTOR
+ InMemoryStruct<macho::RelocationEntry> RENext;
+ DataRefImpl RelNext = Rel;
+ RelNext.d.a++;
+ getRelocation(RelNext, RENext);
+
+ // X86_64_SUBTRACTOR must be followed by a relocation of type
+ // X86_64_RELOC_UNSIGNED.
+ // NOTE: Scattered relocations don't exist on x86_64.
+ unsigned RType = (RENext->Word1 >> 28) & 0xF;
+ if (RType != 0)
+ report_fatal_error("Expected X86_64_RELOC_UNSIGNED after "
+ "X86_64_RELOC_SUBTRACTOR.");
+
+ // The X86_64_RELOC_UNSIGNED contains the minuend symbol;
+ // X86_64_RELOC_SUBTRACTOR contains the subtrahend.
+ printRelocationTargetName(RENext, fmt);
+ fmt << "-";
+ printRelocationTargetName(RE, fmt);
+ break;
+ }
+ case macho::RIT_X86_64_TLV:
+ printRelocationTargetName(RE, fmt);
+ fmt << "@TLV";
+ if (isPCRel) fmt << "P";
+ break;
+ case macho::RIT_X86_64_Signed1: // X86_64_RELOC_SIGNED1
+ printRelocationTargetName(RE, fmt);
+ fmt << "-1";
+ break;
+ case macho::RIT_X86_64_Signed2: // X86_64_RELOC_SIGNED2
+ printRelocationTargetName(RE, fmt);
+ fmt << "-2";
+ break;
+ case macho::RIT_X86_64_Signed4: // X86_64_RELOC_SIGNED4
+ printRelocationTargetName(RE, fmt);
+ fmt << "-4";
+ break;
+ default:
+ printRelocationTargetName(RE, fmt);
+ break;
+ }
+ // X86 and ARM share some relocation types.
+ } else if (Arch == Triple::x86 || Arch == Triple::arm) {
+ // Generic relocation types...
+ switch (Type) {
+ case macho::RIT_Pair: // GENERIC_RELOC_PAIR - prints no info
+ return object_error::success;
+ case macho::RIT_Difference: { // GENERIC_RELOC_SECTDIFF
+ InMemoryStruct<macho::RelocationEntry> RENext;
+ DataRefImpl RelNext = Rel;
+ RelNext.d.a++;
+ getRelocation(RelNext, RENext);
+
+ // X86 sect diffs must be followed by a relocation of type
+ // GENERIC_RELOC_PAIR.
+ bool isNextScattered = (Arch != Triple::x86_64) &&
+ (RENext->Word0 & macho::RF_Scattered);
+ unsigned RType;
+ if (isNextScattered)
+ RType = (RENext->Word0 >> 24) & 0xF;
+ else
+ RType = (RENext->Word1 >> 28) & 0xF;
+ if (RType != 1)
+ report_fatal_error("Expected GENERIC_RELOC_PAIR after "
+ "GENERIC_RELOC_SECTDIFF.");
+
+ printRelocationTargetName(RE, fmt);
+ fmt << "-";
+ printRelocationTargetName(RENext, fmt);
+ break;
+ }
+ }
+
+ if (Arch == Triple::x86) {
+ // All X86 relocations that need special printing were already
+ // handled in the generic code.
+ switch (Type) {
+ case macho::RIT_Generic_LocalDifference:{// GENERIC_RELOC_LOCAL_SECTDIFF
+ InMemoryStruct<macho::RelocationEntry> RENext;
+ DataRefImpl RelNext = Rel;
+ RelNext.d.a++;
+ getRelocation(RelNext, RENext);
+
+ // X86 sect diffs must be followed by a relocation of type
+ // GENERIC_RELOC_PAIR.
+ bool isNextScattered = (Arch != Triple::x86_64) &&
+ (RENext->Word0 & macho::RF_Scattered);
+ unsigned RType;
+ if (isNextScattered)
+ RType = (RENext->Word0 >> 24) & 0xF;
+ else
+ RType = (RENext->Word1 >> 28) & 0xF;
+ if (RType != 1)
+ report_fatal_error("Expected GENERIC_RELOC_PAIR after "
+ "GENERIC_RELOC_LOCAL_SECTDIFF.");
+
+ printRelocationTargetName(RE, fmt);
+ fmt << "-";
+ printRelocationTargetName(RENext, fmt);
+ break;
+ }
+ case macho::RIT_Generic_TLV: {
+ printRelocationTargetName(RE, fmt);
+ fmt << "@TLV";
+ if (isPCRel) fmt << "P";
+ break;
+ }
+ default:
+ printRelocationTargetName(RE, fmt);
+ }
+ } else { // ARM-specific relocations
+ switch (Type) {
+ case macho::RIT_ARM_Half: // ARM_RELOC_HALF
+ case macho::RIT_ARM_HalfDifference: { // ARM_RELOC_HALF_SECTDIFF
+ // Half relocations steal a bit from the length field to encode
+ // whether this is an upper16 or a lower16 relocation.
+ bool isUpper;
+ if (isScattered)
+ isUpper = (RE->Word0 >> 28) & 1;
+ else
+ isUpper = (RE->Word1 >> 25) & 1;
+
+ if (isUpper)
+ fmt << ":upper16:(";
+ else
+ fmt << ":lower16:(";
+ printRelocationTargetName(RE, fmt);
+
+ InMemoryStruct<macho::RelocationEntry> RENext;
+ DataRefImpl RelNext = Rel;
+ RelNext.d.a++;
+ getRelocation(RelNext, RENext);
+
+ // ARM half relocs must be followed by a relocation of type
+ // ARM_RELOC_PAIR.
+ bool isNextScattered = (Arch != Triple::x86_64) &&
+ (RENext->Word0 & macho::RF_Scattered);
+ unsigned RType;
+ if (isNextScattered)
+ RType = (RENext->Word0 >> 24) & 0xF;
+ else
+ RType = (RENext->Word1 >> 28) & 0xF;
+
+ if (RType != 1)
+ report_fatal_error("Expected ARM_RELOC_PAIR after "
+ "GENERIC_RELOC_HALF");
+
+ // NOTE: Half of the target virtual address is stashed in the
+ // address field of the secondary relocation, but we can't reverse
+ // engineer the constant offset from it without decoding the movw/movt
+ // instruction to find the other half in its immediate field.
+
+ // ARM_RELOC_HALF_SECTDIFF encodes the second section in the
+ // symbol/section pointer of the follow-on relocation.
+ if (Type == macho::RIT_ARM_HalfDifference) {
+ fmt << "-";
+ printRelocationTargetName(RENext, fmt);
+ }
+
+ fmt << ")";
+ break;
+ }
+ default: {
+ printRelocationTargetName(RE, fmt);
+ }
+ }
+ }
+ } else
+ printRelocationTargetName(RE, fmt);
+
+ fmt.flush();
+ Result.append(fmtbuf.begin(), fmtbuf.end());
+ return object_error::success;
+}
+
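+ // Editor's note: the printer above leans on a Mach-O convention --
+ // SECTDIFF, LOCAL_SECTDIFF, and the ARM half relocations are only
+ // meaningful together with the *_RELOC_PAIR entry that must immediately
+ // follow them, which is why the code keeps peeking at Rel.d.a + 1 and
+ // calls report_fatal_error when the follower has the wrong type. A
+ // reduced sketch of that consume-the-pair pattern, over a hypothetical
+ // pre-decoded relocation list:
+ //
+ //   #include <cstdint>
+ //   #include <stdexcept>
+ //   #include <string>
+ //   #include <vector>
+ //
+ //   struct Rel { unsigned Type; const char *Target; }; // hypothetical
+ //   const unsigned RIT_Pair = 1, RIT_Difference = 2;
+ //
+ //   // Render Relocs[I] as getRelocationValueString does above: a
+ //   // SECTDIFF entry consumes its mandatory PAIR follower as subtrahend.
+ //   static std::string valueString(const std::vector<Rel> &Relocs,
+ //                                  size_t I) {
+ //     const Rel &RE = Relocs[I];
+ //     if (RE.Type == RIT_Pair)
+ //       return ""; // printed as part of its predecessor
+ //     if (RE.Type == RIT_Difference) {
+ //       if (I + 1 >= Relocs.size() || Relocs[I + 1].Type != RIT_Pair)
+ //         throw std::runtime_error("Expected GENERIC_RELOC_PAIR after "
+ //                                  "GENERIC_RELOC_SECTDIFF");
+ //       return std::string(RE.Target) + "-" + Relocs[I + 1].Target;
+ //     }
+ //     return RE.Target;
+ //   }
+ //
+ //   int main() {
+ //     std::vector<Rel> R = {{RIT_Difference, "_a"}, {RIT_Pair, "_b"}};
+ //     return valueString(R, 0) == "_a-_b" ? 0 : 1;
+ //   }
+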
+error_code MachOObjectFile::getRelocationHidden(DataRefImpl Rel,
+ bool &Result) const {
+ InMemoryStruct<macho::RelocationEntry> RE;
+ getRelocation(Rel, RE);
+
+ unsigned Arch = getArch();
+ bool isScattered = (Arch != Triple::x86_64) &&
+ (RE->Word0 & macho::RF_Scattered);
+ unsigned Type;
+ if (isScattered)
+ Type = (RE->Word0 >> 24) & 0xF;
+ else
+ Type = (RE->Word1 >> 28) & 0xF;
+
+ Result = false;
+
+ // On arches that use the generic relocations, GENERIC_RELOC_PAIR
+ // is always hidden.
+ if (Arch == Triple::x86 || Arch == Triple::arm) {
+ if (Type == macho::RIT_Pair) Result = true;
+ } else if (Arch == Triple::x86_64) {
+ // On x86_64, X86_64_RELOC_UNSIGNED is hidden only when it follows
+ // an X86_64_RELOC_SUBTRACTOR.
+ if (Type == macho::RIT_X86_64_Unsigned && Rel.d.a > 0) {
+ DataRefImpl RelPrev = Rel;
+ RelPrev.d.a--;
+ InMemoryStruct<macho::RelocationEntry> REPrev;
+ getRelocation(RelPrev, REPrev);
+
+ unsigned PrevType = (REPrev->Word1 >> 28) & 0xF;
+
+ if (PrevType == macho::RIT_X86_64_Subtractor) Result = true;
+ }
+ }
+
return object_error::success;
}
+error_code MachOObjectFile::getLibraryNext(DataRefImpl LibData,
+ LibraryRef &Res) const {
+ report_fatal_error("Needed libraries unimplemented in MachOObjectFile");
+}
+
+error_code MachOObjectFile::getLibraryPath(DataRefImpl LibData,
+ StringRef &Res) const {
+ report_fatal_error("Needed libraries unimplemented in MachOObjectFile");
+}
+
+
/*===-- Miscellaneous -----------------------------------------------------===*/
uint8_t MachOObjectFile::getBytesInAddress() const {
diff --git a/contrib/llvm/lib/Object/Object.cpp b/contrib/llvm/lib/Object/Object.cpp
index 2ea8db9..f061ea7 100644
--- a/contrib/llvm/lib/Object/Object.cpp
+++ b/contrib/llvm/lib/Object/Object.cpp
@@ -18,6 +18,7 @@
using namespace llvm;
using namespace object;
+// ObjectFile creation
LLVMObjectFileRef LLVMCreateObjectFile(LLVMMemoryBufferRef MemBuf) {
return wrap(ObjectFile::createObjectFile(unwrap(MemBuf)));
}
@@ -26,6 +27,7 @@ void LLVMDisposeObjectFile(LLVMObjectFileRef ObjectFile) {
delete unwrap(ObjectFile);
}
+// ObjectFile Section iterators
LLVMSectionIteratorRef LLVMGetSections(LLVMObjectFileRef ObjectFile) {
section_iterator SI = unwrap(ObjectFile)->begin_sections();
return wrap(new section_iterator(SI));
@@ -46,6 +48,34 @@ void LLVMMoveToNextSection(LLVMSectionIteratorRef SI) {
if (ec) report_fatal_error("LLVMMoveToNextSection failed: " + ec.message());
}
+void LLVMMoveToContainingSection(LLVMSectionIteratorRef Sect,
+ LLVMSymbolIteratorRef Sym) {
+ if (error_code ec = (*unwrap(Sym))->getSection(*unwrap(Sect)))
+ report_fatal_error(ec.message());
+}
+
+// ObjectFile Symbol iterators
+LLVMSymbolIteratorRef LLVMGetSymbols(LLVMObjectFileRef ObjectFile) {
+ symbol_iterator SI = unwrap(ObjectFile)->begin_symbols();
+ return wrap(new symbol_iterator(SI));
+}
+
+void LLVMDisposeSymbolIterator(LLVMSymbolIteratorRef SI) {
+ delete unwrap(SI);
+}
+
+LLVMBool LLVMIsSymbolIteratorAtEnd(LLVMObjectFileRef ObjectFile,
+ LLVMSymbolIteratorRef SI) {
+ return (*unwrap(SI) == unwrap(ObjectFile)->end_symbols()) ? 1 : 0;
+}
+
+void LLVMMoveToNextSymbol(LLVMSymbolIteratorRef SI) {
+ error_code ec;
+ unwrap(SI)->increment(ec);
+ if (ec) report_fatal_error("LLVMMoveToNextSymbol failed: " + ec.message());
+}
+
+// SectionRef accessors
const char *LLVMGetSectionName(LLVMSectionIteratorRef SI) {
StringRef ret;
if (error_code ec = (*unwrap(SI))->getName(ret))
@@ -66,3 +96,123 @@ const char *LLVMGetSectionContents(LLVMSectionIteratorRef SI) {
report_fatal_error(ec.message());
return ret.data();
}
+
+uint64_t LLVMGetSectionAddress(LLVMSectionIteratorRef SI) {
+ uint64_t ret;
+ if (error_code ec = (*unwrap(SI))->getAddress(ret))
+ report_fatal_error(ec.message());
+ return ret;
+}
+
+LLVMBool LLVMGetSectionContainsSymbol(LLVMSectionIteratorRef SI,
+ LLVMSymbolIteratorRef Sym) {
+ bool ret;
+ if (error_code ec = (*unwrap(SI))->containsSymbol(**unwrap(Sym), ret))
+ report_fatal_error(ec.message());
+ return ret;
+}
+
+// Section Relocation iterators
+LLVMRelocationIteratorRef LLVMGetRelocations(LLVMSectionIteratorRef Section) {
+ relocation_iterator SI = (*unwrap(Section))->begin_relocations();
+ return wrap(new relocation_iterator(SI));
+}
+
+void LLVMDisposeRelocationIterator(LLVMRelocationIteratorRef SI) {
+ delete unwrap(SI);
+}
+
+LLVMBool LLVMIsRelocationIteratorAtEnd(LLVMSectionIteratorRef Section,
+ LLVMRelocationIteratorRef SI) {
+ return (*unwrap(SI) == (*unwrap(Section))->end_relocations()) ? 1 : 0;
+}
+
+void LLVMMoveToNextRelocation(LLVMRelocationIteratorRef SI) {
+ error_code ec;
+ unwrap(SI)->increment(ec);
+ if (ec) report_fatal_error("LLVMMoveToNextRelocation failed: " +
+ ec.message());
+}
+
+
+// SymbolRef accessors
+const char *LLVMGetSymbolName(LLVMSymbolIteratorRef SI) {
+ StringRef ret;
+ if (error_code ec = (*unwrap(SI))->getName(ret))
+ report_fatal_error(ec.message());
+ return ret.data();
+}
+
+uint64_t LLVMGetSymbolAddress(LLVMSymbolIteratorRef SI) {
+ uint64_t ret;
+ if (error_code ec = (*unwrap(SI))->getAddress(ret))
+ report_fatal_error(ec.message());
+ return ret;
+}
+
+uint64_t LLVMGetSymbolFileOffset(LLVMSymbolIteratorRef SI) {
+ uint64_t ret;
+ if (error_code ec = (*unwrap(SI))->getFileOffset(ret))
+ report_fatal_error(ec.message());
+ return ret;
+}
+
+uint64_t LLVMGetSymbolSize(LLVMSymbolIteratorRef SI) {
+ uint64_t ret;
+ if (error_code ec = (*unwrap(SI))->getSize(ret))
+ report_fatal_error(ec.message());
+ return ret;
+}
+
+// RelocationRef accessors
+uint64_t LLVMGetRelocationAddress(LLVMRelocationIteratorRef RI) {
+ uint64_t ret;
+ if (error_code ec = (*unwrap(RI))->getAddress(ret))
+ report_fatal_error(ec.message());
+ return ret;
+}
+
+uint64_t LLVMGetRelocationOffset(LLVMRelocationIteratorRef RI) {
+ uint64_t ret;
+ if (error_code ec = (*unwrap(RI))->getOffset(ret))
+ report_fatal_error(ec.message());
+ return ret;
+}
+
+LLVMSymbolIteratorRef LLVMGetRelocationSymbol(LLVMRelocationIteratorRef RI) {
+ SymbolRef ret;
+ if (error_code ec = (*unwrap(RI))->getSymbol(ret))
+ report_fatal_error(ec.message());
+
+ return wrap(new symbol_iterator(ret));
+}
+
+uint64_t LLVMGetRelocationType(LLVMRelocationIteratorRef RI) {
+ uint64_t ret;
+ if (error_code ec = (*unwrap(RI))->getType(ret))
+ report_fatal_error(ec.message());
+ return ret;
+}
+
+// NOTE: Caller takes ownership of returned string.
+const char *LLVMGetRelocationTypeName(LLVMRelocationIteratorRef RI) {
+ SmallVector<char, 0> ret;
+ if (error_code ec = (*unwrap(RI))->getTypeName(ret))
+ report_fatal_error(ec.message());
+
+ char *str = static_cast<char*>(malloc(ret.size() + 1));
+ *std::copy(ret.begin(), ret.end(), str) = '\0';
+ return str;
+}
+
+// NOTE: Caller takes ownership of returned string.
+const char *LLVMGetRelocationValueString(LLVMRelocationIteratorRef RI) {
+ SmallVector<char, 0> ret;
+ if (error_code ec = (*unwrap(RI))->getValueString(ret))
+ report_fatal_error(ec.message());
+
+ char *str = static_cast<char*>(malloc(ret.size() + 1));
+ *std::copy(ret.begin(), ret.end(), str) = '\0';
+ return str;
+}
+
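
Editor's note: the new C entry points compose as the usual create/test/advance/dispose iterator quartets. A plausible walk over an object file's symbols using only functions added or already present in this file (error handling elided; LLVMCreateObjectFile takes ownership of the buffer, as the C++ createObjectFile it wraps does):

    #include <llvm-c/Object.h>
    #include <stdio.h>

    // Print every symbol's name and size via the C API above.
    static void dumpSymbols(LLVMMemoryBufferRef buf) {
      LLVMObjectFileRef obj = LLVMCreateObjectFile(buf);
      LLVMSymbolIteratorRef sym = LLVMGetSymbols(obj);
      while (!LLVMIsSymbolIteratorAtEnd(obj, sym)) {
        printf("%s: %llu bytes\n", LLVMGetSymbolName(sym),
               (unsigned long long)LLVMGetSymbolSize(sym));
        LLVMMoveToNextSymbol(sym);
      }
      LLVMDisposeSymbolIterator(sym);
      LLVMDisposeObjectFile(obj); // also frees the underlying buffer
    }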
diff --git a/contrib/llvm/lib/Object/ObjectFile.cpp b/contrib/llvm/lib/Object/ObjectFile.cpp
index 69d8ed0..b14df9a 100644
--- a/contrib/llvm/lib/Object/ObjectFile.cpp
+++ b/contrib/llvm/lib/Object/ObjectFile.cpp
@@ -21,6 +21,8 @@
using namespace llvm;
using namespace object;
+void ObjectFile::anchor() { }
+
ObjectFile::ObjectFile(unsigned int Type, MemoryBuffer *source, error_code &ec)
: Binary(Type, source) {
}
@@ -56,7 +58,7 @@ ObjectFile *ObjectFile::createObjectFile(MemoryBuffer *Object) {
ObjectFile *ObjectFile::createObjectFile(StringRef ObjectPath) {
OwningPtr<MemoryBuffer> File;
- if (error_code ec = MemoryBuffer::getFile(ObjectPath, File))
+ if (MemoryBuffer::getFile(ObjectPath, File))
return NULL;
return createObjectFile(File.take());
}
diff --git a/contrib/llvm/lib/Support/APFloat.cpp b/contrib/llvm/lib/Support/APFloat.cpp
index f238894..409d4fb 100644
--- a/contrib/llvm/lib/Support/APFloat.cpp
+++ b/contrib/llvm/lib/Support/APFloat.cpp
@@ -14,8 +14,9 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <limits.h>
@@ -1150,9 +1151,6 @@ APFloat::roundAwayFromZero(roundingMode rounding_mode,
assert(lost_fraction != lfExactlyZero);
switch (rounding_mode) {
- default:
- llvm_unreachable(0);
-
case rmNearestTiesToAway:
return lost_fraction == lfExactlyHalf || lost_fraction == lfMoreThanHalf;
@@ -1175,6 +1173,7 @@ APFloat::roundAwayFromZero(roundingMode rounding_mode,
case rmTowardNegative:
return sign == true;
}
+ llvm_unreachable("Invalid rounding mode found");
}
APFloat::opStatus
@@ -1854,20 +1853,33 @@ APFloat::convert(const fltSemantics &toSemantics,
lostFraction lostFraction;
unsigned int newPartCount, oldPartCount;
opStatus fs;
+ int shift;
+ const fltSemantics &fromSemantics = *semantics;
- assertArithmeticOK(*semantics);
+ assertArithmeticOK(fromSemantics);
assertArithmeticOK(toSemantics);
lostFraction = lfExactlyZero;
newPartCount = partCountForBits(toSemantics.precision + 1);
oldPartCount = partCount();
+ shift = toSemantics.precision - fromSemantics.precision;
- /* Handle storage complications. If our new form is wider,
- re-allocate our bit pattern into wider storage. If it is
- narrower, we ignore the excess parts, but if narrowing to a
- single part we need to free the old storage.
- Be careful not to reference significandParts for zeroes
- and infinities, since it aborts. */
+ bool X86SpecialNan = false;
+ if (&fromSemantics == &APFloat::x87DoubleExtended &&
+ &toSemantics != &APFloat::x87DoubleExtended && category == fcNaN &&
+ (!(*significandParts() & 0x8000000000000000ULL) ||
+ !(*significandParts() & 0x4000000000000000ULL))) {
+ // x86 has some unusual NaNs which cannot be represented in any other
+ // format; note them here.
+ X86SpecialNan = true;
+ }
+
+ // If this is a truncation, perform the shift before we narrow the storage.
+ if (shift < 0 && (category==fcNormal || category==fcNaN))
+ lostFraction = shiftRight(significandParts(), oldPartCount, -shift);
+
+ // Fix the storage so it can hold the new value.
if (newPartCount > oldPartCount) {
+ // The new type requires more storage; make it available.
integerPart *newParts;
newParts = new integerPart[newPartCount];
APInt::tcSet(newParts, 0, newPartCount);
@@ -1875,61 +1887,36 @@ APFloat::convert(const fltSemantics &toSemantics,
APInt::tcAssign(newParts, significandParts(), oldPartCount);
freeSignificand();
significand.parts = newParts;
- } else if (newPartCount < oldPartCount) {
- /* Capture any lost fraction through truncation of parts so we get
- correct rounding whilst normalizing. */
- if (category==fcNormal)
- lostFraction = lostFractionThroughTruncation
- (significandParts(), oldPartCount, toSemantics.precision);
- if (newPartCount == 1) {
- integerPart newPart = 0;
- if (category==fcNormal || category==fcNaN)
- newPart = significandParts()[0];
- freeSignificand();
- significand.part = newPart;
- }
+ } else if (newPartCount == 1 && oldPartCount != 1) {
+ // Switch to built-in storage for a single part.
+ integerPart newPart = 0;
+ if (category==fcNormal || category==fcNaN)
+ newPart = significandParts()[0];
+ freeSignificand();
+ significand.part = newPart;
}
+ // Now that we have the right storage, switch the semantics.
+ semantics = &toSemantics;
+
+ // If this is an extension, perform the shift now that the storage is
+ // available.
+ if (shift > 0 && (category==fcNormal || category==fcNaN))
+ APInt::tcShiftLeft(significandParts(), newPartCount, shift);
+
if (category == fcNormal) {
- /* Re-interpret our bit-pattern. */
- exponent += toSemantics.precision - semantics->precision;
- semantics = &toSemantics;
fs = normalize(rounding_mode, lostFraction);
*losesInfo = (fs != opOK);
} else if (category == fcNaN) {
- int shift = toSemantics.precision - semantics->precision;
- // Do this now so significandParts gets the right answer
- const fltSemantics *oldSemantics = semantics;
- semantics = &toSemantics;
- *losesInfo = false;
- // No normalization here, just truncate
- if (shift>0)
- APInt::tcShiftLeft(significandParts(), newPartCount, shift);
- else if (shift < 0) {
- unsigned ushift = -shift;
- // Figure out if we are losing information. This happens
- // if are shifting out something other than 0s, or if the x87 long
- // double input did not have its integer bit set (pseudo-NaN), or if the
- // x87 long double input did not have its QNan bit set (because the x87
- // hardware sets this bit when converting a lower-precision NaN to
- // x87 long double).
- if (APInt::tcLSB(significandParts(), newPartCount) < ushift)
- *losesInfo = true;
- if (oldSemantics == &APFloat::x87DoubleExtended &&
- (!(*significandParts() & 0x8000000000000000ULL) ||
- !(*significandParts() & 0x4000000000000000ULL)))
- *losesInfo = true;
- APInt::tcShiftRight(significandParts(), newPartCount, ushift);
- }
+ *losesInfo = lostFraction != lfExactlyZero || X86SpecialNan;
// gcc forces the Quiet bit on, which means (float)(double)(float_sNan)
// does not give you back the same bits. This is dubious, and we
// don't currently do it. You're really supposed to get
// an invalid operation signal at runtime, but nobody does that.
fs = opOK;
} else {
- semantics = &toSemantics;
- fs = opOK;
*losesInfo = false;
+ fs = opOK;
}
return fs;
@@ -2695,21 +2682,19 @@ APFloat::convertNormalToHexString(char *dst, unsigned int hexDigits,
return writeSignedDecimal (dst, exponent);
}
-// For good performance it is desirable for different APFloats
-// to produce different integers.
-uint32_t
-APFloat::getHashValue() const
-{
- if (category==fcZero) return sign<<8 | semantics->precision ;
- else if (category==fcInfinity) return sign<<9 | semantics->precision;
- else if (category==fcNaN) return 1<<10 | semantics->precision;
- else {
- uint32_t hash = sign<<11 | semantics->precision | exponent<<12;
- const integerPart* p = significandParts();
- for (int i=partCount(); i>0; i--, p++)
- hash ^= ((uint32_t)*p) ^ (uint32_t)((*p)>>32);
- return hash;
- }
+hash_code llvm::hash_value(const APFloat &Arg) {
+ if (Arg.category != APFloat::fcNormal)
+ return hash_combine((uint8_t)Arg.category,
+ // NaN has no sign; fix it at zero.
+ Arg.isNaN() ? (uint8_t)0 : (uint8_t)Arg.sign,
+ Arg.semantics->precision);
+
+ // Normal floats need their exponent and significand hashed.
+ return hash_combine((uint8_t)Arg.category, (uint8_t)Arg.sign,
+ Arg.semantics->precision, Arg.exponent,
+ hash_combine_range(
+ Arg.significandParts(),
+ Arg.significandParts() + Arg.partCount()));
}
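
Editor's note: the replacement drops the ad-hoc getHashValue routines in favor of free hash_value overloads built on the new ADT/Hashing.h primitives — hash_combine folds a fixed list of fields, hash_combine_range folds a variable-length run, and the two nest. A sketch of the same pattern for a hypothetical value type:

    #include "llvm/ADT/Hashing.h"
    #include <cstdint>
    #include <vector>

    // Hypothetical type hashed in the same style as APFloat above:
    // fixed-width fields first, then the variable-length significand-like
    // tail folded in as a range.
    struct Sample {
      unsigned Kind;
      int Exponent;
      std::vector<uint64_t> Words;
    };

    llvm::hash_code hash_value(const Sample &S) {
      return llvm::hash_combine(
          S.Kind, S.Exponent,
          llvm::hash_combine_range(S.Words.begin(), S.Words.end()));
    }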
// Conversion from APFloat to/from host float/double. It may eventually be
@@ -3354,7 +3339,7 @@ namespace {
// Rounding down is just a truncation, except we also want to drop
// trailing zeros from the new result.
if (buffer[FirstSignificant - 1] < '5') {
- while (buffer[FirstSignificant] == '0')
+ while (FirstSignificant < N && buffer[FirstSignificant] == '0')
FirstSignificant++;
exp += FirstSignificant;
diff --git a/contrib/llvm/lib/Support/APInt.cpp b/contrib/llvm/lib/Support/APInt.cpp
index 3774c52..9b81fe7 100644
--- a/contrib/llvm/lib/Support/APInt.cpp
+++ b/contrib/llvm/lib/Support/APInt.cpp
@@ -14,9 +14,10 @@
#define DEBUG_TYPE "apint"
#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
@@ -456,16 +457,6 @@ APInt APInt::XorSlowCase(const APInt& RHS) const {
return APInt(val, getBitWidth()).clearUnusedBits();
}
-bool APInt::operator !() const {
- if (isSingleWord())
- return !VAL;
-
- for (unsigned i = 0; i < getNumWords(); ++i)
- if (pVal[i])
- return false;
- return true;
-}
-
APInt APInt::operator*(const APInt& RHS) const {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
if (isSingleWord())
@@ -493,12 +484,6 @@ APInt APInt::operator-(const APInt& RHS) const {
return Result.clearUnusedBits();
}
-bool APInt::operator[](unsigned bitPosition) const {
- assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
- return (maskBit(bitPosition) &
- (isSingleWord() ? VAL : pVal[whichWord(bitPosition)])) != 0;
-}
-
bool APInt::EqualSlowCase(const APInt& RHS) const {
// Get some facts about the number of bits used in the two operands.
unsigned n1 = getActiveBits();
@@ -675,93 +660,11 @@ unsigned APInt::getBitsNeeded(StringRef str, uint8_t radix) {
}
}
-// From http://www.burtleburtle.net, byBob Jenkins.
-// When targeting x86, both GCC and LLVM seem to recognize this as a
-// rotate instruction.
-#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
-
-// From http://www.burtleburtle.net, by Bob Jenkins.
-#define mix(a,b,c) \
- { \
- a -= c; a ^= rot(c, 4); c += b; \
- b -= a; b ^= rot(a, 6); a += c; \
- c -= b; c ^= rot(b, 8); b += a; \
- a -= c; a ^= rot(c,16); c += b; \
- b -= a; b ^= rot(a,19); a += c; \
- c -= b; c ^= rot(b, 4); b += a; \
- }
-
-// From http://www.burtleburtle.net, by Bob Jenkins.
-#define final(a,b,c) \
- { \
- c ^= b; c -= rot(b,14); \
- a ^= c; a -= rot(c,11); \
- b ^= a; b -= rot(a,25); \
- c ^= b; c -= rot(b,16); \
- a ^= c; a -= rot(c,4); \
- b ^= a; b -= rot(a,14); \
- c ^= b; c -= rot(b,24); \
- }
-
-// hashword() was adapted from http://www.burtleburtle.net, by Bob
-// Jenkins. k is a pointer to an array of uint32_t values; length is
-// the length of the key, in 32-bit chunks. This version only handles
-// keys that are a multiple of 32 bits in size.
-static inline uint32_t hashword(const uint64_t *k64, size_t length)
-{
- const uint32_t *k = reinterpret_cast<const uint32_t *>(k64);
- uint32_t a,b,c;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + (((uint32_t)length)<<2);
-
- /*------------------------------------------------- handle most of the key */
- while (length > 3) {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a,b,c);
- length -= 3;
- k += 3;
- }
-
- /*------------------------------------------- handle the last 3 uint32_t's */
- switch (length) { /* all the case statements fall through */
- case 3 : c+=k[2];
- case 2 : b+=k[1];
- case 1 : a+=k[0];
- final(a,b,c);
- case 0: /* case 0: nothing left to add */
- break;
- }
- /*------------------------------------------------------ report the result */
- return c;
-}
-
-// hashword8() was adapted from http://www.burtleburtle.net, by Bob
-// Jenkins. This computes a 32-bit hash from one 64-bit word. When
-// targeting x86 (32 or 64 bit), both LLVM and GCC compile this
-// function into about 35 instructions when inlined.
-static inline uint32_t hashword8(const uint64_t k64)
-{
- uint32_t a,b,c;
- a = b = c = 0xdeadbeef + 4;
- b += k64 >> 32;
- a += k64 & 0xffffffff;
- final(a,b,c);
- return c;
-}
-#undef final
-#undef mix
-#undef rot
+hash_code llvm::hash_value(const APInt &Arg) {
+ if (Arg.isSingleWord())
+ return hash_combine(Arg.VAL);
-uint64_t APInt::getHashValue() const {
- uint64_t hash;
- if (isSingleWord())
- hash = hashword8(VAL);
- else
- hash = hashword(pVal, getNumWords()*2);
- return hash;
+ return hash_combine_range(Arg.pVal, Arg.pVal + Arg.getNumWords());
}
/// HiBits - This function returns the high "numBits" bits of this APInt.
@@ -803,20 +706,9 @@ unsigned APInt::countLeadingZerosSlowCase() const {
return Count;
}
-static unsigned countLeadingOnes_64(uint64_t V, unsigned skip) {
- unsigned Count = 0;
- if (skip)
- V <<= skip;
- while (V && (V & (1ULL << 63))) {
- Count++;
- V <<= 1;
- }
- return Count;
-}
-
unsigned APInt::countLeadingOnes() const {
if (isSingleWord())
- return countLeadingOnes_64(VAL, APINT_BITS_PER_WORD - BitWidth);
+ return CountLeadingOnes_64(VAL << (APINT_BITS_PER_WORD - BitWidth));
unsigned highWordBits = BitWidth % APINT_BITS_PER_WORD;
unsigned shift;
@@ -827,13 +719,13 @@ unsigned APInt::countLeadingOnes() const {
shift = APINT_BITS_PER_WORD - highWordBits;
}
int i = getNumWords() - 1;
- unsigned Count = countLeadingOnes_64(pVal[i], shift);
+ unsigned Count = CountLeadingOnes_64(pVal[i] << shift);
if (Count == highWordBits) {
for (i--; i >= 0; --i) {
if (pVal[i] == -1ULL)
Count += APINT_BITS_PER_WORD;
else {
- Count += countLeadingOnes_64(pVal[i], 0);
+ Count += CountLeadingOnes_64(pVal[i]);
break;
}
}
@@ -870,30 +762,43 @@ unsigned APInt::countPopulationSlowCase() const {
return Count;
}
+/// Perform a logical right-shift of Words words from Src into Dst, by Shift
+/// bits (0 < Shift < 64). Src and Dst must be equal or non-overlapping.
+static void lshrNear(uint64_t *Dst, uint64_t *Src, unsigned Words,
+ unsigned Shift) {
+ uint64_t Carry = 0;
+ for (int I = Words - 1; I >= 0; --I) {
+ uint64_t Tmp = Src[I];
+ Dst[I] = (Tmp >> Shift) | Carry;
+ Carry = Tmp << (64 - Shift);
+ }
+}
+
APInt APInt::byteSwap() const {
assert(BitWidth >= 16 && BitWidth % 16 == 0 && "Cannot byteswap!");
if (BitWidth == 16)
return APInt(BitWidth, ByteSwap_16(uint16_t(VAL)));
- else if (BitWidth == 32)
+ if (BitWidth == 32)
return APInt(BitWidth, ByteSwap_32(unsigned(VAL)));
- else if (BitWidth == 48) {
+ if (BitWidth == 48) {
unsigned Tmp1 = unsigned(VAL >> 16);
Tmp1 = ByteSwap_32(Tmp1);
uint16_t Tmp2 = uint16_t(VAL);
Tmp2 = ByteSwap_16(Tmp2);
return APInt(BitWidth, (uint64_t(Tmp2) << 32) | Tmp1);
- } else if (BitWidth == 64)
+ }
+ if (BitWidth == 64)
return APInt(BitWidth, ByteSwap_64(VAL));
- else {
- APInt Result(BitWidth, 0);
- char *pByte = (char*)Result.pVal;
- for (unsigned i = 0; i < BitWidth / APINT_WORD_SIZE / 2; ++i) {
- char Tmp = pByte[i];
- pByte[i] = pByte[BitWidth / APINT_WORD_SIZE - 1 - i];
- pByte[BitWidth / APINT_WORD_SIZE - i - 1] = Tmp;
- }
- return Result;
+
+ APInt Result(getNumWords() * APINT_BITS_PER_WORD, 0);
+ for (unsigned I = 0, N = getNumWords(); I != N; ++I)
+ Result.pVal[I] = ByteSwap_64(pVal[N - I - 1]);
+ if (Result.BitWidth != BitWidth) {
+ lshrNear(Result.pVal, Result.pVal, getNumWords(),
+ Result.BitWidth - BitWidth);
+ Result.BitWidth = BitWidth;
}
+ return Result;
}
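
Editor's note: for multi-word values whose width is not a multiple of 64, the rewritten byteSwap swaps whole words into a temporary one word wider than needed and then shifts right by the padding bits via lshrNear. A standalone miniature of that two-step for an 80-bit value held in two little-endian words (__builtin_bswap64 is the GCC/Clang intrinsic, standing in for ByteSwap_64):

    #include <cstdint>
    #include <cstdio>

    // Byte-swap an 80-bit value in W[0..1]: swap whole words in reverse
    // order, then logically shift the pair right by the 48 padding bits.
    static void byteSwap80(uint64_t W[2]) {
      uint64_t R[2] = {__builtin_bswap64(W[1]), __builtin_bswap64(W[0])};
      const unsigned Pad = 2 * 64 - 80; // 48
      R[0] = (R[0] >> Pad) | (R[1] << (64 - Pad));
      R[1] >>= Pad;
      W[0] = R[0];
      W[1] = R[1];
    }

    int main() {
      uint64_t V[2] = {0x1122334455667788ULL, 0x99AAULL};
      byteSwap80(V); // expect 665544332211aa99 / 8877
      printf("%016llx %04llx\n", (unsigned long long)V[0],
             (unsigned long long)V[1]);
      return 0;
    }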
APInt llvm::APIntOps::GreatestCommonDivisor(const APInt& API1,
@@ -1110,6 +1015,18 @@ APInt APInt::sextOrTrunc(unsigned width) const {
return *this;
}
+APInt APInt::zextOrSelf(unsigned width) const {
+ if (BitWidth < width)
+ return zext(width);
+ return *this;
+}
+
+APInt APInt::sextOrSelf(unsigned width) const {
+ if (BitWidth < width)
+ return sext(width);
+ return *this;
+}
+
/// Arithmetic right-shift this APInt by shiftAmt.
/// @brief Arithmetic right-shift function.
APInt APInt::ashr(const APInt &shiftAmt) const {
@@ -1209,7 +1126,7 @@ APInt APInt::lshr(const APInt &shiftAmt) const {
/// @brief Logical right-shift function.
APInt APInt::lshr(unsigned shiftAmt) const {
if (isSingleWord()) {
- if (shiftAmt == BitWidth)
+ if (shiftAmt >= BitWidth)
return APInt(BitWidth, 0);
else
return APInt(BitWidth, this->VAL >> shiftAmt);
@@ -1232,11 +1149,7 @@ APInt APInt::lshr(unsigned shiftAmt) const {
// If we are shifting less than a word, compute the shift with a simple carry
if (shiftAmt < APINT_BITS_PER_WORD) {
- uint64_t carry = 0;
- for (int i = getNumWords()-1; i >= 0; --i) {
- val[i] = (pVal[i] >> shiftAmt) | carry;
- carry = pVal[i] << (APINT_BITS_PER_WORD - shiftAmt);
- }
+ lshrNear(val, pVal, getNumWords(), shiftAmt);
return APInt(val, BitWidth).clearUnusedBits();
}
@@ -1329,14 +1242,10 @@ APInt APInt::rotl(const APInt &rotateAmt) const {
}
APInt APInt::rotl(unsigned rotateAmt) const {
+ rotateAmt %= BitWidth;
if (rotateAmt == 0)
return *this;
- // Don't get too fancy, just use existing shift/or facilities
- APInt hi(*this);
- APInt lo(*this);
- hi.shl(rotateAmt);
- lo.lshr(BitWidth - rotateAmt);
- return hi | lo;
+ return shl(rotateAmt) | lshr(BitWidth - rotateAmt);
}
APInt APInt::rotr(const APInt &rotateAmt) const {
@@ -1344,14 +1253,10 @@ APInt APInt::rotr(const APInt &rotateAmt) const {
}
APInt APInt::rotr(unsigned rotateAmt) const {
+ rotateAmt %= BitWidth;
if (rotateAmt == 0)
return *this;
- // Don't get too fancy, just use existing shift/or facilities
- APInt hi(*this);
- APInt lo(*this);
- lo.lshr(rotateAmt);
- hi.shl(BitWidth - rotateAmt);
- return hi | lo;
+ return lshr(rotateAmt) | shl(BitWidth - rotateAmt);
}
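
Editor's note: worth spelling out why the rotate bodies changed — the old code called hi.shl(rotateAmt) and lo.lshr(...) for their side effects, but those methods return the shifted value and leave the object untouched, so rotl/rotr effectively returned *this | *this. The rewrite composes the two shifts directly and reduces the amount modulo the width first. The same idiom for a plain integer of width W <= 64:

    #include <cassert>
    #include <cstdint>

    // Rotate V left by Amt within a W-bit field, in the shape of the
    // rewritten APInt::rotl: reduce the amount, then OR the two shifts.
    static uint64_t rotl(uint64_t V, unsigned Amt, unsigned W) {
      uint64_t Mask = (W == 64) ? ~0ULL : ((1ULL << W) - 1);
      V &= Mask;
      Amt %= W;
      if (Amt == 0) // avoid the undefined V >> W below
        return V;
      return ((V << Amt) | (V >> (W - Amt))) & Mask;
    }

    int main() {
      assert(rotl(0x81, 1, 8) == 0x03); // 1000'0001 -> 0000'0011
      assert(rotl(0x81, 9, 8) == 0x03); // amounts reduce mod the width
      return 0;
    }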
// Square Root - this method computes and returns the square root of "this".
@@ -1431,15 +1336,11 @@ APInt APInt::sqrt() const {
APInt nextSquare((x_old + 1) * (x_old +1));
if (this->ult(square))
return x_old;
- else if (this->ule(nextSquare)) {
- APInt midpoint((nextSquare - square).udiv(two));
- APInt offset(*this - square);
- if (offset.ult(midpoint))
- return x_old;
- else
- return x_old + 1;
- } else
- llvm_unreachable("Error in APInt::sqrt computation");
+ assert(this->ule(nextSquare) && "Error in APInt::sqrt computation");
+ APInt midpoint((nextSquare - square).udiv(two));
+ APInt offset(*this - square);
+ if (offset.ult(midpoint))
+ return x_old;
return x_old + 1;
}
@@ -2184,7 +2085,7 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix,
bool Signed, bool formatAsCLiteral) const {
assert((Radix == 10 || Radix == 8 || Radix == 16 || Radix == 2 ||
Radix == 36) &&
- "Radix should be 2, 8, 10, or 16!");
+ "Radix should be 2, 8, 10, 16, or 36!");
const char *Prefix = "";
if (formatAsCLiteral) {
@@ -2197,9 +2098,13 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix,
case 8:
Prefix = "0";
break;
+ case 10:
+ break; // No prefix
case 16:
Prefix = "0x";
break;
+ default:
+ llvm_unreachable("Invalid radix!");
}
}
diff --git a/contrib/llvm/lib/Support/Allocator.cpp b/contrib/llvm/lib/Support/Allocator.cpp
index 215b0f2..b897830 100644
--- a/contrib/llvm/lib/Support/Allocator.cpp
+++ b/contrib/llvm/lib/Support/Allocator.cpp
@@ -22,8 +22,8 @@ namespace llvm {
BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
SlabAllocator &allocator)
- : SlabSize(size), SizeThreshold(threshold), Allocator(allocator),
- CurSlab(0), BytesAllocated(0) { }
+ : SlabSize(size), SizeThreshold(std::min(size, threshold)),
+ Allocator(allocator), CurSlab(0), BytesAllocated(0) { }
BumpPtrAllocator::~BumpPtrAllocator() {
DeallocateSlabs(CurSlab);
diff --git a/contrib/llvm/lib/Support/Atomic.cpp b/contrib/llvm/lib/Support/Atomic.cpp
index 94760cc..3001f6c 100644
--- a/contrib/llvm/lib/Support/Atomic.cpp
+++ b/contrib/llvm/lib/Support/Atomic.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/Atomic.h"
-#include "llvm/Config/config.h"
+#include "llvm/Config/llvm-config.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Support/BlockFrequency.cpp b/contrib/llvm/lib/Support/BlockFrequency.cpp
index a63bf83..84a993e 100644
--- a/contrib/llvm/lib/Support/BlockFrequency.cpp
+++ b/contrib/llvm/lib/Support/BlockFrequency.cpp
@@ -70,8 +70,13 @@ BlockFrequency &BlockFrequency::operator*=(const BranchProbability &Prob) {
assert(n <= d && "Probability must be less or equal to 1.");
- // If we can overflow use 96-bit operations.
- if (n > 0 && Frequency > UINT64_MAX / n) {
+ // Calculate Frequency * n.
+ uint64_t mulLo = (Frequency & UINT32_MAX) * n;
+ uint64_t mulHi = (Frequency >> 32) * n;
+ uint64_t mulRes = (mulHi << 32) + mulLo;
+
+ // If there was overflow use 96-bit operations.
+ if (mulHi > UINT32_MAX || mulRes < mulLo) {
// 96-bit value represented as W[1]:W[0].
uint64_t W[2];
@@ -82,8 +87,7 @@ BlockFrequency &BlockFrequency::operator*=(const BranchProbability &Prob) {
return *this;
}
- Frequency *= n;
- Frequency /= d;
+ Frequency = mulRes / d;
return *this;
}
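
Editor's note: the old guard `Frequency > UINT64_MAX / n` was a conservative pre-check; the new code instead performs the 64x32 multiply in 32-bit halves and detects overflow exactly from the high partial product and the final carry. The arithmetic in isolation:

    #include <cassert>
    #include <cstdint>

    // 64 x 32 -> 64 multiply with exact overflow detection, mirroring the
    // decomposition in BlockFrequency::operator*= above.
    static bool mulOverflows(uint64_t A, uint32_t N, uint64_t &Out) {
      uint64_t Lo = (A & UINT32_MAX) * N; // (2^32-1)^2 < 2^64: never wraps
      uint64_t Hi = (A >> 32) * N;
      Out = (Hi << 32) + Lo;
      // Overflow iff the high product needs more than 32 bits, or the
      // final addition wrapped.
      return Hi > UINT32_MAX || Out < Lo;
    }

    int main() {
      uint64_t R;
      assert(!mulOverflows(1ULL << 32, 0xFFFFFFFFu, R)); // fits exactly
      assert(mulOverflows(1ULL << 33, 0xFFFFFFFFu, R));  // needs 65 bits
      return 0;
    }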
diff --git a/contrib/llvm/lib/Support/BranchProbability.cpp b/contrib/llvm/lib/Support/BranchProbability.cpp
index 49d04ed..e8b83e5 100644
--- a/contrib/llvm/lib/Support/BranchProbability.cpp
+++ b/contrib/llvm/lib/Support/BranchProbability.cpp
@@ -13,24 +13,17 @@
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-BranchProbability::BranchProbability(uint32_t n, uint32_t d) {
- assert(d > 0 && "Denomiator cannot be 0!");
- assert(n <= d && "Probability cannot be bigger than 1!");
- N = n;
- D = d;
-}
-
void BranchProbability::print(raw_ostream &OS) const {
- OS << N << " / " << D << " = " << ((double)N / D);
+ OS << N << " / " << D << " = " << format("%g%%", ((double)N / D) * 100.0);
}
void BranchProbability::dump() const {
- print(dbgs());
- dbgs() << "\n";
+ dbgs() << *this << '\n';
}
namespace llvm {
diff --git a/contrib/llvm/lib/Support/CommandLine.cpp b/contrib/llvm/lib/Support/CommandLine.cpp
index 238adcc..e6fdf16 100644
--- a/contrib/llvm/lib/Support/CommandLine.cpp
+++ b/contrib/llvm/lib/Support/CommandLine.cpp
@@ -57,6 +57,9 @@ TEMPLATE_INSTANTIATION(class opt<char>);
TEMPLATE_INSTANTIATION(class opt<bool>);
} } // end namespace llvm::cl
+void GenericOptionValue::anchor() {}
+void OptionValue<boolOrDefault>::anchor() {}
+void OptionValue<std::string>::anchor() {}
void Option::anchor() {}
void basic_parser_impl::anchor() {}
void parser<bool>::anchor() {}
@@ -263,8 +266,8 @@ static bool CommaSeparateAndAddOccurence(Option *Handler, unsigned pos,
/// and a null value (StringRef()). The later is accepted for arguments that
/// don't allow a value (-foo) the former is rejected (-foo=).
static inline bool ProvideOption(Option *Handler, StringRef ArgName,
- StringRef Value, int argc, char **argv,
- int &i) {
+ StringRef Value, int argc,
+ const char *const *argv, int &i) {
// Is this a multi-argument option?
unsigned NumAdditionalVals = Handler->getNumAdditionalVals();
@@ -289,12 +292,6 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
break;
case ValueOptional:
break;
-
- default:
- errs() << ProgramName
- << ": Bad ValueMask flag! CommandLine usage error:"
- << Handler->getValueExpectedFlag() << "\n";
- llvm_unreachable(0);
}
// If this isn't a multi-arg option, just run the handler.
@@ -498,10 +495,10 @@ void cl::ParseEnvironmentOptions(const char *progName, const char *envVar,
/// ExpandResponseFiles - Copy the contents of argv into newArgv,
/// substituting the contents of the response files for the arguments
/// of type @file.
-static void ExpandResponseFiles(unsigned argc, char** argv,
+static void ExpandResponseFiles(unsigned argc, const char*const* argv,
std::vector<char*>& newArgv) {
for (unsigned i = 1; i != argc; ++i) {
- char *arg = argv[i];
+ const char *arg = argv[i];
if (arg[0] == '@') {
sys::PathWithStatus respFile(++arg);
@@ -531,7 +528,7 @@ static void ExpandResponseFiles(unsigned argc, char** argv,
}
}
-void cl::ParseCommandLineOptions(int argc, char **argv,
+void cl::ParseCommandLineOptions(int argc, const char * const *argv,
const char *Overview, bool ReadResponseFiles) {
// Process all registered options.
SmallVector<Option*, 4> PositionalOpts;
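
Editor's note: with argv now taken as const char *const *, a conventional main can hand its arguments over without casts (char ** converts implicitly to const char *const *). Minimal use of the real API:

    #include "llvm/Support/CommandLine.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // One positional argument plus a flag, parsed by the function whose
    // signature is loosened above.
    static cl::opt<std::string> InputFilename(cl::Positional,
                                              cl::desc("<input file>"),
                                              cl::init("-"));
    static cl::opt<bool> Verbose("v", cl::desc("Enable verbose output"));

    int main(int argc, char **argv) {
      cl::ParseCommandLineOptions(argc, argv, "example tool\n");
      if (Verbose)
        outs() << "reading " << InputFilename << "\n";
      return 0;
    }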
@@ -885,7 +882,6 @@ bool Option::addOccurrence(unsigned pos, StringRef ArgName,
case OneOrMore:
case ZeroOrMore:
case ConsumeAfter: break;
- default: return error("bad num occurrences flag value!");
}
return handleOccurrence(pos, ArgName, Value);
@@ -1195,7 +1191,7 @@ printOptionNoValue(const Option &O, size_t GlobalWidth) const {
static int OptNameCompare(const void *LHS, const void *RHS) {
typedef std::pair<const char *, Option*> pair_ty;
- return strcmp(((pair_ty*)LHS)->first, ((pair_ty*)RHS)->first);
+ return strcmp(((const pair_ty*)LHS)->first, ((const pair_ty*)RHS)->first);
}
// Copy Options into a vector so we can sort them as we like.
@@ -1349,7 +1345,7 @@ class VersionPrinter {
public:
void print() {
raw_ostream &OS = outs();
- OS << "Low Level Virtual Machine (http://llvm.org/):\n"
+ OS << "LLVM (http://llvm.org/):\n"
<< " " << PACKAGE_NAME << " version " << PACKAGE_VERSION;
#ifdef LLVM_VERSION_INFO
OS << LLVM_VERSION_INFO;
@@ -1369,7 +1365,7 @@ public:
#if (ENABLE_TIMESTAMPS == 1)
<< " Built " << __DATE__ << " (" << __TIME__ << ").\n"
#endif
- << " Host: " << sys::getHostTriple() << '\n'
+ << " Default target: " << sys::getDefaultTargetTriple() << '\n'
<< " Host CPU: " << CPU << '\n';
}
void operator=(bool OptionWasSpecified) {
diff --git a/contrib/llvm/lib/Support/ConstantRange.cpp b/contrib/llvm/lib/Support/ConstantRange.cpp
index c29cb53..5206cf1 100644
--- a/contrib/llvm/lib/Support/ConstantRange.cpp
+++ b/contrib/llvm/lib/Support/ConstantRange.cpp
@@ -55,7 +55,7 @@ ConstantRange ConstantRange::makeICmpRegion(unsigned Pred,
uint32_t W = CR.getBitWidth();
switch (Pred) {
- default: assert(0 && "Invalid ICmp predicate to makeICmpRegion()");
+ default: llvm_unreachable("Invalid ICmp predicate to makeICmpRegion()");
case CmpInst::ICMP_EQ:
return CR;
case CmpInst::ICMP_NE:
@@ -161,8 +161,7 @@ APInt ConstantRange::getSetSize() const {
APInt ConstantRange::getUnsignedMax() const {
if (isFullSet() || isWrappedSet())
return APInt::getMaxValue(getBitWidth());
- else
- return getUpper() - 1;
+ return getUpper() - 1;
}
/// getUnsignedMin - Return the smallest unsigned value contained in the
@@ -171,8 +170,7 @@ APInt ConstantRange::getUnsignedMax() const {
APInt ConstantRange::getUnsignedMin() const {
if (isFullSet() || (isWrappedSet() && getUpper() != 0))
return APInt::getMinValue(getBitWidth());
- else
- return getLower();
+ return getLower();
}
/// getSignedMax - Return the largest signed value contained in the
@@ -183,14 +181,11 @@ APInt ConstantRange::getSignedMax() const {
if (!isWrappedSet()) {
if (getLower().sle(getUpper() - 1))
return getUpper() - 1;
- else
- return SignedMax;
- } else {
- if (getLower().isNegative() == getUpper().isNegative())
- return SignedMax;
- else
- return getUpper() - 1;
+ return SignedMax;
}
+ if (getLower().isNegative() == getUpper().isNegative())
+ return SignedMax;
+ return getUpper() - 1;
}
/// getSignedMin - Return the smallest signed value contained in the
@@ -201,18 +196,13 @@ APInt ConstantRange::getSignedMin() const {
if (!isWrappedSet()) {
if (getLower().sle(getUpper() - 1))
return getLower();
- else
+ return SignedMin;
+ }
+ if ((getUpper() - 1).slt(getLower())) {
+ if (getUpper() != SignedMin)
return SignedMin;
- } else {
- if ((getUpper() - 1).slt(getLower())) {
- if (getUpper() != SignedMin)
- return SignedMin;
- else
- return getLower();
- } else {
- return getLower();
- }
}
+ return getLower();
}
/// contains - Return true if the specified value is in the set.
@@ -223,8 +213,7 @@ bool ConstantRange::contains(const APInt &V) const {
if (!isWrappedSet())
return Lower.ule(V) && V.ult(Upper);
- else
- return Lower.ule(V) || V.ult(Upper);
+ return Lower.ule(V) || V.ult(Upper);
}
/// contains - Return true if the argument is a subset of this range.
@@ -284,15 +273,14 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
return ConstantRange(CR.Lower, Upper);
return CR;
- } else {
- if (Upper.ult(CR.Upper))
- return *this;
+ }
+ if (Upper.ult(CR.Upper))
+ return *this;
- if (Lower.ult(CR.Upper))
- return ConstantRange(Lower, CR.Upper);
+ if (Lower.ult(CR.Upper))
+ return ConstantRange(Lower, CR.Upper);
- return ConstantRange(getBitWidth(), false);
- }
+ return ConstantRange(getBitWidth(), false);
}
if (isWrappedSet() && !CR.isWrappedSet()) {
@@ -305,9 +293,9 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (getSetSize().ult(CR.getSetSize()))
return *this;
- else
- return CR;
- } else if (CR.Lower.ult(Lower)) {
+ return CR;
+ }
+ if (CR.Lower.ult(Lower)) {
if (CR.Upper.ule(Lower))
return ConstantRange(getBitWidth(), false);
@@ -320,15 +308,15 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (CR.Lower.ult(Upper)) {
if (getSetSize().ult(CR.getSetSize()))
return *this;
- else
- return CR;
+ return CR;
}
if (CR.Lower.ult(Lower))
return ConstantRange(Lower, CR.Upper);
return CR;
- } else if (CR.Upper.ult(Lower)) {
+ }
+ if (CR.Upper.ult(Lower)) {
if (CR.Lower.ult(Lower))
return *this;
@@ -336,8 +324,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
}
if (getSetSize().ult(CR.getSetSize()))
return *this;
- else
- return CR;
+ return CR;
}
@@ -362,8 +349,7 @@ ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
APInt d1 = CR.Lower - Upper, d2 = Lower - CR.Upper;
if (d1.ult(d2))
return ConstantRange(Lower, CR.Upper);
- else
- return ConstantRange(CR.Lower, Upper);
+ return ConstantRange(CR.Lower, Upper);
}
APInt L = Lower, U = Upper;
@@ -396,8 +382,7 @@ ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
APInt d1 = CR.Lower - Upper, d2 = Lower - CR.Upper;
if (d1.ult(d2))
return ConstantRange(Lower, CR.Upper);
- else
- return ConstantRange(CR.Lower, Upper);
+ return ConstantRange(CR.Lower, Upper);
}
// ----U L----- : this
@@ -407,13 +392,11 @@ ConstantRange ConstantRange::unionWith(const ConstantRange &CR) const {
// ------U L---- : this
// L-----U : CR
- if (CR.Lower.ult(Upper) && CR.Upper.ult(Lower))
- return ConstantRange(Lower, CR.Upper);
+ assert(CR.Lower.ult(Upper) && CR.Upper.ult(Lower) &&
+ "ConstantRange::unionWith missed a case with one range wrapped");
+ return ConstantRange(Lower, CR.Upper);
}
- assert(isWrappedSet() && CR.isWrappedSet() &&
- "ConstantRange::unionWith missed wrapped union unwrapped case");
-
// ------U L---- and ------U L---- : this
// -U L----------- and ------------U L : CR
if (CR.Lower.ule(Upper) || Lower.ule(CR.Upper))
@@ -466,10 +449,8 @@ ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
/// correspond to the possible range of values as if the source range had been
/// truncated to the specified type.
ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
- unsigned SrcTySize = getBitWidth();
- assert(SrcTySize > DstTySize && "Not a value truncation");
- APInt Size(APInt::getLowBitsSet(SrcTySize, DstTySize));
- if (isFullSet() || getSetSize().ugt(Size))
+ assert(getBitWidth() > DstTySize && "Not a value truncation");
+ if (isFullSet() || getSetSize().getActiveBits() > DstTySize)
return ConstantRange(DstTySize, /*isFullSet=*/true);
return ConstantRange(Lower.trunc(DstTySize), Upper.trunc(DstTySize));
@@ -481,10 +462,9 @@ ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const {
unsigned SrcTySize = getBitWidth();
if (SrcTySize > DstTySize)
return truncate(DstTySize);
- else if (SrcTySize < DstTySize)
+ if (SrcTySize < DstTySize)
return zeroExtend(DstTySize);
- else
- return *this;
+ return *this;
}
/// sextOrTrunc - make this range have the bit width given by \p DstTySize. The
@@ -493,10 +473,9 @@ ConstantRange ConstantRange::sextOrTrunc(uint32_t DstTySize) const {
unsigned SrcTySize = getBitWidth();
if (SrcTySize > DstTySize)
return truncate(DstTySize);
- else if (SrcTySize < DstTySize)
+ if (SrcTySize < DstTySize)
return signExtend(DstTySize);
- else
- return *this;
+ return *this;
}
ConstantRange
@@ -675,11 +654,10 @@ ConstantRange::lshr(const ConstantRange &Other) const {
}
ConstantRange ConstantRange::inverse() const {
- if (isFullSet()) {
+ if (isFullSet())
return ConstantRange(getBitWidth(), /*isFullSet=*/false);
- } else if (isEmptySet()) {
+ if (isEmptySet())
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- }
return ConstantRange(Upper, Lower);
}
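
The rewrites above drop redundant else-after-return branches without changing semantics. A small sketch of the wrapped-set behavior they preserve, using an illustrative 8-bit range:

    #include "llvm/ADT/APInt.h"
    #include "llvm/Support/ConstantRange.h"
    #include <cassert>
    using namespace llvm;

    void wrappedRangeDemo() {
      // [200, 10) over i8 wraps past 255 back around to 0.
      ConstantRange CR(APInt(8, 200), APInt(8, 10));
      assert(CR.isWrappedSet());
      assert(CR.getUnsignedMax() == APInt::getMaxValue(8)); // 255 is in the set
      assert(CR.getUnsignedMin() == APInt::getMinValue(8)); // 0 too, Upper != 0
      assert(CR.contains(APInt(8, 5)) && !CR.contains(APInt(8, 100)));
    }
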
diff --git a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
index 263114c..e2af0bc 100644
--- a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
+++ b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp
@@ -165,7 +165,6 @@ static LONG CALLBACK ExceptionHandler(PEXCEPTION_POINTERS ExceptionInfo)
// Note that we don't actually get here because HandleCrash calls
// longjmp, which means the HandleCrash function never returns.
llvm_unreachable("Handled the crash, should have longjmp'ed out of here");
- return EXCEPTION_CONTINUE_SEARCH;
}
// Because the Enable and Disable calls are static, it means that
diff --git a/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp b/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
index 8145664..1e89c6a 100644
--- a/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
+++ b/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
@@ -350,6 +350,9 @@ DAGDeltaAlgorithmImpl::Run() {
return Required;
}
+void DAGDeltaAlgorithm::anchor() {
+}
+
DAGDeltaAlgorithm::changeset_ty
DAGDeltaAlgorithm::Run(const changeset_ty &Changes,
const std::vector<edge_ty> &Dependencies) {
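
The anchor() added above is LLVM's usual device for pinning a class's vtable to one translation unit. A generic sketch of the idiom, with a hypothetical Widget class:

    // Widget.h -- declare one deliberately out-of-line virtual method.
    class Widget {
    public:
      virtual ~Widget() {}
      virtual void anchor(); // first virtual member, defined out of line
    };

    // Widget.cpp -- defining it here gives the vtable (and type info) a
    // single home TU instead of weak duplicates in every user of Widget.
    void Widget::anchor() {}
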
diff --git a/contrib/llvm/lib/Support/DataExtractor.cpp b/contrib/llvm/lib/Support/DataExtractor.cpp
index b946c1d..dc21155 100644
--- a/contrib/llvm/lib/Support/DataExtractor.cpp
+++ b/contrib/llvm/lib/Support/DataExtractor.cpp
@@ -75,7 +75,7 @@ uint32_t DataExtractor::getU32(uint32_t *offset_ptr) const {
uint32_t *DataExtractor::getU32(uint32_t *offset_ptr, uint32_t *dst,
uint32_t count) const {
return getUs<uint32_t>(offset_ptr, dst, count, this, IsLittleEndian,
- Data.data());;
+ Data.data());
}
uint64_t DataExtractor::getU64(uint32_t *offset_ptr) const {
diff --git a/contrib/llvm/lib/Support/DataStream.cpp b/contrib/llvm/lib/Support/DataStream.cpp
new file mode 100644
index 0000000..94d14a5
--- /dev/null
+++ b/contrib/llvm/lib/Support/DataStream.cpp
@@ -0,0 +1,98 @@
+//===--- llvm/Support/DataStream.cpp - Lazy streamed data -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements DataStreamer, which fetches bytes of Data from
+// a stream source. It provides support for streaming (lazy reading) of
+// bitcode. An example implementation of streaming from a file or stdin
+// is included.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "Data-stream"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/DataStream.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/system_error.h"
+#include <string>
+#include <cerrno>
+#include <cstdio>
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#else
+#include <io.h>
+#endif
+#include <fcntl.h>
+using namespace llvm;
+
+// Interface goals:
+// * StreamableMemoryObject doesn't care about complexities like using
+// threads/async callbacks to actually overlap download+compile
+// * Don't want to duplicate Data in memory
+// * Don't need to know total Data len in advance
+// Non-goals:
+// StreamableMemoryObject already has random access so this interface only does
+// in-order streaming (no arbitrary seeking, else we'd have to buffer all the
+// Data here in addition to MemoryObject). This also means that if we want
+// to be able to free Data, BitstreamBytes/BitcodeReader will implement it
+
+STATISTIC(NumStreamFetches, "Number of calls to Data stream fetch");
+
+namespace llvm {
+DataStreamer::~DataStreamer() {}
+}
+
+namespace {
+
+// Very simple stream backed by a file. Mostly useful for stdin and debugging;
+// actual file access is probably still best done with mmap.
+class DataFileStreamer : public DataStreamer {
+ int Fd;
+public:
+ DataFileStreamer() : Fd(0) {}
+ virtual ~DataFileStreamer() {
+ close(Fd);
+ }
+ virtual size_t GetBytes(unsigned char *buf, size_t len) {
+ NumStreamFetches++;
+ return read(Fd, buf, len);
+ }
+
+ error_code OpenFile(const std::string &Filename) {
+ if (Filename == "-") {
+ Fd = 0;
+ sys::Program::ChangeStdinToBinary();
+ return error_code::success();
+ }
+
+ int OpenFlags = O_RDONLY;
+#ifdef O_BINARY
+ OpenFlags |= O_BINARY; // Open input file in binary mode on win32.
+#endif
+ Fd = ::open(Filename.c_str(), OpenFlags);
+ if (Fd == -1)
+ return error_code(errno, posix_category());
+ return error_code::success();
+ }
+};
+
+}
+
+namespace llvm {
+DataStreamer *getDataFileStreamer(const std::string &Filename,
+ std::string *StrError) {
+ DataFileStreamer *s = new DataFileStreamer();
+ if (error_code e = s->OpenFile(Filename)) {
+ *StrError = std::string("Could not open ") + Filename + ": " +
+ e.message() + "\n";
+ return NULL;
+ }
+ return s;
+}
+
+}
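
A sketch of consuming the new streamer API from the file above; the buffer size and error handling are illustrative, not part of the patch:

    #include "llvm/Support/DataStream.h"
    #include <cstdio>
    #include <string>

    size_t drainStream(const std::string &Path) {
      std::string Err;
      llvm::DataStreamer *DS = llvm::getDataFileStreamer(Path, &Err);
      if (!DS) {
        std::fprintf(stderr, "%s", Err.c_str());
        return 0;
      }
      unsigned char Buf[4096];
      size_t Total = 0, N;
      // Strictly in-order reads; the interface deliberately has no seek.
      while ((N = DS->GetBytes(Buf, sizeof(Buf))) != 0)
        Total += N;
      delete DS;
      return Total;
    }
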
diff --git a/contrib/llvm/lib/Support/Dwarf.cpp b/contrib/llvm/lib/Support/Dwarf.cpp
index 95a9550..5c59a3e 100644
--- a/contrib/llvm/lib/Support/Dwarf.cpp
+++ b/contrib/llvm/lib/Support/Dwarf.cpp
@@ -95,6 +95,7 @@ const char *llvm::dwarf::TagString(unsigned Tag) {
return "DW_TAG_GNU_template_parameter_pack";
case DW_TAG_GNU_formal_parameter_pack:
return "DW_TAG_GNU_formal_parameter_pack";
+ case DW_TAG_APPLE_property: return "DW_TAG_APPLE_property";
}
return 0;
}
@@ -245,6 +246,7 @@ const char *llvm::dwarf::AttributeString(unsigned Attribute) {
case DW_AT_APPLE_property_getter: return "DW_AT_APPLE_property_getter";
case DW_AT_APPLE_property_setter: return "DW_AT_APPLE_property_setter";
case DW_AT_APPLE_property_attribute: return "DW_AT_APPLE_property_attribute";
+ case DW_AT_APPLE_property: return "DW_AT_APPLE_property";
case DW_AT_APPLE_objc_complete_type: return "DW_AT_APPLE_objc_complete_type";
}
return 0;
diff --git a/contrib/llvm/lib/Support/FileUtilities.cpp b/contrib/llvm/lib/Support/FileUtilities.cpp
index 4c8c0c6..f9e9cf0 100644
--- a/contrib/llvm/lib/Support/FileUtilities.cpp
+++ b/contrib/llvm/lib/Support/FileUtilities.cpp
@@ -200,7 +200,6 @@ int llvm::DiffFilesWithTolerance(const sys::PathWithStatus &FileA,
// Now its safe to mmap the files into memory because both files
// have a non-zero size.
- error_code ec;
OwningPtr<MemoryBuffer> F1;
if (error_code ec = MemoryBuffer::getFile(FileA.c_str(), F1)) {
if (Error)
diff --git a/contrib/llvm/lib/Support/FoldingSet.cpp b/contrib/llvm/lib/Support/FoldingSet.cpp
index 17b8271..c6282c6 100644
--- a/contrib/llvm/lib/Support/FoldingSet.cpp
+++ b/contrib/llvm/lib/Support/FoldingSet.cpp
@@ -15,6 +15,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
@@ -29,24 +30,7 @@ using namespace llvm;
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
/// used to lookup the node in the FoldingSetImpl.
unsigned FoldingSetNodeIDRef::ComputeHash() const {
- // This is adapted from SuperFastHash by Paul Hsieh.
- unsigned Hash = static_cast<unsigned>(Size);
- for (const unsigned *BP = Data, *E = BP+Size; BP != E; ++BP) {
- unsigned Data = *BP;
- Hash += Data & 0xFFFF;
- unsigned Tmp = ((Data >> 16) << 11) ^ Hash;
- Hash = (Hash << 16) ^ Tmp;
- Hash += Hash >> 11;
- }
-
- // Force "avalanching" of final 127 bits.
- Hash ^= Hash << 3;
- Hash += Hash >> 5;
- Hash ^= Hash << 4;
- Hash += Hash >> 17;
- Hash ^= Hash << 25;
- Hash += Hash >> 6;
- return Hash;
+ return static_cast<unsigned>(hash_combine_range(Data, Data+Size));
}
bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const {
@@ -281,15 +265,15 @@ void FoldingSetImpl::GrowHashTable() {
FoldingSetImpl::Node
*FoldingSetImpl::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
void *&InsertPos) {
-
- void **Bucket = GetBucketFor(ID.ComputeHash(), Buckets, NumBuckets);
+ unsigned IDHash = ID.ComputeHash();
+ void **Bucket = GetBucketFor(IDHash, Buckets, NumBuckets);
void *Probe = *Bucket;
InsertPos = 0;
FoldingSetNodeID TempID;
while (Node *NodeInBucket = GetNextPtr(Probe)) {
- if (NodeEquals(NodeInBucket, ID, TempID))
+ if (NodeEquals(NodeInBucket, ID, IDHash, TempID))
return NodeInBucket;
TempID.clear();
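
The replacement hash is hash_combine_range from the new llvm/ADT/Hashing.h. The same pattern isolated, as a sketch:

    #include "llvm/ADT/Hashing.h"
    #include <cstddef>

    unsigned hashWords(const unsigned *Data, size_t Size) {
      // hash_combine_range yields a hash_code; FoldingSet wants 32 bits,
      // so truncate exactly as ComputeHash() does above.
      return static_cast<unsigned>(llvm::hash_combine_range(Data, Data + Size));
    }
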
diff --git a/contrib/llvm/lib/Support/GraphWriter.cpp b/contrib/llvm/lib/Support/GraphWriter.cpp
index 0dba28a..32126ec 100644
--- a/contrib/llvm/lib/Support/GraphWriter.cpp
+++ b/contrib/llvm/lib/Support/GraphWriter.cpp
@@ -11,12 +11,16 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Config/config.h"
using namespace llvm;
+static cl::opt<bool> ViewBackground("view-background", cl::Hidden,
+ cl::desc("Execute graph viewer in the background. Creates tmp file litter."));
+
std::string llvm::DOT::EscapeString(const std::string &Label) {
std::string Str(Label);
for (unsigned i = 0; i != Str.length(); ++i)
@@ -49,10 +53,28 @@ std::string llvm::DOT::EscapeString(const std::string &Label) {
return Str;
}
-
+// Execute the graph viewer. Return true if successful.
+static bool LLVM_ATTRIBUTE_UNUSED
+ExecGraphViewer(const sys::Path &ExecPath, std::vector<const char*> &args,
+ const sys::Path &Filename, bool wait, std::string &ErrMsg) {
+ if (wait) {
+ if (sys::Program::ExecuteAndWait(ExecPath, &args[0],0,0,0,0,&ErrMsg)) {
+ errs() << "Error: " << ErrMsg << "\n";
+ return false;
+ }
+ Filename.eraseFromDisk();
+ errs() << " done. \n";
+ }
+ else {
+ sys::Program::ExecuteNoWait(ExecPath, &args[0],0,0,0,&ErrMsg);
+ errs() << "Remember to erase graph file: " << Filename.str() << "\n";
+ }
+ return true;
+}
void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
GraphProgram::Name program) {
+ wait &= !ViewBackground;
std::string ErrMsg;
#if HAVE_GRAPHVIZ
sys::Path Graphviz(LLVM_PATH_GRAPHVIZ);
@@ -61,14 +83,10 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
args.push_back(Graphviz.c_str());
args.push_back(Filename.c_str());
args.push_back(0);
-
+
errs() << "Running 'Graphviz' program... ";
- if (sys::Program::ExecuteAndWait(Graphviz, &args[0],0,0,0,0,&ErrMsg)) {
- errs() << "Error: " << ErrMsg << "\n";
+ if (!ExecGraphViewer(Graphviz, args, Filename, wait, ErrMsg))
return;
- }
- Filename.eraseFromDisk();
- errs() << " done. \n";
#elif HAVE_XDOT_PY
std::vector<const char*> args;
@@ -83,17 +101,12 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
case GraphProgram::CIRCO: args.push_back("-f"); args.push_back("circo");break;
default: errs() << "Unknown graph layout name; using default.\n";
}
-
+
args.push_back(0);
errs() << "Running 'xdot.py' program... ";
- if (sys::Program::ExecuteAndWait(sys::Path(LLVM_PATH_XDOT_PY),
- &args[0],0,0,0,0,&ErrMsg)) {
- errs() << "Error: " << ErrMsg << "\n";
+ if (!ExecGraphViewer(sys::Path(LLVM_PATH_XDOT_PY), args, Filename, wait, ErrMsg))
return;
- }
- Filename.eraseFromDisk();
- errs() << " done. \n";
#elif (HAVE_GV && (HAVE_DOT || HAVE_FDP || HAVE_NEATO || \
HAVE_TWOPI || HAVE_CIRCO))
@@ -150,14 +163,11 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
args.push_back("-o");
args.push_back(PSFilename.c_str());
args.push_back(0);
-
+
errs() << "Running '" << prog.str() << "' program... ";
- if (sys::Program::ExecuteAndWait(prog, &args[0], 0, 0, 0, 0, &ErrMsg)) {
- errs() << "Error: " << ErrMsg << "\n";
+ if (!ExecGraphViewer(prog, args, Filename, wait, ErrMsg))
return;
- }
- errs() << " done. \n";
sys::Path gv(LLVM_PATH_GV);
args.clear();
@@ -165,19 +175,11 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
args.push_back(PSFilename.c_str());
args.push_back("--spartan");
args.push_back(0);
-
+
ErrMsg.clear();
- if (wait) {
- if (sys::Program::ExecuteAndWait(gv, &args[0],0,0,0,0,&ErrMsg))
- errs() << "Error: " << ErrMsg << "\n";
- Filename.eraseFromDisk();
- PSFilename.eraseFromDisk();
- }
- else {
- sys::Program::ExecuteNoWait(gv, &args[0],0,0,0,&ErrMsg);
- errs() << "Remember to erase graph files: " << Filename.str() << " "
- << PSFilename.str() << "\n";
- }
+ if (!ExecGraphViewer(gv, args, PSFilename, wait, ErrMsg))
+ return;
+
#elif HAVE_DOTTY
sys::Path dotty(LLVM_PATH_DOTTY);
@@ -185,16 +187,13 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
args.push_back(dotty.c_str());
args.push_back(Filename.c_str());
args.push_back(0);
-
- errs() << "Running 'dotty' program... ";
- if (sys::Program::ExecuteAndWait(dotty, &args[0],0,0,0,0,&ErrMsg)) {
- errs() << "Error: " << ErrMsg << "\n";
- } else {
+
// Dotty spawns another app and doesn't wait until it returns
#if defined (__MINGW32__) || defined (_WINDOWS)
- return;
+ wait = false;
#endif
- Filename.eraseFromDisk();
- }
+ errs() << "Running 'dotty' program... ";
+ if (!ExecGraphViewer(dotty, args, Filename, wait, ErrMsg))
+ return;
#endif
}
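
How the new helper is meant to be invoked, mirroring the call sites above. ExecGraphViewer is file-static, so this only makes sense inside GraphWriter.cpp; the viewer path and file name are hypothetical:

    #include "llvm/Support/Path.h"
    #include <string>
    #include <vector>
    using namespace llvm;

    static void viewGraphFile() {
      std::string ErrMsg;
      sys::Path Viewer("/usr/bin/dot");   // hypothetical viewer path
      sys::Path GraphFile("graph.dot");   // hypothetical temp file
      std::vector<const char*> args;
      args.push_back(Viewer.c_str());
      args.push_back(GraphFile.c_str());
      args.push_back(0);                  // argv must be null-terminated
      // wait=true deletes the file afterwards; with -view-background the
      // caller forces wait=false and the file is left behind.
      if (!ExecGraphViewer(Viewer, args, GraphFile, /*wait=*/true, ErrMsg))
        return;                           // helper already printed ErrMsg
    }
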
diff --git a/contrib/llvm/lib/Support/Hashing.cpp b/contrib/llvm/lib/Support/Hashing.cpp
new file mode 100644
index 0000000..c69efb7
--- /dev/null
+++ b/contrib/llvm/lib/Support/Hashing.cpp
@@ -0,0 +1,29 @@
+//===-------------- lib/Support/Hashing.cpp -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides implementation bits for the LLVM common hashing
+// infrastructure. Documentation and most of the other information is in the
+// header file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Hashing.h"
+
+using namespace llvm;
+
+// Provide a definition and static initializer for the fixed seed. This
+// initializer should always be zero to ensure its value can never appear to be
+// non-zero, even during dynamic initialization.
+size_t llvm::hashing::detail::fixed_seed_override = 0;
+
+// Implement the function for forced setting of the fixed seed.
+// FIXME: Use atomic operations here so that there is no data race.
+void llvm::set_fixed_execution_hash_seed(size_t fixed_value) {
+ hashing::detail::fixed_seed_override = fixed_value;
+}
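
A short sketch of what the override enables, assuming a caller that simply wants run-to-run reproducible hashes (names per the header above):

    #include "llvm/ADT/Hashing.h"
    using namespace llvm;

    void pinHashes() {
      // Hashing may be seeded per execution; forcing the seed makes
      // hash_combine results stable across runs, which helps when chasing
      // container-iteration-order bugs.
      set_fixed_execution_hash_seed(1);
      hash_code H = hash_combine(42, 7ull);
      (void)H; // identical value on every run from here on
    }
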
diff --git a/contrib/llvm/lib/Support/Host.cpp b/contrib/llvm/lib/Support/Host.cpp
index a19e4b4..0f06964 100644
--- a/contrib/llvm/lib/Support/Host.cpp
+++ b/contrib/llvm/lib/Support/Host.cpp
@@ -61,6 +61,8 @@ static bool GetX86CpuIDAndInfo(unsigned value, unsigned *rEAX,
*rECX = registers[2];
*rEDX = registers[3];
return false;
+ #else
+ return true;
#endif
#elif defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)
#if defined(__GNUC__)
@@ -87,9 +89,14 @@ static bool GetX86CpuIDAndInfo(unsigned value, unsigned *rEAX,
mov dword ptr [esi],edx
}
return false;
+// pedantic #else returns to appease -Wunreachable-code (so we don't generate
+// preprocessed code that looks like "return true; return false;")
+ #else
+ return true;
#endif
-#endif
+#else
return true;
+#endif
}
static void DetectX86FamilyModel(unsigned EAX, unsigned &Family,
@@ -298,6 +305,10 @@ std::string sys::getHostCPUName() {
}
case 16:
return "amdfam10";
+ case 20:
+ return "btver1";
+ case 21:
+ return "bdver1";
default:
return "generic";
}
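
With the two new cases, AMD family 20/21 parts report "btver1"/"bdver1" instead of falling through to "generic". The public entry point that surfaces this, as a sketch:

    #include "llvm/Support/Host.h"
    #include "llvm/Support/raw_ostream.h"

    void printHostCPU() {
      // Returns e.g. "btver1" or "bdver1" on the newly handled families.
      llvm::outs() << llvm::sys::getHostCPUName() << "\n";
    }
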
diff --git a/contrib/llvm/lib/Support/IntrusiveRefCntPtr.cpp b/contrib/llvm/lib/Support/IntrusiveRefCntPtr.cpp
new file mode 100644
index 0000000..a8b4559
--- /dev/null
+++ b/contrib/llvm/lib/Support/IntrusiveRefCntPtr.cpp
@@ -0,0 +1,14 @@
+//== IntrusiveRefCntPtr.cpp - Smart Refcounting Pointer ----------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+
+using namespace llvm;
+
+void RefCountedBaseVPTR::anchor() { }
diff --git a/contrib/llvm/lib/Support/JSONParser.cpp b/contrib/llvm/lib/Support/JSONParser.cpp
new file mode 100644
index 0000000..5dfcf29
--- /dev/null
+++ b/contrib/llvm/lib/Support/JSONParser.cpp
@@ -0,0 +1,302 @@
+//===--- JSONParser.cpp - Simple JSON parser ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a JSON parser.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/JSONParser.h"
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace llvm;
+
+JSONParser::JSONParser(StringRef Input, SourceMgr *SM)
+ : SM(SM), Failed(false) {
+ InputBuffer = MemoryBuffer::getMemBuffer(Input, "JSON");
+ SM->AddNewSourceBuffer(InputBuffer, SMLoc());
+ End = InputBuffer->getBuffer().end();
+ Position = InputBuffer->getBuffer().begin();
+}
+
+JSONValue *JSONParser::parseRoot() {
+ if (Position != InputBuffer->getBuffer().begin())
+ report_fatal_error("Cannot reuse JSONParser.");
+ if (isWhitespace())
+ nextNonWhitespace();
+ if (errorIfAtEndOfFile("'[' or '{' at start of JSON text"))
+ return 0;
+ switch (*Position) {
+ case '[':
+ return new (ValueAllocator.Allocate<JSONArray>(1)) JSONArray(this);
+ case '{':
+ return new (ValueAllocator.Allocate<JSONObject>(1)) JSONObject(this);
+ default:
+ setExpectedError("'[' or '{' at start of JSON text", *Position);
+ return 0;
+ }
+}
+
+bool JSONParser::validate() {
+ JSONValue *Root = parseRoot();
+ if (Root == NULL) {
+ return false;
+ }
+ return skip(*Root);
+}
+
+bool JSONParser::skip(const JSONAtom &Atom) {
+ switch(Atom.getKind()) {
+ case JSONAtom::JK_Array:
+ case JSONAtom::JK_Object:
+ return skipContainer(*cast<JSONContainer>(&Atom));
+ case JSONAtom::JK_String:
+ return true;
+ case JSONAtom::JK_KeyValuePair:
+ return skip(*cast<JSONKeyValuePair>(&Atom)->Value);
+ }
+ llvm_unreachable("Impossible enum value.");
+}
+
+// Sets the current error to:
+// "expected <Expected>, but found <Found>".
+void JSONParser::setExpectedError(StringRef Expected, StringRef Found) {
+ SM->PrintMessage(SMLoc::getFromPointer(Position), SourceMgr::DK_Error,
+ "expected " + Expected + ", but found " + Found + ".", ArrayRef<SMRange>());
+ Failed = true;
+}
+
+// Sets the current error to:
+// "expected <Expected>, but found <Found>".
+void JSONParser::setExpectedError(StringRef Expected, char Found) {
+ setExpectedError(Expected, ("'" + StringRef(&Found, 1) + "'").str());
+}
+
+// If there is no character available, returns true and sets the current error
+// to: "expected <Expected>, but found EOF.".
+bool JSONParser::errorIfAtEndOfFile(StringRef Expected) {
+ if (Position == End) {
+ setExpectedError(Expected, "EOF");
+ return true;
+ }
+ return false;
+}
+
+// Sets the current error if the current character is not C to:
+// "expected 'C', but got <current character>".
+bool JSONParser::errorIfNotAt(char C, StringRef Message) {
+ if (*Position != C) {
+ std::string Expected =
+ ("'" + StringRef(&C, 1) + "' " + Message).str();
+ if (Position == End)
+ setExpectedError(Expected, "EOF");
+ else
+ setExpectedError(Expected, *Position);
+ return true;
+ }
+ return false;
+}
+
+// Forbidding inlining improves performance by roughly 20%.
+// FIXME: Remove once llvm optimizes this to the faster version without hints.
+LLVM_ATTRIBUTE_NOINLINE static bool
+wasEscaped(StringRef::iterator First, StringRef::iterator Position);
+
+// Returns whether a character at 'Position' was escaped with a leading '\'.
+// 'First' specifies the position of the first character in the string.
+static bool wasEscaped(StringRef::iterator First,
+ StringRef::iterator Position) {
+ assert(Position - 1 >= First);
+ StringRef::iterator I = Position - 1;
+  // We calculate the number of consecutive '\'s before the current position
+ // by iterating backwards through our string.
+ while (I >= First && *I == '\\') --I;
+ // (Position - 1 - I) now contains the number of '\'s before the current
+  // position. If it is odd, the character at 'Position' was escaped.
+ return (Position - 1 - I) % 2 == 1;
+}
+
+// Parses a JSONString, assuming that the current position is on a quote.
+JSONString *JSONParser::parseString() {
+ assert(Position != End);
+ assert(!isWhitespace());
+ if (errorIfNotAt('"', "at start of string"))
+ return 0;
+ StringRef::iterator First = Position + 1;
+
+ // Benchmarking shows that this loop is the hot path of the application with
+ // about 2/3rd of the runtime cycles. Since escaped quotes are not the common
+ // case, and multiple escaped backslashes before escaped quotes are very rare,
+ // we pessimize this case to achieve a smaller inner loop in the common case.
+ // We're doing that by having a quick inner loop that just scans for the next
+ // quote. Once we find the quote we check the last character to see whether
+ // the quote might have been escaped. If the last character is not a '\', we
+ // know the quote was not escaped and have thus found the end of the string.
+ // If the immediately preceding character was a '\', we have to scan backwards
+ // to see whether the previous character was actually an escaped backslash, or
+ // an escape character for the quote. If we find that the current quote was
+ // escaped, we continue parsing for the next quote and repeat.
+ // This optimization brings around 30% performance improvements.
+ do {
+ // Step over the current quote.
+ ++Position;
+ // Find the next quote.
+ while (Position != End && *Position != '"')
+ ++Position;
+ if (errorIfAtEndOfFile("'\"' at end of string"))
+ return 0;
+ // Repeat until the previous character was not a '\' or was an escaped
+ // backslash.
+ } while (*(Position - 1) == '\\' && wasEscaped(First, Position));
+
+ return new (ValueAllocator.Allocate<JSONString>())
+ JSONString(StringRef(First, Position - First));
+}
+
+
+// Advances the position to the next non-whitespace position.
+void JSONParser::nextNonWhitespace() {
+ do {
+ ++Position;
+ } while (isWhitespace());
+}
+
+// Checks if there is a whitespace character at the current position.
+bool JSONParser::isWhitespace() {
+ return *Position == ' ' || *Position == '\t' ||
+ *Position == '\n' || *Position == '\r';
+}
+
+bool JSONParser::failed() const {
+ return Failed;
+}
+
+// Parses a JSONValue, assuming that the current position is at the first
+// character of the value.
+JSONValue *JSONParser::parseValue() {
+ assert(Position != End);
+ assert(!isWhitespace());
+ switch (*Position) {
+ case '[':
+ return new (ValueAllocator.Allocate<JSONArray>(1)) JSONArray(this);
+ case '{':
+ return new (ValueAllocator.Allocate<JSONObject>(1)) JSONObject(this);
+ case '"':
+ return parseString();
+ default:
+ setExpectedError("'[', '{' or '\"' at start of value", *Position);
+ return 0;
+ }
+}
+
+// Parses a JSONKeyValuePair, assuming that the current position is at the first
+// character of the key, value pair.
+JSONKeyValuePair *JSONParser::parseKeyValuePair() {
+ assert(Position != End);
+ assert(!isWhitespace());
+
+ JSONString *Key = parseString();
+ if (Key == 0)
+ return 0;
+
+ nextNonWhitespace();
+ if (errorIfNotAt(':', "between key and value"))
+ return 0;
+
+ nextNonWhitespace();
+ const JSONValue *Value = parseValue();
+ if (Value == 0)
+ return 0;
+
+ return new (ValueAllocator.Allocate<JSONKeyValuePair>(1))
+ JSONKeyValuePair(Key, Value);
+}
+
+/// \brief Parses the first element of a JSON array or object, or closes the
+/// array.
+///
+/// The method assumes that the current position is before the first character
+/// of the element, with possible white space in between. When successful, it
+/// returns the new position after parsing the element. Otherwise, if there is
+/// no next value, it returns a default constructed StringRef::iterator.
+StringRef::iterator JSONParser::parseFirstElement(JSONAtom::Kind ContainerKind,
+ char StartChar, char EndChar,
+ const JSONAtom *&Element) {
+ assert(*Position == StartChar);
+ Element = 0;
+ nextNonWhitespace();
+ if (errorIfAtEndOfFile("value or end of container at start of container"))
+ return StringRef::iterator();
+
+ if (*Position == EndChar)
+ return StringRef::iterator();
+
+ Element = parseElement(ContainerKind);
+ if (Element == 0)
+ return StringRef::iterator();
+
+ return Position;
+}
+
+/// \brief Parses the next element of a JSON array or object, or closes the
+/// array.
+///
+/// The method assumes that the current position is before the ',' which
+/// separates the next element from the current element. When successful, it
+/// returns the new position after parsing the element. Otherwise, if there is
+/// no next value, it returns a default constructed StringRef::iterator.
+StringRef::iterator JSONParser::parseNextElement(JSONAtom::Kind ContainerKind,
+ char EndChar,
+ const JSONAtom *&Element) {
+ Element = 0;
+ nextNonWhitespace();
+ if (errorIfAtEndOfFile("',' or end of container for next element"))
+ return 0;
+
+ if (*Position == ',') {
+ nextNonWhitespace();
+ if (errorIfAtEndOfFile("element in container"))
+ return StringRef::iterator();
+
+ Element = parseElement(ContainerKind);
+ if (Element == 0)
+ return StringRef::iterator();
+
+ return Position;
+ } else if (*Position == EndChar) {
+ return StringRef::iterator();
+ } else {
+ setExpectedError("',' or end of container for next element", *Position);
+ return StringRef::iterator();
+ }
+}
+
+const JSONAtom *JSONParser::parseElement(JSONAtom::Kind ContainerKind) {
+ switch (ContainerKind) {
+ case JSONAtom::JK_Array:
+ return parseValue();
+ case JSONAtom::JK_Object:
+ return parseKeyValuePair();
+ default:
+ llvm_unreachable("Impossible code path");
+ }
+}
+
+bool JSONParser::skipContainer(const JSONContainer &Container) {
+ for (JSONContainer::AtomIterator I = Container.atom_current(),
+ E = Container.atom_end();
+ I != E; ++I) {
+ assert(*I != 0);
+ if (!skip(**I))
+ return false;
+ }
+ return !failed();
+}
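
A minimal sketch of driving the parser above for validation only; error text is emitted through the SourceMgr diagnostics:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/JSONParser.h"
    #include "llvm/Support/SourceMgr.h"
    using namespace llvm;

    bool isValidJSON(StringRef Text) {
      SourceMgr SM;
      JSONParser Parser(Text, &SM);
      // parseRoot() plus skip() over every atom; false on any syntax error.
      return Parser.validate();
    }
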
diff --git a/contrib/llvm/lib/Support/LockFileManager.cpp b/contrib/llvm/lib/Support/LockFileManager.cpp
new file mode 100644
index 0000000..64404a1
--- /dev/null
+++ b/contrib/llvm/lib/Support/LockFileManager.cpp
@@ -0,0 +1,216 @@
+//===--- LockFileManager.cpp - File-level Locking Utility -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Support/LockFileManager.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fstream>
+#include <sys/types.h>
+#include <sys/stat.h>
+#if LLVM_ON_WIN32
+#include <windows.h>
+#endif
+#if LLVM_ON_UNIX
+#include <unistd.h>
+#endif
+using namespace llvm;
+
+/// \brief Attempt to read the lock file with the given name, if it exists.
+///
+/// \param LockFileName The name of the lock file to read.
+///
+/// \returns The process ID of the process that owns this lock file
+Optional<std::pair<std::string, int> >
+LockFileManager::readLockFile(StringRef LockFileName) {
+ // Check whether the lock file exists. If not, clearly there's nothing
+ // to read, so we just return.
+ bool Exists = false;
+ if (sys::fs::exists(LockFileName, Exists) || !Exists)
+ return Optional<std::pair<std::string, int> >();
+
+ // Read the owning host and PID out of the lock file. If it appears that the
+ // owning process is dead, the lock file is invalid.
+ int PID = 0;
+ std::string Hostname;
+ std::ifstream Input(LockFileName.str().c_str());
+ if (Input >> Hostname >> PID && PID > 0 &&
+ processStillExecuting(Hostname, PID))
+ return std::make_pair(Hostname, PID);
+
+ // Delete the lock file. It's invalid anyway.
+ bool Existed;
+ sys::fs::remove(LockFileName, Existed);
+ return Optional<std::pair<std::string, int> >();
+}
+
+bool LockFileManager::processStillExecuting(StringRef Hostname, int PID) {
+#if LLVM_ON_UNIX
+ char MyHostname[256];
+ MyHostname[255] = 0;
+ MyHostname[0] = 0;
+ gethostname(MyHostname, 255);
+ // Check whether the process is dead. If so, we're done.
+ if (MyHostname == Hostname && getsid(PID) == -1 && errno == ESRCH)
+ return false;
+#endif
+
+ return true;
+}
+
+LockFileManager::LockFileManager(StringRef FileName)
+{
+ LockFileName = FileName;
+ LockFileName += ".lock";
+
+ // If the lock file already exists, don't bother to try to create our own
+ // lock file; it won't work anyway. Just figure out who owns this lock file.
+ if ((Owner = readLockFile(LockFileName)))
+ return;
+
+ // Create a lock file that is unique to this instance.
+ UniqueLockFileName = LockFileName;
+ UniqueLockFileName += "-%%%%%%%%";
+ int UniqueLockFileID;
+ if (error_code EC
+ = sys::fs::unique_file(UniqueLockFileName.str(),
+ UniqueLockFileID,
+ UniqueLockFileName,
+ /*makeAbsolute=*/false)) {
+ Error = EC;
+ return;
+ }
+
+ // Write our process ID to our unique lock file.
+ {
+ raw_fd_ostream Out(UniqueLockFileID, /*shouldClose=*/true);
+
+#if LLVM_ON_UNIX
+ // FIXME: move getpid() call into LLVM
+ char hostname[256];
+ hostname[255] = 0;
+ hostname[0] = 0;
+ gethostname(hostname, 255);
+ Out << hostname << ' ' << getpid();
+#else
+ Out << "localhost 1";
+#endif
+ Out.close();
+
+ if (Out.has_error()) {
+ // We failed to write out PID, so make up an excuse, remove the
+ // unique lock file, and fail.
+ Error = make_error_code(errc::no_space_on_device);
+ bool Existed;
+ sys::fs::remove(UniqueLockFileName.c_str(), Existed);
+ return;
+ }
+ }
+
+ // Create a hard link from the lock file name. If this succeeds, we're done.
+ error_code EC
+ = sys::fs::create_hard_link(UniqueLockFileName.str(),
+ LockFileName.str());
+ if (EC == errc::success)
+ return;
+
+ // Creating the hard link failed.
+
+#ifdef LLVM_ON_UNIX
+ // The creation of the hard link may appear to fail, but if stat'ing the
+ // unique file returns a link count of 2, then we can still declare success.
+ struct stat StatBuf;
+ if (stat(UniqueLockFileName.c_str(), &StatBuf) == 0 &&
+ StatBuf.st_nlink == 2)
+ return;
+#endif
+
+ // Someone else managed to create the lock file first. Wipe out our unique
+ // lock file (it's useless now) and read the process ID from the lock file.
+ bool Existed;
+ sys::fs::remove(UniqueLockFileName.str(), Existed);
+ if ((Owner = readLockFile(LockFileName)))
+ return;
+
+ // There is a lock file that nobody owns; try to clean it up and report
+ // an error.
+ sys::fs::remove(LockFileName.str(), Existed);
+ Error = EC;
+}
+
+LockFileManager::LockFileState LockFileManager::getState() const {
+ if (Owner)
+ return LFS_Shared;
+
+ if (Error)
+ return LFS_Error;
+
+ return LFS_Owned;
+}
+
+LockFileManager::~LockFileManager() {
+ if (getState() != LFS_Owned)
+ return;
+
+ // Since we own the lock, remove the lock file and our own unique lock file.
+ bool Existed;
+ sys::fs::remove(LockFileName.str(), Existed);
+ sys::fs::remove(UniqueLockFileName.str(), Existed);
+}
+
+void LockFileManager::waitForUnlock() {
+ if (getState() != LFS_Shared)
+ return;
+
+#if LLVM_ON_WIN32
+ unsigned long Interval = 1;
+#else
+ struct timespec Interval;
+ Interval.tv_sec = 0;
+ Interval.tv_nsec = 1000000;
+#endif
+ // Don't wait more than an hour for the file to appear.
+ const unsigned MaxSeconds = 3600;
+ do {
+ // Sleep for the designated interval, to allow the owning process time to
+ // finish up and remove the lock file.
+ // FIXME: Should we hook in to system APIs to get a notification when the
+ // lock file is deleted?
+#if LLVM_ON_WIN32
+ Sleep(Interval);
+#else
+ nanosleep(&Interval, NULL);
+#endif
+ // If the file no longer exists, we're done.
+ bool Exists = false;
+ if (!sys::fs::exists(LockFileName.str(), Exists) && !Exists)
+ return;
+
+ if (!processStillExecuting((*Owner).first, (*Owner).second))
+ return;
+
+ // Exponentially increase the time we wait for the lock to be removed.
+#if LLVM_ON_WIN32
+ Interval *= 2;
+#else
+ Interval.tv_sec *= 2;
+ Interval.tv_nsec *= 2;
+ if (Interval.tv_nsec >= 1000000000) {
+ ++Interval.tv_sec;
+ Interval.tv_nsec -= 1000000000;
+ }
+#endif
+ } while (
+#if LLVM_ON_WIN32
+ Interval < MaxSeconds * 1000
+#else
+ Interval.tv_sec < (time_t)MaxSeconds
+#endif
+ );
+
+ // Give up.
+}
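
The intended usage pattern for the new class, sketched from the API above (the output name is hypothetical):

    #include "llvm/Support/LockFileManager.h"
    using namespace llvm;

    bool buildArtifact(StringRef OutputFile) {
      LockFileManager Locked(OutputFile); // creates OutputFile + ".lock"
      switch (Locked.getState()) {
      case LockFileManager::LFS_Error:
        return false;                 // couldn't create or adopt a lock
      case LockFileManager::LFS_Shared:
        Locked.waitForUnlock();       // another process owns the build
        return true;                  // its result should now exist
      case LockFileManager::LFS_Owned:
        break;                        // we own the lock; do the work
      }
      // ... produce OutputFile here; ~LockFileManager removes the locks.
      return true;
    }
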
diff --git a/contrib/llvm/lib/Support/ManagedStatic.cpp b/contrib/llvm/lib/Support/ManagedStatic.cpp
index c767c15..098cccb 100644
--- a/contrib/llvm/lib/Support/ManagedStatic.cpp
+++ b/contrib/llvm/lib/Support/ManagedStatic.cpp
@@ -27,8 +27,15 @@ void ManagedStaticBase::RegisterManagedStatic(void *(*Creator)(),
if (Ptr == 0) {
void* tmp = Creator ? Creator() : 0;
+ TsanHappensBefore(this);
sys::MemoryFence();
+
+ // This write is racy against the first read in the ManagedStatic
+ // accessors. The race is benign because it does a second read after a
+ // memory fence, at which point it isn't possible to get a partial value.
+ TsanIgnoreWritesBegin();
Ptr = tmp;
+ TsanIgnoreWritesEnd();
DeleterFn = Deleter;
// Add to list of managed statics.
@@ -72,4 +79,3 @@ void llvm::llvm_shutdown() {
if (llvm_is_multithreaded()) llvm_stop_multithreaded();
}
-
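
For reference, the idiom this file implements, as used throughout LLVM; Registry is a hypothetical payload type:

    #include "llvm/Support/ManagedStatic.h"
    using namespace llvm;

    struct Registry { int Count; Registry() : Count(0) {} };
    static ManagedStatic<Registry> TheRegistry;

    int bump() {
      // The first dereference lazily constructs Registry via the
      // racy-but-fenced write annotated above; llvm_shutdown() later
      // destroys every ManagedStatic in reverse registration order.
      return ++TheRegistry->Count;
    }
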
diff --git a/contrib/llvm/lib/Support/MemoryBuffer.cpp b/contrib/llvm/lib/Support/MemoryBuffer.cpp
index 0771af5..16e5c7a 100644
--- a/contrib/llvm/lib/Support/MemoryBuffer.cpp
+++ b/contrib/llvm/lib/Support/MemoryBuffer.cpp
@@ -14,6 +14,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Config/config.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/Path.h"
@@ -29,15 +30,12 @@
#include <sys/stat.h>
#if !defined(_MSC_VER) && !defined(__MINGW32__)
#include <unistd.h>
-#include <sys/uio.h>
#else
#include <io.h>
#endif
#include <fcntl.h>
using namespace llvm;
-namespace { const llvm::error_code success; }
-
//===----------------------------------------------------------------------===//
// MemoryBuffer implementation itself.
//===----------------------------------------------------------------------===//
@@ -306,7 +304,17 @@ error_code MemoryBuffer::getOpenFile(int FD, const char *Filename,
RealMapOffset)) {
result.reset(GetNamedBuffer<MemoryBufferMMapFile>(
StringRef(Pages + Delta, MapSize), Filename, RequiresNullTerminator));
- return success;
+
+ if (RequiresNullTerminator && result->getBufferEnd()[0] != '\0') {
+ // There could be a racing issue that resulted in the file being larger
+ // than the FileSize passed by the caller. We already have an assertion
+ // for this in MemoryBuffer::init() but have a runtime guarantee that
+ // the buffer will be null-terminated here, so do a copy that adds a
+ // null-terminator.
+ result.reset(MemoryBuffer::getMemBufferCopy(result->getBuffer(),
+ Filename));
+ }
+ return error_code::success();
}
}
@@ -321,29 +329,35 @@ error_code MemoryBuffer::getOpenFile(int FD, const char *Filename,
char *BufPtr = const_cast<char*>(SB->getBufferStart());
size_t BytesLeft = MapSize;
+#ifndef HAVE_PREAD
if (lseek(FD, Offset, SEEK_SET) == -1)
return error_code(errno, posix_category());
+#endif
while (BytesLeft) {
+#ifdef HAVE_PREAD
+ ssize_t NumRead = ::pread(FD, BufPtr, BytesLeft, MapSize-BytesLeft+Offset);
+#else
ssize_t NumRead = ::read(FD, BufPtr, BytesLeft);
+#endif
if (NumRead == -1) {
if (errno == EINTR)
continue;
// Error while reading.
return error_code(errno, posix_category());
- } else if (NumRead == 0) {
- // We hit EOF early, truncate and terminate buffer.
- Buf->BufferEnd = BufPtr;
- *BufPtr = 0;
- result.swap(SB);
- return success;
+ }
+ if (NumRead == 0) {
+      assert(0 && "We got an inaccurate FileSize value or fstat reported an "
+ "invalid file size.");
+ *BufPtr = '\0'; // null-terminate at the actual size.
+ break;
}
BytesLeft -= NumRead;
BufPtr += NumRead;
}
result.swap(SB);
- return success;
+ return error_code::success();
}
//===----------------------------------------------------------------------===//
@@ -372,5 +386,5 @@ error_code MemoryBuffer::getSTDIN(OwningPtr<MemoryBuffer> &result) {
} while (ReadBytes != 0);
result.reset(getMemBufferCopy(Buffer, "<stdin>"));
- return success;
+ return error_code::success();
}
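
A sketch of the common read path affected by the error_code::success() change, assuming a caller that just wants the file contents:

    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/system_error.h"
    using namespace llvm;

    error_code readWholeFile(const char *Path, OwningPtr<MemoryBuffer> &Buf) {
      if (error_code EC = MemoryBuffer::getFile(Path, Buf))
        return EC;                   // propagate open/read failures
      // Buf->getBuffer() is null-terminated by default.
      return error_code::success();
    }
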
diff --git a/contrib/llvm/lib/Support/Mutex.cpp b/contrib/llvm/lib/Support/Mutex.cpp
index 8874e94..da5baab 100644
--- a/contrib/llvm/lib/Support/Mutex.cpp
+++ b/contrib/llvm/lib/Support/Mutex.cpp
@@ -19,7 +19,7 @@
//=== independent code.
//===----------------------------------------------------------------------===//
-#if !defined(ENABLE_THREADS) || ENABLE_THREADS == 0
+#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
// Define all methods as no-ops if threading is explicitly disabled
namespace llvm {
using namespace sys;
@@ -40,109 +40,80 @@ bool MutexImpl::tryacquire() { return true; }
namespace llvm {
using namespace sys;
-
-// This variable is useful for situations where the pthread library has been
-// compiled with weak linkage for its interface symbols. This allows the
-// threading support to be turned off by simply not linking against -lpthread.
-// In that situation, the value of pthread_mutex_init will be 0 and
-// consequently pthread_enabled will be false. In such situations, all the
-// pthread operations become no-ops and the functions all return false. If
-// pthread_mutex_init does have an address, then mutex support is enabled.
-// Note: all LLVM tools will link against -lpthread if its available since it
-// is configured into the LIBS variable.
-// Note: this line of code generates a warning if pthread_mutex_init is not
-// declared with weak linkage. It's safe to ignore the warning.
-static const bool pthread_enabled = true;
-
// Construct a Mutex using pthread calls
MutexImpl::MutexImpl( bool recursive)
: data_(0)
{
- if (pthread_enabled)
- {
- // Declare the pthread_mutex data structures
- pthread_mutex_t* mutex =
- static_cast<pthread_mutex_t*>(malloc(sizeof(pthread_mutex_t)));
- pthread_mutexattr_t attr;
-
- // Initialize the mutex attributes
- int errorcode = pthread_mutexattr_init(&attr);
- assert(errorcode == 0);
-
- // Initialize the mutex as a recursive mutex, if requested, or normal
- // otherwise.
- int kind = ( recursive ? PTHREAD_MUTEX_RECURSIVE : PTHREAD_MUTEX_NORMAL );
- errorcode = pthread_mutexattr_settype(&attr, kind);
- assert(errorcode == 0);
+ // Declare the pthread_mutex data structures
+ pthread_mutex_t* mutex =
+ static_cast<pthread_mutex_t*>(malloc(sizeof(pthread_mutex_t)));
+ pthread_mutexattr_t attr;
+
+ // Initialize the mutex attributes
+ int errorcode = pthread_mutexattr_init(&attr);
+ assert(errorcode == 0); (void)errorcode;
+
+ // Initialize the mutex as a recursive mutex, if requested, or normal
+ // otherwise.
+ int kind = ( recursive ? PTHREAD_MUTEX_RECURSIVE : PTHREAD_MUTEX_NORMAL );
+ errorcode = pthread_mutexattr_settype(&attr, kind);
+ assert(errorcode == 0);
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__DragonFly__)
- // Make it a process local mutex
- errorcode = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
- assert(errorcode == 0);
+ // Make it a process local mutex
+ errorcode = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
+ assert(errorcode == 0);
#endif
- // Initialize the mutex
- errorcode = pthread_mutex_init(mutex, &attr);
- assert(errorcode == 0);
+ // Initialize the mutex
+ errorcode = pthread_mutex_init(mutex, &attr);
+ assert(errorcode == 0);
- // Destroy the attributes
- errorcode = pthread_mutexattr_destroy(&attr);
- assert(errorcode == 0);
+ // Destroy the attributes
+ errorcode = pthread_mutexattr_destroy(&attr);
+ assert(errorcode == 0);
- // Assign the data member
- data_ = mutex;
- }
+ // Assign the data member
+ data_ = mutex;
}
// Destruct a Mutex
MutexImpl::~MutexImpl()
{
- if (pthread_enabled)
- {
- pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
- assert(mutex != 0);
- pthread_mutex_destroy(mutex);
- free(mutex);
- }
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
+ assert(mutex != 0);
+ pthread_mutex_destroy(mutex);
+ free(mutex);
}
bool
MutexImpl::acquire()
{
- if (pthread_enabled)
- {
- pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
- assert(mutex != 0);
-
- int errorcode = pthread_mutex_lock(mutex);
- return errorcode == 0;
- } else return false;
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
+ assert(mutex != 0);
+
+ int errorcode = pthread_mutex_lock(mutex);
+ return errorcode == 0;
}
bool
MutexImpl::release()
{
- if (pthread_enabled)
- {
- pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
- assert(mutex != 0);
-
- int errorcode = pthread_mutex_unlock(mutex);
- return errorcode == 0;
- } else return false;
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
+ assert(mutex != 0);
+
+ int errorcode = pthread_mutex_unlock(mutex);
+ return errorcode == 0;
}
bool
MutexImpl::tryacquire()
{
- if (pthread_enabled)
- {
- pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
- assert(mutex != 0);
-
- int errorcode = pthread_mutex_trylock(mutex);
- return errorcode == 0;
- } else return false;
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
+ assert(mutex != 0);
+
+ int errorcode = pthread_mutex_trylock(mutex);
+ return errorcode == 0;
}
}
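
MutexImpl is the platform layer underneath sys::Mutex; typical code goes through the RAII wrapper rather than calling acquire()/release() directly. A sketch, with hypothetical guarded state:

    #include "llvm/Support/Mutex.h"
    using namespace llvm;

    static sys::Mutex SharedLock;
    static int SharedCounter;

    int increment() {
      sys::ScopedLock Guard(SharedLock); // acquire now, release on scope exit
      return ++SharedCounter;
    }
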
diff --git a/contrib/llvm/lib/Support/Path.cpp b/contrib/llvm/lib/Support/Path.cpp
index e5b7cd3..dcddeda 100644
--- a/contrib/llvm/lib/Support/Path.cpp
+++ b/contrib/llvm/lib/Support/Path.cpp
@@ -38,16 +38,6 @@ bool Path::operator<(const Path& that) const {
return path < that.path;
}
-Path
-Path::GetLLVMConfigDir() {
- Path result;
-#ifdef LLVM_ETCDIR
- if (result.set(LLVM_ETCDIR))
- return result;
-#endif
- return GetLLVMDefaultConfigDir();
-}
-
LLVMFileType
sys::IdentifyFileType(const char *magic, unsigned length) {
assert(magic && "Invalid magic number string");
@@ -100,7 +90,7 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
case 0xCF: {
uint16_t type = 0;
if (magic[0] == char(0xFE) && magic[1] == char(0xED) &&
- magic[2] == char(0xFA) &&
+ magic[2] == char(0xFA) &&
(magic[3] == char(0xCE) || magic[3] == char(0xCF))) {
/* Native endian */
if (length >= 16) type = magic[14] << 8 | magic[15];
@@ -162,31 +152,31 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
bool
Path::isArchive() const {
- LLVMFileType type;
+ fs::file_magic type;
if (fs::identify_magic(str(), type))
return false;
- return type == Archive_FileType;
+ return type == fs::file_magic::archive;
}
bool
Path::isDynamicLibrary() const {
- LLVMFileType type;
+ fs::file_magic type;
if (fs::identify_magic(str(), type))
return false;
switch (type) {
default: return false;
- case Mach_O_FixedVirtualMemorySharedLib_FileType:
- case Mach_O_DynamicallyLinkedSharedLib_FileType:
- case Mach_O_DynamicallyLinkedSharedLibStub_FileType:
- case ELF_SharedObject_FileType:
- case COFF_FileType: return true;
+ case fs::file_magic::macho_fixed_virtual_memory_shared_lib:
+ case fs::file_magic::macho_dynamically_linked_shared_lib:
+ case fs::file_magic::macho_dynamically_linked_shared_lib_stub:
+ case fs::file_magic::elf_shared_object:
+ case fs::file_magic::pecoff_executable: return true;
}
}
bool
Path::isObjectFile() const {
- LLVMFileType type;
- if (fs::identify_magic(str(), type) || type == Unknown_FileType)
+ fs::file_magic type;
+ if (fs::identify_magic(str(), type) || type == fs::file_magic::unknown)
return false;
return true;
}
@@ -222,10 +212,10 @@ Path::appendSuffix(StringRef suffix) {
bool
Path::isBitcodeFile() const {
- LLVMFileType type;
+ fs::file_magic type;
if (fs::identify_magic(str(), type))
return false;
- return type == Bitcode_FileType;
+ return type == fs::file_magic::bitcode;
}
bool Path::hasMagicNumber(StringRef Magic) const {
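
The predicates above now sit on top of the fs::file_magic interface; queried directly it looks like this (a sketch):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/FileSystem.h"
    using namespace llvm;

    bool looksLikeBitcode(StringRef FilePath) {
      sys::fs::file_magic Type;
      if (sys::fs::identify_magic(FilePath, Type))
        return false;                       // treat I/O errors as "no"
      return Type == sys::fs::file_magic::bitcode;
    }
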
diff --git a/contrib/llvm/lib/Support/PathV2.cpp b/contrib/llvm/lib/Support/PathV2.cpp
index bebe442..e2a69a6 100644
--- a/contrib/llvm/lib/Support/PathV2.cpp
+++ b/contrib/llvm/lib/Support/PathV2.cpp
@@ -13,6 +13,7 @@
#include "llvm/Support/PathV2.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include <cctype>
#include <cstdio>
@@ -23,15 +24,13 @@ namespace {
using llvm::sys::path::is_separator;
#ifdef LLVM_ON_WIN32
- const StringRef separators = "\\/";
- const char prefered_separator = '\\';
+ const char *separators = "\\/";
+ const char prefered_separator = '\\';
#else
- const StringRef separators = "/";
- const char prefered_separator = '/';
+ const char separators = '/';
+ const char prefered_separator = '/';
#endif
- const llvm::error_code success;
-
StringRef find_first_component(StringRef path) {
// Look for this first component in the following order.
// * empty (in this case we return an empty string)
@@ -347,7 +346,7 @@ const StringRef root_directory(StringRef path) {
const StringRef relative_path(StringRef path) {
StringRef root = root_path(path);
- return root.substr(root.size());
+ return path.substr(root.size());
}
void append(SmallVectorImpl<char> &path, const Twine &a,
@@ -492,7 +491,7 @@ bool is_separator(char value) {
void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result) {
result.clear();
-
+
// Check whether the temporary directory is specified by an environment
// variable.
const char *EnvironmentVariable;
@@ -505,7 +504,7 @@ void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result) {
result.append(RequestedDir, RequestedDir + strlen(RequestedDir));
return;
}
-
+
// Fall back to a system default.
const char *DefaultResult;
#ifdef LLVM_ON_WIN32
@@ -519,7 +518,7 @@ void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result) {
#endif
result.append(DefaultResult, DefaultResult + strlen(DefaultResult));
}
-
+
bool has_root_name(const Twine &path) {
SmallString<128> path_storage;
StringRef p = path.toStringRef(path_storage);
@@ -601,12 +600,16 @@ namespace fs {
error_code make_absolute(SmallVectorImpl<char> &path) {
StringRef p(path.data(), path.size());
- bool rootName = path::has_root_name(p),
- rootDirectory = path::has_root_directory(p);
+ bool rootDirectory = path::has_root_directory(p),
+#ifdef LLVM_ON_WIN32
+ rootName = path::has_root_name(p);
+#else
+ rootName = true;
+#endif
// Already absolute.
if (rootName && rootDirectory)
- return success;
+ return error_code::success();
// All of the following conditions will need the current directory.
SmallString<128> current_dir;
@@ -618,7 +621,7 @@ error_code make_absolute(SmallVectorImpl<char> &path) {
path::append(current_dir, p);
// Set path to the result.
path.swap(current_dir);
- return success;
+ return error_code::success();
}
if (!rootName && rootDirectory) {
@@ -627,7 +630,7 @@ error_code make_absolute(SmallVectorImpl<char> &path) {
path::append(curDirRootName, p);
// Set path to the result.
path.swap(curDirRootName);
- return success;
+ return error_code::success();
}
if (rootName && !rootDirectory) {
@@ -639,7 +642,7 @@ error_code make_absolute(SmallVectorImpl<char> &path) {
SmallString<128> res;
path::append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath);
path.swap(res);
- return success;
+ return error_code::success();
}
llvm_unreachable("All rootName and rootDirectory combinations should have "
@@ -651,12 +654,13 @@ error_code create_directories(const Twine &path, bool &existed) {
StringRef p = path.toStringRef(path_storage);
StringRef parent = path::parent_path(p);
- bool parent_exists;
+ if (!parent.empty()) {
+ bool parent_exists;
+ if (error_code ec = fs::exists(parent, parent_exists)) return ec;
- if (error_code ec = fs::exists(parent, parent_exists)) return ec;
-
- if (!parent_exists)
- if (error_code ec = create_directories(parent, existed)) return ec;
+ if (!parent_exists)
+ if (error_code ec = create_directories(parent, existed)) return ec;
+ }
return create_directory(p, existed);
}
@@ -678,7 +682,7 @@ error_code is_directory(const Twine &path, bool &result) {
if (error_code ec = status(path, st))
return ec;
result = is_directory(st);
- return success;
+ return error_code::success();
}
bool is_regular_file(file_status status) {
@@ -690,7 +694,7 @@ error_code is_regular_file(const Twine &path, bool &result) {
if (error_code ec = status(path, st))
return ec;
result = is_regular_file(st);
- return success;
+ return error_code::success();
}
bool is_symlink(file_status status) {
@@ -702,7 +706,7 @@ error_code is_symlink(const Twine &path, bool &result) {
if (error_code ec = status(path, st))
return ec;
result = is_symlink(st);
- return success;
+ return error_code::success();
}
bool is_other(file_status status) {
@@ -729,23 +733,134 @@ error_code has_magic(const Twine &path, const Twine &magic, bool &result) {
if (ec == errc::value_too_large) {
// Magic.size() > file_size(Path).
result = false;
- return success;
+ return error_code::success();
}
return ec;
}
result = Magic == Buffer;
- return success;
+ return error_code::success();
+}
+
+/// @brief Identify the magic in magic.
+file_magic identify_magic(StringRef magic) {
+ switch ((unsigned char)magic[0]) {
+  case 0xDE:  // 0x0B17C0DE = BC wrapper
+ if (magic[1] == (char)0xC0 && magic[2] == (char)0x17 &&
+ magic[3] == (char)0x0B)
+ return file_magic::bitcode;
+ break;
+ case 'B':
+ if (magic[1] == 'C' && magic[2] == (char)0xC0 && magic[3] == (char)0xDE)
+ return file_magic::bitcode;
+ break;
+ case '!':
+ if (magic.size() >= 8)
+ if (memcmp(magic.data(),"!<arch>\n",8) == 0)
+ return file_magic::archive;
+ break;
+
+ case '\177':
+ if (magic[1] == 'E' && magic[2] == 'L' && magic[3] == 'F') {
+ if (magic.size() >= 18 && magic[17] == 0)
+ switch (magic[16]) {
+ default: break;
+ case 1: return file_magic::elf_relocatable;
+ case 2: return file_magic::elf_executable;
+ case 3: return file_magic::elf_shared_object;
+ case 4: return file_magic::elf_core;
+ }
+ }
+ break;
+
+ case 0xCA:
+ if (magic[1] == char(0xFE) && magic[2] == char(0xBA) &&
+ magic[3] == char(0xBE)) {
+ // This is complicated by an overlap with Java class files.
+ // See the Mach-O section in /usr/share/file/magic for details.
+ if (magic.size() >= 8 && magic[7] < 43)
+ // FIXME: Universal Binary of any type.
+ return file_magic::macho_dynamically_linked_shared_lib;
+ }
+ break;
+
+ // The two magic numbers for mach-o are:
+ // 0xfeedface - 32-bit mach-o
+ // 0xfeedfacf - 64-bit mach-o
+ case 0xFE:
+ case 0xCE:
+ case 0xCF: {
+ uint16_t type = 0;
+ if (magic[0] == char(0xFE) && magic[1] == char(0xED) &&
+ magic[2] == char(0xFA) &&
+ (magic[3] == char(0xCE) || magic[3] == char(0xCF))) {
+ /* Native endian */
+ if (magic.size() >= 16) type = magic[14] << 8 | magic[15];
+ } else if ((magic[0] == char(0xCE) || magic[0] == char(0xCF)) &&
+ magic[1] == char(0xFA) && magic[2] == char(0xED) &&
+ magic[3] == char(0xFE)) {
+ /* Reverse endian */
+ if (magic.size() >= 14) type = magic[13] << 8 | magic[12];
+ }
+ switch (type) {
+ default: break;
+ case 1: return file_magic::macho_object;
+ case 2: return file_magic::macho_executable;
+ case 3: return file_magic::macho_fixed_virtual_memory_shared_lib;
+ case 4: return file_magic::macho_core;
+      case 5: return file_magic::macho_preload_executable;
+ case 6: return file_magic::macho_dynamically_linked_shared_lib;
+ case 7: return file_magic::macho_dynamic_linker;
+ case 8: return file_magic::macho_bundle;
+ case 9: return file_magic::macho_dynamic_linker;
+ case 10: return file_magic::macho_dsym_companion;
+ }
+ break;
+ }
+ case 0xF0: // PowerPC Windows
+ case 0x83: // Alpha 32-bit
+ case 0x84: // Alpha 64-bit
+ case 0x66: // MIPS R4000 Windows
+ case 0x50: // mc68K
+ case 0x4c: // 80386 Windows
+ if (magic[1] == 0x01)
+ return file_magic::coff_object;
+
+ case 0x90: // PA-RISC Windows
+ case 0x68: // mc68K Windows
+ if (magic[1] == 0x02)
+ return file_magic::coff_object;
+ break;
+
+ case 0x4d: // Possible MS-DOS stub on Windows PE file
+ if (magic[1] == 0x5a) {
+ uint32_t off =
+ *reinterpret_cast<const support::ulittle32_t*>(magic.data() + 0x3c);
+ // PE/COFF file, either EXE or DLL.
+ if (off < magic.size() && memcmp(magic.data() + off, "PE\0\0",4) == 0)
+ return file_magic::pecoff_executable;
+ }
+ break;
+
+ case 0x64: // x86-64 Windows.
+ if (magic[1] == char(0x86))
+ return file_magic::coff_object;
+ break;
+
+ default:
+ break;
+ }
+ return file_magic::unknown;
}
-error_code identify_magic(const Twine &path, LLVMFileType &result) {
+error_code identify_magic(const Twine &path, file_magic &result) {
SmallString<32> Magic;
error_code ec = get_magic(path, Magic.capacity(), Magic);
if (ec && ec != errc::value_too_large)
return ec;
- result = IdentifyFileType(Magic.data(), Magic.size());
- return success;
+ result = identify_magic(Magic);
+ return error_code::success();
}
namespace {
@@ -753,7 +868,9 @@ error_code remove_all_r(StringRef path, file_type ft, uint32_t &count) {
if (ft == file_type::directory_file) {
// This code would be a lot better with exceptions ;/.
error_code ec;
- for (directory_iterator i(path, ec), e; i != e; i.increment(ec)) {
+ directory_iterator i(path, ec);
+ if (ec) return ec;
+ for (directory_iterator e; i != e; i.increment(ec)) {
if (ec) return ec;
file_status st;
if (error_code ec = i->status(st)) return ec;
@@ -770,7 +887,7 @@ error_code remove_all_r(StringRef path, file_type ft, uint32_t &count) {
++count;
}
- return success;
+ return error_code::success();
}
} // end unnamed namespace
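
As a point of reference, a minimal sketch (not part of the commit) of how the new two-argument identify_magic overload is used; it assumes only the llvm/Support/FileSystem.h declarations this file implements, and classifyFile is a hypothetical helper name.

    #include "llvm/Support/FileSystem.h"
    #include "llvm/Support/raw_ostream.h"
    #include "llvm/Support/system_error.h"

    using namespace llvm;

    static void classifyFile(const Twine &Path) {
      sys::fs::file_magic Magic;
      // Reads the leading bytes of the file and classifies them, exactly as
      // the implementation above does via get_magic().
      if (error_code ec = sys::fs::identify_magic(Path, Magic)) {
        errs() << "error: " << ec.message() << '\n';
        return;
      }
      if (Magic == sys::fs::file_magic::bitcode)
        errs() << "LLVM bitcode\n";
      else if (Magic == sys::fs::file_magic::archive)
        errs() << "static archive\n";
      else if (Magic == sys::fs::file_magic::elf_executable)
        errs() << "ELF executable\n";
      else
        errs() << "something else\n";
    }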
diff --git a/contrib/llvm/lib/Support/Program.cpp b/contrib/llvm/lib/Support/Program.cpp
index 01860b0..75bc282 100644
--- a/contrib/llvm/lib/Support/Program.cpp
+++ b/contrib/llvm/lib/Support/Program.cpp
@@ -13,6 +13,7 @@
#include "llvm/Support/Program.h"
#include "llvm/Config/config.h"
+#include "llvm/Support/system_error.h"
using namespace llvm;
using namespace sys;
diff --git a/contrib/llvm/lib/Support/RWMutex.cpp b/contrib/llvm/lib/Support/RWMutex.cpp
index d0b1e10..6a34f2d 100644
--- a/contrib/llvm/lib/Support/RWMutex.cpp
+++ b/contrib/llvm/lib/Support/RWMutex.cpp
@@ -20,7 +20,7 @@
//=== independent code.
//===----------------------------------------------------------------------===//
-#if !defined(ENABLE_THREADS) || ENABLE_THREADS == 0
+#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
// Define all methods as no-ops if threading is explicitly disabled
namespace llvm {
using namespace sys;
@@ -42,107 +42,75 @@ bool RWMutexImpl::writer_release() { return true; }
namespace llvm {
using namespace sys;
-
-// This variable is useful for situations where the pthread library has been
-// compiled with weak linkage for its interface symbols. This allows the
-// threading support to be turned off by simply not linking against -lpthread.
-// In that situation, the value of pthread_mutex_init will be 0 and
-// consequently pthread_enabled will be false. In such situations, all the
-// pthread operations become no-ops and the functions all return false. If
-// pthread_rwlock_init does have an address, then rwlock support is enabled.
-// Note: all LLVM tools will link against -lpthread if its available since it
-// is configured into the LIBS variable.
-// Note: this line of code generates a warning if pthread_rwlock_init is not
-// declared with weak linkage. It's safe to ignore the warning.
-static const bool pthread_enabled = true;
-
// Construct a RWMutex using pthread calls
RWMutexImpl::RWMutexImpl()
: data_(0)
{
- if (pthread_enabled)
- {
- // Declare the pthread_rwlock data structures
- pthread_rwlock_t* rwlock =
- static_cast<pthread_rwlock_t*>(malloc(sizeof(pthread_rwlock_t)));
+ // Declare the pthread_rwlock data structures
+ pthread_rwlock_t* rwlock =
+ static_cast<pthread_rwlock_t*>(malloc(sizeof(pthread_rwlock_t)));
#ifdef __APPLE__
- // Workaround a bug/mis-feature in Darwin's pthread_rwlock_init.
- bzero(rwlock, sizeof(pthread_rwlock_t));
+ // Workaround a bug/mis-feature in Darwin's pthread_rwlock_init.
+ bzero(rwlock, sizeof(pthread_rwlock_t));
#endif
- // Initialize the rwlock
- int errorcode = pthread_rwlock_init(rwlock, NULL);
- (void)errorcode;
- assert(errorcode == 0);
+ // Initialize the rwlock
+ int errorcode = pthread_rwlock_init(rwlock, NULL);
+ (void)errorcode;
+ assert(errorcode == 0);
- // Assign the data member
- data_ = rwlock;
- }
+ // Assign the data member
+ data_ = rwlock;
}
// Destruct a RWMutex
RWMutexImpl::~RWMutexImpl()
{
- if (pthread_enabled)
- {
- pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
- pthread_rwlock_destroy(rwlock);
- free(rwlock);
- }
+ pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
+ assert(rwlock != 0);
+ pthread_rwlock_destroy(rwlock);
+ free(rwlock);
}
bool
RWMutexImpl::reader_acquire()
{
- if (pthread_enabled)
- {
- pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
-
- int errorcode = pthread_rwlock_rdlock(rwlock);
- return errorcode == 0;
- } else return false;
+ pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
+ assert(rwlock != 0);
+
+ int errorcode = pthread_rwlock_rdlock(rwlock);
+ return errorcode == 0;
}
bool
RWMutexImpl::reader_release()
{
- if (pthread_enabled)
- {
- pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
-
- int errorcode = pthread_rwlock_unlock(rwlock);
- return errorcode == 0;
- } else return false;
+ pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
+ assert(rwlock != 0);
+
+ int errorcode = pthread_rwlock_unlock(rwlock);
+ return errorcode == 0;
}
bool
RWMutexImpl::writer_acquire()
{
- if (pthread_enabled)
- {
- pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
-
- int errorcode = pthread_rwlock_wrlock(rwlock);
- return errorcode == 0;
- } else return false;
+ pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
+ assert(rwlock != 0);
+
+ int errorcode = pthread_rwlock_wrlock(rwlock);
+ return errorcode == 0;
}
bool
RWMutexImpl::writer_release()
{
- if (pthread_enabled)
- {
- pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
- assert(rwlock != 0);
-
- int errorcode = pthread_rwlock_unlock(rwlock);
- return errorcode == 0;
- } else return false;
+ pthread_rwlock_t* rwlock = static_cast<pthread_rwlock_t*>(data_);
+ assert(rwlock != 0);
+
+ int errorcode = pthread_rwlock_unlock(rwlock);
+ return errorcode == 0;
}
}
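
The simplification above leaves RWMutexImpl as a thin wrapper over pthread_rwlock_t. A minimal usage sketch (not from the tree), assuming the public sys::RWMutex/ScopedReader/ScopedWriter wrappers from llvm/Support/RWMutex.h; CachedValue and the function names are hypothetical.

    #include "llvm/Support/RWMutex.h"

    using namespace llvm;

    static sys::RWMutex CacheLock;
    static int CachedValue = 0;

    int readCached() {
      sys::ScopedReader Guard(CacheLock); // reader_acquire()/reader_release()
      return CachedValue;
    }

    void writeCached(int V) {
      sys::ScopedWriter Guard(CacheLock); // writer_acquire()/writer_release()
      CachedValue = V;
    }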
diff --git a/contrib/llvm/lib/Support/SmallPtrSet.cpp b/contrib/llvm/lib/Support/SmallPtrSet.cpp
index 997ce0b..68d9c29 100644
--- a/contrib/llvm/lib/Support/SmallPtrSet.cpp
+++ b/contrib/llvm/lib/Support/SmallPtrSet.cpp
@@ -14,6 +14,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/MathExtras.h"
+#include <algorithm>
#include <cstdlib>
using namespace llvm;
@@ -223,6 +224,56 @@ void SmallPtrSetImpl::CopyFrom(const SmallPtrSetImpl &RHS) {
NumTombstones = RHS.NumTombstones;
}
+void SmallPtrSetImpl::swap(SmallPtrSetImpl &RHS) {
+ if (this == &RHS) return;
+
+ // We can only avoid copying elements if neither set is small.
+ if (!this->isSmall() && !RHS.isSmall()) {
+ std::swap(this->CurArray, RHS.CurArray);
+ std::swap(this->CurArraySize, RHS.CurArraySize);
+ std::swap(this->NumElements, RHS.NumElements);
+ std::swap(this->NumTombstones, RHS.NumTombstones);
+ return;
+ }
+
+ // FIXME: From here on we assume that both sets have the same small size.
+
+ // If only RHS is small, copy the small elements into LHS and move the pointer
+ // from LHS to RHS.
+ if (!this->isSmall() && RHS.isSmall()) {
+ std::copy(RHS.SmallArray, RHS.SmallArray+RHS.CurArraySize,
+ this->SmallArray);
+ std::swap(this->NumElements, RHS.NumElements);
+ std::swap(this->CurArraySize, RHS.CurArraySize);
+ RHS.CurArray = this->CurArray;
+ RHS.NumTombstones = this->NumTombstones;
+ this->CurArray = this->SmallArray;
+ this->NumTombstones = 0;
+ return;
+ }
+
+ // If only LHS is small, copy the small elements into RHS and move the pointer
+ // from RHS to LHS.
+ if (this->isSmall() && !RHS.isSmall()) {
+ std::copy(this->SmallArray, this->SmallArray+this->CurArraySize,
+ RHS.SmallArray);
+ std::swap(RHS.NumElements, this->NumElements);
+ std::swap(RHS.CurArraySize, this->CurArraySize);
+ this->CurArray = RHS.CurArray;
+ this->NumTombstones = RHS.NumTombstones;
+ RHS.CurArray = RHS.SmallArray;
+ RHS.NumTombstones = 0;
+ return;
+ }
+
+ // Both are small: just swap the small elements.
+ assert(this->isSmall() && RHS.isSmall());
+ assert(this->CurArraySize == RHS.CurArraySize);
+ std::swap_ranges(this->SmallArray, this->SmallArray+this->CurArraySize,
+ RHS.SmallArray);
+ std::swap(this->NumElements, RHS.NumElements);
+}
+
SmallPtrSetImpl::~SmallPtrSetImpl() {
if (!isSmall())
free(CurArray);
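
A short sketch (not from the tree) of what the new SmallPtrSetImpl::swap enables, assuming the header change in this commit exposes a matching SmallPtrSet::swap member; swapSets is a hypothetical helper.

    #include "llvm/ADT/SmallPtrSet.h"

    using namespace llvm;

    void swapSets(int *A, int *B) {
      SmallPtrSet<int *, 4> X, Y; // identical small sizes, per the FIXME above
      X.insert(A);
      Y.insert(B);
      // Once both sets have outgrown their inline storage this is a
      // constant-time pointer swap; otherwise the small elements are copied.
      X.swap(Y);
    }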
diff --git a/contrib/llvm/lib/Support/SourceMgr.cpp b/contrib/llvm/lib/Support/SourceMgr.cpp
index de042a9f..bbe36b2 100644
--- a/contrib/llvm/lib/Support/SourceMgr.cpp
+++ b/contrib/llvm/lib/Support/SourceMgr.cpp
@@ -140,8 +140,9 @@ void SourceMgr::PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const {
///
/// @param Type - If non-null, the kind of message (e.g., "error") which is
/// prefixed to the message.
-SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, const Twine &Msg,
- const char *Type, bool ShowLine) const {
+SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
+ const Twine &Msg,
+ ArrayRef<SMRange> Ranges) const {
// First thing to do: find the current buffer containing the specified
// location.
@@ -156,33 +157,48 @@ SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, const Twine &Msg,
LineStart[-1] != '\n' && LineStart[-1] != '\r')
--LineStart;
- std::string LineStr;
- if (ShowLine) {
- // Get the end of the line.
- const char *LineEnd = Loc.getPointer();
- while (LineEnd != CurMB->getBufferEnd() &&
- LineEnd[0] != '\n' && LineEnd[0] != '\r')
- ++LineEnd;
- LineStr = std::string(LineStart, LineEnd);
- }
-
- std::string PrintedMsg;
- raw_string_ostream OS(PrintedMsg);
- if (Type)
- OS << Type << ": ";
- OS << Msg;
+ // Get the end of the line.
+ const char *LineEnd = Loc.getPointer();
+ while (LineEnd != CurMB->getBufferEnd() &&
+ LineEnd[0] != '\n' && LineEnd[0] != '\r')
+ ++LineEnd;
+ std::string LineStr(LineStart, LineEnd);
+ // Convert any ranges to column ranges that only intersect the line of the
+ // location.
+ SmallVector<std::pair<unsigned, unsigned>, 4> ColRanges;
+ for (unsigned i = 0, e = Ranges.size(); i != e; ++i) {
+ SMRange R = Ranges[i];
+ if (!R.isValid()) continue;
+
+ // If the line doesn't contain any part of the range, then ignore it.
+ if (R.Start.getPointer() > LineEnd || R.End.getPointer() < LineStart)
+ continue;
+
+ // Ignore pieces of the range that go onto other lines.
+ if (R.Start.getPointer() < LineStart)
+ R.Start = SMLoc::getFromPointer(LineStart);
+ if (R.End.getPointer() > LineEnd)
+ R.End = SMLoc::getFromPointer(LineEnd);
+
+ // Translate from SMLoc ranges to column ranges.
+ ColRanges.push_back(std::make_pair(R.Start.getPointer()-LineStart,
+ R.End.getPointer()-LineStart));
+ }
+
return SMDiagnostic(*this, Loc,
CurMB->getBufferIdentifier(), FindLineNumber(Loc, CurBuf),
- Loc.getPointer()-LineStart, OS.str(),
- LineStr, ShowLine);
+ Loc.getPointer()-LineStart, Kind, Msg.str(),
+ LineStr, ColRanges);
}
-void SourceMgr::PrintMessage(SMLoc Loc, const Twine &Msg,
- const char *Type, bool ShowLine) const {
+void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
+ const Twine &Msg, ArrayRef<SMRange> Ranges) const {
+ SMDiagnostic Diagnostic = GetMessage(Loc, Kind, Msg, Ranges);
+
// Report the message with the diagnostic handler if present.
if (DiagHandler) {
- DiagHandler(GetMessage(Loc, Msg, Type, ShowLine), DiagContext);
+ DiagHandler(Diagnostic, DiagContext);
return;
}
@@ -192,14 +208,24 @@ void SourceMgr::PrintMessage(SMLoc Loc, const Twine &Msg,
assert(CurBuf != -1 && "Invalid or unspecified location!");
PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS);
- GetMessage(Loc, Msg, Type, ShowLine).Print(0, OS);
+ Diagnostic.print(0, OS);
}
//===----------------------------------------------------------------------===//
// SMDiagnostic Implementation
//===----------------------------------------------------------------------===//
-void SMDiagnostic::Print(const char *ProgName, raw_ostream &S) const {
+SMDiagnostic::SMDiagnostic(const SourceMgr &sm, SMLoc L, const std::string &FN,
+ int Line, int Col, SourceMgr::DiagKind Kind,
+ const std::string &Msg,
+ const std::string &LineStr,
+ ArrayRef<std::pair<unsigned,unsigned> > Ranges)
+ : SM(&sm), Loc(L), Filename(FN), LineNo(Line), ColumnNo(Col), Kind(Kind),
+ Message(Msg), LineContents(LineStr), Ranges(Ranges.vec()) {
+}
+
+
+void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const {
if (ProgName && ProgName[0])
S << ProgName << ": ";
@@ -217,16 +243,71 @@ void SMDiagnostic::Print(const char *ProgName, raw_ostream &S) const {
S << ": ";
}
+ switch (Kind) {
+ case SourceMgr::DK_Error: S << "error: "; break;
+ case SourceMgr::DK_Warning: S << "warning: "; break;
+ case SourceMgr::DK_Note: S << "note: "; break;
+ }
+
S << Message << '\n';
- if (LineNo != -1 && ColumnNo != -1 && ShowLine) {
- S << LineContents << '\n';
+ if (LineNo == -1 || ColumnNo == -1)
+ return;
- // Print out spaces/tabs before the caret.
- for (unsigned i = 0; i != unsigned(ColumnNo); ++i)
- S << (LineContents[i] == '\t' ? '\t' : ' ');
- S << "^\n";
+ // Build the line with the caret and ranges.
+ std::string CaretLine(LineContents.size()+1, ' ');
+
+ // Expand any ranges.
+ for (unsigned r = 0, e = Ranges.size(); r != e; ++r) {
+ std::pair<unsigned, unsigned> R = Ranges[r];
+ for (unsigned i = R.first,
+ e = std::min(R.second, (unsigned)LineContents.size())+1; i != e; ++i)
+ CaretLine[i] = '~';
+ }
+
+ // Finally, plop on the caret.
+ if (unsigned(ColumnNo) <= LineContents.size())
+ CaretLine[ColumnNo] = '^';
+ else
+ CaretLine[LineContents.size()] = '^';
+
+ // ... and remove trailing whitespace so the output doesn't wrap for it. We
+ // know that the line isn't completely empty because it has the caret in it at
+ // least.
+ CaretLine.erase(CaretLine.find_last_not_of(' ')+1);
+
+ // Print out the source line one character at a time, so we can expand tabs.
+ for (unsigned i = 0, e = LineContents.size(), OutCol = 0; i != e; ++i) {
+ if (LineContents[i] != '\t') {
+ S << LineContents[i];
+ ++OutCol;
+ continue;
+ }
+
+ // If we have a tab, emit at least one space, then round up to 8 columns.
+ do {
+ S << ' ';
+ ++OutCol;
+ } while (OutCol & 7);
+ }
+ S << '\n';
+
+ // Print out the caret line, matching tabs in the source line.
+ for (unsigned i = 0, e = CaretLine.size(), OutCol = 0; i != e; ++i) {
+ if (i >= LineContents.size() || LineContents[i] != '\t') {
+ S << CaretLine[i];
+ ++OutCol;
+ continue;
+ }
+
+ // Okay, we have a tab. Insert the appropriate number of characters.
+ do {
+ S << CaretLine[i];
+ ++OutCol;
+ } while (OutCol & 7);
}
+
+ S << '\n';
}
diff --git a/contrib/llvm/lib/Support/Statistic.cpp b/contrib/llvm/lib/Support/Statistic.cpp
index 1e733d9..d8a6ad3 100644
--- a/contrib/llvm/lib/Support/Statistic.cpp
+++ b/contrib/llvm/lib/Support/Statistic.cpp
@@ -24,6 +24,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Mutex.h"
@@ -72,9 +73,12 @@ void Statistic::RegisterStatistic() {
if (Enabled)
StatInfo->addStatistic(this);
+ TsanHappensBefore(this);
sys::MemoryFence();
// Remember we have been registered.
+ TsanIgnoreWritesBegin();
Initialized = true;
+ TsanIgnoreWritesEnd();
}
}
@@ -126,13 +130,11 @@ void llvm::PrintStatistics(raw_ostream &OS) {
<< "===" << std::string(73, '-') << "===\n\n";
// Print all of the statistics.
- for (size_t i = 0, e = Stats.Stats.size(); i != e; ++i) {
- std::string CountStr = utostr(Stats.Stats[i]->getValue());
- OS << std::string(MaxValLen-CountStr.size(), ' ')
- << CountStr << " " << Stats.Stats[i]->getName()
- << std::string(MaxNameLen-std::strlen(Stats.Stats[i]->getName()), ' ')
- << " - " << Stats.Stats[i]->getDesc() << "\n";
- }
+ for (size_t i = 0, e = Stats.Stats.size(); i != e; ++i)
+ OS << format("%*u %-*s - %s\n",
+ MaxValLen, Stats.Stats[i]->getValue(),
+ MaxNameLen, Stats.Stats[i]->getName(),
+ Stats.Stats[i]->getDesc());
OS << '\n'; // Flush the output stream.
OS.flush();
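
The format() call above packs the old manual padding into one printf-style string. A self-contained sketch of the same idiom (printRow is a hypothetical helper): '*' pulls the field width from the next argument and '-' left-justifies it.

    #include "llvm/Support/Format.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    void printRow(unsigned MaxValLen, unsigned MaxNameLen,
                  unsigned Value, const char *Name, const char *Desc) {
      outs() << format("%*u %-*s - %s\n",
                       MaxValLen, Value, MaxNameLen, Name, Desc);
    }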
diff --git a/contrib/llvm/lib/Support/StreamableMemoryObject.cpp b/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
new file mode 100644
index 0000000..c23f07b
--- /dev/null
+++ b/contrib/llvm/lib/Support/StreamableMemoryObject.cpp
@@ -0,0 +1,140 @@
+//===- StreamableMemoryObject.cpp - Streamable data interface -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/StreamableMemoryObject.h"
+#include <cassert>
+#include <cstring>
+
+
+using namespace llvm;
+
+namespace {
+
+class RawMemoryObject : public StreamableMemoryObject {
+public:
+ RawMemoryObject(const unsigned char *Start, const unsigned char *End) :
+ FirstChar(Start), LastChar(End) {
+ assert(LastChar > FirstChar && "Invalid start/end range");
+ }
+
+ virtual uint64_t getBase() const { return 0; }
+ virtual uint64_t getExtent() const { return LastChar - FirstChar; }
+ virtual int readByte(uint64_t address, uint8_t* ptr) const;
+ virtual int readBytes(uint64_t address,
+ uint64_t size,
+ uint8_t* buf,
+ uint64_t* copied) const;
+ virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const;
+ virtual bool isValidAddress(uint64_t address) const {
+ return validAddress(address);
+ }
+ virtual bool isObjectEnd(uint64_t address) const {return objectEnd(address);}
+
+private:
+ const uint8_t* const FirstChar;
+ const uint8_t* const LastChar;
+
+ // These are implemented as inline functions here to avoid multiple virtual
+ // calls per public function
+ bool validAddress(uint64_t address) const {
+ return static_cast<ptrdiff_t>(address) < LastChar - FirstChar;
+ }
+ bool objectEnd(uint64_t address) const {
+ return static_cast<ptrdiff_t>(address) == LastChar - FirstChar;
+ }
+
+ RawMemoryObject(const RawMemoryObject&); // DO NOT IMPLEMENT
+ void operator=(const RawMemoryObject&); // DO NOT IMPLEMENT
+};
+
+int RawMemoryObject::readByte(uint64_t address, uint8_t* ptr) const {
+ if (!validAddress(address)) return -1;
+ *ptr = *((uint8_t *)(uintptr_t)(address + FirstChar));
+ return 0;
+}
+
+int RawMemoryObject::readBytes(uint64_t address,
+ uint64_t size,
+ uint8_t* buf,
+ uint64_t* copied) const {
+ if (!validAddress(address) || !validAddress(address + size - 1)) return -1;
+ memcpy(buf, (uint8_t *)(uintptr_t)(address + FirstChar), size);
+ if (copied) *copied = size;
+ return size;
+}
+
+const uint8_t *RawMemoryObject::getPointer(uint64_t address,
+ uint64_t size) const {
+ return FirstChar + address;
+}
+} // anonymous namespace
+
+namespace llvm {
+// If the bitcode has a header, then its size is known, and we don't have to
+// block until we actually want to read it.
+bool StreamingMemoryObject::isValidAddress(uint64_t address) const {
+ if (ObjectSize && address < ObjectSize) return true;
+ return fetchToPos(address);
+}
+
+bool StreamingMemoryObject::isObjectEnd(uint64_t address) const {
+ if (ObjectSize) return address == ObjectSize;
+ fetchToPos(address);
+ return address == ObjectSize && address != 0;
+}
+
+uint64_t StreamingMemoryObject::getExtent() const {
+ if (ObjectSize) return ObjectSize;
+ size_t pos = BytesRead + kChunkSize;
+ // keep fetching until we run out of bytes
+ while (fetchToPos(pos)) pos += kChunkSize;
+ return ObjectSize;
+}
+
+int StreamingMemoryObject::readByte(uint64_t address, uint8_t* ptr) const {
+ if (!fetchToPos(address)) return -1;
+ *ptr = Bytes[address + BytesSkipped];
+ return 0;
+}
+
+int StreamingMemoryObject::readBytes(uint64_t address,
+ uint64_t size,
+ uint8_t* buf,
+ uint64_t* copied) const {
+ if (!fetchToPos(address + size - 1)) return -1;
+ memcpy(buf, &Bytes[address + BytesSkipped], size);
+ if (copied) *copied = size;
+ return 0;
+}
+
+bool StreamingMemoryObject::dropLeadingBytes(size_t s) {
+ if (BytesRead < s) return true;
+ BytesSkipped = s;
+ BytesRead -= s;
+ return false;
+}
+
+void StreamingMemoryObject::setKnownObjectSize(size_t size) {
+ ObjectSize = size;
+ Bytes.reserve(size);
+}
+
+StreamableMemoryObject *getNonStreamedMemoryObject(
+ const unsigned char *Start, const unsigned char *End) {
+ return new RawMemoryObject(Start, End);
+}
+
+StreamableMemoryObject::~StreamableMemoryObject() { }
+
+StreamingMemoryObject::StreamingMemoryObject(DataStreamer *streamer) :
+ Bytes(kChunkSize), Streamer(streamer), BytesRead(0), BytesSkipped(0),
+ ObjectSize(0), EOFReached(false) {
+ BytesRead = streamer->GetBytes(&Bytes[0], kChunkSize);
+}
+}
diff --git a/contrib/llvm/lib/Support/StringExtras.cpp b/contrib/llvm/lib/Support/StringExtras.cpp
index 49c5ac4..d77ad7f 100644
--- a/contrib/llvm/lib/Support/StringExtras.cpp
+++ b/contrib/llvm/lib/Support/StringExtras.cpp
@@ -57,24 +57,3 @@ void llvm::SplitString(StringRef Source,
S = getToken(S.second, Delimiters);
}
}
-
-void llvm::StringRef::split(SmallVectorImpl<StringRef> &A,
- StringRef Separators, int MaxSplit,
- bool KeepEmpty) const {
- StringRef rest = *this;
-
- // rest.data() is used to distinguish cases like "a," that splits into
- // "a" + "" and "a" that splits into "a" + 0.
- for (int splits = 0;
- rest.data() != NULL && (MaxSplit < 0 || splits < MaxSplit);
- ++splits) {
- std::pair<llvm::StringRef, llvm::StringRef> p = rest.split(Separators);
-
- if (p.first.size() != 0 || KeepEmpty)
- A.push_back(p.first);
- rest = p.second;
- }
- // If we have a tail left, add it.
- if (rest.data() != NULL && (rest.size() != 0 || KeepEmpty))
- A.push_back(rest);
-}
diff --git a/contrib/llvm/lib/Support/StringMap.cpp b/contrib/llvm/lib/Support/StringMap.cpp
index a1ac512..c131fe0 100644
--- a/contrib/llvm/lib/Support/StringMap.cpp
+++ b/contrib/llvm/lib/Support/StringMap.cpp
@@ -39,11 +39,13 @@ void StringMapImpl::init(unsigned InitSize) {
NumItems = 0;
NumTombstones = 0;
- TheTable = (ItemBucket*)calloc(NumBuckets+1, sizeof(ItemBucket));
-
+ TheTable = (StringMapEntryBase **)calloc(NumBuckets+1,
+ sizeof(StringMapEntryBase **) +
+ sizeof(unsigned));
+
// Allocate one extra bucket, set it to look filled so the iterators stop at
// end.
- TheTable[NumBuckets].Item = (StringMapEntryBase*)2;
+ TheTable[NumBuckets] = (StringMapEntryBase*)2;
}
@@ -60,29 +62,29 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name) {
}
unsigned FullHashValue = HashString(Name);
unsigned BucketNo = FullHashValue & (HTSize-1);
-
+ unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1);
+
unsigned ProbeAmt = 1;
int FirstTombstone = -1;
while (1) {
- ItemBucket &Bucket = TheTable[BucketNo];
- StringMapEntryBase *BucketItem = Bucket.Item;
+ StringMapEntryBase *BucketItem = TheTable[BucketNo];
// If we found an empty bucket, this key isn't in the table yet, return it.
if (BucketItem == 0) {
// If we found a tombstone, we want to reuse the tombstone instead of an
// empty bucket. This reduces probing.
if (FirstTombstone != -1) {
- TheTable[FirstTombstone].FullHashValue = FullHashValue;
+ HashTable[FirstTombstone] = FullHashValue;
return FirstTombstone;
}
- Bucket.FullHashValue = FullHashValue;
+ HashTable[BucketNo] = FullHashValue;
return BucketNo;
}
if (BucketItem == getTombstoneVal()) {
// Skip over tombstones. However, remember the first one we see.
if (FirstTombstone == -1) FirstTombstone = BucketNo;
- } else if (Bucket.FullHashValue == FullHashValue) {
+ } else if (HashTable[BucketNo] == FullHashValue) {
// If the full hash value matches, check deeply for a match. The common
// case here is that we are only looking at the buckets (for item info
// being non-null and for the full hash value) not at the items. This
@@ -115,18 +117,18 @@ int StringMapImpl::FindKey(StringRef Key) const {
if (HTSize == 0) return -1; // Really empty table?
unsigned FullHashValue = HashString(Key);
unsigned BucketNo = FullHashValue & (HTSize-1);
-
+ unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1);
+
unsigned ProbeAmt = 1;
while (1) {
- ItemBucket &Bucket = TheTable[BucketNo];
- StringMapEntryBase *BucketItem = Bucket.Item;
+ StringMapEntryBase *BucketItem = TheTable[BucketNo];
// If we found an empty bucket, this key isn't in the table yet, return.
if (BucketItem == 0)
return -1;
if (BucketItem == getTombstoneVal()) {
// Ignore tombstones.
- } else if (Bucket.FullHashValue == FullHashValue) {
+ } else if (HashTable[BucketNo] == FullHashValue) {
// If the full hash value matches, check deeply for a match. The common
// case here is that we are only looking at the buckets (for item info
// being non-null and for the full hash value) not at the items. This
@@ -165,8 +167,8 @@ StringMapEntryBase *StringMapImpl::RemoveKey(StringRef Key) {
int Bucket = FindKey(Key);
if (Bucket == -1) return 0;
- StringMapEntryBase *Result = TheTable[Bucket].Item;
- TheTable[Bucket].Item = getTombstoneVal();
+ StringMapEntryBase *Result = TheTable[Bucket];
+ TheTable[Bucket] = getTombstoneVal();
--NumItems;
++NumTombstones;
assert(NumItems + NumTombstones <= NumBuckets);
@@ -180,6 +182,7 @@ StringMapEntryBase *StringMapImpl::RemoveKey(StringRef Key) {
/// the appropriate mod-of-hashtable-size.
void StringMapImpl::RehashTable() {
unsigned NewSize;
+ unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1);
// If the hash table is now more than 3/4 full, or if fewer than 1/8 of
// the buckets are empty (meaning that many are filled with tombstones),
@@ -194,19 +197,23 @@ void StringMapImpl::RehashTable() {
// Allocate one extra bucket which will always be non-empty. This allows the
// iterators to stop at end.
- ItemBucket *NewTableArray =(ItemBucket*)calloc(NewSize+1, sizeof(ItemBucket));
- NewTableArray[NewSize].Item = (StringMapEntryBase*)2;
-
+ StringMapEntryBase **NewTableArray =
+ (StringMapEntryBase **)calloc(NewSize+1, sizeof(StringMapEntryBase *) +
+ sizeof(unsigned));
+ unsigned *NewHashArray = (unsigned *)(NewTableArray + NewSize + 1);
+ NewTableArray[NewSize] = (StringMapEntryBase*)2;
+
// Rehash all the items into their new buckets. Luckily :) we already have
// the hash values available, so we don't have to rehash any strings.
- for (ItemBucket *IB = TheTable, *E = TheTable+NumBuckets; IB != E; ++IB) {
- if (IB->Item && IB->Item != getTombstoneVal()) {
+ for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+ StringMapEntryBase *Bucket = TheTable[I];
+ if (Bucket && Bucket != getTombstoneVal()) {
// Fast case, bucket available.
- unsigned FullHash = IB->FullHashValue;
+ unsigned FullHash = HashTable[I];
unsigned NewBucket = FullHash & (NewSize-1);
- if (NewTableArray[NewBucket].Item == 0) {
- NewTableArray[FullHash & (NewSize-1)].Item = IB->Item;
- NewTableArray[FullHash & (NewSize-1)].FullHashValue = FullHash;
+ if (NewTableArray[NewBucket] == 0) {
+ NewTableArray[FullHash & (NewSize-1)] = Bucket;
+ NewHashArray[FullHash & (NewSize-1)] = FullHash;
continue;
}
@@ -214,11 +221,11 @@ void StringMapImpl::RehashTable() {
unsigned ProbeSize = 1;
do {
NewBucket = (NewBucket + ProbeSize++) & (NewSize-1);
- } while (NewTableArray[NewBucket].Item);
+ } while (NewTableArray[NewBucket]);
// Finally found a slot. Fill it in.
- NewTableArray[NewBucket].Item = IB->Item;
- NewTableArray[NewBucket].FullHashValue = FullHash;
+ NewTableArray[NewBucket] = Bucket;
+ NewHashArray[NewBucket] = FullHash;
}
}
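
The lookup and rehash changes above all rely on one layout trick: a single calloc now carries NumBuckets+1 entry pointers followed by the cached hash values. A sketch of just that allocation (allocTable is hypothetical, not code from the tree):

    #include <cstdlib>

    struct StringMapEntryBase;

    static StringMapEntryBase **allocTable(unsigned NumBuckets,
                                           unsigned *&HashTable) {
      StringMapEntryBase **Table = (StringMapEntryBase **)
          calloc(NumBuckets + 1,
                 sizeof(StringMapEntryBase *) + sizeof(unsigned));
      // The hash side table starts right after the sentinel bucket.
      HashTable = (unsigned *)(Table + NumBuckets + 1);
      return Table;
    }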
diff --git a/contrib/llvm/lib/Support/StringRef.cpp b/contrib/llvm/lib/Support/StringRef.cpp
index b5b4f94..abe570f 100644
--- a/contrib/llvm/lib/Support/StringRef.cpp
+++ b/contrib/llvm/lib/Support/StringRef.cpp
@@ -10,6 +10,8 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/edit_distance.h"
#include <bitset>
using namespace llvm;
@@ -25,6 +27,12 @@ static char ascii_tolower(char x) {
return x;
}
+static char ascii_toupper(char x) {
+ if (x >= 'a' && x <= 'z')
+ return x - 'a' + 'A';
+ return x;
+}
+
static bool ascii_isdigit(char x) {
return x >= '0' && x <= '9';
}
@@ -78,56 +86,29 @@ int StringRef::compare_numeric(StringRef RHS) const {
unsigned StringRef::edit_distance(llvm::StringRef Other,
bool AllowReplacements,
unsigned MaxEditDistance) {
- // The algorithm implemented below is the "classic"
- // dynamic-programming algorithm for computing the Levenshtein
- // distance, which is described here:
- //
- // http://en.wikipedia.org/wiki/Levenshtein_distance
- //
- // Although the algorithm is typically described using an m x n
- // array, only two rows are used at a time, so this implemenation
- // just keeps two separate vectors for those two rows.
- size_type m = size();
- size_type n = Other.size();
-
- const unsigned SmallBufferSize = 64;
- unsigned SmallBuffer[SmallBufferSize];
- llvm::OwningArrayPtr<unsigned> Allocated;
- unsigned *previous = SmallBuffer;
- if (2*(n + 1) > SmallBufferSize) {
- previous = new unsigned [2*(n+1)];
- Allocated.reset(previous);
- }
- unsigned *current = previous + (n + 1);
-
- for (unsigned i = 0; i <= n; ++i)
- previous[i] = i;
-
- for (size_type y = 1; y <= m; ++y) {
- current[0] = y;
- unsigned BestThisRow = current[0];
-
- for (size_type x = 1; x <= n; ++x) {
- if (AllowReplacements) {
- current[x] = min(previous[x-1] + ((*this)[y-1] == Other[x-1]? 0u:1u),
- min(current[x-1], previous[x])+1);
- }
- else {
- if ((*this)[y-1] == Other[x-1]) current[x] = previous[x-1];
- else current[x] = min(current[x-1], previous[x]) + 1;
- }
- BestThisRow = min(BestThisRow, current[x]);
- }
+ return llvm::ComputeEditDistance(
+ llvm::ArrayRef<char>(data(), size()),
+ llvm::ArrayRef<char>(Other.data(), Other.size()),
+ AllowReplacements, MaxEditDistance);
+}
- if (MaxEditDistance && BestThisRow > MaxEditDistance)
- return MaxEditDistance + 1;
+//===----------------------------------------------------------------------===//
+// String Operations
+//===----------------------------------------------------------------------===//
- unsigned *tmp = current;
- current = previous;
- previous = tmp;
+std::string StringRef::lower() const {
+ std::string Result(size(), char());
+ for (size_type i = 0, e = size(); i != e; ++i) {
+ Result[i] = ascii_tolower(Data[i]);
}
+ return Result;
+}
- unsigned Result = previous[n];
+std::string StringRef::upper() const {
+ std::string Result(size(), char());
+ for (size_type i = 0, e = size(); i != e; ++i) {
+ Result[i] = ascii_toupper(Data[i]);
+ }
return Result;
}
@@ -144,9 +125,35 @@ size_t StringRef::find(StringRef Str, size_t From) const {
size_t N = Str.size();
if (N > Length)
return npos;
- for (size_t e = Length - N + 1, i = min(From, e); i != e; ++i)
- if (substr(i, N).equals(Str))
- return i;
+
+ // For short haystacks or unsupported needles, fall back to the naive algorithm.
+ if (Length < 16 || N > 255 || N == 0) {
+ for (size_t e = Length - N + 1, i = min(From, e); i != e; ++i)
+ if (substr(i, N).equals(Str))
+ return i;
+ return npos;
+ }
+
+ if (From >= Length)
+ return npos;
+
+ // Build the bad char heuristic table, with uint8_t to reduce cache thrashing.
+ uint8_t BadCharSkip[256];
+ std::memset(BadCharSkip, N, 256);
+ for (unsigned i = 0; i != N-1; ++i)
+ BadCharSkip[(uint8_t)Str[i]] = N-1-i;
+
+ unsigned Len = Length-From, Pos = From;
+ while (Len >= N) {
+ if (substr(Pos, N).equals(Str)) // See if this is the correct substring.
+ return Pos;
+
+ // Otherwise skip the appropriate number of bytes.
+ uint8_t Skip = BadCharSkip[(uint8_t)(*this)[Pos+N-1]];
+ Len -= Skip;
+ Pos += Skip;
+ }
+
return npos;
}
@@ -223,6 +230,27 @@ StringRef::size_type StringRef::find_last_of(StringRef Chars,
return npos;
}
+void StringRef::split(SmallVectorImpl<StringRef> &A,
+ StringRef Separators, int MaxSplit,
+ bool KeepEmpty) const {
+ StringRef rest = *this;
+
+ // rest.data() is used to distinguish cases like "a," that splits into
+ // "a" + "" and "a" that splits into "a" + 0.
+ for (int splits = 0;
+ rest.data() != NULL && (MaxSplit < 0 || splits < MaxSplit);
+ ++splits) {
+ std::pair<StringRef, StringRef> p = rest.split(Separators);
+
+ if (KeepEmpty || p.first.size() != 0)
+ A.push_back(p.first);
+ rest = p.second;
+ }
+ // If we have a tail left, add it.
+ if (rest.data() != NULL && (rest.size() != 0 || KeepEmpty))
+ A.push_back(rest);
+}
+
//===----------------------------------------------------------------------===//
// Helpful Algorithms
//===----------------------------------------------------------------------===//
@@ -257,8 +285,8 @@ static unsigned GetAutoSenseRadix(StringRef &Str) {
/// GetAsUnsignedInteger - Workhorse method that converts a integer character
/// sequence of radix up to 36 to an unsigned long long value.
-static bool GetAsUnsignedInteger(StringRef Str, unsigned Radix,
- unsigned long long &Result) {
+bool llvm::getAsUnsignedInteger(StringRef Str, unsigned Radix,
+ unsigned long long &Result) {
// Autosense radix if not specified.
if (Radix == 0)
Radix = GetAutoSenseRadix(Str);
@@ -298,17 +326,13 @@ static bool GetAsUnsignedInteger(StringRef Str, unsigned Radix,
return false;
}
-bool StringRef::getAsInteger(unsigned Radix, unsigned long long &Result) const {
- return GetAsUnsignedInteger(*this, Radix, Result);
-}
-
-
-bool StringRef::getAsInteger(unsigned Radix, long long &Result) const {
+bool llvm::getAsSignedInteger(StringRef Str, unsigned Radix,
+ long long &Result) {
unsigned long long ULLVal;
// Handle positive strings first.
- if (empty() || front() != '-') {
- if (GetAsUnsignedInteger(*this, Radix, ULLVal) ||
+ if (Str.empty() || Str.front() != '-') {
+ if (getAsUnsignedInteger(Str, Radix, ULLVal) ||
// Check for value so large it overflows a signed value.
(long long)ULLVal < 0)
return true;
@@ -317,7 +341,7 @@ bool StringRef::getAsInteger(unsigned Radix, long long &Result) const {
}
// Get the positive part of the value.
- if (GetAsUnsignedInteger(substr(1), Radix, ULLVal) ||
+ if (getAsUnsignedInteger(Str.substr(1), Radix, ULLVal) ||
// Reject values so large they'd overflow as negative signed, but allow
// "-0". This negates the unsigned so that the negative isn't undefined
// on signed overflow.
@@ -328,24 +352,6 @@ bool StringRef::getAsInteger(unsigned Radix, long long &Result) const {
return false;
}
-bool StringRef::getAsInteger(unsigned Radix, int &Result) const {
- long long Val;
- if (getAsInteger(Radix, Val) ||
- (int)Val != Val)
- return true;
- Result = Val;
- return false;
-}
-
-bool StringRef::getAsInteger(unsigned Radix, unsigned &Result) const {
- unsigned long long Val;
- if (getAsInteger(Radix, Val) ||
- (unsigned)Val != Val)
- return true;
- Result = Val;
- return false;
-}
-
bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const {
StringRef Str = *this;
@@ -420,3 +426,9 @@ bool StringRef::getAsInteger(unsigned Radix, APInt &Result) const {
return false;
}
+
+
+// Implementation of StringRef hashing.
+hash_code llvm::hash_value(StringRef S) {
+ return hash_combine_range(S.begin(), S.end());
+}
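
Pulling the pieces of this hunk together, a short sketch (not from the tree) of the relocated split(), the now-public getAsUnsignedInteger(), and the new upper(); demo is a hypothetical function.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringRef.h"

    using namespace llvm;

    bool demo() {
      StringRef S("a,b,,c");
      SmallVector<StringRef, 4> Parts;
      S.split(Parts, ",", /*MaxSplit=*/-1, /*KeepEmpty=*/false); // "a","b","c"

      unsigned long long V;
      if (getAsUnsignedInteger("0x2a", 0, V)) // radix 0 autosenses the prefix
        return false;                         // (true is the error return)
      return S.upper() == "A,B,,C" && V == 42;
    }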
diff --git a/contrib/llvm/lib/Support/TargetRegistry.cpp b/contrib/llvm/lib/Support/TargetRegistry.cpp
index 7497bfe..53c8d84 100644
--- a/contrib/llvm/lib/Support/TargetRegistry.cpp
+++ b/contrib/llvm/lib/Support/TargetRegistry.cpp
@@ -84,7 +84,7 @@ void TargetRegistry::RegisterTarget(Target &T,
}
const Target *TargetRegistry::getClosestTargetForJIT(std::string &Error) {
- const Target *TheTarget = lookupTarget(sys::getHostTriple(), Error);
+ const Target *TheTarget = lookupTarget(sys::getDefaultTargetTriple(), Error);
if (TheTarget && !TheTarget->hasJIT()) {
Error = "No JIT compatible target available for this host";
diff --git a/contrib/llvm/lib/Support/ThreadLocal.cpp b/contrib/llvm/lib/Support/ThreadLocal.cpp
index fdb251c..08b12b6 100644
--- a/contrib/llvm/lib/Support/ThreadLocal.cpp
+++ b/contrib/llvm/lib/Support/ThreadLocal.cpp
@@ -19,7 +19,7 @@
//=== independent code.
//===----------------------------------------------------------------------===//
-#if !defined(ENABLE_THREADS) || ENABLE_THREADS == 0
+#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
// Define all methods as no-ops if threading is explicitly disabled
namespace llvm {
using namespace sys;
diff --git a/contrib/llvm/lib/Support/Threading.cpp b/contrib/llvm/lib/Support/Threading.cpp
index 8f0bb93..7483225 100644
--- a/contrib/llvm/lib/Support/Threading.cpp
+++ b/contrib/llvm/lib/Support/Threading.cpp
@@ -24,7 +24,7 @@ static bool multithreaded_mode = false;
static sys::Mutex* global_lock = 0;
bool llvm::llvm_start_multithreaded() {
-#if ENABLE_THREADS != 0
+#if LLVM_ENABLE_THREADS != 0
assert(!multithreaded_mode && "Already multithreaded!");
multithreaded_mode = true;
global_lock = new sys::Mutex(true);
@@ -39,7 +39,7 @@ bool llvm::llvm_start_multithreaded() {
}
void llvm::llvm_stop_multithreaded() {
-#if ENABLE_THREADS != 0
+#if LLVM_ENABLE_THREADS != 0
assert(multithreaded_mode && "Not currently multithreaded!");
// We fence here to ensure that all threaded operations are complete BEFORE we
@@ -63,7 +63,7 @@ void llvm::llvm_release_global_lock() {
if (multithreaded_mode) global_lock->release();
}
-#if ENABLE_THREADS != 0 && defined(HAVE_PTHREAD_H)
+#if LLVM_ENABLE_THREADS != 0 && defined(HAVE_PTHREAD_H)
#include <pthread.h>
struct ThreadInfo {
@@ -102,7 +102,7 @@ void llvm::llvm_execute_on_thread(void (*Fn)(void*), void *UserData,
error:
::pthread_attr_destroy(&Attr);
}
-#elif ENABLE_THREADS!=0 && defined(LLVM_ON_WIN32)
+#elif LLVM_ENABLE_THREADS!=0 && defined(LLVM_ON_WIN32)
#include "Windows/Windows.h"
#include <process.h>
diff --git a/contrib/llvm/lib/Support/Timer.cpp b/contrib/llvm/lib/Support/Timer.cpp
index a9ed5ee..598e8ad 100644
--- a/contrib/llvm/lib/Support/Timer.cpp
+++ b/contrib/llvm/lib/Support/Timer.cpp
@@ -168,10 +168,8 @@ void Timer::stopTimer() {
static void printVal(double Val, double Total, raw_ostream &OS) {
if (Total < 1e-7) // Avoid dividing by zero.
OS << " ----- ";
- else {
- OS << " " << format("%7.4f", Val) << " (";
- OS << format("%5.1f", Val*100/Total) << "%)";
- }
+ else
+ OS << format(" %7.4f (%5.1f%%)", Val, Val*100/Total);
}
void TimeRecord::print(const TimeRecord &Total, raw_ostream &OS) const {
@@ -186,7 +184,7 @@ void TimeRecord::print(const TimeRecord &Total, raw_ostream &OS) const {
OS << " ";
if (Total.getMemUsed())
- OS << format("%9lld", (long long)getMemUsed()) << " ";
+ OS << format("%9" PRId64 " ", (int64_t)getMemUsed());
}
@@ -332,11 +330,9 @@ void TimerGroup::PrintQueuedTimers(raw_ostream &OS) {
// If this is not a collection of ungrouped times, print the total time.
// Ungrouped timers don't really make sense to add up. We still print the
// TOTAL line to make the percentages make sense.
- if (this != DefaultTimerGroup) {
- OS << " Total Execution Time: ";
- OS << format("%5.4f", Total.getProcessTime()) << " seconds (";
- OS << format("%5.4f", Total.getWallTime()) << " wall clock)\n";
- }
+ if (this != DefaultTimerGroup)
+ OS << format(" Total Execution Time: %5.4f seconds (%5.4f wall clock)\n",
+ Total.getProcessTime(), Total.getWallTime());
OS << '\n';
if (Total.getUserTime())
diff --git a/contrib/llvm/lib/Support/Triple.cpp b/contrib/llvm/lib/Support/Triple.cpp
index c61af37..44a1b38 100644
--- a/contrib/llvm/lib/Support/Triple.cpp
+++ b/contrib/llvm/lib/Support/Triple.cpp
@@ -9,19 +9,19 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cstring>
using namespace llvm;
const char *Triple::getArchTypeName(ArchType Kind) {
switch (Kind) {
- case InvalidArch: return "<invalid>";
case UnknownArch: return "unknown";
- case alpha: return "alpha";
case arm: return "arm";
- case bfin: return "bfin";
case cellspu: return "cellspu";
+ case hexagon: return "hexagon";
case mips: return "mips";
case mipsel: return "mipsel";
case mips64: return "mips64";
@@ -29,9 +29,9 @@ const char *Triple::getArchTypeName(ArchType Kind) {
case msp430: return "msp430";
case ppc64: return "powerpc64";
case ppc: return "powerpc";
+ case r600: return "r600";
case sparc: return "sparc";
case sparcv9: return "sparcv9";
- case systemz: return "s390x";
case tce: return "tce";
case thumb: return "thumb";
case x86: return "i386";
@@ -44,7 +44,7 @@ const char *Triple::getArchTypeName(ArchType Kind) {
case amdil: return "amdil";
}
- return "<invalid>";
+ llvm_unreachable("Invalid ArchType!");
}
const char *Triple::getArchTypePrefix(ArchType Kind) {
@@ -52,13 +52,9 @@ const char *Triple::getArchTypePrefix(ArchType Kind) {
default:
return 0;
- case alpha: return "alpha";
-
case arm:
case thumb: return "arm";
- case bfin: return "bfin";
-
case cellspu: return "spu";
case ppc64:
@@ -66,6 +62,10 @@ const char *Triple::getArchTypePrefix(ArchType Kind) {
case mblaze: return "mblaze";
+ case hexagon: return "hexagon";
+
+ case r600: return "r600";
+
case sparcv9:
case sparc: return "sparc";
@@ -88,9 +88,11 @@ const char *Triple::getVendorTypeName(VendorType Kind) {
case Apple: return "apple";
case PC: return "pc";
case SCEI: return "scei";
+ case BGP: return "bgp";
+ case BGQ: return "bgq";
}
- return "<invalid>";
+ llvm_unreachable("Invalid VendorType!");
}
const char *Triple::getOSTypeName(OSType Kind) {
@@ -110,83 +112,59 @@ const char *Triple::getOSTypeName(OSType Kind) {
case MinGW32: return "mingw32";
case NetBSD: return "netbsd";
case OpenBSD: return "openbsd";
- case Psp: return "psp";
case Solaris: return "solaris";
case Win32: return "win32";
case Haiku: return "haiku";
case Minix: return "minix";
case RTEMS: return "rtems";
case NativeClient: return "nacl";
+ case CNK: return "cnk";
}
- return "<invalid>";
+ llvm_unreachable("Invalid OSType");
}
const char *Triple::getEnvironmentTypeName(EnvironmentType Kind) {
switch (Kind) {
case UnknownEnvironment: return "unknown";
case GNU: return "gnu";
+ case GNUEABIHF: return "gnueabihf";
case GNUEABI: return "gnueabi";
case EABI: return "eabi";
case MachO: return "macho";
+ case ANDROIDEABI: return "androideabi";
}
- return "<invalid>";
+ llvm_unreachable("Invalid EnvironmentType!");
}
Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
- if (Name == "alpha")
- return alpha;
- if (Name == "arm")
- return arm;
- if (Name == "bfin")
- return bfin;
- if (Name == "cellspu")
- return cellspu;
- if (Name == "mips")
- return mips;
- if (Name == "mipsel")
- return mipsel;
- if (Name == "mips64")
- return mips64;
- if (Name == "mips64el")
- return mips64el;
- if (Name == "msp430")
- return msp430;
- if (Name == "ppc64")
- return ppc64;
- if (Name == "ppc32")
- return ppc;
- if (Name == "ppc")
- return ppc;
- if (Name == "mblaze")
- return mblaze;
- if (Name == "sparc")
- return sparc;
- if (Name == "sparcv9")
- return sparcv9;
- if (Name == "systemz")
- return systemz;
- if (Name == "tce")
- return tce;
- if (Name == "thumb")
- return thumb;
- if (Name == "x86")
- return x86;
- if (Name == "x86-64")
- return x86_64;
- if (Name == "xcore")
- return xcore;
- if (Name == "ptx32")
- return ptx32;
- if (Name == "ptx64")
- return ptx64;
- if (Name == "le32")
- return le32;
- if (Name == "amdil")
- return amdil;
-
- return UnknownArch;
+ return StringSwitch<Triple::ArchType>(Name)
+ .Case("arm", arm)
+ .Case("cellspu", cellspu)
+ .Case("mips", mips)
+ .Case("mipsel", mipsel)
+ .Case("mips64", mips64)
+ .Case("mips64el", mips64el)
+ .Case("msp430", msp430)
+ .Case("ppc64", ppc64)
+ .Case("ppc32", ppc)
+ .Case("ppc", ppc)
+ .Case("mblaze", mblaze)
+ .Case("r600", r600)
+ .Case("hexagon", hexagon)
+ .Case("sparc", sparc)
+ .Case("sparcv9", sparcv9)
+ .Case("tce", tce)
+ .Case("thumb", thumb)
+ .Case("x86", x86)
+ .Case("x86-64", x86_64)
+ .Case("xcore", xcore)
+ .Case("ptx32", ptx32)
+ .Case("ptx64", ptx64)
+ .Case("le32", le32)
+ .Case("amdil", amdil)
+ .Default(UnknownArch);
}
Triple::ArchType Triple::getArchTypeForDarwinArchName(StringRef Str) {
@@ -202,36 +180,22 @@ Triple::ArchType Triple::getArchTypeForDarwinArchName(StringRef Str) {
// This code must be kept in sync with Clang's Darwin specific argument
// translation.
- if (Str == "ppc" || Str == "ppc601" || Str == "ppc603" || Str == "ppc604" ||
- Str == "ppc604e" || Str == "ppc750" || Str == "ppc7400" ||
- Str == "ppc7450" || Str == "ppc970")
- return Triple::ppc;
-
- if (Str == "ppc64")
- return Triple::ppc64;
-
- if (Str == "i386" || Str == "i486" || Str == "i486SX" || Str == "pentium" ||
- Str == "i586" || Str == "pentpro" || Str == "i686" || Str == "pentIIm3" ||
- Str == "pentIIm5" || Str == "pentium4")
- return Triple::x86;
-
- if (Str == "x86_64")
- return Triple::x86_64;
-
- // This is derived from the driver driver.
- if (Str == "arm" || Str == "armv4t" || Str == "armv5" || Str == "xscale" ||
- Str == "armv6" || Str == "armv7" || Str == "armv7f" || Str == "armv7k" ||
- Str == "armv7s")
- return Triple::arm;
-
- if (Str == "ptx32")
- return Triple::ptx32;
- if (Str == "ptx64")
- return Triple::ptx64;
- if (Str == "amdil")
- return Triple::amdil;
-
- return Triple::UnknownArch;
+ return StringSwitch<ArchType>(Str)
+ .Cases("ppc", "ppc601", "ppc603", "ppc604", "ppc604e", Triple::ppc)
+ .Cases("ppc750", "ppc7400", "ppc7450", "ppc970", Triple::ppc)
+ .Case("ppc64", Triple::ppc64)
+ .Cases("i386", "i486", "i486SX", "i586", "i686", Triple::x86)
+ .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4",
+ Triple::x86)
+ .Case("x86_64", Triple::x86_64)
+ // This is derived from the driver driver.
+ .Cases("arm", "armv4t", "armv5", "armv6", Triple::arm)
+ .Cases("armv7", "armv7f", "armv7k", "armv7s", "xscale", Triple::arm)
+ .Case("r600", Triple::r600)
+ .Case("ptx32", Triple::ptx32)
+ .Case("ptx64", Triple::ptx64)
+ .Case("amdil", Triple::amdil)
+ .Default(Triple::UnknownArch);
}
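
Each table in this file now follows the same StringSwitch pattern: cases are tested in order and .Default supplies the fallback. A self-contained sketch with a hypothetical Color enum:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    using namespace llvm;

    enum Color { Red, Green, Blue, UnknownColor };

    Color parseColor(StringRef Name) {
      return StringSwitch<Color>(Name)
          .Case("red", Red)
          .Cases("green", "lime", Green) // several spellings, one value
          .StartsWith("blue", Blue)      // prefix match, like "armv" above
          .Default(UnknownColor);
    }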
// Returns architecture name that is understood by the target assembler.
@@ -239,188 +203,150 @@ const char *Triple::getArchNameForAssembler() {
if (!isOSDarwin() && getVendor() != Triple::Apple)
return NULL;
- StringRef Str = getArchName();
- if (Str == "i386")
- return "i386";
- if (Str == "x86_64")
- return "x86_64";
- if (Str == "powerpc")
- return "ppc";
- if (Str == "powerpc64")
- return "ppc64";
- if (Str == "mblaze" || Str == "microblaze")
- return "mblaze";
- if (Str == "arm")
- return "arm";
- if (Str == "armv4t" || Str == "thumbv4t")
- return "armv4t";
- if (Str == "armv5" || Str == "armv5e" || Str == "thumbv5"
- || Str == "thumbv5e")
- return "armv5";
- if (Str == "armv6" || Str == "thumbv6")
- return "armv6";
- if (Str == "armv7" || Str == "thumbv7")
- return "armv7";
- if (Str == "ptx32")
- return "ptx32";
- if (Str == "ptx64")
- return "ptx64";
- if (Str == "le32")
- return "le32";
- if (Str == "amdil")
- return "amdil";
- return NULL;
+ return StringSwitch<const char*>(getArchName())
+ .Case("i386", "i386")
+ .Case("x86_64", "x86_64")
+ .Case("powerpc", "ppc")
+ .Case("powerpc64", "ppc64")
+ .Cases("mblaze", "microblaze", "mblaze")
+ .Case("arm", "arm")
+ .Cases("armv4t", "thumbv4t", "armv4t")
+ .Cases("armv5", "armv5e", "thumbv5", "thumbv5e", "armv5")
+ .Cases("armv6", "thumbv6", "armv6")
+ .Cases("armv7", "thumbv7", "armv7")
+ .Case("r600", "r600")
+ .Case("ptx32", "ptx32")
+ .Case("ptx64", "ptx64")
+ .Case("le32", "le32")
+ .Case("amdil", "amdil")
+ .Default(NULL);
}
-//
+static Triple::ArchType parseArch(StringRef ArchName) {
+ return StringSwitch<Triple::ArchType>(ArchName)
+ .Cases("i386", "i486", "i586", "i686", Triple::x86)
+ // FIXME: Do we need to support these?
+ .Cases("i786", "i886", "i986", Triple::x86)
+ .Cases("amd64", "x86_64", Triple::x86_64)
+ .Case("powerpc", Triple::ppc)
+ .Cases("powerpc64", "ppu", Triple::ppc64)
+ .Case("mblaze", Triple::mblaze)
+ .Cases("arm", "xscale", Triple::arm)
+ // FIXME: It would be good to replace these with explicit names for all the
+ // various suffixes supported.
+ .StartsWith("armv", Triple::arm)
+ .Case("thumb", Triple::thumb)
+ .StartsWith("thumbv", Triple::thumb)
+ .Cases("spu", "cellspu", Triple::cellspu)
+ .Case("msp430", Triple::msp430)
+ .Cases("mips", "mipseb", "mipsallegrex", Triple::mips)
+ .Cases("mipsel", "mipsallegrexel", Triple::mipsel)
+ .Cases("mips64", "mips64eb", Triple::mips64)
+ .Case("mips64el", Triple::mips64el)
+ .Case("r600", Triple::r600)
+ .Case("hexagon", Triple::hexagon)
+ .Case("sparc", Triple::sparc)
+ .Case("sparcv9", Triple::sparcv9)
+ .Case("tce", Triple::tce)
+ .Case("xcore", Triple::xcore)
+ .Case("ptx32", Triple::ptx32)
+ .Case("ptx64", Triple::ptx64)
+ .Case("le32", Triple::le32)
+ .Case("amdil", Triple::amdil)
+ .Default(Triple::UnknownArch);
+}
-Triple::ArchType Triple::ParseArch(StringRef ArchName) {
- if (ArchName.size() == 4 && ArchName[0] == 'i' &&
- ArchName[2] == '8' && ArchName[3] == '6' &&
- ArchName[1] - '3' < 6) // i[3-9]86
- return x86;
- else if (ArchName == "amd64" || ArchName == "x86_64")
- return x86_64;
- else if (ArchName == "bfin")
- return bfin;
- else if (ArchName == "powerpc")
- return ppc;
- else if ((ArchName == "powerpc64") || (ArchName == "ppu"))
- return ppc64;
- else if (ArchName == "mblaze")
- return mblaze;
- else if (ArchName == "arm" ||
- ArchName.startswith("armv") ||
- ArchName == "xscale")
- return arm;
- else if (ArchName == "thumb" ||
- ArchName.startswith("thumbv"))
- return thumb;
- else if (ArchName.startswith("alpha"))
- return alpha;
- else if (ArchName == "spu" || ArchName == "cellspu")
- return cellspu;
- else if (ArchName == "msp430")
- return msp430;
- else if (ArchName == "mips" || ArchName == "mipseb" ||
- ArchName == "mipsallegrex")
- return mips;
- else if (ArchName == "mipsel" || ArchName == "mipsallegrexel" ||
- ArchName == "psp")
- return mipsel;
- else if (ArchName == "mips64" || ArchName == "mips64eb")
- return mips64;
- else if (ArchName == "mips64el")
- return mips64el;
- else if (ArchName == "sparc")
- return sparc;
- else if (ArchName == "sparcv9")
- return sparcv9;
- else if (ArchName == "s390x")
- return systemz;
- else if (ArchName == "tce")
- return tce;
- else if (ArchName == "xcore")
- return xcore;
- else if (ArchName == "ptx32")
- return ptx32;
- else if (ArchName == "ptx64")
- return ptx64;
- else if (ArchName == "le32")
- return le32;
- else if (ArchName == "amdil")
- return amdil;
- else
- return UnknownArch;
+static Triple::VendorType parseVendor(StringRef VendorName) {
+ return StringSwitch<Triple::VendorType>(VendorName)
+ .Case("apple", Triple::Apple)
+ .Case("pc", Triple::PC)
+ .Case("scei", Triple::SCEI)
+ .Case("bgp", Triple::BGP)
+ .Case("bgq", Triple::BGQ)
+ .Default(Triple::UnknownVendor);
}
-Triple::VendorType Triple::ParseVendor(StringRef VendorName) {
- if (VendorName == "apple")
- return Apple;
- else if (VendorName == "pc")
- return PC;
- else if (VendorName == "scei")
- return SCEI;
- else
- return UnknownVendor;
-}
-
-Triple::OSType Triple::ParseOS(StringRef OSName) {
- if (OSName.startswith("auroraux"))
- return AuroraUX;
- else if (OSName.startswith("cygwin"))
- return Cygwin;
- else if (OSName.startswith("darwin"))
- return Darwin;
- else if (OSName.startswith("dragonfly"))
- return DragonFly;
- else if (OSName.startswith("freebsd"))
- return FreeBSD;
- else if (OSName.startswith("ios"))
- return IOS;
- else if (OSName.startswith("kfreebsd"))
- return KFreeBSD;
- else if (OSName.startswith("linux"))
- return Linux;
- else if (OSName.startswith("lv2"))
- return Lv2;
- else if (OSName.startswith("macosx"))
- return MacOSX;
- else if (OSName.startswith("mingw32"))
- return MinGW32;
- else if (OSName.startswith("netbsd"))
- return NetBSD;
- else if (OSName.startswith("openbsd"))
- return OpenBSD;
- else if (OSName.startswith("psp"))
- return Psp;
- else if (OSName.startswith("solaris"))
- return Solaris;
- else if (OSName.startswith("win32"))
- return Win32;
- else if (OSName.startswith("haiku"))
- return Haiku;
- else if (OSName.startswith("minix"))
- return Minix;
- else if (OSName.startswith("rtems"))
- return RTEMS;
- else if (OSName.startswith("nacl"))
- return NativeClient;
- else
- return UnknownOS;
-}
-
-Triple::EnvironmentType Triple::ParseEnvironment(StringRef EnvironmentName) {
- if (EnvironmentName.startswith("eabi"))
- return EABI;
- else if (EnvironmentName.startswith("gnueabi"))
- return GNUEABI;
- else if (EnvironmentName.startswith("gnu"))
- return GNU;
- else if (EnvironmentName.startswith("macho"))
- return MachO;
- else
- return UnknownEnvironment;
+static Triple::OSType parseOS(StringRef OSName) {
+ return StringSwitch<Triple::OSType>(OSName)
+ .StartsWith("auroraux", Triple::AuroraUX)
+ .StartsWith("cygwin", Triple::Cygwin)
+ .StartsWith("darwin", Triple::Darwin)
+ .StartsWith("dragonfly", Triple::DragonFly)
+ .StartsWith("freebsd", Triple::FreeBSD)
+ .StartsWith("ios", Triple::IOS)
+ .StartsWith("kfreebsd", Triple::KFreeBSD)
+ .StartsWith("linux", Triple::Linux)
+ .StartsWith("lv2", Triple::Lv2)
+ .StartsWith("macosx", Triple::MacOSX)
+ .StartsWith("mingw32", Triple::MinGW32)
+ .StartsWith("netbsd", Triple::NetBSD)
+ .StartsWith("openbsd", Triple::OpenBSD)
+ .StartsWith("solaris", Triple::Solaris)
+ .StartsWith("win32", Triple::Win32)
+ .StartsWith("haiku", Triple::Haiku)
+ .StartsWith("minix", Triple::Minix)
+ .StartsWith("rtems", Triple::RTEMS)
+ .StartsWith("nacl", Triple::NativeClient)
+ .StartsWith("cnk", Triple::CNK)
+ .Default(Triple::UnknownOS);
}
-void Triple::Parse() const {
- assert(!isInitialized() && "Invalid parse call.");
+static Triple::EnvironmentType parseEnvironment(StringRef EnvironmentName) {
+ return StringSwitch<Triple::EnvironmentType>(EnvironmentName)
+ .StartsWith("eabi", Triple::EABI)
+ .StartsWith("gnueabihf", Triple::GNUEABIHF)
+ .StartsWith("gnueabi", Triple::GNUEABI)
+ .StartsWith("gnu", Triple::GNU)
+ .StartsWith("macho", Triple::MachO)
+ .StartsWith("androideabi", Triple::ANDROIDEABI)
+ .Default(Triple::UnknownEnvironment);
+}
- Arch = ParseArch(getArchName());
- Vendor = ParseVendor(getVendorName());
- OS = ParseOS(getOSName());
- Environment = ParseEnvironment(getEnvironmentName());
+/// \brief Construct a triple from the string representation provided.
+///
+/// This stores the string representation and parses the various pieces into
+/// enum members.
+Triple::Triple(const Twine &Str)
+ : Data(Str.str()),
+ Arch(parseArch(getArchName())),
+ Vendor(parseVendor(getVendorName())),
+ OS(parseOS(getOSName())),
+ Environment(parseEnvironment(getEnvironmentName())) {
+}
- assert(isInitialized() && "Failed to initialize!");
+/// \brief Construct a triple from string representations of the architecture,
+/// vendor, and OS.
+///
+/// This joins each argument into a canonical string representation and parses
+/// them into enum members. It leaves the environment unknown and omits it from
+/// the string representation.
+Triple::Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr)
+ : Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr).str()),
+ Arch(parseArch(ArchStr.str())),
+ Vendor(parseVendor(VendorStr.str())),
+ OS(parseOS(OSStr.str())),
+ Environment() {
+}
+
+/// \brief Construct a triple from string representations of the architecture,
+/// vendor, OS, and environment.
+///
+/// This joins each argument into a canonical string representation and parses
+/// them into enum members.
+Triple::Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
+ const Twine &EnvironmentStr)
+ : Data((ArchStr + Twine('-') + VendorStr + Twine('-') + OSStr + Twine('-') +
+ EnvironmentStr).str()),
+ Arch(parseArch(ArchStr.str())),
+ Vendor(parseVendor(VendorStr.str())),
+ OS(parseOS(OSStr.str())),
+ Environment(parseEnvironment(EnvironmentStr.str())) {
}
std::string Triple::normalize(StringRef Str) {
// Parse into components.
SmallVector<StringRef, 4> Components;
- for (size_t First = 0, Last = 0; Last != StringRef::npos; First = Last + 1) {
- Last = Str.find('-', First);
- Components.push_back(Str.slice(First, Last));
- }
+ Str.split(Components, "-");
// If the first component corresponds to a known architecture, preferentially
// use it for the architecture. If the second component corresponds to a
@@ -429,16 +355,16 @@ std::string Triple::normalize(StringRef Str) {
// valid os.
ArchType Arch = UnknownArch;
if (Components.size() > 0)
- Arch = ParseArch(Components[0]);
+ Arch = parseArch(Components[0]);
VendorType Vendor = UnknownVendor;
if (Components.size() > 1)
- Vendor = ParseVendor(Components[1]);
+ Vendor = parseVendor(Components[1]);
OSType OS = UnknownOS;
if (Components.size() > 2)
- OS = ParseOS(Components[2]);
+ OS = parseOS(Components[2]);
EnvironmentType Environment = UnknownEnvironment;
if (Components.size() > 3)
- Environment = ParseEnvironment(Components[3]);
+ Environment = parseEnvironment(Components[3]);
// Note which components are already in their final position. These will not
// be moved.
@@ -464,22 +390,21 @@ std::string Triple::normalize(StringRef Str) {
bool Valid = false;
StringRef Comp = Components[Idx];
switch (Pos) {
- default:
- assert(false && "unexpected component type!");
+ default: llvm_unreachable("unexpected component type!");
case 0:
- Arch = ParseArch(Comp);
+ Arch = parseArch(Comp);
Valid = Arch != UnknownArch;
break;
case 1:
- Vendor = ParseVendor(Comp);
+ Vendor = parseVendor(Comp);
Valid = Vendor != UnknownVendor;
break;
case 2:
- OS = ParseOS(Comp);
+ OS = parseOS(Comp);
Valid = OS != UnknownOS;
break;
case 3:
- Environment = ParseEnvironment(Comp);
+ Environment = parseEnvironment(Comp);
Valid = Environment != UnknownEnvironment;
break;
}
@@ -500,7 +425,8 @@ std::string Triple::normalize(StringRef Str) {
// components to the right.
for (unsigned i = Pos; !CurrentComponent.empty(); ++i) {
// Skip over any fixed components.
- while (i < array_lengthof(Found) && Found[i]) ++i;
+ while (i < array_lengthof(Found) && Found[i])
+ ++i;
// Place the component at the new position, getting the component
// that was at this position - it will be moved right.
std::swap(CurrentComponent, Components[i]);
@@ -528,7 +454,8 @@ std::string Triple::normalize(StringRef Str) {
Components.push_back(CurrentComponent);
// Advance Idx to the component's new position.
- while (++Idx < array_lengthof(Found) && Found[Idx]) {}
+ while (++Idx < array_lengthof(Found) && Found[Idx])
+ ;
} while (Idx < Pos); // Add more until the final position is reached.
}
assert(Pos < Components.size() && Components[Pos] == Comp &&
@@ -618,9 +545,47 @@ void Triple::getOSVersion(unsigned &Major, unsigned &Minor,
}
}
+bool Triple::getMacOSXVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const {
+ getOSVersion(Major, Minor, Micro);
+
+ switch (getOS()) {
+ default: llvm_unreachable("unexpected OS for Darwin triple");
+ case Darwin:
+ // Default to darwin8, i.e., MacOSX 10.4.
+ if (Major == 0)
+ Major = 8;
+ // Darwin version numbers are skewed from OS X versions.
+ if (Major < 4)
+ return false;
+ Micro = 0;
+ Minor = Major - 4;
+ Major = 10;
+ break;
+ case MacOSX:
+ // Default to 10.4.
+ if (Major == 0) {
+ Major = 10;
+ Minor = 4;
+ }
+ if (Major != 10)
+ return false;
+ break;
+ case IOS:
+ // Ignore the version from the triple. This is only handled because the
+ // clang driver combines OS X and IOS support into a common Darwin
+ // toolchain that wants to know the OS X version number even when targeting
+ // IOS.
+ Major = 10;
+ Minor = 4;
+ Micro = 0;
+ break;
+ }
+ return true;
+}
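
A sketch (not from the patch) of how a caller might consume getMacOSXVersion(); the 10.7 threshold is an arbitrary example:

  #include "llvm/ADT/Triple.h"
  using namespace llvm;

  bool targetsAtLeastMacOSX10_7(const Triple &T) {
    unsigned Major, Minor, Micro;
    if (!T.getMacOSXVersion(Major, Minor, Micro))
      return false; // triple does not map onto an OS X version
    return Major > 10 || (Major == 10 && Minor >= 7);
  }
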
+
void Triple::setTriple(const Twine &Str) {
- Data = Str.str();
- Arch = InvalidArch;
+ *this = Triple(Str);
}
void Triple::setArch(ArchType Kind) {
@@ -670,3 +635,126 @@ void Triple::setEnvironmentName(StringRef Str) {
void Triple::setOSAndEnvironmentName(StringRef Str) {
setTriple(getArchName() + "-" + getVendorName() + "-" + Str);
}
+
+static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
+ switch (Arch) {
+ case llvm::Triple::UnknownArch:
+ return 0;
+
+ case llvm::Triple::msp430:
+ return 16;
+
+ case llvm::Triple::amdil:
+ case llvm::Triple::arm:
+ case llvm::Triple::cellspu:
+ case llvm::Triple::hexagon:
+ case llvm::Triple::le32:
+ case llvm::Triple::mblaze:
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ptx32:
+ case llvm::Triple::r600:
+ case llvm::Triple::sparc:
+ case llvm::Triple::tce:
+ case llvm::Triple::thumb:
+ case llvm::Triple::x86:
+ case llvm::Triple::xcore:
+ return 32;
+
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ptx64:
+ case llvm::Triple::sparcv9:
+ case llvm::Triple::x86_64:
+ return 64;
+ }
+ llvm_unreachable("Invalid architecture value");
+}
+
+bool Triple::isArch64Bit() const {
+ return getArchPointerBitWidth(getArch()) == 64;
+}
+
+bool Triple::isArch32Bit() const {
+ return getArchPointerBitWidth(getArch()) == 32;
+}
+
+bool Triple::isArch16Bit() const {
+ return getArchPointerBitWidth(getArch()) == 16;
+}
+
+Triple Triple::get32BitArchVariant() const {
+ Triple T(*this);
+ switch (getArch()) {
+ case Triple::UnknownArch:
+ case Triple::msp430:
+ T.setArch(UnknownArch);
+ break;
+
+ case Triple::amdil:
+ case Triple::arm:
+ case Triple::cellspu:
+ case Triple::hexagon:
+ case Triple::le32:
+ case Triple::mblaze:
+ case Triple::mips:
+ case Triple::mipsel:
+ case Triple::ppc:
+ case Triple::ptx32:
+ case Triple::r600:
+ case Triple::sparc:
+ case Triple::tce:
+ case Triple::thumb:
+ case Triple::x86:
+ case Triple::xcore:
+ // Already 32-bit.
+ break;
+
+ case Triple::mips64: T.setArch(Triple::mips); break;
+ case Triple::mips64el: T.setArch(Triple::mipsel); break;
+ case Triple::ppc64: T.setArch(Triple::ppc); break;
+ case Triple::ptx64: T.setArch(Triple::ptx32); break;
+ case Triple::sparcv9: T.setArch(Triple::sparc); break;
+ case Triple::x86_64: T.setArch(Triple::x86); break;
+ }
+ return T;
+}
+
+Triple Triple::get64BitArchVariant() const {
+ Triple T(*this);
+ switch (getArch()) {
+ case Triple::UnknownArch:
+ case Triple::amdil:
+ case Triple::arm:
+ case Triple::cellspu:
+ case Triple::hexagon:
+ case Triple::le32:
+ case Triple::mblaze:
+ case Triple::msp430:
+ case Triple::r600:
+ case Triple::tce:
+ case Triple::thumb:
+ case Triple::xcore:
+ T.setArch(UnknownArch);
+ break;
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ case Triple::ppc64:
+ case Triple::ptx64:
+ case Triple::sparcv9:
+ case Triple::x86_64:
+ // Already 64-bit.
+ break;
+
+ case Triple::mips: T.setArch(Triple::mips64); break;
+ case Triple::mipsel: T.setArch(Triple::mips64el); break;
+ case Triple::ppc: T.setArch(Triple::ppc64); break;
+ case Triple::ptx32: T.setArch(Triple::ptx64); break;
+ case Triple::sparc: T.setArch(Triple::sparcv9); break;
+ case Triple::x86: T.setArch(Triple::x86_64); break;
+ }
+ return T;
+}
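
A sketch (not in the patch) combining the new width queries with the arch-variant helpers:

  #include "llvm/ADT/Triple.h"
  using namespace llvm;

  // Returns the 64-bit counterpart of T when one exists, e.g. x86 -> x86_64;
  // otherwise the variant's arch is UnknownArch, so we report T unchanged.
  Triple widenTriple(const Triple &T) {
    if (!T.isArch32Bit())
      return T;
    Triple Wide = T.get64BitArchVariant();
    return Wide.getArch() == Triple::UnknownArch ? T : Wide;
  }
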
diff --git a/contrib/llvm/lib/Support/Unix/Host.inc b/contrib/llvm/lib/Support/Unix/Host.inc
index 1e438e7..7f79db0 100644
--- a/contrib/llvm/lib/Support/Unix/Host.inc
+++ b/contrib/llvm/lib/Support/Unix/Host.inc
@@ -26,6 +26,11 @@
using namespace llvm;
+#ifdef __FreeBSD__
+std::string sys::getDefaultTargetTriple() {
+ return LLVM_DEFAULT_TARGET_TRIPLE;
+}
+#else // __FreeBSD__
static std::string getOSVersion() {
struct utsname info;
@@ -35,17 +40,11 @@ static std::string getOSVersion() {
return info.release;
}
-std::string sys::getHostTriple() {
-#ifdef __FreeBSD__
- return LLVM_HOSTTRIPLE;
-#else
- // FIXME: Derive directly instead of relying on the autoconf generated
- // variable.
-
- StringRef HostTripleString(LLVM_HOSTTRIPLE);
- std::pair<StringRef, StringRef> ArchSplit = HostTripleString.split('-');
+std::string sys::getDefaultTargetTriple() {
+ StringRef TargetTripleString(LLVM_DEFAULT_TARGET_TRIPLE);
+ std::pair<StringRef, StringRef> ArchSplit = TargetTripleString.split('-');
- // Normalize the arch, since the host triple may not actually match the host.
+ // Normalize the arch, since the target triple may not actually match the target.
std::string Arch = ArchSplit.first;
std::string Triple(Arch);
@@ -58,7 +57,7 @@ std::string sys::getHostTriple() {
Triple[1] = '3';
// On darwin, we want to update the version to match that of the
- // host.
+ // target.
std::string::size_type DarwinDashIdx = Triple.find("-darwin");
if (DarwinDashIdx != std::string::npos) {
Triple.resize(DarwinDashIdx + strlen("-darwin"));
@@ -66,5 +65,5 @@ std::string sys::getHostTriple() {
}
return Triple;
-#endif
}
+#endif // __FreeBSD__
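
The rename is mechanical for callers; a sketch (not from the patch), assuming the declaration stays in llvm/Support/Host.h as before:

  #include "llvm/Support/Host.h"
  #include <string>

  std::string pickTriple() {
    // Same call sites that previously used sys::getHostTriple().
    return llvm::sys::getDefaultTargetTriple();
  }
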
diff --git a/contrib/llvm/lib/Support/Unix/Path.inc b/contrib/llvm/lib/Support/Unix/Path.inc
index 85c7c40..ddc1e0f 100644
--- a/contrib/llvm/lib/Support/Unix/Path.inc
+++ b/contrib/llvm/lib/Support/Unix/Path.inc
@@ -60,6 +60,11 @@
#include <mach-o/dyld.h>
#endif
+// For GNU Hurd
+#if defined(__GNU__) && !defined(MAXPATHLEN)
+# define MAXPATHLEN 4096
+#endif
+
// Put in a hack for Cygwin which falsely reports that the mkdtemp function
// is available when it is not.
#ifdef __CYGWIN__
@@ -235,11 +240,6 @@ Path::GetBitcodeLibraryPaths(std::vector<sys::Path>& Paths) {
}
Path
-Path::GetLLVMDefaultConfigDir() {
- return Path("/etc/llvm/");
-}
-
-Path
Path::GetUserHomeDirectory() {
const char* home = getenv("HOME");
Path result;
@@ -261,7 +261,7 @@ Path::GetCurrentDirectory() {
}
#if defined(__FreeBSD__) || defined (__NetBSD__) || \
- defined(__OpenBSD__) || defined(__minix)
+ defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__)
static int
test_dir(char buf[PATH_MAX], char ret[PATH_MAX],
const char *dir, const char *bin)
@@ -313,7 +313,7 @@ getprogpath(char ret[PATH_MAX], const char *bin)
free(pv);
return (NULL);
}
-#endif // __FreeBSD__ || __NetBSD__
+#endif // __FreeBSD__ || __NetBSD__ || __FreeBSD_kernel__
/// GetMainExecutable - Return the path to the main executable, given the
/// value of argv[0] from program startup.
@@ -330,7 +330,7 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
return Path(link_path);
}
#elif defined(__FreeBSD__) || defined (__NetBSD__) || \
- defined(__OpenBSD__) || defined(__minix)
+ defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__)
char exe_path[PATH_MAX];
if (getprogpath(exe_path, argv0) != NULL)
diff --git a/contrib/llvm/lib/Support/Unix/PathV2.inc b/contrib/llvm/lib/Support/Unix/PathV2.inc
index bbbc344..edb101e 100644
--- a/contrib/llvm/lib/Support/Unix/PathV2.inc
+++ b/contrib/llvm/lib/Support/Unix/PathV2.inc
@@ -46,6 +46,11 @@
#include <limits.h>
#endif
+// For GNU Hurd
+#if defined(__GNU__) && !defined(PATH_MAX)
+# define PATH_MAX 4096
+#endif
+
using namespace llvm;
namespace {
@@ -87,7 +92,7 @@ namespace {
result.clear();
StringRef d(dir);
result.append(d.begin(), d.end());
- return success;
+ return error_code::success();
}
}
@@ -96,7 +101,12 @@ namespace sys {
namespace fs {
error_code current_path(SmallVectorImpl<char> &result) {
+#ifdef MAXPATHLEN
result.reserve(MAXPATHLEN);
+#else
+// For GNU Hurd
+ result.reserve(1024);
+#endif
while (true) {
if (::getcwd(result.data(), result.capacity()) == 0) {
@@ -110,7 +120,7 @@ error_code current_path(SmallVectorImpl<char> &result) {
}
result.set_size(strlen(result.data()));
- return success;
+ return error_code::success();
}
error_code copy_file(const Twine &from, const Twine &to, copy_option copt) {
@@ -169,7 +179,7 @@ error_code copy_file(const Twine &from, const Twine &to, copy_option copt) {
if (sz_read < 0)
return error_code(errno, system_category());
- return success;
+ return error_code::success();
}
error_code create_directory(const Twine &path, bool &existed) {
@@ -183,7 +193,7 @@ error_code create_directory(const Twine &path, bool &existed) {
} else
existed = false;
- return success;
+ return error_code::success();
}
error_code create_hard_link(const Twine &to, const Twine &from) {
@@ -196,7 +206,7 @@ error_code create_hard_link(const Twine &to, const Twine &from) {
if (::link(t.begin(), f.begin()) == -1)
return error_code(errno, system_category());
- return success;
+ return error_code::success();
}
error_code create_symlink(const Twine &to, const Twine &from) {
@@ -209,7 +219,7 @@ error_code create_symlink(const Twine &to, const Twine &from) {
if (::symlink(t.begin(), f.begin()) == -1)
return error_code(errno, system_category());
- return success;
+ return error_code::success();
}
error_code remove(const Twine &path, bool &existed) {
@@ -223,7 +233,7 @@ error_code remove(const Twine &path, bool &existed) {
} else
existed = true;
- return success;
+ return error_code::success();
}
error_code rename(const Twine &from, const Twine &to) {
@@ -245,7 +255,7 @@ error_code rename(const Twine &from, const Twine &to) {
return error_code(errno, system_category());
}
- return success;
+ return error_code::success();
}
error_code resize_file(const Twine &path, uint64_t size) {
@@ -255,7 +265,7 @@ error_code resize_file(const Twine &path, uint64_t size) {
if (::truncate(p.begin(), size) == -1)
return error_code(errno, system_category());
- return success;
+ return error_code::success();
}
error_code exists(const Twine &path, bool &result) {
@@ -270,32 +280,21 @@ error_code exists(const Twine &path, bool &result) {
} else
result = true;
- return success;
+ return error_code::success();
}
-error_code equivalent(const Twine &A, const Twine &B, bool &result) {
- // Get arguments.
- SmallString<128> a_storage;
- SmallString<128> b_storage;
- StringRef a = A.toNullTerminatedStringRef(a_storage);
- StringRef b = B.toNullTerminatedStringRef(b_storage);
-
- struct stat stat_a, stat_b;
- int error_b = ::stat(b.begin(), &stat_b);
- int error_a = ::stat(a.begin(), &stat_a);
-
- // If both are invalid, it's an error. If only one is, the result is false.
- if (error_a != 0 || error_b != 0) {
- if (error_a == error_b)
- return error_code(errno, system_category());
- result = false;
- } else {
- result =
- stat_a.st_dev == stat_b.st_dev &&
- stat_a.st_ino == stat_b.st_ino;
- }
+bool equivalent(file_status A, file_status B) {
+ assert(status_known(A) && status_known(B));
+ return A.st_dev == B.st_dev &&
+ A.st_ino == B.st_ino;
+}
- return success;
+error_code equivalent(const Twine &A, const Twine &B, bool &result) {
+ file_status fsA, fsB;
+ if (error_code ec = status(A, fsA)) return ec;
+ if (error_code ec = status(B, fsB)) return ec;
+ result = equivalent(fsA, fsB);
+ return error_code::success();
}
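
A sketch (not part of the patch) of the Twine overload in use under the error_code convention this change settles on:

  #include "llvm/ADT/Twine.h"
  #include "llvm/Support/FileSystem.h"
  #include "llvm/Support/system_error.h"
  using namespace llvm;

  bool refersToSameFile(const Twine &A, const Twine &B) {
    bool Result = false;
    if (error_code ec = sys::fs::equivalent(A, B, Result))
      return false; // status() failed on one of the paths
    return Result;
  }
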
error_code file_size(const Twine &path, uint64_t &result) {
@@ -309,7 +308,7 @@ error_code file_size(const Twine &path, uint64_t &result) {
return make_error_code(errc::operation_not_permitted);
result = status.st_size;
- return success;
+ return error_code::success();
}
error_code status(const Twine &path, file_status &result) {
@@ -341,7 +340,10 @@ error_code status(const Twine &path, file_status &result) {
else
result = file_status(file_type::type_unknown);
- return success;
+ result.st_dev = status.st_dev;
+ result.st_ino = status.st_ino;
+
+ return error_code::success();
}
error_code unique_file(const Twine &model, int &result_fd,
@@ -436,10 +438,11 @@ rety_open_create:
result_path.append(d.begin(), d.end());
result_fd = RandomFD;
- return success;
+ return error_code::success();
}
-error_code directory_iterator_construct(directory_iterator &it, StringRef path){
+error_code detail::directory_iterator_construct(detail::DirIterState &it,
+ StringRef path){
SmallString<128> path_null(path);
DIR *directory = ::opendir(path_null.c_str());
if (directory == 0)
@@ -452,15 +455,15 @@ error_code directory_iterator_construct(directory_iterator &it, StringRef path){
return directory_iterator_increment(it);
}
-error_code directory_iterator_destruct(directory_iterator& it) {
+error_code detail::directory_iterator_destruct(detail::DirIterState &it) {
if (it.IterationHandle)
::closedir(reinterpret_cast<DIR *>(it.IterationHandle));
it.IterationHandle = 0;
it.CurrentEntry = directory_entry();
- return success;
+ return error_code::success();
}
-error_code directory_iterator_increment(directory_iterator& it) {
+error_code detail::directory_iterator_increment(detail::DirIterState &it) {
errno = 0;
dirent *cur_dir = ::readdir(reinterpret_cast<DIR *>(it.IterationHandle));
if (cur_dir == 0 && errno != 0) {
@@ -474,7 +477,7 @@ error_code directory_iterator_increment(directory_iterator& it) {
} else
return directory_iterator_destruct(it);
- return success;
+ return error_code::success();
}
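
These detail:: functions back the public sys::fs::directory_iterator; a usage sketch (not from the patch):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/Support/FileSystem.h"
  #include "llvm/Support/system_error.h"
  using namespace llvm;

  error_code countEntries(StringRef Path, unsigned &N) {
    error_code ec;
    N = 0;
    for (sys::fs::directory_iterator I(Path, ec), E; I != E && !ec;
         I.increment(ec))
      ++N; // I->path() names the current entry
    return ec;
  }
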
error_code get_magic(const Twine &path, uint32_t len,
@@ -505,7 +508,7 @@ error_code get_magic(const Twine &path, uint32_t len,
}
std::fclose(file);
result.set_size(len);
- return success;
+ return error_code::success();
}
} // end namespace fs
diff --git a/contrib/llvm/lib/Support/Unix/Process.inc b/contrib/llvm/lib/Support/Unix/Process.inc
index da440fd..2d7fd38 100644
--- a/contrib/llvm/lib/Support/Unix/Process.inc
+++ b/contrib/llvm/lib/Support/Unix/Process.inc
@@ -136,7 +136,7 @@ int Process::GetCurrentGroupId() {
return getgid();
}
-#ifdef HAVE_MACH_MACH_H
+#if defined(HAVE_MACH_MACH_H) && !defined(__GNU__)
#include <mach/mach.h>
#endif
@@ -150,7 +150,7 @@ void Process::PreventCoreFiles() {
setrlimit(RLIMIT_CORE, &rlim);
#endif
-#ifdef HAVE_MACH_MACH_H
+#if defined(HAVE_MACH_MACH_H) && !defined(__GNU__)
// Disable crash reporting on Mac OS X 10.0-10.4
// get information about the original set of exception ports for the task
@@ -293,7 +293,3 @@ const char *Process::OutputBold(bool bg) {
const char *Process::ResetColor() {
return "\033[0m";
}
-
-void Process::SetWorkingDirectory(std::string Path) {
- ::chdir(Path.c_str());
-}
diff --git a/contrib/llvm/lib/Support/Unix/Program.inc b/contrib/llvm/lib/Support/Unix/Program.inc
index 346baf1..e5990d0 100644
--- a/contrib/llvm/lib/Support/Unix/Program.inc
+++ b/contrib/llvm/lib/Support/Unix/Program.inc
@@ -412,19 +412,19 @@ Program::Kill(std::string* ErrMsg) {
return false;
}
-bool Program::ChangeStdinToBinary(){
+error_code Program::ChangeStdinToBinary(){
// Do nothing, as Unix doesn't differentiate between text and binary.
- return false;
+ return make_error_code(errc::success);
}
-bool Program::ChangeStdoutToBinary(){
+error_code Program::ChangeStdoutToBinary(){
// Do nothing, as Unix doesn't differentiate between text and binary.
- return false;
+ return make_error_code(errc::success);
}
-bool Program::ChangeStderrToBinary(){
+error_code Program::ChangeStderrToBinary(){
// Do nothing, as Unix doesn't differentiate between text and binary.
- return false;
+ return make_error_code(errc::success);
}
}
diff --git a/contrib/llvm/lib/Support/Unix/Signals.inc b/contrib/llvm/lib/Support/Unix/Signals.inc
index e286869..c9ec9fc 100644
--- a/contrib/llvm/lib/Support/Unix/Signals.inc
+++ b/contrib/llvm/lib/Support/Unix/Signals.inc
@@ -30,6 +30,10 @@
#include <dlfcn.h>
#include <cxxabi.h>
#endif
+#if HAVE_MACH_MACH_H
+#include <mach/mach.h>
+#endif
+
using namespace llvm;
static RETSIGTYPE SignalHandler(int Sig); // defined below.
@@ -261,6 +265,22 @@ static void PrintStackTrace(void *) {
/// SIGSEGV) is delivered to the process, print a stack trace and then exit.
void llvm::sys::PrintStackTraceOnErrorSignal() {
AddSignalHandler(PrintStackTrace, 0);
+
+#if defined(__APPLE__)
+ // Environment variable to disable any kind of crash dialog.
+ if (getenv("LLVM_DISABLE_CRASH_REPORT")) {
+ mach_port_t self = mach_task_self();
+
+ exception_mask_t mask = EXC_MASK_CRASH;
+
+ kern_return_t ret = task_set_exception_ports(self,
+ mask,
+ MACH_PORT_NULL,
+ EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES,
+ THREAD_STATE_NONE);
+ (void)ret;
+ }
+#endif
}
diff --git a/contrib/llvm/lib/Support/Valgrind.cpp b/contrib/llvm/lib/Support/Valgrind.cpp
index 7034485..2b250a3 100644
--- a/contrib/llvm/lib/Support/Valgrind.cpp
+++ b/contrib/llvm/lib/Support/Valgrind.cpp
@@ -52,3 +52,16 @@ void llvm::sys::ValgrindDiscardTranslations(const void *Addr, size_t Len) {
}
#endif // !HAVE_VALGRIND_VALGRIND_H
+
+#if LLVM_ENABLE_THREADS != 0 && !defined(NDEBUG)
+// These functions require no implementation, tsan just looks at the arguments
+// they're called with.
+extern "C" {
+void AnnotateHappensBefore(const char *file, int line,
+ const volatile void *cv) {}
+void AnnotateHappensAfter(const char *file, int line,
+ const volatile void *cv) {}
+void AnnotateIgnoreWritesBegin(const char *file, int line) {}
+void AnnotateIgnoreWritesEnd(const char *file, int line) {}
+}
+#endif
diff --git a/contrib/llvm/lib/Support/Windows/Host.inc b/contrib/llvm/lib/Support/Windows/Host.inc
index 733830e..2e6d6f1 100644
--- a/contrib/llvm/lib/Support/Windows/Host.inc
+++ b/contrib/llvm/lib/Support/Windows/Host.inc
@@ -17,7 +17,6 @@
using namespace llvm;
-std::string sys::getHostTriple() {
- // FIXME: Adapt to running version.
- return LLVM_HOSTTRIPLE;
+std::string sys::getDefaultTargetTriple() {
+ return LLVM_DEFAULT_TARGET_TRIPLE;
}
diff --git a/contrib/llvm/lib/Support/Windows/Path.inc b/contrib/llvm/lib/Support/Windows/Path.inc
index 42a92f9..d8dc522 100644
--- a/contrib/llvm/lib/Support/Windows/Path.inc
+++ b/contrib/llvm/lib/Support/Windows/Path.inc
@@ -66,29 +66,20 @@ Path::operator=(StringRef that) {
return *this;
}
-// push_back 0 on create, and pop_back on delete.
-struct ScopedNullTerminator {
- std::string &str;
- ScopedNullTerminator(std::string &s) : str(s) { str.push_back(0); }
- ~ScopedNullTerminator() {
- // str.pop_back(); But wait, C++03 doesn't have this...
- assert(!str.empty() && str[str.size() - 1] == 0
- && "Null char not present!");
- str.resize(str.size() - 1);
- }
-};
-
bool
Path::isValid() const {
if (path.empty())
return false;
+ size_t len = path.size();
+ // If there is a null character, it and all its successors are ignored.
+ size_t pos = path.find_first_of('\0');
+ if (pos != std::string::npos)
+ len = pos;
+
// If there is a colon, it must be the second character, preceded by a letter
// and followed by something.
- size_t len = path.size();
- // This code assumes that path is null terminated, so make sure it is.
- ScopedNullTerminator snt(path);
- size_t pos = path.rfind(':',len);
+ pos = path.rfind(':',len);
size_t rootslash = 0;
if (pos != std::string::npos) {
if (pos != 1 || !isalpha(path[0]) || len < 3)
@@ -118,13 +109,13 @@ Path::isValid() const {
for (pos = 0; pos < len; ++pos) {
// A component may not end in a space.
if (path[pos] == ' ') {
- if (path[pos+1] == '/' || path[pos+1] == '\0')
+ if (pos+1 == len || path[pos+1] == '/' || path[pos+1] == '\0')
return false;
}
// A component may not end in a period.
if (path[pos] == '.') {
- if (path[pos+1] == '/' || path[pos+1] == '\0') {
+ if (pos+1 == len || path[pos+1] == '/') {
// Unless it is the pseudo-directory "."...
if (pos == 0 || path[pos-1] == '/' || path[pos-1] == ':')
return true;
@@ -286,14 +277,6 @@ Path::GetBitcodeLibraryPaths(std::vector<sys::Path>& Paths) {
}
Path
-Path::GetLLVMDefaultConfigDir() {
- Path ret = GetUserHomeDirectory();
- if (!ret.appendComponent(".llvm"))
- assert(0 && "Failed to append .llvm");
- return ret;
-}
-
-Path
Path::GetUserHomeDirectory() {
char buff[MAX_PATH];
HRESULT res = SHGetFolderPathA(NULL,
diff --git a/contrib/llvm/lib/Support/Windows/PathV2.inc b/contrib/llvm/lib/Support/Windows/PathV2.inc
index bc597b2..e9ce5d9 100644
--- a/contrib/llvm/lib/Support/Windows/PathV2.inc
+++ b/contrib/llvm/lib/Support/Windows/PathV2.inc
@@ -17,7 +17,6 @@
//===----------------------------------------------------------------------===//
#include "Windows.h"
-#include <wincrypt.h>
#include <fcntl.h>
#include <io.h>
#include <sys/stat.h>
@@ -63,7 +62,7 @@ namespace {
utf16.push_back(0);
utf16.pop_back();
- return success;
+ return error_code::success();
}
error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
@@ -93,7 +92,7 @@ namespace {
utf8.push_back(0);
utf8.pop_back();
- return success;
+ return error_code::success();
}
error_code TempDir(SmallVectorImpl<wchar_t> &result) {
@@ -109,17 +108,9 @@ namespace {
}
result.set_size(len);
- return success;
+ return error_code::success();
}
- // Forwarder for ScopedHandle.
- BOOL WINAPI CryptReleaseContext(HCRYPTPROV Provider) {
- return ::CryptReleaseContext(Provider, 0);
- }
-
- typedef ScopedHandle<HCRYPTPROV, uintptr_t(-1),
- BOOL (WINAPI*)(HCRYPTPROV), CryptReleaseContext>
- ScopedCryptContext;
bool is_separator(const wchar_t value) {
switch (value) {
case L'\\':
@@ -176,7 +167,7 @@ retry_cur_dir:
if (len == 0)
return windows_error(::GetLastError());
- return success;
+ return error_code::success();
}
error_code copy_file(const Twine &from, const Twine &to, copy_option copt) {
@@ -199,7 +190,7 @@ error_code copy_file(const Twine &from, const Twine &to, copy_option copt) {
if (res == 0)
return windows_error(::GetLastError());
- return success;
+ return error_code::success();
}
error_code create_directory(const Twine &path, bool &existed) {
@@ -219,7 +210,7 @@ error_code create_directory(const Twine &path, bool &existed) {
} else
existed = false;
- return success;
+ return error_code::success();
}
error_code create_hard_link(const Twine &to, const Twine &from) {
@@ -238,7 +229,7 @@ error_code create_hard_link(const Twine &to, const Twine &from) {
if (!::CreateHardLinkW(wide_from.begin(), wide_to.begin(), NULL))
return windows_error(::GetLastError());
- return success;
+ return error_code::success();
}
error_code create_symlink(const Twine &to, const Twine &from) {
@@ -261,7 +252,7 @@ error_code create_symlink(const Twine &to, const Twine &from) {
if (!create_symbolic_link_api(wide_from.begin(), wide_to.begin(), 0))
return windows_error(::GetLastError());
- return success;
+ return error_code::success();
}
error_code remove(const Twine &path, bool &existed) {
@@ -294,7 +285,7 @@ error_code remove(const Twine &path, bool &existed) {
existed = true;
}
- return success;
+ return error_code::success();
}
error_code rename(const Twine &from, const Twine &to) {
@@ -314,7 +305,7 @@ error_code rename(const Twine &from, const Twine &to) {
MOVEFILE_COPY_ALLOWED | MOVEFILE_REPLACE_EXISTING))
return windows_error(::GetLastError());
- return success;
+ return error_code::success();
}
error_code resize_file(const Twine &path, uint64_t size) {
@@ -356,72 +347,26 @@ error_code exists(const Twine &path, bool &result) {
result = false;
} else
result = true;
- return success;
+ return error_code::success();
}
-error_code equivalent(const Twine &A, const Twine &B, bool &result) {
- // Get arguments.
- SmallString<128> a_storage;
- SmallString<128> b_storage;
- StringRef a = A.toStringRef(a_storage);
- StringRef b = B.toStringRef(b_storage);
-
- // Convert to utf-16.
- SmallVector<wchar_t, 128> wide_a;
- SmallVector<wchar_t, 128> wide_b;
- if (error_code ec = UTF8ToUTF16(a, wide_a)) return ec;
- if (error_code ec = UTF8ToUTF16(b, wide_b)) return ec;
-
- AutoHandle HandleB(
- ::CreateFileW(wide_b.begin(),
- 0,
- FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
- 0,
- OPEN_EXISTING,
- FILE_FLAG_BACKUP_SEMANTICS,
- 0));
-
- AutoHandle HandleA(
- ::CreateFileW(wide_a.begin(),
- 0,
- FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
- 0,
- OPEN_EXISTING,
- FILE_FLAG_BACKUP_SEMANTICS,
- 0));
-
- // If both handles are invalid, it's an error.
- if (HandleA == INVALID_HANDLE_VALUE &&
- HandleB == INVALID_HANDLE_VALUE)
- return windows_error(::GetLastError());
-
- // If only one is invalid, it's false.
- if (HandleA == INVALID_HANDLE_VALUE &&
- HandleB == INVALID_HANDLE_VALUE) {
- result = false;
- return success;
- }
-
- // Get file information.
- BY_HANDLE_FILE_INFORMATION InfoA, InfoB;
- if (!::GetFileInformationByHandle(HandleA, &InfoA))
- return windows_error(::GetLastError());
- if (!::GetFileInformationByHandle(HandleB, &InfoB))
- return windows_error(::GetLastError());
+bool equivalent(file_status A, file_status B) {
+ assert(status_known(A) && status_known(B));
+ return A.FileIndexHigh == B.FileIndexHigh &&
+ A.FileIndexLow == B.FileIndexLow &&
+ A.FileSizeHigh == B.FileSizeHigh &&
+ A.FileSizeLow == B.FileSizeLow &&
+ A.LastWriteTimeHigh == B.LastWriteTimeHigh &&
+ A.LastWriteTimeLow == B.LastWriteTimeLow &&
+ A.VolumeSerialNumber == B.VolumeSerialNumber;
+}
- // See if it's all the same.
- result =
- InfoA.dwVolumeSerialNumber == InfoB.dwVolumeSerialNumber &&
- InfoA.nFileIndexHigh == InfoB.nFileIndexHigh &&
- InfoA.nFileIndexLow == InfoB.nFileIndexLow &&
- InfoA.nFileSizeHigh == InfoB.nFileSizeHigh &&
- InfoA.nFileSizeLow == InfoB.nFileSizeLow &&
- InfoA.ftLastWriteTime.dwLowDateTime ==
- InfoB.ftLastWriteTime.dwLowDateTime &&
- InfoA.ftLastWriteTime.dwHighDateTime ==
- InfoB.ftLastWriteTime.dwHighDateTime;
-
- return success;
+error_code equivalent(const Twine &A, const Twine &B, bool &result) {
+ file_status fsA, fsB;
+ if (error_code ec = status(A, fsA)) return ec;
+ if (error_code ec = status(B, fsB)) return ec;
+ result = equivalent(fsA, fsB);
+ return error_code::success();
}
error_code file_size(const Twine &path, uint64_t &result) {
@@ -442,7 +387,7 @@ error_code file_size(const Twine &path, uint64_t &result) {
(uint64_t(FileData.nFileSizeHigh) << (sizeof(FileData.nFileSizeLow) * 8))
+ FileData.nFileSizeLow;
- return success;
+ return error_code::success();
}
static bool isReservedName(StringRef path) {
@@ -475,11 +420,10 @@ error_code status(const Twine &path, file_status &result) {
StringRef path8 = path.toStringRef(path_storage);
if (isReservedName(path8)) {
result = file_status(file_type::character_file);
- return success;
+ return error_code::success();
}
- if (error_code ec = UTF8ToUTF16(path8,
- path_utf16))
+ if (error_code ec = UTF8ToUTF16(path8, path_utf16))
return ec;
DWORD attr = ::GetFileAttributesW(path_utf16.begin());
@@ -488,7 +432,7 @@ error_code status(const Twine &path, file_status &result) {
// Handle reparse points.
if (attr & FILE_ATTRIBUTE_REPARSE_POINT) {
- AutoHandle h(
+ ScopedFileHandle h(
::CreateFileW(path_utf16.begin(),
0, // Attributes only.
FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
@@ -496,16 +440,37 @@ error_code status(const Twine &path, file_status &result) {
OPEN_EXISTING,
FILE_FLAG_BACKUP_SEMANTICS,
0));
- if (h == INVALID_HANDLE_VALUE)
+ if (!h)
goto handle_status_error;
}
if (attr & FILE_ATTRIBUTE_DIRECTORY)
result = file_status(file_type::directory_file);
- else
+ else {
result = file_status(file_type::regular_file);
+ ScopedFileHandle h(
+ ::CreateFileW(path_utf16.begin(),
+ 0, // Attributes only.
+ FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS,
+ 0));
+ if (!h)
+ goto handle_status_error;
+ BY_HANDLE_FILE_INFORMATION Info;
+ if (!::GetFileInformationByHandle(h, &Info))
+ goto handle_status_error;
+ result.FileIndexHigh = Info.nFileIndexHigh;
+ result.FileIndexLow = Info.nFileIndexLow;
+ result.FileSizeHigh = Info.nFileSizeHigh;
+ result.FileSizeLow = Info.nFileSizeLow;
+ result.LastWriteTimeHigh = Info.ftLastWriteTime.dwHighDateTime;
+ result.LastWriteTimeLow = Info.ftLastWriteTime.dwLowDateTime;
+ result.VolumeSerialNumber = Info.dwVolumeSerialNumber;
+ }
- return success;
+ return error_code::success();
handle_status_error:
error_code ec = windows_error(::GetLastError());
@@ -519,7 +484,7 @@ handle_status_error:
return ec;
}
- return success;
+ return error_code::success();
}
error_code unique_file(const Twine &model, int &result_fd,
@@ -535,7 +500,7 @@ error_code unique_file(const Twine &model, int &result_fd,
if (makeAbsolute) {
// Make model absolute by prepending a temp directory if it's not already.
bool absolute = path::is_absolute(m);
-
+
if (!absolute) {
SmallVector<wchar_t, 64> temp_dir;
if (error_code ec = TempDir(temp_dir)) return ec;
@@ -646,7 +611,7 @@ retry_create_file:
}
result_fd = fd;
- return success;
+ return error_code::success();
}
error_code get_magic(const Twine &path, uint32_t len,
@@ -688,10 +653,11 @@ error_code get_magic(const Twine &path, uint32_t len,
}
result.set_size(len);
- return success;
+ return error_code::success();
}
-error_code directory_iterator_construct(directory_iterator &it, StringRef path){
+error_code detail::directory_iterator_construct(detail::DirIterState &it,
+ StringRef path){
SmallVector<wchar_t, 128> path_utf16;
if (error_code ec = UTF8ToUTF16(path,
@@ -722,7 +688,7 @@ error_code directory_iterator_construct(directory_iterator &it, StringRef path){
error_code ec = windows_error(::GetLastError());
// Check for end.
if (ec == windows_error::no_more_files)
- return directory_iterator_destruct(it);
+ return detail::directory_iterator_destruct(it);
return ec;
} else
FilenameLen = ::wcslen(FirstFind.cFileName);
@@ -739,25 +705,25 @@ error_code directory_iterator_construct(directory_iterator &it, StringRef path){
path::append(directory_entry_path, directory_entry_name_utf8.str());
it.CurrentEntry = directory_entry(directory_entry_path.str());
- return success;
+ return error_code::success();
}
-error_code directory_iterator_destruct(directory_iterator& it) {
+error_code detail::directory_iterator_destruct(detail::DirIterState &it) {
if (it.IterationHandle != 0)
// Closes the handle if it's valid.
ScopedFindHandle close(HANDLE(it.IterationHandle));
it.IterationHandle = 0;
it.CurrentEntry = directory_entry();
- return success;
+ return error_code::success();
}
-error_code directory_iterator_increment(directory_iterator& it) {
+error_code detail::directory_iterator_increment(detail::DirIterState &it) {
WIN32_FIND_DATAW FindData;
if (!::FindNextFileW(HANDLE(it.IterationHandle), &FindData)) {
error_code ec = windows_error(::GetLastError());
// Check for end.
if (ec == windows_error::no_more_files)
- return directory_iterator_destruct(it);
+ return detail::directory_iterator_destruct(it);
return ec;
}
@@ -774,7 +740,7 @@ error_code directory_iterator_increment(directory_iterator& it) {
return ec;
it.CurrentEntry.replace_filename(Twine(directory_entry_path_utf8));
- return success;
+ return error_code::success();
}
} // end namespace fs
diff --git a/contrib/llvm/lib/Support/Windows/Process.inc b/contrib/llvm/lib/Support/Windows/Process.inc
index fe54eb1..913b073 100644
--- a/contrib/llvm/lib/Support/Windows/Process.inc
+++ b/contrib/llvm/lib/Support/Windows/Process.inc
@@ -220,8 +220,4 @@ const char *Process::ResetColor() {
return 0;
}
-void Process::SetWorkingDirectory(std::string Path) {
- ::_chdir(Path.c_str());
-}
-
}
diff --git a/contrib/llvm/lib/Support/Windows/Program.inc b/contrib/llvm/lib/Support/Windows/Program.inc
index e486e6e..80ccaa6 100644
--- a/contrib/llvm/lib/Support/Windows/Program.inc
+++ b/contrib/llvm/lib/Support/Windows/Program.inc
@@ -299,14 +299,14 @@ Program::Execute(const Path& path,
Data_ = wpi;
// Make sure these get closed no matter what.
- AutoHandle hThread(pi.hThread);
+ ScopedCommonHandle hThread(pi.hThread);
// Assign the process to a job if a memory limit is defined.
- AutoHandle hJob(0);
+ ScopedJobHandle hJob;
if (memoryLimit != 0) {
hJob = CreateJobObject(0, 0);
bool success = false;
- if (hJob != 0) {
+ if (hJob) {
JOBOBJECT_EXTENDED_LIMIT_INFORMATION jeli;
memset(&jeli, 0, sizeof(jeli));
jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_PROCESS_MEMORY;
@@ -367,7 +367,17 @@ Program::Wait(const Path &path,
return -2;
}
- return status;
+ if (!status)
+ return 0;
+
+ // Pass status codes 10 (Warning) and 11 (Error) back to the caller as a
+ // negative value.
+ if ((status & 0xBFFF0000U) == 0x80000000U)
+ return (int)status;
+
+ if (status & 0xFF)
+ return status & 0x7FFFFFFF;
+
+ return 1;
}
bool
@@ -387,19 +397,25 @@ Program::Kill(std::string* ErrMsg) {
return false;
}
-bool Program::ChangeStdinToBinary(){
+error_code Program::ChangeStdinToBinary(){
int result = _setmode( _fileno(stdin), _O_BINARY );
- return result == -1;
+ if (result == -1)
+ return error_code(errno, generic_category());
+ return make_error_code(errc::success);
}
-bool Program::ChangeStdoutToBinary(){
+error_code Program::ChangeStdoutToBinary(){
int result = _setmode( _fileno(stdout), _O_BINARY );
- return result == -1;
+ if (result == -1)
+ return error_code(errno, generic_category());
+ return make_error_code(errc::success);
}
-bool Program::ChangeStderrToBinary(){
+error_code Program::ChangeStderrToBinary(){
int result = _setmode( _fileno(stderr), _O_BINARY );
- return result == -1;
+ if (result == -1)
+ return error_code(errno, generic_category());
+ return make_error_code(errc::success);
}
}
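
A sketch (not from the patch) of a caller adapting to the new return type, assuming the members remain static on sys::Program as before:

  #include "llvm/Support/Program.h"
  #include "llvm/Support/system_error.h"
  using namespace llvm;

  bool makeStdoutBinary(std::string &ErrMsg) {
    if (error_code ec = sys::Program::ChangeStdoutToBinary()) {
      ErrMsg = ec.message(); // carries errno detail on Windows, none on Unix
      return false;
    }
    return true;
  }
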
diff --git a/contrib/llvm/lib/Support/Windows/Signals.inc b/contrib/llvm/lib/Support/Windows/Signals.inc
index 0d4b8a2..38308f6 100644
--- a/contrib/llvm/lib/Support/Windows/Signals.inc
+++ b/contrib/llvm/lib/Support/Windows/Signals.inc
@@ -239,7 +239,7 @@ static void RegisterHandler() {
SetConsoleCtrlHandler(LLVMConsoleCtrlHandler, TRUE);
// Environment variable to disable any kind of crash dialog.
- if (getenv("LLVM_DISABLE_CRT_DEBUG")) {
+ if (getenv("LLVM_DISABLE_CRASH_REPORT")) {
#ifdef _MSC_VER
_CrtSetReportHook(CRTReportHook);
#endif
@@ -446,7 +446,7 @@ static LONG WINAPI LLVMUnhandledExceptionFilter(LPEXCEPTION_POINTERS ep) {
}
if (ExitOnUnhandledExceptions)
- _exit(-3);
+ _exit(ep->ExceptionRecord->ExceptionCode);
// Allow dialog box to pop up allowing choice to start debugger.
if (OldFilter)
diff --git a/contrib/llvm/lib/Support/Windows/Windows.h b/contrib/llvm/lib/Support/Windows/Windows.h
index 67b6f01..5c1da0d 100644
--- a/contrib/llvm/lib/Support/Windows/Windows.h
+++ b/contrib/llvm/lib/Support/Windows/Windows.h
@@ -26,6 +26,7 @@
#include "llvm/Config/config.h" // Get build system configuration settings
#include <windows.h>
+#include <wincrypt.h>
#include <shlobj.h>
#include <cassert>
#include <string>
@@ -41,70 +42,99 @@ inline bool MakeErrMsg(std::string* ErrMsg, const std::string& prefix) {
return true;
}
-class AutoHandle {
- HANDLE handle;
+template <typename HandleTraits>
+class ScopedHandle {
+ typedef typename HandleTraits::handle_type handle_type;
+ handle_type Handle;
+ ScopedHandle(const ScopedHandle &other); // = delete;
+ void operator=(const ScopedHandle &other); // = delete;
public:
- AutoHandle(HANDLE h) : handle(h) {}
+ ScopedHandle()
+ : Handle(HandleTraits::GetInvalid()) {}
+
+ explicit ScopedHandle(handle_type h)
+ : Handle(h) {}
- ~AutoHandle() {
- if (handle)
- CloseHandle(handle);
+ ~ScopedHandle() {
+ if (HandleTraits::IsValid(Handle))
+ HandleTraits::Close(Handle);
}
- operator HANDLE() {
- return handle;
+ handle_type take() {
+ handle_type t = Handle;
+ Handle = HandleTraits::GetInvalid();
+ return t;
}
- AutoHandle &operator=(HANDLE h) {
- handle = h;
+ ScopedHandle &operator=(handle_type h) {
+ if (HandleTraits::IsValid(Handle))
+ HandleTraits::Close(Handle);
+ Handle = h;
return *this;
}
+
+ // True if Handle is valid.
+ operator bool() const {
+ return HandleTraits::IsValid(Handle);
+ }
+
+ operator handle_type() const {
+ return Handle;
+ }
};
-template <class HandleType, uintptr_t InvalidHandle,
- class DeleterType, DeleterType D>
-class ScopedHandle {
- HandleType Handle;
+struct CommonHandleTraits {
+ typedef HANDLE handle_type;
-public:
- ScopedHandle() : Handle(InvalidHandle) {}
- ScopedHandle(HandleType handle) : Handle(handle) {}
+ static handle_type GetInvalid() {
+ return INVALID_HANDLE_VALUE;
+ }
- ~ScopedHandle() {
- if (Handle != HandleType(InvalidHandle))
- D(Handle);
+ static void Close(handle_type h) {
+ ::CloseHandle(h);
}
- HandleType take() {
- HandleType temp = Handle;
- Handle = HandleType(InvalidHandle);
- return temp;
+ static bool IsValid(handle_type h) {
+ return h != GetInvalid();
}
+};
- operator HandleType() const { return Handle; }
+struct JobHandleTraits : CommonHandleTraits {
+ static handle_type GetInvalid() {
+ return NULL;
+ }
+};
- ScopedHandle &operator=(HandleType handle) {
- Handle = handle;
- return *this;
+struct CryptContextTraits : CommonHandleTraits {
+ typedef HCRYPTPROV handle_type;
+
+ static handle_type GetInvalid() {
+ return 0;
}
- typedef void (*unspecified_bool_type)();
- static void unspecified_bool_true() {}
+ static void Close(handle_type h) {
+ ::CryptReleaseContext(h, 0);
+ }
- // True if Handle is valid.
- operator unspecified_bool_type() const {
- return Handle == HandleType(InvalidHandle) ? 0 : unspecified_bool_true;
+ static bool IsValid(handle_type h) {
+ return h != GetInvalid();
}
+};
- bool operator!() const {
- return Handle == HandleType(InvalidHandle);
+struct FindHandleTraits : CommonHandleTraits {
+ static void Close(handle_type h) {
+ ::FindClose(h);
}
};
-typedef ScopedHandle<HANDLE, uintptr_t(-1),
- BOOL (WINAPI*)(HANDLE), ::FindClose>
- ScopedFindHandle;
+struct FileHandleTraits : CommonHandleTraits {};
+
+typedef ScopedHandle<CommonHandleTraits> ScopedCommonHandle;
+typedef ScopedHandle<FileHandleTraits> ScopedFileHandle;
+typedef ScopedHandle<CryptContextTraits> ScopedCryptContext;
+typedef ScopedHandle<FindHandleTraits> ScopedFindHandle;
+typedef ScopedHandle<JobHandleTraits> ScopedJobHandle;
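
The traits split makes new handle families cheap to add; a hypothetical sketch (ModuleHandleTraits is illustrative only, not part of the patch):

  // A traits struct for LoadLibrary-style handles; everything else is
  // inherited from CommonHandleTraits and reused by ScopedHandle<>.
  struct ModuleHandleTraits : CommonHandleTraits {
    typedef HMODULE handle_type;
    static handle_type GetInvalid() { return NULL; }
    static void Close(handle_type h) { ::FreeLibrary(h); }
    static bool IsValid(handle_type h) { return h != GetInvalid(); }
  };
  typedef ScopedHandle<ModuleHandleTraits> ScopedModuleHandle;
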
namespace llvm {
template <class T>
diff --git a/contrib/llvm/lib/Support/YAMLParser.cpp b/contrib/llvm/lib/Support/YAMLParser.cpp
new file mode 100644
index 0000000..330519f
--- /dev/null
+++ b/contrib/llvm/lib/Support/YAMLParser.cpp
@@ -0,0 +1,2117 @@
+//===--- YAMLParser.cpp - Simple YAML parser ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a YAML parser.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/YAMLParser.h"
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/SourceMgr.h"
+
+using namespace llvm;
+using namespace yaml;
+
+enum UnicodeEncodingForm {
+ UEF_UTF32_LE, //< UTF-32 Little Endian
+ UEF_UTF32_BE, //< UTF-32 Big Endian
+ UEF_UTF16_LE, //< UTF-16 Little Endian
+ UEF_UTF16_BE, //< UTF-16 Big Endian
+ UEF_UTF8, //< UTF-8 or ascii.
+ UEF_Unknown //< Not a valid Unicode encoding.
+};
+
+/// EncodingInfo - Holds the encoding type and length of the byte order mark if
+/// it exists. Length is in {0, 2, 3, 4}.
+typedef std::pair<UnicodeEncodingForm, unsigned> EncodingInfo;
+
+/// getUnicodeEncoding - Reads up to the first 4 bytes to determine the Unicode
+/// encoding form of \a Input.
+///
+/// @param Input A string of length 0 or more.
+/// @returns An EncodingInfo indicating the Unicode encoding form of the input
+/// and how long the byte order mark is if one exists.
+static EncodingInfo getUnicodeEncoding(StringRef Input) {
+ if (Input.size() == 0)
+ return std::make_pair(UEF_Unknown, 0);
+
+ switch (uint8_t(Input[0])) {
+ case 0x00:
+ if (Input.size() >= 4) {
+ if ( Input[1] == 0
+ && uint8_t(Input[2]) == 0xFE
+ && uint8_t(Input[3]) == 0xFF)
+ return std::make_pair(UEF_UTF32_BE, 4);
+ if (Input[1] == 0 && Input[2] == 0 && Input[3] != 0)
+ return std::make_pair(UEF_UTF32_BE, 0);
+ }
+
+ if (Input.size() >= 2 && Input[1] != 0)
+ return std::make_pair(UEF_UTF16_BE, 0);
+ return std::make_pair(UEF_Unknown, 0);
+ case 0xFF:
+ if ( Input.size() >= 4
+ && uint8_t(Input[1]) == 0xFE
+ && Input[2] == 0
+ && Input[3] == 0)
+ return std::make_pair(UEF_UTF32_LE, 4);
+
+ if (Input.size() >= 2 && uint8_t(Input[1]) == 0xFE)
+ return std::make_pair(UEF_UTF16_LE, 2);
+ return std::make_pair(UEF_Unknown, 0);
+ case 0xFE:
+ if (Input.size() >= 2 && uint8_t(Input[1]) == 0xFF)
+ return std::make_pair(UEF_UTF16_BE, 2);
+ return std::make_pair(UEF_Unknown, 0);
+ case 0xEF:
+ if ( Input.size() >= 3
+ && uint8_t(Input[1]) == 0xBB
+ && uint8_t(Input[2]) == 0xBF)
+ return std::make_pair(UEF_UTF8, 3);
+ return std::make_pair(UEF_Unknown, 0);
+ }
+
+ // It could still be utf-32 or utf-16.
+ if (Input.size() >= 4 && Input[1] == 0 && Input[2] == 0 && Input[3] == 0)
+ return std::make_pair(UEF_UTF32_LE, 0);
+
+ if (Input.size() >= 2 && Input[1] == 0)
+ return std::make_pair(UEF_UTF16_LE, 0);
+
+ return std::make_pair(UEF_UTF8, 0);
+}
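
A sketch (not in the patch) of how a consumer might use the result to strip a byte order mark before scanning:

  // EncodingInfo.second is the BOM length in bytes (0 when none was found).
  static StringRef dropBOM(StringRef Input) {
    EncodingInfo EI = getUnicodeEncoding(Input);
    return Input.substr(EI.second);
  }
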
+
+namespace llvm {
+namespace yaml {
+/// Token - A single YAML token.
+struct Token : ilist_node<Token> {
+ enum TokenKind {
+ TK_Error, // Uninitialized token.
+ TK_StreamStart,
+ TK_StreamEnd,
+ TK_VersionDirective,
+ TK_TagDirective,
+ TK_DocumentStart,
+ TK_DocumentEnd,
+ TK_BlockEntry,
+ TK_BlockEnd,
+ TK_BlockSequenceStart,
+ TK_BlockMappingStart,
+ TK_FlowEntry,
+ TK_FlowSequenceStart,
+ TK_FlowSequenceEnd,
+ TK_FlowMappingStart,
+ TK_FlowMappingEnd,
+ TK_Key,
+ TK_Value,
+ TK_Scalar,
+ TK_Alias,
+ TK_Anchor,
+ TK_Tag
+ } Kind;
+
+ /// A string of length 0 or more whose begin() points to the logical location
+ /// of the token in the input.
+ StringRef Range;
+
+ Token() : Kind(TK_Error) {}
+};
+}
+}
+
+namespace llvm {
+template<>
+struct ilist_sentinel_traits<Token> {
+ Token *createSentinel() const {
+ return &Sentinel;
+ }
+ static void destroySentinel(Token*) {}
+
+ Token *provideInitialHead() const { return createSentinel(); }
+ Token *ensureHead(Token*) const { return createSentinel(); }
+ static void noteHead(Token*, Token*) {}
+
+private:
+ mutable Token Sentinel;
+};
+
+template<>
+struct ilist_node_traits<Token> {
+ Token *createNode(const Token &V) {
+ return new (Alloc.Allocate<Token>()) Token(V);
+ }
+ static void deleteNode(Token *V) {}
+
+ void addNodeToList(Token *) {}
+ void removeNodeFromList(Token *) {}
+ void transferNodesFromList(ilist_node_traits & /*SrcTraits*/,
+ ilist_iterator<Token> /*first*/,
+ ilist_iterator<Token> /*last*/) {}
+
+ BumpPtrAllocator Alloc;
+};
+}
+
+typedef ilist<Token> TokenQueueT;
+
+namespace {
+/// @brief This struct is used to track simple keys.
+///
+/// Simple keys are handled by creating an entry in SimpleKeys for each Token
+/// which could legally be the start of a simple key. When peekNext is called,
+/// if the Token to be returned is referenced by a SimpleKey, we continue
+/// tokenizing until that potential simple key has either been found not to be
+/// a simple key (we moved on to the next line or went further than 1024
+/// chars), or until we run into a Value, at which point we insert a Key token
+/// (and possibly others) before the SimpleKey's Tok.
+struct SimpleKey {
+ TokenQueueT::iterator Tok;
+ unsigned Column;
+ unsigned Line;
+ unsigned FlowLevel;
+ bool IsRequired;
+
+ bool operator ==(const SimpleKey &Other) {
+ return Tok == Other.Tok;
+ }
+};
+}
+
+/// @brief The Unicode scalar value of a UTF-8 minimal well-formed code unit
+/// subsequence and the subsequence's length in code units (uint8_t).
+/// A length of 0 represents an error.
+typedef std::pair<uint32_t, unsigned> UTF8Decoded;
+
+static UTF8Decoded decodeUTF8(StringRef Range) {
+ StringRef::iterator Position = Range.begin();
+ StringRef::iterator End = Range.end();
+ // 1 byte: [0x00, 0x7f]
+ // Bit pattern: 0xxxxxxx
+ if ((*Position & 0x80) == 0) {
+ return std::make_pair(*Position, 1);
+ }
+ // 2 bytes: [0x80, 0x7ff]
+ // Bit pattern: 110xxxxx 10xxxxxx
+ if (Position + 1 != End &&
+ ((*Position & 0xE0) == 0xC0) &&
+ ((*(Position + 1) & 0xC0) == 0x80)) {
+ uint32_t codepoint = ((*Position & 0x1F) << 6) |
+ (*(Position + 1) & 0x3F);
+ if (codepoint >= 0x80)
+ return std::make_pair(codepoint, 2);
+ }
+ // 3 bytes: [0x8000, 0xffff]
+ // Bit pattern: 1110xxxx 10xxxxxx 10xxxxxx
+ if (Position + 2 != End &&
+ ((*Position & 0xF0) == 0xE0) &&
+ ((*(Position + 1) & 0xC0) == 0x80) &&
+ ((*(Position + 2) & 0xC0) == 0x80)) {
+ uint32_t codepoint = ((*Position & 0x0F) << 12) |
+ ((*(Position + 1) & 0x3F) << 6) |
+ (*(Position + 2) & 0x3F);
+ // Codepoints between 0xD800 and 0xDFFF are invalid, as
+ // they are high / low surrogate halves used by UTF-16.
+ if (codepoint >= 0x800 &&
+ (codepoint < 0xD800 || codepoint > 0xDFFF))
+ return std::make_pair(codepoint, 3);
+ }
+ // 4 bytes: [0x10000, 0x10FFFF]
+ // Bit pattern: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ if (Position + 3 != End &&
+ ((*Position & 0xF8) == 0xF0) &&
+ ((*(Position + 1) & 0xC0) == 0x80) &&
+ ((*(Position + 2) & 0xC0) == 0x80) &&
+ ((*(Position + 3) & 0xC0) == 0x80)) {
+ uint32_t codepoint = ((*Position & 0x07) << 18) |
+ ((*(Position + 1) & 0x3F) << 12) |
+ ((*(Position + 2) & 0x3F) << 6) |
+ (*(Position + 3) & 0x3F);
+ if (codepoint >= 0x10000 && codepoint <= 0x10FFFF)
+ return std::make_pair(codepoint, 4);
+ }
+ return std::make_pair(0, 0);
+}
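
A sketch (not from the patch) of the decoder driving a validity check:

  // Validate a buffer by decoding one minimal subsequence at a time; a
  // returned length of 0 marks ill-formed input.
  static bool isWellFormedUTF8(StringRef S) {
    while (!S.empty()) {
      UTF8Decoded D = decodeUTF8(S);
      if (D.second == 0)
        return false;
      S = S.substr(D.second);
    }
    return true;
  }
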
+
+namespace llvm {
+namespace yaml {
+/// @brief Scans YAML tokens from a MemoryBuffer.
+class Scanner {
+public:
+ Scanner(const StringRef Input, SourceMgr &SM);
+
+ /// @brief Parse the next token and return it without popping it.
+ Token &peekNext();
+
+ /// @brief Parse the next token and pop it from the queue.
+ Token getNext();
+
+ void printError(SMLoc Loc, SourceMgr::DiagKind Kind, const Twine &Message,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
+ SM.PrintMessage(Loc, Kind, Message, Ranges);
+ }
+
+ void setError(const Twine &Message, StringRef::iterator Position) {
+ if (Current >= End)
+ Current = End - 1;
+
+ // Don't print out more errors after the first one we encounter. The rest
+ // are just the result of the first, and have no meaning.
+ if (!Failed)
+ printError(SMLoc::getFromPointer(Current), SourceMgr::DK_Error, Message);
+ Failed = true;
+ }
+
+ void setError(const Twine &Message) {
+ setError(Message, Current);
+ }
+
+ /// @brief Returns true if an error occurred while parsing.
+ bool failed() {
+ return Failed;
+ }
+
+private:
+ StringRef currentInput() {
+ return StringRef(Current, End - Current);
+ }
+
+ /// @brief Decode a UTF-8 minimal well-formed code unit subsequence starting
+ /// at \a Position.
+ ///
+ /// If the UTF-8 code units starting at Position do not form a well-formed
+ /// code unit subsequence, then the Unicode scalar value is 0, and the length
+ /// is 0.
+ UTF8Decoded decodeUTF8(StringRef::iterator Position) {
+ return ::decodeUTF8(StringRef(Position, End - Position));
+ }
+
+ // The following functions are based on the grammar rules in the YAML spec.
+ // The style of the function names is meant to closely match how they are
+ // written in the spec. The number within the [] is the number of the
+ // grammar rule in the spec.
+ //
+ // See 4.2 [Production Naming Conventions] for the meaning of the prefixes.
+ //
+ // c-
+ // A production starting and ending with a special character.
+ // b-
+ // A production matching a single line break.
+ // nb-
+ // A production starting and ending with a non-break character.
+ // s-
+ // A production starting and ending with a white space character.
+ // ns-
+ // A production starting and ending with a non-space character.
+ // l-
+ // A production matching complete line(s).
+
+ /// @brief Skip a single nb-char[27] starting at Position.
+ ///
+ /// A nb-char is 0x9 | [0x20-0x7E] | 0x85 | [0xA0-0xD7FF] | [0xE000-0xFEFE]
+ /// | [0xFF00-0xFFFD] | [0x10000-0x10FFFF]
+ ///
+ /// @returns The code unit after the nb-char, or Position if it's not an
+ /// nb-char.
+ StringRef::iterator skip_nb_char(StringRef::iterator Position);
+
+ /// @brief Skip a single b-break[28] starting at Position.
+ ///
+ /// A b-break is 0xD 0xA | 0xD | 0xA
+ ///
+ /// @returns The code unit after the b-break, or Position if it's not a
+ /// b-break.
+ StringRef::iterator skip_b_break(StringRef::iterator Position);
+
+ /// @brief Skip a single s-white[33] starting at Position.
+ ///
+ /// A s-white is 0x20 | 0x9
+ ///
+ /// @returns The code unit after the s-white, or Position if it's not a
+ /// s-white.
+ StringRef::iterator skip_s_white(StringRef::iterator Position);
+
+ /// @brief Skip a single ns-char[34] starting at Position.
+ ///
+ /// A ns-char is nb-char - s-white
+ ///
+ /// @returns The code unit after the ns-char, or Position if it's not a
+ /// ns-char.
+ StringRef::iterator skip_ns_char(StringRef::iterator Position);
+
+ typedef StringRef::iterator (Scanner::*SkipWhileFunc)(StringRef::iterator);
+ /// @brief Skip minimal well-formed code unit subsequences until Func
+ /// returns its input.
+ ///
+ /// @returns The code unit after the last minimal well-formed code unit
+ /// subsequence that Func accepted.
+ StringRef::iterator skip_while( SkipWhileFunc Func
+ , StringRef::iterator Position);
+
+ /// @brief Scan ns-uri-char[39]s starting at Cur.
+ ///
+ /// This updates Cur and Column while scanning.
+ ///
+ /// @returns A StringRef starting at Cur which covers the longest contiguous
+ /// sequence of ns-uri-char.
+ StringRef scan_ns_uri_char();
+
+ /// @brief Scan ns-plain-one-line[133] starting at \a Cur.
+ StringRef scan_ns_plain_one_line();
+
+ /// @brief Consume a minimal well-formed code unit subsequence starting at
+ /// \a Cur. Return false if it is not the same Unicode scalar value as
+ /// \a Expected. This updates \a Column.
+ bool consume(uint32_t Expected);
+
+ /// @brief Skip \a Distance UTF-8 code units. Updates \a Cur and \a Column.
+ void skip(uint32_t Distance);
+
+ /// @brief Return true if the minimal well-formed code unit subsequence at
+ /// Pos is whitespace or a new line
+ bool isBlankOrBreak(StringRef::iterator Position);
+
+ /// @brief If IsSimpleKeyAllowed, create and push_back a new SimpleKey.
+ void saveSimpleKeyCandidate( TokenQueueT::iterator Tok
+ , unsigned AtColumn
+ , bool IsRequired);
+
+ /// @brief Remove simple keys that can no longer be valid simple keys.
+ ///
+ /// Invalid simple keys are not on the current line or are further than 1024
+ /// columns back.
+ void removeStaleSimpleKeyCandidates();
+
+ /// @brief Remove all simple keys on FlowLevel \a Level.
+ void removeSimpleKeyCandidatesOnFlowLevel(unsigned Level);
+
+ /// @brief Unroll indentation in \a Indents back to \a Col. Creates BlockEnd
+ /// tokens if needed.
+ bool unrollIndent(int ToColumn);
+
+ /// @brief Increase indent to \a Col. Creates \a Kind token at \a InsertPoint
+ /// if needed.
+ bool rollIndent( int ToColumn
+ , Token::TokenKind Kind
+ , TokenQueueT::iterator InsertPoint);
+
+ /// @brief Skip whitespace and comments until the start of the next token.
+ void scanToNextToken();
+
+ /// @brief Must be the first token generated.
+ bool scanStreamStart();
+
+ /// @brief Generate tokens needed to close out the stream.
+ bool scanStreamEnd();
+
+ /// @brief Scan a %BLAH directive.
+ bool scanDirective();
+
+ /// @brief Scan a ... or ---.
+ bool scanDocumentIndicator(bool IsStart);
+
+ /// @brief Scan a [ or { and generate the proper flow collection start token.
+ bool scanFlowCollectionStart(bool IsSequence);
+
+ /// @brief Scan a ] or } and generate the proper flow collection end token.
+ bool scanFlowCollectionEnd(bool IsSequence);
+
+ /// @brief Scan the , that separates entries in a flow collection.
+ bool scanFlowEntry();
+
+ /// @brief Scan the - that starts block sequence entries.
+ bool scanBlockEntry();
+
+ /// @brief Scan an explicit ? indicating a key.
+ bool scanKey();
+
+ /// @brief Scan an explicit : indicating a value.
+ bool scanValue();
+
+ /// @brief Scan a quoted scalar.
+ bool scanFlowScalar(bool IsDoubleQuoted);
+
+ /// @brief Scan an unquoted scalar.
+ bool scanPlainScalar();
+
+ /// @brief Scan an Alias or Anchor starting with * or &.
+ bool scanAliasOrAnchor(bool IsAlias);
+
+ /// @brief Scan a block scalar starting with | or >.
+ bool scanBlockScalar(bool IsLiteral);
+
+ /// @brief Scan a tag of the form !stuff.
+ bool scanTag();
+
+ /// @brief Dispatch to the next scanning function based on \a *Cur.
+ bool fetchMoreTokens();
+
+ /// @brief The SourceMgr used for diagnostics and buffer management.
+ SourceMgr &SM;
+
+ /// @brief The original input.
+ MemoryBuffer *InputBuffer;
+
+ /// @brief The current position of the scanner.
+ StringRef::iterator Current;
+
+ /// @brief The end of the input (one past the last character).
+ StringRef::iterator End;
+
+ /// @brief Current YAML indentation level in spaces.
+ int Indent;
+
+ /// @brief Current column number in Unicode code points.
+ unsigned Column;
+
+ /// @brief Current line number.
+ unsigned Line;
+
+ /// @brief How deep we are in flow style containers. 0 Means at block level.
+ unsigned FlowLevel;
+
+ /// @brief Are we at the start of the stream?
+ bool IsStartOfStream;
+
+ /// @brief Can the next token be the start of a simple key?
+ bool IsSimpleKeyAllowed;
+
+ /// @brief Is the next token required to start a simple key?
+ bool IsSimpleKeyRequired;
+
+ /// @brief True if an error has occurred.
+ bool Failed;
+
+ /// @brief Queue of tokens. This is required to queue up tokens while looking
+ /// for the end of a simple key, and for cases where a single character
+ /// can produce multiple tokens (e.g. BlockEnd).
+ TokenQueueT TokenQueue;
+
+ /// @brief Indentation levels.
+ SmallVector<int, 4> Indents;
+
+ /// @brief Potential simple keys.
+ SmallVector<SimpleKey, 4> SimpleKeys;
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+/// encodeUTF8 - Encode \a UnicodeScalarValue in UTF-8 and append it to result.
+static void encodeUTF8( uint32_t UnicodeScalarValue
+ , SmallVectorImpl<char> &Result) {
+ if (UnicodeScalarValue <= 0x7F) {
+ Result.push_back(UnicodeScalarValue & 0x7F);
+ } else if (UnicodeScalarValue <= 0x7FF) {
+ uint8_t FirstByte = 0xC0 | ((UnicodeScalarValue & 0x7C0) >> 6);
+ uint8_t SecondByte = 0x80 | (UnicodeScalarValue & 0x3F);
+ Result.push_back(FirstByte);
+ Result.push_back(SecondByte);
+ } else if (UnicodeScalarValue <= 0xFFFF) {
+ uint8_t FirstByte = 0xE0 | ((UnicodeScalarValue & 0xF000) >> 12);
+ uint8_t SecondByte = 0x80 | ((UnicodeScalarValue & 0xFC0) >> 6);
+ uint8_t ThirdByte = 0x80 | (UnicodeScalarValue & 0x3F);
+ Result.push_back(FirstByte);
+ Result.push_back(SecondByte);
+ Result.push_back(ThirdByte);
+ } else if (UnicodeScalarValue <= 0x10FFFF) {
+    uint8_t FirstByte = 0xF0 | ((UnicodeScalarValue & 0x1C0000) >> 18);
+ uint8_t SecondByte = 0x80 | ((UnicodeScalarValue & 0x3F000) >> 12);
+ uint8_t ThirdByte = 0x80 | ((UnicodeScalarValue & 0xFC0) >> 6);
+ uint8_t FourthByte = 0x80 | (UnicodeScalarValue & 0x3F);
+ Result.push_back(FirstByte);
+ Result.push_back(SecondByte);
+ Result.push_back(ThirdByte);
+ Result.push_back(FourthByte);
+ }
+}
+
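+/// dumpTokens - Scan \a Input and print each token kind and range to \a OS.
+/// Returns false if an error token is encountered. For example, the document
+/// "a: b" dumps Stream-Start, Block-Mapping-Start, Key, Scalar, Value,
+/// Scalar, Block-End, and Stream-End tokens.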
+bool yaml::dumpTokens(StringRef Input, raw_ostream &OS) {
+ SourceMgr SM;
+ Scanner scanner(Input, SM);
+ while (true) {
+ Token T = scanner.getNext();
+ switch (T.Kind) {
+ case Token::TK_StreamStart:
+ OS << "Stream-Start: ";
+ break;
+ case Token::TK_StreamEnd:
+ OS << "Stream-End: ";
+ break;
+ case Token::TK_VersionDirective:
+ OS << "Version-Directive: ";
+ break;
+ case Token::TK_TagDirective:
+ OS << "Tag-Directive: ";
+ break;
+ case Token::TK_DocumentStart:
+ OS << "Document-Start: ";
+ break;
+ case Token::TK_DocumentEnd:
+ OS << "Document-End: ";
+ break;
+ case Token::TK_BlockEntry:
+ OS << "Block-Entry: ";
+ break;
+ case Token::TK_BlockEnd:
+ OS << "Block-End: ";
+ break;
+ case Token::TK_BlockSequenceStart:
+ OS << "Block-Sequence-Start: ";
+ break;
+ case Token::TK_BlockMappingStart:
+ OS << "Block-Mapping-Start: ";
+ break;
+ case Token::TK_FlowEntry:
+ OS << "Flow-Entry: ";
+ break;
+ case Token::TK_FlowSequenceStart:
+ OS << "Flow-Sequence-Start: ";
+ break;
+ case Token::TK_FlowSequenceEnd:
+ OS << "Flow-Sequence-End: ";
+ break;
+ case Token::TK_FlowMappingStart:
+ OS << "Flow-Mapping-Start: ";
+ break;
+ case Token::TK_FlowMappingEnd:
+ OS << "Flow-Mapping-End: ";
+ break;
+ case Token::TK_Key:
+ OS << "Key: ";
+ break;
+ case Token::TK_Value:
+ OS << "Value: ";
+ break;
+ case Token::TK_Scalar:
+ OS << "Scalar: ";
+ break;
+ case Token::TK_Alias:
+ OS << "Alias: ";
+ break;
+ case Token::TK_Anchor:
+ OS << "Anchor: ";
+ break;
+ case Token::TK_Tag:
+ OS << "Tag: ";
+ break;
+ case Token::TK_Error:
+ break;
+ }
+ OS << T.Range << "\n";
+ if (T.Kind == Token::TK_StreamEnd)
+ break;
+ else if (T.Kind == Token::TK_Error)
+ return false;
+ }
+ return true;
+}
+
+bool yaml::scanTokens(StringRef Input) {
+ llvm::SourceMgr SM;
+ llvm::yaml::Scanner scanner(Input, SM);
+ for (;;) {
+ llvm::yaml::Token T = scanner.getNext();
+ if (T.Kind == Token::TK_StreamEnd)
+ break;
+ else if (T.Kind == Token::TK_Error)
+ return false;
+ }
+ return true;
+}
+
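+/// escape - Escape \a Input for use inside a double quoted YAML scalar, e.g.
+/// a tab becomes "\t". Invalid UTF-8 sequences are replaced with U+FFFD.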
+std::string yaml::escape(StringRef Input) {
+ std::string EscapedInput;
+ for (StringRef::iterator i = Input.begin(), e = Input.end(); i != e; ++i) {
+ if (*i == '\\')
+ EscapedInput += "\\\\";
+ else if (*i == '"')
+ EscapedInput += "\\\"";
+ else if (*i == 0)
+ EscapedInput += "\\0";
+ else if (*i == 0x07)
+ EscapedInput += "\\a";
+ else if (*i == 0x08)
+ EscapedInput += "\\b";
+ else if (*i == 0x09)
+ EscapedInput += "\\t";
+ else if (*i == 0x0A)
+ EscapedInput += "\\n";
+ else if (*i == 0x0B)
+ EscapedInput += "\\v";
+ else if (*i == 0x0C)
+ EscapedInput += "\\f";
+ else if (*i == 0x0D)
+ EscapedInput += "\\r";
+ else if (*i == 0x1B)
+ EscapedInput += "\\e";
+ else if (*i >= 0 && *i < 0x20) { // Control characters not handled above.
+ std::string HexStr = utohexstr(*i);
+ EscapedInput += "\\x" + std::string(2 - HexStr.size(), '0') + HexStr;
+ } else if (*i & 0x80) { // UTF-8 multiple code unit subsequence.
+ UTF8Decoded UnicodeScalarValue
+ = decodeUTF8(StringRef(i, Input.end() - i));
+ if (UnicodeScalarValue.second == 0) {
+ // Found invalid char.
+ SmallString<4> Val;
+ encodeUTF8(0xFFFD, Val);
+ EscapedInput.insert(EscapedInput.end(), Val.begin(), Val.end());
+ // FIXME: Error reporting.
+ return EscapedInput;
+ }
+ if (UnicodeScalarValue.first == 0x85)
+ EscapedInput += "\\N";
+ else if (UnicodeScalarValue.first == 0xA0)
+ EscapedInput += "\\_";
+ else if (UnicodeScalarValue.first == 0x2028)
+ EscapedInput += "\\L";
+ else if (UnicodeScalarValue.first == 0x2029)
+ EscapedInput += "\\P";
+ else {
+ std::string HexStr = utohexstr(UnicodeScalarValue.first);
+ if (HexStr.size() <= 2)
+ EscapedInput += "\\x" + std::string(2 - HexStr.size(), '0') + HexStr;
+ else if (HexStr.size() <= 4)
+ EscapedInput += "\\u" + std::string(4 - HexStr.size(), '0') + HexStr;
+ else if (HexStr.size() <= 8)
+ EscapedInput += "\\U" + std::string(8 - HexStr.size(), '0') + HexStr;
+ }
+ i += UnicodeScalarValue.second - 1;
+ } else
+ EscapedInput.push_back(*i);
+ }
+ return EscapedInput;
+}
+
+Scanner::Scanner(StringRef Input, SourceMgr &sm)
+ : SM(sm)
+ , Indent(-1)
+ , Column(0)
+ , Line(0)
+ , FlowLevel(0)
+ , IsStartOfStream(true)
+ , IsSimpleKeyAllowed(true)
+ , IsSimpleKeyRequired(false)
+ , Failed(false) {
+ InputBuffer = MemoryBuffer::getMemBuffer(Input, "YAML");
+ SM.AddNewSourceBuffer(InputBuffer, SMLoc());
+ Current = InputBuffer->getBufferStart();
+ End = InputBuffer->getBufferEnd();
+}
+
+Token &Scanner::peekNext() {
+ // If the current token is a possible simple key, keep parsing until we
+ // can confirm.
+ bool NeedMore = false;
+ while (true) {
+ if (TokenQueue.empty() || NeedMore) {
+ if (!fetchMoreTokens()) {
+ TokenQueue.clear();
+ TokenQueue.push_back(Token());
+ return TokenQueue.front();
+ }
+ }
+ assert(!TokenQueue.empty() &&
+ "fetchMoreTokens lied about getting tokens!");
+
+ removeStaleSimpleKeyCandidates();
+ SimpleKey SK;
+ SK.Tok = TokenQueue.front();
+ if (std::find(SimpleKeys.begin(), SimpleKeys.end(), SK)
+ == SimpleKeys.end())
+ break;
+ else
+ NeedMore = true;
+ }
+ return TokenQueue.front();
+}
+
+Token Scanner::getNext() {
+ Token Ret = peekNext();
+ // TokenQueue can be empty if there was an error getting the next token.
+ if (!TokenQueue.empty())
+ TokenQueue.pop_front();
+
+ // There cannot be any referenced Token's if the TokenQueue is empty. So do a
+ // quick deallocation of them all.
+ if (TokenQueue.empty()) {
+ TokenQueue.Alloc.Reset();
+ }
+
+ return Ret;
+}
+
+StringRef::iterator Scanner::skip_nb_char(StringRef::iterator Position) {
+  // Check for the 7-bit printable characters (c-printable minus b-char).
+ if ( *Position == 0x09
+ || (*Position >= 0x20 && *Position <= 0x7E))
+ return Position + 1;
+
+ // Check for valid UTF-8.
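+  // YAML only permits NEL (0x85) and the code point ranges 0xA0-0xD7FF,
+  // 0xE000-0xFFFD, and 0x10000-0x10FFFF, excluding the byte order mark.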
+ if (uint8_t(*Position) & 0x80) {
+ UTF8Decoded u8d = decodeUTF8(Position);
+ if ( u8d.second != 0
+ && u8d.first != 0xFEFF
+ && ( u8d.first == 0x85
+ || ( u8d.first >= 0xA0
+ && u8d.first <= 0xD7FF)
+ || ( u8d.first >= 0xE000
+ && u8d.first <= 0xFFFD)
+ || ( u8d.first >= 0x10000
+ && u8d.first <= 0x10FFFF)))
+ return Position + u8d.second;
+ }
+ return Position;
+}
+
+StringRef::iterator Scanner::skip_b_break(StringRef::iterator Position) {
+ if (*Position == 0x0D) {
+ if (Position + 1 != End && *(Position + 1) == 0x0A)
+ return Position + 2;
+ return Position + 1;
+ }
+
+ if (*Position == 0x0A)
+ return Position + 1;
+ return Position;
+}
+
+
+StringRef::iterator Scanner::skip_s_white(StringRef::iterator Position) {
+ if (Position == End)
+ return Position;
+ if (*Position == ' ' || *Position == '\t')
+ return Position + 1;
+ return Position;
+}
+
+StringRef::iterator Scanner::skip_ns_char(StringRef::iterator Position) {
+ if (Position == End)
+ return Position;
+ if (*Position == ' ' || *Position == '\t')
+ return Position;
+ return skip_nb_char(Position);
+}
+
+StringRef::iterator Scanner::skip_while( SkipWhileFunc Func
+ , StringRef::iterator Position) {
+ while (true) {
+ StringRef::iterator i = (this->*Func)(Position);
+ if (i == Position)
+ break;
+ Position = i;
+ }
+ return Position;
+}
+
+static bool is_ns_hex_digit(const char C) {
+ return (C >= '0' && C <= '9')
+      || (C >= 'a' && C <= 'f')
+      || (C >= 'A' && C <= 'F');
+}
+
+static bool is_ns_word_char(const char C) {
+ return C == '-'
+ || (C >= 'a' && C <= 'z')
+ || (C >= 'A' && C <= 'Z');
+}
+
+StringRef Scanner::scan_ns_uri_char() {
+ StringRef::iterator Start = Current;
+ while (true) {
+ if (Current == End)
+ break;
+ if (( *Current == '%'
+ && Current + 2 < End
+ && is_ns_hex_digit(*(Current + 1))
+ && is_ns_hex_digit(*(Current + 2)))
+ || is_ns_word_char(*Current)
+ || StringRef(Current, 1).find_first_of("#;/?:@&=+$,_.!~*'()[]")
+ != StringRef::npos) {
+ ++Current;
+ ++Column;
+ } else
+ break;
+ }
+ return StringRef(Start, Current - Start);
+}
+
+StringRef Scanner::scan_ns_plain_one_line() {
+ StringRef::iterator start = Current;
+  // The caller must have already verified the first character.
+ ++Current;
+ while (true) {
+ if (Current == End) {
+ break;
+ } else if (*Current == ':') {
+ // Check if the next character is a ns-char.
+ if (Current + 1 == End)
+ break;
+ StringRef::iterator i = skip_ns_char(Current + 1);
+ if (Current + 1 != i) {
+ Current = i;
+ Column += 2; // Consume both the ':' and ns-char.
+ } else
+ break;
+ } else if (*Current == '#') {
+ // Check if the previous character was a ns-char.
+      // The & 0x80 check is for the trailing byte of a multi-byte UTF-8
+      // code point, which always counts as a ns-char.
+ if (*(Current - 1) & 0x80 || skip_ns_char(Current - 1) == Current) {
+ ++Current;
+ ++Column;
+ } else
+ break;
+ } else {
+ StringRef::iterator i = skip_nb_char(Current);
+ if (i == Current)
+ break;
+ Current = i;
+ ++Column;
+ }
+ }
+ return StringRef(start, Current - start);
+}
+
+bool Scanner::consume(uint32_t Expected) {
+ if (Expected >= 0x80)
+ report_fatal_error("Not dealing with this yet");
+ if (Current == End)
+ return false;
+ if (uint8_t(*Current) >= 0x80)
+ report_fatal_error("Not dealing with this yet");
+ if (uint8_t(*Current) == Expected) {
+ ++Current;
+ ++Column;
+ return true;
+ }
+ return false;
+}
+
+void Scanner::skip(uint32_t Distance) {
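+  // Assumes the skipped bytes are all single-byte, single-column code points.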
+ Current += Distance;
+ Column += Distance;
+}
+
+bool Scanner::isBlankOrBreak(StringRef::iterator Position) {
+ if (Position == End)
+ return false;
+ if ( *Position == ' ' || *Position == '\t'
+ || *Position == '\r' || *Position == '\n')
+ return true;
+ return false;
+}
+
+void Scanner::saveSimpleKeyCandidate( TokenQueueT::iterator Tok
+ , unsigned AtColumn
+ , bool IsRequired) {
+ if (IsSimpleKeyAllowed) {
+ SimpleKey SK;
+ SK.Tok = Tok;
+ SK.Line = Line;
+ SK.Column = AtColumn;
+ SK.IsRequired = IsRequired;
+ SK.FlowLevel = FlowLevel;
+ SimpleKeys.push_back(SK);
+ }
+}
+
+void Scanner::removeStaleSimpleKeyCandidates() {
+ for (SmallVectorImpl<SimpleKey>::iterator i = SimpleKeys.begin();
+ i != SimpleKeys.end();) {
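+    // Simple keys may not span lines and are limited to 1024 characters by
+    // the YAML spec.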
+ if (i->Line != Line || i->Column + 1024 < Column) {
+ if (i->IsRequired)
+ setError( "Could not find expected : for simple key"
+ , i->Tok->Range.begin());
+ i = SimpleKeys.erase(i);
+ } else
+ ++i;
+ }
+}
+
+void Scanner::removeSimpleKeyCandidatesOnFlowLevel(unsigned Level) {
+  if (!SimpleKeys.empty() && SimpleKeys.back().FlowLevel == Level)
+ SimpleKeys.pop_back();
+}
+
+bool Scanner::unrollIndent(int ToColumn) {
+ Token T;
+ // Indentation is ignored in flow.
+ if (FlowLevel != 0)
+ return true;
+
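+  // Pop indentation levels until Indent is at most ToColumn, emitting a
+  // Block-End token for each level closed.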
+ while (Indent > ToColumn) {
+ T.Kind = Token::TK_BlockEnd;
+ T.Range = StringRef(Current, 1);
+ TokenQueue.push_back(T);
+ Indent = Indents.pop_back_val();
+ }
+
+ return true;
+}
+
+bool Scanner::rollIndent( int ToColumn
+ , Token::TokenKind Kind
+ , TokenQueueT::iterator InsertPoint) {
+ if (FlowLevel)
+ return true;
+ if (Indent < ToColumn) {
+ Indents.push_back(Indent);
+ Indent = ToColumn;
+
+ Token T;
+ T.Kind = Kind;
+ T.Range = StringRef(Current, 0);
+ TokenQueue.insert(InsertPoint, T);
+ }
+ return true;
+}
+
+void Scanner::scanToNextToken() {
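+  // The input buffer is guaranteed to be null terminated, so dereferencing
+  // Current at End is safe here.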
+ while (true) {
+ while (*Current == ' ' || *Current == '\t') {
+ skip(1);
+ }
+
+ // Skip comment.
+ if (*Current == '#') {
+ while (true) {
+ // This may skip more than one byte, thus Column is only incremented
+ // for code points.
+ StringRef::iterator i = skip_nb_char(Current);
+ if (i == Current)
+ break;
+ Current = i;
+ ++Column;
+ }
+ }
+
+ // Skip EOL.
+ StringRef::iterator i = skip_b_break(Current);
+ if (i == Current)
+ break;
+ Current = i;
+ ++Line;
+ Column = 0;
+ // New lines may start a simple key.
+ if (!FlowLevel)
+ IsSimpleKeyAllowed = true;
+ }
+}
+
+bool Scanner::scanStreamStart() {
+ IsStartOfStream = false;
+
+ EncodingInfo EI = getUnicodeEncoding(currentInput());
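+  // EI.second is the length of the byte order mark, if present; the
+  // Stream-Start token consumes it.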
+
+ Token T;
+ T.Kind = Token::TK_StreamStart;
+ T.Range = StringRef(Current, EI.second);
+ TokenQueue.push_back(T);
+ Current += EI.second;
+ return true;
+}
+
+bool Scanner::scanStreamEnd() {
+ // Force an ending new line if one isn't present.
+ if (Column != 0) {
+ Column = 0;
+ ++Line;
+ }
+
+ unrollIndent(-1);
+ SimpleKeys.clear();
+ IsSimpleKeyAllowed = false;
+
+ Token T;
+ T.Kind = Token::TK_StreamEnd;
+ T.Range = StringRef(Current, 0);
+ TokenQueue.push_back(T);
+ return true;
+}
+
+bool Scanner::scanDirective() {
+ // Reset the indentation level.
+ unrollIndent(-1);
+ SimpleKeys.clear();
+ IsSimpleKeyAllowed = false;
+
+ StringRef::iterator Start = Current;
+ consume('%');
+ StringRef::iterator NameStart = Current;
+ Current = skip_while(&Scanner::skip_ns_char, Current);
+ StringRef Name(NameStart, Current - NameStart);
+ Current = skip_while(&Scanner::skip_s_white, Current);
+
+ if (Name == "YAML") {
+ Current = skip_while(&Scanner::skip_ns_char, Current);
+ Token T;
+ T.Kind = Token::TK_VersionDirective;
+ T.Range = StringRef(Start, Current - Start);
+ TokenQueue.push_back(T);
+ return true;
+ }
+ return false;
+}
+
+bool Scanner::scanDocumentIndicator(bool IsStart) {
+ unrollIndent(-1);
+ SimpleKeys.clear();
+ IsSimpleKeyAllowed = false;
+
+ Token T;
+ T.Kind = IsStart ? Token::TK_DocumentStart : Token::TK_DocumentEnd;
+ T.Range = StringRef(Current, 3);
+ skip(3);
+ TokenQueue.push_back(T);
+ return true;
+}
+
+bool Scanner::scanFlowCollectionStart(bool IsSequence) {
+ Token T;
+ T.Kind = IsSequence ? Token::TK_FlowSequenceStart
+ : Token::TK_FlowMappingStart;
+ T.Range = StringRef(Current, 1);
+ skip(1);
+ TokenQueue.push_back(T);
+
+ // [ and { may begin a simple key.
+ saveSimpleKeyCandidate(TokenQueue.back(), Column - 1, false);
+
+  // They may also be followed by a simple key.
+ IsSimpleKeyAllowed = true;
+ ++FlowLevel;
+ return true;
+}
+
+bool Scanner::scanFlowCollectionEnd(bool IsSequence) {
+ removeSimpleKeyCandidatesOnFlowLevel(FlowLevel);
+ IsSimpleKeyAllowed = false;
+ Token T;
+ T.Kind = IsSequence ? Token::TK_FlowSequenceEnd
+ : Token::TK_FlowMappingEnd;
+ T.Range = StringRef(Current, 1);
+ skip(1);
+ TokenQueue.push_back(T);
+ if (FlowLevel)
+ --FlowLevel;
+ return true;
+}
+
+bool Scanner::scanFlowEntry() {
+ removeSimpleKeyCandidatesOnFlowLevel(FlowLevel);
+ IsSimpleKeyAllowed = true;
+ Token T;
+ T.Kind = Token::TK_FlowEntry;
+ T.Range = StringRef(Current, 1);
+ skip(1);
+ TokenQueue.push_back(T);
+ return true;
+}
+
+bool Scanner::scanBlockEntry() {
+ rollIndent(Column, Token::TK_BlockSequenceStart, TokenQueue.end());
+ removeSimpleKeyCandidatesOnFlowLevel(FlowLevel);
+ IsSimpleKeyAllowed = true;
+ Token T;
+ T.Kind = Token::TK_BlockEntry;
+ T.Range = StringRef(Current, 1);
+ skip(1);
+ TokenQueue.push_back(T);
+ return true;
+}
+
+bool Scanner::scanKey() {
+ if (!FlowLevel)
+ rollIndent(Column, Token::TK_BlockMappingStart, TokenQueue.end());
+
+ removeSimpleKeyCandidatesOnFlowLevel(FlowLevel);
+ IsSimpleKeyAllowed = !FlowLevel;
+
+ Token T;
+ T.Kind = Token::TK_Key;
+ T.Range = StringRef(Current, 1);
+ skip(1);
+ TokenQueue.push_back(T);
+ return true;
+}
+
+bool Scanner::scanValue() {
+ // If the previous token could have been a simple key, insert the key token
+ // into the token queue.
+ if (!SimpleKeys.empty()) {
+ SimpleKey SK = SimpleKeys.pop_back_val();
+ Token T;
+ T.Kind = Token::TK_Key;
+ T.Range = SK.Tok->Range;
+ TokenQueueT::iterator i, e;
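+    // Find the simple key's position in the token queue so the new Key token
+    // can be inserted directly before it.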
+ for (i = TokenQueue.begin(), e = TokenQueue.end(); i != e; ++i) {
+ if (i == SK.Tok)
+ break;
+ }
+ assert(i != e && "SimpleKey not in token queue!");
+ i = TokenQueue.insert(i, T);
+
+ // We may also need to add a Block-Mapping-Start token.
+ rollIndent(SK.Column, Token::TK_BlockMappingStart, i);
+
+ IsSimpleKeyAllowed = false;
+ } else {
+ if (!FlowLevel)
+ rollIndent(Column, Token::TK_BlockMappingStart, TokenQueue.end());
+ IsSimpleKeyAllowed = !FlowLevel;
+ }
+
+ Token T;
+ T.Kind = Token::TK_Value;
+ T.Range = StringRef(Current, 1);
+ skip(1);
+ TokenQueue.push_back(T);
+ return true;
+}
+
+// Forbidding inlining improves performance by roughly 20%.
+// FIXME: Remove once llvm optimizes this to the faster version without hints.
+LLVM_ATTRIBUTE_NOINLINE static bool
+wasEscaped(StringRef::iterator First, StringRef::iterator Position);
+
+// Returns whether a character at 'Position' was escaped with a leading '\'.
+// 'First' specifies the position of the first character in the string.
+static bool wasEscaped(StringRef::iterator First,
+ StringRef::iterator Position) {
+ assert(Position - 1 >= First);
+ StringRef::iterator I = Position - 1;
+ // We calculate the number of consecutive '\'s before the current position
+ // by iterating backwards through our string.
+ while (I >= First && *I == '\\') --I;
+ // (Position - 1 - I) now contains the number of '\'s before the current
+ // position. If it is odd, the character at 'Position' was escaped.
+ return (Position - 1 - I) % 2 == 1;
+}
+
+bool Scanner::scanFlowScalar(bool IsDoubleQuoted) {
+ StringRef::iterator Start = Current;
+ unsigned ColStart = Column;
+ if (IsDoubleQuoted) {
+ do {
+ ++Current;
+ while (Current != End && *Current != '"')
+ ++Current;
+ // Repeat until the previous character was not a '\' or was an escaped
+ // backslash.
+ } while (*(Current - 1) == '\\' && wasEscaped(Start + 1, Current));
+ } else {
+ skip(1);
+ while (true) {
+ // Skip a ' followed by another '.
+ if (Current + 1 < End && *Current == '\'' && *(Current + 1) == '\'') {
+ skip(2);
+ continue;
+ } else if (*Current == '\'')
+ break;
+ StringRef::iterator i = skip_nb_char(Current);
+ if (i == Current) {
+ i = skip_b_break(Current);
+ if (i == Current)
+ break;
+ Current = i;
+ Column = 0;
+ ++Line;
+ } else {
+ if (i == End)
+ break;
+ Current = i;
+ ++Column;
+ }
+ }
+ }
+ skip(1); // Skip ending quote.
+ Token T;
+ T.Kind = Token::TK_Scalar;
+ T.Range = StringRef(Start, Current - Start);
+ TokenQueue.push_back(T);
+
+ saveSimpleKeyCandidate(TokenQueue.back(), ColStart, false);
+
+ IsSimpleKeyAllowed = false;
+
+ return true;
+}
+
+bool Scanner::scanPlainScalar() {
+ StringRef::iterator Start = Current;
+ unsigned ColStart = Column;
+ unsigned LeadingBlanks = 0;
+ assert(Indent >= -1 && "Indent must be >= -1 !");
+ unsigned indent = static_cast<unsigned>(Indent + 1);
+ while (true) {
+ if (*Current == '#')
+ break;
+
+ while (!isBlankOrBreak(Current)) {
+ if ( FlowLevel && *Current == ':'
+ && !(isBlankOrBreak(Current + 1) || *(Current + 1) == ',')) {
+ setError("Found unexpected ':' while scanning a plain scalar", Current);
+ return false;
+ }
+
+ // Check for the end of the plain scalar.
+ if ( (*Current == ':' && isBlankOrBreak(Current + 1))
+ || ( FlowLevel
+ && (StringRef(Current, 1).find_first_of(",:?[]{}")
+ != StringRef::npos)))
+ break;
+
+ StringRef::iterator i = skip_nb_char(Current);
+ if (i == Current)
+ break;
+ Current = i;
+ ++Column;
+ }
+
+ // Are we at the end?
+ if (!isBlankOrBreak(Current))
+ break;
+
+ // Eat blanks.
+ StringRef::iterator Tmp = Current;
+ while (isBlankOrBreak(Tmp)) {
+ StringRef::iterator i = skip_s_white(Tmp);
+ if (i != Tmp) {
+ if (LeadingBlanks && (Column < indent) && *Tmp == '\t') {
+ setError("Found invalid tab character in indentation", Tmp);
+ return false;
+ }
+ Tmp = i;
+ ++Column;
+ } else {
+ i = skip_b_break(Tmp);
+ if (!LeadingBlanks)
+ LeadingBlanks = 1;
+ Tmp = i;
+ Column = 0;
+ ++Line;
+ }
+ }
+
+ if (!FlowLevel && Column < indent)
+ break;
+
+ Current = Tmp;
+ }
+ if (Start == Current) {
+ setError("Got empty plain scalar", Start);
+ return false;
+ }
+ Token T;
+ T.Kind = Token::TK_Scalar;
+ T.Range = StringRef(Start, Current - Start);
+ TokenQueue.push_back(T);
+
+ // Plain scalars can be simple keys.
+ saveSimpleKeyCandidate(TokenQueue.back(), ColStart, false);
+
+ IsSimpleKeyAllowed = false;
+
+ return true;
+}
+
+bool Scanner::scanAliasOrAnchor(bool IsAlias) {
+ StringRef::iterator Start = Current;
+ unsigned ColStart = Column;
+ skip(1);
+  while (true) {
+ if ( *Current == '[' || *Current == ']'
+ || *Current == '{' || *Current == '}'
+ || *Current == ','
+ || *Current == ':')
+ break;
+ StringRef::iterator i = skip_ns_char(Current);
+ if (i == Current)
+ break;
+ Current = i;
+ ++Column;
+ }
+
+ if (Start == Current) {
+ setError("Got empty alias or anchor", Start);
+ return false;
+ }
+
+ Token T;
+ T.Kind = IsAlias ? Token::TK_Alias : Token::TK_Anchor;
+ T.Range = StringRef(Start, Current - Start);
+ TokenQueue.push_back(T);
+
+  // Aliases and anchors can be simple keys.
+ saveSimpleKeyCandidate(TokenQueue.back(), ColStart, false);
+
+ IsSimpleKeyAllowed = false;
+
+ return true;
+}
+
+bool Scanner::scanBlockScalar(bool IsLiteral) {
+ StringRef::iterator Start = Current;
+ skip(1); // Eat | or >
+  while (true) {
+ StringRef::iterator i = skip_nb_char(Current);
+ if (i == Current) {
+ if (Column == 0)
+ break;
+ i = skip_b_break(Current);
+ if (i != Current) {
+ // We got a line break.
+ Column = 0;
+ ++Line;
+ Current = i;
+ continue;
+ } else {
+ // There was an error, which should already have been printed out.
+ return false;
+ }
+ }
+ Current = i;
+ ++Column;
+ }
+
+ if (Start == Current) {
+ setError("Got empty block scalar", Start);
+ return false;
+ }
+
+ Token T;
+ T.Kind = Token::TK_Scalar;
+ T.Range = StringRef(Start, Current - Start);
+ TokenQueue.push_back(T);
+ return true;
+}
+
+bool Scanner::scanTag() {
+ StringRef::iterator Start = Current;
+ unsigned ColStart = Column;
+ skip(1); // Eat !.
+  if (Current == End || isBlankOrBreak(Current)) {
+    // An empty tag.
+  } else if (*Current == '<') {
+ skip(1);
+ scan_ns_uri_char();
+ if (!consume('>'))
+ return false;
+ } else {
+ // FIXME: Actually parse the c-ns-shorthand-tag rule.
+ Current = skip_while(&Scanner::skip_ns_char, Current);
+ }
+
+ Token T;
+ T.Kind = Token::TK_Tag;
+ T.Range = StringRef(Start, Current - Start);
+ TokenQueue.push_back(T);
+
+ // Tags can be simple keys.
+ saveSimpleKeyCandidate(TokenQueue.back(), ColStart, false);
+
+ IsSimpleKeyAllowed = false;
+
+ return true;
+}
+
+bool Scanner::fetchMoreTokens() {
+ if (IsStartOfStream)
+ return scanStreamStart();
+
+ scanToNextToken();
+
+ if (Current == End)
+ return scanStreamEnd();
+
+ removeStaleSimpleKeyCandidates();
+
+ unrollIndent(Column);
+
+ if (Column == 0 && *Current == '%')
+ return scanDirective();
+
+ if (Column == 0 && Current + 4 <= End
+ && *Current == '-'
+ && *(Current + 1) == '-'
+ && *(Current + 2) == '-'
+ && (Current + 3 == End || isBlankOrBreak(Current + 3)))
+ return scanDocumentIndicator(true);
+
+ if (Column == 0 && Current + 4 <= End
+ && *Current == '.'
+ && *(Current + 1) == '.'
+ && *(Current + 2) == '.'
+ && (Current + 3 == End || isBlankOrBreak(Current + 3)))
+ return scanDocumentIndicator(false);
+
+ if (*Current == '[')
+ return scanFlowCollectionStart(true);
+
+ if (*Current == '{')
+ return scanFlowCollectionStart(false);
+
+ if (*Current == ']')
+ return scanFlowCollectionEnd(true);
+
+ if (*Current == '}')
+ return scanFlowCollectionEnd(false);
+
+ if (*Current == ',')
+ return scanFlowEntry();
+
+ if (*Current == '-' && isBlankOrBreak(Current + 1))
+ return scanBlockEntry();
+
+ if (*Current == '?' && (FlowLevel || isBlankOrBreak(Current + 1)))
+ return scanKey();
+
+ if (*Current == ':' && (FlowLevel || isBlankOrBreak(Current + 1)))
+ return scanValue();
+
+ if (*Current == '*')
+ return scanAliasOrAnchor(true);
+
+ if (*Current == '&')
+ return scanAliasOrAnchor(false);
+
+ if (*Current == '!')
+ return scanTag();
+
+ if (*Current == '|' && !FlowLevel)
+ return scanBlockScalar(true);
+
+ if (*Current == '>' && !FlowLevel)
+ return scanBlockScalar(false);
+
+ if (*Current == '\'')
+ return scanFlowScalar(false);
+
+ if (*Current == '"')
+ return scanFlowScalar(true);
+
+ // Get a plain scalar.
+ StringRef FirstChar(Current, 1);
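+  // A plain scalar can start with any non-blank character that is not an
+  // indicator, or with '-', '?' or ':' when the lookahead shows they cannot
+  // be indicators here.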
+ if (!(isBlankOrBreak(Current)
+ || FirstChar.find_first_of("-?:,[]{}#&*!|>'\"%@`") != StringRef::npos)
+ || (*Current == '-' && !isBlankOrBreak(Current + 1))
+ || (!FlowLevel && (*Current == '?' || *Current == ':')
+ && isBlankOrBreak(Current + 1))
+ || (!FlowLevel && *Current == ':'
+ && Current + 2 < End
+ && *(Current + 1) == ':'
+ && !isBlankOrBreak(Current + 2)))
+ return scanPlainScalar();
+
+ setError("Unrecognized character while tokenizing.");
+ return false;
+}
+
+Stream::Stream(StringRef Input, SourceMgr &SM)
+ : scanner(new Scanner(Input, SM))
+ , CurrentDoc(0) {}
+
+Stream::~Stream() {}
+
+bool Stream::failed() { return scanner->failed(); }
+
+void Stream::printError(Node *N, const Twine &Msg) {
+ SmallVector<SMRange, 1> Ranges;
+ Ranges.push_back(N->getSourceRange());
+ scanner->printError( N->getSourceRange().Start
+ , SourceMgr::DK_Error
+ , Msg
+ , Ranges);
+}
+
+void Stream::handleYAMLDirective(const Token &t) {
+ // TODO: Ensure version is 1.x.
+}
+
+document_iterator Stream::begin() {
+ if (CurrentDoc)
+ report_fatal_error("Can only iterate over the stream once");
+
+ // Skip Stream-Start.
+ scanner->getNext();
+
+ CurrentDoc.reset(new Document(*this));
+ return document_iterator(CurrentDoc);
+}
+
+document_iterator Stream::end() {
+ return document_iterator();
+}
+
+void Stream::skip() {
+ for (document_iterator i = begin(), e = end(); i != e; ++i)
+ i->skip();
+}
+
+Node::Node(unsigned int Type, OwningPtr<Document> &D, StringRef A)
+ : Doc(D)
+ , TypeID(Type)
+ , Anchor(A) {
+ SMLoc Start = SMLoc::getFromPointer(peekNext().Range.begin());
+ SourceRange = SMRange(Start, Start);
+}
+
+Token &Node::peekNext() {
+ return Doc->peekNext();
+}
+
+Token Node::getNext() {
+ return Doc->getNext();
+}
+
+Node *Node::parseBlockNode() {
+ return Doc->parseBlockNode();
+}
+
+BumpPtrAllocator &Node::getAllocator() {
+ return Doc->NodeAllocator;
+}
+
+void Node::setError(const Twine &Msg, Token &Tok) const {
+ Doc->setError(Msg, Tok);
+}
+
+bool Node::failed() const {
+ return Doc->failed();
+}
+
+
+
+StringRef ScalarNode::getValue(SmallVectorImpl<char> &Storage) const {
+ // TODO: Handle newlines properly. We need to remove leading whitespace.
+ if (Value[0] == '"') { // Double quoted.
+ // Pull off the leading and trailing "s.
+ StringRef UnquotedValue = Value.substr(1, Value.size() - 2);
+ // Search for characters that would require unescaping the value.
+ StringRef::size_type i = UnquotedValue.find_first_of("\\\r\n");
+ if (i != StringRef::npos)
+ return unescapeDoubleQuoted(UnquotedValue, i, Storage);
+ return UnquotedValue;
+ } else if (Value[0] == '\'') { // Single quoted.
+ // Pull off the leading and trailing 's.
+ StringRef UnquotedValue = Value.substr(1, Value.size() - 2);
+ StringRef::size_type i = UnquotedValue.find('\'');
+ if (i != StringRef::npos) {
+ // We're going to need Storage.
+ Storage.clear();
+ Storage.reserve(UnquotedValue.size());
+ for (; i != StringRef::npos; i = UnquotedValue.find('\'')) {
+ StringRef Valid(UnquotedValue.begin(), i);
+ Storage.insert(Storage.end(), Valid.begin(), Valid.end());
+ Storage.push_back('\'');
+ UnquotedValue = UnquotedValue.substr(i + 2);
+ }
+ Storage.insert(Storage.end(), UnquotedValue.begin(), UnquotedValue.end());
+ return StringRef(Storage.begin(), Storage.size());
+ }
+ return UnquotedValue;
+ }
+  // Plain or block. Trim trailing spaces.
+  size_t TrimmedSize = Value.size();
+  while (TrimmedSize != 0 && Value[TrimmedSize - 1] == ' ')
+    --TrimmedSize;
+  return Value.drop_back(Value.size() - TrimmedSize);
+}
+
+StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue
+ , StringRef::size_type i
+ , SmallVectorImpl<char> &Storage)
+ const {
+ // Use Storage to build proper value.
+ Storage.clear();
+ Storage.reserve(UnquotedValue.size());
+ for (; i != StringRef::npos; i = UnquotedValue.find_first_of("\\\r\n")) {
+ // Insert all previous chars into Storage.
+ StringRef Valid(UnquotedValue.begin(), i);
+ Storage.insert(Storage.end(), Valid.begin(), Valid.end());
+ // Chop off inserted chars.
+ UnquotedValue = UnquotedValue.substr(i);
+
+ assert(!UnquotedValue.empty() && "Can't be empty!");
+
+ // Parse escape or line break.
+ switch (UnquotedValue[0]) {
+ case '\r':
+ case '\n':
+ Storage.push_back('\n');
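+      // Fold a two character line break (CR LF or LF CR) into one newline.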
+ if ( UnquotedValue.size() > 1
+ && (UnquotedValue[1] == '\r' || UnquotedValue[1] == '\n'))
+ UnquotedValue = UnquotedValue.substr(1);
+ UnquotedValue = UnquotedValue.substr(1);
+ break;
+ default:
+ if (UnquotedValue.size() == 1)
+ // TODO: Report error.
+ break;
+ UnquotedValue = UnquotedValue.substr(1);
+ switch (UnquotedValue[0]) {
+ default: {
+ Token T;
+ T.Range = StringRef(UnquotedValue.begin(), 1);
+ setError("Unrecognized escape code!", T);
+ return "";
+ }
+ case '\r':
+ case '\n':
+ // Remove the new line.
+ if ( UnquotedValue.size() > 1
+ && (UnquotedValue[1] == '\r' || UnquotedValue[1] == '\n'))
+ UnquotedValue = UnquotedValue.substr(1);
+ // If this was just a single byte newline, it will get skipped
+ // below.
+ break;
+ case '0':
+ Storage.push_back(0x00);
+ break;
+ case 'a':
+ Storage.push_back(0x07);
+ break;
+ case 'b':
+ Storage.push_back(0x08);
+ break;
+ case 't':
+ case 0x09:
+ Storage.push_back(0x09);
+ break;
+ case 'n':
+ Storage.push_back(0x0A);
+ break;
+ case 'v':
+ Storage.push_back(0x0B);
+ break;
+ case 'f':
+ Storage.push_back(0x0C);
+ break;
+ case 'r':
+ Storage.push_back(0x0D);
+ break;
+ case 'e':
+ Storage.push_back(0x1B);
+ break;
+ case ' ':
+ Storage.push_back(0x20);
+ break;
+ case '"':
+ Storage.push_back(0x22);
+ break;
+ case '/':
+ Storage.push_back(0x2F);
+ break;
+ case '\\':
+ Storage.push_back(0x5C);
+ break;
+ case 'N':
+ encodeUTF8(0x85, Storage);
+ break;
+ case '_':
+ encodeUTF8(0xA0, Storage);
+ break;
+ case 'L':
+ encodeUTF8(0x2028, Storage);
+ break;
+ case 'P':
+ encodeUTF8(0x2029, Storage);
+ break;
+ case 'x': {
+ if (UnquotedValue.size() < 3)
+ // TODO: Report error.
+ break;
+ unsigned int UnicodeScalarValue;
+ UnquotedValue.substr(1, 2).getAsInteger(16, UnicodeScalarValue);
+ encodeUTF8(UnicodeScalarValue, Storage);
+ UnquotedValue = UnquotedValue.substr(2);
+ break;
+ }
+ case 'u': {
+ if (UnquotedValue.size() < 5)
+ // TODO: Report error.
+ break;
+ unsigned int UnicodeScalarValue;
+ UnquotedValue.substr(1, 4).getAsInteger(16, UnicodeScalarValue);
+ encodeUTF8(UnicodeScalarValue, Storage);
+ UnquotedValue = UnquotedValue.substr(4);
+ break;
+ }
+ case 'U': {
+ if (UnquotedValue.size() < 9)
+ // TODO: Report error.
+ break;
+ unsigned int UnicodeScalarValue;
+ UnquotedValue.substr(1, 8).getAsInteger(16, UnicodeScalarValue);
+ encodeUTF8(UnicodeScalarValue, Storage);
+ UnquotedValue = UnquotedValue.substr(8);
+ break;
+ }
+ }
+ UnquotedValue = UnquotedValue.substr(1);
+ }
+ }
+ Storage.insert(Storage.end(), UnquotedValue.begin(), UnquotedValue.end());
+ return StringRef(Storage.begin(), Storage.size());
+}
+
+Node *KeyValueNode::getKey() {
+ if (Key)
+ return Key;
+ // Handle implicit null keys.
+ {
+ Token &t = peekNext();
+ if ( t.Kind == Token::TK_BlockEnd
+ || t.Kind == Token::TK_Value
+ || t.Kind == Token::TK_Error) {
+ return Key = new (getAllocator()) NullNode(Doc);
+ }
+ if (t.Kind == Token::TK_Key)
+ getNext(); // skip TK_Key.
+ }
+
+ // Handle explicit null keys.
+ Token &t = peekNext();
+ if (t.Kind == Token::TK_BlockEnd || t.Kind == Token::TK_Value) {
+ return Key = new (getAllocator()) NullNode(Doc);
+ }
+
+ // We've got a normal key.
+ return Key = parseBlockNode();
+}
+
+Node *KeyValueNode::getValue() {
+ if (Value)
+ return Value;
+ getKey()->skip();
+ if (failed())
+ return Value = new (getAllocator()) NullNode(Doc);
+
+ // Handle implicit null values.
+ {
+ Token &t = peekNext();
+ if ( t.Kind == Token::TK_BlockEnd
+ || t.Kind == Token::TK_FlowMappingEnd
+ || t.Kind == Token::TK_Key
+ || t.Kind == Token::TK_FlowEntry
+ || t.Kind == Token::TK_Error) {
+ return Value = new (getAllocator()) NullNode(Doc);
+ }
+
+ if (t.Kind != Token::TK_Value) {
+ setError("Unexpected token in Key Value.", t);
+ return Value = new (getAllocator()) NullNode(Doc);
+ }
+ getNext(); // skip TK_Value.
+ }
+
+ // Handle explicit null values.
+ Token &t = peekNext();
+ if (t.Kind == Token::TK_BlockEnd || t.Kind == Token::TK_Key) {
+ return Value = new (getAllocator()) NullNode(Doc);
+ }
+
+ // We got a normal value.
+ return Value = parseBlockNode();
+}
+
+void MappingNode::increment() {
+ if (failed()) {
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ return;
+ }
+ if (CurrentEntry) {
+ CurrentEntry->skip();
+ if (Type == MT_Inline) {
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ return;
+ }
+ }
+ Token T = peekNext();
+ if (T.Kind == Token::TK_Key || T.Kind == Token::TK_Scalar) {
+ // KeyValueNode eats the TK_Key. That way it can detect null keys.
+ CurrentEntry = new (getAllocator()) KeyValueNode(Doc);
+ } else if (Type == MT_Block) {
+ switch (T.Kind) {
+ case Token::TK_BlockEnd:
+ getNext();
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ break;
+ default:
+ setError("Unexpected token. Expected Key or Block End", T);
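+      // Fall through.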
+ case Token::TK_Error:
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ }
+ } else {
+ switch (T.Kind) {
+ case Token::TK_FlowEntry:
+ // Eat the flow entry and recurse.
+ getNext();
+ return increment();
+ case Token::TK_FlowMappingEnd:
+ getNext();
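+      // Fall through.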
+ case Token::TK_Error:
+ // Set this to end iterator.
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ break;
+ default:
+ setError( "Unexpected token. Expected Key, Flow Entry, or Flow "
+ "Mapping End."
+ , T);
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ }
+ }
+}
+
+void SequenceNode::increment() {
+ if (failed()) {
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ return;
+ }
+ if (CurrentEntry)
+ CurrentEntry->skip();
+ Token T = peekNext();
+ if (SeqType == ST_Block) {
+ switch (T.Kind) {
+ case Token::TK_BlockEntry:
+ getNext();
+ CurrentEntry = parseBlockNode();
+ if (CurrentEntry == 0) { // An error occurred.
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ }
+ break;
+ case Token::TK_BlockEnd:
+ getNext();
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ break;
+ default:
+ setError( "Unexpected token. Expected Block Entry or Block End."
+ , T);
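+      // Fall through.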
+ case Token::TK_Error:
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ }
+ } else if (SeqType == ST_Indentless) {
+ switch (T.Kind) {
+ case Token::TK_BlockEntry:
+ getNext();
+ CurrentEntry = parseBlockNode();
+ if (CurrentEntry == 0) { // An error occurred.
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ }
+ break;
+ default:
+ case Token::TK_Error:
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ }
+ } else if (SeqType == ST_Flow) {
+ switch (T.Kind) {
+ case Token::TK_FlowEntry:
+ // Eat the flow entry and recurse.
+ getNext();
+ WasPreviousTokenFlowEntry = true;
+ return increment();
+ case Token::TK_FlowSequenceEnd:
+ getNext();
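+      // Fall through.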
+ case Token::TK_Error:
+ // Set this to end iterator.
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ break;
+ case Token::TK_StreamEnd:
+ case Token::TK_DocumentEnd:
+ case Token::TK_DocumentStart:
+ setError("Could not find closing ]!", T);
+ // Set this to end iterator.
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ break;
+ default:
+ if (!WasPreviousTokenFlowEntry) {
+ setError("Expected , between entries!", T);
+ IsAtEnd = true;
+ CurrentEntry = 0;
+ break;
+ }
+ // Otherwise it must be a flow entry.
+ CurrentEntry = parseBlockNode();
+ if (!CurrentEntry) {
+ IsAtEnd = true;
+ }
+ WasPreviousTokenFlowEntry = false;
+ break;
+ }
+ }
+}
+
+Document::Document(Stream &S) : stream(S), Root(0) {
+ if (parseDirectives())
+ expectToken(Token::TK_DocumentStart);
+ Token &T = peekNext();
+ if (T.Kind == Token::TK_DocumentStart)
+ getNext();
+}
+
+bool Document::skip() {
+ if (stream.scanner->failed())
+ return false;
+ if (!Root)
+ getRoot();
+ Root->skip();
+ Token &T = peekNext();
+ if (T.Kind == Token::TK_StreamEnd)
+ return false;
+ if (T.Kind == Token::TK_DocumentEnd) {
+ getNext();
+ return skip();
+ }
+ return true;
+}
+
+Token &Document::peekNext() {
+ return stream.scanner->peekNext();
+}
+
+Token Document::getNext() {
+ return stream.scanner->getNext();
+}
+
+void Document::setError(const Twine &Message, Token &Location) const {
+ stream.scanner->setError(Message, Location.Range.begin());
+}
+
+bool Document::failed() const {
+ return stream.scanner->failed();
+}
+
+Node *Document::parseBlockNode() {
+ Token T = peekNext();
+ // Handle properties.
+ Token AnchorInfo;
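+  // Loop over anchor and tag properties until a non-property token is seen.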
+parse_property:
+ switch (T.Kind) {
+ case Token::TK_Alias:
+ getNext();
+ return new (NodeAllocator) AliasNode(stream.CurrentDoc, T.Range.substr(1));
+ case Token::TK_Anchor:
+ if (AnchorInfo.Kind == Token::TK_Anchor) {
+ setError("Already encountered an anchor for this node!", T);
+ return 0;
+ }
+ AnchorInfo = getNext(); // Consume TK_Anchor.
+ T = peekNext();
+ goto parse_property;
+ case Token::TK_Tag:
+ getNext(); // Skip TK_Tag.
+ T = peekNext();
+ goto parse_property;
+ default:
+ break;
+ }
+
+ switch (T.Kind) {
+ case Token::TK_BlockEntry:
+ // We got an unindented BlockEntry sequence. This is not terminated with
+ // a BlockEnd.
+ // Don't eat the TK_BlockEntry, SequenceNode needs it.
+ return new (NodeAllocator) SequenceNode( stream.CurrentDoc
+ , AnchorInfo.Range.substr(1)
+ , SequenceNode::ST_Indentless);
+ case Token::TK_BlockSequenceStart:
+ getNext();
+ return new (NodeAllocator)
+ SequenceNode( stream.CurrentDoc
+ , AnchorInfo.Range.substr(1)
+ , SequenceNode::ST_Block);
+ case Token::TK_BlockMappingStart:
+ getNext();
+ return new (NodeAllocator)
+ MappingNode( stream.CurrentDoc
+ , AnchorInfo.Range.substr(1)
+ , MappingNode::MT_Block);
+ case Token::TK_FlowSequenceStart:
+ getNext();
+ return new (NodeAllocator)
+ SequenceNode( stream.CurrentDoc
+ , AnchorInfo.Range.substr(1)
+ , SequenceNode::ST_Flow);
+ case Token::TK_FlowMappingStart:
+ getNext();
+ return new (NodeAllocator)
+ MappingNode( stream.CurrentDoc
+ , AnchorInfo.Range.substr(1)
+ , MappingNode::MT_Flow);
+ case Token::TK_Scalar:
+ getNext();
+ return new (NodeAllocator)
+ ScalarNode( stream.CurrentDoc
+ , AnchorInfo.Range.substr(1)
+ , T.Range);
+ case Token::TK_Key:
+ // Don't eat the TK_Key, KeyValueNode expects it.
+ return new (NodeAllocator)
+ MappingNode( stream.CurrentDoc
+ , AnchorInfo.Range.substr(1)
+ , MappingNode::MT_Inline);
+ case Token::TK_DocumentStart:
+ case Token::TK_DocumentEnd:
+ case Token::TK_StreamEnd:
+ default:
+ // TODO: Properly handle tags. "[!!str ]" should resolve to !!str "", not
+ // !!null null.
+ return new (NodeAllocator) NullNode(stream.CurrentDoc);
+ case Token::TK_Error:
+ return 0;
+ }
+ llvm_unreachable("Control flow shouldn't reach here.");
+ return 0;
+}
+
+bool Document::parseDirectives() {
+ bool isDirective = false;
+ while (true) {
+ Token T = peekNext();
+ if (T.Kind == Token::TK_TagDirective) {
+ handleTagDirective(getNext());
+ isDirective = true;
+ } else if (T.Kind == Token::TK_VersionDirective) {
+ stream.handleYAMLDirective(getNext());
+ isDirective = true;
+ } else
+ break;
+ }
+ return isDirective;
+}
+
+bool Document::expectToken(int TK) {
+ Token T = getNext();
+ if (T.Kind != TK) {
+ setError("Unexpected token", T);
+ return false;
+ }
+ return true;
+}
+
+OwningPtr<Document> document_iterator::NullDoc;
diff --git a/contrib/llvm/lib/Support/raw_ostream.cpp b/contrib/llvm/lib/Support/raw_ostream.cpp
index 4927e9a..72d3986 100644
--- a/contrib/llvm/lib/Support/raw_ostream.cpp
+++ b/contrib/llvm/lib/Support/raw_ostream.cpp
@@ -20,6 +20,7 @@
#include "llvm/Config/config.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/system_error.h"
#include "llvm/ADT/STLExtras.h"
#include <cctype>
#include <cerrno>
diff --git a/contrib/llvm/lib/TableGen/Error.cpp b/contrib/llvm/lib/TableGen/Error.cpp
index 5b2cbbf..5071ee7 100644
--- a/contrib/llvm/lib/TableGen/Error.cpp
+++ b/contrib/llvm/lib/TableGen/Error.cpp
@@ -21,11 +21,11 @@ namespace llvm {
SourceMgr SrcMgr;
void PrintError(SMLoc ErrorLoc, const Twine &Msg) {
- SrcMgr.PrintMessage(ErrorLoc, Msg, "error");
+ SrcMgr.PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg);
}
void PrintError(const char *Loc, const Twine &Msg) {
- SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), Msg, "error");
+ SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), SourceMgr::DK_Error, Msg);
}
void PrintError(const Twine &Msg) {
diff --git a/contrib/llvm/lib/TableGen/Record.cpp b/contrib/llvm/lib/TableGen/Record.cpp
index b7c51ca..93eed24 100644
--- a/contrib/llvm/lib/TableGen/Record.cpp
+++ b/contrib/llvm/lib/TableGen/Record.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/Format.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
@@ -29,6 +30,8 @@ using namespace llvm;
// std::string wrapper for DenseMap purposes
//===----------------------------------------------------------------------===//
+namespace llvm {
+
/// TableGenStringKey - This is a wrapper for std::string suitable for
/// using as a key to a DenseMap. Because there isn't a particularly
/// good way to indicate tombstone or empty keys for strings, we want
@@ -43,14 +46,16 @@ public:
TableGenStringKey(const char *str) : data(str) {}
const std::string &str() const { return data; }
-
+
+ friend hash_code hash_value(const TableGenStringKey &Value) {
+ using llvm::hash_value;
+ return hash_value(Value.str());
+ }
private:
std::string data;
};
/// Specialize DenseMapInfo for TableGenStringKey.
-namespace llvm {
-
template<> struct DenseMapInfo<TableGenStringKey> {
static inline TableGenStringKey getEmptyKey() {
TableGenStringKey Empty("<<<EMPTY KEY>>>");
@@ -61,7 +66,8 @@ template<> struct DenseMapInfo<TableGenStringKey> {
return Tombstone;
}
static unsigned getHashValue(const TableGenStringKey& Val) {
- return HashString(Val.str());
+ using llvm::hash_value;
+ return hash_value(Val);
}
static bool isEqual(const TableGenStringKey& LHS,
const TableGenStringKey& RHS) {
@@ -69,7 +75,7 @@ template<> struct DenseMapInfo<TableGenStringKey> {
}
};
-}
+} // namespace llvm
//===----------------------------------------------------------------------===//
// Type implementations
@@ -78,9 +84,9 @@ template<> struct DenseMapInfo<TableGenStringKey> {
BitRecTy BitRecTy::Shared;
IntRecTy IntRecTy::Shared;
StringRecTy StringRecTy::Shared;
-CodeRecTy CodeRecTy::Shared;
DagRecTy DagRecTy::Shared;
+void RecTy::anchor() { }
void RecTy::dump() const { print(errs()); }
ListRecTy *RecTy::getListTy() {
@@ -315,12 +321,6 @@ Init *ListRecTy::convertValue(TypedInit *TI) {
return 0;
}
-Init *CodeRecTy::convertValue(TypedInit *TI) {
- if (TI->getType()->typeIsConvertibleTo(this))
- return TI;
- return 0;
-}
-
Init *DagRecTy::convertValue(TypedInit *TI) {
if (TI->getType()->typeIsConvertibleTo(this))
return TI;
@@ -444,13 +444,18 @@ RecTy *llvm::resolveTypes(RecTy *T1, RecTy *T2) {
// Initializer implementations
//===----------------------------------------------------------------------===//
+void Init::anchor() { }
void Init::dump() const { return print(errs()); }
+void UnsetInit::anchor() { }
+
UnsetInit *UnsetInit::get() {
static UnsetInit TheInit;
return &TheInit;
}
+void BitInit::anchor() { }
+
BitInit *BitInit::get(bool V) {
static BitInit True(true);
static BitInit False(false);
@@ -565,7 +570,9 @@ IntInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
return BitsInit::get(NewBits);
}
-StringInit *StringInit::get(const std::string &V) {
+void StringInit::anchor() { }
+
+StringInit *StringInit::get(StringRef V) {
typedef StringMap<StringInit *> Pool;
static Pool ThePool;
@@ -574,15 +581,6 @@ StringInit *StringInit::get(const std::string &V) {
return I;
}
-CodeInit *CodeInit::get(const std::string &V) {
- typedef StringMap<CodeInit *> Pool;
- static Pool ThePool;
-
- CodeInit *&I = ThePool[V];
- if (!I) I = new CodeInit(V);
- return I;
-}
-
static void ProfileListInit(FoldingSetNodeID &ID,
ArrayRef<Init *> Range,
RecTy *EltTy) {
@@ -735,7 +733,6 @@ UnOpInit *UnOpInit::get(UnaryOp opc, Init *lhs, RecTy *Type) {
Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
switch (getOpcode()) {
- default: assert(0 && "Unknown unop");
case CAST: {
if (getType()->getAsString() == "string") {
StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
@@ -747,6 +744,11 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
if (LHSd) {
return StringInit::get(LHSd->getDef()->getName());
}
+
+ IntInit *LHSi = dynamic_cast<IntInit*>(LHS);
+ if (LHSi) {
+ return StringInit::get(LHSi->getAsString());
+ }
} else {
StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
if (LHSs) {
@@ -760,7 +762,9 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
return VarInit::get(Name, RV->getType());
}
- std::string TemplateArgName = CurRec->getName()+":"+Name;
+ Init *TemplateArgName = QualifyName(*CurRec, CurMultiClass, Name,
+ ":");
+
if (CurRec->isTemplateArg(TemplateArgName)) {
const RecordVal *RV = CurRec->getValue(TemplateArgName);
assert(RV && "Template arg doesn't exist??");
@@ -773,7 +777,8 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
}
if (CurMultiClass) {
- std::string MCName = CurMultiClass->Rec.getName()+"::"+Name;
+ Init *MCName = QualifyName(CurMultiClass->Rec, CurMultiClass, Name, "::");
+
if (CurMultiClass->Rec.isTemplateArg(MCName)) {
const RecordVal *RV = CurMultiClass->Rec.getValue(MCName);
assert(RV && "Template arg doesn't exist??");
@@ -885,7 +890,6 @@ BinOpInit *BinOpInit::get(BinaryOp opc, Init *lhs,
Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
switch (getOpcode()) {
- default: assert(0 && "Unknown binop");
case CONCAT: {
DagInit *LHSs = dynamic_cast<DagInit*>(LHS);
DagInit *RHSs = dynamic_cast<DagInit*>(RHS);
@@ -944,7 +948,7 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
int64_t LHSv = LHSi->getValue(), RHSv = RHSi->getValue();
int64_t Result;
switch (getOpcode()) {
- default: assert(0 && "Bad opcode!");
+ default: llvm_unreachable("Bad opcode!");
case SHL: Result = LHSv << RHSv; break;
case SRA: Result = LHSv >> RHSv; break;
case SRL: Result = (uint64_t)LHSv >> (uint64_t)RHSv; break;
@@ -1134,7 +1138,6 @@ static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type,
Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
switch (getOpcode()) {
- default: assert(0 && "Unknown binop");
case SUBST: {
DefInit *LHSd = dynamic_cast<DefInit*>(LHS);
VarInit *LHSv = dynamic_cast<VarInit*>(LHS);
@@ -1298,7 +1301,12 @@ TypedInit::convertInitListSlice(const std::vector<unsigned> &Elements) const {
VarInit *VarInit::get(const std::string &VN, RecTy *T) {
- typedef std::pair<RecTy *, TableGenStringKey> Key;
+ Init *Value = StringInit::get(VN);
+ return VarInit::get(Value, T);
+}
+
+VarInit *VarInit::get(Init *VN, RecTy *T) {
+ typedef std::pair<RecTy *, Init *> Key;
typedef DenseMap<Key, VarInit *> Pool;
static Pool ThePool;
@@ -1309,12 +1317,19 @@ VarInit *VarInit::get(const std::string &VN, RecTy *T) {
return I;
}
+const std::string &VarInit::getName() const {
+ StringInit *NameString =
+ dynamic_cast<StringInit *>(getNameInit());
+ assert(NameString && "VarInit name is not a string!");
+ return NameString->getValue();
+}
+
Init *VarInit::resolveBitReference(Record &R, const RecordVal *IRV,
unsigned Bit) const {
- if (R.isTemplateArg(getName())) return 0;
- if (IRV && IRV->getName() != getName()) return 0;
+ if (R.isTemplateArg(getNameInit())) return 0;
+ if (IRV && IRV->getNameInit() != getNameInit()) return 0;
- RecordVal *RV = R.getValue(getName());
+ RecordVal *RV = R.getValue(getNameInit());
assert(RV && "Reference to a non-existent variable?");
assert(dynamic_cast<BitsInit*>(RV->getValue()));
BitsInit *BI = (BitsInit*)RV->getValue();
@@ -1333,10 +1348,10 @@ Init *VarInit::resolveBitReference(Record &R, const RecordVal *IRV,
Init *VarInit::resolveListElementReference(Record &R,
const RecordVal *IRV,
unsigned Elt) const {
- if (R.isTemplateArg(getName())) return 0;
- if (IRV && IRV->getName() != getName()) return 0;
+ if (R.isTemplateArg(getNameInit())) return 0;
+ if (IRV && IRV->getNameInit() != getNameInit()) return 0;
- RecordVal *RV = R.getValue(getName());
+ RecordVal *RV = R.getValue(getNameInit());
assert(RV && "Reference to a non-existent variable?");
ListInit *LI = dynamic_cast<ListInit*>(RV->getValue());
if (!LI) {
@@ -1659,7 +1674,7 @@ void RecordVal::dump() const { errs() << *this; }
void RecordVal::print(raw_ostream &OS, bool PrintSem) const {
if (getPrefix()) OS << "field ";
- OS << *getType() << " " << getName();
+ OS << *getType() << " " << getNameInitAsString();
if (getValue())
OS << " = " << *getValue();
@@ -1669,13 +1684,22 @@ void RecordVal::print(raw_ostream &OS, bool PrintSem) const {
unsigned Record::LastID = 0;
+void Record::init() {
+ checkName();
+
+ // Every record potentially has a def at the top. This value is
+ // replaced with the top-level def name at instantiation time.
+ RecordVal DN("NAME", StringRecTy::get(), 0);
+ addValue(DN);
+}
+
void Record::checkName() {
// Ensure the record name has string type.
const TypedInit *TypedName = dynamic_cast<const TypedInit *>(Name);
assert(TypedName && "Record name is not typed!");
RecTy *Type = TypedName->getType();
if (dynamic_cast<StringRecTy *>(Type) == 0) {
- llvm_unreachable("Record name is not a string!");
+ throw "Record name is not a string!";
}
}
@@ -1695,20 +1719,13 @@ const std::string &Record::getName() const {
void Record::setName(Init *NewName) {
if (TrackedRecords.getDef(Name->getAsUnquotedString()) == this) {
TrackedRecords.removeDef(Name->getAsUnquotedString());
- Name = NewName;
TrackedRecords.addDef(this);
- } else {
+ } else if (TrackedRecords.getClass(Name->getAsUnquotedString()) == this) {
TrackedRecords.removeClass(Name->getAsUnquotedString());
- Name = NewName;
TrackedRecords.addClass(this);
- }
+ } // Otherwise this isn't yet registered.
+ Name = NewName;
checkName();
- // Since the Init for the name was changed, see if we can resolve
- // any of it using members of the Record.
- Init *ComputedName = Name->resolveReferences(*this, 0);
- if (ComputedName != Name) {
- setName(ComputedName);
- }
// DO NOT resolve record values to the name at this point because
// there might be default values for arguments of this def. Those
// arguments might not have been resolved yet so we don't want to
@@ -1731,17 +1748,25 @@ void Record::setName(const std::string &Name) {
/// references.
void Record::resolveReferencesTo(const RecordVal *RV) {
for (unsigned i = 0, e = Values.size(); i != e; ++i) {
+    if (RV == &Values[i]) // Skip resolving the same field as the given one.
+ continue;
if (Init *V = Values[i].getValue())
Values[i].setValue(V->resolveReferences(*this, RV));
}
+ Init *OldName = getNameInit();
+ Init *NewName = Name->resolveReferences(*this, RV);
+ if (NewName != OldName) {
+ // Re-register with RecordKeeper.
+ setName(NewName);
+ }
}
void Record::dump() const { errs() << *this; }
raw_ostream &llvm::operator<<(raw_ostream &OS, const Record &R) {
- OS << R.getName();
+ OS << R.getNameInitAsString();
- const std::vector<std::string> &TArgs = R.getTemplateArgs();
+ const std::vector<Init *> &TArgs = R.getTemplateArgs();
if (!TArgs.empty()) {
OS << "<";
for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
@@ -1758,7 +1783,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const Record &R) {
if (!SC.empty()) {
OS << "\t//";
for (unsigned i = 0, e = SC.size(); i != e; ++i)
- OS << " " << SC[i]->getName();
+ OS << " " << SC[i]->getNameInitAsString();
}
OS << "\n";
@@ -1954,18 +1979,6 @@ DagInit *Record::getValueAsDag(StringRef FieldName) const {
"' does not have a dag initializer!";
}
-std::string Record::getValueAsCode(StringRef FieldName) const {
- const RecordVal *R = getValue(FieldName);
- if (R == 0 || R->getValue() == 0)
- throw "Record `" + getName() + "' does not have a field named `" +
- FieldName.str() + "'!\n";
-
- if (CodeInit *CI = dynamic_cast<CodeInit*>(R->getValue()))
- return CI->getValue();
- throw "Record `" + getName() + "', field `" + FieldName.str() +
- "' does not have a code initializer!";
-}
-
void MultiClass::dump() const {
errs() << "Record:\n";
@@ -2017,3 +2030,39 @@ RecordKeeper::getAllDerivedDefinitions(const std::string &ClassName) const {
return Defs;
}
+/// QualifyName - Return an Init with a qualifier prefix referring
+/// to CurRec's name.
+Init *llvm::QualifyName(Record &CurRec, MultiClass *CurMultiClass,
+ Init *Name, const std::string &Scoper) {
+ RecTy *Type = dynamic_cast<TypedInit *>(Name)->getType();
+
+ BinOpInit *NewName =
+ BinOpInit::get(BinOpInit::STRCONCAT,
+ BinOpInit::get(BinOpInit::STRCONCAT,
+ CurRec.getNameInit(),
+ StringInit::get(Scoper),
+ Type)->Fold(&CurRec, CurMultiClass),
+ Name,
+ Type);
+
+ if (CurMultiClass && Scoper != "::") {
+ NewName =
+ BinOpInit::get(BinOpInit::STRCONCAT,
+ BinOpInit::get(BinOpInit::STRCONCAT,
+ CurMultiClass->Rec.getNameInit(),
+ StringInit::get("::"),
+ Type)->Fold(&CurRec, CurMultiClass),
+ NewName->Fold(&CurRec, CurMultiClass),
+ Type);
+ }
+
+ return NewName->Fold(&CurRec, CurMultiClass);
+}
+
+/// QualifyName - Return an Init with a qualifier prefix referring
+/// to CurRec's name.
+Init *llvm::QualifyName(Record &CurRec, MultiClass *CurMultiClass,
+ const std::string &Name,
+ const std::string &Scoper) {
+ return QualifyName(CurRec, CurMultiClass, StringInit::get(Name), Scoper);
+}
diff --git a/contrib/llvm/lib/TableGen/TGLexer.cpp b/contrib/llvm/lib/TableGen/TGLexer.cpp
index 8c1b429..ff322e7 100644
--- a/contrib/llvm/lib/TableGen/TGLexer.cpp
+++ b/contrib/llvm/lib/TableGen/TGLexer.cpp
@@ -15,7 +15,6 @@
#include "llvm/TableGen/Error.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Config/config.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include <cctype>
@@ -23,6 +22,9 @@
#include <cstdlib>
#include <cstring>
#include <cerrno>
+
+#include "llvm/Config/config.h" // for strtoull()/strtoll() define
+
using namespace llvm;
TGLexer::TGLexer(SourceMgr &SM) : SrcMgr(SM) {
@@ -80,6 +82,10 @@ int TGLexer::getNextChar() {
}
}
+int TGLexer::peekNextChar(int Index) {
+ return *(CurPtr + Index);
+}
+
tgtok::TokKind TGLexer::LexToken() {
TokStart = CurPtr;
// This always consumes at least one character.
@@ -87,10 +93,10 @@ tgtok::TokKind TGLexer::LexToken() {
switch (CurChar) {
default:
- // Handle letters: [a-zA-Z_#]
- if (isalpha(CurChar) || CurChar == '_' || CurChar == '#')
+ // Handle letters: [a-zA-Z_]
+ if (isalpha(CurChar) || CurChar == '_')
return LexIdentifier();
-
+
// Unknown character, emit an error.
return ReturnError(TokStart, "Unexpected character");
case EOF: return tgtok::Eof;
@@ -107,6 +113,7 @@ tgtok::TokKind TGLexer::LexToken() {
case ')': return tgtok::r_paren;
case '=': return tgtok::equal;
case '?': return tgtok::question;
+ case '#': return tgtok::paste;
case 0:
case ' ':
@@ -128,8 +135,44 @@ tgtok::TokKind TGLexer::LexToken() {
return LexToken();
case '-': case '+':
case '0': case '1': case '2': case '3': case '4': case '5': case '6':
- case '7': case '8': case '9':
+ case '7': case '8': case '9': {
+ int NextChar = 0;
+ if (isdigit(CurChar)) {
+ // Allow identifiers to start with a number if it is followed by
+ // an identifier. This can happen with paste operations like
+ // foo#8i.
+ int i = 0;
+ do {
+ NextChar = peekNextChar(i++);
+ } while (isdigit(NextChar));
+
+ if (NextChar == 'x' || NextChar == 'b') {
+        // If this is [0-9]b[01] or [0-9]x[0-9a-fA-F] this is most
+ // likely a number.
+ int NextNextChar = peekNextChar(i);
+ switch (NextNextChar) {
+ default:
+ break;
+ case '0': case '1':
+ if (NextChar == 'b')
+ return LexNumber();
+ // Fallthrough
+ case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ if (NextChar == 'x')
+ return LexNumber();
+ break;
+ }
+ }
+ }
+
+ if (isalpha(NextChar) || NextChar == '_')
+ return LexIdentifier();
+
return LexNumber();
+ }
case '"': return LexString();
case '$': return LexVarName();
case '[': return LexBracket();
@@ -210,8 +253,7 @@ tgtok::TokKind TGLexer::LexIdentifier() {
const char *IdentStart = TokStart;
// Match the rest of the identifier regex: [0-9a-zA-Z_#]*
- while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_' ||
- *CurPtr == '#')
+ while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
++CurPtr;
// Check to see if this identifier is a keyword.
@@ -232,6 +274,7 @@ tgtok::TokKind TGLexer::LexIdentifier() {
.Case("dag", tgtok::Dag)
.Case("class", tgtok::Class)
.Case("def", tgtok::Def)
+ .Case("foreach", tgtok::Foreach)
.Case("defm", tgtok::Defm)
.Case("multiclass", tgtok::MultiClass)
.Case("field", tgtok::Field)
diff --git a/contrib/llvm/lib/TableGen/TGLexer.h b/contrib/llvm/lib/TableGen/TGLexer.h
index 84d328b..8a850b5 100644
--- a/contrib/llvm/lib/TableGen/TGLexer.h
+++ b/contrib/llvm/lib/TableGen/TGLexer.h
@@ -39,9 +39,10 @@ namespace tgtok {
colon, semi, // : ;
comma, period, // , .
equal, question, // = ?
-
+ paste, // #
+
// Keywords.
- Bit, Bits, Class, Code, Dag, Def, Defm, Field, In, Int, Let, List,
+ Bit, Bits, Class, Code, Dag, Def, Foreach, Defm, Field, In, Int, Let, List,
MultiClass, String,
// !keywords.
@@ -109,6 +110,7 @@ private:
tgtok::TokKind ReturnError(const char *Loc, const Twine &Msg);
int getNextChar();
+ int peekNextChar(int Index);
void SkipBCPLComment();
bool SkipCComment();
tgtok::TokKind LexIdentifier();
diff --git a/contrib/llvm/lib/TableGen/TGParser.cpp b/contrib/llvm/lib/TableGen/TGParser.cpp
index e7f00ba..04c4fc1 100644
--- a/contrib/llvm/lib/TableGen/TGParser.cpp
+++ b/contrib/llvm/lib/TableGen/TGParser.cpp
@@ -64,7 +64,7 @@ bool TGParser::AddValue(Record *CurRec, SMLoc Loc, const RecordVal &RV) {
if (CurRec == 0)
CurRec = &CurMultiClass->Rec;
- if (RecordVal *ERV = CurRec->getValue(RV.getName())) {
+ if (RecordVal *ERV = CurRec->getValue(RV.getNameInit())) {
// The value already exists in the class, treat this as a set.
if (ERV->setValue(RV.getValue()))
return Error(Loc, "New definition of '" + RV.getName() + "' of type '" +
@@ -79,7 +79,7 @@ bool TGParser::AddValue(Record *CurRec, SMLoc Loc, const RecordVal &RV) {
/// SetValue -
/// Return true on error, false on success.
-bool TGParser::SetValue(Record *CurRec, SMLoc Loc, const std::string &ValName,
+bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName,
const std::vector<unsigned> &BitList, Init *V) {
if (!V) return false;
@@ -87,13 +87,14 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, const std::string &ValName,
RecordVal *RV = CurRec->getValue(ValName);
if (RV == 0)
- return Error(Loc, "Value '" + ValName + "' unknown!");
+ return Error(Loc, "Value '" + ValName->getAsUnquotedString()
+ + "' unknown!");
// Do not allow assignments like 'X = X'. This will just cause infinite loops
// in the resolution machinery.
if (BitList.empty())
if (VarInit *VI = dynamic_cast<VarInit*>(V))
- if (VI->getName() == ValName)
+ if (VI->getNameInit() == ValName)
return false;
// If we are assigning to a subset of the bits in the value... then we must be
@@ -103,7 +104,8 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, const std::string &ValName,
if (!BitList.empty()) {
BitsInit *CurVal = dynamic_cast<BitsInit*>(RV->getValue());
if (CurVal == 0)
- return Error(Loc, "Value '" + ValName + "' is not a bits type");
+ return Error(Loc, "Value '" + ValName->getAsUnquotedString()
+ + "' is not a bits type");
// Convert the incoming value to a bits type of the appropriate size...
Init *BI = V->convertInitializerTo(BitsRecTy::get(BitList.size()));
@@ -123,7 +125,7 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, const std::string &ValName,
unsigned Bit = BitList[i];
if (NewBits[Bit])
return Error(Loc, "Cannot set bit #" + utostr(Bit) + " of value '" +
- ValName + "' more than once");
+ ValName->getAsUnquotedString() + "' more than once");
NewBits[Bit] = BInit->getBit(i);
}
@@ -135,9 +137,10 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, const std::string &ValName,
}
if (RV->setValue(V))
- return Error(Loc, "Value '" + ValName + "' of type '" +
- RV->getType()->getAsString() +
- "' is incompatible with initializer '" + V->getAsString() +"'");
+ return Error(Loc, "Value '" + ValName->getAsUnquotedString() + "' of type '"
+ + RV->getType()->getAsString() +
+ "' is incompatible with initializer '" + V->getAsString()
+ + "'");
return false;
}
@@ -151,7 +154,7 @@ bool TGParser::AddSubClass(Record *CurRec, SubClassReference &SubClass) {
if (AddValue(CurRec, SubClass.RefLoc, Vals[i]))
return true;
- const std::vector<std::string> &TArgs = SC->getTemplateArgs();
+ const std::vector<Init *> &TArgs = SC->getTemplateArgs();
// Ensure that an appropriate number of template arguments are specified.
if (TArgs.size() < SubClass.TemplateArgs.size())
@@ -174,8 +177,8 @@ bool TGParser::AddSubClass(Record *CurRec, SubClassReference &SubClass) {
} else if (!CurRec->getValue(TArgs[i])->getValue()->isComplete()) {
return Error(SubClass.RefLoc,"Value not specified for template argument #"
- + utostr(i) + " (" + TArgs[i] + ") of subclass '" +
- SC->getName() + "'!");
+ + utostr(i) + " (" + TArgs[i]->getAsUnquotedString()
+ + ") of subclass '" + SC->getNameInitAsString() + "'!");
}
}
@@ -230,7 +233,7 @@ bool TGParser::AddSubMultiClass(MultiClass *CurMC,
CurMC->DefPrototypes.push_back(NewDef);
}
- const std::vector<std::string> &SMCTArgs = SMC->Rec.getTemplateArgs();
+ const std::vector<Init *> &SMCTArgs = SMC->Rec.getTemplateArgs();
// Ensure that an appropriate number of template arguments are
// specified.
@@ -278,14 +281,121 @@ bool TGParser::AddSubMultiClass(MultiClass *CurMC,
} else if (!CurRec->getValue(SMCTArgs[i])->getValue()->isComplete()) {
return Error(SubMultiClass.RefLoc,
"Value not specified for template argument #"
- + utostr(i) + " (" + SMCTArgs[i] + ") of subclass '" +
- SMC->Rec.getName() + "'!");
+ + utostr(i) + " (" + SMCTArgs[i]->getAsUnquotedString()
+ + ") of subclass '" + SMC->Rec.getNameInitAsString() + "'!");
}
}
return false;
}
+/// ProcessForeachDefs - Given a record, apply all of the variable
+/// values in all surrounding foreach loops, creating new records for
+/// each combination of values.
+bool TGParser::ProcessForeachDefs(Record *CurRec, MultiClass *CurMultiClass,
+ SMLoc Loc) {
+ // We want to instantiate a new copy of CurRec for each combination
+ // of nested loop iterator values. We don't want top instantiate
+ // any copies until we have values for each loop iterator.
+ IterSet IterVals;
+ for (LoopVector::iterator Loop = Loops.begin(), LoopEnd = Loops.end();
+ Loop != LoopEnd;
+ ++Loop) {
+ // Process this loop.
+ if (ProcessForeachDefs(CurRec, CurMultiClass, Loc,
+ IterVals, *Loop, Loop+1)) {
+ Error(Loc,
+ "Could not process loops for def " + CurRec->getNameInitAsString());
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// ProcessForeachDefs - Given a record, a loop and a loop iterator,
+/// apply each of the variable values in this loop and then process
+/// subloops.
+bool TGParser::ProcessForeachDefs(Record *CurRec, MultiClass *CurMultiClass,
+ SMLoc Loc, IterSet &IterVals,
+ ForeachLoop &CurLoop,
+ LoopVector::iterator NextLoop) {
+ Init *IterVar = CurLoop.IterVar;
+ ListInit *List = dynamic_cast<ListInit *>(CurLoop.ListValue);
+
+ if (List == 0) {
+ Error(Loc, "Loop list is not a list");
+ return true;
+ }
+
+ // Process each value.
+ for (int64_t i = 0; i < List->getSize(); ++i) {
+ Init *ItemVal = List->resolveListElementReference(*CurRec, 0, i);
+ IterVals.push_back(IterRecord(IterVar, ItemVal));
+
+ if (IterVals.size() == Loops.size()) {
+ // Ok, we have all of the iterator values for this point in the
+ // iteration space. Instantiate a new record to reflect this
+ // combination of values.
+ Record *IterRec = new Record(*CurRec);
+
+ // Set the iterator values now.
+ for (IterSet::iterator i = IterVals.begin(), iend = IterVals.end();
+ i != iend;
+ ++i) {
+ VarInit *IterVar = dynamic_cast<VarInit *>(i->IterVar);
+ if (IterVar == 0) {
+ Error(Loc, "foreach iterator is unresolved");
+ return true;
+ }
+
+ TypedInit *IVal = dynamic_cast<TypedInit *>(i->IterValue);
+ if (IVal == 0) {
+ Error(Loc, "foreach iterator value is untyped");
+ return true;
+ }
+
+ IterRec->addValue(RecordVal(IterVar->getName(), IVal->getType(), false));
+
+ if (SetValue(IterRec, Loc, IterVar->getName(),
+ std::vector<unsigned>(), IVal)) {
+ Error(Loc, "when instantiating this def");
+ return true;
+ }
+
+ // Resolve it next.
+ IterRec->resolveReferencesTo(IterRec->getValue(IterVar->getName()));
+
+ // Remove it.
+ IterRec->removeValue(IterVar->getName());
+ }
+
+ if (Records.getDef(IterRec->getNameInitAsString())) {
+ Error(Loc, "def already exists: " + IterRec->getNameInitAsString());
+ return true;
+ }
+
+ Records.addDef(IterRec);
+ IterRec->resolveReferences();
+ }
+
+ if (NextLoop != Loops.end()) {
+ // Process nested loops.
+ if (ProcessForeachDefs(CurRec, CurMultiClass, Loc, IterVals, *NextLoop,
+ NextLoop+1)) {
+ Error(Loc,
+ "Could not process loops for def " +
+ CurRec->getNameInitAsString());
+ return true;
+ }
+ }
+
+ // We're done with this iterator.
+ IterVals.pop_back();
+ }
+ return false;
+}
+
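ProcessForeachDefs recurses over the cross product of every active loop's values and instantiates one copy of the def per point in the iteration space. A rough sketch of the effect in .td form (hypothetical names, assuming integer iterators stringify through the paste operator's implicit cast):

    foreach i = [0, 1] in
    foreach j = [0, 1] in
      def R#i#j;   // instantiates R00, R01, R10 and R11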
//===----------------------------------------------------------------------===//
// Parser Code
//===----------------------------------------------------------------------===//
@@ -293,7 +403,8 @@ bool TGParser::AddSubMultiClass(MultiClass *CurMC,
/// isObjectStart - Return true if this is a valid first token for an Object.
static bool isObjectStart(tgtok::TokKind K) {
return K == tgtok::Class || K == tgtok::Def ||
- K == tgtok::Defm || K == tgtok::Let || K == tgtok::MultiClass;
+ K == tgtok::Defm || K == tgtok::Let ||
+ K == tgtok::MultiClass || K == tgtok::Foreach;
}
static std::string GetNewAnonymousName() {
@@ -303,18 +414,39 @@ static std::string GetNewAnonymousName() {
/// ParseObjectName - If an object name is specified, return it. Otherwise,
/// return an anonymous name.
-/// ObjectName ::= ID
+/// ObjectName ::= Value [ '#' Value ]*
/// ObjectName ::= /*empty*/
///
-std::string TGParser::ParseObjectName() {
- if (Lex.getCode() != tgtok::Id)
- return GetNewAnonymousName();
+Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) {
+ switch (Lex.getCode()) {
+ case tgtok::colon:
+ case tgtok::semi:
+ case tgtok::l_brace:
+ // These are all of the tokens that can begin an object body.
+ // Some of these can also begin values but we disallow those cases
+ // because they are unlikely to be useful.
+ return StringInit::get(GetNewAnonymousName());
+ default:
+ break;
+ }
- std::string Ret = Lex.getCurStrVal();
- Lex.Lex();
- return Ret;
-}
+ Record *CurRec = 0;
+ if (CurMultiClass)
+ CurRec = &CurMultiClass->Rec;
+
+ RecTy *Type = 0;
+ if (CurRec) {
+ const TypedInit *CurRecName =
+ dynamic_cast<const TypedInit *>(CurRec->getNameInit());
+ if (!CurRecName) {
+ TokError("Record name is not typed!");
+ return 0;
+ }
+ Type = CurRecName->getType();
+ }
+ return ParseValue(CurRec, Type, ParseNameMode);
+}
/// ParseClassID - Parse and resolve a reference to a class name. This returns
/// null on error.
@@ -570,11 +702,11 @@ bool TGParser::ParseOptionalBitList(std::vector<unsigned> &Ranges) {
/// ParseType - Parse and return a tblgen type. This returns null on error.
///
/// Type ::= STRING // string type
+/// Type ::= CODE // code type
/// Type ::= BIT // bit type
/// Type ::= BITS '<' INTVAL '>' // bits<x> type
/// Type ::= INT // int type
/// Type ::= LIST '<' Type '>' // list<x> type
-/// Type ::= CODE // code type
/// Type ::= DAG // dag type
/// Type ::= ClassID // Record Type
///
@@ -582,9 +714,9 @@ RecTy *TGParser::ParseType() {
switch (Lex.getCode()) {
default: TokError("Unknown token when expecting a type"); return 0;
case tgtok::String: Lex.Lex(); return StringRecTy::get();
+ case tgtok::Code: Lex.Lex(); return StringRecTy::get();
case tgtok::Bit: Lex.Lex(); return BitRecTy::get();
case tgtok::Int: Lex.Lex(); return IntRecTy::get();
- case tgtok::Code: Lex.Lex(); return CodeRecTy::get();
case tgtok::Dag: Lex.Lex(); return DagRecTy::get();
case tgtok::Id:
if (Record *R = ParseClassID()) return RecordRecTy::get(R);
@@ -633,7 +765,7 @@ RecTy *TGParser::ParseType() {
/// IDValue ::= ID [multiclass template argument]
/// IDValue ::= ID [def name]
///
-Init *TGParser::ParseIDValue(Record *CurRec) {
+Init *TGParser::ParseIDValue(Record *CurRec, IDParseMode Mode) {
assert(Lex.getCode() == tgtok::Id && "Expected ID in ParseIDValue");
std::string Name = Lex.getCurStrVal();
SMLoc Loc = Lex.getLoc();
@@ -644,14 +776,17 @@ Init *TGParser::ParseIDValue(Record *CurRec) {
/// ParseIDValue - This is just like ParseIDValue above, but it assumes the ID
/// has already been read.
Init *TGParser::ParseIDValue(Record *CurRec,
- const std::string &Name, SMLoc NameLoc) {
+ const std::string &Name, SMLoc NameLoc,
+ IDParseMode Mode) {
if (CurRec) {
if (const RecordVal *RV = CurRec->getValue(Name))
return VarInit::get(Name, RV->getType());
- std::string TemplateArgName = CurRec->getName()+":"+Name;
+ Init *TemplateArgName = QualifyName(*CurRec, CurMultiClass, Name, ":");
+
if (CurMultiClass)
- TemplateArgName = CurMultiClass->Rec.getName()+"::"+TemplateArgName;
+ TemplateArgName = QualifyName(CurMultiClass->Rec, CurMultiClass, Name,
+ "::");
if (CurRec->isTemplateArg(TemplateArgName)) {
const RecordVal *RV = CurRec->getValue(TemplateArgName);
@@ -661,7 +796,9 @@ Init *TGParser::ParseIDValue(Record *CurRec,
}
if (CurMultiClass) {
- std::string MCName = CurMultiClass->Rec.getName()+"::"+Name;
+ Init *MCName = QualifyName(CurMultiClass->Rec, CurMultiClass, Name,
+ "::");
+
if (CurMultiClass->Rec.isTemplateArg(MCName)) {
const RecordVal *RV = CurMultiClass->Rec.getValue(MCName);
assert(RV && "Template arg doesn't exist??");
@@ -669,11 +806,27 @@ Init *TGParser::ParseIDValue(Record *CurRec,
}
}
+ // If this is in a foreach loop, make sure it's not a loop iterator
+ for (LoopVector::iterator i = Loops.begin(), iend = Loops.end();
+ i != iend;
+ ++i) {
+ VarInit *IterVar = dynamic_cast<VarInit *>(i->IterVar);
+ if (IterVar && IterVar->getName() == Name)
+ return IterVar;
+ }
+
+ if (Mode == ParseNameMode)
+ return StringInit::get(Name);
+
if (Record *D = Records.getDef(Name))
return DefInit::get(D);
- Error(NameLoc, "Variable not defined: '" + Name + "'");
- return 0;
+ if (Mode == ParseValueMode) {
+ Error(NameLoc, "Variable not defined: '" + Name + "'");
+ return 0;
+ }
+
+ return StringInit::get(Name);
}
/// ParseOperation - Parse an operator. This returns null on error.
@@ -685,7 +838,6 @@ Init *TGParser::ParseOperation(Record *CurRec) {
default:
TokError("unknown operation");
return 0;
- break;
case tgtok::XHead:
case tgtok::XTail:
case tgtok::XEmpty:
@@ -694,7 +846,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
RecTy *Type = 0;
switch (Lex.getCode()) {
- default: assert(0 && "Unhandled code!");
+ default: llvm_unreachable("Unhandled code!");
case tgtok::XCast:
Lex.Lex(); // eat the operation
Code = UnOpInit::CAST;
@@ -810,7 +962,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
RecTy *Type = 0;
switch (OpTok) {
- default: assert(0 && "Unhandled code!");
+ default: llvm_unreachable("Unhandled code!");
case tgtok::XConcat: Code = BinOpInit::CONCAT;Type = DagRecTy::get(); break;
case tgtok::XSRA: Code = BinOpInit::SRA; Type = IntRecTy::get(); break;
case tgtok::XSRL: Code = BinOpInit::SRL; Type = IntRecTy::get(); break;
@@ -874,7 +1026,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
tgtok::TokKind LexCode = Lex.getCode();
Lex.Lex(); // eat the operation
switch (LexCode) {
- default: assert(0 && "Unhandled code!");
+ default: llvm_unreachable("Unhandled code!");
case tgtok::XIf:
Code = TernOpInit::IF;
break;
@@ -919,7 +1071,7 @@ Init *TGParser::ParseOperation(Record *CurRec) {
Lex.Lex(); // eat the ')'
switch (LexCode) {
- default: assert(0 && "Unhandled code!");
+ default: llvm_unreachable("Unhandled code!");
case tgtok::XIf: {
// FIXME: The `!if' operator doesn't handle non-TypedInit well at
// all. This can be made much more robust.
@@ -989,8 +1141,6 @@ Init *TGParser::ParseOperation(Record *CurRec) {
CurMultiClass);
}
}
- TokError("could not parse operation");
- return 0;
}
/// ParseOperatorType - Parse a type for an operator. This returns
@@ -1041,10 +1191,16 @@ RecTy *TGParser::ParseOperatorType() {
/// SimpleValue ::= SRLTOK '(' Value ',' Value ')'
/// SimpleValue ::= STRCONCATTOK '(' Value ',' Value ')'
///
-Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType) {
+Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
+ IDParseMode Mode) {
Init *R = 0;
switch (Lex.getCode()) {
default: TokError("Unknown token when parsing a value"); break;
+ case tgtok::paste:
+ // This is a leading paste operation. This is deprecated but
+ // still exists in some .td files. Ignore it.
+ Lex.Lex(); // Skip '#'.
+ return ParseSimpleValue(CurRec, ItemType, Mode);
case tgtok::IntVal: R = IntInit::get(Lex.getCurIntVal()); Lex.Lex(); break;
case tgtok::StrVal: {
std::string Val = Lex.getCurStrVal();
@@ -1060,7 +1216,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType) {
break;
}
case tgtok::CodeFragment:
- R = CodeInit::get(Lex.getCurStrVal());
+ R = StringInit::get(Lex.getCurStrVal());
Lex.Lex();
break;
case tgtok::question:
@@ -1071,7 +1227,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType) {
SMLoc NameLoc = Lex.getLoc();
std::string Name = Lex.getCurStrVal();
if (Lex.Lex() != tgtok::less) // consume the Id.
- return ParseIDValue(CurRec, Name, NameLoc); // Value ::= IDValue
+ return ParseIDValue(CurRec, Name, NameLoc, Mode); // Value ::= IDValue
// Value ::= ID '<' ValueListNE '>'
if (Lex.Lex() == tgtok::greater) {
@@ -1305,8 +1461,8 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType) {
/// ValueSuffix ::= '[' BitList ']'
/// ValueSuffix ::= '.' ID
///
-Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType) {
- Init *Result = ParseSimpleValue(CurRec, ItemType);
+Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) {
+ Init *Result = ParseSimpleValue(CurRec, ItemType, Mode);
if (Result == 0) return 0;
// Parse the suffixes now if present.
@@ -1314,6 +1470,10 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType) {
switch (Lex.getCode()) {
default: return Result;
case tgtok::l_brace: {
+ if (Mode == ParseNameMode || Mode == ParseForeachMode)
+ // This is the beginning of the object body.
+ return Result;
+
SMLoc CurlyLoc = Lex.getLoc();
Lex.Lex(); // eat the '{'
std::vector<unsigned> Ranges = ParseRangeList();
@@ -1368,6 +1528,56 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType) {
Result = FieldInit::get(Result, Lex.getCurStrVal());
Lex.Lex(); // eat field name
break;
+
+ case tgtok::paste:
+ SMLoc PasteLoc = Lex.getLoc();
+
+ // Create a !strconcat() operation, first casting each operand to
+ // a string if necessary.
+
+ TypedInit *LHS = dynamic_cast<TypedInit *>(Result);
+ if (!LHS) {
+ Error(PasteLoc, "LHS of paste is not typed!");
+ return 0;
+ }
+
+ if (LHS->getType() != StringRecTy::get()) {
+ LHS = UnOpInit::get(UnOpInit::CAST, LHS, StringRecTy::get());
+ }
+
+ TypedInit *RHS = 0;
+
+ Lex.Lex(); // Eat the '#'.
+ switch (Lex.getCode()) {
+ case tgtok::colon:
+ case tgtok::semi:
+ case tgtok::l_brace:
+ // These are all of the tokens that can begin an object body.
+ // Some of these can also begin values but we disallow those cases
+ // because they are unlikely to be useful.
+
+ // Trailing paste, concat with an empty string.
+ RHS = StringInit::get("");
+ break;
+
+ default:
+ Init *RHSResult = ParseValue(CurRec, ItemType, ParseNameMode);
+ RHS = dynamic_cast<TypedInit *>(RHSResult);
+ if (!RHS) {
+ Error(PasteLoc, "RHS of paste is not typed!");
+ return 0;
+ }
+
+ if (RHS->getType() != StringRecTy::get()) {
+ RHS = UnOpInit::get(UnOpInit::CAST, RHS, StringRecTy::get());
+ }
+
+ break;
+ }
+
+ Result = BinOpInit::get(BinOpInit::STRCONCAT, LHS, RHS,
+ StringRecTy::get())->Fold(CurRec, CurMultiClass);
+ break;
}
}
}
@@ -1417,7 +1627,11 @@ std::vector<Init*> TGParser::ParseValueList(Record *CurRec, Record *ArgsRec,
RecTy *ItemType = EltTy;
unsigned int ArgN = 0;
if (ArgsRec != 0 && EltTy == 0) {
- const std::vector<std::string> &TArgs = ArgsRec->getTemplateArgs();
+ const std::vector<Init *> &TArgs = ArgsRec->getTemplateArgs();
+ if (!TArgs.size()) {
+ TokError("template argument provided to non-template class");
+ return std::vector<Init*>();
+ }
const RecordVal *RV = ArgsRec->getValue(TArgs[ArgN]);
if (!RV) {
errs() << "Cannot find template arg " << ArgN << " (" << TArgs[ArgN]
@@ -1434,7 +1648,7 @@ std::vector<Init*> TGParser::ParseValueList(Record *CurRec, Record *ArgsRec,
Lex.Lex(); // Eat the comma
if (ArgsRec != 0 && EltTy == 0) {
- const std::vector<std::string> &TArgs = ArgsRec->getTemplateArgs();
+ const std::vector<Init *> &TArgs = ArgsRec->getTemplateArgs();
if (ArgN >= TArgs.size()) {
TokError("too many template arguments");
return std::vector<Init*>();
@@ -1462,37 +1676,38 @@ std::vector<Init*> TGParser::ParseValueList(Record *CurRec, Record *ArgsRec,
///
/// Declaration ::= FIELD? Type ID ('=' Value)?
///
-std::string TGParser::ParseDeclaration(Record *CurRec,
+Init *TGParser::ParseDeclaration(Record *CurRec,
bool ParsingTemplateArgs) {
// Read the field prefix if present.
bool HasField = Lex.getCode() == tgtok::Field;
if (HasField) Lex.Lex();
RecTy *Type = ParseType();
- if (Type == 0) return "";
+ if (Type == 0) return 0;
if (Lex.getCode() != tgtok::Id) {
TokError("Expected identifier in declaration");
- return "";
+ return 0;
}
SMLoc IdLoc = Lex.getLoc();
- std::string DeclName = Lex.getCurStrVal();
+ Init *DeclName = StringInit::get(Lex.getCurStrVal());
Lex.Lex();
if (ParsingTemplateArgs) {
if (CurRec) {
- DeclName = CurRec->getName() + ":" + DeclName;
+ DeclName = QualifyName(*CurRec, CurMultiClass, DeclName, ":");
} else {
assert(CurMultiClass);
}
if (CurMultiClass)
- DeclName = CurMultiClass->Rec.getName() + "::" + DeclName;
+ DeclName = QualifyName(CurMultiClass->Rec, CurMultiClass, DeclName,
+ "::");
}
// Add the value.
if (AddValue(CurRec, IdLoc, RecordVal(DeclName, Type, HasField)))
- return "";
+ return 0;
// If a value is present, parse it.
if (Lex.getCode() == tgtok::equal) {
@@ -1501,12 +1716,56 @@ std::string TGParser::ParseDeclaration(Record *CurRec,
Init *Val = ParseValue(CurRec, Type);
if (Val == 0 ||
SetValue(CurRec, ValLoc, DeclName, std::vector<unsigned>(), Val))
- return "";
+ return 0;
}
return DeclName;
}
+/// ParseForeachDeclaration - Read a foreach declaration, returning
+/// the name of the declared object or a NULL Init on error. Return
+/// the parsed initializer list through ForeachListValue.
+///
+/// ForeachDeclaration ::= ID '=' Value
+///
+Init *TGParser::ParseForeachDeclaration(Init *&ForeachListValue) {
+ if (Lex.getCode() != tgtok::Id) {
+ TokError("Expected identifier in foreach declaration");
+ return 0;
+ }
+
+ Init *DeclName = StringInit::get(Lex.getCurStrVal());
+ Lex.Lex();
+
+ // If a value is present, parse it.
+ if (Lex.getCode() != tgtok::equal) {
+ TokError("Expected '=' in foreach declaration");
+ return 0;
+ }
+ Lex.Lex(); // Eat the '='
+
+ // Expect a list initializer.
+ ForeachListValue = ParseValue(0, 0, ParseForeachMode);
+
+ TypedInit *TypedList = dynamic_cast<TypedInit *>(ForeachListValue);
+ if (TypedList == 0) {
+ TokError("Value list is untyped");
+ return 0;
+ }
+
+ RecTy *ValueType = TypedList->getType();
+ ListRecTy *ListType = dynamic_cast<ListRecTy *>(ValueType);
+ if (ListType == 0) {
+ TokError("Value list is not of list type");
+ return 0;
+ }
+
+ RecTy *IterType = ListType->getElementType();
+ VarInit *IterVar = VarInit::get(DeclName, IterType);
+
+ return IterVar;
+}
+
/// ParseTemplateArgList - Read a template argument list, which is a non-empty
/// sequence of template-declarations in <>'s. If CurRec is non-null, these are
/// template args for a def, which may or may not be in a multiclass. If null,
@@ -1521,8 +1780,8 @@ bool TGParser::ParseTemplateArgList(Record *CurRec) {
Record *TheRecToAddTo = CurRec ? CurRec : &CurMultiClass->Rec;
// Read the first declaration.
- std::string TemplArg = ParseDeclaration(CurRec, true/*templateargs*/);
- if (TemplArg.empty())
+ Init *TemplArg = ParseDeclaration(CurRec, true/*templateargs*/);
+ if (TemplArg == 0)
return true;
TheRecToAddTo->addTemplateArg(TemplArg);
@@ -1532,7 +1791,7 @@ bool TGParser::ParseTemplateArgList(Record *CurRec) {
// Read the following declarations.
TemplArg = ParseDeclaration(CurRec, true/*templateargs*/);
- if (TemplArg.empty())
+ if (TemplArg == 0)
return true;
TheRecToAddTo->addTemplateArg(TemplArg);
}
@@ -1550,7 +1809,7 @@ bool TGParser::ParseTemplateArgList(Record *CurRec) {
/// BodyItem ::= LET ID OptionalBitList '=' Value ';'
bool TGParser::ParseBodyItem(Record *CurRec) {
if (Lex.getCode() != tgtok::Let) {
- if (ParseDeclaration(CurRec, false).empty())
+ if (ParseDeclaration(CurRec, false) == 0)
return true;
if (Lex.getCode() != tgtok::semi)
@@ -1671,22 +1930,24 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
Lex.Lex(); // Eat the 'def' token.
// Parse ObjectName and make a record for it.
- Record *CurRec = new Record(ParseObjectName(), DefLoc, Records);
+ Record *CurRec = new Record(ParseObjectName(CurMultiClass), DefLoc, Records);
if (!CurMultiClass) {
// Top-level def definition.
// Ensure redefinition doesn't happen.
- if (Records.getDef(CurRec->getName())) {
- Error(DefLoc, "def '" + CurRec->getName() + "' already defined");
+ if (Records.getDef(CurRec->getNameInitAsString())) {
+ Error(DefLoc, "def '" + CurRec->getNameInitAsString()
+ + "' already defined");
return true;
}
Records.addDef(CurRec);
} else {
// Otherwise, a def inside a multiclass, add it to the multiclass.
for (unsigned i = 0, e = CurMultiClass->DefPrototypes.size(); i != e; ++i)
- if (CurMultiClass->DefPrototypes[i]->getName() == CurRec->getName()) {
- Error(DefLoc, "def '" + CurRec->getName() +
+ if (CurMultiClass->DefPrototypes[i]->getNameInit()
+ == CurRec->getNameInit()) {
+ Error(DefLoc, "def '" + CurRec->getNameInitAsString() +
"' already defined in this multiclass!");
return true;
}
@@ -1707,7 +1968,7 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
if (CurMultiClass) {
// Copy the template arguments for the multiclass into the def.
- const std::vector<std::string> &TArgs =
+ const std::vector<Init *> &TArgs =
CurMultiClass->Rec.getTemplateArgs();
for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
@@ -1717,6 +1978,63 @@ bool TGParser::ParseDef(MultiClass *CurMultiClass) {
}
}
+ if (ProcessForeachDefs(CurRec, CurMultiClass, DefLoc)) {
+ Error(DefLoc,
+ "Could not process loops for def" + CurRec->getNameInitAsString());
+ return true;
+ }
+
+ return false;
+}
+
+/// ParseForeach - Parse a foreach statement. This returns true on error.
+///
+/// Foreach ::= FOREACH Declaration IN '{' ObjectList '}'
+/// Foreach ::= FOREACH Declaration IN Object
+///
+bool TGParser::ParseForeach(MultiClass *CurMultiClass) {
+ assert(Lex.getCode() == tgtok::Foreach && "Unknown tok");
+ Lex.Lex(); // Eat the 'foreach' token.
+
+ // Make a temporary object to record items associated with the
+ // foreach loop.
+ Init *ListValue = 0;
+ Init *IterName = ParseForeachDeclaration(ListValue);
+ if (IterName == 0)
+ return TokError("expected declaration in for");
+
+ if (Lex.getCode() != tgtok::In)
+ return TokError("Unknown tok");
+ Lex.Lex(); // Eat the in
+
+ // Create a loop object and remember it.
+ Loops.push_back(ForeachLoop(IterName, ListValue));
+
+ if (Lex.getCode() != tgtok::l_brace) {
+ // FOREACH Declaration IN Object
+ if (ParseObject(CurMultiClass))
+ return true;
+ }
+ else {
+ SMLoc BraceLoc = Lex.getLoc();
+ // Otherwise, this is a group foreach.
+ Lex.Lex(); // eat the '{'.
+
+ // Parse the object list.
+ if (ParseObjectList(CurMultiClass))
+ return true;
+
+ if (Lex.getCode() != tgtok::r_brace) {
+ TokError("expected '}' at end of foreach command");
+ return Error(BraceLoc, "to match this '{'");
+ }
+ Lex.Lex(); // Eat the }
+ }
+
+ // We've processed everything in this loop.
+ Loops.pop_back();
+
return false;
}
@@ -1734,10 +2052,11 @@ bool TGParser::ParseClass() {
Record *CurRec = Records.getClass(Lex.getCurStrVal());
if (CurRec) {
// If the body was previously defined, this is an error.
- if (!CurRec->getValues().empty() ||
+ if (CurRec->getValues().size() > 1 || // Account for NAME.
!CurRec->getSuperClasses().empty() ||
!CurRec->getTemplateArgs().empty())
- return TokError("Class '" + CurRec->getName() + "' already defined");
+ return TokError("Class '" + CurRec->getNameInitAsString()
+ + "' already defined");
} else {
// If this is the first reference to this class, create and add it.
CurRec = new Record(Lex.getCurStrVal(), Lex.getLoc(), Records);
@@ -1909,6 +2228,7 @@ bool TGParser::ParseMultiClass() {
case tgtok::Let:
case tgtok::Def:
case tgtok::Defm:
+ case tgtok::Foreach:
if (ParseObject(CurMultiClass))
return true;
break;
@@ -1924,23 +2244,31 @@ bool TGParser::ParseMultiClass() {
Record *TGParser::
InstantiateMulticlassDef(MultiClass &MC,
Record *DefProto,
- const std::string &DefmPrefix,
+ Init *DefmPrefix,
SMLoc DefmPrefixLoc) {
+ // We need to preserve DefProto so it can be reused for later
+ // instantiations, so create a new Record to inherit from it.
+
// Add in the defm name. If the defm prefix is empty, give each
// instantiated def a unique name. Otherwise, if "#NAME#" exists in the
// name, substitute the prefix for #NAME#. Otherwise, use the defm name
// as a prefix.
- std::string DefName = DefProto->getName();
- if (DefmPrefix.empty()) {
- DefName = GetNewAnonymousName();
- } else {
- std::string::size_type idx = DefName.find("#NAME#");
- if (idx != std::string::npos) {
- DefName.replace(idx, 6, DefmPrefix);
- } else {
- // Add the suffix to the defm name to get the new name.
- DefName = DefmPrefix + DefName;
- }
+
+ if (DefmPrefix == 0)
+ DefmPrefix = StringInit::get(GetNewAnonymousName());
+
+ Init *DefName = DefProto->getNameInit();
+
+ StringInit *DefNameString = dynamic_cast<StringInit *>(DefName);
+
+ if (DefNameString != 0) {
+ // We have a fully expanded string so there are no operators to
+ // resolve. We should concatenate the given prefix and name.
+ DefName =
+ BinOpInit::get(BinOpInit::STRCONCAT,
+ UnOpInit::get(UnOpInit::CAST, DefmPrefix,
+ StringRecTy::get())->Fold(DefProto, &MC),
+ DefName, StringRecTy::get())->Fold(DefProto, &MC);
}
Record *CurRec = new Record(DefName, DefmPrefixLoc, Records);
@@ -1950,6 +2278,41 @@ InstantiateMulticlassDef(MultiClass &MC,
Ref.Rec = DefProto;
AddSubClass(CurRec, Ref);
+ if (DefNameString == 0) {
+ // We must resolve references to NAME.
+ if (SetValue(CurRec, Ref.RefLoc, "NAME", std::vector<unsigned>(),
+ DefmPrefix)) {
+ Error(DefmPrefixLoc, "Could not resolve "
+ + CurRec->getNameInitAsString() + ":NAME to '"
+ + DefmPrefix->getAsUnquotedString() + "'");
+ return 0;
+ }
+
+ RecordVal *DefNameRV = CurRec->getValue("NAME");
+ CurRec->resolveReferencesTo(DefNameRV);
+ }
+
+ if (!CurMultiClass) {
+ // We do this after resolving NAME because before resolution, many
+ // multiclass defs will have the same name expression. If we are
+ // currently in a multiclass, it means this defm appears inside a
+ // multiclass and its name won't be fully resolvable until we see
+ // the top-level defm. Therefore, we don't add this to the
+ // RecordKeeper at this point. If we did we could get duplicate
+ // defs as more than one probably refers to NAME or some other
+ // common internal placeholder.
+
+ // Ensure redefinition doesn't happen.
+ if (Records.getDef(CurRec->getNameInitAsString())) {
+ Error(DefmPrefixLoc, "def '" + CurRec->getNameInitAsString() +
+ "' already defined, instantiating defm with subdef '" +
+ DefProto->getNameInitAsString() + "'");
+ return 0;
+ }
+
+ Records.addDef(CurRec);
+ }
+
return CurRec;
}
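In the common case of a fully-resolved string name, the defm prefix is simply concatenated onto each prototype's name, as the DefNameString branch above shows. A minimal sketch (hypothetical .td):

    multiclass MC<int n> {
      def _lo;           // instantiated under the defm prefix
      def _hi;
    }
    defm Foo : MC<1>;    // creates Foo_lo and Foo_hi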
@@ -1957,7 +2320,7 @@ bool TGParser::ResolveMulticlassDefArgs(MultiClass &MC,
Record *CurRec,
SMLoc DefmPrefixLoc,
SMLoc SubClassLoc,
- const std::vector<std::string> &TArgs,
+ const std::vector<Init *> &TArgs,
std::vector<Init *> &TemplateVals,
bool DeleteArgs) {
// Loop over all of the template arguments, setting them to the specified
@@ -1979,8 +2342,9 @@ bool TGParser::ResolveMulticlassDefArgs(MultiClass &MC,
} else if (!CurRec->getValue(TArgs[i])->getValue()->isComplete()) {
return Error(SubClassLoc, "value not specified for template argument #"+
- utostr(i) + " (" + TArgs[i] + ") of multiclassclass '" +
- MC.Rec.getName() + "'");
+ utostr(i) + " (" + TArgs[i]->getAsUnquotedString()
+ + ") of multiclassclass '" + MC.Rec.getNameInitAsString()
+ + "'");
}
}
return false;
@@ -1997,25 +2361,20 @@ bool TGParser::ResolveMulticlassDef(MultiClass &MC,
LetStack[i][j].Bits, LetStack[i][j].Value))
return Error(DefmPrefixLoc, "when instantiating this defm");
- // Ensure redefinition doesn't happen.
- if (Records.getDef(CurRec->getName()))
- return Error(DefmPrefixLoc, "def '" + CurRec->getName() +
- "' already defined, instantiating defm with subdef '" +
- DefProto->getName() + "'");
-
// Don't create a top level definition for defm inside multiclasses,
// instead, only update the prototypes and bind the template args
// with the new created definition.
if (CurMultiClass) {
for (unsigned i = 0, e = CurMultiClass->DefPrototypes.size();
i != e; ++i)
- if (CurMultiClass->DefPrototypes[i]->getName() == CurRec->getName())
- return Error(DefmPrefixLoc, "defm '" + CurRec->getName() +
+ if (CurMultiClass->DefPrototypes[i]->getNameInit()
+ == CurRec->getNameInit())
+ return Error(DefmPrefixLoc, "defm '" + CurRec->getNameInitAsString() +
"' already defined in this multiclass!");
CurMultiClass->DefPrototypes.push_back(CurRec);
// Copy the template arguments for the multiclass into the new def.
- const std::vector<std::string> &TA =
+ const std::vector<Init *> &TA =
CurMultiClass->Rec.getTemplateArgs();
for (unsigned i = 0, e = TA.size(); i != e; ++i) {
@@ -2023,8 +2382,6 @@ bool TGParser::ResolveMulticlassDef(MultiClass &MC,
assert(RV && "Template arg doesn't exist?");
CurRec->addValue(*RV);
}
- } else {
- Records.addDef(CurRec);
}
return false;
@@ -2037,10 +2394,10 @@ bool TGParser::ResolveMulticlassDef(MultiClass &MC,
bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Defm && "Unexpected token!");
- std::string DefmPrefix;
+ Init *DefmPrefix = 0;
+
if (Lex.Lex() == tgtok::Id) { // eat the defm.
- DefmPrefix = Lex.getCurStrVal();
- Lex.Lex(); // Eat the defm prefix.
+ DefmPrefix = ParseObjectName(CurMultiClass);
}
SMLoc DefmPrefixLoc = Lex.getLoc();
@@ -2070,7 +2427,7 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
std::vector<Init*> &TemplateVals = Ref.TemplateArgs;
// Verify that the correct number of template arguments were specified.
- const std::vector<std::string> &TArgs = MC->Rec.getTemplateArgs();
+ const std::vector<Init *> &TArgs = MC->Rec.getTemplateArgs();
if (TArgs.size() < TemplateVals.size())
return Error(SubClassLoc,
"more template args specified than multiclass expects");
@@ -2080,6 +2437,8 @@ bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
Record *DefProto = MC->DefPrototypes[i];
Record *CurRec = InstantiateMulticlassDef(*MC, DefProto, DefmPrefix, DefmPrefixLoc);
+ if (!CurRec)
+ return true;
if (ResolveMulticlassDefArgs(*MC, CurRec, DefmPrefixLoc, SubClassLoc,
TArgs, TemplateVals, true/*Delete args*/))
@@ -2165,6 +2524,7 @@ bool TGParser::ParseObject(MultiClass *MC) {
return TokError("Expected class, def, defm, multiclass or let definition");
case tgtok::Let: return ParseTopLevelLet(MC);
case tgtok::Def: return ParseDef(MC);
+ case tgtok::Foreach: return ParseForeach(MC);
case tgtok::Defm: return ParseDefm(MC);
case tgtok::Class: return ParseClass();
case tgtok::MultiClass: return ParseMultiClass();
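In value position the '#' paste desugars to !strconcat, with each operand implicitly cast to string when needed, per the tgtok::paste handling above. A hedged sketch (hypothetical .td, field names invented for illustration):

    def A {
      string prefix = "VADD";
      string full = prefix # "_f32";   // folds to "VADD_f32"
    }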
diff --git a/contrib/llvm/lib/TableGen/TGParser.h b/contrib/llvm/lib/TableGen/TGParser.h
index db8a620..b8e7cb1 100644
--- a/contrib/llvm/lib/TableGen/TGParser.h
+++ b/contrib/llvm/lib/TableGen/TGParser.h
@@ -14,6 +14,7 @@
#ifndef TGPARSER_H
#define TGPARSER_H
+#include "llvm/TableGen/Record.h"
#include "TGLexer.h"
#include "llvm/TableGen/Error.h"
#include "llvm/ADT/Twine.h"
@@ -41,17 +42,44 @@ namespace llvm {
}
};
+ /// ForeachLoop - Record the iteration state associated with a foreach loop.
+ /// This is used to instantiate items in the loop body.
+ struct ForeachLoop {
+ Init *IterVar;
+ Init *ListValue;
+
+ ForeachLoop(Init *IVar, Init *LValue) : IterVar(IVar), ListValue(LValue) {}
+ };
+
class TGParser {
TGLexer Lex;
std::vector<std::vector<LetRecord> > LetStack;
std::map<std::string, MultiClass*> MultiClasses;
+ /// Loops - Keep track of any foreach loops we are within.
+ ///
+ typedef std::vector<ForeachLoop> LoopVector;
+ LoopVector Loops;
+
/// CurMultiClass - If we are parsing a 'multiclass' definition, this is the
/// current value.
MultiClass *CurMultiClass;
// Record tracker
RecordKeeper &Records;
+
+ // A "named boolean" indicating how to parse identifiers. Usually
+ // identifiers map to some existing object but in special cases
+ // (e.g. parsing def names) no such object exists yet because we are
+ // in the middle of creating in. For those situations, allow the
+ // parser to ignore missing object errors.
+ enum IDParseMode {
+ ParseValueMode, // We are parsing a value we expect to look up.
+ ParseNameMode, // We are parsing a name of an object that does not yet
+ // exist.
+ ParseForeachMode // We are parsing a foreach init.
+ };
+
public:
TGParser(SourceMgr &SrcMgr, RecordKeeper &records) :
Lex(SrcMgr), CurMultiClass(0), Records(records) {}
@@ -70,14 +98,36 @@ public:
const std::vector<std::string> &getDependencies() const {
return Lex.getDependencies();
}
+
private: // Semantic analysis methods.
bool AddValue(Record *TheRec, SMLoc Loc, const RecordVal &RV);
- bool SetValue(Record *TheRec, SMLoc Loc, const std::string &ValName,
+ bool SetValue(Record *TheRec, SMLoc Loc, Init *ValName,
const std::vector<unsigned> &BitList, Init *V);
+ bool SetValue(Record *TheRec, SMLoc Loc, const std::string &ValName,
+ const std::vector<unsigned> &BitList, Init *V) {
+ return SetValue(TheRec, Loc, StringInit::get(ValName), BitList, V);
+ }
bool AddSubClass(Record *Rec, SubClassReference &SubClass);
bool AddSubMultiClass(MultiClass *CurMC,
SubMultiClassReference &SubMultiClass);
+ // IterRecord: Map an iterator name to a value.
+ struct IterRecord {
+ Init *IterVar;
+ Init *IterValue;
+ IterRecord(Init *Var, Init *Val) : IterVar(Var), IterValue(Val) {}
+ };
+
+ // IterSet: The set of all iterator values at some point in the
+ // iteration space.
+ typedef std::vector<IterRecord> IterSet;
+
+ bool ProcessForeachDefs(Record *CurRec, MultiClass *CurMultiClass,
+ SMLoc Loc);
+ bool ProcessForeachDefs(Record *CurRec, MultiClass *CurMultiClass,
+ SMLoc Loc, IterSet &IterVals, ForeachLoop &CurLoop,
+ LoopVector::iterator NextLoop);
+
private: // Parser methods.
bool ParseObjectList(MultiClass *MC = 0);
bool ParseObject(MultiClass *MC);
@@ -85,13 +135,13 @@ private: // Parser methods.
bool ParseMultiClass();
Record *InstantiateMulticlassDef(MultiClass &MC,
Record *DefProto,
- const std::string &DefmPrefix,
+ Init *DefmPrefix,
SMLoc DefmPrefixLoc);
bool ResolveMulticlassDefArgs(MultiClass &MC,
Record *DefProto,
SMLoc DefmPrefixLoc,
SMLoc SubClassLoc,
- const std::vector<std::string> &TArgs,
+ const std::vector<Init *> &TArgs,
std::vector<Init *> &TemplateVals,
bool DeleteArgs);
bool ResolveMulticlassDef(MultiClass &MC,
@@ -100,6 +150,7 @@ private: // Parser methods.
SMLoc DefmPrefixLoc);
bool ParseDefm(MultiClass *CurMultiClass);
bool ParseDef(MultiClass *CurMultiClass);
+ bool ParseForeach(MultiClass *CurMultiClass);
bool ParseTopLevelLet(MultiClass *CurMultiClass);
std::vector<LetRecord> ParseLetList();
@@ -108,15 +159,19 @@ private: // Parser methods.
bool ParseBodyItem(Record *CurRec);
bool ParseTemplateArgList(Record *CurRec);
- std::string ParseDeclaration(Record *CurRec, bool ParsingTemplateArgs);
+ Init *ParseDeclaration(Record *CurRec, bool ParsingTemplateArgs);
+ Init *ParseForeachDeclaration(Init *&ForeachListValue);
SubClassReference ParseSubClassReference(Record *CurRec, bool isDefm);
SubMultiClassReference ParseSubMultiClassReference(MultiClass *CurMC);
- Init *ParseIDValue(Record *CurRec);
- Init *ParseIDValue(Record *CurRec, const std::string &Name, SMLoc NameLoc);
- Init *ParseSimpleValue(Record *CurRec, RecTy *ItemType = 0);
- Init *ParseValue(Record *CurRec, RecTy *ItemType = 0);
+ Init *ParseIDValue(Record *CurRec, IDParseMode Mode = ParseValueMode);
+ Init *ParseIDValue(Record *CurRec, const std::string &Name, SMLoc NameLoc,
+ IDParseMode Mode = ParseValueMode);
+ Init *ParseSimpleValue(Record *CurRec, RecTy *ItemType = 0,
+ IDParseMode Mode = ParseValueMode);
+ Init *ParseValue(Record *CurRec, RecTy *ItemType = 0,
+ IDParseMode Mode = ParseValueMode);
std::vector<Init*> ParseValueList(Record *CurRec, Record *ArgsRec = 0, RecTy *EltTy = 0);
std::vector<std::pair<llvm::Init*, std::string> > ParseDagArgList(Record *);
bool ParseOptionalRangeList(std::vector<unsigned> &Ranges);
@@ -126,7 +181,7 @@ private: // Parser methods.
RecTy *ParseType();
Init *ParseOperation(Record *CurRec);
RecTy *ParseOperatorType();
- std::string ParseObjectName();
+ Init *ParseObjectName(MultiClass *CurMultiClass);
Record *ParseClassID();
MultiClass *ParseMultiClassID();
Record *ParseDefmID();
diff --git a/contrib/llvm/lib/TableGen/TableGenAction.cpp b/contrib/llvm/lib/TableGen/TableGenAction.cpp
new file mode 100644
index 0000000..54e5083
--- /dev/null
+++ b/contrib/llvm/lib/TableGen/TableGenAction.cpp
@@ -0,0 +1,15 @@
+//===- TableGenAction.cpp - defines TableGenAction --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TableGen/TableGenAction.h"
+
+using namespace llvm;
+
+void TableGenAction::anchor() { }
+
diff --git a/contrib/llvm/lib/TableGen/TableGenBackend.cpp b/contrib/llvm/lib/TableGen/TableGenBackend.cpp
index 29588db..09bcc7a 100644
--- a/contrib/llvm/lib/TableGen/TableGenBackend.cpp
+++ b/contrib/llvm/lib/TableGen/TableGenBackend.cpp
@@ -15,7 +15,9 @@
#include "llvm/TableGen/Record.h"
using namespace llvm;
-void TableGenBackend::EmitSourceFileHeader(const std::string &Desc,
+void TableGenBackend::anchor() { }
+
+void TableGenBackend::EmitSourceFileHeader(StringRef Desc,
raw_ostream &OS) const {
OS << "//===- TableGen'erated file -------------------------------------*-"
" C++ -*-===//\n//\n// " << Desc << "\n//\n// Automatically generate"
diff --git a/contrib/llvm/lib/Target/ARM/ARM.h b/contrib/llvm/lib/Target/ARM/ARM.h
index 16d0da3..2a1e8e4 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.h
+++ b/contrib/llvm/lib/Target/ARM/ARM.h
@@ -1,4 +1,4 @@
-//===-- ARM.h - Top-level interface for ARM representation---- --*- C++ -*-===//
+//===-- ARM.h - Top-level interface for ARM representation ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,9 +18,7 @@
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
-#include <cassert>
namespace llvm {
diff --git a/contrib/llvm/lib/Target/ARM/ARM.td b/contrib/llvm/lib/Target/ARM/ARM.td
index 5c727ad..9b0cb0c 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.td
+++ b/contrib/llvm/lib/Target/ARM/ARM.td
@@ -1,4 +1,4 @@
-//===- ARM.td - Describe the ARM Target Machine ------------*- tablegen -*-===//
+//===-- ARM.td - Describe the ARM Target Machine -----------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,9 +23,6 @@ include "llvm/Target/Target.td"
def ModeThumb : SubtargetFeature<"thumb-mode", "InThumbMode", "true",
"Thumb mode">;
-def ModeNaCl : SubtargetFeature<"nacl-mode", "InNaClMode", "true",
- "Native client mode">;
-
//===----------------------------------------------------------------------===//
// ARM Subtarget features.
//
@@ -35,6 +32,9 @@ def FeatureVFP2 : SubtargetFeature<"vfp2", "HasVFPv2", "true",
def FeatureVFP3 : SubtargetFeature<"vfp3", "HasVFPv3", "true",
"Enable VFP3 instructions",
[FeatureVFP2]>;
+def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true",
+ "Enable VFP4 instructions",
+ [FeatureVFP3]>;
def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true",
"Enable NEON instructions",
[FeatureVFP3]>;
@@ -86,6 +86,11 @@ def FeatureAvoidPartialCPSR : SubtargetFeature<"avoid-partial-cpsr",
"AvoidCPSRPartialUpdate", "true",
"Avoid CPSR partial update for OOO execution">;
+// Some processors perform return stack prediction. CodeGen should avoid
+// issuing "normal" call instructions to callees which do not return.
+def FeatureHasRAS : SubtargetFeature<"ras", "HasRAS", "true",
+ "Has return address stack">;
+
/// Some M architectures don't have the DSP extension (v7E-M vs. v7M)
def FeatureDSPThumb2 : SubtargetFeature<"t2dsp", "Thumb2DSP", "true",
"Supports v7 DSP instructions in Thumb2">;
@@ -201,13 +206,14 @@ def : Processor<"arm1156t2f-s", ARMV6Itineraries, [HasV6T2Ops, FeatureVFP2,
// V7a Processors.
def : Processor<"cortex-a8", CortexA8Itineraries,
[ProcA8, HasV7Ops, FeatureNEON, FeatureDB,
- FeatureDSPThumb2]>;
+ FeatureDSPThumb2, FeatureHasRAS]>;
def : Processor<"cortex-a9", CortexA9Itineraries,
[ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
- FeatureDSPThumb2]>;
+ FeatureDSPThumb2, FeatureHasRAS]>;
def : Processor<"cortex-a9-mp", CortexA9Itineraries,
[ProcA9, HasV7Ops, FeatureNEON, FeatureDB,
- FeatureDSPThumb2, FeatureMP]>;
+ FeatureDSPThumb2, FeatureMP,
+ FeatureHasRAS]>;
// V7M Processors.
def : ProcNoItin<"cortex-m3", [HasV7Ops,
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index ea3319f..410790a 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -13,10 +13,9 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "asm-printer"
-#include "ARM.h"
#include "ARMAsmPrinter.h"
+#include "ARM.h"
#include "ARMBuildAttrs.h"
-#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMTargetMachine.h"
@@ -35,7 +34,6 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCObjectStreamer.h"
@@ -44,10 +42,7 @@
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -85,15 +80,15 @@ namespace {
void EmitTextAttribute(unsigned Attribute, StringRef String) {
switch (Attribute) {
+ default: llvm_unreachable("Unsupported Text attribute in ASM Mode");
case ARMBuildAttrs::CPU_name:
- Streamer.EmitRawText(StringRef("\t.cpu ") + LowercaseString(String));
+ Streamer.EmitRawText(StringRef("\t.cpu ") + String.lower());
break;
/* GAS requires .fpu to be emitted regardless of EABI attribute */
case ARMBuildAttrs::Advanced_SIMD_arch:
case ARMBuildAttrs::VFP_arch:
- Streamer.EmitRawText(StringRef("\t.fpu ") + LowercaseString(String));
+ Streamer.EmitRawText(StringRef("\t.fpu ") + String.lower());
break;
- default: assert(0 && "Unsupported Text attribute in ASM Mode"); break;
}
}
void Finish() { }
@@ -197,15 +192,14 @@ namespace {
AttributeItemType item = Contents[i];
Streamer.EmitULEB128IntValue(item.Tag, 0);
switch (item.Type) {
+ default: llvm_unreachable("Invalid attribute type");
case AttributeItemType::NumericAttribute:
Streamer.EmitULEB128IntValue(item.IntValue, 0);
break;
case AttributeItemType::TextAttribute:
- Streamer.EmitBytes(UppercaseString(item.StringValue), 0);
+ Streamer.EmitBytes(item.StringValue.upper(), 0);
Streamer.EmitIntValue(0, 1); // '\0'
break;
- default:
- assert(0 && "Invalid attribute type");
}
}
@@ -300,6 +294,22 @@ void ARMAsmPrinter::EmitFunctionEntryLabel() {
OutStreamer.EmitLabel(CurrentFnSym);
}
+void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
+ uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
+ assert(Size && "C++ constructor pointer had zero size!");
+
+ const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
+ assert(GV && "C++ constructor pointer was not a GlobalValue!");
+
+ const MCExpr *E = MCSymbolRefExpr::Create(Mang->getSymbol(GV),
+ (Subtarget->isTargetDarwin()
+ ? MCSymbolRefExpr::VK_None
+ : MCSymbolRefExpr::VK_ARM_TARGET1),
+ OutContext);
+
+ OutStreamer.EmitValue(E, Size);
+}
+
/// runOnMachineFunction - This uses the EmitInstruction()
/// method to print assembly for each instruction.
///
@@ -316,8 +326,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
unsigned TF = MO.getTargetFlags();
switch (MO.getType()) {
- default:
- assert(0 && "<unknown operand type>");
+ default: llvm_unreachable("<unknown operand type>");
case MachineOperand::MO_Register: {
unsigned Reg = MO.getReg();
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
@@ -494,11 +503,21 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
return false;
}
- // These modifiers are not yet supported.
- case 'p': // The high single-precision register of a VFP double-precision
- // register.
case 'e': // The low doubleword register of a NEON quad register.
- case 'f': // The high doubleword register of a NEON quad register.
+ case 'f': { // The high doubleword register of a NEON quad register.
+ if (!MI->getOperand(OpNum).isReg())
+ return true;
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ if (!ARM::QPRRegClass.contains(Reg))
+ return true;
+ const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ unsigned SubReg = TRI->getSubReg(Reg, ExtraCode[0] == 'e' ?
+ ARM::dsub_0 : ARM::dsub_1);
+ O << ARMInstPrinter::getRegisterName(SubReg);
+ return false;
+ }
+
+ // These modifiers are not yet supported.
case 'h': // A range of VFP/NEON registers suitable for VLD1/VST1.
case 'H': // The highest-numbered register of a pair.
return true;
@@ -576,10 +595,8 @@ void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
OutStreamer.EmitAssemblerFlag(MCAF_SyntaxUnified);
// Emit ARM Build Attributes
- if (Subtarget->isTargetELF()) {
-
+ if (Subtarget->isTargetELF())
emitAttributes();
- }
}
@@ -710,15 +727,26 @@ void ARMAsmPrinter::emitAttributes() {
if (Subtarget->hasNEON() && emitFPU) {
/* NEON is not exactly a VFP architecture, but GAS emit one of
- * neon/vfpv3/vfpv2 for .fpu parameters */
- AttrEmitter->EmitTextAttribute(ARMBuildAttrs::Advanced_SIMD_arch, "neon");
+ * neon/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
+ if (Subtarget->hasVFP4())
+ AttrEmitter->EmitTextAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
+ "neon-vfpv4");
+ else
+ AttrEmitter->EmitTextAttribute(ARMBuildAttrs::Advanced_SIMD_arch, "neon");
/* If emitted for NEON, omit from VFP below, since you can have both
* NEON and VFP in build attributes but only one .fpu */
emitFPU = false;
}
+ /* VFPv4 + .fpu */
+ if (Subtarget->hasVFP4()) {
+ AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch,
+ ARMBuildAttrs::AllowFPv4A);
+ if (emitFPU)
+ AttrEmitter->EmitTextAttribute(ARMBuildAttrs::VFP_arch, "vfpv4");
+
/* VFPv3 + .fpu */
- if (Subtarget->hasVFP3()) {
+ } else if (Subtarget->hasVFP3()) {
AttrEmitter->EmitAttribute(ARMBuildAttrs::VFP_arch,
ARMBuildAttrs::AllowFPv3A);
if (emitFPU)
@@ -740,14 +768,14 @@ void ARMAsmPrinter::emitAttributes() {
}
// Signal various FP modes.
- if (!UnsafeFPMath) {
+ if (!TM.Options.UnsafeFPMath) {
AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_denormal,
ARMBuildAttrs::Allowed);
AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_exceptions,
ARMBuildAttrs::Allowed);
}
- if (NoInfsFPMath && NoNaNsFPMath)
+ if (TM.Options.NoInfsFPMath && TM.Options.NoNaNsFPMath)
AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_FP_number_model,
ARMBuildAttrs::Allowed);
else
@@ -760,7 +788,7 @@ void ARMAsmPrinter::emitAttributes() {
AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_align8_preserved, 1);
// Hard float. Use both S and D registers and conform to AAPCS-VFP.
- if (Subtarget->isAAPCS_ABI() && FloatABIType == FloatABI::Hard) {
+ if (Subtarget->isAAPCS_ABI() && TM.Options.FloatABIType == FloatABI::Hard) {
AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_HardFP_use, 3);
AttrEmitter->EmitAttribute(ARMBuildAttrs::ABI_VFP_args, 1);
}
@@ -808,7 +836,6 @@ static MCSymbol *getPICLabel(const char *Prefix, unsigned FunctionNumber,
static MCSymbolRefExpr::VariantKind
getModifierVariantKind(ARMCP::ARMCPModifier Modifier) {
switch (Modifier) {
- default: llvm_unreachable("Unknown modifier!");
case ARMCP::no_modifier: return MCSymbolRefExpr::VK_None;
case ARMCP::TLSGD: return MCSymbolRefExpr::VK_ARM_TLSGD;
case ARMCP::TPOFF: return MCSymbolRefExpr::VK_ARM_TPOFF;
@@ -816,7 +843,7 @@ getModifierVariantKind(ARMCP::ARMCPModifier Modifier) {
case ARMCP::GOT: return MCSymbolRefExpr::VK_ARM_GOT;
case ARMCP::GOTOFF: return MCSymbolRefExpr::VK_ARM_GOTOFF;
}
- return MCSymbolRefExpr::VK_None;
+ llvm_unreachable("Invalid ARMCPModifier!");
}
MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV) {
@@ -1070,7 +1097,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
}
// Try to figure out the unwinding opcode out of src / dst regs.
- if (MI->getDesc().mayStore()) {
+ if (MI->mayStore()) {
// Register saves.
assert(DstReg == ARM::SP &&
"Only stack pointer as a destination reg is supported");
@@ -1084,7 +1111,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
switch (Opc) {
default:
MI->dump();
- assert(0 && "Unsupported opcode for unwinding information");
+ llvm_unreachable("Unsupported opcode for unwinding information");
case ARM::tPUSH:
// Special case here: no src & dst reg, but two extra imp ops.
StartOp = 2; NumOffset = 2;
@@ -1099,6 +1126,7 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
break;
case ARM::STR_PRE_IMM:
case ARM::STR_PRE_REG:
+ case ARM::t2STR_PRE:
assert(MI->getOperand(2).getReg() == ARM::SP &&
"Only stack pointer as a source reg is supported");
RegList.push_back(SrcReg);
@@ -1112,14 +1140,16 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
switch (Opc) {
default:
MI->dump();
- assert(0 && "Unsupported opcode for unwinding information");
+ llvm_unreachable("Unsupported opcode for unwinding information");
case ARM::MOVr:
+ case ARM::tMOVr:
Offset = 0;
break;
case ARM::ADDri:
Offset = -MI->getOperand(2).getImm();
break;
case ARM::SUBri:
+ case ARM::t2SUBri:
Offset = MI->getOperand(2).getImm();
break;
case ARM::tSUBspi:
@@ -1157,16 +1187,16 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
OutStreamer.EmitPad(Offset);
} else {
MI->dump();
- assert(0 && "Unsupported opcode for unwinding information");
+ llvm_unreachable("Unsupported opcode for unwinding information");
}
} else if (DstReg == ARM::SP) {
// FIXME: .movsp goes here
MI->dump();
- assert(0 && "Unsupported opcode for unwinding information");
+ llvm_unreachable("Unsupported opcode for unwinding information");
}
else {
MI->dump();
- assert(0 && "Unsupported opcode for unwinding information");
+ llvm_unreachable("Unsupported opcode for unwinding information");
}
}
}
@@ -1195,7 +1225,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
// Check for manual lowerings.
unsigned Opc = MI->getOpcode();
switch (Opc) {
- case ARM::t2MOVi32imm: assert(0 && "Should be lowered by thumb2it pass");
+ case ARM::t2MOVi32imm: llvm_unreachable("Should be lowered by thumb2it pass");
case ARM::DBG_VALUE: {
if (isVerbose() && OutStreamer.hasRawTextSupport()) {
SmallString<128> TmpStr;
@@ -1237,7 +1267,6 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
// Darwin call instructions are just normal call instructions with different
// clobber semantics (they clobber R9).
- case ARM::BXr9_CALL:
case ARM::BX_CALL: {
{
MCInst TmpInst;
@@ -1259,7 +1288,6 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
return;
}
- case ARM::tBXr9_CALL:
case ARM::tBX_CALL: {
{
MCInst TmpInst;
@@ -1282,7 +1310,6 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
return;
}
- case ARM::BMOVPCRXr9_CALL:
case ARM::BMOVPCRX_CALL: {
{
MCInst TmpInst;
@@ -1310,6 +1337,58 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
return;
}
+ case ARM::BMOVPCB_CALL: {
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::LR));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ // Add 's' bit operand (always reg0 for this)
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::Bcc);
+ const GlobalValue *GV = MI->getOperand(0).getGlobal();
+ MCSymbol *GVSym = Mang->getSymbol(GV);
+ const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
+ TmpInst.addOperand(MCOperand::CreateExpr(GVSymExpr));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ return;
+ }
+ case ARM::t2BMOVPCB_CALL: {
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tMOVr);
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::LR));
+ TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ {
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::t2B);
+ const GlobalValue *GV = MI->getOperand(0).getGlobal();
+ MCSymbol *GVSym = Mang->getSymbol(GV);
+ const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext);
+ TmpInst.addOperand(MCOperand::CreateExpr(GVSymExpr));
+ // Add predicate operands.
+ TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ TmpInst.addOperand(MCOperand::CreateReg(0));
+ OutStreamer.EmitInstruction(TmpInst);
+ }
+ return;
+ }
case ARM::MOVi16_ga_pcrel:
case ARM::t2MOVi16_ga_pcrel: {
MCInst TmpInst;
@@ -1482,11 +1561,10 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
/// in the function. The first operand is the ID# for this instruction, the
/// second is the index into the MachineConstantPool that this is, the third
/// is the size in bytes of this constant pool entry.
+ /// The required alignment is specified on the basic block holding this MI.
unsigned LabelId = (unsigned)MI->getOperand(0).getImm();
unsigned CPIdx = (unsigned)MI->getOperand(1).getIndex();
- EmitAlignment(2);
-
// Mark the constant pool entry as data if we're not already in a data
// region.
OutStreamer.EmitDataRegion();
@@ -1898,10 +1976,10 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
{
MCInst TmpInst;
- TmpInst.setOpcode(ARM::tLDRr);
+ TmpInst.setOpcode(ARM::tLDRi);
TmpInst.addOperand(MCOperand::CreateReg(ARM::R7));
TmpInst.addOperand(MCOperand::CreateReg(SrcReg));
- TmpInst.addOperand(MCOperand::CreateReg(0));
+ TmpInst.addOperand(MCOperand::CreateImm(0));
// Predicate.
TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
TmpInst.addOperand(MCOperand::CreateReg(0));
@@ -1935,4 +2013,3 @@ extern "C" void LLVMInitializeARMAsmPrinter() {
RegisterAsmPrinter<ARMAsmPrinter> X(TheARMTarget);
RegisterAsmPrinter<ARMAsmPrinter> Y(TheThumbTarget);
}
-
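
The two new *BMOVPCB_CALL cases instantiate one lowering pattern: materialize
the return address with a MOV from PC, then branch to the callee. A condensed
sketch (IsThumb is an assumed flag; the patch spells the two cases out
separately):

    MCInst Mov;
    Mov.setOpcode(IsThumb ? ARM::tMOVr : ARM::MOVr);
    Mov.addOperand(MCOperand::CreateReg(ARM::LR));    // return address
    Mov.addOperand(MCOperand::CreateReg(ARM::PC));
    Mov.addOperand(MCOperand::CreateImm(ARMCC::AL));  // predicate: always
    Mov.addOperand(MCOperand::CreateReg(0));
    if (!IsThumb)
      Mov.addOperand(MCOperand::CreateReg(0));        // 's' bit, always reg0
    OutStreamer.EmitInstruction(Mov);

    MCInst Br;
    Br.setOpcode(IsThumb ? ARM::t2B : ARM::Bcc);
    const GlobalValue *GV = MI->getOperand(0).getGlobal();
    Br.addOperand(MCOperand::CreateExpr(
        MCSymbolRefExpr::Create(Mang->getSymbol(GV), OutContext)));
    Br.addOperand(MCOperand::CreateImm(ARMCC::AL));
    Br.addOperand(MCOperand::CreateReg(0));
    OutStreamer.EmitInstruction(Br);
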
diff --git a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
index 7741fc4..af3f75a 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/ARMAsmPrinter.h
@@ -1,4 +1,4 @@
-//===-- ARMAsmPrinter.h - Print machine code to an ARM .s file ------------===//
+//===-- ARMAsmPrinter.h - Print machine code to an ARM .s file --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -73,6 +73,7 @@ public:
virtual void EmitFunctionEntryLabel();
void EmitStartOfAsmFile(Module &M);
void EmitEndOfAsmFile(Module &M);
+ void EmitXXStructor(const Constant *CV);
// lowerOperand - Convert a MachineOperand into the equivalent MCOperand.
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
@@ -106,7 +107,7 @@ public:
if (!Subtarget->isTargetDarwin())
return 0;
return Subtarget->isThumb() ?
- llvm::ARM::DW_ISA_ARM_thumb : llvm::ARM::DW_ISA_ARM_arm;
+ ARM::DW_ISA_ARM_thumb : ARM::DW_ISA_ARM_arm;
}
MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol);
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 408edfc..c6280f8 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
+//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,10 +13,10 @@
#include "ARMBaseInstrInfo.h"
#include "ARM.h"
+#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
-#include "ARMRegisterInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
@@ -28,7 +28,6 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/BranchProbability.h"
@@ -47,7 +46,7 @@ EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
cl::desc("Enable ARM 2-addr to 3-addr conv"));
static cl::opt<bool>
-WidenVMOVS("widen-vmovs", cl::Hidden,
+WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
cl::desc("Widen ARM vmovs to vmovd when possible"));
/// ARM_MLxEntry - Record information about MLA / MLS instructions.
@@ -147,7 +146,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
- bool isLoad = !MCID.mayStore();
+ bool isLoad = !MI->mayStore();
const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
const MachineOperand &Base = MI->getOperand(2);
const MachineOperand &Offset = MI->getOperand(NumOps-3);
@@ -157,9 +156,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
unsigned OffImm = MI->getOperand(NumOps-2).getImm();
ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
switch (AddrMode) {
- default:
- assert(false && "Unknown indexed op!");
- return NULL;
+ default: llvm_unreachable("Unknown indexed op!");
case ARMII::AddrMode2: {
bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
unsigned Amt = ARM_AM::getAM2Offset(OffImm);
@@ -440,6 +437,22 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return false;
}
+bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
+ if (MI->isBundle()) {
+ MachineBasicBlock::const_instr_iterator I = MI;
+ MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+ while (++I != E && I->isInsideBundle()) {
+ int PIdx = I->findFirstPredOperandIdx();
+ if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
+ return true;
+ }
+ return false;
+ }
+
+ int PIdx = MI->findFirstPredOperandIdx();
+ return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
+}
+
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const {
@@ -490,15 +503,11 @@ SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
- // FIXME: This confuses implicit_def with optional CPSR def.
- const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.getImplicitDefs() && !MCID.hasOptionalDef())
- return false;
-
bool Found = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == ARM::CPSR) {
+ if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
+ (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
Pred.push_back(MO);
Found = true;
}
@@ -511,11 +520,10 @@ bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
- const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.isPredicable())
+ if (!MI->isPredicable())
return false;
- if ((MCID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
+ if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
ARMFunctionInfo *AFI =
MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
return AFI->isThumb2Function();
@@ -544,83 +552,95 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
if (MCID.getSize())
return MCID.getSize();
- // If this machine instr is an inline asm, measure it.
- if (MI->getOpcode() == ARM::INLINEASM)
- return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
- if (MI->isLabel())
- return 0;
+ // If this machine instr is an inline asm, measure it.
+ if (MI->getOpcode() == ARM::INLINEASM)
+ return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
+ if (MI->isLabel())
+ return 0;
unsigned Opc = MI->getOpcode();
- switch (Opc) {
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- case TargetOpcode::PROLOG_LABEL:
- case TargetOpcode::EH_LABEL:
- case TargetOpcode::DBG_VALUE:
- return 0;
- case ARM::MOVi16_ga_pcrel:
- case ARM::MOVTi16_ga_pcrel:
- case ARM::t2MOVi16_ga_pcrel:
- case ARM::t2MOVTi16_ga_pcrel:
- return 4;
- case ARM::MOVi32imm:
- case ARM::t2MOVi32imm:
- return 8;
- case ARM::CONSTPOOL_ENTRY:
- // If this machine instr is a constant pool entry, its size is recorded as
- // operand #2.
- return MI->getOperand(2).getImm();
- case ARM::Int_eh_sjlj_longjmp:
- return 16;
- case ARM::tInt_eh_sjlj_longjmp:
- return 10;
- case ARM::Int_eh_sjlj_setjmp:
- case ARM::Int_eh_sjlj_setjmp_nofp:
- return 20;
- case ARM::tInt_eh_sjlj_setjmp:
- case ARM::t2Int_eh_sjlj_setjmp:
- case ARM::t2Int_eh_sjlj_setjmp_nofp:
- return 12;
- case ARM::BR_JTr:
- case ARM::BR_JTm:
- case ARM::BR_JTadd:
- case ARM::tBR_JTr:
- case ARM::t2BR_JT:
- case ARM::t2TBB_JT:
- case ARM::t2TBH_JT: {
- // These are jumptable branches, i.e. a branch followed by an inlined
- // jumptable. The size is 4 + 4 * number of entries. For TBB, each
-    // entry is one byte; for TBH, two bytes each.
- unsigned EntrySize = (Opc == ARM::t2TBB_JT)
- ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
- unsigned NumOps = MCID.getNumOperands();
- MachineOperand JTOP =
- MI->getOperand(NumOps - (MCID.isPredicable() ? 3 : 2));
- unsigned JTI = JTOP.getIndex();
- const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- assert(MJTI != 0);
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- assert(JTI < JT.size());
- // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
-    // aligned. The assembler / linker may add 2 byte padding just before
- // the JT entries. The size does not include this padding; the
- // constant islands pass does separate bookkeeping for it.
- // FIXME: If we know the size of the function is less than (1 << 16) *2
- // bytes, we can use 16-bit entries instead. Then there won't be an
- // alignment issue.
- unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
- unsigned NumEntries = getNumJTEntries(JT, JTI);
- if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
- // Make sure the instruction that follows TBB is 2-byte aligned.
- // FIXME: Constant island pass should insert an "ALIGN" instruction
- // instead.
- ++NumEntries;
- return NumEntries * EntrySize + InstSize;
- }
- default:
- // Otherwise, pseudo-instruction sizes are zero.
- return 0;
- }
- return 0; // Not reached
+ switch (Opc) {
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::PROLOG_LABEL:
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::DBG_VALUE:
+ return 0;
+ case TargetOpcode::BUNDLE:
+ return getInstBundleLength(MI);
+ case ARM::MOVi16_ga_pcrel:
+ case ARM::MOVTi16_ga_pcrel:
+ case ARM::t2MOVi16_ga_pcrel:
+ case ARM::t2MOVTi16_ga_pcrel:
+ return 4;
+ case ARM::MOVi32imm:
+ case ARM::t2MOVi32imm:
+ return 8;
+ case ARM::CONSTPOOL_ENTRY:
+ // If this machine instr is a constant pool entry, its size is recorded as
+ // operand #2.
+ return MI->getOperand(2).getImm();
+ case ARM::Int_eh_sjlj_longjmp:
+ return 16;
+ case ARM::tInt_eh_sjlj_longjmp:
+ return 10;
+ case ARM::Int_eh_sjlj_setjmp:
+ case ARM::Int_eh_sjlj_setjmp_nofp:
+ return 20;
+ case ARM::tInt_eh_sjlj_setjmp:
+ case ARM::t2Int_eh_sjlj_setjmp:
+ case ARM::t2Int_eh_sjlj_setjmp_nofp:
+ return 12;
+ case ARM::BR_JTr:
+ case ARM::BR_JTm:
+ case ARM::BR_JTadd:
+ case ARM::tBR_JTr:
+ case ARM::t2BR_JT:
+ case ARM::t2TBB_JT:
+ case ARM::t2TBH_JT: {
+ // These are jumptable branches, i.e. a branch followed by an inlined
+ // jumptable. The size is 4 + 4 * number of entries. For TBB, each
+    // entry is one byte; for TBH, two bytes each.
+ unsigned EntrySize = (Opc == ARM::t2TBB_JT)
+ ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
+ unsigned NumOps = MCID.getNumOperands();
+ MachineOperand JTOP =
+ MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
+ unsigned JTI = JTOP.getIndex();
+ const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
+ assert(MJTI != 0);
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ assert(JTI < JT.size());
+ // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
+    // aligned. The assembler / linker may add 2 byte padding just before
+ // the JT entries. The size does not include this padding; the
+ // constant islands pass does separate bookkeeping for it.
+ // FIXME: If we know the size of the function is less than (1 << 16) *2
+ // bytes, we can use 16-bit entries instead. Then there won't be an
+ // alignment issue.
+ unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
+ unsigned NumEntries = getNumJTEntries(JT, JTI);
+ if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
+ // Make sure the instruction that follows TBB is 2-byte aligned.
+ // FIXME: Constant island pass should insert an "ALIGN" instruction
+ // instead.
+ ++NumEntries;
+ return NumEntries * EntrySize + InstSize;
+ }
+ default:
+ // Otherwise, pseudo-instruction sizes are zero.
+ return 0;
+ }
+}
+
+unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
+ unsigned Size = 0;
+ MachineBasicBlock::const_instr_iterator I = MI;
+ MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+ while (++I != E && I->isInsideBundle()) {
+ assert(!I->isBundle() && "No nested bundle!");
+ Size += GetInstSizeInBytes(&*I);
+ }
+ return Size;
}
void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
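
The new isPredicated() and getInstBundleLength() share a bundle-walking idiom
that reappears in the latency code further down. Its shape, pulled out for
reference (a sketch, not a helper the patch adds):

    // Given MI->isBundle(), visit every instruction inside the bundle.
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      // Per-instruction work goes here: test a predicate operand,
      // add GetInstSizeInBytes(&*I), accumulate latency, ...
    }
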
@@ -660,29 +680,51 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
}
- // Generate instructions for VMOVQQ and VMOVQQQQ pseudos in place.
- if (ARM::QQPRRegClass.contains(DestReg, SrcReg) ||
- ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
+ // Handle register classes that require multiple instructions.
+ unsigned BeginIdx = 0;
+ unsigned SubRegs = 0;
+ unsigned Spacing = 1;
+
+ // Use VORRq when possible.
+ if (ARM::QQPRRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 2;
+ else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 4;
+ // Fall back to VMOVD.
+ else if (ARM::DPairRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2;
+ else if (ARM::DTripleRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3;
+ else if (ARM::DQuadRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4;
+
+ else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2, Spacing = 2;
+ else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3, Spacing = 2;
+ else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4, Spacing = 2;
+
+ if (Opc) {
const TargetRegisterInfo *TRI = &getRegisterInfo();
- assert(ARM::qsub_0 + 3 == ARM::qsub_3 && "Expected contiguous enum.");
- unsigned EndSubReg = ARM::QQPRRegClass.contains(DestReg, SrcReg) ?
- ARM::qsub_1 : ARM::qsub_3;
- for (unsigned i = ARM::qsub_0, e = EndSubReg + 1; i != e; ++i) {
- unsigned Dst = TRI->getSubReg(DestReg, i);
- unsigned Src = TRI->getSubReg(SrcReg, i);
- MachineInstrBuilder Mov =
- AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VORRq))
- .addReg(Dst, RegState::Define)
- .addReg(Src, getKillRegState(KillSrc))
- .addReg(Src, getKillRegState(KillSrc)));
- if (i == EndSubReg) {
- Mov->addRegisterDefined(DestReg, TRI);
- if (KillSrc)
- Mov->addRegisterKilled(SrcReg, TRI);
- }
+ MachineInstrBuilder Mov;
+ for (unsigned i = 0; i != SubRegs; ++i) {
+ unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing);
+ unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i*Spacing);
+ assert(Dst && Src && "Bad sub-register");
+ Mov = AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst)
+ .addReg(Src));
+ // VORR takes two source operands.
+ if (Opc == ARM::VORRq)
+ Mov.addReg(Src);
}
+ // Add implicit super-register defs and kills to the last instruction.
+ Mov->addRegisterDefined(DestReg, TRI);
+ if (KillSrc)
+ Mov->addRegisterKilled(SrcReg, TRI);
return;
}
+
llvm_unreachable("Impossible reg-to-reg copy");
}
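
The rewritten copyPhysReg replaces the QQ/QQQQ special case with a small
dispatch: an opcode plus a sub-register walk (count and spacing) chosen per
register class. The settings the hunk installs, tabulated:

    register class(es)                  opcode   sub-regs   spacing
    QQPR / QQQQPR                       VORRq    2 / 4      1
    DPair / DTriple / DQuad             VMOVD    2 / 3 / 4  1
    DPairSpc / DTripleSpc / DQuadSpc    VMOVD    2 / 3 / 4  2

Spacing = 2 covers the odd-spaced D-register tuples, whose members sit two
dsub indices apart, so the emit loop only needs BeginIdx + i * Spacing.
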
@@ -710,8 +752,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Align = MFI.getObjectAlignment(FI);
MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo(
- PseudoSourceValue::getFixedStack(FI)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
MachineMemOperand::MOStore,
MFI.getObjectSize(FI),
Align);
@@ -738,9 +779,10 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
llvm_unreachable("Unknown reg class!");
break;
case 16:
- if (ARM::QPRRegClass.hasSubClassEq(RC)) {
- if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64Pseudo))
+ if (ARM::DPairRegClass.hasSubClassEq(RC)) {
+ // Use aligned spills if the stack can be realigned.
+ if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
.addFrameIndex(FI).addImm(16)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO));
@@ -825,7 +867,7 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
return MI->getOperand(0).getReg();
}
break;
- case ARM::VST1q64Pseudo:
+ case ARM::VST1q64:
if (MI->getOperand(0).isFI() &&
MI->getOperand(2).getSubReg() == 0) {
FrameIndex = MI->getOperand(0).getIndex();
@@ -847,7 +889,7 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const {
const MachineMemOperand *Dummy;
- return MI->getDesc().mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
+ return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
}
void ARMBaseInstrInfo::
@@ -862,7 +904,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Align = MFI.getObjectAlignment(FI);
MachineMemOperand *MMO =
MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MachinePointerInfo::getFixedStack(FI),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
Align);
@@ -887,9 +929,9 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
llvm_unreachable("Unknown reg class!");
break;
case 16:
- if (ARM::QPRRegClass.hasSubClassEq(RC)) {
- if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64Pseudo), DestReg)
+ if (ARM::DPairRegClass.hasSubClassEq(RC)) {
+ if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
.addFrameIndex(FI).addImm(16)
.addMemOperand(MMO));
} else {
@@ -911,11 +953,12 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
.addFrameIndex(FI))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
- MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
+ if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ MIB.addReg(DestReg, RegState::ImplicitDefine);
}
} else
llvm_unreachable("Unknown reg class!");
@@ -926,15 +969,16 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
.addFrameIndex(FI))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
- MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
+ if (TargetRegisterInfo::isPhysicalRegister(DestReg))
+ MIB.addReg(DestReg, RegState::ImplicitDefine);
} else
llvm_unreachable("Unknown reg class!");
break;
@@ -971,7 +1015,7 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return MI->getOperand(0).getReg();
}
break;
- case ARM::VLD1q64Pseudo:
+ case ARM::VLD1q64:
if (MI->getOperand(1).isFI() &&
MI->getOperand(0).getSubReg() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
@@ -993,7 +1037,7 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const {
const MachineMemOperand *Dummy;
- return MI->getDesc().mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
+ return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}
bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
@@ -1359,7 +1403,7 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
return false;
// Terminators and labels can't be scheduled around.
- if (MI->getDesc().isTerminator() || MI->isLabel())
+ if (MI->isTerminator() || MI->isLabel())
return true;
// Treat the start of the IT block as a scheduling boundary, but schedule
@@ -1380,7 +1424,10 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
// saves compile time, because it doesn't require every single
// stack slot reference to depend on the instruction that does the
// modification.
- if (MI->definesRegister(ARM::SP))
+ // Calls don't actually change the stack pointer, even if they have imp-defs.
+ // No ARM calling conventions change the stack pointer. (X86 calling
+ // conventions sometimes do).
+ if (!MI->isCall() && MI->definesRegister(ARM::SP))
return true;
return false;
@@ -1445,15 +1492,37 @@ llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
int llvm::getMatchingCondBranchOpcode(int Opc) {
if (Opc == ARM::B)
return ARM::Bcc;
- else if (Opc == ARM::tB)
+ if (Opc == ARM::tB)
return ARM::tBcc;
- else if (Opc == ARM::t2B)
- return ARM::t2Bcc;
+ if (Opc == ARM::t2B)
+ return ARM::t2Bcc;
llvm_unreachable("Unknown unconditional branch opcode!");
- return 0;
}
+/// commuteInstruction - Handle commutable instructions.
+MachineInstr *
+ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
+ switch (MI->getOpcode()) {
+ case ARM::MOVCCr:
+ case ARM::t2MOVCCr: {
+ // MOVCC can be commuted by inverting the condition.
+ unsigned PredReg = 0;
+ ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
+ // MOVCC AL can't be inverted. Shouldn't happen.
+ if (CC == ARMCC::AL || PredReg != ARM::CPSR)
+ return NULL;
+ MI = TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
+ if (!MI)
+ return NULL;
+ // After swapping the MOVCC operands, also invert the condition.
+ MI->getOperand(MI->findFirstPredOperandIdx())
+ .setImm(ARMCC::getOppositeCondition(CC));
+ return MI;
+ }
+ }
+ return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
+}
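
The MOVCC commute is easiest to see on a concrete case (register names are
illustrative; the destination is tied to the first source operand):

    // Rd = MOVCCr Ra, Rb, EQ        =>  Rd = (Z set) ? Rb : Ra
    // commuteInstruction swaps Ra and Rb; inverting the predicate then
    // restores the original semantics:
    // Rd = MOVCCr Rb, Ra, NE        =>  Rd = (Z clear) ? Ra : Rb  (same value)
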
/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
/// instruction is encoded with an 'S' bit is determined by the optional CPSR
@@ -1478,7 +1547,6 @@ static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
{ARM::SUBSrsr, ARM::SUBrsr},
{ARM::RSBSri, ARM::RSBri},
- {ARM::RSBSrr, ARM::RSBrr},
{ARM::RSBSrsi, ARM::RSBrsi},
{ARM::RSBSrsr, ARM::RSBrsr},
@@ -1625,7 +1693,6 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
}
default:
llvm_unreachable("Unsupported addressing mode!");
- break;
}
Offset += InstrOffs * Scale;
@@ -1765,8 +1832,7 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
// Check that CPSR isn't set between the comparison instruction and the one we
// want to change.
- MachineBasicBlock::const_iterator I = CmpInstr, E = MI,
- B = MI->getParent()->begin();
+ MachineBasicBlock::iterator I = CmpInstr,E = MI, B = MI->getParent()->begin();
// Early exit if CmpInstr is at the beginning of the BB.
if (I == B) return false;
@@ -1777,6 +1843,8 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) {
const MachineOperand &MO = Instr.getOperand(IO);
+ if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR))
+ return false;
if (!MO.isReg()) continue;
// This instruction modifies or uses CPSR after the one we want to
@@ -1838,6 +1906,10 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
for (unsigned IO = 0, EO = Instr.getNumOperands();
!isSafe && IO != EO; ++IO) {
const MachineOperand &MO = Instr.getOperand(IO);
+ if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
+ isSafe = true;
+ break;
+ }
if (!MO.isReg() || MO.getReg() != ARM::CPSR)
continue;
if (MO.isDef()) {
@@ -1889,6 +1961,25 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
if (!MRI->hasOneNonDBGUse(Reg))
return false;
+ const MCInstrDesc &DefMCID = DefMI->getDesc();
+ if (DefMCID.hasOptionalDef()) {
+ unsigned NumOps = DefMCID.getNumOperands();
+ const MachineOperand &MO = DefMI->getOperand(NumOps-1);
+ if (MO.getReg() == ARM::CPSR && !MO.isDead())
+ // If DefMI defines CPSR and it is not dead, it's obviously not safe
+ // to delete DefMI.
+ return false;
+ }
+
+ const MCInstrDesc &UseMCID = UseMI->getDesc();
+ if (UseMCID.hasOptionalDef()) {
+ unsigned NumOps = UseMCID.getNumOperands();
+ if (UseMI->getOperand(NumOps-1).getReg() == ARM::CPSR)
+ // If the instruction sets the flag, do not attempt this optimization
+ // since it may change the semantics of the code.
+ return false;
+ }
+
unsigned UseOpc = UseMI->getOpcode();
unsigned NewUseOpc = 0;
uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
@@ -1960,7 +2051,7 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
bool isKill = UseMI->getOperand(OpIdx).isKill();
unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
- *UseMI, UseMI->getDebugLoc(),
+ UseMI, UseMI->getDebugLoc(),
get(NewUseOpc), NewReg)
.addReg(Reg1, getKillRegState(isKill))
.addImm(SOImmValV1)));
@@ -1988,7 +2079,6 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
switch (Opc) {
default:
llvm_unreachable("Unexpected multi-uops instruction!");
- break;
case ARM::VLDMQIA:
case ARM::VSTMQIA:
return 2;
@@ -2335,6 +2425,59 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
return UseCycle;
}
+static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
+ const MachineInstr *MI, unsigned Reg,
+ unsigned &DefIdx, unsigned &Dist) {
+ Dist = 0;
+
+ MachineBasicBlock::const_iterator I = MI; ++I;
+ MachineBasicBlock::const_instr_iterator II =
+ llvm::prior(I.getInstrIterator());
+ assert(II->isInsideBundle() && "Empty bundle?");
+
+ int Idx = -1;
+ while (II->isInsideBundle()) {
+ Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
+ if (Idx != -1)
+ break;
+ --II;
+ ++Dist;
+ }
+
+ assert(Idx != -1 && "Cannot find bundled definition!");
+ DefIdx = Idx;
+ return II;
+}
+
+static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
+ const MachineInstr *MI, unsigned Reg,
+ unsigned &UseIdx, unsigned &Dist) {
+ Dist = 0;
+
+ MachineBasicBlock::const_instr_iterator II = MI; ++II;
+ assert(II->isInsideBundle() && "Empty bundle?");
+ MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+
+ // FIXME: This doesn't properly handle multiple uses.
+ int Idx = -1;
+ while (II != E && II->isInsideBundle()) {
+ Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
+ if (Idx != -1)
+ break;
+ if (II->getOpcode() != ARM::t2IT)
+ ++Dist;
+ ++II;
+ }
+
+ if (Idx == -1) {
+ Dist = 0;
+ return 0;
+ }
+
+ UseIdx = Idx;
+ return II;
+}
+
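
These helpers redirect a latency query from the BUNDLE marker to the real def
or use inside the bundle, reporting in Dist how many bundled instructions were
stepped over. The getOperandLatency hunk below then discounts the itinerary
latency by that distance, clamped at one cycle; roughly (a paraphrase of the
lines that follow, not a separate function):

    int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
                                    *UseMCID, UseIdx, UseAlign);
    // Instructions inside a bundle issue back to back, so each one skipped
    // on the way to the real def/use shaves a cycle off the edge.
    Latency -= (int)(DefAdj + UseAdj);
    if (Latency < 1)
      Latency = 1; // the patch returns 1 early at this point
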
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
@@ -2343,35 +2486,77 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
DefMI->isRegSequence() || DefMI->isImplicitDef())
return 1;
- const MCInstrDesc &DefMCID = DefMI->getDesc();
if (!ItinData || ItinData->isEmpty())
- return DefMCID.mayLoad() ? 3 : 1;
+ return DefMI->mayLoad() ? 3 : 1;
- const MCInstrDesc &UseMCID = UseMI->getDesc();
+ const MCInstrDesc *DefMCID = &DefMI->getDesc();
+ const MCInstrDesc *UseMCID = &UseMI->getDesc();
const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
- if (DefMO.getReg() == ARM::CPSR) {
+ unsigned Reg = DefMO.getReg();
+ if (Reg == ARM::CPSR) {
if (DefMI->getOpcode() == ARM::FMSTAT) {
// fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
return Subtarget.isCortexA9() ? 1 : 20;
}
// CPSR set and branch can be paired in the same cycle.
- if (UseMCID.isBranch())
+ if (UseMI->isBranch())
return 0;
+
+ // Otherwise it takes the instruction latency (generally one).
+ int Latency = getInstrLatency(ItinData, DefMI);
+
+    // For Thumb2 and -Os, prefer scheduling a CPSR-setting instruction close to
+ // its uses. Instructions which are otherwise scheduled between them may
+ // incur a code size penalty (not able to use the CPSR setting 16-bit
+ // instructions).
+ if (Latency > 0 && Subtarget.isThumb2()) {
+ const MachineFunction *MF = DefMI->getParent()->getParent();
+ if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ --Latency;
+ }
+ return Latency;
}
unsigned DefAlign = DefMI->hasOneMemOperand()
? (*DefMI->memoperands_begin())->getAlignment() : 0;
unsigned UseAlign = UseMI->hasOneMemOperand()
? (*UseMI->memoperands_begin())->getAlignment() : 0;
- int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
- UseMCID, UseIdx, UseAlign);
+
+ unsigned DefAdj = 0;
+ if (DefMI->isBundle()) {
+ DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
+ if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
+ DefMI->isRegSequence() || DefMI->isImplicitDef())
+ return 1;
+ DefMCID = &DefMI->getDesc();
+ }
+ unsigned UseAdj = 0;
+ if (UseMI->isBundle()) {
+ unsigned NewUseIdx;
+ const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
+ Reg, NewUseIdx, UseAdj);
+ if (NewUseMI) {
+ UseMI = NewUseMI;
+ UseIdx = NewUseIdx;
+ UseMCID = &UseMI->getDesc();
+ }
+ }
+
+ int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
+ *UseMCID, UseIdx, UseAlign);
+ int Adj = DefAdj + UseAdj;
+ if (Adj) {
+ Latency -= (int)(DefAdj + UseAdj);
+ if (Latency < 1)
+ return 1;
+ }
if (Latency > 1 &&
(Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
// variants are one cycle cheaper.
- switch (DefMCID.getOpcode()) {
+ switch (DefMCID->getOpcode()) {
default: break;
case ARM::LDRrs:
case ARM::LDRBrs: {
@@ -2396,28 +2581,38 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
}
if (DefAlign < 8 && Subtarget.isCortexA9())
- switch (DefMCID.getOpcode()) {
+ switch (DefMCID->getOpcode()) {
default: break;
case ARM::VLD1q8:
case ARM::VLD1q16:
case ARM::VLD1q32:
case ARM::VLD1q64:
- case ARM::VLD1q8_UPD:
- case ARM::VLD1q16_UPD:
- case ARM::VLD1q32_UPD:
- case ARM::VLD1q64_UPD:
+ case ARM::VLD1q8wb_fixed:
+ case ARM::VLD1q16wb_fixed:
+ case ARM::VLD1q32wb_fixed:
+ case ARM::VLD1q64wb_fixed:
+ case ARM::VLD1q8wb_register:
+ case ARM::VLD1q16wb_register:
+ case ARM::VLD1q32wb_register:
+ case ARM::VLD1q64wb_register:
case ARM::VLD2d8:
case ARM::VLD2d16:
case ARM::VLD2d32:
case ARM::VLD2q8:
case ARM::VLD2q16:
case ARM::VLD2q32:
- case ARM::VLD2d8_UPD:
- case ARM::VLD2d16_UPD:
- case ARM::VLD2d32_UPD:
- case ARM::VLD2q8_UPD:
- case ARM::VLD2q16_UPD:
- case ARM::VLD2q32_UPD:
+ case ARM::VLD2d8wb_fixed:
+ case ARM::VLD2d16wb_fixed:
+ case ARM::VLD2d32wb_fixed:
+ case ARM::VLD2q8wb_fixed:
+ case ARM::VLD2q16wb_fixed:
+ case ARM::VLD2q32wb_fixed:
+ case ARM::VLD2d8wb_register:
+ case ARM::VLD2d16wb_register:
+ case ARM::VLD2d32wb_register:
+ case ARM::VLD2q8wb_register:
+ case ARM::VLD2q16wb_register:
+ case ARM::VLD2q32wb_register:
case ARM::VLD3d8:
case ARM::VLD3d16:
case ARM::VLD3d32:
@@ -2425,7 +2620,8 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::VLD3d8_UPD:
case ARM::VLD3d16_UPD:
case ARM::VLD3d32_UPD:
- case ARM::VLD1d64T_UPD:
+ case ARM::VLD1d64Twb_fixed:
+ case ARM::VLD1d64Twb_register:
case ARM::VLD3q8_UPD:
case ARM::VLD3q16_UPD:
case ARM::VLD3q32_UPD:
@@ -2436,22 +2632,29 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::VLD4d8_UPD:
case ARM::VLD4d16_UPD:
case ARM::VLD4d32_UPD:
- case ARM::VLD1d64Q_UPD:
+ case ARM::VLD1d64Qwb_fixed:
+ case ARM::VLD1d64Qwb_register:
case ARM::VLD4q8_UPD:
case ARM::VLD4q16_UPD:
case ARM::VLD4q32_UPD:
case ARM::VLD1DUPq8:
case ARM::VLD1DUPq16:
case ARM::VLD1DUPq32:
- case ARM::VLD1DUPq8_UPD:
- case ARM::VLD1DUPq16_UPD:
- case ARM::VLD1DUPq32_UPD:
+ case ARM::VLD1DUPq8wb_fixed:
+ case ARM::VLD1DUPq16wb_fixed:
+ case ARM::VLD1DUPq32wb_fixed:
+ case ARM::VLD1DUPq8wb_register:
+ case ARM::VLD1DUPq16wb_register:
+ case ARM::VLD1DUPq32wb_register:
case ARM::VLD2DUPd8:
case ARM::VLD2DUPd16:
case ARM::VLD2DUPd32:
- case ARM::VLD2DUPd8_UPD:
- case ARM::VLD2DUPd16_UPD:
- case ARM::VLD2DUPd32_UPD:
+ case ARM::VLD2DUPd8wb_fixed:
+ case ARM::VLD2DUPd16wb_fixed:
+ case ARM::VLD2DUPd32wb_fixed:
+ case ARM::VLD2DUPd8wb_register:
+ case ARM::VLD2DUPd16wb_register:
+ case ARM::VLD2DUPd32wb_register:
case ARM::VLD4DUPd8:
case ARM::VLD4DUPd16:
case ARM::VLD4DUPd32:
@@ -2559,26 +2762,36 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
if (DefAlign < 8 && Subtarget.isCortexA9())
switch (DefMCID.getOpcode()) {
default: break;
- case ARM::VLD1q8Pseudo:
- case ARM::VLD1q16Pseudo:
- case ARM::VLD1q32Pseudo:
- case ARM::VLD1q64Pseudo:
- case ARM::VLD1q8Pseudo_UPD:
- case ARM::VLD1q16Pseudo_UPD:
- case ARM::VLD1q32Pseudo_UPD:
- case ARM::VLD1q64Pseudo_UPD:
- case ARM::VLD2d8Pseudo:
- case ARM::VLD2d16Pseudo:
- case ARM::VLD2d32Pseudo:
+ case ARM::VLD1q8:
+ case ARM::VLD1q16:
+ case ARM::VLD1q32:
+ case ARM::VLD1q64:
+ case ARM::VLD1q8wb_register:
+ case ARM::VLD1q16wb_register:
+ case ARM::VLD1q32wb_register:
+ case ARM::VLD1q64wb_register:
+ case ARM::VLD1q8wb_fixed:
+ case ARM::VLD1q16wb_fixed:
+ case ARM::VLD1q32wb_fixed:
+ case ARM::VLD1q64wb_fixed:
+ case ARM::VLD2d8:
+ case ARM::VLD2d16:
+ case ARM::VLD2d32:
case ARM::VLD2q8Pseudo:
case ARM::VLD2q16Pseudo:
case ARM::VLD2q32Pseudo:
- case ARM::VLD2d8Pseudo_UPD:
- case ARM::VLD2d16Pseudo_UPD:
- case ARM::VLD2d32Pseudo_UPD:
- case ARM::VLD2q8Pseudo_UPD:
- case ARM::VLD2q16Pseudo_UPD:
- case ARM::VLD2q32Pseudo_UPD:
+ case ARM::VLD2d8wb_fixed:
+ case ARM::VLD2d16wb_fixed:
+ case ARM::VLD2d32wb_fixed:
+ case ARM::VLD2q8PseudoWB_fixed:
+ case ARM::VLD2q16PseudoWB_fixed:
+ case ARM::VLD2q32PseudoWB_fixed:
+ case ARM::VLD2d8wb_register:
+ case ARM::VLD2d16wb_register:
+ case ARM::VLD2d32wb_register:
+ case ARM::VLD2q8PseudoWB_register:
+ case ARM::VLD2q16PseudoWB_register:
+ case ARM::VLD2q32PseudoWB_register:
case ARM::VLD3d8Pseudo:
case ARM::VLD3d16Pseudo:
case ARM::VLD3d32Pseudo:
@@ -2586,7 +2799,6 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::VLD3d8Pseudo_UPD:
case ARM::VLD3d16Pseudo_UPD:
case ARM::VLD3d32Pseudo_UPD:
- case ARM::VLD1d64TPseudo_UPD:
case ARM::VLD3q8Pseudo_UPD:
case ARM::VLD3q16Pseudo_UPD:
case ARM::VLD3q32Pseudo_UPD:
@@ -2603,7 +2815,6 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::VLD4d8Pseudo_UPD:
case ARM::VLD4d16Pseudo_UPD:
case ARM::VLD4d32Pseudo_UPD:
- case ARM::VLD1d64QPseudo_UPD:
case ARM::VLD4q8Pseudo_UPD:
case ARM::VLD4q16Pseudo_UPD:
case ARM::VLD4q32Pseudo_UPD:
@@ -2613,18 +2824,24 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
case ARM::VLD4q8oddPseudo_UPD:
case ARM::VLD4q16oddPseudo_UPD:
case ARM::VLD4q32oddPseudo_UPD:
- case ARM::VLD1DUPq8Pseudo:
- case ARM::VLD1DUPq16Pseudo:
- case ARM::VLD1DUPq32Pseudo:
- case ARM::VLD1DUPq8Pseudo_UPD:
- case ARM::VLD1DUPq16Pseudo_UPD:
- case ARM::VLD1DUPq32Pseudo_UPD:
- case ARM::VLD2DUPd8Pseudo:
- case ARM::VLD2DUPd16Pseudo:
- case ARM::VLD2DUPd32Pseudo:
- case ARM::VLD2DUPd8Pseudo_UPD:
- case ARM::VLD2DUPd16Pseudo_UPD:
- case ARM::VLD2DUPd32Pseudo_UPD:
+ case ARM::VLD1DUPq8:
+ case ARM::VLD1DUPq16:
+ case ARM::VLD1DUPq32:
+ case ARM::VLD1DUPq8wb_fixed:
+ case ARM::VLD1DUPq16wb_fixed:
+ case ARM::VLD1DUPq32wb_fixed:
+ case ARM::VLD1DUPq8wb_register:
+ case ARM::VLD1DUPq16wb_register:
+ case ARM::VLD1DUPq32wb_register:
+ case ARM::VLD2DUPd8:
+ case ARM::VLD2DUPd16:
+ case ARM::VLD2DUPd32:
+ case ARM::VLD2DUPd8wb_fixed:
+ case ARM::VLD2DUPd16wb_fixed:
+ case ARM::VLD2DUPd32wb_fixed:
+ case ARM::VLD2DUPd8wb_register:
+ case ARM::VLD2DUPd16wb_register:
+ case ARM::VLD2DUPd32wb_register:
case ARM::VLD4DUPd8Pseudo:
case ARM::VLD4DUPd16Pseudo:
case ARM::VLD4DUPd32Pseudo:
@@ -2666,6 +2883,19 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
return Latency;
}
+unsigned
+ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *DepMI) const {
+ unsigned Reg = DefMI->getOperand(DefIdx).getReg();
+ if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI))
+ return 1;
+
+ // If the second MI is predicated, then there is an implicit use dependency.
+ return getOperandLatency(ItinData, DefMI, DefIdx, DepMI,
+ DepMI->getNumOperands());
+}
+
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
@@ -2676,10 +2906,21 @@ int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
if (!ItinData || ItinData->isEmpty())
return 1;
+ if (MI->isBundle()) {
+ int Latency = 0;
+ MachineBasicBlock::const_instr_iterator I = MI;
+ MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+ while (++I != E && I->isInsideBundle()) {
+ if (I->getOpcode() != ARM::t2IT)
+ Latency += getInstrLatency(ItinData, I, PredCost);
+ }
+ return Latency;
+ }
+
const MCInstrDesc &MCID = MI->getDesc();
unsigned Class = MCID.getSchedClass();
unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
- if (PredCost && MCID.hasImplicitDefOfPhysReg(ARM::CPSR))
+ if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)))
// When predicated, CPSR is an additional source operand for CPSR updating
// instructions, this apparently increases their latencies.
*PredCost = 1;
@@ -2828,3 +3069,7 @@ ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
// This will go before any implicit ops.
AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1)));
}
+
+bool ARMBaseInstrInfo::hasNOP() const {
+ return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0;
+}
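
The getOutputLatency override added above encodes one fact: a predicated
definition implicitly reads its destination, because the old value must
survive when the predicate is false. An illustrative pair (ARM assembly in
comments, registers arbitrary):

    //   vmov.f64   d0, d1   ; DefMI: unconditional write of d0
    //   vmoveq.f64 d0, d2   ; DepMI: predicated write -- it must read the
    //                       ; old d0 to preserve it when EQ is false, so
    //                       ; this write-after-write edge is priced as a
    //                       ; full def->use latency, not the default 1.
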
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 0f9f321..2fe8507 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -1,4 +1,4 @@
-//===- ARMBaseInstrInfo.h - ARM Base Instruction Information ----*- C++ -*-===//
+//===-- ARMBaseInstrInfo.h - ARM Base Instruction Information ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -35,6 +35,9 @@ protected:
explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
public:
+ // Return whether the target has an explicit NOP encoding.
+ bool hasNOP() const;
+
// Return the non-pre/post incrementing version of 'Opc'. Return 0
  // if there is no such opcode.
virtual unsigned getUnindexedOpcode(unsigned Opc) const =0;
@@ -69,10 +72,7 @@ public:
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
// Predication support.
- bool isPredicated(const MachineInstr *MI) const {
- int PIdx = MI->findFirstPredOperandIdx();
- return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
- }
+ bool isPredicated(const MachineInstr *MI) const;
ARMCC::CondCodes getPredicate(const MachineInstr *MI) const {
int PIdx = MI->findFirstPredOperandIdx();
@@ -139,6 +139,8 @@ public:
MachineInstr *duplicate(MachineInstr *Orig, MachineFunction &MF) const;
+ MachineInstr *commuteInstruction(MachineInstr*, bool=false) const;
+
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1,
const MachineRegisterInfo *MRI) const;
@@ -213,12 +215,18 @@ public:
SDNode *DefNode, unsigned DefIdx,
SDNode *UseNode, unsigned UseIdx) const;
+ virtual unsigned getOutputLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *DepMI) const;
+
/// VFP/NEON execution domains.
std::pair<uint16_t, uint16_t>
getExecutionDomain(const MachineInstr *MI) const;
void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;
private:
+ unsigned getInstBundleLength(const MachineInstr *MI) const;
+
int getVLDMDefCycle(const InstrItineraryData *ItinData,
const MCInstrDesc &DefMCID,
unsigned DefClass,
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 154f1f8..3907f75 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
+//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,11 +11,10 @@
//
//===----------------------------------------------------------------------===//
+#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
-#include "ARMBaseRegisterInfo.h"
#include "ARMFrameLowering.h"
-#include "ARMInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
@@ -61,41 +60,14 @@ ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
BasePtr(ARM::R6) {
}
-const unsigned*
+const uint16_t*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- bool ghcCall = false;
-
- if (MF) {
- const Function *F = MF->getFunction();
- ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
- }
-
- static const unsigned CalleeSavedRegs[] = {
- ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
- ARM::R7, ARM::R6, ARM::R5, ARM::R4,
-
- ARM::D15, ARM::D14, ARM::D13, ARM::D12,
- ARM::D11, ARM::D10, ARM::D9, ARM::D8,
- 0
- };
-
- static const unsigned DarwinCalleeSavedRegs[] = {
- // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
- // register.
- ARM::LR, ARM::R7, ARM::R6, ARM::R5, ARM::R4,
- ARM::R11, ARM::R10, ARM::R8,
-
- ARM::D15, ARM::D14, ARM::D13, ARM::D12,
- ARM::D11, ARM::D10, ARM::D9, ARM::D8,
- 0
- };
-
- static const unsigned GhcCalleeSavedRegs[] = {
- 0
- };
+ return (STI.isTargetIOS()) ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;
+}
- return ghcCall ? GhcCalleeSavedRegs :
- STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
+const uint32_t*
+ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
+ return (STI.isTargetIOS()) ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}
BitVector ARMBaseRegisterInfo::
@@ -148,104 +120,6 @@ bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
return false;
}
-const TargetRegisterClass *
-ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B,
- unsigned SubIdx) const {
- switch (SubIdx) {
- default: return 0;
- case ARM::ssub_0:
- case ARM::ssub_1:
- case ARM::ssub_2:
- case ARM::ssub_3: {
- // S sub-registers.
- if (A->getSize() == 8) {
- if (B == &ARM::SPR_8RegClass)
- return &ARM::DPR_8RegClass;
- assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
- if (A == &ARM::DPR_8RegClass)
- return A;
- return &ARM::DPR_VFP2RegClass;
- }
-
- if (A->getSize() == 16) {
- if (B == &ARM::SPR_8RegClass)
- return &ARM::QPR_8RegClass;
- return &ARM::QPR_VFP2RegClass;
- }
-
- if (A->getSize() == 32) {
- if (B == &ARM::SPR_8RegClass)
- return 0; // Do not allow coalescing!
- return &ARM::QQPR_VFP2RegClass;
- }
-
- assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
- return 0; // Do not allow coalescing!
- }
- case ARM::dsub_0:
- case ARM::dsub_1:
- case ARM::dsub_2:
- case ARM::dsub_3: {
- // D sub-registers.
- if (A->getSize() == 16) {
- if (B == &ARM::DPR_VFP2RegClass)
- return &ARM::QPR_VFP2RegClass;
- if (B == &ARM::DPR_8RegClass)
- return 0; // Do not allow coalescing!
- return A;
- }
-
- if (A->getSize() == 32) {
- if (B == &ARM::DPR_VFP2RegClass)
- return &ARM::QQPR_VFP2RegClass;
- if (B == &ARM::DPR_8RegClass)
- return 0; // Do not allow coalescing!
- return A;
- }
-
- assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
- if (B != &ARM::DPRRegClass)
- return 0; // Do not allow coalescing!
- return A;
- }
- case ARM::dsub_4:
- case ARM::dsub_5:
- case ARM::dsub_6:
- case ARM::dsub_7: {
- // D sub-registers of QQQQ registers.
- if (A->getSize() == 64 && B == &ARM::DPRRegClass)
- return A;
- return 0; // Do not allow coalescing!
- }
-
- case ARM::qsub_0:
- case ARM::qsub_1: {
- // Q sub-registers.
- if (A->getSize() == 32) {
- if (B == &ARM::QPR_VFP2RegClass)
- return &ARM::QQPR_VFP2RegClass;
- if (B == &ARM::QPR_8RegClass)
- return 0; // Do not allow coalescing!
- return A;
- }
-
- assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
- if (B == &ARM::QPRRegClass)
- return A;
- return 0; // Do not allow coalescing!
- }
- case ARM::qsub_2:
- case ARM::qsub_3: {
- // Q sub-registers of QQQQ registers.
- if (A->getSize() == 64 && B == &ARM::QPRRegClass)
- return A;
- return 0; // Do not allow coalescing!
- }
- }
- return 0;
-}
-
bool
ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
SmallVectorImpl<unsigned> &SubIndices,
@@ -416,7 +290,7 @@ ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
/// getRawAllocationOrder - Returns the register allocation order for a
/// specified register class with a target-dependent hint.
-ArrayRef<unsigned>
+ArrayRef<uint16_t>
ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
unsigned HintType, unsigned HintReg,
const MachineFunction &MF) const {
@@ -425,71 +299,71 @@ ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
// of register pairs.
// No FP, R9 is available.
- static const unsigned GPREven1[] = {
+ static const uint16_t GPREven1[] = {
ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
ARM::R9, ARM::R11
};
- static const unsigned GPROdd1[] = {
+ static const uint16_t GPROdd1[] = {
ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
ARM::R8, ARM::R10
};
// FP is R7, R9 is available.
- static const unsigned GPREven2[] = {
+ static const uint16_t GPREven2[] = {
ARM::R0, ARM::R2, ARM::R4, ARM::R8, ARM::R10,
ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
ARM::R9, ARM::R11
};
- static const unsigned GPROdd2[] = {
+ static const uint16_t GPROdd2[] = {
ARM::R1, ARM::R3, ARM::R5, ARM::R9, ARM::R11,
ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
ARM::R8, ARM::R10
};
// FP is R11, R9 is available.
- static const unsigned GPREven3[] = {
+ static const uint16_t GPREven3[] = {
ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
ARM::R9
};
- static const unsigned GPROdd3[] = {
+ static const uint16_t GPROdd3[] = {
ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
ARM::R8
};
// No FP, R9 is not available.
- static const unsigned GPREven4[] = {
+ static const uint16_t GPREven4[] = {
ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R10,
ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
ARM::R11
};
- static const unsigned GPROdd4[] = {
+ static const uint16_t GPROdd4[] = {
ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R11,
ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
ARM::R10
};
// FP is R7, R9 is not available.
- static const unsigned GPREven5[] = {
+ static const uint16_t GPREven5[] = {
ARM::R0, ARM::R2, ARM::R4, ARM::R10,
ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
ARM::R11
};
- static const unsigned GPROdd5[] = {
+ static const uint16_t GPROdd5[] = {
ARM::R1, ARM::R3, ARM::R5, ARM::R11,
ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
ARM::R10
};
// FP is R11, R9 is not available.
- static const unsigned GPREven6[] = {
+ static const uint16_t GPREven6[] = {
ARM::R0, ARM::R2, ARM::R4, ARM::R6,
ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
};
- static const unsigned GPROdd6[] = {
+ static const uint16_t GPROdd6[] = {
ARM::R1, ARM::R3, ARM::R5, ARM::R7,
ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
};
@@ -610,11 +484,15 @@ ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
if (!EnableBasePointer)
return false;
- if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
+ // When outgoing call frames are so large that we adjust the stack pointer
+ // around the call, we can no longer use the stack pointer to reach the
+ // emergency spill slot.
+ if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
return true;
// Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
@@ -638,14 +516,29 @@ bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
}
bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineRegisterInfo *MRI = &MF.getRegInfo();
const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// We can't realign the stack if:
// 1. Dynamic stack realignment is explicitly disabled,
// 2. This is a Thumb1 function (it's not useful, so we don't bother), or
// 3. There are VLAs in the function and the base pointer is disabled.
- return (RealignStack && !AFI->isThumb1OnlyFunction() &&
- (!MFI->hasVarSizedObjects() || EnableBasePointer));
+ if (!MF.getTarget().Options.RealignStack)
+ return false;
+ if (AFI->isThumb1OnlyFunction())
+ return false;
+ // Stack realignment requires a frame pointer. If we already started
+ // register allocation with frame pointer elimination, it is too late now.
+ if (!MRI->canReserveReg(FramePtr))
+ return false;
+ // We may also need a base pointer if there are dynamic allocas or stack
+ // pointer adjustments around calls.
+ if (MF.getTarget().getFrameLowering()->hasReservedCallFrame(MF))
+ return true;
+ if (!EnableBasePointer)
+ return false;
+ // A base pointer is required and allowed. Check that it isn't too late to
+ // reserve it.
+ return MRI->canReserveReg(BasePtr);
}
bool ARMBaseRegisterInfo::
@@ -653,7 +546,7 @@ needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
- bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
+ bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
F->hasFnAttr(Attribute::StackAlignment));
return requiresRealignment && canRealignStack(MF);
@@ -662,7 +555,7 @@ needsStackRealignment(const MachineFunction &MF) const {
bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- if (DisableFramePointerElim(MF) && MFI->adjustsStack())
+ if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->adjustsStack())
return true;
return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
|| needsStackRealignment(MF);
@@ -679,12 +572,10 @@ ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
- return 0;
}
unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
llvm_unreachable("What is the exception handler register");
- return 0;
}
unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
@@ -892,7 +783,7 @@ int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
const MCInstrDesc &Desc = MI->getDesc();
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
- int64_t InstrOffs = 0;;
+ int64_t InstrOffs = 0;
int Scale = 1;
unsigned ImmIdx = 0;
switch (AddrMode) {
@@ -933,7 +824,6 @@ getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
}
default:
llvm_unreachable("Unsupported addressing mode!");
- break;
}
return InstrOffs * Scale;
@@ -1129,7 +1019,6 @@ bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
break;
default:
llvm_unreachable("Unsupported addressing mode!");
- break;
}
Offset += getFrameIndexInstrOffset(MI, i);
@@ -1171,6 +1060,21 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);
+ // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
+ // call frame setup/destroy instructions have already been eliminated. That
+ // means the stack pointer cannot be used to access the emergency spill slot
+ // when !hasReservedCallFrame().
+#ifndef NDEBUG
+ if (RS && FrameReg == ARM::SP && FrameIndex == RS->getScavengingFrameIndex()){
+ assert(TFI->hasReservedCallFrame(MF) &&
+ "Cannot use SP to access the emergency spill slot in "
+ "functions without a reserved call frame");
+ assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
+ "Cannot use SP to access the emergency spill slot in "
+ "functions with variable sized frame objects");
+ }
+#endif // NDEBUG
+
// Special handling of dbg_value instructions.
if (MI.isDebugValue()) {
    MI.getOperand(i).ChangeToRegister(FrameReg, false /*isDef*/);
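
The hand-maintained callee-saved arrays give way to TableGen-generated save
lists and call-preserved register masks (the CSR_iOS_* / CSR_AAPCS_* symbols
come from the CalleeSavedRegs definitions in ARMCallingConv.td). A minimal
sketch of consuming the new interface (illustrative only):

    // Zero-terminated callee-saved list, for prologue/epilogue code:
    const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF);
    // One bit per preserved register; a call site can carry a single
    // regmask operand instead of a long string of implicit defs:
    const uint32_t *Mask = TRI->getCallPreservedMask(CallingConv::C);
    MIB.addRegMask(Mask);
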
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
index fee17ff..af79351 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -1,4 +1,4 @@
-//===- ARMBaseRegisterInfo.h - ARM Register Information Impl ----*- C++ -*-===//
+//===-- ARMBaseRegisterInfo.h - ARM Register Information Impl ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -35,7 +35,7 @@ namespace ARMRI {
/// isARMArea1Register - Returns true if the register is a low register (r0-r7)
/// or a stack/pc register that we should push/pop.
-static inline bool isARMArea1Register(unsigned Reg, bool isDarwin) {
+static inline bool isARMArea1Register(unsigned Reg, bool isIOS) {
using namespace ARM;
switch (Reg) {
case R0: case R1: case R2: case R3:
@@ -43,25 +43,25 @@ static inline bool isARMArea1Register(unsigned Reg, bool isDarwin) {
case LR: case SP: case PC:
return true;
case R8: case R9: case R10: case R11:
- // For darwin we want r7 and lr to be next to each other.
- return !isDarwin;
+ // For iOS we want r7 and lr to be next to each other.
+ return !isIOS;
default:
return false;
}
}
-static inline bool isARMArea2Register(unsigned Reg, bool isDarwin) {
+static inline bool isARMArea2Register(unsigned Reg, bool isIOS) {
using namespace ARM;
switch (Reg) {
case R8: case R9: case R10: case R11:
- // Darwin has this second area.
- return isDarwin;
+ // iOS has this second area.
+ return isIOS;
default:
return false;
}
}
-static inline bool isARMArea3Register(unsigned Reg, bool isDarwin) {
+static inline bool isARMArea3Register(unsigned Reg, bool isIOS) {
using namespace ARM;
switch (Reg) {
case D15: case D14: case D13: case D12:
@@ -94,17 +94,11 @@ protected:
public:
/// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const uint32_t *getCallPreservedMask(CallingConv::ID) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
- /// getMatchingSuperRegClass - Return a subclass of the specified register
- /// class A so that each register in it has a sub-register of the
- /// specified sub-register index which is in the specified register class B.
- virtual const TargetRegisterClass *
- getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B, unsigned Idx) const;
-
/// canCombineSubRegIndices - Given a register class and a list of
/// subregister indices, return true if it's possible to combine the
/// subregister indices into one that corresponds to a larger
@@ -125,7 +119,7 @@ public:
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const;
- ArrayRef<unsigned> getRawAllocationOrder(const TargetRegisterClass *RC,
+ ArrayRef<uint16_t> getRawAllocationOrder(const TargetRegisterClass *RC,
unsigned HintType, unsigned HintReg,
const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h b/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h
index 69eddf0..11bd6a4 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBuildAttrs.h
@@ -1,4 +1,4 @@
-//===-------- ARMBuildAttrs.h - ARM Build Attributes ------------*- C++ -*-===//
+//===-- ARMBuildAttrs.h - ARM Build Attributes ------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.h b/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
index ff7db1f..0bd1c3e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
@@ -1,4 +1,4 @@
-//===-- ARMCallingConv.h - ARM Custom Calling Convention Routines ---------===//
+//=== ARMCallingConv.h - ARM Custom Calling Convention Routines -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,13 +15,12 @@
#ifndef ARMCALLINGCONV_H
#define ARMCALLINGCONV_H
+#include "ARM.h"
+#include "ARMBaseInstrInfo.h"
+#include "ARMSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "ARMBaseInstrInfo.h"
-#include "ARMRegisterInfo.h"
-#include "ARMSubtarget.h"
-#include "ARM.h"
namespace llvm {
@@ -29,7 +28,7 @@ namespace llvm {
static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
CCState &State, bool CanFail) {
- static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+ static const uint16_t RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
// Try to get the first register.
if (unsigned Reg = State.AllocateReg(RegList, 4))
@@ -72,9 +71,9 @@ static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
CCState &State, bool CanFail) {
- static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
- static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
- static const unsigned ShadowRegList[] = { ARM::R0, ARM::R1 };
+ static const uint16_t HiRegList[] = { ARM::R0, ARM::R2 };
+ static const uint16_t LoRegList[] = { ARM::R1, ARM::R3 };
+ static const uint16_t ShadowRegList[] = { ARM::R0, ARM::R1 };
unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList, 2);
if (Reg == 0) {
@@ -118,8 +117,8 @@ static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo, CCState &State) {
- static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
- static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
+ static const uint16_t HiRegList[] = { ARM::R0, ARM::R2 };
+ static const uint16_t LoRegList[] = { ARM::R1, ARM::R3 };
unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
if (Reg == 0)
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
index 47b2e98..d33364b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -1,4 +1,4 @@
-//===- ARMCallingConv.td - Calling Conventions for ARM -----*- tablegen -*-===//
+//===-- ARMCallingConv.td - Calling Conventions for ARM ----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,7 +25,7 @@ def CC_ARM_APCS : CallingConv<[
// Handles byval parameters.
CCIfByVal<CCPassByVal<4, 4>>,
- CCIfType<[i8, i16], CCPromoteToType<i32>>,
+ CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
// Handle all vector types as either f64 or v2f64.
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
@@ -43,6 +43,7 @@ def CC_ARM_APCS : CallingConv<[
]>;
def RetCC_ARM_APCS : CallingConv<[
+ CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
CCIfType<[f32], CCBitConvertToType<i32>>,
// Handle all vector types as either f64 or v2f64.
@@ -82,25 +83,6 @@ def RetFastCC_ARM_APCS : CallingConv<[
CCDelegateTo<RetCC_ARM_APCS>
]>;
-//===----------------------------------------------------------------------===//
-// ARM APCS Calling Convention for GHC
-//===----------------------------------------------------------------------===//
-
-def CC_ARM_APCS_GHC : CallingConv<[
- // Handle all vector types as either f64 or v2f64.
- CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
- CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
-
- CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
- CCIfType<[f64], CCAssignToReg<[D8, D9, D10, D11]>>,
- CCIfType<[f32], CCAssignToReg<[S16, S17, S18, S19, S20, S21, S22, S23]>>,
-
- // Promote i8/i16 arguments to i32.
- CCIfType<[i8, i16], CCPromoteToType<i32>>,
-
- // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, SpLim
- CCIfType<[i32], CCAssignToReg<[R4, R5, R6, R7, R8, R9, R10, R11]>>
-]>;
//===----------------------------------------------------------------------===//
// ARM AAPCS (EABI) Calling Convention, common parts
@@ -108,7 +90,7 @@ def CC_ARM_APCS_GHC : CallingConv<[
def CC_ARM_AAPCS_Common : CallingConv<[
- CCIfType<[i8, i16], CCPromoteToType<i32>>,
+ CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
// i64/f64 is passed in even pairs of GPRs
// i64 is 8-aligned i32 here, so we may need to eat R1 as a pad register
@@ -125,6 +107,7 @@ def CC_ARM_AAPCS_Common : CallingConv<[
]>;
def RetCC_ARM_AAPCS_Common : CallingConv<[
+ CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
CCIfType<[i64], CCAssignToRegWithShadow<[R0, R2], [R1, R3]>>
]>;
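
With the CCIfType<[i1, i8, i16], ...> lines added above, sub-word return values are first widened to i32 and then fall through to the CCAssignToReg rule, so an i8 comes back in R0 like any other i32. A hand-rolled C++ emulation of that rule chain (illustration only; the real checker is generated by TableGen from this file):

#include <cstdio>

// Emulates RetCC_ARM_AAPCS_Common for a single return value.
enum VT { i1, i8, i16, i32, i64 };

static const char *assignReturn(VT Ty) {
  // CCIfType<[i1, i8, i16], CCPromoteToType<i32>>
  if (Ty == i1 || Ty == i8 || Ty == i16)
    Ty = i32;
  // CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>
  if (Ty == i32)
    return "R0";
  // CCIfType<[i64], CCAssignToRegWithShadow<[R0, R2], [R1, R3]>>
  if (Ty == i64)
    return "R0:R1";
  return "?";
}

int main() {
  printf("i8  -> %s\n", assignReturn(i8));  // promoted, returned in R0
  printf("i64 -> %s\n", assignReturn(i64)); // even GPR pair
  return 0;
}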
@@ -181,3 +164,14 @@ def RetCC_ARM_AAPCS_VFP : CallingConv<[
S9, S10, S11, S12, S13, S14, S15]>>,
CCDelegateTo<RetCC_ARM_AAPCS_Common>
]>;
+
+//===----------------------------------------------------------------------===//
+// Callee-saved register lists.
+//===----------------------------------------------------------------------===//
+
+def CSR_AAPCS : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6, R5, R4,
+ (sequence "D%u", 15, 8))>;
+
+// iOS ABI deviates from ARM standard ABI. R9 is not a callee-saved register.
+// Also save R7-R4 first to match the stack frame fixed spill areas.
+def CSR_iOS : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS, R9))>;
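
TableGen expands each CalleeSavedRegs record into a zero-terminated uint16_t save list and a uint32_t register mask, which the new getCalleeSavedRegs/getCallPreservedMask declarations in ARMBaseRegisterInfo.h above return. The sketch below shows the shape of that output with hypothetical register numbers; generated names follow the CSR_<def>_SaveList convention, but the exact contents live in ARMGenRegisterInfo.inc.

#include <cstdio>
#include <cstdint>

// Hypothetical register numbers, for illustration only.
enum { R4 = 4, R5, R6, R7, R8, R9, R10, R11, LR = 14 };

// Shape of the TableGen output: one zero-terminated save list per record.
static const uint16_t CSR_AAPCS_SaveList[] = {
    LR, R11, R10, R9, R8, R7, R6, R5, R4, /* D15..D8, */ 0};

// CSR_iOS: R7-R4 first (matching the fixed spill areas), R9 removed by the
// (sub CSR_AAPCS, R9) term, duplicates dropped.
static const uint16_t CSR_iOS_SaveList[] = {
    LR, R7, R6, R5, R4, R11, R10, R8, /* D15..D8, */ 0};

int main() {
  for (const uint16_t *R = CSR_iOS_SaveList; *R; ++R)
    printf("%u ", unsigned(*R)); // note: no R9 in the iOS list
  printf("\n");
  return 0;
}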
diff --git a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
index 4148d4a..32ef345 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
@@ -15,7 +15,7 @@
#define DEBUG_TYPE "jit"
#include "ARM.h"
#include "ARMConstantPoolValue.h"
-#include "ARMInstrInfo.h"
+#include "ARMBaseInstrInfo.h"
#include "ARMRelocations.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
@@ -46,7 +46,7 @@ namespace {
class ARMCodeEmitter : public MachineFunctionPass {
ARMJITInfo *JTI;
- const ARMInstrInfo *II;
+ const ARMBaseInstrInfo *II;
const TargetData *TD;
const ARMSubtarget *Subtarget;
TargetMachine &TM;
@@ -66,7 +66,7 @@ namespace {
public:
ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
: MachineFunctionPass(ID), JTI(0),
- II((const ARMInstrInfo *)tm.getInstrInfo()),
+ II((const ARMBaseInstrInfo *)tm.getInstrInfo()),
TD(tm.getTargetData()), TM(tm),
MCE(mce), MCPEs(0), MJTEs(0),
IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {}
@@ -74,7 +74,7 @@ namespace {
/// getBinaryCodeForInstr - This function, generated by the
/// CodeEmitterGenerator using TableGen, produces the binary encoding for
/// machine instructions.
- unsigned getBinaryCodeForInstr(const MachineInstr &MI) const;
+ uint64_t getBinaryCodeForInstr(const MachineInstr &MI) const;
bool runOnMachineFunction(MachineFunction &MF);
@@ -189,6 +189,8 @@ namespace {
unsigned Op) const { return 0; }
unsigned getARMBranchTargetOpValue(const MachineInstr &MI, unsigned Op)
const { return 0; }
+ unsigned getARMBLTargetOpValue(const MachineInstr &MI, unsigned Op)
+ const { return 0; }
unsigned getARMBLXTargetOpValue(const MachineInstr &MI, unsigned Op)
const { return 0; }
unsigned getCCOutOpValue(const MachineInstr &MI, unsigned Op)
@@ -366,9 +368,9 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
assert((MF.getTarget().getRelocationModel() != Reloc::Default ||
MF.getTarget().getRelocationModel() != Reloc::Static) &&
"JIT relocation model must be set to static or default!");
- JTI = ((ARMTargetMachine &)MF.getTarget()).getJITInfo();
- II = ((const ARMTargetMachine &)MF.getTarget()).getInstrInfo();
- TD = ((const ARMTargetMachine &)MF.getTarget()).getTargetData();
+ JTI = ((ARMBaseTargetMachine &)MF.getTarget()).getJITInfo();
+ II = (const ARMBaseInstrInfo *)MF.getTarget().getInstrInfo();
+ TD = MF.getTarget().getTargetData();
Subtarget = &TM.getSubtarget<ARMSubtarget>();
MCPEs = &MF.getConstantPool()->getConstants();
MJTEs = 0;
@@ -386,7 +388,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB) {
MCE.StartMachineBasicBlock(MBB);
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
I != E; ++I)
emitInstruction(*I);
}
@@ -406,7 +408,6 @@ unsigned ARMCodeEmitter::getShiftOp(unsigned Imm) const {
case ARM_AM::ror:
case ARM_AM::rrx: return 3;
}
- return 0;
}
/// getMovi32Value - Return binary encoding of operand for movw/movt. If the
@@ -532,7 +533,6 @@ void ARMCodeEmitter::emitInstruction(const MachineInstr &MI) {
switch (MI.getDesc().TSFlags & ARMII::FormMask) {
default: {
llvm_unreachable("Unhandled instruction encoding format!");
- break;
}
case ARMII::MiscFrm:
if (MI.getOpcode() == ARM::LEApcrelJT) {
@@ -541,7 +541,6 @@ void ARMCodeEmitter::emitInstruction(const MachineInstr &MI) {
break;
}
llvm_unreachable("Unhandled instruction encoding!");
- break;
case ARMII::Pseudo:
emitPseudoInstruction(MI);
break;
@@ -837,9 +836,7 @@ void ARMCodeEmitter::emitPseudoInstruction(const MachineInstr &MI) {
default:
llvm_unreachable("ARMCodeEmitter::emitPseudoInstruction");
case ARM::BX_CALL:
- case ARM::BMOVPCRX_CALL:
- case ARM::BXr9_CALL:
- case ARM::BMOVPCRXr9_CALL: {
+ case ARM::BMOVPCRX_CALL: {
// First emit mov lr, pc
unsigned Binary = 0x01a0e00f;
Binary |= II->getPredicate(&MI) << ARMII::CondShift;
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 3e3a413..fc35c7c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -16,16 +16,17 @@
#define DEBUG_TYPE "arm-cp-islands"
#include "ARM.h"
#include "ARMMachineFunctionInfo.h"
-#include "ARMInstrInfo.h"
#include "Thumb2InstrInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -51,6 +52,44 @@ static cl::opt<bool>
AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
cl::desc("Adjust basic block layout to better use TB[BH]"));
+// FIXME: This option should be removed once it has received sufficient testing.
+static cl::opt<bool>
+AlignConstantIslands("arm-align-constant-islands", cl::Hidden, cl::init(true),
+ cl::desc("Align constant islands in code"));
+
+/// UnknownPadding - Return the worst case padding that could result from
+/// unknown offset bits. This does not include alignment padding caused by
+/// known offset bits.
+///
+/// @param LogAlign log2(alignment)
+/// @param KnownBits Number of known low offset bits.
+static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
+ if (KnownBits < LogAlign)
+ return (1u << LogAlign) - (1u << KnownBits);
+ return 0;
+}
+
+/// WorstCaseAlign - Assuming only the low KnownBits bits in Offset are exact,
+/// add padding such that:
+///
+/// 1. The result is aligned to 1 << LogAlign.
+///
+/// 2. No other value of the unknown bits would require more padding.
+///
+/// This may add more padding than is required to satisfy just one of the
+/// constraints. It is necessary to compute alignment this way to guarantee
+/// that we don't underestimate the padding before an aligned block. If the
+/// real padding before a block is larger than we think, constant pool entries
+/// may go out of range.
+static inline unsigned WorstCaseAlign(unsigned Offset, unsigned LogAlign,
+ unsigned KnownBits) {
+ // Add the worst possible padding that the unknown bits could cause.
+ Offset += UnknownPadding(LogAlign, KnownBits);
+
+ // Then align the result.
+ return RoundUpToAlignment(Offset, 1u << LogAlign);
+}
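
A worked instance of the two helpers, assuming only the numbers shown: with LogAlign = 3 and KnownBits = 1, UnknownPadding returns (1 << 3) - (1 << 1) = 6, so WorstCaseAlign(10, 3, 1) pads 10 up to 16, which is already 8-byte aligned. The standalone sketch below reimplements both (plus a local RoundUpToAlignment) so the arithmetic can be checked in isolation:

#include <cstdio>

static unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
  if (KnownBits < LogAlign)
    return (1u << LogAlign) - (1u << KnownBits);
  return 0;
}

static unsigned RoundUpToAlignment(unsigned Value, unsigned Align) {
  return (Value + Align - 1) / Align * Align;
}

static unsigned WorstCaseAlign(unsigned Offset, unsigned LogAlign,
                               unsigned KnownBits) {
  // Worst padding the unknown bits could cause, then align the result.
  Offset += UnknownPadding(LogAlign, KnownBits);
  return RoundUpToAlignment(Offset, 1u << LogAlign);
}

int main() {
  printf("%u\n", UnknownPadding(3, 1));     // 6
  printf("%u\n", WorstCaseAlign(10, 3, 1)); // 16
  return 0;
}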
+
namespace {
/// ARMConstantIslands - Due to limited PC-relative displacements, ARM
/// requires constant pool entries to be scattered among the instructions
@@ -64,16 +103,70 @@ namespace {
/// CPE - A constant pool entry that has been placed somewhere, which
/// tracks a list of users.
class ARMConstantIslands : public MachineFunctionPass {
- /// BBSizes - The size of each MachineBasicBlock in bytes of code, indexed
- /// by MBB Number. The two-byte pads required for Thumb alignment are
- /// counted as part of the following block (i.e., the offset and size for
- /// a padded block will both be ==2 mod 4).
- std::vector<unsigned> BBSizes;
+ /// BasicBlockInfo - Information about the offset and size of a single
+ /// basic block.
+ struct BasicBlockInfo {
+ /// Offset - Distance from the beginning of the function to the beginning
+ /// of this basic block.
+ ///
+ /// The offset is always aligned as required by the basic block.
+ unsigned Offset;
+
+ /// Size - Size of the basic block in bytes. If the block contains
+ /// inline assembly, this is a worst case estimate.
+ ///
+ /// The size does not include any alignment padding whether from the
+ /// beginning of the block, or from an aligned jump table at the end.
+ unsigned Size;
+
+ /// KnownBits - The number of low bits in Offset that are known to be
+ /// exact. The remaining bits of Offset are an upper bound.
+ uint8_t KnownBits;
+
+ /// Unalign - When non-zero, the block contains instructions (inline asm)
+ /// of unknown size. The real size may be smaller than Size bytes by a
+ /// multiple of 1 << Unalign.
+ uint8_t Unalign;
+
+ /// PostAlign - When non-zero, the block terminator contains a .align
+ /// directive, so the end of the block is aligned to 1 << PostAlign
+ /// bytes.
+ uint8_t PostAlign;
+
+ BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0),
+ PostAlign(0) {}
+
+ /// Compute the number of known offset bits internally to this block.
+ /// This number should be used to predict worst case padding when
+ /// splitting the block.
+ unsigned internalKnownBits() const {
+ return Unalign ? Unalign : KnownBits;
+ }
+
+ /// Compute the offset immediately following this block. If LogAlign is
+ /// specified, return the offset the successor block will get if it has
+ /// this alignment.
+ unsigned postOffset(unsigned LogAlign = 0) const {
+ unsigned PO = Offset + Size;
+ unsigned LA = std::max(unsigned(PostAlign), LogAlign);
+ if (!LA)
+ return PO;
+ // Add alignment padding from the terminator.
+ return WorstCaseAlign(PO, LA, internalKnownBits());
+ }
+
+ /// Compute the number of known low bits of postOffset. If this block
+ /// contains inline asm, the number of known bits drops to the
+ /// instruction alignment. An aligned terminator may increase the number
+  /// of known bits.
+ /// If LogAlign is given, also consider the alignment of the next block.
+ unsigned postKnownBits(unsigned LogAlign = 0) const {
+ return std::max(std::max(unsigned(PostAlign), LogAlign),
+ internalKnownBits());
+ }
+ };
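
To make the interplay of these fields concrete: for a hypothetical block with Offset = 0x100, Size = 0x36, KnownBits = 1, Unalign = 0 and PostAlign = 2, postOffset() must assume the unknown low bit costs UnknownPadding(2, 1) = 2 extra bytes, giving a worst-case successor start of 0x138. A simplified, self-contained stand-in for the struct:

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct BBInfoSketch {
  unsigned Offset, Size;
  uint8_t KnownBits, Unalign, PostAlign;

  unsigned internalKnownBits() const { return Unalign ? Unalign : KnownBits; }

  unsigned postOffset(unsigned LogAlign = 0) const {
    unsigned PO = Offset + Size;
    unsigned LA = std::max(unsigned(PostAlign), LogAlign);
    if (!LA)
      return PO;
    unsigned KB = internalKnownBits();
    unsigned Pad = KB < LA ? (1u << LA) - (1u << KB) : 0;  // UnknownPadding
    return (PO + Pad + (1u << LA) - 1) & ~((1u << LA) - 1); // worst-case align
  }
};

int main() {
  BBInfoSketch BBI = {0x100, 0x36, /*KnownBits=*/1, /*Unalign=*/0,
                      /*PostAlign=*/2};
  printf("%#x\n", BBI.postOffset()); // 0x138: 0x136 plus 2 bytes of padding
  return 0;
}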
- /// BBOffsets - the offset of each MBB in bytes, starting from 0.
- /// The two-byte pads required for Thumb alignment are counted as part of
- /// the following block.
- std::vector<unsigned> BBOffsets;
+ std::vector<BasicBlockInfo> BBInfo;
/// WaterList - A sorted list of basic blocks where islands could be placed
/// (i.e. blocks that don't fall through to the following block, due
@@ -102,14 +195,24 @@ namespace {
MachineInstr *MI;
MachineInstr *CPEMI;
MachineBasicBlock *HighWaterMark;
+ private:
unsigned MaxDisp;
+ public:
bool NegOk;
bool IsSoImm;
+ bool KnownAlignment;
CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
bool neg, bool soimm)
- : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
+ : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm),
+ KnownAlignment(false) {
HighWaterMark = CPEMI->getParent();
}
+ /// getMaxDisp - Returns the maximum displacement supported by MI.
+ /// Correct for unknown alignment.
+ /// Conservatively subtract 2 bytes to handle weird alignment effects.
+ unsigned getMaxDisp() const {
+ return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
+ }
};
/// CPUsers - Keep track of all of the machine instructions that use various
@@ -162,10 +265,9 @@ namespace {
/// the branch fix up pass.
bool HasFarJump;
- /// HasInlineAsm - True if the function contains inline assembly.
- bool HasInlineAsm;
-
- const ARMInstrInfo *TII;
+ MachineFunction *MF;
+ MachineConstantPool *MCP;
+ const ARMBaseInstrInfo *TII;
const ARMSubtarget *STI;
ARMFunctionInfo *AFI;
bool isThumb;
@@ -182,85 +284,100 @@ namespace {
}
private:
- void DoInitialPlacement(MachineFunction &MF,
- std::vector<MachineInstr*> &CPEMIs);
+ void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- void JumpTableFunctionScan(MachineFunction &MF);
- void InitialFunctionScan(MachineFunction &MF,
- const std::vector<MachineInstr*> &CPEMIs);
- MachineBasicBlock *SplitBlockBeforeInstr(MachineInstr *MI);
- void UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB);
- void AdjustBBOffsetsAfter(MachineBasicBlock *BB, int delta);
- bool DecrementOldEntry(unsigned CPI, MachineInstr* CPEMI);
- int LookForExistingCPEntry(CPUser& U, unsigned UserOffset);
- bool LookForWater(CPUser&U, unsigned UserOffset, water_iterator &WaterIter);
- void CreateNewWater(unsigned CPUserIndex, unsigned UserOffset,
+ unsigned getCPELogAlign(const MachineInstr *CPEMI);
+ void scanFunctionJumpTables();
+ void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
+ MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
+ void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
+ void adjustBBOffsetsAfter(MachineBasicBlock *BB);
+ bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
+ int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
+ bool findAvailableWater(CPUser&U, unsigned UserOffset,
+ water_iterator &WaterIter);
+ void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
MachineBasicBlock *&NewMBB);
- bool HandleConstantPoolUser(MachineFunction &MF, unsigned CPUserIndex);
- void RemoveDeadCPEMI(MachineInstr *CPEMI);
- bool RemoveUnusedCPEntries();
- bool CPEIsInRange(MachineInstr *MI, unsigned UserOffset,
- MachineInstr *CPEMI, unsigned Disp, bool NegOk,
- bool DoDump = false);
- bool WaterIsInRange(unsigned UserOffset, MachineBasicBlock *Water,
- CPUser &U);
- bool OffsetIsInRange(unsigned UserOffset, unsigned TrialOffset,
- unsigned Disp, bool NegativeOK, bool IsSoImm = false);
- bool BBIsInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
- bool FixUpImmediateBr(MachineFunction &MF, ImmBranch &Br);
- bool FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br);
- bool FixUpUnconditionalBr(MachineFunction &MF, ImmBranch &Br);
- bool UndoLRSpillRestore();
- bool OptimizeThumb2Instructions(MachineFunction &MF);
- bool OptimizeThumb2Branches(MachineFunction &MF);
- bool ReorderThumb2JumpTables(MachineFunction &MF);
- bool OptimizeThumb2JumpTables(MachineFunction &MF);
- MachineBasicBlock *AdjustJTTargetBlockForward(MachineBasicBlock *BB,
+ bool handleConstantPoolUser(unsigned CPUserIndex);
+ void removeDeadCPEMI(MachineInstr *CPEMI);
+ bool removeUnusedCPEntries();
+ bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
+ MachineInstr *CPEMI, unsigned Disp, bool NegOk,
+ bool DoDump = false);
+ bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
+ CPUser &U, unsigned &Growth);
+ bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
+ bool fixupImmediateBr(ImmBranch &Br);
+ bool fixupConditionalBr(ImmBranch &Br);
+ bool fixupUnconditionalBr(ImmBranch &Br);
+ bool undoLRSpillRestore();
+ bool mayOptimizeThumb2Instruction(const MachineInstr *MI) const;
+ bool optimizeThumb2Instructions();
+ bool optimizeThumb2Branches();
+ bool reorderThumb2JumpTables();
+ bool optimizeThumb2JumpTables();
+ MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
MachineBasicBlock *JTBB);
- unsigned GetOffsetOf(MachineInstr *MI) const;
+ void computeBlockSize(MachineBasicBlock *MBB);
+ unsigned getOffsetOf(MachineInstr *MI) const;
+ unsigned getUserOffset(CPUser&) const;
void dumpBBs();
- void verify(MachineFunction &MF);
+ void verify();
+
+ bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ unsigned Disp, bool NegativeOK, bool IsSoImm = false);
+ bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U) {
+ return isOffsetInRange(UserOffset, TrialOffset,
+ U.getMaxDisp(), U.NegOk, U.IsSoImm);
+ }
};
char ARMConstantIslands::ID = 0;
}
/// verify - check BBOffsets, BBSizes, alignment of islands
-void ARMConstantIslands::verify(MachineFunction &MF) {
- assert(BBOffsets.size() == BBSizes.size());
- for (unsigned i = 1, e = BBOffsets.size(); i != e; ++i)
- assert(BBOffsets[i-1]+BBSizes[i-1] == BBOffsets[i]);
- if (!isThumb)
- return;
+void ARMConstantIslands::verify() {
#ifndef NDEBUG
- for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
+ for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
- if (!MBB->empty() &&
- MBB->begin()->getOpcode() == ARM::CONSTPOOL_ENTRY) {
- unsigned MBBId = MBB->getNumber();
- assert(HasInlineAsm ||
- (BBOffsets[MBBId]%4 == 0 && BBSizes[MBBId]%4 == 0) ||
- (BBOffsets[MBBId]%4 != 0 && BBSizes[MBBId]%4 != 0));
- }
+ unsigned Align = MBB->getAlignment();
+ unsigned MBBId = MBB->getNumber();
+ assert(BBInfo[MBBId].Offset % (1u << Align) == 0);
+ assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
}
+ DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
CPUser &U = CPUsers[i];
- unsigned UserOffset = GetOffsetOf(U.MI) + (isThumb ? 4 : 8);
- unsigned CPEOffset = GetOffsetOf(U.CPEMI);
- unsigned Disp = UserOffset < CPEOffset ? CPEOffset - UserOffset :
- UserOffset - CPEOffset;
- assert(Disp <= U.MaxDisp || "Constant pool entry out of range!");
+ unsigned UserOffset = getUserOffset(U);
+ // Verify offset using the real max displacement without the safety
+ // adjustment.
+ if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
+ /* DoDump = */ true)) {
+ DEBUG(dbgs() << "OK\n");
+ continue;
+ }
+ DEBUG(dbgs() << "Out of range.\n");
+ dumpBBs();
+ DEBUG(MF->dump());
+ llvm_unreachable("Constant pool entry out of range!");
}
#endif
}
/// print block size and offset information - debugging
void ARMConstantIslands::dumpBBs() {
- for (unsigned J = 0, E = BBOffsets.size(); J !=E; ++J) {
- DEBUG(errs() << "block " << J << " offset " << BBOffsets[J]
- << " size " << BBSizes[J] << "\n");
- }
+ DEBUG({
+    for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
+ const BasicBlockInfo &BBI = BBInfo[J];
+ dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
+ << " kb=" << unsigned(BBI.KnownBits)
+ << " ua=" << unsigned(BBI.Unalign)
+ << " pa=" << unsigned(BBI.PostAlign)
+ << format(" size=%#x\n", BBInfo[J].Size);
+ }
+ });
}
/// createARMConstantIslandPass - returns an instance of the constpool
@@ -269,34 +386,41 @@ FunctionPass *llvm::createARMConstantIslandPass() {
return new ARMConstantIslands();
}
-bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
- MachineConstantPool &MCP = *MF.getConstantPool();
+bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
+ MF = &mf;
+ MCP = mf.getConstantPool();
- TII = (const ARMInstrInfo*)MF.getTarget().getInstrInfo();
- AFI = MF.getInfo<ARMFunctionInfo>();
- STI = &MF.getTarget().getSubtarget<ARMSubtarget>();
+ DEBUG(dbgs() << "***** ARMConstantIslands: "
+ << MCP->getConstants().size() << " CP entries, aligned to "
+ << MCP->getConstantPoolAlignment() << " bytes *****\n");
+
+ TII = (const ARMBaseInstrInfo*)MF->getTarget().getInstrInfo();
+ AFI = MF->getInfo<ARMFunctionInfo>();
+ STI = &MF->getTarget().getSubtarget<ARMSubtarget>();
isThumb = AFI->isThumbFunction();
isThumb1 = AFI->isThumb1OnlyFunction();
isThumb2 = AFI->isThumb2Function();
HasFarJump = false;
- HasInlineAsm = false;
+
+ // This pass invalidates liveness information when it splits basic blocks.
+ MF->getRegInfo().invalidateLiveness();
// Renumber all of the machine basic blocks in the function, guaranteeing that
// the numbers agree with the position of the block in the function.
- MF.RenumberBlocks();
+ MF->RenumberBlocks();
// Try to reorder and otherwise adjust the block layout to make good use
// of the TB[BH] instructions.
bool MadeChange = false;
if (isThumb2 && AdjustJumpTableBlocks) {
- JumpTableFunctionScan(MF);
- MadeChange |= ReorderThumb2JumpTables(MF);
+ scanFunctionJumpTables();
+ MadeChange |= reorderThumb2JumpTables();
// Data is out of date, so clear it. It'll be re-computed later.
T2JumpTables.clear();
// Blocks may have shifted around. Keep the numbering up to date.
- MF.RenumberBlocks();
+ MF->RenumberBlocks();
}
// Thumb1 functions containing constant pools get 4-byte alignment.
@@ -304,16 +428,13 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
// ARM and Thumb2 functions need to be 4-byte aligned.
if (!isThumb1)
- MF.EnsureAlignment(2); // 2 = log2(4)
+ MF->EnsureAlignment(2); // 2 = log2(4)
// Perform the initial placement of the constant pool entries. To start with,
// we put them all at the end of the function.
std::vector<MachineInstr*> CPEMIs;
- if (!MCP.isEmpty()) {
- DoInitialPlacement(MF, CPEMIs);
- if (isThumb1)
- MF.EnsureAlignment(2); // 2 = log2(4)
- }
+ if (!MCP->isEmpty())
+ doInitialPlacement(CPEMIs);
/// The next UID to take is the first unused one.
AFI->initPICLabelUId(CPEMIs.size());
@@ -321,34 +442,36 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
// Do the initial scan of the function, building up information about the
// sizes of each block, the location of all the water, and finding all of the
// constant pool users.
- InitialFunctionScan(MF, CPEMIs);
+ initializeFunctionInfo(CPEMIs);
CPEMIs.clear();
DEBUG(dumpBBs());
/// Remove dead constant pool entries.
- MadeChange |= RemoveUnusedCPEntries();
+ MadeChange |= removeUnusedCPEntries();
// Iteratively place constant pool entries and fix up branches until there
// is no change.
unsigned NoCPIters = 0, NoBRIters = 0;
while (true) {
+ DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
bool CPChange = false;
for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
- CPChange |= HandleConstantPoolUser(MF, i);
+ CPChange |= handleConstantPoolUser(i);
if (CPChange && ++NoCPIters > 30)
- llvm_unreachable("Constant Island pass failed to converge!");
+ report_fatal_error("Constant Island pass failed to converge!");
DEBUG(dumpBBs());
// Clear NewWaterList now. If we split a block for branches, it should
// appear as "new water" for the next iteration of constant pool placement.
NewWaterList.clear();
+ DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
bool BRChange = false;
for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
- BRChange |= FixUpImmediateBr(MF, ImmBranches[i]);
+ BRChange |= fixupImmediateBr(ImmBranches[i]);
if (BRChange && ++NoBRIters > 30)
- llvm_unreachable("Branch Fix Up pass failed to converge!");
+ report_fatal_error("Branch Fix Up pass failed to converge!");
DEBUG(dumpBBs());
if (!CPChange && !BRChange)
@@ -358,15 +481,15 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
// Shrink 32-bit Thumb2 branch, load, and store instructions.
if (isThumb2 && !STI->prefers32BitThumb())
- MadeChange |= OptimizeThumb2Instructions(MF);
+ MadeChange |= optimizeThumb2Instructions();
// After a while, this might be made debug-only, but it is not expensive.
- verify(MF);
+ verify();
// If LR has been forced spilled and no far jump (i.e. BL) has been issued,
// undo the spill / restore of LR if possible.
if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
- MadeChange |= UndoLRSpillRestore();
+ MadeChange |= undoLRSpillRestore();
// Save the mapping between original and cloned constpool entries.
for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
@@ -376,10 +499,9 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
}
}
- DEBUG(errs() << '\n'; dumpBBs());
+ DEBUG(dbgs() << '\n'; dumpBBs());
- BBSizes.clear();
- BBOffsets.clear();
+ BBInfo.clear();
WaterList.clear();
CPUsers.clear();
CPEntries.clear();
@@ -390,39 +512,68 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
return MadeChange;
}
-/// DoInitialPlacement - Perform the initial placement of the constant pool
+/// doInitialPlacement - Perform the initial placement of the constant pool
/// entries. To start with, we put them all at the end of the function.
-void ARMConstantIslands::DoInitialPlacement(MachineFunction &MF,
- std::vector<MachineInstr*> &CPEMIs) {
+void
+ARMConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
// Create the basic block to hold the CPE's.
- MachineBasicBlock *BB = MF.CreateMachineBasicBlock();
- MF.push_back(BB);
+ MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
+ MF->push_back(BB);
+
+ // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
+ unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
+
+ // Mark the basic block as required by the const-pool.
+ // If AlignConstantIslands isn't set, use 4-byte alignment for everything.
+ BB->setAlignment(AlignConstantIslands ? MaxAlign : 2);
+
+ // The function needs to be as aligned as the basic blocks. The linker may
+ // move functions around based on their alignment.
+ MF->EnsureAlignment(BB->getAlignment());
+
+ // Order the entries in BB by descending alignment. That ensures correct
+ // alignment of all entries as long as BB is sufficiently aligned. Keep
+ // track of the insertion point for each alignment. We are going to bucket
+ // sort the entries as they are created.
+ SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
// Add all of the constants from the constant pool to the end block, use an
// identity mapping of CPI's to CPE's.
- const std::vector<MachineConstantPoolEntry> &CPs =
- MF.getConstantPool()->getConstants();
+ const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
- const TargetData &TD = *MF.getTarget().getTargetData();
+ const TargetData &TD = *MF->getTarget().getTargetData();
for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
- // Verify that all constant pool entries are a multiple of 4 bytes. If not,
- // we would have to pad them out or something so that instructions stay
- // aligned.
- assert((Size & 3) == 0 && "CP Entry not multiple of 4 bytes!");
+ assert(Size >= 4 && "Too small constant pool entry");
+ unsigned Align = CPs[i].getAlignment();
+ assert(isPowerOf2_32(Align) && "Invalid alignment");
+ // Verify that all constant pool entries are a multiple of their alignment.
+ // If not, we would have to pad them out so that instructions stay aligned.
+    assert((Size % Align) == 0 && "CP Entry not multiple of alignment!");
+
+ // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
+ unsigned LogAlign = Log2_32(Align);
+ MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
MachineInstr *CPEMI =
- BuildMI(BB, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
+ BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
.addImm(i).addConstantPoolIndex(i).addImm(Size);
CPEMIs.push_back(CPEMI);
+ // Ensure that future entries with higher alignment get inserted before
+ // CPEMI. This is bucket sort with iterators.
+ for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
+ if (InsPoint[a] == InsAt)
+ InsPoint[a] = CPEMI;
+
// Add a new CPEntry, but no corresponding CPUser yet.
std::vector<CPEntry> CPEs;
CPEs.push_back(CPEntry(CPEMI, i));
CPEntries.push_back(CPEs);
++NumCPEs;
- DEBUG(errs() << "Moved CPI#" << i << " to end of function as #" << i
- << "\n");
+ DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
+ << Size << ", align = " << Align <<'\n');
}
+ DEBUG(BB->dump());
}
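
The InsPoint bookkeeping above is a bucket sort expressed with iterators: each entry is inserted at its alignment bucket's position, and every coarser bucket still pointing at that position is pulled back so later, more-aligned entries land in front. A standalone sketch over a std::list of log-alignments (not the MachineBasicBlock API) shows the invariant:

#include <cstdio>
#include <list>
#include <vector>

int main() {
  const unsigned MaxAlign = 3; // log2 of the largest alignment
  std::list<unsigned> BB;      // stands in for the constant-pool block
  std::vector<std::list<unsigned>::iterator> InsPoint(MaxAlign + 1, BB.end());

  unsigned LogAligns[] = {2, 3, 0, 2, 1}; // arrival order of entries
  for (unsigned LA : LogAligns) {
    auto InsAt = InsPoint[LA];
    auto It = BB.insert(InsAt, LA); // insert before the bucket's position
    // Higher-alignment buckets that pointed here must now insert before
    // the entry we just placed.
    for (unsigned a = LA + 1; a <= MaxAlign; ++a)
      if (InsPoint[a] == InsAt)
        InsPoint[a] = It;
  }
  for (unsigned LA : BB)
    printf("%u ", LA); // prints 3 2 2 1 0: descending alignment
  printf("\n");
  return 0;
}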
/// BBHasFallthrough - Return true if the specified basic block can fallthrough
@@ -458,41 +609,61 @@ ARMConstantIslands::CPEntry
return NULL;
}
-/// JumpTableFunctionScan - Do a scan of the function, building up
+/// getCPELogAlign - Returns the required alignment of the constant pool entry
+/// represented by CPEMI. Alignment is measured in log2(bytes) units.
+unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
+ assert(CPEMI && CPEMI->getOpcode() == ARM::CONSTPOOL_ENTRY);
+
+ // Everything is 4-byte aligned unless AlignConstantIslands is set.
+ if (!AlignConstantIslands)
+ return 2;
+
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
+ unsigned Align = MCP->getConstants()[CPI].getAlignment();
+ assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
+ return Log2_32(Align);
+}
+
+/// scanFunctionJumpTables - Do a scan of the function, building up
/// information about the sizes of each block and the locations of all
/// the jump tables.
-void ARMConstantIslands::JumpTableFunctionScan(MachineFunction &MF) {
- for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
+void ARMConstantIslands::scanFunctionJumpTables() {
+ for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
MBBI != E; ++MBBI) {
MachineBasicBlock &MBB = *MBBI;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I)
- if (I->getDesc().isBranch() && I->getOpcode() == ARM::t2BR_JT)
+ if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT)
T2JumpTables.push_back(I);
}
}
-/// InitialFunctionScan - Do the initial scan of the function, building up
+/// initializeFunctionInfo - Do the initial scan of the function, building up
/// information about the sizes of each block, the location of all the water,
/// and finding all of the constant pool users.
-void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
- const std::vector<MachineInstr*> &CPEMIs) {
- // First thing, see if the function has any inline assembly in it. If so,
- // we have to be conservative about alignment assumptions, as we don't
- // know for sure the size of any instructions in the inline assembly.
- for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I)
- if (I->getOpcode() == ARM::INLINEASM)
- HasInlineAsm = true;
- }
+void ARMConstantIslands::
+initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
+ BBInfo.clear();
+ BBInfo.resize(MF->getNumBlockIDs());
+
+ // First thing, compute the size of all basic blocks, and see if the function
+ // has any inline assembly in it. If so, we have to be conservative about
+ // alignment assumptions, as we don't know for sure the size of any
+ // instructions in the inline assembly.
+ for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
+ computeBlockSize(I);
+
+ // The known bits of the entry block offset are determined by the function
+ // alignment.
+ BBInfo.front().KnownBits = MF->getAlignment();
+
+ // Compute block offsets and known bits.
+ adjustBBOffsetsAfter(MF->begin());
// Now go back through the instructions and build up our data structures.
- unsigned Offset = 0;
- for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
+ for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
MBBI != E; ++MBBI) {
MachineBasicBlock &MBB = *MBBI;
@@ -501,16 +672,13 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
if (!BBHasFallthrough(&MBB))
WaterList.push_back(&MBB);
- unsigned MBBSize = 0;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
if (I->isDebugValue())
continue;
- // Add instruction size to MBBSize.
- MBBSize += TII->GetInstSizeInBytes(I);
int Opc = I->getOpcode();
- if (I->getDesc().isBranch()) {
+ if (I->isBranch()) {
bool isCond = false;
unsigned Bits = 0;
unsigned Scale = 1;
@@ -518,18 +686,6 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
switch (Opc) {
default:
continue; // Ignore other JT branches
- case ARM::tBR_JTr:
- // A Thumb1 table jump may involve padding; for the offsets to
- // be right, functions containing these must be 4-byte aligned.
- // tBR_JTr expands to a mov pc followed by .align 2 and then the jump
- // table entries. So this code checks whether offset of tBR_JTr + 2
- // is aligned. That is held in Offset+MBBSize, which already has
- // 2 added in for the size of the mov pc instruction.
- MF.EnsureAlignment(2U);
- if ((Offset+MBBSize)%4 != 0 || HasInlineAsm)
- // FIXME: Add a pseudo ALIGN instruction instead.
- MBBSize += 2; // padding
- continue; // Does not get an entry in ImmBranches
case ARM::t2BR_JT:
T2JumpTables.push_back(I);
continue; // Does not get an entry in ImmBranches
@@ -589,7 +745,6 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
switch (Opc) {
default:
llvm_unreachable("Unknown addressing mode for CP reference!");
- break;
// Taking the address of a CP entry.
case ARM::LEApcrel:
@@ -647,45 +802,53 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
break;
}
}
+ }
+}
+
+/// computeBlockSize - Compute the size and some alignment information for MBB.
+/// This function updates BBInfo directly.
+void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
+ BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
+ BBI.Size = 0;
+ BBI.Unalign = 0;
+ BBI.PostAlign = 0;
+
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ ++I) {
+ BBI.Size += TII->GetInstSizeInBytes(I);
+ // For inline asm, GetInstSizeInBytes returns a conservative estimate.
+ // The actual size may be smaller, but still a multiple of the instr size.
+ if (I->isInlineAsm())
+ BBI.Unalign = isThumb ? 1 : 2;
+ // Also consider instructions that may be shrunk later.
+ else if (isThumb && mayOptimizeThumb2Instruction(I))
+ BBI.Unalign = 1;
+ }
- // In thumb mode, if this block is a constpool island, we may need padding
- // so it's aligned on 4 byte boundary.
- if (isThumb &&
- !MBB.empty() &&
- MBB.begin()->getOpcode() == ARM::CONSTPOOL_ENTRY &&
- ((Offset%4) != 0 || HasInlineAsm))
- MBBSize += 2;
-
- BBSizes.push_back(MBBSize);
- BBOffsets.push_back(Offset);
- Offset += MBBSize;
+ // tBR_JTr contains a .align 2 directive.
+ if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
+ BBI.PostAlign = 2;
+ MBB->getParent()->EnsureAlignment(2);
}
}
-/// GetOffsetOf - Return the current offset of the specified machine instruction
+/// getOffsetOf - Return the current offset of the specified machine instruction
/// from the start of the function. This offset changes as stuff is moved
/// around inside the function.
-unsigned ARMConstantIslands::GetOffsetOf(MachineInstr *MI) const {
+unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
MachineBasicBlock *MBB = MI->getParent();
// The offset is composed of two things: the sum of the sizes of all MBB's
// before this instruction's block, and the offset from the start of the block
// it is in.
- unsigned Offset = BBOffsets[MBB->getNumber()];
-
- // If we're looking for a CONSTPOOL_ENTRY in Thumb, see if this block has
- // alignment padding, and compensate if so.
- if (isThumb &&
- MI->getOpcode() == ARM::CONSTPOOL_ENTRY &&
- (Offset%4 != 0 || HasInlineAsm))
- Offset += 2;
+ unsigned Offset = BBInfo[MBB->getNumber()].Offset;
// Sum instructions before MI in MBB.
- for (MachineBasicBlock::iterator I = MBB->begin(); ; ++I) {
+ for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
assert(I != MBB->end() && "Didn't find MI in its own basic block?");
- if (&*I == MI) return Offset;
Offset += TII->GetInstSizeInBytes(I);
}
+ return Offset;
}
/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
@@ -695,19 +858,16 @@ static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
return LHS->getNumber() < RHS->getNumber();
}
-/// UpdateForInsertedWaterBlock - When a block is newly inserted into the
+/// updateForInsertedWaterBlock - When a block is newly inserted into the
/// machine function, it upsets all of the block numbers. Renumber the blocks
/// and update the arrays that parallel this numbering.
-void ARMConstantIslands::UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
+void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
// Renumber the MBB's to keep them consecutive.
NewBB->getParent()->RenumberBlocks(NewBB);
- // Insert a size into BBSizes to align it properly with the (newly
+ // Insert an entry into BBInfo to align it properly with the (newly
// renumbered) block numbers.
- BBSizes.insert(BBSizes.begin()+NewBB->getNumber(), 0);
-
- // Likewise for BBOffsets.
- BBOffsets.insert(BBOffsets.begin()+NewBB->getNumber(), 0);
+ BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
// Next, update WaterList. Specifically, we need to add NewMBB as having
// available water after it.
@@ -721,15 +881,14 @@ void ARMConstantIslands::UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch. Update data structures and renumber blocks to
/// account for this change and returns the newly created block.
-MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
+MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
MachineBasicBlock *OrigBB = MI->getParent();
- MachineFunction &MF = *OrigBB->getParent();
// Create a new MBB for the code after the OrigBB.
MachineBasicBlock *NewBB =
- MF.CreateMachineBasicBlock(OrigBB->getBasicBlock());
+ MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
MachineFunction::iterator MBBI = OrigBB; ++MBBI;
- MF.insert(MBBI, NewBB);
+ MF->insert(MBBI, NewBB);
// Splice the instructions starting with MI over to NewBB.
NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
@@ -747,31 +906,19 @@ MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
++NumSplit;
// Update the CFG. All succs of OrigBB are now succs of NewBB.
- while (!OrigBB->succ_empty()) {
- MachineBasicBlock *Succ = *OrigBB->succ_begin();
- OrigBB->removeSuccessor(Succ);
- NewBB->addSuccessor(Succ);
-
- // This pass should be run after register allocation, so there should be no
- // PHI nodes to update.
- assert((Succ->empty() || !Succ->begin()->isPHI())
- && "PHI nodes should be eliminated by now!");
- }
+ NewBB->transferSuccessors(OrigBB);
// OrigBB branches to NewBB.
OrigBB->addSuccessor(NewBB);
// Update internal data structures to account for the newly inserted MBB.
- // This is almost the same as UpdateForInsertedWaterBlock, except that
+ // This is almost the same as updateForInsertedWaterBlock, except that
// the Water goes after OrigBB, not NewBB.
- MF.RenumberBlocks(NewBB);
+ MF->RenumberBlocks(NewBB);
- // Insert a size into BBSizes to align it properly with the (newly
+ // Insert an entry into BBInfo to align it properly with the (newly
// renumbered) block numbers.
- BBSizes.insert(BBSizes.begin()+NewBB->getNumber(), 0);
-
- // Likewise for BBOffsets.
- BBOffsets.insert(BBOffsets.begin()+NewBB->getNumber(), 0);
+ BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
// Next, update WaterList. Specifically, we need to add OrigMBB as having
// available water after it (but not if it's already there, which happens
@@ -787,86 +934,56 @@ MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
WaterList.insert(IP, OrigBB);
NewWaterList.insert(OrigBB);
- unsigned OrigBBI = OrigBB->getNumber();
- unsigned NewBBI = NewBB->getNumber();
-
- int delta = isThumb1 ? 2 : 4;
-
// Figure out how large the OrigBB is. As the first half of the original
// block, it cannot contain a tablejump. The size includes
// the new jump we added. (It should be possible to do this without
// recounting everything, but it's very confusing, and this is rarely
// executed.)
- unsigned OrigBBSize = 0;
- for (MachineBasicBlock::iterator I = OrigBB->begin(), E = OrigBB->end();
- I != E; ++I)
- OrigBBSize += TII->GetInstSizeInBytes(I);
- BBSizes[OrigBBI] = OrigBBSize;
-
- // ...and adjust BBOffsets for NewBB accordingly.
- BBOffsets[NewBBI] = BBOffsets[OrigBBI] + BBSizes[OrigBBI];
+ computeBlockSize(OrigBB);
// Figure out how large the NewMBB is. As the second half of the original
// block, it may contain a tablejump.
- unsigned NewBBSize = 0;
- for (MachineBasicBlock::iterator I = NewBB->begin(), E = NewBB->end();
- I != E; ++I)
- NewBBSize += TII->GetInstSizeInBytes(I);
- // Set the size of NewBB in BBSizes. It does not include any padding now.
- BBSizes[NewBBI] = NewBBSize;
-
- MachineInstr* ThumbJTMI = prior(NewBB->end());
- if (ThumbJTMI->getOpcode() == ARM::tBR_JTr) {
- // We've added another 2-byte instruction before this tablejump, which
- // means we will always need padding if we didn't before, and vice versa.
-
- // The original offset of the jump instruction was:
- unsigned OrigOffset = BBOffsets[OrigBBI] + BBSizes[OrigBBI] - delta;
- if (OrigOffset%4 == 0) {
- // We had padding before and now we don't. No net change in code size.
- delta = 0;
- } else {
- // We didn't have padding before and now we do.
- BBSizes[NewBBI] += 2;
- delta = 4;
- }
- }
+ computeBlockSize(NewBB);
// All BBOffsets following these blocks must be modified.
- if (delta)
- AdjustBBOffsetsAfter(NewBB, delta);
+ adjustBBOffsetsAfter(OrigBB);
return NewBB;
}
-/// OffsetIsInRange - Checks whether UserOffset (the location of a constant pool
+/// getUserOffset - Compute the offset of U.MI as seen by the hardware
+/// displacement computation. Update U.KnownAlignment to match its current
+/// basic block location.
+unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
+ unsigned UserOffset = getOffsetOf(U.MI);
+ const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
+ unsigned KnownBits = BBI.internalKnownBits();
+
+ // The value read from PC is offset from the actual instruction address.
+ UserOffset += (isThumb ? 4 : 8);
+
+ // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
+ // Make sure U.getMaxDisp() returns a constrained range.
+ U.KnownAlignment = (KnownBits >= 2);
+
+ // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
+ // purposes of the displacement computation; compensate for that here.
+ // For unknown alignments, getMaxDisp() constrains the range instead.
+ if (isThumb && U.KnownAlignment)
+ UserOffset &= ~3u;
+
+ return UserOffset;
+}
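
Concretely, an ARM user at offset 0x100 sees PC as 0x108, and a Thumb user at 0x102 sees (0x102 + 4) & ~3 = 0x104 once its mod-4 alignment is known; with unknown alignment the raw 0x106 is kept and getMaxDisp() shrinks the range instead. A small arithmetic sketch of just these rules (assumed numbers, not LLVM API):

#include <cstdio>

// PC-relative base as seen by the hardware, per the comments above: ARM reads
// PC as instruction address + 8, Thumb as + 4, and Thumb rounds offsets
// == 2 (mod 4) down when the alignment is known.
static unsigned userOffset(unsigned InstrAddr, bool IsThumb,
                           bool KnownAlignment) {
  unsigned Off = InstrAddr + (IsThumb ? 4 : 8);
  if (IsThumb && KnownAlignment)
    Off &= ~3u;
  return Off;
}

int main() {
  printf("%#x\n", userOffset(0x100, false, true)); // 0x108 (ARM)
  printf("%#x\n", userOffset(0x102, true, true));  // 0x104 (Thumb, rounded)
  printf("%#x\n", userOffset(0x102, true, false)); // 0x106 (range shrunk)
  return 0;
}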
+
+/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
/// reference) is within MaxDisp of TrialOffset (a proposed location of a
/// constant pool entry).
-bool ARMConstantIslands::OffsetIsInRange(unsigned UserOffset,
+/// UserOffset is computed by getUserOffset above to include PC adjustments. If
+/// the mod 4 alignment of UserOffset is not known, the uncertainty must be
+/// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
+bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
unsigned TrialOffset, unsigned MaxDisp,
bool NegativeOK, bool IsSoImm) {
- // On Thumb offsets==2 mod 4 are rounded down by the hardware for
- // purposes of the displacement computation; compensate for that here.
- // Effectively, the valid range of displacements is 2 bytes smaller for such
- // references.
- unsigned TotalAdj = 0;
- if (isThumb && UserOffset%4 !=0) {
- UserOffset -= 2;
- TotalAdj = 2;
- }
- // CPEs will be rounded up to a multiple of 4.
- if (isThumb && TrialOffset%4 != 0) {
- TrialOffset += 2;
- TotalAdj += 2;
- }
-
- // In Thumb2 mode, later branch adjustments can shift instructions up and
- // cause alignment change. In the worst case scenario this can cause the
- // user's effective address to be subtracted by 2 and the CPE's address to
- // be plus 2.
- if (isThumb2 && TotalAdj != 4)
- MaxDisp -= (4 - TotalAdj);
-
if (UserOffset <= TrialOffset) {
// User before the Trial.
if (TrialOffset - UserOffset <= MaxDisp)
@@ -880,40 +997,71 @@ bool ARMConstantIslands::OffsetIsInRange(unsigned UserOffset,
return false;
}
-/// WaterIsInRange - Returns true if a CPE placed after the specified
+/// isWaterInRange - Returns true if a CPE placed after the specified
/// Water (a basic block) will be in range for the specific MI.
-
-bool ARMConstantIslands::WaterIsInRange(unsigned UserOffset,
- MachineBasicBlock* Water, CPUser &U) {
- unsigned MaxDisp = U.MaxDisp;
- unsigned CPEOffset = BBOffsets[Water->getNumber()] +
- BBSizes[Water->getNumber()];
-
- // If the CPE is to be inserted before the instruction, that will raise
- // the offset of the instruction.
- if (CPEOffset < UserOffset)
- UserOffset += U.CPEMI->getOperand(2).getImm();
-
- return OffsetIsInRange(UserOffset, CPEOffset, MaxDisp, U.NegOk, U.IsSoImm);
+///
+/// Compute how much the function will grow by inserting a CPE after Water.
+bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
+ MachineBasicBlock* Water, CPUser &U,
+ unsigned &Growth) {
+ unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
+ unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
+ unsigned NextBlockOffset, NextBlockAlignment;
+ MachineFunction::const_iterator NextBlock = Water;
+ if (++NextBlock == MF->end()) {
+ NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
+ NextBlockAlignment = 0;
+ } else {
+ NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
+ NextBlockAlignment = NextBlock->getAlignment();
+ }
+ unsigned Size = U.CPEMI->getOperand(2).getImm();
+ unsigned CPEEnd = CPEOffset + Size;
+
+ // The CPE may be able to hide in the alignment padding before the next
+ // block. It may also cause more padding to be required if it is more aligned
+  // than the next block.
+ if (CPEEnd > NextBlockOffset) {
+ Growth = CPEEnd - NextBlockOffset;
+ // Compute the padding that would go at the end of the CPE to align the next
+ // block.
+ Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
+
+ // If the CPE is to be inserted before the instruction, that will raise
+ // the offset of the instruction. Also account for unknown alignment padding
+ // in blocks between CPE and the user.
+ if (CPEOffset < UserOffset)
+ UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
+ } else
+ // CPE fits in existing padding.
+ Growth = 0;
+
+ return isOffsetInRange(UserOffset, CPEOffset, U);
}
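
As a worked case of the growth computation with assumed numbers: a 16-byte CPE at CPEOffset 0x230 ends at 0x240; if the next block starts at 0x238 and wants 8-byte alignment, Growth = (0x240 - 0x238) + OffsetToAlignment(0x240, 8) = 8 + 0 = 8 bytes, while a CPE ending at or before 0x238 would hide entirely in the existing padding. A minimal sketch:

#include <cstdio>

// Bytes needed to advance Value to the next Align boundary (0 if aligned).
static unsigned OffsetToAlignment(unsigned Value, unsigned Align) {
  return (Align - Value % Align) % Align;
}

int main() {
  unsigned CPEOffset = 0x230, Size = 16;
  unsigned NextBlockOffset = 0x238, NextBlockAlign = 8;
  unsigned CPEEnd = CPEOffset + Size;
  unsigned Growth = 0;
  if (CPEEnd > NextBlockOffset) {
    Growth = CPEEnd - NextBlockOffset;
    // Padding after the CPE to realign the displaced next block.
    Growth += OffsetToAlignment(CPEEnd, NextBlockAlign);
  }
  printf("growth = %u bytes\n", Growth); // 8
  return 0;
}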
-/// CPEIsInRange - Returns true if the distance between specific MI and
+/// isCPEntryInRange - Returns true if the distance between specific MI and
/// specific ConstPool entry instruction can fit in MI's displacement field.
-bool ARMConstantIslands::CPEIsInRange(MachineInstr *MI, unsigned UserOffset,
+bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
MachineInstr *CPEMI, unsigned MaxDisp,
bool NegOk, bool DoDump) {
- unsigned CPEOffset = GetOffsetOf(CPEMI);
- assert((CPEOffset%4 == 0 || HasInlineAsm) && "Misaligned CPE");
+ unsigned CPEOffset = getOffsetOf(CPEMI);
+ assert(CPEOffset % 4 == 0 && "Misaligned CPE");
if (DoDump) {
- DEBUG(errs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
- << " max delta=" << MaxDisp
- << " insn address=" << UserOffset
- << " CPE address=" << CPEOffset
- << " offset=" << int(CPEOffset-UserOffset) << "\t" << *MI);
+ DEBUG({
+ unsigned Block = MI->getParent()->getNumber();
+ const BasicBlockInfo &BBI = BBInfo[Block];
+ dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
+ << " max delta=" << MaxDisp
+ << format(" insn address=%#x", UserOffset)
+ << " in BB#" << Block << ": "
+ << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
+ << format("CPE address=%#x offset=%+d: ", CPEOffset,
+ int(CPEOffset-UserOffset));
+ });
}
- return OffsetIsInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
+ return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
}
#ifndef NDEBUG
@@ -933,69 +1081,40 @@ static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
}
#endif // NDEBUG
-void ARMConstantIslands::AdjustBBOffsetsAfter(MachineBasicBlock *BB,
- int delta) {
- MachineFunction::iterator MBBI = BB; MBBI = llvm::next(MBBI);
- for(unsigned i = BB->getNumber()+1, e = BB->getParent()->getNumBlockIDs();
- i < e; ++i) {
- BBOffsets[i] += delta;
- // If some existing blocks have padding, adjust the padding as needed, a
- // bit tricky. delta can be negative so don't use % on that.
- if (!isThumb)
- continue;
- MachineBasicBlock *MBB = MBBI;
- if (!MBB->empty() && !HasInlineAsm) {
- // Constant pool entries require padding.
- if (MBB->begin()->getOpcode() == ARM::CONSTPOOL_ENTRY) {
- unsigned OldOffset = BBOffsets[i] - delta;
- if ((OldOffset%4) == 0 && (BBOffsets[i]%4) != 0) {
- // add new padding
- BBSizes[i] += 2;
- delta += 2;
- } else if ((OldOffset%4) != 0 && (BBOffsets[i]%4) == 0) {
- // remove existing padding
- BBSizes[i] -= 2;
- delta -= 2;
- }
- }
- // Thumb1 jump tables require padding. They should be at the end;
- // following unconditional branches are removed by AnalyzeBranch.
- // tBR_JTr expands to a mov pc followed by .align 2 and then the jump
- // table entries. So this code checks whether offset of tBR_JTr
- // is aligned; if it is, the offset of the jump table following the
- // instruction will not be aligned, and we need padding.
- MachineInstr *ThumbJTMI = prior(MBB->end());
- if (ThumbJTMI->getOpcode() == ARM::tBR_JTr) {
- unsigned NewMIOffset = GetOffsetOf(ThumbJTMI);
- unsigned OldMIOffset = NewMIOffset - delta;
- if ((OldMIOffset%4) == 0 && (NewMIOffset%4) != 0) {
- // remove existing padding
- BBSizes[i] -= 2;
- delta -= 2;
- } else if ((OldMIOffset%4) != 0 && (NewMIOffset%4) == 0) {
- // add new padding
- BBSizes[i] += 2;
- delta += 2;
- }
- }
- if (delta==0)
- return;
- }
- MBBI = llvm::next(MBBI);
+void ARMConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
+ unsigned BBNum = BB->getNumber();
+ for(unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
+ // Get the offset and known bits at the end of the layout predecessor.
+ // Include the alignment of the current block.
+ unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
+ unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
+ unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
+
+ // This is where block i begins. Stop once the offset is already correct
+ // and at least two blocks have been updated; two is the most that can
+ // have changed before this function is called.
+ if (i > BBNum + 2 &&
+ BBInfo[i].Offset == Offset &&
+ BBInfo[i].KnownBits == KnownBits)
+ break;
+
+ BBInfo[i].Offset = Offset;
+ BBInfo[i].KnownBits = KnownBits;
}
}
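// A simplified model of the offset propagation above: each block's start
// offset is the previous block's end, aligned up to the current block's
// alignment. BlockInfo here is a stand-in for the pass's BasicBlockInfo,
// postOffset() is reduced to a plain align-up, and the KnownBits tracking
// is dropped; only the early-stop shape is preserved.
#include <cstdio>
#include <vector>

struct BlockInfo { unsigned Offset, Size, LogAlign; };

static unsigned alignUp(unsigned X, unsigned LogAlign) {
  unsigned Mask = (1u << LogAlign) - 1;
  return (X + Mask) & ~Mask;
}

static void adjustAfter(std::vector<BlockInfo> &BBs, unsigned BBNum) {
  for (unsigned i = BBNum + 1, e = (unsigned)BBs.size(); i != e; ++i) {
    unsigned Offset = alignUp(BBs[i - 1].Offset + BBs[i - 1].Size,
                              BBs[i].LogAlign);
    if (i > BBNum + 2 && BBs[i].Offset == Offset)
      break;                   // nothing at or below this point has moved
    BBs[i].Offset = Offset;
  }
}

int main() {
  std::vector<BlockInfo> BBs = {{0, 10, 1}, {10, 6, 1}, {16, 8, 2}};
  BBs[0].Size += 2;            // grow block 0 by one 2-byte Thumb instruction
  adjustAfter(BBs, 0);
  for (const BlockInfo &BB : BBs)
    std::printf("offset=%#x size=%u\n", BB.Offset, BB.Size);
  return 0;
}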
-/// DecrementOldEntry - find the constant pool entry with index CPI
+/// decrementCPEReferenceCount - find the constant pool entry with index CPI
/// and instruction CPEMI, and decrement its refcount. If the refcount
/// becomes 0 remove the entry and instruction. Returns true if we removed
/// the entry, false if we didn't.
-bool ARMConstantIslands::DecrementOldEntry(unsigned CPI, MachineInstr *CPEMI) {
+bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
+ MachineInstr *CPEMI) {
// Find the old entry. Eliminate it if it is no longer used.
CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
assert(CPE && "Unexpected!");
if (--CPE->RefCount == 0) {
- RemoveDeadCPEMI(CPEMI);
+ removeDeadCPEMI(CPEMI);
CPE->CPEMI = NULL;
--NumCPEs;
return true;
@@ -1009,14 +1128,15 @@ bool ARMConstantIslands::DecrementOldEntry(unsigned CPI, MachineInstr *CPEMI) {
/// 0 = no existing entry found
/// 1 = entry found, and there were no code insertions or deletions
/// 2 = entry found, and there were code insertions or deletions
-int ARMConstantIslands::LookForExistingCPEntry(CPUser& U, unsigned UserOffset)
+int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
{
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
// Check to see if the CPE is already in-range.
- if (CPEIsInRange(UserMI, UserOffset, CPEMI, U.MaxDisp, U.NegOk, true)) {
- DEBUG(errs() << "In range\n");
+ if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
+ true)) {
+ DEBUG(dbgs() << "In range\n");
return 1;
}
@@ -1030,8 +1150,9 @@ int ARMConstantIslands::LookForExistingCPEntry(CPUser& U, unsigned UserOffset)
// Removing CPEs can leave empty entries, skip
if (CPEs[i].CPEMI == NULL)
continue;
- if (CPEIsInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.MaxDisp, U.NegOk)) {
- DEBUG(errs() << "Replacing CPE#" << CPI << " with CPE#"
+ if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
+ U.NegOk)) {
+ DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
<< CPEs[i].CPI << "\n");
// Point the CPUser node to the replacement
U.CPEMI = CPEs[i].CPEMI;
@@ -1045,7 +1166,7 @@ int ARMConstantIslands::LookForExistingCPEntry(CPUser& U, unsigned UserOffset)
CPEs[i].RefCount++;
// ...and the original. If we didn't remove the old entry, none of the
// addresses changed, so we don't need another pass.
- return DecrementOldEntry(CPI, CPEMI) ? 2 : 1;
+ return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
}
}
return 0;
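// The 0/1/2 convention above, spelled out as an enum with a hypothetical
// caller (names are mine, not the pass's): "in range" means done, "reused
// after deletions" means offsets moved and another fixup pass is needed,
// and 0 means a fresh clone must be placed.
enum CPEResult { CPE_NotFound = 0, CPE_InRange = 1, CPE_Reused = 2 };

// Returns true if no new entry needs to be placed for this user.
static bool resolveExistingEntry(CPEResult R, bool &AddressesChanged) {
  if (R == CPE_Reused)
    AddressesChanged = true;   // a dead entry was removed; offsets shifted
  return R != CPE_NotFound;
}

int main() {
  bool Changed = false;
  return resolveExistingEntry(CPE_Reused, Changed) && Changed ? 0 : 1;
}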
@@ -1066,7 +1187,7 @@ static inline unsigned getUnconditionalBrDisp(int Opc) {
return ((1<<23)-1)*4;
}
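// Worked numbers behind getUnconditionalBrDisp above: each branch reaches
// (2^MagnitudeBits - 1) * Scale bytes forward, matching the ((1<<23)-1)*4
// returned for ARM::B in the visible default case. The tB and t2B cases are
// not shown in this hunk; the values below follow the same pattern and are
// illustrative.
#include <cstdio>

static unsigned maxDisp(unsigned MagnitudeBits, unsigned Scale) {
  return ((1u << MagnitudeBits) - 1) * Scale;
}

int main() {
  std::printf("tB  = %u bytes\n", maxDisp(10, 2)); // Thumb1 B: 2046
  std::printf("t2B = %u bytes\n", maxDisp(23, 2)); // Thumb2 B: ~16 MiB
  std::printf("B   = %u bytes\n", maxDisp(23, 4)); // ARM B:    ~32 MiB
  return 0;
}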
-/// LookForWater - Look for an existing entry in the WaterList in which
+/// findAvailableWater - Look for an existing entry in the WaterList in which
/// we can place the CPE referenced from U so it's within range of U's MI.
/// Returns true if found, false if not. If it returns true, WaterIter
/// is set to the WaterList entry. For Thumb, prefer water that will not
@@ -1074,15 +1195,14 @@ static inline unsigned getUnconditionalBrDisp(int Opc) {
/// terminates, the CPE location for a particular CPUser is only allowed to
/// move to a lower address, so search backward from the end of the list and
/// prefer the first water that is in range.
-bool ARMConstantIslands::LookForWater(CPUser &U, unsigned UserOffset,
+bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
water_iterator &WaterIter) {
if (WaterList.empty())
return false;
- bool FoundWaterThatWouldPad = false;
- water_iterator IPThatWouldPad;
- for (water_iterator IP = prior(WaterList.end()),
- B = WaterList.begin();; --IP) {
+ unsigned BestGrowth = ~0u;
+ for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();;
+ --IP) {
MachineBasicBlock* WaterBB = *IP;
// Check if water is in range and is either at a lower address than the
// current "high water mark" or a new water block that was created since
@@ -1092,166 +1212,186 @@ bool ARMConstantIslands::LookForWater(CPUser &U, unsigned UserOffset,
// should be relatively uncommon and when it does happen, we want to be
// sure to take advantage of it for all the CPEs near that block, so that
// we don't insert more branches than necessary.
- if (WaterIsInRange(UserOffset, WaterBB, U) &&
+ unsigned Growth;
+ if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
(WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
- NewWaterList.count(WaterBB))) {
- unsigned WBBId = WaterBB->getNumber();
- if (isThumb &&
- (BBOffsets[WBBId] + BBSizes[WBBId])%4 != 0) {
- // This is valid Water, but would introduce padding. Remember
- // it in case we don't find any Water that doesn't do this.
- if (!FoundWaterThatWouldPad) {
- FoundWaterThatWouldPad = true;
- IPThatWouldPad = IP;
- }
- } else {
- WaterIter = IP;
+ NewWaterList.count(WaterBB)) && Growth < BestGrowth) {
+ // This is the least amount of required padding seen so far.
+ BestGrowth = Growth;
+ WaterIter = IP;
+ DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
+ << " Growth=" << Growth << '\n');
+
+ // Keep looking unless it is perfect.
+ if (BestGrowth == 0)
return true;
- }
}
if (IP == B)
break;
}
- if (FoundWaterThatWouldPad) {
- WaterIter = IPThatWouldPad;
- return true;
- }
- return false;
+ return BestGrowth != ~0u;
}
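// A standalone model of the best-growth scan above: walk candidate water
// from highest address down, keep the candidate that costs the least
// padding, and stop early on a perfect zero-growth fit. The growth numbers
// here are synthetic inputs, not computed from a real block layout.
#include <cstdio>
#include <vector>

// Returns the index of the chosen water, or -1 if none is usable.
static int findWater(const std::vector<int> &GrowthIfUsed) {
  unsigned Best = ~0u;
  int BestIdx = -1;
  for (int i = (int)GrowthIfUsed.size() - 1; i >= 0; --i) {
    if (GrowthIfUsed[i] < 0)
      continue;                      // out of range for this user
    if ((unsigned)GrowthIfUsed[i] < Best) {
      Best = (unsigned)GrowthIfUsed[i];
      BestIdx = i;
      if (Best == 0)
        break;                       // perfect fit, stop searching
    }
  }
  return BestIdx;
}

int main() {
  // -1 = not in range; otherwise bytes of padding this water would cost.
  std::vector<int> Growth = {4, 0, -1, 2};
  std::printf("chosen water index: %d\n", findWater(Growth)); // prints 1
  return 0;
}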
-/// CreateNewWater - No existing WaterList entry will work for
+/// createNewWater - No existing WaterList entry will work for
/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
/// block is used if in range, and the conditional branch munged so control
/// flow is correct. Otherwise the block is split to create a hole with an
/// unconditional branch around it. In either case NewMBB is set to a
/// block following which the new island can be inserted (the WaterList
/// is not adjusted).
-void ARMConstantIslands::CreateNewWater(unsigned CPUserIndex,
+void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
unsigned UserOffset,
MachineBasicBlock *&NewMBB) {
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
+ unsigned CPELogAlign = getCPELogAlign(CPEMI);
MachineBasicBlock *UserMBB = UserMI->getParent();
- unsigned OffsetOfNextBlock = BBOffsets[UserMBB->getNumber()] +
- BBSizes[UserMBB->getNumber()];
- assert(OffsetOfNextBlock== BBOffsets[UserMBB->getNumber()+1]);
+ const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
// If the block does not end in an unconditional branch already, and if the
// end of the block is within range, make new water there. (The addition
// below is for the unconditional branch we will be adding: 4 bytes on ARM +
- // Thumb2, 2 on Thumb1. Possible Thumb1 alignment padding is allowed for
- // inside OffsetIsInRange.
- if (BBHasFallthrough(UserMBB) &&
- OffsetIsInRange(UserOffset, OffsetOfNextBlock + (isThumb1 ? 2: 4),
- U.MaxDisp, U.NegOk, U.IsSoImm)) {
- DEBUG(errs() << "Split at end of block\n");
- if (&UserMBB->back() == UserMI)
- assert(BBHasFallthrough(UserMBB) && "Expected a fallthrough BB!");
- NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
- // Add an unconditional branch from UserMBB to fallthrough block.
- // Record it for branch lengthening; this new branch will not get out of
- // range, but if the preceding conditional branch is out of range, the
- // targets will be exchanged, and the altered branch may be out of
- // range, so the machinery has to know about it.
- int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
- if (!isThumb)
- BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
- else
- BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB)
- .addImm(ARMCC::AL).addReg(0);
- unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
- ImmBranches.push_back(ImmBranch(&UserMBB->back(),
- MaxDisp, false, UncondBr));
- int delta = isThumb1 ? 2 : 4;
- BBSizes[UserMBB->getNumber()] += delta;
- AdjustBBOffsetsAfter(UserMBB, delta);
- } else {
- // What a big block. Find a place within the block to split it.
- // This is a little tricky on Thumb1 since instructions are 2 bytes
- // and constant pool entries are 4 bytes: if instruction I references
- // island CPE, and instruction I+1 references CPE', it will
- // not work well to put CPE as far forward as possible, since then
- // CPE' cannot immediately follow it (that location is 2 bytes
- // farther away from I+1 than CPE was from I) and we'd need to create
- // a new island. So, we make a first guess, then walk through the
- // instructions between the one currently being looked at and the
- // possible insertion point, and make sure any other instructions
- // that reference CPEs will be able to use the same island area;
- // if not, we back up the insertion point.
-
- // The 4 in the following is for the unconditional branch we'll be
- // inserting (allows for long branch on Thumb1). Alignment of the
- // island is handled inside OffsetIsInRange.
- unsigned BaseInsertOffset = UserOffset + U.MaxDisp -4;
- // This could point off the end of the block if we've already got
- // constant pool entries following this block; only the last one is
- // in the water list. Back past any possible branches (allow for a
- // conditional and a maximally long unconditional).
- if (BaseInsertOffset >= BBOffsets[UserMBB->getNumber()+1])
- BaseInsertOffset = BBOffsets[UserMBB->getNumber()+1] -
- (isThumb1 ? 6 : 8);
- unsigned EndInsertOffset = BaseInsertOffset +
- CPEMI->getOperand(2).getImm();
- MachineBasicBlock::iterator MI = UserMI;
- ++MI;
- unsigned CPUIndex = CPUserIndex+1;
- unsigned NumCPUsers = CPUsers.size();
- MachineInstr *LastIT = 0;
- for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
- Offset < BaseInsertOffset;
- Offset += TII->GetInstSizeInBytes(MI),
- MI = llvm::next(MI)) {
- if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
- CPUser &U = CPUsers[CPUIndex];
- if (!OffsetIsInRange(Offset, EndInsertOffset,
- U.MaxDisp, U.NegOk, U.IsSoImm)) {
- BaseInsertOffset -= (isThumb1 ? 2 : 4);
- EndInsertOffset -= (isThumb1 ? 2 : 4);
- }
- // This is overly conservative, as we don't account for CPEMIs
- // being reused within the block, but it doesn't matter much.
- EndInsertOffset += CPUsers[CPUIndex].CPEMI->getOperand(2).getImm();
- CPUIndex++;
- }
+ // Thumb2, 2 on Thumb1.
+ if (BBHasFallthrough(UserMBB)) {
+ // Size of branch to insert.
+ unsigned Delta = isThumb1 ? 2 : 4;
+ // End of UserBlock after adding a branch.
+ unsigned UserBlockEnd = UserBBI.postOffset() + Delta;
+ // Compute the offset where the CPE will begin.
+ unsigned CPEOffset = WorstCaseAlign(UserBlockEnd, CPELogAlign,
+ UserBBI.postKnownBits());
+
+ if (isOffsetInRange(UserOffset, CPEOffset, U)) {
+ DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
+ << format(", expected CPE offset %#x\n", CPEOffset));
+ NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
+ // Add an unconditional branch from UserMBB to fallthrough block. Record
+ // it for branch lengthening; this new branch will not get out of range,
+ // but if the preceding conditional branch is out of range, the targets
+ // will be exchanged, and the altered branch may be out of range, so the
+ // machinery has to know about it.
+ int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
+ if (!isThumb)
+ BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
+ else
+ BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB)
+ .addImm(ARMCC::AL).addReg(0);
+ unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
+ ImmBranches.push_back(ImmBranch(&UserMBB->back(),
+ MaxDisp, false, UncondBr));
+ BBInfo[UserMBB->getNumber()].Size += Delta;
+ adjustBBOffsetsAfter(UserMBB);
+ return;
+ }
+ }
- // Remember the last IT instruction.
- if (MI->getOpcode() == ARM::t2IT)
- LastIT = MI;
+ // What a big block. Find a place within the block to split it. This is a
+ // little tricky on Thumb1 since instructions are 2 bytes and constant pool
+ // entries are 4 bytes: if instruction I references island CPE, and
+ // instruction I+1 references CPE', it will not work well to put CPE as far
+ // forward as possible, since then CPE' cannot immediately follow it (that
+ // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
+ // need to create a new island. So, we make a first guess, then walk through
+ // the instructions between the one currently being looked at and the
+ // possible insertion point, and make sure any other instructions that
+ // reference CPEs will be able to use the same island area; if not, we back
+ // up the insertion point.
+
+ // Try to split the block so it's fully aligned. Compute the latest split
+ // point where we can add a 4-byte branch instruction, and then apply
+ // WorstCaseAlign at LogAlign.
+ unsigned LogAlign = MF->getAlignment();
+ assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
+ unsigned KnownBits = UserBBI.internalKnownBits();
+ unsigned UPad = UnknownPadding(LogAlign, KnownBits);
+ unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
+ DEBUG(dbgs() << format("Split in middle of big block before %#x",
+ BaseInsertOffset));
+
+ // Account for alignment and unknown padding.
+ BaseInsertOffset &= ~((1u << LogAlign) - 1);
+ BaseInsertOffset -= UPad;
+
+ // The 4 in the following is for the unconditional branch we'll be inserting
+ // (allows for long branch on Thumb1). Alignment of the island is handled
+ // inside isOffsetInRange.
+ BaseInsertOffset -= 4;
+
+ DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
+ << " la=" << LogAlign
+ << " kb=" << KnownBits
+ << " up=" << UPad << '\n');
+
+ // This could point off the end of the block if we've already got constant
+ // pool entries following this block; only the last one is in the water list.
+ // Back past any possible branches (allow for a conditional and a maximally
+ // long unconditional).
+ if (BaseInsertOffset >= BBInfo[UserMBB->getNumber()+1].Offset)
+ BaseInsertOffset = BBInfo[UserMBB->getNumber()+1].Offset -
+ (isThumb1 ? 6 : 8);
+ unsigned EndInsertOffset =
+ WorstCaseAlign(BaseInsertOffset + 4, LogAlign, KnownBits) +
+ CPEMI->getOperand(2).getImm();
+ MachineBasicBlock::iterator MI = UserMI;
+ ++MI;
+ unsigned CPUIndex = CPUserIndex+1;
+ unsigned NumCPUsers = CPUsers.size();
+ MachineInstr *LastIT = 0;
+ for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
+ Offset < BaseInsertOffset;
+ Offset += TII->GetInstSizeInBytes(MI),
+ MI = llvm::next(MI)) {
+ if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
+ CPUser &U = CPUsers[CPUIndex];
+ if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
+ // Shift the insertion point by one unit of alignment so it is within reach.
+ BaseInsertOffset -= 1u << LogAlign;
+ EndInsertOffset -= 1u << LogAlign;
+ }
+ // This is overly conservative, as we don't account for CPEMIs being
+ // reused within the block, but it doesn't matter much. Also assume CPEs
+ // are added in order with alignment padding. We may eventually be able
+ // to pack the aligned CPEs better.
+ EndInsertOffset = RoundUpToAlignment(EndInsertOffset,
+ 1u << getCPELogAlign(U.CPEMI)) +
+ U.CPEMI->getOperand(2).getImm();
+ CPUIndex++;
}
- DEBUG(errs() << "Split in middle of big block\n");
- --MI;
+ // Remember the last IT instruction.
+ if (MI->getOpcode() == ARM::t2IT)
+ LastIT = MI;
+ }
+
+ --MI;
- // Avoid splitting an IT block.
- if (LastIT) {
- unsigned PredReg = 0;
- ARMCC::CondCodes CC = llvm::getITInstrPredicate(MI, PredReg);
- if (CC != ARMCC::AL)
- MI = LastIT;
- }
- NewMBB = SplitBlockBeforeInstr(MI);
+ // Avoid splitting an IT block.
+ if (LastIT) {
+ unsigned PredReg = 0;
+ ARMCC::CondCodes CC = getITInstrPredicate(MI, PredReg);
+ if (CC != ARMCC::AL)
+ MI = LastIT;
}
+ NewMBB = splitBlockBeforeInstr(MI);
}
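// The alignment helpers used throughout createNewWater, reconstructed from
// their call sites as a standalone sketch (illustrative; the pass's own
// definitions may differ in detail). When only the low KnownBits of an
// offset are certain, up to 2^LogAlign - 2^KnownBits bytes of padding can
// appear at an alignment boundary, and a worst-case aligned offset must
// allow for that before rounding up.
#include <cassert>

static unsigned unknownPadding(unsigned LogAlign, unsigned KnownBits) {
  if (KnownBits < LogAlign)
    return (1u << LogAlign) - (1u << KnownBits);
  return 0; // enough bits known: any padding is fully determined
}

static unsigned worstCaseAlign(unsigned Offset, unsigned LogAlign,
                               unsigned KnownBits) {
  Offset += unknownPadding(LogAlign, KnownBits); // worst possible drift first
  unsigned Mask = (1u << LogAlign) - 1;
  return (Offset + Mask) & ~Mask;                // then align up
}

int main() {
  // Only bit 0 known, 4-byte alignment: up to 2 bytes of hidden padding.
  assert(unknownPadding(2, 1) == 2);
  assert(worstCaseAlign(0x1001, 2, 1) == 0x1004);
  return 0;
}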
-/// HandleConstantPoolUser - Analyze the specified user, checking to see if it
+/// handleConstantPoolUser - Analyze the specified user, checking to see if it
/// is out-of-range. If so, pick up the constant pool value and move it some
/// place in-range. Return true if we changed any addresses (thus must run
/// another pass of branch lengthening), false otherwise.
-bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
- unsigned CPUserIndex) {
+bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
unsigned CPI = CPEMI->getOperand(1).getIndex();
unsigned Size = CPEMI->getOperand(2).getImm();
- // Compute this only once, it's expensive. The 4 or 8 is the value the
- // hardware keeps in the PC.
- unsigned UserOffset = GetOffsetOf(UserMI) + (isThumb ? 4 : 8);
+ // Compute this only once, it's expensive.
+ unsigned UserOffset = getUserOffset(U);
// See if the current entry is within range, or there is a clone of it
// in range.
- int result = LookForExistingCPEntry(U, UserOffset);
+ int result = findInRangeCPEntry(U, UserOffset);
if (result==1) return false;
else if (result==2) return true;
@@ -1260,11 +1400,11 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
unsigned ID = AFI->createPICLabelUId();
// Look for water where we can place this CPE.
- MachineBasicBlock *NewIsland = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
MachineBasicBlock *NewMBB;
water_iterator IP;
- if (LookForWater(U, UserOffset, IP)) {
- DEBUG(errs() << "found water in range\n");
+ if (findAvailableWater(U, UserOffset, IP)) {
+ DEBUG(dbgs() << "Found water in range\n");
MachineBasicBlock *WaterBB = *IP;
// If the original WaterList entry was "new water" on this iteration,
@@ -1279,10 +1419,10 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
} else {
// No water found.
- DEBUG(errs() << "No water found\n");
- CreateNewWater(CPUserIndex, UserOffset, NewMBB);
+ DEBUG(dbgs() << "No water found\n");
+ createNewWater(CPUserIndex, UserOffset, NewMBB);
- // SplitBlockBeforeInstr adds to WaterList, which is important when it is
+ // splitBlockBeforeInstr adds to WaterList, which is important when it is
// called while handling branches so that the water will be seen on the
// next iteration for constant pools, but in this context, we don't want
// it. Check for this so it will be removed from the WaterList.
@@ -1304,13 +1444,13 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
WaterList.erase(IP);
// Okay, we know we can put an island before NewMBB now, do it!
- MF.insert(NewMBB, NewIsland);
+ MF->insert(NewMBB, NewIsland);
// Update internal data structures to account for the newly inserted MBB.
- UpdateForInsertedWaterBlock(NewIsland);
+ updateForInsertedWaterBlock(NewIsland);
// Decrement the old entry, and remove it if refcount becomes 0.
- DecrementOldEntry(CPI, CPEMI);
+ decrementCPEReferenceCount(CPI, CPEMI);
// Now that we have an island to add the CPE to, clone the original CPE and
// add it to the island.
@@ -1320,13 +1460,12 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
++NumCPEs;
- BBOffsets[NewIsland->getNumber()] = BBOffsets[NewMBB->getNumber()];
- // Compensate for .align 2 in thumb mode.
- if (isThumb && (BBOffsets[NewIsland->getNumber()]%4 != 0 || HasInlineAsm))
- Size += 2;
+ // Mark the basic block as aligned as required by the const-pool entry.
+ NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
+
// Increase the size of the island block to account for the new entry.
- BBSizes[NewIsland->getNumber()] += Size;
- AdjustBBOffsetsAfter(NewIsland, Size);
+ BBInfo[NewIsland->getNumber()].Size += Size;
+ adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland)));
// Finally, change the CPI in the instruction operand to be ID.
for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
@@ -1335,31 +1474,30 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
break;
}
- DEBUG(errs() << " Moved CPE to #" << ID << " CPI=" << CPI
- << '\t' << *UserMI);
+ DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
+ << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
return true;
}
-/// RemoveDeadCPEMI - Remove a dead constant pool entry instruction. Update
+/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
/// sizes and offsets of impacted basic blocks.
-void ARMConstantIslands::RemoveDeadCPEMI(MachineInstr *CPEMI) {
+void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
MachineBasicBlock *CPEBB = CPEMI->getParent();
unsigned Size = CPEMI->getOperand(2).getImm();
CPEMI->eraseFromParent();
- BBSizes[CPEBB->getNumber()] -= Size;
+ BBInfo[CPEBB->getNumber()].Size -= Size;
// All succeeding offsets have the current size value added in, fix this.
if (CPEBB->empty()) {
- // In thumb1 mode, the size of island may be padded by two to compensate for
- // the alignment requirement. Then it will now be 2 when the block is
- // empty, so fix this.
- // All succeeding offsets have the current size value added in, fix this.
- if (BBSizes[CPEBB->getNumber()] != 0) {
- Size += BBSizes[CPEBB->getNumber()];
- BBSizes[CPEBB->getNumber()] = 0;
- }
- }
- AdjustBBOffsetsAfter(CPEBB, -Size);
+ BBInfo[CPEBB->getNumber()].Size = 0;
+
+ // This block no longer needs to be aligned. <rdar://problem/10534709>.
+ CPEBB->setAlignment(0);
+ } else
+ // Entries are sorted by descending alignment, so realign from the front.
+ CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
+
+ adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
// this BB's predecessor jumps directly to this BB's successor. This
// shouldn't happen currently.
@@ -1367,15 +1505,15 @@ void ARMConstantIslands::RemoveDeadCPEMI(MachineInstr *CPEMI) {
// FIXME: remove the empty blocks after all the work is done?
}
-/// RemoveUnusedCPEntries - Remove constant pool entries whose refcounts
+/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
/// are zero.
-bool ARMConstantIslands::RemoveUnusedCPEntries() {
+bool ARMConstantIslands::removeUnusedCPEntries() {
bool MadeChange = false;
for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
std::vector<CPEntry> &CPEs = CPEntries[i];
for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
- RemoveDeadCPEMI(CPEs[j].CPEMI);
+ removeDeadCPEMI(CPEs[j].CPEMI);
CPEs[j].CPEMI = NULL;
MadeChange = true;
}
@@ -1384,18 +1522,18 @@ bool ARMConstantIslands::RemoveUnusedCPEntries() {
return MadeChange;
}
-/// BBIsInRange - Returns true if the distance between specific MI and
+/// isBBInRange - Returns true if the distance between specific MI and
/// specific BB can fit in MI's displacement field.
-bool ARMConstantIslands::BBIsInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
+bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
unsigned MaxDisp) {
unsigned PCAdj = isThumb ? 4 : 8;
- unsigned BrOffset = GetOffsetOf(MI) + PCAdj;
- unsigned DestOffset = BBOffsets[DestBB->getNumber()];
+ unsigned BrOffset = getOffsetOf(MI) + PCAdj;
+ unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
- DEBUG(errs() << "Branch of destination BB#" << DestBB->getNumber()
+ DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
<< " from BB#" << MI->getParent()->getNumber()
<< " max delta=" << MaxDisp
- << " from " << GetOffsetOf(MI) << " to " << DestOffset
+ << " from " << getOffsetOf(MI) << " to " << DestOffset
<< " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
if (BrOffset <= DestOffset) {
@@ -1409,50 +1547,50 @@ bool ARMConstantIslands::BBIsInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
return false;
}
-/// FixUpImmediateBr - Fix up an immediate branch whose destination is too far
+/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
/// away to fit in its displacement field.
-bool ARMConstantIslands::FixUpImmediateBr(MachineFunction &MF, ImmBranch &Br) {
+bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
MachineInstr *MI = Br.MI;
MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
// Check to see if the DestBB is already in-range.
- if (BBIsInRange(MI, DestBB, Br.MaxDisp))
+ if (isBBInRange(MI, DestBB, Br.MaxDisp))
return false;
if (!Br.isCond)
- return FixUpUnconditionalBr(MF, Br);
- return FixUpConditionalBr(MF, Br);
+ return fixupUnconditionalBr(Br);
+ return fixupConditionalBr(Br);
}
-/// FixUpUnconditionalBr - Fix up an unconditional branch whose destination is
+/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
/// too far away to fit in its displacement field. If the LR register has been
/// spilled in the epilogue, then we can use BL to implement a far jump.
/// Otherwise, add an intermediate branch instruction to a branch.
bool
-ARMConstantIslands::FixUpUnconditionalBr(MachineFunction &MF, ImmBranch &Br) {
+ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
MachineInstr *MI = Br.MI;
MachineBasicBlock *MBB = MI->getParent();
if (!isThumb1)
- llvm_unreachable("FixUpUnconditionalBr is Thumb1 only!");
+ llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");
// Use BL to implement far jump.
Br.MaxDisp = (1 << 21) * 2;
MI->setDesc(TII->get(ARM::tBfar));
- BBSizes[MBB->getNumber()] += 2;
- AdjustBBOffsetsAfter(MBB, 2);
+ BBInfo[MBB->getNumber()].Size += 2;
+ adjustBBOffsetsAfter(MBB);
HasFarJump = true;
++NumUBrFixed;
- DEBUG(errs() << " Changed B to long jump " << *MI);
+ DEBUG(dbgs() << " Changed B to long jump " << *MI);
return true;
}
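// Why Br.MaxDisp becomes (1 << 21) * 2 above: tBfar is lowered as a Thumb1
// BL-style pair whose 22-bit signed displacement is scaled by 2, giving a
// roughly +/-4 MiB reach instead of tB's +/-2 KiB. The arithmetic below is
// a worked illustration of those figures, not code from the pass.
#include <cstdio>

int main() {
  unsigned FarDisp  = (1u << 21) * 2;        // 4194304 bytes = 4 MiB
  unsigned NearDisp = ((1u << 10) - 1) * 2;  // 2046 bytes
  std::printf("tBfar reach = %u, tB reach = %u\n", FarDisp, NearDisp);
  return 0;
}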
-/// FixUpConditionalBr - Fix up a conditional branch whose destination is too
+/// fixupConditionalBr - Fix up a conditional branch whose destination is too
/// far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool
-ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
+ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
MachineInstr *MI = Br.MI;
MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
@@ -1486,8 +1624,8 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
// bne L2
// b L1
MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
- if (BBIsInRange(MI, NewDest, Br.MaxDisp)) {
- DEBUG(errs() << " Invert Bcc condition and swap its destination with "
+ if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
+ DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
<< *BMI);
BMI->getOperand(0).setMBB(DestBB);
MI->getOperand(0).setMBB(NewDest);
@@ -1498,19 +1636,17 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
}
if (NeedSplit) {
- SplitBlockBeforeInstr(MI);
+ splitBlockBeforeInstr(MI);
// No need for the branch to the next block. We're adding an unconditional
// branch to the destination.
int delta = TII->GetInstSizeInBytes(&MBB->back());
- BBSizes[MBB->getNumber()] -= delta;
- MachineBasicBlock* SplitBB = llvm::next(MachineFunction::iterator(MBB));
- AdjustBBOffsetsAfter(SplitBB, -delta);
+ BBInfo[MBB->getNumber()].Size -= delta;
MBB->back().eraseFromParent();
- // BBOffsets[SplitBB] is wrong temporarily, fixed below
+ // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
}
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
- DEBUG(errs() << " Insert B to BB#" << DestBB->getNumber()
+ DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
<< " also invert condition and change dest. to BB#"
<< NextBB->getNumber() << "\n");
@@ -1519,30 +1655,27 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
.addMBB(NextBB).addImm(CC).addReg(CCReg);
Br.MI = &MBB->back();
- BBSizes[MBB->getNumber()] += TII->GetInstSizeInBytes(&MBB->back());
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
if (isThumb)
BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
.addImm(ARMCC::AL).addReg(0);
else
BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
- BBSizes[MBB->getNumber()] += TII->GetInstSizeInBytes(&MBB->back());
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
// Remove the old conditional branch. It may or may not still be in MBB.
- BBSizes[MI->getParent()->getNumber()] -= TII->GetInstSizeInBytes(MI);
+ BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
MI->eraseFromParent();
-
- // The net size change is an addition of one unconditional branch.
- int delta = TII->GetInstSizeInBytes(&MBB->back());
- AdjustBBOffsetsAfter(MBB, delta);
+ adjustBBOffsetsAfter(MBB);
return true;
}
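// The shape of the rewrite performed above, with the condition inversion it
// relies on made explicit. Enum values and the invert() helper are
// illustrative stand-ins for ARMCC and its getOppositeCondition.
//
//   before:             after:
//     blt  Lfar           bge  Lnext   ; inverted, short conditional hop
//     ...                 b    Lfar    ; unconditional, long-range branch
//                       Lnext:
//
// The net cost is one extra unconditional branch, which is itself pushed
// onto ImmBranches so a later pass can lengthen it if needed.
enum Cond { EQ, NE, LT, GE, GT, LE };

static Cond invert(Cond C) {
  switch (C) {
  case EQ: return NE;
  case NE: return EQ;
  case LT: return GE;
  case GE: return LT;
  case GT: return LE;
  case LE: return GT;
  }
  return EQ; // unreachable: all cases covered
}

int main() { return invert(LT) == GE ? 0 : 1; }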
-/// UndoLRSpillRestore - Remove Thumb push / pop instructions that only spills
+/// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
/// LR / restore LR to pc. FIXME: This is done here because it's only possible
/// to do this if tBfar is not used.
-bool ARMConstantIslands::UndoLRSpillRestore() {
+bool ARMConstantIslands::undoLRSpillRestore() {
bool MadeChange = false;
for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
MachineInstr *MI = PushPopMIs[i];
@@ -1561,7 +1694,26 @@ bool ARMConstantIslands::UndoLRSpillRestore() {
return MadeChange;
}
-bool ARMConstantIslands::OptimizeThumb2Instructions(MachineFunction &MF) {
+// mayOptimizeThumb2Instruction - Returns true if optimizeThumb2Instructions
+// below may shrink MI.
+bool
+ARMConstantIslands::mayOptimizeThumb2Instruction(const MachineInstr *MI) const {
+ switch(MI->getOpcode()) {
+ // optimizeThumb2Instructions.
+ case ARM::t2LEApcrel:
+ case ARM::t2LDRpci:
+ // optimizeThumb2Branches.
+ case ARM::t2B:
+ case ARM::t2Bcc:
+ case ARM::tBcc:
+ // optimizeThumb2JumpTables.
+ case ARM::t2BR_JT:
+ return true;
+ }
+ return false;
+}
+
+bool ARMConstantIslands::optimizeThumb2Instructions() {
bool MadeChange = false;
// Shrink ADR and LDR from constantpool.
@@ -1592,25 +1744,31 @@ bool ARMConstantIslands::OptimizeThumb2Instructions(MachineFunction &MF) {
if (!NewOpc)
continue;
- unsigned UserOffset = GetOffsetOf(U.MI) + 4;
+ unsigned UserOffset = getUserOffset(U);
unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
+
+ // Be conservative with inline asm.
+ if (!U.KnownAlignment)
+ MaxOffs -= 2;
+
// FIXME: Check if offset is multiple of scale if scale is not 4.
- if (CPEIsInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
+ if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
+ DEBUG(dbgs() << "Shrink: " << *U.MI);
U.MI->setDesc(TII->get(NewOpc));
MachineBasicBlock *MBB = U.MI->getParent();
- BBSizes[MBB->getNumber()] -= 2;
- AdjustBBOffsetsAfter(MBB, -2);
+ BBInfo[MBB->getNumber()].Size -= 2;
+ adjustBBOffsetsAfter(MBB);
++NumT2CPShrunk;
MadeChange = true;
}
}
- MadeChange |= OptimizeThumb2Branches(MF);
- MadeChange |= OptimizeThumb2JumpTables(MF);
+ MadeChange |= optimizeThumb2Branches();
+ MadeChange |= optimizeThumb2JumpTables();
return MadeChange;
}
-bool ARMConstantIslands::OptimizeThumb2Branches(MachineFunction &MF) {
+bool ARMConstantIslands::optimizeThumb2Branches() {
bool MadeChange = false;
for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) {
@@ -1636,11 +1794,12 @@ bool ARMConstantIslands::OptimizeThumb2Branches(MachineFunction &MF) {
if (NewOpc) {
unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
- if (BBIsInRange(Br.MI, DestBB, MaxOffs)) {
+ if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
+ DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
Br.MI->setDesc(TII->get(NewOpc));
MachineBasicBlock *MBB = Br.MI->getParent();
- BBSizes[MBB->getNumber()] -= 2;
- AdjustBBOffsetsAfter(MBB, -2);
+ BBInfo[MBB->getNumber()].Size -= 2;
+ adjustBBOffsetsAfter(MBB);
++NumT2BrShrunk;
MadeChange = true;
}
@@ -1650,9 +1809,14 @@ bool ARMConstantIslands::OptimizeThumb2Branches(MachineFunction &MF) {
if (Opcode != ARM::tBcc)
continue;
+ // If the conditional branch doesn't kill CPSR, then CPSR can be live out,
+ // so this transformation is not safe.
+ if (!Br.MI->killsRegister(ARM::CPSR))
+ continue;
+
NewOpc = 0;
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(Br.MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(Br.MI, PredReg);
if (Pred == ARMCC::EQ)
NewOpc = ARM::tCBZ;
else if (Pred == ARMCC::NE)
@@ -1662,27 +1826,28 @@ bool ARMConstantIslands::OptimizeThumb2Branches(MachineFunction &MF) {
MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
// Check if the distance is within 126. Subtract 2 from the starting
// offset because the cmp will be eliminated.
- unsigned BrOffset = GetOffsetOf(Br.MI) + 4 - 2;
- unsigned DestOffset = BBOffsets[DestBB->getNumber()];
+ unsigned BrOffset = getOffsetOf(Br.MI) + 4 - 2;
+ unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
MachineBasicBlock::iterator CmpMI = Br.MI;
if (CmpMI != Br.MI->getParent()->begin()) {
--CmpMI;
if (CmpMI->getOpcode() == ARM::tCMPi8) {
unsigned Reg = CmpMI->getOperand(0).getReg();
- Pred = llvm::getInstrPredicate(CmpMI, PredReg);
+ Pred = getInstrPredicate(CmpMI, PredReg);
if (Pred == ARMCC::AL &&
CmpMI->getOperand(1).getImm() == 0 &&
isARMLowRegister(Reg)) {
MachineBasicBlock *MBB = Br.MI->getParent();
+ DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
MachineInstr *NewBR =
BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
.addReg(Reg).addMBB(DestBB,Br.MI->getOperand(0).getTargetFlags());
CmpMI->eraseFromParent();
Br.MI->eraseFromParent();
Br.MI = NewBR;
- BBSizes[MBB->getNumber()] -= 2;
- AdjustBBOffsetsAfter(MBB, -2);
+ BBInfo[MBB->getNumber()].Size -= 2;
+ adjustBBOffsetsAfter(MBB);
++NumCBZ;
MadeChange = true;
}
@@ -1694,14 +1859,14 @@ bool ARMConstantIslands::OptimizeThumb2Branches(MachineFunction &MF) {
return MadeChange;
}
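// The CBZ/CBNZ range check above, standalone: cbz encodes a 6-bit unsigned
// immediate scaled by 2, so only forward branches of at most 126 bytes
// qualify, and the "+ 4 - 2" models the Thumb PC bias minus the two bytes
// freed by deleting the cmp. Helper name is mine; the logic mirrors the
// visible check.
#include <cassert>

static bool cbzReachable(unsigned BrInsnOffset, unsigned DestOffset) {
  unsigned BrOffset = BrInsnOffset + 4 - 2;
  return BrOffset < DestOffset && (DestOffset - BrOffset) <= 126;
}

int main() {
  assert(cbzReachable(0x100, 0x180));   // +126 from adjusted PC: in range
  assert(!cbzReachable(0x100, 0x182));  // one halfword too far
  assert(!cbzReachable(0x200, 0x100));  // backward: never encodable
  return 0;
}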
-/// OptimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
+/// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
/// jumptables when it's possible.
-bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
+bool ARMConstantIslands::optimizeThumb2JumpTables() {
bool MadeChange = false;
// FIXME: After the tables are shrunk, can we get rid some of the
// constantpool tables?
- MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
+ MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
if (MJTI == 0) return false;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
@@ -1709,18 +1874,18 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
MachineInstr *MI = T2JumpTables[i];
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
- unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
+ unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
MachineOperand JTOP = MI->getOperand(JTOpIdx);
unsigned JTI = JTOP.getIndex();
assert(JTI < JT.size());
bool ByteOk = true;
bool HalfWordOk = true;
- unsigned JTOffset = GetOffsetOf(MI) + 4;
+ unsigned JTOffset = getOffsetOf(MI) + 4;
const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
MachineBasicBlock *MBB = JTBBs[j];
- unsigned DstOffset = BBOffsets[MBB->getNumber()];
+ unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
// Negative offset is not ok. FIXME: We should change BB layout to make
// sure all the branches are forward.
if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
@@ -1791,11 +1956,14 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
if (!OptOk)
continue;
+ DEBUG(dbgs() << "Shrink JT: " << *MI << " addr: " << *AddrMI
+ << " lea: " << *LeaMI);
unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
MachineInstr *NewJTMI = BuildMI(MBB, MI->getDebugLoc(), TII->get(Opc))
.addReg(IdxReg, getKillRegState(IdxRegKill))
.addJumpTableIndex(JTI, JTOP.getTargetFlags())
.addImm(MI->getOperand(JTOpIdx+1).getImm());
+ DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);
// FIXME: Insert an "ALIGN" instruction to ensure the next instruction
// is 2-byte aligned. For now, asm printer will fix it up.
unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
@@ -1808,8 +1976,8 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
MI->eraseFromParent();
int delta = OrigSize - NewSize;
- BBSizes[MBB->getNumber()] -= delta;
- AdjustBBOffsetsAfter(MBB, -delta);
+ BBInfo[MBB->getNumber()].Size -= delta;
+ adjustBBOffsetsAfter(MBB);
++NumTBs;
MadeChange = true;
@@ -1819,12 +1987,12 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
return MadeChange;
}
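// The shrink decision above as a standalone predicate: tbb entries are
// bytes scaled by 2 (max forward 510 bytes, matching the ((1<<8)-1)*2 check
// visible above), tbh entries are halfwords scaled by 2 (max forward
// 131070 bytes, by the same pattern), and a backward target disqualifies
// both, which is why the pass reorders jump-table blocks first. Simplified
// and illustrative, not the pass's exact bookkeeping.
#include <vector>

enum JTKind { JT_Word, JT_Half, JT_Byte };

static JTKind pickJumpTableKind(unsigned JTOffset,
                                const std::vector<unsigned> &DestOffsets) {
  bool ByteOk = true, HalfOk = true;
  for (unsigned Dst : DestOffsets) {
    if (Dst < JTOffset)
      return JT_Word;                  // backward target: keep the word table
    unsigned Delta = Dst - JTOffset;
    if (Delta > ((1u << 8) - 1) * 2)
      ByteOk = false;                  // beyond tbb's 510-byte reach
    if (Delta > ((1u << 16) - 1) * 2)
      HalfOk = false;                  // beyond tbh's 131070-byte reach
  }
  return ByteOk ? JT_Byte : HalfOk ? JT_Half : JT_Word;
}

int main() {
  return pickJumpTableKind(0x100, {0x120, 0x2f0}) == JT_Byte ? 0 : 1;
}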
-/// ReorderThumb2JumpTables - Adjust the function's block layout to ensure that
+/// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
/// jump tables always branch forwards, since that's what tbb and tbh need.
-bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
+bool ARMConstantIslands::reorderThumb2JumpTables() {
bool MadeChange = false;
- MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
+ MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
if (MJTI == 0) return false;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
@@ -1832,7 +2000,7 @@ bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
MachineInstr *MI = T2JumpTables[i];
const MCInstrDesc &MCID = MI->getDesc();
unsigned NumOps = MCID.getNumOperands();
- unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
+ unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 3 : 2);
MachineOperand JTOP = MI->getOperand(JTOpIdx);
unsigned JTI = JTOP.getIndex();
assert(JTI < JT.size());
@@ -1850,7 +2018,7 @@ bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
// The destination precedes the switch. Try to move the block forward
// so we have a positive offset.
MachineBasicBlock *NewBB =
- AdjustJTTargetBlockForward(MBB, MI->getParent());
+ adjustJTTargetBlockForward(MBB, MI->getParent());
if (NewBB)
MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
MadeChange = true;
@@ -1862,10 +2030,7 @@ bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
}
MachineBasicBlock *ARMConstantIslands::
-AdjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB)
-{
- MachineFunction &MF = *BB->getParent();
-
+adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
// If the destination block is terminated by an unconditional branch,
// try to move it; otherwise, create a new block following the jump
// table that branches back to the actual target. This is a very simple
@@ -1882,22 +2047,22 @@ AdjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB)
// If the block ends in an unconditional branch, move it. The prior block
// has to have an analyzable terminator for us to move this one. Be paranoid
// and make sure we're not trying to move the entry block of the function.
- if (!B && Cond.empty() && BB != MF.begin() &&
+ if (!B && Cond.empty() && BB != MF->begin() &&
!TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
BB->moveAfter(JTBB);
OldPrior->updateTerminator();
BB->updateTerminator();
// Update numbering to account for the block being moved.
- MF.RenumberBlocks();
+ MF->RenumberBlocks();
++NumJTMoved;
return NULL;
}
// Create a new MBB for the code after the jump BB.
MachineBasicBlock *NewBB =
- MF.CreateMachineBasicBlock(JTBB->getBasicBlock());
+ MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
MachineFunction::iterator MBBI = JTBB; ++MBBI;
- MF.insert(MBBI, NewBB);
+ MF->insert(MBBI, NewBB);
// Add an unconditional branch from NewBB to BB.
// There doesn't seem to be meaningful DebugInfo available; this doesn't
@@ -1907,7 +2072,7 @@ AdjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB)
.addImm(ARMCC::AL).addReg(0);
// Update internal data structures to account for the newly inserted MBB.
- MF.RenumberBlocks(NewBB);
+ MF->RenumberBlocks(NewBB);
// Update the CFG.
NewBB->addSuccessor(BB);
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
index aadfd47..fa3226e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
@@ -1,4 +1,4 @@
-//===- ARMConstantPoolValue.cpp - ARM constantpool value --------*- C++ -*-===//
+//===-- ARMConstantPoolValue.cpp - ARM constantpool value -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -48,7 +48,6 @@ ARMConstantPoolValue::~ARMConstantPoolValue() {}
const char *ARMConstantPoolValue::getModifierText() const {
switch (Modifier) {
- default: llvm_unreachable("Unknown modifier!");
// FIXME: Are these case sensitive? It'd be nice to lower-case all the
// strings if that's legal.
case ARMCP::no_modifier: return "none";
@@ -58,12 +57,12 @@ const char *ARMConstantPoolValue::getModifierText() const {
case ARMCP::GOTTPOFF: return "gottpoff";
case ARMCP::TPOFF: return "tpoff";
}
+ llvm_unreachable("Unknown modifier!");
}
int ARMConstantPoolValue::getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment) {
- assert(false && "Shouldn't be calling this directly!");
- return -1;
+ llvm_unreachable("Shouldn't be calling this directly!");
}
void
@@ -315,5 +314,6 @@ void ARMConstantPoolMBB::addSelectionDAGCSEId(FoldingSetNodeID &ID) {
}
void ARMConstantPoolMBB::print(raw_ostream &O) const {
+ O << "BB#" << MBB->getNumber();
ARMConstantPoolValue::print(O);
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
index 0d0def3..6b98d44 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
@@ -1,4 +1,4 @@
-//===- ARMConstantPoolValue.h - ARM constantpool value ----------*- C++ -*-===//
+//===-- ARMConstantPoolValue.h - ARM constantpool value ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp
index 51e68b4..f671317 100644
--- a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.cpp
@@ -41,43 +41,38 @@ unsigned ARMELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
case ARM::reloc_arm_machine_cp_entry:
case ARM::reloc_arm_jt_base:
case ARM::reloc_arm_pic_jt:
- assert(0 && "unsupported ARM relocation type"); break;
-
- case ARM::reloc_arm_branch: return ELF::R_ARM_CALL; break;
- case ARM::reloc_arm_movt: return ELF::R_ARM_MOVT_ABS; break;
- case ARM::reloc_arm_movw: return ELF::R_ARM_MOVW_ABS_NC; break;
+ llvm_unreachable("unsupported ARM relocation type");
+
+ case ARM::reloc_arm_branch: return ELF::R_ARM_CALL;
+ case ARM::reloc_arm_movt: return ELF::R_ARM_MOVT_ABS;
+ case ARM::reloc_arm_movw: return ELF::R_ARM_MOVW_ABS_NC;
default:
- llvm_unreachable("unknown ARM relocation type"); break;
+ llvm_unreachable("unknown ARM relocation type");
}
- return 0;
}
long int ARMELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
long int Modifier) const {
- assert(0 && "ARMELFWriterInfo::getDefaultAddendForRelTy() not implemented");
- return 0;
+ llvm_unreachable("ARMELFWriterInfo::getDefaultAddendForRelTy() not "
+ "implemented");
}
unsigned ARMELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
- assert(0 && "ARMELFWriterInfo::getRelocationTySize() not implemented");
- return 0;
+ llvm_unreachable("ARMELFWriterInfo::getRelocationTySize() not implemented");
}
bool ARMELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
- assert(0 && "ARMELFWriterInfo::isPCRelativeRel() not implemented");
- return 1;
+ llvm_unreachable("ARMELFWriterInfo::isPCRelativeRel() not implemented");
}
unsigned ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() const {
- assert(0 &&
- "ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not implemented");
- return 0;
+ llvm_unreachable("ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not "
+ "implemented");
}
long int ARMELFWriterInfo::computeRelocation(unsigned SymOffset,
unsigned RelOffset,
unsigned RelTy) const {
- assert(0 &&
- "ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not implemented");
- return 0;
+ llvm_unreachable("ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not "
+ "implemented");
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h
index 1c4e532..6a84f8a 100644
--- a/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMELFWriterInfo.h
@@ -17,6 +17,7 @@
#include "llvm/Target/TargetELFWriterInfo.h"
namespace llvm {
+ class TargetMachine;
class ARMELFWriterInfo : public TargetELFWriterInfo {
public:
diff --git a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 7872cb9..5fc0360 100644
--- a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -1,4 +1,4 @@
-//===-- ARMExpandPseudoInsts.cpp - Expand pseudo instructions -----*- C++ -*-=//
+//===-- ARMExpandPseudoInsts.cpp - Expand pseudo instructions -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,7 +19,6 @@
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMMachineFunctionInfo.h"
-#include "ARMRegisterInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -61,7 +60,7 @@ namespace {
void ExpandVST(MachineBasicBlock::iterator &MBBI);
void ExpandLaneOp(MachineBasicBlock::iterator &MBBI);
void ExpandVTBL(MachineBasicBlock::iterator &MBBI,
- unsigned Opc, bool IsExt, unsigned NumRegs);
+ unsigned Opc, bool IsExt);
void ExpandMOV32BitImm(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI);
};
@@ -99,13 +98,20 @@ namespace {
// Entries for NEON load/store information table. The table is sorted by
// PseudoOpc for fast binary-search lookups.
struct NEONLdStTableEntry {
- unsigned PseudoOpc;
- unsigned RealOpc;
+ uint16_t PseudoOpc;
+ uint16_t RealOpc;
bool IsLoad;
- bool HasWriteBack;
+ bool isUpdating;
+ bool hasWritebackOperand;
NEONRegSpacing RegSpacing;
unsigned char NumRegs; // D registers loaded or stored
unsigned char RegElts; // elements per D register; used for lane ops
+ // FIXME: Temporary flag to denote whether the real instruction takes
+ // a single register (like the encoding) or all of the registers in
+ // the list (like the asm syntax and the isel DAG). When all definitions
+ // are converted to take only the single encoded register, this will
+ // go away.
+ bool copyAllListRegs;
// Comparison methods for binary search of the table.
bool operator<(const NEONLdStTableEntry &TE) const {
@@ -122,243 +128,203 @@ namespace {
}
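// How a table sorted by PseudoOpc is meant to be searched (per the "fast
// binary-search lookups" comment above): a sketch using std::lower_bound
// over a stand-in entry type. The real pass has its own lookup helper; this
// only illustrates the idea behind the operator< defined above.
#include <algorithm>
#include <cstdint>
#include <iterator>

struct Entry {
  uint16_t PseudoOpc, RealOpc;
  bool operator<(const Entry &E) const { return PseudoOpc < E.PseudoOpc; }
};

static const Entry Table[] = {{10, 1}, {20, 2}, {30, 3}}; // sorted by PseudoOpc

static const Entry *lookupEntry(uint16_t PseudoOpc) {
  Entry Key = {PseudoOpc, 0};
  const Entry *I = std::lower_bound(std::begin(Table), std::end(Table), Key);
  return (I != std::end(Table) && I->PseudoOpc == PseudoOpc) ? I : nullptr;
}

int main() {
  const Entry *E = lookupEntry(20);
  return (E && E->RealOpc == 2) ? 0 : 1;
}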
static const NEONLdStTableEntry NEONLdStTable[] = {
-{ ARM::VLD1DUPq16Pseudo, ARM::VLD1DUPq16, true, false, SingleSpc, 2, 4},
-{ ARM::VLD1DUPq16Pseudo_UPD, ARM::VLD1DUPq16_UPD, true, true, SingleSpc, 2, 4},
-{ ARM::VLD1DUPq32Pseudo, ARM::VLD1DUPq32, true, false, SingleSpc, 2, 2},
-{ ARM::VLD1DUPq32Pseudo_UPD, ARM::VLD1DUPq32_UPD, true, true, SingleSpc, 2, 2},
-{ ARM::VLD1DUPq8Pseudo, ARM::VLD1DUPq8, true, false, SingleSpc, 2, 8},
-{ ARM::VLD1DUPq8Pseudo_UPD, ARM::VLD1DUPq8_UPD, true, true, SingleSpc, 2, 8},
-
-{ ARM::VLD1LNq16Pseudo, ARM::VLD1LNd16, true, false, EvenDblSpc, 1, 4 },
-{ ARM::VLD1LNq16Pseudo_UPD, ARM::VLD1LNd16_UPD, true, true, EvenDblSpc, 1, 4 },
-{ ARM::VLD1LNq32Pseudo, ARM::VLD1LNd32, true, false, EvenDblSpc, 1, 2 },
-{ ARM::VLD1LNq32Pseudo_UPD, ARM::VLD1LNd32_UPD, true, true, EvenDblSpc, 1, 2 },
-{ ARM::VLD1LNq8Pseudo, ARM::VLD1LNd8, true, false, EvenDblSpc, 1, 8 },
-{ ARM::VLD1LNq8Pseudo_UPD, ARM::VLD1LNd8_UPD, true, true, EvenDblSpc, 1, 8 },
-
-{ ARM::VLD1d64QPseudo, ARM::VLD1d64Q, true, false, SingleSpc, 4, 1 },
-{ ARM::VLD1d64QPseudo_UPD, ARM::VLD1d64Q_UPD, true, true, SingleSpc, 4, 1 },
-{ ARM::VLD1d64TPseudo, ARM::VLD1d64T, true, false, SingleSpc, 3, 1 },
-{ ARM::VLD1d64TPseudo_UPD, ARM::VLD1d64T_UPD, true, true, SingleSpc, 3, 1 },
-
-{ ARM::VLD1q16Pseudo, ARM::VLD1q16, true, false, SingleSpc, 2, 4 },
-{ ARM::VLD1q16Pseudo_UPD, ARM::VLD1q16_UPD, true, true, SingleSpc, 2, 4 },
-{ ARM::VLD1q32Pseudo, ARM::VLD1q32, true, false, SingleSpc, 2, 2 },
-{ ARM::VLD1q32Pseudo_UPD, ARM::VLD1q32_UPD, true, true, SingleSpc, 2, 2 },
-{ ARM::VLD1q64Pseudo, ARM::VLD1q64, true, false, SingleSpc, 2, 1 },
-{ ARM::VLD1q64Pseudo_UPD, ARM::VLD1q64_UPD, true, true, SingleSpc, 2, 1 },
-{ ARM::VLD1q8Pseudo, ARM::VLD1q8, true, false, SingleSpc, 2, 8 },
-{ ARM::VLD1q8Pseudo_UPD, ARM::VLD1q8_UPD, true, true, SingleSpc, 2, 8 },
-
-{ ARM::VLD2DUPd16Pseudo, ARM::VLD2DUPd16, true, false, SingleSpc, 2, 4},
-{ ARM::VLD2DUPd16Pseudo_UPD, ARM::VLD2DUPd16_UPD, true, true, SingleSpc, 2, 4},
-{ ARM::VLD2DUPd32Pseudo, ARM::VLD2DUPd32, true, false, SingleSpc, 2, 2},
-{ ARM::VLD2DUPd32Pseudo_UPD, ARM::VLD2DUPd32_UPD, true, true, SingleSpc, 2, 2},
-{ ARM::VLD2DUPd8Pseudo, ARM::VLD2DUPd8, true, false, SingleSpc, 2, 8},
-{ ARM::VLD2DUPd8Pseudo_UPD, ARM::VLD2DUPd8_UPD, true, true, SingleSpc, 2, 8},
-
-{ ARM::VLD2LNd16Pseudo, ARM::VLD2LNd16, true, false, SingleSpc, 2, 4 },
-{ ARM::VLD2LNd16Pseudo_UPD, ARM::VLD2LNd16_UPD, true, true, SingleSpc, 2, 4 },
-{ ARM::VLD2LNd32Pseudo, ARM::VLD2LNd32, true, false, SingleSpc, 2, 2 },
-{ ARM::VLD2LNd32Pseudo_UPD, ARM::VLD2LNd32_UPD, true, true, SingleSpc, 2, 2 },
-{ ARM::VLD2LNd8Pseudo, ARM::VLD2LNd8, true, false, SingleSpc, 2, 8 },
-{ ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd8_UPD, true, true, SingleSpc, 2, 8 },
-{ ARM::VLD2LNq16Pseudo, ARM::VLD2LNq16, true, false, EvenDblSpc, 2, 4 },
-{ ARM::VLD2LNq16Pseudo_UPD, ARM::VLD2LNq16_UPD, true, true, EvenDblSpc, 2, 4 },
-{ ARM::VLD2LNq32Pseudo, ARM::VLD2LNq32, true, false, EvenDblSpc, 2, 2 },
-{ ARM::VLD2LNq32Pseudo_UPD, ARM::VLD2LNq32_UPD, true, true, EvenDblSpc, 2, 2 },
-
-{ ARM::VLD2d16Pseudo, ARM::VLD2d16, true, false, SingleSpc, 2, 4 },
-{ ARM::VLD2d16Pseudo_UPD, ARM::VLD2d16_UPD, true, true, SingleSpc, 2, 4 },
-{ ARM::VLD2d32Pseudo, ARM::VLD2d32, true, false, SingleSpc, 2, 2 },
-{ ARM::VLD2d32Pseudo_UPD, ARM::VLD2d32_UPD, true, true, SingleSpc, 2, 2 },
-{ ARM::VLD2d8Pseudo, ARM::VLD2d8, true, false, SingleSpc, 2, 8 },
-{ ARM::VLD2d8Pseudo_UPD, ARM::VLD2d8_UPD, true, true, SingleSpc, 2, 8 },
-
-{ ARM::VLD2q16Pseudo, ARM::VLD2q16, true, false, SingleSpc, 4, 4 },
-{ ARM::VLD2q16Pseudo_UPD, ARM::VLD2q16_UPD, true, true, SingleSpc, 4, 4 },
-{ ARM::VLD2q32Pseudo, ARM::VLD2q32, true, false, SingleSpc, 4, 2 },
-{ ARM::VLD2q32Pseudo_UPD, ARM::VLD2q32_UPD, true, true, SingleSpc, 4, 2 },
-{ ARM::VLD2q8Pseudo, ARM::VLD2q8, true, false, SingleSpc, 4, 8 },
-{ ARM::VLD2q8Pseudo_UPD, ARM::VLD2q8_UPD, true, true, SingleSpc, 4, 8 },
-
-{ ARM::VLD3DUPd16Pseudo, ARM::VLD3DUPd16, true, false, SingleSpc, 3, 4},
-{ ARM::VLD3DUPd16Pseudo_UPD, ARM::VLD3DUPd16_UPD, true, true, SingleSpc, 3, 4},
-{ ARM::VLD3DUPd32Pseudo, ARM::VLD3DUPd32, true, false, SingleSpc, 3, 2},
-{ ARM::VLD3DUPd32Pseudo_UPD, ARM::VLD3DUPd32_UPD, true, true, SingleSpc, 3, 2},
-{ ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd8, true, false, SingleSpc, 3, 8},
-{ ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd8_UPD, true, true, SingleSpc, 3, 8},
-
-{ ARM::VLD3LNd16Pseudo, ARM::VLD3LNd16, true, false, SingleSpc, 3, 4 },
-{ ARM::VLD3LNd16Pseudo_UPD, ARM::VLD3LNd16_UPD, true, true, SingleSpc, 3, 4 },
-{ ARM::VLD3LNd32Pseudo, ARM::VLD3LNd32, true, false, SingleSpc, 3, 2 },
-{ ARM::VLD3LNd32Pseudo_UPD, ARM::VLD3LNd32_UPD, true, true, SingleSpc, 3, 2 },
-{ ARM::VLD3LNd8Pseudo, ARM::VLD3LNd8, true, false, SingleSpc, 3, 8 },
-{ ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd8_UPD, true, true, SingleSpc, 3, 8 },
-{ ARM::VLD3LNq16Pseudo, ARM::VLD3LNq16, true, false, EvenDblSpc, 3, 4 },
-{ ARM::VLD3LNq16Pseudo_UPD, ARM::VLD3LNq16_UPD, true, true, EvenDblSpc, 3, 4 },
-{ ARM::VLD3LNq32Pseudo, ARM::VLD3LNq32, true, false, EvenDblSpc, 3, 2 },
-{ ARM::VLD3LNq32Pseudo_UPD, ARM::VLD3LNq32_UPD, true, true, EvenDblSpc, 3, 2 },
-
-{ ARM::VLD3d16Pseudo, ARM::VLD3d16, true, false, SingleSpc, 3, 4 },
-{ ARM::VLD3d16Pseudo_UPD, ARM::VLD3d16_UPD, true, true, SingleSpc, 3, 4 },
-{ ARM::VLD3d32Pseudo, ARM::VLD3d32, true, false, SingleSpc, 3, 2 },
-{ ARM::VLD3d32Pseudo_UPD, ARM::VLD3d32_UPD, true, true, SingleSpc, 3, 2 },
-{ ARM::VLD3d8Pseudo, ARM::VLD3d8, true, false, SingleSpc, 3, 8 },
-{ ARM::VLD3d8Pseudo_UPD, ARM::VLD3d8_UPD, true, true, SingleSpc, 3, 8 },
-
-{ ARM::VLD3q16Pseudo_UPD, ARM::VLD3q16_UPD, true, true, EvenDblSpc, 3, 4 },
-{ ARM::VLD3q16oddPseudo, ARM::VLD3q16, true, false, OddDblSpc, 3, 4 },
-{ ARM::VLD3q16oddPseudo_UPD, ARM::VLD3q16_UPD, true, true, OddDblSpc, 3, 4 },
-{ ARM::VLD3q32Pseudo_UPD, ARM::VLD3q32_UPD, true, true, EvenDblSpc, 3, 2 },
-{ ARM::VLD3q32oddPseudo, ARM::VLD3q32, true, false, OddDblSpc, 3, 2 },
-{ ARM::VLD3q32oddPseudo_UPD, ARM::VLD3q32_UPD, true, true, OddDblSpc, 3, 2 },
-{ ARM::VLD3q8Pseudo_UPD, ARM::VLD3q8_UPD, true, true, EvenDblSpc, 3, 8 },
-{ ARM::VLD3q8oddPseudo, ARM::VLD3q8, true, false, OddDblSpc, 3, 8 },
-{ ARM::VLD3q8oddPseudo_UPD, ARM::VLD3q8_UPD, true, true, OddDblSpc, 3, 8 },
-
-{ ARM::VLD4DUPd16Pseudo, ARM::VLD4DUPd16, true, false, SingleSpc, 4, 4},
-{ ARM::VLD4DUPd16Pseudo_UPD, ARM::VLD4DUPd16_UPD, true, true, SingleSpc, 4, 4},
-{ ARM::VLD4DUPd32Pseudo, ARM::VLD4DUPd32, true, false, SingleSpc, 4, 2},
-{ ARM::VLD4DUPd32Pseudo_UPD, ARM::VLD4DUPd32_UPD, true, true, SingleSpc, 4, 2},
-{ ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd8, true, false, SingleSpc, 4, 8},
-{ ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd8_UPD, true, true, SingleSpc, 4, 8},
-
-{ ARM::VLD4LNd16Pseudo, ARM::VLD4LNd16, true, false, SingleSpc, 4, 4 },
-{ ARM::VLD4LNd16Pseudo_UPD, ARM::VLD4LNd16_UPD, true, true, SingleSpc, 4, 4 },
-{ ARM::VLD4LNd32Pseudo, ARM::VLD4LNd32, true, false, SingleSpc, 4, 2 },
-{ ARM::VLD4LNd32Pseudo_UPD, ARM::VLD4LNd32_UPD, true, true, SingleSpc, 4, 2 },
-{ ARM::VLD4LNd8Pseudo, ARM::VLD4LNd8, true, false, SingleSpc, 4, 8 },
-{ ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd8_UPD, true, true, SingleSpc, 4, 8 },
-{ ARM::VLD4LNq16Pseudo, ARM::VLD4LNq16, true, false, EvenDblSpc, 4, 4 },
-{ ARM::VLD4LNq16Pseudo_UPD, ARM::VLD4LNq16_UPD, true, true, EvenDblSpc, 4, 4 },
-{ ARM::VLD4LNq32Pseudo, ARM::VLD4LNq32, true, false, EvenDblSpc, 4, 2 },
-{ ARM::VLD4LNq32Pseudo_UPD, ARM::VLD4LNq32_UPD, true, true, EvenDblSpc, 4, 2 },
-
-{ ARM::VLD4d16Pseudo, ARM::VLD4d16, true, false, SingleSpc, 4, 4 },
-{ ARM::VLD4d16Pseudo_UPD, ARM::VLD4d16_UPD, true, true, SingleSpc, 4, 4 },
-{ ARM::VLD4d32Pseudo, ARM::VLD4d32, true, false, SingleSpc, 4, 2 },
-{ ARM::VLD4d32Pseudo_UPD, ARM::VLD4d32_UPD, true, true, SingleSpc, 4, 2 },
-{ ARM::VLD4d8Pseudo, ARM::VLD4d8, true, false, SingleSpc, 4, 8 },
-{ ARM::VLD4d8Pseudo_UPD, ARM::VLD4d8_UPD, true, true, SingleSpc, 4, 8 },
-
-{ ARM::VLD4q16Pseudo_UPD, ARM::VLD4q16_UPD, true, true, EvenDblSpc, 4, 4 },
-{ ARM::VLD4q16oddPseudo, ARM::VLD4q16, true, false, OddDblSpc, 4, 4 },
-{ ARM::VLD4q16oddPseudo_UPD, ARM::VLD4q16_UPD, true, true, OddDblSpc, 4, 4 },
-{ ARM::VLD4q32Pseudo_UPD, ARM::VLD4q32_UPD, true, true, EvenDblSpc, 4, 2 },
-{ ARM::VLD4q32oddPseudo, ARM::VLD4q32, true, false, OddDblSpc, 4, 2 },
-{ ARM::VLD4q32oddPseudo_UPD, ARM::VLD4q32_UPD, true, true, OddDblSpc, 4, 2 },
-{ ARM::VLD4q8Pseudo_UPD, ARM::VLD4q8_UPD, true, true, EvenDblSpc, 4, 8 },
-{ ARM::VLD4q8oddPseudo, ARM::VLD4q8, true, false, OddDblSpc, 4, 8 },
-{ ARM::VLD4q8oddPseudo_UPD, ARM::VLD4q8_UPD, true, true, OddDblSpc, 4, 8 },
-
-{ ARM::VST1LNq16Pseudo, ARM::VST1LNd16, false, false, EvenDblSpc, 1, 4 },
-{ ARM::VST1LNq16Pseudo_UPD, ARM::VST1LNd16_UPD,false, true, EvenDblSpc, 1, 4 },
-{ ARM::VST1LNq32Pseudo, ARM::VST1LNd32, false, false, EvenDblSpc, 1, 2 },
-{ ARM::VST1LNq32Pseudo_UPD, ARM::VST1LNd32_UPD,false, true, EvenDblSpc, 1, 2 },
-{ ARM::VST1LNq8Pseudo, ARM::VST1LNd8, false, false, EvenDblSpc, 1, 8 },
-{ ARM::VST1LNq8Pseudo_UPD, ARM::VST1LNd8_UPD, false, true, EvenDblSpc, 1, 8 },
-
-{ ARM::VST1d64QPseudo, ARM::VST1d64Q, false, false, SingleSpc, 4, 1 },
-{ ARM::VST1d64QPseudo_UPD, ARM::VST1d64Q_UPD, false, true, SingleSpc, 4, 1 },
-{ ARM::VST1d64TPseudo, ARM::VST1d64T, false, false, SingleSpc, 3, 1 },
-{ ARM::VST1d64TPseudo_UPD, ARM::VST1d64T_UPD, false, true, SingleSpc, 3, 1 },
-
-{ ARM::VST1q16Pseudo, ARM::VST1q16, false, false, SingleSpc, 2, 4 },
-{ ARM::VST1q16Pseudo_UPD, ARM::VST1q16_UPD, false, true, SingleSpc, 2, 4 },
-{ ARM::VST1q32Pseudo, ARM::VST1q32, false, false, SingleSpc, 2, 2 },
-{ ARM::VST1q32Pseudo_UPD, ARM::VST1q32_UPD, false, true, SingleSpc, 2, 2 },
-{ ARM::VST1q64Pseudo, ARM::VST1q64, false, false, SingleSpc, 2, 1 },
-{ ARM::VST1q64Pseudo_UPD, ARM::VST1q64_UPD, false, true, SingleSpc, 2, 1 },
-{ ARM::VST1q8Pseudo, ARM::VST1q8, false, false, SingleSpc, 2, 8 },
-{ ARM::VST1q8Pseudo_UPD, ARM::VST1q8_UPD, false, true, SingleSpc, 2, 8 },
-
-{ ARM::VST2LNd16Pseudo, ARM::VST2LNd16, false, false, SingleSpc, 2, 4 },
-{ ARM::VST2LNd16Pseudo_UPD, ARM::VST2LNd16_UPD, false, true, SingleSpc, 2, 4 },
-{ ARM::VST2LNd32Pseudo, ARM::VST2LNd32, false, false, SingleSpc, 2, 2 },
-{ ARM::VST2LNd32Pseudo_UPD, ARM::VST2LNd32_UPD, false, true, SingleSpc, 2, 2 },
-{ ARM::VST2LNd8Pseudo, ARM::VST2LNd8, false, false, SingleSpc, 2, 8 },
-{ ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd8_UPD, false, true, SingleSpc, 2, 8 },
-{ ARM::VST2LNq16Pseudo, ARM::VST2LNq16, false, false, EvenDblSpc, 2, 4},
-{ ARM::VST2LNq16Pseudo_UPD, ARM::VST2LNq16_UPD, false, true, EvenDblSpc, 2, 4},
-{ ARM::VST2LNq32Pseudo, ARM::VST2LNq32, false, false, EvenDblSpc, 2, 2},
-{ ARM::VST2LNq32Pseudo_UPD, ARM::VST2LNq32_UPD, false, true, EvenDblSpc, 2, 2},
-
-{ ARM::VST2d16Pseudo, ARM::VST2d16, false, false, SingleSpc, 2, 4 },
-{ ARM::VST2d16Pseudo_UPD, ARM::VST2d16_UPD, false, true, SingleSpc, 2, 4 },
-{ ARM::VST2d32Pseudo, ARM::VST2d32, false, false, SingleSpc, 2, 2 },
-{ ARM::VST2d32Pseudo_UPD, ARM::VST2d32_UPD, false, true, SingleSpc, 2, 2 },
-{ ARM::VST2d8Pseudo, ARM::VST2d8, false, false, SingleSpc, 2, 8 },
-{ ARM::VST2d8Pseudo_UPD, ARM::VST2d8_UPD, false, true, SingleSpc, 2, 8 },
-
-{ ARM::VST2q16Pseudo, ARM::VST2q16, false, false, SingleSpc, 4, 4 },
-{ ARM::VST2q16Pseudo_UPD, ARM::VST2q16_UPD, false, true, SingleSpc, 4, 4 },
-{ ARM::VST2q32Pseudo, ARM::VST2q32, false, false, SingleSpc, 4, 2 },
-{ ARM::VST2q32Pseudo_UPD, ARM::VST2q32_UPD, false, true, SingleSpc, 4, 2 },
-{ ARM::VST2q8Pseudo, ARM::VST2q8, false, false, SingleSpc, 4, 8 },
-{ ARM::VST2q8Pseudo_UPD, ARM::VST2q8_UPD, false, true, SingleSpc, 4, 8 },
-
-{ ARM::VST3LNd16Pseudo, ARM::VST3LNd16, false, false, SingleSpc, 3, 4 },
-{ ARM::VST3LNd16Pseudo_UPD, ARM::VST3LNd16_UPD, false, true, SingleSpc, 3, 4 },
-{ ARM::VST3LNd32Pseudo, ARM::VST3LNd32, false, false, SingleSpc, 3, 2 },
-{ ARM::VST3LNd32Pseudo_UPD, ARM::VST3LNd32_UPD, false, true, SingleSpc, 3, 2 },
-{ ARM::VST3LNd8Pseudo, ARM::VST3LNd8, false, false, SingleSpc, 3, 8 },
-{ ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd8_UPD, false, true, SingleSpc, 3, 8 },
-{ ARM::VST3LNq16Pseudo, ARM::VST3LNq16, false, false, EvenDblSpc, 3, 4},
-{ ARM::VST3LNq16Pseudo_UPD, ARM::VST3LNq16_UPD, false, true, EvenDblSpc, 3, 4},
-{ ARM::VST3LNq32Pseudo, ARM::VST3LNq32, false, false, EvenDblSpc, 3, 2},
-{ ARM::VST3LNq32Pseudo_UPD, ARM::VST3LNq32_UPD, false, true, EvenDblSpc, 3, 2},
-
-{ ARM::VST3d16Pseudo, ARM::VST3d16, false, false, SingleSpc, 3, 4 },
-{ ARM::VST3d16Pseudo_UPD, ARM::VST3d16_UPD, false, true, SingleSpc, 3, 4 },
-{ ARM::VST3d32Pseudo, ARM::VST3d32, false, false, SingleSpc, 3, 2 },
-{ ARM::VST3d32Pseudo_UPD, ARM::VST3d32_UPD, false, true, SingleSpc, 3, 2 },
-{ ARM::VST3d8Pseudo, ARM::VST3d8, false, false, SingleSpc, 3, 8 },
-{ ARM::VST3d8Pseudo_UPD, ARM::VST3d8_UPD, false, true, SingleSpc, 3, 8 },
-
-{ ARM::VST3q16Pseudo_UPD, ARM::VST3q16_UPD, false, true, EvenDblSpc, 3, 4 },
-{ ARM::VST3q16oddPseudo, ARM::VST3q16, false, false, OddDblSpc, 3, 4 },
-{ ARM::VST3q16oddPseudo_UPD, ARM::VST3q16_UPD, false, true, OddDblSpc, 3, 4 },
-{ ARM::VST3q32Pseudo_UPD, ARM::VST3q32_UPD, false, true, EvenDblSpc, 3, 2 },
-{ ARM::VST3q32oddPseudo, ARM::VST3q32, false, false, OddDblSpc, 3, 2 },
-{ ARM::VST3q32oddPseudo_UPD, ARM::VST3q32_UPD, false, true, OddDblSpc, 3, 2 },
-{ ARM::VST3q8Pseudo_UPD, ARM::VST3q8_UPD, false, true, EvenDblSpc, 3, 8 },
-{ ARM::VST3q8oddPseudo, ARM::VST3q8, false, false, OddDblSpc, 3, 8 },
-{ ARM::VST3q8oddPseudo_UPD, ARM::VST3q8_UPD, false, true, OddDblSpc, 3, 8 },
-
-{ ARM::VST4LNd16Pseudo, ARM::VST4LNd16, false, false, SingleSpc, 4, 4 },
-{ ARM::VST4LNd16Pseudo_UPD, ARM::VST4LNd16_UPD, false, true, SingleSpc, 4, 4 },
-{ ARM::VST4LNd32Pseudo, ARM::VST4LNd32, false, false, SingleSpc, 4, 2 },
-{ ARM::VST4LNd32Pseudo_UPD, ARM::VST4LNd32_UPD, false, true, SingleSpc, 4, 2 },
-{ ARM::VST4LNd8Pseudo, ARM::VST4LNd8, false, false, SingleSpc, 4, 8 },
-{ ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd8_UPD, false, true, SingleSpc, 4, 8 },
-{ ARM::VST4LNq16Pseudo, ARM::VST4LNq16, false, false, EvenDblSpc, 4, 4},
-{ ARM::VST4LNq16Pseudo_UPD, ARM::VST4LNq16_UPD, false, true, EvenDblSpc, 4, 4},
-{ ARM::VST4LNq32Pseudo, ARM::VST4LNq32, false, false, EvenDblSpc, 4, 2},
-{ ARM::VST4LNq32Pseudo_UPD, ARM::VST4LNq32_UPD, false, true, EvenDblSpc, 4, 2},
-
-{ ARM::VST4d16Pseudo, ARM::VST4d16, false, false, SingleSpc, 4, 4 },
-{ ARM::VST4d16Pseudo_UPD, ARM::VST4d16_UPD, false, true, SingleSpc, 4, 4 },
-{ ARM::VST4d32Pseudo, ARM::VST4d32, false, false, SingleSpc, 4, 2 },
-{ ARM::VST4d32Pseudo_UPD, ARM::VST4d32_UPD, false, true, SingleSpc, 4, 2 },
-{ ARM::VST4d8Pseudo, ARM::VST4d8, false, false, SingleSpc, 4, 8 },
-{ ARM::VST4d8Pseudo_UPD, ARM::VST4d8_UPD, false, true, SingleSpc, 4, 8 },
-
-{ ARM::VST4q16Pseudo_UPD, ARM::VST4q16_UPD, false, true, EvenDblSpc, 4, 4 },
-{ ARM::VST4q16oddPseudo, ARM::VST4q16, false, false, OddDblSpc, 4, 4 },
-{ ARM::VST4q16oddPseudo_UPD, ARM::VST4q16_UPD, false, true, OddDblSpc, 4, 4 },
-{ ARM::VST4q32Pseudo_UPD, ARM::VST4q32_UPD, false, true, EvenDblSpc, 4, 2 },
-{ ARM::VST4q32oddPseudo, ARM::VST4q32, false, false, OddDblSpc, 4, 2 },
-{ ARM::VST4q32oddPseudo_UPD, ARM::VST4q32_UPD, false, true, OddDblSpc, 4, 2 },
-{ ARM::VST4q8Pseudo_UPD, ARM::VST4q8_UPD, false, true, EvenDblSpc, 4, 8 },
-{ ARM::VST4q8oddPseudo, ARM::VST4q8, false, false, OddDblSpc, 4, 8 },
-{ ARM::VST4q8oddPseudo_UPD, ARM::VST4q8_UPD, false, true, OddDblSpc, 4, 8 }
+{ ARM::VLD1LNq16Pseudo, ARM::VLD1LNd16, true, false, false, EvenDblSpc, 1, 4 ,true},
+{ ARM::VLD1LNq16Pseudo_UPD, ARM::VLD1LNd16_UPD, true, true, true, EvenDblSpc, 1, 4 ,true},
+{ ARM::VLD1LNq32Pseudo, ARM::VLD1LNd32, true, false, false, EvenDblSpc, 1, 2 ,true},
+{ ARM::VLD1LNq32Pseudo_UPD, ARM::VLD1LNd32_UPD, true, true, true, EvenDblSpc, 1, 2 ,true},
+{ ARM::VLD1LNq8Pseudo, ARM::VLD1LNd8, true, false, false, EvenDblSpc, 1, 8 ,true},
+{ ARM::VLD1LNq8Pseudo_UPD, ARM::VLD1LNd8_UPD, true, true, true, EvenDblSpc, 1, 8 ,true},
+
+{ ARM::VLD1d64QPseudo, ARM::VLD1d64Q, true, false, false, SingleSpc, 4, 1 ,false},
+{ ARM::VLD1d64TPseudo, ARM::VLD1d64T, true, false, false, SingleSpc, 3, 1 ,false},
+
+{ ARM::VLD2LNd16Pseudo, ARM::VLD2LNd16, true, false, false, SingleSpc, 2, 4 ,true},
+{ ARM::VLD2LNd16Pseudo_UPD, ARM::VLD2LNd16_UPD, true, true, true, SingleSpc, 2, 4 ,true},
+{ ARM::VLD2LNd32Pseudo, ARM::VLD2LNd32, true, false, false, SingleSpc, 2, 2 ,true},
+{ ARM::VLD2LNd32Pseudo_UPD, ARM::VLD2LNd32_UPD, true, true, true, SingleSpc, 2, 2 ,true},
+{ ARM::VLD2LNd8Pseudo, ARM::VLD2LNd8, true, false, false, SingleSpc, 2, 8 ,true},
+{ ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd8_UPD, true, true, true, SingleSpc, 2, 8 ,true},
+{ ARM::VLD2LNq16Pseudo, ARM::VLD2LNq16, true, false, false, EvenDblSpc, 2, 4 ,true},
+{ ARM::VLD2LNq16Pseudo_UPD, ARM::VLD2LNq16_UPD, true, true, true, EvenDblSpc, 2, 4 ,true},
+{ ARM::VLD2LNq32Pseudo, ARM::VLD2LNq32, true, false, false, EvenDblSpc, 2, 2 ,true},
+{ ARM::VLD2LNq32Pseudo_UPD, ARM::VLD2LNq32_UPD, true, true, true, EvenDblSpc, 2, 2 ,true},
+
+{ ARM::VLD2q16Pseudo, ARM::VLD2q16, true, false, false, SingleSpc, 4, 4 ,false},
+{ ARM::VLD2q16PseudoWB_fixed, ARM::VLD2q16wb_fixed, true, true, false, SingleSpc, 4, 4 ,false},
+{ ARM::VLD2q16PseudoWB_register, ARM::VLD2q16wb_register, true, true, true, SingleSpc, 4, 4 ,false},
+{ ARM::VLD2q32Pseudo, ARM::VLD2q32, true, false, false, SingleSpc, 4, 2 ,false},
+{ ARM::VLD2q32PseudoWB_fixed, ARM::VLD2q32wb_fixed, true, true, false, SingleSpc, 4, 2 ,false},
+{ ARM::VLD2q32PseudoWB_register, ARM::VLD2q32wb_register, true, true, true, SingleSpc, 4, 2 ,false},
+{ ARM::VLD2q8Pseudo, ARM::VLD2q8, true, false, false, SingleSpc, 4, 8 ,false},
+{ ARM::VLD2q8PseudoWB_fixed, ARM::VLD2q8wb_fixed, true, true, false, SingleSpc, 4, 8 ,false},
+{ ARM::VLD2q8PseudoWB_register, ARM::VLD2q8wb_register, true, true, true, SingleSpc, 4, 8 ,false},
+
+{ ARM::VLD3DUPd16Pseudo, ARM::VLD3DUPd16, true, false, false, SingleSpc, 3, 4,true},
+{ ARM::VLD3DUPd16Pseudo_UPD, ARM::VLD3DUPd16_UPD, true, true, true, SingleSpc, 3, 4,true},
+{ ARM::VLD3DUPd32Pseudo, ARM::VLD3DUPd32, true, false, false, SingleSpc, 3, 2,true},
+{ ARM::VLD3DUPd32Pseudo_UPD, ARM::VLD3DUPd32_UPD, true, true, true, SingleSpc, 3, 2,true},
+{ ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd8, true, false, false, SingleSpc, 3, 8,true},
+{ ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd8_UPD, true, true, true, SingleSpc, 3, 8,true},
+
+{ ARM::VLD3LNd16Pseudo, ARM::VLD3LNd16, true, false, false, SingleSpc, 3, 4 ,true},
+{ ARM::VLD3LNd16Pseudo_UPD, ARM::VLD3LNd16_UPD, true, true, true, SingleSpc, 3, 4 ,true},
+{ ARM::VLD3LNd32Pseudo, ARM::VLD3LNd32, true, false, false, SingleSpc, 3, 2 ,true},
+{ ARM::VLD3LNd32Pseudo_UPD, ARM::VLD3LNd32_UPD, true, true, true, SingleSpc, 3, 2 ,true},
+{ ARM::VLD3LNd8Pseudo, ARM::VLD3LNd8, true, false, false, SingleSpc, 3, 8 ,true},
+{ ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd8_UPD, true, true, true, SingleSpc, 3, 8 ,true},
+{ ARM::VLD3LNq16Pseudo, ARM::VLD3LNq16, true, false, false, EvenDblSpc, 3, 4 ,true},
+{ ARM::VLD3LNq16Pseudo_UPD, ARM::VLD3LNq16_UPD, true, true, true, EvenDblSpc, 3, 4 ,true},
+{ ARM::VLD3LNq32Pseudo, ARM::VLD3LNq32, true, false, false, EvenDblSpc, 3, 2 ,true},
+{ ARM::VLD3LNq32Pseudo_UPD, ARM::VLD3LNq32_UPD, true, true, true, EvenDblSpc, 3, 2 ,true},
+
+{ ARM::VLD3d16Pseudo, ARM::VLD3d16, true, false, false, SingleSpc, 3, 4 ,true},
+{ ARM::VLD3d16Pseudo_UPD, ARM::VLD3d16_UPD, true, true, true, SingleSpc, 3, 4 ,true},
+{ ARM::VLD3d32Pseudo, ARM::VLD3d32, true, false, false, SingleSpc, 3, 2 ,true},
+{ ARM::VLD3d32Pseudo_UPD, ARM::VLD3d32_UPD, true, true, true, SingleSpc, 3, 2 ,true},
+{ ARM::VLD3d8Pseudo, ARM::VLD3d8, true, false, false, SingleSpc, 3, 8 ,true},
+{ ARM::VLD3d8Pseudo_UPD, ARM::VLD3d8_UPD, true, true, true, SingleSpc, 3, 8 ,true},
+
+{ ARM::VLD3q16Pseudo_UPD, ARM::VLD3q16_UPD, true, true, true, EvenDblSpc, 3, 4 ,true},
+{ ARM::VLD3q16oddPseudo, ARM::VLD3q16, true, false, false, OddDblSpc, 3, 4 ,true},
+{ ARM::VLD3q16oddPseudo_UPD, ARM::VLD3q16_UPD, true, true, true, OddDblSpc, 3, 4 ,true},
+{ ARM::VLD3q32Pseudo_UPD, ARM::VLD3q32_UPD, true, true, true, EvenDblSpc, 3, 2 ,true},
+{ ARM::VLD3q32oddPseudo, ARM::VLD3q32, true, false, false, OddDblSpc, 3, 2 ,true},
+{ ARM::VLD3q32oddPseudo_UPD, ARM::VLD3q32_UPD, true, true, true, OddDblSpc, 3, 2 ,true},
+{ ARM::VLD3q8Pseudo_UPD, ARM::VLD3q8_UPD, true, true, true, EvenDblSpc, 3, 8 ,true},
+{ ARM::VLD3q8oddPseudo, ARM::VLD3q8, true, false, false, OddDblSpc, 3, 8 ,true},
+{ ARM::VLD3q8oddPseudo_UPD, ARM::VLD3q8_UPD, true, true, true, OddDblSpc, 3, 8 ,true},
+
+{ ARM::VLD4DUPd16Pseudo, ARM::VLD4DUPd16, true, false, false, SingleSpc, 4, 4,true},
+{ ARM::VLD4DUPd16Pseudo_UPD, ARM::VLD4DUPd16_UPD, true, true, true, SingleSpc, 4, 4,true},
+{ ARM::VLD4DUPd32Pseudo, ARM::VLD4DUPd32, true, false, false, SingleSpc, 4, 2,true},
+{ ARM::VLD4DUPd32Pseudo_UPD, ARM::VLD4DUPd32_UPD, true, true, true, SingleSpc, 4, 2,true},
+{ ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd8, true, false, false, SingleSpc, 4, 8,true},
+{ ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd8_UPD, true, true, true, SingleSpc, 4, 8,true},
+
+{ ARM::VLD4LNd16Pseudo, ARM::VLD4LNd16, true, false, false, SingleSpc, 4, 4 ,true},
+{ ARM::VLD4LNd16Pseudo_UPD, ARM::VLD4LNd16_UPD, true, true, true, SingleSpc, 4, 4 ,true},
+{ ARM::VLD4LNd32Pseudo, ARM::VLD4LNd32, true, false, false, SingleSpc, 4, 2 ,true},
+{ ARM::VLD4LNd32Pseudo_UPD, ARM::VLD4LNd32_UPD, true, true, true, SingleSpc, 4, 2 ,true},
+{ ARM::VLD4LNd8Pseudo, ARM::VLD4LNd8, true, false, false, SingleSpc, 4, 8 ,true},
+{ ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd8_UPD, true, true, true, SingleSpc, 4, 8 ,true},
+{ ARM::VLD4LNq16Pseudo, ARM::VLD4LNq16, true, false, false, EvenDblSpc, 4, 4 ,true},
+{ ARM::VLD4LNq16Pseudo_UPD, ARM::VLD4LNq16_UPD, true, true, true, EvenDblSpc, 4, 4 ,true},
+{ ARM::VLD4LNq32Pseudo, ARM::VLD4LNq32, true, false, false, EvenDblSpc, 4, 2 ,true},
+{ ARM::VLD4LNq32Pseudo_UPD, ARM::VLD4LNq32_UPD, true, true, true, EvenDblSpc, 4, 2 ,true},
+
+{ ARM::VLD4d16Pseudo, ARM::VLD4d16, true, false, false, SingleSpc, 4, 4 ,true},
+{ ARM::VLD4d16Pseudo_UPD, ARM::VLD4d16_UPD, true, true, true, SingleSpc, 4, 4 ,true},
+{ ARM::VLD4d32Pseudo, ARM::VLD4d32, true, false, false, SingleSpc, 4, 2 ,true},
+{ ARM::VLD4d32Pseudo_UPD, ARM::VLD4d32_UPD, true, true, true, SingleSpc, 4, 2 ,true},
+{ ARM::VLD4d8Pseudo, ARM::VLD4d8, true, false, false, SingleSpc, 4, 8 ,true},
+{ ARM::VLD4d8Pseudo_UPD, ARM::VLD4d8_UPD, true, true, true, SingleSpc, 4, 8 ,true},
+
+{ ARM::VLD4q16Pseudo_UPD, ARM::VLD4q16_UPD, true, true, true, EvenDblSpc, 4, 4 ,true},
+{ ARM::VLD4q16oddPseudo, ARM::VLD4q16, true, false, false, OddDblSpc, 4, 4 ,true},
+{ ARM::VLD4q16oddPseudo_UPD, ARM::VLD4q16_UPD, true, true, true, OddDblSpc, 4, 4 ,true},
+{ ARM::VLD4q32Pseudo_UPD, ARM::VLD4q32_UPD, true, true, true, EvenDblSpc, 4, 2 ,true},
+{ ARM::VLD4q32oddPseudo, ARM::VLD4q32, true, false, false, OddDblSpc, 4, 2 ,true},
+{ ARM::VLD4q32oddPseudo_UPD, ARM::VLD4q32_UPD, true, true, true, OddDblSpc, 4, 2 ,true},
+{ ARM::VLD4q8Pseudo_UPD, ARM::VLD4q8_UPD, true, true, true, EvenDblSpc, 4, 8 ,true},
+{ ARM::VLD4q8oddPseudo, ARM::VLD4q8, true, false, false, OddDblSpc, 4, 8 ,true},
+{ ARM::VLD4q8oddPseudo_UPD, ARM::VLD4q8_UPD, true, true, true, OddDblSpc, 4, 8 ,true},
+
+{ ARM::VST1LNq16Pseudo, ARM::VST1LNd16, false, false, false, EvenDblSpc, 1, 4 ,true},
+{ ARM::VST1LNq16Pseudo_UPD, ARM::VST1LNd16_UPD, false, true, true, EvenDblSpc, 1, 4 ,true},
+{ ARM::VST1LNq32Pseudo, ARM::VST1LNd32, false, false, false, EvenDblSpc, 1, 2 ,true},
+{ ARM::VST1LNq32Pseudo_UPD, ARM::VST1LNd32_UPD, false, true, true, EvenDblSpc, 1, 2 ,true},
+{ ARM::VST1LNq8Pseudo, ARM::VST1LNd8, false, false, false, EvenDblSpc, 1, 8 ,true},
+{ ARM::VST1LNq8Pseudo_UPD, ARM::VST1LNd8_UPD, false, true, true, EvenDblSpc, 1, 8 ,true},
+
+{ ARM::VST1d64QPseudo, ARM::VST1d64Q, false, false, false, SingleSpc, 4, 1 ,false},
+{ ARM::VST1d64QPseudoWB_fixed, ARM::VST1d64Qwb_fixed, false, true, false, SingleSpc, 4, 1 ,false},
+{ ARM::VST1d64QPseudoWB_register, ARM::VST1d64Qwb_register, false, true, true, SingleSpc, 4, 1 ,false},
+{ ARM::VST1d64TPseudo, ARM::VST1d64T, false, false, false, SingleSpc, 3, 1 ,false},
+{ ARM::VST1d64TPseudoWB_fixed, ARM::VST1d64Twb_fixed, false, true, false, SingleSpc, 3, 1 ,false},
+{ ARM::VST1d64TPseudoWB_register, ARM::VST1d64Twb_register, false, true, true, SingleSpc, 3, 1 ,false},
+
+{ ARM::VST2LNd16Pseudo, ARM::VST2LNd16, false, false, false, SingleSpc, 2, 4 ,true},
+{ ARM::VST2LNd16Pseudo_UPD, ARM::VST2LNd16_UPD, false, true, true, SingleSpc, 2, 4 ,true},
+{ ARM::VST2LNd32Pseudo, ARM::VST2LNd32, false, false, false, SingleSpc, 2, 2 ,true},
+{ ARM::VST2LNd32Pseudo_UPD, ARM::VST2LNd32_UPD, false, true, true, SingleSpc, 2, 2 ,true},
+{ ARM::VST2LNd8Pseudo, ARM::VST2LNd8, false, false, false, SingleSpc, 2, 8 ,true},
+{ ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd8_UPD, false, true, true, SingleSpc, 2, 8 ,true},
+{ ARM::VST2LNq16Pseudo, ARM::VST2LNq16, false, false, false, EvenDblSpc, 2, 4,true},
+{ ARM::VST2LNq16Pseudo_UPD, ARM::VST2LNq16_UPD, false, true, true, EvenDblSpc, 2, 4,true},
+{ ARM::VST2LNq32Pseudo, ARM::VST2LNq32, false, false, false, EvenDblSpc, 2, 2,true},
+{ ARM::VST2LNq32Pseudo_UPD, ARM::VST2LNq32_UPD, false, true, true, EvenDblSpc, 2, 2,true},
+
+{ ARM::VST2q16Pseudo, ARM::VST2q16, false, false, false, SingleSpc, 4, 4 ,false},
+{ ARM::VST2q16PseudoWB_fixed, ARM::VST2q16wb_fixed, false, true, false, SingleSpc, 4, 4 ,false},
+{ ARM::VST2q16PseudoWB_register, ARM::VST2q16wb_register, false, true, true, SingleSpc, 4, 4 ,false},
+{ ARM::VST2q32Pseudo, ARM::VST2q32, false, false, false, SingleSpc, 4, 2 ,false},
+{ ARM::VST2q32PseudoWB_fixed, ARM::VST2q32wb_fixed, false, true, false, SingleSpc, 4, 2 ,false},
+{ ARM::VST2q32PseudoWB_register, ARM::VST2q32wb_register, false, true, true, SingleSpc, 4, 2 ,false},
+{ ARM::VST2q8Pseudo, ARM::VST2q8, false, false, false, SingleSpc, 4, 8 ,false},
+{ ARM::VST2q8PseudoWB_fixed, ARM::VST2q8wb_fixed, false, true, false, SingleSpc, 4, 8 ,false},
+{ ARM::VST2q8PseudoWB_register, ARM::VST2q8wb_register, false, true, true, SingleSpc, 4, 8 ,false},
+
+{ ARM::VST3LNd16Pseudo, ARM::VST3LNd16, false, false, false, SingleSpc, 3, 4 ,true},
+{ ARM::VST3LNd16Pseudo_UPD, ARM::VST3LNd16_UPD, false, true, true, SingleSpc, 3, 4 ,true},
+{ ARM::VST3LNd32Pseudo, ARM::VST3LNd32, false, false, false, SingleSpc, 3, 2 ,true},
+{ ARM::VST3LNd32Pseudo_UPD, ARM::VST3LNd32_UPD, false, true, true, SingleSpc, 3, 2 ,true},
+{ ARM::VST3LNd8Pseudo, ARM::VST3LNd8, false, false, false, SingleSpc, 3, 8 ,true},
+{ ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd8_UPD, false, true, true, SingleSpc, 3, 8 ,true},
+{ ARM::VST3LNq16Pseudo, ARM::VST3LNq16, false, false, false, EvenDblSpc, 3, 4,true},
+{ ARM::VST3LNq16Pseudo_UPD, ARM::VST3LNq16_UPD, false, true, true, EvenDblSpc, 3, 4,true},
+{ ARM::VST3LNq32Pseudo, ARM::VST3LNq32, false, false, false, EvenDblSpc, 3, 2,true},
+{ ARM::VST3LNq32Pseudo_UPD, ARM::VST3LNq32_UPD, false, true, true, EvenDblSpc, 3, 2,true},
+
+{ ARM::VST3d16Pseudo, ARM::VST3d16, false, false, false, SingleSpc, 3, 4 ,true},
+{ ARM::VST3d16Pseudo_UPD, ARM::VST3d16_UPD, false, true, true, SingleSpc, 3, 4 ,true},
+{ ARM::VST3d32Pseudo, ARM::VST3d32, false, false, false, SingleSpc, 3, 2 ,true},
+{ ARM::VST3d32Pseudo_UPD, ARM::VST3d32_UPD, false, true, true, SingleSpc, 3, 2 ,true},
+{ ARM::VST3d8Pseudo, ARM::VST3d8, false, false, false, SingleSpc, 3, 8 ,true},
+{ ARM::VST3d8Pseudo_UPD, ARM::VST3d8_UPD, false, true, true, SingleSpc, 3, 8 ,true},
+
+{ ARM::VST3q16Pseudo_UPD, ARM::VST3q16_UPD, false, true, true, EvenDblSpc, 3, 4 ,true},
+{ ARM::VST3q16oddPseudo, ARM::VST3q16, false, false, false, OddDblSpc, 3, 4 ,true},
+{ ARM::VST3q16oddPseudo_UPD, ARM::VST3q16_UPD, false, true, true, OddDblSpc, 3, 4 ,true},
+{ ARM::VST3q32Pseudo_UPD, ARM::VST3q32_UPD, false, true, true, EvenDblSpc, 3, 2 ,true},
+{ ARM::VST3q32oddPseudo, ARM::VST3q32, false, false, false, OddDblSpc, 3, 2 ,true},
+{ ARM::VST3q32oddPseudo_UPD, ARM::VST3q32_UPD, false, true, true, OddDblSpc, 3, 2 ,true},
+{ ARM::VST3q8Pseudo_UPD, ARM::VST3q8_UPD, false, true, true, EvenDblSpc, 3, 8 ,true},
+{ ARM::VST3q8oddPseudo, ARM::VST3q8, false, false, false, OddDblSpc, 3, 8 ,true},
+{ ARM::VST3q8oddPseudo_UPD, ARM::VST3q8_UPD, false, true, true, OddDblSpc, 3, 8 ,true},
+
+{ ARM::VST4LNd16Pseudo, ARM::VST4LNd16, false, false, false, SingleSpc, 4, 4 ,true},
+{ ARM::VST4LNd16Pseudo_UPD, ARM::VST4LNd16_UPD, false, true, true, SingleSpc, 4, 4 ,true},
+{ ARM::VST4LNd32Pseudo, ARM::VST4LNd32, false, false, false, SingleSpc, 4, 2 ,true},
+{ ARM::VST4LNd32Pseudo_UPD, ARM::VST4LNd32_UPD, false, true, true, SingleSpc, 4, 2 ,true},
+{ ARM::VST4LNd8Pseudo, ARM::VST4LNd8, false, false, false, SingleSpc, 4, 8 ,true},
+{ ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd8_UPD, false, true, true, SingleSpc, 4, 8 ,true},
+{ ARM::VST4LNq16Pseudo, ARM::VST4LNq16, false, false, false, EvenDblSpc, 4, 4,true},
+{ ARM::VST4LNq16Pseudo_UPD, ARM::VST4LNq16_UPD, false, true, true, EvenDblSpc, 4, 4,true},
+{ ARM::VST4LNq32Pseudo, ARM::VST4LNq32, false, false, false, EvenDblSpc, 4, 2,true},
+{ ARM::VST4LNq32Pseudo_UPD, ARM::VST4LNq32_UPD, false, true, true, EvenDblSpc, 4, 2,true},
+
+{ ARM::VST4d16Pseudo, ARM::VST4d16, false, false, false, SingleSpc, 4, 4 ,true},
+{ ARM::VST4d16Pseudo_UPD, ARM::VST4d16_UPD, false, true, true, SingleSpc, 4, 4 ,true},
+{ ARM::VST4d32Pseudo, ARM::VST4d32, false, false, false, SingleSpc, 4, 2 ,true},
+{ ARM::VST4d32Pseudo_UPD, ARM::VST4d32_UPD, false, true, true, SingleSpc, 4, 2 ,true},
+{ ARM::VST4d8Pseudo, ARM::VST4d8, false, false, false, SingleSpc, 4, 8 ,true},
+{ ARM::VST4d8Pseudo_UPD, ARM::VST4d8_UPD, false, true, true, SingleSpc, 4, 8 ,true},
+
+{ ARM::VST4q16Pseudo_UPD, ARM::VST4q16_UPD, false, true, true, EvenDblSpc, 4, 4 ,true},
+{ ARM::VST4q16oddPseudo, ARM::VST4q16, false, false, false, OddDblSpc, 4, 4 ,true},
+{ ARM::VST4q16oddPseudo_UPD, ARM::VST4q16_UPD, false, true, true, OddDblSpc, 4, 4 ,true},
+{ ARM::VST4q32Pseudo_UPD, ARM::VST4q32_UPD, false, true, true, EvenDblSpc, 4, 2 ,true},
+{ ARM::VST4q32oddPseudo, ARM::VST4q32, false, false, false, OddDblSpc, 4, 2 ,true},
+{ ARM::VST4q32oddPseudo_UPD, ARM::VST4q32_UPD, false, true, true, OddDblSpc, 4, 2 ,true},
+{ ARM::VST4q8Pseudo_UPD, ARM::VST4q8_UPD, false, true, true, EvenDblSpc, 4, 8 ,true},
+{ ARM::VST4q8oddPseudo, ARM::VST4q8, false, false, false, OddDblSpc, 4, 8 ,true},
+{ ARM::VST4q8oddPseudo_UPD, ARM::VST4q8_UPD, false, true, true, OddDblSpc, 4, 8 ,true}
};
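
// Editor's note: a sketch of the entry layout implied by the table columns
// above. Only RealOpc, isUpdating, hasWritebackOperand, and copyAllListRegs
// are confirmed by the expansion code later in this diff; the remaining
// field and type names are assumptions, since the struct definition sits
// outside the lines shown here.
//
//   struct NEONLdStTableEntry {
//     unsigned PseudoOpc;       // pseudo opcode to expand (assumed name)
//     unsigned RealOpc;         // real NEON instruction it becomes
//     bool IsLoad;              // true for VLDn, false for VSTn (assumed)
//     bool isUpdating;          // writes back its base address register
//     bool hasWritebackOperand; // explicit am6offset operand to copy
//     NEONRegSpacing RegSpc;    // SingleSpc / EvenDblSpc / OddDblSpc
//     unsigned char NumRegs;    // D registers in the register list
//     unsigned char RegElts;    // elements per D register (assumed name)
//     bool copyAllListRegs;     // copy D1..D3 list operands too, or only D0
//   };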
/// LookupNEONLdSt - Search the NEONLdStTable for information about a NEON
/// load or store pseudo instruction.
static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
- unsigned NumEntries = array_lengthof(NEONLdStTable);
+ const unsigned NumEntries = array_lengthof(NEONLdStTable);
#ifndef NDEBUG
// Make sure the table is sorted.
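
// Editor's note: the NDEBUG block above only asserts the ordering; the
// lookup body itself falls outside this hunk. A minimal sketch of what a
// sorted-table lookup can look like, assuming entries are ordered by a
// PseudoOpc field:
//
//   unsigned Lo = 0, Hi = NumEntries;
//   while (Lo < Hi) {
//     unsigned Mid = Lo + (Hi - Lo) / 2;   // binary search, no overflow
//     if (NEONLdStTable[Mid].PseudoOpc < Opcode)
//       Lo = Mid + 1;
//     else
//       Hi = Mid;
//   }
//   if (Lo != NumEntries && NEONLdStTable[Lo].PseudoOpc == Opcode)
//     return &NEONLdStTable[Lo];
//   return 0;  // Opcode is not a table-driven pseudo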
@@ -422,21 +388,22 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
unsigned DstReg = MI.getOperand(OpIdx++).getReg();
unsigned D0, D1, D2, D3;
GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
- MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead))
- .addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
- if (NumRegs > 2)
+ MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead));
+ if (NumRegs > 1 && TableEntry->copyAllListRegs)
+ MIB.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
+ if (NumRegs > 2 && TableEntry->copyAllListRegs)
MIB.addReg(D2, RegState::Define | getDeadRegState(DstIsDead));
- if (NumRegs > 3)
+ if (NumRegs > 3 && TableEntry->copyAllListRegs)
MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
- if (TableEntry->HasWriteBack)
+ if (TableEntry->isUpdating)
MIB.addOperand(MI.getOperand(OpIdx++));
// Copy the addrmode6 operands.
MIB.addOperand(MI.getOperand(OpIdx++));
MIB.addOperand(MI.getOperand(OpIdx++));
// Copy the am6offset operand.
- if (TableEntry->HasWriteBack)
+ if (TableEntry->hasWritebackOperand)
MIB.addOperand(MI.getOperand(OpIdx++));
// For an instruction writing double-spaced subregs, the pseudo instruction
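
// Editor's note: the register-list copying rewritten in this hunk only
// makes sense against the pseudo's operand order. As read from the OpIdx
// walk above (an inference, not an authoritative statement of the layout):
//
//   0: Q/QQ/QQQQ destination register
//   1: base-register writeback def   (present only when isUpdating)
//   2: addrmode6 base register
//   3: addrmode6 alignment immediate
//   4: am6offset operand             (present only when hasWritebackOperand)
//
// With copyAllListRegs false, only D0 is added as an explicit list operand
// and the remaining subregisters are implied by the real instruction.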
@@ -481,24 +448,26 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(TableEntry->RealOpc));
unsigned OpIdx = 0;
- if (TableEntry->HasWriteBack)
+ if (TableEntry->isUpdating)
MIB.addOperand(MI.getOperand(OpIdx++));
// Copy the addrmode6 operands.
MIB.addOperand(MI.getOperand(OpIdx++));
MIB.addOperand(MI.getOperand(OpIdx++));
// Copy the am6offset operand.
- if (TableEntry->HasWriteBack)
+ if (TableEntry->hasWritebackOperand)
MIB.addOperand(MI.getOperand(OpIdx++));
bool SrcIsKill = MI.getOperand(OpIdx).isKill();
unsigned SrcReg = MI.getOperand(OpIdx++).getReg();
unsigned D0, D1, D2, D3;
GetDSubRegs(SrcReg, RegSpc, TRI, D0, D1, D2, D3);
- MIB.addReg(D0).addReg(D1);
- if (NumRegs > 2)
+ MIB.addReg(D0);
+ if (NumRegs > 1 && TableEntry->copyAllListRegs)
+ MIB.addReg(D1);
+ if (NumRegs > 2 && TableEntry->copyAllListRegs)
MIB.addReg(D2);
- if (NumRegs > 3)
+ if (NumRegs > 3 && TableEntry->copyAllListRegs)
MIB.addReg(D3);
// Copy the predicate operands.
@@ -558,14 +527,14 @@ void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
}
- if (TableEntry->HasWriteBack)
+ if (TableEntry->isUpdating)
MIB.addOperand(MI.getOperand(OpIdx++));
// Copy the addrmode6 operands.
MIB.addOperand(MI.getOperand(OpIdx++));
MIB.addOperand(MI.getOperand(OpIdx++));
// Copy the am6offset operand.
- if (TableEntry->HasWriteBack)
+ if (TableEntry->hasWritebackOperand)
MIB.addOperand(MI.getOperand(OpIdx++));
// Grab the super-register source.
@@ -599,13 +568,15 @@ void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
// Add an implicit def for the super-register.
MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
TransferImpOps(MI, MIB, MIB);
+ // Transfer memoperands.
+ MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MI.eraseFromParent();
}
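
// Editor's note on the setMemRefs call added above (and in the ExpandMI
// cases below), based on general LLVM behavior rather than anything stated
// in this diff: keeping the MachineMemOperands attached when a pseudo is
// replaced preserves size, alignment, and alias information, so later
// passes such as post-RA scheduling can still reason about the access
// instead of treating it as an unknown memory reference.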
/// ExpandVTBL - Translate VTBL and VTBX pseudo instructions with Q or QQ
/// register operands to real instructions with D register operands.
void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
- unsigned Opc, bool IsExt, unsigned NumRegs) {
+ unsigned Opc, bool IsExt) {
MachineInstr &MI = *MBBI;
MachineBasicBlock &MBB = *MI.getParent();
@@ -621,11 +592,7 @@ void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
unsigned SrcReg = MI.getOperand(OpIdx++).getReg();
unsigned D0, D1, D2, D3;
GetDSubRegs(SrcReg, SingleSpc, TRI, D0, D1, D2, D3);
- MIB.addReg(D0).addReg(D1);
- if (NumRegs > 2)
- MIB.addReg(D2);
- if (NumRegs > 3)
- MIB.addReg(D3);
+ MIB.addReg(D0);
// Copy the other source register operand.
MIB.addOperand(MI.getOperand(OpIdx++));
@@ -645,7 +612,7 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(&MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(&MI, PredReg);
unsigned DstReg = MI.getOperand(0).getReg();
bool DstIsDead = MI.getOperand(0).isDead();
bool isCC = Opcode == ARM::MOVCCi32imm || Opcode == ARM::t2MOVCCi32imm;
@@ -809,7 +776,9 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MI.eraseFromParent();
return true;
}
- case ARM::Int_eh_sjlj_dispatchsetup: {
+ case ARM::Int_eh_sjlj_dispatchsetup:
+ case ARM::Int_eh_sjlj_dispatchsetup_nofp:
+ case ARM::tInt_eh_sjlj_dispatchsetup: {
MachineFunction &MF = *MI.getParent()->getParent();
const ARMBaseInstrInfo *AII =
static_cast<const ARMBaseInstrInfo*>(TII);
@@ -824,15 +793,15 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
"base pointer without frame pointer?");
if (AFI->isThumb2Function()) {
- llvm::emitT2RegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
- FramePtr, -NumBytes, ARMCC::AL, 0, *TII);
+ emitT2RegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
+ FramePtr, -NumBytes, ARMCC::AL, 0, *TII);
} else if (AFI->isThumbFunction()) {
- llvm::emitThumbRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
- FramePtr, -NumBytes, *TII, RI);
+ emitThumbRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
+ FramePtr, -NumBytes, *TII, RI);
} else {
- llvm::emitARMRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
- FramePtr, -NumBytes, ARMCC::AL, 0,
- *TII);
+ emitARMRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
+ FramePtr, -NumBytes, ARMCC::AL, 0,
+ *TII);
}
// If there's dynamic realignment, adjust for it.
if (RI.needsStackRealignment(MF)) {
@@ -996,6 +965,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// Add an implicit def for the super-register.
MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
TransferImpOps(MI, MIB, MIB);
+ MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MI.eraseFromParent();
return true;
}
@@ -1026,6 +996,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MIB->addRegisterKilled(SrcReg, TRI, true);
TransferImpOps(MI, MIB, MIB);
+ MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
MI.eraseFromParent();
return true;
}
@@ -1057,26 +1028,15 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
return true;
}
- case ARM::VLD1q8Pseudo:
- case ARM::VLD1q16Pseudo:
- case ARM::VLD1q32Pseudo:
- case ARM::VLD1q64Pseudo:
- case ARM::VLD1q8Pseudo_UPD:
- case ARM::VLD1q16Pseudo_UPD:
- case ARM::VLD1q32Pseudo_UPD:
- case ARM::VLD1q64Pseudo_UPD:
- case ARM::VLD2d8Pseudo:
- case ARM::VLD2d16Pseudo:
- case ARM::VLD2d32Pseudo:
case ARM::VLD2q8Pseudo:
case ARM::VLD2q16Pseudo:
case ARM::VLD2q32Pseudo:
- case ARM::VLD2d8Pseudo_UPD:
- case ARM::VLD2d16Pseudo_UPD:
- case ARM::VLD2d32Pseudo_UPD:
- case ARM::VLD2q8Pseudo_UPD:
- case ARM::VLD2q16Pseudo_UPD:
- case ARM::VLD2q32Pseudo_UPD:
+ case ARM::VLD2q8PseudoWB_fixed:
+ case ARM::VLD2q16PseudoWB_fixed:
+ case ARM::VLD2q32PseudoWB_fixed:
+ case ARM::VLD2q8PseudoWB_register:
+ case ARM::VLD2q16PseudoWB_register:
+ case ARM::VLD2q32PseudoWB_register:
case ARM::VLD3d8Pseudo:
case ARM::VLD3d16Pseudo:
case ARM::VLD3d32Pseudo:
@@ -1084,7 +1044,6 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
case ARM::VLD3d8Pseudo_UPD:
case ARM::VLD3d16Pseudo_UPD:
case ARM::VLD3d32Pseudo_UPD:
- case ARM::VLD1d64TPseudo_UPD:
case ARM::VLD3q8Pseudo_UPD:
case ARM::VLD3q16Pseudo_UPD:
case ARM::VLD3q32Pseudo_UPD:
@@ -1101,7 +1060,6 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
case ARM::VLD4d8Pseudo_UPD:
case ARM::VLD4d16Pseudo_UPD:
case ARM::VLD4d32Pseudo_UPD:
- case ARM::VLD1d64QPseudo_UPD:
case ARM::VLD4q8Pseudo_UPD:
case ARM::VLD4q16Pseudo_UPD:
case ARM::VLD4q32Pseudo_UPD:
@@ -1111,18 +1069,6 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
case ARM::VLD4q8oddPseudo_UPD:
case ARM::VLD4q16oddPseudo_UPD:
case ARM::VLD4q32oddPseudo_UPD:
- case ARM::VLD1DUPq8Pseudo:
- case ARM::VLD1DUPq16Pseudo:
- case ARM::VLD1DUPq32Pseudo:
- case ARM::VLD1DUPq8Pseudo_UPD:
- case ARM::VLD1DUPq16Pseudo_UPD:
- case ARM::VLD1DUPq32Pseudo_UPD:
- case ARM::VLD2DUPd8Pseudo:
- case ARM::VLD2DUPd16Pseudo:
- case ARM::VLD2DUPd32Pseudo:
- case ARM::VLD2DUPd8Pseudo_UPD:
- case ARM::VLD2DUPd16Pseudo_UPD:
- case ARM::VLD2DUPd32Pseudo_UPD:
case ARM::VLD3DUPd8Pseudo:
case ARM::VLD3DUPd16Pseudo:
case ARM::VLD3DUPd32Pseudo:
@@ -1138,26 +1084,15 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
ExpandVLD(MBBI);
return true;
- case ARM::VST1q8Pseudo:
- case ARM::VST1q16Pseudo:
- case ARM::VST1q32Pseudo:
- case ARM::VST1q64Pseudo:
- case ARM::VST1q8Pseudo_UPD:
- case ARM::VST1q16Pseudo_UPD:
- case ARM::VST1q32Pseudo_UPD:
- case ARM::VST1q64Pseudo_UPD:
- case ARM::VST2d8Pseudo:
- case ARM::VST2d16Pseudo:
- case ARM::VST2d32Pseudo:
case ARM::VST2q8Pseudo:
case ARM::VST2q16Pseudo:
case ARM::VST2q32Pseudo:
- case ARM::VST2d8Pseudo_UPD:
- case ARM::VST2d16Pseudo_UPD:
- case ARM::VST2d32Pseudo_UPD:
- case ARM::VST2q8Pseudo_UPD:
- case ARM::VST2q16Pseudo_UPD:
- case ARM::VST2q32Pseudo_UPD:
+ case ARM::VST2q8PseudoWB_fixed:
+ case ARM::VST2q16PseudoWB_fixed:
+ case ARM::VST2q32PseudoWB_fixed:
+ case ARM::VST2q8PseudoWB_register:
+ case ARM::VST2q16PseudoWB_register:
+ case ARM::VST2q32PseudoWB_register:
case ARM::VST3d8Pseudo:
case ARM::VST3d16Pseudo:
case ARM::VST3d32Pseudo:
@@ -1165,7 +1100,8 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
case ARM::VST3d8Pseudo_UPD:
case ARM::VST3d16Pseudo_UPD:
case ARM::VST3d32Pseudo_UPD:
- case ARM::VST1d64TPseudo_UPD:
+ case ARM::VST1d64TPseudoWB_fixed:
+ case ARM::VST1d64TPseudoWB_register:
case ARM::VST3q8Pseudo_UPD:
case ARM::VST3q16Pseudo_UPD:
case ARM::VST3q32Pseudo_UPD:
@@ -1182,7 +1118,8 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
case ARM::VST4d8Pseudo_UPD:
case ARM::VST4d16Pseudo_UPD:
case ARM::VST4d32Pseudo_UPD:
- case ARM::VST1d64QPseudo_UPD:
+ case ARM::VST1d64QPseudoWB_fixed:
+ case ARM::VST1d64QPseudoWB_register:
case ARM::VST4q8Pseudo_UPD:
case ARM::VST4q16Pseudo_UPD:
case ARM::VST4q32Pseudo_UPD:
@@ -1270,15 +1207,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
ExpandLaneOp(MBBI);
return true;
- case ARM::VTBL2Pseudo: ExpandVTBL(MBBI, ARM::VTBL2, false, 2); return true;
- case ARM::VTBL3Pseudo: ExpandVTBL(MBBI, ARM::VTBL3, false, 3); return true;
- case ARM::VTBL4Pseudo: ExpandVTBL(MBBI, ARM::VTBL4, false, 4); return true;
- case ARM::VTBX2Pseudo: ExpandVTBL(MBBI, ARM::VTBX2, true, 2); return true;
- case ARM::VTBX3Pseudo: ExpandVTBL(MBBI, ARM::VTBX3, true, 3); return true;
- case ARM::VTBX4Pseudo: ExpandVTBL(MBBI, ARM::VTBX4, true, 4); return true;
+ case ARM::VTBL3Pseudo: ExpandVTBL(MBBI, ARM::VTBL3, false); return true;
+ case ARM::VTBL4Pseudo: ExpandVTBL(MBBI, ARM::VTBL4, false); return true;
+ case ARM::VTBX3Pseudo: ExpandVTBL(MBBI, ARM::VTBX3, true); return true;
+ case ARM::VTBX4Pseudo: ExpandVTBL(MBBI, ARM::VTBX4, true); return true;
}
-
- return false;
}
bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
diff --git a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
index dc8e54d..2e1eaca 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -16,7 +16,6 @@
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
-#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
@@ -37,7 +36,6 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -90,7 +88,7 @@ class ARMFastISel : public FastISel {
ARMFunctionInfo *AFI;
// Convenience variables to avoid some queries.
- bool isThumb;
+ bool isThumb2;
LLVMContext *Context;
public:
@@ -101,7 +99,7 @@ class ARMFastISel : public FastISel {
TLI(*TM.getTargetLowering()) {
Subtarget = &TM.getSubtarget<ARMSubtarget>();
AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
- isThumb = AFI->isThumbFunction();
+ isThumb2 = AFI->isThumbFunction();
Context = &funcInfo.Fn->getContext();
}
@@ -148,6 +146,8 @@ class ARMFastISel : public FastISel {
virtual bool TargetSelectInstruction(const Instruction *I);
virtual unsigned TargetMaterializeConstant(const Constant *C);
virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
+ virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI);
#include "ARMGenFastISel.inc"
@@ -156,27 +156,40 @@ class ARMFastISel : public FastISel {
bool SelectLoad(const Instruction *I);
bool SelectStore(const Instruction *I);
bool SelectBranch(const Instruction *I);
+ bool SelectIndirectBr(const Instruction *I);
bool SelectCmp(const Instruction *I);
bool SelectFPExt(const Instruction *I);
bool SelectFPTrunc(const Instruction *I);
- bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
- bool SelectSIToFP(const Instruction *I);
- bool SelectFPToSI(const Instruction *I);
- bool SelectSDiv(const Instruction *I);
- bool SelectSRem(const Instruction *I);
- bool SelectCall(const Instruction *I);
+ bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
+ bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
+ bool SelectIToFP(const Instruction *I, bool isSigned);
+ bool SelectFPToI(const Instruction *I, bool isSigned);
+ bool SelectDiv(const Instruction *I, bool isSigned);
+ bool SelectRem(const Instruction *I, bool isSigned);
+ bool SelectCall(const Instruction *I, const char *IntrMemName);
+ bool SelectIntrinsicCall(const IntrinsicInst &I);
bool SelectSelect(const Instruction *I);
bool SelectRet(const Instruction *I);
- bool SelectIntCast(const Instruction *I);
+ bool SelectTrunc(const Instruction *I);
+ bool SelectIntExt(const Instruction *I);
// Utility routines.
private:
bool isTypeLegal(Type *Ty, MVT &VT);
bool isLoadTypeLegal(Type *Ty, MVT &VT);
- bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr);
- bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
+ bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
+ bool isZExt);
+ bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
+ unsigned Alignment = 0, bool isZExt = true,
+ bool allocReg = true);
+
+ bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
+ unsigned Alignment = 0);
bool ARMComputeAddress(const Value *Obj, Address &Addr);
- void ARMSimplifyAddress(Address &Addr, EVT VT);
+ void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
+ bool ARMIsMemCpySmall(uint64_t Len);
+ bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
+ unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
unsigned ARMMaterializeInt(const Constant *C, EVT VT);
unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
@@ -186,8 +199,6 @@ class ARMFastISel : public FastISel {
// Call handling routines.
private:
- bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
- unsigned &ResultReg);
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
SmallVectorImpl<unsigned> &ArgRegs,
@@ -208,7 +219,7 @@ class ARMFastISel : public FastISel {
const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
void AddLoadStoreOperands(EVT VT, Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags);
+ unsigned Flags, bool useAM3);
};
} // end anonymous namespace
@@ -219,8 +230,7 @@ class ARMFastISel : public FastISel {
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
- const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.hasOptionalDef())
+ if (!MI->hasOptionalDef())
return false;
// Look to see if our OptionalDef is defining CPSR or CCR.
@@ -290,10 +300,10 @@ unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- if (II.getNumDefs() >= 1)
+ if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill));
- else {
+ } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -310,11 +320,11 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- if (II.getNumDefs() >= 1)
+ if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill));
- else {
+ } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill));
@@ -333,12 +343,12 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- if (II.getNumDefs() >= 1)
+ if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addReg(Op2, Op2IsKill * RegState::Kill));
- else {
+ } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
@@ -357,11 +367,11 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- if (II.getNumDefs() >= 1)
+ if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm));
- else {
+ } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm));
@@ -379,11 +389,11 @@ unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- if (II.getNumDefs() >= 1)
+ if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm));
- else {
+ } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm));
@@ -402,12 +412,12 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- if (II.getNumDefs() >= 1)
+ if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm));
- else {
+ } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
@@ -425,10 +435,10 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- if (II.getNumDefs() >= 1)
+ if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addImm(Imm));
- else {
+ } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addImm(Imm));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -444,10 +454,10 @@ unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- if (II.getNumDefs() >= 1)
+ if (II.getNumDefs() >= 1) {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addImm(Imm1).addImm(Imm2));
- else {
+ } else {
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addImm(Imm1).addImm(Imm2));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -464,9 +474,10 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
"Cannot yet extract from physregs");
+
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(TargetOpcode::COPY), ResultReg)
- .addReg(Op0, getKillRegState(Op0IsKill), Idx));
+ DL, TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(Op0, getKillRegState(Op0IsKill), Idx));
return ResultReg;
}
@@ -477,7 +488,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(ARM::VMOVRS), MoveReg)
+ TII.get(ARM::VMOVSR), MoveReg)
.addReg(SrcReg));
return MoveReg;
}
@@ -487,7 +498,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(ARM::VMOVSR), MoveReg)
+ TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
return MoveReg;
}
@@ -541,22 +552,42 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
- // For now 32-bit only.
- if (VT != MVT::i32) return false;
-
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
+ return false;
// If we can do this in a single instruction without a constant pool entry
// do so now.
const ConstantInt *CI = cast<ConstantInt>(C);
- if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getSExtValue())) {
- unsigned Opc = isThumb ? ARM::t2MOVi16 : ARM::MOVi16;
+ if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
+ unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
+ unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(Opc), DestReg)
- .addImm(CI->getSExtValue()));
- return DestReg;
+ TII.get(Opc), ImmReg)
+ .addImm(CI->getZExtValue()));
+ return ImmReg;
}
+ // Use MVN to emit negative constants.
+ if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
+ unsigned Imm = (unsigned)~(CI->getSExtValue());
+ bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ (ARM_AM::getSOImmVal(Imm) != -1);
+ if (UseImm) {
+ unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
+ unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ImmReg)
+ .addImm(Imm));
+ return ImmReg;
+ }
+ }
+
+ // Load from constant pool. For now 32-bit only.
+ if (VT != MVT::i32)
+ return false;
+
+ unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+
// MachineConstantPool wants an explicit alignment.
unsigned Align = TD.getPrefTypeAlignment(C->getType());
if (Align == 0) {
@@ -565,7 +596,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
}
unsigned Idx = MCP.getConstantPoolIndex(C, Align);
- if (isThumb)
+ if (isThumb2)
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::t2LDRpci), DestReg)
.addConstantPoolIndex(Idx));
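
// Editor's note: a worked example for the MVN path added in the hunk above
// (register name illustrative). Materializing the i32 constant -5:
//   Imm = (unsigned)~(-5) = 4          // ~0xFFFFFFFB == 0x00000004
//   getSOImmVal(4) != -1               // 4 encodes as a modified immediate
//   emit: mvn r0, #4                   // r0 = ~4 = 0xFFFFFFFB = -5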
@@ -586,44 +617,69 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
Reloc::Model RelocM = TM.getRelocationModel();
// TODO: Need more magic for ARM PIC.
- if (!isThumb && (RelocM == Reloc::PIC_)) return 0;
-
- // MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(GV->getType());
- if (Align == 0) {
- // TODO: Figure out if this is correct.
- Align = TD.getTypeAllocSize(GV->getType());
- }
+ if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;
- // Grab index.
- unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
- unsigned Id = AFI->createPICLabelUId();
- ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
- ARMCP::CPValue,
- PCAdj);
- unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
-
- // Load value.
- MachineInstrBuilder MIB;
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
- if (isThumb) {
- unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
- .addConstantPoolIndex(Idx);
- if (RelocM == Reloc::PIC_)
- MIB.addImm(Id);
+
+ // Use movw+movt when possible, it avoids constant pool entries.
+ // Darwin targets don't support movt with Reloc::Static, see
+ // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support
+ // static movt relocations.
+ if (Subtarget->useMovt() &&
+ Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) {
+ unsigned Opc;
+ switch (RelocM) {
+ case Reloc::PIC_:
+ Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
+ break;
+ case Reloc::DynamicNoPIC:
+ Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn;
+ break;
+ default:
+ Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
+ break;
+ }
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
+ DestReg).addGlobalAddress(GV));
} else {
- // The extra immediate is for addrmode2.
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
- DestReg)
- .addConstantPoolIndex(Idx)
- .addImm(0);
+ // MachineConstantPool wants an explicit alignment.
+ unsigned Align = TD.getPrefTypeAlignment(GV->getType());
+ if (Align == 0) {
+ // TODO: Figure out if this is correct.
+ Align = TD.getTypeAllocSize(GV->getType());
+ }
+
+ // Grab index.
+ unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 :
+ (Subtarget->isThumb() ? 4 : 8);
+ unsigned Id = AFI->createPICLabelUId();
+ ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
+ ARMCP::CPValue,
+ PCAdj);
+ unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);
+
+ // Load value.
+ MachineInstrBuilder MIB;
+ if (isThumb2) {
+ unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ .addConstantPoolIndex(Idx);
+ if (RelocM == Reloc::PIC_)
+ MIB.addImm(Id);
+ } else {
+ // The extra immediate is for addrmode2.
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
+ DestReg)
+ .addConstantPoolIndex(Idx)
+ .addImm(0);
+ }
+ AddOptionalDefs(MIB);
}
- AddOptionalDefs(MIB);
if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
+ MachineInstrBuilder MIB;
unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
- if (isThumb)
+ if (isThumb2)
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::t2LDRi12), NewDestReg)
.addReg(DestReg)
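
// Editor's note on the movw+movt branch added in this hunk: MOVi32imm and
// t2MOVi32imm are themselves pseudos that later expand to a two-instruction
// pair, roughly (symbol name illustrative):
//   movw r0, :lower16:some_global   @ low 16 bits of the address
//   movt r0, :upper16:some_global   @ high 16 bits
// versus the constant-pool fallback, which costs a load plus a pool entry.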
@@ -656,6 +712,8 @@ unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
return 0;
}
+// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);
+
unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
// Don't handle dynamic allocas.
if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;
@@ -669,10 +727,10 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
// This will get lowered later into the correct offsets and registers
// via rewriteXFrameIndex.
if (SI != FuncInfo.StaticAllocaMap.end()) {
- TargetRegisterClass* RC = TLI.getRegClassFor(VT);
+ const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
unsigned ResultReg = createResultReg(RC);
- unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
+ unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
.addFrameIndex(SI->second)
.addImm(0));
@@ -699,7 +757,7 @@ bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
// If this is a type than can be sign or zero-extended to a basic operation
// go ahead and accept it now.
- if (VT == MVT::i8 || VT == MVT::i16)
+ if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
return true;
return false;
@@ -813,35 +871,33 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
}
}
- // Materialize the global variable's address into a reg which can
- // then be used later to load the variable.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
- unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
- if (Tmp == 0) return false;
-
- Addr.Base.Reg = Tmp;
- return true;
- }
-
// Try to get this in a register if nothing else has worked.
if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
return Addr.Base.Reg != 0;
}
-void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
+void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
assert(VT.isSimple() && "Non-simple types are invalid here!");
bool needsLowering = false;
switch (VT.getSimpleVT().SimpleTy) {
- default:
- assert(false && "Unhandled load/store type!");
+ default: llvm_unreachable("Unhandled load/store type!");
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
- // Integer loads/stores handle 12-bit offsets.
- needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
+ if (!useAM3) {
+ // Integer loads/stores handle 12-bit offsets.
+ needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
+ // Handle negative offsets.
+ if (needsLowering && isThumb2)
+ needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
+ Addr.Offset > -256);
+ } else {
+ // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
+ needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
+ }
break;
case MVT::f32:
case MVT::f64:
@@ -854,11 +910,11 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
// put the alloca address into a register, set the base type back to
// register and continue. This should almost never happen.
if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
- TargetRegisterClass *RC = isThumb ? ARM::tGPRRegisterClass :
- ARM::GPRRegisterClass;
+ const TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass
+ : ARM::GPRRegisterClass;
unsigned ResultReg = createResultReg(RC);
- unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
+ unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
.addFrameIndex(Addr.Base.FI)
.addImm(0));
@@ -877,7 +933,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT) {
void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
const MachineInstrBuilder &MIB,
- unsigned Flags) {
+ unsigned Flags, bool useAM3) {
// addrmode5 output depends on the selection dag addressing dividing the
// offset by 4 that it then later multiplies. Do this here as well.
if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
@@ -897,60 +953,127 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
// Now add the rest of the operands.
MIB.addFrameIndex(FI);
- // ARM halfword load/stores need an additional operand.
- if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
-
- MIB.addImm(Addr.Offset);
+ // ARM halfword load/stores and signed byte loads need an additional
+ // operand.
+ if (useAM3) {
+ signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ MIB.addReg(0);
+ MIB.addImm(Imm);
+ } else {
+ MIB.addImm(Addr.Offset);
+ }
MIB.addMemOperand(MMO);
} else {
// Now add the rest of the operands.
MIB.addReg(Addr.Base.Reg);
- // ARM halfword load/stores need an additional operand.
- if (!isThumb && VT.getSimpleVT().SimpleTy == MVT::i16) MIB.addReg(0);
-
- MIB.addImm(Addr.Offset);
+ // ARM halfword load/stores and signed byte loads need an additional
+ // operand.
+ if (useAM3) {
+ signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+ MIB.addReg(0);
+ MIB.addImm(Imm);
+ } else {
+ MIB.addImm(Addr.Offset);
+ }
}
AddOptionalDefs(MIB);
}
-bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr) {
-
+bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
+ unsigned Alignment, bool isZExt, bool allocReg) {
assert(VT.isSimple() && "Non-simple types are invalid here!");
unsigned Opc;
- TargetRegisterClass *RC;
+ bool useAM3 = false;
+ bool needVMOV = false;
+ const TargetRegisterClass *RC;
switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
- case MVT::i16:
- Opc = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;
+ case MVT::i1:
+ case MVT::i8:
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
+ else
+ Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
+ } else {
+ if (isZExt) {
+ Opc = ARM::LDRBi12;
+ } else {
+ Opc = ARM::LDRSB;
+ useAM3 = true;
+ }
+ }
RC = ARM::GPRRegisterClass;
break;
- case MVT::i8:
- Opc = isThumb ? ARM::t2LDRBi12 : ARM::LDRBi12;
+ case MVT::i16:
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
+ else
+ Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
+ } else {
+ Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
+ useAM3 = true;
+ }
RC = ARM::GPRRegisterClass;
break;
case MVT::i32:
- Opc = isThumb ? ARM::t2LDRi12 : ARM::LDRi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ Opc = ARM::t2LDRi8;
+ else
+ Opc = ARM::t2LDRi12;
+ } else {
+ Opc = ARM::LDRi12;
+ }
RC = ARM::GPRRegisterClass;
break;
case MVT::f32:
- Opc = ARM::VLDRS;
- RC = TLI.getRegClassFor(VT);
+ if (!Subtarget->hasVFP2()) return false;
+ // Unaligned loads need special handling. Floats require word-alignment.
+ if (Alignment && Alignment < 4) {
+ needVMOV = true;
+ VT = MVT::i32;
+ Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
+ RC = ARM::GPRRegisterClass;
+ } else {
+ Opc = ARM::VLDRS;
+ RC = TLI.getRegClassFor(VT);
+ }
break;
case MVT::f64:
+ if (!Subtarget->hasVFP2()) return false;
+ // FIXME: Unaligned loads need special handling. Doublewords require
+ // word-alignment.
+ if (Alignment && Alignment < 4)
+ return false;
+
Opc = ARM::VLDRD;
RC = TLI.getRegClassFor(VT);
break;
}
// Simplify this down to something we can handle.
- ARMSimplifyAddress(Addr, VT);
+ ARMSimplifyAddress(Addr, VT, useAM3);
// Create the base instruction, then add the operands.
- ResultReg = createResultReg(RC);
+ if (allocReg)
+ ResultReg = createResultReg(RC);
+ assert (ResultReg > 255 && "Expected an allocated virtual register.");
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg);
- AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad);
+ AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
+
+ // If we had an unaligned load of a float we've converted it to a regular
+ // load. Now we must move from the GPR to the FP register.
+ if (needVMOV) {
+ unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VMOVSR), MoveReg)
+ .addReg(ResultReg));
+ ResultReg = MoveReg;
+ }
return true;
}
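
// Editor's note: a worked example for the useAM3 offset encoding used by
// AddLoadStoreOperands in this hunk. The immediate carries the offset
// magnitude in its low 8 bits:
//   Addr.Offset = +12  ->  Imm = 12
//   Addr.Offset = -12  ->  Imm = 0x100 | 12 = 0x10C
// Bit 8 presumably acts as the subtract flag, which is consistent with the
// +/-imm8 range check (offsets in -255..255) in ARMSimplifyAddress above.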
@@ -969,51 +1092,92 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
unsigned ResultReg;
- if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
+ if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
+ return false;
UpdateValueMap(I, ResultReg);
return true;
}
-bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
+bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
+ unsigned Alignment) {
unsigned StrOpc;
+ bool useAM3 = false;
switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i1: {
- unsigned Res = createResultReg(isThumb ? ARM::tGPRRegisterClass :
+ unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
ARM::GPRRegisterClass);
- unsigned Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
+ unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), Res)
.addReg(SrcReg).addImm(1));
SrcReg = Res;
} // Fallthrough here.
case MVT::i8:
- StrOpc = isThumb ? ARM::t2STRBi12 : ARM::STRBi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRBi8;
+ else
+ StrOpc = ARM::t2STRBi12;
+ } else {
+ StrOpc = ARM::STRBi12;
+ }
break;
case MVT::i16:
- StrOpc = isThumb ? ARM::t2STRHi12 : ARM::STRH;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRHi8;
+ else
+ StrOpc = ARM::t2STRHi12;
+ } else {
+ StrOpc = ARM::STRH;
+ useAM3 = true;
+ }
break;
case MVT::i32:
- StrOpc = isThumb ? ARM::t2STRi12 : ARM::STRi12;
+ if (isThumb2) {
+ if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
+ StrOpc = ARM::t2STRi8;
+ else
+ StrOpc = ARM::t2STRi12;
+ } else {
+ StrOpc = ARM::STRi12;
+ }
break;
case MVT::f32:
if (!Subtarget->hasVFP2()) return false;
- StrOpc = ARM::VSTRS;
+ // Unaligned stores need special handling. Floats require word-alignment.
+ if (Alignment && Alignment < 4) {
+ unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(ARM::VMOVRS), MoveReg)
+ .addReg(SrcReg));
+ SrcReg = MoveReg;
+ VT = MVT::i32;
+ StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
+ } else {
+ StrOpc = ARM::VSTRS;
+ }
break;
case MVT::f64:
if (!Subtarget->hasVFP2()) return false;
+ // FIXME: Unaligned stores need special handling. Doublewords require
+ // word-alignment.
+ if (Alignment && Alignment < 4)
+ return false;
+
StrOpc = ARM::VSTRD;
break;
}
// Simplify this down to something we can handle.
- ARMSimplifyAddress(Addr, VT);
+ ARMSimplifyAddress(Addr, VT, useAM3);
// Create the base instruction, then add the operands.
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(StrOpc))
- .addReg(SrcReg, getKillRegState(true));
- AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore);
+ .addReg(SrcReg);
+ AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
return true;
}
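// [Editor's sketch] The MVT::i1 case above masks the value to its low bit
// before the byte store; an equivalent scalar model (storeBool is a
// hypothetical name):
static void storeBool(unsigned char *Addr, unsigned SrcReg) {
  *Addr = (unsigned char)(SrcReg & 1); // AND{ri} #1 followed by STRB
}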
@@ -1039,7 +1203,8 @@ bool ARMFastISel::SelectStore(const Instruction *I) {
if (!ARMComputeAddress(I->getOperand(1), Addr))
return false;
- if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
+ if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
+ return false;
return true;
}
@@ -1099,30 +1264,8 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
// If we can, avoid recomputing the compare - redoing it could lead to wonky
// behavior.
- // TODO: Factor this out.
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
- MVT SourceVT;
- Type *Ty = CI->getOperand(0)->getType();
- if (CI->hasOneUse() && (CI->getParent() == I->getParent())
- && isTypeLegal(Ty, SourceVT)) {
- bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
- if (isFloat && !Subtarget->hasVFP2())
- return false;
-
- unsigned CmpOpc;
- switch (SourceVT.SimpleTy) {
- default: return false;
- // TODO: Verify compares.
- case MVT::f32:
- CmpOpc = ARM::VCMPES;
- break;
- case MVT::f64:
- CmpOpc = ARM::VCMPED;
- break;
- case MVT::i32:
- CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
- break;
- }
+ if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
// Get the compare predicate.
// Try to take advantage of fallthrough opportunities.
@@ -1137,23 +1280,11 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
// We may not handle every CC for now.
if (ARMPred == ARMCC::AL) return false;
- unsigned Arg1 = getRegForValue(CI->getOperand(0));
- if (Arg1 == 0) return false;
-
- unsigned Arg2 = getRegForValue(CI->getOperand(1));
- if (Arg2 == 0) return false;
-
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CmpOpc))
- .addReg(Arg1).addReg(Arg2));
-
- // For floating point we need to move the result to a comparison register
- // that we can then use for branches.
- if (isFloat)
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(ARM::FMSTAT)));
+ // Emit the compare.
+ if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
+ return false;
- unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
+ unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
.addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
FastEmitBranch(FBB, DL);
@@ -1164,7 +1295,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
MVT SourceVT;
if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
(isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
- unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
+ unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
unsigned OpReg = getRegForValue(TI->getOperand(0));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(TstOpc))
@@ -1176,7 +1307,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
CCMode = ARMCC::EQ;
}
- unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
+ unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
.addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
@@ -1184,6 +1315,12 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
FuncInfo.MBB->addSuccessor(TBB);
return true;
}
+ } else if (const ConstantInt *CI =
+ dyn_cast<ConstantInt>(BI->getCondition())) {
+ uint64_t Imm = CI->getZExtValue();
+ MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
+ FastEmitBranch(Target, DL);
+ return true;
}
unsigned CmpReg = getRegForValue(BI->getCondition());
@@ -1196,7 +1333,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
// Regardless, the compare has been done in the predecessor block,
// and it left a value for us in a virtual register. Ergo, we test
// the one-bit value left in the virtual register.
- unsigned TstOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
+ unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
.addReg(CmpReg).addImm(1));
@@ -1206,7 +1343,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
CCMode = ARMCC::EQ;
}
- unsigned BrOpc = isThumb ? ARM::t2Bcc : ARM::Bcc;
+ unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
.addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
FastEmitBranch(FBB, DL);
@@ -1214,70 +1351,155 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
return true;
}
-bool ARMFastISel::SelectCmp(const Instruction *I) {
- const CmpInst *CI = cast<CmpInst>(I);
+bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
+ unsigned AddrReg = getRegForValue(I->getOperand(0));
+ if (AddrReg == 0) return false;
- MVT VT;
- Type *Ty = CI->getOperand(0)->getType();
- if (!isTypeLegal(Ty, VT))
- return false;
+ unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
+ .addReg(AddrReg));
+ return true;
+}
- bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
+bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
+ bool isZExt) {
+ Type *Ty = Src1Value->getType();
+ EVT SrcVT = TLI.getValueType(Ty, true);
+ if (!SrcVT.isSimple()) return false;
+
+ bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
if (isFloat && !Subtarget->hasVFP2())
return false;
+ // Check to see if the 2nd operand is a constant that we can encode directly
+ // in the compare.
+ int Imm = 0;
+ bool UseImm = false;
+ bool isNegativeImm = false;
+ // FIXME: At -O0 we don't have anything that canonicalizes operand order.
+ // Thus, Src1Value may be a ConstantInt, but we're missing it.
+ if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
+ if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
+ SrcVT == MVT::i1) {
+ const APInt &CIVal = ConstInt->getValue();
+ Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
+ // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
+ // than a cmn, because there is no way to represent 2147483648 as a
+ // signed 32-bit int.
+ if (Imm < 0 && Imm != (int)0x80000000) {
+ isNegativeImm = true;
+ Imm = -Imm;
+ }
+ UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ (ARM_AM::getSOImmVal(Imm) != -1);
+ }
+ } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
+ if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
+ if (ConstFP->isZero() && !ConstFP->isNegative())
+ UseImm = true;
+ }
+
unsigned CmpOpc;
- unsigned CondReg;
- switch (VT.SimpleTy) {
+ bool isICmp = true;
+ bool needsExt = false;
+ switch (SrcVT.getSimpleVT().SimpleTy) {
default: return false;
// TODO: Verify compares.
case MVT::f32:
- CmpOpc = ARM::VCMPES;
- CondReg = ARM::FPSCR;
+ isICmp = false;
+ CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
break;
case MVT::f64:
- CmpOpc = ARM::VCMPED;
- CondReg = ARM::FPSCR;
+ isICmp = false;
+ CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
break;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ needsExt = true;
+ // Intentional fall-through.
case MVT::i32:
- CmpOpc = isThumb ? ARM::t2CMPrr : ARM::CMPrr;
- CondReg = ARM::CPSR;
+ if (isThumb2) {
+ if (!UseImm)
+ CmpOpc = ARM::t2CMPrr;
+ else
+ CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
+ } else {
+ if (!UseImm)
+ CmpOpc = ARM::CMPrr;
+ else
+ CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
+ }
break;
}
- // Get the compare predicate.
- ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
+ unsigned SrcReg1 = getRegForValue(Src1Value);
+ if (SrcReg1 == 0) return false;
- // We may not handle every CC for now.
- if (ARMPred == ARMCC::AL) return false;
+ unsigned SrcReg2 = 0;
+ if (!UseImm) {
+ SrcReg2 = getRegForValue(Src2Value);
+ if (SrcReg2 == 0) return false;
+ }
- unsigned Arg1 = getRegForValue(CI->getOperand(0));
- if (Arg1 == 0) return false;
+ // If we have i1, i8, or i16, we need to either zero extend or sign extend.
+ if (needsExt) {
+ SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
+ if (SrcReg1 == 0) return false;
+ if (!UseImm) {
+ SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
+ if (SrcReg2 == 0) return false;
+ }
+ }
- unsigned Arg2 = getRegForValue(CI->getOperand(1));
- if (Arg2 == 0) return false;
+ if (!UseImm) {
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CmpOpc))
+ .addReg(SrcReg1).addReg(SrcReg2));
+ } else {
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
+ .addReg(SrcReg1);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
- .addReg(Arg1).addReg(Arg2));
+ // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
+ if (isICmp)
+ MIB.addImm(Imm);
+ AddOptionalDefs(MIB);
+ }
// For floating point we need to move the result to a comparison register
// that we can then use for branches.
- if (isFloat)
+ if (Ty->isFloatTy() || Ty->isDoubleTy())
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::FMSTAT)));
+ return true;
+}
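// [Editor's sketch] A standalone model of the CMP/CMN immediate choice made
// above (selectCmpImm is a hypothetical name): a negative immediate other
// than INT_MIN is negated and compared with CMN, since "cmn rN, #-Imm"
// computes rN - Imm and so sets the same flags as "cmp rN, #Imm"; INT_MIN
// has no 32-bit negation, so it must stay a CMP. The separate SOImm
// encodability check above then decides whether an immediate form is usable.
struct CmpImmChoice { bool UseCMN; int Imm; };
static CmpImmChoice selectCmpImm(int Imm) {
  CmpImmChoice C = { false, Imm };   // default: CMP with the immediate as-is
  if (Imm < 0 && Imm != (int)0x80000000) {
    C.UseCMN = true;                 // CMN with the negated immediate
    C.Imm = -Imm;
  }
  return C;
}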
+
+bool ARMFastISel::SelectCmp(const Instruction *I) {
+ const CmpInst *CI = cast<CmpInst>(I);
+
+ // Get the compare predicate.
+ ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
+
+ // We may not handle every CC for now.
+ if (ARMPred == ARMCC::AL) return false;
+
+ // Emit the compare.
+ if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
+ return false;
// Now set a register based on the comparison. Explicitly set the predicates
// here.
- unsigned MovCCOpc = isThumb ? ARM::t2MOVCCi : ARM::MOVCCi;
- TargetRegisterClass *RC = isThumb ? ARM::rGPRRegisterClass
- : ARM::GPRRegisterClass;
+ unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
+ const TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
+ : ARM::GPRRegisterClass;
unsigned DestReg = createResultReg(RC);
- Constant *Zero
- = ConstantInt::get(Type::getInt32Ty(*Context), 0);
+ Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
unsigned ZeroReg = TargetMaterializeConstant(Zero);
+ // ARMEmitCmp emits an FMSTAT when necessary, so it's always safe to use CPSR.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
.addReg(ZeroReg).addImm(1)
- .addImm(ARMPred).addReg(CondReg);
+ .addImm(ARMPred).addReg(ARM::CPSR);
UpdateValueMap(I, DestReg);
return true;
@@ -1321,7 +1543,7 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
return true;
}
-bool ARMFastISel::SelectSIToFP(const Instruction *I) {
+bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
// Make sure we have VFP.
if (!Subtarget->hasVFP2()) return false;
@@ -1330,21 +1552,30 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) {
if (!isTypeLegal(Ty, DstVT))
return false;
- // FIXME: Handle sign-extension where necessary.
- if (!I->getOperand(0)->getType()->isIntegerTy(32))
+ Value *Src = I->getOperand(0);
+ EVT SrcVT = TLI.getValueType(Src->getType(), true);
+ if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
return false;
- unsigned Op = getRegForValue(I->getOperand(0));
- if (Op == 0) return false;
+ unsigned SrcReg = getRegForValue(Src);
+ if (SrcReg == 0) return false;
+
+ // Handle sign-extension.
+ if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
+ EVT DestVT = MVT::i32;
+ SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
+ /*isZExt*/!isSigned);
+ if (SrcReg == 0) return false;
+ }
// The conversion routine works on fp-reg to fp-reg and the operand above
// was an integer, move it to the fp registers if possible.
- unsigned FP = ARMMoveToFPReg(MVT::f32, Op);
+ unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
if (FP == 0) return false;
unsigned Opc;
- if (Ty->isFloatTy()) Opc = ARM::VSITOS;
- else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
+ if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
+ else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
else return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
@@ -1355,7 +1586,7 @@ bool ARMFastISel::SelectSIToFP(const Instruction *I) {
return true;
}
-bool ARMFastISel::SelectFPToSI(const Instruction *I) {
+bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
// Make sure we have VFP.
if (!Subtarget->hasVFP2()) return false;
@@ -1369,11 +1600,11 @@ bool ARMFastISel::SelectFPToSI(const Instruction *I) {
unsigned Opc;
Type *OpTy = I->getOperand(0)->getType();
- if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
- else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
+ if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
+ else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
else return false;
- // f64->s32 or f32->s32 both need an intermediate f32 reg.
+ // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
ResultReg)
@@ -1401,22 +1632,54 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
if (CondReg == 0) return false;
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- unsigned Op2Reg = getRegForValue(I->getOperand(2));
- if (Op2Reg == 0) return false;
- unsigned CmpOpc = isThumb ? ARM::t2TSTri : ARM::TSTri;
+ // Check to see if we can use an immediate in the conditional move.
+ int Imm = 0;
+ bool UseImm = false;
+ bool isNegativeImm = false;
+ if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
+ assert (VT == MVT::i32 && "Expecting an i32.");
+ Imm = (int)ConstInt->getValue().getZExtValue();
+ if (Imm < 0) {
+ isNegativeImm = true;
+ Imm = ~Imm;
+ }
+ UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
+ (ARM_AM::getSOImmVal(Imm) != -1);
+ }
+
+ unsigned Op2Reg = 0;
+ if (!UseImm) {
+ Op2Reg = getRegForValue(I->getOperand(2));
+ if (Op2Reg == 0) return false;
+ }
+
+ unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
- .addReg(CondReg).addImm(1));
+ .addReg(CondReg).addImm(0));
+
+ unsigned MovCCOpc;
+ if (!UseImm) {
+ MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
+ } else {
+ if (!isNegativeImm) {
+ MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
+ } else {
+ MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
+ }
+ }
unsigned ResultReg = createResultReg(RC);
- unsigned MovCCOpc = isThumb ? ARM::t2MOVCCr : ARM::MOVCCr;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
- .addReg(Op1Reg).addReg(Op2Reg)
- .addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ if (!UseImm)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
+ .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
+ else
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
+ .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
UpdateValueMap(I, ResultReg);
return true;
}
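// [Editor's sketch] The select lowering above behaves like this scalar model
// (selectValue is a hypothetical name): compare the i1 condition against
// zero, then conditionally overwrite the pre-loaded operand, which is what
// the tied-operand MOVCC does.
static int selectValue(int Cond, int TrueVal, int FalseVal) {
  int Result = FalseVal;  // MOVCC's tied first operand
  if (Cond != 0)          // CMP Cond, #0 ; predicate NE
    Result = TrueVal;
  return Result;
}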
-bool ARMFastISel::SelectSDiv(const Instruction *I) {
+bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
MVT VT;
Type *Ty = I->getType();
if (!isTypeLegal(Ty, VT))
@@ -1430,21 +1693,21 @@ bool ARMFastISel::SelectSDiv(const Instruction *I) {
// Otherwise emit a libcall.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i8)
- LC = RTLIB::SDIV_I8;
+ LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
else if (VT == MVT::i16)
- LC = RTLIB::SDIV_I16;
+ LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
else if (VT == MVT::i32)
- LC = RTLIB::SDIV_I32;
+ LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
else if (VT == MVT::i64)
- LC = RTLIB::SDIV_I64;
+ LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
else if (VT == MVT::i128)
- LC = RTLIB::SDIV_I128;
+ LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
return ARMEmitLibcall(I, LC);
}
-bool ARMFastISel::SelectSRem(const Instruction *I) {
+bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
MVT VT;
Type *Ty = I->getType();
if (!isTypeLegal(Ty, VT))
@@ -1452,21 +1715,59 @@ bool ARMFastISel::SelectSRem(const Instruction *I) {
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == MVT::i8)
- LC = RTLIB::SREM_I8;
+ LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
else if (VT == MVT::i16)
- LC = RTLIB::SREM_I16;
+ LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
else if (VT == MVT::i32)
- LC = RTLIB::SREM_I32;
+ LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
else if (VT == MVT::i64)
- LC = RTLIB::SREM_I64;
+ LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
else if (VT == MVT::i128)
- LC = RTLIB::SREM_I128;
+ LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
return ARMEmitLibcall(I, LC);
}
-bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
+bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
+ EVT DestVT = TLI.getValueType(I->getType(), true);
+
+ // We can get here in the case when we have a binary operation on a non-legal
+ // type and the target independent selector doesn't know how to handle it.
+ if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
+ return false;
+
+ unsigned Opc;
+ switch (ISDOpcode) {
+ default: return false;
+ case ISD::ADD:
+ Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
+ break;
+ case ISD::OR:
+ Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
+ break;
+ case ISD::SUB:
+ Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
+ break;
+ }
+
+ unsigned SrcReg1 = getRegForValue(I->getOperand(0));
+ if (SrcReg1 == 0) return false;
+
+ // TODO: Often the 2nd operand is an immediate, which can be encoded directly
+ // in the instruction, rather than materializing the value in a register.
+ unsigned SrcReg2 = getRegForValue(I->getOperand(1));
+ if (SrcReg2 == 0) return false;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg)
+ .addReg(SrcReg1).addReg(SrcReg2));
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
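// [Editor's sketch] The pattern above runs i1/i8/i16 arithmetic directly in
// a 32-bit register and leaves the high bits undefined; a scalar model
// (addNarrow is a hypothetical name) of why that is sound for the low bits:
static unsigned char addNarrow(unsigned char A, unsigned char B) {
  unsigned Wide = (unsigned)A + (unsigned)B; // 32-bit ADDrr
  return (unsigned char)Wide; // low 8 bits are correct whatever the high bits hold
}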
+
+bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
EVT VT = TLI.getValueType(I->getType(), true);
// We can get here in the case when we want to use NEON for our fp
@@ -1478,12 +1779,6 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
if (isFloat && !Subtarget->hasVFP2())
return false;
- unsigned Op1 = getRegForValue(I->getOperand(0));
- if (Op1 == 0) return false;
-
- unsigned Op2 = getRegForValue(I->getOperand(1));
- if (Op2 == 0) return false;
-
unsigned Opc;
bool is64bit = VT == MVT::f64 || VT == MVT::i64;
switch (ISDOpcode) {
@@ -1498,6 +1793,12 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
Opc = is64bit ? ARM::VMULD : ARM::VMULS;
break;
}
+ unsigned Op1 = getRegForValue(I->getOperand(0));
+ if (Op1 == 0) return false;
+
+ unsigned Op2 = getRegForValue(I->getOperand(1));
+ if (Op2 == 0) return false;
+
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg)
@@ -1508,18 +1809,6 @@ bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
// Call Handling Code
-bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
- EVT SrcVT, unsigned &ResultReg) {
- unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
- Src, /*TODO: Kill=*/false);
-
- if (RR != 0) {
- ResultReg = RR;
- return true;
- } else
- return false;
-}
-
// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
@@ -1536,7 +1825,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
// Use target triple & subtarget features to do actual dispatch.
if (Subtarget->isAAPCS_ABI()) {
if (Subtarget->hasVFP2() &&
- FloatABIType == FloatABI::Hard)
+ TM.Options.FloatABIType == FloatABI::Hard)
return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
else
return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
@@ -1548,11 +1837,6 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
case CallingConv::ARM_APCS:
return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
- case CallingConv::GHC:
- if (Return)
- llvm_unreachable("Can't return in GHC call convention");
- else
- return CC_ARM_APCS_GHC;
}
}
@@ -1567,6 +1851,48 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));
+ // Check that we can handle all of the arguments. If we can't, then bail out
+ // now before we add code to the MBB.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ MVT ArgVT = ArgVTs[VA.getValNo()];
+
+ // We don't handle NEON/vector parameters yet.
+ if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
+ return false;
+
+ // Now copy/store arg to correct locations.
+ if (VA.isRegLoc() && !VA.needsCustom()) {
+ continue;
+ } else if (VA.needsCustom()) {
+ // TODO: We need custom lowering for vector (v2f64) args.
+ if (VA.getLocVT() != MVT::f64 ||
+ // TODO: Only handle register args for now.
+ !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
+ return false;
+ } else {
+ switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
+ default:
+ return false;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ break;
+ case MVT::f32:
+ if (!Subtarget->hasVFP2())
+ return false;
+ break;
+ case MVT::f64:
+ if (!Subtarget->hasVFP2())
+ return false;
+ break;
+ }
+ }
+ }
+
+ // At this point, we are able to handle the call's arguments in fast isel.
+
// Get a count of how many bytes are to be pushed on the stack.
NumBytes = CCInfo.getNextStackOffset();
@@ -1582,41 +1908,26 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
unsigned Arg = ArgRegs[VA.getValNo()];
MVT ArgVT = ArgVTs[VA.getValNo()];
- // We don't handle NEON/vector parameters yet.
- if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
- return false;
+ assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
+ "We don't handle NEON/vector parameters yet.");
// Handle arg promotion, etc.
switch (VA.getLocInfo()) {
case CCValAssign::Full: break;
case CCValAssign::SExt: {
- bool Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
- assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
- Emitted = true;
- ArgVT = VA.getLocVT();
+ MVT DestVT = VA.getLocVT();
+ Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
+ assert (Arg != 0 && "Failed to emit a sext");
+ ArgVT = DestVT;
break;
}
+ case CCValAssign::AExt:
+ // Intentional fall-through. Handle AExt and ZExt.
case CCValAssign::ZExt: {
- bool Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
- assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
- Emitted = true;
- ArgVT = VA.getLocVT();
- break;
- }
- case CCValAssign::AExt: {
- bool Emitted = FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
- if (!Emitted)
- Emitted = FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
- if (!Emitted)
- Emitted = FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
- Arg, ArgVT, Arg);
-
- assert(Emitted && "Failed to emit a aext!"); (void)Emitted;
- ArgVT = VA.getLocVT();
+ MVT DestVT = VA.getLocVT();
+ Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
+ assert (Arg != 0 && "Failed to emit a sext");
+ ArgVT = DestVT;
break;
}
case CCValAssign::BCvt: {
@@ -1634,16 +1945,17 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
if (VA.isRegLoc() && !VA.needsCustom()) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
VA.getLocReg())
- .addReg(Arg);
+ .addReg(Arg);
RegArgs.push_back(VA.getLocReg());
} else if (VA.needsCustom()) {
// TODO: We need custom lowering for vector (v2f64) args.
- if (VA.getLocVT() != MVT::f64) return false;
+ assert(VA.getLocVT() == MVT::f64 &&
+ "Custom lowering for v2f64 args not available");
CCValAssign &NextVA = ArgLocs[++i];
- // TODO: Only handle register args for now.
- if(!(VA.isRegLoc() && NextVA.isRegLoc())) return false;
+ assert(VA.isRegLoc() && NextVA.isRegLoc() &&
+ "We only handle register args!");
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::VMOVRRD), VA.getLocReg())
@@ -1659,9 +1971,11 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
Addr.Base.Reg = ARM::SP;
Addr.Offset = VA.getLocMemOffset();
- if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
+ bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
+ assert(EmitRet && "Could not emit a store for argument!");
}
}
+
return true;
}
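// [Editor's sketch] The promotion above widens sub-word arguments to the
// location type before the register copy or stack store; a scalar model
// (promoteArg is a hypothetical name) of the SExt vs. ZExt/AExt cases:
static unsigned promoteArg(unsigned char V, bool IsSExt) {
  if (IsSExt)
    return (unsigned)(int)(signed char)V; // ARMEmitIntExt with isZExt = false
  return (unsigned)V;                     // ZExt and AExt use isZExt = true
}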
@@ -1685,7 +1999,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
// For this move we copy into two registers and then move into the
// double fp reg we want.
EVT DestVT = RVLocs[0].getValVT();
- TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
+ const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
unsigned ResultReg = createResultReg(DstRC);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(ARM::VMOVDRR), ResultReg)
@@ -1700,7 +2014,12 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
} else {
assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
EVT CopyVT = RVLocs[0].getValVT();
- TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
+
+ // Special handling for extended integers.
+ if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
+ CopyVT = MVT::i32;
+
+ const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
unsigned ResultReg = createResultReg(DstRC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
@@ -1753,13 +2072,26 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
// Only handle register returns for now.
if (!VA.isRegLoc())
return false;
- // TODO: For now, don't try to handle cases where getLocInfo()
- // says Full but the types don't match.
- if (TLI.getValueType(RV->getType()) != VA.getValVT())
- return false;
- // Make the copy.
unsigned SrcReg = Reg + VA.getValNo();
+ EVT RVVT = TLI.getValueType(RV->getType());
+ EVT DestVT = VA.getValVT();
+ // Special handling for extended integers.
+ if (RVVT != DestVT) {
+ if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
+ return false;
+
+ assert(DestVT == MVT::i32 && "ARM should always ext to i32");
+
+ // Perform extension if flagged as either zext or sext. Otherwise, do
+ // nothing.
+ if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
+ SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
+ if (SrcReg == 0) return false;
+ }
+ }
+
+ // Make the copy.
unsigned DstReg = VA.getLocReg();
const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
// Avoid a cross-class copy. This is very unlikely.
@@ -1772,20 +2104,17 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
MRI.addLiveOut(VA.getLocReg());
}
- unsigned RetOpc = isThumb ? ARM::tBX_RET : ARM::BX_RET;
+ unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(RetOpc)));
return true;
}
unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
-
- // Darwin needs the r9 versions of the opcodes.
- bool isDarwin = Subtarget->isTargetDarwin();
- if (isThumb) {
- return isDarwin ? ARM::tBLr9 : ARM::tBL;
+ if (isThumb2) {
+ return ARM::tBL;
} else {
- return isDarwin ? ARM::BLr9 : ARM::BL;
+ return ARM::BL;
}
}
@@ -1844,11 +2173,10 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
return false;
- // Issue the call, BLr9 for darwin, BL otherwise.
- // TODO: Turn this into the table of arm call ops.
+ // Issue the call.
MachineInstrBuilder MIB;
unsigned CallOpc = ARMSelectCallOp(NULL);
- if(isThumb)
+ if (isThumb2)
// Explicitly adding the predicate here.
MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(CallOpc)))
@@ -1863,6 +2191,10 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
+ // Add a register mask with the call-preserved registers.
+ // Proper defs for return values will be added by setPhysRegsDeadExcept().
+ MIB.addRegMask(TRI.getCallPreservedMask(CC));
+
// Finish off the call including any return values.
SmallVector<unsigned, 4> UsedRegs;
if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
@@ -1873,12 +2205,13 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
return true;
}
-bool ARMFastISel::SelectCall(const Instruction *I) {
+bool ARMFastISel::SelectCall(const Instruction *I,
+ const char *IntrMemName = 0) {
const CallInst *CI = cast<CallInst>(I);
const Value *Callee = CI->getCalledValue();
- // Can't handle inline asm or worry about intrinsics yet.
- if (isa<InlineAsm>(Callee) || isa<IntrinsicInst>(CI)) return false;
+ // Can't handle inline asm.
+ if (isa<InlineAsm>(Callee)) return false;
// Only handle global variable Callees.
const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
@@ -1902,7 +2235,8 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
MVT RetVT;
if (RetTy->isVoidTy())
RetVT = MVT::isVoid;
- else if (!isTypeLegal(RetTy, RetVT))
+ else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
+ RetVT != MVT::i8 && RetVT != MVT::i1)
return false;
// TODO: For now if we have long calls specified we don't handle the call.
@@ -1913,16 +2247,18 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
SmallVector<unsigned, 8> ArgRegs;
SmallVector<MVT, 8> ArgVTs;
SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
- Args.reserve(CS.arg_size());
- ArgRegs.reserve(CS.arg_size());
- ArgVTs.reserve(CS.arg_size());
- ArgFlags.reserve(CS.arg_size());
+ unsigned arg_size = CS.arg_size();
+ Args.reserve(arg_size);
+ ArgRegs.reserve(arg_size);
+ ArgVTs.reserve(arg_size);
+ ArgFlags.reserve(arg_size);
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
- unsigned Arg = getRegForValue(*i);
+ // If we're lowering a memory intrinsic instead of a regular call, skip the
+ // last two arguments, which shouldn't be passed to the underlying function.
+ if (IntrMemName && e-i <= 2)
+ break;
- if (Arg == 0)
- return false;
ISD::ArgFlagsTy Flags;
unsigned AttrInd = i - CS.arg_begin() + 1;
if (CS.paramHasAttr(AttrInd, Attribute::SExt))
@@ -1930,7 +2266,7 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
Flags.setZExt();
- // FIXME: Only handle *easy* calls for now.
+ // FIXME: Only handle *easy* calls for now.
if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
CS.paramHasAttr(AttrInd, Attribute::Nest) ||
@@ -1939,8 +2275,14 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
Type *ArgTy = (*i)->getType();
MVT ArgVT;
- if (!isTypeLegal(ArgTy, ArgVT))
+ if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
+ ArgVT != MVT::i1)
return false;
+
+ unsigned Arg = getRegForValue(*i);
+ if (Arg == 0)
+ return false;
+
unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
@@ -1956,26 +2298,38 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
return false;
- // Issue the call, BLr9 for darwin, BL otherwise.
- // TODO: Turn this into the table of arm call ops.
+ // Issue the call.
MachineInstrBuilder MIB;
unsigned CallOpc = ARMSelectCallOp(GV);
// Explicitly adding the predicate here.
- if(isThumb)
+ if(isThumb2) {
// Explicitly adding the predicate here.
MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc)))
- .addGlobalAddress(GV, 0, 0);
- else
- // Explicitly adding the predicate here.
- MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
- TII.get(CallOpc))
- .addGlobalAddress(GV, 0, 0));
-
+ TII.get(CallOpc)));
+ if (!IntrMemName)
+ MIB.addGlobalAddress(GV, 0, 0);
+ else
+ MIB.addExternalSymbol(IntrMemName, 0);
+ } else {
+ if (!IntrMemName)
+ // Explicitly adding the predicate here.
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc))
+ .addGlobalAddress(GV, 0, 0));
+ else
+ MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(CallOpc))
+ .addExternalSymbol(IntrMemName, 0));
+ }
+
// Add implicit physical register uses to the call.
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
+ // Add a register mask with the call-preserved registers.
+ // Proper defs for return values will be added by setPhysRegsDeadExcept().
+ MIB.addRegMask(TRI.getCallPreservedMask(CC));
+
// Finish off the call including any return values.
SmallVector<unsigned, 4> UsedRegs;
if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;
@@ -1984,83 +2338,187 @@ bool ARMFastISel::SelectCall(const Instruction *I) {
static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
return true;
+}
+bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
+ return Len <= 16;
}
-bool ARMFastISel::SelectIntCast(const Instruction *I) {
- // On ARM, in general, integer casts don't involve legal types; this code
- // handles promotable integers. The high bits for a type smaller than
- // the register size are assumed to be undefined.
- Type *DestTy = I->getType();
- Value *Op = I->getOperand(0);
- Type *SrcTy = Op->getType();
+bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
+ uint64_t Len) {
+ // Make sure we don't bloat code by inlining very large memcpy's.
+ if (!ARMIsMemCpySmall(Len))
+ return false;
- EVT SrcVT, DestVT;
- SrcVT = TLI.getValueType(SrcTy, true);
- DestVT = TLI.getValueType(DestTy, true);
+ // We don't care about alignment here since we just emit integer accesses.
+ while (Len) {
+ MVT VT;
+ if (Len >= 4)
+ VT = MVT::i32;
+ else if (Len >= 2)
+ VT = MVT::i16;
+ else {
+ assert(Len == 1);
+ VT = MVT::i8;
+ }
- if (isa<TruncInst>(I)) {
- if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
- return false;
- if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
+ bool RV;
+ unsigned ResultReg;
+ RV = ARMEmitLoad(VT, ResultReg, Src);
+ assert (RV == true && "Should be able to handle this load.");
+ RV = ARMEmitStore(VT, ResultReg, Dest);
+ assert (RV == true && "Should be able to handle this store.");
+ (void)RV;
+
+ unsigned Size = VT.getSizeInBits()/8;
+ Len -= Size;
+ Dest.Offset += Size;
+ Src.Offset += Size;
+ }
+
+ return true;
+}
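// [Editor's sketch] A self-contained model of the widening copy loop above
// (inlineSmallMemCpy is a hypothetical name): widest-first i32/i16/i8
// chunks, bounded by the 16-byte ARMIsMemCpySmall threshold.
#include <cstring>
static void inlineSmallMemCpy(unsigned char *Dst, const unsigned char *Src,
                              unsigned long long Len) {
  while (Len) {
    unsigned Size = (Len >= 4) ? 4 : (Len >= 2) ? 2 : 1;
    std::memcpy(Dst, Src, Size); // one load plus one store of width Size
    Dst += Size;
    Src += Size;
    Len -= Size;
  }
}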
+
+bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
+ // FIXME: Handle more intrinsics.
+ switch (I.getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove: {
+ const MemTransferInst &MTI = cast<MemTransferInst>(I);
+ // Don't handle volatile.
+ if (MTI.isVolatile())
return false;
- unsigned SrcReg = getRegForValue(Op);
- if (!SrcReg) return false;
+ // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
+ // we would emit dead code because we don't currently handle memmoves.
+ bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
+ if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
+ // Small memcpy's are common enough that we want to do them without a call
+ // if possible.
+ uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
+ if (ARMIsMemCpySmall(Len)) {
+ Address Dest, Src;
+ if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
+ !ARMComputeAddress(MTI.getRawSource(), Src))
+ return false;
+ if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
+ return true;
+ }
+ }
+
+ if (!MTI.getLength()->getType()->isIntegerTy(32))
+ return false;
+
+ if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
+ return false;
- // Because the high bits are undefined, a truncate doesn't generate
- // any code.
- UpdateValueMap(I, SrcReg);
- return true;
+ const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
+ return SelectCall(&I, IntrMemName);
}
- if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
+ case Intrinsic::memset: {
+ const MemSetInst &MSI = cast<MemSetInst>(I);
+ // Don't handle volatile.
+ if (MSI.isVolatile())
+ return false;
+
+ if (!MSI.getLength()->getType()->isIntegerTy(32))
+ return false;
+
+ if (MSI.getDestAddressSpace() > 255)
+ return false;
+
+ return SelectCall(&I, "memset");
+ }
+ }
+}
+
+bool ARMFastISel::SelectTrunc(const Instruction *I) {
+ // The high bits for a type smaller than the register size are assumed to be
+ // undefined.
+ Value *Op = I->getOperand(0);
+
+ EVT SrcVT, DestVT;
+ SrcVT = TLI.getValueType(Op->getType(), true);
+ DestVT = TLI.getValueType(I->getType(), true);
+
+ if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
+ return false;
+ if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
return false;
+ unsigned SrcReg = getRegForValue(Op);
+ if (!SrcReg) return false;
+
+ // Because the high bits are undefined, a truncate doesn't generate
+ // any code.
+ UpdateValueMap(I, SrcReg);
+ return true;
+}
+
+unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
+ bool isZExt) {
+ if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
+ return 0;
+
unsigned Opc;
- bool isZext = isa<ZExtInst>(I);
bool isBoolZext = false;
- if (!SrcVT.isSimple())
- return false;
+ if (!SrcVT.isSimple()) return 0;
switch (SrcVT.getSimpleVT().SimpleTy) {
- default: return false;
+ default: return 0;
case MVT::i16:
- if (!Subtarget->hasV6Ops()) return false;
- if (isZext)
- Opc = isThumb ? ARM::t2UXTH : ARM::UXTH;
+ if (!Subtarget->hasV6Ops()) return 0;
+ if (isZExt)
+ Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
else
- Opc = isThumb ? ARM::t2SXTH : ARM::SXTH;
+ Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
break;
case MVT::i8:
- if (!Subtarget->hasV6Ops()) return false;
- if (isZext)
- Opc = isThumb ? ARM::t2UXTB : ARM::UXTB;
+ if (!Subtarget->hasV6Ops()) return 0;
+ if (isZExt)
+ Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
else
- Opc = isThumb ? ARM::t2SXTB : ARM::SXTB;
+ Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
break;
case MVT::i1:
- if (isZext) {
- Opc = isThumb ? ARM::t2ANDri : ARM::ANDri;
+ if (isZExt) {
+ Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
isBoolZext = true;
break;
}
- return false;
+ return 0;
}
- // FIXME: We could save an instruction in many cases by special-casing
- // load instructions.
- unsigned SrcReg = getRegForValue(Op);
- if (!SrcReg) return false;
-
- unsigned DestReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
MachineInstrBuilder MIB;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
.addReg(SrcReg);
if (isBoolZext)
MIB.addImm(1);
else
MIB.addImm(0);
AddOptionalDefs(MIB);
- UpdateValueMap(I, DestReg);
+ return ResultReg;
+}
+
+bool ARMFastISel::SelectIntExt(const Instruction *I) {
+ // On ARM, in general, integer casts don't involve legal types; this code
+ // handles promotable integers.
+ Type *DestTy = I->getType();
+ Value *Src = I->getOperand(0);
+ Type *SrcTy = Src->getType();
+
+ EVT SrcVT, DestVT;
+ SrcVT = TLI.getValueType(SrcTy, true);
+ DestVT = TLI.getValueType(DestTy, true);
+
+ bool isZExt = isa<ZExtInst>(I);
+ unsigned SrcReg = getRegForValue(Src);
+ if (!SrcReg) return false;
+
+ unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
+ if (ResultReg == 0) return false;
+ UpdateValueMap(I, ResultReg);
return true;
}
@@ -2074,6 +2532,8 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
return SelectStore(I);
case Instruction::Br:
return SelectBranch(I);
+ case Instruction::IndirectBr:
+ return SelectIndirectBr(I);
case Instruction::ICmp:
case Instruction::FCmp:
return SelectCmp(I);
@@ -2082,42 +2542,105 @@ bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
case Instruction::FPTrunc:
return SelectFPTrunc(I);
case Instruction::SIToFP:
- return SelectSIToFP(I);
+ return SelectIToFP(I, /*isSigned*/ true);
+ case Instruction::UIToFP:
+ return SelectIToFP(I, /*isSigned*/ false);
case Instruction::FPToSI:
- return SelectFPToSI(I);
+ return SelectFPToI(I, /*isSigned*/ true);
+ case Instruction::FPToUI:
+ return SelectFPToI(I, /*isSigned*/ false);
+ case Instruction::Add:
+ return SelectBinaryIntOp(I, ISD::ADD);
+ case Instruction::Or:
+ return SelectBinaryIntOp(I, ISD::OR);
+ case Instruction::Sub:
+ return SelectBinaryIntOp(I, ISD::SUB);
case Instruction::FAdd:
- return SelectBinaryOp(I, ISD::FADD);
+ return SelectBinaryFPOp(I, ISD::FADD);
case Instruction::FSub:
- return SelectBinaryOp(I, ISD::FSUB);
+ return SelectBinaryFPOp(I, ISD::FSUB);
case Instruction::FMul:
- return SelectBinaryOp(I, ISD::FMUL);
+ return SelectBinaryFPOp(I, ISD::FMUL);
case Instruction::SDiv:
- return SelectSDiv(I);
+ return SelectDiv(I, /*isSigned*/ true);
+ case Instruction::UDiv:
+ return SelectDiv(I, /*isSigned*/ false);
case Instruction::SRem:
- return SelectSRem(I);
+ return SelectRem(I, /*isSigned*/ true);
+ case Instruction::URem:
+ return SelectRem(I, /*isSigned*/ false);
case Instruction::Call:
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
+ return SelectIntrinsicCall(*II);
return SelectCall(I);
case Instruction::Select:
return SelectSelect(I);
case Instruction::Ret:
return SelectRet(I);
case Instruction::Trunc:
+ return SelectTrunc(I);
case Instruction::ZExt:
case Instruction::SExt:
- return SelectIntCast(I);
+ return SelectIntExt(I);
default: break;
}
return false;
}
+/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+/// vreg is being provided by the specified load instruction. If possible,
+/// try to fold the load as an operand to the instruction, returning true if
+/// successful.
+bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
+ const LoadInst *LI) {
+ // Verify we have a legal type before going any further.
+ MVT VT;
+ if (!isLoadTypeLegal(LI->getType(), VT))
+ return false;
+
+ // Combine load followed by zero- or sign-extend.
+ // ldrb r1, [r0] ldrb r1, [r0]
+ // uxtb r2, r1 =>
+ // mov r3, r2 mov r3, r1
+ bool isZExt = true;
+ switch(MI->getOpcode()) {
+ default: return false;
+ case ARM::SXTH:
+ case ARM::t2SXTH:
+ isZExt = false;
+ case ARM::UXTH:
+ case ARM::t2UXTH:
+ if (VT != MVT::i16)
+ return false;
+ break;
+ case ARM::SXTB:
+ case ARM::t2SXTB:
+ isZExt = false;
+ case ARM::UXTB:
+ case ARM::t2UXTB:
+ if (VT != MVT::i8)
+ return false;
+ break;
+ }
+ // See if we can handle this address.
+ Address Addr;
+ if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
+
+ unsigned ResultReg = MI->getOperand(0).getReg();
+ if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
+ return false;
+ MI->eraseFromParent();
+ return true;
+}
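// [Editor's sketch] The fold above collapses "plain load + explicit extend"
// into one extending load; a hedged scalar model of the i16 sign-extend case
// (loadSExt16 is a hypothetical name):
static int loadSExt16(const short *Addr) {
  // Before the fold: LDRH r1, [r0] ; SXTH r2, r1   After: LDRSH r2, [r0]
  return (int)*Addr;
}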
+
namespace llvm {
- llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
- // Completely untested on non-darwin.
+ FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
+ // Completely untested on non-iOS.
const TargetMachine &TM = funcInfo.MF->getTarget();
// Darwin and thumb1 only for now.
const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
- if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
+ if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only() &&
!DisableARMFastISel)
return new ARMFastISel(funcInfo);
return 0;
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 412751b..402ecb0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1,4 +1,4 @@
-//=======- ARMFrameLowering.cpp - ARM Frame Information --------*- C++ -*-====//
+//===-- ARMFrameLowering.cpp - ARM Frame Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,31 +15,40 @@
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMMachineFunctionInfo.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Function.h"
#include "MCTargetDesc/ARMAddressingModes.h"
+#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
+static cl::opt<bool>
+SpillAlignedNEONRegs("align-neon-spills", cl::Hidden, cl::init(true),
+ cl::desc("Align ARM NEON spills in prolog and epilog"));
+
+static MachineBasicBlock::iterator
+skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
+ unsigned NumAlignedDPRCS2Regs);
+
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool ARMFrameLowering::hasFP(const MachineFunction &MF) const {
const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
- // Mac OS X requires FP not to be clobbered for backtracing purpose.
- if (STI.isTargetDarwin())
+  // iOS requires FP not to be clobbered for backtracing purposes.
+ if (STI.isTargetIOS())
return true;
const MachineFrameInfo *MFI = MF.getFrameInfo();
// Always eliminate non-leaf frame pointers.
- return ((DisableFramePointerElim(MF) && MFI->hasCalls()) ||
+ return ((MF.getTarget().Options.DisableFramePointerElim(MF) &&
+ MFI->hasCalls()) ||
RegInfo->needsStackRealignment(MF) ||
MFI->hasVarSizedObjects() ||
MFI->isFrameAddressTaken());
@@ -72,7 +81,7 @@ ARMFrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
}
-static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
+static bool isCalleeSavedRegister(unsigned Reg, const uint16_t *CSRegs) {
for (unsigned i = 0; CSRegs[i]; ++i)
if (Reg == CSRegs[i])
return true;
@@ -81,7 +90,7 @@ static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
static bool isCSRestore(MachineInstr *MI,
const ARMBaseInstrInfo &TII,
- const unsigned *CSRegs) {
+ const uint16_t *CSRegs) {
// Integer spill area is handled with "pop".
if (MI->getOpcode() == ARM::LDMIA_RET ||
MI->getOpcode() == ARM::t2LDMIA_RET ||
@@ -140,10 +149,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
// belongs to which callee-save spill areas.
unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
int FramePtrSpillFI = 0;
-
- // All calls are tail calls in GHC calling conv, and functions have no prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
- return;
+ int D8SpillFI = 0;
// Allocate the vararg register save area. This is not counted in NumBytes.
if (VARegSaveSize)
@@ -177,7 +183,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
case ARM::R11:
if (Reg == FramePtr)
FramePtrSpillFI = FI;
- if (STI.isTargetDarwin()) {
+ if (STI.isTargetIOS()) {
AFI->addGPRCalleeSavedArea2Frame(FI);
GPRCS2Size += 4;
} else {
@@ -186,8 +192,13 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
}
break;
default:
- AFI->addDPRCalleeSavedAreaFrame(FI);
- DPRCSSize += 8;
+ // This is a DPR. Exclude the aligned DPRCS2 spills.
+ if (Reg == ARM::D8)
+ D8SpillFI = FI;
+ if (Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs()) {
+ AFI->addDPRCalleeSavedAreaFrame(FI);
+ DPRCSSize += 8;
+ }
}
}
@@ -195,8 +206,8 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
if (GPRCS1Size > 0) MBBI++;
// Set FP to point to the stack slot that contains the previous FP.
- // For Darwin, FP is R7, which has now been stored in spill area 1.
- // Otherwise, if this is not Darwin, all the callee-saved registers go
+ // For iOS, FP is R7, which has now been stored in spill area 1.
+ // Otherwise, if this is not iOS, all the callee-saved registers go
// into spill area 1, including the FP in R11. In either case, it is
// now safe to emit this assignment.
bool HasFP = hasFP(MF);
@@ -232,7 +243,17 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
MBBI++;
}
- NumBytes = DPRCSOffset;
+ // Move past the aligned DPRCS2 area.
+ if (AFI->getNumAlignedDPRCS2Regs() > 0) {
+ MBBI = skipAlignedDPRCS2Spills(MBBI, AFI->getNumAlignedDPRCS2Regs());
+ // The code inserted by emitAlignedDPRCS2Spills realigns the stack, and
+ // leaves the stack pointer pointing to the DPRCS2 area.
+ //
+ // Adjust NumBytes to represent the stack slots below the DPRCS2 area.
+ NumBytes += MFI->getObjectOffset(D8SpillFI);
+ } else
+ NumBytes = DPRCSOffset;
+
if (NumBytes) {
// Adjust SP after all the callee-save spills.
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
@@ -259,7 +280,9 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
// If we need dynamic stack realignment, do it here. Be paranoid and make
// sure if we also have VLAs, we have a base pointer for frame access.
- if (RegInfo->needsStackRealignment(MF)) {
+ // If aligned NEON registers were spilled, the stack has already been
+ // realigned.
+ if (!AFI->getNumAlignedDPRCS2Regs() && RegInfo->needsStackRealignment(MF)) {
unsigned MaxAlign = MFI->getMaxAlignment();
assert (!AFI->isThumb1OnlyFunction());
if (!AFI->isThumbFunction()) {
@@ -315,8 +338,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- assert(MBBI->getDesc().isReturn() &&
- "Can only insert epilog into returning blocks");
+ assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc dl = MBBI->getDebugLoc();
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -332,16 +354,12 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
int NumBytes = (int)MFI->getStackSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
- // All calls are tail calls in GHC calling conv, and functions have no prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
- return;
-
if (!AFI->hasStackFrame()) {
if (NumBytes != 0)
emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
} else {
// Unwind MBBI to point to first LDR / VLDRD.
- const unsigned *CSRegs = RegInfo->getCalleeSavedRegs();
+ const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
if (MBBI != MBB.begin()) {
do
--MBBI;
@@ -365,7 +383,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
ARMCC::AL, 0, TII);
else {
// It's not possible to restore SP from FP in a single instruction.
- // For Darwin, this looks like:
+ // For iOS, this looks like:
// mov sp, r7
// sub sp, #24
// This is bad, if an interrupt is taken after the mov, sp is in an
@@ -404,17 +422,16 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
if (AFI->getGPRCalleeSavedArea1Size()) MBBI++;
}
- if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
- RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
+ if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNri) {
// Tail call return: adjust the stack pointer and jump to callee.
MBBI = MBB.getLastNonDebugInstr();
MachineOperand &JumpTarget = MBBI->getOperand(0);
// Jump to label or value in register.
- if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND) {
- unsigned TCOpcode = (RetOpcode == ARM::TCRETURNdi)
- ? (STI.isThumb() ? ARM::tTAILJMPd : ARM::TAILJMPd)
- : (STI.isThumb() ? ARM::tTAILJMPdND : ARM::TAILJMPdND);
+ if (RetOpcode == ARM::TCRETURNdi) {
+ unsigned TCOpcode = STI.isThumb() ?
+ (STI.isTargetIOS() ? ARM::tTAILJMPd : ARM::tTAILJMPdND) :
+ ARM::TAILJMPd;
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
if (JumpTarget.isGlobal())
MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
@@ -431,10 +448,6 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
BuildMI(MBB, MBBI, dl,
TII.get(STI.isThumb() ? ARM::tTAILJMPr : ARM::TAILJMPr)).
addReg(JumpTarget.getReg(), RegState::Kill);
- } else if (RetOpcode == ARM::TCRETURNriND) {
- BuildMI(MBB, MBBI, dl,
- TII.get(STI.isThumb() ? ARM::tTAILJMPrND : ARM::TAILJMPrND)).
- addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr *NewMI = prior(MBBI);
@@ -481,6 +494,10 @@ ARMFrameLowering::ResolveFrameIndexReference(const MachineFunction &MF,
else if (AFI->isDPRCalleeSavedAreaFrame(FI))
return Offset - AFI->getDPRCalleeSavedAreaOffset();
+ // SP can move around if there are allocas. We may also lose track of SP
+ // when emergency spilling inside a non-reserved call frame setup.
+ bool hasMovingSP = !hasReservedCallFrame(MF);
+
// When dynamically realigning the stack, use the frame pointer for
// parameters, and the stack/base pointer for locals.
if (RegInfo->needsStackRealignment(MF)) {
@@ -488,7 +505,7 @@ ARMFrameLowering::ResolveFrameIndexReference(const MachineFunction &MF,
if (isFixed) {
FrameReg = RegInfo->getFrameRegister(MF);
Offset = FPOffset;
- } else if (MFI->hasVarSizedObjects()) {
+ } else if (hasMovingSP) {
assert(RegInfo->hasBasePointer(MF) &&
"VLAs and dynamic stack alignment, but missing base pointer!");
FrameReg = RegInfo->getBaseRegister();
@@ -500,11 +517,10 @@ ARMFrameLowering::ResolveFrameIndexReference(const MachineFunction &MF,
if (hasFP(MF) && AFI->hasStackFrame()) {
// Use frame pointer to reference fixed objects. Use it for locals if
// there are VLAs (and thus the SP isn't reliable as a base).
- if (isFixed || (MFI->hasVarSizedObjects() &&
- !RegInfo->hasBasePointer(MF))) {
+ if (isFixed || (hasMovingSP && !RegInfo->hasBasePointer(MF))) {
FrameReg = RegInfo->getFrameRegister(MF);
return FPOffset;
- } else if (MFI->hasVarSizedObjects()) {
+ } else if (hasMovingSP) {
assert(RegInfo->hasBasePointer(MF) && "missing base pointer!");
if (AFI->isThumb2Function()) {
// Try to use the frame pointer if we can, else use the base pointer
@@ -551,6 +567,7 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
unsigned StmOpc, unsigned StrOpc,
bool NoGap,
bool(*Func)(unsigned, bool),
+ unsigned NumAlignedDPRCS2Regs,
unsigned MIFlags) const {
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
@@ -564,7 +581,11 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
unsigned LastReg = 0;
for (; i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- if (!(Func)(Reg, STI.isTargetDarwin())) continue;
+ if (!(Func)(Reg, STI.isTargetIOS())) continue;
+
+ // D-registers in the aligned area DPRCS2 are NOT spilled here.
+ if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
+ continue;
// Add the callee-saved register as live-in unless it's LR and
// @llvm.returnaddress is called. If LR is returned for
@@ -614,16 +635,15 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
const std::vector<CalleeSavedInfo> &CSI,
unsigned LdmOpc, unsigned LdrOpc,
bool isVarArg, bool NoGap,
- bool(*Func)(unsigned, bool)) const {
+ bool(*Func)(unsigned, bool),
+ unsigned NumAlignedDPRCS2Regs) const {
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
DebugLoc DL = MI->getDebugLoc();
unsigned RetOpcode = MI->getOpcode();
bool isTailCall = (RetOpcode == ARM::TCRETURNdi ||
- RetOpcode == ARM::TCRETURNdiND ||
- RetOpcode == ARM::TCRETURNri ||
- RetOpcode == ARM::TCRETURNriND);
+ RetOpcode == ARM::TCRETURNri);
SmallVector<unsigned, 4> Regs;
unsigned i = CSI.size();
@@ -632,7 +652,11 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
bool DeleteRet = false;
for (; i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- if (!(Func)(Reg, STI.isTargetDarwin())) continue;
+ if (!(Func)(Reg, STI.isTargetIOS())) continue;
+
+ // The aligned reloads from area DPRCS2 are not inserted here.
+ if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
+ continue;
if (Reg == ARM::LR && !isTailCall && !isVarArg && STI.hasV5TOps()) {
Reg = ARM::PC;
@@ -686,6 +710,247 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
}
}
+/// Emit aligned spill instructions for NumAlignedDPRCS2Regs D-registers
+/// starting from d8. Also insert stack realignment code and leave the stack
+/// pointer pointing to the d8 spill slot.
+static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned NumAlignedDPRCS2Regs,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) {
+ MachineFunction &MF = *MBB.getParent();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+
+ // Mark the D-register spill slots as properly aligned. Since MFI computes
+ // stack slot layout backwards, this can actually mean that the d-reg stack
+ // slot offsets end up wrong. The offset for d8 will always be correct.
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned DNum = CSI[i].getReg() - ARM::D8;
+ if (DNum >= 8)
+ continue;
+ int FI = CSI[i].getFrameIdx();
+ // The even-numbered registers will be 16-byte aligned, the odd-numbered
+ // registers will be 8-byte aligned.
+ MFI.setObjectAlignment(FI, DNum % 2 ? 8 : 16);
+
+ // The stack slot for D8 needs to be maximally aligned because this is
+ // actually the point where we align the stack pointer. MachineFrameInfo
+ // computes all offsets relative to the incoming stack pointer which is a
+ // bit weird when realigning the stack. Any extra padding for this
+ // over-alignment is not realized because the code inserted below adjusts
+ // the stack pointer by numregs * 8 before aligning the stack pointer.
+ if (DNum == 0)
+ MFI.setObjectAlignment(FI, MFI.getMaxAlignment());
+ }
+
+ // Move the stack pointer to the d8 spill slot, and align it at the same
+ // time. Leave the stack slot address in the scratch register r4.
+ //
+ // sub r4, sp, #numregs * 8
+ // bic r4, r4, #align - 1
+ // mov sp, r4
+ //
+ bool isThumb = AFI->isThumbFunction();
+ assert(!AFI->isThumb1OnlyFunction() && "Can't realign stack for thumb1");
+ AFI->setShouldRestoreSPFromFP(true);
+
+ // sub r4, sp, #numregs * 8
+ // The immediate is <= 64, so it doesn't need any special encoding.
+ unsigned Opc = isThumb ? ARM::t2SUBri : ARM::SUBri;
+ AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
+ .addReg(ARM::SP)
+ .addImm(8 * NumAlignedDPRCS2Regs)));
+
+ // bic r4, r4, #align-1
+ Opc = isThumb ? ARM::t2BICri : ARM::BICri;
+ unsigned MaxAlign = MF.getFrameInfo()->getMaxAlignment();
+ AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
+ .addReg(ARM::R4, RegState::Kill)
+ .addImm(MaxAlign - 1)));
+
+ // mov sp, r4
+ // The stack pointer must be adjusted before spilling anything, otherwise
+ // the stack slots could be clobbered by an interrupt handler.
+ // Leave r4 live, it is used below.
+ Opc = isThumb ? ARM::tMOVr : ARM::MOVr;
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(Opc), ARM::SP)
+ .addReg(ARM::R4);
+ MIB = AddDefaultPred(MIB);
+ if (!isThumb)
+ AddDefaultCC(MIB);
+
+ // Now spill NumAlignedDPRCS2Regs registers starting from d8.
+ // r4 holds the stack slot address.
+ unsigned NextReg = ARM::D8;
+
+ // 16-byte aligned vst1.64 with 4 d-regs and address writeback.
+ // The writeback is only needed when emitting two vst1.64 instructions.
+ if (NumAlignedDPRCS2Regs >= 6) {
+ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
+ ARM::QQPRRegisterClass);
+ MBB.addLiveIn(SupReg);
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Qwb_fixed),
+ ARM::R4)
+ .addReg(ARM::R4, RegState::Kill).addImm(16)
+ .addReg(NextReg)
+ .addReg(SupReg, RegState::ImplicitKill));
+ NextReg += 4;
+ NumAlignedDPRCS2Regs -= 4;
+ }
+
+ // We won't modify r4 beyond this point. It currently points to the next
+ // register to be spilled.
+ unsigned R4BaseReg = NextReg;
+
+ // 16-byte aligned vst1.64 with 4 d-regs, no writeback.
+ if (NumAlignedDPRCS2Regs >= 4) {
+ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
+ ARM::QQPRRegisterClass);
+ MBB.addLiveIn(SupReg);
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Q))
+ .addReg(ARM::R4).addImm(16).addReg(NextReg)
+ .addReg(SupReg, RegState::ImplicitKill));
+ NextReg += 4;
+ NumAlignedDPRCS2Regs -= 4;
+ }
+
+ // 16-byte aligned vst1.64 with 2 d-regs.
+ if (NumAlignedDPRCS2Regs >= 2) {
+ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
+ ARM::QPRRegisterClass);
+ MBB.addLiveIn(SupReg);
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VST1q64))
+ .addReg(ARM::R4).addImm(16).addReg(SupReg));
+ NextReg += 2;
+ NumAlignedDPRCS2Regs -= 2;
+ }
+
+ // Finally, use a vanilla vstr.64 for the remaining odd register.
+ if (NumAlignedDPRCS2Regs) {
+ MBB.addLiveIn(NextReg);
+ // vstr.64 uses addrmode5 which has an offset scale of 4.
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VSTRD))
+ .addReg(NextReg)
+ .addReg(ARM::R4).addImm((NextReg-R4BaseReg)*2));
+ }
+
+ // The last spill instruction inserted should kill the scratch register r4.
+ llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI);
+}
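
For reference, a minimal C++ sketch of the address arithmetic the sub/bic/mov sequence above performs, assuming a power-of-two alignment (the helper name here is illustrative, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Illustrative sketch: the aligned d8 spill-slot address that the
    // sub/bic/mov sequence leaves in r4 (and then in sp).
    uint32_t alignedSpillBase(uint32_t SP, unsigned NumRegs, uint32_t Align) {
      assert(Align != 0 && (Align & (Align - 1)) == 0 && "power-of-two alignment");
      uint32_t R4 = SP - 8 * NumRegs;  // sub r4, sp, #numregs * 8
      R4 &= ~(Align - 1);              // bic r4, r4, #align - 1
      return R4;                       // mov sp, r4
    }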
+
+/// Skip past the code inserted by emitAlignedDPRCS2Spills, and return an
+/// iterator to the following instruction.
+static MachineBasicBlock::iterator
+skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
+ unsigned NumAlignedDPRCS2Regs) {
+ // sub r4, sp, #numregs * 8
+ // bic r4, r4, #align - 1
+ // mov sp, r4
+ ++MI; ++MI; ++MI;
+ assert(MI->mayStore() && "Expecting spill instruction");
+
+ // All of these switch cases fall through.
+ switch(NumAlignedDPRCS2Regs) {
+ case 7:
+ ++MI;
+ assert(MI->mayStore() && "Expecting spill instruction");
+ default:
+ ++MI;
+ assert(MI->mayStore() && "Expecting spill instruction");
+ case 1:
+ case 2:
+ case 4:
+ assert(MI->killsRegister(ARM::R4) && "Missed kill flag");
+ ++MI;
+ }
+ return MI;
+}
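
As a cross-check on the fall-through switch above, here is a sketch (hypothetical helper, derived from emitAlignedDPRCS2Spills) of how many spill instructions are emitted for a given register count:

    // Illustrative: number of spill instructions emitAlignedDPRCS2Spills
    // inserts for NumRegs in [1, 7].
    unsigned numAlignedSpillInsts(unsigned NumRegs) {
      unsigned Count = 0;
      if (NumRegs >= 6) { ++Count; NumRegs -= 4; }  // vst1.64, 4 regs, writeback
      if (NumRegs >= 4) { ++Count; NumRegs -= 4; }  // vst1.64, 4 regs
      if (NumRegs >= 2) { ++Count; NumRegs -= 2; }  // vst1.64, 2 regs
      if (NumRegs != 0) ++Count;                    // vstr.64, odd register
      return Count;  // yields 1, 1, 2, 1, 2, 2, 3 for NumRegs = 1..7
    }

This matches the switch: case 7 skips three stores, the default cases (3, 5 and 6) skip two, and cases 1, 2 and 4 skip one.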
+
+/// Emit aligned reload instructions for NumAlignedDPRCS2Regs D-registers
+/// starting from d8. These instructions are assumed to execute while the
+/// stack is still aligned, unlike the code inserted by emitPopInst.
+static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned NumAlignedDPRCS2Regs,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) {
+ MachineFunction &MF = *MBB.getParent();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ DebugLoc DL = MI->getDebugLoc();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ // Find the frame index assigned to d8.
+ int D8SpillFI = 0;
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i)
+ if (CSI[i].getReg() == ARM::D8) {
+ D8SpillFI = CSI[i].getFrameIdx();
+ break;
+ }
+
+ // Materialize the address of the d8 spill slot into the scratch register r4.
+ // This can be fairly complicated if the stack frame is large, so just use
+ // the normal frame index elimination mechanism to do it. This code runs as
+ // the initial part of the epilogue, where the stack and base pointers haven't
+ // been changed yet.
+ bool isThumb = AFI->isThumbFunction();
+ assert(!AFI->isThumb1OnlyFunction() && "Can't realign stack for thumb1");
+
+ unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
+ AddDefaultCC(AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
+ .addFrameIndex(D8SpillFI).addImm(0)));
+
+ // Now restore NumAlignedDPRCS2Regs registers starting from d8.
+ unsigned NextReg = ARM::D8;
+
+ // 16-byte aligned vld1.64 with 4 d-regs and writeback.
+ if (NumAlignedDPRCS2Regs >= 6) {
+ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
+ ARM::QQPRRegisterClass);
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Qwb_fixed), NextReg)
+ .addReg(ARM::R4, RegState::Define)
+ .addReg(ARM::R4, RegState::Kill).addImm(16)
+ .addReg(SupReg, RegState::ImplicitDefine));
+ NextReg += 4;
+ NumAlignedDPRCS2Regs -= 4;
+ }
+
+ // We won't modify r4 beyond this point. It currently points to the next
+ // register to be reloaded.
+ unsigned R4BaseReg = NextReg;
+
+ // 16-byte aligned vld1.64 with 4 d-regs, no writeback.
+ if (NumAlignedDPRCS2Regs >= 4) {
+ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
+ ARM::QQPRRegisterClass);
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Q), NextReg)
+ .addReg(ARM::R4).addImm(16)
+ .addReg(SupReg, RegState::ImplicitDefine));
+ NextReg += 4;
+ NumAlignedDPRCS2Regs -= 4;
+ }
+
+ // 16-byte aligned vld1.64 with 2 d-regs.
+ if (NumAlignedDPRCS2Regs >= 2) {
+ unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
+ ARM::QPRRegisterClass);
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLD1q64), SupReg)
+ .addReg(ARM::R4).addImm(16));
+ NextReg += 2;
+ NumAlignedDPRCS2Regs -= 2;
+ }
+
+ // Finally, use a vanilla vldr.64 for the remaining odd register.
+ if (NumAlignedDPRCS2Regs)
+ AddDefaultPred(BuildMI(MBB, MI, DL, TII.get(ARM::VLDRD), NextReg)
+ .addReg(ARM::R4).addImm(2*(NextReg-R4BaseReg)));
+
+ // The last reload kills the scratch register r4.
+ llvm::prior(MI)->addRegisterKilled(ARM::R4, TRI);
+}
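
The (NextReg-R4BaseReg)*2 immediates above are addrmode5 encodings of byte offsets from r4; a small sketch of that scaling, with hypothetical names:

    // Illustrative: addrmode5 immediate for DReg's slot relative to DBase's.
    unsigned addrMode5Imm(unsigned DReg, unsigned DBase) {
      unsigned ByteOffset = (DReg - DBase) * 8;  // each D register is 8 bytes
      return ByteOffset / 4;                     // addrmode5 scales offsets by 4
    }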
+
bool ARMFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
@@ -700,12 +965,19 @@ bool ARMFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
unsigned PushOneOpc = AFI->isThumbFunction() ?
ARM::t2STR_PRE : ARM::STR_PRE_IMM;
unsigned FltOpc = ARM::VSTMDDB_UPD;
- emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea1Register,
+ unsigned NumAlignedDPRCS2Regs = AFI->getNumAlignedDPRCS2Regs();
+ emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea1Register, 0,
MachineInstr::FrameSetup);
- emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea2Register,
+ emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea2Register, 0,
MachineInstr::FrameSetup);
emitPushInst(MBB, MI, CSI, FltOpc, 0, true, &isARMArea3Register,
- MachineInstr::FrameSetup);
+ NumAlignedDPRCS2Regs, MachineInstr::FrameSetup);
+
+ // The code above does not insert spill code for the aligned DPRCS2 registers.
+ // The stack realignment code will be inserted between the push instructions
+ // and these spills.
+ if (NumAlignedDPRCS2Regs)
+ emitAlignedDPRCS2Spills(MBB, MI, NumAlignedDPRCS2Regs, CSI, TRI);
return true;
}
@@ -720,15 +992,22 @@ bool ARMFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
+ unsigned NumAlignedDPRCS2Regs = AFI->getNumAlignedDPRCS2Regs();
+
+ // The emitPopInst calls below do not insert reloads for the aligned DPRCS2
+ // registers. Do that here instead.
+ if (NumAlignedDPRCS2Regs)
+ emitAlignedDPRCS2Restores(MBB, MI, NumAlignedDPRCS2Regs, CSI, TRI);
unsigned PopOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_UPD : ARM::LDMIA_UPD;
unsigned LdrOpc = AFI->isThumbFunction() ? ARM::t2LDR_POST :ARM::LDR_POST_IMM;
unsigned FltOpc = ARM::VLDMDIA_UPD;
- emitPopInst(MBB, MI, CSI, FltOpc, 0, isVarArg, true, &isARMArea3Register);
+ emitPopInst(MBB, MI, CSI, FltOpc, 0, isVarArg, true, &isARMArea3Register,
+ NumAlignedDPRCS2Regs);
emitPopInst(MBB, MI, CSI, PopOpc, LdrOpc, isVarArg, false,
- &isARMArea2Register);
+ &isARMArea2Register, 0);
emitPopInst(MBB, MI, CSI, PopOpc, LdrOpc, isVarArg, false,
- &isARMArea1Register);
+ &isARMArea1Register, 0);
return true;
}
@@ -852,6 +1131,55 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
return Limit;
}
+// In functions that realign the stack, it can be an advantage to spill the
+// callee-saved vector registers after realigning the stack. The vst1 and vld1
+// instructions take alignment hints that can improve performance.
+//
+static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
+ MF.getInfo<ARMFunctionInfo>()->setNumAlignedDPRCS2Regs(0);
+ if (!SpillAlignedNEONRegs)
+ return;
+
+ // Naked functions don't spill callee-saved registers.
+ if (MF.getFunction()->hasFnAttr(Attribute::Naked))
+ return;
+
+ // We are planning to use NEON instructions vst1 / vld1.
+ if (!MF.getTarget().getSubtarget<ARMSubtarget>().hasNEON())
+ return;
+
+ // Don't bother if the default stack alignment is sufficiently high.
+ if (MF.getTarget().getFrameLowering()->getStackAlignment() >= 8)
+ return;
+
+ // Aligned spills require stack realignment.
+ const ARMBaseRegisterInfo *RegInfo =
+ static_cast<const ARMBaseRegisterInfo*>(MF.getTarget().getRegisterInfo());
+ if (!RegInfo->canRealignStack(MF))
+ return;
+
+ // We always spill contiguous d-registers starting from d8. Count how many
+ // need spilling. The register allocator will almost always use the
+ // callee-saved registers in order, but it can happen that there are holes in
+ // the range. Registers above the hole will be spilled to the standard DPRCS
+ // area.
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ unsigned NumSpills = 0;
+ for (; NumSpills < 8; ++NumSpills)
+ if (!MRI.isPhysRegOrOverlapUsed(ARM::D8 + NumSpills))
+ break;
+
+ // Don't do this for just one d-register. It's not worth it.
+ if (NumSpills < 2)
+ return;
+
+ // Spill the first NumSpills D-registers after realigning the stack.
+ MF.getInfo<ARMFunctionInfo>()->setNumAlignedDPRCS2Regs(NumSpills);
+
+ // A scratch register is required for the vst1 / vld1 instructions.
+ MF.getRegInfo().setPhysRegUsed(ARM::R4);
+}
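
A condensed sketch of the counting rule above (hypothetical helper, using the same MachineRegisterInfo query as the code): with d8 and d9 used but d10 free, the loop stops at the hole, so only d8-d9 get aligned spills and any higher used registers go to the standard DPRCS area.

    // Illustrative restatement of the loop in checkNumAlignedDPRCS2Regs.
    unsigned countAlignedDPRCS2Regs(const MachineRegisterInfo &MRI) {
      unsigned N = 0;
      while (N < 8 && MRI.isPhysRegOrOverlapUsed(ARM::D8 + N))
        ++N;                 // stop at the first unused register (a hole)
      return N < 2 ? 0 : N;  // a single d-register is not worth it
    }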
+
void
ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
@@ -898,28 +1226,22 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
MF.getRegInfo().setPhysRegUsed(ARM::R4);
}
+ // See if we can spill vector registers to aligned stack.
+ checkNumAlignedDPRCS2Regs(MF);
+
// Spill the BasePtr if it's used.
if (RegInfo->hasBasePointer(MF))
MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
// Don't spill FP if the frame can be eliminated. This is determined
// by scanning the callee-save registers to see if any is used.
- const unsigned *CSRegs = RegInfo->getCalleeSavedRegs();
+ const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
bool Spilled = false;
- if (MF.getRegInfo().isPhysRegUsed(Reg)) {
+ if (MF.getRegInfo().isPhysRegOrOverlapUsed(Reg)) {
Spilled = true;
CanEliminateFrame = false;
- } else {
- // Check alias registers too.
- for (const unsigned *Aliases =
- RegInfo->getAliasSet(Reg); *Aliases; ++Aliases) {
- if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
- Spilled = true;
- CanEliminateFrame = false;
- }
- }
}
if (!ARM::GPRRegisterClass->contains(Reg))
@@ -928,7 +1250,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
if (Spilled) {
NumGPRSpills++;
- if (!STI.isTargetDarwin()) {
+ if (!STI.isTargetIOS()) {
if (Reg == ARM::LR)
LRSpilled = true;
CS1Spilled = true;
@@ -948,7 +1270,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
break;
}
} else {
- if (!STI.isTargetDarwin()) {
+ if (!STI.isTargetIOS()) {
UnspilledCS1GPRs.push_back(Reg);
continue;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h
index 61bb8af..a1c2b93 100644
--- a/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMFrameLowering.h
@@ -63,12 +63,13 @@ public:
void emitPushInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI, unsigned StmOpc,
unsigned StrOpc, bool NoGap,
- bool(*Func)(unsigned, bool),
+ bool(*Func)(unsigned, bool), unsigned NumAlignedDPRCS2Regs,
unsigned MIFlags = 0) const;
void emitPopInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI, unsigned LdmOpc,
unsigned LdrOpc, bool isVarArg, bool NoGap,
- bool(*Func)(unsigned, bool)) const;
+ bool(*Func)(unsigned, bool),
+ unsigned NumAlignedDPRCS2Regs) const;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
index 787f6a2..a5fd15b 100644
--- a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -21,7 +21,7 @@ static bool hasRAWHazard(MachineInstr *DefMI, MachineInstr *MI,
// FIXME: Detect integer instructions properly.
const MCInstrDesc &MCID = MI->getDesc();
unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
- if (MCID.mayStore())
+ if (MI->mayStore())
return false;
unsigned Opcode = MCID.getOpcode();
if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
@@ -38,9 +38,6 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
MachineInstr *MI = SU->getInstr();
if (!MI->isDebugValue()) {
- if (ITBlockSize && MI != ITBlockMIs[ITBlockSize-1])
- return Hazard;
-
// Look for special VMLA / VMLS hazards. A VMUL / VADD / VSUB following
// a VMLA / VMLS will cause 4 cycle stall.
const MCInstrDesc &MCID = MI->getDesc();
@@ -48,9 +45,9 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
MachineInstr *DefMI = LastMI;
const MCInstrDesc &LastMCID = LastMI->getDesc();
// Skip over one non-VFP / NEON instruction.
- if (!LastMCID.isBarrier() &&
+ if (!LastMI->isBarrier() &&
// On A9, AGU and NEON/FPU are muxed.
- !(STI.isCortexA9() && (LastMCID.mayLoad() || LastMCID.mayStore())) &&
+ !(STI.isCortexA9() && (LastMI->mayLoad() || LastMI->mayStore())) &&
(LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
MachineBasicBlock::iterator I = LastMI;
if (I != LastMI->getParent()->begin()) {
@@ -76,30 +73,11 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
void ARMHazardRecognizer::Reset() {
LastMI = 0;
FpMLxStalls = 0;
- ITBlockSize = 0;
ScoreboardHazardRecognizer::Reset();
}
void ARMHazardRecognizer::EmitInstruction(SUnit *SU) {
MachineInstr *MI = SU->getInstr();
- unsigned Opcode = MI->getOpcode();
- if (ITBlockSize) {
- --ITBlockSize;
- } else if (Opcode == ARM::t2IT) {
- unsigned Mask = MI->getOperand(1).getImm();
- unsigned NumTZ = CountTrailingZeros_32(Mask);
- assert(NumTZ <= 3 && "Invalid IT mask!");
- ITBlockSize = 4 - NumTZ;
- MachineBasicBlock::iterator I = MI;
- for (unsigned i = 0; i < ITBlockSize; ++i) {
- // Advance to the next instruction, skipping any dbg_value instructions.
- do {
- ++I;
- } while (I->isDebugValue());
- ITBlockMIs[ITBlockSize-1-i] = &*I;
- }
- }
-
if (!MI->isDebugValue()) {
LastMI = MI;
FpMLxStalls = 0;
diff --git a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h
index 2bc218d..98bfc4c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h
+++ b/contrib/llvm/lib/Target/ARM/ARMHazardRecognizer.h
@@ -23,6 +23,10 @@ class ARMBaseRegisterInfo;
class ARMSubtarget;
class MachineInstr;
+/// ARMHazardRecognizer handles special constraints that are not expressed in
+/// the scheduling itinerary. This is only used during postRA scheduling. The
+/// ARM preRA scheduler uses an unspecialized instance of the
+/// ScoreboardHazardRecognizer.
class ARMHazardRecognizer : public ScoreboardHazardRecognizer {
const ARMBaseInstrInfo &TII;
const ARMBaseRegisterInfo &TRI;
@@ -30,8 +34,6 @@ class ARMHazardRecognizer : public ScoreboardHazardRecognizer {
MachineInstr *LastMI;
unsigned FpMLxStalls;
- unsigned ITBlockSize; // No. of MIs in current IT block yet to be scheduled.
- MachineInstr *ITBlockMIs[4];
public:
ARMHazardRecognizer(const InstrItineraryData *ItinData,
@@ -40,7 +42,7 @@ public:
const ARMSubtarget &sti,
const ScheduleDAG *DAG) :
ScoreboardHazardRecognizer(ItinData, DAG, "post-RA-sched"), TII(tii),
- TRI(tri), STI(sti), LastMI(0), ITBlockSize(0) {}
+ TRI(tri), STI(sti), LastMI(0) {}
virtual HazardType getHazardType(SUnit *SU, int Stalls);
virtual void Reset();
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 5ee009c..1eafbbc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -244,6 +244,7 @@ private:
/// SelectCMOVOp - Select CMOV instructions for ARM.
SDNode *SelectCMOVOp(SDNode *N);
+ SDNode *SelectConditionalOp(SDNode *N);
SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
ARMCC::CondCodes CCVal, SDValue CCR,
SDValue InFlag);
@@ -923,7 +924,7 @@ bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
// The maximum alignment is equal to the memory size being referenced.
unsigned LSNAlign = LSN->getAlignment();
unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
- if (LSNAlign > MemSize && MemSize > 1)
+ if (LSNAlign >= MemSize && MemSize > 1)
Alignment = MemSize;
} else {
// All other uses of addrmode6 are for intrinsics. For now just record
@@ -1549,6 +1550,52 @@ SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
return CurDAG->getTargetConstant(Alignment, MVT::i32);
}
+// Get the register stride update opcode of a VLD/VST instruction that
+// is otherwise equivalent to the given fixed stride updating instruction.
+static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
+ switch (Opc) {
+ default: break;
+ case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
+ case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
+ case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
+ case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
+ case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
+ case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
+ case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
+ case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
+
+ case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
+ case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
+ case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
+ case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
+ case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
+ case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
+ case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
+ case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
+ case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
+ case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;
+
+ case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
+ case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
+ case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
+ case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
+ case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
+ case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;
+
+ case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
+ case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
+ case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
+ case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
+ case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
+ case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;
+
+ case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
+ case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
+ case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
+ }
+ return Opc; // If not one we handle, return it unchanged.
+}
+
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
unsigned *DOpcodes, unsigned *QOpcodes0,
unsigned *QOpcodes1) {
@@ -1612,7 +1659,15 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
Ops.push_back(Align);
if (isUpdating) {
SDValue Inc = N->getOperand(AddrOpIdx + 1);
- Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
+ // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
+ // case entirely when the rest are updated to that form, too.
+ if ((NumVecs == 1 || NumVecs == 2) && !isa<ConstantSDNode>(Inc.getNode()))
+ Opc = getVLDSTRegisterUpdateOpcode(Opc);
+ // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
+ // check for that explicitly too. Horribly hacky, but temporary.
+ if ((NumVecs != 1 && NumVecs != 2 && Opc != ARM::VLD1q64wb_fixed) ||
+ !isa<ConstantSDNode>(Inc.getNode()))
+ Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
}
Ops.push_back(Pred);
Ops.push_back(Reg0);
@@ -1754,7 +1809,15 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
Ops.push_back(Align);
if (isUpdating) {
SDValue Inc = N->getOperand(AddrOpIdx + 1);
- Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
+ // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
+ // case entirely when the rest are updated to that form, too.
+ if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
+ Opc = getVLDSTRegisterUpdateOpcode(Opc);
+ // We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
+ // check for that explicitly too. Horribly hacky, but temporary.
+ if ((NumVecs > 2 && Opc != ARM::VST1q64wb_fixed) ||
+ !isa<ConstantSDNode>(Inc.getNode()))
+ Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
}
Ops.push_back(SrcReg);
Ops.push_back(Pred);
@@ -1977,8 +2040,14 @@ SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
Ops.push_back(MemAddr);
Ops.push_back(Align);
if (isUpdating) {
+ // Fixed-stride update instructions don't have an explicit writeback
+ // operand; it's implicit in the opcode itself.
SDValue Inc = N->getOperand(2);
- Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
+ if (!isa<ConstantSDNode>(Inc.getNode()))
+ Ops.push_back(Inc);
+ // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
+ else if (NumVecs > 2)
+ Ops.push_back(Reg0);
}
Ops.push_back(Pred);
Ops.push_back(Reg0);
@@ -2116,7 +2185,6 @@ SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
default:
llvm_unreachable("Unknown so_reg opcode!");
- break;
}
SDValue SOShImm =
CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
@@ -2227,9 +2295,6 @@ SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
// Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
// Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
// Pattern complexity = 18 cost = 1 size = 0
- SDValue CPTmp0;
- SDValue CPTmp1;
- SDValue CPTmp2;
if (Subtarget->isThumb()) {
SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
CCVal, CCR, InFlag);
@@ -2286,8 +2351,7 @@ SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
unsigned Opc = 0;
switch (VT.getSimpleVT().SimpleTy) {
- default: assert(false && "Illegal conditional move type!");
- break;
+ default: llvm_unreachable("Illegal conditional move type!");
case MVT::i32:
Opc = Subtarget->isThumb()
? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
@@ -2303,6 +2367,115 @@ SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
}
+SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
+ SDValue FalseVal = N->getOperand(0);
+ SDValue TrueVal = N->getOperand(1);
+ ARMCC::CondCodes CCVal =
+ (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ SDValue CCR = N->getOperand(3);
+ assert(CCR.getOpcode() == ISD::Register);
+ SDValue InFlag = N->getOperand(4);
+ SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
+ SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
+
+ if (Subtarget->isThumb()) {
+ SDValue CPTmp0;
+ SDValue CPTmp1;
+ if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected node");
+ case ARMISD::CAND: Opc = ARM::t2ANDCCrs; break;
+ case ARMISD::COR: Opc = ARM::t2ORRCCrs; break;
+ case ARMISD::CXOR: Opc = ARM::t2EORCCrs; break;
+ }
+ SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
+ }
+
+ ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
+ if (T) {
+ unsigned TrueImm = T->getZExtValue();
+ if (is_t2_so_imm(TrueImm)) {
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected node");
+ case ARMISD::CAND: Opc = ARM::t2ANDCCri; break;
+ case ARMISD::COR: Opc = ARM::t2ORRCCri; break;
+ case ARMISD::CXOR: Opc = ARM::t2EORCCri; break;
+ }
+ SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
+ SDValue Ops[] = { FalseVal, True, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
+ }
+ }
+
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected node");
+ case ARMISD::CAND: Opc = ARM::t2ANDCCrr; break;
+ case ARMISD::COR: Opc = ARM::t2ORRCCrr; break;
+ case ARMISD::CXOR: Opc = ARM::t2EORCCrr; break;
+ }
+ SDValue Ops[] = { FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
+ }
+
+ SDValue CPTmp0;
+ SDValue CPTmp1;
+ SDValue CPTmp2;
+ if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected node");
+ case ARMISD::CAND: Opc = ARM::ANDCCrsi; break;
+ case ARMISD::COR: Opc = ARM::ORRCCrsi; break;
+ case ARMISD::CXOR: Opc = ARM::EORCCrsi; break;
+ }
+ SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
+ }
+
+ if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected node");
+ case ARMISD::CAND: Opc = ARM::ANDCCrsr; break;
+ case ARMISD::COR: Opc = ARM::ORRCCrsr; break;
+ case ARMISD::CXOR: Opc = ARM::EORCCrsr; break;
+ }
+ SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
+ }
+
+ ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
+ if (T) {
+ unsigned TrueImm = T->getZExtValue();
+ if (is_so_imm(TrueImm)) {
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected node");
+ case ARMISD::CAND: Opc = ARM::ANDCCri; break;
+ case ARMISD::COR: Opc = ARM::ORRCCri; break;
+ case ARMISD::CXOR: Opc = ARM::EORCCri; break;
+ }
+ SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
+ SDValue Ops[] = { FalseVal, True, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
+ }
+ }
+
+ unsigned Opc;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected node");
+ case ARMISD::CAND: Opc = ARM::ANDCCrr; break;
+ case ARMISD::COR: Opc = ARM::ORRCCrr; break;
+ case ARMISD::CXOR: Opc = ARM::EORCCrr; break;
+ }
+ SDValue Ops[] = { FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
+ return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
+}
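
A hedged note on the semantics the new selector matches: the predicated ALU opcodes keep the false value when the condition fails, with the destination tied to FalseVal, roughly:

    // Result of a selected (t2)ANDCC/ORRCC/EORCC instruction (sketch):
    //   Result = CondPasses ? (FalseVal op TrueVal) : FalseVal;
    // i.e. one conditionally-executed AND/ORR/EOR instead of op plus CMOV.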
+
/// Target-specific DAG combining for ISD::XOR.
/// Target-independent combining lowers SELECT_CC nodes of the form
/// select_cc setg[ge] X, 0, X, -X
@@ -2316,7 +2489,6 @@ SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
SDValue XORSrc0 = N->getOperand(0);
SDValue XORSrc1 = N->getOperand(1);
- DebugLoc DL = N->getDebugLoc();
EVT VT = N->getValueType(0);
if (DisableARMIntABS)
@@ -2641,6 +2813,10 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::CMOV:
return SelectCMOVOp(N);
+ case ARMISD::CAND:
+ case ARMISD::COR:
+ case ARMISD::CXOR:
+ return SelectConditionalOp(N);
case ARMISD::VZIP: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
@@ -2649,7 +2825,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case MVT::v8i8: Opc = ARM::VZIPd8; break;
case MVT::v4i16: Opc = ARM::VZIPd16; break;
case MVT::v2f32:
- case MVT::v2i32: Opc = ARM::VZIPd32; break;
+ // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
+ case MVT::v2i32: Opc = ARM::VTRNd32; break;
case MVT::v16i8: Opc = ARM::VZIPq8; break;
case MVT::v8i16: Opc = ARM::VZIPq16; break;
case MVT::v4f32:
@@ -2668,7 +2845,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case MVT::v8i8: Opc = ARM::VUZPd8; break;
case MVT::v4i16: Opc = ARM::VUZPd16; break;
case MVT::v2f32:
- case MVT::v2i32: Opc = ARM::VUZPd32; break;
+ // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
+ case MVT::v2i32: Opc = ARM::VTRNd32; break;
case MVT::v16i8: Opc = ARM::VUZPq8; break;
case MVT::v8i16: Opc = ARM::VUZPq16; break;
case MVT::v4f32:
@@ -2715,8 +2893,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::VLD2DUP: {
- unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo, ARM::VLD2DUPd16Pseudo,
- ARM::VLD2DUPd32Pseudo };
+ unsigned Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
+ ARM::VLD2DUPd32 };
return SelectVLDDup(N, false, 2, Opcodes);
}
@@ -2733,8 +2911,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::VLD2DUP_UPD: {
- unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo_UPD, ARM::VLD2DUPd16Pseudo_UPD,
- ARM::VLD2DUPd32Pseudo_UPD };
+ unsigned Opcodes[] = { ARM::VLD2DUPd8wb_fixed, ARM::VLD2DUPd16wb_fixed,
+ ARM::VLD2DUPd32wb_fixed };
return SelectVLDDup(N, true, 2, Opcodes);
}
@@ -2751,24 +2929,29 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::VLD1_UPD: {
- unsigned DOpcodes[] = { ARM::VLD1d8_UPD, ARM::VLD1d16_UPD,
- ARM::VLD1d32_UPD, ARM::VLD1d64_UPD };
- unsigned QOpcodes[] = { ARM::VLD1q8Pseudo_UPD, ARM::VLD1q16Pseudo_UPD,
- ARM::VLD1q32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD };
+ unsigned DOpcodes[] = { ARM::VLD1d8wb_fixed, ARM::VLD1d16wb_fixed,
+ ARM::VLD1d32wb_fixed, ARM::VLD1d64wb_fixed };
+ unsigned QOpcodes[] = { ARM::VLD1q8wb_fixed,
+ ARM::VLD1q16wb_fixed,
+ ARM::VLD1q32wb_fixed,
+ ARM::VLD1q64wb_fixed };
return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
}
case ARMISD::VLD2_UPD: {
- unsigned DOpcodes[] = { ARM::VLD2d8Pseudo_UPD, ARM::VLD2d16Pseudo_UPD,
- ARM::VLD2d32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD };
- unsigned QOpcodes[] = { ARM::VLD2q8Pseudo_UPD, ARM::VLD2q16Pseudo_UPD,
- ARM::VLD2q32Pseudo_UPD };
+ unsigned DOpcodes[] = { ARM::VLD2d8wb_fixed,
+ ARM::VLD2d16wb_fixed,
+ ARM::VLD2d32wb_fixed,
+ ARM::VLD1q64wb_fixed};
+ unsigned QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
+ ARM::VLD2q16PseudoWB_fixed,
+ ARM::VLD2q32PseudoWB_fixed };
return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
}
case ARMISD::VLD3_UPD: {
unsigned DOpcodes[] = { ARM::VLD3d8Pseudo_UPD, ARM::VLD3d16Pseudo_UPD,
- ARM::VLD3d32Pseudo_UPD, ARM::VLD1d64TPseudo_UPD };
+ ARM::VLD3d32Pseudo_UPD, ARM::VLD1q64wb_fixed};
unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
ARM::VLD3q16Pseudo_UPD,
ARM::VLD3q32Pseudo_UPD };
@@ -2780,7 +2963,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ARMISD::VLD4_UPD: {
unsigned DOpcodes[] = { ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD,
- ARM::VLD4d32Pseudo_UPD, ARM::VLD1d64QPseudo_UPD };
+ ARM::VLD4d32Pseudo_UPD, ARM::VLD1q64wb_fixed};
unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
ARM::VLD4q16Pseudo_UPD,
ARM::VLD4q32Pseudo_UPD };
@@ -2815,24 +2998,29 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
}
case ARMISD::VST1_UPD: {
- unsigned DOpcodes[] = { ARM::VST1d8_UPD, ARM::VST1d16_UPD,
- ARM::VST1d32_UPD, ARM::VST1d64_UPD };
- unsigned QOpcodes[] = { ARM::VST1q8Pseudo_UPD, ARM::VST1q16Pseudo_UPD,
- ARM::VST1q32Pseudo_UPD, ARM::VST1q64Pseudo_UPD };
+ unsigned DOpcodes[] = { ARM::VST1d8wb_fixed, ARM::VST1d16wb_fixed,
+ ARM::VST1d32wb_fixed, ARM::VST1d64wb_fixed };
+ unsigned QOpcodes[] = { ARM::VST1q8wb_fixed,
+ ARM::VST1q16wb_fixed,
+ ARM::VST1q32wb_fixed,
+ ARM::VST1q64wb_fixed };
return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
}
case ARMISD::VST2_UPD: {
- unsigned DOpcodes[] = { ARM::VST2d8Pseudo_UPD, ARM::VST2d16Pseudo_UPD,
- ARM::VST2d32Pseudo_UPD, ARM::VST1q64Pseudo_UPD };
- unsigned QOpcodes[] = { ARM::VST2q8Pseudo_UPD, ARM::VST2q16Pseudo_UPD,
- ARM::VST2q32Pseudo_UPD };
+ unsigned DOpcodes[] = { ARM::VST2d8wb_fixed,
+ ARM::VST2d16wb_fixed,
+ ARM::VST2d32wb_fixed,
+ ARM::VST1q64wb_fixed};
+ unsigned QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
+ ARM::VST2q16PseudoWB_fixed,
+ ARM::VST2q32PseudoWB_fixed };
return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
}
case ARMISD::VST3_UPD: {
unsigned DOpcodes[] = { ARM::VST3d8Pseudo_UPD, ARM::VST3d16Pseudo_UPD,
- ARM::VST3d32Pseudo_UPD, ARM::VST1d64TPseudo_UPD };
+ ARM::VST3d32Pseudo_UPD,ARM::VST1d64TPseudoWB_fixed};
unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
ARM::VST3q16Pseudo_UPD,
ARM::VST3q32Pseudo_UPD };
@@ -2844,7 +3032,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case ARMISD::VST4_UPD: {
unsigned DOpcodes[] = { ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD,
- ARM::VST4d32Pseudo_UPD, ARM::VST1d64QPseudo_UPD };
+ ARM::VST4d32Pseudo_UPD,ARM::VST1d64QPseudoWB_fixed};
unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
ARM::VST4q16Pseudo_UPD,
ARM::VST4q32Pseudo_UPD };
@@ -2993,14 +3181,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case Intrinsic::arm_neon_vld1: {
unsigned DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
ARM::VLD1d32, ARM::VLD1d64 };
- unsigned QOpcodes[] = { ARM::VLD1q8Pseudo, ARM::VLD1q16Pseudo,
- ARM::VLD1q32Pseudo, ARM::VLD1q64Pseudo };
+ unsigned QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
+ ARM::VLD1q32, ARM::VLD1q64};
return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vld2: {
- unsigned DOpcodes[] = { ARM::VLD2d8Pseudo, ARM::VLD2d16Pseudo,
- ARM::VLD2d32Pseudo, ARM::VLD1q64Pseudo };
+ unsigned DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
+ ARM::VLD2d32, ARM::VLD1q64 };
unsigned QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
ARM::VLD2q32Pseudo };
return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
@@ -3054,14 +3242,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
case Intrinsic::arm_neon_vst1: {
unsigned DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
ARM::VST1d32, ARM::VST1d64 };
- unsigned QOpcodes[] = { ARM::VST1q8Pseudo, ARM::VST1q16Pseudo,
- ARM::VST1q32Pseudo, ARM::VST1q64Pseudo };
+ unsigned QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
+ ARM::VST1q32, ARM::VST1q64 };
return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
}
case Intrinsic::arm_neon_vst2: {
- unsigned DOpcodes[] = { ARM::VST2d8Pseudo, ARM::VST2d16Pseudo,
- ARM::VST2d32Pseudo, ARM::VST1q64Pseudo };
+ unsigned DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
+ ARM::VST2d32, ARM::VST1q64 };
unsigned QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
ARM::VST2q32Pseudo };
return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
@@ -3122,14 +3310,14 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
break;
case Intrinsic::arm_neon_vtbl2:
- return SelectVTBL(N, false, 2, ARM::VTBL2Pseudo);
+ return SelectVTBL(N, false, 2, ARM::VTBL2);
case Intrinsic::arm_neon_vtbl3:
return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
case Intrinsic::arm_neon_vtbl4:
return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
case Intrinsic::arm_neon_vtbx2:
- return SelectVTBL(N, true, 2, ARM::VTBX2Pseudo);
+ return SelectVTBL(N, true, 2, ARM::VTBX2);
case Intrinsic::arm_neon_vtbx3:
return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
case Intrinsic::arm_neon_vtbx4:
@@ -3163,7 +3351,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
Ops.push_back(N->getOperand(2));
Ops.push_back(getAL(CurDAG)); // Predicate
Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
- return CurDAG->getMachineNode(ARM::VTBL2Pseudo, dl, VT,
+ return CurDAG->getMachineNode(ARM::VTBL2, dl, VT,
Ops.data(), Ops.size());
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index f60d177..a103c94 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13,13 +13,12 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-isel"
+#include "ARMISelLowering.h"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
-#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
-#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
@@ -40,18 +39,15 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/VectorExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include <sstream>
using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
@@ -73,7 +69,7 @@ ARMInterworking("arm-interworking", cl::Hidden,
cl::desc("Enable / disable ARM interworking (for debugging only)"),
cl::init(true));
-namespace llvm {
+namespace {
class ARMCCState : public CCState {
public:
ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
@@ -89,7 +85,7 @@ namespace llvm {
}
// The APCS parameter registers.
-static const unsigned GPRArgRegs[] = {
+static const uint16_t GPRArgRegs[] = {
ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
@@ -108,8 +104,14 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
EVT ElemTy = VT.getVectorElementType();
if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
setOperationAction(ISD::SETCC, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getSimpleVT(), Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
- if (ElemTy != MVT::i32) {
+ if (ElemTy == MVT::i32) {
+ setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Custom);
+ } else {
setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
@@ -121,18 +123,12 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, VT.getSimpleVT(), Expand);
if (VT.isInteger()) {
setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
- setLoadExtAction(ISD::SEXTLOAD, VT.getSimpleVT(), Expand);
- setLoadExtAction(ISD::ZEXTLOAD, VT.getSimpleVT(), Expand);
- for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
- setTruncStoreAction(VT.getSimpleVT(),
- (MVT::SimpleValueType)InnerVT, Expand);
}
- setLoadExtAction(ISD::EXTLOAD, VT.getSimpleVT(), Expand);
// Promote all bit-wise operations.
if (VT.isInteger() && VT != PromotedBitwiseVT) {
@@ -263,7 +259,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallName(RTLIB::SRL_I128, 0);
setLibcallName(RTLIB::SRA_I128, 0);
- if (Subtarget->isAAPCS_ABI()) {
+ if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetDarwin()) {
// Double-precision floating-point arithmetic helper functions
// RTABI chapter 4.1.2, Table 2
setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
@@ -388,8 +384,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
// Long long helper functions
// RTABI chapter 4.2, Table 9
setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
- setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
- setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
@@ -405,21 +399,28 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallName(RTLIB::SDIV_I8, "__aeabi_idiv");
setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
+ setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
setLibcallName(RTLIB::UDIV_I8, "__aeabi_uidiv");
setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
+ setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
// Memory operations
// RTABI chapter 4.3.4
setLibcallName(RTLIB::MEMCPY, "__aeabi_memcpy");
setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
setLibcallName(RTLIB::MEMSET, "__aeabi_memset");
+ setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::MEMMOVE, CallingConv::ARM_AAPCS);
+ setLibcallCallingConv(RTLIB::MEMSET, CallingConv::ARM_AAPCS);
}
// Use divmod compiler-rt calls for iOS 5.0 and later.
@@ -433,7 +434,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
else
addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
- if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
+ if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
+ !Subtarget->isThumb1Only()) {
addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
if (!Subtarget->isFPOnlySP())
addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
@@ -441,6 +443,19 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
}
+ for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
+ for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
+ setTruncStoreAction((MVT::SimpleValueType)VT,
+ (MVT::SimpleValueType)InnerVT, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
+ }
+
+ setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
+
if (Subtarget->hasNEON()) {
addDRTypeForNEON(MVT::v2f32);
addDRTypeForNEON(MVT::v8i8);
@@ -457,13 +472,23 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
// v2f64 is legal so that QR subregs can be extracted as f64 elements, but
// neither Neon nor VFP support any arithmetic operations on it.
+ // The same applies to v4f32, but keep in mind that vadd, vsub and vmul are
+ // natively supported for v4f32.
setOperationAction(ISD::FADD, MVT::v2f64, Expand);
setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
+ // FIXME: Code duplication: FDIV and FREM are always expanded; see the
+ // ARMTargetLowering::addTypeForNEON method for details.
setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
setOperationAction(ISD::FREM, MVT::v2f64, Expand);
+ // FIXME: Create unittest.
+ // In other words, find a case where "copysign" appears in the DAG with
+ // vector operands.
setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
+ // FIXME: Code duplication: SETCC has custom operation action, see
+ // ARMTargetLowering::addTypeForNEON method for details.
setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
+ // FIXME: Create unittest for FNEG and for FABS.
setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
setOperationAction(ISD::FABS, MVT::v2f64, Expand);
setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
@@ -476,13 +501,23 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
+ // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
- setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
+ setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
+ setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
+ setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
+ setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
+ setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
+ setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
+ setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
+ setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
+ setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
+ setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
// Neon does not support some operations on v1i64 and v2i64 types.
setOperationAction(ISD::MUL, MVT::v1i64, Expand);
@@ -498,9 +533,13 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
// Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
- // a destination type that is wider than the source.
+ // a destination type that is wider than the source, nor does
+ // it have a FP_TO_[SU]INT instruction with a destination
+ // narrower than the source.
setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
setTargetDAGCombine(ISD::INTRINSIC_VOID);
setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
@@ -519,6 +558,16 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setTargetDAGCombine(ISD::FP_TO_SINT);
setTargetDAGCombine(ISD::FP_TO_UINT);
setTargetDAGCombine(ISD::FDIV);
+
+ // It is legal to extload from v4i8 to v4i16 or v4i32.
+ MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
+ MVT::v4i16, MVT::v2i16,
+ MVT::v2i32};
+ for (unsigned i = 0; i < 6; ++i) {
+ setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
+ setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
+ setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
+ }
}
computeRegisterProperties();
@@ -576,6 +625,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+ // These just redirect to CTTZ and CTLZ on ARM.
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i32 , Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF , MVT::i32 , Expand);
+
// Only ARMv6 has BSWAP.
if (!Subtarget->hasV6Ops())
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
@@ -606,10 +659,15 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::VAEND, MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
- setExceptionPointerRegister(ARM::R0);
- setExceptionSelectorRegister(ARM::R1);
+
+ if (!Subtarget->isTargetDarwin()) {
+ // Non-Darwin platforms may return values in these registers via the
+ // personality function.
+ setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+ setExceptionPointerRegister(ARM::R0);
+ setExceptionSelectorRegister(ARM::R1);
+ }
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
// ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
@@ -664,7 +722,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
}
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
- if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
+ if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
+ !Subtarget->isThumb1Only()) {
// Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
// iff target supports vfp2.
setOperationAction(ISD::BITCAST, MVT::i64, Custom);
@@ -676,7 +735,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (Subtarget->isTargetDarwin()) {
setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
- setOperationAction(ISD::EH_SJLJ_DISPATCHSETUP, MVT::Other, Custom);
setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
}
@@ -703,18 +761,21 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FCOS, MVT::f64, Expand);
setOperationAction(ISD::FREM, MVT::f64, Expand);
setOperationAction(ISD::FREM, MVT::f32, Expand);
- if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
+ if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
+ !Subtarget->isThumb1Only()) {
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
}
setOperationAction(ISD::FPOW, MVT::f64, Expand);
setOperationAction(ISD::FPOW, MVT::f32, Expand);
- setOperationAction(ISD::FMA, MVT::f64, Expand);
- setOperationAction(ISD::FMA, MVT::f32, Expand);
+ if (!Subtarget->hasVFP4()) {
+ setOperationAction(ISD::FMA, MVT::f64, Expand);
+ setOperationAction(ISD::FMA, MVT::f32, Expand);
+ }
// Various VFP goodness
- if (!UseSoftFloat && !Subtarget->isThumb1Only()) {
+ if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
// int <-> fp are custom expanded into bit_convert + ARMISD ops.
if (Subtarget->hasVFP2()) {
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
@@ -735,20 +796,27 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setTargetDAGCombine(ISD::SUB);
setTargetDAGCombine(ISD::MUL);
- if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON())
- setTargetDAGCombine(ISD::OR);
- if (Subtarget->hasNEON())
+ if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON()) {
setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::OR);
+ setTargetDAGCombine(ISD::XOR);
+ }
+
+ if (Subtarget->hasV6Ops())
+ setTargetDAGCombine(ISD::SRL);
setStackPointerRegisterToSaveRestore(ARM::SP);
- if (UseSoftFloat || Subtarget->isThumb1Only() || !Subtarget->hasVFP2())
+ if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
+ !Subtarget->hasVFP2())
setSchedulingPreference(Sched::RegPressure);
else
setSchedulingPreference(Sched::Hybrid);
//// temporary - rewrite interface to use type
maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 1;
+ maxStoresPerMemset = 16;
+ maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
// On ARM arguments smaller than 4 bytes are extended, so all arguments
// are at least 4 bytes aligned.
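A rough illustration of what the new memset thresholds control (a sketch, not part of the patch):

    // With maxStoresPerMemset = 16, a small fixed-size memset like this
    // may be expanded into a short sequence of inline stores instead of a
    // libcall; above the threshold (or 8/4 stores when optimizing for
    // size, per the Darwin check above) the call to memset is kept.
    void clear32(char *p) {
      __builtin_memset(p, 0, 32);   // likely inlined as word/dword stores
    }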
@@ -828,7 +896,11 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
+
case ARMISD::CMOV: return "ARMISD::CMOV";
+ case ARMISD::CAND: return "ARMISD::CAND";
+ case ARMISD::COR: return "ARMISD::COR";
+ case ARMISD::CXOR: return "ARMISD::CXOR";
case ARMISD::RBIT: return "ARMISD::RBIT";
@@ -851,7 +923,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
- case ARMISD::EH_SJLJ_DISPATCHSETUP:return "ARMISD::EH_SJLJ_DISPATCHSETUP";
case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
@@ -899,6 +970,7 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
+ case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
case ARMISD::VDUP: return "ARMISD::VDUP";
case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
case ARMISD::VEXT: return "ARMISD::VEXT";
@@ -949,7 +1021,7 @@ EVT ARMTargetLowering::getSetCCResultType(EVT VT) const {
/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
-TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
+const TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
// Map v4i64 to QQ registers but do not make the type legal. Similarly map
// v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
// load / store 4 to 8 consecutive D registers.
@@ -984,7 +1056,7 @@ Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
if (VT == MVT::Glue || VT == MVT::Other)
continue;
if (VT.isFloatingPoint() || VT.isVector())
- return Sched::Latency;
+ return Sched::ILP;
}
if (!N->isMachineOpcode())
@@ -999,7 +1071,7 @@ Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
return Sched::RegPressure;
if (!Itins->isEmpty() &&
Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
- return Sched::Latency;
+ return Sched::ILP;
return Sched::RegPressure;
}
@@ -1081,18 +1153,19 @@ CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
if (!Subtarget->isAAPCS_ABI())
return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
else if (Subtarget->hasVFP2() &&
- FloatABIType == FloatABI::Hard && !isVarArg)
+ getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
+ !isVarArg)
return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
}
case CallingConv::ARM_AAPCS_VFP:
- return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
+ if (!isVarArg)
+ return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
+ // Fallthrough
case CallingConv::ARM_AAPCS:
return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
case CallingConv::ARM_APCS:
return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
- case CallingConv::GHC:
- return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
}
}
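Background on the new isVarArg check (illustrative; the register assignment below follows the AAPCS rules for variadic calls, which never use the VFP argument registers):

    extern "C" int printf(const char *, ...);
    double d = 1.0;
    printf("%f\n", d);   // d is passed in core registers (r2:r3), not in
                         // d0, even under the hard-float AAPCS-VFP
                         // convention -- hence the fallthrough to AAPCS.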
@@ -1215,7 +1288,7 @@ void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
SDValue
ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -1334,7 +1407,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(j, Load));
}
@@ -1350,12 +1423,10 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
MVT::i32);
- // TODO: Disable AlwaysInline when it becomes possible
- // to emit a nested call sequence.
MemOpChains.push_back(DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
Flags.getByValAlign(),
/*isVolatile=*/false,
- /*AlwaysInline=*/true,
+ /*AlwaysInline=*/false,
MachinePointerInfo(0),
MachinePointerInfo(0)));
@@ -1429,7 +1500,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Callee = DAG.getLoad(getPointerTy(), dl,
DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
} else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
const char *Sym = S->getSymbol();
@@ -1444,7 +1515,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Callee = DAG.getLoad(getPointerTy(), dl,
DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
}
} else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = G->getGlobal();
@@ -1465,7 +1536,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Callee = DAG.getLoad(getPointerTy(), dl,
DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
getPointerTy(), Callee, PICLabel);
@@ -1494,7 +1565,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Callee = DAG.getLoad(getPointerTy(), dl,
DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
getPointerTy(), Callee, PICLabel);
@@ -1513,12 +1584,20 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (Subtarget->isThumb()) {
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
+ else if (doesNotRet && isDirect && !isARMFunc &&
+ Subtarget->hasRAS() && !Subtarget->isThumb1Only())
+ // "mov lr, pc; b _foo" to avoid confusing the RSP
+ CallOpc = ARMISD::CALL_NOLINK;
else
CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
} else {
- CallOpc = (isDirect || Subtarget->hasV5TOps())
- ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
- : ARMISD::CALL_NOLINK;
+ if (!isDirect && !Subtarget->hasV5TOps()) {
+ CallOpc = ARMISD::CALL_NOLINK;
+ } else if (doesNotRet && isDirect && Subtarget->hasRAS())
+ // "mov lr, pc; b _foo" to avoid confusing the RSP
+ CallOpc = ARMISD::CALL_NOLINK;
+ else
+ CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
}
std::vector<SDValue> Ops;
@@ -1531,6 +1610,12 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
if (InFlag.getNode())
Ops.push_back(InFlag);
@@ -1558,7 +1643,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
/// and then confiscate the rest of the parameter registers to insure
/// this.
void
-llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
+ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
unsigned reg = State->AllocateReg(GPRArgRegs, 4);
assert((State->getCallOrPrologue() == Prologue ||
State->getCallOrPrologue() == Call) &&
@@ -1588,7 +1673,7 @@ llvm::ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const {
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
- const ARMInstrInfo *TII) {
+ const TargetInstrInfo *TII) {
unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
@@ -1723,8 +1808,7 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// the caller's fixed stack objects.
MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
- const ARMInstrInfo *TII =
- ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
i != e;
++i, ++realArgIdx) {
@@ -1852,63 +1936,72 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
return result;
}
-bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const {
+bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
if (N->getNumValues() != 1)
return false;
if (!N->hasNUsesOfValue(1, 0))
return false;
- unsigned NumCopies = 0;
- SDNode* Copies[2];
- SDNode *Use = *N->use_begin();
- if (Use->getOpcode() == ISD::CopyToReg) {
- Copies[NumCopies++] = Use;
- } else if (Use->getOpcode() == ARMISD::VMOVRRD) {
+ SDValue TCChain = Chain;
+ SDNode *Copy = *N->use_begin();
+ if (Copy->getOpcode() == ISD::CopyToReg) {
+ // If the copy has a glue operand, we conservatively assume it isn't safe to
+ // perform a tail call.
+ if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
+ return false;
+ TCChain = Copy->getOperand(0);
+ } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
+ SDNode *VMov = Copy;
// f64 returned in a pair of GPRs.
- for (SDNode::use_iterator UI = Use->use_begin(), UE = Use->use_end();
+ SmallPtrSet<SDNode*, 2> Copies;
+ for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
UI != UE; ++UI) {
if (UI->getOpcode() != ISD::CopyToReg)
return false;
- Copies[UI.getUse().getResNo()] = *UI;
- ++NumCopies;
+ Copies.insert(*UI);
}
- } else if (Use->getOpcode() == ISD::BITCAST) {
+ if (Copies.size() > 2)
+ return false;
+
+ for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
+ UI != UE; ++UI) {
+ SDValue UseChain = UI->getOperand(0);
+ if (Copies.count(UseChain.getNode()))
+ // Second CopyToReg
+ Copy = *UI;
+ else
+ // First CopyToReg
+ TCChain = UseChain;
+ }
+ } else if (Copy->getOpcode() == ISD::BITCAST) {
// f32 returned in a single GPR.
- if (!Use->hasNUsesOfValue(1, 0))
+ if (!Copy->hasOneUse())
return false;
- Use = *Use->use_begin();
- if (Use->getOpcode() != ISD::CopyToReg || !Use->hasNUsesOfValue(1, 0))
+ Copy = *Copy->use_begin();
+ if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
return false;
- Copies[NumCopies++] = Use;
+ Chain = Copy->getOperand(0);
} else {
return false;
}
- if (NumCopies != 1 && NumCopies != 2)
- return false;
-
bool HasRet = false;
- for (unsigned i = 0; i < NumCopies; ++i) {
- SDNode *Copy = Copies[i];
- for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
- UI != UE; ++UI) {
- if (UI->getOpcode() == ISD::CopyToReg) {
- SDNode *Use = *UI;
- if (Use == Copies[0] || Use == Copies[1])
- continue;
- return false;
- }
- if (UI->getOpcode() != ARMISD::RET_FLAG)
- return false;
- HasRet = true;
- }
+ for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
+ UI != UE; ++UI) {
+ if (UI->getOpcode() != ARMISD::RET_FLAG)
+ return false;
+ HasRet = true;
}
- return HasRet;
+ if (!HasRet)
+ return false;
+
+ Chain = TCChain;
+ return true;
}
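A rough sketch of the DAG shape the rewritten matcher accepts for an f64 value that only feeds the return (node names illustrative):

    //   t0: f64  = <node N under test>
    //   t1, t2   = ARMISD::VMOVRRD t0         // f64 split into two GPRs
    //   t3: ch   = CopyToReg chain, R0, t1    // first half
    //   t4: ch   = CopyToReg t3,    R1, t2    // second half
    //   ARMISD::RET_FLAG t4, ...
    // TCChain is rewound to the chain entering the first CopyToReg, so a
    // tail call can be spliced in where the copies used to hang.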
bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
- if (!EnableARMTailCalls)
+ if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
return false;
if (!CI->isTailCall())
@@ -1965,7 +2058,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
if (RelocM == Reloc::Static)
return Result;
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
@@ -1989,7 +2082,7 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
SDValue Chain = Argument.getValue(1);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
@@ -2005,7 +2098,8 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, (Type *) Type::getInt32Ty(*DAG.getContext()),
false, false, false, false,
- 0, CallingConv::C, false, /*isReturnValueUsed=*/true,
+ 0, CallingConv::C, /*isTailCall=*/false,
+ /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
return CallResult.first;
}
@@ -2037,7 +2131,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
Chain = Offset.getValue(1);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
@@ -2045,7 +2139,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
} else {
// local exec model
ARMConstantPoolValue *CPV =
@@ -2054,7 +2148,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
}
// The address of the thread local variable is the add of the thread
@@ -2092,13 +2186,14 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
SDValue Chain = Result.getValue(1);
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
if (!UseGOTOFF)
Result = DAG.getLoad(PtrVT, dl, Chain, Result,
- MachinePointerInfo::getGOT(), false, false, 0);
+ MachinePointerInfo::getGOT(),
+ false, false, false, 0);
return Result;
}
@@ -2115,7 +2210,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
}
}
@@ -2128,7 +2223,8 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- // FIXME: Enable this for static codegen when tool issues are fixed.
+ // FIXME: Enable this for static codegen when tool issues are fixed. Also
+ // update ARMFastISel::ARMMaterializeGV.
if (Subtarget->useMovt() && RelocM != Reloc::Static) {
++NumMovwMovt;
// FIXME: Once remat is capable of dealing with instructions with register
@@ -2143,7 +2239,8 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
DAG.getTargetGlobalAddress(GV, dl, PtrVT));
if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(), false, false, 0);
+ MachinePointerInfo::getGOT(),
+ false, false, false, 0);
return Result;
}
@@ -2163,7 +2260,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
SDValue Chain = Result.getValue(1);
if (RelocM == Reloc::PIC_) {
@@ -2173,7 +2270,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(),
- false, false, 0);
+ false, false, false, 0);
return Result;
}
@@ -2195,20 +2292,12 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}
SDValue
-ARMTargetLowering::LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG)
- const {
- DebugLoc dl = Op.getDebugLoc();
- return DAG.getNode(ARMISD::EH_SJLJ_DISPATCHSETUP, dl, MVT::Other,
- Op.getOperand(0), Op.getOperand(1));
-}
-
-SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
SDValue Val = DAG.getConstant(0, MVT::i32);
@@ -2253,7 +2342,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
SDValue Result =
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
MachinePointerInfo::getConstantPool(),
- false, false, 0);
+ false, false, false, 0);
if (RelocM == Reloc::PIC_) {
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
@@ -2366,7 +2455,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- TargetRegisterClass *RC;
+ const TargetRegisterClass *RC;
if (AFI->isThumb1OnlyFunction())
RC = ARM::tGPRRegisterClass;
else
@@ -2385,7 +2474,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
MachinePointerInfo::getFixedStack(FI),
- false, false, 0);
+ false, false, false, 0);
} else {
Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
@@ -2452,7 +2541,7 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
SmallVector<SDValue, 4> MemOps;
for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) {
- TargetRegisterClass *RC;
+ const TargetRegisterClass *RC;
if (AFI->isThumb1OnlyFunction())
RC = ARM::tGPRRegisterClass;
else
@@ -2521,7 +2610,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
- false, false, 0);
+ false, false, false, 0);
} else {
ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
Chain, DAG, dl);
@@ -2535,7 +2624,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
} else {
- TargetRegisterClass *RC;
+ const TargetRegisterClass *RC;
if (RegVT == MVT::f32)
RC = ARM::SPRRegisterClass;
@@ -2612,7 +2701,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
- false, false, 0));
+ false, false, false, 0));
}
lastInsIndex = index;
}
@@ -2777,6 +2866,11 @@ SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
}
+ // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
+ // undefined bits before doing a full-word comparison with zero.
+ Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
+ DAG.getConstant(1, Cond.getValueType()));
+
return DAG.getSelectCC(dl, Cond,
DAG.getConstant(0, Cond.getValueType()),
SelectTrue, SelectFalse, ISD::SETNE);
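Why the new AND is needed, in scalar terms (sketch):

    // With UndefinedBooleanContent, only bit 0 of a boolean is defined;
    // bits [31:1] may hold garbage. Masking first makes the SETNE sound:
    //   cond &= 1;
    //   result = (cond != 0) ? SelectTrue : SelectFalse;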
@@ -2847,7 +2941,7 @@ static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->getAlignment());
+ Ld->isInvariant(), Ld->getAlignment());
llvm_unreachable("Unknown VFP cmp argument!");
}
@@ -2866,7 +2960,7 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
Ld->getChain(), Ptr,
Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->getAlignment());
+ Ld->isInvariant(), Ld->getAlignment());
EVT PtrType = Ptr.getValueType();
unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
@@ -2876,7 +2970,7 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
Ld->getChain(), NewPtr,
Ld->getPointerInfo().getWithOffset(4),
Ld->isVolatile(), Ld->isNonTemporal(),
- NewAlign);
+ Ld->isInvariant(), NewAlign);
return;
}
@@ -2894,12 +2988,11 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
SDValue Dest = Op.getOperand(4);
DebugLoc dl = Op.getDebugLoc();
- bool SeenZero = false;
- if (canChangeToInt(LHS, SeenZero, Subtarget) &&
- canChangeToInt(RHS, SeenZero, Subtarget) &&
- // If one of the operand is zero, it's safe to ignore the NaN case since
- // we only care about equality comparisons.
- (SeenZero || (DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS)))) {
+ bool LHSSeenZero = false;
+ bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
+ bool RHSSeenZero = false;
+ bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
+ if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
// If unsafe fp math optimization is enabled and there are no other uses of
// the CMP operands, and the condition code is EQ or NE, we can optimize it
// to an integer comparison.
@@ -2908,10 +3001,13 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
else if (CC == ISD::SETUNE)
CC = ISD::SETNE;
+ SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32);
SDValue ARMcc;
if (LHS.getValueType() == MVT::f32) {
- LHS = bitcastf32Toi32(LHS, DAG);
- RHS = bitcastf32Toi32(RHS, DAG);
+ LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
+ bitcastf32Toi32(LHS, DAG), Mask);
+ RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
+ bitcastf32Toi32(RHS, DAG), Mask);
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
@@ -2922,6 +3018,8 @@ ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
SDValue RHS1, RHS2;
expandf64Toi32(LHS, DAG, LHS1, LHS2);
expandf64Toi32(RHS, DAG, RHS1, RHS2);
+ LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
+ RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
ARMcc = DAG.getConstant(CondCode, MVT::i32);
SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
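The 0x7fffffff mask clears the IEEE sign bit before the integer compare; a scalar sketch of the trick (valid because, as checked above, one operand is known to be zero):

    // +0.0f is 0x00000000 and -0.0f is 0x80000000, so a raw bit compare
    // would wrongly distinguish them. Clearing the sign bit fixes EQ/NE:
    //   eq = ((bits(LHS) & 0x7fffffffu) == (bits(RHS) & 0x7fffffffu));
    // For f64 only the high word carries the sign, so only LHS2/RHS2 are
    // masked above.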
@@ -2950,7 +3048,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
- if (UnsafeFPMath &&
+ if (getTargetMachine().Options.UnsafeFPMath &&
(CC == ISD::SETEQ || CC == ISD::SETOEQ ||
CC == ISD::SETNE || CC == ISD::SETUNE)) {
SDValue Result = OptimizeVFPBrcond(Op, DAG);
@@ -3000,25 +3098,48 @@ SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
MachinePointerInfo::getJumpTable(),
- false, false, 0);
+ false, false, false, 0);
Chain = Addr.getValue(1);
Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
} else {
Addr = DAG.getLoad(PTy, dl, Chain, Addr,
- MachinePointerInfo::getJumpTable(), false, false, 0);
+ MachinePointerInfo::getJumpTable(),
+ false, false, false, 0);
Chain = Addr.getValue(1);
return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
}
}
+static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+
+ if (Op.getValueType().getVectorElementType() == MVT::i32) {
+ if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
+ return Op;
+ return DAG.UnrollVectorOp(Op.getNode());
+ }
+
+ assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
+ "Invalid type for custom lowering!");
+ if (VT != MVT::v4i16)
+ return DAG.UnrollVectorOp(Op.getNode());
+
+ Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
+ return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
+}
+
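The v4i16 case above is handled by widening rather than unrolling; a sketch of the rewrite:

    //   v4i16 r = fp_to_sint v4f32 x
    // becomes
    //   v4i32 t = fp_to_sint v4f32 x    // directly legal on NEON (vcvt)
    //   v4i16 r = truncate   v4i32 t    // narrowing move (vmovn.i32)
    // Other element-type combinations fall back to scalar unrolling.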
static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
+ EVT VT = Op.getValueType();
+ if (VT.isVector())
+ return LowerVectorFP_TO_INT(Op, DAG);
+
DebugLoc dl = Op.getDebugLoc();
unsigned Opc;
switch (Op.getOpcode()) {
- default:
- assert(0 && "Invalid opcode!");
+ default: llvm_unreachable("Invalid opcode!");
case ISD::FP_TO_SINT:
Opc = ARMISD::FTOSI;
break;
@@ -3034,6 +3155,12 @@ static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
+ if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
+ if (VT.getVectorElementType() == MVT::f32)
+ return Op;
+ return DAG.UnrollVectorOp(Op.getNode());
+ }
+
assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
"Invalid type for custom lowering!");
if (VT != MVT::v4f32)
@@ -3042,8 +3169,7 @@ static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
unsigned CastOpc;
unsigned Opc;
switch (Op.getOpcode()) {
- default:
- assert(0 && "Invalid opcode!");
+ default: llvm_unreachable("Invalid opcode!");
case ISD::SINT_TO_FP:
CastOpc = ISD::SIGN_EXTEND;
Opc = ISD::SINT_TO_FP;
@@ -3067,8 +3193,7 @@ static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
unsigned Opc;
switch (Op.getOpcode()) {
- default:
- assert(0 && "Invalid opcode!");
+ default: llvm_unreachable("Invalid opcode!");
case ISD::SINT_TO_FP:
Opc = ARMISD::SITOF;
break;
@@ -3176,7 +3301,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
SDValue Offset = DAG.getConstant(4, MVT::i32);
return DAG.getLoad(VT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
}
// Return LR, which contains the return address. Mark it an implicit live-in.
@@ -3197,7 +3322,7 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
return FrameAddr;
}
@@ -3442,7 +3567,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
if (Op.getOperand(1).getValueType().isFloatingPoint()) {
switch (SetCCOpcode) {
- default: llvm_unreachable("Illegal FP comparison"); break;
+ default: llvm_unreachable("Illegal FP comparison");
case ISD::SETUNE:
case ISD::SETNE: Invert = true; // Fallthrough
case ISD::SETOEQ:
@@ -3481,7 +3606,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
} else {
// Integer comparisons.
switch (SetCCOpcode) {
- default: llvm_unreachable("Illegal integer comparison"); break;
+ default: llvm_unreachable("Illegal integer comparison");
case ISD::SETNE: Invert = true;
case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
case ISD::SETLT: Swap = true;
@@ -3688,14 +3813,65 @@ static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
default:
llvm_unreachable("unexpected size for isNEONModifiedImm");
- return SDValue();
}
unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
return DAG.getTargetConstant(EncodedVal, MVT::i32);
}
-static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
+SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
+ const ARMSubtarget *ST) const {
+ if (!ST->useNEONForSinglePrecisionFP() || !ST->hasVFP3() || ST->hasD16())
+ return SDValue();
+
+ ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
+ assert(Op.getValueType() == MVT::f32 &&
+ "ConstantFP custom lowering should only occur for f32.");
+
+ // Try splatting with a VMOV.f32...
+ APFloat FPVal = CFP->getValueAPF();
+ int ImmVal = ARM_AM::getFP32Imm(FPVal);
+ if (ImmVal != -1) {
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32);
+ SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
+ NewVal);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
+ DAG.getConstant(0, MVT::i32));
+ }
+
+ // If that fails, try a VMOV.i32
+ EVT VMovVT;
+ unsigned iVal = FPVal.bitcastToAPInt().getZExtValue();
+ SDValue NewVal = isNEONModifiedImm(iVal, 0, 32, DAG, VMovVT, false,
+ VMOVModImm);
+ if (NewVal != SDValue()) {
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
+ NewVal);
+ SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
+ VecConstant);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
+ DAG.getConstant(0, MVT::i32));
+ }
+
+ // Finally, try a VMVN.i32
+ NewVal = isNEONModifiedImm(~iVal & 0xffffffff, 0, 32, DAG, VMovVT, false,
+ VMVNModImm);
+ if (NewVal != SDValue()) {
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
+ SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
+ VecConstant);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
+ DAG.getConstant(0, MVT::i32));
+ }
+
+ return SDValue();
+}
+
+
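A sketch of the three fallbacks tried above for an f32 constant (the encodability claims are illustrative):

    float a = 1.0f;    // fits the 8-bit VFP3 FP immediate -> vmov.f32 #1.0
    float b = 0.3f;    // no FP-immediate encoding; its bit pattern is then
                       // tried as a VMOV.i32 modified immediate, and its
                       // complement as VMVN.i32; if all fail, the constant
                       // is left for the constant pool.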
+static bool isVEXTMask(ArrayRef<int> M, EVT VT,
bool &ReverseVEXT, unsigned &Imm) {
unsigned NumElts = VT.getVectorNumElements();
ReverseVEXT = false;
@@ -3734,8 +3910,7 @@ static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize. (The order of the elements
/// within each block of the vector is reversed.)
-static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned BlockSize) {
+static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
"Only possible block sizes for VREV are: 16, 32, 64");
@@ -3761,15 +3936,14 @@ static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
return true;
}
-static bool isVTBLMask(const SmallVectorImpl<int> &M, EVT VT) {
+static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
// We can handle <8 x i8> vector shuffles. If the index in the mask is out of
// range, then 0 is placed into the resulting vector. So pretty much any mask
// of 8 elements can work here.
return VT == MVT::v8i8 && M.size() == 8;
}
-static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
+static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
unsigned EltSz = VT.getVectorElementType().getSizeInBits();
if (EltSz == 64)
return false;
@@ -3787,8 +3961,7 @@ static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
-static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
+static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
unsigned EltSz = VT.getVectorElementType().getSizeInBits();
if (EltSz == 64)
return false;
@@ -3803,8 +3976,7 @@ static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
return true;
}
-static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
+static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
unsigned EltSz = VT.getVectorElementType().getSizeInBits();
if (EltSz == 64)
return false;
@@ -3827,8 +3999,7 @@ static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
-static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
+static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
unsigned EltSz = VT.getVectorElementType().getSizeInBits();
if (EltSz == 64)
return false;
@@ -3852,8 +4023,7 @@ static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
return true;
}
-static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
+static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
unsigned EltSz = VT.getVectorElementType().getSizeInBits();
if (EltSz == 64)
return false;
@@ -3878,8 +4048,7 @@ static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
-static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
+static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
unsigned EltSz = VT.getVectorElementType().getSizeInBits();
if (EltSz == 64)
return false;
@@ -3955,6 +4124,15 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}
+
+ // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
+ if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
+ int ImmVal = ARM_AM::getFP32Imm(SplatBits);
+ if (ImmVal != -1) {
+ SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
+ return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
+ }
+ }
}
}
@@ -4302,7 +4480,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
}
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
- SmallVectorImpl<int> &ShuffleMask,
+ ArrayRef<int> ShuffleMask,
SelectionDAG &DAG) {
// Check to see if we can use the VTBL instruction.
SDValue V1 = Op.getOperand(0);
@@ -4310,7 +4488,7 @@ static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
DebugLoc DL = Op.getDebugLoc();
SmallVector<SDValue, 8> VTBLMask;
- for (SmallVectorImpl<int>::iterator
+ for (ArrayRef<int>::iterator
I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));
@@ -4330,7 +4508,6 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
- SmallVector<int, 8> ShuffleMask;
// Convert shuffles that are directly supported on NEON to target-specific
// DAG nodes, instead of keeping them as shuffles and matching them again
@@ -4338,7 +4515,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// of inconsistencies between legalization and selection.
// FIXME: floating-point vectors should be canonicalized to integer vectors
// of the same time so that they get CSEd properly.
- SVN->getMask(ShuffleMask);
+ ArrayRef<int> ShuffleMask = SVN->getMask();
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
if (EltSize <= 32) {
@@ -4347,9 +4524,24 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// If this is undef splat, generate it via "just" vdup, if possible.
if (Lane == -1) Lane = 0;
+ // Test if V1 is a SCALAR_TO_VECTOR.
if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
}
+ // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
+ // (and probably will turn into a SCALAR_TO_VECTOR once legalization
+ // reaches it).
+ if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
+ !isa<ConstantSDNode>(V1.getOperand(0))) {
+ bool IsScalarToVector = true;
+ for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
+ if (V1.getOperand(i).getOpcode() != ISD::UNDEF) {
+ IsScalarToVector = false;
+ break;
+ }
+ if (IsScalarToVector)
+ return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
+ }
return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
DAG.getConstant(Lane, MVT::i32));
}
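The second test catches a splat whose source vector has not yet been canonicalized; an illustrative shape, based on the check above:

    //   t = BUILD_VECTOR s, undef, undef, undef   // only lane 0 defined
    //   shuffle t, undef, <0,0,0,0>               // splat of lane 0
    // is matched directly to
    //   ARMISD::VDUP s
    // Constant first operands are excluded, presumably because constant
    // splats are already covered by the VMOV/VMVN immediate paths.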
@@ -4450,6 +4642,15 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
return SDValue();
}
+static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
+ // INSERT_VECTOR_ELT is legal only for immediate indexes.
+ SDValue Lane = Op.getOperand(2);
+ if (!isa<ConstantSDNode>(Lane))
+ return SDValue();
+
+ return Op;
+}
+
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
// EXTRACT_VECTOR_ELT is legal only for immediate indexes.
SDValue Lane = Op.getOperand(1);
@@ -4526,11 +4727,10 @@ static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
unsigned HalfSize = EltSize / 2;
if (isSigned) {
- int64_t SExtVal = C->getSExtValue();
- if ((SExtVal >> HalfSize) != (SExtVal >> EltSize))
+ if (!isIntN(HalfSize, C->getSExtValue()))
return false;
} else {
- if ((C->getZExtValue() >> HalfSize) != 0)
+ if (!isUIntN(HalfSize, C->getZExtValue()))
return false;
}
continue;
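The isIntN/isUIntN helpers make the half-width range check explicit; behavior for an 8-bit half-element, for example:

    isIntN(8, -128);   // true  (within [-128, 127])
    isIntN(8, 200);    // false
    isUIntN(8, 255);   // true  (within [0, 255])
    isUIntN(8, 256);   // false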
@@ -4569,7 +4769,8 @@ static SDValue SkipExtension(SDNode *N, SelectionDAG &DAG) {
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
return DAG.getLoad(LD->getMemoryVT(), N->getDebugLoc(), LD->getChain(),
LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
- LD->isNonTemporal(), LD->getAlignment());
+ LD->isNonTemporal(), LD->isInvariant(),
+ LD->getAlignment());
// Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
// have been legalized as a BITCAST from v4i32.
if (N->getOpcode() == ISD::BITCAST) {
@@ -4874,7 +5075,7 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
unsigned Opc;
bool ExtraOp = false;
switch (Op.getOpcode()) {
- default: assert(0 && "Invalid code");
+ default: llvm_unreachable("Invalid code");
case ISD::ADDC: Opc = ARMISD::ADDC; break;
case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
case ISD::SUBC: Opc = ARMISD::SUBC; break;
@@ -4959,7 +5160,6 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
- case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
Subtarget);
case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
@@ -4971,8 +5171,10 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
case ISD::SETCC: return LowerVSETCC(Op, DAG);
+ case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
+ case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
@@ -4986,7 +5188,6 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
}
- return SDValue();
}
/// ReplaceNodeResults - Replace the results of node with an illegal result
@@ -4998,7 +5199,6 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
switch (N->getOpcode()) {
default:
llvm_unreachable("Don't know how to custom expand this!");
- break;
case ISD::BITCAST:
Res = ExpandBITCAST(N, DAG);
break;
@@ -5194,7 +5394,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
- TargetRegisterClass *TRC =
+ const TargetRegisterClass *TRC =
isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
unsigned scratch = MRI.createVirtualRegister(TRC);
unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
@@ -5304,7 +5504,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
- TargetRegisterClass *TRC =
+ const TargetRegisterClass *TRC =
isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
unsigned scratch = MRI.createVirtualRegister(TRC);
unsigned scratch2 = MRI.createVirtualRegister(TRC);
@@ -5414,7 +5614,7 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
- TargetRegisterClass *TRC =
+ const TargetRegisterClass *TRC =
isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
unsigned storesuccess = MRI.createVirtualRegister(TRC);
@@ -5500,52 +5700,6 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
return BB;
}
-/// EmitBasePointerRecalculation - For functions using a base pointer, we
-/// rematerialize it (via the frame pointer).
-void ARMTargetLowering::
-EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB,
- MachineBasicBlock *DispatchBB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
- MachineFunction &MF = *MI->getParent()->getParent();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
-
- if (!RI.hasBasePointer(MF)) return;
-
- MachineBasicBlock::iterator MBBI = MI;
-
- int32_t NumBytes = AFI->getFramePtrSpillOffset();
- unsigned FramePtr = RI.getFrameRegister(MF);
- assert(MF.getTarget().getFrameLowering()->hasFP(MF) &&
- "Base pointer without frame pointer?");
-
- if (AFI->isThumb2Function())
- llvm::emitT2RegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
- FramePtr, -NumBytes, ARMCC::AL, 0, *AII);
- else if (AFI->isThumbFunction())
- llvm::emitThumbRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
- FramePtr, -NumBytes, *AII, RI);
- else
- llvm::emitARMRegPlusImmediate(*MBB, MBBI, MI->getDebugLoc(), ARM::R6,
- FramePtr, -NumBytes, ARMCC::AL, 0, *AII);
-
- if (!RI.needsStackRealignment(MF)) return;
-
- // If there's dynamic realignment, adjust for it.
- MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned MaxAlign = MFI->getMaxAlignment();
- assert(!AFI->isThumb1OnlyFunction());
-
- // Emit bic r6, r6, MaxAlign
- unsigned bicOpc = AFI->isThumbFunction() ? ARM::t2BICri : ARM::BICri;
- AddDefaultCC(
- AddDefaultPred(
- BuildMI(*MBB, MBBI, MI->getDebugLoc(), TII->get(bicOpc), ARM::R6)
- .addReg(ARM::R6, RegState::Kill)
- .addImm(MaxAlign - 1)));
-}
-
/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
void ARMTargetLowering::
@@ -5580,8 +5734,6 @@ SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
MachineMemOperand::MOStore, 4, 4);
- EmitBasePointerRecalculation(MI, MBB, DispatchBB);
-
// Load the address of the dispatch MBB into the jump buffer.
if (isThumb2) {
// Incoming value: jbuf
@@ -5683,7 +5835,8 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad;
unsigned MaxCSNum = 0;
MachineModuleInfo &MMI = MF->getMMI();
- for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; ++BB) {
+ for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
+ ++BB) {
if (!BB->isLandingPad()) continue;
// FIXME: We should assert that the EH_LABEL is the first MI in the landing
@@ -5741,12 +5894,10 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
DispatchBB->addSuccessor(DispContBB);
- // Insert and renumber MBBs.
- MachineBasicBlock *Last = &MF->back();
+ // Insert the new MBBs.
MF->insert(MF->end(), DispatchBB);
MF->insert(MF->end(), DispContBB);
MF->insert(MF->end(), TrapBB);
- MF->RenumberBlocks(Last);
// Insert code into the entry block that creates and registers the function
// context.
@@ -5757,35 +5908,63 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
MachineMemOperand::MOLoad |
MachineMemOperand::MOVolatile, 4, 4);
+ if (AFI->isThumb1OnlyFunction())
+ BuildMI(DispatchBB, dl, TII->get(ARM::tInt_eh_sjlj_dispatchsetup));
+ else if (!Subtarget->hasVFP2())
+ BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup_nofp));
+ else
+ BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
+
+ unsigned NumLPads = LPadList.size();
if (Subtarget->isThumb2()) {
unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
.addFrameIndex(FI)
.addImm(4)
.addMemOperand(FIMMOLd));
- AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
- .addReg(NewVReg1)
- .addImm(LPadList.size()));
+
+ if (NumLPads < 256) {
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
+ .addReg(NewVReg1)
+ .addImm(LPadList.size()));
+ } else {
+ unsigned VReg1 = MRI->createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
+ .addImm(NumLPads & 0xFFFF));
+
+ unsigned VReg2 = VReg1;
+ if ((NumLPads & 0xFFFF0000) != 0) {
+ VReg2 = MRI->createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
+ .addReg(VReg1)
+ .addImm(NumLPads >> 16));
+ }
+
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
+ .addReg(NewVReg1)
+ .addReg(VReg2));
+ }
+
BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
.addMBB(TrapBB)
.addImm(ARMCC::HI)
.addReg(ARM::CPSR);
- unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
- AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg2)
+ unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3)
.addJumpTableIndex(MJTI)
.addImm(UId));
- unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
+ unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
AddDefaultCC(
AddDefaultPred(
- BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg3)
- .addReg(NewVReg2, RegState::Kill)
+ BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
+ .addReg(NewVReg3, RegState::Kill)
.addReg(NewVReg1)
.addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
- .addReg(NewVReg3, RegState::Kill)
+ .addReg(NewVReg4, RegState::Kill)
.addReg(NewVReg1)
.addJumpTableIndex(MJTI)
.addImm(UId);
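When the landing-pad count does not fit a compare immediate, it is materialized with a movw/movt pair; roughly:

    //   movw rX, #(NumLPads & 0xffff)   ; low 16 bits
    //   movt rX, #(NumLPads >> 16)      ; high 16 bits, emitted only when
    //                                   ; they are non-zero
    //   cmp  rCount, rX
    //   bhi  TrapBB                     ; out-of-range call-site index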
@@ -5796,9 +5975,30 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
.addImm(1)
.addMemOperand(FIMMOLd));
- AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
- .addReg(NewVReg1)
- .addImm(LPadList.size()));
+ if (NumLPads < 256) {
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
+ .addReg(NewVReg1)
+ .addImm(NumLPads));
+ } else {
+ MachineConstantPool *ConstantPool = MF->getConstantPool();
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
+
+ // MachineConstantPool wants an explicit alignment.
+ unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+ if (Align == 0)
+ Align = getTargetData()->getTypeAllocSize(C->getType());
+ unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
+
+ unsigned VReg1 = MRI->createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
+ .addReg(VReg1, RegState::Define)
+ .addConstantPoolIndex(Idx));
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
+ .addReg(NewVReg1)
+ .addReg(VReg1));
+ }
+
BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
.addMBB(TrapBB)
.addImm(ARMCC::HI)
@@ -5847,38 +6047,77 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
.addFrameIndex(FI)
.addImm(4)
.addMemOperand(FIMMOLd));
- AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
- .addReg(NewVReg1)
- .addImm(LPadList.size()));
+
+ if (NumLPads < 256) {
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
+ .addReg(NewVReg1)
+ .addImm(NumLPads));
+ } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
+ unsigned VReg1 = MRI->createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
+ .addImm(NumLPads & 0xFFFF));
+
+ unsigned VReg2 = VReg1;
+ if ((NumLPads & 0xFFFF0000) != 0) {
+ VReg2 = MRI->createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
+ .addReg(VReg1)
+ .addImm(NumLPads >> 16));
+ }
+
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
+ .addReg(NewVReg1)
+ .addReg(VReg2));
+ } else {
+ MachineConstantPool *ConstantPool = MF->getConstantPool();
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
+
+ // MachineConstantPool wants an explicit alignment.
+ unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+ if (Align == 0)
+ Align = getTargetData()->getTypeAllocSize(C->getType());
+ unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
+
+ unsigned VReg1 = MRI->createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
+ .addReg(VReg1, RegState::Define)
+ .addConstantPoolIndex(Idx)
+ .addImm(0));
+ AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
+ .addReg(NewVReg1)
+ .addReg(VReg1, RegState::Kill));
+ }
+
BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
.addMBB(TrapBB)
.addImm(ARMCC::HI)
.addReg(ARM::CPSR);
- unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
+ unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
AddDefaultCC(
- AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg2)
+ AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
.addReg(NewVReg1)
.addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
- unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
- AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg3)
+ unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
+ AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
.addJumpTableIndex(MJTI)
.addImm(UId));
MachineMemOperand *JTMMOLd =
MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(),
MachineMemOperand::MOLoad, 4, 4);
- unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
+ unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
AddDefaultPred(
- BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg4)
- .addReg(NewVReg2, RegState::Kill)
- .addReg(NewVReg3)
+ BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
+ .addReg(NewVReg3, RegState::Kill)
+ .addReg(NewVReg4)
.addImm(0)
.addMemOperand(JTMMOLd));
BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
- .addReg(NewVReg4, RegState::Kill)
- .addReg(NewVReg3)
+ .addReg(NewVReg5, RegState::Kill)
+ .addReg(NewVReg4)
.addJumpTableIndex(MJTI)
.addImm(UId);
}
@@ -5893,21 +6132,24 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
PrevMBB = CurMBB;
}
+ // N.B. the order the invoke BBs are processed in doesn't matter here.
const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
- const unsigned *SavedRegs = RI.getCalleeSavedRegs(MF);
+ const uint16_t *SavedRegs = RI.getCalleeSavedRegs(MF);
+ SmallVector<MachineBasicBlock*, 64> MBBLPads;
for (SmallPtrSet<MachineBasicBlock*, 64>::iterator
I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) {
MachineBasicBlock *BB = *I;
// Remove the landing pad successor from the invoke block and replace it
// with the new dispatch block.
- for (MachineBasicBlock::succ_iterator
- SI = BB->succ_begin(), SE = BB->succ_end(); SI != SE; ++SI) {
- MachineBasicBlock *SMBB = *SI;
+ SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
+ BB->succ_end());
+ while (!Successors.empty()) {
+ MachineBasicBlock *SMBB = Successors.pop_back_val();
if (SMBB->isLandingPad()) {
BB->removeSuccessor(SMBB);
- SMBB->setIsLandingPad(false);
+ MBBLPads.push_back(SMBB);
}
}
@@ -5919,7 +6161,7 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
// executed.
for (MachineBasicBlock::reverse_iterator
II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
- if (!II->getDesc().isCall()) continue;
+ if (!II->isCall()) continue;
DenseMap<unsigned, bool> DefRegs;
for (MachineInstr::mop_iterator
@@ -5932,15 +6174,31 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const {
MachineInstrBuilder MIB(&*II);
for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
- if (!TRC->contains(SavedRegs[i])) continue;
- if (!DefRegs[SavedRegs[i]])
- MIB.addReg(SavedRegs[i], RegState::ImplicitDefine | RegState::Dead);
+ unsigned Reg = SavedRegs[i];
+ if (Subtarget->isThumb2() &&
+ !ARM::tGPRRegisterClass->contains(Reg) &&
+ !ARM::hGPRRegisterClass->contains(Reg))
+ continue;
+ else if (Subtarget->isThumb1Only() &&
+ !ARM::tGPRRegisterClass->contains(Reg))
+ continue;
+ else if (!Subtarget->isThumb() &&
+ !ARM::GPRRegisterClass->contains(Reg))
+ continue;
+ if (!DefRegs[Reg])
+ MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
}
break;
}
}
+ // Mark all former landing pads as non-landing pads. The dispatch is the only
+ // landing pad now.
+ for (SmallVectorImpl<MachineBasicBlock*>::iterator
+ I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
+ (*I)->setIsLandingPad(false);
+
// The instruction is gone now.
MI->eraseFromParent();
@@ -6222,20 +6480,28 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
return BB;
}
+ case ARM::Int_eh_sjlj_setjmp:
+ case ARM::Int_eh_sjlj_setjmp_nofp:
+ case ARM::tInt_eh_sjlj_setjmp:
+ case ARM::t2Int_eh_sjlj_setjmp:
+ case ARM::t2Int_eh_sjlj_setjmp_nofp:
+ EmitSjLjDispatchBlock(MI, BB);
+ return BB;
+
case ARM::ABS:
case ARM::t2ABS: {
// To insert an ABS instruction, we have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// source vreg to test against 0, the destination vreg to set,
// the condition code register to branch on, the
- // true/false values to select between, and a branch opcode to use.
+ // true/false values to select between, and a branch opcode to use.
// It transforms
// V1 = ABS V0
// into
// V2 = MOVS V0
// BCC (branch to SinkBB if V0 >= 0)
// RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0)
- // SinkBB: V1 = PHI(V2, V3)
+ // SinkBB: V1 = PHI(V2, V3)
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator BBI = BB;
++BBI;
@@ -6276,19 +6542,19 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
.addReg(ARM::CPSR, RegState::Define);
// insert a bcc with opposite CC to ARMCC::MI at the end of BB
- BuildMI(BB, dl,
+ BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
.addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
// insert rsbri in RSBBB
// Note: BCC and rsbri will be converted into predicated rsbmi
// by if-conversion pass
- BuildMI(*RSBBB, RSBBB->begin(), dl,
+ BuildMI(*RSBBB, RSBBB->begin(), dl,
TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
.addReg(NewMovDstReg, RegState::Kill)
.addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
- // insert PHI in SinkBB,
+ // insert PHI in SinkBB,
// reuse ABSDstReg to not change uses of ABS instruction
BuildMI(*SinkBB, SinkBB->begin(), dl,
TII->get(ARM::PHI), ABSDstReg)
@@ -6296,7 +6562,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
.addReg(NewMovDstReg).addMBB(BB);
// remove ABS instruction
- MI->eraseFromParent();
+ MI->eraseFromParent();
// return last added BB
return SinkBB;
@@ -6306,32 +6572,40 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
SDNode *Node) const {
- const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.hasPostISelHook()) {
+ if (!MI->hasPostISelHook()) {
assert(!convertAddSubFlagsOpcode(MI->getOpcode()) &&
"Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'");
return;
}
+ const MCInstrDesc *MCID = &MI->getDesc();
// Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
// RSC. Coming out of isel, they have an implicit CPSR def, but the optional
// operand is still set to noreg. If needed, set the optional operand's
// register to CPSR, and remove the redundant implicit def.
//
- // e.g. ADCS (...opt:%noreg, CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
+ // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
// Rename pseudo opcodes.
unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode());
if (NewOpc) {
const ARMBaseInstrInfo *TII =
static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo());
- MI->setDesc(TII->get(NewOpc));
+ MCID = &TII->get(NewOpc);
+
+ assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 &&
+ "converted opcode should be the same except for cc_out");
+
+ MI->setDesc(*MCID);
+
+ // Add the optional cc_out operand
+ MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
}
- unsigned ccOutIdx = MCID.getNumOperands() - 1;
+ unsigned ccOutIdx = MCID->getNumOperands() - 1;
// Any ARM instruction that sets the 's' bit should specify an optional
// "cc_out" operand in the last operand position.
- if (!MCID.hasOptionalDef() || !MCID.OpInfo[ccOutIdx].isOptionalDef()) {
+ if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
assert(!NewOpc && "Optional cc_out operand required");
return;
}
@@ -6339,7 +6613,7 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
// since we already have an optional CPSR def.
bool definesCPSR = false;
bool deadCPSR = false;
- for (unsigned i = MCID.getNumOperands(), e = MI->getNumOperands();
+ for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands();
i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
@@ -6513,7 +6787,7 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
default:
- assert(0 && "Invalid vector element type for padd optimization.");
+ llvm_unreachable("Invalid vector element type for padd optimization.");
}
SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
@@ -6632,41 +6906,115 @@ static SDValue PerformMULCombine(SDNode *N,
if (!C)
return SDValue();
- uint64_t MulAmt = C->getZExtValue();
+ int64_t MulAmt = C->getSExtValue();
unsigned ShiftAmt = CountTrailingZeros_64(MulAmt);
+
ShiftAmt = ShiftAmt & (32 - 1);
SDValue V = N->getOperand(0);
DebugLoc DL = N->getDebugLoc();
SDValue Res;
MulAmt >>= ShiftAmt;
- if (isPowerOf2_32(MulAmt - 1)) {
- // (mul x, 2^N + 1) => (add (shl x, N), x)
- Res = DAG.getNode(ISD::ADD, DL, VT,
- V, DAG.getNode(ISD::SHL, DL, VT,
- V, DAG.getConstant(Log2_32(MulAmt-1),
- MVT::i32)));
- } else if (isPowerOf2_32(MulAmt + 1)) {
- // (mul x, 2^N - 1) => (sub (shl x, N), x)
- Res = DAG.getNode(ISD::SUB, DL, VT,
- DAG.getNode(ISD::SHL, DL, VT,
- V, DAG.getConstant(Log2_32(MulAmt+1),
- MVT::i32)),
- V);
- } else
- return SDValue();
+
+ if (MulAmt >= 0) {
+ if (isPowerOf2_32(MulAmt - 1)) {
+ // (mul x, 2^N + 1) => (add (shl x, N), x)
+ Res = DAG.getNode(ISD::ADD, DL, VT,
+ V,
+ DAG.getNode(ISD::SHL, DL, VT,
+ V,
+ DAG.getConstant(Log2_32(MulAmt - 1),
+ MVT::i32)));
+ } else if (isPowerOf2_32(MulAmt + 1)) {
+ // (mul x, 2^N - 1) => (sub (shl x, N), x)
+ Res = DAG.getNode(ISD::SUB, DL, VT,
+ DAG.getNode(ISD::SHL, DL, VT,
+ V,
+ DAG.getConstant(Log2_32(MulAmt + 1),
+ MVT::i32)),
+ V);
+ } else
+ return SDValue();
+ } else {
+ uint64_t MulAmtAbs = -MulAmt;
+ if (isPowerOf2_32(MulAmtAbs + 1)) {
+ // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
+ Res = DAG.getNode(ISD::SUB, DL, VT,
+ V,
+ DAG.getNode(ISD::SHL, DL, VT,
+ V,
+ DAG.getConstant(Log2_32(MulAmtAbs + 1),
+ MVT::i32)));
+ } else if (isPowerOf2_32(MulAmtAbs - 1)) {
+ // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
+ Res = DAG.getNode(ISD::ADD, DL, VT,
+ V,
+ DAG.getNode(ISD::SHL, DL, VT,
+ V,
+ DAG.getConstant(Log2_32(MulAmtAbs - 1),
+ MVT::i32)));
+ Res = DAG.getNode(ISD::SUB, DL, VT,
+ DAG.getConstant(0, MVT::i32), Res);
+
+ } else
+ return SDValue();
+ }
if (ShiftAmt != 0)
- Res = DAG.getNode(ISD::SHL, DL, VT, Res,
- DAG.getConstant(ShiftAmt, MVT::i32));
+ Res = DAG.getNode(ISD::SHL, DL, VT,
+ Res, DAG.getConstant(ShiftAmt, MVT::i32));
// Do not add new nodes to DAG combiner worklist.
DCI.CombineTo(N, Res, false);
return SDValue();
}
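
A quick standalone check (plain C++, not part of the patch) of the four multiply identities the combine now relies on; n plays the role of the Log2_32 shift amount, and wrap-around is intentional, matching i32 semantics:

#include <cassert>
#include <cstdint>

static uint32_t mulPow2Plus1(uint32_t x, unsigned n)     { return (x << n) + x; }       // x * (2^n + 1)
static uint32_t mulPow2Minus1(uint32_t x, unsigned n)    { return (x << n) - x; }       // x * (2^n - 1)
static uint32_t mulNegPow2Minus1(uint32_t x, unsigned n) { return x - (x << n); }       // x * -(2^n - 1)
static uint32_t mulNegPow2Plus1(uint32_t x, unsigned n)  { return 0 - ((x << n) + x); } // x * -(2^n + 1)

int main() {
  uint32_t x = 12345;
  assert(mulPow2Plus1(x, 3) == x * 9u);                // 2^3 + 1
  assert(mulPow2Minus1(x, 3) == x * 7u);               // 2^3 - 1
  assert(mulNegPow2Minus1(x, 3) == x * (uint32_t)-7);  // -(2^3 - 1)
  assert(mulNegPow2Plus1(x, 3) == x * (uint32_t)-9);   // -(2^3 + 1)
  // Trailing-zero factors are peeled off first and reapplied as a final
  // shift, as ShiftAmt is above: x * 18 == (x * 9) << 1.
  assert((mulPow2Plus1(x, 3) << 1) == x * 18u);
  return 0;
}
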
+static bool isCMOVWithZeroOrAllOnesLHS(SDValue N, bool AllOnes) {
+ if (N.getOpcode() != ARMISD::CMOV || !N.getNode()->hasOneUse())
+ return false;
+
+ SDValue FalseVal = N.getOperand(0);
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(FalseVal);
+ if (!C)
+ return false;
+ if (AllOnes)
+ return C->isAllOnesValue();
+ return C->isNullValue();
+}
+
+/// formConditionalOp - Combine an operation with a conditional move operand
+/// to form a conditional op. e.g. (or x, (cmov 0, y, cond)) => (or.cond x, y)
+/// (and x, (cmov -1, y, cond)) => (and.cond, x, y)
+static SDValue formConditionalOp(SDNode *N, SelectionDAG &DAG,
+ bool Commutable) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
+ bool isAND = N->getOpcode() == ISD::AND;
+ bool isCand = isCMOVWithZeroOrAllOnesLHS(N1, isAND);
+ if (!isCand && Commutable) {
+ isCand = isCMOVWithZeroOrAllOnesLHS(N0, isAND);
+ if (isCand)
+ std::swap(N0, N1);
+ }
+ if (!isCand)
+ return SDValue();
+
+ unsigned Opc = 0;
+ switch (N->getOpcode()) {
+ default: llvm_unreachable("Unexpected node");
+ case ISD::AND: Opc = ARMISD::CAND; break;
+ case ISD::OR: Opc = ARMISD::COR; break;
+ case ISD::XOR: Opc = ARMISD::CXOR; break;
+ }
+ return DAG.getNode(Opc, N->getDebugLoc(), N->getValueType(0), N0,
+ N1.getOperand(1), N1.getOperand(2), N1.getOperand(3),
+ N1.getOperand(4));
+}
+
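
The algebra behind formConditionalOp, checked as a standalone C++ sketch (not LLVM code): a cmov with the right neutral element as its false value turns a plain bitwise op into the predicated form the comment describes.

#include <cassert>
#include <cstdint>

// cmov(f, t, cond) selects t when cond holds, otherwise f.
static uint32_t cmov(uint32_t f, uint32_t t, bool cond) { return cond ? t : f; }

// The predicated ops: apply the operation only when cond holds.
static uint32_t orCond(uint32_t x, uint32_t y, bool cond)  { return cond ? (x | y) : x; }
static uint32_t andCond(uint32_t x, uint32_t y, bool cond) { return cond ? (x & y) : x; }
static uint32_t xorCond(uint32_t x, uint32_t y, bool cond) { return cond ? (x ^ y) : x; }

int main() {
  for (int c = 0; c < 2; ++c) {
    bool cond = c != 0;
    uint32_t x = 0xF0F0F0F0u, y = 0x0FF00FF0u;
    assert((x | cmov(0u, y, cond))  == orCond(x, y, cond));  // OR needs a 0 false value
    assert((x & cmov(~0u, y, cond)) == andCond(x, y, cond)); // AND needs all-ones
    assert((x ^ cmov(0u, y, cond))  == xorCond(x, y, cond)); // XOR needs a 0
  }
  return 0;
}
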
static SDValue PerformANDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const ARMSubtarget *Subtarget) {
// Attempt to use immediate-form VBIC
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
@@ -6697,6 +7045,13 @@ static SDValue PerformANDCombine(SDNode *N,
}
}
+ if (!Subtarget->isThumb1Only()) {
+ // (and x, (cmov -1, y, cond)) => (and.cond x, y)
+ SDValue CAND = formConditionalOp(N, DAG, true);
+ if (CAND.getNode())
+ return CAND;
+ }
+
return SDValue();
}
@@ -6733,6 +7088,13 @@ static SDValue PerformORCombine(SDNode *N,
}
}
+ if (!Subtarget->isThumb1Only()) {
+ // (or x, (cmov 0, y, cond)) => (or.cond x, y)
+ SDValue COR = formConditionalOp(N, DAG, true);
+ if (COR.getNode())
+ return COR;
+ }
+
SDValue N0 = N->getOperand(0);
if (N0.getOpcode() != ISD::AND)
return SDValue();
@@ -6881,6 +7243,25 @@ static SDValue PerformORCombine(SDNode *N,
return SDValue();
}
+static SDValue PerformXORCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const ARMSubtarget *Subtarget) {
+ EVT VT = N->getValueType(0);
+ SelectionDAG &DAG = DCI.DAG;
+
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+ return SDValue();
+
+ if (!Subtarget->isThumb1Only()) {
+ // (xor x, (cmov 0, y, cond)) => (xor.cond x, y)
+ SDValue CXOR = formConditionalOp(N, DAG, true);
+ if (CXOR.getNode())
+ return CXOR;
+ }
+
+ return SDValue();
+}
+
/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
/// the bits being cleared by the AND are not demanded by the BFI.
static SDValue PerformBFICombine(SDNode *N,
@@ -6926,13 +7307,14 @@ static SDValue PerformVMOVRRDCombine(SDNode *N,
SDValue BasePtr = LD->getBasePtr();
SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr,
LD->getPointerInfo(), LD->isVolatile(),
- LD->isNonTemporal(), LD->getAlignment());
+ LD->isNonTemporal(), LD->isInvariant(),
+ LD->getAlignment());
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
DAG.getConstant(4, MVT::i32));
SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr,
LD->getPointerInfo(), LD->isVolatile(),
- LD->isNonTemporal(),
+ LD->isNonTemporal(), LD->isInvariant(),
std::min(4U, LD->getAlignment() / 2));
DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
@@ -6967,15 +7349,99 @@ static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
/// ISD::STORE.
static SDValue PerformSTORECombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
- // Bitcast an i64 store extracted from a vector to f64.
- // Otherwise, the i64 value will be legalized to a pair of i32 values.
StoreSDNode *St = cast<StoreSDNode>(N);
+ if (St->isVolatile())
+ return SDValue();
+
+ // Optimize trunc store (of multiple scalars) to shuffle and store. First,
+ // pack all of the elements in one place. Next, store to memory in fewer
+ // chunks.
SDValue StVal = St->getValue();
- if (!ISD::isNormalStore(St) || St->isVolatile())
+ EVT VT = StVal.getValueType();
+ if (St->isTruncatingStore() && VT.isVector()) {
+ SelectionDAG &DAG = DCI.DAG;
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT StVT = St->getMemoryVT();
+ unsigned NumElems = VT.getVectorNumElements();
+ assert(StVT != VT && "Cannot truncate to the same type");
+ unsigned FromEltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits();
+
+ // The From and To element sizes and the element count must be powers of two.
+ if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
+
+ // We are going to use the original vector elt for storing.
+ // Accumulated smaller vector elements must be a multiple of the store size.
+ if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();
+
+ unsigned SizeRatio = FromEltSz / ToEltSz;
+ assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
+
+ // Create a type on which we perform the shuffle.
+ EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
+ NumElems*SizeRatio);
+ assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
+
+ DebugLoc DL = St->getDebugLoc();
+ SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
+ SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
+ for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio;
+
+ // Can't shuffle using an illegal type.
+ if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
+
+ SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
+ DAG.getUNDEF(WideVec.getValueType()),
+ ShuffleVec.data());
+ // At this point all of the data is stored at the bottom of the
+ // register. We now need to save it to memory.
+
+ // Find the largest store unit
+ MVT StoreType = MVT::i8;
+ for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE;
+ tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) {
+ MVT Tp = (MVT::SimpleValueType)tp;
+ if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
+ StoreType = Tp;
+ }
+ // Didn't find a legal store type.
+ if (!TLI.isTypeLegal(StoreType))
+ return SDValue();
+
+ // Bitcast the original vector into a vector of store-size units
+ EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
+ StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
+ assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
+ SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
+ SmallVector<SDValue, 8> Chains;
+ SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
+ TLI.getPointerTy());
+ SDValue BasePtr = St->getBasePtr();
+
+ // Perform one or more big stores into memory.
+ unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
+ for (unsigned I = 0; I < E; I++) {
+ SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
+ StoreType, ShuffWide,
+ DAG.getIntPtrConstant(I));
+ SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
+ St->getPointerInfo(), St->isVolatile(),
+ St->isNonTemporal(), St->getAlignment());
+ BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
+ Increment);
+ Chains.push_back(Ch);
+ }
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0],
+ Chains.size());
+ }
+
+ if (!ISD::isNormalStore(St))
return SDValue();
+ // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
+ // ARM stores of arguments in the same cache line.
if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
- StVal.getNode()->hasOneUse() && !St->isVolatile()) {
+ StVal.getNode()->hasOneUse()) {
SelectionDAG &DAG = DCI.DAG;
DebugLoc DL = St->getDebugLoc();
SDValue BasePtr = St->getBasePtr();
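
A scalar model of the truncating-store combine above, as standalone C++ (little-endian lane numbering assumed; not LLVM code): a <4 x i32> truncating store to <4 x i8> bitcasts to <16 x i8>, shuffles lanes 0, 4, 8, 12 to the bottom (SizeRatio == 4), and then issues a single i32 store instead of four byte stores.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t src[4] = {0x11, 0x22, 0x33, 0x44};
  uint8_t wide[16];
  std::memcpy(wide, src, 16);          // bitcast <4 x i32> -> <16 x i8>

  uint8_t shuffled[16] = {};
  for (unsigned i = 0; i < 4; ++i)     // ShuffleVec[i] = i * SizeRatio
    shuffled[i] = wide[i * 4];

  uint32_t chunk;                      // one store-size unit (MVT::i32)
  std::memcpy(&chunk, shuffled, 4);

  uint8_t dst[4];
  std::memcpy(dst, &chunk, 4);         // a single 4-byte store
  assert(dst[0] == 0x11 && dst[1] == 0x22 && dst[2] == 0x33 && dst[3] == 0x44);
  return 0;
}
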
@@ -6996,6 +7462,8 @@ static SDValue PerformSTORECombine(SDNode *N,
StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
+ // Bitcast an i64 store extracted from a vector to f64.
+ // Otherwise, the i64 value will be legalized to a pair of i32 values.
SelectionDAG &DAG = DCI.DAG;
DebugLoc dl = StVal.getDebugLoc();
SDValue IntVec = StVal.getOperand(0);
@@ -7177,7 +7645,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
if (isIntrinsic) {
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IntNo) {
- default: assert(0 && "unexpected intrinsic for Neon base update");
+ default: llvm_unreachable("unexpected intrinsic for Neon base update");
case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD;
NumVecs = 1; break;
case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD;
@@ -7210,7 +7678,7 @@ static SDValue CombineBaseUpdate(SDNode *N,
} else {
isLaneOp = true;
switch (N->getOpcode()) {
- default: assert(0 && "unexpected opcode for Neon base update");
+ default: llvm_unreachable("unexpected opcode for Neon base update");
case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
@@ -7703,6 +8171,18 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) {
EVT VT = N->getValueType(0);
+ if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
+ // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
+ // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
+ SDValue N1 = N->getOperand(1);
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
+ SDValue N0 = N->getOperand(0);
+ if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
+ DAG.MaskedValueIsZero(N0.getOperand(0),
+ APInt::getHighBitsSet(32, 16)))
+ return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, N0, N1);
+ }
+ }
// Nothing to be done for scalar shifts.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
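
The srl-to-rotr canonicalization a few lines up rests on two facts, checked here in standalone C++ (not LLVM code): when the high 16 bits of x are zero, the low 16 bits of bswap(x) are zero, so the logical shift and the rotate agree; and rotating the byte-swapped word by 16 swaps the bytes within each halfword, which is exactly what REV16 computes.

#include <cassert>
#include <cstdint>

static uint32_t bswap32(uint32_t x) {
  return (x >> 24) | ((x >> 8) & 0xFF00u) | ((x << 8) & 0xFF0000u) | (x << 24);
}
static uint32_t rotr32(uint32_t x, unsigned n) {
  return (x >> n) | (x << (32 - n));
}

int main() {
  uint32_t x = 0x0000ABCDu;  // high 16 bits known zero
  // srl and rotr agree because the shifted-out bits are all zero:
  assert((bswap32(x) >> 16) == rotr32(bswap32(x), 16));
  // rotr(bswap(x), 16) byte-swaps each halfword: 0x0000ABCD -> 0x0000CDAB,
  // the REV16 result.
  assert(rotr32(bswap32(x), 16) == 0x0000CDABu);
  return 0;
}
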
@@ -7824,7 +8304,7 @@ static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
// will return -0, so vmin can only be used for unsafe math or if one of
// the operands is known to be nonzero.
if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) &&
- !UnsafeFPMath &&
+ !DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
break;
Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN;
@@ -7846,7 +8326,7 @@ static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
// will return +0, so vmax can only be used for unsafe math or if one of
// the operands is known to be nonzero.
if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) &&
- !UnsafeFPMath &&
+ !DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
break;
Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX;
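
The signed-zero hazard the guards above protect against, as a standalone C++ sketch (neonVminModel is a hypothetical model of the one input pair where, per the comment above, vmin diverges from the select form by returning -0):

#include <cassert>
#include <cstdint>
#include <cstring>

static bool isNegZero(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return bits == 0x80000000u;
}

// Hypothetical model of NEON vmin on zeros: vmin(+0, -0) returns -0.
static float neonVminModel(float a, float b) {
  if (a == 0.0f && b == 0.0f)
    return (isNegZero(a) || isNegZero(b)) ? -0.0f : +0.0f;
  return a < b ? a : b;
}

int main() {
  float lhs = +0.0f, rhs = -0.0f;
  // select_cc with SETLE: +0 <= -0 compares true, so the +0 LHS is chosen.
  float sel = (lhs <= rhs) ? lhs : rhs;
  float vmin = neonVminModel(lhs, rhs);
  // The two forms disagree on the sign of zero, which is why the combine
  // requires unsafe FP math or a known-nonzero operand.
  assert(!isNegZero(sel) && isNegZero(vmin));
  return 0;
}
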
@@ -7906,8 +8386,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
if (Res.getNode()) {
APInt KnownZero, KnownOne;
- APInt Mask = APInt::getAllOnesValue(VT.getScalarType().getSizeInBits());
- DAG.ComputeMaskedBits(SDValue(N,0), Mask, KnownZero, KnownOne);
+ DAG.ComputeMaskedBits(SDValue(N,0), KnownZero, KnownOne);
// Capture demanded bits information that would be otherwise lost.
if (KnownZero == 0xfffffffe)
Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
@@ -7931,7 +8410,8 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SUB: return PerformSUBCombine(N, DCI);
case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
- case ISD::AND: return PerformANDCombine(N, DCI);
+ case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget);
+ case ISD::AND: return PerformANDCombine(N, DCI, Subtarget);
case ARMISD::BFI: return PerformBFICombine(N, DCI);
case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
@@ -8001,6 +8481,41 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
}
}
+static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
+ unsigned AlignCheck) {
+ return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
+ (DstAlign == 0 || DstAlign % AlignCheck == 0));
+}
+
+EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
+ unsigned DstAlign, unsigned SrcAlign,
+ bool IsZeroVal,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const {
+ const Function *F = MF.getFunction();
+
+ // See if we can use NEON instructions for this...
+ if (IsZeroVal &&
+ !F->hasFnAttr(Attribute::NoImplicitFloat) &&
+ Subtarget->hasNEON()) {
+ if (memOpAlign(SrcAlign, DstAlign, 16) && Size >= 16) {
+ return MVT::v4i32;
+ } else if (memOpAlign(SrcAlign, DstAlign, 8) && Size >= 8) {
+ return MVT::v2i32;
+ }
+ }
+
+ // Lower to i32/i16 if the size permits.
+ if (Size >= 4) {
+ return MVT::i32;
+ } else if (Size >= 2) {
+ return MVT::i16;
+ }
+
+ // Let the target-independent logic figure it out.
+ return MVT::Other;
+}
+
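
A standalone restatement of the type-selection ladder in getOptimalMemOpType above (plain C++, returning type names instead of MVTs so it can run by itself; pickMemOpType is a hypothetical helper, not an LLVM API):

#include <cassert>
#include <cstdint>
#include <string>

static bool memOpAlignCheck(unsigned DstAlign, unsigned SrcAlign,
                            unsigned AlignCheck) {
  // Zero alignment means "known sufficiently aligned" in this interface.
  return (SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
         (DstAlign == 0 || DstAlign % AlignCheck == 0);
}

static std::string pickMemOpType(uint64_t Size, unsigned DstAlign,
                                 unsigned SrcAlign, bool IsZeroVal,
                                 bool HasNEON) {
  if (IsZeroVal && HasNEON) {
    if (memOpAlignCheck(SrcAlign, DstAlign, 16) && Size >= 16) return "v4i32";
    if (memOpAlignCheck(SrcAlign, DstAlign, 8) && Size >= 8)   return "v2i32";
  }
  if (Size >= 4) return "i32";
  if (Size >= 2) return "i16";
  return "other";
}

int main() {
  assert(pickMemOpType(32, 16, 16, true, true) == "v4i32");
  assert(pickMemOpType(8, 8, 8, true, true) == "v2i32");
  assert(pickMemOpType(8, 4, 4, true, true) == "i32");   // under-aligned for NEON
  assert(pickMemOpType(3, 1, 1, false, true) == "i16");
  assert(pickMemOpType(1, 1, 1, false, true) == "other");
  return 0;
}
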
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
if (V < 0)
return false;
@@ -8188,7 +8703,6 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
if (Scale & 1) return false;
return isPowerOf2_32(Scale);
}
- break;
}
return true;
}
@@ -8198,10 +8712,12 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
+ // Thumb2 and ARM modes can use cmn for negative immediates.
if (!Subtarget->isThumb())
- return ARM_AM::getSOImmVal(Imm) != -1;
+ return ARM_AM::getSOImmVal(llvm::abs64(Imm)) != -1;
if (Subtarget->isThumb2())
- return ARM_AM::getT2SOImmVal(Imm) != -1;
+ return ARM_AM::getT2SOImmVal(llvm::abs64(Imm)) != -1;
+ // Thumb1 doesn't have cmn, and only 8-bit immediates.
return Imm >= 0 && Imm <= 255;
}
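
Why testing the absolute value is sound: cmp r, #c sets flags from r - c, while cmn r, #c sets flags from r + c, and for a negative constant r - c == r + (-c) in two's-complement arithmetic. A standalone C++ check (not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  int32_t c = -10;
  for (int32_t r = -100; r <= 100; ++r) {
    // 32-bit wraparound arithmetic, as the flag-setting ALU ops perform it:
    uint32_t cmpResult = (uint32_t)r - (uint32_t)c;    // cmp r, #-10
    uint32_t cmnResult = (uint32_t)r + (uint32_t)(-c); // cmn r, #10
    assert(cmpResult == cmnResult);
  }
  return 0;
}
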
@@ -8388,22 +8904,20 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
}
void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
- KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
+ KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
- DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
+ DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
if (KnownZero == 0 && KnownOne == 0) return;
APInt KnownZeroRHS, KnownOneRHS;
- DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
- KnownZeroRHS, KnownOneRHS, Depth+1);
+ DAG.ComputeMaskedBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
KnownZero &= KnownZeroRHS;
KnownOne &= KnownOneRHS;
return;
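
The CMOV case above intersects the known bits of the two inputs, since the result may be either one. A standalone C++ check of that reasoning (masks are made-up examples, not LLVM data):

#include <cassert>
#include <cstdint>

int main() {
  // Known bits of each CMOV input: a set bit in Zero/One means that bit is
  // proven 0 / proven 1.
  uint32_t Zero0 = 0xFF000000u, One0 = 0x000000FFu;  // operand 0
  uint32_t Zero1 = 0xF0000000u, One1 = 0x0000000Fu;  // operand 1

  // The CMOV may produce either operand, so only agreement survives:
  uint32_t Zero = Zero0 & Zero1, One = One0 & One1;
  assert(Zero == 0xF0000000u && One == 0x0000000Fu);

  // Any concrete values consistent with the inputs satisfy the result masks.
  uint32_t v0 = 0x00ABCDFFu;  // top byte 0, low byte all-ones
  uint32_t v1 = 0x0ABCDE0Fu;  // top nibble 0, low nibble all-ones
  for (uint32_t v : {v0, v1}) {
    assert((v & Zero) == 0);    // proven-zero bits really are zero
    assert((v & One) == One);   // proven-one bits really are one
  }
  return 0;
}
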
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
index 5da9b27..352d980 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -15,6 +15,7 @@
#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H
+#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -56,7 +57,11 @@ namespace llvm {
CMPFP, // ARM VFP compare instruction, sets FPSCR.
CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
FMSTAT, // ARM fmstat instruction.
+
CMOV, // ARM conditional move instructions.
+ CAND, // ARM conditional and instructions.
+ COR, // ARM conditional or instructions.
+ CXOR, // ARM conditional xor instructions.
BCC_i64,
@@ -81,7 +86,6 @@ namespace llvm {
EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
- EH_SJLJ_DISPATCHSETUP, // SjLj exception handling dispatch setup.
TC_RETURN, // Tail call return pseudo.
@@ -146,6 +150,9 @@ namespace llvm {
VMOVIMM,
VMVNIMM,
+ // Vector move f32 immediate:
+ VMOVFPIMM,
+
// Vector duplicate:
VDUP,
VDUPLANE,
@@ -266,9 +273,14 @@ namespace llvm {
/// allowsUnalignedMemoryAccesses - Returns true if the target allows
/// unaligned memory accesses of the specified type.
- /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
+ virtual EVT getOptimalMemOpType(uint64_t Size,
+ unsigned DstAlign, unsigned SrcAlign,
+ bool IsZeroVal,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const;
+
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
@@ -303,7 +315,6 @@ namespace llvm {
SelectionDAG &DAG) const;
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -338,7 +349,7 @@ namespace llvm {
/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
- virtual TargetRegisterClass *getRegClassFor(EVT VT) const;
+ virtual const TargetRegisterClass *getRegClassFor(EVT VT) const;
/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
@@ -402,7 +413,6 @@ namespace llvm {
ISD::ArgFlagsTy Flags) const;
SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerEH_SJLJ_DISPATCHSETUP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -424,6 +434,8 @@ namespace llvm {
SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
+ const ARMSubtarget *ST) const;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) const;
@@ -452,7 +464,7 @@ namespace llvm {
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -481,7 +493,7 @@ namespace llvm {
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
- virtual bool isUsedByReturnOnly(SDNode *N) const;
+ virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
@@ -512,9 +524,6 @@ namespace llvm {
bool signExtend,
ARMCC::CondCodes Cond) const;
- void EmitBasePointerRecalculation(MachineInstr *MI, MachineBasicBlock *MBB,
- MachineBasicBlock *DispatchBB) const;
-
void SetupEntryBlockForSjLj(MachineInstr *MI,
MachineBasicBlock *MBB,
MachineBasicBlock *DispatchBB, int FI) const;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
index 7cbc911..1d38bcf 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -1,4 +1,4 @@
-//===- ARMInstrFormats.td - ARM Instruction Formats ----------*- tablegen -*-=//
+//===-- ARMInstrFormats.td - ARM Instruction Formats -------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -174,7 +174,7 @@ def s_cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 CPSR))> {
// ARM special operands for disassembly only.
//
-def SetEndAsmOperand : AsmOperandClass {
+def SetEndAsmOperand : ImmAsmOperand {
let Name = "SetEndImm";
let ParserMethod = "parseSetEndImm";
}
@@ -201,21 +201,29 @@ def msr_mask : Operand<i32> {
// 16 imm6<5:4> = '01', 16 - <imm> is encoded in imm6<3:0>
// 32 imm6<5> = '1', 32 - <imm> is encoded in imm6<4:0>
// 64 64 - <imm> is encoded in imm6<5:0>
+def shr_imm8_asm_operand : ImmAsmOperand { let Name = "ShrImm8"; }
def shr_imm8 : Operand<i32> {
let EncoderMethod = "getShiftRight8Imm";
let DecoderMethod = "DecodeShiftRight8Imm";
+ let ParserMatchClass = shr_imm8_asm_operand;
}
+def shr_imm16_asm_operand : ImmAsmOperand { let Name = "ShrImm16"; }
def shr_imm16 : Operand<i32> {
let EncoderMethod = "getShiftRight16Imm";
let DecoderMethod = "DecodeShiftRight16Imm";
+ let ParserMatchClass = shr_imm16_asm_operand;
}
+def shr_imm32_asm_operand : ImmAsmOperand { let Name = "ShrImm32"; }
def shr_imm32 : Operand<i32> {
let EncoderMethod = "getShiftRight32Imm";
let DecoderMethod = "DecodeShiftRight32Imm";
+ let ParserMatchClass = shr_imm32_asm_operand;
}
+def shr_imm64_asm_operand : ImmAsmOperand { let Name = "ShrImm64"; }
def shr_imm64 : Operand<i32> {
let EncoderMethod = "getShiftRight64Imm";
let DecoderMethod = "DecodeShiftRight64Imm";
+ let ParserMatchClass = shr_imm64_asm_operand;
}
//===----------------------------------------------------------------------===//
@@ -231,6 +239,14 @@ class VFP2InstAlias<string Asm, dag Result, bit Emit = 0b1>
: InstAlias<Asm, Result, Emit>, Requires<[HasVFP2]>;
class VFP3InstAlias<string Asm, dag Result, bit Emit = 0b1>
: InstAlias<Asm, Result, Emit>, Requires<[HasVFP3]>;
+class NEONInstAlias<string Asm, dag Result, bit Emit = 0b1>
+ : InstAlias<Asm, Result, Emit>, Requires<[HasNEON]>;
+
+
+class VFP2MnemonicAlias<string src, string dst> : MnemonicAlias<src, dst>,
+ Requires<[HasVFP2]>;
+class NEONMnemonicAlias<string src, string dst> : MnemonicAlias<src, dst>,
+ Requires<[HasNEON]>;
//===----------------------------------------------------------------------===//
// ARM Instruction templates.
@@ -274,6 +290,14 @@ class InstTemplate<AddrMode am, int sz, IndexMode im,
class Encoding {
field bits<32> Inst;
+ // Mask of bits that cause an encoding to be UNPREDICTABLE.
+ // If a bit is set, then if the corresponding bit in the
+ // target encoding differs from its value in the "Inst" field,
+ // the instruction is UNPREDICTABLE (SoftFail in abstract parlance).
+ field bits<32> Unpredictable = 0;
+ // SoftFail is the generic name for this field, but we alias it so
+ // as to make it more obvious what it means in ARM-land.
+ field bits<32> SoftFail = Unpredictable;
}
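
A sketch of how a disassembler can consume the Unpredictable/SoftFail mask (standalone C++; checkEncoding is a hypothetical helper, not the real LLVM decoder interface): a word that differs from the canonical encoding only in masked bits still decodes, but with a soft-fail status, while a difference in any other fixed bit means it is not this instruction at all.

#include <cassert>
#include <cstdint>

enum class DecodeStatus { Success, SoftFail, Fail };

static DecodeStatus checkEncoding(uint32_t word, uint32_t inst,
                                  uint32_t fixedMask, uint32_t softFailMask) {
  if ((word & fixedMask & ~softFailMask) != (inst & fixedMask & ~softFailMask))
    return DecodeStatus::Fail;      // mismatched hard bits: a different instr
  if ((word & softFailMask) != (inst & softFailMask))
    return DecodeStatus::SoftFail;  // matches, but is UNPREDICTABLE
  return DecodeStatus::Success;
}

int main() {
  uint32_t inst = 0xE1A00000u;   // some canonical encoding
  uint32_t fixed = 0xFFFFFFFFu;  // all bits participate in matching
  uint32_t soft = 0x0000F000u;   // bits that only make it UNPREDICTABLE
  assert(checkEncoding(inst, inst, fixed, soft) == DecodeStatus::Success);
  assert(checkEncoding(inst ^ 0x00001000u, inst, fixed, soft) ==
         DecodeStatus::SoftFail);
  assert(checkEncoding(inst ^ 0x00100000u, inst, fixed, soft) ==
         DecodeStatus::Fail);
  return 0;
}
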
class InstARM<AddrMode am, int sz, IndexMode im,
@@ -290,6 +314,32 @@ class InstThumb<AddrMode am, int sz, IndexMode im,
let DecoderNamespace = "Thumb";
}
+// Pseudo-instructions for alternate assembly syntax (never used by codegen).
+// These are aliases that require C++ handling to convert to the target
+// instruction, while InstAliases can be handled directly by tblgen.
+class AsmPseudoInst<string asm, dag iops>
+ : InstTemplate<AddrModeNone, 0, IndexModeNone, Pseudo, GenericDomain,
+ "", NoItinerary> {
+ let OutOperandList = (outs);
+ let InOperandList = iops;
+ let Pattern = [];
+ let isCodeGenOnly = 0; // So we get asm matcher for it.
+ let AsmString = asm;
+ let isPseudo = 1;
+}
+
+class ARMAsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
+ Requires<[IsARM]>;
+class tAsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
+ Requires<[IsThumb]>;
+class t2AsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
+ Requires<[IsThumb2]>;
+class VFP2AsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
+ Requires<[HasVFP2]>;
+class NEONAsmPseudo<string asm, dag iops> : AsmPseudoInst<asm, iops>,
+ Requires<[HasNEON]>;
+
+// Pseudo instructions for the code generator.
class PseudoInst<dag oops, dag iops, InstrItinClass itin, list<dag> pattern>
: InstTemplate<AddrModeNone, 0, IndexModeNone, Pseudo,
GenericDomain, "", itin> {
@@ -481,6 +531,8 @@ class AIswp<bit b, dag oops, dag iops, string opc, list<dag> pattern>
let Inst{15-12} = Rt;
let Inst{11-4} = 0b00001001;
let Inst{3-0} = Rt2;
+
+ let DecoderMethod = "DecodeSwap";
}
// addrmode1 instructions
@@ -792,7 +844,7 @@ class AMiscA1I<bits<8> opcod, bits<4> opc7_4, dag oops, dag iops,
}
// PKH instructions
-def PKHLSLAsmOperand : AsmOperandClass {
+def PKHLSLAsmOperand : ImmAsmOperand {
let Name = "PKHLSLImm";
let ParserMethod = "parsePKHLSLImm";
}
@@ -1550,8 +1602,11 @@ class AVConv1XI<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, bit op5,
dag oops, dag iops, InstrItinClass itin, string opc, string asm,
list<dag> pattern>
: AVConv1I<op1, op2, op3, op4, oops, iops, itin, opc, asm, pattern> {
+ bits<5> fbits;
// size (fixed-point number): sx == 0 ? 16 : 32
let Inst{7} = op5; // sx
+ let Inst{5} = fbits{0};
+ let Inst{3-0} = fbits{4-1};
}
// VFP conversion instructions, if no NEON
@@ -1963,3 +2018,54 @@ class NVDupLane<bits<4> op19_16, bit op6, dag oops, dag iops,
class NEONFPPat<dag pattern, dag result> : Pat<pattern, result> {
list<Predicate> Predicates = [HasNEON,UseNEONForFP];
}
+
+// VFP/NEON instruction aliases for type suffixes.
+class VFPDataTypeInstAlias<string opc, string dt, string asm, dag Result> :
+ InstAlias<!strconcat(opc, dt, "\t", asm), Result>, Requires<[HasVFP2]>;
+
+multiclass VFPDTAnyInstAlias<string opc, string asm, dag Result> {
+ def : VFPDataTypeInstAlias<opc, ".8", asm, Result>;
+ def : VFPDataTypeInstAlias<opc, ".16", asm, Result>;
+ def : VFPDataTypeInstAlias<opc, ".32", asm, Result>;
+ def : VFPDataTypeInstAlias<opc, ".64", asm, Result>;
+}
+
+multiclass NEONDTAnyInstAlias<string opc, string asm, dag Result> {
+ let Predicates = [HasNEON] in {
+ def : VFPDataTypeInstAlias<opc, ".8", asm, Result>;
+ def : VFPDataTypeInstAlias<opc, ".16", asm, Result>;
+ def : VFPDataTypeInstAlias<opc, ".32", asm, Result>;
+ def : VFPDataTypeInstAlias<opc, ".64", asm, Result>;
+  }
+}
+
+// The same alias classes using AsmPseudo instead, for the more complex
+// stuff in NEON that InstAlias can't quite handle.
+// Note that we can't use anonymous defm references here like we can
+// above, as we care about the ultimate instruction enum names generated, unlike
+// for instalias defs.
+class NEONDataTypeAsmPseudoInst<string opc, string dt, string asm, dag iops> :
+ AsmPseudoInst<!strconcat(opc, dt, "\t", asm), iops>, Requires<[HasNEON]>;
+
+// Data type suffix token aliases. Implements Table A7-3 in the ARM ARM.
+def : TokenAlias<".s8", ".i8">;
+def : TokenAlias<".u8", ".i8">;
+def : TokenAlias<".s16", ".i16">;
+def : TokenAlias<".u16", ".i16">;
+def : TokenAlias<".s32", ".i32">;
+def : TokenAlias<".u32", ".i32">;
+def : TokenAlias<".s64", ".i64">;
+def : TokenAlias<".u64", ".i64">;
+
+def : TokenAlias<".i8", ".8">;
+def : TokenAlias<".i16", ".16">;
+def : TokenAlias<".i32", ".32">;
+def : TokenAlias<".i64", ".64">;
+
+def : TokenAlias<".p8", ".8">;
+def : TokenAlias<".p16", ".16">;
+
+def : TokenAlias<".f32", ".32">;
+def : TokenAlias<".f64", ".64">;
+def : TokenAlias<".f", ".f32">;
+def : TokenAlias<".d", ".f64">;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index 48da03f..b8f607e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
+//===-- ARMInstrInfo.cpp - ARM Instruction Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,12 +21,29 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCInst.h"
using namespace llvm;
ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
}
+/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+void ARMInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
+ if (hasNOP()) {
+ NopInst.setOpcode(ARM::NOP);
+ NopInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ NopInst.addOperand(MCOperand::CreateReg(0));
+ } else {
+ NopInst.setOpcode(ARM::MOVr);
+ NopInst.addOperand(MCOperand::CreateReg(ARM::R0));
+ NopInst.addOperand(MCOperand::CreateReg(ARM::R0));
+ NopInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ NopInst.addOperand(MCOperand::CreateReg(0));
+ NopInst.addOperand(MCOperand::CreateReg(0));
+ }
+}
+
unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
switch (Opc) {
default: break;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
index f2c7bdc..5d3e059 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
@@ -1,4 +1,4 @@
-//===- ARMInstrInfo.h - ARM Instruction Information -------------*- C++ -*-===//
+//===-- ARMInstrInfo.h - ARM Instruction Information ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,11 +14,10 @@
#ifndef ARMINSTRUCTIONINFO_H
#define ARMINSTRUCTIONINFO_H
-#include "llvm/Target/TargetInstrInfo.h"
+#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
-#include "ARM.h"
namespace llvm {
class ARMSubtarget;
@@ -28,6 +27,9 @@ class ARMInstrInfo : public ARMBaseInstrInfo {
public:
explicit ARMInstrInfo(const ARMSubtarget &STI);
+ /// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+ void getNoopForMachoTarget(MCInst &NopInst) const;
+
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is no such opcode.
unsigned getUnindexedOpcode(unsigned Opc) const;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
index 2cf0f09..3caaa23 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -58,8 +58,6 @@ def SDT_ARMEH_SJLJ_Setjmp : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisPtrTy<1>,
SDTCisInt<2>]>;
def SDT_ARMEH_SJLJ_Longjmp: SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisInt<1>]>;
-def SDT_ARMEH_SJLJ_DispatchSetup: SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-
def SDT_ARMMEMBARRIER : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_ARMPREFETCH : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisSameAs<1, 2>,
@@ -143,9 +141,6 @@ def ARMeh_sjlj_setjmp: SDNode<"ARMISD::EH_SJLJ_SETJMP",
SDT_ARMEH_SJLJ_Setjmp, [SDNPHasChain]>;
def ARMeh_sjlj_longjmp: SDNode<"ARMISD::EH_SJLJ_LONGJMP",
SDT_ARMEH_SJLJ_Longjmp, [SDNPHasChain]>;
-def ARMeh_sjlj_dispatchsetup: SDNode<"ARMISD::EH_SJLJ_DISPATCHSETUP",
- SDT_ARMEH_SJLJ_DispatchSetup, [SDNPHasChain]>;
-
def ARMMemBarrier : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIER,
[SDNPHasChain]>;
@@ -184,6 +179,8 @@ def HasVFP2 : Predicate<"Subtarget->hasVFP2()">,
AssemblerPredicate<"FeatureVFP2">;
def HasVFP3 : Predicate<"Subtarget->hasVFP3()">,
AssemblerPredicate<"FeatureVFP3">;
+def HasVFP4 : Predicate<"Subtarget->hasVFP4()">,
+ AssemblerPredicate<"FeatureVFP4">;
def HasNEON : Predicate<"Subtarget->hasNEON()">,
AssemblerPredicate<"FeatureNEON">;
def HasFP16 : Predicate<"Subtarget->hasFP16()">,
@@ -211,16 +208,20 @@ def IsARClass : Predicate<"!Subtarget->isMClass()">,
AssemblerPredicate<"!FeatureMClass">;
def IsARM : Predicate<"!Subtarget->isThumb()">,
AssemblerPredicate<"!ModeThumb">;
-def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
-def IsNotDarwin : Predicate<"!Subtarget->isTargetDarwin()">;
-def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">,
- AssemblerPredicate<"ModeNaCl">;
+def IsIOS : Predicate<"Subtarget->isTargetIOS()">;
+def IsNotIOS : Predicate<"!Subtarget->isTargetIOS()">;
+def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
// FIXME: Eventually this will be just "hasV6T2Ops".
def UseMovt : Predicate<"Subtarget->useMovt()">;
def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
+// Prefer fused MAC for fp mul + add over fp VMLA / VMLS if they are available.
+// But only select them if more precision in FP computation is allowed.
+def UseFusedMAC : Predicate<"!TM.Options.NoExcessFPPrecision">;
+def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4()">;
+
//===----------------------------------------------------------------------===//
// ARM Flag Definitions.
@@ -244,25 +245,28 @@ def so_imm_not_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
}]>;
-/// imm1_15 predicate - True if the 32-bit immediate is in the range [1,15].
-def imm1_15 : ImmLeaf<i32, [{
- return (int32_t)Imm >= 1 && (int32_t)Imm < 16;
-}]>;
-
/// imm16_31 predicate - True if the 32-bit immediate is in the range [16,31].
def imm16_31 : ImmLeaf<i32, [{
return (int32_t)Imm >= 16 && (int32_t)Imm < 32;
}]>;
-def so_imm_neg :
- PatLeaf<(imm), [{
- return ARM_AM::getSOImmVal(-(uint32_t)N->getZExtValue()) != -1;
- }], so_imm_neg_XFORM>;
+def so_imm_neg_asmoperand : AsmOperandClass { let Name = "ARMSOImmNeg"; }
+def so_imm_neg : Operand<i32>, PatLeaf<(imm), [{
+ int64_t Value = -(int)N->getZExtValue();
+ return Value && ARM_AM::getSOImmVal(Value) != -1;
+ }], so_imm_neg_XFORM> {
+ let ParserMatchClass = so_imm_neg_asmoperand;
+}
-def so_imm_not :
- PatLeaf<(imm), [{
+// Note: this pattern doesn't require an encoder method and such, as it's
+// only used on aliases (Pat<> and InstAlias<>). The actual encoding
+// is handled by the destination instructions, which use so_imm.
+def so_imm_not_asmoperand : AsmOperandClass { let Name = "ARMSOImmNot"; }
+def so_imm_not : Operand<i32>, PatLeaf<(imm), [{
return ARM_AM::getSOImmVal(~(uint32_t)N->getZExtValue()) != -1;
- }], so_imm_not_XFORM>;
+ }], so_imm_not_XFORM> {
+ let ParserMatchClass = so_imm_not_asmoperand;
+}
// sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits.
def sext_16_node : PatLeaf<(i32 GPR:$a), [{
@@ -279,14 +283,6 @@ def lo16AllZero : PatLeaf<(i32 imm), [{
return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
}], hi16>;
-/// imm0_65535 - An immediate is in the range [0.65535].
-def Imm0_65535AsmOperand: AsmOperandClass { let Name = "Imm0_65535"; }
-def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
- return Imm >= 0 && Imm < 65536;
-}]> {
- let ParserMatchClass = Imm0_65535AsmOperand;
-}
-
class BinOpWithFlagFrag<dag res> :
PatFrag<(ops node:$LHS, node:$RHS, node:$FLAG), res>;
class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
@@ -321,6 +317,9 @@ def fsub_mlx : PatFrag<(ops node:$lhs, node:$rhs),(fsub node:$lhs, node:$rhs),[{
// Operand Definitions.
//
+// Immediate operands with a shared generic asm render method.
+class ImmAsmOperand : AsmOperandClass { let RenderMethod = "addImmOperands"; }
+
// Branch target.
// FIXME: rename brtarget to t2_brtarget
def brtarget : Operand<OtherVT> {
@@ -352,13 +351,11 @@ def bltarget : Operand<i32> {
// Call target for ARM. Handles conditional/unconditional
// FIXME: rename bl_target to t2_bltarget?
def bl_target : Operand<i32> {
- // Encoded the same as branch targets.
- let EncoderMethod = "getARMBranchTargetOpValue";
+ let EncoderMethod = "getARMBLTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
def blx_target : Operand<i32> {
- // Encoded the same as branch targets.
let EncoderMethod = "getARMBLXTargetOpValue";
let OperandType = "OPERAND_PCREL";
}
@@ -475,6 +472,7 @@ def shift_so_reg_reg : Operand<i32>, // reg reg imm
let EncoderMethod = "getSORegRegOpValue";
let PrintMethod = "printSORegRegOperand";
let DecoderMethod = "DecodeSORegRegOperand";
+ let ParserMatchClass = ShiftedRegAsmOperand;
let MIOperandInfo = (ops GPR, GPR, i32imm);
}
@@ -485,13 +483,14 @@ def shift_so_reg_imm : Operand<i32>, // reg reg imm
let EncoderMethod = "getSORegImmOpValue";
let PrintMethod = "printSORegImmOperand";
let DecoderMethod = "DecodeSORegImmOperand";
+ let ParserMatchClass = ShiftedImmAsmOperand;
let MIOperandInfo = (ops GPR, i32imm);
}
// so_imm - Match a 32-bit shifter_operand immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits.
-def SOImmAsmOperand: AsmOperandClass { let Name = "ARMSOImm"; }
+def SOImmAsmOperand: ImmAsmOperand { let Name = "ARMSOImm"; }
def so_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getSOImmVal(Imm) != -1;
}]> {
@@ -515,16 +514,60 @@ def arm_i32imm : PatLeaf<(imm), [{
return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
}]>;
+/// imm0_1 predicate - Immediate in the range [0,1].
+def Imm0_1AsmOperand: ImmAsmOperand { let Name = "Imm0_1"; }
+def imm0_1 : Operand<i32> { let ParserMatchClass = Imm0_1AsmOperand; }
+
+/// imm0_3 predicate - Immediate in the range [0,3].
+def Imm0_3AsmOperand: ImmAsmOperand { let Name = "Imm0_3"; }
+def imm0_3 : Operand<i32> { let ParserMatchClass = Imm0_3AsmOperand; }
+
/// imm0_7 predicate - Immediate in the range [0,7].
-def Imm0_7AsmOperand: AsmOperandClass { let Name = "Imm0_7"; }
+def Imm0_7AsmOperand: ImmAsmOperand { let Name = "Imm0_7"; }
def imm0_7 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 8;
}]> {
let ParserMatchClass = Imm0_7AsmOperand;
}
+/// imm8 predicate - Immediate is exactly 8.
+def Imm8AsmOperand: ImmAsmOperand { let Name = "Imm8"; }
+def imm8 : Operand<i32>, ImmLeaf<i32, [{ return Imm == 8; }]> {
+ let ParserMatchClass = Imm8AsmOperand;
+}
+
+/// imm16 predicate - Immediate is exactly 16.
+def Imm16AsmOperand: ImmAsmOperand { let Name = "Imm16"; }
+def imm16 : Operand<i32>, ImmLeaf<i32, [{ return Imm == 16; }]> {
+ let ParserMatchClass = Imm16AsmOperand;
+}
+
+/// imm32 predicate - Immediate is exactly 32.
+def Imm32AsmOperand: ImmAsmOperand { let Name = "Imm32"; }
+def imm32 : Operand<i32>, ImmLeaf<i32, [{ return Imm == 32; }]> {
+ let ParserMatchClass = Imm32AsmOperand;
+}
+
+/// imm1_7 predicate - Immediate in the range [1,7].
+def Imm1_7AsmOperand: ImmAsmOperand { let Name = "Imm1_7"; }
+def imm1_7 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm < 8; }]> {
+ let ParserMatchClass = Imm1_7AsmOperand;
+}
+
+/// imm1_15 predicate - Immediate in the range [1,15].
+def Imm1_15AsmOperand: ImmAsmOperand { let Name = "Imm1_15"; }
+def imm1_15 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm < 16; }]> {
+ let ParserMatchClass = Imm1_15AsmOperand;
+}
+
+/// imm1_31 predicate - Immediate in the range [1,31].
+def Imm1_31AsmOperand: ImmAsmOperand { let Name = "Imm1_31"; }
+def imm1_31 : Operand<i32>, ImmLeaf<i32, [{ return Imm > 0 && Imm < 32; }]> {
+ let ParserMatchClass = Imm1_31AsmOperand;
+}
+
/// imm0_15 predicate - Immediate in the range [0,15].
-def Imm0_15AsmOperand: AsmOperandClass { let Name = "Imm0_15"; }
+def Imm0_15AsmOperand: ImmAsmOperand { let Name = "Imm0_15"; }
def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 16;
}]> {
@@ -532,33 +575,57 @@ def imm0_15 : Operand<i32>, ImmLeaf<i32, [{
}
/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
-def Imm0_31AsmOperand: AsmOperandClass { let Name = "Imm0_31"; }
+def Imm0_31AsmOperand: ImmAsmOperand { let Name = "Imm0_31"; }
def imm0_31 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 32;
}]> {
let ParserMatchClass = Imm0_31AsmOperand;
}
+/// imm0_32 predicate - True if the 32-bit immediate is in the range [0,32].
+def Imm0_32AsmOperand: ImmAsmOperand { let Name = "Imm0_32"; }
+def imm0_32 : Operand<i32>, ImmLeaf<i32, [{
+ return Imm >= 0 && Imm < 32;
+}]> {
+ let ParserMatchClass = Imm0_32AsmOperand;
+}
+
+/// imm0_63 predicate - True if the 32-bit immediate is in the range [0,63].
+def Imm0_63AsmOperand: ImmAsmOperand { let Name = "Imm0_63"; }
+def imm0_63 : Operand<i32>, ImmLeaf<i32, [{
+ return Imm >= 0 && Imm < 64;
+}]> {
+ let ParserMatchClass = Imm0_63AsmOperand;
+}
+
/// imm0_255 predicate - Immediate in the range [0,255].
-def Imm0_255AsmOperand : AsmOperandClass { let Name = "Imm0_255"; }
+def Imm0_255AsmOperand : ImmAsmOperand { let Name = "Imm0_255"; }
def imm0_255 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 256; }]> {
let ParserMatchClass = Imm0_255AsmOperand;
}
+/// imm0_65535 - An immediate is in the range [0,65535].
+def Imm0_65535AsmOperand: ImmAsmOperand { let Name = "Imm0_65535"; }
+def imm0_65535 : Operand<i32>, ImmLeaf<i32, [{
+ return Imm >= 0 && Imm < 65536;
+}]> {
+ let ParserMatchClass = Imm0_65535AsmOperand;
+}
+
// imm0_65535_expr - For movt/movw - 16-bit immediate that can also reference
// a relocatable expression.
//
// FIXME: This really needs a Thumb version separate from the ARM version.
// While the range is the same, and can thus use the same match class,
// the encoding is different so it should have a different encoder method.
-def Imm0_65535ExprAsmOperand: AsmOperandClass { let Name = "Imm0_65535Expr"; }
+def Imm0_65535ExprAsmOperand: ImmAsmOperand { let Name = "Imm0_65535Expr"; }
def imm0_65535_expr : Operand<i32> {
let EncoderMethod = "getHiLo16ImmOpValue";
let ParserMatchClass = Imm0_65535ExprAsmOperand;
}
/// imm24b - True if the 32-bit immediate is encodable in 24 bits.
-def Imm24bitAsmOperand: AsmOperandClass { let Name = "Imm24bit"; }
+def Imm24bitAsmOperand: ImmAsmOperand { let Name = "Imm24bit"; }
def imm24b : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm <= 0xffffff;
}]> {
@@ -572,6 +639,7 @@ def BitfieldAsmOperand : AsmOperandClass {
let Name = "Bitfield";
let ParserMethod = "parseBitfield";
}
+
def bf_inv_mask_imm : Operand<i32>,
PatLeaf<(imm), [{
return ARM::isBitFieldInvertedMask(N->getZExtValue());
@@ -670,7 +738,7 @@ def postidx_reg : Operand<i32> {
let DecoderMethod = "DecodePostIdxReg";
let PrintMethod = "printPostIdxRegOperand";
let ParserMatchClass = PostIdxRegAsmOperand;
- let MIOperandInfo = (ops GPR, i32imm);
+ let MIOperandInfo = (ops GPRnopc, i32imm);
}
@@ -699,7 +767,7 @@ def am2offset_reg : Operand<i32>,
let PrintMethod = "printAddrMode2OffsetOperand";
// When using this for assembly, it's always as a post-index offset.
let ParserMatchClass = PostIdxRegShiftedAsmOperand;
- let MIOperandInfo = (ops GPR, i32imm);
+ let MIOperandInfo = (ops GPRnopc, i32imm);
}
// FIXME: am2offset_imm should only need the immediate, not the GPR. Having
@@ -711,7 +779,7 @@ def am2offset_imm : Operand<i32>,
let EncoderMethod = "getAddrMode2OffsetOpValue";
let PrintMethod = "printAddrMode2OffsetOperand";
let ParserMatchClass = AM2OffsetImmAsmOperand;
- let MIOperandInfo = (ops GPR, i32imm);
+ let MIOperandInfo = (ops GPRnopc, i32imm);
}
@@ -799,6 +867,9 @@ def addrmode6dup : Operand<i32>,
let PrintMethod = "printAddrMode6Operand";
let MIOperandInfo = (ops GPR:$addr, i32imm);
let EncoderMethod = "getAddrMode6DupAddressOpValue";
+ // FIXME: This is close, but not quite right. The alignment specifier is
+ // different.
+ let ParserMatchClass = AddrMode6AsmOperand;
}
// addrmodepc := pc + reg
@@ -1041,69 +1112,58 @@ multiclass AsI1_rbin_irs<bits<4> opcod, string opc,
}
-/// AsI1_rbin_s_is - Same as AsI1_rbin_s_is except it sets 's' bit by default.
+/// AsI1_bin_s_irs - Same as AsI1_bin_irs except it sets the 's' bit by default.
///
/// These opcodes will be converted to the real non-S opcodes by
-/// AdjustInstrPostInstrSelection after giving then an optional CPSR operand.
-let hasPostISelHook = 1, isCodeGenOnly = 1, isPseudo = 1, Defs = [CPSR] in {
-multiclass AsI1_rbin_s_is<bits<4> opcod, string opc,
- InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, bit Commutable = 0> {
- def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
- iii, opc, "\t$Rd, $Rn, $imm",
- [(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn))]>;
-
- def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
- iir, opc, "\t$Rd, $Rn, $Rm",
- [/* pattern left blank */]>;
-
- def rsi : AsI1<opcod, (outs GPR:$Rd),
- (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm,
- iis, opc, "\t$Rd, $Rn, $shift",
- [(set GPR:$Rd, CPSR, (opnode so_reg_imm:$shift, GPR:$Rn))]>;
-
- def rsr : AsI1<opcod, (outs GPR:$Rd),
- (ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm,
- iis, opc, "\t$Rd, $Rn, $shift",
- [(set GPR:$Rd, CPSR, (opnode so_reg_reg:$shift, GPR:$Rn))]> {
- bits<4> Rd;
- bits<4> Rn;
- bits<12> shift;
- let Inst{25} = 0;
- let Inst{19-16} = Rn;
- let Inst{15-12} = Rd;
- let Inst{11-8} = shift{11-8};
- let Inst{7} = 0;
- let Inst{6-5} = shift{6-5};
- let Inst{4} = 1;
- let Inst{3-0} = shift{3-0};
+/// AdjustInstrPostInstrSelection after giving them an optional CPSR operand.
+let hasPostISelHook = 1, Defs = [CPSR] in {
+multiclass AsI1_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
+ InstrItinClass iis, PatFrag opnode,
+ bit Commutable = 0> {
+ def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm, pred:$p),
+ 4, iii,
+ [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm))]>;
+
+ def rr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, pred:$p),
+ 4, iir,
+ [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, GPR:$Rm))]> {
+ let isCommutable = Commutable;
}
+ def rsi : ARMPseudoInst<(outs GPR:$Rd),
+ (ins GPR:$Rn, so_reg_imm:$shift, pred:$p),
+ 4, iis,
+ [(set GPR:$Rd, CPSR, (opnode GPR:$Rn,
+ so_reg_imm:$shift))]>;
+
+ def rsr : ARMPseudoInst<(outs GPR:$Rd),
+ (ins GPR:$Rn, so_reg_reg:$shift, pred:$p),
+ 4, iis,
+ [(set GPR:$Rd, CPSR, (opnode GPR:$Rn,
+ so_reg_reg:$shift))]>;
}
}
-/// AsI1_bin_s_irs - Same as AsI1_bin_irs except it sets the 's' bit by default.
-///
-/// These opcodes will be converted to the real non-S opcodes by
-/// AdjustInstrPostInstrSelection after giving then an optional CPSR operand.
-let hasPostISelHook = 1, isCodeGenOnly = 1, isPseudo = 1, Defs = [CPSR] in {
-multiclass AsI1_bin_s_irs<bits<4> opcod, string opc,
- InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, bit Commutable = 0> {
- def ri : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm), DPFrm,
- iii, opc, "\t$Rd, $Rn, $imm",
- [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_imm:$imm))]>;
- def rr : AsI1<opcod, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm,
- iir, opc, "\t$Rd, $Rn, $Rm",
- [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, GPR:$Rm))]>;
- def rsi : AsI1<opcod, (outs GPR:$Rd),
- (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm,
- iis, opc, "\t$Rd, $Rn, $shift",
- [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_reg_imm:$shift))]>;
+/// AsI1_rbin_s_is - Same as AsI1_bin_s_irs, except selection DAG
+/// operands are reversed.
+let hasPostISelHook = 1, Defs = [CPSR] in {
+multiclass AsI1_rbin_s_is<InstrItinClass iii, InstrItinClass iir,
+ InstrItinClass iis, PatFrag opnode,
+ bit Commutable = 0> {
+ def ri : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$Rn, so_imm:$imm, pred:$p),
+ 4, iii,
+ [(set GPR:$Rd, CPSR, (opnode so_imm:$imm, GPR:$Rn))]>;
- def rsr : AsI1<opcod, (outs GPR:$Rd),
- (ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm,
- iis, opc, "\t$Rd, $Rn, $shift",
- [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_reg_reg:$shift))]>;
+ def rsi : ARMPseudoInst<(outs GPR:$Rd),
+ (ins GPR:$Rn, so_reg_imm:$shift, pred:$p),
+ 4, iis,
+ [(set GPR:$Rd, CPSR, (opnode so_reg_imm:$shift,
+ GPR:$Rn))]>;
+
+ def rsr : ARMPseudoInst<(outs GPR:$Rd),
+ (ins GPR:$Rn, so_reg_reg:$shift, pred:$p),
+ 4, iis,
+ [(set GPR:$Rd, CPSR, (opnode so_reg_reg:$shift,
+ GPR:$Rn))]>;
}
}
@@ -1272,10 +1332,10 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
}
- def rsr : AsI1<opcod, (outs GPR:$Rd),
- (ins GPR:$Rn, so_reg_reg:$shift),
+ def rsr : AsI1<opcod, (outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rn, so_reg_reg:$shift),
DPSoRegRegFrm, IIC_iALUsr, opc, "\t$Rd, $Rn, $shift",
- [(set GPR:$Rd, CPSR, (opnode GPR:$Rn, so_reg_reg:$shift, CPSR))]>,
+ [(set GPRnopc:$Rd, CPSR, (opnode GPRnopc:$Rn, so_reg_reg:$shift, CPSR))]>,
Requires<[IsARM]> {
bits<4> Rd;
bits<4> Rn;
@@ -1309,7 +1369,7 @@ multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
cc_out:$s)>,
Requires<[IsARM]>;
def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
- (!cast<Instruction>(!strconcat(baseOpc, "rsr")) GPR:$Rdn, GPR:$Rdn,
+ (!cast<Instruction>(!strconcat(baseOpc, "rsr")) GPRnopc:$Rdn, GPRnopc:$Rdn,
so_reg_reg:$shift, pred:$p,
cc_out:$s)>,
Requires<[IsARM]>;
@@ -1550,7 +1610,7 @@ PseudoInst<(outs), (ins i32imm:$amt, pred:$p), NoItinerary,
}
// Atomic pseudo-insts which will be lowered to ldrexd/strexd loops.
-// (These psuedos use a hand-written selection code).
+// (These pseudos use hand-written selection code.)
let usesCustomInserter = 1, Defs = [CPSR], mayLoad = 1, mayStore = 1 in {
def ATOMOR6432 : PseudoInst<(outs GPR:$dst1, GPR:$dst2),
(ins GPR:$addr, GPR:$src1, GPR:$src2),
@@ -1652,7 +1712,7 @@ class CPS<dag iops, string asm_ops>
let Inst{27-20} = 0b00010000;
let Inst{19-18} = imod;
let Inst{17} = M; // Enabled if mode is set;
- let Inst{16} = 0;
+ let Inst{16-9} = 0b00000000;
let Inst{8-6} = iflags;
let Inst{5} = 0;
let Inst{4-0} = mode;
@@ -1839,20 +1899,17 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
}
}
-// All calls clobber the non-callee saved registers. SP is marked as
-// a use to prevent stack-pointer assignments that appear immediately
-// before calls from potentially appearing dead.
+// SP is marked as a use to prevent stack-pointer assignments that appear
+// immediately before calls from potentially appearing dead.
let isCall = 1,
- // On non-Darwin platforms R9 is callee-saved.
// FIXME: Do we really need a non-predicated version? If so, it should
// at least be a pseudo instruction expanding to the predicated version
// at MC lowering time.
- Defs = [R0, R1, R2, R3, R12, LR, QQQQ0, QQQQ2, QQQQ3, CPSR, FPSCR],
- Uses = [SP] in {
+ Defs = [LR], Uses = [SP] in {
def BL : ABXI<0b1011, (outs), (ins bl_target:$func, variable_ops),
IIC_Br, "bl\t$func",
[(ARMcall tglobaladdr:$func)]>,
- Requires<[IsARM, IsNotDarwin]> {
+ Requires<[IsARM]> {
let Inst{31-28} = 0b1110;
bits<24> func;
let Inst{23-0} = func;
@@ -1862,7 +1919,7 @@ let isCall = 1,
def BL_pred : ABI<0b1011, (outs), (ins bl_target:$func, variable_ops),
IIC_Br, "bl", "\t$func",
[(ARMcall_pred tglobaladdr:$func)]>,
- Requires<[IsARM, IsNotDarwin]> {
+ Requires<[IsARM]> {
bits<24> func;
let Inst{23-0} = func;
let DecoderMethod = "DecodeBranchImmInstruction";
@@ -1872,7 +1929,7 @@ let isCall = 1,
def BLX : AXI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
IIC_Br, "blx\t$func",
[(ARMcall GPR:$func)]>,
- Requires<[IsARM, HasV5T, IsNotDarwin]> {
+ Requires<[IsARM, HasV5T]> {
bits<4> func;
let Inst{31-4} = 0b1110000100101111111111110011;
let Inst{3-0} = func;
@@ -1881,7 +1938,7 @@ let isCall = 1,
def BLX_pred : AI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
IIC_Br, "blx", "\t$func",
[(ARMcall_pred GPR:$func)]>,
- Requires<[IsARM, HasV5T, IsNotDarwin]> {
+ Requires<[IsARM, HasV5T]> {
bits<4> func;
let Inst{27-4} = 0b000100101111111111110011;
let Inst{3-0} = func;
@@ -1891,55 +1948,19 @@ let isCall = 1,
// Note: Restrict $func to the tGPR regclass to prevent it being in LR.
def BX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, HasV4T, IsNotDarwin]>;
+ Requires<[IsARM, HasV4T]>;
// ARMv4
def BMOVPCRX_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, NoV4T, IsNotDarwin]>;
-}
-
-let isCall = 1,
- // On Darwin R9 is call-clobbered.
- // R7 is marked as a use to prevent frame-pointer assignments from being
- // moved above / below calls.
- Defs = [R0, R1, R2, R3, R9, R12, LR, QQQQ0, QQQQ2, QQQQ3, CPSR, FPSCR],
- Uses = [R7, SP] in {
- def BLr9 : ARMPseudoExpand<(outs), (ins bl_target:$func, variable_ops),
- 4, IIC_Br,
- [(ARMcall tglobaladdr:$func)], (BL bl_target:$func)>,
- Requires<[IsARM, IsDarwin]>;
-
- def BLr9_pred : ARMPseudoExpand<(outs),
- (ins bl_target:$func, pred:$p, variable_ops),
- 4, IIC_Br,
- [(ARMcall_pred tglobaladdr:$func)],
- (BL_pred bl_target:$func, pred:$p)>,
- Requires<[IsARM, IsDarwin]>;
-
- // ARMv5T and above
- def BLXr9 : ARMPseudoExpand<(outs), (ins GPR:$func, variable_ops),
- 4, IIC_Br,
- [(ARMcall GPR:$func)],
- (BLX GPR:$func)>,
- Requires<[IsARM, HasV5T, IsDarwin]>;
-
- def BLXr9_pred: ARMPseudoExpand<(outs), (ins GPR:$func, pred:$p,variable_ops),
- 4, IIC_Br,
- [(ARMcall_pred GPR:$func)],
- (BLX_pred GPR:$func, pred:$p)>,
- Requires<[IsARM, HasV5T, IsDarwin]>;
+ Requires<[IsARM, NoV4T]>;
- // ARMv4T
- // Note: Restrict $func to the tGPR regclass to prevent it being in LR.
- def BXr9_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
- 8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, HasV4T, IsDarwin]>;
-
- // ARMv4
- def BMOVPCRXr9_CALL : ARMPseudoInst<(outs), (ins tGPR:$func, variable_ops),
- 8, IIC_Br, [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, NoV4T, IsDarwin]>;
+  // Use "mov lr, pc; b" if the callee is marked noreturn, to avoid
+  // confusing the return stack predictor.
+ def BMOVPCB_CALL : ARMPseudoInst<(outs),
+ (ins bl_target:$func, variable_ops),
+ 8, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>,
+ Requires<[IsARM]>;
}
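
// Expansion sketch (the callee name is illustrative): for a call to a
// noreturn function, BMOVPCB_CALL becomes
//   mov lr, pc    @ lr := address of the instruction after the b
//   b   _abort
// so the return stack predictor is never primed with a return address
// that will not be used.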
let isBranch = 1, isTerminator = 1 in {
@@ -2006,47 +2027,22 @@ def BXJ : ABI<0b0001, (outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
// Tail calls.
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
- // Darwin versions.
- let Defs = [R0, R1, R2, R3, R9, R12, QQQQ0, QQQQ2, QQQQ3, PC],
- Uses = [SP] in {
- def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst, variable_ops),
- IIC_Br, []>, Requires<[IsDarwin]>;
-
- def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst, variable_ops),
- IIC_Br, []>, Requires<[IsDarwin]>;
-
- def TAILJMPd : ARMPseudoExpand<(outs), (ins br_target:$dst, variable_ops),
- 4, IIC_Br, [],
- (Bcc br_target:$dst, (ops 14, zero_reg))>,
- Requires<[IsARM, IsDarwin]>;
-
- def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
- 4, IIC_Br, [],
- (BX GPR:$dst)>,
- Requires<[IsARM, IsDarwin]>;
-
- }
-
- // Non-Darwin versions (the difference is R9).
- let Defs = [R0, R1, R2, R3, R12, QQQQ0, QQQQ2, QQQQ3, PC],
- Uses = [SP] in {
- def TCRETURNdiND : PseudoInst<(outs), (ins i32imm:$dst, variable_ops),
- IIC_Br, []>, Requires<[IsNotDarwin]>;
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
+ def TCRETURNdi : PseudoInst<(outs), (ins i32imm:$dst, variable_ops),
+ IIC_Br, []>;
- def TCRETURNriND : PseudoInst<(outs), (ins tcGPR:$dst, variable_ops),
- IIC_Br, []>, Requires<[IsNotDarwin]>;
+ def TCRETURNri : PseudoInst<(outs), (ins tcGPR:$dst, variable_ops),
+ IIC_Br, []>;
- def TAILJMPdND : ARMPseudoExpand<(outs), (ins brtarget:$dst, variable_ops),
- 4, IIC_Br, [],
- (Bcc br_target:$dst, (ops 14, zero_reg))>,
- Requires<[IsARM, IsNotDarwin]>;
+ def TAILJMPd : ARMPseudoExpand<(outs), (ins br_target:$dst, variable_ops),
+ 4, IIC_Br, [],
+ (Bcc br_target:$dst, (ops 14, zero_reg))>,
+ Requires<[IsARM]>;
- def TAILJMPrND : ARMPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
- 4, IIC_Br, [],
- (BX GPR:$dst)>,
- Requires<[IsARM, IsNotDarwin]>;
- }
+ def TAILJMPr : ARMPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
+ 4, IIC_Br, [],
+ (BX GPR:$dst)>,
+ Requires<[IsARM]>;
}
// Secure Monitor Call is a system instruction.
@@ -2145,7 +2141,7 @@ def RFEIB_UPD : RFEI<1, "rfeib\t$Rn!"> {
}
//===----------------------------------------------------------------------===//
-// Load / store Instructions.
+// Load / Store Instructions.
//
// Load
@@ -2197,9 +2193,10 @@ def LDRD : AI3ld<0b1101, 0, (outs GPR:$Rd, GPR:$dst2),
}
// Indexed loads
-multiclass AI2_ldridx<bit isByte, string opc, InstrItinClass itin> {
+multiclass AI2_ldridx<bit isByte, string opc,
+ InstrItinClass iii, InstrItinClass iir> {
def _PRE_IMM : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins addrmode_imm12:$addr), IndexModePre, LdFrm, itin,
+ (ins addrmode_imm12:$addr), IndexModePre, LdFrm, iii,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 0;
@@ -2211,7 +2208,7 @@ multiclass AI2_ldridx<bit isByte, string opc, InstrItinClass itin> {
}
def _PRE_REG : AI2ldstidx<1, isByte, 1, (outs GPR:$Rt, GPR:$Rn_wb),
- (ins ldst_so_reg:$addr), IndexModePre, LdFrm, itin,
+ (ins ldst_so_reg:$addr), IndexModePre, LdFrm, iir,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 1;
@@ -2225,7 +2222,7 @@ multiclass AI2_ldridx<bit isByte, string opc, InstrItinClass itin> {
def _POST_REG : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_reg:$offset),
- IndexModePost, LdFrm, itin,
+ IndexModePost, LdFrm, iir,
opc, "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
@@ -2242,7 +2239,7 @@ multiclass AI2_ldridx<bit isByte, string opc, InstrItinClass itin> {
def _POST_IMM : AI2ldstidx<1, isByte, 0, (outs GPR:$Rt, GPR:$Rn_wb),
(ins addr_offset_none:$addr, am2offset_imm:$offset),
- IndexModePost, LdFrm, itin,
+ IndexModePost, LdFrm, iii,
opc, "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
@@ -2260,8 +2257,10 @@ multiclass AI2_ldridx<bit isByte, string opc, InstrItinClass itin> {
}
let mayLoad = 1, neverHasSideEffects = 1 in {
-defm LDR : AI2_ldridx<0, "ldr", IIC_iLoad_ru>;
-defm LDRB : AI2_ldridx<1, "ldrb", IIC_iLoad_bh_ru>;
+// FIXME: for LDR_PRE_REG etc. the itinerary should be either IIC_iLoad_ru or
+// IIC_iLoad_siu depending on whether the offset register is shifted.
+defm LDR : AI2_ldridx<0, "ldr", IIC_iLoad_iu, IIC_iLoad_ru>;
+defm LDRB : AI2_ldridx<1, "ldrb", IIC_iLoad_bh_iu, IIC_iLoad_bh_ru>;
}
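
// For example (sketch): "ldr r0, [r1, #4]!" selects LDR_PRE_IMM with
// IIC_iLoad_iu, while "ldr r0, [r1, r2, lsl #2]!" selects LDR_PRE_REG,
// which currently uses IIC_iLoad_ru even though the shifted offset would
// better match IIC_iLoad_siu (see the FIXME above).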
multiclass AI3_ldridx<bits<4> op, string opc, InstrItinClass itin> {
@@ -2416,7 +2415,7 @@ multiclass AI3ldrT<bits<4> op, string opc> {
let Inst{3-0} = offset{3-0};
let AsmMatchConverter = "cvtLdExtTWriteBackImm";
}
- def r : AI3ldstidxT<op, 1, (outs GPR:$Rt, GPR:$base_wb),
+ def r : AI3ldstidxT<op, 1, (outs GPRnopc:$Rt, GPRnopc:$base_wb),
(ins addr_offset_none:$addr, postidx_reg:$Rm),
IndexModePost, LdMiscFrm, IIC_iLoad_bh_ru, opc,
"\t$Rt, $addr, $Rm", "$addr.base = $base_wb", []> {
@@ -2424,8 +2423,10 @@ multiclass AI3ldrT<bits<4> op, string opc> {
let Inst{23} = Rm{4};
let Inst{22} = 0;
let Inst{11-8} = 0;
+ let Unpredictable{11-8} = 0b1111;
let Inst{3-0} = Rm{3-0};
let AsmMatchConverter = "cvtLdExtTWriteBackReg";
+ let DecoderMethod = "DecodeLDR";
}
}
@@ -2451,10 +2452,11 @@ def STRD : AI3str<0b1111, (outs), (ins GPR:$Rt, GPR:$src2, addrmode3:$addr),
}
// Indexed stores
-multiclass AI2_stridx<bit isByte, string opc, InstrItinClass itin> {
+multiclass AI2_stridx<bit isByte, string opc,
+ InstrItinClass iii, InstrItinClass iir> {
def _PRE_IMM : AI2ldstidx<0, isByte, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addrmode_imm12:$addr), IndexModePre,
- StFrm, itin,
+ StFrm, iii,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 0;
@@ -2467,7 +2469,7 @@ multiclass AI2_stridx<bit isByte, string opc, InstrItinClass itin> {
def _PRE_REG : AI2ldstidx<0, isByte, 1, (outs GPR:$Rn_wb),
(ins GPR:$Rt, ldst_so_reg:$addr),
- IndexModePre, StFrm, itin,
+ IndexModePre, StFrm, iir,
opc, "\t$Rt, $addr!", "$addr.base = $Rn_wb", []> {
bits<17> addr;
let Inst{25} = 1;
@@ -2480,7 +2482,7 @@ multiclass AI2_stridx<bit isByte, string opc, InstrItinClass itin> {
}
def _POST_REG : AI2ldstidx<0, isByte, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_reg:$offset),
- IndexModePost, StFrm, itin,
+ IndexModePost, StFrm, iir,
opc, "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
@@ -2497,7 +2499,7 @@ multiclass AI2_stridx<bit isByte, string opc, InstrItinClass itin> {
def _POST_IMM : AI2ldstidx<0, isByte, 0, (outs GPR:$Rn_wb),
(ins GPR:$Rt, addr_offset_none:$addr, am2offset_imm:$offset),
- IndexModePost, StFrm, itin,
+ IndexModePost, StFrm, iii,
opc, "\t$Rt, $addr, $offset",
"$addr.base = $Rn_wb", []> {
// {12} isAdd
@@ -2514,8 +2516,10 @@ multiclass AI2_stridx<bit isByte, string opc, InstrItinClass itin> {
}
let mayStore = 1, neverHasSideEffects = 1 in {
-defm STR : AI2_stridx<0, "str", IIC_iStore_ru>;
-defm STRB : AI2_stridx<1, "strb", IIC_iStore_bh_ru>;
+// FIXME: for STR_PRE_REG etc. the itinerary should be either IIC_iStore_ru or
+// IIC_iStore_siu depending on whether the offset register is shifted.
+defm STR : AI2_stridx<0, "str", IIC_iStore_iu, IIC_iStore_ru>;
+defm STRB : AI2_stridx<1, "strb", IIC_iStore_bh_iu, IIC_iStore_bh_ru>;
}
def : ARMPat<(post_store GPR:$Rt, addr_offset_none:$addr,
@@ -2745,23 +2749,25 @@ defm STRHT : AI3strT<0b1011, "strht">;
// Load / store multiple Instructions.
//
-multiclass arm_ldst_mult<string asm, bit L_bit, Format f,
+multiclass arm_ldst_mult<string asm, string sfx, bit L_bit, bit P_bit, Format f,
InstrItinClass itin, InstrItinClass itin_upd> {
// IA is the default, so no need for an explicit suffix on the
// mnemonic here; the spelling without the suffix is the canonical one.
def IA :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
- !strconcat(asm, "${p}\t$Rn, $regs"), "", []> {
+ !strconcat(asm, "${p}\t$Rn, $regs", sfx), "", []> {
let Inst{24-23} = 0b01; // Increment After
+ let Inst{22} = P_bit;
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def IA_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
- !strconcat(asm, "${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ !strconcat(asm, "${p}\t$Rn!, $regs", sfx), "$Rn = $wb", []> {
let Inst{24-23} = 0b01; // Increment After
+ let Inst{22} = P_bit;
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
@@ -2770,16 +2776,18 @@ multiclass arm_ldst_mult<string asm, bit L_bit, Format f,
def DA :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
- !strconcat(asm, "da${p}\t$Rn, $regs"), "", []> {
+ !strconcat(asm, "da${p}\t$Rn, $regs", sfx), "", []> {
let Inst{24-23} = 0b00; // Decrement After
+ let Inst{22} = P_bit;
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def DA_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
- !strconcat(asm, "da${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ !strconcat(asm, "da${p}\t$Rn!, $regs", sfx), "$Rn = $wb", []> {
let Inst{24-23} = 0b00; // Decrement After
+ let Inst{22} = P_bit;
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
@@ -2788,16 +2796,18 @@ multiclass arm_ldst_mult<string asm, bit L_bit, Format f,
def DB :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
- !strconcat(asm, "db${p}\t$Rn, $regs"), "", []> {
+ !strconcat(asm, "db${p}\t$Rn, $regs", sfx), "", []> {
let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{22} = P_bit;
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def DB_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
- !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ !strconcat(asm, "db${p}\t$Rn!, $regs", sfx), "$Rn = $wb", []> {
let Inst{24-23} = 0b10; // Decrement Before
+ let Inst{22} = P_bit;
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
@@ -2806,16 +2816,18 @@ multiclass arm_ldst_mult<string asm, bit L_bit, Format f,
def IB :
AXI4<(outs), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeNone, f, itin,
- !strconcat(asm, "ib${p}\t$Rn, $regs"), "", []> {
+ !strconcat(asm, "ib${p}\t$Rn, $regs", sfx), "", []> {
let Inst{24-23} = 0b11; // Increment Before
+ let Inst{22} = P_bit;
let Inst{21} = 0; // No writeback
let Inst{20} = L_bit;
}
def IB_UPD :
AXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, reglist:$regs, variable_ops),
IndexModeUpd, f, itin_upd,
- !strconcat(asm, "ib${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
+ !strconcat(asm, "ib${p}\t$Rn!, $regs", sfx), "$Rn = $wb", []> {
let Inst{24-23} = 0b11; // Increment Before
+ let Inst{22} = P_bit;
let Inst{21} = 1; // Writeback
let Inst{20} = L_bit;
@@ -2826,10 +2838,12 @@ multiclass arm_ldst_mult<string asm, bit L_bit, Format f,
let neverHasSideEffects = 1 in {
let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
-defm LDM : arm_ldst_mult<"ldm", 1, LdStMulFrm, IIC_iLoad_m, IIC_iLoad_mu>;
+defm LDM : arm_ldst_mult<"ldm", "", 1, 0, LdStMulFrm, IIC_iLoad_m,
+ IIC_iLoad_mu>;
let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
-defm STM : arm_ldst_mult<"stm", 0, LdStMulFrm, IIC_iStore_m, IIC_iStore_mu>;
+defm STM : arm_ldst_mult<"stm", "", 0, 0, LdStMulFrm, IIC_iStore_m,
+ IIC_iStore_mu>;
} // neverHasSideEffects
@@ -2843,6 +2857,16 @@ def LDMIA_RET : ARMPseudoExpand<(outs GPR:$wb), (ins GPR:$Rn, pred:$p,
(LDMIA_UPD GPR:$wb, GPR:$Rn, pred:$p, reglist:$regs)>,
RegConstraint<"$Rn = $wb">;
+let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
+defm sysLDM : arm_ldst_mult<"ldm", " ^", 1, 1, LdStMulFrm, IIC_iLoad_m,
+ IIC_iLoad_mu>;
+
+let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
+defm sysSTM : arm_ldst_mult<"stm", " ^", 0, 1, LdStMulFrm, IIC_iStore_m,
+ IIC_iStore_mu>;
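+
+// Assembly sketch: the " ^" suffix selects these system variants and sets
+// bit 22 (P_bit), e.g. "ldm r0, {r4-r6}^" loads the user-mode registers,
+// and a load with pc in the list additionally restores SPSR into CPSR.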
+
+
+
//===----------------------------------------------------------------------===//
// Move Instructions.
//
@@ -2860,7 +2884,7 @@ def MOVr : AsI1<0b1101, (outs GPR:$Rd), (ins GPR:$Rm), DPFrm, IIC_iMOVr,
let Inst{15-12} = Rd;
}
-def : ARMInstAlias<"movs${p} $Rd, $Rm",
+def : ARMInstAlias<"movs${p} $Rd, $Rm",
(MOVr GPR:$Rd, GPR:$Rm, pred:$p, CPSR)>;
// A version for the smaller set of tail call registers.
@@ -3080,20 +3104,18 @@ defm SUB : AsI1_bin_irs<0b0010, "sub",
// ADD and SUB with 's' bit set.
//
-// Currently, t2ADDS/t2SUBS are pseudo opcodes that exist only in the
-// selection DAG. They are "lowered" to real t2ADD/t2SUB opcodes by
+// Currently, ADDS/SUBS are pseudo opcodes that exist only in the
+// selection DAG. They are "lowered" to real ADD/SUB opcodes by
// AdjustInstrPostInstrSelection where we determine whether or not to
// set the "s" bit based on CPSR liveness.
//
-// FIXME: Eliminate t2ADDS/t2SUBS pseudo opcodes after adding tablegen
+// FIXME: Eliminate ADDS/SUBS pseudo opcodes after adding tablegen
// support for an optional CPSR definition that corresponds to the DAG
// node's second value. We can then eliminate the implicit def of CPSR.
-defm ADDS : AsI1_bin_s_irs<0b0100, "add",
- IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(ARMaddc node:$LHS, node:$RHS)>, 1>;
-defm SUBS : AsI1_bin_s_irs<0b0010, "sub",
- IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
+defm ADDS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
+ BinOpFrag<(ARMaddc node:$LHS, node:$RHS)>, 1>;
+defm SUBS : AsI1_bin_s_irs<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
+ BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
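+
+// Selection sketch: "(ARMaddc x, y)" matches the ADDS pseudo; after ISel,
+// AdjustInstrPostInstrSelection rewrites it to a real ADD and sets the
+// optional cc_out operand to CPSR only when the flag result is live, so
+// "adds" is emitted only when the flags are actually consumed.
+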
defm ADC : AI1_adde_sube_irs<0b0101, "adc",
BinOpWithFlagFrag<(ARMadde node:$LHS, node:$RHS, node:$FLAG)>,
@@ -3108,9 +3130,8 @@ defm RSB : AsI1_rbin_irs <0b0011, "rsb",
// FIXME: Eliminate them if we can write def : Pat patterns which defines
// CPSR and the implicit def of CPSR is not needed.
-defm RSBS : AsI1_rbin_s_is<0b0011, "rsb",
- IIC_iALUi, IIC_iALUr, IIC_iALUsr,
- BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
+defm RSBS : AsI1_rbin_s_is<IIC_iALUi, IIC_iALUr, IIC_iALUsr,
+ BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
defm RSC : AI1_rsc_irs<0b0111, "rsc",
BinOpWithFlagFrag<(ARMsube node:$LHS, node:$RHS, node:$FLAG)>,
@@ -3153,6 +3174,8 @@ class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
let Inst{19-16} = Rn;
let Inst{15-12} = Rd;
let Inst{3-0} = Rm;
+
+ let Unpredictable{11-8} = 0b1111;
}
// Saturating add/subtract
@@ -3445,19 +3468,20 @@ class AsMul1I64<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
// property. Remove them when it's possible to add those properties
// on an individual MachineInstr, not just an instruction description.
let isCommutable = 1 in {
-def MUL : AsMul1I32<0b0000000, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
+def MUL : AsMul1I32<0b0000000, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm),
IIC_iMUL32, "mul", "\t$Rd, $Rn, $Rm",
- [(set GPR:$Rd, (mul GPR:$Rn, GPR:$Rm))]>,
+ [(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))]>,
Requires<[IsARM, HasV6]> {
let Inst{15-12} = 0b0000;
+ let Unpredictable{15-12} = 0b1111;
}
let Constraints = "@earlyclobber $Rd" in
-def MULv5: ARMPseudoExpand<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm,
+def MULv5: ARMPseudoExpand<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm,
pred:$p, cc_out:$s),
4, IIC_iMUL32,
- [(set GPR:$Rd, (mul GPR:$Rn, GPR:$Rm))],
- (MUL GPR:$Rd, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ [(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))],
+ (MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>,
Requires<[IsARM, NoV6]>;
}
@@ -3952,10 +3976,13 @@ def BCCZi64 : PseudoInst<(outs),
// FIXME: should be able to write a pattern for ARMcmov, but can't use
// a two-value operand where a dag node expects two operands. :(
let neverHasSideEffects = 1 in {
+
+let isCommutable = 1 in
def MOVCCr : ARMPseudoInst<(outs GPR:$Rd), (ins GPR:$false, GPR:$Rm, pred:$p),
4, IIC_iCMOVr,
[/*(set GPR:$Rd, (ARMcmov GPR:$false, GPR:$Rm, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd">;
+
def MOVCCsi : ARMPseudoInst<(outs GPR:$Rd),
(ins GPR:$false, so_reg_imm:$shift, pred:$p),
4, IIC_iCMOVsr,
@@ -3996,8 +4023,44 @@ def MVNCCi : ARMPseudoInst<(outs GPR:$Rd),
4, IIC_iCMOVi,
[/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd">;
+
+// Conditional instructions
+multiclass AsI1_bincc_irs<Instruction iri, Instruction irr, Instruction irsi,
+ Instruction irsr,
+ InstrItinClass iii, InstrItinClass iir,
+ InstrItinClass iis> {
+ def ri : ARMPseudoExpand<(outs GPR:$Rd),
+ (ins GPR:$Rn, so_imm:$imm, pred:$p, cc_out:$s),
+ 4, iii, [],
+ (iri GPR:$Rd, GPR:$Rn, so_imm:$imm, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rn = $Rd">;
+ def rr : ARMPseudoExpand<(outs GPR:$Rd),
+ (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s),
+ 4, iir, [],
+ (irr GPR:$Rd, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rn = $Rd">;
+ def rsi : ARMPseudoExpand<(outs GPR:$Rd),
+ (ins GPR:$Rn, so_reg_imm:$shift, pred:$p, cc_out:$s),
+ 4, iis, [],
+ (irsi GPR:$Rd, GPR:$Rn, so_reg_imm:$shift, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rn = $Rd">;
+ def rsr : ARMPseudoExpand<(outs GPRnopc:$Rd),
+ (ins GPRnopc:$Rn, so_reg_reg:$shift, pred:$p, cc_out:$s),
+ 4, iis, [],
+ (irsr GPR:$Rd, GPR:$Rn, so_reg_reg:$shift, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rn = $Rd">;
+}
+
+defm ANDCC : AsI1_bincc_irs<ANDri, ANDrr, ANDrsi, ANDrsr,
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
+defm ORRCC : AsI1_bincc_irs<ORRri, ORRrr, ORRrsi, ORRrsr,
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
+defm EORCC : AsI1_bincc_irs<EORri, EORrr, EORrsi, EORrsr,
+ IIC_iBITi, IIC_iBITr, IIC_iBITsr>;
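+
+// Usage sketch: these pseudos let a conditional bitwise update such as
+// "r0 = eq ? (r0 & r1) : r0" be selected as ANDCCrr and later expanded
+// to the predicated "andeq r0, r0, r1"; the "$Rn = $Rd" constraint ties
+// the false value to the destination register.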
+
} // neverHasSideEffects
+
//===----------------------------------------------------------------------===//
// Atomic operations intrinsics
//
@@ -4076,10 +4139,10 @@ let usesCustomInserter = 1 in {
[(set GPR:$dst, (atomic_load_max_8 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMIN_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_min_8 GPR:$ptr, GPR:$val))]>;
+ [(set GPR:$dst, (atomic_load_umin_8 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMAX_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_max_8 GPR:$ptr, GPR:$val))]>;
+ [(set GPR:$dst, (atomic_load_umax_8 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_ADD_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_add_16 GPR:$ptr, GPR:$incr))]>;
@@ -4106,10 +4169,10 @@ let usesCustomInserter = 1 in {
[(set GPR:$dst, (atomic_load_max_16 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMIN_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_min_16 GPR:$ptr, GPR:$val))]>;
+ [(set GPR:$dst, (atomic_load_umin_16 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMAX_I16 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_max_16 GPR:$ptr, GPR:$val))]>;
+ [(set GPR:$dst, (atomic_load_umax_16 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_ADD_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
[(set GPR:$dst, (atomic_load_add_32 GPR:$ptr, GPR:$incr))]>;
@@ -4136,10 +4199,10 @@ let usesCustomInserter = 1 in {
[(set GPR:$dst, (atomic_load_max_32 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMIN_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_min_32 GPR:$ptr, GPR:$val))]>;
+ [(set GPR:$dst, (atomic_load_umin_32 GPR:$ptr, GPR:$val))]>;
def ATOMIC_LOAD_UMAX_I32 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$val), NoItinerary,
- [(set GPR:$dst, (atomic_load_max_32 GPR:$ptr, GPR:$val))]>;
+ [(set GPR:$dst, (atomic_load_umax_32 GPR:$ptr, GPR:$val))]>;
def ATOMIC_SWAP_I8 : PseudoInst<
(outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
@@ -4185,14 +4248,14 @@ def STREXH: AIstrex<0b11, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "strexh", "\t$Rd, $Rt, $addr", []>;
def STREX : AIstrex<0b00, (outs GPR:$Rd), (ins GPR:$Rt, addr_offset_none:$addr),
NoItinerary, "strex", "\t$Rd, $Rt, $addr", []>;
-}
-
-let hasExtraSrcRegAllocReq = 1, Constraints = "@earlyclobber $Rd" in
+let hasExtraSrcRegAllocReq = 1 in
def STREXD : AIstrex<0b01, (outs GPR:$Rd),
(ins GPR:$Rt, GPR:$Rt2, addr_offset_none:$addr),
NoItinerary, "strexd", "\t$Rd, $Rt, $Rt2, $addr", []> {
let DecoderMethod = "DecodeDoubleRegStore";
}
+}
+
def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex", []>,
Requires<[IsARM, HasV7]> {
@@ -4451,10 +4514,16 @@ def MCR : MovRCopro<"mcr", 0 /* from ARM core register to coprocessor */,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>;
+def : ARMInstAlias<"mcr${p} $cop, $opc1, $Rt, $CRn, $CRm",
+ (MCR p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
+ c_imm:$CRm, 0, pred:$p)>;
def MRC : MovRCopro<"mrc", 1 /* from coprocessor to ARM core register */,
(outs GPR:$Rt),
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
imm0_7:$opc2), []>;
+def : ARMInstAlias<"mrc${p} $cop, $opc1, $Rt, $CRn, $CRm",
+ (MRC GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ c_imm:$CRm, 0, pred:$p)>;
def : ARMPat<(int_arm_mrc imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2),
(MRC imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2)>;
@@ -4488,10 +4557,16 @@ def MCR2 : MovRCopro2<"mcr2", 0 /* from ARM core register to coprocessor */,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>;
+def : ARMInstAlias<"mcr2$ $cop, $opc1, $Rt, $CRn, $CRm",
+ (MCR2 p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
+ c_imm:$CRm, 0)>;
def MRC2 : MovRCopro2<"mrc2", 1 /* from coprocessor to ARM core register */,
(outs GPR:$Rt),
(ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn, c_imm:$CRm,
imm0_7:$opc2), []>;
+def : ARMInstAlias<"mrc2$ $cop, $opc1, $Rt, $CRn, $CRm",
+ (MRC2 GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ c_imm:$CRm, 0)>;
def : ARMV5TPat<(int_arm_mrc2 imm:$cop, imm:$opc1, imm:$CRn,
imm:$CRm, imm:$opc2),
@@ -4635,7 +4710,8 @@ let isCall = 1,
// no encoding information is necessary.
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR,
- QQQQ0, QQQQ1, QQQQ2, QQQQ3 ], hasSideEffects = 1, isBarrier = 1 in {
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15 ],
+ hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
def Int_eh_sjlj_setjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$val),
NoItinerary,
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
@@ -4644,31 +4720,37 @@ let Defs =
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR ],
- hasSideEffects = 1, isBarrier = 1 in {
+ hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
def Int_eh_sjlj_setjmp_nofp : PseudoInst<(outs), (ins GPR:$src, GPR:$val),
NoItinerary,
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, NoVFP]>;
}
-// FIXME: Non-Darwin version(s)
+// FIXME: Non-IOS version(s)
let isBarrier = 1, hasSideEffects = 1, isTerminator = 1,
Defs = [ R7, LR, SP ] in {
def Int_eh_sjlj_longjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$scratch),
NoItinerary,
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
- Requires<[IsARM, IsDarwin]>;
+ Requires<[IsARM, IsIOS]>;
}
-// eh.sjlj.dispatchsetup pseudo-instruction.
-// This pseudo is used for ARM, Thumb1 and Thumb2. Any differences are
+// eh.sjlj.dispatchsetup pseudo-instructions.
+// These pseudos are used for both ARM and Thumb2. Any differences are
// handled when the pseudo is expanded (which happens before any passes
// that need the instruction size).
-let isBarrier = 1, hasSideEffects = 1 in
-def Int_eh_sjlj_dispatchsetup :
- PseudoInst<(outs), (ins GPR:$src), NoItinerary,
- [(ARMeh_sjlj_dispatchsetup GPR:$src)]>,
- Requires<[IsDarwin]>;
+let Defs =
+ [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15 ],
+ isBarrier = 1 in
+def Int_eh_sjlj_dispatchsetup : PseudoInst<(outs), (ins), NoItinerary, []>;
+
+let Defs =
+ [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR ],
+ isBarrier = 1 in
+def Int_eh_sjlj_dispatchsetup_nofp : PseudoInst<(outs), (ins), NoItinerary, []>;
+
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -4725,30 +4807,15 @@ def : ARMPat<(ARMWrapperJT tjumptable:$dst, imm:$id),
// TODO: add,sub,and, 3-instr forms?
-// Tail calls
-def : ARMPat<(ARMtcret tcGPR:$dst),
- (TCRETURNri tcGPR:$dst)>, Requires<[IsDarwin]>;
-
-def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
- (TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
-
-def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
- (TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
-
-def : ARMPat<(ARMtcret tcGPR:$dst),
- (TCRETURNriND tcGPR:$dst)>, Requires<[IsNotDarwin]>;
-
-def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
- (TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
-
-def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
- (TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
+// Tail calls. These patterns also apply to Thumb mode.
+def : Pat<(ARMtcret tcGPR:$dst), (TCRETURNri tcGPR:$dst)>;
+def : Pat<(ARMtcret (i32 tglobaladdr:$dst)), (TCRETURNdi texternalsym:$dst)>;
+def : Pat<(ARMtcret (i32 texternalsym:$dst)), (TCRETURNdi texternalsym:$dst)>;
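+
+// Lowering sketch: an ARMtcret selects TCRETURNdi/TCRETURNri, which are
+// expanded via TAILJMPd/TAILJMPr into a plain branch (or BX) in the tail
+// position rather than a bl.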
// Direct calls
-def : ARMPat<(ARMcall texternalsym:$func), (BL texternalsym:$func)>,
- Requires<[IsARM, IsNotDarwin]>;
-def : ARMPat<(ARMcall texternalsym:$func), (BLr9 texternalsym:$func)>,
- Requires<[IsARM, IsDarwin]>;
+def : ARMPat<(ARMcall texternalsym:$func), (BL texternalsym:$func)>;
+def : ARMPat<(ARMcall_nolink texternalsym:$func),
+ (BMOVPCB_CALL texternalsym:$func)>;
// zextload i1 -> zextload i8
def : ARMPat<(zextloadi1 addrmode_imm12:$addr), (LDRBi12 addrmode_imm12:$addr)>;
@@ -4992,13 +5059,113 @@ def : MnemonicAlias<"uqsubaddx", "uqsax">;
// USAX == USUBADDX
def : MnemonicAlias<"usubaddx", "usax">;
-// LDRSBT/LDRHT/LDRSHT post-index offset if optional.
-// Note that the write-back output register is a dummy operand for MC (it's
-// only meaningful for codegen), so we just pass zero here.
-// FIXME: tblgen not cooperating with argument conversions.
-//def : InstAlias<"ldrsbt${p} $Rt, $addr",
-// (LDRSBTi GPR:$Rt, GPR:$Rt, addr_offset_none:$addr, 0,pred:$p)>;
-//def : InstAlias<"ldrht${p} $Rt, $addr",
-// (LDRHTi GPR:$Rt, GPR:$Rt, addr_offset_none:$addr, 0, pred:$p)>;
-//def : InstAlias<"ldrsht${p} $Rt, $addr",
-// (LDRSHTi GPR:$Rt, GPR:$Rt, addr_offset_none:$addr, 0, pred:$p)>;
+// "mov Rd, so_imm_not" can be handled via "mvn" in assembly, just like
+// for isel.
+def : ARMInstAlias<"mov${s}${p} $Rd, $imm",
+ (MVNi rGPR:$Rd, so_imm_not:$imm, pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"mvn${s}${p} $Rd, $imm",
+ (MOVi rGPR:$Rd, so_imm_not:$imm, pred:$p, cc_out:$s)>;
+// Same for AND <--> BIC
+def : ARMInstAlias<"bic${s}${p} $Rd, $Rn, $imm",
+ (ANDri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"bic${s}${p} $Rdn, $imm",
+ (ANDri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"and${s}${p} $Rd, $Rn, $imm",
+ (BICri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"and${s}${p} $Rdn, $imm",
+ (BICri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
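+
+// e.g. "mov r0, #0xffffff00" (not encodable as so_imm) is accepted and
+// encoded as "mvn r0, #0xff", and "and r0, r1, #0xffffff00" becomes
+// "bic r0, r1, #0xff".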
+
+// Likewise, "add Rd, so_imm_neg" -> sub
+def : ARMInstAlias<"add${s}${p} $Rd, $Rn, $imm",
+ (SUBri GPR:$Rd, GPR:$Rn, so_imm_neg:$imm, pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"add${s}${p} $Rd, $imm",
+ (SUBri GPR:$Rd, GPR:$Rd, so_imm_neg:$imm, pred:$p, cc_out:$s)>;
+// Same for CMP <--> CMN via so_imm_neg
+def : ARMInstAlias<"cmp${p} $Rd, $imm",
+ (CMNzri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
+def : ARMInstAlias<"cmn${p} $Rd, $imm",
+ (CMPri rGPR:$Rd, so_imm_neg:$imm, pred:$p)>;
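+
+// e.g. "add r0, r1, #-4" assembles as "sub r0, r1, #4", and
+// "cmp r0, #-1" assembles as "cmn r0, #1".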
+
+// The shifter forms of the MOV instruction are aliased to the ASR, LSL,
+// LSR, ROR, and RRX instructions.
+// FIXME: We need C++ parser hooks to map the alias to the MOV
+// encoding. It seems we should be able to do that sort of thing
+// in tblgen, but it could get ugly.
+def ASRi : ARMAsmPseudo<"asr${s}${p} $Rd, $Rm, $imm",
+ (ins GPR:$Rd, GPR:$Rm, imm0_32:$imm, pred:$p,
+ cc_out:$s)>;
+def LSRi : ARMAsmPseudo<"lsr${s}${p} $Rd, $Rm, $imm",
+ (ins GPR:$Rd, GPR:$Rm, imm0_32:$imm, pred:$p,
+ cc_out:$s)>;
+def LSLi : ARMAsmPseudo<"lsl${s}${p} $Rd, $Rm, $imm",
+ (ins GPR:$Rd, GPR:$Rm, imm0_31:$imm, pred:$p,
+ cc_out:$s)>;
+def RORi : ARMAsmPseudo<"ror${s}${p} $Rd, $Rm, $imm",
+ (ins GPR:$Rd, GPR:$Rm, imm0_31:$imm, pred:$p,
+ cc_out:$s)>;
+def RRXi : ARMAsmPseudo<"rrx${s}${p} $Rd, $Rm",
+ (ins GPRnopc:$Rd, GPRnopc:$Rm, pred:$p, cc_out:$s)>;
+def ASRr : ARMAsmPseudo<"asr${s}${p} $Rd, $Rn, $Rm",
+ (ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
+ cc_out:$s)>;
+def LSRr : ARMAsmPseudo<"lsr${s}${p} $Rd, $Rn, $Rm",
+ (ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
+ cc_out:$s)>;
+def LSLr : ARMAsmPseudo<"lsl${s}${p} $Rd, $Rn, $Rm",
+ (ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
+ cc_out:$s)>;
+def RORr : ARMAsmPseudo<"ror${s}${p} $Rd, $Rn, $Rm",
+ (ins GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
+ cc_out:$s)>;
+// The shifter instructions also support a two-operand form.
+def : ARMInstAlias<"asr${s}${p} $Rm, $imm",
+ (ASRi GPR:$Rm, GPR:$Rm, imm0_32:$imm, pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"lsr${s}${p} $Rm, $imm",
+ (LSRi GPR:$Rm, GPR:$Rm, imm0_32:$imm, pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"lsl${s}${p} $Rm, $imm",
+ (LSLi GPR:$Rm, GPR:$Rm, imm0_31:$imm, pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"ror${s}${p} $Rm, $imm",
+ (RORi GPR:$Rm, GPR:$Rm, imm0_31:$imm, pred:$p, cc_out:$s)>;
+def : ARMInstAlias<"asr${s}${p} $Rn, $Rm",
+ (ASRr GPRnopc:$Rn, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
+ cc_out:$s)>;
+def : ARMInstAlias<"lsr${s}${p} $Rn, $Rm",
+ (LSRr GPRnopc:$Rn, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
+ cc_out:$s)>;
+def : ARMInstAlias<"lsl${s}${p} $Rn, $Rm",
+ (LSLr GPRnopc:$Rn, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
+ cc_out:$s)>;
+def : ARMInstAlias<"ror${s}${p} $Rn, $Rm",
+ (RORr GPRnopc:$Rn, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p,
+ cc_out:$s)>;
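+
+// e.g. "asr r0, r1, #2" corresponds to the pre-UAL "mov r0, r1, asr #2"
+// shifter encoding, and the two-operand "lsl r0, #3" is shorthand for
+// "lsl r0, r0, #3".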
+
+
+// The 'mul' instruction can be specified with only two operands.
+def : ARMInstAlias<"mul${s}${p} $Rn, $Rm",
+ (MUL rGPR:$Rn, rGPR:$Rm, rGPR:$Rn, pred:$p, cc_out:$s)>;
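+// e.g. "mul r1, r2" is shorthand for "mul r1, r2, r1".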
+
+// "neg" is and alias for "rsb rd, rn, #0"
+def : ARMInstAlias<"neg${s}${p} $Rd, $Rm",
+ (RSBri GPR:$Rd, GPR:$Rm, 0, pred:$p, cc_out:$s)>;
+
+// Pre-v6, 'mov r0, r0' was used as a NOP encoding.
+def : InstAlias<"nop${p}", (MOVr R0, R0, pred:$p, zero_reg)>,
+ Requires<[IsARM, NoV6]>;
+
+// UMULL/SMULL are available on all arches, but the instruction definitions
+// need different constraints pre-v6. Use these aliases for assembly
+// parsing on pre-v6.
+def : InstAlias<"smull${s}${p} $RdLo, $RdHi, $Rn, $Rm",
+ (SMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ Requires<[IsARM, NoV6]>;
+def : InstAlias<"umull${s}${p} $RdLo, $RdHi, $Rn, $Rm",
+ (UMULL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>,
+ Requires<[IsARM, NoV6]>;
+
+// 'it' blocks in ARM mode just validate the predicates. The IT itself
+// is discarded.
+def ITasm : ARMAsmPseudo<"it$mask $cc", (ins it_pred:$cc, it_mask:$mask)>;
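+
+// Parsing sketch: in ARM mode the assembler accepts, e.g.,
+//   ite eq
+//   moveq r0, #1
+//   movne r0, #0
+// validates the predicates against the IT mask, and emits only the two
+// conditional moves; no IT encoding is produced.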
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
index 7aad186..c7219a6 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -1,4 +1,4 @@
-//===- ARMInstrNEON.td - NEON support for ARM -----------------------------===//
+//===-- ARMInstrNEON.td - NEON support for ARM -------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,6 +15,45 @@
//===----------------------------------------------------------------------===//
// NEON-specific Operands.
//===----------------------------------------------------------------------===//
+def nModImm : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+}
+
+def nImmSplatI8AsmOperand : AsmOperandClass { let Name = "NEONi8splat"; }
+def nImmSplatI8 : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmSplatI8AsmOperand;
+}
+def nImmSplatI16AsmOperand : AsmOperandClass { let Name = "NEONi16splat"; }
+def nImmSplatI16 : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmSplatI16AsmOperand;
+}
+def nImmSplatI32AsmOperand : AsmOperandClass { let Name = "NEONi32splat"; }
+def nImmSplatI32 : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmSplatI32AsmOperand;
+}
+def nImmVMOVI32AsmOperand : AsmOperandClass { let Name = "NEONi32vmov"; }
+def nImmVMOVI32 : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmVMOVI32AsmOperand;
+}
+def nImmVMOVI32NegAsmOperand : AsmOperandClass { let Name = "NEONi32vmovNeg"; }
+def nImmVMOVI32Neg : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmVMOVI32NegAsmOperand;
+}
+def nImmVMOVF32 : Operand<i32> {
+ let PrintMethod = "printFPImmOperand";
+ let ParserMatchClass = FPImmOperand;
+}
+def nImmSplatI64AsmOperand : AsmOperandClass { let Name = "NEONi64splat"; }
+def nImmSplatI64 : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
+ let ParserMatchClass = nImmSplatI64AsmOperand;
+}
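+// Matching sketch: these operand classes route NEON modified immediates
+// through the asm parser, e.g. a splatted byte such as "vmov.i8 d0, #0x42"
+// is matched by NEONi8splat, and printNEONModImmOperand prints the decoded
+// value back out.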
+
def VectorIndex8Operand : AsmOperandClass { let Name = "VectorIndex8"; }
def VectorIndex16Operand : AsmOperandClass { let Name = "VectorIndex16"; }
def VectorIndex32Operand : AsmOperandClass { let Name = "VectorIndex32"; }
@@ -40,6 +79,326 @@ def VectorIndex32 : Operand<i32>, ImmLeaf<i32, [{
let MIOperandInfo = (ops i32imm);
}
+// Register list of one D register.
+def VecListOneDAsmOperand : AsmOperandClass {
+ let Name = "VecListOneD";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListOneD : RegisterOperand<DPR, "printVectorListOne"> {
+ let ParserMatchClass = VecListOneDAsmOperand;
+}
+// Register list of two sequential D registers.
+def VecListDPairAsmOperand : AsmOperandClass {
+ let Name = "VecListDPair";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListDPair : RegisterOperand<DPair, "printVectorListTwo"> {
+ let ParserMatchClass = VecListDPairAsmOperand;
+}
+// Register list of three sequential D registers.
+def VecListThreeDAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeD";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListThreeD : RegisterOperand<DPR, "printVectorListThree"> {
+ let ParserMatchClass = VecListThreeDAsmOperand;
+}
+// Register list of four sequential D registers.
+def VecListFourDAsmOperand : AsmOperandClass {
+ let Name = "VecListFourD";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListFourD : RegisterOperand<DPR, "printVectorListFour"> {
+ let ParserMatchClass = VecListFourDAsmOperand;
+}
+// Register list of two D registers spaced by 2 (two sequential Q registers).
+def VecListDPairSpacedAsmOperand : AsmOperandClass {
+ let Name = "VecListDPairSpaced";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListDPairSpaced : RegisterOperand<DPair, "printVectorListTwoSpaced"> {
+ let ParserMatchClass = VecListDPairSpacedAsmOperand;
+}
+// Register list of three D registers spaced by 2 (three Q registers).
+def VecListThreeQAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeQ";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListThreeQ : RegisterOperand<DPR, "printVectorListThreeSpaced"> {
+ let ParserMatchClass = VecListThreeQAsmOperand;
+}
+// Register list of four D registers spaced by 2 (four Q registers).
+def VecListFourQAsmOperand : AsmOperandClass {
+ let Name = "VecListFourQ";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListFourQ : RegisterOperand<DPR, "printVectorListFourSpaced"> {
+ let ParserMatchClass = VecListFourQAsmOperand;
+}
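+
+// Syntax sketch: these map assembly register lists onto single operands,
+// e.g.
+//   vld1.8 {d0}, [r0]        @ VecListOneD
+//   vld1.8 {d0, d1}, [r0]    @ VecListDPair
+//   vld2.8 {d0, d2}, [r0]    @ VecListDPairSpaced (even/odd D pair)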
+
+// Register list of one D register, with "all lanes" subscripting.
+def VecListOneDAllLanesAsmOperand : AsmOperandClass {
+ let Name = "VecListOneDAllLanes";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListOneDAllLanes : RegisterOperand<DPR, "printVectorListOneAllLanes"> {
+ let ParserMatchClass = VecListOneDAllLanesAsmOperand;
+}
+// Register list of two D registers, with "all lanes" subscripting.
+def VecListDPairAllLanesAsmOperand : AsmOperandClass {
+ let Name = "VecListDPairAllLanes";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListDPairAllLanes : RegisterOperand<DPair,
+ "printVectorListTwoAllLanes"> {
+ let ParserMatchClass = VecListDPairAllLanesAsmOperand;
+}
+// Register list of two D registers spaced by 2 (two sequential Q registers),
+// with "all lanes" subscripting.
+def VecListDPairSpacedAllLanesAsmOperand : AsmOperandClass {
+ let Name = "VecListDPairSpacedAllLanes";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListDPairSpacedAllLanes : RegisterOperand<DPair,
+ "printVectorListTwoSpacedAllLanes"> {
+ let ParserMatchClass = VecListDPairSpacedAllLanesAsmOperand;
+}
+// Register list of three D registers, with "all lanes" subscripting.
+def VecListThreeDAllLanesAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeDAllLanes";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListThreeDAllLanes : RegisterOperand<DPR,
+ "printVectorListThreeAllLanes"> {
+ let ParserMatchClass = VecListThreeDAllLanesAsmOperand;
+}
+// Register list of three D registers spaced by 2 (three sequential Q regs).
+def VecListThreeQAllLanesAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeQAllLanes";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListThreeQAllLanes : RegisterOperand<DPR,
+ "printVectorListThreeSpacedAllLanes"> {
+ let ParserMatchClass = VecListThreeQAllLanesAsmOperand;
+}
+// Register list of four D registers, with "all lanes" subscripting.
+def VecListFourDAllLanesAsmOperand : AsmOperandClass {
+ let Name = "VecListFourDAllLanes";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListFourDAllLanes : RegisterOperand<DPR, "printVectorListFourAllLanes"> {
+ let ParserMatchClass = VecListFourDAllLanesAsmOperand;
+}
+// Register list of four D registers spaced by 2 (four sequential Q regs).
+def VecListFourQAllLanesAsmOperand : AsmOperandClass {
+ let Name = "VecListFourQAllLanes";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListOperands";
+}
+def VecListFourQAllLanes : RegisterOperand<DPR,
+ "printVectorListFourSpacedAllLanes"> {
+ let ParserMatchClass = VecListFourQAllLanesAsmOperand;
+}
+
+
+// Register list of one D register, with byte lane subscripting.
+def VecListOneDByteIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListOneDByteIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListOneDByteIndexed : Operand<i32> {
+ let ParserMatchClass = VecListOneDByteIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with half-word lane subscripting.
+def VecListOneDHWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListOneDHWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListOneDHWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListOneDHWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with word lane subscripting.
+def VecListOneDWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListOneDWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListOneDWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListOneDWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+
+// Register list of two D registers with byte lane subscripting.
+def VecListTwoDByteIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListTwoDByteIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListTwoDByteIndexed : Operand<i32> {
+ let ParserMatchClass = VecListTwoDByteIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with half-word lane subscripting.
+def VecListTwoDHWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListTwoDHWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListTwoDHWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListTwoDHWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with word lane subscripting.
+def VecListTwoDWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListTwoDWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListTwoDWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListTwoDWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// Register list of two Q registers with half-word lane subscripting.
+def VecListTwoQHWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListTwoQHWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListTwoQHWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListTwoQHWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with word lane subscripting.
+def VecListTwoQWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListTwoQWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListTwoQWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListTwoQWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+
+
+// Register list of three D registers with byte lane subscripting.
+def VecListThreeDByteIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeDByteIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListThreeDByteIndexed : Operand<i32> {
+ let ParserMatchClass = VecListThreeDByteIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with half-word lane subscripting.
+def VecListThreeDHWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeDHWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListThreeDHWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListThreeDHWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with word lane subscripting.
+def VecListThreeDWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeDWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListThreeDWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListThreeDWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// Register list of three Q registers with half-word lane subscripting.
+def VecListThreeQHWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeQHWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListThreeQHWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListThreeQHWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with word lane subscripting.
+def VecListThreeQWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListThreeQWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListThreeQWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListThreeQWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+
+// Register list of four D registers with byte lane subscripting.
+def VecListFourDByteIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListFourDByteIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListFourDByteIndexed : Operand<i32> {
+ let ParserMatchClass = VecListFourDByteIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with half-word lane subscripting.
+def VecListFourDHWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListFourDHWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListFourDHWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListFourDHWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with word lane subscripting.
+def VecListFourDWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListFourDWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListFourDWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListFourDWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// Register list of four Q registers with half-word lane subscripting.
+def VecListFourQHWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListFourQHWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListFourQHWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListFourQHWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+// ...with word lane subscripting.
+def VecListFourQWordIndexAsmOperand : AsmOperandClass {
+ let Name = "VecListFourQWordIndexed";
+ let ParserMethod = "parseVectorList";
+ let RenderMethod = "addVecListIndexedOperands";
+}
+def VecListFourQWordIndexed : Operand<i32> {
+ let ParserMatchClass = VecListFourQWordIndexAsmOperand;
+ let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx);
+}
+
+
//===----------------------------------------------------------------------===//
// NEON-specific DAG Nodes.
//===----------------------------------------------------------------------===//
@@ -103,6 +462,7 @@ def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;
def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
def NEONvmovImm : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
def NEONvmvnImm : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
+def NEONvmovFPImm : SDNode<"ARMISD::VMOVFPIMM", SDTARMVMOVIMM>;
def SDTARMVORRIMM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
SDTCisVT<2, i32>]>;
@@ -164,30 +524,22 @@ def NEONimmAllOnesV: PatLeaf<(NEONvmovImm (i32 timm)), [{
}]>;
//===----------------------------------------------------------------------===//
-// NEON operand definitions
-//===----------------------------------------------------------------------===//
-
-def nModImm : Operand<i32> {
- let PrintMethod = "printNEONModImmOperand";
-}
-
-//===----------------------------------------------------------------------===//
// NEON load / store instructions
//===----------------------------------------------------------------------===//
// Use VLDM to load a Q register as a D register pair.
// This is a pseudo instruction that is expanded to VLDMD after reg alloc.
def VLDMQIA
- : PseudoVFPLdStM<(outs QPR:$dst), (ins GPR:$Rn),
+ : PseudoVFPLdStM<(outs DPair:$dst), (ins GPR:$Rn),
IIC_fpLoad_m, "",
- [(set QPR:$dst, (v2f64 (load GPR:$Rn)))]>;
+ [(set DPair:$dst, (v2f64 (load GPR:$Rn)))]>;
// Use VSTM to store a Q register as a D register pair.
// This is a pseudo instruction that is expanded to VSTMD after reg alloc.
def VSTMQIA
- : PseudoVFPLdStM<(outs), (ins QPR:$src, GPR:$Rn),
+ : PseudoVFPLdStM<(outs), (ins DPair:$src, GPR:$Rn),
IIC_fpStore_m, "",
- [(store (v2f64 QPR:$src), GPR:$Rn)]>;
+ [(store (v2f64 DPair:$src), GPR:$Rn)]>;
// Classes for VLD* pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
@@ -197,12 +549,31 @@ class VLDQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
(ins addrmode6:$addr, am6offset:$offset), itin,
"$addr.addr = $wb">;
+class VLDQWBfixedPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
+ (ins addrmode6:$addr), itin,
+ "$addr.addr = $wb">;
+class VLDQWBregisterPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
+ (ins addrmode6:$addr, rGPR:$offset), itin,
+ "$addr.addr = $wb">;
+
class VLDQQPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQPR:$dst), (ins addrmode6:$addr), itin, "">;
class VLDQQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
(ins addrmode6:$addr, am6offset:$offset), itin,
"$addr.addr = $wb">;
+class VLDQQWBfixedPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
+ (ins addrmode6:$addr), itin,
+ "$addr.addr = $wb">;
+class VLDQQWBregisterPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
+ (ins addrmode6:$addr, rGPR:$offset), itin,
+ "$addr.addr = $wb">;
+
+
class VLDQQQQPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs QQQQPR:$dst), (ins addrmode6:$addr, QQQQPR:$src),itin,
"$src = $dst">;
@@ -215,17 +586,17 @@ let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// VLD1 : Vector Load (multiple single elements)
class VLD1D<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0111,op7_4, (outs DPR:$Vd),
+ : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd),
(ins addrmode6:$Rn), IIC_VLD1,
- "vld1", Dt, "\\{$Vd\\}, $Rn", "", []> {
+ "vld1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDInstruction";
}
class VLD1Q<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b1010,op7_4, (outs DPR:$Vd, DPR:$dst2),
+ : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd),
(ins addrmode6:$Rn), IIC_VLD1x2,
- "vld1", Dt, "\\{$Vd, $dst2\\}, $Rn", "", []> {
+ "vld1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDInstruction";
@@ -241,59 +612,82 @@ def VLD1q16 : VLD1Q<{0,1,?,?}, "16">;
def VLD1q32 : VLD1Q<{1,0,?,?}, "32">;
def VLD1q64 : VLD1Q<{1,1,?,?}, "64">;
-def VLD1q8Pseudo : VLDQPseudo<IIC_VLD1x2>;
-def VLD1q16Pseudo : VLDQPseudo<IIC_VLD1x2>;
-def VLD1q32Pseudo : VLDQPseudo<IIC_VLD1x2>;
-def VLD1q64Pseudo : VLDQPseudo<IIC_VLD1x2>;
-
// ...with address register writeback:
-class VLD1DWB<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0111,op7_4, (outs DPR:$Vd, GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1u,
- "vld1", Dt, "\\{$Vd\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
+multiclass VLD1DWB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0,0b10, 0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn), IIC_VLD1u,
+ "vld1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbFixed";
+ }
+ def _register : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1u,
+ "vld1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbRegister";
+ }
}
-class VLD1QWB<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b1010,op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1x2u,
- "vld1", Dt, "\\{$Vd, $dst2\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
+multiclass VLD1QWB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn), IIC_VLD1x2u,
+ "vld1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbFixed";
+ }
+ def _register : NLdSt<0,0b10,0b1010,op7_4, (outs VecListDPair:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
+ "vld1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbRegister";
+ }
}
-def VLD1d8_UPD : VLD1DWB<{0,0,0,?}, "8">;
-def VLD1d16_UPD : VLD1DWB<{0,1,0,?}, "16">;
-def VLD1d32_UPD : VLD1DWB<{1,0,0,?}, "32">;
-def VLD1d64_UPD : VLD1DWB<{1,1,0,?}, "64">;
-
-def VLD1q8_UPD : VLD1QWB<{0,0,?,?}, "8">;
-def VLD1q16_UPD : VLD1QWB<{0,1,?,?}, "16">;
-def VLD1q32_UPD : VLD1QWB<{1,0,?,?}, "32">;
-def VLD1q64_UPD : VLD1QWB<{1,1,?,?}, "64">;
-
-def VLD1q8Pseudo_UPD : VLDQWBPseudo<IIC_VLD1x2u>;
-def VLD1q16Pseudo_UPD : VLDQWBPseudo<IIC_VLD1x2u>;
-def VLD1q32Pseudo_UPD : VLDQWBPseudo<IIC_VLD1x2u>;
-def VLD1q64Pseudo_UPD : VLDQWBPseudo<IIC_VLD1x2u>;
+defm VLD1d8wb : VLD1DWB<{0,0,0,?}, "8">;
+defm VLD1d16wb : VLD1DWB<{0,1,0,?}, "16">;
+defm VLD1d32wb : VLD1DWB<{1,0,0,?}, "32">;
+defm VLD1d64wb : VLD1DWB<{1,1,0,?}, "64">;
+defm VLD1q8wb : VLD1QWB<{0,0,?,?}, "8">;
+defm VLD1q16wb : VLD1QWB<{0,1,?,?}, "16">;
+defm VLD1q32wb : VLD1QWB<{1,0,?,?}, "32">;
+defm VLD1q64wb : VLD1QWB<{1,1,?,?}, "64">;
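+
+// Writeback syntax sketch: the _fixed form is "vld1.8 {d0}, [r0]!"
+// (post-increment by the access size; Rm is hard-wired to 0b1101), and
+// the _register form is "vld1.8 {d0}, [r0], r1" (post-increment by Rm).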
-// ...with 3 registers (some of these are only for the disassembler):
+// ...with 3 registers
class VLD1D3<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0110,op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
+ : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd),
(ins addrmode6:$Rn), IIC_VLD1x3, "vld1", Dt,
- "\\{$Vd, $dst2, $dst3\\}, $Rn", "", []> {
+ "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLDInstruction";
}
-class VLD1D3WB<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0110,op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1x3u, "vld1", Dt,
- "\\{$Vd, $dst2, $dst3\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
- let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLDInstruction";
+multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0,0b10,0b0110, op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn), IIC_VLD1x2u,
+ "vld1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbFixed";
+ }
+ def _register : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
+ "vld1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbRegister";
+ }
}
def VLD1d8T : VLD1D3<{0,0,0,?}, "8">;
@@ -301,31 +695,40 @@ def VLD1d16T : VLD1D3<{0,1,0,?}, "16">;
def VLD1d32T : VLD1D3<{1,0,0,?}, "32">;
def VLD1d64T : VLD1D3<{1,1,0,?}, "64">;
-def VLD1d8T_UPD : VLD1D3WB<{0,0,0,?}, "8">;
-def VLD1d16T_UPD : VLD1D3WB<{0,1,0,?}, "16">;
-def VLD1d32T_UPD : VLD1D3WB<{1,0,0,?}, "32">;
-def VLD1d64T_UPD : VLD1D3WB<{1,1,0,?}, "64">;
+defm VLD1d8Twb : VLD1D3WB<{0,0,0,?}, "8">;
+defm VLD1d16Twb : VLD1D3WB<{0,1,0,?}, "16">;
+defm VLD1d32Twb : VLD1D3WB<{1,0,0,?}, "32">;
+defm VLD1d64Twb : VLD1D3WB<{1,1,0,?}, "64">;
-def VLD1d64TPseudo : VLDQQPseudo<IIC_VLD1x3>;
-def VLD1d64TPseudo_UPD : VLDQQWBPseudo<IIC_VLD1x3u>;
+def VLD1d64TPseudo : VLDQQPseudo<IIC_VLD1x3>;
-// ...with 4 registers (some of these are only for the disassembler):
+// ...with 4 registers
class VLD1D4<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0010,op7_4,(outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
+ : NLdSt<0, 0b10, 0b0010, op7_4, (outs VecListFourD:$Vd),
(ins addrmode6:$Rn), IIC_VLD1x4, "vld1", Dt,
- "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
+ "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDInstruction";
}
-class VLD1D4WB<bits<4> op7_4, string Dt>
- : NLdSt<0,0b10,0b0010,op7_4,
- (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1x4u, "vld1", Dt,
- "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm", "$Rn.addr = $wb",
- []> {
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
+multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0,0b10,0b0010, op7_4, (outs VecListFourD:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn), IIC_VLD1x2u,
+ "vld1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbFixed";
+ }
+ def _register : NLdSt<0,0b10,0b0010,op7_4, (outs VecListFourD:$Vd, GPR:$wb),
+                        (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x4u,
+ "vld1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbRegister";
+ }
}
def VLD1d8Q : VLD1D4<{0,0,?,?}, "8">;
@@ -333,91 +736,80 @@ def VLD1d16Q : VLD1D4<{0,1,?,?}, "16">;
def VLD1d32Q : VLD1D4<{1,0,?,?}, "32">;
def VLD1d64Q : VLD1D4<{1,1,?,?}, "64">;
-def VLD1d8Q_UPD : VLD1D4WB<{0,0,?,?}, "8">;
-def VLD1d16Q_UPD : VLD1D4WB<{0,1,?,?}, "16">;
-def VLD1d32Q_UPD : VLD1D4WB<{1,0,?,?}, "32">;
-def VLD1d64Q_UPD : VLD1D4WB<{1,1,?,?}, "64">;
+defm VLD1d8Qwb : VLD1D4WB<{0,0,?,?}, "8">;
+defm VLD1d16Qwb : VLD1D4WB<{0,1,?,?}, "16">;
+defm VLD1d32Qwb : VLD1D4WB<{1,0,?,?}, "32">;
+defm VLD1d64Qwb : VLD1D4WB<{1,1,?,?}, "64">;
-def VLD1d64QPseudo : VLDQQPseudo<IIC_VLD1x4>;
-def VLD1d64QPseudo_UPD : VLDQQWBPseudo<IIC_VLD1x4u>;
+def VLD1d64QPseudo : VLDQQPseudo<IIC_VLD1x4>;
// VLD2 : Vector Load (multiple 2-element structures)
-class VLD2D<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2),
- (ins addrmode6:$Rn), IIC_VLD2,
- "vld2", Dt, "\\{$Vd, $dst2\\}, $Rn", "", []> {
- let Rm = 0b1111;
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
-}
-class VLD2Q<bits<4> op7_4, string Dt>
- : NLdSt<0, 0b10, 0b0011, op7_4,
- (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$Rn), IIC_VLD2x2,
- "vld2", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
+class VLD2<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy,
+ InstrItinClass itin>
+ : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd),
+ (ins addrmode6:$Rn), itin,
+ "vld2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVLDInstruction";
}
-def VLD2d8 : VLD2D<0b1000, {0,0,?,?}, "8">;
-def VLD2d16 : VLD2D<0b1000, {0,1,?,?}, "16">;
-def VLD2d32 : VLD2D<0b1000, {1,0,?,?}, "32">;
+def VLD2d8 : VLD2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2>;
+def VLD2d16 : VLD2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2>;
+def VLD2d32 : VLD2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2>;
-def VLD2q8 : VLD2Q<{0,0,?,?}, "8">;
-def VLD2q16 : VLD2Q<{0,1,?,?}, "16">;
-def VLD2q32 : VLD2Q<{1,0,?,?}, "32">;
-
-def VLD2d8Pseudo : VLDQPseudo<IIC_VLD2>;
-def VLD2d16Pseudo : VLDQPseudo<IIC_VLD2>;
-def VLD2d32Pseudo : VLDQPseudo<IIC_VLD2>;
+def VLD2q8 : VLD2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2>;
+def VLD2q16 : VLD2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2>;
+def VLD2q32 : VLD2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2>;
def VLD2q8Pseudo : VLDQQPseudo<IIC_VLD2x2>;
def VLD2q16Pseudo : VLDQQPseudo<IIC_VLD2x2>;
def VLD2q32Pseudo : VLDQQPseudo<IIC_VLD2x2>;
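+// Illustrative asm for the two list sizes handled by VLD2 (example operands):
+//   vld2.16 {d0, d1}, [r0]             @ VLD2d16: one d-register pair
+//   vld2.16 {d0, d1, d2, d3}, [r0]     @ VLD2q16: four consecutive d registers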
// ...with address register writeback:
-class VLD2DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2u,
- "vld2", Dt, "\\{$Vd, $dst2\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
-}
-class VLD2QWB<bits<4> op7_4, string Dt>
- : NLdSt<0, 0b10, 0b0011, op7_4,
- (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2x2u,
- "vld2", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVLDInstruction";
+multiclass VLD2WB<bits<4> op11_8, bits<4> op7_4, string Dt,
+ RegisterOperand VdTy, InstrItinClass itin> {
+ def _fixed : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn), itin,
+ "vld2", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbFixed";
+ }
+ def _register : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm), itin,
+ "vld2", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVLDInstruction";
+ let AsmMatchConverter = "cvtVLDwbRegister";
+ }
}
-def VLD2d8_UPD : VLD2DWB<0b1000, {0,0,?,?}, "8">;
-def VLD2d16_UPD : VLD2DWB<0b1000, {0,1,?,?}, "16">;
-def VLD2d32_UPD : VLD2DWB<0b1000, {1,0,?,?}, "32">;
+defm VLD2d8wb : VLD2WB<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VLD2u>;
+defm VLD2d16wb : VLD2WB<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VLD2u>;
+defm VLD2d32wb : VLD2WB<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VLD2u>;
-def VLD2q8_UPD : VLD2QWB<{0,0,?,?}, "8">;
-def VLD2q16_UPD : VLD2QWB<{0,1,?,?}, "16">;
-def VLD2q32_UPD : VLD2QWB<{1,0,?,?}, "32">;
+defm VLD2q8wb : VLD2WB<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VLD2x2u>;
+defm VLD2q16wb : VLD2WB<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VLD2x2u>;
+defm VLD2q32wb : VLD2WB<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VLD2x2u>;
-def VLD2d8Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
-def VLD2d16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
-def VLD2d32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
+def VLD2q8PseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD2x2u>;
+def VLD2q16PseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD2x2u>;
+def VLD2q32PseudoWB_fixed : VLDQQWBfixedPseudo<IIC_VLD2x2u>;
+def VLD2q8PseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD2x2u>;
+def VLD2q16PseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD2x2u>;
+def VLD2q32PseudoWB_register : VLDQQWBregisterPseudo<IIC_VLD2x2u>;
-def VLD2q8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
-def VLD2q16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
-def VLD2q32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
-
-// ...with double-spaced registers (for disassembly only):
-def VLD2b8 : VLD2D<0b1001, {0,0,?,?}, "8">;
-def VLD2b16 : VLD2D<0b1001, {0,1,?,?}, "16">;
-def VLD2b32 : VLD2D<0b1001, {1,0,?,?}, "32">;
-def VLD2b8_UPD : VLD2DWB<0b1001, {0,0,?,?}, "8">;
-def VLD2b16_UPD : VLD2DWB<0b1001, {0,1,?,?}, "16">;
-def VLD2b32_UPD : VLD2DWB<0b1001, {1,0,?,?}, "32">;
+// ...with double-spaced registers
+def VLD2b8 : VLD2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2>;
+def VLD2b16 : VLD2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2>;
+def VLD2b32 : VLD2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2>;
+defm VLD2b8wb : VLD2WB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VLD2u>;
+defm VLD2b16wb : VLD2WB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VLD2u>;
+defm VLD2b32wb : VLD2WB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VLD2u>;
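+// The double-spaced forms skip every other d register, e.g. (illustrative):
+//   vld2.32 {d0, d2}, [r0]             @ VLD2b32: even/odd-spaced pair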
// VLD3 : Vector Load (multiple 3-element structures)
class VLD3D<bits<4> op11_8, bits<4> op7_4, string Dt>
@@ -601,12 +993,11 @@ def VLD1LNd8 : VLD1LN<0b0000, {?,?,?,0}, "8", v8i8, extloadi8> {
}
def VLD1LNd16 : VLD1LN<0b0100, {?,?,0,?}, "16", v4i16, extloadi16> {
let Inst{7-6} = lane{1-0};
- let Inst{4} = Rn{4};
+ let Inst{5-4} = Rn{5-4};
}
def VLD1LNd32 : VLD1LN32<0b1000, {?,0,?,?}, "32", v2i32, load> {
let Inst{7} = lane{0};
- let Inst{5} = Rn{4};
- let Inst{4} = Rn{4};
+ let Inst{5-4} = Rn{5-4};
}
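+// Single-lane loads replace just one element, e.g. (illustrative):
+//   vld1.16 {d0[2]}, [r0]              @ load lane 2 of d0, other lanes kept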
def VLD1LNq8Pseudo : VLD1QLNPseudo<v16i8, extloadi8>;
@@ -776,7 +1167,7 @@ def VLD3LNd16_UPD : VLD3LNWB<0b0110, {?,?,0,0}, "16"> {
let Inst{7-6} = lane{1-0};
}
def VLD3LNd32_UPD : VLD3LNWB<0b1010, {?,0,0,0}, "32"> {
- let Inst{7} = lane{0};
+ let Inst{7} = lane{0};
}
def VLD3LNd8Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
@@ -787,7 +1178,7 @@ def VLD3LNq16_UPD : VLD3LNWB<0b0110, {?,?,1,0}, "16"> {
let Inst{7-6} = lane{1-0};
}
def VLD3LNq32_UPD : VLD3LNWB<0b1010, {?,1,0,0}, "32"> {
- let Inst{7} = lane{0};
+ let Inst{7} = lane{0};
}
def VLD3LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
@@ -802,7 +1193,7 @@ class VLD4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
"\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn",
"$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []> {
let Rm = 0b1111;
- let Inst{4} = Rn{4};
+ let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD4LN";
}
@@ -813,7 +1204,7 @@ def VLD4LNd16 : VLD4LN<0b0111, {?,?,0,?}, "16"> {
let Inst{7-6} = lane{1-0};
}
def VLD4LNd32 : VLD4LN<0b1011, {?,0,?,?}, "32"> {
- let Inst{7} = lane{0};
+ let Inst{7} = lane{0};
let Inst{5} = Rn{5};
}
@@ -826,7 +1217,7 @@ def VLD4LNq16 : VLD4LN<0b0111, {?,?,1,?}, "16"> {
let Inst{7-6} = lane{1-0};
}
def VLD4LNq32 : VLD4LN<0b1011, {?,1,?,?}, "32"> {
- let Inst{7} = lane{0};
+ let Inst{7} = lane{0};
let Inst{5} = Rn{5};
}
@@ -854,7 +1245,7 @@ def VLD4LNd16_UPD : VLD4LNWB<0b0111, {?,?,0,?}, "16"> {
let Inst{7-6} = lane{1-0};
}
def VLD4LNd32_UPD : VLD4LNWB<0b1011, {?,0,?,?}, "32"> {
- let Inst{7} = lane{0};
+ let Inst{7} = lane{0};
let Inst{5} = Rn{5};
}
@@ -866,7 +1257,7 @@ def VLD4LNq16_UPD : VLD4LNWB<0b0111, {?,?,1,?}, "16"> {
let Inst{7-6} = lane{1-0};
}
def VLD4LNq32_UPD : VLD4LNWB<0b1011, {?,1,?,?}, "32"> {
- let Inst{7} = lane{0};
+ let Inst{7} = lane{0};
let Inst{5} = Rn{5};
}
@@ -877,117 +1268,142 @@ def VLD4LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
// VLD1DUP : Vector Load (single element to all lanes)
class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
- : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd), (ins addrmode6dup:$Rn),
- IIC_VLD1dup, "vld1", Dt, "\\{$Vd[]\\}, $Rn", "",
- [(set DPR:$Vd, (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
+ : NLdSt<1, 0b10, 0b1100, op7_4, (outs VecListOneDAllLanes:$Vd),
+ (ins addrmode6dup:$Rn),
+ IIC_VLD1dup, "vld1", Dt, "$Vd, $Rn", "",
+ [(set VecListOneDAllLanes:$Vd,
+ (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
}
-class VLD1QDUPPseudo<ValueType Ty, PatFrag LoadOp> : VLDQPseudo<IIC_VLD1dup> {
- let Pattern = [(set QPR:$dst,
- (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$addr)))))];
-}
-
def VLD1DUPd8 : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8>;
def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16>;
def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load>;
-def VLD1DUPq8Pseudo : VLD1QDUPPseudo<v16i8, extloadi8>;
-def VLD1DUPq16Pseudo : VLD1QDUPPseudo<v8i16, extloadi16>;
-def VLD1DUPq32Pseudo : VLD1QDUPPseudo<v4i32, load>;
-
def : Pat<(v2f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
(VLD1DUPd32 addrmode6:$addr)>;
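+// The all-lanes form replicates one loaded element across the register,
+// e.g. (illustrative):
+//   vld1.32 {d0[]}, [r0]               @ both 32-bit lanes of d0 get the value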
-def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
- (VLD1DUPq32Pseudo addrmode6:$addr)>;
-
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
-class VLD1QDUP<bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2),
+class VLD1QDUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
+ : NLdSt<1, 0b10, 0b1100, op7_4, (outs VecListDPairAllLanes:$Vd),
(ins addrmode6dup:$Rn), IIC_VLD1dup,
- "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
+ "vld1", Dt, "$Vd, $Rn", "",
+ [(set VecListDPairAllLanes:$Vd,
+ (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD1DupInstruction";
}
-def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8">;
-def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16">;
-def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32">;
+def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8", v16i8, extloadi8>;
+def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16", v8i16, extloadi16>;
+def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32", v4i32, load>;
+def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
+ (VLD1DUPq32 addrmode6:$addr)>;
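+// The q-sized variant duplicates into a d-register pair, e.g. (illustrative):
+//   vld1.8  {d0[], d1[]}, [r0]         @ all 16 byte lanes receive the value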
+
+let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// ...with address register writeback:
-class VLD1DUPWB<bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, GPR:$wb),
- (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
- "vld1", Dt, "\\{$Vd[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
- let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLD1DupInstruction";
+multiclass VLD1DUPWB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<1, 0b10, 0b1100, op7_4,
+ (outs VecListOneDAllLanes:$Vd, GPR:$wb),
+ (ins addrmode6dup:$Rn), IIC_VLD1dupu,
+ "vld1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLD1DupInstruction";
+ let AsmMatchConverter = "cvtVLDwbFixed";
+ }
+ def _register : NLdSt<1, 0b10, 0b1100, op7_4,
+ (outs VecListOneDAllLanes:$Vd, GPR:$wb),
+ (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD1dupu,
+ "vld1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLD1DupInstruction";
+ let AsmMatchConverter = "cvtVLDwbRegister";
+ }
}
-class VLD1QDUPWB<bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
- (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
- "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
- let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLD1DupInstruction";
+multiclass VLD1QDUPWB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<1, 0b10, 0b1100, op7_4,
+ (outs VecListDPairAllLanes:$Vd, GPR:$wb),
+ (ins addrmode6dup:$Rn), IIC_VLD1dupu,
+ "vld1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLD1DupInstruction";
+ let AsmMatchConverter = "cvtVLDwbFixed";
+ }
+ def _register : NLdSt<1, 0b10, 0b1100, op7_4,
+ (outs VecListDPairAllLanes:$Vd, GPR:$wb),
+ (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD1dupu,
+ "vld1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLD1DupInstruction";
+ let AsmMatchConverter = "cvtVLDwbRegister";
+ }
}
-def VLD1DUPd8_UPD : VLD1DUPWB<{0,0,0,0}, "8">;
-def VLD1DUPd16_UPD : VLD1DUPWB<{0,1,0,?}, "16">;
-def VLD1DUPd32_UPD : VLD1DUPWB<{1,0,0,?}, "32">;
-
-def VLD1DUPq8_UPD : VLD1QDUPWB<{0,0,1,0}, "8">;
-def VLD1DUPq16_UPD : VLD1QDUPWB<{0,1,1,?}, "16">;
-def VLD1DUPq32_UPD : VLD1QDUPWB<{1,0,1,?}, "32">;
+defm VLD1DUPd8wb : VLD1DUPWB<{0,0,0,0}, "8">;
+defm VLD1DUPd16wb : VLD1DUPWB<{0,1,0,?}, "16">;
+defm VLD1DUPd32wb : VLD1DUPWB<{1,0,0,?}, "32">;
-def VLD1DUPq8Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
-def VLD1DUPq16Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
-def VLD1DUPq32Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
+defm VLD1DUPq8wb : VLD1QDUPWB<{0,0,1,0}, "8">;
+defm VLD1DUPq16wb : VLD1QDUPWB<{0,1,1,?}, "16">;
+defm VLD1DUPq32wb : VLD1QDUPWB<{1,0,1,?}, "32">;
// VLD2DUP : Vector Load (single 2-element structure to all lanes)
-class VLD2DUP<bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2),
+class VLD2DUP<bits<4> op7_4, string Dt, RegisterOperand VdTy>
+ : NLdSt<1, 0b10, 0b1101, op7_4, (outs VdTy:$Vd),
(ins addrmode6dup:$Rn), IIC_VLD2dup,
- "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
+ "vld2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVLD2DupInstruction";
}
-def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8">;
-def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16">;
-def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32">;
-
-def VLD2DUPd8Pseudo : VLDQPseudo<IIC_VLD2dup>;
-def VLD2DUPd16Pseudo : VLDQPseudo<IIC_VLD2dup>;
-def VLD2DUPd32Pseudo : VLDQPseudo<IIC_VLD2dup>;
+def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8", VecListDPairAllLanes>;
+def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16", VecListDPairAllLanes>;
+def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32", VecListDPairAllLanes>;
-// ...with double-spaced registers (not used for codegen):
-def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8">;
-def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16">;
-def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32">;
+// ...with double-spaced registers
+def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8", VecListDPairSpacedAllLanes>;
+def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16", VecListDPairSpacedAllLanes>;
+def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32", VecListDPairSpacedAllLanes>;
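+// Spaced all-lanes form, e.g. (illustrative):
+//   vld2.8 {d0[], d2[]}, [r0]          @ duplicates into d0 and d2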
// ...with address register writeback:
-class VLD2DUPWB<bits<4> op7_4, string Dt>
- : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
- (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD2dupu,
- "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
- let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVLD2DupInstruction";
+multiclass VLD2DUPWB<bits<4> op7_4, string Dt, RegisterOperand VdTy> {
+ def _fixed : NLdSt<1, 0b10, 0b1101, op7_4,
+ (outs VdTy:$Vd, GPR:$wb),
+ (ins addrmode6dup:$Rn), IIC_VLD2dupu,
+ "vld2", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLD2DupInstruction";
+ let AsmMatchConverter = "cvtVLDwbFixed";
+ }
+ def _register : NLdSt<1, 0b10, 0b1101, op7_4,
+ (outs VdTy:$Vd, GPR:$wb),
+ (ins addrmode6dup:$Rn, rGPR:$Rm), IIC_VLD2dupu,
+ "vld2", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVLD2DupInstruction";
+ let AsmMatchConverter = "cvtVLDwbRegister";
+ }
}
-def VLD2DUPd8_UPD : VLD2DUPWB<{0,0,0,0}, "8">;
-def VLD2DUPd16_UPD : VLD2DUPWB<{0,1,0,?}, "16">;
-def VLD2DUPd32_UPD : VLD2DUPWB<{1,0,0,?}, "32">;
-
-def VLD2DUPd8x2_UPD : VLD2DUPWB<{0,0,1,0}, "8">;
-def VLD2DUPd16x2_UPD : VLD2DUPWB<{0,1,1,?}, "16">;
-def VLD2DUPd32x2_UPD : VLD2DUPWB<{1,0,1,?}, "32">;
+defm VLD2DUPd8wb : VLD2DUPWB<{0,0,0,0}, "8", VecListDPairAllLanes>;
+defm VLD2DUPd16wb : VLD2DUPWB<{0,1,0,?}, "16", VecListDPairAllLanes>;
+defm VLD2DUPd32wb : VLD2DUPWB<{1,0,0,?}, "32", VecListDPairAllLanes>;
-def VLD2DUPd8Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
-def VLD2DUPd16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
-def VLD2DUPd32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
+defm VLD2DUPd8x2wb : VLD2DUPWB<{0,0,1,0}, "8", VecListDPairSpacedAllLanes>;
+defm VLD2DUPd16x2wb : VLD2DUPWB<{0,1,1,?}, "16", VecListDPairSpacedAllLanes>;
+defm VLD2DUPd32x2wb : VLD2DUPWB<{1,0,1,?}, "32", VecListDPairSpacedAllLanes>;
// VLD3DUP : Vector Load (single 3-element structure to all lanes)
class VLD3DUP<bits<4> op7_4, string Dt>
@@ -1008,9 +1424,9 @@ def VLD3DUPd16Pseudo : VLDQQPseudo<IIC_VLD3dup>;
def VLD3DUPd32Pseudo : VLDQQPseudo<IIC_VLD3dup>;
// ...with double-spaced registers (not used for codegen):
-def VLD3DUPd8x2 : VLD3DUP<{0,0,1,?}, "8">;
-def VLD3DUPd16x2 : VLD3DUP<{0,1,1,?}, "16">;
-def VLD3DUPd32x2 : VLD3DUP<{1,0,1,?}, "32">;
+def VLD3DUPq8 : VLD3DUP<{0,0,1,?}, "8">;
+def VLD3DUPq16 : VLD3DUP<{0,1,1,?}, "16">;
+def VLD3DUPq32 : VLD3DUP<{1,0,1,?}, "32">;
// ...with address register writeback:
class VLD3DUPWB<bits<4> op7_4, string Dt>
@@ -1026,9 +1442,9 @@ def VLD3DUPd8_UPD : VLD3DUPWB<{0,0,0,0}, "8">;
def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16">;
def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32">;
-def VLD3DUPd8x2_UPD : VLD3DUPWB<{0,0,1,0}, "8">;
-def VLD3DUPd16x2_UPD : VLD3DUPWB<{0,1,1,?}, "16">;
-def VLD3DUPd32x2_UPD : VLD3DUPWB<{1,0,1,?}, "32">;
+def VLD3DUPq8_UPD : VLD3DUPWB<{0,0,1,0}, "8">;
+def VLD3DUPq16_UPD : VLD3DUPWB<{0,1,1,?}, "16">;
+def VLD3DUPq32_UPD : VLD3DUPWB<{1,0,1,?}, "32">;
def VLD3DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
def VLD3DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
@@ -1054,9 +1470,9 @@ def VLD4DUPd16Pseudo : VLDQQPseudo<IIC_VLD4dup>;
def VLD4DUPd32Pseudo : VLDQQPseudo<IIC_VLD4dup>;
// ...with double-spaced registers (not used for codegen):
-def VLD4DUPd8x2 : VLD4DUP<{0,0,1,?}, "8">;
-def VLD4DUPd16x2 : VLD4DUP<{0,1,1,?}, "16">;
-def VLD4DUPd32x2 : VLD4DUP<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
+def VLD4DUPq8 : VLD4DUP<{0,0,1,?}, "8">;
+def VLD4DUPq16 : VLD4DUP<{0,1,1,?}, "16">;
+def VLD4DUPq32 : VLD4DUP<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
// ...with address register writeback:
class VLD4DUPWB<bits<4> op7_4, string Dt>
@@ -1073,9 +1489,9 @@ def VLD4DUPd8_UPD : VLD4DUPWB<{0,0,0,0}, "8">;
def VLD4DUPd16_UPD : VLD4DUPWB<{0,1,0,?}, "16">;
def VLD4DUPd32_UPD : VLD4DUPWB<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }
-def VLD4DUPd8x2_UPD : VLD4DUPWB<{0,0,1,0}, "8">;
-def VLD4DUPd16x2_UPD : VLD4DUPWB<{0,1,1,?}, "16">;
-def VLD4DUPd32x2_UPD : VLD4DUPWB<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
+def VLD4DUPq8_UPD : VLD4DUPWB<{0,0,1,0}, "8">;
+def VLD4DUPq16_UPD : VLD4DUPWB<{0,1,1,?}, "16">;
+def VLD4DUPq32_UPD : VLD4DUPWB<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
def VLD4DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
def VLD4DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
@@ -1093,12 +1509,29 @@ class VSTQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs GPR:$wb),
(ins addrmode6:$addr, am6offset:$offset, QPR:$src), itin,
"$addr.addr = $wb">;
+class VSTQWBfixedPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs GPR:$wb),
+ (ins addrmode6:$addr, QPR:$src), itin,
+ "$addr.addr = $wb">;
+class VSTQWBregisterPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs GPR:$wb),
+ (ins addrmode6:$addr, rGPR:$offset, QPR:$src), itin,
+ "$addr.addr = $wb">;
class VSTQQPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src), itin, "">;
class VSTQQWBPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs GPR:$wb),
(ins addrmode6:$addr, am6offset:$offset, QQPR:$src), itin,
"$addr.addr = $wb">;
+class VSTQQWBfixedPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs GPR:$wb),
+ (ins addrmode6:$addr, QQPR:$src), itin,
+ "$addr.addr = $wb">;
+class VSTQQWBregisterPseudo<InstrItinClass itin>
+ : PseudoNLdSt<(outs GPR:$wb),
+ (ins addrmode6:$addr, rGPR:$offset, QQPR:$src), itin,
+ "$addr.addr = $wb">;
+
class VSTQQQQPseudo<InstrItinClass itin>
: PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src), itin, "">;
class VSTQQQQWBPseudo<InstrItinClass itin>
@@ -1108,16 +1541,15 @@ class VSTQQQQWBPseudo<InstrItinClass itin>
// VST1 : Vector Store (multiple single elements)
class VST1D<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$Rn, DPR:$Vd),
- IIC_VST1, "vst1", Dt, "\\{$Vd\\}, $Rn", "", []> {
+ : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$Rn, VecListOneD:$Vd),
+ IIC_VST1, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVSTInstruction";
}
class VST1Q<bits<4> op7_4, string Dt>
- : NLdSt<0,0b00,0b1010,op7_4, (outs),
- (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2), IIC_VST1x2,
- "vst1", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
+ : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$Rn, VecListDPair:$Vd),
+ IIC_VST1x2, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVSTInstruction";
@@ -1133,185 +1565,233 @@ def VST1q16 : VST1Q<{0,1,?,?}, "16">;
def VST1q32 : VST1Q<{1,0,?,?}, "32">;
def VST1q64 : VST1Q<{1,1,?,?}, "64">;
-def VST1q8Pseudo : VSTQPseudo<IIC_VST1x2>;
-def VST1q16Pseudo : VSTQPseudo<IIC_VST1x2>;
-def VST1q32Pseudo : VSTQPseudo<IIC_VST1x2>;
-def VST1q64Pseudo : VSTQPseudo<IIC_VST1x2>;
-
// ...with address register writeback:
-class VST1DWB<bits<4> op7_4, string Dt>
- : NLdSt<0, 0b00, 0b0111, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd), IIC_VST1u,
- "vst1", Dt, "\\{$Vd\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
- let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVSTInstruction";
+multiclass VST1DWB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0,0b00, 0b0111,op7_4, (outs GPR:$wb),
+                     (ins addrmode6:$Rn, VecListOneD:$Vd), IIC_VST1u,
+ "vst1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbFixed";
+ }
+ def _register : NLdSt<0,0b00,0b0111,op7_4, (outs GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm, VecListOneD:$Vd),
+                        IIC_VST1u,
+ "vst1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{4} = Rn{4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbRegister";
+ }
}
-class VST1QWB<bits<4> op7_4, string Dt>
- : NLdSt<0, 0b00, 0b1010, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd, DPR:$src2),
- IIC_VST1x2u, "vst1", Dt, "\\{$Vd, $src2\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+multiclass VST1QWB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
+                    (ins addrmode6:$Rn, VecListDPair:$Vd), IIC_VST1x2u,
+ "vst1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbFixed";
+ }
+ def _register : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm, VecListDPair:$Vd),
+                        IIC_VST1x2u,
+ "vst1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbRegister";
+ }
}
-def VST1d8_UPD : VST1DWB<{0,0,0,?}, "8">;
-def VST1d16_UPD : VST1DWB<{0,1,0,?}, "16">;
-def VST1d32_UPD : VST1DWB<{1,0,0,?}, "32">;
-def VST1d64_UPD : VST1DWB<{1,1,0,?}, "64">;
-
-def VST1q8_UPD : VST1QWB<{0,0,?,?}, "8">;
-def VST1q16_UPD : VST1QWB<{0,1,?,?}, "16">;
-def VST1q32_UPD : VST1QWB<{1,0,?,?}, "32">;
-def VST1q64_UPD : VST1QWB<{1,1,?,?}, "64">;
+defm VST1d8wb : VST1DWB<{0,0,0,?}, "8">;
+defm VST1d16wb : VST1DWB<{0,1,0,?}, "16">;
+defm VST1d32wb : VST1DWB<{1,0,0,?}, "32">;
+defm VST1d64wb : VST1DWB<{1,1,0,?}, "64">;
-def VST1q8Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
-def VST1q16Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
-def VST1q32Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
-def VST1q64Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
+defm VST1q8wb : VST1QWB<{0,0,?,?}, "8">;
+defm VST1q16wb : VST1QWB<{0,1,?,?}, "16">;
+defm VST1q32wb : VST1QWB<{1,0,?,?}, "32">;
+defm VST1q64wb : VST1QWB<{1,1,?,?}, "64">;
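+// As with the loads, the store writeback pair matches asm of the shape
+// (illustrative operands):
+//   vst1.8  {d0, d1}, [r0]!            @ _fixed form
+//   vst1.8  {d0, d1}, [r0], r2         @ _register form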
-// ...with 3 registers (some of these are only for the disassembler):
+// ...with 3 registers
class VST1D3<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0110, op7_4, (outs),
- (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3),
- IIC_VST1x3, "vst1", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
+ (ins addrmode6:$Rn, VecListThreeD:$Vd),
+ IIC_VST1x3, "vst1", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVSTInstruction";
}
-class VST1D3WB<bits<4> op7_4, string Dt>
- : NLdSt<0, 0b00, 0b0110, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm,
- DPR:$Vd, DPR:$src2, DPR:$src3),
- IIC_VST1x3u, "vst1", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{4} = Rn{4};
- let DecoderMethod = "DecodeVSTInstruction";
+multiclass VST1D3WB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
+                    (ins addrmode6:$Rn, VecListThreeD:$Vd), IIC_VST1x3u,
+ "vst1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbFixed";
+ }
+ def _register : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm, VecListThreeD:$Vd),
+                        IIC_VST1x3u,
+ "vst1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbRegister";
+ }
}
-def VST1d8T : VST1D3<{0,0,0,?}, "8">;
-def VST1d16T : VST1D3<{0,1,0,?}, "16">;
-def VST1d32T : VST1D3<{1,0,0,?}, "32">;
-def VST1d64T : VST1D3<{1,1,0,?}, "64">;
+def VST1d8T : VST1D3<{0,0,0,?}, "8">;
+def VST1d16T : VST1D3<{0,1,0,?}, "16">;
+def VST1d32T : VST1D3<{1,0,0,?}, "32">;
+def VST1d64T : VST1D3<{1,1,0,?}, "64">;
-def VST1d8T_UPD : VST1D3WB<{0,0,0,?}, "8">;
-def VST1d16T_UPD : VST1D3WB<{0,1,0,?}, "16">;
-def VST1d32T_UPD : VST1D3WB<{1,0,0,?}, "32">;
-def VST1d64T_UPD : VST1D3WB<{1,1,0,?}, "64">;
+defm VST1d8Twb : VST1D3WB<{0,0,0,?}, "8">;
+defm VST1d16Twb : VST1D3WB<{0,1,0,?}, "16">;
+defm VST1d32Twb : VST1D3WB<{1,0,0,?}, "32">;
+defm VST1d64Twb : VST1D3WB<{1,1,0,?}, "64">;
-def VST1d64TPseudo : VSTQQPseudo<IIC_VST1x3>;
-def VST1d64TPseudo_UPD : VSTQQWBPseudo<IIC_VST1x3u>;
+def VST1d64TPseudo : VSTQQPseudo<IIC_VST1x3>;
+def VST1d64TPseudoWB_fixed : VSTQQWBPseudo<IIC_VST1x3u>;
+def VST1d64TPseudoWB_register : VSTQQWBPseudo<IIC_VST1x3u>;
-// ...with 4 registers (some of these are only for the disassembler):
+// ...with 4 registers
class VST1D4<bits<4> op7_4, string Dt>
: NLdSt<0, 0b00, 0b0010, op7_4, (outs),
- (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST1x4, "vst1", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn", "",
+ (ins addrmode6:$Rn, VecListFourD:$Vd),
+ IIC_VST1x4, "vst1", Dt, "$Vd, $Rn", "",
[]> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVSTInstruction";
}
-class VST1D4WB<bits<4> op7_4, string Dt>
- : NLdSt<0, 0b00, 0b0010, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm,
- DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST1x4u,
- "vst1", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+multiclass VST1D4WB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
+                    (ins addrmode6:$Rn, VecListFourD:$Vd), IIC_VST1x4u,
+ "vst1", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbFixed";
+ }
+ def _register : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
+                        IIC_VST1x4u,
+ "vst1", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbRegister";
+ }
}
-def VST1d8Q : VST1D4<{0,0,?,?}, "8">;
-def VST1d16Q : VST1D4<{0,1,?,?}, "16">;
-def VST1d32Q : VST1D4<{1,0,?,?}, "32">;
-def VST1d64Q : VST1D4<{1,1,?,?}, "64">;
+def VST1d8Q : VST1D4<{0,0,?,?}, "8">;
+def VST1d16Q : VST1D4<{0,1,?,?}, "16">;
+def VST1d32Q : VST1D4<{1,0,?,?}, "32">;
+def VST1d64Q : VST1D4<{1,1,?,?}, "64">;
-def VST1d8Q_UPD : VST1D4WB<{0,0,?,?}, "8">;
-def VST1d16Q_UPD : VST1D4WB<{0,1,?,?}, "16">;
-def VST1d32Q_UPD : VST1D4WB<{1,0,?,?}, "32">;
-def VST1d64Q_UPD : VST1D4WB<{1,1,?,?}, "64">;
+defm VST1d8Qwb : VST1D4WB<{0,0,?,?}, "8">;
+defm VST1d16Qwb : VST1D4WB<{0,1,?,?}, "16">;
+defm VST1d32Qwb : VST1D4WB<{1,0,?,?}, "32">;
+defm VST1d64Qwb : VST1D4WB<{1,1,?,?}, "64">;
-def VST1d64QPseudo : VSTQQPseudo<IIC_VST1x4>;
-def VST1d64QPseudo_UPD : VSTQQWBPseudo<IIC_VST1x4u>;
+def VST1d64QPseudo : VSTQQPseudo<IIC_VST1x4>;
+def VST1d64QPseudoWB_fixed : VSTQQWBPseudo<IIC_VST1x4u>;
+def VST1d64QPseudoWB_register : VSTQQWBPseudo<IIC_VST1x4u>;
// VST2 : Vector Store (multiple 2-element structures)
-class VST2D<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<0, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2),
- IIC_VST2, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
- let Rm = 0b1111;
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
-}
-class VST2Q<bits<4> op7_4, string Dt>
- : NLdSt<0, 0b00, 0b0011, op7_4, (outs),
- (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST2x2, "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
- "", []> {
+class VST2<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy,
+ InstrItinClass itin>
+ : NLdSt<0, 0b00, op11_8, op7_4, (outs), (ins addrmode6:$Rn, VdTy:$Vd),
+ itin, "vst2", Dt, "$Vd, $Rn", "", []> {
let Rm = 0b1111;
let Inst{5-4} = Rn{5-4};
let DecoderMethod = "DecodeVSTInstruction";
}
-def VST2d8 : VST2D<0b1000, {0,0,?,?}, "8">;
-def VST2d16 : VST2D<0b1000, {0,1,?,?}, "16">;
-def VST2d32 : VST2D<0b1000, {1,0,?,?}, "32">;
+def VST2d8 : VST2<0b1000, {0,0,?,?}, "8", VecListDPair, IIC_VST2>;
+def VST2d16 : VST2<0b1000, {0,1,?,?}, "16", VecListDPair, IIC_VST2>;
+def VST2d32 : VST2<0b1000, {1,0,?,?}, "32", VecListDPair, IIC_VST2>;
-def VST2q8 : VST2Q<{0,0,?,?}, "8">;
-def VST2q16 : VST2Q<{0,1,?,?}, "16">;
-def VST2q32 : VST2Q<{1,0,?,?}, "32">;
-
-def VST2d8Pseudo : VSTQPseudo<IIC_VST2>;
-def VST2d16Pseudo : VSTQPseudo<IIC_VST2>;
-def VST2d32Pseudo : VSTQPseudo<IIC_VST2>;
+def VST2q8 : VST2<0b0011, {0,0,?,?}, "8", VecListFourD, IIC_VST2x2>;
+def VST2q16 : VST2<0b0011, {0,1,?,?}, "16", VecListFourD, IIC_VST2x2>;
+def VST2q32 : VST2<0b0011, {1,0,?,?}, "32", VecListFourD, IIC_VST2x2>;
def VST2q8Pseudo : VSTQQPseudo<IIC_VST2x2>;
def VST2q16Pseudo : VSTQQPseudo<IIC_VST2x2>;
def VST2q32Pseudo : VSTQQPseudo<IIC_VST2x2>;
// ...with address register writeback:
-class VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
- : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd, DPR:$src2),
- IIC_VST2u, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+multiclass VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt,
+ RegisterOperand VdTy> {
+ def _fixed : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
+                     (ins addrmode6:$Rn, VdTy:$Vd), IIC_VST2u,
+ "vst2", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbFixed";
+ }
+ def _register : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
+                        (ins addrmode6:$Rn, rGPR:$Rm, VdTy:$Vd), IIC_VST2u,
+ "vst2", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbRegister";
+ }
}
-class VST2QWB<bits<4> op7_4, string Dt>
- : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm,
- DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST2x2u,
- "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
- "$Rn.addr = $wb", []> {
- let Inst{5-4} = Rn{5-4};
- let DecoderMethod = "DecodeVSTInstruction";
+multiclass VST2QWB<bits<4> op7_4, string Dt> {
+ def _fixed : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
+                     (ins addrmode6:$Rn, VecListFourD:$Vd), IIC_VST2x2u,
+ "vst2", Dt, "$Vd, $Rn!",
+ "$Rn.addr = $wb", []> {
+ let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbFixed";
+ }
+ def _register : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
+ (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
+                        IIC_VST2x2u,
+ "vst2", Dt, "$Vd, $Rn, $Rm",
+ "$Rn.addr = $wb", []> {
+ let Inst{5-4} = Rn{5-4};
+ let DecoderMethod = "DecodeVSTInstruction";
+ let AsmMatchConverter = "cvtVSTwbRegister";
+ }
}
-def VST2d8_UPD : VST2DWB<0b1000, {0,0,?,?}, "8">;
-def VST2d16_UPD : VST2DWB<0b1000, {0,1,?,?}, "16">;
-def VST2d32_UPD : VST2DWB<0b1000, {1,0,?,?}, "32">;
+defm VST2d8wb : VST2DWB<0b1000, {0,0,?,?}, "8", VecListDPair>;
+defm VST2d16wb : VST2DWB<0b1000, {0,1,?,?}, "16", VecListDPair>;
+defm VST2d32wb : VST2DWB<0b1000, {1,0,?,?}, "32", VecListDPair>;
-def VST2q8_UPD : VST2QWB<{0,0,?,?}, "8">;
-def VST2q16_UPD : VST2QWB<{0,1,?,?}, "16">;
-def VST2q32_UPD : VST2QWB<{1,0,?,?}, "32">;
+defm VST2q8wb : VST2QWB<{0,0,?,?}, "8">;
+defm VST2q16wb : VST2QWB<{0,1,?,?}, "16">;
+defm VST2q32wb : VST2QWB<{1,0,?,?}, "32">;
-def VST2d8Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
-def VST2d16Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
-def VST2d32Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
+def VST2q8PseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST2x2u>;
+def VST2q16PseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST2x2u>;
+def VST2q32PseudoWB_fixed : VSTQQWBfixedPseudo<IIC_VST2x2u>;
+def VST2q8PseudoWB_register : VSTQQWBregisterPseudo<IIC_VST2x2u>;
+def VST2q16PseudoWB_register : VSTQQWBregisterPseudo<IIC_VST2x2u>;
+def VST2q32PseudoWB_register : VSTQQWBregisterPseudo<IIC_VST2x2u>;
-def VST2q8Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
-def VST2q16Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
-def VST2q32Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
-
-// ...with double-spaced registers (for disassembly only):
-def VST2b8 : VST2D<0b1001, {0,0,?,?}, "8">;
-def VST2b16 : VST2D<0b1001, {0,1,?,?}, "16">;
-def VST2b32 : VST2D<0b1001, {1,0,?,?}, "32">;
-def VST2b8_UPD : VST2DWB<0b1001, {0,0,?,?}, "8">;
-def VST2b16_UPD : VST2DWB<0b1001, {0,1,?,?}, "16">;
-def VST2b32_UPD : VST2DWB<0b1001, {1,0,?,?}, "32">;
+// ...with double-spaced registers
+def VST2b8 : VST2<0b1001, {0,0,?,?}, "8", VecListDPairSpaced, IIC_VST2>;
+def VST2b16 : VST2<0b1001, {0,1,?,?}, "16", VecListDPairSpaced, IIC_VST2>;
+def VST2b32 : VST2<0b1001, {1,0,?,?}, "32", VecListDPairSpaced, IIC_VST2>;
+defm VST2b8wb : VST2DWB<0b1001, {0,0,?,?}, "8", VecListDPairSpaced>;
+defm VST2b16wb : VST2DWB<0b1001, {0,1,?,?}, "16", VecListDPairSpaced>;
+defm VST2b32wb : VST2DWB<0b1001, {1,0,?,?}, "32", VecListDPairSpaced>;
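+// Double-spaced store, e.g. (illustrative): vst2.16 {d0, d2}, [r0]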
// VST3 : Vector Store (multiple 3-element structures)
class VST3D<bits<4> op11_8, bits<4> op7_4, string Dt>
@@ -1458,20 +1938,11 @@ class VSTQQQQLNWBPseudo<InstrItinClass itin>
// VST1LN : Vector Store (single element from one lane)
class VST1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
- PatFrag StoreOp, SDNode ExtractOp>
- : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6:$Rn, DPR:$Vd, nohash_imm:$lane),
- IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
- [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6:$Rn)]> {
- let Rm = 0b1111;
- let DecoderMethod = "DecodeVST1LN";
-}
-class VST1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
- PatFrag StoreOp, SDNode ExtractOp>
+ PatFrag StoreOp, SDNode ExtractOp, Operand AddrMode>
: NLdStLn<1, 0b00, op11_8, op7_4, (outs),
- (ins addrmode6oneL32:$Rn, DPR:$Vd, nohash_imm:$lane),
+ (ins AddrMode:$Rn, DPR:$Vd, nohash_imm:$lane),
IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
- [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6oneL32:$Rn)]>{
+ [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), AddrMode:$Rn)]> {
let Rm = 0b1111;
let DecoderMethod = "DecodeVST1LN";
}
@@ -1482,16 +1953,17 @@ class VST1QLNPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
}
def VST1LNd8 : VST1LN<0b0000, {?,?,?,0}, "8", v8i8, truncstorei8,
- NEONvgetlaneu> {
+ NEONvgetlaneu, addrmode6> {
let Inst{7-5} = lane{2-0};
}
def VST1LNd16 : VST1LN<0b0100, {?,?,0,?}, "16", v4i16, truncstorei16,
- NEONvgetlaneu> {
+ NEONvgetlaneu, addrmode6> {
let Inst{7-6} = lane{1-0};
let Inst{4} = Rn{5};
}
-def VST1LNd32 : VST1LN32<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt> {
+def VST1LNd32 : VST1LN<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt,
+ addrmode6oneL32> {
let Inst{7} = lane{0};
let Inst{5-4} = Rn{5-4};
}
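+// Lane stores write a single element to memory, e.g. (illustrative):
+//   vst1.32 {d0[1]}, [r0]              @ store the upper 32-bit lane of d0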
@@ -1507,14 +1979,14 @@ def : Pat<(store (extractelt (v4f32 QPR:$src), imm:$lane), addrmode6:$addr),
// ...with address register writeback:
class VST1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
- PatFrag StoreOp, SDNode ExtractOp>
+               PatFrag StoreOp, SDNode ExtractOp, Operand AddrMode>
: NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$Rn, am6offset:$Rm,
+          (ins AddrMode:$Rn, am6offset:$Rm,
DPR:$Vd, nohash_imm:$lane), IIC_VST1lnu, "vst1", Dt,
"\\{$Vd[$lane]\\}, $Rn$Rm",
"$Rn.addr = $wb",
[(set GPR:$wb, (StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane),
- addrmode6:$Rn, am6offset:$Rm))]> {
+                          AddrMode:$Rn, am6offset:$Rm))]> {
let DecoderMethod = "DecodeVST1LN";
}
class VST1QLNWBPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
@@ -1524,16 +1996,16 @@ class VST1QLNWBPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
}
def VST1LNd8_UPD : VST1LNWB<0b0000, {?,?,?,0}, "8", v8i8, post_truncsti8,
- NEONvgetlaneu> {
+ NEONvgetlaneu, addrmode6> {
let Inst{7-5} = lane{2-0};
}
def VST1LNd16_UPD : VST1LNWB<0b0100, {?,?,0,?}, "16", v4i16, post_truncsti16,
- NEONvgetlaneu> {
+ NEONvgetlaneu, addrmode6> {
let Inst{7-6} = lane{1-0};
let Inst{4} = Rn{5};
}
def VST1LNd32_UPD : VST1LNWB<0b1000, {?,0,?,?}, "32", v2i32, post_store,
- extractelt> {
+ extractelt, addrmode6oneL32> {
let Inst{7} = lane{0};
let Inst{5-4} = Rn{5-4};
}
@@ -1585,10 +2057,10 @@ def VST2LNq32Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
// ...with address register writeback:
class VST2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
: NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, am6offset:$offset,
- DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VST2lnu, "vst2", Dt,
- "\\{$src1[$lane], $src2[$lane]\\}, $addr$offset",
- "$addr.addr = $wb", []> {
+ (ins addrmode6:$Rn, am6offset:$Rm,
+ DPR:$Vd, DPR:$src2, nohash_imm:$lane), IIC_VST2lnu, "vst2", Dt,
+ "\\{$Vd[$lane], $src2[$lane]\\}, $Rn$Rm",
+ "$Rn.addr = $wb", []> {
let Inst{4} = Rn{4};
let DecoderMethod = "DecodeVST2LN";
}
@@ -1914,8 +2386,8 @@ class N3VDSL<bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType Ty, SDNode ShOp>
: N3VLane32<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set (Ty DPR:$Vd),
(Ty (ShOp (Ty DPR:$Vn),
(Ty (NEONvduplane (Ty DPR_VFP2:$Vm),imm:$lane)))))]> {
@@ -1924,8 +2396,8 @@ class N3VDSL<bits<2> op21_20, bits<4> op11_8,
class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
: N3VLane16<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
- NVMulSLFrm, IIC_VMULi16D, OpcodeStr, Dt,"$Vd, $Vn, $Vm[$lane]","",
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
+ NVMulSLFrm, IIC_VMULi16D, OpcodeStr, Dt,"$Vd, $Vn, $Vm$lane","",
[(set (Ty DPR:$Vd),
(Ty (ShOp (Ty DPR:$Vn),
(Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
@@ -1954,8 +2426,8 @@ class N3VQSL<bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode ShOp>
: N3VLane32<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set (ResTy QPR:$Vd),
(ResTy (ShOp (ResTy QPR:$Vn),
(ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
@@ -1965,8 +2437,8 @@ class N3VQSL<bits<2> op21_20, bits<4> op11_8,
class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode ShOp>
: N3VLane16<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
- NVMulSLFrm, IIC_VMULi16Q, OpcodeStr, Dt,"$Vd, $Vn, $Vm[$lane]","",
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
+ NVMulSLFrm, IIC_VMULi16Q, OpcodeStr, Dt,"$Vd, $Vn, $Vm$lane", "",
[(set (ResTy QPR:$Vd),
(ResTy (ShOp (ResTy QPR:$Vn),
(ResTy (NEONvduplane (OpTy DPR_8:$Vm),
@@ -1987,8 +2459,8 @@ class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
: N3VLane32<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set (Ty DPR:$Vd),
(Ty (IntOp (Ty DPR:$Vn),
(Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
@@ -1998,8 +2470,8 @@ class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
: N3VLane16<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set (Ty DPR:$Vd),
(Ty (IntOp (Ty DPR:$Vn),
(Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
@@ -2028,8 +2500,8 @@ class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3VLane32<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set (ResTy QPR:$Vd),
(ResTy (IntOp (ResTy QPR:$Vn),
(ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
@@ -2040,8 +2512,8 @@ class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3VLane16<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set (ResTy QPR:$Vd),
(ResTy (IntOp (ResTy QPR:$Vn),
(ResTy (NEONvduplane (OpTy DPR_8:$Vm),
@@ -2073,9 +2545,9 @@ class N3VDMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
ValueType Ty, SDPatternOperator MulOp, SDPatternOperator ShOp>
: N3VLane32<0, 1, op21_20, op11_8, 1, 0,
(outs DPR:$Vd),
- (ins DPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ (ins DPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
[(set (Ty DPR:$Vd),
(Ty (ShOp (Ty DPR:$src1),
(Ty (MulOp DPR:$Vn,
@@ -2086,9 +2558,9 @@ class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
ValueType Ty, SDNode MulOp, SDNode ShOp>
: N3VLane16<0, 1, op21_20, op11_8, 1, 0,
(outs DPR:$Vd),
- (ins DPR:$src1, DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ (ins DPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
[(set (Ty DPR:$Vd),
(Ty (ShOp (Ty DPR:$src1),
(Ty (MulOp DPR:$Vn,
@@ -2108,9 +2580,9 @@ class N3VQMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
SDPatternOperator MulOp, SDPatternOperator ShOp>
: N3VLane32<1, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd),
- (ins QPR:$src1, QPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ (ins QPR:$src1, QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
[(set (ResTy QPR:$Vd),
(ResTy (ShOp (ResTy QPR:$src1),
(ResTy (MulOp QPR:$Vn,
@@ -2122,9 +2594,9 @@ class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
SDNode MulOp, SDNode ShOp>
: N3VLane16<1, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd),
- (ins QPR:$src1, QPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ (ins QPR:$src1, QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
[(set (ResTy QPR:$Vd),
(ResTy (ShOp (ResTy QPR:$src1),
(ResTy (MulOp QPR:$Vn,
@@ -2182,9 +2654,9 @@ class N3VLMulOpSL<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
: N3VLane32<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
- (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
[(set QPR:$Vd,
(OpNode (TyQ QPR:$src1),
(TyQ (MulOp (TyD DPR:$Vn),
@@ -2194,9 +2666,9 @@ class N3VLMulOpSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
: N3VLane16<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
- (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
[(set QPR:$Vd,
(OpNode (TyQ QPR:$src1),
(TyQ (MulOp (TyD DPR:$Vn),
@@ -2230,9 +2702,9 @@ class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd),
- (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
+ (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
[(set (ResTy QPR:$Vd),
(ResTy (IntOp (ResTy QPR:$src1),
(OpTy DPR:$Vn),
@@ -2243,9 +2715,9 @@ class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
(outs QPR:$Vd),
- (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
+ (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
NVMulSLFrm, itin,
- OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "$src1 = $Vd",
+ OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
[(set (ResTy QPR:$Vd),
(ResTy (IntOp (ResTy QPR:$src1),
(OpTy DPR:$Vn),
@@ -2277,8 +2749,8 @@ class N3VLSL<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode OpNode>
: N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set QPR:$Vd,
(TyQ (OpNode (TyD DPR:$Vn),
(TyD (NEONvduplane (TyD DPR_VFP2:$Vm),imm:$lane)))))]>;
@@ -2286,8 +2758,8 @@ class N3VLSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType TyQ, ValueType TyD, SDNode OpNode>
: N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set QPR:$Vd,
(TyQ (OpNode (TyD DPR:$Vn),
(TyD (NEONvduplane (TyD DPR_8:$Vm), imm:$lane)))))]>;
@@ -2332,8 +2804,8 @@ class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set (ResTy QPR:$Vd),
(ResTy (IntOp (OpTy DPR:$Vn),
(OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
@@ -2342,8 +2814,8 @@ class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
: N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, nohash_imm:$lane),
- NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm[$lane]", "",
+ (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
+ NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
[(set (ResTy QPR:$Vd),
(ResTy (IntOp (OpTy DPR:$Vn),
(OpTy (NEONvduplane (OpTy DPR_8:$Vm),
@@ -2417,9 +2889,9 @@ class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
// Long shift by immediate.
class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, SDNode OpNode>
+ ValueType ResTy, ValueType OpTy, Operand ImmTy, SDNode OpNode>
: N2VImm<op24, op23, op11_8, op7, op6, op4,
- (outs QPR:$Vd), (ins DPR:$Vm, i32imm:$SIMM), N2RegVShLFrm,
+ (outs QPR:$Vd), (ins DPR:$Vm, ImmTy:$SIMM), N2RegVShLFrm,
IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
[(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm),
(i32 imm:$SIMM))))]>;
@@ -2649,14 +3121,11 @@ multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
v4i32, v4i32, OpNode, Commutable>;
}
-multiclass N3VSL_HS<bits<4> op11_8, string OpcodeStr, string Dt, SDNode ShOp> {
- def v4i16 : N3VDSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
- v4i16, ShOp>;
- def v2i32 : N3VDSL<0b10, op11_8, IIC_VMULi32D, OpcodeStr, !strconcat(Dt,"32"),
- v2i32, ShOp>;
- def v8i16 : N3VQSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
- v8i16, v4i16, ShOp>;
- def v4i32 : N3VQSL<0b10, op11_8, IIC_VMULi32Q, OpcodeStr, !strconcat(Dt,"32"),
+multiclass N3VSL_HS<bits<4> op11_8, string OpcodeStr, SDNode ShOp> {
+ def v4i16 : N3VDSL16<0b01, op11_8, OpcodeStr, "i16", v4i16, ShOp>;
+ def v2i32 : N3VDSL<0b10, op11_8, IIC_VMULi32D, OpcodeStr, "i32", v2i32, ShOp>;
+ def v8i16 : N3VQSL16<0b01, op11_8, OpcodeStr, "i16", v8i16, v4i16, ShOp>;
+ def v4i32 : N3VQSL<0b10, op11_8, IIC_VMULi32Q, OpcodeStr, "i32",
v4i32, v2i32, ShOp>;
}
@@ -3165,7 +3634,7 @@ multiclass N2VShL_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
}
multiclass N2VShR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
- SDNode OpNode> {
+ string baseOpc, SDNode OpNode> {
// 64-bit vector types.
def v8i8 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
@@ -3199,6 +3668,33 @@ multiclass N2VShR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
// imm6 = xxxxxx
+
+ // Aliases for two-operand forms (source and dest regs the same).
+ def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "8 $Vdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i8"))
+ DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
+ def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "16 $Vdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i16"))
+ DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
+ def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "32 $Vdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "v2i32"))
+ DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
+ def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "64 $Vdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "v1i64"))
+ DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;
+
+ def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "8 $Vdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "v16i8"))
+ QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
+ def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "16 $Vdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "v8i16"))
+ QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
+ def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "32 $Vdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "v4i32"))
+ QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
+ def : NEONInstAlias<!strconcat(OpcodeStr, "${p}.", Dt, "64 $Vdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "v2i64"))
+ QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
}
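+// These aliases let the assembler accept the common two-operand spelling,
+// e.g. (illustrative) "vshr.s8 d0, #1" for "vshr.s8 d0, d0, #1".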
// Neon Shift-Accumulate vector operations,
@@ -3321,15 +3817,15 @@ multiclass N2VShInsR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
bit op4, string OpcodeStr, string Dt, SDNode OpNode> {
def v8i16 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
- OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode> {
+ OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, imm1_7, OpNode> {
let Inst{21-19} = 0b001; // imm6 = 001xxx
}
def v4i32 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
- OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode> {
+ OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, imm1_15, OpNode> {
let Inst{21-20} = 0b01; // imm6 = 01xxxx
}
def v2i64 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode> {
+ OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, imm1_31, OpNode> {
let Inst{21} = 0b1; // imm6 = 1xxxxx
}
}
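+// The imm1_7/imm1_15/imm1_31 operands bound the shift to one less than the
+// source element width, e.g. (illustrative) vshll.s16 q0, d1, #15 is the
+// widest immediate the v4i32 instantiation accepts.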
@@ -3418,7 +3914,7 @@ def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, IIC_VFMULD, "vmul", "f32",
v2f32, v2f32, fmul, 1>;
def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, IIC_VFMULQ, "vmul", "f32",
v4f32, v4f32, fmul, 1>;
-defm VMULsl : N3VSL_HS<0b1000, "vmul", "i", mul>;
+defm VMULsl : N3VSL_HS<0b1000, "vmul", mul>;
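+// e.g. (illustrative): vmul.i16 d0, d1, d2[3] multiplies by a broadcast lane.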
def VMULslfd : N3VDSL<0b10, 0b1001, IIC_VBIND, "vmul", "f32", v2f32, fmul>;
def VMULslfq : N3VQSL<0b10, 0b1001, IIC_VBINQ, "vmul", "f32", v4f32,
v2f32, fmul>;
@@ -3509,10 +4005,10 @@ defm VMLA : N3VMulOp_QHS<0, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
def VMLAfd : N3VDMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACD, "vmla", "f32",
v2f32, fmul_su, fadd_mlx>,
- Requires<[HasNEON, UseFPVMLx]>;
+ Requires<[HasNEON, UseFPVMLx, DontUseFusedMAC]>;
def VMLAfq : N3VQMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACQ, "vmla", "f32",
v4f32, fmul_su, fadd_mlx>,
- Requires<[HasNEON, UseFPVMLx]>;
+ Requires<[HasNEON, UseFPVMLx, DontUseFusedMAC]>;
defm VMLAsl : N3VMulOpSL_HS<0b0000, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
def VMLAslfd : N3VDMulOpSL<0b10, 0b0001, IIC_VMACD, "vmla", "f32",
@@ -3567,10 +4063,10 @@ defm VMLS : N3VMulOp_QHS<1, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
def VMLSfd : N3VDMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACD, "vmls", "f32",
v2f32, fmul_su, fsub_mlx>,
- Requires<[HasNEON, UseFPVMLx]>;
+ Requires<[HasNEON, UseFPVMLx, DontUseFusedMAC]>;
def VMLSfq : N3VQMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACQ, "vmls", "f32",
v4f32, fmul_su, fsub_mlx>,
- Requires<[HasNEON, UseFPVMLx]>;
+ Requires<[HasNEON, UseFPVMLx, DontUseFusedMAC]>;
defm VMLSsl : N3VMulOpSL_HS<0b0100, IIC_VMACi16D, IIC_VMACi32D,
IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
def VMLSslfd : N3VDMulOpSL<0b10, 0b0101, IIC_VMACD, "vmls", "f32",
@@ -3619,6 +4115,37 @@ defm VQDMLSL : N3VLInt3_HS<0, 1, 0b1011, 0, IIC_VMACi16D, IIC_VMACi32D,
"vqdmlsl", "s", int_arm_neon_vqdmlsl>;
defm VQDMLSLsl: N3VLInt3SL_HS<0, 0b111, "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
+// Fused Vector Multiply-Accumulate and Fused Multiply-Subtract Operations.
+def VFMAfd : N3VDMulOp<0, 0, 0b00, 0b1100, 1, IIC_VFMACD, "vfma", "f32",
+ v2f32, fmul_su, fadd_mlx>,
+ Requires<[HasVFP4, UseFusedMAC]>;
+
+def VFMAfq : N3VQMulOp<0, 0, 0b00, 0b1100, 1, IIC_VFMACQ, "vfma", "f32",
+ v4f32, fmul_su, fadd_mlx>,
+ Requires<[HasVFP4, UseFusedMAC]>;
+
+// Fused Vector Multiply-Subtract (floating-point)
+def VFMSfd : N3VDMulOp<0, 0, 0b10, 0b1100, 1, IIC_VFMACD, "vfms", "f32",
+ v2f32, fmul_su, fsub_mlx>,
+ Requires<[HasVFP4, UseFusedMAC]>;
+def VFMSfq : N3VQMulOp<0, 0, 0b10, 0b1100, 1, IIC_VFMACQ, "vfms", "f32",
+ v4f32, fmul_su, fsub_mlx>,
+ Requires<[HasVFP4, UseFusedMAC]>;
+
+// Match @llvm.fma.* intrinsics
+def : Pat<(v2f32 (fma DPR:$src1, DPR:$Vn, DPR:$Vm)),
+ (VFMAfd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(v4f32 (fma QPR:$src1, QPR:$Vn, QPR:$Vm)),
+ (VFMAfq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(v2f32 (fma (fneg DPR:$src1), DPR:$Vn, DPR:$Vm)),
+ (VFMSfd DPR:$src1, DPR:$Vn, DPR:$Vm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(v4f32 (fma (fneg QPR:$src1), QPR:$Vn, QPR:$Vm)),
+ (VFMSfq QPR:$src1, QPR:$Vn, QPR:$Vm)>,
+ Requires<[HasVFP4]>;
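+// For example (an illustrative sketch, not part of this patch), with VFP4
+// available an IR call such as
+//   %r = call <2 x float> @llvm.fma.v2f32(<2 x float> %a, <2 x float> %b,
+//                                         <2 x float> %c)
+// now selects to a single fused "vfma.f32" rather than a vmul/vadd pair.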
+
// Vector Subtract Operations.
// VSUB : Vector Subtract (integer and floating-point)
@@ -3741,7 +4268,7 @@ def VORRq : N3VQX<0, 0, 0b10, 0b0001, 1, IIC_VBINiQ, "vorr",
v4i32, v4i32, or, 1>;
def VORRiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 0, 1,
- (outs DPR:$Vd), (ins nModImm:$SIMM, DPR:$src),
+ (outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
IIC_VMOVImm,
"vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
[(set DPR:$Vd,
@@ -3750,7 +4277,7 @@ def VORRiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 0, 1,
}
def VORRiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 0, 1,
- (outs DPR:$Vd), (ins nModImm:$SIMM, DPR:$src),
+ (outs DPR:$Vd), (ins nImmSplatI32:$SIMM, DPR:$src),
IIC_VMOVImm,
"vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
[(set DPR:$Vd,
@@ -3759,7 +4286,7 @@ def VORRiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 0, 1,
}
def VORRiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 0, 1,
- (outs QPR:$Vd), (ins nModImm:$SIMM, QPR:$src),
+ (outs QPR:$Vd), (ins nImmSplatI16:$SIMM, QPR:$src),
IIC_VMOVImm,
"vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
[(set QPR:$Vd,
@@ -3768,7 +4295,7 @@ def VORRiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 0, 1,
}
def VORRiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 0, 1,
- (outs QPR:$Vd), (ins nModImm:$SIMM, QPR:$src),
+ (outs QPR:$Vd), (ins nImmSplatI32:$SIMM, QPR:$src),
IIC_VMOVImm,
"vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
[(set QPR:$Vd,
@@ -3790,7 +4317,7 @@ def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
(vnotq QPR:$Vm))))]>;
def VBICiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 1, 1,
- (outs DPR:$Vd), (ins nModImm:$SIMM, DPR:$src),
+ (outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
IIC_VMOVImm,
"vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
[(set DPR:$Vd,
@@ -3799,7 +4326,7 @@ def VBICiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 1, 1,
}
def VBICiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 1, 1,
- (outs DPR:$Vd), (ins nModImm:$SIMM, DPR:$src),
+ (outs DPR:$Vd), (ins nImmSplatI32:$SIMM, DPR:$src),
IIC_VMOVImm,
"vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
[(set DPR:$Vd,
@@ -3808,7 +4335,7 @@ def VBICiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 1, 1,
}
def VBICiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 1, 1,
- (outs QPR:$Vd), (ins nModImm:$SIMM, QPR:$src),
+ (outs QPR:$Vd), (ins nImmSplatI16:$SIMM, QPR:$src),
IIC_VMOVImm,
"vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
[(set QPR:$Vd,
@@ -3817,7 +4344,7 @@ def VBICiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 1, 1,
}
def VBICiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 1, 1,
- (outs QPR:$Vd), (ins nModImm:$SIMM, QPR:$src),
+ (outs QPR:$Vd), (ins nImmSplatI32:$SIMM, QPR:$src),
IIC_VMOVImm,
"vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
[(set QPR:$Vd,
@@ -3842,28 +4369,28 @@ def VORNq : N3VX<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$Vd),
let isReMaterializable = 1 in {
def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
"vmvn", "i16", "$Vd, $SIMM", "",
[(set DPR:$Vd, (v4i16 (NEONvmvnImm timm:$SIMM)))]> {
let Inst{9} = SIMM{9};
}
def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
"vmvn", "i16", "$Vd, $SIMM", "",
[(set QPR:$Vd, (v8i16 (NEONvmvnImm timm:$SIMM)))]> {
let Inst{9} = SIMM{9};
}
def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
"vmvn", "i32", "$Vd, $SIMM", "",
[(set DPR:$Vd, (v2i32 (NEONvmvnImm timm:$SIMM)))]> {
let Inst{11-8} = SIMM{11-8};
}
def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
"vmvn", "i32", "$Vd, $SIMM", "",
[(set QPR:$Vd, (v4i32 (NEONvmvnImm timm:$SIMM)))]> {
let Inst{11-8} = SIMM{11-8};
@@ -3912,12 +4439,12 @@ def VBIFd : N3VX<1, 0, 0b11, 0b0001, 0, 1,
(outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
N3RegFrm, IIC_VBINiD,
"vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
- [/* For disassembly only; pattern left blank */]>;
+ []>;
def VBIFq : N3VX<1, 0, 0b11, 0b0001, 1, 1,
(outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
N3RegFrm, IIC_VBINiQ,
"vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
- [/* For disassembly only; pattern left blank */]>;
+ []>;
// VBIT : Vector Bitwise Insert if True
// like VBSL but with: "vbit $dst, $src2, $src1", "$src3 = $dst",
@@ -3926,12 +4453,12 @@ def VBITd : N3VX<1, 0, 0b10, 0b0001, 0, 1,
(outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
N3RegFrm, IIC_VBINiD,
"vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
- [/* For disassembly only; pattern left blank */]>;
+ []>;
def VBITq : N3VX<1, 0, 0b10, 0b0001, 1, 1,
(outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
N3RegFrm, IIC_VBINiQ,
"vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
- [/* For disassembly only; pattern left blank */]>;
+ []>;
// VBIT/VBIF are not yet implemented. The TwoAddress pass will not go looking
// for equivalent operations with different register constraints; it just
@@ -4119,8 +4646,10 @@ defm VSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 0, N3RegVShFrm,
defm VSHLi : N2VShL_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl>;
// VSHR : Vector Shift Right (Immediate)
-defm VSHRs : N2VShR_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s",NEONvshrs>;
-defm VSHRu : N2VShR_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u",NEONvshru>;
+defm VSHRs : N2VShR_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s", "VSHRs",
+ NEONvshrs>;
+defm VSHRu : N2VShR_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u", "VSHRu",
+ NEONvshru>;
// VSHLL : Vector Shift Left Long
defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
@@ -4129,18 +4658,18 @@ defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u", NEONvshllu>;
// VSHLL : Vector Shift Left Long (with maximum shift count)
class N2VLShMax<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
bit op6, bit op4, string OpcodeStr, string Dt, ValueType ResTy,
- ValueType OpTy, SDNode OpNode>
+ ValueType OpTy, Operand ImmTy, SDNode OpNode>
: N2VLSh<op24, op23, op11_8, op7, op6, op4, OpcodeStr, Dt,
- ResTy, OpTy, OpNode> {
+ ResTy, OpTy, ImmTy, OpNode> {
let Inst{21-16} = op21_16;
let DecoderMethod = "DecodeVSHLMaxInstruction";
}
def VSHLLi8 : N2VLShMax<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll", "i8",
- v8i16, v8i8, NEONvshlli>;
+ v8i16, v8i8, imm8, NEONvshlli>;
def VSHLLi16 : N2VLShMax<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll", "i16",
- v4i32, v4i16, NEONvshlli>;
+ v4i32, v4i16, imm16, NEONvshlli>;
def VSHLLi32 : N2VLShMax<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll", "i32",
- v2i64, v2i32, NEONvshlli>;
+ v2i64, v2i32, imm32, NEONvshlli>;
// VSHRN : Vector Shift Right and Narrow
defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
@@ -4154,8 +4683,10 @@ defm VRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 0, N3RegVShFrm,
IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
"vrshl", "u", int_arm_neon_vrshiftu>;
// VRSHR : Vector Rounding Shift Right
-defm VRSHRs : N2VShR_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s",NEONvrshrs>;
-defm VRSHRu : N2VShR_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u",NEONvrshru>;
+defm VRSHRs : N2VShR_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s", "VRSHRs",
+ NEONvrshrs>;
+defm VRSHRu : N2VShR_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u", "VRSHRu",
+ NEONvrshru>;
// VRSHRN : Vector Rounding Shift Right and Narrow
defm VRSHRN : N2VNSh_HSD<0, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vrshrn", "i",
@@ -4298,13 +4829,15 @@ def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
IIC_VCNTiQ, "vcnt", "8",
v16i8, v16i8, int_arm_neon_vcnt>;
-// Vector Swap -- for disassembly only.
+// Vector Swap
def VSWPd : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 0, 0,
- (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
- "vswp", "$Vd, $Vm", "", []>;
+ (outs DPR:$Vd, DPR:$Vm), (ins DPR:$in1, DPR:$in2),
+ NoItinerary, "vswp", "$Vd, $Vm", "$in1 = $Vd, $in2 = $Vm",
+ []>;
def VSWPq : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 1, 0,
- (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
- "vswp", "$Vd, $Vm", "", []>;
+ (outs QPR:$Vd, QPR:$Vm), (ins QPR:$in1, QPR:$in2),
+ NoItinerary, "vswp", "$Vd, $Vm", "$in1 = $Vd, $in2 = $Vm",
+ []>;
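+// Both registers are now modeled as outputs tied to their inputs, so the
+// definition reflects that a swap writes both $Vd and $Vm (previously only
+// $Vd was an output); the encoding itself is unchanged.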
// Vector Move Operations.
@@ -4318,89 +4851,98 @@ def : InstAlias<"vmov${p} $Vd, $Vm",
let isReMaterializable = 1 in {
def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
"vmov", "i8", "$Vd, $SIMM", "",
[(set DPR:$Vd, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
"vmov", "i8", "$Vd, $SIMM", "",
[(set QPR:$Vd, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
"vmov", "i16", "$Vd, $SIMM", "",
[(set DPR:$Vd, (v4i16 (NEONvmovImm timm:$SIMM)))]> {
let Inst{9} = SIMM{9};
}
def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
"vmov", "i16", "$Vd, $SIMM", "",
[(set QPR:$Vd, (v8i16 (NEONvmovImm timm:$SIMM)))]> {
let Inst{9} = SIMM{9};
}
def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
"vmov", "i32", "$Vd, $SIMM", "",
[(set DPR:$Vd, (v2i32 (NEONvmovImm timm:$SIMM)))]> {
let Inst{11-8} = SIMM{11-8};
}
def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
"vmov", "i32", "$Vd, $SIMM", "",
[(set QPR:$Vd, (v4i32 (NEONvmovImm timm:$SIMM)))]> {
let Inst{11-8} = SIMM{11-8};
}
def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
"vmov", "i64", "$Vd, $SIMM", "",
[(set DPR:$Vd, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$Vd),
- (ins nModImm:$SIMM), IIC_VMOVImm,
+ (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
"vmov", "i64", "$Vd, $SIMM", "",
[(set QPR:$Vd, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
+
+def VMOVv2f32 : N1ModImm<1, 0b000, 0b1111, 0, 0, 0, 1, (outs DPR:$Vd),
+ (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
+ "vmov", "f32", "$Vd, $SIMM", "",
+ [(set DPR:$Vd, (v2f32 (NEONvmovFPImm timm:$SIMM)))]>;
+def VMOVv4f32 : N1ModImm<1, 0b000, 0b1111, 0, 1, 0, 1, (outs QPR:$Vd),
+ (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
+ "vmov", "f32", "$Vd, $SIMM", "",
+ [(set QPR:$Vd, (v4f32 (NEONvmovFPImm timm:$SIMM)))]>;
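+// The f32 forms take an 8-bit modified-immediate encoding, so only a small
+// set of floating-point constants is representable, e.g. (illustrative
+// example, not from this patch): "vmov.f32 d0, #1.0".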
} // isReMaterializable
// VMOV : Vector Get Lane (move scalar to ARM core register)
def VGETLNs8 : NVGetLane<{1,1,1,0,0,1,?,1}, 0b1011, {?,?},
- (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "s8", "$R, $V[$lane]",
+ (outs GPR:$R), (ins DPR:$V, VectorIndex8:$lane),
+ IIC_VMOVSI, "vmov", "s8", "$R, $V$lane",
[(set GPR:$R, (NEONvgetlanes (v8i8 DPR:$V),
imm:$lane))]> {
let Inst{21} = lane{2};
let Inst{6-5} = lane{1-0};
}
def VGETLNs16 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, {?,1},
- (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "s16", "$R, $V[$lane]",
+ (outs GPR:$R), (ins DPR:$V, VectorIndex16:$lane),
+ IIC_VMOVSI, "vmov", "s16", "$R, $V$lane",
[(set GPR:$R, (NEONvgetlanes (v4i16 DPR:$V),
imm:$lane))]> {
let Inst{21} = lane{1};
let Inst{6} = lane{0};
}
def VGETLNu8 : NVGetLane<{1,1,1,0,1,1,?,1}, 0b1011, {?,?},
- (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "u8", "$R, $V[$lane]",
+ (outs GPR:$R), (ins DPR:$V, VectorIndex8:$lane),
+ IIC_VMOVSI, "vmov", "u8", "$R, $V$lane",
[(set GPR:$R, (NEONvgetlaneu (v8i8 DPR:$V),
imm:$lane))]> {
let Inst{21} = lane{2};
let Inst{6-5} = lane{1-0};
}
def VGETLNu16 : NVGetLane<{1,1,1,0,1,0,?,1}, 0b1011, {?,1},
- (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "u16", "$R, $V[$lane]",
+ (outs GPR:$R), (ins DPR:$V, VectorIndex16:$lane),
+ IIC_VMOVSI, "vmov", "u16", "$R, $V$lane",
[(set GPR:$R, (NEONvgetlaneu (v4i16 DPR:$V),
imm:$lane))]> {
let Inst{21} = lane{1};
let Inst{6} = lane{0};
}
def VGETLNi32 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, 0b00,
- (outs GPR:$R), (ins DPR:$V, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "32", "$R, $V[$lane]",
+ (outs GPR:$R), (ins DPR:$V, VectorIndex32:$lane),
+ IIC_VMOVSI, "vmov", "32", "$R, $V$lane",
[(set GPR:$R, (extractelt (v2i32 DPR:$V),
imm:$lane))]> {
let Inst{21} = lane{0};
@@ -4442,24 +4984,24 @@ def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
let Constraints = "$src1 = $V" in {
def VSETLNi8 : NVSetLane<{1,1,1,0,0,1,?,0}, 0b1011, {?,?}, (outs DPR:$V),
- (ins DPR:$src1, GPR:$R, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "8", "$V[$lane], $R",
+ (ins DPR:$src1, GPR:$R, VectorIndex8:$lane),
+ IIC_VMOVISL, "vmov", "8", "$V$lane, $R",
[(set DPR:$V, (vector_insert (v8i8 DPR:$src1),
GPR:$R, imm:$lane))]> {
let Inst{21} = lane{2};
let Inst{6-5} = lane{1-0};
}
def VSETLNi16 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, {?,1}, (outs DPR:$V),
- (ins DPR:$src1, GPR:$R, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "16", "$V[$lane], $R",
+ (ins DPR:$src1, GPR:$R, VectorIndex16:$lane),
+ IIC_VMOVISL, "vmov", "16", "$V$lane, $R",
[(set DPR:$V, (vector_insert (v4i16 DPR:$src1),
GPR:$R, imm:$lane))]> {
let Inst{21} = lane{1};
let Inst{6} = lane{0};
}
def VSETLNi32 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, 0b00, (outs DPR:$V),
- (ins DPR:$src1, GPR:$R, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "32", "$V[$lane], $R",
+ (ins DPR:$src1, GPR:$R, VectorIndex32:$lane),
+ IIC_VMOVISL, "vmov", "32", "$V$lane, $R",
[(set DPR:$V, (insertelt (v2i32 DPR:$src1),
GPR:$R, imm:$lane))]> {
let Inst{21} = lane{0};
@@ -4627,6 +5169,9 @@ defm VQMOVNsu : N2VNInt_HSD<0b11,0b11,0b10,0b00100,1,0, IIC_VQUNAiD,
// VMOVL : Vector Lengthening Move
defm VMOVLs : N2VL_QHS<0b01,0b10100,0,1, "vmovl", "s", sext>;
defm VMOVLu : N2VL_QHS<0b11,0b10100,0,1, "vmovl", "u", zext>;
+def : Pat<(v8i16 (anyext (v8i8 DPR:$Vm))), (VMOVLuv8i16 DPR:$Vm)>;
+def : Pat<(v4i32 (anyext (v4i16 DPR:$Vm))), (VMOVLuv4i32 DPR:$Vm)>;
+def : Pat<(v2i64 (anyext (v2i32 DPR:$Vm))), (VMOVLuv2i64 DPR:$Vm)>;
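+// anyext leaves the high bits of each widened element undefined, so either
+// lengthening move would do; the zero-extending (unsigned) form is used here.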
// Vector Conversions.
@@ -4650,6 +5195,7 @@ def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
v4f32, v4i32, uint_to_fp>;
// VCVT : Vector Convert Between Floating-Point and Fixed-Point.
+let DecoderMethod = "DecodeVCVTD" in {
def VCVTf2xsd : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
def VCVTf2xud : N2VCvtD<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
@@ -4658,7 +5204,9 @@ def VCVTxs2fd : N2VCvtD<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
v2f32, v2i32, int_arm_neon_vcvtfxs2fp>;
def VCVTxu2fd : N2VCvtD<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
v2f32, v2i32, int_arm_neon_vcvtfxu2fp>;
+}
+let DecoderMethod = "DecodeVCVTQ" in {
def VCVTf2xsq : N2VCvtQ<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
v4i32, v4f32, int_arm_neon_vcvtfp2fxs>;
def VCVTf2xuq : N2VCvtQ<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
@@ -4667,6 +5215,7 @@ def VCVTxs2fq : N2VCvtQ<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
v4f32, v4i32, int_arm_neon_vcvtfxs2fp>;
def VCVTxu2fq : N2VCvtQ<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
+}
// VCVT : Vector Convert Between Half-Precision and Single-Precision.
def VCVTf2h : N2VNInt<0b11, 0b11, 0b01, 0b10, 0b01100, 0, 0,
@@ -4759,34 +5308,34 @@ def : AlignedVEXTq<v2f32, v4f32, DSubReg_i32_reg>;
// VEXT : Vector Extract
-class VEXTd<string OpcodeStr, string Dt, ValueType Ty>
+class VEXTd<string OpcodeStr, string Dt, ValueType Ty, Operand immTy>
: N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$Vd),
- (ins DPR:$Vn, DPR:$Vm, i32imm:$index), NVExtFrm,
+ (ins DPR:$Vn, DPR:$Vm, immTy:$index), NVExtFrm,
IIC_VEXTD, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
[(set DPR:$Vd, (Ty (NEONvext (Ty DPR:$Vn),
- (Ty DPR:$Vm), imm:$index)))]> {
+ (Ty DPR:$Vm), imm:$index)))]> {
bits<4> index;
let Inst{11-8} = index{3-0};
}
-class VEXTq<string OpcodeStr, string Dt, ValueType Ty>
+class VEXTq<string OpcodeStr, string Dt, ValueType Ty, Operand immTy>
: N3V<0,1,0b11,{?,?,?,?},1,0, (outs QPR:$Vd),
- (ins QPR:$Vn, QPR:$Vm, i32imm:$index), NVExtFrm,
+ (ins QPR:$Vn, QPR:$Vm, immTy:$index), NVExtFrm,
IIC_VEXTQ, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
[(set QPR:$Vd, (Ty (NEONvext (Ty QPR:$Vn),
- (Ty QPR:$Vm), imm:$index)))]> {
+ (Ty QPR:$Vm), imm:$index)))]> {
bits<4> index;
let Inst{11-8} = index{3-0};
}
-def VEXTd8 : VEXTd<"vext", "8", v8i8> {
+def VEXTd8 : VEXTd<"vext", "8", v8i8, imm0_7> {
let Inst{11-8} = index{3-0};
}
-def VEXTd16 : VEXTd<"vext", "16", v4i16> {
+def VEXTd16 : VEXTd<"vext", "16", v4i16, imm0_3> {
let Inst{11-9} = index{2-0};
let Inst{8} = 0b0;
}
-def VEXTd32 : VEXTd<"vext", "32", v2i32> {
+def VEXTd32 : VEXTd<"vext", "32", v2i32, imm0_1> {
let Inst{11-10} = index{1-0};
let Inst{9-8} = 0b00;
}
@@ -4795,17 +5344,21 @@ def : Pat<(v2f32 (NEONvext (v2f32 DPR:$Vn),
(i32 imm:$index))),
(VEXTd32 DPR:$Vn, DPR:$Vm, imm:$index)>;
-def VEXTq8 : VEXTq<"vext", "8", v16i8> {
+def VEXTq8 : VEXTq<"vext", "8", v16i8, imm0_15> {
let Inst{11-8} = index{3-0};
}
-def VEXTq16 : VEXTq<"vext", "16", v8i16> {
+def VEXTq16 : VEXTq<"vext", "16", v8i16, imm0_7> {
let Inst{11-9} = index{2-0};
let Inst{8} = 0b0;
}
-def VEXTq32 : VEXTq<"vext", "32", v4i32> {
+def VEXTq32 : VEXTq<"vext", "32", v4i32, imm0_3> {
let Inst{11-10} = index{1-0};
let Inst{9-8} = 0b00;
}
+def VEXTq64 : VEXTq<"vext", "64", v2i64, imm0_1> {
+ let Inst{11} = index{0};
+ let Inst{10-8} = 0b000;
+}
def : Pat<(v4f32 (NEONvext (v4f32 QPR:$Vn),
(v4f32 QPR:$Vm),
(i32 imm:$index))),
@@ -4825,7 +5378,9 @@ def VTRNq32 : N2VQShuffle<0b10, 0b00001, IIC_VPERMQ, "vtrn", "32">;
def VUZPd8 : N2VDShuffle<0b00, 0b00010, "vuzp", "8">;
def VUZPd16 : N2VDShuffle<0b01, 0b00010, "vuzp", "16">;
-def VUZPd32 : N2VDShuffle<0b10, 0b00010, "vuzp", "32">;
+// vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
+def : NEONInstAlias<"vuzp${p}.32 $Dd, $Dm",
+ (VTRNd32 DPR:$Dd, DPR:$Dm, pred:$p)>;
def VUZPq8 : N2VQShuffle<0b00, 0b00010, IIC_VPERMQ3, "vuzp", "8">;
def VUZPq16 : N2VQShuffle<0b01, 0b00010, IIC_VPERMQ3, "vuzp", "16">;
@@ -4835,7 +5390,9 @@ def VUZPq32 : N2VQShuffle<0b10, 0b00010, IIC_VPERMQ3, "vuzp", "32">;
def VZIPd8 : N2VDShuffle<0b00, 0b00011, "vzip", "8">;
def VZIPd16 : N2VDShuffle<0b01, 0b00011, "vzip", "16">;
-def VZIPd32 : N2VDShuffle<0b10, 0b00011, "vzip", "32">;
+// vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
+def : NEONInstAlias<"vzip${p}.32 $Dd, $Dm",
+ (VTRNd32 DPR:$Dd, DPR:$Dm, pred:$p)>;
def VZIPq8 : N2VQShuffle<0b00, 0b00011, IIC_VPERMQ3, "vzip", "8">;
def VZIPq16 : N2VQShuffle<0b01, 0b00011, IIC_VPERMQ3, "vzip", "16">;
@@ -4847,27 +5404,25 @@ def VZIPq32 : N2VQShuffle<0b10, 0b00011, IIC_VPERMQ3, "vzip", "32">;
let DecoderMethod = "DecodeTBLInstruction" in {
def VTBL1
: N3V<1,1,0b11,0b1000,0,0, (outs DPR:$Vd),
- (ins DPR:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB1,
- "vtbl", "8", "$Vd, \\{$Vn\\}, $Vm", "",
- [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbl1 DPR:$Vn, DPR:$Vm)))]>;
+ (ins VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB1,
+ "vtbl", "8", "$Vd, $Vn, $Vm", "",
+ [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbl1 VecListOneD:$Vn, DPR:$Vm)))]>;
let hasExtraSrcRegAllocReq = 1 in {
def VTBL2
: N3V<1,1,0b11,0b1001,0,0, (outs DPR:$Vd),
- (ins DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTB2,
- "vtbl", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "", []>;
+ (ins VecListDPair:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB2,
+ "vtbl", "8", "$Vd, $Vn, $Vm", "", []>;
def VTBL3
: N3V<1,1,0b11,0b1010,0,0, (outs DPR:$Vd),
- (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm), NVTBLFrm, IIC_VTB3,
- "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm", "", []>;
+ (ins VecListThreeD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB3,
+ "vtbl", "8", "$Vd, $Vn, $Vm", "", []>;
def VTBL4
: N3V<1,1,0b11,0b1011,0,0, (outs DPR:$Vd),
- (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm),
+ (ins VecListFourD:$Vn, DPR:$Vm),
NVTBLFrm, IIC_VTB4,
- "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm", "", []>;
+ "vtbl", "8", "$Vd, $Vn, $Vm", "", []>;
} // hasExtraSrcRegAllocReq = 1
-def VTBL2Pseudo
- : PseudoNeonI<(outs DPR:$dst), (ins QPR:$tbl, DPR:$src), IIC_VTB2, "", []>;
def VTBL3Pseudo
: PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB3, "", []>;
def VTBL4Pseudo
@@ -4876,31 +5431,28 @@ def VTBL4Pseudo
// VTBX : Vector Table Extension
def VTBX1
: N3V<1,1,0b11,0b1000,1,0, (outs DPR:$Vd),
- (ins DPR:$orig, DPR:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTBX1,
- "vtbx", "8", "$Vd, \\{$Vn\\}, $Vm", "$orig = $Vd",
+ (ins DPR:$orig, VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTBX1,
+ "vtbx", "8", "$Vd, $Vn, $Vm", "$orig = $Vd",
[(set DPR:$Vd, (v8i8 (int_arm_neon_vtbx1
- DPR:$orig, DPR:$Vn, DPR:$Vm)))]>;
+ DPR:$orig, VecListOneD:$Vn, DPR:$Vm)))]>;
let hasExtraSrcRegAllocReq = 1 in {
def VTBX2
: N3V<1,1,0b11,0b1001,1,0, (outs DPR:$Vd),
- (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTBX2,
- "vtbx", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "$orig = $Vd", []>;
+ (ins DPR:$orig, VecListDPair:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTBX2,
+ "vtbx", "8", "$Vd, $Vn, $Vm", "$orig = $Vd", []>;
def VTBX3
: N3V<1,1,0b11,0b1010,1,0, (outs DPR:$Vd),
- (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm),
+ (ins DPR:$orig, VecListThreeD:$Vn, DPR:$Vm),
NVTBLFrm, IIC_VTBX3,
- "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm",
+ "vtbx", "8", "$Vd, $Vn, $Vm",
"$orig = $Vd", []>;
def VTBX4
- : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$Vd), (ins DPR:$orig, DPR:$Vn,
- DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm), NVTBLFrm, IIC_VTBX4,
- "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm",
+ : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$Vd),
+ (ins DPR:$orig, VecListFourD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTBX4,
+ "vtbx", "8", "$Vd, $Vn, $Vm",
"$orig = $Vd", []>;
} // hasExtraSrcRegAllocReq = 1
-def VTBX2Pseudo
- : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QPR:$tbl, DPR:$src),
- IIC_VTBX2, "$orig = $dst", []>;
def VTBX3Pseudo
: PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
IIC_VTBX3, "$orig = $dst", []>;
@@ -4950,9 +5502,13 @@ def : N3VSPat<fadd, VADDfd>;
def : N3VSPat<fsub, VSUBfd>;
def : N3VSPat<fmul, VMULfd>;
def : N3VSMulOpPat<fmul, fadd, VMLAfd>,
- Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
+ Requires<[HasNEON, UseNEONForFP, UseFPVMLx, DontUseFusedMAC]>;
def : N3VSMulOpPat<fmul, fsub, VMLSfd>,
- Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
+ Requires<[HasNEON, UseNEONForFP, UseFPVMLx, DontUseFusedMAC]>;
+def : N3VSMulOpPat<fmul, fadd, VFMAfd>,
+ Requires<[HasVFP4, UseNEONForFP, UseFusedMAC]>;
+def : N3VSMulOpPat<fmul, fsub, VFMSfd>,
+ Requires<[HasVFP4, UseNEONForFP, UseFusedMAC]>;
def : N2VSPat<fabs, VABSfd>;
def : N2VSPat<fneg, VNEGfd>;
def : N3VSPat<NEONfmax, VMAXfd>;
@@ -5028,3 +5584,1448 @@ def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
+
+// Vector lengthening move with load, matching extending loads.
+
+// extload, zextload and sextload for a standard lengthening load. Example:
+// Lengthen_Single<"8", "i16", "i8"> = Pat<(v8i16 (extloadvi8 addrmode5:$addr)),
+// (VMOVLuv8i16 (VLDRD addrmode5:$addr))>;
+multiclass Lengthen_Single<string DestLanes, string DestTy, string SrcTy> {
+ def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<Instruction>("VMOVLuv" # DestLanes # DestTy)
+ (VLDRD addrmode5:$addr))>;
+ def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<Instruction>("VMOVLuv" # DestLanes # DestTy)
+ (VLDRD addrmode5:$addr))>;
+ def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
+ (!cast<Instruction>("VMOVLsv" # DestLanes # DestTy)
+ (VLDRD addrmode5:$addr))>;
+}
+
+// extload, zextload and sextload for a lengthening load which only uses
+// half the lanes available. Example:
+// Lengthen_HalfSingle<"4", "i16", "8", "i16", "i8"> =
+// Pat<(v4i16 (extloadvi8 addrmode5:$addr)),
+// (EXTRACT_SUBREG (VMOVLuv8i16 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+// (VLDRS addrmode5:$addr),
+// ssub_0)),
+// dsub_0)>;
+multiclass Lengthen_HalfSingle<string DestLanes, string DestTy, string SrcTy,
+ string InsnLanes, string InsnTy> {
+ def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # InsnLanes # InsnTy)
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ dsub_0)>;
+ def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # InsnLanes # InsnTy)
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ dsub_0)>;
+ def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # InsnLanes # InsnTy)
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ dsub_0)>;
+}
+
+// extload, zextload and sextload for a lengthening load followed by another
+// lengthening load, to quadruple the initial length.
+// Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32", qsub_0> =
+// Pat<(v4i32 (extloadvi8 addrmode5:$addr)),
+// (EXTRACT_SUBREG (VMOVLuv4i32
+// (EXTRACT_SUBREG (VMOVLuv8i16 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+// (VLDRS addrmode5:$addr),
+// ssub_0)),
+// dsub_0)),
+// qsub_0)>;
+multiclass Lengthen_Double<string DestLanes, string DestTy, string SrcTy,
+ string Insn1Lanes, string Insn1Ty, string Insn2Lanes,
+ string Insn2Ty, SubRegIndex RegType> {
+ def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
+ ssub_0)), dsub_0)),
+ RegType)>;
+ def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
+ ssub_0)), dsub_0)),
+ RegType)>;
+ def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
+ (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty)
+ (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty)
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
+ ssub_0)), dsub_0)),
+ RegType)>;
+}
+
+defm : Lengthen_Single<"8", "i16", "i8">; // v8i8 -> v8i16
+defm : Lengthen_Single<"4", "i32", "i16">; // v4i16 -> v4i32
+defm : Lengthen_Single<"2", "i64", "i32">; // v2i32 -> v2i64
+
+defm : Lengthen_HalfSingle<"4", "i16", "i8", "8", "i16">; // v4i8 -> v4i16
+defm : Lengthen_HalfSingle<"2", "i16", "i8", "8", "i16">; // v2i8 -> v2i16
+defm : Lengthen_HalfSingle<"2", "i32", "i16", "4", "i32">; // v2i16 -> v2i32
+
+// Double lengthening - v4i8 -> v4i16 -> v4i32
+defm : Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32", qsub_0>;
+// v2i8 -> v2i16 -> v2i32
+defm : Lengthen_Double<"2", "i32", "i8", "8", "i16", "4", "i32", dsub_0>;
+// v2i16 -> v2i32 -> v2i64
+defm : Lengthen_Double<"2", "i64", "i16", "4", "i32", "2", "i64", qsub_0>;
+
+// Triple lengthening - v2i8 -> v2i16 -> v2i32 -> v2i64
+def : Pat<(v2i64 (extloadvi8 addrmode5:$addr)),
+ (VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ dsub_0)), dsub_0))>;
+def : Pat<(v2i64 (zextloadvi8 addrmode5:$addr)),
+ (VMOVLuv2i64 (EXTRACT_SUBREG (VMOVLuv4i32 (EXTRACT_SUBREG (VMOVLuv8i16
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ dsub_0)), dsub_0))>;
+def : Pat<(v2i64 (sextloadvi8 addrmode5:$addr)),
+ (VMOVLsv2i64 (EXTRACT_SUBREG (VMOVLsv4i32 (EXTRACT_SUBREG (VMOVLsv8i16
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)),
+ dsub_0)), dsub_0))>;
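+// Each VMOVL step doubles the element width, so widening v2i8 all the way to
+// v2i64 takes three moves, with EXTRACT_SUBREG pulling the low D register
+// back out between the Q-sized intermediate results.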
+
+//===----------------------------------------------------------------------===//
+// Assembler aliases
+//
+
+def : VFP2InstAlias<"fmdhr${p} $Dd, $Rn",
+ (VSETLNi32 DPR:$Dd, GPR:$Rn, 1, pred:$p)>;
+def : VFP2InstAlias<"fmdlr${p} $Dd, $Rn",
+ (VSETLNi32 DPR:$Dd, GPR:$Rn, 0, pred:$p)>;
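+// fmdhr/fmdlr are pre-UAL VFP mnemonics that write the high and low word of
+// a double register, hence the mapping onto lanes 1 and 0 of VSETLNi32.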
+
+
+// VADD two-operand aliases.
+def : NEONInstAlias<"vadd${p}.i8 $Vdn, $Vm",
+ (VADDv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vadd${p}.i16 $Vdn, $Vm",
+ (VADDv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vadd${p}.i32 $Vdn, $Vm",
+ (VADDv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vadd${p}.i64 $Vdn, $Vm",
+ (VADDv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vadd${p}.i8 $Vdn, $Vm",
+ (VADDv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vadd${p}.i16 $Vdn, $Vm",
+ (VADDv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vadd${p}.i32 $Vdn, $Vm",
+ (VADDv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vadd${p}.i64 $Vdn, $Vm",
+ (VADDv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vadd${p}.f32 $Vdn, $Vm",
+ (VADDfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vadd${p}.f32 $Vdn, $Vm",
+ (VADDfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+// VSUB two-operand aliases.
+def : NEONInstAlias<"vsub${p}.i8 $Vdn, $Vm",
+ (VSUBv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vsub${p}.i16 $Vdn, $Vm",
+ (VSUBv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vsub${p}.i32 $Vdn, $Vm",
+ (VSUBv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vsub${p}.i64 $Vdn, $Vm",
+ (VSUBv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vsub${p}.i8 $Vdn, $Vm",
+ (VSUBv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vsub${p}.i16 $Vdn, $Vm",
+ (VSUBv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vsub${p}.i32 $Vdn, $Vm",
+ (VSUBv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vsub${p}.i64 $Vdn, $Vm",
+ (VSUBv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vsub${p}.f32 $Vdn, $Vm",
+ (VSUBfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vsub${p}.f32 $Vdn, $Vm",
+ (VSUBfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+// VADDW two-operand aliases.
+def : NEONInstAlias<"vaddw${p}.s8 $Vdn, $Vm",
+ (VADDWsv8i16 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vaddw${p}.s16 $Vdn, $Vm",
+ (VADDWsv4i32 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vaddw${p}.s32 $Vdn, $Vm",
+ (VADDWsv2i64 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vaddw${p}.u8 $Vdn, $Vm",
+ (VADDWuv8i16 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vaddw${p}.u16 $Vdn, $Vm",
+ (VADDWuv4i32 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vaddw${p}.u32 $Vdn, $Vm",
+ (VADDWuv2i64 QPR:$Vdn, QPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+// VAND/VBIC/VEOR/VORR accept but do not require a type suffix.
+defm : NEONDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
+ (VANDd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
+ (VANDq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vbic${p}", "$Vd, $Vn, $Vm",
+ (VBICd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vbic${p}", "$Vd, $Vn, $Vm",
+ (VBICq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
+ (VEORd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
+ (VEORq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
+ (VORRd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
+ (VORRq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+// ... two-operand aliases
+def : NEONInstAlias<"vand${p} $Vdn, $Vm",
+ (VANDd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vand${p} $Vdn, $Vm",
+ (VANDq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vbic${p} $Vdn, $Vm",
+ (VBICd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vbic${p} $Vdn, $Vm",
+ (VBICq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"veor${p} $Vdn, $Vm",
+ (VEORd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"veor${p} $Vdn, $Vm",
+ (VEORq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vorr${p} $Vdn, $Vm",
+ (VORRd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vorr${p} $Vdn, $Vm",
+ (VORRq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+defm : NEONDTAnyInstAlias<"vand${p}", "$Vdn, $Vm",
+ (VANDd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vand${p}", "$Vdn, $Vm",
+ (VANDq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"veor${p}", "$Vdn, $Vm",
+ (VEORd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"veor${p}", "$Vdn, $Vm",
+ (VEORq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vorr${p}", "$Vdn, $Vm",
+ (VORRd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vorr${p}", "$Vdn, $Vm",
+ (VORRq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
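+// As an illustration (not from this patch), both "vorr d0, d1" and
+// "vorr.i32 d0, d1" are accepted here and expand to "vorr d0, d0, d1".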
+
+// VMUL two-operand aliases.
+def : NEONInstAlias<"vmul${p}.p8 $Qdn, $Qm",
+ (VMULpq QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.i8 $Qdn, $Qm",
+ (VMULv16i8 QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.i16 $Qdn, $Qm",
+ (VMULv8i16 QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.i32 $Qdn, $Qm",
+ (VMULv4i32 QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
+
+def : NEONInstAlias<"vmul${p}.p8 $Ddn, $Dm",
+ (VMULpd DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.i8 $Ddn, $Dm",
+ (VMULv8i8 DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.i16 $Ddn, $Dm",
+ (VMULv4i16 DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.i32 $Ddn, $Dm",
+ (VMULv2i32 DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
+
+def : NEONInstAlias<"vmul${p}.f32 $Qdn, $Qm",
+ (VMULfq QPR:$Qdn, QPR:$Qdn, QPR:$Qm, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.f32 $Ddn, $Dm",
+ (VMULfd DPR:$Ddn, DPR:$Ddn, DPR:$Dm, pred:$p)>;
+
+def : NEONInstAlias<"vmul${p}.i16 $Ddn, $Dm$lane",
+ (VMULslv4i16 DPR:$Ddn, DPR:$Ddn, DPR_8:$Dm,
+ VectorIndex16:$lane, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.i16 $Qdn, $Dm$lane",
+ (VMULslv8i16 QPR:$Qdn, QPR:$Qdn, DPR_8:$Dm,
+ VectorIndex16:$lane, pred:$p)>;
+
+def : NEONInstAlias<"vmul${p}.i32 $Ddn, $Dm$lane",
+ (VMULslv2i32 DPR:$Ddn, DPR:$Ddn, DPR_VFP2:$Dm,
+ VectorIndex32:$lane, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.i32 $Qdn, $Dm$lane",
+ (VMULslv4i32 QPR:$Qdn, QPR:$Qdn, DPR_VFP2:$Dm,
+ VectorIndex32:$lane, pred:$p)>;
+
+def : NEONInstAlias<"vmul${p}.f32 $Ddn, $Dm$lane",
+ (VMULslfd DPR:$Ddn, DPR:$Ddn, DPR_VFP2:$Dm,
+ VectorIndex32:$lane, pred:$p)>;
+def : NEONInstAlias<"vmul${p}.f32 $Qdn, $Dm$lane",
+ (VMULslfq QPR:$Qdn, QPR:$Qdn, DPR_VFP2:$Dm,
+ VectorIndex32:$lane, pred:$p)>;
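+// The DPR_8/DPR_VFP2 classes restrict which D registers may supply the
+// scalar (D0-D7 for 16-bit lanes, D0-D15 for 32-bit lanes). Illustrative
+// example: "vmul.i16 d1, d2[3]" expands to "vmul.i16 d1, d1, d2[3]".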
+
+// VQADD (register) two-operand aliases.
+def : NEONInstAlias<"vqadd${p}.s8 $Vdn, $Vm",
+ (VQADDsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.s16 $Vdn, $Vm",
+ (VQADDsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.s32 $Vdn, $Vm",
+ (VQADDsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.s64 $Vdn, $Vm",
+ (VQADDsv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.u8 $Vdn, $Vm",
+ (VQADDuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.u16 $Vdn, $Vm",
+ (VQADDuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.u32 $Vdn, $Vm",
+ (VQADDuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.u64 $Vdn, $Vm",
+ (VQADDuv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vqadd${p}.s8 $Vdn, $Vm",
+ (VQADDsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.s16 $Vdn, $Vm",
+ (VQADDsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.s32 $Vdn, $Vm",
+ (VQADDsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.s64 $Vdn, $Vm",
+ (VQADDsv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.u8 $Vdn, $Vm",
+ (VQADDuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.u16 $Vdn, $Vm",
+ (VQADDuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.u32 $Vdn, $Vm",
+ (VQADDuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqadd${p}.u64 $Vdn, $Vm",
+ (VQADDuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+// VSHL (immediate) two-operand aliases.
+def : NEONInstAlias<"vshl${p}.i8 $Vdn, $imm",
+ (VSHLiv8i8 DPR:$Vdn, DPR:$Vdn, imm0_7:$imm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.i16 $Vdn, $imm",
+ (VSHLiv4i16 DPR:$Vdn, DPR:$Vdn, imm0_15:$imm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.i32 $Vdn, $imm",
+ (VSHLiv2i32 DPR:$Vdn, DPR:$Vdn, imm0_31:$imm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.i64 $Vdn, $imm",
+ (VSHLiv1i64 DPR:$Vdn, DPR:$Vdn, imm0_63:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vshl${p}.i8 $Vdn, $imm",
+ (VSHLiv16i8 QPR:$Vdn, QPR:$Vdn, imm0_7:$imm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.i16 $Vdn, $imm",
+ (VSHLiv8i16 QPR:$Vdn, QPR:$Vdn, imm0_15:$imm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.i32 $Vdn, $imm",
+ (VSHLiv4i32 QPR:$Vdn, QPR:$Vdn, imm0_31:$imm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.i64 $Vdn, $imm",
+ (VSHLiv2i64 QPR:$Vdn, QPR:$Vdn, imm0_63:$imm, pred:$p)>;
+
+// VSHL (register) two-operand aliases.
+def : NEONInstAlias<"vshl${p}.s8 $Vdn, $Vm",
+ (VSHLsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s16 $Vdn, $Vm",
+ (VSHLsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s32 $Vdn, $Vm",
+ (VSHLsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s64 $Vdn, $Vm",
+ (VSHLsv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u8 $Vdn, $Vm",
+ (VSHLuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u16 $Vdn, $Vm",
+ (VSHLuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
+ (VSHLuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
+ (VSHLuv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vshl${p}.s8 $Vdn, $Vm",
+ (VSHLsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s16 $Vdn, $Vm",
+ (VSHLsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s32 $Vdn, $Vm",
+ (VSHLsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.s64 $Vdn, $Vm",
+ (VSHLsv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u8 $Vdn, $Vm",
+ (VSHLuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u16 $Vdn, $Vm",
+ (VSHLuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
+ (VSHLuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
+ (VSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+// VSHR (immediate) two-operand aliases.
+def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
+ (VSHRsv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
+ (VSHRsv4i16 DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.s32 $Vdn, $imm",
+ (VSHRsv2i32 DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.s64 $Vdn, $imm",
+ (VSHRsv1i64 DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
+ (VSHRsv16i8 QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
+ (VSHRsv8i16 QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.s32 $Vdn, $imm",
+ (VSHRsv4i32 QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.s64 $Vdn, $imm",
+ (VSHRsv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vshr${p}.u8 $Vdn, $imm",
+ (VSHRuv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.u16 $Vdn, $imm",
+ (VSHRuv4i16 DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
+ (VSHRuv2i32 DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
+ (VSHRuv1i64 DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vshr${p}.u8 $Vdn, $imm",
+ (VSHRuv16i8 QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.u16 $Vdn, $imm",
+ (VSHRuv8i16 QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
+ (VSHRuv4i32 QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
+ (VSHRuv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
+
+// VLD1 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VLD1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr",
+ (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD1LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr",
+ (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD1LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr",
+ (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+def VLD1LNdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr!",
+ (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD1LNdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr!",
+ (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD1LNdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr!",
+ (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD1LNdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListOneDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD1LNdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld1${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListOneDHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD1LNdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld1${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListOneDWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
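+// Illustrative syntax these pseudos accept (not from this patch; the VST1
+// forms below mirror it):
+//   vld1.8 {d16[3]}, [r0]        @ no writeback
+//   vld1.8 {d16[3]}, [r0]!       @ post-increment writeback
+//   vld1.8 {d16[3]}, [r0], r2    @ register writeback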
+
+
+// VST1 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VST1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr",
+ (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST1LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr",
+ (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST1LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr",
+ (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+def VST1LNdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr!",
+ (ins VecListOneDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST1LNdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr!",
+ (ins VecListOneDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST1LNdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr!",
+ (ins VecListOneDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST1LNdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst1${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListOneDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST1LNdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst1${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListOneDHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST1LNdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst1${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListOneDWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+// VLD2 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VLD2LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr",
+ (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr",
+ (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr",
+ (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr",
+ (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+def VLD2LNdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr!",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr!",
+ (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr!",
+ (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNqWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr!",
+ (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNqWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr!",
+ (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD2LNdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD2LNdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD2LNdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListTwoDWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD2LNqWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD2LNqWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld2${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListTwoQWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+
+// VST2 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VST2LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr",
+ (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr",
+ (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr",
+ (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr",
+ (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+def VST2LNdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr!",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr!",
+ (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr!",
+ (ins VecListTwoDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNqWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".16", "$list, $addr!",
+ (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNqWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr!",
+ (ins VecListTwoQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST2LNdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListTwoDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST2LNdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".16","$list, $addr, $Rm",
+ (ins VecListTwoDHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST2LNdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListTwoDWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST2LNqWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".16","$list, $addr, $Rm",
+ (ins VecListTwoQHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST2LNqWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst2${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListTwoQWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+// VLD3 all-lanes pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VLD3DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPdAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPdAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPqAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPqAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+
+def VLD3DUPdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPqWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPqWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPqWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD3DUPdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3DUPdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3DUPdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeDAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3DUPqWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3DUPqWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3DUPqWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeQAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+
+// VLD3 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VLD3LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
+ (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
+ (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
+ (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
+ (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
+ (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+def VLD3LNdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
+ (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNqWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNqWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD3LNdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListThreeDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3LNdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3LNdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeDWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3LNqWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3LNqWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeQWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+// VLD3 multiple structure pseudo-instructions. These need special handling for
+// the vector operands that the normal instructions don't yet model.
+// FIXME: Remove these when the register classes and instructions are updated.
+def VLD3dAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VLD3dAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VLD3dAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VLD3qAsm_8 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD3qAsm_16 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD3qAsm_32 : NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+
+def VLD3dWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VLD3dWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VLD3dWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VLD3qWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr!",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD3qWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD3qWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD3dWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListThreeD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3dWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3dWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3qWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListThreeQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3qWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD3qWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+// VST3 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VST3LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
+ (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
+ (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
+ (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
+ (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
+ (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+def VST3LNdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
+ (ins VecListThreeDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNqWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNqWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST3LNdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListThreeDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3LNdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeDHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3LNdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeDWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3LNqWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeQHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3LNqWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeQWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+
+// VST3 multiple structure pseudo-instructions. These need special handling for
+// the vector operands that the normal instructions don't yet model.
+// FIXME: Remove these when the register classes and instructions are updated.
+def VST3dAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VST3dAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VST3dAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VST3qAsm_8 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VST3qAsm_16 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VST3qAsm_32 : NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+
+def VST3dWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VST3dWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VST3dWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeD:$list, addrmode6:$addr, pred:$p)>;
+def VST3qWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr!",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VST3qWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr!",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VST3qWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr!",
+ (ins VecListThreeQ:$list, addrmode6:$addr, pred:$p)>;
+def VST3dWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListThreeD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3dWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3dWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3qWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListThreeQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3qWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListThreeQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST3qWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst3${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListThreeQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+// VLD4 all-lanes pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VLD4DUPdAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPdAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPdAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPqAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPqAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPqAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+
+def VLD4DUPdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPqWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPqWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPqWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr, pred:$p)>;
+def VLD4DUPdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4DUPdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4DUPdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourDAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4DUPqWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4DUPqWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4DUPqWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourQAllLanes:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+
+// VLD4 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VLD4LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
+ (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
+ (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
+ (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
+ (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
+ (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+def VLD4LNdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
+ (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
+ (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
+ (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNqWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
+ (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNqWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
+ (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VLD4LNdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListFourDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4LNdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourDHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4LNdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourDWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4LNqWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourQHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4LNqWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourQWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+
+
+// VLD4 multiple structure pseudo-instructions. These need special handling for
+// the vector operands that the normal instructions don't yet model.
+// FIXME: Remove these when the register classes and instructions are updated.
+def VLD4dAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VLD4dAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VLD4dAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VLD4qAsm_8 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD4qAsm_16 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD4qAsm_32 : NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+
+def VLD4dWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VLD4dWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VLD4dWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VLD4qWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr!",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD4qWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr!",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD4qWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr!",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VLD4dWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListFourD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4dWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4dWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4qWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListFourQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4qWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VLD4qWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vld4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+// VST4 single-lane pseudo-instructions. These need special handling for
+// the lane index that an InstAlias can't handle, so we use these instead.
+def VST4LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
+ (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNdAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
+ (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNdAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
+ (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNqAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
+ (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNqAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
+ (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+
+def VST4LNdWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
+ (ins VecListFourDByteIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNdWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
+ (ins VecListFourDHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNdWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
+ (ins VecListFourDWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNqWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
+ (ins VecListFourQHWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNqWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
+ (ins VecListFourQWordIndexed:$list, addrmode6:$addr, pred:$p)>;
+def VST4LNdWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListFourDByteIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4LNdWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourDHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4LNdWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourDWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4LNqWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourQHWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4LNqWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourQWordIndexed:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+
+// VST4 multiple structure pseudo-instructions. These need special handling for
+// the vector operands that the normal instructions don't yet model.
+// FIXME: Remove these when the register classes and instructions are updated.
+def VST4dAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VST4dAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VST4dAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VST4qAsm_8 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VST4qAsm_16 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VST4qAsm_32 : NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+
+def VST4dWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VST4dWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VST4dWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
+ (ins VecListFourD:$list, addrmode6:$addr, pred:$p)>;
+def VST4qWB_fixed_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr!",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VST4qWB_fixed_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr!",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VST4qWB_fixed_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr!",
+ (ins VecListFourQ:$list, addrmode6:$addr, pred:$p)>;
+def VST4dWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListFourD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4dWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4dWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourD:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4qWB_register_Asm_8 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".8", "$list, $addr, $Rm",
+ (ins VecListFourQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4qWB_register_Asm_16 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".16", "$list, $addr, $Rm",
+ (ins VecListFourQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+def VST4qWB_register_Asm_32 :
+ NEONDataTypeAsmPseudoInst<"vst4${p}", ".32", "$list, $addr, $Rm",
+ (ins VecListFourQ:$list, addrmode6:$addr,
+ rGPR:$Rm, pred:$p)>;
+
+// VMOV takes an optional datatype suffix.
+defm : NEONDTAnyInstAlias<"vmov${p}", "$Vd, $Vm",
+ (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vmov${p}", "$Vd, $Vm",
+ (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
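+// E.g. "vmov d0, d1" (optionally with a suffix such as .i32) is accepted
+// and encoded as the register move "vorr d0, d1, d1".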
+
+// VCLE (register) is an assembler alias for VCGE w/ the operands reversed.
+// D-register versions.
+def : NEONInstAlias<"vcle${p}.s8 $Dd, $Dn, $Dm",
+ (VCGEsv8i8 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.s16 $Dd, $Dn, $Dm",
+ (VCGEsv4i16 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.s32 $Dd, $Dn, $Dm",
+ (VCGEsv2i32 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.u8 $Dd, $Dn, $Dm",
+ (VCGEuv8i8 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.u16 $Dd, $Dn, $Dm",
+ (VCGEuv4i16 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.u32 $Dd, $Dn, $Dm",
+ (VCGEuv2i32 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.f32 $Dd, $Dn, $Dm",
+ (VCGEfd DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+// Q-register versions.
+def : NEONInstAlias<"vcle${p}.s8 $Qd, $Qn, $Qm",
+ (VCGEsv16i8 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.s16 $Qd, $Qn, $Qm",
+ (VCGEsv8i16 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.s32 $Qd, $Qn, $Qm",
+ (VCGEsv4i32 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.u8 $Qd, $Qn, $Qm",
+ (VCGEuv16i8 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.u16 $Qd, $Qn, $Qm",
+ (VCGEuv8i16 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.u32 $Qd, $Qn, $Qm",
+ (VCGEuv4i32 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vcle${p}.f32 $Qd, $Qn, $Qm",
+ (VCGEfq QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
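+// E.g. "vcle.s16 d0, d1, d2" is encoded as "vcge.s16 d0, d2, d1", the
+// same comparison with the operands swapped.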
+
+// VCLT (register) is an assembler alias for VCGT w/ the operands reversed.
+// D-register versions.
+def : NEONInstAlias<"vclt${p}.s8 $Dd, $Dn, $Dm",
+ (VCGTsv8i8 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.s16 $Dd, $Dn, $Dm",
+ (VCGTsv4i16 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.s32 $Dd, $Dn, $Dm",
+ (VCGTsv2i32 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.u8 $Dd, $Dn, $Dm",
+ (VCGTuv8i8 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.u16 $Dd, $Dn, $Dm",
+ (VCGTuv4i16 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.u32 $Dd, $Dn, $Dm",
+ (VCGTuv2i32 DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.f32 $Dd, $Dn, $Dm",
+ (VCGTfd DPR:$Dd, DPR:$Dm, DPR:$Dn, pred:$p)>;
+// Q-register versions.
+def : NEONInstAlias<"vclt${p}.s8 $Qd, $Qn, $Qm",
+ (VCGTsv16i8 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.s16 $Qd, $Qn, $Qm",
+ (VCGTsv8i16 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.s32 $Qd, $Qn, $Qm",
+ (VCGTsv4i32 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.u8 $Qd, $Qn, $Qm",
+ (VCGTuv16i8 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.u16 $Qd, $Qn, $Qm",
+ (VCGTuv8i16 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.u32 $Qd, $Qn, $Qm",
+ (VCGTuv4i32 QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+def : NEONInstAlias<"vclt${p}.f32 $Qd, $Qn, $Qm",
+ (VCGTfq QPR:$Qd, QPR:$Qm, QPR:$Qn, pred:$p)>;
+
+// Two-operand variants for VEXT
+def : NEONInstAlias<"vext${p}.8 $Vdn, $Vm, $imm",
+ (VEXTd8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_7:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.16 $Vdn, $Vm, $imm",
+ (VEXTd16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_3:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.32 $Vdn, $Vm, $imm",
+ (VEXTd32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, imm0_1:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vext${p}.8 $Vdn, $Vm, $imm",
+ (VEXTq8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_15:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.16 $Vdn, $Vm, $imm",
+ (VEXTq16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_7:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.32 $Vdn, $Vm, $imm",
+ (VEXTq32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_3:$imm, pred:$p)>;
+def : NEONInstAlias<"vext${p}.64 $Vdn, $Vm, $imm",
+ (VEXTq64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, imm0_1:$imm, pred:$p)>;
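+// E.g. "vext.8 d0, d1, #3" is shorthand for "vext.8 d0, d0, d1, #3".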
+
+// Two-operand variants for VQDMULH
+def : NEONInstAlias<"vqdmulh${p}.s16 $Vdn, $Vm",
+ (VQDMULHv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqdmulh${p}.s32 $Vdn, $Vm",
+ (VQDMULHv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vqdmulh${p}.s16 $Vdn, $Vm",
+ (VQDMULHv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vqdmulh${p}.s32 $Vdn, $Vm",
+ (VQDMULHv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+// Two-operand variants for VMAX.
+def : NEONInstAlias<"vmax${p}.s8 $Vdn, $Vm",
+ (VMAXsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.s16 $Vdn, $Vm",
+ (VMAXsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.s32 $Vdn, $Vm",
+ (VMAXsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u8 $Vdn, $Vm",
+ (VMAXuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u16 $Vdn, $Vm",
+ (VMAXuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u32 $Vdn, $Vm",
+ (VMAXuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.f32 $Vdn, $Vm",
+ (VMAXfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vmax${p}.s8 $Vdn, $Vm",
+ (VMAXsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.s16 $Vdn, $Vm",
+ (VMAXsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.s32 $Vdn, $Vm",
+ (VMAXsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u8 $Vdn, $Vm",
+ (VMAXuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u16 $Vdn, $Vm",
+ (VMAXuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.u32 $Vdn, $Vm",
+ (VMAXuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmax${p}.f32 $Vdn, $Vm",
+ (VMAXfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+// Two-operand variants for VMIN.
+def : NEONInstAlias<"vmin${p}.s8 $Vdn, $Vm",
+ (VMINsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.s16 $Vdn, $Vm",
+ (VMINsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.s32 $Vdn, $Vm",
+ (VMINsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u8 $Vdn, $Vm",
+ (VMINuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u16 $Vdn, $Vm",
+ (VMINuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u32 $Vdn, $Vm",
+ (VMINuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.f32 $Vdn, $Vm",
+ (VMINfd DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+
+def : NEONInstAlias<"vmin${p}.s8 $Vdn, $Vm",
+ (VMINsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.s16 $Vdn, $Vm",
+ (VMINsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.s32 $Vdn, $Vm",
+ (VMINsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u8 $Vdn, $Vm",
+ (VMINuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u16 $Vdn, $Vm",
+ (VMINuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.u32 $Vdn, $Vm",
+ (VMINuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vmin${p}.f32 $Vdn, $Vm",
+ (VMINfq QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
+
+// Two-operand variants for VPADD.
+def : NEONInstAlias<"vpadd${p}.i8 $Vdn, $Vm",
+ (VPADDi8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vpadd${p}.i16 $Vdn, $Vm",
+ (VPADDi16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vpadd${p}.i32 $Vdn, $Vm",
+ (VPADDi32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
+def : NEONInstAlias<"vpadd${p}.f32 $Vdn, $Vm",
+ (VPADDf DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
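+// E.g. "vpadd.i16 d0, d1" is shorthand for "vpadd.i16 d0, d0, d1"; the
+// destination doubles as the first source operand.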
+
+// Two-operand variants for VSRA.
+// Signed.
+def : NEONInstAlias<"vsra${p}.s8 $Vdm, $imm",
+ (VSRAsv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s16 $Vdm, $imm",
+ (VSRAsv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s32 $Vdm, $imm",
+ (VSRAsv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s64 $Vdm, $imm",
+ (VSRAsv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vsra${p}.s8 $Vdm, $imm",
+ (VSRAsv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s16 $Vdm, $imm",
+ (VSRAsv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s32 $Vdm, $imm",
+ (VSRAsv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s64 $Vdm, $imm",
+ (VSRAsv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+// Unsigned.
+def : NEONInstAlias<"vsra${p}.u8 $Vdm, $imm",
+ (VSRAuv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u16 $Vdm, $imm",
+ (VSRAuv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u32 $Vdm, $imm",
+ (VSRAuv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u64 $Vdm, $imm",
+ (VSRAuv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vsra${p}.u8 $Vdm, $imm",
+ (VSRAuv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u16 $Vdm, $imm",
+ (VSRAuv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u32 $Vdm, $imm",
+ (VSRAuv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u64 $Vdm, $imm",
+ (VSRAuv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
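+// E.g. "vsra.u32 d0, #8" is shorthand for "vsra.u32 d0, d0, #8".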
+
+// Two-operand variants for VSRI.
+def : NEONInstAlias<"vsri${p}.8 $Vdm, $imm",
+ (VSRIv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsri${p}.16 $Vdm, $imm",
+ (VSRIv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsri${p}.32 $Vdm, $imm",
+ (VSRIv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsri${p}.64 $Vdm, $imm",
+ (VSRIv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vsri${p}.8 $Vdm, $imm",
+ (VSRIv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsri${p}.16 $Vdm, $imm",
+ (VSRIv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsri${p}.32 $Vdm, $imm",
+ (VSRIv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsri${p}.64 $Vdm, $imm",
+ (VSRIv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+// Two-operand variants for VSLI.
+def : NEONInstAlias<"vsli${p}.8 $Vdm, $imm",
+ (VSLIv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsli${p}.16 $Vdm, $imm",
+ (VSLIv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsli${p}.32 $Vdm, $imm",
+ (VSLIv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsli${p}.64 $Vdm, $imm",
+ (VSLIv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vsli${p}.8 $Vdm, $imm",
+ (VSLIv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsli${p}.16 $Vdm, $imm",
+ (VSLIv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsli${p}.32 $Vdm, $imm",
+ (VSLIv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsli${p}.64 $Vdm, $imm",
+ (VSLIv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+// VSWP allows, but does not require, a type suffix.
+defm : NEONDTAnyInstAlias<"vswp${p}", "$Vd, $Vm",
+ (VSWPd DPR:$Vd, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vswp${p}", "$Vd, $Vm",
+ (VSWPq QPR:$Vd, QPR:$Vm, pred:$p)>;
+
+// VBIF, VBIT, and VBSL allow, but do not require, a type suffix.
+defm : NEONDTAnyInstAlias<"vbif${p}", "$Vd, $Vn, $Vm",
+ (VBIFd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vbit${p}", "$Vd, $Vn, $Vm",
+ (VBITd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vbsl${p}", "$Vd, $Vn, $Vm",
+ (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vbif${p}", "$Vd, $Vn, $Vm",
+ (VBIFq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vbit${p}", "$Vd, $Vn, $Vm",
+ (VBITq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+defm : NEONDTAnyInstAlias<"vbsl${p}", "$Vd, $Vn, $Vm",
+ (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
+
+// "vmov Rd, #-imm" can be handled via "vmvn".
+def : NEONInstAlias<"vmov${p}.i32 $Vd, $imm",
+ (VMVNv2i32 DPR:$Vd, nImmVMOVI32Neg:$imm, pred:$p)>;
+def : NEONInstAlias<"vmov${p}.i32 $Vd, $imm",
+ (VMVNv4i32 QPR:$Vd, nImmVMOVI32Neg:$imm, pred:$p)>;
+def : NEONInstAlias<"vmvn${p}.i32 $Vd, $imm",
+ (VMOVv2i32 DPR:$Vd, nImmVMOVI32Neg:$imm, pred:$p)>;
+def : NEONInstAlias<"vmvn${p}.i32 $Vd, $imm",
+ (VMOVv4i32 QPR:$Vd, nImmVMOVI32Neg:$imm, pred:$p)>;
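+// E.g. #0xfffffffe is not a valid VMOV.i32 immediate, but its complement
+// #0x1 is, so "vmov.i32 d0, #0xfffffffe" becomes "vmvn.i32 d0, #0x1"
+// (and similarly for vmvn with an immediate only VMOV can encode).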
+
+// 'gas' compatibility aliases for quad-word instructions. Strictly speaking,
+// these should be restricted to just the Q-register variants, but the
+// register classes are enough to match correctly regardless, so we keep it
+// simple and just use MnemonicAlias.
+def : NEONMnemonicAlias<"vbicq", "vbic">;
+def : NEONMnemonicAlias<"vandq", "vand">;
+def : NEONMnemonicAlias<"veorq", "veor">;
+def : NEONMnemonicAlias<"vorrq", "vorr">;
+
+def : NEONMnemonicAlias<"vmovq", "vmov">;
+def : NEONMnemonicAlias<"vmvnq", "vmvn">;
+// Explicit versions for floating point so that the FPImm variants get
+// handled early. The parser gets confused otherwise.
+def : NEONMnemonicAlias<"vmovq.f32", "vmov.f32">;
+def : NEONMnemonicAlias<"vmovq.f64", "vmov.f64">;
+
+def : NEONMnemonicAlias<"vaddq", "vadd">;
+def : NEONMnemonicAlias<"vsubq", "vsub">;
+
+def : NEONMnemonicAlias<"vminq", "vmin">;
+def : NEONMnemonicAlias<"vmaxq", "vmax">;
+
+def : NEONMnemonicAlias<"vmulq", "vmul">;
+
+def : NEONMnemonicAlias<"vabsq", "vabs">;
+
+def : NEONMnemonicAlias<"vshlq", "vshl">;
+def : NEONMnemonicAlias<"vshrq", "vshr">;
+
+def : NEONMnemonicAlias<"vcvtq", "vcvt">;
+
+def : NEONMnemonicAlias<"vcleq", "vcle">;
+def : NEONMnemonicAlias<"vceqq", "vceq">;
+
+def : NEONMnemonicAlias<"vzipq", "vzip">;
+def : NEONMnemonicAlias<"vswpq", "vswp">;
+
+def : NEONMnemonicAlias<"vrecpeq.f32", "vrecpe.f32">;
+def : NEONMnemonicAlias<"vrecpeq.u32", "vrecpe.u32">;
+
+
+// Alias for loading floating-point immediates that aren't representable
+// with the vmov.f32 encoding but whose bit pattern is representable with
+// the .i32 encoding.
+def : NEONInstAlias<"vmov${p}.f32 $Vd, $imm",
+ (VMOVv4i32 QPR:$Vd, nImmVMOVI32:$imm, pred:$p)>;
+def : NEONInstAlias<"vmov${p}.f32 $Vd, $imm",
+ (VMOVv2i32 DPR:$Vd, nImmVMOVI32:$imm, pred:$p)>;
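+// E.g. 0.0 has no vmov.f32 immediate encoding, but its bit pattern
+// 0x00000000 is a valid .i32 immediate, so "vmov.f32 d0, #0.0" can be
+// encoded as "vmov.i32 d0, #0".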
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
index cedb547..6335229 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -1,4 +1,4 @@
-//===- ARMInstrThumb.td - Thumb support for ARM ------------*- tablegen -*-===//
+//===-- ARMInstrThumb.td - Thumb support for ARM -----------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -91,6 +91,12 @@ def t_imm0_508s4 : Operand<i32> {
let ParserMatchClass = t_imm0_508s4_asmoperand;
let OperandType = "OPERAND_IMMEDIATE";
}
+// Alias use only, so no printer is necessary.
+def t_imm0_508s4_neg_asmoperand: AsmOperandClass { let Name = "Imm0_508s4Neg"; }
+def t_imm0_508s4_neg : Operand<i32> {
+ let ParserMatchClass = t_imm0_508s4_neg_asmoperand;
+ let OperandType = "OPERAND_IMMEDIATE";
+}
// Define Thumb specific addressing modes.
@@ -345,6 +351,11 @@ def tSUBspi : T1pIt<(outs GPRsp:$Rdn), (ins GPRsp:$Rn, t_imm0_508s4:$imm),
let DecoderMethod = "DecodeThumbAddSPImm";
}
+def : tInstAlias<"add${p} sp, $imm",
+ (tSUBspi SP, t_imm0_508s4_neg:$imm, pred:$p)>;
+def : tInstAlias<"add${p} sp, sp, $imm",
+ (tSUBspi SP, t_imm0_508s4_neg:$imm, pred:$p)>;
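+// E.g. "add sp, sp, #-8" is matched by these aliases and encoded as
+// "sub sp, sp, #8".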
+
// Can optionally specify SP as a three operand instruction.
def : tInstAlias<"add${p} sp, sp, $imm",
(tADDspi SP, t_imm0_508s4:$imm, pred:$p)>;
@@ -387,6 +398,7 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
bits<4> Rm;
let Inst{6-3} = Rm;
let Inst{2-0} = 0b000;
+ let Unpredictable{2-0} = 0b111;
}
}
@@ -404,15 +416,13 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
// prevent stack-pointer assignments that appear immediately before calls from
// potentially appearing dead.
let isCall = 1,
- // On non-Darwin platforms R9 is callee-saved.
- Defs = [R0, R1, R2, R3, R12, LR, QQQQ0, QQQQ2, QQQQ3, CPSR, FPSCR],
- Uses = [SP] in {
+ Defs = [LR], Uses = [SP] in {
// Also used for Thumb2
def tBL : TIx2<0b11110, 0b11, 1,
(outs), (ins pred:$p, t_bltarget:$func, variable_ops), IIC_Br,
"bl${p}\t$func",
[(ARMtcall tglobaladdr:$func)]>,
- Requires<[IsThumb, IsNotDarwin]> {
+ Requires<[IsThumb]> {
bits<22> func;
let Inst{26} = func{21};
let Inst{25-16} = func{20-11};
@@ -426,7 +436,7 @@ let isCall = 1,
(outs), (ins pred:$p, t_blxtarget:$func, variable_ops), IIC_Br,
"blx${p}\t$func",
[(ARMcall tglobaladdr:$func)]>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]> {
+ Requires<[IsThumb, HasV5T]> {
bits<21> func;
let Inst{25-16} = func{20-11};
let Inst{13} = 1;
@@ -439,7 +449,7 @@ let isCall = 1,
def tBLXr : TI<(outs), (ins pred:$p, GPR:$func, variable_ops), IIC_Br,
"blx${p}\t$func",
[(ARMtcall GPR:$func)]>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]>,
+ Requires<[IsThumb, HasV5T]>,
T1Special<{1,1,1,?}> { // A6.2.3 & A8.6.24;
bits<4> func;
let Inst{6-3} = func;
@@ -450,38 +460,7 @@ let isCall = 1,
def tBX_CALL : tPseudoInst<(outs), (ins tGPR:$func, variable_ops),
4, IIC_Br,
[(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsThumb, IsThumb1Only, IsNotDarwin]>;
-}
-
-let isCall = 1,
- // On Darwin R9 is call-clobbered.
- // R7 is marked as a use to prevent frame-pointer assignments from being
- // moved above / below calls.
- Defs = [R0, R1, R2, R3, R9, R12, LR, QQQQ0, QQQQ2, QQQQ3, CPSR, FPSCR],
- Uses = [R7, SP] in {
- // Also used for Thumb2
- def tBLr9 : tPseudoExpand<(outs), (ins pred:$p, t_bltarget:$func, variable_ops),
- 4, IIC_Br, [(ARMtcall tglobaladdr:$func)],
- (tBL pred:$p, t_bltarget:$func)>,
- Requires<[IsThumb, IsDarwin]>;
-
- // ARMv5T and above, also used for Thumb2
- def tBLXi_r9 : tPseudoExpand<(outs), (ins pred:$p, t_blxtarget:$func, variable_ops),
- 4, IIC_Br, [(ARMcall tglobaladdr:$func)],
- (tBLXi pred:$p, t_blxtarget:$func)>,
- Requires<[IsThumb, HasV5T, IsDarwin]>;
-
- // Also used for Thumb2
- def tBLXr_r9 : tPseudoExpand<(outs), (ins pred:$p, GPR:$func, variable_ops),
- 2, IIC_Br, [(ARMtcall GPR:$func)],
- (tBLXr pred:$p, GPR:$func)>,
- Requires<[IsThumb, HasV5T, IsDarwin]>;
-
- // ARMv4T
- def tBXr9_CALL : tPseudoInst<(outs), (ins tGPR:$func, variable_ops),
- 4, IIC_Br,
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsThumb, IsThumb1Only, IsDarwin]>;
+ Requires<[IsThumb, IsThumb1Only]>;
}
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
@@ -523,28 +502,22 @@ let isBranch = 1, isTerminator = 1 in
// Tail calls
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
- // Darwin versions.
- let Defs = [R0, R1, R2, R3, R9, R12, QQQQ0, QQQQ2, QQQQ3, PC],
- Uses = [SP] in {
- // tTAILJMPd: Darwin version uses a Thumb2 branch (no Thumb1 tail calls
- // on Darwin), so it's in ARMInstrThumb2.td.
+ // IOS versions.
+ let Uses = [SP] in {
def tTAILJMPr : tPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
4, IIC_Br, [],
(tBX GPR:$dst, (ops 14, zero_reg))>,
- Requires<[IsThumb, IsDarwin]>;
+ Requires<[IsThumb]>;
}
- // Non-Darwin versions (the difference is R9).
- let Defs = [R0, R1, R2, R3, R12, QQQQ0, QQQQ2, QQQQ3, PC],
- Uses = [SP] in {
+ // tTAILJMPd: IOS version uses a Thumb2 branch (no Thumb1 tail calls
+ // on IOS), so it's in ARMInstrThumb2.td.
+ // Non-IOS version:
+ let Uses = [SP] in {
def tTAILJMPdND : tPseudoExpand<(outs),
(ins t_brtarget:$dst, pred:$p, variable_ops),
4, IIC_Br, [],
(tB t_brtarget:$dst, pred:$p)>,
- Requires<[IsThumb, IsNotDarwin]>;
- def tTAILJMPrND : tPseudoExpand<(outs), (ins tcGPR:$dst, variable_ops),
- 4, IIC_Br, [],
- (tBX GPR:$dst, (ops 14, zero_reg))>,
- Requires<[IsThumb, IsNotDarwin]>;
+ Requires<[IsThumb, IsNotIOS]>;
}
}
@@ -652,7 +625,7 @@ def tLDRspi : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_sp:$addr), IIC_iLoad_i,
}
// Load tconstpool
-// FIXME: Use ldr.n to work around a Darwin assembler bug.
+// FIXME: Use ldr.n to work around a darwin assembler bug.
let canFoldAsLoad = 1, isReMaterializable = 1, isCodeGenOnly = 1 in
def tLDRpci : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i,
"ldr", ".n\t$Rt, $addr",
@@ -666,10 +639,9 @@ def tLDRpci : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i,
}
// FIXME: Remove this entry when the above ldr.n workaround is fixed.
-// For disassembly use only.
-def tLDRpciDIS : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i,
- "ldr", "\t$Rt, $addr",
- [/* disassembly only */]>,
+// For assembly/disassembly use only.
+def tLDRpciASM : T1pIs<(outs tGPR:$Rt), (ins t_addrmode_pc:$addr), IIC_iLoad_i,
+ "ldr", "\t$Rt, $addr", []>,
T1Encoding<{0,1,0,0,1,?}> {
// A6.2 & A8.6.59
bits<3> Rt;
@@ -1131,9 +1103,6 @@ def tRSB : // A8.6.141
"rsb", "\t$Rd, $Rn, #0",
[(set tGPR:$Rd, (ineg tGPR:$Rn))]>;
-def : tInstAlias<"neg${s}${p} $Rd, $Rm",
- (tRSB tGPR:$Rd, s_cc_out:$s, tGPR:$Rm, pred:$p)>;
-
// Subtract with carry register
let Uses = [CPSR] in
def tSBC : // A8.6.151
@@ -1259,19 +1228,24 @@ def tTPsoft : tPseudoInst<(outs), (ins), 4, IIC_Br,
// preserve all of the callee-saved registers, which is exactly what we want.
// $val is a scratch register for our use.
let Defs = [ R0, R1, R2, R3, R4, R5, R6, R7, R12, CPSR ],
- hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in
+ hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
+ usesCustomInserter = 1 in
def tInt_eh_sjlj_setjmp : ThumbXI<(outs),(ins tGPR:$src, tGPR:$val),
AddrModeNone, 0, NoItinerary, "","",
[(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>;
-// FIXME: Non-Darwin version(s)
+// FIXME: Non-IOS version(s)
let isBarrier = 1, hasSideEffects = 1, isTerminator = 1, isCodeGenOnly = 1,
Defs = [ R7, LR, SP ] in
def tInt_eh_sjlj_longjmp : XI<(outs), (ins GPR:$src, GPR:$scratch),
AddrModeNone, 0, IndexModeNone,
Pseudo, NoItinerary, "", "",
[(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>,
- Requires<[IsThumb, IsDarwin]>;
+ Requires<[IsThumb, IsIOS]>;
+
+let Defs = [ R0, R1, R2, R3, R4, R5, R6, R7, R12, CPSR ],
+ isBarrier = 1 in
+def tInt_eh_sjlj_dispatchsetup : PseudoInst<(outs), (ins), NoItinerary, []>;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -1309,20 +1283,14 @@ def : T1Pat<(ARMWrapperJT tjumptable:$dst, imm:$id),
// Direct calls
def : T1Pat<(ARMtcall texternalsym:$func), (tBL texternalsym:$func)>,
- Requires<[IsThumb, IsNotDarwin]>;
-def : T1Pat<(ARMtcall texternalsym:$func), (tBLr9 texternalsym:$func)>,
- Requires<[IsThumb, IsDarwin]>;
+ Requires<[IsThumb]>;
def : Tv5Pat<(ARMcall texternalsym:$func), (tBLXi texternalsym:$func)>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]>;
-def : Tv5Pat<(ARMcall texternalsym:$func), (tBLXi_r9 texternalsym:$func)>,
- Requires<[IsThumb, HasV5T, IsDarwin]>;
+ Requires<[IsThumb, HasV5T]>;
// Indirect calls to ARM routines
def : Tv5Pat<(ARMcall GPR:$dst), (tBLXr GPR:$dst)>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]>;
-def : Tv5Pat<(ARMcall GPR:$dst), (tBLXr_r9 GPR:$dst)>,
- Requires<[IsThumb, HasV5T, IsDarwin]>;
+ Requires<[IsThumb, HasV5T]>;
// zextload i1 -> zextload i8
def : T1Pat<(zextloadi1 t_addrmode_rrs1:$addr),
@@ -1434,3 +1402,16 @@ def : InstAlias<"nop", (tMOVr R8, R8, 14, 0)>,Requires<[IsThumb, IsThumb1Only]>;
// nothing).
def : tInstAlias<"cps$imod", (tCPS imod_op:$imod, 0)>;
def : tInstAlias<"cps$imod", (tCPS imod_op:$imod, 0)>;
+
+// "neg" is and alias for "rsb rd, rn, #0"
+def : tInstAlias<"neg${s}${p} $Rd, $Rm",
+ (tRSB tGPR:$Rd, s_cc_out:$s, tGPR:$Rm, pred:$p)>;
+
+
+// Implied destination operand forms for shifts.
+def : tInstAlias<"lsl${s}${p} $Rdm, $imm",
+ (tLSLri tGPR:$Rdm, cc_out:$s, tGPR:$Rdm, imm0_31:$imm, pred:$p)>;
+def : tInstAlias<"lsr${s}${p} $Rdm, $imm",
+ (tLSRri tGPR:$Rdm, cc_out:$s, tGPR:$Rdm, imm_sr:$imm, pred:$p)>;
+def : tInstAlias<"asr${s}${p} $Rdm, $imm",
+ (tASRri tGPR:$Rdm, cc_out:$s, tGPR:$Rdm, imm_sr:$imm, pred:$p)>;
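+// E.g. "lsls r2, #3" is shorthand for "lsls r2, r2, #3".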
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 05dcc89..e6fb9d5 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -1,4 +1,4 @@
-//===- ARMInstrThumb2.td - Thumb2 support for ARM -------------------------===//
+//===-- ARMInstrThumb2.td - Thumb2 support for ARM ---------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -65,7 +65,7 @@ def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
// t2_so_imm - Match a 32-bit immediate operand, which is an
// 8-bit immediate rotated by an arbitrary number of bits, or an 8-bit
// immediate splatted into multiple bytes of the word.
-def t2_so_imm_asmoperand : AsmOperandClass { let Name = "T2SOImm"; }
+def t2_so_imm_asmoperand : ImmAsmOperand { let Name = "T2SOImm"; }
def t2_so_imm : Operand<i32>, ImmLeaf<i32, [{
return ARM_AM::getT2SOImmVal(Imm) != -1;
}]> {
@@ -76,26 +76,39 @@ def t2_so_imm : Operand<i32>, ImmLeaf<i32, [{
// t2_so_imm_not - Match an immediate that is a complement
// of a t2_so_imm.
-def t2_so_imm_not : Operand<i32>,
- PatLeaf<(imm), [{
+// Note: this pattern doesn't require an encoder method or the like, as it's
+// only used in aliases (Pat<> and InstAlias<>). The actual encoding
+// is handled by the destination instructions, which use t2_so_imm.
+def t2_so_imm_not_asmoperand : AsmOperandClass { let Name = "T2SOImmNot"; }
+def t2_so_imm_not : Operand<i32>, PatLeaf<(imm), [{
return ARM_AM::getT2SOImmVal(~((uint32_t)N->getZExtValue())) != -1;
-}], t2_so_imm_not_XFORM>;
+}], t2_so_imm_not_XFORM> {
+ let ParserMatchClass = t2_so_imm_not_asmoperand;
+}
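+// E.g. 0x00ab00ab, 0xab00ab00, and 0xabababab are all valid t2_so_imm
+// values; 0xffffff00 is not, but its complement 0x000000ff is, so
+// 0xffffff00 matches t2_so_imm_not.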
// t2_so_imm_neg - Match an immediate that is a negation of a t2_so_imm.
-def t2_so_imm_neg : Operand<i32>,
- PatLeaf<(imm), [{
- return ARM_AM::getT2SOImmVal(-((uint32_t)N->getZExtValue())) != -1;
-}], t2_so_imm_neg_XFORM>;
+def t2_so_imm_neg_asmoperand : AsmOperandClass { let Name = "T2SOImmNeg"; }
+def t2_so_imm_neg : Operand<i32>, PatLeaf<(imm), [{
+ int64_t Value = -(int)N->getZExtValue();
+ return Value && ARM_AM::getT2SOImmVal(Value) != -1;
+}], t2_so_imm_neg_XFORM> {
+ let ParserMatchClass = t2_so_imm_neg_asmoperand;
+}
/// imm0_4095 predicate - True if the 32-bit immediate is in the range [0,4095].
-def imm0_4095 : Operand<i32>,
- ImmLeaf<i32, [{
+def imm0_4095_asmoperand: ImmAsmOperand { let Name = "Imm0_4095"; }
+def imm0_4095 : Operand<i32>, ImmLeaf<i32, [{
return Imm >= 0 && Imm < 4096;
-}]>;
+}]> {
+ let ParserMatchClass = imm0_4095_asmoperand;
+}
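+// E.g. the 12-bit "addw"/"subw" encodings (t2ADDri12/t2SUBri12) take an
+// immediate of this form.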
-def imm0_4095_neg : PatLeaf<(i32 imm), [{
+def imm0_4095_neg_asmoperand: AsmOperandClass { let Name = "Imm0_4095Neg"; }
+def imm0_4095_neg : Operand<i32>, PatLeaf<(i32 imm), [{
return (uint32_t)(-N->getZExtValue()) < 4096;
-}], imm_neg_XFORM>;
+}], imm_neg_XFORM> {
+ let ParserMatchClass = imm0_4095_neg_asmoperand;
+}
def imm0_255_neg : PatLeaf<(i32 imm), [{
return (uint32_t)(-N->getZExtValue()) < 255;
@@ -129,6 +142,12 @@ def t2ldrlabel : Operand<i32> {
let PrintMethod = "printT2LdrLabelOperand";
}
+def t2ldr_pcrel_imm12_asmoperand : AsmOperandClass {let Name = "MemPCRelImm12";}
+def t2ldr_pcrel_imm12 : Operand<i32> {
+ let ParserMatchClass = t2ldr_pcrel_imm12_asmoperand;
+ // Used for an assembler pseudo-instruction that maps to t2ldrlabel, so it
+ // doesn't need encoder or print methods of its own.
+}
// ADR instruction labels.
def t2adrlabel : Operand<i32> {
@@ -545,6 +564,11 @@ multiclass T2I_bin_w_irs<bits<4> opcod, string opc,
InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
PatFrag opnode, string baseOpc, bit Commutable = 0> :
T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, baseOpc, Commutable, ".w"> {
+ // Assembler aliases w/ the ".w" suffix.
+ def : t2InstAlias<!strconcat(opc, "${s}${p}.w", " $Rd, $Rn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rd, rGPR:$Rn,
+ t2_so_imm:$imm, pred:$p,
+ cc_out:$s)>;
// Assembler aliases w/o the ".w" suffix.
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rd, $Rn, $Rm"),
(!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rd, rGPR:$Rn,
@@ -556,6 +580,10 @@ multiclass T2I_bin_w_irs<bits<4> opcod, string opc,
cc_out:$s)>;
// and with the optional destination operand, too.
+ def : t2InstAlias<!strconcat(opc, "${s}${p}.w", " $Rdn, $imm"),
+ (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rdn, rGPR:$Rdn,
+ t2_so_imm:$imm, pred:$p,
+ cc_out:$s)>;
def : t2InstAlias<!strconcat(opc, "${s}${p}", " $Rdn, $Rm"),
(!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rdn, rGPR:$Rdn,
rGPR:$Rm, pred:$p,
@@ -608,25 +636,48 @@ multiclass T2I_rbin_irs<bits<4> opcod, string opc, PatFrag opnode> {
///
/// These opcodes will be converted to the real non-S opcodes by
/// AdjustInstrPostInstrSelection after giving them an optional CPSR operand.
-let hasPostISelHook = 1, isCodeGenOnly = 1, isPseudo = 1, Defs = [CPSR] in {
-multiclass T2I_bin_s_irs<bits<4> opcod, string opc,
- InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
- PatFrag opnode, bit Commutable = 0> {
+let hasPostISelHook = 1, Defs = [CPSR] in {
+multiclass T2I_bin_s_irs<InstrItinClass iii, InstrItinClass iir,
+ InstrItinClass iis, PatFrag opnode,
+ bit Commutable = 0> {
// shifted imm
- def ri : T2sTwoRegImm<
- (outs rGPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm), iii,
- opc, ".w\t$Rd, $Rn, $imm",
- [(set rGPR:$Rd, CPSR, (opnode GPR:$Rn, t2_so_imm:$imm))]>;
+ def ri : t2PseudoInst<(outs rGPR:$Rd),
+ (ins GPRnopc:$Rn, t2_so_imm:$imm, pred:$p),
+ 4, iii,
+ [(set rGPR:$Rd, CPSR, (opnode GPRnopc:$Rn,
+ t2_so_imm:$imm))]>;
// register
- def rr : T2sThreeReg<
- (outs rGPR:$Rd), (ins GPR:$Rn, rGPR:$Rm), iir,
- opc, ".w\t$Rd, $Rn, $Rm",
- [(set rGPR:$Rd, CPSR, (opnode GPR:$Rn, rGPR:$Rm))]>;
+ def rr : t2PseudoInst<(outs rGPR:$Rd), (ins GPRnopc:$Rn, rGPR:$Rm, pred:$p),
+ 4, iir,
+ [(set rGPR:$Rd, CPSR, (opnode GPRnopc:$Rn,
+ rGPR:$Rm))]> {
+ let isCommutable = Commutable;
+ }
// shifted register
- def rs : T2sTwoRegShiftedReg<
- (outs rGPR:$Rd), (ins GPR:$Rn, t2_so_reg:$ShiftedRm), iis,
- opc, ".w\t$Rd, $Rn, $ShiftedRm",
- [(set rGPR:$Rd, CPSR, (opnode GPR:$Rn, t2_so_reg:$ShiftedRm))]>;
+ def rs : t2PseudoInst<(outs rGPR:$Rd),
+ (ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm, pred:$p),
+ 4, iis,
+ [(set rGPR:$Rd, CPSR, (opnode GPRnopc:$Rn,
+ t2_so_reg:$ShiftedRm))]>;
+}
+}
+
+/// T2I_rbin_s_is - Same as T2I_bin_s_irs, except selection DAG
+/// operands are reversed.
+let hasPostISelHook = 1, Defs = [CPSR] in {
+multiclass T2I_rbin_s_is<PatFrag opnode> {
+ // shifted imm
+ def ri : t2PseudoInst<(outs rGPR:$Rd),
+ (ins GPRnopc:$Rn, t2_so_imm:$imm, pred:$p),
+ 4, IIC_iALUi,
+ [(set rGPR:$Rd, CPSR, (opnode t2_so_imm:$imm,
+ GPRnopc:$Rn))]>;
+ // shifted register
+ def rs : t2PseudoInst<(outs rGPR:$Rd),
+ (ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm, pred:$p),
+ 4, IIC_iALUsi,
+ [(set rGPR:$Rd, CPSR, (opnode t2_so_reg:$ShiftedRm,
+ GPRnopc:$Rn))]>;
}
}
@@ -735,26 +786,6 @@ multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
}
}
-/// T2I_rbin_s_is - Same as T2I_rbin_irs except sets 's' bit and the register
-/// version is not needed since this is only for codegen.
-///
-/// These opcodes will be converted to the real non-S opcodes by
-/// AdjustInstrPostInstrSelection after giving then an optional CPSR operand.
-let hasPostISelHook = 1, isCodeGenOnly = 1, isPseudo = 1, Defs = [CPSR] in {
-multiclass T2I_rbin_s_is<bits<4> opcod, string opc, PatFrag opnode> {
- // shifted imm
- def ri : T2sTwoRegImm<
- (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), IIC_iALUi,
- opc, ".w\t$Rd, $Rn, $imm",
- [(set rGPR:$Rd, CPSR, (opnode t2_so_imm:$imm, rGPR:$Rn))]>;
- // shifted register
- def rs : T2sTwoRegShiftedReg<
- (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_reg:$ShiftedRm),
- IIC_iALUsi, opc, "\t$Rd, $Rn, $ShiftedRm",
- [(set rGPR:$Rd, CPSR, (opnode t2_so_reg:$ShiftedRm, rGPR:$Rn))]>;
-}
-}
-
/// T2I_sh_ir - Defines a set of (op reg, {so_imm|r}) patterns for a shift /
// rotate operation that produces a value.
multiclass T2I_sh_ir<bits<2> opcod, string opc, Operand ty, PatFrag opnode,
@@ -930,7 +961,8 @@ multiclass T2I_ld<bit signed, bits<2> opcod, string opc,
let DecoderMethod = "DecodeT2LoadShift";
}
- // FIXME: Is the pci variant actually needed?
+ // The pci variant is very similar to i12, but supports negative offsets
+ // from the PC.
def pci : T2Ipc <(outs target:$Rt), (ins t2ldrlabel:$addr), iii,
opc, ".w\t$Rt, $addr",
[(set target:$Rt, (opnode (ARMWrapper tconstpool:$addr)))]> {
@@ -1315,14 +1347,16 @@ defm t2STRH:T2I_st<0b01,"strh", IIC_iStore_bh_i, IIC_iStore_bh_si,
rGPR, BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>;
// Store doubleword
-let mayLoad = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
+let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in
def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs),
(ins GPR:$Rt, GPR:$Rt2, t2addrmode_imm8s4:$addr),
IIC_iStore_d_r, "strd", "\t$Rt, $Rt2, $addr", "", []>;
// Indexed stores
+
+let mayStore = 1, neverHasSideEffects = 1 in {
def t2STR_PRE : T2Ipreldst<0, 0b10, 0, 1, (outs GPRnopc:$Rn_wb),
- (ins rGPR:$Rt, t2addrmode_imm8:$addr),
+ (ins GPRnopc:$Rt, t2addrmode_imm8:$addr),
AddrModeT2_i8, IndexModePre, IIC_iStore_iu,
"str", "\t$Rt, $addr!",
"$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
@@ -1343,15 +1377,16 @@ def t2STRB_PRE : T2Ipreldst<0, 0b00, 0, 1, (outs GPRnopc:$Rn_wb),
"$addr.base = $Rn_wb,@earlyclobber $Rn_wb", []> {
let AsmMatchConverter = "cvtStWriteBackRegT2AddrModeImm8";
}
+} // mayStore = 1, neverHasSideEffects = 1
def t2STR_POST : T2Ipostldst<0, 0b10, 0, 0, (outs GPRnopc:$Rn_wb),
- (ins rGPR:$Rt, addr_offset_none:$Rn,
+ (ins GPRnopc:$Rt, addr_offset_none:$Rn,
t2am_imm8_offset:$offset),
AddrModeT2_i8, IndexModePost, IIC_iStore_iu,
"str", "\t$Rt, $Rn$offset",
"$Rn = $Rn_wb,@earlyclobber $Rn_wb",
[(set GPRnopc:$Rn_wb,
- (post_store rGPR:$Rt, addr_offset_none:$Rn,
+ (post_store GPRnopc:$Rt, addr_offset_none:$Rn,
t2am_imm8_offset:$offset))]>;
def t2STRH_POST : T2Ipostldst<0, 0b01, 0, 0, (outs GPRnopc:$Rn_wb),
@@ -1398,7 +1433,6 @@ def t2STRH_preidx: t2PseudoInst<(outs GPRnopc:$Rn_wb),
(pre_truncsti16 rGPR:$Rt, GPRnopc:$Rn, t2am_imm8_offset:$offset))]>;
}
-
// STRT, STRBT, STRHT all have offset mode (PUW=0b110) and are for disassembly
// only.
// Ref: A8.6.193 STR (immediate, Thumb) Encoding T4
@@ -1455,7 +1489,7 @@ def t2STRD_POST : T2Ii8s4post<0, 1, 0, (outs GPR:$wb),
"$addr.base = $wb", []>;
// T2Ipl (Preload Data/Instruction) signals the memory system of possible future
-// data/instruction access. These are for disassembly only.
+// data/instruction access.
// instr_write is inverted for Thumb mode: (prefetch 3) -> (preload 0),
// (prefetch 1) -> (preload 2), (prefetch 2) -> (preload 1).
multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> {
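
The inversion noted in the comment above is just i -> 3 - i over the listed
values; a minimal C++ sketch (prefetchToPreload is a hypothetical helper, not
code from this patch; the 0 -> 3 case is implied by the pattern rather than
stated in the comment):

    // Map the IR prefetch argument to the Thumb2 preload encoding.
    static unsigned prefetchToPreload(unsigned Prefetch) {
      return 3 - Prefetch; // 3->0, 2->1, 1->2
    }
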
@@ -1513,6 +1547,10 @@ multiclass T2Ipl<bits<1> write, bits<1> instr, string opc> {
let DecoderMethod = "DecodeT2LoadShift";
}
+ // FIXME: We should have a separate 'pci' variant here. As-is we represent
+ // it via the related i12 variant, but that means we can represent negative
+ // immediates, which aren't legal for anything except the 'pci' case
+ // (Rn == 15).
}
defm t2PLD : T2Ipl<0, 0, "pld">, Requires<[IsThumb2]>;
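
To make the FIXME concrete, here is a sketch of the legality rule it
describes, assuming the usual 12-bit literal range (isValidI12Offset is a
hypothetical name, not an LLVM API):

    // For the i12 form, a negative offset is only valid in the literal
    // ('pci', Rn == 15) case.
    static bool isValidI12Offset(unsigned Rn, int Imm) {
      const unsigned PC = 15;
      if (Imm < 0)
        return Rn == PC && Imm >= -4095;
      return Imm <= 4095;
    }
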
@@ -1689,6 +1727,8 @@ def t2MOVr : T2sTwoReg<(outs GPRnopc:$Rd), (ins GPR:$Rm), IIC_iMOVr,
let Inst{14-12} = 0b000;
let Inst{7-4} = 0b0000;
}
+def : t2InstAlias<"mov${p}.w $Rd, $Rm", (t2MOVr GPRnopc:$Rd, GPR:$Rm,
+ pred:$p, zero_reg)>;
def : t2InstAlias<"movs${p}.w $Rd, $Rm", (t2MOVr GPRnopc:$Rd, GPR:$Rm,
pred:$p, CPSR)>;
def : t2InstAlias<"movs${p} $Rd, $Rm", (t2MOVr GPRnopc:$Rd, GPR:$Rm,
@@ -1837,11 +1877,9 @@ defm t2SUB : T2I_bin_ii12rs<0b101, "sub",
// FIXME: Eliminate t2ADDS/t2SUBS pseudo opcodes after adding tablegen
// support for an optional CPSR definition that corresponds to the DAG
// node's second value. We can then eliminate the implicit def of CPSR.
-defm t2ADDS : T2I_bin_s_irs <0b1000, "add",
- IIC_iALUi, IIC_iALUr, IIC_iALUsi,
+defm t2ADDS : T2I_bin_s_irs <IIC_iALUi, IIC_iALUr, IIC_iALUsi,
BinOpFrag<(ARMaddc node:$LHS, node:$RHS)>, 1>;
-defm t2SUBS : T2I_bin_s_irs <0b1101, "sub",
- IIC_iALUi, IIC_iALUr, IIC_iALUsi,
+defm t2SUBS : T2I_bin_s_irs <IIC_iALUi, IIC_iALUr, IIC_iALUsi,
BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
let hasPostISelHook = 1 in {
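
The "second value" in the FIXME above is the flags result of ARMaddc/ARMsubc;
a minimal C++ model of what the implicit CPSR def stands in for (illustrative
only, not LLVM code):

    #include <cstdint>
    struct FlagResult { uint32_t Value; bool Flag; };
    static FlagResult addc(uint32_t A, uint32_t B) {
      uint32_t Sum = A + B;
      return { Sum, Sum < A };  // carry out, modeled by the CPSR def
    }
    static FlagResult subc(uint32_t A, uint32_t B) {
      return { A - B, A >= B }; // ARM carry flag = NOT borrow
    }
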
@@ -1857,8 +1895,7 @@ defm t2RSB : T2I_rbin_irs <0b1110, "rsb",
// FIXME: Eliminate them if we can write def : Pat patterns which defines
// CPSR and the implicit def of CPSR is not needed.
-defm t2RSBS : T2I_rbin_s_is <0b1110, "rsb",
- BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
+defm t2RSBS : T2I_rbin_s_is <BinOpFrag<(ARMsubc node:$LHS, node:$RHS)>>;
// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
// The assume-no-carry-in form uses the negation of the input since add/sub
@@ -2840,6 +2877,8 @@ defm t2TEQ : T2I_cmp_irs<0b0100, "teq",
// FIXME: should be able to write a pattern for ARMcmov, but can't use
// a two-value operand where a dag node expects two operands. :(
let neverHasSideEffects = 1 in {
+
+let isCommutable = 1 in
def t2MOVCCr : t2PseudoInst<(outs rGPR:$Rd),
(ins rGPR:$false, rGPR:$Rm, pred:$p),
4, IIC_iCMOVr,
@@ -2883,7 +2922,7 @@ def t2MOVCCi32imm : PseudoInst<(outs rGPR:$dst),
let isMoveImm = 1 in
def t2MVNCCi : T2OneRegImm<(outs rGPR:$Rd), (ins rGPR:$false, t2_so_imm:$imm),
- IIC_iCMOVi, "mvn", ".w\t$Rd, $imm",
+ IIC_iCMOVi, "mvn", "\t$Rd, $imm",
[/*(set rGPR:$Rd,(ARMcmov rGPR:$false,t2_so_imm_not:$imm,
imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $Rd"> {
@@ -2922,6 +2961,35 @@ def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$Rd),
IIC_iCMOVsi, "ror", ".w\t$Rd, $Rm, $imm", []>,
RegConstraint<"$false = $Rd">;
} // isCodeGenOnly = 1
+
+multiclass T2I_bincc_irs<Instruction iri, Instruction irr, Instruction irs,
+ InstrItinClass iii, InstrItinClass iir, InstrItinClass iis> {
+ // shifted imm
+ def ri : t2PseudoExpand<(outs rGPR:$Rd),
+ (ins rGPR:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s),
+ 4, iii, [],
+ (iri rGPR:$Rd, rGPR:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rn = $Rd">;
+ // register
+ def rr : t2PseudoExpand<(outs rGPR:$Rd),
+ (ins rGPR:$Rn, rGPR:$Rm, pred:$p, cc_out:$s),
+ 4, iir, [],
+ (irr rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rn = $Rd">;
+ // shifted register
+ def rs : t2PseudoExpand<(outs rGPR:$Rd),
+ (ins rGPR:$Rn, t2_so_reg:$ShiftedRm, pred:$p, cc_out:$s),
+ 4, iis, [],
+ (irs rGPR:$Rd, rGPR:$Rn, t2_so_reg:$ShiftedRm, pred:$p, cc_out:$s)>,
+ RegConstraint<"$Rn = $Rd">;
+} // T2I_bincc_irs
+
+defm t2ANDCC : T2I_bincc_irs<t2ANDri, t2ANDrr, t2ANDrs,
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi>;
+defm t2ORRCC : T2I_bincc_irs<t2ORRri, t2ORRrr, t2ORRrs,
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi>;
+defm t2EORCC : T2I_bincc_irs<t2EORri, t2EORrr, t2EORrs,
+ IIC_iBITi, IIC_iBITr, IIC_iBITsi>;
} // neverHasSideEffects
//===----------------------------------------------------------------------===//
@@ -3043,9 +3111,7 @@ def t2STREX : Thumb2I<(outs rGPR:$Rd), (ins rGPR:$Rt,
let Inst{11-8} = Rd;
let Inst{7-0} = addr{7-0};
}
-}
-
-let hasExtraSrcRegAllocReq = 1, Constraints = "@earlyclobber $Rd" in
+let hasExtraSrcRegAllocReq = 1 in
def t2STREXD : T2I_strex<0b11, (outs rGPR:$Rd),
(ins rGPR:$Rt, rGPR:$Rt2, addr_offset_none:$addr),
AddrModeNone, 4, NoItinerary,
@@ -3054,6 +3120,7 @@ def t2STREXD : T2I_strex<0b11, (outs rGPR:$Rd),
bits<4> Rt2;
let Inst{11-8} = Rt2;
}
+}
def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", []>,
Requires<[IsThumb2, HasV7]> {
@@ -3081,8 +3148,9 @@ def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "", []>,
// $val is a scratch register for our use.
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR,
- QQQQ0, QQQQ1, QQQQ2, QQQQ3 ],
- hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in {
+ Q0, Q1, Q2, Q3, Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15],
+ hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
+ usesCustomInserter = 1 in {
def t2Int_eh_sjlj_setjmp : Thumb2XI<(outs), (ins tGPR:$src, tGPR:$val),
AddrModeNone, 0, NoItinerary, "", "",
[(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>,
@@ -3091,7 +3159,8 @@ let Defs =
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR ],
- hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in {
+ hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
+ usesCustomInserter = 1 in {
def t2Int_eh_sjlj_setjmp_nofp : Thumb2XI<(outs), (ins tGPR:$src, tGPR:$val),
AddrModeNone, 0, NoItinerary, "", "",
[(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>,
@@ -3128,6 +3197,7 @@ def t2B : T2I<(outs), (ins uncondbrtarget:$target), IIC_Br,
let Inst{13} = target{17};
let Inst{21-16} = target{16-11};
let Inst{10-0} = target{10-0};
+ let DecoderMethod = "DecodeT2BInstruction";
}
let isNotDuplicable = 1, isIndirectBranch = 1 in {
@@ -3195,19 +3265,32 @@ def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br,
let DecoderMethod = "DecodeThumb2BCCInstruction";
}
-// Tail calls. The Darwin version of thumb tail calls uses a t2 branch, so
+// Tail calls. The IOS version of thumb tail calls uses a t2 branch, so
// it goes here.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
- // Darwin version.
- let Defs = [R0, R1, R2, R3, R9, R12, QQQQ0, QQQQ2, QQQQ3, PC],
- Uses = [SP] in
+ // IOS version.
+ let Uses = [SP] in
def tTAILJMPd: tPseudoExpand<(outs),
(ins uncondbrtarget:$dst, pred:$p, variable_ops),
4, IIC_Br, [],
(t2B uncondbrtarget:$dst, pred:$p)>,
- Requires<[IsThumb2, IsDarwin]>;
+ Requires<[IsThumb2, IsIOS]>;
}
+let isCall = 1, Defs = [LR], Uses = [SP] in {
+ // mov lr, pc; b if callee is marked noreturn to avoid confusing the
+ // return stack predictor.
+ def t2BMOVPCB_CALL : tPseudoInst<(outs),
+ (ins t_bltarget:$func, variable_ops),
+ 6, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>,
+ Requires<[IsThumb]>;
+}
+
+// Direct calls
+def : T2Pat<(ARMcall_nolink texternalsym:$func),
+ (t2BMOVPCB_CALL texternalsym:$func)>,
+ Requires<[IsThumb]>;
+
// IT block
let Defs = [ITSTATE] in
def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask),
@@ -3430,7 +3513,7 @@ def t2LDRpci_pic : PseudoInst<(outs rGPR:$dst), (ins i32imm:$addr, pclabel:$cp),
imm:$cp))]>,
Requires<[IsThumb2]>;
-// Pseudo isntruction that combines movs + predicated rsbmi
+// Pseudo instruction that combines movs + predicated rsbmi
// to implement integer ABS
let usesCustomInserter = 1, Defs = [CPSR] in {
def t2ABS : PseudoInst<(outs rGPR:$dst), (ins rGPR:$src),
@@ -3667,20 +3750,32 @@ def t2MCR : t2MovRCopro<0b1110, "mcr", 0,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>;
+def : t2InstAlias<"mcr $cop, $opc1, $Rt, $CRn, $CRm",
+ (t2MCR p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
+ c_imm:$CRm, 0)>;
def t2MCR2 : t2MovRCopro<0b1111, "mcr2", 0,
(outs), (ins p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2),
[(int_arm_mcr2 imm:$cop, imm:$opc1, GPR:$Rt, imm:$CRn,
imm:$CRm, imm:$opc2)]>;
+def : t2InstAlias<"mcr2 $cop, $opc1, $Rt, $CRn, $CRm",
+ (t2MCR2 p_imm:$cop, imm0_7:$opc1, GPR:$Rt, c_imm:$CRn,
+ c_imm:$CRm, 0)>;
/* from coprocessor to ARM core register */
def t2MRC : t2MovRCopro<0b1110, "mrc", 1,
(outs GPR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2), []>;
+def : t2InstAlias<"mrc $cop, $opc1, $Rt, $CRn, $CRm",
+ (t2MRC GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ c_imm:$CRm, 0)>;
def t2MRC2 : t2MovRCopro<0b1111, "mrc2", 1,
(outs GPR:$Rt), (ins p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
c_imm:$CRm, imm0_7:$opc2), []>;
+def : t2InstAlias<"mrc2 $cop, $opc1, $Rt, $CRn, $CRm",
+ (t2MRC2 GPR:$Rt, p_imm:$cop, imm0_7:$opc1, c_imm:$CRn,
+ c_imm:$CRm, 0)>;
def : T2v6Pat<(int_arm_mrc imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2),
(t2MRC imm:$cop, imm:$opc1, imm:$CRn, imm:$CRm, imm:$opc2)>;
@@ -3851,6 +3946,29 @@ def : t2InstAlias<"add${s}${p} $Rd, $Rn, $Rm",
def : t2InstAlias<"add${s}${p} $Rd, $Rn, $ShiftedRm",
(t2ADDrs GPRnopc:$Rd, GPRnopc:$Rn, t2_so_reg:$ShiftedRm,
pred:$p, cc_out:$s)>;
+// ... and with the destination and source register combined.
+def : t2InstAlias<"add${s}${p} $Rdn, $imm",
+ (t2ADDri GPRnopc:$Rdn, GPRnopc:$Rdn, t2_so_imm:$imm, pred:$p, cc_out:$s)>;
+def : t2InstAlias<"add${p} $Rdn, $imm",
+ (t2ADDri12 GPRnopc:$Rdn, GPRnopc:$Rdn, imm0_4095:$imm, pred:$p)>;
+def : t2InstAlias<"add${s}${p} $Rdn, $Rm",
+ (t2ADDrr GPRnopc:$Rdn, GPRnopc:$Rdn, rGPR:$Rm, pred:$p, cc_out:$s)>;
+def : t2InstAlias<"add${s}${p} $Rdn, $ShiftedRm",
+ (t2ADDrs GPRnopc:$Rdn, GPRnopc:$Rdn, t2_so_reg:$ShiftedRm,
+ pred:$p, cc_out:$s)>;
+
+// add w/ negative immediates is just a sub.
+def : t2InstAlias<"add${s}${p} $Rd, $Rn, $imm",
+ (t2SUBri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm_neg:$imm, pred:$p,
+ cc_out:$s)>;
+def : t2InstAlias<"add${p} $Rd, $Rn, $imm",
+ (t2SUBri12 GPRnopc:$Rd, GPR:$Rn, imm0_4095_neg:$imm, pred:$p)>;
+def : t2InstAlias<"add${s}${p} $Rdn, $imm",
+ (t2SUBri GPRnopc:$Rdn, GPRnopc:$Rdn, t2_so_imm_neg:$imm, pred:$p,
+ cc_out:$s)>;
+def : t2InstAlias<"add${p} $Rdn, $imm",
+ (t2SUBri12 GPRnopc:$Rdn, GPRnopc:$Rdn, imm0_4095_neg:$imm, pred:$p)>;
+
// Aliases for SUB without the ".w" optional width specifier.
def : t2InstAlias<"sub${s}${p} $Rd, $Rn, $imm",
@@ -3862,6 +3980,18 @@ def : t2InstAlias<"sub${s}${p} $Rd, $Rn, $Rm",
def : t2InstAlias<"sub${s}${p} $Rd, $Rn, $ShiftedRm",
(t2SUBrs GPRnopc:$Rd, GPRnopc:$Rn, t2_so_reg:$ShiftedRm,
pred:$p, cc_out:$s)>;
+// ... and with the destination and source register combined.
+def : t2InstAlias<"sub${s}${p} $Rdn, $imm",
+ (t2SUBri GPRnopc:$Rdn, GPRnopc:$Rdn, t2_so_imm:$imm, pred:$p, cc_out:$s)>;
+def : t2InstAlias<"sub${p} $Rdn, $imm",
+ (t2SUBri12 GPRnopc:$Rdn, GPRnopc:$Rdn, imm0_4095:$imm, pred:$p)>;
+def : t2InstAlias<"sub${s}${p}.w $Rdn, $Rm",
+ (t2SUBrr GPRnopc:$Rdn, GPRnopc:$Rdn, rGPR:$Rm, pred:$p, cc_out:$s)>;
+def : t2InstAlias<"sub${s}${p} $Rdn, $Rm",
+ (t2SUBrr GPRnopc:$Rdn, GPRnopc:$Rdn, rGPR:$Rm, pred:$p, cc_out:$s)>;
+def : t2InstAlias<"sub${s}${p} $Rdn, $ShiftedRm",
+ (t2SUBrs GPRnopc:$Rdn, GPRnopc:$Rdn, t2_so_reg:$ShiftedRm,
+ pred:$p, cc_out:$s)>;
// Alias for compares without the ".w" optional width specifier.
def : t2InstAlias<"cmn${p} $Rn, $Rm",
@@ -3900,7 +4030,20 @@ def : t2InstAlias<"ldrsb${p} $Rt, $addr",
def : t2InstAlias<"ldrsh${p} $Rt, $addr",
(t2LDRSHs rGPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
-// Alias for MVN without the ".w" optional width specifier.
+def : t2InstAlias<"ldr${p} $Rt, $addr",
+ (t2LDRpci GPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
+def : t2InstAlias<"ldrb${p} $Rt, $addr",
+ (t2LDRBpci rGPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
+def : t2InstAlias<"ldrh${p} $Rt, $addr",
+ (t2LDRHpci rGPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
+def : t2InstAlias<"ldrsb${p} $Rt, $addr",
+ (t2LDRSBpci rGPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
+def : t2InstAlias<"ldrsh${p} $Rt, $addr",
+ (t2LDRSHpci rGPR:$Rt, t2ldrlabel:$addr, pred:$p)>;
+
+// Alias for MVN with(out) the ".w" optional width specifier.
+def : t2InstAlias<"mvn${s}${p}.w $Rd, $imm",
+ (t2MVNi rGPR:$Rd, t2_so_imm:$imm, pred:$p, cc_out:$s)>;
def : t2InstAlias<"mvn${s}${p} $Rd, $Rm",
(t2MVNr rGPR:$Rd, rGPR:$Rm, pred:$p, cc_out:$s)>;
def : t2InstAlias<"mvn${s}${p} $Rd, $ShiftedRm",
@@ -3921,6 +4064,30 @@ def : t2InstAlias<"push${p} $regs", (t2STMDB_UPD SP, pred:$p, reglist:$regs)>;
def : t2InstAlias<"pop${p}.w $regs", (t2LDMIA_UPD SP, pred:$p, reglist:$regs)>;
def : t2InstAlias<"pop${p} $regs", (t2LDMIA_UPD SP, pred:$p, reglist:$regs)>;
+// STMIA/STMIA_UPD aliases w/o the optional .w suffix
+def : t2InstAlias<"stm${p} $Rn, $regs",
+ (t2STMIA GPR:$Rn, pred:$p, reglist:$regs)>;
+def : t2InstAlias<"stm${p} $Rn!, $regs",
+ (t2STMIA_UPD GPR:$Rn, pred:$p, reglist:$regs)>;
+
+// LDMIA/LDMIA_UPD aliases w/o the optional .w suffix
+def : t2InstAlias<"ldm${p} $Rn, $regs",
+ (t2LDMIA GPR:$Rn, pred:$p, reglist:$regs)>;
+def : t2InstAlias<"ldm${p} $Rn!, $regs",
+ (t2LDMIA_UPD GPR:$Rn, pred:$p, reglist:$regs)>;
+
+// STMDB/STMDB_UPD aliases w/ the optional .w suffix
+def : t2InstAlias<"stmdb${p}.w $Rn, $regs",
+ (t2STMDB GPR:$Rn, pred:$p, reglist:$regs)>;
+def : t2InstAlias<"stmdb${p}.w $Rn!, $regs",
+ (t2STMDB_UPD GPR:$Rn, pred:$p, reglist:$regs)>;
+
+// LDMDB/LDMDB_UPD aliases w/ the optional .w suffix
+def : t2InstAlias<"ldmdb${p}.w $Rn, $regs",
+ (t2LDMDB GPR:$Rn, pred:$p, reglist:$regs)>;
+def : t2InstAlias<"ldmdb${p}.w $Rn!, $regs",
+ (t2LDMDB_UPD GPR:$Rn, pred:$p, reglist:$regs)>;
+
// Alias for REV/REV16/REVSH without the ".w" optional width specifier.
def : t2InstAlias<"rev${p} $Rd, $Rm", (t2REV rGPR:$Rd, rGPR:$Rm, pred:$p)>;
def : t2InstAlias<"rev16${p} $Rd, $Rm", (t2REV16 rGPR:$Rd, rGPR:$Rm, pred:$p)>;
@@ -4016,3 +4183,87 @@ def : t2InstAlias<"sxtb16${p} $Rd, $Rm$rot",
(t2SXTB16 rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
def : t2InstAlias<"sxth${p} $Rd, $Rm$rot",
(t2SXTH rGPR:$Rd, rGPR:$Rm, rot_imm:$rot, pred:$p)>;
+
+
+// "mov Rd, t2_so_imm_not" can be handled via "mvn" in assembly, just like
+// for isel.
+def : t2InstAlias<"mov${p} $Rd, $imm",
+ (t2MVNi rGPR:$Rd, t2_so_imm_not:$imm, pred:$p, zero_reg)>;
+def : t2InstAlias<"mvn${p} $Rd, $imm",
+ (t2MOVi rGPR:$Rd, t2_so_imm_not:$imm, pred:$p, zero_reg)>;
+// Same for AND <--> BIC
+def : t2InstAlias<"bic${s}${p} $Rd, $Rn, $imm",
+ (t2ANDri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstAlias<"bic${s}${p} $Rdn, $imm",
+ (t2ANDri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstAlias<"and${s}${p} $Rd, $Rn, $imm",
+ (t2BICri rGPR:$Rd, rGPR:$Rn, so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstAlias<"and${s}${p} $Rdn, $imm",
+ (t2BICri rGPR:$Rdn, rGPR:$Rdn, so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+// Likewise, "add Rd, t2_so_imm_neg" -> sub
+def : t2InstAlias<"add${s}${p} $Rd, $Rn, $imm",
+ (t2SUBri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm_neg:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstAlias<"add${s}${p} $Rd, $imm",
+ (t2SUBri GPRnopc:$Rd, GPRnopc:$Rd, t2_so_imm_neg:$imm,
+ pred:$p, cc_out:$s)>;
+// Same for CMP <--> CMN via t2_so_imm_neg
+def : t2InstAlias<"cmp${p} $Rd, $imm",
+ (t2CMNzri rGPR:$Rd, t2_so_imm_neg:$imm, pred:$p)>;
+def : t2InstAlias<"cmn${p} $Rd, $imm",
+ (t2CMPri rGPR:$Rd, t2_so_imm_neg:$imm, pred:$p)>;
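
All of these alias pairs rest on the same two's-complement identities; a
small compile-time check in C++ (illustrative; the asserts assume ordinary
32-bit unsigned wraparound, which the casts make explicit):

    #include <cstdint>
    // mov rd, #imm     <-> mvn rd, #~imm
    // and rd, rn, #imm <-> bic rd, rn, #~imm
    // add rd, rn, #imm <-> sub rd, rn, #-imm   (and cmp <-> cmn likewise)
    static_assert(static_cast<uint32_t>(~UINT32_C(0xFF)) == UINT32_C(0xFFFFFF00),
                  "complement form");
    static_assert(static_cast<uint32_t>(-INT64_C(8)) == UINT32_C(0xFFFFFFF8),
                  "negation form");
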
+
+
+// Wide 'mul' encoding can be specified with only two operands.
+def : t2InstAlias<"mul${p} $Rn, $Rm",
+ (t2MUL rGPR:$Rn, rGPR:$Rm, rGPR:$Rn, pred:$p)>;
+
+// "neg" is and alias for "rsb rd, rn, #0"
+def : t2InstAlias<"neg${s}${p} $Rd, $Rm",
+ (t2RSBri rGPR:$Rd, rGPR:$Rm, 0, pred:$p, cc_out:$s)>;
+
+// MOV so_reg assembler pseudos. InstAlias isn't expressive enough for
+// these, unfortunately.
+def t2MOVsi: t2AsmPseudo<"mov${p} $Rd, $shift",
+ (ins rGPR:$Rd, t2_so_reg:$shift, pred:$p)>;
+def t2MOVSsi: t2AsmPseudo<"movs${p} $Rd, $shift",
+ (ins rGPR:$Rd, t2_so_reg:$shift, pred:$p)>;
+
+def t2MOVsr: t2AsmPseudo<"mov${p} $Rd, $shift",
+ (ins rGPR:$Rd, so_reg_reg:$shift, pred:$p)>;
+def t2MOVSsr: t2AsmPseudo<"movs${p} $Rd, $shift",
+ (ins rGPR:$Rd, so_reg_reg:$shift, pred:$p)>;
+
+// ADR w/o the .w suffix
+def : t2InstAlias<"adr${p} $Rd, $addr",
+ (t2ADR rGPR:$Rd, t2adrlabel:$addr, pred:$p)>;
+
+// LDR(literal) w/ alternate [pc, #imm] syntax.
+def t2LDRpcrel : t2AsmPseudo<"ldr${p} $Rt, $addr",
+ (ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def t2LDRBpcrel : t2AsmPseudo<"ldrb${p} $Rt, $addr",
+ (ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def t2LDRHpcrel : t2AsmPseudo<"ldrh${p} $Rt, $addr",
+ (ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def t2LDRSBpcrel : t2AsmPseudo<"ldrsb${p} $Rt, $addr",
+ (ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def t2LDRSHpcrel : t2AsmPseudo<"ldrsh${p} $Rt, $addr",
+ (ins GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+ // Version w/ the .w suffix.
+def : t2InstAlias<"ldr${p}.w $Rt, $addr",
+ (t2LDRpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def : t2InstAlias<"ldrb${p}.w $Rt, $addr",
+ (t2LDRBpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def : t2InstAlias<"ldrh${p}.w $Rt, $addr",
+ (t2LDRHpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def : t2InstAlias<"ldrsb${p}.w $Rt, $addr",
+ (t2LDRSBpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+def : t2InstAlias<"ldrsh${p}.w $Rt, $addr",
+ (t2LDRSHpcrel GPRnopc:$Rt, t2ldr_pcrel_imm12:$addr, pred:$p)>;
+
+def : t2InstAlias<"add${p} $Rd, pc, $imm",
+ (t2ADR rGPR:$Rd, imm0_4095:$imm, pred:$p)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
index e746cf2..3600b88 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -1,4 +1,4 @@
-//===- ARMInstrVFP.td - VFP support for ARM ----------------*- tablegen -*-===//
+//===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -61,6 +61,22 @@ def vfp_f64imm : Operand<f64>,
let ParserMatchClass = FPImmOperand;
}
+// The VCVT to/from fixed-point instructions encode the 'fbits' operand
+// (the number of fixed bits) differently than it appears in the assembly
+// source. It's encoded as "Size - fbits" where Size is the size of the
+// fixed-point representation (32 or 16) and fbits is the value appearing
+// in the assembly source, an integer in [0,16] or (0,32], depending on size.
+def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
+def fbits32 : Operand<i32> {
+ let PrintMethod = "printFBits32";
+ let ParserMatchClass = fbits32_asm_operand;
+}
+
+def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
+def fbits16 : Operand<i32> {
+ let PrintMethod = "printFBits16";
+ let ParserMatchClass = fbits16_asm_operand;
+}
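
A minimal sketch of the rule spelled out above (encodeFBits/decodeFBits are
hypothetical names; the real handling lives behind the printFBits*/FBits*
methods named in the operand definitions):

    // The instruction stores Size - fbits, where Size is 16 or 32.
    static unsigned encodeFBits(unsigned Size, unsigned FBits) {
      return Size - FBits;   // e.g. a .s16 conversion with fbits=5 stores 11
    }
    static unsigned decodeFBits(unsigned Size, unsigned Encoded) {
      return Size - Encoded; // the mapping is its own inverse
    }
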
//===----------------------------------------------------------------------===//
// Load / store Instructions.
@@ -69,11 +85,11 @@ def vfp_f64imm : Operand<f64>,
let canFoldAsLoad = 1, isReMaterializable = 1 in {
def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
- IIC_fpLoad64, "vldr", ".64\t$Dd, $addr",
+ IIC_fpLoad64, "vldr", "\t$Dd, $addr",
[(set DPR:$Dd, (f64 (load addrmode5:$addr)))]>;
def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
- IIC_fpLoad32, "vldr", ".32\t$Sd, $addr",
+ IIC_fpLoad32, "vldr", "\t$Sd, $addr",
[(set SPR:$Sd, (load addrmode5:$addr))]> {
// Some single precision VFP instructions may be executed on both NEON and VFP
// pipelines.
@@ -83,11 +99,11 @@ def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
- IIC_fpStore64, "vstr", ".64\t$Dd, $addr",
+ IIC_fpStore64, "vstr", "\t$Dd, $addr",
[(store (f64 DPR:$Dd), addrmode5:$addr)]>;
def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
- IIC_fpStore32, "vstr", ".32\t$Sd, $addr",
+ IIC_fpStore32, "vstr", "\t$Sd, $addr",
[(store SPR:$Sd, addrmode5:$addr)]> {
// Some single precision VFP instructions may be executed on both NEON and VFP
// pipelines.
@@ -190,6 +206,14 @@ def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>,
Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>,
Requires<[HasVFP2]>;
+defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
+ (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
+defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
+ (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
+defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
+ (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
+defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
+ (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;
// FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
@@ -270,7 +294,7 @@ def : Pat<(fmul (fneg SPR:$a), SPR:$b),
(VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
// These are encoded as unary instructions.
-let Defs = [FPSCR] in {
+let Defs = [FPSCR_NZCV] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
(outs), (ins DPR:$Dd, DPR:$Dm),
IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
@@ -299,7 +323,7 @@ def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-} // Defs = [FPSCR]
+} // Defs = [FPSCR_NZCV]
//===----------------------------------------------------------------------===//
// FP Unary Operations.
@@ -319,7 +343,7 @@ def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
let D = VFPNeonA8Domain;
}
-let Defs = [FPSCR] in {
+let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
(outs), (ins DPR:$Dd),
IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
@@ -360,7 +384,7 @@ def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-} // Defs = [FPSCR]
+} // Defs = [FPSCR_NZCV]
def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
(outs DPR:$Dd), (ins SPR:$Sm),
@@ -790,127 +814,131 @@ def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
// S32 (U=0, sx=1) -> SL
// U32 (U=1, sx=1) -> UL
-// FIXME: Marking these as codegen only seems wrong. They are real
-// instructions(?)
-let Constraints = "$a = $dst", isCodeGenOnly = 1 in {
+let Constraints = "$a = $dst" in {
// FP to Fixed-Point:
-def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]> {
+// Single Precision register
+class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, bit op5,
+ dag oops, dag iops, InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
+ bits<5> dst;
+ // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
+ let Inst{22} = dst{0};
+ let Inst{15-12} = dst{4-1};
+}
+
+// Double Precision register
+class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, bit op5,
+ dag oops, dag iops, InstrItinClass itin, string opc, string asm,
+ list<dag> pattern>
+ : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
+ bits<5> dst;
+ // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
+ let Inst{22} = dst{4};
+ let Inst{15-12} = dst{3-0};
+}
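
The two classes differ only in how the five-bit register number is split
across Inst{22} and Inst{15-12}, per the quoted pseudocode; a C++ sketch
(encodeVFPRegNum is a hypothetical helper, not LLVM API):

    static void encodeVFPRegNum(bool IsDouble, unsigned Reg,
                                unsigned &Inst22, unsigned &Inst15_12) {
      if (IsDouble) {              // D registers encode as D:Vd
        Inst22    = (Reg >> 4) & 0x1;
        Inst15_12 = Reg & 0xF;
      } else {                     // S registers encode as Vd:D
        Inst22    = Reg & 0x1;
        Inst15_12 = (Reg >> 1) & 0xF;
      }
    }
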
+
+def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
+ (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
+ IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]> {
+def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
+ (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
+ IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]> {
+def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
+ (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
+ IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]> {
+def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
+ (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
+ IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
+ (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
+ IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>;
-def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
+ (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
+ IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>;
-def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
+ (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
+ IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>;
-def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
+ (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
+ IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>;
// Fixed-Point to FP:
-def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]> {
+def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
+ (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
+ IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]> {
+def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
+ (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
+ IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]> {
+def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
+ (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
+ IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]> {
+def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
+ (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
+ IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
}
-def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
+ (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
+ IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>;
-def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
+ (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
+ IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>;
-def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
+ (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
+ IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>;
-def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
+def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
+ (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
+ IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>;
-} // End of 'let Constraints = "$a = $dst", isCodeGenOnly = 1 in'
+} // End of 'let Constraints = "$a = $dst" in'
//===----------------------------------------------------------------------===//
// FP Multiply-Accumulate Operations.
@@ -922,7 +950,7 @@ def VMLAD : ADbI<0b11100, 0b00, 0, 0,
[(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP2,UseFPVMLx]>;
+ Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -930,7 +958,7 @@ def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
[(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
SPR:$Sdin))]>,
RegConstraint<"$Sdin = $Sd">,
- Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
@@ -938,10 +966,10 @@ def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP2,UseFPVMLx]>;
+ Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
(VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
- Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>;
+ Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx,DontUseFusedMAC]>;
def VMLSD : ADbI<0b11100, 0b00, 1, 0,
(outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
@@ -949,7 +977,7 @@ def VMLSD : ADbI<0b11100, 0b00, 1, 0,
[(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP2,UseFPVMLx]>;
+ Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -957,7 +985,7 @@ def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
[(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
SPR:$Sdin))]>,
RegConstraint<"$Sdin = $Sd">,
- Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
@@ -965,10 +993,10 @@ def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
(VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP2,UseFPVMLx]>;
+ Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
(VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
- Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
(outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
@@ -976,7 +1004,7 @@ def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
[(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP2,UseFPVMLx]>;
+ Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
@@ -984,7 +1012,7 @@ def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
[(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
SPR:$Sdin))]>,
RegConstraint<"$Sdin = $Sd">,
- Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
@@ -992,10 +1020,10 @@ def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
(VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP2,UseFPVMLx]>;
+ Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
(VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
- Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
(outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
@@ -1003,14 +1031,14 @@ def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
[(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
(f64 DPR:$Ddin)))]>,
RegConstraint<"$Ddin = $Dd">,
- Requires<[HasVFP2,UseFPVMLx]>;
+ Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
(outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
[(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
RegConstraint<"$Sdin = $Sd">,
- Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]> {
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
// Some single precision VFP instructions may be executed on both NEON and
// VFP pipelines on A8.
let D = VFPNeonA8Domain;
@@ -1018,11 +1046,172 @@ def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
(VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
- Requires<[HasVFP2,UseFPVMLx]>;
+ Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
(VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
- Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
+ Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
+
+//===----------------------------------------------------------------------===//
+// Fused FP Multiply-Accumulate Operations.
+//
+def VFMAD : ADbI<0b11101, 0b10, 0, 0,
+ (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
+ IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
+ (f64 DPR:$Ddin)))]>,
+ RegConstraint<"$Ddin = $Dd">,
+ Requires<[HasVFP4,UseFusedMAC]>;
+
+def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
+ SPR:$Sdin))]>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
+ // Some single precision VFP instructions may be executed on both NEON and
+ // VFP pipelines.
+}
+
+def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
+ (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
+ Requires<[HasVFP4,UseFusedMAC]>;
+def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
+ (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
+ Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
+
+// Match @llvm.fma.* intrinsics
+def : Pat<(f64 (fma DPR:$Ddin, DPR:$Dn, DPR:$Dm)),
+ (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(f32 (fma SPR:$Sdin, SPR:$Sn, SPR:$Sm)),
+ (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
+ Requires<[HasVFP4]>;
+
+def VFMSD : ADbI<0b11101, 0b10, 1, 0,
+ (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
+ IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
+ (f64 DPR:$Ddin)))]>,
+ RegConstraint<"$Ddin = $Dd">,
+ Requires<[HasVFP4,UseFusedMAC]>;
+
+def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
+ SPR:$Sdin))]>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
+ // Some single precision VFP instructions may be executed on both NEON and
+ // VFP pipelines.
+}
+
+def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
+ (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
+ Requires<[HasVFP4,UseFusedMAC]>;
+def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
+ (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
+ Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
+
+// Match @llvm.fma.* intrinsics
+// (fma (fneg x), y, z) -> (vfms x, y, z)
+def : Pat<(f64 (fma (fneg DPR:$Ddin), DPR:$Dn, DPR:$Dm)),
+ (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(f32 (fma (fneg SPR:$Sdin), SPR:$Sn, SPR:$Sm)),
+ (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
+ Requires<[HasVFP4]>;
+// (fneg (fma x, (fneg y), z) -> (vfms x, y, z)
+def : Pat<(fneg (f64 (fma DPR:$Ddin, (fneg DPR:$Dn), DPR:$Dm))),
+ (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(fneg (f32 (fma SPR:$Sdin, (fneg SPR:$Sn), SPR:$Sm))),
+ (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
+ Requires<[HasVFP4]>;
+
+def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
+ (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
+ IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
+ (f64 DPR:$Ddin)))]>,
+ RegConstraint<"$Ddin = $Dd">,
+ Requires<[HasVFP4,UseFusedMAC]>;
+
+def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
+ SPR:$Sdin))]>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
+ // Some single precision VFP instructions may be executed on both NEON and
+ // VFP pipelines.
+}
+def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
+ (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
+ Requires<[HasVFP4,UseFusedMAC]>;
+def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
+ (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
+ Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
+
+// Match @llvm.fma.* intrinsics
+// (fneg (fma x, y, z)) -> (vfnma x, y, z)
+def : Pat<(fneg (fma (f64 DPR:$Ddin), (f64 DPR:$Dn), (f64 DPR:$Dm))),
+ (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(fneg (fma (f32 SPR:$Sdin), (f32 SPR:$Sn), (f32 SPR:$Sm))),
+ (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
+ Requires<[HasVFP4]>;
+// (fma (fneg x), y, (fneg z)) -> (vfnma x, y, z)
+def : Pat<(f64 (fma (fneg DPR:$Ddin), DPR:$Dn, (fneg DPR:$Dm))),
+ (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(f32 (fma (fneg SPR:$Sdin), SPR:$Sn, (fneg SPR:$Sm))),
+ (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
+ Requires<[HasVFP4]>;
+
+def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
+ (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
+ IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
+ [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
+ (f64 DPR:$Ddin)))]>,
+ RegConstraint<"$Ddin = $Dd">,
+ Requires<[HasVFP4,UseFusedMAC]>;
+
+def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
+ (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
+ IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
+ [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
+ RegConstraint<"$Sdin = $Sd">,
+ Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
+ // Some single precision VFP instructions may be executed on both NEON and
+ // VFP pipelines.
+}
+
+def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
+ (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
+ Requires<[HasVFP4,UseFusedMAC]>;
+def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
+ (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
+ Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
+
+// Match @llvm.fma.* intrinsics
+// (fneg (fma (fneg x), y, z)) -> (vnfms x, y, z)
+def : Pat<(fneg (f64 (fma (fneg DPR:$Ddin), DPR:$Dn, DPR:$Dm))),
+ (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(fneg (f32 (fma (fneg SPR:$Sdin), SPR:$Sn, SPR:$Sm))),
+ (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
+ Requires<[HasVFP4]>;
+// (fma x, (fneg y), z) -> (vnfms x, y, z)
+def : Pat<(f64 (fma DPR:$Ddin, (fneg DPR:$Dn), DPR:$Dm)),
+ (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
+ Requires<[HasVFP4]>;
+def : Pat<(f32 (fma SPR:$Sdin, (fneg SPR:$Sn), SPR:$Sm)),
+ (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
+ Requires<[HasVFP4]>;
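
Taken together, the patterns above pin down the semantics of the four fused
variants; a reference-model sketch in plain C++ using std::fma (illustrative
only, not the codegen):

    #include <cmath>
    double vfma (double d, double n, double m) { return std::fma( n, m,  d); } // d + n*m
    double vfms (double d, double n, double m) { return std::fma(-n, m,  d); } // d - n*m
    double vfnma(double d, double n, double m) { return std::fma(-n, m, -d); } // -(d + n*m)
    double vfnms(double d, double n, double m) { return std::fma( n, m, -d); } // n*m - d
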
//===----------------------------------------------------------------------===//
// FP Conditional moves.
@@ -1063,9 +1252,9 @@ class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
// APSR is the application level alias of CPSR. This moves the FPSCR N, Z, C,
// V flags to APSR.
-let Defs = [CPSR], Uses = [FPSCR], Rt = 0b1111 /* apsr_nzcv */ in
+let Defs = [CPSR], Uses = [FPSCR_NZCV], Rt = 0b1111 /* apsr_nzcv */ in
def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
- "vmrs", "\tapsr_nzcv, fpscr", [(arm_fmstat)]>;
+ "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;
// Application level FPSCR -> GPR
let hasSideEffects = 1, Uses = [FPSCR] in
@@ -1079,6 +1268,10 @@ let Uses = [FPSCR] in {
"vmrs", "\t$Rt, fpexc", []>;
def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPR:$Rt), (ins),
"vmrs", "\t$Rt, fpsid", []>;
+ def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPR:$Rt), (ins),
+ "vmrs", "\t$Rt, mvfr0", []>;
+ def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPR:$Rt), (ins),
+ "vmrs", "\t$Rt, mvfr1", []>;
}
//===----------------------------------------------------------------------===//
@@ -1160,6 +1353,115 @@ def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
//===----------------------------------------------------------------------===//
// Assembler aliases.
//
+// A few mnemonic aliases for pre-unified syntax. We don't guarantee to
+// support them all, but supporting at least some of the basics is
+// a friendly touch.
+def : VFP2MnemonicAlias<"flds", "vldr">;
+def : VFP2MnemonicAlias<"fldd", "vldr">;
+def : VFP2MnemonicAlias<"fmrs", "vmov">;
+def : VFP2MnemonicAlias<"fmsr", "vmov">;
+def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
+def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
+def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
+def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
+def : VFP2MnemonicAlias<"fmrdd", "vmov">;
+def : VFP2MnemonicAlias<"fmrds", "vmov">;
+def : VFP2MnemonicAlias<"fmrrd", "vmov">;
+def : VFP2MnemonicAlias<"fmdrr", "vmov">;
+def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
+def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
+def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
+def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
+def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
+def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
+def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
+def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
+def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
+def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
+def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
+def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
+def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
+def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
+def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
+def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
+def : VFP2MnemonicAlias<"fsts", "vstr">;
+def : VFP2MnemonicAlias<"fstd", "vstr">;
+def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
+def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
+def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
+def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
+def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
+def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
+def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
+def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
+def : VFP2MnemonicAlias<"fmrx", "vmrs">;
+def : VFP2MnemonicAlias<"fmxr", "vmsr">;
+
+// Be friendly and accept the old form of zero-compare
+def : VFP2InstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
+def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
-def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
+def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
+def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
+ (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
+def : VFP2InstAlias<"faddd${p} $Dd, $Dn, $Dm",
+ (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
+def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
+ (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
+def : VFP2InstAlias<"fsubd${p} $Dd, $Dn, $Dm",
+ (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
+
+// No need for the size suffix on VSQRT. It's implied by the register classes.
+def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
+def : VFP2InstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
+
+// VLDR/VSTR accept an optional type suffix.
+def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
+ (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
+def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
+ (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
+def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
+ (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
+def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
+ (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
+
+// VMUL has a two-operand form (implied destination operand)
+def : VFP2InstAlias<"vmul${p}.f64 $Dn, $Dm",
+ (VMULD DPR:$Dn, DPR:$Dn, DPR:$Dm, pred:$p)>;
+def : VFP2InstAlias<"vmul${p}.f32 $Sn, $Sm",
+ (VMULS SPR:$Sn, SPR:$Sn, SPR:$Sm, pred:$p)>;
+// VADD has a two-operand form (implied destination operand)
+def : VFP2InstAlias<"vadd${p}.f64 $Dn, $Dm",
+ (VADDD DPR:$Dn, DPR:$Dn, DPR:$Dm, pred:$p)>;
+def : VFP2InstAlias<"vadd${p}.f32 $Sn, $Sm",
+ (VADDS SPR:$Sn, SPR:$Sn, SPR:$Sm, pred:$p)>;
+// VSUB has a two-operand form (implied destination operand)
+def : VFP2InstAlias<"vsub${p}.f64 $Dn, $Dm",
+ (VSUBD DPR:$Dn, DPR:$Dn, DPR:$Dm, pred:$p)>;
+def : VFP2InstAlias<"vsub${p}.f32 $Sn, $Sm",
+ (VSUBS SPR:$Sn, SPR:$Sn, SPR:$Sm, pred:$p)>;
+
+// VMOV can accept an optional data type suffix of 32 bits or less.
+def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
+ (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
+def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
+ (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
+def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
+ (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
+def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
+ (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
+def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
+ (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
+def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
+ (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
+
+def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
+ (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
+def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
+ (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
+
+// VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
+// VMOVD does.
+def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
+ (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
index 45b7e48..98930cc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.cpp
@@ -13,7 +13,7 @@
#define DEBUG_TYPE "jit"
#include "ARMJITInfo.h"
-#include "ARMInstrInfo.h"
+#include "ARM.h"
#include "ARMConstantPoolValue.h"
#include "ARMRelocations.h"
#include "ARMSubtarget.h"
@@ -61,7 +61,7 @@ extern "C" {
// concerned, so we can't just preserve the callee saved regs.
"stmdb sp!, {r0, r1, r2, r3, lr}\n"
#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
- "fstmfdd sp!, {d0, d1, d2, d3, d4, d5, d6, d7}\n"
+ "vstmdb sp!, {d0, d1, d2, d3, d4, d5, d6, d7}\n"
#endif
// The LR contains the address of the stub function on entry.
// pass it as the argument to the C part of the callback
@@ -85,7 +85,7 @@ extern "C" {
//
#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
// Restore VFP caller-saved registers.
- "fldmfdd sp!, {d0, d1, d2, d3, d4, d5, d6, d7}\n"
+ "vldmia sp!, {d0, d1, d2, d3, d4, d5, d6, d7}\n"
#endif
//
// We need to exchange the values in slots 0 and 1 so we can
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.h b/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
index 2f97928..7928184 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
@@ -1,4 +1,4 @@
-//===- ARMJITInfo.h - ARM implementation of the JIT interface --*- C++ -*-===//
+//===-- ARMJITInfo.h - ARM implementation of the JIT interface -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index faa8ba7..9ef2ace 100644
--- a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1,4 +1,4 @@
-//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
+//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,8 +15,8 @@
#define DEBUG_TYPE "arm-ldst-opt"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
+#include "ARMBaseRegisterInfo.h"
#include "ARMMachineFunctionInfo.h"
-#include "ARMRegisterInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
@@ -32,6 +32,8 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -62,6 +64,7 @@ namespace {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
+ const ARMSubtarget *STI;
ARMFunctionInfo *AFI;
RegScavenger *RS;
bool isThumb2;
@@ -90,7 +93,9 @@ namespace {
bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
int Offset, unsigned Base, bool BaseKill, int Opcode,
ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
- DebugLoc dl, SmallVector<std::pair<unsigned, bool>, 8> &Regs);
+ DebugLoc dl,
+ ArrayRef<std::pair<unsigned, bool> > Regs,
+ ArrayRef<unsigned> ImpDefs);
void MergeOpsUpdate(MachineBasicBlock &MBB,
MemOpQueue &MemOps,
unsigned memOpsBegin,
@@ -141,7 +146,6 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::db: return ARM::LDMDB;
case ARM_AM::ib: return ARM::LDMIB;
}
- break;
case ARM::STRi12:
++NumSTMGened;
switch (Mode) {
@@ -151,7 +155,6 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::db: return ARM::STMDB;
case ARM_AM::ib: return ARM::STMIB;
}
- break;
case ARM::t2LDRi8:
case ARM::t2LDRi12:
++NumLDMGened;
@@ -160,7 +163,6 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::ia: return ARM::t2LDMIA;
case ARM_AM::db: return ARM::t2LDMDB;
}
- break;
case ARM::t2STRi8:
case ARM::t2STRi12:
++NumSTMGened;
@@ -169,7 +171,6 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::ia: return ARM::t2STMIA;
case ARM_AM::db: return ARM::t2STMDB;
}
- break;
case ARM::VLDRS:
++NumVLDMGened;
switch (Mode) {
@@ -177,7 +178,6 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::ia: return ARM::VLDMSIA;
case ARM_AM::db: return 0; // Only VLDMSDB_UPD exists.
}
- break;
case ARM::VSTRS:
++NumVSTMGened;
switch (Mode) {
@@ -185,7 +185,6 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::ia: return ARM::VSTMSIA;
case ARM_AM::db: return 0; // Only VSTMSDB_UPD exists.
}
- break;
case ARM::VLDRD:
++NumVLDMGened;
switch (Mode) {
@@ -193,7 +192,6 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::ia: return ARM::VLDMDIA;
case ARM_AM::db: return 0; // Only VLDMDDB_UPD exists.
}
- break;
case ARM::VSTRD:
++NumVSTMGened;
switch (Mode) {
@@ -201,10 +199,7 @@ static int getLoadStoreMultipleOpcode(int Opcode, ARM_AM::AMSubMode Mode) {
case ARM_AM::ia: return ARM::VSTMDIA;
case ARM_AM::db: return 0; // Only VSTMDDB_UPD exists.
}
- break;
}
-
- return 0;
}
namespace llvm {
@@ -259,8 +254,6 @@ AMSubMode getLoadStoreMultipleSubMode(int Opcode) {
case ARM::STMIB_UPD:
return ARM_AM::ib;
}
-
- return ARM_AM::bad_am_submode;
}
} // end namespace ARM_AM
@@ -291,7 +284,8 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
int Offset, unsigned Base, bool BaseKill,
int Opcode, ARMCC::CondCodes Pred,
unsigned PredReg, unsigned Scratch, DebugLoc dl,
- SmallVector<std::pair<unsigned, bool>, 8> &Regs) {
+ ArrayRef<std::pair<unsigned, bool> > Regs,
+ ArrayRef<unsigned> ImpDefs) {
// Only a single register to load / store. Don't bother.
unsigned NumRegs = Regs.size();
if (NumRegs <= 1)
@@ -359,6 +353,10 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
| getKillRegState(Regs[i].second));
+ // Add implicit defs for super-registers.
+ for (unsigned i = 0, e = ImpDefs.size(); i != e; ++i)
+ MIB.addReg(ImpDefs[i], RegState::ImplicitDefine);
+
return true;
}
@@ -393,19 +391,29 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
}
SmallVector<std::pair<unsigned, bool>, 8> Regs;
+ SmallVector<unsigned, 8> ImpDefs;
for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
unsigned Reg = memOps[i].Reg;
// If we are inserting the merged operation after an operation that
// uses the same register, make sure to transfer any kill flag.
bool isKill = memOps[i].isKill || KilledRegs.count(Reg);
Regs.push_back(std::make_pair(Reg, isKill));
+
+ // Collect any implicit defs of super-registers. They must be preserved.
+ for (MIOperands MO(memOps[i].MBBI); MO.isValid(); ++MO) {
+ if (!MO->isReg() || !MO->isDef() || !MO->isImplicit() || MO->isDead())
+ continue;
+ unsigned DefReg = MO->getReg();
+ if (std::find(ImpDefs.begin(), ImpDefs.end(), DefReg) == ImpDefs.end())
+ ImpDefs.push_back(DefReg);
+ }
}
// Try to do the merge.
MachineBasicBlock::iterator Loc = memOps[insertAfter].MBBI;
++Loc;
if (!MergeOps(MBB, Loc, Offset, Base, BaseKill, Opcode,
- Pred, PredReg, Scratch, dl, Regs))
+ Pred, PredReg, Scratch, dl, Regs, ImpDefs))
return;
// Merge succeeded, update records.
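
The loop above gathers each implicit, non-dead super-register def exactly once so MergeOps can re-state it on the merged LDM/STM. A standalone model of that collection, with a hypothetical operand type in place of MachineOperand:

#include <algorithm>
#include <vector>

struct Op { bool IsReg, IsDef, IsImplicit, IsDead; unsigned Reg; };

// Collect unique implicit defs (e.g. a Q super-register implicitly defined
// by two merged D-register loads) so they survive the merge.
static void collectImpDefs(const std::vector<Op> &Ops,
                           std::vector<unsigned> &ImpDefs) {
  for (const Op &MO : Ops) {
    if (!MO.IsReg || !MO.IsDef || !MO.IsImplicit || MO.IsDead)
      continue;
    if (std::find(ImpDefs.begin(), ImpDefs.end(), MO.Reg) == ImpDefs.end())
      ImpDefs.push_back(MO.Reg);
  }
}
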
@@ -506,50 +514,84 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
return;
}
-static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
- unsigned Bytes, unsigned Limit,
- ARMCC::CondCodes Pred, unsigned PredReg){
+static bool definesCPSR(MachineInstr *MI) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
+ // If the instruction has a live CPSR def, then it's not safe to fold it
+ // into a load / store.
+ return true;
+ }
+
+ return false;
+}
+
+static bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
+ unsigned Bytes, unsigned Limit,
+ ARMCC::CondCodes Pred, unsigned PredReg) {
unsigned MyPredReg = 0;
if (!MI)
return false;
- if (MI->getOpcode() != ARM::t2SUBri &&
- MI->getOpcode() != ARM::tSUBspi &&
- MI->getOpcode() != ARM::SUBri)
- return false;
+
+ bool CheckCPSRDef = false;
+ switch (MI->getOpcode()) {
+ default: return false;
+ case ARM::t2SUBri:
+ case ARM::SUBri:
+ CheckCPSRDef = true;
+ // fallthrough
+ case ARM::tSUBspi:
+ break;
+ }
// Make sure the offset fits in 8 bits.
if (Bytes == 0 || (Limit && Bytes >= Limit))
return false;
unsigned Scale = (MI->getOpcode() == ARM::tSUBspi) ? 4 : 1; // FIXME
- return (MI->getOperand(0).getReg() == Base &&
- MI->getOperand(1).getReg() == Base &&
- (MI->getOperand(2).getImm()*Scale) == Bytes &&
- llvm::getInstrPredicate(MI, MyPredReg) == Pred &&
- MyPredReg == PredReg);
+ if (!(MI->getOperand(0).getReg() == Base &&
+ MI->getOperand(1).getReg() == Base &&
+ (MI->getOperand(2).getImm()*Scale) == Bytes &&
+ getInstrPredicate(MI, MyPredReg) == Pred &&
+ MyPredReg == PredReg))
+ return false;
+
+ return CheckCPSRDef ? !definesCPSR(MI) : true;
}
-static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
- unsigned Bytes, unsigned Limit,
- ARMCC::CondCodes Pred, unsigned PredReg){
+static bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
+ unsigned Bytes, unsigned Limit,
+ ARMCC::CondCodes Pred, unsigned PredReg) {
unsigned MyPredReg = 0;
if (!MI)
return false;
- if (MI->getOpcode() != ARM::t2ADDri &&
- MI->getOpcode() != ARM::tADDspi &&
- MI->getOpcode() != ARM::ADDri)
- return false;
+
+ bool CheckCPSRDef = false;
+ switch (MI->getOpcode()) {
+ default: return false;
+ case ARM::t2ADDri:
+ case ARM::ADDri:
+ CheckCPSRDef = true;
+ // fallthrough
+ case ARM::tADDspi:
+ break;
+ }
if (Bytes == 0 || (Limit && Bytes >= Limit))
// Make sure the offset fits in 8 bits.
return false;
unsigned Scale = (MI->getOpcode() == ARM::tADDspi) ? 4 : 1; // FIXME
- return (MI->getOperand(0).getReg() == Base &&
- MI->getOperand(1).getReg() == Base &&
- (MI->getOperand(2).getImm()*Scale) == Bytes &&
- llvm::getInstrPredicate(MI, MyPredReg) == Pred &&
- MyPredReg == PredReg);
+ if (!(MI->getOperand(0).getReg() == Base &&
+ MI->getOperand(1).getReg() == Base &&
+ (MI->getOperand(2).getImm()*Scale) == Bytes &&
+ getInstrPredicate(MI, MyPredReg) == Pred &&
+ MyPredReg == PredReg))
+ return false;
+
+ return CheckCPSRDef ? !definesCPSR(MI) : true;
}
static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
@@ -603,7 +645,6 @@ static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
case ARM_AM::da: return ARM::LDMDA_UPD;
case ARM_AM::db: return ARM::LDMDB_UPD;
}
- break;
case ARM::STMIA:
case ARM::STMDA:
case ARM::STMDB:
@@ -615,7 +656,6 @@ static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
case ARM_AM::da: return ARM::STMDA_UPD;
case ARM_AM::db: return ARM::STMDB_UPD;
}
- break;
case ARM::t2LDMIA:
case ARM::t2LDMDB:
switch (Mode) {
@@ -623,7 +663,6 @@ static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
case ARM_AM::ia: return ARM::t2LDMIA_UPD;
case ARM_AM::db: return ARM::t2LDMDB_UPD;
}
- break;
case ARM::t2STMIA:
case ARM::t2STMDB:
switch (Mode) {
@@ -631,38 +670,31 @@ static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
case ARM_AM::ia: return ARM::t2STMIA_UPD;
case ARM_AM::db: return ARM::t2STMDB_UPD;
}
- break;
case ARM::VLDMSIA:
switch (Mode) {
default: llvm_unreachable("Unhandled submode!");
case ARM_AM::ia: return ARM::VLDMSIA_UPD;
case ARM_AM::db: return ARM::VLDMSDB_UPD;
}
- break;
case ARM::VLDMDIA:
switch (Mode) {
default: llvm_unreachable("Unhandled submode!");
case ARM_AM::ia: return ARM::VLDMDIA_UPD;
case ARM_AM::db: return ARM::VLDMDDB_UPD;
}
- break;
case ARM::VSTMSIA:
switch (Mode) {
default: llvm_unreachable("Unhandled submode!");
case ARM_AM::ia: return ARM::VSTMSIA_UPD;
case ARM_AM::db: return ARM::VSTMSDB_UPD;
}
- break;
case ARM::VSTMDIA:
switch (Mode) {
default: llvm_unreachable("Unhandled submode!");
case ARM_AM::ia: return ARM::VSTMDIA_UPD;
case ARM_AM::db: return ARM::VSTMDDB_UPD;
}
- break;
}
-
- return 0;
}
/// MergeBaseUpdateLSMultiple - Fold proceeding/trailing inc/dec of base
@@ -686,7 +718,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
bool BaseKill = MI->getOperand(0).isKill();
unsigned Bytes = getLSMultipleTransferSize(MI);
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
int Opcode = MI->getOpcode();
DebugLoc dl = MI->getDebugLoc();
@@ -783,7 +815,6 @@ static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc,
return ARM::t2STR_PRE;
default: llvm_unreachable("Unhandled opcode!");
}
- return 0;
}
static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc,
@@ -809,7 +840,6 @@ static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc,
return ARM::t2STR_POST;
default: llvm_unreachable("Unhandled opcode!");
}
- return 0;
}
/// MergeBaseUpdateLoadStore - Fold proceeding/trailing inc/dec of base
@@ -841,7 +871,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
return false;
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
bool DoMerge = false;
ARM_AM::AddrOpc AddSub = ARM_AM::add;
unsigned NewOpc = 0;
@@ -1071,11 +1101,17 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
unsigned Opcode = MI->getOpcode();
if (Opcode == ARM::LDRD || Opcode == ARM::STRD ||
Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8) {
+ const MachineOperand &BaseOp = MI->getOperand(2);
+ unsigned BaseReg = BaseOp.getReg();
unsigned EvenReg = MI->getOperand(0).getReg();
unsigned OddReg = MI->getOperand(1).getReg();
unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false);
- if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
+ // ARM errata 602117: LDRD with the base register in the destination list
+ // may leave an incorrect base register value when interrupted or faulted.
+ bool Errata602117 = EvenReg == BaseReg && STI->isCortexM3();
+ if (!Errata602117 &&
+ ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum))
return false;
MachineBasicBlock::iterator NewBBI = MBBI;
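
A hedged sketch of the new Cortex-M3 guard above (hypothetical helper name): when the first destination register equals the base, e.g. `ldrd r0, r1, [r0]`, the pair must stay split into two LDRs on M3.

// Errata 602117 applies when LDRD's first destination is also the base.
static bool hitsErrata602117(unsigned EvenReg, unsigned BaseReg,
                             bool IsCortexM3) {
  return IsCortexM3 && EvenReg == BaseReg;
}
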
@@ -1087,15 +1123,13 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
bool OddDeadKill = isLd ?
MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
bool OddUndef = MI->getOperand(1).isUndef();
- const MachineOperand &BaseOp = MI->getOperand(2);
- unsigned BaseReg = BaseOp.getReg();
bool BaseKill = BaseOp.isKill();
bool BaseUndef = BaseOp.isUndef();
bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
int OffImm = getMemoryOpOffset(MI);
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
if (OddRegNum > EvenRegNum && OffImm == 0) {
// Ascending register numbers and no offset. It's safe to change it to a
@@ -1126,6 +1160,11 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
unsigned NewOpc = (isLd)
? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
: (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
+ // Be extra careful for thumb2. t2LDRi8 can't reference a zero offset,
+ // so use t2LDRi12 for that case instead.
+ unsigned NewOpc2 = (isLd)
+ ? (isT2 ? (OffImm+4 < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
+ : (isT2 ? (OffImm+4 < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
DebugLoc dl = MBBI->getDebugLoc();
// If this is a load and base register is killed, it may have been
// re-defed by the load, make sure the first load does not clobber it.
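
The second half of the split pair accesses OffImm+4, so on Thumb2 it must choose its opcode from its own offset sign; that is what NewOpc2 captures above. A small sketch of the selection, with illustrative opcode strings rather than real enum values:

// Each half of a split LDRD picks t2LDRi8 for a negative offset and
// t2LDRi12 otherwise; the odd half evaluates this at OffImm + 4.
static const char *pickThumb2LoadOpc(int OffImm) {
  return OffImm < 0 ? "t2LDRi8" : "t2LDRi12";
}
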
@@ -1133,11 +1172,13 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
(BaseKill || OffKill) &&
(TRI->regsOverlap(EvenReg, BaseReg))) {
assert(!TRI->regsOverlap(OddReg, BaseReg));
- InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
+ InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc2,
OddReg, OddDeadKill, false,
BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
NewBBI = llvm::prior(MBBI);
+ if (isT2 && NewOpc == ARM::t2LDRi8 && OffImm+4 >= 0)
+ NewOpc = ARM::t2LDRi12;
InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
EvenReg, EvenDeadKill, false,
BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
@@ -1150,12 +1191,16 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
EvenDeadKill = false;
OddDeadKill = true;
}
+ // Never kill the base register in the first instruction.
+ // <rdar://problem/11101911>
+ if (EvenReg == BaseReg)
+ EvenDeadKill = false;
InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
EvenReg, EvenDeadKill, EvenUndef,
BaseReg, false, BaseUndef, false, OffUndef,
Pred, PredReg, TII, isT2);
NewBBI = llvm::prior(MBBI);
- InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
+ InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc2,
OddReg, OddDeadKill, OddUndef,
BaseReg, BaseKill, BaseUndef, OffKill, OffUndef,
Pred, PredReg, TII, isT2);
@@ -1206,7 +1251,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
bool isKill = MO.isDef() ? false : MO.isKill();
unsigned Base = MBBI->getOperand(1).getReg();
unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(MBBI, PredReg);
+ ARMCC::CondCodes Pred = getInstrPredicate(MBBI, PredReg);
int Offset = getMemoryOpOffset(MBBI);
// Watch out for:
// r4 := ldr [r5]
@@ -1380,6 +1425,7 @@ bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
AFI = Fn.getInfo<ARMFunctionInfo>();
TII = TM.getInstrInfo();
TRI = TM.getRegisterInfo();
+ STI = &TM.getSubtarget<ARMSubtarget>();
RS = new RegScavenger();
isThumb2 = AFI->isThumb2Function();
@@ -1464,19 +1510,18 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
while (++I != E) {
if (I->isDebugValue() || MemOps.count(&*I))
continue;
- const MCInstrDesc &MCID = I->getDesc();
- if (MCID.isCall() || MCID.isTerminator() || I->hasUnmodeledSideEffects())
+ if (I->isCall() || I->isTerminator() || I->hasUnmodeledSideEffects())
return false;
- if (isLd && MCID.mayStore())
+ if (isLd && I->mayStore())
return false;
if (!isLd) {
- if (MCID.mayLoad())
+ if (I->mayLoad())
return false;
// It's not safe to move the first 'str' down.
// str r1, [r0]
// strh r5, [r0]
// str r4, [r0, #+4]
- if (MCID.mayStore())
+ if (I->mayStore())
return false;
}
for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
@@ -1498,6 +1543,23 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
return AddedRegPressure.size() <= MemRegs.size() * 2;
}
+
+/// Copy the memory operands of Op0 and Op1 into a new array assigned to MI.
+static void concatenateMemOperands(MachineInstr *MI, MachineInstr *Op0,
+ MachineInstr *Op1) {
+ assert(MI->memoperands_empty() && "expected a new machineinstr");
+ size_t numMemRefs = (Op0->memoperands_end() - Op0->memoperands_begin())
+ + (Op1->memoperands_end() - Op1->memoperands_begin());
+
+ MachineFunction *MF = MI->getParent()->getParent();
+ MachineSDNode::mmo_iterator MemBegin = MF->allocateMemRefsArray(numMemRefs);
+ MachineSDNode::mmo_iterator MemEnd =
+ std::copy(Op0->memoperands_begin(), Op0->memoperands_end(), MemBegin);
+ MemEnd =
+ std::copy(Op1->memoperands_begin(), Op1->memoperands_end(), MemEnd);
+ MI->setMemRefs(MemBegin, MemEnd);
+}
+
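
concatenateMemOperands keeps the alias information of both original accesses on the merged LDRD/STRD; without it, alias analysis would have to treat the new instruction conservatively. The copy itself is just two std::copy calls into a preallocated array, modeled here with plain pointers and a hypothetical element type:

#include <algorithm>
#include <cstddef>

static void concatenate(const int *A, std::size_t NA,
                        const int *B, std::size_t NB, int *Out) {
  int *End = std::copy(A, A + NA, Out); // Op0's memoperands first
  std::copy(B, B + NB, End);            // then Op1's, appended
}
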
bool
ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
DebugLoc &dl,
@@ -1565,7 +1627,7 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
if (EvenReg == OddReg)
return false;
BaseReg = Op0->getOperand(1).getReg();
- Pred = llvm::getInstrPredicate(Op0, PredReg);
+ Pred = getInstrPredicate(Op0, PredReg);
dl = Op0->getDebugLoc();
return true;
}
@@ -1615,8 +1677,9 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
LastOp = Op;
}
- unsigned Opcode = Op->getOpcode();
- if (LastOpcode && Opcode != LastOpcode)
+ unsigned LSMOpcode
+ = getLoadStoreMultipleOpcode(Op->getOpcode(), ARM_AM::ia);
+ if (LastOpcode && LSMOpcode != LastOpcode)
break;
int Offset = getMemoryOpOffset(Op);
@@ -1627,7 +1690,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
}
LastOffset = Offset;
LastBytes = Bytes;
- LastOpcode = Opcode;
+ LastOpcode = LSMOpcode;
if (++NumMove == 8) // FIXME: Tune this limit.
break;
}
@@ -1692,6 +1755,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
+ concatenateMemOperands(MIB, Op0, Op1);
+ DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumLDRDFormed;
} else {
MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
@@ -1704,6 +1769,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
if (!isT2)
MIB.addReg(0);
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
+ concatenateMemOperands(MIB, Op0, Op1);
+ DEBUG(dbgs() << "Formed " << *MIB << "\n");
++NumSTRDFormed;
}
MBB->erase(Op0);
@@ -1745,8 +1812,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
while (MBBI != E) {
for (; MBBI != E; ++MBBI) {
MachineInstr *MI = MBBI;
- const MCInstrDesc &MCID = MI->getDesc();
- if (MCID.isCall() || MCID.isTerminator()) {
+ if (MI->isCall() || MI->isTerminator()) {
// Stop at barriers.
++MBBI;
break;
@@ -1758,7 +1824,7 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
if (!isMemoryOp(MI))
continue;
unsigned PredReg = 0;
- if (llvm::getInstrPredicate(MI, PredReg) != ARMCC::AL)
+ if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
continue;
int Opc = MI->getOpcode();
diff --git a/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp b/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
index daa126d..e2ac9a4 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMMCInstLower.cpp
@@ -31,8 +31,7 @@ MCOperand ARMAsmPrinter::GetSymbolRef(const MachineOperand &MO,
Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None,
OutContext);
switch (MO.getTargetFlags()) {
- default:
- assert(0 && "Unknown target flag on symbol operand");
+ default: llvm_unreachable("Unknown target flag on symbol operand");
case 0:
break;
case ARMII::MO_LO16:
@@ -67,9 +66,7 @@ MCOperand ARMAsmPrinter::GetSymbolRef(const MachineOperand &MO,
bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO,
MCOperand &MCOp) {
switch (MO.getType()) {
- default:
- assert(0 && "unknown operand type");
- return false;
+ default: llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
// Ignore all non-CPSR implicit register operands.
if (MO.isImplicit() && MO.getReg() != ARM::CPSR)
@@ -107,6 +104,9 @@ bool ARMAsmPrinter::lowerOperand(const MachineOperand &MO,
MCOp = MCOperand::CreateFPImm(Val.convertToDouble());
break;
}
+ case MachineOperand::MO_RegisterMask:
+ // Ignore call clobbers.
+ return false;
}
return true;
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp
new file mode 100644
index 0000000..af445e2
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.cpp
@@ -0,0 +1,14 @@
+//===-- ARMMachineFunctionInfo.cpp - ARM machine function info ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARMMachineFunctionInfo.h"
+
+using namespace llvm;
+
+void ARMFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
index 138f0c2..f1c8fc8 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -1,4 +1,4 @@
-//====- ARMMachineFuctionInfo.h - ARM machine function info -----*- C++ -*-===//
+//===-- ARMMachineFunctionInfo.h - ARM machine function info ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,6 +25,7 @@ namespace llvm {
/// ARMFunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private ARM-specific information for each MachineFunction.
class ARMFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
/// isThumb - True if this function is compiled under Thumb mode.
/// Used to initialized Align, so must precede it.
@@ -63,6 +64,9 @@ class ARMFunctionInfo : public MachineFunctionInfo {
/// GPR callee-saved (2) : r8, r10, r11
/// --------------------------------------------
/// DPR callee-saved : d8 - d15
+ ///
+ /// Also see NumAlignedDPRCS2Regs below. Not all D-regs need to go in area 3.
+ /// Some may be spilled after the stack has been realigned.
unsigned GPRCS1Offset;
unsigned GPRCS2Offset;
unsigned DPRCSOffset;
@@ -79,6 +83,15 @@ class ARMFunctionInfo : public MachineFunctionInfo {
BitVector GPRCS2Frames;
BitVector DPRCSFrames;
+ /// NumAlignedDPRCS2Regs - The number of callee-saved DPRs that are saved in
+ /// the aligned portion of the stack frame. This is always a contiguous
+ /// sequence of D-registers starting from d8.
+ ///
+ /// We do not keep track of the frame indices used for these registers - they
+ /// behave like any other frame index in the aligned stack frame. These
+ /// registers also aren't included in DPRCSSize above.
+ unsigned NumAlignedDPRCS2Regs;
+
/// JumpTableUId - Unique id for jumptables.
///
unsigned JumpTableUId;
@@ -104,6 +117,7 @@ public:
FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
GPRCS1Frames(0), GPRCS2Frames(0), DPRCSFrames(0),
+ NumAlignedDPRCS2Regs(0),
JumpTableUId(0), PICLabelUId(0),
VarArgsFrameIndex(0), HasITBlocks(false) {}
@@ -137,6 +151,9 @@ public:
unsigned getFramePtrSpillOffset() const { return FramePtrSpillOffset; }
void setFramePtrSpillOffset(unsigned o) { FramePtrSpillOffset = o; }
+ unsigned getNumAlignedDPRCS2Regs() const { return NumAlignedDPRCS2Regs; }
+ void setNumAlignedDPRCS2Regs(unsigned n) { NumAlignedDPRCS2Regs = n; }
+
unsigned getGPRCalleeSavedArea1Offset() const { return GPRCS1Offset; }
unsigned getGPRCalleeSavedArea2Offset() const { return GPRCS2Offset; }
unsigned getDPRCalleeSavedAreaOffset() const { return DPRCSOffset; }
diff --git a/contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h b/contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h
index 18e1620..efa22fb 100644
--- a/contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h
+++ b/contrib/llvm/lib/Target/ARM/ARMPerfectShuffle.h
@@ -1,4 +1,4 @@
-//===-- ARMPerfectShuffle.h - NEON Perfect Shuffle Table ------------------===//
+//===-- ARMPerfectShuffle.h - NEON Perfect Shuffle Table --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
index 1cba1ba..6f3819a 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- ARMRegisterInfo.cpp - ARM Register Information -----------*- C++ -*-===//
+//===-- ARMRegisterInfo.cpp - ARM Register Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,11 +11,13 @@
//
//===----------------------------------------------------------------------===//
+#include "ARMRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
-#include "ARMRegisterInfo.h"
using namespace llvm;
+void ARMRegisterInfo::anchor() { }
+
ARMRegisterInfo::ARMRegisterInfo(const ARMBaseInstrInfo &tii,
const ARMSubtarget &sti)
: ARMBaseRegisterInfo(tii, sti) {
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h
index 8edfb9a..8a24842 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.h
@@ -1,4 +1,4 @@
-//===- ARMRegisterInfo.h - ARM Register Information Impl --------*- C++ -*-===//
+//===-- ARMRegisterInfo.h - ARM Register Information Impl -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,15 +15,15 @@
#define ARMREGISTERINFO_H
#include "ARM.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include "ARMBaseRegisterInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class ARMSubtarget;
class ARMBaseInstrInfo;
- class Type;
struct ARMRegisterInfo : public ARMBaseRegisterInfo {
+ virtual void anchor();
public:
ARMRegisterInfo(const ARMBaseInstrInfo &tii, const ARMSubtarget &STI);
};
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
index 036822d..1466e98 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
@@ -1,4 +1,4 @@
-//===- ARMRegisterInfo.td - ARM Register defs --------------*- tablegen -*-===//
+//===-- ARMRegisterInfo.td - ARM Register defs -------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,6 +16,8 @@ class ARMReg<bits<4> num, string n, list<Register> subregs = []> : Register<n> {
field bits<4> Num;
let Namespace = "ARM";
let SubRegs = subregs;
+ // All bits of ARM registers with sub-registers are covered by sub-registers.
+ let CoveredBySubRegs = 1;
}
class ARMFReg<bits<6> num, string n> : Register<n> {
@@ -25,28 +27,30 @@ class ARMFReg<bits<6> num, string n> : Register<n> {
// Subregister indices.
let Namespace = "ARM" in {
+def qqsub_0 : SubRegIndex;
+def qqsub_1 : SubRegIndex;
+
// Note: Code depends on these having consecutive numbers.
-def ssub_0 : SubRegIndex;
-def ssub_1 : SubRegIndex;
-def ssub_2 : SubRegIndex; // In a Q reg.
-def ssub_3 : SubRegIndex;
+def qsub_0 : SubRegIndex;
+def qsub_1 : SubRegIndex;
+def qsub_2 : SubRegIndex<[qqsub_1, qsub_0]>;
+def qsub_3 : SubRegIndex<[qqsub_1, qsub_1]>;
def dsub_0 : SubRegIndex;
def dsub_1 : SubRegIndex;
-def dsub_2 : SubRegIndex;
-def dsub_3 : SubRegIndex;
-def dsub_4 : SubRegIndex;
-def dsub_5 : SubRegIndex;
-def dsub_6 : SubRegIndex;
-def dsub_7 : SubRegIndex;
+def dsub_2 : SubRegIndex<[qsub_1, dsub_0]>;
+def dsub_3 : SubRegIndex<[qsub_1, dsub_1]>;
+def dsub_4 : SubRegIndex<[qsub_2, dsub_0]>;
+def dsub_5 : SubRegIndex<[qsub_2, dsub_1]>;
+def dsub_6 : SubRegIndex<[qsub_3, dsub_0]>;
+def dsub_7 : SubRegIndex<[qsub_3, dsub_1]>;
-def qsub_0 : SubRegIndex;
-def qsub_1 : SubRegIndex;
-def qsub_2 : SubRegIndex;
-def qsub_3 : SubRegIndex;
-
-def qqsub_0 : SubRegIndex;
-def qqsub_1 : SubRegIndex;
+def ssub_0 : SubRegIndex;
+def ssub_1 : SubRegIndex;
+def ssub_2 : SubRegIndex<[dsub_1, ssub_0]>;
+def ssub_3 : SubRegIndex<[dsub_1, ssub_1]>;
+// Let TableGen synthesize the remaining 12 ssub_* indices.
+// We don't need to name them.
}
// Integer registers
@@ -127,9 +131,7 @@ def D30 : ARMFReg<30, "d30">, DwarfRegNum<[286]>;
def D31 : ARMFReg<31, "d31">, DwarfRegNum<[287]>;
// Advanced SIMD (NEON) defines 16 quad-word aliases
-let SubRegIndices = [dsub_0, dsub_1],
- CompositeIndices = [(ssub_2 dsub_1, ssub_0),
- (ssub_3 dsub_1, ssub_1)] in {
+let SubRegIndices = [dsub_0, dsub_1] in {
def Q0 : ARMReg< 0, "q0", [D0, D1]>;
def Q1 : ARMReg< 1, "q1", [D2, D3]>;
def Q2 : ARMReg< 2, "q2", [D4, D5]>;
@@ -150,45 +152,22 @@ def Q14 : ARMReg<14, "q14", [D28, D29]>;
def Q15 : ARMReg<15, "q15", [D30, D31]>;
}
-// Pseudo 256-bit registers to represent pairs of Q registers. These should
-// never be present in the emitted code.
-// These are used for NEON load / store instructions, e.g., vld4, vst3.
-// NOTE: It's possible to define more QQ registers since technically the
-// starting D register number doesn't have to be multiple of 4, e.g.,
-// D1, D2, D3, D4 would be a legal quad, but that would make the subregister
-// stuff very messy.
-let SubRegIndices = [qsub_0, qsub_1],
- CompositeIndices = [(dsub_2 qsub_1, dsub_0), (dsub_3 qsub_1, dsub_1)] in {
-def QQ0 : ARMReg<0, "qq0", [Q0, Q1]>;
-def QQ1 : ARMReg<1, "qq1", [Q2, Q3]>;
-def QQ2 : ARMReg<2, "qq2", [Q4, Q5]>;
-def QQ3 : ARMReg<3, "qq3", [Q6, Q7]>;
-def QQ4 : ARMReg<4, "qq4", [Q8, Q9]>;
-def QQ5 : ARMReg<5, "qq5", [Q10, Q11]>;
-def QQ6 : ARMReg<6, "qq6", [Q12, Q13]>;
-def QQ7 : ARMReg<7, "qq7", [Q14, Q15]>;
-}
-
-// Pseudo 512-bit registers to represent four consecutive Q registers.
-let SubRegIndices = [qqsub_0, qqsub_1],
- CompositeIndices = [(qsub_2 qqsub_1, qsub_0), (qsub_3 qqsub_1, qsub_1),
- (dsub_4 qqsub_1, dsub_0), (dsub_5 qqsub_1, dsub_1),
- (dsub_6 qqsub_1, dsub_2), (dsub_7 qqsub_1, dsub_3)] in {
-def QQQQ0 : ARMReg<0, "qqqq0", [QQ0, QQ1]>;
-def QQQQ1 : ARMReg<1, "qqqq1", [QQ2, QQ3]>;
-def QQQQ2 : ARMReg<2, "qqqq2", [QQ4, QQ5]>;
-def QQQQ3 : ARMReg<3, "qqqq3", [QQ6, QQ7]>;
-}
-
// Current Program Status Register.
-def CPSR : ARMReg<0, "cpsr">;
-def APSR : ARMReg<1, "apsr">;
-def SPSR : ARMReg<2, "spsr">;
-def FPSCR : ARMReg<3, "fpscr">;
-def ITSTATE : ARMReg<4, "itstate">;
+// We model fpscr with two registers: FPSCR models the control bits and will be
+// reserved. FPSCR_NZCV models the flag bits and will be unreserved.
+def CPSR : ARMReg<0, "cpsr">;
+def APSR : ARMReg<1, "apsr">;
+def SPSR : ARMReg<2, "spsr">;
+def FPSCR : ARMReg<3, "fpscr">;
+def FPSCR_NZCV : ARMReg<3, "fpscr_nzcv"> {
+ let Aliases = [FPSCR];
+}
+def ITSTATE : ARMReg<4, "itstate">;
// Special Registers - only available in privileged mode.
def FPSID : ARMReg<0, "fpsid">;
+def MVFR1 : ARMReg<6, "mvfr1">;
+def MVFR0 : ARMReg<7, "mvfr0">;
def FPEXC : ARMReg<8, "fpexc">;
// Register classes.
@@ -261,6 +240,12 @@ def tcGPR : RegisterClass<"ARM", [i32], 32, (add R0, R1, R2, R3, R9, R12)> {
}];
}
+// Condition code registers.
+def CCR : RegisterClass<"ARM", [i32], 32, (add CPSR)> {
+ let CopyCost = -1; // Don't allow copying of status registers.
+ let isAllocatable = 0;
+}
+
// Scalar single precision floating point register class..
def SPR : RegisterClass<"ARM", [f32], 32, (sequence "S%u", 0, 31)>;
@@ -316,37 +301,100 @@ def QPR_8 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
(DPR_8 dsub_0, dsub_1)];
}
+// Pseudo-registers representing odd-even pairs of D registers. The even-odd
+// pairs are already represented by the Q registers.
+// These are needed by NEON instructions requiring two consecutive D registers.
+// There is no D31_D0 register as that is always an UNPREDICTABLE encoding.
+def TuplesOE2D : RegisterTuples<[dsub_0, dsub_1],
+ [(decimate (shl DPR, 1), 2),
+ (decimate (shl DPR, 2), 2)]>;
+
+// Register class representing a pair of consecutive D registers.
+// Use the Q registers for the even-odd pairs.
+def DPair : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
+ 128, (interleave QPR, TuplesOE2D)> {
+ // Allocate starting at non-VFP2 registers D16-D31 first.
+ // Prefer even-odd pairs as they are easier to copy.
+ let AltOrders = [(add (rotl QPR, 8), (rotl DPair, 16))];
+ let AltOrderSelect = [{ return 1; }];
+}
+
+// Pseudo-registers representing 3 consecutive D registers.
+def Tuples3D : RegisterTuples<[dsub_0, dsub_1, dsub_2],
+ [(shl DPR, 0),
+ (shl DPR, 1),
+ (shl DPR, 2)]>;
+
+// 3 consecutive D registers.
+def DTriple : RegisterClass<"ARM", [untyped], 64, (add Tuples3D)> {
+ let Size = 192; // 3 x 64 bits, we have no predefined type of that size.
+}
+
+// Pseudo 256-bit registers to represent pairs of Q registers. These should
+// never be present in the emitted code.
+// These are used for NEON load / store instructions, e.g., vld4, vst3.
+def Tuples2Q : RegisterTuples<[qsub_0, qsub_1], [(shl QPR, 0), (shl QPR, 1)]>;
+
// Pseudo 256-bit vector register class to model pairs of Q registers
// (4 consecutive D registers).
-def QQPR : RegisterClass<"ARM", [v4i64], 256, (sequence "QQ%u", 0, 7)> {
+def QQPR : RegisterClass<"ARM", [v4i64], 256, (add Tuples2Q)> {
let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3),
(QPR qsub_0, qsub_1)];
// Allocate non-VFP2 aliases first.
- let AltOrders = [(rotl QQPR, 4)];
+ let AltOrders = [(rotl QQPR, 8)];
let AltOrderSelect = [{ return 1; }];
}
-// Subset of QQPR that have 32-bit SPR subregs.
-def QQPR_VFP2 : RegisterClass<"ARM", [v4i64], 256, (trunc QQPR, 4)> {
- let SubRegClasses = [(SPR ssub_0, ssub_1, ssub_2, ssub_3),
- (DPR_VFP2 dsub_0, dsub_1, dsub_2, dsub_3),
- (QPR_VFP2 qsub_0, qsub_1)];
+// Tuples of 4 D regs that aren't also a pair of Q regs.
+def TuplesOE4D : RegisterTuples<[dsub_0, dsub_1, dsub_2, dsub_3],
+ [(decimate (shl DPR, 1), 2),
+ (decimate (shl DPR, 2), 2),
+ (decimate (shl DPR, 3), 2),
+ (decimate (shl DPR, 4), 2)]>;
-}
+// 4 consecutive D registers.
+def DQuad : RegisterClass<"ARM", [v4i64], 256,
+ (interleave Tuples2Q, TuplesOE4D)>;
+
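
The RegisterTuples definitions above lean on three TableGen set operators: (shl L, N) drops the first N elements, (decimate L, N) keeps every Nth element starting with the first, and (interleave A, B) alternates the two lists. A rough C++ model of those semantics, for intuition only:

#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>
using List = std::vector<std::string>;

static List shl(const List &L, std::size_t N) {
  return List(L.begin() + std::min(N, L.size()), L.end());
}
static List decimate(const List &L, std::size_t N) {
  List R;
  for (std::size_t i = 0; i < L.size(); i += N)
    R.push_back(L[i]);
  return R;
}
static List interleave(const List &A, const List &B) {
  List R;
  for (std::size_t i = 0; i < std::max(A.size(), B.size()); ++i) {
    if (i < A.size()) R.push_back(A[i]);
    if (i < B.size()) R.push_back(B[i]);
  }
  return R;
}
// TuplesOE2D's first subregister list is decimate(shl(DPR, 1), 2), i.e.
// D1, D3, D5, ... - the odd-even pairs the Q registers do not cover.
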
+// Pseudo 512-bit registers to represent four consecutive Q registers.
+def Tuples2QQ : RegisterTuples<[qqsub_0, qqsub_1],
+ [(shl QQPR, 0), (shl QQPR, 2)]>;
// Pseudo 512-bit vector register class to model 4 consecutive Q registers
// (8 consecutive D registers).
-def QQQQPR : RegisterClass<"ARM", [v8i64], 256, (sequence "QQQQ%u", 0, 3)> {
+def QQQQPR : RegisterClass<"ARM", [v8i64], 256, (add Tuples2QQ)> {
let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3,
dsub_4, dsub_5, dsub_6, dsub_7),
(QPR qsub_0, qsub_1, qsub_2, qsub_3)];
// Allocate non-VFP2 aliases first.
- let AltOrders = [(rotl QQQQPR, 2)];
+ let AltOrders = [(rotl QQQQPR, 8)];
let AltOrderSelect = [{ return 1; }];
}
-// Condition code registers.
-def CCR : RegisterClass<"ARM", [i32], 32, (add CPSR)> {
- let CopyCost = -1; // Don't allow copying of status registers.
- let isAllocatable = 0;
+
+// Pseudo-registers representing 2-spaced consecutive D registers.
+def Tuples2DSpc : RegisterTuples<[dsub_0, dsub_2],
+ [(shl DPR, 0),
+ (shl DPR, 2)]>;
+
+// Spaced pairs of D registers.
+def DPairSpc : RegisterClass<"ARM", [v2i64], 64, (add Tuples2DSpc)>;
+
+def Tuples3DSpc : RegisterTuples<[dsub_0, dsub_2, dsub_4],
+ [(shl DPR, 0),
+ (shl DPR, 2),
+ (shl DPR, 4)]>;
+
+// Spaced triples of D registers.
+def DTripleSpc : RegisterClass<"ARM", [untyped], 64, (add Tuples3DSpc)> {
+ let Size = 192; // 3 x 64 bits, we have no predefined type of that size.
}
+
+def Tuples4DSpc : RegisterTuples<[dsub_0, dsub_2, dsub_4, dsub_6],
+ [(shl DPR, 0),
+ (shl DPR, 2),
+ (shl DPR, 4),
+ (shl DPR, 6)]>;
+
+// Spaced quads of D registers.
+def DQuadSpc : RegisterClass<"ARM", [v4i64], 64, (add Tuples4DSpc)>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMRelocations.h b/contrib/llvm/lib/Target/ARM/ARMRelocations.h
index 86e7206..21877fd 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRelocations.h
+++ b/contrib/llvm/lib/Target/ARM/ARMRelocations.h
@@ -1,4 +1,4 @@
-//===- ARMRelocations.h - ARM Code Relocations ------------------*- C++ -*-===//
+//===-- ARMRelocations.h - ARM Code Relocations -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/ARM/ARMSchedule.td b/contrib/llvm/lib/Target/ARM/ARMSchedule.td
index 958c5c6..45486fd 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSchedule.td
+++ b/contrib/llvm/lib/Target/ARM/ARMSchedule.td
@@ -1,10 +1,10 @@
-//===- ARMSchedule.td - ARM Scheduling Definitions ---------*- tablegen -*-===//
-//
+//===-- ARMSchedule.td - ARM Scheduling Definitions --------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -118,6 +118,8 @@ def IIC_fpMUL32 : InstrItinClass;
def IIC_fpMUL64 : InstrItinClass;
def IIC_fpMAC32 : InstrItinClass;
def IIC_fpMAC64 : InstrItinClass;
+def IIC_fpFMAC32 : InstrItinClass;
+def IIC_fpFMAC64 : InstrItinClass;
def IIC_fpDIV32 : InstrItinClass;
def IIC_fpDIV64 : InstrItinClass;
def IIC_fpSQRT32 : InstrItinClass;
@@ -208,6 +210,8 @@ def IIC_VPERMQ : InstrItinClass;
def IIC_VPERMQ3 : InstrItinClass;
def IIC_VMACD : InstrItinClass;
def IIC_VMACQ : InstrItinClass;
+def IIC_VFMACD : InstrItinClass;
+def IIC_VFMACQ : InstrItinClass;
def IIC_VRECSD : InstrItinClass;
def IIC_VRECSQ : InstrItinClass;
def IIC_VCNTiD : InstrItinClass;
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
index 8d86c01..8b1fb93 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
@@ -324,6 +324,15 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrStage<19, [A8_NPipe], 0>,
InstrStage<19, [A8_NLSPipe]>], [19, 2, 1, 1]>,
//
+ // Single-precision Fused FP MAC
+ InstrItinData<IIC_fpFMAC32, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>], [7, 2, 1, 1]>,
+ //
+ // Double-precision Fused FP MAC
+ InstrItinData<IIC_fpFMAC64, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<19, [A8_NPipe], 0>,
+ InstrStage<19, [A8_NLSPipe]>], [19, 2, 1, 1]>,
+ //
// Single-precision FP DIV
InstrItinData<IIC_fpDIV32 , [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<20, [A8_NPipe], 0>,
@@ -860,6 +869,16 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrItinData<IIC_VMACQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<2, [A8_NPipe]>], [10, 3, 2, 2]>,
//
+ // Double-register Fused FP Multiply-Accumulate
+ InstrItinData<IIC_VFMACD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<1, [A8_NPipe]>], [9, 3, 2, 2]>,
+ //
+ // Quad-register Fused FP Multiply-Accumulate
+ // Result written in N9, but that is relative to the last cycle of a
+ // multi-cycle instruction, so we use 10 for those cases.
+ InstrItinData<IIC_VFMACQ, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
+ InstrStage<2, [A8_NPipe]>], [10, 3, 2, 2]>,
+ //
// Double-register Reciprocal Step
InstrItinData<IIC_VRECSD, [InstrStage<1, [A8_Pipe0, A8_Pipe1], 0>,
InstrStage<1, [A8_NPipe]>], [9, 2, 2]>,
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
index 49fedf6..0d710cc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
@@ -604,6 +604,22 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<2, [A9_NPipe]>],
[9, 1, 1, 1]>,
//
+ // Single-precision Fused FP MAC
+ InstrItinData<IIC_fpFMAC32, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrStage<9, [A9_DRegsN], 0, Reserved>,
+ InstrStage<1, [A9_NPipe]>],
+ [8, 1, 1, 1]>,
+ //
+ // Double-precision Fused FP MAC
+ InstrItinData<IIC_fpFMAC64, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsVFP], 0, Required>,
+ InstrStage<10, [A9_DRegsN], 0, Reserved>,
+ InstrStage<2, [A9_NPipe]>],
+ [9, 1, 1, 1]>,
+ //
// Single-precision FP DIV
InstrItinData<IIC_fpDIV32 , [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
InstrStage<1, [A9_MUX0], 0>,
@@ -1697,6 +1713,26 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrStage<4, [A9_NPipe]>],
[8, 4, 2, 1]>,
//
+ // Double-register Fused FP Multiply-Accumulate
+ InstrItinData<IIC_VFMACD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 7 cycles
+ InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<2, [A9_NPipe]>],
+ [6, 3, 2, 1]>,
+ //
+ // Quad-register Fused FP Multiply-Accumulate
+ // Result written in N9, but that is relative to the last cycle of a
+ // multi-cycle instruction, so we use 10 for those cases.
+ InstrItinData<IIC_VFMACQ, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
+ InstrStage<1, [A9_MUX0], 0>,
+ InstrStage<1, [A9_DRegsN], 0, Required>,
+ // Extra latency cycles since wbck is 9 cycles
+ InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
+ InstrStage<4, [A9_NPipe]>],
+ [8, 4, 2, 1]>,
+ //
// Double-register Reciprocal Step
InstrItinData<IIC_VRECSD, [InstrStage<1, [A9_Issue0, A9_Issue1], 0>,
InstrStage<1, [A9_MUX0], 0>,
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td b/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
index c1880a7..0ace9bc 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
@@ -1,10 +1,10 @@
-//===- ARMScheduleV6.td - ARM v6 Scheduling Definitions ----*- tablegen -*-===//
-//
+//===-- ARMScheduleV6.td - ARM v6 Scheduling Definitions ---*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the itinerary class data for the ARM v6 processors.
@@ -243,6 +243,12 @@ def ARMV6Itineraries : ProcessorItineraries<
// Double-precision FP MAC
InstrItinData<IIC_fpMAC64 , [InstrStage<2, [V6_Pipe]>], [9, 2, 2, 2]>,
//
+ // Single-precision Fused FP MAC
+ InstrItinData<IIC_fpFMAC32, [InstrStage<1, [V6_Pipe]>], [9, 2, 2, 2]>,
+ //
+ // Double-precision Fused FP MAC
+ InstrItinData<IIC_fpFMAC64, [InstrStage<2, [V6_Pipe]>], [9, 2, 2, 2]>,
+ //
// Single-precision FP DIV
InstrItinData<IIC_fpDIV32 , [InstrStage<15, [V6_Pipe]>], [20, 2, 2]>,
//
diff --git a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index a3a3d58..e2530d0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -67,7 +67,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
SrcPtrInfo.getWithOffset(SrcOff), isVolatile,
- false, 0);
+ false, false, 0);
TFOps[i] = Loads[i].getValue(1);
SrcOff += VTSize;
}
@@ -105,7 +105,8 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcPtrInfo.getWithOffset(SrcOff), false, false, 0);
+ SrcPtrInfo.getWithOffset(SrcOff),
+ false, false, false, 0);
TFOps[i] = Loads[i].getValue(1);
++i;
SrcOff += VTSize;
@@ -144,8 +145,8 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
SDValue Src, SDValue Size,
unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
- // Use default for non AAPCS subtargets
- if (!Subtarget->isAAPCS_ABI())
+ // Use the default for non-AAPCS (or Darwin) subtargets.
+ if (!Subtarget->isAAPCS_ABI() || Subtarget->isTargetDarwin())
return SDValue();
const ARMTargetLowering &TLI =
@@ -188,6 +189,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
0, // number of fixed arguments
TLI.getLibcallCallingConv(RTLIB::MEMSET), // call conv
false, // is tail call
+ false, // does not return
false, // is return val used
DAG.getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
TLI.getPointerTy()), // callee
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 247d6be..e247b76 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -1,4 +1,4 @@
-//===-- ARMSubtarget.cpp - ARM Subtarget Information ------------*- C++ -*-===//
+//===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,7 +16,6 @@
#include "llvm/GlobalValue.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/ADT/SmallVector.h"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
@@ -47,13 +46,13 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
, HasV7Ops(false)
, HasVFPv2(false)
, HasVFPv3(false)
+ , HasVFPv4(false)
, HasNEON(false)
, UseNEONForSinglePrecisionFP(false)
, SlowFPVMLx(false)
, HasVMLxForwarding(false)
, SlowFPBrcc(false)
, InThumbMode(false)
- , InNaClMode(false)
, HasThumb2(false)
, IsMClass(false)
, NoARM(false)
@@ -104,18 +103,19 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
computeIssueWidth();
if (TT.find("eabi") != std::string::npos)
+ // FIXME: We might want to separate AAPCS and EABI. Some systems, e.g.
+ // Darwin-EABI, conform to AAPCS but not to the rest of EABI.
TargetABI = ARM_ABI_AAPCS;
if (isAAPCS_ABI())
stackAlignment = 8;
- if (!isTargetDarwin())
+ if (!isTargetIOS())
UseMovt = hasV6T2Ops();
else {
IsR9Reserved = ReserveR9 | !HasV6Ops;
UseMovt = DarwinUseMOVT && hasV6T2Ops();
- const Triple &T = getTargetTriple();
- SupportsTailCall = T.getOS() == Triple::IOS && !T.isOSVersionLT(5, 0);
+ SupportsTailCall = !getTargetTriple().isOSVersionLT(5, 0);
}
if (!isThumb() || hasThumb2())
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
index b63e108..e72b06f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -1,4 +1,4 @@
-//=====---- ARMSubtarget.h - Define Subtarget for the ARM -----*- C++ -*--====//
+//===-- ARMSubtarget.h - Define Subtarget for the ARM ----------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -45,10 +45,11 @@ protected:
bool HasV6T2Ops;
bool HasV7Ops;
- /// HasVFPv2, HasVFPv3, HasNEON - Specify what floating point ISAs are
- /// supported.
+ /// HasVFPv2, HasVFPv3, HasVFPv4, HasNEON - Specify what
+ /// floating point ISAs are supported.
bool HasVFPv2;
bool HasVFPv3;
+ bool HasVFPv4;
bool HasNEON;
/// UseNEONForSinglePrecisionFP - if the NEONFP attribute has been
@@ -70,9 +71,6 @@ protected:
/// InThumbMode - True if compiling for Thumb, false for ARM.
bool InThumbMode;
- /// InNaClMode - True if targeting Native Client
- bool InNaClMode;
-
/// HasThumb2 - True if Thumb2 instructions are supported.
bool HasThumb2;
@@ -126,6 +124,10 @@ protected:
/// CPSR setting instruction.
bool AvoidCPSRPartialUpdate;
+ /// HasRAS - Some processors perform return stack prediction. CodeGen should
+ /// avoid issuing "normal" call instructions to callees which do not return.
+ bool HasRAS;
+
/// HasMPExtension - True if the subtarget supports Multiprocessing
/// extension (ARMv7 only).
bool HasMPExtension;
@@ -194,11 +196,13 @@ protected:
bool isCortexA8() const { return ARMProcFamily == CortexA8; }
bool isCortexA9() const { return ARMProcFamily == CortexA9; }
+ bool isCortexM3() const { return CPUString == "cortex-m3"; }
bool hasARMOps() const { return !NoARM; }
bool hasVFP2() const { return HasVFPv2; }
bool hasVFP3() const { return HasVFPv3; }
+ bool hasVFP4() const { return HasVFPv4; }
bool hasNEON() const { return HasNEON; }
bool useNEONForSinglePrecisionFP() const {
return hasNEON() && UseNEONForSinglePrecisionFP; }
@@ -212,6 +216,7 @@ protected:
bool isFPOnlySP() const { return FPOnlySP; }
bool prefers32BitThumb() const { return Pref32BitThumb; }
bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
+ bool hasRAS() const { return HasRAS; }
bool hasMPExtension() const { return HasMPExtension; }
bool hasThumb2DSP() const { return Thumb2DSP; }
@@ -220,6 +225,7 @@ protected:
const Triple &getTargetTriple() const { return TargetTriple; }
+ bool isTargetIOS() const { return TargetTriple.getOS() == Triple::IOS; }
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
bool isTargetNaCl() const {
return TargetTriple.getOS() == Triple::NativeClient;
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 96b1e89..047efc2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -20,6 +20,7 @@
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Scalar.h"
using namespace llvm;
static cl::opt<bool>
@@ -33,24 +34,32 @@ extern "C" void LLVMInitializeARMTarget() {
RegisterTargetMachine<ThumbTargetMachine> Y(TheThumbTarget);
}
+
/// TargetMachine ctor - Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS),
JITInfo(),
InstrItins(Subtarget.getInstrItineraryData()) {
// Default to soft float ABI
- if (FloatABIType == FloatABI::Default)
- FloatABIType = FloatABI::Soft;
+ if (Options.FloatABIType == FloatABI::Default)
+ this->Options.FloatABIType = FloatABI::Soft;
}
+void ARMTargetMachine::anchor() { }
+
ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : ARMBaseTargetMachine(T, TT, CPU, FS, RM, CM), InstrInfo(Subtarget),
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ InstrInfo(Subtarget),
DataLayout(Subtarget.isAPCS_ABI() ?
std::string("e-p:32:32-f64:32:64-i64:32:64-"
"v128:32:128-v64:32:64-n32-S32") :
@@ -68,10 +77,14 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
"support ARM mode execution!");
}
+void ThumbTargetMachine::anchor() { }
+
ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : ARMBaseTargetMachine(T, TT, CPU, FS, RM, CM),
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
InstrInfo(Subtarget.hasThumb2()
? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
: ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
@@ -94,37 +107,62 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
: (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) {
}
-bool ARMBaseTargetMachine::addPreISel(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- if (OptLevel != CodeGenOpt::None && EnableGlobalMerge)
- PM.add(createARMGlobalMergePass(getTargetLowering()));
+namespace {
+/// ARM Code Generator Pass Configuration Options.
+class ARMPassConfig : public TargetPassConfig {
+public:
+ ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ ARMBaseTargetMachine &getARMTargetMachine() const {
+ return getTM<ARMBaseTargetMachine>();
+ }
+
+ const ARMSubtarget &getARMSubtarget() const {
+ return *getARMTargetMachine().getSubtargetImpl();
+ }
+
+ virtual bool addPreISel();
+ virtual bool addInstSelector();
+ virtual bool addPreRegAlloc();
+ virtual bool addPreSched2();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new ARMPassConfig(this, PM);
+}
+
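
The refactoring above moves the pipeline hooks off the TargetMachine onto a TargetPassConfig subclass that owns the optimization level. The shape of that pattern, sketched with hypothetical base classes rather than LLVM's real API:

struct PassConfigBase {
  int OptLevel;
  explicit PassConfigBase(int OL) : OptLevel(OL) {}
  virtual ~PassConfigBase() {}
  virtual bool addPreISel() { return false; }
};

struct MyTargetPassConfig : PassConfigBase {
  explicit MyTargetPassConfig(int OL) : PassConfigBase(OL) {}
  virtual bool addPreISel() {
    return OptLevel != 0; // schedule target passes only when optimizing
  }
};
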
+bool ARMPassConfig::addPreISel() {
+ if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge)
+ PM.add(createGlobalMergePass(TM->getTargetLowering()));
return false;
}
-bool ARMBaseTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createARMISelDag(*this, OptLevel));
+bool ARMPassConfig::addInstSelector() {
+ PM.add(createARMISelDag(getARMTargetMachine(), getOptLevel()));
return false;
}
-bool ARMBaseTargetMachine::addPreRegAlloc(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+bool ARMPassConfig::addPreRegAlloc() {
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
- if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only())
+ if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
PM.add(createARMLoadStoreOptimizationPass(true));
- if (OptLevel != CodeGenOpt::None && Subtarget.isCortexA9())
+ if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
PM.add(createMLxExpansionPass());
return true;
}
-bool ARMBaseTargetMachine::addPreSched2(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+bool ARMPassConfig::addPreSched2() {
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
- if (OptLevel != CodeGenOpt::None) {
- if (!Subtarget.isThumb1Only())
+ if (getOptLevel() != CodeGenOpt::None) {
+ if (!getARMSubtarget().isThumb1Only()) {
PM.add(createARMLoadStoreOptimizationPass());
- if (Subtarget.hasNEON())
+ printAndVerify("After ARM load / store optimizer");
+ }
+ if (getARMSubtarget().hasNEON())
PM.add(createExecutionDependencyFixPass(&ARM::DPRRegClass));
}
@@ -132,27 +170,31 @@ bool ARMBaseTargetMachine::addPreSched2(PassManagerBase &PM,
// proper scheduling.
PM.add(createARMExpandPseudoPass());
- if (OptLevel != CodeGenOpt::None) {
- if (!Subtarget.isThumb1Only())
- PM.add(createIfConverterPass());
+ if (getOptLevel() != CodeGenOpt::None) {
+ if (!getARMSubtarget().isThumb1Only())
+ addPass(IfConverterID);
}
- if (Subtarget.isThumb2())
+ if (getARMSubtarget().isThumb2())
PM.add(createThumb2ITBlockPass());
return true;
}
-bool ARMBaseTargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- if (Subtarget.isThumb2() && !Subtarget.prefers32BitThumb())
- PM.add(createThumb2SizeReductionPass());
+bool ARMPassConfig::addPreEmitPass() {
+ if (getARMSubtarget().isThumb2()) {
+ if (!getARMSubtarget().prefers32BitThumb())
+ PM.add(createThumb2SizeReductionPass());
+
+ // The constant island pass works on unbundled instructions.
+ addPass(UnpackMachineBundlesID);
+ }
PM.add(createARMConstantIslandPass());
+
return true;
}
bool ARMBaseTargetMachine::addCodeEmitter(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
JITCodeEmitter &JCE) {
// Machine code emitter pass for ARM.
PM.add(createARMJITCodeEmitterPass(*this, JCE));
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
index c8c601c..abcdb24 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.h
@@ -41,7 +41,9 @@ private:
public:
ARMBaseTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
virtual ARMJITInfo *getJITInfo() { return &JITInfo; }
virtual const ARMSubtarget *getSubtargetImpl() const { return &Subtarget; }
@@ -50,18 +52,15 @@ public:
}
// Pass Pipeline Configuration
- virtual bool addPreISel(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreSched2(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addCodeEmitter(PassManagerBase &PM, CodeGenOpt::Level OptLevel,
- JITCodeEmitter &MCE);
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
+ virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &MCE);
};
/// ARMTargetMachine - ARM target machine.
///
class ARMTargetMachine : public ARMBaseTargetMachine {
+ virtual void anchor();
ARMInstrInfo InstrInfo;
const TargetData DataLayout; // Calculates type size & alignment
ARMELFWriterInfo ELFWriterInfo;
@@ -71,7 +70,9 @@ class ARMTargetMachine : public ARMBaseTargetMachine {
public:
ARMTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
virtual const ARMRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
@@ -100,6 +101,7 @@ class ARMTargetMachine : public ARMBaseTargetMachine {
/// Thumb-1 and Thumb-2.
///
class ThumbTargetMachine : public ARMBaseTargetMachine {
+ virtual void anchor();
// Either Thumb1InstrInfo or Thumb2InstrInfo.
OwningPtr<ARMBaseInstrInfo> InstrInfo;
const TargetData DataLayout; // Calculates type size & alignment
@@ -111,7 +113,9 @@ class ThumbTargetMachine : public ARMBaseTargetMachine {
public:
ThumbTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
/// returns either Thumb1RegisterInfo or Thumb2RegisterInfo
virtual const ARMBaseRegisterInfo *getRegisterInfo() const {
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
index 19defa1..a5ea1c2 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.cpp
@@ -14,6 +14,7 @@
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ELF.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/ADT/StringExtras.h"
using namespace llvm;
using namespace dwarf;
@@ -24,8 +25,9 @@ using namespace dwarf;
void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
const TargetMachine &TM) {
TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ isAAPCS_ABI = TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI();
- if (TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI()) {
+ if (isAAPCS_ABI) {
StaticCtorSection =
getContext().getELFSection(".init_array", ELF::SHT_INIT_ARRAY,
ELF::SHF_WRITE |
@@ -45,3 +47,33 @@ void ARMElfTargetObjectFile::Initialize(MCContext &Ctx,
0,
SectionKind::getMetadata());
}
+
+const MCSection *
+ARMElfTargetObjectFile::getStaticCtorSection(unsigned Priority) const {
+ if (!isAAPCS_ABI)
+ return TargetLoweringObjectFileELF::getStaticCtorSection(Priority);
+
+ if (Priority == 65535)
+ return StaticCtorSection;
+
+ // Emit ctors in priority order.
+ std::string Name = std::string(".init_array.") + utostr(Priority);
+ return getContext().getELFSection(Name, ELF::SHT_INIT_ARRAY,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE,
+ SectionKind::getDataRel());
+}
+
+const MCSection *
+ARMElfTargetObjectFile::getStaticDtorSection(unsigned Priority) const {
+ if (!isAAPCS_ABI)
+ return TargetLoweringObjectFileELF::getStaticDtorSection(Priority);
+
+ if (Priority == 65535)
+ return StaticDtorSection;
+
+ // Emit dtors in priority order.
+ std::string Name = std::string(".fini_array.") + utostr(Priority);
+ return getContext().getELFSection(Name, ELF::SHT_FINI_ARRAY,
+ ELF::SHF_ALLOC | ELF::SHF_WRITE,
+ SectionKind::getDataRel());
+}
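
Under AAPCS, prioritized constructors land in `.init_array.<priority>` so the linker can sort them, with priority 65535 mapping to the default section. A hedged sketch of the naming scheme (std::to_string standing in for LLVM's utostr, hypothetical helper name):

#include <string>

static std::string ctorSectionFor(unsigned Priority) {
  return Priority == 65535 ? std::string(".init_array")
                           : ".init_array." + std::to_string(Priority);
}
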
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
index c6a7261..ff21060 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetObjectFile.h
@@ -20,6 +20,7 @@ class TargetMachine;
class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF {
protected:
const MCSection *AttributesSection;
+ bool isAAPCS_ABI;
public:
ARMElfTargetObjectFile() :
TargetLoweringObjectFileELF(),
@@ -31,6 +32,9 @@ public:
virtual const MCSection *getAttributesSection() const {
return AttributesSection;
}
+
+ const MCSection *getStaticCtorSection(unsigned Priority) const;
+ const MCSection *getStaticDtorSection(unsigned Priority) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp
index 14d35ba..fda8536 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp
@@ -17,9 +17,6 @@
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include <string>
@@ -107,11 +104,9 @@ AsmToken ARMBaseAsmLexer::LexTokenUAL() {
SetError(Lexer->getErrLoc(), Lexer->getErr());
break;
case AsmToken::Identifier: {
- std::string upperCase = lexedToken.getString().str();
- std::string lowerCase = LowercaseString(upperCase);
- StringRef lowerRef(lowerCase);
+ std::string lowerCase = lexedToken.getString().lower();
- unsigned regID = MatchRegisterName(lowerRef);
+ unsigned regID = MatchRegisterName(lowerCase);
// Check for register aliases.
// r13 -> sp
// r14 -> lr
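
The lexer change above is a pure simplification: StringRef::lower() produces a lower-cased std::string in one step, removing the upper-case copy, the LowercaseString() call, and the extra StringRef wrapper, and MatchRegisterName accepts the result via implicit conversion. A small sketch, assuming only llvm::StringRef:

    #include "llvm/ADT/StringRef.h"
    #include <cassert>

    void lowerDemo() {
      llvm::StringRef Tok("R12");
      std::string lowerCase = Tok.lower(); // one conversion, no temporaries
      assert(lowerCase == "r12");
    }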
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 24f15b4..e55a7da 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -30,7 +30,6 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
@@ -40,9 +39,15 @@ namespace {
class ARMOperand;
+enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
+
class ARMAsmParser : public MCTargetAsmParser {
MCSubtargetInfo &STI;
MCAsmParser &Parser;
+ const MCRegisterInfo *MRI;
+
+ // Map of register aliases registered via the .req directive.
+ StringMap<unsigned> RegisterReqs;
struct {
ARMCC::CondCodes Cond; // Condition for IT block.
@@ -91,9 +96,14 @@ class ARMAsmParser : public MCTargetAsmParser {
unsigned &ShiftAmount);
bool parseDirectiveWord(unsigned Size, SMLoc L);
bool parseDirectiveThumb(SMLoc L);
+ bool parseDirectiveARM(SMLoc L);
bool parseDirectiveThumbFunc(SMLoc L);
bool parseDirectiveCode(SMLoc L);
bool parseDirectiveSyntax(SMLoc L);
+ bool parseDirectiveReq(StringRef Name, SMLoc L);
+ bool parseDirectiveUnreq(SMLoc L);
+ bool parseDirectiveArch(SMLoc L);
+ bool parseDirectiveEabiAttr(SMLoc L);
StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
bool &CarrySetting, unsigned &ProcessorIMod,
@@ -161,6 +171,8 @@ class ARMAsmParser : public MCTargetAsmParser {
OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
+ OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
+ OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
// Asm Match Converter Methods
bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
@@ -197,10 +209,18 @@ class ARMAsmParser : public MCTargetAsmParser {
const SmallVectorImpl<MCParsedAsmOperand*> &);
bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
const SmallVectorImpl<MCParsedAsmOperand*> &);
+ bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
+ const SmallVectorImpl<MCParsedAsmOperand*> &);
+ bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
+ const SmallVectorImpl<MCParsedAsmOperand*> &);
+ bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
+ const SmallVectorImpl<MCParsedAsmOperand*> &);
+ bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
+ const SmallVectorImpl<MCParsedAsmOperand*> &);
bool validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
- void processInstruction(MCInst &Inst,
+ bool processInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
bool shouldOmitCCOutOperand(StringRef Mnemonic,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
@@ -217,6 +237,9 @@ public:
: MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
MCAsmParserExtension::Initialize(_Parser);
+ // Cache the MCRegisterInfo.
+ MRI = &getContext().getRegisterInfo();
+
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
@@ -251,7 +274,6 @@ class ARMOperand : public MCParsedAsmOperand {
k_CoprocReg,
k_CoprocOption,
k_Immediate,
- k_FPImmediate,
k_MemBarrierOpt,
k_Memory,
k_PostIndexRegister,
@@ -262,6 +284,9 @@ class ARMOperand : public MCParsedAsmOperand {
k_RegisterList,
k_DPRRegisterList,
k_SPRRegisterList,
+ k_VectorList,
+ k_VectorListAllLanes,
+ k_VectorListIndexed,
k_ShiftedRegister,
k_ShiftedImmediate,
k_ShifterImmediate,
@@ -311,6 +336,14 @@ class ARMOperand : public MCParsedAsmOperand {
unsigned RegNum;
} Reg;
+ // A vector register list is a sequential list of 1 to 4 registers.
+ struct {
+ unsigned RegNum;
+ unsigned Count;
+ unsigned LaneIndex;
+ bool isDoubleSpaced;
+ } VectorList;
+
struct {
unsigned Val;
} VectorIndex;
@@ -319,10 +352,6 @@ class ARMOperand : public MCParsedAsmOperand {
const MCExpr *Val;
} Imm;
- struct {
- unsigned Val; // encoded 8-bit representation
- } FPImm;
-
/// Combined record for all forms of ARM address expressions.
struct {
unsigned BaseRegNum;
@@ -333,7 +362,7 @@ class ARMOperand : public MCParsedAsmOperand {
ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
unsigned ShiftImm; // shift for OffsetReg.
unsigned Alignment; // 0 = no alignment specified
- // n = alignment in bytes (8, 16, or 32)
+ // n = alignment in bytes (2, 4, 8, 16, or 32)
unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
} Memory;
@@ -393,6 +422,11 @@ public:
case k_SPRRegisterList:
Registers = o.Registers;
break;
+ case k_VectorList:
+ case k_VectorListAllLanes:
+ case k_VectorListIndexed:
+ VectorList = o.VectorList;
+ break;
case k_CoprocNum:
case k_CoprocReg:
Cop = o.Cop;
@@ -403,9 +437,6 @@ public:
case k_Immediate:
Imm = o.Imm;
break;
- case k_FPImmediate:
- FPImm = o.FPImm;
- break;
case k_MemBarrierOpt:
MBOpt = o.MBOpt;
break;
@@ -474,15 +505,10 @@ public:
}
const MCExpr *getImm() const {
- assert(Kind == k_Immediate && "Invalid access!");
+ assert(isImm() && "Invalid access!");
return Imm.Val;
}
- unsigned getFPImm() const {
- assert(Kind == k_FPImmediate && "Invalid access!");
- return FPImm.Val;
- }
-
unsigned getVectorIndex() const {
assert(Kind == k_VectorIndex && "Invalid access!");
return VectorIndex.Val;
@@ -511,90 +537,219 @@ public:
bool isITMask() const { return Kind == k_ITCondMask; }
bool isITCondCode() const { return Kind == k_CondCode; }
bool isImm() const { return Kind == k_Immediate; }
- bool isFPImm() const { return Kind == k_FPImmediate; }
+ bool isFPImm() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
+ return Val != -1;
+ }
+ bool isFBits16() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value <= 16;
+ }
+ bool isFBits32() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 1 && Value <= 32;
+ }
bool isImm8s4() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
}
bool isImm0_1020s4() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
}
bool isImm0_508s4() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
}
+ bool isImm0_508s4Neg() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = -CE->getValue();
+ // Explicitly exclude zero. We want that to use the normal 0_508 version.
+ return ((Value & 3) == 0) && Value > 0 && Value <= 508;
+ }
bool isImm0_255() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 256;
}
+ bool isImm0_4095() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < 4096;
+ }
+ bool isImm0_4095Neg() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = -CE->getValue();
+ return Value > 0 && Value < 4096;
+ }
+ bool isImm0_1() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < 2;
+ }
+ bool isImm0_3() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < 4;
+ }
bool isImm0_7() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 8;
}
bool isImm0_15() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 16;
}
bool isImm0_31() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 32;
}
+ bool isImm0_63() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < 64;
+ }
+ bool isImm8() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value == 8;
+ }
+ bool isImm16() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value == 16;
+ }
+ bool isImm32() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value == 32;
+ }
+ bool isShrImm8() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value <= 8;
+ }
+ bool isShrImm16() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value <= 16;
+ }
+ bool isShrImm32() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value <= 32;
+ }
+ bool isShrImm64() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value <= 64;
+ }
+ bool isImm1_7() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value < 8;
+ }
+ bool isImm1_15() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value < 16;
+ }
+ bool isImm1_31() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value < 32;
+ }
bool isImm1_16() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value > 0 && Value < 17;
}
bool isImm1_32() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value > 0 && Value < 33;
}
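
Every predicate in this run follows the same shape: require an immediate kind, require that the expression folded to an MCConstantExpr, then range-check the value; the diff's only change here is replacing the open-coded Kind != k_Immediate test with the isImm() accessor. A hypothetical helper capturing that shape (the real code deliberately inlines it per predicate, since the tablegen'erated matcher calls each isImmX_Y() by name):

    #include "llvm/MC/MCExpr.h"
    using namespace llvm;

    // Hypothetical, for illustration only: the shared skeleton of the
    // isImmX_Y() predicates above.
    template <typename RangePred>
    static bool isConstImmInRange(const MCExpr *Imm, RangePred InRange) {
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm);
      return CE && InRange(CE->getValue());
    }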
+ bool isImm0_32() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < 33;
+ }
bool isImm0_65535() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 65536;
}
bool isImm0_65535Expr() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
// If it's not a constant expression, it'll generate a fixup and be
// handled later.
@@ -603,56 +758,81 @@ public:
return Value >= 0 && Value < 65536;
}
bool isImm24bit() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= 0 && Value <= 0xffffff;
}
bool isImmThumbSR() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value > 0 && Value < 33;
}
bool isPKHLSLImm() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value >= 0 && Value < 32;
}
bool isPKHASRImm() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return Value > 0 && Value <= 32;
}
bool isARMSOImm() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getSOImmVal(Value) != -1;
}
+ bool isARMSOImmNot() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return ARM_AM::getSOImmVal(~Value) != -1;
+ }
+ bool isARMSOImmNeg() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ // Only use this when not representable as a plain so_imm.
+ return ARM_AM::getSOImmVal(Value) == -1 &&
+ ARM_AM::getSOImmVal(-Value) != -1;
+ }
bool isT2SOImm() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
return ARM_AM::getT2SOImmVal(Value) != -1;
}
+ bool isT2SOImmNot() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ return ARM_AM::getT2SOImmVal(~Value) != -1;
+ }
+ bool isT2SOImmNeg() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ // Only use this when not representable as a plain so_imm.
+ return ARM_AM::getT2SOImmVal(Value) == -1 &&
+ ARM_AM::getT2SOImmVal(-Value) != -1;
+ }
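
For context on the four *Not/*Neg predicates above: an A32 so_imm is an 8-bit value rotated right by an even amount (the Thumb2 t2_so_imm additionally allows a few byte-splat patterns), and when the constant as written fails that test but its bitwise complement or negation passes, the matcher can select the complementary instruction, e.g. mvn for mov or sub for add. A standalone sketch of the rotation test that ARM_AM::getSOImmVal is assumed to perform:

    #include <cassert>
    #include <cstdint>

    // An A32 so_imm is some 8-bit byte rotated right by an even amount.
    // Equivalently: rotating the value *left* by some even amount must
    // leave at most 8 low bits set.
    static bool isSOImmEncodable(uint32_t V) {
      for (unsigned Rot = 0; Rot < 32; Rot += 2) {
        uint32_t Rotated = (V << Rot) | (V >> ((32 - Rot) & 31));
        if (Rotated <= 0xff)
          return true;
      }
      return false;
    }

    int main() {
      assert(isSOImmEncodable(0xf000000f)); // 0xff rotated right by 4
      assert(!isSOImmEncodable(0x101));     // bits 0 and 8 span > 8 bits
    }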
bool isSetEndImm() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Value = CE->getValue();
@@ -672,7 +852,7 @@ public:
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
bool isPostIdxReg() const {
- return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
+ return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false) const {
if (!isMemory())
@@ -681,6 +861,17 @@ public:
return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
(alignOK || Memory.Alignment == 0);
}
+ bool isMemPCRelImm12() const {
+ if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
+ return false;
+ // Base register must be PC.
+ if (Memory.BaseRegNum != ARM::PC)
+ return false;
+ // Immediate offset in range [-4095, 4095].
+ if (!Memory.OffsetImm) return true;
+ int64_t Val = Memory.OffsetImm->getValue();
+ return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
+ }
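
isMemPCRelImm12 matches literal-pool style addressing such as ldr r0, [pc, #-8]: base register pc, no offset register or alignment, and a signed 12-bit offset. As elsewhere in this parser, INT32_MIN is the internal marker for a written "#-0", preserving the subtract direction even when the magnitude is zero. A sketch of that convention (assumed helper, not in the patch):

    #include <climits>
    #include <cstdint>
    #include <cstdlib>

    struct OffsetBits { bool Add; unsigned Imm12; };

    // Split a parsed offset into the add/sub direction and magnitude,
    // honoring the INT32_MIN "#-0" marker.
    static OffsetBits splitOffset(int32_t Val) {
      if (Val == INT32_MIN)
        return OffsetBits{false, 0};   // "#-0": subtract, magnitude 0
      return OffsetBits{Val >= 0, (unsigned)std::abs(Val)};
    }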
bool isAlignedMemory() const {
return isMemNoOffset(true);
}
@@ -694,8 +885,7 @@ public:
return Val > -4096 && Val < 4096;
}
bool isAM2OffsetImm() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
// Immediate offset in range [-4095, 4095].
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
@@ -703,6 +893,11 @@ public:
return Val > -4096 && Val < 4096;
}
bool isAddrMode3() const {
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm() && !isa<MCConstantExpr>(getImm()))
+ return true;
if (!isMemory() || Memory.Alignment != 0) return false;
// No shifts are legal for AM3.
if (Memory.ShiftType != ARM_AM::no_shift) return false;
@@ -726,6 +921,11 @@ public:
return (Val > -256 && Val < 256) || Val == INT32_MIN;
}
bool isAddrMode5() const {
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm() && !isa<MCConstantExpr>(getImm()))
+ return true;
if (!isMemory() || Memory.Alignment != 0) return false;
// Check for register offset.
if (Memory.OffsetRegNum) return false;
@@ -733,7 +933,7 @@ public:
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
- Val == INT32_MIN;
+ Val == INT32_MIN;
}
bool isMemTBB() const {
if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
@@ -810,6 +1010,11 @@ public:
return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
}
bool isMemImm8s4Offset() const {
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm() && !isa<MCConstantExpr>(getImm()))
+ return true;
if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset a multiple of 4 in range [-1020, 1020].
@@ -828,6 +1033,8 @@ public:
bool isMemImm8Offset() const {
if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
+ // Base reg of PC isn't allowed for these encodings.
+ if (Memory.BaseRegNum == ARM::PC) return false;
// Immediate offset in range [-255, 255].
if (!Memory.OffsetImm) return true;
int64_t Val = Memory.OffsetImm->getValue();
@@ -844,18 +1051,14 @@ public:
bool isMemNegImm8Offset() const {
if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
+ // Base reg of PC isn't allowed for these encodings.
+ if (Memory.BaseRegNum == ARM::PC) return false;
// Immediate offset in range [-255, -1].
- if (!Memory.OffsetImm) return true;
+ if (!Memory.OffsetImm) return false;
int64_t Val = Memory.OffsetImm->getValue();
- return Val > -256 && Val < 0;
+ return (Val == INT32_MIN) || (Val > -256 && Val < 0);
}
bool isMemUImm12Offset() const {
- // If we have an immediate that's not a constant, treat it as a label
- // reference needing a fixup. If it is a constant, it's something else
- // and we reject it.
- if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
- return true;
-
if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
return false;
// Immediate offset in range [0, 4095].
@@ -867,7 +1070,7 @@ public:
// If we have an immediate that's not a constant, treat it as a label
// reference needing a fixup. If it is a constant, it's something else
// and we reject it.
- if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
+ if (isImm() && !isa<MCConstantExpr>(getImm()))
return true;
if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
@@ -878,16 +1081,14 @@ public:
return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
}
bool isPostIdxImm8() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
return (Val > -256 && Val < 256) || (Val == INT32_MIN);
}
bool isPostIdxImm8s4() const {
- if (Kind != k_Immediate)
- return false;
+ if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
int64_t Val = CE->getValue();
@@ -898,6 +1099,188 @@ public:
bool isMSRMask() const { return Kind == k_MSRMask; }
bool isProcIFlags() const { return Kind == k_ProcIFlags; }
+ // NEON operands.
+ bool isSingleSpacedVectorList() const {
+ return Kind == k_VectorList && !VectorList.isDoubleSpaced;
+ }
+ bool isDoubleSpacedVectorList() const {
+ return Kind == k_VectorList && VectorList.isDoubleSpaced;
+ }
+ bool isVecListOneD() const {
+ if (!isSingleSpacedVectorList()) return false;
+ return VectorList.Count == 1;
+ }
+
+ bool isVecListDPair() const {
+ if (!isSingleSpacedVectorList()) return false;
+ return (ARMMCRegisterClasses[ARM::DPairRegClassID]
+ .contains(VectorList.RegNum));
+ }
+
+ bool isVecListThreeD() const {
+ if (!isSingleSpacedVectorList()) return false;
+ return VectorList.Count == 3;
+ }
+
+ bool isVecListFourD() const {
+ if (!isSingleSpacedVectorList()) return false;
+ return VectorList.Count == 4;
+ }
+
+ bool isVecListDPairSpaced() const {
+ if (isSingleSpacedVectorList()) return false;
+ return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
+ .contains(VectorList.RegNum));
+ }
+
+ bool isVecListThreeQ() const {
+ if (!isDoubleSpacedVectorList()) return false;
+ return VectorList.Count == 3;
+ }
+
+ bool isVecListFourQ() const {
+ if (!isDoubleSpacedVectorList()) return false;
+ return VectorList.Count == 4;
+ }
+
+ bool isSingleSpacedVectorAllLanes() const {
+ return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
+ }
+ bool isDoubleSpacedVectorAllLanes() const {
+ return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
+ }
+ bool isVecListOneDAllLanes() const {
+ if (!isSingleSpacedVectorAllLanes()) return false;
+ return VectorList.Count == 1;
+ }
+
+ bool isVecListDPairAllLanes() const {
+ if (!isSingleSpacedVectorAllLanes()) return false;
+ return (ARMMCRegisterClasses[ARM::DPairRegClassID]
+ .contains(VectorList.RegNum));
+ }
+
+ bool isVecListDPairSpacedAllLanes() const {
+ if (!isDoubleSpacedVectorAllLanes()) return false;
+ return VectorList.Count == 2;
+ }
+
+ bool isVecListThreeDAllLanes() const {
+ if (!isSingleSpacedVectorAllLanes()) return false;
+ return VectorList.Count == 3;
+ }
+
+ bool isVecListThreeQAllLanes() const {
+ if (!isDoubleSpacedVectorAllLanes()) return false;
+ return VectorList.Count == 3;
+ }
+
+ bool isVecListFourDAllLanes() const {
+ if (!isSingleSpacedVectorAllLanes()) return false;
+ return VectorList.Count == 4;
+ }
+
+ bool isVecListFourQAllLanes() const {
+ if (!isDoubleSpacedVectorAllLanes()) return false;
+ return VectorList.Count == 4;
+ }
+
+ bool isSingleSpacedVectorIndexed() const {
+ return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
+ }
+ bool isDoubleSpacedVectorIndexed() const {
+ return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
+ }
+ bool isVecListOneDByteIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
+ }
+
+ bool isVecListOneDHWordIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
+ }
+
+ bool isVecListOneDWordIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
+ }
+
+ bool isVecListTwoDByteIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
+ }
+
+ bool isVecListTwoDHWordIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
+ }
+
+ bool isVecListTwoQWordIndexed() const {
+ if (!isDoubleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
+ }
+
+ bool isVecListTwoQHWordIndexed() const {
+ if (!isDoubleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
+ }
+
+ bool isVecListTwoDWordIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
+ }
+
+ bool isVecListThreeDByteIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
+ }
+
+ bool isVecListThreeDHWordIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
+ }
+
+ bool isVecListThreeQWordIndexed() const {
+ if (!isDoubleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
+ }
+
+ bool isVecListThreeQHWordIndexed() const {
+ if (!isDoubleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
+ }
+
+ bool isVecListThreeDWordIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
+ }
+
+ bool isVecListFourDByteIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
+ }
+
+ bool isVecListFourDHWordIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
+ }
+
+ bool isVecListFourQWordIndexed() const {
+ if (!isDoubleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
+ }
+
+ bool isVecListFourQHWordIndexed() const {
+ if (!isDoubleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
+ }
+
+ bool isVecListFourDWordIndexed() const {
+ if (!isSingleSpacedVectorIndexed()) return false;
+ return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
+ }
+
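+ // (See the standalone lane-bound sketch below for what the *Indexed
+ // predicates are checking.)

These three operand kinds map directly onto NEON source syntax: k_VectorList for plain lists like {d0, d1}, k_VectorListAllLanes for the {d0[], d1[]} form, and k_VectorListIndexed for a single lane such as {d2[1]}; double spacing covers lists like {d0, d2}. The lane bound each *Indexed predicate checks is simply how many elements fit in a 64-bit D register, as this standalone sketch shows:

    #include <cassert>

    // A D register holds 8 bytes, 4 half-words, or 2 words, so the lane
    // index bounds above are <= 7, <= 3, and <= 1 respectively.
    static bool laneInRange(unsigned ElemBits, unsigned Lane) {
      return Lane < 64 / ElemBits;
    }

    int main() {
      assert(laneInRange(8, 7)  && !laneInRange(8, 8));
      assert(laneInRange(16, 3) && !laneInRange(16, 4));
      assert(laneInRange(32, 1) && !laneInRange(32, 2));
    }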
bool isVectorIndex8() const {
if (Kind != k_VectorIndex) return false;
return VectorIndex.Val < 8;
@@ -911,7 +1294,82 @@ public:
return VectorIndex.Val < 2;
}
+ bool isNEONi8splat() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // Must be a constant.
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ // i8 value splatted across 8 bytes. The immediate is just the 8-bit
+ // value.
+ return Value >= 0 && Value < 256;
+ }
+
+ bool isNEONi16splat() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // Must be a constant.
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ // i16 value in the range [0,255] or [0x0100, 0xff00]
+ return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
+ }
+
+ bool isNEONi32splat() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // Must be a constant.
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
+ return (Value >= 0 && Value < 256) ||
+ (Value >= 0x0100 && Value <= 0xff00) ||
+ (Value >= 0x010000 && Value <= 0xff0000) ||
+ (Value >= 0x01000000 && Value <= 0xff000000);
+ }
+ bool isNEONi32vmov() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // Must be a constant.
+ if (!CE) return false;
+ int64_t Value = CE->getValue();
+ // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
+ // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
+ return (Value >= 0 && Value < 256) ||
+ (Value >= 0x0100 && Value <= 0xff00) ||
+ (Value >= 0x010000 && Value <= 0xff0000) ||
+ (Value >= 0x01000000 && Value <= 0xff000000) ||
+ (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
+ (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
+ }
+ bool isNEONi32vmovNeg() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // Must be a constant.
+ if (!CE) return false;
+ int64_t Value = ~CE->getValue();
+ // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
+ // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
+ return (Value >= 0 && Value < 256) ||
+ (Value >= 0x0100 && Value <= 0xff00) ||
+ (Value >= 0x010000 && Value <= 0xff0000) ||
+ (Value >= 0x01000000 && Value <= 0xff000000) ||
+ (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
+ (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
+ }
+
+ bool isNEONi64splat() const {
+ if (!isImm()) return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ // Must be a constant.
+ if (!CE) return false;
+ uint64_t Value = CE->getValue();
+ // i64 value with each byte being either 0 or 0xff.
+ for (unsigned i = 0; i < 8; ++i, Value >>= 8)
+ if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
+ return true;
+ }
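
The i64 splat form used by vmov.i64 requires every byte of the value to be 0x00 or 0xff; the encoder (addNEONi64splatOperands, further down) then compresses the constant to one bit per byte. A standalone worked example of both halves (the real encoder also ORs in a 0x1e00 cmode marker):

    #include <cassert>
    #include <cstdint>

    static bool validI64Splat(uint64_t V) {
      for (unsigned i = 0; i < 8; ++i, V >>= 8) {
        unsigned Byte = V & 0xff;
        if (Byte != 0 && Byte != 0xff)
          return false;
      }
      return true;
    }

    static unsigned encodeI64Splat(uint64_t V) {
      unsigned Imm = 0;
      for (unsigned i = 0; i < 8; ++i, V >>= 8)
        Imm |= (V & 1) << i;           // one bit per 0x00/0xff byte
      return Imm;
    }

    int main() {
      assert(validI64Splat(0x00ff00ff00ff00ffULL));
      assert(encodeI64Splat(0x00ff00ff00ff00ffULL) == 0x55); // 0b01010101
      assert(!validI64Splat(0x00ff00ff00ff00feULL));         // 0xfe byte
    }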
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible. Null MCExpr = 0.
@@ -967,7 +1425,8 @@ public:
void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
- assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!");
+ assert(isRegShiftedReg() &&
+ "addRegShiftedRegOperands() on non RegShiftedReg!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
Inst.addOperand(MCOperand::CreateImm(
@@ -976,7 +1435,8 @@ public:
void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
- assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!");
+ assert(isRegShiftedImm() &&
+ "addRegShiftedImmOperands() on non RegShiftedImm!");
Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
Inst.addOperand(MCOperand::CreateImm(
ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
@@ -1026,9 +1486,23 @@ public:
addExpr(Inst, getImm());
}
+ void addFBits16Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
+ }
+
+ void addFBits32Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
+ }
+
void addFPImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getFPImm()));
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
+ Inst.addOperand(MCOperand::CreateImm(Val));
}
void addImm8s4Operands(MCInst &Inst, unsigned N) const {
@@ -1047,32 +1521,20 @@ public:
Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
}
- void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
+ void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The immediate is scaled by four in the encoding and is stored
// in the MCInst as such. Lop off the low two bits here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
- Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
+ Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
}
- void addImm0_255Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_7Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_15Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_31Operands(MCInst &Inst, unsigned N) const {
+ void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
+ // The immediate is scaled by four in the encoding and is stored
+ // in the MCInst as such. Lop off the low two bits here.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
}
void addImm1_16Operands(MCInst &Inst, unsigned N) const {
@@ -1091,21 +1553,6 @@ public:
Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
}
- void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImm24bitOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// The constant encodes as the immediate, except for 32, which encodes as
@@ -1115,11 +1562,6 @@ public:
Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
}
- void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
// An ASR value of 32 encodes as 0, so that's how we want to add it to
@@ -1129,19 +1571,44 @@ public:
Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
}
- void addARMSOImmOperands(MCInst &Inst, unsigned N) const {
+ void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
+ // The operand is actually a t2_so_imm, but we have its bitwise
+ // negation in the assembly source, so twiddle it here.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
}
- void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
+ void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
+ // The operand is actually a t2_so_imm, but we have its
+ // negation in the assembly source, so twiddle it here.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
}
- void addSetEndImmOperands(MCInst &Inst, unsigned N) const {
+ void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
+ // The operand is actually an imm0_4095, but we have its
+ // negation in the assembly source, so twiddle it here.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
+ }
+
+ void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The operand is actually a so_imm, but we have its bitwise
+ // negation in the assembly source, so twiddle it here.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
+ }
+
+ void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The operand is actually a so_imm, but we have its
+ // negation in the assembly source, so twiddle it here.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
}
void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
@@ -1154,6 +1621,14 @@ public:
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
}
+ void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ int32_t Imm = Memory.OffsetImm->getValue();
+ // FIXME: Handle #-0
+ if (Imm == INT32_MIN) Imm = 0;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
@@ -1196,6 +1671,16 @@ public:
void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm()) {
+ Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ Inst.addOperand(MCOperand::CreateReg(0));
+ Inst.addOperand(MCOperand::CreateImm(0));
+ return;
+ }
+
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
if (!Memory.OffsetRegNum) {
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
@@ -1237,6 +1722,15 @@ public:
void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm()) {
+ Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ Inst.addOperand(MCOperand::CreateImm(0));
+ return;
+ }
+
// The lower two bits are always zero and as such are not encoded.
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
@@ -1250,6 +1744,15 @@ public:
void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
+ // If we have an immediate that's not a constant, treat it as a label
+ // reference needing a fixup. If it is a constant, it's something else
+ // and we reject it.
+ if (isImm()) {
+ Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ Inst.addOperand(MCOperand::CreateImm(0));
+ return;
+ }
+
int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::CreateImm(Val));
@@ -1281,7 +1784,7 @@ public:
void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If this is an immediate, it's a label reference.
- if (Kind == k_Immediate) {
+ if (isImm()) {
addExpr(Inst, getImm());
Inst.addOperand(MCOperand::CreateImm(0));
return;
@@ -1296,7 +1799,7 @@ public:
void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 2 && "Invalid number of operands!");
// If this is an immediate, it's a label reference.
- if (Kind == k_Immediate) {
+ if (isImm()) {
addExpr(Inst, getImm());
Inst.addOperand(MCOperand::CreateImm(0));
return;
@@ -1322,8 +1825,9 @@ public:
void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
- unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
- Memory.ShiftImm, Memory.ShiftType);
+ unsigned Val =
+ ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
+ Memory.ShiftImm, Memory.ShiftType);
Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
Inst.addOperand(MCOperand::CreateImm(Val));
@@ -1420,6 +1924,17 @@ public:
Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
}
+ void addVecListOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
+ }
+
+ void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
+ Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
+ }
+
void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
@@ -1435,6 +1950,80 @@ public:
Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
}
+ void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate encodes the type of constant as well as the value.
+ // Mask in that this is an i8 splat.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
+ }
+
+ void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate encodes the type of constant as well as the value.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ unsigned Value = CE->getValue();
+ if (Value >= 256)
+ Value = (Value >> 8) | 0xa00;
+ else
+ Value |= 0x800;
+ Inst.addOperand(MCOperand::CreateImm(Value));
+ }
+
+ void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate encodes the type of constant as well as the value.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ unsigned Value = CE->getValue();
+ if (Value >= 256 && Value <= 0xff00)
+ Value = (Value >> 8) | 0x200;
+ else if (Value > 0xffff && Value <= 0xff0000)
+ Value = (Value >> 16) | 0x400;
+ else if (Value > 0xffffff)
+ Value = (Value >> 24) | 0x600;
+ Inst.addOperand(MCOperand::CreateImm(Value));
+ }
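
A quick worked example of the helper above: the value keeps its one significant byte and gains a cmode marker recording which byte lane it occupied, so 0x3400 becomes 0x234 and 0xab000000 becomes 0x6ab. As a standalone check:

    #include <cassert>

    // Same mapping as addNEONi32splatOperands above.
    static unsigned encodeI32Splat(unsigned Value) {
      if (Value >= 256 && Value <= 0xff00)
        Value = (Value >> 8) | 0x200;
      else if (Value > 0xffff && Value <= 0xff0000)
        Value = (Value >> 16) | 0x400;
      else if (Value > 0xffffff)
        Value = (Value >> 24) | 0x600;
      return Value;
    }

    int main() {
      assert(encodeI32Splat(0x3400) == 0x234);
      assert(encodeI32Splat(0xab000000) == 0x6ab);
      assert(encodeI32Splat(0x7f) == 0x7f);       // low byte: no marker
    }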
+
+ void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate encodes the type of constant as well as the value.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ unsigned Value = CE->getValue();
+ if (Value >= 256 && Value <= 0xffff)
+ Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
+ else if (Value > 0xffff && Value <= 0xffffff)
+ Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
+ else if (Value > 0xffffff)
+ Value = (Value >> 24) | 0x600;
+ Inst.addOperand(MCOperand::CreateImm(Value));
+ }
+
+ void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate encodes the type of constant as well as the value.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ unsigned Value = ~CE->getValue();
+ if (Value >= 256 && Value <= 0xffff)
+ Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
+ else if (Value > 0xffff && Value <= 0xffffff)
+ Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
+ else if (Value > 0xffffff)
+ Value = (Value >> 24) | 0x600;
+ Inst.addOperand(MCOperand::CreateImm(Value));
+ }
+
+ void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ // The immediate encodes the type of constant as well as the value.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+ unsigned Imm = 0;
+ for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
+ Imm |= (Value & 1) << i;
+ }
+ Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
+ }
+
virtual void print(raw_ostream &OS) const;
static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
@@ -1579,6 +2168,43 @@ public:
return Op;
}
+ static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
+ bool isDoubleSpaced, SMLoc S, SMLoc E) {
+ ARMOperand *Op = new ARMOperand(k_VectorList);
+ Op->VectorList.RegNum = RegNum;
+ Op->VectorList.Count = Count;
+ Op->VectorList.isDoubleSpaced = isDoubleSpaced;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
+ bool isDoubleSpaced,
+ SMLoc S, SMLoc E) {
+ ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
+ Op->VectorList.RegNum = RegNum;
+ Op->VectorList.Count = Count;
+ Op->VectorList.isDoubleSpaced = isDoubleSpaced;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
+ unsigned Index,
+ bool isDoubleSpaced,
+ SMLoc S, SMLoc E) {
+ ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
+ Op->VectorList.RegNum = RegNum;
+ Op->VectorList.Count = Count;
+ Op->VectorList.LaneIndex = Index;
+ Op->VectorList.isDoubleSpaced = isDoubleSpaced;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
MCContext &Ctx) {
ARMOperand *Op = new ARMOperand(k_VectorIndex);
@@ -1596,14 +2222,6 @@ public:
return Op;
}
- static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
- ARMOperand *Op = new ARMOperand(k_FPImmediate);
- Op->FPImm.Val = Val;
- Op->StartLoc = S;
- Op->EndLoc = S;
- return Op;
- }
-
static ARMOperand *CreateMem(unsigned BaseRegNum,
const MCConstantExpr *OffsetImm,
unsigned OffsetRegNum,
@@ -1668,10 +2286,6 @@ public:
void ARMOperand::print(raw_ostream &OS) const {
switch (Kind) {
- case k_FPImmediate:
- OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
- << ") >";
- break;
case k_CondCode:
OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
break;
@@ -1679,9 +2293,10 @@ void ARMOperand::print(raw_ostream &OS) const {
OS << "<ccout " << getReg() << ">";
break;
case k_ITCondMask: {
- static char MaskStr[][6] = { "()", "(t)", "(e)", "(tt)", "(et)", "(te)",
- "(ee)", "(ttt)", "(ett)", "(tet)", "(eet)", "(tte)", "(ete)",
- "(tee)", "(eee)" };
+ static const char *MaskStr[] = {
+ "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
+ "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
+ };
assert((ITMask.Mask & 0xf) == ITMask.Mask);
OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
break;
@@ -1735,18 +2350,15 @@ void ARMOperand::print(raw_ostream &OS) const {
break;
case k_ShiftedRegister:
OS << "<so_reg_reg "
- << RegShiftedReg.SrcReg
- << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm))
- << ", " << RegShiftedReg.ShiftReg << ", "
- << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm)
- << ">";
+ << RegShiftedReg.SrcReg << " "
+ << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
+ << " " << RegShiftedReg.ShiftReg << ">";
break;
case k_ShiftedImmediate:
OS << "<so_reg_imm "
- << RegShiftedImm.SrcReg
- << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm))
- << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm)
- << ">";
+ << RegShiftedImm.SrcReg << " "
+ << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
+ << " #" << RegShiftedImm.ShiftImm << ">";
break;
case k_RotateImmediate:
OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
@@ -1770,6 +2382,18 @@ void ARMOperand::print(raw_ostream &OS) const {
OS << ">";
break;
}
+ case k_VectorList:
+ OS << "<vector_list " << VectorList.Count << " * "
+ << VectorList.RegNum << ">";
+ break;
+ case k_VectorListAllLanes:
+ OS << "<vector_list(all lanes) " << VectorList.Count << " * "
+ << VectorList.RegNum << ">";
+ break;
+ case k_VectorListIndexed:
+ OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
+ << VectorList.Count << " * " << VectorList.RegNum << ">";
+ break;
case k_Token:
OS << "'" << getToken() << "'";
break;
@@ -1788,7 +2412,9 @@ static unsigned MatchRegisterName(StringRef Name);
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
SMLoc &StartLoc, SMLoc &EndLoc) {
+ StartLoc = Parser.getTok().getLoc();
RegNo = tryParseRegister();
+ EndLoc = Parser.getTok().getLoc();
return (RegNo == (unsigned)-1);
}
@@ -1801,10 +2427,7 @@ int ARMAsmParser::tryParseRegister() {
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier)) return -1;
- // FIXME: Validate register for the current architecture; we have to do
- // validation later, so maybe there is no need for this here.
- std::string upperCase = Tok.getString().str();
- std::string lowerCase = LowercaseString(upperCase);
+ std::string lowerCase = Tok.getString().lower();
unsigned RegNum = MatchRegisterName(lowerCase);
if (!RegNum) {
RegNum = StringSwitch<unsigned>(lowerCase)
@@ -1812,44 +2435,38 @@ int ARMAsmParser::tryParseRegister() {
.Case("r14", ARM::LR)
.Case("r15", ARM::PC)
.Case("ip", ARM::R12)
+ // Additional register name aliases for 'gas' compatibility.
+ .Case("a1", ARM::R0)
+ .Case("a2", ARM::R1)
+ .Case("a3", ARM::R2)
+ .Case("a4", ARM::R3)
+ .Case("v1", ARM::R4)
+ .Case("v2", ARM::R5)
+ .Case("v3", ARM::R6)
+ .Case("v4", ARM::R7)
+ .Case("v5", ARM::R8)
+ .Case("v6", ARM::R9)
+ .Case("v7", ARM::R10)
+ .Case("v8", ARM::R11)
+ .Case("sb", ARM::R9)
+ .Case("sl", ARM::R10)
+ .Case("fp", ARM::R11)
.Default(0);
}
- if (!RegNum) return -1;
+ if (!RegNum) {
+ // Check for aliases registered via .req. Canonicalize to lower case.
+ // That's more consistent since register names are case insensitive, and
+ // it's how the original entry was passed in from MC/MCParser/AsmParser.
+ StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
+ // If no match, return failure.
+ if (Entry == RegisterReqs.end())
+ return -1;
+ Parser.Lex(); // Eat identifier token.
+ return Entry->getValue();
+ }
Parser.Lex(); // Eat identifier token.
-#if 0
- // Also check for an index operand. This is only legal for vector registers,
- // but that'll get caught OK in operand matching, so we don't need to
- // explicitly filter everything else out here.
- if (Parser.getTok().is(AsmToken::LBrac)) {
- SMLoc SIdx = Parser.getTok().getLoc();
- Parser.Lex(); // Eat left bracket token.
-
- const MCExpr *ImmVal;
- SMLoc ExprLoc = Parser.getTok().getLoc();
- if (getParser().ParseExpression(ImmVal))
- return MatchOperand_ParseFail;
- const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
- if (!MCE) {
- TokError("immediate value expected for vector index");
- return MatchOperand_ParseFail;
- }
-
- SMLoc E = Parser.getTok().getLoc();
- if (Parser.getTok().isNot(AsmToken::RBrac)) {
- Error(E, "']' expected");
- return MatchOperand_ParseFail;
- }
-
- Parser.Lex(); // Eat right bracket token.
-
- Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
- SIdx, E,
- getContext()));
- }
-#endif
-
return RegNum;
}
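
tryParseRegister now resolves three name spaces in order, all after lower-casing the token: the tablegen'erated MatchRegisterName, the hard-coded gas-compatibility aliases (a1-a4, v1-v8, sb, sl, fp, and the like), and finally user aliases registered by the new .req directive. A sketch of the .req lookup, assuming a plain std::map in place of llvm::StringMap:

    #include <cassert>
    #include <cctype>
    #include <map>
    #include <string>

    static std::map<std::string, unsigned> RegisterReqs;

    // Names are canonicalized to lower case on both insert and lookup,
    // so "foo .req r4" also satisfies "MOV FOO, #0".
    static int lookupReq(std::string Name) {
      for (std::string::size_type i = 0; i < Name.size(); ++i)
        Name[i] = (char)std::tolower((unsigned char)Name[i]);
      std::map<std::string, unsigned>::const_iterator It =
          RegisterReqs.find(Name);
      return It == RegisterReqs.end() ? -1 : (int)It->second;
    }

    int main() {
      RegisterReqs["foo"] = 4;         // as if "foo .req r4" was seen
      assert(lookupReq("FOO") == 4);
      assert(lookupReq("bar") == -1);
    }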
@@ -1864,9 +2481,9 @@ int ARMAsmParser::tryParseShiftRegister(
const AsmToken &Tok = Parser.getTok();
assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
- std::string upperCase = Tok.getString().str();
- std::string lowerCase = LowercaseString(upperCase);
+ std::string lowerCase = Tok.getString().lower();
ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
+ .Case("asl", ARM_AM::lsl)
.Case("lsl", ARM_AM::lsl)
.Case("lsr", ARM_AM::lsr)
.Case("asr", ARM_AM::asr)
@@ -1895,7 +2512,8 @@ int ARMAsmParser::tryParseShiftRegister(
ShiftReg = SrcReg;
} else {
// Figure out if this is shifted by a constant or a register (for non-RRX).
- if (Parser.getTok().is(AsmToken::Hash)) {
+ if (Parser.getTok().is(AsmToken::Hash) ||
+ Parser.getTok().is(AsmToken::Dollar)) {
Parser.Lex(); // Eat hash.
SMLoc ImmLoc = Parser.getTok().getLoc();
const MCExpr *ShiftExpr = 0;
@@ -1919,6 +2537,10 @@ int ARMAsmParser::tryParseShiftRegister(
Error(ImmLoc, "immediate shift value out of range");
return -1;
}
+ // Shift by zero is a nop. Always send it through as lsl.
+ // ('as' compatibility)
+ if (Imm == 0)
+ ShiftTy = ARM_AM::lsl;
} else if (Parser.getTok().is(AsmToken::Identifier)) {
ShiftReg = tryParseRegister();
SMLoc L = Parser.getTok().getLoc();
@@ -1976,20 +2598,15 @@ tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Parser.Lex(); // Eat left bracket token.
const MCExpr *ImmVal;
- SMLoc ExprLoc = Parser.getTok().getLoc();
if (getParser().ParseExpression(ImmVal))
- return MatchOperand_ParseFail;
+ return true;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
- if (!MCE) {
- TokError("immediate value expected for vector index");
- return MatchOperand_ParseFail;
- }
+ if (!MCE)
+ return TokError("immediate value expected for vector index");
SMLoc E = Parser.getTok().getLoc();
- if (Parser.getTok().isNot(AsmToken::RBrac)) {
- Error(E, "']' expected");
- return MatchOperand_ParseFail;
- }
+ if (Parser.getTok().isNot(AsmToken::RBrac))
+ return Error(E, "']' expected");
Parser.Lex(); // Eat right bracket token.
@@ -2008,7 +2625,7 @@ static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
// Use the same layout as the tablegen'erated register name matcher. Ugly,
// but efficient.
switch (Name.size()) {
- default: break;
+ default: return -1;
case 2:
if (Name[0] != CoprocOp)
return -1;
@@ -2025,7 +2642,6 @@ static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
case '8': return 8;
case '9': return 9;
}
- break;
case 3:
if (Name[0] != CoprocOp || Name[1] != '1')
return -1;
@@ -2038,10 +2654,7 @@ static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
case '4': return 14;
case '5': return 15;
}
- break;
}
-
- return -1;
}
/// parseITCondCode - Try to parse a condition code for an IT instruction.
@@ -2161,7 +2774,7 @@ static unsigned getNextRegister(unsigned Reg) {
if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
return Reg + 1;
switch(Reg) {
- default: assert(0 && "Invalid GPR number!");
+ default: llvm_unreachable("Invalid GPR number!");
case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
@@ -2173,6 +2786,29 @@ static unsigned getNextRegister(unsigned Reg) {
}
}
+// Return the low-subreg of a given Q register.
+static unsigned getDRegFromQReg(unsigned QReg) {
+ switch (QReg) {
+ default: llvm_unreachable("expected a Q register!");
+ case ARM::Q0: return ARM::D0;
+ case ARM::Q1: return ARM::D2;
+ case ARM::Q2: return ARM::D4;
+ case ARM::Q3: return ARM::D6;
+ case ARM::Q4: return ARM::D8;
+ case ARM::Q5: return ARM::D10;
+ case ARM::Q6: return ARM::D12;
+ case ARM::Q7: return ARM::D14;
+ case ARM::Q8: return ARM::D16;
+ case ARM::Q9: return ARM::D18;
+ case ARM::Q10: return ARM::D20;
+ case ARM::Q11: return ARM::D22;
+ case ARM::Q12: return ARM::D24;
+ case ARM::Q13: return ARM::D26;
+ case ARM::Q14: return ARM::D28;
+ case ARM::Q15: return ARM::D30;
+ }
+}
+
/// Parse a register list.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -2188,7 +2824,17 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
if (Reg == -1)
return Error(RegLoc, "register expected");
- MCRegisterClass *RC;
+ // The reglist instructions have at most 16 registers, so reserve
+ // space for that many.
+ SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
+
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ Reg = getDRegFromQReg(Reg);
+ Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ ++Reg;
+ }
+ const MCRegisterClass *RC;
if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
@@ -2198,10 +2844,7 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
else
return Error(RegLoc, "invalid register in register list");
- // The reglist instructions have at most 16 registers, so reserve
- // space for that many.
- SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
- // Store the first register.
+ // Store the register.
Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
// This starts immediately after the first register token in the list,
@@ -2210,11 +2853,14 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
while (Parser.getTok().is(AsmToken::Comma) ||
Parser.getTok().is(AsmToken::Minus)) {
if (Parser.getTok().is(AsmToken::Minus)) {
- Parser.Lex(); // Eat the comma.
+ Parser.Lex(); // Eat the minus.
SMLoc EndLoc = Parser.getTok().getLoc();
int EndReg = tryParseRegister();
if (EndReg == -1)
return Error(EndLoc, "register expected");
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
+ EndReg = getDRegFromQReg(EndReg) + 1;
// If the register is the same as the start reg, there's nothing
// more to do.
if (Reg == EndReg)
@@ -2236,15 +2882,31 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Parser.Lex(); // Eat the comma.
RegLoc = Parser.getTok().getLoc();
int OldReg = Reg;
+ const AsmToken RegTok = Parser.getTok();
Reg = tryParseRegister();
if (Reg == -1)
return Error(RegLoc, "register expected");
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ bool isQReg = false;
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ Reg = getDRegFromQReg(Reg);
+ isQReg = true;
+ }
// The register must be in the same register class as the first.
if (!RC->contains(Reg))
return Error(RegLoc, "invalid register in register list");
// List must be monotonically increasing.
- if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
- return Error(RegLoc, "register list not in ascending order");
+ if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
+ if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
+ Warning(RegLoc, "register list not in ascending order");
+ else
+ return Error(RegLoc, "register list not in ascending order");
+ }
+ if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
+ Warning(RegLoc, "duplicated register (" + RegTok.getString() +
+ ") in register list");
+ continue;
+ }
// VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
// VFP register classes have the enum sorted properly.
@@ -2252,6 +2914,8 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Reg != OldReg + 1)
return Error(RegLoc, "non-contiguous register range");
Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
+ if (isQReg)
+ Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
}
SMLoc E = Parser.getTok().getLoc();
@@ -2259,10 +2923,319 @@ parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return Error(E, "'}' expected");
Parser.Lex(); // Eat '}' token.
+ // Push the register list operand.
Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
+
+ // The ARM system instruction variants for LDM/STM have a '^' token here.
+ if (Parser.getTok().is(AsmToken::Caret)) {
+ Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
+ Parser.Lex(); // Eat '^' token.
+ }
+
return false;
}
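(Aside, illustration only, not part of the patch: with the changes above, a Q
register inside {...} simply contributes its two D sub-registers, so
"vpush {q0, q1}" and "vpush {d0, d1, d2, d3}" parse to the same register list.
A toy expansion over plain indices; the pair<isQ, index> encoding is a
hypothetical stand-in for the parsed operands:)

  #include <cstdio>
  #include <utility>
  #include <vector>

  int main() {
    // {q0, q1} modeled as (isQ, index) pairs.
    std::vector<std::pair<bool, unsigned>> List = {{true, 0}, {true, 1}};
    std::vector<unsigned> DRegs;
    for (const auto &R : List) {
      if (R.first) { // Q register: push both D sub-registers.
        DRegs.push_back(2 * R.second);
        DRegs.push_back(2 * R.second + 1);
      } else {       // already a D register.
        DRegs.push_back(R.second);
      }
    }
    for (unsigned D : DRegs)
      std::printf("d%u ", D); // prints: d0 d1 d2 d3
    std::printf("\n");
    return 0;
  }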
+// Helper function to parse the lane index for vector lists.
+ARMAsmParser::OperandMatchResultTy ARMAsmParser::
+parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
+ Index = 0; // Always return a defined index value.
+ if (Parser.getTok().is(AsmToken::LBrac)) {
+ Parser.Lex(); // Eat the '['.
+ if (Parser.getTok().is(AsmToken::RBrac)) {
+ // "Dn[]" is the 'all lanes' syntax.
+ LaneKind = AllLanes;
+ Parser.Lex(); // Eat the ']'.
+ return MatchOperand_Success;
+ }
+
+ // There's an optional '#' token here. Normally there wouldn't be, but
+ // inline assembly puts one in, and it's friendly to accept that.
+ if (Parser.getTok().is(AsmToken::Hash))
+ Parser.Lex(); // Eat the '#'
+
+ const MCExpr *LaneIndex;
+ SMLoc Loc = Parser.getTok().getLoc();
+ if (getParser().ParseExpression(LaneIndex)) {
+ Error(Loc, "illegal expression");
+ return MatchOperand_ParseFail;
+ }
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
+ if (!CE) {
+ Error(Loc, "lane index must be empty or an integer");
+ return MatchOperand_ParseFail;
+ }
+ if (Parser.getTok().isNot(AsmToken::RBrac)) {
+ Error(Parser.getTok().getLoc(), "']' expected");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex(); // Eat the ']'.
+ int64_t Val = CE->getValue();
+
+ // FIXME: Make this range check context sensitive for .8, .16, .32.
+ if (Val < 0 || Val > 7) {
+ Error(Parser.getTok().getLoc(), "lane index out of range");
+ return MatchOperand_ParseFail;
+ }
+ Index = Val;
+ LaneKind = IndexedLane;
+ return MatchOperand_Success;
+ }
+ LaneKind = NoLanes;
+ return MatchOperand_Success;
+}
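(Aside, illustration only, not part of the patch: the routine distinguishes
three outcomes. A simplified standalone model over strings — the real code
consumes AsmTokens and an MCExpr — showing the classification:)

  #include <cstdio>
  #include <string>

  enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

  static VectorLaneTy classifyLane(const std::string &Sfx, unsigned &Index) {
    Index = 0;                          // Always return a defined index.
    if (Sfx.empty())
      return NoLanes;                   // "d0"    -> plain register
    if (Sfx == "[]")
      return AllLanes;                  // "d0[]"  -> all-lanes syntax
    Index = std::stoul(Sfx.substr(1));  // "d0[3]" -> indexed lane 3
    return IndexedLane;                 // (range-checked to 0-7 above)
  }

  int main() {
    unsigned Index;
    VectorLaneTy K = classifyLane("[5]", Index);
    std::printf("kind=%d lane=%u\n", K, Index); // kind=2 lane=5
    return 0;
  }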
+
+// Parse a vector register list.
+ARMAsmParser::OperandMatchResultTy ARMAsmParser::
+parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ VectorLaneTy LaneKind;
+ unsigned LaneIndex;
+ SMLoc S = Parser.getTok().getLoc();
+ // As an extension (to match gas), support a plain D register or Q register
+ // (without enclosing curly braces) as a single or double entry list,
+ // respectively.
+ if (Parser.getTok().is(AsmToken::Identifier)) {
+ int Reg = tryParseRegister();
+ if (Reg == -1)
+ return MatchOperand_NoMatch;
+ SMLoc E = Parser.getTok().getLoc();
+ if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
+ OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
+ if (Res != MatchOperand_Success)
+ return Res;
+ switch (LaneKind) {
+ case NoLanes:
+ E = Parser.getTok().getLoc();
+ Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
+ break;
+ case AllLanes:
+ E = Parser.getTok().getLoc();
+ Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
+ S, E));
+ break;
+ case IndexedLane:
+ Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
+ LaneIndex,
+ false, S, E));
+ break;
+ }
+ return MatchOperand_Success;
+ }
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ Reg = getDRegFromQReg(Reg);
+ OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
+ if (Res != MatchOperand_Success)
+ return Res;
+ switch (LaneKind) {
+ case NoLanes:
+ E = Parser.getTok().getLoc();
+ Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
+ &ARMMCRegisterClasses[ARM::DPairRegClassID]);
+ Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
+ break;
+ case AllLanes:
+ E = Parser.getTok().getLoc();
+ Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
+ &ARMMCRegisterClasses[ARM::DPairRegClassID]);
+ Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
+ S, E));
+ break;
+ case IndexedLane:
+ Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
+ LaneIndex,
+ false, S, E));
+ break;
+ }
+ return MatchOperand_Success;
+ }
+ Error(S, "vector register expected");
+ return MatchOperand_ParseFail;
+ }
+
+ if (Parser.getTok().isNot(AsmToken::LCurly))
+ return MatchOperand_NoMatch;
+
+ Parser.Lex(); // Eat '{' token.
+ SMLoc RegLoc = Parser.getTok().getLoc();
+
+ int Reg = tryParseRegister();
+ if (Reg == -1) {
+ Error(RegLoc, "register expected");
+ return MatchOperand_ParseFail;
+ }
+ unsigned Count = 1;
+ int Spacing = 0;
+ unsigned FirstReg = Reg;
+ // The list is of D registers, but we also allow Q regs and just interpret
+ // them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ FirstReg = Reg = getDRegFromQReg(Reg);
+ Spacing = 1; // double-spacing requires explicit D registers, otherwise
+ // it's ambiguous with four-register single spaced.
+ ++Reg;
+ ++Count;
+ }
+ if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
+ return MatchOperand_ParseFail;
+
+ while (Parser.getTok().is(AsmToken::Comma) ||
+ Parser.getTok().is(AsmToken::Minus)) {
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ if (!Spacing)
+ Spacing = 1; // Register range implies a single spaced list.
+ else if (Spacing == 2) {
+ Error(Parser.getTok().getLoc(),
+ "sequential registers in double spaced list");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex(); // Eat the minus.
+ SMLoc EndLoc = Parser.getTok().getLoc();
+ int EndReg = tryParseRegister();
+ if (EndReg == -1) {
+ Error(EndLoc, "register expected");
+ return MatchOperand_ParseFail;
+ }
+ // Allow Q regs and just interpret them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
+ EndReg = getDRegFromQReg(EndReg) + 1;
+ // If the register is the same as the start reg, there's nothing
+ // more to do.
+ if (Reg == EndReg)
+ continue;
+ // The register must be in the same register class as the first.
+ if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
+ Error(EndLoc, "invalid register in register list");
+ return MatchOperand_ParseFail;
+ }
+ // Ranges must go from low to high.
+ if (Reg > EndReg) {
+ Error(EndLoc, "bad range in register list");
+ return MatchOperand_ParseFail;
+ }
+ // Parse the lane specifier if present.
+ VectorLaneTy NextLaneKind;
+ unsigned NextLaneIndex;
+ if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
+ return MatchOperand_ParseFail;
+ if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
+ Error(EndLoc, "mismatched lane index in register list");
+ return MatchOperand_ParseFail;
+ }
+ EndLoc = Parser.getTok().getLoc();
+
+ // Add all the registers in the range to the register list.
+ Count += EndReg - Reg;
+ Reg = EndReg;
+ continue;
+ }
+ Parser.Lex(); // Eat the comma.
+ RegLoc = Parser.getTok().getLoc();
+ int OldReg = Reg;
+ Reg = tryParseRegister();
+ if (Reg == -1) {
+ Error(RegLoc, "register expected");
+ return MatchOperand_ParseFail;
+ }
+ // Vector register lists must be contiguous.
+ // It's OK to use the enumeration values directly here, as the
+ // VFP register classes have the enum sorted properly.
+ //
+ // The list is of D registers, but we also allow Q regs and just interpret
+ // them as the two D sub-registers.
+ if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
+ if (!Spacing)
+ Spacing = 1; // Register range implies a single spaced list.
+ else if (Spacing == 2) {
+ Error(RegLoc,
+ "invalid register in double-spaced list (must be 'D' register')");
+ return MatchOperand_ParseFail;
+ }
+ Reg = getDRegFromQReg(Reg);
+ if (Reg != OldReg + 1) {
+ Error(RegLoc, "non-contiguous register range");
+ return MatchOperand_ParseFail;
+ }
+ ++Reg;
+ Count += 2;
+ // Parse the lane specifier if present.
+ VectorLaneTy NextLaneKind;
+ unsigned NextLaneIndex;
+ SMLoc EndLoc = Parser.getTok().getLoc();
+ if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
+ return MatchOperand_ParseFail;
+ if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
+ Error(EndLoc, "mismatched lane index in register list");
+ return MatchOperand_ParseFail;
+ }
+ continue;
+ }
+ // Normal D register.
+ // Figure out the register spacing (single or double) of the list if
+ // we don't know it already.
+ if (!Spacing)
+ Spacing = 1 + (Reg == OldReg + 2);
+
+ // Just check that it's contiguous and keep going.
+ if (Reg != OldReg + Spacing) {
+ Error(RegLoc, "non-contiguous register range");
+ return MatchOperand_ParseFail;
+ }
+ ++Count;
+ // Parse the lane specifier if present.
+ VectorLaneTy NextLaneKind;
+ unsigned NextLaneIndex;
+ SMLoc EndLoc = Parser.getTok().getLoc();
+ if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
+ return MatchOperand_ParseFail;
+ if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
+ Error(EndLoc, "mismatched lane index in register list");
+ return MatchOperand_ParseFail;
+ }
+ }
+
+ SMLoc E = Parser.getTok().getLoc();
+ if (Parser.getTok().isNot(AsmToken::RCurly)) {
+ Error(E, "'}' expected");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex(); // Eat '}' token.
+
+ switch (LaneKind) {
+ case NoLanes:
+ // Two-register operands are converted to the
+ // composite register classes.
+ if (Count == 2) {
+ const MCRegisterClass *RC = (Spacing == 1) ?
+ &ARMMCRegisterClasses[ARM::DPairRegClassID] :
+ &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
+ FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
+ }
+
+ Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
+ (Spacing == 2), S, E));
+ break;
+ case AllLanes:
+ // Two-register operands are converted to the
+ // composite register classes.
+ if (Count == 2) {
+ const MCRegisterClass *RC = (Spacing == 1) ?
+ &ARMMCRegisterClasses[ARM::DPairRegClassID] :
+ &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
+ FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
+ }
+ Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
+ (Spacing == 2),
+ S, E));
+ break;
+ case IndexedLane:
+ Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
+ LaneIndex,
+ (Spacing == 2),
+ S, E));
+ break;
+ }
+ return MatchOperand_Success;
+}
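(Aside, illustration only, not part of the patch: for two-register lists the
parser no longer hands the matcher a first-register-plus-count pair; it looks
up the single composite register covering both. A sketch of the call shape,
with ARM::dsub_0 and DPairRegClassID taken from the code above:

  unsigned Pair = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0,
                      &ARMMCRegisterClasses[ARM::DPairRegClassID]);

i.e. the super-register in the D-pair class whose dsub_0 sub-register is
FirstReg; Spacing == 2 selects DPairSpcRegClassID instead.)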
+
/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
@@ -2337,7 +3310,8 @@ parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
if (isMClass()) {
// See ARMv6-M 10.1.1
- unsigned FlagsVal = StringSwitch<unsigned>(Mask)
+ std::string Name = Mask.lower();
+ unsigned FlagsVal = StringSwitch<unsigned>(Name)
.Case("apsr", 0)
.Case("iapsr", 1)
.Case("eapsr", 2)
@@ -2353,14 +3327,14 @@ parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
.Case("faultmask", 19)
.Case("control", 20)
.Default(~0U);
-
+
if (FlagsVal == ~0U)
return MatchOperand_NoMatch;
if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
      // basepri, basepri_max and faultmask are only valid for V7M.
return MatchOperand_NoMatch;
-
+
Parser.Lex(); // Eat identifier token.
Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
return MatchOperand_Success;
@@ -2369,7 +3343,7 @@ parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
size_t Start = 0, Next = Mask.find('_');
StringRef Flags = "";
- std::string SpecReg = LowercaseString(Mask.slice(Start, Next));
+ std::string SpecReg = Mask.slice(Start, Next).lower();
if (Next != StringRef::npos)
Flags = Mask.slice(Next+1, Mask.size());
@@ -2392,7 +3366,8 @@ parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
FlagsVal = 8; // No flag
}
} else if (SpecReg == "cpsr" || SpecReg == "spsr") {
- if (Flags == "all") // cpsr_all is an alias for cpsr_fc
+ // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
+ if (Flags == "all" || Flags == "")
Flags = "fc";
for (int i = 0, e = Flags.size(); i != e; ++i) {
unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
@@ -2411,9 +3386,13 @@ parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
} else // No match for special register.
return MatchOperand_NoMatch;
- // Special register without flags are equivalent to "fc" flags.
- if (!FlagsVal)
- FlagsVal = 0x9;
+ // Special register without flags is NOT equivalent to "fc" flags.
+ // NOTE: This is a divergence from gas' behavior. Uncommenting the following
+ // two lines would enable gas compatibility at the expense of breaking
+ // round-tripping.
+ //
+ // if (!FlagsVal)
+ // FlagsVal = 0x9;
// Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
if (SpecReg == "spsr")
@@ -2433,8 +3412,8 @@ parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
return MatchOperand_ParseFail;
}
StringRef ShiftName = Tok.getString();
- std::string LowerOp = LowercaseString(Op);
- std::string UpperOp = UppercaseString(Op);
+ std::string LowerOp = Op.lower();
+ std::string UpperOp = Op.upper();
if (ShiftName != LowerOp && ShiftName != UpperOp) {
Error(Parser.getTok().getLoc(), Op + " operand expected.");
return MatchOperand_ParseFail;
@@ -2442,7 +3421,8 @@ parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
Parser.Lex(); // Eat shift type token.
// There must be a '#' and a shift amount.
- if (Parser.getTok().isNot(AsmToken::Hash)) {
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
@@ -2520,7 +3500,8 @@ parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Parser.Lex(); // Eat the operator.
// A '#' and a shift amount.
- if (Parser.getTok().isNot(AsmToken::Hash)) {
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
@@ -2580,7 +3561,8 @@ parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Parser.Lex(); // Eat the operator.
// A '#' and a rotate amount.
- if (Parser.getTok().isNot(AsmToken::Hash)) {
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
@@ -2617,7 +3599,8 @@ ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S = Parser.getTok().getLoc();
// The bitfield descriptor is really two operands, the LSB and the width.
- if (Parser.getTok().isNot(AsmToken::Hash)) {
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
@@ -2649,7 +3632,8 @@ parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_ParseFail;
}
Parser.Lex(); // Eat hash token.
- if (Parser.getTok().isNot(AsmToken::Hash)) {
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
Error(Parser.getTok().getLoc(), "'#' expected");
return MatchOperand_ParseFail;
}
@@ -2743,7 +3727,8 @@ parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
SMLoc S = Tok.getLoc();
// Do immediates first, as we always parse those if we have a '#'.
- if (Parser.getTok().is(AsmToken::Hash)) {
+ if (Parser.getTok().is(AsmToken::Hash) ||
+ Parser.getTok().is(AsmToken::Dollar)) {
Parser.Lex(); // Eat the '#'.
// Explicitly look for a '-', as we need to encode negative zero
// differently.
@@ -3082,18 +4067,80 @@ cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
}
((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
- ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1);
- // If we have a three-operand form, use that, else the second source operand
- // is just the destination operand again.
- if (Operands.size() == 6)
- ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
- else
- Inst.addOperand(Inst.getOperand(0));
+ // If we have a three-operand form, make sure to set Rn to be the operand
+ // that isn't the same as Rd.
+ unsigned RegOp = 4;
+ if (Operands.size() == 6 &&
+ ((ARMOperand*)Operands[4])->getReg() ==
+ ((ARMOperand*)Operands[3])->getReg())
+ RegOp = 5;
+ ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
+ Inst.addOperand(Inst.getOperand(0));
((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
return true;
}
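(Aside, illustration only, not part of the patch: the effect of the RegOp
selection above, with hypothetical operands:)

  // muls r2, r2, r1  -> Operands[4] == Rd, so RegOp = 5 and Rn = r1
  // muls r2, r1, r2  -> Operands[4] != Rd, so RegOp = 4 and Rn = r1
  // muls r2, r1      -> two-operand form, RegOp stays 4 and Rn = r1
  //
  // In every case Rd is then re-added as the second source via
  // Inst.addOperand(Inst.getOperand(0)), satisfying the Thumb MUL
  // tied-operand form (Rd == Rm).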
+bool ARMAsmParser::
+cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // Vd
+ ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
+ // Create a writeback register dummy placeholder.
+ Inst.addOperand(MCOperand::CreateImm(0));
+ // Vn
+ ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
+ // pred
+ ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
+ return true;
+}
+
+bool ARMAsmParser::
+cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // Vd
+ ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
+ // Create a writeback register dummy placeholder.
+ Inst.addOperand(MCOperand::CreateImm(0));
+ // Vn
+ ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
+ // Vm
+ ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
+ // pred
+ ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
+ return true;
+}
+
+bool ARMAsmParser::
+cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // Create a writeback register dummy placeholder.
+ Inst.addOperand(MCOperand::CreateImm(0));
+ // Vn
+ ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
+ // Vt
+ ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
+ // pred
+ ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
+ return true;
+}
+
+bool ARMAsmParser::
+cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // Create a writeback register dummy placeholder.
+ Inst.addOperand(MCOperand::CreateImm(0));
+ // Vn
+ ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
+ // Vm
+ ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
+ // Vt
+ ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
+ // pred
+ ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
+ return true;
+}
+
/// Parse an ARM memory expression; return false on success, or true after
/// emitting an error. The first token must be a '[' when called.
bool ARMAsmParser::
@@ -3153,7 +4200,10 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
unsigned Align = 0;
switch (CE->getValue()) {
default:
- return Error(E, "alignment specifier must be 64, 128, or 256 bits");
+ return Error(E,
+ "alignment specifier must be 16, 32, 64, 128, or 256 bits");
+ case 16: Align = 2; break;
+ case 32: Align = 4; break;
case 64: Align = 8; break;
case 128: Align = 16; break;
case 256: Align = 32; break;
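(Aside, illustration only, not part of the patch: the newly accepted cases just
extend the same bits-to-bytes table; the switch encodes Align(bytes) = bits / 8
for the supported widths. A standalone sketch:)

  // Returns the alignment in bytes, or 0 for a value the parser rejects.
  static unsigned alignBytes(long Bits) {
    switch (Bits) {
    case 16: case 32: case 64: case 128: case 256:
      return (unsigned)(Bits / 8);
    default:
      return 0; // the parser emits "alignment specifier must be ..." here
    }
  }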
@@ -3182,9 +4232,13 @@ parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
}
// If we have a '#', it's an immediate offset, else assume it's a register
- // offset.
- if (Parser.getTok().is(AsmToken::Hash)) {
- Parser.Lex(); // Eat the '#'.
+ // offset. Be friendly and also accept a plain integer (without a leading
+ // hash) for gas compatibility.
+ if (Parser.getTok().is(AsmToken::Hash) ||
+ Parser.getTok().is(AsmToken::Dollar) ||
+ Parser.getTok().is(AsmToken::Integer)) {
+ if (Parser.getTok().isNot(AsmToken::Integer))
+ Parser.Lex(); // Eat the '#'.
E = Parser.getTok().getLoc();
bool isNegative = getParser().getTok().is(AsmToken::Minus);
@@ -3281,7 +4335,8 @@ bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
if (Tok.isNot(AsmToken::Identifier))
return true;
StringRef ShiftName = Tok.getString();
- if (ShiftName == "lsl" || ShiftName == "LSL")
+ if (ShiftName == "lsl" || ShiftName == "LSL" ||
+ ShiftName == "asl" || ShiftName == "ASL")
St = ARM_AM::lsl;
else if (ShiftName == "lsr" || ShiftName == "LSR")
St = ARM_AM::lsr;
@@ -3301,7 +4356,8 @@ bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
Loc = Parser.getTok().getLoc();
// A '#' and a shift amount.
const AsmToken &HashTok = Parser.getTok();
- if (HashTok.isNot(AsmToken::Hash))
+ if (HashTok.isNot(AsmToken::Hash) &&
+ HashTok.isNot(AsmToken::Dollar))
return Error(HashTok.getLoc(), "'#' expected");
Parser.Lex(); // Eat hash token.
@@ -3328,10 +4384,36 @@ bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
/// parseFPImm - A floating point immediate expression operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // Anything that can accept a floating point constant as an operand
+ // needs to go through here, as the regular ParseExpression is
+ // integer only.
+ //
+ // This routine still creates a generic Immediate operand, containing
+ // a bitcast of the 64-bit floating point value. The various operands
+ // that accept floats can check whether the value is valid for them
+ // via the standard is*() predicates.
+
SMLoc S = Parser.getTok().getLoc();
- if (Parser.getTok().isNot(AsmToken::Hash))
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_NoMatch;
+
+ // Disambiguate the VMOV forms that can accept an FP immediate.
+ // vmov.f32 <sreg>, #imm
+ // vmov.f64 <dreg>, #imm
+ // vmov.f32 <dreg>, #imm @ vector f32x2
+ // vmov.f32 <qreg>, #imm @ vector f32x4
+ //
+ // There are also the NEON VMOV instructions which expect an
+ // integer constant. Make sure we don't try to parse an FPImm
+ // for these:
+ // vmov.i{8|16|32|64} <dreg|qreg>, #imm
+ ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
+ if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
+ TyOp->getToken() != ".f64"))
+ return MatchOperand_NoMatch;
+
Parser.Lex(); // Eat the '#'.
// Handle negation, as that still comes through as a separate token.
@@ -3341,34 +4423,39 @@ parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
Parser.Lex();
}
const AsmToken &Tok = Parser.getTok();
+ SMLoc Loc = Tok.getLoc();
if (Tok.is(AsmToken::Real)) {
- APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
+ APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
// If we had a '-' in front, toggle the sign bit.
- IntVal ^= (uint64_t)isNegative << 63;
- int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
+ IntVal ^= (uint64_t)isNegative << 31;
Parser.Lex(); // Eat the token.
- if (Val == -1) {
- TokError("floating point value out of range");
- return MatchOperand_ParseFail;
- }
- Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
+ Operands.push_back(ARMOperand::CreateImm(
+ MCConstantExpr::Create(IntVal, getContext()),
+ S, Parser.getTok().getLoc()));
return MatchOperand_Success;
}
+ // Also handle plain integers. Instructions which allow floating point
+ // immediates also allow a raw encoded 8-bit value.
if (Tok.is(AsmToken::Integer)) {
int64_t Val = Tok.getIntVal();
Parser.Lex(); // Eat the token.
if (Val > 255 || Val < 0) {
- TokError("encoded floating point value out of range");
+ Error(Loc, "encoded floating point value out of range");
return MatchOperand_ParseFail;
}
- Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
+ double RealVal = ARM_AM::getFPImmFloat(Val);
+ Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
+ Operands.push_back(ARMOperand::CreateImm(
+ MCConstantExpr::Create(Val, getContext()), S,
+ Parser.getTok().getLoc()));
return MatchOperand_Success;
}
- TokError("invalid floating point immediate");
+ Error(Loc, "invalid floating point immediate");
return MatchOperand_ParseFail;
}
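(Aside, illustration only, not part of the patch: the switch to IEEEsingle
means the immediate is now bit-cast at 32 bits, with the '-' applied by
flipping bit 31. A standalone demonstration, assuming an IEEE-754 binary32
host float:)

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    float F = 1.0f;
    uint32_t Bits;
    std::memcpy(&Bits, &F, sizeof(Bits)); // bitcast, like bitcastToAPInt()
    bool IsNegative = true;               // a leading '-' token was consumed
    Bits ^= (uint32_t)IsNegative << 31;   // toggle the sign bit
    std::printf("0x%08x\n", Bits);        // 0xbf800000, i.e. -1.0f
    return 0;
  }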
+
/// Parse an ARM instruction operand. For now this parses the operand regardless
/// of the mnemonic.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
@@ -3391,7 +4478,6 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
Error(Parser.getTok().getLoc(), "unexpected token in operand");
return true;
case AsmToken::Identifier: {
- // If this is VMRS, check for the apsr_nzcv operand.
if (!tryParseRegisterWithWriteBack(Operands))
return false;
int Res = tryParseShiftRegister(Operands);
@@ -3399,17 +4485,21 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
return false;
else if (Res == -1) // irrecoverable error
return true;
- if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
+ // If this is VMRS, check for the apsr_nzcv operand.
+ if (Mnemonic == "vmrs" &&
+ Parser.getTok().getString().equals_lower("apsr_nzcv")) {
S = Parser.getTok().getLoc();
Parser.Lex();
- Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
+ Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
return false;
}
// Fall though for the Identifier case that is not a register or a
// special name.
}
+ case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as branch targets
+ case AsmToken::String: // quoted label names.
case AsmToken::Dot: { // . as a branch target
// This was not a register so parse other operands that start with an
// identifier (like labels) as expressions and create them as immediates.
@@ -3425,6 +4515,7 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
return parseMemory(Operands);
case AsmToken::LCurly:
return parseRegisterList(Operands);
+ case AsmToken::Dollar:
case AsmToken::Hash: {
// #42 -> immediate.
// TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
@@ -3435,13 +4526,11 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
if (getParser().ParseExpression(ImmVal))
return true;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
- if (!CE) {
- Error(S, "constant expression expected");
- return MatchOperand_ParseFail;
+ if (CE) {
+ int32_t Val = CE->getValue();
+ if (isNegative && Val == 0)
+ ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
}
- int32_t Val = CE->getValue();
- if (isNegative && Val == 0)
- ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
return false;
@@ -3524,7 +4613,8 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
- Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
+ Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
+ Mnemonic == "fmuls")
return Mnemonic;
// First, split out any predication code. Ignore mnemonics we know aren't
@@ -3565,7 +4655,11 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
- Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
+ Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
+ Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
+ Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
+ Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
+ Mnemonic == "vfms" || Mnemonic == "vfnms" ||
(Mnemonic == "movs" && isThumb()))) {
Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
CarrySetting = true;
@@ -3609,6 +4703,7 @@ getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
Mnemonic == "orr" || Mnemonic == "mvn" ||
Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
+ Mnemonic == "vfm" || Mnemonic == "vfnm" ||
(!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
Mnemonic == "mla" || Mnemonic == "smlal" ||
Mnemonic == "umlal" || Mnemonic == "umull"))) {
@@ -3677,7 +4772,7 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
static_cast<ARMOperand*>(Operands[4])->isReg() &&
static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
- (static_cast<ARMOperand*>(Operands[5])->isReg() ||
+ ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
return true;
// For Thumb2, add/sub immediate does not have a cc_out operand for the
@@ -3694,9 +4789,11 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
//
// If either register is a high reg, it's either one of the SP
// variants (handled above) or a 32-bit encoding, so we just
- // check against T3.
+ // check against T3. If the second register is the PC, this is an
+ // alternate form of ADR, which uses encoding T4, so check for that too.
if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
!isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
+ static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
return false;
// If both registers are low, we're in an IT block, and the immediate is
@@ -3726,6 +4823,7 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
// remove the cc_out operand.
(!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
!isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
+ !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
!inITBlock() ||
(static_cast<ARMOperand*>(Operands[3])->getReg() !=
static_cast<ARMOperand*>(Operands[5])->getReg() &&
@@ -3733,6 +4831,20 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
static_cast<ARMOperand*>(Operands[4])->getReg())))
return true;
+ // Also check the 'mul' syntax variant that doesn't specify an explicit
+ // destination register.
+ if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
+ static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
+ static_cast<ARMOperand*>(Operands[3])->isReg() &&
+ static_cast<ARMOperand*>(Operands[4])->isReg() &&
+ // If the registers aren't low regs or the cc_out operand is zero
+ // outside of an IT block, we have to use the 32-bit encoding, so
+ // remove the cc_out operand.
+ (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
+ !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
+ !inITBlock()))
+ return true;
+
// Register-register 'add/sub' for thumb does not have a cc_out operand
@@ -3744,15 +4856,52 @@ bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
(Operands.size() == 5 || Operands.size() == 6) &&
static_cast<ARMOperand*>(Operands[3])->isReg() &&
static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
- static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
+ static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
+ (static_cast<ARMOperand*>(Operands[4])->isImm() ||
+ (Operands.size() == 6 &&
+ static_cast<ARMOperand*>(Operands[5])->isImm())))
return true;
return false;
}
+static bool isDataTypeToken(StringRef Tok) {
+ return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
+ Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
+ Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
+ Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
+ Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
+ Tok == ".f" || Tok == ".d";
+}
+
+// FIXME: This bit should probably be handled via an explicit match class
+// in the .td files that matches the suffix instead of having it be
+// a literal string token the way it is now.
+static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
+ return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
+}
+
+static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ // Apply mnemonic aliases before doing anything else, as the destination
+ // mnemonic may include suffixes and we want to handle them normally.
+ // The generic tblgen'erated code does this later, at the start of
+ // MatchInstructionImpl(), but that's too late for aliases that include
+ // any sort of suffix.
+ unsigned AvailableFeatures = getAvailableFeatures();
+ applyMnemonicAliases(Name, AvailableFeatures);
+
+ // First check for the ARM-specific .req directive.
+ if (Parser.getTok().is(AsmToken::Identifier) &&
+ Parser.getTok().getIdentifier() == ".req") {
+ parseDirectiveReq(Name, NameLoc);
+ // We always return 'error' for this, as we're done with this
+ // statement and don't need to match the instruction.
+ return true;
+ }
+
// Create the leading tokens for the mnemonic, split by '.' characters.
size_t Start = 0, Next = Name.find('.');
StringRef Mnemonic = Name.slice(Start, Next);
@@ -3854,9 +5003,12 @@ bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
Next = Name.find('.', Start + 1);
StringRef ExtraToken = Name.slice(Start, Next);
- // For now, we're only parsing Thumb1 (for the most part), so
- // just ignore ".n" qualifiers. We'll use them to restrict
- // matching when we do Thumb2.
+ // Some NEON instructions have an optional datatype suffix that is
+ // completely ignored. Check for that.
+ if (isDataTypeToken(ExtraToken) &&
+ doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
+ continue;
+
if (ExtraToken != ".n") {
SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
@@ -3941,12 +5093,21 @@ bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
}
}
// Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
- // end. Convert it to a token here.
+ // end. Convert it to a token here. Take care not to convert those
+ // that should hit the Thumb2 encoding.
if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
+ static_cast<ARMOperand*>(Operands[3])->isReg() &&
+ static_cast<ARMOperand*>(Operands[4])->isReg() &&
static_cast<ARMOperand*>(Operands[5])->isImm()) {
ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
- if (CE && CE->getValue() == 0) {
+ if (CE && CE->getValue() == 0 &&
+ (isThumbOne() ||
+ // The cc_out operand matches the IT block.
+ ((inITBlock() != CarrySetting) &&
+ // Neither register operand is a high register.
+ (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
+ isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
Operands.erase(Operands.begin() + 5);
Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
delete Op;
@@ -3990,9 +5151,9 @@ static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
-extern MCInstrDesc ARMInsts[];
+extern const MCInstrDesc ARMInsts[];
}
-static MCInstrDesc &getInstDesc(unsigned Opcode) {
+static const MCInstrDesc &getInstDesc(unsigned Opcode) {
return ARMInsts[Opcode];
}
@@ -4000,13 +5161,14 @@ static MCInstrDesc &getInstDesc(unsigned Opcode) {
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
+ const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
SMLoc Loc = Operands[0]->getStartLoc();
// Check the IT block state first.
- // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
- // being allowed in IT blocks, but not being predicable. It just always
+ // NOTE: The BKPT instruction has the interesting property of being
+ // allowed in IT blocks, but not being predicable. It just always
// executes.
- if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
+ if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
+ Inst.getOpcode() != ARM::BKPT) {
unsigned bit = 1;
if (ITState.FirstCond)
ITState.FirstCond = false;
@@ -4115,16 +5277,21 @@ validateInstruction(MCInst &Inst,
"in register list");
break;
}
+ // As with ldm/stm, push and pop have a hi-reg handling version in Thumb2,
+ // so only issue a diagnostic for Thumb1. The instructions will be
+ // switched to the t2 encodings in processInstruction() if necessary.
case ARM::tPOP: {
bool listContainsBase;
- if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase))
+ if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
+ !isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"registers must be in range r0-r7 or pc");
break;
}
case ARM::tPUSH: {
bool listContainsBase;
- if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase))
+ if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
+ !isThumbTwo())
return Error(Operands[2]->getStartLoc(),
"registers must be in range r0-r7 or lr");
break;
@@ -4141,10 +5308,1553 @@ validateInstruction(MCInst &Inst,
return false;
}
-void ARMAsmParser::
+static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
+ switch(Opc) {
+ default: llvm_unreachable("unexpected opcode!");
+ // VST1LN
+ case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
+ case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
+ case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
+ case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
+ case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
+ case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
+ case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
+ case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
+ case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
+
+ // VST2LN
+ case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
+ case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
+ case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
+ case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
+ case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
+
+ case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
+ case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
+ case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
+ case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
+ case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
+
+ case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
+ case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
+ case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
+ case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
+ case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
+
+ // VST3LN
+ case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
+ case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
+ case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
+ case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
+ case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
+ case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
+ case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
+ case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
+ case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
+ case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
+ case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
+ case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
+ case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
+ case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
+ case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
+
+ // VST3
+ case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
+ case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
+ case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
+ case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
+ case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
+ case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
+ case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
+ case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
+ case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
+ case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
+ case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
+ case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
+ case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
+ case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
+ case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
+ case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
+ case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
+ case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
+
+ // VST4LN
+ case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
+ case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
+ case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
+ case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
+ case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
+ case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
+ case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
+ case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
+ case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
+ case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
+ case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
+ case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
+ case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
+ case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
+ case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
+
+ // VST4
+ case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
+ case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
+ case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
+ case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
+ case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
+ case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
+ case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
+ case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
+ case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
+ case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
+ case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
+ case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
+ case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
+ case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
+ case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
+ case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
+ case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
+ case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
+ }
+}
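(Aside, illustration only, not part of the patch: a usage sketch of the helper
above — the matcher-only "*Asm*" pseudo-opcode is translated to the real
encoding while the register spacing (1 = consecutive D registers, 2 = every
other D register) is recovered for operand construction:

  unsigned Spacing;
  unsigned Real = getRealVSTOpcode(ARM::VST2LNqAsm_32, Spacing);
  // Real == ARM::VST2LNq32, Spacing == 2

as used by the VST alias expansion in processInstruction() below; the VLD
helper that follows is its mirror image for the load forms.)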
+
+static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
+ switch(Opc) {
+ default: llvm_unreachable("unexpected opcode!");
+ // VLD1LN
+ case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
+ case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
+ case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
+ case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
+ case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
+ case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
+ case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
+ case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
+ case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
+
+ // VLD2LN
+ case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
+ case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
+ case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
+ case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
+ case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
+ case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
+ case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
+ case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
+ case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
+ case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
+ case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
+ case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
+ case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
+ case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
+ case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
+
+ // VLD3DUP
+ case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
+ case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
+ case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
+ case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
+ case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
+ case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
+ case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
+ case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
+ case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
+ case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
+ case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
+ case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
+ case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
+ case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
+ case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
+ case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
+ case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
+ case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
+
+ // VLD3LN
+ case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
+ case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
+ case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
+ case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
+ case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
+ case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
+ case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
+ case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
+ case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
+ case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
+ case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
+ case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
+ case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
+ case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
+ case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
+
+ // VLD3
+ case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
+ case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
+ case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
+ case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
+ case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
+ case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
+ case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
+ case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
+ case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
+ case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
+ case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
+ case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
+ case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
+ case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
+ case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
+ case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
+ case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
+ case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
+
+ // VLD4LN
+ case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
+ case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
+ case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
+ case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNq16_UPD;
+ case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
+ case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
+ case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
+ case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
+ case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
+ case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
+ case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
+ case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
+ case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
+ case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
+ case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
+
+ // VLD4DUP
+ case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
+ case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
+ case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
+ case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
+ case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
+ case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
+ case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
+ case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
+ case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
+ case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
+ case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
+ case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
+ case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
+ case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
+ case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
+ case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
+ case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
+ case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
+
+ // VLD4
+ case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
+ case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
+ case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
+ case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
+ case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
+ case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
+ case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
+ case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
+ case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
+ case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
+ case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
+ case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
+ case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
+ case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
+ case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
+ case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
+ case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
+ case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
+ }
+}
+
+bool ARMAsmParser::
processInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
switch (Inst.getOpcode()) {
+ // Aliases for alternate PC+imm syntax of LDR instructions.
+ case ARM::t2LDRpcrel:
+ Inst.setOpcode(ARM::t2LDRpci);
+ return true;
+ case ARM::t2LDRBpcrel:
+ Inst.setOpcode(ARM::t2LDRBpci);
+ return true;
+ case ARM::t2LDRHpcrel:
+ Inst.setOpcode(ARM::t2LDRHpci);
+ return true;
+ case ARM::t2LDRSBpcrel:
+ Inst.setOpcode(ARM::t2LDRSBpci);
+ return true;
+ case ARM::t2LDRSHpcrel:
+ Inst.setOpcode(ARM::t2LDRSHpci);
+ return true;
+ // Handle NEON VST complex aliases.
+ case ARM::VST1LNdWB_register_Asm_8:
+ case ARM::VST1LNdWB_register_Asm_16:
+ case ARM::VST1LNdWB_register_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST2LNdWB_register_Asm_8:
+ case ARM::VST2LNdWB_register_Asm_16:
+ case ARM::VST2LNdWB_register_Asm_32:
+ case ARM::VST2LNqWB_register_Asm_16:
+ case ARM::VST2LNqWB_register_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST3LNdWB_register_Asm_8:
+ case ARM::VST3LNdWB_register_Asm_16:
+ case ARM::VST3LNdWB_register_Asm_32:
+ case ARM::VST3LNqWB_register_Asm_16:
+ case ARM::VST3LNqWB_register_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST4LNdWB_register_Asm_8:
+ case ARM::VST4LNdWB_register_Asm_16:
+ case ARM::VST4LNdWB_register_Asm_32:
+ case ARM::VST4LNqWB_register_Asm_16:
+ case ARM::VST4LNqWB_register_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST1LNdWB_fixed_Asm_8:
+ case ARM::VST1LNdWB_fixed_Asm_16:
+ case ARM::VST1LNdWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST2LNdWB_fixed_Asm_8:
+ case ARM::VST2LNdWB_fixed_Asm_16:
+ case ARM::VST2LNdWB_fixed_Asm_32:
+ case ARM::VST2LNqWB_fixed_Asm_16:
+ case ARM::VST2LNqWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST3LNdWB_fixed_Asm_8:
+ case ARM::VST3LNdWB_fixed_Asm_16:
+ case ARM::VST3LNdWB_fixed_Asm_32:
+ case ARM::VST3LNqWB_fixed_Asm_16:
+ case ARM::VST3LNqWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST4LNdWB_fixed_Asm_8:
+ case ARM::VST4LNdWB_fixed_Asm_16:
+ case ARM::VST4LNdWB_fixed_Asm_32:
+ case ARM::VST4LNqWB_fixed_Asm_16:
+ case ARM::VST4LNqWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST1LNdAsm_8:
+ case ARM::VST1LNdAsm_16:
+ case ARM::VST1LNdAsm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST2LNdAsm_8:
+ case ARM::VST2LNdAsm_16:
+ case ARM::VST2LNdAsm_32:
+ case ARM::VST2LNqAsm_16:
+ case ARM::VST2LNqAsm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST3LNdAsm_8:
+ case ARM::VST3LNdAsm_16:
+ case ARM::VST3LNdAsm_32:
+ case ARM::VST3LNqAsm_16:
+ case ARM::VST3LNqAsm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST4LNdAsm_8:
+ case ARM::VST4LNdAsm_16:
+ case ARM::VST4LNdAsm_32:
+ case ARM::VST4LNqAsm_16:
+ case ARM::VST4LNqAsm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
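+ // As a representative example of the lane-store expansions above
+ // (assembly operands chosen for illustration):
+ //   vst4.32 {d0[1], d2[1], d4[1], d6[1]}, [r3]
+ // parses as VST4LNqAsm_32 with operand 0 == d0; getRealVSTOpcode
+ // returns VST4LNq32 with Spacing == 2, so d2/d4/d6 are recreated
+ // here as d0+2, d0+4 and d0+6.
+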
+ // Handle NEON VLD complex aliases.
+ case ARM::VLD1LNdWB_register_Asm_8:
+ case ARM::VLD1LNdWB_register_Asm_16:
+ case ARM::VLD1LNdWB_register_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD2LNdWB_register_Asm_8:
+ case ARM::VLD2LNdWB_register_Asm_16:
+ case ARM::VLD2LNdWB_register_Asm_32:
+ case ARM::VLD2LNqWB_register_Asm_16:
+ case ARM::VLD2LNqWB_register_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD3LNdWB_register_Asm_8:
+ case ARM::VLD3LNdWB_register_Asm_16:
+ case ARM::VLD3LNdWB_register_Asm_32:
+ case ARM::VLD3LNqWB_register_Asm_16:
+ case ARM::VLD3LNqWB_register_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD4LNdWB_register_Asm_8:
+ case ARM::VLD4LNdWB_register_Asm_16:
+ case ARM::VLD4LNdWB_register_Asm_32:
+ case ARM::VLD4LNqWB_register_Asm_16:
+ case ARM::VLD4LNqWB_register_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(4)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(5)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD1LNdWB_fixed_Asm_8:
+ case ARM::VLD1LNdWB_fixed_Asm_16:
+ case ARM::VLD1LNdWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD2LNdWB_fixed_Asm_8:
+ case ARM::VLD2LNdWB_fixed_Asm_16:
+ case ARM::VLD2LNdWB_fixed_Asm_32:
+ case ARM::VLD2LNqWB_fixed_Asm_16:
+ case ARM::VLD2LNqWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD3LNdWB_fixed_Asm_8:
+ case ARM::VLD3LNdWB_fixed_Asm_16:
+ case ARM::VLD3LNdWB_fixed_Asm_32:
+ case ARM::VLD3LNqWB_fixed_Asm_16:
+ case ARM::VLD3LNqWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD4LNdWB_fixed_Asm_8:
+ case ARM::VLD4LNdWB_fixed_Asm_16:
+ case ARM::VLD4LNdWB_fixed_Asm_32:
+ case ARM::VLD4LNqWB_fixed_Asm_16:
+ case ARM::VLD4LNqWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD1LNdAsm_8:
+ case ARM::VLD1LNdAsm_16:
+ case ARM::VLD1LNdAsm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD2LNdAsm_8:
+ case ARM::VLD2LNdAsm_16:
+ case ARM::VLD2LNdAsm_32:
+ case ARM::VLD2LNqAsm_16:
+ case ARM::VLD2LNqAsm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD3LNdAsm_8:
+ case ARM::VLD3LNdAsm_16:
+ case ARM::VLD3LNdAsm_32:
+ case ARM::VLD3LNqAsm_16:
+ case ARM::VLD3LNqAsm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD4LNdAsm_8:
+ case ARM::VLD4LNdAsm_16:
+ case ARM::VLD4LNdAsm_32:
+ case ARM::VLD4LNqAsm_16:
+ case ARM::VLD4LNqAsm_32: {
+ MCInst TmpInst;
+ // Shuffle the operands around so the lane index operand is in the
+ // right place.
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(2)); // Rn
+ TmpInst.addOperand(Inst.getOperand(3)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // lane
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
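+ // Note the extra tied-source copies in the lane loads above: e.g.
+ //   vld2.16 {d0[2], d1[2]}, [r4]
+ // (VLD2LNdAsm_16, Spacing == 1) replaces only lane 2, so the real
+ // VLD2LNd16 reads d0/d1 as tied sources to preserve the other lanes,
+ // which is why the Vd operands are added to TmpInst a second time.
+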
+ // VLD3DUP single 3-element structure to all lanes instructions.
+ case ARM::VLD3DUPdAsm_8:
+ case ARM::VLD3DUPdAsm_16:
+ case ARM::VLD3DUPdAsm_32:
+ case ARM::VLD3DUPqAsm_8:
+ case ARM::VLD3DUPqAsm_16:
+ case ARM::VLD3DUPqAsm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD3DUPdWB_fixed_Asm_8:
+ case ARM::VLD3DUPdWB_fixed_Asm_16:
+ case ARM::VLD3DUPdWB_fixed_Asm_32:
+ case ARM::VLD3DUPqWB_fixed_Asm_8:
+ case ARM::VLD3DUPqWB_fixed_Asm_16:
+ case ARM::VLD3DUPqWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD3DUPdWB_register_Asm_8:
+ case ARM::VLD3DUPdWB_register_Asm_16:
+ case ARM::VLD3DUPdWB_register_Asm_32:
+ case ARM::VLD3DUPqWB_register_Asm_8:
+ case ARM::VLD3DUPqWB_register_Asm_16:
+ case ARM::VLD3DUPqWB_register_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // Rm
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ // VLD3 multiple 3-element structure instructions.
+ case ARM::VLD3dAsm_8:
+ case ARM::VLD3dAsm_16:
+ case ARM::VLD3dAsm_32:
+ case ARM::VLD3qAsm_8:
+ case ARM::VLD3qAsm_16:
+ case ARM::VLD3qAsm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD3dWB_fixed_Asm_8:
+ case ARM::VLD3dWB_fixed_Asm_16:
+ case ARM::VLD3dWB_fixed_Asm_32:
+ case ARM::VLD3qWB_fixed_Asm_8:
+ case ARM::VLD3qWB_fixed_Asm_16:
+ case ARM::VLD3qWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD3dWB_register_Asm_8:
+ case ARM::VLD3dWB_register_Asm_16:
+ case ARM::VLD3dWB_register_Asm_32:
+ case ARM::VLD3qWB_register_Asm_8:
+ case ARM::VLD3qWB_register_Asm_16:
+ case ARM::VLD3qWB_register_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // Rm
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ // VLD4DUP single 4-element structure to all lanes instructions.
+ case ARM::VLD4DUPdAsm_8:
+ case ARM::VLD4DUPdAsm_16:
+ case ARM::VLD4DUPdAsm_32:
+ case ARM::VLD4DUPqAsm_8:
+ case ARM::VLD4DUPqAsm_16:
+ case ARM::VLD4DUPqAsm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD4DUPdWB_fixed_Asm_8:
+ case ARM::VLD4DUPdWB_fixed_Asm_16:
+ case ARM::VLD4DUPdWB_fixed_Asm_32:
+ case ARM::VLD4DUPqWB_fixed_Asm_8:
+ case ARM::VLD4DUPqWB_fixed_Asm_16:
+ case ARM::VLD4DUPqWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD4DUPdWB_register_Asm_8:
+ case ARM::VLD4DUPdWB_register_Asm_16:
+ case ARM::VLD4DUPdWB_register_Asm_32:
+ case ARM::VLD4DUPqWB_register_Asm_8:
+ case ARM::VLD4DUPqWB_register_Asm_16:
+ case ARM::VLD4DUPqWB_register_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // Rm
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ // VLD4 multiple 4-element structure instructions.
+ case ARM::VLD4dAsm_8:
+ case ARM::VLD4dAsm_16:
+ case ARM::VLD4dAsm_32:
+ case ARM::VLD4qAsm_8:
+ case ARM::VLD4qAsm_16:
+ case ARM::VLD4qAsm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD4dWB_fixed_Asm_8:
+ case ARM::VLD4dWB_fixed_Asm_16:
+ case ARM::VLD4dWB_fixed_Asm_32:
+ case ARM::VLD4qWB_fixed_Asm_8:
+ case ARM::VLD4qWB_fixed_Asm_16:
+ case ARM::VLD4qWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VLD4dWB_register_Asm_8:
+ case ARM::VLD4dWB_register_Asm_16:
+ case ARM::VLD4dWB_register_Asm_32:
+ case ARM::VLD4qWB_register_Asm_8:
+ case ARM::VLD4qWB_register_Asm_16:
+ case ARM::VLD4qWB_register_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // Rm
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ // VST3 multiple 3-element structure instructions.
+ case ARM::VST3dAsm_8:
+ case ARM::VST3dAsm_16:
+ case ARM::VST3dAsm_32:
+ case ARM::VST3qAsm_8:
+ case ARM::VST3qAsm_16:
+ case ARM::VST3qAsm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST3dWB_fixed_Asm_8:
+ case ARM::VST3dWB_fixed_Asm_16:
+ case ARM::VST3dWB_fixed_Asm_32:
+ case ARM::VST3qWB_fixed_Asm_8:
+ case ARM::VST3qWB_fixed_Asm_16:
+ case ARM::VST3qWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST3dWB_register_Asm_8:
+ case ARM::VST3dWB_register_Asm_16:
+ case ARM::VST3dWB_register_Asm_32:
+ case ARM::VST3qWB_register_Asm_8:
+ case ARM::VST3qWB_register_Asm_16:
+ case ARM::VST3qWB_register_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
+ // VST4 multiple 4-element structure instructions.
+ case ARM::VST4dAsm_8:
+ case ARM::VST4dAsm_16:
+ case ARM::VST4dAsm_32:
+ case ARM::VST4qAsm_8:
+ case ARM::VST4qAsm_16:
+ case ARM::VST4qAsm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST4dWB_fixed_Asm_8:
+ case ARM::VST4dWB_fixed_Asm_16:
+ case ARM::VST4dWB_fixed_Asm_32:
+ case ARM::VST4qWB_fixed_Asm_8:
+ case ARM::VST4qWB_fixed_Asm_16:
+ case ARM::VST4qWB_fixed_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+
+ case ARM::VST4dWB_register_Asm_8:
+ case ARM::VST4dWB_register_Asm_16:
+ case ARM::VST4dWB_register_Asm_32:
+ case ARM::VST4qWB_register_Asm_8:
+ case ARM::VST4qWB_register_Asm_16:
+ case ARM::VST4qWB_register_Asm_32: {
+ MCInst TmpInst;
+ unsigned Spacing;
+ TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // alignment
+ TmpInst.addOperand(Inst.getOperand(3)); // Rm
+ TmpInst.addOperand(Inst.getOperand(0)); // Vd
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 2));
+ TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
+ Spacing * 3));
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+
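+ // For the writeback forms above, "_fixed" vs. "_register" mirrors the
+ // two post-increment syntaxes (registers here are illustrative):
+ //   vst4.8 {d0, d1, d2, d3}, [r0]!      @ fixed: Rm is the reg-0 placeholder
+ //   vst4.8 {d0, d1, d2, d3}, [r0], r2   @ register: Rm == r2
+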
+ // Handle encoding choice for the shift-immediate instructions.
+ case ARM::t2LSLri:
+ case ARM::t2LSRri:
+ case ARM::t2ASRri: {
+ if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
+ Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
+ Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
+ !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
+ static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
+ unsigned NewOpc;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode");
+ case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
+ case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
+ case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
+ }
+ // The Thumb1 operands aren't in the same order. Awesome, eh?
+ MCInst TmpInst;
+ TmpInst.setOpcode(NewOpc);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(5));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+ return false;
+ }
+
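+ // E.g., outside an IT block (registers illustrative):
+ //   lsls r2, r2, #3     @ t2LSLri narrows to the 16-bit tLSLri
+ //   lsls.w r2, r2, #3   @ explicit ".w" keeps the wide encoding
+ //   lsls r8, r8, #3     @ high register, stays t2LSLri
+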
+ // Handle the Thumb2 mode MOV complex aliases.
+ case ARM::t2MOVsr:
+ case ARM::t2MOVSsr: {
+ // Which instruction to expand to depends on the CCOut operand and,
+ // when the register operands are low registers, on whether we're
+ // inside an IT block.
+ bool isNarrow = false;
+ if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
+ isARMLowRegister(Inst.getOperand(1).getReg()) &&
+ isARMLowRegister(Inst.getOperand(2).getReg()) &&
+ Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
+ inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
+ isNarrow = true;
+ MCInst TmpInst;
+ unsigned newOpc;
+ switch (ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
+ default: llvm_unreachable("unexpected opcode!");
+ case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
+ case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
+ case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
+ case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
+ }
+ TmpInst.setOpcode(newOpc);
+ TmpInst.addOperand(Inst.getOperand(0)); // Rd
+ if (isNarrow)
+ TmpInst.addOperand(MCOperand::CreateReg(
+ Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // Rm
+ TmpInst.addOperand(Inst.getOperand(4)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(5));
+ if (!isNarrow)
+ TmpInst.addOperand(MCOperand::CreateReg(
+ Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
+ Inst = TmpInst;
+ return true;
+ }
+ case ARM::t2MOVsi:
+ case ARM::t2MOVSsi: {
+ // Which instruction to expand to depends on the CCOut operand and,
+ // when the register operands are low registers, on whether we're
+ // inside an IT block.
+ bool isNarrow = false;
+ if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
+ isARMLowRegister(Inst.getOperand(1).getReg()) &&
+ inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
+ isNarrow = true;
+ MCInst TmpInst;
+ unsigned newOpc;
+ switch (ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
+ default: llvm_unreachable("unexpected opcode!");
+ case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
+ case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
+ case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
+ case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
+ case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
+ }
+ unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
+ if (Amount == 32) Amount = 0;
+ TmpInst.setOpcode(newOpc);
+ TmpInst.addOperand(Inst.getOperand(0)); // Rd
+ if (isNarrow)
+ TmpInst.addOperand(MCOperand::CreateReg(
+ Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ if (newOpc != ARM::t2RRX)
+ TmpInst.addOperand(MCOperand::CreateImm(Amount));
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ if (!isNarrow)
+ TmpInst.addOperand(MCOperand::CreateReg(
+ Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
+ Inst = TmpInst;
+ return true;
+ }
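+ // E.g., outside an IT block,  movs r0, r1, lsl #3  parses as t2MOVSsi
+ // and is expanded here to the 16-bit tLSLri  lsls r0, r1, #3 . The
+ // Amount == 32 check covers asr/lsr #32, which encode the amount as 0.
+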
+ // Handle the ARM mode MOV complex aliases.
+ case ARM::ASRr:
+ case ARM::LSRr:
+ case ARM::LSLr:
+ case ARM::RORr: {
+ ARM_AM::ShiftOpc ShiftTy;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode!");
+ case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
+ case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
+ case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
+ case ARM::RORr: ShiftTy = ARM_AM::ror; break;
+ }
+ unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVsr);
+ TmpInst.addOperand(Inst.getOperand(0)); // Rd
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(Inst.getOperand(2)); // Rm
+ TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ TmpInst.addOperand(Inst.getOperand(5)); // cc_out
+ Inst = TmpInst;
+ return true;
+ }
+ case ARM::ASRi:
+ case ARM::LSRi:
+ case ARM::LSLi:
+ case ARM::RORi: {
+ ARM_AM::ShiftOpc ShiftTy;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode!");
+ case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
+ case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
+ case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
+ case ARM::RORi: ShiftTy = ARM_AM::ror; break;
+ }
+ // A shift by zero is a plain MOVr, not a MOVsi.
+ unsigned Amt = Inst.getOperand(2).getImm();
+ unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
+ unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
+ MCInst TmpInst;
+ TmpInst.setOpcode(Opc);
+ TmpInst.addOperand(Inst.getOperand(0)); // Rd
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ if (Opc == ARM::MOVsi)
+ TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
+ TmpInst.addOperand(Inst.getOperand(3)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(4));
+ TmpInst.addOperand(Inst.getOperand(5)); // cc_out
+ Inst = TmpInst;
+ return true;
+ }
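+ // E.g.,  asr r0, r1, #5  becomes  mov r0, r1, asr #5  (MOVsi), while a
+ // zero amount such as  lsl r0, r1, #0  degenerates to  mov r0, r1  (MOVr).
+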
+ case ARM::RRXi: {
+ unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVsi);
+ TmpInst.addOperand(Inst.getOperand(0)); // Rd
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
+ TmpInst.addOperand(Inst.getOperand(2)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4)); // cc_out
+ Inst = TmpInst;
+ return true;
+ }
+ case ARM::t2LDMIA_UPD: {
+ // If this is a load of a single register, then we should use
+ // a post-indexed LDR instruction instead, per the ARM ARM.
+ if (Inst.getNumOperands() != 5)
+ return false;
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::t2LDR_POST);
+ TmpInst.addOperand(Inst.getOperand(4)); // Rt
+ TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(MCOperand::CreateImm(4));
+ TmpInst.addOperand(Inst.getOperand(2)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(3));
+ Inst = TmpInst;
+ return true;
+ }
+ case ARM::t2STMDB_UPD: {
+ // If this is a store of a single register, then we should use
+ // a pre-indexed STR instruction instead, per the ARM ARM.
+ if (Inst.getNumOperands() != 5)
+ return false;
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::t2STR_PRE);
+ TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
+ TmpInst.addOperand(Inst.getOperand(4)); // Rt
+ TmpInst.addOperand(Inst.getOperand(1)); // Rn
+ TmpInst.addOperand(MCOperand::CreateImm(-4));
+ TmpInst.addOperand(Inst.getOperand(2)); // CondCode
+ TmpInst.addOperand(Inst.getOperand(3));
+ Inst = TmpInst;
+ return true;
+ }
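+ // E.g.,  ldmia.w sp!, {r8}  becomes  ldr.w r8, [sp], #4  and
+ //  stmdb.w sp!, {r8}  becomes  str.w r8, [sp, #-4]!  per the two
+ // cases above.
+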
case ARM::LDMIA_UPD:
// If this is a load of a single register via a 'pop', then we should use
// a post-indexed LDR instruction instead, per the ARM ARM.
@@ -4160,6 +6870,7 @@ processInstruction(MCInst &Inst,
TmpInst.addOperand(Inst.getOperand(2)); // CondCode
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
+ return true;
}
break;
case ARM::STMDB_UPD:
@@ -4178,41 +6889,117 @@ processInstruction(MCInst &Inst,
Inst = TmpInst;
}
break;
+ case ARM::t2ADDri12:
+ // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
+ // mnemonic was used (not "addw"), encoding T3 is preferred.
+ if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
+ ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
+ break;
+ Inst.setOpcode(ARM::t2ADDri);
+ Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
+ break;
+ case ARM::t2SUBri12:
+ // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
+ // mnemonic was used (not "subw"), encoding T3 is preferred.
+ if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
+ ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
+ break;
+ Inst.setOpcode(ARM::t2SUBri);
+ Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
+ break;
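+ // E.g.,  add r0, r1, #10  prefers the T3 encoding (t2ADDri), while an
+ // explicit  addw r0, r1, #10  keeps the T4 encoding (t2ADDri12).
+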
case ARM::tADDi8:
// If the immediate is in the range 0-7, we want tADDi3 iff Rd was
// explicitly specified. From the ARM ARM: "Encoding T1 is preferred
// to encoding T2 if <Rd> is specified and encoding T2 is preferred
// to encoding T1 if <Rd> is omitted."
- if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
+ if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
Inst.setOpcode(ARM::tADDi3);
+ return true;
+ }
break;
case ARM::tSUBi8:
// If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
// explicitly specified. From the ARM ARM: "Encoding T1 is preferred
// to encoding T2 if <Rd> is specified and encoding T2 is preferred
// to encoding T1 if <Rd> is omitted."
- if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
+ if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
Inst.setOpcode(ARM::tSUBi3);
+ return true;
+ }
break;
+ case ARM::t2ADDri:
+ case ARM::t2SUBri: {
+ // If the destination and first source operand are the same, and
+ // the flags are compatible with the current IT status, use encoding T2
+ // instead of T3. For compatibility with the system 'as'. Make sure the
+ // wide encoding wasn't explicit.
+ if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
+ !isARMLowRegister(Inst.getOperand(0).getReg()) ||
+ (unsigned)Inst.getOperand(2).getImm() > 255 ||
+ ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
+ (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
+ (static_cast<ARMOperand*>(Operands[3])->isToken() &&
+ static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
+ break;
+ MCInst TmpInst;
+ TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
+ ARM::tADDi8 : ARM::tSUBi8);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(5));
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
+ case ARM::t2ADDrr: {
+ // If the destination and first source operand are the same, and
+ // there's no setting of the flags, use encoding T2 instead of T3.
+ // Note that this is only for ADD, not SUB. This mirrors the system
+ // 'as' behaviour. Make sure the wide encoding wasn't explicit.
+ if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
+ Inst.getOperand(5).getReg() != 0 ||
+ (static_cast<ARMOperand*>(Operands[3])->isToken() &&
+ static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
+ break;
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::tADDhirr);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ Inst = TmpInst;
+ return true;
+ }
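+ // E.g.,  add r0, r0, r8  with no flag setting narrows to the 16-bit
+ // tADDhirr form  add r0, r8 ; "adds" or an explicit ".w" keeps t2ADDrr.
+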
case ARM::tB:
// A Thumb conditional branch outside of an IT block is a tBcc.
- if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
+ if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
Inst.setOpcode(ARM::tBcc);
+ return true;
+ }
break;
case ARM::t2B:
// A Thumb2 conditional branch outside of an IT block is a t2Bcc.
- if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
+ if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
Inst.setOpcode(ARM::t2Bcc);
+ return true;
+ }
break;
case ARM::t2Bcc:
// If the conditional is AL or we're in an IT block, we really want t2B.
- if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock())
+ if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
Inst.setOpcode(ARM::t2B);
+ return true;
+ }
break;
case ARM::tBcc:
// If the conditional is AL, we really want tB.
- if (Inst.getOperand(1).getImm() == ARMCC::AL)
+ if (Inst.getOperand(1).getImm() == ARMCC::AL) {
Inst.setOpcode(ARM::tB);
+ return true;
+ }
break;
case ARM::tLDMIA: {
// If the register list contains any high registers, or if the writeback
@@ -4235,6 +7022,7 @@ processInstruction(MCInst &Inst,
if (hasWritebackToken)
Inst.insert(Inst.begin(),
MCOperand::CreateReg(Inst.getOperand(0).getReg()));
+ return true;
}
break;
}
@@ -4248,14 +7036,40 @@ processInstruction(MCInst &Inst,
// 16-bit encoding isn't sufficient. Switch to the 32-bit version.
assert (isThumbTwo());
Inst.setOpcode(ARM::t2STMIA_UPD);
+ return true;
}
break;
}
+ case ARM::tPOP: {
+ bool listContainsBase;
+ // If the register list contains any high registers, we need to use
+ // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
+ // should have generated an error in validateInstruction().
+ if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
+ return false;
+ assert (isThumbTwo());
+ Inst.setOpcode(ARM::t2LDMIA_UPD);
+ // Add the base register and writeback operands.
+ Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
+ Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
+ return true;
+ }
+ case ARM::tPUSH: {
+ bool listContainsBase;
+ if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
+ return false;
+ assert (isThumbTwo());
+ Inst.setOpcode(ARM::t2STMDB_UPD);
+ // Add the base register and writeback operands.
+ Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
+ Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
+ return true;
+ }
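+ // E.g., in Thumb2,  pop {r8}  is rewritten above to t2LDMIA_UPD
+ // (ldmia.w sp!, {r8}), which the t2LDMIA_UPD case then collapses to
+ //  ldr.w r8, [sp], #4  on the next processInstruction pass.
+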
case ARM::t2MOVi: {
// If we can use the 16-bit encoding and the user didn't explicitly
// request the 32-bit variant, transform it here.
if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
- Inst.getOperand(1).getImm() <= 255 &&
+ (unsigned)Inst.getOperand(1).getImm() <= 255 &&
((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
Inst.getOperand(4).getReg() == ARM::CPSR) ||
(inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
@@ -4270,6 +7084,7 @@ processInstruction(MCInst &Inst,
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
+ return true;
}
break;
}
@@ -4290,6 +7105,7 @@ processInstruction(MCInst &Inst,
TmpInst.addOperand(Inst.getOperand(2));
TmpInst.addOperand(Inst.getOperand(3));
Inst = TmpInst;
+ return true;
}
break;
}
@@ -4320,9 +7136,61 @@ processInstruction(MCInst &Inst,
TmpInst.addOperand(Inst.getOperand(3));
TmpInst.addOperand(Inst.getOperand(4));
Inst = TmpInst;
+ return true;
}
break;
}
+ case ARM::MOVsi: {
+ ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
+ if (SOpc == ARM_AM::rrx) return false;
+ if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
+ // Shifting by zero is accepted as a vanilla 'MOVr'
+ MCInst TmpInst;
+ TmpInst.setOpcode(ARM::MOVr);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(3));
+ TmpInst.addOperand(Inst.getOperand(4));
+ TmpInst.addOperand(Inst.getOperand(5));
+ Inst = TmpInst;
+ return true;
+ }
+ return false;
+ }
+ case ARM::ANDrsi:
+ case ARM::ORRrsi:
+ case ARM::EORrsi:
+ case ARM::BICrsi:
+ case ARM::SUBrsi:
+ case ARM::ADDrsi: {
+ unsigned newOpc;
+ ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
+ if (SOpc == ARM_AM::rrx) return false;
+ switch (Inst.getOpcode()) {
+ default: llvm_unreachable("unexpected opcode!");
+ case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
+ case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
+ case ARM::EORrsi: newOpc = ARM::EORrr; break;
+ case ARM::BICrsi: newOpc = ARM::BICrr; break;
+ case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
+ case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
+ }
+ // If the shift is by zero, use the non-shifted instruction definition.
+ if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
+ MCInst TmpInst;
+ TmpInst.setOpcode(newOpc);
+ TmpInst.addOperand(Inst.getOperand(0));
+ TmpInst.addOperand(Inst.getOperand(1));
+ TmpInst.addOperand(Inst.getOperand(2));
+ TmpInst.addOperand(Inst.getOperand(4));
+ TmpInst.addOperand(Inst.getOperand(5));
+ TmpInst.addOperand(Inst.getOperand(6));
+ Inst = TmpInst;
+ return true;
+ }
+ return false;
+ }
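+ // E.g.,  and r0, r1, r2, lsl #0  is canonicalized to plain
+ //  and r0, r1, r2  (ANDrr); nonzero shifts are left as ANDrsi.
+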
+ case ARM::ITasm:
case ARM::t2IT: {
// The mask bits for all but the first condition are represented as
// the low bit of the condition code value implies 't'. We currently
@@ -4352,13 +7220,14 @@ processInstruction(MCInst &Inst,
break;
}
}
+ return false;
}
unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
// 16-bit thumb arithmetic instructions either require or preclude the 'S'
// suffix depending on whether they're in an IT block or not.
unsigned Opc = Inst.getOpcode();
- MCInstrDesc &MCID = getInstDesc(Opc);
+ const MCInstrDesc &MCID = getInstDesc(Opc);
if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
assert(MCID.hasOptionalDef() &&
"optionally flag setting instruction missing optional def operand");
@@ -4417,14 +7286,23 @@ MatchAndEmitInstruction(SMLoc IDLoc,
}
// Some instructions need post-processing to, for example, tweak which
- // encoding is selected.
- processInstruction(Inst, Operands);
+ // encoding is selected. Loop on it while changes happen so the
+ // individual transformations can chain off each other. E.g.,
+ // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
+ while (processInstruction(Inst, Operands))
+ ;
// Only move forward at the very end so that everything in validate
// and process gets a consistent answer about whether we're in an IT
// block.
forwardITPosition();
+ // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
+ // doesn't actually encode.
+ if (Inst.getOpcode() == ARM::ITasm)
+ return false;
+
+ Inst.setLoc(IDLoc);
Out.EmitInstruction(Inst);
return false;
case Match_MissingFeature:
@@ -4458,7 +7336,6 @@ MatchAndEmitInstruction(SMLoc IDLoc,
}
llvm_unreachable("Implement any new match types added!");
- return true;
}
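
Annotation: the while loop introduced above is a fixed-point iteration: every rewrite in processInstruction returns true when it changed the instruction, so the driver keeps calling until a pass changes nothing and chained transforms (e.g. tPOP to t2LDMIA_UPD to t2LDR_POST) all get a chance to fire. A standalone sketch of the pattern, with a toy rewrite standing in for processInstruction:

    // Hedged sketch of the fixed-point driver above. Each rewrite must make
    // strict progress (here: halve the value), otherwise the loop would spin.
    static bool toyRewrite(unsigned &Inst) {
      if (Inst > 1) { Inst /= 2; return true; }  // "changed the instruction"
      return false;                              // nothing left to simplify
    }

    static void driver(unsigned Inst) {
      while (toyRewrite(Inst))
        ; // loop until no transformation applies, as in MatchAndEmitInstruction
    }
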
/// parseDirective parses the arm specific directives
@@ -4468,12 +7345,20 @@ bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
return parseDirectiveWord(4, DirectiveID.getLoc());
else if (IDVal == ".thumb")
return parseDirectiveThumb(DirectiveID.getLoc());
+ else if (IDVal == ".arm")
+ return parseDirectiveARM(DirectiveID.getLoc());
else if (IDVal == ".thumb_func")
return parseDirectiveThumbFunc(DirectiveID.getLoc());
else if (IDVal == ".code")
return parseDirectiveCode(DirectiveID.getLoc());
else if (IDVal == ".syntax")
return parseDirectiveSyntax(DirectiveID.getLoc());
+ else if (IDVal == ".unreq")
+ return parseDirectiveUnreq(DirectiveID.getLoc());
+ else if (IDVal == ".arch")
+ return parseDirectiveArch(DirectiveID.getLoc());
+ else if (IDVal == ".eabi_attribute")
+ return parseDirectiveEabiAttr(DirectiveID.getLoc());
return true;
}
@@ -4509,9 +7394,22 @@ bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
return Error(L, "unexpected token in directive");
Parser.Lex();
- // TODO: set thumb mode
- // TODO: tell the MC streamer the mode
- // getParser().getStreamer().Emit???();
+ if (!isThumb())
+ SwitchMode();
+ getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
+ return false;
+}
+
+/// parseDirectiveARM
+/// ::= .arm
+bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return Error(L, "unexpected token in directive");
+ Parser.Lex();
+
+ if (isThumb())
+ SwitchMode();
+ getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
return false;
}
@@ -4521,24 +7419,33 @@ bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
bool isMachO = MAI.hasSubsectionsViaSymbols();
StringRef Name;
+ bool needFuncName = true;
- // Darwin asm has function name after .thumb_func direction
+ // Darwin asm optionally has a function name after the .thumb_func directive;
// ELF doesn't
if (isMachO) {
const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
- return Error(L, "unexpected token in .thumb_func directive");
- Name = Tok.getString();
- Parser.Lex(); // Consume the identifier token.
+ if (Tok.isNot(AsmToken::EndOfStatement)) {
+ if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
+ return Error(L, "unexpected token in .thumb_func directive");
+ Name = Tok.getIdentifier();
+ Parser.Lex(); // Consume the identifier token.
+ needFuncName = false;
+ }
}
if (getLexer().isNot(AsmToken::EndOfStatement))
return Error(L, "unexpected token in directive");
- Parser.Lex();
+
+ // Eat the end of statement and any blank lines that follow.
+ while (getLexer().is(AsmToken::EndOfStatement))
+ Parser.Lex();
// FIXME: assuming function name will be the line following .thumb_func
- if (!isMachO) {
- Name = Parser.getTok().getString();
+ // We really should be checking the next symbol definition even if there's
+ // stuff in between.
+ if (needFuncName) {
+ Name = Parser.getTok().getIdentifier();
}
// Mark symbol as a thumb symbol.
@@ -4601,6 +7508,57 @@ bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
return false;
}
+/// parseDirectiveReq
+/// ::= name .req registername
+bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
+ Parser.Lex(); // Eat the '.req' token.
+ unsigned Reg;
+ SMLoc SRegLoc, ERegLoc;
+ if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
+ Parser.EatToEndOfStatement();
+ return Error(SRegLoc, "register name expected");
+ }
+
+ // Shouldn't be anything else.
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
+ Parser.EatToEndOfStatement();
+ return Error(Parser.getTok().getLoc(),
+ "unexpected input in .req directive.");
+ }
+
+ Parser.Lex(); // Consume the EndOfStatement
+
+ if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
+ return Error(SRegLoc, "redefinition of '" + Name +
+ "' does not match original.");
+
+ return false;
+}
+
+/// parseDirectiveUnreq
+/// ::= .unreq registername
+bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Parser.EatToEndOfStatement();
+ return Error(L, "unexpected input in .unreq directive.");
+ }
+ RegisterReqs.erase(Parser.getTok().getIdentifier());
+ Parser.Lex(); // Eat the identifier.
+ return false;
+}
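
Annotation: .req installs a register alias and .unreq removes it. The redefinition check above works because GetOrCreateValue returns the existing entry when the name is already mapped, so a second .req with a different register is detected without a separate lookup. A self-contained sketch of the same bookkeeping, using std::map in place of the llvm::StringMap the parser actually uses:

    #include <map>
    #include <string>

    // Hedged sketch of the .req/.unreq bookkeeping above. defineAlias mirrors
    // GetOrCreateValue: insert if absent, otherwise compare against the
    // existing binding and reject a conflicting redefinition.
    static std::map<std::string, unsigned> RegAliases;

    static bool defineAlias(const std::string &Name, unsigned Reg) {
      auto It = RegAliases.emplace(Name, Reg).first;
      return It->second == Reg; // false => "redefinition ... does not match"
    }

    static void removeAlias(const std::string &Name) {
      RegAliases.erase(Name);   // .unreq: silently ignores unknown names
    }
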
+
+/// parseDirectiveArch
+/// ::= .arch token
+bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
+ return true;
+}
+
+/// parseDirectiveEabiAttr
+/// ::= .eabi_attribute int, int
+bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
+ return true;
+}
+
extern "C" void LLVMInitializeARMAsmLexer();
/// Force static initialization.
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 8f2f813..2f504b7 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -1,4 +1,4 @@
-//===- ARMDisassembler.cpp - Disassembler for ARM/Thumb ISA -----*- C++ -*-===//
+//===-- ARMDisassembler.cpp - Disassembler for ARM/Thumb ISA --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,17 +9,16 @@
#define DEBUG_TYPE "arm-disassembler"
-#include "ARM.h"
-#include "ARMRegisterInfo.h"
-#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/MC/EDInstInfo.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/ErrorHandling.h"
@@ -52,7 +51,7 @@ public:
raw_ostream &cStream) const;
/// getEDInfo - See MCDisassembler.
- EDInstInfo *getEDInfo() const;
+ const EDInstInfo *getEDInfo() const;
private:
};
@@ -77,7 +76,7 @@ public:
raw_ostream &cStream) const;
/// getEDInfo - See MCDisassembler.
- EDInstInfo *getEDInfo() const;
+ const EDInstInfo *getEDInfo() const;
private:
mutable std::vector<unsigned> ITBlock;
DecodeStatus AddThumbPredicate(MCInst&) const;
@@ -97,224 +96,236 @@ static bool Check(DecodeStatus &Out, DecodeStatus In) {
Out = In;
return false;
}
- return false;
+ llvm_unreachable("Invalid DecodeStatus!");
}
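
Annotation: Check folds a per-operand DecodeStatus into a running one: Success leaves the accumulator alone, SoftFail taints it but lets decoding continue, and Fail both taints it and tells the caller to bail out. A self-contained sketch of the contract, with the enum defined locally (the values are an assumption) so it compiles on its own:

    // Hedged sketch of the Check() contract used throughout this file.
    enum SketchStatus { SkFail = 0, SkSoftFail = 1, SkSuccess = 3 };

    static bool check(SketchStatus &Out, SketchStatus In) {
      switch (In) {
      case SkSuccess:  return true;            // accumulator untouched
      case SkSoftFail: Out = In; return true;  // taint, but keep decoding
      case SkFail:     Out = In; return false; // taint and stop
      }
      return false; // unreachable; the real code calls llvm_unreachable here
    }

    // Typical use: if (!check(S, decodeOperand(...))) return SkFail;
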
// Forward declare these because the autogenerated code will reference them.
// Definitions are further down.
-static DecodeStatus DecodeGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeGPRnopcRegisterClass(llvm::MCInst &Inst,
+static DecodeStatus DecodeGPRnopcRegisterClass(MCInst &Inst,
unsigned RegNo, uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodetGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodetGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodetcGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodetcGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecoderGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecoderGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeSPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeDPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeDPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeDPR_8RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeDPR_8RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeDPR_VFP2RegisterClass(llvm::MCInst &Inst,
+static DecodeStatus DecodeDPR_VFP2RegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeQPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeQPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeDPairRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeDPairSpacedRegisterClass(MCInst &Inst,
+ unsigned RegNo, uint64_t Address,
+ const void *Decoder);
-static DecodeStatus DecodePredicateOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodePredicateOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeCCOutOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeCCOutOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSOImmOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeSOImmOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeRegListOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSPRRegListOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeSPRRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeDPRRegListOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeDPRRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeBitfieldMaskOperand(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeBitfieldMaskOperand(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeCopMemInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeCopMemInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeAddrMode2IdxInstruction(llvm::MCInst &Inst,
+static DecodeStatus DecodeAddrMode2IdxInstruction(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeSORegMemOperand(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSORegMemOperand(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeAddrMode3Instruction(llvm::MCInst &Inst,unsigned Insn,
+static DecodeStatus DecodeAddrMode3Instruction(MCInst &Inst,unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSORegImmOperand(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSORegImmOperand(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSORegRegOperand(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSORegRegOperand(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeMemMultipleWritebackInstruction(llvm::MCInst & Inst,
+static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeT2MOVTWInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeT2MOVTWInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeArmMOVTWInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeArmMOVTWInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSMLAInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSMLAInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeCPSInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeCPSInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeT2CPSInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2CPSInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeAddrModeImm12Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeAddrModeImm12Operand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeAddrMode5Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeAddrMode5Operand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeAddrMode7Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeAddrMode7Operand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2BInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeBranchImmInstruction(llvm::MCInst &Inst,unsigned Insn,
+static DecodeStatus DecodeBranchImmInstruction(MCInst &Inst,unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVCVTImmOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeAddrMode6Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeAddrMode6Operand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeVLDInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLDInstruction(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeVSTInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeVLD1DupInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLD1DupInstruction(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeVLD2DupInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLD2DupInstruction(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeVLD3DupInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLD3DupInstruction(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeVLD4DupInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLD4DupInstruction(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeNEONModImmInstruction(MCInst &Inst,unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeNEONModImmInstruction(llvm::MCInst &Inst,unsigned Val,
+static DecodeStatus DecodeVSHLMaxInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVSHLMaxInstruction(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeShiftRight8Imm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeShiftRight8Imm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeShiftRight16Imm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeShiftRight16Imm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeShiftRight32Imm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeShiftRight32Imm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeShiftRight64Imm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeShiftRight64Imm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeTBLInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeTBLInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodePostIdxReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodePostIdxReg(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeCoprocessor(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeCoprocessor(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeMemBarrierOption(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeMemBarrierOption(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeMSRMask(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeMSRMask(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeDoubleRegLoad(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeDoubleRegLoad(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeDoubleRegStore(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeDoubleRegStore(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeLDRPreImm(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeLDRPreImm(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeLDRPreReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeLDRPreReg(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSTRPreImm(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSTRPreImm(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSTRPreReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeSTRPreReg(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD1LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLD1LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD2LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLD2LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD3LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLD3LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD4LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVLD4LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVST1LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVST1LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVST2LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVST2LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVST3LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVST3LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVST4LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVST4LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVMOVSRR(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVMOVSRR(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVMOVRRS(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVMOVRRS(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSwap(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeVCVTD(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbAddSpecialReg(llvm::MCInst &Inst, uint16_t Insn,
+
+static DecodeStatus DecodeThumbAddSpecialReg(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbBROperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbBROperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2BROperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2BROperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbCmpBROperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbCmpBROperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbAddrModeRR(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbAddrModeRR(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbAddrModeIS(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbAddrModeIS(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbAddrModePC(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbAddrModePC(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbAddrModeSP(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbAddrModeSP(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2AddrModeSOReg(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2AddrModeSOReg(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2LoadShift(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2Imm8S4(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2Imm8S4(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2AddrModeImm8s4(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2AddrModeImm8s4(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2AddrModeImm0_1020s4(llvm::MCInst &Inst,unsigned Val,
+static DecodeStatus DecodeT2AddrModeImm0_1020s4(MCInst &Inst,unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2Imm8(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2Imm8(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2AddrModeImm8(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2AddrModeImm8(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbAddSPImm(llvm::MCInst &Inst, uint16_t Val,
+static DecodeStatus DecodeThumbAddSPImm(MCInst &Inst, uint16_t Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbAddSPReg(llvm::MCInst &Inst, uint16_t Insn,
+static DecodeStatus DecodeThumbAddSPReg(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbCPS(llvm::MCInst &Inst, uint16_t Insn,
+static DecodeStatus DecodeThumbCPS(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbBLXOffset(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeThumbBLXOffset(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2AddrModeImm12(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2AddrModeImm12(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbTableBranch(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbTableBranch(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumb2BCCInstruction(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumb2BCCInstruction(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2SOImm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2SOImm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbBCCTargetOperand(llvm::MCInst &Inst,unsigned Val,
+static DecodeStatus DecodeThumbBCCTargetOperand(MCInst &Inst,unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeThumbBLTargetOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbBLTargetOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeIT(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeIT(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2LDRDPreInstruction(llvm::MCInst &Inst,unsigned Insn,
+static DecodeStatus DecodeT2LDRDPreInstruction(MCInst &Inst,unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2STRDPreInstruction(llvm::MCInst &Inst,unsigned Insn,
+static DecodeStatus DecodeT2STRDPreInstruction(MCInst &Inst,unsigned Insn,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2Adr(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2Adr(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2LdStPre(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2LdStPre(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeT2ShifterImmOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2ShifterImmOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
-
-
+static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder);
#include "ARMGenDisassemblerTables.inc"
#include "ARMGenInstrInfo.inc"
#include "ARMGenEDInfo.inc"
@@ -327,11 +338,11 @@ static MCDisassembler *createThumbDisassembler(const Target &T, const MCSubtarge
return new ThumbDisassembler(STI);
}
-EDInstInfo *ARMDisassembler::getEDInfo() const {
+const EDInstInfo *ARMDisassembler::getEDInfo() const {
return instInfoARM;
}
-EDInstInfo *ThumbDisassembler::getEDInfo() const {
+const EDInstInfo *ThumbDisassembler::getEDInfo() const {
return instInfoARM;
}
@@ -415,7 +426,7 @@ DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
namespace llvm {
-extern MCInstrDesc ARMInsts[];
+extern const MCInstrDesc ARMInsts[];
}
/// tryAddingSymbolicOperand - trys to add a symbolic operand in place of the
@@ -435,40 +446,38 @@ static bool tryAddingSymbolicOperand(uint64_t Address, int32_t Value,
MCInst &MI, const void *Decoder) {
const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
LLVMOpInfoCallback getOpInfo = Dis->getLLVMOpInfoCallback();
- if (!getOpInfo)
- return false;
-
struct LLVMOpInfo1 SymbolicOp;
+ memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
SymbolicOp.Value = Value;
void *DisInfo = Dis->getDisInfoBlock();
- if (!getOpInfo(DisInfo, Address, 0 /* Offset */, InstSize, 1, &SymbolicOp)) {
- if (isBranch) {
- LLVMSymbolLookupCallback SymbolLookUp =
- Dis->getLLVMSymbolLookupCallback();
- if (SymbolLookUp) {
- uint64_t ReferenceType;
- ReferenceType = LLVMDisassembler_ReferenceType_In_Branch;
- const char *ReferenceName;
- const char *Name = SymbolLookUp(DisInfo, Value, &ReferenceType, Address,
- &ReferenceName);
- if (Name) {
- SymbolicOp.AddSymbol.Name = Name;
- SymbolicOp.AddSymbol.Present = true;
- SymbolicOp.Value = 0;
- }
- else {
- SymbolicOp.Value = Value;
- }
- if(ReferenceType == LLVMDisassembler_ReferenceType_Out_SymbolStub)
- (*Dis->CommentStream) << "symbol stub for: " << ReferenceName;
- }
- else {
- return false;
- }
- }
- else {
+
+ if (!getOpInfo ||
+ !getOpInfo(DisInfo, Address, 0 /* Offset */, InstSize, 1, &SymbolicOp)) {
+ // Clear SymbolicOp.Value from above and also all other fields.
+ memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
+ LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback();
+ if (!SymbolLookUp)
return false;
+ uint64_t ReferenceType;
+ if (isBranch)
+ ReferenceType = LLVMDisassembler_ReferenceType_In_Branch;
+ else
+ ReferenceType = LLVMDisassembler_ReferenceType_InOut_None;
+ const char *ReferenceName;
+ const char *Name = SymbolLookUp(DisInfo, Value, &ReferenceType, Address,
+ &ReferenceName);
+ if (Name) {
+ SymbolicOp.AddSymbol.Name = Name;
+ SymbolicOp.AddSymbol.Present = true;
}
+ // For branches always create an MCExpr so it gets printed as hex address.
+ else if (isBranch) {
+ SymbolicOp.Value = Value;
+ }
+ if (ReferenceType == LLVMDisassembler_ReferenceType_Out_SymbolStub)
+ (*Dis->CommentStream) << "symbol stub for: " << ReferenceName;
+ if (!Name && !isBranch)
+ return false;
}
MCContext *Ctx = Dis->getMCContext();
@@ -527,8 +536,8 @@ static bool tryAddingSymbolicOperand(uint64_t Address, int32_t Value,
MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateLower16(Expr, *Ctx)));
else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_None)
MI.addOperand(MCOperand::CreateExpr(Expr));
- else
- assert(0 && "bad SymbolicOp.VariantKind");
+ else
+ llvm_unreachable("bad SymbolicOp.VariantKind");
return true;
}
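
Annotation: the rewritten block above folds the two callback paths together: if the plugin's op-info callback cannot describe the operand, the disassembler falls back to the symbol-lookup callback, and only branches get a bare-address MCExpr when no symbol is found. A compact standalone sketch of that decision, with toy callback types standing in for the LLVM C API ones (LLVMOpInfoCallback, LLVMSymbolLookupCallback):

    #include <cstdint>

    // Hedged sketch of the fallback order implemented above; the callback
    // signatures here are toys, not the llvm-c/Disassembler.h ones.
    typedef bool (*ToyOpInfo)(uint64_t Addr, int32_t Val);
    typedef const char *(*ToySymLookup)(int32_t Val);

    static bool wantSymbolicOperand(ToyOpInfo OpInfo, ToySymLookup Lookup,
                                    uint64_t Addr, int32_t Val, bool IsBranch) {
      if (OpInfo && OpInfo(Addr, Val))
        return true;                    // plugin described the operand itself
      if (!Lookup)
        return false;                   // nobody left to ask
      if (Lookup(Val))
        return true;                    // found a symbol name for the value
      return IsBranch;                  // branches still print a hex address
    }
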
@@ -543,7 +552,7 @@ static bool tryAddingSymbolicOperand(uint64_t Address, int32_t Value,
/// a literal 'C' string if the referenced address of the literal pool's entry
/// is an address into a section with 'C' string literals.
static void tryAddingPcLoadReferenceComment(uint64_t Address, int Value,
- const void *Decoder) {
+ const void *Decoder) {
const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback();
if (SymbolLookUp) {
@@ -841,14 +850,14 @@ extern "C" void LLVMInitializeARMDisassembler() {
createThumbDisassembler);
}
-static const unsigned GPRDecoderTable[] = {
+static const uint16_t GPRDecoderTable[] = {
ARM::R0, ARM::R1, ARM::R2, ARM::R3,
ARM::R4, ARM::R5, ARM::R6, ARM::R7,
ARM::R8, ARM::R9, ARM::R10, ARM::R11,
ARM::R12, ARM::SP, ARM::LR, ARM::PC
};
-static DecodeStatus DecodeGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo > 15)
return MCDisassembler::Fail;
@@ -859,20 +868,26 @@ static DecodeStatus DecodeGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
}
static DecodeStatus
-DecodeGPRnopcRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+DecodeGPRnopcRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
- if (RegNo == 15) return MCDisassembler::Fail;
- return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
+ DecodeStatus S = MCDisassembler::Success;
+
+ if (RegNo == 15)
+ S = MCDisassembler::SoftFail;
+
+ Check(S, DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder));
+
+ return S;
}
-static DecodeStatus DecodetGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodetGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo > 7)
return MCDisassembler::Fail;
return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
}
-static DecodeStatus DecodetcGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodetcGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
unsigned Register = 0;
switch (RegNo) {
@@ -902,13 +917,13 @@ static DecodeStatus DecodetcGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
return MCDisassembler::Success;
}
-static DecodeStatus DecoderGPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecoderGPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo == 13 || RegNo == 15) return MCDisassembler::Fail;
return DecodeGPRRegisterClass(Inst, RegNo, Address, Decoder);
}
-static const unsigned SPRDecoderTable[] = {
+static const uint16_t SPRDecoderTable[] = {
ARM::S0, ARM::S1, ARM::S2, ARM::S3,
ARM::S4, ARM::S5, ARM::S6, ARM::S7,
ARM::S8, ARM::S9, ARM::S10, ARM::S11,
@@ -919,7 +934,7 @@ static const unsigned SPRDecoderTable[] = {
ARM::S28, ARM::S29, ARM::S30, ARM::S31
};
-static DecodeStatus DecodeSPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeSPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
@@ -929,7 +944,7 @@ static DecodeStatus DecodeSPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
return MCDisassembler::Success;
}
-static const unsigned DPRDecoderTable[] = {
+static const uint16_t DPRDecoderTable[] = {
ARM::D0, ARM::D1, ARM::D2, ARM::D3,
ARM::D4, ARM::D5, ARM::D6, ARM::D7,
ARM::D8, ARM::D9, ARM::D10, ARM::D11,
@@ -940,7 +955,7 @@ static const unsigned DPRDecoderTable[] = {
ARM::D28, ARM::D29, ARM::D30, ARM::D31
};
-static DecodeStatus DecodeDPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeDPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
@@ -950,7 +965,7 @@ static DecodeStatus DecodeDPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeDPR_8RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeDPR_8RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo > 7)
return MCDisassembler::Fail;
@@ -958,14 +973,14 @@ static DecodeStatus DecodeDPR_8RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
}
static DecodeStatus
-DecodeDPR_VFP2RegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+DecodeDPR_VFP2RegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo > 15)
return MCDisassembler::Fail;
return DecodeDPRRegisterClass(Inst, RegNo, Address, Decoder);
}
-static const unsigned QPRDecoderTable[] = {
+static const uint16_t QPRDecoderTable[] = {
ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7,
ARM::Q8, ARM::Q9, ARM::Q10, ARM::Q11,
@@ -973,7 +988,7 @@ static const unsigned QPRDecoderTable[] = {
};
-static DecodeStatus DecodeQPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeQPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address, const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
@@ -984,7 +999,49 @@ static DecodeStatus DecodeQPRRegisterClass(llvm::MCInst &Inst, unsigned RegNo,
return MCDisassembler::Success;
}
-static DecodeStatus DecodePredicateOperand(llvm::MCInst &Inst, unsigned Val,
+static const uint16_t DPairDecoderTable[] = {
+ ARM::Q0, ARM::D1_D2, ARM::Q1, ARM::D3_D4, ARM::Q2, ARM::D5_D6,
+ ARM::Q3, ARM::D7_D8, ARM::Q4, ARM::D9_D10, ARM::Q5, ARM::D11_D12,
+ ARM::Q6, ARM::D13_D14, ARM::Q7, ARM::D15_D16, ARM::Q8, ARM::D17_D18,
+ ARM::Q9, ARM::D19_D20, ARM::Q10, ARM::D21_D22, ARM::Q11, ARM::D23_D24,
+ ARM::Q12, ARM::D25_D26, ARM::Q13, ARM::D27_D28, ARM::Q14, ARM::D29_D30,
+ ARM::Q15
+};
+
+static DecodeStatus DecodeDPairRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder) {
+ if (RegNo > 30)
+ return MCDisassembler::Fail;
+
+ unsigned Register = DPairDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return MCDisassembler::Success;
+}
+
+static const uint16_t DPairSpacedDecoderTable[] = {
+ ARM::D0_D2, ARM::D1_D3, ARM::D2_D4, ARM::D3_D5,
+ ARM::D4_D6, ARM::D5_D7, ARM::D6_D8, ARM::D7_D9,
+ ARM::D8_D10, ARM::D9_D11, ARM::D10_D12, ARM::D11_D13,
+ ARM::D12_D14, ARM::D13_D15, ARM::D14_D16, ARM::D15_D17,
+ ARM::D16_D18, ARM::D17_D19, ARM::D18_D20, ARM::D19_D21,
+ ARM::D20_D22, ARM::D21_D23, ARM::D22_D24, ARM::D23_D25,
+ ARM::D24_D26, ARM::D25_D27, ARM::D26_D28, ARM::D27_D29,
+ ARM::D28_D30, ARM::D29_D31
+};
+
+static DecodeStatus DecodeDPairSpacedRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 29)
+ return MCDisassembler::Fail;
+
+ unsigned Register = DPairSpacedDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::CreateReg(Register));
+ return MCDisassembler::Success;
+}
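
Annotation: the two tables above encode the index arithmetic of D-register pairs: adjacent pairs starting at an even register are just the Q registers (Q0 = D0_D1), adjacent pairs starting at an odd register get dedicated Dn_Dn+1 names, and "spaced" pairs skip one register (Dn and Dn+2). A standalone sketch that reproduces the index mapping, with register names as strings purely for illustration:

    #include <cstdio>

    // Hedged sketch of the table layout above: entry i of the adjacent-pair
    // table covers registers (i, i+1), so even i aliases Q(i/2); entry i of
    // the spaced table covers (i, i+2). Bounds match the decoders: 0..30 for
    // adjacent pairs, 0..29 for spaced pairs.
    static void printPair(unsigned RegNo, bool Spaced) {
      if (Spaced)
        std::printf("D%u_D%u\n", RegNo, RegNo + 2);   // DPairSpaced
      else if (RegNo % 2 == 0)
        std::printf("Q%u\n", RegNo / 2);              // even start => Q reg
      else
        std::printf("D%u_D%u\n", RegNo, RegNo + 1);   // odd start
    }
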
+
+static DecodeStatus DecodePredicateOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
if (Val == 0xF) return MCDisassembler::Fail;
// AL predicate is not allowed on Thumb1 branches.
@@ -998,7 +1055,7 @@ static DecodeStatus DecodePredicateOperand(llvm::MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeCCOutOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeCCOutOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
if (Val)
Inst.addOperand(MCOperand::CreateReg(ARM::CPSR));
@@ -1007,7 +1064,7 @@ static DecodeStatus DecodeCCOutOperand(llvm::MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeSOImmOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeSOImmOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
uint32_t imm = Val & 0xFF;
uint32_t rot = (Val & 0xF00) >> 7;
@@ -1016,7 +1073,7 @@ static DecodeStatus DecodeSOImmOperand(llvm::MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeSORegImmOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeSORegImmOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1053,7 +1110,7 @@ static DecodeStatus DecodeSORegImmOperand(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeSORegRegOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeSORegRegOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1088,7 +1145,7 @@ static DecodeStatus DecodeSORegRegOperand(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeRegListOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1123,7 +1180,7 @@ static DecodeStatus DecodeRegListOperand(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeSPRRegListOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeSPRRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1140,7 +1197,7 @@ static DecodeStatus DecodeSPRRegListOperand(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeDPRRegListOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeDPRRegListOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1157,7 +1214,7 @@ static DecodeStatus DecodeDPRRegListOperand(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeBitfieldMaskOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeBitfieldMaskOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
// This operand encodes a mask of contiguous zeros between a specified MSB
// and LSB. To decode it, we create the mask of all bits MSB-and-lower,
@@ -1178,7 +1235,7 @@ static DecodeStatus DecodeBitfieldMaskOperand(llvm::MCInst &Inst, unsigned Val,
return S;
}
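
Annotation: the comment above describes the BFC/BFI mask construction: take the mask of all bits at or below the MSB, clear the bits below the LSB, and the surviving ones mark the contiguous field. A self-contained sketch of that arithmetic; the operand's exact bit layout is not shown in this hunk, so the msb/lsb extraction is left to the caller:

    #include <cstdint>

    // Hedged sketch of the bitfield mask described above. For msb=7, lsb=4
    // this yields 0x000000F0: bits 7..4 form the contiguous field.
    static uint32_t bitfieldMask(uint32_t msb, uint32_t lsb) {
      uint32_t atOrBelowMsb = (msb == 31) ? 0xFFFFFFFFu : ((1u << (msb + 1)) - 1);
      uint32_t belowLsb     = (1u << lsb) - 1;
      return atOrBelowMsb & ~belowLsb;  // ones exactly on bits msb..lsb
    }
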
-static DecodeStatus DecodeCopMemInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeCopMemInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1234,16 +1291,6 @@ static DecodeStatus DecodeCopMemInstruction(llvm::MCInst &Inst, unsigned Insn,
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
- unsigned P = fieldFromInstruction32(Insn, 24, 1);
- unsigned W = fieldFromInstruction32(Insn, 21, 1);
-
- bool writeback = (P == 0) || (W == 1);
- unsigned idx_mode = 0;
- if (P && writeback)
- idx_mode = ARMII::IndexModePre;
- else if (!P && writeback)
- idx_mode = ARMII::IndexModePost;
-
switch (Inst.getOpcode()) {
case ARM::t2LDC2_OFFSET:
case ARM::t2LDC2L_OFFSET:
@@ -1333,7 +1380,7 @@ static DecodeStatus DecodeCopMemInstruction(llvm::MCInst &Inst, unsigned Insn,
}
static DecodeStatus
-DecodeAddrMode2IdxInstruction(llvm::MCInst &Inst, unsigned Insn,
+DecodeAddrMode2IdxInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1436,7 +1483,7 @@ DecodeAddrMode2IdxInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeSORegMemOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeSORegMemOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1477,7 +1524,7 @@ static DecodeStatus DecodeSORegMemOperand(llvm::MCInst &Inst, unsigned Val,
}
static DecodeStatus
-DecodeAddrMode3Instruction(llvm::MCInst &Inst, unsigned Insn,
+DecodeAddrMode3Instruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1490,6 +1537,7 @@ DecodeAddrMode3Instruction(llvm::MCInst &Inst, unsigned Insn,
unsigned pred = fieldFromInstruction32(Insn, 28, 4);
unsigned W = fieldFromInstruction32(Insn, 21, 1);
unsigned P = fieldFromInstruction32(Insn, 24, 1);
+ unsigned Rt2 = Rt + 1;
bool writeback = (W == 1) | (P == 0);
@@ -1501,7 +1549,86 @@ DecodeAddrMode3Instruction(llvm::MCInst &Inst, unsigned Insn,
case ARM::LDRD:
case ARM::LDRD_PRE:
case ARM::LDRD_POST:
- if (Rt & 0x1) return MCDisassembler::Fail;
+ if (Rt & 0x1) S = MCDisassembler::SoftFail;
+ break;
+ default:
+ break;
+ }
+ switch (Inst.getOpcode()) {
+ case ARM::STRD:
+ case ARM::STRD_PRE:
+ case ARM::STRD_POST:
+ if (P == 0 && W == 1)
+ S = MCDisassembler::SoftFail;
+
+ if (writeback && (Rn == 15 || Rn == Rt || Rn == Rt2))
+ S = MCDisassembler::SoftFail;
+ if (type && Rm == 15)
+ S = MCDisassembler::SoftFail;
+ if (Rt2 == 15)
+ S = MCDisassembler::SoftFail;
+ if (!type && fieldFromInstruction32(Insn, 8, 4))
+ S = MCDisassembler::SoftFail;
+ break;
+ case ARM::STRH:
+ case ARM::STRH_PRE:
+ case ARM::STRH_POST:
+ if (Rt == 15)
+ S = MCDisassembler::SoftFail;
+ if (writeback && (Rn == 15 || Rn == Rt))
+ S = MCDisassembler::SoftFail;
+ if (!type && Rm == 15)
+ S = MCDisassembler::SoftFail;
+ break;
+ case ARM::LDRD:
+ case ARM::LDRD_PRE:
+ case ARM::LDRD_POST:
+ if (type && Rn == 15) {
+ if (Rt2 == 15)
+ S = MCDisassembler::SoftFail;
+ break;
+ }
+ if (P == 0 && W == 1)
+ S = MCDisassembler::SoftFail;
+ if (!type && (Rt2 == 15 || Rm == 15 || Rm == Rt || Rm == Rt2))
+ S = MCDisassembler::SoftFail;
+ if (!type && writeback && Rn == 15)
+ S = MCDisassembler::SoftFail;
+ if (writeback && (Rn == Rt || Rn == Rt2))
+ S = MCDisassembler::SoftFail;
+ break;
+ case ARM::LDRH:
+ case ARM::LDRH_PRE:
+ case ARM::LDRH_POST:
+ if (type && Rn == 15) {
+ if (Rt == 15)
+ S = MCDisassembler::SoftFail;
+ break;
+ }
+ if (Rt == 15)
+ S = MCDisassembler::SoftFail;
+ if (!type && Rm == 15)
+ S = MCDisassembler::SoftFail;
+ if (!type && writeback && (Rn == 15 || Rn == Rt))
+ S = MCDisassembler::SoftFail;
+ break;
+ case ARM::LDRSH:
+ case ARM::LDRSH_PRE:
+ case ARM::LDRSH_POST:
+ case ARM::LDRSB:
+ case ARM::LDRSB_PRE:
+ case ARM::LDRSB_POST:
+ if (type && Rn == 15) {
+ if (Rt == 15)
+ S = MCDisassembler::SoftFail;
+ break;
+ }
+ if (type && (Rt == 15 || (writeback && Rn == Rt)))
+ S = MCDisassembler::SoftFail;
+ if (!type && (Rt == 15 || Rm == 15))
+ S = MCDisassembler::SoftFail;
+ if (!type && writeback && (Rn == 15 || Rn == Rt))
+ S = MCDisassembler::SoftFail;
break;
default:
break;
@@ -1588,7 +1715,7 @@ DecodeAddrMode3Instruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeRFEInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeRFEInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1617,7 +1744,7 @@ static DecodeStatus DecodeRFEInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeMemMultipleWritebackInstruction(llvm::MCInst &Inst,
+static DecodeStatus DecodeMemMultipleWritebackInstruction(MCInst &Inst,
unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1702,7 +1829,7 @@ static DecodeStatus DecodeMemMultipleWritebackInstruction(llvm::MCInst &Inst,
return S;
}
-static DecodeStatus DecodeCPSInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeCPSInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
unsigned imod = fieldFromInstruction32(Insn, 18, 2);
unsigned M = fieldFromInstruction32(Insn, 17, 1);
@@ -1742,7 +1869,7 @@ static DecodeStatus DecodeCPSInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeT2CPSInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeT2CPSInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
unsigned imod = fieldFromInstruction32(Insn, 9, 2);
unsigned M = fieldFromInstruction32(Insn, 8, 1);
@@ -1782,7 +1909,7 @@ static DecodeStatus DecodeT2CPSInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeT2MOVTWInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeT2MOVTWInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1806,7 +1933,7 @@ static DecodeStatus DecodeT2MOVTWInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeArmMOVTWInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeArmMOVTWInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1832,7 +1959,7 @@ static DecodeStatus DecodeArmMOVTWInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeSMLAInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSMLAInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1860,7 +1987,7 @@ static DecodeStatus DecodeSMLAInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeAddrModeImm12Operand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeAddrModeImm12Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1880,7 +2007,7 @@ static DecodeStatus DecodeAddrModeImm12Operand(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeAddrMode5Operand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeAddrMode5Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1899,13 +2026,28 @@ static DecodeStatus DecodeAddrMode5Operand(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeAddrMode7Operand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeAddrMode7Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
return DecodeGPRRegisterClass(Inst, Val, Address, Decoder);
}
static DecodeStatus
-DecodeBranchImmInstruction(llvm::MCInst &Inst, unsigned Insn,
+DecodeT2BInstruction(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+ unsigned imm = (fieldFromInstruction32(Insn, 0, 11) << 0) |
+ (fieldFromInstruction32(Insn, 11, 1) << 18) |
+ (fieldFromInstruction32(Insn, 13, 1) << 17) |
+ (fieldFromInstruction32(Insn, 16, 6) << 11) |
+ (fieldFromInstruction32(Insn, 26, 1) << 19);
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<20>(imm<<1) + 4,
+ true, 4, Inst, Decoder))
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<20>(imm << 1)));
+ return S;
+}
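
Annotation: DecodeT2BInstruction reassembles a branch offset whose bits are scattered across the encoding, then shifts left one (Thumb targets are halfword aligned) and sign-extends. A self-contained sketch of the sign-extension helper, matching the semantics of LLVM's SignExtend32<B>:

    #include <cstdint>

    // Hedged sketch of SignExtend32<B>(x): interpret the low B bits of x as a
    // signed two's-complement number. E.g. signExtend32(0xFFFFF, 20) == -1,
    // so a reassembled offset with its top bit set becomes a backward branch.
    static int32_t signExtend32(uint32_t x, unsigned B) {
      unsigned shift = 32 - B;
      return (int32_t)(x << shift) >> shift;  // arithmetic shift restores sign
    }
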
+
+static DecodeStatus
+DecodeBranchImmInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1915,12 +2057,14 @@ DecodeBranchImmInstruction(llvm::MCInst &Inst, unsigned Insn,
if (pred == 0xF) {
Inst.setOpcode(ARM::BLXi);
imm |= fieldFromInstruction32(Insn, 24, 1) << 1;
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<26>(imm) + 8,
+ true, 4, Inst, Decoder))
Inst.addOperand(MCOperand::CreateImm(SignExtend32<26>(imm)));
return S;
}
- if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<26>(imm) + 8, true,
- 4, Inst, Decoder))
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<26>(imm) + 8,
+ true, 4, Inst, Decoder))
Inst.addOperand(MCOperand::CreateImm(SignExtend32<26>(imm)));
if (!Check(S, DecodePredicateOperand(Inst, pred, Address, Decoder)))
return MCDisassembler::Fail;
@@ -1929,13 +2073,7 @@ DecodeBranchImmInstruction(llvm::MCInst &Inst, unsigned Insn,
}
-static DecodeStatus DecodeVCVTImmOperand(llvm::MCInst &Inst, unsigned Val,
- uint64_t Address, const void *Decoder) {
- Inst.addOperand(MCOperand::CreateImm(64 - Val));
- return MCDisassembler::Success;
-}
-
-static DecodeStatus DecodeAddrMode6Operand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeAddrMode6Operand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1952,7 +2090,7 @@ static DecodeStatus DecodeAddrMode6Operand(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeVLDInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLDInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -1964,47 +2102,38 @@ static DecodeStatus DecodeVLDInstruction(llvm::MCInst &Inst, unsigned Insn,
unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
// First output register
- if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
- return MCDisassembler::Fail;
+ switch (Inst.getOpcode()) {
+ case ARM::VLD1q16: case ARM::VLD1q32: case ARM::VLD1q64: case ARM::VLD1q8:
+ case ARM::VLD1q16wb_fixed: case ARM::VLD1q16wb_register:
+ case ARM::VLD1q32wb_fixed: case ARM::VLD1q32wb_register:
+ case ARM::VLD1q64wb_fixed: case ARM::VLD1q64wb_register:
+ case ARM::VLD1q8wb_fixed: case ARM::VLD1q8wb_register:
+ case ARM::VLD2d16: case ARM::VLD2d32: case ARM::VLD2d8:
+ case ARM::VLD2d16wb_fixed: case ARM::VLD2d16wb_register:
+ case ARM::VLD2d32wb_fixed: case ARM::VLD2d32wb_register:
+ case ARM::VLD2d8wb_fixed: case ARM::VLD2d8wb_register:
+ if (!Check(S, DecodeDPairRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ break;
+ case ARM::VLD2b16:
+ case ARM::VLD2b32:
+ case ARM::VLD2b8:
+ case ARM::VLD2b16wb_fixed:
+ case ARM::VLD2b16wb_register:
+ case ARM::VLD2b32wb_fixed:
+ case ARM::VLD2b32wb_register:
+ case ARM::VLD2b8wb_fixed:
+ case ARM::VLD2b8wb_register:
+ if (!Check(S, DecodeDPairSpacedRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ break;
+ default:
+ if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ }
// Second output register
switch (Inst.getOpcode()) {
- case ARM::VLD1q8:
- case ARM::VLD1q16:
- case ARM::VLD1q32:
- case ARM::VLD1q64:
- case ARM::VLD1q8_UPD:
- case ARM::VLD1q16_UPD:
- case ARM::VLD1q32_UPD:
- case ARM::VLD1q64_UPD:
- case ARM::VLD1d8T:
- case ARM::VLD1d16T:
- case ARM::VLD1d32T:
- case ARM::VLD1d64T:
- case ARM::VLD1d8T_UPD:
- case ARM::VLD1d16T_UPD:
- case ARM::VLD1d32T_UPD:
- case ARM::VLD1d64T_UPD:
- case ARM::VLD1d8Q:
- case ARM::VLD1d16Q:
- case ARM::VLD1d32Q:
- case ARM::VLD1d64Q:
- case ARM::VLD1d8Q_UPD:
- case ARM::VLD1d16Q_UPD:
- case ARM::VLD1d32Q_UPD:
- case ARM::VLD1d64Q_UPD:
- case ARM::VLD2d8:
- case ARM::VLD2d16:
- case ARM::VLD2d32:
- case ARM::VLD2d8_UPD:
- case ARM::VLD2d16_UPD:
- case ARM::VLD2d32_UPD:
- case ARM::VLD2q8:
- case ARM::VLD2q16:
- case ARM::VLD2q32:
- case ARM::VLD2q8_UPD:
- case ARM::VLD2q16_UPD:
- case ARM::VLD2q32_UPD:
case ARM::VLD3d8:
case ARM::VLD3d16:
case ARM::VLD3d32:
@@ -2020,12 +2149,6 @@ static DecodeStatus DecodeVLDInstruction(llvm::MCInst &Inst, unsigned Insn,
if (!Check(S, DecodeDPRRegisterClass(Inst, (Rd+1)%32, Address, Decoder)))
return MCDisassembler::Fail;
break;
- case ARM::VLD2b8:
- case ARM::VLD2b16:
- case ARM::VLD2b32:
- case ARM::VLD2b8_UPD:
- case ARM::VLD2b16_UPD:
- case ARM::VLD2b32_UPD:
case ARM::VLD3q8:
case ARM::VLD3q16:
case ARM::VLD3q32:
@@ -2046,28 +2169,6 @@ static DecodeStatus DecodeVLDInstruction(llvm::MCInst &Inst, unsigned Insn,
// Third output register
switch(Inst.getOpcode()) {
- case ARM::VLD1d8T:
- case ARM::VLD1d16T:
- case ARM::VLD1d32T:
- case ARM::VLD1d64T:
- case ARM::VLD1d8T_UPD:
- case ARM::VLD1d16T_UPD:
- case ARM::VLD1d32T_UPD:
- case ARM::VLD1d64T_UPD:
- case ARM::VLD1d8Q:
- case ARM::VLD1d16Q:
- case ARM::VLD1d32Q:
- case ARM::VLD1d64Q:
- case ARM::VLD1d8Q_UPD:
- case ARM::VLD1d16Q_UPD:
- case ARM::VLD1d32Q_UPD:
- case ARM::VLD1d64Q_UPD:
- case ARM::VLD2q8:
- case ARM::VLD2q16:
- case ARM::VLD2q32:
- case ARM::VLD2q8_UPD:
- case ARM::VLD2q16_UPD:
- case ARM::VLD2q32_UPD:
case ARM::VLD3d8:
case ARM::VLD3d16:
case ARM::VLD3d32:
@@ -2104,20 +2205,6 @@ static DecodeStatus DecodeVLDInstruction(llvm::MCInst &Inst, unsigned Insn,
// Fourth output register
switch (Inst.getOpcode()) {
- case ARM::VLD1d8Q:
- case ARM::VLD1d16Q:
- case ARM::VLD1d32Q:
- case ARM::VLD1d64Q:
- case ARM::VLD1d8Q_UPD:
- case ARM::VLD1d16Q_UPD:
- case ARM::VLD1d32Q_UPD:
- case ARM::VLD1d64Q_UPD:
- case ARM::VLD2q8:
- case ARM::VLD2q16:
- case ARM::VLD2q32:
- case ARM::VLD2q8_UPD:
- case ARM::VLD2q16_UPD:
- case ARM::VLD2q32_UPD:
case ARM::VLD4d8:
case ARM::VLD4d16:
case ARM::VLD4d32:
@@ -2142,31 +2229,58 @@ static DecodeStatus DecodeVLDInstruction(llvm::MCInst &Inst, unsigned Insn,
// Writeback operand
switch (Inst.getOpcode()) {
- case ARM::VLD1d8_UPD:
- case ARM::VLD1d16_UPD:
- case ARM::VLD1d32_UPD:
- case ARM::VLD1d64_UPD:
- case ARM::VLD1q8_UPD:
- case ARM::VLD1q16_UPD:
- case ARM::VLD1q32_UPD:
- case ARM::VLD1q64_UPD:
- case ARM::VLD1d8T_UPD:
- case ARM::VLD1d16T_UPD:
- case ARM::VLD1d32T_UPD:
- case ARM::VLD1d64T_UPD:
- case ARM::VLD1d8Q_UPD:
- case ARM::VLD1d16Q_UPD:
- case ARM::VLD1d32Q_UPD:
- case ARM::VLD1d64Q_UPD:
- case ARM::VLD2d8_UPD:
- case ARM::VLD2d16_UPD:
- case ARM::VLD2d32_UPD:
- case ARM::VLD2q8_UPD:
- case ARM::VLD2q16_UPD:
- case ARM::VLD2q32_UPD:
- case ARM::VLD2b8_UPD:
- case ARM::VLD2b16_UPD:
- case ARM::VLD2b32_UPD:
+ case ARM::VLD1d8wb_fixed:
+ case ARM::VLD1d16wb_fixed:
+ case ARM::VLD1d32wb_fixed:
+ case ARM::VLD1d64wb_fixed:
+ case ARM::VLD1d8wb_register:
+ case ARM::VLD1d16wb_register:
+ case ARM::VLD1d32wb_register:
+ case ARM::VLD1d64wb_register:
+ case ARM::VLD1q8wb_fixed:
+ case ARM::VLD1q16wb_fixed:
+ case ARM::VLD1q32wb_fixed:
+ case ARM::VLD1q64wb_fixed:
+ case ARM::VLD1q8wb_register:
+ case ARM::VLD1q16wb_register:
+ case ARM::VLD1q32wb_register:
+ case ARM::VLD1q64wb_register:
+ case ARM::VLD1d8Twb_fixed:
+ case ARM::VLD1d8Twb_register:
+ case ARM::VLD1d16Twb_fixed:
+ case ARM::VLD1d16Twb_register:
+ case ARM::VLD1d32Twb_fixed:
+ case ARM::VLD1d32Twb_register:
+ case ARM::VLD1d64Twb_fixed:
+ case ARM::VLD1d64Twb_register:
+ case ARM::VLD1d8Qwb_fixed:
+ case ARM::VLD1d8Qwb_register:
+ case ARM::VLD1d16Qwb_fixed:
+ case ARM::VLD1d16Qwb_register:
+ case ARM::VLD1d32Qwb_fixed:
+ case ARM::VLD1d32Qwb_register:
+ case ARM::VLD1d64Qwb_fixed:
+ case ARM::VLD1d64Qwb_register:
+ case ARM::VLD2d8wb_fixed:
+ case ARM::VLD2d16wb_fixed:
+ case ARM::VLD2d32wb_fixed:
+ case ARM::VLD2q8wb_fixed:
+ case ARM::VLD2q16wb_fixed:
+ case ARM::VLD2q32wb_fixed:
+ case ARM::VLD2d8wb_register:
+ case ARM::VLD2d16wb_register:
+ case ARM::VLD2d32wb_register:
+ case ARM::VLD2q8wb_register:
+ case ARM::VLD2q16wb_register:
+ case ARM::VLD2q32wb_register:
+ case ARM::VLD2b8wb_fixed:
+ case ARM::VLD2b16wb_fixed:
+ case ARM::VLD2b32wb_fixed:
+ case ARM::VLD2b8wb_register:
+ case ARM::VLD2b16wb_register:
+ case ARM::VLD2b32wb_register:
+ Inst.addOperand(MCOperand::CreateImm(0));
+ break;
case ARM::VLD3d8_UPD:
case ARM::VLD3d16_UPD:
case ARM::VLD3d32_UPD:
@@ -2191,17 +2305,66 @@ static DecodeStatus DecodeVLDInstruction(llvm::MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail;
// AddrMode6 Offset (register)
- if (Rm == 0xD)
- Inst.addOperand(MCOperand::CreateReg(0));
- else if (Rm != 0xF) {
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
+ switch (Inst.getOpcode()) {
+ default:
+    // The instructions below have been updated to use an explicit
+    // am6offset operand, split between fixed and register offset. For
+    // instructions not yet updated, we need to add an additional reg0
+    // operand for the fixed variant.
+ //
+ // The fixed offset encodes as Rm == 0xd, so we check for that.
+ if (Rm == 0xd) {
+ Inst.addOperand(MCOperand::CreateReg(0));
+ break;
+ }
+ // Fall through to handle the register offset variant.
+ case ARM::VLD1d8wb_fixed:
+ case ARM::VLD1d16wb_fixed:
+ case ARM::VLD1d32wb_fixed:
+ case ARM::VLD1d64wb_fixed:
+ case ARM::VLD1d8Twb_fixed:
+ case ARM::VLD1d16Twb_fixed:
+ case ARM::VLD1d32Twb_fixed:
+ case ARM::VLD1d64Twb_fixed:
+ case ARM::VLD1d8Qwb_fixed:
+ case ARM::VLD1d16Qwb_fixed:
+ case ARM::VLD1d32Qwb_fixed:
+ case ARM::VLD1d64Qwb_fixed:
+ case ARM::VLD1d8wb_register:
+ case ARM::VLD1d16wb_register:
+ case ARM::VLD1d32wb_register:
+ case ARM::VLD1d64wb_register:
+ case ARM::VLD1q8wb_fixed:
+ case ARM::VLD1q16wb_fixed:
+ case ARM::VLD1q32wb_fixed:
+ case ARM::VLD1q64wb_fixed:
+ case ARM::VLD1q8wb_register:
+ case ARM::VLD1q16wb_register:
+ case ARM::VLD1q32wb_register:
+ case ARM::VLD1q64wb_register:
+ // The fixed offset post-increment encodes Rm == 0xd. The no-writeback
+ // variant encodes Rm == 0xf. Anything else is a register offset post-
+ // increment and we need to add the register operand to the instruction.
+ if (Rm != 0xD && Rm != 0xF &&
+ !Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
return MCDisassembler::Fail;
+ break;
+ case ARM::VLD2d8wb_fixed:
+ case ARM::VLD2d16wb_fixed:
+ case ARM::VLD2d32wb_fixed:
+ case ARM::VLD2b8wb_fixed:
+ case ARM::VLD2b16wb_fixed:
+ case ARM::VLD2b32wb_fixed:
+ case ARM::VLD2q8wb_fixed:
+ case ARM::VLD2q16wb_fixed:
+ case ARM::VLD2q32wb_fixed:
+ break;
}
return S;
}
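
// [Editor sketch, not part of the patch] The rewritten writeback handling
// above keys everything on the Rm field of the AddrMode6 offset. A minimal
// summary of that convention, assuming the encodings named in the comments:
static const char *classifyAM6Offset(unsigned Rm) {
  if (Rm == 0xF) return "no writeback";          // plain VLDn/VSTn
  if (Rm == 0xD) return "fixed post-increment";  // ...wb_fixed variants
  return "register post-increment";              // ...wb_register variants
}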
-static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVSTInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2214,31 +2377,60 @@ static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
// Writeback Operand
switch (Inst.getOpcode()) {
- case ARM::VST1d8_UPD:
- case ARM::VST1d16_UPD:
- case ARM::VST1d32_UPD:
- case ARM::VST1d64_UPD:
- case ARM::VST1q8_UPD:
- case ARM::VST1q16_UPD:
- case ARM::VST1q32_UPD:
- case ARM::VST1q64_UPD:
- case ARM::VST1d8T_UPD:
- case ARM::VST1d16T_UPD:
- case ARM::VST1d32T_UPD:
- case ARM::VST1d64T_UPD:
- case ARM::VST1d8Q_UPD:
- case ARM::VST1d16Q_UPD:
- case ARM::VST1d32Q_UPD:
- case ARM::VST1d64Q_UPD:
- case ARM::VST2d8_UPD:
- case ARM::VST2d16_UPD:
- case ARM::VST2d32_UPD:
- case ARM::VST2q8_UPD:
- case ARM::VST2q16_UPD:
- case ARM::VST2q32_UPD:
- case ARM::VST2b8_UPD:
- case ARM::VST2b16_UPD:
- case ARM::VST2b32_UPD:
+ case ARM::VST1d8wb_fixed:
+ case ARM::VST1d16wb_fixed:
+ case ARM::VST1d32wb_fixed:
+ case ARM::VST1d64wb_fixed:
+ case ARM::VST1d8wb_register:
+ case ARM::VST1d16wb_register:
+ case ARM::VST1d32wb_register:
+ case ARM::VST1d64wb_register:
+ case ARM::VST1q8wb_fixed:
+ case ARM::VST1q16wb_fixed:
+ case ARM::VST1q32wb_fixed:
+ case ARM::VST1q64wb_fixed:
+ case ARM::VST1q8wb_register:
+ case ARM::VST1q16wb_register:
+ case ARM::VST1q32wb_register:
+ case ARM::VST1q64wb_register:
+ case ARM::VST1d8Twb_fixed:
+ case ARM::VST1d16Twb_fixed:
+ case ARM::VST1d32Twb_fixed:
+ case ARM::VST1d64Twb_fixed:
+ case ARM::VST1d8Twb_register:
+ case ARM::VST1d16Twb_register:
+ case ARM::VST1d32Twb_register:
+ case ARM::VST1d64Twb_register:
+ case ARM::VST1d8Qwb_fixed:
+ case ARM::VST1d16Qwb_fixed:
+ case ARM::VST1d32Qwb_fixed:
+ case ARM::VST1d64Qwb_fixed:
+ case ARM::VST1d8Qwb_register:
+ case ARM::VST1d16Qwb_register:
+ case ARM::VST1d32Qwb_register:
+ case ARM::VST1d64Qwb_register:
+ case ARM::VST2d8wb_fixed:
+ case ARM::VST2d16wb_fixed:
+ case ARM::VST2d32wb_fixed:
+ case ARM::VST2d8wb_register:
+ case ARM::VST2d16wb_register:
+ case ARM::VST2d32wb_register:
+ case ARM::VST2q8wb_fixed:
+ case ARM::VST2q16wb_fixed:
+ case ARM::VST2q32wb_fixed:
+ case ARM::VST2q8wb_register:
+ case ARM::VST2q16wb_register:
+ case ARM::VST2q32wb_register:
+ case ARM::VST2b8wb_fixed:
+ case ARM::VST2b16wb_fixed:
+ case ARM::VST2b32wb_fixed:
+ case ARM::VST2b8wb_register:
+ case ARM::VST2b16wb_register:
+ case ARM::VST2b32wb_register:
+ if (Rm == 0xF)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateImm(0));
+ break;
case ARM::VST3d8_UPD:
case ARM::VST3d16_UPD:
case ARM::VST3d32_UPD:
@@ -2263,55 +2455,89 @@ static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail;
// AddrMode6 Offset (register)
- if (Rm == 0xD)
- Inst.addOperand(MCOperand::CreateReg(0));
- else if (Rm != 0xF) {
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
- return MCDisassembler::Fail;
+ switch (Inst.getOpcode()) {
+ default:
+ if (Rm == 0xD)
+ Inst.addOperand(MCOperand::CreateReg(0));
+ else if (Rm != 0xF) {
+ if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
+ return MCDisassembler::Fail;
+ }
+ break;
+ case ARM::VST1d8wb_fixed:
+ case ARM::VST1d16wb_fixed:
+ case ARM::VST1d32wb_fixed:
+ case ARM::VST1d64wb_fixed:
+ case ARM::VST1q8wb_fixed:
+ case ARM::VST1q16wb_fixed:
+ case ARM::VST1q32wb_fixed:
+ case ARM::VST1q64wb_fixed:
+ case ARM::VST1d8Twb_fixed:
+ case ARM::VST1d16Twb_fixed:
+ case ARM::VST1d32Twb_fixed:
+ case ARM::VST1d64Twb_fixed:
+ case ARM::VST1d8Qwb_fixed:
+ case ARM::VST1d16Qwb_fixed:
+ case ARM::VST1d32Qwb_fixed:
+ case ARM::VST1d64Qwb_fixed:
+ case ARM::VST2d8wb_fixed:
+ case ARM::VST2d16wb_fixed:
+ case ARM::VST2d32wb_fixed:
+ case ARM::VST2q8wb_fixed:
+ case ARM::VST2q16wb_fixed:
+ case ARM::VST2q32wb_fixed:
+ case ARM::VST2b8wb_fixed:
+ case ARM::VST2b16wb_fixed:
+ case ARM::VST2b32wb_fixed:
+ break;
}
+
// First input register
- if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
- return MCDisassembler::Fail;
+ switch (Inst.getOpcode()) {
+ case ARM::VST1q16:
+ case ARM::VST1q32:
+ case ARM::VST1q64:
+ case ARM::VST1q8:
+ case ARM::VST1q16wb_fixed:
+ case ARM::VST1q16wb_register:
+ case ARM::VST1q32wb_fixed:
+ case ARM::VST1q32wb_register:
+ case ARM::VST1q64wb_fixed:
+ case ARM::VST1q64wb_register:
+ case ARM::VST1q8wb_fixed:
+ case ARM::VST1q8wb_register:
+ case ARM::VST2d16:
+ case ARM::VST2d32:
+ case ARM::VST2d8:
+ case ARM::VST2d16wb_fixed:
+ case ARM::VST2d16wb_register:
+ case ARM::VST2d32wb_fixed:
+ case ARM::VST2d32wb_register:
+ case ARM::VST2d8wb_fixed:
+ case ARM::VST2d8wb_register:
+ if (!Check(S, DecodeDPairRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ break;
+ case ARM::VST2b16:
+ case ARM::VST2b32:
+ case ARM::VST2b8:
+ case ARM::VST2b16wb_fixed:
+ case ARM::VST2b16wb_register:
+ case ARM::VST2b32wb_fixed:
+ case ARM::VST2b32wb_register:
+ case ARM::VST2b8wb_fixed:
+ case ARM::VST2b8wb_register:
+ if (!Check(S, DecodeDPairSpacedRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ break;
+ default:
+ if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ }
// Second input register
switch (Inst.getOpcode()) {
- case ARM::VST1q8:
- case ARM::VST1q16:
- case ARM::VST1q32:
- case ARM::VST1q64:
- case ARM::VST1q8_UPD:
- case ARM::VST1q16_UPD:
- case ARM::VST1q32_UPD:
- case ARM::VST1q64_UPD:
- case ARM::VST1d8T:
- case ARM::VST1d16T:
- case ARM::VST1d32T:
- case ARM::VST1d64T:
- case ARM::VST1d8T_UPD:
- case ARM::VST1d16T_UPD:
- case ARM::VST1d32T_UPD:
- case ARM::VST1d64T_UPD:
- case ARM::VST1d8Q:
- case ARM::VST1d16Q:
- case ARM::VST1d32Q:
- case ARM::VST1d64Q:
- case ARM::VST1d8Q_UPD:
- case ARM::VST1d16Q_UPD:
- case ARM::VST1d32Q_UPD:
- case ARM::VST1d64Q_UPD:
- case ARM::VST2d8:
- case ARM::VST2d16:
- case ARM::VST2d32:
- case ARM::VST2d8_UPD:
- case ARM::VST2d16_UPD:
- case ARM::VST2d32_UPD:
- case ARM::VST2q8:
- case ARM::VST2q16:
- case ARM::VST2q32:
- case ARM::VST2q8_UPD:
- case ARM::VST2q16_UPD:
- case ARM::VST2q32_UPD:
case ARM::VST3d8:
case ARM::VST3d16:
case ARM::VST3d32:
@@ -2327,12 +2553,6 @@ static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
if (!Check(S, DecodeDPRRegisterClass(Inst, (Rd+1)%32, Address, Decoder)))
return MCDisassembler::Fail;
break;
- case ARM::VST2b8:
- case ARM::VST2b16:
- case ARM::VST2b32:
- case ARM::VST2b8_UPD:
- case ARM::VST2b16_UPD:
- case ARM::VST2b32_UPD:
case ARM::VST3q8:
case ARM::VST3q16:
case ARM::VST3q32:
@@ -2354,28 +2574,6 @@ static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
// Third input register
switch (Inst.getOpcode()) {
- case ARM::VST1d8T:
- case ARM::VST1d16T:
- case ARM::VST1d32T:
- case ARM::VST1d64T:
- case ARM::VST1d8T_UPD:
- case ARM::VST1d16T_UPD:
- case ARM::VST1d32T_UPD:
- case ARM::VST1d64T_UPD:
- case ARM::VST1d8Q:
- case ARM::VST1d16Q:
- case ARM::VST1d32Q:
- case ARM::VST1d64Q:
- case ARM::VST1d8Q_UPD:
- case ARM::VST1d16Q_UPD:
- case ARM::VST1d32Q_UPD:
- case ARM::VST1d64Q_UPD:
- case ARM::VST2q8:
- case ARM::VST2q16:
- case ARM::VST2q32:
- case ARM::VST2q8_UPD:
- case ARM::VST2q16_UPD:
- case ARM::VST2q32_UPD:
case ARM::VST3d8:
case ARM::VST3d16:
case ARM::VST3d32:
@@ -2412,20 +2610,6 @@ static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
// Fourth input register
switch (Inst.getOpcode()) {
- case ARM::VST1d8Q:
- case ARM::VST1d16Q:
- case ARM::VST1d32Q:
- case ARM::VST1d64Q:
- case ARM::VST1d8Q_UPD:
- case ARM::VST1d16Q_UPD:
- case ARM::VST1d32Q_UPD:
- case ARM::VST1d64Q_UPD:
- case ARM::VST2q8:
- case ARM::VST2q16:
- case ARM::VST2q32:
- case ARM::VST2q8_UPD:
- case ARM::VST2q16_UPD:
- case ARM::VST2q32_UPD:
case ARM::VST4d8:
case ARM::VST4d16:
case ARM::VST4d32:
@@ -2451,7 +2635,7 @@ static DecodeStatus DecodeVSTInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVLD1DupInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD1DupInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2461,15 +2645,21 @@ static DecodeStatus DecodeVLD1DupInstruction(llvm::MCInst &Inst, unsigned Insn,
unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
unsigned align = fieldFromInstruction32(Insn, 4, 1);
unsigned size = fieldFromInstruction32(Insn, 6, 2);
- unsigned regs = fieldFromInstruction32(Insn, 5, 1) + 1;
align *= (1 << size);
- if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
- return MCDisassembler::Fail;
- if (regs == 2) {
- if (!Check(S, DecodeDPRRegisterClass(Inst, (Rd+1)%32, Address, Decoder)))
+ switch (Inst.getOpcode()) {
+ case ARM::VLD1DUPq16: case ARM::VLD1DUPq32: case ARM::VLD1DUPq8:
+ case ARM::VLD1DUPq16wb_fixed: case ARM::VLD1DUPq16wb_register:
+ case ARM::VLD1DUPq32wb_fixed: case ARM::VLD1DUPq32wb_register:
+ case ARM::VLD1DUPq8wb_fixed: case ARM::VLD1DUPq8wb_register:
+ if (!Check(S, DecodeDPairRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ break;
+ default:
+ if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
return MCDisassembler::Fail;
+ break;
}
if (Rm != 0xF) {
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
@@ -2480,17 +2670,17 @@ static DecodeStatus DecodeVLD1DupInstruction(llvm::MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(align));
- if (Rm == 0xD)
- Inst.addOperand(MCOperand::CreateReg(0));
- else if (Rm != 0xF) {
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
- return MCDisassembler::Fail;
- }
+ // The fixed offset post-increment encodes Rm == 0xd. The no-writeback
+ // variant encodes Rm == 0xf. Anything else is a register offset post-
+ // increment and we need to add the register operand to the instruction.
+ if (Rm != 0xD && Rm != 0xF &&
+ !Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
+ return MCDisassembler::Fail;
return S;
}
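
// [Editor sketch, not part of the patch] Where the old decoder emitted two
// consecutive DPR operands, the new one emits a single register-pair
// operand via DecodeDPairRegisterClass. The members of that pair are still
// the adjacent D registers the old code computed by hand:
#include <utility>
static std::pair<unsigned, unsigned> dpairMembers(unsigned Rd) {
  return {Rd % 32, (Rd + 1) % 32}; // D<n> and D<n+1>, as in the old (Rd+1)%32
}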
-static DecodeStatus DecodeVLD2DupInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD2DupInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2500,18 +2690,33 @@ static DecodeStatus DecodeVLD2DupInstruction(llvm::MCInst &Inst, unsigned Insn,
unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
unsigned align = fieldFromInstruction32(Insn, 4, 1);
unsigned size = 1 << fieldFromInstruction32(Insn, 6, 2);
- unsigned inc = fieldFromInstruction32(Insn, 5, 1) + 1;
+ unsigned pred = fieldFromInstruction32(Insn, 22, 4);
align *= 2*size;
- if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
- return MCDisassembler::Fail;
- if (!Check(S, DecodeDPRRegisterClass(Inst, (Rd+inc)%32, Address, Decoder)))
- return MCDisassembler::Fail;
- if (Rm != 0xF) {
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
+ switch (Inst.getOpcode()) {
+ case ARM::VLD2DUPd16: case ARM::VLD2DUPd32: case ARM::VLD2DUPd8:
+ case ARM::VLD2DUPd16wb_fixed: case ARM::VLD2DUPd16wb_register:
+ case ARM::VLD2DUPd32wb_fixed: case ARM::VLD2DUPd32wb_register:
+ case ARM::VLD2DUPd8wb_fixed: case ARM::VLD2DUPd8wb_register:
+ if (!Check(S, DecodeDPairRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ break;
+ case ARM::VLD2DUPd16x2: case ARM::VLD2DUPd32x2: case ARM::VLD2DUPd8x2:
+ case ARM::VLD2DUPd16x2wb_fixed: case ARM::VLD2DUPd16x2wb_register:
+ case ARM::VLD2DUPd32x2wb_fixed: case ARM::VLD2DUPd32x2wb_register:
+ case ARM::VLD2DUPd8x2wb_fixed: case ARM::VLD2DUPd8x2wb_register:
+ if (!Check(S, DecodeDPairSpacedRegisterClass(Inst, Rd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ break;
+ default:
+ if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
return MCDisassembler::Fail;
+ break;
}
+ if (Rm != 0xF)
+ Inst.addOperand(MCOperand::CreateImm(0));
+
if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder)))
return MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(align));
@@ -2523,10 +2728,13 @@ static DecodeStatus DecodeVLD2DupInstruction(llvm::MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail;
}
+ if (!Check(S, DecodePredicateOperand(Inst, pred, Address, Decoder)))
+ return MCDisassembler::Fail;
+
return S;
}
-static DecodeStatus DecodeVLD3DupInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD3DupInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2561,7 +2769,7 @@ static DecodeStatus DecodeVLD3DupInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVLD4DupInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD4DupInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2614,7 +2822,7 @@ static DecodeStatus DecodeVLD4DupInstruction(llvm::MCInst &Inst, unsigned Insn,
}
static DecodeStatus
-DecodeNEONModImmInstruction(llvm::MCInst &Inst, unsigned Insn,
+DecodeNEONModImmInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2659,7 +2867,7 @@ DecodeNEONModImmInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVSHLMaxInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVSHLMaxInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2678,31 +2886,31 @@ static DecodeStatus DecodeVSHLMaxInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeShiftRight8Imm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeShiftRight8Imm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::CreateImm(8 - Val));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeShiftRight16Imm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeShiftRight16Imm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::CreateImm(16 - Val));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeShiftRight32Imm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeShiftRight32Imm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::CreateImm(32 - Val));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeShiftRight64Imm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeShiftRight64Imm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::CreateImm(64 - Val));
return MCDisassembler::Success;
}
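
// [Editor note, not part of the patch] The four right-shift decoders above
// store the complement of the encoded field: a shift by #1..#N is encoded
// as N - shift. So in DecodeShiftRight8Imm a field value of 3 yields #5,
// and a field value of 0 yields the maximum shift, #8.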
-static DecodeStatus DecodeTBLInstruction(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeTBLInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2713,7 +2921,6 @@ static DecodeStatus DecodeTBLInstruction(llvm::MCInst &Inst, unsigned Insn,
unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
Rm |= fieldFromInstruction32(Insn, 5, 1) << 4;
unsigned op = fieldFromInstruction32(Insn, 6, 1);
- unsigned length = fieldFromInstruction32(Insn, 8, 2) + 1;
if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder)))
return MCDisassembler::Fail;
@@ -2722,9 +2929,15 @@ static DecodeStatus DecodeTBLInstruction(llvm::MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail; // Writeback
}
- for (unsigned i = 0; i < length; ++i) {
- if (!Check(S, DecodeDPRRegisterClass(Inst, (Rn+i)%32, Address, Decoder)))
- return MCDisassembler::Fail;
+ switch (Inst.getOpcode()) {
+ case ARM::VTBL2:
+ case ARM::VTBX2:
+ if (!Check(S, DecodeDPairRegisterClass(Inst, Rn, Address, Decoder)))
+ return MCDisassembler::Fail;
+ break;
+ default:
+ if (!Check(S, DecodeDPRRegisterClass(Inst, Rn, Address, Decoder)))
+ return MCDisassembler::Fail;
}
if (!Check(S, DecodeDPRRegisterClass(Inst, Rm, Address, Decoder)))
@@ -2733,7 +2946,7 @@ static DecodeStatus DecodeTBLInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeThumbAddSpecialReg(llvm::MCInst &Inst, uint16_t Insn,
+static DecodeStatus DecodeThumbAddSpecialReg(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2757,25 +2970,31 @@ static DecodeStatus DecodeThumbAddSpecialReg(llvm::MCInst &Inst, uint16_t Insn,
return S;
}
-static DecodeStatus DecodeThumbBROperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbBROperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
- Inst.addOperand(MCOperand::CreateImm(SignExtend32<12>(Val << 1)));
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<12>(Val<<1) + 4,
+ true, 2, Inst, Decoder))
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<12>(Val << 1)));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeT2BROperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2BROperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
- Inst.addOperand(MCOperand::CreateImm(SignExtend32<21>(Val)));
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<22>(Val<<1) + 4,
+ true, 4, Inst, Decoder))
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<21>(Val)));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeThumbCmpBROperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbCmpBROperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
- Inst.addOperand(MCOperand::CreateImm(SignExtend32<7>(Val << 1)));
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<7>(Val<<1) + 4,
+ true, 2, Inst, Decoder))
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<7>(Val << 1)));
return MCDisassembler::Success;
}
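
// [Editor sketch, not part of the patch] Worked example of the symbolic
// target arithmetic shared by the three branch decoders above: the target
// is the instruction address, plus the sign-extended offset scaled to
// bytes (Val << 1), plus the Thumb PC bias of 4. For a CBZ at 0x1000 with
// Val == 0x10:  0x1000 + SignExtend32<7>(0x10 << 1) + 4 == 0x1024.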
-static DecodeStatus DecodeThumbAddrModeRR(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbAddrModeRR(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2790,7 +3009,7 @@ static DecodeStatus DecodeThumbAddrModeRR(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeThumbAddrModeIS(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbAddrModeIS(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2804,7 +3023,7 @@ static DecodeStatus DecodeThumbAddrModeIS(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeThumbAddrModePC(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbAddrModePC(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
unsigned imm = Val << 2;
@@ -2814,7 +3033,7 @@ static DecodeStatus DecodeThumbAddrModePC(llvm::MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeThumbAddrModeSP(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbAddrModeSP(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::CreateReg(ARM::SP));
Inst.addOperand(MCOperand::CreateImm(Val));
@@ -2822,7 +3041,7 @@ static DecodeStatus DecodeThumbAddrModeSP(llvm::MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeT2AddrModeSOReg(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2AddrModeSOReg(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2839,7 +3058,7 @@ static DecodeStatus DecodeT2AddrModeSOReg(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeT2LoadShift(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2894,7 +3113,7 @@ static DecodeStatus DecodeT2LoadShift(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeT2Imm8S4(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2Imm8S4(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
int imm = Val & 0xFF;
if (!(Val & 0x100)) imm *= -1;
@@ -2903,7 +3122,7 @@ static DecodeStatus DecodeT2Imm8S4(llvm::MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeT2AddrModeImm8s4(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2AddrModeImm8s4(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2918,7 +3137,7 @@ static DecodeStatus DecodeT2AddrModeImm8s4(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeT2AddrModeImm0_1020s4(llvm::MCInst &Inst,unsigned Val,
+static DecodeStatus DecodeT2AddrModeImm0_1020s4(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2933,7 +3152,7 @@ static DecodeStatus DecodeT2AddrModeImm0_1020s4(llvm::MCInst &Inst,unsigned Val,
return S;
}
-static DecodeStatus DecodeT2Imm8(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2Imm8(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
int imm = Val & 0xFF;
if (Val == 0)
@@ -2946,7 +3165,7 @@ static DecodeStatus DecodeT2Imm8(llvm::MCInst &Inst, unsigned Val,
}
-static DecodeStatus DecodeT2AddrModeImm8(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2AddrModeImm8(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -2977,7 +3196,7 @@ static DecodeStatus DecodeT2AddrModeImm8(llvm::MCInst &Inst, unsigned Val,
return S;
}
-static DecodeStatus DecodeT2LdStPre(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeT2LdStPre(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3007,7 +3226,7 @@ static DecodeStatus DecodeT2LdStPre(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeT2AddrModeImm12(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2AddrModeImm12(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3022,7 +3241,7 @@ static DecodeStatus DecodeT2AddrModeImm12(llvm::MCInst &Inst, unsigned Val,
}
-static DecodeStatus DecodeThumbAddSPImm(llvm::MCInst &Inst, uint16_t Insn,
+static DecodeStatus DecodeThumbAddSPImm(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder) {
unsigned imm = fieldFromInstruction16(Insn, 0, 7);
@@ -3033,7 +3252,7 @@ static DecodeStatus DecodeThumbAddSPImm(llvm::MCInst &Inst, uint16_t Insn,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeThumbAddSPReg(llvm::MCInst &Inst, uint16_t Insn,
+static DecodeStatus DecodeThumbAddSPReg(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3058,7 +3277,7 @@ static DecodeStatus DecodeThumbAddSPReg(llvm::MCInst &Inst, uint16_t Insn,
return S;
}
-static DecodeStatus DecodeThumbCPS(llvm::MCInst &Inst, uint16_t Insn,
+static DecodeStatus DecodeThumbCPS(MCInst &Inst, uint16_t Insn,
uint64_t Address, const void *Decoder) {
unsigned imod = fieldFromInstruction16(Insn, 4, 1) | 0x2;
unsigned flags = fieldFromInstruction16(Insn, 0, 3);
@@ -3069,29 +3288,29 @@ static DecodeStatus DecodeThumbCPS(llvm::MCInst &Inst, uint16_t Insn,
return MCDisassembler::Success;
}
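
// [Editor note, not part of the patch] The '| 0x2' above maps the single
// Thumb 'im' bit onto the two-bit architectural imod encoding: 0b10
// (interrupt enable, CPSIE) when the bit is clear, 0b11 (interrupt
// disable, CPSID) when it is set.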
-static DecodeStatus DecodePostIdxReg(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodePostIdxReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
unsigned add = fieldFromInstruction32(Insn, 4, 1);
- if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rm, Address, Decoder)))
return MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(add));
return S;
}
-static DecodeStatus DecodeThumbBLXOffset(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbBLXOffset(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
- if (!tryAddingSymbolicOperand(Address,
+ if (!tryAddingSymbolicOperand(Address,
(Address & ~2u) + SignExtend32<22>(Val << 1) + 4,
true, 4, Inst, Decoder))
Inst.addOperand(MCOperand::CreateImm(SignExtend32<22>(Val << 1)));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeCoprocessor(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeCoprocessor(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
if (Val == 0xA || Val == 0xB)
return MCDisassembler::Fail;
@@ -3101,7 +3320,7 @@ static DecodeStatus DecodeCoprocessor(llvm::MCInst &Inst, unsigned Val,
}
static DecodeStatus
-DecodeThumbTableBranch(llvm::MCInst &Inst, unsigned Insn,
+DecodeThumbTableBranch(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3117,7 +3336,7 @@ DecodeThumbTableBranch(llvm::MCInst &Inst, unsigned Insn,
}
static DecodeStatus
-DecodeThumb2BCCInstruction(llvm::MCInst &Inst, unsigned Insn,
+DecodeThumb2BCCInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3159,7 +3378,7 @@ DecodeThumb2BCCInstruction(llvm::MCInst &Inst, unsigned Insn,
// Decode a shifted immediate operand. These consist of an 8-bit
// value and a 4-bit directive that specifies either a splat
// operation or a rotation.
-static DecodeStatus DecodeT2SOImm(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeT2SOImm(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
unsigned ctrl = fieldFromInstruction32(Val, 10, 2);
if (ctrl == 0) {
@@ -3191,19 +3410,23 @@ static DecodeStatus DecodeT2SOImm(llvm::MCInst &Inst, unsigned Val,
}
static DecodeStatus
-DecodeThumbBCCTargetOperand(llvm::MCInst &Inst, unsigned Val,
+DecodeThumbBCCTargetOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder){
- Inst.addOperand(MCOperand::CreateImm(Val << 1));
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<8>(Val<<1) + 4,
+ true, 2, Inst, Decoder))
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<8>(Val << 1)));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeThumbBLTargetOperand(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeThumbBLTargetOperand(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder){
- Inst.addOperand(MCOperand::CreateImm(SignExtend32<22>(Val << 1)));
+ if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<22>(Val<<1) + 4,
+ true, 4, Inst, Decoder))
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<22>(Val << 1)));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeMemBarrierOption(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeMemBarrierOption(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
switch (Val) {
default:
@@ -3223,14 +3446,14 @@ static DecodeStatus DecodeMemBarrierOption(llvm::MCInst &Inst, unsigned Val,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeMSRMask(llvm::MCInst &Inst, unsigned Val,
+static DecodeStatus DecodeMSRMask(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
if (!Val) return MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(Val));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeDoubleRegLoad(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeDoubleRegLoad(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3253,7 +3476,7 @@ static DecodeStatus DecodeDoubleRegLoad(llvm::MCInst &Inst, unsigned Insn,
}
-static DecodeStatus DecodeDoubleRegStore(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeDoubleRegStore(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder){
DecodeStatus S = MCDisassembler::Success;
@@ -3280,7 +3503,7 @@ static DecodeStatus DecodeDoubleRegStore(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeLDRPreImm(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeLDRPreImm(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3305,7 +3528,7 @@ static DecodeStatus DecodeLDRPreImm(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeLDRPreReg(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeLDRPreReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3333,7 +3556,7 @@ static DecodeStatus DecodeLDRPreReg(llvm::MCInst &Inst, unsigned Insn,
}
-static DecodeStatus DecodeSTRPreImm(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSTRPreImm(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3358,7 +3581,7 @@ static DecodeStatus DecodeSTRPreImm(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeSTRPreReg(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeSTRPreReg(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3383,7 +3606,7 @@ static DecodeStatus DecodeSTRPreReg(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVLD1LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD1LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3442,7 +3665,7 @@ static DecodeStatus DecodeVLD1LN(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVST1LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVST1LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3500,7 +3723,7 @@ static DecodeStatus DecodeVST1LN(llvm::MCInst &Inst, unsigned Insn,
}
-static DecodeStatus DecodeVLD2LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD2LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3567,7 +3790,7 @@ static DecodeStatus DecodeVLD2LN(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVST2LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVST2LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3631,7 +3854,7 @@ static DecodeStatus DecodeVST2LN(llvm::MCInst &Inst, unsigned Insn,
}
-static DecodeStatus DecodeVLD3LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD3LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3701,7 +3924,7 @@ static DecodeStatus DecodeVLD3LN(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVST3LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVST3LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3765,7 +3988,7 @@ static DecodeStatus DecodeVST3LN(llvm::MCInst &Inst, unsigned Insn,
}
-static DecodeStatus DecodeVLD4LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVLD4LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3839,7 +4062,7 @@ static DecodeStatus DecodeVLD4LN(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVST4LN(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVST4LN(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -3904,7 +4127,7 @@ static DecodeStatus DecodeVST4LN(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVMOVSRR(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVMOVSRR(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
@@ -3930,7 +4153,7 @@ static DecodeStatus DecodeVMOVSRR(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeVMOVRRS(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeVMOVRRS(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
@@ -3956,7 +4179,7 @@ static DecodeStatus DecodeVMOVRRS(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeIT(llvm::MCInst &Inst, unsigned Insn,
+static DecodeStatus DecodeIT(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
unsigned pred = fieldFromInstruction16(Insn, 4, 4);
@@ -3983,7 +4206,7 @@ static DecodeStatus DecodeIT(llvm::MCInst &Inst, unsigned Insn,
}
static DecodeStatus
-DecodeT2LDRDPreInstruction(llvm::MCInst &Inst, unsigned Insn,
+DecodeT2LDRDPreInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -4020,7 +4243,7 @@ DecodeT2LDRDPreInstruction(llvm::MCInst &Inst, unsigned Insn,
}
static DecodeStatus
-DecodeT2STRDPreInstruction(llvm::MCInst &Inst, unsigned Insn,
+DecodeT2STRDPreInstruction(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -4054,7 +4277,7 @@ DecodeT2STRDPreInstruction(llvm::MCInst &Inst, unsigned Insn,
return S;
}
-static DecodeStatus DecodeT2Adr(llvm::MCInst &Inst, uint32_t Insn,
+static DecodeStatus DecodeT2Adr(MCInst &Inst, uint32_t Insn,
uint64_t Address, const void *Decoder) {
unsigned sign1 = fieldFromInstruction32(Insn, 21, 1);
unsigned sign2 = fieldFromInstruction32(Insn, 23, 1);
@@ -4069,7 +4292,7 @@ static DecodeStatus DecodeT2Adr(llvm::MCInst &Inst, uint32_t Insn,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeT2ShifterImmOperand(llvm::MCInst &Inst, uint32_t Val,
+static DecodeStatus DecodeT2ShifterImmOperand(MCInst &Inst, uint32_t Val,
uint64_t Address,
const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
@@ -4080,3 +4303,109 @@ static DecodeStatus DecodeT2ShifterImmOperand(llvm::MCInst &Inst, uint32_t Val,
return S;
}
+static DecodeStatus DecodeSwap(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned Rt = fieldFromInstruction32(Insn, 12, 4);
+ unsigned Rt2 = fieldFromInstruction32(Insn, 0, 4);
+ unsigned Rn = fieldFromInstruction32(Insn, 16, 4);
+ unsigned pred = fieldFromInstruction32(Insn, 28, 4);
+
+ if (pred == 0xF)
+ return DecodeCPSInstruction(Inst, Insn, Address, Decoder);
+
+ DecodeStatus S = MCDisassembler::Success;
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt2, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rn, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodePredicateOperand(Inst, pred, Address, Decoder)))
+ return MCDisassembler::Fail;
+
+ return S;
+}
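
// [Editor gloss, not from the patch] The pred == 0xF check appears to
// exist because cond == 0b1111 moves this bit pattern out of the SWP
// space and into the unconditional region, where it overlaps the CPS
// encoding, so the decoder re-dispatches instead of failing.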
+
+static DecodeStatus DecodeVCVTD(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned Vd = (fieldFromInstruction32(Insn, 12, 4) << 0);
+ Vd |= (fieldFromInstruction32(Insn, 22, 1) << 4);
+ unsigned Vm = (fieldFromInstruction32(Insn, 0, 4) << 0);
+ Vm |= (fieldFromInstruction32(Insn, 5, 1) << 4);
+ unsigned imm = fieldFromInstruction32(Insn, 16, 6);
+ unsigned cmode = fieldFromInstruction32(Insn, 8, 4);
+
+ DecodeStatus S = MCDisassembler::Success;
+
+ // VMOVv2f32 is ambiguous with these decodings.
+ if (!(imm & 0x38) && cmode == 0xF) {
+ Inst.setOpcode(ARM::VMOVv2f32);
+ return DecodeNEONModImmInstruction(Inst, Insn, Address, Decoder);
+ }
+
+ if (!(imm & 0x20)) Check(S, MCDisassembler::SoftFail);
+
+ if (!Check(S, DecodeDPRRegisterClass(Inst, Vd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeDPRRegisterClass(Inst, Vm, Address, Decoder)))
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateImm(64 - imm));
+
+ return S;
+}
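
// [Editor sketch, not part of the patch] Recovering the fractional-bit
// count from the 6-bit field used above; note that encodings with bit 5
// clear are only marked SoftFail (UNPREDICTABLE), not rejected outright:
static unsigned vcvtFBits(unsigned imm6) { return 64 - imm6; }
// e.g. imm6 == 48 (0x30)  ->  16 fractional bits.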
+
+static DecodeStatus DecodeVCVTQ(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned Vd = (fieldFromInstruction32(Insn, 12, 4) << 0);
+ Vd |= (fieldFromInstruction32(Insn, 22, 1) << 4);
+ unsigned Vm = (fieldFromInstruction32(Insn, 0, 4) << 0);
+ Vm |= (fieldFromInstruction32(Insn, 5, 1) << 4);
+ unsigned imm = fieldFromInstruction32(Insn, 16, 6);
+ unsigned cmode = fieldFromInstruction32(Insn, 8, 4);
+
+ DecodeStatus S = MCDisassembler::Success;
+
+ // VMOVv4f32 is ambiguous with these decodings.
+ if (!(imm & 0x38) && cmode == 0xF) {
+ Inst.setOpcode(ARM::VMOVv4f32);
+ return DecodeNEONModImmInstruction(Inst, Insn, Address, Decoder);
+ }
+
+ if (!(imm & 0x20)) Check(S, MCDisassembler::SoftFail);
+
+ if (!Check(S, DecodeQPRRegisterClass(Inst, Vd, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeQPRRegisterClass(Inst, Vm, Address, Decoder)))
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateImm(64 - imm));
+
+ return S;
+}
+
+static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ unsigned Rn = fieldFromInstruction32(Val, 16, 4);
+ unsigned Rt = fieldFromInstruction32(Val, 12, 4);
+ unsigned Rm = fieldFromInstruction32(Val, 0, 4);
+ Rm |= (fieldFromInstruction32(Val, 23, 1) << 4);
+ unsigned Cond = fieldFromInstruction32(Val, 28, 4);
+
+ if (fieldFromInstruction32(Val, 8, 4) != 0 || Rn == Rt)
+ S = MCDisassembler::SoftFail;
+
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rn, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodeAddrMode7Operand(Inst, Rn, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodePostIdxReg(Inst, Rm, Address, Decoder)))
+ return MCDisassembler::Fail;
+ if (!Check(S, DecodePredicateOperand(Inst, Cond, Address, Decoder)))
+ return MCDisassembler::Fail;
+
+ return S;
+}
+
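// [Editor sketch, not part of the patch] DecodeLDR above leans on the
// SoftFail/Fail distinction: SoftFail flags the result UNPREDICTABLE but
// decoding continues, while Fail aborts. Assuming the Check() helper
// defined earlier in this file behaves as in upstream LLVM, the idiom is:
static bool Check(DecodeStatus &Out, DecodeStatus In) {
  switch (In) {
  case MCDisassembler::Success:
    return true;                 // Out keeps its current (better) status.
  case MCDisassembler::SoftFail:
    Out = In;                    // Downgrade, but keep decoding.
    return true;
  case MCDisassembler::Fail:
    Out = In;                    // Hard stop.
    return false;
  }
  llvm_unreachable("Invalid DecodeStatus!");
}
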
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
index ccdac3e..b3eeafe 100644
--- a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp
@@ -18,11 +18,11 @@
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-#define GET_INSTRUCTION_NAME
#include "ARMGenAsmWriter.inc"
/// translateShiftImm - Convert shift immediate from 0-31 to 1-32 for printing.
@@ -36,16 +36,14 @@ static unsigned translateShiftImm(unsigned imm) {
ARMInstPrinter::ARMInstPrinter(const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) :
- MCInstPrinter(MAI) {
+ MCInstPrinter(MAI, MII, MRI) {
// Initialize the set of available features.
setAvailableFeatures(STI.getFeatureBits());
}
-StringRef ARMInstPrinter::getOpcodeName(unsigned Opcode) const {
- return getInstructionName(Opcode);
-}
-
void ARMInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
OS << getRegisterName(RegNo);
}
@@ -101,7 +99,9 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
// A8.6.123 PUSH
if ((Opcode == ARM::STMDB_UPD || Opcode == ARM::t2STMDB_UPD) &&
- MI->getOperand(0).getReg() == ARM::SP) {
+ MI->getOperand(0).getReg() == ARM::SP &&
+ MI->getNumOperands() > 5) {
+ // Should only print PUSH if there are at least two registers in the list.
O << '\t' << "push";
printPredicateOperand(MI, 2, O);
if (Opcode == ARM::t2STMDB_UPD)
@@ -122,7 +122,9 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
// A8.6.122 POP
if ((Opcode == ARM::LDMIA_UPD || Opcode == ARM::t2LDMIA_UPD) &&
- MI->getOperand(0).getReg() == ARM::SP) {
+ MI->getOperand(0).getReg() == ARM::SP &&
+ MI->getNumOperands() > 5) {
+ // Should only print POP if there are at least two registers in the list.
O << '\t' << "pop";
printPredicateOperand(MI, 2, O);
if (Opcode == ARM::t2LDMIA_UPD)
@@ -250,7 +252,7 @@ void ARMInstPrinter::printSORegRegOperand(const MCInst *MI, unsigned OpNum,
O << ", " << ARM_AM::getShiftOpcStr(ShOpc);
if (ShOpc == ARM_AM::rrx)
return;
-
+
O << ' ' << getRegisterName(MO2.getReg());
assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
}
@@ -433,6 +435,12 @@ void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op,
void ARMInstPrinter::printAddrMode3Operand(const MCInst *MI, unsigned Op,
raw_ostream &O) {
+ const MCOperand &MO1 = MI->getOperand(Op);
+ if (!MO1.isReg()) { // For label symbolic references.
+ printOperand(MI, Op, O);
+ return;
+ }
+
const MCOperand &MO3 = MI->getOperand(Op+2);
unsigned IdxMode = ARM_AM::getAM3IdxMode(MO3.getImm());
@@ -636,7 +644,7 @@ void ARMInstPrinter::printMSRMaskOperand(const MCInst *MI, unsigned OpNum,
if (getAvailableFeatures() & ARM::FeatureMClass) {
switch (Op.getImm()) {
- default: assert(0 && "Unexpected mask value!");
+ default: llvm_unreachable("Unexpected mask value!");
case 0: O << "apsr"; return;
case 1: O << "iapsr"; return;
case 2: O << "eapsr"; return;
@@ -659,12 +667,11 @@ void ARMInstPrinter::printMSRMaskOperand(const MCInst *MI, unsigned OpNum,
if (!SpecRegRBit && (Mask == 8 || Mask == 4 || Mask == 12)) {
O << "APSR_";
switch (Mask) {
- default: assert(0);
+ default: llvm_unreachable("Unexpected mask value!");
case 4: O << "g"; return;
case 8: O << "nzcvq"; return;
case 12: O << "nzcvqg"; return;
}
- llvm_unreachable("Unexpected mask value!");
}
if (SpecRegRBit)
@@ -684,7 +691,10 @@ void ARMInstPrinter::printMSRMaskOperand(const MCInst *MI, unsigned OpNum,
void ARMInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
- if (CC != ARMCC::AL)
+ // Handle the undefined 15 CC value here for printing so we don't abort().
+ if ((unsigned)CC == 15)
+ O << "<und>";
+ else if (CC != ARMCC::AL)
O << ARMCondCodeToString(CC);
}
@@ -882,6 +892,11 @@ void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI,
const MCOperand &MO1 = MI->getOperand(OpNum);
const MCOperand &MO2 = MI->getOperand(OpNum+1);
+ if (!MO1.isReg()) { // For label symbolic references.
+ printOperand(MI, OpNum, O);
+ return;
+ }
+
O << "[" << getRegisterName(MO1.getReg());
int32_t OffImm = (int32_t)MO2.getImm() / 4;
@@ -963,7 +978,8 @@ void ARMInstPrinter::printNEONModImmOperand(const MCInst *MI, unsigned OpNum,
unsigned EncodedImm = MI->getOperand(OpNum).getImm();
unsigned EltBits;
uint64_t Val = ARM_AM::decodeNEONModImm(EncodedImm, EltBits);
- O << "#0x" << utohexstr(Val);
+ O << "#0x";
+ O.write_hex(Val);
}
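
// [Editor note, not from the patch] raw_ostream::write_hex streams the
// value directly rather than materializing the temporary string that
// utohexstr() returned; utohexstr came from the StringExtras.h include
// this patch drops at the top of the file.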
void ARMInstPrinter::printImmPlusOneOperand(const MCInst *MI, unsigned OpNum,
@@ -986,7 +1002,153 @@ void ARMInstPrinter::printRotImmOperand(const MCInst *MI, unsigned OpNum,
}
}
+void ARMInstPrinter::printFBits16(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << "#" << 16 - MI->getOperand(OpNum).getImm();
+}
+
+void ARMInstPrinter::printFBits32(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << "#" << 32 - MI->getOperand(OpNum).getImm();
+}
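
// [Editor example, not from the patch] These printers emit the complement
// of the stored operand value: an operand of 10 prints as "#6" from
// printFBits16 and as "#22" from printFBits32.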
+
void ARMInstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
raw_ostream &O) {
O << "[" << MI->getOperand(OpNum).getImm() << "]";
}
+
+void ARMInstPrinter::printVectorListOne(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "}";
+}
+
+void ARMInstPrinter::printVectorListTwo(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0);
+ unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_1);
+ O << "{" << getRegisterName(Reg0) << ", " << getRegisterName(Reg1) << "}";
+}
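
// [Editor note, not from the patch] The Two/TwoSpaced printers receive a
// register-pair operand (DPair/DPairSpc), so the member D registers come
// from MCRegisterInfo::getSubReg with dsub_0/dsub_1 (adjacent) or
// dsub_0/dsub_2 (even-spaced). The Three/Four printers below still receive
// a leading D register and rely on the consecutive D<n> enum order instead.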
+
+void ARMInstPrinter::printVectorListTwoSpaced(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0);
+ unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_2);
+ O << "{" << getRegisterName(Reg0) << ", " << getRegisterName(Reg1) << "}";
+}
+
+void ARMInstPrinter::printVectorListThree(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ // Normally, it's not safe to use register enum values directly with
+ // addition to get the next register, but for VFP registers, the
+ // sort order is guaranteed because they're all of the form D<n>.
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "}";
+}
+
+void ARMInstPrinter::printVectorListFour(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ // Normally, it's not safe to use register enum values directly with
+ // addition to get the next register, but for VFP registers, the
+ // sort order is guaranteed because they're all of the form D<n>.
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 3) << "}";
+}
+
+void ARMInstPrinter::printVectorListOneAllLanes(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[]}";
+}
+
+void ARMInstPrinter::printVectorListTwoAllLanes(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0);
+ unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_1);
+ O << "{" << getRegisterName(Reg0) << "[], " << getRegisterName(Reg1) << "[]}";
+}
+
+void ARMInstPrinter::printVectorListThreeAllLanes(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ // Normally, it's not safe to use register enum values directly with
+ // addition to get the next register, but for VFP registers, the
+ // sort order is guaranteed because they're all of the form D<n>.
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[]}";
+}
+
+void ARMInstPrinter::printVectorListFourAllLanes(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ // Normally, it's not safe to use register enum values directly with
+ // addition to get the next register, but for VFP registers, the
+ // sort order is guaranteed because they're all of the form D<n>.
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 3) << "[]}";
+}
+
+void ARMInstPrinter::printVectorListTwoSpacedAllLanes(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0);
+ unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_2);
+ O << "{" << getRegisterName(Reg0) << "[], " << getRegisterName(Reg1) << "[]}";
+}
+
+void ARMInstPrinter::printVectorListThreeSpacedAllLanes(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ // Normally, it's not safe to use register enum values directly with
+ // addition to get the next register, but for VFP registers, the
+ // sort order is guaranteed because they're all of the form D<n>.
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "[]}";
+}
+
+void ARMInstPrinter::printVectorListFourSpacedAllLanes(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ // Normally, it's not safe to use register enum values directly with
+ // addition to get the next register, but for VFP registers, the
+ // sort order is guaranteed because they're all of the form D<n>.
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "[], "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 6) << "[]}";
+}
+
+void ARMInstPrinter::printVectorListThreeSpaced(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ // Normally, it's not safe to use register enum values directly with
+ // addition to get the next register, but for VFP registers, the
+ // sort order is guaranteed because they're all of the form D<n>.
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "}";
+}
+
+void ARMInstPrinter::printVectorListFourSpaced(const MCInst *MI,
+ unsigned OpNum,
+ raw_ostream &O) {
+ // Normally, it's not safe to use register enum values directly with
+ // addition to get the next register, but for VFP registers, the
+ // sort order is guaranteed because they're all of the form D<n>.
+ O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << ", "
+ << getRegisterName(MI->getOperand(OpNum).getReg() + 6) << "}";
+}
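
// [Editor example, not from the patch] For a first register of d0,
// printVectorListFourSpaced emits "{d0, d2, d4, d6}"; the AllLanes
// variants append "[]" to each element, e.g. "{d0[], d2[], d4[]}".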
diff --git a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
index 5c2173f..8acb7ee 100644
--- a/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/InstPrinter/ARMInstPrinter.h
@@ -1,4 +1,4 @@
-//===-- ARMInstPrinter.h - Convert ARM MCInst to assembly syntax ----------===//
+//===- ARMInstPrinter.h - Convert ARM MCInst to assembly syntax -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,14 +23,12 @@ class MCOperand;
class ARMInstPrinter : public MCInstPrinter {
public:
- ARMInstPrinter(const MCAsmInfo &MAI, const MCSubtargetInfo &STI);
+ ARMInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
- virtual StringRef getOpcodeName(unsigned Opcode) const;
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- static const char *getInstructionName(unsigned Opcode);
-
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
@@ -128,7 +126,33 @@ public:
void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printT2LdrLabelOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printFBits16(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printFBits32(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVectorIndex(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printVectorListOne(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printVectorListTwo(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printVectorListTwoSpaced(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListThree(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printVectorListFour(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printVectorListOneAllLanes(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListTwoAllLanes(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListThreeAllLanes(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListFourAllLanes(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListTwoSpacedAllLanes(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListThreeSpacedAllLanes(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListFourSpacedAllLanes(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListThreeSpaced(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
+ void printVectorListFourSpaced(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
index 9982fa6..62473b2 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h
@@ -1,4 +1,4 @@
-//===- ARMAddressingModes.h - ARM Addressing Modes --------------*- C++ -*-===//
+//===-- ARMAddressingModes.h - ARM Addressing Modes -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,6 +16,7 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
@@ -43,7 +44,7 @@ namespace ARM_AM {
static inline const char *getShiftOpcStr(ShiftOpc Op) {
switch (Op) {
- default: assert(0 && "Unknown shift opc!");
+ default: llvm_unreachable("Unknown shift opc!");
case ARM_AM::asr: return "asr";
case ARM_AM::lsl: return "lsl";
case ARM_AM::lsr: return "lsr";
@@ -54,7 +55,7 @@ namespace ARM_AM {
static inline unsigned getShiftOpcEncoding(ShiftOpc Op) {
switch (Op) {
- default: assert(0 && "Unknown shift opc!");
+ default: llvm_unreachable("Unknown shift opc!");
case ARM_AM::asr: return 2;
case ARM_AM::lsl: return 0;
case ARM_AM::lsr: return 1;
@@ -72,7 +73,7 @@ namespace ARM_AM {
static inline const char *getAMSubModeStr(AMSubMode Mode) {
switch (Mode) {
- default: assert(0 && "Unknown addressing sub-mode!");
+ default: llvm_unreachable("Unknown addressing sub-mode!");
case ARM_AM::ia: return "ia";
case ARM_AM::ib: return "ib";
case ARM_AM::da: return "da";
@@ -569,7 +570,7 @@ namespace ARM_AM {
}
EltBits = 64;
} else {
- assert(false && "Unsupported NEON immediate");
+ llvm_unreachable("Unsupported NEON immediate");
}
return Val;
}
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index c31c5e6..d10bfc1 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -11,17 +11,18 @@
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMAddressingModes.h"
-#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCValue.h"
#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
@@ -31,8 +32,8 @@ using namespace llvm;
namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
- ARMELFObjectWriter(Triple::OSType OSType)
- : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSType, ELF::EM_ARM,
+ ARMELFObjectWriter(uint8_t OSABI)
+ : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
/*HasRelocationAddend*/ false) {}
};
@@ -60,15 +61,16 @@ public:
// ARMFixupKinds.h.
//
// Name Offset (bits) Size (bits) Flags
-{ "fixup_arm_ldst_pcrel_12", 1, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_t2_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
-{ "fixup_arm_pcrel_10", 1, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_t2_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{ "fixup_thumb_adr_pcrel_10",0, 8, MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
-{ "fixup_arm_adr_pcrel_12", 1, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_t2_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel |
MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
{ "fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel },
@@ -76,6 +78,9 @@ public:
{ "fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel },
+{ "fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
{ "fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
@@ -100,13 +105,50 @@ public:
return Infos[Kind - FirstTargetFixupKind];
}
- bool MayNeedRelaxation(const MCInst &Inst) const;
+ /// processFixupValue - Target hook to process the literal value of a fixup
+ /// if necessary.
+ void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ MCValue &Target, uint64_t &Value,
+ bool &IsResolved) {
+ const MCSymbolRefExpr *A = Target.getSymA();
+ // Some fixups to thumb function symbols need the low bit (thumb bit)
+ // twiddled.
+ if ((unsigned)Fixup.getKind() != ARM::fixup_arm_ldst_pcrel_12 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_t2_ldst_pcrel_12 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_arm_adr_pcrel_12 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_thumb_adr_pcrel_10 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_t2_adr_pcrel_12 &&
+ (unsigned)Fixup.getKind() != ARM::fixup_arm_thumb_cp) {
+ if (A) {
+ const MCSymbol &Sym = A->getSymbol().AliasedSymbol();
+ if (Asm.isThumbFunc(&Sym))
+ Value |= 1;
+ }
+ }
+ // We must always generate a relocation for BL/BLX instructions if we have
+ // a symbol to reference, as the linker relies on knowing the destination
+ // symbol's thumb-ness to get interworking right.
+ if (A && ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_blx ||
+ (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl ||
+ (unsigned)Fixup.getKind() == ARM::fixup_arm_blx ||
+ (unsigned)Fixup.getKind() == ARM::fixup_arm_uncondbl ||
+ (unsigned)Fixup.getKind() == ARM::fixup_arm_condbl))
+ IsResolved = false;
+ }
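
For reference, the thumb-bit adjustment above reduces to a small standalone check; this sketch uses a hypothetical isThumbFunc() in place of the MCAssembler query.

#include <cstdint>

// Hypothetical stand-in for MCAssembler::isThumbFunc(&Sym).
static bool isThumbFunc(const char *Sym) { return Sym[0] == 't'; }

// Interworking: a branch target in Thumb code must carry bit 0 set, so the
// fixup value gets its low bit forced on for Thumb function symbols.
static uint64_t adjustForThumbBit(uint64_t Value, const char *Sym) {
  if (isThumbFunc(Sym))
    Value |= 1;
  return Value;
}

int main() {
  return adjustForThumbBit(0x8000, "thumb_fn") == 0x8001 ? 0 : 1;
}
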
+
+ bool mayNeedRelaxation(const MCInst &Inst) const;
+
+ bool fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const;
- void RelaxInstruction(const MCInst &Inst, MCInst &Res) const;
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const;
- bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
- void HandleAssemblerFlag(MCAssemblerFlag Flag) {
+ void handleAssemblerFlag(MCAssemblerFlag Flag) {
switch (Flag) {
default: break;
case MCAF_Code16:
@@ -124,21 +166,81 @@ public:
};
} // end anonymous namespace
-bool ARMAsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
- // FIXME: Thumb targets, different move constant targets..
+static unsigned getRelaxedOpcode(unsigned Op) {
+ switch (Op) {
+ default: return Op;
+ case ARM::tBcc: return ARM::t2Bcc;
+ case ARM::tLDRpciASM: return ARM::t2LDRpci;
+ case ARM::tADR: return ARM::t2ADR;
+ case ARM::tB: return ARM::t2B;
+ }
+}
+
+bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
+ if (getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode())
+ return true;
return false;
}
-void ARMAsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
- assert(0 && "ARMAsmBackend::RelaxInstruction() unimplemented");
- return;
+bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const {
+ switch ((unsigned)Fixup.getKind()) {
+ case ARM::fixup_arm_thumb_br: {
+ // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
+ // low bit being an implied zero. There's an implied +4 offset for the
+ // branch, so we adjust the other way here to determine what's
+ // encodable.
+ //
+    // Relax if the value is too big for the (signed) 11-bit field of tB.
+ int64_t Offset = int64_t(Value) - 4;
+ return Offset > 2046 || Offset < -2048;
+ }
+ case ARM::fixup_arm_thumb_bcc: {
+ // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
+ // low bit being an implied zero. There's an implied +4 offset for the
+ // branch, so we adjust the other way here to determine what's
+ // encodable.
+ //
+ // Relax if the value is too big for a (signed) i8.
+ int64_t Offset = int64_t(Value) - 4;
+ return Offset > 254 || Offset < -256;
+ }
+ case ARM::fixup_thumb_adr_pcrel_10:
+ case ARM::fixup_arm_thumb_cp: {
+ // If the immediate is negative, greater than 1020, or not a multiple
+ // of four, the wide version of the instruction must be used.
+ int64_t Offset = int64_t(Value) - 4;
+ return Offset > 1020 || Offset < 0 || Offset & 3;
+ }
+ }
+ llvm_unreachable("Unexpected fixup kind in fixupNeedsRelaxation()!");
+}
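
The displacement limits above can be exercised in isolation. A minimal sketch of the tB check, assuming the same +4 branch base:

#include <cassert>
#include <cstdint>

// tB: signed 12-bit displacement, low bit implied zero, branch base PC+4.
static bool tBNeedsRelaxation(uint64_t Value) {
  int64_t Offset = int64_t(Value) - 4;
  return Offset > 2046 || Offset < -2048;
}

int main() {
  assert(!tBNeedsRelaxation(2050)); // Offset 2046 still encodes.
  assert(tBNeedsRelaxation(2052));  // Offset 2048 forces t2B.
  return 0;
}
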
+
+void ARMAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
+
+ // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
+ if (RelaxedOp == Inst.getOpcode()) {
+ SmallString<256> Tmp;
+ raw_svector_ostream OS(Tmp);
+ Inst.dump_pretty(OS);
+ OS << "\n";
+ report_fatal_error("unexpected instruction to relax: " + OS.str());
+ }
+
+ // The instructions we're relaxing have (so far) the same operands.
+ // We just need to update to the proper opcode.
+ Res = Inst;
+ Res.setOpcode(RelaxedOp);
}
-bool ARMAsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
+bool ARMAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000; // using MOV r0,r0
- const uint32_t ARMv6T2_NopEncoding = 0xe3207800; // NOP
+ const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
if (isThumb()) {
const uint16_t nopEncoding = hasNOP() ? Thumb2_16bitNopEncoding
: Thumb1_16bitNopEncoding;
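
The corrected ARMv6T2 value is the architected NOP hint; the older fallback really is MOV r0,r0. A quick decode, for illustration only:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t MovR0R0 = 0xe1a00000; // cond=AL (0xe), MOV r0, r0
  const uint32_t HintNop = 0xe320f000; // cond=AL, hint #0 = architected NOP
  std::printf("cond=%x\n", MovR0R0 >> 28); // e
  std::printf("hint=%u\n", HintNop & 0xff); // 0
  return 0;
}
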
@@ -269,6 +371,9 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case ARM::fixup_arm_condbranch:
case ARM::fixup_arm_uncondbranch:
+ case ARM::fixup_arm_uncondbl:
+ case ARM::fixup_arm_condbl:
+ case ARM::fixup_arm_blx:
// These values don't encode the low two bits since they're always zero.
// Offset by 8 just as above.
return 0xffffff & ((Value - 8) >> 2);
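
Worked through, the branch encoding above subtracts the 8-byte ARM pipeline offset and drops the two always-zero low bits; a sketch:

#include <cassert>
#include <cstdint>

// 24-bit branch field: (Value - 8) with the low two bits dropped.
static unsigned encodeBranch24(uint64_t Value) {
  return 0xffffff & ((Value - 8) >> 2);
}

int main() {
  assert(encodeBranch24(8) == 0);        // target == PC: zero displacement
  assert(encodeBranch24(0) == 0xfffffe); // -8 bytes: -2 words, truncated
  return 0;
}
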
@@ -359,6 +464,19 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case ARM::fixup_arm_thumb_bcc:
// Offset by 4 and don't encode the lower bit, which is always 0.
return ((Value - 4) >> 1) & 0xff;
+ case ARM::fixup_arm_pcrel_10_unscaled: {
+ Value = Value - 8; // ARM fixups offset by an additional word and don't
+ // need to adjust for the half-word ordering.
+ bool isAdd = true;
+ if ((int64_t)Value < 0) {
+ Value = -Value;
+ isAdd = false;
+ }
+ // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
+    assert((Value < 256) && "Out of range pc-relative fixup value!");
+ Value = (Value & 0xf) | ((Value & 0xf0) << 4);
+ return Value | (isAdd << 23);
+ }
case ARM::fixup_arm_pcrel_10:
Value = Value - 4; // ARM fixups offset by an additional word and don't
// need to adjust for the half-word ordering.
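
The fixup_arm_pcrel_10_unscaled case above splits an 8-bit magnitude across imm4L/imm4H with the add/subtract choice in bit 23. A self-contained sketch of that packing:

#include <cassert>
#include <cstdint>

// addrmode3 packing: low nibble in [3:0], high nibble in [11:8], U at bit 23.
static uint32_t encodeAM3PCRel(uint64_t Value) {
  Value -= 8; // PC-relative base is the instruction address plus 8.
  bool IsAdd = true;
  if (int64_t(Value) < 0) { Value = -Value; IsAdd = false; }
  assert(Value < 256 && "out of range");
  return uint32_t((Value & 0xf) | ((Value & 0xf0) << 4)) |
         (uint32_t(IsAdd) << 23);
}

int main() {
  // +0x34 from PC: imm4H=3, imm4L=4, U=1.
  assert(encodeAM3PCRel(0x34 + 8) == ((3u << 8) | 4u | (1u << 23)));
  return 0;
}
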
@@ -376,8 +494,8 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
assert ((Value < 256) && "Out of range pc-relative fixup value!");
Value |= isAdd << 23;
- // Same addressing mode as fixup_arm_pcrel_10,
- // but with 16-bit halfwords swapped.
+ // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
+ // swapped.
if (Kind == ARM::fixup_t2_pcrel_10) {
uint32_t swapped = (Value & 0xFFFF0000) >> 16;
swapped |= (Value & 0x0000FFFF) << 16;
@@ -395,22 +513,21 @@ namespace {
// ELF is an ELF of course...
class ELFARMAsmBackend : public ARMAsmBackend {
public:
- Triple::OSType OSType;
+ uint8_t OSABI;
ELFARMAsmBackend(const Target &T, const StringRef TT,
- Triple::OSType _OSType)
- : ARMAsmBackend(T, TT), OSType(_OSType) { }
+ uint8_t _OSABI)
+ : ARMAsmBackend(T, TT), OSABI(_OSABI) { }
- void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const;
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createELFObjectWriter(new ARMELFObjectWriter(OSType), OS,
- /*IsLittleEndian*/ true);
+ return createARMELFObjectWriter(OS, OSABI);
}
};
// FIXME: Raise this to share code between Darwin and ELF.
-void ELFARMAsmBackend::ApplyFixup(const MCFixup &Fixup, char *Data,
+void ELFARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value) const {
unsigned NumBytes = 4; // FIXME: 2 for Thumb
Value = adjustFixupValue(Fixup.getKind(), Value);
@@ -439,7 +556,7 @@ public:
Subtype);
}
- void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const;
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
@@ -464,9 +581,13 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
case ARM::fixup_arm_thumb_cb:
return 2;
+ case ARM::fixup_arm_pcrel_10_unscaled:
case ARM::fixup_arm_ldst_pcrel_12:
case ARM::fixup_arm_pcrel_10:
case ARM::fixup_arm_adr_pcrel_12:
+ case ARM::fixup_arm_uncondbl:
+ case ARM::fixup_arm_condbl:
+ case ARM::fixup_arm_blx:
case ARM::fixup_arm_condbranch:
case ARM::fixup_arm_uncondbranch:
return 3;
@@ -491,7 +612,7 @@ static unsigned getFixupKindNumBytes(unsigned Kind) {
}
}
-void DarwinARMAsmBackend::ApplyFixup(const MCFixup &Fixup, char *Data,
+void DarwinARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value) const {
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
Value = adjustFixupValue(Fixup.getKind(), Value);
@@ -527,5 +648,6 @@ MCAsmBackend *llvm::createARMAsmBackend(const Target &T, StringRef TT) {
if (TheTriple.isOSWindows())
assert(0 && "Windows not supported on ARM");
- return new ELFARMAsmBackend(T, TT, Triple(TT).getOS());
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
+ return new ELFARMAsmBackend(T, TT, OSABI);
}
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
index ec4b6ff..ae11be8 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
@@ -67,7 +67,6 @@ namespace ARMCC {
inline static const char *ARMCondCodeToString(ARMCC::CondCodes CC) {
switch (CC) {
- default: llvm_unreachable("Unknown condition code");
case ARMCC::EQ: return "eq";
case ARMCC::NE: return "ne";
case ARMCC::HS: return "hs";
@@ -84,6 +83,7 @@ inline static const char *ARMCondCodeToString(ARMCC::CondCodes CC) {
case ARMCC::LE: return "le";
case ARMCC::AL: return "al";
}
+ llvm_unreachable("Unknown condition code");
}
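
Moving llvm_unreachable below the switch is the covered-switch idiom: without a default, the compiler can warn when a new enumerator is added, and the trailing unreachable still marks fall-through as impossible. In miniature:

// Sketch of the idiom; __builtin_unreachable stands in for llvm_unreachable.
enum Color { Red, Blue };
static const char *name(Color C) {
  switch (C) { // no default: -Wswitch flags any unhandled enumerator
  case Red:  return "red";
  case Blue: return "blue";
  }
  __builtin_unreachable();
}

int main() { return name(Red)[0] == 'r' ? 0 : 1; }
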
namespace ARM_PROC {
@@ -185,6 +185,39 @@ inline static unsigned getARMRegisterNumbering(unsigned Reg) {
case S29: case D29: return 29;
case S30: case D30: return 30;
case S31: case D31: return 31;
+
+ // Composite registers use the regnum of the first register in the list.
+ /* Q0 */ case D0_D2: return 0;
+ case D1_D2: case D1_D3: return 1;
+ /* Q1 */ case D2_D4: return 2;
+ case D3_D4: case D3_D5: return 3;
+ /* Q2 */ case D4_D6: return 4;
+ case D5_D6: case D5_D7: return 5;
+ /* Q3 */ case D6_D8: return 6;
+ case D7_D8: case D7_D9: return 7;
+ /* Q4 */ case D8_D10: return 8;
+ case D9_D10: case D9_D11: return 9;
+ /* Q5 */ case D10_D12: return 10;
+ case D11_D12: case D11_D13: return 11;
+ /* Q6 */ case D12_D14: return 12;
+ case D13_D14: case D13_D15: return 13;
+ /* Q7 */ case D14_D16: return 14;
+ case D15_D16: case D15_D17: return 15;
+ /* Q8 */ case D16_D18: return 16;
+ case D17_D18: case D17_D19: return 17;
+ /* Q9 */ case D18_D20: return 18;
+ case D19_D20: case D19_D21: return 19;
+ /* Q10 */ case D20_D22: return 20;
+ case D21_D22: case D21_D23: return 21;
+ /* Q11 */ case D22_D24: return 22;
+ case D23_D24: case D23_D25: return 23;
+ /* Q12 */ case D24_D26: return 24;
+ case D25_D26: case D25_D27: return 25;
+ /* Q13 */ case D26_D28: return 26;
+ case D27_D28: case D27_D29: return 27;
+ /* Q14 */ case D28_D30: return 28;
+ case D29_D30: case D29_D31: return 29;
+ /* Q15 */
}
}
@@ -237,7 +270,6 @@ namespace ARMII {
inline static const char *AddrModeToString(AddrMode addrmode) {
switch (addrmode) {
- default: llvm_unreachable("Unknown memory operation");
case AddrModeNone: return "AddrModeNone";
case AddrMode1: return "AddrMode1";
case AddrMode2: return "AddrMode2";
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
new file mode 100644
index 0000000..aa649ba
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -0,0 +1,283 @@
+//===-- ARMELFObjectWriter.cpp - ARM ELF Writer ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/ARMFixupKinds.h"
+#include "MCTargetDesc/ARMMCTargetDesc.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCValue.h"
+
+using namespace llvm;
+
+namespace {
+ class ARMELFObjectWriter : public MCELFObjectTargetWriter {
+ enum { DefaultEABIVersion = 0x05000000U };
+ unsigned GetRelocTypeInner(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
+
+ public:
+ ARMELFObjectWriter(uint8_t OSABI);
+
+ virtual ~ARMELFObjectWriter();
+
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) const;
+ virtual unsigned getEFlags() const;
+ virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
+ };
+}
+
+ARMELFObjectWriter::ARMELFObjectWriter(uint8_t OSABI)
+ : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI,
+ ELF::EM_ARM,
+ /*HasRelocationAddend*/ false) {}
+
+ARMELFObjectWriter::~ARMELFObjectWriter() {}
+
+// FIXME: get the real EABI Version from the Triple.
+unsigned ARMELFObjectWriter::getEFlags() const {
+ return ELF::EF_ARM_EABIMASK & DefaultEABIVersion;
+}
+
+// On ARM, _MergedGlobals and most other symbols are emitted directly,
+// i.e. not as an offset from a section symbol.
+// This code is an approximation of what ARM/gcc does.
+
+STATISTIC(PCRelCount, "Total number of PIC Relocations");
+STATISTIC(NonPCRelCount, "Total number of non-PIC relocations");
+
+const MCSymbol *ARMELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ const MCSymbol &Symbol = Target.getSymA()->getSymbol().AliasedSymbol();
+ bool EmitThisSym = false;
+
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(Symbol.getSection());
+ bool InNormalSection = true;
+ unsigned RelocType = 0;
+ RelocType = GetRelocTypeInner(Target, Fixup, IsPCRel);
+
+ DEBUG(
+ const MCSymbolRefExpr::VariantKind Kind = Target.getSymA()->getKind();
+ MCSymbolRefExpr::VariantKind Kind2;
+ Kind2 = Target.getSymB() ? Target.getSymB()->getKind() :
+ MCSymbolRefExpr::VK_None;
+ dbgs() << "considering symbol "
+ << Section.getSectionName() << "/"
+ << Symbol.getName() << "/"
+ << " Rel:" << (unsigned)RelocType
+ << " Kind: " << (int)Kind << "/" << (int)Kind2
+ << " Tmp:"
+ << Symbol.isAbsolute() << "/" << Symbol.isDefined() << "/"
+ << Symbol.isVariable() << "/" << Symbol.isTemporary()
+ << " Counts:" << PCRelCount << "/" << NonPCRelCount << "\n");
+
+  if (IsPCRel) {
+    ++PCRelCount;
+ switch (RelocType) {
+ default:
+ // Most relocation types are emitted as explicit symbols
+ InNormalSection =
+ StringSwitch<bool>(Section.getSectionName())
+ .Case(".data.rel.ro.local", false)
+ .Case(".data.rel", false)
+ .Case(".bss", false)
+ .Default(true);
+ EmitThisSym = true;
+ break;
+ case ELF::R_ARM_ABS32:
+ // But things get strange with R_ARM_ABS32
+ // In this case, most things that go in .rodata show up
+ // as section relative relocations
+ InNormalSection =
+ StringSwitch<bool>(Section.getSectionName())
+ .Case(".data.rel.ro.local", false)
+ .Case(".data.rel", false)
+ .Case(".rodata", false)
+ .Case(".bss", false)
+ .Default(true);
+ EmitThisSym = false;
+ break;
+ }
+ } else {
+ NonPCRelCount++;
+ InNormalSection =
+ StringSwitch<bool>(Section.getSectionName())
+ .Case(".data.rel.ro.local", false)
+ .Case(".rodata", false)
+ .Case(".data.rel", false)
+ .Case(".bss", false)
+ .Default(true);
+
+ switch (RelocType) {
+ default: EmitThisSym = true; break;
+ case ELF::R_ARM_ABS32: EmitThisSym = false; break;
+ }
+ }
+
+ if (EmitThisSym)
+ return &Symbol;
+  if (!Symbol.isTemporary() && InNormalSection)
+    return &Symbol;
+ return NULL;
+}
+
+// Need to examine the Fixup when determining whether to
+// emit the relocation as an explicit symbol or as a section relative
+// offset
+unsigned ARMELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) const {
+ return GetRelocTypeInner(Target, Fixup, IsPCRel);
+}
+
+unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
+ MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+
+ unsigned Type = 0;
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("Unimplemented");
+ case FK_Data_4:
+ switch (Modifier) {
+ default: llvm_unreachable("Unsupported Modifier");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_ARM_REL32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TLSGD:
+ llvm_unreachable("unimplemented");
+ case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
+ Type = ELF::R_ARM_TLS_IE32;
+ break;
+ }
+ break;
+ case ARM::fixup_arm_uncondbl:
+ case ARM::fixup_arm_blx:
+ case ARM::fixup_arm_uncondbranch:
+ switch (Modifier) {
+ case MCSymbolRefExpr::VK_ARM_PLT:
+ Type = ELF::R_ARM_PLT32;
+ break;
+ default:
+ Type = ELF::R_ARM_CALL;
+ break;
+ }
+ break;
+ case ARM::fixup_arm_condbl:
+ case ARM::fixup_arm_condbranch:
+ Type = ELF::R_ARM_JUMP24;
+ break;
+ case ARM::fixup_arm_movt_hi16:
+ case ARM::fixup_arm_movt_hi16_pcrel:
+ Type = ELF::R_ARM_MOVT_PREL;
+ break;
+ case ARM::fixup_arm_movw_lo16:
+ case ARM::fixup_arm_movw_lo16_pcrel:
+ Type = ELF::R_ARM_MOVW_PREL_NC;
+ break;
+ case ARM::fixup_t2_movt_hi16:
+ case ARM::fixup_t2_movt_hi16_pcrel:
+ Type = ELF::R_ARM_THM_MOVT_PREL;
+ break;
+ case ARM::fixup_t2_movw_lo16:
+ case ARM::fixup_t2_movw_lo16_pcrel:
+ Type = ELF::R_ARM_THM_MOVW_PREL_NC;
+ break;
+ case ARM::fixup_arm_thumb_bl:
+ case ARM::fixup_arm_thumb_blx:
+ Type = ELF::R_ARM_THM_CALL;
+ break;
+ }
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case FK_Data_4:
+ switch (Modifier) {
+ default: llvm_unreachable("Unsupported Modifier");
+ case MCSymbolRefExpr::VK_ARM_GOT:
+ Type = ELF::R_ARM_GOT_BREL;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TLSGD:
+ Type = ELF::R_ARM_TLS_GD32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TPOFF:
+ Type = ELF::R_ARM_TLS_LE32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_GOTTPOFF:
+ Type = ELF::R_ARM_TLS_IE32;
+ break;
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_ARM_ABS32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_GOTOFF:
+ Type = ELF::R_ARM_GOTOFF32;
+ break;
+ case MCSymbolRefExpr::VK_ARM_TARGET1:
+ Type = ELF::R_ARM_TARGET1;
+ break;
+ }
+ break;
+ case ARM::fixup_arm_ldst_pcrel_12:
+ case ARM::fixup_arm_pcrel_10:
+ case ARM::fixup_arm_adr_pcrel_12:
+ case ARM::fixup_arm_thumb_bl:
+ case ARM::fixup_arm_thumb_cb:
+ case ARM::fixup_arm_thumb_cp:
+ case ARM::fixup_arm_thumb_br:
+ llvm_unreachable("Unimplemented");
+ case ARM::fixup_arm_uncondbranch:
+ Type = ELF::R_ARM_CALL;
+ break;
+ case ARM::fixup_arm_condbranch:
+ Type = ELF::R_ARM_JUMP24;
+ break;
+ case ARM::fixup_arm_movt_hi16:
+ Type = ELF::R_ARM_MOVT_ABS;
+ break;
+ case ARM::fixup_arm_movw_lo16:
+ Type = ELF::R_ARM_MOVW_ABS_NC;
+ break;
+ case ARM::fixup_t2_movt_hi16:
+ Type = ELF::R_ARM_THM_MOVT_ABS;
+ break;
+ case ARM::fixup_t2_movw_lo16:
+ Type = ELF::R_ARM_THM_MOVW_ABS_NC;
+ break;
+ }
+ }
+
+ return Type;
+}
+
+MCObjectWriter *llvm::createARMELFObjectWriter(raw_ostream &OS,
+ uint8_t OSABI) {
+ MCELFObjectTargetWriter *MOTW = new ARMELFObjectWriter(OSABI);
+ return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true);
+}
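
Condensed, the branch-relocation choice in GetRelocTypeInner follows one rule: conditional ARM branches take R_ARM_JUMP24 because BLX has no conditional form, so the linker cannot rewrite them in place; unconditional calls take R_ARM_CALL, or R_ARM_PLT32 through the PLT. As a sketch:

#include <cassert>

enum RelocKind { PLT32, CALL, JUMP24 };

static RelocKind armBranchReloc(bool IsPLT, bool IsConditional) {
  if (IsConditional)
    return JUMP24; // linker must use a veneer, never an inline BLX
  return IsPLT ? PLT32 : CALL;
}

int main() {
  assert(armBranchReloc(false, false) == CALL);
  assert(armBranchReloc(true, false) == PLT32);
  assert(armBranchReloc(false, true) == JUMP24);
  return 0;
}
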
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
index 350c92d..0085feb 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
@@ -1,4 +1,4 @@
-//===-- ARM/ARMFixupKinds.h - ARM Specific Fixup Entries --------*- C++ -*-===//
+//===-- ARMFixupKinds.h - ARM Specific Fixup Entries ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,6 +23,9 @@ enum Fixups {
// the 16-bit halfwords reordered.
fixup_t2_ldst_pcrel_12,
+ // fixup_arm_pcrel_10_unscaled - 10-bit PC relative relocation for symbol
+ // addresses used in LDRD/LDRH/LDRB/etc. instructions. All bits are encoded.
+ fixup_arm_pcrel_10_unscaled,
// fixup_arm_pcrel_10 - 10-bit PC relative relocation for symbol addresses
// used in VFP instructions where the lower 2 bits are not encoded
// (so it's encoded as an 8-bit immediate).
@@ -56,6 +59,25 @@ enum Fixups {
// fixup_arm_thumb_br - 12-bit fixup for Thumb B instructions.
fixup_arm_thumb_br,
+ // The following fixups handle the ARM BL instructions. These can be
+ // conditionalised; however, the ARM ELF ABI requires a different relocation
+ // in that case: R_ARM_JUMP24 instead of R_ARM_CALL. The difference is that
+ // R_ARM_CALL is allowed to change the instruction to a BLX inline, which has
+ // no conditional version; R_ARM_JUMP24 would have to insert a veneer.
+ //
+ // MachO does not draw a distinction between the two cases, so it will treat
+ // fixup_arm_uncondbl and fixup_arm_condbl as identical fixups.
+
+ // fixup_arm_uncondbl - Fixup for unconditional ARM BL instructions.
+ fixup_arm_uncondbl,
+
+ // fixup_arm_condbl - Fixup for ARM BL instructions with nontrivial
+ // conditionalisation.
+ fixup_arm_condbl,
+
+ // fixup_arm_blx - Fixup for ARM BLX instructions.
+ fixup_arm_blx,
+
// fixup_arm_thumb_bl - Fixup for Thumb BL instructions.
fixup_arm_thumb_bl,
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
index 1c109e0..03e8d5f 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp
@@ -1,4 +1,4 @@
-//===-- ARMMCAsmInfo.cpp - ARM asm properties -------------------*- C++ -*-===//
+//===-- ARMMCAsmInfo.cpp - ARM asm properties -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -48,6 +48,8 @@ static const char *const arm_asm_table[] = {
0,0
};
+void ARMMCAsmInfoDarwin::anchor() { }
+
ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin() {
AsmTransCBE = arm_asm_table;
Data64bitsDirective = 0;
@@ -61,6 +63,8 @@ ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin() {
ExceptionsType = ExceptionHandling::SjLj;
}
+void ARMELFMCAsmInfo::anchor() { }
+
ARMELFMCAsmInfo::ARMELFMCAsmInfo() {
// ".comm align is in bytes but .align is pow-2."
AlignmentIsInBytes = false;
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
index 90f7822..f0b289c 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- ARMMCAsmInfo.h - ARM asm properties -------------*- C++ -*--====//
+//===-- ARMMCAsmInfo.h - ARM asm properties ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,11 +18,15 @@
namespace llvm {
- struct ARMMCAsmInfoDarwin : public MCAsmInfoDarwin {
+ class ARMMCAsmInfoDarwin : public MCAsmInfoDarwin {
+ virtual void anchor();
+ public:
explicit ARMMCAsmInfoDarwin();
};
- struct ARMELFMCAsmInfo : public MCAsmInfo {
+ class ARMELFMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
+ public:
explicit ARMELFMCAsmInfo();
};
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index 865c3e2..10d1c48 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -64,7 +64,7 @@ public:
// getBinaryCodeForInstr - TableGen'erated function for getting the
// binary encoding for an instruction.
- unsigned getBinaryCodeForInstr(const MCInst &MI,
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
SmallVectorImpl<MCFixup> &Fixups) const;
/// getMachineOpValue - Return binary encoding of operand. If the machine
@@ -118,8 +118,10 @@ public:
/// branch target.
uint32_t getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups) const;
+ uint32_t getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
uint32_t getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups) const;
/// getAdrLabelOpValue - Return encoding info for 12-bit immediate
/// ADR label target.
@@ -166,7 +168,7 @@ public:
SmallVectorImpl<MCFixup> &Fixups) const {
ARM_AM::AMSubMode Mode = (ARM_AM::AMSubMode)MI.getOperand(OpIdx).getImm();
switch (Mode) {
- default: assert(0 && "Unknown addressing sub-mode!");
+ default: llvm_unreachable("Unknown addressing sub-mode!");
case ARM_AM::da: return 0;
case ARM_AM::ia: return 1;
case ARM_AM::db: return 2;
@@ -177,7 +179,6 @@ public:
///
unsigned getShiftOp(ARM_AM::ShiftOpc ShOpc) const {
switch (ShOpc) {
- default: llvm_unreachable("Unknown shift opc!");
case ARM_AM::no_shift:
case ARM_AM::lsl: return 0;
case ARM_AM::lsr: return 1;
@@ -185,7 +186,7 @@ public:
case ARM_AM::ror:
case ARM_AM::rrx: return 3;
}
- return 0;
+ llvm_unreachable("Invalid ShiftOpc!");
}
/// getAddrMode2OpValue - Return encoding for addrmode2 operands.
@@ -423,7 +424,6 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
}
llvm_unreachable("Unable to encode MCOperand!");
- return 0;
}
/// getAddrModeImmOpValue - Return encoding info for 'reg +/- imm' operand.
@@ -466,7 +466,7 @@ static uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
assert(MO.isExpr() && "Unexpected branch target type!");
const MCExpr *Expr = MO.getExpr();
MCFixupKind Kind = MCFixupKind(FixupKind);
- Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
// All of the information is in the fixup.
return 0;
@@ -594,17 +594,26 @@ getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
}
uint32_t ARMMCCodeEmitter::
-getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
+getARMBLTargetOpValue(const MCInst &MI, unsigned OpIdx,
SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand MO = MI.getOperand(OpIdx);
if (MO.isExpr()) {
if (HasConditionalBranch(MI))
- return ::getBranchTargetOpValue(MI, OpIdx,
- ARM::fixup_arm_condbranch, Fixups);
- return ::getBranchTargetOpValue(MI, OpIdx,
- ARM::fixup_arm_uncondbranch, Fixups);
+ return ::getBranchTargetOpValue(MI, OpIdx,
+ ARM::fixup_arm_condbl, Fixups);
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_uncondbl, Fixups);
}
+ return MO.getImm() >> 2;
+}
+
+uint32_t ARMMCCodeEmitter::
+getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand MO = MI.getOperand(OpIdx);
+ if (MO.isExpr())
+ return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_blx, Fixups);
+
return MO.getImm() >> 1;
}
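
The two shifts above encode the alignment guarantees: BL immediates are word-scaled, while BLX immediates keep halfword granularity since a BLX may land on 2-byte-aligned Thumb code. Illustrative only:

#include <cassert>
#include <cstdint>

static uint32_t blImm(int64_t ByteOffset)  { return uint32_t(ByteOffset >> 2); }
static uint32_t blxImm(int64_t ByteOffset) { return uint32_t(ByteOffset >> 1); }

int main() {
  assert(blImm(8) == 2);  // two words
  assert(blxImm(6) == 3); // three halfwords
  return 0;
}
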
@@ -718,12 +727,13 @@ getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx,
Kind = MCFixupKind(ARM::fixup_t2_ldst_pcrel_12);
else
Kind = MCFixupKind(ARM::fixup_arm_ldst_pcrel_12);
- Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
++MCNumCPRelocations;
} else {
Reg = ARM::PC;
int32_t Offset = MO.getImm();
+ // FIXME: Handle #-0.
if (Offset < 0) {
Offset *= -1;
isAdd = false;
@@ -791,8 +801,8 @@ getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx,
assert(MO.isExpr() && "Unexpected machine operand type!");
const MCExpr *Expr = MO.getExpr();
- MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_pcrel_10);
- Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+ MCFixupKind Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
++MCNumCPRelocations;
} else
@@ -833,7 +843,7 @@ getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx,
// but this is good enough for now.
static bool EvaluateAsPCRel(const MCExpr *Expr) {
switch (Expr->getKind()) {
- default: assert(0 && "Unexpected expression type");
+ default: llvm_unreachable("Unexpected expression type");
case MCExpr::SymbolRef: return false;
case MCExpr::Binary: return true;
}
@@ -857,7 +867,7 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
MCFixupKind Kind;
switch (ARM16Expr->getKind()) {
- default: assert(0 && "Unsupported ARMFixup");
+ default: llvm_unreachable("Unsupported ARMFixup");
case ARMMCExpr::VK_ARM_HI16:
if (!isTargetDarwin() && EvaluateAsPCRel(E))
Kind = MCFixupKind(isThumb2()
@@ -879,12 +889,11 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
: ARM::fixup_arm_movw_lo16);
break;
}
- Fixups.push_back(MCFixup::Create(0, E, Kind));
+ Fixups.push_back(MCFixup::Create(0, E, Kind, MI.getLoc()));
return 0;
};
llvm_unreachable("Unsupported MCExpr type in MCOperand!");
- return 0;
}
uint32_t ARMMCCodeEmitter::
@@ -993,6 +1002,19 @@ getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx,
const MCOperand &MO = MI.getOperand(OpIdx);
const MCOperand &MO1 = MI.getOperand(OpIdx+1);
const MCOperand &MO2 = MI.getOperand(OpIdx+2);
+
+  // If the first operand isn't a register, we have a label reference.
+ if (!MO.isReg()) {
+ unsigned Rn = getARMRegisterNumbering(ARM::PC); // Rn is PC.
+
+ assert(MO.isExpr() && "Unexpected machine operand type!");
+ const MCExpr *Expr = MO.getExpr();
+ MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_pcrel_10_unscaled);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
+
+ ++MCNumCPRelocations;
+ return (Rn << 9) | (1 << 13);
+ }
unsigned Rn = getARMRegisterNumbering(MO.getReg());
unsigned Imm = MO2.getImm();
bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add;
@@ -1066,7 +1088,7 @@ getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx,
Kind = MCFixupKind(ARM::fixup_t2_pcrel_10);
else
Kind = MCFixupKind(ARM::fixup_arm_pcrel_10);
- Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind, MI.getLoc()));
++MCNumCPRelocations;
} else {
@@ -1312,8 +1334,8 @@ getRegisterListOpValue(const MCInst &MI, unsigned Op,
// LDM/STM:
// {15-0} = Bitfield of GPRs.
unsigned Reg = MI.getOperand(Op).getReg();
- bool SPRRegs = llvm::ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg);
- bool DPRRegs = llvm::ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg);
+ bool SPRRegs = ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg);
+ bool DPRRegs = ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg);
unsigned Binary = 0;
@@ -1372,11 +1394,11 @@ getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op,
switch (Imm.getImm()) {
default: break;
- case 2:
- case 4:
case 8:
- case 16: Align = 0x00; break;
- case 32: Align = 0x03; break;
+ case 16:
+ case 32: // Default '0' value for invalid alignments of 8, 16, 32 bytes.
+ case 2: Align = 0x00; break;
+ case 4: Align = 0x03; break;
}
return RegNo | (Align << 4);
@@ -1412,7 +1434,7 @@ getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op,
SmallVectorImpl<MCFixup> &Fixups) const {
const MCOperand &MO = MI.getOperand(Op);
if (MO.getReg() == 0) return 0x0D;
- return MO.getReg();
+ return getARMRegisterNumbering(MO.getReg());
}
unsigned ARMMCCodeEmitter::
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp
index 2727ba8..22e14a2 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp
@@ -21,7 +21,7 @@ ARMMCExpr::Create(VariantKind Kind, const MCExpr *Expr,
void ARMMCExpr::PrintImpl(raw_ostream &OS) const {
switch (Kind) {
- default: assert(0 && "Invalid kind!");
+ default: llvm_unreachable("Invalid kind!");
case VK_ARM_HI16: OS << ":upper16:"; break;
case VK_ARM_LO16: OS << ":lower16:"; break;
}
@@ -45,8 +45,7 @@ ARMMCExpr::EvaluateAsRelocatableImpl(MCValue &Res,
static void AddValueSymbols_(const MCExpr *Value, MCAssembler *Asm) {
switch (Value->getKind()) {
case MCExpr::Target:
- assert(0 && "Can't handle nested target expr!");
- break;
+ llvm_unreachable("Can't handle nested target expr!");
case MCExpr::Constant:
break;
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
index 0a2e883..a727e08 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h
@@ -1,4 +1,4 @@
-//===-- ARMMCExpr.h - ARM specific MC expression classes ------------------===//
+//===-- ARMMCExpr.h - ARM specific MC expression classes --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index a55c410..e3512cd 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- ARMMCTargetDesc.cpp - ARM Target Descriptions -----------*- C++ -*-===//
+//===-- ARMMCTargetDesc.cpp - ARM Target Descriptions ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -89,14 +89,6 @@ std::string ARM_MC::ParseARMTriple(StringRef TT) {
ARMArchFeature += ",+thumb-mode";
}
- Triple TheTriple(TT);
- if (TheTriple.getOS() == Triple::NativeClient) {
- if (ARMArchFeature.empty())
- ARMArchFeature = "+nacl-mode";
- else
- ARMArchFeature += ",+nacl-mode";
- }
-
return ARMArchFeature;
}
@@ -137,14 +129,15 @@ static MCAsmInfo *createARMMCAsmInfo(const Target &T, StringRef TT) {
}
static MCCodeGenInfo *createARMMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
if (RM == Reloc::Default) {
Triple TheTriple(TT);
// Default relocation model on Darwin is PIC, not DynamicNoPIC.
RM = TheTriple.isOSDarwin() ? Reloc::PIC_ : Reloc::DynamicNoPIC;
}
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
@@ -158,22 +151,23 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
Triple TheTriple(TT);
if (TheTriple.isOSDarwin())
- return createMachOStreamer(Ctx, MAB, OS, Emitter, RelaxAll);
+ return createMachOStreamer(Ctx, MAB, OS, Emitter, false);
if (TheTriple.isOSWindows()) {
llvm_unreachable("ARM does not support Windows COFF format");
- return NULL;
}
- return createELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ return createELFStreamer(Ctx, MAB, OS, Emitter, false, NoExecStack);
}
static MCInstPrinter *createARMMCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
- return new ARMInstPrinter(MAI, STI);
+ return new ARMInstPrinter(MAI, MII, MRI, STI);
return 0;
}
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
index 9b3d3bd..88472d7 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
@@ -46,6 +46,10 @@ MCCodeEmitter *createARMMCCodeEmitter(const MCInstrInfo &MCII,
MCAsmBackend *createARMAsmBackend(const Target &T, StringRef TT);
+/// createARMELFObjectWriter - Construct an ARM ELF object writer.
+MCObjectWriter *createARMELFObjectWriter(raw_ostream &OS,
+ uint8_t OSABI);
+
/// createARMMachObjectWriter - Construct an ARM Mach-O object writer.
MCObjectWriter *createARMMachObjectWriter(raw_ostream &OS,
bool Is64Bit,
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index 352c73e..8057cb6 100644
--- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -13,9 +13,11 @@
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCMachOSymbolFlags.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Object/MachOFormat.h"
#include "llvm/Support/ErrorHandling.h"
@@ -32,12 +34,12 @@ class ARMMachObjectWriter : public MCMachObjectTargetWriter {
MCValue Target,
unsigned Log2Size,
uint64_t &FixedValue);
- void RecordARMMovwMovtRelocation(MachObjectWriter *Writer,
- const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup, MCValue Target,
- uint64_t &FixedValue);
+ void RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
+ const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue);
public:
ARMMachObjectWriter(bool Is64Bit, uint32_t CPUType,
@@ -80,6 +82,9 @@ static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
case ARM::fixup_arm_adr_pcrel_12:
case ARM::fixup_arm_condbranch:
case ARM::fixup_arm_uncondbranch:
+ case ARM::fixup_arm_uncondbl:
+ case ARM::fixup_arm_condbl:
+ case ARM::fixup_arm_blx:
RelocType = unsigned(macho::RIT_ARM_Branch24Bit);
// Report as 'long', even though that is not quite accurate.
Log2Size = llvm::Log2_32(4);
@@ -98,34 +103,47 @@ static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
Log2Size = llvm::Log2_32(4);
return true;
+ // For movw/movt r_type relocations they always have a pair following them and
+ // the r_length bits are used differently. The encoding of the r_length is as
+ // follows:
+ // low bit of r_length:
+ // 0 - :lower16: for movw instructions
+ // 1 - :upper16: for movt instructions
+ // high bit of r_length:
+ // 0 - arm instructions
+ // 1 - thumb instructions
case ARM::fixup_arm_movt_hi16:
case ARM::fixup_arm_movt_hi16_pcrel:
+ RelocType = unsigned(macho::RIT_ARM_Half);
+ Log2Size = 1;
+ return true;
case ARM::fixup_t2_movt_hi16:
case ARM::fixup_t2_movt_hi16_pcrel:
- RelocType = unsigned(macho::RIT_ARM_HalfDifference);
- // Report as 'long', even though that is not quite accurate.
- Log2Size = llvm::Log2_32(4);
+ RelocType = unsigned(macho::RIT_ARM_Half);
+ Log2Size = 3;
return true;
case ARM::fixup_arm_movw_lo16:
case ARM::fixup_arm_movw_lo16_pcrel:
+ RelocType = unsigned(macho::RIT_ARM_Half);
+ Log2Size = 0;
+ return true;
case ARM::fixup_t2_movw_lo16:
case ARM::fixup_t2_movw_lo16_pcrel:
RelocType = unsigned(macho::RIT_ARM_Half);
- // Report as 'long', even though that is not quite accurate.
- Log2Size = llvm::Log2_32(4);
+ Log2Size = 2;
return true;
}
}
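
The Log2Size values assigned above implement the r_length table from the comment; the mapping is just two packed bits:

#include <cassert>

// r_length for RIT_ARM_Half: bit 0 selects movw (:lower16:) vs movt
// (:upper16:), bit 1 selects ARM vs Thumb2 encodings.
static unsigned halfRelocLength(bool IsMovt, bool IsThumb) {
  return (unsigned(IsThumb) << 1) | unsigned(IsMovt);
}

int main() {
  assert(halfRelocLength(false, false) == 0); // arm movw
  assert(halfRelocLength(true, false) == 1);  // arm movt
  assert(halfRelocLength(false, true) == 2);  // thumb movw
  assert(halfRelocLength(true, true) == 3);   // thumb movt
  return 0;
}
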
void ARMMachObjectWriter::
-RecordARMMovwMovtRelocation(MachObjectWriter *Writer,
- const MCAssembler &Asm,
- const MCAsmLayout &Layout,
- const MCFragment *Fragment,
- const MCFixup &Fixup,
- MCValue Target,
- uint64_t &FixedValue) {
+RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
+ const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup,
+ MCValue Target,
+ uint64_t &FixedValue) {
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
unsigned Type = macho::RIT_ARM_Half;
@@ -135,7 +153,8 @@ RecordARMMovwMovtRelocation(MachObjectWriter *Writer,
MCSymbolData *A_SD = &Asm.getSymbolData(*A);
if (!A_SD->getFragment())
- report_fatal_error("symbol '" + A->getName() +
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "symbol '" + A->getName() +
"' can not be undefined in a subtraction expression");
uint32_t Value = Writer->getSymbolAddress(A_SD, Layout);
@@ -148,7 +167,8 @@ RecordARMMovwMovtRelocation(MachObjectWriter *Writer,
MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
if (!B_SD->getFragment())
- report_fatal_error("symbol '" + B->getSymbol().getName() +
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "symbol '" + B->getSymbol().getName() +
"' can not be undefined in a subtraction expression");
// Select the appropriate difference relocation type.
@@ -178,9 +198,16 @@ RecordARMMovwMovtRelocation(MachObjectWriter *Writer,
case ARM::fixup_arm_movt_hi16:
case ARM::fixup_arm_movt_hi16_pcrel:
MovtBit = 1;
+ // The thumb bit shouldn't be set in the 'other-half' bit of the
+ // relocation, but it will be set in FixedValue if the base symbol
+ // is a thumb function. Clear it out here.
+ if (A_SD->getFlags() & SF_ThumbFunc)
+ FixedValue &= 0xfffffffe;
break;
case ARM::fixup_t2_movt_hi16:
case ARM::fixup_t2_movt_hi16_pcrel:
+ if (A_SD->getFlags() & SF_ThumbFunc)
+ FixedValue &= 0xfffffffe;
MovtBit = 1;
// Fallthrough
case ARM::fixup_t2_movw_lo16:
@@ -189,7 +216,6 @@ RecordARMMovwMovtRelocation(MachObjectWriter *Writer,
break;
}
-
if (Type == macho::RIT_ARM_HalfDifference) {
uint32_t OtherHalf = MovtBit
? (FixedValue & 0xffff) : ((FixedValue & 0xffff0000) >> 16);
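
The other-half selection above picks whichever 16 bits the current instruction does not encode; a movt carries the high half itself, so its pair carries the low half, and vice versa. Sketch:

#include <cassert>
#include <cstdint>

static uint32_t otherHalf(bool MovtBit, uint32_t FixedValue) {
  return MovtBit ? (FixedValue & 0xffff) : ((FixedValue & 0xffff0000) >> 16);
}

int main() {
  assert(otherHalf(true, 0x12345678) == 0x5678);  // movt pairs with low half
  assert(otherHalf(false, 0x12345678) == 0x1234); // movw pairs with high half
  return 0;
}
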
@@ -233,7 +259,8 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
MCSymbolData *A_SD = &Asm.getSymbolData(*A);
if (!A_SD->getFragment())
- report_fatal_error("symbol '" + A->getName() +
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "symbol '" + A->getName() +
"' can not be undefined in a subtraction expression");
uint32_t Value = Writer->getSymbolAddress(A_SD, Layout);
@@ -245,7 +272,8 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer,
MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
if (!B_SD->getFragment())
- report_fatal_error("symbol '" + B->getSymbol().getName() +
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "symbol '" + B->getSymbol().getName() +
"' can not be undefined in a subtraction expression");
// Select the appropriate difference relocation type.
@@ -287,19 +315,21 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind());
unsigned Log2Size;
unsigned RelocType = macho::RIT_Vanilla;
- if (!getARMFixupKindMachOInfo(Fixup.getKind(), RelocType, Log2Size)) {
- report_fatal_error("unknown ARM fixup kind!");
- return;
- }
+ if (!getARMFixupKindMachOInfo(Fixup.getKind(), RelocType, Log2Size))
+ // If we failed to get fixup kind info, it's because there's no legal
+ // relocation type for the fixup kind. This happens when it's a fixup that's
+ // expected to always be resolvable at assembly time and not have any
+ // relocations needed.
+ Asm.getContext().FatalError(Fixup.getLoc(),
+ "unsupported relocation on symbol");
// If this is a difference or a defined symbol plus an offset, then we need a
// scattered relocation entry. Differences always require scattered
// relocations.
if (Target.getSymB()) {
- if (RelocType == macho::RIT_ARM_Half ||
- RelocType == macho::RIT_ARM_HalfDifference)
- return RecordARMMovwMovtRelocation(Writer, Asm, Layout, Fragment, Fixup,
- Target, FixedValue);
+ if (RelocType == macho::RIT_ARM_Half)
+ return RecordARMScatteredHalfRelocation(Writer, Asm, Layout, Fragment,
+ Fixup, Target, FixedValue);
return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup,
Target, Log2Size, FixedValue);
}
@@ -374,6 +404,30 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer,
(Log2Size << 25) |
(IsExtern << 27) |
(Type << 28));
+
+ // Even when it's not a scattered relocation, movw/movt always uses
+ // a PAIR relocation.
+ if (Type == macho::RIT_ARM_Half) {
+ // The other-half value only gets populated for the movt relocation.
+      uint32_t Value = 0;
+ switch ((unsigned)Fixup.getKind()) {
+ default: break;
+ case ARM::fixup_arm_movt_hi16:
+ case ARM::fixup_arm_movt_hi16_pcrel:
+ case ARM::fixup_t2_movt_hi16:
+ case ARM::fixup_t2_movt_hi16_pcrel:
+ Value = FixedValue;
+ break;
+ }
+ macho::RelocationEntry MREPair;
+ MREPair.Word0 = Value;
+ MREPair.Word1 = ((0xffffff) |
+ (Log2Size << 25) |
+ (macho::RIT_Pair << 28));
+
+ Writer->addRelocation(Fragment->getParent(), MREPair);
+ }
+
Writer->addRelocation(Fragment->getParent(), MRE);
}
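
The synthesized PAIR entry keeps the fixed layout used above: the address field pinned to 0xffffff, r_length in bits 25-26, and the PAIR type in the top nibble. A sketch, assuming macho::RIT_Pair == 1 (the generic Mach-O PAIR value):

#include <cassert>
#include <cstdint>

static uint32_t pairWord1(uint32_t Log2Size) {
  const uint32_t RIT_Pair = 1; // assumed; mirrors GENERIC_RELOC_PAIR
  return 0xffffff | (Log2Size << 25) | (RIT_Pair << 28);
}

int main() {
  assert((pairWord1(1) >> 28) == 1);          // type nibble
  assert((pairWord1(1) & 0xffffff) == 0xffffff); // pinned address field
  return 0;
}
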
diff --git a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
index 2df0053..2899836 100644
--- a/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -1,4 +1,4 @@
-//===-- MLxExpansionPass.cpp - Expand MLx instrs to avoid hazards ----------=//
+//===-- MLxExpansionPass.cpp - Expand MLx instrs to avoid hazards ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -139,7 +139,7 @@ bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const {
// FIXME: Detect integer instructions properly.
const MCInstrDesc &MCID = MI->getDesc();
unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
- if (MCID.mayStore())
+ if (MI->mayStore())
return false;
unsigned Opcode = MCID.getOpcode();
if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
@@ -222,14 +222,14 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
const MCInstrDesc &MCID2 = TII->get(AddSubOpc);
unsigned TmpReg = MRI->createVirtualRegister(TII->getRegClass(MCID1, 0, TRI));
- MachineInstrBuilder MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), MCID1, TmpReg)
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), MCID1, TmpReg)
.addReg(Src1Reg, getKillRegState(Src1Kill))
.addReg(Src2Reg, getKillRegState(Src2Kill));
if (HasLane)
MIB.addImm(LaneImm);
MIB.addImm(Pred).addReg(PredReg);
- MIB = BuildMI(MBB, *MI, MI->getDebugLoc(), MCID2)
+ MIB = BuildMI(MBB, MI, MI->getDebugLoc(), MCID2)
.addReg(DstReg, getDefRegState(true) | getDeadRegState(DstDead));
if (NegAcc) {
@@ -274,7 +274,7 @@ bool MLxExpansion::ExpandFPMLxInstructions(MachineBasicBlock &MBB) {
}
const MCInstrDesc &MCID = MI->getDesc();
- if (MCID.isBarrier()) {
+ if (MI->isBarrier()) {
clearStack();
Skip = 0;
++MII;
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index d848177..edd73c2 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -1,4 +1,4 @@
-//======- Thumb1FrameLowering.cpp - Thumb1 Frame Information ---*- C++ -*-====//
+//===-- Thumb1FrameLowering.cpp - Thumb1 Frame Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "Thumb1FrameLowering.h"
-#include "ARMBaseInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -101,7 +100,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
case ARM::R11:
if (Reg == FramePtr)
FramePtrSpillFI = FI;
- if (STI.isTargetDarwin()) {
+ if (STI.isTargetIOS()) {
AFI->addGPRCalleeSavedArea2Frame(FI);
GPRCS2Size += 4;
} else {
@@ -175,14 +174,14 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setShouldRestoreSPFromFP(true);
}
-static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
+static bool isCalleeSavedRegister(unsigned Reg, const uint16_t *CSRegs) {
for (unsigned i = 0; CSRegs[i]; ++i)
if (Reg == CSRegs[i])
return true;
return false;
}
-static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) {
+static bool isCSRestore(MachineInstr *MI, const uint16_t *CSRegs) {
if (MI->getOpcode() == ARM::tLDRspi &&
MI->getOperand(1).isFI() &&
isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs))
@@ -214,7 +213,7 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
int NumBytes = (int)MFI->getStackSize();
- const unsigned *CSRegs = RegInfo->getCalleeSavedRegs();
+ const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
if (!AFI->hasStackFrame()) {
@@ -278,8 +277,11 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, VARegSaveSize);
- AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg))
- .addReg(ARM::R3, RegState::Kill));
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg))
+ .addReg(ARM::R3, RegState::Kill);
+ AddDefaultPred(MIB);
+ MIB->copyImplicitOps(&*MBBI);
// erase the old tBX_RET instruction
MBB.erase(MBBI);
}
@@ -350,6 +352,7 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
continue;
Reg = ARM::PC;
(*MIB).setDesc(TII.get(ARM::tPOP_RET));
+ MIB->copyImplicitOps(&*MI);
MI = MBB.erase(MI);
}
MIB.addReg(Reg, getDefRegState(true));
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index 218311d..e03e758 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- Thumb1InstrInfo.cpp - Thumb-1 Instruction Information ----*- C++ -*-===//
+//===-- Thumb1InstrInfo.cpp - Thumb-1 Instruction Information -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,14 +13,11 @@
#include "Thumb1InstrInfo.h"
#include "ARM.h"
-#include "ARMMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/ADT/SmallVector.h"
-#include "Thumb1InstrInfo.h"
+#include "llvm/MC/MCInst.h"
using namespace llvm;
@@ -28,6 +25,15 @@ Thumb1InstrInfo::Thumb1InstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
}
+/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+void Thumb1InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
+ NopInst.setOpcode(ARM::tMOVr);
+ NopInst.addOperand(MCOperand::CreateReg(ARM::R8));
+ NopInst.addOperand(MCOperand::CreateReg(ARM::R8));
+ NopInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ NopInst.addOperand(MCOperand::CreateReg(0));
+}
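
As a cross-check, tMOVr r8, r8 assembles to 0x46c0, the same constant used for Thumb1_16bitNopEncoding in the assembler backend earlier in this change. A sketch of the T1 high-register MOV encoding:

#include <cassert>
#include <cstdint>

// MOV (register, T1): 0x4600 | D<<7 | Rm<<3 | Rd[2:0], with D = Rd bit 3.
static uint16_t encodeTMov(unsigned Rd, unsigned Rm) {
  return uint16_t(0x4600 | ((Rd & 8) << 4) | ((Rm & 0xf) << 3) | (Rd & 7));
}

int main() {
  assert(encodeTMov(8, 8) == 0x46c0); // the Thumb1 NOP
  return 0;
}
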
+
unsigned Thumb1InstrInfo::getUnindexedOpcode(unsigned Opc) const {
return 0;
}
@@ -60,8 +66,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
MachineMemOperand::MOStore,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
@@ -89,8 +94,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
index 17ef2f7..36af204 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
@@ -1,4 +1,4 @@
-//===- Thumb1InstrInfo.h - Thumb-1 Instruction Information ------*- C++ -*-===//
+//===-- Thumb1InstrInfo.h - Thumb-1 Instruction Information -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,9 +14,8 @@
#ifndef THUMB1INSTRUCTIONINFO_H
#define THUMB1INSTRUCTIONINFO_H
-#include "llvm/Target/TargetInstrInfo.h"
#include "ARM.h"
-#include "ARMInstrInfo.h"
+#include "ARMBaseInstrInfo.h"
#include "Thumb1RegisterInfo.h"
namespace llvm {
@@ -27,6 +26,9 @@ class Thumb1InstrInfo : public ARMBaseInstrInfo {
public:
explicit Thumb1InstrInfo(const ARMSubtarget &STI);
+ /// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+ void getNoopForMachoTarget(MCInst &NopInst) const;
+
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is not such an opcode.
unsigned getUnindexedOpcode(unsigned Opc) const;
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
index e8ed482..ef77bbd 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- Thumb1RegisterInfo.cpp - Thumb-1 Register Information ----*- C++ -*-===//
+//===-- Thumb1RegisterInfo.cpp - Thumb-1 Register Information -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,12 +12,11 @@
//
//===----------------------------------------------------------------------===//
+#include "Thumb1RegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
-#include "Thumb1InstrInfo.h"
-#include "Thumb1RegisterInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -28,6 +27,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
@@ -570,6 +570,11 @@ Thumb1RegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB,
// If this instruction affects R12, adjust our restore point.
for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = II->getOperand(i);
+ if (MO.isRegMask() && MO.clobbersPhysReg(ARM::R12)) {
+ UseMI = II;
+ done = true;
+ break;
+ }
if (!MO.isReg() || MO.isUndef() || !MO.getReg() ||
TargetRegisterInfo::isVirtualRegister(MO.getReg()))
continue;
@@ -624,6 +629,21 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
FrameReg = BasePtr;
}
+ // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
+ // call frame setup/destroy instructions have already been eliminated. That
+ // means the stack pointer cannot be used to access the emergency spill slot
+ // when !hasReservedCallFrame().
+#ifndef NDEBUG
+ if (RS && FrameReg == ARM::SP && FrameIndex == RS->getScavengingFrameIndex()){
+ assert(MF.getTarget().getFrameLowering()->hasReservedCallFrame(MF) &&
+ "Cannot use SP to access the emergency spill slot in "
+ "functions without a reserved call frame");
+ assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
+ "Cannot use SP to access the emergency spill slot in "
+ "functions with variable sized frame objects");
+ }
+#endif // NDEBUG
+
// Special handling of dbg_value instructions.
if (MI.isDebugValue()) {
MI.getOperand(i).ChangeToRegister(FrameReg, false /*isDef*/);
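The NDEBUG-guarded assertions a few lines up protect a subtle invariant:
when the call frame is not reserved, SP is temporarily adjusted around
calls, so an SP-relative offset to the scavenger's emergency slot that was
computed assuming an unadjusted SP would be wrong inside the adjusted
region. A hypothetical sketch of the failure mode being ruled out:

    sub  sp, sp, #8        @ call-frame setup lowers SP
    str  r0, [sp, #off]    @ emergency spill: 'off' assumed the unadjusted SP
    bl   callee
    add  sp, sp, #8        @ call-frame destroy restores SP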
@@ -643,14 +663,13 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
assert(Offset && "This code isn't needed if offset already handled!");
unsigned Opcode = MI.getOpcode();
- const MCInstrDesc &Desc = MI.getDesc();
// Remove predicate first.
int PIdx = MI.findFirstPredOperandIdx();
if (PIdx != -1)
removeOperands(MI, PIdx);
- if (Desc.mayLoad()) {
+ if (MI.mayLoad()) {
// Use the destination register to materialize sp + offset.
unsigned TmpReg = MI.getOperand(0).getReg();
bool UseRR = false;
@@ -673,7 +692,7 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
// register. The offset is already handled in the vreg value.
MI.getOperand(i+1).ChangeToRegister(FrameReg, false, false, false);
- } else if (Desc.mayStore()) {
+ } else if (MI.mayStore()) {
VReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass);
bool UseRR = false;
@@ -695,11 +714,11 @@ Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// register. The offset is already handled in the vreg value.
MI.getOperand(i+1).ChangeToRegister(FrameReg, false, false, false);
} else {
- assert(false && "Unexpected opcode!");
+ llvm_unreachable("Unexpected opcode!");
}
// Add predicate back if it's needed.
- if (MI.getDesc().isPredicable()) {
+ if (MI.isPredicable()) {
MachineInstrBuilder MIB(&MI);
AddDefaultPred(MIB);
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
index 9060e59..6971842 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
@@ -16,13 +16,12 @@
#define THUMB1REGISTERINFO_H
#include "ARM.h"
-#include "ARMRegisterInfo.h"
+#include "ARMBaseRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class ARMSubtarget;
class ARMBaseInstrInfo;
- class Type;
struct Thumb1RegisterInfo : public ARMBaseRegisterInfo {
public:
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
index b627400..ecb4c2f 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -1,4 +1,4 @@
-//===-- Thumb2ITBlockPass.cpp - Insert Thumb IT blocks ----------*- C++ -*-===//
+//===-- Thumb2ITBlockPass.cpp - Insert Thumb-2 IT blocks ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,6 +13,7 @@
#include "Thumb2InstrInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
@@ -75,7 +76,7 @@ static void TrackDefUses(MachineInstr *MI,
for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
unsigned Reg = LocalUses[i];
Uses.insert(Reg);
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg)
Uses.insert(*Subreg);
}
@@ -83,7 +84,7 @@ static void TrackDefUses(MachineInstr *MI,
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
Defs.insert(Reg);
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg)
Defs.insert(*Subreg);
if (Reg == ARM::CPSR)
@@ -141,7 +142,7 @@ Thumb2ITBlockPass::MoveCopyOutOfITBlock(MachineInstr *MI,
// rsb r2, 0
//
const MCInstrDesc &MCID = MI->getDesc();
- if (MCID.hasOptionalDef() &&
+ if (MI->hasOptionalDef() &&
MI->getOperand(MCID.getNumOperands() - 1).getReg() == ARM::CPSR)
return false;
@@ -153,7 +154,7 @@ Thumb2ITBlockPass::MoveCopyOutOfITBlock(MachineInstr *MI,
++I;
if (I != E) {
unsigned NPredReg = 0;
- ARMCC::CondCodes NCC = llvm::getITInstrPredicate(I, NPredReg);
+ ARMCC::CondCodes NCC = getITInstrPredicate(I, NPredReg);
if (NCC == CC || NCC == OCC)
return true;
}
@@ -170,7 +171,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
MachineInstr *MI = &*MBBI;
DebugLoc dl = MI->getDebugLoc();
unsigned PredReg = 0;
- ARMCC::CondCodes CC = llvm::getITInstrPredicate(MI, PredReg);
+ ARMCC::CondCodes CC = getITInstrPredicate(MI, PredReg);
if (CC == ARMCC::AL) {
++MBBI;
continue;
@@ -198,7 +199,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
// Branches, including tricky ones like LDM_RET, need to end an IT
// block so check the instruction we just put in the block.
for (; MBBI != E && Pos &&
- (!MI->getDesc().isBranch() && !MI->getDesc().isReturn()) ; ++MBBI) {
+ (!MI->isBranch() && !MI->isReturn()) ; ++MBBI) {
if (MBBI->isDebugValue())
continue;
@@ -206,7 +207,7 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
MI = NMI;
unsigned NPredReg = 0;
- ARMCC::CondCodes NCC = llvm::getITInstrPredicate(NMI, NPredReg);
+ ARMCC::CondCodes NCC = getITInstrPredicate(NMI, NPredReg);
if (NCC == CC || NCC == OCC) {
Mask |= (NCC & 1) << Pos;
// Add implicit use of ITSTATE.
@@ -237,6 +238,10 @@ bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
// Last instruction in IT block kills ITSTATE.
LastITMI->findRegisterUseOperand(ARM::ITSTATE)->setIsKill();
+ // Finalize the bundle.
+ MachineBasicBlock::instr_iterator LI = LastITMI;
+ finalizeBundle(MBB, InsertPos.getInstrIterator(), llvm::next(LI));
+
Modified = true;
++NumITs;
}
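For context on the new call: finalizeBundle wraps everything from the IT
instruction through the last predicated instruction into a single BUNDLE
pseudo-instruction whose operand list aggregates the defs and uses of its
members, so passes iterating at the bundle level treat the whole IT block
as one unit. Conceptually (opcodes and operands illustrative):

    @ before                        @ after finalizeBundle
    t2IT eq                         BUNDLE implicit-def r0, implicit CPSR, ...
    t2MOVi r0, #1, pred:EQ            * t2IT eq
                                      * t2MOVi r0, #1, pred:EQ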
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index cf040c82..8ab486b 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information ----*- C++ -*-===//
+//===-- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,13 +15,11 @@
#include "ARM.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
-#include "Thumb2InstrInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
@@ -35,6 +33,13 @@ Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
}
+/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+void Thumb2InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
+ NopInst.setOpcode(ARM::tNOP);
+ NopInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+ NopInst.addOperand(MCOperand::CreateReg(0));
+}
+
unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
// FIXME
return 0;
@@ -53,7 +58,7 @@ Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
// If the first instruction of Tail is predicated, we may have to update
// the IT instruction.
unsigned PredReg = 0;
- ARMCC::CondCodes CC = llvm::getInstrPredicate(Tail, PredReg);
+ ARMCC::CondCodes CC = getInstrPredicate(Tail, PredReg);
MachineBasicBlock::iterator MBBI = Tail;
if (CC != ARMCC::AL)
// Expecting at least the t2IT instruction before it.
@@ -101,7 +106,7 @@ Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
}
unsigned PredReg = 0;
- return llvm::getITInstrPredicate(MBBI, PredReg) == ARMCC::AL;
+ return getITInstrPredicate(MBBI, PredReg) == ARMCC::AL;
}
void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
@@ -130,8 +135,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
MachineMemOperand::MOStore,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
@@ -158,8 +162,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FI),
MFI.getObjectAlignment(FI));
@@ -570,7 +573,7 @@ Thumb2InstrInfo::scheduleTwoAddrSource(MachineInstr *SrcMI,
return;
unsigned PredReg = 0;
- ARMCC::CondCodes CC = llvm::getInstrPredicate(UseMI, PredReg);
+ ARMCC::CondCodes CC = getInstrPredicate(UseMI, PredReg);
if (CC == ARMCC::AL || PredReg != ARM::CPSR)
return;
@@ -586,10 +589,10 @@ Thumb2InstrInfo::scheduleTwoAddrSource(MachineInstr *SrcMI,
continue;
MachineInstr *NMI = &*MBBI;
- ARMCC::CondCodes NCC = llvm::getInstrPredicate(NMI, PredReg);
+ ARMCC::CondCodes NCC = getInstrPredicate(NMI, PredReg);
if (!(NCC == CC || NCC == OCC) ||
NMI->modifiesRegister(SrcReg, &TRI) ||
- NMI->definesRegister(ARM::CPSR))
+ NMI->modifiesRegister(ARM::CPSR, &TRI))
break;
if (++NumInsts == 4)
// Too many in a row!
@@ -607,5 +610,5 @@ llvm::getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
unsigned Opc = MI->getOpcode();
if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
return ARMCC::AL;
- return llvm::getInstrPredicate(MI, PredReg);
+ return getInstrPredicate(MI, PredReg);
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index f2637d7..0911f8a 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -1,4 +1,4 @@
-//===- Thumb2InstrInfo.h - Thumb-2 Instruction Information ------*- C++ -*-===//
+//===-- Thumb2InstrInfo.h - Thumb-2 Instruction Information -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,9 +14,8 @@
#ifndef THUMB2INSTRUCTIONINFO_H
#define THUMB2INSTRUCTIONINFO_H
-#include "llvm/Target/TargetInstrInfo.h"
#include "ARM.h"
-#include "ARMInstrInfo.h"
+#include "ARMBaseInstrInfo.h"
#include "Thumb2RegisterInfo.h"
namespace llvm {
@@ -28,6 +27,9 @@ class Thumb2InstrInfo : public ARMBaseInstrInfo {
public:
explicit Thumb2InstrInfo(const ARMSubtarget &STI);
+ /// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+ void getNoopForMachoTarget(MCInst &NopInst) const;
+
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is no such opcode.
unsigned getUnindexedOpcode(unsigned Opc) const;
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
index 355c3bf..29a87d0 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- Thumb2RegisterInfo.cpp - Thumb-2 Register Information ----*- C++ -*-===//
+//===-- Thumb2RegisterInfo.cpp - Thumb-2 Register Information -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,10 +12,10 @@
//
//===----------------------------------------------------------------------===//
+#include "Thumb2RegisterInfo.h"
#include "ARM.h"
+#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
-#include "Thumb2InstrInfo.h"
-#include "Thumb2RegisterInfo.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h
index 824378a..6b397e8 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb2RegisterInfo.h
@@ -16,13 +16,12 @@
#define THUMB2REGISTERINFO_H
#include "ARM.h"
-#include "ARMRegisterInfo.h"
+#include "ARMBaseRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class ARMSubtarget;
class ARMBaseInstrInfo;
- class Type;
struct Thumb2RegisterInfo : public ARMBaseRegisterInfo {
public:
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 89a155c..b5a397e 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -39,9 +39,9 @@ namespace {
/// ReduceTable - A static table with information on mapping from wide
/// opcodes to narrow ones.
struct ReduceEntry {
- unsigned WideOpc; // Wide opcode
- unsigned NarrowOpc1; // Narrow opcode to transform to
- unsigned NarrowOpc2; // Narrow opcode when it's two-address
+ uint16_t WideOpc; // Wide opcode
+ uint16_t NarrowOpc1; // Narrow opcode to transform to
+ uint16_t NarrowOpc2; // Narrow opcode when it's two-address
uint8_t Imm1Limit; // Limit of immediate field (bits)
uint8_t Imm2Limit; // Limit of immediate field when it's two-address
unsigned LowRegs1 : 1; // Only possible if low-registers are used
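For concreteness, the local entry ReduceSpecial builds for t2CMPrr later in
this file illustrates the field order: wide opcode, the two narrow opcodes
(0 where no such form exists), the two immediate-field limits, then the
packed flag bits declared above and in the lines elided from this hunk:

    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr, ARM::tCMPr, 0, 0, 0, 1, 1, 2, 0, 0, 1 };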
@@ -146,7 +146,8 @@ namespace {
/// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
DenseMap<unsigned, unsigned> ReduceOpcodeMap;
- bool canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use);
+ bool canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use,
+ bool IsSelfLoop);
bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
bool is2Addr, ARMCC::CondCodes Pred,
@@ -157,19 +158,21 @@ namespace {
bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
const ReduceEntry &Entry, bool LiveCPSR,
- MachineInstr *CPSRDef);
+ MachineInstr *CPSRDef, bool IsSelfLoop);
/// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
/// instruction.
bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
const ReduceEntry &Entry,
- bool LiveCPSR, MachineInstr *CPSRDef);
+ bool LiveCPSR, MachineInstr *CPSRDef,
+ bool IsSelfLoop);
/// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
/// non-two-address instruction.
bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
const ReduceEntry &Entry,
- bool LiveCPSR, MachineInstr *CPSRDef);
+ bool LiveCPSR, MachineInstr *CPSRDef,
+ bool IsSelfLoop);
/// ReduceMBB - Reduce width of instructions in the specified basic block.
bool ReduceMBB(MachineBasicBlock &MBB);
@@ -186,7 +189,7 @@ Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
}
static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
- for (const unsigned *Regs = MCID.ImplicitDefs; *Regs; ++Regs)
+ for (const uint16_t *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
if (*Regs == ARM::CPSR)
return true;
return false;
@@ -210,10 +213,17 @@ static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
/// In this case it would have been ok to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w
bool
-Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use) {
- if (!Def || !STI->avoidCPSRPartialUpdate())
+Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Def, MachineInstr *Use,
+ bool FirstInSelfLoop) {
+ // FIXME: Disable check for -Oz (aka OptimizeForSizeHarder).
+ if (!STI->avoidCPSRPartialUpdate())
return false;
+ if (!Def)
+ // If this BB loops back to itself, conservatively avoid narrowing the
+ // first instruction that does partial flag update.
+ return FirstInSelfLoop;
+
SmallSet<unsigned, 2> Defs;
for (unsigned i = 0, e = Def->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = Def->getOperand(i);
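Spelling out the doc comment's example: muls, the 16-bit form, always
writes CPSR, while mul.w need not; on subtargets where
avoidCPSRPartialUpdate() is set, that extra flag write is treated as
harmless only when a register-level RAW dependency already orders the
candidate after the last CPSR def. A hedged sketch:

    movs  r0, #3       @ CPSRDef: defines CPSR and r0
    mul.w r1, r0, r1   @ reads r0, so it is ordered after movs anyway;
                       @ narrowing to muls r1, r0, r1 adds no new dependency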
@@ -442,7 +452,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
// Add the 16-bit load / store instruction.
DebugLoc dl = MI->getDebugLoc();
- MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, TII->get(Opc));
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));
if (!isLdStMul) {
MIB.addOperand(MI->getOperand(0));
MIB.addOperand(MI->getOperand(1));
@@ -468,7 +478,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
- MBB.erase(MI);
+ MBB.erase_instr(MI);
++NumLdSts;
return true;
}
@@ -476,15 +486,16 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
const ReduceEntry &Entry,
- bool LiveCPSR, MachineInstr *CPSRDef) {
+ bool LiveCPSR, MachineInstr *CPSRDef,
+ bool IsSelfLoop) {
unsigned Opc = MI->getOpcode();
if (Opc == ARM::t2ADDri) {
// If the source register is SP, try to reduce to tADDrSPi, otherwise
// it's a normal reduce.
if (MI->getOperand(1).getReg() != ARM::SP) {
- if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef))
+ if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop))
return true;
- return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef);
+ return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop);
}
// Try to reduce to tADDrSPi.
unsigned Imm = MI->getOperand(2).getImm();
@@ -502,7 +513,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
return false;
- MachineInstrBuilder MIB = BuildMI(MBB, *MI, MI->getDebugLoc(),
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
TII->get(ARM::tADDrSPi))
.addOperand(MI->getOperand(0))
.addOperand(MI->getOperand(1))
@@ -514,7 +525,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " <<*MIB);
- MBB.erase(MI);
+ MBB.erase_instr(MI);
++NumNarrows;
return true;
}
@@ -522,8 +533,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
if (Entry.LowRegs1 && !VerifyLowRegs(MI))
return false;
- const MCInstrDesc &MCID = MI->getDesc();
- if (MCID.mayLoad() || MCID.mayStore())
+ if (MI->mayLoad() || MI->mayStore())
return ReduceLoadStore(MBB, MI, Entry);
switch (Opc) {
@@ -535,12 +545,12 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
switch (Opc) {
default: break;
case ARM::t2ADDSri: {
- if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef))
+ if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop))
return true;
// fallthrough
}
case ARM::t2ADDSrr:
- return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef);
+ return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop);
}
}
break;
@@ -552,13 +562,13 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
case ARM::t2UXTB:
case ARM::t2UXTH:
if (MI->getOperand(2).getImm() == 0)
- return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef);
+ return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop);
break;
case ARM::t2MOVi16:
// Can convert only 'pure' immediate operands, not immediates obtained as
// globals' addresses.
if (MI->getOperand(1).isImm())
- return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef);
+ return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop);
break;
case ARM::t2CMPrr: {
// Try to reduce to the lo-reg only version first. Why there are two
@@ -568,9 +578,9 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
// source insn opcode. So for now, we hack a local entry record to use.
static const ReduceEntry NarrowEntry =
{ ARM::t2CMPrr, ARM::tCMPr, 0, 0, 0, 1, 1, 2, 0, 0, 1 };
- if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, CPSRDef))
+ if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, CPSRDef, IsSelfLoop))
return true;
- return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef);
+ return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop);
}
}
return false;
@@ -579,14 +589,32 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
const ReduceEntry &Entry,
- bool LiveCPSR, MachineInstr *CPSRDef) {
+ bool LiveCPSR, MachineInstr *CPSRDef,
+ bool IsSelfLoop) {
if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
return false;
unsigned Reg0 = MI->getOperand(0).getReg();
unsigned Reg1 = MI->getOperand(1).getReg();
- if (Reg0 != Reg1) {
+ // t2MUL is "special". The tied source operand is second, not first.
+ if (MI->getOpcode() == ARM::t2MUL) {
+ unsigned Reg2 = MI->getOperand(2).getReg();
+ // Early exit if the regs aren't all low regs.
+ if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
+ || !isARMLowRegister(Reg2))
+ return false;
+ if (Reg0 != Reg2) {
+ // If the other operand also isn't the same as the destination, we
+ // can't reduce.
+ if (Reg1 != Reg0)
+ return false;
+ // Try to commute the operands to make it a 2-address instruction.
+ MachineInstr *CommutedMI = TII->commuteInstruction(MI);
+ if (!CommutedMI)
+ return false;
+ }
+ } else if (Reg0 != Reg1) {
// Try to commute the operands to make it a 2-address instruction.
unsigned CommOpIdx1, CommOpIdx2;
if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
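The t2MUL special case above, in concrete terms: the 16-bit form ties its
destination to its second source (muls Rdm, Rn, Rdm), so the 32-bit
instruction only needs its operands in the right order. Assuming r0 and r1
are low registers:

    mul.w r0, r1, r0   @ Reg0 == Reg2: matches the tied slot, narrows directly
    mul.w r0, r0, r1   @ Reg0 == Reg1: commuted to mul.w r0, r1, r0 first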
@@ -637,12 +665,12 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
// Avoid adding a false dependency on partial flag update by some 16-bit
// instructions which have the 's' bit set.
if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
- canAddPseudoFlagDep(CPSRDef, MI))
+ canAddPseudoFlagDep(CPSRDef, MI, IsSelfLoop))
return false;
// Add the 16-bit instruction.
DebugLoc dl = MI->getDebugLoc();
- MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewMCID);
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
MIB.addOperand(MI->getOperand(0));
if (NewMCID.hasOptionalDef()) {
if (HasCC)
@@ -666,7 +694,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
- MBB.erase(MI);
+ MBB.erase_instr(MI);
++Num2Addrs;
return true;
}
@@ -674,7 +702,8 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
const ReduceEntry &Entry,
- bool LiveCPSR, MachineInstr *CPSRDef) {
+ bool LiveCPSR, MachineInstr *CPSRDef,
+ bool IsSelfLoop) {
if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
return false;
@@ -727,12 +756,12 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
// Avoid adding a false dependency on partial flag update by some 16-bit
// instructions which have the 's' bit set.
if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
- canAddPseudoFlagDep(CPSRDef, MI))
+ canAddPseudoFlagDep(CPSRDef, MI, IsSelfLoop))
return false;
// Add the 16-bit instruction.
DebugLoc dl = MI->getDebugLoc();
- MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewMCID);
+ MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
MIB.addOperand(MI->getOperand(0));
if (NewMCID.hasOptionalDef()) {
if (HasCC)
@@ -772,7 +801,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
- MBB.erase(MI);
+ MBB.erase_instr(MI);
++NumNarrows;
return true;
}
@@ -817,13 +846,22 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
// Yes, CPSR could be livein.
bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
MachineInstr *CPSRDef = 0;
+ MachineInstr *BundleMI = 0;
- MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
- MachineBasicBlock::iterator NextMII;
+ // If this BB loops back to itself, conservatively avoid narrowing the
+ // first instruction that does partial flag update.
+ bool IsSelfLoop = MBB.isSuccessor(&MBB);
+ MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end();
+ MachineBasicBlock::instr_iterator NextMII;
for (; MII != E; MII = NextMII) {
NextMII = llvm::next(MII);
MachineInstr *MI = &*MII;
+ if (MI->isBundle()) {
+ BundleMI = MI;
+ continue;
+ }
+
LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);
unsigned Opcode = MI->getOpcode();
@@ -832,9 +870,9 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
const ReduceEntry &Entry = ReduceTable[OPI->second];
// Ignore "special" cases for now.
if (Entry.Special) {
- if (ReduceSpecial(MBB, MI, Entry, LiveCPSR, CPSRDef)) {
+ if (ReduceSpecial(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop)) {
Modified = true;
- MachineBasicBlock::iterator I = prior(NextMII);
+ MachineBasicBlock::instr_iterator I = prior(NextMII);
MI = &*I;
}
goto ProcessNext;
@@ -842,31 +880,46 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
// Try to transform to a 16-bit two-address instruction.
if (Entry.NarrowOpc2 &&
- ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef)) {
+ ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop)) {
Modified = true;
- MachineBasicBlock::iterator I = prior(NextMII);
+ MachineBasicBlock::instr_iterator I = prior(NextMII);
MI = &*I;
goto ProcessNext;
}
// Try to transform to a 16-bit non-two-address instruction.
if (Entry.NarrowOpc1 &&
- ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef)) {
+ ReduceToNarrow(MBB, MI, Entry, LiveCPSR, CPSRDef, IsSelfLoop)) {
Modified = true;
- MachineBasicBlock::iterator I = prior(NextMII);
+ MachineBasicBlock::instr_iterator I = prior(NextMII);
MI = &*I;
}
}
ProcessNext:
+ if (NextMII != E && MI->isInsideBundle() && !NextMII->isInsideBundle()) {
+ // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
+ // marker is only on the BUNDLE instruction. Process the BUNDLE
+ // instruction as we finish with the bundled instruction to work around
+ // the inconsistency.
+ if (BundleMI->killsRegister(ARM::CPSR))
+ LiveCPSR = false;
+ MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
+ if (MO && !MO->isDead())
+ LiveCPSR = true;
+ }
+
bool DefCPSR = false;
LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
- if (MI->getDesc().isCall())
+ if (MI->isCall()) {
// Calls don't really set CPSR.
CPSRDef = 0;
- else if (DefCPSR)
+ IsSelfLoop = false;
+ } else if (DefCPSR) {
// This is the last CPSR defining instruction.
CPSRDef = MI;
+ IsSelfLoop = false;
+ }
}
return Modified;
diff --git a/contrib/llvm/lib/Target/Alpha/Alpha.h b/contrib/llvm/lib/Target/Alpha/Alpha.h
deleted file mode 100644
index 6ffaf45..0000000
--- a/contrib/llvm/lib/Target/Alpha/Alpha.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//===-- Alpha.h - Top-level interface for Alpha representation --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the entry points for global functions defined in the LLVM
-// Alpha back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TARGET_ALPHA_H
-#define TARGET_ALPHA_H
-
-#include "MCTargetDesc/AlphaMCTargetDesc.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
- namespace Alpha {
- // These describe LDAx
-
- static const int IMM_LOW = -32768;
- static const int IMM_HIGH = 32767;
- static const int IMM_MULT = 65536;
- }
-
- class AlphaTargetMachine;
- class FunctionPass;
- class formatted_raw_ostream;
-
- FunctionPass *createAlphaISelDag(AlphaTargetMachine &TM);
- FunctionPass *createAlphaPatternInstructionSelector(TargetMachine &TM);
- FunctionPass *createAlphaJITCodeEmitterPass(AlphaTargetMachine &TM,
- JITCodeEmitter &JCE);
- FunctionPass *createAlphaLLRPPass(AlphaTargetMachine &tm);
- FunctionPass *createAlphaBranchSelectionPass();
-
-} // end namespace llvm;
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/Alpha.td b/contrib/llvm/lib/Target/Alpha/Alpha.td
deleted file mode 100644
index ae79c2e..0000000
--- a/contrib/llvm/lib/Target/Alpha/Alpha.td
+++ /dev/null
@@ -1,68 +0,0 @@
-//===- Alpha.td - Describe the Alpha Target Machine --------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-// Get the target-independent interfaces which we are implementing...
-//
-include "llvm/Target/Target.td"
-
-//Alpha is little endian
-
-//===----------------------------------------------------------------------===//
-// Subtarget Features
-//===----------------------------------------------------------------------===//
-
-def FeatureCIX : SubtargetFeature<"cix", "HasCT", "true",
- "Enable CIX extensions">;
-
-//===----------------------------------------------------------------------===//
-// Register File Description
-//===----------------------------------------------------------------------===//
-
-include "AlphaRegisterInfo.td"
-
-//===----------------------------------------------------------------------===//
-// Calling Convention Description
-//===----------------------------------------------------------------------===//
-
-include "AlphaCallingConv.td"
-
-//===----------------------------------------------------------------------===//
-// Schedule Description
-//===----------------------------------------------------------------------===//
-
-include "AlphaSchedule.td"
-
-//===----------------------------------------------------------------------===//
-// Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-include "AlphaInstrInfo.td"
-
-def AlphaInstrInfo : InstrInfo;
-
-//===----------------------------------------------------------------------===//
-// Alpha Processor Definitions
-//===----------------------------------------------------------------------===//
-
-def : Processor<"generic", Alpha21264Itineraries, []>;
-def : Processor<"ev6" , Alpha21264Itineraries, []>;
-def : Processor<"ev67" , Alpha21264Itineraries, [FeatureCIX]>;
-
-//===----------------------------------------------------------------------===//
-// The Alpha Target
-//===----------------------------------------------------------------------===//
-
-
-def Alpha : Target {
- // Pull in Instruction Info:
- let InstructionSet = AlphaInstrInfo;
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaAsmPrinter.cpp b/contrib/llvm/lib/Target/Alpha/AlphaAsmPrinter.cpp
deleted file mode 100644
index 5dce06a..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaAsmPrinter.cpp
+++ /dev/null
@@ -1,166 +0,0 @@
-//===-- AlphaAsmPrinter.cpp - Alpha LLVM assembly writer ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to GAS-format Alpha assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "Alpha.h"
-#include "AlphaInstrInfo.h"
-#include "AlphaTargetMachine.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
- struct AlphaAsmPrinter : public AsmPrinter {
- /// Unique incrementer for label values for referencing Global values.
- ///
-
- explicit AlphaAsmPrinter(TargetMachine &tm, MCStreamer &Streamer)
- : AsmPrinter(tm, Streamer) {}
-
- virtual const char *getPassName() const {
- return "Alpha Assembly Printer";
- }
- void printInstruction(const MachineInstr *MI, raw_ostream &O);
- void EmitInstruction(const MachineInstr *MI) {
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- printInstruction(MI, OS);
- OutStreamer.EmitRawText(OS.str());
- }
- static const char *getRegisterName(unsigned RegNo);
-
- void printOp(const MachineOperand &MO, raw_ostream &O);
- void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
- virtual void EmitFunctionBodyStart();
- virtual void EmitFunctionBodyEnd();
- void EmitStartOfAsmFile(Module &M);
-
- bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
- bool PrintAsmMemoryOperand(const MachineInstr *MI,
- unsigned OpNo, unsigned AsmVariant,
- const char *ExtraCode, raw_ostream &O);
- };
-} // end of anonymous namespace
-
-#include "AlphaGenAsmWriter.inc"
-
-void AlphaAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(opNum);
- if (MO.isReg()) {
- assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
- "Not physreg??");
- O << getRegisterName(MO.getReg());
- } else if (MO.isImm()) {
- O << MO.getImm();
- assert(MO.getImm() < (1 << 30));
- } else {
- printOp(MO, O);
- }
-}
-
-
-void AlphaAsmPrinter::printOp(const MachineOperand &MO, raw_ostream &O) {
- switch (MO.getType()) {
- case MachineOperand::MO_Register:
- O << getRegisterName(MO.getReg());
- return;
-
- case MachineOperand::MO_Immediate:
- assert(0 && "printOp() does not handle immediate values");
- return;
-
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- return;
-
- case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
- << MO.getIndex();
- return;
-
- case MachineOperand::MO_ExternalSymbol:
- O << MO.getSymbolName();
- return;
-
- case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
- return;
-
- case MachineOperand::MO_JumpTableIndex:
- O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
- << '_' << MO.getIndex();
- return;
-
- default:
- O << "<unknown operand type: " << MO.getType() << ">";
- return;
- }
-}
-
-/// EmitFunctionBodyStart - Targets can override this to emit stuff before
-/// the first basic block in the function.
-void AlphaAsmPrinter::EmitFunctionBodyStart() {
- OutStreamer.EmitRawText("\t.ent " + Twine(CurrentFnSym->getName()));
-}
-
-/// EmitFunctionBodyEnd - Targets can override this to emit stuff after
-/// the last basic block in the function.
-void AlphaAsmPrinter::EmitFunctionBodyEnd() {
- OutStreamer.EmitRawText("\t.end " + Twine(CurrentFnSym->getName()));
-}
-
-void AlphaAsmPrinter::EmitStartOfAsmFile(Module &M) {
- OutStreamer.EmitRawText(StringRef("\t.arch ev6"));
- OutStreamer.EmitRawText(StringRef("\t.set noat"));
-}
-
-/// PrintAsmOperand - Print out an operand for an inline asm expression.
-///
-bool AlphaAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode, raw_ostream &O) {
- printOperand(MI, OpNo, O);
- return false;
-}
-
-bool AlphaAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
- unsigned OpNo, unsigned AsmVariant,
- const char *ExtraCode,
- raw_ostream &O) {
- if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier.
- O << "0(";
- printOperand(MI, OpNo, O);
- O << ")";
- return false;
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeAlphaAsmPrinter() {
- RegisterAsmPrinter<AlphaAsmPrinter> X(TheAlphaTarget);
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaBranchSelector.cpp b/contrib/llvm/lib/Target/Alpha/AlphaBranchSelector.cpp
deleted file mode 100644
index 3768117..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaBranchSelector.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-//===-- AlphaBranchSelector.cpp - Convert Pseudo branches --------*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Replace Pseudo COND_BRANCH_* with their appropriate real branches
-// Simplified version of the PPC Branch Selector
-//
-//===----------------------------------------------------------------------===//
-
-#include "Alpha.h"
-#include "AlphaInstrInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/MC/MCAsmInfo.h"
-using namespace llvm;
-
-namespace {
- struct AlphaBSel : public MachineFunctionPass {
- static char ID;
- AlphaBSel() : MachineFunctionPass(ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &Fn);
-
- virtual const char *getPassName() const {
- return "Alpha Branch Selection";
- }
- };
- char AlphaBSel::ID = 0;
-}
-
-/// createAlphaBranchSelectionPass - returns an instance of the Branch Selection
-/// Pass
-///
-FunctionPass *llvm::createAlphaBranchSelectionPass() {
- return new AlphaBSel();
-}
-
-bool AlphaBSel::runOnMachineFunction(MachineFunction &Fn) {
-
- for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
- ++MFI) {
- MachineBasicBlock *MBB = MFI;
-
- for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
- MBBI != EE; ++MBBI) {
- if (MBBI->getOpcode() == Alpha::COND_BRANCH_I ||
- MBBI->getOpcode() == Alpha::COND_BRANCH_F) {
-
- // condbranch operands:
- // 0. bc opcode
- // 1. reg
- // 2. target MBB
- const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
- MBBI->setDesc(TII->get(MBBI->getOperand(0).getImm()));
- }
- }
- }
-
- return true;
-}
-
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaCallingConv.td b/contrib/llvm/lib/Target/Alpha/AlphaCallingConv.td
deleted file mode 100644
index bde8819..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaCallingConv.td
+++ /dev/null
@@ -1,38 +0,0 @@
-//===- AlphaCallingConv.td - Calling Conventions for Alpha -*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This describes the calling conventions for Alpha architecture.
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Alpha Return Value Calling Convention
-//===----------------------------------------------------------------------===//
-def RetCC_Alpha : CallingConv<[
- // i64 is returned in register R0
- // R1 is an llvm extension; I don't know what gcc does
- CCIfType<[i64], CCAssignToReg<[R0,R1]>>,
-
- // f32 / f64 are returned in F0/F1
- CCIfType<[f32, f64], CCAssignToReg<[F0, F1]>>
-]>;
-
-//===----------------------------------------------------------------------===//
-// Alpha Argument Calling Conventions
-//===----------------------------------------------------------------------===//
-def CC_Alpha : CallingConv<[
- // The first 6 arguments are passed in registers, whether integer or
- // floating-point
- CCIfType<[i64], CCAssignToRegWithShadow<[R16, R17, R18, R19, R20, R21],
- [F16, F17, F18, F19, F20, F21]>>,
-
- CCIfType<[f32, f64], CCAssignToRegWithShadow<[F16, F17, F18, F19, F20, F21],
- [R16, R17, R18, R19, R20, R21]>>,
-
- // Stack slots are 8 bytes in size and 8-byte aligned.
- CCIfType<[i64, f32, f64], CCAssignToStack<8, 8>>
-]>;
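Making the register shadowing concrete: each of the first six argument
slots consumes one register from both files, whichever file the slot's type
actually uses. A hypothetical f(i64 a, f64 b, i64 c) would be assigned:

    a -> R16   (F16 becomes unavailable)
    b -> F17   (R17 becomes unavailable)
    c -> R18   (F18 becomes unavailable)
    seventh and later arguments -> 8-byte aligned stack slots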
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.cpp b/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.cpp
deleted file mode 100644
index 690cd1d..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-//=====- AlphaFrameLowering.cpp - Alpha Frame Information ------*- C++ -*-====//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Alpha implementation of TargetFrameLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AlphaFrameLowering.h"
-#include "AlphaInstrInfo.h"
-#include "AlphaMachineFunctionInfo.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/ADT/Twine.h"
-
-using namespace llvm;
-
-static long getUpper16(long l) {
- long y = l / Alpha::IMM_MULT;
- if (l % Alpha::IMM_MULT > Alpha::IMM_HIGH)
- ++y;
- return y;
-}
-
-static long getLower16(long l) {
- long h = getUpper16(l);
- return l - h * Alpha::IMM_MULT;
-}
-
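A worked, runnable restatement of the split these helpers implement, with
the constants inlined from the deleted Alpha.h above (a sketch, not the
original code):

    #include <cassert>
    // upper16 rounds toward the half whose remainder fits a signed 16-bit
    // displacement; LDAH contributes upper16 << 16 and LDA adds lower16.
    static long upper16(long l) {
      long y = l / 65536;                 // IMM_MULT
      if (l % 65536 > 32767)              // IMM_HIGH
        ++y;
      return y;
    }
    static long lower16(long l) { return l - upper16(l) * 65536; }
    int main() {
      assert(upper16(100000) == 2 && lower16(100000) == -31072);
      assert(upper16(100000) * 65536 + lower16(100000) == 100000);
    }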
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
-//
-bool AlphaFrameLowering::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return MFI->hasVarSizedObjects();
-}
-
-void AlphaFrameLowering::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
-
- DebugLoc dl = (MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc());
- bool FP = hasFP(MF);
-
- // Handle GP offset
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAHg), Alpha::R29)
- .addGlobalAddress(MF.getFunction()).addReg(Alpha::R27).addImm(++curgpdist);
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAg), Alpha::R29)
- .addGlobalAddress(MF.getFunction()).addReg(Alpha::R29).addImm(curgpdist);
-
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::ALTENT))
- .addGlobalAddress(MF.getFunction());
-
- // Get the number of bytes to allocate from the FrameInfo
- long NumBytes = MFI->getStackSize();
-
- if (FP)
- NumBytes += 8; //reserve space for the old FP
-
- // Do we need to allocate space on the stack?
- if (NumBytes == 0) return;
-
- unsigned Align = getStackAlignment();
- NumBytes = (NumBytes+Align-1)/Align*Align;
-
- // Update frame info to pretend that this is part of the stack...
- MFI->setStackSize(NumBytes);
-
- // adjust stack pointer: r30 -= numbytes
- NumBytes = -NumBytes;
- if (NumBytes >= Alpha::IMM_LOW) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30).addImm(NumBytes)
- .addReg(Alpha::R30);
- } else if (getUpper16(NumBytes) >= Alpha::IMM_LOW) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAH), Alpha::R30)
- .addImm(getUpper16(NumBytes)).addReg(Alpha::R30);
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30)
- .addImm(getLower16(NumBytes)).addReg(Alpha::R30);
- } else {
- report_fatal_error("Too big a stack frame at " + Twine(NumBytes));
- }
-
- // Now if we need to, save the old FP and set the new
- if (FP) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::STQ))
- .addReg(Alpha::R15).addImm(0).addReg(Alpha::R30);
- // This must be the last instr in the prolog
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::BISr), Alpha::R15)
- .addReg(Alpha::R30).addReg(Alpha::R30);
- }
-
-}
-
-void AlphaFrameLowering::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
-
- assert((MBBI->getOpcode() == Alpha::RETDAG ||
- MBBI->getOpcode() == Alpha::RETDAGp)
- && "Can only insert epilog into returning blocks");
- DebugLoc dl = MBBI->getDebugLoc();
-
- bool FP = hasFP(MF);
-
- // Get the number of bytes allocated from the FrameInfo...
- long NumBytes = MFI->getStackSize();
-
- //now if we need to, restore the old FP
- if (FP) {
- //copy the FP into the SP (discards allocas)
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::BISr), Alpha::R30).addReg(Alpha::R15)
- .addReg(Alpha::R15);
- //restore the FP
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDQ), Alpha::R15)
- .addImm(0).addReg(Alpha::R15);
- }
-
- if (NumBytes != 0) {
- if (NumBytes <= Alpha::IMM_HIGH) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30).addImm(NumBytes)
- .addReg(Alpha::R30);
- } else if (getUpper16(NumBytes) <= Alpha::IMM_HIGH) {
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDAH), Alpha::R30)
- .addImm(getUpper16(NumBytes)).addReg(Alpha::R30);
- BuildMI(MBB, MBBI, dl, TII.get(Alpha::LDA), Alpha::R30)
- .addImm(getLower16(NumBytes)).addReg(Alpha::R30);
- } else {
- report_fatal_error("Too big a stack frame at " + Twine(NumBytes));
- }
- }
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.h b/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.h
deleted file mode 100644
index ebd9e1b..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaFrameLowering.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//==-- AlphaFrameLowering.h - Define frame lowering for Alpha --*- C++ -*---==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHA_FRAMEINFO_H
-#define ALPHA_FRAMEINFO_H
-
-#include "Alpha.h"
-#include "AlphaSubtarget.h"
-#include "llvm/Target/TargetFrameLowering.h"
-
-namespace llvm {
- class AlphaSubtarget;
-
-class AlphaFrameLowering : public TargetFrameLowering {
- const AlphaSubtarget &STI;
- // FIXME: This should end in MachineFunctionInfo, not here!
- mutable int curgpdist;
-public:
- explicit AlphaFrameLowering(const AlphaSubtarget &sti)
- : TargetFrameLowering(StackGrowsDown, 16, 0), STI(sti), curgpdist(0) {
- }
-
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
- bool hasFP(const MachineFunction &MF) const;
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
deleted file mode 100644
index f877c65..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
+++ /dev/null
@@ -1,425 +0,0 @@
-//===-- AlphaISelDAGToDAG.cpp - Alpha pattern matching inst selector ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a pattern matching instruction selector for Alpha,
-// converting from a legalized dag to a Alpha dag.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Alpha.h"
-#include "AlphaTargetMachine.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
-using namespace llvm;
-
-namespace {
-
- //===--------------------------------------------------------------------===//
- /// AlphaDAGToDAGISel - Alpha specific code to select Alpha machine
- /// instructions for SelectionDAG operations.
- class AlphaDAGToDAGISel : public SelectionDAGISel {
- static const int64_t IMM_LOW = -32768;
- static const int64_t IMM_HIGH = 32767;
- static const int64_t IMM_MULT = 65536;
- static const int64_t IMM_FULLHIGH = IMM_HIGH + IMM_HIGH * IMM_MULT;
- static const int64_t IMM_FULLLOW = IMM_LOW + IMM_LOW * IMM_MULT;
-
- static int64_t get_ldah16(int64_t x) {
- int64_t y = x / IMM_MULT;
- if (x % IMM_MULT > IMM_HIGH)
- ++y;
- return y;
- }
-
- static int64_t get_lda16(int64_t x) {
- return x - get_ldah16(x) * IMM_MULT;
- }
-
- /// get_zapImm - Return a zap mask if X is a valid immediate for a zapnot
- /// instruction (if not, return 0). Note that this code accepts partial
- /// zap masks. For example (and LHS, 1) is a valid zap, as long as we know
- /// that the bits 1-7 of LHS are already zero. If LHS is non-null, we are
- /// in checking mode. If LHS is null, we assume that the mask has already
- /// been validated before.
- uint64_t get_zapImm(SDValue LHS, uint64_t Constant) const {
- uint64_t BitsToCheck = 0;
- unsigned Result = 0;
- for (unsigned i = 0; i != 8; ++i) {
- if (((Constant >> 8*i) & 0xFF) == 0) {
- // nothing to do.
- } else {
- Result |= 1 << i;
- if (((Constant >> 8*i) & 0xFF) == 0xFF) {
- // If the entire byte is set, zapnot the byte.
- } else if (LHS.getNode() == 0) {
- // Otherwise, if the mask was previously validated, we know it's okay
- // to zapnot this entire byte even though all the bits aren't set.
- } else {
- // Otherwise we don't know that it's okay to zapnot this entire
- // byte. Only do this iff we can prove that the missing bits are
- // already zero, so the bytezap doesn't really need to clear them.
- BitsToCheck |= ~Constant & (0xFFULL << 8*i);
- }
- }
- }
-
- // If there are missing bits in a byte (for example, X & 0xEF00), check to
- // see if the missing bits (0x1000) are already known zero; if not, the zap
- // isn't okay to do, as it won't clear all the required bits.
- if (BitsToCheck &&
- !CurDAG->MaskedValueIsZero(LHS,
- APInt(LHS.getValueSizeInBits(),
- BitsToCheck)))
- return 0;
-
- return Result;
- }
-
- static uint64_t get_zapImm(uint64_t x) {
- unsigned build = 0;
- for(int i = 0; i != 8; ++i) {
- if ((x & 0x00FF) == 0x00FF)
- build |= 1 << i;
- else if ((x & 0x00FF) != 0)
- return 0;
- x >>= 8;
- }
- return build;
- }
-
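The immediate-only variant above reduces to a per-byte scan: a constant is
a valid zap mask iff every byte is 0x00 or 0xFF, and the result carries one
bit per fully-set byte. A runnable restatement (sketch):

    #include <cassert>
    #include <cstdint>
    static uint64_t zapImm(uint64_t x) {
      unsigned build = 0;
      for (int i = 0; i != 8; ++i) {
        if ((x & 0xFF) == 0xFF)
          build |= 1u << i;                // byte i fully set
        else if ((x & 0xFF) != 0)
          return 0;                        // partially set byte: not a zap
        x >>= 8;
      }
      return build;
    }
    int main() {
      assert(zapImm(0xFF00FFFFULL) == 0xB); // bytes 0, 1, 3 -> bits 0, 1, 3
      assert(zapImm(0x00000F00ULL) == 0);   // byte 1 only partially set
    }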
-
- static uint64_t getNearPower2(uint64_t x) {
- if (!x) return 0;
- unsigned at = CountLeadingZeros_64(x);
- uint64_t complow = 1ULL << (63 - at);
- uint64_t comphigh = complow << 1;
- if (x - complow <= comphigh - x)
- return complow;
- else
- return comphigh;
- }
-
- static bool chkRemNearPower2(uint64_t x, uint64_t r, bool swap) {
- uint64_t y = getNearPower2(x);
- if (swap)
- return (y - x) == r;
- else
- return (x - y) == r;
- }
-
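A runnable restatement of the rounding rule in getNearPower2, with
__builtin_clzll standing in for LLVM's CountLeadingZeros_64 (sketch):

    #include <cassert>
    #include <cstdint>
    static uint64_t nearPower2(uint64_t x) {
      if (!x) return 0;
      uint64_t complow = 1ULL << (63 - __builtin_clzll(x));
      uint64_t comphigh = complow << 1;
      return (x - complow <= comphigh - x) ? complow : comphigh;
    }
    int main() {
      assert(nearPower2(100) == 128); // 36 above 64, only 28 below 128
      assert(nearPower2(96) == 64);   // equidistant: the lower power wins
    }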
- public:
- explicit AlphaDAGToDAGISel(AlphaTargetMachine &TM)
- : SelectionDAGISel(TM)
- {}
-
- /// getI64Imm - Return a target constant with the specified value, of type
- /// i64.
- inline SDValue getI64Imm(int64_t Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i64);
- }
-
- // Select - Convert the specified operand from a target-independent to a
- // target-specific node if it hasn't already been changed.
- SDNode *Select(SDNode *N);
-
- virtual const char *getPassName() const {
- return "Alpha DAG->DAG Pattern Instruction Selection";
- }
-
- /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
- /// inline asm expressions.
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps) {
- SDValue Op0;
- switch (ConstraintCode) {
- default: return true;
- case 'm': // memory
- Op0 = Op;
- break;
- }
-
- OutOps.push_back(Op0);
- return false;
- }
-
-// Include the pieces autogenerated from the target description.
-#include "AlphaGenDAGISel.inc"
-
-private:
- /// getTargetMachine - Return a reference to the TargetMachine, casted
- /// to the target-specific type.
- const AlphaTargetMachine &getTargetMachine() {
- return static_cast<const AlphaTargetMachine &>(TM);
- }
-
- /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
- /// to the target-specific type.
- const AlphaInstrInfo *getInstrInfo() {
- return getTargetMachine().getInstrInfo();
- }
-
- SDNode *getGlobalBaseReg();
- SDNode *getGlobalRetAddr();
- void SelectCALL(SDNode *Op);
-
- };
-}
-
-/// getGlobalBaseReg - Output the instructions required to put the
-/// GOT address into a register.
-///
-SDNode *AlphaDAGToDAGISel::getGlobalBaseReg() {
- unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
- return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
-}
-
-/// getGlobalRetAddr - Grab the return address.
-///
-SDNode *AlphaDAGToDAGISel::getGlobalRetAddr() {
- unsigned GlobalRetAddr = getInstrInfo()->getGlobalRetAddr(MF);
- return CurDAG->getRegister(GlobalRetAddr, TLI.getPointerTy()).getNode();
-}
-
-// Select - Convert the specified operand from a target-independent to a
-// target-specific node if it hasn't already been changed.
-SDNode *AlphaDAGToDAGISel::Select(SDNode *N) {
- if (N->isMachineOpcode())
- return NULL; // Already selected.
- DebugLoc dl = N->getDebugLoc();
-
- switch (N->getOpcode()) {
- default: break;
- case AlphaISD::CALL:
- SelectCALL(N);
- return NULL;
-
- case ISD::FrameIndex: {
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- return CurDAG->SelectNodeTo(N, Alpha::LDA, MVT::i64,
- CurDAG->getTargetFrameIndex(FI, MVT::i32),
- getI64Imm(0));
- }
- case ISD::GLOBAL_OFFSET_TABLE:
- return getGlobalBaseReg();
- case AlphaISD::GlobalRetAddr:
- return getGlobalRetAddr();
-
- case AlphaISD::DivCall: {
- SDValue Chain = CurDAG->getEntryNode();
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDValue N2 = N->getOperand(2);
- Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R24, N1,
- SDValue(0,0));
- Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R25, N2,
- Chain.getValue(1));
- Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R27, N0,
- Chain.getValue(1));
- SDNode *CNode =
- CurDAG->getMachineNode(Alpha::JSRs, dl, MVT::Other, MVT::Glue,
- Chain, Chain.getValue(1));
- Chain = CurDAG->getCopyFromReg(Chain, dl, Alpha::R27, MVT::i64,
- SDValue(CNode, 1));
- return CurDAG->SelectNodeTo(N, Alpha::BISr, MVT::i64, Chain, Chain);
- }
-
- case ISD::READCYCLECOUNTER: {
- SDValue Chain = N->getOperand(0);
- return CurDAG->getMachineNode(Alpha::RPCC, dl, MVT::i64, MVT::Other,
- Chain);
- }
-
- case ISD::Constant: {
- uint64_t uval = cast<ConstantSDNode>(N)->getZExtValue();
-
- if (uval == 0) {
- SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- Alpha::R31, MVT::i64);
- ReplaceUses(SDValue(N, 0), Result);
- return NULL;
- }
-
- int64_t val = (int64_t)uval;
- int32_t val32 = (int32_t)val;
- if (val <= IMM_HIGH + IMM_HIGH * IMM_MULT &&
- val >= IMM_LOW + IMM_LOW * IMM_MULT)
- break; //(LDAH (LDA))
- if ((uval >> 32) == 0 && //empty upper bits
- val32 <= IMM_HIGH + IMM_HIGH * IMM_MULT)
- // val32 >= IMM_LOW + IMM_LOW * IMM_MULT) //always true
- break; //(zext (LDAH (LDA)))
- // Else, use the constant pool (see the LDAH/LDA split sketched after Select).
- ConstantInt *C = ConstantInt::get(
- Type::getInt64Ty(*CurDAG->getContext()), uval);
- SDValue CPI = CurDAG->getTargetConstantPool(C, MVT::i64);
- SDNode *Tmp = CurDAG->getMachineNode(Alpha::LDAHr, dl, MVT::i64, CPI,
- SDValue(getGlobalBaseReg(), 0));
- return CurDAG->SelectNodeTo(N, Alpha::LDQr, MVT::i64, MVT::Other,
- CPI, SDValue(Tmp, 0), CurDAG->getEntryNode());
- }
- case ISD::TargetConstantFP:
- case ISD::ConstantFP: {
- ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N);
- bool isDouble = N->getValueType(0) == MVT::f64;
- EVT T = isDouble ? MVT::f64 : MVT::f32;
- if (CN->getValueAPF().isPosZero()) {
- return CurDAG->SelectNodeTo(N, isDouble ? Alpha::CPYST : Alpha::CPYSS,
- T, CurDAG->getRegister(Alpha::F31, T),
- CurDAG->getRegister(Alpha::F31, T));
- } else if (CN->getValueAPF().isNegZero()) {
- return CurDAG->SelectNodeTo(N, isDouble ? Alpha::CPYSNT : Alpha::CPYSNS,
- T, CurDAG->getRegister(Alpha::F31, T),
- CurDAG->getRegister(Alpha::F31, T));
- } else {
- report_fatal_error("Unhandled FP constant type");
- }
- break;
- }
-
- case ISD::SETCC:
- if (N->getOperand(0).getNode()->getValueType(0).isFloatingPoint()) {
- ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
-
- unsigned Opc = Alpha::WTF;
- bool rev = false;
- bool inv = false;
- switch(CC) {
- default: DEBUG(N->dump(CurDAG)); llvm_unreachable("Unknown FP comparison!");
- case ISD::SETEQ: case ISD::SETOEQ: case ISD::SETUEQ:
- Opc = Alpha::CMPTEQ; break;
- case ISD::SETLT: case ISD::SETOLT: case ISD::SETULT:
- Opc = Alpha::CMPTLT; break;
- case ISD::SETLE: case ISD::SETOLE: case ISD::SETULE:
- Opc = Alpha::CMPTLE; break;
- case ISD::SETGT: case ISD::SETOGT: case ISD::SETUGT:
- Opc = Alpha::CMPTLT; rev = true; break;
- case ISD::SETGE: case ISD::SETOGE: case ISD::SETUGE:
- Opc = Alpha::CMPTLE; rev = true; break;
- case ISD::SETNE: case ISD::SETONE: case ISD::SETUNE:
- Opc = Alpha::CMPTEQ; inv = true; break;
- case ISD::SETO:
- Opc = Alpha::CMPTUN; inv = true; break;
- case ISD::SETUO:
- Opc = Alpha::CMPTUN; break;
- }
- SDValue tmp1 = N->getOperand(rev?1:0);
- SDValue tmp2 = N->getOperand(rev?0:1);
- SDNode *cmp = CurDAG->getMachineNode(Opc, dl, MVT::f64, tmp1, tmp2);
- if (inv)
- cmp = CurDAG->getMachineNode(Alpha::CMPTEQ, dl,
- MVT::f64, SDValue(cmp, 0),
- CurDAG->getRegister(Alpha::F31, MVT::f64));
- switch(CC) {
- case ISD::SETUEQ: case ISD::SETULT: case ISD::SETULE:
- case ISD::SETUNE: case ISD::SETUGT: case ISD::SETUGE:
- {
- SDNode* cmp2 = CurDAG->getMachineNode(Alpha::CMPTUN, dl, MVT::f64,
- tmp1, tmp2);
- cmp = CurDAG->getMachineNode(Alpha::ADDT, dl, MVT::f64,
- SDValue(cmp2, 0), SDValue(cmp, 0));
- break;
- }
- default: break;
- }
-
- SDNode* LD = CurDAG->getMachineNode(Alpha::FTOIT, dl,
- MVT::i64, SDValue(cmp, 0));
- return CurDAG->getMachineNode(Alpha::CMPULT, dl, MVT::i64,
- CurDAG->getRegister(Alpha::R31, MVT::i64),
- SDValue(LD,0));
- }
- break;
-
- case ISD::AND: {
- ConstantSDNode* SC = NULL;
- ConstantSDNode* MC = NULL;
- if (N->getOperand(0).getOpcode() == ISD::SRL &&
- (MC = dyn_cast<ConstantSDNode>(N->getOperand(1))) &&
- (SC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1)))) {
- uint64_t sval = SC->getZExtValue();
- uint64_t mval = MC->getZExtValue();
- // If the result is a zap, let the autogenerated matcher handle it.
- if (get_zapImm(N->getOperand(0), mval))
- break;
- // Given mask X and shift S, check whether there is still a zap in the
- // mask if we play around with the bottom S bits.
- uint64_t dontcare = (~0ULL) >> (64 - sval);
- uint64_t mask = mval << sval;
-
- if (get_zapImm(mask | dontcare))
- mask = mask | dontcare;
-
- if (get_zapImm(mask)) {
- SDValue Z =
- SDValue(CurDAG->getMachineNode(Alpha::ZAPNOTi, dl, MVT::i64,
- N->getOperand(0).getOperand(0),
- getI64Imm(get_zapImm(mask))), 0);
- return CurDAG->getMachineNode(Alpha::SRLr, dl, MVT::i64, Z,
- getI64Imm(sval));
- }
- }
- break;
- }
-
- }
-
- return SelectCode(N);
-}
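
The ISD::Constant case above relies on the LDAH/LDA idiom: LDA adds a
sign-extended 16-bit literal, and LDAH adds a sign-extended 16-bit literal
scaled by 65536. A minimal standalone sketch of that split, assuming the
usual Alpha values IMM_LOW = -32768, IMM_HIGH = 32767, IMM_MULT = 65536
(the macros themselves are not restated in this hunk):

    #include <cassert>
    #include <cstdint>

    // Split val into hi/lo with hi * 65536 + lo == val, both halves
    // representable as sign-extended 16-bit immediates.
    void splitLDAHLDA(int64_t val, int64_t &hi, int64_t &lo) {
      lo = (int16_t)val;                                    // the LDA part
      hi = (int16_t)(((uint64_t)val - (uint64_t)lo) >> 16); // the LDAH part
      assert(hi * 65536 + lo == val && "constant needs the constant pool");
    }

Values that trip the assertion are exactly the ones the case above routes
through the constant pool via LDAHr/LDQr.
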
-
-void AlphaDAGToDAGISel::SelectCALL(SDNode *N) {
- // TODO: add flag handling to prevent nondeterministic breakage!
-
- SDValue Chain = N->getOperand(0);
- SDValue Addr = N->getOperand(1);
- SDValue InFlag = N->getOperand(N->getNumOperands() - 1);
- DebugLoc dl = N->getDebugLoc();
-
- if (Addr.getOpcode() == AlphaISD::GPRelLo) {
- SDValue GOT = SDValue(getGlobalBaseReg(), 0);
- Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R29, GOT, InFlag);
- InFlag = Chain.getValue(1);
- Chain = SDValue(CurDAG->getMachineNode(Alpha::BSR, dl, MVT::Other,
- MVT::Glue, Addr.getOperand(0),
- Chain, InFlag), 0);
- } else {
- Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R27, Addr, InFlag);
- InFlag = Chain.getValue(1);
- Chain = SDValue(CurDAG->getMachineNode(Alpha::JSR, dl, MVT::Other,
- MVT::Glue, Chain, InFlag), 0);
- }
- InFlag = Chain.getValue(1);
-
- ReplaceUses(SDValue(N, 0), Chain);
- ReplaceUses(SDValue(N, 1), InFlag);
-}
-
-
- /// createAlphaISelDag - This pass converts a legalized DAG into an
- /// Alpha-specific DAG, ready for instruction scheduling.
-///
-FunctionPass *llvm::createAlphaISelDag(AlphaTargetMachine &TM) {
- return new AlphaDAGToDAGISel(TM);
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp
deleted file mode 100644
index 3057eb8..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp
+++ /dev/null
@@ -1,962 +0,0 @@
-//===-- AlphaISelLowering.cpp - Alpha DAG Lowering Implementation ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the AlphaISelLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AlphaISelLowering.h"
-#include "AlphaTargetMachine.h"
-#include "AlphaMachineFunctionInfo.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/Constants.h"
-#include "llvm/Function.h"
-#include "llvm/Module.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/Type.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-/// AddLiveIn - This helper function adds the specified physical register to the
-/// MachineFunction as a live in value. It also creates a corresponding virtual
-/// register for it.
-static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
- TargetRegisterClass *RC) {
- assert(RC->contains(PReg) && "Not the correct regclass!");
- unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
- MF.getRegInfo().addLiveIn(PReg, VReg);
- return VReg;
-}
-
-AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
- : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
- // Set up the TargetLowering object.
- //I am having problems with shr n i8 1
- setBooleanContents(ZeroOrOneBooleanContent);
- setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
-
- addRegisterClass(MVT::i64, Alpha::GPRCRegisterClass);
- addRegisterClass(MVT::f64, Alpha::F8RCRegisterClass);
- addRegisterClass(MVT::f32, Alpha::F4RCRegisterClass);
-
- // We want to custom lower some of our intrinsics.
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
-
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Expand);
-
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
-
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
- // setOperationAction(ISD::BRIND, MVT::Other, Expand);
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BR_CC, MVT::Other, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- setOperationAction(ISD::FREM, MVT::f32, Expand);
- setOperationAction(ISD::FREM, MVT::f64, Expand);
-
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
- setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
- setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
- setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
-
- if (!TM.getSubtarget<AlphaSubtarget>().hasCT()) {
- setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
- setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
- setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
- }
- setOperationAction(ISD::BSWAP , MVT::i64, Expand);
- setOperationAction(ISD::ROTL , MVT::i64, Expand);
- setOperationAction(ISD::ROTR , MVT::i64, Expand);
-
- setOperationAction(ISD::SREM , MVT::i64, Custom);
- setOperationAction(ISD::UREM , MVT::i64, Custom);
- setOperationAction(ISD::SDIV , MVT::i64, Custom);
- setOperationAction(ISD::UDIV , MVT::i64, Custom);
-
- setOperationAction(ISD::ADDC , MVT::i64, Expand);
- setOperationAction(ISD::ADDE , MVT::i64, Expand);
- setOperationAction(ISD::SUBC , MVT::i64, Expand);
- setOperationAction(ISD::SUBE , MVT::i64, Expand);
-
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
-
- setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
- setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
-
- // We don't support sin/cos/sqrt/pow
- setOperationAction(ISD::FSIN , MVT::f64, Expand);
- setOperationAction(ISD::FCOS , MVT::f64, Expand);
- setOperationAction(ISD::FSIN , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f32, Expand);
-
- setOperationAction(ISD::FSQRT, MVT::f64, Expand);
- setOperationAction(ISD::FSQRT, MVT::f32, Expand);
-
- setOperationAction(ISD::FPOW , MVT::f32, Expand);
- setOperationAction(ISD::FPOW , MVT::f64, Expand);
-
- setOperationAction(ISD::FMA, MVT::f64, Expand);
- setOperationAction(ISD::FMA, MVT::f32, Expand);
-
- setOperationAction(ISD::SETCC, MVT::f32, Promote);
-
- setOperationAction(ISD::BITCAST, MVT::f32, Promote);
-
- setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
-
- // Not implemented yet.
- setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
-
- // We want to legalize GlobalAddress, ConstantPool, and
- // ExternalSymbol nodes into the appropriate instructions to
- // materialize the address.
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
- setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
- setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
- setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
-
- setOperationAction(ISD::VASTART, MVT::Other, Custom);
- setOperationAction(ISD::VAEND, MVT::Other, Expand);
- setOperationAction(ISD::VACOPY, MVT::Other, Custom);
- setOperationAction(ISD::VAARG, MVT::Other, Custom);
- setOperationAction(ISD::VAARG, MVT::i32, Custom);
-
- setOperationAction(ISD::JumpTable, MVT::i64, Custom);
- setOperationAction(ISD::JumpTable, MVT::i32, Custom);
-
- setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
- setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
-
- setStackPointerRegisterToSaveRestore(Alpha::R30);
-
- setJumpBufSize(272);
- setJumpBufAlignment(16);
-
- setMinFunctionAlignment(4);
-
- setInsertFencesForAtomic(true);
-
- computeRegisterProperties();
-}
-
-EVT AlphaTargetLowering::getSetCCResultType(EVT VT) const {
- return MVT::i64;
-}
-
-const char *AlphaTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- default: return 0;
- case AlphaISD::CVTQT_: return "Alpha::CVTQT_";
- case AlphaISD::CVTQS_: return "Alpha::CVTQS_";
- case AlphaISD::CVTTQ_: return "Alpha::CVTTQ_";
- case AlphaISD::GPRelHi: return "Alpha::GPRelHi";
- case AlphaISD::GPRelLo: return "Alpha::GPRelLo";
- case AlphaISD::RelLit: return "Alpha::RelLit";
- case AlphaISD::GlobalRetAddr: return "Alpha::GlobalRetAddr";
- case AlphaISD::CALL: return "Alpha::CALL";
- case AlphaISD::DivCall: return "Alpha::DivCall";
- case AlphaISD::RET_FLAG: return "Alpha::RET_FLAG";
- case AlphaISD::COND_BRANCH_I: return "Alpha::COND_BRANCH_I";
- case AlphaISD::COND_BRANCH_F: return "Alpha::COND_BRANCH_F";
- }
-}
-
-static SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
- EVT PtrVT = Op.getValueType();
- JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
- // FIXME there isn't really any debug info here
- DebugLoc dl = Op.getDebugLoc();
-
- SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, JTI,
- DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
- SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, JTI, Hi);
- return Lo;
-}
-
-//http://www.cs.arizona.edu/computer.help/policy/DIGITAL_unix/
-//AA-PY8AC-TET1_html/callCH3.html#BLOCK21
-
-// For now, just use the variable-size stack frame format.
-
-//In a standard call, the first six items are passed in registers $16
-//- $21 and/or registers $f16 - $f21. (See Section 4.1.2 for details
-//of argument-to-register correspondence.) The remaining items are
-//collected in a memory argument list that is a naturally aligned
-//array of quadwords. In a standard call, this list, if present, must
-//be passed at 0(SP).
-// Arguments 7 ... n are passed at 0(SP) ... (n-7)*8(SP); see the sketch below.
-
-// //#define FP $15
-// //#define RA $26
-// //#define PV $27
-// //#define GP $29
-// //#define SP $30
-
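
As a concrete reading of the convention quoted above, argument i < 6 lands
in register $16+i (or $f16+i for floating point) and argument i >= 6 lands
in the quadword at 8*(i-6)(SP). A hypothetical helper, not part of the
backend, that prints this mapping:

    #include <cstdio>

    // Print where argument ArgNo of a standard call lives.
    void printAlphaArgLoc(unsigned ArgNo, bool isFP) {
      if (ArgNo < 6)
        std::printf("arg %u -> $%s%u\n", ArgNo, isFP ? "f" : "", 16 + ArgNo);
      else
        std::printf("arg %u -> %u(SP)\n", ArgNo, 8 * (ArgNo - 6));
    }

LowerFormalArguments below implements the same split: the first six
arguments arrive as live-in registers, the rest as fixed frame objects at
offset 8 * (ArgNo - 6).
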
-#include "AlphaGenCallingConv.inc"
-
-SDValue
-AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- // Alpha target does not yet support tail call optimization.
- isTailCall = false;
-
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
-
- CCInfo.AnalyzeCallOperands(Outs, CC_Alpha);
-
- // Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getNextStackOffset();
-
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
- getPointerTy(), true));
-
- SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
- SmallVector<SDValue, 12> MemOpChains;
- SDValue StackPtr;
-
- // Walk the register/memloc assignments, inserting copies/loads.
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
-
- SDValue Arg = OutVals[i];
-
- // Promote the value if needed.
- switch (VA.getLocInfo()) {
- default: assert(0 && "Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::AExt:
- Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- }
-
- // Arguments that are passed in registers must be collected in the
- // RegsToPass vector.
- if (VA.isRegLoc()) {
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- } else {
- assert(VA.isMemLoc());
-
- if (StackPtr.getNode() == 0)
- StackPtr = DAG.getCopyFromReg(Chain, dl, Alpha::R30, MVT::i64);
-
- SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(),
- StackPtr,
- DAG.getIntPtrConstant(VA.getLocMemOffset()));
-
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(),false, false, 0));
- }
- }
-
- // Transform all store nodes into one single node because all store nodes are
- // independent of each other.
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
-
- // Build a sequence of copy-to-reg nodes chained together with token chain
- // and flag operands which copy the outgoing args into registers. The InFlag
- // is necessary since all emitted instructions must be glued together.
- SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
-
- // Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
-
- // Add argument registers to the end of the list so that they are
- // known live into the call.
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
- Ops.push_back(DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
-
- if (InFlag.getNode())
- Ops.push_back(InFlag);
-
- Chain = DAG.getNode(AlphaISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
- InFlag = Chain.getValue(1);
-
- // Create the CALLSEQ_END node.
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getConstant(NumBytes, getPointerTy(), true),
- DAG.getConstant(0, getPointerTy(), true),
- InFlag);
- InFlag = Chain.getValue(1);
-
- // Handle result values, copying them out of physregs into vregs that we
- // return.
- return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
- Ins, dl, DAG, InVals);
-}
-
-/// LowerCallResult - Lower the result values of a call into the
-/// appropriate copies out of appropriate physical registers.
-///
-SDValue
-AlphaTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
-
- // Assign locations to each value returned by this call.
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
-
- CCInfo.AnalyzeCallResult(Ins, RetCC_Alpha);
-
- // Copy all of the result registers out of their specified physreg.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign &VA = RVLocs[i];
-
- Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
- VA.getLocVT(), InFlag).getValue(1);
- SDValue RetValue = Chain.getValue(0);
- InFlag = Chain.getValue(2);
-
- // If this is an 8/16/32-bit value, it is really passed promoted to 64
- // bits. Insert an assert[sz]ext to capture this, then truncate to the
- // right size.
- if (VA.getLocInfo() == CCValAssign::SExt)
- RetValue = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), RetValue,
- DAG.getValueType(VA.getValVT()));
- else if (VA.getLocInfo() == CCValAssign::ZExt)
- RetValue = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), RetValue,
- DAG.getValueType(VA.getValVT()));
-
- if (VA.getLocInfo() != CCValAssign::Full)
- RetValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), RetValue);
-
- InVals.push_back(RetValue);
- }
-
- return Chain;
-}
-
-SDValue
-AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
-
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- AlphaMachineFunctionInfo *FuncInfo = MF.getInfo<AlphaMachineFunctionInfo>();
-
- unsigned args_int[] = {
- Alpha::R16, Alpha::R17, Alpha::R18, Alpha::R19, Alpha::R20, Alpha::R21};
- unsigned args_float[] = {
- Alpha::F16, Alpha::F17, Alpha::F18, Alpha::F19, Alpha::F20, Alpha::F21};
-
- for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
- SDValue argt;
- EVT ObjectVT = Ins[ArgNo].VT;
- SDValue ArgVal;
-
- if (ArgNo < 6) {
- switch (ObjectVT.getSimpleVT().SimpleTy) {
- default:
- assert(false && "Invalid value type!");
- case MVT::f64:
- args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo],
- &Alpha::F8RCRegClass);
- ArgVal = DAG.getCopyFromReg(Chain, dl, args_float[ArgNo], ObjectVT);
- break;
- case MVT::f32:
- args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo],
- &Alpha::F4RCRegClass);
- ArgVal = DAG.getCopyFromReg(Chain, dl, args_float[ArgNo], ObjectVT);
- break;
- case MVT::i64:
- args_int[ArgNo] = AddLiveIn(MF, args_int[ArgNo],
- &Alpha::GPRCRegClass);
- ArgVal = DAG.getCopyFromReg(Chain, dl, args_int[ArgNo], MVT::i64);
- break;
- }
- } else { //more args
- // Create the frame index object for this incoming parameter...
- int FI = MFI->CreateFixedObject(8, 8 * (ArgNo - 6), true);
-
- // Create the SelectionDAG nodes corresponding to a load
- // from this parameter.
- SDValue FIN = DAG.getFrameIndex(FI, MVT::i64);
- ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
- false, false, 0);
- }
- InVals.push_back(ArgVal);
- }
-
- // If the function takes a variable number of arguments, copy all regs to the stack.
- if (isVarArg) {
- FuncInfo->setVarArgsOffset(Ins.size() * 8);
- std::vector<SDValue> LS;
- for (int i = 0; i < 6; ++i) {
- if (TargetRegisterInfo::isPhysicalRegister(args_int[i]))
- args_int[i] = AddLiveIn(MF, args_int[i], &Alpha::GPRCRegClass);
- SDValue argt = DAG.getCopyFromReg(Chain, dl, args_int[i], MVT::i64);
- int FI = MFI->CreateFixedObject(8, -8 * (6 - i), true);
- if (i == 0) FuncInfo->setVarArgsBase(FI);
- SDValue SDFI = DAG.getFrameIndex(FI, MVT::i64);
- LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, MachinePointerInfo(),
- false, false, 0));
-
- if (TargetRegisterInfo::isPhysicalRegister(args_float[i]))
- args_float[i] = AddLiveIn(MF, args_float[i], &Alpha::F8RCRegClass);
- argt = DAG.getCopyFromReg(Chain, dl, args_float[i], MVT::f64);
- FI = MFI->CreateFixedObject(8, - 8 * (12 - i), true);
- SDFI = DAG.getFrameIndex(FI, MVT::i64);
- LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, MachinePointerInfo(),
- false, false, 0));
- }
-
- //Set up a token factor with all the stack traffic
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &LS[0], LS.size());
- }
-
- return Chain;
-}
-
-SDValue
-AlphaTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
-
- SDValue Copy = DAG.getCopyToReg(Chain, dl, Alpha::R26,
- DAG.getNode(AlphaISD::GlobalRetAddr,
- DebugLoc(), MVT::i64),
- SDValue());
- switch (Outs.size()) {
- default:
- llvm_unreachable("Do not know how to return this many arguments!");
- case 0:
- break;
- //return SDValue(); // ret void is legal
- case 1: {
- EVT ArgVT = Outs[0].VT;
- unsigned ArgReg;
- if (ArgVT.isInteger())
- ArgReg = Alpha::R0;
- else {
- assert(ArgVT.isFloatingPoint());
- ArgReg = Alpha::F0;
- }
- Copy = DAG.getCopyToReg(Copy, dl, ArgReg,
- OutVals[0], Copy.getValue(1));
- if (DAG.getMachineFunction().getRegInfo().liveout_empty())
- DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg);
- break;
- }
- case 2: {
- EVT ArgVT = Outs[0].VT;
- unsigned ArgReg1, ArgReg2;
- if (ArgVT.isInteger()) {
- ArgReg1 = Alpha::R0;
- ArgReg2 = Alpha::R1;
- } else {
- assert(ArgVT.isFloatingPoint());
- ArgReg1 = Alpha::F0;
- ArgReg2 = Alpha::F1;
- }
- Copy = DAG.getCopyToReg(Copy, dl, ArgReg1,
- OutVals[0], Copy.getValue(1));
- if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(),
- DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg1)
- == DAG.getMachineFunction().getRegInfo().liveout_end())
- DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg1);
- Copy = DAG.getCopyToReg(Copy, dl, ArgReg2,
- OutVals[1], Copy.getValue(1));
- if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(),
- DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg2)
- == DAG.getMachineFunction().getRegInfo().liveout_end())
- DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg2);
- break;
- }
- }
- return DAG.getNode(AlphaISD::RET_FLAG, dl,
- MVT::Other, Copy, Copy.getValue(1));
-}
-
-void AlphaTargetLowering::LowerVAARG(SDNode *N, SDValue &Chain,
- SDValue &DataPtr,
- SelectionDAG &DAG) const {
- Chain = N->getOperand(0);
- SDValue VAListP = N->getOperand(1);
- const Value *VAListS = cast<SrcValueSDNode>(N->getOperand(2))->getValue();
- DebugLoc dl = N->getDebugLoc();
-
- SDValue Base = DAG.getLoad(MVT::i64, dl, Chain, VAListP,
- MachinePointerInfo(VAListS),
- false, false, 0);
- SDValue Tmp = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
- DAG.getConstant(8, MVT::i64));
- SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Base.getValue(1),
- Tmp, MachinePointerInfo(),
- MVT::i32, false, false, 0);
- DataPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Base, Offset);
- if (N->getValueType(0).isFloatingPoint())
- {
- // If this is an FP value and Offset < 6*8, subtract 6*8 from DataPtr
- // (see the layout sketch after this function).
- SDValue FPDataPtr = DAG.getNode(ISD::SUB, dl, MVT::i64, DataPtr,
- DAG.getConstant(8*6, MVT::i64));
- SDValue CC = DAG.getSetCC(dl, MVT::i64, Offset,
- DAG.getConstant(8*6, MVT::i64), ISD::SETLT);
- DataPtr = DAG.getNode(ISD::SELECT, dl, MVT::i64, CC, FPDataPtr, DataPtr);
- }
-
- SDValue NewOffset = DAG.getNode(ISD::ADD, dl, MVT::i64, Offset,
- DAG.getConstant(8, MVT::i64));
- Chain = DAG.getTruncStore(Offset.getValue(1), dl, NewOffset, Tmp,
- MachinePointerInfo(),
- MVT::i32, false, false, 0);
-}
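
Read together with the varargs spilling in LowerFormalArguments above, the
va_list this helper walks behaves like a two-field record. A sketch under
that reading (the struct is illustrative, not a declaration from the
backend):

    #include <cstdint>

    struct AlphaVAList {
      char   *Base;    // loaded from 0(VAListP)
      int32_t Offset;  // sign-extended i32 loaded from 8(VAListP)
    };

    // The address LowerVAARG computes before bumping Offset by 8.
    char *vaArgAddr(const AlphaVAList &VA, bool isFP) {
      char *Ptr = VA.Base + VA.Offset;
      // FP arguments were spilled 6*8 bytes below the integer ones, so
      // step back while still inside the six register-save slots.
      return (isFP && VA.Offset < 6 * 8) ? Ptr - 6 * 8 : Ptr;
    }
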
-
-/// LowerOperation - Provide custom lowering hooks for some operations.
-///
-SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
- SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
- switch (Op.getOpcode()) {
- default: llvm_unreachable("Wasn't expecting to be able to lower this!");
- case ISD::JumpTable: return LowerJumpTable(Op, DAG);
-
- case ISD::INTRINSIC_WO_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- switch (IntNo) {
- default: break; // Don't custom lower most intrinsics.
- case Intrinsic::alpha_umulh:
- return DAG.getNode(ISD::MULHU, dl, MVT::i64,
- Op.getOperand(1), Op.getOperand(2));
- }
- break; // Don't fall through into the SRL_PARTS case below.
- }
-
- case ISD::SRL_PARTS: {
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
- SDValue bm = DAG.getNode(ISD::SUB, dl, MVT::i64,
- DAG.getConstant(64, MVT::i64), ShAmt);
- SDValue BMCC = DAG.getSetCC(dl, MVT::i64, bm,
- DAG.getConstant(0, MVT::i64), ISD::SETLE);
- // if 64 - shAmt <= 0 (see the plain C++ rendering after LowerOperation)
- SDValue Hi_Neg = DAG.getConstant(0, MVT::i64);
- SDValue ShAmt_Neg = DAG.getNode(ISD::SUB, dl, MVT::i64,
- DAG.getConstant(0, MVT::i64), bm);
- SDValue Lo_Neg = DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpHi, ShAmt_Neg);
- // else
- SDValue carries = DAG.getNode(ISD::SHL, dl, MVT::i64, ShOpHi, bm);
- SDValue Hi_Pos = DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpHi, ShAmt);
- SDValue Lo_Pos = DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpLo, ShAmt);
- Lo_Pos = DAG.getNode(ISD::OR, dl, MVT::i64, Lo_Pos, carries);
- // Merge
- SDValue Hi = DAG.getNode(ISD::SELECT, dl, MVT::i64, BMCC, Hi_Neg, Hi_Pos);
- SDValue Lo = DAG.getNode(ISD::SELECT, dl, MVT::i64, BMCC, Lo_Neg, Lo_Pos);
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, 2, dl);
- }
- // case ISD::SRA_PARTS:
-
- // case ISD::SHL_PARTS:
-
-
- case ISD::SINT_TO_FP: {
- assert(Op.getOperand(0).getValueType() == MVT::i64 &&
- "Unhandled SINT_TO_FP type in custom expander!");
- SDValue LD;
- bool isDouble = Op.getValueType() == MVT::f64;
- LD = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
- SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, dl,
- isDouble?MVT::f64:MVT::f32, LD);
- return FP;
- }
- case ISD::FP_TO_SINT: {
- bool isDouble = Op.getOperand(0).getValueType() == MVT::f64;
- SDValue src = Op.getOperand(0);
-
- if (!isDouble) //Promote
- src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, src);
-
- src = DAG.getNode(AlphaISD::CVTTQ_, dl, MVT::f64, src);
-
- return DAG.getNode(ISD::BITCAST, dl, MVT::i64, src);
- }
- case ISD::ConstantPool: {
- ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
- const Constant *C = CP->getConstVal();
- SDValue CPI = DAG.getTargetConstantPool(C, MVT::i64, CP->getAlignment());
- // FIXME there isn't really any debug info here
-
- SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, CPI,
- DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
- SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, CPI, Hi);
- return Lo;
- }
- case ISD::GlobalTLSAddress:
- llvm_unreachable("TLS not implemented for Alpha.");
- case ISD::GlobalAddress: {
- GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
- const GlobalValue *GV = GSDN->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
- GSDN->getOffset());
- // FIXME there isn't really any debug info here
-
- // if (!GV->hasWeakLinkage() && !GV->isDeclaration()
- // && !GV->hasLinkOnceLinkage()) {
- if (GV->hasLocalLinkage()) {
- SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, GA,
- DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
- SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, GA, Hi);
- return Lo;
- } else
- return DAG.getNode(AlphaISD::RelLit, dl, MVT::i64, GA,
- DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
- }
- case ISD::ExternalSymbol: {
- return DAG.getNode(AlphaISD::RelLit, dl, MVT::i64,
- DAG.getTargetExternalSymbol(cast<ExternalSymbolSDNode>(Op)
- ->getSymbol(), MVT::i64),
- DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
- }
-
- case ISD::UREM:
- case ISD::SREM:
- // Expand only in the constant case.
- if (Op.getOperand(1).getOpcode() == ISD::Constant) {
- EVT VT = Op.getNode()->getValueType(0);
- SDValue Tmp1 = Op.getNode()->getOpcode() == ISD::UREM ?
- BuildUDIV(Op.getNode(), DAG, NULL) :
- BuildSDIV(Op.getNode(), DAG, NULL);
- Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Op.getOperand(1));
- Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Op.getOperand(0), Tmp1);
- return Tmp1;
- }
- //fall through
- case ISD::SDIV:
- case ISD::UDIV:
- if (Op.getValueType().isInteger()) {
- if (Op.getOperand(1).getOpcode() == ISD::Constant)
- return Op.getOpcode() == ISD::SDIV ? BuildSDIV(Op.getNode(), DAG, NULL)
- : BuildUDIV(Op.getNode(), DAG, NULL);
- const char* opstr = 0;
- switch (Op.getOpcode()) {
- case ISD::UREM: opstr = "__remqu"; break;
- case ISD::SREM: opstr = "__remq"; break;
- case ISD::UDIV: opstr = "__divqu"; break;
- case ISD::SDIV: opstr = "__divq"; break;
- }
- SDValue Tmp1 = Op.getOperand(0),
- Tmp2 = Op.getOperand(1),
- Addr = DAG.getExternalSymbol(opstr, MVT::i64);
- return DAG.getNode(AlphaISD::DivCall, dl, MVT::i64, Addr, Tmp1, Tmp2);
- }
- break;
-
- case ISD::VAARG: {
- SDValue Chain, DataPtr;
- LowerVAARG(Op.getNode(), Chain, DataPtr, DAG);
-
- SDValue Result;
- if (Op.getValueType() == MVT::i32)
- Result = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Chain, DataPtr,
- MachinePointerInfo(), MVT::i32, false, false, 0);
- else
- Result = DAG.getLoad(Op.getValueType(), dl, Chain, DataPtr,
- MachinePointerInfo(),
- false, false, 0);
- return Result;
- }
- case ISD::VACOPY: {
- SDValue Chain = Op.getOperand(0);
- SDValue DestP = Op.getOperand(1);
- SDValue SrcP = Op.getOperand(2);
- const Value *DestS = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
- const Value *SrcS = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
-
- SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP,
- MachinePointerInfo(SrcS),
- false, false, 0);
- SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP,
- MachinePointerInfo(DestS),
- false, false, 0);
- SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP,
- DAG.getConstant(8, MVT::i64));
- Val = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Result,
- NP, MachinePointerInfo(), MVT::i32, false, false, 0);
- SDValue NPD = DAG.getNode(ISD::ADD, dl, MVT::i64, DestP,
- DAG.getConstant(8, MVT::i64));
- return DAG.getTruncStore(Val.getValue(1), dl, Val, NPD,
- MachinePointerInfo(), MVT::i32,
- false, false, 0);
- }
- case ISD::VASTART: {
- MachineFunction &MF = DAG.getMachineFunction();
- AlphaMachineFunctionInfo *FuncInfo = MF.getInfo<AlphaMachineFunctionInfo>();
-
- SDValue Chain = Op.getOperand(0);
- SDValue VAListP = Op.getOperand(1);
- const Value *VAListS = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
-
- // vastart stores the address of the VarArgsBase and VarArgsOffset
- SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsBase(), MVT::i64);
- SDValue S1 = DAG.getStore(Chain, dl, FR, VAListP,
- MachinePointerInfo(VAListS), false, false, 0);
- SDValue SA2 = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
- DAG.getConstant(8, MVT::i64));
- return DAG.getTruncStore(S1, dl,
- DAG.getConstant(FuncInfo->getVarArgsOffset(),
- MVT::i64),
- SA2, MachinePointerInfo(),
- MVT::i32, false, false, 0);
- }
- case ISD::RETURNADDR:
- return DAG.getNode(AlphaISD::GlobalRetAddr, DebugLoc(), MVT::i64);
- //FIXME: implement
- case ISD::FRAMEADDR: break;
- }
-
- return SDValue();
-}
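
For reference, the ISD::SRL_PARTS case above builds a two-word logical
shift right out of two 64-bit halves. A plain C++ rendering of the same
case analysis (a sketch, not code from the patch):

    #include <cstdint>

    void srlParts(uint64_t Lo, uint64_t Hi, unsigned Amt,
                  uint64_t &OutLo, uint64_t &OutHi) {
      if (Amt == 0) { OutLo = Lo; OutHi = Hi; return; }
      if (Amt >= 64) {     // 64 - Amt <= 0: the high word is exhausted
        OutHi = 0;
        OutLo = Hi >> (Amt - 64);
      } else {             // bits carried out of Hi flow into Lo
        OutHi = Hi >> Amt;
        OutLo = (Lo >> Amt) | (Hi << (64 - Amt));
      }
    }
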
-
-void AlphaTargetLowering::ReplaceNodeResults(SDNode *N,
- SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const {
- DebugLoc dl = N->getDebugLoc();
- assert(N->getValueType(0) == MVT::i32 &&
- N->getOpcode() == ISD::VAARG &&
- "Unknown node to custom promote!");
-
- SDValue Chain, DataPtr;
- LowerVAARG(N, Chain, DataPtr, DAG);
- SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr,
- MachinePointerInfo(),
- false, false, 0);
- Results.push_back(Res);
- Results.push_back(SDValue(Res.getNode(), 1));
-}
-
-
-//Inline Asm
-
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
-AlphaTargetLowering::ConstraintType
-AlphaTargetLowering::getConstraintType(const std::string &Constraint) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- default: break;
- case 'f':
- case 'r':
- return C_RegisterClass;
- }
- }
- return TargetLowering::getConstraintType(Constraint);
-}
-
-/// Examine constraint type and operand type and determine a weight value.
-/// This object must already have been set up with the operand type
-/// and the current alternative constraint selected.
-TargetLowering::ConstraintWeight
-AlphaTargetLowering::getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const {
- ConstraintWeight weight = CW_Invalid;
- Value *CallOperandVal = info.CallOperandVal;
- // If we don't have a value, we can't do a match,
- // but allow it at the lowest weight.
- if (CallOperandVal == NULL)
- return CW_Default;
- // Look at the constraint type.
- switch (*constraint) {
- default:
- weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
- break;
- case 'f':
- weight = CW_Register;
- break;
- }
- return weight;
-}
-
-/// Given a register class constraint, like 'r', if this corresponds directly
-/// to an LLVM register class, return a register of 0 and the register class
-/// pointer.
-std::pair<unsigned, const TargetRegisterClass*> AlphaTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
-{
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- case 'r':
- return std::make_pair(0U, Alpha::GPRCRegisterClass);
- case 'f':
- return VT == MVT::f64 ? std::make_pair(0U, Alpha::F8RCRegisterClass) :
- std::make_pair(0U, Alpha::F4RCRegisterClass);
- }
- }
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
-}
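
A hypothetical use of the two constraint letters handled above, as it
might appear in C source for this target ('r' selects a GPR, 'f' an FP
register; the function is illustrative only):

    long add_via_asm(long a, long b) {
      long r;
      __asm__("addq %1,%2,%0" : "=r"(r) : "r"(a), "r"(b));
      return r;
    }
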
-
-//===----------------------------------------------------------------------===//
-// Other Lowering Code
-//===----------------------------------------------------------------------===//
-
-MachineBasicBlock *
-AlphaTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- assert((MI->getOpcode() == Alpha::CAS32 ||
- MI->getOpcode() == Alpha::CAS64 ||
- MI->getOpcode() == Alpha::LAS32 ||
- MI->getOpcode() == Alpha::LAS64 ||
- MI->getOpcode() == Alpha::SWAP32 ||
- MI->getOpcode() == Alpha::SWAP64) &&
- "Unexpected instr type to insert");
-
- bool is32 = MI->getOpcode() == Alpha::CAS32 ||
- MI->getOpcode() == Alpha::LAS32 ||
- MI->getOpcode() == Alpha::SWAP32;
-
- // Load-locked/store-conditional sequences for atomic ops all take the
- // same form:
- // start:
- //   ll
- //   do stuff (maybe branch to exit)
- //   sc
- //   test sc and maybe branch back to start
- // exit:
- // (A C++ analogy follows after this function.)
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- DebugLoc dl = MI->getDebugLoc();
- MachineFunction::iterator It = BB;
- ++It;
-
- MachineBasicBlock *thisMBB = BB;
- MachineFunction *F = BB->getParent();
- MachineBasicBlock *llscMBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
-
- sinkMBB->splice(sinkMBB->begin(), thisMBB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- thisMBB->end());
- sinkMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
-
- F->insert(It, llscMBB);
- F->insert(It, sinkMBB);
-
- BuildMI(thisMBB, dl, TII->get(Alpha::BR)).addMBB(llscMBB);
-
- unsigned reg_res = MI->getOperand(0).getReg(),
- reg_ptr = MI->getOperand(1).getReg(),
- reg_v2 = MI->getOperand(2).getReg(),
- reg_store = F->getRegInfo().createVirtualRegister(&Alpha::GPRCRegClass);
-
- BuildMI(llscMBB, dl, TII->get(is32 ? Alpha::LDL_L : Alpha::LDQ_L),
- reg_res).addImm(0).addReg(reg_ptr);
- switch (MI->getOpcode()) {
- case Alpha::CAS32:
- case Alpha::CAS64: {
- unsigned reg_cmp
- = F->getRegInfo().createVirtualRegister(&Alpha::GPRCRegClass);
- BuildMI(llscMBB, dl, TII->get(Alpha::CMPEQ), reg_cmp)
- .addReg(reg_v2).addReg(reg_res);
- BuildMI(llscMBB, dl, TII->get(Alpha::BEQ))
- .addImm(0).addReg(reg_cmp).addMBB(sinkMBB);
- BuildMI(llscMBB, dl, TII->get(Alpha::BISr), reg_store)
- .addReg(Alpha::R31).addReg(MI->getOperand(3).getReg());
- break;
- }
- case Alpha::LAS32:
- case Alpha::LAS64: {
- BuildMI(llscMBB, dl,TII->get(is32 ? Alpha::ADDLr : Alpha::ADDQr), reg_store)
- .addReg(reg_res).addReg(reg_v2);
- break;
- }
- case Alpha::SWAP32:
- case Alpha::SWAP64: {
- BuildMI(llscMBB, dl, TII->get(Alpha::BISr), reg_store)
- .addReg(reg_v2).addReg(reg_v2);
- break;
- }
- }
- BuildMI(llscMBB, dl, TII->get(is32 ? Alpha::STL_C : Alpha::STQ_C), reg_store)
- .addReg(reg_store).addImm(0).addReg(reg_ptr);
- BuildMI(llscMBB, dl, TII->get(Alpha::BEQ))
- .addImm(0).addReg(reg_store).addMBB(llscMBB);
- BuildMI(llscMBB, dl, TII->get(Alpha::BR)).addMBB(sinkMBB);
-
- thisMBB->addSuccessor(llscMBB);
- llscMBB->addSuccessor(llscMBB);
- llscMBB->addSuccessor(sinkMBB);
- MI->eraseFromParent(); // The pseudo instruction is gone now.
-
- return sinkMBB;
-}
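
As a C++ analogy for the LAS64 (atomic add) flavor of the loop built
above (an illustration of the retry structure, not the emitted machine
code):

    #include <atomic>
    #include <cstdint>

    int64_t atomicAdd64(std::atomic<int64_t> &Mem, int64_t V2) {
      int64_t Res = Mem.load();             // plays the role of LDQ_L
      // A failed STQ_C leaves 0 in reg_store and the BEQ loops back;
      // compare_exchange_weak models that spurious-failure retry.
      while (!Mem.compare_exchange_weak(Res, Res + V2))
        ;
      return Res;                           // reg_res: the loaded value
    }
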
-
-bool
-AlphaTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
- // The Alpha target isn't yet aware of offsets.
- return false;
-}
-
-bool AlphaTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
- if (VT != MVT::f32 && VT != MVT::f64)
- return false;
- // +0.0 F31
- // +0.0f F31
- // -0.0 -F31
- // -0.0f -F31
- return Imm.isZero() || Imm.isNegZero();
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h
deleted file mode 100644
index 80f8efa..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h
+++ /dev/null
@@ -1,142 +0,0 @@
-//===-- AlphaISelLowering.h - Alpha DAG Lowering Interface ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that Alpha uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_ALPHA_ALPHAISELLOWERING_H
-#define LLVM_TARGET_ALPHA_ALPHAISELLOWERING_H
-
-#include "llvm/ADT/VectorExtras.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "Alpha.h"
-
-namespace llvm {
-
- namespace AlphaISD {
- enum NodeType {
- // Start the numbering where the builtin ops and target ops leave off.
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
- // These correspond to the identically named instructions.
- CVTQT_, CVTQS_, CVTTQ_,
-
- /// GPRelHi/GPRelLo - These represent the high and low 16-bit
- /// parts of a global address respectively.
- GPRelHi, GPRelLo,
-
- /// RelLit - Literal relocation of a global.
- RelLit,
-
- /// GlobalRetAddr - used to restore the return address
- GlobalRetAddr,
-
- /// CALL - Normal call.
- CALL,
-
- /// DivCall - Used for special library calls for div and rem.
- DivCall,
-
- /// return flag operand
- RET_FLAG,
-
- /// CHAIN = COND_BRANCH CHAIN, OPC, (G|F)PRC, DESTBB [, INFLAG] - This
- /// corresponds to the COND_BRANCH pseudo instruction.
- /// *PRC is the input register to compare to zero,
- /// OPC is the branch opcode to use (e.g. Alpha::BEQ),
- /// DESTBB is the destination block to branch to, and INFLAG is
- /// an optional input flag argument.
- COND_BRANCH_I, COND_BRANCH_F
-
- };
- }
-
- class AlphaTargetLowering : public TargetLowering {
- public:
- explicit AlphaTargetLowering(TargetMachine &TM);
-
- virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i64; }
-
- /// getSetCCResultType - Get the SETCC result ValueType
- virtual EVT getSetCCResultType(EVT VT) const;
-
- /// LowerOperation - Provide custom lowering hooks for some operations.
- ///
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
-
- /// ReplaceNodeResults - Replace the results of node with an illegal result
- /// type with new values built out of custom code.
- ///
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const;
-
- // Friendly names for dumps
- const char *getTargetNodeName(unsigned Opcode) const;
-
- SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- ConstraintType getConstraintType(const std::string &Constraint) const;
-
- /// Examine constraint string and operand type and determine a weight value.
- /// The operand object must already have been set up with the operand type.
- ConstraintWeight getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const;
-
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
-
- MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const;
-
- virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
-
- /// isFPImmLegal - Returns true if the target can instruction select the
- /// specified FP immediate natively. If false, the legalizer will
- /// materialize the FP immediate as a load from a constant pool.
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
-
- private:
- // Helpers for custom lowering.
- void LowerVAARG(SDNode *N, SDValue &Chain, SDValue &DataPtr,
- SelectionDAG &DAG) const;
-
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
- };
-}
-
-#endif // LLVM_TARGET_ALPHA_ALPHAISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td b/contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td
deleted file mode 100644
index 6f4ebf2..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td
+++ /dev/null
@@ -1,268 +0,0 @@
-//===- AlphaInstrFormats.td - Alpha Instruction Formats ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-//3.3:
-//Memory
-//Branch
-//Operate
-//Floating-point
-//PALcode
-
-def u8imm : Operand<i64>;
-def s14imm : Operand<i64>;
-def s16imm : Operand<i64>;
-def s21imm : Operand<i64>;
-def s64imm : Operand<i64>;
-def u64imm : Operand<i64>;
-
-//===----------------------------------------------------------------------===//
-// Instruction format superclass
-//===----------------------------------------------------------------------===//
-// Alpha instruction baseline
-class InstAlpha<bits<6> op, string asmstr, InstrItinClass itin> : Instruction {
- field bits<32> Inst;
- let Namespace = "Alpha";
- let AsmString = asmstr;
- let Inst{31-26} = op;
- let Itinerary = itin;
-}
-
-
-//3.3.1
-class MForm<bits<6> opcode, bit load, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern = pattern;
- let canFoldAsLoad = load;
- let Defs = [R28]; //We may use this for frame index calculations, so reserve it here
-
- bits<5> Ra;
- bits<16> disp;
- bits<5> Rb;
-
- let Inst{25-21} = Ra;
- let Inst{20-16} = Rb;
- let Inst{15-0} = disp;
-}
-class MfcForm<bits<6> opcode, bits<16> fc, string asmstr, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- bits<5> Ra;
-
- let OutOperandList = (outs GPRC:$RA);
- let InOperandList = (ins);
- let Inst{25-21} = Ra;
- let Inst{20-16} = 0;
- let Inst{15-0} = fc;
-}
-class MfcPForm<bits<6> opcode, bits<16> fc, string asmstr, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let OutOperandList = (outs);
- let InOperandList = (ins);
- let Inst{25-21} = 0;
- let Inst{20-16} = 0;
- let Inst{15-0} = fc;
-}
-
-class MbrForm<bits<6> opcode, bits<2> TB, dag OL, string asmstr, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- bits<5> Ra;
- bits<5> Rb;
- bits<14> disp;
-
- let OutOperandList = (outs);
- let InOperandList = OL;
-
- let Inst{25-21} = Ra;
- let Inst{20-16} = Rb;
- let Inst{15-14} = TB;
- let Inst{13-0} = disp;
-}
-class MbrpForm<bits<6> opcode, bits<2> TB, dag OL, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern=pattern;
- bits<5> Ra;
- bits<5> Rb;
- bits<14> disp;
-
- let OutOperandList = (outs);
- let InOperandList = OL;
-
- let Inst{25-21} = Ra;
- let Inst{20-16} = Rb;
- let Inst{15-14} = TB;
- let Inst{13-0} = disp;
-}
-
-//3.3.2
-def target : Operand<OtherVT> {}
-
-let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
-class BFormN<bits<6> opcode, dag OL, string asmstr, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let OutOperandList = (outs);
- let InOperandList = OL;
- bits<64> Opc; //dummy
- bits<5> Ra;
- bits<21> disp;
-
- let Inst{25-21} = Ra;
- let Inst{20-0} = disp;
-}
-}
-
-let isBranch = 1, isTerminator = 1 in
-class BFormD<bits<6> opcode, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern = pattern;
- let OutOperandList = (outs);
- let InOperandList = (ins target:$DISP);
- bits<5> Ra;
- bits<21> disp;
-
- let Inst{25-21} = Ra;
- let Inst{20-0} = disp;
-}
-
-//3.3.3
-class OForm<bits<6> opcode, bits<7> fun, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern = pattern;
- let OutOperandList = (outs GPRC:$RC);
- let InOperandList = (ins GPRC:$RA, GPRC:$RB);
-
- bits<5> Rc;
- bits<5> Ra;
- bits<5> Rb;
- bits<7> Function = fun;
-
- let Inst{25-21} = Ra;
- let Inst{20-16} = Rb;
- let Inst{15-13} = 0;
- let Inst{12} = 0;
- let Inst{11-5} = Function;
- let Inst{4-0} = Rc;
-}
-
-class OForm2<bits<6> opcode, bits<7> fun, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern = pattern;
- let OutOperandList = (outs GPRC:$RC);
- let InOperandList = (ins GPRC:$RB);
-
- bits<5> Rc;
- bits<5> Rb;
- bits<7> Function = fun;
-
- let Inst{25-21} = 31;
- let Inst{20-16} = Rb;
- let Inst{15-13} = 0;
- let Inst{12} = 0;
- let Inst{11-5} = Function;
- let Inst{4-0} = Rc;
-}
-
-class OForm4<bits<6> opcode, bits<7> fun, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern = pattern;
- let OutOperandList = (outs GPRC:$RDEST);
- let InOperandList = (ins GPRC:$RCOND, GPRC:$RTRUE, GPRC:$RFALSE);
- let Constraints = "$RFALSE = $RDEST";
- let DisableEncoding = "$RFALSE";
-
- bits<5> Rc;
- bits<5> Ra;
- bits<5> Rb;
- bits<7> Function = fun;
-
-// let Constraints = "$RFALSE = $RDEST";
- let Inst{25-21} = Ra;
- let Inst{20-16} = Rb;
- let Inst{15-13} = 0;
- let Inst{12} = 0;
- let Inst{11-5} = Function;
- let Inst{4-0} = Rc;
-}
-
-
-class OFormL<bits<6> opcode, bits<7> fun, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern = pattern;
- let OutOperandList = (outs GPRC:$RC);
- let InOperandList = (ins GPRC:$RA, u8imm:$L);
-
- bits<5> Rc;
- bits<5> Ra;
- bits<8> LIT;
- bits<7> Function = fun;
-
- let Inst{25-21} = Ra;
- let Inst{20-13} = LIT;
- let Inst{12} = 1;
- let Inst{11-5} = Function;
- let Inst{4-0} = Rc;
-}
-
-class OForm4L<bits<6> opcode, bits<7> fun, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern = pattern;
- let OutOperandList = (outs GPRC:$RDEST);
- let InOperandList = (ins GPRC:$RCOND, s64imm:$RTRUE, GPRC:$RFALSE);
- let Constraints = "$RFALSE = $RDEST";
- let DisableEncoding = "$RFALSE";
-
- bits<5> Rc;
- bits<5> Ra;
- bits<8> LIT;
- bits<7> Function = fun;
-
-// let Constraints = "$RFALSE = $RDEST";
- let Inst{25-21} = Ra;
- let Inst{20-13} = LIT;
- let Inst{12} = 1;
- let Inst{11-5} = Function;
- let Inst{4-0} = Rc;
-}
-
-//3.3.4
-class FPForm<bits<6> opcode, bits<11> fun, string asmstr, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let Pattern = pattern;
-
- bits<5> Fc;
- bits<5> Fa;
- bits<5> Fb;
- bits<11> Function = fun;
-
- let Inst{25-21} = Fa;
- let Inst{20-16} = Fb;
- let Inst{15-5} = Function;
- let Inst{4-0} = Fc;
-}
-
-//3.3.5
-class PALForm<bits<6> opcode, dag OL, string asmstr, InstrItinClass itin>
- : InstAlpha<opcode, asmstr, itin> {
- let OutOperandList = (outs);
- let InOperandList = OL;
- bits<26> Function;
-
- let Inst{25-0} = Function;
-}
-
-
-// Pseudo instructions.
-class PseudoInstAlpha<dag OOL, dag IOL, string nm, list<dag> pattern, InstrItinClass itin>
- : InstAlpha<0, nm, itin> {
- let OutOperandList = OOL;
- let InOperandList = IOL;
- let Pattern = pattern;
-
-}
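
The let Inst{...} assignments above fully determine each 32-bit
instruction word. For the operate format (OForm), packing the fields by
hand would look like this (a sketch matching the bit ranges above, not a
utility from the patch):

    #include <cstdint>

    // Bits 31-26 opcode, 25-21 Ra, 20-16 Rb, 15-12 zero in the register
    // form, 11-5 function code, 4-0 Rc.
    uint32_t encodeOForm(unsigned Op, unsigned Ra, unsigned Rb,
                         unsigned Fun, unsigned Rc) {
      return (Op & 0x3F) << 26 | (Ra & 0x1F) << 21 | (Rb & 0x1F) << 16 |
             (Fun & 0x7F) << 5 | (Rc & 0x1F);
    }
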
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp
deleted file mode 100644
index 8df2ed7..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp
+++ /dev/null
@@ -1,382 +0,0 @@
-//===- AlphaInstrInfo.cpp - Alpha Instruction Information -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Alpha implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Alpha.h"
-#include "AlphaInstrInfo.h"
-#include "AlphaMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/ErrorHandling.h"
-
-#define GET_INSTRINFO_CTOR
-#include "AlphaGenInstrInfo.inc"
-using namespace llvm;
-
-AlphaInstrInfo::AlphaInstrInfo()
- : AlphaGenInstrInfo(Alpha::ADJUSTSTACKDOWN, Alpha::ADJUSTSTACKUP),
- RI(*this) {
-}
-
-
-unsigned
-AlphaInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- case Alpha::LDL:
- case Alpha::LDQ:
- case Alpha::LDBU:
- case Alpha::LDWU:
- case Alpha::LDS:
- case Alpha::LDT:
- if (MI->getOperand(1).isFI()) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
- return 0;
-}
-
-unsigned
-AlphaInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- case Alpha::STL:
- case Alpha::STQ:
- case Alpha::STB:
- case Alpha::STW:
- case Alpha::STS:
- case Alpha::STT:
- if (MI->getOperand(1).isFI()) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
- return 0;
-}
-
-static bool isAlphaIntCondCode(unsigned Opcode) {
- switch (Opcode) {
- case Alpha::BEQ:
- case Alpha::BNE:
- case Alpha::BGE:
- case Alpha::BGT:
- case Alpha::BLE:
- case Alpha::BLT:
- case Alpha::BLBC:
- case Alpha::BLBS:
- return true;
- default:
- return false;
- }
-}
-
-unsigned AlphaInstrInfo::InsertBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const {
- assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- assert((Cond.size() == 2 || Cond.size() == 0) &&
- "Alpha branch conditions have two components!");
-
- // One-way branch.
- if (FBB == 0) {
- if (Cond.empty()) // Unconditional branch
- BuildMI(&MBB, DL, get(Alpha::BR)).addMBB(TBB);
- else // Conditional branch
- if (isAlphaIntCondCode(Cond[0].getImm()))
- BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_I))
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- else
- BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_F))
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- return 1;
- }
-
- // Two-way Conditional Branch.
- if (isAlphaIntCondCode(Cond[0].getImm()))
- BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_I))
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- else
- BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_F))
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, DL, get(Alpha::BR)).addMBB(FBB);
- return 2;
-}
-
-void AlphaInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
- if (Alpha::GPRCRegClass.contains(DestReg, SrcReg)) {
- BuildMI(MBB, MI, DL, get(Alpha::BISr), DestReg)
- .addReg(SrcReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- } else if (Alpha::F4RCRegClass.contains(DestReg, SrcReg)) {
- BuildMI(MBB, MI, DL, get(Alpha::CPYSS), DestReg)
- .addReg(SrcReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- } else if (Alpha::F8RCRegClass.contains(DestReg, SrcReg)) {
- BuildMI(MBB, MI, DL, get(Alpha::CPYST), DestReg)
- .addReg(SrcReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- } else {
- llvm_unreachable("Attempt to copy register that is not GPR or FPR");
- }
-}
-
-void
-AlphaInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- //cerr << "Trying to store " << getPrettyName(SrcReg) << " to "
- // << FrameIdx << "\n";
- //BuildMI(MBB, MI, Alpha::WTF, 0).addReg(SrcReg);
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- if (RC == Alpha::F4RCRegisterClass)
- BuildMI(MBB, MI, DL, get(Alpha::STS))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FrameIdx).addReg(Alpha::F31);
- else if (RC == Alpha::F8RCRegisterClass)
- BuildMI(MBB, MI, DL, get(Alpha::STT))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FrameIdx).addReg(Alpha::F31);
- else if (RC == Alpha::GPRCRegisterClass)
- BuildMI(MBB, MI, DL, get(Alpha::STQ))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FrameIdx).addReg(Alpha::F31);
- else
- llvm_unreachable("Unhandled register class");
-}
-
-void
-AlphaInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- //cerr << "Trying to load " << getPrettyName(DestReg) << " to "
- // << FrameIdx << "\n";
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- if (RC == Alpha::F4RCRegisterClass)
- BuildMI(MBB, MI, DL, get(Alpha::LDS), DestReg)
- .addFrameIndex(FrameIdx).addReg(Alpha::F31);
- else if (RC == Alpha::F8RCRegisterClass)
- BuildMI(MBB, MI, DL, get(Alpha::LDT), DestReg)
- .addFrameIndex(FrameIdx).addReg(Alpha::F31);
- else if (RC == Alpha::GPRCRegisterClass)
- BuildMI(MBB, MI, DL, get(Alpha::LDQ), DestReg)
- .addFrameIndex(FrameIdx).addReg(Alpha::F31);
- else
- llvm_unreachable("Unhandled register class");
-}
-
-static unsigned AlphaRevCondCode(unsigned Opcode) {
- switch (Opcode) {
- case Alpha::BEQ: return Alpha::BNE;
- case Alpha::BNE: return Alpha::BEQ;
- case Alpha::BGE: return Alpha::BLT;
- case Alpha::BGT: return Alpha::BLE;
- case Alpha::BLE: return Alpha::BGT;
- case Alpha::BLT: return Alpha::BGE;
- case Alpha::BLBC: return Alpha::BLBS;
- case Alpha::BLBS: return Alpha::BLBC;
- case Alpha::FBEQ: return Alpha::FBNE;
- case Alpha::FBNE: return Alpha::FBEQ;
- case Alpha::FBGE: return Alpha::FBLT;
- case Alpha::FBGT: return Alpha::FBLE;
- case Alpha::FBLE: return Alpha::FBGT;
- case Alpha::FBLT: return Alpha::FBGE;
- default:
- llvm_unreachable("Unknown opcode");
- }
- return 0; // Not reached
-}
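
AlphaRevCondCode pairs each branch opcode with its logical complement; ReverseBranchCondition below depends on this mapping, and correctness requires it to be an involution (reversing twice yields the original opcode). A minimal standalone C++ sketch of that property, using local stand-in enumerators rather than the generated Alpha::* values:

  // Standalone sketch: these enumerators are local stand-ins for the
  // generated Alpha::* opcodes, used only to exercise the mapping.
  #include <cassert>

  enum Opcode { BEQ, BNE, BGE, BGT, BLE, BLT, BLBC, BLBS,
                FBEQ, FBNE, FBGE, FBGT, FBLE, FBLT };

  static Opcode revCondCode(Opcode Op) {
    switch (Op) {
    case BEQ:  return BNE;   case BNE:  return BEQ;
    case BGE:  return BLT;   case BLT:  return BGE;
    case BGT:  return BLE;   case BLE:  return BGT;
    case BLBC: return BLBS;  case BLBS: return BLBC;
    case FBEQ: return FBNE;  case FBNE: return FBEQ;
    case FBGE: return FBLT;  case FBLT: return FBGE;
    case FBGT: return FBLE;  case FBLE: return FBGT;
    }
    return BEQ; // unreachable for valid input
  }

  int main() {
    for (int Op = BEQ; Op <= FBLT; ++Op)
      assert(revCondCode(revCondCode(Opcode(Op))) == Opcode(Op));
    return 0;
  }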
-
-// Branch analysis.
-bool AlphaInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
- // If the block has no terminators, it just falls into the block after it.
- MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin())
- return false;
- --I;
- while (I->isDebugValue()) {
- if (I == MBB.begin())
- return false;
- --I;
- }
- if (!isUnpredicatedTerminator(I))
- return false;
-
- // Get the last instruction in the block.
- MachineInstr *LastInst = I;
-
- // If there is only one terminator instruction, process it.
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
- if (LastInst->getOpcode() == Alpha::BR) {
- TBB = LastInst->getOperand(0).getMBB();
- return false;
- } else if (LastInst->getOpcode() == Alpha::COND_BRANCH_I ||
- LastInst->getOpcode() == Alpha::COND_BRANCH_F) {
- // Block ends with fall-through condbranch.
- TBB = LastInst->getOperand(2).getMBB();
- Cond.push_back(LastInst->getOperand(0));
- Cond.push_back(LastInst->getOperand(1));
- return false;
- }
- // Otherwise, we don't know what this is.
- return true;
- }
-
- // Get the instruction before it if it's a terminator.
- MachineInstr *SecondLastInst = I;
-
- // If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() &&
- isUnpredicatedTerminator(--I))
- return true;
-
- // If the block ends with Alpha::BR and Alpha::COND_BRANCH_*, handle it.
- if ((SecondLastInst->getOpcode() == Alpha::COND_BRANCH_I ||
- SecondLastInst->getOpcode() == Alpha::COND_BRANCH_F) &&
- LastInst->getOpcode() == Alpha::BR) {
- TBB = SecondLastInst->getOperand(2).getMBB();
- Cond.push_back(SecondLastInst->getOperand(0));
- Cond.push_back(SecondLastInst->getOperand(1));
- FBB = LastInst->getOperand(0).getMBB();
- return false;
- }
-
- // If the block ends with two Alpha::BRs, handle it. The second one is not
- // executed, so remove it.
- if (SecondLastInst->getOpcode() == Alpha::BR &&
- LastInst->getOpcode() == Alpha::BR) {
- TBB = SecondLastInst->getOperand(0).getMBB();
- I = LastInst;
- if (AllowModify)
- I->eraseFromParent();
- return false;
- }
-
- // Otherwise, can't handle this.
- return true;
-}
-
-unsigned AlphaInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin()) return 0;
- --I;
- while (I->isDebugValue()) {
- if (I == MBB.begin())
- return 0;
- --I;
- }
- if (I->getOpcode() != Alpha::BR &&
- I->getOpcode() != Alpha::COND_BRANCH_I &&
- I->getOpcode() != Alpha::COND_BRANCH_F)
- return 0;
-
- // Remove the branch.
- I->eraseFromParent();
-
- I = MBB.end();
-
- if (I == MBB.begin()) return 1;
- --I;
- if (I->getOpcode() != Alpha::COND_BRANCH_I &&
- I->getOpcode() != Alpha::COND_BRANCH_F)
- return 1;
-
- // Remove the branch.
- I->eraseFromParent();
- return 2;
-}
-
-void AlphaInstrInfo::insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const {
- DebugLoc DL;
- BuildMI(MBB, MI, DL, get(Alpha::BISr), Alpha::R31)
- .addReg(Alpha::R31)
- .addReg(Alpha::R31);
-}
-
-bool AlphaInstrInfo::
-ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
- assert(Cond.size() == 2 && "Invalid Alpha branch opcode!");
- Cond[0].setImm(AlphaRevCondCode(Cond[0].getImm()));
- return false;
-}
-
-/// getGlobalBaseReg - Return a virtual register initialized with the
-/// global base register value. Output instructions required to
-/// initialize the register in the function entry block, if necessary.
-///
-unsigned AlphaInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
- AlphaMachineFunctionInfo *AlphaFI = MF->getInfo<AlphaMachineFunctionInfo>();
- unsigned GlobalBaseReg = AlphaFI->getGlobalBaseReg();
- if (GlobalBaseReg != 0)
- return GlobalBaseReg;
-
- // Insert the set of GlobalBaseReg into the first MBB of the function
- MachineBasicBlock &FirstMBB = MF->front();
- MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
-
- GlobalBaseReg = RegInfo.createVirtualRegister(&Alpha::GPRCRegClass);
- BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
- GlobalBaseReg).addReg(Alpha::R29);
- RegInfo.addLiveIn(Alpha::R29);
-
- AlphaFI->setGlobalBaseReg(GlobalBaseReg);
- return GlobalBaseReg;
-}
-
-/// getGlobalRetAddr - Return a virtual register initialized with the
-/// global return address register value. Output instructions required to
-/// initialize the register in the function entry block, if necessary.
-///
-unsigned AlphaInstrInfo::getGlobalRetAddr(MachineFunction *MF) const {
- AlphaMachineFunctionInfo *AlphaFI = MF->getInfo<AlphaMachineFunctionInfo>();
- unsigned GlobalRetAddr = AlphaFI->getGlobalRetAddr();
- if (GlobalRetAddr != 0)
- return GlobalRetAddr;
-
- // Insert the set of GlobalRetAddr into the first MBB of the function
- MachineBasicBlock &FirstMBB = MF->front();
- MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
-
- GlobalRetAddr = RegInfo.createVirtualRegister(&Alpha::GPRCRegClass);
- BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
- GlobalRetAddr).addReg(Alpha::R26);
- RegInfo.addLiveIn(Alpha::R26);
-
- AlphaFI->setGlobalRetAddr(GlobalRetAddr);
- return GlobalRetAddr;
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h
deleted file mode 100644
index 337a85c..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h
+++ /dev/null
@@ -1,85 +0,0 @@
-//===- AlphaInstrInfo.h - Alpha Instruction Information ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Alpha implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHAINSTRUCTIONINFO_H
-#define ALPHAINSTRUCTIONINFO_H
-
-#include "llvm/Target/TargetInstrInfo.h"
-#include "AlphaRegisterInfo.h"
-
-#define GET_INSTRINFO_HEADER
-#include "AlphaGenInstrInfo.inc"
-
-namespace llvm {
-
-class AlphaInstrInfo : public AlphaGenInstrInfo {
- const AlphaRegisterInfo RI;
-public:
- AlphaInstrInfo();
-
- /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- /// such, whenever a client has an instance of instruction info, it should
- /// always be able to get register info as well (through this method).
- ///
- virtual const AlphaRegisterInfo &getRegisterInfo() const { return RI; }
-
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
- unsigned RemoveBranch(MachineBasicBlock &MBB) const;
- void insertNoop(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const;
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-
- /// getGlobalBaseReg - Return a virtual register initialized with the
- /// global base register value. Output instructions required to
- /// initialize the register in the function entry block, if necessary.
- ///
- unsigned getGlobalBaseReg(MachineFunction *MF) const;
-
- /// getGlobalRetAddr - Return a virtual register initialized with the
- /// global return address register value. Output instructions required to
- /// initialize the register in the function entry block, if necessary.
- ///
- unsigned getGlobalRetAddr(MachineFunction *MF) const;
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td
deleted file mode 100644
index c8c9377..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td
+++ /dev/null
@@ -1,1159 +0,0 @@
-//===- AlphaInstrInfo.td - The Alpha Instruction Set -------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-include "AlphaInstrFormats.td"
-
-//********************
-//Custom DAG Nodes
-//********************
-
-def SDTFPUnaryOpUnC : SDTypeProfile<1, 1, [
- SDTCisFP<1>, SDTCisFP<0>
-]>;
-def Alpha_cvtqt : SDNode<"AlphaISD::CVTQT_", SDTFPUnaryOpUnC, []>;
-def Alpha_cvtqs : SDNode<"AlphaISD::CVTQS_", SDTFPUnaryOpUnC, []>;
-def Alpha_cvttq : SDNode<"AlphaISD::CVTTQ_" , SDTFPUnaryOp, []>;
-def Alpha_gprello : SDNode<"AlphaISD::GPRelLo", SDTIntBinOp, []>;
-def Alpha_gprelhi : SDNode<"AlphaISD::GPRelHi", SDTIntBinOp, []>;
-def Alpha_rellit : SDNode<"AlphaISD::RelLit", SDTIntBinOp, [SDNPMayLoad]>;
-
-def retflag : SDNode<"AlphaISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue]>;
-
-// These are target-independent nodes, but have target-specific formats.
-def SDT_AlphaCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i64> ]>;
-def SDT_AlphaCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i64>,
- SDTCisVT<1, i64> ]>;
-
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_AlphaCallSeqStart,
- [SDNPHasChain, SDNPOutGlue]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_AlphaCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-
-//********************
-//Patterns for matching
-//********************
-def invX : SDNodeXForm<imm, [{ //invert
- return getI64Imm(~N->getZExtValue());
-}]>;
-def negX : SDNodeXForm<imm, [{ //negate
- return getI64Imm(~N->getZExtValue() + 1);
-}]>;
-def SExt32 : SDNodeXForm<imm, [{ //signed extend int to long
- return getI64Imm(((int64_t)N->getZExtValue() << 32) >> 32);
-}]>;
-def SExt16 : SDNodeXForm<imm, [{ //signed extend int to long
- return getI64Imm(((int64_t)N->getZExtValue() << 48) >> 48);
-}]>;
-def LL16 : SDNodeXForm<imm, [{ //lda part of constant
- return getI64Imm(get_lda16(N->getZExtValue()));
-}]>;
-def LH16 : SDNodeXForm<imm, [{ //ldah part of constant (or more if too big)
- return getI64Imm(get_ldah16(N->getZExtValue()));
-}]>;
-def iZAPX : SDNodeXForm<and, [{ // get imm to ZAPi
- ConstantSDNode *RHS = cast<ConstantSDNode>(N->getOperand(1));
- return getI64Imm(get_zapImm(SDValue(), RHS->getZExtValue()));
-}]>;
-def nearP2X : SDNodeXForm<imm, [{
- return getI64Imm(Log2_64(getNearPower2((uint64_t)N->getZExtValue())));
-}]>;
-def nearP2RemX : SDNodeXForm<imm, [{
- uint64_t x =
- abs64(N->getZExtValue() - getNearPower2((uint64_t)N->getZExtValue()));
- return getI64Imm(Log2_64(x));
-}]>;
-
-def immUExt8 : PatLeaf<(imm), [{ //imm fits in 8 bit zero extended field
- return (uint64_t)N->getZExtValue() == (uint8_t)N->getZExtValue();
-}]>;
-def immUExt8inv : PatLeaf<(imm), [{ //inverted imm fits in 8 bit zero extended field
- return (uint64_t)~N->getZExtValue() == (uint8_t)~N->getZExtValue();
-}], invX>;
-def immUExt8neg : PatLeaf<(imm), [{ //negated imm fits in 8 bit zero extended field
- return ((uint64_t)~N->getZExtValue() + 1) ==
- (uint8_t)((uint64_t)~N->getZExtValue() + 1);
-}], negX>;
-def immSExt16 : PatLeaf<(imm), [{ //imm fits in 16 bit sign extended field
- return ((int64_t)N->getZExtValue() << 48) >> 48 ==
- (int64_t)N->getZExtValue();
-}]>;
-def immSExt16int : PatLeaf<(imm), [{ //(int)imm fits in a 16 bit sign extended field
- return ((int64_t)N->getZExtValue() << 48) >> 48 ==
- ((int64_t)N->getZExtValue() << 32) >> 32;
-}], SExt16>;
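
These PatLeaf predicates test whether an immediate fits a narrow field by round-tripping it through a shift pair: a value fits a 16-bit signed field exactly when shifting it left by 48 and arithmetically back right by 48 reproduces it. A self-contained C++ illustration of the immSExt16 test, shifting in the unsigned domain first to sidestep the signed-overflow issue the fragment above tolerates:

  #include <cassert>
  #include <cstdint>

  // Sign-extend the low 16 bits of v to 64 bits; mirrors the
  // ((int64_t)x << 48) >> 48 idiom used by immSExt16/SExt16. Relies on
  // arithmetic right shift for negative int64_t, as the original does
  // (guaranteed in C++20, universal in practice).
  static int64_t sext16(uint64_t v) {
    return (int64_t)(v << 48) >> 48;
  }

  static bool fitsSExt16(uint64_t v) {
    return sext16(v) == (int64_t)v;
  }

  int main() {
    assert(fitsSExt16(0x7FFF));            // 32767: largest positive
    assert(fitsSExt16((uint64_t)-32768));  // smallest negative
    assert(!fitsSExt16(0x8000));           // 32768 does not fit
    return 0;
  }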
-
-def zappat : PatFrag<(ops node:$LHS), (and node:$LHS, imm), [{
- ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (!RHS) return 0;
- uint64_t build = get_zapImm(N->getOperand(0), (uint64_t)RHS->getZExtValue());
- return build != 0;
-}]>;
-
-def immFPZ : PatLeaf<(fpimm), [{ //the only fpconstant nodes are +/- 0.0
- (void)N; // silence warning.
- return true;
-}]>;
-
-def immRem1 :PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),1,0);}]>;
-def immRem2 :PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),2,0);}]>;
-def immRem3 :PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),3,0);}]>;
-def immRem4 :PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),4,0);}]>;
-def immRem5 :PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),5,0);}]>;
-def immRem1n:PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),1,1);}]>;
-def immRem2n:PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),2,1);}]>;
-def immRem3n:PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),3,1);}]>;
-def immRem4n:PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),4,1);}]>;
-def immRem5n:PatLeaf<(imm),[{return chkRemNearPower2(N->getZExtValue(),5,1);}]>;
-
-def immRemP2n : PatLeaf<(imm), [{
- return isPowerOf2_64(getNearPower2((uint64_t)N->getZExtValue()) -
- N->getZExtValue());
-}]>;
-def immRemP2 : PatLeaf<(imm), [{
- return isPowerOf2_64(N->getZExtValue() -
- getNearPower2((uint64_t)N->getZExtValue()));
-}]>;
-def immUExt8ME : PatLeaf<(imm), [{ //use this imm for mulqi
- int64_t d = abs64((int64_t)N->getZExtValue() -
- (int64_t)getNearPower2((uint64_t)N->getZExtValue()));
- if (isPowerOf2_64(d)) return false;
- switch (d) {
- case 1: case 3: case 5: return false;
- default: return (uint64_t)N->getZExtValue() == (uint8_t)N->getZExtValue();
- };
-}]>;
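
The immRem*, immRemP2, and immUExt8ME leaves support strength-reducing multiplication by constants that sit near a power of two: the product becomes a shift plus a small correction. A hedged standalone sketch of the arithmetic being exploited; getNearPower2 is modeled here as rounding up to the next power of two, which is an assumption about the helper rather than its exact definition:

  #include <cassert>
  #include <cstdint>

  // Assumed model of getNearPower2: next power of two >= x.
  static uint64_t nearPow2(uint64_t x) {
    uint64_t p = 1;
    while (p < x) p <<= 1;
    return p;
  }

  // Multiply by a constant c = p +/- d (p a power of two) via a shift and
  // a correction. The correction is computed with a plain multiply here
  // purely to check the identity; the real selector only matches cases
  // where it is itself cheap (a power of two or a tiny constant, per the
  // immRemP2/immRem* predicates above).
  static uint64_t mulByConst(uint64_t x, uint64_t c) {
    uint64_t p = nearPow2(c);
    unsigned shift = 0;
    while ((1ULL << shift) != p) ++shift;
    uint64_t base = x << shift;
    return c >= p ? base + x * (c - p) : base - x * (p - c);
  }

  int main() {
    for (uint64_t x = 0; x < 100; ++x) {
      assert(mulByConst(x, 7)  == x * 7);   // 8 - 1
      assert(mulByConst(x, 9)  == x * 9);   // 16 - 7 under this model
      assert(mulByConst(x, 36) == x * 36);  // 64 - 28 under this model
    }
    return 0;
  }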
-
-def intop : PatFrag<(ops node:$op), (sext_inreg node:$op, i32)>;
-def add4 : PatFrag<(ops node:$op1, node:$op2),
- (add (shl node:$op1, 2), node:$op2)>;
-def sub4 : PatFrag<(ops node:$op1, node:$op2),
- (sub (shl node:$op1, 2), node:$op2)>;
-def add8 : PatFrag<(ops node:$op1, node:$op2),
- (add (shl node:$op1, 3), node:$op2)>;
-def sub8 : PatFrag<(ops node:$op1, node:$op2),
- (sub (shl node:$op1, 3), node:$op2)>;
-class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
-class CmpOpFrag<dag res> : PatFrag<(ops node:$R), res>;
-
-//Pseudo ops for selection
-
-def WTF : PseudoInstAlpha<(outs), (ins variable_ops), "#wtf", [], s_pseudo>;
-
-let hasCtrlDep = 1, Defs = [R30], Uses = [R30] in {
-def ADJUSTSTACKUP : PseudoInstAlpha<(outs), (ins s64imm:$amt),
- "; ADJUP $amt",
- [(callseq_start timm:$amt)], s_pseudo>;
-def ADJUSTSTACKDOWN : PseudoInstAlpha<(outs), (ins s64imm:$amt1, s64imm:$amt2),
- "; ADJDOWN $amt1",
- [(callseq_end timm:$amt1, timm:$amt2)], s_pseudo>;
-}
-
-def ALTENT : PseudoInstAlpha<(outs), (ins s64imm:$TARGET), "$$$TARGET..ng:\n", [], s_pseudo>;
-def PCLABEL : PseudoInstAlpha<(outs), (ins s64imm:$num), "PCMARKER_$num:\n",[], s_pseudo>;
-def MEMLABEL : PseudoInstAlpha<(outs), (ins s64imm:$i, s64imm:$j, s64imm:$k, s64imm:$m),
- "LSMARKER$$$i$$$j$$$k$$$m:", [], s_pseudo>;
-
-
-let usesCustomInserter = 1 in { // Expanded after instruction selection.
-def CAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_cmp_swap_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
-def CAS64 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_cmp_swap_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
-
-def LAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_load_add_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
-def LAS64 :PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_load_add_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
-
-def SWAP32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_swap_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
-def SWAP64 :PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_swap_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
-}
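
CAS32/CAS64 (and the LAS*/SWAP* pseudos) carry the usesCustomInserter flag above, so after selection they are expanded into retry loops built from the load-locked/store-conditional pairs (LDL_L/STL_C, LDQ_L/STQ_C) defined later in this file. Purely as a semantic reference, not the backend's expansion, here is what CAS64 must implement, modeled with std::atomic:

  #include <atomic>
  #include <cassert>
  #include <cstdint>

  // Semantic model of CAS64: atomically replace *ptr with swp if it
  // currently equals cmp; return the value observed before the attempt.
  static int64_t cas64(std::atomic<int64_t> *ptr, int64_t cmp, int64_t swp) {
    int64_t expected = cmp;
    ptr->compare_exchange_strong(expected, swp);
    return expected; // old value, whether or not the exchange happened
  }

  int main() {
    std::atomic<int64_t> v(5);
    assert(cas64(&v, 5, 9) == 5 && v.load() == 9); // success path
    assert(cas64(&v, 5, 1) == 9 && v.load() == 9); // failure path
    return 0;
  }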
-
-//***********************
-//Real instructions
-//***********************
-
-//Operation Form:
-
-//conditional moves, int
-
-multiclass cmov_inst<bits<7> fun, string asmstr, PatFrag OpNode> {
-def r : OForm4<0x11, fun, !strconcat(asmstr, " $RCOND,$RTRUE,$RDEST"),
- [(set GPRC:$RDEST, (select (OpNode GPRC:$RCOND), GPRC:$RTRUE, GPRC:$RFALSE))], s_cmov>;
-def i : OForm4L<0x11, fun, !strconcat(asmstr, " $RCOND,$RTRUE,$RDEST"),
- [(set GPRC:$RDEST, (select (OpNode GPRC:$RCOND), immUExt8:$RTRUE, GPRC:$RFALSE))], s_cmov>;
-}
-
-defm CMOVEQ : cmov_inst<0x24, "cmoveq", CmpOpFrag<(seteq node:$R, 0)>>;
-defm CMOVNE : cmov_inst<0x26, "cmovne", CmpOpFrag<(setne node:$R, 0)>>;
-defm CMOVLT : cmov_inst<0x44, "cmovlt", CmpOpFrag<(setlt node:$R, 0)>>;
-defm CMOVLE : cmov_inst<0x64, "cmovle", CmpOpFrag<(setle node:$R, 0)>>;
-defm CMOVGT : cmov_inst<0x66, "cmovgt", CmpOpFrag<(setgt node:$R, 0)>>;
-defm CMOVGE : cmov_inst<0x46, "cmovge", CmpOpFrag<(setge node:$R, 0)>>;
-defm CMOVLBC : cmov_inst<0x16, "cmovlbc", CmpOpFrag<(xor node:$R, 1)>>;
-defm CMOVLBS : cmov_inst<0x14, "cmovlbs", CmpOpFrag<(and node:$R, 1)>>;
-
-//General pattern for cmov
-def : Pat<(select GPRC:$which, GPRC:$src1, GPRC:$src2),
- (CMOVNEr GPRC:$src2, GPRC:$src1, GPRC:$which)>;
-def : Pat<(select GPRC:$which, GPRC:$src1, immUExt8:$src2),
- (CMOVEQi GPRC:$src1, immUExt8:$src2, GPRC:$which)>;
-
-//Invert sense when we can for constants:
-def : Pat<(select (setne GPRC:$RCOND, 0), GPRC:$RTRUE, immUExt8:$RFALSE),
- (CMOVEQi GPRC:$RCOND, immUExt8:$RFALSE, GPRC:$RTRUE)>;
-def : Pat<(select (setgt GPRC:$RCOND, 0), GPRC:$RTRUE, immUExt8:$RFALSE),
- (CMOVLEi GPRC:$RCOND, immUExt8:$RFALSE, GPRC:$RTRUE)>;
-def : Pat<(select (setge GPRC:$RCOND, 0), GPRC:$RTRUE, immUExt8:$RFALSE),
- (CMOVLTi GPRC:$RCOND, immUExt8:$RFALSE, GPRC:$RTRUE)>;
-def : Pat<(select (setlt GPRC:$RCOND, 0), GPRC:$RTRUE, immUExt8:$RFALSE),
- (CMOVGEi GPRC:$RCOND, immUExt8:$RFALSE, GPRC:$RTRUE)>;
-def : Pat<(select (setle GPRC:$RCOND, 0), GPRC:$RTRUE, immUExt8:$RFALSE),
- (CMOVGTi GPRC:$RCOND, immUExt8:$RFALSE, GPRC:$RTRUE)>;
-
-multiclass all_inst<bits<6> opc, bits<7> funl, bits<7> funq,
- string asmstr, PatFrag OpNode, InstrItinClass itin> {
- def Lr : OForm< opc, funl, !strconcat(asmstr, "l $RA,$RB,$RC"),
- [(set GPRC:$RC, (intop (OpNode GPRC:$RA, GPRC:$RB)))], itin>;
- def Li : OFormL<opc, funl, !strconcat(asmstr, "l $RA,$L,$RC"),
- [(set GPRC:$RC, (intop (OpNode GPRC:$RA, immUExt8:$L)))], itin>;
- def Qr : OForm< opc, funq, !strconcat(asmstr, "q $RA,$RB,$RC"),
- [(set GPRC:$RC, (OpNode GPRC:$RA, GPRC:$RB))], itin>;
- def Qi : OFormL<opc, funq, !strconcat(asmstr, "q $RA,$L,$RC"),
- [(set GPRC:$RC, (OpNode GPRC:$RA, immUExt8:$L))], itin>;
-}
-
-defm MUL : all_inst<0x13, 0x00, 0x20, "mul", BinOpFrag<(mul node:$LHS, node:$RHS)>, s_imul>;
-defm ADD : all_inst<0x10, 0x00, 0x20, "add", BinOpFrag<(add node:$LHS, node:$RHS)>, s_iadd>;
-defm S4ADD : all_inst<0x10, 0x02, 0x22, "s4add", add4, s_iadd>;
-defm S8ADD : all_inst<0x10, 0x12, 0x32, "s8add", add8, s_iadd>;
-defm S4SUB : all_inst<0x10, 0x0B, 0x2B, "s4sub", sub4, s_iadd>;
-defm S8SUB : all_inst<0x10, 0x1B, 0x3B, "s8sub", sub8, s_iadd>;
-defm SUB : all_inst<0x10, 0x09, 0x29, "sub", BinOpFrag<(sub node:$LHS, node:$RHS)>, s_iadd>;
-//Const cases since legalize does sub x, int -> add x, inv(int) + 1
-def : Pat<(intop (add GPRC:$RA, immUExt8neg:$L)), (SUBLi GPRC:$RA, immUExt8neg:$L)>;
-def : Pat<(add GPRC:$RA, immUExt8neg:$L), (SUBQi GPRC:$RA, immUExt8neg:$L)>;
-def : Pat<(intop (add4 GPRC:$RA, immUExt8neg:$L)), (S4SUBLi GPRC:$RA, immUExt8neg:$L)>;
-def : Pat<(add4 GPRC:$RA, immUExt8neg:$L), (S4SUBQi GPRC:$RA, immUExt8neg:$L)>;
-def : Pat<(intop (add8 GPRC:$RA, immUExt8neg:$L)), (S8SUBLi GPRC:$RA, immUExt8neg:$L)>;
-def : Pat<(add8 GPRC:$RA, immUExt8neg:$L), (S8SUBQi GPRC:$RA, immUExt8neg:$L)>;
-
-multiclass log_inst<bits<6> opc, bits<7> fun, string asmstr, SDNode OpNode, InstrItinClass itin> {
-def r : OForm<opc, fun, !strconcat(asmstr, " $RA,$RB,$RC"),
- [(set GPRC:$RC, (OpNode GPRC:$RA, GPRC:$RB))], itin>;
-def i : OFormL<opc, fun, !strconcat(asmstr, " $RA,$L,$RC"),
- [(set GPRC:$RC, (OpNode GPRC:$RA, immUExt8:$L))], itin>;
-}
-multiclass inv_inst<bits<6> opc, bits<7> fun, string asmstr, SDNode OpNode, InstrItinClass itin> {
-def r : OForm<opc, fun, !strconcat(asmstr, " $RA,$RB,$RC"),
- [(set GPRC:$RC, (OpNode GPRC:$RA, (not GPRC:$RB)))], itin>;
-def i : OFormL<opc, fun, !strconcat(asmstr, " $RA,$L,$RC"),
- [(set GPRC:$RC, (OpNode GPRC:$RA, immUExt8inv:$L))], itin>;
-}
-
-defm AND : log_inst<0x11, 0x00, "and", and, s_ilog>;
-defm BIC : inv_inst<0x11, 0x08, "bic", and, s_ilog>;
-defm BIS : log_inst<0x11, 0x20, "bis", or, s_ilog>;
-defm ORNOT : inv_inst<0x11, 0x28, "ornot", or, s_ilog>;
-defm XOR : log_inst<0x11, 0x40, "xor", xor, s_ilog>;
-defm EQV : inv_inst<0x11, 0x48, "eqv", xor, s_ilog>;
-
-defm SL : log_inst<0x12, 0x39, "sll", shl, s_ishf>;
-defm SRA : log_inst<0x12, 0x3c, "sra", sra, s_ishf>;
-defm SRL : log_inst<0x12, 0x34, "srl", srl, s_ishf>;
-defm UMULH : log_inst<0x13, 0x30, "umulh", mulhu, s_imul>;
-
-def CTLZ : OForm2<0x1C, 0x32, "CTLZ $RB,$RC",
- [(set GPRC:$RC, (ctlz GPRC:$RB))], s_imisc>;
-def CTPOP : OForm2<0x1C, 0x30, "CTPOP $RB,$RC",
- [(set GPRC:$RC, (ctpop GPRC:$RB))], s_imisc>;
-def CTTZ : OForm2<0x1C, 0x33, "CTTZ $RB,$RC",
- [(set GPRC:$RC, (cttz GPRC:$RB))], s_imisc>;
-def EXTBL : OForm< 0x12, 0x06, "EXTBL $RA,$RB,$RC",
- [(set GPRC:$RC, (and (srl GPRC:$RA, (shl GPRC:$RB, 3)), 255))], s_ishf>;
-def EXTWL : OForm< 0x12, 0x16, "EXTWL $RA,$RB,$RC",
- [(set GPRC:$RC, (and (srl GPRC:$RA, (shl GPRC:$RB, 3)), 65535))], s_ishf>;
-def EXTLL : OForm< 0x12, 0x26, "EXTLL $RA,$RB,$RC",
- [(set GPRC:$RC, (and (srl GPRC:$RA, (shl GPRC:$RB, 3)), 4294967295))], s_ishf>;
-def SEXTB : OForm2<0x1C, 0x00, "sextb $RB,$RC",
- [(set GPRC:$RC, (sext_inreg GPRC:$RB, i8))], s_ishf>;
-def SEXTW : OForm2<0x1C, 0x01, "sextw $RB,$RC",
- [(set GPRC:$RC, (sext_inreg GPRC:$RB, i16))], s_ishf>;
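
The EXT*L selection patterns above spell out Alpha's byte-extraction idiom: shift RA right by eight times the byte index in RB, then mask to the width of the extracted field. A small self-contained C++ model of two of these patterns, valid for byte indices 0 through 7:

  #include <cassert>
  #include <cstdint>

  // The DAG pattern for EXTBL: shift right by 8*b, mask to one byte.
  static uint64_t extbl(uint64_t ra, uint64_t rb) {
    return (ra >> (rb << 3)) & 0xFF;
  }

  // EXTWL and EXTLL are the same idiom with 16- and 32-bit masks.
  static uint64_t extwl(uint64_t ra, uint64_t rb) {
    return (ra >> (rb << 3)) & 0xFFFF;
  }

  int main() {
    uint64_t q = 0x8877665544332211ULL;
    assert(extbl(q, 0) == 0x11);
    assert(extbl(q, 3) == 0x44);
    assert(extwl(q, 2) == 0x4433);
    return 0;
  }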
-
-//def EXTBLi : OFormL<0x12, 0x06, "EXTBL $RA,$L,$RC", []>; //Extract byte low
-//def EXTLH : OForm< 0x12, 0x6A, "EXTLH $RA,$RB,$RC", []>; //Extract longword high
-//def EXTLHi : OFormL<0x12, 0x6A, "EXTLH $RA,$L,$RC", []>; //Extract longword high
-//def EXTLLi : OFormL<0x12, 0x26, "EXTLL $RA,$L,$RC", []>; //Extract longword low
-//def EXTQH : OForm< 0x12, 0x7A, "EXTQH $RA,$RB,$RC", []>; //Extract quadword high
-//def EXTQHi : OFormL<0x12, 0x7A, "EXTQH $RA,$L,$RC", []>; //Extract quadword high
-//def EXTQ : OForm< 0x12, 0x36, "EXTQ $RA,$RB,$RC", []>; //Extract quadword low
-//def EXTQi : OFormL<0x12, 0x36, "EXTQ $RA,$L,$RC", []>; //Extract quadword low
-//def EXTWH : OForm< 0x12, 0x5A, "EXTWH $RA,$RB,$RC", []>; //Extract word high
-//def EXTWHi : OFormL<0x12, 0x5A, "EXTWH $RA,$L,$RC", []>; //Extract word high
-//def EXTWLi : OFormL<0x12, 0x16, "EXTWL $RA,$L,$RC", []>; //Extract word low
-
-//def INSBL : OForm< 0x12, 0x0B, "INSBL $RA,$RB,$RC", []>; //Insert byte low
-//def INSBLi : OFormL<0x12, 0x0B, "INSBL $RA,$L,$RC", []>; //Insert byte low
-//def INSLH : OForm< 0x12, 0x67, "INSLH $RA,$RB,$RC", []>; //Insert longword high
-//def INSLHi : OFormL<0x12, 0x67, "INSLH $RA,$L,$RC", []>; //Insert longword high
-//def INSLL : OForm< 0x12, 0x2B, "INSLL $RA,$RB,$RC", []>; //Insert longword low
-//def INSLLi : OFormL<0x12, 0x2B, "INSLL $RA,$L,$RC", []>; //Insert longword low
-//def INSQH : OForm< 0x12, 0x77, "INSQH $RA,$RB,$RC", []>; //Insert quadword high
-//def INSQHi : OFormL<0x12, 0x77, "INSQH $RA,$L,$RC", []>; //Insert quadword high
-//def INSQL : OForm< 0x12, 0x3B, "INSQL $RA,$RB,$RC", []>; //Insert quadword low
-//def INSQLi : OFormL<0x12, 0x3B, "INSQL $RA,$L,$RC", []>; //Insert quadword low
-//def INSWH : OForm< 0x12, 0x57, "INSWH $RA,$RB,$RC", []>; //Insert word high
-//def INSWHi : OFormL<0x12, 0x57, "INSWH $RA,$L,$RC", []>; //Insert word high
-//def INSWL : OForm< 0x12, 0x1B, "INSWL $RA,$RB,$RC", []>; //Insert word low
-//def INSWLi : OFormL<0x12, 0x1B, "INSWL $RA,$L,$RC", []>; //Insert word low
-
-//def MSKBL : OForm< 0x12, 0x02, "MSKBL $RA,$RB,$RC", []>; //Mask byte low
-//def MSKBLi : OFormL<0x12, 0x02, "MSKBL $RA,$L,$RC", []>; //Mask byte low
-//def MSKLH : OForm< 0x12, 0x62, "MSKLH $RA,$RB,$RC", []>; //Mask longword high
-//def MSKLHi : OFormL<0x12, 0x62, "MSKLH $RA,$L,$RC", []>; //Mask longword high
-//def MSKLL : OForm< 0x12, 0x22, "MSKLL $RA,$RB,$RC", []>; //Mask longword low
-//def MSKLLi : OFormL<0x12, 0x22, "MSKLL $RA,$L,$RC", []>; //Mask longword low
-//def MSKQH : OForm< 0x12, 0x72, "MSKQH $RA,$RB,$RC", []>; //Mask quadword high
-//def MSKQHi : OFormL<0x12, 0x72, "MSKQH $RA,$L,$RC", []>; //Mask quadword high
-//def MSKQL : OForm< 0x12, 0x32, "MSKQL $RA,$RB,$RC", []>; //Mask quadword low
-//def MSKQLi : OFormL<0x12, 0x32, "MSKQL $RA,$L,$RC", []>; //Mask quadword low
-//def MSKWH : OForm< 0x12, 0x52, "MSKWH $RA,$RB,$RC", []>; //Mask word high
-//def MSKWHi : OFormL<0x12, 0x52, "MSKWH $RA,$L,$RC", []>; //Mask word high
-//def MSKWL : OForm< 0x12, 0x12, "MSKWL $RA,$RB,$RC", []>; //Mask word low
-//def MSKWLi : OFormL<0x12, 0x12, "MSKWL $RA,$L,$RC", []>; //Mask word low
-
-def ZAPNOTi : OFormL<0x12, 0x31, "zapnot $RA,$L,$RC", [], s_ishf>;
-
-// Define the pattern that produces ZAPNOTi.
-def : Pat<(zappat:$imm GPRC:$RA),
- (ZAPNOTi GPRC:$RA, (iZAPX GPRC:$imm))>;
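
zapnot keeps byte i of RA exactly when bit i of its 8-bit literal is set and zeroes every other byte, which is why an AND with any byte-granular mask folds into a single ZAPNOTi (get_zapImm, used above, is evidently the helper that recovers that byte mask; its definition lives elsewhere). A small C++ model of the semantics:

  #include <cassert>
  #include <cstdint>

  // Model of zapnot: keep byte i of x iff bit i of the 8-bit mask is set.
  static uint64_t zapnot(uint64_t x, unsigned mask) {
    uint64_t r = 0;
    for (unsigned i = 0; i != 8; ++i)
      if (mask & (1u << i))
        r |= x & (0xFFULL << (8 * i));
    return r;
  }

  int main() {
    uint64_t q = 0x8877665544332211ULL;
    assert(zapnot(q, 0x0F) == 0x44332211ULL); // AND with 0xFFFFFFFF
    assert(zapnot(q, 0x03) == 0x2211ULL);     // AND with 0xFFFF
    assert(zapnot(q, 0xFF) == q);             // identity mask
    return 0;
  }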
-
-
-//Comparison, int
-//This uses only a fraction of what CMPBGE can do, but it still saves an
-//instruction over masking and comparing separately
-def CMPBGE : OForm< 0x10, 0x0F, "cmpbge $RA,$RB,$RC",
- [(set GPRC:$RC, (setuge (and GPRC:$RA, 255), (and GPRC:$RB, 255)))], s_ilog>;
-def CMPBGEi : OFormL<0x10, 0x0F, "cmpbge $RA,$L,$RC",
- [(set GPRC:$RC, (setuge (and GPRC:$RA, 255), immUExt8:$L))], s_ilog>;
-def CMPEQ : OForm< 0x10, 0x2D, "cmpeq $RA,$RB,$RC",
- [(set GPRC:$RC, (seteq GPRC:$RA, GPRC:$RB))], s_iadd>;
-def CMPEQi : OFormL<0x10, 0x2D, "cmpeq $RA,$L,$RC",
- [(set GPRC:$RC, (seteq GPRC:$RA, immUExt8:$L))], s_iadd>;
-def CMPLE : OForm< 0x10, 0x6D, "cmple $RA,$RB,$RC",
- [(set GPRC:$RC, (setle GPRC:$RA, GPRC:$RB))], s_iadd>;
-def CMPLEi : OFormL<0x10, 0x6D, "cmple $RA,$L,$RC",
- [(set GPRC:$RC, (setle GPRC:$RA, immUExt8:$L))], s_iadd>;
-def CMPLT : OForm< 0x10, 0x4D, "cmplt $RA,$RB,$RC",
- [(set GPRC:$RC, (setlt GPRC:$RA, GPRC:$RB))], s_iadd>;
-def CMPLTi : OFormL<0x10, 0x4D, "cmplt $RA,$L,$RC",
- [(set GPRC:$RC, (setlt GPRC:$RA, immUExt8:$L))], s_iadd>;
-def CMPULE : OForm< 0x10, 0x3D, "cmpule $RA,$RB,$RC",
- [(set GPRC:$RC, (setule GPRC:$RA, GPRC:$RB))], s_iadd>;
-def CMPULEi : OFormL<0x10, 0x3D, "cmpule $RA,$L,$RC",
- [(set GPRC:$RC, (setule GPRC:$RA, immUExt8:$L))], s_iadd>;
-def CMPULT : OForm< 0x10, 0x1D, "cmpult $RA,$RB,$RC",
- [(set GPRC:$RC, (setult GPRC:$RA, GPRC:$RB))], s_iadd>;
-def CMPULTi : OFormL<0x10, 0x1D, "cmpult $RA,$L,$RC",
- [(set GPRC:$RC, (setult GPRC:$RA, immUExt8:$L))], s_iadd>;
-
-//Patterns for unsupported int comparisons
-def : Pat<(setueq GPRC:$X, GPRC:$Y), (CMPEQ GPRC:$X, GPRC:$Y)>;
-def : Pat<(setueq GPRC:$X, immUExt8:$Y), (CMPEQi GPRC:$X, immUExt8:$Y)>;
-
-def : Pat<(setugt GPRC:$X, GPRC:$Y), (CMPULT GPRC:$Y, GPRC:$X)>;
-def : Pat<(setugt immUExt8:$X, GPRC:$Y), (CMPULTi GPRC:$Y, immUExt8:$X)>;
-
-def : Pat<(setuge GPRC:$X, GPRC:$Y), (CMPULE GPRC:$Y, GPRC:$X)>;
-def : Pat<(setuge immUExt8:$X, GPRC:$Y), (CMPULEi GPRC:$Y, immUExt8:$X)>;
-
-def : Pat<(setgt GPRC:$X, GPRC:$Y), (CMPLT GPRC:$Y, GPRC:$X)>;
-def : Pat<(setgt immUExt8:$X, GPRC:$Y), (CMPLTi GPRC:$Y, immUExt8:$X)>;
-
-def : Pat<(setge GPRC:$X, GPRC:$Y), (CMPLE GPRC:$Y, GPRC:$X)>;
-def : Pat<(setge immUExt8:$X, GPRC:$Y), (CMPLEi GPRC:$Y, immUExt8:$X)>;
-
-def : Pat<(setne GPRC:$X, GPRC:$Y), (CMPEQi (CMPEQ GPRC:$X, GPRC:$Y), 0)>;
-def : Pat<(setne GPRC:$X, immUExt8:$Y), (CMPEQi (CMPEQi GPRC:$X, immUExt8:$Y), 0)>;
-
-def : Pat<(setune GPRC:$X, GPRC:$Y), (CMPEQi (CMPEQ GPRC:$X, GPRC:$Y), 0)>;
-def : Pat<(setune GPRC:$X, immUExt8:$Y), (CMPEQi (CMPEQ GPRC:$X, immUExt8:$Y), 0)>;
-
-
-let isReturn = 1, isTerminator = 1, isBarrier = 1, Ra = 31, Rb = 26, disp = 1, Uses = [R26] in {
- def RETDAG : MbrForm< 0x1A, 0x02, (ins), "ret $$31,($$26),1", s_jsr>; //Return from subroutine
- def RETDAGp : MbrpForm< 0x1A, 0x02, (ins), "ret $$31,($$26),1", [(retflag)], s_jsr>; //Return from subroutine
-}
-
-let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1, Ra = 31, disp = 0 in
-def JMP : MbrpForm< 0x1A, 0x00, (ins GPRC:$RS), "jmp $$31,($RS),0",
- [(brind GPRC:$RS)], s_jsr>; //Jump
-
-let isCall = 1, Ra = 26,
- Defs = [R0, R1, R2, R3, R4, R5, R6, R7, R8, R16, R17, R18, R19,
- R20, R21, R22, R23, R24, R25, R26, R27, R28, R29,
- F0, F1,
- F10, F11, F12, F13, F14, F15, F16, F17, F18, F19,
- F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F30], Uses = [R29] in {
- def BSR : BFormD<0x34, "bsr $$26,$$$DISP..ng", [], s_jsr>; //Branch to subroutine
-}
-let isCall = 1, Ra = 26, Rb = 27, disp = 0,
- Defs = [R0, R1, R2, R3, R4, R5, R6, R7, R8, R16, R17, R18, R19,
- R20, R21, R22, R23, R24, R25, R26, R27, R28, R29,
- F0, F1,
- F10, F11, F12, F13, F14, F15, F16, F17, F18, F19,
- F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F30], Uses = [R27, R29] in {
- def JSR : MbrForm< 0x1A, 0x01, (ins), "jsr $$26,($$27),0", s_jsr>; //Jump to subroutine
-}
-
-let isCall = 1, Ra = 23, Rb = 27, disp = 0,
- Defs = [R23, R24, R25, R27, R28], Uses = [R24, R25, R27] in
- def JSRs : MbrForm< 0x1A, 0x01, (ins), "jsr $$23,($$27),0", s_jsr>; //Jump to div or rem
-
-
-def JSR_COROUTINE : MbrForm< 0x1A, 0x03, (ins GPRC:$RD, GPRC:$RS, s14imm:$DISP), "jsr_coroutine $RD,($RS),$DISP", s_jsr>; //Jump to subroutine return
-
-
-let OutOperandList = (outs GPRC:$RA), InOperandList = (ins s64imm:$DISP, GPRC:$RB) in {
-def LDQ : MForm<0x29, 1, "ldq $RA,$DISP($RB)",
- [(set GPRC:$RA, (load (add GPRC:$RB, immSExt16:$DISP)))], s_ild>;
-def LDQr : MForm<0x29, 1, "ldq $RA,$DISP($RB)\t\t!gprellow",
- [(set GPRC:$RA, (load (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB)))], s_ild>;
-def LDL : MForm<0x28, 1, "ldl $RA,$DISP($RB)",
- [(set GPRC:$RA, (sextloadi32 (add GPRC:$RB, immSExt16:$DISP)))], s_ild>;
-def LDLr : MForm<0x28, 1, "ldl $RA,$DISP($RB)\t\t!gprellow",
- [(set GPRC:$RA, (sextloadi32 (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB)))], s_ild>;
-def LDBU : MForm<0x0A, 1, "ldbu $RA,$DISP($RB)",
- [(set GPRC:$RA, (zextloadi8 (add GPRC:$RB, immSExt16:$DISP)))], s_ild>;
-def LDBUr : MForm<0x0A, 1, "ldbu $RA,$DISP($RB)\t\t!gprellow",
- [(set GPRC:$RA, (zextloadi8 (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB)))], s_ild>;
-def LDWU : MForm<0x0C, 1, "ldwu $RA,$DISP($RB)",
- [(set GPRC:$RA, (zextloadi16 (add GPRC:$RB, immSExt16:$DISP)))], s_ild>;
-def LDWUr : MForm<0x0C, 1, "ldwu $RA,$DISP($RB)\t\t!gprellow",
- [(set GPRC:$RA, (zextloadi16 (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB)))], s_ild>;
-}
-
-
-let OutOperandList = (outs), InOperandList = (ins GPRC:$RA, s64imm:$DISP, GPRC:$RB) in {
-def STB : MForm<0x0E, 0, "stb $RA,$DISP($RB)",
- [(truncstorei8 GPRC:$RA, (add GPRC:$RB, immSExt16:$DISP))], s_ist>;
-def STBr : MForm<0x0E, 0, "stb $RA,$DISP($RB)\t\t!gprellow",
- [(truncstorei8 GPRC:$RA, (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB))], s_ist>;
-def STW : MForm<0x0D, 0, "stw $RA,$DISP($RB)",
- [(truncstorei16 GPRC:$RA, (add GPRC:$RB, immSExt16:$DISP))], s_ist>;
-def STWr : MForm<0x0D, 0, "stw $RA,$DISP($RB)\t\t!gprellow",
- [(truncstorei16 GPRC:$RA, (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB))], s_ist>;
-def STL : MForm<0x2C, 0, "stl $RA,$DISP($RB)",
- [(truncstorei32 GPRC:$RA, (add GPRC:$RB, immSExt16:$DISP))], s_ist>;
-def STLr : MForm<0x2C, 0, "stl $RA,$DISP($RB)\t\t!gprellow",
- [(truncstorei32 GPRC:$RA, (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB))], s_ist>;
-def STQ : MForm<0x2D, 0, "stq $RA,$DISP($RB)",
- [(store GPRC:$RA, (add GPRC:$RB, immSExt16:$DISP))], s_ist>;
-def STQr : MForm<0x2D, 0, "stq $RA,$DISP($RB)\t\t!gprellow",
- [(store GPRC:$RA, (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB))], s_ist>;
-}
-
-//Load address
-let OutOperandList = (outs GPRC:$RA), InOperandList = (ins s64imm:$DISP, GPRC:$RB) in {
-def LDA : MForm<0x08, 0, "lda $RA,$DISP($RB)",
- [(set GPRC:$RA, (add GPRC:$RB, immSExt16:$DISP))], s_lda>;
-def LDAr : MForm<0x08, 0, "lda $RA,$DISP($RB)\t\t!gprellow",
- [(set GPRC:$RA, (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB))], s_lda>; //Load address
-def LDAH : MForm<0x09, 0, "ldah $RA,$DISP($RB)",
- [], s_lda>; //Load address high
-def LDAHr : MForm<0x09, 0, "ldah $RA,$DISP($RB)\t\t!gprelhigh",
- [(set GPRC:$RA, (Alpha_gprelhi tglobaladdr:$DISP, GPRC:$RB))], s_lda>; //Load address high
-}
-
-let OutOperandList = (outs), InOperandList = (ins F4RC:$RA, s64imm:$DISP, GPRC:$RB) in {
-def STS : MForm<0x26, 0, "sts $RA,$DISP($RB)",
- [(store F4RC:$RA, (add GPRC:$RB, immSExt16:$DISP))], s_fst>;
-def STSr : MForm<0x26, 0, "sts $RA,$DISP($RB)\t\t!gprellow",
- [(store F4RC:$RA, (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB))], s_fst>;
-}
-let OutOperandList = (outs F4RC:$RA), InOperandList = (ins s64imm:$DISP, GPRC:$RB) in {
-def LDS : MForm<0x22, 1, "lds $RA,$DISP($RB)",
- [(set F4RC:$RA, (load (add GPRC:$RB, immSExt16:$DISP)))], s_fld>;
-def LDSr : MForm<0x22, 1, "lds $RA,$DISP($RB)\t\t!gprellow",
- [(set F4RC:$RA, (load (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB)))], s_fld>;
-}
-let OutOperandList = (outs), InOperandList = (ins F8RC:$RA, s64imm:$DISP, GPRC:$RB) in {
-def STT : MForm<0x27, 0, "stt $RA,$DISP($RB)",
- [(store F8RC:$RA, (add GPRC:$RB, immSExt16:$DISP))], s_fst>;
-def STTr : MForm<0x27, 0, "stt $RA,$DISP($RB)\t\t!gprellow",
- [(store F8RC:$RA, (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB))], s_fst>;
-}
-let OutOperandList = (outs F8RC:$RA), InOperandList = (ins s64imm:$DISP, GPRC:$RB) in {
-def LDT : MForm<0x23, 1, "ldt $RA,$DISP($RB)",
- [(set F8RC:$RA, (load (add GPRC:$RB, immSExt16:$DISP)))], s_fld>;
-def LDTr : MForm<0x23, 1, "ldt $RA,$DISP($RB)\t\t!gprellow",
- [(set F8RC:$RA, (load (Alpha_gprello tglobaladdr:$DISP, GPRC:$RB)))], s_fld>;
-}
-
-
-//constpool rels
-def : Pat<(i64 (load (Alpha_gprello tconstpool:$DISP, GPRC:$RB))),
- (LDQr tconstpool:$DISP, GPRC:$RB)>;
-def : Pat<(i64 (sextloadi32 (Alpha_gprello tconstpool:$DISP, GPRC:$RB))),
- (LDLr tconstpool:$DISP, GPRC:$RB)>;
-def : Pat<(i64 (zextloadi8 (Alpha_gprello tconstpool:$DISP, GPRC:$RB))),
- (LDBUr tconstpool:$DISP, GPRC:$RB)>;
-def : Pat<(i64 (zextloadi16 (Alpha_gprello tconstpool:$DISP, GPRC:$RB))),
- (LDWUr tconstpool:$DISP, GPRC:$RB)>;
-def : Pat<(i64 (Alpha_gprello tconstpool:$DISP, GPRC:$RB)),
- (LDAr tconstpool:$DISP, GPRC:$RB)>;
-def : Pat<(i64 (Alpha_gprelhi tconstpool:$DISP, GPRC:$RB)),
- (LDAHr tconstpool:$DISP, GPRC:$RB)>;
-def : Pat<(f32 (load (Alpha_gprello tconstpool:$DISP, GPRC:$RB))),
- (LDSr tconstpool:$DISP, GPRC:$RB)>;
-def : Pat<(f64 (load (Alpha_gprello tconstpool:$DISP, GPRC:$RB))),
- (LDTr tconstpool:$DISP, GPRC:$RB)>;
-
-//jumptable rels
-def : Pat<(i64 (Alpha_gprelhi tjumptable:$DISP, GPRC:$RB)),
- (LDAHr tjumptable:$DISP, GPRC:$RB)>;
-def : Pat<(i64 (Alpha_gprello tjumptable:$DISP, GPRC:$RB)),
- (LDAr tjumptable:$DISP, GPRC:$RB)>;
-
-
-//misc ext patterns
-def : Pat<(i64 (extloadi8 (add GPRC:$RB, immSExt16:$DISP))),
- (LDBU immSExt16:$DISP, GPRC:$RB)>;
-def : Pat<(i64 (extloadi16 (add GPRC:$RB, immSExt16:$DISP))),
- (LDWU immSExt16:$DISP, GPRC:$RB)>;
-def : Pat<(i64 (extloadi32 (add GPRC:$RB, immSExt16:$DISP))),
- (LDL immSExt16:$DISP, GPRC:$RB)>;
-
-//0 disp patterns
-def : Pat<(i64 (load GPRC:$addr)),
- (LDQ 0, GPRC:$addr)>;
-def : Pat<(f64 (load GPRC:$addr)),
- (LDT 0, GPRC:$addr)>;
-def : Pat<(f32 (load GPRC:$addr)),
- (LDS 0, GPRC:$addr)>;
-def : Pat<(i64 (sextloadi32 GPRC:$addr)),
- (LDL 0, GPRC:$addr)>;
-def : Pat<(i64 (zextloadi16 GPRC:$addr)),
- (LDWU 0, GPRC:$addr)>;
-def : Pat<(i64 (zextloadi8 GPRC:$addr)),
- (LDBU 0, GPRC:$addr)>;
-def : Pat<(i64 (extloadi8 GPRC:$addr)),
- (LDBU 0, GPRC:$addr)>;
-def : Pat<(i64 (extloadi16 GPRC:$addr)),
- (LDWU 0, GPRC:$addr)>;
-def : Pat<(i64 (extloadi32 GPRC:$addr)),
- (LDL 0, GPRC:$addr)>;
-
-def : Pat<(store GPRC:$DATA, GPRC:$addr),
- (STQ GPRC:$DATA, 0, GPRC:$addr)>;
-def : Pat<(store F8RC:$DATA, GPRC:$addr),
- (STT F8RC:$DATA, 0, GPRC:$addr)>;
-def : Pat<(store F4RC:$DATA, GPRC:$addr),
- (STS F4RC:$DATA, 0, GPRC:$addr)>;
-def : Pat<(truncstorei32 GPRC:$DATA, GPRC:$addr),
- (STL GPRC:$DATA, 0, GPRC:$addr)>;
-def : Pat<(truncstorei16 GPRC:$DATA, GPRC:$addr),
- (STW GPRC:$DATA, 0, GPRC:$addr)>;
-def : Pat<(truncstorei8 GPRC:$DATA, GPRC:$addr),
- (STB GPRC:$DATA, 0, GPRC:$addr)>;
-
-
-//load address, relocated gpdist form
-let OutOperandList = (outs GPRC:$RA),
- InOperandList = (ins s16imm:$DISP, GPRC:$RB, s16imm:$NUM),
- mayLoad = 1 in {
-def LDAg : MForm<0x08, 1, "lda $RA,0($RB)\t\t!gpdisp!$NUM", [], s_lda>; //Load address
-def LDAHg : MForm<0x09, 1, "ldah $RA,0($RB)\t\t!gpdisp!$NUM", [], s_lda>; //Load address
-}
-
-//Load quad, relocated literal form
-let OutOperandList = (outs GPRC:$RA), InOperandList = (ins s64imm:$DISP, GPRC:$RB) in
-def LDQl : MForm<0x29, 1, "ldq $RA,$DISP($RB)\t\t!literal",
- [(set GPRC:$RA, (Alpha_rellit tglobaladdr:$DISP, GPRC:$RB))], s_ild>;
-def : Pat<(Alpha_rellit texternalsym:$ext, GPRC:$RB),
- (LDQl texternalsym:$ext, GPRC:$RB)>;
-
-let OutOperandList = (outs GPRC:$RR),
- InOperandList = (ins GPRC:$RA, s64imm:$DISP, GPRC:$RB),
- Constraints = "$RA = $RR",
- DisableEncoding = "$RR" in {
-def STQ_C : MForm<0x2F, 0, "stq_c $RA,$DISP($RB)", [], s_ist>;
-def STL_C : MForm<0x2E, 0, "stl_c $RA,$DISP($RB)", [], s_ist>;
-}
-let OutOperandList = (outs GPRC:$RA),
- InOperandList = (ins s64imm:$DISP, GPRC:$RB),
- mayLoad = 1 in {
-def LDQ_L : MForm<0x2B, 1, "ldq_l $RA,$DISP($RB)", [], s_ild>;
-def LDL_L : MForm<0x2A, 1, "ldl_l $RA,$DISP($RB)", [], s_ild>;
-}
-
-def RPCC : MfcForm<0x18, 0xC000, "rpcc $RA", s_rpcc>; //Read process cycle counter
-def MB : MfcPForm<0x18, 0x4000, "mb", s_imisc>; //memory barrier
-def WMB : MfcPForm<0x18, 0x4400, "wmb", s_imisc>; //write memory barrier
-
-def : Pat<(membarrier (i64 imm), (i64 imm), (i64 imm), (i64 1), (i64 imm)),
- (WMB)>;
-def : Pat<(membarrier (i64 imm), (i64 imm), (i64 imm), (i64 imm), (i64 imm)),
- (MB)>;
-
-def : Pat<(atomic_fence (imm), (imm)), (MB)>;
-
-//Basic Floating point ops
-
-//Floats
-
-let OutOperandList = (outs F4RC:$RC), InOperandList = (ins F4RC:$RB), Fa = 31 in
-def SQRTS : FPForm<0x14, 0x58B, "sqrts/su $RB,$RC",
- [(set F4RC:$RC, (fsqrt F4RC:$RB))], s_fsqrts>;
-
-let OutOperandList = (outs F4RC:$RC), InOperandList = (ins F4RC:$RA, F4RC:$RB) in {
-def ADDS : FPForm<0x16, 0x580, "adds/su $RA,$RB,$RC",
- [(set F4RC:$RC, (fadd F4RC:$RA, F4RC:$RB))], s_fadd>;
-def SUBS : FPForm<0x16, 0x581, "subs/su $RA,$RB,$RC",
- [(set F4RC:$RC, (fsub F4RC:$RA, F4RC:$RB))], s_fadd>;
-def DIVS : FPForm<0x16, 0x583, "divs/su $RA,$RB,$RC",
- [(set F4RC:$RC, (fdiv F4RC:$RA, F4RC:$RB))], s_fdivs>;
-def MULS : FPForm<0x16, 0x582, "muls/su $RA,$RB,$RC",
- [(set F4RC:$RC, (fmul F4RC:$RA, F4RC:$RB))], s_fmul>;
-
-def CPYSS : FPForm<0x17, 0x020, "cpys $RA,$RB,$RC",
- [(set F4RC:$RC, (fcopysign F4RC:$RB, F4RC:$RA))], s_fadd>;
-def CPYSES : FPForm<0x17, 0x022, "cpyse $RA,$RB,$RC",[], s_fadd>; //Copy sign and exponent
-def CPYSNS : FPForm<0x17, 0x021, "cpysn $RA,$RB,$RC",
- [(set F4RC:$RC, (fneg (fcopysign F4RC:$RB, F4RC:$RA)))], s_fadd>;
-}
-
-//Doubles
-
-let OutOperandList = (outs F8RC:$RC), InOperandList = (ins F8RC:$RB), Fa = 31 in
-def SQRTT : FPForm<0x14, 0x5AB, "sqrtt/su $RB,$RC",
- [(set F8RC:$RC, (fsqrt F8RC:$RB))], s_fsqrtt>;
-
-let OutOperandList = (outs F8RC:$RC), InOperandList = (ins F8RC:$RA, F8RC:$RB) in {
-def ADDT : FPForm<0x16, 0x5A0, "addt/su $RA,$RB,$RC",
- [(set F8RC:$RC, (fadd F8RC:$RA, F8RC:$RB))], s_fadd>;
-def SUBT : FPForm<0x16, 0x5A1, "subt/su $RA,$RB,$RC",
- [(set F8RC:$RC, (fsub F8RC:$RA, F8RC:$RB))], s_fadd>;
-def DIVT : FPForm<0x16, 0x5A3, "divt/su $RA,$RB,$RC",
- [(set F8RC:$RC, (fdiv F8RC:$RA, F8RC:$RB))], s_fdivt>;
-def MULT : FPForm<0x16, 0x5A2, "mult/su $RA,$RB,$RC",
- [(set F8RC:$RC, (fmul F8RC:$RA, F8RC:$RB))], s_fmul>;
-
-def CPYST : FPForm<0x17, 0x020, "cpys $RA,$RB,$RC",
- [(set F8RC:$RC, (fcopysign F8RC:$RB, F8RC:$RA))], s_fadd>;
-def CPYSET : FPForm<0x17, 0x022, "cpyse $RA,$RB,$RC",[], s_fadd>; //Copy sign and exponent
-def CPYSNT : FPForm<0x17, 0x021, "cpysn $RA,$RB,$RC",
- [(set F8RC:$RC, (fneg (fcopysign F8RC:$RB, F8RC:$RA)))], s_fadd>;
-
-def CMPTEQ : FPForm<0x16, 0x5A5, "cmpteq/su $RA,$RB,$RC", [], s_fadd>;
-// [(set F8RC:$RC, (seteq F8RC:$RA, F8RC:$RB))]>;
-def CMPTLE : FPForm<0x16, 0x5A7, "cmptle/su $RA,$RB,$RC", [], s_fadd>;
-// [(set F8RC:$RC, (setle F8RC:$RA, F8RC:$RB))]>;
-def CMPTLT : FPForm<0x16, 0x5A6, "cmptlt/su $RA,$RB,$RC", [], s_fadd>;
-// [(set F8RC:$RC, (setlt F8RC:$RA, F8RC:$RB))]>;
-def CMPTUN : FPForm<0x16, 0x5A4, "cmptun/su $RA,$RB,$RC", [], s_fadd>;
-// [(set F8RC:$RC, (setuo F8RC:$RA, F8RC:$RB))]>;
-}
-
-//More CPYS forms:
-let OutOperandList = (outs F8RC:$RC), InOperandList = (ins F4RC:$RA, F8RC:$RB) in {
-def CPYSTs : FPForm<0x17, 0x020, "cpys $RA,$RB,$RC",
- [(set F8RC:$RC, (fcopysign F8RC:$RB, F4RC:$RA))], s_fadd>;
-def CPYSNTs : FPForm<0x17, 0x021, "cpysn $RA,$RB,$RC",
- [(set F8RC:$RC, (fneg (fcopysign F8RC:$RB, F4RC:$RA)))], s_fadd>;
-}
-let OutOperandList = (outs F4RC:$RC), InOperandList = (ins F8RC:$RA, F4RC:$RB) in {
-def CPYSSt : FPForm<0x17, 0x020, "cpys $RA,$RB,$RC",
- [(set F4RC:$RC, (fcopysign F4RC:$RB, F8RC:$RA))], s_fadd>;
-def CPYSESt : FPForm<0x17, 0x022, "cpyse $RA,$RB,$RC",[], s_fadd>; //Copy sign and exponent
-def CPYSNSt : FPForm<0x17, 0x021, "cpysn $RA,$RB,$RC",
- [(set F4RC:$RC, (fneg (fcopysign F4RC:$RB, F8RC:$RA)))], s_fadd>;
-}
-
-//conditional moves, floats
-let OutOperandList = (outs F4RC:$RDEST),
- InOperandList = (ins F4RC:$RFALSE, F4RC:$RTRUE, F8RC:$RCOND),
- Constraints = "$RTRUE = $RDEST" in {
-def FCMOVEQS : FPForm<0x17, 0x02A,
- "fcmoveq $RCOND,$RTRUE,$RDEST",
- [], s_fcmov>; //FCMOVE if = zero
-def FCMOVGES : FPForm<0x17, 0x02D,
- "fcmovge $RCOND,$RTRUE,$RDEST",
- [], s_fcmov>; //FCMOVE if >= zero
-def FCMOVGTS : FPForm<0x17, 0x02F,
- "fcmovgt $RCOND,$RTRUE,$RDEST",
- [], s_fcmov>; //FCMOVE if > zero
-def FCMOVLES : FPForm<0x17, 0x02E,
- "fcmovle $RCOND,$RTRUE,$RDEST",
- [], s_fcmov>; //FCMOVE if <= zero
-def FCMOVLTS : FPForm<0x17, 0x02C,
- "fcmovlt $RCOND,$RTRUE,$RDEST",
- [], s_fcmov>; // FCMOVE if < zero
-def FCMOVNES : FPForm<0x17, 0x02B,
- "fcmovne $RCOND,$RTRUE,$RDEST",
- [], s_fcmov>; //FCMOVE if != zero
-}
-//conditional moves, doubles
-let OutOperandList = (outs F8RC:$RDEST),
- InOperandList = (ins F8RC:$RFALSE, F8RC:$RTRUE, F8RC:$RCOND),
- Constraints = "$RTRUE = $RDEST" in {
-def FCMOVEQT : FPForm<0x17, 0x02A, "fcmoveq $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
-def FCMOVGET : FPForm<0x17, 0x02D, "fcmovge $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
-def FCMOVGTT : FPForm<0x17, 0x02F, "fcmovgt $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
-def FCMOVLET : FPForm<0x17, 0x02E, "fcmovle $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
-def FCMOVLTT : FPForm<0x17, 0x02C, "fcmovlt $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
-def FCMOVNET : FPForm<0x17, 0x02B, "fcmovne $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
-}
-
-//misc FP selects
-//Select double
-
-def : Pat<(select (seteq F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setoeq F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setueq F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-
-def : Pat<(select (setne F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVEQT F8RC:$sf, F8RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setone F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVEQT F8RC:$sf, F8RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setune F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVEQT F8RC:$sf, F8RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-
-def : Pat<(select (setgt F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLT F8RC:$RB, F8RC:$RA))>;
-def : Pat<(select (setogt F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLT F8RC:$RB, F8RC:$RA))>;
-def : Pat<(select (setugt F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLT F8RC:$RB, F8RC:$RA))>;
-
-def : Pat<(select (setge F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLE F8RC:$RB, F8RC:$RA))>;
-def : Pat<(select (setoge F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLE F8RC:$RB, F8RC:$RA))>;
-def : Pat<(select (setuge F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLE F8RC:$RB, F8RC:$RA))>;
-
-def : Pat<(select (setlt F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLT F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setolt F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLT F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setult F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLT F8RC:$RA, F8RC:$RB))>;
-
-def : Pat<(select (setle F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLE F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setole F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLE F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setule F8RC:$RA, F8RC:$RB), F8RC:$st, F8RC:$sf),
- (FCMOVNET F8RC:$sf, F8RC:$st, (CMPTLE F8RC:$RA, F8RC:$RB))>;
-
-//Select single
-def : Pat<(select (seteq F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setoeq F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setueq F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-
-def : Pat<(select (setne F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVEQS F4RC:$sf, F4RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setone F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVEQS F4RC:$sf, F4RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setune F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVEQS F4RC:$sf, F4RC:$st, (CMPTEQ F8RC:$RA, F8RC:$RB))>;
-
-def : Pat<(select (setgt F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLT F8RC:$RB, F8RC:$RA))>;
-def : Pat<(select (setogt F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLT F8RC:$RB, F8RC:$RA))>;
-def : Pat<(select (setugt F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLT F8RC:$RB, F8RC:$RA))>;
-
-def : Pat<(select (setge F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLE F8RC:$RB, F8RC:$RA))>;
-def : Pat<(select (setoge F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLE F8RC:$RB, F8RC:$RA))>;
-def : Pat<(select (setuge F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLE F8RC:$RB, F8RC:$RA))>;
-
-def : Pat<(select (setlt F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLT F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setolt F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLT F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setult F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLT F8RC:$RA, F8RC:$RB))>;
-
-def : Pat<(select (setle F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLE F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setole F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLE F8RC:$RA, F8RC:$RB))>;
-def : Pat<(select (setule F8RC:$RA, F8RC:$RB), F4RC:$st, F4RC:$sf),
- (FCMOVNES F4RC:$sf, F4RC:$st, (CMPTLE F8RC:$RA, F8RC:$RB))>;
-
-
-
-let OutOperandList = (outs GPRC:$RC), InOperandList = (ins F4RC:$RA), Fb = 31 in
-def FTOIS : FPForm<0x1C, 0x078, "ftois $RA,$RC",
- [(set GPRC:$RC, (bitconvert F4RC:$RA))], s_ftoi>; //Floating to integer move, S_floating
-let OutOperandList = (outs GPRC:$RC), InOperandList = (ins F8RC:$RA), Fb = 31 in
-def FTOIT : FPForm<0x1C, 0x070, "ftoit $RA,$RC",
- [(set GPRC:$RC, (bitconvert F8RC:$RA))], s_ftoi>; //Floating to integer move
-let OutOperandList = (outs F4RC:$RC), InOperandList = (ins GPRC:$RA), Fb = 31 in
-def ITOFS : FPForm<0x14, 0x004, "itofs $RA,$RC",
- [(set F4RC:$RC, (bitconvert GPRC:$RA))], s_itof>; //Integer to floating move, S_floating
-let OutOperandList = (outs F8RC:$RC), InOperandList = (ins GPRC:$RA), Fb = 31 in
-def ITOFT : FPForm<0x14, 0x024, "itoft $RA,$RC",
- [(set F8RC:$RC, (bitconvert GPRC:$RA))], s_itof>; //Integer to floating move
-
-
-let OutOperandList = (outs F4RC:$RC), InOperandList = (ins F8RC:$RB), Fa = 31 in
-def CVTQS : FPForm<0x16, 0x7BC, "cvtqs/sui $RB,$RC",
- [(set F4RC:$RC, (Alpha_cvtqs F8RC:$RB))], s_fadd>;
-let OutOperandList = (outs F8RC:$RC), InOperandList = (ins F8RC:$RB), Fa = 31 in
-def CVTQT : FPForm<0x16, 0x7BE, "cvtqt/sui $RB,$RC",
- [(set F8RC:$RC, (Alpha_cvtqt F8RC:$RB))], s_fadd>;
-let OutOperandList = (outs F8RC:$RC), InOperandList = (ins F8RC:$RB), Fa = 31 in
-def CVTTQ : FPForm<0x16, 0x52F, "cvttq/svc $RB,$RC",
- [(set F8RC:$RC, (Alpha_cvttq F8RC:$RB))], s_fadd>;
-let OutOperandList = (outs F8RC:$RC), InOperandList = (ins F4RC:$RB), Fa = 31 in
-def CVTST : FPForm<0x16, 0x6AC, "cvtst/s $RB,$RC",
- [(set F8RC:$RC, (fextend F4RC:$RB))], s_fadd>;
-let OutOperandList = (outs F4RC:$RC), InOperandList = (ins F8RC:$RB), Fa = 31 in
-def CVTTS : FPForm<0x16, 0x7AC, "cvtts/sui $RB,$RC",
- [(set F4RC:$RC, (fround F8RC:$RB))], s_fadd>;
-
-def : Pat<(select GPRC:$RC, F8RC:$st, F8RC:$sf),
- (f64 (FCMOVEQT F8RC:$st, F8RC:$sf, (ITOFT GPRC:$RC)))>;
-def : Pat<(select GPRC:$RC, F4RC:$st, F4RC:$sf),
- (f32 (FCMOVEQS F4RC:$st, F4RC:$sf, (ITOFT GPRC:$RC)))>;
-
-/////////////////////////////////////////////////////////
-//Branching
-/////////////////////////////////////////////////////////
-class br_icc<bits<6> opc, string asmstr>
- : BFormN<opc, (ins u64imm:$opc, GPRC:$R, target:$dst),
- !strconcat(asmstr, " $R,$dst"), s_icbr>;
-class br_fcc<bits<6> opc, string asmstr>
- : BFormN<opc, (ins u64imm:$opc, F8RC:$R, target:$dst),
- !strconcat(asmstr, " $R,$dst"), s_fbr>;
-
-let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
-let Ra = 31, isBarrier = 1 in
-def BR : BFormD<0x30, "br $$31,$DISP", [(br bb:$DISP)], s_ubr>;
-
-def COND_BRANCH_I : BFormN<0, (ins u64imm:$opc, GPRC:$R, target:$dst),
- "{:comment} COND_BRANCH imm:$opc, GPRC:$R, bb:$dst",
- s_icbr>;
-def COND_BRANCH_F : BFormN<0, (ins u64imm:$opc, F8RC:$R, target:$dst),
- "{:comment} COND_BRANCH imm:$opc, F8RC:$R, bb:$dst",
- s_fbr>;
-//Branches, int
-def BEQ : br_icc<0x39, "beq">;
-def BGE : br_icc<0x3E, "bge">;
-def BGT : br_icc<0x3F, "bgt">;
-def BLBC : br_icc<0x38, "blbc">;
-def BLBS : br_icc<0x3C, "blbs">;
-def BLE : br_icc<0x3B, "ble">;
-def BLT : br_icc<0x3A, "blt">;
-def BNE : br_icc<0x3D, "bne">;
-
-//Branches, float
-def FBEQ : br_fcc<0x31, "fbeq">;
-def FBGE : br_fcc<0x36, "fbge">;
-def FBGT : br_fcc<0x37, "fbgt">;
-def FBLE : br_fcc<0x33, "fble">;
-def FBLT : br_fcc<0x32, "fblt">;
-def FBNE : br_fcc<0x35, "fbne">;
-}
-
-//An ugly trick to get the opcode as an imm I can use
-def immBRCond : SDNodeXForm<imm, [{
- switch((uint64_t)N->getZExtValue()) {
- default: assert(0 && "Unknown branch type");
- case 0: return getI64Imm(Alpha::BEQ);
- case 1: return getI64Imm(Alpha::BNE);
- case 2: return getI64Imm(Alpha::BGE);
- case 3: return getI64Imm(Alpha::BGT);
- case 4: return getI64Imm(Alpha::BLE);
- case 5: return getI64Imm(Alpha::BLT);
- case 6: return getI64Imm(Alpha::BLBS);
- case 7: return getI64Imm(Alpha::BLBC);
- case 20: return getI64Imm(Alpha::FBEQ);
- case 21: return getI64Imm(Alpha::FBNE);
- case 22: return getI64Imm(Alpha::FBGE);
- case 23: return getI64Imm(Alpha::FBGT);
- case 24: return getI64Imm(Alpha::FBLE);
- case 25: return getI64Imm(Alpha::FBLT);
- }
-}]>;
-
-//Int cond patterns
-def : Pat<(brcond (seteq GPRC:$RA, 0), bb:$DISP),
- (COND_BRANCH_I (immBRCond 0), GPRC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setge GPRC:$RA, 0), bb:$DISP),
- (COND_BRANCH_I (immBRCond 2), GPRC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setgt GPRC:$RA, 0), bb:$DISP),
- (COND_BRANCH_I (immBRCond 3), GPRC:$RA, bb:$DISP)>;
-def : Pat<(brcond (and GPRC:$RA, 1), bb:$DISP),
- (COND_BRANCH_I (immBRCond 6), GPRC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setle GPRC:$RA, 0), bb:$DISP),
- (COND_BRANCH_I (immBRCond 4), GPRC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setlt GPRC:$RA, 0), bb:$DISP),
- (COND_BRANCH_I (immBRCond 5), GPRC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setne GPRC:$RA, 0), bb:$DISP),
- (COND_BRANCH_I (immBRCond 1), GPRC:$RA, bb:$DISP)>;
-
-def : Pat<(brcond GPRC:$RA, bb:$DISP),
- (COND_BRANCH_I (immBRCond 1), GPRC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setne GPRC:$RA, GPRC:$RB), bb:$DISP),
- (COND_BRANCH_I (immBRCond 0), (CMPEQ GPRC:$RA, GPRC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setne GPRC:$RA, immUExt8:$L), bb:$DISP),
- (COND_BRANCH_I (immBRCond 0), (CMPEQi GPRC:$RA, immUExt8:$L), bb:$DISP)>;
-
-//FP cond patterns
-def : Pat<(brcond (seteq F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 20), F8RC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setne F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), F8RC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setge F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 22), F8RC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setgt F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 23), F8RC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setle F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 24), F8RC:$RA, bb:$DISP)>;
-def : Pat<(brcond (setlt F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 25), F8RC:$RA, bb:$DISP)>;
-
-
-def : Pat<(brcond (seteq F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTEQ F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setoeq F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTEQ F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setueq F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTEQ F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-
-def : Pat<(brcond (setlt F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLT F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setolt F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLT F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setult F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLT F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-
-def : Pat<(brcond (setle F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLE F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setole F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLE F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setule F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLE F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-
-def : Pat<(brcond (setgt F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLT F8RC:$RB, F8RC:$RA), bb:$DISP)>;
-def : Pat<(brcond (setogt F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLT F8RC:$RB, F8RC:$RA), bb:$DISP)>;
-def : Pat<(brcond (setugt F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLT F8RC:$RB, F8RC:$RA), bb:$DISP)>;
-
-def : Pat<(brcond (setge F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLE F8RC:$RB, F8RC:$RA), bb:$DISP)>;
-def : Pat<(brcond (setoge F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLE F8RC:$RB, F8RC:$RA), bb:$DISP)>;
-def : Pat<(brcond (setuge F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), (CMPTLE F8RC:$RB, F8RC:$RA), bb:$DISP)>;
-
-def : Pat<(brcond (setne F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 20), (CMPTEQ F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setone F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 20), (CMPTEQ F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-def : Pat<(brcond (setune F8RC:$RA, F8RC:$RB), bb:$DISP),
- (COND_BRANCH_F (immBRCond 20), (CMPTEQ F8RC:$RA, F8RC:$RB), bb:$DISP)>;
-
-
-def : Pat<(brcond (setoeq F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 20), F8RC:$RA,bb:$DISP)>;
-def : Pat<(brcond (setueq F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 20), F8RC:$RA,bb:$DISP)>;
-
-def : Pat<(brcond (setoge F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 22), F8RC:$RA,bb:$DISP)>;
-def : Pat<(brcond (setuge F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 22), F8RC:$RA,bb:$DISP)>;
-
-def : Pat<(brcond (setogt F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 23), F8RC:$RA,bb:$DISP)>;
-def : Pat<(brcond (setugt F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 23), F8RC:$RA,bb:$DISP)>;
-
-def : Pat<(brcond (setole F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 24), F8RC:$RA,bb:$DISP)>;
-def : Pat<(brcond (setule F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 24), F8RC:$RA,bb:$DISP)>;
-
-def : Pat<(brcond (setolt F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 25), F8RC:$RA,bb:$DISP)>;
-def : Pat<(brcond (setult F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 25), F8RC:$RA,bb:$DISP)>;
-
-def : Pat<(brcond (setone F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), F8RC:$RA,bb:$DISP)>;
-def : Pat<(brcond (setune F8RC:$RA, immFPZ), bb:$DISP),
- (COND_BRANCH_F (immBRCond 21), F8RC:$RA,bb:$DISP)>;
-
-//End Branches
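
Two conventions run through the FP patterns above: Alpha provides only CMPTEQ,
CMPTLT and CMPTLE, so the gt/ge forms are compiled by swapping operands, and
setne becomes CMPTEQ followed by a branch on a zero result (immBRCond 20,
FBEQ). Note also that the ordered and unordered variants (setolt/setult and
friends) map to the same instruction, so NaN ordering is not distinguished.
A small check of the swap identities on ordered values (plain C++, not part
of the deleted sources):

#include <cassert>

int main() {
  double vals[] = {-1.5, 0.0, 2.25};
  for (double a : vals)
    for (double b : vals) {
      assert((a > b)  == (b < a));   // setgt lowered as CMPTLT(RB, RA)
      assert((a >= b) == (b <= a));  // setge lowered as CMPTLE(RB, RA)
      assert((a != b) == !(a == b)); // setne: CMPTEQ, then branch if zero
    }
  return 0;
}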
-
-//S_floating : IEEE Single
-//T_floating : IEEE Double
-
-//Unused instructions
-//Mnemonic Format Opcode  Description
-//CALL_PAL Pcd    00      Trap to PALcode
-//ECB      Mfc    18.E800 Evict cache block
-//EXCB     Mfc    18.0400 Exception barrier
-//FETCH    Mfc    18.8000 Prefetch data
-//FETCH_M  Mfc    18.A000 Prefetch data, modify intent
-//LDQ_U    Mem    0B      Load unaligned quadword
-//MB       Mfc    18.4000 Memory barrier
-//STQ_U    Mem    0F      Store unaligned quadword
-//TRAPB    Mfc    18.0000 Trap barrier
-//WH64     Mfc    18.F800 Write hint - 64 bytes
-//WMB      Mfc    18.4400 Write memory barrier
-//MF_FPCR  F-P    17.025  Move from FPCR
-//MT_FPCR  F-P    17.024  Move to FPCR
-//These are in the Multimedia extensions, so let's not use them yet
-//def MAXSB8 : OForm<0x1C, 0x3E, "MAXSB8 $RA,$RB,$RC">; //Vector signed byte maximum
-//def MAXSW4 : OForm< 0x1C, 0x3F, "MAXSW4 $RA,$RB,$RC">; //Vector signed word maximum
-//def MAXUB8 : OForm<0x1C, 0x3C, "MAXUB8 $RA,$RB,$RC">; //Vector unsigned byte maximum
-//def MAXUW4 : OForm< 0x1C, 0x3D, "MAXUW4 $RA,$RB,$RC">; //Vector unsigned word maximum
-//def MINSB8 : OForm< 0x1C, 0x38, "MINSB8 $RA,$RB,$RC">; //Vector signed byte minimum
-//def MINSW4 : OForm< 0x1C, 0x39, "MINSW4 $RA,$RB,$RC">; //Vector signed word minimum
-//def MINUB8 : OForm< 0x1C, 0x3A, "MINUB8 $RA,$RB,$RC">; //Vector unsigned byte minimum
-//def MINUW4 : OForm< 0x1C, 0x3B, "MINUW4 $RA,$RB,$RC">; //Vector unsigned word minimum
-//def PERR : OForm< 0x1C, 0x31, "PERR $RA,$RB,$RC">; //Pixel error
-//def PKLB : OForm< 0x1C, 0x37, "PKLB $RA,$RB,$RC">; //Pack longwords to bytes
-//def PKWB : OForm<0x1C, 0x36, "PKWB $RA,$RB,$RC">; //Pack words to bytes
-//def UNPKBL : OForm< 0x1C, 0x35, "UNPKBL $RA,$RB,$RC">; //Unpack bytes to longwords
-//def UNPKBW : OForm< 0x1C, 0x34, "UNPKBW $RA,$RB,$RC">; //Unpack bytes to words
-//CVTLQ F-P 17.010 Convert longword to quadword
-//CVTQL F-P 17.030 Convert quadword to longword
-
-
-//Constant handling
-
-def immConst2Part : PatLeaf<(imm), [{
- //true if imm fits in an LDAH/LDA pair
- int64_t val = (int64_t)N->getZExtValue();
- return (val <= IMM_FULLHIGH && val >= IMM_FULLLOW);
-}]>;
-def immConst2PartInt : PatLeaf<(imm), [{
- //true if imm fits in an LDAH/LDA pair after zero-extension
- uint64_t uval = N->getZExtValue();
- int32_t val32 = (int32_t)uval;
- return ((uval >> 32) == 0 && //empty upper bits
- val32 <= IMM_FULLHIGH);
-// val32 >= IMM_FULLLOW + IMM_LOW * IMM_MULT); //Always True
-}], SExt32>;
-
-def : Pat<(i64 immConst2Part:$imm),
- (LDA (LL16 immConst2Part:$imm), (LDAH (LH16 immConst2Part:$imm), R31))>;
-
-def : Pat<(i64 immSExt16:$imm),
- (LDA immSExt16:$imm, R31)>;
-
-def : Pat<(i64 immSExt16int:$imm),
- (ZAPNOTi (LDA (SExt16 immSExt16int:$imm), R31), 15)>;
-def : Pat<(i64 immConst2PartInt:$imm),
- (ZAPNOTi (LDA (LL16 (i64 (SExt32 immConst2PartInt:$imm))),
- (LDAH (LH16 (i64 (SExt32 immConst2PartInt:$imm))), R31)), 15)>;
-
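
To make the two-instruction materialization above concrete: LDA adds a
sign-extended 16-bit immediate to a register, and LDAH adds a sign-extended
16-bit immediate times 65536, so LL16/LH16 must split a constant such that
high*65536 + low reproduces it with both halves in signed 16-bit range. A
sketch of that split (plain C++ with hypothetical helper names standing in
for the tablegen operators):

#include <cassert>
#include <cstdint>

static int64_t ll16(int64_t v) { return (int16_t)(v & 0xFFFF); } // low half
static int64_t lh16(int64_t v) {
  return (v - ll16(v)) / 65536; // exact: v - ll16(v) is a multiple of 65536
}

int main() {
  for (int64_t v : {0x12345678LL, -0x12345678LL, 0x7FFF7FFFLL, -0x10000LL}) {
    assert(lh16(v) * 65536 + ll16(v) == v);        // LDAH then LDA rebuilds v
    assert(ll16(v) >= -32768 && ll16(v) <= 32767); // fits LDA's immediate
    assert(lh16(v) >= -32768 && lh16(v) <= 32767); // fits LDAH's immediate
  }
  return 0;
}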
-
-//TODO: I want to just define these like this!
-//def : Pat<(i64 0),
-// (R31)>;
-//def : Pat<(f64 0.0),
-// (F31)>;
-//def : Pat<(f64 -0.0),
-// (CPYSNT F31, F31)>;
-//def : Pat<(f32 0.0),
-// (F31)>;
-//def : Pat<(f32 -0.0),
-// (CPYSNS F31, F31)>;
-
-//Misc Patterns:
-
-def : Pat<(sext_inreg GPRC:$RB, i32),
- (ADDLi GPRC:$RB, 0)>;
-
-def : Pat<(fabs F8RC:$RB),
- (CPYST F31, F8RC:$RB)>;
-def : Pat<(fabs F4RC:$RB),
- (CPYSS F31, F4RC:$RB)>;
-def : Pat<(fneg F8RC:$RB),
- (CPYSNT F8RC:$RB, F8RC:$RB)>;
-def : Pat<(fneg F4RC:$RB),
- (CPYSNS F4RC:$RB, F4RC:$RB)>;
-
-def : Pat<(fcopysign F4RC:$A, (fneg F4RC:$B)),
- (CPYSNS F4RC:$B, F4RC:$A)>;
-def : Pat<(fcopysign F8RC:$A, (fneg F8RC:$B)),
- (CPYSNT F8RC:$B, F8RC:$A)>;
-def : Pat<(fcopysign F4RC:$A, (fneg F8RC:$B)),
- (CPYSNSt F8RC:$B, F4RC:$A)>;
-def : Pat<(fcopysign F8RC:$A, (fneg F4RC:$B)),
- (CPYSNTs F4RC:$B, F8RC:$A)>;
-
-//Yes, signed multiply high is ugly
-def : Pat<(mulhs GPRC:$RA, GPRC:$RB),
- (SUBQr (UMULHr GPRC:$RA, GPRC:$RB), (ADDQr (CMOVGEr GPRC:$RB, R31, GPRC:$RA),
- (CMOVGEr GPRC:$RA, R31, GPRC:$RB)))>;
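
The intent of that expansion: Alpha has UMULH but no signed multiply-high, and
the standard correction is mulhs(a,b) = umulh(a,b) - (a < 0 ? b : 0) -
(b < 0 ? a : 0); the two CMOVGE operands produce the conditional terms and
ADDQ/SUBQ apply them. A check of the identity (plain C++ using the compiler's
__int128 extension as a 128-bit reference; not part of the deleted code):

#include <cassert>
#include <cstdint>

static uint64_t umulh(uint64_t a, uint64_t b) {
  return (uint64_t)(((unsigned __int128)a * b) >> 64);
}
static int64_t mulhs_ref(int64_t a, int64_t b) {
  return (int64_t)(((__int128)a * b) >> 64);
}

int main() {
  int64_t vals[] = {0, 1, -1, 42, -42, INT64_MAX, INT64_MIN};
  for (int64_t a : vals)
    for (int64_t b : vals) {
      uint64_t corrected = umulh(a, b) - (a < 0 ? (uint64_t)b : 0)
                                       - (b < 0 ? (uint64_t)a : 0);
      assert((int64_t)corrected == mulhs_ref(a, b));
    }
  return 0;
}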
-
-//Stupid crazy arithmetic stuff:
-let AddedComplexity = 1 in {
-def : Pat<(mul GPRC:$RA, 5), (S4ADDQr GPRC:$RA, GPRC:$RA)>;
-def : Pat<(mul GPRC:$RA, 9), (S8ADDQr GPRC:$RA, GPRC:$RA)>;
-def : Pat<(mul GPRC:$RA, 3), (S4SUBQr GPRC:$RA, GPRC:$RA)>;
-def : Pat<(mul GPRC:$RA, 7), (S8SUBQr GPRC:$RA, GPRC:$RA)>;
-
-//slight tree expansion if we are multiplying near a power of 2
-//n is above a power of 2
-def : Pat<(mul GPRC:$RA, immRem1:$imm),
- (ADDQr (SLr GPRC:$RA, (nearP2X immRem1:$imm)), GPRC:$RA)>;
-def : Pat<(mul GPRC:$RA, immRem2:$imm),
- (ADDQr (SLr GPRC:$RA, (nearP2X immRem2:$imm)), (ADDQr GPRC:$RA, GPRC:$RA))>;
-def : Pat<(mul GPRC:$RA, immRem3:$imm),
- (ADDQr (SLr GPRC:$RA, (nearP2X immRem3:$imm)), (S4SUBQr GPRC:$RA, GPRC:$RA))>;
-def : Pat<(mul GPRC:$RA, immRem4:$imm),
- (S4ADDQr GPRC:$RA, (SLr GPRC:$RA, (nearP2X immRem4:$imm)))>;
-def : Pat<(mul GPRC:$RA, immRem5:$imm),
- (ADDQr (SLr GPRC:$RA, (nearP2X immRem5:$imm)), (S4ADDQr GPRC:$RA, GPRC:$RA))>;
-def : Pat<(mul GPRC:$RA, immRemP2:$imm),
- (ADDQr (SLr GPRC:$RA, (nearP2X immRemP2:$imm)), (SLi GPRC:$RA, (nearP2RemX immRemP2:$imm)))>;
-
-//n is below a power of 2
-//FIXME: figure out why something is truncating the imm to 32 bits
-// this will fix 2007-11-27-mulneg3
-//def : Pat<(mul GPRC:$RA, immRem1n:$imm),
-// (SUBQr (SLr GPRC:$RA, (nearP2X immRem1n:$imm)), GPRC:$RA)>;
-//def : Pat<(mul GPRC:$RA, immRem2n:$imm),
-// (SUBQr (SLr GPRC:$RA, (nearP2X immRem2n:$imm)), (ADDQr GPRC:$RA, GPRC:$RA))>;
-//def : Pat<(mul GPRC:$RA, immRem3n:$imm),
-// (SUBQr (SLr GPRC:$RA, (nearP2X immRem3n:$imm)), (S4SUBQr GPRC:$RA, GPRC:$RA))>;
-//def : Pat<(mul GPRC:$RA, immRem4n:$imm),
-// (SUBQr (SLr GPRC:$RA, (nearP2X immRem4n:$imm)), (SLi GPRC:$RA, 2))>;
-//def : Pat<(mul GPRC:$RA, immRem5n:$imm),
-// (SUBQr (SLr GPRC:$RA, (nearP2X immRem5n:$imm)), (S4ADDQr GPRC:$RA, GPRC:$RA))>;
-//def : Pat<(mul GPRC:$RA, immRemP2n:$imm),
-// (SUBQr (SLr GPRC:$RA, (nearP2X immRemP2n:$imm)), (SLi GPRC:$RA, (nearP2RemX immRemP2n:$imm)))>;
-} //Added complexity
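
For reference, the scaled-add identities these patterns rely on: S4ADDQ
computes 4*a + b, S8ADDQ computes 8*a + b, and the subtracting forms compute
4*a - b and 8*a - b, so a multiply by a small constant near a power of two
becomes a shift plus one scaled add or subtract instead of a trip through the
slow integer multiplier. A quick check (plain C++, illustrative only):

#include <cassert>
#include <cstdint>

static uint64_t s4addq(uint64_t a, uint64_t b) { return 4 * a + b; }
static uint64_t s8addq(uint64_t a, uint64_t b) { return 8 * a + b; }
static uint64_t s4subq(uint64_t a, uint64_t b) { return 4 * a - b; }
static uint64_t s8subq(uint64_t a, uint64_t b) { return 8 * a - b; }

int main() {
  for (uint64_t x = 0; x < 1000; ++x) {
    assert(x * 5 == s4addq(x, x));             // 4x + x
    assert(x * 9 == s8addq(x, x));             // 8x + x
    assert(x * 3 == s4subq(x, x));             // 4x - x
    assert(x * 7 == s8subq(x, x));             // 8x - x
    assert(x * 33 == (x << 5) + x);            // immRem1 case: 2^5 + 1
    assert(x * 67 == (x << 6) + s4subq(x, x)); // immRem3 case: 2^6 + 3
  }
  return 0;
}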
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaLLRP.cpp b/contrib/llvm/lib/Target/Alpha/AlphaLLRP.cpp
deleted file mode 100644
index 85fbfd1..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaLLRP.cpp
+++ /dev/null
@@ -1,158 +0,0 @@
-//===-- AlphaLLRP.cpp - Alpha Load Load Replay Trap elimination pass -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Here we check for potential replay traps introduced by the spiller
-// We also align some branch targets if we can do so for free.
-//
-//===----------------------------------------------------------------------===//
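
Background, since the file comment is terse: on the 21264, a second load or
store that hits the same base register and displacement inside one aligned
four-instruction fetch block can force a load-load replay trap, so the pass
pads with "bis $31,$31,$31" nops until the offending access starts a new
block. A simplified model of the padding rule (plain C++, semantics distilled
from runOnMachineFunction below; the real pass tracks MachineInstr operands):

#include <cassert>

// matchAge: 1 = the conflicting memory op is the instruction immediately
// before this one, 2 or 3 = further back in the same fetch block,
// 0 = no conflict tracked in this block.
static int nopsNeeded(int matchAge) {
  return matchAge == 0 ? 0 : 4 - matchAge; // pad into the next fetch block
}

int main() {
  assert(nopsNeeded(0) == 0); // no replay hazard, no padding
  assert(nopsNeeded(1) == 3); // conflict with prev[2] in the pass
  assert(nopsNeeded(2) == 2); // conflict with prev[1]
  assert(nopsNeeded(3) == 1); // conflict with prev[0]
  return 0;
}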
-
-#define DEBUG_TYPE "alpha-nops"
-#include "Alpha.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/ADT/SetOperations.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/CommandLine.h"
-using namespace llvm;
-
-STATISTIC(nopintro, "Number of nops inserted");
-STATISTIC(nopalign, "Number of nops inserted for alignment");
-
-namespace {
- cl::opt<bool>
- AlignAll("alpha-align-all", cl::Hidden,
- cl::desc("Align all blocks"));
-
- struct AlphaLLRPPass : public MachineFunctionPass {
- /// Target machine description which we query for reg. names, data
- /// layout, etc.
- ///
- AlphaTargetMachine &TM;
-
- static char ID;
- AlphaLLRPPass(AlphaTargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm) { }
-
- virtual const char *getPassName() const {
- return "Alpha NOP inserter";
- }
-
- bool runOnMachineFunction(MachineFunction &F) {
- const TargetInstrInfo *TII = F.getTarget().getInstrInfo();
- bool Changed = false;
- MachineInstr* prev[3] = {0,0,0};
- DebugLoc dl;
- unsigned count = 0;
- for (MachineFunction::iterator FI = F.begin(), FE = F.end();
- FI != FE; ++FI) {
- MachineBasicBlock& MBB = *FI;
- bool ub = false;
- for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
- if (count%4 == 0)
- prev[0] = prev[1] = prev[2] = 0; //Slots cleared at fetch boundary
- ++count;
- MachineInstr *MI = I++;
- switch (MI->getOpcode()) {
- case Alpha::LDQ: case Alpha::LDL:
- case Alpha::LDWU: case Alpha::LDBU:
- case Alpha::LDT: case Alpha::LDS:
- case Alpha::STQ: case Alpha::STL:
- case Alpha::STW: case Alpha::STB:
- case Alpha::STT: case Alpha::STS:
- if (MI->getOperand(2).getReg() == Alpha::R30) {
- if (prev[0] &&
- prev[0]->getOperand(2).getReg() == MI->getOperand(2).getReg()&&
- prev[0]->getOperand(1).getImm() == MI->getOperand(1).getImm()){
- prev[0] = prev[1];
- prev[1] = prev[2];
- prev[2] = 0;
- BuildMI(MBB, MI, dl, TII->get(Alpha::BISr), Alpha::R31)
- .addReg(Alpha::R31)
- .addReg(Alpha::R31);
- Changed = true; nopintro += 1;
- count += 1;
- } else if (prev[1]
- && prev[1]->getOperand(2).getReg() ==
- MI->getOperand(2).getReg()
- && prev[1]->getOperand(1).getImm() ==
- MI->getOperand(1).getImm()) {
- prev[0] = prev[2];
- prev[1] = prev[2] = 0;
- BuildMI(MBB, MI, dl, TII->get(Alpha::BISr), Alpha::R31)
- .addReg(Alpha::R31)
- .addReg(Alpha::R31);
- BuildMI(MBB, MI, dl, TII->get(Alpha::BISr), Alpha::R31)
- .addReg(Alpha::R31)
- .addReg(Alpha::R31);
- Changed = true; nopintro += 2;
- count += 2;
- } else if (prev[2]
- && prev[2]->getOperand(2).getReg() ==
- MI->getOperand(2).getReg()
- && prev[2]->getOperand(1).getImm() ==
- MI->getOperand(1).getImm()) {
- prev[0] = prev[1] = prev[2] = 0;
- BuildMI(MBB, MI, dl, TII->get(Alpha::BISr), Alpha::R31)
- .addReg(Alpha::R31).addReg(Alpha::R31);
- BuildMI(MBB, MI, dl, TII->get(Alpha::BISr), Alpha::R31)
- .addReg(Alpha::R31).addReg(Alpha::R31);
- BuildMI(MBB, MI, dl, TII->get(Alpha::BISr), Alpha::R31)
- .addReg(Alpha::R31).addReg(Alpha::R31);
- Changed = true; nopintro += 3;
- count += 3;
- }
- prev[0] = prev[1];
- prev[1] = prev[2];
- prev[2] = MI;
- break;
- }
- prev[0] = prev[1];
- prev[1] = prev[2];
- prev[2] = 0;
- break;
- case Alpha::ALTENT:
- case Alpha::MEMLABEL:
- case Alpha::PCLABEL:
- --count;
- break;
- case Alpha::BR:
- case Alpha::JMP:
- ub = true;
- //fall through
- default:
- prev[0] = prev[1];
- prev[1] = prev[2];
- prev[2] = 0;
- break;
- }
- }
- if (ub || AlignAll) {
- //we can align stuff for free at this point
- while (count % 4) {
- BuildMI(MBB, MBB.end(), dl, TII->get(Alpha::BISr), Alpha::R31)
- .addReg(Alpha::R31).addReg(Alpha::R31);
- ++count;
- ++nopalign;
- prev[0] = prev[1];
- prev[1] = prev[2];
- prev[2] = 0;
- }
- }
- }
- return Changed;
- }
- };
- char AlphaLLRPPass::ID = 0;
-} // end of anonymous namespace
-
-FunctionPass *llvm::createAlphaLLRPPass(AlphaTargetMachine &tm) {
- return new AlphaLLRPPass(tm);
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaMachineFunctionInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaMachineFunctionInfo.h
deleted file mode 100644
index 186738c..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaMachineFunctionInfo.h
+++ /dev/null
@@ -1,62 +0,0 @@
-//===- AlphaMachineFunctionInfo.h - Alpha machine function info -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares Alpha-specific per-machine-function information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHAMACHINEFUNCTIONINFO_H
-#define ALPHAMACHINEFUNCTIONINFO_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-
-namespace llvm {
-
-/// AlphaMachineFunctionInfo - This class is derived from MachineFunctionInfo
-/// and contains private Alpha target-specific information for each
-/// MachineFunction.
-class AlphaMachineFunctionInfo : public MachineFunctionInfo {
- /// GlobalBaseReg - keeps track of the virtual register initialized for
- /// use as the global base register. This is used in some PIC
- /// relocation models.
- unsigned GlobalBaseReg;
-
- /// GlobalRetAddr - keeps track of the virtual register initialized for
- /// the return address value.
- unsigned GlobalRetAddr;
-
- /// VarArgsOffset - What is the offset to the first vaarg
- int VarArgsOffset;
- /// VarArgsBase - What is the base FrameIndex
- int VarArgsBase;
-
-public:
- AlphaMachineFunctionInfo() : GlobalBaseReg(0), GlobalRetAddr(0),
- VarArgsOffset(0), VarArgsBase(0) {}
-
- explicit AlphaMachineFunctionInfo(MachineFunction &MF) : GlobalBaseReg(0),
- GlobalRetAddr(0),
- VarArgsOffset(0),
- VarArgsBase(0) {}
-
- unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
- void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
-
- unsigned getGlobalRetAddr() const { return GlobalRetAddr; }
- void setGlobalRetAddr(unsigned Reg) { GlobalRetAddr = Reg; }
-
- int getVarArgsOffset() const { return VarArgsOffset; }
- void setVarArgsOffset(int Offset) { VarArgsOffset = Offset; }
-
- int getVarArgsBase() const { return VarArgsBase; }
- void setVarArgsBase(int Base) { VarArgsBase = Base; }
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp
deleted file mode 100644
index 8b6230f..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-//===- AlphaRegisterInfo.cpp - Alpha Register Information -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Alpha implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "reginfo"
-#include "Alpha.h"
-#include "AlphaRegisterInfo.h"
-#include "llvm/Constants.h"
-#include "llvm/Type.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include <cstdlib>
-
-#define GET_REGINFO_TARGET_DESC
-#include "AlphaGenRegisterInfo.inc"
-
-using namespace llvm;
-
-AlphaRegisterInfo::AlphaRegisterInfo(const TargetInstrInfo &tii)
- : AlphaGenRegisterInfo(Alpha::R26), TII(tii) {
-}
-
-static long getUpper16(long l) {
- long y = l / Alpha::IMM_MULT;
- if (l % Alpha::IMM_MULT > Alpha::IMM_HIGH)
- ++y;
- return y;
-}
-
-static long getLower16(long l) {
- long h = getUpper16(l);
- return l - h * Alpha::IMM_MULT;
-}
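
getUpper16/getLower16 implement the same high/low split used for constant
materialization: upper16 rounds to the nearest multiple of 65536 so that the
remainder always fits LDA's signed 16-bit immediate. A standalone check
(plain C++, with Alpha::IMM_MULT = 65536 and Alpha::IMM_HIGH = 32767 inlined):

#include <cassert>

static long upper16(long l) {
  long y = l / 65536;
  if (l % 65536 > 32767) // low half would overflow; carry into the high half
    ++y;
  return y;
}
static long lower16(long l) { return l - upper16(l) * 65536; }

int main() {
  for (long off : {0L, 32767L, 32768L, 70000L, -70000L, 123456789L}) {
    assert(upper16(off) * 65536 + lower16(off) == off);
    assert(lower16(off) >= -32768 && lower16(off) <= 32767);
  }
  return 0;
}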
-
-const unsigned* AlphaRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
- const {
- static const unsigned CalleeSavedRegs[] = {
- Alpha::R9, Alpha::R10,
- Alpha::R11, Alpha::R12,
- Alpha::R13, Alpha::R14,
- Alpha::F2, Alpha::F3,
- Alpha::F4, Alpha::F5,
- Alpha::F6, Alpha::F7,
- Alpha::F8, Alpha::F9, 0
- };
- return CalleeSavedRegs;
-}
-
-BitVector AlphaRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
- BitVector Reserved(getNumRegs());
- Reserved.set(Alpha::R15);
- Reserved.set(Alpha::R29);
- Reserved.set(Alpha::R30);
- Reserved.set(Alpha::R31);
- return Reserved;
-}
-
-//===----------------------------------------------------------------------===//
-// Stack Frame Processing methods
-//===----------------------------------------------------------------------===//
-
-void AlphaRegisterInfo::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- if (TFI->hasFP(MF)) {
- // If we have a frame pointer, turn the stack adjustment pseudos into
- // real stack pointer updates: ADJUSTSTACKDOWN becomes
- // 'lda $30, -<amt>($30)' and ADJUSTSTACKUP becomes 'lda $30, <amt>($30)'.
- MachineInstr *Old = I;
- uint64_t Amount = Old->getOperand(0).getImm();
- if (Amount != 0) {
- // We need to keep the stack aligned properly. To do this, we round the
- // amount of space needed for the outgoing arguments up to the next
- // alignment boundary.
- unsigned Align = TFI->getStackAlignment();
- Amount = (Amount+Align-1)/Align*Align;
-
- MachineInstr *New;
- if (Old->getOpcode() == Alpha::ADJUSTSTACKDOWN) {
- New=BuildMI(MF, Old->getDebugLoc(), TII.get(Alpha::LDA), Alpha::R30)
- .addImm(-Amount).addReg(Alpha::R30);
- } else {
- assert(Old->getOpcode() == Alpha::ADJUSTSTACKUP);
- New=BuildMI(MF, Old->getDebugLoc(), TII.get(Alpha::LDA), Alpha::R30)
- .addImm(Amount).addReg(Alpha::R30);
- }
-
- // Replace the pseudo instruction with a new instruction...
- MBB.insert(I, New);
- }
- }
-
- MBB.erase(I);
-}
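
The rounding in the middle of that function is the usual round-up-to-alignment
idiom: (Amount + Align - 1) / Align * Align is the smallest multiple of Align
that is greater than or equal to Amount. A quick check (plain C++):

#include <cassert>

static unsigned long roundUp(unsigned long amount, unsigned long align) {
  return (amount + align - 1) / align * align;
}

int main() {
  assert(roundUp(0, 16) == 0);
  assert(roundUp(1, 16) == 16);
  assert(roundUp(20, 16) == 32); // 20 bytes of outgoing args, 16-byte stack
  assert(roundUp(32, 16) == 32);
  return 0;
}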
-
-//Alpha has a slightly funny stack:
-//Args
-//<- incoming SP
-//fixed locals (and spills, callee saved, etc)
-//<- FP
-//variable locals
-//<- SP
-
-void
-AlphaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, RegScavenger *RS) const {
- assert(SPAdj == 0 && "Unexpected");
-
- unsigned i = 0;
- MachineInstr &MI = *II;
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- bool FP = TFI->hasFP(MF);
-
- while (!MI.getOperand(i).isFI()) {
- ++i;
- assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
- }
-
- int FrameIndex = MI.getOperand(i).getIndex();
-
- // Add the base register of R30 (SP) or R15 (FP).
- MI.getOperand(i + 1).ChangeToRegister(FP ? Alpha::R15 : Alpha::R30, false);
-
- // Now add the frame object offset to the offset from the virtual frame index.
- int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
-
- DEBUG(errs() << "FI: " << FrameIndex << " Offset: " << Offset << "\n");
-
- Offset += MF.getFrameInfo()->getStackSize();
-
- DEBUG(errs() << "Corrected Offset " << Offset
- << " for stack size: " << MF.getFrameInfo()->getStackSize() << "\n");
-
- if (Offset > Alpha::IMM_HIGH || Offset < Alpha::IMM_LOW) {
- DEBUG(errs() << "Unconditionally using R28 for evil purposes Offset: "
- << Offset << "\n");
- //so in this case, we need to use a temporary register, and move the
- //original inst off the SP/FP
- //fix up the old:
- MI.getOperand(i + 1).ChangeToRegister(Alpha::R28, false);
- MI.getOperand(i).ChangeToImmediate(getLower16(Offset));
- //insert the new
- MachineInstr* nMI=BuildMI(MF, MI.getDebugLoc(),
- TII.get(Alpha::LDAH), Alpha::R28)
- .addImm(getUpper16(Offset)).addReg(FP ? Alpha::R15 : Alpha::R30);
- MBB.insert(II, nMI);
- } else {
- MI.getOperand(i).ChangeToImmediate(Offset);
- }
-}
-
-unsigned AlphaRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- return TFI->hasFP(MF) ? Alpha::R15 : Alpha::R30;
-}
-
-unsigned AlphaRegisterInfo::getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
- return 0;
-}
-
-unsigned AlphaRegisterInfo::getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
- return 0;
-}
-
-std::string AlphaRegisterInfo::getPrettyName(unsigned reg)
-{
- std::string s(AlphaRegDesc[reg].Name);
- return s;
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
deleted file mode 100644
index e35be27..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===- AlphaRegisterInfo.h - Alpha Register Information Impl ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Alpha implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHAREGISTERINFO_H
-#define ALPHAREGISTERINFO_H
-
-#include "llvm/Target/TargetRegisterInfo.h"
-
-#define GET_REGINFO_HEADER
-#include "AlphaGenRegisterInfo.inc"
-
-namespace llvm {
-
-class TargetInstrInfo;
-class Type;
-
-struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
- const TargetInstrInfo &TII;
-
- AlphaRegisterInfo(const TargetInstrInfo &tii);
-
- /// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
-
- BitVector getReservedRegs(const MachineFunction &MF) const;
-
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
-
- void eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, RegScavenger *RS = NULL) const;
-
- // Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
-
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
-
- static std::string getPrettyName(unsigned reg);
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.td b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.td
deleted file mode 100644
index 32120d7..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.td
+++ /dev/null
@@ -1,133 +0,0 @@
-//===- AlphaRegisterInfo.td - The Alpha Register File ------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the Alpha register set.
-//
-//===----------------------------------------------------------------------===//
-
-class AlphaReg<string n> : Register<n> {
- field bits<5> Num;
- let Namespace = "Alpha";
-}
-
-// We identify all our registers with a 5-bit ID, for consistency's sake.
-
-// GPR - One of the 32 64-bit general-purpose registers
-class GPR<bits<5> num, string n> : AlphaReg<n> {
- let Num = num;
-}
-
-// FPR - One of the 32 64-bit floating-point registers
-class FPR<bits<5> num, string n> : AlphaReg<n> {
- let Num = num;
-}
-
-//#define FP $15
-//#define RA $26
-//#define PV $27
-//#define GP $29
-//#define SP $30
-
-// General-purpose registers
-def R0 : GPR< 0, "$0">, DwarfRegNum<[0]>;
-def R1 : GPR< 1, "$1">, DwarfRegNum<[1]>;
-def R2 : GPR< 2, "$2">, DwarfRegNum<[2]>;
-def R3 : GPR< 3, "$3">, DwarfRegNum<[3]>;
-def R4 : GPR< 4, "$4">, DwarfRegNum<[4]>;
-def R5 : GPR< 5, "$5">, DwarfRegNum<[5]>;
-def R6 : GPR< 6, "$6">, DwarfRegNum<[6]>;
-def R7 : GPR< 7, "$7">, DwarfRegNum<[7]>;
-def R8 : GPR< 8, "$8">, DwarfRegNum<[8]>;
-def R9 : GPR< 9, "$9">, DwarfRegNum<[9]>;
-def R10 : GPR<10, "$10">, DwarfRegNum<[10]>;
-def R11 : GPR<11, "$11">, DwarfRegNum<[11]>;
-def R12 : GPR<12, "$12">, DwarfRegNum<[12]>;
-def R13 : GPR<13, "$13">, DwarfRegNum<[13]>;
-def R14 : GPR<14, "$14">, DwarfRegNum<[14]>;
-def R15 : GPR<15, "$15">, DwarfRegNum<[15]>;
-def R16 : GPR<16, "$16">, DwarfRegNum<[16]>;
-def R17 : GPR<17, "$17">, DwarfRegNum<[17]>;
-def R18 : GPR<18, "$18">, DwarfRegNum<[18]>;
-def R19 : GPR<19, "$19">, DwarfRegNum<[19]>;
-def R20 : GPR<20, "$20">, DwarfRegNum<[20]>;
-def R21 : GPR<21, "$21">, DwarfRegNum<[21]>;
-def R22 : GPR<22, "$22">, DwarfRegNum<[22]>;
-def R23 : GPR<23, "$23">, DwarfRegNum<[23]>;
-def R24 : GPR<24, "$24">, DwarfRegNum<[24]>;
-def R25 : GPR<25, "$25">, DwarfRegNum<[25]>;
-def R26 : GPR<26, "$26">, DwarfRegNum<[26]>;
-def R27 : GPR<27, "$27">, DwarfRegNum<[27]>;
-def R28 : GPR<28, "$28">, DwarfRegNum<[28]>;
-def R29 : GPR<29, "$29">, DwarfRegNum<[29]>;
-def R30 : GPR<30, "$30">, DwarfRegNum<[30]>;
-def R31 : GPR<31, "$31">, DwarfRegNum<[31]>;
-
-// Floating-point registers
-def F0 : FPR< 0, "$f0">, DwarfRegNum<[33]>;
-def F1 : FPR< 1, "$f1">, DwarfRegNum<[34]>;
-def F2 : FPR< 2, "$f2">, DwarfRegNum<[35]>;
-def F3 : FPR< 3, "$f3">, DwarfRegNum<[36]>;
-def F4 : FPR< 4, "$f4">, DwarfRegNum<[37]>;
-def F5 : FPR< 5, "$f5">, DwarfRegNum<[38]>;
-def F6 : FPR< 6, "$f6">, DwarfRegNum<[39]>;
-def F7 : FPR< 7, "$f7">, DwarfRegNum<[40]>;
-def F8 : FPR< 8, "$f8">, DwarfRegNum<[41]>;
-def F9 : FPR< 9, "$f9">, DwarfRegNum<[42]>;
-def F10 : FPR<10, "$f10">, DwarfRegNum<[43]>;
-def F11 : FPR<11, "$f11">, DwarfRegNum<[44]>;
-def F12 : FPR<12, "$f12">, DwarfRegNum<[45]>;
-def F13 : FPR<13, "$f13">, DwarfRegNum<[46]>;
-def F14 : FPR<14, "$f14">, DwarfRegNum<[47]>;
-def F15 : FPR<15, "$f15">, DwarfRegNum<[48]>;
-def F16 : FPR<16, "$f16">, DwarfRegNum<[49]>;
-def F17 : FPR<17, "$f17">, DwarfRegNum<[50]>;
-def F18 : FPR<18, "$f18">, DwarfRegNum<[51]>;
-def F19 : FPR<19, "$f19">, DwarfRegNum<[52]>;
-def F20 : FPR<20, "$f20">, DwarfRegNum<[53]>;
-def F21 : FPR<21, "$f21">, DwarfRegNum<[54]>;
-def F22 : FPR<22, "$f22">, DwarfRegNum<[55]>;
-def F23 : FPR<23, "$f23">, DwarfRegNum<[56]>;
-def F24 : FPR<24, "$f24">, DwarfRegNum<[57]>;
-def F25 : FPR<25, "$f25">, DwarfRegNum<[58]>;
-def F26 : FPR<26, "$f26">, DwarfRegNum<[59]>;
-def F27 : FPR<27, "$f27">, DwarfRegNum<[60]>;
-def F28 : FPR<28, "$f28">, DwarfRegNum<[61]>;
-def F29 : FPR<29, "$f29">, DwarfRegNum<[62]>;
-def F30 : FPR<30, "$f30">, DwarfRegNum<[63]>;
-def F31 : FPR<31, "$f31">, DwarfRegNum<[64]>;
-
- // //#define FP $15
- // //#define RA $26
- // //#define PV $27
- // //#define GP $29
- // //#define SP $30
- // $28 is undefined after any and all calls
-
-/// Register classes
-def GPRC : RegisterClass<"Alpha", [i64], 64, (add
- // Volatile
- R0, R1, R2, R3, R4, R5, R6, R7, R8, R16, R17, R18, R19, R20, R21, R22,
- R23, R24, R25, R28,
- //Special meaning, but volatile
- R27, //procedure address
- R26, //return address
- R29, //global offset table address
- // Non-volatile
- R9, R10, R11, R12, R13, R14,
-// Don't allocate 15, 30, 31
- R15, R30, R31)>; //zero
-
-def F4RC : RegisterClass<"Alpha", [f32], 64, (add F0, F1,
- F10, F11, F12, F13, F14, F15, F16, F17, F18, F19,
- F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F30,
- // Saved:
- F2, F3, F4, F5, F6, F7, F8, F9,
- F31)>; //zero
-
-def F8RC : RegisterClass<"Alpha", [f64], 64, (add F4RC)>;
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaRelocations.h b/contrib/llvm/lib/Target/Alpha/AlphaRelocations.h
deleted file mode 100644
index 4c92045..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaRelocations.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===- AlphaRelocations.h - Alpha Code Relocations --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the Alpha target-specific relocation types.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHARELOCATIONS_H
-#define ALPHARELOCATIONS_H
-
-#include "llvm/CodeGen/MachineRelocation.h"
-
-namespace llvm {
- namespace Alpha {
- enum RelocationType {
- reloc_literal,
- reloc_gprellow,
- reloc_gprelhigh,
- reloc_gpdist,
- reloc_bsr
- };
- }
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaSchedule.td b/contrib/llvm/lib/Target/Alpha/AlphaSchedule.td
deleted file mode 100644
index 3703dd4..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaSchedule.td
+++ /dev/null
@@ -1,85 +0,0 @@
-//===- AlphaSchedule.td - Alpha Scheduling Definitions -----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//This is table 2-2 from the 21264 compiler writer's guide
-//modified some
-
-//Pipelines
-
-def L0 : FuncUnit;
-def L1 : FuncUnit;
-def FST0 : FuncUnit;
-def FST1 : FuncUnit;
-def U0 : FuncUnit;
-def U1 : FuncUnit;
-def FA : FuncUnit;
-def FM : FuncUnit;
-
-def s_ild : InstrItinClass;
-def s_fld : InstrItinClass;
-def s_ist : InstrItinClass;
-def s_fst : InstrItinClass;
-def s_lda : InstrItinClass;
-def s_rpcc : InstrItinClass;
-def s_rx : InstrItinClass;
-def s_mxpr : InstrItinClass;
-def s_icbr : InstrItinClass;
-def s_ubr : InstrItinClass;
-def s_jsr : InstrItinClass;
-def s_iadd : InstrItinClass;
-def s_ilog : InstrItinClass;
-def s_ishf : InstrItinClass;
-def s_cmov : InstrItinClass;
-def s_imul : InstrItinClass;
-def s_imisc : InstrItinClass;
-def s_fbr : InstrItinClass;
-def s_fadd : InstrItinClass;
-def s_fmul : InstrItinClass;
-def s_fcmov : InstrItinClass;
-def s_fdivt : InstrItinClass;
-def s_fdivs : InstrItinClass;
-def s_fsqrts: InstrItinClass;
-def s_fsqrtt: InstrItinClass;
-def s_ftoi : InstrItinClass;
-def s_itof : InstrItinClass;
-def s_pseudo : InstrItinClass;
-
-//Table 2-4 Instruction Class Latency in Cycles
-//modified some
-
-def Alpha21264Itineraries : ProcessorItineraries<
- [L0, L1, FST0, FST1, U0, U1, FA, FM], [], [
- InstrItinData<s_ild , [InstrStage<3, [L0, L1]>]>,
- InstrItinData<s_fld , [InstrStage<4, [L0, L1]>]>,
- InstrItinData<s_ist , [InstrStage<0, [L0, L1]>]>,
- InstrItinData<s_fst , [InstrStage<0, [FST0, FST1, L0, L1]>]>,
- InstrItinData<s_lda , [InstrStage<1, [L0, L1, U0, U1]>]>,
- InstrItinData<s_rpcc , [InstrStage<1, [L1]>]>,
- InstrItinData<s_rx , [InstrStage<1, [L1]>]>,
- InstrItinData<s_mxpr , [InstrStage<1, [L0, L1]>]>,
- InstrItinData<s_icbr , [InstrStage<0, [U0, U1]>]>,
- InstrItinData<s_ubr , [InstrStage<3, [U0, U1]>]>,
- InstrItinData<s_jsr , [InstrStage<3, [L0]>]>,
- InstrItinData<s_iadd , [InstrStage<1, [L0, U0, L1, U1]>]>,
- InstrItinData<s_ilog , [InstrStage<1, [L0, U0, L1, U1]>]>,
- InstrItinData<s_ishf , [InstrStage<1, [U0, U1]>]>,
- InstrItinData<s_cmov , [InstrStage<1, [L0, U0, L1, U1]>]>,
- InstrItinData<s_imul , [InstrStage<7, [U1]>]>,
- InstrItinData<s_imisc , [InstrStage<3, [U0]>]>,
- InstrItinData<s_fbr , [InstrStage<0, [FA]>]>,
- InstrItinData<s_fadd , [InstrStage<6, [FA]>]>,
- InstrItinData<s_fmul , [InstrStage<6, [FM]>]>,
- InstrItinData<s_fcmov , [InstrStage<6, [FA]>]>,
- InstrItinData<s_fdivs , [InstrStage<12, [FA]>]>,
- InstrItinData<s_fdivt , [InstrStage<15, [FA]>]>,
- InstrItinData<s_fsqrts , [InstrStage<18, [FA]>]>,
- InstrItinData<s_fsqrtt , [InstrStage<33, [FA]>]>,
- InstrItinData<s_ftoi , [InstrStage<3, [FST0, FST1, L0, L1]>]>,
- InstrItinData<s_itof , [InstrStage<4, [L0, L1]>]>
-]>;
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/Alpha/AlphaSelectionDAGInfo.cpp
deleted file mode 100644
index f1958fe..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaSelectionDAGInfo.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- AlphaSelectionDAGInfo.cpp - Alpha SelectionDAG Info ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the AlphaSelectionDAGInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "alpha-selectiondag-info"
-#include "AlphaTargetMachine.h"
-using namespace llvm;
-
-AlphaSelectionDAGInfo::AlphaSelectionDAGInfo(const AlphaTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
-
-AlphaSelectionDAGInfo::~AlphaSelectionDAGInfo() {
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaSelectionDAGInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaSelectionDAGInfo.h
deleted file mode 100644
index 3405cc0..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaSelectionDAGInfo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===-- AlphaSelectionDAGInfo.h - Alpha SelectionDAG Info -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the Alpha subclass for TargetSelectionDAGInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHASELECTIONDAGINFO_H
-#define ALPHASELECTIONDAGINFO_H
-
-#include "llvm/Target/TargetSelectionDAGInfo.h"
-
-namespace llvm {
-
-class AlphaTargetMachine;
-
-class AlphaSelectionDAGInfo : public TargetSelectionDAGInfo {
-public:
- explicit AlphaSelectionDAGInfo(const AlphaTargetMachine &TM);
- ~AlphaSelectionDAGInfo();
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaSubtarget.cpp b/contrib/llvm/lib/Target/Alpha/AlphaSubtarget.cpp
deleted file mode 100644
index bd55ce9..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaSubtarget.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-//===- AlphaSubtarget.cpp - Alpha Subtarget Information ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the Alpha specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AlphaSubtarget.h"
-#include "Alpha.h"
-
-#define GET_SUBTARGETINFO_TARGET_DESC
-#define GET_SUBTARGETINFO_CTOR
-#include "AlphaGenSubtargetInfo.inc"
-
-using namespace llvm;
-
-AlphaSubtarget::AlphaSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS)
- : AlphaGenSubtargetInfo(TT, CPU, FS), HasCT(false) {
- std::string CPUName = CPU;
- if (CPUName.empty())
- CPUName = "generic";
-
- // Parse features string.
- ParseSubtargetFeatures(CPUName, FS);
-
- // Initialize scheduling itinerary for the specified CPU.
- InstrItins = getInstrItineraryForCPU(CPUName);
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaSubtarget.h b/contrib/llvm/lib/Target/Alpha/AlphaSubtarget.h
deleted file mode 100644
index 70b3116..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaSubtarget.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===-- AlphaSubtarget.h - Define Subtarget for the Alpha ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the Alpha specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHASUBTARGET_H
-#define ALPHASUBTARGET_H
-
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include "llvm/MC/MCInstrItineraries.h"
-#include <string>
-
-#define GET_SUBTARGETINFO_HEADER
-#include "AlphaGenSubtargetInfo.inc"
-
-namespace llvm {
-class StringRef;
-
-class AlphaSubtarget : public AlphaGenSubtargetInfo {
-protected:
-
- bool HasCT;
-
- InstrItineraryData InstrItins;
-
-public:
- /// This constructor initializes the data members to match those
- /// of the specified triple.
- ///
- AlphaSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS);
-
- /// ParseSubtargetFeatures - Parses features string setting specified
- /// subtarget options. Definition of function is auto generated by tblgen.
- void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
-
- bool hasCT() const { return HasCT; }
-};
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp b/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp
deleted file mode 100644
index fc9a677..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-//===-- AlphaTargetMachine.cpp - Define TargetMachine for Alpha -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-#include "Alpha.h"
-#include "AlphaTargetMachine.h"
-#include "llvm/PassManager.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/TargetRegistry.h"
-using namespace llvm;
-
-extern "C" void LLVMInitializeAlphaTarget() {
- // Register the target.
- RegisterTargetMachine<AlphaTargetMachine> X(TheAlphaTarget);
-}
-
-AlphaTargetMachine::AlphaTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
- DataLayout("e-f128:128:128-n64"),
- FrameLowering(Subtarget),
- Subtarget(TT, CPU, FS),
- TLInfo(*this),
- TSInfo(*this) {
-}
-
-//===----------------------------------------------------------------------===//
-// Pass Pipeline Configuration
-//===----------------------------------------------------------------------===//
-
-bool AlphaTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createAlphaISelDag(*this));
- return false;
-}
-bool AlphaTargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- // Must run branch selection immediately preceding the asm printer
- PM.add(createAlphaBranchSelectionPass());
- PM.add(createAlphaLLRPPass(*this));
- return false;
-}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h b/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h
deleted file mode 100644
index 48bb948..0000000
--- a/contrib/llvm/lib/Target/Alpha/AlphaTargetMachine.h
+++ /dev/null
@@ -1,66 +0,0 @@
-//===-- AlphaTargetMachine.h - Define TargetMachine for Alpha ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the Alpha-specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ALPHA_TARGETMACHINE_H
-#define ALPHA_TARGETMACHINE_H
-
-#include "AlphaInstrInfo.h"
-#include "AlphaISelLowering.h"
-#include "AlphaFrameLowering.h"
-#include "AlphaSelectionDAGInfo.h"
-#include "AlphaSubtarget.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameLowering.h"
-
-namespace llvm {
-
-class GlobalValue;
-
-class AlphaTargetMachine : public LLVMTargetMachine {
- const TargetData DataLayout; // Calculates type size & alignment
- AlphaInstrInfo InstrInfo;
- AlphaFrameLowering FrameLowering;
- AlphaSubtarget Subtarget;
- AlphaTargetLowering TLInfo;
- AlphaSelectionDAGInfo TSInfo;
-
-public:
- AlphaTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
-
- virtual const AlphaInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameLowering *getFrameLowering() const {
- return &FrameLowering;
- }
- virtual const AlphaSubtarget *getSubtargetImpl() const{ return &Subtarget; }
- virtual const AlphaRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
- }
- virtual const AlphaTargetLowering* getTargetLowering() const {
- return &TLInfo;
- }
- virtual const AlphaSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
- }
- virtual const TargetData *getTargetData() const { return &DataLayout; }
-
- // Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCAsmInfo.cpp b/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCAsmInfo.cpp
deleted file mode 100644
index a35e884..0000000
--- a/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCAsmInfo.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- AlphaMCAsmInfo.cpp - Alpha asm properties ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the AlphaMCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AlphaMCAsmInfo.h"
-using namespace llvm;
-
-AlphaMCAsmInfo::AlphaMCAsmInfo(const Target &T, StringRef TT) {
- AlignmentIsInBytes = false;
- PrivateGlobalPrefix = "$";
- GPRel32Directive = ".gprel32";
- WeakRefDirective = "\t.weak\t";
- HasSetDirective = false;
-}
diff --git a/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCTargetDesc.cpp b/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCTargetDesc.cpp
deleted file mode 100644
index 4ad021c..0000000
--- a/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCTargetDesc.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-//===-- AlphaMCTargetDesc.cpp - Alpha Target Descriptions -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides Alpha specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AlphaMCTargetDesc.h"
-#include "AlphaMCAsmInfo.h"
-#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_INSTRINFO_MC_DESC
-#include "AlphaGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_MC_DESC
-#include "AlphaGenSubtargetInfo.inc"
-
-#define GET_REGINFO_MC_DESC
-#include "AlphaGenRegisterInfo.inc"
-
-using namespace llvm;
-
-
-static MCInstrInfo *createAlphaMCInstrInfo() {
- MCInstrInfo *X = new MCInstrInfo();
- InitAlphaMCInstrInfo(X);
- return X;
-}
-
-static MCRegisterInfo *createAlphaMCRegisterInfo(StringRef TT) {
- MCRegisterInfo *X = new MCRegisterInfo();
- InitAlphaMCRegisterInfo(X, Alpha::R26);
- return X;
-}
-
-static MCSubtargetInfo *createAlphaMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
- MCSubtargetInfo *X = new MCSubtargetInfo();
- InitAlphaMCSubtargetInfo(X, TT, CPU, FS);
- return X;
-}
-
-static MCCodeGenInfo *createAlphaMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- X->InitMCCodeGenInfo(Reloc::PIC_, CM);
- return X;
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeAlphaTargetMC() {
- // Register the MC asm info.
- RegisterMCAsmInfo<AlphaMCAsmInfo> X(TheAlphaTarget);
-
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheAlphaTarget,
- createAlphaMCCodeGenInfo);
-
- // Register the MC instruction info.
- TargetRegistry::RegisterMCInstrInfo(TheAlphaTarget, createAlphaMCInstrInfo);
-
- // Register the MC register info.
- TargetRegistry::RegisterMCRegInfo(TheAlphaTarget, createAlphaMCRegisterInfo);
-
- // Register the MC subtarget info.
- TargetRegistry::RegisterMCSubtargetInfo(TheAlphaTarget,
- createAlphaMCSubtargetInfo);
-}
diff --git a/contrib/llvm/lib/Target/Alpha/TargetInfo/AlphaTargetInfo.cpp b/contrib/llvm/lib/Target/Alpha/TargetInfo/AlphaTargetInfo.cpp
deleted file mode 100644
index bdc69e7..0000000
--- a/contrib/llvm/lib/Target/Alpha/TargetInfo/AlphaTargetInfo.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//===-- AlphaTargetInfo.cpp - Alpha Target Implementation -----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Alpha.h"
-#include "llvm/Module.h"
-#include "llvm/Support/TargetRegistry.h"
-using namespace llvm;
-
-llvm::Target llvm::TheAlphaTarget;
-
-extern "C" void LLVMInitializeAlphaTargetInfo() {
- RegisterTarget<Triple::alpha, /*HasJIT=*/true>
- X(TheAlphaTarget, "alpha", "Alpha [experimental]");
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/Blackfin.h b/contrib/llvm/lib/Target/Blackfin/Blackfin.h
deleted file mode 100644
index a00ff4c..0000000
--- a/contrib/llvm/lib/Target/Blackfin/Blackfin.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//=== Blackfin.h - Top-level interface for Blackfin backend -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the entry points for global functions defined in the LLVM
-// Blackfin back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TARGET_BLACKFIN_H
-#define TARGET_BLACKFIN_H
-
-#include "MCTargetDesc/BlackfinMCTargetDesc.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-
- class FunctionPass;
- class BlackfinTargetMachine;
-
- FunctionPass *createBlackfinISelDag(BlackfinTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/Blackfin.td b/contrib/llvm/lib/Target/Blackfin/Blackfin.td
deleted file mode 100644
index cd90962..0000000
--- a/contrib/llvm/lib/Target/Blackfin/Blackfin.td
+++ /dev/null
@@ -1,202 +0,0 @@
-//===- Blackfin.td - Describe the Blackfin Target Machine --*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Target-independent interfaces which we are implementing
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-
-//===----------------------------------------------------------------------===//
-// Blackfin Subtarget features.
-//===----------------------------------------------------------------------===//
-
-def FeatureSDRAM : SubtargetFeature<"sdram", "sdram", "true",
- "Build for SDRAM">;
-
-def FeatureICPLB : SubtargetFeature<"icplb", "icplb", "true",
- "Assume instruction cache lookaside buffers are enabled at runtime">;
-
-//===----------------------------------------------------------------------===//
-// Bugs in the silicon become workarounds in the compiler.
-// See http://www.analog.com/ for the full list of IC anomalies.
-//===----------------------------------------------------------------------===//
-
-def WA_MI_SHIFT : SubtargetFeature<"mi-shift-anomaly","wa_mi_shift", "true",
- "Work around 05000074 - "
- "Multi-Issue Instruction with dsp32shiftimm and P-reg Store">;
-
-def WA_CSYNC : SubtargetFeature<"csync-anomaly","wa_csync", "true",
- "Work around 05000244 - "
- "If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control">;
-
-def WA_SPECLD : SubtargetFeature<"specld-anomaly","wa_specld", "true",
- "Work around 05000245 - "
- "Access in the Shadow of a Conditional Branch">;
-
-def WA_HWLOOP : SubtargetFeature<"hwloop-anomaly","wa_hwloop", "true",
- "Work around 05000257 - "
- "Interrupt/Exception During Short Hardware Loop">;
-
-def WA_MMR_STALL : SubtargetFeature<"mmr-stall-anomaly","wa_mmr_stall", "true",
- "Work around 05000283 - "
- "System MMR Write Is Stalled Indefinitely when Killed">;
-
-def WA_LCREGS : SubtargetFeature<"lcregs-anomaly","wa_lcregs", "true",
- "Work around 05000312 - "
- "SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted">;
-
-def WA_KILLED_MMR : SubtargetFeature<"killed-mmr-anomaly",
- "wa_killed_mmr", "true",
- "Work around 05000315 - "
- "Killed System MMR Write Completes Erroneously on Next System MMR Access">;
-
-def WA_RETS : SubtargetFeature<"rets-anomaly", "wa_rets", "true",
- "Work around 05000371 - "
- "Possible RETS Register Corruption when Subroutine Is under 5 Cycles">;
-
-def WA_IND_CALL : SubtargetFeature<"ind-call-anomaly", "wa_ind_call", "true",
- "Work around 05000426 - "
- "Speculative Fetches of Indirect-Pointer Instructions">;
-
-//===----------------------------------------------------------------------===//
-// Register File, Calling Conv, Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-include "BlackfinRegisterInfo.td"
-include "BlackfinCallingConv.td"
-include "BlackfinIntrinsics.td"
-include "BlackfinInstrInfo.td"
-
-def BlackfinInstrInfo : InstrInfo {}
-
-//===----------------------------------------------------------------------===//
-// Blackfin processors supported.
-//===----------------------------------------------------------------------===//
-
-class Proc<string Name, string Suffix, list<SubtargetFeature> Features>
- : Processor<!strconcat(Name, Suffix), NoItineraries, Features>;
-
-def : Proc<"generic", "", []>;
-
-multiclass Core<string Name,string Suffix,
- list<SubtargetFeature> Features> {
- def : Proc<Name, Suffix, Features>;
- def : Proc<Name, "", Features>;
- def : Proc<Name, "-none", []>;
-}
-
-multiclass CoreEdinburgh<string Name>
- : Core<Name, "-0.6", [WA_MI_SHIFT, WA_SPECLD, WA_LCREGS]> {
- def : Proc<Name, "-0.5",
- [WA_MI_SHIFT, WA_SPECLD, WA_MMR_STALL, WA_LCREGS, WA_KILLED_MMR,
- WA_RETS]>;
- def : Proc<Name, "-0.4",
- [WA_MI_SHIFT, WA_CSYNC, WA_SPECLD, WA_HWLOOP, WA_MMR_STALL, WA_LCREGS,
- WA_KILLED_MMR, WA_RETS]>;
- def : Proc<Name, "-0.3",
- [WA_MI_SHIFT, WA_CSYNC, WA_SPECLD, WA_HWLOOP, WA_MMR_STALL, WA_LCREGS,
- WA_KILLED_MMR, WA_RETS]>;
- def : Proc<Name, "-any",
- [WA_MI_SHIFT, WA_CSYNC, WA_SPECLD, WA_HWLOOP, WA_MMR_STALL, WA_LCREGS,
- WA_KILLED_MMR, WA_RETS]>;
-}
-multiclass CoreBraemar<string Name>
- : Core<Name, "-0.3",
- [WA_MI_SHIFT, WA_SPECLD, WA_LCREGS, WA_RETS, WA_IND_CALL]> {
- def : Proc<Name, "-0.2",
- [WA_MI_SHIFT, WA_CSYNC, WA_SPECLD, WA_HWLOOP, WA_MMR_STALL, WA_LCREGS,
- WA_KILLED_MMR, WA_RETS, WA_IND_CALL]>;
- def : Proc<Name, "-any",
- [WA_MI_SHIFT, WA_CSYNC, WA_SPECLD, WA_HWLOOP, WA_MMR_STALL, WA_LCREGS,
- WA_KILLED_MMR, WA_RETS, WA_IND_CALL]>;
-}
-multiclass CoreStirling<string Name>
- : Core<Name, "-0.5", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]> {
- def : Proc<Name, "-0.4",
- [WA_MI_SHIFT, WA_SPECLD, WA_LCREGS, WA_RETS, WA_IND_CALL]>;
- def : Proc<Name, "-0.3",
- [WA_MI_SHIFT, WA_SPECLD, WA_MMR_STALL, WA_LCREGS, WA_KILLED_MMR,
- WA_RETS, WA_IND_CALL]>;
- def : Proc<Name, "-any",
- [WA_MI_SHIFT, WA_SPECLD, WA_MMR_STALL, WA_LCREGS, WA_KILLED_MMR,
- WA_RETS, WA_IND_CALL]>;
-}
-multiclass CoreMoab<string Name>
- : Core<Name, "-0.3", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]> {
- def : Proc<Name, "-0.2", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]>;
- def : Proc<Name, "-0.1", [WA_MI_SHIFT, WA_SPECLD, WA_RETS, WA_IND_CALL]>;
- def : Proc<Name, "-0.0",
- [WA_MI_SHIFT, WA_SPECLD, WA_LCREGS, WA_RETS, WA_IND_CALL]>;
- def : Proc<Name, "-any",
- [WA_MI_SHIFT, WA_SPECLD, WA_LCREGS, WA_RETS, WA_IND_CALL]>;
-}
-multiclass CoreTeton<string Name>
- : Core<Name, "-0.5",
- [WA_MI_SHIFT, WA_SPECLD, WA_MMR_STALL, WA_LCREGS, WA_KILLED_MMR,
- WA_RETS, WA_IND_CALL]> {
- def : Proc<Name, "-0.3",
- [WA_MI_SHIFT, WA_CSYNC, WA_SPECLD, WA_HWLOOP, WA_MMR_STALL, WA_LCREGS,
- WA_KILLED_MMR, WA_RETS, WA_IND_CALL]>;
- def : Proc<Name, "-any",
- [WA_MI_SHIFT, WA_CSYNC, WA_SPECLD, WA_HWLOOP, WA_MMR_STALL, WA_LCREGS,
- WA_KILLED_MMR, WA_RETS, WA_IND_CALL]>;
-}
-multiclass CoreKookaburra<string Name>
- : Core<Name, "-0.2", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]> {
- def : Proc<Name, "-0.1", [WA_MI_SHIFT, WA_SPECLD, WA_RETS, WA_IND_CALL]>;
- def : Proc<Name, "-0.0", [WA_MI_SHIFT, WA_SPECLD, WA_RETS, WA_IND_CALL]>;
- def : Proc<Name, "-any", [WA_MI_SHIFT, WA_SPECLD, WA_RETS, WA_IND_CALL]>;
-}
-multiclass CoreMockingbird<string Name>
- : Core<Name, "-0.1", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]> {
- def : Proc<Name, "-0.0", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]>;
- def : Proc<Name, "-any", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]>;
-}
-multiclass CoreBrodie<string Name>
- : Core<Name, "-0.1", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]> {
- def : Proc<Name, "-0.0", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]>;
- def : Proc<Name, "-any", [WA_MI_SHIFT, WA_SPECLD, WA_IND_CALL]>;
-}
-
-defm BF512 : CoreBrodie<"bf512">;
-defm BF514 : CoreBrodie<"bf514">;
-defm BF516 : CoreBrodie<"bf516">;
-defm BF518 : CoreBrodie<"bf518">;
-defm BF522 : CoreMockingbird<"bf522">;
-defm BF523 : CoreKookaburra<"bf523">;
-defm BF524 : CoreMockingbird<"bf524">;
-defm BF525 : CoreKookaburra<"bf525">;
-defm BF526 : CoreMockingbird<"bf526">;
-defm BF527 : CoreKookaburra<"bf527">;
-defm BF531 : CoreEdinburgh<"bf531">;
-defm BF532 : CoreEdinburgh<"bf532">;
-defm BF533 : CoreEdinburgh<"bf533">;
-defm BF534 : CoreBraemar<"bf534">;
-defm BF536 : CoreBraemar<"bf536">;
-defm BF537 : CoreBraemar<"bf537">;
-defm BF538 : CoreStirling<"bf538">;
-defm BF539 : CoreStirling<"bf539">;
-defm BF542 : CoreMoab<"bf542">;
-defm BF544 : CoreMoab<"bf544">;
-defm BF548 : CoreMoab<"bf548">;
-defm BF549 : CoreMoab<"bf549">;
-defm BF561 : CoreTeton<"bf561">;
-
-//===----------------------------------------------------------------------===//
-// Declare the target which we are implementing
-//===----------------------------------------------------------------------===//
-
-def Blackfin : Target {
- // Pull in Instruction Info:
- let InstructionSet = BlackfinInstrInfo;
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinAsmPrinter.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinAsmPrinter.cpp
deleted file mode 100644
index ed9844e..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinAsmPrinter.cpp
+++ /dev/null
@@ -1,156 +0,0 @@
-//===-- BlackfinAsmPrinter.cpp - Blackfin LLVM assembly writer ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to GAS-format Blackfin assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "Blackfin.h"
-#include "BlackfinInstrInfo.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
- class BlackfinAsmPrinter : public AsmPrinter {
- public:
- BlackfinAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {}
-
- virtual const char *getPassName() const {
- return "Blackfin Assembly Printer";
- }
-
- void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printMemoryOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
- void printInstruction(const MachineInstr *MI, raw_ostream &O); // autogen'd.
- static const char *getRegisterName(unsigned RegNo);
-
- void EmitInstruction(const MachineInstr *MI) {
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- printInstruction(MI, OS);
- OutStreamer.EmitRawText(OS.str());
- }
- bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
- bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O);
- };
-} // end of anonymous namespace
-
-#include "BlackfinGenAsmWriter.inc"
-
-extern "C" void LLVMInitializeBlackfinAsmPrinter() {
- RegisterAsmPrinter<BlackfinAsmPrinter> X(TheBlackfinTarget);
-}
-
-void BlackfinAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(opNum);
- switch (MO.getType()) {
- case MachineOperand::MO_Register:
- assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
- "Virtual registers should be already mapped!");
- O << getRegisterName(MO.getReg());
- break;
-
- case MachineOperand::MO_Immediate:
- O << MO.getImm();
- break;
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- return;
- case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
- printOffset(MO.getOffset(), O);
- break;
- case MachineOperand::MO_ExternalSymbol:
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
- break;
- case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
- << MO.getIndex();
- break;
- case MachineOperand::MO_JumpTableIndex:
- O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
- << '_' << MO.getIndex();
- break;
- default:
- llvm_unreachable("<unknown operand type>");
- break;
- }
-}
-
-void BlackfinAsmPrinter::printMemoryOperand(const MachineInstr *MI, int opNum,
- raw_ostream &O) {
- printOperand(MI, opNum, O);
-
- if (MI->getOperand(opNum+1).isImm() && MI->getOperand(opNum+1).getImm() == 0)
- return;
-
- O << " + ";
- printOperand(MI, opNum+1, O);
-}
-
-/// PrintAsmOperand - Print out an operand for an inline asm expression.
-///
-bool BlackfinAsmPrinter::PrintAsmOperand(const MachineInstr *MI,
- unsigned OpNo, unsigned AsmVariant,
- const char *ExtraCode,
- raw_ostream &O) {
- if (ExtraCode && ExtraCode[0]) {
- if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
- switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
- case 'r':
- break;
- }
- }
-
- printOperand(MI, OpNo, O);
-
- return false;
-}
-
-bool BlackfinAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
- unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode,
- raw_ostream &O) {
- if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier
-
- O << '[';
- printOperand(MI, OpNo, O);
- O << ']';
-
- return false;
-}
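The operand printing above has one small subtlety: printMemoryOperand suppresses a zero immediate offset, and PrintAsmMemoryOperand wraps the result in brackets for inline asm. A minimal sketch of that formatting, with hypothetical stand-in types rather than the real MachineOperand API:

#include <iostream>
#include <string>

struct MemOperand { std::string Base; long Offset; };

// Print "base" or "base + offset", omitting a zero offset, as
// printMemoryOperand does; the caller adds the surrounding brackets.
static std::string printMemoryOperand(const MemOperand &MO) {
  if (MO.Offset == 0)
    return MO.Base;
  return MO.Base + " + " + std::to_string(MO.Offset);
}

int main() {
  std::cout << '[' << printMemoryOperand({"P0", 8}) << "]\n"; // [P0 + 8]
  std::cout << '[' << printMemoryOperand({"SP", 0}) << "]\n"; // [SP]
}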
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinCallingConv.td b/contrib/llvm/lib/Target/Blackfin/BlackfinCallingConv.td
deleted file mode 100644
index 0abc84c..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinCallingConv.td
+++ /dev/null
@@ -1,30 +0,0 @@
-//===--- BlackfinCallingConv.td - Calling Conventions ------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This describes the calling conventions for the Blackfin architectures.
-//
-//===----------------------------------------------------------------------===//
-
-// Blackfin C Calling convention.
-def CC_Blackfin : CallingConv<[
- CCIfType<[i16], CCPromoteToType<i32>>,
- CCIfSRet<CCAssignToReg<[P0]>>,
- CCAssignToReg<[R0, R1, R2]>,
- CCAssignToStack<4, 4>
-]>;
-
-//===----------------------------------------------------------------------===//
-// Return Value Calling Conventions
-//===----------------------------------------------------------------------===//
-
-// Blackfin C return-value convention.
-def RetCC_Blackfin : CallingConv<[
- CCIfType<[i16], CCPromoteToType<i32>>,
- CCAssignToReg<[R0, R1]>
-]>;
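Read top to bottom, CC_Blackfin promotes i16 arguments to i32, places an sret pointer in P0, fills R0-R2, and spills the rest to 4-byte, 4-aligned stack slots. A toy C++ restatement of that assignment order (illustrative only, not the real CCState machinery):

#include <iostream>
#include <vector>

struct Arg { bool IsSRet; };

int main() {
  // An sret argument followed by four ordinary (already promoted) values.
  std::vector<Arg> Args = {{true}, {false}, {false}, {false}, {false}};
  const char *DataRegs[] = {"R0", "R1", "R2"};
  unsigned NextReg = 0, NextStackOffset = 0;
  for (const Arg &A : Args) {
    if (A.IsSRet)
      std::cout << "P0\n";                      // CCIfSRet<CCAssignToReg<[P0]>>
    else if (NextReg < 3)
      std::cout << DataRegs[NextReg++] << '\n'; // CCAssignToReg<[R0, R1, R2]>
    else {
      std::cout << "stack+" << NextStackOffset << '\n';
      NextStackOffset += 4;                     // CCAssignToStack<4, 4>
    }
  }
}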
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.cpp
deleted file mode 100644
index 0b0984d..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-//====- BlackfinFrameLowering.cpp - Blackfin Frame Information --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Blackfin implementation of TargetFrameLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinFrameLowering.h"
-#include "BlackfinInstrInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetOptions.h"
-
-using namespace llvm;
-
-
-// hasFP - Return true if the specified function should have a dedicated frame
-// pointer register. This is true if the function has variable sized allocas,
-// adjusts its stack, or if frame pointer elimination is disabled.
-bool BlackfinFrameLowering::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) ||
- MFI->adjustsStack() || MFI->hasVarSizedObjects();
-}
-
-// Always reserve a call frame. We don't have enough registers to adjust SP.
-bool BlackfinFrameLowering::
-hasReservedCallFrame(const MachineFunction &MF) const {
- return true;
-}
-
-// Emit a prologue that sets up a stack frame.
-// On function entry, R0-R2 and P0 may hold arguments.
-// R3, P1, and P2 may be used as scratch registers.
-void BlackfinFrameLowering::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const BlackfinRegisterInfo *RegInfo =
- static_cast<const BlackfinRegisterInfo*>(MF.getTarget().getRegisterInfo());
- const BlackfinInstrInfo &TII =
- *static_cast<const BlackfinInstrInfo*>(MF.getTarget().getInstrInfo());
-
- DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- int FrameSize = MFI->getStackSize();
- if (FrameSize%4) {
- FrameSize = (FrameSize+3) & ~3;
- MFI->setStackSize(FrameSize);
- }
-
- if (!hasFP(MF)) {
- assert(!MFI->adjustsStack() &&
- "FP elimination on a non-leaf function is not supported");
- RegInfo->adjustRegister(MBB, MBBI, dl, BF::SP, BF::P1, -FrameSize);
- return;
- }
-
- // emit a LINK instruction
- if (FrameSize <= 0x3ffff) {
- BuildMI(MBB, MBBI, dl, TII.get(BF::LINK)).addImm(FrameSize);
- return;
- }
-
- // Frame is too big, do a manual LINK:
- // [--SP] = RETS;
- // [--SP] = FP;
- // FP = SP;
- // P1 = -FrameSize;
- // SP = SP + P1;
- BuildMI(MBB, MBBI, dl, TII.get(BF::PUSH))
- .addReg(BF::RETS, RegState::Kill);
- BuildMI(MBB, MBBI, dl, TII.get(BF::PUSH))
- .addReg(BF::FP, RegState::Kill);
- BuildMI(MBB, MBBI, dl, TII.get(BF::MOVE), BF::FP)
- .addReg(BF::SP);
- RegInfo->loadConstant(MBB, MBBI, dl, BF::P1, -FrameSize);
- BuildMI(MBB, MBBI, dl, TII.get(BF::ADDpp), BF::SP)
- .addReg(BF::SP, RegState::Kill)
- .addReg(BF::P1, RegState::Kill);
-}
-
-void BlackfinFrameLowering::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const BlackfinRegisterInfo *RegInfo =
- static_cast<const BlackfinRegisterInfo*>(MF.getTarget().getRegisterInfo());
- const BlackfinInstrInfo &TII =
- *static_cast<const BlackfinInstrInfo*>(MF.getTarget().getInstrInfo());
- MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- DebugLoc dl = MBBI->getDebugLoc();
-
- int FrameSize = MFI->getStackSize();
- assert(FrameSize%4 == 0 && "Misaligned frame size");
-
- if (!hasFP(MF)) {
- assert(!MFI->adjustsStack() &&
- "FP elimination on a non-leaf function is not supported");
- RegInfo->adjustRegister(MBB, MBBI, dl, BF::SP, BF::P1, FrameSize);
- return;
- }
-
- // emit an UNLINK instruction
- BuildMI(MBB, MBBI, dl, TII.get(BF::UNLINK));
-}
-
-void BlackfinFrameLowering::
-processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const BlackfinRegisterInfo *RegInfo =
- static_cast<const BlackfinRegisterInfo*>(MF.getTarget().getRegisterInfo());
- const TargetRegisterClass *RC = BF::DPRegisterClass;
-
- if (RegInfo->requiresRegisterScavenging(MF)) {
- // Reserve a slot close to SP or frame pointer.
- RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
-}
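Two details of the prologue above are worth spelling out: the frame size is first rounded up to a multiple of 4, and a single LINK instruction is only emitted when the rounded size fits its 0x3ffff limit; anything larger needs the manual PUSH RETS / PUSH FP / FP = SP / P1 = -size / SP += P1 sequence. A small sketch of that decision:

#include <cstdint>
#include <iostream>

int main() {
  for (int64_t FrameSize : {10, 0x3ffff, 0x40000}) {
    // Round up to the next multiple of 4, as emitPrologue does.
    int64_t Aligned = (FrameSize + 3) & ~int64_t(3);
    if (Aligned <= 0x3ffff)
      std::cout << "LINK " << Aligned << ";\n";
    else
      std::cout << "manual LINK sequence for " << Aligned << " bytes\n";
  }
}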
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.h b/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.h
deleted file mode 100644
index 169aa8e..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinFrameLowering.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//=- BlackfinFrameLowering.h - Define frame lowering for Blackfin -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the Blackfin implementation of TargetFrameLowering.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFIN_FRAMEINFO_H
-#define BLACKFIN_FRAMEINFO_H
-
-#include "Blackfin.h"
-#include "BlackfinSubtarget.h"
-#include "llvm/Target/TargetFrameLowering.h"
-
-namespace llvm {
- class BlackfinSubtarget;
-
-class BlackfinFrameLowering : public TargetFrameLowering {
-protected:
- const BlackfinSubtarget &STI;
-
-public:
- explicit BlackfinFrameLowering(const BlackfinSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 4, 0), STI(sti) {
- }
-
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
- bool hasFP(const MachineFunction &MF) const;
- bool hasReservedCallFrame(const MachineFunction &MF) const;
-
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
deleted file mode 100644
index 215ca43..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-//===- BlackfinISelDAGToDAG.cpp - A dag to dag inst selector for Blackfin -===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an instruction selector for the Blackfin target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Blackfin.h"
-#include "BlackfinTargetMachine.h"
-#include "BlackfinRegisterInfo.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Instruction Selector Implementation
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-/// BlackfinDAGToDAGISel - Blackfin-specific code to select Blackfin machine
-/// instructions for SelectionDAG operations.
-namespace {
- class BlackfinDAGToDAGISel : public SelectionDAGISel {
- /// Subtarget - Keep a pointer to the Blackfin Subtarget around so that we
- /// can make the right decision when generating code for different targets.
- //const BlackfinSubtarget &Subtarget;
- public:
- BlackfinDAGToDAGISel(BlackfinTargetMachine &TM, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(TM, OptLevel) {}
-
- virtual void PostprocessISelDAG();
-
- virtual const char *getPassName() const {
- return "Blackfin DAG->DAG Pattern Instruction Selection";
- }
-
- // Include the pieces autogenerated from the target description.
-#include "BlackfinGenDAGISel.inc"
-
- private:
- SDNode *Select(SDNode *N);
- bool SelectADDRspii(SDValue Addr, SDValue &Base, SDValue &Offset);
-
- // Walk the DAG after instruction selection, fixing register class issues.
- void FixRegisterClasses(SelectionDAG &DAG);
-
- const BlackfinInstrInfo &getInstrInfo() {
- return *static_cast<const BlackfinTargetMachine&>(TM).getInstrInfo();
- }
- const BlackfinRegisterInfo *getRegisterInfo() {
- return static_cast<const BlackfinTargetMachine&>(TM).getRegisterInfo();
- }
- };
-} // end anonymous namespace
-
-FunctionPass *llvm::createBlackfinISelDag(BlackfinTargetMachine &TM,
- CodeGenOpt::Level OptLevel) {
- return new BlackfinDAGToDAGISel(TM, OptLevel);
-}
-
-void BlackfinDAGToDAGISel::PostprocessISelDAG() {
- FixRegisterClasses(*CurDAG);
-}
-
-SDNode *BlackfinDAGToDAGISel::Select(SDNode *N) {
- if (N->isMachineOpcode())
- return NULL; // Already selected.
-
- switch (N->getOpcode()) {
- default: break;
- case ISD::FrameIndex: {
- // Selects to ADDpp FI, 0, which in turn will become ADDimm7 SP, imm or
- // ADDpp SP, Px.
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32);
- return CurDAG->SelectNodeTo(N, BF::ADDpp, MVT::i32, TFI,
- CurDAG->getTargetConstant(0, MVT::i32));
- }
- }
-
- return SelectCode(N);
-}
-
-bool BlackfinDAGToDAGISel::SelectADDRspii(SDValue Addr,
- SDValue &Base,
- SDValue &Offset) {
- FrameIndexSDNode *FIN = 0;
- if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) {
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- Offset = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
- }
- if (Addr.getOpcode() == ISD::ADD) {
- ConstantSDNode *CN = 0;
- if ((FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) &&
- (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) &&
- (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
- // Constant positive word offset from frame index
- Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
- Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
- return true;
- }
- }
- return false;
-}
-
-static inline bool isCC(const TargetRegisterClass *RC) {
- return BF::AnyCCRegClass.hasSubClassEq(RC);
-}
-
-static inline bool isDCC(const TargetRegisterClass *RC) {
- return BF::DRegClass.hasSubClassEq(RC) || isCC(RC);
-}
-
-static void UpdateNodeOperand(SelectionDAG &DAG,
- SDNode *N,
- unsigned Num,
- SDValue Val) {
- SmallVector<SDValue, 8> ops(N->op_begin(), N->op_end());
- ops[Num] = Val;
- SDNode *New = DAG.UpdateNodeOperands(N, ops.data(), ops.size());
- DAG.ReplaceAllUsesWith(N, New);
-}
-
-// After instruction selection, insert COPY_TO_REGCLASS nodes to help in
-// choosing the proper register classes.
-void BlackfinDAGToDAGISel::FixRegisterClasses(SelectionDAG &DAG) {
- const BlackfinInstrInfo &TII = getInstrInfo();
- const BlackfinRegisterInfo *TRI = getRegisterInfo();
- DAG.AssignTopologicalOrder();
- HandleSDNode Dummy(DAG.getRoot());
-
- for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin();
- NI != DAG.allnodes_end(); ++NI) {
- if (NI->use_empty() || !NI->isMachineOpcode())
- continue;
- const MCInstrDesc &DefMCID = TII.get(NI->getMachineOpcode());
- for (SDNode::use_iterator UI = NI->use_begin(); !UI.atEnd(); ++UI) {
- if (!UI->isMachineOpcode())
- continue;
-
- if (UI.getUse().getResNo() >= DefMCID.getNumDefs())
- continue;
- const TargetRegisterClass *DefRC =
- TII.getRegClass(DefMCID, UI.getUse().getResNo(), TRI);
-
- const MCInstrDesc &UseMCID = TII.get(UI->getMachineOpcode());
- if (UseMCID.getNumDefs()+UI.getOperandNo() >= UseMCID.getNumOperands())
- continue;
- const TargetRegisterClass *UseRC =
- TII.getRegClass(UseMCID, UseMCID.getNumDefs()+UI.getOperandNo(), TRI);
- if (!DefRC || !UseRC)
- continue;
- // CC can only be copied to or from CC/D registers; go through D otherwise.
- if ((isCC(DefRC) && !isDCC(UseRC)) || (isCC(UseRC) && !isDCC(DefRC))) {
- SDNode *Copy =
- DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
- NI->getDebugLoc(),
- MVT::i32,
- UI.getUse().get(),
- DAG.getTargetConstant(BF::DRegClassID, MVT::i32));
- UpdateNodeOperand(DAG, *UI, UI.getOperandNo(), SDValue(Copy, 0));
- }
- }
- }
- DAG.setRoot(Dummy.getValue());
-}
-
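The [FI + imm] addressing mode matched by SelectADDRspii accepts a bare frame index, or a frame index plus a constant that is non-negative and a multiple of 4; everything else falls back to generic selection. A sketch of the predicate with the DAG machinery stripped away:

#include <iostream>

static bool selectADDRspii(bool HasFrameIndex, bool HasConst, long Offset) {
  if (!HasFrameIndex)
    return false;
  if (!HasConst)
    return true;                            // bare FI, offset defaults to 0
  return Offset >= 0 && Offset % 4 == 0;    // constant positive word offset
}

int main() {
  std::cout << selectADDRspii(true, false, 0)  // 1: plain frame index
            << selectADDRspii(true, true, 8)   // 1: FI + 8
            << selectADDRspii(true, true, 6)   // 0: not word-aligned
            << selectADDRspii(true, true, -4)  // 0: negative offset
            << '\n';
}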
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp
deleted file mode 100644
index 7d4c45f..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp
+++ /dev/null
@@ -1,645 +0,0 @@
-//===- BlackfinISelLowering.cpp - Blackfin DAG Lowering Implementation ----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the interfaces that Blackfin uses to lower LLVM code
-// into a selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinISelLowering.h"
-#include "BlackfinTargetMachine.h"
-#include "llvm/Function.h"
-#include "llvm/Type.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/ADT/VectorExtras.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinGenCallingConv.inc"
-
-//===----------------------------------------------------------------------===//
-// TargetLowering Implementation
-//===----------------------------------------------------------------------===//
-
-BlackfinTargetLowering::BlackfinTargetLowering(TargetMachine &TM)
- : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
- setBooleanContents(ZeroOrOneBooleanContent);
- setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
- setStackPointerRegisterToSaveRestore(BF::SP);
- setIntDivIsCheap(false);
-
- // Set up the legal register classes.
- addRegisterClass(MVT::i32, BF::DRegisterClass);
- addRegisterClass(MVT::i16, BF::D16RegisterClass);
-
- computeRegisterProperties();
-
- // Blackfin doesn't have i1 loads or stores
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
-
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::JumpTable, MVT::i32, Custom);
-
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BR_CC, MVT::Other, Expand);
-
- // i16 registers don't do much
- setOperationAction(ISD::AND, MVT::i16, Promote);
- setOperationAction(ISD::OR, MVT::i16, Promote);
- setOperationAction(ISD::XOR, MVT::i16, Promote);
- setOperationAction(ISD::CTPOP, MVT::i16, Promote);
- // The expansion of CTLZ/CTTZ uses AND/OR, so we might as well promote
- // immediately.
- setOperationAction(ISD::CTLZ, MVT::i16, Promote);
- setOperationAction(ISD::CTTZ, MVT::i16, Promote);
- setOperationAction(ISD::SETCC, MVT::i16, Promote);
-
- // Blackfin has no division
- setOperationAction(ISD::SDIV, MVT::i16, Expand);
- setOperationAction(ISD::SDIV, MVT::i32, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::SREM, MVT::i16, Expand);
- setOperationAction(ISD::SREM, MVT::i32, Expand);
- setOperationAction(ISD::UDIV, MVT::i16, Expand);
- setOperationAction(ISD::UDIV, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::UREM, MVT::i16, Expand);
- setOperationAction(ISD::UREM, MVT::i32, Expand);
-
- setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::MULHU, MVT::i32, Expand);
- setOperationAction(ISD::MULHS, MVT::i32, Expand);
-
- // No native add/subtract-with-carry instructions; custom lower ADDE/SUBE.
- setOperationAction(ISD::ADDE, MVT::i32, Custom);
- setOperationAction(ISD::SUBE, MVT::i32, Custom);
-
- // Blackfin has no intrinsics for these particular operations.
- setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
- setOperationAction(ISD::BSWAP, MVT::i32, Expand);
-
- setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- // i32 has native CTPOP, but not CTLZ/CTTZ
- setOperationAction(ISD::CTLZ, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ, MVT::i32, Expand);
-
- // READCYCLECOUNTER needs special type legalization.
- setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
-
- setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
-
- // Use the default implementation.
- setOperationAction(ISD::VACOPY, MVT::Other, Expand);
- setOperationAction(ISD::VAEND, MVT::Other, Expand);
- setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
-
- setMinFunctionAlignment(2);
-}
-
-const char *BlackfinTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- default: return 0;
- case BFISD::CALL: return "BFISD::CALL";
- case BFISD::RET_FLAG: return "BFISD::RET_FLAG";
- case BFISD::Wrapper: return "BFISD::Wrapper";
- }
-}
-
-EVT BlackfinTargetLowering::getSetCCResultType(EVT VT) const {
- // SETCC always sets the CC register. Technically that is an i1 register, but
- // that type is not legal, so we treat it as an i32 register.
- return MVT::i32;
-}
-
-SDValue BlackfinTargetLowering::LowerGlobalAddress(SDValue Op,
- SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
-
- Op = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
- return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
-}
-
-SDValue BlackfinTargetLowering::LowerJumpTable(SDValue Op,
- SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
- int JTI = cast<JumpTableSDNode>(Op)->getIndex();
-
- Op = DAG.getTargetJumpTable(JTI, MVT::i32);
- return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
-}
-
-SDValue
-BlackfinTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
-
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
-
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
- CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes of stack space
- CCInfo.AnalyzeFormalArguments(Ins, CC_Blackfin);
-
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
-
- if (VA.isRegLoc()) {
- EVT RegVT = VA.getLocVT();
- TargetRegisterClass *RC = VA.getLocReg() == BF::P0 ?
- BF::PRegisterClass : BF::DRegisterClass;
- assert(RC->contains(VA.getLocReg()) && "Unexpected regclass in CCState");
- assert(RC->hasType(RegVT) && "Unexpected regclass in CCState");
-
- unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
- MF.getRegInfo().addLiveIn(VA.getLocReg(), Reg);
- SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
-
- // If this is an 8 or 16-bit value, it is really passed promoted to 32
- // bits. Insert an assert[sz]ext to capture this, then truncate to the
- // right size.
- if (VA.getLocInfo() == CCValAssign::SExt)
- ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
- else if (VA.getLocInfo() == CCValAssign::ZExt)
- ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
-
- if (VA.getLocInfo() != CCValAssign::Full)
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
-
- InVals.push_back(ArgValue);
- } else {
- assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
- unsigned ObjSize = VA.getLocVT().getStoreSize();
- int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);
- SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
- MachinePointerInfo(),
- false, false, 0));
- }
- }
-
- return Chain;
-}
-
-SDValue
-BlackfinTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
-
- // CCValAssign - represent the assignment of the return value to locations.
- SmallVector<CCValAssign, 16> RVLocs;
-
- // CCState - Info about the registers and stack slot.
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- DAG.getTarget(), RVLocs, *DAG.getContext());
-
- // Analyze return values.
- CCInfo.AnalyzeReturn(Outs, RetCC_Blackfin);
-
- // If this is the first return lowered for this function, add the regs to the
- // liveout set for the function.
- if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
- for (unsigned i = 0; i != RVLocs.size(); ++i)
- DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
- }
-
- SDValue Flag;
-
- // Copy the result values into the output registers.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign &VA = RVLocs[i];
- assert(VA.isRegLoc() && "Can only return in registers!");
- SDValue Opi = OutVals[i];
-
- // Expand to i32 if necessary
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::SExt:
- Opi = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Opi);
- break;
- case CCValAssign::ZExt:
- Opi = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Opi);
- break;
- case CCValAssign::AExt:
- Opi = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Opi);
- break;
- }
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Opi, SDValue());
- // Guarantee that all emitted copies are stuck together with flags.
- Flag = Chain.getValue(1);
- }
-
- if (Flag.getNode()) {
- return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
- } else {
- return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain);
- }
-}
-
-SDValue
-BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- // The Blackfin target does not yet support tail call optimization.
- isTailCall = false;
-
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- DAG.getTarget(), ArgLocs, *DAG.getContext());
- CCInfo.AllocateStack(12, 4); // ABI requires 12 bytes of stack space
- CCInfo.AnalyzeCallOperands(Outs, CC_Blackfin);
-
- // Get the size of the outgoing arguments stack space requirement.
- unsigned ArgsSize = CCInfo.getNextStackOffset();
-
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, true));
- SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
- SmallVector<SDValue, 8> MemOpChains;
-
- // Walk the register/memloc assignments, inserting copies/loads.
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
- SDValue Arg = OutVals[i];
-
- // Promote the value if needed.
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::AExt:
- Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- }
-
- // Arguments that are passed in registers must be kept in the
- // RegsToPass vector.
- if (VA.isRegLoc()) {
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- } else {
- assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
- int Offset = VA.getLocMemOffset();
- assert(Offset%4 == 0 && "Unaligned LocMemOffset");
- assert(VA.getLocVT()==MVT::i32 && "Illegal CCValAssign type");
- SDValue SPN = DAG.getCopyFromReg(Chain, dl, BF::SP, MVT::i32);
- SDValue OffsetN = DAG.getIntPtrConstant(Offset);
- OffsetN = DAG.getNode(ISD::ADD, dl, MVT::i32, SPN, OffsetN);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, OffsetN,
- MachinePointerInfo(),false, false, 0));
- }
- }
-
- // Transform all store nodes into a single TokenFactor node, since the
- // stores are independent of each other.
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
-
- // Build a sequence of copy-to-reg nodes chained together with token
- // chain and flag operands which copy the outgoing args into registers.
- // The InFlag is necessary since all emitted instructions must be
- // stuck together.
- SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
-
- // If the callee is a GlobalAddress node (quite common, every direct call is)
- // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
- // Likewise ExternalSymbol -> TargetExternalSymbol.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
- else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
- Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
-
- std::vector<EVT> NodeTys;
- NodeTys.push_back(MVT::Other); // Returns a chain
- NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use.
- SDValue Ops[] = { Chain, Callee, InFlag };
- Chain = DAG.getNode(BFISD::CALL, dl, NodeTys, Ops,
- InFlag.getNode() ? 3 : 2);
- InFlag = Chain.getValue(1);
-
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, true),
- DAG.getIntPtrConstant(0, true), InFlag);
- InFlag = Chain.getValue(1);
-
- // Assign locations to each value returned by this call.
- SmallVector<CCValAssign, 16> RVLocs;
- CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- DAG.getTarget(), RVLocs, *DAG.getContext());
-
- RVInfo.AnalyzeCallResult(Ins, RetCC_Blackfin);
-
- // Copy all of the result registers out of their specified physreg.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign &RV = RVLocs[i];
- unsigned Reg = RV.getLocReg();
-
- Chain = DAG.getCopyFromReg(Chain, dl, Reg,
- RVLocs[i].getLocVT(), InFlag);
- SDValue Val = Chain.getValue(0);
- InFlag = Chain.getValue(2);
- Chain = Chain.getValue(1);
-
- // Callee is responsible for extending any i16 return values.
- switch (RV.getLocInfo()) {
- case CCValAssign::SExt:
- Val = DAG.getNode(ISD::AssertSext, dl, RV.getLocVT(), Val,
- DAG.getValueType(RV.getValVT()));
- break;
- case CCValAssign::ZExt:
- Val = DAG.getNode(ISD::AssertZext, dl, RV.getLocVT(), Val,
- DAG.getValueType(RV.getValVT()));
- break;
- default:
- break;
- }
-
- // Truncate to valtype
- if (RV.getLocInfo() != CCValAssign::Full)
- Val = DAG.getNode(ISD::TRUNCATE, dl, RV.getValVT(), Val);
- InVals.push_back(Val);
- }
-
- return Chain;
-}
-
-// Expansion of ADDE / SUBE. This is a bit involved since Blackfin doesn't have
-// add-with-carry instructions.
-SDValue BlackfinTargetLowering::LowerADDE(SDValue Op, SelectionDAG &DAG) const {
- // Operands: lhs, rhs, carry-in (AC0 flag)
- // Results: sum, carry-out (AC0 flag)
- DebugLoc dl = Op.getDebugLoc();
-
- unsigned Opcode = Op.getOpcode()==ISD::ADDE ? BF::ADD : BF::SUB;
-
- // zext incoming carry flag in AC0 to 32 bits
- SDNode* CarryIn = DAG.getMachineNode(BF::MOVE_cc_ac0, dl, MVT::i32,
- /* flag= */ Op.getOperand(2));
- CarryIn = DAG.getMachineNode(BF::MOVECC_zext, dl, MVT::i32,
- SDValue(CarryIn, 0));
-
- // Add operands, produce sum and carry flag
- SDNode *Sum = DAG.getMachineNode(Opcode, dl, MVT::i32, MVT::Glue,
- Op.getOperand(0), Op.getOperand(1));
-
- // Store intermediate carry from Sum
- SDNode* Carry1 = DAG.getMachineNode(BF::MOVE_cc_ac0, dl, MVT::i32,
- /* flag= */ SDValue(Sum, 1));
-
- // Add incoming carry, again producing an output flag
- Sum = DAG.getMachineNode(Opcode, dl, MVT::i32, MVT::Glue,
- SDValue(Sum, 0), SDValue(CarryIn, 0));
-
- // Update AC0 with the intermediate carry, producing a flag.
- SDNode *CarryOut = DAG.getMachineNode(BF::OR_ac0_cc, dl, MVT::Glue,
- SDValue(Carry1, 0));
-
- // Compose (i32, flag) pair
- SDValue ops[2] = { SDValue(Sum, 0), SDValue(CarryOut, 0) };
- return DAG.getMergeValues(ops, 2, dl);
-}
-
-SDValue BlackfinTargetLowering::LowerOperation(SDValue Op,
- SelectionDAG &DAG) const {
- switch (Op.getOpcode()) {
- default:
- Op.getNode()->dump();
- llvm_unreachable("Should not custom lower this!");
- case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
- case ISD::GlobalTLSAddress:
- llvm_unreachable("TLS not implemented for Blackfin.");
- case ISD::JumpTable: return LowerJumpTable(Op, DAG);
- // Frame & Return address. Currently unimplemented.
- case ISD::FRAMEADDR: return SDValue();
- case ISD::RETURNADDR: return SDValue();
- case ISD::ADDE:
- case ISD::SUBE: return LowerADDE(Op, DAG);
- }
-}
-
-void
-BlackfinTargetLowering::ReplaceNodeResults(SDNode *N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) const {
- DebugLoc dl = N->getDebugLoc();
- switch (N->getOpcode()) {
- default:
- llvm_unreachable("Do not know how to custom type legalize this operation!");
- return;
- case ISD::READCYCLECOUNTER: {
- // The low part of the cycle counter is in CYCLES, the high part in
- // CYCLES2. Reading CYCLES will latch the value of CYCLES2, so we must read
- // CYCLES2 last.
- SDValue TheChain = N->getOperand(0);
- SDValue lo = DAG.getCopyFromReg(TheChain, dl, BF::CYCLES, MVT::i32);
- SDValue hi = DAG.getCopyFromReg(lo.getValue(1), dl, BF::CYCLES2, MVT::i32);
- // Use a buildpair to merge the two 32-bit values into a 64-bit one.
- Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, lo, hi));
- // Outgoing chain. If we were to use the chain from lo instead, it would be
- // possible to entirely eliminate the CYCLES2 read in (i32 (trunc
- // readcyclecounter)). Unfortunately this could delay the CYCLES2
- // read beyond the next CYCLES read, leading to invalid results.
- Results.push_back(hi.getValue(1));
- return;
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Blackfin Inline Assembly Support
-//===----------------------------------------------------------------------===//
-
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
-BlackfinTargetLowering::ConstraintType
-BlackfinTargetLowering::getConstraintType(const std::string &Constraint) const {
- if (Constraint.size() != 1)
- return TargetLowering::getConstraintType(Constraint);
-
- switch (Constraint[0]) {
- // Standard constraints
- case 'r':
- return C_RegisterClass;
-
- // Blackfin-specific constraints
- case 'a':
- case 'd':
- case 'z':
- case 'D':
- case 'W':
- case 'e':
- case 'b':
- case 'v':
- case 'f':
- case 'c':
- case 't':
- case 'u':
- case 'k':
- case 'x':
- case 'y':
- case 'w':
- return C_RegisterClass;
- case 'A':
- case 'B':
- case 'C':
- case 'Z':
- case 'Y':
- return C_Register;
- }
-
- // Not implemented: q0-q7, qA. Use {R2} etc. instead.
-
- return TargetLowering::getConstraintType(Constraint);
-}
-
-/// Examine constraint type and operand type and determine a weight value.
-/// This object must already have been set up with the operand type
-/// and the current alternative constraint selected.
-TargetLowering::ConstraintWeight
-BlackfinTargetLowering::getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const {
- ConstraintWeight weight = CW_Invalid;
- Value *CallOperandVal = info.CallOperandVal;
- // If we don't have a value, we can't do a match,
- // but allow it at the lowest weight.
- if (CallOperandVal == NULL)
- return CW_Default;
- // Look at the constraint type.
- switch (*constraint) {
- default:
- weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
- break;
-
- // Blackfin-specific constraints
- case 'a':
- case 'd':
- case 'z':
- case 'D':
- case 'W':
- case 'e':
- case 'b':
- case 'v':
- case 'f':
- case 'c':
- case 't':
- case 'u':
- case 'k':
- case 'x':
- case 'y':
- case 'w':
- return CW_Register;
- case 'A':
- case 'B':
- case 'C':
- case 'Z':
- case 'Y':
- return CW_SpecificReg;
- }
- return weight;
-}
-
-/// getRegForInlineAsmConstraint - Return register number and class for a
-/// constraint.
-std::pair<unsigned, const TargetRegisterClass*> BlackfinTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
- typedef std::pair<unsigned, const TargetRegisterClass*> Pair;
- using namespace BF;
-
- if (Constraint.size() != 1)
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
-
- switch (Constraint[0]) {
- // Standard constraints
- case 'r':
- return Pair(0U, VT == MVT::i16 ? D16RegisterClass : DPRegisterClass);
-
- // Blackfin-specific constraints
- case 'a': return Pair(0U, PRegisterClass);
- case 'd': return Pair(0U, DRegisterClass);
- case 'e': return Pair(0U, AccuRegisterClass);
- case 'A': return Pair(A0, AccuRegisterClass);
- case 'B': return Pair(A1, AccuRegisterClass);
- case 'b': return Pair(0U, IRegisterClass);
- case 'v': return Pair(0U, BRegisterClass);
- case 'f': return Pair(0U, MRegisterClass);
- case 'C': return Pair(CC, JustCCRegisterClass);
- case 'x': return Pair(0U, GRRegisterClass);
- case 'w': return Pair(0U, ALLRegisterClass);
- case 'Z': return Pair(P3, PRegisterClass);
- case 'Y': return Pair(P1, PRegisterClass);
- case 'z': return Pair(0U, zConsRegisterClass);
- case 'D': return Pair(0U, DConsRegisterClass);
- case 'W': return Pair(0U, WConsRegisterClass);
- case 'c': return Pair(0U, cConsRegisterClass);
- case 't': return Pair(0U, tConsRegisterClass);
- case 'u': return Pair(0U, uConsRegisterClass);
- case 'k': return Pair(0U, kConsRegisterClass);
- case 'y': return Pair(0U, yConsRegisterClass);
- }
-
- // Not implemented: q0-q7, qA. Use {R2} etc. instead.
-
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
-}
-
-bool BlackfinTargetLowering::
-isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
- // The Blackfin target isn't yet aware of offsets.
- return false;
-}
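The LowerADDE expansion above is easiest to follow in scalar terms: without an add-with-carry instruction, a + b + cin is computed as two plain 32-bit adds, and the outgoing carry is the OR of the carries of the two steps (at most one of them can be set). A plain C++ sketch of that scheme:

#include <cstdint>
#include <iostream>

static uint32_t addWithCarry(uint32_t A, uint32_t B, bool CarryIn,
                             bool &CarryOut) {
  uint32_t Partial = A + B;                   // first add, first carry
  bool Carry1 = Partial < A;
  uint32_t Sum = Partial + (CarryIn ? 1 : 0); // fold in the incoming carry
  bool Carry2 = Sum < Partial;
  CarryOut = Carry1 || Carry2;                // mirrors the OR into AC0
  return Sum;
}

int main() {
  bool C;
  uint32_t Lo = addWithCarry(0xffffffffu, 1u, false, C); // Lo = 0, C = 1
  uint32_t Hi = addWithCarry(0u, 0u, C, C);              // propagate carry
  std::cout << Hi << ' ' << Lo << ' ' << C << '\n';      // prints "1 0 0"
}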
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h
deleted file mode 100644
index 90908ba..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h
+++ /dev/null
@@ -1,83 +0,0 @@
-//===- BlackfinISelLowering.h - Blackfin DAG Lowering Interface -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that Blackfin uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFIN_ISELLOWERING_H
-#define BLACKFIN_ISELLOWERING_H
-
-#include "llvm/Target/TargetLowering.h"
-#include "Blackfin.h"
-
-namespace llvm {
-
- namespace BFISD {
- enum {
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
- CALL, // A call instruction.
- RET_FLAG, // Return with a flag operand.
- Wrapper // Address wrapper
- };
- }
-
- class BlackfinTargetLowering : public TargetLowering {
- public:
- BlackfinTargetLowering(TargetMachine &TM);
- virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i16; }
- virtual EVT getSetCCResultType(EVT VT) const;
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
- virtual void ReplaceNodeResults(SDNode *N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) const;
-
- ConstraintType getConstraintType(const std::string &Constraint) const;
-
- /// Examine constraint string and operand type and determine a weight value.
- /// The operand object must already have been set up with the operand type.
- ConstraintWeight getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const;
-
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
- virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
- const char *getTargetNodeName(unsigned Opcode) const;
-
- private:
- SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerADDE(SDValue Op, SelectionDAG &DAG) const;
-
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
- };
-} // end namespace llvm
-
-#endif // BLACKFIN_ISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrFormats.td b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrFormats.td
deleted file mode 100644
index d8e6e25..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrFormats.td
+++ /dev/null
@@ -1,34 +0,0 @@
-//===--- BlackfinInstrFormats.td ---------------------------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Instruction format superclass
-//===----------------------------------------------------------------------===//
-
-class InstBfin<dag outs, dag ins, string asmstr, list<dag> pattern>
- : Instruction {
- field bits<32> Inst;
-
- let Namespace = "BF";
-
- dag OutOperandList = outs;
- dag InOperandList = ins;
- let AsmString = asmstr;
- let Pattern = pattern;
-}
-
-// Single-word (16-bit) instructions
-class F1<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstBfin<outs, ins, asmstr, pattern> {
-}
-
-// Double-word (32-bit) instructions
-class F2<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstBfin<outs, ins, asmstr, pattern> {
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp
deleted file mode 100644
index c06a919..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp
+++ /dev/null
@@ -1,256 +0,0 @@
-//===- BlackfinInstrInfo.cpp - Blackfin Instruction Information -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Blackfin implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinInstrInfo.h"
-#include "BlackfinSubtarget.h"
-#include "Blackfin.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_INSTRINFO_CTOR
-#include "BlackfinGenInstrInfo.inc"
-
-using namespace llvm;
-
-BlackfinInstrInfo::BlackfinInstrInfo(BlackfinSubtarget &ST)
- : BlackfinGenInstrInfo(BF::ADJCALLSTACKDOWN, BF::ADJCALLSTACKUP),
- RI(ST, *this),
- Subtarget(ST) {}
-
-/// isLoadFromStackSlot - If the specified machine instruction is a direct
-/// load from a stack slot, return the virtual or physical register number of
-/// the destination along with the FrameIndex of the loaded stack slot. If
-/// not, return 0. This predicate must return 0 if the instruction has
-/// any side effects other than loading from the stack slot.
-unsigned BlackfinInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case BF::LOAD32fi:
- case BF::LOAD16fi:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
- return 0;
-}
-
-/// isStoreToStackSlot - If the specified machine instruction is a direct
-/// store to a stack slot, return the virtual or physical register number of
-/// the source reg along with the FrameIndex of the stored stack slot. If
-/// not, return 0. This predicate must return 0 if the instruction has
-/// any side effects other than storing to the stack slot.
-unsigned BlackfinInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case BF::STORE32fi:
- case BF::STORE16fi:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
- return 0;
-}
-
-unsigned BlackfinInstrInfo::
-InsertBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const {
- // Shouldn't be a fall through.
- assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- assert((Cond.size() == 1 || Cond.size() == 0) &&
- "Branch conditions have at most one component!");
-
- if (Cond.empty()) {
- // Unconditional branch.
- assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, DL, get(BF::JUMPa)).addMBB(TBB);
- return 1;
- }
-
- // Conditional branch.
- llvm_unreachable("Implement conditional branches!");
-}
-
-void BlackfinInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
- if (BF::ALLRegClass.contains(DestReg, SrcReg)) {
- BuildMI(MBB, I, DL, get(BF::MOVE), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- return;
- }
-
- if (BF::D16RegClass.contains(DestReg, SrcReg)) {
- BuildMI(MBB, I, DL, get(BF::SLL16i), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc))
- .addImm(0);
- return;
- }
-
- if (BF::DRegClass.contains(DestReg)) {
- if (SrcReg == BF::NCC) {
- BuildMI(MBB, I, DL, get(BF::MOVENCC_z), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- BuildMI(MBB, I, DL, get(BF::BITTGL), DestReg).addReg(DestReg).addImm(0);
- return;
- }
- if (SrcReg == BF::CC) {
- BuildMI(MBB, I, DL, get(BF::MOVECC_zext), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- return;
- }
- }
-
- if (BF::DRegClass.contains(SrcReg)) {
- if (DestReg == BF::NCC) {
- BuildMI(MBB, I, DL, get(BF::SETEQri_not), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc)).addImm(0);
- return;
- }
- if (DestReg == BF::CC) {
- BuildMI(MBB, I, DL, get(BF::MOVECC_nz), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- return;
- }
- }
-
- if (DestReg == BF::NCC && SrcReg == BF::CC) {
- BuildMI(MBB, I, DL, get(BF::MOVE_ncccc), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- return;
- }
-
- if (DestReg == BF::CC && SrcReg == BF::NCC) {
- BuildMI(MBB, I, DL, get(BF::MOVE_ccncc), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
- return;
- }
-
- llvm_unreachable("Bad reg-to-reg copy");
-}
-
-static bool inClass(const TargetRegisterClass &Test,
- unsigned Reg,
- const TargetRegisterClass *RC) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
- return Test.contains(Reg);
- else
- return Test.hasSubClassEq(RC);
-}
-
-void
-BlackfinInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned SrcReg,
- bool isKill,
- int FI,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
-
- if (inClass(BF::DPRegClass, SrcReg, RC)) {
- BuildMI(MBB, I, DL, get(BF::STORE32fi))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI)
- .addImm(0);
- return;
- }
-
- if (inClass(BF::D16RegClass, SrcReg, RC)) {
- BuildMI(MBB, I, DL, get(BF::STORE16fi))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI)
- .addImm(0);
- return;
- }
-
- if (inClass(BF::AnyCCRegClass, SrcReg, RC)) {
- BuildMI(MBB, I, DL, get(BF::STORE8fi))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI)
- .addImm(0);
- return;
- }
-
- llvm_unreachable((std::string("Cannot store regclass to stack slot: ") +
- RC->getName()).c_str());
-}
-
-void BlackfinInstrInfo::
-storeRegToAddr(MachineFunction &MF,
- unsigned SrcReg,
- bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- llvm_unreachable("storeRegToAddr not implemented");
-}
-
-void
-BlackfinInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg,
- int FI,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
- if (inClass(BF::DPRegClass, DestReg, RC)) {
- BuildMI(MBB, I, DL, get(BF::LOAD32fi), DestReg)
- .addFrameIndex(FI)
- .addImm(0);
- return;
- }
-
- if (inClass(BF::D16RegClass, DestReg, RC)) {
- BuildMI(MBB, I, DL, get(BF::LOAD16fi), DestReg)
- .addFrameIndex(FI)
- .addImm(0);
- return;
- }
-
- if (inClass(BF::AnyCCRegClass, DestReg, RC)) {
- BuildMI(MBB, I, DL, get(BF::LOAD8fi), DestReg)
- .addFrameIndex(FI)
- .addImm(0);
- return;
- }
-
- llvm_unreachable("Cannot load regclass from stack slot");
-}
-
-void BlackfinInstrInfo::
-loadRegFromAddr(MachineFunction &MF,
- unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- llvm_unreachable("loadRegFromAddr not implemented");
-}
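isLoadFromStackSlot and isStoreToStackSlot above share one shape: they only fire for the frame-index load/store opcodes whose address operand is a frame index with a zero immediate offset, i.e. a plain spill or reload with no other side effects. A sketch of that recognizer with hypothetical operand types:

#include <iostream>

struct Op { bool IsFI; bool IsImm; long Imm; int Index; unsigned Reg; };

static unsigned isLoadFromStackSlot(const Op *Ops, int &FrameIndex) {
  if (Ops[1].IsFI && Ops[2].IsImm && Ops[2].Imm == 0) {
    FrameIndex = Ops[1].Index;
    return Ops[0].Reg;        // destination register of the reload
  }
  return 0;                   // not a simple stack-slot access
}

int main() {
  Op Load32fi[3] = {{false, false, 0, 0, 7},  // operand 0: dest register
                    {true,  false, 0, 2, 0},  // operand 1: frame index #2
                    {false, true,  0, 0, 0}}; // operand 2: immediate 0
  int FI = -1;
  unsigned Reg = isLoadFromStackSlot(Load32fi, FI);
  std::cout << "reg=" << Reg << " fi=" << FI << '\n';   // reg=7 fi=2
}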
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h
deleted file mode 100644
index d22ddf0..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h
+++ /dev/null
@@ -1,81 +0,0 @@
-//===- BlackfinInstrInfo.h - Blackfin Instruction Information ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Blackfin implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFININSTRUCTIONINFO_H
-#define BLACKFININSTRUCTIONINFO_H
-
-#include "llvm/Target/TargetInstrInfo.h"
-#include "BlackfinRegisterInfo.h"
-
-#define GET_INSTRINFO_HEADER
-#include "BlackfinGenInstrInfo.inc"
-
-namespace llvm {
-
- class BlackfinInstrInfo : public BlackfinGenInstrInfo {
- const BlackfinRegisterInfo RI;
- const BlackfinSubtarget& Subtarget;
- public:
- explicit BlackfinInstrInfo(BlackfinSubtarget &ST);
-
- /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- /// such, whenever a client has an instance of instruction info, it should
- /// always be able to get register info as well (through this method).
- virtual const BlackfinRegisterInfo &getRegisterInfo() const { return RI; }
-
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- virtual unsigned
- InsertBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill,
- int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void storeRegToAddr(MachineFunction &MF,
- unsigned SrcReg, bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const;
- };
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td
deleted file mode 100644
index 5b59d77..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td
+++ /dev/null
@@ -1,862 +0,0 @@
-//===- BlackfinInstrInfo.td - Target Description for Blackfin Target ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the Blackfin instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Instruction format superclass
-//===----------------------------------------------------------------------===//
-
-include "BlackfinInstrFormats.td"
-
-// These are target-independent nodes, but have target-specific formats.
-def SDT_BfinCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
-def SDT_BfinCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
- SDTCisVT<1, i32> ]>;
-
-def BfinCallseqStart : SDNode<"ISD::CALLSEQ_START", SDT_BfinCallSeqStart,
- [SDNPHasChain, SDNPOutGlue]>;
-def BfinCallseqEnd : SDNode<"ISD::CALLSEQ_END", SDT_BfinCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-
-def SDT_BfinCall : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
-def BfinCall : SDNode<"BFISD::CALL", SDT_BfinCall,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPVariadic]>;
-
-def BfinRet: SDNode<"BFISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue]>;
-
-def BfinWrapper: SDNode<"BFISD::Wrapper", SDTIntUnaryOp>;
-
-//===----------------------------------------------------------------------===//
-// Transformations
-//===----------------------------------------------------------------------===//
-
-def trailingZeros_xform : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getAPIntValue().countTrailingZeros(),
- MVT::i32);
-}]>;
-
-def trailingOnes_xform : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getAPIntValue().countTrailingOnes(),
- MVT::i32);
-}]>;
-
-def LO16 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((unsigned short)N->getZExtValue(), MVT::i16);
-}]>;
-
-def HI16 : SDNodeXForm<imm, [{
- // Transformation function: shift the immediate value down into the low bits.
- return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 16, MVT::i16);
-}]>;
-
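A minimal standalone sketch of the LO16/HI16 split above (plain C++, no LLVM dependencies; the helper names are illustrative, not part of the backend):

  #include <cassert>
  #include <cstdint>

  // Mirrors the LO16/HI16 SDNodeXForms: a 32-bit immediate is split into
  // the two 16-bit halves consumed by the $dst.h / $dst.l loads.
  static uint16_t lo16(uint32_t v) { return static_cast<uint16_t>(v); }
  static uint16_t hi16(uint32_t v) { return static_cast<uint16_t>(v >> 16); }

  int main() {
    uint32_t imm = 0xDEADBEEF;
    assert(lo16(imm) == 0xBEEF && hi16(imm) == 0xDEAD);
    // Recombining the halves recovers the original constant.
    assert(((static_cast<uint32_t>(hi16(imm)) << 16) | lo16(imm)) == imm);
    return 0;
  }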
-//===----------------------------------------------------------------------===//
-// Immediates
-//===----------------------------------------------------------------------===//
-
-def imm3 : PatLeaf<(imm), [{return isInt<3>(N->getSExtValue());}]>;
-def uimm3 : PatLeaf<(imm), [{return isUInt<3>(N->getZExtValue());}]>;
-def uimm4 : PatLeaf<(imm), [{return isUInt<4>(N->getZExtValue());}]>;
-def uimm5 : PatLeaf<(imm), [{return isUInt<5>(N->getZExtValue());}]>;
-
-def uimm5m2 : PatLeaf<(imm), [{
- uint64_t value = N->getZExtValue();
- return value % 2 == 0 && isUInt<5>(value);
-}]>;
-
-def uimm6m4 : PatLeaf<(imm), [{
- uint64_t value = N->getZExtValue();
- return value % 4 == 0 && isUInt<6>(value);
-}]>;
-
-def imm7 : PatLeaf<(imm), [{return isInt<7>(N->getSExtValue());}]>;
-def imm16 : PatLeaf<(imm), [{return isInt<16>(N->getSExtValue());}]>;
-def uimm16 : PatLeaf<(imm), [{return isUInt<16>(N->getZExtValue());}]>;
-
-def ximm16 : PatLeaf<(imm), [{
- int64_t value = N->getSExtValue();
- return value < (1<<16) && value >= -(1<<15);
-}]>;
-
-def imm17m2 : PatLeaf<(imm), [{
- int64_t value = N->getSExtValue();
- return value % 2 == 0 && isInt<17>(value);
-}]>;
-
-def imm18m4 : PatLeaf<(imm), [{
- int64_t value = N->getSExtValue();
- return value % 4 == 0 && isInt<18>(value);
-}]>;
-
-// 32-bit bitmask transformed to a bit number
-def uimm5mask : Operand<i32>, PatLeaf<(imm), [{
- return isPowerOf2_32(N->getZExtValue());
-}], trailingZeros_xform>;
-
-// 32-bit inverse bitmask transformed to a bit number
-def uimm5imask : Operand<i32>, PatLeaf<(imm), [{
- return isPowerOf2_32(~N->getZExtValue());
-}], trailingOnes_xform>;
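A standalone sketch of how these mask operands work (plain C++; helper names are illustrative): a single-bit mask, or its complement, is accepted and rewritten as the bit number that the bitset/bitclr/bittgl instructions expect.

  #include <cassert>
  #include <cstdint>

  // Mirrors uimm5mask/uimm5imask: accept a power-of-two mask (or its
  // inverse) and rewrite it as a bit number.
  static bool isPowerOf2(uint32_t v) { return v && (v & (v - 1)) == 0; }

  static unsigned countTrailingZeros(uint32_t v) {
    unsigned n = 0;
    while ((v & 1) == 0) { v >>= 1; ++n; }
    return n;
  }
  static unsigned countTrailingOnes(uint32_t v) {
    return countTrailingZeros(~v);
  }

  int main() {
    uint32_t mask = 1u << 7;   // matches uimm5mask -> bit number 7
    assert(isPowerOf2(mask) && countTrailingZeros(mask) == 7);
    uint32_t imask = ~mask;    // matches uimm5imask -> bit number 7
    assert(isPowerOf2(~imask) && countTrailingOnes(imask) == 7);
    return 0;
  }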
-
-//===----------------------------------------------------------------------===//
-// Operands
-//===----------------------------------------------------------------------===//
-
-def calltarget : Operand<iPTR>;
-
-def brtarget : Operand<OtherVT>;
-
-// Addressing modes
-def ADDRspii : ComplexPattern<i32, 2, "SelectADDRspii", [add, frameindex], []>;
-
-// Address operands
-def MEMii : Operand<i32> {
- let PrintMethod = "printMemoryOperand";
- let MIOperandInfo = (ops i32imm, i32imm);
-}
-
-//===----------------------------------------------------------------------===//
-// Instructions
-//===----------------------------------------------------------------------===//
-
-// Pseudo instructions.
-class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstBfin<outs, ins, asmstr, pattern>;
-
-let Defs = [SP], Uses = [SP] in {
-def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
- "${:comment}ADJCALLSTACKDOWN $amt",
- [(BfinCallseqStart timm:$amt)]>;
-def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "${:comment}ADJCALLSTACKUP $amt1 $amt2",
- [(BfinCallseqEnd timm:$amt1, timm:$amt2)]>;
-}
-
-//===----------------------------------------------------------------------===//
-// Table C-9. Program Flow Control Instructions
-//===----------------------------------------------------------------------===//
-
-let isBranch = 1, isTerminator = 1 in {
-
-let isIndirectBranch = 1 in
-def JUMPp : F1<(outs), (ins P:$target),
- "JUMP ($target);",
- [(brind P:$target)]>;
-
-// TODO JUMP (PC-P)
-
-// NOTE: assembler chooses between JUMP.S and JUMP.L
-def JUMPa : F1<(outs), (ins brtarget:$target),
- "jump $target;",
- [(br bb:$target)]>;
-
-def JUMPcc : F1<(outs), (ins AnyCC:$cc, brtarget:$target),
- "if $cc jump $target;",
- [(brcond AnyCC:$cc, bb:$target)]>;
-}
-
-let isCall = 1,
- Defs = [R0, R1, R2, R3, P0, P1, P2, LB0, LB1, LC0, LC1, RETS, ASTAT] in {
-def CALLa: F1<(outs), (ins calltarget:$func, variable_ops),
- "call $func;", []>;
-def CALLp: F1<(outs), (ins P:$func, variable_ops),
- "call ($func);", [(BfinCall P:$func)]>;
-}
-
-let isReturn = 1,
- isTerminator = 1,
- isBarrier = 1,
- Uses = [RETS] in
-def RTS: F1<(outs), (ins), "rts;", [(BfinRet)]>;
-
-//===----------------------------------------------------------------------===//
-// Table C-10. Load / Store Instructions
-//===----------------------------------------------------------------------===//
-
-// Immediate constant loads
-
-// sext immediate, i32 D/P regs
-def LOADimm7: F1<(outs DP:$dst), (ins i32imm:$src),
- "$dst = $src (x);",
- [(set DP:$dst, imm7:$src)]>;
-
-// zext immediate, i32 reg groups 0-3
-def LOADuimm16: F2<(outs GR:$dst), (ins i32imm:$src),
- "$dst = $src (z);",
- [(set GR:$dst, uimm16:$src)]>;
-
-// sext immediate, i32 reg groups 0-3
-def LOADimm16: F2<(outs GR:$dst), (ins i32imm:$src),
- "$dst = $src (x);",
- [(set GR:$dst, imm16:$src)]>;
-
-// Pseudo-instruction for loading a general 32-bit constant.
-def LOAD32imm: Pseudo<(outs GR:$dst), (ins i32imm:$src),
- "$dst.h = ($src >> 16); $dst.l = ($src & 0xffff);",
- [(set GR:$dst, imm:$src)]>;
-
-def LOAD32sym: Pseudo<(outs GR:$dst), (ins i32imm:$src),
- "$dst.h = $src; $dst.l = $src;", []>;
-
-
-// 16-bit immediate, i16 reg groups 0-3
-def LOAD16i: F2<(outs GR16:$dst), (ins i16imm:$src),
- "$dst = $src;", []>;
-
-def : Pat<(BfinWrapper (i32 tglobaladdr:$addr)),
- (LOAD32sym tglobaladdr:$addr)>;
-
-def : Pat<(BfinWrapper (i32 tjumptable:$addr)),
- (LOAD32sym tjumptable:$addr)>;
-
-// We cannot copy from GR16 to D16, and codegen wants to insert copies if we
-// emit GR16 instructions. As a hack, we use this fake instruction instead.
-def LOAD16i_d16: F2<(outs D16:$dst), (ins i16imm:$src),
- "$dst = $src;",
- [(set D16:$dst, ximm16:$src)]>;
-
-// Memory loads with patterns
-
-def LOAD32p: F1<(outs DP:$dst), (ins P:$ptr),
- "$dst = [$ptr];",
- [(set DP:$dst, (load P:$ptr))]>;
-
-// Pseudo-instruction for loading a stack slot
-def LOAD32fi: Pseudo<(outs DP:$dst), (ins MEMii:$mem),
- "${:comment}FI $dst = [$mem];",
- [(set DP:$dst, (load ADDRspii:$mem))]>;
-
-// Note: Expands to multiple insns
-def LOAD16fi: Pseudo<(outs D16:$dst), (ins MEMii:$mem),
- "${:comment}FI $dst = [$mem];",
- [(set D16:$dst, (load ADDRspii:$mem))]>;
-
-// Pseudo-instruction for loading a stack slot, used for AnyCC regs.
-// Replaced with Load D + CC=D
-def LOAD8fi: Pseudo<(outs AnyCC:$dst), (ins MEMii:$mem),
- "${:comment}FI $dst = B[$mem];",
- [(set AnyCC:$dst, (load ADDRspii:$mem))]>;
-
-def LOAD32p_uimm6m4: F1<(outs DP:$dst), (ins P:$ptr, i32imm:$off),
- "$dst = [$ptr + $off];",
- [(set DP:$dst, (load (add P:$ptr, uimm6m4:$off)))]>;
-
-def LOAD32p_imm18m4: F2<(outs DP:$dst), (ins P:$ptr, i32imm:$off),
- "$dst = [$ptr + $off];",
- [(set DP:$dst, (load (add P:$ptr, imm18m4:$off)))]>;
-
-def LOAD32p_16z: F1<(outs D:$dst), (ins P:$ptr),
- "$dst = W[$ptr] (z);",
- [(set D:$dst, (zextloadi16 P:$ptr))]>;
-
-def : Pat<(i32 (extloadi16 P:$ptr)),(LOAD32p_16z P:$ptr)>;
-
-def LOAD32p_uimm5m2_16z: F1<(outs D:$dst), (ins P:$ptr, i32imm:$off),
- "$dst = w[$ptr + $off] (z);",
- [(set D:$dst, (zextloadi16 (add P:$ptr,
- uimm5m2:$off)))]>;
-
-def : Pat<(i32 (extloadi16 (add P:$ptr, uimm5m2:$off))),
- (LOAD32p_uimm5m2_16z P:$ptr, imm:$off)>;
-
-def LOAD32p_imm17m2_16z: F1<(outs D:$dst), (ins P:$ptr, i32imm:$off),
- "$dst = w[$ptr + $off] (z);",
- [(set D:$dst,
- (zextloadi16 (add P:$ptr, imm17m2:$off)))]>;
-
-def : Pat<(i32 (extloadi16 (add P:$ptr, imm17m2:$off))),
- (LOAD32p_imm17m2_16z P:$ptr, imm:$off)>;
-
-def LOAD32p_16s: F1<(outs D:$dst), (ins P:$ptr),
- "$dst = w[$ptr] (x);",
- [(set D:$dst, (sextloadi16 P:$ptr))]>;
-
-def LOAD32p_uimm5m2_16s: F1<(outs D:$dst), (ins P:$ptr, i32imm:$off),
- "$dst = w[$ptr + $off] (x);",
- [(set D:$dst,
- (sextloadi16 (add P:$ptr, uimm5m2:$off)))]>;
-
-def LOAD32p_imm17m2_16s: F1<(outs D:$dst), (ins P:$ptr, i32imm:$off),
- "$dst = w[$ptr + $off] (x);",
- [(set D:$dst,
- (sextloadi16 (add P:$ptr, imm17m2:$off)))]>;
-
-def LOAD16pi: F1<(outs D16:$dst), (ins PI:$ptr),
- "$dst = w[$ptr];",
- [(set D16:$dst, (load PI:$ptr))]>;
-
-def LOAD32p_8z: F1<(outs D:$dst), (ins P:$ptr),
- "$dst = B[$ptr] (z);",
- [(set D:$dst, (zextloadi8 P:$ptr))]>;
-
-def : Pat<(i32 (extloadi8 P:$ptr)), (LOAD32p_8z P:$ptr)>;
-def : Pat<(i16 (extloadi8 P:$ptr)),
- (EXTRACT_SUBREG (LOAD32p_8z P:$ptr), lo16)>;
-def : Pat<(i16 (zextloadi8 P:$ptr)),
- (EXTRACT_SUBREG (LOAD32p_8z P:$ptr), lo16)>;
-
-def LOAD32p_imm16_8z: F1<(outs D:$dst), (ins P:$ptr, i32imm:$off),
- "$dst = b[$ptr + $off] (z);",
- [(set D:$dst, (zextloadi8 (add P:$ptr, imm16:$off)))]>;
-
-def : Pat<(i32 (extloadi8 (add P:$ptr, imm16:$off))),
- (LOAD32p_imm16_8z P:$ptr, imm:$off)>;
-def : Pat<(i16 (extloadi8 (add P:$ptr, imm16:$off))),
- (EXTRACT_SUBREG (LOAD32p_imm16_8z P:$ptr, imm:$off),
- lo16)>;
-def : Pat<(i16 (zextloadi8 (add P:$ptr, imm16:$off))),
- (EXTRACT_SUBREG (LOAD32p_imm16_8z P:$ptr, imm:$off),
- lo16)>;
-
-def LOAD32p_8s: F1<(outs D:$dst), (ins P:$ptr),
- "$dst = b[$ptr] (x);",
- [(set D:$dst, (sextloadi8 P:$ptr))]>;
-
-def : Pat<(i16 (sextloadi8 P:$ptr)),
- (EXTRACT_SUBREG (LOAD32p_8s P:$ptr), lo16)>;
-
-def LOAD32p_imm16_8s: F1<(outs D:$dst), (ins P:$ptr, i32imm:$off),
- "$dst = b[$ptr + $off] (x);",
- [(set D:$dst, (sextloadi8 (add P:$ptr, imm16:$off)))]>;
-
-def : Pat<(i16 (sextloadi8 (add P:$ptr, imm16:$off))),
- (EXTRACT_SUBREG (LOAD32p_imm16_8s P:$ptr, imm:$off),
- lo16)>;
-// Memory loads without patterns
-
-let mayLoad = 1 in {
-
-multiclass LOAD_incdec<RegisterClass drc, RegisterClass prc,
- string mem="", string suf=";"> {
- def _inc : F1<(outs drc:$dst, prc:$ptr_wb), (ins prc:$ptr),
- !strconcat(!subst("M", mem, "$dst = M[$ptr++]"), suf), []>;
- def _dec : F1<(outs drc:$dst, prc:$ptr_wb), (ins prc:$ptr),
- !strconcat(!subst("M", mem, "$dst = M[$ptr--]"), suf), []>;
-}
-multiclass LOAD_incdecpost<RegisterClass drc, RegisterClass prc,
- string mem="", string suf=";">
- : LOAD_incdec<drc, prc, mem, suf> {
- def _post : F1<(outs drc:$dst, prc:$ptr_wb), (ins prc:$ptr, prc:$off),
- !strconcat(!subst("M", mem, "$dst = M[$ptr++$off]"), suf), []>;
-}
-
-defm LOAD32p: LOAD_incdec<DP, P>;
-defm LOAD32i: LOAD_incdec<D, I>;
-defm LOAD8z32p: LOAD_incdec<D, P, "b", " (z);">;
-defm LOAD8s32p: LOAD_incdec<D, P, "b", " (x);">;
-defm LOADhi: LOAD_incdec<D16, I, "w">;
-defm LOAD16z32p: LOAD_incdecpost<D, P, "w", " (z);">;
-defm LOAD16s32p: LOAD_incdecpost<D, P, "w", " (x);">;
-
-def LOAD32p_post: F1<(outs D:$dst, P:$ptr_wb), (ins P:$ptr, P:$off),
- "$dst = [$ptr ++ $off];", []>;
-
-// Note: $fp MUST be FP
-def LOAD32fp_nimm7m4: F1<(outs DP:$dst), (ins P:$fp, i32imm:$off),
- "$dst = [$fp - $off];", []>;
-
-def LOAD32i: F1<(outs D:$dst), (ins I:$ptr),
- "$dst = [$ptr];", []>;
-def LOAD32i_post: F1<(outs D:$dst, I:$ptr_wb), (ins I:$ptr, M:$off),
- "$dst = [$ptr ++ $off];", []>;
-
-
-
-def LOADhp_post: F1<(outs D16:$dst, P:$ptr_wb), (ins P:$ptr, P:$off),
- "$dst = w[$ptr ++ $off];", []>;
-
-
-}
-
-// Memory stores with patterns
-def STORE32p: F1<(outs), (ins DP:$val, P:$ptr),
- "[$ptr] = $val;",
- [(store DP:$val, P:$ptr)]>;
-
-// Pseudo-instructions for storing to a stack slot
-def STORE32fi: Pseudo<(outs), (ins DP:$val, MEMii:$mem),
- "${:comment}FI [$mem] = $val;",
- [(store DP:$val, ADDRspii:$mem)]>;
-
-// Note: This stack-storing pseudo-instruction is expanded to multiple insns
-def STORE16fi: Pseudo<(outs), (ins D16:$val, MEMii:$mem),
- "${:comment}FI [$mem] = $val;",
- [(store D16:$val, ADDRspii:$mem)]>;
-
-// Pseudo-instructions for storing AnyCC register to a stack slot.
-// Replaced with D=CC + STORE byte
-def STORE8fi: Pseudo<(outs), (ins AnyCC:$val, MEMii:$mem),
- "${:comment}FI b[$mem] = $val;",
- [(store AnyCC:$val, ADDRspii:$mem)]>;
-
-def STORE32p_uimm6m4: F1<(outs), (ins DP:$val, P:$ptr, i32imm:$off),
- "[$ptr + $off] = $val;",
- [(store DP:$val, (add P:$ptr, uimm6m4:$off))]>;
-
-def STORE32p_imm18m4: F1<(outs), (ins DP:$val, P:$ptr, i32imm:$off),
- "[$ptr + $off] = $val;",
- [(store DP:$val, (add P:$ptr, imm18m4:$off))]>;
-
-def STORE16pi: F1<(outs), (ins D16:$val, PI:$ptr),
- "w[$ptr] = $val;",
- [(store D16:$val, PI:$ptr)]>;
-
-def STORE8p: F1<(outs), (ins D:$val, P:$ptr),
- "b[$ptr] = $val;",
- [(truncstorei8 D:$val, P:$ptr)]>;
-
-def STORE8p_imm16: F1<(outs), (ins D:$val, P:$ptr, i32imm:$off),
- "b[$ptr + $off] = $val;",
- [(truncstorei8 D:$val, (add P:$ptr, imm16:$off))]>;
-
-let Constraints = "$ptr = $ptr_wb" in {
-
-multiclass STORE_incdec<RegisterClass drc, RegisterClass prc,
- int off=4, string pre=""> {
- def _inc : F1<(outs prc:$ptr_wb), (ins drc:$val, prc:$ptr),
- !strconcat(pre, "[$ptr++] = $val;"),
- [(set prc:$ptr_wb, (post_store drc:$val, prc:$ptr, off))]>;
- def _dec : F1<(outs prc:$ptr_wb), (ins drc:$val, prc:$ptr),
- !strconcat(pre, "[$ptr--] = $val;"),
- [(set prc:$ptr_wb, (post_store drc:$val, prc:$ptr,
- (ineg off)))]>;
-}
-
-defm STORE32p: STORE_incdec<DP, P>;
-defm STORE16i: STORE_incdec<D16, I, 2, "w">;
-defm STORE8p: STORE_incdec<D, P, 1, "b">;
-
-def STORE32p_post: F1<(outs P:$ptr_wb), (ins D:$val, P:$ptr, P:$off),
- "[$ptr ++ $off] = $val;",
- [(set P:$ptr_wb, (post_store D:$val, P:$ptr, P:$off))]>;
-
-def STORE16p_post: F1<(outs P:$ptr_wb), (ins D16:$val, P:$ptr, P:$off),
- "w[$ptr ++ $off] = $val;",
- [(set P:$ptr_wb, (post_store D16:$val, P:$ptr, P:$off))]>;
-}
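The post-increment/decrement forms above write through the pointer and then advance it; a minimal sketch of that semantics (standalone C++; the assumption here is that the delta is in elements, +1 for _inc and -1 for _dec):

  #include <cassert>
  #include <cstdint>

  // Post-increment store as stamped out by STORE_incdec: store first,
  // then return the advanced pointer (the $ptr_wb result).
  static uint32_t *postStore(uint32_t *ptr, uint32_t val, int delta) {
    *ptr = val;
    return ptr + delta;  // delta in elements: +1 for _inc, -1 for _dec
  }

  int main() {
    uint32_t buf[2] = {0, 0};
    uint32_t *p = postStore(buf, 42, +1);
    assert(buf[0] == 42 && p == buf + 1);
    return 0;
  }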
-
-// Memory stores without patterns
-
-let mayStore = 1 in {
-
-// Note: only works for $fp == FP
-def STORE32fp_nimm7m4: F1<(outs), (ins DP:$val, P:$fp, i32imm:$off),
- "[$fp - $off] = $val;", []>;
-
-def STORE32i: F1<(outs), (ins D:$val, I:$ptr),
- "[$ptr] = $val;", []>;
-
-def STORE32i_inc: F1<(outs I:$ptr_wb), (ins D:$val, I:$ptr),
- "[$ptr++] = $val;", []>;
-
-def STORE32i_dec: F1<(outs I:$ptr_wb), (ins D:$val, I:$ptr),
- "[$ptr--] = $val;", []>;
-
-def STORE32i_post: F1<(outs I:$ptr_wb), (ins D:$val, I:$ptr, M:$off),
- "[$ptr ++ $off] = $val;", []>;
-}
-
-def : Pat<(truncstorei16 D:$val, PI:$ptr),
- (STORE16pi (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS D:$val, D)),
- lo16), PI:$ptr)>;
-
-def : Pat<(truncstorei16 (srl D:$val, (i16 16)), PI:$ptr),
- (STORE16pi (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS D:$val, D)),
- hi16), PI:$ptr)>;
-
-def : Pat<(truncstorei8 D16L:$val, P:$ptr),
- (STORE8p (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
- (i16 (COPY_TO_REGCLASS D16L:$val, D16L)),
- lo16),
- P:$ptr)>;
-
-//===----------------------------------------------------------------------===//
-// Table C-11. Move Instructions.
-//===----------------------------------------------------------------------===//
-
-def MOVE: F1<(outs ALL:$dst), (ins ALL:$src),
- "$dst = $src;",
- []>;
-
-let Constraints = "$src1 = $dst" in
-def MOVEcc: F1<(outs DP:$dst), (ins DP:$src1, DP:$src2, AnyCC:$cc),
- "if $cc $dst = $src2;",
- [(set DP:$dst, (select AnyCC:$cc, DP:$src2, DP:$src1))]>;
-
-let Defs = [AZ, AN, AC0, V] in {
-def MOVEzext: F1<(outs D:$dst), (ins D16L:$src),
- "$dst = $src (z);",
- [(set D:$dst, (zext D16L:$src))]>;
-
-def MOVEsext: F1<(outs D:$dst), (ins D16L:$src),
- "$dst = $src (x);",
- [(set D:$dst, (sext D16L:$src))]>;
-
-def MOVEzext8: F1<(outs D:$dst), (ins D:$src),
- "$dst = $src.b (z);",
- [(set D:$dst, (and D:$src, 0xff))]>;
-
-def MOVEsext8: F1<(outs D:$dst), (ins D:$src),
- "$dst = $src.b (x);",
- [(set D:$dst, (sext_inreg D:$src, i8))]>;
-
-}
-
-def : Pat<(sext_inreg D16L:$src, i8),
- (EXTRACT_SUBREG (MOVEsext8
- (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
- D16L:$src,
- lo16)),
- lo16)>;
-
-def : Pat<(sext_inreg D:$src, i16),
- (MOVEsext (EXTRACT_SUBREG D:$src, lo16))>;
-
-def : Pat<(and D:$src, 0xffff),
- (MOVEzext (EXTRACT_SUBREG D:$src, lo16))>;
-
-def : Pat<(i32 (anyext D16L:$src)),
- (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
- (i16 (COPY_TO_REGCLASS D16L:$src, D16L)),
- lo16)>;
-
-// TODO Dreg = Dreg_byte (X/Z)
-
-// TODO Accumulator moves
-
-//===----------------------------------------------------------------------===//
-// Table C-12. Stack Control Instructions
-//===----------------------------------------------------------------------===//
-
-let Uses = [SP], Defs = [SP] in {
-def PUSH: F1<(outs), (ins ALL:$src),
- "[--sp] = $src;", []> { let mayStore = 1; }
-
-// NOTE: POP does not work for DP regs; use LOAD instead.
-def POP: F1<(outs ALL:$dst), (ins),
- "$dst = [sp++];", []> { let mayLoad = 1; }
-}
-
-// TODO: push/pop multiple
-
-def LINK: F2<(outs), (ins i32imm:$amount),
- "link $amount;", []>;
-
-def UNLINK: F2<(outs), (ins),
- "unlink;", []>;
-
-//===----------------------------------------------------------------------===//
-// Table C-13. Control Code Bit Management Instructions
-//===----------------------------------------------------------------------===//
-
-multiclass SETCC<PatFrag opnode, PatFrag invnode, string cond, string suf=";"> {
- def dd : F1<(outs JustCC:$cc), (ins D:$a, D:$b),
- !strconcat(!subst("XX", cond, "cc = $a XX $b"), suf),
- [(set JustCC:$cc, (opnode D:$a, D:$b))]>;
-
- def ri : F1<(outs JustCC:$cc), (ins DP:$a, i32imm:$b),
- !strconcat(!subst("XX", cond, "cc = $a XX $b"), suf),
- [(set JustCC:$cc, (opnode DP:$a, imm3:$b))]>;
-
- def pp : F1<(outs JustCC:$cc), (ins P:$a, P:$b),
- !strconcat(!subst("XX", cond, "cc = $a XX $b"), suf),
- []>;
-
- def ri_not : F1<(outs NotCC:$cc), (ins DP:$a, i32imm:$b),
- !strconcat(!subst("XX", cond, "cc = $a XX $b"), suf),
- [(set NotCC:$cc, (invnode DP:$a, imm3:$b))]>;
-}
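The multiclass stamps out four records per condition, building each asm string by substituting the condition into a template. Roughly, in standalone C++ (an illustrative analogue of !subst/!strconcat, not TableGen itself):

  #include <cassert>
  #include <string>

  // The "XX" placeholder is replaced by the condition string and a
  // suffix (";" or " (iu);" for unsigned compares) is appended.
  static std::string buildAsm(const std::string &cond, const std::string &suf) {
    std::string tmpl = "cc = $a XX $b";
    return tmpl.replace(tmpl.find("XX"), 2, cond) + suf;
  }

  int main() {
    assert(buildAsm("==", ";") == "cc = $a == $b;");
    assert(buildAsm("<=", " (iu);") == "cc = $a <= $b (iu);");
    return 0;
  }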
-
-defm SETEQ : SETCC<seteq, setne, "==">;
-defm SETLT : SETCC<setlt, setge, "<">;
-defm SETLE : SETCC<setle, setgt, "<=">;
-defm SETULT : SETCC<setult, setuge, "<", " (iu);">;
-defm SETULE : SETCC<setule, setugt, "<=", " (iu);">;
-
-def SETNEdd : F1<(outs NotCC:$cc), (ins D:$a, D:$b),
- "cc = $a == $b;",
- [(set NotCC:$cc, (setne D:$a, D:$b))]>;
-
-def : Pat<(setgt D:$a, D:$b), (SETLTdd D:$b, D:$a)>;
-def : Pat<(setge D:$a, D:$b), (SETLEdd D:$b, D:$a)>;
-def : Pat<(setugt D:$a, D:$b), (SETULTdd D:$b, D:$a)>;
-def : Pat<(setuge D:$a, D:$b), (SETULEdd D:$b, D:$a)>;
-
-// TODO: compare pointer for P-P comparisons
-// TODO: compare accumulator
-
-let Defs = [AC0] in
-def OR_ac0_cc : F1<(outs), (ins JustCC:$cc),
- "ac0 \\|= cc;", []>;
-
-let Uses = [AC0] in
-def MOVE_cc_ac0 : F1<(outs JustCC:$cc), (ins),
- "cc = ac0;", []>;
-
-def MOVE_ccncc : F1<(outs JustCC:$cc), (ins NotCC:$sb),
- "cc = !cc;", []>;
-
-def MOVE_ncccc : F1<(outs NotCC:$cc), (ins JustCC:$sb),
- "cc = !cc;", []>;
-
-def MOVECC_zext : F1<(outs D:$dst), (ins JustCC:$cc),
- "$dst = $cc;", []>;
-
-def MOVENCC_z : F1<(outs D:$dst), (ins NotCC:$cc),
- "$dst = cc;", []>;
-
-def MOVECC_nz : F1<(outs AnyCC:$cc), (ins D:$src),
- "cc = $src;",
- [(set AnyCC:$cc, (setne D:$src, 0))]>;
-
-//===----------------------------------------------------------------------===//
-// Table C-14. Logical Operations Instructions
-//===----------------------------------------------------------------------===//
-
-def AND: F1<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst = $src1 & $src2;",
- [(set D:$dst, (and D:$src1, D:$src2))]>;
-
-def NOT: F1<(outs D:$dst), (ins D:$src),
- "$dst = ~$src;",
- [(set D:$dst, (not D:$src))]>;
-
-def OR: F1<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst = $src1 \\| $src2;",
- [(set D:$dst, (or D:$src1, D:$src2))]>;
-
-def XOR: F1<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst = $src1 ^ $src2;",
- [(set D:$dst, (xor D:$src1, D:$src2))]>;
-
-// missing: BXOR, BXORSHIFT
-
-//===----------------------------------------------------------------------===//
-// Table C-15. Bit Operations Instructions
-//===----------------------------------------------------------------------===//
-
-let Constraints = "$src1 = $dst" in {
-def BITCLR: F1<(outs D:$dst), (ins D:$src1, uimm5imask:$src2),
- "bitclr($dst, $src2);",
- [(set D:$dst, (and D:$src1, uimm5imask:$src2))]>;
-
-def BITSET: F1<(outs D:$dst), (ins D:$src1, uimm5mask:$src2),
- "bitset($dst, $src2);",
- [(set D:$dst, (or D:$src1, uimm5mask:$src2))]>;
-
-def BITTGL: F1<(outs D:$dst), (ins D:$src1, uimm5mask:$src2),
- "bittgl($dst, $src2);",
- [(set D:$dst, (xor D:$src1, uimm5mask:$src2))]>;
-}
-
-def BITTST: F1<(outs JustCC:$cc), (ins D:$src1, uimm5mask:$src2),
- "cc = bittst($src1, $src2);",
- [(set JustCC:$cc, (setne (and D:$src1, uimm5mask:$src2),
- (i32 0)))]>;
-
-def NBITTST: F1<(outs JustCC:$cc), (ins D:$src1, uimm5mask:$src2),
- "cc = !bittst($src1, $src2);",
- [(set JustCC:$cc, (seteq (and D:$src1, uimm5mask:$src2),
- (i32 0)))]>;
-
-// TODO: DEPOSIT, EXTRACT, BITMUX
-
-def ONES: F2<(outs D16L:$dst), (ins D:$src),
- "$dst = ones $src;",
- [(set D16L:$dst, (trunc (ctpop D:$src)))]>;
-
-def : Pat<(ctpop D:$src), (MOVEzext (ONES D:$src))>;
-
-//===----------------------------------------------------------------------===//
-// Table C-16. Shift / Rotate Instructions
-//===----------------------------------------------------------------------===//
-
-multiclass SHIFT32<SDNode opnode, string ops> {
- def i : F1<(outs D:$dst), (ins D:$src, i16imm:$amount),
- !subst("XX", ops, "$dst XX= $amount;"),
- [(set D:$dst, (opnode D:$src, (i16 uimm5:$amount)))]>;
- def r : F1<(outs D:$dst), (ins D:$src, D:$amount),
- !subst("XX", ops, "$dst XX= $amount;"),
- [(set D:$dst, (opnode D:$src, D:$amount))]>;
-}
-
-let Defs = [AZ, AN, V, VS],
- Constraints = "$src = $dst" in {
-defm SRA : SHIFT32<sra, ">>>">;
-defm SRL : SHIFT32<srl, ">>">;
-defm SLL : SHIFT32<shl, "<<">;
-}
-
-// TODO: automatic switching between 2-addr and 3-addr (?)
-
-let Defs = [AZ, AN, V, VS] in {
-def SLLr16: F2<(outs D:$dst), (ins D:$src, D16L:$amount),
- "$dst = lshift $src by $amount;",
- [(set D:$dst, (shl D:$src, D16L:$amount))]>;
-
-// An arithmetic left shift saturates on overflow, so ashift is used here
-// with a negated amount to implement an arithmetic right shift.
-def SLAr16: F2<(outs D:$dst), (ins D:$src, D16L:$amount),
- "$dst = ashift $src by $amount;",
- [(set D:$dst, (sra D:$src, (ineg D16L:$amount)))]>;
-
-def SRA16i: F1<(outs D16:$dst), (ins D16:$src, i16imm:$amount),
- "$dst = $src >>> $amount;",
- [(set D16:$dst, (sra D16:$src, (i16 uimm4:$amount)))]>;
-
-def SRL16i: F1<(outs D16:$dst), (ins D16:$src, i16imm:$amount),
- "$dst = $src >> $amount;",
- [(set D16:$dst, (srl D16:$src, (i16 uimm4:$amount)))]>;
-
-// An arithmetic left shift saturates on overflow, so ashift is used here
-// with a negated amount to implement an arithmetic right shift.
-def SLA16r: F1<(outs D16:$dst), (ins D16:$src, D16L:$amount),
- "$dst = ashift $src BY $amount;",
- [(set D16:$dst, (srl D16:$src, (ineg D16L:$amount)))]>;
-
-def SLL16i: F1<(outs D16:$dst), (ins D16:$src, i16imm:$amount),
- "$dst = $src << $amount;",
- [(set D16:$dst, (shl D16:$src, (i16 uimm4:$amount)))]>;
-
-def SLL16r: F1<(outs D16:$dst), (ins D16:$src, D16L:$amount),
- "$dst = lshift $src by $amount;",
- [(set D16:$dst, (shl D16:$src, D16L:$amount))]>;
-
-}
-
-//===----------------------------------------------------------------------===//
-// Table C-17. Arithmetic Operations Instructions
-//===----------------------------------------------------------------------===//
-
-// TODO: ABS
-
-let Defs = [AZ, AN, AC0, V, VS] in {
-
-def ADD: F1<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst = $src1 + $src2;",
- [(set D:$dst, (add D:$src1, D:$src2))]>;
-
-def ADD16: F2<(outs D16:$dst), (ins D16:$src1, D16:$src2),
- "$dst = $src1 + $src2;",
- [(set D16:$dst, (add D16:$src1, D16:$src2))]>;
-
-let Constraints = "$src1 = $dst" in
-def ADDimm7: F1<(outs D:$dst), (ins D:$src1, i32imm:$src2),
- "$dst += $src2;",
- [(set D:$dst, (add D:$src1, imm7:$src2))]>;
-
-def SUB: F1<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst = $src1 - $src2;",
- [(set D:$dst, (sub D:$src1, D:$src2))]>;
-
-def SUB16: F2<(outs D16:$dst), (ins D16:$src1, D16:$src2),
- "$dst = $src1 - $src2;",
- [(set D16:$dst, (sub D16:$src1, D16:$src2))]>;
-
-}
-
-def : Pat<(addc D:$src1, D:$src2), (ADD D:$src1, D:$src2)>;
-def : Pat<(subc D:$src1, D:$src2), (SUB D:$src1, D:$src2)>;
-
-let Defs = [AZ, AN, V, VS] in
-def NEG: F1<(outs D:$dst), (ins D:$src),
- "$dst = -$src;",
- [(set D:$dst, (ineg D:$src))]>;
-
-// No pattern; it would confuse isel to have two i32 = i32+i32 patterns.
-def ADDpp: F1<(outs P:$dst), (ins P:$src1, P:$src2),
- "$dst = $src1 + $src2;", []>;
-
-let Constraints = "$src1 = $dst" in
-def ADDpp_imm7: F1<(outs P:$dst), (ins P:$src1, i32imm:$src2),
- "$dst += $src2;", []>;
-
-let Defs = [AZ, AN, V] in
-def ADD_RND20: F2<(outs D16:$dst), (ins D:$src1, D:$src2),
- "$dst = $src1 + $src2 (rnd20);", []>;
-
-let Defs = [V, VS] in {
-def MUL16: F2<(outs D16:$dst), (ins D16:$src1, D16:$src2),
- "$dst = $src1 * $src2 (is);",
- [(set D16:$dst, (mul D16:$src1, D16:$src2))]>;
-
-def MULHS16: F2<(outs D16:$dst), (ins D16:$src1, D16:$src2),
- "$dst = $src1 * $src2 (ih);",
- [(set D16:$dst, (mulhs D16:$src1, D16:$src2))]>;
-
-def MULhh32s: F2<(outs D:$dst), (ins D16:$src1, D16:$src2),
- "$dst = $src1 * $src2 (is);",
- [(set D:$dst, (mul (sext D16:$src1), (sext D16:$src2)))]>;
-
-def MULhh32u: F2<(outs D:$dst), (ins D16:$src1, D16:$src2),
- "$dst = $src1 * $src2 (is);",
- [(set D:$dst, (mul (zext D16:$src1), (zext D16:$src2)))]>;
-}
-
-
-let Constraints = "$src1 = $dst" in
-def MUL32: F1<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst *= $src2;",
- [(set D:$dst, (mul D:$src1, D:$src2))]>;
-
-//===----------------------------------------------------------------------===//
-// Table C-18. External Event Management Instructions
-//===----------------------------------------------------------------------===//
-
-def IDLE : F1<(outs), (ins), "idle;", [(int_bfin_idle)]>;
-def CSYNC : F1<(outs), (ins), "csync;", [(int_bfin_csync)]>;
-def SSYNC : F1<(outs), (ins), "ssync;", [(int_bfin_ssync)]>;
-def EMUEXCPT : F1<(outs), (ins), "emuexcpt;", []>;
-def CLI : F1<(outs D:$mask), (ins), "cli $mask;", []>;
-def STI : F1<(outs), (ins D:$mask), "sti $mask;", []>;
-def RAISE : F1<(outs), (ins i32imm:$itr), "raise $itr;", []>;
-def EXCPT : F1<(outs), (ins i32imm:$exc), "excpt $exc;", []>;
-def NOP : F1<(outs), (ins), "nop;", []>;
-def MNOP : F2<(outs), (ins), "mnop;", []>;
-def ABORT : F1<(outs), (ins), "abort;", []>;
-
-//===----------------------------------------------------------------------===//
-// Table C-19. Cache Control Instructions
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Table C-20. Video Pixel Operations Instructions
-//===----------------------------------------------------------------------===//
-
-def ALIGN8 : F2<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst = align8($src1, $src2);",
- [(set D:$dst, (or (shl D:$src1, (i32 24)),
- (srl D:$src2, (i32 8))))]>;
-
-def ALIGN16 : F2<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst = align16($src1, $src2);",
- [(set D:$dst, (or (shl D:$src1, (i32 16)),
- (srl D:$src2, (i32 16))))]>;
-
-def ALIGN24 : F2<(outs D:$dst), (ins D:$src1, D:$src2),
- "$dst = align16($src1, $src2);",
- [(set D:$dst, (or (shl D:$src1, (i32 8)),
- (srl D:$src2, (i32 24))))]>;
-
-def DISALGNEXCPT : F2<(outs), (ins), "disalgnexcpt;", []>;
-
-// TODO: BYTEOP3P, BYTEOP16P, BYTEOP1P, BYTEOP2P, BYTEOP16M, SAA,
-// BYTEPACK, BYTEUNPACK
-
-// Table C-21. Vector Operations Instructions
-
-// Patterns
-def : Pat<(BfinCall (i32 tglobaladdr:$dst)),
- (CALLa tglobaladdr:$dst)>;
-def : Pat<(BfinCall (i32 texternalsym:$dst)),
- (CALLa texternalsym:$dst)>;
-def : Pat<(i16 (trunc D:$src)),
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS D:$src, D)), lo16)>;
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp
deleted file mode 100644
index 7135676..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsicInfo.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-//===- BlackfinIntrinsicInfo.cpp - Intrinsic Information --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Blackfin implementation of TargetIntrinsicInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinIntrinsicInfo.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cstring>
-
-using namespace llvm;
-
-namespace bfinIntrinsic {
-
- enum ID {
- last_non_bfin_intrinsic = Intrinsic::num_intrinsics-1,
-#define GET_INTRINSIC_ENUM_VALUES
-#include "BlackfinGenIntrinsics.inc"
-#undef GET_INTRINSIC_ENUM_VALUES
- , num_bfin_intrinsics
- };
-
-}
-
-std::string BlackfinIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
- unsigned numTys) const {
- static const char *const names[] = {
-#define GET_INTRINSIC_NAME_TABLE
-#include "BlackfinGenIntrinsics.inc"
-#undef GET_INTRINSIC_NAME_TABLE
- };
-
- assert(!isOverloaded(IntrID) && "Blackfin intrinsics are not overloaded");
- if (IntrID < Intrinsic::num_intrinsics)
- return std::string(); // not a Blackfin intrinsic
- assert(IntrID < bfinIntrinsic::num_bfin_intrinsics && "Invalid intrinsic ID");
-
- std::string Result(names[IntrID - Intrinsic::num_intrinsics]);
- return Result;
-}
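The table above is indexed by the distance from Intrinsic::num_intrinsics, because target intrinsic IDs are numbered after the generic ones. A standalone sketch with stand-in values (the count and names below are illustrative, not the real enum):

  #include <cassert>
  #include <string>

  static const unsigned NumGenericIntrinsics = 1000;  // stand-in value
  static const char *const BfinNames[] = {
    "llvm.bfin.csync", "llvm.bfin.idle", "llvm.bfin.ssync",
  };

  static std::string getBfinName(unsigned IntrID) {
    if (IntrID < NumGenericIntrinsics)
      return std::string();  // generic intrinsic, not handled here
    return BfinNames[IntrID - NumGenericIntrinsics];
  }

  int main() {
    assert(getBfinName(NumGenericIntrinsics + 1) == std::string("llvm.bfin.idle"));
    assert(getBfinName(5).empty());
    return 0;
  }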
-
-unsigned
-BlackfinIntrinsicInfo::lookupName(const char *Name, unsigned Len) const {
- if (Len < 5 || Name[4] != '.' || Name[0] != 'l' || Name[1] != 'l'
- || Name[2] != 'v' || Name[3] != 'm')
- return 0; // All intrinsics start with 'llvm.'
-
-#define GET_FUNCTION_RECOGNIZER
-#include "BlackfinGenIntrinsics.inc"
-#undef GET_FUNCTION_RECOGNIZER
- return 0;
-}
-
-bool BlackfinIntrinsicInfo::isOverloaded(unsigned IntrID) const {
- // Overload Table
- const bool OTable[] = {
-#define GET_INTRINSIC_OVERLOAD_TABLE
-#include "BlackfinGenIntrinsics.inc"
-#undef GET_INTRINSIC_OVERLOAD_TABLE
- };
- if (IntrID == 0)
- return false;
- else
- return OTable[IntrID - Intrinsic::num_intrinsics];
-}
-
-/// This defines the "getAttributes(ID id)" method.
-#define GET_INTRINSIC_ATTRIBUTES
-#include "BlackfinGenIntrinsics.inc"
-#undef GET_INTRINSIC_ATTRIBUTES
-
-static FunctionType *getType(LLVMContext &Context, unsigned id) {
- Type *ResultTy = NULL;
- std::vector<Type*> ArgTys;
- bool IsVarArg = false;
-
-#define GET_INTRINSIC_GENERATOR
-#include "BlackfinGenIntrinsics.inc"
-#undef GET_INTRINSIC_GENERATOR
-
- return FunctionType::get(ResultTy, ArgTys, IsVarArg);
-}
-
-Function *BlackfinIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
- Type **Tys,
- unsigned numTy) const {
- assert(!isOverloaded(IntrID) && "Blackfin intrinsics are not overloaded");
- AttrListPtr AList = getAttributes((bfinIntrinsic::ID) IntrID);
- return cast<Function>(M->getOrInsertFunction(getName(IntrID),
- getType(M->getContext(), IntrID),
- AList));
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsicInfo.h b/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsicInfo.h
deleted file mode 100644
index f05db5a..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsicInfo.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===- BlackfinIntrinsicInfo.h - Blackfin Intrinsic Information -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Blackfin implementation of TargetIntrinsicInfo.
-//
-//===----------------------------------------------------------------------===//
-#ifndef BLACKFININTRINSICS_H
-#define BLACKFININTRINSICS_H
-
-#include "llvm/Target/TargetIntrinsicInfo.h"
-
-namespace llvm {
-
- class BlackfinIntrinsicInfo : public TargetIntrinsicInfo {
- public:
- std::string getName(unsigned IntrID, Type **Tys = 0,
- unsigned numTys = 0) const;
- unsigned lookupName(const char *Name, unsigned Len) const;
- bool isOverloaded(unsigned IID) const;
- Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
- unsigned numTys = 0) const;
- };
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsics.td b/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsics.td
deleted file mode 100644
index ce21b08..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinIntrinsics.td
+++ /dev/null
@@ -1,34 +0,0 @@
-//===- BlackfinIntrinsics.td - Defines Blackfin intrinsics -*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines all of the Blackfin-specific intrinsics.
-//
-//===----------------------------------------------------------------------===//
-
-let TargetPrefix = "bfin", isTarget = 1 in {
-
-//===----------------------------------------------------------------------===//
-// Core synchronisation etc.
-//
-// These intrinsics have side effects. Each represents a single instruction,
-// but workarounds are sometimes required depending on the CPU.
-
-// Execute csync instruction with workarounds
-def int_bfin_csync : GCCBuiltin<"__builtin_bfin_csync">,
- Intrinsic<[]>;
-
-// Execute ssync instruction with workarounds
-def int_bfin_ssync : GCCBuiltin<"__builtin_bfin_ssync">,
- Intrinsic<[]>;
-
-// Execute idle instruction with workarounds
-def int_bfin_idle : GCCBuiltin<"__builtin_bfin_idle">,
- Intrinsic<[]>;
-
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
deleted file mode 100644
index 0d415c5..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
+++ /dev/null
@@ -1,344 +0,0 @@
-//===- BlackfinRegisterInfo.cpp - Blackfin Register Information -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Blackfin implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Blackfin.h"
-#include "BlackfinRegisterInfo.h"
-#include "BlackfinSubtarget.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Type.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/STLExtras.h"
-
-#define GET_REGINFO_TARGET_DESC
-#include "BlackfinGenRegisterInfo.inc"
-
-using namespace llvm;
-
-BlackfinRegisterInfo::BlackfinRegisterInfo(BlackfinSubtarget &st,
- const TargetInstrInfo &tii)
- : BlackfinGenRegisterInfo(BF::RETS), Subtarget(st), TII(tii) {}
-
-const unsigned*
-BlackfinRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- using namespace BF;
- static const unsigned CalleeSavedRegs[] = {
- FP,
- R4, R5, R6, R7,
- P3, P4, P5,
- 0 };
- return CalleeSavedRegs;
-}
-
-BitVector
-BlackfinRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- using namespace BF;
- BitVector Reserved(getNumRegs());
- Reserved.set(AZ);
- Reserved.set(AN);
- Reserved.set(AQ);
- Reserved.set(AC0);
- Reserved.set(AC1);
- Reserved.set(AV0);
- Reserved.set(AV0S);
- Reserved.set(AV1);
- Reserved.set(AV1S);
- Reserved.set(V);
- Reserved.set(VS);
- Reserved.set(CYCLES).set(CYCLES2);
- Reserved.set(L0);
- Reserved.set(L1);
- Reserved.set(L2);
- Reserved.set(L3);
- Reserved.set(SP);
- Reserved.set(RETS);
- if (TFI->hasFP(MF))
- Reserved.set(FP);
- return Reserved;
-}
-
-bool BlackfinRegisterInfo::
-requiresRegisterScavenging(const MachineFunction &MF) const {
- return true;
-}
-
-// Emit instructions to add delta to D/P register. ScratchReg must be of the
-// same class as Reg (P).
-void BlackfinRegisterInfo::adjustRegister(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- DebugLoc DL,
- unsigned Reg,
- unsigned ScratchReg,
- int delta) const {
- if (!delta)
- return;
- if (isInt<7>(delta)) {
- BuildMI(MBB, I, DL, TII.get(BF::ADDpp_imm7), Reg)
- .addReg(Reg) // No kill on two-addr operand
- .addImm(delta);
- return;
- }
-
- // We must load delta into ScratchReg and add that.
- loadConstant(MBB, I, DL, ScratchReg, delta);
- if (BF::PRegClass.contains(Reg)) {
- assert(BF::PRegClass.contains(ScratchReg) &&
- "ScratchReg must be a P register");
- BuildMI(MBB, I, DL, TII.get(BF::ADDpp), Reg)
- .addReg(Reg, RegState::Kill)
- .addReg(ScratchReg, RegState::Kill);
- } else {
- assert(BF::DRegClass.contains(Reg) && "Reg must be a D or P register");
- assert(BF::DRegClass.contains(ScratchReg) &&
- "ScratchReg must be a D register");
- BuildMI(MBB, I, DL, TII.get(BF::ADD), Reg)
- .addReg(Reg, RegState::Kill)
- .addReg(ScratchReg, RegState::Kill);
- }
-}
-
-// Emit instructions to load a constant into D/P register
-void BlackfinRegisterInfo::loadConstant(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- DebugLoc DL,
- unsigned Reg,
- int value) const {
- if (isInt<7>(value)) {
- BuildMI(MBB, I, DL, TII.get(BF::LOADimm7), Reg).addImm(value);
- return;
- }
-
- if (isUInt<16>(value)) {
- BuildMI(MBB, I, DL, TII.get(BF::LOADuimm16), Reg).addImm(value);
- return;
- }
-
- if (isInt<16>(value)) {
- BuildMI(MBB, I, DL, TII.get(BF::LOADimm16), Reg).addImm(value);
- return;
- }
-
- // We must split into halves
- BuildMI(MBB, I, DL,
- TII.get(BF::LOAD16i), getSubReg(Reg, BF::hi16))
- .addImm((value >> 16) & 0xffff)
- .addReg(Reg, RegState::ImplicitDefine);
- BuildMI(MBB, I, DL,
- TII.get(BF::LOAD16i), getSubReg(Reg, BF::lo16))
- .addImm(value & 0xffff)
- .addReg(Reg, RegState::ImplicitKill)
- .addReg(Reg, RegState::ImplicitDefine);
-}
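loadConstant effectively picks the smallest immediate-load form that can represent the value, falling back to two 16-bit half loads. The selection ladder, sketched standalone (the opcode strings are just labels here, not real API):

  #include <cassert>
  #include <cstdint>
  #include <string>

  static bool isIntN(int bits, int64_t v) {
    return v >= -(int64_t(1) << (bits - 1)) && v < (int64_t(1) << (bits - 1));
  }
  static bool isUIntN(int bits, int64_t v) {
    return v >= 0 && v < (int64_t(1) << bits);
  }

  // Mirrors the opcode choice in loadConstant above.
  static std::string pickLoadOpcode(int value) {
    if (isIntN(7, value))   return "LOADimm7";
    if (isUIntN(16, value)) return "LOADuimm16";
    if (isIntN(16, value))  return "LOADimm16";
    return "LOAD16i x2";    // split into hi16/lo16 half loads
  }

  int main() {
    assert(pickLoadOpcode(-5) == "LOADimm7");
    assert(pickLoadOpcode(40000) == "LOADuimm16");
    assert(pickLoadOpcode(-30000) == "LOADimm16");
    assert(pickLoadOpcode(0x12345678) == "LOAD16i x2");
    return 0;
  }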
-
-void BlackfinRegisterInfo::
-eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- if (!TFI->hasReservedCallFrame(MF)) {
- int64_t Amount = I->getOperand(0).getImm();
- if (Amount != 0) {
- assert(Amount%4 == 0 && "Unaligned call frame size");
- if (I->getOpcode() == BF::ADJCALLSTACKDOWN) {
- adjustRegister(MBB, I, I->getDebugLoc(), BF::SP, BF::P1, -Amount);
- } else {
- assert(I->getOpcode() == BF::ADJCALLSTACKUP &&
- "Unknown call frame pseudo instruction");
- adjustRegister(MBB, I, I->getDebugLoc(), BF::SP, BF::P1, Amount);
- }
- }
- }
- MBB.erase(I);
-}
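When no reserved call frame exists, both call-frame pseudos reduce to plain SP adjustments; a small sketch of that arithmetic (standalone C++, values illustrative):

  #include <cassert>

  // ADJCALLSTACKDOWN moves SP down by the (4-byte aligned) amount;
  // ADJCALLSTACKUP moves it back up.
  static int adjustSP(int sp, int amount, bool down) {
    assert(amount % 4 == 0 && "Unaligned call frame size");
    return down ? sp - amount : sp + amount;
  }

  int main() {
    int sp = 1024;
    sp = adjustSP(sp, 16, true);   // ADJCALLSTACKDOWN 16
    assert(sp == 1008);
    sp = adjustSP(sp, 16, false);  // ADJCALLSTACKUP 16
    assert(sp == 1024);
    return 0;
  }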
-
-/// findScratchRegister - Find a 'free' register. Try for a call-clobbered
-/// register first and then a spilled callee-saved register if that fails.
-static unsigned findScratchRegister(MachineBasicBlock::iterator II,
- RegScavenger *RS,
- const TargetRegisterClass *RC,
- int SPAdj) {
- assert(RS && "Register scavenging must be on");
- unsigned Reg = RS->FindUnusedReg(RC);
- if (Reg == 0)
- Reg = RS->scavengeRegister(RC, II, SPAdj);
- return Reg;
-}
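The fallback order matters: a genuinely unused register costs nothing, while scavenging may force a spill. A standalone sketch of the two-step lookup (the stand-in functions only illustrate the control flow, not the RegScavenger API):

  #include <cassert>

  static unsigned findUnusedReg() { return 0; }   // pretend nothing is free
  static unsigned scavengeReg()   { return 42; }  // pretend we scavenged reg 42

  // Mirrors findScratchRegister: try for a free register first, then
  // fall back to scavenging.
  static unsigned findScratch() {
    unsigned Reg = findUnusedReg();
    if (Reg == 0)
      Reg = scavengeReg();
    return Reg;
  }

  int main() {
    assert(findScratch() == 42);
    return 0;
  }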
-
-void
-BlackfinRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, RegScavenger *RS) const {
- MachineInstr &MI = *II;
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
- DebugLoc DL = MI.getDebugLoc();
-
- unsigned FIPos;
- for (FIPos=0; !MI.getOperand(FIPos).isFI(); ++FIPos) {
- assert(FIPos < MI.getNumOperands() &&
- "Instr doesn't have FrameIndex operand!");
- }
- int FrameIndex = MI.getOperand(FIPos).getIndex();
- assert(FIPos+1 < MI.getNumOperands() && MI.getOperand(FIPos+1).isImm());
- int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex)
- + MI.getOperand(FIPos+1).getImm();
- unsigned BaseReg = BF::FP;
- if (TFI->hasFP(MF)) {
- assert(SPAdj==0 && "Unexpected SP adjust in function with frame pointer");
- } else {
- BaseReg = BF::SP;
- Offset += MF.getFrameInfo()->getStackSize() + SPAdj;
- }
-
- bool isStore = false;
-
- switch (MI.getOpcode()) {
- case BF::STORE32fi:
- isStore = true;
- case BF::LOAD32fi: {
- assert(Offset%4 == 0 && "Unaligned i32 stack access");
- assert(FIPos==1 && "Bad frame index operand");
- MI.getOperand(FIPos).ChangeToRegister(BaseReg, false);
- MI.getOperand(FIPos+1).setImm(Offset);
- if (isUInt<6>(Offset)) {
- MI.setDesc(TII.get(isStore
- ? BF::STORE32p_uimm6m4
- : BF::LOAD32p_uimm6m4));
- return;
- }
- if (BaseReg == BF::FP && isUInt<7>(-Offset)) {
- MI.setDesc(TII.get(isStore
- ? BF::STORE32fp_nimm7m4
- : BF::LOAD32fp_nimm7m4));
- MI.getOperand(FIPos+1).setImm(-Offset);
- return;
- }
- if (isInt<18>(Offset)) {
- MI.setDesc(TII.get(isStore
- ? BF::STORE32p_imm18m4
- : BF::LOAD32p_imm18m4));
- return;
- }
- // Use RegScavenger to calculate proper offset...
- MI.dump();
- llvm_unreachable("Stack frame offset too big");
- break;
- }
- case BF::ADDpp: {
- assert(MI.getOperand(0).isReg() && "ADD instruction needs a register");
- unsigned DestReg = MI.getOperand(0).getReg();
- // We need to produce a stack offset in a P register. We emit:
- // P0 = offset;
- // P0 = BR + P0;
- assert(FIPos==1 && "Bad frame index operand");
- loadConstant(MBB, II, DL, DestReg, Offset);
- MI.getOperand(1).ChangeToRegister(DestReg, false, false, true);
- MI.getOperand(2).ChangeToRegister(BaseReg, false);
- break;
- }
- case BF::STORE16fi:
- isStore = true;
- case BF::LOAD16fi: {
- assert(Offset%2 == 0 && "Unaligned i16 stack access");
- assert(FIPos==1 && "Bad frame index operand");
- // We need a P register to use as an address
- unsigned ScratchReg = findScratchRegister(II, RS, &BF::PRegClass, SPAdj);
- assert(ScratchReg && "Could not scavenge register");
- loadConstant(MBB, II, DL, ScratchReg, Offset);
- BuildMI(MBB, II, DL, TII.get(BF::ADDpp), ScratchReg)
- .addReg(ScratchReg, RegState::Kill)
- .addReg(BaseReg);
- MI.setDesc(TII.get(isStore ? BF::STORE16pi : BF::LOAD16pi));
- MI.getOperand(1).ChangeToRegister(ScratchReg, false, false, true);
- MI.RemoveOperand(2);
- break;
- }
- case BF::STORE8fi: {
- // This is an AnyCC spill; we need a scratch register.
- assert(FIPos==1 && "Bad frame index operand");
- MachineOperand SpillReg = MI.getOperand(0);
- unsigned ScratchReg = findScratchRegister(II, RS, &BF::DRegClass, SPAdj);
- assert(ScratchReg && "Could not scavenge register");
- if (SpillReg.getReg()==BF::NCC) {
- BuildMI(MBB, II, DL, TII.get(BF::MOVENCC_z), ScratchReg)
- .addOperand(SpillReg);
- BuildMI(MBB, II, DL, TII.get(BF::BITTGL), ScratchReg)
- .addReg(ScratchReg).addImm(0);
- } else {
- BuildMI(MBB, II, DL, TII.get(BF::MOVECC_zext), ScratchReg)
- .addOperand(SpillReg);
- }
- // STORE D
- MI.setDesc(TII.get(BF::STORE8p_imm16));
- MI.getOperand(0).ChangeToRegister(ScratchReg, false, false, true);
- MI.getOperand(FIPos).ChangeToRegister(BaseReg, false);
- MI.getOperand(FIPos+1).setImm(Offset);
- break;
- }
- case BF::LOAD8fi: {
- // This is an AnyCC restore; we need a scratch register.
- assert(FIPos==1 && "Bad frame index operand");
- MachineOperand SpillReg = MI.getOperand(0);
- unsigned ScratchReg = findScratchRegister(II, RS, &BF::DRegClass, SPAdj);
- assert(ScratchReg && "Could not scavenge register");
- MI.setDesc(TII.get(BF::LOAD32p_imm16_8z));
- MI.getOperand(0).ChangeToRegister(ScratchReg, true);
- MI.getOperand(FIPos).ChangeToRegister(BaseReg, false);
- MI.getOperand(FIPos+1).setImm(Offset);
- ++II;
- if (SpillReg.getReg()==BF::CC) {
- // CC = D
- BuildMI(MBB, II, DL, TII.get(BF::MOVECC_nz), BF::CC)
- .addReg(ScratchReg, RegState::Kill);
- } else {
- // Restore NCC (CC = D==0)
- BuildMI(MBB, II, DL, TII.get(BF::SETEQri_not), BF::NCC)
- .addReg(ScratchReg, RegState::Kill)
- .addImm(0);
- }
- break;
- }
- default:
- llvm_unreachable("Cannot eliminate frame index");
- break;
- }
-}
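For the i32 stack-access case, the offset's range decides the addressing form. The decision ladder, sketched standalone (opcode names are just labels; the ranges follow the isUInt/isInt checks above):

  #include <cassert>
  #include <cstdint>
  #include <string>

  static bool isUIntN(int bits, int64_t v) {
    return v >= 0 && v < (int64_t(1) << bits);
  }
  static bool isIntN(int bits, int64_t v) {
    return v >= -(int64_t(1) << (bits - 1)) && v < (int64_t(1) << (bits - 1));
  }

  // Mirrors the STORE32fi/LOAD32fi case in eliminateFrameIndex above.
  static std::string pickFormFor(int offset, bool baseIsFP, bool isStore) {
    assert(offset % 4 == 0 && "Unaligned i32 stack access");
    if (isUIntN(6, offset))
      return isStore ? "STORE32p_uimm6m4" : "LOAD32p_uimm6m4";
    if (baseIsFP && isUIntN(7, -offset))
      return isStore ? "STORE32fp_nimm7m4" : "LOAD32fp_nimm7m4";
    if (isIntN(18, offset))
      return isStore ? "STORE32p_imm18m4" : "LOAD32p_imm18m4";
    return "<offset too big>";  // the backend reports this as unreachable
  }

  int main() {
    assert(pickFormFor(60, false, false) == "LOAD32p_uimm6m4");
    assert(pickFormFor(-64, true, true) == "STORE32fp_nimm7m4");
    assert(pickFormFor(4096, false, false) == "LOAD32p_imm18m4");
    return 0;
  }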
-
-unsigned
-BlackfinRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- return TFI->hasFP(MF) ? BF::FP : BF::SP;
-}
-
-unsigned BlackfinRegisterInfo::getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
- return 0;
-}
-
-unsigned BlackfinRegisterInfo::getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
- return 0;
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h
deleted file mode 100644
index 6ac22af..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h
+++ /dev/null
@@ -1,77 +0,0 @@
-//===- BlackfinRegisterInfo.h - Blackfin Register Information --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Blackfin implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFINREGISTERINFO_H
-#define BLACKFINREGISTERINFO_H
-
-#include "llvm/Target/TargetRegisterInfo.h"
-
-#define GET_REGINFO_HEADER
-#include "BlackfinGenRegisterInfo.inc"
-
-namespace llvm {
-
- class BlackfinSubtarget;
- class TargetInstrInfo;
- class Type;
-
- struct BlackfinRegisterInfo : public BlackfinGenRegisterInfo {
- BlackfinSubtarget &Subtarget;
- const TargetInstrInfo &TII;
-
- BlackfinRegisterInfo(BlackfinSubtarget &st, const TargetInstrInfo &tii);
-
- /// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
-
- BitVector getReservedRegs(const MachineFunction &MF) const;
-
- // getSubReg implemented by tablegen
-
- const TargetRegisterClass *getPointerRegClass(unsigned Kind = 0) const {
- return &BF::PRegClass;
- }
-
- bool requiresRegisterScavenging(const MachineFunction &MF) const;
-
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
-
- void eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, RegScavenger *RS = NULL) const;
-
- unsigned getFrameRegister(const MachineFunction &MF) const;
-
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
-
- // Utility functions
- void adjustRegister(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- DebugLoc DL,
- unsigned Reg,
- unsigned ScratchReg,
- int delta) const;
- void loadConstant(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- DebugLoc DL,
- unsigned Reg,
- int value) const;
- };
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td
deleted file mode 100644
index 1c42205..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.td
+++ /dev/null
@@ -1,277 +0,0 @@
-//===- BlackfinRegisterInfo.td - Blackfin Register defs ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Declarations that describe the Blackfin register file
-//===----------------------------------------------------------------------===//
-
-// Subregs are:
-// 1: .L
-// 2: .H
-// 3: .W (32 low bits of 40-bit accu)
-let Namespace = "BF" in {
-def lo16 : SubRegIndex;
-def hi16 : SubRegIndex;
-def lo32 : SubRegIndex;
-def hi32 : SubRegIndex;
-}
-
-// Registers are identified with 3-bit group and 3-bit ID numbers.
-class BlackfinReg<string n> : Register<n> {
- field bits<3> Group;
- field bits<3> Num;
- let Namespace = "BF";
-}
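The 3-bit group / 3-bit number pair suggests a compact 6-bit identifier; a sketch of one such packing (the layout shown is an assumption for illustration, not the Blackfin encoding spec):

  #include <cassert>
  #include <cstdint>

  // Pack a 3-bit group and 3-bit register number into one byte.
  static uint8_t encodeReg(uint8_t group, uint8_t num) {
    assert(group < 8 && num < 8);
    return static_cast<uint8_t>((group << 3) | num);
  }

  int main() {
    // e.g. SP is group 1, number 6 in the definitions below.
    assert(encodeReg(1, 6) == 0x0E);
    return 0;
  }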
-
-// Rc - 1-bit registers
-class Rc<bits<5> bitno, string n> : BlackfinReg<n> {
- field bits<5> BitNum = bitno;
-}
-
-// Rs - 16-bit integer registers
-class Rs<bits<3> group, bits<3> num, bits<1> hi, string n> : BlackfinReg<n> {
- let Group = group;
- let Num = num;
- field bits<1> High = hi;
-}
-
-// Ri - 32-bit integer registers with subregs
-class Ri<bits<3> group, bits<3> num, string n> : BlackfinReg<n> {
- let Group = group;
- let Num = num;
-}
-
-// Ra 40-bit accumulator registers
-class Ra<bits<3> num, string n, list<Register> subs> : BlackfinReg<n> {
- let SubRegs = subs;
- let SubRegIndices = [hi32, lo32];
- let Group = 4;
- let Num = num;
-}
-
-// Two halves of 32-bit register
-multiclass Rss<bits<3> group, bits<3> num, string n> {
- def H : Rs<group, num, 1, !strconcat(n, ".h")>;
- def L : Rs<group, num, 0, !strconcat(n, ".l")>;
-}
-
-// Rii - 32-bit integer registers with subregs
-class Rii<bits<3> group, bits<3> num, string n, list<Register> subs>
- : BlackfinReg<n> {
- let SubRegs = subs;
- let SubRegIndices = [hi16, lo16];
- let Group = group;
- let Num = num;
-}
-
-// Status bits are all part of ASTAT
-def AZ : Rc<0, "az">;
-def AN : Rc<1, "an">;
-def CC : Rc<5, "cc">, DwarfRegNum<[34]>;
-def NCC : Rc<5, "!cc"> { let Aliases = [CC]; }
-def AQ : Rc<6, "aq">;
-def AC0 : Rc<12, "ac0">;
-def AC1 : Rc<13, "ac1">;
-def AV0 : Rc<16, "av0">;
-def AV0S : Rc<17, "av0s">;
-def AV1 : Rc<18, "av1">;
-def AV1S : Rc<19, "av1s">;
-def V : Rc<24, "v">;
-def VS : Rc<25, "vs">;
-// Skipped non-status bits: AC0_COPY, V_COPY, RND_MOD
-
-// Group 0: Integer registers
-defm R0 : Rss<0, 0, "r0">;
-def R0 : Rii<0, 0, "r0", [R0H, R0L]>, DwarfRegNum<[0]>;
-defm R1 : Rss<0, 1, "r1">;
-def R1 : Rii<0, 1, "r1", [R1H, R1L]>, DwarfRegNum<[1]>;
-defm R2 : Rss<0, 2, "r2">;
-def R2 : Rii<0, 2, "r2", [R2H, R2L]>, DwarfRegNum<[2]>;
-defm R3 : Rss<0, 3, "r3">;
-def R3 : Rii<0, 3, "r3", [R3H, R3L]>, DwarfRegNum<[3]>;
-defm R4 : Rss<0, 4, "r4">;
-def R4 : Rii<0, 4, "r4", [R4H, R4L]>, DwarfRegNum<[4]>;
-defm R5 : Rss<0, 5, "r5">;
-def R5 : Rii<0, 5, "r5", [R5H, R5L]>, DwarfRegNum<[5]>;
-defm R6 : Rss<0, 6, "r6">;
-def R6 : Rii<0, 6, "r6", [R6H, R6L]>, DwarfRegNum<[6]>;
-defm R7 : Rss<0, 7, "r7">;
-def R7 : Rii<0, 7, "r7", [R7H, R7L]>, DwarfRegNum<[7]>;
-
-// Group 1: Pointer registers
-defm P0 : Rss<1, 0, "p0">;
-def P0 : Rii<1, 0, "p0", [P0H, P0L]>, DwarfRegNum<[8]>;
-defm P1 : Rss<1, 1, "p1">;
-def P1 : Rii<1, 1, "p1", [P1H, P1L]>, DwarfRegNum<[9]>;
-defm P2 : Rss<1, 2, "p2">;
-def P2 : Rii<1, 2, "p2", [P2H, P2L]>, DwarfRegNum<[10]>;
-defm P3 : Rss<1, 3, "p3">;
-def P3 : Rii<1, 3, "p3", [P3H, P3L]>, DwarfRegNum<[11]>;
-defm P4 : Rss<1, 4, "p4">;
-def P4 : Rii<1, 4, "p4", [P4H, P4L]>, DwarfRegNum<[12]>;
-defm P5 : Rss<1, 5, "p5">;
-def P5 : Rii<1, 5, "p5", [P5H, P5L]>, DwarfRegNum<[13]>;
-defm SP : Rss<1, 6, "sp">;
-def SP : Rii<1, 6, "sp", [SPH, SPL]>, DwarfRegNum<[14]>;
-defm FP : Rss<1, 7, "fp">;
-def FP : Rii<1, 7, "fp", [FPH, FPL]>, DwarfRegNum<[15]>;
-
-// Group 2: Index registers
-defm I0 : Rss<2, 0, "i0">;
-def I0 : Rii<2, 0, "i0", [I0H, I0L]>, DwarfRegNum<[16]>;
-defm I1 : Rss<2, 1, "i1">;
-def I1 : Rii<2, 1, "i1", [I1H, I1L]>, DwarfRegNum<[17]>;
-defm I2 : Rss<2, 2, "i2">;
-def I2 : Rii<2, 2, "i2", [I2H, I2L]>, DwarfRegNum<[18]>;
-defm I3 : Rss<2, 3, "i3">;
-def I3 : Rii<2, 3, "i3", [I3H, I3L]>, DwarfRegNum<[19]>;
-defm M0 : Rss<2, 4, "m0">;
-def M0 : Rii<2, 4, "m0", [M0H, M0L]>, DwarfRegNum<[20]>;
-defm M1 : Rss<2, 5, "m1">;
-def M1 : Rii<2, 5, "m1", [M1H, M1L]>, DwarfRegNum<[21]>;
-defm M2 : Rss<2, 6, "m2">;
-def M2 : Rii<2, 6, "m2", [M2H, M2L]>, DwarfRegNum<[22]>;
-defm M3 : Rss<2, 7, "m3">;
-def M3 : Rii<2, 7, "m3", [M3H, M3L]>, DwarfRegNum<[23]>;
-
-// Group 3: Cyclic indexing registers
-defm B0 : Rss<3, 0, "b0">;
-def B0 : Rii<3, 0, "b0", [B0H, B0L]>, DwarfRegNum<[24]>;
-defm B1 : Rss<3, 1, "b1">;
-def B1 : Rii<3, 1, "b1", [B1H, B1L]>, DwarfRegNum<[25]>;
-defm B2 : Rss<3, 2, "b2">;
-def B2 : Rii<3, 2, "b2", [B2H, B2L]>, DwarfRegNum<[26]>;
-defm B3 : Rss<3, 3, "b3">;
-def B3 : Rii<3, 3, "b3", [B3H, B3L]>, DwarfRegNum<[27]>;
-defm L0 : Rss<3, 4, "l0">;
-def L0 : Rii<3, 4, "l0", [L0H, L0L]>, DwarfRegNum<[28]>;
-defm L1 : Rss<3, 5, "l1">;
-def L1 : Rii<3, 5, "l1", [L1H, L1L]>, DwarfRegNum<[29]>;
-defm L2 : Rss<3, 6, "l2">;
-def L2 : Rii<3, 6, "l2", [L2H, L2L]>, DwarfRegNum<[30]>;
-defm L3 : Rss<3, 7, "l3">;
-def L3 : Rii<3, 7, "l3", [L3H, L3L]>, DwarfRegNum<[31]>;
-
-// Accumulators
-def A0X : Ri <4, 0, "a0.x">;
-defm A0 : Rss<4, 1, "a0">;
-def A0W : Rii<4, 1, "a0.w", [A0H, A0L]>, DwarfRegNum<[32]>;
-def A0 : Ra <0, "a0", [A0X, A0W]>;
-
-def A1X : Ri <4, 2, "a1.x">;
-defm A1 : Rss<4, 3, "a1">;
-def A1W : Rii<4, 3, "a1.w", [A1H, A1L]>, DwarfRegNum<[33]>;
-def A1 : Ra <2, "a1", [A1X, A1W]>;
-
-def RETS : Ri<4, 7, "rets">, DwarfRegNum<[35]>;
-def RETI : Ri<7, 3, "reti">, DwarfRegNum<[36]>;
-def RETX : Ri<7, 4, "retx">, DwarfRegNum<[37]>;
-def RETN : Ri<7, 5, "retn">, DwarfRegNum<[38]>;
-def RETE : Ri<7, 6, "rete">, DwarfRegNum<[39]>;
-
-def ASTAT : Ri<4, 6, "astat">, DwarfRegNum<[40]> {
- let Aliases = [AZ, AN, CC, NCC, AQ, AC0, AC1, AV0, AV0S, AV1, AV1S, V, VS];
-}
-
-def SEQSTAT : Ri<7, 1, "seqstat">, DwarfRegNum<[41]>;
-def USP : Ri<7, 0, "usp">, DwarfRegNum<[42]>;
-def EMUDAT : Ri<7, 7, "emudat">, DwarfRegNum<[43]>;
-def SYSCFG : Ri<7, 2, "syscfg">;
-def CYCLES : Ri<6, 6, "cycles">;
-def CYCLES2 : Ri<6, 7, "cycles2">;
-
-// Hardware loops
-def LT0 : Ri<6, 1, "lt0">, DwarfRegNum<[44]>;
-def LT1 : Ri<6, 4, "lt1">, DwarfRegNum<[45]>;
-def LC0 : Ri<6, 0, "lc0">, DwarfRegNum<[46]>;
-def LC1 : Ri<6, 3, "lc1">, DwarfRegNum<[47]>;
-def LB0 : Ri<6, 2, "lb0">, DwarfRegNum<[48]>;
-def LB1 : Ri<6, 5, "lb1">, DwarfRegNum<[49]>;
-
-// Register classes.
-def D16L : RegisterClass<"BF", [i16], 16, (sequence "R%uL", 0, 7)>;
-
-def D16H : RegisterClass<"BF", [i16], 16, (sequence "R%uH", 0, 7)>;
-
-def D16 : RegisterClass<"BF", [i16], 16, (add D16L, D16H)>;
-
-def P16L : RegisterClass<"BF", [i16], 16,
- (add (sequence "P%uL", 0, 5), SPL, FPL)>;
-
-def P16H : RegisterClass<"BF", [i16], 16,
- (add (sequence "P%uH", 0, 5), SPH, FPH)>;
-
-def P16 : RegisterClass<"BF", [i16], 16, (add P16L, P16H)>;
-
-def DP16 : RegisterClass<"BF", [i16], 16, (add D16, P16)>;
-
-def DP16L : RegisterClass<"BF", [i16], 16, (add D16L, P16L)>;
-
-def DP16H : RegisterClass<"BF", [i16], 16, (add D16H, P16H)>;
-
-def GR16 : RegisterClass<"BF", [i16], 16,
- (add DP16,
- I0H, I0L, I1H, I1L, I2H, I2L, I3H, I3L,
- M0H, M0L, M1H, M1L, M2H, M2L, M3H, M3L,
- B0H, B0L, B1H, B1L, B2H, B2L, B3H, B3L,
- L0H, L0L, L1H, L1L, L2H, L2L, L3H, L3L)>;
-
-def D : RegisterClass<"BF", [i32], 32, (sequence "R%u", 0, 7)> {
- let SubRegClasses = [(D16L lo16), (D16H hi16)];
-}
-
-def P : RegisterClass<"BF", [i32], 32, (add (sequence "P%u", 0, 5), FP, SP)> {
- let SubRegClasses = [(P16L lo16), (P16H hi16)];
-}
-
-def DP : RegisterClass<"BF", [i32], 32, (add D, P)> {
- let SubRegClasses = [(DP16L lo16), (DP16H hi16)];
-}
-
-def I : RegisterClass<"BF", [i32], 32, (add I0, I1, I2, I3)>;
-def M : RegisterClass<"BF", [i32], 32, (add M0, M1, M2, M3)>;
-def B : RegisterClass<"BF", [i32], 32, (add B0, B1, B2, B3)>;
-def L : RegisterClass<"BF", [i32], 32, (add L0, L1, L2, L3)>;
-
-def GR : RegisterClass<"BF", [i32], 32, (add DP, I, M, B, L)>;
-
-def ALL : RegisterClass<"BF", [i32], 32,
- (add GR,
- A0X, A0W, A1X, A1W, ASTAT, RETS,
- LC0, LT0, LB0, LC1, LT1, LB1, CYCLES, CYCLES2,
- USP, SEQSTAT, SYSCFG, RETI, RETX, RETN, RETE, EMUDAT)>;
-
-def PI : RegisterClass<"BF", [i32], 32, (add P, I)>;
-
-// We are going to pretend that CC and !CC are 32-bit registers, even though
-// they can only hold 1 bit.
-let CopyCost = -1, Size = 8 in {
-def JustCC : RegisterClass<"BF", [i32], 8, (add CC)>;
-def NotCC : RegisterClass<"BF", [i32], 8, (add NCC)>;
-def AnyCC : RegisterClass<"BF", [i32], 8, (add CC, NCC)>;
-def StatBit : RegisterClass<"BF", [i1], 8,
- (add AZ, AN, CC, AQ, AC0, AC1, AV0, AV0S, AV1, AV1S, V, VS)>;
-}
-
-// Should be i40, but that isn't defined. It is not a legal type yet anyway.
-def Accu : RegisterClass<"BF", [i64], 64, (add A0, A1)>;
-
-// Register classes to match inline asm constraints.
-def zCons : RegisterClass<"BF", [i32], 32, (add P0, P1, P2)>;
-def DCons : RegisterClass<"BF", [i32], 32, (add R0, R2, R4, R6)>;
-def WCons : RegisterClass<"BF", [i32], 32, (add R1, R3, R5, R7)>;
-def cCons : RegisterClass<"BF", [i32], 32, (add I0, I1, I2, I3,
- B0, B1, B2, B3,
- L0, L1, L2, L3)>;
-def tCons : RegisterClass<"BF", [i32], 32, (add LT0, LT1)>;
-def uCons : RegisterClass<"BF", [i32], 32, (add LB0, LB1)>;
-def kCons : RegisterClass<"BF", [i32], 32, (add LC0, LC1)>;
-def yCons : RegisterClass<"BF", [i32], 32, (add RETS, RETN, RETI, RETX,
- RETE, ASTAT, SEQSTAT,
- USP)>;
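
Editor's note (not part of the patch): the DwarfRegNum<[...]> annotations in the
deleted register descriptions above are what debug-info consumers use to map a
Blackfin register onto its DWARF register number. A minimal hedged sketch of that
lookup, assuming the generated BF:: register enum and the standard MCRegisterInfo
query, neither of which is shown in this excerpt:

  #include "llvm/MC/MCRegisterInfo.h"

  // Returns e.g. 16 for BF::I0, which was assigned DwarfRegNum<[16]> above.
  static int blackfinDwarfNum(const llvm::MCRegisterInfo &MRI, unsigned Reg) {
    return MRI.getDwarfRegNum(Reg, /*isEH=*/false);
  }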
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinSelectionDAGInfo.cpp
deleted file mode 100644
index a21f696..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinSelectionDAGInfo.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//===-- BlackfinSelectionDAGInfo.cpp - Blackfin SelectionDAG Info ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the BlackfinSelectionDAGInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "blackfin-selectiondag-info"
-#include "BlackfinTargetMachine.h"
-using namespace llvm;
-
-BlackfinSelectionDAGInfo::BlackfinSelectionDAGInfo(
- const BlackfinTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
-
-BlackfinSelectionDAGInfo::~BlackfinSelectionDAGInfo() {
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinSelectionDAGInfo.h b/contrib/llvm/lib/Target/Blackfin/BlackfinSelectionDAGInfo.h
deleted file mode 100644
index f1ce348..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinSelectionDAGInfo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===-- BlackfinSelectionDAGInfo.h - Blackfin SelectionDAG Info -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the Blackfin subclass for TargetSelectionDAGInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFINSELECTIONDAGINFO_H
-#define BLACKFINSELECTIONDAGINFO_H
-
-#include "llvm/Target/TargetSelectionDAGInfo.h"
-
-namespace llvm {
-
-class BlackfinTargetMachine;
-
-class BlackfinSelectionDAGInfo : public TargetSelectionDAGInfo {
-public:
- explicit BlackfinSelectionDAGInfo(const BlackfinTargetMachine &TM);
- ~BlackfinSelectionDAGInfo();
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinSubtarget.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinSubtarget.cpp
deleted file mode 100644
index 0bdce09..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinSubtarget.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//===- BlackfinSubtarget.cpp - BLACKFIN Subtarget Information -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the blackfin specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinSubtarget.h"
-#include "Blackfin.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_SUBTARGETINFO_TARGET_DESC
-#define GET_SUBTARGETINFO_CTOR
-#include "BlackfinGenSubtargetInfo.inc"
-
-using namespace llvm;
-
-BlackfinSubtarget::BlackfinSubtarget(const std::string &TT,
- const std::string &CPU,
- const std::string &FS)
- : BlackfinGenSubtargetInfo(TT, CPU, FS), sdram(false),
- icplb(false),
- wa_mi_shift(false),
- wa_csync(false),
- wa_specld(false),
- wa_mmr_stall(false),
- wa_lcregs(false),
- wa_hwloop(false),
- wa_ind_call(false),
- wa_killed_mmr(false),
- wa_rets(false)
-{
- std::string CPUName = CPU;
- if (CPUName.empty())
- CPUName = "generic";
- // Parse features string.
- ParseSubtargetFeatures(CPUName, FS);
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinSubtarget.h b/contrib/llvm/lib/Target/Blackfin/BlackfinSubtarget.h
deleted file mode 100644
index 1a01a81..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinSubtarget.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===- BlackfinSubtarget.h - Define Subtarget for the Blackfin -*- C++ -*-====//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the BLACKFIN specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFIN_SUBTARGET_H
-#define BLACKFIN_SUBTARGET_H
-
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include <string>
-
-#define GET_SUBTARGETINFO_HEADER
-#include "BlackfinGenSubtargetInfo.inc"
-
-namespace llvm {
-class StringRef;
-
- class BlackfinSubtarget : public BlackfinGenSubtargetInfo {
- bool sdram;
- bool icplb;
- bool wa_mi_shift;
- bool wa_csync;
- bool wa_specld;
- bool wa_mmr_stall;
- bool wa_lcregs;
- bool wa_hwloop;
- bool wa_ind_call;
- bool wa_killed_mmr;
- bool wa_rets;
- public:
- BlackfinSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS);
-
- /// ParseSubtargetFeatures - Parses features string setting specified
- /// subtarget options. Definition of function is auto generated by tblgen.
- void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
- };
-
-} // end namespace llvm
-
-#endif
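
Editor's note (not part of the patch): the wa_* booleans above are silicon-anomaly
workaround switches that ParseSubtargetFeatures() sets from the feature string. A
hedged construction sketch mirroring the deleted BlackfinSubtarget.cpp: an empty
CPU string falls back to "generic" before the features are parsed.

  // Triple, CPU, and FS values are illustrative only.
  BlackfinSubtarget ST("bfin-unknown-linux", /*CPU=*/"", /*FS=*/"");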
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp
deleted file mode 100644
index a4ae46b..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-//===-- BlackfinTargetMachine.cpp - Define TargetMachine for Blackfin -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinTargetMachine.h"
-#include "Blackfin.h"
-#include "llvm/PassManager.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-extern "C" void LLVMInitializeBlackfinTarget() {
- RegisterTargetMachine<BlackfinTargetMachine> X(TheBlackfinTarget);
-}
-
-BlackfinTargetMachine::BlackfinTargetMachine(const Target &T,
- StringRef TT,
- StringRef CPU,
- StringRef FS,
- Reloc::Model RM,
- CodeModel::Model CM)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
- DataLayout("e-p:32:32-i64:32-f64:32-n32"),
- Subtarget(TT, CPU, FS),
- TLInfo(*this),
- TSInfo(*this),
- InstrInfo(Subtarget),
- FrameLowering(Subtarget) {
-}
-
-bool BlackfinTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createBlackfinISelDag(*this, OptLevel));
- return false;
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h b/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h
deleted file mode 100644
index c85337fe2..0000000
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinTargetMachine.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//===-- BlackfinTargetMachine.h - TargetMachine for Blackfin ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the Blackfin specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFINTARGETMACHINE_H
-#define BLACKFINTARGETMACHINE_H
-
-#include "BlackfinInstrInfo.h"
-#include "BlackfinIntrinsicInfo.h"
-#include "BlackfinISelLowering.h"
-#include "BlackfinFrameLowering.h"
-#include "BlackfinSubtarget.h"
-#include "BlackfinSelectionDAGInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameLowering.h"
-
-namespace llvm {
-
- class BlackfinTargetMachine : public LLVMTargetMachine {
- const TargetData DataLayout;
- BlackfinSubtarget Subtarget;
- BlackfinTargetLowering TLInfo;
- BlackfinSelectionDAGInfo TSInfo;
- BlackfinInstrInfo InstrInfo;
- BlackfinFrameLowering FrameLowering;
- BlackfinIntrinsicInfo IntrinsicInfo;
- public:
- BlackfinTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
-
- virtual const BlackfinInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameLowering *getFrameLowering() const {
- return &FrameLowering;
- }
- virtual const BlackfinSubtarget *getSubtargetImpl() const {
- return &Subtarget;
- }
- virtual const BlackfinRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
- }
- virtual const BlackfinTargetLowering* getTargetLowering() const {
- return &TLInfo;
- }
- virtual const BlackfinSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
- }
- virtual const TargetData *getTargetData() const { return &DataLayout; }
- virtual bool addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
- const TargetIntrinsicInfo *getIntrinsicInfo() const {
- return &IntrinsicInfo;
- }
- };
-
-} // end namespace llvm
-
-#endif
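
Editor's note (not part of the patch): the virtual accessors above exist so the
generic codegen pipeline can pull every per-target component from a single
TargetMachine object instead of constructing them itself. Illustrative calls
against a constructed BlackfinTargetMachine TM, mirroring the declarations above:

  const TargetData *TD = TM.getTargetData();   // "e-p:32:32-i64:32-f64:32-n32"
  const BlackfinInstrInfo *TII = TM.getInstrInfo();
  const BlackfinRegisterInfo *TRI = TM.getRegisterInfo(); // owned by InstrInfo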
diff --git a/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCAsmInfo.cpp b/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCAsmInfo.cpp
deleted file mode 100644
index 5b9d4a2..0000000
--- a/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCAsmInfo.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//===-- BlackfinMCAsmInfo.cpp - Blackfin asm properties -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the BlackfinMCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinMCAsmInfo.h"
-
-using namespace llvm;
-
-BlackfinMCAsmInfo::BlackfinMCAsmInfo(const Target &T, StringRef TT) {
- GlobalPrefix = "_";
- CommentString = "//";
- HasSetDirective = false;
-}
diff --git a/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCAsmInfo.h b/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCAsmInfo.h
deleted file mode 100644
index c372aa2..0000000
--- a/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCAsmInfo.h
+++ /dev/null
@@ -1,29 +0,0 @@
-//===-- BlackfinMCAsmInfo.h - Blackfin asm properties ---------*- C++ -*--====//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the BlackfinMCAsmInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFINTARGETASMINFO_H
-#define BLACKFINTARGETASMINFO_H
-
-#include "llvm/ADT/StringRef.h"
-#include "llvm/MC/MCAsmInfo.h"
-
-namespace llvm {
- class Target;
-
- struct BlackfinMCAsmInfo : public MCAsmInfo {
- explicit BlackfinMCAsmInfo(const Target &T, StringRef TT);
- };
-
-} // namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCTargetDesc.cpp b/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCTargetDesc.cpp
deleted file mode 100644
index 272e3c2..0000000
--- a/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCTargetDesc.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//===-- BlackfinMCTargetDesc.cpp - Blackfin Target Descriptions -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides Blackfin specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "BlackfinMCTargetDesc.h"
-#include "BlackfinMCAsmInfo.h"
-#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_INSTRINFO_MC_DESC
-#include "BlackfinGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_MC_DESC
-#include "BlackfinGenSubtargetInfo.inc"
-
-#define GET_REGINFO_MC_DESC
-#include "BlackfinGenRegisterInfo.inc"
-
-using namespace llvm;
-
-
-static MCInstrInfo *createBlackfinMCInstrInfo() {
- MCInstrInfo *X = new MCInstrInfo();
- InitBlackfinMCInstrInfo(X);
- return X;
-}
-
-static MCRegisterInfo *createBlackfinMCRegisterInfo(StringRef TT) {
- MCRegisterInfo *X = new MCRegisterInfo();
- InitBlackfinMCRegisterInfo(X, BF::RETS);
- return X;
-}
-
-static MCSubtargetInfo *createBlackfinMCSubtargetInfo(StringRef TT,
- StringRef CPU,
- StringRef FS) {
- MCSubtargetInfo *X = new MCSubtargetInfo();
- InitBlackfinMCSubtargetInfo(X, TT, CPU, FS);
- return X;
-}
-
-static MCCodeGenInfo *createBlackfinMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- X->InitMCCodeGenInfo(RM, CM);
- return X;
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeBlackfinTargetMC() {
- // Register the MC asm info.
- RegisterMCAsmInfo<BlackfinMCAsmInfo> X(TheBlackfinTarget);
-
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheBlackfinTarget,
- createBlackfinMCCodeGenInfo);
-
- // Register the MC instruction info.
- TargetRegistry::RegisterMCInstrInfo(TheBlackfinTarget,
- createBlackfinMCInstrInfo);
-
- // Register the MC register info.
- TargetRegistry::RegisterMCRegInfo(TheBlackfinTarget,
- createBlackfinMCRegisterInfo);
-
- // Register the MC subtarget info.
- TargetRegistry::RegisterMCSubtargetInfo(TheBlackfinTarget,
- createBlackfinMCSubtargetInfo);
-}
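
Editor's note (not part of the patch): the factories registered above are never
called directly; clients reach them through the TargetRegistry. A hedged sketch,
assuming the standard lookup API of this LLVM era (error handling elided):

  std::string Err;
  if (const Target *T = TargetRegistry::lookupTarget("bfin", Err)) {
    MCRegisterInfo *MRI = T->createMCRegInfo("bfin"); // invokes the factory above
    (void)MRI;
  }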
diff --git a/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCTargetDesc.h b/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCTargetDesc.h
deleted file mode 100644
index 5bffe94..0000000
--- a/contrib/llvm/lib/Target/Blackfin/MCTargetDesc/BlackfinMCTargetDesc.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- BlackfinMCTargetDesc.h - Blackfin Target Descriptions ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides Blackfin specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BLACKFINMCTARGETDESC_H
-#define BLACKFINMCTARGETDESC_H
-
-namespace llvm {
-class MCSubtargetInfo;
-class Target;
-class StringRef;
-
-extern Target TheBlackfinTarget;
-
-} // End llvm namespace
-
-// Defines symbolic names for Blackfin registers. This defines a mapping from
-// register name to register number.
-#define GET_REGINFO_ENUM
-#include "BlackfinGenRegisterInfo.inc"
-
-// Defines symbolic names for the Blackfin instructions.
-#define GET_INSTRINFO_ENUM
-#include "BlackfinGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_ENUM
-#include "BlackfinGenSubtargetInfo.inc"
-
-#endif
diff --git a/contrib/llvm/lib/Target/Blackfin/TargetInfo/BlackfinTargetInfo.cpp b/contrib/llvm/lib/Target/Blackfin/TargetInfo/BlackfinTargetInfo.cpp
deleted file mode 100644
index 57f1d3e..0000000
--- a/contrib/llvm/lib/Target/Blackfin/TargetInfo/BlackfinTargetInfo.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//===-- BlackfinTargetInfo.cpp - Blackfin Target Implementation -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Blackfin.h"
-#include "llvm/Module.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-Target llvm::TheBlackfinTarget;
-
-extern "C" void LLVMInitializeBlackfinTargetInfo() {
- RegisterTarget<Triple::bfin> X(TheBlackfinTarget, "bfin",
- "Analog Devices Blackfin [experimental]");
-}
diff --git a/contrib/llvm/lib/Target/CBackend/CBackend.cpp b/contrib/llvm/lib/Target/CBackend/CBackend.cpp
deleted file mode 100644
index 69d8c46..0000000
--- a/contrib/llvm/lib/Target/CBackend/CBackend.cpp
+++ /dev/null
@@ -1,3617 +0,0 @@
-//===-- CBackend.cpp - Library for converting LLVM code to C --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This library converts LLVM code to C code, compilable by GCC and other C
-// compilers.
-//
-//===----------------------------------------------------------------------===//
-
-#include "CTargetMachine.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/Instructions.h"
-#include "llvm/Pass.h"
-#include "llvm/PassManager.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Analysis/ConstantsScanner.h"
-#include "llvm/Analysis/FindUsedTypes.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCObjectFileInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/InstVisitor.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/Host.h"
-#include "llvm/Config/config.h"
-#include <algorithm>
-// Some MS headers define setjmp as _setjmp; undo this for this file.
-#ifdef _MSC_VER
-#undef setjmp
-#endif
-using namespace llvm;
-
-extern "C" void LLVMInitializeCBackendTarget() {
- // Register the target.
- RegisterTargetMachine<CTargetMachine> X(TheCBackendTarget);
-}
-
-namespace {
- class CBEMCAsmInfo : public MCAsmInfo {
- public:
- CBEMCAsmInfo() {
- GlobalPrefix = "";
- PrivateGlobalPrefix = "";
- }
- };
-
- /// CWriter - This class is the main chunk of code that converts an LLVM
- /// module to a C translation unit.
- class CWriter : public FunctionPass, public InstVisitor<CWriter> {
- formatted_raw_ostream &Out;
- IntrinsicLowering *IL;
- Mangler *Mang;
- LoopInfo *LI;
- const Module *TheModule;
- const MCAsmInfo* TAsm;
- const MCRegisterInfo *MRI;
- const MCObjectFileInfo *MOFI;
- MCContext *TCtx;
- const TargetData* TD;
-
- std::map<const ConstantFP *, unsigned> FPConstantMap;
- std::set<Function*> intrinsicPrototypesAlreadyGenerated;
- std::set<const Argument*> ByValParams;
- unsigned FPCounter;
- unsigned OpaqueCounter;
- DenseMap<const Value*, unsigned> AnonValueNumbers;
- unsigned NextAnonValueNumber;
-
- /// UnnamedStructIDs - This contains a unique ID for each struct that is
- /// either anonymous or has no name.
- DenseMap<StructType*, unsigned> UnnamedStructIDs;
-
- public:
- static char ID;
- explicit CWriter(formatted_raw_ostream &o)
- : FunctionPass(ID), Out(o), IL(0), Mang(0), LI(0),
- TheModule(0), TAsm(0), MRI(0), MOFI(0), TCtx(0), TD(0),
- OpaqueCounter(0), NextAnonValueNumber(0) {
- initializeLoopInfoPass(*PassRegistry::getPassRegistry());
- FPCounter = 0;
- }
-
- virtual const char *getPassName() const { return "C backend"; }
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<LoopInfo>();
- AU.setPreservesAll();
- }
-
- virtual bool doInitialization(Module &M);
-
- bool runOnFunction(Function &F) {
-      // Do not codegen any 'available_externally' functions at all; they have
-      // definitions outside the translation unit.
- if (F.hasAvailableExternallyLinkage())
- return false;
-
- LI = &getAnalysis<LoopInfo>();
-
- // Get rid of intrinsics we can't handle.
- lowerIntrinsics(F);
-
- // Output all floating point constants that cannot be printed accurately.
- printFloatingPointConstants(F);
-
- printFunction(F);
- return false;
- }
-
- virtual bool doFinalization(Module &M) {
- // Free memory...
- delete IL;
- delete TD;
- delete Mang;
- delete TCtx;
- delete TAsm;
- delete MRI;
- delete MOFI;
- FPConstantMap.clear();
- ByValParams.clear();
- intrinsicPrototypesAlreadyGenerated.clear();
- UnnamedStructIDs.clear();
- return false;
- }
-
- raw_ostream &printType(raw_ostream &Out, Type *Ty,
- bool isSigned = false,
- const std::string &VariableName = "",
- bool IgnoreName = false,
- const AttrListPtr &PAL = AttrListPtr());
- raw_ostream &printSimpleType(raw_ostream &Out, Type *Ty,
- bool isSigned,
- const std::string &NameSoFar = "");
-
- void printStructReturnPointerFunctionType(raw_ostream &Out,
- const AttrListPtr &PAL,
- PointerType *Ty);
-
- std::string getStructName(StructType *ST);
-
- /// writeOperandDeref - Print the result of dereferencing the specified
- /// operand with '*'. This is equivalent to printing '*' then using
- /// writeOperand, but avoids excess syntax in some cases.
- void writeOperandDeref(Value *Operand) {
- if (isAddressExposed(Operand)) {
- // Already something with an address exposed.
- writeOperandInternal(Operand);
- } else {
- Out << "*(";
- writeOperand(Operand);
- Out << ")";
- }
- }
-
- void writeOperand(Value *Operand, bool Static = false);
- void writeInstComputationInline(Instruction &I);
- void writeOperandInternal(Value *Operand, bool Static = false);
- void writeOperandWithCast(Value* Operand, unsigned Opcode);
- void writeOperandWithCast(Value* Operand, const ICmpInst &I);
- bool writeInstructionCast(const Instruction &I);
-
- void writeMemoryAccess(Value *Operand, Type *OperandType,
- bool IsVolatile, unsigned Alignment);
-
- private :
- std::string InterpretASMConstraint(InlineAsm::ConstraintInfo& c);
-
- void lowerIntrinsics(Function &F);
- /// Prints the definition of the intrinsic function F. Supports the
- /// intrinsics which need to be explicitly defined in the CBackend.
- void printIntrinsicDefinition(const Function &F, raw_ostream &Out);
-
- void printModuleTypes();
- void printContainedStructs(Type *Ty, SmallPtrSet<Type *, 16> &);
- void printFloatingPointConstants(Function &F);
- void printFloatingPointConstants(const Constant *C);
- void printFunctionSignature(const Function *F, bool Prototype);
-
- void printFunction(Function &);
- void printBasicBlock(BasicBlock *BB);
- void printLoop(Loop *L);
-
- void printCast(unsigned opcode, Type *SrcTy, Type *DstTy);
- void printConstant(Constant *CPV, bool Static);
- void printConstantWithCast(Constant *CPV, unsigned Opcode);
- bool printConstExprCast(const ConstantExpr *CE, bool Static);
- void printConstantArray(ConstantArray *CPA, bool Static);
- void printConstantVector(ConstantVector *CV, bool Static);
-
-    /// isAddressExposed - Return true if the specified value must have its
-    /// address taken in order to get a C value of the correct type.
- /// This happens for global variables, byval parameters, and direct allocas.
- bool isAddressExposed(const Value *V) const {
- if (const Argument *A = dyn_cast<Argument>(V))
- return ByValParams.count(A);
- return isa<GlobalVariable>(V) || isDirectAlloca(V);
- }
-
- // isInlinableInst - Attempt to inline instructions into their uses to build
- // trees as much as possible. To do this, we have to consistently decide
- // what is acceptable to inline, so that variable declarations don't get
- // printed and an extra copy of the expr is not emitted.
- //
- static bool isInlinableInst(const Instruction &I) {
- // Always inline cmp instructions, even if they are shared by multiple
- // expressions. GCC generates horrible code if we don't.
- if (isa<CmpInst>(I))
- return true;
-
- // Must be an expression, must be used exactly once. If it is dead, we
- // emit it inline where it would go.
- if (I.getType() == Type::getVoidTy(I.getContext()) || !I.hasOneUse() ||
- isa<TerminatorInst>(I) || isa<CallInst>(I) || isa<PHINode>(I) ||
- isa<LoadInst>(I) || isa<VAArgInst>(I) || isa<InsertElementInst>(I) ||
- isa<InsertValueInst>(I))
- // Don't inline a load across a store or other bad things!
- return false;
-
- // Must not be used in inline asm, extractelement, or shufflevector.
- if (I.hasOneUse()) {
- const Instruction &User = cast<Instruction>(*I.use_back());
- if (isInlineAsm(User) || isa<ExtractElementInst>(User) ||
- isa<ShuffleVectorInst>(User))
- return false;
- }
-
-      // Only inline an instruction if its use is in the same BB as the inst.
- return I.getParent() == cast<Instruction>(I.use_back())->getParent();
- }
-
- // isDirectAlloca - Define fixed sized allocas in the entry block as direct
- // variables which are accessed with the & operator. This causes GCC to
- // generate significantly better code than to emit alloca calls directly.
- //
- static const AllocaInst *isDirectAlloca(const Value *V) {
- const AllocaInst *AI = dyn_cast<AllocaInst>(V);
- if (!AI) return 0;
- if (AI->isArrayAllocation())
- return 0; // FIXME: we can also inline fixed size array allocas!
- if (AI->getParent() != &AI->getParent()->getParent()->getEntryBlock())
- return 0;
- return AI;
- }
-
- // isInlineAsm - Check if the instruction is a call to an inline asm chunk.
- static bool isInlineAsm(const Instruction& I) {
- if (const CallInst *CI = dyn_cast<CallInst>(&I))
- return isa<InlineAsm>(CI->getCalledValue());
- return false;
- }
-
- // Instruction visitation functions
- friend class InstVisitor<CWriter>;
-
- void visitReturnInst(ReturnInst &I);
- void visitBranchInst(BranchInst &I);
- void visitSwitchInst(SwitchInst &I);
- void visitIndirectBrInst(IndirectBrInst &I);
- void visitInvokeInst(InvokeInst &I) {
- llvm_unreachable("Lowerinvoke pass didn't work!");
- }
- void visitUnwindInst(UnwindInst &I) {
- llvm_unreachable("Lowerinvoke pass didn't work!");
- }
- void visitResumeInst(ResumeInst &I) {
- llvm_unreachable("DwarfEHPrepare pass didn't work!");
- }
- void visitUnreachableInst(UnreachableInst &I);
-
- void visitPHINode(PHINode &I);
- void visitBinaryOperator(Instruction &I);
- void visitICmpInst(ICmpInst &I);
- void visitFCmpInst(FCmpInst &I);
-
- void visitCastInst (CastInst &I);
- void visitSelectInst(SelectInst &I);
- void visitCallInst (CallInst &I);
- void visitInlineAsm(CallInst &I);
- bool visitBuiltinCall(CallInst &I, Intrinsic::ID ID, bool &WroteCallee);
-
- void visitAllocaInst(AllocaInst &I);
- void visitLoadInst (LoadInst &I);
- void visitStoreInst (StoreInst &I);
- void visitGetElementPtrInst(GetElementPtrInst &I);
- void visitVAArgInst (VAArgInst &I);
-
- void visitInsertElementInst(InsertElementInst &I);
- void visitExtractElementInst(ExtractElementInst &I);
- void visitShuffleVectorInst(ShuffleVectorInst &SVI);
-
- void visitInsertValueInst(InsertValueInst &I);
- void visitExtractValueInst(ExtractValueInst &I);
-
- void visitInstruction(Instruction &I) {
-#ifndef NDEBUG
- errs() << "C Writer does not know about " << I;
-#endif
- llvm_unreachable(0);
- }
-
- void outputLValue(Instruction *I) {
- Out << " " << GetValueName(I) << " = ";
- }
-
- bool isGotoCodeNecessary(BasicBlock *From, BasicBlock *To);
- void printPHICopiesForSuccessor(BasicBlock *CurBlock,
- BasicBlock *Successor, unsigned Indent);
- void printBranchToBlock(BasicBlock *CurBlock, BasicBlock *SuccBlock,
- unsigned Indent);
- void printGEPExpression(Value *Ptr, gep_type_iterator I,
- gep_type_iterator E, bool Static);
-
- std::string GetValueName(const Value *Operand);
- };
-}
-
-char CWriter::ID = 0;
-
-
-
-static std::string CBEMangle(const std::string &S) {
- std::string Result;
-
- for (unsigned i = 0, e = S.size(); i != e; ++i)
- if (isalnum(S[i]) || S[i] == '_') {
- Result += S[i];
- } else {
- Result += '_';
- Result += 'A'+(S[i]&15);
- Result += 'A'+((S[i]>>4)&15);
- Result += '_';
- }
- return Result;
-}
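-// Editor's illustration, not part of the original file: CBEMangle escapes
-// each non-identifier byte as '_' plus two letters encoding its low and high
-// nibbles, so CBEMangle("a.b") returns "a_OC_b" ('.' is 0x2E; 'A'+14 = 'O',
-// 'A'+2 = 'C').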
-
-std::string CWriter::getStructName(StructType *ST) {
- if (!ST->isLiteral() && !ST->getName().empty())
- return CBEMangle("l_"+ST->getName().str());
-
- return "l_unnamed_" + utostr(UnnamedStructIDs[ST]);
-}
-
-
-/// printStructReturnPointerFunctionType - This is like printType for a struct
-/// return type, except, instead of printing the type as void (*)(Struct*, ...)
-/// print it as "Struct (*)(...)", for struct return functions.
-void CWriter::printStructReturnPointerFunctionType(raw_ostream &Out,
- const AttrListPtr &PAL,
- PointerType *TheTy) {
- FunctionType *FTy = cast<FunctionType>(TheTy->getElementType());
- std::string tstr;
- raw_string_ostream FunctionInnards(tstr);
- FunctionInnards << " (*) (";
- bool PrintedType = false;
-
- FunctionType::param_iterator I = FTy->param_begin(), E = FTy->param_end();
- Type *RetTy = cast<PointerType>(*I)->getElementType();
- unsigned Idx = 1;
- for (++I, ++Idx; I != E; ++I, ++Idx) {
- if (PrintedType)
- FunctionInnards << ", ";
- Type *ArgTy = *I;
- if (PAL.paramHasAttr(Idx, Attribute::ByVal)) {
- assert(ArgTy->isPointerTy());
- ArgTy = cast<PointerType>(ArgTy)->getElementType();
- }
- printType(FunctionInnards, ArgTy,
- /*isSigned=*/PAL.paramHasAttr(Idx, Attribute::SExt), "");
- PrintedType = true;
- }
- if (FTy->isVarArg()) {
- if (!PrintedType)
-      FunctionInnards << " int"; // dummy argument for empty vararg functions
- FunctionInnards << ", ...";
- } else if (!PrintedType) {
- FunctionInnards << "void";
- }
- FunctionInnards << ')';
- printType(Out, RetTy,
- /*isSigned=*/PAL.paramHasAttr(0, Attribute::SExt), FunctionInnards.str());
-}
-
-raw_ostream &
-CWriter::printSimpleType(raw_ostream &Out, Type *Ty, bool isSigned,
- const std::string &NameSoFar) {
- assert((Ty->isPrimitiveType() || Ty->isIntegerTy() || Ty->isVectorTy()) &&
- "Invalid type for printSimpleType");
- switch (Ty->getTypeID()) {
- case Type::VoidTyID: return Out << "void " << NameSoFar;
- case Type::IntegerTyID: {
- unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth();
- if (NumBits == 1)
- return Out << "bool " << NameSoFar;
- else if (NumBits <= 8)
- return Out << (isSigned?"signed":"unsigned") << " char " << NameSoFar;
- else if (NumBits <= 16)
- return Out << (isSigned?"signed":"unsigned") << " short " << NameSoFar;
- else if (NumBits <= 32)
- return Out << (isSigned?"signed":"unsigned") << " int " << NameSoFar;
- else if (NumBits <= 64)
- return Out << (isSigned?"signed":"unsigned") << " long long "<< NameSoFar;
- else {
- assert(NumBits <= 128 && "Bit widths > 128 not implemented yet");
- return Out << (isSigned?"llvmInt128":"llvmUInt128") << " " << NameSoFar;
- }
- }
- case Type::FloatTyID: return Out << "float " << NameSoFar;
- case Type::DoubleTyID: return Out << "double " << NameSoFar;
- // Lacking emulation of FP80 on PPC, etc., we assume whichever of these is
- // present matches host 'long double'.
- case Type::X86_FP80TyID:
- case Type::PPC_FP128TyID:
- case Type::FP128TyID: return Out << "long double " << NameSoFar;
-
- case Type::X86_MMXTyID:
- return printSimpleType(Out, Type::getInt32Ty(Ty->getContext()), isSigned,
- " __attribute__((vector_size(64))) " + NameSoFar);
-
- case Type::VectorTyID: {
- VectorType *VTy = cast<VectorType>(Ty);
- return printSimpleType(Out, VTy->getElementType(), isSigned,
- " __attribute__((vector_size(" +
- utostr(TD->getTypeAllocSize(VTy)) + " ))) " + NameSoFar);
- }
-
- default:
-#ifndef NDEBUG
- errs() << "Unknown primitive type: " << *Ty << "\n";
-#endif
- llvm_unreachable(0);
- }
-}
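-// Editor's illustration, not part of the original file: for an i16 with
-// isSigned=true and NameSoFar="x" this emits "signed short x"; a 16-byte
-// vector of i32 comes out roughly as
-// "unsigned int __attribute__((vector_size(16))) x".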
-
-// Given a Type* and the variable name, this prints out the variable
-// declaration.
-//
-raw_ostream &CWriter::printType(raw_ostream &Out, Type *Ty,
- bool isSigned, const std::string &NameSoFar,
- bool IgnoreName, const AttrListPtr &PAL) {
- if (Ty->isPrimitiveType() || Ty->isIntegerTy() || Ty->isVectorTy()) {
- printSimpleType(Out, Ty, isSigned, NameSoFar);
- return Out;
- }
-
- switch (Ty->getTypeID()) {
- case Type::FunctionTyID: {
- FunctionType *FTy = cast<FunctionType>(Ty);
- std::string tstr;
- raw_string_ostream FunctionInnards(tstr);
- FunctionInnards << " (" << NameSoFar << ") (";
- unsigned Idx = 1;
- for (FunctionType::param_iterator I = FTy->param_begin(),
- E = FTy->param_end(); I != E; ++I) {
- Type *ArgTy = *I;
- if (PAL.paramHasAttr(Idx, Attribute::ByVal)) {
- assert(ArgTy->isPointerTy());
- ArgTy = cast<PointerType>(ArgTy)->getElementType();
- }
- if (I != FTy->param_begin())
- FunctionInnards << ", ";
- printType(FunctionInnards, ArgTy,
- /*isSigned=*/PAL.paramHasAttr(Idx, Attribute::SExt), "");
- ++Idx;
- }
- if (FTy->isVarArg()) {
- if (!FTy->getNumParams())
-        FunctionInnards << " int"; // dummy argument for empty vararg functions
- FunctionInnards << ", ...";
- } else if (!FTy->getNumParams()) {
- FunctionInnards << "void";
- }
- FunctionInnards << ')';
- printType(Out, FTy->getReturnType(),
- /*isSigned=*/PAL.paramHasAttr(0, Attribute::SExt), FunctionInnards.str());
- return Out;
- }
- case Type::StructTyID: {
- StructType *STy = cast<StructType>(Ty);
-
- // Check to see if the type is named.
- if (!IgnoreName)
- return Out << getStructName(STy) << ' ' << NameSoFar;
-
- Out << NameSoFar + " {\n";
- unsigned Idx = 0;
- for (StructType::element_iterator I = STy->element_begin(),
- E = STy->element_end(); I != E; ++I) {
- Out << " ";
- printType(Out, *I, false, "field" + utostr(Idx++));
- Out << ";\n";
- }
- Out << '}';
- if (STy->isPacked())
- Out << " __attribute__ ((packed))";
- return Out;
- }
-
- case Type::PointerTyID: {
- PointerType *PTy = cast<PointerType>(Ty);
- std::string ptrName = "*" + NameSoFar;
-
- if (PTy->getElementType()->isArrayTy() ||
- PTy->getElementType()->isVectorTy())
- ptrName = "(" + ptrName + ")";
-
- if (!PAL.isEmpty())
- // Must be a function ptr cast!
- return printType(Out, PTy->getElementType(), false, ptrName, true, PAL);
- return printType(Out, PTy->getElementType(), false, ptrName);
- }
-
- case Type::ArrayTyID: {
- ArrayType *ATy = cast<ArrayType>(Ty);
- unsigned NumElements = ATy->getNumElements();
- if (NumElements == 0) NumElements = 1;
- // Arrays are wrapped in structs to allow them to have normal
- // value semantics (avoiding the array "decay").
- Out << NameSoFar << " { ";
- printType(Out, ATy->getElementType(), false,
- "array[" + utostr(NumElements) + "]");
- return Out << "; }";
- }
-
- default:
- llvm_unreachable("Unhandled case in getTypeProps!");
- }
-
- return Out;
-}
-
-void CWriter::printConstantArray(ConstantArray *CPA, bool Static) {
-
- // As a special case, print the array as a string if it is an array of
- // ubytes or an array of sbytes with positive values.
- //
- Type *ETy = CPA->getType()->getElementType();
-  // Editor's note: both operands of the original '||' tested the same i8
-  // type (apparently a leftover from when signed and unsigned byte types
-  // were distinct), so a single comparison suffices.
-  bool isString = (ETy == Type::getInt8Ty(CPA->getContext()));
-
- // Make sure the last character is a null char, as automatically added by C
- if (isString && (CPA->getNumOperands() == 0 ||
- !cast<Constant>(*(CPA->op_end()-1))->isNullValue()))
- isString = false;
-
- if (isString) {
- Out << '\"';
- // Keep track of whether the last number was a hexadecimal escape.
- bool LastWasHex = false;
-
- // Do not include the last character, which we know is null
- for (unsigned i = 0, e = CPA->getNumOperands()-1; i != e; ++i) {
- unsigned char C = cast<ConstantInt>(CPA->getOperand(i))->getZExtValue();
-
- // Print it out literally if it is a printable character. The only thing
- // to be careful about is when the last letter output was a hex escape
- // code, in which case we have to be careful not to print out hex digits
- // explicitly (the C compiler thinks it is a continuation of the previous
- // character, sheesh...)
- //
- if (isprint(C) && (!LastWasHex || !isxdigit(C))) {
- LastWasHex = false;
- if (C == '"' || C == '\\')
- Out << "\\" << (char)C;
- else
- Out << (char)C;
- } else {
- LastWasHex = false;
- switch (C) {
- case '\n': Out << "\\n"; break;
- case '\t': Out << "\\t"; break;
- case '\r': Out << "\\r"; break;
- case '\v': Out << "\\v"; break;
- case '\a': Out << "\\a"; break;
- case '\"': Out << "\\\""; break;
- case '\'': Out << "\\\'"; break;
- default:
- Out << "\\x";
- Out << (char)(( C/16 < 10) ? ( C/16 +'0') : ( C/16 -10+'A'));
- Out << (char)(((C&15) < 10) ? ((C&15)+'0') : ((C&15)-10+'A'));
- LastWasHex = true;
- break;
- }
- }
- }
- Out << '\"';
- } else {
- Out << '{';
- if (CPA->getNumOperands()) {
- Out << ' ';
- printConstant(cast<Constant>(CPA->getOperand(0)), Static);
- for (unsigned i = 1, e = CPA->getNumOperands(); i != e; ++i) {
- Out << ", ";
- printConstant(cast<Constant>(CPA->getOperand(i)), Static);
- }
- }
- Out << " }";
- }
-}
-
-void CWriter::printConstantVector(ConstantVector *CP, bool Static) {
- Out << '{';
- if (CP->getNumOperands()) {
- Out << ' ';
- printConstant(cast<Constant>(CP->getOperand(0)), Static);
- for (unsigned i = 1, e = CP->getNumOperands(); i != e; ++i) {
- Out << ", ";
- printConstant(cast<Constant>(CP->getOperand(i)), Static);
- }
- }
- Out << " }";
-}
-
-// isFPCSafeToPrint - Returns true if we may assume that CFP may be written out
-// textually as a double (rather than as a reference to a stack-allocated
-// variable). We decide this by converting CFP to a string and back into a
-// double, and then checking whether the conversion results in a bit-equal
-// double to the original value of CFP. This depends on us and the target C
-// compiler agreeing on the conversion process (which is pretty likely since we
-// only deal in IEEE FP).
-//
-static bool isFPCSafeToPrint(const ConstantFP *CFP) {
- bool ignored;
- // Do long doubles in hex for now.
- if (CFP->getType() != Type::getFloatTy(CFP->getContext()) &&
- CFP->getType() != Type::getDoubleTy(CFP->getContext()))
- return false;
- APFloat APF = APFloat(CFP->getValueAPF()); // copy
- if (CFP->getType() == Type::getFloatTy(CFP->getContext()))
- APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored);
-#if HAVE_PRINTF_A && ENABLE_CBE_PRINTF_A
- char Buffer[100];
- sprintf(Buffer, "%a", APF.convertToDouble());
- if (!strncmp(Buffer, "0x", 2) ||
- !strncmp(Buffer, "-0x", 3) ||
- !strncmp(Buffer, "+0x", 3))
- return APF.bitwiseIsEqual(APFloat(atof(Buffer)));
- return false;
-#else
- std::string StrVal = ftostr(APF);
-
- while (StrVal[0] == ' ')
- StrVal.erase(StrVal.begin());
-
- // Check to make sure that the stringized number is not some string like "Inf"
- // or NaN. Check that the string matches the "[-+]?[0-9]" regex.
- if ((StrVal[0] >= '0' && StrVal[0] <= '9') ||
- ((StrVal[0] == '-' || StrVal[0] == '+') &&
- (StrVal[1] >= '0' && StrVal[1] <= '9')))
- // Reparse stringized version!
- return APF.bitwiseIsEqual(APFloat(atof(StrVal.c_str())));
- return false;
-#endif
-}
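-// Editor's illustration, not part of the original file: a constant like 0.5
-// survives the text round-trip bit-exactly, so it may be printed inline; a
-// value whose decimal form reparses to different bits is instead spilled to
-// the FPConstantMap-backed stack constants used by printConstant().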
-
-/// Print out the casting for a cast operation. This does the double casting
-/// necessary for conversion to the destination type, if necessary.
-/// @brief Print a cast
-void CWriter::printCast(unsigned opc, Type *SrcTy, Type *DstTy) {
- // Print the destination type cast
- switch (opc) {
- case Instruction::UIToFP:
- case Instruction::SIToFP:
- case Instruction::IntToPtr:
- case Instruction::Trunc:
- case Instruction::BitCast:
- case Instruction::FPExt:
- case Instruction::FPTrunc: // For these the DstTy sign doesn't matter
- Out << '(';
- printType(Out, DstTy);
- Out << ')';
- break;
- case Instruction::ZExt:
- case Instruction::PtrToInt:
- case Instruction::FPToUI: // For these, make sure we get an unsigned dest
- Out << '(';
- printSimpleType(Out, DstTy, false);
- Out << ')';
- break;
- case Instruction::SExt:
- case Instruction::FPToSI: // For these, make sure we get a signed dest
- Out << '(';
- printSimpleType(Out, DstTy, true);
- Out << ')';
- break;
- default:
- llvm_unreachable("Invalid cast opcode");
- }
-
- // Print the source type cast
- switch (opc) {
- case Instruction::UIToFP:
- case Instruction::ZExt:
- Out << '(';
- printSimpleType(Out, SrcTy, false);
- Out << ')';
- break;
- case Instruction::SIToFP:
- case Instruction::SExt:
- Out << '(';
- printSimpleType(Out, SrcTy, true);
- Out << ')';
- break;
- case Instruction::IntToPtr:
- case Instruction::PtrToInt:
- // Avoid "cast to pointer from integer of different size" warnings
- Out << "(unsigned long)";
- break;
- case Instruction::Trunc:
- case Instruction::BitCast:
- case Instruction::FPExt:
- case Instruction::FPTrunc:
- case Instruction::FPToSI:
- case Instruction::FPToUI:
- break; // These don't need a source cast.
- default:
- llvm_unreachable("Invalid cast opcode");
- break;
- }
-}
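-// Editor's illustration, not part of the original file: a zext from i8 to
-// i32 prints "(unsigned int)(unsigned char)", making the C compiler
-// zero-extend, while the corresponding sext prints
-// "(signed int)(signed char)".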
-
-// printConstant - The LLVM Constant to C Constant converter.
-void CWriter::printConstant(Constant *CPV, bool Static) {
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CPV)) {
- switch (CE->getOpcode()) {
- case Instruction::Trunc:
- case Instruction::ZExt:
- case Instruction::SExt:
- case Instruction::FPTrunc:
- case Instruction::FPExt:
- case Instruction::UIToFP:
- case Instruction::SIToFP:
- case Instruction::FPToUI:
- case Instruction::FPToSI:
- case Instruction::PtrToInt:
- case Instruction::IntToPtr:
- case Instruction::BitCast:
- Out << "(";
- printCast(CE->getOpcode(), CE->getOperand(0)->getType(), CE->getType());
- if (CE->getOpcode() == Instruction::SExt &&
- CE->getOperand(0)->getType() == Type::getInt1Ty(CPV->getContext())) {
- // Make sure we really sext from bool here by subtracting from 0
- Out << "0-";
- }
- printConstant(CE->getOperand(0), Static);
- if (CE->getType() == Type::getInt1Ty(CPV->getContext()) &&
- (CE->getOpcode() == Instruction::Trunc ||
- CE->getOpcode() == Instruction::FPToUI ||
- CE->getOpcode() == Instruction::FPToSI ||
- CE->getOpcode() == Instruction::PtrToInt)) {
- // Make sure we really truncate to bool here by anding with 1
- Out << "&1u";
- }
- Out << ')';
- return;
-
- case Instruction::GetElementPtr:
- Out << "(";
- printGEPExpression(CE->getOperand(0), gep_type_begin(CPV),
- gep_type_end(CPV), Static);
- Out << ")";
- return;
- case Instruction::Select:
- Out << '(';
- printConstant(CE->getOperand(0), Static);
- Out << '?';
- printConstant(CE->getOperand(1), Static);
- Out << ':';
- printConstant(CE->getOperand(2), Static);
- Out << ')';
- return;
- case Instruction::Add:
- case Instruction::FAdd:
- case Instruction::Sub:
- case Instruction::FSub:
- case Instruction::Mul:
- case Instruction::FMul:
- case Instruction::SDiv:
- case Instruction::UDiv:
- case Instruction::FDiv:
- case Instruction::URem:
- case Instruction::SRem:
- case Instruction::FRem:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::ICmp:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- {
- Out << '(';
- bool NeedsClosingParens = printConstExprCast(CE, Static);
- printConstantWithCast(CE->getOperand(0), CE->getOpcode());
- switch (CE->getOpcode()) {
- case Instruction::Add:
- case Instruction::FAdd: Out << " + "; break;
- case Instruction::Sub:
- case Instruction::FSub: Out << " - "; break;
- case Instruction::Mul:
- case Instruction::FMul: Out << " * "; break;
- case Instruction::URem:
- case Instruction::SRem:
- case Instruction::FRem: Out << " % "; break;
- case Instruction::UDiv:
- case Instruction::SDiv:
- case Instruction::FDiv: Out << " / "; break;
- case Instruction::And: Out << " & "; break;
- case Instruction::Or: Out << " | "; break;
- case Instruction::Xor: Out << " ^ "; break;
- case Instruction::Shl: Out << " << "; break;
- case Instruction::LShr:
- case Instruction::AShr: Out << " >> "; break;
- case Instruction::ICmp:
- switch (CE->getPredicate()) {
- case ICmpInst::ICMP_EQ: Out << " == "; break;
- case ICmpInst::ICMP_NE: Out << " != "; break;
- case ICmpInst::ICMP_SLT:
- case ICmpInst::ICMP_ULT: Out << " < "; break;
- case ICmpInst::ICMP_SLE:
- case ICmpInst::ICMP_ULE: Out << " <= "; break;
- case ICmpInst::ICMP_SGT:
- case ICmpInst::ICMP_UGT: Out << " > "; break;
- case ICmpInst::ICMP_SGE:
- case ICmpInst::ICMP_UGE: Out << " >= "; break;
- default: llvm_unreachable("Illegal ICmp predicate");
- }
- break;
- default: llvm_unreachable("Illegal opcode here!");
- }
- printConstantWithCast(CE->getOperand(1), CE->getOpcode());
- if (NeedsClosingParens)
- Out << "))";
- Out << ')';
- return;
- }
- case Instruction::FCmp: {
- Out << '(';
- bool NeedsClosingParens = printConstExprCast(CE, Static);
- if (CE->getPredicate() == FCmpInst::FCMP_FALSE)
- Out << "0";
- else if (CE->getPredicate() == FCmpInst::FCMP_TRUE)
- Out << "1";
- else {
- const char* op = 0;
- switch (CE->getPredicate()) {
- default: llvm_unreachable("Illegal FCmp predicate");
- case FCmpInst::FCMP_ORD: op = "ord"; break;
- case FCmpInst::FCMP_UNO: op = "uno"; break;
- case FCmpInst::FCMP_UEQ: op = "ueq"; break;
- case FCmpInst::FCMP_UNE: op = "une"; break;
- case FCmpInst::FCMP_ULT: op = "ult"; break;
- case FCmpInst::FCMP_ULE: op = "ule"; break;
- case FCmpInst::FCMP_UGT: op = "ugt"; break;
- case FCmpInst::FCMP_UGE: op = "uge"; break;
- case FCmpInst::FCMP_OEQ: op = "oeq"; break;
- case FCmpInst::FCMP_ONE: op = "one"; break;
- case FCmpInst::FCMP_OLT: op = "olt"; break;
- case FCmpInst::FCMP_OLE: op = "ole"; break;
- case FCmpInst::FCMP_OGT: op = "ogt"; break;
- case FCmpInst::FCMP_OGE: op = "oge"; break;
- }
- Out << "llvm_fcmp_" << op << "(";
- printConstantWithCast(CE->getOperand(0), CE->getOpcode());
- Out << ", ";
- printConstantWithCast(CE->getOperand(1), CE->getOpcode());
- Out << ")";
- }
- if (NeedsClosingParens)
- Out << "))";
- Out << ')';
- return;
- }
- default:
-#ifndef NDEBUG
- errs() << "CWriter Error: Unhandled constant expression: "
- << *CE << "\n";
-#endif
- llvm_unreachable(0);
- }
- } else if (isa<UndefValue>(CPV) && CPV->getType()->isSingleValueType()) {
- Out << "((";
- printType(Out, CPV->getType()); // sign doesn't matter
- Out << ")/*UNDEF*/";
- if (!CPV->getType()->isVectorTy()) {
- Out << "0)";
- } else {
- Out << "{})";
- }
- return;
- }
-
- if (ConstantInt *CI = dyn_cast<ConstantInt>(CPV)) {
- Type* Ty = CI->getType();
- if (Ty == Type::getInt1Ty(CPV->getContext()))
- Out << (CI->getZExtValue() ? '1' : '0');
- else if (Ty == Type::getInt32Ty(CPV->getContext()))
- Out << CI->getZExtValue() << 'u';
- else if (Ty->getPrimitiveSizeInBits() > 32)
- Out << CI->getZExtValue() << "ull";
- else {
- Out << "((";
- printSimpleType(Out, Ty, false) << ')';
- if (CI->isMinValue(true))
- Out << CI->getZExtValue() << 'u';
- else
- Out << CI->getSExtValue();
- Out << ')';
- }
- return;
- }
-
- switch (CPV->getType()->getTypeID()) {
- case Type::FloatTyID:
- case Type::DoubleTyID:
- case Type::X86_FP80TyID:
- case Type::PPC_FP128TyID:
- case Type::FP128TyID: {
- ConstantFP *FPC = cast<ConstantFP>(CPV);
- std::map<const ConstantFP*, unsigned>::iterator I = FPConstantMap.find(FPC);
- if (I != FPConstantMap.end()) {
- // Because of FP precision problems we must load from a stack allocated
- // value that holds the value in hex.
- Out << "(*(" << (FPC->getType() == Type::getFloatTy(CPV->getContext()) ?
- "float" :
- FPC->getType() == Type::getDoubleTy(CPV->getContext()) ?
- "double" :
- "long double")
- << "*)&FPConstant" << I->second << ')';
- } else {
- double V;
- if (FPC->getType() == Type::getFloatTy(CPV->getContext()))
- V = FPC->getValueAPF().convertToFloat();
- else if (FPC->getType() == Type::getDoubleTy(CPV->getContext()))
- V = FPC->getValueAPF().convertToDouble();
- else {
- // Long double. Convert the number to double, discarding precision.
- // This is not awesome, but it at least makes the CBE output somewhat
- // useful.
- APFloat Tmp = FPC->getValueAPF();
- bool LosesInfo;
- Tmp.convert(APFloat::IEEEdouble, APFloat::rmTowardZero, &LosesInfo);
- V = Tmp.convertToDouble();
- }
-
- if (IsNAN(V)) {
- // The value is NaN
-
- // FIXME the actual NaN bits should be emitted.
- // The prefix for a quiet NaN is 0x7FF8. For a signalling NaN,
- // it's 0x7ff4.
- const unsigned long QuietNaN = 0x7ff8UL;
- //const unsigned long SignalNaN = 0x7ff4UL;
-
- // We need to grab the first part of the FP #
- char Buffer[100];
-
- uint64_t ll = DoubleToBits(V);
- sprintf(Buffer, "0x%llx", static_cast<long long>(ll));
-
- std::string Num(&Buffer[0], &Buffer[6]);
- unsigned long Val = strtoul(Num.c_str(), 0, 16);
-
- if (FPC->getType() == Type::getFloatTy(FPC->getContext()))
- Out << "LLVM_NAN" << (Val == QuietNaN ? "" : "S") << "F(\""
- << Buffer << "\") /*nan*/ ";
- else
- Out << "LLVM_NAN" << (Val == QuietNaN ? "" : "S") << "(\""
- << Buffer << "\") /*nan*/ ";
- } else if (IsInf(V)) {
- // The value is Inf
- if (V < 0) Out << '-';
- Out << "LLVM_INF" <<
- (FPC->getType() == Type::getFloatTy(FPC->getContext()) ? "F" : "")
- << " /*inf*/ ";
- } else {
- std::string Num;
-#if HAVE_PRINTF_A && ENABLE_CBE_PRINTF_A
- // Print out the constant as a floating point number.
- char Buffer[100];
- sprintf(Buffer, "%a", V);
- Num = Buffer;
-#else
- Num = ftostr(FPC->getValueAPF());
-#endif
- Out << Num;
- }
- }
- break;
- }
-
- case Type::ArrayTyID:
- // Use C99 compound expression literal initializer syntax.
- if (!Static) {
- Out << "(";
- printType(Out, CPV->getType());
- Out << ")";
- }
- Out << "{ "; // Arrays are wrapped in struct types.
- if (ConstantArray *CA = dyn_cast<ConstantArray>(CPV)) {
- printConstantArray(CA, Static);
- } else {
- assert(isa<ConstantAggregateZero>(CPV) || isa<UndefValue>(CPV));
- ArrayType *AT = cast<ArrayType>(CPV->getType());
- Out << '{';
- if (AT->getNumElements()) {
- Out << ' ';
- Constant *CZ = Constant::getNullValue(AT->getElementType());
- printConstant(CZ, Static);
- for (unsigned i = 1, e = AT->getNumElements(); i != e; ++i) {
- Out << ", ";
- printConstant(CZ, Static);
- }
- }
- Out << " }";
- }
- Out << " }"; // Arrays are wrapped in struct types.
- break;
-
- case Type::VectorTyID:
- // Use C99 compound expression literal initializer syntax.
- if (!Static) {
- Out << "(";
- printType(Out, CPV->getType());
- Out << ")";
- }
- if (ConstantVector *CV = dyn_cast<ConstantVector>(CPV)) {
- printConstantVector(CV, Static);
- } else {
- assert(isa<ConstantAggregateZero>(CPV) || isa<UndefValue>(CPV));
- VectorType *VT = cast<VectorType>(CPV->getType());
- Out << "{ ";
- Constant *CZ = Constant::getNullValue(VT->getElementType());
- printConstant(CZ, Static);
- for (unsigned i = 1, e = VT->getNumElements(); i != e; ++i) {
- Out << ", ";
- printConstant(CZ, Static);
- }
- Out << " }";
- }
- break;
-
- case Type::StructTyID:
- // Use C99 compound expression literal initializer syntax.
- if (!Static) {
- Out << "(";
- printType(Out, CPV->getType());
- Out << ")";
- }
- if (isa<ConstantAggregateZero>(CPV) || isa<UndefValue>(CPV)) {
- StructType *ST = cast<StructType>(CPV->getType());
- Out << '{';
- if (ST->getNumElements()) {
- Out << ' ';
- printConstant(Constant::getNullValue(ST->getElementType(0)), Static);
- for (unsigned i = 1, e = ST->getNumElements(); i != e; ++i) {
- Out << ", ";
- printConstant(Constant::getNullValue(ST->getElementType(i)), Static);
- }
- }
- Out << " }";
- } else {
- Out << '{';
- if (CPV->getNumOperands()) {
- Out << ' ';
- printConstant(cast<Constant>(CPV->getOperand(0)), Static);
- for (unsigned i = 1, e = CPV->getNumOperands(); i != e; ++i) {
- Out << ", ";
- printConstant(cast<Constant>(CPV->getOperand(i)), Static);
- }
- }
- Out << " }";
- }
- break;
-
- case Type::PointerTyID:
- if (isa<ConstantPointerNull>(CPV)) {
- Out << "((";
- printType(Out, CPV->getType()); // sign doesn't matter
- Out << ")/*NULL*/0)";
- break;
- } else if (GlobalValue *GV = dyn_cast<GlobalValue>(CPV)) {
- writeOperand(GV, Static);
- break;
- }
- // FALL THROUGH
- default:
-#ifndef NDEBUG
- errs() << "Unknown constant type: " << *CPV << "\n";
-#endif
- llvm_unreachable(0);
- }
-}
-
-// Some constant expressions need to be casted back to the original types
-// because their operands were casted to the expected type. This function takes
-// care of detecting that case and printing the cast for the ConstantExpr.
-bool CWriter::printConstExprCast(const ConstantExpr* CE, bool Static) {
- bool NeedsExplicitCast = false;
- Type *Ty = CE->getOperand(0)->getType();
- bool TypeIsSigned = false;
- switch (CE->getOpcode()) {
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- // We need to cast integer arithmetic so that it is always performed
- // as unsigned, to avoid undefined behavior on overflow.
- case Instruction::LShr:
- case Instruction::URem:
- case Instruction::UDiv: NeedsExplicitCast = true; break;
- case Instruction::AShr:
- case Instruction::SRem:
- case Instruction::SDiv: NeedsExplicitCast = true; TypeIsSigned = true; break;
- case Instruction::SExt:
- Ty = CE->getType();
- NeedsExplicitCast = true;
- TypeIsSigned = true;
- break;
- case Instruction::ZExt:
- case Instruction::Trunc:
- case Instruction::FPTrunc:
- case Instruction::FPExt:
- case Instruction::UIToFP:
- case Instruction::SIToFP:
- case Instruction::FPToUI:
- case Instruction::FPToSI:
- case Instruction::PtrToInt:
- case Instruction::IntToPtr:
- case Instruction::BitCast:
- Ty = CE->getType();
- NeedsExplicitCast = true;
- break;
- default: break;
- }
- if (NeedsExplicitCast) {
- Out << "((";
- if (Ty->isIntegerTy() && Ty != Type::getInt1Ty(Ty->getContext()))
- printSimpleType(Out, Ty, TypeIsSigned);
- else
- printType(Out, Ty); // not integer, sign doesn't matter
- Out << ")(";
- }
- return NeedsExplicitCast;
-}
-
-// Print a constant assuming that it is the operand for a given Opcode. The
-// opcodes that care about sign need to cast their operands to the expected
-// type before the operation proceeds. This function does the casting.
-void CWriter::printConstantWithCast(Constant* CPV, unsigned Opcode) {
-
- // Extract the operand's type, we'll need it.
- Type* OpTy = CPV->getType();
-
- // Indicate whether to do the cast or not.
- bool shouldCast = false;
- bool typeIsSigned = false;
-
- // Based on the Opcode for which this Constant is being written, determine
-  // the new type to which the operand should be cast by setting the value
-  // of OpTy. If we change OpTy, also set shouldCast to true so it gets
-  // cast below.
- switch (Opcode) {
- default:
- // for most instructions, it doesn't matter
- break;
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- // We need to cast integer arithmetic so that it is always performed
- // as unsigned, to avoid undefined behavior on overflow.
- case Instruction::LShr:
- case Instruction::UDiv:
- case Instruction::URem:
- shouldCast = true;
- break;
- case Instruction::AShr:
- case Instruction::SDiv:
- case Instruction::SRem:
- shouldCast = true;
- typeIsSigned = true;
- break;
- }
-
- // Write out the casted constant if we should, otherwise just write the
- // operand.
- if (shouldCast) {
- Out << "((";
- printSimpleType(Out, OpTy, typeIsSigned);
- Out << ")";
- printConstant(CPV, false);
- Out << ")";
- } else
- printConstant(CPV, false);
-}
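
/* Illustrative sketch, not from the original file: what the cast above looks
   like in the generated C. For a hypothetical i8 udiv with constant operand
   200, the constant is forced to unsigned before the division: */
#include <stdio.h>
int main(void) {
  unsigned char llvm_cbe_x = 250;                  /* hypothetical name */
  unsigned char r = (((unsigned char)llvm_cbe_x) / ((unsigned char)200));
  printf("%u\n", r);                               /* prints 1 */
  return 0;
}
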
-
-std::string CWriter::GetValueName(const Value *Operand) {
-
- // Resolve potential alias.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Operand)) {
- if (const Value *V = GA->resolveAliasedGlobal(false))
- Operand = V;
- }
-
- // Mangle globals with the standard mangler interface for LLC compatibility.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(Operand)) {
- SmallString<128> Str;
- Mang->getNameWithPrefix(Str, GV, false);
- return CBEMangle(Str.str().str());
- }
-
- std::string Name = Operand->getName();
-
- if (Name.empty()) { // Assign unique names to local temporaries.
- unsigned &No = AnonValueNumbers[Operand];
- if (No == 0)
- No = ++NextAnonValueNumber;
- Name = "tmp__" + utostr(No);
- }
-
- std::string VarName;
- VarName.reserve(Name.capacity());
-
- for (std::string::iterator I = Name.begin(), E = Name.end();
- I != E; ++I) {
- char ch = *I;
-
- if (!((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') ||
- (ch >= '0' && ch <= '9') || ch == '_')) {
-      char buffer[5];
-      sprintf(buffer, "_%x_", (unsigned char)ch); // avoid sign-extending ch
- VarName += buffer;
- } else
- VarName += ch;
- }
-
- return "llvm_cbe_" + VarName;
-}
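
/* Illustrative sketch, not from the original file: the loop above rewrites
   every character outside [A-Za-z0-9_] as "_<hex>_", so a hypothetical LLVM
   value named "foo.bar" surfaces in the generated C as ('.' is 0x2e): */
int llvm_cbe_foo_2e_bar;
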
-
-/// writeInstComputationInline - Emit the computation for the specified
-/// instruction inline, with no destination provided.
-void CWriter::writeInstComputationInline(Instruction &I) {
- // We can't currently support integer types other than 1, 8, 16, 32, 64.
- // Validate this.
- Type *Ty = I.getType();
- if (Ty->isIntegerTy() && (Ty!=Type::getInt1Ty(I.getContext()) &&
- Ty!=Type::getInt8Ty(I.getContext()) &&
- Ty!=Type::getInt16Ty(I.getContext()) &&
- Ty!=Type::getInt32Ty(I.getContext()) &&
- Ty!=Type::getInt64Ty(I.getContext()))) {
- report_fatal_error("The C backend does not currently support integer "
- "types of widths other than 1, 8, 16, 32, 64.\n"
- "This is being tracked as PR 4158.");
- }
-
- // If this is a non-trivial bool computation, make sure to truncate down to
- // a 1 bit value. This is important because we want "add i1 x, y" to return
- // "0" when x and y are true, not "2" for example.
- bool NeedBoolTrunc = false;
- if (I.getType() == Type::getInt1Ty(I.getContext()) &&
- !isa<ICmpInst>(I) && !isa<FCmpInst>(I))
- NeedBoolTrunc = true;
-
- if (NeedBoolTrunc)
- Out << "((";
-
- visit(I);
-
- if (NeedBoolTrunc)
- Out << ")&1)";
-}
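
/* Illustrative sketch, not from the original file: the "&1" wrapper keeps an
   i1 result in one bit. Without it, adding two true i1 values would yield 2
   in C (hypothetical names): */
#include <stdio.h>
int main(void) {
  unsigned char llvm_cbe_x = 1, llvm_cbe_y = 1;
  unsigned char llvm_cbe_r = ((llvm_cbe_x + llvm_cbe_y)&1);
  printf("%u\n", llvm_cbe_r);   /* prints 0, not 2 */
  return 0;
}
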
-
-
-void CWriter::writeOperandInternal(Value *Operand, bool Static) {
- if (Instruction *I = dyn_cast<Instruction>(Operand))
- // Should we inline this instruction to build a tree?
- if (isInlinableInst(*I) && !isDirectAlloca(I)) {
- Out << '(';
- writeInstComputationInline(*I);
- Out << ')';
- return;
- }
-
- Constant* CPV = dyn_cast<Constant>(Operand);
-
- if (CPV && !isa<GlobalValue>(CPV))
- printConstant(CPV, Static);
- else
- Out << GetValueName(Operand);
-}
-
-void CWriter::writeOperand(Value *Operand, bool Static) {
- bool isAddressImplicit = isAddressExposed(Operand);
- if (isAddressImplicit)
- Out << "(&"; // Global variables are referenced as their addresses by llvm
-
- writeOperandInternal(Operand, Static);
-
- if (isAddressImplicit)
- Out << ')';
-}
-
-// Some instructions need to have their result value cast back to the
-// original types because their operands were cast to the expected type.
-// This function takes care of detecting that case and printing the cast
-// for the Instruction.
-bool CWriter::writeInstructionCast(const Instruction &I) {
- Type *Ty = I.getOperand(0)->getType();
- switch (I.getOpcode()) {
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- // We need to cast integer arithmetic so that it is always performed
- // as unsigned, to avoid undefined behavior on overflow.
- case Instruction::LShr:
- case Instruction::URem:
- case Instruction::UDiv:
- Out << "((";
- printSimpleType(Out, Ty, false);
- Out << ")(";
- return true;
- case Instruction::AShr:
- case Instruction::SRem:
- case Instruction::SDiv:
- Out << "((";
- printSimpleType(Out, Ty, true);
- Out << ")(";
- return true;
- default: break;
- }
- return false;
-}
-
-// Write the operand with a cast to another type based on the Opcode being used.
-// This will be used in cases where an instruction has specific type
-// requirements (usually signedness) for its operands.
-void CWriter::writeOperandWithCast(Value* Operand, unsigned Opcode) {
-
- // Extract the operand's type, we'll need it.
- Type* OpTy = Operand->getType();
-
- // Indicate whether to do the cast or not.
- bool shouldCast = false;
-
- // Indicate whether the cast should be to a signed type or not.
- bool castIsSigned = false;
-
- // Based on the Opcode for which this Operand is being written, determine
-  // the new type to which the operand should be cast by setting the value
- // of OpTy. If we change OpTy, also set shouldCast to true.
- switch (Opcode) {
- default:
- // for most instructions, it doesn't matter
- break;
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- // We need to cast integer arithmetic so that it is always performed
- // as unsigned, to avoid undefined behavior on overflow.
- case Instruction::LShr:
- case Instruction::UDiv:
- case Instruction::URem: // Cast to unsigned first
- shouldCast = true;
- castIsSigned = false;
- break;
- case Instruction::GetElementPtr:
- case Instruction::AShr:
- case Instruction::SDiv:
- case Instruction::SRem: // Cast to signed first
- shouldCast = true;
- castIsSigned = true;
- break;
- }
-
- // Write out the casted operand if we should, otherwise just write the
- // operand.
- if (shouldCast) {
- Out << "((";
- printSimpleType(Out, OpTy, castIsSigned);
- Out << ")";
- writeOperand(Operand);
- Out << ")";
- } else
- writeOperand(Operand);
-}
-
-// Write the operand with a cast to another type based on the icmp predicate
-// being used.
-void CWriter::writeOperandWithCast(Value* Operand, const ICmpInst &Cmp) {
- // This has to do a cast to ensure the operand has the right signedness.
-  // Also, if the operand is a pointer, we make sure to cast it to an integer
-  // when doing the comparison, both to get the signedness right and so that
-  // the C compiler doesn't optimize comparisons like "p < NULL" to false
-  // (p may actually hold an integer value, for example).
- bool shouldCast = Cmp.isRelational();
-
- // Write out the casted operand if we should, otherwise just write the
- // operand.
- if (!shouldCast) {
- writeOperand(Operand);
- return;
- }
-
- // Should this be a signed comparison? If so, convert to signed.
- bool castIsSigned = Cmp.isSigned();
-
- // If the operand was a pointer, convert to a large integer type.
- Type* OpTy = Operand->getType();
- if (OpTy->isPointerTy())
- OpTy = TD->getIntPtrType(Operand->getContext());
-
- Out << "((";
- printSimpleType(Out, OpTy, castIsSigned);
- Out << ")";
- writeOperand(Operand);
- Out << ")";
-}
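
/* Illustrative sketch, not from the original file: for a relational icmp on
   pointers, both operands pass through the integer cast above. Assuming a
   64-bit target where the intptr type prints as unsigned long long: */
#include <stdio.h>
int main(void) {
  int a[2];
  int *p = &a[0], *q = &a[1];
  int lt = (((unsigned long long)p) < ((unsigned long long)q));
  printf("%d\n", lt);   /* prints 1; the compare happens on integers */
  return 0;
}
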
-
-// generateCompilerSpecificCode - This is where we add conditional compilation
-// directives to cater to specific compilers as need be.
-//
-static void generateCompilerSpecificCode(formatted_raw_ostream& Out,
- const TargetData *TD) {
- // Alloca is hard to get, and we don't want to include stdlib.h here.
- Out << "/* get a declaration for alloca */\n"
- << "#if defined(__CYGWIN__) || defined(__MINGW32__)\n"
- << "#define alloca(x) __builtin_alloca((x))\n"
- << "#define _alloca(x) __builtin_alloca((x))\n"
- << "#elif defined(__APPLE__)\n"
- << "extern void *__builtin_alloca(unsigned long);\n"
- << "#define alloca(x) __builtin_alloca(x)\n"
- << "#define longjmp _longjmp\n"
- << "#define setjmp _setjmp\n"
- << "#elif defined(__sun__)\n"
- << "#if defined(__sparcv9)\n"
- << "extern void *__builtin_alloca(unsigned long);\n"
- << "#else\n"
- << "extern void *__builtin_alloca(unsigned int);\n"
- << "#endif\n"
- << "#define alloca(x) __builtin_alloca(x)\n"
- << "#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__arm__)\n"
- << "#define alloca(x) __builtin_alloca(x)\n"
- << "#elif defined(_MSC_VER)\n"
- << "#define inline _inline\n"
- << "#define alloca(x) _alloca(x)\n"
- << "#else\n"
- << "#include <alloca.h>\n"
- << "#endif\n\n";
-
-  // We output GCC-specific attributes to preserve 'linkonce'ness on globals.
- // If we aren't being compiled with GCC, just drop these attributes.
- Out << "#ifndef __GNUC__ /* Can only support \"linkonce\" vars with GCC */\n"
- << "#define __attribute__(X)\n"
- << "#endif\n\n";
-
- // On Mac OS X, "external weak" is spelled "__attribute__((weak_import))".
- Out << "#if defined(__GNUC__) && defined(__APPLE_CC__)\n"
- << "#define __EXTERNAL_WEAK__ __attribute__((weak_import))\n"
- << "#elif defined(__GNUC__)\n"
- << "#define __EXTERNAL_WEAK__ __attribute__((weak))\n"
- << "#else\n"
- << "#define __EXTERNAL_WEAK__\n"
- << "#endif\n\n";
-
- // For now, turn off the weak linkage attribute on Mac OS X. (See above.)
- Out << "#if defined(__GNUC__) && defined(__APPLE_CC__)\n"
- << "#define __ATTRIBUTE_WEAK__\n"
- << "#elif defined(__GNUC__)\n"
- << "#define __ATTRIBUTE_WEAK__ __attribute__((weak))\n"
- << "#else\n"
- << "#define __ATTRIBUTE_WEAK__\n"
- << "#endif\n\n";
-
- // Add hidden visibility support. FIXME: APPLE_CC?
- Out << "#if defined(__GNUC__)\n"
- << "#define __HIDDEN__ __attribute__((visibility(\"hidden\")))\n"
- << "#endif\n\n";
-
- // Define NaN and Inf as GCC builtins if using GCC, as 0 otherwise
- // From the GCC documentation:
- //
- // double __builtin_nan (const char *str)
- //
- // This is an implementation of the ISO C99 function nan.
- //
- // Since ISO C99 defines this function in terms of strtod, which we do
- // not implement, a description of the parsing is in order. The string is
- // parsed as by strtol; that is, the base is recognized by leading 0 or
- // 0x prefixes. The number parsed is placed in the significand such that
- // the least significant bit of the number is at the least significant
- // bit of the significand. The number is truncated to fit the significand
- // field provided. The significand is forced to be a quiet NaN.
- //
- // This function, if given a string literal, is evaluated early enough
- // that it is considered a compile-time constant.
- //
- // float __builtin_nanf (const char *str)
- //
- // Similar to __builtin_nan, except the return type is float.
- //
- // double __builtin_inf (void)
- //
- // Similar to __builtin_huge_val, except a warning is generated if the
- // target floating-point format does not support infinities. This
- // function is suitable for implementing the ISO C99 macro INFINITY.
- //
- // float __builtin_inff (void)
- //
- // Similar to __builtin_inf, except the return type is float.
- Out << "#ifdef __GNUC__\n"
- << "#define LLVM_NAN(NanStr) __builtin_nan(NanStr) /* Double */\n"
- << "#define LLVM_NANF(NanStr) __builtin_nanf(NanStr) /* Float */\n"
- << "#define LLVM_NANS(NanStr) __builtin_nans(NanStr) /* Double */\n"
- << "#define LLVM_NANSF(NanStr) __builtin_nansf(NanStr) /* Float */\n"
- << "#define LLVM_INF __builtin_inf() /* Double */\n"
- << "#define LLVM_INFF __builtin_inff() /* Float */\n"
- << "#define LLVM_PREFETCH(addr,rw,locality) "
- "__builtin_prefetch(addr,rw,locality)\n"
- << "#define __ATTRIBUTE_CTOR__ __attribute__((constructor))\n"
- << "#define __ATTRIBUTE_DTOR__ __attribute__((destructor))\n"
- << "#define LLVM_ASM __asm__\n"
- << "#else\n"
- << "#define LLVM_NAN(NanStr) ((double)0.0) /* Double */\n"
- << "#define LLVM_NANF(NanStr) 0.0F /* Float */\n"
- << "#define LLVM_NANS(NanStr) ((double)0.0) /* Double */\n"
- << "#define LLVM_NANSF(NanStr) 0.0F /* Float */\n"
- << "#define LLVM_INF ((double)0.0) /* Double */\n"
- << "#define LLVM_INFF 0.0F /* Float */\n"
- << "#define LLVM_PREFETCH(addr,rw,locality) /* PREFETCH */\n"
- << "#define __ATTRIBUTE_CTOR__\n"
- << "#define __ATTRIBUTE_DTOR__\n"
- << "#define LLVM_ASM(X)\n"
- << "#endif\n\n";
-
- Out << "#if __GNUC__ < 4 /* Old GCC's, or compilers not GCC */ \n"
- << "#define __builtin_stack_save() 0 /* not implemented */\n"
- << "#define __builtin_stack_restore(X) /* noop */\n"
- << "#endif\n\n";
-
- // Output typedefs for 128-bit integers. If these are needed with a
- // 32-bit target or with a C compiler that doesn't support mode(TI),
- // more drastic measures will be needed.
- Out << "#if __GNUC__ && __LP64__ /* 128-bit integer types */\n"
- << "typedef int __attribute__((mode(TI))) llvmInt128;\n"
- << "typedef unsigned __attribute__((mode(TI))) llvmUInt128;\n"
- << "#endif\n\n";
-
- // Output target-specific code that should be inserted into main.
- Out << "#define CODE_FOR_MAIN() /* Any target-specific code for main()*/\n";
-}
-
-/// FindStaticTors - Given a static ctor/dtor list, unpack its contents into
-/// the StaticTors set.
-static void FindStaticTors(GlobalVariable *GV, std::set<Function*> &StaticTors){
- ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
- if (!InitList) return;
-
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i))){
- if (CS->getNumOperands() != 2) return; // Not array of 2-element structs.
-
- if (CS->getOperand(1)->isNullValue())
- return; // Found a null terminator, exit printing.
- Constant *FP = CS->getOperand(1);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
- if (CE->isCast())
- FP = CE->getOperand(0);
- if (Function *F = dyn_cast<Function>(FP))
- StaticTors.insert(F);
- }
-}
-
-enum SpecialGlobalClass {
- NotSpecial = 0,
- GlobalCtors, GlobalDtors,
- NotPrinted
-};
-
-/// getGlobalVariableClass - If this is a global that is specially recognized
-/// by LLVM, return a code that indicates how we should handle it.
-static SpecialGlobalClass getGlobalVariableClass(const GlobalVariable *GV) {
- // If this is a global ctors/dtors list, handle it now.
- if (GV->hasAppendingLinkage() && GV->use_empty()) {
- if (GV->getName() == "llvm.global_ctors")
- return GlobalCtors;
- else if (GV->getName() == "llvm.global_dtors")
- return GlobalDtors;
- }
-
- // Otherwise, if it is other metadata, don't print it. This catches things
- // like debug information.
- if (GV->getSection() == "llvm.metadata")
- return NotPrinted;
-
- return NotSpecial;
-}
-
-// PrintEscapedString - Print each character of the specified string, escaping
-// it if it is not printable or if it is an escape char.
-static void PrintEscapedString(const char *Str, unsigned Length,
- raw_ostream &Out) {
- for (unsigned i = 0; i != Length; ++i) {
- unsigned char C = Str[i];
- if (isprint(C) && C != '\\' && C != '"')
- Out << C;
- else if (C == '\\')
- Out << "\\\\";
- else if (C == '\"')
- Out << "\\\"";
- else if (C == '\t')
- Out << "\\t";
- else
- Out << "\\x" << hexdigit(C >> 4) << hexdigit(C & 0x0F);
- }
-}
-
-// PrintEscapedString - Print each character of the specified string, escaping
-// it if it is not printable or if it is an escape char.
-static void PrintEscapedString(const std::string &Str, raw_ostream &Out) {
- PrintEscapedString(Str.c_str(), Str.size(), Out);
-}
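
/* Illustrative sketch, not from the original file: given the raw bytes
   a <TAB> " <0x01>, PrintEscapedString emits  a\t\"\x01 , which the caller
   wraps in quotes to form a valid C string literal: */
const char *escaped_example = "a\t\"\x01";
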
-
-bool CWriter::doInitialization(Module &M) {
- FunctionPass::doInitialization(M);
-
- // Initialize
- TheModule = &M;
-
- TD = new TargetData(&M);
- IL = new IntrinsicLowering(*TD);
- IL->AddPrototypes(M);
-
-#if 0
- std::string Triple = TheModule->getTargetTriple();
- if (Triple.empty())
- Triple = llvm::sys::getHostTriple();
-
- std::string E;
- if (const Target *Match = TargetRegistry::lookupTarget(Triple, E))
- TAsm = Match->createMCAsmInfo(Triple);
-#endif
- TAsm = new CBEMCAsmInfo();
- MRI = new MCRegisterInfo();
- TCtx = new MCContext(*TAsm, *MRI, NULL);
- Mang = new Mangler(*TCtx, *TD);
-
- // Keep track of which functions are static ctors/dtors so they can have
- // an attribute added to their prototypes.
- std::set<Function*> StaticCtors, StaticDtors;
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
- switch (getGlobalVariableClass(I)) {
- default: break;
- case GlobalCtors:
- FindStaticTors(I, StaticCtors);
- break;
- case GlobalDtors:
- FindStaticTors(I, StaticDtors);
- break;
- }
- }
-
- // get declaration for alloca
- Out << "/* Provide Declarations */\n";
- Out << "#include <stdarg.h>\n"; // Varargs support
- Out << "#include <setjmp.h>\n"; // Unwind support
- Out << "#include <limits.h>\n"; // With overflow intrinsics support.
- generateCompilerSpecificCode(Out, TD);
-
- // Provide a definition for `bool' if not compiling with a C++ compiler.
- Out << "\n"
- << "#ifndef __cplusplus\ntypedef unsigned char bool;\n#endif\n"
-
- << "\n\n/* Support for floating point constants */\n"
- << "typedef unsigned long long ConstantDoubleTy;\n"
- << "typedef unsigned int ConstantFloatTy;\n"
- << "typedef struct { unsigned long long f1; unsigned short f2; "
- "unsigned short pad[3]; } ConstantFP80Ty;\n"
- // This is used for both kinds of 128-bit long double; meaning differs.
- << "typedef struct { unsigned long long f1; unsigned long long f2; }"
- " ConstantFP128Ty;\n"
- << "\n\n/* Global Declarations */\n";
-
- // First output all the declarations for the program, because C requires
- // Functions & globals to be declared before they are used.
- //
- if (!M.getModuleInlineAsm().empty()) {
- Out << "/* Module asm statements */\n"
- << "asm(";
-
- // Split the string into lines, to make it easier to read the .ll file.
- std::string Asm = M.getModuleInlineAsm();
- size_t CurPos = 0;
- size_t NewLine = Asm.find_first_of('\n', CurPos);
- while (NewLine != std::string::npos) {
- // We found a newline, print the portion of the asm string from the
- // last newline up to this newline.
- Out << "\"";
- PrintEscapedString(std::string(Asm.begin()+CurPos, Asm.begin()+NewLine),
- Out);
- Out << "\\n\"\n";
- CurPos = NewLine+1;
- NewLine = Asm.find_first_of('\n', CurPos);
- }
- Out << "\"";
- PrintEscapedString(std::string(Asm.begin()+CurPos, Asm.end()), Out);
- Out << "\");\n"
- << "/* End Module asm statements */\n";
- }
-
- // Loop over the symbol table, emitting all named constants.
- printModuleTypes();
-
- // Global variable declarations...
- if (!M.global_empty()) {
- Out << "\n/* External Global Variable Declarations */\n";
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
-
- if (I->hasExternalLinkage() || I->hasExternalWeakLinkage() ||
- I->hasCommonLinkage())
- Out << "extern ";
- else if (I->hasDLLImportLinkage())
- Out << "__declspec(dllimport) ";
- else
- continue; // Internal Global
-
- // Thread Local Storage
- if (I->isThreadLocal())
- Out << "__thread ";
-
- printType(Out, I->getType()->getElementType(), false, GetValueName(I));
-
- if (I->hasExternalWeakLinkage())
- Out << " __EXTERNAL_WEAK__";
- Out << ";\n";
- }
- }
-
- // Function declarations
- Out << "\n/* Function Declarations */\n";
- Out << "double fmod(double, double);\n"; // Support for FP rem
- Out << "float fmodf(float, float);\n";
- Out << "long double fmodl(long double, long double);\n";
-
- // Store the intrinsics which will be declared/defined below.
- SmallVector<const Function*, 8> intrinsicsToDefine;
-
- for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
- // Don't print declarations for intrinsic functions.
- // Store the used intrinsics, which need to be explicitly defined.
- if (I->isIntrinsic()) {
- switch (I->getIntrinsicID()) {
- default:
- break;
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::sadd_with_overflow:
- intrinsicsToDefine.push_back(I);
- break;
- }
- continue;
- }
-
- if (I->getName() == "setjmp" ||
- I->getName() == "longjmp" || I->getName() == "_setjmp")
- continue;
-
- if (I->hasExternalWeakLinkage())
- Out << "extern ";
- printFunctionSignature(I, true);
- if (I->hasWeakLinkage() || I->hasLinkOnceLinkage())
- Out << " __ATTRIBUTE_WEAK__";
- if (I->hasExternalWeakLinkage())
- Out << " __EXTERNAL_WEAK__";
- if (StaticCtors.count(I))
- Out << " __ATTRIBUTE_CTOR__";
- if (StaticDtors.count(I))
- Out << " __ATTRIBUTE_DTOR__";
- if (I->hasHiddenVisibility())
- Out << " __HIDDEN__";
-
- if (I->hasName() && I->getName()[0] == 1)
- Out << " LLVM_ASM(\"" << I->getName().substr(1) << "\")";
-
- Out << ";\n";
- }
-
- // Output the global variable declarations
- if (!M.global_empty()) {
- Out << "\n\n/* Global Variable Declarations */\n";
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- if (!I->isDeclaration()) {
- // Ignore special globals, such as debug info.
- if (getGlobalVariableClass(I))
- continue;
-
- if (I->hasLocalLinkage())
- Out << "static ";
- else
- Out << "extern ";
-
- // Thread Local Storage
- if (I->isThreadLocal())
- Out << "__thread ";
-
- printType(Out, I->getType()->getElementType(), false,
- GetValueName(I));
-
- if (I->hasLinkOnceLinkage())
- Out << " __attribute__((common))";
- else if (I->hasCommonLinkage()) // FIXME is this right?
- Out << " __ATTRIBUTE_WEAK__";
- else if (I->hasWeakLinkage())
- Out << " __ATTRIBUTE_WEAK__";
- else if (I->hasExternalWeakLinkage())
- Out << " __EXTERNAL_WEAK__";
- if (I->hasHiddenVisibility())
- Out << " __HIDDEN__";
- Out << ";\n";
- }
- }
-
- // Output the global variable definitions and contents...
- if (!M.global_empty()) {
- Out << "\n\n/* Global Variable Definitions and Initialization */\n";
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- if (!I->isDeclaration()) {
- // Ignore special globals, such as debug info.
- if (getGlobalVariableClass(I))
- continue;
-
- if (I->hasLocalLinkage())
- Out << "static ";
- else if (I->hasDLLImportLinkage())
- Out << "__declspec(dllimport) ";
- else if (I->hasDLLExportLinkage())
- Out << "__declspec(dllexport) ";
-
- // Thread Local Storage
- if (I->isThreadLocal())
- Out << "__thread ";
-
- printType(Out, I->getType()->getElementType(), false,
- GetValueName(I));
- if (I->hasLinkOnceLinkage())
- Out << " __attribute__((common))";
- else if (I->hasWeakLinkage())
- Out << " __ATTRIBUTE_WEAK__";
- else if (I->hasCommonLinkage())
- Out << " __ATTRIBUTE_WEAK__";
-
- if (I->hasHiddenVisibility())
- Out << " __HIDDEN__";
-
- // If the initializer is not null, emit the initializer. If it is null,
- // we try to avoid emitting large amounts of zeros. The problem with
- // this, however, occurs when the variable has weak linkage. In this
- // case, the assembler will complain about the variable being both weak
- // and common, so we disable this optimization.
- // FIXME common linkage should avoid this problem.
- if (!I->getInitializer()->isNullValue()) {
- Out << " = " ;
- writeOperand(I->getInitializer(), true);
- } else if (I->hasWeakLinkage()) {
- // We have to specify an initializer, but it doesn't have to be
- // complete. If the value is an aggregate, print out { 0 }, and let
- // the compiler figure out the rest of the zeros.
- Out << " = " ;
- if (I->getInitializer()->getType()->isStructTy() ||
- I->getInitializer()->getType()->isVectorTy()) {
- Out << "{ 0 }";
- } else if (I->getInitializer()->getType()->isArrayTy()) {
- // As with structs and vectors, but with an extra set of braces
- // because arrays are wrapped in structs.
- Out << "{ { 0 } }";
- } else {
- // Just print it out normally.
- writeOperand(I->getInitializer(), true);
- }
- }
- Out << ";\n";
- }
- }
-
- if (!M.empty())
- Out << "\n\n/* Function Bodies */\n";
-
-  // Emit some helper functions for dealing with the FCMP instruction's
-  // predicates.
- Out << "static inline int llvm_fcmp_ord(double X, double Y) { ";
- Out << "return X == X && Y == Y; }\n";
- Out << "static inline int llvm_fcmp_uno(double X, double Y) { ";
- Out << "return X != X || Y != Y; }\n";
- Out << "static inline int llvm_fcmp_ueq(double X, double Y) { ";
- Out << "return X == Y || llvm_fcmp_uno(X, Y); }\n";
- Out << "static inline int llvm_fcmp_une(double X, double Y) { ";
- Out << "return X != Y; }\n";
- Out << "static inline int llvm_fcmp_ult(double X, double Y) { ";
- Out << "return X < Y || llvm_fcmp_uno(X, Y); }\n";
- Out << "static inline int llvm_fcmp_ugt(double X, double Y) { ";
- Out << "return X > Y || llvm_fcmp_uno(X, Y); }\n";
- Out << "static inline int llvm_fcmp_ule(double X, double Y) { ";
- Out << "return X <= Y || llvm_fcmp_uno(X, Y); }\n";
- Out << "static inline int llvm_fcmp_uge(double X, double Y) { ";
- Out << "return X >= Y || llvm_fcmp_uno(X, Y); }\n";
- Out << "static inline int llvm_fcmp_oeq(double X, double Y) { ";
- Out << "return X == Y ; }\n";
- Out << "static inline int llvm_fcmp_one(double X, double Y) { ";
- Out << "return X != Y && llvm_fcmp_ord(X, Y); }\n";
- Out << "static inline int llvm_fcmp_olt(double X, double Y) { ";
- Out << "return X < Y ; }\n";
- Out << "static inline int llvm_fcmp_ogt(double X, double Y) { ";
- Out << "return X > Y ; }\n";
- Out << "static inline int llvm_fcmp_ole(double X, double Y) { ";
- Out << "return X <= Y ; }\n";
- Out << "static inline int llvm_fcmp_oge(double X, double Y) { ";
- Out << "return X >= Y ; }\n";
-
- // Emit definitions of the intrinsics.
- for (SmallVector<const Function*, 8>::const_iterator
- I = intrinsicsToDefine.begin(),
- E = intrinsicsToDefine.end(); I != E; ++I) {
- printIntrinsicDefinition(**I, Out);
- }
-
- return false;
-}
-
-
-/// Output all floating point constants that cannot be printed accurately...
-void CWriter::printFloatingPointConstants(Function &F) {
- // Scan the module for floating point constants. If any FP constant is used
- // in the function, we want to redirect it here so that we do not depend on
- // the precision of the printed form, unless the printed form preserves
- // precision.
- //
- for (constant_iterator I = constant_begin(&F), E = constant_end(&F);
- I != E; ++I)
- printFloatingPointConstants(*I);
-
- Out << '\n';
-}
-
-void CWriter::printFloatingPointConstants(const Constant *C) {
- // If this is a constant expression, recursively check for constant fp values.
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
- for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i)
- printFloatingPointConstants(CE->getOperand(i));
- return;
- }
-
- // Otherwise, check for a FP constant that we need to print.
- const ConstantFP *FPC = dyn_cast<ConstantFP>(C);
- if (FPC == 0 ||
- // Do not put in FPConstantMap if safe.
- isFPCSafeToPrint(FPC) ||
- // Already printed this constant?
- FPConstantMap.count(FPC))
- return;
-
- FPConstantMap[FPC] = FPCounter; // Number the FP constants
-
- if (FPC->getType() == Type::getDoubleTy(FPC->getContext())) {
- double Val = FPC->getValueAPF().convertToDouble();
- uint64_t i = FPC->getValueAPF().bitcastToAPInt().getZExtValue();
- Out << "static const ConstantDoubleTy FPConstant" << FPCounter++
- << " = 0x" << utohexstr(i)
- << "ULL; /* " << Val << " */\n";
- } else if (FPC->getType() == Type::getFloatTy(FPC->getContext())) {
- float Val = FPC->getValueAPF().convertToFloat();
- uint32_t i = (uint32_t)FPC->getValueAPF().bitcastToAPInt().
- getZExtValue();
- Out << "static const ConstantFloatTy FPConstant" << FPCounter++
- << " = 0x" << utohexstr(i)
- << "U; /* " << Val << " */\n";
- } else if (FPC->getType() == Type::getX86_FP80Ty(FPC->getContext())) {
- // api needed to prevent premature destruction
- APInt api = FPC->getValueAPF().bitcastToAPInt();
- const uint64_t *p = api.getRawData();
- Out << "static const ConstantFP80Ty FPConstant" << FPCounter++
- << " = { 0x" << utohexstr(p[0])
- << "ULL, 0x" << utohexstr((uint16_t)p[1]) << ",{0,0,0}"
- << "}; /* Long double constant */\n";
- } else if (FPC->getType() == Type::getPPC_FP128Ty(FPC->getContext()) ||
- FPC->getType() == Type::getFP128Ty(FPC->getContext())) {
- APInt api = FPC->getValueAPF().bitcastToAPInt();
- const uint64_t *p = api.getRawData();
- Out << "static const ConstantFP128Ty FPConstant" << FPCounter++
- << " = { 0x"
- << utohexstr(p[0]) << ", 0x" << utohexstr(p[1])
- << "}; /* Long double constant */\n";
-
- } else {
- llvm_unreachable("Unknown float type!");
- }
-}
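
/* Illustrative sketch, not from the original file: the emitted form for a
   double constant whose printed decimal form could lose precision. The bit
   pattern shown is the IEEE-754 encoding of pi: */
typedef unsigned long long ConstantDoubleTy;
static const ConstantDoubleTy FPConstant0 =
    0x400921FB54442D18ULL; /* 3.141592653589793 */
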
-
-
-/// printModuleTypes - Emit the helper bitcast union, then forward-declare and
-/// define every struct type used in the module...
-///
-void CWriter::printModuleTypes() {
- Out << "/* Helper union for bitcasts */\n";
- Out << "typedef union {\n";
- Out << " unsigned int Int32;\n";
- Out << " unsigned long long Int64;\n";
- Out << " float Float;\n";
- Out << " double Double;\n";
- Out << "} llvmBitCastUnion;\n";
-
- // Get all of the struct types used in the module.
- std::vector<StructType*> StructTypes;
- TheModule->findUsedStructTypes(StructTypes);
-
- if (StructTypes.empty()) return;
-
- Out << "/* Structure forward decls */\n";
-
- unsigned NextTypeID = 0;
-
- // If any of them are missing names, add a unique ID to UnnamedStructIDs.
- // Print out forward declarations for structure types.
- for (unsigned i = 0, e = StructTypes.size(); i != e; ++i) {
- StructType *ST = StructTypes[i];
-
- if (ST->isLiteral() || ST->getName().empty())
- UnnamedStructIDs[ST] = NextTypeID++;
-
- std::string Name = getStructName(ST);
-
- Out << "typedef struct " << Name << ' ' << Name << ";\n";
- }
-
- Out << '\n';
-
- // Keep track of which structures have been printed so far.
- SmallPtrSet<Type *, 16> StructPrinted;
-
-  // Loop over all structures, printing the types they contain first so that
-  // definitions are emitted in dependency order.
- //
- Out << "/* Structure contents */\n";
- for (unsigned i = 0, e = StructTypes.size(); i != e; ++i)
- if (StructTypes[i]->isStructTy())
- // Only print out used types!
- printContainedStructs(StructTypes[i], StructPrinted);
-}
-
-// Recursively print all structs this one depends on, then print the struct
-// itself.
-//
-// TODO: Make this work properly with vector types
-//
-void CWriter::printContainedStructs(Type *Ty,
- SmallPtrSet<Type *, 16> &StructPrinted) {
- // Don't walk through pointers.
- if (Ty->isPointerTy() || Ty->isPrimitiveType() || Ty->isIntegerTy())
- return;
-
- // Print all contained types first.
- for (Type::subtype_iterator I = Ty->subtype_begin(),
- E = Ty->subtype_end(); I != E; ++I)
- printContainedStructs(*I, StructPrinted);
-
- if (StructType *ST = dyn_cast<StructType>(Ty)) {
- // Check to see if we have already printed this struct.
- if (!StructPrinted.insert(Ty)) return;
-
- // Print structure type out.
- printType(Out, ST, false, getStructName(ST), true);
- Out << ";\n\n";
- }
-}
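
/* Illustrative sketch, not from the original file: because contained types
   are printed first, a struct embedding another by value is always defined
   after its element type (hypothetical names in the CBE naming style): */
typedef struct l_struct_Inner l_struct_Inner;
typedef struct l_struct_Outer l_struct_Outer;
struct l_struct_Inner { unsigned int field0; };
struct l_struct_Outer { l_struct_Inner field0; unsigned int field1; };
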
-
-void CWriter::printFunctionSignature(const Function *F, bool Prototype) {
- /// isStructReturn - Should this function actually return a struct by-value?
- bool isStructReturn = F->hasStructRetAttr();
-
- if (F->hasLocalLinkage()) Out << "static ";
- if (F->hasDLLImportLinkage()) Out << "__declspec(dllimport) ";
- if (F->hasDLLExportLinkage()) Out << "__declspec(dllexport) ";
- switch (F->getCallingConv()) {
- case CallingConv::X86_StdCall:
- Out << "__attribute__((stdcall)) ";
- break;
- case CallingConv::X86_FastCall:
- Out << "__attribute__((fastcall)) ";
- break;
- case CallingConv::X86_ThisCall:
- Out << "__attribute__((thiscall)) ";
- break;
- default:
- break;
- }
-
- // Loop over the arguments, printing them...
- FunctionType *FT = cast<FunctionType>(F->getFunctionType());
- const AttrListPtr &PAL = F->getAttributes();
-
- std::string tstr;
- raw_string_ostream FunctionInnards(tstr);
-
- // Print out the name...
- FunctionInnards << GetValueName(F) << '(';
-
- bool PrintedArg = false;
- if (!F->isDeclaration()) {
- if (!F->arg_empty()) {
- Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- unsigned Idx = 1;
-
- // If this is a struct-return function, don't print the hidden
- // struct-return argument.
- if (isStructReturn) {
- assert(I != E && "Invalid struct return function!");
- ++I;
- ++Idx;
- }
-
- std::string ArgName;
- for (; I != E; ++I) {
- if (PrintedArg) FunctionInnards << ", ";
- if (I->hasName() || !Prototype)
- ArgName = GetValueName(I);
- else
- ArgName = "";
- Type *ArgTy = I->getType();
- if (PAL.paramHasAttr(Idx, Attribute::ByVal)) {
- ArgTy = cast<PointerType>(ArgTy)->getElementType();
- ByValParams.insert(I);
- }
- printType(FunctionInnards, ArgTy,
- /*isSigned=*/PAL.paramHasAttr(Idx, Attribute::SExt),
- ArgName);
- PrintedArg = true;
- ++Idx;
- }
- }
- } else {
- // Loop over the arguments, printing them.
- FunctionType::param_iterator I = FT->param_begin(), E = FT->param_end();
- unsigned Idx = 1;
-
- // If this is a struct-return function, don't print the hidden
- // struct-return argument.
- if (isStructReturn) {
- assert(I != E && "Invalid struct return function!");
- ++I;
- ++Idx;
- }
-
- for (; I != E; ++I) {
- if (PrintedArg) FunctionInnards << ", ";
- Type *ArgTy = *I;
- if (PAL.paramHasAttr(Idx, Attribute::ByVal)) {
- assert(ArgTy->isPointerTy());
- ArgTy = cast<PointerType>(ArgTy)->getElementType();
- }
- printType(FunctionInnards, ArgTy,
- /*isSigned=*/PAL.paramHasAttr(Idx, Attribute::SExt));
- PrintedArg = true;
- ++Idx;
- }
- }
-
- if (!PrintedArg && FT->isVarArg()) {
- FunctionInnards << "int vararg_dummy_arg";
- PrintedArg = true;
- }
-
- // Finish printing arguments... if this is a vararg function, print the ...,
- // unless there are no known types, in which case, we just emit ().
- //
- if (FT->isVarArg() && PrintedArg) {
- FunctionInnards << ",..."; // Output varargs portion of signature!
- } else if (!FT->isVarArg() && !PrintedArg) {
- FunctionInnards << "void"; // ret() -> ret(void) in C.
- }
- FunctionInnards << ')';
-
-  // Get the return type for the function.
- Type *RetTy;
- if (!isStructReturn)
- RetTy = F->getReturnType();
- else {
- // If this is a struct-return function, print the struct-return type.
- RetTy = cast<PointerType>(FT->getParamType(0))->getElementType();
- }
-
- // Print out the return type and the signature built above.
- printType(Out, RetTy,
- /*isSigned=*/PAL.paramHasAttr(0, Attribute::SExt),
- FunctionInnards.str());
-}
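
/* Illustrative sketch, not from the original file: for a hypothetical IR
   function "void @make_pair(%pair* sret %out, i32 %x)", the hidden sret
   argument is dropped and the signature is printed as returning the struct
   by value: */
typedef struct l_pair l_pair;
struct l_pair { unsigned int field0; unsigned int field1; };
l_pair llvm_cbe_make_pair(unsigned int llvm_cbe_x);
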
-
-static inline bool isFPIntBitCast(const Instruction &I) {
- if (!isa<BitCastInst>(I))
- return false;
- Type *SrcTy = I.getOperand(0)->getType();
- Type *DstTy = I.getType();
- return (SrcTy->isFloatingPointTy() && DstTy->isIntegerTy()) ||
- (DstTy->isFloatingPointTy() && SrcTy->isIntegerTy());
-}
-
-void CWriter::printFunction(Function &F) {
- /// isStructReturn - Should this function actually return a struct by-value?
- bool isStructReturn = F.hasStructRetAttr();
-
- printFunctionSignature(&F, false);
- Out << " {\n";
-
- // If this is a struct return function, handle the result with magic.
- if (isStructReturn) {
- Type *StructTy =
- cast<PointerType>(F.arg_begin()->getType())->getElementType();
- Out << " ";
- printType(Out, StructTy, false, "StructReturn");
- Out << "; /* Struct return temporary */\n";
-
- Out << " ";
- printType(Out, F.arg_begin()->getType(), false,
- GetValueName(F.arg_begin()));
- Out << " = &StructReturn;\n";
- }
-
- bool PrintedVar = false;
-
- // print local variable information for the function
- for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ++I) {
- if (const AllocaInst *AI = isDirectAlloca(&*I)) {
- Out << " ";
- printType(Out, AI->getAllocatedType(), false, GetValueName(AI));
- Out << "; /* Address-exposed local */\n";
- PrintedVar = true;
- } else if (I->getType() != Type::getVoidTy(F.getContext()) &&
- !isInlinableInst(*I)) {
- Out << " ";
- printType(Out, I->getType(), false, GetValueName(&*I));
- Out << ";\n";
-
- if (isa<PHINode>(*I)) { // Print out PHI node temporaries as well...
- Out << " ";
- printType(Out, I->getType(), false,
- GetValueName(&*I)+"__PHI_TEMPORARY");
- Out << ";\n";
- }
- PrintedVar = true;
- }
- // We need a temporary for the BitCast to use so it can pluck a value out
- // of a union to do the BitCast. This is separate from the need for a
- // variable to hold the result of the BitCast.
- if (isFPIntBitCast(*I)) {
- Out << " llvmBitCastUnion " << GetValueName(&*I)
- << "__BITCAST_TEMPORARY;\n";
- PrintedVar = true;
- }
- }
-
- if (PrintedVar)
- Out << '\n';
-
- if (F.hasExternalLinkage() && F.getName() == "main")
- Out << " CODE_FOR_MAIN();\n";
-
- // print the basic blocks
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- if (Loop *L = LI->getLoopFor(BB)) {
- if (L->getHeader() == BB && L->getParentLoop() == 0)
- printLoop(L);
- } else {
- printBasicBlock(BB);
- }
- }
-
- Out << "}\n\n";
-}
-
-void CWriter::printLoop(Loop *L) {
- Out << " do { /* Syntactic loop '" << L->getHeader()->getName()
- << "' to make GCC happy */\n";
- for (unsigned i = 0, e = L->getBlocks().size(); i != e; ++i) {
- BasicBlock *BB = L->getBlocks()[i];
- Loop *BBLoop = LI->getLoopFor(BB);
- if (BBLoop == L)
- printBasicBlock(BB);
- else if (BB == BBLoop->getHeader() && BBLoop->getParentLoop() == L)
- printLoop(BBLoop);
- }
- Out << " } while (1); /* end of syntactic loop '"
- << L->getHeader()->getName() << "' */\n";
-}
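
/* Illustrative sketch, not from the original file: the emitted shape for a
   simple counting loop. The do/while(1) only provides a syntactic body; the
   gotos carry the real control flow (hypothetical labels): */
#include <stdio.h>
int main(void) {
  unsigned int llvm_cbe_i = 0;
  do {     /* Syntactic loop 'bb' to make GCC happy */
  llvm_cbe_bb:
    ++llvm_cbe_i;
    if (llvm_cbe_i < 10u) goto llvm_cbe_bb;
    goto llvm_cbe_exit;
  } while (1);
llvm_cbe_exit:
  printf("%u\n", llvm_cbe_i);   /* prints 10 */
  return 0;
}
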
-
-void CWriter::printBasicBlock(BasicBlock *BB) {
-
- // Don't print the label for the basic block if there are no uses, or if
- // the only terminator use is the predecessor basic block's terminator.
- // We have to scan the use list because PHI nodes use basic blocks too but
- // do not require a label to be generated.
- //
- bool NeedsLabel = false;
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- if (isGotoCodeNecessary(*PI, BB)) {
- NeedsLabel = true;
- break;
- }
-
- if (NeedsLabel) Out << GetValueName(BB) << ":\n";
-
- // Output all of the instructions in the basic block...
- for (BasicBlock::iterator II = BB->begin(), E = --BB->end(); II != E;
- ++II) {
- if (!isInlinableInst(*II) && !isDirectAlloca(II)) {
- if (II->getType() != Type::getVoidTy(BB->getContext()) &&
- !isInlineAsm(*II))
- outputLValue(II);
- else
- Out << " ";
- writeInstComputationInline(*II);
- Out << ";\n";
- }
- }
-
- // Don't emit prefix or suffix for the terminator.
- visit(*BB->getTerminator());
-}
-
-
-// Specific Instruction type classes... note that all of the casts are
-// necessary because we use the instruction classes as opaque types...
-//
-void CWriter::visitReturnInst(ReturnInst &I) {
- // If this is a struct return function, return the temporary struct.
- bool isStructReturn = I.getParent()->getParent()->hasStructRetAttr();
-
- if (isStructReturn) {
- Out << " return StructReturn;\n";
- return;
- }
-
- // Don't output a void return if this is the last basic block in the function
- if (I.getNumOperands() == 0 &&
- &*--I.getParent()->getParent()->end() == I.getParent() &&
-      I.getParent()->size() != 1) {
- return;
- }
-
- Out << " return";
- if (I.getNumOperands()) {
- Out << ' ';
- writeOperand(I.getOperand(0));
- }
- Out << ";\n";
-}
-
-void CWriter::visitSwitchInst(SwitchInst &SI) {
-
- Value* Cond = SI.getCondition();
-
- Out << " switch (";
- writeOperand(Cond);
- Out << ") {\n default:\n";
- printPHICopiesForSuccessor (SI.getParent(), SI.getDefaultDest(), 2);
- printBranchToBlock(SI.getParent(), SI.getDefaultDest(), 2);
- Out << ";\n";
-
- unsigned NumCases = SI.getNumCases();
- // Skip the first item since that's the default case.
- for (unsigned i = 1; i < NumCases; ++i) {
- ConstantInt* CaseVal = SI.getCaseValue(i);
- BasicBlock* Succ = SI.getSuccessor(i);
- Out << " case ";
- writeOperand(CaseVal);
- Out << ":\n";
- printPHICopiesForSuccessor (SI.getParent(), Succ, 2);
- printBranchToBlock(SI.getParent(), Succ, 2);
- if (Function::iterator(Succ) == llvm::next(Function::iterator(SI.getParent())))
- Out << " break;\n";
- }
-
- Out << " }\n";
-}
-
-void CWriter::visitIndirectBrInst(IndirectBrInst &IBI) {
- Out << " goto *(void*)(";
- writeOperand(IBI.getOperand(0));
- Out << ");\n";
-}
-
-void CWriter::visitUnreachableInst(UnreachableInst &I) {
- Out << " /*UNREACHABLE*/;\n";
-}
-
-bool CWriter::isGotoCodeNecessary(BasicBlock *From, BasicBlock *To) {
-  /// FIXME: This should be re-enabled once it is safe with respect to loop
-  /// reordering!!
- return true;
-
- if (llvm::next(Function::iterator(From)) != Function::iterator(To))
- return true; // Not the direct successor, we need a goto.
-
- //isa<SwitchInst>(From->getTerminator())
-
- if (LI->getLoopFor(From) != LI->getLoopFor(To))
- return true;
- return false;
-}
-
-void CWriter::printPHICopiesForSuccessor (BasicBlock *CurBlock,
- BasicBlock *Successor,
- unsigned Indent) {
- for (BasicBlock::iterator I = Successor->begin(); isa<PHINode>(I); ++I) {
- PHINode *PN = cast<PHINode>(I);
- // Now we have to do the printing.
- Value *IV = PN->getIncomingValueForBlock(CurBlock);
- if (!isa<UndefValue>(IV)) {
- Out << std::string(Indent, ' ');
- Out << " " << GetValueName(I) << "__PHI_TEMPORARY = ";
- writeOperand(IV);
- Out << "; /* for PHI node */\n";
- }
- }
-}
-
-void CWriter::printBranchToBlock(BasicBlock *CurBB, BasicBlock *Succ,
- unsigned Indent) {
- if (isGotoCodeNecessary(CurBB, Succ)) {
- Out << std::string(Indent, ' ') << " goto ";
- writeOperand(Succ);
- Out << ";\n";
- }
-}
-
-// Branch instruction printing - Avoid printing out a branch to a basic block
-// that immediately succeeds the current one.
-//
-void CWriter::visitBranchInst(BranchInst &I) {
-
- if (I.isConditional()) {
- if (isGotoCodeNecessary(I.getParent(), I.getSuccessor(0))) {
- Out << " if (";
- writeOperand(I.getCondition());
- Out << ") {\n";
-
- printPHICopiesForSuccessor (I.getParent(), I.getSuccessor(0), 2);
- printBranchToBlock(I.getParent(), I.getSuccessor(0), 2);
-
- if (isGotoCodeNecessary(I.getParent(), I.getSuccessor(1))) {
- Out << " } else {\n";
- printPHICopiesForSuccessor (I.getParent(), I.getSuccessor(1), 2);
- printBranchToBlock(I.getParent(), I.getSuccessor(1), 2);
- }
- } else {
- // First goto not necessary, assume second one is...
- Out << " if (!";
- writeOperand(I.getCondition());
- Out << ") {\n";
-
- printPHICopiesForSuccessor (I.getParent(), I.getSuccessor(1), 2);
- printBranchToBlock(I.getParent(), I.getSuccessor(1), 2);
- }
-
- Out << " }\n";
- } else {
- printPHICopiesForSuccessor (I.getParent(), I.getSuccessor(0), 0);
- printBranchToBlock(I.getParent(), I.getSuccessor(0), 0);
- }
- Out << "\n";
-}
-
-// PHI nodes get copied into temporary values at the end of predecessor basic
-// blocks. We now need to copy these temporary values into the REAL value for
-// the PHI.
-void CWriter::visitPHINode(PHINode &I) {
- writeOperand(&I);
- Out << "__PHI_TEMPORARY";
-}
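
/* Illustrative sketch, not from the original file: each predecessor stores
   its incoming value into <name>__PHI_TEMPORARY before branching, and the
   PHI itself is then emitted as a copy out of that temporary at the top of
   the successor block (hypothetical names): */
unsigned int phi_example(int cond) {
  unsigned int llvm_cbe_x, llvm_cbe_x__PHI_TEMPORARY;
  if (cond) {
    llvm_cbe_x__PHI_TEMPORARY = 1u;   /* for PHI node */
    goto llvm_cbe_merge;
  }
  llvm_cbe_x__PHI_TEMPORARY = 2u;     /* for PHI node */
llvm_cbe_merge:
  llvm_cbe_x = llvm_cbe_x__PHI_TEMPORARY;
  return llvm_cbe_x;
}
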
-
-
-void CWriter::visitBinaryOperator(Instruction &I) {
- // binary instructions, shift instructions, setCond instructions.
- assert(!I.getType()->isPointerTy());
-
- // We must cast the results of binary operations which might be promoted.
- bool needsCast = false;
- if ((I.getType() == Type::getInt8Ty(I.getContext())) ||
- (I.getType() == Type::getInt16Ty(I.getContext()))
- || (I.getType() == Type::getFloatTy(I.getContext()))) {
- needsCast = true;
- Out << "((";
- printType(Out, I.getType(), false);
- Out << ")(";
- }
-
- // If this is a negation operation, print it out as such. For FP, we don't
- // want to print "-0.0 - X".
- if (BinaryOperator::isNeg(&I)) {
- Out << "-(";
- writeOperand(BinaryOperator::getNegArgument(cast<BinaryOperator>(&I)));
- Out << ")";
- } else if (BinaryOperator::isFNeg(&I)) {
- Out << "-(";
- writeOperand(BinaryOperator::getFNegArgument(cast<BinaryOperator>(&I)));
- Out << ")";
- } else if (I.getOpcode() == Instruction::FRem) {
- // Output a call to fmod/fmodf instead of emitting a%b
- if (I.getType() == Type::getFloatTy(I.getContext()))
- Out << "fmodf(";
- else if (I.getType() == Type::getDoubleTy(I.getContext()))
- Out << "fmod(";
- else // all 3 flavors of long double
- Out << "fmodl(";
- writeOperand(I.getOperand(0));
- Out << ", ";
- writeOperand(I.getOperand(1));
- Out << ")";
- } else {
-
- // Write out the cast of the instruction's value back to the proper type
- // if necessary.
- bool NeedsClosingParens = writeInstructionCast(I);
-
- // Certain instructions require the operand to be forced to a specific type
- // so we use writeOperandWithCast here instead of writeOperand. Similarly
- // below for operand 1
- writeOperandWithCast(I.getOperand(0), I.getOpcode());
-
- switch (I.getOpcode()) {
- case Instruction::Add:
- case Instruction::FAdd: Out << " + "; break;
- case Instruction::Sub:
- case Instruction::FSub: Out << " - "; break;
- case Instruction::Mul:
- case Instruction::FMul: Out << " * "; break;
- case Instruction::URem:
- case Instruction::SRem:
- case Instruction::FRem: Out << " % "; break;
- case Instruction::UDiv:
- case Instruction::SDiv:
- case Instruction::FDiv: Out << " / "; break;
- case Instruction::And: Out << " & "; break;
- case Instruction::Or: Out << " | "; break;
- case Instruction::Xor: Out << " ^ "; break;
- case Instruction::Shl : Out << " << "; break;
- case Instruction::LShr:
- case Instruction::AShr: Out << " >> "; break;
- default:
-#ifndef NDEBUG
- errs() << "Invalid operator type!" << I;
-#endif
- llvm_unreachable(0);
- }
-
- writeOperandWithCast(I.getOperand(1), I.getOpcode());
- if (NeedsClosingParens)
- Out << "))";
- }
-
- if (needsCast) {
- Out << "))";
- }
-}
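
/* Illustrative sketch, not from the original file: for an i8 add, the result
   cast above undoes C's integer promotions so the arithmetic wraps at 8 bits
   as the IR requires (hypothetical values): */
#include <stdio.h>
int main(void) {
  unsigned char llvm_cbe_a = 200, llvm_cbe_b = 100;
  unsigned char r = ((unsigned char)(llvm_cbe_a + llvm_cbe_b));
  printf("%u\n", r);   /* prints 44: 300 truncated mod 256 */
  return 0;
}
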
-
-void CWriter::visitICmpInst(ICmpInst &I) {
- // We must cast the results of icmp which might be promoted.
- bool needsCast = false;
-
- // Write out the cast of the instruction's value back to the proper type
- // if necessary.
- bool NeedsClosingParens = writeInstructionCast(I);
-
-  // Certain icmp predicates require the operand to be forced to a specific type
- // so we use writeOperandWithCast here instead of writeOperand. Similarly
- // below for operand 1
- writeOperandWithCast(I.getOperand(0), I);
-
- switch (I.getPredicate()) {
- case ICmpInst::ICMP_EQ: Out << " == "; break;
- case ICmpInst::ICMP_NE: Out << " != "; break;
- case ICmpInst::ICMP_ULE:
- case ICmpInst::ICMP_SLE: Out << " <= "; break;
- case ICmpInst::ICMP_UGE:
- case ICmpInst::ICMP_SGE: Out << " >= "; break;
- case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_SLT: Out << " < "; break;
- case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_SGT: Out << " > "; break;
- default:
-#ifndef NDEBUG
- errs() << "Invalid icmp predicate!" << I;
-#endif
- llvm_unreachable(0);
- }
-
- writeOperandWithCast(I.getOperand(1), I);
- if (NeedsClosingParens)
- Out << "))";
-
- if (needsCast) {
- Out << "))";
- }
-}
-
-void CWriter::visitFCmpInst(FCmpInst &I) {
- if (I.getPredicate() == FCmpInst::FCMP_FALSE) {
- Out << "0";
- return;
- }
- if (I.getPredicate() == FCmpInst::FCMP_TRUE) {
- Out << "1";
- return;
- }
-
- const char* op = 0;
- switch (I.getPredicate()) {
- default: llvm_unreachable("Illegal FCmp predicate");
- case FCmpInst::FCMP_ORD: op = "ord"; break;
- case FCmpInst::FCMP_UNO: op = "uno"; break;
- case FCmpInst::FCMP_UEQ: op = "ueq"; break;
- case FCmpInst::FCMP_UNE: op = "une"; break;
- case FCmpInst::FCMP_ULT: op = "ult"; break;
- case FCmpInst::FCMP_ULE: op = "ule"; break;
- case FCmpInst::FCMP_UGT: op = "ugt"; break;
- case FCmpInst::FCMP_UGE: op = "uge"; break;
- case FCmpInst::FCMP_OEQ: op = "oeq"; break;
- case FCmpInst::FCMP_ONE: op = "one"; break;
- case FCmpInst::FCMP_OLT: op = "olt"; break;
- case FCmpInst::FCMP_OLE: op = "ole"; break;
- case FCmpInst::FCMP_OGT: op = "ogt"; break;
- case FCmpInst::FCMP_OGE: op = "oge"; break;
- }
-
- Out << "llvm_fcmp_" << op << "(";
- // Write the first operand
- writeOperand(I.getOperand(0));
- Out << ", ";
- // Write the second operand
- writeOperand(I.getOperand(1));
- Out << ")";
-}
-
-static const char * getFloatBitCastField(Type *Ty) {
- switch (Ty->getTypeID()) {
- default: llvm_unreachable("Invalid Type");
- case Type::FloatTyID: return "Float";
- case Type::DoubleTyID: return "Double";
- case Type::IntegerTyID: {
- unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth();
- if (NumBits <= 32)
- return "Int32";
- else
- return "Int64";
- }
- }
-}
-
-void CWriter::visitCastInst(CastInst &I) {
- Type *DstTy = I.getType();
- Type *SrcTy = I.getOperand(0)->getType();
- if (isFPIntBitCast(I)) {
- Out << '(';
- // These int<->float and long<->double casts need to be handled specially
- Out << GetValueName(&I) << "__BITCAST_TEMPORARY."
- << getFloatBitCastField(I.getOperand(0)->getType()) << " = ";
- writeOperand(I.getOperand(0));
- Out << ", " << GetValueName(&I) << "__BITCAST_TEMPORARY."
- << getFloatBitCastField(I.getType());
- Out << ')';
- return;
- }
-
- Out << '(';
- printCast(I.getOpcode(), SrcTy, DstTy);
-
- // Make a sext from i1 work by subtracting the i1 from 0 (an int).
- if (SrcTy == Type::getInt1Ty(I.getContext()) &&
- I.getOpcode() == Instruction::SExt)
- Out << "0-";
-
- writeOperand(I.getOperand(0));
-
- if (DstTy == Type::getInt1Ty(I.getContext()) &&
- (I.getOpcode() == Instruction::Trunc ||
- I.getOpcode() == Instruction::FPToUI ||
- I.getOpcode() == Instruction::FPToSI ||
- I.getOpcode() == Instruction::PtrToInt)) {
- // Make sure we really get a trunc to bool by anding the operand with 1
- Out << "&1u";
- }
- Out << ')';
-}
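
/* Illustrative sketch, not from the original file: the comma expression the
   FP<->int path above emits. "bitcast float %x to i32" stores into the
   helper union and reads the integer field back, reinterpreting the bits: */
#include <stdio.h>
typedef union { unsigned int Int32; unsigned long long Int64;
                float Float; double Double; } llvmBitCastUnion;
int main(void) {
  llvmBitCastUnion llvm_cbe_r__BITCAST_TEMPORARY;
  float llvm_cbe_x = 1.0f;
  unsigned int llvm_cbe_r = (llvm_cbe_r__BITCAST_TEMPORARY.Float = llvm_cbe_x,
                             llvm_cbe_r__BITCAST_TEMPORARY.Int32);
  printf("0x%x\n", llvm_cbe_r);   /* prints 0x3f800000 */
  return 0;
}
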
-
-void CWriter::visitSelectInst(SelectInst &I) {
- Out << "((";
- writeOperand(I.getCondition());
- Out << ") ? (";
- writeOperand(I.getTrueValue());
- Out << ") : (";
- writeOperand(I.getFalseValue());
- Out << "))";
-}
-
-// Returns the macro name or value of the max or min of an integer type
-// (as defined in limits.h).
-static void printLimitValue(IntegerType &Ty, bool isSigned, bool isMax,
- raw_ostream &Out) {
- const char* type;
- const char* sprefix = "";
-
- unsigned NumBits = Ty.getBitWidth();
- if (NumBits <= 8) {
- type = "CHAR";
- sprefix = "S";
- } else if (NumBits <= 16) {
- type = "SHRT";
- } else if (NumBits <= 32) {
- type = "INT";
- } else if (NumBits <= 64) {
- type = "LLONG";
- } else {
- llvm_unreachable("Bit widths > 64 not implemented yet");
- }
-
- if (isSigned)
- Out << sprefix << type << (isMax ? "_MAX" : "_MIN");
- else
- Out << "U" << type << (isMax ? "_MAX" : "0");
-}
-
-#ifndef NDEBUG
-static bool isSupportedIntegerSize(IntegerType &T) {
- return T.getBitWidth() == 8 || T.getBitWidth() == 16 ||
- T.getBitWidth() == 32 || T.getBitWidth() == 64;
-}
-#endif
-
-void CWriter::printIntrinsicDefinition(const Function &F, raw_ostream &Out) {
- FunctionType *funT = F.getFunctionType();
- Type *retT = F.getReturnType();
- IntegerType *elemT = cast<IntegerType>(funT->getParamType(1));
-
- assert(isSupportedIntegerSize(*elemT) &&
- "CBackend does not support arbitrary size integers.");
- assert(cast<StructType>(retT)->getElementType(0) == elemT &&
- elemT == funT->getParamType(0) && funT->getNumParams() == 2);
-
- switch (F.getIntrinsicID()) {
- default:
- llvm_unreachable("Unsupported Intrinsic.");
- case Intrinsic::uadd_with_overflow:
- // static inline Rty uadd_ixx(unsigned ixx a, unsigned ixx b) {
- // Rty r;
- // r.field0 = a + b;
- // r.field1 = (r.field0 < a);
- // return r;
- // }
- Out << "static inline ";
- printType(Out, retT);
- Out << GetValueName(&F);
- Out << "(";
- printSimpleType(Out, elemT, false);
- Out << "a,";
- printSimpleType(Out, elemT, false);
- Out << "b) {\n ";
- printType(Out, retT);
- Out << "r;\n";
- Out << " r.field0 = a + b;\n";
- Out << " r.field1 = (r.field0 < a);\n";
- Out << " return r;\n}\n";
- break;
-
- case Intrinsic::sadd_with_overflow:
- // static inline Rty sadd_ixx(ixx a, ixx b) {
- // Rty r;
- // r.field1 = (b > 0 && a > XX_MAX - b) ||
- // (b < 0 && a < XX_MIN - b);
- // r.field0 = r.field1 ? 0 : a + b;
- // return r;
- // }
- Out << "static ";
- printType(Out, retT);
- Out << GetValueName(&F);
- Out << "(";
- printSimpleType(Out, elemT, true);
- Out << "a,";
- printSimpleType(Out, elemT, true);
- Out << "b) {\n ";
- printType(Out, retT);
- Out << "r;\n";
- Out << " r.field1 = (b > 0 && a > ";
- printLimitValue(*elemT, true, true, Out);
- Out << " - b) || (b < 0 && a < ";
- printLimitValue(*elemT, true, false, Out);
- Out << " - b);\n";
- Out << " r.field0 = r.field1 ? 0 : a + b;\n";
- Out << " return r;\n}\n";
- break;
- }
-}
-
-void CWriter::lowerIntrinsics(Function &F) {
-  // This is used to keep track of intrinsics that get lowered to a call to a
-  // helper function. We must emit those prototypes before the function body,
-  // which will only be expanded on first use (by the loop below).
- std::vector<Function*> prototypesToGen;
-
- // Examine all the instructions in this function to find the intrinsics that
- // need to be lowered.
- for (Function::iterator BB = F.begin(), EE = F.end(); BB != EE; ++BB)
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; )
- if (CallInst *CI = dyn_cast<CallInst>(I++))
- if (Function *F = CI->getCalledFunction())
- switch (F->getIntrinsicID()) {
- case Intrinsic::not_intrinsic:
- case Intrinsic::vastart:
- case Intrinsic::vacopy:
- case Intrinsic::vaend:
- case Intrinsic::returnaddress:
- case Intrinsic::frameaddress:
- case Intrinsic::setjmp:
- case Intrinsic::longjmp:
- case Intrinsic::prefetch:
- case Intrinsic::powi:
- case Intrinsic::x86_sse_cmp_ss:
- case Intrinsic::x86_sse_cmp_ps:
- case Intrinsic::x86_sse2_cmp_sd:
- case Intrinsic::x86_sse2_cmp_pd:
- case Intrinsic::ppc_altivec_lvsl:
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::sadd_with_overflow:
- // We directly implement these intrinsics
- break;
- default:
- // If this is an intrinsic that directly corresponds to a GCC
- // builtin, we handle it.
- const char *BuiltinName = "";
-#define GET_GCC_BUILTIN_NAME
-#include "llvm/Intrinsics.gen"
-#undef GET_GCC_BUILTIN_NAME
- // If we handle it, don't lower it.
- if (BuiltinName[0]) break;
-
- // All other intrinsic calls we must lower.
- Instruction *Before = 0;
- if (CI != &BB->front())
- Before = prior(BasicBlock::iterator(CI));
-
- IL->LowerIntrinsicCall(CI);
- if (Before) { // Move iterator to instruction after call
- I = Before; ++I;
- } else {
- I = BB->begin();
- }
- // If the intrinsic got lowered to another call, and that call has
- // a definition then we need to make sure its prototype is emitted
- // before any calls to it.
- if (CallInst *Call = dyn_cast<CallInst>(I))
- if (Function *NewF = Call->getCalledFunction())
- if (!NewF->isDeclaration())
- prototypesToGen.push_back(NewF);
-
- break;
- }
-
- // We may have collected some prototypes to emit in the loop above.
- // Emit them now, before the function that uses them is emitted. But,
- // be careful not to emit them twice.
- std::vector<Function*>::iterator I = prototypesToGen.begin();
- std::vector<Function*>::iterator E = prototypesToGen.end();
- for ( ; I != E; ++I) {
- if (intrinsicPrototypesAlreadyGenerated.insert(*I).second) {
- Out << '\n';
- printFunctionSignature(*I, true);
- Out << ";\n";
- }
- }
-}
-
-void CWriter::visitCallInst(CallInst &I) {
- if (isa<InlineAsm>(I.getCalledValue()))
- return visitInlineAsm(I);
-
- bool WroteCallee = false;
-
- // Handle intrinsic function calls first...
- if (Function *F = I.getCalledFunction())
- if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
- if (visitBuiltinCall(I, ID, WroteCallee))
- return;
-
- Value *Callee = I.getCalledValue();
-
- PointerType *PTy = cast<PointerType>(Callee->getType());
- FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
-
- // If this is a call to a struct-return function, assign to the first
- // parameter instead of passing it to the call.
- const AttrListPtr &PAL = I.getAttributes();
- bool hasByVal = I.hasByValArgument();
- bool isStructRet = I.hasStructRetAttr();
- if (isStructRet) {
- writeOperandDeref(I.getArgOperand(0));
- Out << " = ";
- }
-
- if (I.isTailCall()) Out << " /*tail*/ ";
-
- if (!WroteCallee) {
- // If this is an indirect call to a struct return function, we need to cast
- // the pointer. Ditto for indirect calls with byval arguments.
- bool NeedsCast = (hasByVal || isStructRet) && !isa<Function>(Callee);
-
- // GCC is a real PITA. It does not permit codegening casts of functions to
- // function pointers if they are in a call (it generates a trap instruction
- // instead!). We work around this by inserting a cast to void* in between
- // the function and the function pointer cast. Unfortunately, we can't just
- // form the constant expression here, because the folder will immediately
- // nuke it.
- //
- // Note finally, that this is completely unsafe. ANSI C does not guarantee
- // that void* and function pointers have the same size. :( To deal with this
-    // in the common case, we handle casts where the number of arguments passed
-    // matches exactly.
- //
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Callee))
- if (CE->isCast())
- if (Function *RF = dyn_cast<Function>(CE->getOperand(0))) {
- NeedsCast = true;
- Callee = RF;
- }
-
- if (NeedsCast) {
- // Ok, just cast the pointer type.
- Out << "((";
- if (isStructRet)
- printStructReturnPointerFunctionType(Out, PAL,
- cast<PointerType>(I.getCalledValue()->getType()));
- else if (hasByVal)
- printType(Out, I.getCalledValue()->getType(), false, "", true, PAL);
- else
- printType(Out, I.getCalledValue()->getType());
- Out << ")(void*)";
- }
- writeOperand(Callee);
- if (NeedsCast) Out << ')';
- }
-
- Out << '(';
-
- bool PrintedArg = false;
- if(FTy->isVarArg() && !FTy->getNumParams()) {
- Out << "0 /*dummy arg*/";
- PrintedArg = true;
- }
-
- unsigned NumDeclaredParams = FTy->getNumParams();
- CallSite CS(&I);
- CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
- unsigned ArgNo = 0;
- if (isStructRet) { // Skip struct return argument.
- ++AI;
- ++ArgNo;
- }
-
-
- for (; AI != AE; ++AI, ++ArgNo) {
- if (PrintedArg) Out << ", ";
- if (ArgNo < NumDeclaredParams &&
- (*AI)->getType() != FTy->getParamType(ArgNo)) {
- Out << '(';
- printType(Out, FTy->getParamType(ArgNo),
- /*isSigned=*/PAL.paramHasAttr(ArgNo+1, Attribute::SExt));
- Out << ')';
- }
- // Check if the argument is expected to be passed by value.
- if (I.paramHasAttr(ArgNo+1, Attribute::ByVal))
- writeOperandDeref(*AI);
- else
- writeOperand(*AI);
- PrintedArg = true;
- }
- Out << ')';
-}
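
A runnable sketch of the cast-through-void* pattern this function emits for mismatched callees. The function f and the values here are illustrative, not backend output; gcc and clang accept the function-pointer/void* round trip as an extension, which is exactly what the workaround leans on:

    #include <cstdio>

    static int f(int x) { return x + 1; }

    int main() {
      // Cast the function to void*, then to the desired function-pointer
      // type, so GCC does not fold the call into a trap instruction.
      int r = ((int (*)(int))(void *)f)(41);
      std::printf("%d\n", r);   // 42
      return 0;
    }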
-
-/// visitBuiltinCall - Handle the call to the specified builtin. Returns true
-/// if the entire call is handled, false if it wasn't, and optionally sets
-/// 'WroteCallee' if the callee has already been printed out.
-bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
- bool &WroteCallee) {
- switch (ID) {
- default: {
- // If this is an intrinsic that directly corresponds to a GCC
- // builtin, we emit it here.
- const char *BuiltinName = "";
- Function *F = I.getCalledFunction();
-#define GET_GCC_BUILTIN_NAME
-#include "llvm/Intrinsics.gen"
-#undef GET_GCC_BUILTIN_NAME
- assert(BuiltinName[0] && "Unknown LLVM intrinsic!");
-
- Out << BuiltinName;
- WroteCallee = true;
- return false;
- }
- case Intrinsic::vastart:
- Out << "0; ";
-
- Out << "va_start(*(va_list*)";
- writeOperand(I.getArgOperand(0));
- Out << ", ";
- // Output the last argument to the enclosing function.
- if (I.getParent()->getParent()->arg_empty())
- Out << "vararg_dummy_arg";
- else
- writeOperand(--I.getParent()->getParent()->arg_end());
- Out << ')';
- return true;
- case Intrinsic::vaend:
- if (!isa<ConstantPointerNull>(I.getArgOperand(0))) {
- Out << "0; va_end(*(va_list*)";
- writeOperand(I.getArgOperand(0));
- Out << ')';
- } else {
- Out << "va_end(*(va_list*)0)";
- }
- return true;
- case Intrinsic::vacopy:
- Out << "0; ";
- Out << "va_copy(*(va_list*)";
- writeOperand(I.getArgOperand(0));
- Out << ", *(va_list*)";
- writeOperand(I.getArgOperand(1));
- Out << ')';
- return true;
- case Intrinsic::returnaddress:
- Out << "__builtin_return_address(";
- writeOperand(I.getArgOperand(0));
- Out << ')';
- return true;
- case Intrinsic::frameaddress:
- Out << "__builtin_frame_address(";
- writeOperand(I.getArgOperand(0));
- Out << ')';
- return true;
- case Intrinsic::powi:
- Out << "__builtin_powi(";
- writeOperand(I.getArgOperand(0));
- Out << ", ";
- writeOperand(I.getArgOperand(1));
- Out << ')';
- return true;
- case Intrinsic::setjmp:
- Out << "setjmp(*(jmp_buf*)";
- writeOperand(I.getArgOperand(0));
- Out << ')';
- return true;
- case Intrinsic::longjmp:
- Out << "longjmp(*(jmp_buf*)";
- writeOperand(I.getArgOperand(0));
- Out << ", ";
- writeOperand(I.getArgOperand(1));
- Out << ')';
- return true;
- case Intrinsic::prefetch:
- Out << "LLVM_PREFETCH((const void *)";
- writeOperand(I.getArgOperand(0));
- Out << ", ";
- writeOperand(I.getArgOperand(1));
- Out << ", ";
- writeOperand(I.getArgOperand(2));
- Out << ")";
- return true;
- case Intrinsic::stacksave:
- // Emit this as: Val = 0; *((void**)&Val) = __builtin_stack_save()
- // to work around GCC bugs (see PR1809).
- Out << "0; *((void**)&" << GetValueName(&I)
- << ") = __builtin_stack_save()";
- return true;
- case Intrinsic::x86_sse_cmp_ss:
- case Intrinsic::x86_sse_cmp_ps:
- case Intrinsic::x86_sse2_cmp_sd:
- case Intrinsic::x86_sse2_cmp_pd:
- Out << '(';
- printType(Out, I.getType());
- Out << ')';
- // Multiple GCC builtins multiplex onto this intrinsic.
- switch (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue()) {
- default: llvm_unreachable("Invalid llvm.x86.sse.cmp!");
- case 0: Out << "__builtin_ia32_cmpeq"; break;
- case 1: Out << "__builtin_ia32_cmplt"; break;
- case 2: Out << "__builtin_ia32_cmple"; break;
- case 3: Out << "__builtin_ia32_cmpunord"; break;
- case 4: Out << "__builtin_ia32_cmpneq"; break;
- case 5: Out << "__builtin_ia32_cmpnlt"; break;
- case 6: Out << "__builtin_ia32_cmpnle"; break;
- case 7: Out << "__builtin_ia32_cmpord"; break;
- }
- if (ID == Intrinsic::x86_sse_cmp_ps || ID == Intrinsic::x86_sse2_cmp_pd)
- Out << 'p';
- else
- Out << 's';
- if (ID == Intrinsic::x86_sse_cmp_ss || ID == Intrinsic::x86_sse_cmp_ps)
- Out << 's';
- else
- Out << 'd';
-
- Out << "(";
- writeOperand(I.getArgOperand(0));
- Out << ", ";
- writeOperand(I.getArgOperand(1));
- Out << ")";
- return true;
- case Intrinsic::ppc_altivec_lvsl:
- Out << '(';
- printType(Out, I.getType());
- Out << ')';
- Out << "__builtin_altivec_lvsl(0, (void*)";
- writeOperand(I.getArgOperand(0));
- Out << ")";
- return true;
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::sadd_with_overflow:
- Out << GetValueName(I.getCalledFunction()) << "(";
- writeOperand(I.getArgOperand(0));
- Out << ", ";
- writeOperand(I.getArgOperand(1));
- Out << ")";
- return true;
- }
-}
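
The GET_GCC_BUILTIN_NAME fragment of Intrinsics.gen expands to a TableGen-generated lookup from the intrinsic ID of F to a builtin name. A hand-rolled stand-in showing the shape; the two entries are illustrative, the real table is generated from the .td files:

    #include <cstdio>

    enum IntrinsicID { not_intrinsic, sqrt_f64, trap };

    // Map an intrinsic ID to the matching GCC builtin name, or "" when
    // there is none (those are the ones lowerIntrinsics() must lower).
    static const char *getGCCBuiltinName(IntrinsicID ID) {
      switch (ID) {
      case sqrt_f64: return "__builtin_sqrt";
      case trap:     return "__builtin_trap";
      default:       return "";
      }
    }

    int main() {
      std::printf("%s\n", getGCCBuiltinName(sqrt_f64));   // __builtin_sqrt
      return 0;
    }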
-
-// This converts the LLVM constraint string to something gcc is expecting.
-// TODO: work out platform-independent constraints and factor those out
-//       of the per-target tables; handle multiple constraint codes.
-std::string CWriter::InterpretASMConstraint(InlineAsm::ConstraintInfo& c) {
- assert(c.Codes.size() == 1 && "Too many asm constraint codes to handle");
-
- // Grab the translation table from MCAsmInfo if it exists.
- const MCAsmInfo *TargetAsm;
- std::string Triple = TheModule->getTargetTriple();
- if (Triple.empty())
- Triple = llvm::sys::getHostTriple();
-
- std::string E;
- if (const Target *Match = TargetRegistry::lookupTarget(Triple, E))
- TargetAsm = Match->createMCAsmInfo(Triple);
- else
- return c.Codes[0];
-
- const char *const *table = TargetAsm->getAsmCBE();
-
- // Search the translation table if it exists.
- for (int i = 0; table && table[i]; i += 2)
- if (c.Codes[0] == table[i]) {
- delete TargetAsm;
- return table[i+1];
- }
-
- // Default is identity.
- delete TargetAsm;
- return c.Codes[0];
-}
-
-//TODO: import logic from AsmPrinter.cpp
-static std::string gccifyAsm(std::string asmstr) {
- for (std::string::size_type i = 0; i != asmstr.size(); ++i)
- if (asmstr[i] == '\n')
- asmstr.replace(i, 1, "\\n");
- else if (asmstr[i] == '\t')
- asmstr.replace(i, 1, "\\t");
- else if (asmstr[i] == '$') {
- if (asmstr[i + 1] == '{') {
- std::string::size_type a = asmstr.find_first_of(':', i + 1);
- std::string::size_type b = asmstr.find_first_of('}', i + 1);
- std::string n = "%" +
- asmstr.substr(a + 1, b - a - 1) +
- asmstr.substr(i + 2, a - i - 2);
- asmstr.replace(i, b - i + 1, n);
- i += n.size() - 1;
- } else
- asmstr.replace(i, 1, "%");
- }
- else if (asmstr[i] == '%')//grr
- { asmstr.replace(i, 1, "%%"); ++i;}
-
- return asmstr;
-}
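
Worked by hand, the loop turns $N into %N, ${N:m} into %mN (modifier first, operand number second), and doubles a literal %. A self-contained copy for experimenting with inputs, assuming well-formed ${N:m} groups; the bounds check on the trailing $ is an addition for safety:

    #include <cstdio>
    #include <string>

    static std::string gccify(std::string s) {
      for (std::string::size_type i = 0; i != s.size(); ++i)
        if (s[i] == '\n')
          s.replace(i, 1, "\\n");
        else if (s[i] == '\t')
          s.replace(i, 1, "\\t");
        else if (s[i] == '$') {
          if (i + 1 < s.size() && s[i + 1] == '{') {
            std::string::size_type a = s.find_first_of(':', i + 1);
            std::string::size_type b = s.find_first_of('}', i + 1);
            // "${N:m}" becomes "%mN": modifier, then operand number.
            std::string n = "%" + s.substr(a + 1, b - a - 1)
                                + s.substr(i + 2, a - i - 2);
            s.replace(i, b - i + 1, n);
            i += n.size() - 1;
          } else
            s.replace(i, 1, "%");
        } else if (s[i] == '%') {
          s.replace(i, 1, "%%");   // escape literal '%' for GCC
          ++i;
        }
      return s;
    }

    int main() {
      std::printf("%s\n", gccify("mov $1, $0").c_str());   // mov %1, %0
      std::printf("%s\n", gccify("${0:b}").c_str());       // %b0
      return 0;
    }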
-
-// TODO: the assumptions about which operands consume arguments from the call
-//       are likely wrong; also handle commutativity.
-void CWriter::visitInlineAsm(CallInst &CI) {
- InlineAsm* as = cast<InlineAsm>(CI.getCalledValue());
- InlineAsm::ConstraintInfoVector Constraints = as->ParseConstraints();
-
- std::vector<std::pair<Value*, int> > ResultVals;
- if (CI.getType() == Type::getVoidTy(CI.getContext()))
- ;
- else if (StructType *ST = dyn_cast<StructType>(CI.getType())) {
- for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i)
- ResultVals.push_back(std::make_pair(&CI, (int)i));
- } else {
- ResultVals.push_back(std::make_pair(&CI, -1));
- }
-
- // Fix up the asm string for gcc and emit it.
- Out << "__asm__ volatile (\"" << gccifyAsm(as->getAsmString()) << "\"\n";
- Out << " :";
-
- unsigned ValueCount = 0;
- bool IsFirst = true;
-
- // Convert over all the output constraints.
- for (InlineAsm::ConstraintInfoVector::iterator I = Constraints.begin(),
- E = Constraints.end(); I != E; ++I) {
-
- if (I->Type != InlineAsm::isOutput) {
- ++ValueCount;
- continue; // Ignore non-output constraints.
- }
-
- assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
- std::string C = InterpretASMConstraint(*I);
- if (C.empty()) continue;
-
-    if (!IsFirst)
-      Out << ", ";
-    IsFirst = false; // clear unconditionally so later constraints get commas
-
- // Unpack the dest.
- Value *DestVal;
- int DestValNo = -1;
-
- if (ValueCount < ResultVals.size()) {
- DestVal = ResultVals[ValueCount].first;
- DestValNo = ResultVals[ValueCount].second;
- } else
- DestVal = CI.getArgOperand(ValueCount-ResultVals.size());
-
- if (I->isEarlyClobber)
- C = "&"+C;
-
- Out << "\"=" << C << "\"(" << GetValueName(DestVal);
- if (DestValNo != -1)
- Out << ".field" << DestValNo; // Multiple retvals.
- Out << ")";
- ++ValueCount;
- }
-
-
- // Convert over all the input constraints.
- Out << "\n :";
- IsFirst = true;
- ValueCount = 0;
- for (InlineAsm::ConstraintInfoVector::iterator I = Constraints.begin(),
- E = Constraints.end(); I != E; ++I) {
- if (I->Type != InlineAsm::isInput) {
- ++ValueCount;
- continue; // Ignore non-input constraints.
- }
-
- assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
- std::string C = InterpretASMConstraint(*I);
- if (C.empty()) continue;
-
-    if (!IsFirst)
-      Out << ", ";
-    IsFirst = false; // clear unconditionally so later constraints get commas
-
- assert(ValueCount >= ResultVals.size() && "Input can't refer to result");
- Value *SrcVal = CI.getArgOperand(ValueCount-ResultVals.size());
-
- Out << "\"" << C << "\"(";
- if (!I->isIndirect)
- writeOperand(SrcVal);
- else
- writeOperandDeref(SrcVal);
- Out << ")";
- }
-
- // Convert over the clobber constraints.
- IsFirst = true;
- for (InlineAsm::ConstraintInfoVector::iterator I = Constraints.begin(),
- E = Constraints.end(); I != E; ++I) {
- if (I->Type != InlineAsm::isClobber)
-      continue; // Ignore non-clobber constraints.
-
- assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
- std::string C = InterpretASMConstraint(*I);
- if (C.empty()) continue;
-
-    if (!IsFirst)
-      Out << ", ";
-    IsFirst = false; // clear unconditionally so later constraints get commas
-
- Out << '\"' << C << '"';
- }
-
- Out << ")";
-}
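
For one output and one input, the emission order above (outputs, then inputs, then clobbers) produces asm of roughly this shape. A runnable GNU-asm sketch, hand-written to match that order; the "0" matching constraint ties input and output registers, so the empty template behaves as a copy:

    #include <cstdio>

    int main() {
      int in = 20, out;
      // outputs : inputs : clobbers, in the order visitInlineAsm prints them
      __asm__ volatile ("" : "=r"(out) : "0"(in));
      std::printf("%d\n", out);   // 20
      return 0;
    }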
-
-void CWriter::visitAllocaInst(AllocaInst &I) {
- Out << '(';
- printType(Out, I.getType());
- Out << ") alloca(sizeof(";
- printType(Out, I.getType()->getElementType());
- Out << ')';
- if (I.isArrayAllocation()) {
- Out << " * " ;
- writeOperand(I.getOperand(0));
- }
- Out << ')';
-}
-
-void CWriter::printGEPExpression(Value *Ptr, gep_type_iterator I,
- gep_type_iterator E, bool Static) {
-
- // If there are no indices, just print out the pointer.
- if (I == E) {
- writeOperand(Ptr);
- return;
- }
-
- // Find out if the last index is into a vector. If so, we have to print this
- // specially. Since vectors can't have elements of indexable type, only the
-  // last index could possibly index into a vector.
- VectorType *LastIndexIsVector = 0;
- {
- for (gep_type_iterator TmpI = I; TmpI != E; ++TmpI)
- LastIndexIsVector = dyn_cast<VectorType>(*TmpI);
- }
-
- Out << "(";
-
- // If the last index is into a vector, we can't print it as &a[i][j] because
- // we can't index into a vector with j in GCC. Instead, emit this as
- // (((float*)&a[i])+j)
- if (LastIndexIsVector) {
- Out << "((";
- printType(Out, PointerType::getUnqual(LastIndexIsVector->getElementType()));
- Out << ")(";
- }
-
- Out << '&';
-
- // If the first index is 0 (very typical) we can do a number of
- // simplifications to clean up the code.
- Value *FirstOp = I.getOperand();
- if (!isa<Constant>(FirstOp) || !cast<Constant>(FirstOp)->isNullValue()) {
- // First index isn't simple, print it the hard way.
- writeOperand(Ptr);
- } else {
- ++I; // Skip the zero index.
-
- // Okay, emit the first operand. If Ptr is something that is already address
- // exposed, like a global, avoid emitting (&foo)[0], just emit foo instead.
- if (isAddressExposed(Ptr)) {
- writeOperandInternal(Ptr, Static);
- } else if (I != E && (*I)->isStructTy()) {
- // If we didn't already emit the first operand, see if we can print it as
- // P->f instead of "P[0].f"
- writeOperand(Ptr);
- Out << "->field" << cast<ConstantInt>(I.getOperand())->getZExtValue();
- ++I; // eat the struct index as well.
- } else {
- // Instead of emitting P[0][1], emit (*P)[1], which is more idiomatic.
- Out << "(*";
- writeOperand(Ptr);
- Out << ")";
- }
- }
-
- for (; I != E; ++I) {
- if ((*I)->isStructTy()) {
- Out << ".field" << cast<ConstantInt>(I.getOperand())->getZExtValue();
- } else if ((*I)->isArrayTy()) {
- Out << ".array[";
- writeOperandWithCast(I.getOperand(), Instruction::GetElementPtr);
- Out << ']';
- } else if (!(*I)->isVectorTy()) {
- Out << '[';
- writeOperandWithCast(I.getOperand(), Instruction::GetElementPtr);
- Out << ']';
- } else {
- // If the last index is into a vector, then print it out as "+j)". This
- // works with the 'LastIndexIsVector' code above.
- if (isa<Constant>(I.getOperand()) &&
- cast<Constant>(I.getOperand())->isNullValue()) {
- Out << "))"; // avoid "+0".
- } else {
- Out << ")+(";
- writeOperandWithCast(I.getOperand(), Instruction::GetElementPtr);
- Out << "))";
- }
- }
- }
- Out << ")";
-}
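
The vector-last-index strategy from the comments above, (((float*)&a[i])+j), demonstrated with a plain 2-D array standing in for an LLVM vector; GCC cannot index a vector value directly, so the writer takes the address, casts to an element pointer, and adds the index:

    #include <cstdio>

    int main() {
      float a[2][4] = {{0, 1, 2, 3}, {4, 5, 6, 7}};
      int i = 1, j = 2;
      float *p = ((float *)&a[i]) + j;   // the emitted addressing pattern
      std::printf("%g\n", *p);           // 6
      return 0;
    }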
-
-void CWriter::writeMemoryAccess(Value *Operand, Type *OperandType,
- bool IsVolatile, unsigned Alignment) {
-
- bool IsUnaligned = Alignment &&
- Alignment < TD->getABITypeAlignment(OperandType);
-
- if (!IsUnaligned)
- Out << '*';
- if (IsVolatile || IsUnaligned) {
- Out << "((";
- if (IsUnaligned)
- Out << "struct __attribute__ ((packed, aligned(" << Alignment << "))) {";
- printType(Out, OperandType, false, IsUnaligned ? "data" : "volatile*");
- if (IsUnaligned) {
- Out << "; } ";
- if (IsVolatile) Out << "volatile ";
- Out << "*";
- }
- Out << ")";
- }
-
- writeOperand(Operand);
-
- if (IsVolatile || IsUnaligned) {
- Out << ')';
- if (IsUnaligned)
- Out << "->data";
- }
-}
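
The packed-struct wrapper emitted for under-aligned operands, hoisted to a named type so the sketch compiles as C++ (the backend defines the struct inline in the cast, which only C permits); GNU packed/aligned attributes and a little-endian host are assumed:

    #include <cstdio>
    #include <cstring>

    struct __attribute__((packed, aligned(1))) U32Unaligned { unsigned data; };

    int main() {
      unsigned char buf[8] = {0};
      unsigned v = 0x01020304;
      std::memcpy(buf + 1, &v, sizeof v);               // misaligned placement
      unsigned r = ((U32Unaligned *)(buf + 1))->data;   // the emitted access shape
      std::printf("%x\n", r);                           // 1020304 (little-endian)
      return 0;
    }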
-
-void CWriter::visitLoadInst(LoadInst &I) {
- writeMemoryAccess(I.getOperand(0), I.getType(), I.isVolatile(),
- I.getAlignment());
-
-}
-
-void CWriter::visitStoreInst(StoreInst &I) {
- writeMemoryAccess(I.getPointerOperand(), I.getOperand(0)->getType(),
- I.isVolatile(), I.getAlignment());
- Out << " = ";
- Value *Operand = I.getOperand(0);
- Constant *BitMask = 0;
- if (IntegerType* ITy = dyn_cast<IntegerType>(Operand->getType()))
- if (!ITy->isPowerOf2ByteWidth())
- // We have a bit width that doesn't match an even power-of-2 byte
-      // size, so we must mask the value with the type's bit mask.
- BitMask = ConstantInt::get(ITy, ITy->getBitMask());
- if (BitMask)
- Out << "((";
- writeOperand(Operand);
- if (BitMask) {
- Out << ") & ";
- printConstant(BitMask, false);
- Out << ")";
- }
-}
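
The masking path, worked as a concrete number: for an i17 store, ITy->getBitMask() is (1 << 17) - 1, so anything above bit 16 is stripped before the store:

    #include <cstdio>

    int main() {
      unsigned v = 0x7FFFF;                   // more than 17 bits set
      unsigned stored = v & ((1u << 17) - 1); // the emitted ((v) & mask)
      std::printf("%x\n", stored);            // 1ffff
      return 0;
    }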
-
-void CWriter::visitGetElementPtrInst(GetElementPtrInst &I) {
- printGEPExpression(I.getPointerOperand(), gep_type_begin(I),
- gep_type_end(I), false);
-}
-
-void CWriter::visitVAArgInst(VAArgInst &I) {
- Out << "va_arg(*(va_list*)";
- writeOperand(I.getOperand(0));
- Out << ", ";
- printType(Out, I.getType());
- Out << ");\n ";
-}
-
-void CWriter::visitInsertElementInst(InsertElementInst &I) {
- Type *EltTy = I.getType()->getElementType();
- writeOperand(I.getOperand(0));
- Out << ";\n ";
- Out << "((";
- printType(Out, PointerType::getUnqual(EltTy));
- Out << ")(&" << GetValueName(&I) << "))[";
- writeOperand(I.getOperand(2));
- Out << "] = (";
- writeOperand(I.getOperand(1));
- Out << ")";
-}
-
-void CWriter::visitExtractElementInst(ExtractElementInst &I) {
- // We know that our operand is not inlined.
- Out << "((";
- Type *EltTy =
- cast<VectorType>(I.getOperand(0)->getType())->getElementType();
- printType(Out, PointerType::getUnqual(EltTy));
- Out << ")(&" << GetValueName(I.getOperand(0)) << "))[";
- writeOperand(I.getOperand(1));
- Out << "]";
-}
-
-void CWriter::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
- Out << "(";
- printType(Out, SVI.getType());
- Out << "){ ";
- VectorType *VT = SVI.getType();
- unsigned NumElts = VT->getNumElements();
- Type *EltTy = VT->getElementType();
-
- for (unsigned i = 0; i != NumElts; ++i) {
- if (i) Out << ", ";
- int SrcVal = SVI.getMaskValue(i);
- if ((unsigned)SrcVal >= NumElts*2) {
- Out << " 0/*undef*/ ";
- } else {
- Value *Op = SVI.getOperand((unsigned)SrcVal >= NumElts);
- if (isa<Instruction>(Op)) {
- // Do an extractelement of this value from the appropriate input.
- Out << "((";
- printType(Out, PointerType::getUnqual(EltTy));
- Out << ")(&" << GetValueName(Op)
- << "))[" << (SrcVal & (NumElts-1)) << "]";
- } else if (isa<ConstantAggregateZero>(Op) || isa<UndefValue>(Op)) {
- Out << "0";
- } else {
- printConstant(cast<ConstantVector>(Op)->getOperand(SrcVal &
- (NumElts-1)),
- false);
- }
- }
- }
- Out << "}";
-}
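
The mask arithmetic above, worked for a 4-element shuffle: a mask value of NumElts or more selects the second operand, and the element index within the chosen source is mask & (NumElts - 1):

    #include <cstdio>

    int main() {
      // Shuffle of two 4-element vectors A and B with mask {0, 5, 2, 7}.
      int A[4] = {10, 11, 12, 13}, B[4] = {20, 21, 22, 23};
      int mask[4] = {0, 5, 2, 7}, out[4];
      const int NumElts = 4;
      for (int i = 0; i < 4; ++i) {
        int *src = (mask[i] >= NumElts) ? B : A;   // pick source operand
        out[i] = src[mask[i] & (NumElts - 1)];     // index within it
      }
      for (int i = 0; i < 4; ++i)
        std::printf("%d ", out[i]);                // 10 21 12 23
      std::printf("\n");
      return 0;
    }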
-
-void CWriter::visitInsertValueInst(InsertValueInst &IVI) {
- // Start by copying the entire aggregate value into the result variable.
- writeOperand(IVI.getOperand(0));
- Out << ";\n ";
-
- // Then do the insert to update the field.
- Out << GetValueName(&IVI);
- for (const unsigned *b = IVI.idx_begin(), *i = b, *e = IVI.idx_end();
- i != e; ++i) {
- Type *IndexedTy =
- ExtractValueInst::getIndexedType(IVI.getOperand(0)->getType(),
- makeArrayRef(b, i+1));
- if (IndexedTy->isArrayTy())
- Out << ".array[" << *i << "]";
- else
- Out << ".field" << *i;
- }
- Out << " = ";
- writeOperand(IVI.getOperand(1));
-}
-
-void CWriter::visitExtractValueInst(ExtractValueInst &EVI) {
- Out << "(";
- if (isa<UndefValue>(EVI.getOperand(0))) {
- Out << "(";
- printType(Out, EVI.getType());
- Out << ") 0/*UNDEF*/";
- } else {
- Out << GetValueName(EVI.getOperand(0));
- for (const unsigned *b = EVI.idx_begin(), *i = b, *e = EVI.idx_end();
- i != e; ++i) {
- Type *IndexedTy =
- ExtractValueInst::getIndexedType(EVI.getOperand(0)->getType(),
- makeArrayRef(b, i+1));
- if (IndexedTy->isArrayTy())
- Out << ".array[" << *i << "]";
- else
- Out << ".field" << *i;
- }
- }
- Out << ")";
-}
-
-//===----------------------------------------------------------------------===//
-// External Interface declaration
-//===----------------------------------------------------------------------===//
-
-bool CTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
- formatted_raw_ostream &o,
- CodeGenFileType FileType,
- CodeGenOpt::Level OptLevel,
- bool DisableVerify) {
- if (FileType != TargetMachine::CGFT_AssemblyFile) return true;
-
- PM.add(createGCLoweringPass());
- PM.add(createLowerInvokePass());
- PM.add(createCFGSimplificationPass()); // clean up after lower invoke.
- PM.add(new CWriter(o));
- PM.add(createGCInfoDeleter());
- return false;
-}
diff --git a/contrib/llvm/lib/Target/CBackend/CTargetMachine.h b/contrib/llvm/lib/Target/CBackend/CTargetMachine.h
deleted file mode 100644
index 4f1ca97..0000000
--- a/contrib/llvm/lib/Target/CBackend/CTargetMachine.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//===-- CTargetMachine.h - TargetMachine for the C backend ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the TargetMachine that is used by the C backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CTARGETMACHINE_H
-#define CTARGETMACHINE_H
-
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
-
-namespace llvm {
-
-struct CTargetMachine : public TargetMachine {
- CTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : TargetMachine(T, TT, CPU, FS) {}
-
- virtual bool addPassesToEmitFile(PassManagerBase &PM,
- formatted_raw_ostream &Out,
- CodeGenFileType FileType,
- CodeGenOpt::Level OptLevel,
- bool DisableVerify);
-
- virtual const TargetData *getTargetData() const { return 0; }
-};
-
-extern Target TheCBackendTarget;
-
-} // End llvm namespace
-
-
-#endif
diff --git a/contrib/llvm/lib/Target/CBackend/TargetInfo/CBackendTargetInfo.cpp b/contrib/llvm/lib/Target/CBackend/TargetInfo/CBackendTargetInfo.cpp
deleted file mode 100644
index e8274ff..0000000
--- a/contrib/llvm/lib/Target/CBackend/TargetInfo/CBackendTargetInfo.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-//===-- CBackendTargetInfo.cpp - CBackend Target Implementation -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "CTargetMachine.h"
-#include "llvm/Module.h"
-#include "llvm/Support/TargetRegistry.h"
-using namespace llvm;
-
-Target llvm::TheCBackendTarget;
-
-extern "C" void LLVMInitializeCBackendTargetInfo() {
- RegisterTarget<> X(TheCBackendTarget, "c", "C backend");
-}
-
-extern "C" void LLVMInitializeCBackendTargetMC() {}
diff --git a/contrib/llvm/lib/Target/CellSPU/CellSDKIntrinsics.td b/contrib/llvm/lib/Target/CellSPU/CellSDKIntrinsics.td
index 9468aee..cdb4099 100644
--- a/contrib/llvm/lib/Target/CellSPU/CellSDKIntrinsics.td
+++ b/contrib/llvm/lib/Target/CellSPU/CellSDKIntrinsics.td
@@ -1,5 +1,5 @@
//===-- CellSDKIntrinsics.td - Cell SDK Intrinsics ---------*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
diff --git a/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.cpp b/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.cpp
index 8c1176a..4bad37e 100644
--- a/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.cpp
@@ -14,6 +14,8 @@
#include "SPUMCAsmInfo.h"
using namespace llvm;
+void SPULinuxMCAsmInfo::anchor() { }
+
SPULinuxMCAsmInfo::SPULinuxMCAsmInfo(const Target &T, StringRef TT) {
IsLittleEndian = false;
diff --git a/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.h b/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.h
index 7f850d3..f786147 100644
--- a/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCAsmInfo.h
@@ -20,7 +20,9 @@
namespace llvm {
class Target;
- struct SPULinuxMCAsmInfo : public MCAsmInfo {
+ class SPULinuxMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
+ public:
explicit SPULinuxMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
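
The anchor() additions here and in SPUFunctionInfo below apply a common LLVM idiom: one deliberately out-of-line virtual member pins the class's vtable and RTTI to a single object file, so every other translation unit stops emitting its own weak copies. A minimal sketch of the idiom:

    #include <cstdio>

    class Base {
      virtual void anchor();   // declared here, defined in exactly one .cpp
    public:
      virtual ~Base() {}
    };

    void Base::anchor() {}     // the key vtable is emitted wherever this lives

    int main() {
      Base b;
      std::printf("anchored\n");
      return 0;
    }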
diff --git a/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.cpp b/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.cpp
index d5af2a8..8450e2c 100644
--- a/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- SPUMCTargetDesc.cpp - Cell SPU Target Descriptions -----*- C++ -*-===//
+//===-- SPUMCTargetDesc.cpp - Cell SPU Target Descriptions ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,6 +18,7 @@
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_MC_DESC
@@ -62,11 +63,12 @@ static MCAsmInfo *createSPUMCAsmInfo(const Target &T, StringRef TT) {
}
static MCCodeGenInfo *createSPUMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
// For the time being, use static relocations, since there's really no
// support for PIC yet.
- X->InitMCCodeGenInfo(Reloc::Static, CM);
+ X->InitMCCodeGenInfo(Reloc::Static, CM, OL);
return X;
}
diff --git a/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.h b/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.h
index a3717b0..d26449e 100644
--- a/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/CellSPU/MCTargetDesc/SPUMCTargetDesc.h
@@ -15,9 +15,7 @@
#define SPUMCTARGETDESC_H
namespace llvm {
-class MCSubtargetInfo;
class Target;
-class StringRef;
extern Target TheCellSPUTarget;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPU.h b/contrib/llvm/lib/Target/CellSPU/SPU.h
index b51fbc7..c660131 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPU.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPU.h
@@ -1,4 +1,4 @@
-//===-- SPU.h - Top-level interface for Cell SPU Target ----------*- C++ -*-==//
+//===-- SPU.h - Top-level interface for Cell SPU Target ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPU.td b/contrib/llvm/lib/Target/CellSPU/SPU.td
index 8327fe0..e835b9c 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPU.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPU.td
@@ -1,5 +1,5 @@
-//===- SPU.td - Describe the STI Cell SPU Target Machine ----*- tablegen -*-===//
-//
+//===-- SPU.td - Describe the STI Cell SPU Target Machine --*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
diff --git a/contrib/llvm/lib/Target/CellSPU/SPU128InstrInfo.td b/contrib/llvm/lib/Target/CellSPU/SPU128InstrInfo.td
index 3031fda..e051e04 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPU128InstrInfo.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPU128InstrInfo.td
@@ -1,9 +1,9 @@
-//===--- SPU128InstrInfo.td - Cell SPU 128-bit operations -*- tablegen -*--===//
+//===-- SPU128InstrInfo.td - Cell SPU 128-bit operations --*- tablegen -*--===//
//
// Cell SPU 128-bit operations
//
//===----------------------------------------------------------------------===//
-
+
// zext 32->128: Zero extend 32-bit to 128-bit
def : Pat<(i128 (zext R32C:$rSrc)),
(ROTQMBYIr128_zext_r32 R32C:$rSrc, 12)>;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td b/contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td
index f340edf..bea33b5 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPU64InstrInfo.td
@@ -1,4 +1,4 @@
-//====--- SPU64InstrInfo.td - Cell SPU 64-bit operations -*- tablegen -*--====//
+//====-- SPU64InstrInfo.td - Cell SPU 64-bit operations ---*- tablegen -*--===//
//
// Cell SPU 64-bit operations
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp b/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
index 90b5270..14021fe 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUAsmPrinter.cpp
@@ -1,4 +1,4 @@
-//===-- SPUAsmPrinter.cpp - Print machine instrs to Cell SPU assembly -------=//
+//===-- SPUAsmPrinter.cpp - Print machine instrs to Cell SPU assembly -----===//
//
// The LLVM Compiler Infrastructure
//
@@ -248,7 +248,6 @@ void SPUAsmPrinter::printOp(const MachineOperand &MO, raw_ostream &O) {
switch (MO.getType()) {
case MachineOperand::MO_Immediate:
report_fatal_error("printOp() does not handle immediate values");
- return;
case MachineOperand::MO_MachineBasicBlock:
O << *MO.getMBB()->getSymbol();
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td b/contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td
index 04fa2ae..9f9692b 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td
@@ -1,10 +1,10 @@
//===- SPUCallingConv.td - Calling Conventions for CellSPU -*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the STI Cell SPU architecture.
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp
index 093f99f..fac806e1 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "SPU.h"
#include "SPUFrameLowering.h"
+#include "SPU.h"
#include "SPUInstrBuilder.h"
#include "SPUInstrInfo.h"
#include "llvm/Function.h"
@@ -47,7 +47,8 @@ bool SPUFrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
return MFI->getStackSize() &&
- (DisableFramePointerElim(MF) || MFI->hasVarSizedObjects());
+ (MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ MFI->hasVarSizedObjects());
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.h b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.h
index b837f2c..11c5281 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUFrameLowering.h
@@ -1,4 +1,4 @@
-//=====-- SPUFrameLowering.h - SPU Frame Lowering stuff -*- C++ -*----========//
+//===-- SPUFrameLowering.h - SPU Frame Lowering stuff ----------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
index a297d03..c27caea 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
@@ -22,7 +22,6 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Constants.h"
@@ -91,8 +90,6 @@ namespace {
short s_val = (short) i_val;
return i_val == s_val;
}
-
- return false;
}
//! ConstantFPSDNode predicate for representing floats as 16-bit sign ext.
@@ -216,7 +213,7 @@ namespace {
HandleSDNode Dummy(CurDAG->getLoad(vecVT, dl,
CurDAG->getEntryNode(), CGPoolOffset,
MachinePointerInfo::getConstantPool(),
- false, false, Alignment));
+ false, false, false, Alignment));
CurDAG->ReplaceAllUsesWith(SDValue(bvNode, 0), Dummy.getValue());
if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
return N;
@@ -287,8 +284,8 @@ namespace {
llvm_unreachable("InlineAsmMemoryOperand 'v' constraint not handled.");
#else
SelectAddrIdxOnly(Op, Op, Op0, Op1);
-#endif
break;
+#endif
}
OutOps.push_back(Op0);
@@ -327,7 +324,7 @@ SPUDAGToDAGISel::SelectAFormAddr(SDNode *Op, SDValue N, SDValue &Base,
val = dyn_cast<ConstantSDNode>(N.getNode())->getSExtValue();
Base = CurDAG->getTargetConstant( val , MVT::i32);
Index = Zero;
- return true; break;
+ return true;
case ISD::ConstantPool:
case ISD::GlobalAddress:
report_fatal_error("SPU SelectAFormAddr: Pool/Global not lowered.");
@@ -579,22 +576,16 @@ SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
switch( VT.SimpleTy ) {
case MVT::i8:
return CurDAG->getTargetConstant(SPU::R8CRegClass.getID(), MVT::i32);
- break;
case MVT::i16:
return CurDAG->getTargetConstant(SPU::R16CRegClass.getID(), MVT::i32);
- break;
case MVT::i32:
return CurDAG->getTargetConstant(SPU::R32CRegClass.getID(), MVT::i32);
- break;
case MVT::f32:
return CurDAG->getTargetConstant(SPU::R32FPRegClass.getID(), MVT::i32);
- break;
case MVT::i64:
return CurDAG->getTargetConstant(SPU::R64CRegClass.getID(), MVT::i32);
- break;
case MVT::i128:
return CurDAG->getTargetConstant(SPU::GPRCRegClass.getID(), MVT::i32);
- break;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
@@ -602,11 +593,10 @@ SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
case MVT::v2i64:
case MVT::v2f64:
return CurDAG->getTargetConstant(SPU::VECREGRegClass.getID(), MVT::i32);
- break;
default:
assert( false && "add a new case here" );
+ return SDValue();
}
- return SDValue();
}
//! Convert the operand from a target-independent to a target-specific node
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
index ac33111..0623741 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -27,19 +27,14 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include <map>
using namespace llvm;
-// Used in getTargetNodeName() below
namespace {
- std::map<unsigned, const char *> node_names;
-
// Byte offset of the preferred slot (counted from the MSB)
int prefslotOffset(EVT VT) {
int retval=0;
@@ -84,8 +79,9 @@ namespace {
Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
- 0, TLI.getLibcallCallingConv(LC), false,
- /*isReturnValueUsed=*/true,
+ 0, TLI.getLibcallCallingConv(LC),
+ /*isTailCall=*/false,
+ /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
Callee, Args, DAG, Op.getDebugLoc());
return CallInfo.first;
@@ -296,12 +292,22 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::CTTZ , MVT::i32, Expand);
setOperationAction(ISD::CTTZ , MVT::i64, Expand);
setOperationAction(ISD::CTTZ , MVT::i128, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i128, Expand);
setOperationAction(ISD::CTLZ , MVT::i8, Promote);
setOperationAction(ISD::CTLZ , MVT::i16, Promote);
setOperationAction(ISD::CTLZ , MVT::i32, Legal);
setOperationAction(ISD::CTLZ , MVT::i64, Expand);
setOperationAction(ISD::CTLZ , MVT::i128, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i128, Expand);
// SPU has a version of select that implements (a&~c)|(b&c), just like
// select ought to work:
@@ -424,6 +430,13 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
+ // Expand all trunc stores
+ for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) {
+ MVT::SimpleValueType TargetVT = (MVT::SimpleValueType)j;
+ setTruncStoreAction(VT, TargetVT, Expand);
+ }
+
// Custom lower build_vector, constant pool spills, insert and
// extract vector elements:
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
@@ -434,6 +447,8 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
}
+ setOperationAction(ISD::SHL, MVT::v2i64, Expand);
+
setOperationAction(ISD::AND, MVT::v16i8, Custom);
setOperationAction(ISD::OR, MVT::v16i8, Custom);
setOperationAction(ISD::XOR, MVT::v16i8, Custom);
@@ -462,40 +477,34 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setSchedulingPreference(Sched::RegPressure);
}
-const char *
-SPUTargetLowering::getTargetNodeName(unsigned Opcode) const
-{
- if (node_names.empty()) {
- node_names[(unsigned) SPUISD::RET_FLAG] = "SPUISD::RET_FLAG";
- node_names[(unsigned) SPUISD::Hi] = "SPUISD::Hi";
- node_names[(unsigned) SPUISD::Lo] = "SPUISD::Lo";
- node_names[(unsigned) SPUISD::PCRelAddr] = "SPUISD::PCRelAddr";
- node_names[(unsigned) SPUISD::AFormAddr] = "SPUISD::AFormAddr";
- node_names[(unsigned) SPUISD::IndirectAddr] = "SPUISD::IndirectAddr";
- node_names[(unsigned) SPUISD::LDRESULT] = "SPUISD::LDRESULT";
- node_names[(unsigned) SPUISD::CALL] = "SPUISD::CALL";
- node_names[(unsigned) SPUISD::SHUFB] = "SPUISD::SHUFB";
- node_names[(unsigned) SPUISD::SHUFFLE_MASK] = "SPUISD::SHUFFLE_MASK";
- node_names[(unsigned) SPUISD::CNTB] = "SPUISD::CNTB";
- node_names[(unsigned) SPUISD::PREFSLOT2VEC] = "SPUISD::PREFSLOT2VEC";
- node_names[(unsigned) SPUISD::VEC2PREFSLOT] = "SPUISD::VEC2PREFSLOT";
- node_names[(unsigned) SPUISD::SHL_BITS] = "SPUISD::SHL_BITS";
- node_names[(unsigned) SPUISD::SHL_BYTES] = "SPUISD::SHL_BYTES";
- node_names[(unsigned) SPUISD::VEC_ROTL] = "SPUISD::VEC_ROTL";
- node_names[(unsigned) SPUISD::VEC_ROTR] = "SPUISD::VEC_ROTR";
- node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
- node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
- "SPUISD::ROTBYTES_LEFT_BITS";
- node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
- node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
- node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
- node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
- node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
- }
-
- std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);
-
- return ((i != node_names.end()) ? i->second : 0);
+const char *SPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ default: return 0;
+ case SPUISD::RET_FLAG: return "SPUISD::RET_FLAG";
+ case SPUISD::Hi: return "SPUISD::Hi";
+ case SPUISD::Lo: return "SPUISD::Lo";
+ case SPUISD::PCRelAddr: return "SPUISD::PCRelAddr";
+ case SPUISD::AFormAddr: return "SPUISD::AFormAddr";
+ case SPUISD::IndirectAddr: return "SPUISD::IndirectAddr";
+ case SPUISD::LDRESULT: return "SPUISD::LDRESULT";
+ case SPUISD::CALL: return "SPUISD::CALL";
+ case SPUISD::SHUFB: return "SPUISD::SHUFB";
+ case SPUISD::SHUFFLE_MASK: return "SPUISD::SHUFFLE_MASK";
+ case SPUISD::CNTB: return "SPUISD::CNTB";
+ case SPUISD::PREFSLOT2VEC: return "SPUISD::PREFSLOT2VEC";
+ case SPUISD::VEC2PREFSLOT: return "SPUISD::VEC2PREFSLOT";
+ case SPUISD::SHL_BITS: return "SPUISD::SHL_BITS";
+ case SPUISD::SHL_BYTES: return "SPUISD::SHL_BYTES";
+ case SPUISD::VEC_ROTL: return "SPUISD::VEC_ROTL";
+ case SPUISD::VEC_ROTR: return "SPUISD::VEC_ROTR";
+ case SPUISD::ROTBYTES_LEFT: return "SPUISD::ROTBYTES_LEFT";
+ case SPUISD::ROTBYTES_LEFT_BITS: return "SPUISD::ROTBYTES_LEFT_BITS";
+ case SPUISD::SELECT_MASK: return "SPUISD::SELECT_MASK";
+ case SPUISD::SELB: return "SPUISD::SELB";
+ case SPUISD::ADD64_MARKER: return "SPUISD::ADD64_MARKER";
+ case SPUISD::SUB64_MARKER: return "SPUISD::SUB64_MARKER";
+ case SPUISD::MUL64_MARKER: return "SPUISD::MUL64_MARKER";
+ }
}
//===----------------------------------------------------------------------===//
@@ -658,7 +667,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
// Do the load as a i128 to allow possible shifting
SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
lowMemPtr,
- LN->isVolatile(), LN->isNonTemporal(), 16);
+ LN->isVolatile(), LN->isNonTemporal(), false, 16);
// When the size is not greater than alignment we get all data with just
// one load
@@ -695,7 +704,8 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
basePtr,
DAG.getConstant(16, PtrVT)),
highMemPtr,
- LN->isVolatile(), LN->isNonTemporal(), 16);
+ LN->isVolatile(), LN->isNonTemporal(), false,
+ 16);
the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
high.getValue(1));
@@ -850,7 +860,8 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
// Load the lower part of the memory to which to store.
SDValue low = DAG.getLoad(vecVT, dl, the_chain, basePtr,
- lowMemPtr, SN->isVolatile(), SN->isNonTemporal(), 16);
+ lowMemPtr, SN->isVolatile(), SN->isNonTemporal(),
+ false, 16);
// if we don't need to store over the 16 byte boundary, one store suffices
if (alignment >= StVT.getSizeInBits()/8) {
@@ -950,7 +961,8 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
DAG.getConstant( 16, PtrVT)),
highMemPtr,
- SN->isVolatile(), SN->isNonTemporal(), 16);
+ SN->isVolatile(), SN->isNonTemporal(),
+ false, 16);
the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
hi.getValue(1));
@@ -1017,7 +1029,6 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
llvm_unreachable("LowerConstantPool: Relocation model other than static"
" not supported.");
- return SDValue();
}
//! Alternate entry point for generating the address of a constant pool entry
@@ -1048,7 +1059,6 @@ LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
llvm_unreachable("LowerJumpTable: Relocation model other than static"
" not supported.");
- return SDValue();
}
static SDValue
@@ -1076,8 +1086,6 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
"not supported.");
/*NOTREACHED*/
}
-
- return SDValue();
}
//! Custom lower double precision floating point constants
@@ -1185,7 +1193,7 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
ArgOffset += StackSlotSize;
}
@@ -1198,7 +1206,7 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
if (isVarArg) {
// FIXME: we should be able to query the argument registers from
// tablegen generated code.
- static const unsigned ArgRegs[] = {
+ static const uint16_t ArgRegs[] = {
SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
@@ -1212,7 +1220,7 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
};
// size of ArgRegs array
- unsigned NumArgRegs = 77;
+ const unsigned NumArgRegs = 77;
// We will spill (79-3)+1 registers to the stack
SmallVector<SDValue, 79-3+1> MemOps;
@@ -1257,7 +1265,7 @@ static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
SDValue
SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -1675,7 +1683,6 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
SDValue T = DAG.getConstant(Value32, MVT::i32);
return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
- break;
}
case MVT::v2f64: {
uint64_t f64val = uint64_t(SplatBits);
@@ -1685,7 +1692,6 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
SDValue T = DAG.getConstant(f64val, MVT::i64);
return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
- break;
}
case MVT::v16i8: {
// 8-bit constants have to be expanded to 16-bits
@@ -1712,8 +1718,6 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
}
}
-
- return SDValue();
}
/*!
@@ -1743,9 +1747,11 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Both upper and lower are special, lower to a constant pool load:
if (lower_special && upper_special) {
- SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
- return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
- SplatValCN, SplatValCN);
+ SDValue UpperVal = DAG.getConstant(upper, MVT::i32);
+ SDValue LowerVal = DAG.getConstant(lower, MVT::i32);
+ SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+ UpperVal, LowerVal, UpperVal, LowerVal);
+ return DAG.getNode(ISD::BITCAST, dl, OpVT, BV);
}
SDValue LO32;
@@ -1985,8 +1991,6 @@ static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
}
}
-
- return SDValue();
}
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
@@ -2020,8 +2024,7 @@ static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
int elt_byte = EltNo * VT.getSizeInBits() / 8;
switch (VT.getSimpleVT().SimpleTy) {
- default:
- assert(false && "Invalid value type!");
+ default: llvm_unreachable("Invalid value type!");
case MVT::i8: {
prefslot_begin = prefslot_end = 3;
break;
@@ -2199,8 +2202,6 @@ static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
switch (Opc) {
default:
llvm_unreachable("Unhandled i8 math operator");
- /*NOTREACHED*/
- break;
case ISD::ADD: {
// 8-bit addition: Promote the arguments up to 16-bits and truncate
// the result:
@@ -2285,11 +2286,8 @@ static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
DAG.getNode(Opc, dl, MVT::i16, N0, N1));
- break;
}
}
-
- return SDValue();
}
//! Lower byte immediate operations for v16i8 vectors:
@@ -2354,8 +2352,7 @@ static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
switch (VT.getSimpleVT().SimpleTy) {
- default:
- assert(false && "Invalid value type!");
+ default: llvm_unreachable("Invalid value type!");
case MVT::i8: {
SDValue N = Op.getOperand(0);
SDValue Elt0 = DAG.getConstant(0, MVT::i32);
@@ -3161,7 +3158,6 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
//! Compute used/known bits for a SPU operand
void
SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -3227,7 +3223,7 @@ bool SPUTargetLowering::isLegalAddressImmediate(int64_t V,
return (V > -(1 << 18) && V < (1 << 18) - 1);
}
-bool SPUTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
+bool SPUTargetLowering::isLegalAddressImmediate(GlobalValue* GV) const {
return false;
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
index aa4a168..e3db7b2 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
@@ -15,9 +15,9 @@
#ifndef SPU_ISELLOWERING_H
#define SPU_ISELLOWERING_H
+#include "SPU.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "SPU.h"
namespace llvm {
namespace SPUISD {
@@ -121,7 +121,6 @@ namespace llvm {
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -162,7 +161,7 @@ namespace llvm {
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrBuilder.h b/contrib/llvm/lib/Target/CellSPU/SPUInstrBuilder.h
index 5e268f8..b495537 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrBuilder.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrBuilder.h
@@ -1,4 +1,4 @@
-//==-- SPUInstrBuilder.h - Aides for building Cell SPU insts -----*- C++ -*-==//
+//===-- SPUInstrBuilder.h - Aides for building Cell SPU insts ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrFormats.td b/contrib/llvm/lib/Target/CellSPU/SPUInstrFormats.td
index bdbe255..cd3f422 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrFormats.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrFormats.td
@@ -1,10 +1,10 @@
-//==== SPUInstrFormats.td - Cell SPU Instruction Formats ---*- tablegen -*-===//
-//
+//===-- SPUInstrFormats.td - Cell SPU Instruction Formats --*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
index 007bc0e..759923d 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- SPUInstrInfo.cpp - Cell SPU Instruction Information ----------------===//
+//===-- SPUInstrInfo.cpp - Cell SPU Instruction Information ---------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
index bc1ba71..85e5821 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
@@ -1,4 +1,4 @@
-//===- SPUInstrInfo.h - Cell SPU Instruction Information --------*- C++ -*-===//
+//===-- SPUInstrInfo.h - Cell SPU Instruction Information -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,8 +15,8 @@
#define SPU_INSTRUCTIONINFO_H
#include "SPU.h"
-#include "llvm/Target/TargetInstrInfo.h"
#include "SPURegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "SPUGenInstrInfo.inc"
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.cpp b/contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.cpp
new file mode 100644
index 0000000..3e948d0
--- /dev/null
+++ b/contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.cpp
@@ -0,0 +1,14 @@
+//==-- SPUMachineFunctionInfo.cpp - Private data used for CellSPU ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SPUMachineFunction.h"
+
+using namespace llvm;
+
+void SPUFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.h b/contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.h
index 3ef3ccb..399684b 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUMachineFunction.h
@@ -21,7 +21,8 @@ namespace llvm {
/// SPUFunctionInfo - Cell SPU target-specific information for each
/// MachineFunction
class SPUFunctionInfo : public MachineFunctionInfo {
-private:
+ virtual void anchor();
+
/// UsesLR - Indicates whether LR is used in the current function.
///
bool UsesLR;
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUMathInstr.td b/contrib/llvm/lib/Target/CellSPU/SPUMathInstr.td
index ed7129e..9a5c397 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUMathInstr.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUMathInstr.td
@@ -1,4 +1,4 @@
-//======--- SPUMathInst.td - Cell SPU math operations -*- tablegen -*---======//
+//===-- SPUMathInst.td - Cell SPU math operations ---------*- tablegen -*--===//
//
// Cell SPU math operations
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUNodes.td b/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
index a6e621f..a47e9ef 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
@@ -1,4 +1,4 @@
-//===- SPUNodes.td - Specialized SelectionDAG nodes used for CellSPU ------===//
+//=== SPUNodes.td - Specialized SelectionDAG nodes by CellSPU -*- tablegen -*-//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp b/contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp
index e2bd2d7..7c58041 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUNopFiller.cpp
@@ -1,4 +1,4 @@
-//===-- SPUNopFiller.cpp - Add nops/lnops to align the pipelines---===//
+//===-- SPUNopFiller.cpp - Add nops/lnops to align the pipelines ----------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUOperands.td b/contrib/llvm/lib/Target/CellSPU/SPUOperands.td
index 96cde51..6f8deef 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUOperands.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUOperands.td
@@ -1,10 +1,10 @@
-//===- SPUOperands.td - Cell SPU Instruction Operands ------*- tablegen -*-===//
-//
+//===-- SPUOperands.td - Cell SPU Instruction Operands -----*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
// Cell SPU Instruction Operands:
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
index bbac6fd..1b2da5f 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- SPURegisterInfo.cpp - Cell SPU Register Information ----------------===//
+//===-- SPURegisterInfo.cpp - Cell SPU Register Information ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "reginfo"
-#include "SPU.h"
#include "SPURegisterInfo.h"
+#include "SPU.h"
#include "SPUInstrBuilder.h"
#include "SPUSubtarget.h"
#include "SPUMachineFunction.h"
@@ -197,11 +197,11 @@ SPURegisterInfo::getPointerRegClass(unsigned Kind) const {
return &SPU::R32CRegClass;
}
-const unsigned *
+const uint16_t *
SPURegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const
{
// Cell ABI calling convention
- static const unsigned SPU_CalleeSaveRegs[] = {
+ static const uint16_t SPU_CalleeSaveRegs[] = {
SPU::R80, SPU::R81, SPU::R82, SPU::R83,
SPU::R84, SPU::R85, SPU::R86, SPU::R87,
SPU::R88, SPU::R89, SPU::R90, SPU::R91,
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
index b7818a4..e5ab224 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
@@ -1,4 +1,4 @@
-//===- SPURegisterInfo.h - Cell SPU Register Information Impl ----*- C++ -*-==//
+//===-- SPURegisterInfo.h - Cell SPU Register Information Impl --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -57,7 +57,7 @@ namespace llvm {
}
//! Return the array of callee-saved registers
- virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF) const;
+ virtual const uint16_t* getCalleeSavedRegs(const MachineFunction *MF) const;
//! Allow for scavenging, so we can get scratch registers when needed.
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.td b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.td
index e16f51f..f27b042 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.td
@@ -1,10 +1,10 @@
-//===- SPURegisterInfo.td - The Cell SPU Register File -----*- tablegen -*-===//
-//
+//===-- SPURegisterInfo.td - The Cell SPU Register File ----*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUSchedule.td b/contrib/llvm/lib/Target/CellSPU/SPUSchedule.td
index 9cd3c23..9ccd084 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUSchedule.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUSchedule.td
@@ -1,10 +1,10 @@
-//===- SPUSchedule.td - Cell Scheduling Definitions --------*- tablegen -*-===//
-//
+//===-- SPUSchedule.td - Cell Scheduling Definitions -------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp b/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp
index 43335ab..eec2d25 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUSubtarget.cpp
@@ -1,4 +1,4 @@
-//===- SPUSubtarget.cpp - STI Cell SPU Subtarget Information --------------===//
+//===-- SPUSubtarget.cpp - STI Cell SPU Subtarget Information -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,7 +15,6 @@
#include "SPU.h"
#include "SPURegisterInfo.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/ADT/SmallVector.h"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
index 93a7f6e..21f6b25 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.cpp
@@ -11,17 +11,16 @@
//
//===----------------------------------------------------------------------===//
-#include "SPU.h"
#include "SPUTargetMachine.h"
+#include "SPU.h"
#include "llvm/PassManager.h"
-#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
-extern "C" void LLVMInitializeCellSPUTarget() {
+extern "C" void LLVMInitializeCellSPUTarget() {
// Register the target.
RegisterTargetMachine<SPUTargetMachine> X(TheCellSPUTarget);
}
@@ -34,8 +33,10 @@ SPUFrameLowering::getCalleeSaveSpillSlots(unsigned &NumEntries) const {
SPUTargetMachine::SPUTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS),
DataLayout(Subtarget.getTargetDataString()),
InstrInfo(*this),
@@ -49,16 +50,34 @@ SPUTargetMachine::SPUTargetMachine(const Target &T, StringRef TT,
// Pass Pipeline Configuration
//===----------------------------------------------------------------------===//
-bool SPUTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+namespace {
+/// SPU Code Generator Pass Configuration Options.
+class SPUPassConfig : public TargetPassConfig {
+public:
+ SPUPassConfig(SPUTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ SPUTargetMachine &getSPUTargetMachine() const {
+ return getTM<SPUTargetMachine>();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *SPUTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new SPUPassConfig(this, PM);
+}
+
+bool SPUPassConfig::addInstSelector() {
// Install an instruction selector.
- PM.add(createSPUISelDag(*this));
+ PM.add(createSPUISelDag(getSPUTargetMachine()));
return false;
}
// passes to run just before printing the assembly
-bool SPUTargetMachine::
-addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel) {
+bool SPUPassConfig::addPreEmitPass() {
// load the TCE instruction scheduler, if available via
// loaded plugins
typedef llvm::FunctionPass* (*BuilderFunc)(const char*);
@@ -69,6 +88,6 @@ addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel) {
PM.add(schedulerCreator("cellspu"));
//align instructions with nops/lnops for dual issue
- PM.add(createSPUNopFillerPass(*this));
+ PM.add(createSPUNopFillerPass(getSPUTargetMachine()));
return true;
}
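The two SPUTargetMachine hunks above are part of LLVM's migration from per-hook pass insertion, where each override received a PassManagerBase and a CodeGenOpt::Level, to a target-owned TargetPassConfig object that carries both. A minimal sketch of the pattern for a hypothetical MyTarget (MyTargetMachine and createMyISelDag are placeholders, not names from this diff):

#include "llvm/CodeGen/Passes.h"   // TargetPassConfig
#include "llvm/PassManager.h"

namespace {
// Hypothetical pass-config class mirroring the SPUPassConfig added above.
class MyPassConfig : public TargetPassConfig {
public:
  MyPassConfig(MyTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  MyTargetMachine &getMyTargetMachine() const {
    return getTM<MyTargetMachine>();
  }

  // Note: the hook takes no PassManagerBase or opt-level parameters any
  // more; both are available through the TargetPassConfig members.
  virtual bool addInstSelector() {
    PM.add(createMyISelDag(getMyTargetMachine()));
    return false;
  }
};
} // namespace

TargetPassConfig *MyTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new MyPassConfig(this, PM);
}

The optimization level that used to arrive as OptLevel now reaches the machine through the new CodeGenOpt::Level constructor parameter visible in the SPUTargetMachine hunk above.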
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
index fffe77c..3e5d38c 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUTargetMachine.h
@@ -1,4 +1,4 @@
-//===-- SPUTargetMachine.h - Define TargetMachine for Cell SPU ----*- C++ -*-=//
+//===-- SPUTargetMachine.h - Define TargetMachine for Cell SPU --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,9 +23,6 @@
#include "llvm/Target/TargetData.h"
namespace llvm {
-class PassManager;
-class GlobalValue;
-class TargetFrameLowering;
/// SPUTargetMachine
///
@@ -39,8 +36,9 @@ class SPUTargetMachine : public LLVMTargetMachine {
InstrItineraryData InstrItins;
public:
SPUTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
/// Return the subtarget implementation object
virtual const SPUSubtarget *getSubtargetImpl() const {
@@ -60,7 +58,7 @@ public:
return NULL;
}
- virtual const SPUTargetLowering *getTargetLowering() const {
+ virtual const SPUTargetLowering *getTargetLowering() const {
return &TLInfo;
}
@@ -71,7 +69,7 @@ public:
virtual const SPURegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
-
+
virtual const TargetData *getTargetData() const {
return &DataLayout;
}
@@ -79,11 +77,9 @@ public:
virtual const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
}
-
+
// Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &, CodeGenOpt::Level);
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
index 394ea2b..69f0ff8 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -33,8 +33,9 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Config/config.h"
#include <algorithm>
-#include <set>
+#include <cstdio>
#include <map>
+#include <set>
using namespace llvm;
static cl::opt<std::string>
@@ -189,13 +190,24 @@ static std::string getTypePrefix(Type *Ty) {
case Type::VectorTyID: return "packed_";
default: return "other_";
}
- return "unknown_";
}
void CppWriter::error(const std::string& msg) {
report_fatal_error(msg);
}
+static inline std::string ftostr(const APFloat& V) {
+ std::string Buf;
+ if (&V.getSemantics() == &APFloat::IEEEdouble) {
+ raw_string_ostream(Buf) << V.convertToDouble();
+ return Buf;
+ } else if (&V.getSemantics() == &APFloat::IEEEsingle) {
+ raw_string_ostream(Buf) << (double)V.convertToFloat();
+ return Buf;
+ }
+ return "<unknown format in ftostr>"; // error
+}
+
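A side note on the new ftostr helper: it relies on the temporary raw_string_ostream flushing into Buf when the temporary is destroyed at the end of the full expression, so the string is complete by the time Buf is returned. The same idiom in isolation (a sketch, not part of the diff):

#include "llvm/Support/raw_ostream.h"
#include <string>

// Convert a double to its decimal spelling the way ftostr above does:
// the temporary stream is destroyed, and therefore flushed, before the
// return statement reads Buf.
static std::string doubleToString(double D) {
  std::string Buf;
  llvm::raw_string_ostream(Buf) << D;
  return Buf;
}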
// printCFP - Print a floating point constant .. very carefully :)
// This makes sure that conversion to/from floating yields the same binary
// result so that we don't lose precision.
@@ -301,7 +313,6 @@ void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
switch (VisType) {
- default: llvm_unreachable("Unknown GVar visibility");
case GlobalValue::DefaultVisibility:
Out << "GlobalValue::DefaultVisibility";
break;
@@ -443,7 +454,7 @@ void CppWriter::printAttributes(const AttrListPtr &PAL,
for (unsigned i = 0; i < PAL.getNumSlots(); ++i) {
unsigned index = PAL.getSlot(i).Index;
Attributes attrs = PAL.getSlot(i).Attrs;
- Out << "PAWI.Index = " << index << "U; PAWI.Attrs = 0 ";
+ Out << "PAWI.Index = " << index << "U; PAWI.Attrs = Attribute::None ";
#define HANDLE_ATTR(X) \
if (attrs & Attribute::X) \
Out << " | Attribute::" #X; \
@@ -678,11 +689,6 @@ void CppWriter::printConstant(const Constant *CV) {
std::string constName(getCppName(CV));
std::string typeName(getCppName(CV->getType()));
- if (isa<GlobalValue>(CV)) {
- // Skip variables and functions, we emit them elsewhere
- return;
- }
-
if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
std::string constValue = CI->getValue().toString(10, true);
Out << "ConstantInt* " << constName
@@ -700,38 +706,17 @@ void CppWriter::printConstant(const Constant *CV) {
printCFP(CFP);
Out << ";";
} else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
- if (CA->isString() &&
- CA->getType()->getElementType() ==
- Type::getInt8Ty(CA->getContext())) {
- Out << "Constant* " << constName <<
- " = ConstantArray::get(mod->getContext(), \"";
- std::string tmp = CA->getAsString();
- bool nullTerminate = false;
- if (tmp[tmp.length()-1] == 0) {
- tmp.erase(tmp.length()-1);
- nullTerminate = true;
- }
- printEscapedString(tmp);
- // Determine if we want null termination or not.
- if (nullTerminate)
- Out << "\", true"; // Indicate that the null terminator should be
- // added.
- else
- Out << "\", false";// No null terminator
- Out << ");";
- } else {
- Out << "std::vector<Constant*> " << constName << "_elems;";
+ Out << "std::vector<Constant*> " << constName << "_elems;";
+ nl(Out);
+ unsigned N = CA->getNumOperands();
+ for (unsigned i = 0; i < N; ++i) {
+ printConstant(CA->getOperand(i)); // recurse to print operands
+ Out << constName << "_elems.push_back("
+ << getCppName(CA->getOperand(i)) << ");";
nl(Out);
- unsigned N = CA->getNumOperands();
- for (unsigned i = 0; i < N; ++i) {
- printConstant(CA->getOperand(i)); // recurse to print operands
- Out << constName << "_elems.push_back("
- << getCppName(CA->getOperand(i)) << ");";
- nl(Out);
- }
- Out << "Constant* " << constName << " = ConstantArray::get("
- << typeName << ", " << constName << "_elems);";
}
+ Out << "Constant* " << constName << " = ConstantArray::get("
+ << typeName << ", " << constName << "_elems);";
} else if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
Out << "std::vector<Constant*> " << constName << "_fields;";
nl(Out);
@@ -744,14 +729,14 @@ void CppWriter::printConstant(const Constant *CV) {
}
Out << "Constant* " << constName << " = ConstantStruct::get("
<< typeName << ", " << constName << "_fields);";
- } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
+ } else if (const ConstantVector *CVec = dyn_cast<ConstantVector>(CV)) {
Out << "std::vector<Constant*> " << constName << "_elems;";
nl(Out);
- unsigned N = CP->getNumOperands();
+ unsigned N = CVec->getNumOperands();
for (unsigned i = 0; i < N; ++i) {
- printConstant(CP->getOperand(i));
+ printConstant(CVec->getOperand(i));
Out << constName << "_elems.push_back("
- << getCppName(CP->getOperand(i)) << ");";
+ << getCppName(CVec->getOperand(i)) << ");";
nl(Out);
}
Out << "Constant* " << constName << " = ConstantVector::get("
@@ -759,6 +744,41 @@ void CppWriter::printConstant(const Constant *CV) {
} else if (isa<UndefValue>(CV)) {
Out << "UndefValue* " << constName << " = UndefValue::get("
<< typeName << ");";
+ } else if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(CV)) {
+ if (CDS->isString()) {
+ Out << "Constant *" << constName <<
+ " = ConstantDataArray::getString(mod->getContext(), \"";
+ StringRef Str = CDS->getAsString();
+ bool nullTerminate = false;
+ if (Str.back() == 0) {
+ Str = Str.drop_back();
+ nullTerminate = true;
+ }
+ printEscapedString(Str);
+ // Determine if we want null termination or not.
+ if (nullTerminate)
+ Out << "\", true);";
+ else
+ Out << "\", false);";// No null terminator
+ } else {
+ // TODO: Could generate more efficient code generating CDS calls instead.
+ Out << "std::vector<Constant*> " << constName << "_elems;";
+ nl(Out);
+ for (unsigned i = 0; i != CDS->getNumElements(); ++i) {
+ Constant *Elt = CDS->getElementAsConstant(i);
+ printConstant(Elt);
+ Out << constName << "_elems.push_back(" << getCppName(Elt) << ");";
+ nl(Out);
+ }
+ Out << "Constant* " << constName;
+
+ if (isa<ArrayType>(CDS->getType()))
+ Out << " = ConstantArray::get(";
+ else
+ Out << " = ConstantVector::get(";
+ Out << typeName << ", " << constName << "_elems);";
+ }
} else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
if (CE->getOpcode() == Instruction::GetElementPtr) {
Out << "std::vector<Constant*> " << constName << "_indices;";
@@ -1083,10 +1103,10 @@ void CppWriter::printInstruction(const Instruction *I,
<< getOpName(SI->getDefaultDest()) << ", "
<< SI->getNumCases() << ", " << bbname << ");";
nl(Out);
- unsigned NumCases = SI->getNumCases();
- for (unsigned i = 1; i < NumCases; ++i) {
- const ConstantInt* CaseVal = SI->getCaseValue(i);
- const BasicBlock* BB = SI->getSuccessor(i);
+ for (SwitchInst::ConstCaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
+ const ConstantInt* CaseVal = i.getCaseValue();
+ const BasicBlock *BB = i.getCaseSuccessor();
Out << iName << "->addCase("
<< getOpName(CaseVal) << ", "
<< getOpName(BB) << ");";
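The loop above is the new iterator-based traversal of switch cases. Under the old numbering, successor 0 was the default destination and real cases started at index 1; case_begin()/case_end() cover only the non-default cases, with the default reached through its own accessor. A standalone sketch of the idiom:

// Iterate the non-default cases of a SwitchInst (SI assumed valid).
for (SwitchInst::ConstCaseIt I = SI->case_begin(), E = SI->case_end();
     I != E; ++I) {
  const ConstantInt *CaseVal = I.getCaseValue();   // matched constant
  const BasicBlock *Succ = I.getCaseSuccessor();   // branch destination
  // ... use CaseVal and Succ ...
}
// The default destination is separate: SI->getDefaultDest().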
@@ -1135,11 +1155,6 @@ void CppWriter::printInstruction(const Instruction *I,
nl(Out);
break;
}
- case Instruction::Unwind: {
- Out << "new UnwindInst("
- << bbname << ");";
- break;
- }
case Instruction::Unreachable: {
Out << "new UnreachableInst("
<< "mod->getContext(), "
@@ -1354,7 +1369,7 @@ void CppWriter::printInstruction(const Instruction *I,
case Instruction::PtrToInt: Out << "PtrToIntInst"; break;
case Instruction::IntToPtr: Out << "IntToPtrInst"; break;
case Instruction::BitCast: Out << "BitCastInst"; break;
- default: assert(0 && "Unreachable"); break;
+ default: llvm_unreachable("Unreachable");
}
Out << "(" << opNames[0] << ", "
<< getCppName(cst->getType()) << ", \"";
@@ -2049,8 +2064,6 @@ bool CppWriter::runOnModule(Module &M) {
fname = "makeLLVMType";
printType(fname,tgtname);
break;
- default:
- error("Invalid generation option");
}
return false;
@@ -2065,7 +2078,6 @@ char CppWriter::ID = 0;
bool CPPTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
formatted_raw_ostream &o,
CodeGenFileType FileType,
- CodeGenOpt::Level OptLevel,
bool DisableVerify) {
if (FileType != TargetMachine::CGFT_AssemblyFile) return true;
PM.add(new CppWriter(o));
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h b/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
index 287e537..92bca6c 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
+++ b/contrib/llvm/lib/Target/CppBackend/CPPTargetMachine.h
@@ -23,14 +23,14 @@ class formatted_raw_ostream;
struct CPPTargetMachine : public TargetMachine {
CPPTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : TargetMachine(T, TT, CPU, FS) {}
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : TargetMachine(T, TT, CPU, FS, Options) {}
virtual bool addPassesToEmitFile(PassManagerBase &PM,
formatted_raw_ostream &Out,
CodeGenFileType FileType,
- CodeGenOpt::Level OptLevel,
bool DisableVerify);
virtual const TargetData *getTargetData() const { return 0; }
diff --git a/contrib/llvm/lib/Target/Hexagon/Hexagon.h b/contrib/llvm/lib/Target/Hexagon/Hexagon.h
new file mode 100644
index 0000000..43858b9
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/Hexagon.h
@@ -0,0 +1,74 @@
+//=-- Hexagon.h - Top-level interface for Hexagon representation --*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in the LLVM
+// Hexagon back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TARGET_Hexagon_H
+#define TARGET_Hexagon_H
+
+#include "MCTargetDesc/HexagonMCTargetDesc.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+ class FunctionPass;
+ class TargetMachine;
+ class MachineInstr;
+ class MCInst;
+ class HexagonAsmPrinter;
+ class HexagonTargetMachine;
+ class raw_ostream;
+
+ FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM);
+ FunctionPass *createHexagonDelaySlotFillerPass(TargetMachine &TM);
+ FunctionPass *createHexagonFPMoverPass(TargetMachine &TM);
+ FunctionPass *createHexagonRemoveExtendOps(HexagonTargetMachine &TM);
+ FunctionPass *createHexagonCFGOptimizer(HexagonTargetMachine &TM);
+
+ FunctionPass *createHexagonSplitTFRCondSets(HexagonTargetMachine &TM);
+ FunctionPass *createHexagonExpandPredSpillCode(HexagonTargetMachine &TM);
+
+ FunctionPass *createHexagonHardwareLoops();
+ FunctionPass *createHexagonPeephole();
+ FunctionPass *createHexagonFixupHwLoops();
+ FunctionPass *createHexagonPacketizer();
+
+/* TODO: object output.
+ MCCodeEmitter *createHexagonMCCodeEmitter(const Target &,
+ TargetMachine &TM,
+ MCContext &Ctx);
+*/
+/* TODO: assembler input.
+ TargetAsmBackend *createHexagonAsmBackend(const Target &, const std::string &);
+*/
+ void HexagonLowerToMC(const MachineInstr *MI, MCInst &MCI,
+ HexagonAsmPrinter &AP);
+} // end namespace llvm;
+
+#define Hexagon_POINTER_SIZE 4
+
+#define Hexagon_PointerSize (Hexagon_POINTER_SIZE)
+#define Hexagon_PointerSize_Bits (Hexagon_POINTER_SIZE * 8)
+#define Hexagon_WordSize Hexagon_PointerSize
+#define Hexagon_WordSize_Bits Hexagon_PointerSize_Bits
+
+// allocframe saves LR and FP on stack before allocating
+// a new stack frame. This takes 8 bytes.
+#define HEXAGON_LRFP_SIZE 8
+
+// Normal instruction size (in bytes).
+#define HEXAGON_INSTR_SIZE 4
+
+// Maximum number of words in a packet (in instructions).
+#define HEXAGON_PACKET_SIZE 4
+
+#endif
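The size macros above give later passes a single source of truth for packet-layout arithmetic; for example, the byte footprint of a maximally full packet follows directly (a trivial illustration, not code from this diff):

// Byte size of a full Hexagon packet: 4 instructions x 4 bytes = 16.
enum { HexagonMaxPacketBytes = HEXAGON_PACKET_SIZE * HEXAGON_INSTR_SIZE };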
diff --git a/contrib/llvm/lib/Target/Hexagon/Hexagon.td b/contrib/llvm/lib/Target/Hexagon/Hexagon.td
new file mode 100644
index 0000000..4a50d16
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/Hexagon.td
@@ -0,0 +1,72 @@
+//===-- Hexagon.td - Describe the Hexagon Target Machine --*- tablegen -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the top level entry point for the Hexagon target.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// Hexagon Subtarget features.
+//===----------------------------------------------------------------------===//
+
+// Hexagon Architectures
+def ArchV2 : SubtargetFeature<"v2", "HexagonArchVersion", "V2",
+ "Hexagon v2">;
+def ArchV3 : SubtargetFeature<"v3", "HexagonArchVersion", "V3",
+ "Hexagon v3">;
+def ArchV4 : SubtargetFeature<"v4", "HexagonArchVersion", "V4",
+ "Hexagon v4">;
+
+//===----------------------------------------------------------------------===//
+// Register File, Calling Conv, Instruction Descriptions
+//===----------------------------------------------------------------------===//
+include "HexagonSchedule.td"
+include "HexagonRegisterInfo.td"
+include "HexagonCallingConv.td"
+include "HexagonInstrInfo.td"
+include "HexagonIntrinsics.td"
+include "HexagonIntrinsicsDerived.td"
+
+def HexagonInstrInfo : InstrInfo;
+
+//===----------------------------------------------------------------------===//
+// Hexagon processors supported.
+//===----------------------------------------------------------------------===//
+
+class Proc<string Name, ProcessorItineraries Itin,
+ list<SubtargetFeature> Features>
+ : Processor<Name, Itin, Features>;
+
+def : Proc<"hexagonv2", HexagonItineraries, [ArchV2]>;
+def : Proc<"hexagonv3", HexagonItineraries, [ArchV2, ArchV3]>;
+def : Proc<"hexagonv4", HexagonItinerariesV4, [ArchV2, ArchV3, ArchV4]>;
+
+// Hexagon uses the MC printer for assembler output, so make sure the TableGen
+// AsmWriter bits get associated with the correct class.
+def HexagonAsmWriter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ bit isMCAsmWriter = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Declare the target which we are implementing
+//===----------------------------------------------------------------------===//
+
+def Hexagon : Target {
+ // Pull in Instruction Info:
+ let InstructionSet = HexagonInstrInfo;
+
+ let AssemblyWriters = [HexagonAsmWriter];
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
new file mode 100644
index 0000000..2cc8b81
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp
@@ -0,0 +1,313 @@
+//===-- HexagonAsmPrinter.cpp - Print machine instrs to Hexagon assembly --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to Hexagon assembly language. This printer is
+// the output mechanism used by `llc'.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asm-printer"
+#include "Hexagon.h"
+#include "HexagonAsmPrinter.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "HexagonMCInst.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
+#include "InstPrinter/HexagonInstPrinter.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include <map>
+
+using namespace llvm;
+
+static cl::opt<bool> AlignCalls(
+ "hexagon-align-calls", cl::Hidden, cl::init(true),
+ cl::desc("Insert falign after call instruction for Hexagon target"));
+
+void HexagonAsmPrinter::EmitAlignment(unsigned NumBits,
+ const GlobalValue *GV) const {
+ // For basic block level alignment, use ".falign".
+ if (!GV) {
+ OutStreamer.EmitRawText(StringRef("\t.falign"));
+ return;
+ }
+
+ AsmPrinter::EmitAlignment(NumBits, GV);
+}
+
+void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+
+ switch (MO.getType()) {
+ default: llvm_unreachable("<unknown operand type>");
+ case MachineOperand::MO_Register:
+ O << HexagonInstPrinter::getRegisterName(MO.getReg());
+ return;
+ case MachineOperand::MO_Immediate:
+ O << MO.getImm();
+ return;
+ case MachineOperand::MO_MachineBasicBlock:
+ O << *MO.getMBB()->getSymbol();
+ return;
+ case MachineOperand::MO_JumpTableIndex:
+ O << *GetJTISymbol(MO.getIndex());
+ // FIXME: PIC relocation model.
+ return;
+ case MachineOperand::MO_ConstantPoolIndex:
+ O << *GetCPISymbol(MO.getIndex());
+ return;
+ case MachineOperand::MO_ExternalSymbol:
+ O << *GetExternalSymbolSymbol(MO.getSymbolName());
+ return;
+ case MachineOperand::MO_GlobalAddress:
+ // Computing the address of a global symbol, not calling it.
+ O << *Mang->getSymbol(MO.getGlobal());
+ printOffset(MO.getOffset(), O);
+ return;
+ }
+}
+
+//
+// isBlockOnlyReachableByFallthrough - We need to override this since the
+// default AsmPrinter does not print labels for any basic block that
+// is only reachable by a fall-through. That works for all cases except
+// the one in which the basic block is reachable by a fall-through but
+// also through an indirect branch from a jump table. In this case, the
+// jump table will contain a label not defined by AsmPrinter.
+//
+bool HexagonAsmPrinter::
+isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
+ if (MBB->hasAddressTaken()) {
+ return false;
+ }
+ return AsmPrinter::isBlockOnlyReachableByFallthrough(MBB);
+}
+
+
+/// PrintAsmOperand - Print out an operand for an inline asm expression.
+///
+bool HexagonAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ // Does this asm operand have a single letter operand modifier?
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default: return true; // Unknown modifier.
+ case 'c': // Don't print "$" before a global var name or constant.
+ // Hexagon never has a prefix.
+ printOperand(MI, OpNo, OS);
+ return false;
+ case 'L': // Write second word of DImode reference.
+ // Verify that this operand has two consecutive registers.
+ if (!MI->getOperand(OpNo).isReg() ||
+ OpNo+1 == MI->getNumOperands() ||
+ !MI->getOperand(OpNo+1).isReg())
+ return true;
+ ++OpNo; // Return the high-part.
+ break;
+ case 'I':
+ // Write 'i' if an integer constant, otherwise nothing. Used to print
+ // addi vs add, etc.
+ if (MI->getOperand(OpNo).isImm())
+ OS << "i";
+ return false;
+ }
+ }
+
+ printOperand(MI, OpNo, OS);
+ return false;
+}
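The 'L' and 'I' cases above implement GCC-style operand modifiers: %L0 selects the second (high) register of a 64-bit pair by advancing OpNo, and %I1 prints an "i" only when operand 1 turned out to be an immediate, letting one template spell both the register and immediate forms of an instruction. A hypothetical use from C code (the mnemonic and constraints are illustrative pseudo-assembly, not validated Hexagon syntax):

// Emits "add" when operand 2 is a register and "addi" when the "ri"
// constraint resolved it to an immediate, via the 'I' modifier above.
int Res;
int A = 1;
asm("add%I2 %0, %1, %2" : "=r"(Res) : "r"(A), "ri"(41));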
+
+bool HexagonAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo, unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0])
+ return true; // Unknown modifier.
+
+ const MachineOperand &Base = MI->getOperand(OpNo);
+ const MachineOperand &Offset = MI->getOperand(OpNo+1);
+
+ if (Base.isReg())
+ printOperand(MI, OpNo, O);
+ else
+ llvm_unreachable("Unimplemented");
+
+ if (Offset.isImm()) {
+ if (Offset.getImm())
+ O << " + #" << Offset.getImm();
+ }
+ else
+ llvm_unreachable("Unimplemented");
+
+ return false;
+}
+
+void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
+ llvm_unreachable("Unimplemented");
+}
+
+
+/// EmitInstruction -- Print out a single Hexagon MI, expanding the bundle it
+/// heads when necessary, to the current output stream.
+///
+void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ if (MI->isBundle()) {
+ std::vector<const MachineInstr*> BundleMIs;
+
+ const MachineBasicBlock *MBB = MI->getParent();
+ MachineBasicBlock::const_instr_iterator MII = MI;
+ ++MII;
+ unsigned int IgnoreCount = 0;
+ while (MII != MBB->end() && MII->isInsideBundle()) {
+ const MachineInstr *MInst = MII;
+ if (MInst->getOpcode() == TargetOpcode::DBG_VALUE ||
+ MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) {
+ IgnoreCount++;
+ ++MII;
+ continue;
+ }
+ //BundleMIs.push_back(&*MII);
+ BundleMIs.push_back(MInst);
+ ++MII;
+ }
+ unsigned Size = BundleMIs.size();
+ assert((Size+IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!");
+ for (unsigned Index = 0; Index < Size; Index++) {
+ HexagonMCInst MCI;
+ MCI.setStartPacket(Index == 0);
+ MCI.setEndPacket(Index == (Size-1));
+
+ HexagonLowerToMC(BundleMIs[Index], MCI, *this);
+ OutStreamer.EmitInstruction(MCI);
+ }
+ }
+ else {
+ HexagonMCInst MCI;
+ if (MI->getOpcode() == Hexagon::ENDLOOP0) {
+ MCI.setStartPacket(true);
+ MCI.setEndPacket(true);
+ }
+ HexagonLowerToMC(MI, MCI, *this);
+ OutStreamer.EmitInstruction(MCI);
+ }
+
+ return;
+}
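EmitInstruction above walks a bundle with instr_iterator, drops DBG_VALUE and IMPLICIT_DEF members (counting them only to validate the bundle size), and tags the first and last surviving members as packet boundaries. The collection step, factored into a standalone sketch under the same assumptions:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include <vector>

using namespace llvm;

// Gather the printable members of the bundle that starts right after MII,
// mirroring the loop above; returns how many instructions were skipped.
static unsigned collectBundle(MachineBasicBlock::const_instr_iterator MII,
                              const MachineBasicBlock *MBB,
                              std::vector<const MachineInstr*> &Out) {
  unsigned Ignored = 0;
  for (; MII != MBB->end() && MII->isInsideBundle(); ++MII) {
    if (MII->getOpcode() == TargetOpcode::DBG_VALUE ||
        MII->getOpcode() == TargetOpcode::IMPLICIT_DEF)
      ++Ignored;
    else
      Out.push_back(&*MII);
  }
  return Ignored;
}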
+
+/// PrintUnmangledNameSafely - Print out the printable characters in the name.
+/// Don't print things like \n or \0.
+// static void PrintUnmangledNameSafely(const Value *V, raw_ostream &OS) {
+// for (const char *Name = V->getNameStart(), *E = Name+V->getNameLen();
+// Name != E; ++Name)
+// if (isprint(*Name))
+// OS << *Name;
+// }
+
+
+void HexagonAsmPrinter::printAddrModeBasePlusOffset(const MachineInstr *MI,
+ int OpNo, raw_ostream &O) {
+ const MachineOperand &MO1 = MI->getOperand(OpNo);
+ const MachineOperand &MO2 = MI->getOperand(OpNo+1);
+
+ O << HexagonInstPrinter::getRegisterName(MO1.getReg())
+ << " + #"
+ << MO2.getImm();
+}
+
+
+void HexagonAsmPrinter::printGlobalOperand(const MachineInstr *MI, int OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ assert( (MO.getType() == MachineOperand::MO_GlobalAddress) &&
+ "Expecting global address");
+
+ O << *Mang->getSymbol(MO.getGlobal());
+ if (MO.getOffset() != 0) {
+ O << " + ";
+ O << MO.getOffset();
+ }
+}
+
+void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) &&
+ "Expecting jump table index");
+
+ // Hexagon_TODO: Do we need name mangling?
+ O << *GetJTISymbol(MO.getIndex());
+}
+
+void HexagonAsmPrinter::printConstantPool(const MachineInstr *MI, int OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ assert( (MO.getType() == MachineOperand::MO_ConstantPoolIndex) &&
+ "Expecting constant pool index");
+
+ // Hexagon_TODO: Do we need name mangling?
+ O << *GetCPISymbol(MO.getIndex());
+}
+
+static MCInstPrinter *createHexagonMCInstPrinter(const Target &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI) {
+ if (SyntaxVariant == 0)
+    return new HexagonInstPrinter(MAI, MII, MRI);
+ else
+ return NULL;
+}
+
+extern "C" void LLVMInitializeHexagonAsmPrinter() {
+ RegisterAsmPrinter<HexagonAsmPrinter> X(TheHexagonTarget);
+
+ TargetRegistry::RegisterMCInstPrinter(TheHexagonTarget,
+ createHexagonMCInstPrinter);
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h
new file mode 100755
index 0000000..bc2af63
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonAsmPrinter.h
@@ -0,0 +1,165 @@
+//===-- HexagonAsmPrinter.h - Print machine code to an Hexagon .s file ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hexagon Assembly printer class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONASMPRINTER_H
+#define HEXAGONASMPRINTER_H
+
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+ class HexagonAsmPrinter : public AsmPrinter {
+ const HexagonSubtarget *Subtarget;
+
+ public:
+ explicit HexagonAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer) {
+ Subtarget = &TM.getSubtarget<HexagonSubtarget>();
+ }
+
+ virtual const char *getPassName() const {
+ return "Hexagon Assembly Printer";
+ }
+
+ bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+
+ virtual void EmitInstruction(const MachineInstr *MI);
+ virtual void EmitAlignment(unsigned NumBits,
+ const GlobalValue *GV = 0) const;
+
+ void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+
+ /// printInstruction - This method is automatically generated by tablegen
+ /// from the instruction set description. This method returns true if the
+ /// machine instruction was sufficiently described to print it, otherwise it
+ /// returns false.
+ void printInstruction(const MachineInstr *MI, raw_ostream &O);
+
+ // void printMachineInstruction(const MachineInstr *MI);
+ void printOp(const MachineOperand &MO, raw_ostream &O);
+
+ /// printRegister - Print register according to target requirements.
+ ///
+ void printRegister(const MachineOperand &MO, bool R0AsZero,
+ raw_ostream &O) {
+ unsigned RegNo = MO.getReg();
+ assert(TargetRegisterInfo::isPhysicalRegister(RegNo) && "Not physreg??");
+ O << getRegisterName(RegNo);
+ }
+
+ void printImmOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ int value = MI->getOperand(OpNo).getImm();
+ O << value;
+ }
+
+ void printNegImmOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ int value = MI->getOperand(OpNo).getImm();
+ O << -value;
+ }
+
+ void printMEMriOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO1 = MI->getOperand(OpNo);
+ const MachineOperand &MO2 = MI->getOperand(OpNo+1);
+
+ O << getRegisterName(MO1.getReg())
+ << " + #"
+ << (int) MO2.getImm();
+ }
+
+ void printFrameIndexOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO1 = MI->getOperand(OpNo);
+ const MachineOperand &MO2 = MI->getOperand(OpNo+1);
+
+ O << getRegisterName(MO1.getReg())
+ << ", #"
+ << MO2.getImm();
+ }
+
+ void printBranchOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ // Branches can take an immediate operand. This is used by the branch
+ // selection pass to print $+8, an eight byte displacement from the PC.
+ if (MI->getOperand(OpNo).isImm()) {
+ O << "$+" << MI->getOperand(OpNo).getImm()*4;
+ } else {
+ printOp(MI->getOperand(OpNo), O);
+ }
+ }
+
+ void printCallOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ }
+
+ void printAbsAddrOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ }
+
+ void printSymbolHi(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
+ O << "#HI(";
+ if (MI->getOperand(OpNo).isImm()) {
+ printImmOperand(MI, OpNo, O);
+ }
+ else {
+ printOp(MI->getOperand(OpNo), O);
+ }
+ O << ")";
+ }
+
+ void printSymbolLo(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) {
+ O << "#HI(";
+ if (MI->getOperand(OpNo).isImm()) {
+ printImmOperand(MI, OpNo, O);
+ }
+ else {
+ printOp(MI->getOperand(OpNo), O);
+ }
+ O << ")";
+ }
+
+ void printPredicateOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O);
+
+#if 0
+ void printModuleLevelGV(const GlobalVariable* GVar, raw_ostream &O);
+#endif
+
+ void printAddrModeBasePlusOffset(const MachineInstr *MI, int OpNo,
+ raw_ostream &O);
+
+ void printGlobalOperand(const MachineInstr *MI, int OpNo, raw_ostream &O);
+ void printJumpTable(const MachineInstr *MI, int OpNo, raw_ostream &O);
+ void printConstantPool(const MachineInstr *MI, int OpNo, raw_ostream &O);
+
+ static const char *getRegisterName(unsigned RegNo);
+
+#if 0
+ void EmitStartOfAsmFile(Module &M);
+#endif
+ };
+
+} // end of llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
new file mode 100644
index 0000000..9bca9e0
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
@@ -0,0 +1,235 @@
+//===-- HexagonCFGOptimizer.cpp - CFG optimizations -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hexagon_cfg"
+#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm;
+
+namespace {
+
+class HexagonCFGOptimizer : public MachineFunctionPass {
+
+private:
+ HexagonTargetMachine& QTM;
+ const HexagonSubtarget &QST;
+
+ void InvertAndChangeJumpTarget(MachineInstr*, MachineBasicBlock*);
+
+ public:
+ static char ID;
+ HexagonCFGOptimizer(HexagonTargetMachine& TM) : MachineFunctionPass(ID),
+ QTM(TM),
+ QST(*TM.getSubtargetImpl()) {}
+
+ const char *getPassName() const {
+ return "Hexagon CFG Optimizer";
+ }
+ bool runOnMachineFunction(MachineFunction &Fn);
+};
+
+
+char HexagonCFGOptimizer::ID = 0;
+
+static bool IsConditionalBranch(int Opc) {
+ return (Opc == Hexagon::JMP_c) || (Opc == Hexagon::JMP_cNot)
+ || (Opc == Hexagon::JMP_cdnPt) || (Opc == Hexagon::JMP_cdnNotPt);
+}
+
+
+static bool IsUnconditionalJump(int Opc) {
+ return (Opc == Hexagon::JMP);
+}
+
+
+void
+HexagonCFGOptimizer::InvertAndChangeJumpTarget(MachineInstr* MI,
+ MachineBasicBlock* NewTarget) {
+ const HexagonInstrInfo *QII = QTM.getInstrInfo();
+ int NewOpcode = 0;
+ switch(MI->getOpcode()) {
+ case Hexagon::JMP_c:
+ NewOpcode = Hexagon::JMP_cNot;
+ break;
+
+ case Hexagon::JMP_cNot:
+ NewOpcode = Hexagon::JMP_c;
+ break;
+
+ case Hexagon::JMP_cdnPt:
+ NewOpcode = Hexagon::JMP_cdnNotPt;
+ break;
+
+ case Hexagon::JMP_cdnNotPt:
+ NewOpcode = Hexagon::JMP_cdnPt;
+ break;
+
+ default:
+ llvm_unreachable("Cannot handle this case");
+ }
+
+ MI->setDesc(QII->get(NewOpcode));
+ MI->getOperand(1).setMBB(NewTarget);
+}
+
+
+bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
+
+ // Loop over all of the basic blocks.
+ for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock* MBB = MBBb;
+
+ // Traverse the basic block.
+ MachineBasicBlock::iterator MII = MBB->getFirstTerminator();
+ if (MII != MBB->end()) {
+ MachineInstr *MI = MII;
+ int Opc = MI->getOpcode();
+ if (IsConditionalBranch(Opc)) {
+
+ //
+ // (Case 1) Transform the code if the following condition occurs:
+ // BB1: if (p0) jump BB3
+ // ...falls-through to BB2 ...
+ // BB2: jump BB4
+ // ...next block in layout is BB3...
+ // BB3: ...
+ //
+ // Transform this to:
+ // BB1: if (!p0) jump BB4
+ // Remove BB2
+ // BB3: ...
+ //
+ // (Case 2) A variation occurs when BB3 contains a JMP to BB4:
+ // BB1: if (p0) jump BB3
+ // ...falls-through to BB2 ...
+ // BB2: jump BB4
+ // ...other basic blocks ...
+ // BB4:
+ // ...not a fall-thru
+ // BB3: ...
+ // jump BB4
+ //
+ // Transform this to:
+ // BB1: if (!p0) jump BB4
+ // Remove BB2
+ // BB3: ...
+ // BB4: ...
+ //
+ unsigned NumSuccs = MBB->succ_size();
+ MachineBasicBlock::succ_iterator SI = MBB->succ_begin();
+ MachineBasicBlock* FirstSucc = *SI;
+ MachineBasicBlock* SecondSucc = *(++SI);
+ MachineBasicBlock* LayoutSucc = NULL;
+ MachineBasicBlock* JumpAroundTarget = NULL;
+
+ if (MBB->isLayoutSuccessor(FirstSucc)) {
+ LayoutSucc = FirstSucc;
+ JumpAroundTarget = SecondSucc;
+ } else if (MBB->isLayoutSuccessor(SecondSucc)) {
+ LayoutSucc = SecondSucc;
+ JumpAroundTarget = FirstSucc;
+ } else {
+ // Odd case...cannot handle.
+ }
+
+ // The target of the unconditional branch must be JumpAroundTarget.
+ // TODO: If not, we should not invert the unconditional branch.
+ MachineBasicBlock* CondBranchTarget = NULL;
+ if ((MI->getOpcode() == Hexagon::JMP_c) ||
+ (MI->getOpcode() == Hexagon::JMP_cNot)) {
+ CondBranchTarget = MI->getOperand(1).getMBB();
+ }
+
+ if (!LayoutSucc || (CondBranchTarget != JumpAroundTarget)) {
+ continue;
+ }
+
+ if ((NumSuccs == 2) && LayoutSucc && (LayoutSucc->pred_size() == 1)) {
+
+ // Ensure that BB2 has one instruction -- an unconditional jump.
+ if ((LayoutSucc->size() == 1) &&
+ IsUnconditionalJump(LayoutSucc->front().getOpcode())) {
+ MachineBasicBlock* UncondTarget =
+ LayoutSucc->front().getOperand(0).getMBB();
+ // Check if the layout successor of BB2 is BB3.
+ bool case1 = LayoutSucc->isLayoutSuccessor(JumpAroundTarget);
+ bool case2 = JumpAroundTarget->isSuccessor(UncondTarget) &&
+ JumpAroundTarget->size() >= 1 &&
+ IsUnconditionalJump(JumpAroundTarget->back().getOpcode()) &&
+ JumpAroundTarget->pred_size() == 1 &&
+ JumpAroundTarget->succ_size() == 1;
+
+ if (case1 || case2) {
+ InvertAndChangeJumpTarget(MI, UncondTarget);
+ MBB->removeSuccessor(JumpAroundTarget);
+ MBB->addSuccessor(UncondTarget);
+
+ // Remove the unconditional branch in LayoutSucc.
+ LayoutSucc->erase(LayoutSucc->begin());
+ LayoutSucc->removeSuccessor(UncondTarget);
+ LayoutSucc->addSuccessor(JumpAroundTarget);
+
+ // This code performs the conversion for case 2, which moves
+ // the block to the fall-thru case (BB3 in the code above).
+ if (case2 && !case1) {
+ JumpAroundTarget->moveAfter(LayoutSucc);
+              // Only move a block if it doesn't have a fall-thru; otherwise
+              // the CFG will be incorrect.
+ if (!UncondTarget->canFallThrough()) {
+ UncondTarget->moveAfter(JumpAroundTarget);
+ }
+ }
+
+ //
+            // Correct the live-in information; it is used by the post-RA
+            // scheduler. The live-ins of LayoutSucc are now all values
+            // live-in to JumpAroundTarget.
+ //
+ std::vector<unsigned> OrigLiveIn(LayoutSucc->livein_begin(),
+ LayoutSucc->livein_end());
+ std::vector<unsigned> NewLiveIn(JumpAroundTarget->livein_begin(),
+ JumpAroundTarget->livein_end());
+ for (unsigned i = 0; i < OrigLiveIn.size(); ++i) {
+ LayoutSucc->removeLiveIn(OrigLiveIn[i]);
+ }
+ for (unsigned i = 0; i < NewLiveIn.size(); ++i) {
+ LayoutSucc->addLiveIn(NewLiveIn[i]);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+}
+
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonCFGOptimizer(HexagonTargetMachine &TM) {
+ return new HexagonCFGOptimizer(TM);
+}
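A pass like this is installed from the target's pass-configuration hook; a plausible registration point, assuming the TargetPassConfig scheme used elsewhere in this import (the actual hook lives in HexagonTargetMachine.cpp, which is not shown at this point in the diff):

// Hypothetical wiring; method and class names follow the SPUPassConfig
// pattern seen earlier in this diff.
bool HexagonPassConfig::addPreEmitPass() {
  PM.add(createHexagonCFGOptimizer(getHexagonTargetMachine()));
  return true;
}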
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td
new file mode 100644
index 0000000..bd9608b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConv.td
@@ -0,0 +1,35 @@
+//===- HexagonCallingConv.td - Calling Conventions Hexagon -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for the Hexagon architectures.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Return Value Calling Conventions
+//===----------------------------------------------------------------------===//
+
+// Hexagon 32-bit C return-value convention.
+def RetCC_Hexagon32 : CallingConv<[
+ CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
+ CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>,
+
+ // Alternatively, they are assigned to the stack in 4-byte aligned units.
+ CCAssignToStack<4, 4>
+]>;
+
+// Hexagon 32-bit C Calling convention.
+def CC_Hexagon32 : CallingConv<[
+ // All arguments get passed in integer registers if there is space.
+ CCIfType<[i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>,
+ CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>,
+
+ // Alternatively, they are assigned to the stack in 4-byte aligned units.
+ CCAssignToStack<4, 4>
+]>;
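TableGen expands CC_Hexagon32 and RetCC_Hexagon32 into C++ functions with the standard CCAssignFn signature, which the ISel lowering code then hands to a CCState. A hedged sketch of the consuming side, assuming the usual generated entry point and a LowerFormalArguments context providing CallConv, isVarArg, MF, DAG, and Ins:

// Classify the formal arguments against the convention defined above.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), ArgLocs,
               *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon32);
// Each ArgLocs entry now names a register or stack slot, per CC_Hexagon32.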
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp
new file mode 100644
index 0000000..46c20e9
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.cpp
@@ -0,0 +1,207 @@
+//===-- HexagonCallingConvLower.cpp - Calling Convention lowering ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Hexagon_CCState class, used for lowering and
+// implementing calling conventions. Adapted from the machine-independent
+// version of the class (CCState), but this one handles calls to varargs
+// functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonCallingConvLower.h"
+#include "Hexagon.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+Hexagon_CCState::Hexagon_CCState(CallingConv::ID CC, bool isVarArg,
+ const TargetMachine &tm,
+ SmallVector<CCValAssign, 16> &locs,
+ LLVMContext &c)
+ : CallingConv(CC), IsVarArg(isVarArg), TM(tm),
+ TRI(*TM.getRegisterInfo()), Locs(locs), Context(c) {
+ // No stack is used.
+ StackOffset = 0;
+
+ UsedRegs.resize((TRI.getNumRegs()+31)/32);
+}
+
+// HandleByVal - Allocate a stack slot large enough to pass an argument by
+// value. The size and alignment information of the argument is encoded in its
+// parameter attribute.
+void Hexagon_CCState::HandleByVal(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ int MinSize, int MinAlign,
+ ISD::ArgFlagsTy ArgFlags) {
+ unsigned Align = ArgFlags.getByValAlign();
+ unsigned Size = ArgFlags.getByValSize();
+ if (MinSize > (int)Size)
+ Size = MinSize;
+ if (MinAlign > (int)Align)
+ Align = MinAlign;
+ unsigned Offset = AllocateStack(Size, Align);
+
+ addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset,
+ LocVT.getSimpleVT(), LocInfo));
+}
+
+/// MarkAllocated - Mark a register and all of its aliases as allocated.
+void Hexagon_CCState::MarkAllocated(unsigned Reg) {
+ UsedRegs[Reg/32] |= 1 << (Reg&31);
+
+ if (const uint16_t *RegAliases = TRI.getAliasSet(Reg))
+ for (; (Reg = *RegAliases); ++RegAliases)
+ UsedRegs[Reg/32] |= 1 << (Reg&31);
+}
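MarkAllocated and isAllocated implement a packed bitset over register numbers: word Reg/32, bit Reg&31. The pair of primitives in isolation (a sketch; the real method also walks the alias set, as above):

#include "llvm/ADT/SmallVector.h"
#include <stdint.h>

// Set and test one register in a packed bitset, as the methods above do.
static inline void setReg(llvm::SmallVectorImpl<uint32_t> &Used,
                          unsigned Reg) {
  Used[Reg / 32] |= 1u << (Reg & 31);
}
static inline bool testReg(const llvm::SmallVectorImpl<uint32_t> &Used,
                           unsigned Reg) {
  return (Used[Reg / 32] >> (Reg & 31)) & 1u;
}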
+
+/// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
+/// incorporating info about the formals into this state.
+void
+Hexagon_CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg>
+ &Ins,
+ Hexagon_CCAssignFn Fn,
+ unsigned SretValueInRegs) {
+ unsigned NumArgs = Ins.size();
+ unsigned i = 0;
+
+ // If the function returns a small struct in registers, skip
+ // over the first (dummy) argument.
+ if (SretValueInRegs != 0) {
+ ++i;
+ }
+
+
+ for (; i != NumArgs; ++i) {
+ EVT ArgVT = Ins[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this, 0, 0, false)) {
+ dbgs() << "Formal argument #" << i << " has unhandled type "
+ << ArgVT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+/// AnalyzeReturn - Analyze the returned values of an ISD::RET node,
+/// incorporating info about the result values into this state.
+void
+Hexagon_CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ Hexagon_CCAssignFn Fn,
+ unsigned SretValueInRegs) {
+
+ // For Hexagon, Return small structures in registers.
+ if (SretValueInRegs != 0) {
+ if (SretValueInRegs <= 32) {
+ unsigned Reg = Hexagon::R0;
+ addLoc(CCValAssign::getReg(0, MVT::i32, Reg, MVT::i32,
+ CCValAssign::Full));
+ return;
+ }
+ if (SretValueInRegs <= 64) {
+ unsigned Reg = Hexagon::D0;
+ addLoc(CCValAssign::getReg(0, MVT::i64, Reg, MVT::i64,
+ CCValAssign::Full));
+ return;
+ }
+ }
+
+
+ // Determine which register each value should be copied into.
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ EVT VT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this, -1, -1, false)){
+ dbgs() << "Return operand #" << i << " has unhandled type "
+ << VT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+
+/// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
+/// about the passed values into this state.
+void
+Hexagon_CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg>
+ &Outs,
+ Hexagon_CCAssignFn Fn,
+ int NonVarArgsParams,
+ unsigned SretValueSize) {
+ unsigned NumOps = Outs.size();
+
+ unsigned i = 0;
+ // If the called function returns a small struct in registers, skip
+ // the first actual parameter. We do not want to pass a pointer to
+ // the stack location.
+ if (SretValueSize != 0) {
+ ++i;
+ }
+
+ for (; i != NumOps; ++i) {
+ EVT ArgVT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this,
+ NonVarArgsParams, i+1, false)) {
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << ArgVT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+/// AnalyzeCallOperands - Same as above except it takes vectors of types
+/// and argument flags.
+void
+Hexagon_CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ Hexagon_CCAssignFn Fn) {
+ unsigned NumOps = ArgVTs.size();
+ for (unsigned i = 0; i != NumOps; ++i) {
+ EVT ArgVT = ArgVTs[i];
+ ISD::ArgFlagsTy ArgFlags = Flags[i];
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this, -1, -1,
+ false)) {
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << ArgVT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+/// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
+/// incorporating info about the passed values into this state.
+void
+Hexagon_CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+ Hexagon_CCAssignFn Fn,
+ unsigned SretValueInRegs) {
+
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ EVT VT = Ins[i].VT;
+ ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
+ if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this, -1, -1, false)) {
+ dbgs() << "Call result #" << i << " has unhandled type "
+ << VT.getEVTString() << "\n";
+ abort();
+ }
+ }
+}
+
+/// AnalyzeCallResult - Same as above except it's specialized for calls which
+/// produce a single value.
+void Hexagon_CCState::AnalyzeCallResult(EVT VT, Hexagon_CCAssignFn Fn) {
+ if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this, -1, -1,
+ false)) {
+ dbgs() << "Call result has unhandled type "
+ << VT.getEVTString() << "\n";
+ abort();
+ }
+}
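What distinguishes Hexagon_CCState from the generic CCState is the extra NonVarArgsParams/CurrentParam/ForceMem arguments threaded through every assignment callback, which let a convention push arguments at or beyond the first variadic position onto the stack. A hypothetical callback using them, matching the Hexagon_CCAssignFn typedef declared in the header that follows (the body is illustrative, not code from this diff):

// Fixed parameters may take registers; variadic ones go to memory.
static bool CC_Hexagon32_VarArgsSketch(unsigned ValNo, EVT ValVT, EVT LocVT,
                                       CCValAssign::LocInfo LocInfo,
                                       ISD::ArgFlagsTy ArgFlags,
                                       Hexagon_CCState &State,
                                       int NonVarArgsParams,
                                       int CurrentParam, bool ForceMem) {
  bool IsVariadic = NonVarArgsParams >= 0 && CurrentParam > NonVarArgsParams;
  if (ForceMem || IsVariadic) {
    unsigned Off = State.AllocateStack(4, 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Off,
                                     LocVT.getSimpleVT(), LocInfo));
    return false;           // assigned successfully
  }
  // ... otherwise try the register file, as CC_Hexagon32 would ...
  return true;              // not handled; the caller reports an error
}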
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h
new file mode 100644
index 0000000..1f601e8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonCallingConvLower.h
@@ -0,0 +1,189 @@
+//===-- HexagonCallingConvLower.h - Calling Conventions ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Hexagon_CCState class, used for lowering
+// and implementing calling conventions. Adapted from the target-independent
+// version, but this one handles calls to varargs functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_Hexagon_CODEGEN_CALLINGCONVLOWER_H
+#define LLVM_Hexagon_CODEGEN_CALLINGCONVLOWER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+
+//
+// Need to handle varargs.
+//
+namespace llvm {
+ class TargetRegisterInfo;
+ class TargetMachine;
+ class Hexagon_CCState;
+ class SDNode;
+
+
+/// Hexagon_CCAssignFn - This function assigns a location for Val, updating
+/// State to reflect the change.
+typedef bool Hexagon_CCAssignFn(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, Hexagon_CCState &State,
+ int NonVarArgsParams,
+ int CurrentParam,
+ bool ForceMem);
+
+
+/// CCState - This class holds information needed while lowering arguments and
+/// return values. It captures which registers are already assigned and which
+/// stack slots are used. It provides accessors to allocate these values.
+class Hexagon_CCState {
+ CallingConv::ID CallingConv;
+ bool IsVarArg;
+ const TargetMachine &TM;
+ const TargetRegisterInfo &TRI;
+ SmallVector<CCValAssign, 16> &Locs;
+ LLVMContext &Context;
+
+ unsigned StackOffset;
+ SmallVector<uint32_t, 16> UsedRegs;
+public:
+ Hexagon_CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM,
+ SmallVector<CCValAssign, 16> &locs, LLVMContext &c);
+
+ void addLoc(const CCValAssign &V) {
+ Locs.push_back(V);
+ }
+
+ LLVMContext &getContext() const { return Context; }
+ const TargetMachine &getTarget() const { return TM; }
+ unsigned getCallingConv() const { return CallingConv; }
+ bool isVarArg() const { return IsVarArg; }
+
+ unsigned getNextStackOffset() const { return StackOffset; }
+
+ /// isAllocated - Return true if the specified register (or an alias) is
+ /// allocated.
+ bool isAllocated(unsigned Reg) const {
+ return UsedRegs[Reg/32] & (1 << (Reg&31));
+ }
+
+ /// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
+ /// incorporating info about the formals into this state.
+ void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+ Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
+
+ /// AnalyzeReturn - Analyze the returned values of an ISD::RET node,
+ /// incorporating info about the result values into this state.
+ void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
+
+ /// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
+ /// about the passed values into this state.
+ void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ Hexagon_CCAssignFn Fn, int NonVarArgsParams,
+ unsigned SretValueSize);
+
+ /// AnalyzeCallOperands - Same as above except it takes vectors of types
+ /// and argument flags.
+ void AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ Hexagon_CCAssignFn Fn);
+
+ /// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
+ /// incorporating info about the passed values into this state.
+ void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+ Hexagon_CCAssignFn Fn, unsigned SretValueInRegs);
+
+ /// AnalyzeCallResult - Same as above except it's specialized for calls which
+ /// produce a single value.
+ void AnalyzeCallResult(EVT VT, Hexagon_CCAssignFn Fn);
+
+ /// getFirstUnallocated - Return the first unallocated register in the set, or
+ /// NumRegs if they are all allocated.
+ unsigned getFirstUnallocated(const unsigned *Regs, unsigned NumRegs) const {
+ for (unsigned i = 0; i != NumRegs; ++i)
+ if (!isAllocated(Regs[i]))
+ return i;
+ return NumRegs;
+ }
+
+ /// AllocateReg - Attempt to allocate one register. If it is not available,
+ /// return zero. Otherwise, return the register, marking it and any aliases
+ /// as allocated.
+ unsigned AllocateReg(unsigned Reg) {
+ if (isAllocated(Reg)) return 0;
+ MarkAllocated(Reg);
+ return Reg;
+ }
+
+ /// Version of AllocateReg with extra register to be shadowed.
+ unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
+ if (isAllocated(Reg)) return 0;
+ MarkAllocated(Reg);
+ MarkAllocated(ShadowReg);
+ return Reg;
+ }
+
+ /// AllocateReg - Attempt to allocate one of the specified registers. If none
+ /// are available, return zero. Otherwise, return the first one available,
+ /// marking it and any aliases as allocated.
+ unsigned AllocateReg(const unsigned *Regs, unsigned NumRegs) {
+ unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
+ if (FirstUnalloc == NumRegs)
+ return 0; // Didn't find the reg.
+
+ // Mark the register and any aliases as allocated.
+ unsigned Reg = Regs[FirstUnalloc];
+ MarkAllocated(Reg);
+ return Reg;
+ }
+
+ /// Version of AllocateReg with list of registers to be shadowed.
+ unsigned AllocateReg(const unsigned *Regs, const unsigned *ShadowRegs,
+ unsigned NumRegs) {
+ unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
+ if (FirstUnalloc == NumRegs)
+ return 0; // Didn't find the reg.
+
+ // Mark the register and any aliases as allocated.
+ unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
+ MarkAllocated(Reg);
+ MarkAllocated(ShadowReg);
+ return Reg;
+ }
+
+ /// AllocateStack - Allocate a chunk of stack space with the specified size
+ /// and alignment.
+ unsigned AllocateStack(unsigned Size, unsigned Align) {
+ assert(Align && ((Align-1) & Align) == 0); // Align is power of 2.
+ StackOffset = ((StackOffset + Align-1) & ~(Align-1));
+ unsigned Result = StackOffset;
+ StackOffset += Size;
+ return Result;
+ }
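+
+ // Worked example: with StackOffset == 6, AllocateStack(4, 8) rounds the
+ // offset up to 8, returns 8 as the slot offset, and leaves StackOffset at
+ // 12 ready for the next allocation.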
+
+ // HandleByVal - Allocate a stack slot large enough to pass an argument by
+ // value. The size and alignment information of the argument is encoded in its
+ // parameter attribute.
+ void HandleByVal(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
+
+private:
+ /// MarkAllocated - Mark a register and all of its aliases as allocated.
+ void MarkAllocated(unsigned Reg);
+};
+
+
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
new file mode 100644
index 0000000..2100474
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
@@ -0,0 +1,177 @@
+//===-- HexagonExpandPredSpillCode.cpp - Expand Predicate Spill Code ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// The Hexagon processor has no instructions that load or store predicate
+// registers directly. So, when a predicate register must be spilled, a
+// general-purpose register must be found and the value copied between it and
+// the predicate register. This code currently does not use the register
+// scavenger mechanism available in the allocator. There are two registers
+// reserved to allow spilling/restoring predicate registers. One is used to
+// hold the predicate value. The other is used when stack frame offsets are
+// too large.
+//
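+//
+// As an illustrative sketch (not literal output of this pass), an in-range
+// predicate spill such as
+//   STriw_pred [R30], #offset, P0
+// is rewritten into
+//   R_tmp = TFR_RsPd P0          ; copy the predicate into a reserved GPR
+//   STriw [R30], #offset, R_tmp  ; ordinary word store
+// where R_tmp stands for one of the reserved registers
+// (HEXAGON_RESERVED_REG_1/2 below); out-of-range offsets first materialize
+// the address into a reserved register.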
+//===----------------------------------------------------------------------===//
+
+#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm;
+
+
+namespace {
+
+class HexagonExpandPredSpillCode : public MachineFunctionPass {
+ HexagonTargetMachine& QTM;
+ const HexagonSubtarget &QST;
+
+ public:
+ static char ID;
+ HexagonExpandPredSpillCode(HexagonTargetMachine& TM) :
+ MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {}
+
+ const char *getPassName() const {
+ return "Hexagon Expand Predicate Spill Code";
+ }
+ bool runOnMachineFunction(MachineFunction &Fn);
+};
+
+
+char HexagonExpandPredSpillCode::ID = 0;
+
+
+bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
+
+ const HexagonInstrInfo *TII = QTM.getInstrInfo();
+
+ // Loop over all of the basic blocks.
+ for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock* MBB = MBBb;
+ // Traverse the basic block.
+ for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
+ ++MII) {
+ MachineInstr *MI = MII;
+ int Opc = MI->getOpcode();
+ if (Opc == Hexagon::STriw_pred) {
+ // STriw_pred [R30], ofst, SrcReg;
+ unsigned FP = MI->getOperand(0).getReg();
+ assert(FP == QTM.getRegisterInfo()->getFrameRegister() &&
+ "Not a Frame Pointer, Nor a Spill Slot");
+ assert(MI->getOperand(1).isImm() && "Not an offset");
+ int Offset = MI->getOperand(1).getImm();
+ int SrcReg = MI->getOperand(2).getReg();
+ assert(Hexagon::PredRegsRegClass.contains(SrcReg) &&
+ "Not a predicate register");
+ if (!TII->isValidOffset(Hexagon::STriw, Offset)) {
+ if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::CONST32_Int_Real),
+ HEXAGON_RESERVED_REG_1).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_rr),
+ HEXAGON_RESERVED_REG_1)
+ .addReg(FP).addReg(HEXAGON_RESERVED_REG_1);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ HEXAGON_RESERVED_REG_2).addReg(SrcReg);
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::STriw))
+ .addReg(HEXAGON_RESERVED_REG_1)
+ .addImm(0).addReg(HEXAGON_RESERVED_REG_2);
+ } else {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_ri),
+ HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ HEXAGON_RESERVED_REG_2).addReg(SrcReg);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw))
+ .addReg(HEXAGON_RESERVED_REG_1)
+ .addImm(0)
+ .addReg(HEXAGON_RESERVED_REG_2);
+ }
+ } else {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd),
+ HEXAGON_RESERVED_REG_2).addReg(SrcReg);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw)).
+ addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2);
+ }
+ MII = MBB->erase(MI);
+ --MII;
+ } else if (Opc == Hexagon::LDriw_pred) {
+ // DstReg = LDriw_pred [R30], ofst.
+ int DstReg = MI->getOperand(0).getReg();
+ assert(Hexagon::PredRegsRegClass.contains(DstReg) &&
+ "Not a predicate register");
+ unsigned FP = MI->getOperand(1).getReg();
+ assert(FP == QTM.getRegisterInfo()->getFrameRegister() &&
+ "Not a Frame Pointer, Nor a Spill Slot");
+ assert(MI->getOperand(2).isImm() && "Not an offset");
+ int Offset = MI->getOperand(2).getImm();
+ if (!TII->isValidOffset(Hexagon::LDriw, Offset)) {
+ if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(),
+ TII->get(Hexagon::CONST32_Int_Real),
+ HEXAGON_RESERVED_REG_1).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_rr),
+ HEXAGON_RESERVED_REG_1)
+ .addReg(FP)
+ .addReg(HEXAGON_RESERVED_REG_1);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ HEXAGON_RESERVED_REG_2)
+ .addReg(HEXAGON_RESERVED_REG_1)
+ .addImm(0);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ DstReg).addReg(HEXAGON_RESERVED_REG_2);
+ } else {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::ADD_ri),
+ HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ HEXAGON_RESERVED_REG_2)
+ .addReg(HEXAGON_RESERVED_REG_1)
+ .addImm(0);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ DstReg).addReg(HEXAGON_RESERVED_REG_2);
+ }
+ } else {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::LDriw),
+ HEXAGON_RESERVED_REG_2).addReg(FP).addImm(Offset);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_PdRs),
+ DstReg).addReg(HEXAGON_RESERVED_REG_2);
+ }
+ MII = MBB->erase(MI);
+ --MII;
+ }
+ }
+ }
+
+ return true;
+}
+
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonExpandPredSpillCode(HexagonTargetMachine &TM) {
+ return new HexagonExpandPredSpillCode(TM);
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
new file mode 100644
index 0000000..e8a6924
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -0,0 +1,332 @@
+//===-- HexagonFrameLowering.cpp - Define frame lowering ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonFrameLowering.h"
+#include "Hexagon.h"
+#include "HexagonInstrInfo.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/Function.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+static cl::opt<bool> DisableDeallocRet(
+ "disable-hexagon-dealloc-ret",
+ cl::Hidden,
+ cl::desc("Disable Dealloc Return for Hexagon target"));
+
+/// determineFrameLayout - Determine the size of the frame and maximum call
+/// frame size.
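+/// Worked example: with 40 bytes of locals, a 24-byte maximum call frame and
+/// the 8-byte target stack alignment, the final frame size is
+/// RoundUpToAlignment(40 + 24, 8) == 64 bytes.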
+void HexagonFrameLowering::determineFrameLayout(MachineFunction &MF) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ unsigned FrameSize = MFI->getStackSize();
+
+ // Get the alignments provided by the target.
+ unsigned TargetAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
+ // Get the maximum call frame size of all the calls.
+ unsigned maxCallFrameSize = MFI->getMaxCallFrameSize();
+
+ // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
+ // that allocations will be aligned.
+ if (MFI->hasVarSizedObjects())
+ maxCallFrameSize = RoundUpToAlignment(maxCallFrameSize, TargetAlign);
+
+ // Update maximum call frame size.
+ MFI->setMaxCallFrameSize(maxCallFrameSize);
+
+ // Include call frame size in total.
+ FrameSize += maxCallFrameSize;
+
+ // Make sure the frame is aligned.
+ FrameSize = RoundUpToAlignment(FrameSize, TargetAlign);
+
+ // Update frame info.
+ MFI->setStackSize(FrameSize);
+}
+
+
+void HexagonFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ const HexagonRegisterInfo *QRI =
+ static_cast<const HexagonRegisterInfo *>(MF.getTarget().getRegisterInfo());
+ DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ determineFrameLayout(MF);
+
+ // Check if frame moves are needed for EH.
+ bool needsFrameMoves = MMI.hasDebugInfo() ||
+ !MF.getFunction()->needsUnwindTableEntry();
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ int NumBytes = (int) MFI->getStackSize();
+
+ // LLVM expects allocframe not to be the first instruction in the
+ // basic block.
+ MachineBasicBlock::iterator InsertPt = MBB.begin();
+
+ //
+ // ALLOCA adjust regs. Iterate over ADJDYNALLOC nodes and change the offset.
+ //
+ HexagonMachineFunctionInfo *FuncInfo =
+ MF.getInfo<HexagonMachineFunctionInfo>();
+ const std::vector<MachineInstr*>& AdjustRegs =
+ FuncInfo->getAllocaAdjustInsts();
+ for (std::vector<MachineInstr*>::const_iterator i = AdjustRegs.begin(),
+ e = AdjustRegs.end();
+ i != e; ++i) {
+ MachineInstr* MI = *i;
+ assert((MI->getOpcode() == Hexagon::ADJDYNALLOC) &&
+ "Expected adjust alloca node");
+
+ MachineOperand& MO = MI->getOperand(2);
+ assert(MO.isImm() && "Expected immediate");
+ MO.setImm(MFI->getMaxCallFrameSize());
+ }
+
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+
+ if (needsFrameMoves) {
+ // Advance CFA. DW_CFA_def_cfa
+ unsigned FPReg = QRI->getFrameRegister();
+ unsigned RAReg = QRI->getRARegister();
+
+ MachineLocation Dst(MachineLocation::VirtualFP);
+ MachineLocation Src(FPReg, -8);
+ Moves.push_back(MachineMove(0, Dst, Src));
+
+ // R31 = (R31 - #4)
+ MachineLocation LRDst(RAReg, -4);
+ MachineLocation LRSrc(RAReg);
+ Moves.push_back(MachineMove(0, LRDst, LRSrc));
+
+ // R30 = (R30 - #8)
+ MachineLocation SPDst(FPReg, -8);
+ MachineLocation SPSrc(FPReg);
+ Moves.push_back(MachineMove(0, SPDst, SPSrc));
+ }
+
+ //
+ // Only insert ALLOCFRAME if we need to.
+ //
+ if (hasFP(MF)) {
+ // Check for overflow.
+ // Hexagon_TODO: Ugh! hardcoding. Is there an API that can be used?
+ const int ALLOCFRAME_MAX = 16384;
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ if (NumBytes >= ALLOCFRAME_MAX) {
+ // Emit allocframe(#0).
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::ALLOCFRAME)).addImm(0);
+
+ // Subtract offset from frame pointer.
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::CONST32_Int_Real),
+ HEXAGON_RESERVED_REG_1).addImm(NumBytes);
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::SUB_rr),
+ QRI->getStackRegister()).
+ addReg(QRI->getStackRegister()).
+ addReg(HEXAGON_RESERVED_REG_1);
+ } else {
+ BuildMI(MBB, InsertPt, dl, TII.get(Hexagon::ALLOCFRAME)).addImm(NumBytes);
+ }
+ }
+}
+// Returns true if MBB has a machine instruction that indicates a tail call
+// in the block.
+bool HexagonFrameLowering::hasTailCall(MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ unsigned RetOpcode = MBBI->getOpcode();
+
+ return RetOpcode == Hexagon::TCRETURNtg || RetOpcode == Hexagon::TCRETURNtext;
+}
+
+void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = prior(MBB.end());
+ DebugLoc dl = MBBI->getDebugLoc();
+ //
+ // Only insert deallocframe if we need to.
+ //
+ if (hasFP(MF)) {
+ MachineBasicBlock::iterator MBBI = prior(MBB.end());
+ MachineBasicBlock::iterator MBBI_end = MBB.end();
+ //
+ // For Hexagon, we don't need the frame size.
+ //
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ int NumBytes = (int) MFI->getStackSize();
+
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+
+ // Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
+ // versions.
+ if (STI.hasV4TOps() && MBBI->getOpcode() == Hexagon::JMPR
+ && !DisableDeallocRet) {
+ // Remove jumpr node.
+ MBB.erase(MBBI);
+ // Add dealloc_return.
+ BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::DEALLOC_RET_V4))
+ .addImm(NumBytes);
+ } else { // Add deallocframe for V2 and V3.
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME)).addImm(NumBytes);
+ }
+ }
+}
+
+bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const HexagonMachineFunctionInfo *FuncInfo =
+ MF.getInfo<HexagonMachineFunctionInfo>();
+ return (MFI->hasCalls() || (MFI->getStackSize() > 0) ||
+ FuncInfo->hasClobberLR() );
+}
+
+bool
+HexagonFrameLowering::spillCalleeSavedRegisters(
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ MachineFunction *MF = MBB.getParent();
+ const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
+
+ if (CSI.empty()) {
+ return false;
+ }
+
+ // We can only schedule double stores if we spill contiguous callee-saved
+ // registers. For instance, we cannot schedule double-word stores if we
+ // spill r24, r26, and r27.
+ // Hexagon_TODO: We can try to double-word align odd registers for -O2 and
+ // above.
+ bool ContiguousRegs = true;
+
+ for (unsigned i = 0; i < CSI.size(); ++i) {
+ unsigned Reg = CSI[i].getReg();
+
+ //
+ // Check if we can use a double-word store.
+ //
+ const uint16_t* SuperReg = TRI->getSuperRegisters(Reg);
+
+ // Assume that there is exactly one superreg.
+ assert(SuperReg[0] && !SuperReg[1] && "Expected exactly one superreg");
+ bool CanUseDblStore = false;
+ const TargetRegisterClass* SuperRegClass = 0;
+
+ if (ContiguousRegs && (i < CSI.size()-1)) {
+ const uint16_t* SuperRegNext = TRI->getSuperRegisters(CSI[i+1].getReg());
+ assert(SuperRegNext[0] && !SuperRegNext[1] &&
+ "Expected exactly one superreg");
+ SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg[0]);
+ CanUseDblStore = (SuperRegNext[0] == SuperReg[0]);
+ }
+
+
+ if (CanUseDblStore) {
+ TII.storeRegToStackSlot(MBB, MI, SuperReg[0], true,
+ CSI[i+1].getFrameIdx(), SuperRegClass, TRI);
+ MBB.addLiveIn(SuperReg[0]);
+ ++i;
+ } else {
+ // Cannot use a double-word store.
+ ContiguousRegs = false;
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i].getFrameIdx(), RC,
+ TRI);
+ MBB.addLiveIn(Reg);
+ }
+ }
+ return true;
+}
+
+
+bool HexagonFrameLowering::restoreCalleeSavedRegisters(
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+
+ MachineFunction *MF = MBB.getParent();
+ const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
+
+ if (CSI.empty()) {
+ return false;
+ }
+
+ // We can only schedule double loads if we spill contiguous callee-saved
+ // registers. For instance, we cannot schedule double-word loads if we
+ // spill r24, r26, and r27.
+ // Hexagon_TODO: We can try to double-word align odd registers for -O2 and
+ // above.
+ bool ContiguousRegs = true;
+
+ for (unsigned i = 0; i < CSI.size(); ++i) {
+ unsigned Reg = CSI[i].getReg();
+
+ //
+ // Check if we can use a double-word load.
+ //
+ const uint16_t* SuperReg = TRI->getSuperRegisters(Reg);
+ const TargetRegisterClass* SuperRegClass = 0;
+
+ // Assume that there is exactly one superreg.
+ assert(SuperReg[0] && !SuperReg[1] && "Expected exactly one superreg");
+ bool CanUseDblLoad = false;
+ if (ContiguousRegs && (i < CSI.size()-1)) {
+ const uint16_t* SuperRegNext = TRI->getSuperRegisters(CSI[i+1].getReg());
+ assert(SuperRegNext[0] && !SuperRegNext[1] &&
+ "Expected exactly one superreg");
+ SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg[0]);
+ CanUseDblLoad = (SuperRegNext[0] == SuperReg[0]);
+ }
+
+
+ if (CanUseDblLoad) {
+ TII.loadRegFromStackSlot(MBB, MI, SuperReg[0], CSI[i+1].getFrameIdx(),
+ SuperRegClass, TRI);
+ MBB.addLiveIn(SuperReg[0]);
+ ++i;
+ } else {
+ // Cannot use a double-word load.
+ ContiguousRegs = false;
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
+ MBB.addLiveIn(Reg);
+ }
+ }
+ return true;
+}
+
+int HexagonFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ return MF.getFrameInfo()->getObjectOffset(FI);
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h
new file mode 100644
index 0000000..ad87f11
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonFrameLowering.h
@@ -0,0 +1,50 @@
+//=- HexagonFrameLowering.h - Define frame lowering for Hexagon --*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGON_FRAMEINFO_H
+#define HEXAGON_FRAMEINFO_H
+
+#include "Hexagon.h"
+#include "HexagonSubtarget.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+
+class HexagonFrameLowering : public TargetFrameLowering {
+private:
+ const HexagonSubtarget &STI;
+ void determineFrameLayout(MachineFunction &MF) const;
+
+public:
+ explicit HexagonFrameLowering(const HexagonSubtarget &sti)
+ : TargetFrameLowering(StackGrowsDown, 8, 0), STI(sti) {
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+ virtual bool
+ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ virtual bool
+ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
+ bool hasFP(const MachineFunction &MF) const;
+ bool hasTailCall(MachineBasicBlock &MBB) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
new file mode 100644
index 0000000..57772a5
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -0,0 +1,644 @@
+//===-- HexagonHardwareLoops.cpp - Identify and generate hardware loops ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass identifies loops where we can generate the Hexagon hardware
+// loop instruction. The hardware loop can perform loop branches with a
+// zero-cycle overhead.
+//
+// The pattern that defines the induction variable can change depending on
+// prior optimizations. For example, the IndVarSimplify phase run by 'opt'
+// normalizes induction variables, and the Loop Strength Reduction pass
+// run by 'llc' may also make changes to the induction variable.
+// The pattern detected by this phase is due to running Strength Reduction.
+//
+// Criteria for hardware loops:
+// - Countable loops (w/ ind. var for a trip count)
+// - Assumes loops are normalized by IndVarSimplify
+// - Try inner-most loops first
+// - No nested hardware loops.
+// - No function calls in loops.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hwloops"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/Constants.h"
+#include "llvm/PassSupport.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include <algorithm>
+
+using namespace llvm;
+
+STATISTIC(NumHWLoops, "Number of loops converted to hardware loops");
+
+namespace {
+ class CountValue;
+ struct HexagonHardwareLoops : public MachineFunctionPass {
+ MachineLoopInfo *MLI;
+ MachineRegisterInfo *MRI;
+ const TargetInstrInfo *TII;
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+
+ HexagonHardwareLoops() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const { return "Hexagon Hardware Loops"; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ private:
+ /// getCanonicalInductionVariable - Check to see if the loop has a canonical
+ /// induction variable.
+ /// Should be defined in MachineLoop. Based upon version in class Loop.
+ const MachineInstr *getCanonicalInductionVariable(MachineLoop *L) const;
+
+ /// getTripCount - Return a loop-invariant LLVM register indicating the
+ /// number of times the loop will be executed. If the trip-count cannot
+ /// be determined, this returns null.
+ CountValue *getTripCount(MachineLoop *L) const;
+
+ /// isInductionOperation - Return true if the instruction matches the
+ /// pattern for an operation that defines an induction variable.
+ bool isInductionOperation(const MachineInstr *MI, unsigned IVReg) const;
+
+ /// isInvalidOperation - Return true if the instruction is not valid within
+ /// a hardware loop.
+ bool isInvalidLoopOperation(const MachineInstr *MI) const;
+
+ /// containsInvalidInstruction - Return true if the loop contains an
+ /// instruction that inhibits using the hardware loop.
+ bool containsInvalidInstruction(MachineLoop *L) const;
+
+ /// convertToHardwareLoop - Given a loop, check if we can convert it to a
+ /// hardware loop. If so, then perform the conversion and return true.
+ bool convertToHardwareLoop(MachineLoop *L);
+
+ };
+
+ char HexagonHardwareLoops::ID = 0;
+
+
+ // CountValue class - Abstraction for a trip count of a loop. A
+ // smaller version of the MachineOperand class without the concerns
+ // of changing the operand representation.
+ class CountValue {
+ public:
+ enum CountValueType {
+ CV_Register,
+ CV_Immediate
+ };
+ private:
+ CountValueType Kind;
+ union Values {
+ unsigned RegNum;
+ int64_t ImmVal;
+ Values(unsigned r) : RegNum(r) {}
+ Values(int64_t i) : ImmVal(i) {}
+ } Contents;
+ bool isNegative;
+
+ public:
+ CountValue(unsigned r, bool neg) : Kind(CV_Register), Contents(r),
+ isNegative(neg) {}
+ explicit CountValue(int64_t i) : Kind(CV_Immediate), Contents(i),
+ isNegative(i < 0) {}
+ CountValueType getType() const { return Kind; }
+ bool isReg() const { return Kind == CV_Register; }
+ bool isImm() const { return Kind == CV_Immediate; }
+ bool isNeg() const { return isNegative; }
+
+ unsigned getReg() const {
+ assert(isReg() && "Wrong CountValue accessor");
+ return Contents.RegNum;
+ }
+ void setReg(unsigned Val) {
+ Contents.RegNum = Val;
+ }
+ int64_t getImm() const {
+ assert(isImm() && "Wrong CountValue accessor");
+ if (isNegative) {
+ return -Contents.ImmVal;
+ }
+ return Contents.ImmVal;
+ }
+ void setImm(int64_t Val) {
+ Contents.ImmVal = Val;
+ }
+
+ void print(raw_ostream &OS, const TargetMachine *TM = 0) const {
+ if (isReg()) { OS << PrintReg(getReg()); }
+ if (isImm()) { OS << getImm(); }
+ }
+ };
+
+ struct HexagonFixupHwLoops : public MachineFunctionPass {
+ public:
+ static char ID; // Pass identification, replacement for typeid.
+
+ HexagonFixupHwLoops() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const { return "Hexagon Hardware Loop Fixup"; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ private:
+ /// Maximum distance between the loop instr and the basic block.
+ /// Just an estimate.
+ static const unsigned MAX_LOOP_DISTANCE = 200;
+
+ /// fixupLoopInstrs - Check the offset between each loop instruction and
+ /// the loop basic block to determine if we can use the LOOP instruction
+ /// or if we need to set the LC/SA registers explicitly.
+ bool fixupLoopInstrs(MachineFunction &MF);
+
+ /// convertLoopInstr - Add the instruction to set the LC and SA registers
+ /// explicitly.
+ void convertLoopInstr(MachineFunction &MF,
+ MachineBasicBlock::iterator &MII,
+ RegScavenger &RS);
+
+ };
+
+ char HexagonFixupHwLoops::ID = 0;
+
+} // end anonymous namespace
+
+
+/// isHardwareLoop - Returns true if the instruction is a hardware loop
+/// instruction.
+static bool isHardwareLoop(const MachineInstr *MI) {
+ return MI->getOpcode() == Hexagon::LOOP0_r ||
+ MI->getOpcode() == Hexagon::LOOP0_i;
+}
+
+/// isCompareEquals - Returns true if the instruction is a compare equals
+/// instruction with an immediate operand.
+static bool isCompareEqualsImm(const MachineInstr *MI) {
+ return MI->getOpcode() == Hexagon::CMPEQri;
+}
+
+
+/// createHexagonHardwareLoops - Factory for creating
+/// the hardware loop phase.
+FunctionPass *llvm::createHexagonHardwareLoops() {
+ return new HexagonHardwareLoops();
+}
+
+
+bool HexagonHardwareLoops::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********* Hexagon Hardware Loops *********\n");
+
+ bool Changed = false;
+
+ // get the loop information
+ MLI = &getAnalysis<MachineLoopInfo>();
+ // get the register information
+ MRI = &MF.getRegInfo();
+ // the target-specific instruction info.
+ TII = MF.getTarget().getInstrInfo();
+
+ for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end();
+ I != E; ++I) {
+ MachineLoop *L = *I;
+ if (!L->getParentLoop()) {
+ Changed |= convertToHardwareLoop(L);
+ }
+ }
+
+ return Changed;
+}
+
+/// getCanonicalInductionVariable - Check to see if the loop has a canonical
+/// induction variable. We check for a simple recurrence pattern - an
+/// integer recurrence that decrements by one each time through the loop and
+/// ends at zero. If so, return the phi node that corresponds to it.
+///
+/// Based upon the similar code in LoopInfo except this code is specific to
+/// the machine.
+/// This method assumes that the IndVarSimplify pass has been run by 'opt'.
+///
+const MachineInstr
+*HexagonHardwareLoops::getCanonicalInductionVariable(MachineLoop *L) const {
+ MachineBasicBlock *TopMBB = L->getTopBlock();
+ MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin();
+ assert(PI != TopMBB->pred_end() &&
+ "Loop must have more than one incoming edge!");
+ MachineBasicBlock *Backedge = *PI++;
+ if (PI == TopMBB->pred_end()) return 0; // dead loop
+ MachineBasicBlock *Incoming = *PI++;
+ if (PI != TopMBB->pred_end()) return 0; // multiple backedges?
+
+ // make sure there is one incoming and one backedge and determine which
+ // is which.
+ if (L->contains(Incoming)) {
+ if (L->contains(Backedge))
+ return 0;
+ std::swap(Incoming, Backedge);
+ } else if (!L->contains(Backedge))
+ return 0;
+
+ // Loop over all of the PHI nodes, looking for a canonical induction variable:
+ // - The PHI node is "reg1 = PHI reg2, BB1, reg3, BB2".
+ // - The recurrence comes from the backedge.
+ // - The definition is an induction operation.
+ for (MachineBasicBlock::iterator I = TopMBB->begin(), E = TopMBB->end();
+ I != E && I->isPHI(); ++I) {
+ const MachineInstr *MPhi = &*I;
+ unsigned DefReg = MPhi->getOperand(0).getReg();
+ for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) {
+ // Check each operand for the value from the backedge.
+ MachineBasicBlock *MBB = MPhi->getOperand(i+1).getMBB();
+ if (L->contains(MBB)) { // operand comes from the backedge
+ // Check if the definition is an induction operation.
+ const MachineInstr *DI = MRI->getVRegDef(MPhi->getOperand(i).getReg());
+ if (isInductionOperation(DI, DefReg)) {
+ return MPhi;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/// getTripCount - Return a loop-invariant LLVM value indicating the
+/// number of times the loop will be executed. The trip count can
+/// be either a register or a constant value. If the trip-count
+/// cannot be determined, this returns null.
+///
+/// We find the trip count from the phi instruction that defines the
+/// induction variable. We follow the links to the CMP instruction
+/// to get the trip count.
+///
+/// Based upon getTripCount in LoopInfo.
+///
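+/// For a canonical counted loop the relevant machine IR looks roughly like
+/// this (register names are illustrative):
+///   vreg1 = PHI [vreg0, %preheader], [vreg2, %latch]
+///   vreg2 = ADD_ri vreg1, #-1
+///   p0 = CMPEQri vreg2, #0
+/// The trip count is then the PHI's initial value (register case) or the
+/// distance to the compared immediate divided by the step (immediate case).
+///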
+CountValue *HexagonHardwareLoops::getTripCount(MachineLoop *L) const {
+ // Check that the loop has an induction variable.
+ const MachineInstr *IV_Inst = getCanonicalInductionVariable(L);
+ if (IV_Inst == 0) return 0;
+
+ // Canonical loops will end with a 'cmpeq_ri IV, Imm':
+ // if Imm is 0, get the count from the PHI operand;
+ // if Imm is -M, then M is the count;
+ // otherwise, Imm is the count.
+ const MachineOperand *IV_Opnd;
+ const MachineOperand *InitialValue;
+ if (!L->contains(IV_Inst->getOperand(2).getMBB())) {
+ InitialValue = &IV_Inst->getOperand(1);
+ IV_Opnd = &IV_Inst->getOperand(3);
+ } else {
+ InitialValue = &IV_Inst->getOperand(3);
+ IV_Opnd = &IV_Inst->getOperand(1);
+ }
+
+ // Look for the cmp instruction to determine if we
+ // can get a useful trip count. The trip count can
+ // be either a register or an immediate. The location
+ // of the value depends upon the type (reg or imm).
+ while ((IV_Opnd = IV_Opnd->getNextOperandForReg())) {
+ const MachineInstr *MI = IV_Opnd->getParent();
+ if (L->contains(MI) && isCompareEqualsImm(MI)) {
+ const MachineOperand &MO = MI->getOperand(2);
+ assert(MO.isImm() && "IV Cmp Operand should be 0");
+ int64_t ImmVal = MO.getImm();
+
+ const MachineInstr *IV_DefInstr = MRI->getVRegDef(IV_Opnd->getReg());
+ assert(L->contains(IV_DefInstr->getParent()) &&
+ "IV definition should occurs in loop");
+ int64_t iv_value = IV_DefInstr->getOperand(2).getImm();
+
+ if (ImmVal == 0) {
+ // Make sure the induction variable changes by one on each iteration.
+ if (iv_value != 1 && iv_value != -1) {
+ return 0;
+ }
+ return new CountValue(InitialValue->getReg(), iv_value > 0);
+ } else {
+ assert(InitialValue->isReg() && "Expecting register for init value");
+ const MachineInstr *DefInstr = MRI->getVRegDef(InitialValue->getReg());
+ if (DefInstr && DefInstr->getOpcode() == Hexagon::TFRI) {
+ int64_t count = ImmVal - DefInstr->getOperand(1).getImm();
+ if ((count % iv_value) != 0) {
+ return 0;
+ }
+ return new CountValue(count/iv_value);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/// isInductionOperation - Return true if the operation matches the
+/// pattern that defines an induction variable:
+/// add iv, c
+///
+bool
+HexagonHardwareLoops::isInductionOperation(const MachineInstr *MI,
+ unsigned IVReg) const {
+ return (MI->getOpcode() ==
+ Hexagon::ADD_ri && MI->getOperand(1).getReg() == IVReg);
+}
+
+/// isInvalidLoopOperation - Return true if the operation is invalid within a
+/// hardware loop.
+bool
+HexagonHardwareLoops::isInvalidLoopOperation(const MachineInstr *MI) const {
+
+ // call is not allowed because the callee may use a hardware loop
+ if (MI->getDesc().isCall()) {
+ return true;
+ }
+ // do not allow nested hardware loops
+ if (isHardwareLoop(MI)) {
+ return true;
+ }
+ // check if the instruction defines a hardware loop register
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDef() &&
+ (MO.getReg() == Hexagon::LC0 || MO.getReg() == Hexagon::LC1 ||
+ MO.getReg() == Hexagon::SA0 || MO.getReg() == Hexagon::SA1)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// containsInvalidInstruction - Return true if the loop contains
+/// an instruction that inhibits the use of the hardware loop function.
+///
+bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L) const {
+ const std::vector<MachineBasicBlock*> Blocks = L->getBlocks();
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = Blocks[i];
+ for (MachineBasicBlock::iterator
+ MII = MBB->begin(), E = MBB->end(); MII != E; ++MII) {
+ const MachineInstr *MI = &*MII;
+ if (isInvalidLoopOperation(MI)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/// convertToHardwareLoop - Check if the loop is a candidate for
+/// converting to a hardware loop. If so, then perform the
+/// transformation.
+///
+/// This function works on innermost loops first. A loop can
+/// be converted if it is a counting loop; either a register
+/// value or an immediate.
+///
+/// The code makes several assumptions about the representation
+/// of the loop in llvm.
+bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) {
+ bool Changed = false;
+ // Process nested loops first.
+ for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
+ Changed |= convertToHardwareLoop(*I);
+ }
+ // If a nested loop has been converted, then we can't convert this loop.
+ if (Changed) {
+ return Changed;
+ }
+ // Are we able to determine the trip count for the loop?
+ CountValue *TripCount = getTripCount(L);
+ if (TripCount == 0) {
+ return false;
+ }
+ // Does the loop contain any invalid instructions?
+ if (containsInvalidInstruction(L)) {
+ return false;
+ }
+ MachineBasicBlock *Preheader = L->getLoopPreheader();
+ // No preheader means there's no place for the loop instruction.
+ if (Preheader == 0) {
+ return false;
+ }
+ MachineBasicBlock::iterator InsertPos = Preheader->getFirstTerminator();
+
+ MachineBasicBlock *LastMBB = L->getExitingBlock();
+ // Don't generate hw loop if the loop has more than one exit.
+ if (LastMBB == 0) {
+ return false;
+ }
+ MachineBasicBlock::iterator LastI = LastMBB->getFirstTerminator();
+
+ // Determine the loop start.
+ MachineBasicBlock *LoopStart = L->getTopBlock();
+ if (L->getLoopLatch() != LastMBB) {
+ // When the exit and latch are not the same, use the latch block as the
+ // start.
+ // The loop start address is used only after the 1st iteration, and the loop
+ // latch may contain instructions that need to be executed after the 1st
+ // iteration.
+ LoopStart = L->getLoopLatch();
+ // Make sure the latch is a successor of the exit, otherwise it won't work.
+ if (!LastMBB->isSuccessor(LoopStart)) {
+ return false;
+ }
+ }
+
+ // Convert the loop to a hardware loop
+ DEBUG(dbgs() << "Change to hardware loop at "; L->dump());
+
+ if (TripCount->isReg()) {
+ // Create a copy of the loop count register.
+ MachineFunction *MF = LastMBB->getParent();
+ const TargetRegisterClass *RC =
+ MF->getRegInfo().getRegClass(TripCount->getReg());
+ unsigned CountReg = MF->getRegInfo().createVirtualRegister(RC);
+ BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), CountReg).addReg(TripCount->getReg());
+ if (TripCount->isNeg()) {
+ unsigned CountReg1 = CountReg;
+ CountReg = MF->getRegInfo().createVirtualRegister(RC);
+ BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
+ TII->get(Hexagon::NEG), CountReg).addReg(CountReg1);
+ }
+
+ // Add the Loop instruction to the beginning of the loop.
+ BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
+ TII->get(Hexagon::LOOP0_r)).addMBB(LoopStart).addReg(CountReg);
+ } else {
+ assert(TripCount->isImm() && "Expecting immedate vaule for trip count");
+ // Add the Loop immediate instruction to the beginning of the loop.
+ int64_t CountImm = TripCount->getImm();
+ BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(),
+ TII->get(Hexagon::LOOP0_i)).addMBB(LoopStart).addImm(CountImm);
+ }
+
+ // Make sure the loop start always has a reference in the CFG. We need to
+ // create a BlockAddress operand to get this mechanism to work; both the
+ // MachineBasicBlock and BasicBlock objects need the flag set.
+ LoopStart->setHasAddressTaken();
+ // This line is needed to set the hasAddressTaken flag on the BasicBlock
+ // object
+ BlockAddress::get(const_cast<BasicBlock *>(LoopStart->getBasicBlock()));
+
+ // Replace the loop branch with an endloop instruction.
+ DebugLoc dl = LastI->getDebugLoc();
+ BuildMI(*LastMBB, LastI, dl, TII->get(Hexagon::ENDLOOP0)).addMBB(LoopStart);
+
+ // The loop ends with either:
+ // - a conditional branch followed by an unconditional branch, or
+ // - a conditional branch to the loop start.
+ if (LastI->getOpcode() == Hexagon::JMP_c ||
+ LastI->getOpcode() == Hexagon::JMP_cNot) {
+ // Delete one branch and change/add an unconditional branch out of the loop.
+ MachineBasicBlock *BranchTarget = LastI->getOperand(1).getMBB();
+ LastI = LastMBB->erase(LastI);
+ if (!L->contains(BranchTarget)) {
+ if (LastI != LastMBB->end()) {
+ TII->RemoveBranch(*LastMBB);
+ }
+ SmallVector<MachineOperand, 0> Cond;
+ TII->InsertBranch(*LastMBB, BranchTarget, 0, Cond, dl);
+ }
+ } else {
+ // Conditional branch to loop start; just delete it.
+ LastMBB->erase(LastI);
+ }
+ delete TripCount;
+
+ ++NumHWLoops;
+ return true;
+}
+
+/// createHexagonFixupHwLoops - Factory for creating the hardware loop
+/// phase.
+FunctionPass *llvm::createHexagonFixupHwLoops() {
+ return new HexagonFixupHwLoops();
+}
+
+bool HexagonFixupHwLoops::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "****** Hexagon Hardware Loop Fixup ******\n");
+
+ bool Changed = fixupLoopInstrs(MF);
+ return Changed;
+}
+
+/// fixupLoopInstrs - For Hexagon, if the loop label is too far from the
+/// loop instruction then we need to set the LC0 and SA0 registers
+/// explicitly instead of using LOOP(start,count). This function
+/// checks the distance, and generates register assignments if needed.
+///
+/// This function makes two passes over the basic blocks. The first
+/// pass computes the offset of the basic block from the start.
+/// The second pass checks all the loop instructions.
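+///
+/// Offsets are estimated by assuming a fixed 4 bytes per instruction, so the
+/// comparison against MAX_LOOP_DISTANCE is an estimate rather than an exact
+/// distance check.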
+bool HexagonFixupHwLoops::fixupLoopInstrs(MachineFunction &MF) {
+
+ // Offset of the current instruction from the start.
+ unsigned InstOffset = 0;
+ // Map from each basic block to the offset of its first instruction.
+ DenseMap<MachineBasicBlock*, unsigned> BlockToInstOffset;
+
+ // First pass - compute the offset of each basic block.
+ for (MachineFunction::iterator MBB = MF.begin(), MBBe = MF.end();
+ MBB != MBBe; ++MBB) {
+ BlockToInstOffset[MBB] = InstOffset;
+ InstOffset += (MBB->size() * 4);
+ }
+
+ // Second pass - check each loop instruction to see if it needs to
+ // be converted.
+ InstOffset = 0;
+ bool Changed = false;
+ RegScavenger RS;
+
+ // Loop over all the basic blocks.
+ for (MachineFunction::iterator MBB = MF.begin(), MBBe = MF.end();
+ MBB != MBBe; ++MBB) {
+ InstOffset = BlockToInstOffset[MBB];
+ RS.enterBasicBlock(MBB);
+
+ // Loop over all the instructions.
+ MachineBasicBlock::iterator MIE = MBB->end();
+ MachineBasicBlock::iterator MII = MBB->begin();
+ while (MII != MIE) {
+ if (isHardwareLoop(MII)) {
+ RS.forward(MII);
+ assert(MII->getOperand(0).isMBB() &&
+ "Expect a basic block as loop operand");
+ int diff = InstOffset - BlockToInstOffset[MII->getOperand(0).getMBB()];
+ diff = (diff > 0 ? diff : -diff);
+ if ((unsigned)diff > MAX_LOOP_DISTANCE) {
+ // Convert to explicitly setting LC0 and SA0.
+ convertLoopInstr(MF, MII, RS);
+ MII = MBB->erase(MII);
+ Changed = true;
+ } else {
+ ++MII;
+ }
+ } else {
+ ++MII;
+ }
+ InstOffset += 4;
+ }
+ }
+
+ return Changed;
+
+}
+
+/// convertLoopInstr - Convert a loop instruction to a sequence of instructions
+/// that set the LC and SA registers explicitly.
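+///
+/// Roughly, for an immediate trip count the emitted sequence is:
+///   r_scratch = #count            // TFRI into a scavenged register
+///   LC0 = r_scratch               // TFCR
+///   r_scratch = ##start_label     // CONST32_Label
+///   SA0 = r_scratch               // TFCR
+/// For a register trip count the first two steps collapse into a single
+/// transfer to LC0.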
+void HexagonFixupHwLoops::convertLoopInstr(MachineFunction &MF,
+ MachineBasicBlock::iterator &MII,
+ RegScavenger &RS) {
+ const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ MachineBasicBlock *MBB = MII->getParent();
+ DebugLoc DL = MII->getDebugLoc();
+ unsigned Scratch = RS.scavengeRegister(Hexagon::IntRegsRegisterClass, MII, 0);
+
+ // First, set the LC0 with the trip count.
+ if (MII->getOperand(1).isReg()) {
+ // Trip count is a register
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::LC0)
+ .addReg(MII->getOperand(1).getReg());
+ } else {
+ // Trip count is an immediate.
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFRI), Scratch)
+ .addImm(MII->getOperand(1).getImm());
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::LC0)
+ .addReg(Scratch);
+ }
+ // Then, set the SA0 with the loop start address.
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::CONST32_Label), Scratch)
+ .addMBB(MII->getOperand(0).getMBB());
+ BuildMI(*MBB, MII, DL, TII->get(Hexagon::TFCR), Hexagon::SA0).addReg(Scratch);
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
new file mode 100644
index 0000000..9df965e
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -0,0 +1,1485 @@
+//===-- HexagonISelDAGToDAG.cpp - A dag to dag inst selector for Hexagon --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the Hexagon target.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hexagon-isel"
+#include "HexagonISelLowering.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+//===--------------------------------------------------------------------===//
+/// HexagonDAGToDAGISel - Hexagon specific code to select Hexagon machine
+/// instructions for SelectionDAG operations.
+///
+namespace {
+class HexagonDAGToDAGISel : public SelectionDAGISel {
+ /// Subtarget - Keep a pointer to the Hexagon Subtarget around so that we can
+ /// make the right decision when generating code for different targets.
+ const HexagonSubtarget &Subtarget;
+
+ // Keep a reference to HexagonTargetMachine.
+ HexagonTargetMachine& TM;
+ const HexagonInstrInfo *TII;
+
+public:
+ explicit HexagonDAGToDAGISel(HexagonTargetMachine &targetmachine)
+ : SelectionDAGISel(targetmachine),
+ Subtarget(targetmachine.getSubtarget<HexagonSubtarget>()),
+ TM(targetmachine),
+ TII(static_cast<const HexagonInstrInfo*>(TM.getInstrInfo())) {
+
+ }
+
+ SDNode *Select(SDNode *N);
+
+ // Complex Pattern Selectors.
+ bool SelectADDRri(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriS11_0(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriS11_1(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriS11_2(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectMEMriS11_2(SDValue& Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRriS11_3(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRrr(SDValue &Addr, SDValue &Base, SDValue &Offset);
+ bool SelectADDRriU6_0(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriU6_1(SDValue& N, SDValue &R1, SDValue &R2);
+ bool SelectADDRriU6_2(SDValue& N, SDValue &R1, SDValue &R2);
+
+ virtual const char *getPassName() const {
+ return "Hexagon DAG->DAG Pattern Instruction Selection";
+ }
+
+ /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
+ /// inline asm expressions.
+ virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps);
+ bool SelectAddr(SDNode *Op, SDValue Addr, SDValue &Base, SDValue &Offset);
+
+ SDNode *SelectLoad(SDNode *N);
+ SDNode *SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl);
+ SDNode *SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl);
+ SDNode *SelectIndexedLoadZeroExtend64(LoadSDNode *LD, unsigned Opcode,
+ DebugLoc dl);
+ SDNode *SelectIndexedLoadSignExtend64(LoadSDNode *LD, unsigned Opcode,
+ DebugLoc dl);
+ SDNode *SelectBaseOffsetStore(StoreSDNode *ST, DebugLoc dl);
+ SDNode *SelectIndexedStore(StoreSDNode *ST, DebugLoc dl);
+ SDNode *SelectStore(SDNode *N);
+ SDNode *SelectSHL(SDNode *N);
+ SDNode *SelectSelect(SDNode *N);
+ SDNode *SelectTruncate(SDNode *N);
+ SDNode *SelectMul(SDNode *N);
+ SDNode *SelectZeroExtend(SDNode *N);
+ SDNode *SelectIntrinsicWOChain(SDNode *N);
+ SDNode *SelectConstant(SDNode *N);
+ SDNode *SelectAdd(SDNode *N);
+
+ // Include the pieces autogenerated from the target description.
+#include "HexagonGenDAGISel.inc"
+};
+} // end anonymous namespace
+
+
+/// createHexagonISelDag - This pass converts a legalized DAG into a
+/// Hexagon-specific DAG, ready for instruction scheduling.
+///
+FunctionPass *llvm::createHexagonISelDag(HexagonTargetMachine &TM) {
+ return new HexagonDAGToDAGISel(TM);
+}
+
+static bool IsS11_0_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+ // s11 predicate - True if the immediate fits in an 11-bit sign-extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<11>(v);
+}
+
+
+static bool IsS11_1_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+ // s11_1 predicate - True if the immediate is an 11-bit sign-extended value
+ // shifted left by 1.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,1>(v);
+}
+
+
+static bool IsS11_2_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+ // s11_2 predicate - True if the immediate is an 11-bit sign-extended value
+ // shifted left by 2.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,2>(v);
+}
+
+
+static bool IsS11_3_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+ // s11_3 predicate - True if the immediate is an 11-bit sign-extended value
+ // shifted left by 3.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,3>(v);
+}
+
+
+static bool IsU6_0_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+ // u6 predicate - True if the immediate fits in a 6-bit unsigned field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<6>(v);
+}
+
+
+static bool IsU6_1_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+ // u6_1 predicate - True if the immediate is a 6-bit unsigned value shifted
+ // left by 1.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<6,1>(v);
+}
+
+
+static bool IsU6_2_Offset(SDNode * S) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+ // u6_2 predicate - True if the immediate is a 6-bit unsigned value shifted
+ // left by 2.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<6,2>(v);
+}
+
+
+// Intrinsics that return a predicate.
+static unsigned doesIntrinsicReturnPredicate(unsigned ID)
+{
+ switch (ID) {
+ default:
+ return 0;
+ case Intrinsic::hexagon_C2_cmpeq:
+ case Intrinsic::hexagon_C2_cmpgt:
+ case Intrinsic::hexagon_C2_cmpgtu:
+ case Intrinsic::hexagon_C2_cmpgtup:
+ case Intrinsic::hexagon_C2_cmpgtp:
+ case Intrinsic::hexagon_C2_cmpeqp:
+ case Intrinsic::hexagon_C2_bitsset:
+ case Intrinsic::hexagon_C2_bitsclr:
+ case Intrinsic::hexagon_C2_cmpeqi:
+ case Intrinsic::hexagon_C2_cmpgti:
+ case Intrinsic::hexagon_C2_cmpgtui:
+ case Intrinsic::hexagon_C2_cmpgei:
+ case Intrinsic::hexagon_C2_cmpgeui:
+ case Intrinsic::hexagon_C2_cmplt:
+ case Intrinsic::hexagon_C2_cmpltu:
+ case Intrinsic::hexagon_C2_bitsclri:
+ case Intrinsic::hexagon_C2_and:
+ case Intrinsic::hexagon_C2_or:
+ case Intrinsic::hexagon_C2_xor:
+ case Intrinsic::hexagon_C2_andn:
+ case Intrinsic::hexagon_C2_not:
+ case Intrinsic::hexagon_C2_orn:
+ case Intrinsic::hexagon_C2_pxfer_map:
+ case Intrinsic::hexagon_C2_any8:
+ case Intrinsic::hexagon_C2_all8:
+ case Intrinsic::hexagon_A2_vcmpbeq:
+ case Intrinsic::hexagon_A2_vcmpbgtu:
+ case Intrinsic::hexagon_A2_vcmpheq:
+ case Intrinsic::hexagon_A2_vcmphgt:
+ case Intrinsic::hexagon_A2_vcmphgtu:
+ case Intrinsic::hexagon_A2_vcmpweq:
+ case Intrinsic::hexagon_A2_vcmpwgt:
+ case Intrinsic::hexagon_A2_vcmpwgtu:
+ case Intrinsic::hexagon_C2_tfrrp:
+ case Intrinsic::hexagon_S2_tstbit_i:
+ case Intrinsic::hexagon_S2_tstbit_r:
+ return 1;
+ }
+}
+
+
+// Intrinsics that have predicate operands.
+static unsigned doesIntrinsicContainPredicate(unsigned ID)
+{
+ switch (ID) {
+ default:
+ return 0;
+ case Intrinsic::hexagon_C2_tfrpr:
+ return Hexagon::TFR_RsPd;
+ case Intrinsic::hexagon_C2_and:
+ return Hexagon::AND_pp;
+ case Intrinsic::hexagon_C2_xor:
+ return Hexagon::XOR_pp;
+ case Intrinsic::hexagon_C2_or:
+ return Hexagon::OR_pp;
+ case Intrinsic::hexagon_C2_not:
+ return Hexagon::NOT_p;
+ case Intrinsic::hexagon_C2_any8:
+ return Hexagon::ANY_pp;
+ case Intrinsic::hexagon_C2_all8:
+ return Hexagon::ALL_pp;
+ case Intrinsic::hexagon_C2_vitpack:
+ return Hexagon::VITPACK_pp;
+ case Intrinsic::hexagon_C2_mask:
+ return Hexagon::MASK_p;
+ case Intrinsic::hexagon_C2_mux:
+ return Hexagon::MUX_rr;
+
+ // Mapping hexagon_C2_muxir to MUX_pri. This is pretty weird - but
+ // that's how it's mapped in q6protos.h.
+ case Intrinsic::hexagon_C2_muxir:
+ return Hexagon::MUX_ri;
+
+ // Mapping hexagon_C2_muxri to MUX_pir. This is pretty weird - but
+ // that's how it's mapped in q6protos.h.
+ case Intrinsic::hexagon_C2_muxri:
+ return Hexagon::MUX_ir;
+
+ case Intrinsic::hexagon_C2_muxii:
+ return Hexagon::MUX_ii;
+ case Intrinsic::hexagon_C2_vmux:
+ return Hexagon::VMUX_prr64;
+ case Intrinsic::hexagon_S2_valignrb:
+ return Hexagon::VALIGN_rrp;
+ case Intrinsic::hexagon_S2_vsplicerb:
+ return Hexagon::VSPLICE_rrp;
+ }
+}
+
+
+static bool OffsetFitsS11(EVT MemType, int64_t Offset) {
+ if (MemType == MVT::i64 && isShiftedInt<11,3>(Offset)) {
+ return true;
+ }
+ if (MemType == MVT::i32 && isShiftedInt<11,2>(Offset)) {
+ return true;
+ }
+ if (MemType == MVT::i16 && isShiftedInt<11,1>(Offset)) {
+ return true;
+ }
+ if (MemType == MVT::i8 && isInt<11>(Offset)) {
+ return true;
+ }
+ return false;
+}
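+
+// Equivalently, the reachable offsets are an 11-bit signed field scaled by
+// the access size: multiples of 8 in [-8192, 8184] for i64, multiples of 4
+// in [-4096, 4092] for i32, multiples of 2 in [-2048, 2046] for i16, and
+// [-1024, 1023] for i8.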
+
+
+//
+// Try to lower loads of GlobalAddresses into base+offset loads. Custom
+// lowering for GlobalAddress nodes has already turned it into a
+// CONST32.
+//
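+// Illustrative sketch (assembly for intuition, not literal output): a word
+// load from g + 8, where the offset fits the scaled S11 range, becomes
+//   r1 = CONST32(#g)        // via CONST32_set
+//   r0 = memw(r1 + #8)      // LDriw_indexed
+// rather than first materializing the full address g + 8.
+//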
+SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) {
+ SDValue Chain = LD->getChain();
+ SDNode* Const32 = LD->getBasePtr().getNode();
+ unsigned Opcode = 0;
+
+ if (Const32->getOpcode() == HexagonISD::CONST32 &&
+ ISD::isNormalLoad(LD)) {
+ SDValue Base = Const32->getOperand(0);
+ EVT LoadedVT = LD->getMemoryVT();
+ int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset();
+ if (Offset != 0 && OffsetFitsS11(LoadedVT, Offset)) {
+ MVT PointerTy = TLI.getPointerTy();
+ const GlobalValue* GV =
+ cast<GlobalAddressSDNode>(Base)->getGlobal();
+ SDValue TargAddr =
+ CurDAG->getTargetGlobalAddress(GV, dl, PointerTy, 0);
+ SDNode* NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
+ dl, PointerTy,
+ TargAddr);
+ // Figure out base + offset opcode
+ if (LoadedVT == MVT::i64) Opcode = Hexagon::LDrid_indexed;
+ else if (LoadedVT == MVT::i32) Opcode = Hexagon::LDriw_indexed;
+ else if (LoadedVT == MVT::i16) Opcode = Hexagon::LDrih_indexed;
+ else if (LoadedVT == MVT::i8) Opcode = Hexagon::LDrib_indexed;
+ else assert (0 && "unknown memory type");
+
+ // Build indexed load.
+ SDValue TargetConstOff = CurDAG->getTargetConstant(Offset, PointerTy);
+ SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
+ LD->getValueType(0),
+ MVT::Other,
+ SDValue(NewBase,0),
+ TargetConstOff,
+ Chain);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+ ReplaceUses(LD, Result);
+ return Result;
+ }
+ }
+
+ return SelectCode(LD);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD,
+ unsigned Opcode,
+ DebugLoc dl)
+{
+ SDValue Chain = LD->getChain();
+ EVT LoadedVT = LD->getMemoryVT();
+ SDValue Base = LD->getBasePtr();
+ SDValue Offset = LD->getOffset();
+ SDNode *OffsetNode = Offset.getNode();
+ int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ SDValue N1 = LD->getOperand(1);
+ SDValue CPTmpN1_0;
+ SDValue CPTmpN1_1;
+ if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) &&
+ N1.getNode()->getValueType(0) == MVT::i32) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val)) {
+ SDValue TargetConst = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::i32,
+ MVT::Other, Base, TargetConst,
+ Chain);
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::SXTW, dl, MVT::i64,
+ SDValue(Result_1, 0));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_2, 0),
+ SDValue(Result_1, 1),
+ SDValue(Result_1, 2)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_2;
+ }
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
+ MVT::Other, Base, TargetConst0,
+ Chain);
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::SXTW, dl,
+ MVT::i64, SDValue(Result_1, 0));
+ SDNode* Result_3 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl,
+ MVT::i32, Base, TargetConstVal,
+ SDValue(Result_1, 1));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_2, 0),
+ SDValue(Result_3, 0),
+ SDValue(Result_1, 1)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_2;
+ }
+ return SelectCode(LD);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectIndexedLoadZeroExtend64(LoadSDNode *LD,
+ unsigned Opcode,
+ DebugLoc dl)
+{
+ SDValue Chain = LD->getChain();
+ EVT LoadedVT = LD->getMemoryVT();
+ SDValue Base = LD->getBasePtr();
+ SDValue Offset = LD->getOffset();
+ SDNode *OffsetNode = Offset.getNode();
+ int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ SDValue N1 = LD->getOperand(1);
+ SDValue CPTmpN1_0;
+ SDValue CPTmpN1_1;
+ if (SelectADDRriS11_2(N1, CPTmpN1_0, CPTmpN1_1) &&
+ N1.getNode()->getValueType(0) == MVT::i32) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val)) {
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
+ MVT::i32, MVT::Other, Base,
+ TargetConstVal, Chain);
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ TargetConst0);
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ MVT::i64, MVT::Other,
+ SDValue(Result_2,0),
+ SDValue(Result_1,0));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_3, 0),
+ SDValue(Result_1, 1),
+ SDValue(Result_1, 2)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_3;
+ }
+
+ // Generate an indirect load.
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
+ MVT::Other,
+ Base, TargetConst0, Chain);
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ TargetConst0);
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ MVT::i64, MVT::Other,
+ SDValue(Result_2,0),
+ SDValue(Result_1,0));
+ // Add offset to base.
+ SDNode* Result_4 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ Base, TargetConstVal,
+ SDValue(Result_1, 1));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_3, 0), // Load value.
+ SDValue(Result_4, 0), // New address.
+ SDValue(Result_1, 1)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_3;
+ }
+
+ return SelectCode(LD);
+}
+
+
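+// Select an indexed load. The opcode is chosen from the memory type and
+// from whether the offset is a valid auto-increment immediate; loads that
+// extend to i64 are dispatched to the helpers above.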
+SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) {
+ SDValue Chain = LD->getChain();
+ SDValue Base = LD->getBasePtr();
+ SDValue Offset = LD->getOffset();
+ SDNode *OffsetNode = Offset.getNode();
+ // Get the constant value.
+ int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ EVT LoadedVT = LD->getMemoryVT();
+ unsigned Opcode = 0;
+
+ // Check for zero ext loads.
+ bool zextval = (LD->getExtensionType() == ISD::ZEXTLOAD);
+
+ // Figure out the opcode.
+ if (LoadedVT == MVT::i64) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val))
+ Opcode = Hexagon::POST_LDrid;
+ else
+ Opcode = Hexagon::LDrid;
+ } else if (LoadedVT == MVT::i32) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val))
+ Opcode = Hexagon::POST_LDriw;
+ else
+ Opcode = Hexagon::LDriw;
+ } else if (LoadedVT == MVT::i16) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val))
+ Opcode = zextval ? Hexagon::POST_LDriuh : Hexagon::POST_LDrih;
+ else
+ Opcode = zextval ? Hexagon::LDriuh : Hexagon::LDrih;
+ } else if (LoadedVT == MVT::i8) {
+ if (TII->isValidAutoIncImm(LoadedVT, Val))
+ Opcode = zextval ? Hexagon::POST_LDriub : Hexagon::POST_LDrib;
+ else
+ Opcode = zextval ? Hexagon::LDriub : Hexagon::LDrib;
+ } else
+    llvm_unreachable("unknown memory type");
+
+ // For zero ext i64 loads, we need to add combine instructions.
+ if (LD->getValueType(0) == MVT::i64 &&
+ LD->getExtensionType() == ISD::ZEXTLOAD) {
+ return SelectIndexedLoadZeroExtend64(LD, Opcode, dl);
+ }
+ if (LD->getValueType(0) == MVT::i64 &&
+ LD->getExtensionType() == ISD::SEXTLOAD) {
+ // Handle sign ext i64 loads.
+ return SelectIndexedLoadSignExtend64(LD, Opcode, dl);
+ }
+ if (TII->isValidAutoIncImm(LoadedVT, Val)) {
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
+ LD->getValueType(0),
+ MVT::i32, MVT::Other, Base,
+ TargetConstVal, Chain);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result, 0),
+ SDValue(Result, 1),
+ SDValue(Result, 2)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result;
+ } else {
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl,
+ LD->getValueType(0),
+ MVT::Other, Base, TargetConst0,
+ Chain);
+ SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ Base, TargetConstVal,
+ SDValue(Result_1, 1));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = LD->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+ const SDValue Froms[] = { SDValue(LD, 0),
+ SDValue(LD, 1),
+ SDValue(LD, 2)
+ };
+ const SDValue Tos[] = { SDValue(Result_1, 0),
+ SDValue(Result_2, 0),
+ SDValue(Result_1, 1)
+ };
+ ReplaceUses(Froms, Tos, 3);
+ return Result_1;
+ }
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
+ SDNode *result;
+ DebugLoc dl = N->getDebugLoc();
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ ISD::MemIndexedMode AM = LD->getAddressingMode();
+
+ // Handle indexed loads.
+ if (AM != ISD::UNINDEXED) {
+ result = SelectIndexedLoad(LD, dl);
+ } else {
+ result = SelectBaseOffsetLoad(LD, dl);
+ }
+
+ return result;
+}
+
+
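+// Select an indexed store. If the offset is a valid auto-increment
+// immediate, fold it into a post-increment store; otherwise emit a plain
+// store plus a separate ADD_ri that produces the updated address.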
+SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) {
+ SDValue Chain = ST->getChain();
+ SDValue Base = ST->getBasePtr();
+ SDValue Offset = ST->getOffset();
+ SDValue Value = ST->getValue();
+ SDNode *OffsetNode = Offset.getNode();
+ // Get the constant value.
+ int32_t Val = cast<ConstantSDNode>(OffsetNode)->getSExtValue();
+ EVT StoredVT = ST->getMemoryVT();
+
+ // Offset value must be within representable range
+ // and must have correct alignment properties.
+ if (TII->isValidAutoIncImm(StoredVT, Val)) {
+ SDValue Ops[] = { Value, Base,
+ CurDAG->getTargetConstant(Val, MVT::i32), Chain};
+ unsigned Opcode = 0;
+
+ // Figure out the post inc version of opcode.
+ if (StoredVT == MVT::i64) Opcode = Hexagon::POST_STdri;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::POST_STwri;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::POST_SThri;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::POST_STbri;
+    else llvm_unreachable("unknown memory type");
+
+ // Build post increment store.
+ SDNode* Result = CurDAG->getMachineNode(Opcode, dl, MVT::i32,
+ MVT::Other, Ops, 4);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = ST->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+
+ ReplaceUses(ST, Result);
+ ReplaceUses(SDValue(ST,1), SDValue(Result,1));
+ return Result;
+ }
+
+ // Note: Order of operands matches the def of instruction:
+ // def STrid : STInst<(outs), (ins MEMri:$addr, DoubleRegs:$src1), ...
+  //       and it differs for POST_ST*, for instance.
+ SDValue Ops[] = { Base, CurDAG->getTargetConstant(0, MVT::i32), Value,
+ Chain};
+ unsigned Opcode = 0;
+
+ // Figure out the opcode.
+ if (StoredVT == MVT::i64) Opcode = Hexagon::STrid;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib;
+  else llvm_unreachable("unknown memory type");
+
+ // Build regular store.
+ SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ SDNode* Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops,
+ 4);
+  // Build the split increment instruction.
+ SDNode* Result_2 = CurDAG->getMachineNode(Hexagon::ADD_ri, dl, MVT::i32,
+ Base,
+ TargetConstVal,
+ SDValue(Result_1, 0));
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = ST->getMemOperand();
+ cast<MachineSDNode>(Result_1)->setMemRefs(MemOp, MemOp + 1);
+
+ ReplaceUses(SDValue(ST,0), SDValue(Result_2,0));
+ ReplaceUses(SDValue(ST,1), SDValue(Result_1,0));
+ return Result_2;
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST,
+ DebugLoc dl) {
+ SDValue Chain = ST->getChain();
+ SDNode* Const32 = ST->getBasePtr().getNode();
+ SDValue Value = ST->getValue();
+ unsigned Opcode = 0;
+
+  // Try to lower stores of GlobalAddresses into indexed stores. Custom
+  // lowering for GlobalAddress nodes has already turned it into a
+  // CONST32. Avoid truncating stores for the moment; the post-inc store
+  // lowering does the same. There does not appear to be a reason for this
+  // restriction, so a bug will be filed to fix it.
+ if ((Const32->getOpcode() == HexagonISD::CONST32) &&
+ !(Value.getValueType() == MVT::i64 && ST->isTruncatingStore())) {
+ SDValue Base = Const32->getOperand(0);
+ if (Base.getOpcode() == ISD::TargetGlobalAddress) {
+ EVT StoredVT = ST->getMemoryVT();
+ int64_t Offset = cast<GlobalAddressSDNode>(Base)->getOffset();
+ if (Offset != 0 && OffsetFitsS11(StoredVT, Offset)) {
+ MVT PointerTy = TLI.getPointerTy();
+ const GlobalValue* GV =
+ cast<GlobalAddressSDNode>(Base)->getGlobal();
+ SDValue TargAddr =
+ CurDAG->getTargetGlobalAddress(GV, dl, PointerTy, 0);
+ SDNode* NewBase = CurDAG->getMachineNode(Hexagon::CONST32_set,
+ dl, PointerTy,
+ TargAddr);
+
+ // Figure out base + offset opcode
+ if (StoredVT == MVT::i64) Opcode = Hexagon::STrid_indexed;
+ else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed;
+ else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih_indexed;
+ else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib_indexed;
+        else llvm_unreachable("unknown memory type");
+
+ SDValue Ops[] = {SDValue(NewBase,0),
+ CurDAG->getTargetConstant(Offset,PointerTy),
+ Value, Chain};
+        // Build the indexed store.
+ SDNode* Result = CurDAG->getMachineNode(Opcode, dl,
+ MVT::Other, Ops, 4);
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = ST->getMemOperand();
+ cast<MachineSDNode>(Result)->setMemRefs(MemOp, MemOp + 1);
+ ReplaceUses(ST, Result);
+ return Result;
+ }
+ }
+ }
+
+ return SelectCode(ST);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+
+ // Handle indexed stores.
+ if (AM != ISD::UNINDEXED) {
+ return SelectIndexedStore(ST, dl);
+ }
+
+ return SelectBaseOffsetStore(ST, dl);
+}
+
+SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+
+ //
+ // %conv.i = sext i32 %tmp1 to i64
+ // %conv2.i = sext i32 %add to i64
+ // %mul.i = mul nsw i64 %conv2.i, %conv.i
+ //
+ // --- match with the following ---
+ //
+ // %mul.i = mpy (%tmp1, %add)
+ //
+
+ if (N->getValueType(0) == MVT::i64) {
+ // Shifting a i64 signed multiply.
+ SDValue MulOp0 = N->getOperand(0);
+ SDValue MulOp1 = N->getOperand(1);
+
+ SDValue OP0;
+ SDValue OP1;
+
+ // Handle sign_extend and sextload.
+ if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Sext0 = MulOp0.getOperand(0);
+ if (Sext0.getNode()->getValueType(0) != MVT::i32) {
+        return SelectCode(N);
+ }
+
+ OP0 = Sext0;
+ } else if (MulOp0.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(MulOp0.getNode());
+ if (LD->getMemoryVT() != MVT::i32 ||
+ LD->getExtensionType() != ISD::SEXTLOAD ||
+ LD->getAddressingMode() != ISD::UNINDEXED) {
+        return SelectCode(N);
+ }
+
+ SDValue Chain = LD->getChain();
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ OP0 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ MVT::Other,
+ LD->getBasePtr(), TargetConst0,
+ Chain), 0);
+ } else {
+ return SelectCode(N);
+ }
+
+ // Same goes for the second operand.
+ if (MulOp1.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Sext1 = MulOp1.getOperand(0);
+ if (Sext1.getNode()->getValueType(0) != MVT::i32) {
+ return SelectCode(N);
+ }
+
+ OP1 = Sext1;
+ } else if (MulOp1.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(MulOp1.getNode());
+ if (LD->getMemoryVT() != MVT::i32 ||
+ LD->getExtensionType() != ISD::SEXTLOAD ||
+ LD->getAddressingMode() != ISD::UNINDEXED) {
+ return SelectCode(N);
+ }
+
+ SDValue Chain = LD->getChain();
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ OP1 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ MVT::Other,
+ LD->getBasePtr(), TargetConst0,
+ Chain), 0);
+ } else {
+ return SelectCode(N);
+ }
+
+ // Generate a mpy instruction.
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::MPY64, dl, MVT::i64,
+ OP0, OP1);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+
+ return SelectCode(N);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectSelect(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ SDValue N0 = N->getOperand(0);
+ if (N0.getOpcode() == ISD::SETCC) {
+ SDValue N00 = N0.getOperand(0);
+ if (N00.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+ SDValue N000 = N00.getOperand(0);
+ SDValue N001 = N00.getOperand(1);
+ if (cast<VTSDNode>(N001)->getVT() == MVT::i16) {
+ SDValue N01 = N0.getOperand(1);
+ SDValue N02 = N0.getOperand(2);
+
+ // Pattern: (select:i32 (setcc:i1 (sext_inreg:i32 IntRegs:i32:$src2,
+ // i16:Other),IntRegs:i32:$src1, SETLT:Other),IntRegs:i32:$src1,
+ // IntRegs:i32:$src2)
+ // Emits: (MAXh_rr:i32 IntRegs:i32:$src1, IntRegs:i32:$src2)
+ // Pattern complexity = 9 cost = 1 size = 0.
+ if (cast<CondCodeSDNode>(N02)->get() == ISD::SETLT) {
+ SDValue N1 = N->getOperand(1);
+ if (N01 == N1) {
+ SDValue N2 = N->getOperand(2);
+ if (N000 == N2 &&
+ N0.getNode()->getValueType(N0.getResNo()) == MVT::i1 &&
+ N00.getNode()->getValueType(N00.getResNo()) == MVT::i32) {
+ SDNode *SextNode = CurDAG->getMachineNode(Hexagon::SXTH, dl,
+ MVT::i32, N000);
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::MAXw_rr, dl,
+ MVT::i32,
+ SDValue(SextNode, 0),
+ N1);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+ }
+
+ // Pattern: (select:i32 (setcc:i1 (sext_inreg:i32 IntRegs:i32:$src2,
+ // i16:Other), IntRegs:i32:$src1, SETGT:Other), IntRegs:i32:$src1,
+ // IntRegs:i32:$src2)
+ // Emits: (MINh_rr:i32 IntRegs:i32:$src1, IntRegs:i32:$src2)
+ // Pattern complexity = 9 cost = 1 size = 0.
+ if (cast<CondCodeSDNode>(N02)->get() == ISD::SETGT) {
+ SDValue N1 = N->getOperand(1);
+ if (N01 == N1) {
+ SDValue N2 = N->getOperand(2);
+ if (N000 == N2 &&
+ N0.getNode()->getValueType(N0.getResNo()) == MVT::i1 &&
+ N00.getNode()->getValueType(N00.getResNo()) == MVT::i32) {
+ SDNode *SextNode = CurDAG->getMachineNode(Hexagon::SXTH, dl,
+ MVT::i32, N000);
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::MINw_rr, dl,
+ MVT::i32,
+ SDValue(SextNode, 0),
+ N1);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return SelectCode(N);
+}
+
+
+SDNode *HexagonDAGToDAGISel::SelectTruncate(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ SDValue Shift = N->getOperand(0);
+
+ //
+ // %conv.i = sext i32 %tmp1 to i64
+ // %conv2.i = sext i32 %add to i64
+ // %mul.i = mul nsw i64 %conv2.i, %conv.i
+ // %shr5.i = lshr i64 %mul.i, 32
+ // %conv3.i = trunc i64 %shr5.i to i32
+ //
+ // --- match with the following ---
+ //
+ // %conv3.i = mpy (%tmp1, %add)
+ //
+ // Trunc to i32.
+ if (N->getValueType(0) == MVT::i32) {
+ // Trunc from i64.
+ if (Shift.getNode()->getValueType(0) == MVT::i64) {
+ // Trunc child is logical shift right.
+ if (Shift.getOpcode() != ISD::SRL) {
+ return SelectCode(N);
+ }
+
+ SDValue ShiftOp0 = Shift.getOperand(0);
+ SDValue ShiftOp1 = Shift.getOperand(1);
+
+ // Shift by const 32
+ if (ShiftOp1.getOpcode() != ISD::Constant) {
+ return SelectCode(N);
+ }
+
+ int32_t ShiftConst =
+ cast<ConstantSDNode>(ShiftOp1.getNode())->getSExtValue();
+ if (ShiftConst != 32) {
+ return SelectCode(N);
+ }
+
+ // Shifting a i64 signed multiply
+ SDValue Mul = ShiftOp0;
+ if (Mul.getOpcode() != ISD::MUL) {
+ return SelectCode(N);
+ }
+
+ SDValue MulOp0 = Mul.getOperand(0);
+ SDValue MulOp1 = Mul.getOperand(1);
+
+ SDValue OP0;
+ SDValue OP1;
+
+ // Handle sign_extend and sextload
+ if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Sext0 = MulOp0.getOperand(0);
+ if (Sext0.getNode()->getValueType(0) != MVT::i32) {
+ return SelectCode(N);
+ }
+
+ OP0 = Sext0;
+ } else if (MulOp0.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(MulOp0.getNode());
+ if (LD->getMemoryVT() != MVT::i32 ||
+ LD->getExtensionType() != ISD::SEXTLOAD ||
+ LD->getAddressingMode() != ISD::UNINDEXED) {
+ return SelectCode(N);
+ }
+
+ SDValue Chain = LD->getChain();
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ OP0 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ MVT::Other,
+ LD->getBasePtr(),
+ TargetConst0, Chain), 0);
+ } else {
+ return SelectCode(N);
+ }
+
+ // Same goes for the second operand.
+ if (MulOp1.getOpcode() == ISD::SIGN_EXTEND) {
+ SDValue Sext1 = MulOp1.getOperand(0);
+ if (Sext1.getNode()->getValueType(0) != MVT::i32)
+ return SelectCode(N);
+
+ OP1 = Sext1;
+ } else if (MulOp1.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = cast<LoadSDNode>(MulOp1.getNode());
+ if (LD->getMemoryVT() != MVT::i32 ||
+ LD->getExtensionType() != ISD::SEXTLOAD ||
+ LD->getAddressingMode() != ISD::UNINDEXED) {
+ return SelectCode(N);
+ }
+
+ SDValue Chain = LD->getChain();
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ OP1 = SDValue (CurDAG->getMachineNode(Hexagon::LDriw, dl, MVT::i32,
+ MVT::Other,
+ LD->getBasePtr(),
+ TargetConst0, Chain), 0);
+ } else {
+ return SelectCode(N);
+ }
+
+ // Generate a mpy instruction.
+ SDNode *Result = CurDAG->getMachineNode(Hexagon::MPY, dl, MVT::i32,
+ OP0, OP1);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+
+ return SelectCode(N);
+}
+
+
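+// Fold a constant left shift into a multiply by immediate. For example,
+// (shl (mul x, c), s) becomes (mpyi x, c << s), and
+// (shl (sub 0, (shl x, s2)), s1) becomes (mpyi x, -(1 << (s1 + s2))),
+// provided the combined constant fits in MPYI_ri's signed 9-bit immediate.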
+SDNode *HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ if (N->getValueType(0) == MVT::i32) {
+ SDValue Shl_0 = N->getOperand(0);
+ SDValue Shl_1 = N->getOperand(1);
+ // RHS is const.
+ if (Shl_1.getOpcode() == ISD::Constant) {
+ if (Shl_0.getOpcode() == ISD::MUL) {
+ SDValue Mul_0 = Shl_0.getOperand(0); // Val
+ SDValue Mul_1 = Shl_0.getOperand(1); // Const
+ // RHS of mul is const.
+ if (Mul_1.getOpcode() == ISD::Constant) {
+ int32_t ShlConst =
+ cast<ConstantSDNode>(Shl_1.getNode())->getSExtValue();
+ int32_t MulConst =
+ cast<ConstantSDNode>(Mul_1.getNode())->getSExtValue();
+ int32_t ValConst = MulConst << ShlConst;
+ SDValue Val = CurDAG->getTargetConstant(ValConst,
+ MVT::i32);
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val.getNode()))
+ if (isInt<9>(CN->getSExtValue())) {
+ SDNode* Result =
+ CurDAG->getMachineNode(Hexagon::MPYI_ri, dl,
+ MVT::i32, Mul_0, Val);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+
+ }
+ } else if (Shl_0.getOpcode() == ISD::SUB) {
+ SDValue Sub_0 = Shl_0.getOperand(0); // Const 0
+ SDValue Sub_1 = Shl_0.getOperand(1); // Val
+ if (Sub_0.getOpcode() == ISD::Constant) {
+ int32_t SubConst =
+ cast<ConstantSDNode>(Sub_0.getNode())->getSExtValue();
+ if (SubConst == 0) {
+ if (Sub_1.getOpcode() == ISD::SHL) {
+ SDValue Shl2_0 = Sub_1.getOperand(0); // Val
+ SDValue Shl2_1 = Sub_1.getOperand(1); // Const
+ if (Shl2_1.getOpcode() == ISD::Constant) {
+ int32_t ShlConst =
+ cast<ConstantSDNode>(Shl_1.getNode())->getSExtValue();
+ int32_t Shl2Const =
+ cast<ConstantSDNode>(Shl2_1.getNode())->getSExtValue();
+ int32_t ValConst = 1 << (ShlConst+Shl2Const);
+ SDValue Val = CurDAG->getTargetConstant(-ValConst, MVT::i32);
+ if (ConstantSDNode *CN =
+ dyn_cast<ConstantSDNode>(Val.getNode()))
+ if (isInt<9>(CN->getSExtValue())) {
+ SDNode* Result =
+ CurDAG->getMachineNode(Hexagon::MPYI_ri, dl, MVT::i32,
+ Shl2_0, Val);
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return SelectCode(N);
+}
+
+
+//
+// If a zero_extend follows an intrinsic in the DAG (meaning the result of
+// the intrinsic is a predicate), convert the zero_extend into a transfer
+// instruction.
+//
+// The zero_extend -> transfer lowering is done here. Otherwise, the
+// zero_extend would be converted into a MUX, since predicate registers are
+// defined as 1 bit in the compiler while the architecture defines them as
+// 8-bit registers. We want to preserve all of the lower 8 bits, not just
+// the LSB.
+//
+SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ SDNode *IsIntrinsic = N->getOperand(0).getNode();
+ if ((IsIntrinsic->getOpcode() == ISD::INTRINSIC_WO_CHAIN)) {
+ unsigned ID =
+ cast<ConstantSDNode>(IsIntrinsic->getOperand(0))->getZExtValue();
+ if (doesIntrinsicReturnPredicate(ID)) {
+ // Now we need to differentiate target data types.
+ if (N->getValueType(0) == MVT::i64) {
+ // Convert the zero_extend to Rs = Pd followed by COMBINE_rr(0,Rs).
+ SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32);
+ SDNode *Result_1 = CurDAG->getMachineNode(Hexagon::TFR_RsPd, dl,
+ MVT::i32,
+ SDValue(IsIntrinsic, 0));
+ SDNode *Result_2 = CurDAG->getMachineNode(Hexagon::TFRI, dl,
+ MVT::i32,
+ TargetConst0);
+ SDNode *Result_3 = CurDAG->getMachineNode(Hexagon::COMBINE_rr, dl,
+ MVT::i64, MVT::Other,
+ SDValue(Result_2, 0),
+ SDValue(Result_1, 0));
+ ReplaceUses(N, Result_3);
+ return Result_3;
+ }
+ if (N->getValueType(0) == MVT::i32) {
+ // Convert the zero_extend to Rs = Pd
+ SDNode* RsPd = CurDAG->getMachineNode(Hexagon::TFR_RsPd, dl,
+ MVT::i32,
+ SDValue(IsIntrinsic, 0));
+ ReplaceUses(N, RsPd);
+ return RsPd;
+ }
+ llvm_unreachable("Unexpected value type");
+ }
+ }
+ return SelectCode(N);
+}
+
+
+//
+// Checking for intrinsics which have predicate registers as operand(s)
+// and lowering to the actual intrinsic.
+//
+SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ unsigned ID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned IntrinsicWithPred = doesIntrinsicContainPredicate(ID);
+
+ // We are concerned with only those intrinsics that have predicate registers
+ // as at least one of the operands.
+ if (IntrinsicWithPred) {
+ SmallVector<SDValue, 8> Ops;
+ const MCInstrDesc &MCID = TII->get(IntrinsicWithPred);
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+
+ // Iterate over all the operands of the intrinsics.
+ // For PredRegs, do the transfer.
+ // For Double/Int Regs, just preserve the value
+ // For immediates, lower it.
+ for (unsigned i = 1; i < N->getNumOperands(); ++i) {
+ SDNode *Arg = N->getOperand(i).getNode();
+ const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
+
+ if (RC == Hexagon::IntRegsRegisterClass ||
+ RC == Hexagon::DoubleRegsRegisterClass) {
+ Ops.push_back(SDValue(Arg, 0));
+ } else if (RC == Hexagon::PredRegsRegisterClass) {
+ // Do the transfer.
+ SDNode *PdRs = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1,
+ SDValue(Arg, 0));
+ Ops.push_back(SDValue(PdRs,0));
+ } else if (RC == NULL && (dyn_cast<ConstantSDNode>(Arg) != NULL)) {
+        // This is an immediate operand. Lower it here, making sure that we
+        // really do have a ConstantSDNode for the immediate value.
+ int32_t Val = cast<ConstantSDNode>(Arg)->getSExtValue();
+ SDValue SDVal = CurDAG->getTargetConstant(Val, MVT::i32);
+ Ops.push_back(SDVal);
+ } else {
+ llvm_unreachable("Unimplemented");
+ }
+ }
+ EVT ReturnValueVT = N->getValueType(0);
+ SDNode *Result = CurDAG->getMachineNode(IntrinsicWithPred, dl,
+ ReturnValueVT,
+ Ops.data(), Ops.size());
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ return SelectCode(N);
+}
+
+
+//
+// Map predicate true (encoded as -1 in LLVM) to an XOR.
+//
+SDNode *HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ if (N->getValueType(0) == MVT::i1) {
+ SDNode* Result;
+ int32_t Val = cast<ConstantSDNode>(N)->getSExtValue();
+ if (Val == -1) {
+      // Create the IntReg = 0 node.
+ SDNode* IntRegTFR =
+ CurDAG->getMachineNode(Hexagon::TFRI, dl, MVT::i32,
+ CurDAG->getTargetConstant(0, MVT::i32));
+
+ // Pd = IntReg
+ SDNode* Pd = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1,
+ SDValue(IntRegTFR, 0));
+
+ // not(Pd)
+ SDNode* NotPd = CurDAG->getMachineNode(Hexagon::NOT_p, dl, MVT::i1,
+ SDValue(Pd, 0));
+
+      // xor(Pd, not(Pd))
+ Result = CurDAG->getMachineNode(Hexagon::XOR_pp, dl, MVT::i1,
+ SDValue(Pd, 0), SDValue(NotPd, 0));
+
+ // We have just built:
+      //   Rs = #0
+      //   Pd = Rs
+      //   Pd = xor(Pd, not(Pd))
+
+ ReplaceUses(N, Result);
+ return Result;
+ }
+ }
+
+ return SelectCode(N);
+}
+
+
+//
+// Map add followed by an asr -> asr +=.
+//
+SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) {
+ DebugLoc dl = N->getDebugLoc();
+ if (N->getValueType(0) != MVT::i32) {
+ return SelectCode(N);
+ }
+ // Identify nodes of the form: add(asr(...)).
+ SDNode* Src1 = N->getOperand(0).getNode();
+ if (Src1->getOpcode() != ISD::SRA || !Src1->hasOneUse()
+ || Src1->getValueType(0) != MVT::i32) {
+ return SelectCode(N);
+ }
+
+ // Build Rd = Rd' + asr(Rs, Rt). The machine constraints will ensure that
+  // Rd and Rd' are assigned to the same register.
+ SDNode* Result = CurDAG->getMachineNode(Hexagon::ASR_rr_acc, dl, MVT::i32,
+ N->getOperand(1),
+ Src1->getOperand(0),
+ Src1->getOperand(1));
+ ReplaceUses(N, Result);
+
+ return Result;
+}
+
+
+SDNode *HexagonDAGToDAGISel::Select(SDNode *N) {
+ if (N->isMachineOpcode())
+ return NULL; // Already selected.
+
+
+ switch (N->getOpcode()) {
+ case ISD::Constant:
+ return SelectConstant(N);
+
+ case ISD::ADD:
+ return SelectAdd(N);
+
+ case ISD::SHL:
+ return SelectSHL(N);
+
+ case ISD::LOAD:
+ return SelectLoad(N);
+
+ case ISD::STORE:
+ return SelectStore(N);
+
+ case ISD::SELECT:
+ return SelectSelect(N);
+
+ case ISD::TRUNCATE:
+ return SelectTruncate(N);
+
+ case ISD::MUL:
+ return SelectMul(N);
+
+ case ISD::ZERO_EXTEND:
+ return SelectZeroExtend(N);
+
+ case ISD::INTRINSIC_WO_CHAIN:
+ return SelectIntrinsicWOChain(N);
+ }
+
+ return SelectCode(N);
+}
+
+
+//
+// Hexagon_TODO: Five functions for ADDRri?! Surely there must be a better way
+// to define these instructions.
+//
+bool HexagonDAGToDAGISel::SelectADDRri(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+}
+
+
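+// The SelectADDRri variants below differ only in the range predicate they
+// apply to the offset: going by the Hexagon naming, S11_n is a signed
+// 11-bit immediate and U6_n an unsigned 6-bit immediate, each scaled by
+// 2^n.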
+bool HexagonDAGToDAGISel::SelectADDRriS11_0(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_0_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_0_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriS11_1(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_1_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_1_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriS11_2(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_2_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_2_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriU6_0(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_0_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_0_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriU6_1(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_1_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_1_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriU6_2(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_2_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsU6_2_Offset(Offset.getNode()));
+}
+
+
+bool HexagonDAGToDAGISel::SelectMEMriS11_2(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+  // Whether or not Addr is an ISD::ADD, the same reg+imm selection applies.
+  return SelectADDRriS11_2(Addr, Base, Offset);
+}
+
+
+bool HexagonDAGToDAGISel::SelectADDRriS11_3(SDValue& Addr, SDValue &Base,
+ SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_3_Offset(Offset.getNode()));
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return (IsS11_3_Offset(Offset.getNode()));
+}
+
+bool HexagonDAGToDAGISel::SelectADDRrr(SDValue &Addr, SDValue &R1,
+ SDValue &R2) {
+ if (Addr.getOpcode() == ISD::FrameIndex) return false;
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (Addr.getOpcode() == ISD::ADD) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+ if (isInt<13>(CN->getSExtValue()))
+ return false; // Let the reg+imm pattern catch this!
+ R1 = Addr.getOperand(0);
+ R2 = Addr.getOperand(1);
+ return true;
+ }
+
+ R1 = Addr;
+
+ return true;
+}
+
+
+// Handle the generic address case. It is accessed from inline asm "=m"
+// constraints, which could have any kind of pointer.
+bool HexagonDAGToDAGISel::SelectAddr(SDNode *Op, SDValue Addr,
+ SDValue &Base, SDValue &Offset) {
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress)
+ return false; // Direct calls.
+
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+
+ if (Addr.getOpcode() == ISD::ADD) {
+ Base = Addr.getOperand(0);
+ Offset = Addr.getOperand(1);
+ return true;
+ }
+
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+}
+
+
+bool HexagonDAGToDAGISel::
+SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
+ std::vector<SDValue> &OutOps) {
+ SDValue Op0, Op1;
+
+ switch (ConstraintCode) {
+ case 'o': // Offsetable.
+ case 'v': // Not offsetable.
+ default: return true;
+ case 'm': // Memory.
+ if (!SelectAddr(Op.getNode(), Op, Op0, Op1))
+ return true;
+ break;
+ }
+
+ OutOps.push_back(Op0);
+ OutOps.push_back(Op1);
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
new file mode 100644
index 0000000..d6da0d0
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -0,0 +1,1496 @@
+//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the interfaces that Hexagon uses to lower LLVM code
+// into a selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonISelLowering.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "HexagonTargetObjectFile.h"
+#include "HexagonSubtarget.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/GlobalAlias.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/CallingConv.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/CommandLine.h"
+using namespace llvm;
+
+const unsigned Hexagon_MAX_RET_SIZE = 64;
+
+static cl::opt<bool>
+EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
+ cl::desc("Control jump table emission on Hexagon target"));
+
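+// Number of named (fixed) parameters of the callee of the call currently
+// being lowered, or -1 if unknown. Set in LowerCall and consulted by
+// CC_Hexagon_VarArg to tell named arguments apart from variadic ones.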
+int NumNamedVarArgParams = -1;
+
+// Implement calling convention for Hexagon.
+static bool
+CC_Hexagon(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+CC_Hexagon32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+CC_Hexagon64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+RetCC_Hexagon(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+static bool
+CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+  // NumNamedVarArgParams cannot be zero for a vararg function.
+  assert((NumNamedVarArgParams > 0) &&
+         "NumNamedVarArgParams must be positive for a vararg function");
+
+ if ( (int)ValNo < NumNamedVarArgParams ) {
+ // Deal with named arguments.
+ return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
+ }
+
+  // Deal with unnamed arguments.
+ unsigned ofst;
+ if (ArgFlags.isByVal()) {
+ // If pass-by-value, the size allocated on stack is decided
+ // by ArgFlags.getByValSize(), not by the size of LocVT.
+ assert ((ArgFlags.getByValSize() > 8) &&
+ "ByValSize must be bigger than 8 bytes");
+ ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
+ return false;
+ }
+ if (LocVT == MVT::i32) {
+ ofst = State.AllocateStack(4, 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
+ return false;
+ }
+ if (LocVT == MVT::i64) {
+ ofst = State.AllocateStack(8, 8);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
+ return false;
+ }
+  llvm_unreachable("unhandled argument type in CC_Hexagon_VarArg");
+}
+
+
+static bool
+CC_Hexagon (unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+ if (ArgFlags.isByVal()) {
+ // Passed on stack.
+ assert ((ArgFlags.getByValSize() > 8) &&
+ "ByValSize must be bigger than 8 bytes");
+ unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+ }
+
+ if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
+ LocVT = MVT::i32;
+ ValVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExt;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExt;
+ else
+ LocInfo = CCValAssign::AExt;
+ }
+
+ if (LocVT == MVT::i32) {
+ if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+ }
+
+ if (LocVT == MVT::i64) {
+ if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+ }
+
+ return true; // CC didn't match.
+}
+
+
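+// 32-bit values are passed in R0-R5; once those are exhausted, each
+// further argument gets a 4-byte aligned stack slot.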
+static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+ static const uint16_t RegList[] = {
+ Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
+ Hexagon::R5
+ };
+ if (unsigned Reg = State.AllocateReg(RegList, 6)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ unsigned Offset = State.AllocateStack(4, 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
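+// 64-bit values are passed in the register pairs D0-D2; the second list
+// below shadows the overlapping R registers so they are marked as used.
+// Once the pairs are exhausted, arguments go to 8-byte aligned stack
+// slots.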
+static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+ if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ static const uint16_t RegList1[] = {
+ Hexagon::D1, Hexagon::D2
+ };
+ static const uint16_t RegList2[] = {
+ Hexagon::R1, Hexagon::R3
+ };
+ if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+
+ unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
+static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+
+ if (LocVT == MVT::i1 ||
+ LocVT == MVT::i8 ||
+ LocVT == MVT::i16) {
+ LocVT = MVT::i32;
+ ValVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExt;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExt;
+ else
+ LocInfo = CCValAssign::AExt;
+ }
+
+ if (LocVT == MVT::i32) {
+ if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+ }
+
+ if (LocVT == MVT::i64) {
+ if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+ }
+
+ return true; // CC didn't match.
+}
+
+static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+ if (LocVT == MVT::i32) {
+ if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
+ unsigned Offset = State.AllocateStack(4, 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
+static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ if (LocVT == MVT::i64) {
+ if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
+ unsigned Offset = State.AllocateStack(8, 8);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return false;
+}
+
+SDValue
+HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
+const {
+ return SDValue();
+}
+
+/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
+/// by "Src" to address "Dst" of size "Size". Alignment information is
+/// specified by the specific parameter attribute. The copy will be passed as
+/// a byval function parameter. Sometimes what we are copying is the end of a
+/// larger object, the part that does not fit in registers.
+static SDValue
+CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
+ ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
+ DebugLoc dl) {
+
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+ return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ MachinePointerInfo(), MachinePointerInfo());
+}
+
+
+// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
+// passed by value, the function prototype is modified to return void and
+// the value is stored in memory pointed to by a pointer passed by the caller.
+SDValue
+HexagonTargetLowering::LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const {
+
+ // CCValAssign - represent the assignment of the return value to locations.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ // CCState - Info about the registers and stack slot.
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), RVLocs, *DAG.getContext());
+
+ // Analyze return values of ISD::RET
+ CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
+
+ // If this is the first return lowered for this function, add the regs to the
+ // liveout set for the function.
+ if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
+ for (unsigned i = 0; i != RVLocs.size(); ++i)
+ if (RVLocs[i].isRegLoc())
+ DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
+ }
+
+ SDValue Flag;
+ // Copy the result values into the output registers.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
+
+ // Guarantee that all emitted copies are stuck together with flags.
+ Flag = Chain.getValue(1);
+ }
+
+ if (Flag.getNode())
+ return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+
+ return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, Chain);
+}
+
+
+
+
+/// LowerCallResult - Lower the result values of an ISD::CALL into the
+/// appropriate copies out of appropriate physical registers. This assumes that
+/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
+/// being lowered. Returns an SDNode with the same number of values as the
+/// ISD::CALL.
+SDValue
+HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
+ CallingConv::ID CallConv, bool isVarArg,
+ const
+ SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDValue Callee) const {
+
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), RVLocs, *DAG.getContext());
+
+ CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ Chain = DAG.getCopyFromReg(Chain, dl,
+ RVLocs[i].getLocReg(),
+ RVLocs[i].getValVT(), InFlag).getValue(1);
+ InFlag = Chain.getValue(2);
+ InVals.push_back(Chain.getValue(0));
+ }
+
+ return Chain;
+}
+
+/// LowerCall - Function arguments are copied from virtual regs to
+/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
+SDValue
+HexagonTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool doesNotRet, bool &isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+
+ bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
+
+ // Check for varargs.
+ NumNamedVarArgParams = -1;
+ if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
+ {
+ const Function* CalleeFn = NULL;
+ Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
+ if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
+ {
+      // A vararg function with zero named arguments is disallowed, so a
+      // callee that appears that way must really be an undeclared
+      // function. Do not assume varargs if the callee is undefined.
+ if (CalleeFn->isVarArg() &&
+ CalleeFn->getFunctionType()->getNumParams() != 0) {
+ NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
+ }
+ }
+ }
+
+ if (NumNamedVarArgParams > 0)
+ CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
+ else
+ CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
+
+
+ if(isTailCall) {
+ bool StructAttrFlag =
+ DAG.getMachineFunction().getFunction()->hasStructRetAttr();
+ isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
+ isVarArg, IsStructRet,
+ StructAttrFlag,
+ Outs, OutVals, Ins, DAG);
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i){
+ CCValAssign &VA = ArgLocs[i];
+ if (VA.isMemLoc()) {
+ isTailCall = false;
+ break;
+ }
+ }
+ if (isTailCall) {
+ DEBUG(dbgs () << "Eligible for Tail Call\n");
+ } else {
+ DEBUG(dbgs () <<
+ "Argument must be passed on stack. Not eligible for Tail Call\n");
+ }
+ }
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
+ SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
+ SmallVector<SDValue, 8> MemOpChains;
+
+ SDValue StackPtr =
+ DAG.getCopyFromReg(Chain, dl, TM.getRegisterInfo()->getStackRegister(),
+ getPointerTy());
+
+ // Walk the register/memloc assignments, inserting copies/loads.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDValue Arg = OutVals[i];
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default:
+ // Loc info must be one of Full, SExt, ZExt, or AExt.
+ llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ }
+
+ if (VA.isMemLoc()) {
+ unsigned LocMemOffset = VA.getLocMemOffset();
+ SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+
+ if (Flags.isByVal()) {
+ // The argument is a struct passed by value. According to LLVM, "Arg"
+        // is a pointer.
+ MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
+ Flags, DAG, dl));
+ } else {
+        // The argument is not passed by value. "Arg" is a built-in type. It is
+ // not a pointer.
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+ MachinePointerInfo(),false, false,
+ 0));
+ }
+ continue;
+ }
+
+ // Arguments that can be passed on register must be kept at RegsToPass
+ // vector.
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ }
+ }
+
+ // Transform all store nodes into one single node because all store
+ // nodes are independent of each other.
+ if (!MemOpChains.empty()) {
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0],
+ MemOpChains.size());
+ }
+
+ if (!isTailCall)
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
+ getPointerTy(), true));
+
+ // Build a sequence of copy-to-reg nodes chained together with token
+ // chain and flag operands which copy the outgoing args into registers.
+  // The InFlag is necessary since all emitted instructions must be
+ // stuck together.
+ SDValue InFlag;
+ if (!isTailCall) {
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+ }
+
+ // For tail calls lower the arguments to the 'real' stack slot.
+ if (isTailCall) {
+ // Force all the incoming stack arguments to be loaded from the stack
+ // before any new outgoing arguments are stored to the stack, because the
+ // outgoing stack slots may alias the incoming argument stack slots, and
+ // the alias isn't otherwise explicit. This is slightly more conservative
+ // than necessary, because it means that each store effectively depends
+ // on every argument instead of just those arguments it would clobber.
+ //
+    // Do not flag preceding copytoreg stuff together with the following stuff.
+ InFlag = SDValue();
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+    InFlag = SDValue();
+ }
+
+ // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
+ // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
+ // node so that legalize doesn't hack it.
+ if (flag_aligned_memcpy) {
+ const char *MemcpyName =
+ "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
+ Callee =
+ DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
+ flag_aligned_memcpy = false;
+ } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
+ } else if (ExternalSymbolSDNode *S =
+ dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
+ }
+
+ // Returns a chain & a flag for retval copy to use.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add argument registers to the end of the list so that they are
+ // known live into the call.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+ }
+
+ if (InFlag.getNode()) {
+ Ops.push_back(InFlag);
+ }
+
+ if (isTailCall)
+ return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
+
+ Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
+ InFlag = Chain.getValue(1);
+
+ // Create the CALLSEQ_END node.
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ DAG.getIntPtrConstant(0, true), InFlag);
+ InFlag = Chain.getValue(1);
+
+ // Handle result values, copying them out of physregs into vregs that we
+ // return.
+ return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
+ InVals, OutVals, Callee);
+}
+
+static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
+ bool isSEXTLoad, SDValue &Base,
+ SDValue &Offset, bool &isInc,
+ SelectionDAG &DAG) {
+ if (Ptr->getOpcode() != ISD::ADD)
+ return false;
+
+ if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
+ isInc = (Ptr->getOpcode() == ISD::ADD);
+ Base = Ptr->getOperand(0);
+ Offset = Ptr->getOperand(1);
+ // Ensure that Offset is a constant.
+ return (isa<ConstantSDNode>(Offset));
+ }
+
+ return false;
+}
+
+// TODO: Put this function along with the other isS* functions in
+// HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
+// functions defined in HexagonImmediates.td.
+static bool Is_PostInc_S4_Offset(SDNode * S, int ShiftAmount) {
+ ConstantSDNode *N = cast<ConstantSDNode>(S);
+
+  // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
+  // field.
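+  // For example, with ShiftAmount == 2 an offset of 28 is accepted
+  // (28 >> 2 == 7), while 32 is rejected (32 >> 2 == 8, out of range).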
+ int64_t v = (int64_t)N->getSExtValue();
+ int64_t m = 0;
+ if (ShiftAmount > 0) {
+ m = v % ShiftAmount;
+ v = v >> ShiftAmount;
+ }
+ return (v <= 7) && (v >= -8) && (m == 0);
+}
+
+/// getPostIndexedAddressParts - returns true by value, base pointer and
+/// offset pointer and addressing mode by reference if this node can be
+/// combined with a load / store to form a post-indexed load / store.
+bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
+ SDValue &Base,
+ SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const
+{
+ EVT VT;
+ SDValue Ptr;
+ bool isSEXTLoad = false;
+
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+ VT = LD->getMemoryVT();
+ isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
+ } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+ VT = ST->getMemoryVT();
+ if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+
+ bool isInc = false;
+ bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
+ isInc, DAG);
+ // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
+ int ShiftAmount = VT.getSizeInBits() / 16;
+ if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
+ AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
+ return true;
+ }
+
+ return false;
+}
+
+SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ MachineFunction &MF = DAG.getMachineFunction();
+ HexagonMachineFunctionInfo *FuncInfo =
+ MF.getInfo<HexagonMachineFunctionInfo>();
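+  // Scan the inline-asm operands for an early-clobber definition of the
+  // return-address register; if one is found, record in the function info
+  // that LR is clobbered.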
+ switch (Node->getOpcode()) {
+ case ISD::INLINEASM: {
+ unsigned NumOps = Node->getNumOperands();
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
+ --NumOps; // Ignore the flag operand.
+
+ for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
+ if (FuncInfo->hasClobberLR())
+ break;
+ unsigned Flags =
+ cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+ unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+ ++i; // Skip the ID value.
+
+ switch (InlineAsm::getKind(Flags)) {
+ default: llvm_unreachable("Bad flags!");
+ case InlineAsm::Kind_RegDef:
+ case InlineAsm::Kind_RegUse:
+ case InlineAsm::Kind_Imm:
+ case InlineAsm::Kind_Clobber:
+ case InlineAsm::Kind_Mem: {
+ for (; NumVals; --NumVals, ++i) {}
+ break;
+ }
+ case InlineAsm::Kind_RegDefEarlyClobber: {
+ for (; NumVals; --NumVals, ++i) {
+ unsigned Reg =
+ cast<RegisterSDNode>(Node->getOperand(i))->getReg();
+
+              // Check whether it is LR.
+ if (Reg == TM.getRegisterInfo()->getRARegister()) {
+ FuncInfo->setHasClobberLR(true);
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+ } // Node->getOpcode
+ return Op;
+}
+
+
+//
+// Taken from the XCore backend.
+//
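+// Lower BR_JT by scaling the jump-table index by the 4-byte entry size,
+// loading the target address from the table, and branching through
+// HexagonISD::BR_JT. Each target block is also marked address-taken.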
+SDValue HexagonTargetLowering::
+LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
+{
+ SDValue Chain = Op.getOperand(0);
+ SDValue Table = Op.getOperand(1);
+ SDValue Index = Op.getOperand(2);
+ DebugLoc dl = Op.getDebugLoc();
+ JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
+ unsigned JTI = JT->getIndex();
+ MachineFunction &MF = DAG.getMachineFunction();
+ const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
+ SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
+
+ // Mark all jump table targets as address taken.
+ const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
+ const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
+ for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
+ MachineBasicBlock *MBB = JTBBs[i];
+ MBB->setHasAddressTaken();
+ // This line is needed to set the hasAddressTaken flag on the BasicBlock
+ // object.
+ BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
+ }
+
+ SDValue JumpTableBase = DAG.getNode(HexagonISD::WrapperJT, dl,
+ getPointerTy(), TargetJT);
+ SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
+ DAG.getConstant(2, MVT::i32));
+ SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
+ ShiftIndex);
+ SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
+ MachinePointerInfo(), false, false, false,
+ 0);
+ return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
+}
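+
+// Illustrative sketch (not part of this patch) of the dispatch sequence the
+// nodes above select to, roughly:
+//   r1 = asl(r0, #2)       // ShiftIndex: scale the case index by 4
+//   r1 = add(r1, ##JTI0)   // JTAddress: add the jump table base
+//   r1 = memw(r1)          // LoadTarget: load the target address
+//   jumpr r1               // HexagonISD::BR_JT: indirect branch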
+
+
+SDValue
+HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Size = Op.getOperand(1);
+ DebugLoc dl = Op.getDebugLoc();
+
+ unsigned SPReg = getStackPointerRegisterToSaveRestore();
+
+ // Get a reference to the stack pointer.
+ SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
+
+ // Subtract the dynamic size from the actual stack size to
+ // obtain the new stack size.
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
+
+ //
+ // For Hexagon, the outgoing memory arguments area should be on top of the
+ // alloca area on the stack i.e., the outgoing memory arguments should be
+ // at a lower address than the alloca area. Move the alloca area down the
+ // stack by adding back the space reserved for outgoing arguments to SP
+ // here.
+ //
+ // We do not know what the size of the outgoing args is at this point.
+ // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
+ // stack pointer. We patch this instruction with the correct, known
+ // offset in emitPrologue().
+ //
+ // Use a placeholder immediate (zero) for now. This will be patched up
+ // by emitPrologue().
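+ //
+ // Illustrative example (not part of this patch): for
+ //   %buf = alloca i8, i32 %n
+ // this block emits, roughly,
+ //   Sub       = SP - %n
+ //   ArgAdjust = ADJDYNALLOC Sub, #0   // #0 is patched in emitPrologue()
+ // and then copies Sub into the stack pointer register.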
+ SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
+ MVT::i32,
+ Sub,
+ DAG.getConstant(0, MVT::i32));
+
+ // The Sub result contains the new stack start address, so it
+ // must be placed in the stack pointer register.
+ SDValue CopyChain = DAG.getCopyToReg(Chain, dl,
+ TM.getRegisterInfo()->getStackRegister(),
+ Sub);
+
+ SDValue Ops[2] = { ArgAdjust, CopyChain };
+ return DAG.getMergeValues(Ops, 2, dl);
+}
+
+SDValue
+HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv,
+ bool isVarArg,
+ const
+ SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals)
+const {
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ HexagonMachineFunctionInfo *FuncInfo =
+ MF.getInfo<HexagonMachineFunctionInfo>();
+
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+ getTargetMachine(), ArgLocs, *DAG.getContext());
+
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
+
+ // In LLVM, when a struct is returned by value, the first argument is a
+ // pointer to the location on the caller's stack where the return value
+ // will be stored. For Hexagon, that address is passed to the callee only
+ // when the struct is larger than 8 bytes; otherwise no address is passed
+ // and the callee returns the result directly through R0/R1.
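+ // For example (illustrative): 'struct S { int a, b; }' (8 bytes) is
+ // returned directly in R1:0, while 'struct T { int a[4]; }' (16 bytes) is
+ // returned through a caller-allocated buffer whose address is passed to
+ // the callee.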
+
+ SmallVector<SDValue, 4> MemOps;
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ ISD::ArgFlagsTy Flags = Ins[i].Flags;
+ unsigned ObjSize;
+ unsigned StackLocation;
+ int FI;
+
+ if ( (VA.isRegLoc() && !Flags.isByVal())
+ || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
+ // Arguments passed in registers:
+ // 1. int, long long, and pointer arguments that are allocated to registers.
+ // 2. Large structs that get a register to hold their address.
+ EVT RegVT = VA.getLocVT();
+ if (RegVT == MVT::i8 || RegVT == MVT::i16 || RegVT == MVT::i32) {
+ unsigned VReg =
+ RegInfo.createVirtualRegister(Hexagon::IntRegsRegisterClass);
+ RegInfo.addLiveIn(VA.getLocReg(), VReg);
+ InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
+ } else if (RegVT == MVT::i64) {
+ unsigned VReg =
+ RegInfo.createVirtualRegister(Hexagon::DoubleRegsRegisterClass);
+ RegInfo.addLiveIn(VA.getLocReg(), VReg);
+ InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
+ } else {
+ assert (0);
+ }
+ } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
+ assert (0 && "ByValSize must be bigger than 8 bytes");
+ } else {
+ // Sanity check.
+ assert(VA.isMemLoc());
+
+ if (Flags.isByVal()) {
+ // If it's a byval parameter, then we need to compute the
+ // "real" size, not the size of the pointer.
+ ObjSize = Flags.getByValSize();
+ } else {
+ ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
+ }
+
+ StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
+ // Create the frame index object for this incoming parameter...
+ FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);
+
+ // Create the SelectionDAG nodes corresponding to a load from this
+ // parameter.
+ SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+
+ if (Flags.isByVal()) {
+ // If it's a pass-by-value aggregate, then do not dereference the stack
+ // location. Instead, we should generate a reference to the stack
+ // location.
+ InVals.push_back(FIN);
+ } else {
+ InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
+ MachinePointerInfo(), false, false,
+ false, 0));
+ }
+ }
+ }
+
+ if (!MemOps.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
+ MemOps.size());
+
+ if (isVarArg) {
+ // This will point to the next argument passed via stack.
+ int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
+ HEXAGON_LRFP_SIZE +
+ CCInfo.getNextStackOffset(),
+ true);
+ FuncInfo->setVarArgsFrameIndex(FrameIndex);
+ }
+
+ return Chain;
+}
+
+SDValue
+HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+ // VASTART stores the address of the VarArgsFrameIndex slot into the
+ // memory location argument.
+ MachineFunction &MF = DAG.getMachineFunction();
+ HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
+ SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ return DAG.getStore(Op.getOperand(0), Op.getDebugLoc(), Addr,
+ Op.getOperand(1), MachinePointerInfo(SV), false,
+ false, 0);
+}
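+
+// Illustrative example (not part of this patch): for
+//   va_list ap; va_start(ap, last);
+// the VASTART node above lowers to a single store of the address of the
+// first stack-passed vararg slot (VarArgsFrameIndex) into 'ap'.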
+
+SDValue
+HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
+ SDNode* OpNode = Op.getNode();
+
+ SDValue Cond = DAG.getNode(ISD::SETCC, Op.getDebugLoc(), MVT::i1,
+ Op.getOperand(2), Op.getOperand(3),
+ Op.getOperand(4));
+ return DAG.getNode(ISD::SELECT, Op.getDebugLoc(), OpNode->getValueType(0),
+ Cond, Op.getOperand(0),
+ Op.getOperand(1));
+}
+
+SDValue
+HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ if (Depth) {
+ SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+ SDValue Offset = DAG.getConstant(4, MVT::i32);
+ return DAG.getLoad(VT, dl, DAG.getEntryNode(),
+ DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
+ MachinePointerInfo(), false, false, false, 0);
+ }
+
+ // Return LR, which contains the return address. Mark it an implicit live-in.
+ unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
+ return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
+}
+
+SDValue
+HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
+ const HexagonRegisterInfo *TRI = TM.getRegisterInfo();
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setFrameAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ TRI->getFrameRegister(), VT);
+ while (Depth--)
+ FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
+ MachinePointerInfo(),
+ false, false, false, 0);
+ return FrameAddr;
+}
+
+
+SDValue HexagonTargetLowering::LowerMEMBARRIER(SDValue Op,
+ SelectionDAG& DAG) const {
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
+}
+
+
+SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
+ SelectionDAG& DAG) const {
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
+}
+
+
+SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Result;
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
+ DebugLoc dl = Op.getDebugLoc();
+ Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
+
+ HexagonTargetObjectFile &TLOF =
+ (HexagonTargetObjectFile&)getObjFileLowering();
+ if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
+ return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
+ }
+
+ return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
+}
+
+//===----------------------------------------------------------------------===//
+// TargetLowering Implementation
+//===----------------------------------------------------------------------===//
+
+HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
+ &targetmachine)
+ : TargetLowering(targetmachine, new HexagonTargetObjectFile()),
+ TM(targetmachine) {
+
+ // Set up the register classes.
+ addRegisterClass(MVT::i32, Hexagon::IntRegsRegisterClass);
+ addRegisterClass(MVT::i64, Hexagon::DoubleRegsRegisterClass);
+
+ addRegisterClass(MVT::i1, Hexagon::PredRegsRegisterClass);
+
+ computeRegisterProperties();
+
+ // Align loop entry
+ setPrefLoopAlignment(4);
+
+ // Limits for inline expansion of memcpy/memmove
+ maxStoresPerMemcpy = 6;
+ maxStoresPerMemmove = 6;
+
+ //
+ // Library calls for unsupported operations
+ //
+ setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
+
+ setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
+ setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
+ setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
+ setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
+ setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
+ setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
+ setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
+
+ setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
+ setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
+ setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
+
+ setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
+ setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
+ setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
+
+ setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
+ setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
+ setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
+ setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
+ setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
+
+ setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
+ setOperationAction(ISD::SDIV, MVT::i32, Expand);
+ setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
+ setOperationAction(ISD::SDIV, MVT::i64, Expand);
+ setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
+ setOperationAction(ISD::UDIV, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
+ setOperationAction(ISD::UDIV, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
+
+ setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
+ setOperationAction(ISD::FDIV, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
+ setOperationAction(ISD::FDIV, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
+ setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
+ setOperationAction(ISD::FADD, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
+ setOperationAction(ISD::FADD, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
+ setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
+ setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
+ setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
+
+ setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
+ setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
+ setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
+ setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
+ setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
+ setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
+ setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
+ setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
+ setOperationAction(ISD::FMUL, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
+ setOperationAction(ISD::FMUL, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
+ setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
+ setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
+ setOperationAction(ISD::FSUB, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
+ setOperationAction(ISD::FSUB, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
+ setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
+ setCondCodeAction(ISD::SETO, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
+ setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
+
+ setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
+ setCondCodeAction(ISD::SETO, MVT::f32, Expand);
+
+ setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
+ setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
+
+ setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
+ setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
+
+ setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
+ setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
+
+ setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
+
+ // Turn FP extload into load/fextend.
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ // Hexagon has no i1 sign-extending load; expand it.
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ // Turn FP truncstore into trunc + store.
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // Custom legalize GlobalAddress nodes into CONST32.
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
+ // Expand i64 TRUNCATE.
+ setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
+
+ // Hexagon doesn't have sext_inreg, replace them with shl/sra.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
+
+ // Hexagon has no REM or DIVREM operations.
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
+ // Expand fp<->uint.
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
+
+ // Hexagon has no select or setcc: expand to SELECT_CC.
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Expand);
+
+ // Lower SELECT_CC to SETCC and SELECT.
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
+ // This is a workaround documented in DAGCombiner.cpp:2892: we don't
+ // support SELECT_CC on every type.
+ setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+
+ setOperationAction(ISD::BR_CC, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+ if (EmitJumpTables) {
+ setOperationAction(ISD::BR_JT, MVT::Other, Custom);
+ } else {
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ }
+
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+
+ setOperationAction(ISD::FSIN , MVT::f64, Expand);
+ setOperationAction(ISD::FCOS , MVT::f64, Expand);
+ setOperationAction(ISD::FREM , MVT::f64, Expand);
+ setOperationAction(ISD::FSIN , MVT::f32, Expand);
+ setOperationAction(ISD::FCOS , MVT::f32, Expand);
+ setOperationAction(ISD::FREM , MVT::f32, Expand);
+ setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ , MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ , MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::ROTL , MVT::i32, Expand);
+ setOperationAction(ISD::ROTR , MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FPOW , MVT::f64, Expand);
+ setOperationAction(ISD::FPOW , MVT::f32, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+
+ setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
+
+ setOperationAction(ISD::EH_RETURN, MVT::Other, Expand);
+
+ if (TM.getSubtargetImpl()->isSubtargetV2()) {
+ setExceptionPointerRegister(Hexagon::R20);
+ setExceptionSelectorRegister(Hexagon::R21);
+ } else {
+ setExceptionPointerRegister(Hexagon::R0);
+ setExceptionSelectorRegister(Hexagon::R1);
+ }
+
+ // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
+ setOperationAction(ISD::VASTART , MVT::Other, Custom);
+
+ // Use the default implementation.
+ setOperationAction(ISD::VAARG , MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY , MVT::Other, Expand);
+ setOperationAction(ISD::VAEND , MVT::Other, Expand);
+ setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
+
+
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
+ setOperationAction(ISD::INLINEASM , MVT::Other, Custom);
+
+ setMinFunctionAlignment(2);
+
+ // Needed for DYNAMIC_STACKALLOC expansion.
+ unsigned StackRegister = TM.getRegisterInfo()->getStackRegister();
+ setStackPointerRegisterToSaveRestore(StackRegister);
+ setSchedulingPreference(Sched::VLIW);
+}
+
+
+const char*
+HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ default: return 0;
+ case HexagonISD::CONST32: return "HexagonISD::CONST32";
+ case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
+ case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
+ case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
+ case HexagonISD::BRICC: return "HexagonISD::BRICC";
+ case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
+ case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
+ case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
+ case HexagonISD::Hi: return "HexagonISD::Hi";
+ case HexagonISD::Lo: return "HexagonISD::Lo";
+ case HexagonISD::FTOI: return "HexagonISD::FTOI";
+ case HexagonISD::ITOF: return "HexagonISD::ITOF";
+ case HexagonISD::CALL: return "HexagonISD::CALL";
+ case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
+ case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
+ case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
+ }
+}
+
+bool
+HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
+ EVT MTy1 = EVT::getEVT(Ty1);
+ EVT MTy2 = EVT::getEVT(Ty2);
+ if (!MTy1.isSimple() || !MTy2.isSimple()) {
+ return false;
+ }
+ return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
+}
+
+bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
+ if (!VT1.isSimple() || !VT2.isSimple()) {
+ return false;
+ }
+ return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
+}
+
+SDValue
+HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ default: llvm_unreachable("Should not custom lower this!");
+ // Frame and return address.
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
+ case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::GlobalTLSAddress:
+ llvm_unreachable("TLS not implemented for Hexagon.");
+ case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG);
+ case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
+ case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
+ case ISD::VASTART: return LowerVASTART(Op, DAG);
+ case ISD::BR_JT: return LowerBR_JT(Op, DAG);
+
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
+
+ }
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Hexagon Scheduler Hooks
+//===----------------------------------------------------------------------===//
+MachineBasicBlock *
+HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB)
+const {
+ switch (MI->getOpcode()) {
+ case Hexagon::ADJDYNALLOC: {
+ MachineFunction *MF = BB->getParent();
+ HexagonMachineFunctionInfo *FuncInfo =
+ MF->getInfo<HexagonMachineFunctionInfo>();
+ FuncInfo->addAllocaAdjustInst(MI);
+ return BB;
+ }
+ default: llvm_unreachable("Unexpected instr type to insert");
+ } // switch
+}
+
+//===----------------------------------------------------------------------===//
+// Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+std::pair<unsigned, const TargetRegisterClass*>
+HexagonTargetLowering::getRegForInlineAsmConstraint(const
+ std::string &Constraint,
+ EVT VT) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'r': // R0-R31
+ switch (VT.getSimpleVT().SimpleTy) {
+ default:
+ llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
+ case MVT::i32:
+ case MVT::i16:
+ case MVT::i8:
+ return std::make_pair(0U, Hexagon::IntRegsRegisterClass);
+ case MVT::i64:
+ return std::make_pair(0U, Hexagon::DoubleRegsRegisterClass);
+ }
+ default:
+ llvm_unreachable("Unknown asm register class");
+ }
+ }
+
+ return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented by
+/// AM is legal for this target, for a load/store of the specified type.
+bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
+ // Allow only sign-extended base offsets that fit the supported immediate
+ // range (roughly +/-2^13).
+ if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
+ return false;
+ }
+
+ // No global is ever allowed as a base.
+ if (AM.BaseGV) {
+ return false;
+ }
+
+ int Scale = AM.Scale;
+ if (Scale < 0) Scale = -Scale;
+ switch (Scale) {
+ case 0: // No scale reg, "r+i", "r", or just "i".
+ break;
+ default: // No scaled addressing mode.
+ return false;
+ }
+ return true;
+}
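+
+// Illustrative examples (not part of this patch):
+//   r0 + #100    -> legal  (base register plus in-range immediate)
+//   r0 + r1<<#2  -> rejected here (scaled index register)
+//   @global + #4 -> rejected (global as base)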
+
+/// isLegalICmpImmediate - Return true if the specified immediate is legal
+/// icmp immediate, that is the target has icmp instructions which can compare
+/// a register against the immediate without having to materialize the
+/// immediate into a register.
+bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
+ return Imm >= -512 && Imm <= 511;
+}
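+
+// Illustrative example (not part of this patch): cmp.eq(r0, #511) encodes
+// the immediate directly, while a compare against #512 must first
+// materialize the constant into a register.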
+
+/// IsEligibleForTailCallOptimization - Check whether the call is eligible
+/// for tail call optimization. Targets which want to do tail call
+/// optimization should implement this function.
+bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
+ SDValue Callee,
+ CallingConv::ID CalleeCC,
+ bool isVarArg,
+ bool isCalleeStructRet,
+ bool isCallerStructRet,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const {
+ const Function *CallerF = DAG.getMachineFunction().getFunction();
+ CallingConv::ID CallerCC = CallerF->getCallingConv();
+ bool CCMatch = CallerCC == CalleeCC;
+
+ // ***************************************************************************
+ // Look for obvious safe cases to perform tail call optimization that do not
+ // require ABI changes.
+ // ***************************************************************************
+
+ // If this is a tail call via a function pointer, then don't do it!
+ if (!(dyn_cast<GlobalAddressSDNode>(Callee))
+ && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
+ return false;
+ }
+
+ // Do not optimize if the calling conventions do not match.
+ if (!CCMatch)
+ return false;
+
+ // Do not tail call optimize vararg calls.
+ if (isVarArg)
+ return false;
+
+ // Also avoid tail call optimization if either caller or callee uses struct
+ // return semantics.
+ if (isCalleeStructRet || isCallerStructRet)
+ return false;
+
+ // In addition to the cases above, we also disable tail call optimization
+ // if the calling convention requires that at least one outgoing argument
+ // be passed on the stack. We cannot check that here because at this point
+ // that information is not available.
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
new file mode 100644
index 0000000..4208bcb
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -0,0 +1,162 @@
+//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that Hexagon uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef Hexagon_ISELLOWERING_H
+#define Hexagon_ISELLOWERING_H
+
+#include "Hexagon.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/CallingConv.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+
+namespace llvm {
+ namespace HexagonISD {
+ enum {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+
+ CONST32,
+ CONST32_GP, // For marking data present in GP.
+ SETCC,
+ ADJDYNALLOC,
+ ARGEXTEND,
+
+ CMPICC, // Compare two GPR operands, set icc.
+ CMPFCC, // Compare two FP operands, set fcc.
+ BRICC, // Branch to dest on icc condition
+ BRFCC, // Branch to dest on fcc condition
+ SELECT_ICC, // Select between two values using the current ICC flags.
+ SELECT_FCC, // Select between two values using the current FCC flags.
+
+ Hi, Lo, // Hi/Lo operations, typically on a global address.
+
+ FTOI, // FP to Int within a FP register.
+ ITOF, // Int to FP within a FP register.
+
+ CALL, // A call instruction.
+ RET_FLAG, // Return with a flag operand.
+ BR_JT, // Jump table.
+ BARRIER, // Memory barrier.
+ WrapperJT,
+ TC_RETURN
+ };
+ }
+
+ class HexagonTargetLowering : public TargetLowering {
+ int VarArgsFrameOffset; // Frame offset to start of varargs area.
+
+ bool CanReturnSmallStruct(const Function* CalleeFn,
+ unsigned& RetSize) const;
+
+ public:
+ HexagonTargetMachine &TM;
+ explicit HexagonTargetLowering(HexagonTargetMachine &targetmachine);
+
+ /// IsEligibleForTailCallOptimization - Check whether the call is eligible
+ /// for tail call optimization. Targets which want to do tail call
+ /// optimization should implement this function.
+ bool
+ IsEligibleForTailCallOptimization(SDValue Callee,
+ CallingConv::ID CalleeCC,
+ bool isVarArg,
+ bool isCalleeStructRet,
+ bool isCallerStructRet,
+ const
+ SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const;
+
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
+ virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+
+ virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+
+ virtual const char *getTargetNodeName(unsigned Opcode) const;
+ SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerCall(SDValue Chain, SDValue Callee,
+ CallingConv::ID CallConv, bool isVarArg,
+ bool doesNotRet, bool &isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDValue Callee) const;
+
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const;
+ SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const;
+
+ virtual MachineBasicBlock
+ *EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ virtual EVT getSetCCResultType(EVT VT) const {
+ return MVT::i1;
+ }
+
+ virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
+ SDValue &Base, SDValue &Offset,
+ ISD::MemIndexedMode &AM,
+ SelectionDAG &DAG) const;
+
+ std::pair<unsigned, const TargetRegisterClass*>
+ getRegForInlineAsmConstraint(const std::string &Constraint,
+ EVT VT) const;
+
+ // Intrinsics
+ virtual SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op,
+ SelectionDAG &DAG) const;
+ /// isLegalAddressingMode - Return true if the addressing mode represented
+ /// by AM is legal for this target, for a load/store of the specified type.
+ /// The type may be VoidTy, in which case only return true if the addressing
+ /// mode is legal for a load/store of any legal type.
+ /// TODO: Handle pre/postinc as well.
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+
+ /// isLegalICmpImmediate - Return true if the specified immediate is legal
+ /// icmp immediate, that is the target has icmp instructions which can
+ /// compare a register against the immediate without having to materialize
+ /// the immediate into a register.
+ virtual bool isLegalICmpImmediate(int64_t Imm) const;
+ };
+} // end namespace llvm
+
+#endif // Hexagon_ISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td b/contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td
new file mode 100644
index 0000000..e78bb79
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonImmediates.td
@@ -0,0 +1,508 @@
+//===- HexagonImmediates.td - Hexagon immediate processing -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// From IA64's InstrInfo file
+def s32Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s16Imm : Operand<i32> {
+ let PrintMethod = "printImmOperand";
+}
+
+def s12Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s11Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s11_0Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s11_1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s11_2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s11_3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s10Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s9Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s8Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s8Imm64 : Operand<i64> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s6Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s4Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s4_0Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s4_1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s4_2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def s4_3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u64Imm : Operand<i64> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u32Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u16Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u16_0Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u16_1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u16_2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u11_3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u10Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u9Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u8Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u7Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u6Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u6_0Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u6_1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u6_2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u6_3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u5Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u4Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u3Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u2Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def u1Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def n8Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def m6Imm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printImmOperand";
+}
+
+def nOneImm : Operand<i32> {
+ // For now, we use a generic print function for all operands.
+ let PrintMethod = "printNOneImmOperand";
+}
+
+//
+// Immediate predicates
+//
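+// Illustrative note (not part of this patch): isInt<N>(v) tests that v fits
+// in an N-bit sign-extended field, and isShiftedInt<N,S>(v) additionally
+// requires v to be a multiple of 2^S; e.g. isShiftedInt<4,2> accepts
+// -32, -28, ..., 24, 28.
+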
+def s32ImmPred : PatLeaf<(i32 imm), [{
+ // s32ImmPred predicate - True if the immediate fits in a 32-bit sign
+ // extended field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<32>(v);
+}]>;
+
+def s32_24ImmPred : PatLeaf<(i32 imm), [{
+ // s32_24ImmPred predicate - True if the immediate fits in a 32-bit sign
+ // extended field that is a multiple of 0x1000000.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<32,24>(v);
+}]>;
+
+def s32_16s8ImmPred : PatLeaf<(i32 imm), [{
+ // s32_16s8ImmPred predicate - True if the immediate fits in a 24-bit sign
+ // extended field that is a multiple of 0x10000.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<24,16>(v);
+}]>;
+
+def s16ImmPred : PatLeaf<(i32 imm), [{
+ // immS16 predicate - True if the immediate fits in a 16-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<16>(v);
+}]>;
+
+
+def s13ImmPred : PatLeaf<(i32 imm), [{
+ // immS13 predicate - True if the immediate fits in a 13-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<13>(v);
+}]>;
+
+
+def s12ImmPred : PatLeaf<(i32 imm), [{
+ // s12ImmPred predicate - True if the immediate fits in a 12-bit sign
+ // extended field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<12>(v);
+}]>;
+
+def s11_0ImmPred : PatLeaf<(i32 imm), [{
+ // s11_0ImmPred predicate - True if the immediate fits in an 11-bit sign
+ // extended field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<11>(v);
+}]>;
+
+
+def s11_1ImmPred : PatLeaf<(i32 imm), [{
+ // s11_1ImmPred predicate - True if the immediate fits in an 11-bit sign
+ // extended field that is a multiple of 2.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,1>(v);
+}]>;
+
+
+def s11_2ImmPred : PatLeaf<(i32 imm), [{
+ // s11_2ImmPred predicate - True if the immediate fits in an 11-bit sign
+ // extended field that is a multiple of 4.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,2>(v);
+}]>;
+
+
+def s11_3ImmPred : PatLeaf<(i32 imm), [{
+ // s11_3ImmPred predicate - True if the immediate fits in an 11-bit sign
+ // extended field that is a multiple of 8.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<11,3>(v);
+}]>;
+
+
+def s10ImmPred : PatLeaf<(i32 imm), [{
+ // s10ImmPred predicate - True if the immediate fits in a 10-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<10>(v);
+}]>;
+
+
+def s9ImmPred : PatLeaf<(i32 imm), [{
+ // s9ImmPred predicate - True if the immediate fits in a 9-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<9>(v);
+}]>;
+
+
+def s8ImmPred : PatLeaf<(i32 imm), [{
+ // s8ImmPred predicate - True if the immediate fits in an 8-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<8>(v);
+}]>;
+
+
+def s8Imm64Pred : PatLeaf<(i64 imm), [{
+ // s8Imm64Pred predicate - True if the immediate fits in an 8-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<8>(v);
+}]>;
+
+
+def s6ImmPred : PatLeaf<(i32 imm), [{
+ // s6ImmPred predicate - True if the immediate fits in a 6-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<6>(v);
+}]>;
+
+
+def s4_0ImmPred : PatLeaf<(i32 imm), [{
+ // s4_0ImmPred predicate - True if the immediate fits in a 4-bit sign extended
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<4>(v);
+}]>;
+
+
+def s4_1ImmPred : PatLeaf<(i32 imm), [{
+ // s4_1ImmPred predicate - True if the immediate fits in a 4-bit sign extended
+ // field that is a multiple of 2.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<4,1>(v);
+}]>;
+
+
+def s4_2ImmPred : PatLeaf<(i32 imm), [{
+ // s4_2ImmPred predicate - True if the immediate fits in a 4-bit sign extended
+ // field that is a multiple of 4.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<4,2>(v);
+}]>;
+
+
+def s4_3ImmPred : PatLeaf<(i32 imm), [{
+ // s4_3ImmPred predicate - True if the immediate fits in a 4-bit sign extended
+ // field that is a multiple of 8.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedInt<4,3>(v);
+}]>;
+
+
+def u64ImmPred : PatLeaf<(i64 imm), [{
+ // u64ImmPred predicate - Always true; any 64-bit immediate is accepted.
+ // Adding "N ||" to suppress gcc unused warning.
+ return (N || true);
+}]>;
+
+def u32ImmPred : PatLeaf<(i32 imm), [{
+ // u32ImmPred predicate - True if the immediate fits in a 32-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<32>(v);
+}]>;
+
+def u16ImmPred : PatLeaf<(i32 imm), [{
+ // u16ImmPred predicate - True if the immediate fits in a 16-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<16>(v);
+}]>;
+
+def u16_s8ImmPred : PatLeaf<(i32 imm), [{
+ // u16_s8ImmPred predicate - True if the immediate fits in a 16-bit unsigned
+ // field left-shifted by 8 (a multiple of 0x100).
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<16,8>(v);
+}]>;
+
+def u9ImmPred : PatLeaf<(i32 imm), [{
+ // u9ImmPred predicate - True if the immediate fits in a 9-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<9>(v);
+}]>;
+
+
+def u8ImmPred : PatLeaf<(i32 imm), [{
+ // u8ImmPred predicate - True if the immediate fits in an 8-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<8>(v);
+}]>;
+
+def u7ImmPred : PatLeaf<(i32 imm), [{
+ // u7ImmPred predicate - True if the immediate fits in a 7-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<7>(v);
+}]>;
+
+
+def u6ImmPred : PatLeaf<(i32 imm), [{
+ // u6ImmPred predicate - True if the immediate fits in a 6-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<6>(v);
+}]>;
+
+def u6_0ImmPred : PatLeaf<(i32 imm), [{
+ // u6_0ImmPred predicate - True if the immediate fits in a 6-bit unsigned
+ // field. Same as u6ImmPred.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<6>(v);
+}]>;
+
+def u6_1ImmPred : PatLeaf<(i32 imm), [{
+ // u6_1ImmPred predicate - True if the immediate fits in a 6-bit unsigned
+ // field that is 1 bit aligned - a multiple of 2.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<6,1>(v);
+}]>;
+
+def u6_2ImmPred : PatLeaf<(i32 imm), [{
+ // u6_2ImmPred predicate - True if the immediate fits in a 6-bit unsigned
+ // field that is 2 bits aligned - a multiple of 4.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<6,2>(v);
+}]>;
+
+def u6_3ImmPred : PatLeaf<(i32 imm), [{
+ // u6_3ImmPred predicate - True if the immediate fits in a 6-bit unsigned
+ // field that is 3 bits aligned - a multiple of 8.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isShiftedUInt<6,3>(v);
+}]>;
+
+def u5ImmPred : PatLeaf<(i32 imm), [{
+ // u5ImmPred predicate - True if the immediate fits in a 5-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<5>(v);
+}]>;
+
+
+def u3ImmPred : PatLeaf<(i32 imm), [{
+ // u3ImmPred predicate - True if the immediate fits in a 3-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<3>(v);
+}]>;
+
+
+def u2ImmPred : PatLeaf<(i32 imm), [{
+ // u2ImmPred predicate - True if the immediate fits in a 2-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<2>(v);
+}]>;
+
+
+def u1ImmPred : PatLeaf<(i1 imm), [{
+ // u1ImmPred predicate - True if the immediate fits in a 1-bit unsigned
+ // field.
+ int64_t v = (int64_t)N->getSExtValue();
+ return isUInt<1>(v);
+}]>;
+
+def m6ImmPred : PatLeaf<(i32 imm), [{
+ // m6ImmPred predicate - True if the immediate fits in a 6-bit signed
+ // field (intended for negative values).
+ int64_t v = (int64_t)N->getSExtValue();
+ return isInt<6>(v);
+}]>;
+
+// InN means negative integers in [-(2^N - 1), 0].
+def n8ImmPred : PatLeaf<(i32 imm), [{
+ // n8ImmPred predicate - True if the immediate lies in the range
+ // [-255, 0].
+ int64_t v = (int64_t)N->getSExtValue();
+ return (-255 <= v && v <= 0);
+}]>;
+
+def nOneImmPred : PatLeaf<(i32 imm), [{
+ // nOneImmPred predicate - True if the immediate is -1.
+ int64_t v = (int64_t)N->getSExtValue();
+ return (-1 == v);
+}]>;
+
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
new file mode 100644
index 0000000..48f0f01
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -0,0 +1,308 @@
+//==- HexagonInstrFormats.td - Hexagon Instruction Formats --*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Hexagon Instruction Flags +
+//
+// *** Must match HexagonBaseInfo.h ***
+//===----------------------------------------------------------------------===//
+
+class Type<bits<5> t> {
+ bits<5> Value = t;
+}
+def TypePSEUDO : Type<0>;
+def TypeALU32 : Type<1>;
+def TypeCR : Type<2>;
+def TypeJR : Type<3>;
+def TypeJ : Type<4>;
+def TypeLD : Type<5>;
+def TypeST : Type<6>;
+def TypeSYSTEM : Type<7>;
+def TypeXTYPE : Type<8>;
+def TypeMARKER : Type<31>;
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Declaration +
+//===----------------------------------------------------------------------===//
+
+class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr, InstrItinClass itin, Type type> : Instruction {
+ field bits<32> Inst;
+
+ let Namespace = "Hexagon";
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = asmstr;
+ let Pattern = pattern;
+ let Constraints = cstr;
+ let Itinerary = itin;
+
+ // *** Must match HexagonBaseInfo.h ***
+ Type HexagonType = type;
+ let TSFlags{4-0} = HexagonType.Value;
+ bits<1> isHexagonSolo = 0;
+ let TSFlags{5} = isHexagonSolo;
+
+ // Predicated instructions.
+ bits<1> isPredicated = 0;
+ let TSFlags{6} = isPredicated;
+
+ // *** The code above must match HexagonBaseInfo.h ***
+}
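+
+// Illustrative example (not part of this patch): target code can read these
+// flags back from an instruction descriptor, e.g.
+//   uint64_t TSF  = MI->getDesc().TSFlags;
+//   unsigned Type = TSF & 0x1f;      // bits 4-0: HexagonType.Value
+//   bool     Solo = (TSF >> 5) & 1;  // bit 5: isHexagonSolo
+// with masks that must stay in sync with HexagonBaseInfo.h.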
+
+//===----------------------------------------------------------------------===//
+// Instruction Classes Definitions +
+//===----------------------------------------------------------------------===//
+
+// LD Instruction Class in V2/V3/V4.
+// Definition of the instruction class NOT CHANGED.
+class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<13> imm13;
+ let mayLoad = 1;
+}
+
+// LD Instruction Class in V2/V3/V4.
+// Definition of the instruction class NOT CHANGED.
+class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, LD, TypeLD> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<13> imm13;
+ let mayLoad = 1;
+}
+
+// ST Instruction Class in V2/V3 can take SLOT0 only.
+// ST Instruction Class in V4 can take SLOT0 & SLOT1.
+// Definition of the instruction class CHANGED from V2/V3 to V4.
+class STInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<13> imm13;
+ let mayStore = 1;
+}
+
+// SYSTEM Instruction Class in V4 can take SLOT0 only
+// In V2/V3 we used ST for this but in v4 ST can take SLOT0 or SLOT1.
+class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", SYS, TypeSYSTEM> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<13> imm13;
+}
+
+// ST Instruction Class in V2/V3 can take SLOT0 only.
+// ST Instruction Class in V4 can take SLOT0 & SLOT1.
+// Definition of the instruction class CHANGED from V2/V3 to V4.
+class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, ST, TypeST> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<13> imm13;
+ let mayStore = 1;
+}
+
+// ALU32 Instruction Class in V2/V3/V4.
+// Definition of the instruction class NOT CHANGED.
+class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", ALU32, TypeALU32> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<16> imm16;
+ bits<16> imm16_2;
+}
+
+// ALU64 Instruction Class in V2/V3.
+// XTYPE Instruction Class in V4.
+// Definition of the instruction class NOT CHANGED.
+// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
+class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", ALU64, TypeXTYPE> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<16> imm16;
+ bits<16> imm16_2;
+}
+
+class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, ALU64, TypeXTYPE> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<16> imm16;
+ bits<16> imm16_2;
+}
+
+// M Instruction Class in V2/V3.
+// XTYPE Instruction Class in V4.
+// Definition of the instruction class NOT CHANGED.
+// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
+class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", M, TypeXTYPE> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+}
+
+// M Instruction Class in V2/V3.
+// XTYPE Instruction Class in V4.
+// Definition of the instruction class NOT CHANGED.
+// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
+class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+}
+
+// S Instruction Class in V2/V3.
+// XTYPE Instruction Class in V4.
+// Definition of the instruction class NOT CHANGED.
+// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
+class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", S, TypeXTYPE> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+}
+
+// S Instruction Class in V2/V3.
+// XTYPE Instruction Class in V4.
+// Definition of the instruction class NOT CHANGED.
+// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
+class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+}
+
+// J Instruction Class in V2/V3/V4.
+// Definition of the instruction class NOT CHANGED.
+class JType<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", J, TypeJ> {
+ bits<16> imm16;
+}
+
+// JR Instruction Class in V2/V3/V4.
+// Definition of the instruction class NOT CHANGED.
+class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", JR, TypeJR> {
+ bits<5> rs;
+ bits<5> pu; // Predicate register
+}
+
+// CR Instruction Class in V2/V3/V4.
+// Definition of the instruction class NOT CHANGED.
+class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", CR, TypeCR> {
+ bits<5> rs;
+ bits<10> imm10;
+}
+
+class Marker<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", MARKER, TypeMARKER> {
+ let isCodeGenOnly = 1;
+ let isPseudo = 1;
+}
+
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO, TypePSEUDO> {
+ let isCodeGenOnly = 1;
+ let isPseudo = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction Classes Definitions -
+//===----------------------------------------------------------------------===//
+
+
+//
+// ALU32 patterns
+//
+class ALU32_rr<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : ALU32Type<outs, ins, asmstr, pattern> {
+}
+
+class ALU32_ir<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : ALU32Type<outs, ins, asmstr, pattern> {
+ let rt{0-4} = 0;
+}
+
+class ALU32_ri<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : ALU32Type<outs, ins, asmstr, pattern> {
+ let rt{0-4} = 0;
+}
+
+class ALU32_ii<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : ALU32Type<outs, ins, asmstr, pattern> {
+ let rt{0-4} = 0;
+}
+
+//
+// ALU64 patterns.
+//
+class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : ALU64Type<outs, ins, asmstr, pattern> {
+}
+
+class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : ALU64Type<outs, ins, asmstr, pattern> {
+ let rt{0-4} = 0;
+}
+
+// J Type Instructions.
+class JInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : JType<outs, ins, asmstr, pattern> {
+}
+
+// JR Type Instructions.
+class JRInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : JRType<outs, ins, asmstr, pattern> {
+}
+
+
+// Post increment ST Instruction.
+class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
+ : STInstPost<outs, ins, asmstr, pattern, cstr> {
+ let rt{0-4} = 0;
+ let mayStore = 1;
+}
+
+// Post increment LD Instruction.
+class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
+ : LDInstPost<outs, ins, asmstr, pattern, cstr> {
+ let rt{0-4} = 0;
+ let mayLoad = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// V4 Instruction Format Definitions +
+//===----------------------------------------------------------------------===//
+
+include "HexagonInstrFormatsV4.td"
+
+//===----------------------------------------------------------------------===//
+// V4 Instruction Format Definitions -
+//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
new file mode 100644
index 0000000..49741a3
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrFormatsV4.td
@@ -0,0 +1,67 @@
+//==- HexagonInstrFormatsV4.td - Hexagon V4 Instruction Formats -*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Hexagon V4 instruction classes in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+//----------------------------------------------------------------------------//
+// Hexagon Instruction Flags +
+//
+// *** Must match BaseInfo.h ***
+//----------------------------------------------------------------------------//
+
+def TypeMEMOP : Type<9>;
+def TypeNV : Type<10>;
+def TypePREFIX : Type<30>;
+
+//----------------------------------------------------------------------------//
+// Instruction Classes Definitions +
+//----------------------------------------------------------------------------//
+
+//
+// NV type instructions.
+//
+class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4, TypeNV> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<13> imm13;
+}
+
+// Definition of Post increment new value store.
+class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4, TypeNV> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<5> rt;
+ bits<13> imm13;
+}
+
+// Post increment ST Instruction.
+class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
+ string cstr>
+ : NVInstPost_V4<outs, ins, asmstr, pattern, cstr> {
+ let rt{0-4} = 0;
+}
+
+class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4, TypeMEMOP> {
+ bits<5> rd;
+ bits<5> rs;
+ bits<6> imm6;
+}
+
+class Immext<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypePREFIX> {
+ let isCodeGenOnly = 1;
+
+ bits<26> imm26;
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
new file mode 100644
index 0000000..8685ec1
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -0,0 +1,2732 @@
+//===-- HexagonInstrInfo.cpp - Hexagon Instruction Information ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Hexagon implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonInstrInfo.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "Hexagon.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Support/MathExtras.h"
+#define GET_INSTRINFO_CTOR
+#include "HexagonGenInstrInfo.inc"
+#include "HexagonGenDFAPacketizer.inc"
+
+using namespace llvm;
+
+///
+/// Constants for Hexagon instructions.
+///
+const int Hexagon_MEMW_OFFSET_MAX = 4095;
+const int Hexagon_MEMW_OFFSET_MIN = -4096;
+const int Hexagon_MEMD_OFFSET_MAX = 8191;
+const int Hexagon_MEMD_OFFSET_MIN = -8192;
+const int Hexagon_MEMH_OFFSET_MAX = 2047;
+const int Hexagon_MEMH_OFFSET_MIN = -2048;
+const int Hexagon_MEMB_OFFSET_MAX = 1023;
+const int Hexagon_MEMB_OFFSET_MIN = -1024;
+const int Hexagon_ADDI_OFFSET_MAX = 32767;
+const int Hexagon_ADDI_OFFSET_MIN = -32768;
+const int Hexagon_MEMD_AUTOINC_MAX = 56;
+const int Hexagon_MEMD_AUTOINC_MIN = -64;
+const int Hexagon_MEMW_AUTOINC_MAX = 28;
+const int Hexagon_MEMW_AUTOINC_MIN = -32;
+const int Hexagon_MEMH_AUTOINC_MAX = 14;
+const int Hexagon_MEMH_AUTOINC_MIN = -16;
+const int Hexagon_MEMB_AUTOINC_MAX = 7;
+const int Hexagon_MEMB_AUTOINC_MIN = -8;
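+
+// For illustration: an immediate offset is encodable for a word access
+// exactly when it lies in [Hexagon_MEMW_OFFSET_MIN, Hexagon_MEMW_OFFSET_MAX].
+// A minimal sketch of such a range check (hypothetical helper, not defined
+// elsewhere in this file):
+//
+//   static bool isValidMemwOffset(int Offset) {
+//     return Offset >= Hexagon_MEMW_OFFSET_MIN &&
+//            Offset <= Hexagon_MEMW_OFFSET_MAX;
+//   }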
+
+
+
+HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
+ : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
+ RI(ST, *this), Subtarget(ST) {
+}
+
+
+/// isLoadFromStackSlot - If the specified machine instruction is a direct
+/// load from a stack slot, return the virtual or physical register number of
+/// the destination along with the FrameIndex of the loaded stack slot. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than loading from the stack slot.
+unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+
+
+ switch (MI->getOpcode()) {
+ case Hexagon::LDriw:
+ case Hexagon::LDrid:
+ case Hexagon::LDrih:
+ case Hexagon::LDrib:
+ case Hexagon::LDriub:
+ if (MI->getOperand(2).isFI() &&
+ MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
+ FrameIndex = MI->getOperand(2).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
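+
+// Typical use (sketch; TII is assumed to point at this HexagonInstrInfo
+// instance and MI at a candidate MachineInstr):
+//
+//   int FI;
+//   if (unsigned Reg = TII->isLoadFromStackSlot(MI, FI)) {
+//     // MI is a plain reload of Reg from stack slot FI with no other side
+//     // effects, so it is a candidate for folding or rematerialization.
+//   }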
+
+
+/// isStoreToStackSlot - If the specified machine instruction is a direct
+/// store to a stack slot, return the virtual or physical register number of
+/// the source reg along with the FrameIndex of the stored stack slot. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than storing to the stack slot.
+unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ switch (MI->getOpcode()) {
+ case Hexagon::STriw:
+ case Hexagon::STrid:
+ case Hexagon::STrih:
+ case Hexagon::STrib:
+ if (MI->getOperand(2).isFI() &&
+ MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
+ FrameIndex = MI->getOperand(2).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
+unsigned
+HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const{
+
+ int BOpc = Hexagon::JMP;
+ int BccOpc = Hexagon::JMP_c;
+
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+
+ int regPos = 0;
+ // Check whether ReverseBranchCondition has asked to reverse this branch.
+ // If the branch is to be reversed an odd number of times, use JMP_cNot.
+ if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
+ BccOpc = Hexagon::JMP_cNot;
+ regPos = 1;
+ }
+
+ if (FBB == 0) {
+ if (Cond.empty()) {
+ // Due to a bug in TailMerging/CFG Optimization, we need to add a
+ // special case handling of a predicated jump followed by an
+ // unconditional jump. If not, Tail Merging and CFG Optimization go
+ // into an infinite loop.
+ MachineBasicBlock *NewTBB, *NewFBB;
+ SmallVector<MachineOperand, 4> Cond;
+ MachineInstr *Term = MBB.getFirstTerminator();
+ if (isPredicated(Term) && !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond,
+ false)) {
+ MachineBasicBlock *NextBB =
+ llvm::next(MachineFunction::iterator(&MBB));
+ if (NewTBB == NextBB) {
+ ReverseBranchCondition(Cond);
+ RemoveBranch(MBB);
+ return InsertBranch(MBB, TBB, 0, Cond, DL);
+ }
+ }
+ BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
+ } else {
+ BuildMI(&MBB, DL,
+ get(BccOpc)).addReg(Cond[regPos].getReg()).addMBB(TBB);
+ }
+ return 1;
+ }
+
+ BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[regPos].getReg()).addMBB(TBB);
+ BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
+
+ return 2;
+}
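+
+// The Cond encoding shared by InsertBranch and AnalyzeBranch (sketch): an
+// empty vector requests an unconditional JMP; a leading immediate 0 selects
+// the inverted JMP_cNot form, with the predicate register following it.
+// Hypothetical call site, assuming PredReg holds a predicate register:
+//
+//   SmallVector<MachineOperand, 4> Cond;
+//   Cond.push_back(MachineOperand::CreateImm(0));            // invert
+//   Cond.push_back(MachineOperand::CreateReg(PredReg, false));
+//   TII->InsertBranch(MBB, TBB, 0, Cond, DL);                // JMP_cNot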
+
+
+bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ FBB = NULL;
+
+ // If the block has no terminators, it just falls into the block after it.
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin())
+ return false;
+
+ // A basic block may look like this:
+ //
+ // [ insn
+ // EH_LABEL
+ // insn
+ // insn
+ // insn
+ // EH_LABEL
+ // insn ]
+ //
+ // It has two successors but no terminator; we don't know how to
+ // handle such a block.
+ do {
+ --I;
+ if (I->isEHLabel())
+ return true;
+ } while (I != MBB.begin());
+
+ I = MBB.end();
+ --I;
+
+ while (I->isDebugValue()) {
+ if (I == MBB.begin())
+ return false;
+ --I;
+ }
+ if (!isUnpredicatedTerminator(I))
+ return false;
+
+ // Get the last instruction in the block.
+ MachineInstr *LastInst = I;
+
+ // If there is only one terminator instruction, process it.
+ if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
+ if (LastInst->getOpcode() == Hexagon::JMP) {
+ TBB = LastInst->getOperand(0).getMBB();
+ return false;
+ }
+ if (LastInst->getOpcode() == Hexagon::JMP_c) {
+ // Block ends with fall-through true condbranch.
+ TBB = LastInst->getOperand(1).getMBB();
+ Cond.push_back(LastInst->getOperand(0));
+ return false;
+ }
+ if (LastInst->getOpcode() == Hexagon::JMP_cNot) {
+ // Block ends with fall-through false condbranch.
+ TBB = LastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(0));
+ Cond.push_back(LastInst->getOperand(0));
+ return false;
+ }
+ // Otherwise, don't know what this is.
+ return true;
+ }
+
+ // Get the instruction before it if it's a terminator.
+ MachineInstr *SecondLastInst = I;
+
+ // If there are three terminators, we don't know what sort of block this is.
+ if (SecondLastInst && I != MBB.begin() &&
+ isUnpredicatedTerminator(--I))
+ return true;
+
+ // If the block ends with Hexagon::BRCOND and Hexagon::JMP, handle it.
+ if (((SecondLastInst->getOpcode() == Hexagon::BRCOND) ||
+ (SecondLastInst->getOpcode() == Hexagon::JMP_c)) &&
+ LastInst->getOpcode() == Hexagon::JMP) {
+ TBB = SecondLastInst->getOperand(1).getMBB();
+ Cond.push_back(SecondLastInst->getOperand(0));
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
+ }
+
+ // If the block ends with Hexagon::JMP_cNot and Hexagon::JMP, handle it.
+ if ((SecondLastInst->getOpcode() == Hexagon::JMP_cNot) &&
+ LastInst->getOpcode() == Hexagon::JMP) {
+ TBB = SecondLastInst->getOperand(1).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(0));
+ Cond.push_back(SecondLastInst->getOperand(0));
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
+ }
+
+ // If the block ends with two Hexagon::JMPs, handle it. The second one is not
+ // executed, so remove it.
+ if (SecondLastInst->getOpcode() == Hexagon::JMP &&
+ LastInst->getOpcode() == Hexagon::JMP) {
+ TBB = SecondLastInst->getOperand(0).getMBB();
+ I = LastInst;
+ if (AllowModify)
+ I->eraseFromParent();
+ return false;
+ }
+
+ // Otherwise, can't handle this.
+ return true;
+}
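+
+// The block shapes recognized above, summarized (p is a predicate register,
+// bb/bb1/bb2 are successor blocks):
+//
+//   JMP bb                   -> TBB = bb,  Cond = {}
+//   JMP_c p, bb              -> TBB = bb,  Cond = {p}
+//   JMP_cNot p, bb           -> TBB = bb,  Cond = {imm 0, p}
+//   JMP_c p, bb1; JMP bb2    -> TBB = bb1, FBB = bb2, Cond = {p}
+//
+// Anything else, including a block containing an EH_LABEL, makes
+// AnalyzeBranch return true ("cannot analyze").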
+
+
+unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ int BOpc = Hexagon::JMP;
+ int BccOpc = Hexagon::JMP_c;
+ int BccOpcNot = Hexagon::JMP_cNot;
+
+ MachineBasicBlock::iterator I = MBB.end();
+ if (I == MBB.begin()) return 0;
+ --I;
+ if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc &&
+ I->getOpcode() != BccOpcNot)
+ return 0;
+
+ // Remove the branch.
+ I->eraseFromParent();
+
+ I = MBB.end();
+
+ if (I == MBB.begin()) return 1;
+ --I;
+ if (I->getOpcode() != BccOpc && I->getOpcode() != BccOpcNot)
+ return 1;
+
+ // Remove the branch.
+ I->eraseFromParent();
+ return 2;
+}
+
+
+void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
+ BuildMI(MBB, I, DL, get(Hexagon::TFR), DestReg).addReg(SrcReg);
+ return;
+ }
+ if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
+ BuildMI(MBB, I, DL, get(Hexagon::TFR_64), DestReg).addReg(SrcReg);
+ return;
+ }
+ if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
+ // Map Pd = Ps to Pd = or(Ps, Ps).
+ BuildMI(MBB, I, DL, get(Hexagon::OR_pp),
+ DestReg).addReg(SrcReg).addReg(SrcReg);
+ return;
+ }
+ if (Hexagon::DoubleRegsRegClass.contains(DestReg, SrcReg)) {
+ // We can have an overlap between single and double reg: r1:0 = r0.
+ if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) {
+ // r1:0 = r0
+ BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
+ Hexagon::subreg_hireg))).addImm(0);
+ } else {
+ // r1:0 = r1 or no overlap.
+ BuildMI(MBB, I, DL, get(Hexagon::TFR), (RI.getSubReg(DestReg,
+ Hexagon::subreg_loreg))).addReg(SrcReg);
+ BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
+ Hexagon::subreg_hireg))).addImm(0);
+ }
+ return;
+ }
+ if (Hexagon::CRRegsRegClass.contains(DestReg, SrcReg)) {
+ BuildMI(MBB, I, DL, get(Hexagon::TFCR), DestReg).addReg(SrcReg);
+ return;
+ }
+
+ llvm_unreachable("Unimplemented");
+}
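+
+// Example of the overlap case handled above (sketch): for r1:0 = r0 the low
+// half of the pair already holds the source, so only the high half is
+// written:
+//
+//   r1:0 = r0   ==>   r1 = #0          // single TFRI into subreg_hireg
+//
+// A non-overlapping copy such as r3:2 = r0 instead emits a TFR into the low
+// subregister followed by a TFRI #0 into the high one.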
+
+
+void HexagonInstrInfo::
+storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool isKill, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+
+ DebugLoc DL = MBB.findDebugLoc(I);
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+ unsigned Align = MFI.getObjectAlignment(FI);
+
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MachineMemOperand::MOStore,
+ MFI.getObjectSize(FI),
+ Align);
+
+ if (Hexagon::IntRegsRegisterClass->hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::STriw))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ } else if (Hexagon::DoubleRegsRegisterClass->hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::STrid))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ } else if (Hexagon::PredRegsRegisterClass->hasSubClassEq(RC)) {
+ BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
+ .addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ } else {
+ llvm_unreachable("Unimplemented");
+ }
+}
+
+
+void HexagonInstrInfo::storeRegToAddr(
+ MachineFunction &MF, unsigned SrcReg,
+ bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ llvm_unreachable("Unimplemented");
+}
+
+
+void HexagonInstrInfo::
+loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned DestReg, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL = MBB.findDebugLoc(I);
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+ unsigned Align = MFI.getObjectAlignment(FI);
+
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(
+ MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
+ MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FI),
+ Align);
+
+ if (RC == Hexagon::IntRegsRegisterClass) {
+ BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (RC == Hexagon::DoubleRegsRegisterClass) {
+ BuildMI(MBB, I, DL, get(Hexagon::LDrid), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else if (RC == Hexagon::PredRegsRegisterClass) {
+ BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
+ } else {
+ llvm_unreachable("Can't store this register to stack slot");
+ }
+}
+
+
+void HexagonInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ llvm_unreachable("Unimplemented");
+}
+
+
+MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FI) const {
+ // Hexagon_TODO: Implement.
+ return 0;
+}
+
+
+unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
+
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ const TargetRegisterClass *TRC;
+ if (VT == MVT::i1) {
+ TRC = Hexagon::PredRegsRegisterClass;
+ } else if (VT == MVT::i32) {
+ TRC = Hexagon::IntRegsRegisterClass;
+ } else if (VT == MVT::i64) {
+ TRC = Hexagon::DoubleRegsRegisterClass;
+ } else {
+ llvm_unreachable("Cannot handle this register class");
+ }
+
+ unsigned NewReg = RegInfo.createVirtualRegister(TRC);
+ return NewReg;
+}
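+
+// Usage sketch (hypothetical call site; MF, MBB, I and DL are assumed to be
+// in scope): create a scratch i32 virtual register and seed it with zero.
+//
+//   unsigned Scratch = TII->createVR(&MF, MVT::i32);
+//   BuildMI(MBB, I, DL, TII->get(Hexagon::TFRI), Scratch).addImm(0);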
+
+bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
+ switch(MI->getOpcode()) {
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_nv_V4:
+ case Hexagon::JMP_EQriPnt_nv_V4:
+ case Hexagon::JMP_EQriNotPt_nv_V4:
+ case Hexagon::JMP_EQriNotPnt_nv_V4:
+
+ // JMP_EQri - with -1
+ case Hexagon::JMP_EQriPtneg_nv_V4:
+ case Hexagon::JMP_EQriPntneg_nv_V4:
+ case Hexagon::JMP_EQriNotPtneg_nv_V4:
+ case Hexagon::JMP_EQriNotPntneg_nv_V4:
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_nv_V4:
+ case Hexagon::JMP_EQrrPnt_nv_V4:
+ case Hexagon::JMP_EQrrNotPt_nv_V4:
+ case Hexagon::JMP_EQrrNotPnt_nv_V4:
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_nv_V4:
+ case Hexagon::JMP_GTriPnt_nv_V4:
+ case Hexagon::JMP_GTriNotPt_nv_V4:
+ case Hexagon::JMP_GTriNotPnt_nv_V4:
+
+ // JMP_GTri - with -1
+ case Hexagon::JMP_GTriPtneg_nv_V4:
+ case Hexagon::JMP_GTriPntneg_nv_V4:
+ case Hexagon::JMP_GTriNotPtneg_nv_V4:
+ case Hexagon::JMP_GTriNotPntneg_nv_V4:
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_nv_V4:
+ case Hexagon::JMP_GTrrPnt_nv_V4:
+ case Hexagon::JMP_GTrrNotPt_nv_V4:
+ case Hexagon::JMP_GTrrNotPnt_nv_V4:
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_nv_V4:
+ case Hexagon::JMP_GTrrdnPnt_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPt_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_nv_V4:
+ case Hexagon::JMP_GTUriPnt_nv_V4:
+ case Hexagon::JMP_GTUriNotPt_nv_V4:
+ case Hexagon::JMP_GTUriNotPnt_nv_V4:
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_nv_V4:
+ case Hexagon::JMP_GTUrrPnt_nv_V4:
+ case Hexagon::JMP_GTUrrNotPt_nv_V4:
+ case Hexagon::JMP_GTUrrNotPnt_nv_V4:
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_nv_V4:
+ case Hexagon::JMP_GTUrrdnPnt_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
+ return true;
+
+ // TFR_FI
+ case Hexagon::TFR_FI:
+ return true;
+
+
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
+ switch(MI->getOpcode()) {
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_ie_nv_V4:
+ case Hexagon::JMP_EQriPnt_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPt_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
+
+ // JMP_EQri - with -1
+ case Hexagon::JMP_EQriPtneg_ie_nv_V4:
+ case Hexagon::JMP_EQriPntneg_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_ie_nv_V4:
+ case Hexagon::JMP_EQrrPnt_ie_nv_V4:
+ case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_ie_nv_V4:
+ case Hexagon::JMP_GTriPnt_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
+
+ // JMP_GTri - with -1
+ case Hexagon::JMP_GTriPtneg_ie_nv_V4:
+ case Hexagon::JMP_GTriPntneg_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrPnt_ie_nv_V4:
+ case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_ie_nv_V4:
+ case Hexagon::JMP_GTUriPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
+
+ // V4 absolute set addressing.
+ case Hexagon::LDrid_abs_setimm_V4:
+ case Hexagon::LDriw_abs_setimm_V4:
+ case Hexagon::LDrih_abs_setimm_V4:
+ case Hexagon::LDrib_abs_setimm_V4:
+ case Hexagon::LDriuh_abs_setimm_V4:
+ case Hexagon::LDriub_abs_setimm_V4:
+
+ case Hexagon::STrid_abs_setimm_V4:
+ case Hexagon::STrib_abs_setimm_V4:
+ case Hexagon::STrih_abs_setimm_V4:
+ case Hexagon::STriw_abs_setimm_V4:
+
+ // V4 global address load.
+ case Hexagon::LDrid_GP_cPt_V4 :
+ case Hexagon::LDrid_GP_cNotPt_V4 :
+ case Hexagon::LDrid_GP_cdnPt_V4 :
+ case Hexagon::LDrid_GP_cdnNotPt_V4 :
+ case Hexagon::LDrib_GP_cPt_V4 :
+ case Hexagon::LDrib_GP_cNotPt_V4 :
+ case Hexagon::LDrib_GP_cdnPt_V4 :
+ case Hexagon::LDrib_GP_cdnNotPt_V4 :
+ case Hexagon::LDriub_GP_cPt_V4 :
+ case Hexagon::LDriub_GP_cNotPt_V4 :
+ case Hexagon::LDriub_GP_cdnPt_V4 :
+ case Hexagon::LDriub_GP_cdnNotPt_V4 :
+ case Hexagon::LDrih_GP_cPt_V4 :
+ case Hexagon::LDrih_GP_cNotPt_V4 :
+ case Hexagon::LDrih_GP_cdnPt_V4 :
+ case Hexagon::LDrih_GP_cdnNotPt_V4 :
+ case Hexagon::LDriuh_GP_cPt_V4 :
+ case Hexagon::LDriuh_GP_cNotPt_V4 :
+ case Hexagon::LDriuh_GP_cdnPt_V4 :
+ case Hexagon::LDriuh_GP_cdnNotPt_V4 :
+ case Hexagon::LDriw_GP_cPt_V4 :
+ case Hexagon::LDriw_GP_cNotPt_V4 :
+ case Hexagon::LDriw_GP_cdnPt_V4 :
+ case Hexagon::LDriw_GP_cdnNotPt_V4 :
+ case Hexagon::LDd_GP_cPt_V4 :
+ case Hexagon::LDd_GP_cNotPt_V4 :
+ case Hexagon::LDd_GP_cdnPt_V4 :
+ case Hexagon::LDd_GP_cdnNotPt_V4 :
+ case Hexagon::LDb_GP_cPt_V4 :
+ case Hexagon::LDb_GP_cNotPt_V4 :
+ case Hexagon::LDb_GP_cdnPt_V4 :
+ case Hexagon::LDb_GP_cdnNotPt_V4 :
+ case Hexagon::LDub_GP_cPt_V4 :
+ case Hexagon::LDub_GP_cNotPt_V4 :
+ case Hexagon::LDub_GP_cdnPt_V4 :
+ case Hexagon::LDub_GP_cdnNotPt_V4 :
+ case Hexagon::LDh_GP_cPt_V4 :
+ case Hexagon::LDh_GP_cNotPt_V4 :
+ case Hexagon::LDh_GP_cdnPt_V4 :
+ case Hexagon::LDh_GP_cdnNotPt_V4 :
+ case Hexagon::LDuh_GP_cPt_V4 :
+ case Hexagon::LDuh_GP_cNotPt_V4 :
+ case Hexagon::LDuh_GP_cdnPt_V4 :
+ case Hexagon::LDuh_GP_cdnNotPt_V4 :
+ case Hexagon::LDw_GP_cPt_V4 :
+ case Hexagon::LDw_GP_cNotPt_V4 :
+ case Hexagon::LDw_GP_cdnPt_V4 :
+ case Hexagon::LDw_GP_cdnNotPt_V4 :
+
+ // V4 global address store.
+ case Hexagon::STrid_GP_cPt_V4 :
+ case Hexagon::STrid_GP_cNotPt_V4 :
+ case Hexagon::STrid_GP_cdnPt_V4 :
+ case Hexagon::STrid_GP_cdnNotPt_V4 :
+ case Hexagon::STrib_GP_cPt_V4 :
+ case Hexagon::STrib_GP_cNotPt_V4 :
+ case Hexagon::STrib_GP_cdnPt_V4 :
+ case Hexagon::STrib_GP_cdnNotPt_V4 :
+ case Hexagon::STrih_GP_cPt_V4 :
+ case Hexagon::STrih_GP_cNotPt_V4 :
+ case Hexagon::STrih_GP_cdnPt_V4 :
+ case Hexagon::STrih_GP_cdnNotPt_V4 :
+ case Hexagon::STriw_GP_cPt_V4 :
+ case Hexagon::STriw_GP_cNotPt_V4 :
+ case Hexagon::STriw_GP_cdnPt_V4 :
+ case Hexagon::STriw_GP_cdnNotPt_V4 :
+ case Hexagon::STd_GP_cPt_V4 :
+ case Hexagon::STd_GP_cNotPt_V4 :
+ case Hexagon::STd_GP_cdnPt_V4 :
+ case Hexagon::STd_GP_cdnNotPt_V4 :
+ case Hexagon::STb_GP_cPt_V4 :
+ case Hexagon::STb_GP_cNotPt_V4 :
+ case Hexagon::STb_GP_cdnPt_V4 :
+ case Hexagon::STb_GP_cdnNotPt_V4 :
+ case Hexagon::STh_GP_cPt_V4 :
+ case Hexagon::STh_GP_cNotPt_V4 :
+ case Hexagon::STh_GP_cdnPt_V4 :
+ case Hexagon::STh_GP_cdnNotPt_V4 :
+ case Hexagon::STw_GP_cPt_V4 :
+ case Hexagon::STw_GP_cNotPt_V4 :
+ case Hexagon::STw_GP_cdnPt_V4 :
+ case Hexagon::STw_GP_cdnNotPt_V4 :
+
+ // V4 predicated global address new value store.
+ case Hexagon::STrib_GP_cPt_nv_V4 :
+ case Hexagon::STrib_GP_cNotPt_nv_V4 :
+ case Hexagon::STrib_GP_cdnPt_nv_V4 :
+ case Hexagon::STrib_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STrih_GP_cPt_nv_V4 :
+ case Hexagon::STrih_GP_cNotPt_nv_V4 :
+ case Hexagon::STrih_GP_cdnPt_nv_V4 :
+ case Hexagon::STrih_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STriw_GP_cPt_nv_V4 :
+ case Hexagon::STriw_GP_cNotPt_nv_V4 :
+ case Hexagon::STriw_GP_cdnPt_nv_V4 :
+ case Hexagon::STriw_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STb_GP_cPt_nv_V4 :
+ case Hexagon::STb_GP_cNotPt_nv_V4 :
+ case Hexagon::STb_GP_cdnPt_nv_V4 :
+ case Hexagon::STb_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STh_GP_cPt_nv_V4 :
+ case Hexagon::STh_GP_cNotPt_nv_V4 :
+ case Hexagon::STh_GP_cdnPt_nv_V4 :
+ case Hexagon::STh_GP_cdnNotPt_nv_V4 :
+ case Hexagon::STw_GP_cPt_nv_V4 :
+ case Hexagon::STw_GP_cNotPt_nv_V4 :
+ case Hexagon::STw_GP_cdnPt_nv_V4 :
+ case Hexagon::STw_GP_cdnNotPt_nv_V4 :
+
+ // TFR_FI
+ case Hexagon::TFR_FI_immext_V4:
+ return true;
+
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_nv_V4:
+ case Hexagon::JMP_EQriPnt_nv_V4:
+ case Hexagon::JMP_EQriNotPt_nv_V4:
+ case Hexagon::JMP_EQriNotPnt_nv_V4:
+ case Hexagon::JMP_EQriPt_ie_nv_V4:
+ case Hexagon::JMP_EQriPnt_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPt_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
+
+ // JMP_EQri - with -1
+ case Hexagon::JMP_EQriPtneg_nv_V4:
+ case Hexagon::JMP_EQriPntneg_nv_V4:
+ case Hexagon::JMP_EQriNotPtneg_nv_V4:
+ case Hexagon::JMP_EQriNotPntneg_nv_V4:
+ case Hexagon::JMP_EQriPtneg_ie_nv_V4:
+ case Hexagon::JMP_EQriPntneg_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
+ case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_nv_V4:
+ case Hexagon::JMP_EQrrPnt_nv_V4:
+ case Hexagon::JMP_EQrrNotPt_nv_V4:
+ case Hexagon::JMP_EQrrNotPnt_nv_V4:
+ case Hexagon::JMP_EQrrPt_ie_nv_V4:
+ case Hexagon::JMP_EQrrPnt_ie_nv_V4:
+ case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_nv_V4:
+ case Hexagon::JMP_GTriPnt_nv_V4:
+ case Hexagon::JMP_GTriNotPt_nv_V4:
+ case Hexagon::JMP_GTriNotPnt_nv_V4:
+ case Hexagon::JMP_GTriPt_ie_nv_V4:
+ case Hexagon::JMP_GTriPnt_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
+
+ // JMP_GTri - with -1
+ case Hexagon::JMP_GTriPtneg_nv_V4:
+ case Hexagon::JMP_GTriPntneg_nv_V4:
+ case Hexagon::JMP_GTriNotPtneg_nv_V4:
+ case Hexagon::JMP_GTriNotPntneg_nv_V4:
+ case Hexagon::JMP_GTriPtneg_ie_nv_V4:
+ case Hexagon::JMP_GTriPntneg_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
+ case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_nv_V4:
+ case Hexagon::JMP_GTrrPnt_nv_V4:
+ case Hexagon::JMP_GTrrNotPt_nv_V4:
+ case Hexagon::JMP_GTrrNotPnt_nv_V4:
+ case Hexagon::JMP_GTrrPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrPnt_ie_nv_V4:
+ case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_nv_V4:
+ case Hexagon::JMP_GTrrdnPnt_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPt_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
+ case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_nv_V4:
+ case Hexagon::JMP_GTUriPnt_nv_V4:
+ case Hexagon::JMP_GTUriNotPt_nv_V4:
+ case Hexagon::JMP_GTUriNotPnt_nv_V4:
+ case Hexagon::JMP_GTUriPt_ie_nv_V4:
+ case Hexagon::JMP_GTUriPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_nv_V4:
+ case Hexagon::JMP_GTUrrPnt_nv_V4:
+ case Hexagon::JMP_GTUrrNotPt_nv_V4:
+ case Hexagon::JMP_GTUrrNotPnt_nv_V4:
+ case Hexagon::JMP_GTUrrPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_nv_V4:
+ case Hexagon::JMP_GTUrrdnPnt_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
+ case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
+ case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
+ return true;
+
+ default:
+ return false;
+ }
+ return false;
+}
+
+unsigned HexagonInstrInfo::getImmExtForm(const MachineInstr* MI) const {
+ switch(MI->getOpcode()) {
+ default: llvm_unreachable("Unknown type of instruction");
+
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_nv_V4:
+ return Hexagon::JMP_EQriPt_ie_nv_V4;
+ case Hexagon::JMP_EQriNotPt_nv_V4:
+ return Hexagon::JMP_EQriNotPt_ie_nv_V4;
+ case Hexagon::JMP_EQriPnt_nv_V4:
+ return Hexagon::JMP_EQriPnt_ie_nv_V4;
+ case Hexagon::JMP_EQriNotPnt_nv_V4:
+ return Hexagon::JMP_EQriNotPnt_ie_nv_V4;
+
+ // JMP_EQri -- with -1
+ case Hexagon::JMP_EQriPtneg_nv_V4:
+ return Hexagon::JMP_EQriPtneg_ie_nv_V4;
+ case Hexagon::JMP_EQriNotPtneg_nv_V4:
+ return Hexagon::JMP_EQriNotPtneg_ie_nv_V4;
+ case Hexagon::JMP_EQriPntneg_nv_V4:
+ return Hexagon::JMP_EQriPntneg_ie_nv_V4;
+ case Hexagon::JMP_EQriNotPntneg_nv_V4:
+ return Hexagon::JMP_EQriNotPntneg_ie_nv_V4;
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_nv_V4:
+ return Hexagon::JMP_EQrrPt_ie_nv_V4;
+ case Hexagon::JMP_EQrrNotPt_nv_V4:
+ return Hexagon::JMP_EQrrNotPt_ie_nv_V4;
+ case Hexagon::JMP_EQrrPnt_nv_V4:
+ return Hexagon::JMP_EQrrPnt_ie_nv_V4;
+ case Hexagon::JMP_EQrrNotPnt_nv_V4:
+ return Hexagon::JMP_EQrrNotPnt_ie_nv_V4;
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_nv_V4:
+ return Hexagon::JMP_GTriPt_ie_nv_V4;
+ case Hexagon::JMP_GTriNotPt_nv_V4:
+ return Hexagon::JMP_GTriNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTriPnt_nv_V4:
+ return Hexagon::JMP_GTriPnt_ie_nv_V4;
+ case Hexagon::JMP_GTriNotPnt_nv_V4:
+ return Hexagon::JMP_GTriNotPnt_ie_nv_V4;
+
+ // JMP_GTri -- with -1
+ case Hexagon::JMP_GTriPtneg_nv_V4:
+ return Hexagon::JMP_GTriPtneg_ie_nv_V4;
+ case Hexagon::JMP_GTriNotPtneg_nv_V4:
+ return Hexagon::JMP_GTriNotPtneg_ie_nv_V4;
+ case Hexagon::JMP_GTriPntneg_nv_V4:
+ return Hexagon::JMP_GTriPntneg_ie_nv_V4;
+ case Hexagon::JMP_GTriNotPntneg_nv_V4:
+ return Hexagon::JMP_GTriNotPntneg_ie_nv_V4;
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_nv_V4:
+ return Hexagon::JMP_GTrrPt_ie_nv_V4;
+ case Hexagon::JMP_GTrrNotPt_nv_V4:
+ return Hexagon::JMP_GTrrNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTrrPnt_nv_V4:
+ return Hexagon::JMP_GTrrPnt_ie_nv_V4;
+ case Hexagon::JMP_GTrrNotPnt_nv_V4:
+ return Hexagon::JMP_GTrrNotPnt_ie_nv_V4;
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_nv_V4:
+ return Hexagon::JMP_GTrrdnPt_ie_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPt_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTrrdnPnt_nv_V4:
+ return Hexagon::JMP_GTrrdnPnt_ie_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4;
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_nv_V4:
+ return Hexagon::JMP_GTUriPt_ie_nv_V4;
+ case Hexagon::JMP_GTUriNotPt_nv_V4:
+ return Hexagon::JMP_GTUriNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTUriPnt_nv_V4:
+ return Hexagon::JMP_GTUriPnt_ie_nv_V4;
+ case Hexagon::JMP_GTUriNotPnt_nv_V4:
+ return Hexagon::JMP_GTUriNotPnt_ie_nv_V4;
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_nv_V4:
+ return Hexagon::JMP_GTUrrPt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrNotPt_nv_V4:
+ return Hexagon::JMP_GTUrrNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrPnt_nv_V4:
+ return Hexagon::JMP_GTUrrPnt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrNotPnt_nv_V4:
+ return Hexagon::JMP_GTUrrNotPnt_ie_nv_V4;
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_nv_V4:
+ return Hexagon::JMP_GTUrrdnPt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrdnPnt_nv_V4:
+ return Hexagon::JMP_GTUrrdnPnt_ie_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4;
+
+ case Hexagon::TFR_FI:
+ return Hexagon::TFR_FI_immext_V4;
+
+ case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMw_ADDi_MEM_V4 :
+ case Hexagon::MEMw_SUBi_MEM_V4 :
+ case Hexagon::MEMw_ADDr_MEM_V4 :
+ case Hexagon::MEMw_SUBr_MEM_V4 :
+ case Hexagon::MEMw_ANDr_MEM_V4 :
+ case Hexagon::MEMw_ORr_MEM_V4 :
+ case Hexagon::MEMh_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMh_ADDi_MEM_V4 :
+ case Hexagon::MEMh_SUBi_MEM_V4 :
+ case Hexagon::MEMh_ADDr_MEM_V4 :
+ case Hexagon::MEMh_SUBr_MEM_V4 :
+ case Hexagon::MEMh_ANDr_MEM_V4 :
+ case Hexagon::MEMh_ORr_MEM_V4 :
+ case Hexagon::MEMb_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMb_ADDi_MEM_V4 :
+ case Hexagon::MEMb_SUBi_MEM_V4 :
+ case Hexagon::MEMb_ADDr_MEM_V4 :
+ case Hexagon::MEMb_SUBr_MEM_V4 :
+ case Hexagon::MEMb_ANDr_MEM_V4 :
+ case Hexagon::MEMb_ORr_MEM_V4 :
+ llvm_unreachable("Needs implementing");
+ }
+}
+
+unsigned HexagonInstrInfo::getNormalBranchForm(const MachineInstr* MI) const {
+ switch(MI->getOpcode()) {
+ default: llvm_unreachable("Unknown type of jump instruction");
+
+ // JMP_EQri
+ case Hexagon::JMP_EQriPt_ie_nv_V4:
+ return Hexagon::JMP_EQriPt_nv_V4;
+ case Hexagon::JMP_EQriNotPt_ie_nv_V4:
+ return Hexagon::JMP_EQriNotPt_nv_V4;
+ case Hexagon::JMP_EQriPnt_ie_nv_V4:
+ return Hexagon::JMP_EQriPnt_nv_V4;
+ case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
+ return Hexagon::JMP_EQriNotPnt_nv_V4;
+
+ // JMP_EQri -- with -1
+ case Hexagon::JMP_EQriPtneg_ie_nv_V4:
+ return Hexagon::JMP_EQriPtneg_nv_V4;
+ case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
+ return Hexagon::JMP_EQriNotPtneg_nv_V4;
+ case Hexagon::JMP_EQriPntneg_ie_nv_V4:
+ return Hexagon::JMP_EQriPntneg_nv_V4;
+ case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
+ return Hexagon::JMP_EQriNotPntneg_nv_V4;
+
+ // JMP_EQrr
+ case Hexagon::JMP_EQrrPt_ie_nv_V4:
+ return Hexagon::JMP_EQrrPt_nv_V4;
+ case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
+ return Hexagon::JMP_EQrrNotPt_nv_V4;
+ case Hexagon::JMP_EQrrPnt_ie_nv_V4:
+ return Hexagon::JMP_EQrrPnt_nv_V4;
+ case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
+ return Hexagon::JMP_EQrrNotPnt_nv_V4;
+
+ // JMP_GTri
+ case Hexagon::JMP_GTriPt_ie_nv_V4:
+ return Hexagon::JMP_GTriPt_nv_V4;
+ case Hexagon::JMP_GTriNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTriNotPt_nv_V4;
+ case Hexagon::JMP_GTriPnt_ie_nv_V4:
+ return Hexagon::JMP_GTriPnt_nv_V4;
+ case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTriNotPnt_nv_V4;
+
+ // JMP_GTri -- with -1
+ case Hexagon::JMP_GTriPtneg_ie_nv_V4:
+ return Hexagon::JMP_GTriPtneg_nv_V4;
+ case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
+ return Hexagon::JMP_GTriNotPtneg_nv_V4;
+ case Hexagon::JMP_GTriPntneg_ie_nv_V4:
+ return Hexagon::JMP_GTriPntneg_nv_V4;
+ case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
+ return Hexagon::JMP_GTriNotPntneg_nv_V4;
+
+ // JMP_GTrr
+ case Hexagon::JMP_GTrrPt_ie_nv_V4:
+ return Hexagon::JMP_GTrrPt_nv_V4;
+ case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTrrNotPt_nv_V4;
+ case Hexagon::JMP_GTrrPnt_ie_nv_V4:
+ return Hexagon::JMP_GTrrPnt_nv_V4;
+ case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTrrNotPnt_nv_V4;
+
+ // JMP_GTrrdn
+ case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
+ return Hexagon::JMP_GTrrdnPt_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPt_nv_V4;
+ case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
+ return Hexagon::JMP_GTrrdnPnt_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPnt_nv_V4;
+
+ // JMP_GTUri
+ case Hexagon::JMP_GTUriPt_ie_nv_V4:
+ return Hexagon::JMP_GTUriPt_nv_V4;
+ case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTUriNotPt_nv_V4;
+ case Hexagon::JMP_GTUriPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUriPnt_nv_V4;
+ case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUriNotPnt_nv_V4;
+
+ // JMP_GTUrr
+ case Hexagon::JMP_GTUrrPt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrPt_nv_V4;
+ case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrNotPt_nv_V4;
+ case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrPnt_nv_V4;
+ case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrNotPnt_nv_V4;
+
+ // JMP_GTUrrdn
+ case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrdnPt_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPt_nv_V4;
+ case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrdnPnt_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPnt_nv_V4;
+ }
+}
+
+
+bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+
+ // Store Byte
+ case Hexagon::STrib_nv_V4:
+ case Hexagon::STrib_indexed_nv_V4:
+ case Hexagon::STrib_indexed_shl_nv_V4:
+ case Hexagon::STrib_shl_nv_V4:
+ case Hexagon::STrib_GP_nv_V4:
+ case Hexagon::STb_GP_nv_V4:
+ case Hexagon::POST_STbri_nv_V4:
+ case Hexagon::STrib_cPt_nv_V4:
+ case Hexagon::STrib_cdnPt_nv_V4:
+ case Hexagon::STrib_cNotPt_nv_V4:
+ case Hexagon::STrib_cdnNotPt_nv_V4:
+ case Hexagon::STrib_indexed_cPt_nv_V4:
+ case Hexagon::STrib_indexed_cdnPt_nv_V4:
+ case Hexagon::STrib_indexed_cNotPt_nv_V4:
+ case Hexagon::STrib_indexed_cdnNotPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cNotPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
+ case Hexagon::POST_STbri_cPt_nv_V4:
+ case Hexagon::POST_STbri_cdnPt_nv_V4:
+ case Hexagon::POST_STbri_cNotPt_nv_V4:
+ case Hexagon::POST_STbri_cdnNotPt_nv_V4:
+ case Hexagon::STb_GP_cPt_nv_V4:
+ case Hexagon::STb_GP_cNotPt_nv_V4:
+ case Hexagon::STb_GP_cdnPt_nv_V4:
+ case Hexagon::STb_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrib_GP_cPt_nv_V4:
+ case Hexagon::STrib_GP_cNotPt_nv_V4:
+ case Hexagon::STrib_GP_cdnPt_nv_V4:
+ case Hexagon::STrib_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrib_abs_nv_V4:
+ case Hexagon::STrib_abs_cPt_nv_V4:
+ case Hexagon::STrib_abs_cdnPt_nv_V4:
+ case Hexagon::STrib_abs_cNotPt_nv_V4:
+ case Hexagon::STrib_abs_cdnNotPt_nv_V4:
+ case Hexagon::STrib_imm_abs_nv_V4:
+ case Hexagon::STrib_imm_abs_cPt_nv_V4:
+ case Hexagon::STrib_imm_abs_cdnPt_nv_V4:
+ case Hexagon::STrib_imm_abs_cNotPt_nv_V4:
+ case Hexagon::STrib_imm_abs_cdnNotPt_nv_V4:
+
+ // Store Halfword
+ case Hexagon::STrih_nv_V4:
+ case Hexagon::STrih_indexed_nv_V4:
+ case Hexagon::STrih_indexed_shl_nv_V4:
+ case Hexagon::STrih_shl_nv_V4:
+ case Hexagon::STrih_GP_nv_V4:
+ case Hexagon::STh_GP_nv_V4:
+ case Hexagon::POST_SThri_nv_V4:
+ case Hexagon::STrih_cPt_nv_V4:
+ case Hexagon::STrih_cdnPt_nv_V4:
+ case Hexagon::STrih_cNotPt_nv_V4:
+ case Hexagon::STrih_cdnNotPt_nv_V4:
+ case Hexagon::STrih_indexed_cPt_nv_V4:
+ case Hexagon::STrih_indexed_cdnPt_nv_V4:
+ case Hexagon::STrih_indexed_cNotPt_nv_V4:
+ case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
+ case Hexagon::STrih_indexed_shl_cPt_nv_V4:
+ case Hexagon::STrih_indexed_shl_cdnPt_nv_V4:
+ case Hexagon::STrih_indexed_shl_cNotPt_nv_V4:
+ case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4:
+ case Hexagon::POST_SThri_cPt_nv_V4:
+ case Hexagon::POST_SThri_cdnPt_nv_V4:
+ case Hexagon::POST_SThri_cNotPt_nv_V4:
+ case Hexagon::POST_SThri_cdnNotPt_nv_V4:
+ case Hexagon::STh_GP_cPt_nv_V4:
+ case Hexagon::STh_GP_cNotPt_nv_V4:
+ case Hexagon::STh_GP_cdnPt_nv_V4:
+ case Hexagon::STh_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrih_GP_cPt_nv_V4:
+ case Hexagon::STrih_GP_cNotPt_nv_V4:
+ case Hexagon::STrih_GP_cdnPt_nv_V4:
+ case Hexagon::STrih_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrih_abs_nv_V4:
+ case Hexagon::STrih_abs_cPt_nv_V4:
+ case Hexagon::STrih_abs_cdnPt_nv_V4:
+ case Hexagon::STrih_abs_cNotPt_nv_V4:
+ case Hexagon::STrih_abs_cdnNotPt_nv_V4:
+ case Hexagon::STrih_imm_abs_nv_V4:
+ case Hexagon::STrih_imm_abs_cPt_nv_V4:
+ case Hexagon::STrih_imm_abs_cdnPt_nv_V4:
+ case Hexagon::STrih_imm_abs_cNotPt_nv_V4:
+ case Hexagon::STrih_imm_abs_cdnNotPt_nv_V4:
+
+ // Store Word
+ case Hexagon::STriw_nv_V4:
+ case Hexagon::STriw_indexed_nv_V4:
+ case Hexagon::STriw_indexed_shl_nv_V4:
+ case Hexagon::STriw_shl_nv_V4:
+ case Hexagon::STriw_GP_nv_V4:
+ case Hexagon::STw_GP_nv_V4:
+ case Hexagon::POST_STwri_nv_V4:
+ case Hexagon::STriw_cPt_nv_V4:
+ case Hexagon::STriw_cdnPt_nv_V4:
+ case Hexagon::STriw_cNotPt_nv_V4:
+ case Hexagon::STriw_cdnNotPt_nv_V4:
+ case Hexagon::STriw_indexed_cPt_nv_V4:
+ case Hexagon::STriw_indexed_cdnPt_nv_V4:
+ case Hexagon::STriw_indexed_cNotPt_nv_V4:
+ case Hexagon::STriw_indexed_cdnNotPt_nv_V4:
+ case Hexagon::STriw_indexed_shl_cPt_nv_V4:
+ case Hexagon::STriw_indexed_shl_cdnPt_nv_V4:
+ case Hexagon::STriw_indexed_shl_cNotPt_nv_V4:
+ case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4:
+ case Hexagon::POST_STwri_cPt_nv_V4:
+ case Hexagon::POST_STwri_cdnPt_nv_V4:
+ case Hexagon::POST_STwri_cNotPt_nv_V4:
+ case Hexagon::POST_STwri_cdnNotPt_nv_V4:
+ case Hexagon::STw_GP_cPt_nv_V4:
+ case Hexagon::STw_GP_cNotPt_nv_V4:
+ case Hexagon::STw_GP_cdnPt_nv_V4:
+ case Hexagon::STw_GP_cdnNotPt_nv_V4:
+ case Hexagon::STriw_GP_cPt_nv_V4:
+ case Hexagon::STriw_GP_cNotPt_nv_V4:
+ case Hexagon::STriw_GP_cdnPt_nv_V4:
+ case Hexagon::STriw_GP_cdnNotPt_nv_V4:
+ case Hexagon::STriw_abs_nv_V4:
+ case Hexagon::STriw_abs_cPt_nv_V4:
+ case Hexagon::STriw_abs_cdnPt_nv_V4:
+ case Hexagon::STriw_abs_cNotPt_nv_V4:
+ case Hexagon::STriw_abs_cdnNotPt_nv_V4:
+ case Hexagon::STriw_imm_abs_nv_V4:
+ case Hexagon::STriw_imm_abs_cPt_nv_V4:
+ case Hexagon::STriw_imm_abs_cdnPt_nv_V4:
+ case Hexagon::STriw_imm_abs_cNotPt_nv_V4:
+ case Hexagon::STriw_imm_abs_cdnNotPt_nv_V4:
+ return true;
+
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool HexagonInstrInfo::isPostIncrement(const MachineInstr* MI) const {
+ switch (MI->getOpcode()) {
+ // Load Byte
+ case Hexagon::POST_LDrib:
+ case Hexagon::POST_LDrib_cPt:
+ case Hexagon::POST_LDrib_cNotPt:
+ case Hexagon::POST_LDrib_cdnPt_V4:
+ case Hexagon::POST_LDrib_cdnNotPt_V4:
+
+ // Load unsigned byte
+ case Hexagon::POST_LDriub:
+ case Hexagon::POST_LDriub_cPt:
+ case Hexagon::POST_LDriub_cNotPt:
+ case Hexagon::POST_LDriub_cdnPt_V4:
+ case Hexagon::POST_LDriub_cdnNotPt_V4:
+
+ // Load halfword
+ case Hexagon::POST_LDrih:
+ case Hexagon::POST_LDrih_cPt:
+ case Hexagon::POST_LDrih_cNotPt:
+ case Hexagon::POST_LDrih_cdnPt_V4:
+ case Hexagon::POST_LDrih_cdnNotPt_V4:
+
+ // Load unsigned halfword
+ case Hexagon::POST_LDriuh:
+ case Hexagon::POST_LDriuh_cPt:
+ case Hexagon::POST_LDriuh_cNotPt:
+ case Hexagon::POST_LDriuh_cdnPt_V4:
+ case Hexagon::POST_LDriuh_cdnNotPt_V4:
+
+ // Load word
+ case Hexagon::POST_LDriw:
+ case Hexagon::POST_LDriw_cPt:
+ case Hexagon::POST_LDriw_cNotPt:
+ case Hexagon::POST_LDriw_cdnPt_V4:
+ case Hexagon::POST_LDriw_cdnNotPt_V4:
+
+ // Load double word
+ case Hexagon::POST_LDrid:
+ case Hexagon::POST_LDrid_cPt:
+ case Hexagon::POST_LDrid_cNotPt:
+ case Hexagon::POST_LDrid_cdnPt_V4:
+ case Hexagon::POST_LDrid_cdnNotPt_V4:
+
+ // Store byte
+ case Hexagon::POST_STbri:
+ case Hexagon::POST_STbri_cPt:
+ case Hexagon::POST_STbri_cNotPt:
+ case Hexagon::POST_STbri_cdnPt_V4:
+ case Hexagon::POST_STbri_cdnNotPt_V4:
+
+ // Store halfword
+ case Hexagon::POST_SThri:
+ case Hexagon::POST_SThri_cPt:
+ case Hexagon::POST_SThri_cNotPt:
+ case Hexagon::POST_SThri_cdnPt_V4:
+ case Hexagon::POST_SThri_cdnNotPt_V4:
+
+ // Store word
+ case Hexagon::POST_STwri:
+ case Hexagon::POST_STwri_cPt:
+ case Hexagon::POST_STwri_cNotPt:
+ case Hexagon::POST_STwri_cdnPt_V4:
+ case Hexagon::POST_STwri_cdnNotPt_V4:
+
+ // Store double word
+ case Hexagon::POST_STdri:
+ case Hexagon::POST_STdri_cPt:
+ case Hexagon::POST_STdri_cNotPt:
+ case Hexagon::POST_STdri_cdnPt_V4:
+ case Hexagon::POST_STdri_cdnNotPt_V4:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
+ return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4;
+}
+
+bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
+ bool isPred = MI->getDesc().isPredicable();
+
+ if (!isPred)
+ return false;
+
+ const int Opc = MI->getOpcode();
+
+ switch(Opc) {
+ case Hexagon::TFRI:
+ return isInt<12>(MI->getOperand(1).getImm());
+
+ case Hexagon::STrid:
+ case Hexagon::STrid_indexed:
+ return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
+
+ case Hexagon::STriw:
+ case Hexagon::STriw_indexed:
+ case Hexagon::STriw_nv_V4:
+ return isShiftedUInt<6,2>(MI->getOperand(1).getImm());
+
+ case Hexagon::STrih:
+ case Hexagon::STrih_indexed:
+ case Hexagon::STrih_nv_V4:
+ return isShiftedUInt<6,1>(MI->getOperand(1).getImm());
+
+ case Hexagon::STrib:
+ case Hexagon::STrib_indexed:
+ case Hexagon::STrib_nv_V4:
+ return isUInt<6>(MI->getOperand(1).getImm());
+
+ case Hexagon::LDrid:
+ case Hexagon::LDrid_indexed:
+ return isShiftedUInt<6,3>(MI->getOperand(2).getImm());
+
+ case Hexagon::LDriw:
+ case Hexagon::LDriw_indexed:
+ return isShiftedUInt<6,2>(MI->getOperand(2).getImm());
+
+ case Hexagon::LDrih:
+ case Hexagon::LDriuh:
+ case Hexagon::LDrih_indexed:
+ case Hexagon::LDriuh_indexed:
+ return isShiftedUInt<6,1>(MI->getOperand(2).getImm());
+
+ case Hexagon::LDrib:
+ case Hexagon::LDriub:
+ case Hexagon::LDrib_indexed:
+ case Hexagon::LDriub_indexed:
+ return isUInt<6>(MI->getOperand(2).getImm());
+
+ case Hexagon::POST_LDrid:
+ return isShiftedInt<4,3>(MI->getOperand(3).getImm());
+
+ case Hexagon::POST_LDriw:
+ return isShiftedInt<4,2>(MI->getOperand(3).getImm());
+
+ case Hexagon::POST_LDrih:
+ case Hexagon::POST_LDriuh:
+ return isShiftedInt<4,1>(MI->getOperand(3).getImm());
+
+ case Hexagon::POST_LDrib:
+ case Hexagon::POST_LDriub:
+ return isInt<4>(MI->getOperand(3).getImm());
+
+ case Hexagon::STrib_imm_V4:
+ case Hexagon::STrih_imm_V4:
+ case Hexagon::STriw_imm_V4:
+ return (isUInt<6>(MI->getOperand(1).getImm()) &&
+ isInt<6>(MI->getOperand(2).getImm()));
+
+ case Hexagon::ADD_ri:
+ return isInt<8>(MI->getOperand(2).getImm());
+
+ case Hexagon::ASLH:
+ case Hexagon::ASRH:
+ case Hexagon::SXTB:
+ case Hexagon::SXTH:
+ case Hexagon::ZXTB:
+ case Hexagon::ZXTH:
+ return Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4;
+
+ case Hexagon::JMPR:
+ return false;
+ }
+
+ return true;
+}
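+
+// The isShiftedUInt<N,S> predicates above accept an unsigned N-bit value
+// shifted left by S bits, i.e. a multiple of 2^S below 2^(N+S). A worked
+// example for the STrid case (isShiftedUInt<6,3>):
+//
+//   assert(isShiftedUInt<6,3>(504));    // 63 << 3: largest legal offset
+//   assert(!isShiftedUInt<6,3>(505));   // not a multiple of 8
+//   assert(!isShiftedUInt<6,3>(512));   // 64 does not fit in 6 bits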
+
+unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
+ switch(Opc) {
+ case Hexagon::TFR_cPt:
+ return Hexagon::TFR_cNotPt;
+ case Hexagon::TFR_cNotPt:
+ return Hexagon::TFR_cPt;
+
+ case Hexagon::TFRI_cPt:
+ return Hexagon::TFRI_cNotPt;
+ case Hexagon::TFRI_cNotPt:
+ return Hexagon::TFRI_cPt;
+
+ case Hexagon::JMP_c:
+ return Hexagon::JMP_cNot;
+ case Hexagon::JMP_cNot:
+ return Hexagon::JMP_c;
+
+ case Hexagon::ADD_ri_cPt:
+ return Hexagon::ADD_ri_cNotPt;
+ case Hexagon::ADD_ri_cNotPt:
+ return Hexagon::ADD_ri_cPt;
+
+ case Hexagon::ADD_rr_cPt:
+ return Hexagon::ADD_rr_cNotPt;
+ case Hexagon::ADD_rr_cNotPt:
+ return Hexagon::ADD_rr_cPt;
+
+ case Hexagon::XOR_rr_cPt:
+ return Hexagon::XOR_rr_cNotPt;
+ case Hexagon::XOR_rr_cNotPt:
+ return Hexagon::XOR_rr_cPt;
+
+ case Hexagon::AND_rr_cPt:
+ return Hexagon::AND_rr_cNotPt;
+ case Hexagon::AND_rr_cNotPt:
+ return Hexagon::AND_rr_cPt;
+
+ case Hexagon::OR_rr_cPt:
+ return Hexagon::OR_rr_cNotPt;
+ case Hexagon::OR_rr_cNotPt:
+ return Hexagon::OR_rr_cPt;
+
+ case Hexagon::SUB_rr_cPt:
+ return Hexagon::SUB_rr_cNotPt;
+ case Hexagon::SUB_rr_cNotPt:
+ return Hexagon::SUB_rr_cPt;
+
+ case Hexagon::COMBINE_rr_cPt:
+ return Hexagon::COMBINE_rr_cNotPt;
+ case Hexagon::COMBINE_rr_cNotPt:
+ return Hexagon::COMBINE_rr_cPt;
+
+ case Hexagon::ASLH_cPt_V4:
+ return Hexagon::ASLH_cNotPt_V4;
+ case Hexagon::ASLH_cNotPt_V4:
+ return Hexagon::ASLH_cPt_V4;
+
+ case Hexagon::ASRH_cPt_V4:
+ return Hexagon::ASRH_cNotPt_V4;
+ case Hexagon::ASRH_cNotPt_V4:
+ return Hexagon::ASRH_cPt_V4;
+
+ case Hexagon::SXTB_cPt_V4:
+ return Hexagon::SXTB_cNotPt_V4;
+ case Hexagon::SXTB_cNotPt_V4:
+ return Hexagon::SXTB_cPt_V4;
+
+ case Hexagon::SXTH_cPt_V4:
+ return Hexagon::SXTH_cNotPt_V4;
+ case Hexagon::SXTH_cNotPt_V4:
+ return Hexagon::SXTH_cPt_V4;
+
+ case Hexagon::ZXTB_cPt_V4:
+ return Hexagon::ZXTB_cNotPt_V4;
+ case Hexagon::ZXTB_cNotPt_V4:
+ return Hexagon::ZXTB_cPt_V4;
+
+ case Hexagon::ZXTH_cPt_V4:
+ return Hexagon::ZXTH_cNotPt_V4;
+ case Hexagon::ZXTH_cNotPt_V4:
+ return Hexagon::ZXTH_cPt_V4;
+
+
+ case Hexagon::JMPR_cPt:
+ return Hexagon::JMPR_cNotPt;
+ case Hexagon::JMPR_cNotPt:
+ return Hexagon::JMPR_cPt;
+
+ // V4 indexed+scaled load.
+ case Hexagon::LDrid_indexed_cPt_V4:
+ return Hexagon::LDrid_indexed_cNotPt_V4;
+ case Hexagon::LDrid_indexed_cNotPt_V4:
+ return Hexagon::LDrid_indexed_cPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cPt_V4:
+ return Hexagon::LDrid_indexed_shl_cNotPt_V4;
+ case Hexagon::LDrid_indexed_shl_cNotPt_V4:
+ return Hexagon::LDrid_indexed_shl_cPt_V4;
+
+ case Hexagon::LDrib_indexed_cPt_V4:
+ return Hexagon::LDrib_indexed_cNotPt_V4;
+ case Hexagon::LDrib_indexed_cNotPt_V4:
+ return Hexagon::LDrib_indexed_cPt_V4;
+
+ case Hexagon::LDriub_indexed_cPt_V4:
+ return Hexagon::LDriub_indexed_cNotPt_V4;
+ case Hexagon::LDriub_indexed_cNotPt_V4:
+ return Hexagon::LDriub_indexed_cPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cPt_V4:
+ return Hexagon::LDrib_indexed_shl_cNotPt_V4;
+ case Hexagon::LDrib_indexed_shl_cNotPt_V4:
+ return Hexagon::LDrib_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cPt_V4:
+ return Hexagon::LDriub_indexed_shl_cNotPt_V4;
+ case Hexagon::LDriub_indexed_shl_cNotPt_V4:
+ return Hexagon::LDriub_indexed_shl_cPt_V4;
+
+ case Hexagon::LDrih_indexed_cPt_V4:
+ return Hexagon::LDrih_indexed_cNotPt_V4;
+ case Hexagon::LDrih_indexed_cNotPt_V4:
+ return Hexagon::LDrih_indexed_cPt_V4;
+
+ case Hexagon::LDriuh_indexed_cPt_V4:
+ return Hexagon::LDriuh_indexed_cNotPt_V4;
+ case Hexagon::LDriuh_indexed_cNotPt_V4:
+ return Hexagon::LDriuh_indexed_cPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cPt_V4:
+ return Hexagon::LDrih_indexed_shl_cNotPt_V4;
+ case Hexagon::LDrih_indexed_shl_cNotPt_V4:
+ return Hexagon::LDrih_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cPt_V4:
+ return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
+ case Hexagon::LDriuh_indexed_shl_cNotPt_V4:
+ return Hexagon::LDriuh_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriw_indexed_cPt_V4:
+ return Hexagon::LDriw_indexed_cNotPt_V4;
+ case Hexagon::LDriw_indexed_cNotPt_V4:
+ return Hexagon::LDriw_indexed_cPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cPt_V4:
+ return Hexagon::LDriw_indexed_shl_cNotPt_V4;
+ case Hexagon::LDriw_indexed_shl_cNotPt_V4:
+ return Hexagon::LDriw_indexed_shl_cPt_V4;
+
+ // Byte.
+ case Hexagon::POST_STbri_cPt:
+ return Hexagon::POST_STbri_cNotPt;
+ case Hexagon::POST_STbri_cNotPt:
+ return Hexagon::POST_STbri_cPt;
+
+ case Hexagon::STrib_cPt:
+ return Hexagon::STrib_cNotPt;
+ case Hexagon::STrib_cNotPt:
+ return Hexagon::STrib_cPt;
+
+ case Hexagon::STrib_indexed_cPt:
+ return Hexagon::STrib_indexed_cNotPt;
+ case Hexagon::STrib_indexed_cNotPt:
+ return Hexagon::STrib_indexed_cPt;
+
+ case Hexagon::STrib_imm_cPt_V4:
+ return Hexagon::STrib_imm_cNotPt_V4;
+ case Hexagon::STrib_imm_cNotPt_V4:
+ return Hexagon::STrib_imm_cPt_V4;
+
+ case Hexagon::STrib_indexed_shl_cPt_V4:
+ return Hexagon::STrib_indexed_shl_cNotPt_V4;
+ case Hexagon::STrib_indexed_shl_cNotPt_V4:
+ return Hexagon::STrib_indexed_shl_cPt_V4;
+
+ // Halfword.
+ case Hexagon::POST_SThri_cPt:
+ return Hexagon::POST_SThri_cNotPt;
+ case Hexagon::POST_SThri_cNotPt:
+ return Hexagon::POST_SThri_cPt;
+
+ case Hexagon::STrih_cPt:
+ return Hexagon::STrih_cNotPt;
+ case Hexagon::STrih_cNotPt:
+ return Hexagon::STrih_cPt;
+
+ case Hexagon::STrih_indexed_cPt:
+ return Hexagon::STrih_indexed_cNotPt;
+ case Hexagon::STrih_indexed_cNotPt:
+ return Hexagon::STrih_indexed_cPt;
+
+ case Hexagon::STrih_imm_cPt_V4:
+ return Hexagon::STrih_imm_cNotPt_V4;
+ case Hexagon::STrih_imm_cNotPt_V4:
+ return Hexagon::STrih_imm_cPt_V4;
+
+ case Hexagon::STrih_indexed_shl_cPt_V4:
+ return Hexagon::STrih_indexed_shl_cNotPt_V4;
+ case Hexagon::STrih_indexed_shl_cNotPt_V4:
+ return Hexagon::STrih_indexed_shl_cPt_V4;
+
+ // Word.
+ case Hexagon::POST_STwri_cPt:
+ return Hexagon::POST_STwri_cNotPt;
+ case Hexagon::POST_STwri_cNotPt:
+ return Hexagon::POST_STwri_cPt;
+
+ case Hexagon::STriw_cPt:
+ return Hexagon::STriw_cNotPt;
+ case Hexagon::STriw_cNotPt:
+ return Hexagon::STriw_cPt;
+
+ case Hexagon::STriw_indexed_cPt:
+ return Hexagon::STriw_indexed_cNotPt;
+ case Hexagon::STriw_indexed_cNotPt:
+ return Hexagon::STriw_indexed_cPt;
+
+ case Hexagon::STriw_indexed_shl_cPt_V4:
+ return Hexagon::STriw_indexed_shl_cNotPt_V4;
+ case Hexagon::STriw_indexed_shl_cNotPt_V4:
+ return Hexagon::STriw_indexed_shl_cPt_V4;
+
+ case Hexagon::STriw_imm_cPt_V4:
+ return Hexagon::STriw_imm_cNotPt_V4;
+ case Hexagon::STriw_imm_cNotPt_V4:
+ return Hexagon::STriw_imm_cPt_V4;
+
+ // Double word.
+ case Hexagon::POST_STdri_cPt:
+ return Hexagon::POST_STdri_cNotPt;
+ case Hexagon::POST_STdri_cNotPt:
+ return Hexagon::POST_STdri_cPt;
+
+ case Hexagon::STrid_cPt:
+ return Hexagon::STrid_cNotPt;
+ case Hexagon::STrid_cNotPt:
+ return Hexagon::STrid_cPt;
+
+ case Hexagon::STrid_indexed_cPt:
+ return Hexagon::STrid_indexed_cNotPt;
+ case Hexagon::STrid_indexed_cNotPt:
+ return Hexagon::STrid_indexed_cPt;
+
+ case Hexagon::STrid_indexed_shl_cPt_V4:
+ return Hexagon::STrid_indexed_shl_cNotPt_V4;
+ case Hexagon::STrid_indexed_shl_cNotPt_V4:
+ return Hexagon::STrid_indexed_shl_cPt_V4;
+
+ // Load.
+ case Hexagon::LDrid_cPt:
+ return Hexagon::LDrid_cNotPt;
+ case Hexagon::LDrid_cNotPt:
+ return Hexagon::LDrid_cPt;
+
+ case Hexagon::LDriw_cPt:
+ return Hexagon::LDriw_cNotPt;
+ case Hexagon::LDriw_cNotPt:
+ return Hexagon::LDriw_cPt;
+
+ case Hexagon::LDrih_cPt:
+ return Hexagon::LDrih_cNotPt;
+ case Hexagon::LDrih_cNotPt:
+ return Hexagon::LDrih_cPt;
+
+ case Hexagon::LDriuh_cPt:
+ return Hexagon::LDriuh_cNotPt;
+ case Hexagon::LDriuh_cNotPt:
+ return Hexagon::LDriuh_cPt;
+
+ case Hexagon::LDrib_cPt:
+ return Hexagon::LDrib_cNotPt;
+ case Hexagon::LDrib_cNotPt:
+ return Hexagon::LDrib_cPt;
+
+ case Hexagon::LDriub_cPt:
+ return Hexagon::LDriub_cNotPt;
+ case Hexagon::LDriub_cNotPt:
+ return Hexagon::LDriub_cPt;
+
+ // Load Indexed.
+ case Hexagon::LDrid_indexed_cPt:
+ return Hexagon::LDrid_indexed_cNotPt;
+ case Hexagon::LDrid_indexed_cNotPt:
+ return Hexagon::LDrid_indexed_cPt;
+
+ case Hexagon::LDriw_indexed_cPt:
+ return Hexagon::LDriw_indexed_cNotPt;
+ case Hexagon::LDriw_indexed_cNotPt:
+ return Hexagon::LDriw_indexed_cPt;
+
+ case Hexagon::LDrih_indexed_cPt:
+ return Hexagon::LDrih_indexed_cNotPt;
+ case Hexagon::LDrih_indexed_cNotPt:
+ return Hexagon::LDrih_indexed_cPt;
+
+ case Hexagon::LDriuh_indexed_cPt:
+ return Hexagon::LDriuh_indexed_cNotPt;
+ case Hexagon::LDriuh_indexed_cNotPt:
+ return Hexagon::LDriuh_indexed_cPt;
+
+ case Hexagon::LDrib_indexed_cPt:
+ return Hexagon::LDrib_indexed_cNotPt;
+ case Hexagon::LDrib_indexed_cNotPt:
+ return Hexagon::LDrib_indexed_cPt;
+
+ case Hexagon::LDriub_indexed_cPt:
+ return Hexagon::LDriub_indexed_cNotPt;
+ case Hexagon::LDriub_indexed_cNotPt:
+ return Hexagon::LDriub_indexed_cPt;
+
+ // Post Inc Load.
+ case Hexagon::POST_LDrid_cPt:
+ return Hexagon::POST_LDrid_cNotPt;
+ case Hexagon::POST_LDrid_cNotPt:
+ return Hexagon::POST_LDrid_cPt;
+
+ case Hexagon::POST_LDriw_cPt:
+ return Hexagon::POST_LDriw_cNotPt;
+ case Hexagon::POST_LDriw_cNotPt:
+ return Hexagon::POST_LDriw_cPt;
+
+ case Hexagon::POST_LDrih_cPt:
+ return Hexagon::POST_LDrih_cNotPt;
+ case Hexagon::POST_LDrih_cNotPt:
+ return Hexagon::POST_LDrih_cPt;
+
+ case Hexagon::POST_LDriuh_cPt:
+ return Hexagon::POST_LDriuh_cNotPt;
+ case Hexagon::POST_LDriuh_cNotPt:
+ return Hexagon::POST_LDriuh_cPt;
+
+ case Hexagon::POST_LDrib_cPt:
+ return Hexagon::POST_LDrib_cNotPt;
+ case Hexagon::POST_LDrib_cNotPt:
+ return Hexagon::POST_LDrib_cPt;
+
+ case Hexagon::POST_LDriub_cPt:
+ return Hexagon::POST_LDriub_cNotPt;
+ case Hexagon::POST_LDriub_cNotPt:
+ return Hexagon::POST_LDriub_cPt;
+
+ // Dealloc_return.
+ case Hexagon::DEALLOC_RET_cPt_V4:
+ return Hexagon::DEALLOC_RET_cNotPt_V4;
+ case Hexagon::DEALLOC_RET_cNotPt_V4:
+ return Hexagon::DEALLOC_RET_cPt_V4;
+
+ // New Value Jump.
+ // JMPEQ_ri - with -1.
+ case Hexagon::JMP_EQriPtneg_nv_V4:
+ return Hexagon::JMP_EQriNotPtneg_nv_V4;
+ case Hexagon::JMP_EQriNotPtneg_nv_V4:
+ return Hexagon::JMP_EQriPtneg_nv_V4;
+
+ case Hexagon::JMP_EQriPntneg_nv_V4:
+ return Hexagon::JMP_EQriNotPntneg_nv_V4;
+ case Hexagon::JMP_EQriNotPntneg_nv_V4:
+ return Hexagon::JMP_EQriPntneg_nv_V4;
+
+ // JMPEQ_ri.
+ case Hexagon::JMP_EQriPt_nv_V4:
+ return Hexagon::JMP_EQriNotPt_nv_V4;
+ case Hexagon::JMP_EQriNotPt_nv_V4:
+ return Hexagon::JMP_EQriPt_nv_V4;
+
+ case Hexagon::JMP_EQriPnt_nv_V4:
+ return Hexagon::JMP_EQriNotPnt_nv_V4;
+ case Hexagon::JMP_EQriNotPnt_nv_V4:
+ return Hexagon::JMP_EQriPnt_nv_V4;
+
+ // JMPEQ_rr.
+ case Hexagon::JMP_EQrrPt_nv_V4:
+ return Hexagon::JMP_EQrrNotPt_nv_V4;
+ case Hexagon::JMP_EQrrNotPt_nv_V4:
+ return Hexagon::JMP_EQrrPt_nv_V4;
+
+ case Hexagon::JMP_EQrrPnt_nv_V4:
+ return Hexagon::JMP_EQrrNotPnt_nv_V4;
+ case Hexagon::JMP_EQrrNotPnt_nv_V4:
+ return Hexagon::JMP_EQrrPnt_nv_V4;
+
+ // JMPGT_ri - with -1.
+ case Hexagon::JMP_GTriPtneg_nv_V4:
+ return Hexagon::JMP_GTriNotPtneg_nv_V4;
+ case Hexagon::JMP_GTriNotPtneg_nv_V4:
+ return Hexagon::JMP_GTriPtneg_nv_V4;
+
+ case Hexagon::JMP_GTriPntneg_nv_V4:
+ return Hexagon::JMP_GTriNotPntneg_nv_V4;
+ case Hexagon::JMP_GTriNotPntneg_nv_V4:
+ return Hexagon::JMP_GTriPntneg_nv_V4;
+
+ // JMPGT_ri.
+ case Hexagon::JMP_GTriPt_nv_V4:
+ return Hexagon::JMP_GTriNotPt_nv_V4;
+ case Hexagon::JMP_GTriNotPt_nv_V4:
+ return Hexagon::JMP_GTriPt_nv_V4;
+
+ case Hexagon::JMP_GTriPnt_nv_V4:
+ return Hexagon::JMP_GTriNotPnt_nv_V4;
+ case Hexagon::JMP_GTriNotPnt_nv_V4:
+ return Hexagon::JMP_GTriPnt_nv_V4;
+
+ // JMPGT_rr.
+ case Hexagon::JMP_GTrrPt_nv_V4:
+ return Hexagon::JMP_GTrrNotPt_nv_V4;
+ case Hexagon::JMP_GTrrNotPt_nv_V4:
+ return Hexagon::JMP_GTrrPt_nv_V4;
+
+ case Hexagon::JMP_GTrrPnt_nv_V4:
+ return Hexagon::JMP_GTrrNotPnt_nv_V4;
+ case Hexagon::JMP_GTrrNotPnt_nv_V4:
+ return Hexagon::JMP_GTrrPnt_nv_V4;
+
+ // JMPGT_rrdn.
+ case Hexagon::JMP_GTrrdnPt_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPt_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPt_nv_V4:
+ return Hexagon::JMP_GTrrdnPt_nv_V4;
+
+ case Hexagon::JMP_GTrrdnPnt_nv_V4:
+ return Hexagon::JMP_GTrrdnNotPnt_nv_V4;
+ case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
+ return Hexagon::JMP_GTrrdnPnt_nv_V4;
+
+ // JMPGTU_ri.
+ case Hexagon::JMP_GTUriPt_nv_V4:
+ return Hexagon::JMP_GTUriNotPt_nv_V4;
+ case Hexagon::JMP_GTUriNotPt_nv_V4:
+ return Hexagon::JMP_GTUriPt_nv_V4;
+
+ case Hexagon::JMP_GTUriPnt_nv_V4:
+ return Hexagon::JMP_GTUriNotPnt_nv_V4;
+ case Hexagon::JMP_GTUriNotPnt_nv_V4:
+ return Hexagon::JMP_GTUriPnt_nv_V4;
+
+ // JMPGTU_rr.
+ case Hexagon::JMP_GTUrrPt_nv_V4:
+ return Hexagon::JMP_GTUrrNotPt_nv_V4;
+ case Hexagon::JMP_GTUrrNotPt_nv_V4:
+ return Hexagon::JMP_GTUrrPt_nv_V4;
+
+ case Hexagon::JMP_GTUrrPnt_nv_V4:
+ return Hexagon::JMP_GTUrrNotPnt_nv_V4;
+ case Hexagon::JMP_GTUrrNotPnt_nv_V4:
+ return Hexagon::JMP_GTUrrPnt_nv_V4;
+
+ // JMPGTU_rrdn.
+ case Hexagon::JMP_GTUrrdnPt_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPt_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
+ return Hexagon::JMP_GTUrrdnPt_nv_V4;
+
+ case Hexagon::JMP_GTUrrdnPnt_nv_V4:
+ return Hexagon::JMP_GTUrrdnNotPnt_nv_V4;
+ case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
+ return Hexagon::JMP_GTUrrdnPnt_nv_V4;
+
+ default:
+ llvm_unreachable("Unexpected predicated instruction");
+ }
+}
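+// For illustration: the mapping above is an involution over the _cPt/_cNotPt
+// opcode pairs, so, for example,
+//   getInvertedPredicatedOpcode(Hexagon::STrih_cPt)    == Hexagon::STrih_cNotPt
+//   getInvertedPredicatedOpcode(Hexagon::STrih_cNotPt) == Hexagon::STrih_cPt
+// and applying it twice always returns the original opcode.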
+
+
+int HexagonInstrInfo::
+getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
+ switch(Opc) {
+ case Hexagon::TFR:
+ return !invertPredicate ? Hexagon::TFR_cPt :
+ Hexagon::TFR_cNotPt;
+ case Hexagon::TFRI:
+ return !invertPredicate ? Hexagon::TFRI_cPt :
+ Hexagon::TFRI_cNotPt;
+ case Hexagon::JMP:
+ return !invertPredicate ? Hexagon::JMP_c :
+ Hexagon::JMP_cNot;
+ case Hexagon::ADD_ri:
+ return !invertPredicate ? Hexagon::ADD_ri_cPt :
+ Hexagon::ADD_ri_cNotPt;
+ case Hexagon::ADD_rr:
+ return !invertPredicate ? Hexagon::ADD_rr_cPt :
+ Hexagon::ADD_rr_cNotPt;
+ case Hexagon::XOR_rr:
+ return !invertPredicate ? Hexagon::XOR_rr_cPt :
+ Hexagon::XOR_rr_cNotPt;
+ case Hexagon::AND_rr:
+ return !invertPredicate ? Hexagon::AND_rr_cPt :
+ Hexagon::AND_rr_cNotPt;
+ case Hexagon::OR_rr:
+ return !invertPredicate ? Hexagon::OR_rr_cPt :
+ Hexagon::OR_rr_cNotPt;
+ case Hexagon::SUB_rr:
+ return !invertPredicate ? Hexagon::SUB_rr_cPt :
+ Hexagon::SUB_rr_cNotPt;
+ case Hexagon::COMBINE_rr:
+ return !invertPredicate ? Hexagon::COMBINE_rr_cPt :
+ Hexagon::COMBINE_rr_cNotPt;
+ case Hexagon::ASLH:
+ return !invertPredicate ? Hexagon::ASLH_cPt_V4 :
+ Hexagon::ASLH_cNotPt_V4;
+ case Hexagon::ASRH:
+ return !invertPredicate ? Hexagon::ASRH_cPt_V4 :
+ Hexagon::ASRH_cNotPt_V4;
+ case Hexagon::SXTB:
+ return !invertPredicate ? Hexagon::SXTB_cPt_V4 :
+ Hexagon::SXTB_cNotPt_V4;
+ case Hexagon::SXTH:
+ return !invertPredicate ? Hexagon::SXTH_cPt_V4 :
+ Hexagon::SXTH_cNotPt_V4;
+ case Hexagon::ZXTB:
+ return !invertPredicate ? Hexagon::ZXTB_cPt_V4 :
+ Hexagon::ZXTB_cNotPt_V4;
+ case Hexagon::ZXTH:
+ return !invertPredicate ? Hexagon::ZXTH_cPt_V4 :
+ Hexagon::ZXTH_cNotPt_V4;
+
+ case Hexagon::JMPR:
+ return !invertPredicate ? Hexagon::JMPR_cPt :
+ Hexagon::JMPR_cNotPt;
+
+ // V4 indexed+scaled load.
+ case Hexagon::LDrid_indexed_V4:
+ return !invertPredicate ? Hexagon::LDrid_indexed_cPt_V4 :
+ Hexagon::LDrid_indexed_cNotPt_V4;
+ case Hexagon::LDrid_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::LDrid_indexed_shl_cPt_V4 :
+ Hexagon::LDrid_indexed_shl_cNotPt_V4;
+ case Hexagon::LDrib_indexed_V4:
+ return !invertPredicate ? Hexagon::LDrib_indexed_cPt_V4 :
+ Hexagon::LDrib_indexed_cNotPt_V4;
+ case Hexagon::LDriub_indexed_V4:
+ return !invertPredicate ? Hexagon::LDriub_indexed_cPt_V4 :
+ Hexagon::LDriub_indexed_cNotPt_V4;
+ case Hexagon::LDriub_ae_indexed_V4:
+ return !invertPredicate ? Hexagon::LDriub_indexed_cPt_V4 :
+ Hexagon::LDriub_indexed_cNotPt_V4;
+ case Hexagon::LDrib_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::LDrib_indexed_shl_cPt_V4 :
+ Hexagon::LDrib_indexed_shl_cNotPt_V4;
+ case Hexagon::LDriub_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::LDriub_indexed_shl_cPt_V4 :
+ Hexagon::LDriub_indexed_shl_cNotPt_V4;
+ case Hexagon::LDriub_ae_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::LDriub_indexed_shl_cPt_V4 :
+ Hexagon::LDriub_indexed_shl_cNotPt_V4;
+ case Hexagon::LDrih_indexed_V4:
+ return !invertPredicate ? Hexagon::LDrih_indexed_cPt_V4 :
+ Hexagon::LDrih_indexed_cNotPt_V4;
+ case Hexagon::LDriuh_indexed_V4:
+ return !invertPredicate ? Hexagon::LDriuh_indexed_cPt_V4 :
+ Hexagon::LDriuh_indexed_cNotPt_V4;
+ case Hexagon::LDriuh_ae_indexed_V4:
+ return !invertPredicate ? Hexagon::LDriuh_indexed_cPt_V4 :
+ Hexagon::LDriuh_indexed_cNotPt_V4;
+ case Hexagon::LDrih_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::LDrih_indexed_shl_cPt_V4 :
+ Hexagon::LDrih_indexed_shl_cNotPt_V4;
+ case Hexagon::LDriuh_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::LDriuh_indexed_shl_cPt_V4 :
+ Hexagon::LDriuh_indexed_shl_cNotPt_V4;
+ case Hexagon::LDriuh_ae_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::LDriuh_indexed_shl_cPt_V4 :
+ Hexagon::LDriuh_indexed_shl_cNotPt_V4;
+ case Hexagon::LDriw_indexed_V4:
+ return !invertPredicate ? Hexagon::LDriw_indexed_cPt_V4 :
+ Hexagon::LDriw_indexed_cNotPt_V4;
+ case Hexagon::LDriw_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::LDriw_indexed_shl_cPt_V4 :
+ Hexagon::LDriw_indexed_shl_cNotPt_V4;
+ // Byte.
+ case Hexagon::POST_STbri:
+ return !invertPredicate ? Hexagon::POST_STbri_cPt :
+ Hexagon::POST_STbri_cNotPt;
+ case Hexagon::STrib:
+ return !invertPredicate ? Hexagon::STrib_cPt :
+ Hexagon::STrib_cNotPt;
+ case Hexagon::STrib_indexed:
+ return !invertPredicate ? Hexagon::STrib_indexed_cPt :
+ Hexagon::STrib_indexed_cNotPt;
+ case Hexagon::STrib_imm_V4:
+ return !invertPredicate ? Hexagon::STrib_imm_cPt_V4 :
+ Hexagon::STrib_imm_cNotPt_V4;
+ case Hexagon::STrib_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::STrib_indexed_shl_cPt_V4 :
+ Hexagon::STrib_indexed_shl_cNotPt_V4;
+ // Halfword.
+ case Hexagon::POST_SThri:
+ return !invertPredicate ? Hexagon::POST_SThri_cPt :
+ Hexagon::POST_SThri_cNotPt;
+ case Hexagon::STrih:
+ return !invertPredicate ? Hexagon::STrih_cPt :
+ Hexagon::STrih_cNotPt;
+ case Hexagon::STrih_indexed:
+ return !invertPredicate ? Hexagon::STrih_indexed_cPt :
+ Hexagon::STrih_indexed_cNotPt;
+ case Hexagon::STrih_imm_V4:
+ return !invertPredicate ? Hexagon::STrih_imm_cPt_V4 :
+ Hexagon::STrih_imm_cNotPt_V4;
+ case Hexagon::STrih_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::STrih_indexed_shl_cPt_V4 :
+ Hexagon::STrih_indexed_shl_cNotPt_V4;
+ // Word.
+ case Hexagon::POST_STwri:
+ return !invertPredicate ? Hexagon::POST_STwri_cPt :
+ Hexagon::POST_STwri_cNotPt;
+ case Hexagon::STriw:
+ return !invertPredicate ? Hexagon::STriw_cPt :
+ Hexagon::STriw_cNotPt;
+ case Hexagon::STriw_indexed:
+ return !invertPredicate ? Hexagon::STriw_indexed_cPt :
+ Hexagon::STriw_indexed_cNotPt;
+ case Hexagon::STriw_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::STriw_indexed_shl_cPt_V4 :
+ Hexagon::STriw_indexed_shl_cNotPt_V4;
+ case Hexagon::STriw_imm_V4:
+ return !invertPredicate ? Hexagon::STriw_imm_cPt_V4 :
+ Hexagon::STriw_imm_cNotPt_V4;
+ // Double word.
+ case Hexagon::POST_STdri:
+ return !invertPredicate ? Hexagon::POST_STdri_cPt :
+ Hexagon::POST_STdri_cNotPt;
+ case Hexagon::STrid:
+ return !invertPredicate ? Hexagon::STrid_cPt :
+ Hexagon::STrid_cNotPt;
+ case Hexagon::STrid_indexed:
+ return !invertPredicate ? Hexagon::STrid_indexed_cPt :
+ Hexagon::STrid_indexed_cNotPt;
+ case Hexagon::STrid_indexed_shl_V4:
+ return !invertPredicate ? Hexagon::STrid_indexed_shl_cPt_V4 :
+ Hexagon::STrid_indexed_shl_cNotPt_V4;
+ // Load.
+ case Hexagon::LDrid:
+ return !invertPredicate ? Hexagon::LDrid_cPt :
+ Hexagon::LDrid_cNotPt;
+ case Hexagon::LDriw:
+ return !invertPredicate ? Hexagon::LDriw_cPt :
+ Hexagon::LDriw_cNotPt;
+ case Hexagon::LDrih:
+ return !invertPredicate ? Hexagon::LDrih_cPt :
+ Hexagon::LDrih_cNotPt;
+ case Hexagon::LDriuh:
+ return !invertPredicate ? Hexagon::LDriuh_cPt :
+ Hexagon::LDriuh_cNotPt;
+ case Hexagon::LDrib:
+ return !invertPredicate ? Hexagon::LDrib_cPt :
+ Hexagon::LDrib_cNotPt;
+ case Hexagon::LDriub:
+ return !invertPredicate ? Hexagon::LDriub_cPt :
+ Hexagon::LDriub_cNotPt;
+ case Hexagon::LDriubit:
+ return !invertPredicate ? Hexagon::LDriub_cPt :
+ Hexagon::LDriub_cNotPt;
+ // Load Indexed.
+ case Hexagon::LDrid_indexed:
+ return !invertPredicate ? Hexagon::LDrid_indexed_cPt :
+ Hexagon::LDrid_indexed_cNotPt;
+ case Hexagon::LDriw_indexed:
+ return !invertPredicate ? Hexagon::LDriw_indexed_cPt :
+ Hexagon::LDriw_indexed_cNotPt;
+ case Hexagon::LDrih_indexed:
+ return !invertPredicate ? Hexagon::LDrih_indexed_cPt :
+ Hexagon::LDrih_indexed_cNotPt;
+ case Hexagon::LDriuh_indexed:
+ return !invertPredicate ? Hexagon::LDriuh_indexed_cPt :
+ Hexagon::LDriuh_indexed_cNotPt;
+ case Hexagon::LDrib_indexed:
+ return !invertPredicate ? Hexagon::LDrib_indexed_cPt :
+ Hexagon::LDrib_indexed_cNotPt;
+ case Hexagon::LDriub_indexed:
+ return !invertPredicate ? Hexagon::LDriub_indexed_cPt :
+ Hexagon::LDriub_indexed_cNotPt;
+ // Post Increment Load.
+ case Hexagon::POST_LDrid:
+ return !invertPredicate ? Hexagon::POST_LDrid_cPt :
+ Hexagon::POST_LDrid_cNotPt;
+ case Hexagon::POST_LDriw:
+ return !invertPredicate ? Hexagon::POST_LDriw_cPt :
+ Hexagon::POST_LDriw_cNotPt;
+ case Hexagon::POST_LDrih:
+ return !invertPredicate ? Hexagon::POST_LDrih_cPt :
+ Hexagon::POST_LDrih_cNotPt;
+ case Hexagon::POST_LDriuh:
+ return !invertPredicate ? Hexagon::POST_LDriuh_cPt :
+ Hexagon::POST_LDriuh_cNotPt;
+ case Hexagon::POST_LDrib:
+ return !invertPredicate ? Hexagon::POST_LDrib_cPt :
+ Hexagon::POST_LDrib_cNotPt;
+ case Hexagon::POST_LDriub:
+ return !invertPredicate ? Hexagon::POST_LDriub_cPt :
+ Hexagon::POST_LDriub_cNotPt;
+ // DEALLOC_RETURN.
+ case Hexagon::DEALLOC_RET_V4:
+ return !invertPredicate ? Hexagon::DEALLOC_RET_cPt_V4 :
+ Hexagon::DEALLOC_RET_cNotPt_V4;
+ }
+ llvm_unreachable("Unexpected predicable instruction");
+}
+
+
+bool HexagonInstrInfo::
+PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Cond) const {
+ int Opc = MI->getOpcode();
+ assert (isPredicable(MI) && "Expected predicable instruction");
+ bool invertJump = (!Cond.empty() && Cond[0].isImm() &&
+ (Cond[0].getImm() == 0));
+ MI->setDesc(get(getMatchingCondBranchOpcode(Opc, invertJump)));
+ //
+ // This assumes that the predicate is always the first operand
+ // in the set of inputs.
+ //
+ MI->addOperand(MI->getOperand(MI->getNumOperands()-1));
+ int oper;
+ for (oper = MI->getNumOperands() - 3; oper >= 0; --oper) {
+ MachineOperand MO = MI->getOperand(oper);
+ if (MO.isReg() && !MO.isUse() && !MO.isImplicit()) {
+ break;
+ }
+
+ if (MO.isReg()) {
+ MI->getOperand(oper+1).ChangeToRegister(MO.getReg(), MO.isDef(),
+ MO.isImplicit(), MO.isKill(),
+ MO.isDead(), MO.isUndef(),
+ MO.isDebug());
+ } else if (MO.isImm()) {
+ MI->getOperand(oper+1).ChangeToImmediate(MO.getImm());
+ } else {
+ llvm_unreachable("Unexpected operand type");
+ }
+ }
+
+ int regPos = invertJump ? 1 : 0;
+ MachineOperand PredMO = Cond[regPos];
+ MI->getOperand(oper+1).ChangeToRegister(PredMO.getReg(), PredMO.isDef(),
+ PredMO.isImplicit(), PredMO.isKill(),
+ PredMO.isDead(), PredMO.isUndef(),
+ PredMO.isDebug());
+
+ return true;
+}
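+// A rough sketch of the rewrite performed above (register numbers are
+// hypothetical): predicating
+//   %R0 = ADD_rr %R1, %R2
+// on a true predicate %P0 swaps the opcode via
+// getMatchingCondBranchOpcode(ADD_rr, false) == ADD_rr_cPt and shifts the
+// inputs right by one slot, yielding
+//   %R0 = ADD_rr_cPt %P0, %R1, %R2
+// with the predicate register inserted as the first input operand.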
+
+
+bool
+HexagonInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &MBB,
+ unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const {
+ return true;
+}
+
+
+bool
+HexagonInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumTCycles,
+ unsigned ExtraTCycles,
+ MachineBasicBlock &FMBB,
+ unsigned NumFCycles,
+ unsigned ExtraFCycles,
+ const BranchProbability &Probability) const {
+ return true;
+}
+
+
+bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const {
+ const uint64_t F = MI->getDesc().TSFlags;
+
+ return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
+}
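+// The predicated bit comes from the TSFlags that TableGen emits for each
+// opcode; instructions marked "isPredicated = 1" in the .td file (e.g.
+// TFR_cPt) are expected to report true here, their base forms false.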
+
+
+bool
+HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const {
+ for (unsigned oper = 0; oper < MI->getNumOperands(); ++oper) {
+ MachineOperand MO = MI->getOperand(oper);
+ if (MO.isReg() && MO.isDef()) {
+ const TargetRegisterClass* RC = RI.getMinimalPhysRegClass(MO.getReg());
+ if (RC == Hexagon::PredRegsRegisterClass) {
+ Pred.push_back(MO);
+ return true;
+ }
+ }
+ }
+ return false;
+}
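+// For example, for "%P0 = CMPEQrr %R1, %R2" the defined register %P0 lies in
+// PredRegs, so it is appended to Pred and the function returns true; an
+// instruction that defines only IntRegs returns false.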
+
+
+bool
+HexagonInstrInfo::
+SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+ const SmallVectorImpl<MachineOperand> &Pred2) const {
+ // TODO: Fix this
+ return false;
+}
+
+
+//
+// We indicate that we want to reverse the branch by
+// inserting a 0 at the beginning of the Cond vector.
+//
+bool HexagonInstrInfo::
+ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+ if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
+ Cond.erase(Cond.begin());
+ } else {
+ Cond.insert(Cond.begin(), MachineOperand::CreateImm(0));
+ }
+ return false;
+}
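+// Example: for a branch taken when %P0 is true, Cond == {%P0}; after
+// ReverseBranchCondition, Cond == {0, %P0}, and a second call strips the
+// leading 0 again, restoring the original sense.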
+
+
+bool HexagonInstrInfo::
+isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs,
+ const BranchProbability &Probability) const {
+ return (NumInstrs <= 4);
+}
+
+bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ case Hexagon::DEALLOC_RET_V4 :
+ case Hexagon::DEALLOC_RET_cPt_V4 :
+ case Hexagon::DEALLOC_RET_cNotPt_V4 :
+ case Hexagon::DEALLOC_RET_cdnPnt_V4 :
+ case Hexagon::DEALLOC_RET_cNotdnPnt_V4 :
+ case Hexagon::DEALLOC_RET_cdnPt_V4 :
+ case Hexagon::DEALLOC_RET_cNotdnPt_V4 :
+ return true;
+ }
+ return false;
+}
+
+
+bool HexagonInstrInfo::
+isValidOffset(const int Opcode, const int Offset) const {
+ // This function checks whether "Offset" is within the valid range for the
+ // given "Opcode". If it is not, the caller inserts an "ADD_ri" to compute
+ // the final address. For that reason, this function assumes that "Offset"
+ // has the correct alignment.
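+ //
+ // For example (the exact bounds come from the Hexagon_*_OFFSET constants):
+ // a word access "memw(Rs+#off)" is accepted only when off % 4 == 0 and
+ // off lies within [Hexagon_MEMW_OFFSET_MIN, Hexagon_MEMW_OFFSET_MAX];
+ // anything outside that range makes this function return false, and the
+ // caller computes the address with a separate ADD_ri instead.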
+
+ switch(Opcode) {
+
+ case Hexagon::LDriw:
+ case Hexagon::STriw:
+ assert((Offset % 4 == 0) && "Offset has incorrect alignment");
+ return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
+ (Offset <= Hexagon_MEMW_OFFSET_MAX);
+
+ case Hexagon::LDrid:
+ case Hexagon::STrid:
+ assert((Offset % 8 == 0) && "Offset has incorrect alignment");
+ return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
+ (Offset <= Hexagon_MEMD_OFFSET_MAX);
+
+ case Hexagon::LDrih:
+ case Hexagon::LDriuh:
+ case Hexagon::STrih:
+ case Hexagon::LDrih_ae:
+ assert((Offset % 2 == 0) && "Offset has incorrect alignment");
+ return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
+ (Offset <= Hexagon_MEMH_OFFSET_MAX);
+
+ case Hexagon::LDrib:
+ case Hexagon::STrib:
+ case Hexagon::LDriub:
+ case Hexagon::LDriubit:
+ case Hexagon::LDrib_ae:
+ case Hexagon::LDriub_ae:
+ return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
+ (Offset <= Hexagon_MEMB_OFFSET_MAX);
+
+ case Hexagon::ADD_ri:
+ case Hexagon::TFR_FI:
+ return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
+ (Offset <= Hexagon_ADDI_OFFSET_MAX);
+
+ case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMw_ADDi_MEM_V4 :
+ case Hexagon::MEMw_SUBi_MEM_V4 :
+ case Hexagon::MEMw_ADDr_MEM_V4 :
+ case Hexagon::MEMw_SUBr_MEM_V4 :
+ case Hexagon::MEMw_ANDr_MEM_V4 :
+ case Hexagon::MEMw_ORr_MEM_V4 :
+ assert ((Offset % 4) == 0 && "MEMOPw offset is not aligned correctly." );
+ return (0 <= Offset && Offset <= 255);
+
+ case Hexagon::MEMh_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMh_ADDi_MEM_V4 :
+ case Hexagon::MEMh_SUBi_MEM_V4 :
+ case Hexagon::MEMh_ADDr_MEM_V4 :
+ case Hexagon::MEMh_SUBr_MEM_V4 :
+ case Hexagon::MEMh_ANDr_MEM_V4 :
+ case Hexagon::MEMh_ORr_MEM_V4 :
+ assert ((Offset % 2) == 0 && "MEMOPh offset is not aligned correctly." );
+ return (0 <= Offset && Offset <= 127);
+
+ case Hexagon::MEMb_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMb_ADDi_MEM_V4 :
+ case Hexagon::MEMb_SUBi_MEM_V4 :
+ case Hexagon::MEMb_ADDr_MEM_V4 :
+ case Hexagon::MEMb_SUBr_MEM_V4 :
+ case Hexagon::MEMb_ANDr_MEM_V4 :
+ case Hexagon::MEMb_ORr_MEM_V4 :
+ return (0 <= Offset && Offset <= 63);
+
+ // LDriw_pred and STriw_pred are pseudo operations, so they have to accept
+ // offsets of any size. A later pass knows how to handle them.
+ case Hexagon::STriw_pred:
+ case Hexagon::LDriw_pred:
+ return true;
+
+ // INLINEASM is very special.
+ case Hexagon::INLINEASM:
+ return true;
+ }
+
+ llvm_unreachable("No offset range is defined for this opcode. "
+ "Please define it in the above switch statement!");
+}
+
+
+//
+// Check whether Offset is a valid auto-increment immediate for the given
+// load/store type.
+//
+bool HexagonInstrInfo::
+isValidAutoIncImm(const EVT VT, const int Offset) const {
+
+ if (VT == MVT::i64) {
+ return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
+ Offset <= Hexagon_MEMD_AUTOINC_MAX &&
+ (Offset & 0x7) == 0);
+ }
+ if (VT == MVT::i32) {
+ return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
+ Offset <= Hexagon_MEMW_AUTOINC_MAX &&
+ (Offset & 0x3) == 0);
+ }
+ if (VT == MVT::i16) {
+ return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
+ Offset <= Hexagon_MEMH_AUTOINC_MAX &&
+ (Offset & 0x1) == 0);
+ }
+ if (VT == MVT::i8) {
+ return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
+ Offset <= Hexagon_MEMB_AUTOINC_MAX);
+ }
+ llvm_unreachable("Not an auto-inc opc!");
+}
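+// Intended use, as a sketch (the exact bounds come from the
+// Hexagon_*_AUTOINC constants): an i32 post-increment access needs a
+// 4-byte-aligned immediate in range, so, assuming #4 lies within
+// [Hexagon_MEMW_AUTOINC_MIN, Hexagon_MEMW_AUTOINC_MAX],
+//   isValidAutoIncImm(MVT::i32, 4) -> true
+//   isValidAutoIncImm(MVT::i32, 2) -> false  (misaligned)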
+
+
+bool HexagonInstrInfo::
+isMemOp(const MachineInstr *MI) const {
+ switch (MI->getOpcode())
+ {
+ case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMw_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMw_ADDi_MEM_V4 :
+ case Hexagon::MEMw_SUBi_MEM_V4 :
+ case Hexagon::MEMw_ADDr_MEM_V4 :
+ case Hexagon::MEMw_SUBr_MEM_V4 :
+ case Hexagon::MEMw_ANDr_MEM_V4 :
+ case Hexagon::MEMw_ORr_MEM_V4 :
+ case Hexagon::MEMh_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMh_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMh_ADDi_MEM_V4 :
+ case Hexagon::MEMh_SUBi_MEM_V4 :
+ case Hexagon::MEMh_ADDr_MEM_V4 :
+ case Hexagon::MEMh_SUBr_MEM_V4 :
+ case Hexagon::MEMh_ANDr_MEM_V4 :
+ case Hexagon::MEMh_ORr_MEM_V4 :
+ case Hexagon::MEMb_ADDSUBi_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
+ case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
+ case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ORr_indexed_MEM_V4 :
+ case Hexagon::MEMb_ADDSUBi_MEM_V4 :
+ case Hexagon::MEMb_ADDi_MEM_V4 :
+ case Hexagon::MEMb_SUBi_MEM_V4 :
+ case Hexagon::MEMb_ADDr_MEM_V4 :
+ case Hexagon::MEMb_SUBr_MEM_V4 :
+ case Hexagon::MEMb_ANDr_MEM_V4 :
+ case Hexagon::MEMb_ORr_MEM_V4 :
+ return true;
+ }
+ return false;
+}
+
+
+bool HexagonInstrInfo::
+isSpillPredRegOp(const MachineInstr *MI) const {
+ switch (MI->getOpcode())
+ {
+ case Hexagon::STriw_pred :
+ case Hexagon::LDriw_pred :
+ return true;
+ }
+ return false;
+}
+
+bool HexagonInstrInfo::
+isConditionalTransfer (const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ case Hexagon::TFR_cPt:
+ case Hexagon::TFR_cNotPt:
+ case Hexagon::TFRI_cPt:
+ case Hexagon::TFRI_cNotPt:
+ case Hexagon::TFR_cdnPt:
+ case Hexagon::TFR_cdnNotPt:
+ case Hexagon::TFRI_cdnPt:
+ case Hexagon::TFRI_cdnNotPt:
+ return true;
+
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
+ const HexagonRegisterInfo& QRI = getRegisterInfo();
+ switch (MI->getOpcode())
+ {
+ case Hexagon::ADD_ri_cPt:
+ case Hexagon::ADD_ri_cNotPt:
+ case Hexagon::ADD_rr_cPt:
+ case Hexagon::ADD_rr_cNotPt:
+ case Hexagon::XOR_rr_cPt:
+ case Hexagon::XOR_rr_cNotPt:
+ case Hexagon::AND_rr_cPt:
+ case Hexagon::AND_rr_cNotPt:
+ case Hexagon::OR_rr_cPt:
+ case Hexagon::OR_rr_cNotPt:
+ case Hexagon::SUB_rr_cPt:
+ case Hexagon::SUB_rr_cNotPt:
+ case Hexagon::COMBINE_rr_cPt:
+ case Hexagon::COMBINE_rr_cNotPt:
+ return true;
+ case Hexagon::ASLH_cPt_V4:
+ case Hexagon::ASLH_cNotPt_V4:
+ case Hexagon::ASRH_cPt_V4:
+ case Hexagon::ASRH_cNotPt_V4:
+ case Hexagon::SXTB_cPt_V4:
+ case Hexagon::SXTB_cNotPt_V4:
+ case Hexagon::SXTH_cPt_V4:
+ case Hexagon::SXTH_cNotPt_V4:
+ case Hexagon::ZXTB_cPt_V4:
+ case Hexagon::ZXTB_cNotPt_V4:
+ case Hexagon::ZXTH_cPt_V4:
+ case Hexagon::ZXTH_cNotPt_V4:
+ return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4;
+
+ default:
+ return false;
+ }
+}
+
+bool HexagonInstrInfo::
+isConditionalLoad (const MachineInstr* MI) const {
+ const HexagonRegisterInfo& QRI = getRegisterInfo();
+ switch (MI->getOpcode())
+ {
+ case Hexagon::LDrid_cPt :
+ case Hexagon::LDrid_cNotPt :
+ case Hexagon::LDrid_indexed_cPt :
+ case Hexagon::LDrid_indexed_cNotPt :
+ case Hexagon::LDriw_cPt :
+ case Hexagon::LDriw_cNotPt :
+ case Hexagon::LDriw_indexed_cPt :
+ case Hexagon::LDriw_indexed_cNotPt :
+ case Hexagon::LDrih_cPt :
+ case Hexagon::LDrih_cNotPt :
+ case Hexagon::LDrih_indexed_cPt :
+ case Hexagon::LDrih_indexed_cNotPt :
+ case Hexagon::LDrib_cPt :
+ case Hexagon::LDrib_cNotPt :
+ case Hexagon::LDrib_indexed_cPt :
+ case Hexagon::LDrib_indexed_cNotPt :
+ case Hexagon::LDriuh_cPt :
+ case Hexagon::LDriuh_cNotPt :
+ case Hexagon::LDriuh_indexed_cPt :
+ case Hexagon::LDriuh_indexed_cNotPt :
+ case Hexagon::LDriub_cPt :
+ case Hexagon::LDriub_cNotPt :
+ case Hexagon::LDriub_indexed_cPt :
+ case Hexagon::LDriub_indexed_cNotPt :
+ return true;
+ case Hexagon::POST_LDrid_cPt :
+ case Hexagon::POST_LDrid_cNotPt :
+ case Hexagon::POST_LDriw_cPt :
+ case Hexagon::POST_LDriw_cNotPt :
+ case Hexagon::POST_LDrih_cPt :
+ case Hexagon::POST_LDrih_cNotPt :
+ case Hexagon::POST_LDrib_cPt :
+ case Hexagon::POST_LDrib_cNotPt :
+ case Hexagon::POST_LDriuh_cPt :
+ case Hexagon::POST_LDriuh_cNotPt :
+ case Hexagon::POST_LDriub_cPt :
+ case Hexagon::POST_LDriub_cNotPt :
+ return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4;
+ case Hexagon::LDrid_indexed_cPt_V4 :
+ case Hexagon::LDrid_indexed_cNotPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDrib_indexed_cPt_V4 :
+ case Hexagon::LDrib_indexed_cNotPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriub_indexed_cPt_V4 :
+ case Hexagon::LDriub_indexed_cNotPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDrih_indexed_cPt_V4 :
+ case Hexagon::LDrih_indexed_cNotPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriuh_indexed_cPt_V4 :
+ case Hexagon::LDriuh_indexed_cNotPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriw_indexed_cPt_V4 :
+ case Hexagon::LDriw_indexed_cNotPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
+ return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4;
+ default:
+ return false;
+ }
+}
+
+// Returns true if an instruction is a conditional store.
+//
+// Note: this does not include conditional new-value stores, as they cannot
+// be converted to the .new predicate form.
+//
+// p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
+// ^ ^
+// / \ (not OK: it would make the new-value store
+// / X conditional on p0.new while the R2 producer is
+// / \ on p0)
+// / \.
+// p.new store p.old NV store
+// [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
+// ^ ^
+// \ /
+// \ /
+// \ /
+// p.old store
+// [if (p0)memw(R0+#0)=R2]
+//
+// The above diagram shows the steps involved in the conversion of a predicated
+// store instruction to its .new predicated new-value form.
+//
+// The following instruction sequence further illustrates the scenario where a
+// conditional new-value store becomes invalid when promoted to the .new
+// predicate form.
+//
+// { 1) if (p0) r0 = add(r1, r2)
+// 2) p0 = cmp.eq(r3, #0) }
+//
+// 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
+// the first two: in instr 1, r0 is conditional on the old value of p0, but
+// its use in instr 3 would be conditional on the p0 modified by instr 2,
+// which is not valid for new-value stores.
+bool HexagonInstrInfo::
+isConditionalStore (const MachineInstr* MI) const {
+ const HexagonRegisterInfo& QRI = getRegisterInfo();
+ switch (MI->getOpcode())
+ {
+ case Hexagon::STrib_imm_cPt_V4 :
+ case Hexagon::STrib_imm_cNotPt_V4 :
+ case Hexagon::STrib_indexed_shl_cPt_V4 :
+ case Hexagon::STrib_indexed_shl_cNotPt_V4 :
+ case Hexagon::STrib_cPt :
+ case Hexagon::STrib_cNotPt :
+ case Hexagon::STrib_indexed_cPt :
+ case Hexagon::STrib_indexed_cNotPt :
+ case Hexagon::POST_STbri_cPt :
+ case Hexagon::POST_STbri_cNotPt :
+ case Hexagon::STrid_cPt :
+ case Hexagon::STrid_cNotPt :
+ case Hexagon::STrid_indexed_cPt :
+ case Hexagon::STrid_indexed_cNotPt :
+ case Hexagon::STrid_indexed_shl_cPt_V4 :
+ case Hexagon::STrid_indexed_shl_cNotPt_V4 :
+ case Hexagon::POST_STdri_cPt :
+ case Hexagon::POST_STdri_cNotPt :
+ case Hexagon::STrih_cPt :
+ case Hexagon::STrih_cNotPt :
+ case Hexagon::STrih_indexed_cPt :
+ case Hexagon::STrih_indexed_cNotPt :
+ case Hexagon::STrih_imm_cPt_V4 :
+ case Hexagon::STrih_imm_cNotPt_V4 :
+ case Hexagon::STrih_indexed_shl_cPt_V4 :
+ case Hexagon::STrih_indexed_shl_cNotPt_V4 :
+ case Hexagon::POST_SThri_cPt :
+ case Hexagon::POST_SThri_cNotPt :
+ case Hexagon::STriw_cPt :
+ case Hexagon::STriw_cNotPt :
+ case Hexagon::STriw_indexed_cPt :
+ case Hexagon::STriw_indexed_cNotPt :
+ case Hexagon::STriw_imm_cPt_V4 :
+ case Hexagon::STriw_imm_cNotPt_V4 :
+ case Hexagon::STriw_indexed_shl_cPt_V4 :
+ case Hexagon::STriw_indexed_shl_cNotPt_V4 :
+ case Hexagon::POST_STwri_cPt :
+ case Hexagon::POST_STwri_cNotPt :
+ return QRI.Subtarget.hasV4TOps();
+
+ // V4 global address store before promoting to dot new.
+ case Hexagon::STrid_GP_cPt_V4 :
+ case Hexagon::STrid_GP_cNotPt_V4 :
+ case Hexagon::STrib_GP_cPt_V4 :
+ case Hexagon::STrib_GP_cNotPt_V4 :
+ case Hexagon::STrih_GP_cPt_V4 :
+ case Hexagon::STrih_GP_cNotPt_V4 :
+ case Hexagon::STriw_GP_cPt_V4 :
+ case Hexagon::STriw_GP_cNotPt_V4 :
+ case Hexagon::STd_GP_cPt_V4 :
+ case Hexagon::STd_GP_cNotPt_V4 :
+ case Hexagon::STb_GP_cPt_V4 :
+ case Hexagon::STb_GP_cNotPt_V4 :
+ case Hexagon::STh_GP_cPt_V4 :
+ case Hexagon::STh_GP_cNotPt_V4 :
+ case Hexagon::STw_GP_cPt_V4 :
+ case Hexagon::STw_GP_cNotPt_V4 :
+ return QRI.Subtarget.hasV4TOps();
+
+ // Predicated new-value stores (i.e. if (p0) memw(..)=r0.new) are excluded
+ // from the "conditional store" list because a predicated new-value store
+ // would NOT be promoted to a double-dot-new store; see the diagram below.
+ // This function returns true for stores that are predicated but not yet
+ // promoted to predicate-dot-new instructions.
+ //
+ // +---------------------+
+ // /-----| if (p0) memw(..)=r0 |---------\~
+ // || +---------------------+ ||
+ // promote || /\ /\ || promote
+ // || /||\ /||\ ||
+ // \||/ demote || \||/
+ // \/ || || \/
+ // +-------------------------+ || +-------------------------+
+ // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
+ // +-------------------------+ || +-------------------------+
+ // || || ||
+ // || demote \||/
+ // promote || \/ NOT possible
+ // || || /\~
+ // \||/ || /||\~
+ // \/ || ||
+ // +-----------------------------+
+ // | if (p0.new) memw(..)=r0.new |
+ // +-----------------------------+
+ // Double Dot New Store
+ //
+
+ default:
+ return false;
+
+ }
+ return false;
+}
+
+
+
+DFAPacketizer *HexagonInstrInfo::
+CreateTargetScheduleState(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const {
+ const InstrItineraryData *II = TM->getInstrItineraryData();
+ return TM->getSubtarget<HexagonGenSubtargetInfo>().createDFAPacketizer(II);
+}
+
+bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const {
+ // Debug info is never a scheduling boundary. Being explicit about this is
+ // necessary because of the special treatment of terminators and labels
+ // below; otherwise a dbg_value followed by one of them would make that
+ // instruction be considered a scheduling hazard, which is wrong. The
+ // boundary should be the actual instruction preceding the dbg_value
+ // instruction(s), just as it is when debug info is not present.
+ if (MI->isDebugValue())
+ return false;
+
+ // Terminators and labels can't be scheduled around.
+ if (MI->getDesc().isTerminator() || MI->isLabel() || MI->isInlineAsm())
+ return true;
+
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
new file mode 100644
index 0000000..6a45871
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -0,0 +1,185 @@
+//===- HexagonInstrInfo.h - Hexagon Instruction Information -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Hexagon implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HexagonINSTRUCTIONINFO_H
+#define HexagonINSTRUCTIONINFO_H
+
+#include "HexagonRegisterInfo.h"
+#include "MCTargetDesc/HexagonBaseInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+
+#define GET_INSTRINFO_HEADER
+#include "HexagonGenInstrInfo.inc"
+
+namespace llvm {
+
+class HexagonInstrInfo : public HexagonGenInstrInfo {
+ const HexagonRegisterInfo RI;
+ const HexagonSubtarget& Subtarget;
+public:
+ explicit HexagonInstrInfo(HexagonSubtarget &ST);
+
+ /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
+ /// such, whenever a client has an instance of instruction info, it should
+ /// always be able to get register info as well (through this method).
+ ///
+ virtual const HexagonRegisterInfo &getRegisterInfo() const { return RI; }
+
+ /// isLoadFromStackSlot - If the specified machine instruction is a direct
+ /// load from a stack slot, return the virtual or physical register number of
+ /// the destination along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than loading from the stack slot.
+ virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const;
+
+ /// isStoreToStackSlot - If the specified machine instruction is a direct
+ /// store to a stack slot, return the virtual or physical register number of
+ /// the source reg along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than storing to the stack slot.
+ virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const;
+
+
+ virtual bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const;
+
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
+
+ virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
+
+ virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+
+ virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+ virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+
+ virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+ virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
+
+ virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
+ unsigned createVR(MachineFunction* MF, MVT VT) const;
+
+ virtual bool isPredicable(MachineInstr *MI) const;
+ virtual bool
+ PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Cond) const;
+
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const;
+
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumTCycles, unsigned ExtraTCycles,
+ MachineBasicBlock &FMBB,
+ unsigned NumFCycles, unsigned ExtraFCycles,
+ const BranchProbability &Probability) const;
+
+ virtual bool isPredicated(const MachineInstr *MI) const;
+ virtual bool DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const;
+ virtual bool
+ SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+ const SmallVectorImpl<MachineOperand> &Pred2) const;
+
+ virtual bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+
+ virtual bool
+ isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ const BranchProbability &Probability) const;
+
+ virtual DFAPacketizer*
+ CreateTargetScheduleState(const TargetMachine *TM,
+ const ScheduleDAG *DAG) const;
+
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const;
+ bool isValidOffset(const int Opcode, const int Offset) const;
+ bool isValidAutoIncImm(const EVT VT, const int Offset) const;
+ bool isMemOp(const MachineInstr *MI) const;
+ bool isSpillPredRegOp(const MachineInstr *MI) const;
+ bool isU6_3Immediate(const int value) const;
+ bool isU6_2Immediate(const int value) const;
+ bool isU6_1Immediate(const int value) const;
+ bool isU6_0Immediate(const int value) const;
+ bool isS4_3Immediate(const int value) const;
+ bool isS4_2Immediate(const int value) const;
+ bool isS4_1Immediate(const int value) const;
+ bool isS4_0Immediate(const int value) const;
+ bool isS12_Immediate(const int value) const;
+ bool isU6_Immediate(const int value) const;
+ bool isS8_Immediate(const int value) const;
+ bool isS6_Immediate(const int value) const;
+
+ bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const;
+ bool isConditionalTransfer(const MachineInstr* MI) const;
+ bool isConditionalALU32 (const MachineInstr* MI) const;
+ bool isConditionalLoad (const MachineInstr* MI) const;
+ bool isConditionalStore(const MachineInstr* MI) const;
+ bool isDeallocRet(const MachineInstr *MI) const;
+ unsigned getInvertedPredicatedOpcode(const int Opc) const;
+ bool isExtendable(const MachineInstr* MI) const;
+ bool isExtended(const MachineInstr* MI) const;
+ bool isPostIncrement(const MachineInstr* MI) const;
+ bool isNewValueStore(const MachineInstr* MI) const;
+ bool isNewValueJump(const MachineInstr* MI) const;
+ unsigned getImmExtForm(const MachineInstr* MI) const;
+ unsigned getNormalBranchForm(const MachineInstr* MI) const;
+
+private:
+ int getMatchingCondBranchOpcode(int Opc, bool sense) const;
+
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
new file mode 100644
index 0000000..fd5adef
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -0,0 +1,3052 @@
+//==- HexagonInstrInfo.td - Target Description for Hexagon -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Hexagon instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+include "HexagonInstrFormats.td"
+include "HexagonImmediates.td"
+
+//===----------------------------------------------------------------------===//
+// Hexagon Instruction Predicate Definitions.
+//===----------------------------------------------------------------------===//
+def HasV2T : Predicate<"Subtarget.hasV2TOps()">;
+def HasV2TOnly : Predicate<"Subtarget.hasV2TOpsOnly()">;
+def NoV2T : Predicate<"!Subtarget.hasV2TOps()">;
+def HasV3T : Predicate<"Subtarget.hasV3TOps()">;
+def HasV3TOnly : Predicate<"Subtarget.hasV3TOpsOnly()">;
+def NoV3T : Predicate<"!Subtarget.hasV3TOps()">;
+def HasV4T : Predicate<"Subtarget.hasV4TOps()">;
+def NoV4T : Predicate<"!Subtarget.hasV4TOps()">;
+def UseMEMOP : Predicate<"Subtarget.useMemOps()">;
+
+// Addressing modes.
+def ADDRrr : ComplexPattern<i32, 2, "SelectADDRrr", [], []>;
+def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex], []>;
+def ADDRriS11_0 : ComplexPattern<i32, 2, "SelectADDRriS11_0", [frameindex], []>;
+def ADDRriS11_1 : ComplexPattern<i32, 2, "SelectADDRriS11_1", [frameindex], []>;
+def ADDRriS11_2 : ComplexPattern<i32, 2, "SelectADDRriS11_2", [frameindex], []>;
+def ADDRriS11_3 : ComplexPattern<i32, 2, "SelectADDRriS11_3", [frameindex], []>;
+def ADDRriU6_0 : ComplexPattern<i32, 2, "SelectADDRriU6_0", [frameindex], []>;
+def ADDRriU6_1 : ComplexPattern<i32, 2, "SelectADDRriU6_1", [frameindex], []>;
+def ADDRriU6_2 : ComplexPattern<i32, 2, "SelectADDRriU6_2", [frameindex], []>;
+
+// Address operands.
+def MEMrr : Operand<i32> {
+ let PrintMethod = "printMEMrrOperand";
+ let MIOperandInfo = (ops IntRegs, IntRegs);
+}
+
+// Address operands
+def MEMri : Operand<i32> {
+ let PrintMethod = "printMEMriOperand";
+ let MIOperandInfo = (ops IntRegs, IntRegs);
+}
+
+def MEMri_s11_2 : Operand<i32>,
+ ComplexPattern<i32, 2, "SelectMEMriS11_2", []> {
+ let PrintMethod = "printMEMriOperand";
+ let MIOperandInfo = (ops IntRegs, s11Imm);
+}
+
+def FrameIndex : Operand<i32> {
+ let PrintMethod = "printFrameIndexOperand";
+ let MIOperandInfo = (ops IntRegs, s11Imm);
+}
+
+let PrintMethod = "printGlobalOperand" in
+ def globaladdress : Operand<i32>;
+
+let PrintMethod = "printJumpTable" in
+ def jumptablebase : Operand<i32>;
+
+def brtarget : Operand<OtherVT>;
+def calltarget : Operand<i32>;
+
+def bblabel : Operand<i32>;
+def bbl : SDNode<"ISD::BasicBlock", SDTPtrLeaf, [], "BasicBlockSDNode">;
+
+def symbolHi32 : Operand<i32> {
+ let PrintMethod = "printSymbolHi";
+}
+def symbolLo32 : Operand<i32> {
+ let PrintMethod = "printSymbolLo";
+}
+
+// Multi-class for logical operators.
+multiclass ALU32_rr_ri<string OpcStr, SDNode OpNode> {
+ def rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
+ [(set IntRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+ def ri : ALU32_ri<(outs IntRegs:$dst), (ins s10Imm:$b, IntRegs:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "(#$b, $c)")),
+ [(set IntRegs:$dst, (OpNode s10Imm:$b, IntRegs:$c))]>;
+}
+
+// Multi-class for compare ops.
+let isCompare = 1 in {
+multiclass CMP64_rr<string OpcStr, PatFrag OpNode> {
+ def rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
+ [(set PredRegs:$dst, (OpNode DoubleRegs:$b, DoubleRegs:$c))]>;
+}
+multiclass CMP32_rr<string OpcStr, PatFrag OpNode> {
+ def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
+ [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+}
+
+multiclass CMP32_rr_ri_s10<string OpcStr, PatFrag OpNode> {
+ def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
+ [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+ def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s10Imm:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
+ [(set PredRegs:$dst, (OpNode IntRegs:$b, s10ImmPred:$c))]>;
+}
+
+multiclass CMP32_rr_ri_u9<string OpcStr, PatFrag OpNode> {
+ def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")),
+ [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>;
+ def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Imm:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
+ [(set PredRegs:$dst, (OpNode IntRegs:$b, u9ImmPred:$c))]>;
+}
+
+multiclass CMP32_ri_u9<string OpcStr, PatFrag OpNode> {
+ def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Imm:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
+ [(set PredRegs:$dst, (OpNode IntRegs:$b, u9ImmPred:$c))]>;
+}
+
+multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> {
+ def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Imm:$c),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")),
+ [(set PredRegs:$dst, (OpNode IntRegs:$b, s8ImmPred:$c))]>;
+}
+}
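+
+// Each "defm NAME : CMP32_..." instantiation below concatenates NAME with the
+// inner def names, so, for example, "defm CMPEQ : CMP32_rr_ri_s10<...>"
+// produces both a CMPEQrr and a CMPEQri record.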
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// http://qualnet.qualcomm.com/~erich/v1/htmldocs/index.html
+// http://qualnet.qualcomm.com/~erich/v2/htmldocs/index.html
+// http://qualnet.qualcomm.com/~erich/v3/htmldocs/index.html
+// http://qualnet.qualcomm.com/~erich/v4/htmldocs/index.html
+// http://qualnet.qualcomm.com/~erich/v5/htmldocs/index.html
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU32/ALU +
+//===----------------------------------------------------------------------===//
+// Add.
+let isPredicable = 1 in
+def ADD_rr : ALU32_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = add($src1, $src2)",
+ [(set IntRegs:$dst, (add IntRegs:$src1, IntRegs:$src2))]>;
+
+let isPredicable = 1 in
+def ADD_ri : ALU32_ri<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s16Imm:$src2),
+ "$dst = add($src1, #$src2)",
+ [(set IntRegs:$dst, (add IntRegs:$src1, s16ImmPred:$src2))]>;
+
+// Logical operations.
+let isPredicable = 1 in
+def XOR_rr : ALU32_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = xor($src1, $src2)",
+ [(set IntRegs:$dst, (xor IntRegs:$src1, IntRegs:$src2))]>;
+
+let isPredicable = 1 in
+def AND_rr : ALU32_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = and($src1, $src2)",
+ [(set IntRegs:$dst, (and IntRegs:$src1, IntRegs:$src2))]>;
+
+def OR_ri : ALU32_ri<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s8Imm:$src2),
+ "$dst = or($src1, #$src2)",
+ [(set IntRegs:$dst, (or IntRegs:$src1, s8ImmPred:$src2))]>;
+
+def NOT_rr : ALU32_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1),
+ "$dst = not($src1)",
+ [(set IntRegs:$dst, (not IntRegs:$src1))]>;
+
+def AND_ri : ALU32_ri<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s10Imm:$src2),
+ "$dst = and($src1, #$src2)",
+ [(set IntRegs:$dst, (and IntRegs:$src1, s10ImmPred:$src2))]>;
+
+let isPredicable = 1 in
+def OR_rr : ALU32_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = or($src1, $src2)",
+ [(set IntRegs:$dst, (or IntRegs:$src1, IntRegs:$src2))]>;
+
+// Negate.
+def NEG : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = neg($src1)",
+ [(set IntRegs:$dst, (ineg IntRegs:$src1))]>;
+// Nop.
+let neverHasSideEffects = 1 in
+def NOP : ALU32_rr<(outs), (ins),
+ "nop",
+ []>;
+
+// Subtract.
+let isPredicable = 1 in
+def SUB_rr : ALU32_rr<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = sub($src1, $src2)",
+ [(set IntRegs:$dst, (sub IntRegs:$src1, IntRegs:$src2))]>;
+
+// Transfer immediate.
+let isReMaterializable = 1, isPredicable = 1 in
+def TFRI : ALU32_ri<(outs IntRegs:$dst), (ins s16Imm:$src1),
+ "$dst = #$src1",
+ [(set IntRegs:$dst, s16ImmPred:$src1)]>;
+
+// Transfer register.
+let neverHasSideEffects = 1, isPredicable = 1 in
+def TFR : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = $src1",
+ []>;
+
+// Transfer control register.
+let neverHasSideEffects = 1 in
+def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1),
+ "$dst = $src1",
+ []>;
+//===----------------------------------------------------------------------===//
+// ALU32/ALU -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// ALU32/PERM +
+//===----------------------------------------------------------------------===//
+
+// Combine.
+let isPredicable = 1, neverHasSideEffects = 1 in
+def COMBINE_rr : ALU32_rr<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = combine($src1, $src2)",
+ []>;
+
+// Mux.
+def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1,
+ DoubleRegs:$src2,
+ DoubleRegs:$src3),
+ "$dst = vmux($src1, $src2, $src3)",
+ []>;
+
+def MUX_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ IntRegs:$src2, IntRegs:$src3),
+ "$dst = mux($src1, $src2, $src3)",
+ [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+def MUX_ir : ALU32_ir<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2,
+ IntRegs:$src3),
+ "$dst = mux($src1, #$src2, $src3)",
+ [(set IntRegs:$dst, (select PredRegs:$src1,
+ s8ImmPred:$src2, IntRegs:$src3))]>;
+
+def MUX_ri : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2,
+ s8Imm:$src3),
+ "$dst = mux($src1, $src2, #$src3)",
+ [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2,
+ s8ImmPred:$src3))]>;
+
+def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2,
+ s8Imm:$src3),
+ "$dst = mux($src1, #$src2, #$src3)",
+ [(set IntRegs:$dst, (select PredRegs:$src1, s8ImmPred:$src2,
+ s8ImmPred:$src3))]>;
+
+// Shift halfword.
+let isPredicable = 1 in
+def ASLH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = aslh($src1)",
+ [(set IntRegs:$dst, (shl IntRegs:$src1, 16))]>;
+
+let isPredicable = 1 in
+def ASRH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = asrh($src1)",
+ [(set IntRegs:$dst, (sra IntRegs:$src1, 16))]>;
+
+// Sign extend.
+let isPredicable = 1 in
+def SXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = sxtb($src1)",
+ [(set IntRegs:$dst, (sext_inreg IntRegs:$src1, i8))]>;
+
+let isPredicable = 1 in
+def SXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = sxth($src1)",
+ [(set IntRegs:$dst, (sext_inreg IntRegs:$src1, i16))]>;
+
+// Zero extend.
+let isPredicable = 1, neverHasSideEffects = 1 in
+def ZXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = zxtb($src1)",
+ []>;
+
+let isPredicable = 1, neverHasSideEffects = 1 in
+def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = zxth($src1)",
+ []>;
+//===----------------------------------------------------------------------===//
+// ALU32/PERM -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// ALU32/PRED +
+//===----------------------------------------------------------------------===//
+
+// Conditional add.
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ADD_ri_cPt : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3),
+ "if ($src1) $dst = add($src2, #$src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ADD_ri_cNotPt : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3),
+ "if (!$src1) $dst = add($src2, #$src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ADD_ri_cdnPt : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3),
+ "if ($src1.new) $dst = add($src2, #$src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ADD_ri_cdnNotPt : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3),
+ "if (!$src1.new) $dst = add($src2, #$src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ADD_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst = add($src2, $src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ADD_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst = add($src2, $src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ADD_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst = add($src2, $src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ADD_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst = add($src2, $src3)",
+ []>;
+
+
+// Conditional combine.
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def COMBINE_rr_cPt : ALU32_rr<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst = combine($src2, $src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def COMBINE_rr_cNotPt : ALU32_rr<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst = combine($src2, $src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def COMBINE_rr_cdnPt : ALU32_rr<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst = combine($src2, $src3)",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def COMBINE_rr_cdnNotPt : ALU32_rr<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst = combine($src2, $src3)",
+ []>;
+
+// Conditional logical operations.
+
+let isPredicated = 1 in
+def XOR_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst = xor($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def XOR_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst = xor($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def XOR_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst = xor($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def XOR_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst = xor($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def AND_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst = and($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def AND_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst = and($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def AND_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst = and($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def AND_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst = and($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def OR_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst = or($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def OR_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst = or($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def OR_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst = or($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def OR_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst = or($src2, $src3)",
+ []>;
+
+
+// Conditional subtract.
+
+let isPredicated = 1 in
+def SUB_rr_cPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst = sub($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def SUB_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst = sub($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def SUB_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst = sub($src2, $src3)",
+ []>;
+
+let isPredicated = 1 in
+def SUB_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst = sub($src2, $src3)",
+ []>;
+
+
+// Conditional transfer.
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFR_cPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1) $dst = $src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFR_cNotPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ IntRegs:$src2),
+ "if (!$src1) $dst = $src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2),
+ "if ($src1) $dst = #$src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cNotPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ s12Imm:$src2),
+ "if (!$src1) $dst = #$src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFR_cdnPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ IntRegs:$src2),
+ "if ($src1.new) $dst = $src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFR_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ IntRegs:$src2),
+ "if (!$src1.new) $dst = $src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cdnPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ s12Imm:$src2),
+ "if ($src1.new) $dst = #$src2",
+ []>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cdnNotPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ s12Imm:$src2),
+ "if (!$src1.new) $dst = #$src2",
+ []>;
+
+// Compare.
+defm CMPGTU : CMP32_rr_ri_u9<"cmp.gtu", setugt>;
+defm CMPGT : CMP32_rr_ri_s10<"cmp.gt", setgt>;
+defm CMPLT : CMP32_rr<"cmp.lt", setlt>;
+defm CMPEQ : CMP32_rr_ri_s10<"cmp.eq", seteq>;
+defm CMPGE : CMP32_ri_s8<"cmp.ge", setge>;
+defm CMPGEU : CMP32_ri_u9<"cmp.geu", setuge>;
+//===----------------------------------------------------------------------===//
+// ALU32/PRED -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU32/VH +
+//===----------------------------------------------------------------------===//
+// Vector add halfwords
+
+// Vector average halfwords
+
+// Vector subtract halfwords
+//===----------------------------------------------------------------------===//
+// ALU32/VH -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// ALU64/ALU +
+//===----------------------------------------------------------------------===//
+// Add.
+def ADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = add($src1, $src2)",
+ [(set DoubleRegs:$dst, (add DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+// Add halfword.
+
+// Compare.
+defm CMPEHexagon4 : CMP64_rr<"cmp.eq", seteq>;
+defm CMPGT64 : CMP64_rr<"cmp.gt", setgt>;
+defm CMPGTU64 : CMP64_rr<"cmp.gtu", setugt>;
+
+// Logical operations.
+def AND_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = and($src1, $src2)",
+ [(set DoubleRegs:$dst, (and DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+def OR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = or($src1, $src2)",
+ [(set DoubleRegs:$dst, (or DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+def XOR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = xor($src1, $src2)",
+ [(set DoubleRegs:$dst, (xor DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+// Maximum.
+def MAXw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = max($src2, $src1)",
+ [(set IntRegs:$dst, (select (i1 (setlt IntRegs:$src2,
+ IntRegs:$src1)),
+ IntRegs:$src1, IntRegs:$src2))]>;
+
+// Minimum.
+def MINw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = min($src2, $src1)",
+ [(set IntRegs:$dst, (select (i1 (setgt IntRegs:$src2,
+ IntRegs:$src1)),
+ IntRegs:$src1, IntRegs:$src2))]>;
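+// In both select patterns the comparison is written out explicitly: max
+// yields $src1 when $src2 < $src1, and min yields $src1 when $src2 > $src1;
+// otherwise $src2 is passed through.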
+
+// Subtract.
+def SUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = sub($src1, $src2)",
+ [(set DoubleRegs:$dst, (sub DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+// Subtract halfword.
+
+// Transfer register.
+let neverHasSideEffects = 1 in
+def TFR_64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
+ "$dst = $src1",
+ []>;
+//===----------------------------------------------------------------------===//
+// ALU64/ALU -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU64/BIT +
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+// ALU64/BIT -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU64/PERM +
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+// ALU64/PERM -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU64/VB +
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+// ALU64/VB -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU64/VH +
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+// ALU64/VH -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU64/VW +
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+// ALU64/VW -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// CR +
+//===----------------------------------------------------------------------===//
+// Logical reductions on predicates.
+
+// Looping instructions.
+
+// Pipelined looping instructions.
+
+// Logical operations on predicates.
+def AND_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
+ "$dst = and($src1, $src2)",
+ [(set PredRegs:$dst, (and PredRegs:$src1, PredRegs:$src2))]>;
+
+let neverHasSideEffects = 1 in
+def AND_pnotp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1,
+ PredRegs:$src2),
+ "$dst = and($src1, !$src2)",
+ []>;
+
+def ANY_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
+ "$dst = any8($src1)",
+ []>;
+
+def ALL_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
+ "$dst = all8($src1)",
+ []>;
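+// any8 is true when any of the eight bits of the predicate register is set;
+// all8 is true only when all eight bits are set.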
+
+def VITPACK_pp : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ PredRegs:$src2),
+ "$dst = vitpack($src1, $src2)",
+ []>;
+
+def VALIGN_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2,
+ PredRegs:$src3),
+ "$dst = valignb($src1, $src2, $src3)",
+ []>;
+
+def VSPLICE_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2,
+ PredRegs:$src3),
+ "$dst = vspliceb($src1, $src2, $src3)",
+ []>;
+
+def MASK_p : SInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1),
+ "$dst = mask($src1)",
+ []>;
+
+def NOT_p : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1),
+ "$dst = not($src1)",
+ [(set PredRegs:$dst, (not PredRegs:$src1))]>;
+
+def OR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
+ "$dst = or($src1, $src2)",
+ [(set PredRegs:$dst, (or PredRegs:$src1, PredRegs:$src2))]>;
+
+def XOR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2),
+ "$dst = xor($src1, $src2)",
+ [(set PredRegs:$dst, (xor PredRegs:$src1, PredRegs:$src2))]>;
+
+
+// User control register transfer.
+//===----------------------------------------------------------------------===//
+// CR -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// J +
+//===----------------------------------------------------------------------===//
+// Jump to address.
+let isBranch = 1, isTerminator=1, isBarrier = 1, isPredicable = 1 in {
+ def JMP : JInst< (outs),
+ (ins brtarget:$offset),
+ "jump $offset",
+ [(br bb:$offset)]>;
+}
+
+// if (p0) jump
+let isBranch = 1, isTerminator=1, Defs = [PC],
+ isPredicated = 1 in {
+ def JMP_c : JInst< (outs),
+ (ins PredRegs:$src, brtarget:$offset),
+ "if ($src) jump $offset",
+ [(brcond PredRegs:$src, bb:$offset)]>;
+}
+
+// if (!p0) jump
+let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
+ isPredicated = 1 in {
+ def JMP_cNot : JInst< (outs),
+ (ins PredRegs:$src, brtarget:$offset),
+ "if (!$src) jump $offset",
+ []>;
+}
+
+let isTerminator = 1, isBranch = 1, neverHasSideEffects = 1, Defs = [PC],
+ isPredicated = 1 in {
+ def BRCOND : JInst < (outs), (ins PredRegs:$pred, brtarget:$dst),
+ "if ($pred) jump $dst",
+ []>;
+}
+
+// Jump to address conditioned on new predicate.
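+// The :t / :nt suffixes are static branch-prediction hints: taken and
+// not-taken, respectively.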
+// if (p0) jump:t
+let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
+ isPredicated = 1 in {
+ def JMP_cdnPt : JInst< (outs),
+ (ins PredRegs:$src, brtarget:$offset),
+ "if ($src.new) jump:t $offset",
+ []>;
+}
+
+// if (!p0) jump:t
+let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
+ isPredicated = 1 in {
+ def JMP_cdnNotPt : JInst< (outs),
+ (ins PredRegs:$src, brtarget:$offset),
+ "if (!$src.new) jump:t $offset",
+ []>;
+}
+
+// if (p0) jump:nt
+let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
+ isPredicated = 1 in {
+ def JMP_cdnPnt : JInst< (outs),
+ (ins PredRegs:$src, brtarget:$offset),
+ "if ($src.new) jump:nt $offset",
+ []>;
+}
+
+// if (!p0) jump:nt
+let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC],
+ isPredicated = 1 in {
+ def JMP_cdnNotPnt : JInst< (outs),
+ (ins PredRegs:$src, brtarget:$offset),
+ "if (!$src.new) jump:nt $offset",
+ []>;
+}
+//===----------------------------------------------------------------------===//
+// J -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// JR +
+//===----------------------------------------------------------------------===//
+def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+
+// Jump to address from register.
+let isReturn = 1, isTerminator = 1, isBarrier = 1,
+ Defs = [PC], Uses = [R31] in {
+ def JMPR: JRInst<(outs), (ins),
+ "jumpr r31",
+ [(retflag)]>;
+}
+
+// Jump to address from register.
+let isReturn = 1, isTerminator = 1, isBarrier = 1,
+ Defs = [PC], Uses = [R31] in {
+ def JMPR_cPt: JRInst<(outs), (ins PredRegs:$src1),
+ "if ($src1) jumpr r31",
+ []>;
+}
+
+// Jump to address from register.
+let isReturn = 1, isTerminator = 1, isBarrier = 1,
+ Defs = [PC], Uses = [R31] in {
+ def JMPR_cNotPt: JRInst<(outs), (ins PredRegs:$src1),
+ "if (!$src1) jumpr r31",
+ []>;
+}
+
+//===----------------------------------------------------------------------===//
+// JR -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// LD +
+//===----------------------------------------------------------------------===//
+///
+/// Make sure that in a post-increment load, the first operand is always the
+/// post-increment operand.
+///
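+// For example, in POST_LDrid below the base register $src1 is the first
+// input operand and is tied to the updated base $dst2 by "$src1 = $dst2".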
+// Load doubleword.
+let isPredicable = 1 in
+def LDrid : LDInst<(outs DoubleRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memd($addr)",
+ [(set DoubleRegs:$dst, (load ADDRriS11_3:$addr))]>;
+
+let isPredicable = 1, AddedComplexity = 20 in
+def LDrid_indexed : LDInst<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, s11_3Imm:$offset),
+ "$dst=memd($src1+#$offset)",
+ [(set DoubleRegs:$dst, (load (add IntRegs:$src1,
+ s11_3ImmPred:$offset)))]>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_GP : LDInst<(outs DoubleRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memd(#$global+$offset)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDd_GP : LDInst<(outs DoubleRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memd(#$global)",
+ []>;
+
+let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrid : LDInstPI<(outs DoubleRegs:$dst, IntRegs:$dst2),
+ (ins IntRegs:$src1, s4Imm:$offset),
+ "$dst = memd($src1++#$offset)",
+ [],
+ "$src1 = $dst2">;
+
+// Load doubleword conditionally.
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_cPt : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1) $dst = memd($addr)",
+ []>;
+
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_cNotPt : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1) $dst = memd($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_indexed_cPt : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
+ "if ($src1) $dst=memd($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_indexed_cNotPt : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
+ "if (!$src1) $dst=memd($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrid_cPt : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
+ "if ($src1) $dst1 = memd($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrid_cNotPt : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
+ "if (!$src1) $dst1 = memd($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_cdnPt : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1.new) $dst = memd($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_cdnNotPt : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1.new) $dst = memd($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_indexed_cdnPt : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
+ "if ($src1.new) $dst=memd($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_indexed_cdnNotPt : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
+ "if (!$src1.new) $dst=memd($src2+#$src3)",
+ []>;
+
+
+// Load byte.
+let isPredicable = 1 in
+def LDrib : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memb($addr)",
+ [(set IntRegs:$dst, (sextloadi8 ADDRriS11_0:$addr))]>;
+
+def LDrib_ae : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memb($addr)",
+ [(set IntRegs:$dst, (extloadi8 ADDRriS11_0:$addr))]>;
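+// The _ae (any-extend) forms match extloadi8, where the extension kind is a
+// don't-care, so the sign-extending memb encoding can be reused for them.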
+
+// Indexed load byte.
+let isPredicable = 1, AddedComplexity = 20 in
+def LDrib_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_0Imm:$offset),
+ "$dst=memb($src1+#$offset)",
+ [(set IntRegs:$dst, (sextloadi8 (add IntRegs:$src1,
+ s11_0ImmPred:$offset)))]>;
+
+
+// Indexed load byte any-extend.
+let AddedComplexity = 20 in
+def LDrib_ae_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_0Imm:$offset),
+ "$dst=memb($src1+#$offset)",
+ [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1,
+ s11_0ImmPred:$offset)))]>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memb(#$global+$offset)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDb_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memb(#$global)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDub_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memub(#$global)",
+ []>;
+
+let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrib : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+ (ins IntRegs:$src1, s4Imm:$offset),
+ "$dst = memb($src1++#$offset)",
+ [],
+ "$src1 = $dst2">;
+
+// Load byte conditionally.
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1) $dst = memb($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1) $dst = memb($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_indexed_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
+ "if ($src1) $dst = memb($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
+ "if (!$src1) $dst = memb($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrib_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
+ "if ($src1) $dst1 = memb($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrib_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
+ "if (!$src1) $dst1 = memb($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1.new) $dst = memb($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1.new) $dst = memb($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
+ "if ($src1.new) $dst = memb($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
+ "if (!$src1.new) $dst = memb($src2+#$src3)",
+ []>;
+
+
+// Load halfword.
+let isPredicable = 1 in
+def LDrih : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memh($addr)",
+ [(set IntRegs:$dst, (sextloadi16 ADDRriS11_1:$addr))]>;
+
+let isPredicable = 1, AddedComplexity = 20 in
+def LDrih_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_1Imm:$offset),
+ "$dst=memh($src1+#$offset)",
+ [(set IntRegs:$dst, (sextloadi16 (add IntRegs:$src1,
+ s11_1ImmPred:$offset)))] >;
+
+def LDrih_ae : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memh($addr)",
+ [(set IntRegs:$dst, (extloadi16 ADDRriS11_1:$addr))]>;
+
+let AddedComplexity = 20 in
+def LDrih_ae_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_1Imm:$offset),
+ "$dst=memh($src1+#$offset)",
+ [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1,
+ s11_1ImmPred:$offset)))] >;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memh(#$global+$offset)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDh_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memh(#$global)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDuh_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memuh(#$global)",
+ []>;
+
+
+let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrih : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+ (ins IntRegs:$src1, s4Imm:$offset),
+ "$dst = memh($src1++#$offset)",
+ [],
+ "$src1 = $dst2">;
+
+// Load halfword conditionally.
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1) $dst = memh($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1) $dst = memh($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_indexed_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
+ "if ($src1) $dst = memh($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
+ "if (!$src1) $dst = memh($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrih_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
+ "if ($src1) $dst1 = memh($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDrih_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
+ "if (!$src1) $dst1 = memh($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1.new) $dst = memh($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1.new) $dst = memh($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
+ "if ($src1.new) $dst = memh($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
+ "if (!$src1.new) $dst = memh($src2+#$src3)",
+ []>;
+
+// Load unsigned byte.
+let isPredicable = 1 in
+def LDriub : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memub($addr)",
+ [(set IntRegs:$dst, (zextloadi8 ADDRriS11_0:$addr))]>;
+
+let isPredicable = 1 in
+def LDriubit : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memub($addr)",
+ [(set IntRegs:$dst, (zextloadi1 ADDRriS11_0:$addr))]>;
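+// Boolean (i1) zero-extending loads reuse the memub form: a stored boolean
+// occupies a full byte whose value is already 0 or 1.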
+
+let isPredicable = 1, AddedComplexity = 20 in
+def LDriub_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_0Imm:$offset),
+ "$dst=memub($src1+#$offset)",
+ [(set IntRegs:$dst, (zextloadi8 (add IntRegs:$src1,
+ s11_0ImmPred:$offset)))]>;
+
+let AddedComplexity = 20 in
+def LDriubit_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_0Imm:$offset),
+ "$dst=memub($src1+#$offset)",
+ [(set IntRegs:$dst, (zextloadi1 (add IntRegs:$src1,
+ s11_0ImmPred:$offset)))]>;
+
+def LDriub_ae : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memub($addr)",
+ [(set IntRegs:$dst, (extloadi8 ADDRriS11_0:$addr))]>;
+
+
+let AddedComplexity = 20 in
+def LDriub_ae_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_0Imm:$offset),
+ "$dst=memub($src1+#$offset)",
+ [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1,
+ s11_0ImmPred:$offset)))]>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memub(#$global+$offset)",
+ []>;
+
+let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriub : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+ (ins IntRegs:$src1, s4Imm:$offset),
+ "$dst = memub($src1++#$offset)",
+ [],
+ "$src1 = $dst2">;
+
+// Load unsigned byte conditionally.
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1) $dst = memub($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1) $dst = memub($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_indexed_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
+ "if ($src1) $dst = memub($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
+ "if (!$src1) $dst = memub($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriub_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
+ "if ($src1) $dst1 = memub($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriub_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
+ "if (!$src1) $dst1 = memub($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1.new) $dst = memub($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1.new) $dst = memub($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
+ "if ($src1.new) $dst = memub($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
+ "if (!$src1.new) $dst = memub($src2+#$src3)",
+ []>;
+
+// Load unsigned halfword.
+let isPredicable = 1 in
+def LDriuh : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memuh($addr)",
+ [(set IntRegs:$dst, (zextloadi16 ADDRriS11_1:$addr))]>;
+
+// Indexed load unsigned halfword.
+let isPredicable = 1, AddedComplexity = 20 in
+def LDriuh_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_1Imm:$offset),
+ "$dst=memuh($src1+#$offset)",
+ [(set IntRegs:$dst, (zextloadi16 (add IntRegs:$src1,
+ s11_1ImmPred:$offset)))]>;
+
+def LDriuh_ae : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr),
+ "$dst = memuh($addr)",
+ [(set IntRegs:$dst, (extloadi16 ADDRriS11_1:$addr))]>;
+
+
+// Indexed load unsigned halfword any-extend.
+let AddedComplexity = 20 in
+def LDriuh_ae_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_1Imm:$offset),
+ "$dst=memuh($src1+#$offset)",
+ [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1,
+ s11_1ImmPred:$offset)))] >;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memuh(#$global+$offset)",
+ []>;
+
+let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriuh : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+ (ins IntRegs:$src1, s4Imm:$offset),
+ "$dst = memuh($src1++#$offset)",
+ [],
+ "$src1 = $dst2">;
+
+// Load unsigned halfword conditionally.
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1) $dst = memuh($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1) $dst = memuh($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_indexed_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
+ "if ($src1) $dst = memuh($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
+ "if (!$src1) $dst = memuh($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriuh_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
+ "if ($src1) $dst1 = memuh($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriuh_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
+ "if (!$src1) $dst1 = memuh($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1.new) $dst = memuh($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1.new) $dst = memuh($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
+ "if ($src1.new) $dst = memuh($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
+ "if (!$src1.new) $dst = memuh($src2+#$src3)",
+ []>;
+
+
+// Load word.
+let isPredicable = 1 in
+def LDriw : LDInst<(outs IntRegs:$dst),
+ (ins MEMri:$addr), "$dst = memw($addr)",
+ [(set IntRegs:$dst, (load ADDRriS11_2:$addr))]>;
+
+// Load predicate.
+let mayLoad = 1, Defs = [R10,R11] in
+def LDriw_pred : LDInst<(outs PredRegs:$dst),
+ (ins MEMri:$addr),
+ "Error; should not emit",
+ []>;
+
+// Indexed load.
+let isPredicable = 1, AddedComplexity = 20 in
+def LDriw_indexed : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s11_2Imm:$offset),
+ "$dst=memw($src1+#$offset)",
+ [(set IntRegs:$dst, (load (add IntRegs:$src1,
+ s11_2ImmPred:$offset)))]>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memw(#$global+$offset)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDw_GP : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memw(#$global)",
+ []>;
+
+let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriw : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
+ (ins IntRegs:$src1, s4Imm:$offset),
+ "$dst = memw($src1++#$offset)",
+ [],
+ "$src1 = $dst2">;
+
+// Load word conditionally.
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1) $dst = memw($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1) $dst = memw($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_indexed_cPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
+ "if ($src1) $dst=memw($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
+ "if (!$src1) $dst=memw($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriw_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
+ "if ($src1) $dst1 = memw($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
+def POST_LDriw_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
+ "if (!$src1) $dst1 = memw($src2++#$src3)",
+ [],
+ "$src2 = $dst2">;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if ($src1.new) $dst = memw($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, MEMri:$addr),
+ "if (!$src1.new) $dst = memw($addr)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
+ "if ($src1.new) $dst=memw($src2+#$src3)",
+ []>;
+
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
+ "if (!$src1.new) $dst=memw($src2+#$src3)",
+ []>;
+
+// Deallocate stack frame.
+let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in {
+ def DEALLOCFRAME : LDInst<(outs), (ins i32imm:$amt1),
+ "deallocframe",
+ []>;
+}
+
+// Load and unpack bytes to halfwords.
+//===----------------------------------------------------------------------===//
+// LD -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MTYPE/ALU +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// MTYPE/ALU -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MTYPE/COMPLEX +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// MTYPE/COMPLEX -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MTYPE/MPYH +
+//===----------------------------------------------------------------------===//
+// Multiply and use lower result.
+// Rd=+mpyi(Rs,#u8)
+def MPYI_riu : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2),
+ "$dst =+ mpyi($src1, #$src2)",
+ [(set IntRegs:$dst, (mul IntRegs:$src1, u8ImmPred:$src2))]>;
+
+// Rd=-mpyi(Rs,#u8)
+def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2),
+ "$dst =- mpyi($src1, #$src2)",
+ [(set IntRegs:$dst,
+ (mul IntRegs:$src1, n8ImmPred:$src2))]>;
+
+// Rd=mpyi(Rs,#m9)
+// s9 is NOT the same as m9, but it works... so far.
+// The assembler maps to either Rd=+mpyi(Rs,#u8) or Rd=-mpyi(Rs,#u8)
+// depending on the value of m9. See Arch Spec.
+def MPYI_ri : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Imm:$src2),
+ "$dst = mpyi($src1, #$src2)",
+ [(set IntRegs:$dst, (mul IntRegs:$src1, s9ImmPred:$src2))]>;
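+// For example, "r5 = mpyi(r4, #-3)" would be emitted by the assembler as the
+// Rd=-mpyi(Rs,#u8) form with #u8 = 3; non-negative values use Rd=+mpyi.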
+
+// Rd=mpyi(Rs,Rt)
+def MPYI : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = mpyi($src1, $src2)",
+ [(set IntRegs:$dst, (mul IntRegs:$src1, IntRegs:$src2))]>;
+
+// Rx+=mpyi(Rs,#u8)
+def MPYI_acc_ri : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3),
+ "$dst += mpyi($src2, #$src3)",
+ [(set IntRegs:$dst,
+ (add (mul IntRegs:$src2, u8ImmPred:$src3), IntRegs:$src1))],
+ "$src1 = $dst">;
+
+// Rx+=mpyi(Rs,Rt)
+def MPYI_acc_rr : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst += mpyi($src2, $src3)",
+ [(set IntRegs:$dst,
+ (add (mul IntRegs:$src2, IntRegs:$src3), IntRegs:$src1))],
+ "$src1 = $dst">;
+
+// Rx-=mpyi(Rs,#u8)
+def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3),
+ "$dst -= mpyi($src2, #$src3)",
+ [(set IntRegs:$dst,
+ (sub IntRegs:$src1, (mul IntRegs:$src2, u8ImmPred:$src3)))],
+ "$src1 = $dst">;
+
+// Multiply and use upper result.
+// Rd=mpy(Rs,Rt.H):<<1:rnd:sat
+// Rd=mpy(Rs,Rt.L):<<1:rnd:sat
+// Rd=mpy(Rs,Rt)
+def MPY : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = mpy($src1, $src2)",
+ [(set IntRegs:$dst, (mulhs IntRegs:$src1, IntRegs:$src2))]>;
+
+// Rd=mpy(Rs,Rt):rnd
+// Rd=mpyu(Rs,Rt)
+def MPYU : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = mpyu($src1, $src2)",
+ [(set IntRegs:$dst, (mulhu IntRegs:$src1, IntRegs:$src2))]>;
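+// mpy/mpyu return the upper 32 bits of the 64-bit signed/unsigned product,
+// which is exactly what the mulhs/mulhu selection DAG nodes request.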
+
+// Multiply and use full result.
+// Rdd=mpyu(Rs,Rt)
+def MPYU64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = mpyu($src1, $src2)",
+ [(set DoubleRegs:$dst, (mul (i64 (anyext IntRegs:$src1)),
+ (i64 (anyext IntRegs:$src2))))]>;
+
+// Rdd=mpy(Rs,Rt)
+def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = mpy($src1, $src2)",
+ [(set DoubleRegs:$dst, (mul (i64 (sext IntRegs:$src1)),
+ (i64 (sext IntRegs:$src2))))]>;
+
+
+// Multiply and accumulate, use full result.
+// Rxx[+-]=mpy(Rs,Rt)
+// Rxx+=mpy(Rs,Rt)
+def MPY64_acc : MInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst += mpy($src2, $src3)",
+ [(set DoubleRegs:$dst,
+ (add (mul (i64 (sext IntRegs:$src2)), (i64 (sext IntRegs:$src3))),
+ DoubleRegs:$src1))],
+ "$src1 = $dst">;
+
+// Rxx-=mpy(Rs,Rt)
+def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst -= mpy($src2, $src3)",
+ [(set DoubleRegs:$dst,
+ (sub DoubleRegs:$src1,
+ (mul (i64 (sext IntRegs:$src2)), (i64 (sext IntRegs:$src3)))))],
+ "$src1 = $dst">;
+
+// Rxx[+-]=mpyu(Rs,Rt)
+// Rxx+=mpyu(Rs,Rt)
+def MPYU64_acc : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ IntRegs:$src2, IntRegs:$src3),
+ "$dst += mpyu($src2, $src3)",
+ [(set DoubleRegs:$dst, (add (mul (i64 (anyext IntRegs:$src2)),
+ (i64 (anyext IntRegs:$src3))),
+ DoubleRegs:$src1))],"$src1 = $dst">;
+
+// Rxx-=mpyu(Rs,Rt)
+def MPYU64_sub : MInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst += mpyu($src2, $src3)",
+ [(set DoubleRegs:$dst,
+ (sub DoubleRegs:$src1,
+ (mul (i64 (anyext IntRegs:$src2)),
+ (i64 (anyext IntRegs:$src3)))))],
+ "$src1 = $dst">;
+
+
+def ADDrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
+ IntRegs:$src2, IntRegs:$src3),
+ "$dst += add($src2, $src3)",
+ [(set IntRegs:$dst, (add (add IntRegs:$src2, IntRegs:$src3),
+ IntRegs:$src1))],
+ "$src1 = $dst">;
+
+def ADDri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
+ IntRegs:$src2, s8Imm:$src3),
+ "$dst += add($src2, #$src3)",
+ [(set IntRegs:$dst, (add (add IntRegs:$src2, s8ImmPred:$src3),
+ IntRegs:$src1))],
+ "$src1 = $dst">;
+
+def SUBrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
+ IntRegs:$src2, IntRegs:$src3),
+ "$dst -= add($src2, $src3)",
+ [(set IntRegs:$dst, (sub IntRegs:$src1, (add IntRegs:$src2,
+ IntRegs:$src3)))],
+ "$src1 = $dst">;
+
+def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1,
+ IntRegs:$src2, s8Imm:$src3),
+ "$dst -= add($src2, #$src3)",
+ [(set IntRegs:$dst, (sub IntRegs:$src1,
+ (add IntRegs:$src2, s8ImmPred:$src3)))],
+ "$src1 = $dst">;
+
+//===----------------------------------------------------------------------===//
+// MTYPE/MPYH -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MTYPE/MPYS +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// MTYPE/MPYS -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MTYPE/VB +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// MTYPE/VB -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MTYPE/VH +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// MTYPE/VH -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ST +
+//===----------------------------------------------------------------------===//
+///
+/// Assumptions: ****** DO NOT IGNORE ********
+/// 1. Make sure that in a post-increment store, the zeroth operand is always
+///    the post-increment operand.
+/// 2. Make sure that the store value operand (Rt/Rtt) in a store is always
+///    the last operand.
+///
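+// For example, in POST_STdri below the updated base register $dst is operand
+// zero, while in STrid and STrid_indexed the stored value is the last input
+// operand.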
+// Store doubleword.
+let isPredicable = 1 in
+def STrid : STInst<(outs),
+ (ins MEMri:$addr, DoubleRegs:$src1),
+ "memd($addr) = $src1",
+ [(store DoubleRegs:$src1, ADDRriS11_3:$addr)]>;
+
+// Indexed store double word.
+let AddedComplexity = 10, isPredicable = 1 in
+def STrid_indexed : STInst<(outs),
+ (ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3),
+ "memd($src1+#$src2) = $src3",
+ [(store DoubleRegs:$src3,
+ (add IntRegs:$src1, s11_3ImmPred:$src2))]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrid_GP : STInst<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src),
+ "memd(#$global+$offset) = $src",
+ []>;
+
+let hasCtrlDep = 1, isPredicable = 1 in
+def POST_STdri : STInstPI<(outs IntRegs:$dst),
+ (ins DoubleRegs:$src1, IntRegs:$src2, s4Imm:$offset),
+ "memd($src2++#$offset) = $src1",
+ [(set IntRegs:$dst,
+ (post_store DoubleRegs:$src1, IntRegs:$src2, s4_3ImmPred:$offset))],
+ "$src2 = $dst">;
+
+// Store doubleword conditionally.
+// if ([!]Pv) memd(Rs+#u6:3)=Rtt
+// if (Pv) memd(Rs+#u6:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
+def STrid_cPt : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
+ "if ($src1) memd($addr) = $src2",
+ []>;
+
+// if (!Pv) memd(Rs+#u6:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
+def STrid_cNotPt : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
+ "if (!$src1) memd($addr) = $src2",
+ []>;
+
+// if (Pv) memd(Rs+#u6:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
+def STrid_indexed_cPt : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
+ DoubleRegs:$src4),
+ "if ($src1) memd($src2+#$src3) = $src4",
+ []>;
+
+// if (!Pv) memd(Rs+#u6:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
+def STrid_indexed_cNotPt : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
+ DoubleRegs:$src4),
+ "if (!$src1) memd($src2+#$src3) = $src4",
+ []>;
+
+// if ([!]Pv) memd(Rx++#s4:3)=Rtt
+// if (Pv) memd(Rx++#s4:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+    isPredicated = 1 in
+def POST_STdri_cPt : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
+ s4_3Imm:$offset),
+ "if ($src1) memd($src3++#$offset) = $src2",
+ [],
+ "$src3 = $dst">;
+
+// if (!Pv) memd(Rx++#s4:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def POST_STdri_cNotPt : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
+ s4_3Imm:$offset),
+ "if (!$src1) memd($src3++#$offset) = $src2",
+ [],
+ "$src3 = $dst">;
+
+
+// Store byte.
+// memb(Rs+#s11:0)=Rt
+let isPredicable = 1 in
+def STrib : STInst<(outs),
+ (ins MEMri:$addr, IntRegs:$src1),
+ "memb($addr) = $src1",
+ [(truncstorei8 IntRegs:$src1, ADDRriS11_0:$addr)]>;
+
+let AddedComplexity = 10, isPredicable = 1 in
+def STrib_indexed : STInst<(outs),
+ (ins IntRegs:$src1, s11_0Imm:$src2, IntRegs:$src3),
+ "memb($src1+#$src2) = $src3",
+ [(truncstorei8 IntRegs:$src3, (add IntRegs:$src1,
+ s11_0ImmPred:$src2))]>;
+
+// memb(gp+#u16:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP : STInst<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memb(#$global+$offset) = $src",
+ []>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP : STInst<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memb(#$global) = $src",
+ []>;
+
+// memb(Rx++#s4:0)=Rt
+let hasCtrlDep = 1, isPredicable = 1 in
+def POST_STbri : STInstPI<(outs IntRegs:$dst), (ins IntRegs:$src1,
+ IntRegs:$src2,
+ s4Imm:$offset),
+ "memb($src2++#$offset) = $src1",
+ [(set IntRegs:$dst,
+ (post_truncsti8 IntRegs:$src1, IntRegs:$src2,
+ s4_0ImmPred:$offset))],
+ "$src2 = $dst">;
+
+// Store byte conditionally.
+// if ([!]Pv) memb(Rs+#u6:0)=Rt
+// if (Pv) memb(Rs+#u6:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_cPt : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1) memb($addr) = $src2",
+ []>;
+
+// if (!Pv) memb(Rs+#u6:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_cNotPt : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1) memb($addr) = $src2",
+ []>;
+
+// if (Pv) memb(Rs+#u6:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_indexed_cPt : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
+ "if ($src1) memb($src2+#$src3) = $src4",
+ []>;
+
+// if (!Pv) memb(Rs+#u6:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_indexed_cNotPt : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
+ "if (!$src1) memb($src2+#$src3) = $src4",
+ []>;
+
+// if ([!]Pv) memb(Rx++#s4:0)=Rt
+// if (Pv) memb(Rx++#s4:0)=Rt
+let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
+def POST_STbri_cPt : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
+ "if ($src1) memb($src3++#$offset) = $src2",
+ [],"$src3 = $dst">;
+
+// if (!Pv) memb(Rx++#s4:0)=Rt
+let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
+def POST_STbri_cNotPt : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
+ "if (!$src1) memb($src3++#$offset) = $src2",
+ [],"$src3 = $dst">;
+
+
+// Store halfword.
+// memh(Rs+#s11:1)=Rt
+let isPredicable = 1 in
+def STrih : STInst<(outs),
+ (ins MEMri:$addr, IntRegs:$src1),
+ "memh($addr) = $src1",
+ [(truncstorei16 IntRegs:$src1, ADDRriS11_1:$addr)]>;
+
+
+let AddedComplexity = 10, isPredicable = 1 in
+def STrih_indexed : STInst<(outs),
+ (ins IntRegs:$src1, s11_1Imm:$src2, IntRegs:$src3),
+ "memh($src1+#$src2) = $src3",
+ [(truncstorei16 IntRegs:$src3, (add IntRegs:$src1,
+ s11_1ImmPred:$src2))]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP : STInst<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memh(#$global+$offset) = $src",
+ []>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP : STInst<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memh(#$global) = $src",
+ []>;
+
+// memh(Rx++#s4:1)=Rt.H
+// memh(Rx++#s4:1)=Rt
+let hasCtrlDep = 1, isPredicable = 1 in
+def POST_SThri : STInstPI<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s4Imm:$offset),
+ "memh($src2++#$offset) = $src1",
+ [(set IntRegs:$dst,
+ (post_truncsti16 IntRegs:$src1, IntRegs:$src2,
+ s4_1ImmPred:$offset))],
+ "$src2 = $dst">;
+
+// Store halfword conditionally.
+// if ([!]Pv) memh(Rs+#u6:1)=Rt
+// if (Pv) memh(Rs+#u6:1)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_cPt : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1) memh($addr) = $src2",
+ []>;
+
+// if (!Pv) memh(Rs+#u6:1)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_cNotPt : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1) memh($addr) = $src2",
+ []>;
+
+// if (Pv) memh(Rs+#u6:1)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_indexed_cPt : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
+ "if ($src1) memh($src2+#$src3) = $src4",
+ []>;
+
+// if (!Pv) memh(Rs+#u6:1)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_indexed_cNotPt : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
+ "if (!$src1) memh($src2+#$src3) = $src4",
+ []>;
+
+// if ([!]Pv) memh(Rx++#s4:1)=Rt
+// if (Pv) memh(Rx++#s4:1)=Rt
+let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
+def POST_SThri_cPt : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
+ "if ($src1) memh($src3++#$offset) = $src2",
+ [],"$src3 = $dst">;
+
+// if (!Pv) memh(Rx++#s4:1)=Rt
+let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
+def POST_SThri_cNotPt : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
+ "if (!$src1) memh($src3++#$offset) = $src2",
+ [],"$src3 = $dst">;
+
+
+// Store word.
+// Store predicate.
+let Defs = [R10,R11] in
+def STriw_pred : STInst<(outs),
+ (ins MEMri:$addr, PredRegs:$src1),
+ "Error; should not emit",
+ []>;
+
+// memw(Rs+#s11:2)=Rt
+let isPredicable = 1 in
+def STriw : STInst<(outs),
+ (ins MEMri:$addr, IntRegs:$src1),
+ "memw($addr) = $src1",
+ [(store IntRegs:$src1, ADDRriS11_2:$addr)]>;
+
+let AddedComplexity = 10, isPredicable = 1 in
+def STriw_indexed : STInst<(outs),
+ (ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3),
+ "memw($src1+#$src2) = $src3",
+ [(store IntRegs:$src3, (add IntRegs:$src1, s11_2ImmPred:$src2))]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP : STInst<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memw(#$global+$offset) = $src",
+ []>;
+
+let hasCtrlDep = 1, isPredicable = 1 in
+def POST_STwri : STInstPI<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s4Imm:$offset),
+ "memw($src2++#$offset) = $src1",
+ [(set IntRegs:$dst,
+ (post_store IntRegs:$src1, IntRegs:$src2, s4_2ImmPred:$offset))],
+ "$src2 = $dst">;
+
+// Store word conditionally.
+// if ([!]Pv) memw(Rs+#u6:2)=Rt
+// if (Pv) memw(Rs+#u6:2)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_cPt : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1) memw($addr) = $src2",
+ []>;
+
+// if (!Pv) memw(Rs+#u6:2)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_cNotPt : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1) memw($addr) = $src2",
+ []>;
+
+// if (Pv) memw(Rs+#u6:2)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_indexed_cPt : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
+ "if ($src1) memw($src2+#$src3) = $src4",
+ []>;
+
+// if (!Pv) memw(Rs+#u6:2)=Rt
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_indexed_cNotPt : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
+ "if (!$src1) memw($src2+#$src3) = $src4",
+ []>;
+
+// if ([!]Pv) memw(Rx++#s4:2)=Rt
+// if (Pv) memw(Rx++#s4:2)=Rt
+let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
+def POST_STwri_cPt : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
+ "if ($src1) memw($src3++#$offset) = $src2",
+ [],"$src3 = $dst">;
+
+// if (!Pv) memw(Rx++#s4:2)=Rt
+let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
+def POST_STwri_cNotPt : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
+ "if (!$src1) memw($src3++#$offset) = $src2",
+ [],"$src3 = $dst">;
+
+
+
+// Allocate stack frame.
+let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in {
+ def ALLOCFRAME : STInst<(outs),
+ (ins i32imm:$amt),
+ "allocframe(#$amt)",
+ []>;
+}
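+// Per the Hexagon ABI, allocframe saves the LR:FP pair at the top of the new
+// frame, points FP (r30) at the saved pair, and decrements SP (r29) by #$amt;
+// deallocframe above restores LR, FP, and SP from that saved pair.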
+//===----------------------------------------------------------------------===//
+// ST -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// STYPE/ALU +
+//===----------------------------------------------------------------------===//
+// Logical NOT.
+def NOT_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1),
+ "$dst = not($src1)",
+ [(set DoubleRegs:$dst, (not DoubleRegs:$src1))]>;
+
+
+// Sign extend word to doubleword.
+def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
+ "$dst = sxtw($src1)",
+ [(set DoubleRegs:$dst, (sext IntRegs:$src1))]>;
+//===----------------------------------------------------------------------===//
+// STYPE/ALU -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// STYPE/BIT +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// STYPE/BIT -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// STYPE/COMPLEX +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// STYPE/COMPLEX -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// STYPE/PERM +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// STYPE/PERM -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// STYPE/PRED +
+//===----------------------------------------------------------------------===//
+// Predicate transfer.
+let neverHasSideEffects = 1 in
+def TFR_RsPd : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1),
+ "$dst = $src1 // Should almost never emit this",
+ []>;
+
+def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1),
+ "$dst = $src1 // Should almost never emit!",
+ [(set PredRegs:$dst, (trunc IntRegs:$src1))]>;
+//===----------------------------------------------------------------------===//
+// STYPE/PRED -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// STYPE/SHIFT +
+//===----------------------------------------------------------------------===//
+// Shift by immediate.
+def ASR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ "$dst = asr($src1, #$src2)",
+ [(set IntRegs:$dst, (sra IntRegs:$src1, u5ImmPred:$src2))]>;
+
+def ASRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
+ "$dst = asr($src1, #$src2)",
+ [(set DoubleRegs:$dst, (sra DoubleRegs:$src1, u6ImmPred:$src2))]>;
+
+def ASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ "$dst = asl($src1, #$src2)",
+ [(set IntRegs:$dst, (shl IntRegs:$src1, u5ImmPred:$src2))]>;
+
+def LSR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ "$dst = lsr($src1, #$src2)",
+ [(set IntRegs:$dst, (srl IntRegs:$src1, u5ImmPred:$src2))]>;
+
+def LSRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
+ "$dst = lsr($src1, #$src2)",
+ [(set DoubleRegs:$dst, (srl DoubleRegs:$src1, u6ImmPred:$src2))]>;
+
+def LSRd_ri_acc : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2,
+ u6Imm:$src3),
+ "$dst += lsr($src2, #$src3)",
+ [(set DoubleRegs:$dst, (add DoubleRegs:$src1,
+ (srl DoubleRegs:$src2,
+ u6ImmPred:$src3)))],
+ "$src1 = $dst">;
+
+// Shift by register and accumulate.
+def ASR_rr_acc : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1,
+ IntRegs:$src2,
+ IntRegs:$src3),
+ "$dst += asr($src2, $src3)",
+ [], "$src1 = $dst">;
+
+// Shift by immediate and add.
+def ADDASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ u3Imm:$src3),
+ "$dst = addasl($src1, $src2, #$src3)",
+ [(set IntRegs:$dst, (add IntRegs:$src1,
+ (shl IntRegs:$src2,
+ u3ImmPred:$src3)))]>;
+
+// Shift by register.
+def ASL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = asl($src1, $src2)",
+ [(set IntRegs:$dst, (shl IntRegs:$src1, IntRegs:$src2))]>;
+
+def ASR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = asr($src1, $src2)",
+ [(set IntRegs:$dst, (sra IntRegs:$src1, IntRegs:$src2))]>;
+
+
+def LSR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = lsr($src1, $src2)",
+ [(set IntRegs:$dst, (srl IntRegs:$src1, IntRegs:$src2))]>;
+
+def LSLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
+ "$dst = lsl($src1, $src2)",
+ [(set DoubleRegs:$dst, (shl DoubleRegs:$src1, IntRegs:$src2))]>;
+
+def ASRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ IntRegs:$src2),
+ "$dst = asr($src1, $src2)",
+ [(set DoubleRegs:$dst, (sra DoubleRegs:$src1, IntRegs:$src2))]>;
+
+def LSRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ IntRegs:$src2),
+ "$dst = lsr($src1, $src2)",
+ [(set DoubleRegs:$dst, (srl DoubleRegs:$src1, IntRegs:$src2))]>;
+
+//===----------------------------------------------------------------------===//
+// STYPE/SHIFT -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// STYPE/VH +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// STYPE/VH -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// STYPE/VW +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// STYPE/VW -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// SYSTEM/SUPER +
+//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// SYSTEM/SUPER -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// SYSTEM/USER +
+//===----------------------------------------------------------------------===//
+def SDHexagonBARRIER: SDTypeProfile<0, 0, []>;
+def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDHexagonBARRIER,
+ [SDNPHasChain]>;
+
+let hasSideEffects = 1 in
+def BARRIER : STInst<(outs), (ins),
+ "barrier",
+ [(HexagonBARRIER)]>;
+
+//===----------------------------------------------------------------------===//
+// SYSTEM/USER -
+//===----------------------------------------------------------------------===//
+
+// TFRI64 - assembly mapped.
+let isReMaterializable = 1 in
+def TFRI64 : ALU64_rr<(outs DoubleRegs:$dst), (ins s8Imm64:$src1),
+ "$dst = #$src1",
+ [(set DoubleRegs:$dst, s8Imm64Pred:$src1)]>;
+
+// Pseudo instructions to encode a set of conditional transfers.
+// These are used instead of a mux and trade off code size for
+// performance. We perform this transformation optimistically, in the
+// hope that these instructions get promoted to dot-new transfers
+// (see the sketch after these definitions).
+let AddedComplexity = 100 in
+def TFR_condset_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1,
+ IntRegs:$src2,
+ IntRegs:$src3),
+ "Error; should not emit",
+ [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+let AddedComplexity = 100 in
+def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, s12Imm:$src3),
+ "Error; should not emit",
+ [(set IntRegs:$dst,
+ (select PredRegs:$src1, IntRegs:$src2, s12ImmPred:$src3))]>;
+
+let AddedComplexity = 100 in
+def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, s12Imm:$src2, IntRegs:$src3),
+ "Error; should not emit",
+ [(set IntRegs:$dst,
+ (select PredRegs:$src1, s12ImmPred:$src2, IntRegs:$src3))]>;
+
+let AddedComplexity = 100 in
+def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, s12Imm:$src2, s12Imm:$src3),
+ "Error; should not emit",
+ [(set IntRegs:$dst, (select PredRegs:$src1,
+ s12ImmPred:$src2,
+ s12ImmPred:$src3))]>;
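+
+// A sketch of the intended splitting (assumption: the rewriting itself is
+// done by a later pass, not by these patterns):
+//   r0 = TFR_condset_rr(p0, r2, r3)
+// is meant to become
+//   if (p0) r0 = r2
+//   if (!p0) r0 = r3
+// ideally with p0.new so that both transfers fit in one packet.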
+
+// Generate frame index addresses.
+let isReMaterializable = 1 in
+def TFR_FI : ALU32_ri<(outs IntRegs:$dst), (ins FrameIndex:$src1),
+ "$dst = add($src1)",
+ [(set IntRegs:$dst, ADDRri:$src1)]>;
+
+//
+// CR - Type.
+//
+let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
+def LOOP0_i : CRInst<(outs), (ins brtarget:$offset, u10Imm:$src2),
+ "loop0($offset, #$src2)",
+ []>;
+}
+
+let neverHasSideEffects = 1, Defs = [SA0, LC0] in {
+def LOOP0_r : CRInst<(outs), (ins brtarget:$offset, IntRegs:$src2),
+ "loop0($offset, $src2)",
+ []>;
+}
+
+let isBranch = 1, isTerminator = 1, neverHasSideEffects = 1,
+ Defs = [PC, LC0], Uses = [SA0, LC0] in {
+def ENDLOOP0 : CRInst<(outs), (ins brtarget:$offset),
+ ":endloop0",
+ []>;
+}
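+
+// Usage sketch (assembly level, for illustration only): SA0/LC0 hold the
+// loop start address and trip count, so a counted loop looks roughly like
+//   loop0(.Lbody, #10)      // or loop0(.Lbody, r1) for a register count
+// .Lbody:
+//   ...
+//   { ... }:endloop0        // decrements LC0; branches to SA0 while LC0 > 1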
+
+// Support for generating global addresses.
+// Taken from X86InstrInfo.td.
+def SDTHexagonCONST32 : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+ SDTCisPtrTy<0>]>;
+def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>;
+def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>;
+
+// This pattern is incorrect. When we add small data, we should change
+// this pattern to use memw(#foo).
+let isMoveImm = 1 in
+def CONST32 : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
+ "$dst = CONST32(#$global)",
+ [(set IntRegs:$dst,
+ (load (HexagonCONST32 tglobaltlsaddr:$global)))]>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def CONST32_set : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
+ "$dst = CONST32(#$global)",
+ [(set IntRegs:$dst,
+ (HexagonCONST32 tglobaladdr:$global))]>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def CONST32_set_jt : LDInst<(outs IntRegs:$dst), (ins jumptablebase:$jt),
+ "$dst = CONST32(#$jt)",
+ [(set IntRegs:$dst,
+ (HexagonCONST32 tjumptable:$jt))]>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def CONST32GP_set : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global),
+ "$dst = CONST32(#$global)",
+ [(set IntRegs:$dst,
+ (HexagonCONST32_GP tglobaladdr:$global))]>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def CONST32_Int_Real : LDInst<(outs IntRegs:$dst), (ins i32imm:$global),
+ "$dst = CONST32(#$global)",
+ [(set IntRegs:$dst, imm:$global) ]>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def CONST32_Label : LDInst<(outs IntRegs:$dst), (ins bblabel:$label),
+ "$dst = CONST32($label)",
+ [(set IntRegs:$dst, (HexagonCONST32 bbl:$label))]>;
+
+let isReMaterializable = 1, isMoveImm = 1 in
+def CONST64_Int_Real : LDInst<(outs DoubleRegs:$dst), (ins i64imm:$global),
+ "$dst = CONST64(#$global)",
+ [(set DoubleRegs:$dst, imm:$global) ]>;
+
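+// Materialize a false predicate by xor'ing a predicate register with itself.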
+def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins),
+ "$dst = xor($dst, $dst)",
+ [(set PredRegs:$dst, 0)]>;
+
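+// 32x32 multiply that returns the upper 32 bits of the sign-extended 64-bit
+// product, i.e. trunc((sext($src1) * sext($src2)) >> 32).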
+def MPY_trsext : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = mpy($src1, $src2)",
+ [(set IntRegs:$dst,
+ (trunc (i64 (srl (i64 (mul (i64 (sext IntRegs:$src1)),
+ (i64 (sext IntRegs:$src2)))),
+ (i32 32)))))]>;
+
+// Pseudo instructions.
+def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
+
+def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
+ SDTCisVT<1, i32> ]>;
+
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue]>;
+
+def SDT_SPCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+
+def call : SDNode<"HexagonISD::CALL", SDT_SPCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>;
+
+// For tail calls, a HexagonTCRet SDNode has three SDNode properties: a chain,
+// optional glue, and variadic arguments.
+// Its single operand has pointer type.
+def HexagonTCRet : SDNode<"HexagonISD::TC_RETURN", SDT_SPCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+
+let Defs = [R29, R30], Uses = [R31, R30, R29] in {
+ def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
+ "Should never be emitted",
+ [(callseq_start timm:$amt)]>;
+}
+
+let Defs = [R29, R30, R31], Uses = [R29] in {
+ def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "Should never be emitted",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+}
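+
+// Illustrative lowering (a sketch, not emitted literally): a call is
+// bracketed as
+//   ADJCALLSTACKDOWN #0
+//   call foo
+//   ADJCALLSTACKUP #0, #0
+// and both pseudos are folded away once the final frame is laid out.
+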
+// Call subroutine.
+let isCall = 1, neverHasSideEffects = 1,
+ Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
+ R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
+ def CALL : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ "call $dst", []>;
+}
+
+// Call subroutine from register.
+let isCall = 1, neverHasSideEffects = 1,
+ Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
+ R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
+ def CALLR : JRInst<(outs), (ins IntRegs:$dst, variable_ops),
+ "callr $dst",
+ []>;
+ }
+
+// Tail Calls.
+let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
+ Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
+ R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
+ def TCRETURNtg : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ "jump $dst // TAILCALL", []>;
+}
+let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
+ Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
+ R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
+ def TCRETURNtext : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ "jump $dst // TAILCALL", []>;
+}
+
+let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
+ Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
+ R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
+ def TCRETURNR : JInst<(outs), (ins IntRegs:$dst, variable_ops),
+ "jumpr $dst // TAILCALL", []>;
+}
+// Map call instruction.
+def : Pat<(call IntRegs:$dst),
+ (CALLR IntRegs:$dst)>, Requires<[HasV2TOnly]>;
+def : Pat<(call tglobaladdr:$dst),
+ (CALL tglobaladdr:$dst)>, Requires<[HasV2TOnly]>;
+def : Pat<(call texternalsym:$dst),
+ (CALL texternalsym:$dst)>, Requires<[HasV2TOnly]>;
+// Tail calls.
+def : Pat<(HexagonTCRet tglobaladdr:$dst),
+ (TCRETURNtg tglobaladdr:$dst)>;
+def : Pat<(HexagonTCRet texternalsym:$dst),
+ (TCRETURNtext texternalsym:$dst)>;
+def : Pat<(HexagonTCRet IntRegs:$dst),
+ (TCRETURNR IntRegs:$dst)>;
+
+// Map from r0 = and(r1, 65535) to r0 = zxth(r1).
+def : Pat <(and IntRegs:$src1, 65535),
+ (ZXTH IntRegs:$src1)>;
+
+// Map from r0 = and(r1, 255) to r0 = zxtb(r1).
+def : Pat <(and IntRegs:$src1, 255),
+ (ZXTB IntRegs:$src1)>;
+
+// Map Add(p1, true) to p1 = not(p1).
+// Add(p1, false) should never be produced; if it is, it must be
+// mapped to a NOOP.
+def : Pat <(add PredRegs:$src1, -1),
+ (NOT_p PredRegs:$src1)>;
+
+// Map from r2 = mux(p0 = setlt(r0, r1), r3, r4) =>
+//   p0 = cmp.lt(r0, r1); r2 = mux(p0, r4, r3).
+def : Pat <(select (i1 (setlt IntRegs:$src1, IntRegs:$src2)), IntRegs:$src3,
+ IntRegs:$src4),
+ (TFR_condset_rr (CMPLTrr IntRegs:$src1, IntRegs:$src2), IntRegs:$src4,
+ IntRegs:$src3)>, Requires<[HasV2TOnly]>;
+
+// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i).
+def : Pat <(select (not PredRegs:$src1), s8ImmPred:$src2, s8ImmPred:$src3),
+ (TFR_condset_ii PredRegs:$src1, s8ImmPred:$src3, s8ImmPred:$src2)>;
+
+// Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump.
+def : Pat <(brcond (not PredRegs:$src1), bb:$offset),
+ (JMP_cNot PredRegs:$src1, bb:$offset)>;
+
+// Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2).
+def : Pat <(and PredRegs:$src1, (not PredRegs:$src2)),
+ (AND_pnotp PredRegs:$src1, PredRegs:$src2)>;
+
+// Map from store(globaladdress + x) -> memd(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(store DoubleRegs:$src1,
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, DoubleRegs:$src1)>;
+
+// Map from store(globaladdress) -> memd(#foo + 0).
+let AddedComplexity = 100 in
+def : Pat <(store DoubleRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)),
+ (STrid_GP tglobaladdr:$global, 0, DoubleRegs:$src1)>;
+
+// Map from store(globaladdress + x) -> memw(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(store IntRegs:$src1, (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>;
+
+// Map from store(globaladdress) -> memw(#foo + 0).
+let AddedComplexity = 100 in
+def : Pat <(store IntRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)),
+ (STriw_GP tglobaladdr:$global, 0, IntRegs:$src1)>;
+
+// Map from store(globaladdress + x) -> memh(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(truncstorei16 IntRegs:$src1,
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>;
+
+// Map from store(globaladdress) -> memh(#foo).
+let AddedComplexity = 100 in
+def : Pat <(truncstorei16 IntRegs:$src1,
+ (HexagonCONST32_GP tglobaladdr:$global)),
+ (STh_GP tglobaladdr:$global, IntRegs:$src1)>;
+
+// Map from store(globaladdress + x) -> memb(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(truncstorei8 IntRegs:$src1,
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>;
+
+// Map from store(globaladdress) -> memb(#foo).
+let AddedComplexity = 100 in
+def : Pat <(truncstorei8 IntRegs:$src1,
+ (HexagonCONST32_GP tglobaladdr:$global)),
+ (STb_GP tglobaladdr:$global, IntRegs:$src1)>;
+
+// Map from load(globaladdress + x) -> memw(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(load (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+
+// Map from load(globaladdress) -> memw(#foo + 0).
+let AddedComplexity = 100 in
+def : Pat <(load (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDw_GP tglobaladdr:$global)>;
+
+// Map from load(globaladdress + x) -> memd(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+
+// Map from load(globaladdress) -> memd(#foo + 0).
+let AddedComplexity = 100 in
+def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (LDd_GP tglobaladdr:$global)>;
+
+
+// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress + 0), Pd = Rd.
+let AddedComplexity = 100 in
+def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (TFR_PdRs (LDrib_GP tglobaladdr:$global, 0))>;
+
+// Map from load(globaladdress + x) -> memh(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(sextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+
+// Map from load(globaladdress) -> memh(#foo + 0).
+let AddedComplexity = 100 in
+def : Pat <(sextloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDrih_GP tglobaladdr:$global, 0)>;
+
+// Map from load(globaladdress + x) -> memuh(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+
+// Map from load(globaladdress) -> memuh(#foo + 0).
+let AddedComplexity = 100 in
+def : Pat <(zextloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDriuh_GP tglobaladdr:$global, 0)>;
+
+// Map from load(globaladdress + x) -> memuh(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+
+// Map from load(globaladdress) -> memuh(#foo + 0).
+let AddedComplexity = 100 in
+def : Pat <(extloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDriuh_GP tglobaladdr:$global, 0)>;
+
+// Map from load(globaladdress + x) -> memub(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+
+// Map from load(globaladdress) -> memub(#foo + 0).
+let AddedComplexity = 100 in
+def : Pat <(zextloadi8 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDriub_GP tglobaladdr:$global, 0)>;
+
+// Map from load(globaladdress + x) -> memb(#foo + x).
+let AddedComplexity = 100 in
+def : Pat <(sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset)>;
+
+// Map from load(globaladdress) -> memb(#foo).
+let AddedComplexity = 100 in
+def : Pat <(extloadi8 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDb_GP tglobaladdr:$global)>;
+
+// Map from load(globaladdress) -> memb(#foo).
+let AddedComplexity = 100 in
+def : Pat <(sextloadi8 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDb_GP tglobaladdr:$global)>;
+
+// Map from load(globaladdress) -> memub(#foo).
+let AddedComplexity = 100 in
+def : Pat <(zextloadi8 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDub_GP tglobaladdr:$global)>;
+
+// When the Interprocedural Global Variable optimizer realizes that a
+// certain global variable takes only two constant values, it shrinks the
+// global to a boolean. Catch those loads here in the following 3 patterns.
+let AddedComplexity = 100 in
+def : Pat <(extloadi1 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDb_GP tglobaladdr:$global)>;
+
+let AddedComplexity = 100 in
+def : Pat <(sextloadi1 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDb_GP tglobaladdr:$global)>;
+
+let AddedComplexity = 100 in
+def : Pat <(zextloadi1 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDub_GP tglobaladdr:$global)>;
+
+// Map from load(globaladdress) -> memh(#foo).
+let AddedComplexity = 100 in
+def : Pat <(extloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDh_GP tglobaladdr:$global)>;
+
+// Map from load(globaladdress) -> memh(#foo).
+let AddedComplexity = 100 in
+def : Pat <(sextloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDh_GP tglobaladdr:$global)>;
+
+// Map from load(globaladdress) -> memuh(#foo).
+let AddedComplexity = 100 in
+def : Pat <(zextloadi16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (LDuh_GP tglobaladdr:$global)>;
+
+// Map from i1 loads to 32 bits. This assumes that the i1* is byte aligned.
+def : Pat <(i32 (zextloadi1 ADDRriS11_0:$addr)),
+ (AND_rr (LDrib ADDRriS11_0:$addr), (TFRI 0x1))>;
+
+// Map from Rdd = sign_extend_inreg(Rss, i32) -> Rdd = SXTW(Rss.lo).
+def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i32)),
+ (i64 (SXTW (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg)))>;
+
+// Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = SXTW(SXTH(Rss.lo)).
+def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i16)),
+ (i64 (SXTW (SXTH (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>;
+
+// Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = SXTW(SXTB(Rss.lo)).
+def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i8)),
+ (i64 (SXTW (SXTB (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>;
+
+// We want to avoid emitting pnot instructions as much as possible.
+// Map a brcond with an unsupported setcc to a JMP_cNot.
+def : Pat <(brcond (i1 (setne IntRegs:$src1, IntRegs:$src2)), bb:$offset),
+ (JMP_cNot (CMPEQrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
+
+def : Pat <(brcond (i1 (setne IntRegs:$src1, s10ImmPred:$src2)), bb:$offset),
+ (JMP_cNot (CMPEQri IntRegs:$src1, s10ImmPred:$src2), bb:$offset)>;
+
+def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 -1))), bb:$offset),
+ (JMP_cNot PredRegs:$src1, bb:$offset)>;
+
+def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 0))), bb:$offset),
+ (JMP_c PredRegs:$src1, bb:$offset)>;
+
+def : Pat <(brcond (i1 (setlt IntRegs:$src1, s8ImmPred:$src2)), bb:$offset),
+ (JMP_cNot (CMPGEri IntRegs:$src1, s8ImmPred:$src2), bb:$offset)>;
+
+def : Pat <(brcond (i1 (setlt IntRegs:$src1, IntRegs:$src2)), bb:$offset),
+ (JMP_c (CMPLTrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
+
+def : Pat <(brcond (i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)),
+ bb:$offset),
+ (JMP_cNot (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1),
+ bb:$offset)>;
+
+def : Pat <(brcond (i1 (setule IntRegs:$src1, IntRegs:$src2)), bb:$offset),
+ (JMP_cNot (CMPGTUrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>;
+
+def : Pat <(brcond (i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)),
+ bb:$offset),
+ (JMP_cNot (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2),
+ bb:$offset)>;
+
+// Map from a 64-bit select to an emulated 64-bit mux.
+// Hexagon does not support a 64-bit MUX, so emulate it with combines.
+def : Pat <(select PredRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
+ (COMBINE_rr
+ (MUX_rr PredRegs:$src1,
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg),
+ (EXTRACT_SUBREG DoubleRegs:$src3, subreg_hireg)),
+ (MUX_rr PredRegs:$src1,
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src3, subreg_loreg)))>;
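+
+// Expansion sketch: for rdd = select(p0, r1:0, r3:2) the pattern above
+// yields, roughly,
+//   r5 = mux(p0, r1, r3); r4 = mux(p0, r0, r2); rdd = combine(r5, r4)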
+
+// Map from a 1-bit select to logical ops.
+// From LegalizeDAG.cpp: (B1 ? B2 : B3) <=> (B1 & B2)|(!B1&B3).
+def : Pat <(select PredRegs:$src1, PredRegs:$src2, PredRegs:$src3),
+ (OR_pp (AND_pp PredRegs:$src1, PredRegs:$src2),
+ (AND_pp (NOT_p PredRegs:$src1), PredRegs:$src3))>;
+
+// Map Pd = load(addr) -> Rs = load(addr); Pd = Rs.
+def : Pat<(i1 (load ADDRriS11_2:$addr)),
+ (i1 (TFR_PdRs (i32 (LDrib ADDRriS11_2:$addr))))>;
+
+// Map truncation from i64 to i32.
+def : Pat<(i32 (trunc DoubleRegs:$src)),
+ (i32 (EXTRACT_SUBREG DoubleRegs:$src, subreg_loreg))>;
+
+// Map truncation from i64 to i1.
+def : Pat<(i1 (trunc DoubleRegs:$src)),
+ (i1 (TFR_PdRs (i32(EXTRACT_SUBREG DoubleRegs:$src, subreg_loreg))))>;
+
+// Map memb(Rs) = Rdd -> memb(Rs) = Rt.
+def : Pat<(truncstorei8 DoubleRegs:$src, ADDRriS11_0:$addr),
+ (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src,
+ subreg_loreg)))>;
+
+// Map memh(Rs) = Rdd -> memh(Rs) = Rt.
+def : Pat<(truncstorei16 DoubleRegs:$src, ADDRriS11_0:$addr),
+ (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src,
+ subreg_loreg)))>;
+
+// Map memw(Rs) = Rdd -> memw(Rs) = Rt.
+def : Pat<(truncstorei32 DoubleRegs:$src, ADDRriS11_0:$addr),
+ (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src,
+ subreg_loreg)))>;
+
+// Map from i1 = constant<-1>; memb(addr) = i1 -> r0 = 1; memb(addr) = r0.
+def : Pat<(store (i1 -1), ADDRriS11_2:$addr),
+ (STrib ADDRriS11_2:$addr, (TFRI 1))>;
+
+// Map from i1 = constant<-1>; memb(CONST32(#foo)) = i1 -> r0 = 1;
+// memb(#foo) = r0.
+let AddedComplexity = 100 in
+def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (STb_GP tglobaladdr:$global, (TFRI 1))>;
+
+// Map from memb(Rs) = Pd -> Rt = mux(Pd, #1, #0); store Rt.
+def : Pat<(store PredRegs:$src1, ADDRriS11_2:$addr),
+ (STrib ADDRriS11_2:$addr, (i32 (MUX_ii PredRegs:$src1, 1, 0)) )>;
+
+// Map Rdd = anyext(Rs) -> Rdd = sxtw(Rs).
+// Hexagon_TODO: We can probably use combine, but that will cost 2
+// instructions. Is there a better way to do this?
+def : Pat<(i64 (anyext IntRegs:$src1)),
+ (i64 (SXTW IntRegs:$src1))>;
+
+// Map cmple -> cmpgt.
+// rs <= rt -> !(rs > rt).
+def : Pat<(i1 (setle IntRegs:$src1, s10ImmPred:$src2)),
+ (i1 (NOT_p (CMPGTri IntRegs:$src1, s10ImmPred:$src2)))>;
+
+// rs <= rt -> !(rs > rt).
+def : Pat<(i1 (setle IntRegs:$src1, IntRegs:$src2)),
+ (i1 (NOT_p (CMPGTrr IntRegs:$src1, IntRegs:$src2)))>;
+
+// Rss <= Rtt -> !(Rss > Rtt).
+def : Pat<(i1 (setle DoubleRegs:$src1, DoubleRegs:$src2)),
+ (i1 (NOT_p (CMPGT64rr DoubleRegs:$src1, DoubleRegs:$src2)))>;
+
+// Map cmpne -> cmpeq.
+// Hexagon_TODO: We should improve on this.
+// rs != rt -> !(rs == rt).
+def : Pat <(i1 (setne IntRegs:$src1, s10ImmPred:$src2)),
+ (i1 (NOT_p(i1 (CMPEQri IntRegs:$src1, s10ImmPred:$src2))))>;
+
+// Map cmpne(Rs, Rt) -> !cmpeq(Rs, Rt).
+// rs != rt -> !(rs == rt).
+def : Pat <(i1 (setne IntRegs:$src1, IntRegs:$src2)),
+ (i1 (NOT_p(i1 (CMPEQrr IntRegs:$src1, IntRegs:$src2))))>;
+
+// Convert setne back to xor for Hexagon, since we compute with predicate
+// registers.
+def : Pat <(i1 (setne PredRegs:$src1, PredRegs:$src2)),
+ (i1 (XOR_pp PredRegs:$src1, PredRegs:$src2))>;
+
+// Map cmpne(Rss, Rtt) -> !cmpeq(Rss, Rtt).
+// rs != rt -> !(rs == rt).
+def : Pat <(i1 (setne DoubleRegs:$src1, DoubleRegs:$src2)),
+ (i1 (NOT_p(i1 (CMPEHexagon4rr DoubleRegs:$src1, DoubleRegs:$src2))))>;
+
+// Map cmpge(Rs, Rt) -> !cmpgt(Rt, Rs).
+// rs >= rt -> !(rt > rs).
+def : Pat <(i1 (setge IntRegs:$src1, IntRegs:$src2)),
+ (i1 (NOT_p(i1 (CMPGTrr IntRegs:$src2, IntRegs:$src1))))>;
+
+def : Pat <(i1 (setge IntRegs:$src1, s8ImmPred:$src2)),
+ (i1 (CMPGEri IntRegs:$src1, s8ImmPred:$src2))>;
+
+// Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss).
+// rss >= rtt -> !(rtt > rss).
+def : Pat <(i1 (setge DoubleRegs:$src1, DoubleRegs:$src2)),
+ (i1 (NOT_p(i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))))>;
+
+// Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm).
+// rs < rt -> !(rs >= rt).
+def : Pat <(i1 (setlt IntRegs:$src1, s8ImmPred:$src2)),
+ (i1 (NOT_p (CMPGEri IntRegs:$src1, s8ImmPred:$src2)))>;
+
+// Map cmplt(Rs, Rt) -> cmplt(Rs, Rt).
+// rs < rt -> rs < rt. Let assembler map it.
+def : Pat <(i1 (setlt IntRegs:$src1, IntRegs:$src2)),
+ (i1 (CMPLTrr IntRegs:$src2, IntRegs:$src1))>;
+
+// Map cmplt(Rss, Rtt) -> cmpgt(Rtt, Rss).
+// rss < rtt -> (rtt > rss).
+def : Pat <(i1 (setlt DoubleRegs:$src1, DoubleRegs:$src2)),
+ (i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))>;
+
+// Map from cmpltu(Rs, Rt) -> cmpgtu(Rt, Rs).
+// rs < rt -> rt > rs.
+def : Pat <(i1 (setult IntRegs:$src1, IntRegs:$src2)),
+ (i1 (CMPGTUrr IntRegs:$src2, IntRegs:$src1))>;
+
+// Map from cmpltu(Rss, Rtt) -> cmpgtu(Rtt, Rss).
+// rs < rt -> rt > rs.
+def : Pat <(i1 (setult DoubleRegs:$src1, DoubleRegs:$src2)),
+ (i1 (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1))>;
+
+// Map from Rs >= Rt -> !(Rt > Rs).
+// rs >= rt -> !(rt > rs).
+def : Pat <(i1 (setuge IntRegs:$src1, IntRegs:$src2)),
+ (i1 (NOT_p (CMPGTUrr IntRegs:$src2, IntRegs:$src1)))>;
+
+// Map from Rss >= Rtt -> !(Rtt > Rss).
+// rss >= rtt -> !(rtt > rss).
+def : Pat <(i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)),
+ (i1 (NOT_p (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1)))>;
+
+// Map from cmpleu(Rs, Rt) -> !cmpgtu(Rs, Rt).
+// Map from (Rs <= Rt) -> !(Rs > Rt).
+def : Pat <(i1 (setule IntRegs:$src1, IntRegs:$src2)),
+ (i1 (NOT_p (CMPGTUrr IntRegs:$src1, IntRegs:$src2)))>;
+
+// Map from cmpleu(Rss, Rtt) -> !cmpgtu(Rss, Rtt).
+// Map from (Rs <= Rt) -> !(Rs > Rt).
+def : Pat <(i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)),
+ (i1 (NOT_p (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2)))>;
+
+// Sign extends.
+// i1 -> i32
+def : Pat <(i32 (sext PredRegs:$src1)),
+ (i32 (MUX_ii PredRegs:$src1, -1, 0))>;
+
+// Convert sign-extended load back to load and sign extend.
+// i8 -> i64
+def: Pat <(i64 (sextloadi8 ADDRriS11_0:$src1)),
+ (i64 (SXTW (LDrib ADDRriS11_0:$src1)))>;
+
+// Convert any-extended load back to load and sign extend.
+// i8 -> i64
+def: Pat <(i64 (extloadi8 ADDRriS11_0:$src1)),
+ (i64 (SXTW (LDrib ADDRriS11_0:$src1)))>;
+
+// Convert sign-extended load back to load and sign extend.
+// i16 -> i64
+def: Pat <(i64 (sextloadi16 ADDRriS11_1:$src1)),
+ (i64 (SXTW (LDrih ADDRriS11_1:$src1)))>;
+
+// Convert sign-extended load back to load and sign extend.
+// i32 -> i64
+def: Pat <(i64 (sextloadi32 ADDRriS11_2:$src1)),
+ (i64 (SXTW (LDriw ADDRriS11_2:$src1)))>;
+
+
+// Zero extends.
+// i1 -> i32
+def : Pat <(i32 (zext PredRegs:$src1)),
+ (i32 (MUX_ii PredRegs:$src1, 1, 0))>;
+
+// i1 -> i64
+def : Pat <(i64 (zext PredRegs:$src1)),
+ (i64 (COMBINE_rr (TFRI 0), (MUX_ii PredRegs:$src1, 1, 0)))>;
+
+// i32 -> i64
+def : Pat <(i64 (zext IntRegs:$src1)),
+ (i64 (COMBINE_rr (TFRI 0), IntRegs:$src1))>;
+
+// i8 -> i64
+def: Pat <(i64 (zextloadi8 ADDRriS11_0:$src1)),
+ (i64 (COMBINE_rr (TFRI 0), (LDriub ADDRriS11_0:$src1)))>;
+
+// i16 -> i64
+def: Pat <(i64 (zextloadi16 ADDRriS11_1:$src1)),
+ (i64 (COMBINE_rr (TFRI 0), (LDriuh ADDRriS11_1:$src1)))>;
+
+// i32 -> i64
+def: Pat <(i64 (zextloadi32 ADDRriS11_2:$src1)),
+ (i64 (COMBINE_rr (TFRI 0), (LDriw ADDRriS11_2:$src1)))>;
+
+def: Pat <(i32 (zextloadi1 ADDRriS11_0:$src1)),
+ (i32 (LDriw ADDRriS11_0:$src1))>;
+
+// Map from Rd = Pd to Rd = mux(Pd, #1, #0).
+def : Pat <(i32 (anyext PredRegs:$src1)),
+ (i32 (MUX_ii PredRegs:$src1, 1, 0))>;
+
+// Map from Rdd = Pd to Rdd = sxtw(mux(Pd, #1, #0)).
+def : Pat <(i64 (anyext PredRegs:$src1)),
+ (i64 (SXTW (i32 (MUX_ii PredRegs:$src1, 1, 0))))>;
+
+
+// Any extended 64-bit load.
+// anyext i32 -> i64
+def: Pat <(i64 (extloadi32 ADDRriS11_2:$src1)),
+ (i64 (COMBINE_rr (TFRI 0), (LDriw ADDRriS11_2:$src1)))>;
+
+// anyext i16 -> i64.
+def: Pat <(i64 (extloadi16 ADDRriS11_2:$src1)),
+ (i64 (COMBINE_rr (TFRI 0), (LDrih ADDRriS11_2:$src1)))>;
+
+// Multiply 64-bit unsigned and use upper result.
+def : Pat <(mulhu DoubleRegs:$src1, DoubleRegs:$src2),
+ (MPYU64_acc(COMBINE_rr (TFRI 0),
+ (EXTRACT_SUBREG
+ (LSRd_ri(MPYU64_acc(MPYU64_acc(COMBINE_rr (TFRI 0),
+ (EXTRACT_SUBREG (LSRd_ri(MPYU64
+ (EXTRACT_SUBREG DoubleRegs:$src1,
+ subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src2,
+ subreg_loreg)),
+ 32) ,subreg_loreg)),
+ (EXTRACT_SUBREG DoubleRegs:$src1,
+ subreg_hireg),
+ (EXTRACT_SUBREG DoubleRegs:$src2,
+ subreg_loreg)),
+ (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)),
+ 32),subreg_loreg)),
+ (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg),
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)
+ )>;
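+
+// The expansion above builds the high word out of 32-bit partial products:
+// with $src1 = {aH,aL} and $src2 = {bH,bL}, it computes (as a sketch)
+//   aH*bH + ((aH*bL + aL*bH + hi32(aL*bL)) >> 32).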
+
+// Multiply 64-bit signed and use upper result.
+def : Pat <(mulhs DoubleRegs:$src1, DoubleRegs:$src2),
+ (MPY64_acc(COMBINE_rr (TFRI 0),
+ (EXTRACT_SUBREG
+ (LSRd_ri(MPY64_acc(MPY64_acc(COMBINE_rr (TFRI 0),
+ (EXTRACT_SUBREG (LSRd_ri(MPYU64
+ (EXTRACT_SUBREG DoubleRegs:$src1,
+ subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src2,
+ subreg_loreg)),
+ 32) ,subreg_loreg)),
+ (EXTRACT_SUBREG DoubleRegs:$src1,
+ subreg_hireg),
+ (EXTRACT_SUBREG DoubleRegs:$src2,
+ subreg_loreg)),
+ (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)),
+ 32),subreg_loreg)),
+ (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg),
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)
+ )>;
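+
+// The signed variant mirrors the mulhu expansion above, using MPY64_acc for
+// the partial products that involve the (signed) high halves.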
+
+// Hexagon specific ISD nodes.
+def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>;
+def Hexagon_ADJDYNALLOC : SDNode<"HexagonISD::ADJDYNALLOC",
+ SDTHexagonADJDYNALLOC>;
+// Needed to tag these instructions for stack layout.
+let usesCustomInserter = 1 in
+def ADJDYNALLOC : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1,
+ s16Imm:$src2),
+ "$dst = add($src1, #$src2)",
+ [(set IntRegs:$dst, (Hexagon_ADJDYNALLOC IntRegs:$src1,
+ s16ImmPred:$src2))]>;
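+
+// (Assumption: the rewrite happens in frame lowering.) Dynamic stack
+// allocations are tagged with ADJDYNALLOC so that the final stack
+// adjustment can be patched in once the frame size is known.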
+
+def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, []>;
+def Hexagon_ARGEXTEND : SDNode<"HexagonISD::ARGEXTEND", SDTHexagonARGEXTEND>;
+def ARGEXTEND : ALU32_rr <(outs IntRegs:$dst), (ins IntRegs:$src1),
+ "$dst = $src1",
+ [(set IntRegs:$dst, (Hexagon_ARGEXTEND IntRegs:$src1))]>;
+
+let AddedComplexity = 100 in
+def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND IntRegs:$src1), i16)),
+ (TFR IntRegs:$src1)>;
+
+
+def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>;
+
+let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in
+def BR_JT : JRInst<(outs), (ins IntRegs:$src),
+ "jumpr $src",
+ [(HexagonBR_JT IntRegs:$src)]>;
+def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>;
+
+def : Pat<(HexagonWrapperJT tjumptable:$dst),
+ (CONST32_set_jt tjumptable:$dst)>;
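+
+// Rough shape of the code for a switch (a sketch, assuming the usual BR_JT
+// lowering; the .LJTI label is hypothetical):
+//   rb = CONST32(#.LJTI0_0); rt = memw(rb + ri<<#2); jumpr rt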
+
+
+//===----------------------------------------------------------------------===//
+// V3 Instructions +
+//===----------------------------------------------------------------------===//
+
+include "HexagonInstrInfoV3.td"
+
+//===----------------------------------------------------------------------===//
+// V3 Instructions -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// V4 Instructions +
+//===----------------------------------------------------------------------===//
+
+include "HexagonInstrInfoV4.td"
+
+//===----------------------------------------------------------------------===//
+// V4 Instructions -
+//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td
new file mode 100644
index 0000000..2bd6770
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV3.td
@@ -0,0 +1,137 @@
+//=- HexagonInstrInfoV3.td - Target Desc. for Hexagon Target -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Hexagon V3 instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// J +
+//===----------------------------------------------------------------------===//
+// Call subroutine.
+let isCall = 1, neverHasSideEffects = 1,
+ Defs = [D0, D1, D2, D3, D4, D5, D6, D7, R28, R31,
+ P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
+ def CALLv3 : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ "call $dst", []>, Requires<[HasV3T]>;
+}
+
+//===----------------------------------------------------------------------===//
+// J -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// JR +
+//===----------------------------------------------------------------------===//
+// Call subroutine from register.
+let isCall = 1, neverHasSideEffects = 1,
+ Defs = [D0, D1, D2, D3, D4, D5, D6, D7, R28, R31,
+ P0, P1, P2, P3, LC0, LC1, SA0, SA1] in {
+ def CALLRv3 : JRInst<(outs), (ins IntRegs:$dst, variable_ops),
+ "callr $dst",
+ []>, Requires<[HasV3TOnly]>;
+ }
+
+
+// Jump to address from register.
+// if (p?.new) jumpr:t r?
+let isReturn = 1, isTerminator = 1, isBarrier = 1,
+ Defs = [PC], Uses = [R31] in {
+ def JMPR_cdnPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1.new) jumpr:t $src2",
+ []>, Requires<[HasV3T]>;
+}
+
+// if (!p?.new) jumpr:t r?
+let isReturn = 1, isTerminator = 1, isBarrier = 1,
+ Defs = [PC], Uses = [R31] in {
+ def JMPR_cdnNotPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1.new) jumpr:t $src2",
+ []>, Requires<[HasV3T]>;
+}
+
+// Not taken.
+// if (p?.new) jumpr:nt r?
+let isReturn = 1, isTerminator = 1, isBarrier = 1,
+ Defs = [PC], Uses = [R31] in {
+ def JMPR_cdnPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1.new) jumpr:nt $src2",
+ []>, Requires<[HasV3T]>;
+}
+
+// if (!p?.new) jumpr:nt r?
+let isReturn = 1, isTerminator = 1, isBarrier = 1,
+ Defs = [PC], Uses = [R31] in {
+ def JMPR_cdnNotPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1.new) jumpr:nt $src2",
+ []>, Requires<[HasV3T]>;
+}
+
+//===----------------------------------------------------------------------===//
+// JR -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU64/ALU +
+//===----------------------------------------------------------------------===//
+
+let AddedComplexity = 200 in
+def MAXw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = max($src2, $src1)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1))),
+ (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2))))]>,
+Requires<[HasV3T]>;
+
+let AddedComplexity = 200 in
+def MINw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ "$dst = min($src2, $src1)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src1))),
+ (i64 DoubleRegs:$src1),
+ (i64 DoubleRegs:$src2))))]>,
+Requires<[HasV3T]>;
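+
+// The AddedComplexity of 200 lets these min/max patterns win over the
+// generic compare+select expansion whenever the operands match this shape.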
+
+//===----------------------------------------------------------------------===//
+// ALU64/ALU -
+//===----------------------------------------------------------------------===//
+
+
+
+
+//def : Pat <(brcond (i1 (seteq (i32 IntRegs:$src1), 0)), bb:$offset),
+// (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+
+//def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), 0)), bb:$offset),
+// (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+
+//def : Pat <(brcond (i1 (setle (i32 IntRegs:$src1), 0)), bb:$offset),
+// (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+
+//def : Pat <(brcond (i1 (setge (i32 IntRegs:$src1), 0)), bb:$offset),
+// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+
+//def : Pat <(brcond (i1 (setgt (i32 IntRegs:$src1), -1)), bb:$offset),
+// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+
+
+// Map call instructions.
+def : Pat<(call (i32 IntRegs:$dst)),
+ (CALLRv3 (i32 IntRegs:$dst))>, Requires<[HasV3T]>;
+def : Pat<(call tglobaladdr:$dst),
+ (CALLv3 tglobaladdr:$dst)>, Requires<[HasV3T]>;
+def : Pat<(call texternalsym:$dst),
+ (CALLv3 texternalsym:$dst)>, Requires<[HasV3T]>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
new file mode 100644
index 0000000..f507e4f
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -0,0 +1,5746 @@
+//=- HexagonInstrInfoV4.td - Target Desc. for Hexagon Target -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Hexagon V4 instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+def IMMEXT : Immext<(outs), (ins),
+ "##immext //should never emit this",
+ []>,
+ Requires<[HasV4T]>;
+
+// The Hexagon V4 architecture spec defines the following instruction classes:
+// LD ST ALU32 XTYPE J JR MEMOP NV CR SYSTEM
+// (SYSTEM is not implemented in the compiler.)
+
+// LD Instructions:
+// ========================================
+// Loads (8/16/32/64 bit)
+// Deallocframe
+
+// ST Instructions:
+// ========================================
+// Stores (8/16/32/64 bit)
+// Allocframe
+
+// ALU32 Instructions:
+// ========================================
+// Arithmetic / Logical (32 bit)
+// Vector Halfword
+
+// XTYPE Instructions (32/64 bit):
+// ========================================
+// Arithmetic, Logical, Bit Manipulation
+// Multiply (Integer, Fractional, Complex)
+// Permute / Vector Permute Operations
+// Predicate Operations
+// Shift / Shift with Add/Sub/Logical
+// Vector Byte ALU
+// Vector Halfword (ALU, Shift, Multiply)
+// Vector Word (ALU, Shift)
+
+// J Instructions:
+// ========================================
+// Jump/Call PC-relative
+
+// JR Instructions:
+// ========================================
+// Jump/Call Register
+
+// MEMOP Instructions:
+// ========================================
+// Operation on memory (8/16/32 bit)
+
+// NV Instructions:
+// ========================================
+// New-value Jumps
+// New-value Stores
+
+// CR Instructions:
+// ========================================
+// Control-Register Transfers
+// Hardware Loop Setup
+// Predicate Logicals & Reductions
+
+// SYSTEM Instructions (not implemented in the compiler):
+// ========================================
+// Prefetch
+// Cache Maintenance
+// Bus Operations
+
+
+//===----------------------------------------------------------------------===//
+// ALU32 +
+//===----------------------------------------------------------------------===//
+
+// Shift halfword.
+
+let isPredicated = 1 in
+def ASLH_cPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1) $dst = aslh($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def ASLH_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1) $dst = aslh($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def ASLH_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1.new) $dst = aslh($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def ASLH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1.new) $dst = aslh($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def ASRH_cPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1) $dst = asrh($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def ASRH_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1) $dst = asrh($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def ASRH_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1.new) $dst = asrh($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def ASRH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1.new) $dst = asrh($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Sign extend.
+
+let isPredicated = 1 in
+def SXTB_cPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1) $dst = sxtb($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def SXTB_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1) $dst = sxtb($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def SXTB_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1.new) $dst = sxtb($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def SXTB_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1.new) $dst = sxtb($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicated = 1 in
+def SXTH_cPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1) $dst = sxth($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def SXTH_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1) $dst = sxth($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def SXTH_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1.new) $dst = sxth($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicated = 1 in
+def SXTH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1.new) $dst = sxth($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Zero extend.
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ZXTB_cPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1) $dst = zxtb($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ZXTB_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1) $dst = zxtb($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ZXTB_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1.new) $dst = zxtb($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ZXTB_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1.new) $dst = zxtb($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ZXTH_cPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1) $dst = zxth($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ZXTH_cNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1) $dst = zxth($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ZXTH_cdnPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if ($src1.new) $dst = zxth($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+let neverHasSideEffects = 1, isPredicated = 1 in
+def ZXTH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2),
+ "if (!$src1.new) $dst = zxth($src2)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Generate frame index addresses.
+let neverHasSideEffects = 1, isReMaterializable = 1 in
+def TFR_FI_immext_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s32Imm:$offset),
+ "$dst = add($src1, ##$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+//===----------------------------------------------------------------------===//
+// ALU32 -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// ALU32/PERM +
+//===----------------------------------------------------------------------===//
+
+// Combine
+// Rdd=combine(Rs, #s8)
+let neverHasSideEffects = 1 in
+def COMBINE_ri_V4 : ALU32_ri<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, s8Imm:$src2),
+ "$dst = combine($src1, #$src2)",
+ []>,
+ Requires<[HasV4T]>;
+// Rdd=combine(#s8, Rs)
+let neverHasSideEffects = 1 in
+def COMBINE_ir_V4 : ALU32_ir<(outs DoubleRegs:$dst),
+ (ins s8Imm:$src1, IntRegs:$src2),
+ "$dst = combine(#$src1, $src2)",
+ []>,
+ Requires<[HasV4T]>;
+//===----------------------------------------------------------------------===//
+// ALU32/PERM -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// LD +
+//===----------------------------------------------------------------------===//
+//
+// These absolute-set addressing mode instructions take an immediate as
+// an operand. The patterns are duplicated below to take a global address.
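+// In absolute-set addressing the instruction also writes the address back
+// into its second destination; e.g. (an illustration) r0 = memw(r1=#100)
+// loads from address 100 and sets r1 = 100.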
+
+let neverHasSideEffects = 1 in
+def LDrid_abs_setimm_V4 : LDInst<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memd($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memb(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDrib_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memb($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memh(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDrih_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memh($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memub(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDriub_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memub($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memuh(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDriuh_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memuh($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memw(Re=#U6)
+let neverHasSideEffects = 1 in
+def LDriw_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins u6Imm:$addr),
+ "$dst1 = memw($dst2=#$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// The following patterns are defined for absolute-set addressing mode
+// instructions that take a global address as the operand.
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_abs_set_V4 : LDInst<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memd($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memb(Re=#U6)
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memb($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memh(Re=#U6)
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memh($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memub(Re=#U6)
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memub($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memuh(Re=#U6)
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memuh($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memw(Re=#U6)
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins globaladdress:$addr),
+ "$dst1 = memw($dst2=##$addr)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Load doubleword.
+//
+// Make sure that in a post-increment load, the first operand is always the
+// post-increment operand.
+//
+// Rdd=memd(Rs+Rt<<#u2)
+// Special case pattern for an indexed load without offset, which is easier to
+// match. The AddedComplexity of this pattern should be lower than that of the
+// base+offset load, and lower still than that of the more generic version
+// with offset/shift below. A similar approach is taken for all other
+// base+index loads.
+let AddedComplexity = 10, isPredicable = 1 in
+def LDrid_indexed_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst=memd($src1+$src2<<#0)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (load (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 40, isPredicable = 1 in
+def LDrid_indexed_shl_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
+ "$dst=memd($src1+$src2<<#$offset)",
+ [(set (i64 DoubleRegs:$dst),
+ (i64 (load (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
+ Requires<[HasV4T]>;
+
+//// Load doubleword conditionally.
+// if ([!]Pv[.new]) Rd=memd(Rs+Rt<<#u2)
+// if (Pv) Rd=memd(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrid_indexed_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst=memd($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memd(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrid_indexed_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst=memd($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memd(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrid_indexed_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst=memd($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memd(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrid_indexed_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst=memd($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rd=memd(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrid_indexed_shl_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1) $dst=memd($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memd(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrid_indexed_shl_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1.new) $dst=memd($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memd(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrid_indexed_shl_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1) $dst=memd($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memd(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrid_indexed_shl_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1.new) $dst=memd($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rdd=memd(Rt<<#u2+#U6)
+
+//// Load byte.
+// Rd=memb(Rs+Rt<<#u2)
+let AddedComplexity = 10, isPredicable = 1 in
+def LDrib_indexed_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst=memb($src1+$src2<<#0)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi8 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 10, isPredicable = 1 in
+def LDriub_indexed_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst=memub($src1+$src2<<#0)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi8 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 10, isPredicable = 1 in
+def LDriub_ae_indexed_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst=memub($src1+$src2<<#0)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (extloadi8 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 40, isPredicable = 1 in
+def LDrib_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
+ "$dst=memb($src1+$src2<<#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi8 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 40, isPredicable = 1 in
+def LDriub_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
+ "$dst=memub($src1+$src2<<#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi8 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 40, isPredicable = 1 in
+def LDriub_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
+ "$dst=memub($src1+$src2<<#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (extloadi8 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
+ Requires<[HasV4T]>;
+
+//// Load byte conditionally.
+// if ([!]Pv[.new]) Rd=memb(Rs+Rt<<#u2)
+// if (Pv) Rd=memb(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrib_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst=memb($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memb(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrib_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst=memb($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memb(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrib_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst=memb($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memb(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrib_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst=memb($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rd=memb(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrib_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1) $dst=memb($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memb(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrib_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1.new) $dst=memb($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memb(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrib_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1) $dst=memb($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memb(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrib_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1.new) $dst=memb($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+//// Load unsigned byte conditionally.
+// if ([!]Pv[.new]) Rd=memub(Rs+Rt<<#u2)
+// if (Pv) Rd=memub(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriub_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst=memub($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memub(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriub_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst=memub($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memub(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriub_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst=memub($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memub(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriub_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst=memub($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rd=memub(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriub_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1) $dst=memub($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memub(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriub_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1.new) $dst=memub($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memub(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriub_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1) $dst=memub($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memub(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriub_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1.new) $dst=memub($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memb(Rt<<#u2+#U6)
+
+//// Load halfword.
+// Rd=memh(Rs+Rt<<#u2)
+let AddedComplexity = 10, isPredicable = 1 in
+def LDrih_indexed_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst=memh($src1+$src2<<#0)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi16 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 10, isPredicable = 1 in
+def LDriuh_indexed_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst=memuh($src1+$src2<<#0)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi16 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 10, isPredicable = 1 in
+def LDriuh_ae_indexed_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst=memuh($src1+$src2<<#0)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (extloadi16 (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
+ Requires<[HasV4T]>;
+
+// Rd=memh(Rs+Rt<<#u2)
+let AddedComplexity = 40, isPredicable = 1 in
+def LDrih_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
+ "$dst=memh($src1+$src2<<#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (sextloadi16 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 40, isPredicable = 1 in
+def LDriuh_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
+ "$dst=memuh($src1+$src2<<#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (zextloadi16 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 40, isPredicable = 1 in
+def LDriuh_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
+ "$dst=memuh($src1+$src2<<#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (extloadi16 (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
+ Requires<[HasV4T]>;
+
+//// Load halfword conditionally.
+// if ([!]Pv[.new]) Rd=memh(Rs+Rt<<#u2)
+// if (Pv) Rd=memh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrih_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst=memh($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrih_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst=memh($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrih_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst=memh($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDrih_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst=memh($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rd=memh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrih_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1) $dst=memh($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrih_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1.new) $dst=memh($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrih_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1) $dst=memh($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDrih_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1.new) $dst=memh($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+//// Load unsigned halfword conditionally.
+// if ([!]Pv[.new]) Rd=memuh(Rs+Rt<<#u2)
+// if (Pv) Rd=memuh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriuh_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst=memuh($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memuh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriuh_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst=memuh($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memuh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriuh_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst=memuh($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memuh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriuh_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst=memuh($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rd=memuh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriuh_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1) $dst=memuh($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memuh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriuh_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1.new) $dst=memuh($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memuh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriuh_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1) $dst=memuh($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memuh(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1.new) $dst=memuh($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memh(Rt<<#u2+#U6)
+
+//// Load word.
+// Load predicate: Fix for bug 5279.
+let mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_pred_V4 : LDInst<(outs PredRegs:$dst),
+ (ins MEMri:$addr),
+ "Error; should not emit",
+ []>,
+ Requires<[HasV4T]>;
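+// LDriw_pred_V4 is a pseudo for reloading a spilled predicate register; it
+// is rewritten before emission, hence the "Error; should not emit"
+// mnemonic.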
+
+// Rd=memw(Re=#U6)
+
+// Rd=memw(Rs+Rt<<#u2)
+let AddedComplexity = 10, isPredicable = 1 in
+def LDriw_indexed_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst=memw($src1+$src2<<#0)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (load (add (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)))))]>,
+ Requires<[HasV4T]>;
+
+// Rd=memw(Rs+Rt<<#u2)
+let AddedComplexity = 40, isPredicable = 1 in
+def LDriw_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
+ "$dst=memw($src1+$src2<<#$offset)",
+ [(set (i32 IntRegs:$dst),
+ (i32 (load (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$offset)))))]>,
+ Requires<[HasV4T]>;
+
+//// Load word conditionally.
+// if ([!]Pv[.new]) Rd=memw(Rs+Rt<<#u2)
+// if (Pv) Rd=memw(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriw_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1) $dst=memw($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memw(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriw_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if ($src1.new) $dst=memw($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memw(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriw_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1) $dst=memw($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memw(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in
+def LDriw_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "if (!$src1.new) $dst=memw($src2+$src3<<#0)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rd=memw(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriw_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1) $dst=memw($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rd=memw(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriw_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if ($src1.new) $dst=memw($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rd=memw(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriw_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1) $dst=memw($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rd=memw(Rs+Rt<<#u2)
+let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in
+def LDriw_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
+ u2Imm:$offset),
+ "if (!$src1.new) $dst=memw($src2+$src3<<#$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+// Rd=memw(Rt<<#u2+#U6)
+
+
+// Post-inc Load, Predicated, Dot new
+
+
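+// Annotation: a ".new" predicate consumes a predicate register defined
+// earlier in the same packet. The "$src2 = $dst2" constraint ties the
+// incoming base register to the post-incremented result so both are
+// allocated to the same physical register.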
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrid_cdnPt_V4 : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
+ "if ($src1.new) $dst1 = memd($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrid_cdnNotPt_V4 : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
+ "if (!$src1.new) $dst1 = memd($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrib_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
+ "if ($src1.new) $dst1 = memb($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrib_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
+ "if (!$src1.new) $dst1 = memb($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrih_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
+ "if ($src1.new) $dst1 = memh($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDrih_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
+ "if (!$src1.new) $dst1 = memh($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriub_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
+ "if ($src1.new) $dst1 = memub($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriub_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
+ "if (!$src1.new) $dst1 = memub($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriuh_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
+ "if ($src1.new) $dst1 = memuh($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriuh_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
+ "if (!$src1.new) $dst1 = memuh($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriw_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
+ "if ($src1.new) $dst1 = memw($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def POST_LDriw_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
+ (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
+ "if (!$src1.new) $dst1 = memw($src2++#$src3)",
+ [],
+ "$src2 = $dst2">,
+ Requires<[HasV4T]>;
+
+/// Load from global offset
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDrid_GP_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memd(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
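+// Annotation: in Hexagon assembly "##" marks a constant-extended
+// immediate; the predicated forms below use it for the full 32-bit global
+// address.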
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_GP_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memd(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_GP_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memd(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_GP_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memd(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrid_GP_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memd(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDrib_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memb(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memb(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memb(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memb(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrib_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memb(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDriub_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memub(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memub(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memub(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memub(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriub_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memub(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDrih_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memh(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDrih_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDriuh_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memuh(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memuh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memuh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memuh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriuh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memuh(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDriw_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global, u16Imm:$offset),
+ "$dst=memw(#$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1) $dst=memw(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1) $dst=memw(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if ($src1.new) $dst=memw(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDriw_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset),
+ "if (!$src1.new) $dst=memw(##$global+$offset)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDd_GP_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memd(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rtt=memd(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDd_GP_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memd(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv) Rtt=memd(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDd_GP_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memd(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rtt=memd(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDd_GP_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memd(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv.new) Rtt=memd(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDd_GP_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memd(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDb_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memb(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memb(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDb_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memb(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rt=memb(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDb_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memb(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memb(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDb_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memb(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rt=memb(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDb_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memb(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDub_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memub(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memub(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDub_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memub(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv) Rt=memub(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDub_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memub(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memub(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDub_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memub(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv.new) Rt=memub(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDub_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memub(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDh_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memh(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memh(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rt=memh(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memh(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rt=memh(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDuh_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memuh(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memuh(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDuh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memuh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) Rt=memuh(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDuh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memuh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memuh(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDuh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memuh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) Rt=memuh(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDuh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memuh(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
+def LDw_GP_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$global),
+ "$dst=memw(#$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) Rt=memw(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDw_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1) $dst=memw(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv) Rt=memw(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDw_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1) $dst=memw(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) Rt=memw(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDw_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if ($src1.new) $dst=memw(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if (!Pv) Rt=memw(##global)
+let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def LDw_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$global),
+ "if (!$src1.new) $dst=memw(##$global)",
+ []>,
+ Requires<[HasV4T]>;
+
+
+
+def : Pat <(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i64 (LDd_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDw_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDuh_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)),
+ (i32 (LDub_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
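+// Annotation: the four patterns above assume that naturally aligned loads
+// are single-copy atomic on this target, so relaxed atomic loads of
+// globals can reuse the plain GP load instructions.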
+
+// Map from load(globaladdress) -> memd(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (i64 (LDd_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd
+let AddedComplexity = 100 in
+def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (i1 (TFR_PdRs (i32 (LDb_GP_V4 tglobaladdr:$global))))>,
+ Requires<[HasV4T]>;
+
+// When the Interprocedural Global Variable optimizer realizes that a certain
+// global variable takes only two constant values, it shrinks the global to
+// a boolean. Catch those loads here in the following 3 patterns.
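+// Hypothetical C sketch of the case being handled: a global stored only
+// two distinct constants, e.g.
+//   static int flag;  /* only ever assigned 0 or 100 */
+// may be shrunk to an i1, so an i32 read of it reaches ISel as
+// (i32 (extloadi1/sextloadi1/zextloadi1 ...)).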
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memb(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memb(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDb_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDub_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memub(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDub_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memh(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDh_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memh(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDh_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memuh(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDuh_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress) -> memw(#foo)
+let AddedComplexity = 100 in
+def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))),
+ (i32 (LDw_GP_V4 tglobaladdr:$global))>,
+ Requires<[HasV4T]>;
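+// Hypothetical example: for "extern int g;" a read of g lowers to
+// (i32 (load (HexagonCONST32_GP tglobaladdr:$g))) and selects LDw_GP_V4.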
+
+def : Pat <(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i64 (LDrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriuh_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+def : Pat <(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (i32 (LDriub_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memd(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i64 (LDrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memb(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memb(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memub(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriub_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memh(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memh(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (sextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+
+// Map from load(globaladdress + x) -> memuh(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriuh_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+// Map from load(globaladdress + x) -> memw(#foo + x)
+let AddedComplexity = 100 in
+def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset))),
+ (i32 (LDriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>,
+ Requires<[HasV4T]>;
+
+
+//===----------------------------------------------------------------------===//
+// LD -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ST +
+//===----------------------------------------------------------------------===//
+///
+/// Assumptions ****** DO NOT IGNORE ********:
+/// 1. In a post-increment store, the zeroth operand must always be the
+///    post-increment operand.
+/// 2. The store value operand (Rt/Rtt) in a store must always be the last
+///    operand.
+///
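+// For instance, in POST_STdri_cdnPt_V4 below the post-incremented base is
+// the sole output and therefore MI operand 0 (defs precede uses), while in
+// STrid_indexed_shl_V4 the stored value ($src4) is the last input operand.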
+
+// memd(Re=#U6)=Rtt
+def STrid_abs_setimm_V4 : STInst<(outs IntRegs:$dst1),
+ (ins DoubleRegs:$src1, u6Imm:$src2),
+ "memd($dst1=#$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(Re=#U6)=Rs
+def STrib_abs_setimm_V4 : STInst<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, u6Imm:$src2),
+ "memb($dst1=#$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(Re=#U6)=Rs
+def STrih_abs_setimm_V4 : STInst<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, u6Imm:$src2),
+ "memh($dst1=#$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(Re=#U6)=Rs
+def STriw_abs_setimm_V4 : STInst<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, u6Imm:$src2),
+ "memw($dst1=#$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
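+// Annotation: these "address set" forms also write the address into Re
+// (e.g. "memd($dst1=#$src2)"), which is why the address register is
+// modeled as an output rather than an input.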
+
+// memd(Re=#U6)=Rtt
+def STrid_abs_set_V4 : STInst<(outs IntRegs:$dst1),
+ (ins DoubleRegs:$src1, globaladdress:$src2),
+ "memd($dst1=##$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(Re=#U6)=Rs
+def STrib_abs_set_V4 : STInst<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, globaladdress:$src2),
+ "memb($dst1=##$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(Re=#U6)=Rs
+def STrih_abs_set_V4 : STInst<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, globaladdress:$src2),
+ "memh($dst1=##$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(Re=#U6)=Rs
+def STriw_abs_set_V4 : STInst<(outs IntRegs:$dst1),
+ (ins IntRegs:$src1, globaladdress:$src2),
+ "memw($dst1=##$src2) = $src1",
+ []>,
+ Requires<[HasV4T]>;
+
+// memd(Rs+Ru<<#u2)=Rtt
+let AddedComplexity = 10, isPredicable = 1 in
+def STrid_indexed_shl_V4 : STInst<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, DoubleRegs:$src4),
+ "memd($src1+$src2<<#$src3) = $src4",
+ [(store (i64 DoubleRegs:$src4),
+ (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2), u2ImmPred:$src3)))]>,
+ Requires<[HasV4T]>;
+
+// memd(Ru<<#u2+#U6)=Rtt
+let AddedComplexity = 10 in
+def STrid_shl_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, DoubleRegs:$src4),
+ "memd($src1<<#$src2+#$src3) = $src4",
+ [(store (i64 DoubleRegs:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u6ImmPred:$src3))]>,
+ Requires<[HasV4T]>;
+
+// memd(Rx++#s4:3)=Rtt
+// memd(Rx++#s4:3:circ(Mu))=Rtt
+// memd(Rx++I:circ(Mu))=Rtt
+// memd(Rx++Mu)=Rtt
+// memd(Rx++Mu:brev)=Rtt
+// memd(gp+#u16:3)=Rtt
+
+// Store doubleword conditionally.
+// if ([!]Pv[.new]) memd(#u6)=Rtt
+// TODO: needs to be implemented.
+
+// if ([!]Pv[.new]) memd(Rs+#u6:3)=Rtt
+// if (Pv) memd(Rs+#u6:3)=Rtt
+// if (Pv.new) memd(Rs+#u6:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
+ "if ($src1.new) memd($addr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memd(Rs+#u6:3)=Rtt
+// if (!Pv.new) memd(Rs+#u6:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
+ "if (!$src1.new) memd($addr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memd(Rs+#u6:3)=Rtt
+// if (Pv.new) memd(Rs+#u6:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
+ DoubleRegs:$src4),
+ "if ($src1.new) memd($src2+#$src3) = $src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memd(Rs+#u6:3)=Rtt
+// if (!Pv.new) memd(Rs+#u6:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
+ DoubleRegs:$src4),
+ "if (!$src1.new) memd($src2+#$src3) = $src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memd(Rs+Ru<<#u2)=Rtt
+// if (Pv) memd(Rs+Ru<<#u2)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_shl_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ DoubleRegs:$src5),
+ "if ($src1) memd($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memd(Rs+Ru<<#u2)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_shl_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ DoubleRegs:$src5),
+ "if ($src1.new) memd($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memd(Rs+Ru<<#u2)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_shl_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ DoubleRegs:$src5),
+ "if (!$src1) memd($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memd(Rs+Ru<<#u2)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ DoubleRegs:$src5),
+ "if (!$src1.new) memd($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memd(Rx++#s4:3)=Rtt
+// if (Pv) memd(Rx++#s4:3)=Rtt
+// if (Pv.new) memd(Rx++#s4:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
+ s4_3Imm:$offset),
+ "if ($src1.new) memd($src3++#$offset) = $src2",
+ [],
+ "$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memd(Rx++#s4:3)=Rtt
+// if (!Pv.new) memd(Rx++#s4:3)=Rtt
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
+ s4_3Imm:$offset),
+ "if (!$src1.new) memd($src3++#$offset) = $src2",
+ [],
+ "$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Store byte.
+// memb(Rs+#u6:0)=#S8
+let AddedComplexity = 10, isPredicable = 1 in
+def STrib_imm_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u6_0Imm:$src2, s8Imm:$src3),
+ "memb($src1+#$src2) = #$src3",
+ [(truncstorei8 s8ImmPred:$src3, (add (i32 IntRegs:$src1),
+ u6_0ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+
+// memb(Rs+Ru<<#u2)=Rt
+let AddedComplexity = 10, isPredicable = 1 in
+def STrib_indexed_shl_V4 : STInst<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
+ "memb($src1+$src2<<#$src3) = $src4",
+ [(truncstorei8 (i32 IntRegs:$src4),
+ (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$src3)))]>,
+ Requires<[HasV4T]>;
+
+// memb(Ru<<#u2+#U6)=Rt
+let AddedComplexity = 10 in
+def STrib_shl_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
+ "memb($src1<<#$src2+#$src3) = $src4",
+ [(truncstorei8 (i32 IntRegs:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u6ImmPred:$src3))]>,
+ Requires<[HasV4T]>;
+
+// memb(Rx++#s4:0:circ(Mu))=Rt
+// memb(Rx++I:circ(Mu))=Rt
+// memb(Rx++Mu)=Rt
+// memb(Rx++Mu:brev)=Rt
+// memb(gp+#u16:0)=Rt
+
+
+// Store byte conditionally.
+// if ([!]Pv[.new]) memb(#u6)=Rt
+// if ([!]Pv[.new]) memb(Rs+#u6:0)=#S6
+// if (Pv) memb(Rs+#u6:0)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_imm_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
+ "if ($src1) memb($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(Rs+#u6:0)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_imm_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
+ "if ($src1.new) memb($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(Rs+#u6:0)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_imm_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
+ "if (!$src1) memb($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(Rs+#u6:0)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_imm_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
+ "if (!$src1.new) memb($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memb(Rs+#u6:0)=Rt
+// if (Pv) memb(Rs+#u6:0)=Rt
+// if (Pv.new) memb(Rs+#u6:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1.new) memb($addr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(Rs+#u6:0)=Rt
+// if (!Pv.new) memb(Rs+#u6:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1.new) memb($addr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memb(Rs+#u6:0)=Rt
+// if (!Pv) memb(Rs+#u6:0)=Rt
+// if (Pv.new) memb(Rs+#u6:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_indexed_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
+ "if ($src1.new) memb($src2+#$src3) = $src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(Rs+#u6:0)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_indexed_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
+ "if (!$src1.new) memb($src2+#$src3) = $src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Rt
+// if (Pv) memb(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1) memb($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1.new) memb($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1) memb($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1.new) memb($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memb(Rx++#s4:0)=Rt
+// if (Pv) memb(Rx++#s4:0)=Rt
+// if (Pv.new) memb(Rx++#s4:0)=Rt
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
+ "if ($src1.new) memb($src3++#$offset) = $src2",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(Rx++#s4:0)=Rt
+// if (!Pv.new) memb(Rx++#s4:0)=Rt
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
+ "if (!$src1.new) memb($src3++#$offset) = $src2",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Store halfword.
+// TODO: needs to be implemented
+// memh(Re=#U6)=Rt.H
+// memh(Rs+#s11:1)=Rt.H
+// memh(Rs+#u6:1)=#S8
+let AddedComplexity = 10, isPredicable = 1 in
+def STrih_imm_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u6_1Imm:$src2, s8Imm:$src3),
+ "memh($src1+#$src2) = #$src3",
+ [(truncstorei16 s8ImmPred:$src3, (add (i32 IntRegs:$src1),
+ u6_1ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+
+// memh(Rs+Ru<<#u2)=Rt.H
+// TODO: needs to be implemented.
+
+// memh(Rs+Ru<<#u2)=Rt
+let AddedComplexity = 10, isPredicable = 1 in
+def STrih_indexed_shl_V4 : STInst<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
+ "memh($src1+$src2<<#$src3) = $src4",
+ [(truncstorei16 (i32 IntRegs:$src4),
+ (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$src3)))]>,
+ Requires<[HasV4T]>;
+
+// memh(Ru<<#u2+#U6)=Rt.H
+// memh(Ru<<#u2+#U6)=Rt
+let AddedComplexity = 10 in
+def STrih_shl_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
+ "memh($src1<<#$src2+#$src3) = $src4",
+ [(truncstorei16 (i32 IntRegs:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u6ImmPred:$src3))]>,
+ Requires<[HasV4T]>;
+
+// memh(Rx++#s4:1:circ(Mu))=Rt.H
+// memh(Rx++#s4:1:circ(Mu))=Rt
+// memh(Rx++I:circ(Mu))=Rt.H
+// memh(Rx++I:circ(Mu))=Rt
+// memh(Rx++Mu)=Rt.H
+// memh(Rx++Mu)=Rt
+// memh(Rx++Mu:brev)=Rt.H
+// memh(Rx++Mu:brev)=Rt
+// memh(gp+#u16:1)=Rt
+// if ([!]Pv[.new]) memh(#u6)=Rt.H
+// if ([!]Pv[.new]) memh(#u6)=Rt
+
+// if ([!]Pv[.new]) memh(Rs+#u6:1)=#S6
+// if (Pv) memh(Rs+#u6:1)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_imm_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
+ "if ($src1) memh($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(Rs+#u6:1)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_imm_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
+ "if ($src1.new) memh($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(Rs+#u6:1)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_imm_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
+ "if (!$src1) memh($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(Rs+#u6:1)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_imm_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
+ "if (!$src1.new) memh($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memh(Rs+#u6:1)=Rt.H
+// TODO: needs to be implemented.
+
+// if ([!]Pv[.new]) memh(Rs+#u6:1)=Rt
+// if (Pv) memh(Rs+#u6:1)=Rt
+// if (Pv.new) memh(Rs+#u6:1)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1.new) memh($addr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(Rs+#u6:1)=Rt
+// if (!Pv.new) memh(Rs+#u6:1)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1.new) memh($addr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(Rs+#u6:1)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_indexed_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
+ "if ($src1.new) memh($src2+#$src3) = $src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(Rs+#u6:1)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_indexed_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
+ "if (!$src1.new) memh($src2+#$src3) = $src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt.H
+// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt
+// if (Pv) memh(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1) memh($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1.new) memh($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1) memh($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1.new) memh($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memh(Rx++#s4:1)=Rt.H
+// TODO: Needs to be implemented.
+
+// if ([!]Pv[.new]) memh(Rx++#s4:1)=Rt
+// if (Pv) memh(Rx++#s4:1)=Rt
+// if (Pv.new) memh(Rx++#s4:1)=Rt
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_SThri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
+ "if ($src1.new) memh($src3++#$offset) = $src2",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(Rx++#s4:1)=Rt
+// if (!Pv.new) memh(Rx++#s4:1)=Rt
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_SThri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
+ "if (!$src1.new) memh($src3++#$offset) = $src2",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Store word.
+// memw(Re=#U6)=Rt
+// TODO: Needs to be implemented.
+
+// Store predicate:
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_pred_V4 : STInst<(outs),
+ (ins MEMri:$addr, PredRegs:$src1),
+ "Error; should not emit",
+ []>,
+ Requires<[HasV4T]>;
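+// Note: the asm string above is intentionally unemittable; this def appears
+// to act as a placeholder for predicate-register spills, expected to be
+// rewritten (e.g. via a transfer through an integer register) before any
+// code is emitted.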
+
+
+// memw(Rs+#u6:2)=#S8
+let AddedComplexity = 10, isPredicable = 1 in
+def STriw_imm_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u6_2Imm:$src2, s8Imm:$src3),
+ "memw($src1+#$src2) = #$src3",
+ [(store s8ImmPred:$src3, (add (i32 IntRegs:$src1),
+ u6_2ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
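+// The AddedComplexity on these patterns biases instruction selection toward
+// the more specific addressing forms over the plain base+offset stores.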
+
+// memw(Rs+Ru<<#u2)=Rt
+let AddedComplexity = 10, isPredicable = 1 in
+def STriw_indexed_shl_V4 : STInst<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
+ "memw($src1+$src2<<#$src3) = $src4",
+ [(store (i32 IntRegs:$src4), (add (i32 IntRegs:$src1),
+ (shl (i32 IntRegs:$src2),
+ u2ImmPred:$src3)))]>,
+ Requires<[HasV4T]>;
+
+// memw(Ru<<#u2+#U6)=Rt
+let AddedComplexity = 10 in
+def STriw_shl_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
+ "memw($src1<<#$src2+#$src3) = $src4",
+ [(store (i32 IntRegs:$src4),
+ (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
+ u6ImmPred:$src3))]>,
+ Requires<[HasV4T]>;
+
+// memw(Rx++#s4:2)=Rt
+// memw(Rx++#s4:2:circ(Mu))=Rt
+// memw(Rx++I:circ(Mu))=Rt
+// memw(Rx++Mu)=Rt
+// memw(Rx++Mu:brev)=Rt
+// memw(gp+#u16:2)=Rt
+
+
+// Store word conditionally.
+
+// if ([!]Pv[.new]) memw(Rs+#u6:2)=#S6
+// if (Pv) memw(Rs+#u6:2)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_imm_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
+ "if ($src1) memw($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(Rs+#u6:2)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_imm_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
+ "if ($src1.new) memw($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(Rs+#u6:2)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_imm_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
+ "if (!$src1) memw($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(Rs+#u6:2)=#S6
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_imm_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
+ "if (!$src1.new) memw($src2+#$src3) = #$src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memw(Rs+#u6:2)=Rt
+// if (Pv) memw(Rs+#u6:2)=Rt
+// if (Pv.new) memw(Rs+#u6:2)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1.new) memw($addr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(Rs+#u6:2)=Rt
+// if (!Pv.new) memw(Rs+#u6:2)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1.new) memw($addr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(Rs+#u6:2)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_indexed_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
+ "if ($src1.new) memw($src2+#$src3) = $src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(Rs+#u6:2)=Rt
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_indexed_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
+ "if (!$src1.new) memw($src2+#$src3) = $src4",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Rt
+// if (Pv) memw(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1) memw($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1.new) memw($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1) memw($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(Rs+Ru<<#u2)=Rt
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1.new) memw($src2+$src3<<#$src4) = $src5",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memw(Rx++#s4:2)=Rt
+// if (Pv) memw(Rx++#s4:2)=Rt
+// if (Pv.new) memw(Rx++#s4:2)=Rt
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
+ "if ($src1.new) memw($src3++#$offset) = $src2",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(Rx++#s4:2)=Rt
+// if (!Pv.new) memw(Rx++#s4:2)=Rt
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STwri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
+ "if (!$src1.new) memw($src3++#$offset) = $src2",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Store to global address.
+
+let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
+def STrid_GP_V4 : STInst<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src),
+ "memd(#$global+$offset) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrid_GP_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ DoubleRegs:$src2),
+ "if ($src1) memd(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrid_GP_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ DoubleRegs:$src2),
+ "if (!$src1) memd(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrid_GP_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ DoubleRegs:$src2),
+ "if ($src1.new) memd(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrid_GP_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ DoubleRegs:$src2),
+ "if (!$src1.new) memd(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_V4 : STInst<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memb(#$global+$offset) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_GP_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memb(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_GP_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memb(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_GP_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memb(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrib_GP_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memb(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_V4 : STInst<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memh(#$global+$offset) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_GP_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memh(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_GP_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memh(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_GP_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memh(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STrih_GP_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memh(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_V4 : STInst<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memw(#$global+$offset) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_GP_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memw(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_GP_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memw(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_GP_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memw(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STriw_GP_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memw(##$global+$offset) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// memd(#global)=Rtt
+let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
+def STd_GP_V4 : STInst<(outs),
+ (ins globaladdress:$global, DoubleRegs:$src),
+ "memd(#$global) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memd(##global) = Rtt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STd_GP_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
+ "if ($src1) memd(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memd(##global) = Rtt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STd_GP_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
+ "if (!$src1) memd(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memd(##global) = Rtt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STd_GP_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
+ "if ($src1.new) memd(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memd(##global) = Rtt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STd_GP_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
+ "if (!$src1.new) memd(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(#global)=Rt
+let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_V4 : STInst<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memb(#$global) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memb(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STb_GP_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memb(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STb_GP_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memb(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STb_GP_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memb(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STb_GP_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memb(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(#global)=Rt
+let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_V4 : STInst<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memh(#$global) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memh(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STh_GP_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memh(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STh_GP_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memh(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STh_GP_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memh(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STh_GP_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memh(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(#global)=Rt
+let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_V4 : STInst<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memw(#$global) = $src",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memw(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STw_GP_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memw(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STw_GP_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memw(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STw_GP_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memw(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(##global) = Rt
+let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
+def STw_GP_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memw(##$global) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+// 64 bit atomic store
+def : Pat<(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global),
+ (i64 DoubleRegs:$src1)),
+ (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
+ Requires<[HasV4T]>;
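+// The atomic_store nodes are selected to the ordinary GP-relative stores;
+// presumably this relies on naturally aligned scalar stores being
+// single-copy atomic on Hexagon, since no extra synchronization is emitted.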
+
+// Map from store(globaladdress) -> memd(#foo)
+let AddedComplexity = 100 in
+def : Pat <(store (i64 DoubleRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// 8 bit atomic store
+def : Pat<(atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress) -> memb(#foo)
+let AddedComplexity = 100 in
+def : Pat<(truncstorei8 (i32 IntRegs:$src1),
+ (HexagonCONST32_GP tglobaladdr:$global)),
+ (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from "i1 = constant<-1>; memw(CONST32(#foo)) = i1"
+// to "r0 = 1; memw(#foo) = r0"
+let AddedComplexity = 100 in
+def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (STb_GP_V4 tglobaladdr:$global, (TFRI 1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress) -> memh(#foo)
+let AddedComplexity = 100 in
+def : Pat<(truncstorei16 (i32 IntRegs:$src1),
+ (HexagonCONST32_GP tglobaladdr:$global)),
+ (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// 32 bit atomic store
+def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global),
+ (i32 IntRegs:$src1)),
+ (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress) -> memw(#foo)
+let AddedComplexity = 100 in
+def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
+ (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i64 DoubleRegs:$src1)),
+ (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i64 DoubleRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset),
+ (i32 IntRegs:$src1)),
+ (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress + x) -> memd(#foo + x)
+let AddedComplexity = 100 in
+def : Pat<(store (i64 DoubleRegs:$src1), (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
+ (i64 DoubleRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress + x) -> memb(#foo + x)
+let AddedComplexity = 100 in
+def : Pat<(truncstorei8 (i32 IntRegs:$src1),
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress + x) -> memh(#foo + x)
+let AddedComplexity = 100 in
+def : Pat<(truncstorei16 (i32 IntRegs:$src1),
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
+
+// Map from store(globaladdress + x) -> memw(#foo + x)
+let AddedComplexity = 100 in
+def : Pat<(store (i32 IntRegs:$src1),
+ (add (HexagonCONST32_GP tglobaladdr:$global),
+ u16ImmPred:$offset)),
+ (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
+ Requires<[HasV4T]>;
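+// For illustration: (store (i32 R), (add (HexagonCONST32_GP @foo), 8)) is
+// selected by the pattern above to STriw_GP_V4 @foo, 8, R, which prints as
+// "memw(#foo+8) = r<R>" (@foo and R are hypothetical names).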
+
+
+
+//===----------------------------------------------------------------------===//
+// ST -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// NV/ST +
+//===----------------------------------------------------------------------===//
+
+// Store new-value byte.
+
+// memb(Re=#U6)=Nt.new
+// memb(Rs+#s11:0)=Nt.new
+let mayStore = 1, isPredicable = 1 in
+def STrib_nv_V4 : NVInst_V4<(outs), (ins MEMri:$addr, IntRegs:$src1),
+ "memb($addr) = $src1.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, isPredicable = 1 in
+def STrib_indexed_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, s11_0Imm:$src2, IntRegs:$src3),
+ "memb($src1+#$src2) = $src3.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10, isPredicable = 1 in
+def STrib_indexed_shl_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
+ "memb($src1+$src2<<#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(Ru<<#u2+#U6)=Nt.new
+let mayStore = 1, AddedComplexity = 10 in
+def STrib_shl_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
+ "memb($src1<<#$src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(Rx++#s4:0)=Nt.new
+let mayStore = 1, hasCtrlDep = 1, isPredicable = 1 in
+def POST_STbri_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s4_0Imm:$offset),
+ "memb($src2++#$offset) = $src1.new",
+ [],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// memb(Rx++#s4:0:circ(Mu))=Nt.new
+// memb(Rx++I:circ(Mu))=Nt.new
+// memb(Rx++Mu)=Nt.new
+// memb(Rx++Mu:brev)=Nt.new
+
+// memb(gp+#u16:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memb(#$global+$offset) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memb(#global)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memb(#$global) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// Store new-value byte conditionally.
+// if ([!]Pv[.new]) memb(#u6)=Nt.new
+// if (Pv) memb(Rs+#u6:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1) memb($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(Rs+#u6:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1.new) memb($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(Rs+#u6:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1) memb($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(Rs+#u6:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1.new) memb($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memb(Rs+#u6:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_indexed_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
+ "if ($src1) memb($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(Rs+#u6:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
+ "if ($src1.new) memb($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(Rs+#u6:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
+ "if (!$src1) memb($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(Rs+#u6:0)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrib_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
+ "if (!$src1.new) memb($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Nt.new
+// if (Pv) memb(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1) memb($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1.new) memb($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1) memb($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrib_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1.new) memb($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memb(Rx++#s4:0)=Nt.new
+// if (Pv) memb(Rx++#s4:0)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
+ "if ($src1) memb($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(Rx++#s4:0)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STbri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
+ "if ($src1.new) memb($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(Rx++#s4:0)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STbri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
+ "if (!$src1) memb($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(Rx++#s4:0)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STbri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
+ "if (!$src1.new) memb($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Store new-value halfword.
+// memh(Re=#U6)=Nt.new
+// memh(Rs+#s11:1)=Nt.new
+let mayStore = 1, isPredicable = 1 in
+def STrih_nv_V4 : NVInst_V4<(outs), (ins MEMri:$addr, IntRegs:$src1),
+ "memh($addr) = $src1.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, isPredicable = 1 in
+def STrih_indexed_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, s11_1Imm:$src2, IntRegs:$src3),
+ "memh($src1+#$src2) = $src3.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10, isPredicable = 1 in
+def STrih_indexed_shl_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
+ "memh($src1+$src2<<#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(Ru<<#u2+#U6)=Nt.new
+let mayStore = 1, AddedComplexity = 10 in
+def STrih_shl_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
+ "memh($src1<<#$src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(Rx++#s4:1)=Nt.new
+let mayStore = 1, hasCtrlDep = 1, isPredicable = 1 in
+def POST_SThri_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s4_1Imm:$offset),
+ "memh($src2++#$offset) = $src1.new",
+ [],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// memh(Rx++#s4:1:circ(Mu))=Nt.new
+// memh(Rx++I:circ(Mu))=Nt.new
+// memh(Rx++Mu)=Nt.new
+// memh(Rx++Mu:brev)=Nt.new
+
+// memh(gp+#u16:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memh(#$global+$offset) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memh(#global)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memh(#$global) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// Store new-value halfword conditionally.
+
+// if ([!]Pv[.new]) memh(#u6)=Nt.new
+
+// if ([!]Pv[.new]) memh(Rs+#u6:1)=Nt.new
+// if (Pv) memh(Rs+#u6:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1) memh($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(Rs+#u6:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1.new) memh($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(Rs+#u6:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1) memh($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(Rs+#u6:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1.new) memh($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memh(Rs+#u6:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_indexed_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
+ "if ($src1) memh($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(Rs+#u6:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
+ "if ($src1.new) memh($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(Rs+#u6:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
+ "if (!$src1) memh($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(Rs+#u6:1)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STrih_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
+ "if (!$src1.new) memh($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Nt.new
+// if (Pv) memh(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1) memh($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1.new) memh($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1) memh($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STrih_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1.new) memh($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memh(Rx++#s4:1)=Nt.new
+// if (Pv) memh(Rx++#s4:1)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
+ "if ($src1) memh($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(Rx++#s4:1)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
+ "if ($src1.new) memh($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(Rx++#s4:1)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_SThri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
+ "if (!$src1) memh($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(Rx++#s4:1)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_SThri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
+ "if (!$src1.new) memh($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Store new-value word.
+
+// memw(Re=#U6)=Nt.new
+// memw(Rs+#s11:2)=Nt.new
+let mayStore = 1, isPredicable = 1 in
+def STriw_nv_V4 : NVInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$src1),
+ "memw($addr) = $src1.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, isPredicable = 1 in
+def STriw_indexed_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3),
+ "memw($src1+#$src2) = $src3.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10, isPredicable = 1 in
+def STriw_indexed_shl_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
+ "memw($src1+$src2<<#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(Ru<<#u2+#U6)=Nt.new
+let mayStore = 1, AddedComplexity = 10 in
+def STriw_shl_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
+ "memw($src1<<#$src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// memw(Rx++#s4:2)=Nt.new
+let mayStore = 1, hasCtrlDep = 1, isPredicable = 1 in
+def POST_STwri_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s4_2Imm:$offset),
+ "memw($src2++#$offset) = $src1.new",
+ [],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// memw(Rx++#s4:2:circ(Mu))=Nt.new
+// memw(Rx++I:circ(Mu))=Nt.new
+// memw(Rx++Mu)=Nt.new
+// memw(Rx++Mu:brev)=Nt.new
+// memw(gp+#u16:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
+ "memw(#$global+$offset) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_nv_V4 : NVInst_V4<(outs),
+ (ins globaladdress:$global, IntRegs:$src),
+ "memw(#$global) = $src.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// Store new-value word conditionally.
+
+// if ([!]Pv[.new]) memw(#u6)=Nt.new
+
+// if ([!]Pv[.new]) memw(Rs+#u6:2)=Nt.new
+// if (Pv) memw(Rs+#u6:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1) memw($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(Rs+#u6:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if ($src1.new) memw($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(Rs+#u6:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1) memw($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(Rs+#u6:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
+ "if (!$src1.new) memw($addr) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memw(Rs+#u6:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_indexed_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
+ "if ($src1) memw($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(Rs+#u6:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
+ "if ($src1.new) memw($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(Rs+#u6:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
+ "if (!$src1) memw($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(Rs+#u6:2)=Nt.new
+let mayStore = 1, neverHasSideEffects = 1,
+ isPredicated = 1 in
+def STriw_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
+ "if (!$src1.new) memw($src2+#$src3) = $src4.new",
+ []>,
+ Requires<[HasV4T]>;
+
+
+// if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Nt.new
+// if (Pv) memw(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1) memw($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if ($src1.new) memw($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1) memw($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(Rs+Ru<<#u2)=Nt.new
+let mayStore = 1, AddedComplexity = 10,
+ isPredicated = 1 in
+def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
+ IntRegs:$src5),
+ "if (!$src1.new) memw($src2+$src3<<#$src4) = $src5.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if ([!]Pv[.new]) memw(Rx++#s4:2)=Nt.new
+// if (Pv) memw(Rx++#s4:2)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
+ "if ($src1) memw($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(Rx++#s4:2)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
+ "if ($src1.new) memw($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(Rx++#s4:2)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
+ "if (!$src1) memw($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(Rx++#s4:2)=Nt.new
+let mayStore = 1, hasCtrlDep = 1,
+ isPredicated = 1 in
+def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
+ "if (!$src1.new) memw($src3++#$offset) = $src2.new",
+ [],"$src3 = $dst">,
+ Requires<[HasV4T]>;
+
+
+
+// if (Pv) memb(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memb(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memb(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memb(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memb(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memb(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memb(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STb_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memb(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memh(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memh(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memh(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memh(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memh(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memh(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memh(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STh_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memh(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv) memw(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1) memw(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv) memw(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1) memw(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (Pv.new) memw(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if ($src1.new) memw(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+// if (!Pv.new) memw(##global) = Nt.new
+let mayStore = 1, neverHasSideEffects = 1 in
+def STw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
+ "if (!$src1.new) memw(##$global) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memb(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memb(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memb(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrib_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memb(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memh(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memh(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memh(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STrih_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memh(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_cPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1) memw(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1) memw(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if ($src1.new) memw(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+let mayStore = 1, neverHasSideEffects = 1 in
+def STriw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
+ (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
+ IntRegs:$src2),
+ "if (!$src1.new) memw(##$global+$offset) = $src2.new",
+ []>,
+ Requires<[HasV4T]>;
+
+//===----------------------------------------------------------------------===//
+// NV/ST -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// NV/J +
+//===----------------------------------------------------------------------===//
+
+multiclass NVJ_type_basic_reg<string NotStr, string OpcStr, string TakenStr> {
+ def _ie_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1.new, $src2)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1.new, $src2)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+}
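+// For illustration, instantiating the multiclass above with NotStr = "!",
+// OpcStr = "cmp.gt" and TakenStr = "t" assembles the asm string
+// "if (!cmp.gt($src1.new, $src2)) jump:t $offset", i.e. a new-value
+// compare-and-branch with a taken hint.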
+
+multiclass NVJ_type_basic_2ndDotNew<string NotStr, string OpcStr, string TakenStr> {
+ def _ie_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1, $src2.new)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1, $src2.new)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+multiclass NVJ_type_basic_imm<string NotStr, string OpcStr, string TakenStr> {
+ def _ie_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1.new, #$src2)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1.new, #$src2)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+multiclass NVJ_type_basic_neg<string NotStr, string OpcStr, string TakenStr> {
+ def _ie_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, nOneImm:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1.new, #$src2)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, nOneImm:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1.new, #$src2)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+multiclass NVJ_type_basic_tstbit<string NotStr, string OpcStr, string TakenStr> {
+ def _ie_nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, u1Imm:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1.new, #$src2)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _nv_V4 : NVInst_V4<(outs),
+ (ins IntRegs:$src1, u1Imm:$src2, brtarget:$offset),
+ !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr,
+ !strconcat("($src1.new, #$src2)) jump:",
+ !strconcat(TakenStr, " $offset"))))),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// Multiclass for regular dot new of 1st operand register.
+multiclass NVJ_type_br_pred_reg<string NotStr, string OpcStr> {
+ defm Pt : NVJ_type_basic_reg<NotStr, OpcStr, "t">;
+ defm Pnt : NVJ_type_basic_reg<NotStr, OpcStr, "nt">;
+}
+
+// Multiclass for dot new of 2nd operand register.
+multiclass NVJ_type_br_pred_2ndDotNew<string NotStr, string OpcStr> {
+ defm Pt : NVJ_type_basic_2ndDotNew<NotStr, OpcStr, "t">;
+ defm Pnt : NVJ_type_basic_2ndDotNew<NotStr, OpcStr, "nt">;
+}
+
+// Multiclass for 2nd operand immediate, including -1.
+multiclass NVJ_type_br_pred_imm<string NotStr, string OpcStr> {
+ defm Pt : NVJ_type_basic_imm<NotStr, OpcStr, "t">;
+ defm Pnt : NVJ_type_basic_imm<NotStr, OpcStr, "nt">;
+ defm Ptneg : NVJ_type_basic_neg<NotStr, OpcStr, "t">;
+ defm Pntneg : NVJ_type_basic_neg<NotStr, OpcStr, "nt">;
+}
+
+// Multiclass for 2nd operand immediate, excluding -1.
+multiclass NVJ_type_br_pred_imm_only<string NotStr, string OpcStr> {
+ defm Pt : NVJ_type_basic_imm<NotStr, OpcStr, "t">;
+ defm Pnt : NVJ_type_basic_imm<NotStr, OpcStr, "nt">;
+}
+
+// Multiclass for tstbit, where 2nd operand is always #0.
+multiclass NVJ_type_br_pred_tstbit<string NotStr, string OpcStr> {
+ defm Pt : NVJ_type_basic_tstbit<NotStr, OpcStr, "t">;
+ defm Pnt : NVJ_type_basic_tstbit<NotStr, OpcStr, "nt">;
+}
+
+// Multiclass for GT: rr/ri forms, including second-operand dot-new and
+// the -1 immediate.
+multiclass NVJ_type_rr_ri<string OpcStr> {
+ defm rrNot : NVJ_type_br_pred_reg<"!", OpcStr>;
+ defm rr : NVJ_type_br_pred_reg<"", OpcStr>;
+ defm rrdnNot : NVJ_type_br_pred_2ndDotNew<"!", OpcStr>;
+ defm rrdn : NVJ_type_br_pred_2ndDotNew<"", OpcStr>;
+ defm riNot : NVJ_type_br_pred_imm<"!", OpcStr>;
+ defm ri : NVJ_type_br_pred_imm<"", OpcStr>;
+}
+
+// Multiclass for EQ: cmp.eq is commutative, so no separate second-operand
+// dot-new forms are needed.
+multiclass NVJ_type_rr_ri_no_2ndDotNew<string OpcStr> {
+ defm rrNot : NVJ_type_br_pred_reg<"!", OpcStr>;
+ defm rr : NVJ_type_br_pred_reg<"", OpcStr>;
+ defm riNot : NVJ_type_br_pred_imm<"!", OpcStr>;
+ defm ri : NVJ_type_br_pred_imm<"", OpcStr>;
+}
+
+// Multiclass for GTU: excludes the -1 immediate, which is meaningless for
+// an unsigned compare.
+multiclass NVJ_type_rr_ri_no_nOne<string OpcStr> {
+ defm rrNot : NVJ_type_br_pred_reg<"!", OpcStr>;
+ defm rr : NVJ_type_br_pred_reg<"", OpcStr>;
+ defm rrdnNot : NVJ_type_br_pred_2ndDotNew<"!", OpcStr>;
+ defm rrdn : NVJ_type_br_pred_2ndDotNew<"", OpcStr>;
+ defm riNot : NVJ_type_br_pred_imm_only<"!", OpcStr>;
+ defm ri : NVJ_type_br_pred_imm_only<"", OpcStr>;
+}
+
+// Multiclass for tstbit.
+multiclass NVJ_type_r0<string OpcStr> {
+ defm r0Not : NVJ_type_br_pred_tstbit<"!", OpcStr>;
+ defm r0 : NVJ_type_br_pred_tstbit<"", OpcStr>;
+}
+
+// Base Multiclass for New Value Jump.
+multiclass NVJ_type {
+ defm GT : NVJ_type_rr_ri<"cmp.gt">;
+ defm EQ : NVJ_type_rr_ri_no_2ndDotNew<"cmp.eq">;
+ defm GTU : NVJ_type_rr_ri_no_nOne<"cmp.gtu">;
+ defm TSTBIT : NVJ_type_r0<"tstbit">;
+}
+
+let isBranch = 1, isTerminator = 1, neverHasSideEffects = 1, Defs = [PC] in {
+ defm JMP_ : NVJ_type;
+}
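+// The nested defms concatenate their names; e.g. the GT/rr/Pt path above
+// produces defs named JMP_GTrrPt_nv_V4 and JMP_GTrrPt_ie_nv_V4, both with
+// the asm string "if (cmp.gt($src1.new, $src2)) jump:t $offset".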
+
+//===----------------------------------------------------------------------===//
+// NV/J -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// XTYPE/ALU +
+//===----------------------------------------------------------------------===//
+
+// Add and accumulate.
+// Rd=add(Rs,add(Ru,#s6))
+def ADDr_ADDri_V4 : MInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s6Imm:$src3),
+ "$dst = add($src1, add($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (add (i32 IntRegs:$src2),
+ s6ImmPred:$src3)))]>,
+ Requires<[HasV4T]>;
+
+// Rd=add(Rs,sub(#s6,Ru))
+def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
+ "$dst = add($src1, sub(#$src2, $src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (sub s6ImmPred:$src2,
+ (i32 IntRegs:$src3))))]>,
+ Requires<[HasV4T]>;
+
+// Generates the same instruction as ADDr_SUBri_V4 but matches a different
+// pattern: (Rs + #s6) - Ru is algebraically Rs + (#s6 - Ru).
+// Rd=add(Rs,sub(#s6,Ru))
+def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
+ "$dst = add($src1, sub(#$src2, $src3))",
+ [(set (i32 IntRegs:$dst),
+ (sub (add (i32 IntRegs:$src1), s6ImmPred:$src2),
+ (i32 IntRegs:$src3)))]>,
+ Requires<[HasV4T]>;
+
+
+// Add or subtract doublewords with carry.
+// TODO: Rdd=add(Rss,Rtt,Px):carry
+// TODO: Rdd=sub(Rss,Rtt,Px):carry
+
+
+// Logical doublewords.
+// Rdd=and(Rtt,~Rss)
+def ANDd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ "$dst = and($src1, ~$src2)",
+ [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1),
+ (not (i64 DoubleRegs:$src2))))]>,
+ Requires<[HasV4T]>;
+
+// Rdd=or(Rtt,~Rss)
+def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ "$dst = or($src1, ~$src2)",
+ [(set (i64 DoubleRegs:$dst),
+ (or (i64 DoubleRegs:$src1), (not (i64 DoubleRegs:$src2))))]>,
+ Requires<[HasV4T]>;
+
+
+// Logical-logical doublewords.
+// Rxx^=xor(Rss,Rtt)
+def XORd_XORdd : MInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
+ "$dst ^= xor($src2, $src3)",
+ [(set (i64 DoubleRegs:$dst),
+ (xor (i64 DoubleRegs:$src1), (xor (i64 DoubleRegs:$src2),
+ (i64 DoubleRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Logical-logical words.
+// Rx=or(Ru,and(Rx,#s10))
+def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s10Imm:$src3),
+ "$dst = or($src1, and($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ s10ImmPred:$src3)))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx[&|^]=and(Rs,Rt)
+// Rx&=and(Rs,Rt)
+def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst &= and($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx|=and(Rs,Rt)
+def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst |= and($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx^=and(Rs,Rt)
+def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst ^= and($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx[&|^]=and(Rs,~Rt)
+// Rx&=and(Rs,~Rt)
+def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst &= and($src2, ~$src3)",
+ [(set (i32 IntRegs:$dst),
+ (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (not (i32 IntRegs:$src3)))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx|=and(Rs,~Rt)
+def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst |= and($src2, ~$src3)",
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (not (i32 IntRegs:$src3)))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx^=and(Rs,~Rt)
+def XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst ^= and($src2, ~$src3)",
+ [(set (i32 IntRegs:$dst),
+ (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ (not (i32 IntRegs:$src3)))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx[&|^]=or(Rs,Rt)
+// Rx&=or(Rs,Rt)
+def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst &= or($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (and (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx|=or(Rs,Rt)
+def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst |= or($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx^=or(Rs,Rt)
+def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst ^= or($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (xor (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx[&|^]=xor(Rs,Rt)
+// Rx&=xor(Rs,Rt)
+def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst &= xor($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx|=xor(Rs,Rt)
+def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst |= xor($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx^=xor(Rs,Rt)
+def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst ^= xor($src2, $src3)",
+ [(set (i32 IntRegs:$dst),
+ (xor (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx|=and(Rs,#s10)
+def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s10Imm:$src3),
+ "$dst |= and($src2, #$src3)",
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
+ s10ImmPred:$src3)))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx|=or(Rs,#s10)
+def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, s10Imm:$src3),
+ "$dst |= or($src2, #$src3)",
+ [(set (i32 IntRegs:$dst),
+ (or (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
+ s10ImmPred:$src3)))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Modulo wrap
+// Rd=modwrap(Rs,Rt)
+// Round
+// Rd=cround(Rs,#u5)
+// Rd=cround(Rs,Rt)
+// Rd=round(Rs,#u5)[:sat]
+// Rd=round(Rs,Rt)[:sat]
+// Vector reduce add unsigned halfwords
+// Rd=vraddh(Rss,Rtt)
+// Vector add bytes
+// Rdd=vaddb(Rss,Rtt)
+// Vector conditional negate
+// Rdd=vcnegh(Rss,Rt)
+// Rxx+=vrcnegh(Rss,Rt)
+// Vector maximum bytes
+// Rdd=vmaxb(Rtt,Rss)
+// Vector reduce maximum halfwords
+// Rxx=vrmaxh(Rss,Ru)
+// Rxx=vrmaxuh(Rss,Ru)
+// Vector reduce maximum words
+// Rxx=vrmaxuw(Rss,Ru)
+// Rxx=vrmaxw(Rss,Ru)
+// Vector minimum bytes
+// Rdd=vminb(Rtt,Rss)
+// Vector reduce minimum halfwords
+// Rxx=vrminh(Rss,Ru)
+// Rxx=vrminuh(Rss,Ru)
+// Vector reduce minimum words
+// Rxx=vrminuw(Rss,Ru)
+// Rxx=vrminw(Rss,Ru)
+// Vector subtract bytes
+// Rdd=vsubb(Rss,Rtt)
+
+//===----------------------------------------------------------------------===//
+// XTYPE/ALU -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// XTYPE/MPY +
+//===----------------------------------------------------------------------===//
+
+// Multiply and use lower result.
+// Rd=add(#u6,mpyi(Rs,#U6))
+def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst),
+ (ins u6Imm:$src1, IntRegs:$src2, u6Imm:$src3),
+ "$dst = add(#$src1, mpyi($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (mul (i32 IntRegs:$src2), u6ImmPred:$src3),
+ u6ImmPred:$src1))]>,
+ Requires<[HasV4T]>;
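+
+// "Lower result" means the low 32 bits of the 32x32 product: the mul node
+// in these patterns is the truncating i32 multiply, so e.g. ADDi_MPYri_V4
+// computes $dst = #u6 + (($src2 * #U6) mod 2^32).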
+
+// Rd=add(#u6,mpyi(Rs,Rt))
+def ADDi_MPYrr_V4 : MInst<(outs IntRegs:$dst),
+ (ins u6Imm:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst = add(#$src1, mpyi($src2, $src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
+ u6ImmPred:$src1))]>,
+ Requires<[HasV4T]>;
+
+// Rd=add(Ru,mpyi(#u6:2,Rs))
+def ADDr_MPYir_V4 : MInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ "$dst = add($src1, mpyi(#$src2, $src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src3),
+ u6_2ImmPred:$src2)))]>,
+ Requires<[HasV4T]>;
+
+// Rd=add(Ru,mpyi(Rs,#u6))
+def ADDr_MPYri_V4 : MInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, u6Imm:$src3),
+ "$dst = add($src1, mpyi($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
+ u6ImmPred:$src3)))]>,
+ Requires<[HasV4T]>;
+
+// Rx=add(Ru,mpyi(Rx,Rs))
+def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
+ "$dst = add($src1, mpyi($src2, $src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Polynomial multiply words
+// Rdd=pmpyw(Rs,Rt)
+// Rxx^=pmpyw(Rs,Rt)
+
+// Vector reduce multiply word by signed half (32x16)
+// Rdd=vrmpyweh(Rss,Rtt)[:<<1]
+// Rdd=vrmpywoh(Rss,Rtt)[:<<1]
+// Rxx+=vrmpyweh(Rss,Rtt)[:<<1]
+// Rxx+=vrmpywoh(Rss,Rtt)[:<<1]
+
+// Multiply and use upper result
+// Rd=mpy(Rs,Rt.H):<<1:sat
+// Rd=mpy(Rs,Rt.L):<<1:sat
+// Rd=mpy(Rs,Rt):<<1
+// Rd=mpy(Rs,Rt):<<1:sat
+// Rd=mpysu(Rs,Rt)
+// Rx+=mpy(Rs,Rt):<<1:sat
+// Rx-=mpy(Rs,Rt):<<1:sat
+
+// Vector multiply bytes
+// Rdd=vmpybsu(Rs,Rt)
+// Rdd=vmpybu(Rs,Rt)
+// Rxx+=vmpybsu(Rs,Rt)
+// Rxx+=vmpybu(Rs,Rt)
+
+// Vector polynomial multiply halfwords
+// Rdd=vpmpyh(Rs,Rt)
+// Rxx^=vpmpyh(Rs,Rt)
+
+//===----------------------------------------------------------------------===//
+// XTYPE/MPY -
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// XTYPE/SHIFT +
+//===----------------------------------------------------------------------===//
+
+// Shift by immediate and accumulate.
+// Rx=add(#u8,asl(Rx,#U5))
+def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
+ "$dst = add(#$src1, asl($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx=add(#u8,lsr(Rx,#U5))
+def ADDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
+ "$dst = add(#$src1, lsr($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (add (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx=sub(#u8,asl(Rx,#U5))
+def SUBi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
+ "$dst = sub(#$src1, asl($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (sub (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx=sub(#u8,lsr(Rx,#U5))
+def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
+ "$dst = sub(#$src1, lsr($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (sub (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Shift by immediate and logical.
+// Rx=and(#u8,asl(Rx,#U5))
+def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
+ "$dst = and(#$src1, asl($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (and (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx=and(#u8,lsr(Rx,#U5))
+def ANDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
+ "$dst = and(#$src1, lsr($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (and (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx=or(#u8,asl(Rx,#U5))
+let AddedComplexity = 30 in
+def ORi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
+ "$dst = or(#$src1, asl($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (or (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rx=or(#u8,lsr(Rx,#U5))
+let AddedComplexity = 30 in
+def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
+ (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
+ "$dst = or(#$src1, lsr($src2, #$src3))",
+ [(set (i32 IntRegs:$dst),
+ (or (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
+ u8ImmPred:$src1))],
+ "$src2 = $dst">,
+ Requires<[HasV4T]>;
+
+
+// Shift by register.
+// Rd=lsl(#s6,Rt)
+def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1, IntRegs:$src2),
+ "$dst = lsl(#$src1, $src2)",
+ [(set (i32 IntRegs:$dst), (shl s6ImmPred:$src1,
+ (i32 IntRegs:$src2)))]>,
+ Requires<[HasV4T]>;
+
+
+// Shift by register and logical.
+// Rxx^=asl(Rss,Rt)
+def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
+ "$dst ^= asl($src2, $src3)",
+ [(set (i64 DoubleRegs:$dst),
+ (xor (i64 DoubleRegs:$src1), (shl (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rxx^=asr(Rss,Rt)
+def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
+ "$dst ^= asr($src2, $src3)",
+ [(set (i64 DoubleRegs:$dst),
+ (xor (i64 DoubleRegs:$src1), (sra (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rxx^=lsl(Rss,Rt)
+def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
+ "$dst ^= lsl($src2, $src3)",
+ [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1),
+ (shl (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+// Rxx^=lsr(Rss,Rt)
+def LSRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
+ (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
+ "$dst ^= lsr($src2, $src3)",
+ [(set (i64 DoubleRegs:$dst),
+ (xor (i64 DoubleRegs:$src1), (srl (i64 DoubleRegs:$src2),
+ (i32 IntRegs:$src3))))],
+ "$src1 = $dst">,
+ Requires<[HasV4T]>;
+
+
+//===----------------------------------------------------------------------===//
+// XTYPE/SHIFT -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MEMOP: Word, Half, Byte
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MEMOP: Word
+//
+// Implemented:
+// MEMw_ADDi_indexed_V4 : memw(Rs+#u6:2)+=#U5
+// MEMw_SUBi_indexed_V4 : memw(Rs+#u6:2)-=#U5
+// MEMw_ADDr_indexed_V4 : memw(Rs+#u6:2)+=Rt
+// MEMw_SUBr_indexed_V4 : memw(Rs+#u6:2)-=Rt
+// MEMw_CLRr_indexed_V4 : memw(Rs+#u6:2)&=Rt
+// MEMw_SETr_indexed_V4 : memw(Rs+#u6:2)|=Rt
+// MEMw_ADDi_V4 : memw(Rs+#u6:2)+=#U5
+// MEMw_SUBi_V4 : memw(Rs+#u6:2)-=#U5
+// MEMw_ADDr_V4 : memw(Rs+#u6:2)+=Rt
+// MEMw_SUBr_V4 : memw(Rs+#u6:2)-=Rt
+// MEMw_CLRr_V4 : memw(Rs+#u6:2)&=Rt
+// MEMw_SETr_V4 : memw(Rs+#u6:2)|=Rt
+//
+// Not implemented:
+// MEMw_CLRi_indexed_V4 : memw(Rs+#u6:2)=clrbit(#U5)
+// MEMw_SETi_indexed_V4 : memw(Rs+#u6:2)=setbit(#U5)
+// MEMw_CLRi_V4 : memw(Rs+#u6:2)=clrbit(#U5)
+// MEMw_SETi_V4 : memw(Rs+#u6:2)=setbit(#U5)
+//===----------------------------------------------------------------------===//
+
+
+// MEMw_ADDSUBi_indexed_V4:
+// Pseudo operation for MEMw_ADDi_indexed_V4 and
+// MEMw_SUBi_indexed_V4; a later pass will change it
+// to the corresponding pattern (see the sketch after this def).
+let AddedComplexity = 30 in
+def MEMw_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_2Imm:$offset, m6Imm:$addend),
+ "Error; should not emit",
+ [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ m6ImmPred:$addend),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
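+
+// A minimal sketch of what the later pass does with the signed m6 addend
+// (illustrative pseudocode, not part of this patch): a non-negative addend
+// selects the ADDi form, a negative one the SUBi form with its magnitude.
+/*
+  if (Addend >= 0)
+    use MEMw_ADDi_indexed_MEM_V4(base, offset, Addend);   // memw(...) += #U5
+  else
+    use MEMw_SUBi_indexed_MEM_V4(base, offset, -Addend);  // memw(...) -= #U5
+*/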
+
+// memw(Rs+#u6:2) += #U5
+let AddedComplexity = 30 in
+def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$addend),
+ "memw($base+#$offset) += #$addend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) -= #U5
+let AddedComplexity = 30 in
+def MEMw_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$subend),
+ "memw($base+#$offset) -= #$subend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) += Rt
+let AddedComplexity = 30 in
+def MEMw_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$addend),
+ "memw($base+#$offset) += $addend",
+ [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ (i32 IntRegs:$addend)),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) -= Rt
+let AddedComplexity = 30 in
+def MEMw_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$subend),
+ "memw($base+#$offset) -= $subend",
+ [(store (sub (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ (i32 IntRegs:$subend)),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) &= Rt
+let AddedComplexity = 30 in
+def MEMw_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$andend),
+ "memw($base+#$offset) &= $andend",
+ [(store (and (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ (i32 IntRegs:$andend)),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) |= Rt
+let AddedComplexity = 30 in
+def MEMw_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$orend),
+ "memw($base+#$offset) |= $orend",
+ [(store (or (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)),
+ (i32 IntRegs:$orend)),
+ (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// MEMw_ADDSUBi_V4:
+// Pseudo operation for MEMw_ADDi_V4 and MEMw_SUBi_V4;
+// a later pass will change it to the right pattern.
+let AddedComplexity = 30 in
+def MEMw_ADDSUBi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, m6Imm:$addend),
+ "Error; should not emit",
+ [(store (add (load ADDRriU6_2:$addr), m6ImmPred:$addend),
+ ADDRriU6_2:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) += #U5
+let AddedComplexity = 30 in
+def MEMw_ADDi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, u5Imm:$addend),
+ "memw($addr) += $addend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) -= #U5
+let AddedComplexity = 30 in
+def MEMw_SUBi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, u5Imm:$subend),
+ "memw($addr) -= $subend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) += Rt
+let AddedComplexity = 30 in
+def MEMw_ADDr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$addend),
+ "memw($addr) += $addend",
+ [(store (add (load ADDRriU6_2:$addr), (i32 IntRegs:$addend)),
+ ADDRriU6_2:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) -= Rt
+let AddedComplexity = 30 in
+def MEMw_SUBr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$subend),
+ "memw($addr) -= $subend",
+ [(store (sub (load ADDRriU6_2:$addr), (i32 IntRegs:$subend)),
+ ADDRriU6_2:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) &= Rt
+let AddedComplexity = 30 in
+def MEMw_ANDr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$andend),
+ "memw($addr) &= $andend",
+ [(store (and (load ADDRriU6_2:$addr), (i32 IntRegs:$andend)),
+ ADDRriU6_2:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memw(Rs+#u6:2) |= Rt
+let AddedComplexity = 30 in
+def MEMw_ORr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$orend),
+ "memw($addr) |= $orend",
+ [(store (or (load ADDRriU6_2:$addr), (i32 IntRegs:$orend)),
+ ADDRriU6_2:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+//===----------------------------------------------------------------------===//
+// MEMOP: Halfword
+//
+// Implemented:
+// MEMh_ADDi_indexed_V4 : memh(Rs+#u6:1)+=#U5
+// MEMh_SUBi_indexed_V4 : memh(Rs+#u6:1)-=#U5
+// MEMh_ADDr_indexed_V4 : memh(Rs+#u6:1)+=Rt
+// MEMh_SUBr_indexed_V4 : memh(Rs+#u6:1)-=Rt
+// MEMh_CLRr_indexed_V4 : memh(Rs+#u6:1)&=Rt
+// MEMh_SETr_indexed_V4 : memh(Rs+#u6:1)|=Rt
+// MEMh_ADDi_V4 : memh(Rs+#u6:1)+=#U5
+// MEMh_SUBi_V4 : memh(Rs+#u6:1)-=#U5
+// MEMh_ADDr_V4 : memh(Rs+#u6:1)+=Rt
+// MEMh_SUBr_V4 : memh(Rs+#u6:1)-=Rt
+// MEMh_CLRr_V4 : memh(Rs+#u6:1)&=Rt
+// MEMh_SETr_V4 : memh(Rs+#u6:1)|=Rt
+//
+// Not implemented:
+// MEMh_CLRi_indexed_V4 : memh(Rs+#u6:1)=clrbit(#U5)
+// MEMh_SETi_indexed_V4 : memh(Rs+#u6:1)=setbit(#U5)
+// MEMh_CLRi_V4 : memh(Rs+#u6:1)=clrbit(#U5)
+// MEMh_SETi_V4 : memh(Rs+#u6:1)=setbit(#U5)
+//===----------------------------------------------------------------------===//
+
+
+// MEMh_ADDSUBi_indexed_V4:
+// Pseudo operation for MEMh_ADDi_indexed_V4 and
+// MEMh_SUBi_indexed_V4; a later pass will change it
+// to the corresponding pattern.
+let AddedComplexity = 30 in
+def MEMh_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_1Imm:$offset, m6Imm:$addend),
+ "Error; should not emit",
+ [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base),
+ u6_1ImmPred:$offset)),
+ m6ImmPred:$addend),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) += #U5
+let AddedComplexity = 30 in
+def MEMh_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_1Imm:$offset, u5Imm:$addend),
+ "memh($base+#$offset) += $addend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) -= #U5
+let AddedComplexity = 30 in
+def MEMh_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_1Imm:$offset, u5Imm:$subend),
+ "memh($base+#$offset) -= $subend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) += Rt
+let AddedComplexity = 30 in
+def MEMh_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$addend),
+ "memh($base+#$offset) += $addend",
+ [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base),
+ u6_1ImmPred:$offset)),
+ (i32 IntRegs:$addend)),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) -= Rt
+let AddedComplexity = 30 in
+def MEMh_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$subend),
+ "memh($base+#$offset) -= $subend",
+ [(truncstorei16 (sub (sextloadi16 (add (i32 IntRegs:$base),
+ u6_1ImmPred:$offset)),
+ (i32 IntRegs:$subend)),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) &= Rt
+let AddedComplexity = 30 in
+def MEMh_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$andend),
+ "memh($base+#$offset) += $andend",
+ [(truncstorei16 (and (sextloadi16 (add (i32 IntRegs:$base),
+ u6_1ImmPred:$offset)),
+ (i32 IntRegs:$andend)),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) |= Rt
+let AddedComplexity = 30 in
+def MEMh_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$orend),
+ "memh($base+#$offset) |= $orend",
+ [(truncstorei16 (or (sextloadi16 (add (i32 IntRegs:$base),
+ u6_1ImmPred:$offset)),
+ (i32 IntRegs:$orend)),
+ (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// MEMh_ADDSUBi_V4:
+// Pseudo operation for MEMh_ADDi_V4 and MEMh_SUBi_V4;
+// a later pass will change it to the right pattern.
+let AddedComplexity = 30 in
+def MEMh_ADDSUBi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, m6Imm:$addend),
+ "Error; should not emit",
+ [(truncstorei16 (add (sextloadi16 ADDRriU6_1:$addr),
+ m6ImmPred:$addend), ADDRriU6_1:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) += #U5
+let AddedComplexity = 30 in
+def MEMh_ADDi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, u5Imm:$addend),
+ "memh($addr) += $addend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) -= #U5
+let AddedComplexity = 30 in
+def MEMh_SUBi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, u5Imm:$subend),
+ "memh($addr) -= $subend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) += Rt
+let AddedComplexity = 30 in
+def MEMh_ADDr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$addend),
+ "memh($addr) += $addend",
+ [(truncstorei16 (add (sextloadi16 ADDRriU6_1:$addr),
+ (i32 IntRegs:$addend)), ADDRriU6_1:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) -= Rt
+let AddedComplexity = 30 in
+def MEMh_SUBr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$subend),
+ "memh($addr) -= $subend",
+ [(truncstorei16 (sub (sextloadi16 ADDRriU6_1:$addr),
+ (i32 IntRegs:$subend)), ADDRriU6_1:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) &= Rt
+let AddedComplexity = 30 in
+def MEMh_ANDr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$andend),
+ "memh($addr) &= $andend",
+ [(truncstorei16 (and (sextloadi16 ADDRriU6_1:$addr),
+ (i32 IntRegs:$andend)), ADDRriU6_1:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memh(Rs+#u6:1) |= Rt
+let AddedComplexity = 30 in
+def MEMh_ORr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$orend),
+ "memh($addr) |= $orend",
+ [(truncstorei16 (or (sextloadi16 ADDRriU6_1:$addr),
+ (i32 IntRegs:$orend)), ADDRriU6_1:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+
+//===----------------------------------------------------------------------===//
+// MEMOP: Byte
+//
+// Implemented:
+// MEMb_ADDi_indexed_V4 : memb(Rs+#u6:0)+=#U5
+// MEMb_SUBi_indexed_V4 : memb(Rs+#u6:0)-=#U5
+// MEMb_ADDr_indexed_V4 : memb(Rs+#u6:0)+=Rt
+// MEMb_SUBr_indexed_V4 : memb(Rs+#u6:0)-=Rt
+// MEMb_CLRr_indexed_V4 : memb(Rs+#u6:0)&=Rt
+// MEMb_SETr_indexed_V4 : memb(Rs+#u6:0)|=Rt
+// MEMb_ADDi_V4 : memb(Rs+#u6:0)+=#U5
+// MEMb_SUBi_V4 : memb(Rs+#u6:0)-=#U5
+// MEMb_ADDr_V4 : memb(Rs+#u6:0)+=Rt
+// MEMb_SUBr_V4 : memb(Rs+#u6:0)-=Rt
+// MEMb_CLRr_V4 : memb(Rs+#u6:0)&=Rt
+// MEMb_SETr_V4 : memb(Rs+#u6:0)|=Rt
+//
+// Not implemented:
+// MEMb_CLRi_indexed_V4 : memb(Rs+#u6:0)=clrbit(#U5)
+// MEMb_SETi_indexed_V4 : memb(Rs+#u6:0)=setbit(#U5)
+// MEMb_CLRi_V4 : memb(Rs+#u6:0)=clrbit(#U5)
+// MEMb_SETi_V4 : memb(Rs+#u6:0)=setbit(#U5)
+//===----------------------------------------------------------------------===//
+
+
+// MEMb_ADDSUBi_indexed_V4:
+// Pseudo operation for MEMb_ADDi_indexed_V4 and
+// MEMb_SUBi_indexed_V4; a later pass will change it
+// to the corresponding pattern.
+let AddedComplexity = 30 in
+def MEMb_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_0Imm:$offset, m6Imm:$addend),
+ "Error; should not emit",
+ [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base),
+ u6_0ImmPred:$offset)),
+ m6ImmPred:$addend),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) += #U5
+let AddedComplexity = 30 in
+def MEMb_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_0Imm:$offset, u5Imm:$addend),
+ "memb($base+#$offset) += $addend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) -= #U5
+let AddedComplexity = 30 in
+def MEMb_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_0Imm:$offset, u5Imm:$subend),
+ "memb($base+#$offset) -= $subend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) += Rt
+let AddedComplexity = 30 in
+def MEMb_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$addend),
+ "memb($base+#$offset) += $addend",
+ [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base),
+ u6_0ImmPred:$offset)),
+ (i32 IntRegs:$addend)),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) -= Rt
+let AddedComplexity = 30 in
+def MEMb_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$subend),
+ "memb($base+#$offset) -= $subend",
+ [(truncstorei8 (sub (sextloadi8 (add (i32 IntRegs:$base),
+ u6_0ImmPred:$offset)),
+ (i32 IntRegs:$subend)),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) &= Rt
+let AddedComplexity = 30 in
+def MEMb_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$andend),
+ "memb($base+#$offset) += $andend",
+ [(truncstorei8 (and (sextloadi8 (add (i32 IntRegs:$base),
+ u6_0ImmPred:$offset)),
+ (i32 IntRegs:$andend)),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) |= Rt
+let AddedComplexity = 30 in
+def MEMb_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
+ (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$orend),
+ "memb($base+#$offset) |= $orend",
+ [(truncstorei8 (or (sextloadi8 (add (i32 IntRegs:$base),
+ u6_0ImmPred:$offset)),
+ (i32 IntRegs:$orend)),
+ (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// MEMb_ADDSUBi_V4:
+// Pseudo operation for MEMb_ADDi_V4 and MEMb_SUBi_V4;
+// a later pass will change it to the right pattern.
+let AddedComplexity = 30 in
+def MEMb_ADDSUBi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, m6Imm:$addend),
+ "Error; should not emit",
+ [(truncstorei8 (add (sextloadi8 ADDRriU6_0:$addr),
+ m6ImmPred:$addend), ADDRriU6_0:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) += #U5
+let AddedComplexity = 30 in
+def MEMb_ADDi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, u5Imm:$addend),
+ "memb($addr) += $addend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) -= #U5
+let AddedComplexity = 30 in
+def MEMb_SUBi_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, u5Imm:$subend),
+ "memb($addr) -= $subend",
+ []>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) += Rt
+let AddedComplexity = 30 in
+def MEMb_ADDr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$addend),
+ "memb($addr) += $addend",
+ [(truncstorei8 (add (sextloadi8 ADDRriU6_0:$addr),
+ (i32 IntRegs:$addend)), ADDRriU6_0:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) -= Rt
+let AddedComplexity = 30 in
+def MEMb_SUBr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$subend),
+ "memb($addr) -= $subend",
+ [(truncstorei8 (sub (sextloadi8 ADDRriU6_0:$addr),
+ (i32 IntRegs:$subend)), ADDRriU6_0:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) &= Rt
+let AddedComplexity = 30 in
+def MEMb_ANDr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$andend),
+ "memb($addr) &= $andend",
+ [(truncstorei8 (and (sextloadi8 ADDRriU6_0:$addr),
+ (i32 IntRegs:$andend)), ADDRriU6_0:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+// memb(Rs+#u6:0) |= Rt
+let AddedComplexity = 30 in
+def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs),
+ (ins MEMri:$addr, IntRegs:$orend),
+ "memb($addr) |= $orend",
+ [(truncstorei8 (or (sextloadi8 ADDRriU6_0:$addr),
+ (i32 IntRegs:$orend)), ADDRriU6_0:$addr)]>,
+ Requires<[HasV4T, UseMEMOP]>;
+
+
+//===----------------------------------------------------------------------===//
+// XTYPE/PRED +
+//===----------------------------------------------------------------------===//
+
+// Hexagon V4 only supports these flavors of byte/half compare instructions:
+// EQ/GT/GTU. Other flavors such as GE/GEU/LT/LTU/LE/LEU are not supported
+// by the hardware. However, the compiler can still synthesize them by
+// combining the implemented patterns appropriately (see the sketch below).
+// Implemented patterns: EQ/GT/GTU.
+// Missing patterns: GE/GEU/LT/LTU/LE/LEU.
+
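+// As a sketch (not part of this patch), a missing byte LT flavor could
+// reuse the implemented GT flavor with its operands swapped, since
+// a < b iff b > a:
+/*
+def : Pat<(i1 (setlt (shl (i32 IntRegs:$a), (i32 24)),
+                     (shl (i32 IntRegs:$b), (i32 24)))),
+          (CMPbGTrr_V4 IntRegs:$b, IntRegs:$a)>;
+*/
+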
+// The following instruction is not extended, because extending it produces
+// incorrect code for negative numbers.
+// Pd=cmpb.eq(Rs,#u8)
+
+let isCompare = 1 in
+def CMPbEQri_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, u8Imm:$src2),
+ "$dst = cmpb.eq($src1, #$src2)",
+ [(set (i1 PredRegs:$dst),
+ (seteq (and (i32 IntRegs:$src1), 255), u8ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+
+// Pd=cmpb.eq(Rs,Rt)
+let isCompare = 1 in
+def CMPbEQrr_ubub_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = cmpb.eq($src1, $src2)",
+ [(set (i1 PredRegs:$dst),
+ (seteq (and (xor (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)), 255), 0))]>,
+ Requires<[HasV4T]>;
+
+// Pd=cmpb.eq(Rs,Rt)
+let isCompare = 1 in
+def CMPbEQrr_sbsb_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = cmpb.eq($src1, $src2)",
+ [(set (i1 PredRegs:$dst),
+ (seteq (shl (i32 IntRegs:$src1), (i32 24)),
+ (shl (i32 IntRegs:$src2), (i32 24))))]>,
+ Requires<[HasV4T]>;
+
+/* Incorrect Pattern -- immediate should be right shifted before being
+used in the cmpb.gt instruction.
+// Pd=cmpb.gt(Rs,#s8)
+let isCompare = 1 in
+def CMPbGTri_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, s8Imm:$src2),
+ "$dst = cmpb.gt($src1, #$src2)",
+ [(set (i1 PredRegs:$dst), (setgt (shl (i32 IntRegs:$src1), (i32 24)),
+ s8ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+*/
+
+// Pd=cmpb.gt(Rs,Rt)
+let isCompare = 1 in
+def CMPbGTrr_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = cmpb.gt($src1, $src2)",
+ [(set (i1 PredRegs:$dst),
+ (setgt (shl (i32 IntRegs:$src1), (i32 24)),
+ (shl (i32 IntRegs:$src2), (i32 24))))]>,
+ Requires<[HasV4T]>;
+
+// Pd=cmpb.gtu(Rs,#u7)
+let isCompare = 1 in
+def CMPbGTUri_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, u7Imm:$src2),
+ "$dst = cmpb.gtu($src1, #$src2)",
+ [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255),
+ u7ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+
+// Pd=cmpb.gtu(Rs,Rt)
+let isCompare = 1 in
+def CMPbGTUrr_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = cmpb.gtu($src1, $src2)",
+ [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255),
+ (and (i32 IntRegs:$src2), 255)))]>,
+ Requires<[HasV4T]>;
+
+// The following instruction is not extended, because extending it produces
+// incorrect code for negative numbers.
+
+// Signed half compare(.eq) ri.
+// Pd=cmph.eq(Rs,#s8)
+let isCompare = 1 in
+def CMPhEQri_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, s8Imm:$src2),
+ "$dst = cmph.eq($src1, #$src2)",
+ [(set (i1 PredRegs:$dst), (seteq (and (i32 IntRegs:$src1), 65535),
+ s8ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+
+// Signed half compare(.eq) rr.
+// Case 1: xor + and, then compare:
+// r0=xor(r0,r1)
+// r0=and(r0,#0xffff)
+// p0=cmp.eq(r0,#0)
+// Pd=cmph.eq(Rs,Rt)
+let isCompare = 1 in
+def CMPhEQrr_xor_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = cmph.eq($src1, $src2)",
+ [(set (i1 PredRegs:$dst), (seteq (and (xor (i32 IntRegs:$src1),
+ (i32 IntRegs:$src2)),
+ 65535), 0))]>,
+ Requires<[HasV4T]>;
+
+// Signed half compare(.eq) rr.
+// Case 2: shift left 16 bits then compare:
+// r0=asl(r0,16)
+// r1=asl(r1,16)
+// p0=cmp.eq(r0,r1)
+// Pd=cmph.eq(Rs,Rt)
+let isCompare = 1 in
+def CMPhEQrr_shl_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = cmph.eq($src1, $src2)",
+ [(set (i1 PredRegs:$dst),
+ (seteq (shl (i32 IntRegs:$src1), (i32 16)),
+ (shl (i32 IntRegs:$src2), (i32 16))))]>,
+ Requires<[HasV4T]>;
+
+/* Incorrect Pattern -- immediate should be right shifted before being
+used in the cmph.gt instruction.
+// Signed half compare(.gt) ri.
+// Pd=cmph.gt(Rs,#s8)
+
+let isCompare = 1 in
+def CMPhGTri_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, s8Imm:$src2),
+ "$dst = cmph.gt($src1, #$src2)",
+ [(set (i1 PredRegs:$dst),
+ (setgt (shl (i32 IntRegs:$src1), (i32 16)),
+ s8ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+*/
+
+// Signed half compare(.gt) rr.
+// Pd=cmph.gt(Rs,Rt)
+let isCompare = 1 in
+def CMPhGTrr_shl_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = cmph.gt($src1, $src2)",
+ [(set (i1 PredRegs:$dst),
+ (setgt (shl (i32 IntRegs:$src1), (i32 16)),
+ (shl (i32 IntRegs:$src2), (i32 16))))]>,
+ Requires<[HasV4T]>;
+
+// Unsigned half compare rr (.gtu).
+// Pd=cmph.gtu(Rs,Rt)
+let isCompare = 1 in
+def CMPhGTUrr_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, IntRegs:$src2),
+ "$dst = cmph.gtu($src1, $src2)",
+ [(set (i1 PredRegs:$dst),
+ (setugt (and (i32 IntRegs:$src1), 65535),
+ (and (i32 IntRegs:$src2), 65535)))]>,
+ Requires<[HasV4T]>;
+
+// Unsigned half compare ri (.gtu).
+// Pd=cmph.gtu(Rs,#u7)
+let isCompare = 1 in
+def CMPhGTUri_V4 : MInst<(outs PredRegs:$dst),
+ (ins IntRegs:$src1, u7Imm:$src2),
+ "$dst = cmph.gtu($src1, #$src2)",
+ [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 65535),
+ u7ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+
+//===----------------------------------------------------------------------===//
+// XTYPE/PRED -
+//===----------------------------------------------------------------------===//
+
+// Deallocate frame and return.
+// dealloc_return
+let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
+ def DEALLOC_RET_V4 : NVInst_V4<(outs), (ins i32imm:$amt1),
+ "dealloc_return",
+ []>,
+ Requires<[HasV4T]>;
+}
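+
+// For context (descriptive note, not from this patch): dealloc_return
+// folds the usual epilogue sequence of "deallocframe" followed by
+// "jumpr r31" into one instruction, which is why it defines R29-R31
+// and PC, and uses R29 and R31, above.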
+
+// Restore registers and dealloc return function call.
+let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC] in {
+ def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ "jump $dst // Restore_and_dealloc_return",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// Restore registers and dealloc frame before a tail call.
+let isCall = 1, isBarrier = 1,
+ Defs = [R29, R30, R31, PC] in {
+ def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ "call $dst // Restore_and_dealloc_before_tailcall",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// Call to a function that saves the callee-saved registers.
+let isCall = 1, isBarrier = 1,
+ Uses = [R29, R31] in {
+ def SAVE_REGISTERS_CALL_V4 : JInst<(outs), (ins calltarget:$dst, variable_ops),
+ "call $dst // Save_calle_saved_registers",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// if (Ps) dealloc_return
+let isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
+ def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1),
+ "if ($src1) dealloc_return",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// if (!Ps) dealloc_return
+let isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
+ def DEALLOC_RET_cNotPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
+ i32imm:$amt1),
+ "if (!$src1) dealloc_return",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// if (Ps.new) dealloc_return:nt
+let isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
+ def DEALLOC_RET_cdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
+ i32imm:$amt1),
+ "if ($src1.new) dealloc_return:nt",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// if (!Ps.new) dealloc_return:nt
+let isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
+ def DEALLOC_RET_cNotdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
+ i32imm:$amt1),
+ "if (!$src1.new) dealloc_return:nt",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// if (Ps.new) dealloc_return:t
+let isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
+ def DEALLOC_RET_cdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
+ i32imm:$amt1),
+ "if ($src1.new) dealloc_return:t",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+// if (!Ps.new) dealloc_return:t
+let isReturn = 1, isTerminator = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ isPredicated = 1 in {
+ def DEALLOC_RET_cNotdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
+ i32imm:$amt1),
+ "if (!$src1.new) dealloc_return:t",
+ []>,
+ Requires<[HasV4T]>;
+}
+
+
+// Load/Store with absolute addressing mode
+// memw(#u6)=Rt
+
+multiclass ST_abs<string OpcStr> {
+ let isPredicable = 1 in
+ def _abs_V4 : STInst<(outs),
+ (ins globaladdress:$absaddr, IntRegs:$src),
+ !strconcat(OpcStr, "(##$absaddr) = $src"),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+ !strconcat("if ($src1)", !strconcat(OpcStr, "(##$absaddr) = $src2")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+ !strconcat("if (!$src1)", !strconcat(OpcStr, "(##$absaddr) = $src2")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+ !strconcat("if ($src1.new)", !strconcat(OpcStr, "(##$absaddr) = $src2")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+ !strconcat("if (!$src1.new)", !strconcat(OpcStr, "(##$absaddr) = $src2")),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _abs_nv_V4 : STInst<(outs),
+ (ins globaladdress:$absaddr, IntRegs:$src),
+ !strconcat(OpcStr, "(##$absaddr) = $src.new"),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_nv_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+ !strconcat("if ($src1)", !strconcat(OpcStr, "(##$absaddr) = $src2.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_nv_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+ !strconcat("if (!$src1)", !strconcat(OpcStr, "(##$absaddr) = $src2.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_nv_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+ !strconcat("if ($src1.new)", !strconcat(OpcStr, "(##$absaddr) = $src2.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_nv_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2),
+ !strconcat("if (!$src1.new)", !strconcat(OpcStr, "(##$absaddr) = $src2.new")),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+let AddedComplexity = 30, isPredicable = 1 in
+def STrid_abs_V4 : STInst<(outs),
+ (ins globaladdress:$absaddr, DoubleRegs:$src),
+ "memd(##$absaddr) = $src",
+ [(store (i64 DoubleRegs:$src), (HexagonCONST32 tglobaladdr:$absaddr))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def STrid_abs_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2),
+ "if ($src1) memd(##$absaddr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def STrid_abs_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2),
+ "if (!$src1) memd(##$absaddr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def STrid_abs_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2),
+ "if ($src1.new) memd(##$absaddr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def STrid_abs_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2),
+ "if (!$src1.new) memd(##$absaddr) = $src2",
+ []>,
+ Requires<[HasV4T]>;
+
+defm STrib : ST_abs<"memb">;
+defm STrih : ST_abs<"memh">;
+defm STriw : ST_abs<"memw">;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(truncstorei8 (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)),
+ (STrib_abs_V4 tglobaladdr:$absaddr, IntRegs:$src1)>;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(truncstorei16 (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)),
+ (STrih_abs_V4 tglobaladdr:$absaddr, IntRegs:$src1)>;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)),
+ (STriw_abs_V4 tglobaladdr:$absaddr, IntRegs:$src1)>;
+
+
+multiclass LD_abs<string OpcStr> {
+ let isPredicable = 1 in
+ def _abs_V4 : LDInst<(outs IntRegs:$dst),
+ (ins globaladdress:$absaddr),
+ !strconcat("$dst = ", !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ !strconcat("if ($src1) $dst = ", !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ !strconcat("if (!$src1) $dst = ", !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ !strconcat("if ($src1.new) $dst = ", !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ !strconcat("if (!$src1.new) $dst = ", !strconcat(OpcStr, "(##$absaddr)")),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+let AddedComplexity = 30 in
+def LDrid_abs_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins globaladdress:$absaddr),
+ "$dst = memd(##$absaddr)",
+ [(set (i64 DoubleRegs:$dst), (load (HexagonCONST32 tglobaladdr:$absaddr)))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def LDrid_abs_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ "if ($src1) $dst = memd(##$absaddr)",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def LDrid_abs_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ "if (!$src1) $dst = memd(##$absaddr)",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def LDrid_abs_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ "if ($src1.new) $dst = memd(##$absaddr)",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 30, isPredicated = 1 in
+def LDrid_abs_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$absaddr),
+ "if (!$src1.new) $dst = memd(##$absaddr)",
+ []>,
+ Requires<[HasV4T]>;
+
+defm LDrib : LD_abs<"memb">;
+defm LDriub : LD_abs<"memub">;
+defm LDrih : LD_abs<"memh">;
+defm LDriuh : LD_abs<"memuh">;
+defm LDriw : LD_abs<"memw">;
+
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(i32 (load (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDriw_abs_V4 tglobaladdr:$absaddr)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (sextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDrib_abs_V4 tglobaladdr:$absaddr)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (zextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDriub_abs_V4 tglobaladdr:$absaddr)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (sextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDrih_abs_V4 tglobaladdr:$absaddr)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (zextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))),
+ (LDriuh_abs_V4 tglobaladdr:$absaddr)>;
+
+// Transfer global address into a register
+let AddedComplexity=50, isMoveImm = 1, isReMaterializable = 1 in
+def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$src1),
+ "$dst = ##$src1",
+ [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$src2),
+ "if($src1) $dst = ##$src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$src2),
+ "if(!$src1) $dst = ##$src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cdnPt_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$src2),
+ "if($src1.new) $dst = ##$src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+def TFRI_cdnNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, globaladdress:$src2),
+ "if(!$src1.new) $dst = ##$src2",
+ []>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 50, Predicates = [HasV4T] in
+def : Pat<(HexagonCONST32_GP tglobaladdr:$src1),
+ (TFRI_V4 tglobaladdr:$src1)>;
+
+
+// Load - Indirect with long offset: these instructions take a global
+// address as an operand.
+let AddedComplexity = 10 in
+def LDrid_ind_lo_V4 : LDInst<(outs DoubleRegs:$dst),
+ (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset),
+ "$dst=memd($src1<<#$src2+##$offset)",
+ [(set (i64 DoubleRegs:$dst),
+ (load (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$offset))))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 10 in
+multiclass LD_indirect_lo<string OpcStr, PatFrag OpNode> {
+ def _lo_V4 : LDInst<(outs IntRegs:$dst),
+ (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset),
+ !strconcat("$dst = ", !strconcat(OpcStr, "($src1<<#$src2+##$offset)")),
+ [(set IntRegs:$dst,
+ (i32 (OpNode (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$offset)))))]>,
+ Requires<[HasV4T]>;
+}
+
+defm LDrib_ind : LD_indirect_lo<"memb", sextloadi8>;
+defm LDriub_ind : LD_indirect_lo<"memub", zextloadi8>;
+defm LDrih_ind : LD_indirect_lo<"memh", sextloadi16>;
+defm LDriuh_ind : LD_indirect_lo<"memuh", zextloadi16>;
+defm LDriw_ind : LD_indirect_lo<"memw", load>;
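+
+// Example of the shape these match: loading element $i of a global i32
+// array gArr (a placeholder name), i.e. gArr[i], produces
+// (load (add (shl i, 2), ##gArr)), which LDriw_ind_lo_V4 selects as
+// "r1 = memw(r0<<#2+##gArr)".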
+
+// Store - Indirect with long offset: these instructions take a global
+// address as an operand.
+let AddedComplexity = 10 in
+def STrid_ind_lo_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3,
+ DoubleRegs:$src4),
+ "memd($src1<<#$src2+#$src3) = $src4",
+ [(store (i64 DoubleRegs:$src4),
+ (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$src3)))]>,
+ Requires<[HasV4T]>;
+
+let AddedComplexity = 10 in
+multiclass ST_indirect_lo<string OpcStr, PatFrag OpNode> {
+ def _lo_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3,
+ IntRegs:$src4),
+ !strconcat(OpcStr, "($src1<<#$src2+##$src3) = $src4"),
+ [(OpNode (i32 IntRegs:$src4),
+ (add (shl IntRegs:$src1, u2ImmPred:$src2),
+ (HexagonCONST32 tglobaladdr:$src3)))]>,
+ Requires<[HasV4T]>;
+}
+
+defm STrib_ind : ST_indirect_lo<"memb", truncstorei8>;
+defm STrih_ind : ST_indirect_lo<"memh", truncstorei16>;
+defm STriw_ind : ST_indirect_lo<"memw", store>;
+
+// Store - absolute addressing mode: these instructions take a constant
+// value as the extended operand.
+multiclass ST_absimm<string OpcStr> {
+ let isPredicable = 1 in
+ def _abs_V4 : STInst<(outs),
+ (ins u6Imm:$src1, IntRegs:$src2),
+ !strconcat(OpcStr, "(#$src1) = $src2"),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if ($src1)", !strconcat(OpcStr, "(#$src2) = $src3")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if (!$src1)", !strconcat(OpcStr, "(#$src2) = $src3")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if ($src1.new)", !strconcat(OpcStr, "(#$src2) = $src3")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_V4 : STInst<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if (!$src1.new)", !strconcat(OpcStr, "(#$src2) = $src3")),
+ []>,
+ Requires<[HasV4T]>;
+
+ def _abs_nv_V4 : STInst<(outs),
+ (ins u6Imm:$src1, IntRegs:$src2),
+ !strconcat(OpcStr, "(#$src1) = $src2.new"),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_nv_V4 : STInst<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if ($src1)", !strconcat(OpcStr, "(#$src2) = $src3.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_nv_V4 : STInst<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if (!$src1)", !strconcat(OpcStr, "(#$src2) = $src3.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_nv_V4 : STInst<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if ($src1.new)", !strconcat(OpcStr, "(#$src2) = $src3.new")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_nv_V4 : STInst<(outs),
+ (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
+ !strconcat("if (!$src1.new)", !strconcat(OpcStr, "(#$src2) = $src3.new")),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+defm STrib_imm : ST_absimm<"memb">;
+defm STrih_imm : ST_absimm<"memh">;
+defm STriw_imm : ST_absimm<"memw">;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(truncstorei8 (i32 IntRegs:$src1), u6ImmPred:$src2),
+ (STrib_imm_abs_V4 u6ImmPred:$src2, IntRegs:$src1)>;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(truncstorei16 (i32 IntRegs:$src1), u6ImmPred:$src2),
+ (STrih_imm_abs_V4 u6ImmPred:$src2, IntRegs:$src1)>;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(store (i32 IntRegs:$src1), u6ImmPred:$src2),
+ (STriw_imm_abs_V4 u6ImmPred:$src2, IntRegs:$src1)>;
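+
+// Typical use (illustration only): a store to a small fixed absolute
+// address such as a memory-mapped register, e.g. "*(volatile int *)32 = x;"
+// can select STriw_imm_abs_V4 via the pattern above and emit
+// "memw(#32) = r0", since the address fits the u6 immediate.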
+
+
+// Load - absolute addressing mode: these instructions take a constant
+// value as the extended operand.
+
+multiclass LD_absimm<string OpcStr> {
+ let isPredicable = 1 in
+ def _abs_V4 : LDInst<(outs IntRegs:$dst),
+ (ins u6Imm:$src),
+ !strconcat("$dst = ", !strconcat(OpcStr, "(#$src)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, u6Imm:$src2),
+ !strconcat("if ($src1) $dst = ", !strconcat(OpcStr, "(#$src2)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, u6Imm:$src2),
+ !strconcat("if (!$src1) $dst = ", !strconcat(OpcStr, "(#$src2)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, u6Imm:$src2),
+ !strconcat("if ($src1.new) $dst = ", !strconcat(OpcStr, "(#$src2)")),
+ []>,
+ Requires<[HasV4T]>;
+
+ let isPredicated = 1 in
+ def _abs_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
+ (ins PredRegs:$src1, u6Imm:$src2),
+ !strconcat("if (!$src1.new) $dst = ", !strconcat(OpcStr, "(#$src2)")),
+ []>,
+ Requires<[HasV4T]>;
+}
+
+defm LDrib_imm : LD_absimm<"memb">;
+defm LDriub_imm : LD_absimm<"memub">;
+defm LDrih_imm : LD_absimm<"memh">;
+defm LDriuh_imm : LD_absimm<"memuh">;
+defm LDriw_imm : LD_absimm<"memw">;
+
+let Predicates = [HasV4T], AddedComplexity = 30 in
+def : Pat<(i32 (load u6ImmPred:$src)),
+ (LDriw_imm_abs_V4 u6ImmPred:$src)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (sextloadi8 u6ImmPred:$src)),
+ (LDrib_imm_abs_V4 u6ImmPred:$src)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (zextloadi8 u6ImmPred:$src)),
+ (LDriub_imm_abs_V4 u6ImmPred:$src)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (sextloadi16 u6ImmPred:$src)),
+ (LDrih_imm_abs_V4 u6ImmPred:$src)>;
+
+let Predicates = [HasV4T], AddedComplexity=30 in
+def : Pat<(i32 (zextloadi16 u6ImmPred:$src)),
+ (LDriuh_imm_abs_V4 u6ImmPred:$src)>;
+
+
+// Indexed store word - global address.
+// memw(Rs+#u6:2)=##global
+let AddedComplexity = 10 in
+def STriw_offset_ext_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u6_2Imm:$src2, globaladdress:$src3),
+ "memw($src1+#$src2) = ##$src3",
+ [(store (HexagonCONST32 tglobaladdr:$src3),
+ (add IntRegs:$src1, u6_2ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
+
+
+// Indexed store halfword - global address.
+// memh(Rs+#u6:1)=##global
+let AddedComplexity = 10 in
+def STrih_offset_ext_V4 : STInst<(outs),
+ (ins IntRegs:$src1, u6_1Imm:$src2, globaladdress:$src3),
+ "memh($src1+#$src2) = ##$src3",
+ [(truncstorei16 (HexagonCONST32 tglobaladdr:$src3),
+ (add IntRegs:$src1, u6_1ImmPred:$src2))]>,
+ Requires<[HasV4T]>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
new file mode 100644
index 0000000..b15e293
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsics.td
@@ -0,0 +1,3462 @@
+//===-- HexagonIntrinsics.td - Instruction intrinsics ------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This is populated based on the following specs:
+// Hexagon V2 Architecture
+// Application-Level Specification
+// 80-V9418-8 Rev. B
+// March 4, 2008
+//===----------------------------------------------------------------------===//
+
+//
+// ALU 32 types.
+//
+
+class qi_ALU32_sisi<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class qi_ALU32_sis10<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class qi_ALU32_sis8<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class qi_ALU32_siu8<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class qi_ALU32_siu9<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, u9Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_ALU32_qisisi<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class si_ALU32_qis8si<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2,
+ IntRegs:$src3))]>;
+
+class si_ALU32_qisis8<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ s8Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ imm:$src3))]>;
+
+class si_ALU32_qis8s8<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2, s8Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2, imm:$src3))]>;
+
+class si_ALU32_sisi<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU32_sisi_sat<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU32_sisi_rnd<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU32_sis16<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s16Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_ALU32_sis10<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_ALU32_s10si<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins s10Imm:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "(#$src1, $src2)")),
+ [(set IntRegs:$dst, (IntID imm:$src1, IntRegs:$src2))]>;
+
+class si_lo_ALU32_siu16<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u16Imm:$src2),
+ !strconcat("$dst.l = ", !strconcat(opc , "#$src2")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_hi_ALU32_siu16<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u16Imm:$src2),
+ !strconcat("$dst.h = ", !strconcat(opc , "#$src2")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_ALU32_s16<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins s16Imm:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "#$src1")),
+ [(set IntRegs:$dst, (IntID imm:$src1))]>;
+
+class di_ALU32_s8<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs DoubleRegs:$dst), (ins s8Imm:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "#$src1")),
+ [(set DoubleRegs:$dst, (IntID imm:$src1))]>;
+
+class di_ALU64_di<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "$src")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src))]>;
+
+class si_ALU32_si<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
+
+class si_ALU32_si_tfr<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "$src")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
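+
+// Illustrative instantiation (a sketch; the concrete defs appear later in
+// this file), e.g.:
+//   def Hexagon_A2_add : si_ALU32_sisi<"add", int_hexagon_A2_add>;
+// which selects int_hexagon_A2_add and prints as "$dst = add($src1, $src2)".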
+
+//
+// ALU 64 types.
+//
+
+class si_ALU64_si_sat<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
+
+class si_ALU64_didi<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class di_ALU64_sidi<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, DoubleRegs:$src2))]>;
+
+class di_ALU64_didi<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_ALU64_qididi<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, DoubleRegs:$src2,
+ DoubleRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, DoubleRegs:$src2,
+ DoubleRegs:$src3))]>;
+
+class di_ALU64_sisi<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_ALU64_didi_sat<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_ALU64_didi_rnd<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_ALU64_didi_crnd<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):crnd")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_ALU64_didi_rnd_sat<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_ALU64_didi_crnd_sat<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):crnd:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class qi_ALU64_didi<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set PredRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class si_ALU64_sisi<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_sat_lh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_l16_sat_hh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_l16_sat_lh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_l16_sat_hl<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_l16_sat_ll<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_l16_hh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_l16_hl<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_l16_lh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_l16_ll<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_h16_sat_hh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.H):sat:<<16")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_h16_sat_lh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.L, $src2.H):sat:<<16")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_h16_sat_hl<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.L):sat:<<16")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_h16_sat_ll<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.L, $src2.L):sat:<<16")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_h16_hh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):<<16")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_h16_hl<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):<<16")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_h16_lh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):<<16")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_h16_ll<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):<<16")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_lh<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_ll<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_ALU64_sisi_sat<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
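+
+// Illustrative instantiation (sketch), e.g. a saturating add:
+//   def Hexagon_A2_addsat : si_ALU64_sisi_sat<"add", int_hexagon_A2_addsat>;
+// printing as "$dst = add($src1, $src2):sat".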
+
+//
+// SInst classes.
+//
+
+class qi_SInst_qi<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src))]>;
+
+class qi_SInst_qi_pxfer<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "$src")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src))]>;
+
+class qi_SInst_qiqi<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class qi_SInst_qiqi_neg<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, !$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_SInst_di<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src))]>;
+
+class di_SInst_di_sat<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src):sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src))]>;
+
+class si_SInst_di<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src)")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src))]>;
+
+class si_SInst_di_sat<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src):sat")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src))]>;
+
+class di_SInst_disi<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
+
+class di_SInst_didi<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class di_SInst_si<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1),
+ !strconcat("$dst = ", !strconcat(opc , "($src1)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>;
+
+class si_SInst_sisiu3<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u3Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ imm:$src3))]>;
+
+class si_SInst_diu5<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u5Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
+
+class si_SInst_disi<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
+
+class si_SInst_sidi<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, DoubleRegs:$src2))]>;
+
+class di_SInst_disisi<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class di_SInst_sisi<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class qi_SInst_siu5<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class qi_SInst_siu6<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u6Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class qi_SInst_sisi<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_SInst_si<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
+
+class si_SInst_si_sat<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
+
+class di_SInst_qi<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "($src)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src))]>;
+
+class si_SInst_qi<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "$src")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src))]>;
+
+class si_SInst_qiqi<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class qi_SInst_si<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src),
+ !strconcat("$dst = ", !strconcat(opc , "$src")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src))]>;
+
+class si_SInst_sisi<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_SInst_diu6<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>;
+
+class si_SInst_siu5<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_SInst_siu5_rnd<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_SInst_siu5u5<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2, u5Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2, imm:$src3))]>;
+
+class si_SInst_sisisi_acc<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisisi_nac<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didisi_acc<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didisi_nac<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1, IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisiu5u5<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ u5Imm:$src2, u5Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, #$src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ imm:$src2, imm:$src3))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisidi<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didiu6u6<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ u6Imm:$src2, u6Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, #$src2, #$src3)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ imm:$src2, imm:$src3))],
+ "$dst2 = $dst">;
+
+class di_SInst_dididi<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_diu6u6<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2,
+ u6Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2, #$src3)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2,
+ imm:$src3))]>;
+
+class di_SInst_didisi<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2,
+ IntRegs:$src3))]>;
+
+class di_SInst_didiqi<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2,
+ IntRegs:$src3))]>;
+
+class di_SInst_didiu3<string opc, Intrinsic IntID>
+ : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2,
+ u3Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, #$src3)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2,
+ imm:$src3))]>;
+
+class di_SInst_didisi_or<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst |= ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didisi_and<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst &= ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didiu6_and<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ u6Imm:$src2),
+ !strconcat("$dst &= ", !strconcat(opc , "($src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didiu6_or<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ u6Imm:$src2),
+ !strconcat("$dst |= ", !strconcat(opc , "($src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didiu6_xor<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ u6Imm:$src2),
+ !strconcat("$dst ^= ", !strconcat(opc , "($src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisisi_and<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst &= ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisisi_or<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst |= ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisiu5_and<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ u5Imm:$src2),
+ !strconcat("$dst &= ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisiu5_or<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ u5Imm:$src2),
+ !strconcat("$dst |= ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisiu5_xor<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ u5Imm:$src2),
+ !strconcat("$dst ^= ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisiu5_acc<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ u5Imm:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+class si_SInst_sisiu5_nac<string opc, Intrinsic IntID>
+ : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ u5Imm:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didiu6_acc<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ u5Imm:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1, imm:$src2))],
+ "$dst2 = $dst">;
+
+class di_SInst_didiu6_nac<string opc, Intrinsic IntID>
+ : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ u5Imm:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ imm:$src2))],
+ "$dst2 = $dst">;
+
+//
+// MInst classes.
+//
+
+class di_MInst_sisi_rnd_hh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.H):<<1:rnd")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_rnd_hh<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.H):rnd")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_rnd_hl_s1<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.L):<<1:rnd")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_rnd_hl<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.L):rnd")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_rnd_lh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.L, $src2.H):<<1:rnd")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_rnd_lh<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.L, $src2.H):rnd")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_rnd_ll_s1<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.L, $src2.L):<<1:rnd")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_rnd_ll<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.L, $src2.L):rnd")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_disisi_acc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_sat_conj<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, $src2*):sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_sat_conj<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1, $src2*):sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_s1_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1, $src2):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_s1_sat_conj<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1, $src2*):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_s1_sat_conj<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1, $src2*):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_s8s8<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins s8Imm:$src1, s8Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "(#$src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID imm:$src1, imm:$src2))]>;
+
+class si_MInst_sisi<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_hh<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_hh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):<<1")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_lh<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_lh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):<<1")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_hl<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_hl_s1<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):<<1")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_ll<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_ll_s1<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):<<1")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
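+
+// Illustrative instantiation of the half-word multiply classes (sketch):
+//   def Hexagon_M2_mpyd_hh_s0 : di_MInst_sisi_hh<"mpy", int_hexagon_M2_mpyd_hh_s0>;
+// printing as "$dst = mpy($src1.H, $src2.H)".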
+
+class si_MInst_sisi_hh<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_hh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_lh<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_lh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_hl<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_hl_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_ll<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_ll_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_up<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_didi<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_MInst_didi_conj<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2*)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_MInst_sisi_s1_sat_conj<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2*):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_didi_s1_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2):<<1:rnd:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_MInst_didi_sat<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class di_MInst_didi_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2):rnd:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class si_SInst_sisi_sat<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_s1_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_l_s1_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2.L):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_h_s1_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2.H):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_sat_conj<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2*):rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_s1_rnd_sat_conj<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2*):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2):rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisisi_xacc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst ^= ", !strconcat(opc , "($src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
+ IntRegs:$src3))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst += ", !strconcat(opc , "($src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
+ IntRegs:$src3))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst -= ", !strconcat(opc , "($src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
+ IntRegs:$src3))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisis8_acc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
+ s8Imm:$src3),
+ !strconcat("$dst += ", !strconcat(opc , "($src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
+ imm:$src3))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisis8_nac<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
+ s8Imm:$src3),
+ !strconcat("$dst -= ", !strconcat(opc , "($src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
+ imm:$src3))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisiu4u5<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ u4Imm:$src2, u5Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, #$src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ imm:$src2, imm:$src3))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisiu8_acc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
+ u8Imm:$src3),
+ !strconcat("$dst += ", !strconcat(opc , "($src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
+ imm:$src3))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisiu8_nac<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src2,
+ u8Imm:$src3),
+ !strconcat("$dst -= ", !strconcat(opc , "($src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src2,
+ imm:$src3))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_hh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1.H, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_sat_lh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.L, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_sat_lh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.L, $src2.H):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_sat_hh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.H, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_sat_hh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.H, $src2.H):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_hh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.H, $src2.H):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_hh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1.H, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_sat_hh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.H):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_sat_hh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_sat_hl_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.L):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_sat_hl<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_sat_lh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.H):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_sat_lh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_sat_ll_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.L):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_sat_ll<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_hh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.H):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_hl<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1.H, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_hl_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.H, $src2.L):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_hl<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1.H, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_hl_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.L):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_lh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1.L, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_lh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.L, $src2.H):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_lh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1.L, $src2.H)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_lh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.H):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_ll<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1.L, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_ll_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.L, $src2.L):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_sat_ll_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.L, $src2.L):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_sat_hl_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.H, $src2.L):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_sat_ll<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.L, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_acc_sat_hl<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc ,
+ "($src1.H, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_ll<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1.L, $src2.L)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_ll_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.L):<<1")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_hh_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_hh_s1_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.H):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_hl_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_hl_s1_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.H, $src2.L):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_lh_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_lh_s1_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.H):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_ll_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_nac_ll_s1_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc ,
+ "($src1.L, $src2.L):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_ALU32_sisi<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_sat<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_sat_conj<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2*):sat")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_sisi_s1_sat<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_didi_s1_sat<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1,
+ DoubleRegs:$src2))]>;
+
+class si_MInst_didi_s1_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, $src2):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class si_MInst_didi_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):rnd:sat")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class si_MInst_sisi_sat_hh<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_hh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.H):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_hl<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.H, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_hl_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.L):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_lh<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.H):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_lh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.L, $src2.H):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_ll<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1.L, $src2.L):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_ll_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.L, $src2.L):<<1:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_rnd_hh<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.H):rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_hh<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.H):rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_hh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1.H, $src2.H):<<1:rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_rnd_hh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc ,
+ "($src1.H, $src2.H):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_hl<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.H, $src2.L):rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_hl_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.H, $src2.L):<<1:rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_rnd_hl<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.H, $src2.L):rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_rnd_hl_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.H, $src2.L):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_lh<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.L, $src2.H):rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_rnd_lh<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.L, $src2.H):rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_rnd_lh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.L, $src2.H):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_lh_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.L, $src2.H):<<1:rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_rnd_ll<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.L, $src2.L):rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_sat_rnd_ll_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.L, $src2.L):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_ll<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.L, $src2.L):rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_sisi_rnd_ll_s1<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1.L, $src2.L):<<1:rnd")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_dididi_acc_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2,
+ DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, $src2):sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_dididi_acc_rnd_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1, $src2):rnd:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_dididi_acc_s1_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1, $src2):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_dididi_acc_s1_rnd_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1, $src2):<<1:rnd:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_dididi_acc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_dididi_acc_conj<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1, $src2*)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_hh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1.H, $src2.H)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_hl<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1.H, $src2.L)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_lh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1.L, $src2.H)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_ll<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ", !strconcat(opc , "($src1.L, $src2.L)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_hh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1.H, $src2.H):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_hl_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1.H, $src2.L):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_lh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1.L, $src2.H):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_ll_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1.L, $src2.L):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_hh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1.H, $src2.H)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_hl<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1.H, $src2.L)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_lh<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1.L, $src2.H)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_ll<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ", !strconcat(opc , "($src1.L, $src2.L)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_hh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ",
+ !strconcat(opc , "($src1.H, $src2.H):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_hl_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ",
+ !strconcat(opc , "($src1.H, $src2.L):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_lh_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ",
+ !strconcat(opc , "($src1.L, $src2.H):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_nac_ll_s1<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst -= ",
+ !strconcat(opc , "($src1.L, $src2.L):<<1")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disisi_acc_s1_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1, $src2):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, IntRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class di_MInst_disi_s1_sat<string opc, Intrinsic IntID>
+ : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
+
+class di_MInst_didisi_acc_s1_sat<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ IntRegs:$src2),
+ !strconcat("$dst += ",
+ !strconcat(opc , "($src1, $src2):<<1:sat")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2,
+ DoubleRegs:$src1,
+ IntRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_disi_s1_rnd_sat<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ",
+ !strconcat(opc , "($src1, $src2):<<1:rnd:sat")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>;
+
+class si_MInst_didi<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
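+// For example, the definition
+//   def Hexagon_M2_mpy_acc_sat_ll_s0:
+//     si_MInst_sisisi_acc_sat_ll <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s0>;
+// (see MTYPE/MPYS below) expands to the assembly string
+// "$dst += mpy($src1.L, $src2.L):sat", with the "$dst2 = $dst" constraint
+// tying the accumulator input register to the output register.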
+
+/********************************************************************
+* ALU32/ALU *
+*********************************************************************/
+
+// ALU32 / ALU / Add.
+def Hexagon_A2_add:
+ si_ALU32_sisi <"add", int_hexagon_A2_add>;
+def Hexagon_A2_addi:
+ si_ALU32_sis16 <"add", int_hexagon_A2_addi>;
+
+// ALU32 / ALU / Logical operations.
+def Hexagon_A2_and:
+ si_ALU32_sisi <"and", int_hexagon_A2_and>;
+def Hexagon_A2_andir:
+ si_ALU32_sis10 <"and", int_hexagon_A2_andir>;
+def Hexagon_A2_not:
+ si_ALU32_si <"not", int_hexagon_A2_not>;
+def Hexagon_A2_or:
+ si_ALU32_sisi <"or", int_hexagon_A2_or>;
+def Hexagon_A2_orir:
+ si_ALU32_sis10 <"or", int_hexagon_A2_orir>;
+def Hexagon_A2_xor:
+ si_ALU32_sisi <"xor", int_hexagon_A2_xor>;
+
+// ALU32 / ALU / Negate.
+def Hexagon_A2_neg:
+ si_ALU32_si <"neg", int_hexagon_A2_neg>;
+
+// ALU32 / ALU / Subtract.
+def Hexagon_A2_sub:
+ si_ALU32_sisi <"sub", int_hexagon_A2_sub>;
+def Hexagon_A2_subri:
+ si_ALU32_s10si <"sub", int_hexagon_A2_subri>;
+
+// ALU32 / ALU / Transfer Immediate.
+def Hexagon_A2_tfril:
+ si_lo_ALU32_siu16 <"", int_hexagon_A2_tfril>;
+def Hexagon_A2_tfrih:
+ si_hi_ALU32_siu16 <"", int_hexagon_A2_tfrih>;
+def Hexagon_A2_tfrsi:
+ si_ALU32_s16 <"", int_hexagon_A2_tfrsi>;
+def Hexagon_A2_tfrpi:
+ di_ALU32_s8 <"", int_hexagon_A2_tfrpi>;
+
+// ALU32 / ALU / Transfer Register.
+def Hexagon_A2_tfr:
+ si_ALU32_si_tfr <"", int_hexagon_A2_tfr>;
+
+/********************************************************************
+* ALU32/PERM *
+*********************************************************************/
+
+// ALU32 / PERM / Combine.
+def Hexagon_A2_combinew:
+ di_ALU32_sisi <"combine", int_hexagon_A2_combinew>;
+def Hexagon_A2_combine_hh:
+ si_MInst_sisi_hh <"combine", int_hexagon_A2_combine_hh>;
+def Hexagon_A2_combine_lh:
+ si_MInst_sisi_lh <"combine", int_hexagon_A2_combine_lh>;
+def Hexagon_A2_combine_hl:
+ si_MInst_sisi_hl <"combine", int_hexagon_A2_combine_hl>;
+def Hexagon_A2_combine_ll:
+ si_MInst_sisi_ll <"combine", int_hexagon_A2_combine_ll>;
+def Hexagon_A2_combineii:
+ di_MInst_s8s8 <"combine", int_hexagon_A2_combineii>;
+
+// ALU32 / PERM / Mux.
+def Hexagon_C2_mux:
+ si_ALU32_qisisi <"mux", int_hexagon_C2_mux>;
+def Hexagon_C2_muxri:
+ si_ALU32_qis8si <"mux", int_hexagon_C2_muxri>;
+def Hexagon_C2_muxir:
+ si_ALU32_qisis8 <"mux", int_hexagon_C2_muxir>;
+def Hexagon_C2_muxii:
+ si_ALU32_qis8s8 <"mux", int_hexagon_C2_muxii>;
+
+// ALU32 / PERM / Shift halfword.
+def Hexagon_A2_aslh:
+ si_ALU32_si <"aslh", int_hexagon_A2_aslh>;
+def Hexagon_A2_asrh:
+ si_ALU32_si <"asrh", int_hexagon_A2_asrh>;
+def SI_to_SXTHI_asrh:
+ si_ALU32_si <"asrh", int_hexagon_SI_to_SXTHI_asrh>;
+
+// ALU32 / PERM / Sign/zero extend.
+def Hexagon_A2_sxth:
+ si_ALU32_si <"sxth", int_hexagon_A2_sxth>;
+def Hexagon_A2_sxtb:
+ si_ALU32_si <"sxtb", int_hexagon_A2_sxtb>;
+def Hexagon_A2_zxth:
+ si_ALU32_si <"zxth", int_hexagon_A2_zxth>;
+def Hexagon_A2_zxtb:
+ si_ALU32_si <"zxtb", int_hexagon_A2_zxtb>;
+
+/********************************************************************
+* ALU32/PRED *
+*********************************************************************/
+
+// ALU32 / PRED / Compare.
+def Hexagon_C2_cmpeq:
+ qi_ALU32_sisi <"cmp.eq", int_hexagon_C2_cmpeq>;
+def Hexagon_C2_cmpeqi:
+ qi_ALU32_sis10 <"cmp.eq", int_hexagon_C2_cmpeqi>;
+def Hexagon_C2_cmpgei:
+ qi_ALU32_sis8 <"cmp.ge", int_hexagon_C2_cmpgei>;
+def Hexagon_C2_cmpgeui:
+ qi_ALU32_siu8 <"cmp.geu", int_hexagon_C2_cmpgeui>;
+def Hexagon_C2_cmpgt:
+ qi_ALU32_sisi <"cmp.gt", int_hexagon_C2_cmpgt>;
+def Hexagon_C2_cmpgti:
+ qi_ALU32_sis10 <"cmp.gt", int_hexagon_C2_cmpgti>;
+def Hexagon_C2_cmpgtu:
+ qi_ALU32_sisi <"cmp.gtu", int_hexagon_C2_cmpgtu>;
+def Hexagon_C2_cmpgtui:
+ qi_ALU32_siu9 <"cmp.gtu", int_hexagon_C2_cmpgtui>;
+def Hexagon_C2_cmplt:
+ qi_ALU32_sisi <"cmp.lt", int_hexagon_C2_cmplt>;
+def Hexagon_C2_cmpltu:
+ qi_ALU32_sisi <"cmp.ltu", int_hexagon_C2_cmpltu>;
+
+/********************************************************************
+* ALU32/VH *
+*********************************************************************/
+
+// ALU32 / VH / Vector add halfwords.
+// Rd32=vadd[u]h(Rs32,Rt32)[:sat]
+def Hexagon_A2_svaddh:
+ si_ALU32_sisi <"vaddh", int_hexagon_A2_svaddh>;
+def Hexagon_A2_svaddhs:
+ si_ALU32_sisi_sat <"vaddh", int_hexagon_A2_svaddhs>;
+def Hexagon_A2_svadduhs:
+ si_ALU32_sisi_sat <"vadduh", int_hexagon_A2_svadduhs>;
+
+// ALU32 / VH / Vector average halfwords.
+def Hexagon_A2_svavgh:
+ si_ALU32_sisi <"vavgh", int_hexagon_A2_svavgh>;
+def Hexagon_A2_svavghs:
+ si_ALU32_sisi_rnd <"vavgh", int_hexagon_A2_svavghs>;
+def Hexagon_A2_svnavgh:
+ si_ALU32_sisi <"vnavgh", int_hexagon_A2_svnavgh>;
+
+// ALU32 / VH / Vector subtract halfwords.
+def Hexagon_A2_svsubh:
+ si_ALU32_sisi <"vsubh", int_hexagon_A2_svsubh>;
+def Hexagon_A2_svsubhs:
+ si_ALU32_sisi_sat <"vsubh", int_hexagon_A2_svsubhs>;
+def Hexagon_A2_svsubuhs:
+ si_ALU32_sisi_sat <"vsubuh", int_hexagon_A2_svsubuhs>;
+
+/********************************************************************
+* ALU64/ALU *
+*********************************************************************/
+
+// ALU64 / ALU / Add.
+def Hexagon_A2_addp:
+ di_ALU64_didi <"add", int_hexagon_A2_addp>;
+def Hexagon_A2_addsat:
+ si_ALU64_sisi_sat <"add", int_hexagon_A2_addsat>;
+
+// ALU64 / ALU / Add halfword.
+// Even though the definition says hl, it should be lh -
+// so DON'T change the class "si_ALU64_sisi_l16_lh" that it inherits.
+def Hexagon_A2_addh_l16_hl:
+ si_ALU64_sisi_l16_lh <"add", int_hexagon_A2_addh_l16_hl>;
+def Hexagon_A2_addh_l16_ll:
+ si_ALU64_sisi_l16_ll <"add", int_hexagon_A2_addh_l16_ll>;
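+// (Per the .L/.H operand convention used throughout this file, the inherited
+// lh class pairs $src1.L with $src2.H, which is the behavior
+// int_hexagon_A2_addh_l16_hl expects despite the hl in its name.)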
+
+def Hexagon_A2_addh_l16_sat_hl:
+ si_ALU64_sisi_l16_sat_lh <"add", int_hexagon_A2_addh_l16_sat_hl>;
+def Hexagon_A2_addh_l16_sat_ll:
+ si_ALU64_sisi_l16_sat_ll <"add", int_hexagon_A2_addh_l16_sat_ll>;
+
+def Hexagon_A2_addh_h16_hh:
+ si_ALU64_sisi_h16_hh <"add", int_hexagon_A2_addh_h16_hh>;
+def Hexagon_A2_addh_h16_hl:
+ si_ALU64_sisi_h16_hl <"add", int_hexagon_A2_addh_h16_hl>;
+def Hexagon_A2_addh_h16_lh:
+ si_ALU64_sisi_h16_lh <"add", int_hexagon_A2_addh_h16_lh>;
+def Hexagon_A2_addh_h16_ll:
+ si_ALU64_sisi_h16_ll <"add", int_hexagon_A2_addh_h16_ll>;
+
+def Hexagon_A2_addh_h16_sat_hh:
+ si_ALU64_sisi_h16_sat_hh <"add", int_hexagon_A2_addh_h16_sat_hh>;
+def Hexagon_A2_addh_h16_sat_hl:
+ si_ALU64_sisi_h16_sat_hl <"add", int_hexagon_A2_addh_h16_sat_hl>;
+def Hexagon_A2_addh_h16_sat_lh:
+ si_ALU64_sisi_h16_sat_lh <"add", int_hexagon_A2_addh_h16_sat_lh>;
+def Hexagon_A2_addh_h16_sat_ll:
+ si_ALU64_sisi_h16_sat_ll <"add", int_hexagon_A2_addh_h16_sat_ll>;
+
+// ALU64 / ALU / Compare.
+def Hexagon_C2_cmpeqp:
+ qi_ALU64_didi <"cmp.eq", int_hexagon_C2_cmpeqp>;
+def Hexagon_C2_cmpgtp:
+ qi_ALU64_didi <"cmp.gt", int_hexagon_C2_cmpgtp>;
+def Hexagon_C2_cmpgtup:
+ qi_ALU64_didi <"cmp.gtu", int_hexagon_C2_cmpgtup>;
+
+// ALU64 / ALU / Logical operations.
+def Hexagon_A2_andp:
+ di_ALU64_didi <"and", int_hexagon_A2_andp>;
+def Hexagon_A2_orp:
+ di_ALU64_didi <"or", int_hexagon_A2_orp>;
+def Hexagon_A2_xorp:
+ di_ALU64_didi <"xor", int_hexagon_A2_xorp>;
+
+// ALU64 / ALU / Maximum.
+def Hexagon_A2_max:
+ si_ALU64_sisi <"max", int_hexagon_A2_max>;
+def Hexagon_A2_maxu:
+ si_ALU64_sisi <"maxu", int_hexagon_A2_maxu>;
+
+// ALU64 / ALU / Minimum.
+def Hexagon_A2_min:
+ si_ALU64_sisi <"min", int_hexagon_A2_min>;
+def Hexagon_A2_minu:
+ si_ALU64_sisi <"minu", int_hexagon_A2_minu>;
+
+// ALU64 / ALU / Subtract.
+def Hexagon_A2_subp:
+ di_ALU64_didi <"sub", int_hexagon_A2_subp>;
+def Hexagon_A2_subsat:
+ si_ALU64_sisi_sat <"sub", int_hexagon_A2_subsat>;
+
+// ALU64 / ALU / Subtract halfword.
+// Even though the definition says hl, it should be lh -
+// so DON'T change the class "si_ALU64_sisi_l16_lh" that it inherits.
+def Hexagon_A2_subh_l16_hl:
+ si_ALU64_sisi_l16_lh <"sub", int_hexagon_A2_subh_l16_hl>;
+def Hexagon_A2_subh_l16_ll:
+ si_ALU64_sisi_l16_ll <"sub", int_hexagon_A2_subh_l16_ll>;
+
+def Hexagon_A2_subh_l16_sat_hl:
+ si_ALU64_sisi_l16_sat_lh <"sub", int_hexagon_A2_subh_l16_sat_hl>;
+def Hexagon_A2_subh_l16_sat_ll:
+ si_ALU64_sisi_l16_sat_ll <"sub", int_hexagon_A2_subh_l16_sat_ll>;
+
+def Hexagon_A2_subh_h16_hh:
+ si_ALU64_sisi_h16_hh <"sub", int_hexagon_A2_subh_h16_hh>;
+def Hexagon_A2_subh_h16_hl:
+ si_ALU64_sisi_h16_hl <"sub", int_hexagon_A2_subh_h16_hl>;
+def Hexagon_A2_subh_h16_lh:
+ si_ALU64_sisi_h16_lh <"sub", int_hexagon_A2_subh_h16_lh>;
+def Hexagon_A2_subh_h16_ll:
+ si_ALU64_sisi_h16_ll <"sub", int_hexagon_A2_subh_h16_ll>;
+
+def Hexagon_A2_subh_h16_sat_hh:
+ si_ALU64_sisi_h16_sat_hh <"sub", int_hexagon_A2_subh_h16_sat_hh>;
+def Hexagon_A2_subh_h16_sat_hl:
+ si_ALU64_sisi_h16_sat_hl <"sub", int_hexagon_A2_subh_h16_sat_hl>;
+def Hexagon_A2_subh_h16_sat_lh:
+ si_ALU64_sisi_h16_sat_lh <"sub", int_hexagon_A2_subh_h16_sat_lh>;
+def Hexagon_A2_subh_h16_sat_ll:
+ si_ALU64_sisi_h16_sat_ll <"sub", int_hexagon_A2_subh_h16_sat_ll>;
+
+// ALU64 / ALU / Transfer register.
+def Hexagon_A2_tfrp:
+ di_ALU64_di <"", int_hexagon_A2_tfrp>;
+
+/********************************************************************
+* ALU64/BIT *
+*********************************************************************/
+
+// ALU64 / BIT / Masked parity.
+def Hexagon_S2_parityp:
+ si_ALU64_didi <"parity", int_hexagon_S2_parityp>;
+
+/********************************************************************
+* ALU64/PERM *
+*********************************************************************/
+
+// ALU64 / PERM / Vector pack high and low halfwords.
+def Hexagon_S2_packhl:
+ di_ALU64_sisi <"packhl", int_hexagon_S2_packhl>;
+
+/********************************************************************
+* ALU64/VB *
+*********************************************************************/
+
+// ALU64 / VB / Vector add unsigned bytes.
+def Hexagon_A2_vaddub:
+ di_ALU64_didi <"vaddub", int_hexagon_A2_vaddub>;
+def Hexagon_A2_vaddubs:
+ di_ALU64_didi_sat <"vaddub", int_hexagon_A2_vaddubs>;
+
+// ALU64 / VB / Vector average unsigned bytes.
+def Hexagon_A2_vavgub:
+ di_ALU64_didi <"vavgub", int_hexagon_A2_vavgub>;
+def Hexagon_A2_vavgubr:
+ di_ALU64_didi_rnd <"vavgub", int_hexagon_A2_vavgubr>;
+
+// ALU64 / VB / Vector compare unsigned bytes.
+def Hexagon_A2_vcmpbeq:
+ qi_ALU64_didi <"vcmpb.eq", int_hexagon_A2_vcmpbeq>;
+def Hexagon_A2_vcmpbgtu:
+ qi_ALU64_didi <"vcmpb.gtu",int_hexagon_A2_vcmpbgtu>;
+
+// ALU64 / VB / Vector maximum/minimum unsigned bytes.
+def Hexagon_A2_vmaxub:
+ di_ALU64_didi <"vmaxub", int_hexagon_A2_vmaxub>;
+def Hexagon_A2_vminub:
+ di_ALU64_didi <"vminub", int_hexagon_A2_vminub>;
+
+// ALU64 / VB / Vector subtract unsigned bytes.
+def Hexagon_A2_vsubub:
+ di_ALU64_didi <"vsubub", int_hexagon_A2_vsubub>;
+def Hexagon_A2_vsububs:
+ di_ALU64_didi_sat <"vsubub", int_hexagon_A2_vsububs>;
+
+// ALU64 / VB / Vector mux.
+def Hexagon_C2_vmux:
+ di_ALU64_qididi <"vmux", int_hexagon_C2_vmux>;
+
+
+/********************************************************************
+* ALU64/VH *
+*********************************************************************/
+
+// ALU64 / VH / Vector add halfwords.
+// Rdd64=vadd[u]h(Rss64,Rtt64)[:sat]
+def Hexagon_A2_vaddh:
+ di_ALU64_didi <"vaddh", int_hexagon_A2_vaddh>;
+def Hexagon_A2_vaddhs:
+ di_ALU64_didi_sat <"vaddh", int_hexagon_A2_vaddhs>;
+def Hexagon_A2_vadduhs:
+ di_ALU64_didi_sat <"vadduh", int_hexagon_A2_vadduhs>;
+
+// ALU64 / VH / Vector average halfwords.
+// Rdd64=v[n]avg[u]h(Rss64,Rtt64)[:rnd|:crnd][:sat]
+def Hexagon_A2_vavgh:
+ di_ALU64_didi <"vavgh", int_hexagon_A2_vavgh>;
+def Hexagon_A2_vavghcr:
+ di_ALU64_didi_crnd <"vavgh", int_hexagon_A2_vavghcr>;
+def Hexagon_A2_vavghr:
+ di_ALU64_didi_rnd <"vavgh", int_hexagon_A2_vavghr>;
+def Hexagon_A2_vavguh:
+ di_ALU64_didi <"vavguh", int_hexagon_A2_vavguh>;
+def Hexagon_A2_vavguhr:
+ di_ALU64_didi_rnd <"vavguh", int_hexagon_A2_vavguhr>;
+def Hexagon_A2_vnavgh:
+ di_ALU64_didi <"vnavgh", int_hexagon_A2_vnavgh>;
+def Hexagon_A2_vnavghcr:
+ di_ALU64_didi_crnd_sat <"vnavgh", int_hexagon_A2_vnavghcr>;
+def Hexagon_A2_vnavghr:
+ di_ALU64_didi_rnd_sat <"vnavgh", int_hexagon_A2_vnavghr>;
+
+// ALU64 / VH / Vector compare halfwords.
+def Hexagon_A2_vcmpheq:
+ qi_ALU64_didi <"vcmph.eq", int_hexagon_A2_vcmpheq>;
+def Hexagon_A2_vcmphgt:
+ qi_ALU64_didi <"vcmph.gt", int_hexagon_A2_vcmphgt>;
+def Hexagon_A2_vcmphgtu:
+ qi_ALU64_didi <"vcmph.gtu",int_hexagon_A2_vcmphgtu>;
+
+// ALU64 / VH / Vector maximum halfwords.
+def Hexagon_A2_vmaxh:
+ di_ALU64_didi <"vmaxh", int_hexagon_A2_vmaxh>;
+def Hexagon_A2_vmaxuh:
+ di_ALU64_didi <"vmaxuh", int_hexagon_A2_vmaxuh>;
+
+// ALU64 / VH / Vector minimum halfwords.
+def Hexagon_A2_vminh:
+ di_ALU64_didi <"vminh", int_hexagon_A2_vminh>;
+def Hexagon_A2_vminuh:
+ di_ALU64_didi <"vminuh", int_hexagon_A2_vminuh>;
+
+// ALU64 / VH / Vector subtract halfwords.
+def Hexagon_A2_vsubh:
+ di_ALU64_didi <"vsubh", int_hexagon_A2_vsubh>;
+def Hexagon_A2_vsubhs:
+ di_ALU64_didi_sat <"vsubh", int_hexagon_A2_vsubhs>;
+def Hexagon_A2_vsubuhs:
+ di_ALU64_didi_sat <"vsubuh", int_hexagon_A2_vsubuhs>;
+
+
+/********************************************************************
+* ALU64/VW *
+*********************************************************************/
+
+// ALU64 / VW / Vector add words.
+// Rdd64=vaddw(Rss64,Rtt64)[:sat]
+def Hexagon_A2_vaddw:
+ di_ALU64_didi <"vaddw", int_hexagon_A2_vaddw>;
+def Hexagon_A2_vaddws:
+ di_ALU64_didi_sat <"vaddw", int_hexagon_A2_vaddws>;
+
+// ALU64 / VW / Vector average words.
+def Hexagon_A2_vavguw:
+ di_ALU64_didi <"vavguw", int_hexagon_A2_vavguw>;
+def Hexagon_A2_vavguwr:
+ di_ALU64_didi_rnd <"vavguw", int_hexagon_A2_vavguwr>;
+def Hexagon_A2_vavgw:
+ di_ALU64_didi <"vavgw", int_hexagon_A2_vavgw>;
+def Hexagon_A2_vavgwcr:
+ di_ALU64_didi_crnd <"vavgw", int_hexagon_A2_vavgwcr>;
+def Hexagon_A2_vavgwr:
+ di_ALU64_didi_rnd <"vavgw", int_hexagon_A2_vavgwr>;
+def Hexagon_A2_vnavgw:
+ di_ALU64_didi <"vnavgw", int_hexagon_A2_vnavgw>;
+def Hexagon_A2_vnavgwcr:
+ di_ALU64_didi_crnd_sat <"vnavgw", int_hexagon_A2_vnavgwcr>;
+def Hexagon_A2_vnavgwr:
+ di_ALU64_didi_rnd_sat <"vnavgw", int_hexagon_A2_vnavgwr>;
+
+// ALU64 / VW / Vector compare words.
+def Hexagon_A2_vcmpweq:
+ qi_ALU64_didi <"vcmpw.eq", int_hexagon_A2_vcmpweq>;
+def Hexagon_A2_vcmpwgt:
+ qi_ALU64_didi <"vcmpw.gt", int_hexagon_A2_vcmpwgt>;
+def Hexagon_A2_vcmpwgtu:
+ qi_ALU64_didi <"vcmpw.gtu",int_hexagon_A2_vcmpwgtu>;
+
+// ALU64 / VW / Vector maximum words.
+def Hexagon_A2_vmaxw:
+ di_ALU64_didi <"vmaxw", int_hexagon_A2_vmaxw>;
+def Hexagon_A2_vmaxuw:
+ di_ALU64_didi <"vmaxuw", int_hexagon_A2_vmaxuw>;
+
+// ALU64 / VW / Vector minimum words.
+def Hexagon_A2_vminw:
+ di_ALU64_didi <"vminw", int_hexagon_A2_vminw>;
+def Hexagon_A2_vminuw:
+ di_ALU64_didi <"vminuw", int_hexagon_A2_vminuw>;
+
+// ALU64 / VW / Vector subtract words.
+def Hexagon_A2_vsubw:
+ di_ALU64_didi <"vsubw", int_hexagon_A2_vsubw>;
+def Hexagon_A2_vsubws:
+ di_ALU64_didi_sat <"vsubw", int_hexagon_A2_vsubws>;
+
+
+/********************************************************************
+* CR *
+*********************************************************************/
+
+// CR / Logical reductions on predicates.
+def Hexagon_C2_all8:
+ qi_SInst_qi <"all8", int_hexagon_C2_all8>;
+def Hexagon_C2_any8:
+ qi_SInst_qi <"any8", int_hexagon_C2_any8>;
+
+// CR / Logical operations on predicates.
+def Hexagon_C2_pxfer_map:
+ qi_SInst_qi_pxfer <"", int_hexagon_C2_pxfer_map>;
+def Hexagon_C2_and:
+ qi_SInst_qiqi <"and", int_hexagon_C2_and>;
+def Hexagon_C2_andn:
+ qi_SInst_qiqi_neg <"and", int_hexagon_C2_andn>;
+def Hexagon_C2_not:
+ qi_SInst_qi <"not", int_hexagon_C2_not>;
+def Hexagon_C2_or:
+ qi_SInst_qiqi <"or", int_hexagon_C2_or>;
+def Hexagon_C2_orn:
+ qi_SInst_qiqi_neg <"or", int_hexagon_C2_orn>;
+def Hexagon_C2_xor:
+ qi_SInst_qiqi <"xor", int_hexagon_C2_xor>;
+
+
+/********************************************************************
+* MTYPE/ALU *
+*********************************************************************/
+
+// MTYPE / ALU / Add and accumulate.
+def Hexagon_M2_acci:
+ si_MInst_sisisi_acc <"add", int_hexagon_M2_acci>;
+def Hexagon_M2_accii:
+ si_MInst_sisis8_acc <"add", int_hexagon_M2_accii>;
+def Hexagon_M2_nacci:
+ si_MInst_sisisi_nac <"add", int_hexagon_M2_nacci>;
+def Hexagon_M2_naccii:
+ si_MInst_sisis8_nac <"add", int_hexagon_M2_naccii>;
+
+// MTYPE / ALU / Subtract and accumulate.
+def Hexagon_M2_subacc:
+ si_MInst_sisisi_acc <"sub", int_hexagon_M2_subacc>;
+
+// MTYPE / ALU / Vector absolute difference.
+def Hexagon_M2_vabsdiffh:
+ di_MInst_didi <"vabsdiffh",int_hexagon_M2_vabsdiffh>;
+def Hexagon_M2_vabsdiffw:
+ di_MInst_didi <"vabsdiffw",int_hexagon_M2_vabsdiffw>;
+
+// MTYPE / ALU / XOR and xor with destination.
+def Hexagon_M2_xor_xacc:
+ si_MInst_sisisi_xacc <"xor", int_hexagon_M2_xor_xacc>;
+
+
+/********************************************************************
+* MTYPE/COMPLEX *
+*********************************************************************/
+
+// MTYPE / COMPLEX / Complex multiply.
+// Rdd[-+]=cmpy(Rs,Rt)[:<<1]:sat
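+// For example, Hexagon_M2_cmpys_s1 below assembles as
+// "$dst = cmpy($src1, $src2):<<1:sat" (via di_MInst_sisi_s1_sat above).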
+def Hexagon_M2_cmpys_s1:
+ di_MInst_sisi_s1_sat <"cmpy", int_hexagon_M2_cmpys_s1>;
+def Hexagon_M2_cmpys_s0:
+ di_MInst_sisi_sat <"cmpy", int_hexagon_M2_cmpys_s0>;
+def Hexagon_M2_cmpysc_s1:
+ di_MInst_sisi_s1_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s1>;
+def Hexagon_M2_cmpysc_s0:
+ di_MInst_sisi_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s0>;
+
+def Hexagon_M2_cmacs_s1:
+ di_MInst_disisi_acc_s1_sat <"cmpy", int_hexagon_M2_cmacs_s1>;
+def Hexagon_M2_cmacs_s0:
+ di_MInst_disisi_acc_sat <"cmpy", int_hexagon_M2_cmacs_s0>;
+def Hexagon_M2_cmacsc_s1:
+ di_MInst_disisi_acc_s1_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s1>;
+def Hexagon_M2_cmacsc_s0:
+ di_MInst_disisi_acc_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s0>;
+
+def Hexagon_M2_cnacs_s1:
+ di_MInst_disisi_nac_s1_sat <"cmpy", int_hexagon_M2_cnacs_s1>;
+def Hexagon_M2_cnacs_s0:
+ di_MInst_disisi_nac_sat <"cmpy", int_hexagon_M2_cnacs_s0>;
+def Hexagon_M2_cnacsc_s1:
+ di_MInst_disisi_nac_s1_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s1>;
+def Hexagon_M2_cnacsc_s0:
+ di_MInst_disisi_nac_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s0>;
+
+// MTYPE / COMPLEX / Complex multiply real or imaginary.
+def Hexagon_M2_cmpyr_s0:
+ di_MInst_sisi <"cmpyr", int_hexagon_M2_cmpyr_s0>;
+def Hexagon_M2_cmacr_s0:
+ di_MInst_disisi_acc <"cmpyr", int_hexagon_M2_cmacr_s0>;
+
+def Hexagon_M2_cmpyi_s0:
+ di_MInst_sisi <"cmpyi", int_hexagon_M2_cmpyi_s0>;
+def Hexagon_M2_cmaci_s0:
+ di_MInst_disisi_acc <"cmpyi", int_hexagon_M2_cmaci_s0>;
+
+// MTYPE / COMPLEX / Complex multiply with round and pack.
+// Rd32=cmpy(Rs32,Rt32[*])[:<<1]:rnd:sat
+def Hexagon_M2_cmpyrs_s0:
+ si_MInst_sisi_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s0>;
+def Hexagon_M2_cmpyrs_s1:
+ si_MInst_sisi_s1_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s1>;
+
+def Hexagon_M2_cmpyrsc_s0:
+ si_MInst_sisi_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s0>;
+def Hexagon_M2_cmpyrsc_s1:
+ si_MInst_sisi_s1_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s1>;
+
+// MTYPE / COMPLEX / Vector complex multiply real or imaginary.
+def Hexagon_M2_vcmpy_s0_sat_i:
+ di_MInst_didi_sat <"vcmpyi", int_hexagon_M2_vcmpy_s0_sat_i>;
+def Hexagon_M2_vcmpy_s1_sat_i:
+ di_MInst_didi_s1_sat <"vcmpyi", int_hexagon_M2_vcmpy_s1_sat_i>;
+
+def Hexagon_M2_vcmpy_s0_sat_r:
+ di_MInst_didi_sat <"vcmpyr", int_hexagon_M2_vcmpy_s0_sat_r>;
+def Hexagon_M2_vcmpy_s1_sat_r:
+ di_MInst_didi_s1_sat <"vcmpyr", int_hexagon_M2_vcmpy_s1_sat_r>;
+
+def Hexagon_M2_vcmac_s0_sat_i:
+ di_MInst_dididi_acc_sat <"vcmpyi", int_hexagon_M2_vcmac_s0_sat_i>;
+def Hexagon_M2_vcmac_s0_sat_r:
+ di_MInst_dididi_acc_sat <"vcmpyr", int_hexagon_M2_vcmac_s0_sat_r>;
+
+// MTYPE / COMPLEX / Vector reduce complex multiply real or imaginary.
+def Hexagon_M2_vrcmpyi_s0:
+ di_MInst_didi <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0>;
+def Hexagon_M2_vrcmpyr_s0:
+ di_MInst_didi <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0>;
+
+def Hexagon_M2_vrcmpyi_s0c:
+ di_MInst_didi_conj <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0c>;
+def Hexagon_M2_vrcmpyr_s0c:
+ di_MInst_didi_conj <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0c>;
+
+def Hexagon_M2_vrcmaci_s0:
+ di_MInst_dididi_acc <"vrcmpyi", int_hexagon_M2_vrcmaci_s0>;
+def Hexagon_M2_vrcmacr_s0:
+ di_MInst_dididi_acc <"vrcmpyr", int_hexagon_M2_vrcmacr_s0>;
+
+def Hexagon_M2_vrcmaci_s0c:
+ di_MInst_dididi_acc_conj <"vrcmpyi", int_hexagon_M2_vrcmaci_s0c>;
+def Hexagon_M2_vrcmacr_s0c:
+ di_MInst_dididi_acc_conj <"vrcmpyr", int_hexagon_M2_vrcmacr_s0c>;
+
+
+/********************************************************************
+* MTYPE/MPYH *
+*********************************************************************/
+
+// MTYPE / MPYH / Multiply and use lower result.
+//def Hexagon_M2_mpysmi:
+// si_MInst_sim9 <"mpyi", int_hexagon_M2_mpysmi>;
+def Hexagon_M2_mpyi:
+ si_MInst_sisi <"mpyi", int_hexagon_M2_mpyi>;
+def Hexagon_M2_mpyui:
+ si_MInst_sisi <"mpyui", int_hexagon_M2_mpyui>;
+def Hexagon_M2_macsip:
+ si_MInst_sisiu8_acc <"mpyi", int_hexagon_M2_macsip>;
+def Hexagon_M2_maci:
+ si_MInst_sisisi_acc <"mpyi", int_hexagon_M2_maci>;
+def Hexagon_M2_macsin:
+ si_MInst_sisiu8_nac <"mpyi", int_hexagon_M2_macsin>;
+
+// MTYPE / MPYH / Multiply word by half (32x16).
+//Rdd[+]=vmpywoh(Rss,Rtt)[:<<1][:rnd][:sat]
+//Rdd[+]=vmpyweh(Rss,Rtt)[:<<1][:rnd][:sat]
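+// For example, Hexagon_M2_mmpyl_s1 below assembles as
+// "$dst = vmpyweh($src1, $src2):<<1:sat" (via di_MInst_didi_s1_sat above).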
+def Hexagon_M2_mmpyl_rs1:
+ di_MInst_didi_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs1>;
+def Hexagon_M2_mmpyl_s1:
+ di_MInst_didi_s1_sat <"vmpyweh", int_hexagon_M2_mmpyl_s1>;
+def Hexagon_M2_mmpyl_rs0:
+ di_MInst_didi_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs0>;
+def Hexagon_M2_mmpyl_s0:
+ di_MInst_didi_sat <"vmpyweh", int_hexagon_M2_mmpyl_s0>;
+def Hexagon_M2_mmpyh_rs1:
+ di_MInst_didi_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs1>;
+def Hexagon_M2_mmpyh_s1:
+ di_MInst_didi_s1_sat <"vmpywoh", int_hexagon_M2_mmpyh_s1>;
+def Hexagon_M2_mmpyh_rs0:
+ di_MInst_didi_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs0>;
+def Hexagon_M2_mmpyh_s0:
+ di_MInst_didi_sat <"vmpywoh", int_hexagon_M2_mmpyh_s0>;
+def Hexagon_M2_mmacls_rs1:
+ di_MInst_dididi_acc_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs1>;
+def Hexagon_M2_mmacls_s1:
+ di_MInst_dididi_acc_s1_sat <"vmpyweh", int_hexagon_M2_mmacls_s1>;
+def Hexagon_M2_mmacls_rs0:
+ di_MInst_dididi_acc_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs0>;
+def Hexagon_M2_mmacls_s0:
+ di_MInst_dididi_acc_sat <"vmpyweh", int_hexagon_M2_mmacls_s0>;
+def Hexagon_M2_mmachs_rs1:
+ di_MInst_dididi_acc_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs1>;
+def Hexagon_M2_mmachs_s1:
+ di_MInst_dididi_acc_s1_sat <"vmpywoh", int_hexagon_M2_mmachs_s1>;
+def Hexagon_M2_mmachs_rs0:
+ di_MInst_dididi_acc_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs0>;
+def Hexagon_M2_mmachs_s0:
+ di_MInst_dididi_acc_sat <"vmpywoh", int_hexagon_M2_mmachs_s0>;
+
+// MTYPE / MPYH / Multiply word by unsigned half (32x16).
+//Rdd[+]=vmpywouh(Rss,Rtt)[:<<1][:rnd][:sat]
+//Rdd[+]=vmpyweuh(Rss,Rtt)[:<<1][:rnd][:sat]
+def Hexagon_M2_mmpyul_rs1:
+ di_MInst_didi_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs1>;
+def Hexagon_M2_mmpyul_s1:
+ di_MInst_didi_s1_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s1>;
+def Hexagon_M2_mmpyul_rs0:
+ di_MInst_didi_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs0>;
+def Hexagon_M2_mmpyul_s0:
+ di_MInst_didi_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s0>;
+def Hexagon_M2_mmpyuh_rs1:
+ di_MInst_didi_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs1>;
+def Hexagon_M2_mmpyuh_s1:
+ di_MInst_didi_s1_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s1>;
+def Hexagon_M2_mmpyuh_rs0:
+ di_MInst_didi_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs0>;
+def Hexagon_M2_mmpyuh_s0:
+ di_MInst_didi_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s0>;
+def Hexagon_M2_mmaculs_rs1:
+ di_MInst_dididi_acc_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs1>;
+def Hexagon_M2_mmaculs_s1:
+ di_MInst_dididi_acc_s1_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s1>;
+def Hexagon_M2_mmaculs_rs0:
+ di_MInst_dididi_acc_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs0>;
+def Hexagon_M2_mmaculs_s0:
+ di_MInst_dididi_acc_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s0>;
+def Hexagon_M2_mmacuhs_rs1:
+ di_MInst_dididi_acc_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs1>;
+def Hexagon_M2_mmacuhs_s1:
+ di_MInst_dididi_acc_s1_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s1>;
+def Hexagon_M2_mmacuhs_rs0:
+ di_MInst_dididi_acc_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs0>;
+def Hexagon_M2_mmacuhs_s0:
+ di_MInst_dididi_acc_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s0>;
+
+// MTYPE / MPYH / Multiply and use upper result.
+def Hexagon_M2_hmmpyh_rs1:
+ si_MInst_sisi_h_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyh_rs1>;
+def Hexagon_M2_hmmpyl_rs1:
+ si_MInst_sisi_l_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyl_rs1>;
+def Hexagon_M2_mpy_up:
+ si_MInst_sisi <"mpy", int_hexagon_M2_mpy_up>;
+def Hexagon_M2_dpmpyss_rnd_s0:
+ si_MInst_sisi_rnd <"mpy", int_hexagon_M2_dpmpyss_rnd_s0>;
+def Hexagon_M2_mpyu_up:
+ si_MInst_sisi <"mpyu", int_hexagon_M2_mpyu_up>;
+
+// MTYPE / MPYH / Multiply and use full result.
+def Hexagon_M2_dpmpyuu_s0:
+ di_MInst_sisi <"mpyu", int_hexagon_M2_dpmpyuu_s0>;
+def Hexagon_M2_dpmpyuu_acc_s0:
+ di_MInst_disisi_acc <"mpyu", int_hexagon_M2_dpmpyuu_acc_s0>;
+def Hexagon_M2_dpmpyuu_nac_s0:
+ di_MInst_disisi_nac <"mpyu", int_hexagon_M2_dpmpyuu_nac_s0>;
+def Hexagon_M2_dpmpyss_s0:
+ di_MInst_sisi <"mpy", int_hexagon_M2_dpmpyss_s0>;
+def Hexagon_M2_dpmpyss_acc_s0:
+ di_MInst_disisi_acc <"mpy", int_hexagon_M2_dpmpyss_acc_s0>;
+def Hexagon_M2_dpmpyss_nac_s0:
+ di_MInst_disisi_nac <"mpy", int_hexagon_M2_dpmpyss_nac_s0>;
+
+
+/********************************************************************
+* MTYPE/MPYS *
+*********************************************************************/
+
+// MTYPE / MPYS / Scalar 16x16 multiply signed.
+//Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<0|:<<1][:rnd][:sat]
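+// For example, Hexagon_M2_mpy_sat_rnd_hh_s1 below assembles as
+// "$dst = mpy($src1.H, $src2.H):<<1:rnd:sat"
+// (via si_MInst_sisi_sat_rnd_hh_s1 above).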
+def Hexagon_M2_mpy_hh_s0:
+ si_MInst_sisi_hh <"mpy", int_hexagon_M2_mpy_hh_s0>;
+def Hexagon_M2_mpy_hh_s1:
+ si_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpy_hh_s1>;
+def Hexagon_M2_mpy_rnd_hh_s1:
+ si_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_rnd_hh_s1>;
+def Hexagon_M2_mpy_sat_rnd_hh_s1:
+ si_MInst_sisi_sat_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s1>;
+def Hexagon_M2_mpy_sat_hh_s1:
+ si_MInst_sisi_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_hh_s1>;
+def Hexagon_M2_mpy_rnd_hh_s0:
+ si_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpy_rnd_hh_s0>;
+def Hexagon_M2_mpy_sat_rnd_hh_s0:
+ si_MInst_sisi_sat_rnd_hh <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s0>;
+def Hexagon_M2_mpy_sat_hh_s0:
+ si_MInst_sisi_sat_hh <"mpy", int_hexagon_M2_mpy_sat_hh_s0>;
+
+def Hexagon_M2_mpy_hl_s0:
+ si_MInst_sisi_hl <"mpy", int_hexagon_M2_mpy_hl_s0>;
+def Hexagon_M2_mpy_hl_s1:
+ si_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpy_hl_s1>;
+def Hexagon_M2_mpy_rnd_hl_s1:
+ si_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_rnd_hl_s1>;
+def Hexagon_M2_mpy_sat_rnd_hl_s1:
+ si_MInst_sisi_sat_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s1>;
+def Hexagon_M2_mpy_sat_hl_s1:
+ si_MInst_sisi_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_hl_s1>;
+def Hexagon_M2_mpy_rnd_hl_s0:
+ si_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpy_rnd_hl_s0>;
+def Hexagon_M2_mpy_sat_rnd_hl_s0:
+ si_MInst_sisi_sat_rnd_hl <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s0>;
+def Hexagon_M2_mpy_sat_hl_s0:
+ si_MInst_sisi_sat_hl <"mpy", int_hexagon_M2_mpy_sat_hl_s0>;
+
+def Hexagon_M2_mpy_lh_s0:
+ si_MInst_sisi_lh <"mpy", int_hexagon_M2_mpy_lh_s0>;
+def Hexagon_M2_mpy_lh_s1:
+ si_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpy_lh_s1>;
+def Hexagon_M2_mpy_rnd_lh_s1:
+ si_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_rnd_lh_s1>;
+def Hexagon_M2_mpy_sat_rnd_lh_s1:
+ si_MInst_sisi_sat_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s1>;
+def Hexagon_M2_mpy_sat_lh_s1:
+ si_MInst_sisi_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_lh_s1>;
+def Hexagon_M2_mpy_rnd_lh_s0:
+ si_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpy_rnd_lh_s0>;
+def Hexagon_M2_mpy_sat_rnd_lh_s0:
+ si_MInst_sisi_sat_rnd_lh <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s0>;
+def Hexagon_M2_mpy_sat_lh_s0:
+ si_MInst_sisi_sat_lh <"mpy", int_hexagon_M2_mpy_sat_lh_s0>;
+
+def Hexagon_M2_mpy_ll_s0:
+ si_MInst_sisi_ll <"mpy", int_hexagon_M2_mpy_ll_s0>;
+def Hexagon_M2_mpy_ll_s1:
+ si_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpy_ll_s1>;
+def Hexagon_M2_mpy_rnd_ll_s1:
+ si_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_rnd_ll_s1>;
+def Hexagon_M2_mpy_sat_rnd_ll_s1:
+ si_MInst_sisi_sat_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s1>;
+def Hexagon_M2_mpy_sat_ll_s1:
+ si_MInst_sisi_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_ll_s1>;
+def Hexagon_M2_mpy_rnd_ll_s0:
+ si_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpy_rnd_ll_s0>;
+def Hexagon_M2_mpy_sat_rnd_ll_s0:
+ si_MInst_sisi_sat_rnd_ll <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s0>;
+def Hexagon_M2_mpy_sat_ll_s0:
+ si_MInst_sisi_sat_ll <"mpy", int_hexagon_M2_mpy_sat_ll_s0>;
+
+//Rdd=mpy(Rs.[H|L],Rt.[H|L])[:<<0|:<<1][:rnd]
+def Hexagon_M2_mpyd_hh_s0:
+ di_MInst_sisi_hh <"mpy", int_hexagon_M2_mpyd_hh_s0>;
+def Hexagon_M2_mpyd_hh_s1:
+ di_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpyd_hh_s1>;
+def Hexagon_M2_mpyd_rnd_hh_s1:
+ di_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hh_s1>;
+def Hexagon_M2_mpyd_rnd_hh_s0:
+ di_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpyd_rnd_hh_s0>;
+
+def Hexagon_M2_mpyd_hl_s0:
+ di_MInst_sisi_hl <"mpy", int_hexagon_M2_mpyd_hl_s0>;
+def Hexagon_M2_mpyd_hl_s1:
+ di_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpyd_hl_s1>;
+def Hexagon_M2_mpyd_rnd_hl_s1:
+ di_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hl_s1>;
+def Hexagon_M2_mpyd_rnd_hl_s0:
+ di_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpyd_rnd_hl_s0>;
+
+def Hexagon_M2_mpyd_lh_s0:
+ di_MInst_sisi_lh <"mpy", int_hexagon_M2_mpyd_lh_s0>;
+def Hexagon_M2_mpyd_lh_s1:
+ di_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpyd_lh_s1>;
+def Hexagon_M2_mpyd_rnd_lh_s1:
+ di_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_lh_s1>;
+def Hexagon_M2_mpyd_rnd_lh_s0:
+ di_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpyd_rnd_lh_s0>;
+
+def Hexagon_M2_mpyd_ll_s0:
+ di_MInst_sisi_ll <"mpy", int_hexagon_M2_mpyd_ll_s0>;
+def Hexagon_M2_mpyd_ll_s1:
+ di_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpyd_ll_s1>;
+def Hexagon_M2_mpyd_rnd_ll_s1:
+ di_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpyd_rnd_ll_s1>;
+def Hexagon_M2_mpyd_rnd_ll_s0:
+ di_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpyd_rnd_ll_s0>;
+
+//Rx+=mpy(Rs.[H|L],Rt.[H|L])[:<<0|:<<1][:sat]
+def Hexagon_M2_mpy_acc_hh_s0:
+ si_MInst_sisisi_acc_hh <"mpy", int_hexagon_M2_mpy_acc_hh_s0>;
+def Hexagon_M2_mpy_acc_hh_s1:
+ si_MInst_sisisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_hh_s1>;
+def Hexagon_M2_mpy_acc_sat_hh_s1:
+ si_MInst_sisisi_acc_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s1>;
+def Hexagon_M2_mpy_acc_sat_hh_s0:
+ si_MInst_sisisi_acc_sat_hh <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s0>;
+
+def Hexagon_M2_mpy_acc_hl_s0:
+ si_MInst_sisisi_acc_hl <"mpy", int_hexagon_M2_mpy_acc_hl_s0>;
+def Hexagon_M2_mpy_acc_hl_s1:
+ si_MInst_sisisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_hl_s1>;
+def Hexagon_M2_mpy_acc_sat_hl_s1:
+ si_MInst_sisisi_acc_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s1>;
+def Hexagon_M2_mpy_acc_sat_hl_s0:
+ si_MInst_sisisi_acc_sat_hl <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s0>;
+
+def Hexagon_M2_mpy_acc_lh_s0:
+ si_MInst_sisisi_acc_lh <"mpy", int_hexagon_M2_mpy_acc_lh_s0>;
+def Hexagon_M2_mpy_acc_lh_s1:
+ si_MInst_sisisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_lh_s1>;
+def Hexagon_M2_mpy_acc_sat_lh_s1:
+ si_MInst_sisisi_acc_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s1>;
+def Hexagon_M2_mpy_acc_sat_lh_s0:
+ si_MInst_sisisi_acc_sat_lh <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s0>;
+
+def Hexagon_M2_mpy_acc_ll_s0:
+ si_MInst_sisisi_acc_ll <"mpy", int_hexagon_M2_mpy_acc_ll_s0>;
+def Hexagon_M2_mpy_acc_ll_s1:
+ si_MInst_sisisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_ll_s1>;
+def Hexagon_M2_mpy_acc_sat_ll_s1:
+ si_MInst_sisisi_acc_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s1>;
+def Hexagon_M2_mpy_acc_sat_ll_s0:
+ si_MInst_sisisi_acc_sat_ll <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s0>;
+
+//Rx-=mpy(Rs.[H|L],Rt.[H|L])[:<<0|:<<1][:sat]
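+// For example, Hexagon_M2_mpy_nac_ll_s0 below assembles as
+// "$dst -= mpy($src1.L, $src2.L)" (via si_MInst_sisisi_nac_ll above).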
+def Hexagon_M2_mpy_nac_hh_s0:
+ si_MInst_sisisi_nac_hh <"mpy", int_hexagon_M2_mpy_nac_hh_s0>;
+def Hexagon_M2_mpy_nac_hh_s1:
+ si_MInst_sisisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_hh_s1>;
+def Hexagon_M2_mpy_nac_sat_hh_s1:
+ si_MInst_sisisi_nac_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s1>;
+def Hexagon_M2_mpy_nac_sat_hh_s0:
+ si_MInst_sisisi_nac_sat_hh <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s0>;
+
+def Hexagon_M2_mpy_nac_hl_s0:
+ si_MInst_sisisi_nac_hl <"mpy", int_hexagon_M2_mpy_nac_hl_s0>;
+def Hexagon_M2_mpy_nac_hl_s1:
+ si_MInst_sisisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_hl_s1>;
+def Hexagon_M2_mpy_nac_sat_hl_s1:
+ si_MInst_sisisi_nac_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s1>;
+def Hexagon_M2_mpy_nac_sat_hl_s0:
+ si_MInst_sisisi_nac_sat_hl <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s0>;
+
+def Hexagon_M2_mpy_nac_lh_s0:
+ si_MInst_sisisi_nac_lh <"mpy", int_hexagon_M2_mpy_nac_lh_s0>;
+def Hexagon_M2_mpy_nac_lh_s1:
+ si_MInst_sisisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_lh_s1>;
+def Hexagon_M2_mpy_nac_sat_lh_s1:
+ si_MInst_sisisi_nac_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s1>;
+def Hexagon_M2_mpy_nac_sat_lh_s0:
+ si_MInst_sisisi_nac_sat_lh <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s0>;
+
+def Hexagon_M2_mpy_nac_ll_s0:
+ si_MInst_sisisi_nac_ll <"mpy", int_hexagon_M2_mpy_nac_ll_s0>;
+def Hexagon_M2_mpy_nac_ll_s1:
+ si_MInst_sisisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_ll_s1>;
+def Hexagon_M2_mpy_nac_sat_ll_s1:
+ si_MInst_sisisi_nac_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s1>;
+def Hexagon_M2_mpy_nac_sat_ll_s0:
+ si_MInst_sisisi_nac_sat_ll <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s0>;
+
+//Rxx+=mpy(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
+def Hexagon_M2_mpyd_acc_hh_s0:
+ di_MInst_disisi_acc_hh <"mpy", int_hexagon_M2_mpyd_acc_hh_s0>;
+def Hexagon_M2_mpyd_acc_hh_s1:
+ di_MInst_disisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpyd_acc_hh_s1>;
+
+def Hexagon_M2_mpyd_acc_hl_s0:
+ di_MInst_disisi_acc_hl <"mpy", int_hexagon_M2_mpyd_acc_hl_s0>;
+def Hexagon_M2_mpyd_acc_hl_s1:
+ di_MInst_disisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpyd_acc_hl_s1>;
+
+def Hexagon_M2_mpyd_acc_lh_s0:
+ di_MInst_disisi_acc_lh <"mpy", int_hexagon_M2_mpyd_acc_lh_s0>;
+def Hexagon_M2_mpyd_acc_lh_s1:
+ di_MInst_disisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpyd_acc_lh_s1>;
+
+def Hexagon_M2_mpyd_acc_ll_s0:
+ di_MInst_disisi_acc_ll <"mpy", int_hexagon_M2_mpyd_acc_ll_s0>;
+def Hexagon_M2_mpyd_acc_ll_s1:
+ di_MInst_disisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpyd_acc_ll_s1>;
+
+//Rxx-=mpy(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
+def Hexagon_M2_mpyd_nac_hh_s0:
+ di_MInst_disisi_nac_hh <"mpy", int_hexagon_M2_mpyd_nac_hh_s0>;
+def Hexagon_M2_mpyd_nac_hh_s1:
+ di_MInst_disisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpyd_nac_hh_s1>;
+
+def Hexagon_M2_mpyd_nac_hl_s0:
+ di_MInst_disisi_nac_hl <"mpy", int_hexagon_M2_mpyd_nac_hl_s0>;
+def Hexagon_M2_mpyd_nac_hl_s1:
+ di_MInst_disisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpyd_nac_hl_s1>;
+
+def Hexagon_M2_mpyd_nac_lh_s0:
+ di_MInst_disisi_nac_lh <"mpy", int_hexagon_M2_mpyd_nac_lh_s0>;
+def Hexagon_M2_mpyd_nac_lh_s1:
+ di_MInst_disisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpyd_nac_lh_s1>;
+
+def Hexagon_M2_mpyd_nac_ll_s0:
+ di_MInst_disisi_nac_ll <"mpy", int_hexagon_M2_mpyd_nac_ll_s0>;
+def Hexagon_M2_mpyd_nac_ll_s1:
+ di_MInst_disisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpyd_nac_ll_s1>;
+
+// MTYPE / MPYS / Scalar 16x16 multiply unsigned.
+//Rd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
+def Hexagon_M2_mpyu_hh_s0:
+ si_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyu_hh_s0>;
+def Hexagon_M2_mpyu_hh_s1:
+ si_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyu_hh_s1>;
+def Hexagon_M2_mpyu_hl_s0:
+ si_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyu_hl_s0>;
+def Hexagon_M2_mpyu_hl_s1:
+ si_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyu_hl_s1>;
+def Hexagon_M2_mpyu_lh_s0:
+ si_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyu_lh_s0>;
+def Hexagon_M2_mpyu_lh_s1:
+ si_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyu_lh_s1>;
+def Hexagon_M2_mpyu_ll_s0:
+ si_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyu_ll_s0>;
+def Hexagon_M2_mpyu_ll_s1:
+ si_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyu_ll_s1>;
+
+//Rdd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
+def Hexagon_M2_mpyud_hh_s0:
+ di_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyud_hh_s0>;
+def Hexagon_M2_mpyud_hh_s1:
+ di_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyud_hh_s1>;
+def Hexagon_M2_mpyud_hl_s0:
+ di_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyud_hl_s0>;
+def Hexagon_M2_mpyud_hl_s1:
+ di_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyud_hl_s1>;
+def Hexagon_M2_mpyud_lh_s0:
+ di_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyud_lh_s0>;
+def Hexagon_M2_mpyud_lh_s1:
+ di_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyud_lh_s1>;
+def Hexagon_M2_mpyud_ll_s0:
+ di_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyud_ll_s0>;
+def Hexagon_M2_mpyud_ll_s1:
+ di_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyud_ll_s1>;
+
+//Rd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
+def Hexagon_M2_mpyu_acc_hh_s0:
+ si_MInst_sisisi_acc_hh <"mpyu", int_hexagon_M2_mpyu_acc_hh_s0>;
+def Hexagon_M2_mpyu_acc_hh_s1:
+ si_MInst_sisisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hh_s1>;
+def Hexagon_M2_mpyu_acc_hl_s0:
+ si_MInst_sisisi_acc_hl <"mpyu", int_hexagon_M2_mpyu_acc_hl_s0>;
+def Hexagon_M2_mpyu_acc_hl_s1:
+ si_MInst_sisisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hl_s1>;
+def Hexagon_M2_mpyu_acc_lh_s0:
+ si_MInst_sisisi_acc_lh <"mpyu", int_hexagon_M2_mpyu_acc_lh_s0>;
+def Hexagon_M2_mpyu_acc_lh_s1:
+ si_MInst_sisisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_lh_s1>;
+def Hexagon_M2_mpyu_acc_ll_s0:
+ si_MInst_sisisi_acc_ll <"mpyu", int_hexagon_M2_mpyu_acc_ll_s0>;
+def Hexagon_M2_mpyu_acc_ll_s1:
+ si_MInst_sisisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyu_acc_ll_s1>;
+
+//Rd-=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
+def Hexagon_M2_mpyu_nac_hh_s0:
+ si_MInst_sisisi_nac_hh <"mpyu", int_hexagon_M2_mpyu_nac_hh_s0>;
+def Hexagon_M2_mpyu_nac_hh_s1:
+ si_MInst_sisisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hh_s1>;
+def Hexagon_M2_mpyu_nac_hl_s0:
+ si_MInst_sisisi_nac_hl <"mpyu", int_hexagon_M2_mpyu_nac_hl_s0>;
+def Hexagon_M2_mpyu_nac_hl_s1:
+ si_MInst_sisisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hl_s1>;
+def Hexagon_M2_mpyu_nac_lh_s0:
+ si_MInst_sisisi_nac_lh <"mpyu", int_hexagon_M2_mpyu_nac_lh_s0>;
+def Hexagon_M2_mpyu_nac_lh_s1:
+ si_MInst_sisisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_lh_s1>;
+def Hexagon_M2_mpyu_nac_ll_s0:
+ si_MInst_sisisi_nac_ll <"mpyu", int_hexagon_M2_mpyu_nac_ll_s0>;
+def Hexagon_M2_mpyu_nac_ll_s1:
+ si_MInst_sisisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyu_nac_ll_s1>;
+
+//Rdd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
+def Hexagon_M2_mpyud_acc_hh_s0:
+ di_MInst_disisi_acc_hh <"mpyu", int_hexagon_M2_mpyud_acc_hh_s0>;
+def Hexagon_M2_mpyud_acc_hh_s1:
+ di_MInst_disisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hh_s1>;
+def Hexagon_M2_mpyud_acc_hl_s0:
+ di_MInst_disisi_acc_hl <"mpyu", int_hexagon_M2_mpyud_acc_hl_s0>;
+def Hexagon_M2_mpyud_acc_hl_s1:
+ di_MInst_disisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hl_s1>;
+def Hexagon_M2_mpyud_acc_lh_s0:
+ di_MInst_disisi_acc_lh <"mpyu", int_hexagon_M2_mpyud_acc_lh_s0>;
+def Hexagon_M2_mpyud_acc_lh_s1:
+ di_MInst_disisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_lh_s1>;
+def Hexagon_M2_mpyud_acc_ll_s0:
+ di_MInst_disisi_acc_ll <"mpyu", int_hexagon_M2_mpyud_acc_ll_s0>;
+def Hexagon_M2_mpyud_acc_ll_s1:
+ di_MInst_disisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyud_acc_ll_s1>;
+
+//Rdd-=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1]
+def Hexagon_M2_mpyud_nac_hh_s0:
+ di_MInst_disisi_nac_hh <"mpyu", int_hexagon_M2_mpyud_nac_hh_s0>;
+def Hexagon_M2_mpyud_nac_hh_s1:
+ di_MInst_disisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hh_s1>;
+def Hexagon_M2_mpyud_nac_hl_s0:
+ di_MInst_disisi_nac_hl <"mpyu", int_hexagon_M2_mpyud_nac_hl_s0>;
+def Hexagon_M2_mpyud_nac_hl_s1:
+ di_MInst_disisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hl_s1>;
+def Hexagon_M2_mpyud_nac_lh_s0:
+ di_MInst_disisi_nac_lh <"mpyu", int_hexagon_M2_mpyud_nac_lh_s0>;
+def Hexagon_M2_mpyud_nac_lh_s1:
+ di_MInst_disisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_lh_s1>;
+def Hexagon_M2_mpyud_nac_ll_s0:
+ di_MInst_disisi_nac_ll <"mpyu", int_hexagon_M2_mpyud_nac_ll_s0>;
+def Hexagon_M2_mpyud_nac_ll_s1:
+ di_MInst_disisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyud_nac_ll_s1>;
+
+
+/********************************************************************
+* MTYPE/VB *
+*********************************************************************/
+
+// MTYPE / VB / Vector reduce add unsigned bytes.
+def Hexagon_A2_vraddub:
+ di_MInst_didi <"vraddub", int_hexagon_A2_vraddub>;
+def Hexagon_A2_vraddub_acc:
+ di_MInst_dididi_acc <"vraddub", int_hexagon_A2_vraddub_acc>;
+
+// MTYPE / VB / Vector sum of absolute differences unsigned bytes.
+def Hexagon_A2_vrsadub:
+ di_MInst_didi <"vrsadub", int_hexagon_A2_vrsadub>;
+def Hexagon_A2_vrsadub_acc:
+ di_MInst_dididi_acc <"vrsadub", int_hexagon_A2_vrsadub_acc>;
+
+/********************************************************************
+* MTYPE/VH *
+*********************************************************************/
+
+// MTYPE / VH / Vector dual multiply.
+def Hexagon_M2_vdmpys_s1:
+ di_MInst_didi_s1_sat <"vdmpy", int_hexagon_M2_vdmpys_s1>;
+def Hexagon_M2_vdmpys_s0:
+ di_MInst_didi_sat <"vdmpy", int_hexagon_M2_vdmpys_s0>;
+def Hexagon_M2_vdmacs_s1:
+ di_MInst_dididi_acc_s1_sat <"vdmpy", int_hexagon_M2_vdmacs_s1>;
+def Hexagon_M2_vdmacs_s0:
+ di_MInst_dididi_acc_sat <"vdmpy", int_hexagon_M2_vdmacs_s0>;
+
+// MTYPE / VH / Vector dual multiply with round and pack.
+def Hexagon_M2_vdmpyrs_s0:
+ si_MInst_didi_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s0>;
+def Hexagon_M2_vdmpyrs_s1:
+ si_MInst_didi_s1_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s1>;
+
+// MTYPE / VH / Vector multiply even halfwords.
+def Hexagon_M2_vmpy2es_s1:
+ di_MInst_didi_s1_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s1>;
+def Hexagon_M2_vmpy2es_s0:
+ di_MInst_didi_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s0>;
+def Hexagon_M2_vmac2es:
+ di_MInst_dididi_acc <"vmpyeh", int_hexagon_M2_vmac2es>;
+def Hexagon_M2_vmac2es_s1:
+ di_MInst_dididi_acc_s1_sat <"vmpyeh", int_hexagon_M2_vmac2es_s1>;
+def Hexagon_M2_vmac2es_s0:
+ di_MInst_dididi_acc_sat <"vmpyeh", int_hexagon_M2_vmac2es_s0>;
+
+// MTYPE / VH / Vector multiply halfwords.
+def Hexagon_M2_vmpy2s_s0:
+ di_MInst_sisi_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0>;
+def Hexagon_M2_vmpy2s_s1:
+ di_MInst_sisi_s1_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1>;
+def Hexagon_M2_vmac2:
+ di_MInst_disisi_acc <"vmpyh", int_hexagon_M2_vmac2>;
+def Hexagon_M2_vmac2s_s0:
+ di_MInst_disisi_acc_sat <"vmpyh", int_hexagon_M2_vmac2s_s0>;
+def Hexagon_M2_vmac2s_s1:
+ di_MInst_disisi_acc_s1_sat <"vmpyh", int_hexagon_M2_vmac2s_s1>;
+
+// MTYPE / VH / Vector multiply halfwords with round and pack.
+def Hexagon_M2_vmpy2s_s0pack:
+ si_MInst_sisi_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0pack>;
+def Hexagon_M2_vmpy2s_s1pack:
+ si_MInst_sisi_s1_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1pack>;
+
+// MTYPE / VH / Vector reduce multiply halfwords.
+// Rxx64+=vrmpyh(Rss64,Rtt64)
+def Hexagon_M2_vrmpy_s0:
+ di_MInst_didi <"vrmpyh", int_hexagon_M2_vrmpy_s0>;
+def Hexagon_M2_vrmac_s0:
+ di_MInst_dididi_acc <"vrmpyh", int_hexagon_M2_vrmac_s0>;
+
+
+/********************************************************************
+* STYPE/ALU *
+*********************************************************************/
+
+// STYPE / ALU / Absolute value.
+def Hexagon_A2_abs:
+ si_SInst_si <"abs", int_hexagon_A2_abs>;
+def Hexagon_A2_absp:
+ di_SInst_di <"abs", int_hexagon_A2_absp>;
+def Hexagon_A2_abssat:
+ si_SInst_si_sat <"abs", int_hexagon_A2_abssat>;
+
+// STYPE / ALU / Negate.
+def Hexagon_A2_negp:
+ di_SInst_di <"neg", int_hexagon_A2_negp>;
+def Hexagon_A2_negsat:
+ si_SInst_si_sat <"neg", int_hexagon_A2_negsat>;
+
+// STYPE / ALU / Logical Not.
+def Hexagon_A2_notp:
+ di_SInst_di <"not", int_hexagon_A2_notp>;
+
+// STYPE / ALU / Sign extend word to doubleword.
+def Hexagon_A2_sxtw:
+ di_SInst_si <"sxtw", int_hexagon_A2_sxtw>;
+
+
+/********************************************************************
+* STYPE/BIT *
+*********************************************************************/
+
+// STYPE / BIT / Count leading.
+def Hexagon_S2_cl0:
+ si_SInst_si <"cl0", int_hexagon_S2_cl0>;
+def Hexagon_S2_cl0p:
+ si_SInst_di <"cl0", int_hexagon_S2_cl0p>;
+def Hexagon_S2_cl1:
+ si_SInst_si <"cl1", int_hexagon_S2_cl1>;
+def Hexagon_S2_cl1p:
+ si_SInst_di <"cl1", int_hexagon_S2_cl1p>;
+def Hexagon_S2_clb:
+ si_SInst_si <"clb", int_hexagon_S2_clb>;
+def Hexagon_S2_clbp:
+ si_SInst_di <"clb", int_hexagon_S2_clbp>;
+def Hexagon_S2_clbnorm:
+ si_SInst_si <"normamt", int_hexagon_S2_clbnorm>;
+
+// STYPE / BIT / Count trailing.
+def Hexagon_S2_ct0:
+ si_SInst_si <"ct0", int_hexagon_S2_ct0>;
+def Hexagon_S2_ct1:
+ si_SInst_si <"ct1", int_hexagon_S2_ct1>;
+
+// STYPE / BIT / Compare bit mask.
+def HEXAGON_C2_bitsclr:
+ qi_SInst_sisi <"bitsclr", int_hexagon_C2_bitsclr>;
+def HEXAGON_C2_bitsclri:
+ qi_SInst_siu6 <"bitsclr", int_hexagon_C2_bitsclri>;
+def HEXAGON_C2_bitsset:
+ qi_SInst_sisi <"bitsset", int_hexagon_C2_bitsset>;
+
+// STYPE / BIT / Extract unsigned.
+// Rd[d][32/64]=extractu(Rs[s],Rt[t],[imm])
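+// For example, extractu(Rs,#5,#3) extracts the unsigned 5-bit field of Rs
+// starting at bit 3; the _rp variants take the width/offset pair from a
+// register pair Rtt instead of immediates.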
+def Hexagon_S2_extractu:
+ si_SInst_siu5u5 <"extractu",int_hexagon_S2_extractu>;
+def Hexagon_S2_extractu_rp:
+ si_SInst_sidi <"extractu",int_hexagon_S2_extractu_rp>;
+def Hexagon_S2_extractup:
+ di_SInst_diu6u6 <"extractu",int_hexagon_S2_extractup>;
+def Hexagon_S2_extractup_rp:
+ di_SInst_didi <"extractu",int_hexagon_S2_extractup_rp>;
+
+// STYPE / BIT / Insert bitfield.
+def HEXAGON_S2_insert:
+ si_SInst_sisiu5u5 <"insert", int_hexagon_S2_insert>;
+def HEXAGON_S2_insert_rp:
+ si_SInst_sisidi <"insert", int_hexagon_S2_insert_rp>;
+def HEXAGON_S2_insertp:
+ di_SInst_didiu6u6 <"insert", int_hexagon_S2_insertp>;
+def HEXAGON_S2_insertp_rp:
+ di_SInst_dididi <"insert", int_hexagon_S2_insertp_rp>;
+
+// STYPE / BIT / Interleave/deinterleave.
+def HEXAGON_S2_interleave:
+ di_SInst_di <"interleave", int_hexagon_S2_interleave>;
+def HEXAGON_S2_deinterleave:
+ di_SInst_di <"deinterleave", int_hexagon_S2_deinterleave>;
+
+// STYPE / BIT / Linear feedback-shift Iteration.
+def HEXAGON_S2_lfsp:
+ di_SInst_didi <"lfs", int_hexagon_S2_lfsp>;
+
+// STYPE / BIT / Bit reverse.
+def HEXAGON_S2_brev:
+ si_SInst_si <"brev", int_hexagon_S2_brev>;
+
+// STYPE / BIT / Set/Clear/Toggle Bit.
+def Hexagon_S2_setbit_i:
+ si_SInst_siu5 <"setbit", int_hexagon_S2_setbit_i>;
+def Hexagon_S2_togglebit_i:
+ si_SInst_siu5 <"togglebit", int_hexagon_S2_togglebit_i>;
+def Hexagon_S2_clrbit_i:
+ si_SInst_siu5 <"clrbit", int_hexagon_S2_clrbit_i>;
+def Hexagon_S2_setbit_r:
+ si_SInst_sisi <"setbit", int_hexagon_S2_setbit_r>;
+def Hexagon_S2_togglebit_r:
+ si_SInst_sisi <"togglebit", int_hexagon_S2_togglebit_r>;
+def Hexagon_S2_clrbit_r:
+ si_SInst_sisi <"clrbit", int_hexagon_S2_clrbit_r>;
+
+// STYPE / BIT / Test Bit.
+def Hexagon_S2_tstbit_i:
+ qi_SInst_siu5 <"tstbit", int_hexagon_S2_tstbit_i>;
+def Hexagon_S2_tstbit_r:
+ qi_SInst_sisi <"tstbit", int_hexagon_S2_tstbit_r>;
+
+
+/********************************************************************
+* STYPE/COMPLEX *
+*********************************************************************/
+
+// STYPE / COMPLEX / Vector Complex conjugate.
+def Hexagon_A2_vconj:
+ di_SInst_di_sat <"vconj", int_hexagon_A2_vconj>;
+
+// STYPE / COMPLEX / Vector Complex rotate.
+def Hexagon_S2_vcrotate:
+ di_SInst_disi <"vcrotate",int_hexagon_S2_vcrotate>;
+
+
+/********************************************************************
+* STYPE/PERM *
+*********************************************************************/
+
+// STYPE / PERM / Saturate.
+def Hexagon_A2_sat:
+ si_SInst_di <"sat", int_hexagon_A2_sat>;
+def Hexagon_A2_satb:
+ si_SInst_si <"satb", int_hexagon_A2_satb>;
+def Hexagon_A2_sath:
+ si_SInst_si <"sath", int_hexagon_A2_sath>;
+def Hexagon_A2_satub:
+ si_SInst_si <"satub", int_hexagon_A2_satub>;
+def Hexagon_A2_satuh:
+ si_SInst_si <"satuh", int_hexagon_A2_satuh>;
+
+// STYPE / PERM / Swizzle bytes.
+def Hexagon_A2_swiz:
+ si_SInst_si <"swiz", int_hexagon_A2_swiz>;
+
+// STYPE / PERM / Vector align.
+// Needs custom lowering.
+def Hexagon_S2_valignib:
+ di_SInst_didiu3 <"valignb", int_hexagon_S2_valignib>;
+def Hexagon_S2_valignrb:
+ di_SInst_didiqi <"valignb", int_hexagon_S2_valignrb>;
+
+// STYPE / PERM / Vector round and pack.
+def Hexagon_S2_vrndpackwh:
+ si_SInst_di <"vrndwh", int_hexagon_S2_vrndpackwh>;
+def Hexagon_S2_vrndpackwhs:
+ si_SInst_di_sat <"vrndwh", int_hexagon_S2_vrndpackwhs>;
+
+// STYPE / PERM / Vector saturate and pack.
+def Hexagon_S2_svsathb:
+ si_SInst_si <"vsathb", int_hexagon_S2_svsathb>;
+def Hexagon_S2_vsathb:
+ si_SInst_di <"vsathb", int_hexagon_S2_vsathb>;
+def Hexagon_S2_svsathub:
+ si_SInst_si <"vsathub", int_hexagon_S2_svsathub>;
+def Hexagon_S2_vsathub:
+ si_SInst_di <"vsathub", int_hexagon_S2_vsathub>;
+def Hexagon_S2_vsatwh:
+ si_SInst_di <"vsatwh", int_hexagon_S2_vsatwh>;
+def Hexagon_S2_vsatwuh:
+ si_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh>;
+
+// STYPE / PERM / Vector saturate without pack.
+def Hexagon_S2_vsathb_nopack:
+ di_SInst_di <"vsathb", int_hexagon_S2_vsathb_nopack>;
+def Hexagon_S2_vsathub_nopack:
+ di_SInst_di <"vsathub", int_hexagon_S2_vsathub_nopack>;
+def Hexagon_S2_vsatwh_nopack:
+ di_SInst_di <"vsatwh", int_hexagon_S2_vsatwh_nopack>;
+def Hexagon_S2_vsatwuh_nopack:
+ di_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh_nopack>;
+
+// STYPE / PERM / Vector shuffle.
+def Hexagon_S2_shuffeb:
+ di_SInst_didi <"shuffeb", int_hexagon_S2_shuffeb>;
+def Hexagon_S2_shuffeh:
+ di_SInst_didi <"shuffeh", int_hexagon_S2_shuffeh>;
+def Hexagon_S2_shuffob:
+ di_SInst_didi <"shuffob", int_hexagon_S2_shuffob>;
+def Hexagon_S2_shuffoh:
+ di_SInst_didi <"shuffoh", int_hexagon_S2_shuffoh>;
+
+// STYPE / PERM / Vector splat bytes.
+def Hexagon_S2_vsplatrb:
+ si_SInst_si <"vsplatb", int_hexagon_S2_vsplatrb>;
+
+// STYPE / PERM / Vector splat halfwords.
+def Hexagon_S2_vsplatrh:
+ di_SInst_si <"vsplath", int_hexagon_S2_vsplatrh>;
+
+// STYPE / PERM / Vector splice.
+def HEXAGON_S2_vsplicerb:
+ di_SInst_didiqi <"vspliceb",int_hexagon_S2_vsplicerb>;
+def HEXAGON_S2_vspliceib:
+ di_SInst_didiu3 <"vspliceb",int_hexagon_S2_vspliceib>;
+
+// STYPE / PERM / Sign extend.
+def Hexagon_S2_vsxtbh:
+ di_SInst_si <"vsxtbh", int_hexagon_S2_vsxtbh>;
+def Hexagon_S2_vsxthw:
+ di_SInst_si <"vsxthw", int_hexagon_S2_vsxthw>;
+
+// STYPE / PERM / Truncate.
+def Hexagon_S2_vtrunehb:
+ si_SInst_di <"vtrunehb",int_hexagon_S2_vtrunehb>;
+def Hexagon_S2_vtrunohb:
+ si_SInst_di <"vtrunohb",int_hexagon_S2_vtrunohb>;
+def Hexagon_S2_vtrunewh:
+ di_SInst_didi <"vtrunewh",int_hexagon_S2_vtrunewh>;
+def Hexagon_S2_vtrunowh:
+ di_SInst_didi <"vtrunowh",int_hexagon_S2_vtrunowh>;
+
+// STYPE / PERM / Zero extend.
+def Hexagon_S2_vzxtbh:
+ di_SInst_si <"vzxtbh", int_hexagon_S2_vzxtbh>;
+def Hexagon_S2_vzxthw:
+ di_SInst_si <"vzxthw", int_hexagon_S2_vzxthw>;
+
+
+/********************************************************************
+* STYPE/PRED *
+*********************************************************************/
+
+// STYPE / PRED / Mask generate from predicate.
+def Hexagon_C2_mask:
+ di_SInst_qi <"mask", int_hexagon_C2_mask>;
+
+// STYPE / PRED / Predicate transfer.
+def Hexagon_C2_tfrpr:
+ si_SInst_qi <"", int_hexagon_C2_tfrpr>;
+def Hexagon_C2_tfrrp:
+ qi_SInst_si <"", int_hexagon_C2_tfrrp>;
+
+// STYPE / PRED / Viterbi pack even and odd predicate bits.
+def Hexagon_C2_vitpack:
+ si_SInst_qiqi <"vitpack",int_hexagon_C2_vitpack>;
+
+
+/********************************************************************
+* STYPE/SHIFT *
+*********************************************************************/
+
+// STYPE / SHIFT / Shift by immediate.
+def Hexagon_S2_asl_i_r:
+ si_SInst_siu5 <"asl", int_hexagon_S2_asl_i_r>;
+def Hexagon_S2_asr_i_r:
+ si_SInst_siu5 <"asr", int_hexagon_S2_asr_i_r>;
+def Hexagon_S2_lsr_i_r:
+ si_SInst_siu5 <"lsr", int_hexagon_S2_lsr_i_r>;
+def Hexagon_S2_asl_i_p:
+ di_SInst_diu6 <"asl", int_hexagon_S2_asl_i_p>;
+def Hexagon_S2_asr_i_p:
+ di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p>;
+def Hexagon_S2_lsr_i_p:
+ di_SInst_diu6 <"lsr", int_hexagon_S2_lsr_i_p>;
+
+// STYPE / SHIFT / Shift by immediate and accumulate.
+def Hexagon_S2_asl_i_r_acc:
+ si_SInst_sisiu5_acc <"asl", int_hexagon_S2_asl_i_r_acc>;
+def Hexagon_S2_asr_i_r_acc:
+ si_SInst_sisiu5_acc <"asr", int_hexagon_S2_asr_i_r_acc>;
+def Hexagon_S2_lsr_i_r_acc:
+ si_SInst_sisiu5_acc <"lsr", int_hexagon_S2_lsr_i_r_acc>;
+def Hexagon_S2_asl_i_r_nac:
+ si_SInst_sisiu5_nac <"asl", int_hexagon_S2_asl_i_r_nac>;
+def Hexagon_S2_asr_i_r_nac:
+ si_SInst_sisiu5_nac <"asr", int_hexagon_S2_asr_i_r_nac>;
+def Hexagon_S2_lsr_i_r_nac:
+ si_SInst_sisiu5_nac <"lsr", int_hexagon_S2_lsr_i_r_nac>;
+def Hexagon_S2_asl_i_p_acc:
+ di_SInst_didiu6_acc <"asl", int_hexagon_S2_asl_i_p_acc>;
+def Hexagon_S2_asr_i_p_acc:
+ di_SInst_didiu6_acc <"asr", int_hexagon_S2_asr_i_p_acc>;
+def Hexagon_S2_lsr_i_p_acc:
+ di_SInst_didiu6_acc <"lsr", int_hexagon_S2_lsr_i_p_acc>;
+def Hexagon_S2_asl_i_p_nac:
+ di_SInst_didiu6_nac <"asl", int_hexagon_S2_asl_i_p_nac>;
+def Hexagon_S2_asr_i_p_nac:
+ di_SInst_didiu6_nac <"asr", int_hexagon_S2_asr_i_p_nac>;
+def Hexagon_S2_lsr_i_p_nac:
+ di_SInst_didiu6_nac <"lsr", int_hexagon_S2_lsr_i_p_nac>;
+
+// STYPE / SHIFT / Shift by immediate and add.
+def Hexagon_S2_addasl_rrri:
+ si_SInst_sisiu3 <"addasl", int_hexagon_S2_addasl_rrri>;
+
+// STYPE / SHIFT / Shift by immediate and logical.
+def Hexagon_S2_asl_i_r_and:
+ si_SInst_sisiu5_and <"asl", int_hexagon_S2_asl_i_r_and>;
+def Hexagon_S2_asr_i_r_and:
+ si_SInst_sisiu5_and <"asr", int_hexagon_S2_asr_i_r_and>;
+def Hexagon_S2_lsr_i_r_and:
+ si_SInst_sisiu5_and <"lsr", int_hexagon_S2_lsr_i_r_and>;
+
+def Hexagon_S2_asl_i_r_xacc:
+ si_SInst_sisiu5_xor <"asl", int_hexagon_S2_asl_i_r_xacc>;
+def Hexagon_S2_lsr_i_r_xacc:
+ si_SInst_sisiu5_xor <"lsr", int_hexagon_S2_lsr_i_r_xacc>;
+
+def Hexagon_S2_asl_i_r_or:
+ si_SInst_sisiu5_or <"asl", int_hexagon_S2_asl_i_r_or>;
+def Hexagon_S2_asr_i_r_or:
+ si_SInst_sisiu5_or <"asr", int_hexagon_S2_asr_i_r_or>;
+def Hexagon_S2_lsr_i_r_or:
+ si_SInst_sisiu5_or <"lsr", int_hexagon_S2_lsr_i_r_or>;
+
+def Hexagon_S2_asl_i_p_and:
+ di_SInst_didiu6_and <"asl", int_hexagon_S2_asl_i_p_and>;
+def Hexagon_S2_asr_i_p_and:
+ di_SInst_didiu6_and <"asr", int_hexagon_S2_asr_i_p_and>;
+def Hexagon_S2_lsr_i_p_and:
+ di_SInst_didiu6_and <"lsr", int_hexagon_S2_lsr_i_p_and>;
+
+def Hexagon_S2_asl_i_p_xacc:
+ di_SInst_didiu6_xor <"asl", int_hexagon_S2_asl_i_p_xacc>;
+def Hexagon_S2_lsr_i_p_xacc:
+ di_SInst_didiu6_xor <"lsr", int_hexagon_S2_lsr_i_p_xacc>;
+
+def Hexagon_S2_asl_i_p_or:
+ di_SInst_didiu6_or <"asl", int_hexagon_S2_asl_i_p_or>;
+def Hexagon_S2_asr_i_p_or:
+ di_SInst_didiu6_or <"asr", int_hexagon_S2_asr_i_p_or>;
+def Hexagon_S2_lsr_i_p_or:
+ di_SInst_didiu6_or <"lsr", int_hexagon_S2_lsr_i_p_or>;
+
+// STYPE / SHIFT / Shift right by immediate with rounding.
+def Hexagon_S2_asr_i_r_rnd:
+ si_SInst_siu5_rnd <"asr", int_hexagon_S2_asr_i_r_rnd>;
+def Hexagon_S2_asr_i_r_rnd_goodsyntax:
+ si_SInst_siu5 <"asrrnd", int_hexagon_S2_asr_i_r_rnd_goodsyntax>;
+
+// STYPE / SHIFT / Shift left by immediate with saturation.
+def Hexagon_S2_asl_i_r_sat:
+ si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_i_r_sat>;
+
+// STYPE / SHIFT / Shift by register.
+def Hexagon_S2_asl_r_r:
+ si_SInst_sisi <"asl", int_hexagon_S2_asl_r_r>;
+def Hexagon_S2_asr_r_r:
+ si_SInst_sisi <"asr", int_hexagon_S2_asr_r_r>;
+def Hexagon_S2_lsl_r_r:
+ si_SInst_sisi <"lsl", int_hexagon_S2_lsl_r_r>;
+def Hexagon_S2_lsr_r_r:
+ si_SInst_sisi <"lsr", int_hexagon_S2_lsr_r_r>;
+def Hexagon_S2_asl_r_p:
+ di_SInst_disi <"asl", int_hexagon_S2_asl_r_p>;
+def Hexagon_S2_asr_r_p:
+ di_SInst_disi <"asr", int_hexagon_S2_asr_r_p>;
+def Hexagon_S2_lsl_r_p:
+ di_SInst_disi <"lsl", int_hexagon_S2_lsl_r_p>;
+def Hexagon_S2_lsr_r_p:
+ di_SInst_disi <"lsr", int_hexagon_S2_lsr_r_p>;
+
+// STYPE / SHIFT / Shift by register and accumulate.
+def Hexagon_S2_asl_r_r_acc:
+ si_SInst_sisisi_acc <"asl", int_hexagon_S2_asl_r_r_acc>;
+def Hexagon_S2_asr_r_r_acc:
+ si_SInst_sisisi_acc <"asr", int_hexagon_S2_asr_r_r_acc>;
+def Hexagon_S2_lsl_r_r_acc:
+ si_SInst_sisisi_acc <"lsl", int_hexagon_S2_lsl_r_r_acc>;
+def Hexagon_S2_lsr_r_r_acc:
+ si_SInst_sisisi_acc <"lsr", int_hexagon_S2_lsr_r_r_acc>;
+def Hexagon_S2_asl_r_p_acc:
+ di_SInst_didisi_acc <"asl", int_hexagon_S2_asl_r_p_acc>;
+def Hexagon_S2_asr_r_p_acc:
+ di_SInst_didisi_acc <"asr", int_hexagon_S2_asr_r_p_acc>;
+def Hexagon_S2_lsl_r_p_acc:
+ di_SInst_didisi_acc <"lsl", int_hexagon_S2_lsl_r_p_acc>;
+def Hexagon_S2_lsr_r_p_acc:
+ di_SInst_didisi_acc <"lsr", int_hexagon_S2_lsr_r_p_acc>;
+
+def Hexagon_S2_asl_r_r_nac:
+ si_SInst_sisisi_nac <"asl", int_hexagon_S2_asl_r_r_nac>;
+def Hexagon_S2_asr_r_r_nac:
+ si_SInst_sisisi_nac <"asr", int_hexagon_S2_asr_r_r_nac>;
+def Hexagon_S2_lsl_r_r_nac:
+ si_SInst_sisisi_nac <"lsl", int_hexagon_S2_lsl_r_r_nac>;
+def Hexagon_S2_lsr_r_r_nac:
+ si_SInst_sisisi_nac <"lsr", int_hexagon_S2_lsr_r_r_nac>;
+def Hexagon_S2_asl_r_p_nac:
+ di_SInst_didisi_nac <"asl", int_hexagon_S2_asl_r_p_nac>;
+def Hexagon_S2_asr_r_p_nac:
+ di_SInst_didisi_nac <"asr", int_hexagon_S2_asr_r_p_nac>;
+def Hexagon_S2_lsl_r_p_nac:
+ di_SInst_didisi_nac <"lsl", int_hexagon_S2_lsl_r_p_nac>;
+def Hexagon_S2_lsr_r_p_nac:
+ di_SInst_didisi_nac <"lsr", int_hexagon_S2_lsr_r_p_nac>;
+
+// STYPE / SHIFT / Shift by register and logical.
+def Hexagon_S2_asl_r_r_and:
+ si_SInst_sisisi_and <"asl", int_hexagon_S2_asl_r_r_and>;
+def Hexagon_S2_asr_r_r_and:
+ si_SInst_sisisi_and <"asr", int_hexagon_S2_asr_r_r_and>;
+def Hexagon_S2_lsl_r_r_and:
+ si_SInst_sisisi_and <"lsl", int_hexagon_S2_lsl_r_r_and>;
+def Hexagon_S2_lsr_r_r_and:
+ si_SInst_sisisi_and <"lsr", int_hexagon_S2_lsr_r_r_and>;
+
+def Hexagon_S2_asl_r_r_or:
+ si_SInst_sisisi_or <"asl", int_hexagon_S2_asl_r_r_or>;
+def Hexagon_S2_asr_r_r_or:
+ si_SInst_sisisi_or <"asr", int_hexagon_S2_asr_r_r_or>;
+def Hexagon_S2_lsl_r_r_or:
+ si_SInst_sisisi_or <"lsl", int_hexagon_S2_lsl_r_r_or>;
+def Hexagon_S2_lsr_r_r_or:
+ si_SInst_sisisi_or <"lsr", int_hexagon_S2_lsr_r_r_or>;
+
+def Hexagon_S2_asl_r_p_and:
+ di_SInst_didisi_and <"asl", int_hexagon_S2_asl_r_p_and>;
+def Hexagon_S2_asr_r_p_and:
+ di_SInst_didisi_and <"asr", int_hexagon_S2_asr_r_p_and>;
+def Hexagon_S2_lsl_r_p_and:
+ di_SInst_didisi_and <"lsl", int_hexagon_S2_lsl_r_p_and>;
+def Hexagon_S2_lsr_r_p_and:
+ di_SInst_didisi_and <"lsr", int_hexagon_S2_lsr_r_p_and>;
+
+def Hexagon_S2_asl_r_p_or:
+ di_SInst_didisi_or <"asl", int_hexagon_S2_asl_r_p_or>;
+def Hexagon_S2_asr_r_p_or:
+ di_SInst_didisi_or <"asr", int_hexagon_S2_asr_r_p_or>;
+def Hexagon_S2_lsl_r_p_or:
+ di_SInst_didisi_or <"lsl", int_hexagon_S2_lsl_r_p_or>;
+def Hexagon_S2_lsr_r_p_or:
+ di_SInst_didisi_or <"lsr", int_hexagon_S2_lsr_r_p_or>;
+
+// STYPE / SHIFT / Shift by register with saturation.
+def Hexagon_S2_asl_r_r_sat:
+ si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_r_r_sat>;
+def Hexagon_S2_asr_r_r_sat:
+ si_SInst_sisi_sat <"asr", int_hexagon_S2_asr_r_r_sat>;
+
+// STYPE / SHIFT / Table Index.
+def HEXAGON_S2_tableidxb_goodsyntax:
+ si_MInst_sisiu4u5 <"tableidxb",int_hexagon_S2_tableidxb_goodsyntax>;
+def HEXAGON_S2_tableidxd_goodsyntax:
+ si_MInst_sisiu4u5 <"tableidxd",int_hexagon_S2_tableidxd_goodsyntax>;
+def HEXAGON_S2_tableidxh_goodsyntax:
+ si_MInst_sisiu4u5 <"tableidxh",int_hexagon_S2_tableidxh_goodsyntax>;
+def HEXAGON_S2_tableidxw_goodsyntax:
+ si_MInst_sisiu4u5 <"tableidxw",int_hexagon_S2_tableidxw_goodsyntax>;
+
+
+/********************************************************************
+* STYPE/VH *
+*********************************************************************/
+
+// STYPE / VH / Vector absolute value halfwords.
+// Rdd64=vabsh(Rss64)
+def Hexagon_A2_vabsh:
+ di_SInst_di <"vabsh", int_hexagon_A2_vabsh>;
+def Hexagon_A2_vabshsat:
+ di_SInst_di_sat <"vabsh", int_hexagon_A2_vabshsat>;
+
+// STYPE / VH / Vector shift halfwords by immediate.
+// Rdd64=v[asl/asr/lsr]h(Rss64,Rt32)
+def Hexagon_S2_asl_i_vh:
+ di_SInst_disi <"vaslh", int_hexagon_S2_asl_i_vh>;
+def Hexagon_S2_asr_i_vh:
+ di_SInst_disi <"vasrh", int_hexagon_S2_asr_i_vh>;
+def Hexagon_S2_lsr_i_vh:
+ di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_i_vh>;
+
+// STYPE / VH / Vector shift halfwords by register.
+// Rdd64=v[asl/asr/lsl/lsr]h(Rss64,Rt32)
+def Hexagon_S2_asl_r_vh:
+ di_SInst_disi <"vaslh", int_hexagon_S2_asl_r_vh>;
+def Hexagon_S2_asr_r_vh:
+ di_SInst_disi <"vasrh", int_hexagon_S2_asr_r_vh>;
+def Hexagon_S2_lsl_r_vh:
+ di_SInst_disi <"vlslh", int_hexagon_S2_lsl_r_vh>;
+def Hexagon_S2_lsr_r_vh:
+ di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_r_vh>;
+
+
+/********************************************************************
+* STYPE/VW *
+*********************************************************************/
+
+// STYPE / VW / Vector absolute value words.
+def Hexagon_A2_vabsw:
+ di_SInst_di <"vabsw", int_hexagon_A2_vabsw>;
+def Hexagon_A2_vabswsat:
+ di_SInst_di_sat <"vabsw", int_hexagon_A2_vabswsat>;
+
+// STYPE / VW / Vector shift words by immediate.
+// Rdd64=v[asl/asr/lsr]w(Rss64,Rt32)
+def Hexagon_S2_asl_i_vw:
+ di_SInst_disi <"vaslw", int_hexagon_S2_asl_i_vw>;
+def Hexagon_S2_asr_i_vw:
+ di_SInst_disi <"vasrw", int_hexagon_S2_asr_i_vw>;
+def Hexagon_S2_lsr_i_vw:
+ di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_i_vw>;
+
+// STYPE / VW / Vector shift words by register.
+// Rdd64=v[asl/asr/lsl/lsr]w(Rss64,Rt32)
+def Hexagon_S2_asl_r_vw:
+ di_SInst_disi <"vaslw", int_hexagon_S2_asl_r_vw>;
+def Hexagon_S2_asr_r_vw:
+ di_SInst_disi <"vasrw", int_hexagon_S2_asr_r_vw>;
+def Hexagon_S2_lsl_r_vw:
+ di_SInst_disi <"vlslw", int_hexagon_S2_lsl_r_vw>;
+def Hexagon_S2_lsr_r_vw:
+ di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_r_vw>;
+
+// STYPE / VW / Vector shift words with truncate and pack.
+def Hexagon_S2_asr_r_svw_trun:
+ si_SInst_disi <"vasrw", int_hexagon_S2_asr_r_svw_trun>;
+def Hexagon_S2_asr_i_svw_trun:
+ si_SInst_diu5 <"vasrw", int_hexagon_S2_asr_i_svw_trun>;
+
+include "HexagonIntrinsicsV3.td"
+include "HexagonIntrinsicsV4.td"
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
new file mode 100644
index 0000000..68eaf68
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsDerived.td
@@ -0,0 +1,29 @@
+//===-- HexagonIntrinsicsDerived.td - Derived intrinsics ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Multiply 64-bit and use lower result
+//
+// Optimized with intrinsic accumulates (M2_maci).
+//
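+// A sketch of the decomposition being matched: with a = {a_hi,a_lo} and
+// b = {b_hi,b_lo},
+//   full = zext(a_lo) * zext(b_lo)             (MPYU64)
+//   hi'  = hi(full) + a_lo*b_hi + a_hi*b_lo    (two M2_maci steps)
+//   res  = combine(hi', lo(full))              (COMBINE_rr)
+// The cross products cannot affect bits below 32, so lo(full) is already
+// the low word of the product.
+//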
+def : Pat <(mul DoubleRegs:$src1, DoubleRegs:$src2),
+ (COMBINE_rr
+ (Hexagon_M2_maci
+ (Hexagon_M2_maci (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)),
+ subreg_hireg),
+ (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)),
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)),
+ (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)),
+ subreg_loreg))>;
+
+
+
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV3.td b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV3.td
new file mode 100644
index 0000000..2a54e62
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV3.td
@@ -0,0 +1,50 @@
+//=- HexagonIntrinsicsV3.td - Target Description for Hexagon -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Hexagon V3 Compiler Intrinsics in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+
+
+
+// MTYPE / COMPLEX / Vector reduce complex multiply real or imaginary.
+def Hexagon_M2_vrcmpys_s1:
+ di_MInst_disi_s1_sat <"vrcmpys", int_hexagon_M2_vrcmpys_s1>;
+def Hexagon_M2_vrcmpys_acc_s1:
+ di_MInst_didisi_acc_s1_sat <"vrcmpys", int_hexagon_M2_vrcmpys_acc_s1>;
+def Hexagon_M2_vrcmpys_s1rp:
+ si_MInst_disi_s1_rnd_sat <"vrcmpys", int_hexagon_M2_vrcmpys_s1rp>;
+
+
+
+
+/********************************************************************
+* MTYPE/VB *
+*********************************************************************/
+
+// MTYPE / VB / Vector reduce add unsigned halfwords.
+def Hexagon_M2_vradduh:
+ si_MInst_didi <"vradduh", int_hexagon_M2_vradduh>;
+
+
+/********************************************************************
+* ALU64/ALU *
+*********************************************************************/
+
+// ALU64 / ALU / Add.
+def Hexagon_A2_addsp:
+ di_ALU64_sidi <"add", int_hexagon_A2_addsp>;
+def Hexagon_A2_addpsat:
+ di_ALU64_didi <"add", int_hexagon_A2_addpsat>;
+
+def Hexagon_A2_maxp:
+ di_ALU64_didi <"max", int_hexagon_A2_maxp>;
+def Hexagon_A2_maxup:
+ di_ALU64_didi <"maxu", int_hexagon_A2_maxup>;
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td
new file mode 100644
index 0000000..dd28ebb
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonIntrinsicsV4.td
@@ -0,0 +1,369 @@
+//===- HexagonIntrinsicsV4.td - V4 Instruction intrinsics --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This is populated based on the following specs:
+// Hexagon V4 Architecture Extensions
+// Application-Level Specification
+// 80-V9418-12 Rev. A
+// June 15, 2010
+
+
+//
+// ALU 32 types.
+//
+
+class si_ALU32_sisi_not<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, ~$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
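+// As an illustration, the instantiation
+//   si_ALU32_sisi_not<"or", int_hexagon_A4_orn>
+// used below produces an instruction printed as "$dst = or($src1, ~$src2)"
+// and selected for the int_hexagon_A4_orn intrinsic.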
+
+class di_ALU32_s8si<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs DoubleRegs:$dst), (ins s8Imm:$src1, IntRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "(#$src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID imm:$src1, IntRegs:$src2))]>;
+
+class di_ALU32_sis8<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set DoubleRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class qi_neg_ALU32_sisi<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = !", !strconcat(opc , "($src1, $src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class qi_neg_ALU32_sis10<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2),
+ !strconcat("$dst = !", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class qi_neg_ALU32_siu9<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$src1, u9Imm:$src2),
+ !strconcat("$dst = !", !strconcat(opc , "($src1, #$src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_neg_ALU32_sisi<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = !", !strconcat(opc , "($src1, $src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class si_neg_ALU32_sis8<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2),
+ !strconcat("$dst = !", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+class si_ALU32_sis8<string opc, Intrinsic IntID>
+ : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, s8Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+
+//
+// SInst Classes.
+//
+class qi_neg_SInst_qiqi<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2),
+ !strconcat("$dst = !", !strconcat(opc , "($src1, $src2)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>;
+
+class qi_SInst_qi_andqiqi_neg<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, and($src2, !$src3)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class qi_SInst_qi_andqiqi<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, and($src2, $src3)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class qi_SInst_qi_orqiqi_neg<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, or($src2, !$src3)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class qi_SInst_qi_orqiqi<string opc, Intrinsic IntID>
+ : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, or($src2, $src3)")),
+ [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class si_SInst_si_addsis6<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s6Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, add($src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ imm:$src3))]>;
+
+class si_SInst_si_subs6si<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, sub(#$src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2,
+ IntRegs:$src3))]>;
+
+class di_ALU64_didi_neg<string opc, Intrinsic IntID>
+ : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, ~$src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>;
+
+class di_MInst_dididi_xacc<string opc, Intrinsic IntID>
+ : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2),
+ !strconcat("$dst ^= ", !strconcat(opc , "($src1, $src2)")),
+ [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1,
+ DoubleRegs:$src2))],
+ "$dst2 = $dst">;
+
+class si_MInst_sisisi_and<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst &= ", !strconcat(opc , "($src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class si_MInst_sisisi_andn<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst &= ", !strconcat(opc , "($src2, ~$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class si_SInst_sisis10_andi<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s10Imm:$src3),
+ !strconcat("$dst = ", !strconcat(opc ,
+ "($src1, and($src2, #$src3))")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2,
+ imm:$src3))]>;
+
+class si_MInst_sisisi_xor<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst ^= ", !strconcat(opc , "($src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class si_MInst_sisisi_xorn<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst ^= ", !strconcat(opc , "($src2, ~$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class si_SInst_sisis10_or<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2, s10Imm:$src3),
+ !strconcat("$dst |= ", !strconcat(opc , "($src2, #$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
+ imm:$src3))]>;
+
+class si_MInst_sisisi_or<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst |= ", !strconcat(opc , "($src2, $src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class si_MInst_sisisi_orn<string opc, Intrinsic IntID>
+ : MInst<(outs IntRegs:$dst), (ins IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3),
+ !strconcat("$dst |= ", !strconcat(opc , "($src2, ~$src3)")),
+ [(set IntRegs:$dst, (IntID IntRegs:$dst1, IntRegs:$src2,
+ IntRegs:$src3))]>;
+
+class si_SInst_siu5_sat<string opc, Intrinsic IntID>
+ : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
+ !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):sat")),
+ [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>;
+
+
+/********************************************************************
+* ALU32/ALU *
+*********************************************************************/
+
+// ALU32 / ALU / Logical Operations.
+def Hexagon_A4_orn : si_ALU32_sisi_not <"or", int_hexagon_A4_orn>;
+def Hexagon_A4_andn : si_ALU32_sisi_not <"and", int_hexagon_A4_andn>;
+
+
+/********************************************************************
+* ALU32/PERM *
+*********************************************************************/
+
+// ALU32 / PERM / Combine Words Into Doublewords.
+def Hexagon_A4_combineir : di_ALU32_s8si <"combine", int_hexagon_A4_combineir>;
+def Hexagon_A4_combineri : di_ALU32_sis8 <"combine", int_hexagon_A4_combineri>;
+
+
+/********************************************************************
+* ALU32/PRED *
+*********************************************************************/
+
+// ALU32 / PRED / Conditional Shift Halfword.
+// ALU32 / PRED / Conditional Sign Extend.
+// ALU32 / PRED / Conditional Zero Extend.
+// ALU32 / PRED / Compare.
+def Hexagon_C4_cmpneq : qi_neg_ALU32_sisi <"cmp.eq", int_hexagon_C4_cmpneq>;
+def Hexagon_C4_cmpneqi : qi_neg_ALU32_sis10 <"cmp.eq", int_hexagon_C4_cmpneqi>;
+def Hexagon_C4_cmplte : qi_neg_ALU32_sisi <"cmp.gt", int_hexagon_C4_cmplte>;
+def Hexagon_C4_cmpltei : qi_neg_ALU32_sis10 <"cmp.gt", int_hexagon_C4_cmpltei>;
+def Hexagon_C4_cmplteu : qi_neg_ALU32_sisi <"cmp.gtu",int_hexagon_C4_cmplteu>;
+def Hexagon_C4_cmplteui: qi_neg_ALU32_siu9 <"cmp.gtu",int_hexagon_C4_cmplteui>;
+
+// ALU32 / PRED / Compare To General Register.
+def Hexagon_A4_rcmpneq : si_neg_ALU32_sisi <"cmp.eq", int_hexagon_A4_rcmpneq>;
+def Hexagon_A4_rcmpneqi: si_neg_ALU32_sis8 <"cmp.eq", int_hexagon_A4_rcmpneqi>;
+def Hexagon_A4_rcmpeq : si_ALU32_sisi <"cmp.eq", int_hexagon_A4_rcmpeq>;
+def Hexagon_A4_rcmpeqi : si_ALU32_sis8 <"cmp.eq", int_hexagon_A4_rcmpeqi>;
+
+
+/********************************************************************
+* CR *
+*********************************************************************/
+
+// CR / Corner Detection Acceleration.
+def Hexagon_C4_fastcorner9:
+ qi_SInst_qiqi<"fastcorner9", int_hexagon_C4_fastcorner9>;
+def Hexagon_C4_fastcorner9_not:
+ qi_neg_SInst_qiqi<"fastcorner9",int_hexagon_C4_fastcorner9_not>;
+
+// CR / Logical Operations On Predicates.
+def Hexagon_C4_and_andn:
+ qi_SInst_qi_andqiqi_neg <"and", int_hexagon_C4_and_andn>;
+def Hexagon_C4_and_and:
+ qi_SInst_qi_andqiqi <"and", int_hexagon_C4_and_and>;
+def Hexagon_C4_and_orn:
+ qi_SInst_qi_orqiqi_neg <"and", int_hexagon_C4_and_orn>;
+def Hexagon_C4_and_or:
+ qi_SInst_qi_orqiqi <"and", int_hexagon_C4_and_or>;
+def Hexagon_C4_or_andn:
+ qi_SInst_qi_andqiqi_neg <"or", int_hexagon_C4_or_andn>;
+def Hexagon_C4_or_and:
+ qi_SInst_qi_andqiqi <"or", int_hexagon_C4_or_and>;
+def Hexagon_C4_or_orn:
+ qi_SInst_qi_orqiqi_neg <"or", int_hexagon_C4_or_orn>;
+def Hexagon_C4_or_or:
+ qi_SInst_qi_orqiqi <"or", int_hexagon_C4_or_or>;
+
+
+/********************************************************************
+* XTYPE/ALU *
+*********************************************************************/
+
+// XTYPE / ALU / Add And Accumulate.
+def Hexagon_S4_addaddi:
+ si_SInst_si_addsis6 <"add", int_hexagon_S4_addaddi>;
+def Hexagon_S4_subaddi:
+ si_SInst_si_subs6si <"add", int_hexagon_S4_subaddi>;
+
+// XTYPE / ALU / Logical Doublewords.
+def Hexagon_S4_andnp:
+ di_ALU64_didi_neg <"and", int_hexagon_A4_andnp>;
+def Hexagon_S4_ornp:
+ di_ALU64_didi_neg <"or", int_hexagon_A4_ornp>;
+
+// XTYPE / ALU / Logical-logical Doublewords.
+def Hexagon_M4_xor_xacc:
+ di_MInst_dididi_xacc <"xor", int_hexagon_M4_xor_xacc>;
+
+// XTYPE / ALU / Logical-logical Words.
+def HEXAGON_M4_and_and:
+ si_MInst_sisisi_and <"and", int_hexagon_M4_and_and>;
+def HEXAGON_M4_and_or:
+ si_MInst_sisisi_and <"or", int_hexagon_M4_and_or>;
+def HEXAGON_M4_and_xor:
+ si_MInst_sisisi_and <"xor", int_hexagon_M4_and_xor>;
+def HEXAGON_M4_and_andn:
+ si_MInst_sisisi_andn <"and", int_hexagon_M4_and_andn>;
+def HEXAGON_M4_xor_and:
+ si_MInst_sisisi_xor <"and", int_hexagon_M4_xor_and>;
+def HEXAGON_M4_xor_or:
+ si_MInst_sisisi_xor <"or", int_hexagon_M4_xor_or>;
+def HEXAGON_M4_xor_andn:
+ si_MInst_sisisi_xorn <"and", int_hexagon_M4_xor_andn>;
+def HEXAGON_M4_or_and:
+ si_MInst_sisisi_or <"and", int_hexagon_M4_or_and>;
+def HEXAGON_M4_or_or:
+ si_MInst_sisisi_or <"or", int_hexagon_M4_or_or>;
+def HEXAGON_M4_or_xor:
+ si_MInst_sisisi_or <"xor", int_hexagon_M4_or_xor>;
+def HEXAGON_M4_or_andn:
+ si_MInst_sisisi_orn <"and", int_hexagon_M4_or_andn>;
+def HEXAGON_S4_or_andix:
+ si_SInst_sisis10_andi <"or", int_hexagon_S4_or_andix>;
+def HEXAGON_S4_or_andi:
+ si_SInst_sisis10_or <"and", int_hexagon_S4_or_andi>;
+def HEXAGON_S4_or_ori:
+ si_SInst_sisis10_or <"or", int_hexagon_S4_or_ori>;
+
+// XTYPE / ALU / Modulo wrap.
+def HEXAGON_A4_modwrapu:
+ si_ALU64_sisi <"modwrap", int_hexagon_A4_modwrapu>;
+
+// XTYPE / ALU / Round.
+def HEXAGON_A4_cround_ri:
+ si_SInst_siu5 <"cround", int_hexagon_A4_cround_ri>;
+def HEXAGON_A4_cround_rr:
+ si_SInst_sisi <"cround", int_hexagon_A4_cround_rr>;
+def HEXAGON_A4_round_ri:
+ si_SInst_siu5 <"round", int_hexagon_A4_round_ri>;
+def HEXAGON_A4_round_rr:
+ si_SInst_sisi <"round", int_hexagon_A4_round_rr>;
+def HEXAGON_A4_round_ri_sat:
+ si_SInst_siu5_sat <"round", int_hexagon_A4_round_ri_sat>;
+def HEXAGON_A4_round_rr_sat:
+ si_SInst_sisi_sat <"round", int_hexagon_A4_round_rr_sat>;
+
+// XTYPE / ALU / Vector reduce add unsigned halfwords.
+// XTYPE / ALU / Vector add bytes.
+// XTYPE / ALU / Vector conditional negate.
+// XTYPE / ALU / Vector maximum bytes.
+// XTYPE / ALU / Vector reduce maximum halfwords.
+// XTYPE / ALU / Vector reduce maximum words.
+// XTYPE / ALU / Vector minimum bytes.
+// XTYPE / ALU / Vector reduce minimum halfwords.
+// XTYPE / ALU / Vector reduce minimum words.
+// XTYPE / ALU / Vector subtract bytes.
+
+
+/********************************************************************
+* XTYPE/BIT *
+*********************************************************************/
+
+// XTYPE / BIT / Count leading.
+// XTYPE / BIT / Count trailing.
+// XTYPE / BIT / Extract bitfield.
+// XTYPE / BIT / Masked parity.
+// XTYPE / BIT / Bit reverse.
+// XTYPE / BIT / Split bitfield.
+
+
+/********************************************************************
+* XTYPE/COMPLEX *
+*********************************************************************/
+
+// XTYPE / COMPLEX / Complex add/sub halfwords.
+// XTYPE / COMPLEX / Complex add/sub words.
+// XTYPE / COMPLEX / Complex multiply 32x16.
+// XTYPE / COMPLEX / Vector reduce complex rotate.
+
+
+/********************************************************************
+* XTYPE/MPY *
+*********************************************************************/
+
+// XTYPE / COMPLEX / Complex add/sub halfwords.
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h b/contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h
new file mode 100644
index 0000000..16ea7cf
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMCInst.h
@@ -0,0 +1,41 @@
+//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class extends MCInst to allow some VLIW annotation.
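+// (Usage sketch: a packetizer marks the first and last instruction of a
+// packet via setStartPacket(true)/setEndPacket(true); a printer can then
+// query isStartPacket()/isEndPacket() to emit the "{" ... "}" packet
+// brackets around the VLIW bundle.)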
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONMCINST_H
+#define HEXAGONMCINST_H
+
+#include "llvm/MC/MCInst.h"
+#include "llvm/CodeGen/MachineInstr.h"
+
+namespace llvm {
+ class HexagonMCInst: public MCInst {
+ // Packet start and end markers
+ unsigned startPacket: 1, endPacket: 1;
+ const MachineInstr *MachineI;
+ public:
+ explicit HexagonMCInst(): MCInst(),
+ startPacket(0), endPacket(0), MachineI(0) {}
+
+ const MachineInstr* getMI() const { return MachineI; }
+
+ void setMI(const MachineInstr *MI) { MachineI = MI; }
+
+ bool isStartPacket() const { return startPacket; }
+ bool isEndPacket() const { return endPacket; }
+
+ void setStartPacket(bool yes) { startPacket = yes; }
+ void setEndPacket(bool yes) { endPacket = yes; }
+ };
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
new file mode 100644
index 0000000..70bddcc
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp
@@ -0,0 +1,93 @@
+//===- HexagonMCInstLower.cpp - Convert Hexagon MachineInstr to an MCInst -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower Hexagon MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Hexagon.h"
+#include "HexagonAsmPrinter.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Target/Mangler.h"
+
+using namespace llvm;
+
+static MCOperand GetSymbolRef(const MachineOperand& MO, const MCSymbol* Symbol,
+ HexagonAsmPrinter& Printer) {
+ MCContext &MC = Printer.OutContext;
+ const MCExpr *ME;
+
+ ME = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, MC);
+
+ if (!MO.isJTI() && MO.getOffset())
+ ME = MCBinaryExpr::CreateAdd(ME, MCConstantExpr::Create(MO.getOffset(), MC),
+ MC);
+
+ return (MCOperand::CreateExpr(ME));
+}
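+
+// For a global-address operand @g with a non-zero offset, the helper above
+// yields the expression operand (g + offset); jump-table symbols never
+// carry an addend, hence the isJTI() exclusion.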
+
+// Create an MCInst from a MachineInstr
+void llvm::HexagonLowerToMC(const MachineInstr* MI, MCInst& MCI,
+ HexagonAsmPrinter& AP) {
+ MCI.setOpcode(MI->getOpcode());
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i < e; i++) {
+ const MachineOperand &MO = MI->getOperand(i);
+ MCOperand MCO;
+
+ switch (MO.getType()) {
+ default:
+ MI->dump();
+ llvm_unreachable("unknown operand type");
+ case MachineOperand::MO_Register:
+ // Ignore all implicit register operands.
+ if (MO.isImplicit()) continue;
+ MCO = MCOperand::CreateReg(MO.getReg());
+ break;
+ case MachineOperand::MO_FPImmediate: {
+ APFloat Val = MO.getFPImm()->getValueAPF();
+ // FP immediates are used only when setting GPRs, so they may be dealt
+ // with like regular immediates from this point on.
+ MCO = MCOperand::CreateImm(*Val.bitcastToAPInt().getRawData());
+ break;
+ }
+ case MachineOperand::MO_Immediate:
+ MCO = MCOperand::CreateImm(MO.getImm());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MCO = MCOperand::CreateExpr
+ (MCSymbolRefExpr::Create(MO.getMBB()->getSymbol(),
+ AP.OutContext));
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ MCO = GetSymbolRef(MO, AP.Mang->getSymbol(MO.getGlobal()), AP);
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ MCO = GetSymbolRef(MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()),
+ AP);
+ break;
+ case MachineOperand::MO_JumpTableIndex:
+ MCO = GetSymbolRef(MO, AP.GetJTISymbol(MO.getIndex()), AP);
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ MCO = GetSymbolRef(MO, AP.GetCPISymbol(MO.getIndex()), AP);
+ break;
+ case MachineOperand::MO_BlockAddress:
+ MCO = GetSymbolRef(MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()),AP);
+ break;
+ }
+
+ MCI.addOperand(MCO);
+ }
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
new file mode 100644
index 0000000..0318c519
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
@@ -0,0 +1,75 @@
+//=- HexagonMachineFunctionInfo.h - Hexagon machine function info -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONMACHINEFUNCTIONINFO_H
+#define HEXAGONMACHINEFUNCTIONINFO_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+
+ namespace Hexagon {
+ const unsigned int StartPacket = 0x1;
+ const unsigned int EndPacket = 0x2;
+ }
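+ // These flags are OR-ed into the per-instruction PacketInfo map below; an
+ // instruction that forms a packet by itself carries both bits.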
+
+
+/// Hexagon target-specific information for each MachineFunction.
+class HexagonMachineFunctionInfo : public MachineFunctionInfo {
+ // SRetReturnReg - Some subtargets require that sret lowering includes
+ // returning the value of the returned struct in a register. This field
+ // holds the virtual register into which the sret argument is passed.
+ unsigned SRetReturnReg;
+ std::vector<MachineInstr*> AllocaAdjustInsts;
+ int VarArgsFrameIndex;
+ bool HasClobberLR;
+
+ std::map<const MachineInstr*, unsigned> PacketInfo;
+
+
+public:
+ HexagonMachineFunctionInfo() : SRetReturnReg(0), VarArgsFrameIndex(0),
+ HasClobberLR(false) {}
+
+ HexagonMachineFunctionInfo(MachineFunction &MF) : SRetReturnReg(0),
+ VarArgsFrameIndex(0),
+ HasClobberLR(false) {}
+
+ unsigned getSRetReturnReg() const { return SRetReturnReg; }
+ void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
+
+ void addAllocaAdjustInst(MachineInstr* MI) {
+ AllocaAdjustInsts.push_back(MI);
+ }
+ const std::vector<MachineInstr*>& getAllocaAdjustInsts() {
+ return AllocaAdjustInsts;
+ }
+
+ void setVarArgsFrameIndex(int v) { VarArgsFrameIndex = v; }
+ int getVarArgsFrameIndex() { return VarArgsFrameIndex; }
+
+ void setStartPacket(MachineInstr* MI) {
+ PacketInfo[MI] |= Hexagon::StartPacket;
+ }
+ void setEndPacket(MachineInstr* MI) {
+ PacketInfo[MI] |= Hexagon::EndPacket;
+ }
+ bool isStartPacket(const MachineInstr* MI) const {
+ return (PacketInfo.count(MI) &&
+ (PacketInfo.find(MI)->second & Hexagon::StartPacket));
+ }
+ bool isEndPacket(const MachineInstr* MI) const {
+ return (PacketInfo.count(MI) &&
+ (PacketInfo.find(MI)->second & Hexagon::EndPacket));
+ }
+ void setHasClobberLR(bool v) { HasClobberLR = v; }
+ bool hasClobberLR() const { return HasClobberLR; }
+
+};
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
new file mode 100644
index 0000000..55cbc09
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -0,0 +1,288 @@
+//===-- HexagonPeephole.cpp - Hexagon Peephole Optimizations --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+// This peephole pass optimizes the following cases:
+// 1. Optimizes redundant sign extends for the following case
+// Transform the following pattern
+// %vreg170<def> = SXTW %vreg166
+// ...
+// %vreg176<def> = COPY %vreg170:subreg_loreg
+//
+// Into
+// %vreg176<def> = COPY vreg166
+//
+// 2. Optimizes redundant negation of predicates.
+// %vreg15<def> = CMPGTrr %vreg6, %vreg2
+// ...
+// %vreg16<def> = NOT_p %vreg15<kill>
+// ...
+// JMP_c %vreg16<kill>, <BB#1>, %PC<imp-def,dead>
+//
+// Into
+// %vreg15<def> = CMPGTrr %vreg6, %vreg2;
+// ...
+// JMP_cNot %vreg15<kill>, <BB#1>, %PC<imp-def,dead>;
+//
+// Note: The peephole pass makes instructions like
+// %vreg170<def> = SXTW %vreg166 or %vreg16<def> = NOT_p %vreg15<kill>
+// redundant, and relies on some form of dead-code removal, such as DCE or
+// DIE, to actually eliminate them.
+
+
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hexagon-peephole"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/Constants.h"
+#include "llvm/PassSupport.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include <algorithm>
+
+using namespace llvm;
+
+static cl::opt<bool> DisableHexagonPeephole("disable-hexagon-peephole",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Disable Peephole Optimization"));
+
+static cl::opt<int>
+DbgPNPCount("pnp-count", cl::init(-1), cl::Hidden,
+ cl::desc("Maximum number of P=NOT(P) to be optimized"));
+
+static cl::opt<bool> DisablePNotP("disable-hexagon-pnotp",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Disable Optimization of PNotP"));
+
+static cl::opt<bool> DisableOptSZExt("disable-hexagon-optszext",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Disable Optimization of Sign/Zero Extends"));
+
+namespace {
+ struct HexagonPeephole : public MachineFunctionPass {
+ const HexagonInstrInfo *QII;
+ const HexagonRegisterInfo *QRI;
+ const MachineRegisterInfo *MRI;
+
+ public:
+ static char ID;
+ HexagonPeephole() : MachineFunctionPass(ID) { }
+
+ bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const {
+ return "Hexagon optimize redundant zero and size extends";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ private:
+ void ChangeOpInto(MachineOperand &Dst, MachineOperand &Src);
+ };
+}
+
+char HexagonPeephole::ID = 0;
+
+bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
+
+ QII = static_cast<const HexagonInstrInfo *>(MF.getTarget().
+ getInstrInfo());
+ QRI = static_cast<const HexagonRegisterInfo *>(MF.getTarget().
+ getRegisterInfo());
+ MRI = &MF.getRegInfo();
+
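+ // Maps the def vreg of each candidate SXTW / NOT_p to its source vreg so
+ // that later uses can be rewritten to read the source directly.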
+ DenseMap<unsigned, unsigned> PeepholeMap;
+
+ if (DisableHexagonPeephole) return false;
+
+ // Loop over all of the basic blocks.
+ for (MachineFunction::iterator MBBb = MF.begin(), MBBe = MF.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock* MBB = MBBb;
+ PeepholeMap.clear();
+
+ // Traverse the basic block.
+ for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
+ ++MII) {
+ MachineInstr *MI = MII;
+ // Look for sign extends:
+ // %vreg170<def> = SXTW %vreg166
+ if (!DisableOptSZExt && MI->getOpcode() == Hexagon::SXTW) {
+ assert (MI->getNumOperands() == 2);
+ MachineOperand &Dst = MI->getOperand(0);
+ MachineOperand &Src = MI->getOperand(1);
+ unsigned DstReg = Dst.getReg();
+ unsigned SrcReg = Src.getReg();
+ // Just handle virtual registers.
+ if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
+ TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ // Map the following:
+ // %vreg170<def> = SXTW %vreg166
+ // PeepholeMap[170] = vreg166
+ PeepholeMap[DstReg] = SrcReg;
+ }
+ }
+
+ // Look for P=NOT(P).
+ if (!DisablePNotP &&
+ (MI->getOpcode() == Hexagon::NOT_p)) {
+ assert (MI->getNumOperands() == 2);
+ MachineOperand &Dst = MI->getOperand(0);
+ MachineOperand &Src = MI->getOperand(1);
+ unsigned DstReg = Dst.getReg();
+ unsigned SrcReg = Src.getReg();
+ // Just handle virtual registers.
+ if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
+ TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ // Map the following:
+ // %vreg170<def> = NOT_xx %vreg166
+ // PeepholeMap[170] = vreg166
+ PeepholeMap[DstReg] = SrcReg;
+ }
+ }
+
+ // Look for copy:
+ // %vreg176<def> = COPY %vreg170:subreg_loreg
+ if (!DisableOptSZExt && MI->isCopy()) {
+ assert (MI->getNumOperands() == 2);
+ MachineOperand &Dst = MI->getOperand(0);
+ MachineOperand &Src = MI->getOperand(1);
+
+ // Make sure we are copying the lower 32 bits.
+ if (Src.getSubReg() != Hexagon::subreg_loreg)
+ continue;
+
+ unsigned DstReg = Dst.getReg();
+ unsigned SrcReg = Src.getReg();
+ if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
+ TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ // Try to find in the map.
+ if (unsigned PeepholeSrc = PeepholeMap.lookup(SrcReg)) {
+ // Change the 1st operand.
+ MI->RemoveOperand(1);
+ MI->addOperand(MachineOperand::CreateReg(PeepholeSrc, false));
+ }
+ }
+ }
+
+ // Look for Predicated instructions.
+ if (!DisablePNotP) {
+ bool Done = false;
+ if (QII->isPredicated(MI)) {
+ MachineOperand &Op0 = MI->getOperand(0);
+ unsigned Reg0 = Op0.getReg();
+ const TargetRegisterClass *RC0 = MRI->getRegClass(Reg0);
+ if (RC0->getID() == Hexagon::PredRegsRegClassID) {
+ // Handle instructions that have a predicate register in op0
+ // (most cases of predicable instructions).
+ if (TargetRegisterInfo::isVirtualRegister(Reg0)) {
+ // Try to find in the map.
+ if (unsigned PeepholeSrc = PeepholeMap.lookup(Reg0)) {
+ // Change the 1st operand and flip the opcode.
+ MI->getOperand(0).setReg(PeepholeSrc);
+ int NewOp = QII->getInvertedPredicatedOpcode(MI->getOpcode());
+ MI->setDesc(QII->get(NewOp));
+ Done = true;
+ }
+ }
+ }
+ }
+
+ if (!Done) {
+ // Handle special instructions.
+ unsigned Op = MI->getOpcode();
+ unsigned NewOp = 0;
+ unsigned PR = 1, S1 = 2, S2 = 3; // Operand indices.
+
+ switch (Op) {
+ case Hexagon::TFR_condset_rr:
+ case Hexagon::TFR_condset_ii:
+ case Hexagon::MUX_ii:
+ case Hexagon::MUX_rr:
+ NewOp = Op;
+ break;
+ case Hexagon::TFR_condset_ri:
+ NewOp = Hexagon::TFR_condset_ir;
+ break;
+ case Hexagon::TFR_condset_ir:
+ NewOp = Hexagon::TFR_condset_ri;
+ break;
+ case Hexagon::MUX_ri:
+ NewOp = Hexagon::MUX_ir;
+ break;
+ case Hexagon::MUX_ir:
+ NewOp = Hexagon::MUX_ri;
+ break;
+ }
+ if (NewOp) {
+ unsigned PSrc = MI->getOperand(PR).getReg();
+ if (unsigned POrig = PeepholeMap.lookup(PSrc)) {
+ MI->getOperand(PR).setReg(POrig);
+ MI->setDesc(QII->get(NewOp));
+ // Swap operands S1 and S2.
+ MachineOperand Op1 = MI->getOperand(S1);
+ MachineOperand Op2 = MI->getOperand(S2);
+ ChangeOpInto(MI->getOperand(S1), Op2);
+ ChangeOpInto(MI->getOperand(S2), Op1);
+ }
+ } // if (NewOp)
+ } // if (!Done)
+
+ } // if (!DisablePNotP)
+
+ } // Instruction
+ } // Basic Block
+ return true;
+}
+
+void HexagonPeephole::ChangeOpInto(MachineOperand &Dst, MachineOperand &Src) {
+ assert (&Dst != &Src && "Cannot duplicate into itself");
+ switch (Dst.getType()) {
+ case MachineOperand::MO_Register:
+ if (Src.isReg()) {
+ Dst.setReg(Src.getReg());
+ } else if (Src.isImm()) {
+ Dst.ChangeToImmediate(Src.getImm());
+ } else {
+ llvm_unreachable("Unexpected src operand type");
+ }
+ break;
+
+ case MachineOperand::MO_Immediate:
+ if (Src.isImm()) {
+ Dst.setImm(Src.getImm());
+ } else if (Src.isReg()) {
+ Dst.ChangeToRegister(Src.getReg(), Src.isDef(), Src.isImplicit(),
+ Src.isKill(), Src.isDead(), Src.isUndef(),
+ Src.isDebug());
+ } else {
+ llvm_unreachable("Unexpected src operand type");
+ }
+ break;
+
+ default:
+ llvm_unreachable("Unexpected dst operand type");
+ break;
+ }
+}
+
+FunctionPass *llvm::createHexagonPeephole() {
+ return new HexagonPeephole();
+}
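The peephole above keys PeepholeMap on the destination vreg of a sign-extend and later redirects low-subregister copies at the pre-extension source. A minimal standalone sketch of that bookkeeping, with hypothetical register numbers and a plain std::map standing in for the pass's structures (not code from this patch):

#include <cstdio>
#include <map>

using Reg = unsigned; // stand-in for a virtual-register number

int main() {
  std::map<Reg, Reg> PeepholeMap;

  // %vreg170<def> = SXTW %vreg166 -> remember the pre-extension source.
  PeepholeMap[170] = 166;

  // %vreg176<def> = COPY %vreg170:subreg_loreg
  // The low 32 bits of the extension equal the original value, so the
  // copy can read %vreg166 directly.
  Reg CopySrc = 170;
  std::map<Reg, Reg>::const_iterator It = PeepholeMap.find(CopySrc);
  if (It != PeepholeMap.end())
    CopySrc = It->second;

  std::printf("%%vreg176 = COPY %%vreg%u\n", CopySrc); // prints %vreg166
  return 0;
}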
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
new file mode 100644
index 0000000..2a9de92
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -0,0 +1,315 @@
+//===-- HexagonRegisterInfo.cpp - Hexagon Register Information ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Hexagon implementation of the TargetRegisterInfo
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonRegisterInfo.h"
+#include "Hexagon.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/Function.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+
+HexagonRegisterInfo::HexagonRegisterInfo(HexagonSubtarget &st,
+ const HexagonInstrInfo &tii)
+ : HexagonGenRegisterInfo(Hexagon::R31),
+ Subtarget(st),
+ TII(tii) {
+}
+
+const uint16_t* HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction
+ *MF)
+ const {
+ static const uint16_t CalleeSavedRegsV2[] = {
+ Hexagon::R24, Hexagon::R25, Hexagon::R26, Hexagon::R27, 0
+ };
+ static const uint16_t CalleeSavedRegsV3[] = {
+ Hexagon::R16, Hexagon::R17, Hexagon::R18, Hexagon::R19,
+ Hexagon::R20, Hexagon::R21, Hexagon::R22, Hexagon::R23,
+ Hexagon::R24, Hexagon::R25, Hexagon::R26, Hexagon::R27, 0
+ };
+
+ switch(Subtarget.getHexagonArchVersion()) {
+ case HexagonSubtarget::V1:
+ break;
+ case HexagonSubtarget::V2:
+ return CalleeSavedRegsV2;
+ case HexagonSubtarget::V3:
+ case HexagonSubtarget::V4:
+ return CalleeSavedRegsV3;
+ }
+ llvm_unreachable("Callee saved registers requested for unknown architecture "
+ "version");
+}
+
+BitVector HexagonRegisterInfo::getReservedRegs(const MachineFunction &MF)
+ const {
+ BitVector Reserved(getNumRegs());
+ Reserved.set(HEXAGON_RESERVED_REG_1);
+ Reserved.set(HEXAGON_RESERVED_REG_2);
+ Reserved.set(Hexagon::R29);
+ Reserved.set(Hexagon::R30);
+ Reserved.set(Hexagon::R31);
+ Reserved.set(Hexagon::D14);
+ Reserved.set(Hexagon::D15);
+ Reserved.set(Hexagon::LC0);
+ Reserved.set(Hexagon::LC1);
+ Reserved.set(Hexagon::SA0);
+ Reserved.set(Hexagon::SA1);
+ return Reserved;
+}
+
+
+const TargetRegisterClass* const*
+HexagonRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
+ static const TargetRegisterClass * const CalleeSavedRegClassesV2[] = {
+ &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
+ &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
+ };
+ static const TargetRegisterClass * const CalleeSavedRegClassesV3[] = {
+ &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
+ &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
+ &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
+ &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
+ &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
+ &Hexagon::IntRegsRegClass, &Hexagon::IntRegsRegClass,
+ };
+
+ switch(Subtarget.getHexagonArchVersion()) {
+ case HexagonSubtarget::V1:
+ break;
+ case HexagonSubtarget::V2:
+ return CalleeSavedRegClassesV2;
+ case HexagonSubtarget::V3:
+ case HexagonSubtarget::V4:
+ return CalleeSavedRegClassesV3;
+ }
+ llvm_unreachable("Callee saved register classes requested for unknown "
+ "architecture version");
+}
+
+void HexagonRegisterInfo::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ MachineInstr &MI = *I;
+
+ if (MI.getOpcode() == Hexagon::ADJCALLSTACKDOWN) {
+ // Hexagon_TODO: add code
+ } else if (MI.getOpcode() == Hexagon::ADJCALLSTACKUP) {
+ // Hexagon_TODO: add code
+ } else {
+ llvm_unreachable("Cannot handle this call frame pseudo instruction");
+ }
+ MBB.erase(I);
+}
+
+void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, RegScavenger *RS) const {
+
+ // Hexagon_TODO: Do we need to enforce this for Hexagon?
+ assert(SPAdj == 0 && "Unexpected");
+
+
+ unsigned i = 0;
+ MachineInstr &MI = *II;
+ while (!MI.getOperand(i).isFI()) {
+ ++i;
+ assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
+ }
+
+ int FrameIndex = MI.getOperand(i).getIndex();
+
+ // Addressable stack objects are accessed using negative offsets from %fp.
+ MachineFunction &MF = *MI.getParent()->getParent();
+ int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+
+ unsigned FrameReg = getFrameRegister(MF);
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ if (!TFI->hasFP(MF)) {
+ // We will not reserve space on the stack for the lr and fp registers.
+ Offset -= 2 * Hexagon_WordSize;
+ }
+
+ const unsigned FrameSize = MFI.getStackSize();
+
+ if (!MFI.hasVarSizedObjects() &&
+ TII.isValidOffset(MI.getOpcode(), (FrameSize+Offset)) &&
+ !TII.isSpillPredRegOp(&MI)) {
+ // Replace frame index with a stack pointer reference.
+ MI.getOperand(i).ChangeToRegister(getStackRegister(), false, false, true);
+ MI.getOperand(i+1).ChangeToImmediate(FrameSize+Offset);
+ } else {
+ // Replace frame index with a frame pointer reference.
+ if (!TII.isValidOffset(MI.getOpcode(), Offset)) {
+
+ // If the offset overflows, then correct it.
+ //
+ // For loads, we do not need a reserved register; change
+ // r0 = memw(r30 + #10000) to:
+ //
+ // r0 = add(r30, #10000)
+ // r0 = memw(r0)
+ if ( (MI.getOpcode() == Hexagon::LDriw) ||
+ (MI.getOpcode() == Hexagon::LDrid) ||
+ (MI.getOpcode() == Hexagon::LDrih) ||
+ (MI.getOpcode() == Hexagon::LDriuh) ||
+ (MI.getOpcode() == Hexagon::LDrib) ||
+ (MI.getOpcode() == Hexagon::LDriub) ) {
+ unsigned dstReg = (MI.getOpcode() == Hexagon::LDrid) ?
+ *getSubRegisters(MI.getOperand(0).getReg()) :
+ MI.getOperand(0).getReg();
+
+ // Check if offset can fit in addi.
+ if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) {
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::CONST32_Int_Real), dstReg).addImm(Offset);
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::ADD_rr),
+ dstReg).addReg(FrameReg).addReg(dstReg);
+ } else {
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::ADD_ri),
+ dstReg).addReg(FrameReg).addImm(Offset);
+ }
+
+ MI.getOperand(i).ChangeToRegister(dstReg, false, false, true);
+ MI.getOperand(i+1).ChangeToImmediate(0);
+ } else if ((MI.getOpcode() == Hexagon::STriw) ||
+ (MI.getOpcode() == Hexagon::STrid) ||
+ (MI.getOpcode() == Hexagon::STrih) ||
+ (MI.getOpcode() == Hexagon::STrib)) {
+ // For stores, we need a reserved register. Change
+ // memw(r30 + #10000) = r0 to:
+ //
+ // rs = add(r30, #10000);
+ // memw(rs) = r0
+ unsigned resReg = HEXAGON_RESERVED_REG_1;
+
+ // Check if offset can fit in addi.
+ if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) {
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::CONST32_Int_Real), resReg).addImm(Offset);
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::ADD_rr),
+ resReg).addReg(FrameReg).addReg(resReg);
+ } else {
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::ADD_ri),
+ resReg).addReg(FrameReg).addImm(Offset);
+ }
+ MI.getOperand(i).ChangeToRegister(resReg, false, false, true);
+ MI.getOperand(i+1).ChangeToImmediate(0);
+ } else if (TII.isMemOp(&MI)) {
+ unsigned resReg = HEXAGON_RESERVED_REG_1;
+ if (!MFI.hasVarSizedObjects() &&
+ TII.isValidOffset(MI.getOpcode(), (FrameSize+Offset))) {
+ MI.getOperand(i).ChangeToRegister(getStackRegister(), false, false,
+ true);
+ MI.getOperand(i+1).ChangeToImmediate(FrameSize+Offset);
+ } else if (!TII.isValidOffset(Hexagon::ADD_ri, Offset)) {
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::CONST32_Int_Real), resReg).addImm(Offset);
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::ADD_rr),
+ resReg).addReg(FrameReg).addReg(resReg);
+ MI.getOperand(i).ChangeToRegister(resReg, false, false, true);
+ MI.getOperand(i+1).ChangeToImmediate(0);
+ } else {
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::ADD_ri),
+ resReg).addReg(FrameReg).addImm(Offset);
+ MI.getOperand(i).ChangeToRegister(resReg, false, false, true);
+ MI.getOperand(i+1).ChangeToImmediate(0);
+ }
+ } else {
+ unsigned dstReg = MI.getOperand(0).getReg();
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::CONST32_Int_Real), dstReg).addImm(Offset);
+ BuildMI(*MI.getParent(), II, MI.getDebugLoc(),
+ TII.get(Hexagon::ADD_rr),
+ dstReg).addReg(FrameReg).addReg(dstReg);
+ // Can we delete MI??? r2 = add (r2, #0).
+ MI.getOperand(i).ChangeToRegister(dstReg, false, false, true);
+ MI.getOperand(i+1).ChangeToImmediate(0);
+ }
+ } else {
+ // If the offset is small enough to fit in the immediate field, directly
+ // encode it.
+ MI.getOperand(i).ChangeToRegister(FrameReg, false);
+ MI.getOperand(i+1).ChangeToImmediate(Offset);
+ }
+ }
+
+}
+
+unsigned HexagonRegisterInfo::getRARegister() const {
+ return Hexagon::R31;
+}
+
+unsigned HexagonRegisterInfo::getFrameRegister(const MachineFunction
+ &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ if (TFI->hasFP(MF)) {
+ return Hexagon::R30;
+ }
+
+ return Hexagon::R29;
+}
+
+unsigned HexagonRegisterInfo::getFrameRegister() const {
+ return Hexagon::R30;
+}
+
+unsigned HexagonRegisterInfo::getStackRegister() const {
+ return Hexagon::R29;
+}
+
+void HexagonRegisterInfo::getInitialFrameState(std::vector<MachineMove>
+ &Moves) const
+{
+ // VirtualFP = (R30 + #0).
+ unsigned FPReg = getFrameRegister();
+ MachineLocation Dst(MachineLocation::VirtualFP);
+ MachineLocation Src(FPReg, 0);
+ Moves.push_back(MachineMove(0, Dst, Src));
+}
+
+unsigned HexagonRegisterInfo::getEHExceptionRegister() const {
+ llvm_unreachable("What is the exception register");
+}
+
+unsigned HexagonRegisterInfo::getEHHandlerRegister() const {
+ llvm_unreachable("What is the exception handler register");
+}
+
+#define GET_REGINFO_TARGET_DESC
+#include "HexagonGenRegisterInfo.inc"
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
new file mode 100644
index 0000000..6cf727b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.h
@@ -0,0 +1,90 @@
+//==- HexagonRegisterInfo.h - Hexagon Register Information Impl --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Hexagon implementation of the TargetRegisterInfo
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HexagonREGISTERINFO_H
+#define HexagonREGISTERINFO_H
+
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/MC/MachineLocation.h"
+
+#define GET_REGINFO_HEADER
+#include "HexagonGenRegisterInfo.inc"
+
+//
+// We try not to hard-code the reserved registers in our code,
+// so the following two macros were defined. However, there
+// are still a few places where R11 and R10 are hard-wired.
+// See below. If, in the future, we decide to change the reserved
+// registers, don't forget to update the following places:
+//
+// 1. the "Defs" set of STriw_pred in HexagonInstrInfo.td
+// 2. the "Defs" set of LDri_pred in HexagonInstrInfo.td
+// 3. the definition of "IntRegs" in HexagonRegisterInfo.td
+// 4. the definition of "DoubleRegs" in HexagonRegisterInfo.td
+//
+#define HEXAGON_RESERVED_REG_1 Hexagon::R10
+#define HEXAGON_RESERVED_REG_2 Hexagon::R11
+
+namespace llvm {
+
+class HexagonSubtarget;
+class HexagonInstrInfo;
+class Type;
+
+struct HexagonRegisterInfo : public HexagonGenRegisterInfo {
+ HexagonSubtarget &Subtarget;
+ const HexagonInstrInfo &TII;
+
+ HexagonRegisterInfo(HexagonSubtarget &st, const HexagonInstrInfo &tii);
+
+ /// Code Generation virtual methods...
+ const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+
+ const TargetRegisterClass* const* getCalleeSavedRegClasses(
+ const MachineFunction *MF = 0) const;
+
+ BitVector getReservedRegs(const MachineFunction &MF) const;
+
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+
+ void eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, RegScavenger *RS = NULL) const;
+
+ /// determineFrameLayout - Determine the size of the frame and maximum call
+ /// frame size.
+ void determineFrameLayout(MachineFunction &MF) const;
+
+ /// requiresRegisterScavenging - returns true since we may need scavenging for
+ /// a temporary register when generating hardware loop instructions.
+ bool requiresRegisterScavenging(const MachineFunction &MF) const {
+ return true;
+ }
+
+ // Debug information queries.
+ unsigned getRARegister() const;
+ unsigned getFrameRegister(const MachineFunction &MF) const;
+ unsigned getFrameRegister() const;
+ void getInitialFrameState(std::vector<MachineMove> &Moves) const;
+ unsigned getStackRegister() const;
+
+ // Exception handling queries.
+ unsigned getEHExceptionRegister() const;
+ unsigned getEHHandlerRegister() const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
new file mode 100644
index 0000000..d44eae3
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -0,0 +1,167 @@
+//===-- HexagonRegisterInfo.td - Hexagon Register defs -----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Declarations that describe the Hexagon register file.
+//===----------------------------------------------------------------------===//
+
+let Namespace = "Hexagon" in {
+
+ class HexagonReg<string n> : Register<n> {
+ field bits<5> Num;
+ }
+
+ class HexagonDoubleReg<string n, list<Register> subregs> :
+ RegisterWithSubRegs<n, subregs> {
+ field bits<5> Num;
+ }
+
+ // Registers are identified with 5-bit ID numbers.
+ // Ri - 32-bit integer registers.
+ class Ri<bits<5> num, string n> : HexagonReg<n> {
+ let Num = num;
+ }
+
+ // Rf - 32-bit floating-point registers.
+ class Rf<bits<5> num, string n> : HexagonReg<n> {
+ let Num = num;
+ }
+
+
+ // Rd - 64-bit registers.
+ class Rd<bits<5> num, string n, list<Register> subregs> :
+ HexagonDoubleReg<n, subregs> {
+ let Num = num;
+ let SubRegs = subregs;
+ }
+
+ // Rp - predicate registers
+ class Rp<bits<5> num, string n> : HexagonReg<n> {
+ let Num = num;
+ }
+
+ // Rc - control registers
+ class Rc<bits<5> num, string n> : HexagonReg<n> {
+ let Num = num;
+ }
+
+ // Rj - aliased integer registers
+ class Rj<string n, Ri R>: HexagonReg<n> {
+ let Num = R.Num;
+ let Aliases = [R];
+ }
+
+ def subreg_loreg : SubRegIndex;
+ def subreg_hireg : SubRegIndex;
+
+ // Integer registers.
+ def R0 : Ri< 0, "r0">, DwarfRegNum<[0]>;
+ def R1 : Ri< 1, "r1">, DwarfRegNum<[1]>;
+ def R2 : Ri< 2, "r2">, DwarfRegNum<[2]>;
+ def R3 : Ri< 3, "r3">, DwarfRegNum<[3]>;
+ def R4 : Ri< 4, "r4">, DwarfRegNum<[4]>;
+ def R5 : Ri< 5, "r5">, DwarfRegNum<[5]>;
+ def R6 : Ri< 6, "r6">, DwarfRegNum<[6]>;
+ def R7 : Ri< 7, "r7">, DwarfRegNum<[7]>;
+ def R8 : Ri< 8, "r8">, DwarfRegNum<[8]>;
+ def R9 : Ri< 9, "r9">, DwarfRegNum<[9]>;
+ def R10 : Ri<10, "r10">, DwarfRegNum<[10]>;
+ def R11 : Ri<11, "r11">, DwarfRegNum<[11]>;
+ def R12 : Ri<12, "r12">, DwarfRegNum<[12]>;
+ def R13 : Ri<13, "r13">, DwarfRegNum<[13]>;
+ def R14 : Ri<14, "r14">, DwarfRegNum<[14]>;
+ def R15 : Ri<15, "r15">, DwarfRegNum<[15]>;
+ def R16 : Ri<16, "r16">, DwarfRegNum<[16]>;
+ def R17 : Ri<17, "r17">, DwarfRegNum<[17]>;
+ def R18 : Ri<18, "r18">, DwarfRegNum<[18]>;
+ def R19 : Ri<19, "r19">, DwarfRegNum<[19]>;
+ def R20 : Ri<20, "r20">, DwarfRegNum<[20]>;
+ def R21 : Ri<21, "r21">, DwarfRegNum<[21]>;
+ def R22 : Ri<22, "r22">, DwarfRegNum<[22]>;
+ def R23 : Ri<23, "r23">, DwarfRegNum<[23]>;
+ def R24 : Ri<24, "r24">, DwarfRegNum<[24]>;
+ def R25 : Ri<25, "r25">, DwarfRegNum<[25]>;
+ def R26 : Ri<26, "r26">, DwarfRegNum<[26]>;
+ def R27 : Ri<27, "r27">, DwarfRegNum<[27]>;
+ def R28 : Ri<28, "r28">, DwarfRegNum<[28]>;
+ def R29 : Ri<29, "r29">, DwarfRegNum<[29]>;
+ def R30 : Ri<30, "r30">, DwarfRegNum<[30]>;
+ def R31 : Ri<31, "r31">, DwarfRegNum<[31]>;
+
+ def SP : Rj<"sp", R29>, DwarfRegNum<[29]>;
+ def FP : Rj<"fp", R30>, DwarfRegNum<[30]>;
+ def LR : Rj<"lr", R31>, DwarfRegNum<[31]>;
+
+ // Aliases of the R* registers used to hold 64-bit int values (doubles).
+ let SubRegIndices = [subreg_loreg, subreg_hireg], CoveredBySubRegs = 1 in {
+ def D0 : Rd< 0, "r1:0", [R0, R1]>, DwarfRegNum<[32]>;
+ def D1 : Rd< 2, "r3:2", [R2, R3]>, DwarfRegNum<[34]>;
+ def D2 : Rd< 4, "r5:4", [R4, R5]>, DwarfRegNum<[36]>;
+ def D3 : Rd< 6, "r7:6", [R6, R7]>, DwarfRegNum<[38]>;
+ def D4 : Rd< 8, "r9:8", [R8, R9]>, DwarfRegNum<[40]>;
+ def D5 : Rd<10, "r11:10", [R10, R11]>, DwarfRegNum<[42]>;
+ def D6 : Rd<12, "r13:12", [R12, R13]>, DwarfRegNum<[44]>;
+ def D7 : Rd<14, "r15:14", [R14, R15]>, DwarfRegNum<[46]>;
+ def D8 : Rd<16, "r17:16", [R16, R17]>, DwarfRegNum<[48]>;
+ def D9 : Rd<18, "r19:18", [R18, R19]>, DwarfRegNum<[50]>;
+ def D10 : Rd<20, "r21:20", [R20, R21]>, DwarfRegNum<[52]>;
+ def D11 : Rd<22, "r23:22", [R22, R23]>, DwarfRegNum<[54]>;
+ def D12 : Rd<24, "r25:24", [R24, R25]>, DwarfRegNum<[56]>;
+ def D13 : Rd<26, "r27:26", [R26, R27]>, DwarfRegNum<[58]>;
+ def D14 : Rd<28, "r29:28", [R28, R29]>, DwarfRegNum<[60]>;
+ def D15 : Rd<30, "r31:30", [R30, R31]>, DwarfRegNum<[62]>;
+ }
+
+ // Predicate registers.
+ def P0 : Rp<0, "p0">, DwarfRegNum<[63]>;
+ def P1 : Rp<1, "p1">, DwarfRegNum<[64]>;
+ def P2 : Rp<2, "p2">, DwarfRegNum<[65]>;
+ def P3 : Rp<3, "p3">, DwarfRegNum<[66]>;
+
+ // Control registers.
+ def SA0 : Rc<0, "sa0">, DwarfRegNum<[67]>;
+ def LC0 : Rc<1, "lc0">, DwarfRegNum<[68]>;
+
+ def SA1 : Rc<2, "sa1">, DwarfRegNum<[69]>;
+ def LC1 : Rc<3, "lc1">, DwarfRegNum<[70]>;
+
+ def PC : Rc<9, "pc">, DwarfRegNum<[32]>; // is the Dwarf number correct?
+ def GP : Rc<11, "gp">, DwarfRegNum<[33]>; // is the Dwarf number correct?
+}
+
+// Register classes.
+//
+// FIXME: the register order should be defined in terms of the preferred
+// allocation order...
+//
+def IntRegs : RegisterClass<"Hexagon", [i32], 32,
+ (add (sequence "R%u", 0, 9),
+ (sequence "R%u", 12, 28),
+ R10, R11, R29, R30, R31)> {
+}
+
+
+
+def DoubleRegs : RegisterClass<"Hexagon", [i64], 64,
+ (add (sequence "D%u", 0, 4),
+ (sequence "D%u", 6, 13), D5, D14, D15)> {
+ let SubRegClasses = [(IntRegs subreg_loreg, subreg_hireg)];
+}
+
+
+def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))>
+{
+ let Size = 32;
+}
+
+def CRRegs : RegisterClass<"Hexagon", [i32], 32,
+ (add (sequence "LC%u", 0, 1),
+ (sequence "SA%u", 0, 1), PC, GP)> {
+ let Size = 32;
+}
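In the register file above, each 64-bit Dn covers R(2n) as subreg_loreg and R(2n+1) as subreg_hireg, and its DWARF number is 32 + 2n. A quick check of that mapping (illustrative only):

#include <cstdio>

int main() {
  for (int N = 0; N <= 15; ++N) {
    int Lo = 2 * N, Hi = 2 * N + 1; // subreg_loreg / subreg_hireg
    int Dwarf = 32 + 2 * N;         // matches the DwarfRegNum entries above
    std::printf("D%-2d = r%d:%d   dwarf=%d\n", N, Hi, Lo, Dwarf);
  }
  return 0;
}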
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
new file mode 100644
index 0000000..66a00e1
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
@@ -0,0 +1,82 @@
+//===- HexagonRemoveExtendArgs.cpp - Remove unnecessary argument sign extends =//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Pass that removes sign extends for function parameters. These parameters
+// are already sign extended by the caller per the Hexagon ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonTargetMachine.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/Transforms/Scalar.h"
+
+using namespace llvm;
+namespace {
+ struct HexagonRemoveExtendArgs : public FunctionPass {
+ public:
+ static char ID;
+ HexagonRemoveExtendArgs() : FunctionPass(ID) {}
+ virtual bool runOnFunction(Function &F);
+
+ const char *getPassName() const {
+ return "Remove sign extends";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<MachineFunctionAnalysis>();
+ AU.addPreserved<MachineFunctionAnalysis>();
+ FunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+char HexagonRemoveExtendArgs::ID = 0;
+static RegisterPass<HexagonRemoveExtendArgs> X("reargs",
+ "Remove Sign and Zero Extends for Args");
+
+
+
+bool HexagonRemoveExtendArgs::runOnFunction(Function &F) {
+ unsigned Idx = 1;
+ for (Function::arg_iterator AI = F.arg_begin(), AE = F.arg_end(); AI != AE;
+ ++AI, ++Idx) {
+ if (F.paramHasAttr(Idx, Attribute::SExt)) {
+ Argument* Arg = AI;
+ if (!isa<PointerType>(Arg->getType())) {
+ for (Instruction::use_iterator UI = Arg->use_begin();
+ UI != Arg->use_end();) {
+ if (isa<SExtInst>(*UI)) {
+ Instruction* Use = cast<Instruction>(*UI);
+ SExtInst* SI = new SExtInst(Arg, Use->getType());
+ assert (EVT::getEVT(SI->getType()) ==
+ (EVT::getEVT(Use->getType())));
+ ++UI;
+ Use->replaceAllUsesWith(SI);
+ Instruction* First = F.getEntryBlock().begin();
+ SI->insertBefore(First);
+ Use->eraseFromParent();
+ } else {
+ ++UI;
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
+
+
+FunctionPass *llvm::createHexagonRemoveExtendOps(HexagonTargetMachine &TM) {
+ return new HexagonRemoveExtendArgs();
+}
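The premise of the pass above is that the Hexagon calling convention already sign-extends sub-word arguments into full registers, so a sign-extension of such a parameter inside the callee repeats work the caller has done. In C terms (a hypothetical illustration, not part of the patch):

#include <cstdio>

// On Hexagon the caller has already sign-extended the 8-bit argument
// into a full 32-bit register, so this widening cast should cost the
// callee nothing once the redundant extension is removed.
int widen(signed char c) {
  return (int)c;
}

int main() {
  std::printf("%d\n", widen((signed char)-5)); // prints -5
  return 0;
}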
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
new file mode 100644
index 0000000..c488796
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSchedule.td
@@ -0,0 +1,54 @@
+//===- HexagonSchedule.td - Hexagon Scheduling Definitions -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Functional Units
+def LUNIT : FuncUnit;
+def LSUNIT : FuncUnit;
+def MUNIT : FuncUnit;
+def SUNIT : FuncUnit;
+
+// Itinerary classes
+def ALU32 : InstrItinClass;
+def ALU64 : InstrItinClass;
+def CR : InstrItinClass;
+def J : InstrItinClass;
+def JR : InstrItinClass;
+def LD : InstrItinClass;
+def M : InstrItinClass;
+def ST : InstrItinClass;
+def S : InstrItinClass;
+def SYS : InstrItinClass;
+def MARKER : InstrItinClass;
+def PSEUDO : InstrItinClass;
+
+def HexagonItineraries :
+ ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
+ InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
+ InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
+ InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
+ InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
+ InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
+ InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
+ InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
+ InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
+ InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
+ InstrItinData<SYS , [InstrStage<1, [LSUNIT]>]>,
+ InstrItinData<MARKER , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
+ InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
+ ]>;
+
+//===----------------------------------------------------------------------===//
+// V4 Machine Info +
+//===----------------------------------------------------------------------===//
+
+include "HexagonScheduleV4.td"
+
+//===----------------------------------------------------------------------===//
+// V4 Machine Info -
+//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
new file mode 100644
index 0000000..1d82dbb
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -0,0 +1,59 @@
+//=-HexagonScheduleV4.td - HexagonV4 Scheduling Definitions --*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// There are four SLOTS (four parallel pipelines) in the Hexagon V4 machine.
+// This file describes that machine information.
+
+//
+// |===========|==================================================|
+// | PIPELINE | Instruction Classes |
+// |===========|==================================================|
+// | SLOT0 | LD ST ALU32 MEMOP NV SYSTEM |
+// |-----------|--------------------------------------------------|
+// | SLOT1 | LD ST ALU32 |
+// |-----------|--------------------------------------------------|
+// | SLOT2 | XTYPE ALU32 J JR |
+// |-----------|--------------------------------------------------|
+// | SLOT3 | XTYPE ALU32 J CR |
+// |===========|==================================================|
+
+// Functional Units.
+def SLOT0 : FuncUnit;
+def SLOT1 : FuncUnit;
+def SLOT2 : FuncUnit;
+def SLOT3 : FuncUnit;
+
+// Itinerary classes.
+def NV_V4 : InstrItinClass;
+def MEM_V4 : InstrItinClass;
+// The ALU64/M/S instruction classes of V2 are collectively known as XTYPE in V4.
+def PREFIX : InstrItinClass;
+
+def HexagonItinerariesV4 :
+ ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3], [], [
+ InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<CR , [InstrStage<1, [SLOT3]>]>,
+ InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<JR , [InstrStage<1, [SLOT2]>]>,
+ InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<SYS , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<MARKER , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>
+ ]>;
+
+//===----------------------------------------------------------------------===//
+// Hexagon V4 Resource Definitions -
+//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSelectCCInfo.td b/contrib/llvm/lib/Target/Hexagon/HexagonSelectCCInfo.td
new file mode 100644
index 0000000..d8feb89
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSelectCCInfo.td
@@ -0,0 +1,121 @@
+//===-- HexagonSelectCCInfo.td - Selectcc mappings ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+//
+// selectcc mappings.
+//
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETEQ)),
+ (i32 (MUX_rr (i1 (CMPEQrr IntRegs:$lhs, IntRegs:$rhs)),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETNE)),
+ (i32 (MUX_rr (i1 (NOT_p (CMPEQrr IntRegs:$lhs, IntRegs:$rhs))),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETGT)),
+ (i32 (MUX_rr (i1 (CMPGTrr IntRegs:$lhs, IntRegs:$rhs)),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETUGT)),
+ (i32 (MUX_rr (i1 (CMPGTUrr IntRegs:$lhs, IntRegs:$rhs)),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+
+
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETULT)),
+ (i32 (MUX_rr (i1 (NOT_p (CMPGTUrr IntRegs:$lhs,
+ (ADD_ri IntRegs:$rhs, -1)))),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETLT)),
+ (i32 (MUX_rr (i1 (NOT_p (CMPGTrr IntRegs:$lhs,
+ (ADD_ri IntRegs:$rhs, -1)))),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETLE)),
+ (i32 (MUX_rr (i1 (NOT_p (CMPGTrr IntRegs:$lhs, IntRegs:$rhs))),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETULE)),
+ (i32 (MUX_rr (i1 (NOT_p (CMPGTUrr IntRegs:$lhs, IntRegs:$rhs))),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+
+//
+// selectcc mappings for greater-equal-to Rs => greater-than Rs-1.
+//
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETGE)),
+ (i32 (MUX_rr (i1 (CMPGTrr IntRegs:$lhs, (ADD_ri IntRegs:$rhs, -1))),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+def : Pat <(i32 (selectcc IntRegs:$lhs, IntRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETUGE)),
+ (i32 (MUX_rr (i1 (CMPGTUrr IntRegs:$lhs, (ADD_ri IntRegs:$rhs, -1))),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+
+
+//
+// selectcc mappings for predicate comparisons.
+//
+// Convert Rd = selectcc(p0, p1, true_val, false_val, SETEQ) into:
+// pt = not(p1 xor p2)
+// Rd = mux(pt, true_val, false_val)
+// and similarly for SETNE
+//
+def : Pat <(i32 (selectcc PredRegs:$lhs, PredRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETNE)),
+ (i32 (MUX_rr (i1 (XOR_pp PredRegs:$lhs, PredRegs:$rhs)), IntRegs:$tval,
+ IntRegs:$fval))>;
+
+def : Pat <(i32 (selectcc PredRegs:$lhs, PredRegs:$rhs, IntRegs:$tval,
+ IntRegs:$fval, SETEQ)),
+ (i32 (MUX_rr (i1 (NOT_p (XOR_pp PredRegs:$lhs, PredRegs:$rhs))),
+ IntRegs:$tval, IntRegs:$fval))>;
+
+
+//
+// selectcc mappings for 64-bit operands are messy. Hexagon does not have a
+// 64-bit MUX, so use this instead:
+// selectcc(Rss, Rdd, tval, fval, cond) ->
+// combine(mux(cmp_cond(Rss, Rdd), tval.hi, fval.hi),
+// mux(cmp_cond(Rss, Rdd), tval.lo, fval.lo))
+
+// setgt-64.
+def : Pat<(i64 (selectcc DoubleRegs:$lhs, DoubleRegs:$rhs, DoubleRegs:$tval,
+ DoubleRegs:$fval, SETGT)),
+ (COMBINE_rr (MUX_rr (CMPGT64rr DoubleRegs:$lhs, DoubleRegs:$rhs),
+ (EXTRACT_SUBREG DoubleRegs:$tval, subreg_hireg),
+ (EXTRACT_SUBREG DoubleRegs:$fval, subreg_hireg)),
+ (MUX_rr (CMPGT64rr DoubleRegs:$lhs, DoubleRegs:$rhs),
+ (EXTRACT_SUBREG DoubleRegs:$tval, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$fval, subreg_loreg)))>;
+
+
+// setlt-64 -> setgt-64.
+def : Pat<(i64 (selectcc DoubleRegs:$lhs, DoubleRegs:$rhs, DoubleRegs:$tval,
+ DoubleRegs:$fval, SETLT)),
+ (COMBINE_rr (MUX_rr (CMPGT64rr DoubleRegs:$lhs,
+ (ADD64_rr DoubleRegs:$rhs, (TFRI64 -1))),
+ (EXTRACT_SUBREG DoubleRegs:$tval, subreg_hireg),
+ (EXTRACT_SUBREG DoubleRegs:$fval, subreg_hireg)),
+ (MUX_rr (CMPGT64rr DoubleRegs:$lhs,
+ (ADD64_rr DoubleRegs:$rhs, (TFRI64 -1))),
+ (EXTRACT_SUBREG DoubleRegs:$tval, subreg_loreg),
+ (EXTRACT_SUBREG DoubleRegs:$fval, subreg_loreg)))>;
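Because there is no 64-bit mux, the patterns above feed one compare into two 32-bit muxes and recombine the halves. The equivalent scalar computation (a plain C++ sketch, not from this patch):

#include <cstdint>
#include <cstdio>

// Mirrors COMBINE_rr(MUX_rr(hi), MUX_rr(lo)) driven by CMPGT64rr.
uint64_t select64_gt(int64_t L, int64_t R, uint64_t T, uint64_t F) {
  bool P = L > R;                                               // CMPGT64rr
  uint32_t Hi = P ? (uint32_t)(T >> 32) : (uint32_t)(F >> 32);  // hi mux
  uint32_t Lo = P ? (uint32_t)T : (uint32_t)F;                  // lo mux
  return ((uint64_t)Hi << 32) | Lo;                             // COMBINE_rr
}

int main() {
  std::printf("%llu\n", (unsigned long long)select64_gt(5, 3, 111, 222)); // 111
  return 0;
}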
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
new file mode 100644
index 0000000..a52c604
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp
@@ -0,0 +1,46 @@
+//===-- HexagonSelectionDAGInfo.cpp - Hexagon SelectionDAG Info -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the HexagonSelectionDAGInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "hexagon-selectiondag-info"
+#include "HexagonTargetMachine.h"
+using namespace llvm;
+
+bool llvm::flag_aligned_memcpy;
+
+HexagonSelectionDAGInfo::HexagonSelectionDAGInfo(const HexagonTargetMachine
+ &TM)
+ : TargetSelectionDAGInfo(TM) {
+}
+
+HexagonSelectionDAGInfo::~HexagonSelectionDAGInfo() {
+}
+
+SDValue
+HexagonSelectionDAGInfo::
+EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, SDValue Chain,
+ SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
+ bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
+ flag_aligned_memcpy = false;
+ if ((Align & 0x3) == 0) {
+ ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+ if (ConstantSize) {
+ uint64_t SizeVal = ConstantSize->getZExtValue();
+ if ((SizeVal > 32) && ((SizeVal % 8) == 0))
+ flag_aligned_memcpy = true;
+ }
+ }
+
+ return SDValue();
+}
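EmitTargetCodeForMemcpy above emits nothing itself; it only raises flag_aligned_memcpy when the copy is word-aligned and has a known size greater than 32 bytes that is a multiple of 8. The condition as a standalone predicate (illustrative sketch):

#include <cstdint>
#include <cstdio>

// Mirrors the flag_aligned_memcpy test above: 4-byte alignment plus a
// known constant size > 32 that is a multiple of 8.
bool isAlignedMemcpyCandidate(unsigned Align, bool SizeKnown, uint64_t Size) {
  return (Align & 0x3) == 0 && SizeKnown && Size > 32 && (Size % 8) == 0;
}

int main() {
  std::printf("%d\n", isAlignedMemcpyCandidate(8, true, 64)); // 1
  std::printf("%d\n", isAlignedMemcpyCandidate(8, true, 36)); // 0: size not 8-aligned
  std::printf("%d\n", isAlignedMemcpyCandidate(2, true, 64)); // 0: misaligned
  return 0;
}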
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h b/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
new file mode 100644
index 0000000..0673e4d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
@@ -0,0 +1,40 @@
+//===-- HexagonSelectionDAGInfo.h - Hexagon SelectionDAG Info ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Hexagon subclass for TargetSelectionDAGInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HexagonSELECTIONDAGINFO_H
+#define HexagonSELECTIONDAGINFO_H
+
+#include "llvm/Target/TargetSelectionDAGInfo.h"
+
+namespace llvm {
+
+class HexagonTargetMachine;
+
+class HexagonSelectionDAGInfo : public TargetSelectionDAGInfo {
+public:
+ explicit HexagonSelectionDAGInfo(const HexagonTargetMachine &TM);
+ ~HexagonSelectionDAGInfo();
+
+ virtual
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Chain,
+ SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
new file mode 100644
index 0000000..d10c9f2
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp
@@ -0,0 +1,129 @@
+//===-- HexagonSplitTFRCondSets.cpp - split TFR condsets into xfers -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//
+//===----------------------------------------------------------------------===//
+// This pass tries to provide opportunities for better optimization of muxes.
+// The default code generated for something like: flag = (a == b) ? 1 : 3;
+// would be:
+//
+// {p0 = cmp.eq(r0,r1)}
+// {r3 = mux(p0,#1,#3)}
+//
+// This requires two packets. If we use .new predicated immediate transfers,
+// then we can do this in a single packet, e.g.:
+//
+// {p0 = cmp.eq(r0,r1)
+// if (p0.new) r3 = #1
+// if (!p0.new) r3 = #3}
+//
+// Note that the conditional assignments are not generated in .new form here.
+// We assume optimistically that they will be formed later.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "xfer"
+#include "HexagonTargetMachine.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm;
+
+namespace {
+
+class HexagonSplitTFRCondSets : public MachineFunctionPass {
+ HexagonTargetMachine& QTM;
+ const HexagonSubtarget &QST;
+
+ public:
+ static char ID;
+ HexagonSplitTFRCondSets(HexagonTargetMachine& TM) :
+ MachineFunctionPass(ID), QTM(TM), QST(*TM.getSubtargetImpl()) {}
+
+ const char *getPassName() const {
+ return "Hexagon Split TFRCondSets";
+ }
+ bool runOnMachineFunction(MachineFunction &Fn);
+};
+
+
+char HexagonSplitTFRCondSets::ID = 0;
+
+
+bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) {
+
+ const TargetInstrInfo *TII = QTM.getInstrInfo();
+
+ // Loop over all of the basic blocks.
+ for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock* MBB = MBBb;
+ // Traverse the basic block.
+ for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
+ ++MII) {
+ MachineInstr *MI = MII;
+ int Opc = MI->getOpcode();
+ if (Opc == Hexagon::TFR_condset_rr) {
+
+ int DestReg = MI->getOperand(0).getReg();
+ int SrcReg1 = MI->getOperand(2).getReg();
+ int SrcReg2 = MI->getOperand(3).getReg();
+
+ // Minor optimization: do not emit the predicated copy if the source
+ // and the destination are the same register.
+ if (DestReg != SrcReg1) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cPt),
+ DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1);
+ }
+ if (DestReg != SrcReg2) {
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cNotPt),
+ DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2);
+ }
+ MII = MBB->erase(MI);
+ --MII;
+ } else if (Opc == Hexagon::TFR_condset_ii) {
+ int DestReg = MI->getOperand(0).getReg();
+ int SrcReg1 = MI->getOperand(1).getReg();
+ int Immed1 = MI->getOperand(2).getImm();
+ int Immed2 = MI->getOperand(3).getImm();
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cPt),
+ DestReg).addReg(SrcReg1).addImm(Immed1);
+ BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cNotPt),
+ DestReg).addReg(SrcReg1).addImm(Immed2);
+ MII = MBB->erase(MI);
+ --MII;
+ }
+ }
+ }
+
+ return true;
+}
+
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonSplitTFRCondSets(HexagonTargetMachine &TM) {
+ return new HexagonSplitTFRCondSets(TM);
+}
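TFR_condset_rr dst, p, s1, s2 behaves like dst = p ? s1 : s2, and the pass above expands it into a predicated-true copy plus a predicated-false copy, skipping whichever copy's source already equals the destination. The branch-free meaning in scalar form (hypothetical illustration):

#include <cstdio>

// dst = p ? s1 : s2, written as the two predicated transfers the pass emits.
int tfrCondset(bool P, int S1, int S2) {
  int Dst = 0;
  if (P)  Dst = S1;  // TFR_cPt
  if (!P) Dst = S2;  // TFR_cNotPt
  return Dst;
}

int main() {
  std::printf("%d %d\n", tfrCondset(true, 1, 3), tfrCondset(false, 1, 3)); // 1 3
  return 0;
}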
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
new file mode 100644
index 0000000..654d336
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -0,0 +1,62 @@
+//===-- HexagonSubtarget.cpp - Hexagon Subtarget Information --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Hexagon specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonSubtarget.h"
+#include "Hexagon.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+#define GET_SUBTARGETINFO_CTOR
+#define GET_SUBTARGETINFO_TARGET_DESC
+#include "HexagonGenSubtargetInfo.inc"
+
+static cl::opt<bool>
+EnableV3("enable-hexagon-v3", cl::Hidden,
+ cl::desc("Enable Hexagon V3 instructions."));
+
+static cl::opt<bool>
+EnableMemOps(
+ "enable-hexagon-memops",
+ cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed,
+ cl::desc("Generate V4 MEMOP in code generation for Hexagon target"));
+
+HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS):
+ HexagonGenSubtargetInfo(TT, CPU, FS),
+ HexagonArchVersion(V1),
+ CPUString(CPU.str()) {
+ ParseSubtargetFeatures(CPU, FS);
+
+ switch(HexagonArchVersion) {
+ case HexagonSubtarget::V2:
+ break;
+ case HexagonSubtarget::V3:
+ EnableV3 = true;
+ break;
+ case HexagonSubtarget::V4:
+ break;
+ default:
+ llvm_unreachable("Unknown Architecture Version.");
+ }
+
+ // Initialize scheduling itinerary for the specified CPU.
+ InstrItins = getInstrItineraryForCPU(CPUString);
+
+ // Max issue per cycle == bundle width.
+ InstrItins.IssueWidth = 4;
+
+ UseMemOps = EnableMemOps;
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
new file mode 100644
index 0000000..3079086
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -0,0 +1,74 @@
+//===-- HexagonSubtarget.h - Define Subtarget for the Hexagon ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Hexagon specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef Hexagon_SUBTARGET_H
+#define Hexagon_SUBTARGET_H
+
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include <string>
+
+#define GET_SUBTARGETINFO_HEADER
+#include "HexagonGenSubtargetInfo.inc"
+
+#define Hexagon_SMALL_DATA_THRESHOLD 8
+
+namespace llvm {
+
+class HexagonSubtarget : public HexagonGenSubtargetInfo {
+
+ bool UseMemOps;
+
+public:
+ enum HexagonArchEnum {
+ V1, V2, V3, V4
+ };
+
+ HexagonArchEnum HexagonArchVersion;
+ std::string CPUString;
+ InstrItineraryData InstrItins;
+
+public:
+ HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS);
+
+ /// getInstrItineraryData - Return the instruction itineraries based on
+ /// subtarget selection.
+ const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
+
+
+ /// ParseSubtargetFeatures - Parses the features string, setting the
+ /// specified subtarget options. The definition is auto-generated by tblgen.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ bool hasV2TOps () const { return HexagonArchVersion >= V2; }
+ bool hasV2TOpsOnly () const { return HexagonArchVersion == V2; }
+ bool hasV3TOps () const { return HexagonArchVersion >= V3; }
+ bool hasV3TOpsOnly () const { return HexagonArchVersion == V3; }
+ bool hasV4TOps () const { return HexagonArchVersion >= V4; }
+ bool useMemOps () const { return HexagonArchVersion >= V4 && UseMemOps; }
+
+ bool isSubtargetV2() const { return HexagonArchVersion == V2;}
+ const std::string &getCPUString () const { return CPUString; }
+
+ // Threshold for small data section
+ unsigned getSmallDataThreshold() const {
+ return Hexagon_SMALL_DATA_THRESHOLD;
+ }
+ const HexagonArchEnum &getHexagonArchVersion() const {
+ return HexagonArchVersion;
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
new file mode 100644
index 0000000..411325b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -0,0 +1,145 @@
+//===-- HexagonTargetMachine.cpp - Define TargetMachine for Hexagon -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements the info about Hexagon target spec.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonTargetMachine.h"
+#include "Hexagon.h"
+#include "HexagonISelLowering.h"
+#include "llvm/Module.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/PassManager.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+static cl::
+opt<bool> DisableHardwareLoops(
+ "disable-hexagon-hwloops", cl::Hidden,
+ cl::desc("Disable Hardware Loops for Hexagon target"));
+
+/// HexagonTargetMachineModule - Note that this is used on hosts that
+/// cannot link in a library unless there are references into the
+/// library. In particular, it seems that it is not possible to get
+/// things to work on Win32 without this. Though it is unused, do not
+/// remove it.
+extern "C" int HexagonTargetMachineModule;
+int HexagonTargetMachineModule = 0;
+
+extern "C" void LLVMInitializeHexagonTarget() {
+ // Register the target.
+ RegisterTargetMachine<HexagonTargetMachine> X(TheHexagonTarget);
+}
+
+
+/// HexagonTargetMachine ctor - Create an ILP32 architecture model.
+///
+/// Hexagon_TODO: Do I need an aggregate alignment?
+///
+HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ DataLayout("e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-a0:0") ,
+ Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this),
+ TSInfo(*this),
+ FrameLowering(Subtarget),
+ InstrItins(&Subtarget.getInstrItineraryData()) {
+ setMCUseCFI(false);
+}
+
+// addPassesForOptimizations - Allow the backend (target) to add
+// target-independent optimization passes to the pass manager.
+bool HexagonTargetMachine::addPassesForOptimizations(PassManagerBase &PM) {
+
+ PM.add(createConstantPropagationPass());
+ PM.add(createLoopSimplifyPass());
+ PM.add(createDeadCodeEliminationPass());
+ PM.add(createConstantPropagationPass());
+ PM.add(createLoopUnrollPass());
+ PM.add(createLoopStrengthReducePass(getTargetLowering()));
+ return true;
+}
+
+namespace {
+/// Hexagon Code Generator Pass Configuration Options.
+class HexagonPassConfig : public TargetPassConfig {
+public:
+ HexagonPassConfig(HexagonTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ HexagonTargetMachine &getHexagonTargetMachine() const {
+ return getTM<HexagonTargetMachine>();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreRegAlloc();
+ virtual bool addPostRegAlloc();
+ virtual bool addPreSched2();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new HexagonPassConfig(this, PM);
+}
+
+bool HexagonPassConfig::addInstSelector() {
+ PM.add(createHexagonRemoveExtendOps(getHexagonTargetMachine()));
+ PM.add(createHexagonISelDag(getHexagonTargetMachine()));
+ PM.add(createHexagonPeephole());
+ return false;
+}
+
+
+bool HexagonPassConfig::addPreRegAlloc() {
+ if (!DisableHardwareLoops) {
+ PM.add(createHexagonHardwareLoops());
+ }
+
+ return false;
+}
+
+bool HexagonPassConfig::addPostRegAlloc() {
+ PM.add(createHexagonCFGOptimizer(getHexagonTargetMachine()));
+ return true;
+}
+
+
+bool HexagonPassConfig::addPreSched2() {
+ addPass(IfConverterID);
+ return true;
+}
+
+bool HexagonPassConfig::addPreEmitPass() {
+
+ if (!DisableHardwareLoops) {
+ PM.add(createHexagonFixupHwLoops());
+ }
+
+ // Expand Spill code for predicate registers.
+ PM.add(createHexagonExpandPredSpillCode(getHexagonTargetMachine()));
+
+ // Split up TFRcondsets into conditional transfers.
+ PM.add(createHexagonSplitTFRCondSets(getHexagonTargetMachine()));
+
+ // Create Packets.
+ PM.add(createHexagonPacketizer());
+
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
new file mode 100644
index 0000000..0336965
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetMachine.h
@@ -0,0 +1,83 @@
+//=-- HexagonTargetMachine.h - Define TargetMachine for Hexagon ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Hexagon specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HexagonTARGETMACHINE_H
+#define HexagonTARGETMACHINE_H
+
+#include "HexagonInstrInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonISelLowering.h"
+#include "HexagonSelectionDAGInfo.h"
+#include "HexagonFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+
+namespace llvm {
+
+class Module;
+
+class HexagonTargetMachine : public LLVMTargetMachine {
+ const TargetData DataLayout; // Calculates type size & alignment.
+ HexagonSubtarget Subtarget;
+ HexagonInstrInfo InstrInfo;
+ HexagonTargetLowering TLInfo;
+ HexagonSelectionDAGInfo TSInfo;
+ HexagonFrameLowering FrameLowering;
+ const InstrItineraryData* InstrItins;
+
+public:
+ HexagonTargetMachine(const Target &T, StringRef TT,StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+
+ virtual const HexagonInstrInfo *getInstrInfo() const {
+ return &InstrInfo;
+ }
+ virtual const HexagonSubtarget *getSubtargetImpl() const {
+ return &Subtarget;
+ }
+ virtual const HexagonRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo.getRegisterInfo();
+ }
+
+ virtual const InstrItineraryData* getInstrItineraryData() const {
+ return InstrItins;
+ }
+
+
+ virtual const HexagonTargetLowering* getTargetLowering() const {
+ return &TLInfo;
+ }
+
+ virtual const HexagonFrameLowering* getFrameLowering() const {
+ return &FrameLowering;
+ }
+
+ virtual const HexagonSelectionDAGInfo* getSelectionDAGInfo() const {
+ return &TSInfo;
+ }
+
+ virtual const TargetData *getTargetData() const { return &DataLayout; }
+ static unsigned getModuleMatchQuality(const Module &M);
+
+ // Pass Pipeline Configuration.
+ virtual bool addPassesForOptimizations(PassManagerBase &PM);
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+};
+
+extern bool flag_aligned_memcpy;
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
new file mode 100644
index 0000000..32cc709
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.cpp
@@ -0,0 +1,94 @@
+//===-- HexagonTargetObjectFile.cpp - Hexagon asm properties --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the HexagonTargetObjectFile properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonTargetObjectFile.h"
+#include "HexagonSubtarget.h"
+#include "HexagonTargetMachine.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+static cl::opt<int> SmallDataThreshold("hexagon-small-data-threshold",
+ cl::init(8), cl::Hidden);
+
+void HexagonTargetObjectFile::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
+ TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+
+
+ SmallDataSection =
+ getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC,
+ SectionKind::getDataRel());
+ SmallBSSSection =
+ getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
+ ELF::SHF_WRITE | ELF::SHF_ALLOC,
+ SectionKind::getBSS());
+}
+
+// sdata/sbss support taken largely from the MIPS Backend.
+static bool IsInSmallSection(uint64_t Size) {
+ return Size > 0 && Size <= (uint64_t)SmallDataThreshold;
+}
+/// IsGlobalInSmallSection - Return true if this global value should be
+/// placed into small data/bss section.
+bool HexagonTargetObjectFile::IsGlobalInSmallSection(const GlobalValue *GV,
+ const TargetMachine &TM) const {
+ // If the primary definition of this global value is outside the current
+ // translation unit or the global value is available for inspection but not
+ // emission, then do nothing.
+ if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
+ return false;
+
+ // Otherwise, check whether GV should go in sdata/sbss based on the kind
+ // it would normally get from getKindForGlobal(GV, TM).
+ return IsGlobalInSmallSection(GV, TM, getKindForGlobal(GV, TM));
+}
+
+/// IsGlobalInSmallSection - Return true if this global value should be
+/// placed into small data/bss section.
+bool HexagonTargetObjectFile::
+IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
+ SectionKind Kind) const {
+ // Only global variables, not functions.
+ const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GV);
+ if (!GVA)
+ return false;
+
+ if (Kind.isBSS() || Kind.isDataNoRel() || Kind.isCommon()) {
+ Type *Ty = GV->getType()->getElementType();
+ return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
+ }
+
+ return false;
+}
+
+const MCSection *HexagonTargetObjectFile::
+SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler *Mang, const TargetMachine &TM) const {
+
+ // Handle Small Section classification here.
+ if (Kind.isBSS() && IsGlobalInSmallSection(GV, TM, Kind))
+ return SmallBSSSection;
+ if (Kind.isDataNoRel() && IsGlobalInSmallSection(GV, TM, Kind))
+ return SmallDataSection;
+
+ // Otherwise, we work the same as ELF.
+ return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang,TM);
+}
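The classification above gates small-section placement on a size threshold (8 bytes by default, overridable with -hexagon-small-data-threshold) and on the global's section kind. The size test in isolation (illustrative sketch):

#include <cstdint>
#include <cstdio>

// Mirrors IsInSmallSection above; zero-sized globals never qualify.
bool isInSmallSection(uint64_t Size, uint64_t Threshold = 8) {
  return Size > 0 && Size <= Threshold;
}

int main() {
  std::printf("%d %d %d\n", (int)isInSmallSection(4), (int)isInSmallSection(8),
              (int)isInSmallSection(16)); // 1 1 0
  return 0;
}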
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h
new file mode 100644
index 0000000..6933450
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonTargetObjectFile.h
@@ -0,0 +1,40 @@
+//===-- HexagonTargetObjectFile.h - Hexagon asm properties -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HexagonTARGETOBJECTFILE_H
+#define HexagonTARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/MC/MCSectionELF.h"
+
+namespace llvm {
+
+ class HexagonTargetObjectFile : public TargetLoweringObjectFileELF {
+ const MCSectionELF *SmallDataSection;
+ const MCSectionELF *SmallBSSSection;
+ public:
+ virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+
+ /// IsGlobalInSmallSection - Return true if this global value should be
+ /// placed into the small data/bss section.
+ bool IsGlobalInSmallSection(const GlobalValue *GV,
+ const TargetMachine &TM,
+ SectionKind Kind) const;
+ bool IsGlobalInSmallSection(const GlobalValue *GV,
+ const TargetMachine &TM) const;
+
+ const MCSection* SelectSectionForGlobal(const GlobalValue *GV,
+ SectionKind Kind,
+ Mangler *Mang,
+ const TargetMachine &TM) const;
+ };
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
new file mode 100644
index 0000000..c6e7bd1
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -0,0 +1,3642 @@
+//===----- HexagonVLIWPacketizer.cpp - VLIW packetizer -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements a simple VLIW packetizer using DFA. The packetizer works on
+// machine basic blocks. For each instruction I in BB, the packetizer consults
+// the DFA to see if machine resources are available to execute I. If so, the
+// packetizer checks if I depends on any instruction J in the current packet.
+// If no dependency is found, I is added to current packet and machine resource
+// is marked as taken. If any dependency is found, a target API call is made to
+// prune the dependence.
+//
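+// In pseudocode, the inner loop is roughly (an illustrative sketch of the
+// description above, not the exact control flow):
+//
+//   for each instruction I in the region:
+//     if the DFA has resources for I:
+//       if I has no unprunable dependence on the current packet:
+//         add I to the packet and mark its resources as taken
+//         continue
+//     end the current packet and start a new one containing I
+//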
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "packets"
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "Hexagon.h"
+#include "HexagonTargetMachine.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "HexagonMachineFunctionInfo.h"
+
+#include <map>
+
+using namespace llvm;
+
+namespace {
+ class HexagonPacketizer : public MachineFunctionPass {
+
+ public:
+ static char ID;
+ HexagonPacketizer() : MachineFunctionPass(ID) {}
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ const char *getPassName() const {
+ return "Hexagon Packetizer";
+ }
+
+ bool runOnMachineFunction(MachineFunction &Fn);
+ };
+ char HexagonPacketizer::ID = 0;
+
+ class HexagonPacketizerList : public VLIWPacketizerList {
+
+ private:
+
+ // Has the instruction been promoted to a dot-new instruction.
+ bool PromotedToDotNew;
+
+ // Has the instruction been glued to allocframe.
+ bool GlueAllocframeStore;
+
+ // Has the feeder instruction been glued to new value jump.
+ bool GlueToNewValueJump;
+
+ // Check if there is a dependence between some instruction already in this
+ // packet and this instruction.
+ bool Dependence;
+
+ // Only check for dependence if there are resources available to
+ // schedule this instruction.
+ bool FoundSequentialDependence;
+
+ public:
+ // Ctor.
+ HexagonPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
+ MachineDominatorTree &MDT);
+
+ // initPacketizerState - initialize some internal flags.
+ void initPacketizerState();
+
+ // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
+ bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB);
+
+ // isSoloInstruction - return true if instruction MI cannot be packetized
+ // with any other instruction; MI then forms a packet by itself.
+ bool isSoloInstruction(MachineInstr *MI);
+
+ // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
+ // together.
+ bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ);
+
+ // isLegalToPruneDependencies - Is it legal to prune dependence between SUI
+ // and SUJ.
+ bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ);
+
+ MachineBasicBlock::iterator addToPacket(MachineInstr *MI);
+ private:
+ bool IsCallDependent(MachineInstr* MI, SDep::Kind DepType, unsigned DepReg);
+ bool PromoteToDotNew(MachineInstr* MI, SDep::Kind DepType,
+ MachineBasicBlock::iterator &MII,
+ const TargetRegisterClass* RC);
+ bool CanPromoteToDotNew(MachineInstr* MI, SUnit* PacketSU,
+ unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit,
+ MachineBasicBlock::iterator &MII,
+ const TargetRegisterClass* RC);
+ bool CanPromoteToNewValue(MachineInstr* MI, SUnit* PacketSU,
+ unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit,
+ MachineBasicBlock::iterator &MII);
+ bool CanPromoteToNewValueStore(MachineInstr* MI, MachineInstr* PacketMI,
+ unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit);
+ bool DemoteToDotOld(MachineInstr* MI);
+ bool ArePredicatesComplements(MachineInstr* MI1, MachineInstr* MI2,
+ std::map <MachineInstr*, SUnit*> MIToSUnit);
+ bool RestrictingDepExistInPacket(MachineInstr*,
+ unsigned, std::map <MachineInstr*, SUnit*>);
+ bool isNewifiable(MachineInstr* MI);
+ bool isCondInst(MachineInstr* MI);
+ bool IsNewifyStore (MachineInstr* MI);
+ bool tryAllocateResourcesForConstExt(MachineInstr* MI);
+ bool canReserveResourcesForConstExt(MachineInstr *MI);
+ void reserveResourcesForConstExt(MachineInstr* MI);
+ bool isNewValueInst(MachineInstr* MI);
+ bool isDotNewInst(MachineInstr* MI);
+ };
+}
+
+// HexagonPacketizerList Ctor.
+HexagonPacketizerList::HexagonPacketizerList(
+ MachineFunction &MF, MachineLoopInfo &MLI,MachineDominatorTree &MDT)
+ : VLIWPacketizerList(MF, MLI, MDT, true) {
+}
+
+bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) {
+ const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
+ MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+ MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+
+ // Instantiate the packetizer.
+ HexagonPacketizerList Packetizer(Fn, MLI, MDT);
+
+ // DFA state table should not be empty.
+ assert(Packetizer.getResourceTracker() && "Empty DFA table!");
+
+ //
+ // Loop over all basic blocks and remove KILL pseudo-instructions.
+ // These instructions confuse the dependence analysis. Consider:
+ // D0 = ... (Insn 0)
+ // R0 = KILL R0, D0 (Insn 1)
+ // R0 = ... (Insn 2)
+ // Here, Insn 1 will result in the dependence graph not emitting an output
+ // dependence between Insn 0 and Insn 2. This can lead to incorrect
+ // packetization.
+ //
+ for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
+ MBB != MBBe; ++MBB) {
+ MachineBasicBlock::iterator End = MBB->end();
+ MachineBasicBlock::iterator MI = MBB->begin();
+ while (MI != End) {
+ if (MI->isKill()) {
+ MachineBasicBlock::iterator DeleteMI = MI;
+ ++MI;
+ MBB->erase(DeleteMI);
+ End = MBB->end();
+ continue;
+ }
+ ++MI;
+ }
+ }
+
+ // Loop over all of the basic blocks.
+ for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
+ MBB != MBBe; ++MBB) {
+ // Find scheduling regions and schedule / packetize each region.
+ unsigned RemainingCount = MBB->size();
+ for (MachineBasicBlock::iterator RegionEnd = MBB->end();
+ RegionEnd != MBB->begin();) {
+ // The next region starts above the previous region. Look backward in the
+ // instruction stream until we find the nearest boundary.
+ MachineBasicBlock::iterator I = RegionEnd;
+ for (; I != MBB->begin(); --I, --RemainingCount) {
+ if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn))
+ break;
+ }
+ I = MBB->begin();
+
+ // Skip empty scheduling regions.
+ if (I == RegionEnd) {
+ RegionEnd = llvm::prior(RegionEnd);
+ --RemainingCount;
+ continue;
+ }
+ // Skip regions with one instruction.
+ if (I == llvm::prior(RegionEnd)) {
+ RegionEnd = llvm::prior(RegionEnd);
+ continue;
+ }
+
+ Packetizer.PacketizeMIs(MBB, I, RegionEnd);
+ RegionEnd = I;
+ }
+ }
+
+ return true;
+}
+
+
+static bool IsIndirectCall(MachineInstr* MI) {
+ return ((MI->getOpcode() == Hexagon::CALLR) ||
+ (MI->getOpcode() == Hexagon::CALLRv3));
+}
+
+// Reserve resources for a constant extender. Trigger an assertion if the
+// reservation fails.
+void HexagonPacketizerList::reserveResourcesForConstExt(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr(
+ QII->get(Hexagon::IMMEXT), MI->getDebugLoc());
+
+ if (ResourceTracker->canReserveResources(PseudoMI)) {
+ ResourceTracker->reserveResources(PseudoMI);
+ MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
+ } else {
+ MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
+ llvm_unreachable("can not reserve resources for constant extender.");
+ }
+ return;
+}
+
+bool HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ assert(QII->isExtended(MI) &&
+ "Should only be called for constant extended instructions");
+ MachineFunction *MF = MI->getParent()->getParent();
+ MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT),
+ MI->getDebugLoc());
+ bool CanReserve = ResourceTracker->canReserveResources(PseudoMI);
+ MF->DeleteMachineInstr(PseudoMI);
+ return CanReserve;
+}
+
+// Allocate resources (i.e., 4 bytes) for a constant extender. Return true on
+// success and false otherwise.
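+// (A constant extender on Hexagon is an extra 4-byte slot in a packet that
+// holds an immediate too large for the instruction's own immediate field.)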
+bool HexagonPacketizerList::tryAllocateResourcesForConstExt(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr(
+ QII->get(Hexagon::IMMEXT), MI->getDebugLoc());
+
+ if (ResourceTracker->canReserveResources(PseudoMI)) {
+ ResourceTracker->reserveResources(PseudoMI);
+ MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
+ return true;
+ } else {
+ MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
+ return false;
+ }
+}
+
+
+bool HexagonPacketizerList::IsCallDependent(MachineInstr* MI,
+ SDep::Kind DepType,
+ unsigned DepReg) {
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ const HexagonRegisterInfo* QRI =
+   (const HexagonRegisterInfo *) TM.getRegisterInfo();
+
+ // Check for a dependence on LR (the return-address register)
+ if (DepReg == QRI->getRARegister()) {
+ return true;
+ }
+
+ if (QII->isDeallocRet(MI)) {
+ if (DepReg == QRI->getFrameRegister() ||
+ DepReg == QRI->getStackRegister())
+ return true;
+ }
+
+ // Check if this is a predicate dependence
+ const TargetRegisterClass* RC = QRI->getMinimalPhysRegClass(DepReg);
+ if (RC == Hexagon::PredRegsRegisterClass) {
+ return true;
+ }
+
+ //
+ // Lastly, check for an operand used in an indirect call.
+ // If we had an attribute for checking whether an instruction is an indirect
+ // call, we could have avoided this relatively brittle implementation of
+ // IsIndirectCall().
+ //
+ // This assumes that the first operand of the CALLR is the function address.
+ //
+ if (IsIndirectCall(MI) && (DepType == SDep::Data)) {
+ const MachineOperand &MO = MI->getOperand(0);
+ if (MO.isReg() && MO.isUse() && (MO.getReg() == DepReg)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool IsRegDependence(const SDep::Kind DepType) {
+ return (DepType == SDep::Data || DepType == SDep::Anti ||
+ DepType == SDep::Output);
+}
+
+static bool IsDirectJump(MachineInstr* MI) {
+ return (MI->getOpcode() == Hexagon::JMP);
+}
+
+static bool IsSchedBarrier(MachineInstr* MI) {
+ switch (MI->getOpcode()) {
+ case Hexagon::BARRIER:
+ return true;
+ }
+ return false;
+}
+
+static bool IsControlFlow(MachineInstr* MI) {
+ return (MI->getDesc().isTerminator() || MI->getDesc().isCall());
+}
+
+bool HexagonPacketizerList::isNewValueInst(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ if (QII->isNewValueJump(MI))
+ return true;
+
+ if (QII->isNewValueStore(MI))
+ return true;
+
+ return false;
+}
+
+// Returns true if an instruction can be promoted to a new-value store; it
+// always returns false for V2 and V3. The switch below lists all the
+// conditional and unconditional stores that can be promoted to new-value
+// stores.
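+// For example (illustrative only), in the packet
+//   { r1 = add(r2, r3)
+//     memw(r0+#0) = r1.new }
+// the store reads the value produced by the add within the same packet.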
+
+bool HexagonPacketizerList::IsNewifyStore(MachineInstr* MI) {
+ const HexagonRegisterInfo* QRI =
+   (const HexagonRegisterInfo *) TM.getRegisterInfo();
+ switch (MI->getOpcode())
+ {
+ // store byte
+ case Hexagon::STrib:
+ case Hexagon::STrib_indexed:
+ case Hexagon::STrib_indexed_shl_V4:
+ case Hexagon::STrib_shl_V4:
+ case Hexagon::STrib_GP_V4:
+ case Hexagon::STb_GP_V4:
+ case Hexagon::POST_STbri:
+ case Hexagon::STrib_cPt:
+ case Hexagon::STrib_cdnPt_V4:
+ case Hexagon::STrib_cNotPt:
+ case Hexagon::STrib_cdnNotPt_V4:
+ case Hexagon::STrib_indexed_cPt:
+ case Hexagon::STrib_indexed_cdnPt_V4:
+ case Hexagon::STrib_indexed_cNotPt:
+ case Hexagon::STrib_indexed_cdnNotPt_V4:
+ case Hexagon::STrib_indexed_shl_cPt_V4:
+ case Hexagon::STrib_indexed_shl_cdnPt_V4:
+ case Hexagon::STrib_indexed_shl_cNotPt_V4:
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
+ case Hexagon::POST_STbri_cPt:
+ case Hexagon::POST_STbri_cdnPt_V4:
+ case Hexagon::POST_STbri_cNotPt:
+ case Hexagon::POST_STbri_cdnNotPt_V4:
+ case Hexagon::STb_GP_cPt_V4:
+ case Hexagon::STb_GP_cNotPt_V4:
+ case Hexagon::STb_GP_cdnPt_V4:
+ case Hexagon::STb_GP_cdnNotPt_V4:
+ case Hexagon::STrib_GP_cPt_V4:
+ case Hexagon::STrib_GP_cNotPt_V4:
+ case Hexagon::STrib_GP_cdnPt_V4:
+ case Hexagon::STrib_GP_cdnNotPt_V4:
+
+ // store halfword
+ case Hexagon::STrih:
+ case Hexagon::STrih_indexed:
+ case Hexagon::STrih_indexed_shl_V4:
+ case Hexagon::STrih_shl_V4:
+ case Hexagon::STrih_GP_V4:
+ case Hexagon::STh_GP_V4:
+ case Hexagon::POST_SThri:
+ case Hexagon::STrih_cPt:
+ case Hexagon::STrih_cdnPt_V4:
+ case Hexagon::STrih_cNotPt:
+ case Hexagon::STrih_cdnNotPt_V4:
+ case Hexagon::STrih_indexed_cPt:
+ case Hexagon::STrih_indexed_cdnPt_V4:
+ case Hexagon::STrih_indexed_cNotPt:
+ case Hexagon::STrih_indexed_cdnNotPt_V4:
+ case Hexagon::STrih_indexed_shl_cPt_V4:
+ case Hexagon::STrih_indexed_shl_cdnPt_V4:
+ case Hexagon::STrih_indexed_shl_cNotPt_V4:
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
+ case Hexagon::POST_SThri_cPt:
+ case Hexagon::POST_SThri_cdnPt_V4:
+ case Hexagon::POST_SThri_cNotPt:
+ case Hexagon::POST_SThri_cdnNotPt_V4:
+ case Hexagon::STh_GP_cPt_V4:
+ case Hexagon::STh_GP_cNotPt_V4:
+ case Hexagon::STh_GP_cdnPt_V4:
+ case Hexagon::STh_GP_cdnNotPt_V4:
+ case Hexagon::STrih_GP_cPt_V4:
+ case Hexagon::STrih_GP_cNotPt_V4:
+ case Hexagon::STrih_GP_cdnPt_V4:
+ case Hexagon::STrih_GP_cdnNotPt_V4:
+
+ // store word
+ case Hexagon::STriw:
+ case Hexagon::STriw_indexed:
+ case Hexagon::STriw_indexed_shl_V4:
+ case Hexagon::STriw_shl_V4:
+ case Hexagon::STriw_GP_V4:
+ case Hexagon::STw_GP_V4:
+ case Hexagon::POST_STwri:
+ case Hexagon::STriw_cPt:
+ case Hexagon::STriw_cdnPt_V4:
+ case Hexagon::STriw_cNotPt:
+ case Hexagon::STriw_cdnNotPt_V4:
+ case Hexagon::STriw_indexed_cPt:
+ case Hexagon::STriw_indexed_cdnPt_V4:
+ case Hexagon::STriw_indexed_cNotPt:
+ case Hexagon::STriw_indexed_cdnNotPt_V4:
+ case Hexagon::STriw_indexed_shl_cPt_V4:
+ case Hexagon::STriw_indexed_shl_cdnPt_V4:
+ case Hexagon::STriw_indexed_shl_cNotPt_V4:
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
+ case Hexagon::POST_STwri_cPt:
+ case Hexagon::POST_STwri_cdnPt_V4:
+ case Hexagon::POST_STwri_cNotPt:
+ case Hexagon::POST_STwri_cdnNotPt_V4:
+ case Hexagon::STw_GP_cPt_V4:
+ case Hexagon::STw_GP_cNotPt_V4:
+ case Hexagon::STw_GP_cdnPt_V4:
+ case Hexagon::STw_GP_cdnNotPt_V4:
+ case Hexagon::STriw_GP_cPt_V4:
+ case Hexagon::STriw_GP_cNotPt_V4:
+ case Hexagon::STriw_GP_cdnPt_V4:
+ case Hexagon::STriw_GP_cdnNotPt_V4:
+ return QRI->Subtarget.hasV4TOps();
+ }
+ return false;
+}
+
+static bool IsLoopN(MachineInstr *MI) {
+ return (MI->getOpcode() == Hexagon::LOOP0_i ||
+ MI->getOpcode() == Hexagon::LOOP0_r);
+}
+
+/// DoesModifyCalleeSavedReg - Returns true if the instruction modifies a
+/// callee-saved register.
+static bool DoesModifyCalleeSavedReg(MachineInstr *MI,
+ const TargetRegisterInfo *TRI) {
+ for (const uint16_t *CSR = TRI->getCalleeSavedRegs(); *CSR; ++CSR) {
+ unsigned CalleeSavedReg = *CSR;
+ if (MI->modifiesRegister(CalleeSavedReg, TRI))
+ return true;
+ }
+ return false;
+}
+
+// Return the new-value store opcode for a given store.
+static int GetDotNewOp(const int opc) {
+ switch (opc) {
+ default: llvm_unreachable("Unknown .new type");
+
+ // store new value byte
+ case Hexagon::STrib:
+ return Hexagon::STrib_nv_V4;
+
+ case Hexagon::STrib_indexed:
+ return Hexagon::STrib_indexed_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_V4:
+ return Hexagon::STrib_indexed_shl_nv_V4;
+
+ case Hexagon::STrib_shl_V4:
+ return Hexagon::STrib_shl_nv_V4;
+
+ case Hexagon::STrib_GP_V4:
+ return Hexagon::STrib_GP_nv_V4;
+
+ case Hexagon::STb_GP_V4:
+ return Hexagon::STb_GP_nv_V4;
+
+ case Hexagon::POST_STbri:
+ return Hexagon::POST_STbri_nv_V4;
+
+ case Hexagon::STrib_cPt:
+ return Hexagon::STrib_cPt_nv_V4;
+
+ case Hexagon::STrib_cdnPt_V4:
+ return Hexagon::STrib_cdnPt_nv_V4;
+
+ case Hexagon::STrib_cNotPt:
+ return Hexagon::STrib_cNotPt_nv_V4;
+
+ case Hexagon::STrib_cdnNotPt_V4:
+ return Hexagon::STrib_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cPt:
+ return Hexagon::STrib_indexed_cPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cdnPt_V4:
+ return Hexagon::STrib_indexed_cdnPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cNotPt:
+ return Hexagon::STrib_indexed_cNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cdnNotPt_V4:
+ return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cPt_V4:
+ return Hexagon::STrib_indexed_shl_cPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cdnPt_V4:
+ return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cNotPt_V4:
+ return Hexagon::STrib_indexed_shl_cNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
+ return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_STbri_cPt:
+ return Hexagon::POST_STbri_cPt_nv_V4;
+
+ case Hexagon::POST_STbri_cdnPt_V4:
+ return Hexagon::POST_STbri_cdnPt_nv_V4;
+
+ case Hexagon::POST_STbri_cNotPt:
+ return Hexagon::POST_STbri_cNotPt_nv_V4;
+
+ case Hexagon::POST_STbri_cdnNotPt_V4:
+ return Hexagon::POST_STbri_cdnNotPt_nv_V4;
+
+ case Hexagon::STb_GP_cPt_V4:
+ return Hexagon::STb_GP_cPt_nv_V4;
+
+ case Hexagon::STb_GP_cNotPt_V4:
+ return Hexagon::STb_GP_cNotPt_nv_V4;
+
+ case Hexagon::STb_GP_cdnPt_V4:
+ return Hexagon::STb_GP_cdnPt_nv_V4;
+
+ case Hexagon::STb_GP_cdnNotPt_V4:
+ return Hexagon::STb_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_GP_cPt_V4:
+ return Hexagon::STrib_GP_cPt_nv_V4;
+
+ case Hexagon::STrib_GP_cNotPt_V4:
+ return Hexagon::STrib_GP_cNotPt_nv_V4;
+
+ case Hexagon::STrib_GP_cdnPt_V4:
+ return Hexagon::STrib_GP_cdnPt_nv_V4;
+
+ case Hexagon::STrib_GP_cdnNotPt_V4:
+ return Hexagon::STrib_GP_cdnNotPt_nv_V4;
+
+ // store new value halfword
+ case Hexagon::STrih:
+ return Hexagon::STrih_nv_V4;
+
+ case Hexagon::STrih_indexed:
+ return Hexagon::STrih_indexed_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_V4:
+ return Hexagon::STrih_indexed_shl_nv_V4;
+
+ case Hexagon::STrih_shl_V4:
+ return Hexagon::STrih_shl_nv_V4;
+
+ case Hexagon::STrih_GP_V4:
+ return Hexagon::STrih_GP_nv_V4;
+
+ case Hexagon::STh_GP_V4:
+ return Hexagon::STh_GP_nv_V4;
+
+ case Hexagon::POST_SThri:
+ return Hexagon::POST_SThri_nv_V4;
+
+ case Hexagon::STrih_cPt:
+ return Hexagon::STrih_cPt_nv_V4;
+
+ case Hexagon::STrih_cdnPt_V4:
+ return Hexagon::STrih_cdnPt_nv_V4;
+
+ case Hexagon::STrih_cNotPt:
+ return Hexagon::STrih_cNotPt_nv_V4;
+
+ case Hexagon::STrih_cdnNotPt_V4:
+ return Hexagon::STrih_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cPt:
+ return Hexagon::STrih_indexed_cPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cdnPt_V4:
+ return Hexagon::STrih_indexed_cdnPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cNotPt:
+ return Hexagon::STrih_indexed_cNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cdnNotPt_V4:
+ return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cPt_V4:
+ return Hexagon::STrih_indexed_shl_cPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cdnPt_V4:
+ return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cNotPt_V4:
+ return Hexagon::STrih_indexed_shl_cNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
+ return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_SThri_cPt:
+ return Hexagon::POST_SThri_cPt_nv_V4;
+
+ case Hexagon::POST_SThri_cdnPt_V4:
+ return Hexagon::POST_SThri_cdnPt_nv_V4;
+
+ case Hexagon::POST_SThri_cNotPt:
+ return Hexagon::POST_SThri_cNotPt_nv_V4;
+
+ case Hexagon::POST_SThri_cdnNotPt_V4:
+ return Hexagon::POST_SThri_cdnNotPt_nv_V4;
+
+ case Hexagon::STh_GP_cPt_V4:
+ return Hexagon::STh_GP_cPt_nv_V4;
+
+ case Hexagon::STh_GP_cNotPt_V4:
+ return Hexagon::STh_GP_cNotPt_nv_V4;
+
+ case Hexagon::STh_GP_cdnPt_V4:
+ return Hexagon::STh_GP_cdnPt_nv_V4;
+
+ case Hexagon::STh_GP_cdnNotPt_V4:
+ return Hexagon::STh_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_GP_cPt_V4:
+ return Hexagon::STrih_GP_cPt_nv_V4;
+
+ case Hexagon::STrih_GP_cNotPt_V4:
+ return Hexagon::STrih_GP_cNotPt_nv_V4;
+
+ case Hexagon::STrih_GP_cdnPt_V4:
+ return Hexagon::STrih_GP_cdnPt_nv_V4;
+
+ case Hexagon::STrih_GP_cdnNotPt_V4:
+ return Hexagon::STrih_GP_cdnNotPt_nv_V4;
+
+ // store new value word
+ case Hexagon::STriw:
+ return Hexagon::STriw_nv_V4;
+
+ case Hexagon::STriw_indexed:
+ return Hexagon::STriw_indexed_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_V4:
+ return Hexagon::STriw_indexed_shl_nv_V4;
+
+ case Hexagon::STriw_shl_V4:
+ return Hexagon::STriw_shl_nv_V4;
+
+ case Hexagon::STriw_GP_V4:
+ return Hexagon::STriw_GP_nv_V4;
+
+ case Hexagon::STw_GP_V4:
+ return Hexagon::STw_GP_nv_V4;
+
+ case Hexagon::POST_STwri:
+ return Hexagon::POST_STwri_nv_V4;
+
+ case Hexagon::STriw_cPt:
+ return Hexagon::STriw_cPt_nv_V4;
+
+ case Hexagon::STriw_cdnPt_V4:
+ return Hexagon::STriw_cdnPt_nv_V4;
+
+ case Hexagon::STriw_cNotPt:
+ return Hexagon::STriw_cNotPt_nv_V4;
+
+ case Hexagon::STriw_cdnNotPt_V4:
+ return Hexagon::STriw_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cPt:
+ return Hexagon::STriw_indexed_cPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cdnPt_V4:
+ return Hexagon::STriw_indexed_cdnPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cNotPt:
+ return Hexagon::STriw_indexed_cNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cdnNotPt_V4:
+ return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cPt_V4:
+ return Hexagon::STriw_indexed_shl_cPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cdnPt_V4:
+ return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cNotPt_V4:
+ return Hexagon::STriw_indexed_shl_cNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
+ return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_STwri_cPt:
+ return Hexagon::POST_STwri_cPt_nv_V4;
+
+ case Hexagon::POST_STwri_cdnPt_V4:
+ return Hexagon::POST_STwri_cdnPt_nv_V4;
+
+ case Hexagon::POST_STwri_cNotPt:
+ return Hexagon::POST_STwri_cNotPt_nv_V4;
+
+ case Hexagon::POST_STwri_cdnNotPt_V4:
+ return Hexagon::POST_STwri_cdnNotPt_nv_V4;
+
+ case Hexagon::STw_GP_cPt_V4:
+ return Hexagon::STw_GP_cPt_nv_V4;
+
+ case Hexagon::STw_GP_cNotPt_V4:
+ return Hexagon::STw_GP_cNotPt_nv_V4;
+
+ case Hexagon::STw_GP_cdnPt_V4:
+ return Hexagon::STw_GP_cdnPt_nv_V4;
+
+ case Hexagon::STw_GP_cdnNotPt_V4:
+ return Hexagon::STw_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_GP_cPt_V4:
+ return Hexagon::STriw_GP_cPt_nv_V4;
+
+ case Hexagon::STriw_GP_cNotPt_V4:
+ return Hexagon::STriw_GP_cNotPt_nv_V4;
+
+ case Hexagon::STriw_GP_cdnPt_V4:
+ return Hexagon::STriw_GP_cdnPt_nv_V4;
+
+ case Hexagon::STriw_GP_cdnNotPt_V4:
+ return Hexagon::STriw_GP_cdnNotPt_nv_V4;
+ }
+}
+
+// Return the .new predicated version of an instruction.
+static int GetDotNewPredOp(const int opc) {
+ switch (opc) {
+ default: llvm_unreachable("Unknown .new type");
+
+ // Conditional stores
+ // Store byte conditionally
+ case Hexagon::STrib_cPt :
+ return Hexagon::STrib_cdnPt_V4;
+
+ case Hexagon::STrib_cNotPt :
+ return Hexagon::STrib_cdnNotPt_V4;
+
+ case Hexagon::STrib_indexed_cPt :
+ return Hexagon::STrib_indexed_cdnPt_V4;
+
+ case Hexagon::STrib_indexed_cNotPt :
+ return Hexagon::STrib_indexed_cdnNotPt_V4;
+
+ case Hexagon::STrib_imm_cPt_V4 :
+ return Hexagon::STrib_imm_cdnPt_V4;
+
+ case Hexagon::STrib_imm_cNotPt_V4 :
+ return Hexagon::STrib_imm_cdnNotPt_V4;
+
+ case Hexagon::POST_STbri_cPt :
+ return Hexagon::POST_STbri_cdnPt_V4;
+
+ case Hexagon::POST_STbri_cNotPt :
+ return Hexagon::POST_STbri_cdnNotPt_V4;
+
+ case Hexagon::STrib_indexed_shl_cPt_V4 :
+ return Hexagon::STrib_indexed_shl_cdnPt_V4;
+
+ case Hexagon::STrib_indexed_shl_cNotPt_V4 :
+ return Hexagon::STrib_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::STb_GP_cPt_V4 :
+ return Hexagon::STb_GP_cdnPt_V4;
+
+ case Hexagon::STb_GP_cNotPt_V4 :
+ return Hexagon::STb_GP_cdnNotPt_V4;
+
+ case Hexagon::STrib_GP_cPt_V4 :
+ return Hexagon::STrib_GP_cdnPt_V4;
+
+ case Hexagon::STrib_GP_cNotPt_V4 :
+ return Hexagon::STrib_GP_cdnNotPt_V4;
+
+ // Store doubleword conditionally
+ case Hexagon::STrid_cPt :
+ return Hexagon::STrid_cdnPt_V4;
+
+ case Hexagon::STrid_cNotPt :
+ return Hexagon::STrid_cdnNotPt_V4;
+
+ case Hexagon::STrid_indexed_cPt :
+ return Hexagon::STrid_indexed_cdnPt_V4;
+
+ case Hexagon::STrid_indexed_cNotPt :
+ return Hexagon::STrid_indexed_cdnNotPt_V4;
+
+ case Hexagon::STrid_indexed_shl_cPt_V4 :
+ return Hexagon::STrid_indexed_shl_cdnPt_V4;
+
+ case Hexagon::STrid_indexed_shl_cNotPt_V4 :
+ return Hexagon::STrid_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::POST_STdri_cPt :
+ return Hexagon::POST_STdri_cdnPt_V4;
+
+ case Hexagon::POST_STdri_cNotPt :
+ return Hexagon::POST_STdri_cdnNotPt_V4;
+
+ case Hexagon::STd_GP_cPt_V4 :
+ return Hexagon::STd_GP_cdnPt_V4;
+
+ case Hexagon::STd_GP_cNotPt_V4 :
+ return Hexagon::STd_GP_cdnNotPt_V4;
+
+ case Hexagon::STrid_GP_cPt_V4 :
+ return Hexagon::STrid_GP_cdnPt_V4;
+
+ case Hexagon::STrid_GP_cNotPt_V4 :
+ return Hexagon::STrid_GP_cdnNotPt_V4;
+
+ // Store halfword conditionally
+ case Hexagon::STrih_cPt :
+ return Hexagon::STrih_cdnPt_V4;
+
+ case Hexagon::STrih_cNotPt :
+ return Hexagon::STrih_cdnNotPt_V4;
+
+ case Hexagon::STrih_indexed_cPt :
+ return Hexagon::STrih_indexed_cdnPt_V4;
+
+ case Hexagon::STrih_indexed_cNotPt :
+ return Hexagon::STrih_indexed_cdnNotPt_V4;
+
+ case Hexagon::STrih_imm_cPt_V4 :
+ return Hexagon::STrih_imm_cdnPt_V4;
+
+ case Hexagon::STrih_imm_cNotPt_V4 :
+ return Hexagon::STrih_imm_cdnNotPt_V4;
+
+ case Hexagon::STrih_indexed_shl_cPt_V4 :
+ return Hexagon::STrih_indexed_shl_cdnPt_V4;
+
+ case Hexagon::STrih_indexed_shl_cNotPt_V4 :
+ return Hexagon::STrih_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::POST_SThri_cPt :
+ return Hexagon::POST_SThri_cdnPt_V4;
+
+ case Hexagon::POST_SThri_cNotPt :
+ return Hexagon::POST_SThri_cdnNotPt_V4;
+
+ case Hexagon::STh_GP_cPt_V4 :
+ return Hexagon::STh_GP_cdnPt_V4;
+
+ case Hexagon::STh_GP_cNotPt_V4 :
+ return Hexagon::STh_GP_cdnNotPt_V4;
+
+ case Hexagon::STrih_GP_cPt_V4 :
+ return Hexagon::STrih_GP_cdnPt_V4;
+
+ case Hexagon::STrih_GP_cNotPt_V4 :
+ return Hexagon::STrih_GP_cdnNotPt_V4;
+
+ // Store word conditionally
+ case Hexagon::STriw_cPt :
+ return Hexagon::STriw_cdnPt_V4;
+
+ case Hexagon::STriw_cNotPt :
+ return Hexagon::STriw_cdnNotPt_V4;
+
+ case Hexagon::STriw_indexed_cPt :
+ return Hexagon::STriw_indexed_cdnPt_V4;
+
+ case Hexagon::STriw_indexed_cNotPt :
+ return Hexagon::STriw_indexed_cdnNotPt_V4;
+
+ case Hexagon::STriw_imm_cPt_V4 :
+ return Hexagon::STriw_imm_cdnPt_V4;
+
+ case Hexagon::STriw_imm_cNotPt_V4 :
+ return Hexagon::STriw_imm_cdnNotPt_V4;
+
+ case Hexagon::STriw_indexed_shl_cPt_V4 :
+ return Hexagon::STriw_indexed_shl_cdnPt_V4;
+
+ case Hexagon::STriw_indexed_shl_cNotPt_V4 :
+ return Hexagon::STriw_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::POST_STwri_cPt :
+ return Hexagon::POST_STwri_cdnPt_V4;
+
+ case Hexagon::POST_STwri_cNotPt :
+ return Hexagon::POST_STwri_cdnNotPt_V4;
+
+ case Hexagon::STw_GP_cPt_V4 :
+ return Hexagon::STw_GP_cdnPt_V4;
+
+ case Hexagon::STw_GP_cNotPt_V4 :
+ return Hexagon::STw_GP_cdnNotPt_V4;
+
+ case Hexagon::STriw_GP_cPt_V4 :
+ return Hexagon::STriw_GP_cdnPt_V4;
+
+ case Hexagon::STriw_GP_cNotPt_V4 :
+ return Hexagon::STriw_GP_cdnNotPt_V4;
+
+ // Conditional Jumps
+ case Hexagon::JMP_c:
+ return Hexagon::JMP_cdnPt;
+
+ case Hexagon::JMP_cNot:
+ return Hexagon::JMP_cdnNotPt;
+
+ case Hexagon::JMPR_cPt:
+ return Hexagon::JMPR_cdnPt_V3;
+
+ case Hexagon::JMPR_cNotPt:
+ return Hexagon::JMPR_cdnNotPt_V3;
+
+ // Conditional Transfers
+ case Hexagon::TFR_cPt:
+ return Hexagon::TFR_cdnPt;
+
+ case Hexagon::TFR_cNotPt:
+ return Hexagon::TFR_cdnNotPt;
+
+ case Hexagon::TFRI_cPt:
+ return Hexagon::TFRI_cdnPt;
+
+ case Hexagon::TFRI_cNotPt:
+ return Hexagon::TFRI_cdnNotPt;
+
+ // Load double word
+ case Hexagon::LDrid_cPt :
+ return Hexagon::LDrid_cdnPt;
+
+ case Hexagon::LDrid_cNotPt :
+ return Hexagon::LDrid_cdnNotPt;
+
+ case Hexagon::LDrid_indexed_cPt :
+ return Hexagon::LDrid_indexed_cdnPt;
+
+ case Hexagon::LDrid_indexed_cNotPt :
+ return Hexagon::LDrid_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDrid_cPt :
+ return Hexagon::POST_LDrid_cdnPt_V4;
+
+ case Hexagon::POST_LDrid_cNotPt :
+ return Hexagon::POST_LDrid_cdnNotPt_V4;
+
+ // Load word
+ case Hexagon::LDriw_cPt :
+ return Hexagon::LDriw_cdnPt;
+
+ case Hexagon::LDriw_cNotPt :
+ return Hexagon::LDriw_cdnNotPt;
+
+ case Hexagon::LDriw_indexed_cPt :
+ return Hexagon::LDriw_indexed_cdnPt;
+
+ case Hexagon::LDriw_indexed_cNotPt :
+ return Hexagon::LDriw_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDriw_cPt :
+ return Hexagon::POST_LDriw_cdnPt_V4;
+
+ case Hexagon::POST_LDriw_cNotPt :
+ return Hexagon::POST_LDriw_cdnNotPt_V4;
+
+ // Load halfword
+ case Hexagon::LDrih_cPt :
+ return Hexagon::LDrih_cdnPt;
+
+ case Hexagon::LDrih_cNotPt :
+ return Hexagon::LDrih_cdnNotPt;
+
+ case Hexagon::LDrih_indexed_cPt :
+ return Hexagon::LDrih_indexed_cdnPt;
+
+ case Hexagon::LDrih_indexed_cNotPt :
+ return Hexagon::LDrih_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDrih_cPt :
+ return Hexagon::POST_LDrih_cdnPt_V4;
+
+ case Hexagon::POST_LDrih_cNotPt :
+ return Hexagon::POST_LDrih_cdnNotPt_V4;
+
+ // Load byte
+ case Hexagon::LDrib_cPt :
+ return Hexagon::LDrib_cdnPt;
+
+ case Hexagon::LDrib_cNotPt :
+ return Hexagon::LDrib_cdnNotPt;
+
+ case Hexagon::LDrib_indexed_cPt :
+ return Hexagon::LDrib_indexed_cdnPt;
+
+ case Hexagon::LDrib_indexed_cNotPt :
+ return Hexagon::LDrib_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDrib_cPt :
+ return Hexagon::POST_LDrib_cdnPt_V4;
+
+ case Hexagon::POST_LDrib_cNotPt :
+ return Hexagon::POST_LDrib_cdnNotPt_V4;
+
+ // Load unsigned halfword
+ case Hexagon::LDriuh_cPt :
+ return Hexagon::LDriuh_cdnPt;
+
+ case Hexagon::LDriuh_cNotPt :
+ return Hexagon::LDriuh_cdnNotPt;
+
+ case Hexagon::LDriuh_indexed_cPt :
+ return Hexagon::LDriuh_indexed_cdnPt;
+
+ case Hexagon::LDriuh_indexed_cNotPt :
+ return Hexagon::LDriuh_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDriuh_cPt :
+ return Hexagon::POST_LDriuh_cdnPt_V4;
+
+ case Hexagon::POST_LDriuh_cNotPt :
+ return Hexagon::POST_LDriuh_cdnNotPt_V4;
+
+ // Load unsigned byte
+ case Hexagon::LDriub_cPt :
+ return Hexagon::LDriub_cdnPt;
+
+ case Hexagon::LDriub_cNotPt :
+ return Hexagon::LDriub_cdnNotPt;
+
+ case Hexagon::LDriub_indexed_cPt :
+ return Hexagon::LDriub_indexed_cdnPt;
+
+ case Hexagon::LDriub_indexed_cNotPt :
+ return Hexagon::LDriub_indexed_cdnNotPt;
+
+ case Hexagon::POST_LDriub_cPt :
+ return Hexagon::POST_LDriub_cdnPt_V4;
+
+ case Hexagon::POST_LDriub_cNotPt :
+ return Hexagon::POST_LDriub_cdnNotPt_V4;
+
+ // V4 indexed+scaled load
+
+ case Hexagon::LDrid_indexed_cPt_V4 :
+ return Hexagon::LDrid_indexed_cdnPt_V4;
+
+ case Hexagon::LDrid_indexed_cNotPt_V4 :
+ return Hexagon::LDrid_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cPt_V4 :
+ return Hexagon::LDrid_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDrid_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDrib_indexed_cPt_V4 :
+ return Hexagon::LDrib_indexed_cdnPt_V4;
+
+ case Hexagon::LDrib_indexed_cNotPt_V4 :
+ return Hexagon::LDrib_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cPt_V4 :
+ return Hexagon::LDrib_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDrib_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDriub_indexed_cPt_V4 :
+ return Hexagon::LDriub_indexed_cdnPt_V4;
+
+ case Hexagon::LDriub_indexed_cNotPt_V4 :
+ return Hexagon::LDriub_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cPt_V4 :
+ return Hexagon::LDriub_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDriub_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDrih_indexed_cPt_V4 :
+ return Hexagon::LDrih_indexed_cdnPt_V4;
+
+ case Hexagon::LDrih_indexed_cNotPt_V4 :
+ return Hexagon::LDrih_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cPt_V4 :
+ return Hexagon::LDrih_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDrih_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDriuh_indexed_cPt_V4 :
+ return Hexagon::LDriuh_indexed_cdnPt_V4;
+
+ case Hexagon::LDriuh_indexed_cNotPt_V4 :
+ return Hexagon::LDriuh_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cPt_V4 :
+ return Hexagon::LDriuh_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDriuh_indexed_shl_cdnNotPt_V4;
+
+ case Hexagon::LDriw_indexed_cPt_V4 :
+ return Hexagon::LDriw_indexed_cdnPt_V4;
+
+ case Hexagon::LDriw_indexed_cNotPt_V4 :
+ return Hexagon::LDriw_indexed_cdnNotPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cPt_V4 :
+ return Hexagon::LDriw_indexed_shl_cdnPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
+ return Hexagon::LDriw_indexed_shl_cdnNotPt_V4;
+
+ // V4 global address load
+
+ case Hexagon::LDd_GP_cPt_V4:
+ return Hexagon::LDd_GP_cdnPt_V4;
+
+ case Hexagon::LDd_GP_cNotPt_V4:
+ return Hexagon::LDd_GP_cdnNotPt_V4;
+
+ case Hexagon::LDb_GP_cPt_V4:
+ return Hexagon::LDb_GP_cdnPt_V4;
+
+ case Hexagon::LDb_GP_cNotPt_V4:
+ return Hexagon::LDb_GP_cdnNotPt_V4;
+
+ case Hexagon::LDub_GP_cPt_V4:
+ return Hexagon::LDub_GP_cdnPt_V4;
+
+ case Hexagon::LDub_GP_cNotPt_V4:
+ return Hexagon::LDub_GP_cdnNotPt_V4;
+
+ case Hexagon::LDh_GP_cPt_V4:
+ return Hexagon::LDh_GP_cdnPt_V4;
+
+ case Hexagon::LDh_GP_cNotPt_V4:
+ return Hexagon::LDh_GP_cdnNotPt_V4;
+
+ case Hexagon::LDuh_GP_cPt_V4:
+ return Hexagon::LDuh_GP_cdnPt_V4;
+
+ case Hexagon::LDuh_GP_cNotPt_V4:
+ return Hexagon::LDuh_GP_cdnNotPt_V4;
+
+ case Hexagon::LDw_GP_cPt_V4:
+ return Hexagon::LDw_GP_cdnPt_V4;
+
+ case Hexagon::LDw_GP_cNotPt_V4:
+ return Hexagon::LDw_GP_cdnNotPt_V4;
+
+ case Hexagon::LDrid_GP_cPt_V4:
+ return Hexagon::LDrid_GP_cdnPt_V4;
+
+ case Hexagon::LDrid_GP_cNotPt_V4:
+ return Hexagon::LDrid_GP_cdnNotPt_V4;
+
+ case Hexagon::LDrib_GP_cPt_V4:
+ return Hexagon::LDrib_GP_cdnPt_V4;
+
+ case Hexagon::LDrib_GP_cNotPt_V4:
+ return Hexagon::LDrib_GP_cdnNotPt_V4;
+
+ case Hexagon::LDriub_GP_cPt_V4:
+ return Hexagon::LDriub_GP_cdnPt_V4;
+
+ case Hexagon::LDriub_GP_cNotPt_V4:
+ return Hexagon::LDriub_GP_cdnNotPt_V4;
+
+ case Hexagon::LDrih_GP_cPt_V4:
+ return Hexagon::LDrih_GP_cdnPt_V4;
+
+ case Hexagon::LDrih_GP_cNotPt_V4:
+ return Hexagon::LDrih_GP_cdnNotPt_V4;
+
+ case Hexagon::LDriuh_GP_cPt_V4:
+ return Hexagon::LDriuh_GP_cdnPt_V4;
+
+ case Hexagon::LDriuh_GP_cNotPt_V4:
+ return Hexagon::LDriuh_GP_cdnNotPt_V4;
+
+ case Hexagon::LDriw_GP_cPt_V4:
+ return Hexagon::LDriw_GP_cdnPt_V4;
+
+ case Hexagon::LDriw_GP_cNotPt_V4:
+ return Hexagon::LDriw_GP_cdnNotPt_V4;
+
+ // Conditional store new-value byte
+ case Hexagon::STrib_cPt_nv_V4 :
+ return Hexagon::STrib_cdnPt_nv_V4;
+ case Hexagon::STrib_cNotPt_nv_V4 :
+ return Hexagon::STrib_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_cPt_nv_V4 :
+ return Hexagon::STrib_indexed_cdnPt_nv_V4;
+ case Hexagon::STrib_indexed_cNotPt_nv_V4 :
+ return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
+ return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
+ case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
+ return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_STbri_cPt_nv_V4 :
+ return Hexagon::POST_STbri_cdnPt_nv_V4;
+ case Hexagon::POST_STbri_cNotPt_nv_V4 :
+ return Hexagon::POST_STbri_cdnNotPt_nv_V4;
+
+ case Hexagon::STb_GP_cPt_nv_V4 :
+ return Hexagon::STb_GP_cdnPt_nv_V4;
+
+ case Hexagon::STb_GP_cNotPt_nv_V4 :
+ return Hexagon::STb_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STrib_GP_cPt_nv_V4 :
+ return Hexagon::STrib_GP_cdnPt_nv_V4;
+
+ case Hexagon::STrib_GP_cNotPt_nv_V4 :
+ return Hexagon::STrib_GP_cdnNotPt_nv_V4;
+
+ // Conditional store new-value halfword
+ case Hexagon::STrih_cPt_nv_V4 :
+ return Hexagon::STrih_cdnPt_nv_V4;
+ case Hexagon::STrih_cNotPt_nv_V4 :
+ return Hexagon::STrih_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_cPt_nv_V4 :
+ return Hexagon::STrih_indexed_cdnPt_nv_V4;
+ case Hexagon::STrih_indexed_cNotPt_nv_V4 :
+ return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
+ return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
+ case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
+ return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_SThri_cPt_nv_V4 :
+ return Hexagon::POST_SThri_cdnPt_nv_V4;
+ case Hexagon::POST_SThri_cNotPt_nv_V4 :
+ return Hexagon::POST_SThri_cdnNotPt_nv_V4;
+
+ case Hexagon::STh_GP_cPt_nv_V4 :
+ return Hexagon::STh_GP_cdnPt_nv_V4;
+
+ case Hexagon::STh_GP_cNotPt_nv_V4 :
+ return Hexagon::STh_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STrih_GP_cPt_nv_V4 :
+ return Hexagon::STrih_GP_cdnPt_nv_V4;
+
+ case Hexagon::STrih_GP_cNotPt_nv_V4 :
+ return Hexagon::STrih_GP_cdnNotPt_nv_V4;
+
+ // Conditional store new-value word
+ case Hexagon::STriw_cPt_nv_V4 :
+ return Hexagon::STriw_cdnPt_nv_V4;
+ case Hexagon::STriw_cNotPt_nv_V4 :
+ return Hexagon::STriw_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_cPt_nv_V4 :
+ return Hexagon::STriw_indexed_cdnPt_nv_V4;
+ case Hexagon::STriw_indexed_cNotPt_nv_V4 :
+ return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
+ return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
+ case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
+ return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
+
+ case Hexagon::POST_STwri_cPt_nv_V4 :
+ return Hexagon::POST_STwri_cdnPt_nv_V4;
+ case Hexagon::POST_STwri_cNotPt_nv_V4:
+ return Hexagon::POST_STwri_cdnNotPt_nv_V4;
+
+ case Hexagon::STw_GP_cPt_nv_V4 :
+ return Hexagon::STw_GP_cdnPt_nv_V4;
+
+ case Hexagon::STw_GP_cNotPt_nv_V4 :
+ return Hexagon::STw_GP_cdnNotPt_nv_V4;
+
+ case Hexagon::STriw_GP_cPt_nv_V4 :
+ return Hexagon::STriw_GP_cdnPt_nv_V4;
+
+ case Hexagon::STriw_GP_cNotPt_nv_V4 :
+ return Hexagon::STriw_GP_cdnNotPt_nv_V4;
+
+ // Conditional add
+ case Hexagon::ADD_ri_cPt :
+ return Hexagon::ADD_ri_cdnPt;
+ case Hexagon::ADD_ri_cNotPt :
+ return Hexagon::ADD_ri_cdnNotPt;
+
+ case Hexagon::ADD_rr_cPt :
+ return Hexagon::ADD_rr_cdnPt;
+ case Hexagon::ADD_rr_cNotPt :
+ return Hexagon::ADD_rr_cdnNotPt;
+
+ // Conditional logical Operations
+ case Hexagon::XOR_rr_cPt :
+ return Hexagon::XOR_rr_cdnPt;
+ case Hexagon::XOR_rr_cNotPt :
+ return Hexagon::XOR_rr_cdnNotPt;
+
+ case Hexagon::AND_rr_cPt :
+ return Hexagon::AND_rr_cdnPt;
+ case Hexagon::AND_rr_cNotPt :
+ return Hexagon::AND_rr_cdnNotPt;
+
+ case Hexagon::OR_rr_cPt :
+ return Hexagon::OR_rr_cdnPt;
+ case Hexagon::OR_rr_cNotPt :
+ return Hexagon::OR_rr_cdnNotPt;
+
+ // Conditional Subtract
+ case Hexagon::SUB_rr_cPt :
+ return Hexagon::SUB_rr_cdnPt;
+ case Hexagon::SUB_rr_cNotPt :
+ return Hexagon::SUB_rr_cdnNotPt;
+
+ // Conditional combine
+ case Hexagon::COMBINE_rr_cPt :
+ return Hexagon::COMBINE_rr_cdnPt;
+ case Hexagon::COMBINE_rr_cNotPt :
+ return Hexagon::COMBINE_rr_cdnNotPt;
+
+ case Hexagon::ASLH_cPt_V4 :
+ return Hexagon::ASLH_cdnPt_V4;
+ case Hexagon::ASLH_cNotPt_V4 :
+ return Hexagon::ASLH_cdnNotPt_V4;
+
+ case Hexagon::ASRH_cPt_V4 :
+ return Hexagon::ASRH_cdnPt_V4;
+ case Hexagon::ASRH_cNotPt_V4 :
+ return Hexagon::ASRH_cdnNotPt_V4;
+
+ case Hexagon::SXTB_cPt_V4 :
+ return Hexagon::SXTB_cdnPt_V4;
+ case Hexagon::SXTB_cNotPt_V4 :
+ return Hexagon::SXTB_cdnNotPt_V4;
+
+ case Hexagon::SXTH_cPt_V4 :
+ return Hexagon::SXTH_cdnPt_V4;
+ case Hexagon::SXTH_cNotPt_V4 :
+ return Hexagon::SXTH_cdnNotPt_V4;
+
+ case Hexagon::ZXTB_cPt_V4 :
+ return Hexagon::ZXTB_cdnPt_V4;
+ case Hexagon::ZXTB_cNotPt_V4 :
+ return Hexagon::ZXTB_cdnNotPt_V4;
+
+ case Hexagon::ZXTH_cPt_V4 :
+ return Hexagon::ZXTH_cdnPt_V4;
+ case Hexagon::ZXTH_cNotPt_V4 :
+ return Hexagon::ZXTH_cdnNotPt_V4;
+ }
+}
+
+// Returns true if an instruction can be promoted to a .new predicated form
+// or a new-value store.
+bool HexagonPacketizerList::isNewifiable(MachineInstr* MI) {
+  return isCondInst(MI) || IsNewifyStore(MI);
+}
+
+bool HexagonPacketizerList::isCondInst(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ const MCInstrDesc& TID = MI->getDesc();
+ // Bug 5670: until it is fixed, the IsRegisterJump(MI) check below remains
+ // disabled.
+ if (TID.isConditionalBranch() // && !IsRegisterJump(MI)) ||
+ || QII->isConditionalTransfer(MI)
+ || QII->isConditionalALU32(MI)
+ || QII->isConditionalLoad(MI)
+ || QII->isConditionalStore(MI)) {
+ return true;
+ }
+ return false;
+}
+
+
+// Promote an instruction to its .new form. By this point, CanPromoteToDotNew
+// has already been called and has verified that the promotion is legal.
+bool HexagonPacketizerList::PromoteToDotNew(MachineInstr* MI,
+ SDep::Kind DepType, MachineBasicBlock::iterator &MII,
+ const TargetRegisterClass* RC) {
+
+ assert (DepType == SDep::Data);
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+
+ int NewOpcode;
+ if (RC == Hexagon::PredRegsRegisterClass)
+ NewOpcode = GetDotNewPredOp(MI->getOpcode());
+ else
+ NewOpcode = GetDotNewOp(MI->getOpcode());
+ MI->setDesc(QII->get(NewOpcode));
+
+ return true;
+}
+
+// Returns the most basic instruction for the .new predicated instructions and
+// new-value stores.
+// For example, all of the following instructions will be converted back to the
+// same instruction:
+// 1) if (p0.new) memw(R0+#0) = R1.new --->
+// 2) if (p0) memw(R0+#0) = R1.new -------> if (p0) memw(R0+#0) = R1
+// 3) if (p0.new) memw(R0+#0) = R1 --->
+//
+// To understand the translation of instruction 1 to its original form, consider
+// a packet with 3 instructions.
+// { p0 = cmp.eq(R0,R1)
+// if (p0.new) R2 = add(R3, R4)
+// R5 = add (R3, R1)
+// }
+// if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
+//
+// This instruction can be part of the previous packet only if both p0 and R2
+// are promoted to .new values. The promotion happens in steps: first the
+// predicate register is promoted to .new, and in the next iteration R2 is
+// promoted. Therefore, if the dependence check fails (due to R5) during the
+// next iteration, the instruction should be converted back to its most basic
+// form.
+
+static int GetDotOldOp(const int opc) {
+ switch (opc) {
+ default: llvm_unreachable("Unknown .old type");
+
+ case Hexagon::TFR_cdnPt:
+ return Hexagon::TFR_cPt;
+
+ case Hexagon::TFR_cdnNotPt:
+ return Hexagon::TFR_cNotPt;
+
+ case Hexagon::TFRI_cdnPt:
+ return Hexagon::TFRI_cPt;
+
+ case Hexagon::TFRI_cdnNotPt:
+ return Hexagon::TFRI_cNotPt;
+
+ case Hexagon::JMP_cdnPt:
+ return Hexagon::JMP_c;
+
+ case Hexagon::JMP_cdnNotPt:
+ return Hexagon::JMP_cNot;
+
+ case Hexagon::JMPR_cdnPt_V3:
+ return Hexagon::JMPR_cPt;
+
+ case Hexagon::JMPR_cdnNotPt_V3:
+ return Hexagon::JMPR_cNotPt;
+
+ // Load double word
+
+ case Hexagon::LDrid_cdnPt :
+ return Hexagon::LDrid_cPt;
+
+ case Hexagon::LDrid_cdnNotPt :
+ return Hexagon::LDrid_cNotPt;
+
+ case Hexagon::LDrid_indexed_cdnPt :
+ return Hexagon::LDrid_indexed_cPt;
+
+ case Hexagon::LDrid_indexed_cdnNotPt :
+ return Hexagon::LDrid_indexed_cNotPt;
+
+ case Hexagon::POST_LDrid_cdnPt_V4 :
+ return Hexagon::POST_LDrid_cPt;
+
+ case Hexagon::POST_LDrid_cdnNotPt_V4 :
+ return Hexagon::POST_LDrid_cNotPt;
+
+ // Load word
+
+ case Hexagon::LDriw_cdnPt :
+ return Hexagon::LDriw_cPt;
+
+ case Hexagon::LDriw_cdnNotPt :
+ return Hexagon::LDriw_cNotPt;
+
+ case Hexagon::LDriw_indexed_cdnPt :
+ return Hexagon::LDriw_indexed_cPt;
+
+ case Hexagon::LDriw_indexed_cdnNotPt :
+ return Hexagon::LDriw_indexed_cNotPt;
+
+ case Hexagon::POST_LDriw_cdnPt_V4 :
+ return Hexagon::POST_LDriw_cPt;
+
+ case Hexagon::POST_LDriw_cdnNotPt_V4 :
+ return Hexagon::POST_LDriw_cNotPt;
+
+ // Load half
+
+ case Hexagon::LDrih_cdnPt :
+ return Hexagon::LDrih_cPt;
+
+ case Hexagon::LDrih_cdnNotPt :
+ return Hexagon::LDrih_cNotPt;
+
+ case Hexagon::LDrih_indexed_cdnPt :
+ return Hexagon::LDrih_indexed_cPt;
+
+ case Hexagon::LDrih_indexed_cdnNotPt :
+ return Hexagon::LDrih_indexed_cNotPt;
+
+ case Hexagon::POST_LDrih_cdnPt_V4 :
+ return Hexagon::POST_LDrih_cPt;
+
+ case Hexagon::POST_LDrih_cdnNotPt_V4 :
+ return Hexagon::POST_LDrih_cNotPt;
+
+ // Load byte
+
+ case Hexagon::LDrib_cdnPt :
+ return Hexagon::LDrib_cPt;
+
+ case Hexagon::LDrib_cdnNotPt :
+ return Hexagon::LDrib_cNotPt;
+
+ case Hexagon::LDrib_indexed_cdnPt :
+ return Hexagon::LDrib_indexed_cPt;
+
+ case Hexagon::LDrib_indexed_cdnNotPt :
+ return Hexagon::LDrib_indexed_cNotPt;
+
+ case Hexagon::POST_LDrib_cdnPt_V4 :
+ return Hexagon::POST_LDrib_cPt;
+
+ case Hexagon::POST_LDrib_cdnNotPt_V4 :
+ return Hexagon::POST_LDrib_cNotPt;
+
+ // Load unsigned half
+
+ case Hexagon::LDriuh_cdnPt :
+ return Hexagon::LDriuh_cPt;
+
+ case Hexagon::LDriuh_cdnNotPt :
+ return Hexagon::LDriuh_cNotPt;
+
+ case Hexagon::LDriuh_indexed_cdnPt :
+ return Hexagon::LDriuh_indexed_cPt;
+
+ case Hexagon::LDriuh_indexed_cdnNotPt :
+ return Hexagon::LDriuh_indexed_cNotPt;
+
+ case Hexagon::POST_LDriuh_cdnPt_V4 :
+ return Hexagon::POST_LDriuh_cPt;
+
+ case Hexagon::POST_LDriuh_cdnNotPt_V4 :
+ return Hexagon::POST_LDriuh_cNotPt;
+
+ // Load unsigned byte
+ case Hexagon::LDriub_cdnPt :
+ return Hexagon::LDriub_cPt;
+
+ case Hexagon::LDriub_cdnNotPt :
+ return Hexagon::LDriub_cNotPt;
+
+ case Hexagon::LDriub_indexed_cdnPt :
+ return Hexagon::LDriub_indexed_cPt;
+
+ case Hexagon::LDriub_indexed_cdnNotPt :
+ return Hexagon::LDriub_indexed_cNotPt;
+
+ case Hexagon::POST_LDriub_cdnPt_V4 :
+ return Hexagon::POST_LDriub_cPt;
+
+ case Hexagon::POST_LDriub_cdnNotPt_V4 :
+ return Hexagon::POST_LDriub_cNotPt;
+
+ // V4 indexed+scaled Load
+
+ case Hexagon::LDrid_indexed_cdnPt_V4 :
+ return Hexagon::LDrid_indexed_cPt_V4;
+
+ case Hexagon::LDrid_indexed_cdnNotPt_V4 :
+ return Hexagon::LDrid_indexed_cNotPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDrid_indexed_shl_cPt_V4;
+
+ case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDrid_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDrib_indexed_cdnPt_V4 :
+ return Hexagon::LDrib_indexed_cPt_V4;
+
+ case Hexagon::LDrib_indexed_cdnNotPt_V4 :
+ return Hexagon::LDrib_indexed_cNotPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDrib_indexed_shl_cPt_V4;
+
+ case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDrib_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDriub_indexed_cdnPt_V4 :
+ return Hexagon::LDriub_indexed_cPt_V4;
+
+ case Hexagon::LDriub_indexed_cdnNotPt_V4 :
+ return Hexagon::LDriub_indexed_cNotPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDriub_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDriub_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDrih_indexed_cdnPt_V4 :
+ return Hexagon::LDrih_indexed_cPt_V4;
+
+ case Hexagon::LDrih_indexed_cdnNotPt_V4 :
+ return Hexagon::LDrih_indexed_cNotPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDrih_indexed_shl_cPt_V4;
+
+ case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDrih_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDriuh_indexed_cdnPt_V4 :
+ return Hexagon::LDriuh_indexed_cPt_V4;
+
+ case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
+ return Hexagon::LDriuh_indexed_cNotPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDriuh_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
+
+ case Hexagon::LDriw_indexed_cdnPt_V4 :
+ return Hexagon::LDriw_indexed_cPt_V4;
+
+ case Hexagon::LDriw_indexed_cdnNotPt_V4 :
+ return Hexagon::LDriw_indexed_cNotPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
+ return Hexagon::LDriw_indexed_shl_cPt_V4;
+
+ case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::LDriw_indexed_shl_cNotPt_V4;
+
+ // V4 global address load
+
+ case Hexagon::LDd_GP_cdnPt_V4:
+ return Hexagon::LDd_GP_cPt_V4;
+
+ case Hexagon::LDd_GP_cdnNotPt_V4:
+ return Hexagon::LDd_GP_cNotPt_V4;
+
+ case Hexagon::LDb_GP_cdnPt_V4:
+ return Hexagon::LDb_GP_cPt_V4;
+
+ case Hexagon::LDb_GP_cdnNotPt_V4:
+ return Hexagon::LDb_GP_cNotPt_V4;
+
+ case Hexagon::LDub_GP_cdnPt_V4:
+ return Hexagon::LDub_GP_cPt_V4;
+
+ case Hexagon::LDub_GP_cdnNotPt_V4:
+ return Hexagon::LDub_GP_cNotPt_V4;
+
+ case Hexagon::LDh_GP_cdnPt_V4:
+ return Hexagon::LDh_GP_cPt_V4;
+
+ case Hexagon::LDh_GP_cdnNotPt_V4:
+ return Hexagon::LDh_GP_cNotPt_V4;
+
+ case Hexagon::LDuh_GP_cdnPt_V4:
+ return Hexagon::LDuh_GP_cPt_V4;
+
+ case Hexagon::LDuh_GP_cdnNotPt_V4:
+ return Hexagon::LDuh_GP_cNotPt_V4;
+
+ case Hexagon::LDw_GP_cdnPt_V4:
+ return Hexagon::LDw_GP_cPt_V4;
+
+ case Hexagon::LDw_GP_cdnNotPt_V4:
+ return Hexagon::LDw_GP_cNotPt_V4;
+
+ case Hexagon::LDrid_GP_cdnPt_V4:
+ return Hexagon::LDrid_GP_cPt_V4;
+
+ case Hexagon::LDrid_GP_cdnNotPt_V4:
+ return Hexagon::LDrid_GP_cNotPt_V4;
+
+ case Hexagon::LDrib_GP_cdnPt_V4:
+ return Hexagon::LDrib_GP_cPt_V4;
+
+ case Hexagon::LDrib_GP_cdnNotPt_V4:
+ return Hexagon::LDrib_GP_cNotPt_V4;
+
+ case Hexagon::LDriub_GP_cdnPt_V4:
+ return Hexagon::LDriub_GP_cPt_V4;
+
+ case Hexagon::LDriub_GP_cdnNotPt_V4:
+ return Hexagon::LDriub_GP_cNotPt_V4;
+
+ case Hexagon::LDrih_GP_cdnPt_V4:
+ return Hexagon::LDrih_GP_cPt_V4;
+
+ case Hexagon::LDrih_GP_cdnNotPt_V4:
+ return Hexagon::LDrih_GP_cNotPt_V4;
+
+ case Hexagon::LDriuh_GP_cdnPt_V4:
+ return Hexagon::LDriuh_GP_cPt_V4;
+
+ case Hexagon::LDriuh_GP_cdnNotPt_V4:
+ return Hexagon::LDriuh_GP_cNotPt_V4;
+
+ case Hexagon::LDriw_GP_cdnPt_V4:
+ return Hexagon::LDriw_GP_cPt_V4;
+
+ case Hexagon::LDriw_GP_cdnNotPt_V4:
+ return Hexagon::LDriw_GP_cNotPt_V4;
+
+ // Conditional add
+
+ case Hexagon::ADD_ri_cdnPt :
+ return Hexagon::ADD_ri_cPt;
+ case Hexagon::ADD_ri_cdnNotPt :
+ return Hexagon::ADD_ri_cNotPt;
+
+ case Hexagon::ADD_rr_cdnPt :
+ return Hexagon::ADD_rr_cPt;
+ case Hexagon::ADD_rr_cdnNotPt:
+ return Hexagon::ADD_rr_cNotPt;
+
+ // Conditional logical Operations
+
+ case Hexagon::XOR_rr_cdnPt :
+ return Hexagon::XOR_rr_cPt;
+ case Hexagon::XOR_rr_cdnNotPt :
+ return Hexagon::XOR_rr_cNotPt;
+
+ case Hexagon::AND_rr_cdnPt :
+ return Hexagon::AND_rr_cPt;
+ case Hexagon::AND_rr_cdnNotPt :
+ return Hexagon::AND_rr_cNotPt;
+
+ case Hexagon::OR_rr_cdnPt :
+ return Hexagon::OR_rr_cPt;
+ case Hexagon::OR_rr_cdnNotPt :
+ return Hexagon::OR_rr_cNotPt;
+
+ // Conditional Subtract
+
+ case Hexagon::SUB_rr_cdnPt :
+ return Hexagon::SUB_rr_cPt;
+ case Hexagon::SUB_rr_cdnNotPt :
+ return Hexagon::SUB_rr_cNotPt;
+
+ // Conditional combine
+
+ case Hexagon::COMBINE_rr_cdnPt :
+ return Hexagon::COMBINE_rr_cPt;
+ case Hexagon::COMBINE_rr_cdnNotPt :
+ return Hexagon::COMBINE_rr_cNotPt;
+
+ // Conditional shift operations
+
+ case Hexagon::ASLH_cdnPt_V4 :
+ return Hexagon::ASLH_cPt_V4;
+ case Hexagon::ASLH_cdnNotPt_V4 :
+ return Hexagon::ASLH_cNotPt_V4;
+
+ case Hexagon::ASRH_cdnPt_V4 :
+ return Hexagon::ASRH_cPt_V4;
+ case Hexagon::ASRH_cdnNotPt_V4 :
+ return Hexagon::ASRH_cNotPt_V4;
+
+ case Hexagon::SXTB_cdnPt_V4 :
+ return Hexagon::SXTB_cPt_V4;
+ case Hexagon::SXTB_cdnNotPt_V4 :
+ return Hexagon::SXTB_cNotPt_V4;
+
+ case Hexagon::SXTH_cdnPt_V4 :
+ return Hexagon::SXTH_cPt_V4;
+ case Hexagon::SXTH_cdnNotPt_V4 :
+ return Hexagon::SXTH_cNotPt_V4;
+
+ case Hexagon::ZXTB_cdnPt_V4 :
+ return Hexagon::ZXTB_cPt_V4;
+ case Hexagon::ZXTB_cdnNotPt_V4 :
+ return Hexagon::ZXTB_cNotPt_V4;
+
+ case Hexagon::ZXTH_cdnPt_V4 :
+ return Hexagon::ZXTH_cPt_V4;
+ case Hexagon::ZXTH_cdnNotPt_V4 :
+ return Hexagon::ZXTH_cNotPt_V4;
+
+ // Store byte
+
+ case Hexagon::STrib_imm_cdnPt_V4 :
+ return Hexagon::STrib_imm_cPt_V4;
+
+ case Hexagon::STrib_imm_cdnNotPt_V4 :
+ return Hexagon::STrib_imm_cNotPt_V4;
+
+ case Hexagon::STrib_cdnPt_nv_V4 :
+ case Hexagon::STrib_cPt_nv_V4 :
+ case Hexagon::STrib_cdnPt_V4 :
+ return Hexagon::STrib_cPt;
+
+ case Hexagon::STrib_cdnNotPt_nv_V4 :
+ case Hexagon::STrib_cNotPt_nv_V4 :
+ case Hexagon::STrib_cdnNotPt_V4 :
+ return Hexagon::STrib_cNotPt;
+
+ case Hexagon::STrib_indexed_cdnPt_V4 :
+ case Hexagon::STrib_indexed_cPt_nv_V4 :
+ case Hexagon::STrib_indexed_cdnPt_nv_V4 :
+ return Hexagon::STrib_indexed_cPt;
+
+ case Hexagon::STrib_indexed_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_cNotPt_nv_V4 :
+ case Hexagon::STrib_indexed_cdnNotPt_nv_V4 :
+ return Hexagon::STrib_indexed_cNotPt;
+
+ case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
+ case Hexagon::STrib_indexed_shl_cdnPt_V4 :
+ return Hexagon::STrib_indexed_shl_cPt_V4;
+
+ case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
+ case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::STrib_indexed_shl_cNotPt_V4;
+
+ case Hexagon::POST_STbri_cdnPt_nv_V4 :
+ case Hexagon::POST_STbri_cPt_nv_V4 :
+ case Hexagon::POST_STbri_cdnPt_V4 :
+ return Hexagon::POST_STbri_cPt;
+
+ case Hexagon::POST_STbri_cdnNotPt_nv_V4 :
+ case Hexagon::POST_STbri_cNotPt_nv_V4:
+ case Hexagon::POST_STbri_cdnNotPt_V4 :
+ return Hexagon::POST_STbri_cNotPt;
+
+ case Hexagon::STb_GP_cdnPt_nv_V4:
+ case Hexagon::STb_GP_cdnPt_V4:
+ case Hexagon::STb_GP_cPt_nv_V4:
+ return Hexagon::STb_GP_cPt_V4;
+
+ case Hexagon::STb_GP_cdnNotPt_nv_V4:
+ case Hexagon::STb_GP_cdnNotPt_V4:
+ case Hexagon::STb_GP_cNotPt_nv_V4:
+ return Hexagon::STb_GP_cNotPt_V4;
+
+ case Hexagon::STrib_GP_cdnPt_nv_V4:
+ case Hexagon::STrib_GP_cdnPt_V4:
+ case Hexagon::STrib_GP_cPt_nv_V4:
+ return Hexagon::STrib_GP_cPt_V4;
+
+ case Hexagon::STrib_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrib_GP_cdnNotPt_V4:
+ case Hexagon::STrib_GP_cNotPt_nv_V4:
+ return Hexagon::STrib_GP_cNotPt_V4;
+
+ // Store new-value byte - unconditional
+ case Hexagon::STrib_nv_V4:
+ return Hexagon::STrib;
+
+ case Hexagon::STrib_indexed_nv_V4:
+ return Hexagon::STrib_indexed;
+
+ case Hexagon::STrib_indexed_shl_nv_V4:
+ return Hexagon::STrib_indexed_shl_V4;
+
+ case Hexagon::STrib_shl_nv_V4:
+ return Hexagon::STrib_shl_V4;
+
+ case Hexagon::STrib_GP_nv_V4:
+ return Hexagon::STrib_GP_V4;
+
+ case Hexagon::STb_GP_nv_V4:
+ return Hexagon::STb_GP_V4;
+
+ case Hexagon::POST_STbri_nv_V4:
+ return Hexagon::POST_STbri;
+
+ // Store halfword
+ case Hexagon::STrih_imm_cdnPt_V4 :
+ return Hexagon::STrih_imm_cPt_V4;
+
+ case Hexagon::STrih_imm_cdnNotPt_V4 :
+ return Hexagon::STrih_imm_cNotPt_V4;
+
+ case Hexagon::STrih_cdnPt_nv_V4 :
+ case Hexagon::STrih_cPt_nv_V4 :
+ case Hexagon::STrih_cdnPt_V4 :
+ return Hexagon::STrih_cPt;
+
+ case Hexagon::STrih_cdnNotPt_nv_V4 :
+ case Hexagon::STrih_cNotPt_nv_V4 :
+ case Hexagon::STrih_cdnNotPt_V4 :
+ return Hexagon::STrih_cNotPt;
+
+ case Hexagon::STrih_indexed_cdnPt_nv_V4:
+ case Hexagon::STrih_indexed_cPt_nv_V4 :
+ case Hexagon::STrih_indexed_cdnPt_V4 :
+ return Hexagon::STrih_indexed_cPt;
+
+ case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
+ case Hexagon::STrih_indexed_cNotPt_nv_V4 :
+ case Hexagon::STrih_indexed_cdnNotPt_V4 :
+ return Hexagon::STrih_indexed_cNotPt;
+
+ case Hexagon::STrih_indexed_shl_cdnPt_nv_V4 :
+ case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
+ case Hexagon::STrih_indexed_shl_cdnPt_V4 :
+ return Hexagon::STrih_indexed_shl_cPt_V4;
+
+ case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4 :
+ case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::STrih_indexed_shl_cNotPt_V4;
+
+ case Hexagon::POST_SThri_cdnPt_nv_V4 :
+ case Hexagon::POST_SThri_cPt_nv_V4 :
+ case Hexagon::POST_SThri_cdnPt_V4 :
+ return Hexagon::POST_SThri_cPt;
+
+ case Hexagon::POST_SThri_cdnNotPt_nv_V4 :
+ case Hexagon::POST_SThri_cNotPt_nv_V4 :
+ case Hexagon::POST_SThri_cdnNotPt_V4 :
+ return Hexagon::POST_SThri_cNotPt;
+
+ case Hexagon::STh_GP_cdnPt_nv_V4:
+ case Hexagon::STh_GP_cdnPt_V4:
+ case Hexagon::STh_GP_cPt_nv_V4:
+ return Hexagon::STh_GP_cPt_V4;
+
+ case Hexagon::STh_GP_cdnNotPt_nv_V4:
+ case Hexagon::STh_GP_cdnNotPt_V4:
+ case Hexagon::STh_GP_cNotPt_nv_V4:
+ return Hexagon::STh_GP_cNotPt_V4;
+
+ case Hexagon::STrih_GP_cdnPt_nv_V4:
+ case Hexagon::STrih_GP_cdnPt_V4:
+ case Hexagon::STrih_GP_cPt_nv_V4:
+ return Hexagon::STrih_GP_cPt_V4;
+
+ case Hexagon::STrih_GP_cdnNotPt_nv_V4:
+ case Hexagon::STrih_GP_cdnNotPt_V4:
+ case Hexagon::STrih_GP_cNotPt_nv_V4:
+ return Hexagon::STrih_GP_cNotPt_V4;
+
+ // Store new-value halfword - unconditional
+
+ case Hexagon::STrih_nv_V4:
+ return Hexagon::STrih;
+
+ case Hexagon::STrih_indexed_nv_V4:
+ return Hexagon::STrih_indexed;
+
+ case Hexagon::STrih_indexed_shl_nv_V4:
+ return Hexagon::STrih_indexed_shl_V4;
+
+ case Hexagon::STrih_shl_nv_V4:
+ return Hexagon::STrih_shl_V4;
+
+ case Hexagon::STrih_GP_nv_V4:
+ return Hexagon::STrih_GP_V4;
+
+ case Hexagon::STh_GP_nv_V4:
+ return Hexagon::STh_GP_V4;
+
+ case Hexagon::POST_SThri_nv_V4:
+ return Hexagon::POST_SThri;
+
+ // Store word
+
+ case Hexagon::STriw_imm_cdnPt_V4 :
+ return Hexagon::STriw_imm_cPt_V4;
+
+ case Hexagon::STriw_imm_cdnNotPt_V4 :
+ return Hexagon::STriw_imm_cNotPt_V4;
+
+ case Hexagon::STriw_cdnPt_nv_V4 :
+ case Hexagon::STriw_cPt_nv_V4 :
+ case Hexagon::STriw_cdnPt_V4 :
+ return Hexagon::STriw_cPt;
+
+ case Hexagon::STriw_cdnNotPt_nv_V4 :
+ case Hexagon::STriw_cNotPt_nv_V4 :
+ case Hexagon::STriw_cdnNotPt_V4 :
+ return Hexagon::STriw_cNotPt;
+
+ case Hexagon::STriw_indexed_cdnPt_nv_V4 :
+ case Hexagon::STriw_indexed_cPt_nv_V4 :
+ case Hexagon::STriw_indexed_cdnPt_V4 :
+ return Hexagon::STriw_indexed_cPt;
+
+ case Hexagon::STriw_indexed_cdnNotPt_nv_V4 :
+ case Hexagon::STriw_indexed_cNotPt_nv_V4 :
+ case Hexagon::STriw_indexed_cdnNotPt_V4 :
+ return Hexagon::STriw_indexed_cNotPt;
+
+ case Hexagon::STriw_indexed_shl_cdnPt_nv_V4 :
+ case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
+ case Hexagon::STriw_indexed_shl_cdnPt_V4 :
+ return Hexagon::STriw_indexed_shl_cPt_V4;
+
+ case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4 :
+ case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::STriw_indexed_shl_cNotPt_V4;
+
+ case Hexagon::POST_STwri_cdnPt_nv_V4 :
+ case Hexagon::POST_STwri_cPt_nv_V4 :
+ case Hexagon::POST_STwri_cdnPt_V4 :
+ return Hexagon::POST_STwri_cPt;
+
+ case Hexagon::POST_STwri_cdnNotPt_nv_V4 :
+ case Hexagon::POST_STwri_cNotPt_nv_V4 :
+ case Hexagon::POST_STwri_cdnNotPt_V4 :
+ return Hexagon::POST_STwri_cNotPt;
+
+ case Hexagon::STw_GP_cdnPt_nv_V4:
+ case Hexagon::STw_GP_cdnPt_V4:
+ case Hexagon::STw_GP_cPt_nv_V4:
+ return Hexagon::STw_GP_cPt_V4;
+
+ case Hexagon::STw_GP_cdnNotPt_nv_V4:
+ case Hexagon::STw_GP_cdnNotPt_V4:
+ case Hexagon::STw_GP_cNotPt_nv_V4:
+ return Hexagon::STw_GP_cNotPt_V4;
+
+ case Hexagon::STriw_GP_cdnPt_nv_V4:
+ case Hexagon::STriw_GP_cdnPt_V4:
+ case Hexagon::STriw_GP_cPt_nv_V4:
+ return Hexagon::STriw_GP_cPt_V4;
+
+ case Hexagon::STriw_GP_cdnNotPt_nv_V4:
+ case Hexagon::STriw_GP_cdnNotPt_V4:
+ case Hexagon::STriw_GP_cNotPt_nv_V4:
+ return Hexagon::STriw_GP_cNotPt_V4;
+
+ // Store new-value word - unconditional
+
+ case Hexagon::STriw_nv_V4:
+ return Hexagon::STriw;
+
+ case Hexagon::STriw_indexed_nv_V4:
+ return Hexagon::STriw_indexed;
+
+ case Hexagon::STriw_indexed_shl_nv_V4:
+ return Hexagon::STriw_indexed_shl_V4;
+
+ case Hexagon::STriw_shl_nv_V4:
+ return Hexagon::STriw_shl_V4;
+
+ case Hexagon::STriw_GP_nv_V4:
+ return Hexagon::STriw_GP_V4;
+
+ case Hexagon::STw_GP_nv_V4:
+ return Hexagon::STw_GP_V4;
+
+ case Hexagon::POST_STwri_nv_V4:
+ return Hexagon::POST_STwri;
+
+ // Store doubleword
+
+ case Hexagon::STrid_cdnPt_V4 :
+ return Hexagon::STrid_cPt;
+
+ case Hexagon::STrid_cdnNotPt_V4 :
+ return Hexagon::STrid_cNotPt;
+
+ case Hexagon::STrid_indexed_cdnPt_V4 :
+ return Hexagon::STrid_indexed_cPt;
+
+ case Hexagon::STrid_indexed_cdnNotPt_V4 :
+ return Hexagon::STrid_indexed_cNotPt;
+
+ case Hexagon::STrid_indexed_shl_cdnPt_V4 :
+ return Hexagon::STrid_indexed_shl_cPt_V4;
+
+ case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
+ return Hexagon::STrid_indexed_shl_cNotPt_V4;
+
+ case Hexagon::POST_STdri_cdnPt_V4 :
+ return Hexagon::POST_STdri_cPt;
+
+ case Hexagon::POST_STdri_cdnNotPt_V4 :
+ return Hexagon::POST_STdri_cNotPt;
+
+ case Hexagon::STd_GP_cdnPt_V4 :
+ return Hexagon::STd_GP_cPt_V4;
+
+ case Hexagon::STd_GP_cdnNotPt_V4 :
+ return Hexagon::STd_GP_cNotPt_V4;
+
+ case Hexagon::STrid_GP_cdnPt_V4 :
+ return Hexagon::STrid_GP_cPt_V4;
+
+ case Hexagon::STrid_GP_cdnNotPt_V4 :
+ return Hexagon::STrid_GP_cNotPt_V4;
+ }
+}
+
+bool HexagonPacketizerList::DemoteToDotOld(MachineInstr* MI) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ int NewOpcode = GetDotOldOp(MI->getOpcode());
+ MI->setDesc(QII->get(NewOpcode));
+ return true;
+}
+
+// Returns true if an instruction is predicated on the true sense of its
+// predicate (e.g. p0) and false if it is predicated on the false sense
+// (e.g. !p0).
+
+static bool GetPredicateSense(MachineInstr* MI,
+ const HexagonInstrInfo *QII) {
+
+ switch (MI->getOpcode()) {
+ case Hexagon::TFR_cPt:
+ case Hexagon::TFR_cdnPt:
+ case Hexagon::TFRI_cPt:
+ case Hexagon::TFRI_cdnPt:
+ case Hexagon::STrib_cPt :
+ case Hexagon::STrib_cdnPt_V4 :
+ case Hexagon::STrib_indexed_cPt :
+ case Hexagon::STrib_indexed_cdnPt_V4 :
+ case Hexagon::STrib_indexed_shl_cPt_V4 :
+ case Hexagon::STrib_indexed_shl_cdnPt_V4 :
+ case Hexagon::POST_STbri_cPt :
+ case Hexagon::POST_STbri_cdnPt_V4 :
+ case Hexagon::STrih_cPt :
+ case Hexagon::STrih_cdnPt_V4 :
+ case Hexagon::STrih_indexed_cPt :
+ case Hexagon::STrih_indexed_cdnPt_V4 :
+ case Hexagon::STrih_indexed_shl_cPt_V4 :
+ case Hexagon::STrih_indexed_shl_cdnPt_V4 :
+ case Hexagon::POST_SThri_cPt :
+ case Hexagon::POST_SThri_cdnPt_V4 :
+ case Hexagon::STriw_cPt :
+ case Hexagon::STriw_cdnPt_V4 :
+ case Hexagon::STriw_indexed_cPt :
+ case Hexagon::STriw_indexed_cdnPt_V4 :
+ case Hexagon::STriw_indexed_shl_cPt_V4 :
+ case Hexagon::STriw_indexed_shl_cdnPt_V4 :
+ case Hexagon::POST_STwri_cPt :
+ case Hexagon::POST_STwri_cdnPt_V4 :
+ case Hexagon::STrib_imm_cPt_V4 :
+ case Hexagon::STrib_imm_cdnPt_V4 :
+ case Hexagon::STrid_cPt :
+ case Hexagon::STrid_cdnPt_V4 :
+ case Hexagon::STrid_indexed_cPt :
+ case Hexagon::STrid_indexed_cdnPt_V4 :
+ case Hexagon::STrid_indexed_shl_cPt_V4 :
+ case Hexagon::STrid_indexed_shl_cdnPt_V4 :
+ case Hexagon::POST_STdri_cPt :
+ case Hexagon::POST_STdri_cdnPt_V4 :
+ case Hexagon::STrih_imm_cPt_V4 :
+ case Hexagon::STrih_imm_cdnPt_V4 :
+ case Hexagon::STriw_imm_cPt_V4 :
+ case Hexagon::STriw_imm_cdnPt_V4 :
+ case Hexagon::JMP_cdnPt :
+ case Hexagon::LDrid_cPt :
+ case Hexagon::LDrid_cdnPt :
+ case Hexagon::LDrid_indexed_cPt :
+ case Hexagon::LDrid_indexed_cdnPt :
+ case Hexagon::POST_LDrid_cPt :
+ case Hexagon::POST_LDrid_cdnPt_V4 :
+ case Hexagon::LDriw_cPt :
+ case Hexagon::LDriw_cdnPt :
+ case Hexagon::LDriw_indexed_cPt :
+ case Hexagon::LDriw_indexed_cdnPt :
+ case Hexagon::POST_LDriw_cPt :
+ case Hexagon::POST_LDriw_cdnPt_V4 :
+ case Hexagon::LDrih_cPt :
+ case Hexagon::LDrih_cdnPt :
+ case Hexagon::LDrih_indexed_cPt :
+ case Hexagon::LDrih_indexed_cdnPt :
+ case Hexagon::POST_LDrih_cPt :
+ case Hexagon::POST_LDrih_cdnPt_V4 :
+ case Hexagon::LDrib_cPt :
+ case Hexagon::LDrib_cdnPt :
+ case Hexagon::LDrib_indexed_cPt :
+ case Hexagon::LDrib_indexed_cdnPt :
+ case Hexagon::POST_LDrib_cPt :
+ case Hexagon::POST_LDrib_cdnPt_V4 :
+ case Hexagon::LDriuh_cPt :
+ case Hexagon::LDriuh_cdnPt :
+ case Hexagon::LDriuh_indexed_cPt :
+ case Hexagon::LDriuh_indexed_cdnPt :
+ case Hexagon::POST_LDriuh_cPt :
+ case Hexagon::POST_LDriuh_cdnPt_V4 :
+ case Hexagon::LDriub_cPt :
+ case Hexagon::LDriub_cdnPt :
+ case Hexagon::LDriub_indexed_cPt :
+ case Hexagon::LDriub_indexed_cdnPt :
+ case Hexagon::POST_LDriub_cPt :
+ case Hexagon::POST_LDriub_cdnPt_V4 :
+ case Hexagon::LDrid_indexed_cPt_V4 :
+ case Hexagon::LDrid_indexed_cdnPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrib_indexed_cPt_V4 :
+ case Hexagon::LDrib_indexed_cdnPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriub_indexed_cPt_V4 :
+ case Hexagon::LDriub_indexed_cdnPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrih_indexed_cPt_V4 :
+ case Hexagon::LDrih_indexed_cdnPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriuh_indexed_cPt_V4 :
+ case Hexagon::LDriuh_indexed_cdnPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriw_indexed_cPt_V4 :
+ case Hexagon::LDriw_indexed_cdnPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
+ case Hexagon::ADD_ri_cPt :
+ case Hexagon::ADD_ri_cdnPt :
+ case Hexagon::ADD_rr_cPt :
+ case Hexagon::ADD_rr_cdnPt :
+ case Hexagon::XOR_rr_cPt :
+ case Hexagon::XOR_rr_cdnPt :
+ case Hexagon::AND_rr_cPt :
+ case Hexagon::AND_rr_cdnPt :
+ case Hexagon::OR_rr_cPt :
+ case Hexagon::OR_rr_cdnPt :
+ case Hexagon::SUB_rr_cPt :
+ case Hexagon::SUB_rr_cdnPt :
+ case Hexagon::COMBINE_rr_cPt :
+ case Hexagon::COMBINE_rr_cdnPt :
+ case Hexagon::ASLH_cPt_V4 :
+ case Hexagon::ASLH_cdnPt_V4 :
+ case Hexagon::ASRH_cPt_V4 :
+ case Hexagon::ASRH_cdnPt_V4 :
+ case Hexagon::SXTB_cPt_V4 :
+ case Hexagon::SXTB_cdnPt_V4 :
+ case Hexagon::SXTH_cPt_V4 :
+ case Hexagon::SXTH_cdnPt_V4 :
+ case Hexagon::ZXTB_cPt_V4 :
+ case Hexagon::ZXTB_cdnPt_V4 :
+ case Hexagon::ZXTH_cPt_V4 :
+ case Hexagon::ZXTH_cdnPt_V4 :
+ case Hexagon::LDrid_GP_cPt_V4 :
+ case Hexagon::LDrib_GP_cPt_V4 :
+ case Hexagon::LDriub_GP_cPt_V4 :
+ case Hexagon::LDrih_GP_cPt_V4 :
+ case Hexagon::LDriuh_GP_cPt_V4 :
+ case Hexagon::LDriw_GP_cPt_V4 :
+ case Hexagon::LDd_GP_cPt_V4 :
+ case Hexagon::LDb_GP_cPt_V4 :
+ case Hexagon::LDub_GP_cPt_V4 :
+ case Hexagon::LDh_GP_cPt_V4 :
+ case Hexagon::LDuh_GP_cPt_V4 :
+ case Hexagon::LDw_GP_cPt_V4 :
+ case Hexagon::STrid_GP_cPt_V4 :
+ case Hexagon::STrib_GP_cPt_V4 :
+ case Hexagon::STrih_GP_cPt_V4 :
+ case Hexagon::STriw_GP_cPt_V4 :
+ case Hexagon::STd_GP_cPt_V4 :
+ case Hexagon::STb_GP_cPt_V4 :
+ case Hexagon::STh_GP_cPt_V4 :
+ case Hexagon::STw_GP_cPt_V4 :
+ case Hexagon::LDrid_GP_cdnPt_V4 :
+ case Hexagon::LDrib_GP_cdnPt_V4 :
+ case Hexagon::LDriub_GP_cdnPt_V4 :
+ case Hexagon::LDrih_GP_cdnPt_V4 :
+ case Hexagon::LDriuh_GP_cdnPt_V4 :
+ case Hexagon::LDriw_GP_cdnPt_V4 :
+ case Hexagon::LDd_GP_cdnPt_V4 :
+ case Hexagon::LDb_GP_cdnPt_V4 :
+ case Hexagon::LDub_GP_cdnPt_V4 :
+ case Hexagon::LDh_GP_cdnPt_V4 :
+ case Hexagon::LDuh_GP_cdnPt_V4 :
+ case Hexagon::LDw_GP_cdnPt_V4 :
+ case Hexagon::STrid_GP_cdnPt_V4 :
+ case Hexagon::STrib_GP_cdnPt_V4 :
+ case Hexagon::STrih_GP_cdnPt_V4 :
+ case Hexagon::STriw_GP_cdnPt_V4 :
+ case Hexagon::STd_GP_cdnPt_V4 :
+ case Hexagon::STb_GP_cdnPt_V4 :
+ case Hexagon::STh_GP_cdnPt_V4 :
+ case Hexagon::STw_GP_cdnPt_V4 :
+ return true;
+
+ case Hexagon::TFR_cNotPt:
+ case Hexagon::TFR_cdnNotPt:
+ case Hexagon::TFRI_cNotPt:
+ case Hexagon::TFRI_cdnNotPt:
+ case Hexagon::STrib_cNotPt :
+ case Hexagon::STrib_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_cNotPt :
+ case Hexagon::STrib_indexed_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_shl_cNotPt_V4 :
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STbri_cNotPt :
+ case Hexagon::POST_STbri_cdnNotPt_V4 :
+ case Hexagon::STrih_cNotPt :
+ case Hexagon::STrih_cdnNotPt_V4 :
+ case Hexagon::STrih_indexed_cNotPt :
+ case Hexagon::STrih_indexed_cdnNotPt_V4 :
+ case Hexagon::STrih_indexed_shl_cNotPt_V4 :
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_SThri_cNotPt :
+ case Hexagon::POST_SThri_cdnNotPt_V4 :
+ case Hexagon::STriw_cNotPt :
+ case Hexagon::STriw_cdnNotPt_V4 :
+ case Hexagon::STriw_indexed_cNotPt :
+ case Hexagon::STriw_indexed_cdnNotPt_V4 :
+ case Hexagon::STriw_indexed_shl_cNotPt_V4 :
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STwri_cNotPt :
+ case Hexagon::POST_STwri_cdnNotPt_V4 :
+ case Hexagon::STrib_imm_cNotPt_V4 :
+ case Hexagon::STrib_imm_cdnNotPt_V4 :
+ case Hexagon::STrid_cNotPt :
+ case Hexagon::STrid_cdnNotPt_V4 :
+ case Hexagon::STrid_indexed_cdnNotPt_V4 :
+ case Hexagon::STrid_indexed_cNotPt :
+ case Hexagon::STrid_indexed_shl_cNotPt_V4 :
+ case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STdri_cNotPt :
+ case Hexagon::POST_STdri_cdnNotPt_V4 :
+ case Hexagon::STrih_imm_cNotPt_V4 :
+ case Hexagon::STrih_imm_cdnNotPt_V4 :
+ case Hexagon::STriw_imm_cNotPt_V4 :
+ case Hexagon::STriw_imm_cdnNotPt_V4 :
+ case Hexagon::JMP_cdnNotPt :
+ case Hexagon::LDrid_cNotPt :
+ case Hexagon::LDrid_cdnNotPt :
+ case Hexagon::LDrid_indexed_cNotPt :
+ case Hexagon::LDrid_indexed_cdnNotPt :
+ case Hexagon::POST_LDrid_cNotPt :
+ case Hexagon::POST_LDrid_cdnNotPt_V4 :
+ case Hexagon::LDriw_cNotPt :
+ case Hexagon::LDriw_cdnNotPt :
+ case Hexagon::LDriw_indexed_cNotPt :
+ case Hexagon::LDriw_indexed_cdnNotPt :
+ case Hexagon::POST_LDriw_cNotPt :
+ case Hexagon::POST_LDriw_cdnNotPt_V4 :
+ case Hexagon::LDrih_cNotPt :
+ case Hexagon::LDrih_cdnNotPt :
+ case Hexagon::LDrih_indexed_cNotPt :
+ case Hexagon::LDrih_indexed_cdnNotPt :
+ case Hexagon::POST_LDrih_cNotPt :
+ case Hexagon::POST_LDrih_cdnNotPt_V4 :
+ case Hexagon::LDrib_cNotPt :
+ case Hexagon::LDrib_cdnNotPt :
+ case Hexagon::LDrib_indexed_cNotPt :
+ case Hexagon::LDrib_indexed_cdnNotPt :
+ case Hexagon::POST_LDrib_cNotPt :
+ case Hexagon::POST_LDrib_cdnNotPt_V4 :
+ case Hexagon::LDriuh_cNotPt :
+ case Hexagon::LDriuh_cdnNotPt :
+ case Hexagon::LDriuh_indexed_cNotPt :
+ case Hexagon::LDriuh_indexed_cdnNotPt :
+ case Hexagon::POST_LDriuh_cNotPt :
+ case Hexagon::POST_LDriuh_cdnNotPt_V4 :
+ case Hexagon::LDriub_cNotPt :
+ case Hexagon::LDriub_cdnNotPt :
+ case Hexagon::LDriub_indexed_cNotPt :
+ case Hexagon::LDriub_indexed_cdnNotPt :
+ case Hexagon::POST_LDriub_cNotPt :
+ case Hexagon::POST_LDriub_cdnNotPt_V4 :
+ case Hexagon::LDrid_indexed_cNotPt_V4 :
+ case Hexagon::LDrid_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDrib_indexed_cNotPt_V4 :
+ case Hexagon::LDrib_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriub_indexed_cNotPt_V4 :
+ case Hexagon::LDriub_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDrih_indexed_cNotPt_V4 :
+ case Hexagon::LDrih_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriuh_indexed_cNotPt_V4 :
+ case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriw_indexed_cNotPt_V4 :
+ case Hexagon::LDriw_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::ADD_ri_cNotPt :
+ case Hexagon::ADD_ri_cdnNotPt :
+ case Hexagon::ADD_rr_cNotPt :
+ case Hexagon::ADD_rr_cdnNotPt :
+ case Hexagon::XOR_rr_cNotPt :
+ case Hexagon::XOR_rr_cdnNotPt :
+ case Hexagon::AND_rr_cNotPt :
+ case Hexagon::AND_rr_cdnNotPt :
+ case Hexagon::OR_rr_cNotPt :
+ case Hexagon::OR_rr_cdnNotPt :
+ case Hexagon::SUB_rr_cNotPt :
+ case Hexagon::SUB_rr_cdnNotPt :
+ case Hexagon::COMBINE_rr_cNotPt :
+ case Hexagon::COMBINE_rr_cdnNotPt :
+ case Hexagon::ASLH_cNotPt_V4 :
+ case Hexagon::ASLH_cdnNotPt_V4 :
+ case Hexagon::ASRH_cNotPt_V4 :
+ case Hexagon::ASRH_cdnNotPt_V4 :
+ case Hexagon::SXTB_cNotPt_V4 :
+ case Hexagon::SXTB_cdnNotPt_V4 :
+ case Hexagon::SXTH_cNotPt_V4 :
+ case Hexagon::SXTH_cdnNotPt_V4 :
+ case Hexagon::ZXTB_cNotPt_V4 :
+ case Hexagon::ZXTB_cdnNotPt_V4 :
+ case Hexagon::ZXTH_cNotPt_V4 :
+ case Hexagon::ZXTH_cdnNotPt_V4 :
+
+ case Hexagon::LDrid_GP_cNotPt_V4 :
+ case Hexagon::LDrib_GP_cNotPt_V4 :
+ case Hexagon::LDriub_GP_cNotPt_V4 :
+ case Hexagon::LDrih_GP_cNotPt_V4 :
+ case Hexagon::LDriuh_GP_cNotPt_V4 :
+ case Hexagon::LDriw_GP_cNotPt_V4 :
+ case Hexagon::LDd_GP_cNotPt_V4 :
+ case Hexagon::LDb_GP_cNotPt_V4 :
+ case Hexagon::LDub_GP_cNotPt_V4 :
+ case Hexagon::LDh_GP_cNotPt_V4 :
+ case Hexagon::LDuh_GP_cNotPt_V4 :
+ case Hexagon::LDw_GP_cNotPt_V4 :
+ case Hexagon::STrid_GP_cNotPt_V4 :
+ case Hexagon::STrib_GP_cNotPt_V4 :
+ case Hexagon::STrih_GP_cNotPt_V4 :
+ case Hexagon::STriw_GP_cNotPt_V4 :
+ case Hexagon::STd_GP_cNotPt_V4 :
+ case Hexagon::STb_GP_cNotPt_V4 :
+ case Hexagon::STh_GP_cNotPt_V4 :
+ case Hexagon::STw_GP_cNotPt_V4 :
+ case Hexagon::LDrid_GP_cdnNotPt_V4 :
+ case Hexagon::LDrib_GP_cdnNotPt_V4 :
+ case Hexagon::LDriub_GP_cdnNotPt_V4 :
+ case Hexagon::LDrih_GP_cdnNotPt_V4 :
+ case Hexagon::LDriuh_GP_cdnNotPt_V4 :
+ case Hexagon::LDriw_GP_cdnNotPt_V4 :
+ case Hexagon::LDd_GP_cdnNotPt_V4 :
+ case Hexagon::LDb_GP_cdnNotPt_V4 :
+ case Hexagon::LDub_GP_cdnNotPt_V4 :
+ case Hexagon::LDh_GP_cdnNotPt_V4 :
+ case Hexagon::LDuh_GP_cdnNotPt_V4 :
+ case Hexagon::LDw_GP_cdnNotPt_V4 :
+ case Hexagon::STrid_GP_cdnNotPt_V4 :
+ case Hexagon::STrib_GP_cdnNotPt_V4 :
+ case Hexagon::STrih_GP_cdnNotPt_V4 :
+ case Hexagon::STriw_GP_cdnNotPt_V4 :
+ case Hexagon::STd_GP_cdnNotPt_V4 :
+ case Hexagon::STb_GP_cdnNotPt_V4 :
+ case Hexagon::STh_GP_cdnNotPt_V4 :
+ case Hexagon::STw_GP_cdnNotPt_V4 :
+ return false;
+
+ default:
+ assert (false && "Unknown predicate sense of the instruction");
+ }
+ // return *some value* to avoid compiler warning
+ return false;
+}
+
+bool HexagonPacketizerList::isDotNewInst(MachineInstr* MI) {
+ if (isNewValueInst(MI))
+ return true;
+
+ switch (MI->getOpcode()) {
+ case Hexagon::TFR_cdnNotPt:
+ case Hexagon::TFR_cdnPt:
+ case Hexagon::TFRI_cdnNotPt:
+ case Hexagon::TFRI_cdnPt:
+ case Hexagon::LDrid_cdnPt :
+ case Hexagon::LDrid_cdnNotPt :
+ case Hexagon::LDrid_indexed_cdnPt :
+ case Hexagon::LDrid_indexed_cdnNotPt :
+ case Hexagon::POST_LDrid_cdnPt_V4 :
+ case Hexagon::POST_LDrid_cdnNotPt_V4 :
+ case Hexagon::LDriw_cdnPt :
+ case Hexagon::LDriw_cdnNotPt :
+ case Hexagon::LDriw_indexed_cdnPt :
+ case Hexagon::LDriw_indexed_cdnNotPt :
+ case Hexagon::POST_LDriw_cdnPt_V4 :
+ case Hexagon::POST_LDriw_cdnNotPt_V4 :
+ case Hexagon::LDrih_cdnPt :
+ case Hexagon::LDrih_cdnNotPt :
+ case Hexagon::LDrih_indexed_cdnPt :
+ case Hexagon::LDrih_indexed_cdnNotPt :
+ case Hexagon::POST_LDrih_cdnPt_V4 :
+ case Hexagon::POST_LDrih_cdnNotPt_V4 :
+ case Hexagon::LDrib_cdnPt :
+ case Hexagon::LDrib_cdnNotPt :
+ case Hexagon::LDrib_indexed_cdnPt :
+ case Hexagon::LDrib_indexed_cdnNotPt :
+ case Hexagon::POST_LDrib_cdnPt_V4 :
+ case Hexagon::POST_LDrib_cdnNotPt_V4 :
+ case Hexagon::LDriuh_cdnPt :
+ case Hexagon::LDriuh_cdnNotPt :
+ case Hexagon::LDriuh_indexed_cdnPt :
+ case Hexagon::LDriuh_indexed_cdnNotPt :
+ case Hexagon::POST_LDriuh_cdnPt_V4 :
+ case Hexagon::POST_LDriuh_cdnNotPt_V4 :
+ case Hexagon::LDriub_cdnPt :
+ case Hexagon::LDriub_cdnNotPt :
+ case Hexagon::LDriub_indexed_cdnPt :
+ case Hexagon::LDriub_indexed_cdnNotPt :
+ case Hexagon::POST_LDriub_cdnPt_V4 :
+ case Hexagon::POST_LDriub_cdnNotPt_V4 :
+
+ case Hexagon::LDrid_indexed_cdnPt_V4 :
+ case Hexagon::LDrid_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDrib_indexed_cdnPt_V4 :
+ case Hexagon::LDrib_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriub_indexed_cdnPt_V4 :
+ case Hexagon::LDriub_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDrih_indexed_cdnPt_V4 :
+ case Hexagon::LDrih_indexed_cdnNotPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriuh_indexed_cdnPt_V4 :
+ case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::LDriw_indexed_cdnPt_V4 :
+ case Hexagon::LDriw_indexed_cdnNotPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
+ case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
+
+  // Conditional add
+ case Hexagon::ADD_ri_cdnPt:
+ case Hexagon::ADD_ri_cdnNotPt:
+ case Hexagon::ADD_rr_cdnPt:
+ case Hexagon::ADD_rr_cdnNotPt:
+
+ // Conditional logical operations
+ case Hexagon::XOR_rr_cdnPt :
+ case Hexagon::XOR_rr_cdnNotPt :
+ case Hexagon::AND_rr_cdnPt :
+ case Hexagon::AND_rr_cdnNotPt :
+ case Hexagon::OR_rr_cdnPt :
+ case Hexagon::OR_rr_cdnNotPt :
+
+  // Conditional subtract
+ case Hexagon::SUB_rr_cdnPt :
+ case Hexagon::SUB_rr_cdnNotPt :
+
+ // Conditional combine
+ case Hexagon::COMBINE_rr_cdnPt :
+ case Hexagon::COMBINE_rr_cdnNotPt :
+
+ // Conditional shift operations
+ case Hexagon::ASLH_cdnPt_V4:
+ case Hexagon::ASLH_cdnNotPt_V4:
+ case Hexagon::ASRH_cdnPt_V4:
+ case Hexagon::ASRH_cdnNotPt_V4:
+ case Hexagon::SXTB_cdnPt_V4:
+ case Hexagon::SXTB_cdnNotPt_V4:
+ case Hexagon::SXTH_cdnPt_V4:
+ case Hexagon::SXTH_cdnNotPt_V4:
+ case Hexagon::ZXTB_cdnPt_V4:
+ case Hexagon::ZXTB_cdnNotPt_V4:
+ case Hexagon::ZXTH_cdnPt_V4:
+ case Hexagon::ZXTH_cdnNotPt_V4:
+
+ // Conditional stores
+ case Hexagon::STrib_imm_cdnPt_V4 :
+ case Hexagon::STrib_imm_cdnNotPt_V4 :
+ case Hexagon::STrib_cdnPt_V4 :
+ case Hexagon::STrib_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_cdnPt_V4 :
+ case Hexagon::STrib_indexed_cdnNotPt_V4 :
+ case Hexagon::POST_STbri_cdnPt_V4 :
+ case Hexagon::POST_STbri_cdnNotPt_V4 :
+ case Hexagon::STrib_indexed_shl_cdnPt_V4 :
+ case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
+
+ // Store doubleword conditionally
+ case Hexagon::STrid_indexed_cdnPt_V4 :
+ case Hexagon::STrid_indexed_cdnNotPt_V4 :
+ case Hexagon::STrid_indexed_shl_cdnPt_V4 :
+ case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STdri_cdnPt_V4 :
+ case Hexagon::POST_STdri_cdnNotPt_V4 :
+
+ // Store halfword conditionally
+ case Hexagon::STrih_cdnPt_V4 :
+ case Hexagon::STrih_cdnNotPt_V4 :
+ case Hexagon::STrih_indexed_cdnPt_V4 :
+ case Hexagon::STrih_indexed_cdnNotPt_V4 :
+ case Hexagon::STrih_imm_cdnPt_V4 :
+ case Hexagon::STrih_imm_cdnNotPt_V4 :
+ case Hexagon::STrih_indexed_shl_cdnPt_V4 :
+ case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_SThri_cdnPt_V4 :
+ case Hexagon::POST_SThri_cdnNotPt_V4 :
+
+ // Store word conditionally
+ case Hexagon::STriw_cdnPt_V4 :
+ case Hexagon::STriw_cdnNotPt_V4 :
+ case Hexagon::STriw_indexed_cdnPt_V4 :
+ case Hexagon::STriw_indexed_cdnNotPt_V4 :
+ case Hexagon::STriw_imm_cdnPt_V4 :
+ case Hexagon::STriw_imm_cdnNotPt_V4 :
+ case Hexagon::STriw_indexed_shl_cdnPt_V4 :
+ case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
+ case Hexagon::POST_STwri_cdnPt_V4 :
+ case Hexagon::POST_STwri_cdnNotPt_V4 :
+
+ case Hexagon::LDd_GP_cdnPt_V4:
+ case Hexagon::LDd_GP_cdnNotPt_V4:
+ case Hexagon::LDb_GP_cdnPt_V4:
+ case Hexagon::LDb_GP_cdnNotPt_V4:
+ case Hexagon::LDub_GP_cdnPt_V4:
+ case Hexagon::LDub_GP_cdnNotPt_V4:
+ case Hexagon::LDh_GP_cdnPt_V4:
+ case Hexagon::LDh_GP_cdnNotPt_V4:
+ case Hexagon::LDuh_GP_cdnPt_V4:
+ case Hexagon::LDuh_GP_cdnNotPt_V4:
+ case Hexagon::LDw_GP_cdnPt_V4:
+ case Hexagon::LDw_GP_cdnNotPt_V4:
+ case Hexagon::LDrid_GP_cdnPt_V4:
+ case Hexagon::LDrid_GP_cdnNotPt_V4:
+ case Hexagon::LDrib_GP_cdnPt_V4:
+ case Hexagon::LDrib_GP_cdnNotPt_V4:
+ case Hexagon::LDriub_GP_cdnPt_V4:
+ case Hexagon::LDriub_GP_cdnNotPt_V4:
+ case Hexagon::LDrih_GP_cdnPt_V4:
+ case Hexagon::LDrih_GP_cdnNotPt_V4:
+ case Hexagon::LDriuh_GP_cdnPt_V4:
+ case Hexagon::LDriuh_GP_cdnNotPt_V4:
+ case Hexagon::LDriw_GP_cdnPt_V4:
+ case Hexagon::LDriw_GP_cdnNotPt_V4:
+
+ case Hexagon::STrid_GP_cdnPt_V4:
+ case Hexagon::STrid_GP_cdnNotPt_V4:
+ case Hexagon::STrib_GP_cdnPt_V4:
+ case Hexagon::STrib_GP_cdnNotPt_V4:
+ case Hexagon::STrih_GP_cdnPt_V4:
+ case Hexagon::STrih_GP_cdnNotPt_V4:
+ case Hexagon::STriw_GP_cdnPt_V4:
+ case Hexagon::STriw_GP_cdnNotPt_V4:
+ case Hexagon::STd_GP_cdnPt_V4:
+ case Hexagon::STd_GP_cdnNotPt_V4:
+ case Hexagon::STb_GP_cdnPt_V4:
+ case Hexagon::STb_GP_cdnNotPt_V4:
+ case Hexagon::STh_GP_cdnPt_V4:
+ case Hexagon::STh_GP_cdnNotPt_V4:
+ case Hexagon::STw_GP_cdnPt_V4:
+ case Hexagon::STw_GP_cdnNotPt_V4:
+
+ return true;
+ }
+ return false;
+}
+
+static MachineOperand& GetPostIncrementOperand(MachineInstr *MI,
+ const HexagonInstrInfo *QII) {
+ assert(QII->isPostIncrement(MI) && "Not a post increment operation.");
+#ifndef NDEBUG
+  // A post-increment operation implies a duplicated register: the address
+  // register appears both as a def and as a use. Use a dense map to find the
+  // duplicate. Caution: DenseMap allocates a minimum of 64 buckets, whereas a
+  // post-increment instruction has at most 5 operands.
+ DenseMap<unsigned, unsigned> DefRegsSet;
+ for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++)
+ if (MI->getOperand(opNum).isReg() &&
+ MI->getOperand(opNum).isDef()) {
+ DefRegsSet[MI->getOperand(opNum).getReg()] = 1;
+ }
+
+ for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++)
+ if (MI->getOperand(opNum).isReg() &&
+ MI->getOperand(opNum).isUse()) {
+ if (DefRegsSet[MI->getOperand(opNum).getReg()]) {
+ return MI->getOperand(opNum);
+ }
+ }
+#else
+ if (MI->getDesc().mayLoad()) {
+    // The 2nd operand is always the post-increment operand in a load.
+    assert(MI->getOperand(1).isReg() &&
+                "Post increment operand has to be a register.");
+ return (MI->getOperand(1));
+ }
+ if (MI->getDesc().mayStore()) {
+    // The 1st operand is always the post-increment operand in a store.
+    assert(MI->getOperand(0).isReg() &&
+                "Post increment operand has to be a register.");
+ return (MI->getOperand(0));
+ }
+#endif
+  // We should never get here.
+ llvm_unreachable("mayLoad or mayStore not set for Post Increment operation");
+}
+
+// get the value being stored
+static MachineOperand& GetStoreValueOperand(MachineInstr *MI) {
+ // value being stored is always the last operand.
+ return (MI->getOperand(MI->getNumOperands()-1));
+}
+
+// Can this store be converted to a new-value store?
+// The following restrictions must be respected when converting a store into
+// a new-value store.
+// 1. If an instruction uses auto-increment, its address register cannot
+//    be a new-value register. Arch Spec 5.4.2.1.
+// 2. If an instruction uses absolute-set addressing mode,
+//    its address register cannot be a new-value register.
+//    Arch Spec 5.4.2.1. TODO: This check is not enabled yet,
+//    as absolute-set addressing mode patterns are not implemented.
+// 3. If an instruction produces a 64-bit result, its registers cannot be used
+//    as new-value registers. Arch Spec 5.4.2.2.
+// 4. If the instruction that sets a new-value register is conditional, then
+//    the instruction that uses the new-value register must also be
+//    conditional, and both must always have their predicates evaluate
+//    identically. Arch Spec 5.4.2.3.
+// 5. There is an implied restriction that a packet cannot have another store
+//    if it contains a new-value store; as a corollary, if there is already a
+//    store in a packet, it cannot also take a new-value store.
+//    Arch Spec 3.4.4.2.
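+//
+// An illustrative new-value store packet (a hand-written sketch assuming the
+// usual Hexagon assembly syntax, not compiler output):
+//   {
+//     r2 = add(r5, #4)          // producer of r2
+//     memw(r30 + #8) = r2.new   // store consumes the "new" value of r2
+//   }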
+bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
+ MachineInstr *PacketMI, unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit)
+{
+ // Make sure we are looking at the store
+ if (!IsNewifyStore(MI))
+ return false;
+
+  // Make sure the value being stored is DepReg, so the store can be
+  // new-valued.
+ if (GetStoreValueOperand(MI).isReg() &&
+ GetStoreValueOperand(MI).getReg() != DepReg)
+ return false;
+
+  const HexagonRegisterInfo* QRI =
+    (const HexagonRegisterInfo *) TM.getRegisterInfo();
+ const MCInstrDesc& MCID = PacketMI->getDesc();
+ // first operand is always the result
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ const TargetRegisterClass* PacketRC = QII->getRegClass(MCID, 0, QRI);
+
+  // If there is already a store in the packet, we cannot add a new-value
+  // store. Arch Spec 3.4.4.2.
+ for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
+ VE = CurrentPacketMIs.end();
+ (VI != VE); ++VI) {
+ SUnit* PacketSU = MIToSUnit[*VI];
+ if (PacketSU->getInstr()->getDesc().mayStore() ||
+        // If mayStore = 1 were set on ALLOCFRAME and DEALLOCFRAME,
+        // we would not need these explicit opcode checks.
+ PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME ||
+ PacketSU->getInstr()->getOpcode() == Hexagon::DEALLOCFRAME)
+ return false;
+ }
+
+ if (PacketRC == Hexagon::DoubleRegsRegisterClass) {
+    // New-value store constraint: double registers cannot feed a new-value
+    // store. Arch Spec 5.4.2.2.
+ return false;
+ }
+
+  // Make sure it is NOT the post-increment register that we are going to
+  // new-value.
+ if (QII->isPostIncrement(MI) &&
+ MI->getDesc().mayStore() &&
+ GetPostIncrementOperand(MI, QII).getReg() == DepReg) {
+ return false;
+ }
+
+ if (QII->isPostIncrement(PacketMI) &&
+ PacketMI->getDesc().mayLoad() &&
+ GetPostIncrementOperand(PacketMI, QII).getReg() == DepReg) {
+    // If the source is a post-increment (or absolute-set addressing) load,
+    // it cannot feed a new-value store, e.g.:
+    //   r3 = memw(r2++#4)
+    //   memw(r30 + #-1404) = r2.new -> cannot be a new-value store
+    // Arch Spec 5.4.2.1.
+ return false;
+ }
+
+  // If the instruction that feeds the store is predicated, the new-value
+  // store must also be predicated.
+ if (QII->isPredicated(PacketMI)) {
+ if (!QII->isPredicated(MI))
+ return false;
+
+ // Check to make sure that they both will have their predicates
+ // evaluate identically
+ unsigned predRegNumSrc;
+ unsigned predRegNumDst;
+ const TargetRegisterClass* predRegClass;
+
+ // Get predicate register used in the source instruction
+ for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) {
+ if ( PacketMI->getOperand(opNum).isReg())
+ predRegNumSrc = PacketMI->getOperand(opNum).getReg();
+ predRegClass = QRI->getMinimalPhysRegClass(predRegNumSrc);
+ if (predRegClass == Hexagon::PredRegsRegisterClass) {
+ break;
+ }
+ }
+ assert ((predRegClass == Hexagon::PredRegsRegisterClass ) &&
+ ("predicate register not found in a predicated PacketMI instruction"));
+
+ // Get predicate register used in new-value store instruction
+ for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
+ if ( MI->getOperand(opNum).isReg())
+ predRegNumDst = MI->getOperand(opNum).getReg();
+ predRegClass = QRI->getMinimalPhysRegClass(predRegNumDst);
+ if (predRegClass == Hexagon::PredRegsRegisterClass) {
+ break;
+ }
+ }
+ assert ((predRegClass == Hexagon::PredRegsRegisterClass ) &&
+ ("predicate register not found in a predicated MI instruction"));
+
+ // New-value register producer and user (store) need to satisfy these
+ // constraints:
+ // 1) Both instructions should be predicated on the same register.
+ // 2) If producer of the new-value register is .new predicated then store
+ // should also be .new predicated and if producer is not .new predicated
+ // then store should not be .new predicated.
+  // 3) Both the new-value register producer and the user should have the
+  // same predicate sense, i.e., either both negated or both non-negated.
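+  //
+  // For example (an illustrative sketch, assuming the usual Hexagon assembly
+  // syntax):
+  //   if (p0) r2 = add(r3, #1)
+  //   if (p0) memw(r4 + #0) = r2.new    // OK: same predicate, same sense
+  //   if (!p0) memw(r4 + #0) = r2.new   // rejected: opposite predicate sense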
+
+ if (( predRegNumDst != predRegNumSrc) ||
+ isDotNewInst(PacketMI) != isDotNewInst(MI) ||
+ GetPredicateSense(MI, QII) != GetPredicateSense(PacketMI, QII)) {
+ return false;
+ }
+ }
+
+  // Make sure that, other than the new-value register, no other register used
+  // by the store instruction has been modified in the same packet. Predicate
+  // registers can be modified, but they must not be modified between the
+  // producer and the store instruction, as that would make the two
+  // conditional on different values. We already know this to be true for all
+  // the instructions up to and including PacketMI; however, we still need to
+  // perform the check for the remaining instructions in the packet.
+
+ std::vector<MachineInstr*>::iterator VI;
+ std::vector<MachineInstr*>::iterator VE;
+ unsigned StartCheck = 0;
+
+ for (VI=CurrentPacketMIs.begin(), VE = CurrentPacketMIs.end();
+ (VI != VE); ++VI) {
+ SUnit* TempSU = MIToSUnit[*VI];
+ MachineInstr* TempMI = TempSU->getInstr();
+
+ // Following condition is true for all the instructions until PacketMI is
+ // reached (StartCheck is set to 0 before the for loop).
+ // StartCheck flag is 1 for all the instructions after PacketMI.
+ if (TempMI != PacketMI && !StartCheck) // start processing only after
+ continue; // encountering PacketMI
+
+ StartCheck = 1;
+ if (TempMI == PacketMI) // We don't want to check PacketMI for dependence
+ continue;
+
+ for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
+ if (MI->getOperand(opNum).isReg() &&
+ TempSU->getInstr()->modifiesRegister(MI->getOperand(opNum).getReg(), QRI))
+ return false;
+ }
+ }
+
+  // Make sure that for non-post-increment stores:
+  // 1. The only use of DepReg is as the value being stored; it must not also
+  //    appear as an address register. This handles V4 base+index registers.
+  //    The following store cannot be dot-new:
+  //    e.g. r0 = add(r0, #3)
+ // memw(r1+r0<<#2) = r0
+ if (!QII->isPostIncrement(MI) &&
+ GetStoreValueOperand(MI).isReg() &&
+ GetStoreValueOperand(MI).getReg() == DepReg) {
+ for(unsigned opNum = 0; opNum < MI->getNumOperands()-1; opNum++) {
+ if (MI->getOperand(opNum).isReg() &&
+ MI->getOperand(opNum).getReg() == DepReg) {
+ return false;
+ }
+ }
+ // 2. If data definition is because of implicit definition of the register,
+ // do not newify the store. Eg.
+ // %R9<def> = ZXTH %R12, %D6<imp-use>, %R12<imp-def>
+ // STrih_indexed %R8, 2, %R12<kill>; mem:ST2[%scevgep343]
+ for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) {
+ if (PacketMI->getOperand(opNum).isReg() &&
+ PacketMI->getOperand(opNum).getReg() == DepReg &&
+ PacketMI->getOperand(opNum).isDef() &&
+ PacketMI->getOperand(opNum).isImplicit()) {
+ return false;
+ }
+ }
+ }
+
+ // Can be dot new store.
+ return true;
+}
+
+// Can this MI be promoted to either a new-value store or a new-value jump?
+bool HexagonPacketizerList::CanPromoteToNewValue( MachineInstr *MI,
+ SUnit *PacketSU, unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit,
+ MachineBasicBlock::iterator &MII)
+{
+
+  const HexagonRegisterInfo* QRI =
+    (const HexagonRegisterInfo *) TM.getRegisterInfo();
+ if (!QRI->Subtarget.hasV4TOps() ||
+ !IsNewifyStore(MI))
+ return false;
+
+ MachineInstr *PacketMI = PacketSU->getInstr();
+
+  // Check to see whether the store can be new-valued.
+ if (CanPromoteToNewValueStore(MI, PacketMI, DepReg, MIToSUnit))
+ return true;
+
+  // Check to see whether the compare/jump can be new-valued.
+  // This is done in a pass of its own; we don't need to check it here.
+ return false;
+}
+
+// Check to see if an instruction can be dot-new.
+// There are three kinds:
+// 1. dot-new on a predicate - V2/V3/V4
+// 2. dot-new on stores NV/ST - V4
+// 3. dot-new on jumps NV/J - V4 -- this is generated in a separate pass.
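+//
+// For instance (an illustrative sketch, assuming the usual Hexagon assembly
+// syntax), a predicate .new consumer can read a predicate produced in the
+// same packet:
+//   {
+//     p0 = cmp.eq(r2, #0)
+//     if (p0.new) r3 = add(r4, #1)
+//   }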
+bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI,
+ SUnit *PacketSU, unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit,
+ MachineBasicBlock::iterator &MII,
+ const TargetRegisterClass* RC )
+{
+ // already a dot new instruction
+ if (isDotNewInst(MI) && !IsNewifyStore(MI))
+ return false;
+
+ if (!isNewifiable(MI))
+ return false;
+
+ // predicate .new
+ if (RC == Hexagon::PredRegsRegisterClass && isCondInst(MI))
+ return true;
+ else if (RC != Hexagon::PredRegsRegisterClass &&
+ !IsNewifyStore(MI)) // MI is not a new-value store
+ return false;
+ else {
+ // Create a dot new machine instruction to see if resources can be
+ // allocated. If not, bail out now.
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ int NewOpcode = GetDotNewOp(MI->getOpcode());
+ const MCInstrDesc &desc = QII->get(NewOpcode);
+ DebugLoc dl;
+    MachineInstr *NewMI =
+      MI->getParent()->getParent()->CreateMachineInstr(desc, dl);
+ bool ResourcesAvailable = ResourceTracker->canReserveResources(NewMI);
+ MI->getParent()->getParent()->DeleteMachineInstr(NewMI);
+
+ if (!ResourcesAvailable)
+ return false;
+
+    // New-value store only here; new-value jumps are generated in a
+    // separate pass.
+ if (!CanPromoteToNewValue(MI, PacketSU, DepReg, MIToSUnit, MII)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Go through the packet instructions and search for an anti-dependency
+// between them and DepReg from MI.
+// Consider this case:
+// Trying to add
+// a) %R1<def> = TFRI_cdNotPt %P3, 2
+// to this packet:
+// {
+// b) %P0<def> = OR_pp %P3<kill>, %P0<kill>
+// c) %P3<def> = TFR_PdRs %R23
+// d) %R1<def> = TFRI_cdnPt %P3, 4
+// }
+// The P3 from a) and d) will be complements after
+// a)'s P3 is converted to .new form.
+// The anti-dependency between c) and b) is irrelevant for this case.
+bool HexagonPacketizerList::RestrictingDepExistInPacket (MachineInstr* MI,
+ unsigned DepReg,
+ std::map <MachineInstr*, SUnit*> MIToSUnit) {
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ SUnit* PacketSUDep = MIToSUnit[MI];
+
+ for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(),
+ VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) {
+
+    // We only care about dependencies on predicated instructions.
+ if(!QII->isPredicated(*VIN)) continue;
+
+ // Scheduling Unit for current insn in the packet
+ SUnit* PacketSU = MIToSUnit[*VIN];
+
+ // Look at dependencies between current members of the packet
+ // and predicate defining instruction MI.
+ // Make sure that dependency is on the exact register
+ // we care about.
+ if (PacketSU->isSucc(PacketSUDep)) {
+ for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) {
+ if ((PacketSU->Succs[i].getSUnit() == PacketSUDep) &&
+ (PacketSU->Succs[i].getKind() == SDep::Anti) &&
+ (PacketSU->Succs[i].getReg() == DepReg)) {
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+
+// Given two predicated instructions, this function detects whether
+// the predicates are complements
+bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1,
+ MachineInstr* MI2, std::map <MachineInstr*, SUnit*> MIToSUnit) {
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ // Currently can only reason about conditional transfers
+ if (!QII->isConditionalTransfer(MI1) || !QII->isConditionalTransfer(MI2)) {
+ return false;
+ }
+
+ // Scheduling unit for candidate
+ SUnit* SU = MIToSUnit[MI1];
+
+ // One corner case deals with the following scenario:
+ // Trying to add
+ // a) %R24<def> = TFR_cPt %P0, %R25
+ // to this packet:
+ //
+ // {
+ // b) %R25<def> = TFR_cNotPt %P0, %R24
+ // c) %P0<def> = CMPEQri %R26, 1
+ // }
+ //
+  // On a general check, a) and b) are complements, but the presence of c)
+  // will convert a) to .new form, and then it is no longer a complement.
+  // We attempt to detect this by analyzing the existing dependencies in
+  // the packet.
+
+ // Analyze relationships between all existing members of the packet.
+  // Look for an anti-dependency on the same predicate register
+  // as the one used in the candidate.
+ for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(),
+ VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) {
+
+ // Scheduling Unit for current insn in the packet
+ SUnit* PacketSU = MIToSUnit[*VIN];
+
+ // If this instruction in the packet is succeeded by the candidate...
+ if (PacketSU->isSucc(SU)) {
+ for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) {
+        // The corner case exists when there is a true data dependency
+        // between the candidate and one of the current packet members,
+        // the dependency is on a predicate register, and there already
+        // exists an anti-dependency on the same predicate in the packet.
+ if (PacketSU->Succs[i].getSUnit() == SU &&
+ Hexagon::PredRegsRegisterClass->contains(
+ PacketSU->Succs[i].getReg()) &&
+ PacketSU->Succs[i].getKind() == SDep::Data &&
+            // Here we know that *VIN is a predicate-setting instruction
+            // with a true data dependency to the candidate on the register
+            // we care about - c) in the above example.
+            // Now we need to see whether there is an anti-dependency
+            // from c) to any other instruction in the
+            // same packet on the predicate register of interest.
+ RestrictingDepExistInPacket(*VIN,PacketSU->Succs[i].getReg(),
+ MIToSUnit)) {
+ return false;
+ }
+ }
+ }
+ }
+
+  // If the above case does not apply, check the regular complement
+  // condition: the predicate register must be the same and the predicate
+  // sense must be different. We also need to differentiate .old vs. .new:
+  // !p0 is not complementary to p0.new.
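+  // For example (an illustrative sketch): "if (p0) r24 = r25" and
+  // "if (!p0) r25 = r24" are complements, whereas "if (!p0) ..." does not
+  // complement "if (p0.new) ...".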
+ return ((MI1->getOperand(1).getReg() == MI2->getOperand(1).getReg()) &&
+ (GetPredicateSense(MI1, QII) != GetPredicateSense(MI2, QII)) &&
+ (isDotNewInst(MI1) == isDotNewInst(MI2)));
+}
+
+// initPacketizerState - Initialize packetizer flags
+void HexagonPacketizerList::initPacketizerState() {
+
+ Dependence = false;
+ PromotedToDotNew = false;
+ GlueToNewValueJump = false;
+ GlueAllocframeStore = false;
+ FoundSequentialDependence = false;
+
+ return;
+}
+
+// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
+bool HexagonPacketizerList::ignorePseudoInstruction(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ if (MI->isDebugValue())
+ return true;
+
+ // We must print out inline assembly
+ if (MI->isInlineAsm())
+ return false;
+
+ // We check if MI has any functional units mapped to it.
+ // If it doesn't, we ignore the instruction.
+ const MCInstrDesc& TID = MI->getDesc();
+ unsigned SchedClass = TID.getSchedClass();
+  const InstrStage* IS =
+    ResourceTracker->getInstrItins()->beginStage(SchedClass);
+ unsigned FuncUnits = IS->getUnits();
+ return !FuncUnits;
+}
+
+// isSoloInstruction - Returns true for instructions that must be
+// scheduled in their own packet.
+bool HexagonPacketizerList::isSoloInstruction(MachineInstr *MI) {
+
+ if (MI->isInlineAsm())
+ return true;
+
+ if (MI->isEHLabel())
+ return true;
+
+ // From Hexagon V4 Programmer's Reference Manual 3.4.4 Grouping constraints:
+ // trap, pause, barrier, icinva, isync, and syncht are solo instructions.
+ // They must not be grouped with other instructions in a packet.
+ if (IsSchedBarrier(MI))
+ return true;
+
+ return false;
+}
+
+// isLegalToPacketizeTogether:
+// SUI is the current instruction that is outside of the current packet.
+// SUJ is the current instruction inside the current packet against which
+// SUI will be packetized.
+bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
+ MachineInstr *I = SUI->getInstr();
+ MachineInstr *J = SUJ->getInstr();
+ assert(I && J && "Unable to packetize null instruction!");
+
+ const MCInstrDesc &MCIDI = I->getDesc();
+ const MCInstrDesc &MCIDJ = J->getDesc();
+
+ MachineBasicBlock::iterator II = I;
+
+ const unsigned FrameSize = MF.getFrameInfo()->getStackSize();
+  const HexagonRegisterInfo* QRI =
+    (const HexagonRegisterInfo *) TM.getRegisterInfo();
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+
+ // Inline asm cannot go in the packet.
+ if (I->getOpcode() == Hexagon::INLINEASM)
+ llvm_unreachable("Should not meet inline asm here!");
+
+ if (isSoloInstruction(I))
+ llvm_unreachable("Should not meet solo instr here!");
+
+  // A call that saves the callee-saved registers can only be in a packet
+  // with instructions that do not write to the callee-saved registers.
+ if ((QII->isSaveCalleeSavedRegsCall(I) &&
+ DoesModifyCalleeSavedReg(J, QRI)) ||
+ (QII->isSaveCalleeSavedRegsCall(J) &&
+ DoesModifyCalleeSavedReg(I, QRI))) {
+ Dependence = true;
+ return false;
+ }
+
+ // Two control flow instructions cannot go in the same packet.
+ if (IsControlFlow(I) && IsControlFlow(J)) {
+ Dependence = true;
+ return false;
+ }
+
+ // A LoopN instruction cannot appear in the same packet as a jump or call.
+ if (IsLoopN(I) && ( IsDirectJump(J)
+ || MCIDJ.isCall()
+ || QII->isDeallocRet(J))) {
+ Dependence = true;
+ return false;
+ }
+ if (IsLoopN(J) && ( IsDirectJump(I)
+ || MCIDI.isCall()
+ || QII->isDeallocRet(I))) {
+ Dependence = true;
+ return false;
+ }
+
+ // dealloc_return cannot appear in the same packet as a conditional or
+ // unconditional jump.
+ if (QII->isDeallocRet(I) && ( MCIDJ.isBranch()
+ || MCIDJ.isCall()
+ || MCIDJ.isBarrier())) {
+ Dependence = true;
+ return false;
+ }
+
+
+  // V4 allows dual stores, but the second store is not allowed if the
+  // first store is not in SLOT0. New-value stores, new-value jumps,
+  // dealloc_return and memops always take SLOT0.
+  // Arch Spec 3.4.4.2.
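+  //
+  // For example (an illustrative sketch, assuming the usual Hexagon assembly
+  // syntax), a dual-store packet is legal on V4 provided the slot constraint
+  // above is met:
+  //   {
+  //     memw(r1 + #0) = r2
+  //     memw(r3 + #4) = r4
+  //   }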
+ if (QRI->Subtarget.hasV4TOps()) {
+
+ if (MCIDI.mayStore() && MCIDJ.mayStore() && isNewValueInst(J)) {
+ Dependence = true;
+ return false;
+ }
+
+ if ( (QII->isMemOp(J) && MCIDI.mayStore())
+ || (MCIDJ.mayStore() && QII->isMemOp(I))
+ || (QII->isMemOp(J) && QII->isMemOp(I))) {
+ Dependence = true;
+ return false;
+ }
+
+    // A dealloc_return cannot be in the same packet as a store.
+ if (MCIDJ.mayStore() && QII->isDeallocRet(I)){
+ Dependence = true;
+ return false;
+ }
+
+ // If an instruction feeds new value jump, glue it.
+ MachineBasicBlock::iterator NextMII = I;
+ ++NextMII;
+ MachineInstr *NextMI = NextMII;
+
+ if (QII->isNewValueJump(NextMI)) {
+
+ bool secondRegMatch = false;
+ bool maintainNewValueJump = false;
+
+ if (NextMI->getOperand(1).isReg() &&
+ I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) {
+ secondRegMatch = true;
+ maintainNewValueJump = true;
+ }
+
+ if (!secondRegMatch &&
+ I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) {
+ maintainNewValueJump = true;
+ }
+
+ for (std::vector<MachineInstr*>::iterator
+ VI = CurrentPacketMIs.begin(),
+ VE = CurrentPacketMIs.end();
+ (VI != VE && maintainNewValueJump); ++VI) {
+ SUnit* PacketSU = MIToSUnit[*VI];
+
+        // An NVJ cannot be part of a dual jump - Arch Spec section 7.8.
+ if (PacketSU->getInstr()->getDesc().isCall()) {
+ Dependence = true;
+ break;
+ }
+ // Validate
+ // 1. Packet does not have a store in it.
+ // 2. If the first operand of the nvj is newified, and the second
+ // operand is also a reg, it (second reg) is not defined in
+ // the same packet.
+ // 3. If the second operand of the nvj is newified, (which means
+ // first operand is also a reg), first reg is not defined in
+ // the same packet.
+ if (PacketSU->getInstr()->getDesc().mayStore() ||
+ PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME ||
+ // Check #2.
+ (!secondRegMatch && NextMI->getOperand(1).isReg() &&
+ PacketSU->getInstr()->modifiesRegister(
+ NextMI->getOperand(1).getReg(), QRI)) ||
+ // Check #3.
+ (secondRegMatch &&
+ PacketSU->getInstr()->modifiesRegister(
+ NextMI->getOperand(0).getReg(), QRI))) {
+ Dependence = true;
+ break;
+ }
+ }
+ if (!Dependence)
+ GlueToNewValueJump = true;
+ else
+ return false;
+ }
+ }
+
+ if (SUJ->isSucc(SUI)) {
+ for (unsigned i = 0;
+ (i < SUJ->Succs.size()) && !FoundSequentialDependence;
+ ++i) {
+
+ if (SUJ->Succs[i].getSUnit() != SUI) {
+ continue;
+ }
+
+ SDep::Kind DepType = SUJ->Succs[i].getKind();
+
+ // For direct calls:
+ // Ignore register dependences for call instructions for
+ // packetization purposes except for those due to r31 and
+ // predicate registers.
+ //
+ // For indirect calls:
+ // Same as direct calls + check for true dependences to the register
+ // used in the indirect call.
+ //
+ // We completely ignore Order dependences for call instructions
+ //
+ // For returns:
+ // Ignore register dependences for return instructions like jumpr,
+ // dealloc return unless we have dependencies on the explicit uses
+ // of the registers used by jumpr (like r31) or dealloc return
+ // (like r29 or r30).
+ //
+      // TODO: Currently, jumpr handles only returns through r31, so the
+      // following logic (specifically IsCallDependent) works fine.
+      // Once jumpr is enabled for registers other than r31, we will need
+      // to rework the last part of IsCallDependent, where it handles
+      // indirect calls. Bug 6216 is open for this.
+ //
+ unsigned DepReg;
+ const TargetRegisterClass* RC;
+ if (DepType == SDep::Data) {
+ DepReg = SUJ->Succs[i].getReg();
+ RC = QRI->getMinimalPhysRegClass(DepReg);
+ }
+ if ((MCIDI.isCall() || MCIDI.isReturn()) &&
+ (!IsRegDependence(DepType) ||
+ !IsCallDependent(I, DepType, SUJ->Succs[i].getReg()))) {
+ /* do nothing */
+ }
+
+ // For instructions that can be promoted to dot-new, try to promote.
+ else if ((DepType == SDep::Data) &&
+ CanPromoteToDotNew(I, SUJ, DepReg, MIToSUnit, II, RC) &&
+ PromoteToDotNew(I, DepType, II, RC)) {
+ PromotedToDotNew = true;
+ /* do nothing */
+ }
+
+ else if ((DepType == SDep::Data) &&
+ (QII->isNewValueJump(I))) {
+ /* do nothing */
+ }
+
+ // For predicated instructions, if the predicates are complements
+ // then there can be no dependence.
+ else if (QII->isPredicated(I) &&
+ QII->isPredicated(J) &&
+ ArePredicatesComplements(I, J, MIToSUnit)) {
+ /* do nothing */
+
+ }
+ else if (IsDirectJump(I) &&
+ !MCIDJ.isBranch() &&
+ !MCIDJ.isCall() &&
+ (DepType == SDep::Order)) {
+ // Ignore Order dependences between unconditional direct branches
+ // and non-control-flow instructions
+ /* do nothing */
+ }
+ else if (MCIDI.isConditionalBranch() && (DepType != SDep::Data) &&
+ (DepType != SDep::Output)) {
+ // Ignore all dependences for jumps except for true and output
+ // dependences
+ /* do nothing */
+ }
+
+      // Ignore output dependences due to superregs. We can write to two
+      // different subregisters of R1:0, for instance, in the same cycle.
+      //
+      // If neither I nor J defines DepReg, then this is a superfluous
+      // output dependence. The dependence must be of the form:
+      //   R0 = ...
+      //   R1 = ...
+      // and there is an output dependence between the two instructions
+      // with DepReg = D0.
+      // We want to ignore these dependences. Ideally, the dependence
+      // constructor should annotate such dependences; we could then avoid
+      // this relatively expensive check.
+      //
+ else if (DepType == SDep::Output) {
+ // DepReg is the register that's responsible for the dependence.
+ unsigned DepReg = SUJ->Succs[i].getReg();
+
+        // Check whether I or J really defines DepReg.
+ if (I->definesRegister(DepReg) ||
+ J->definesRegister(DepReg)) {
+ FoundSequentialDependence = true;
+ break;
+ }
+ }
+
+ // We ignore Order dependences for
+ // 1. Two loads unless they are volatile.
+ // 2. Two stores in V4 unless they are volatile.
+ else if ((DepType == SDep::Order) &&
+ !I->hasVolatileMemoryRef() &&
+ !J->hasVolatileMemoryRef()) {
+ if (QRI->Subtarget.hasV4TOps() &&
+ // hexagonv4 allows dual store.
+ MCIDI.mayStore() && MCIDJ.mayStore()) {
+ /* do nothing */
+ }
+        // store followed by store -- not OK on V2
+        // store followed by load  -- not OK on all (OK if the addresses
+        //                            are not aliased)
+        // load followed by store  -- OK on all
+        // load followed by load   -- OK on all
+ else if ( !MCIDJ.mayStore()) {
+ /* do nothing */
+ }
+ else {
+ FoundSequentialDependence = true;
+ break;
+ }
+ }
+
+      // For V4, special-case ALLOCFRAME. Even though there is a dependency
+      // between ALLOCFRAME and a subsequent store, allow them to be
+      // packetized in the same packet. This implies that the store is using
+      // the caller's SP. Hence, the offset needs to be updated accordingly.
+ else if (DepType == SDep::Data
+ && QRI->Subtarget.hasV4TOps()
+ && J->getOpcode() == Hexagon::ALLOCFRAME
+ && (I->getOpcode() == Hexagon::STrid
+ || I->getOpcode() == Hexagon::STriw
+ || I->getOpcode() == Hexagon::STrib)
+ && I->getOperand(0).getReg() == QRI->getStackRegister()
+ && QII->isValidOffset(I->getOpcode(),
+ I->getOperand(1).getImm() -
+ (FrameSize + HEXAGON_LRFP_SIZE)))
+ {
+ GlueAllocframeStore = true;
+        // Since this store is to be glued with allocframe in the same
+        // packet, it will use the SP of the previous stack frame, i.e.
+        // the caller's SP. Therefore, we need to recalculate the offset
+        // accordingly.
+ I->getOperand(1).setImm(I->getOperand(1).getImm() -
+ (FrameSize + HEXAGON_LRFP_SIZE));
+ }
+
+ //
+ // Skip over anti-dependences. Two instructions that are
+ // anti-dependent can share a packet
+ //
+ else if (DepType != SDep::Anti) {
+ FoundSequentialDependence = true;
+ break;
+ }
+ }
+
+ if (FoundSequentialDependence) {
+ Dependence = true;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// isLegalToPruneDependencies
+bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
+ MachineInstr *I = SUI->getInstr();
+ assert(I && SUJ->getInstr() && "Unable to packetize null instruction!");
+
+ const unsigned FrameSize = MF.getFrameInfo()->getStackSize();
+
+ if (Dependence) {
+
+ // Check if the instruction was promoted to a dot-new. If so, demote it
+ // back into a dot-old.
+ if (PromotedToDotNew) {
+ DemoteToDotOld(I);
+ }
+
+    // Check if the instruction (which must be a store) was glued with an
+    // allocframe instruction. If so, restore its offset to its original
+    // value, i.e. use the current SP instead of the caller's SP.
+ if (GlueAllocframeStore) {
+ I->getOperand(1).setImm(I->getOperand(1).getImm() +
+ FrameSize + HEXAGON_LRFP_SIZE);
+ }
+
+ return false;
+ }
+ return true;
+}
+
+MachineBasicBlock::iterator
+HexagonPacketizerList::addToPacket(MachineInstr *MI) {
+
+ MachineBasicBlock::iterator MII = MI;
+ MachineBasicBlock *MBB = MI->getParent();
+
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+
+ if (GlueToNewValueJump) {
+
+ ++MII;
+ MachineInstr *nvjMI = MII;
+ assert(ResourceTracker->canReserveResources(MI));
+ ResourceTracker->reserveResources(MI);
+ if (QII->isExtended(MI) &&
+ !tryAllocateResourcesForConstExt(MI)) {
+ endPacket(MBB, MI);
+ ResourceTracker->reserveResources(MI);
+ assert(canReserveResourcesForConstExt(MI) &&
+ "Ensure that there is a slot");
+ reserveResourcesForConstExt(MI);
+ // Reserve resources for new value jump constant extender.
+ assert(canReserveResourcesForConstExt(MI) &&
+ "Ensure that there is a slot");
+ reserveResourcesForConstExt(nvjMI);
+ assert(ResourceTracker->canReserveResources(nvjMI) &&
+ "Ensure that there is a slot");
+
+ } else if ( // Extended instruction takes two slots in the packet.
+      // Try to reserve and allocate 4 bytes in the current packet first.
+ (QII->isExtended(nvjMI)
+ && (!tryAllocateResourcesForConstExt(nvjMI)
+ || !ResourceTracker->canReserveResources(nvjMI)))
+ || // For non-extended instruction, no need to allocate extra 4 bytes.
+ (!QII->isExtended(nvjMI) && !ResourceTracker->canReserveResources(nvjMI)))
+ {
+ endPacket(MBB, MI);
+ // A new and empty packet starts.
+      // We are sure that the resource requirements can be satisfied.
+      // Therefore, we do not need to call "canReserveResources" anymore.
+ ResourceTracker->reserveResources(MI);
+ if (QII->isExtended(nvjMI))
+ reserveResourcesForConstExt(nvjMI);
+ }
+ // Here, we are sure that "reserveResources" would succeed.
+ ResourceTracker->reserveResources(nvjMI);
+ CurrentPacketMIs.push_back(MI);
+ CurrentPacketMIs.push_back(nvjMI);
+ } else {
+ if ( QII->isExtended(MI)
+ && ( !tryAllocateResourcesForConstExt(MI)
+ || !ResourceTracker->canReserveResources(MI)))
+ {
+ endPacket(MBB, MI);
+ // Check if the instruction was promoted to a dot-new. If so, demote it
+ // back into a dot-old
+ if (PromotedToDotNew) {
+ DemoteToDotOld(MI);
+ }
+ reserveResourcesForConstExt(MI);
+ }
+    // If "MI" is not an extended instruction, the resource availability
+    // has already been checked.
+ ResourceTracker->reserveResources(MI);
+ CurrentPacketMIs.push_back(MI);
+ }
+ return MII;
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonPacketizer() {
+ return new HexagonPacketizer();
+}
+
diff --git a/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h b/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
new file mode 100644
index 0000000..9305c27
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/HexagonVarargsCallingConvention.h
@@ -0,0 +1,141 @@
+//===-- HexagonVarargsCallingConvention.h - Calling Conventions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the functions that assign locations to outgoing function
+// arguments. Adapted from the target-independent version, but this one
+// handles calls to varargs functions.
+//
+//===----------------------------------------------------------------------===//
+//
+
+static bool RetCC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags,
+ Hexagon_CCState &State,
+ int NonVarArgsParams,
+ int CurrentParam,
+ bool ForceMem);
+
+
+static bool CC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags,
+ Hexagon_CCState &State,
+ int NonVarArgsParams,
+ int CurrentParam,
+ bool ForceMem) {
+ unsigned ByValSize = 0;
+ if (ArgFlags.isByVal() &&
+ ((ByValSize = ArgFlags.getByValSize()) >
+ (MVT(MVT::i64).getSizeInBits() / 8))) {
+ ForceMem = true;
+ }
+
+
+ // Only assign registers for named (non-varargs) arguments.
+ if ( !ForceMem && ((NonVarArgsParams == -1) || (CurrentParam <=
+ NonVarArgsParams))) {
+
+ if (LocVT == MVT::i32 ||
+ LocVT == MVT::i16 ||
+ LocVT == MVT::i8 ||
+ LocVT == MVT::f32) {
+ static const unsigned RegList1[] = {
+ Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
+ Hexagon::R5
+ };
+ if (unsigned Reg = State.AllocateReg(RegList1, 6)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT.getSimpleVT(), Reg,
+ LocVT.getSimpleVT(), LocInfo));
+ return false;
+ }
+ }
+
+ if (LocVT == MVT::i64 ||
+ LocVT == MVT::f64) {
+ static const unsigned RegList2[] = {
+ Hexagon::D0, Hexagon::D1, Hexagon::D2
+ };
+ if (unsigned Reg = State.AllocateReg(RegList2, 3)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT.getSimpleVT(), Reg,
+ LocVT.getSimpleVT(), LocInfo));
+ return false;
+ }
+ }
+ }
+
+ const Type* ArgTy = LocVT.getTypeForEVT(State.getContext());
+ unsigned Alignment =
+ State.getTarget().getTargetData()->getABITypeAlignment(ArgTy);
+ unsigned Size =
+ State.getTarget().getTargetData()->getTypeSizeInBits(ArgTy) / 8;
+
+ // If it's passed by value, then we need the size of the aggregate, not of
+ // the pointer.
+ if (ArgFlags.isByVal()) {
+ Size = ByValSize;
+
+ // Hexagon_TODO: Get the alignment of the contained type here.
+ Alignment = 8;
+ }
+
+ unsigned Offset3 = State.AllocateStack(Size, Alignment);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset3,
+ LocVT.getSimpleVT(), LocInfo));
+ return false;
+}
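To make the byval threshold above concrete: ArgFlags.getByValSize() is
compared against the byte width of an i64 (8), so any byval aggregate wider
than 8 bytes sets ForceMem and is assigned a stack slot even if argument
registers remain free. Hypothetical aggregates, purely for illustration:

    struct Pair { int a, b; };    // 8 bytes:  still eligible for registers
    struct Trio { int a, b, c; }; // 12 bytes: ForceMem = true, stack slot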
+
+
+static bool RetCC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags,
+ Hexagon_CCState &State,
+ int NonVarArgsParams,
+ int CurrentParam,
+ bool ForceMem) {
+
+ if (LocVT == MVT::i32 ||
+ LocVT == MVT::f32) {
+ static const unsigned RegList1[] = {
+ Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
+ Hexagon::R5
+ };
+ if (unsigned Reg = State.AllocateReg(RegList1, 6)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT.getSimpleVT(), Reg,
+ LocVT.getSimpleVT(), LocInfo));
+ return false;
+ }
+ }
+
+ if (LocVT == MVT::i64 ||
+ LocVT == MVT::f64) {
+ static const unsigned RegList2[] = {
+ Hexagon::D0, Hexagon::D1, Hexagon::D2
+ };
+ if (unsigned Reg = State.AllocateReg(RegList2, 3)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT.getSimpleVT(), Reg,
+ LocVT.getSimpleVT(), LocInfo));
+ return false;
+ }
+ }
+
+ const Type* ArgTy = LocVT.getTypeForEVT(State.getContext());
+ unsigned Alignment =
+ State.getTarget().getTargetData()->getABITypeAlignment(ArgTy);
+ unsigned Size =
+ State.getTarget().getTargetData()->getTypeSizeInBits(ArgTy) / 8;
+
+ unsigned Offset3 = State.AllocateStack(Size, Alignment);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset3,
+ LocVT.getSimpleVT(), LocInfo));
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
new file mode 100644
index 0000000..75d6bfb
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
@@ -0,0 +1,198 @@
+//===- HexagonInstPrinter.cpp - Convert Hexagon MCInst to assembly syntax -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a Hexagon MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asm-printer"
+#include "Hexagon.h"
+#include "HexagonAsmPrinter.h"
+#include "HexagonInstPrinter.h"
+#include "HexagonMCInst.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+
+using namespace llvm;
+
+#define GET_INSTRUCTION_NAME
+#include "HexagonGenAsmWriter.inc"
+
+StringRef HexagonInstPrinter::getOpcodeName(unsigned Opcode) const {
+ return MII.getName(Opcode);
+}
+
+StringRef HexagonInstPrinter::getRegName(unsigned RegNo) const {
+ return getRegisterName(RegNo);
+}
+
+void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot) {
+ printInst((const HexagonMCInst*)(MI), O, Annot);
+}
+
+void HexagonInstPrinter::printInst(const HexagonMCInst *MI, raw_ostream &O,
+ StringRef Annot) {
+ const char packetPadding[] = " ";
+ const char startPacket = '{',
+ endPacket = '}';
+ // TODO: add outer HW loop when it's supported too.
+ if (MI->getOpcode() == Hexagon::ENDLOOP0) {
+ // Ending a hardware loop is different from ending a regular packet.
+ assert(MI->isEndPacket() && "Loop end must also end the packet");
+
+ if (MI->isStartPacket()) {
+ // There must be a packet to end a loop.
+ // FIXME: when shuffling is always run, this shouldn't be needed.
+ HexagonMCInst Nop;
+ StringRef NoAnnot;
+
+ Nop.setOpcode (Hexagon::NOP);
+ Nop.setStartPacket (MI->isStartPacket());
+ printInst (&Nop, O, NoAnnot);
+ }
+
+ // Close the packet.
+ if (MI->isEndPacket())
+ O << packetPadding << endPacket;
+
+ printInstruction(MI, O);
+ }
+ else {
+ // Prefix the insn opening the packet.
+ if (MI->isStartPacket())
+ O << packetPadding << startPacket << '\n';
+
+ printInstruction(MI, O);
+
+ // Suffix the insn closing the packet.
+ if (MI->isEndPacket())
+ // Always emit the closing packet brace on a new line, since the GNU
+ // assembler has issues with a closing brace on the same line as
+ // CONST{32,64}.
+ O << '\n' << packetPadding << endPacket;
+ }
+
+ printAnnotation(O, Annot);
+}
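The net effect is Hexagon's packet syntax in the emitted .s file. Roughly
(the instruction text below is invented for illustration; the indentation
comes from packetPadding above), a two-instruction packet prints as:

    {
      r1 = add(r2, r3)
      memw(r29 + #0) = r1
    }

while the ENDLOOP0 path closes the brace immediately before printing the
instruction itself, giving Hexagon's "}:endloop0" form.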
+
+void HexagonInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ const MCOperand& MO = MI->getOperand(OpNo);
+
+ if (MO.isReg()) {
+ O << getRegisterName(MO.getReg());
+ } else if(MO.isExpr()) {
+ O << *MO.getExpr();
+ } else if(MO.isImm()) {
+ printImmOperand(MI, OpNo, O);
+ } else {
+ assert(false && "Unknown operand");
+ }
+}
+
+void HexagonInstPrinter::printImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ O << MI->getOperand(OpNo).getImm();
+}
+
+void HexagonInstPrinter::printExtOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ O << MI->getOperand(OpNo).getImm();
+}
+
+void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ O << MI->getOperand(OpNo).getImm();
+}
+
+void HexagonInstPrinter::printNegImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ O << -MI->getOperand(OpNo).getImm();
+}
+
+void HexagonInstPrinter::printNOneImmOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ O << -1;
+}
+
+void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ const MCOperand& MO0 = MI->getOperand(OpNo);
+ const MCOperand& MO1 = MI->getOperand(OpNo + 1);
+
+ O << getRegisterName(MO0.getReg());
+ O << " + #" << MO1.getImm();
+}
+
+void HexagonInstPrinter::printFrameIndexOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ const MCOperand& MO0 = MI->getOperand(OpNo);
+ const MCOperand& MO1 = MI->getOperand(OpNo + 1);
+
+ O << getRegisterName(MO0.getReg()) << ", #" << MO1.getImm();
+}
+
+void HexagonInstPrinter::printGlobalOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ assert(MI->getOperand(OpNo).isExpr() && "Expecting expression");
+
+ printOperand(MI, OpNo, O);
+}
+
+void HexagonInstPrinter::printJumpTable(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ assert(MI->getOperand(OpNo).isExpr() && "Expecting expression");
+
+ printOperand(MI, OpNo, O);
+}
+
+void HexagonInstPrinter::printConstantPool(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ assert(MI->getOperand(OpNo).isExpr() && "Expecting expression");
+
+ printOperand(MI, OpNo, O);
+}
+
+void HexagonInstPrinter::printBranchOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+ // Branches can take an immediate operand. This is used by the branch
+ // selection pass to print $+8, an eight-byte displacement from the PC.
+ assert(false && "Unknown branch operand.");
+}
+
+void HexagonInstPrinter::printCallOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+}
+
+void HexagonInstPrinter::printAbsAddrOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+}
+
+void HexagonInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) const {
+}
+
+void HexagonInstPrinter::printSymbol(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O, bool hi) const {
+ const MCOperand& MO = MI->getOperand(OpNo);
+
+ O << '#' << (hi? "HI": "LO") << '(';
+ if (MO.isImm()) {
+ O << '#';
+ printOperand(MI, OpNo, O);
+ } else {
+ assert("Unknown symbol operand");
+ printOperand(MI, OpNo, O);
+ }
+ O << ')';
+}
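Concretely, for an immediate operand printSymbol above emits the high or low
half of a 32-bit value in Hexagon's #HI/#LO syntax:

    #HI(#1234)   hi == true
    #LO(#1234)   hi == false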
diff --git a/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
new file mode 100644
index 0000000..3ce7dfc
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
@@ -0,0 +1,75 @@
+//===-- HexagonInstPrinter.h - Convert Hexagon MCInst to assembly syntax --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a Hexagon MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONINSTPRINTER_H
+#define HEXAGONINSTPRINTER_H
+
+#include "HexagonMCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+ class HexagonInstPrinter : public MCInstPrinter {
+ public:
+ explicit HexagonInstPrinter(const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
+ virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
+ void printInst(const HexagonMCInst *MI, raw_ostream &O, StringRef Annot);
+ virtual StringRef getOpcodeName(unsigned Opcode) const;
+ void printInstruction(const MCInst *MI, raw_ostream &O);
+ StringRef getRegName(unsigned RegNo) const;
+ static const char *getRegisterName(unsigned RegNo);
+
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
+ void printImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
+ void printExtOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
+ void printUnsignedImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printNegImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printNOneImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printMEMriOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printFrameIndexOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printCallOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printAbsAddrOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printPredicateOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printGlobalOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O)
+ const;
+ void printJumpTable(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
+
+ void printConstantPool(const MCInst *MI, unsigned OpNo, raw_ostream &O) const;
+
+ void printSymbolHi(const MCInst *MI, unsigned OpNo, raw_ostream &O) const
+ { printSymbol(MI, OpNo, O, true); }
+ void printSymbolLo(const MCInst *MI, unsigned OpNo, raw_ostream &O) const
+ { printSymbol(MI, OpNo, O, false); }
+
+ bool isConstExtended(const MCInst *MI) const;
+ protected:
+ void printSymbol(const MCInst *MI, unsigned OpNo, raw_ostream &O, bool hi)
+ const;
+ };
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
new file mode 100644
index 0000000..7221e90
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -0,0 +1,70 @@
+//===-- HexagonBaseInfo.h - Top level definitions for Hexagon --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains small standalone helper functions and enum definitions for
+// the Hexagon target useful for the compiler back-end and the MC libraries.
+// As such, it deliberately does not include references to LLVM core
+// codegen types, passes, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONBASEINFO_H
+#define HEXAGONBASEINFO_H
+
+namespace llvm {
+
+/// HexagonII - This namespace holds all of the target specific flags that
+/// instruction info tracks.
+///
+namespace HexagonII {
+ // *** The code below must match HexagonInstrFormat*.td *** //
+
+ // Insn types.
+ // *** Must match HexagonInstrFormat*.td ***
+ enum Type {
+ TypePSEUDO = 0,
+ TypeALU32 = 1,
+ TypeCR = 2,
+ TypeJR = 3,
+ TypeJ = 4,
+ TypeLD = 5,
+ TypeST = 6,
+ TypeSYSTEM = 7,
+ TypeXTYPE = 8,
+ TypeMEMOP = 9,
+ TypeNV = 10,
+ TypePREFIX = 30, // Such as extenders.
+ TypeMARKER = 31 // Such as end of a HW loop.
+ };
+
+
+
+ // MCInstrDesc TSFlags
+ // *** Must match HexagonInstrFormat*.td ***
+ enum {
+ // This 5-bit field describes the insn type.
+ TypePos = 0,
+ TypeMask = 0x1f,
+
+ // Solo instructions.
+ SoloPos = 5,
+ SoloMask = 0x1,
+
+ // Predicated instructions.
+ PredicatedPos = 6,
+ PredicatedMask = 0x1
+ };
+
+ // *** The code above must match HexagonInstrFormat*.td *** //
+
+} // End namespace HexagonII.
+
+} // End namespace llvm.
+
+#endif
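Since these enums are just field positions and masks, decoding the packed
MCInstrDesc TSFlags word is a shift-and-mask. Hypothetical accessor helpers
(not part of this patch) showing the intended use:

    #include <cstdint>
    #include "HexagonBaseInfo.h"
    using namespace llvm;

    static inline HexagonII::Type getInsnType(uint64_t TSFlags) {
      return static_cast<HexagonII::Type>(
          (TSFlags >> HexagonII::TypePos) & HexagonII::TypeMask);
    }
    static inline bool isSoloInsn(uint64_t TSFlags) {
      return (TSFlags >> HexagonII::SoloPos) & HexagonII::SoloMask;
    }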
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
new file mode 100644
index 0000000..d6e6c36
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
@@ -0,0 +1,36 @@
+//===-- HexagonMCAsmInfo.cpp - Hexagon asm properties ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of the HexagonMCAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonMCAsmInfo.h"
+
+using namespace llvm;
+
+HexagonMCAsmInfo::HexagonMCAsmInfo(const Target &T, StringRef TT) {
+ Data16bitsDirective = "\t.half\t";
+ Data32bitsDirective = "\t.word\t";
+ Data64bitsDirective = 0; // There is no native 64-bit data directive.
+ CommentString = "//";
+ HasLEB128 = true;
+
+ PrivateGlobalPrefix = ".L";
+ LCOMMDirectiveType = LCOMM::ByteAlignment;
+ InlineAsmStart = "# InlineAsm Start";
+ InlineAsmEnd = "# InlineAsm End";
+ ZeroDirective = "\t.space\t";
+ AscizDirective = "\t.string\t";
+ WeakRefDirective = "\t.weak\t";
+
+ UsesELFSectionDirectiveForBSS = true;
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+}
diff --git a/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCAsmInfo.h b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
index 837844b..d336cd5 100644
--- a/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- AlphaMCAsmInfo.h - Alpha asm properties -------------*- C++ -*--====//
+//===-- HexagonMCAsmInfo.h - Hexagon asm properties ------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,12 +7,12 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains the declaration of the AlphaMCAsmInfo class.
+// This file contains the declaration of the HexagonMCAsmInfo class.
//
//===----------------------------------------------------------------------===//
-#ifndef ALPHATARGETASMINFO_H
-#define ALPHATARGETASMINFO_H
+#ifndef HEXAGONMCASMINFO_H
+#define HEXAGONMCASMINFO_H
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -20,8 +20,9 @@
namespace llvm {
class Target;
- struct AlphaMCAsmInfo : public MCAsmInfo {
- explicit AlphaMCAsmInfo(const Target &T, StringRef TT);
+ class HexagonMCAsmInfo : public MCAsmInfo {
+ public:
+ explicit HexagonMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
new file mode 100644
index 0000000..3cfa4fd
--- /dev/null
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -0,0 +1,95 @@
+//===-- HexagonMCTargetDesc.cpp - Hexagon Target Descriptions -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Hexagon specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "HexagonMCTargetDesc.h"
+#include "HexagonMCAsmInfo.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define GET_INSTRINFO_MC_DESC
+#include "HexagonGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "HexagonGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "HexagonGenRegisterInfo.inc"
+
+using namespace llvm;
+
+static MCInstrInfo *createHexagonMCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitHexagonMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createHexagonMCRegisterInfo(StringRef TT) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ InitHexagonMCRegisterInfo(X, Hexagon::R0);
+ return X;
+}
+
+static MCSubtargetInfo *createHexagonMCSubtargetInfo(StringRef TT,
+ StringRef CPU,
+ StringRef FS) {
+ MCSubtargetInfo *X = new MCSubtargetInfo();
+ InitHexagonMCSubtargetInfo(X, TT, CPU, FS);
+ return X;
+}
+
+static MCAsmInfo *createHexagonMCAsmInfo(const Target &T, StringRef TT) {
+ MCAsmInfo *MAI = new HexagonMCAsmInfo(T, TT);
+
+ // VirtualFP = (R30 + #0).
+ MachineLocation Dst(MachineLocation::VirtualFP);
+ MachineLocation Src(Hexagon::R30, 0);
+ MAI->addInitialFrameState(0, Dst, Src);
+
+ return MAI;
+}
+
+static MCCodeGenInfo *createHexagonMCCodeGenInfo(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
+ MCCodeGenInfo *X = new MCCodeGenInfo();
+ // For the time being, use static relocations, since there's really no
+ // support for PIC yet.
+ X->InitMCCodeGenInfo(Reloc::Static, CM, OL);
+ return X;
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeHexagonTargetMC() {
+ // Register the MC asm info.
+ RegisterMCAsmInfoFn X(TheHexagonTarget, createHexagonMCAsmInfo);
+
+ // Register the MC codegen info.
+ TargetRegistry::RegisterMCCodeGenInfo(TheHexagonTarget,
+ createHexagonMCCodeGenInfo);
+
+ // Register the MC instruction info.
+ TargetRegistry::RegisterMCInstrInfo(TheHexagonTarget, createHexagonMCInstrInfo);
+
+ // Register the MC register info.
+ TargetRegistry::RegisterMCRegInfo(TheHexagonTarget,
+ createHexagonMCRegisterInfo);
+
+ // Register the MC subtarget info.
+ TargetRegistry::RegisterMCSubtargetInfo(TheHexagonTarget,
+ createHexagonMCSubtargetInfo);
+}
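A hypothetical driver snippet (not in the patch) showing how a statically
linked tool consumes these hooks: call the initializers, then look the
target up through the registry.

    #include <string>
    #include "llvm/Support/TargetRegistry.h"

    extern "C" void LLVMInitializeHexagonTargetInfo();
    extern "C" void LLVMInitializeHexagonTargetMC();

    const llvm::Target *getHexagonTarget() {
      LLVMInitializeHexagonTargetInfo(); // registers TheHexagonTarget
      LLVMInitializeHexagonTargetMC();   // registers the MC hooks above
      std::string Err;
      return llvm::TargetRegistry::lookupTarget("hexagon", Err);
    }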
diff --git a/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCTargetDesc.h b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
index b0619e6..2238b1a 100644
--- a/contrib/llvm/lib/Target/Alpha/MCTargetDesc/AlphaMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
@@ -1,4 +1,4 @@
-//===-- AlphaMCTargetDesc.h - Alpha Target Descriptions ---------*- C++ -*-===//
+//===-- HexagonMCTargetDesc.h - Hexagon Target Descriptions -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,34 +7,33 @@
//
//===----------------------------------------------------------------------===//
//
-// This file provides Alpha specific target descriptions.
+// This file provides Hexagon specific target descriptions.
//
//===----------------------------------------------------------------------===//
-#ifndef ALPHAMCTARGETDESC_H
-#define ALPHAMCTARGETDESC_H
+#ifndef HEXAGONMCTARGETDESC_H
+#define HEXAGONMCTARGETDESC_H
namespace llvm {
class MCSubtargetInfo;
class Target;
-class StringRef;
-extern Target TheAlphaTarget;
+extern Target TheHexagonTarget;
} // End llvm namespace
-// Defines symbolic names for Alpha registers. This defines a mapping from
+// Defines symbolic names for Hexagon registers. This defines a mapping from
// register name to register number.
//
#define GET_REGINFO_ENUM
-#include "AlphaGenRegisterInfo.inc"
+#include "HexagonGenRegisterInfo.inc"
-// Defines symbolic names for the Alpha instructions.
+// Defines symbolic names for the Hexagon instructions.
//
#define GET_INSTRINFO_ENUM
-#include "AlphaGenInstrInfo.inc"
+#include "HexagonGenInstrInfo.inc"
#define GET_SUBTARGETINFO_ENUM
-#include "AlphaGenSubtargetInfo.inc"
+#include "HexagonGenSubtargetInfo.inc"
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/TargetInfo/SystemZTargetInfo.cpp b/contrib/llvm/lib/Target/Hexagon/TargetInfo/HexagonTargetInfo.cpp
index da99282..7aa5dd3 100644
--- a/contrib/llvm/lib/Target/SystemZ/TargetInfo/SystemZTargetInfo.cpp
+++ b/contrib/llvm/lib/Target/Hexagon/TargetInfo/HexagonTargetInfo.cpp
@@ -1,4 +1,4 @@
-//===-- SystemZTargetInfo.cpp - SystemZ Target Implementation -------------===//
+//===-- HexagonTargetInfo.cpp - Hexagon Target Implementation ------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,13 +7,13 @@
//
//===----------------------------------------------------------------------===//
-#include "SystemZ.h"
+#include "Hexagon.h"
#include "llvm/Module.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
-Target llvm::TheSystemZTarget;
+Target llvm::TheHexagonTarget;
-extern "C" void LLVMInitializeSystemZTargetInfo() {
- RegisterTarget<Triple::systemz> X(TheSystemZTarget, "systemz", "SystemZ");
+extern "C" void LLVMInitializeHexagonTargetInfo() {
+ RegisterTarget<Triple::hexagon, /*HasJIT=*/false> X(TheHexagonTarget, "hexagon", "Hexagon");
}
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp
index 2d357bb..59a1ed9 100644
--- a/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp
@@ -9,10 +9,6 @@
#include "MCTargetDesc/MBlazeBaseInfo.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
@@ -100,11 +96,7 @@ AsmToken MBlazeBaseAsmLexer::LexTokenUAL() {
return AsmToken(lexedToken);
case AsmToken::Identifier:
{
- std::string upperCase = lexedToken.getString().str();
- std::string lowerCase = LowercaseString(upperCase);
- StringRef lowerRef(lowerCase);
-
- unsigned regID = MatchRegisterName(lowerRef);
+ unsigned regID = MatchRegisterName(lexedToken.getString().lower());
if (regID) {
return AsmToken(AsmToken::Register,
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
index 97d311c..38fb0e8 100644
--- a/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp
@@ -18,9 +18,7 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
using namespace llvm;
@@ -347,7 +345,6 @@ MatchAndEmitInstruction(SMLoc IDLoc,
}
llvm_unreachable("Implement any new match types added!");
- return true;
}
MBlazeOperand *MBlazeAsmParser::
diff --git a/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp b/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
index fd761f1..6b958c8 100644
--- a/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
@@ -1,4 +1,4 @@
-//===- MBlazeDisassembler.cpp - Disassembler for MicroBlaze ----*- C++ -*-===//
+//===-- MBlazeDisassembler.cpp - Disassembler for MicroBlaze -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,13 +13,12 @@
//===----------------------------------------------------------------------===//
#include "MBlaze.h"
-#include "MBlazeInstrInfo.h"
#include "MBlazeDisassembler.h"
#include "llvm/MC/EDInstInfo.h"
#include "llvm/MC/MCDisassembler.h"
-#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/TargetRegistry.h"
@@ -30,14 +29,14 @@
#include "MBlazeGenEDInfo.inc"
namespace llvm {
-extern MCInstrDesc MBlazeInsts[];
+extern const MCInstrDesc MBlazeInsts[];
}
using namespace llvm;
-const unsigned UNSUPPORTED = -1;
+const uint16_t UNSUPPORTED = -1;
-static unsigned mblazeBinary2Opcode[] = {
+static const uint16_t mblazeBinary2Opcode[] = {
MBlaze::ADD, MBlaze::RSUB, MBlaze::ADDC, MBlaze::RSUBC, //00,01,02,03
MBlaze::ADDK, MBlaze::RSUBK, MBlaze::ADDKC, MBlaze::RSUBKC, //04,05,06,07
MBlaze::ADDI, MBlaze::RSUBI, MBlaze::ADDIC, MBlaze::RSUBIC, //08,09,0A,0B
@@ -124,6 +123,7 @@ static unsigned decodeSEXT(uint32_t insn) {
case 0x41: return MBlaze::SRL;
case 0x21: return MBlaze::SRC;
case 0x01: return MBlaze::SRA;
+ case 0xE0: return MBlaze::CLZ;
}
}
@@ -177,6 +177,13 @@ static unsigned decodeBR(uint32_t insn) {
}
static unsigned decodeBRI(uint32_t insn) {
+ switch (insn&0x3FFFFFF) {
+ default: break;
+ case 0x0020004: return MBlaze::IDMEMBAR;
+ case 0x0220004: return MBlaze::DMEMBAR;
+ case 0x0420004: return MBlaze::IMEMBAR;
+ }
+
switch ((insn>>16)&0x1F) {
default: return UNSUPPORTED;
case 0x00: return MBlaze::BRI;
@@ -485,7 +492,7 @@ static unsigned getOPCODE(uint32_t insn) {
}
}
-EDInstInfo *MBlazeDisassembler::getEDInfo() const {
+const EDInstInfo *MBlazeDisassembler::getEDInfo() const {
return instInfoMBlaze;
}
@@ -532,6 +539,9 @@ MCDisassembler::DecodeStatus MBlazeDisassembler::getInstruction(MCInst &instr,
default:
return Fail;
+ case MBlazeII::FC:
+ break;
+
case MBlazeII::FRRRR:
if (RD == UNSUPPORTED || RA == UNSUPPORTED || RB == UNSUPPORTED)
return Fail;
@@ -548,6 +558,13 @@ MCDisassembler::DecodeStatus MBlazeDisassembler::getInstruction(MCInst &instr,
instr.addOperand(MCOperand::CreateReg(RB));
break;
+ case MBlazeII::FRR:
+ if (RD == UNSUPPORTED || RA == UNSUPPORTED)
+ return Fail;
+ instr.addOperand(MCOperand::CreateReg(RD));
+ instr.addOperand(MCOperand::CreateReg(RA));
+ break;
+
case MBlazeII::FRI:
switch (opcode) {
default:
diff --git a/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h b/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
index 0ac0d89..5c4ae3b 100644
--- a/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
+++ b/contrib/llvm/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.h
@@ -1,4 +1,4 @@
-//===- MBlazeDisassembler.h - Disassembler for MicroBlaze ------*- C++ -*-===//
+//===-- MBlazeDisassembler.h - Disassembler for MicroBlaze -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,8 +17,6 @@
#include "llvm/MC/MCDisassembler.h"
-struct InternalInstruction;
-
namespace llvm {
class MCInst;
@@ -48,7 +46,7 @@ public:
raw_ostream &cStream) const;
/// getEDInfo - See MCDisassembler.
- EDInstInfo *getEDInfo() const;
+ const EDInstInfo *getEDInfo() const;
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h b/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h
index 570ab08..51ba7c3 100644
--- a/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h
+++ b/contrib/llvm/lib/Target/MBlaze/InstPrinter/MBlazeInstPrinter.h
@@ -1,4 +1,4 @@
-//===-- MBLazeInstPrinter.h - Convert MBlaze MCInst to assembly syntax ----===//
+//= MBlazeInstPrinter.h - Convert MBlaze MCInst to assembly syntax -*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -21,15 +21,15 @@ namespace llvm {
class MBlazeInstPrinter : public MCInstPrinter {
public:
- MBlazeInstPrinter(const MCAsmInfo &MAI)
- : MCInstPrinter(MAI) {}
+ MBlazeInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
- static const char *getInstructionName(unsigned Opcode);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O,
const char *Modifier = 0);
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlaze.td b/contrib/llvm/lib/Target/MBlaze/MBlaze.td
index 1245658..b4edff0 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlaze.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlaze.td
@@ -1,4 +1,4 @@
-//===- MBlaze.td - Describe the MBlaze Target Machine ------*- tablegen -*-===//
+//===-- MBlaze.td - Describe the MBlaze Target Machine -----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
index 97bd083..55fffe3 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeAsmPrinter.cpp
@@ -38,8 +38,6 @@
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
@@ -119,7 +117,7 @@ namespace {
static void printHex32(unsigned int Value, raw_ostream &O) {
O << "0x";
for (int i = 7; i >= 0; i--)
- O << utohexstr((Value & (0xF << (i*4))) >> (i*4));
+ O.write_hex((Value & (0xF << (i*4))) >> (i*4));
}
// Create a bitmask with all callee saved registers for CPU or Floating Point
@@ -311,9 +309,9 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
// Check if the last terminator is an unconditional branch.
MachineBasicBlock::const_iterator I = Pred->end();
- while (I != Pred->begin() && !(--I)->getDesc().isTerminator())
+ while (I != Pred->begin() && !(--I)->isTerminator())
; // Noop
- return I == Pred->end() || !I->getDesc().isBarrier();
+ return I == Pred->end() || !I->isBarrier();
}
// Force static initialization.
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
index c07570a..19e787d 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
@@ -29,13 +29,11 @@ using namespace llvm;
STATISTIC(FilledSlots, "Number of delay slots filled");
-namespace llvm {
-cl::opt<bool> DisableDelaySlotFiller(
+static cl::opt<bool> MBDisableDelaySlotFiller(
"disable-mblaze-delay-filler",
cl::init(false),
cl::desc("Disable the MBlaze delay slot filter."),
cl::Hidden);
-}
namespace {
struct Filler : public MachineFunctionPass {
@@ -109,7 +107,6 @@ static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
// Hazard check
MachineBasicBlock::iterator a = candidate;
MachineBasicBlock::iterator b = slot;
- MCInstrDesc desc = candidate->getDesc();
// MBB layout:-
// candidate := a0 = operation(a1, a2)
@@ -123,7 +120,7 @@ static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
// 4. b0 is one or more of {a1, a2}
// 5. a accesses memory, and the middle bit
// contains a store operation.
- bool a_is_memory = desc.mayLoad() || desc.mayStore();
+ bool a_is_memory = candidate->mayLoad() || candidate->mayStore();
// Determine the number of operands in the slot instruction and in the
// candidate instruction.
@@ -156,7 +153,7 @@ static bool delayHasHazard(MachineBasicBlock::iterator &candidate,
}
// Check hazard type 5
- if (a_is_memory && m->getDesc().mayStore())
+ if (a_is_memory && m->mayStore())
return true;
}
@@ -183,8 +180,8 @@ static bool isDelayFiller(MachineBasicBlock &MBB,
if (candidate == MBB.begin())
return false;
- MCInstrDesc brdesc = (--candidate)->getDesc();
- return (brdesc.hasDelaySlot());
+ --candidate;
+ return (candidate->hasDelaySlot());
}
static bool hasUnknownSideEffects(MachineBasicBlock::iterator &I) {
@@ -211,9 +208,8 @@ findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) {
break;
--I;
- MCInstrDesc desc = I->getDesc();
- if (desc.hasDelaySlot() || desc.isBranch() || isDelayFiller(MBB,I) ||
- desc.isCall() || desc.isReturn() || desc.isBarrier() ||
+ if (I->hasDelaySlot() || I->isBranch() || isDelayFiller(MBB,I) ||
+ I->isCall() || I->isReturn() || I->isBarrier() ||
hasUnknownSideEffects(I))
break;
@@ -232,11 +228,11 @@ findDelayInstr(MachineBasicBlock &MBB,MachineBasicBlock::iterator slot) {
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
- if (I->getDesc().hasDelaySlot()) {
+ if (I->hasDelaySlot()) {
MachineBasicBlock::iterator D = MBB.end();
MachineBasicBlock::iterator J = I;
- if (!DisableDelaySlotFiller)
+ if (!MBDisableDelaySlotFiller)
D = findDelayInstr(MBB,I);
++FilledSlots;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
index 3f26ed1..e3c7236 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
@@ -41,7 +41,6 @@ unsigned MBlazeELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
default:
llvm_unreachable("unknown mblaze machine relocation type");
}
- return 0;
}
long int MBlazeELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
@@ -54,7 +53,6 @@ long int MBlazeELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
default:
llvm_unreachable("unknown mblaze relocation type");
}
- return 0;
}
unsigned MBlazeELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
@@ -102,10 +100,8 @@ unsigned MBlazeELFWriterInfo::getAbsoluteLabelMachineRelTy() const {
long int MBlazeELFWriterInfo::computeRelocation(unsigned SymOffset,
unsigned RelOffset,
unsigned RelTy) const {
- if (RelTy == ELF::R_MICROBLAZE_32_PCREL || ELF::R_MICROBLAZE_64_PCREL)
- return SymOffset - (RelOffset + 4);
- else
- assert("computeRelocation unknown for this relocation type");
-
- return 0;
+ assert((RelTy == ELF::R_MICROBLAZE_32_PCREL ||
+ RelTy == ELF::R_MICROBLAZE_64_PCREL) &&
+ "computeRelocation unknown for this relocation type");
+ return SymOffset - (RelOffset + 4);
}
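The replaced condition (RelTy == ELF::R_MICROBLAZE_32_PCREL ||
ELF::R_MICROBLAZE_64_PCREL) was always true, because the second operand is a
non-zero constant rather than a comparison; the hunk turns it into an assert
with the comparison spelled out. The PC-relative arithmetic itself measures
from the end of the 4-byte instruction. A quick illustrative check:

    // Symbol at offset 0x100, relocation at offset 0x40:
    static_assert(0x100 - (0x40 + 4) == 0xbc, "PC-relative addend example");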
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h
index 63bfc0d..a314eb7 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeELFWriterInfo.h
@@ -17,6 +17,7 @@
#include "llvm/Target/TargetELFWriterInfo.h"
namespace llvm {
+ class TargetMachine;
class MBlazeELFWriterInfo : public TargetELFWriterInfo {
public:
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp
index f28d5a7..d2f14a5 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.cpp
@@ -1,4 +1,4 @@
-//===- MBlazeFrameLowering.cpp - MBlaze Frame Information ------*- C++ -*-====//
+//===-- MBlazeFrameLowering.cpp - MBlaze Frame Information ---------------====//
//
// The LLVM Compiler Infrastructure
//
@@ -32,13 +32,11 @@
using namespace llvm;
-namespace llvm {
- cl::opt<bool> DisableStackAdjust(
- "disable-mblaze-stack-adjust",
- cl::init(false),
- cl::desc("Disable MBlaze stack layout adjustment."),
- cl::Hidden);
-}
+static cl::opt<bool> MBDisableStackAdjust(
+ "disable-mblaze-stack-adjust",
+ cl::init(false),
+ cl::desc("Disable MBlaze stack layout adjustment."),
+ cl::Hidden);
static void replaceFrameIndexes(MachineFunction &MF,
SmallVector<std::pair<int,int64_t>, 16> &FR) {
@@ -85,7 +83,7 @@ static void replaceFrameIndexes(MachineFunction &MF,
//===----------------------------------------------------------------------===//
static void analyzeFrameIndexes(MachineFunction &MF) {
- if (DisableStackAdjust) return;
+ if (MBDisableStackAdjust) return;
MachineFrameInfo *MFI = MF.getFrameInfo();
MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
@@ -213,13 +211,13 @@ static void analyzeFrameIndexes(MachineFunction &MF) {
static void interruptFrameLayout(MachineFunction &MF) {
const Function *F = MF.getFunction();
- llvm::CallingConv::ID CallConv = F->getCallingConv();
+ CallingConv::ID CallConv = F->getCallingConv();
// If this function is not using either the interrupt_handler
// calling convention or the save_volatiles calling convention
// then we don't need to do any additional frame layout.
- if (CallConv != llvm::CallingConv::MBLAZE_INTR &&
- CallConv != llvm::CallingConv::MBLAZE_SVOL)
+ if (CallConv != CallingConv::MBLAZE_INTR &&
+ CallConv != CallingConv::MBLAZE_SVOL)
return;
MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -230,7 +228,7 @@ static void interruptFrameLayout(MachineFunction &MF) {
// Determine if the calling convention is the interrupt_handler
// calling convention. Some pieces of the prologue and epilogue
// only need to be emitted if we are lowering an interrupt handler.
- bool isIntr = CallConv == llvm::CallingConv::MBLAZE_INTR;
+ bool isIntr = CallConv == CallingConv::MBLAZE_INTR;
// Determine where to put prologue and epilogue additions
MachineBasicBlock &MENT = MF.front();
@@ -336,7 +334,8 @@ int MBlazeFrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI)
// if frame pointer elimination is disabled.
bool MBlazeFrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
+ return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ MFI->hasVarSizedObjects();
}
void MBlazeFrameLowering::emitPrologue(MachineFunction &MF) const {
@@ -348,8 +347,8 @@ void MBlazeFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
- llvm::CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
- bool requiresRA = CallConv == llvm::CallingConv::MBLAZE_INTR;
+ CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ bool requiresRA = CallConv == CallingConv::MBLAZE_INTR;
// Determine the correct frame layout
determineFrameLayout(MF);
@@ -394,8 +393,8 @@ void MBlazeFrameLowering::emitEpilogue(MachineFunction &MF,
DebugLoc dl = MBBI->getDebugLoc();
- llvm::CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
- bool requiresRA = CallConv == llvm::CallingConv::MBLAZE_INTR;
+ CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ bool requiresRA = CallConv == CallingConv::MBLAZE_INTR;
// Get the FI's where RA and FP are saved.
int FPOffset = MBlazeFI->getFPStackOffset();
@@ -432,8 +431,8 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
- llvm::CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
- bool requiresRA = CallConv == llvm::CallingConv::MBLAZE_INTR;
+ CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ bool requiresRA = CallConv == CallingConv::MBLAZE_INTR;
if (MFI->adjustsStack() || requiresRA) {
MBlazeFI->setRAStackOffset(0);
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h
index 8be15bf..01e6578 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeFrameLowering.h
@@ -15,11 +15,10 @@
#define MBLAZE_FRAMEINFO_H
#include "MBlaze.h"
-#include "MBlazeSubtarget.h"
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
- class MBlazeSubtarget;
+class MBlazeSubtarget;
class MBlazeFrameLowering : public TargetFrameLowering {
protected:
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
index 8ec548f..edfc335 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
@@ -167,7 +167,9 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
@@ -214,7 +216,7 @@ MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB)
const {
switch (MI->getOpcode()) {
- default: assert(false && "Unexpected instr type to insert");
+ default: llvm_unreachable("Unexpected instr type to insert");
case MBlaze::ShiftRL:
case MBlaze::ShiftRA:
@@ -600,7 +602,6 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
SDValue MBlazeTargetLowering::
LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
llvm_unreachable("TLS not implemented for MicroBlaze.");
- return SDValue(); // Not reached
}
SDValue MBlazeTargetLowering::
@@ -656,7 +657,7 @@ static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State) {
- static const unsigned ArgRegs[] = {
+ static const uint16_t ArgRegs[] = {
MBlaze::R5, MBlaze::R6, MBlaze::R7,
MBlaze::R8, MBlaze::R9, MBlaze::R10
};
@@ -681,7 +682,7 @@ static bool CC_MBlaze_AssignReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
/// TODO: isVarArg, isTailCall.
SDValue MBlazeTargetLowering::
LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
- bool isVarArg, bool &isTailCall,
+ bool isVarArg, bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -895,7 +896,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
if (VA.isRegLoc()) {
MVT RegVT = VA.getLocVT();
ArgRegEnd = VA.getLocReg();
- TargetRegisterClass *RC = 0;
+ const TargetRegisterClass *RC;
if (RegVT == MVT::i32)
RC = MBlaze::GPRRegisterClass;
@@ -951,7 +952,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
- false, false, 0));
+ false, false, false, 0));
}
}
@@ -963,7 +964,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
StackPtr = DAG.getRegister(StackReg, getPointerTy());
// The last register argument that must be saved is MBlaze::R10
- TargetRegisterClass *RC = MBlaze::GPRRegisterClass;
+ const TargetRegisterClass *RC = MBlaze::GPRRegisterClass;
unsigned Begin = getMBlazeRegisterNumbering(MBlaze::R5);
unsigned Start = getMBlazeRegisterNumbering(ArgRegEnd+1);
@@ -1045,10 +1046,10 @@ LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
// If this function is using the interrupt_handler calling convention
// then use "rtid r14, 0" otherwise use "rtsd r15, 8"
- unsigned Ret = (CallConv == llvm::CallingConv::MBLAZE_INTR) ? MBlazeISD::IRet
- : MBlazeISD::Ret;
- unsigned Reg = (CallConv == llvm::CallingConv::MBLAZE_INTR) ? MBlaze::R14
- : MBlaze::R15;
+ unsigned Ret = (CallConv == CallingConv::MBLAZE_INTR) ? MBlazeISD::IRet
+ : MBlazeISD::Ret;
+ unsigned Reg = (CallConv == CallingConv::MBLAZE_INTR) ? MBlaze::R14
+ : MBlaze::R15;
SDValue DReg = DAG.getRegister(Reg, MVT::i32);
if (Flag.getNode())
@@ -1079,7 +1080,6 @@ getConstraintType(const std::string &Constraint) const
case 'y':
case 'f':
return C_RegisterClass;
- break;
}
}
return TargetLowering::getConstraintType(Constraint);
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
index 8b49bc3..6a79fc1 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
@@ -15,11 +15,11 @@
#ifndef MBlazeISELLOWERING_H
#define MBlazeISELLOWERING_H
+#include "MBlaze.h"
+#include "MBlazeSubtarget.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
-#include "MBlaze.h"
-#include "MBlazeSubtarget.h"
namespace llvm {
namespace MBlazeCC {
@@ -134,7 +134,7 @@ namespace llvm {
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td
index 4acdcfd..3f14593 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFPU.td
@@ -1,4 +1,4 @@
-//===- MBlazeInstrFPU.td - MBlaze FPU Instruction defs -----*- tablegen -*-===//
+//===-- MBlazeInstrFPU.td - MBlaze FPU Instruction defs ----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td
index 3082a7e..91b69de 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFSL.td
@@ -1,4 +1,4 @@
-//===- MBlazeInstrFSL.td - MBlaze FSL Instruction defs -----*- tablegen -*-===//
+//===-- MBlazeInstrFSL.td - MBlaze FSL Instruction defs ----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td
index 54f605f..e40432a 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrFormats.td
@@ -1,4 +1,4 @@
-//===- MBlazeInstrFormats.td - MB Instruction defs ---------*- tablegen -*-===//
+//===-- MBlazeInstrFormats.td - MB Instruction defs --------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -35,6 +35,7 @@ def FRIR : Format<17>; // RSUBI
def FRRRR : Format<18>; // RSUB, FRSUB
def FRI : Format<19>; // RSUB, FRSUB
def FC : Format<20>; // NOP
+def FRR : Format<21>; // CLZ
//===----------------------------------------------------------------------===//
// Describe MBlaze instructions format
@@ -202,3 +203,26 @@ class MSR<bits<6> op, bits<6> flags, dag outs, dag ins, string asmstr,
let Inst{11-16} = flags;
let Inst{17-31} = imm15;
}
+
+//===----------------------------------------------------------------------===//
+// TCLZ instruction class in MBlaze : <|opcode|rd|ra|flags16|>
+//===----------------------------------------------------------------------===//
+class TCLZ<bits<6> op, bits<16> flags, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin> :
+ MBlazeInst<op, FRR, outs, ins, asmstr, pattern, itin> {
+ bits<5> rd;
+ bits<5> ra;
+
+ let Inst{6-10} = rd;
+ let Inst{11-15} = ra;
+ let Inst{16-31} = flags;
+}
+
+//===----------------------------------------------------------------------===//
+// MBAR instruction class in MBlaze : <|opcode|flags26|>
+//===----------------------------------------------------------------------===//
+class MBAR<bits<6> op, bits<26> flags, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin> :
+ MBlazeInst<op, FC, outs, ins, asmstr, pattern, itin> {
+ let Inst{6-31} = flags;
+}
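An illustrative consistency check (not in the patch) for the TCLZ layout,
using MBlaze's convention that Inst{0} is the most-significant bit, i.e.
|opcode:6|rd:5|ra:5|flags:16|:

    #include <cstdint>

    constexpr uint32_t encodeCLZ(uint32_t rd, uint32_t ra) {
      return (0x24u << 26) | (rd << 21) | (ra << 16) | 0x00E0u;
    }
    // The 0xE0 flags byte matches the case added to decodeSEXT() earlier
    // in this patch.
    static_assert(encodeCLZ(3, 4) == 0x906400E0u, "clz r3, r4");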
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
index 7ae05b3..db71434 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- MBlazeInstrInfo.cpp - MBlaze Instruction Information -----*- C++ -*-===//
+//===-- MBlazeInstrInfo.cpp - MBlaze Instruction Information --------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
index 7174405..5252147 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
@@ -1,4 +1,4 @@
-//===- MBlazeInstrInfo.h - MBlaze Instruction Information -------*- C++ -*-===//
+//===-- MBlazeInstrInfo.h - MBlaze Instruction Information ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,9 +15,9 @@
#define MBLAZEINSTRUCTIONINFO_H
#include "MBlaze.h"
+#include "MBlazeRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "MBlazeRegisterInfo.h"
#define GET_INSTRINFO_HEADER
#include "MBlazeGenInstrInfo.inc"
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
index 1d8c987..02a2157 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.td
@@ -1,4 +1,4 @@
-//===- MBlazeInstrInfo.td - MBlaze Instruction defs --------*- tablegen -*-===//
+//===-- MBlazeInstrInfo.td - MBlaze Instruction defs -------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -594,9 +594,18 @@ let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1,
//===----------------------------------------------------------------------===//
let neverHasSideEffects = 1 in {
- def NOP : MBlazeInst< 0x20, FC, (outs), (ins), "nop ", [], IIC_ALU>;
+ def NOP : MBlazeInst<0x20, FC, (outs), (ins), "nop ", [], IIC_ALU>;
}
+let Predicates=[HasPatCmp] in {
+ def CLZ : TCLZ<0x24, 0x00E0, (outs GPR:$dst), (ins GPR:$src),
+ "clz $dst, $src", [], IIC_ALU>;
+}
+
+def IMEMBAR : MBAR<0x2E, 0x0420004, (outs), (ins), "mbar 2", [], IIC_ALU>;
+def DMEMBAR : MBAR<0x2E, 0x0220004, (outs), (ins), "mbar 1", [], IIC_ALU>;
+def IDMEMBAR : MBAR<0x2E, 0x0020004, (outs), (ins), "mbar 0", [], IIC_ALU>;
+
let usesCustomInserter = 1 in {
def Select_CC : MBlazePseudo<(outs GPR:$dst),
(ins GPR:$T, GPR:$F, GPR:$CMP, i32imm:$CC), // F T reversed
@@ -751,6 +760,56 @@ def : Pat<(sra GPR:$L, GPR:$R), (ShiftRA GPR:$L, GPR:$R)>;
def : Pat<(srl GPR:$L, GPR:$R), (ShiftRL GPR:$L, GPR:$R)>;
// SET_CC operations
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETEQ),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 1)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETNE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 2)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 3)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETLT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 4)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 5)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETLE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$L, 6)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETUGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU (i32 R0), GPR:$L), 3)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETULT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU (i32 R0), GPR:$L), 4)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETUGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU (i32 R0), GPR:$L), 5)>;
+def : Pat<(setcc (i32 GPR:$L), (i32 0), SETULE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU (i32 R0), GPR:$L), 6)>;
+
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETEQ),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 1)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETNE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 2)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 3)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETLT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 4)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 5)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETLE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0), GPR:$R, 6)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETUGT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU GPR:$R, (i32 R0)), 3)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETULT),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU GPR:$R, (i32 R0)), 4)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETUGE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU GPR:$R, (i32 R0)), 5)>;
+def : Pat<(setcc (i32 0), (i32 GPR:$R), SETULE),
+ (Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
+ (CMPU GPR:$R, (i32 R0)), 6)>;
+
def : Pat<(setcc (i32 GPR:$L), (i32 GPR:$R), SETEQ),
(Select_CC (ADDIK (i32 R0), 1), (ADDIK (i32 R0), 0),
(CMP GPR:$R, GPR:$L), 1)>;
@@ -787,6 +846,68 @@ def : Pat<(select (i32 GPR:$C), (i32 GPR:$T), (i32 GPR:$F)),
(Select_CC GPR:$T, GPR:$F, GPR:$C, 2)>;
// SELECT_CC
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETEQ),
+ (Select_CC GPR:$T, GPR:$F, GPR:$L, 1)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETNE),
+ (Select_CC GPR:$T, GPR:$F, GPR:$L, 2)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETGT),
+ (Select_CC GPR:$T, GPR:$F, GPR:$L, 3)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETLT),
+ (Select_CC GPR:$T, GPR:$F, GPR:$L, 4)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETGE),
+ (Select_CC GPR:$T, GPR:$F, GPR:$L, 5)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETLE),
+ (Select_CC GPR:$T, GPR:$F, GPR:$L, 6)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETUGT),
+ (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 3)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETULT),
+ (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 4)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETUGE),
+ (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 5)>;
+def : Pat<(selectcc (i32 GPR:$L), (i32 0),
+ (i32 GPR:$T), (i32 GPR:$F), SETULE),
+ (Select_CC GPR:$T, GPR:$F, (CMPU (i32 R0), GPR:$L), 6)>;
+
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETEQ),
+ (Select_CC GPR:$T, GPR:$F, GPR:$R, 1)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETNE),
+ (Select_CC GPR:$T, GPR:$F, GPR:$R, 2)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETGT),
+ (Select_CC GPR:$T, GPR:$F, GPR:$R, 3)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETLT),
+ (Select_CC GPR:$T, GPR:$F, GPR:$R, 4)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETGE),
+ (Select_CC GPR:$T, GPR:$F, GPR:$R, 5)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETLE),
+ (Select_CC GPR:$T, GPR:$F, GPR:$R, 6)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETUGT),
+ (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 3)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETULT),
+ (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 4)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETUGE),
+ (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 5)>;
+def : Pat<(selectcc (i32 0), (i32 GPR:$R),
+ (i32 GPR:$T), (i32 GPR:$F), SETULE),
+ (Select_CC GPR:$T, GPR:$F, (CMPU GPR:$R, (i32 R0)), 6)>;
+
def : Pat<(selectcc (i32 GPR:$L), (i32 GPR:$R),
(i32 GPR:$T), (i32 GPR:$F), SETEQ),
(Select_CC GPR:$T, GPR:$F, (CMP GPR:$R, GPR:$L), 1)>;
@@ -827,6 +948,48 @@ def : Pat<(br bb:$T), (BRID bb:$T)>;
def : Pat<(brind GPR:$T), (BRAD GPR:$T)>;
// BRCOND instructions
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETEQ), bb:$T),
+ (BEQID GPR:$L, bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETNE), bb:$T),
+ (BNEID GPR:$L, bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETGT), bb:$T),
+ (BGTID GPR:$L, bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETLT), bb:$T),
+ (BLTID GPR:$L, bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETGE), bb:$T),
+ (BGEID GPR:$L, bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETLE), bb:$T),
+ (BLEID GPR:$L, bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETUGT), bb:$T),
+ (BGTID (CMPU (i32 R0), GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETULT), bb:$T),
+ (BLTID (CMPU (i32 R0), GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETUGE), bb:$T),
+ (BGEID (CMPU (i32 R0), GPR:$L), bb:$T)>;
+def : Pat<(brcond (setcc (i32 GPR:$L), (i32 0), SETULE), bb:$T),
+ (BLEID (CMPU (i32 R0), GPR:$L), bb:$T)>;
+
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETEQ), bb:$T),
+ (BEQID GPR:$R, bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETNE), bb:$T),
+ (BNEID GPR:$R, bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETGT), bb:$T),
+ (BGTID GPR:$R, bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETLT), bb:$T),
+ (BLTID GPR:$R, bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETGE), bb:$T),
+ (BGEID GPR:$R, bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETLE), bb:$T),
+ (BLEID GPR:$R, bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETUGT), bb:$T),
+ (BGTID (CMPU GPR:$R, (i32 R0)), bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETULT), bb:$T),
+ (BLTID (CMPU GPR:$R, (i32 R0)), bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETUGE), bb:$T),
+ (BGEID (CMPU GPR:$R, (i32 R0)), bb:$T)>;
+def : Pat<(brcond (setcc (i32 0), (i32 GPR:$R), SETULE), bb:$T),
+ (BLEID (CMPU GPR:$R, (i32 R0)), bb:$T)>;
+
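The Select_CC pseudo's trailing immediate encodes the condition: reading the patterns above, 1 through 6 map to EQ, NE, GT, LT, GE and LE, and the unsigned forms first materialize a flag with CMPU against R0 so they can reuse the signed codes 3 through 6. A minimal, self-contained C++ sketch of that mapping — the enumerator names are invented here for illustration, only the values come from the patterns:

enum MBlazeSelectCCCode {   // illustrative names; values as used by Select_CC
  SCC_EQ = 1, SCC_NE = 2,   // equality conditions
  SCC_GT = 3, SCC_LT = 4,   // signed (or post-CMPU unsigned) orderings
  SCC_GE = 5, SCC_LE = 6
};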
def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETEQ), bb:$T),
(BEQID (CMP GPR:$R, GPR:$L), bb:$T)>;
def : Pat<(brcond (setcc (i32 GPR:$L), (i32 GPR:$R), SETNE), bb:$T),
@@ -869,11 +1032,11 @@ def : Pat<(store (i32 GPR:$dst), xaddr:$addr), (SW GPR:$dst, xaddr:$addr)>;
def : Pat<(load xaddr:$addr), (i32 (LW xaddr:$addr))>;
// 16-bit load and store
-def : Pat<(truncstorei16 (i32 GPR:$dst), xaddr:$addr), (SH GPR:$dst, xaddr:$addr)>;
+def : Pat<(truncstorei16 (i32 GPR:$dst), xaddr:$ad), (SH GPR:$dst, xaddr:$ad)>;
def : Pat<(zextloadi16 xaddr:$addr), (i32 (LHU xaddr:$addr))>;
// 8-bit load and store
-def : Pat<(truncstorei8 (i32 GPR:$dst), xaddr:$addr), (SB GPR:$dst, xaddr:$addr)>;
+def : Pat<(truncstorei8 (i32 GPR:$dst), xaddr:$ad), (SB GPR:$dst, xaddr:$ad)>;
def : Pat<(zextloadi8 xaddr:$addr), (i32 (LBU xaddr:$addr))>;
// Peepholes
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
index ea81dd6..91aaf94 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp
@@ -1,4 +1,4 @@
-//===- MBlazeIntrinsicInfo.cpp - Intrinsic Information -00-------*- C++ -*-===//
+//===-- MBlazeIntrinsicInfo.cpp - Intrinsic Information -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,6 +18,7 @@
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cstring>
using namespace llvm;
@@ -73,16 +74,13 @@ lookupGCCName(const char *Name) const {
}
bool MBlazeIntrinsicInfo::isOverloaded(unsigned IntrID) const {
- // Overload Table
- const bool OTable[] = {
+ if (IntrID == 0)
+ return false;
+
+ unsigned id = IntrID - Intrinsic::num_intrinsics + 1;
#define GET_INTRINSIC_OVERLOAD_TABLE
#include "MBlazeGenIntrinsics.inc"
#undef GET_INTRINSIC_OVERLOAD_TABLE
- };
- if (IntrID == 0)
- return false;
- else
- return OTable[IntrID - Intrinsic::num_intrinsics];
}
/// This defines the "getAttributes(ID id)" method.
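The rewritten isOverloaded() drops the locally copied OTable and instead includes the TableGen-emitted overload table directly, indexed by the rebased `id`. A guess at the shape of the generated code — the real body lives in MBlazeGenIntrinsics.inc and may differ:

// Hypothetical expansion of GET_INTRINSIC_OVERLOAD_TABLE; not the actual
// generated code.
bool isOverloadedSketch(unsigned id) {
  switch (id) {
  default: return false;
  // case 1: return true;   // one entry per overloaded intrinsic
  }
}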
@@ -92,7 +90,7 @@ bool MBlazeIntrinsicInfo::isOverloaded(unsigned IntrID) const {
static FunctionType *getType(LLVMContext &Context, unsigned id) {
Type *ResultTy = NULL;
- std::vector<Type*> ArgTys;
+ SmallVector<Type*, 8> ArgTys;
bool IsVarArg = false;
#define GET_INTRINSIC_GENERATOR
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.h
index 80760d8..34f3792 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsicInfo.h
@@ -1,4 +1,4 @@
-//===- MBlazeIntrinsicInfo.h - MBlaze Intrinsic Information -----*- C++ -*-===//
+//===-- MBlazeIntrinsicInfo.h - MBlaze Intrinsic Information ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td
index 278afbe..b5dc595 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeIntrinsics.td
@@ -1,4 +1,4 @@
-//===- IntrinsicsMBlaze.td - Defines MBlaze intrinsics -----*- tablegen -*-===//
+//===-- IntrinsicsMBlaze.td - Defines MBlaze intrinsics ----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp
index a7e400b..6b9f42e 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.cpp
@@ -1,4 +1,4 @@
-//===-- MBLazeMCInstLower.cpp - Convert MBlaze MachineInstr to an MCInst---===//
+//===-- MBlazeMCInstLower.cpp - Convert MBlaze MachineInstr to an MCInst---===//
//
// The LLVM Compiler Infrastructure
//
@@ -85,9 +85,7 @@ GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
MCSymbol *MBlazeMCInstLower::
GetBlockAddressSymbol(const MachineOperand &MO) const {
switch (MO.getTargetFlags()) {
- default:
- assert(0 && "Unknown target flag on GV operand");
-
+ default: llvm_unreachable("Unknown target flag on GV operand");
case 0: break;
}
@@ -150,7 +148,7 @@ void MBlazeMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
case MachineOperand::MO_BlockAddress:
MCOp = LowerSymbolOperand(MO, GetBlockAddressSymbol(MO));
break;
- case MachineOperand::MO_FPImmediate:
+ case MachineOperand::MO_FPImmediate: {
bool ignored;
APFloat FVal = MO.getFPImm()->getValueAPF();
FVal.convert(APFloat::IEEEsingle, APFloat::rmTowardZero, &ignored);
@@ -160,6 +158,9 @@ void MBlazeMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
MCOp = MCOperand::CreateImm(Val);
break;
}
+ case MachineOperand::MO_RegisterMask:
+ continue;
+ }
OutMI.addOperand(MCOp);
}
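MO_RegisterMask operands attach call-clobber information to call instructions and have no MCOperand counterpart, so MCInst lowering simply skips them. An in-tree-style fragment (LLVM headers assumed) mirroring the hunk above:

// Inside the operand loop of Lower():
switch (MO.getType()) {
case MachineOperand::MO_RegisterMask:
  continue;               // clobber info only; nothing to emit into the MCInst
// ... every other operand kind produces an MCOp ...
default: break;
}
OutMI.addOperand(MCOp);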
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h
index 92196f2..7b97744 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCInstLower.h
@@ -1,4 +1,4 @@
-//===-- MBlazeMCInstLower.h - Lower MachineInstr to MCInst ----------------===//
+//===-- MBlazeMCInstLower.h - Lower MachineInstr to MCInst ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,6 @@
namespace llvm {
class AsmPrinter;
- class MCAsmInfo;
class MCContext;
class MCInst;
class MCOperand;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.cpp
new file mode 100644
index 0000000..2217b54
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.cpp
@@ -0,0 +1,14 @@
+//===-- MBlazeMachineFunctionInfo.cpp - Private data ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MBlazeMachineFunction.h"
+
+using namespace llvm;
+
+void MBlazeFunctionInfo::anchor() { }
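The out-of-line anchor() is the usual vtable-anchoring idiom: a class whose virtual functions are all defined inline gets a weak vtable emitted into every translation unit that uses it, while one out-of-line virtual definition pins the vtable (and RTTI) to a single object file. Self-contained illustration:

struct Info {
  virtual void anchor();                   // declared in the header
  virtual int size() const { return 0; }   // everything else may stay inline
};
void Info::anchor() {}                     // defined in exactly one .cpp file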
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h b/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h
index df39509..95cc507 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMachineFunction.h
@@ -1,4 +1,4 @@
-//===-- MBlazeMachineFunctionInfo.h - Private data ----------------*- C++ -*-=//
+//===-- MBlazeMachineFunctionInfo.h - Private data --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,7 +16,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -25,8 +24,8 @@ namespace llvm {
/// MBlazeFunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private MBlaze target-specific information for each MachineFunction.
class MBlazeFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
-private:
/// Holds for each function where on the stack the Frame Pointer must be
/// saved. This is used on Prologue and Epilogue to emit FP save/restore
int FPStackOffset;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
index 9788ba9..46f5207 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- MBlazeRegisterInfo.cpp - MBlaze Register Information -== -*- C++ -*-===//
+//===-- MBlazeRegisterInfo.cpp - MBlaze Register Information --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,9 +14,9 @@
#define DEBUG_TYPE "mblaze-frame-info"
+#include "MBlazeRegisterInfo.h"
#include "MBlaze.h"
#include "MBlazeSubtarget.h"
-#include "MBlazeRegisterInfo.h"
#include "MBlazeMachineFunction.h"
#include "llvm/Constants.h"
#include "llvm/Type.h"
@@ -54,10 +54,10 @@ unsigned MBlazeRegisterInfo::getPICCallReg() {
//===----------------------------------------------------------------------===//
/// MBlaze Callee Saved Registers
-const unsigned* MBlazeRegisterInfo::
+const uint16_t* MBlazeRegisterInfo::
getCalleeSavedRegs(const MachineFunction *MF) const {
// MBlaze callee-save register range is R20 - R31
- static const unsigned CalleeSavedRegs[] = {
+ static const uint16_t CalleeSavedRegs[] = {
MBlaze::R20, MBlaze::R21, MBlaze::R22, MBlaze::R23,
MBlaze::R24, MBlaze::R25, MBlaze::R26, MBlaze::R27,
MBlaze::R28, MBlaze::R29, MBlaze::R30, MBlaze::R31,
@@ -184,10 +184,8 @@ unsigned MBlazeRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
unsigned MBlazeRegisterInfo::getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
- return 0;
}
unsigned MBlazeRegisterInfo::getEHHandlerRegister() const {
llvm_unreachable("What is the exception handler register");
- return 0;
}
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
index 7e4b269..1d51162 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
@@ -1,4 +1,4 @@
-//===- MBlazeRegisterInfo.h - MBlaze Register Information Impl --*- C++ -*-===//
+//===-- MBlazeRegisterInfo.h - MBlaze Register Information Impl -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -46,7 +46,7 @@ struct MBlazeRegisterInfo : public MBlazeGenRegisterInfo {
static unsigned getPICCallReg();
/// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td
index 13c46ba..64cae5c 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.td
@@ -1,4 +1,4 @@
-//===- MBlazeRegisterInfo.td - MBlaze Register defs --------*- tablegen -*-===//
+//===-- MBlazeRegisterInfo.td - MBlaze Register defs -------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h b/contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h
index c298eda..6387ee2 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRelocations.h
@@ -1,4 +1,4 @@
-//===- MBlazeRelocations.h - MBlaze Code Relocations ------------*- C++ -*-===//
+//===-- MBlazeRelocations.h - MBlaze Code Relocations -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
index 4662f25..4a3ae5f 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule.td
@@ -1,4 +1,4 @@
-//===- MBlazeSchedule.td - MBlaze Scheduling Definitions ---*- tablegen -*-===//
+//===-- MBlazeSchedule.td - MBlaze Scheduling Definitions --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule3.td b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule3.td
index ccbf99d..20257a6 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule3.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule3.td
@@ -1,4 +1,4 @@
-//===- MBlazeSchedule3.td - MBlaze Scheduling Definitions --*- tablegen -*-===//
+//===-- MBlazeSchedule3.td - MBlaze Scheduling Definitions -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule5.td b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule5.td
index fa88766..ab53b42 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule5.td
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeSchedule5.td
@@ -1,4 +1,4 @@
-//===- MBlazeSchedule5.td - MBlaze Scheduling Definitions --*- tablegen -*-===//
+//===-- MBlazeSchedule5.td - MBlaze Scheduling Definitions -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp
index 7e5667f..d12d142 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.cpp
@@ -1,4 +1,4 @@
-//===- MBlazeSubtarget.cpp - MBlaze Subtarget Information -------*- C++ -*-===//
+//===-- MBlazeSubtarget.cpp - MBlaze Subtarget Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.h b/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.h
index 43b0197..eb37504 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeSubtarget.h
@@ -1,4 +1,4 @@
-//=====-- MBlazeSubtarget.h - Define Subtarget for the MBlaze -*- C++ -*--====//
+//===-- MBlazeSubtarget.h - Define Subtarget for the MBlaze ----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
index 7bff53e..dd7de9b 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MBlaze.h"
#include "MBlazeTargetMachine.h"
+#include "MBlaze.h"
#include "llvm/PassManager.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/FormattedStream.h"
@@ -33,30 +33,49 @@ extern "C" void LLVMInitializeMBlazeTarget() {
// an easier handling.
MBlazeTargetMachine::
MBlazeTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM):
- LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
- Subtarget(TT, CPU, FS),
- DataLayout("E-p:32:32:32-i8:8:8-i16:16:16"),
- InstrInfo(*this),
- FrameLowering(Subtarget),
- TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS),
+ DataLayout("E-p:32:32:32-i8:8:8-i16:16:16"),
+ InstrInfo(*this),
+ FrameLowering(Subtarget),
+ TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this),
+ InstrItins(Subtarget.getInstrItineraryData()) {
+}
+
+namespace {
+/// MBlaze Code Generator Pass Configuration Options.
+class MBlazePassConfig : public TargetPassConfig {
+public:
+ MBlazePassConfig(MBlazeTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ MBlazeTargetMachine &getMBlazeTargetMachine() const {
+ return getTM<MBlazeTargetMachine>();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *MBlazeTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new MBlazePassConfig(this, PM);
}
// Install an instruction selector pass using
// the ISelDag to generate MBlaze code.
-bool MBlazeTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createMBlazeISelDag(*this));
+bool MBlazePassConfig::addInstSelector() {
+ PM.add(createMBlazeISelDag(getMBlazeTargetMachine()));
return false;
}
// Implemented by targets that want to run passes immediately before
// machine code is emitted. Return true if -print-machineinstrs should
// print out the code after the passes.
-bool MBlazeTargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createMBlazeDelaySlotFillerPass(*this));
+bool MBlazePassConfig::addPreEmitPass() {
+ PM.add(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine()));
return true;
}
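This hunk is part of the tree-wide move from per-target addInstSelector(PM, OptLevel) and addPreEmitPass(PM, OptLevel) hooks to a TargetPassConfig object that owns the pipeline knobs: a target now overrides createPassConfig() plus the parameterless hooks, and the opt level comes from the config. A skeletal sketch — the Foo names are placeholders, the hook names match the hunk above:

class FooPassConfig : public TargetPassConfig {
public:
  FooPassConfig(FooTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}
  virtual bool addInstSelector();      // no OptLevel parameter any more
};
TargetPassConfig *FooTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new FooPassConfig(this, PM);
}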
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
index c1bc08a..1647a21 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeTargetMachine.h
@@ -1,4 +1,4 @@
-//===-- MBlazeTargetMachine.h - Define TargetMachine for MBlaze --- C++ ---===//
+//===-- MBlazeTargetMachine.h - Define TargetMachine for MBlaze -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -43,7 +43,9 @@ namespace llvm {
public:
MBlazeTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
virtual const MBlazeInstrInfo *getInstrInfo() const
{ return &InstrInfo; }
@@ -77,8 +79,7 @@ namespace llvm {
}
// Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level Opt);
- virtual bool addPreEmitPass(PassManagerBase &PM,CodeGenOpt::Level Opt);
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp
index 08f7d46a..f383fec 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp
@@ -27,7 +27,7 @@ using namespace llvm;
static unsigned getFixupKindSize(unsigned Kind) {
switch (Kind) {
- default: assert(0 && "invalid fixup kind!");
+ default: llvm_unreachable("invalid fixup kind!");
case FK_Data_1: return 1;
case FK_PCRel_2:
case FK_Data_2: return 2;
@@ -39,12 +39,6 @@ static unsigned getFixupKindSize(unsigned Kind) {
namespace {
-class MBlazeELFObjectWriter : public MCELFObjectTargetWriter {
-public:
- MBlazeELFObjectWriter(Triple::OSType OSType)
- : MCELFObjectTargetWriter(/*is64Bit*/ false, OSType, ELF::EM_MBLAZE,
- /*HasRelocationAddend*/ true) {}
-};
class MBlazeAsmBackend : public MCAsmBackend {
public:
@@ -56,11 +50,16 @@ public:
return 2;
}
- bool MayNeedRelaxation(const MCInst &Inst) const;
+ bool mayNeedRelaxation(const MCInst &Inst) const;
+
+ bool fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const;
- void RelaxInstruction(const MCInst &Inst, MCInst &Res) const;
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const;
- bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
unsigned getPointerSize() const {
return 4;
@@ -76,7 +75,7 @@ static unsigned getRelaxedOpcode(unsigned Op) {
}
}
-bool MBlazeAsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
+bool MBlazeAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
if (getRelaxedOpcode(Inst.getOpcode()) == Inst.getOpcode())
return false;
@@ -87,12 +86,24 @@ bool MBlazeAsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
return hasExprOrImm;
}
-void MBlazeAsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
+bool MBlazeAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const {
+ // FIXME: Is this right? It's what the "generic" code was doing before,
+ // but is X86 specific. Is it actually true for MBlaze also, or was it
+ // just close enough to not be a big deal?
+ //
+ // Relax if the value is too big for a (signed) i8.
+ return int64_t(Value) != int64_t(int8_t(Value));
+}
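The i8-range test is easy to verify in isolation: a fixup value relaxes exactly when sign-extending its low byte does not reproduce it. A standalone, runnable version of the predicate:

#include <cassert>
#include <cstdint>
static bool needsRelax(uint64_t Value) {
  return int64_t(Value) != int64_t(int8_t(Value));  // same test as above
}
int main() {
  assert(!needsRelax(127));             // largest value that still fits
  assert(needsRelax(128));              // first value that must relax
  assert(!needsRelax(uint64_t(-128)));  // -128 also fits in a signed i8
}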
+
+void MBlazeAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
Res = Inst;
Res.setOpcode(getRelaxedOpcode(Inst.getOpcode()));
}
-bool MBlazeAsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
+bool MBlazeAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
if ((Count % 4) != 0)
return false;
@@ -106,20 +117,19 @@ bool MBlazeAsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
namespace {
class ELFMBlazeAsmBackend : public MBlazeAsmBackend {
public:
- Triple::OSType OSType;
- ELFMBlazeAsmBackend(const Target &T, Triple::OSType _OSType)
- : MBlazeAsmBackend(T), OSType(_OSType) { }
+ uint8_t OSABI;
+ ELFMBlazeAsmBackend(const Target &T, uint8_t _OSABI)
+ : MBlazeAsmBackend(T), OSABI(_OSABI) { }
- void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const;
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createELFObjectWriter(new MBlazeELFObjectWriter(OSType), OS,
- /*IsLittleEndian*/ false);
+ return createMBlazeELFObjectWriter(OS, OSABI);
}
};
-void ELFMBlazeAsmBackend::ApplyFixup(const MCFixup &Fixup, char *Data,
+void ELFMBlazeAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned DataSize, uint64_t Value) const {
unsigned Size = getFixupKindSize(Fixup.getKind());
@@ -155,5 +165,6 @@ MCAsmBackend *llvm::createMBlazeAsmBackend(const Target &T, StringRef TT) {
if (TheTriple.isOSWindows())
assert(0 && "Windows not supported on MBlaze");
- return new ELFMBlazeAsmBackend(T, TheTriple.getOS());
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
+ return new ELFMBlazeAsmBackend(T, OSABI);
}
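The backend stops carrying a Triple::OSType and resolves it once, via MCELFObjectTargetWriter::getOSABI() as above, to the single OSABI byte that lands in the ELF header's e_ident[EI_OSABI] field. The ELF-side view, with spec-defined constants, as a runnable sketch:

// e_ident layout per the ELF specification; EI_OSABI is the OS/ABI byte.
enum { EI_OSABI = 7 };
int main() {
  unsigned char e_ident[16] = {};
  e_ident[EI_OSABI] = 0;   // 0 == ELFOSABI_NONE (classic System V)
}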
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h
index 776dbc4..437026e 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeBaseInfo.h
@@ -51,6 +51,7 @@ namespace MBlazeII {
FRRRR,
FRI,
FC,
+ FRR,
FormMask = 63
//===------------------------------------------------------------------===//
@@ -95,7 +96,6 @@ static inline bool isSpecialMBlazeRegister(unsigned Reg) {
default:
return false;
}
- return false; // Not reached
}
/// getMBlazeRegisterNumbering - Given the enum value for some register, e.g.
@@ -160,7 +160,6 @@ static inline unsigned getMBlazeRegisterNumbering(unsigned RegEnum) {
case MBlaze::RPVR11 : return 0x200B;
default: llvm_unreachable("Unknown register number!");
}
- return 0; // Not reached
}
/// getRegisterFromNumbering - Given the enum value for some register, e.g.
@@ -201,7 +200,6 @@ static inline unsigned getMBlazeRegisterFromNumbering(unsigned Reg) {
case 31 : return MBlaze::R31;
default: llvm_unreachable("Unknown register number!");
}
- return 0; // Not reached
}
static inline unsigned getSpecialMBlazeRegisterFromNumbering(unsigned Reg) {
@@ -232,7 +230,6 @@ static inline unsigned getSpecialMBlazeRegisterFromNumbering(unsigned Reg) {
case 0x200B : return MBlaze::RPVR11;
default: llvm_unreachable("Unknown register number!");
}
- return 0; // Not reached
}
} // end namespace llvm;
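The deleted "return 0; // Not reached" lines were dead code: llvm_unreachable is annotated noreturn, so control cannot fall out of the default case and compilers no longer warn about a missing return. The same idiom in plain C++ (register mapping made up for the example):

#include <cstdlib>
[[noreturn]] static void die(const char *) { std::abort(); }
static unsigned regNumber(unsigned Reg) {
  switch (Reg) {
  case 0: return 10;                         // hypothetical mapping
  default: die("Unknown register number!");  // noreturn: nothing can follow
  }
}
int main() { return regNumber(0) == 10 ? 0 : 1; }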
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp
new file mode 100644
index 0000000..2824b3c
--- /dev/null
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeELFObjectWriter.cpp
@@ -0,0 +1,77 @@
+//===-- MBlazeELFObjectWriter.cpp - MBlaze ELF Writer ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/MBlazeMCTargetDesc.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+ class MBlazeELFObjectWriter : public MCELFObjectTargetWriter {
+ public:
+ MBlazeELFObjectWriter(uint8_t OSABI);
+
+ virtual ~MBlazeELFObjectWriter();
+ protected:
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) const;
+ };
+}
+
+MBlazeELFObjectWriter::MBlazeELFObjectWriter(uint8_t OSABI)
+ : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_MBLAZE,
+ /*HasRelocationAddend*/ false) {}
+
+MBlazeELFObjectWriter::~MBlazeELFObjectWriter() {
+}
+
+unsigned MBlazeELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) const {
+ // determine the type of the relocation
+ unsigned Type;
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case FK_PCRel_4:
+ Type = ELF::R_MICROBLAZE_64_PCREL;
+ break;
+ case FK_PCRel_2:
+ Type = ELF::R_MICROBLAZE_32_PCREL;
+ break;
+ }
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case FK_Data_4:
+      Type = ((IsRelocWithSymbol || Addend != 0)
+ ? ELF::R_MICROBLAZE_32
+ : ELF::R_MICROBLAZE_64);
+ break;
+ case FK_Data_2:
+ Type = ELF::R_MICROBLAZE_32;
+ break;
+ }
+ }
+ return Type;
+}
+
+MCObjectWriter *llvm::createMBlazeELFObjectWriter(raw_ostream &OS,
+ uint8_t OSABI) {
+ MCELFObjectTargetWriter *MOTW = new MBlazeELFObjectWriter(OSABI);
+ return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/ false);
+}
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp
index 0d88466..8231f07 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.cpp
@@ -14,6 +14,8 @@
#include "MBlazeMCAsmInfo.h"
using namespace llvm;
+void MBlazeMCAsmInfo::anchor() { }
+
MBlazeMCAsmInfo::MBlazeMCAsmInfo() {
IsLittleEndian = false;
StackGrowsUp = false;
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h
index e68dd58..977f9a6 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- MBlazeMCAsmInfo.h - MBlaze asm properties -----------*- C++ -*--====//
+//===-- MBlazeMCAsmInfo.h - MBlaze asm properties --------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,13 +14,13 @@
#ifndef MBLAZETARGETASMINFO_H
#define MBLAZETARGETASMINFO_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
class MBlazeMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
public:
explicit MBlazeMCAsmInfo();
};
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
index 1514557..c9b1636 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp
@@ -43,7 +43,7 @@ public:
// getBinaryCodeForInstr - TableGen'erated function for getting the
// binary encoding for an instruction.
- unsigned getBinaryCodeForInstr(const MCInst &MI) const;
+ uint64_t getBinaryCodeForInstr(const MCInst &MI) const;
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
@@ -54,8 +54,8 @@ public:
static unsigned GetMBlazeRegNum(const MCOperand &MO) {
// FIXME: getMBlazeRegisterNumbering() is sufficient?
- assert(0 && "MBlazeMCCodeEmitter::GetMBlazeRegNum() not yet implemented.");
- return 0;
+ llvm_unreachable("MBlazeMCCodeEmitter::GetMBlazeRegNum() not yet "
+ "implemented.");
}
void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
@@ -109,17 +109,14 @@ unsigned MBlazeMCCodeEmitter::getMachineOpValue(const MCInst &MI,
const MCOperand &MO) const {
if (MO.isReg())
return getMBlazeRegisterNumbering(MO.getReg());
- else if (MO.isImm())
+ if (MO.isImm())
return static_cast<unsigned>(MO.getImm());
- else if (MO.isExpr())
- return 0; // The relocation has already been recorded at this point.
- else {
+ if (MO.isExpr())
+ return 0; // The relocation has already been recorded at this point.
#ifndef NDEBUG
- errs() << MO;
+ errs() << MO;
#endif
- llvm_unreachable(0);
- }
- return 0;
+ llvm_unreachable(0);
}
void MBlazeMCCodeEmitter::
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp
index 43ae281..9a7549b 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- MBlazeMCTargetDesc.cpp - MBlaze Target Descriptions -----*- C++ -*-===//
+//===-- MBlazeMCTargetDesc.cpp - MBlaze Target Descriptions ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -62,13 +62,14 @@ static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
}
static MCCodeGenInfo *createMBlazeMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
if (RM == Reloc::Default)
RM = Reloc::Static;
if (CM == CodeModel::Default)
CM = CodeModel::Small;
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
@@ -82,12 +83,10 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
if (TheTriple.isOSDarwin()) {
llvm_unreachable("MBlaze does not support Darwin MACH-O format");
- return NULL;
}
if (TheTriple.isOSWindows()) {
llvm_unreachable("MBlaze does not support Windows COFF format");
- return NULL;
}
return createELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
@@ -96,9 +95,11 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
static MCInstPrinter *createMBlazeMCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
- return new MBlazeInstPrinter(MAI);
+ return new MBlazeInstPrinter(MAI, MII, MRI);
return 0;
}
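createMCInstPrinter now receives the MCInstrInfo and MCRegisterInfo tables, so a printer can be constructed from MC-layer objects alone instead of reaching back into the Target. Registration itself is unchanged; a hedged sketch of how such a hook is typically wired up in the target's MC initializer:

// Assumed registration call; the hook signature matches the hunk above.
TargetRegistry::RegisterMCInstPrinter(TheMBlazeTarget,
                                      createMBlazeMCInstPrinter);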
diff --git a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h
index deff5cb..ae82c32 100644
--- a/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h
@@ -14,24 +14,28 @@
#ifndef MBLAZEMCTARGETDESC_H
#define MBLAZEMCTARGETDESC_H
+#include "llvm/Support/DataTypes.h"
+
namespace llvm {
class MCAsmBackend;
class MCContext;
class MCCodeEmitter;
class MCInstrInfo;
+class MCObjectWriter;
class MCSubtargetInfo;
class Target;
class StringRef;
-class formatted_raw_ostream;
+class raw_ostream;
extern Target TheMBlazeTarget;
MCCodeEmitter *createMBlazeMCCodeEmitter(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-
+
MCAsmBackend *createMBlazeAsmBackend(const Target &T, StringRef TT);
+MCObjectWriter *createMBlazeELFObjectWriter(raw_ostream &OS, uint8_t OSABI);
} // End llvm namespace
// Defines symbolic names for MBlaze registers. This defines a mapping from
diff --git a/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp
index 5d6c6ad..0930c45 100644
--- a/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp
+++ b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.cpp
@@ -92,7 +92,6 @@ void MSP430InstPrinter::printCCOperand(const MCInst *MI, unsigned OpNo,
switch (CC) {
default:
llvm_unreachable("Unsupported CC code");
- break;
case MSP430CC::COND_E:
O << "eq";
break;
diff --git a/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
index a1984a8..d32eb3a 100644
--- a/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
+++ b/contrib/llvm/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
@@ -1,4 +1,4 @@
-//===-- MSP430InstPrinter.h - Convert MSP430 MCInst to assembly syntax ----===//
+//= MSP430InstPrinter.h - Convert MSP430 MCInst to assembly syntax -*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -21,8 +21,9 @@ namespace llvm {
class MSP430InstPrinter : public MCInstPrinter {
public:
- MSP430InstPrinter(const MCAsmInfo &MAI)
- : MCInstPrinter(MAI) {}
+ MSP430InstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
diff --git a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
index ad7d380..2e328cb 100644
--- a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
@@ -12,8 +12,11 @@
//===----------------------------------------------------------------------===//
#include "MSP430MCAsmInfo.h"
+#include "llvm/ADT/StringRef.h"
using namespace llvm;
+void MSP430MCAsmInfo::anchor() { }
+
MSP430MCAsmInfo::MSP430MCAsmInfo(const Target &T, StringRef TT) {
PointerSize = 2;
diff --git a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
index f3138a2..e5c2fc2 100644
--- a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- MSP430MCAsmInfo.h - MSP430 asm properties -----------*- C++ -*--====//
+//===-- MSP430MCAsmInfo.h - MSP430 asm properties --------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,13 +14,15 @@
#ifndef MSP430TARGETASMINFO_H
#define MSP430TARGETASMINFO_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
+ class StringRef;
class Target;
- struct MSP430MCAsmInfo : public MCAsmInfo {
+ class MSP430MCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
+ public:
explicit MSP430MCAsmInfo(const Target &T, StringRef TT);
};
diff --git a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
index fda70b8..c455f6b 100644
--- a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- MSP430MCTargetDesc.cpp - MSP430 Target Descriptions -----*- C++ -*-===//
+//===-- MSP430MCTargetDesc.cpp - MSP430 Target Descriptions ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -51,18 +51,21 @@ static MCSubtargetInfo *createMSP430MCSubtargetInfo(StringRef TT, StringRef CPU,
}
static MCCodeGenInfo *createMSP430MCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
static MCInstPrinter *createMSP430MCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
- return new MSP430InstPrinter(MAI);
+ return new MSP430InstPrinter(MAI, MII, MRI);
return 0;
}
diff --git a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.h b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.h
index 35f2590..7f3505c 100644
--- a/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.h
+++ b/contrib/llvm/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.h
@@ -15,9 +15,7 @@
#define MSP430MCTARGETDESC_H
namespace llvm {
-class MCSubtargetInfo;
class Target;
-class StringRef;
extern Target TheMSP430Target;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430.td b/contrib/llvm/lib/Target/MSP430/MSP430.td
index 5cc5e6e..c6796b3 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430.td
@@ -1,4 +1,4 @@
-//===- MSP430.td - Describe the MSP430 Target Machine ---------*- tblgen -*-==//
+//===-- MSP430.td - Describe the MSP430 Target Machine -----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp b/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
index 8836549..1d1094b 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430AsmPrinter.cpp
@@ -65,7 +65,7 @@ void MSP430AsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
raw_ostream &O, const char *Modifier) {
const MachineOperand &MO = MI->getOperand(OpNum);
switch (MO.getType()) {
- default: assert(0 && "Not implemented yet!");
+ default: llvm_unreachable("Not implemented yet!");
case MachineOperand::MO_Register:
O << MSP430InstPrinter::getRegisterName(MO.getReg());
return;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp b/contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp
index bd64443..bdeb0c5 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430BranchSelector.cpp
@@ -1,4 +1,4 @@
-//===-- MSP430BranchSelector.cpp - Emit long conditional branches--*- C++ -*-=//
+//===-- MSP430BranchSelector.cpp - Emit long conditional branches ---------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
index c99f4ab..61d7f2b 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -1,4 +1,4 @@
-//======-- MSP430FrameLowering.cpp - MSP430 Frame Information -------=========//
+//===-- MSP430FrameLowering.cpp - MSP430 Frame Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -29,7 +29,7 @@ using namespace llvm;
bool MSP430FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- return (DisableFramePointerElim(MF) ||
+ return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
MF.getFrameInfo()->hasVarSizedObjects() ||
MFI->isFrameAddressTaken());
}
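Frame-pointer elimination is now controlled per TargetMachine through TargetOptions rather than a global codegen flag, which is why hasFP() reaches through MF.getTarget(). The query in isolation (in-tree fragment; MF is a MachineFunction as in the hunk above):

bool keepFP = MF.getTarget().Options.DisableFramePointerElim(MF);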
@@ -140,7 +140,7 @@ void MSP430FrameLowering::emitEpilogue(MachineFunction &MF,
while (MBBI != MBB.begin()) {
MachineBasicBlock::iterator PI = prior(MBBI);
unsigned Opc = PI->getOpcode();
- if (Opc != MSP430::POP16r && !PI->getDesc().isTerminator())
+ if (Opc != MSP430::POP16r && !PI->isTerminator())
break;
--MBBI;
}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index dc37431..071a2f7 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -29,7 +29,6 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -37,7 +36,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/VectorExtras.h"
using namespace llvm;
typedef enum {
@@ -80,7 +78,6 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
setStackPointerRegisterToSaveRestore(MSP430::SPW);
setBooleanContents(ZeroOrOneBooleanContent);
setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
- setSchedulingPreference(Sched::Latency);
// We have post-incremented loads / stores.
setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
@@ -124,8 +121,12 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
setOperationAction(ISD::CTTZ, MVT::i8, Expand);
setOperationAction(ISD::CTTZ, MVT::i16, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
setOperationAction(ISD::CTLZ, MVT::i8, Expand);
setOperationAction(ISD::CTLZ, MVT::i16, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
setOperationAction(ISD::CTPOP, MVT::i8, Expand);
setOperationAction(ISD::CTPOP, MVT::i16, Expand);
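CTTZ_ZERO_UNDEF and CTLZ_ZERO_UNDEF are new SelectionDAG nodes for the count-zeros flavours whose result is undefined on a zero input — the semantics of the GCC/Clang builtins — and targets without native support must mark them Expand alongside the plain nodes, as done above. A runnable reminder of the distinction:

#include <cassert>
int main() {
  assert(__builtin_ctz(8u) == 3);  // trailing zeros of 0b1000
  // __builtin_ctz(0u) is undefined -- exactly the _ZERO_UNDEF contract;
  // the plain CTTZ node instead defines cttz(0) as the bit width.
}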
@@ -193,7 +194,6 @@ SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
default:
llvm_unreachable("unimplemented operand");
- return SDValue();
}
}
@@ -259,19 +259,16 @@ MSP430TargetLowering::LowerFormalArguments(SDValue Chain,
case CallingConv::Fast:
return LowerCCCArguments(Chain, CallConv, isVarArg, Ins, dl, DAG, InVals);
case CallingConv::MSP430_INTR:
- if (Ins.empty())
- return Chain;
- else {
+ if (Ins.empty())
+ return Chain;
report_fatal_error("ISRs cannot have arguments");
- return SDValue();
- }
}
}
SDValue
MSP430TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -289,7 +286,6 @@ MSP430TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Outs, OutVals, Ins, dl, DAG, InVals);
case CallingConv::MSP430_INTR:
report_fatal_error("ISRs cannot be called directly");
- return SDValue();
}
}
@@ -372,7 +368,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, MVT::i16);
InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
- false, false, 0));
+ false, false, false, 0));
}
}
@@ -390,10 +386,8 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
SmallVector<CCValAssign, 16> RVLocs;
// ISRs cannot return any value.
- if (CallConv == CallingConv::MSP430_INTR && !Outs.empty()) {
+ if (CallConv == CallingConv::MSP430_INTR && !Outs.empty())
report_fatal_error("ISRs cannot return any value");
- return SDValue();
- }
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
@@ -599,8 +593,7 @@ SDValue MSP430TargetLowering::LowerShifts(SDValue Op,
// Expand non-constant shifts to loops:
if (!isa<ConstantSDNode>(N->getOperand(1)))
switch (Opc) {
- default:
- assert(0 && "Invalid shift opcode!");
+ default: llvm_unreachable("Invalid shift opcode!");
case ISD::SHL:
return DAG.getNode(MSP430ISD::SHL, dl,
VT, N->getOperand(0), N->getOperand(1));
@@ -651,7 +644,7 @@ SDValue MSP430TargetLowering::LowerExternalSymbol(SDValue Op,
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
- return DAG.getNode(MSP430ISD::Wrapper, dl, getPointerTy(), Result);;
+ return DAG.getNode(MSP430ISD::Wrapper, dl, getPointerTy(), Result);
}
SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op,
@@ -660,7 +653,7 @@ SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op,
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true);
- return DAG.getNode(MSP430ISD::Wrapper, dl, getPointerTy(), Result);;
+ return DAG.getNode(MSP430ISD::Wrapper, dl, getPointerTy(), Result);
}
static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
@@ -908,13 +901,13 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- RetAddrFI, MachinePointerInfo(), false, false, 0);
+ RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}
SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -930,7 +923,7 @@ SDValue MSP430TargetLowering::LowerFRAMEADDR(SDValue Op,
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
return FrameAddr;
}
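The extra `false` threaded into these getLoad() calls fills the new isInvariant parameter that SelectionDAG loads grew in this release; none of these stack or return-address loads are invariant, so they all pass false. A sketch of the call with the flags named — parameter names assumed from context, not quoted from the header:

SDValue L = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(),
                        /*isVolatile=*/false, /*isNonTemporal=*/false,
                        /*isInvariant=*/false, /*Alignment=*/0);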
@@ -1028,8 +1021,7 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
unsigned Opc;
const TargetRegisterClass * RC;
switch (MI->getOpcode()) {
- default:
- assert(0 && "Invalid shift opcode!");
+ default: llvm_unreachable("Invalid shift opcode!");
case MSP430::Shl8:
Opc = MSP430::SHL8r1;
RC = MSP430::GR8RegisterClass;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
index 237f604..e372f00 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
@@ -1,4 +1,4 @@
-//==-- MSP430ISelLowering.h - MSP430 DAG Lowering Interface ------*- C++ -*-==//
+//===-- MSP430ISelLowering.h - MSP430 DAG Lowering Interface ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -152,8 +152,8 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
+ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrFormats.td b/contrib/llvm/lib/Target/MSP430/MSP430InstrFormats.td
index 73aef1f..a9e87da 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrFormats.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrFormats.td
@@ -1,4 +1,4 @@
-//===- MSP430InstrFormats.td - MSP430 Instruction Formats-----*- tblgen -*-===//
+//===-- MSP430InstrFormats.td - MSP430 Instruction Formats -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index ffd4318..c03ba47 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- MSP430InstrInfo.cpp - MSP430 Instruction Information ---------------===//
+//===-- MSP430InstrInfo.cpp - MSP430 Instruction Information --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,15 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#include "MSP430.h"
#include "MSP430InstrInfo.h"
+#include "MSP430.h"
#include "MSP430MachineFunctionInfo.h"
#include "MSP430TargetMachine.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
@@ -43,8 +42,7 @@ void MSP430InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FrameIdx)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
MachineMemOperand::MOStore,
MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
@@ -72,8 +70,7 @@ void MSP430InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FrameIdx)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
@@ -133,9 +130,7 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
MSP430CC::CondCodes CC = static_cast<MSP430CC::CondCodes>(Cond[0].getImm());
switch (CC) {
- default:
- assert(0 && "Invalid branch condition!");
- break;
+ default: llvm_unreachable("Invalid branch condition!");
case MSP430CC::COND_E:
CC = MSP430CC::COND_NE;
break;
@@ -161,13 +156,12 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
}
bool MSP430InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.isTerminator()) return false;
+ if (!MI->isTerminator()) return false;
// Conditional branch is a special case.
- if (MCID.isBranch() && !MCID.isBarrier())
+ if (MI->isBranch() && !MI->isBarrier())
return true;
- if (!MCID.isPredicable())
+ if (!MI->isPredicable())
return true;
return !isPredicated(MI);
}
@@ -192,7 +186,7 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// A terminator that isn't a branch can't easily be handled
// by this analysis.
- if (!I->getDesc().isBranch())
+ if (!I->isBranch())
return true;
// Cannot handle indirect branches.
@@ -301,8 +295,7 @@ unsigned MSP430InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
switch (Desc.TSFlags & MSP430II::SizeMask) {
default:
switch (Desc.getOpcode()) {
- default:
- assert(0 && "Unknown instruction size!");
+ default: llvm_unreachable("Unknown instruction size!");
case TargetOpcode::PROLOG_LABEL:
case TargetOpcode::EH_LABEL:
case TargetOpcode::IMPLICIT_DEF:
@@ -318,8 +311,7 @@ unsigned MSP430InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
}
case MSP430II::SizeSpecial:
switch (MI->getOpcode()) {
- default:
- assert(0 && "Unknown instruction size!");
+ default: llvm_unreachable("Unknown instruction size!");
case MSP430::SAR8r1c:
case MSP430::SAR16r1c:
return 4;
@@ -331,6 +323,4 @@ unsigned MSP430InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
case MSP430II::Size6Bytes:
return 6;
}
-
- return 6;
}
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
index 90013f5..04f339b 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
@@ -1,4 +1,4 @@
-//===- MSP430InstrInfo.h - MSP430 Instruction Information -------*- C++ -*-===//
+//===-- MSP430InstrInfo.h - MSP430 Instruction Information ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,8 +14,8 @@
#ifndef LLVM_TARGET_MSP430INSTRINFO_H
#define LLVM_TARGET_MSP430INSTRINFO_H
-#include "llvm/Target/TargetInstrInfo.h"
#include "MSP430RegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "MSP430GenInstrInfo.inc"
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
index 59cb598..4348dd5 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
@@ -1,4 +1,4 @@
-//===- MSP430InstrInfo.td - MSP430 Instruction defs -----------*- tblgen-*-===//
+//===-- MSP430InstrInfo.td - MSP430 Instruction defs -------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp
index d1d9a11..b1773fb 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp
@@ -1,4 +1,4 @@
-//===-- MSP430MCInstLower.cpp - Convert MSP430 MachineInstr to an MCInst---===//
+//===-- MSP430MCInstLower.cpp - Convert MSP430 MachineInstr to an MCInst --===//
//
// The LLVM Compiler Infrastructure
//
@@ -39,7 +39,7 @@ GetGlobalAddressSymbol(const MachineOperand &MO) const {
MCSymbol *MSP430MCInstLower::
GetExternalSymbolSymbol(const MachineOperand &MO) const {
switch (MO.getTargetFlags()) {
- default: assert(0 && "Unknown target flag on GV operand");
+ default: llvm_unreachable("Unknown target flag on GV operand");
case 0: break;
}
@@ -81,7 +81,7 @@ GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
MCSymbol *MSP430MCInstLower::
GetBlockAddressSymbol(const MachineOperand &MO) const {
switch (MO.getTargetFlags()) {
- default: assert(0 && "Unknown target flag on GV operand");
+ default: llvm_unreachable("Unknown target flag on GV operand");
case 0: break;
}
@@ -116,7 +116,7 @@ void MSP430MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
switch (MO.getType()) {
default:
MI->dump();
- assert(0 && "unknown operand type");
+ llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
// Ignore all implicit register operands.
if (MO.isImplicit()) continue;
@@ -143,6 +143,9 @@ void MSP430MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
case MachineOperand::MO_BlockAddress:
MCOp = LowerSymbolOperand(MO, GetBlockAddressSymbol(MO));
+ break;
+ case MachineOperand::MO_RegisterMask:
+ continue;
}
OutMI.addOperand(MCOp);
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h
index e937696..24151e2 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MCInstLower.h
@@ -1,4 +1,4 @@
-//===-- MSP430MCInstLower.h - Lower MachineInstr to MCInst ----------------===//
+//===-- MSP430MCInstLower.h - Lower MachineInstr to MCInst ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,6 @@
namespace llvm {
class AsmPrinter;
- class MCAsmInfo;
class MCContext;
class MCInst;
class MCOperand;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp
new file mode 100644
index 0000000..0f75399
--- /dev/null
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp
@@ -0,0 +1,14 @@
+//===-- MSP430MachineFunctionInfo.cpp - MSP430 machine function info ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MSP430MachineFunctionInfo.h"
+
+using namespace llvm;
+
+void MSP430MachineFunctionInfo::anchor() { }
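
The out-of-line anchor() defined above (and added to MSP430Subtarget and MipsMCAsmInfo further down) is LLVM's vtable-anchor idiom: giving the class one non-inline virtual function pins its vtable and RTTI to a single object file instead of emitting weak copies in every translation unit that includes the header. A minimal standalone sketch of the idiom, using a hypothetical Widget class:

    // Widget.h - hypothetical class using the vtable-anchor idiom.
    class Widget {
      virtual void anchor();   // sole out-of-line virtual: the "key function"
      int Value;
    public:
      Widget() : Value(0) {}
      virtual ~Widget() {}
    };

    // Widget.cpp - the only place the vtable and type info get emitted.
    void Widget::anchor() {}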
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.h
index 383fd2e..632d6de 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MachineFunctionInfo.h
@@ -21,6 +21,8 @@ namespace llvm {
/// MSP430MachineFunctionInfo - This class is derived from MachineFunction and
/// contains private MSP430 target-specific information for each MachineFunction.
class MSP430MachineFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
+
/// CalleeSavedFrameSize - Size of the callee-saved register portion of the
/// stack frame in bytes.
unsigned CalleeSavedFrameSize;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 9049c4b..51ec71a 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- MSP430RegisterInfo.cpp - MSP430 Register Information ---------------===//
+//===-- MSP430RegisterInfo.cpp - MSP430 Register Information --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,9 +13,9 @@
#define DEBUG_TYPE "msp430-reg-info"
+#include "MSP430RegisterInfo.h"
#include "MSP430.h"
#include "MSP430MachineFunctionInfo.h"
-#include "MSP430RegisterInfo.h"
#include "MSP430TargetMachine.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -38,27 +38,27 @@ MSP430RegisterInfo::MSP430RegisterInfo(MSP430TargetMachine &tm,
StackAlign = TM.getFrameLowering()->getStackAlignment();
}
-const unsigned*
+const uint16_t*
MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const TargetFrameLowering *TFI = MF->getTarget().getFrameLowering();
const Function* F = MF->getFunction();
- static const unsigned CalleeSavedRegs[] = {
+ static const uint16_t CalleeSavedRegs[] = {
MSP430::FPW, MSP430::R5W, MSP430::R6W, MSP430::R7W,
MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W,
0
};
- static const unsigned CalleeSavedRegsFP[] = {
+ static const uint16_t CalleeSavedRegsFP[] = {
MSP430::R5W, MSP430::R6W, MSP430::R7W,
MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W,
0
};
- static const unsigned CalleeSavedRegsIntr[] = {
+ static const uint16_t CalleeSavedRegsIntr[] = {
MSP430::FPW, MSP430::R5W, MSP430::R6W, MSP430::R7W,
MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W,
MSP430::R12W, MSP430::R13W, MSP430::R14W, MSP430::R15W,
0
};
- static const unsigned CalleeSavedRegsIntrFP[] = {
+ static const uint16_t CalleeSavedRegsIntrFP[] = {
MSP430::R5W, MSP430::R6W, MSP430::R7W,
MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W,
MSP430::R12W, MSP430::R13W, MSP430::R14W, MSP430::R15W,
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
index 10a3d53..82ee499 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
@@ -1,4 +1,4 @@
-//===- MSP430RegisterInfo.h - MSP430 Register Information Impl --*- C++ -*-===//
+//===-- MSP430RegisterInfo.h - MSP430 Register Information Impl -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -36,18 +36,11 @@ public:
MSP430RegisterInfo(MSP430TargetMachine &tm, const TargetInstrInfo &tii);
/// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
const TargetRegisterClass* getPointerRegClass(unsigned Kind = 0) const;
- const TargetRegisterClass *
- getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B, unsigned Idx) const {
- // No sub-classes makes this really easy.
- return A;
- }
-
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
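
The switch from const unsigned* to const uint16_t* above halves the size of the callee-saved register tables; register enum values fit comfortably in 16 bits. The lists stay zero-terminated, so callers iterate until they hit 0. A small illustrative sketch with made-up register numbers:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical zero-terminated callee-saved list, in the style of
    // getCalleeSavedRegs(); 0 is never a valid register number.
    static const uint16_t CalleeSavedRegs[] = { 5, 6, 7, 8, 0 };

    int main() {
      for (const uint16_t *R = CalleeSavedRegs; *R; ++R)
        std::printf("callee-saved reg %u\n", *R);
      return 0;
    }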
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
index d1c2e3f..3f2eb8c 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.td
@@ -1,4 +1,4 @@
-//===- MSP430RegisterInfo.td - MSP430 Register defs ----------*- tblgen -*-===//
+//===-- MSP430RegisterInfo.td - MSP430 Register defs -------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp b/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
index 3ee14d9..edeaf34 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.cpp
@@ -1,4 +1,4 @@
-//===- MSP430Subtarget.cpp - MSP430 Subtarget Information ---------*- C++ -*-=//
+//===-- MSP430Subtarget.cpp - MSP430 Subtarget Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,6 +21,8 @@
using namespace llvm;
+void MSP430Subtarget::anchor() { }
+
MSP430Subtarget::MSP430Subtarget(const std::string &TT,
const std::string &CPU,
const std::string &FS) :
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h b/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h
index 1ce5f11..4d8792e 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430Subtarget.h
@@ -1,4 +1,4 @@
-//====-- MSP430Subtarget.h - Define Subtarget for the MSP430 ---*- C++ -*--===//
+//===-- MSP430Subtarget.h - Define Subtarget for the MSP430 ----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,16 +15,16 @@
#define LLVM_TARGET_MSP430_SUBTARGET_H
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <string>
#define GET_SUBTARGETINFO_HEADER
#include "MSP430GenSubtargetInfo.inc"
-#include <string>
-
namespace llvm {
class StringRef;
class MSP430Subtarget : public MSP430GenSubtargetInfo {
+ virtual void anchor();
bool ExtendedInsts;
public:
/// This constructor initializes the data members to match that
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
index 4dd8933..9f2eda1 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MSP430.h"
#include "MSP430TargetMachine.h"
+#include "MSP430.h"
#include "llvm/PassManager.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -28,24 +28,43 @@ MSP430TargetMachine::MSP430TargetMachine(const Target &T,
StringRef TT,
StringRef CPU,
StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS),
// FIXME: Check TargetData string.
DataLayout("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
InstrInfo(*this), TLInfo(*this), TSInfo(*this),
FrameLowering(Subtarget) { }
+namespace {
+/// MSP430 Code Generator Pass Configuration Options.
+class MSP430PassConfig : public TargetPassConfig {
+public:
+ MSP430PassConfig(MSP430TargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ MSP430TargetMachine &getMSP430TargetMachine() const {
+ return getTM<MSP430TargetMachine>();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *MSP430TargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new MSP430PassConfig(this, PM);
+}
-bool MSP430TargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+bool MSP430PassConfig::addInstSelector() {
// Install an instruction selector.
- PM.add(createMSP430ISelDag(*this, OptLevel));
+ PM.add(createMSP430ISelDag(getMSP430TargetMachine(), getOptLevel()));
return false;
}
-bool MSP430TargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+bool MSP430PassConfig::addPreEmitPass() {
// Must run branch selection immediately preceding the asm printer.
PM.add(createMSP430BranchSelectionPass());
return false;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
index eb483dc..f54146b 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430TargetMachine.h
@@ -1,4 +1,4 @@
-//==-- MSP430TargetMachine.h - Define TargetMachine for MSP430 ---*- C++ -*-==//
+//===-- MSP430TargetMachine.h - Define TargetMachine for MSP430 -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -39,8 +39,9 @@ class MSP430TargetMachine : public LLVMTargetMachine {
public:
MSP430TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
virtual const TargetFrameLowering *getFrameLowering() const {
return &FrameLowering;
@@ -61,8 +62,7 @@ public:
return &TSInfo;
}
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
}; // MSP430TargetMachine.
} // end namespace llvm
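
This hunk is the MSP430 side of the tree-wide TargetPassConfig refactoring: instead of overriding addInstSelector/addPreEmitPass on the TargetMachine and threading a PassManagerBase and opt level through every hook, the target returns a pass-config object from createPassConfig() and the hooks read PM and getOptLevel() from it. A toy mirror of the shape, with hypothetical names (not the LLVM classes):

    #include <cstdio>

    // The base "config" carries what the old hooks took as parameters.
    struct PassConfig {
      int OptLevel;
      explicit PassConfig(int OL) : OptLevel(OL) {}
      virtual ~PassConfig() {}
      virtual bool addInstSelector() { return false; }
    };

    struct TargetMachine {
      virtual ~TargetMachine() {}
      virtual PassConfig *createPassConfig(int OL) { return new PassConfig(OL); }
    };

    // Target-specific hooks override the config, not the machine.
    struct MSP430LikePassConfig : PassConfig {
      explicit MSP430LikePassConfig(int OL) : PassConfig(OL) {}
      bool addInstSelector() {
        std::printf("running ISel at -O%d\n", OptLevel);
        return false;
      }
    };

    struct MSP430LikeTargetMachine : TargetMachine {
      PassConfig *createPassConfig(int OL) { return new MSP430LikePassConfig(OL); }
    };

    int main() {
      MSP430LikeTargetMachine TM;
      PassConfig *PC = TM.createPassConfig(2);
      PC->addInstSelector();
      delete PC;
      return 0;
    }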
diff --git a/contrib/llvm/lib/Target/Mangler.cpp b/contrib/llvm/lib/Target/Mangler.cpp
index 53ad155f..786a0c5 100644
--- a/contrib/llvm/lib/Target/Mangler.cpp
+++ b/contrib/llvm/lib/Target/Mangler.cpp
@@ -22,12 +22,13 @@
#include "llvm/ADT/Twine.h"
using namespace llvm;
-static bool isAcceptableChar(char C, bool AllowPeriod) {
+static bool isAcceptableChar(char C, bool AllowPeriod, bool AllowUTF8) {
if ((C < 'a' || C > 'z') &&
(C < 'A' || C > 'Z') &&
(C < '0' || C > '9') &&
C != '_' && C != '$' && C != '@' &&
- !(AllowPeriod && C == '.'))
+ !(AllowPeriod && C == '.') &&
+ !(AllowUTF8 && (C & 0x80)))
return false;
return true;
}
@@ -56,8 +57,9 @@ static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo &MAI) {
// If any of the characters in the string is an unacceptable character, force
// quotes.
bool AllowPeriod = MAI.doesAllowPeriodsInName();
+ bool AllowUTF8 = MAI.doesAllowUTF8();
for (unsigned i = 0, e = Str.size(); i != e; ++i)
- if (!isAcceptableChar(Str[i], AllowPeriod))
+ if (!isAcceptableChar(Str[i], AllowPeriod, AllowUTF8))
return true;
return false;
}
@@ -74,8 +76,9 @@ static void appendMangledName(SmallVectorImpl<char> &OutName, StringRef Str,
}
bool AllowPeriod = MAI.doesAllowPeriodsInName();
+ bool AllowUTF8 = MAI.doesAllowUTF8();
for (unsigned i = 0, e = Str.size(); i != e; ++i) {
- if (!isAcceptableChar(Str[i], AllowPeriod))
+ if (!isAcceptableChar(Str[i], AllowPeriod, AllowUTF8))
MangleLetter(OutName, Str[i]);
else
OutName.push_back(Str[i]);
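
doesAllowUTF8() lets targets whose assemblers accept UTF-8 identifiers pass such names through unmangled: every byte of a multi-byte UTF-8 sequence has the high bit set, which is exactly what C & 0x80 tests. An illustrative, self-contained copy of the predicate (same shape as the patched function, but not the LLVM API):

    #include <cstdio>

    static bool isAcceptableChar(char C, bool AllowPeriod, bool AllowUTF8) {
      if ((C < 'a' || C > 'z') && (C < 'A' || C > 'Z') &&
          (C < '0' || C > '9') && C != '_' && C != '$' && C != '@' &&
          !(AllowPeriod && C == '.') &&
          !(AllowUTF8 && (C & 0x80)))   // any byte of a UTF-8 sequence
        return false;
      return true;
    }

    int main() {
      const char *Name = "caf\xC3\xA9";   // "café" in UTF-8
      for (const char *P = Name; *P; ++P)
        std::printf("byte %#04x acceptable=%d\n", (unsigned char)*P,
                    isAcceptableChar(*P, /*AllowPeriod=*/true,
                                     /*AllowUTF8=*/true));
      return 0;
    }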
diff --git a/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
new file mode 100644
index 0000000..58b5590
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -0,0 +1,66 @@
+//===-- MipsAsmParser.cpp - Parse Mips assembly to MCInst instructions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+namespace {
+class MipsAsmParser : public MCTargetAsmParser {
+ bool MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out);
+
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
+
+ bool ParseInstruction(StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+
+ bool ParseDirective(AsmToken DirectiveID);
+
+public:
+ MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
+ : MCTargetAsmParser() {
+ }
+
+};
+}
+
+bool MipsAsmParser::
+MatchAndEmitInstruction(SMLoc IDLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out) {
+ return true;
+}
+
+bool MipsAsmParser::
+ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
+ return true;
+}
+
+bool MipsAsmParser::
+ParseInstruction(StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return true;
+}
+
+bool MipsAsmParser::
+ParseDirective(AsmToken DirectiveID) {
+ return true;
+}
+
+extern "C" void LLVMInitializeMipsAsmParser() {
+ RegisterMCAsmParser<MipsAsmParser> X(TheMipsTarget);
+ RegisterMCAsmParser<MipsAsmParser> Y(TheMipselTarget);
+ RegisterMCAsmParser<MipsAsmParser> A(TheMips64Target);
+ RegisterMCAsmParser<MipsAsmParser> B(TheMips64elTarget);
+}
diff --git a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index 3dafc61..6886b17 100644
--- a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -13,14 +13,15 @@
#define DEBUG_TYPE "asm-printer"
#include "MipsInstPrinter.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/StringExtras.h"
using namespace llvm;
-#define GET_INSTRUCTION_NAME
#include "MipsGenAsmWriter.inc"
const char* Mips::MipsFCCToString(Mips::CondCode CC) {
@@ -61,12 +62,8 @@ const char* Mips::MipsFCCToString(Mips::CondCode CC) {
llvm_unreachable("Impossible condition code!");
}
-StringRef MipsInstPrinter::getOpcodeName(unsigned Opcode) const {
- return getInstructionName(Opcode);
-}
-
void MipsInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
- OS << '$' << LowercaseString(getRegisterName(RegNo));
+ OS << '$' << StringRef(getRegisterName(RegNo)).lower();
}
void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
@@ -75,6 +72,59 @@ void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
printAnnotation(O, Annot);
}
+static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
+ int Offset = 0;
+ const MCSymbolRefExpr *SRE;
+
+ if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr)) {
+ SRE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS());
+ assert(SRE && CE && "Binary expression must be sym+const.");
+ Offset = CE->getValue();
+ }
+ else if (!(SRE = dyn_cast<MCSymbolRefExpr>(Expr)))
+ assert(false && "Unexpected MCExpr type.");
+
+ MCSymbolRefExpr::VariantKind Kind = SRE->getKind();
+
+ switch (Kind) {
+ default: llvm_unreachable("Invalid kind!");
+ case MCSymbolRefExpr::VK_None: break;
+ case MCSymbolRefExpr::VK_Mips_GPREL: OS << "%gp_rel("; break;
+ case MCSymbolRefExpr::VK_Mips_GOT_CALL: OS << "%call16("; break;
+ case MCSymbolRefExpr::VK_Mips_GOT16: OS << "%got("; break;
+ case MCSymbolRefExpr::VK_Mips_GOT: OS << "%got("; break;
+ case MCSymbolRefExpr::VK_Mips_ABS_HI: OS << "%hi("; break;
+ case MCSymbolRefExpr::VK_Mips_ABS_LO: OS << "%lo("; break;
+ case MCSymbolRefExpr::VK_Mips_TLSGD: OS << "%tlsgd("; break;
+ case MCSymbolRefExpr::VK_Mips_TLSLDM: OS << "%tlsldm("; break;
+ case MCSymbolRefExpr::VK_Mips_DTPREL_HI: OS << "%dtprel_hi("; break;
+ case MCSymbolRefExpr::VK_Mips_DTPREL_LO: OS << "%dtprel_lo("; break;
+ case MCSymbolRefExpr::VK_Mips_GOTTPREL: OS << "%gottprel("; break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_HI: OS << "%tprel_hi("; break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_LO: OS << "%tprel_lo("; break;
+ case MCSymbolRefExpr::VK_Mips_GPOFF_HI: OS << "%hi(%neg(%gp_rel("; break;
+ case MCSymbolRefExpr::VK_Mips_GPOFF_LO: OS << "%lo(%neg(%gp_rel("; break;
+ case MCSymbolRefExpr::VK_Mips_GOT_DISP: OS << "%got_disp("; break;
+ case MCSymbolRefExpr::VK_Mips_GOT_PAGE: OS << "%got_page("; break;
+ case MCSymbolRefExpr::VK_Mips_GOT_OFST: OS << "%got_ofst("; break;
+ }
+
+ OS << SRE->getSymbol();
+
+ if (Offset) {
+ if (Offset > 0)
+ OS << '+';
+ OS << Offset;
+ }
+
+ if ((Kind == MCSymbolRefExpr::VK_Mips_GPOFF_HI) ||
+ (Kind == MCSymbolRefExpr::VK_Mips_GPOFF_LO))
+ OS << ")))";
+ else if (Kind != MCSymbolRefExpr::VK_None)
+ OS << ')';
+}
+
void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
@@ -82,14 +132,14 @@ void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
printRegName(O, Op.getReg());
return;
}
-
+
if (Op.isImm()) {
O << Op.getImm();
return;
}
-
+
assert(Op.isExpr() && "unknown operand kind in printOperand");
- O << *Op.getExpr();
+ printExpr(Op.getExpr(), O);
}
void MipsInstPrinter::printUnsignedImm(const MCInst *MI, int opNum,
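
printExpr above exists so Mips relocation modifiers print in assembler syntax rather than through the generic MCExpr printer: a GOT16 reference to sym with addend 8 comes out as %got(sym+8), and the nested GPOFF forms close with three parentheses. A rough sketch of the wrapping and offset rules on plain strings (illustrative only; assumes C++11 for std::to_string):

    #include <cstdio>
    #include <string>

    // Mimics printExpr's formatting: modifier-wrap the symbol, then append
    // the offset with an explicit '+' only when it is positive.
    static std::string wrapSym(const std::string &Modifier,
                               const std::string &Sym, long Offset) {
      std::string S = Modifier.empty() ? Sym : Modifier + "(" + Sym;
      if (Offset > 0)
        S += "+" + std::to_string(Offset);
      else if (Offset < 0)
        S += std::to_string(Offset);   // '-' comes with the number
      if (!Modifier.empty())
        S += ")";
      return S;
    }

    int main() {
      std::printf("%s\n", wrapSym("%got", "sym", 8).c_str());   // %got(sym+8)
      std::printf("%s\n", wrapSym("", "sym", -4).c_str());      // sym-4
      return 0;
    }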
diff --git a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
index 5c11165..76b839b 100644
--- a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
+++ b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
@@ -1,4 +1,4 @@
-//===-- MipsInstPrinter.h - Convert Mips MCInst to assembly syntax --------===//
+//=== MipsInstPrinter.h - Convert Mips MCInst to assembly syntax -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -18,7 +18,7 @@
namespace llvm {
 // These enumeration declarations were originally in MipsInstrInfo.h but
// had to be moved here to avoid circular dependencies between
-// LLVMMipsCodeGen and LLVMMipsAsmPrinter.
+// LLVMMipsCodeGen and LLVMMipsAsmPrinter.
namespace Mips {
// Mips Branch Codes
enum FPBranchCode {
@@ -77,17 +77,17 @@ class TargetMachine;
class MipsInstPrinter : public MCInstPrinter {
public:
- MipsInstPrinter(const MCAsmInfo &MAI) : MCInstPrinter(MAI) {}
-
+ MipsInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
- static const char *getInstructionName(unsigned Opcode);
static const char *getRegisterName(unsigned RegNo);
-
- virtual StringRef getOpcodeName(unsigned Opcode) const;
+
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
-
+
private:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printUnsignedImm(const MCInst *MI, int opNum, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index f190ec4..e79be33 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -1,44 +1,172 @@
+//===-- MipsAsmBackend.cpp - Mips Asm Backend ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MipsAsmBackend and MipsELFObjectWriter classes.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#include "MipsBaseInfo.h"
+#include "MipsFixupKinds.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
-#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCSectionELF.h"
-#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Object/MachOFormat.h"
-#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+
using namespace llvm;
-namespace {
-class MipsELFObjectWriter : public MCELFObjectTargetWriter {
-public:
- MipsELFObjectWriter(bool is64Bit, Triple::OSType OSType, uint16_t EMachine,
- bool HasRelocationAddend)
- : MCELFObjectTargetWriter(is64Bit, OSType, EMachine,
- HasRelocationAddend) {}
-};
+// Prepare the fixup value for insertion into the target space.
+static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+
+ // Add/subtract and shift
+ switch (Kind) {
+ default:
+ return 0;
+ case FK_GPRel_4:
+ case FK_Data_4:
+ case Mips::fixup_Mips_LO16:
+ break;
+ case Mips::fixup_Mips_PC16:
+ // So far we are only using this type for branches.
+ // For branches we start 1 instruction after the branch
+ // so the displacement will be one instruction size less.
+ Value -= 4;
+ // The displacement is then divided by 4 to give us an 18 bit
+ // address range.
+ Value >>= 2;
+ break;
+ case Mips::fixup_Mips_26:
+ // So far we are only using this type for jumps.
+ // The displacement is then divided by 4 to give us a 28 bit
+ // address range.
+ Value >>= 2;
+ break;
+ case Mips::fixup_Mips_HI16:
+ case Mips::fixup_Mips_GOT_Local:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ Value = ((Value + 0x8000) >> 16) & 0xffff;
+ break;
+ }
+
+ return Value;
+}
+namespace {
class MipsAsmBackend : public MCAsmBackend {
+ Triple::OSType OSType;
+ bool IsLittle; // Big or little endian
+ bool Is64Bit; // 32 or 64 bit words
+
public:
- MipsAsmBackend(const Target &T)
- : MCAsmBackend() {}
+ MipsAsmBackend(const Target &T, Triple::OSType _OSType,
+ bool _isLittle, bool _is64Bit)
+ :MCAsmBackend(), OSType(_OSType), IsLittle(_isLittle), Is64Bit(_is64Bit) {}
- unsigned getNumFixupKinds() const {
- return 1; //tbd
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ return createMipsELFObjectWriter(OS, OSType, IsLittle, Is64Bit);
}
/// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
- void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const {
+ MCFixupKind Kind = Fixup.getKind();
+ Value = adjustFixupValue((unsigned)Kind, Value);
+ int64_t SymOffset = MipsGetSymAndOffset(Fixup).second;
+
+ if (!Value && !SymOffset)
+ return; // Doesn't change encoding.
+
+ // Where do we start in the object
+ unsigned Offset = Fixup.getOffset();
+ // Number of bytes we need to fixup
+ unsigned NumBytes = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
+ // Full size of the fixup area in bytes; used to index big-endian bytes
+ unsigned FullSize;
+
+ switch ((unsigned)Kind) {
+ case Mips::fixup_Mips_16:
+ FullSize = 2;
+ break;
+ case Mips::fixup_Mips_64:
+ FullSize = 8;
+ break;
+ default:
+ FullSize = 4;
+ break;
+ }
+
+ // Grab current value, if any, from bits.
+ uint64_t CurVal = 0;
+
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ unsigned Idx = IsLittle ? i : (FullSize - 1 - i);
+ CurVal |= (uint64_t)((uint8_t)Data[Offset + Idx]) << (i*8);
+ }
+
+ uint64_t Mask = ((uint64_t)(-1) >> (64 - getFixupKindInfo(Kind).TargetSize));
+ CurVal |= (Value + SymOffset) & Mask;
+
+ // Write out the fixed up bytes back to the code/data bits.
+ for (unsigned i = 0; i != NumBytes; ++i) {
+ unsigned Idx = IsLittle ? i : (FullSize - 1 - i);
+ Data[Offset + Idx] = (uint8_t)((CurVal >> (i*8)) & 0xff);
+ }
+ }
+
+ unsigned getNumFixupKinds() const { return Mips::NumTargetFixupKinds; }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[Mips::NumTargetFixupKinds] = {
+ // This table *must* be in the same order as the fixup_* kinds in
+ // MipsFixupKinds.h.
+ //
+ // name offset bits flags
+ { "fixup_Mips_16", 0, 16, 0 },
+ { "fixup_Mips_32", 0, 32, 0 },
+ { "fixup_Mips_REL32", 0, 32, 0 },
+ { "fixup_Mips_26", 0, 26, 0 },
+ { "fixup_Mips_HI16", 0, 16, 0 },
+ { "fixup_Mips_LO16", 0, 16, 0 },
+ { "fixup_Mips_GPREL16", 0, 16, 0 },
+ { "fixup_Mips_LITERAL", 0, 16, 0 },
+ { "fixup_Mips_GOT_Global", 0, 16, 0 },
+ { "fixup_Mips_GOT_Local", 0, 16, 0 },
+ { "fixup_Mips_PC16", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_Mips_CALL16", 0, 16, 0 },
+ { "fixup_Mips_GPREL32", 0, 32, 0 },
+ { "fixup_Mips_SHIFT5", 6, 5, 0 },
+ { "fixup_Mips_SHIFT6", 6, 5, 0 },
+ { "fixup_Mips_64", 0, 64, 0 },
+ { "fixup_Mips_TLSGD", 0, 16, 0 },
+ { "fixup_Mips_GOTTPREL", 0, 16, 0 },
+ { "fixup_Mips_TPREL_HI", 0, 16, 0 },
+ { "fixup_Mips_TPREL_LO", 0, 16, 0 },
+ { "fixup_Mips_TLSLDM", 0, 16, 0 },
+ { "fixup_Mips_DTPREL_HI", 0, 16, 0 },
+ { "fixup_Mips_DTPREL_LO", 0, 16, 0 },
+ { "fixup_Mips_Branch_PCRel", 0, 16, MCFixupKindInfo::FKF_IsPCRel }
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
}
/// @name Target Relaxation Interfaces
@@ -48,70 +176,62 @@ public:
/// relaxation.
///
/// \param Inst - The instruction to test.
- bool MayNeedRelaxation(const MCInst &Inst) const {
+ bool mayNeedRelaxation(const MCInst &Inst) const {
return false;
}
- /// RelaxInstruction - Relax the instruction in the given fragment to the next
- /// wider instruction.
+ /// fixupNeedsRelaxation - Target specific predicate for whether a given
+ /// fixup requires the associated instruction to be relaxed.
+ bool fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const {
+ // FIXME.
+ assert(0 && "RelaxInstruction() unimplemented");
+ return false;
+ }
+
+ /// RelaxInstruction - Relax the instruction in the given fragment
+ /// to the next wider instruction.
///
- /// \param Inst - The instruction to relax, which may be the same as the
- /// output.
+ /// \param Inst - The instruction to relax, which may be the same
+ /// as the output.
 /// \param Res [output] - On return, the relaxed instruction.
- void RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const {
}
-
+
/// @}
- /// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given
- /// output. If the target cannot generate such a sequence, it should return an
- /// error.
+ /// WriteNopData - Write an (optimal) nop sequence of Count bytes
+ /// to the given output. If the target cannot generate such a sequence,
+ /// it should return an error.
///
/// \return - True on success.
- bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
- return false;
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const {
+ return true;
}
-};
+}; // class MipsAsmBackend
-class MipsEB_AsmBackend : public MipsAsmBackend {
-public:
- Triple::OSType OSType;
-
- MipsEB_AsmBackend(const Target &T, Triple::OSType _OSType)
- : MipsAsmBackend(T), OSType(_OSType) {}
-
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createELFObjectWriter(createELFObjectTargetWriter(),
- OS, /*IsLittleEndian*/ false);
- }
-
- MCELFObjectTargetWriter *createELFObjectTargetWriter() const {
- return new MipsELFObjectWriter(false, OSType, ELF::EM_MIPS, false);
- }
-};
-
-class MipsEL_AsmBackend : public MipsAsmBackend {
-public:
- Triple::OSType OSType;
-
- MipsEL_AsmBackend(const Target &T, Triple::OSType _OSType)
- : MipsAsmBackend(T), OSType(_OSType) {}
+} // namespace
- MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createELFObjectWriter(createELFObjectTargetWriter(),
- OS, /*IsLittleEndian*/ true);
- }
+// MCAsmBackend
+MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T, StringRef TT) {
+ return new MipsAsmBackend(T, Triple(TT).getOS(),
+ /*IsLittle*/true, /*Is64Bit*/false);
+}
- MCELFObjectTargetWriter *createELFObjectTargetWriter() const {
- return new MipsELFObjectWriter(false, OSType, ELF::EM_MIPS, false);
- }
-};
+MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T, StringRef TT) {
+ return new MipsAsmBackend(T, Triple(TT).getOS(),
+ /*IsLittle*/false, /*Is64Bit*/false);
}
-MCAsmBackend *llvm::createMipsAsmBackend(const Target &T, StringRef TT) {
- Triple TheTriple(TT);
+MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T, StringRef TT) {
+ return new MipsAsmBackend(T, Triple(TT).getOS(),
+ /*IsLittle*/true, /*Is64Bit*/true);
+}
- // just return little endian for now
- //
- return new MipsEL_AsmBackend(T, Triple(TT).getOS());
+MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T, StringRef TT) {
+ return new MipsAsmBackend(T, Triple(TT).getOS(),
+ /*IsLittle*/false, /*Is64Bit*/true);
}
+
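applyFixup is a byte-wise read-modify-write so one routine serves both endiannesses: it reads NumBytes back into CurVal, ORs in the adjusted value under a TargetSize-bit mask, and writes the bytes back. For a PC16 branch, adjustFixupValue first rebases the displacement past the branch (Value -= 4) and scales it to instruction units (Value >>= 2). A self-contained sketch of the 16-bit case under those assumptions (it mirrors the logic, not the LLVM interfaces):

    #include <cstdint>
    #include <cstdio>

    // Patch a 16-bit fixup into a 4-byte instruction word, reading and
    // writing bytes in the requested endianness.
    static void applyFixup16(uint8_t *Data, unsigned Offset, uint64_t Value,
                             bool IsLittle) {
      const unsigned NumBytes = 2, FullSize = 4, TargetSize = 16;
      uint64_t CurVal = 0;
      for (unsigned i = 0; i != NumBytes; ++i) {
        unsigned Idx = IsLittle ? i : (FullSize - 1 - i);
        CurVal |= (uint64_t)Data[Offset + Idx] << (i * 8);
      }
      uint64_t Mask = (uint64_t)(-1) >> (64 - TargetSize);
      CurVal |= Value & Mask;
      for (unsigned i = 0; i != NumBytes; ++i) {
        unsigned Idx = IsLittle ? i : (FullSize - 1 - i);
        Data[Offset + Idx] = (uint8_t)(CurVal >> (i * 8));
      }
    }

    int main() {
      uint8_t Insn[4] = { 0x00, 0x00, 0x40, 0x10 };   // branch, little endian
      uint64_t Disp = 24;     // byte displacement to the target
      Disp -= 4;              // PC already points past the branch
      Disp >>= 2;             // count in instructions, not bytes
      applyFixup16(Insn, 0, Disp, /*IsLittle=*/true);
      std::printf("%02x %02x %02x %02x\n", Insn[0], Insn[1], Insn[2], Insn[3]);
      return 0;   // prints: 05 00 40 10
    }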
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index f7a6fa9..fb1c5ce 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -1,4 +1,4 @@
-//===-- MipsBaseInfo.h - Top level definitions for ARM ------- --*- C++ -*-===//
+//===-- MipsBaseInfo.h - Top level definitions for MIPS MC ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,11 +14,103 @@
#ifndef MIPSBASEINFO_H
#define MIPSBASEINFO_H
+#include "MipsFixupKinds.h"
#include "MipsMCTargetDesc.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
+
+/// MipsII - This namespace holds all of the target specific flags that
+/// instruction info tracks.
+///
+namespace MipsII {
+ /// Target Operand Flag enum.
+ enum TOF {
+ //===------------------------------------------------------------------===//
+ // Mips Specific MachineOperand flags.
+
+ MO_NO_FLAG,
+
+ /// MO_GOT16 - Represents the offset into the global offset table at which
+ /// the address the relocation entry symbol resides during execution.
+ MO_GOT16,
+ MO_GOT,
+
+ /// MO_GOT_CALL - Represents the offset into the global offset table at
+ /// which the address of a call site relocation entry symbol resides
+ /// during execution. This is different from the above since this flag
+ /// can only be present in call instructions.
+ MO_GOT_CALL,
+
+ /// MO_GPREL - Represents the offset from the current gp value to be used
+ /// for the relocatable object file being produced.
+ MO_GPREL,
+
+ /// MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol
+ /// address.
+ MO_ABS_HI,
+ MO_ABS_LO,
+
+ /// MO_TLSGD - Represents the offset into the global offset table at which
+ // the module ID and TLS block offset reside during execution (General
+ // Dynamic TLS).
+ MO_TLSGD,
+
+ /// MO_TLSLDM - Represents the offset into the global offset table at which
+ // the module ID and TLS block offset reside during execution (Local
+ // Dynamic TLS).
+ MO_TLSLDM,
+ MO_DTPREL_HI,
+ MO_DTPREL_LO,
+
+ /// MO_GOTTPREL - Represents the offset from the thread pointer (Initial
+ // Exec TLS).
+ MO_GOTTPREL,
+
+ /// MO_TPREL_HI/LO - Represents the hi and low part of the offset from
+ // the thread pointer (Local Exec TLS).
+ MO_TPREL_HI,
+ MO_TPREL_LO,
+
+ // N32/64 Flags.
+ MO_GPOFF_HI,
+ MO_GPOFF_LO,
+ MO_GOT_DISP,
+ MO_GOT_PAGE,
+ MO_GOT_OFST
+ };
+
+ enum {
+ //===------------------------------------------------------------------===//
+ // Instruction encodings. These are the standard/most common forms for
+ // Mips instructions.
+ //
+
+ // Pseudo - This represents an instruction that is a pseudo instruction
+ // or one that has not been implemented yet. It is illegal to code generate
+ // it, but tolerated for intermediate implementation stages.
+ Pseudo = 0,
+
+ /// FrmR - This form is for instructions of the format R.
+ FrmR = 1,
+ /// FrmI - This form is for instructions of the format I.
+ FrmI = 2,
+ /// FrmJ - This form is for instructions of the format J.
+ FrmJ = 3,
+ /// FrmFR - This form is for instructions of the format FR.
+ FrmFR = 4,
+ /// FrmFI - This form is for instructions of the format FI.
+ FrmFI = 5,
+ /// FrmOther - This form is for instructions that have no specific format.
+ FrmOther = 6,
+
+ FormMask = 15
+ };
+}
+
+
/// getMipsRegisterNumbering - Given the enum value for some register,
/// return the number that it corresponds to.
inline static unsigned getMipsRegisterNumbering(unsigned RegEnum)
@@ -98,15 +190,43 @@ inline static unsigned getMipsRegisterNumbering(unsigned RegEnum)
case Mips::D14:
return 28;
case Mips::SP: case Mips::SP_64: case Mips::F29: case Mips::D29_64:
+ case Mips::HWR29:
return 29;
case Mips::FP: case Mips::FP_64: case Mips::F30: case Mips::D30_64:
- case Mips::D15:
+ case Mips::D15:
return 30;
case Mips::RA: case Mips::RA_64: case Mips::F31: case Mips::D31_64:
return 31;
default: llvm_unreachable("Unknown register number!");
}
- return 0; // Not reached
+}
+
+inline static std::pair<const MCSymbolRefExpr*, int64_t>
+MipsGetSymAndOffset(const MCFixup &Fixup) {
+ MCFixupKind FixupKind = Fixup.getKind();
+
+ if ((FixupKind < FirstTargetFixupKind) ||
+ (FixupKind >= MCFixupKind(Mips::LastTargetFixupKind)))
+ return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0);
+
+ const MCExpr *Expr = Fixup.getValue();
+ MCExpr::ExprKind Kind = Expr->getKind();
+
+ if (Kind == MCExpr::Binary) {
+ const MCBinaryExpr *BE = static_cast<const MCBinaryExpr*>(Expr);
+ const MCExpr *LHS = BE->getLHS();
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS());
+
+ if ((LHS->getKind() != MCExpr::SymbolRef) || !CE)
+ return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0);
+
+ return std::make_pair(cast<MCSymbolRefExpr>(LHS), CE->getValue());
+ }
+
+ if (Kind != MCExpr::SymbolRef)
+ return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0);
+
+ return std::make_pair(cast<MCSymbolRefExpr>(Expr), 0);
}
}
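
MipsGetSymAndOffset splits a fixup's expression into a (symbol, addend) pair: sym+const binary expressions yield both parts, a bare symbol yields addend 0, and anything else a null symbol. The addend matters because these Mips ELF relocations carry no explicit addend (HasRelocationAddend is false below), so applyFixup has to fold it into the encoded bits. A toy mirror of the decomposition on plain types (illustrative, not an LLVM MCExpr):

    #include <cstdio>
    #include <utility>

    // Toy expression covering only the shapes MipsGetSymAndOffset handles.
    struct Expr {
      enum Kind { SymbolRef, Constant, Binary } K;
      const char *Sym;         // valid when K == SymbolRef
      long Value;              // valid when K == Constant
      const Expr *LHS, *RHS;   // valid when K == Binary
    };

    static std::pair<const char*, long> getSymAndOffset(const Expr &E) {
      if (E.K == Expr::Binary && E.LHS->K == Expr::SymbolRef &&
          E.RHS->K == Expr::Constant)
        return std::make_pair(E.LHS->Sym, E.RHS->Value);
      if (E.K == Expr::SymbolRef)
        return std::make_pair(E.Sym, 0L);
      return std::make_pair((const char*)0, 0L);   // unsupported shape
    }

    int main() {
      Expr S = { Expr::SymbolRef, "foo", 0, 0, 0 };
      Expr C = { Expr::Constant, 0, 8, 0, 0 };
      Expr B = { Expr::Binary, 0, 0, &S, &C };
      std::pair<const char*, long> P = getSymAndOffset(B);
      std::printf("%s+%ld\n", P.first, P.second);   // foo+8
      return 0;
    }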
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
new file mode 100644
index 0000000..2091bec
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -0,0 +1,249 @@
+//===-- MipsELFObjectWriter.cpp - Mips ELF Writer -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MCTargetDesc/MipsFixupKinds.h"
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <list>
+
+using namespace llvm;
+
+namespace {
+ struct RelEntry {
+ RelEntry(const ELFRelocationEntry &R, const MCSymbol *S, int64_t O) :
+ Reloc(R), Sym(S), Offset(O) {}
+ ELFRelocationEntry Reloc;
+ const MCSymbol *Sym;
+ int64_t Offset;
+ };
+
+ typedef std::list<RelEntry> RelLs;
+ typedef RelLs::iterator RelLsIter;
+
+ class MipsELFObjectWriter : public MCELFObjectTargetWriter {
+ public:
+ MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI);
+
+ virtual ~MipsELFObjectWriter();
+
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) const;
+ virtual unsigned getEFlags() const;
+ virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
+ virtual void sortRelocs(const MCAssembler &Asm,
+ std::vector<ELFRelocationEntry> &Relocs);
+ };
+}
+
+MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI)
+ : MCELFObjectTargetWriter(_is64Bit, OSABI, ELF::EM_MIPS,
+ /*HasRelocationAddend*/ false) {}
+
+MipsELFObjectWriter::~MipsELFObjectWriter() {}
+
+// FIXME: get the real EABI Version from the Subtarget class.
+unsigned MipsELFObjectWriter::getEFlags() const {
+
+ // FIXME: We can't tell if we are PIC (dynamic) or CPIC (static)
+ unsigned Flag = ELF::EF_MIPS_NOREORDER;
+
+ if (is64Bit())
+ Flag |= ELF::EF_MIPS_ARCH_64R2;
+ else
+ Flag |= ELF::EF_MIPS_ARCH_32R2;
+ return Flag;
+}
+
+const MCSymbol *MipsELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ assert(Target.getSymA() && "SymA cannot be 0.");
+ const MCSymbol &Sym = Target.getSymA()->getSymbol().AliasedSymbol();
+
+ if (Sym.getSection().getKind().isMergeableCString() ||
+ Sym.getSection().getKind().isMergeableConst())
+ return &Sym;
+
+ return NULL;
+}
+
+unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) const {
+ // determine the type of the relocation
+ unsigned Type = (unsigned)ELF::R_MIPS_NONE;
+ unsigned Kind = (unsigned)Fixup.getKind();
+
+ switch (Kind) {
+ default:
+ llvm_unreachable("invalid fixup kind!");
+ case FK_Data_4:
+ Type = ELF::R_MIPS_32;
+ break;
+ case FK_GPRel_4:
+ Type = ELF::R_MIPS_GPREL32;
+ break;
+ case Mips::fixup_Mips_GPREL16:
+ Type = ELF::R_MIPS_GPREL16;
+ break;
+ case Mips::fixup_Mips_26:
+ Type = ELF::R_MIPS_26;
+ break;
+ case Mips::fixup_Mips_CALL16:
+ Type = ELF::R_MIPS_CALL16;
+ break;
+ case Mips::fixup_Mips_GOT_Global:
+ case Mips::fixup_Mips_GOT_Local:
+ Type = ELF::R_MIPS_GOT16;
+ break;
+ case Mips::fixup_Mips_HI16:
+ Type = ELF::R_MIPS_HI16;
+ break;
+ case Mips::fixup_Mips_LO16:
+ Type = ELF::R_MIPS_LO16;
+ break;
+ case Mips::fixup_Mips_TLSGD:
+ Type = ELF::R_MIPS_TLS_GD;
+ break;
+ case Mips::fixup_Mips_GOTTPREL:
+ Type = ELF::R_MIPS_TLS_GOTTPREL;
+ break;
+ case Mips::fixup_Mips_TPREL_HI:
+ Type = ELF::R_MIPS_TLS_TPREL_HI16;
+ break;
+ case Mips::fixup_Mips_TPREL_LO:
+ Type = ELF::R_MIPS_TLS_TPREL_LO16;
+ break;
+ case Mips::fixup_Mips_TLSLDM:
+ Type = ELF::R_MIPS_TLS_LDM;
+ break;
+ case Mips::fixup_Mips_DTPREL_HI:
+ Type = ELF::R_MIPS_TLS_DTPREL_HI16;
+ break;
+ case Mips::fixup_Mips_DTPREL_LO:
+ Type = ELF::R_MIPS_TLS_DTPREL_LO16;
+ break;
+ case Mips::fixup_Mips_Branch_PCRel:
+ case Mips::fixup_Mips_PC16:
+ Type = ELF::R_MIPS_PC16;
+ break;
+ }
+
+ return Type;
+}
+
+// Return true if R is either a GOT16 against a local symbol or HI16.
+static bool NeedsMatchingLo(const MCAssembler &Asm, const RelEntry &R) {
+ if (!R.Sym)
+ return false;
+
+ MCSymbolData &SD = Asm.getSymbolData(R.Sym->AliasedSymbol());
+
+ return ((R.Reloc.Type == ELF::R_MIPS_GOT16) && !SD.isExternal()) ||
+ (R.Reloc.Type == ELF::R_MIPS_HI16);
+}
+
+static bool HasMatchingLo(const MCAssembler &Asm, RelLsIter I, RelLsIter Last) {
+ if (I == Last)
+ return false;
+
+ RelLsIter Hi = I++;
+
+ return (I->Reloc.Type == ELF::R_MIPS_LO16) && (Hi->Sym == I->Sym) &&
+ (Hi->Offset == I->Offset);
+}
+
+static bool HasSameSymbol(const RelEntry &R0, const RelEntry &R1) {
+ return R0.Sym == R1.Sym;
+}
+
+static int CompareOffset(const RelEntry &R0, const RelEntry &R1) {
+ return (R0.Offset > R1.Offset) ? 1 : ((R0.Offset == R1.Offset) ? 0 : -1);
+}
+
+void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
+ std::vector<ELFRelocationEntry> &Relocs) {
+ // Call the default function first. Relocations are sorted in descending
+ // order of r_offset.
+ MCELFObjectTargetWriter::sortRelocs(Asm, Relocs);
+
+ RelLs RelocLs;
+ std::vector<RelLsIter> Unmatched;
+
+ // Fill RelocLs. Traverse Relocs backwards so that relocations in RelocLs
+ // are in ascending order of r_offset.
+ for (std::vector<ELFRelocationEntry>::reverse_iterator R = Relocs.rbegin();
+ R != Relocs.rend(); ++R) {
+ std::pair<const MCSymbolRefExpr*, int64_t> P =
+ MipsGetSymAndOffset(*R->Fixup);
+ RelocLs.push_back(RelEntry(*R, P.first ? &P.first->getSymbol() : 0,
+ P.second));
+ }
+
+ // Get list of unmatched HI16 and GOT16.
+ for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R)
+ if (NeedsMatchingLo(Asm, *R) && !HasMatchingLo(Asm, R, --RelocLs.end()))
+ Unmatched.push_back(R);
+
+ // Insert unmatched HI16 and GOT16 immediately before their matching LO16.
+ for (std::vector<RelLsIter>::iterator U = Unmatched.begin();
+ U != Unmatched.end(); ++U) {
+ RelLsIter LoPos = RelocLs.end(), HiPos = *U;
+ bool MatchedLo = false;
+
+ for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R) {
+ if ((R->Reloc.Type == ELF::R_MIPS_LO16) && HasSameSymbol(*HiPos, *R) &&
+ (CompareOffset(*R, *HiPos) >= 0) &&
+ ((LoPos == RelocLs.end()) || ((CompareOffset(*R, *LoPos) < 0)) ||
+ (!MatchedLo && !CompareOffset(*R, *LoPos))))
+ LoPos = R;
+
+ MatchedLo = NeedsMatchingLo(Asm, *R) &&
+ HasMatchingLo(Asm, R, --RelocLs.end());
+ }
+
+ // If a matching LoPos was found, move HiPos and insert it before LoPos.
+ // Make the offsets of HiPos and LoPos match.
+ if (LoPos != RelocLs.end()) {
+ HiPos->Offset = LoPos->Offset;
+ RelocLs.insert(LoPos, *HiPos);
+ RelocLs.erase(HiPos);
+ }
+ }
+
+ // Put the sorted list back in reverse order.
+ assert(Relocs.size() == RelocLs.size());
+ unsigned I = RelocLs.size();
+
+ for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R)
+ Relocs[--I] = R->Reloc;
+}
+
+MCObjectWriter *llvm::createMipsELFObjectWriter(raw_ostream &OS,
+ uint8_t OSABI,
+ bool IsLittleEndian,
+ bool Is64Bit) {
+ MCELFObjectTargetWriter *MOTW = new MipsELFObjectWriter(Is64Bit, OSABI);
+ return createELFObjectWriter(MOTW, OS, IsLittleEndian);
+}
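
The custom sortRelocs exists because an R_MIPS_HI16 (or a GOT16 against a local symbol) carries only half of a 32-bit value; the linker pairs it with the next R_MIPS_LO16 against the same symbol, so each previously unmatched HI16 is moved to sit directly before its matching LO16. The real routine also keeps relocations in descending r_offset order; the sketch below checks just the pairing invariant the sort establishes, on a toy relocation stream:

    #include <cstdio>
    #include <cstring>

    enum RelType { HI16, LO16, OTHER };
    struct Rel { RelType Type; const char *Sym; };

    // A moved HI16 is resolvable only if the very next relocation is a
    // LO16 against the same symbol.
    static bool hi16sArePaired(const Rel *R, unsigned N) {
      for (unsigned i = 0; i != N; ++i)
        if (R[i].Type == HI16 &&
            (i + 1 == N || R[i + 1].Type != LO16 ||
             std::strcmp(R[i + 1].Sym, R[i].Sym) != 0))
          return false;
      return true;
    }

    int main() {
      Rel Good[] = { { HI16, "x" }, { LO16, "x" }, { OTHER, "y" } };
      Rel Bad[]  = { { HI16, "x" }, { OTHER, "y" }, { LO16, "x" } };
      std::printf("good=%d bad=%d\n", hi16sArePaired(Good, 3),
                  hi16sArePaired(Bad, 3));   // good=1 bad=0
      return 0;
    }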
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
index 8b099ea..9b76eda 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
@@ -1,7 +1,4 @@
-#ifndef LLVM_Mips_MipsFIXUPKINDS_H
-#define LLVM_Mips_MipsFIXUPKINDS_H
-
-//===-- Mips/MipsFixupKinds.h - Mips Specific Fixup Entries --------*- C++ -*-===//
+//===-- MipsFixupKinds.h - Mips Specific Fixup Entries ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,81 +7,100 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_MIPS_MIPSFIXUPKINDS_H
+#define LLVM_MIPS_MIPSFIXUPKINDS_H
#include "llvm/MC/MCFixup.h"
namespace llvm {
namespace Mips {
- enum Fixups {
- // fixup_Mips_xxx - R_MIPS_NONE
- fixup_Mips_NONE = FirstTargetFixupKind,
+ // Although most of the current fixup types reflect a unique relocation,
+ // one can have multiple fixup types for a given relocation and thus need
+ // to be uniquely named.
+ //
+ // This table *must* be in the same order as
+ // MCFixupKindInfo Infos[Mips::NumTargetFixupKinds]
+ // in MipsAsmBackend.cpp.
+ //
+ enum Fixups {
+ // Branch fixups resulting in R_MIPS_16.
+ fixup_Mips_16 = FirstTargetFixupKind,
- // fixup_Mips_xxx - R_MIPS_16.
- fixup_Mips_16,
+ // Pure 32 bit data fixup resulting in - R_MIPS_32.
+ fixup_Mips_32,
- // fixup_Mips_xxx - R_MIPS_32.
- fixup_Mips_32,
+ // Full 32 bit data relative data fixup resulting in - R_MIPS_REL32.
+ fixup_Mips_REL32,
- // fixup_Mips_xxx - R_MIPS_REL32.
- fixup_Mips_REL32,
+ // Jump 26 bit fixup resulting in - R_MIPS_26.
+ fixup_Mips_26,
- // fixup_Mips_xxx - R_MIPS_26.
- fixup_Mips_26,
+ // Pure upper 16 bit fixup resulting in - R_MIPS_HI16.
+ fixup_Mips_HI16,
- // fixup_Mips_xxx - R_MIPS_HI16.
- fixup_Mips_HI16,
+ // Pure lower 16 bit fixup resulting in - R_MIPS_LO16.
+ fixup_Mips_LO16,
- // fixup_Mips_xxx - R_MIPS_LO16.
- fixup_Mips_LO16,
+ // 16 bit fixup for GP offset resulting in - R_MIPS_GPREL16.
+ fixup_Mips_GPREL16,
- // fixup_Mips_xxx - R_MIPS_GPREL16.
- fixup_Mips_GPREL16,
+ // 16 bit literal fixup resulting in - R_MIPS_LITERAL.
+ fixup_Mips_LITERAL,
- // fixup_Mips_xxx - R_MIPS_LITERAL.
- fixup_Mips_LITERAL,
+ // Global symbol fixup resulting in - R_MIPS_GOT16.
+ fixup_Mips_GOT_Global,
- // fixup_Mips_xxx - R_MIPS_GOT16.
- fixup_Mips_GOT16,
+ // Local symbol fixup resulting in - R_MIPS_GOT16.
+ fixup_Mips_GOT_Local,
- // fixup_Mips_xxx - R_MIPS_PC16.
- fixup_Mips_PC16,
+ // PC relative branch fixup resulting in - R_MIPS_PC16.
+ fixup_Mips_PC16,
- // fixup_Mips_xxx - R_MIPS_CALL16.
- fixup_Mips_CALL16,
+ // resulting in - R_MIPS_CALL16.
+ fixup_Mips_CALL16,
- // fixup_Mips_xxx - R_MIPS_GPREL32.
- fixup_Mips_GPREL32,
+ // resulting in - R_MIPS_GPREL32.
+ fixup_Mips_GPREL32,
- // fixup_Mips_xxx - R_MIPS_SHIFT5.
- fixup_Mips_SHIFT5,
+ // resulting in - R_MIPS_SHIFT5.
+ fixup_Mips_SHIFT5,
- // fixup_Mips_xxx - R_MIPS_SHIFT6.
- fixup_Mips_SHIFT6,
+ // resulting in - R_MIPS_SHIFT6.
+ fixup_Mips_SHIFT6,
- // fixup_Mips_xxx - R_MIPS_64.
- fixup_Mips_64,
+ // Pure 64 bit data fixup resulting in - R_MIPS_64.
+ fixup_Mips_64,
- // fixup_Mips_xxx - R_MIPS_TLS_GD.
- fixup_Mips_TLSGD,
+ // resulting in - R_MIPS_TLS_GD.
+ fixup_Mips_TLSGD,
- // fixup_Mips_xxx - R_MIPS_TLS_GOTTPREL.
- fixup_Mips_GOTTPREL,
+ // resulting in - R_MIPS_TLS_GOTTPREL.
+ fixup_Mips_GOTTPREL,
- // fixup_Mips_xxx - R_MIPS_TLS_TPREL_HI16.
- fixup_Mips_TPREL_HI,
+ // resulting in - R_MIPS_TLS_TPREL_HI16.
+ fixup_Mips_TPREL_HI,
- // fixup_Mips_xxx - R_MIPS_TLS_TPREL_LO16.
- fixup_Mips_TPREL_LO,
+ // resulting in - R_MIPS_TLS_TPREL_LO16.
+ fixup_Mips_TPREL_LO,
- // fixup_Mips_xxx - yyy. // This should become R_MIPS_PC16
- fixup_Mips_Branch_PCRel,
+ // resulting in - R_MIPS_TLS_LDM.
+ fixup_Mips_TLSLDM,
- // Marker
- LastTargetFixupKind,
- NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
- };
-} // namespace llvm
+ // resulting in - R_MIPS_TLS_DTPREL_HI16.
+ fixup_Mips_DTPREL_HI,
+
+ // resulting in - R_MIPS_TLS_DTPREL_LO16.
+ fixup_Mips_DTPREL_LO,
+
+ // PC relative branch fixup resulting in - R_MIPS_PC16
+ fixup_Mips_Branch_PCRel,
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+ };
} // namespace Mips
+} // namespace llvm
-#endif /* LLVM_Mips_MipsFIXUPKINDS_H */
+#endif // LLVM_MIPS_MIPSFIXUPKINDS_H
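
Both files repeat the ordering warning because nothing enforces it mechanically: getFixupKindInfo indexes the Infos[] table with Kind - FirstTargetFixupKind, so the enum in this header and the name table in MipsAsmBackend.cpp must list fixups in the same order. A toy mirror of that indexing contract:

    #include <cassert>
    #include <cstdio>

    // The table is indexed by (Kind - FirstKind); reordering the enum
    // without reordering the table silently mislabels every fixup.
    enum { FirstKind = 100, fixup_A = FirstKind, fixup_B, fixup_C, LastKind,
           NumKinds = LastKind - FirstKind };
    static const char *const Names[NumKinds] = { "fixup_A", "fixup_B",
                                                 "fixup_C" };

    static const char *getName(unsigned Kind) {
      assert(Kind - FirstKind < NumKinds && "Invalid kind!");
      return Names[Kind - FirstKind];
    }

    int main() {
      std::printf("%s\n", getName(fixup_B));   // fixup_B
      return 0;
    }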
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
index 71ae804..9d67aa1 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
@@ -1,4 +1,4 @@
-//===-- MipsMCAsmInfo.cpp - Mips asm properties ---------------------------===//
+//===-- MipsMCAsmInfo.cpp - Mips Asm Properties ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,6 +16,8 @@
using namespace llvm;
+void MipsMCAsmInfo::anchor() { }
+
MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) {
Triple TheTriple(TT);
if ((TheTriple.getArch() == Triple::mips) ||
@@ -25,11 +27,12 @@ MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) {
AlignmentIsInBytes = false;
Data16bitsDirective = "\t.2byte\t";
Data32bitsDirective = "\t.4byte\t";
- Data64bitsDirective = 0;
+ Data64bitsDirective = "\t.8byte\t";
PrivateGlobalPrefix = "$";
CommentString = "#";
ZeroDirective = "\t.space\t";
GPRel32Directive = "\t.gpword\t";
+ GPRel64Directive = "\t.gpdword\t";
WeakRefDirective = "\t.weak\t";
SupportsDebugInformation = true;
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
index 41b7192..e1d8789 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- MipsMCAsmInfo.h - Mips asm properties ---------------*- C++ -*--====//
+//===-- MipsMCAsmInfo.h - Mips Asm Info ------------------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,13 +14,14 @@
#ifndef MIPSTARGETASMINFO_H
#define MIPSTARGETASMINFO_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
+ class StringRef;
class Target;
class MipsMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
public:
explicit MipsMCAsmInfo(const Target &T, StringRef TT);
};
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index d66de23..27954b1 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -1,4 +1,4 @@
-//===-- MipsMCCodeEmitter.cpp - Convert Mips code to machine code ---------===//
+//===-- MipsMCCodeEmitter.cpp - Convert Mips Code to Machine Code ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,16 +12,18 @@
//===----------------------------------------------------------------------===//
//
#define DEBUG_TYPE "mccodeemitter"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "MCTargetDesc/MipsFixupKinds.h"
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
-#include "MCTargetDesc/MipsMCTargetDesc.h"
using namespace llvm;
@@ -31,22 +33,252 @@ class MipsMCCodeEmitter : public MCCodeEmitter {
void operator=(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT
const MCInstrInfo &MCII;
const MCSubtargetInfo &STI;
+ MCContext &Ctx;
+ bool IsLittleEndian;
public:
MipsMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
- MCContext &ctx)
- : MCII(mcii), STI(sti) {}
+ MCContext &ctx, bool IsLittle) :
+ MCII(mcii), STI(sti) , Ctx(ctx), IsLittleEndian(IsLittle) {}
~MipsMCCodeEmitter() {}
- void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ void EmitByte(unsigned char C, raw_ostream &OS) const {
+ OS << (char)C;
}
+
+ void EmitInstruction(uint64_t Val, unsigned Size, raw_ostream &OS) const {
+ // Output the instruction encoding in the target byte order.
+ for (unsigned i = 0; i < Size; ++i) {
+ unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
+ EmitByte((Val >> Shift) & 0xff, OS);
+ }
+ }
+
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getJumpTargetOpValue - Return binary encoding of the jump
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getBranchTargetOpValue - Return binary encoding of the branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // getMachineOpValue - Return binary encoding of operand. If the machine
+ // operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
}; // class MipsMCCodeEmitter
} // namespace
-MCCodeEmitter *llvm::createMipsMCCodeEmitter(const MCInstrInfo &MCII,
- const MCSubtargetInfo &STI,
- MCContext &Ctx) {
- return new MipsMCCodeEmitter(MCII, STI, Ctx);
+MCCodeEmitter *llvm::createMipsMCCodeEmitterEB(const MCInstrInfo &MCII,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx)
+{
+ return new MipsMCCodeEmitter(MCII, STI, Ctx, false);
+}
+
+MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx)
+{
+ return new MipsMCCodeEmitter(MCII, STI, Ctx, true);
+}
+
+/// EncodeInstruction - Emit the instruction.
+/// The instruction size is currently always 4 bytes.
+void MipsMCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const
+{
+ uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
+
+ // Check for unimplemented opcodes.
+ // Unfortunately in MIPS both NOT and SLL will come in with Binary == 0
+ // so we have to special check for them.
+ unsigned Opcode = MI.getOpcode();
+ if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary)
+ llvm_unreachable("unimplemented opcode in EncodeInstruction()");
+
+ const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
+ uint64_t TSFlags = Desc.TSFlags;
+
+ // Pseudo instructions don't get encoded and shouldn't be here
+ // in the first place!
+ if ((TSFlags & MipsII::FormMask) == MipsII::Pseudo)
+ llvm_unreachable("Pseudo opcode found in EncodeInstruction()");
+
+ // For now all instructions are 4 bytes
+ int Size = 4; // FIXME: Have Desc.getSize() return the correct value!
+
+ EmitInstruction(Binary, Size, OS);
}
+
+/// getBranchTargetOpValue - Return binary encoding of the branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert(MO.isExpr() && "getBranchTargetOpValue expects only expressions");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_Mips_PC16)));
+ return 0;
+}
+
+/// getJumpTargetOpValue - Return binary encoding of the jump
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ assert(MO.isExpr() && "getJumpTargetOpValue expects only expressions");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_Mips_26)));
+ return 0;
+}
+
+/// getMachineOpValue - Return binary encoding of operand. If the machine
+/// operand requires relocation, record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (MO.isReg()) {
+ unsigned Reg = MO.getReg();
+ unsigned RegNo = getMipsRegisterNumbering(Reg);
+ return RegNo;
+ } else if (MO.isImm()) {
+ return static_cast<unsigned>(MO.getImm());
+ } else if (MO.isFPImm()) {
+ return static_cast<unsigned>(APFloat(MO.getFPImm())
+ .bitcastToAPInt().getHiBits(32).getLimitedValue());
+ }
+
+ // MO must be an Expr.
+ assert(MO.isExpr());
+
+ const MCExpr *Expr = MO.getExpr();
+ MCExpr::ExprKind Kind = Expr->getKind();
+
+ if (Kind == MCExpr::Binary) {
+ Expr = static_cast<const MCBinaryExpr*>(Expr)->getLHS();
+ Kind = Expr->getKind();
+ }
+
+ assert (Kind == MCExpr::SymbolRef);
+
+ Mips::Fixups FixupKind;
+
+ switch(cast<MCSymbolRefExpr>(Expr)->getKind()) {
+ case MCSymbolRefExpr::VK_Mips_GPREL:
+ FixupKind = Mips::fixup_Mips_GPREL16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT_CALL:
+ FixupKind = Mips::fixup_Mips_CALL16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT16:
+ FixupKind = Mips::fixup_Mips_GOT_Global;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOT:
+ FixupKind = Mips::fixup_Mips_GOT_Local;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_HI:
+ FixupKind = Mips::fixup_Mips_HI16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_ABS_LO:
+ FixupKind = Mips::fixup_Mips_LO16;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TLSGD:
+ FixupKind = Mips::fixup_Mips_TLSGD;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TLSLDM:
+ FixupKind = Mips::fixup_Mips_TLSLDM;
+ break;
+ case MCSymbolRefExpr::VK_Mips_DTPREL_HI:
+ FixupKind = Mips::fixup_Mips_DTPREL_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_DTPREL_LO:
+ FixupKind = Mips::fixup_Mips_DTPREL_LO;
+ break;
+ case MCSymbolRefExpr::VK_Mips_GOTTPREL:
+ FixupKind = Mips::fixup_Mips_GOTTPREL;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_HI:
+ FixupKind = Mips::fixup_Mips_TPREL_HI;
+ break;
+ case MCSymbolRefExpr::VK_Mips_TPREL_LO:
+ FixupKind = Mips::fixup_Mips_TPREL_LO;
+ break;
+ default:
+ break;
+ } // switch
+
+ Fixups.push_back(MCFixup::Create(0, MO.getExpr(), MCFixupKind(FixupKind)));
+
+ // All of the information is in the fixup.
+ return 0;
+}
+
+/// getMemEncoding - Return binary encoding of memory related operand.
+/// If the offset operand requires relocation, record the relocation.
+unsigned
+MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups);
+
+ return (OffBits & 0xFFFF) | RegBits;
+}
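
A quick check of the packing above (editor's example, not from the patch): for lw $t0, 8($sp), the base register $sp is number 29 and the offset is 8, so the operand field comes out as 29 << 16 | 8:

// Minimal standalone sketch of getMemEncoding's bit packing.
unsigned encodeMem(unsigned BaseReg, int16_t Offset) {
  return (BaseReg << 16) | (unsigned)(uint16_t)Offset; // base: 20-16, offset: 15-0
}
// encodeMem(29 /* $sp */, 8) == 0x001D0008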
+
+unsigned
+MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ assert(MI.getOperand(OpNo).isImm());
+ unsigned SizeEncoding = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups);
+ return SizeEncoding - 1;
+}
+
+// FIXME: should be called getMSBEncoding
+//
+unsigned
+MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ assert(MI.getOperand(OpNo-1).isImm());
+ assert(MI.getOperand(OpNo).isImm());
+ unsigned Position = getMachineOpValue(MI, MI.getOperand(OpNo-1), Fixups);
+ unsigned Size = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups);
+
+ return Position + Size - 1;
+}
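
These two helpers follow the MIPS32r2 field conventions: ext encodes its size operand as size - 1 (the msbd field), while ins encodes pos + size - 1 (the msb field). For example, ins $rt, $rs, 4, 8 puts 4 + 8 - 1 = 11 in the size slot:

// Editor's sketch of the two size-field conventions used above.
unsigned extSizeField(unsigned Size) { return Size - 1; }                     // msbd
unsigned insSizeField(unsigned Pos, unsigned Size) { return Pos + Size - 1; } // msb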
+
+#include "MipsGenMCCodeEmitter.inc"
+
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index 1f9e3dd..3c544f6 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- MipsMCTargetDesc.cpp - Mips Target Descriptions ---------*- C++ -*-===//
+//===-- MipsMCTargetDesc.cpp - Mips Target Descriptions -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MipsMCTargetDesc.h"
#include "MipsMCAsmInfo.h"
+#include "MipsMCTargetDesc.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/MCCodeGenInfo.h"
@@ -20,6 +20,7 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_MC_DESC
@@ -63,19 +64,24 @@ static MCAsmInfo *createMipsMCAsmInfo(const Target &T, StringRef TT) {
}
static MCCodeGenInfo *createMipsMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
- if (RM == Reloc::Default)
+ if (CM == CodeModel::JITDefault)
+ RM = Reloc::Static;
+ else if (RM == Reloc::Default)
RM = Reloc::PIC_;
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
static MCInstPrinter *createMipsMCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
- return new MipsInstPrinter(MAI);
+ return new MipsInstPrinter(MAI, MII, MRI);
}
static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
@@ -110,7 +116,8 @@ extern "C" void LLVMInitializeMipsTargetMC() {
TargetRegistry::RegisterMCInstrInfo(TheMipsTarget, createMipsMCInstrInfo);
TargetRegistry::RegisterMCInstrInfo(TheMipselTarget, createMipsMCInstrInfo);
TargetRegistry::RegisterMCInstrInfo(TheMips64Target, createMipsMCInstrInfo);
- TargetRegistry::RegisterMCInstrInfo(TheMips64elTarget, createMipsMCInstrInfo);
+ TargetRegistry::RegisterMCInstrInfo(TheMips64elTarget,
+ createMipsMCInstrInfo);
// Register the MC register info.
TargetRegistry::RegisterMCRegInfo(TheMipsTarget, createMipsMCRegisterInfo);
@@ -120,25 +127,31 @@ extern "C" void LLVMInitializeMipsTargetMC() {
createMipsMCRegisterInfo);
// Register the MC Code Emitter
- TargetRegistry::RegisterMCCodeEmitter(TheMipsTarget, createMipsMCCodeEmitter);
+ TargetRegistry::RegisterMCCodeEmitter(TheMipsTarget,
+ createMipsMCCodeEmitterEB);
TargetRegistry::RegisterMCCodeEmitter(TheMipselTarget,
- createMipsMCCodeEmitter);
+ createMipsMCCodeEmitterEL);
TargetRegistry::RegisterMCCodeEmitter(TheMips64Target,
- createMipsMCCodeEmitter);
+ createMipsMCCodeEmitterEB);
TargetRegistry::RegisterMCCodeEmitter(TheMips64elTarget,
- createMipsMCCodeEmitter);
+ createMipsMCCodeEmitterEL);
// Register the object streamer.
TargetRegistry::RegisterMCObjectStreamer(TheMipsTarget, createMCStreamer);
TargetRegistry::RegisterMCObjectStreamer(TheMipselTarget, createMCStreamer);
TargetRegistry::RegisterMCObjectStreamer(TheMips64Target, createMCStreamer);
- TargetRegistry::RegisterMCObjectStreamer(TheMips64elTarget, createMCStreamer);
+ TargetRegistry::RegisterMCObjectStreamer(TheMips64elTarget,
+ createMCStreamer);
// Register the asm backend.
- TargetRegistry::RegisterMCAsmBackend(TheMipsTarget, createMipsAsmBackend);
- TargetRegistry::RegisterMCAsmBackend(TheMipselTarget, createMipsAsmBackend);
- TargetRegistry::RegisterMCAsmBackend(TheMips64Target, createMipsAsmBackend);
- TargetRegistry::RegisterMCAsmBackend(TheMips64elTarget, createMipsAsmBackend);
+ TargetRegistry::RegisterMCAsmBackend(TheMipsTarget,
+ createMipsAsmBackendEB32);
+ TargetRegistry::RegisterMCAsmBackend(TheMipselTarget,
+ createMipsAsmBackendEL32);
+ TargetRegistry::RegisterMCAsmBackend(TheMips64Target,
+ createMipsAsmBackendEB64);
+ TargetRegistry::RegisterMCAsmBackend(TheMips64elTarget,
+ createMipsAsmBackendEL64);
// Register the MC subtarget info.
TargetRegistry::RegisterMCSubtargetInfo(TheMipsTarget,
diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index 7a0042a..547ccdd 100644
--- a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -14,25 +14,40 @@
#ifndef MIPSMCTARGETDESC_H
#define MIPSMCTARGETDESC_H
+#include "llvm/Support/DataTypes.h"
+
namespace llvm {
class MCAsmBackend;
-class MCInstrInfo;
class MCCodeEmitter;
class MCContext;
+class MCInstrInfo;
+class MCObjectWriter;
class MCSubtargetInfo;
class StringRef;
class Target;
+class raw_ostream;
extern Target TheMipsTarget;
extern Target TheMipselTarget;
extern Target TheMips64Target;
extern Target TheMips64elTarget;
-MCCodeEmitter *createMipsMCCodeEmitter(const MCInstrInfo &MCII,
- const MCSubtargetInfo &STI,
- MCContext &Ctx);
+MCCodeEmitter *createMipsMCCodeEmitterEB(const MCInstrInfo &MCII,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
+MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
+
+MCAsmBackend *createMipsAsmBackendEB32(const Target &T, StringRef TT);
+MCAsmBackend *createMipsAsmBackendEL32(const Target &T, StringRef TT);
+MCAsmBackend *createMipsAsmBackendEB64(const Target &T, StringRef TT);
+MCAsmBackend *createMipsAsmBackendEL64(const Target &T, StringRef TT);
-MCAsmBackend *createMipsAsmBackend(const Target &T, StringRef TT);
+MCObjectWriter *createMipsELFObjectWriter(raw_ostream &OS,
+ uint8_t OSABI,
+ bool IsLittleEndian,
+ bool Is64Bit);
} // End llvm namespace
// Defines symbolic names for Mips registers. This defines a mapping from
diff --git a/contrib/llvm/lib/Target/Mips/Mips.h b/contrib/llvm/lib/Target/Mips/Mips.h
index bacecf2..bafadc8 100644
--- a/contrib/llvm/lib/Target/Mips/Mips.h
+++ b/contrib/llvm/lib/Target/Mips/Mips.h
@@ -21,8 +21,6 @@
namespace llvm {
class MipsTargetMachine;
class FunctionPass;
- class MachineCodeEmitter;
- class formatted_raw_ostream;
FunctionPass *createMipsISelDag(MipsTargetMachine &TM);
FunctionPass *createMipsDelaySlotFillerPass(MipsTargetMachine &TM);
diff --git a/contrib/llvm/lib/Target/Mips/Mips.td b/contrib/llvm/lib/Target/Mips/Mips.td
index 39c2c16..cbebe84 100644
--- a/contrib/llvm/lib/Target/Mips/Mips.td
+++ b/contrib/llvm/lib/Target/Mips/Mips.td
@@ -1,4 +1,4 @@
-//===- Mips.td - Describe the Mips Target Machine ----------*- tablegen -*-===//
+//===-- Mips.td - Describe the Mips Target Machine ---------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -63,7 +63,7 @@ def FeatureMips32 : SubtargetFeature<"mips32", "MipsArchVersion", "Mips32",
[FeatureCondMov, FeatureBitCount]>;
def FeatureMips32r2 : SubtargetFeature<"mips32r2", "MipsArchVersion",
"Mips32r2", "Mips32r2 ISA Support",
- [FeatureMips32, FeatureSEInReg]>;
+ [FeatureMips32, FeatureSEInReg, FeatureSwap]>;
def FeatureMips64 : SubtargetFeature<"mips64", "MipsArchVersion",
"Mips64", "Mips64 ISA Support",
[FeatureGP64Bit, FeatureFP64Bit,
@@ -79,9 +79,9 @@ def FeatureMips64r2 : SubtargetFeature<"mips64r2", "MipsArchVersion",
class Proc<string Name, list<SubtargetFeature> Features>
: Processor<Name, MipsGenericItineraries, Features>;
-def : Proc<"mips32r1", [FeatureMips32]>;
-def : Proc<"4ke", [FeatureMips32r2]>;
-def : Proc<"mips64r1", [FeatureMips64]>;
+def : Proc<"mips32", [FeatureMips32]>;
+def : Proc<"mips32r2", [FeatureMips32r2]>;
+def : Proc<"mips64", [FeatureMips64]>;
def : Proc<"mips64r2", [FeatureMips64r2]>;
def MipsAsmWriter : AsmWriter {
diff --git a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
index 9758f4b..427e8d9 100644
--- a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td
@@ -25,68 +25,48 @@ def uimm16_64 : Operand<i64> {
// Transformation Function - get Imm - 32.
def Subtract32 : SDNodeXForm<imm, [{
- return getI32Imm((unsigned)N->getZExtValue() - 32);
+ return getImm(N, (unsigned)N->getZExtValue() - 32);
}]>;
-// imm32_63 predicate - True if imm is in range [32, 63].
-def imm32_63 : ImmLeaf<i64,
- [{return (int32_t)Imm >= 32 && (int32_t)Imm < 64;}],
- Subtract32>;
+// shamt must fit in 6 bits.
+def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>;
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
// Shifts
-class LogicR_shift_rotate_imm64<bits<6> func, bits<5> _rs, string instr_asm,
- SDNode OpNode, PatFrag PF>:
- FR<0x00, func, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt, shamt_64:$shamt),
- !strconcat(instr_asm, "\t$rd, $rt, $shamt"),
- [(set CPU64Regs:$rd, (OpNode CPU64Regs:$rt, (i64 PF:$shamt)))],
- IIAlu> {
- let rs = _rs;
-}
-
-class LogicR_shift_rotate_reg64<bits<6> func, bits<5> _shamt, string instr_asm,
- SDNode OpNode>:
- FR<0x00, func, (outs CPU64Regs:$rd), (ins CPU64Regs:$rs, CPU64Regs:$rt),
- !strconcat(instr_asm, "\t$rd, $rt, $rs"),
- [(set CPU64Regs:$rd, (OpNode CPU64Regs:$rt, CPU64Regs:$rs))], IIAlu> {
- let shamt = _shamt;
-}
+// 64-bit shift instructions.
+class shift_rotate_imm64<bits<6> func, bits<5> isRotate, string instr_asm,
+ SDNode OpNode>:
+ shift_rotate_imm<func, isRotate, instr_asm, OpNode, immZExt6, shamt,
+ CPU64Regs>;
// Mul, Div
-let rd = 0, shamt = 0, Defs = [HI64, LO64] in {
- let isCommutable = 1 in
- class Mul64<bits<6> func, string instr_asm, InstrItinClass itin>:
- FR<0x00, func, (outs), (ins CPU64Regs:$rs, CPU64Regs:$rt),
- !strconcat(instr_asm, "\t$rs, $rt"), [], itin>;
-
- class Div64<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
- FR<0x00, func, (outs), (ins CPU64Regs:$rs, CPU64Regs:$rt),
- !strconcat(instr_asm, "\t$$zero, $rs, $rt"),
- [(op CPU64Regs:$rs, CPU64Regs:$rt)], itin>;
+class Mult64<bits<6> func, string instr_asm, InstrItinClass itin>:
+ Mult<func, instr_asm, itin, CPU64Regs, [HI64, LO64]>;
+class Div64<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
+ Div<op, func, instr_asm, itin, CPU64Regs, [HI64, LO64]>;
+
+multiclass Atomic2Ops64<PatFrag Op, string Opstr> {
+ def #NAME# : Atomic2Ops<Op, Opstr, CPU64Regs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : Atomic2Ops<Op, Opstr, CPU64Regs, CPU64Regs>, Requires<[IsN64]>;
}
-// Move from Hi/Lo
-let shamt = 0 in {
-let rs = 0, rt = 0 in
-class MoveFromLOHI64<bits<6> func, string instr_asm>:
- FR<0x00, func, (outs CPU64Regs:$rd), (ins),
- !strconcat(instr_asm, "\t$rd"), [], IIHiLo>;
-
-let rt = 0, rd = 0 in
-class MoveToLOHI64<bits<6> func, string instr_asm>:
- FR<0x00, func, (outs), (ins CPU64Regs:$rs),
- !strconcat(instr_asm, "\t$rs"), [], IIHiLo>;
+multiclass AtomicCmpSwap64<PatFrag Op, string Width> {
+ def #NAME# : AtomicCmpSwap<Op, Width, CPU64Regs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : AtomicCmpSwap<Op, Width, CPU64Regs, CPU64Regs>,
+ Requires<[IsN64]>;
}
-// Count Leading Ones/Zeros in Word
-class CountLeading64<bits<6> func, string instr_asm, list<dag> pattern>:
- FR<0x1c, func, (outs CPU64Regs:$rd), (ins CPU64Regs:$rs),
- !strconcat(instr_asm, "\t$rd, $rs"), pattern, IIAlu>,
- Requires<[HasBitCount]> {
- let shamt = 0;
- let rt = rd;
+let usesCustomInserter = 1, Predicates = [HasMips64] in {
+ defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64, "load_add_64">;
+ defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64, "load_sub_64">;
+ defm ATOMIC_LOAD_AND_I64 : Atomic2Ops64<atomic_load_and_64, "load_and_64">;
+ defm ATOMIC_LOAD_OR_I64 : Atomic2Ops64<atomic_load_or_64, "load_or_64">;
+ defm ATOMIC_LOAD_XOR_I64 : Atomic2Ops64<atomic_load_xor_64, "load_xor_64">;
+ defm ATOMIC_LOAD_NAND_I64 : Atomic2Ops64<atomic_load_nand_64, "load_nand_64">;
+ defm ATOMIC_SWAP_I64 : Atomic2Ops64<atomic_swap_64, "swap_64">;
+ defm ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap64<atomic_cmp_swap_64, "64">;
}
//===----------------------------------------------------------------------===//
@@ -101,6 +81,7 @@ def SLTi64 : SetCC_I<0x0a, "slti", setlt, simm16_64, immSExt16, CPU64Regs>;
def SLTiu64 : SetCC_I<0x0b, "sltiu", setult, simm16_64, immSExt16, CPU64Regs>;
def ORi64 : ArithLogicI<0x0d, "ori", or, uimm16_64, immZExt16, CPU64Regs>;
def XORi64 : ArithLogicI<0x0e, "xori", xor, uimm16_64, immZExt16, CPU64Regs>;
+def LUi64 : LoadUpper<0x0f, "lui", CPU64Regs, uimm16_64>;
/// Arithmetic Instructions (3-Operand, R-Type)
def DADDu : ArithLogicR<0x00, 0x2d, "daddu", add, IIAlu, CPU64Regs, 1>;
@@ -113,26 +94,21 @@ def XOR64 : ArithLogicR<0x00, 0x26, "xor", xor, IIAlu, CPU64Regs, 1>;
def NOR64 : LogicNOR<0x00, 0x27, "nor", CPU64Regs>;
/// Shift Instructions
-def DSLL : LogicR_shift_rotate_imm64<0x38, 0x00, "dsll", shl, immZExt5>;
-def DSRL : LogicR_shift_rotate_imm64<0x3a, 0x00, "dsrl", srl, immZExt5>;
-def DSRA : LogicR_shift_rotate_imm64<0x3b, 0x00, "dsra", sra, immZExt5>;
-def DSLL32 : LogicR_shift_rotate_imm64<0x3c, 0x00, "dsll32", shl, imm32_63>;
-def DSRL32 : LogicR_shift_rotate_imm64<0x3e, 0x00, "dsrl32", srl, imm32_63>;
-def DSRA32 : LogicR_shift_rotate_imm64<0x3f, 0x00, "dsra32", sra, imm32_63>;
-def DSLLV : LogicR_shift_rotate_reg64<0x24, 0x00, "dsllv", shl>;
-def DSRLV : LogicR_shift_rotate_reg64<0x26, 0x00, "dsrlv", srl>;
-def DSRAV : LogicR_shift_rotate_reg64<0x27, 0x00, "dsrav", sra>;
+def DSLL : shift_rotate_imm64<0x38, 0x00, "dsll", shl>;
+def DSRL : shift_rotate_imm64<0x3a, 0x00, "dsrl", srl>;
+def DSRA : shift_rotate_imm64<0x3b, 0x00, "dsra", sra>;
+def DSLLV : shift_rotate_reg<0x24, 0x00, "dsllv", shl, CPU64Regs>;
+def DSRLV : shift_rotate_reg<0x26, 0x00, "dsrlv", srl, CPU64Regs>;
+def DSRAV : shift_rotate_reg<0x27, 0x00, "dsrav", sra, CPU64Regs>;
// Rotate Instructions
let Predicates = [HasMips64r2] in {
- def DROTR : LogicR_shift_rotate_imm64<0x3a, 0x01, "drotr", rotr, immZExt5>;
- def DROTR32 : LogicR_shift_rotate_imm64<0x3e, 0x01, "drotr32", rotr,
- imm32_63>;
- def DROTRV : LogicR_shift_rotate_reg64<0x16, 0x01, "drotrv", rotr>;
+ def DROTR : shift_rotate_imm64<0x3a, 0x01, "drotr", rotr>;
+ def DROTRV : shift_rotate_reg<0x16, 0x01, "drotrv", rotr, CPU64Regs>;
}
/// Load and Store Instructions
-/// aligned
+/// aligned
defm LB64 : LoadM64<0x20, "lb", sextloadi8>;
defm LBu64 : LoadM64<0x24, "lbu", zextloadi8>;
defm LH64 : LoadM64<0x21, "lh", sextloadi16_a>;
@@ -154,7 +130,14 @@ defm USW64 : StoreM64<0x2b, "usw", truncstorei32_u, 1>;
defm ULD : LoadM64<0x37, "uld", load_u, 1>;
defm USD : StoreM64<0x3f, "usd", store_u, 1>;
+/// Load-linked, Store-conditional
+def LLD : LLBase<0x34, "lld", CPU64Regs, mem>, Requires<[NotN64]>;
+def LLD_P8 : LLBase<0x34, "lld", CPU64Regs, mem64>, Requires<[IsN64]>;
+def SCD : SCBase<0x3c, "scd", CPU64Regs, mem>, Requires<[NotN64]>;
+def SCD_P8 : SCBase<0x3c, "scd", CPU64Regs, mem64>, Requires<[IsN64]>;
+
/// Jump and Branch Instructions
+def JR64 : JumpFR<0x00, 0x08, "jr", CPU64Regs>;
def BEQ64 : CBranch<0x04, "beq", seteq, CPU64Regs>;
def BNE64 : CBranch<0x05, "bne", setne, CPU64Regs>;
def BGEZ64 : CBranchZero<0x01, 1, "bgez", setge, CPU64Regs>;
@@ -162,46 +145,104 @@ def BGTZ64 : CBranchZero<0x07, 0, "bgtz", setgt, CPU64Regs>;
def BLEZ64 : CBranchZero<0x07, 0, "blez", setle, CPU64Regs>;
def BLTZ64 : CBranchZero<0x01, 0, "bltz", setlt, CPU64Regs>;
+def JALR64 : JumpLinkReg<0x00, 0x09, "jalr", CPU64Regs>;
+
/// Multiply and Divide Instructions.
-def DMULT : Mul64<0x1c, "dmult", IIImul>;
-def DMULTu : Mul64<0x1d, "dmultu", IIImul>;
+def DMULT : Mult64<0x1c, "dmult", IIImul>;
+def DMULTu : Mult64<0x1d, "dmultu", IIImul>;
def DSDIV : Div64<MipsDivRem, 0x1e, "ddiv", IIIdiv>;
def DUDIV : Div64<MipsDivRemU, 0x1f, "ddivu", IIIdiv>;
-let Defs = [HI64] in
- def MTHI64 : MoveToLOHI64<0x11, "mthi">;
-let Defs = [LO64] in
- def MTLO64 : MoveToLOHI64<0x13, "mtlo">;
+def MTHI64 : MoveToLOHI<0x11, "mthi", CPU64Regs, [HI64]>;
+def MTLO64 : MoveToLOHI<0x13, "mtlo", CPU64Regs, [LO64]>;
+def MFHI64 : MoveFromLOHI<0x10, "mfhi", CPU64Regs, [HI64]>;
+def MFLO64 : MoveFromLOHI<0x12, "mflo", CPU64Regs, [LO64]>;
-let Uses = [HI64] in
- def MFHI64 : MoveFromLOHI64<0x10, "mfhi">;
-let Uses = [LO64] in
- def MFLO64 : MoveFromLOHI64<0x12, "mflo">;
+/// Sign Ext In Register Instructions.
+def SEB64 : SignExtInReg<0x10, "seb", i8, CPU64Regs>;
+def SEH64 : SignExtInReg<0x18, "seh", i16, CPU64Regs>;
/// Count Leading
-def DCLZ : CountLeading64<0x24, "dclz",
- [(set CPU64Regs:$rd, (ctlz CPU64Regs:$rs))]>;
-def DCLO : CountLeading64<0x25, "dclo",
- [(set CPU64Regs:$rd, (ctlz (not CPU64Regs:$rs)))]>;
+def DCLZ : CountLeading0<0x24, "dclz", CPU64Regs>;
+def DCLO : CountLeading1<0x25, "dclo", CPU64Regs>;
+
+/// Double Word Swap Bytes/HalfWords
+def DSBH : SubwordSwap<0x24, 0x2, "dsbh", CPU64Regs>;
+def DSHD : SubwordSwap<0x24, 0x5, "dshd", CPU64Regs>;
+
+def LEA_ADDiu64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>;
+
+let Uses = [SP_64] in
+def DynAlloc64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>,
+ Requires<[IsN64]>;
+
+def RDHWR64 : ReadHardware<CPU64Regs, HWRegs64>;
+
+def DEXT : ExtBase<3, "dext", CPU64Regs>;
+def DINS : InsBase<7, "dins", CPU64Regs>;
+
+def DSLL64_32 : FR<0x3c, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
+ "dsll\t$rd, $rt, 32", [], IIAlu>;
+
+def SLL64_32 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
+ "sll\t$rd, $rt, 0", [], IIAlu>;
+def SLL64_64 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt),
+ "sll\t$rd, $rt, 0", [], IIAlu>;
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
-// Small immediates
-def : Pat<(i64 immSExt16:$in),
- (DADDiu ZERO_64, imm:$in)>;
-def : Pat<(i64 immZExt16:$in),
- (ORi64 ZERO_64, imm:$in)>;
-
-// zextloadi32_u
-def : Pat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64_P8 addr:$a), 32), 32)>,
- Requires<[IsN64]>;
-def : Pat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64 addr:$a), 32), 32)>,
- Requires<[NotN64]>;
+// extended loads
+let Predicates = [NotN64] in {
+ def : Pat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
+ def : Pat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
+ def : Pat<(i64 (extloadi16_a addr:$src)), (LH64 addr:$src)>;
+ def : Pat<(i64 (extloadi16_u addr:$src)), (ULH64 addr:$src)>;
+ def : Pat<(i64 (extloadi32_a addr:$src)), (LW64 addr:$src)>;
+ def : Pat<(i64 (extloadi32_u addr:$src)), (ULW64 addr:$src)>;
+ def : Pat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64 addr:$a), 32), 32)>;
+}
+let Predicates = [IsN64] in {
+ def : Pat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>;
+ def : Pat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>;
+ def : Pat<(i64 (extloadi16_a addr:$src)), (LH64_P8 addr:$src)>;
+ def : Pat<(i64 (extloadi16_u addr:$src)), (ULH64_P8 addr:$src)>;
+ def : Pat<(i64 (extloadi32_a addr:$src)), (LW64_P8 addr:$src)>;
+ def : Pat<(i64 (extloadi32_u addr:$src)), (ULW64_P8 addr:$src)>;
+ def : Pat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64_P8 addr:$a), 32), 32)>;
+}
// hi/lo relocs
-def : Pat<(i64 (MipsLo tglobaladdr:$in)), (DADDiu ZERO_64, tglobaladdr:$in)>;
+def : Pat<(MipsHi tglobaladdr:$in), (LUi64 tglobaladdr:$in)>;
+def : Pat<(MipsHi tblockaddress:$in), (LUi64 tblockaddress:$in)>;
+def : Pat<(MipsHi tjumptable:$in), (LUi64 tjumptable:$in)>;
+def : Pat<(MipsHi tconstpool:$in), (LUi64 tconstpool:$in)>;
+def : Pat<(MipsHi tglobaltlsaddr:$in), (LUi64 tglobaltlsaddr:$in)>;
+
+def : Pat<(MipsLo tglobaladdr:$in), (DADDiu ZERO_64, tglobaladdr:$in)>;
+def : Pat<(MipsLo tblockaddress:$in), (DADDiu ZERO_64, tblockaddress:$in)>;
+def : Pat<(MipsLo tjumptable:$in), (DADDiu ZERO_64, tjumptable:$in)>;
+def : Pat<(MipsLo tconstpool:$in), (DADDiu ZERO_64, tconstpool:$in)>;
+def : Pat<(MipsLo tglobaltlsaddr:$in), (DADDiu ZERO_64, tglobaltlsaddr:$in)>;
+
+def : Pat<(add CPU64Regs:$hi, (MipsLo tglobaladdr:$lo)),
+ (DADDiu CPU64Regs:$hi, tglobaladdr:$lo)>;
+def : Pat<(add CPU64Regs:$hi, (MipsLo tblockaddress:$lo)),
+ (DADDiu CPU64Regs:$hi, tblockaddress:$lo)>;
+def : Pat<(add CPU64Regs:$hi, (MipsLo tjumptable:$lo)),
+ (DADDiu CPU64Regs:$hi, tjumptable:$lo)>;
+def : Pat<(add CPU64Regs:$hi, (MipsLo tconstpool:$lo)),
+ (DADDiu CPU64Regs:$hi, tconstpool:$lo)>;
+def : Pat<(add CPU64Regs:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (DADDiu CPU64Regs:$hi, tglobaltlsaddr:$lo)>;
+
+def : WrapperPat<tglobaladdr, DADDiu, CPU64Regs>;
+def : WrapperPat<tconstpool, DADDiu, CPU64Regs>;
+def : WrapperPat<texternalsym, DADDiu, CPU64Regs>;
+def : WrapperPat<tblockaddress, DADDiu, CPU64Regs>;
+def : WrapperPat<tjumptable, DADDiu, CPU64Regs>;
+def : WrapperPat<tglobaltlsaddr, DADDiu, CPU64Regs>;
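
Taken together, the MipsHi/MipsLo and wrapper patterns above select the usual two-instruction address materialization on MIPS64. For a global g, the selected pair looks roughly like this (illustrative, not from the patch):

// lui    $1, %hi(g)       ; matched by the (MipsHi tglobaladdr) -> LUi64 pattern
// daddiu $1, $1, %lo(g)   ; matched by the (add $hi, (MipsLo ...)) -> DADDiu pattern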
defm : BrcondPats<CPU64Regs, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64,
ZERO_64>;
@@ -212,3 +253,21 @@ defm : SetlePats<CPU64Regs, SLT64, SLTu64>;
defm : SetgtPats<CPU64Regs, SLT64, SLTu64>;
defm : SetgePats<CPU64Regs, SLT64, SLTu64>;
defm : SetgeImmPats<CPU64Regs, SLTi64, SLTiu64>;
+
+// select MipsDynAlloc
+def : Pat<(MipsDynAlloc addr:$f), (DynAlloc64 addr:$f)>, Requires<[IsN64]>;
+
+// truncate
+def : Pat<(i32 (trunc CPU64Regs:$src)),
+ (SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>, Requires<[IsN64]>;
+
+// 32-to-64-bit extension
+def : Pat<(i64 (anyext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
+def : Pat<(i64 (zext CPURegs:$src)), (DSRL (DSLL64_32 CPURegs:$src), 32)>;
+def : Pat<(i64 (sext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
+
+// Sign extend in register
+def : Pat<(i64 (sext_inreg CPU64Regs:$src, i32)), (SLL64_64 CPU64Regs:$src)>;
+
+// bswap pattern
+def : Pat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp
new file mode 100644
index 0000000..dc8fbd0
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp
@@ -0,0 +1,153 @@
+//===-- MipsAnalyzeImmediate.cpp - Analyze Immediates ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "MipsAnalyzeImmediate.h"
+#include "Mips.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm;
+
+MipsAnalyzeImmediate::Inst::Inst(unsigned O, unsigned I) : Opc(O), ImmOpnd(I) {}
+
+// Add I to the instruction sequences.
+void MipsAnalyzeImmediate::AddInstr(InstSeqLs &SeqLs, const Inst &I) {
+ // Add an instruction sequence consisting of just I.
+ if (SeqLs.empty()) {
+ SeqLs.push_back(InstSeq(1, I));
+ return;
+ }
+
+ for (InstSeqLs::iterator Iter = SeqLs.begin(); Iter != SeqLs.end(); ++Iter)
+ Iter->push_back(I);
+}
+
+void MipsAnalyzeImmediate::GetInstSeqLsADDiu(uint64_t Imm, unsigned RemSize,
+ InstSeqLs &SeqLs) {
+ GetInstSeqLs((Imm + 0x8000ULL) & 0xffffffffffff0000ULL, RemSize, SeqLs);
+ AddInstr(SeqLs, Inst(ADDiu, Imm & 0xffffULL));
+}
+
+void MipsAnalyzeImmediate::GetInstSeqLsORi(uint64_t Imm, unsigned RemSize,
+ InstSeqLs &SeqLs) {
+ GetInstSeqLs(Imm & 0xffffffffffff0000ULL, RemSize, SeqLs);
+ AddInstr(SeqLs, Inst(ORi, Imm & 0xffffULL));
+}
+
+void MipsAnalyzeImmediate::GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize,
+ InstSeqLs &SeqLs) {
+ unsigned Shamt = CountTrailingZeros_64(Imm);
+ GetInstSeqLs(Imm >> Shamt, RemSize - Shamt, SeqLs);
+ AddInstr(SeqLs, Inst(SLL, Shamt));
+}
+
+void MipsAnalyzeImmediate::GetInstSeqLs(uint64_t Imm, unsigned RemSize,
+ InstSeqLs &SeqLs) {
+ uint64_t MaskedImm = Imm & (0xffffffffffffffffULL >> (64 - Size));
+
+ // Do nothing if Imm is 0.
+ if (!MaskedImm)
+ return;
+
+ // A single ADDiu will do if RemSize <= 16.
+ if (RemSize <= 16) {
+ AddInstr(SeqLs, Inst(ADDiu, MaskedImm));
+ return;
+ }
+
+ // Shift if the lower 16 bits are cleared.
+ if (!(Imm & 0xffff)) {
+ GetInstSeqLsSLL(Imm, RemSize, SeqLs);
+ return;
+ }
+
+ GetInstSeqLsADDiu(Imm, RemSize, SeqLs);
+
+ // If bit 15 is cleared, it doesn't make a difference whether the last
+ // instruction is an ADDiu or ORi. In that case, do not call GetInstSeqLsORi.
+ if (Imm & 0x8000) {
+ InstSeqLs SeqLsORi;
+ GetInstSeqLsORi(Imm, RemSize, SeqLsORi);
+ SeqLs.insert(SeqLs.end(), SeqLsORi.begin(), SeqLsORi.end());
+ }
+}
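
The Imm + 0x8000 in GetInstSeqLsADDiu rounds the upper part to the nearest multiple of 0x10000 so the low 16 bits can be re-added as a sign-extended ADDiu operand. A worked case (editor's arithmetic, not from the patch):

// For Imm = 0x18000:
//   (0x18000 + 0x8000) & ~0xffffULL == 0x20000    // upper part recursed on
//   SignExtend64<16>(0x8000)        == -0x8000    // ADDiu contribution
//   0x20000 + (-0x8000)             == 0x18000    // original immediate restored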
+
+// Replace an ADDiu & SLL pair with a LUi.
+// e.g. the following two instructions
+// ADDiu 0x0111
+// SLL 18
+// are replaced with
+// LUi 0x444
+void MipsAnalyzeImmediate::ReplaceADDiuSLLWithLUi(InstSeq &Seq) {
+ // Check if the first two instructions are ADDiu and SLL and the shift amount
+ // is at least 16.
+ if ((Seq.size() < 2) || (Seq[0].Opc != ADDiu) ||
+ (Seq[1].Opc != SLL) || (Seq[1].ImmOpnd < 16))
+ return;
+
+ // Sign-extend and shift operand of ADDiu and see if it still fits in 16 bits.
+ int64_t Imm = SignExtend64<16>(Seq[0].ImmOpnd);
+ int64_t ShiftedImm = Imm << (Seq[1].ImmOpnd - 16);
+
+ if (!isInt<16>(ShiftedImm))
+ return;
+
+ // Replace the first instruction and erase the second.
+ Seq[0].Opc = LUi;
+ Seq[0].ImmOpnd = (unsigned)(ShiftedImm & 0xffff);
+ Seq.erase(Seq.begin() + 1);
+}
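
Tracing the routines above on a concrete 32-bit value (editor's example): for Imm = 0x12345678, GetInstSeqLs yields ADDiu 0x48D, SLL 18, ADDiu 0x5678, and ReplaceADDiuSLLWithLUi folds the first pair because 0x48D << (18 - 16) = 0x1234 still fits in 16 bits:

// Final sequence for 0x12345678:
//   lui   $r, 0x1234       // LUi replaces ADDiu 0x48D + SLL 18
//   addiu $r, $r, 0x5678   // low half appended as before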
+
+void MipsAnalyzeImmediate::GetShortestSeq(InstSeqLs &SeqLs, InstSeq &Insts) {
+ InstSeqLs::iterator ShortestSeq = SeqLs.end();
+ // The length of an instruction sequence is at most 7.
+ unsigned ShortestLength = 8;
+
+ for (InstSeqLs::iterator S = SeqLs.begin(); S != SeqLs.end(); ++S) {
+ ReplaceADDiuSLLWithLUi(*S);
+ assert(S->size() <= 7);
+
+ if (S->size() < ShortestLength) {
+ ShortestSeq = S;
+ ShortestLength = S->size();
+ }
+ }
+
+ Insts.clear();
+ Insts.append(ShortestSeq->begin(), ShortestSeq->end());
+}
+
+const MipsAnalyzeImmediate::InstSeq
+&MipsAnalyzeImmediate::Analyze(uint64_t Imm, unsigned Size,
+ bool LastInstrIsADDiu) {
+ this->Size = Size;
+
+ if (Size == 32) {
+ ADDiu = Mips::ADDiu;
+ ORi = Mips::ORi;
+ SLL = Mips::SLL;
+ LUi = Mips::LUi;
+ } else {
+ ADDiu = Mips::DADDiu;
+ ORi = Mips::ORi64;
+ SLL = Mips::DSLL;
+ LUi = Mips::LUi64;
+ }
+
+ InstSeqLs SeqLs;
+
+ // Get the list of instruction sequences.
+ if (LastInstrIsADDiu || !Imm)
+ GetInstSeqLsADDiu(Imm, Size, SeqLs);
+ else
+ GetInstSeqLs(Imm, Size, SeqLs);
+
+ // Set Insts to the shortest instruction sequence.
+ GetShortestSeq(SeqLs, Insts);
+
+ return Insts;
+}
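
A minimal caller sketch; emitLoadImmPiece is a hypothetical hook standing in for whatever the caller does with each (opcode, immediate) pair, not an API from this patch:

MipsAnalyzeImmediate AnalyzeImm;
const MipsAnalyzeImmediate::InstSeq &Seq =
    AnalyzeImm.Analyze(0x123456789ULL, 64, /*LastInstrIsADDiu=*/false);
for (unsigned i = 0, e = Seq.size(); i != e; ++i)
  emitLoadImmPiece(Seq[i].Opc, Seq[i].ImmOpnd); // hypothetical emitter hook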
diff --git a/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.h b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.h
new file mode 100644
index 0000000..a094dda
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.h
@@ -0,0 +1,63 @@
+//===-- MipsAnalyzeImmediate.h - Analyze Immediates ------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef MIPS_ANALYZE_IMMEDIATE_H
+#define MIPS_ANALYZE_IMMEDIATE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+ class MipsAnalyzeImmediate {
+ public:
+ struct Inst {
+ unsigned Opc, ImmOpnd;
+ Inst(unsigned Opc, unsigned ImmOpnd);
+ };
+ typedef SmallVector<Inst, 7> InstSeq;
+
+ /// Analyze - Get an instruction sequence to load immediate Imm. The last
+ /// instruction in the sequence must be an ADDiu if LastInstrIsADDiu is
+ /// true.
+ const InstSeq &Analyze(uint64_t Imm, unsigned Size, bool LastInstrIsADDiu);
+ private:
+ typedef SmallVector<InstSeq, 5> InstSeqLs;
+
+ /// AddInstr - Add I to all instruction sequences in SeqLs.
+ void AddInstr(InstSeqLs &SeqLs, const Inst &I);
+
+ /// GetInstSeqLsADDiu - Get instruction sequences which end with an ADDiu to
+ /// load immediate Imm.
+ void GetInstSeqLsADDiu(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
+
+ /// GetInstSeqLsORi - Get instruction sequences which end with an ORi to
+ /// load immediate Imm.
+ void GetInstSeqLsORi(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
+
+ /// GetInstSeqLsSLL - Get instruction sequences which end with an SLL to
+ /// load immediate Imm.
+ void GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
+
+ /// GetInstSeqLs - Get instruction sequences to load immediate Imm.
+ void GetInstSeqLs(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
+
+ /// ReplaceADDiuSLLWithLUi - Replace an ADDiu & SLL pair with a LUi.
+ void ReplaceADDiuSLLWithLUi(InstSeq &Seq);
+
+ /// GetShortestSeq - Find the shortest instruction sequence in SeqLs and
+ /// return it in Insts.
+ void GetShortestSeq(InstSeqLs &SeqLs, InstSeq &Insts);
+
+ unsigned Size;
+ unsigned ADDiu, ORi, SLL, LUi;
+ InstSeq Insts;
+ };
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index 0e82681..8206cfc 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -1,4 +1,4 @@
-//===-- MipsAsmPrinter.cpp - Mips LLVM assembly writer --------------------===//
+//===-- MipsAsmPrinter.cpp - Mips LLVM Assembly Printer -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,10 +16,12 @@
#include "MipsAsmPrinter.h"
#include "Mips.h"
#include "MipsInstrInfo.h"
-#include "MipsMachineFunction.h"
-#include "MipsMCInstLower.h"
-#include "MipsMCSymbolRefExpr.h"
#include "InstPrinter/MipsInstPrinter.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/DebugInfo.h"
#include "llvm/BasicBlock.h"
#include "llvm/Instructions.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -27,55 +29,125 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/Instructions.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Analysis/DebugInfo.h"
using namespace llvm;
-static bool isUnalignedLoadStore(unsigned Opc) {
- return Opc == Mips::ULW || Opc == Mips::ULH || Opc == Mips::ULHu ||
- Opc == Mips::USW || Opc == Mips::USH ||
- Opc == Mips::ULW_P8 || Opc == Mips::ULH_P8 || Opc == Mips::ULHu_P8 ||
- Opc == Mips::USW_P8 || Opc == Mips::USH_P8;
+void MipsAsmPrinter::EmitInstrWithMacroNoAT(const MachineInstr *MI) {
+ MCInst TmpInst;
+
+ MCInstLowering.Lower(MI, TmpInst);
+ OutStreamer.EmitRawText(StringRef("\t.set\tmacro"));
+ if (MipsFI->getEmitNOAT())
+ OutStreamer.EmitRawText(StringRef("\t.set\tat"));
+ OutStreamer.EmitInstruction(TmpInst);
+ if (MipsFI->getEmitNOAT())
+ OutStreamer.EmitRawText(StringRef("\t.set\tnoat"));
+ OutStreamer.EmitRawText(StringRef("\t.set\tnomacro"));
+}
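
On the raw-text (assembly) path this wraps the lowered instruction in a directive sandwich; for a macro expansion that needs $at, the emitted text looks roughly like (illustrative):

//   .set macro
//   .set at          <- only when MipsFI->getEmitNOAT() is set
//   ulw  $2, 0($3)   <- the lowered instruction
//   .set noat
//   .set nomacro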
+
+bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ MipsFI = MF.getInfo<MipsFunctionInfo>();
+ AsmPrinter::runOnMachineFunction(MF);
+ return true;
}
void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
-
if (MI->isDebugValue()) {
+ SmallString<128> Str;
+ raw_svector_ostream OS(Str);
+
PrintDebugValueComment(MI, OS);
return;
}
- MipsMCInstLower MCInstLowering(Mang, *MF, *this);
unsigned Opc = MI->getOpcode();
MCInst TmpInst0;
- MCInstLowering.Lower(MI, TmpInst0);
-
- // Enclose unaligned load or store with .macro & .nomacro directives.
- if (isUnalignedLoadStore(Opc)) {
- MCInst Directive;
- Directive.setOpcode(Mips::MACRO);
- OutStreamer.EmitInstruction(Directive);
- OutStreamer.EmitInstruction(TmpInst0);
- Directive.setOpcode(Mips::NOMACRO);
- OutStreamer.EmitInstruction(Directive);
+ SmallVector<MCInst, 4> MCInsts;
+
+ switch (Opc) {
+ case Mips::ULW:
+ case Mips::ULH:
+ case Mips::ULHu:
+ case Mips::USW:
+ case Mips::USH:
+ case Mips::ULW_P8:
+ case Mips::ULH_P8:
+ case Mips::ULHu_P8:
+ case Mips::USW_P8:
+ case Mips::USH_P8:
+ case Mips::ULD:
+ case Mips::ULW64:
+ case Mips::ULH64:
+ case Mips::ULHu64:
+ case Mips::USD:
+ case Mips::USW64:
+ case Mips::USH64:
+ case Mips::ULD_P8:
+ case Mips::ULW64_P8:
+ case Mips::ULH64_P8:
+ case Mips::ULHu64_P8:
+ case Mips::USD_P8:
+ case Mips::USW64_P8:
+ case Mips::USH64_P8: {
+ if (OutStreamer.hasRawTextSupport()) {
+ EmitInstrWithMacroNoAT(MI);
+ return;
+ }
+
+ MCInstLowering.LowerUnalignedLoadStore(MI, MCInsts);
+ for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
+ I != MCInsts.end(); ++I)
+ OutStreamer.EmitInstruction(*I);
+
return;
}
+ case Mips::CPRESTORE: {
+ const MachineOperand &MO = MI->getOperand(0);
+ assert(MO.isImm() && "CPRESTORE's operand must be an immediate.");
+ int64_t Offset = MO.getImm();
+
+ if (OutStreamer.hasRawTextSupport()) {
+ if (!isInt<16>(Offset)) {
+ EmitInstrWithMacroNoAT(MI);
+ return;
+ }
+ } else {
+ MCInstLowering.LowerCPRESTORE(Offset, MCInsts);
+
+ for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
+ I != MCInsts.end(); ++I)
+ OutStreamer.EmitInstruction(*I);
+ return;
+ }
+
+ break;
+ }
+ case Mips::SETGP01: {
+ MCInstLowering.LowerSETGP01(MI, MCInsts);
+
+ for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
+ I != MCInsts.end(); ++I)
+ OutStreamer.EmitInstruction(*I);
+
+ return;
+ }
+ default:
+ break;
+ }
+
+ MCInstLowering.Lower(MI, TmpInst0);
OutStreamer.EmitInstruction(TmpInst0);
}
@@ -138,7 +210,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
if (Mips::CPURegsRegisterClass->contains(Reg))
break;
- unsigned RegNum = MipsRegisterInfo::getRegisterNumbering(Reg);
+ unsigned RegNum = getMipsRegisterNumbering(Reg);
if (Mips::AFGR64RegisterClass->contains(Reg)) {
FPUBitmask |= (3 << RegNum);
CSFPRegsSize += AFGR64RegSize;
@@ -153,7 +225,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
// Set CPU Bitmask.
for (; i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- unsigned RegNum = MipsRegisterInfo::getRegisterNumbering(Reg);
+ unsigned RegNum = getMipsRegisterNumbering(Reg);
CPUBitmask |= (1 << RegNum);
}
@@ -177,7 +249,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
void MipsAsmPrinter::printHex32(unsigned Value, raw_ostream &O) {
O << "0x";
for (int i = 7; i >= 0; i--)
- O << utohexstr((Value & (0xF << (i*4))) >> (i*4));
+ O.write_hex((Value & (0xF << (i*4))) >> (i*4));
}
//===----------------------------------------------------------------------===//
@@ -192,10 +264,11 @@ void MipsAsmPrinter::emitFrameDirective() {
unsigned returnReg = RI.getRARegister();
unsigned stackSize = MF->getFrameInfo()->getStackSize();
- OutStreamer.EmitRawText("\t.frame\t$" +
- Twine(LowercaseString(MipsInstPrinter::getRegisterName(stackReg))) +
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText("\t.frame\t$" +
+ StringRef(MipsInstPrinter::getRegisterName(stackReg)).lower() +
"," + Twine(stackSize) + ",$" +
- Twine(LowercaseString(MipsInstPrinter::getRegisterName(returnReg))));
+ StringRef(MipsInstPrinter::getRegisterName(returnReg)).lower());
}
/// Emit Set directives.
@@ -205,27 +278,49 @@ const char *MipsAsmPrinter::getCurrentABIString() const {
case MipsSubtarget::N32: return "abiN32";
case MipsSubtarget::N64: return "abi64";
case MipsSubtarget::EABI: return "eabi32"; // TODO: handle eabi64
- default: break;
+ default: llvm_unreachable("Unknown Mips ABI");
}
-
- llvm_unreachable("Unknown Mips ABI");
- return NULL;
}
void MipsAsmPrinter::EmitFunctionEntryLabel() {
- OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
OutStreamer.EmitLabel(CurrentFnSym);
}
/// EmitFunctionBodyStart - Targets can override this to emit stuff before
/// the first basic block in the function.
void MipsAsmPrinter::EmitFunctionBodyStart() {
+ MCInstLowering.Initialize(Mang, &MF->getContext());
+
emitFrameDirective();
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- printSavedRegsBitmask(OS);
- OutStreamer.EmitRawText(OS.str());
+ bool EmitCPLoad = (MF->getTarget().getRelocationModel() == Reloc::PIC_) &&
+ Subtarget->isABI_O32() && MipsFI->globalBaseRegSet() &&
+ MipsFI->globalBaseRegFixed();
+
+ if (OutStreamer.hasRawTextSupport()) {
+ SmallString<128> Str;
+ raw_svector_ostream OS(Str);
+ printSavedRegsBitmask(OS);
+ OutStreamer.EmitRawText(OS.str());
+
+ OutStreamer.EmitRawText(StringRef("\t.set\tnoreorder"));
+
+ // Emit .cpload directive if needed.
+ if (EmitCPLoad)
+ OutStreamer.EmitRawText(StringRef("\t.cpload\t$25"));
+
+ OutStreamer.EmitRawText(StringRef("\t.set\tnomacro"));
+ if (MipsFI->getEmitNOAT())
+ OutStreamer.EmitRawText(StringRef("\t.set\tnoat"));
+ } else if (EmitCPLoad) {
+ SmallVector<MCInst, 4> MCInsts;
+ MCInstLowering.LowerCPLOAD(MCInsts);
+ for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
+ I != MCInsts.end(); ++I)
+ OutStreamer.EmitInstruction(*I);
+ }
}
/// EmitFunctionBodyEnd - Targets can override this to emit stuff after
@@ -234,11 +329,15 @@ void MipsAsmPrinter::EmitFunctionBodyEnd() {
// There are instruction for this macros, but they must
// always be at the function end, and we can't emit and
// break with BB logic.
- OutStreamer.EmitRawText(StringRef("\t.set\tmacro"));
- OutStreamer.EmitRawText(StringRef("\t.set\treorder"));
- OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
-}
+ if (OutStreamer.hasRawTextSupport()) {
+ if (MipsFI->getEmitNOAT())
+ OutStreamer.EmitRawText(StringRef("\t.set\tat"));
+ OutStreamer.EmitRawText(StringRef("\t.set\tmacro"));
+ OutStreamer.EmitRawText(StringRef("\t.set\treorder"));
+ OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
+ }
+}
/// isBlockOnlyReachableByFallthough - Return true if the basic block has
/// exactly one predecessor and the control transfer mechanism between
@@ -262,24 +361,24 @@ bool MipsAsmPrinter::isBlockOnlyReachableByFallthrough(const MachineBasicBlock*
// If there isn't exactly one predecessor, it can't be a fall through.
MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI;
++PI2;
-
+
if (PI2 != MBB->pred_end())
- return false;
+ return false;
// The predecessor has to be immediately before this block.
if (!Pred->isLayoutSuccessor(MBB))
return false;
-
+
// If the block is completely empty, then it definitely does fall through.
if (Pred->empty())
return true;
-
+
// Otherwise, check the last instruction.
// Check if the last terminator is an unconditional branch.
MachineBasicBlock::const_iterator I = Pred->end();
- while (I != Pred->begin() && !(--I)->getDesc().isTerminator()) ;
+ while (I != Pred->begin() && !(--I)->isTerminator()) ;
- return !I->getDesc().isBarrier();
+ return !I->isBarrier();
}
// Print out an operand for an inline asm expression.
@@ -300,7 +399,7 @@ bool MipsAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
raw_ostream &O) {
if (ExtraCode && ExtraCode[0])
return true; // Unknown modifier.
-
+
const MachineOperand &MO = MI->getOperand(OpNum);
assert(MO.isReg() && "unexpected inline asm memory operand");
O << "0($" << MipsInstPrinter::getRegisterName(MO.getReg()) << ")";
@@ -335,7 +434,7 @@ void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
switch (MO.getType()) {
case MachineOperand::MO_Register:
O << '$'
- << LowercaseString(MipsInstPrinter::getRegisterName(MO.getReg()));
+ << StringRef(MipsInstPrinter::getRegisterName(MO.getReg())).lower();
break;
case MachineOperand::MO_Immediate:
@@ -420,18 +519,23 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
// FIXME: Use SwitchSection.
// Tell the assembler which ABI we are using
- OutStreamer.EmitRawText("\t.section .mdebug." + Twine(getCurrentABIString()));
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText("\t.section .mdebug." +
+ Twine(getCurrentABIString()));
// TODO: handle O64 ABI
- if (Subtarget->isABI_EABI()) {
- if (Subtarget->isGP32bit())
- OutStreamer.EmitRawText(StringRef("\t.section .gcc_compiled_long32"));
- else
- OutStreamer.EmitRawText(StringRef("\t.section .gcc_compiled_long64"));
+ if (OutStreamer.hasRawTextSupport()) {
+ if (Subtarget->isABI_EABI()) {
+ if (Subtarget->isGP32bit())
+ OutStreamer.EmitRawText(StringRef("\t.section .gcc_compiled_long32"));
+ else
+ OutStreamer.EmitRawText(StringRef("\t.section .gcc_compiled_long64"));
+ }
}
// return to previous section
- OutStreamer.EmitRawText(StringRef("\t.previous"));
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText(StringRef("\t.previous"));
}
MachineLocation
diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
index 16461ff..562bf9c 100644
--- a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
+++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h
@@ -1,4 +1,4 @@
-//===-- MipsAsmPrinter.h - Mips LLVM assembly writer ----------------------===//
+//===-- MipsAsmPrinter.h - Mips LLVM Assembly Printer ----------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,6 +14,8 @@
#ifndef MIPSASMPRINTER_H
#define MIPSASMPRINTER_H
+#include "MipsMachineFunction.h"
+#include "MipsMCInstLower.h"
#include "MipsSubtarget.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/Support/Compiler.h"
@@ -22,16 +24,22 @@
namespace llvm {
class MCStreamer;
class MachineInstr;
-class raw_ostream;
class MachineBasicBlock;
class Module;
+class raw_ostream;
class LLVM_LIBRARY_VISIBILITY MipsAsmPrinter : public AsmPrinter {
- const MipsSubtarget *Subtarget;
-
+
+ void EmitInstrWithMacroNoAT(const MachineInstr *MI);
+
public:
+
+ const MipsSubtarget *Subtarget;
+ const MipsFunctionInfo *MipsFI;
+ MipsMCInstLower MCInstLowering;
+
explicit MipsAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {
+ : AsmPrinter(TM, Streamer), MCInstLowering(*this) {
Subtarget = &TM.getSubtarget<MipsSubtarget>();
}
@@ -39,6 +47,8 @@ public:
return "Mips Assembly Printer";
}
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
void EmitInstruction(const MachineInstr *MI);
void printSavedRegsBitmask(raw_ostream &O);
void printHex32(unsigned int Value, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
index 0ae4ef6..4b7e1d3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
+++ b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
@@ -1,4 +1,4 @@
-//===- MipsCallingConv.td - Calling Conventions for Mips ---*- tablegen -*-===//
+//===-- MipsCallingConv.td - Calling Conventions for Mips --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -35,12 +35,18 @@ def RetCC_MipsO32 : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_MipsN : CallingConv<[
- // FIXME: Handle byval, complex and float double parameters.
+ // Handles byval parameters.
+ CCIfByVal<CCCustom<"CC_Mips64Byval">>,
- // Promote i8/i16/i32 arguments to i64.
- CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
// Integer arguments are passed in integer registers.
+ CCIfType<[i32], CCAssignToRegWithShadow<[A0, A1, A2, A3,
+ T0, T1, T2, T3],
+ [F12, F13, F14, F15,
+ F16, F17, F18, F19]>>,
+
CCIfType<[i64], CCAssignToRegWithShadow<[A0_64, A1_64, A2_64, A3_64,
T0_64, T1_64, T2_64, T3_64],
[D12_64, D13_64, D14_64, D15_64,
@@ -59,13 +65,30 @@ def CC_MipsN : CallingConv<[
T0_64, T1_64, T2_64, T3_64]>>,
// All stack parameter slots become 64-bit doublewords and are 8-byte aligned.
- CCIfType<[i64, f64], CCAssignToStack<8, 8>>,
- CCIfType<[f32], CCAssignToStack<4, 8>>
+ CCIfType<[i32, f32], CCAssignToStack<4, 8>>,
+ CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
-def RetCC_MipsN : CallingConv<[
- // FIXME: Handle complex and float double return values.
+// N32/64 variable arguments.
+// All arguments are passed in integer registers.
+def CC_MipsN_VarArg : CallingConv<[
+ // Handles byval parameters.
+ CCIfByVal<CCCustom<"CC_Mips64Byval">>,
+
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ CCIfType<[i32, f32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,
+
+ CCIfType<[i64, f64], CCAssignToReg<[A0_64, A1_64, A2_64, A3_64,
+ T0_64, T1_64, T2_64, T3_64]>>,
+
+ // All stack parameter slots become 64-bit doublewords and are 8-byte aligned.
+ CCIfType<[i32, f32], CCAssignToStack<4, 8>>,
+ CCIfType<[i64, f64], CCAssignToStack<8, 8>>
+]>;
+def RetCC_MipsN : CallingConv<[
// i32 are returned in registers V0, V1
CCIfType<[i32], CCAssignToReg<[V0, V1]>>,
@@ -137,3 +160,20 @@ def RetCC_Mips : CallingConv<[
CCIfSubtarget<"isABI_N64()", CCDelegateTo<RetCC_MipsN>>,
CCDelegateTo<RetCC_MipsO32>
]>;
+
+//===----------------------------------------------------------------------===//
+// Callee-saved register lists.
+//===----------------------------------------------------------------------===//
+
+def CSR_SingleFloatOnly : CalleeSavedRegs<(add (sequence "F%u", 31, 20), RA, FP,
+ (sequence "S%u", 7, 0))>;
+
+def CSR_O32 : CalleeSavedRegs<(add (sequence "D%u", 15, 10), RA, FP,
+ (sequence "S%u", 7, 0))>;
+
+def CSR_N32 : CalleeSavedRegs<(add D31_64, D29_64, D27_64, D25_64, D24_64,
+ D23_64, D22_64, D21_64, RA_64, FP_64, GP_64,
+ (sequence "S%u_64", 7, 0))>;
+
+def CSR_N64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 24), RA_64, FP_64,
+ GP_64, (sequence "S%u_64", 7, 0))>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
index 23fabe3..7d81902 100644
--- a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -1,4 +1,4 @@
-//===-- Mips/MipsCodeEmitter.cpp - Convert Mips code to machine code -----===//
+//===-- Mips/MipsCodeEmitter.cpp - Convert Mips Code to Machine Code ------===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,18 +18,20 @@
#include "MipsRelocations.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/PassManager.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/ADT/Statistic.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/PassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -37,8 +39,6 @@
#include <iomanip>
#endif
-#include "llvm/CodeGen/MachineOperand.h"
-
using namespace llvm;
STATISTIC(NumEmitted, "Number of machine instructions emitted");
@@ -66,9 +66,9 @@ class MipsCodeEmitter : public MachineFunctionPass {
public:
MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) :
MachineFunctionPass(ID), JTI(0),
- II((const MipsInstrInfo *) tm.getInstrInfo()),
- TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
+ II((const MipsInstrInfo *) tm.getInstrInfo()),
+ TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
}
bool runOnMachineFunction(MachineFunction &MF);
@@ -80,7 +80,7 @@ class MipsCodeEmitter : public MachineFunctionPass {
/// getBinaryCodeForInstr - This function, generated by the
/// CodeEmitterGenerator using TableGen, produces the binary encoding for
/// machine instructions.
- unsigned getBinaryCodeForInstr(const MachineInstr &MI) const;
+ uint64_t getBinaryCodeForInstr(const MachineInstr &MI) const;
void emitInstruction(const MachineInstr &MI);
@@ -91,7 +91,7 @@ class MipsCodeEmitter : public MachineFunctionPass {
/// Routines that handle operands which add machine relocations which are
/// fixed up by the relocation stage.
void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
- bool MayNeedFarStub) const;
+ bool MayNeedFarStub) const;
void emitExternalSymbolAddress(const char *ES, unsigned Reloc) const;
void emitConstPoolAddress(unsigned CPI, unsigned Reloc) const;
void emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const;
@@ -105,9 +105,22 @@ class MipsCodeEmitter : public MachineFunctionPass {
unsigned getRelocation(const MachineInstr &MI,
const MachineOperand &MO) const;
+ unsigned getJumpTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
+
+ unsigned getBranchTargetOpValue(const MachineInstr &MI,
+ unsigned OpNo) const;
unsigned getMemEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeExtEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeInsEncoding(const MachineInstr &MI, unsigned OpNo) const;
+
+ int emitULW(const MachineInstr &MI);
+ int emitUSW(const MachineInstr &MI);
+ int emitULH(const MachineInstr &MI);
+ int emitULHu(const MachineInstr &MI);
+ int emitUSH(const MachineInstr &MI);
+
+ void emitGlobalAddressUnaligned(const GlobalValue *GV, unsigned Reloc,
+ int Offset) const;
};
}
@@ -132,7 +145,7 @@ bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
MBB != E; ++MBB){
MCE.StartMachineBasicBlock(MBB);
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
I != E; ++I)
emitInstruction(*I);
}
@@ -149,30 +162,50 @@ unsigned MipsCodeEmitter::getRelocation(const MachineInstr &MI,
if (Form == MipsII::FrmJ)
return Mips::reloc_mips_26;
if ((Form == MipsII::FrmI || Form == MipsII::FrmFI)
- && MI.getDesc().isBranch())
- return Mips::reloc_mips_branch;
+ && MI.isBranch())
+ return Mips::reloc_mips_pc16;
if (Form == MipsII::FrmI && MI.getOpcode() == Mips::LUi)
return Mips::reloc_mips_hi;
return Mips::reloc_mips_lo;
}
+unsigned MipsCodeEmitter::getJumpTargetOpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ MachineOperand MO = MI.getOperand(OpNo);
+ if (MO.isGlobal())
+ emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true);
+ else if (MO.isSymbol())
+ emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO));
+ else if (MO.isMBB())
+ emitMachineBasicBlock(MO.getMBB(), getRelocation(MI, MO));
+ else
+ llvm_unreachable("Unexpected jump target operand kind.");
+ return 0;
+}
+
+unsigned MipsCodeEmitter::getBranchTargetOpValue(const MachineInstr &MI,
+ unsigned OpNo) const {
+ MachineOperand MO = MI.getOperand(OpNo);
+ emitMachineBasicBlock(MO.getMBB(), getRelocation(MI, MO));
+ return 0;
+}
+
unsigned MipsCodeEmitter::getMemEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
assert(MI.getOperand(OpNo).isReg());
unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo)) << 16;
- return
- (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
+ return (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
}
unsigned MipsCodeEmitter::getSizeExtEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// size is encoded as size-1.
return getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
}
unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
- unsigned OpNo) const {
+ unsigned OpNo) const {
// size is encoded as pos+size-1.
return getMachineOpValue(MI, MI.getOperand(OpNo-1)) +
getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
@@ -181,14 +214,20 @@ unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO) const {
+ const MachineOperand &MO) const {
if (MO.isReg())
- return MipsRegisterInfo::getRegisterNumbering(MO.getReg());
+ return getMipsRegisterNumbering(MO.getReg());
else if (MO.isImm())
return static_cast<unsigned>(MO.getImm());
- else if (MO.isGlobal())
- emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true);
- else if (MO.isSymbol())
+ else if (MO.isGlobal()) {
+ if (MI.getOpcode() == Mips::ULW || MI.getOpcode() == Mips::USW ||
+ MI.getOpcode() == Mips::ULH || MI.getOpcode() == Mips::ULHu)
+ emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 4);
+ else if (MI.getOpcode() == Mips::USH)
+ emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 8);
+ else
+ emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true);
+ } else if (MO.isSymbol())
emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO));
else if (MO.isCPI())
emitConstPoolAddress(MO.getIndex(), getRelocation(MI, MO));
@@ -202,9 +241,18 @@ unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
}
void MipsCodeEmitter::emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
- bool MayNeedFarStub) const {
+ bool MayNeedFarStub) const {
MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
- const_cast<GlobalValue *>(GV), 0, MayNeedFarStub));
+ const_cast<GlobalValue *>(GV), 0,
+ MayNeedFarStub));
+}
+
+void MipsCodeEmitter::emitGlobalAddressUnaligned(const GlobalValue *GV,
+ unsigned Reloc, int Offset) const {
+ MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
+ const_cast<GlobalValue *>(GV), 0, false));
+ MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset() + Offset,
+ Reloc, const_cast<GlobalValue *>(GV), 0, false));
}
void MipsCodeEmitter::
@@ -225,11 +273,108 @@ emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const {
}
void MipsCodeEmitter::emitMachineBasicBlock(MachineBasicBlock *BB,
- unsigned Reloc) const {
+ unsigned Reloc) const {
MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
Reloc, BB));
}
+int MipsCodeEmitter::emitUSW(const MachineInstr &MI) {
+ unsigned src = getMachineOpValue(MI, MI.getOperand(0));
+ unsigned base = getMachineOpValue(MI, MI.getOperand(1));
+ unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
+ // swr src, offset(base)
+ // swl src, offset+3(base)
+ MCE.emitWordLE(
+ (0x2e << 26) | (base << 21) | (src << 16) | (offset & 0xffff));
+ MCE.emitWordLE(
+ (0x2a << 26) | (base << 21) | (src << 16) | ((offset+3) & 0xffff));
+ return 2;
+}
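
A spot-check of the raw words above (editor's arithmetic): with src = $t0 (8), base = $sp (29), offset = 0, the first word is (0x2e << 26) | (29 << 21) | (8 << 16) = 0xBBA80000, i.e. swr $t0, 0($sp); 0x2a and 0x2e are the SWL and SWR major opcodes, matching the comments.

// (0x2e << 26) | (29 << 21) | (8 << 16) | 0
//   == 0xB8000000 | 0x03A00000 | 0x00080000 == 0xBBA80000   // swr $t0, 0($sp)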
+
+int MipsCodeEmitter::emitULW(const MachineInstr &MI) {
+ unsigned dst = getMachineOpValue(MI, MI.getOperand(0));
+ unsigned base = getMachineOpValue(MI, MI.getOperand(1));
+ unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
+ unsigned at = 1;
+ if (dst != base) {
+ // lwr dst, offset(base)
+ // lwl dst, offset+3(base)
+ MCE.emitWordLE(
+ (0x26 << 26) | (base << 21) | (dst << 16) | (offset & 0xffff));
+ MCE.emitWordLE(
+ (0x22 << 26) | (base << 21) | (dst << 16) | ((offset+3) & 0xffff));
+ return 2;
+ } else {
+ // lwr at, offset(base)
+ // lwl at, offset+3(base)
+ // addu dst, at, $zero
+ MCE.emitWordLE(
+ (0x26 << 26) | (base << 21) | (at << 16) | (offset & 0xffff));
+ MCE.emitWordLE(
+ (0x22 << 26) | (base << 21) | (at << 16) | ((offset+3) & 0xffff));
+ MCE.emitWordLE(
+ (0x0 << 26) | (at << 21) | (0x0 << 16) | (dst << 11) | (0x0 << 6) | 0x21);
+ return 3;
+ }
+}
+
+int MipsCodeEmitter::emitUSH(const MachineInstr &MI) {
+ unsigned src = getMachineOpValue(MI, MI.getOperand(0));
+ unsigned base = getMachineOpValue(MI, MI.getOperand(1));
+ unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
+ unsigned at = 1;
+ // sb src, offset(base)
+ // srl at,src,8
+ // sb at, offset+1(base)
+ MCE.emitWordLE(
+ (0x28 << 26) | (base << 21) | (src << 16) | (offset & 0xffff));
+ MCE.emitWordLE(
+ (0x0 << 26) | (0x0 << 21) | (src << 16) | (at << 11) | (0x8 << 6) | 0x2);
+ MCE.emitWordLE(
+ (0x28 << 26) | (base << 21) | (at << 16) | ((offset+1) & 0xffff));
+ return 3;
+}
+
+int MipsCodeEmitter::emitULH(const MachineInstr &MI) {
+ unsigned dst = getMachineOpValue(MI, MI.getOperand(0));
+ unsigned base = getMachineOpValue(MI, MI.getOperand(1));
+ unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
+ unsigned at = 1;
+ // lbu at, offset(base)
+ // lb dst, offset+1(base)
+ // sll dst,dst,8
+ // or dst,dst,at
+ MCE.emitWordLE(
+ (0x24 << 26) | (base << 21) | (at << 16) | (offset & 0xffff));
+ MCE.emitWordLE(
+ (0x20 << 26) | (base << 21) | (dst << 16) | ((offset+1) & 0xffff));
+ MCE.emitWordLE(
+ (0x0 << 26) | (0x0 << 21) | (dst << 16) | (dst << 11) | (0x8 << 6) | 0x0);
+ MCE.emitWordLE(
+ (0x0 << 26) | (dst << 21) | (at << 16) | (dst << 11) | (0x0 << 6) | 0x25);
+ return 4;
+}
+
+int MipsCodeEmitter::emitULHu(const MachineInstr &MI) {
+ unsigned dst = getMachineOpValue(MI, MI.getOperand(0));
+ unsigned base = getMachineOpValue(MI, MI.getOperand(1));
+ unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
+ unsigned at = 1;
+ // lbu at, offset(base)
+ // lbu dst, offset+1(base)
+ // sll dst,dst,8
+ // or dst,dst,at
+ MCE.emitWordLE(
+ (0x24 << 26) | (base << 21) | (at << 16) | (offset & 0xffff));
+ MCE.emitWordLE(
+ (0x24 << 26) | (base << 21) | (dst << 16) | ((offset+1) & 0xffff));
+ MCE.emitWordLE(
+ (0x0 << 26) | (0x0 << 21) | (dst << 16) | (dst << 11) | (0x8 << 6) | 0x0);
+ MCE.emitWordLE(
+ (0x0 << 26) | (dst << 21) | (at << 16) | (dst << 11) | (0x0 << 6) | 0x25);
+ return 4;
+}
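
The only difference between the two halfword expansions is the load of the high byte (lb vs. lbu). A behavioral model in plain C++, assuming the little-endian byte order implied by emitWordLE and the offset/offset+1 ordering above:

    // What the ULH / ULHu sequences compute from the two bytes at P.
    static int32_t ulh(const uint8_t *P) {    // lbu, lb, sll, or
      uint32_t Hi = static_cast<uint32_t>(static_cast<int8_t>(P[1])) << 8;
      return static_cast<int32_t>(Hi | P[0]); // sign-extended halfword
    }
    static uint32_t ulhu(const uint8_t *P) {  // lbu, lbu, sll, or
      return (static_cast<uint32_t>(P[1]) << 8) | P[0]; // zero-extended
    }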
+
void MipsCodeEmitter::emitInstruction(const MachineInstr &MI) {
DEBUG(errs() << "JIT: " << (void*)MCE.getCurrentPCValue() << ":\t" << MI);
@@ -239,11 +384,27 @@ void MipsCodeEmitter::emitInstruction(const MachineInstr &MI) {
if ((MI.getDesc().TSFlags & MipsII::FormMask) == MipsII::Pseudo)
return;
- ++NumEmitted; // Keep track of the # of mi's emitted
switch (MI.getOpcode()) {
+ case Mips::USW:
+ NumEmitted += emitUSW(MI);
+ break;
+ case Mips::ULW:
+ NumEmitted += emitULW(MI);
+ break;
+ case Mips::ULH:
+ NumEmitted += emitULH(MI);
+ break;
+ case Mips::ULHu:
+ NumEmitted += emitULHu(MI);
+ break;
+ case Mips::USH:
+ NumEmitted += emitUSH(MI);
+ break;
+
default:
emitWordLE(getBinaryCodeForInstr(MI));
+ ++NumEmitted; // Keep track of the # of mi's emitted
break;
}
@@ -259,7 +420,7 @@ void MipsCodeEmitter::emitWordLE(unsigned Word) {
/// createMipsJITCodeEmitterPass - Return a pass that emits the collected Mips
/// code to the specified MCE object.
FunctionPass *llvm::createMipsJITCodeEmitterPass(MipsTargetMachine &TM,
- JITCodeEmitter &JCE) {
+ JITCodeEmitter &JCE) {
return new MipsCodeEmitter(TM, JCE);
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsCondMov.td b/contrib/llvm/lib/Target/Mips/MipsCondMov.td
new file mode 100644
index 0000000..075a3e8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsCondMov.td
@@ -0,0 +1,194 @@
+//===-- MipsCondMov.td - Describe Mips Conditional Moves --*- tablegen -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the Conditional Moves implementation.
+//
+//===----------------------------------------------------------------------===//
+
+// Conditional moves:
+// These instructions are expanded in
+// MipsISelLowering::EmitInstrWithCustomInserter if the target does not have
+// conditional move instructions.
+// cond:int, data:int
+class CondMovIntInt<RegisterClass CRC, RegisterClass DRC, bits<6> funct,
+ string instr_asm> :
+ FR<0, funct, (outs DRC:$rd), (ins DRC:$rs, CRC:$rt, DRC:$F),
+ !strconcat(instr_asm, "\t$rd, $rs, $rt"), [], NoItinerary> {
+ let shamt = 0;
+ let Constraints = "$F = $rd";
+}
+
+// cond:int, data:float
+class CondMovIntFP<RegisterClass CRC, RegisterClass DRC, bits<5> fmt,
+ bits<6> func, string instr_asm> :
+ FFR<0x11, func, fmt, (outs DRC:$fd), (ins DRC:$fs, CRC:$rt, DRC:$F),
+ !strconcat(instr_asm, "\t$fd, $fs, $rt"), []> {
+ bits<5> rt;
+ let ft = rt;
+ let Constraints = "$F = $fd";
+}
+
+// cond:float, data:int
+class CondMovFPInt<RegisterClass RC, SDNode cmov, bits<1> tf,
+ string instr_asm> :
+ FCMOV<tf, (outs RC:$rd), (ins RC:$rs, RC:$F),
+ !strconcat(instr_asm, "\t$rd, $rs, $$fcc0"),
+ [(set RC:$rd, (cmov RC:$rs, RC:$F))]> {
+ let cc = 0;
+ let Uses = [FCR31];
+ let Constraints = "$F = $rd";
+}
+
+// cond:float, data:float
+class CondMovFPFP<RegisterClass RC, SDNode cmov, bits<5> fmt, bits<1> tf,
+ string instr_asm> :
+ FFCMOV<fmt, tf, (outs RC:$fd), (ins RC:$fs, RC:$F),
+ !strconcat(instr_asm, "\t$fd, $fs, $$fcc0"),
+ [(set RC:$fd, (cmov RC:$fs, RC:$F))]> {
+ let cc = 0;
+ let Uses = [FCR31];
+ let Constraints = "$F = $fd";
+}
+
+// select patterns
+multiclass MovzPats0<RegisterClass CRC, RegisterClass DRC,
+ Instruction MOVZInst, Instruction SLTOp,
+ Instruction SLTuOp, Instruction SLTiOp,
+ Instruction SLTiuOp> {
+ def : Pat<(select (i32 (setge CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTOp CRC:$lhs, CRC:$rhs), DRC:$F)>;
+ def : Pat<(select (i32 (setuge CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTuOp CRC:$lhs, CRC:$rhs), DRC:$F)>;
+ def : Pat<(select (i32 (setge CRC:$lhs, immSExt16:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTiOp CRC:$lhs, immSExt16:$rhs), DRC:$F)>;
+  def : Pat<(select (i32 (setuge CRC:$lhs, immSExt16:$rhs)), DRC:$T, DRC:$F),
+            (MOVZInst DRC:$T, (SLTiuOp CRC:$lhs, immSExt16:$rhs), DRC:$F)>;
+ def : Pat<(select (i32 (setle CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTOp CRC:$rhs, CRC:$lhs), DRC:$F)>;
+ def : Pat<(select (i32 (setule CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (SLTuOp CRC:$rhs, CRC:$lhs), DRC:$F)>;
+}
+
+multiclass MovzPats1<RegisterClass CRC, RegisterClass DRC,
+ Instruction MOVZInst, Instruction XOROp> {
+ def : Pat<(select (i32 (seteq CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, (XOROp CRC:$lhs, CRC:$rhs), DRC:$F)>;
+ def : Pat<(select (i32 (seteq CRC:$lhs, 0)), DRC:$T, DRC:$F),
+ (MOVZInst DRC:$T, CRC:$lhs, DRC:$F)>;
+}
+
+multiclass MovnPats<RegisterClass CRC, RegisterClass DRC, Instruction MOVNInst,
+ Instruction XOROp> {
+ def : Pat<(select (i32 (setne CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F),
+ (MOVNInst DRC:$T, (XOROp CRC:$lhs, CRC:$rhs), DRC:$F)>;
+ def : Pat<(select CRC:$cond, DRC:$T, DRC:$F),
+ (MOVNInst DRC:$T, CRC:$cond, DRC:$F)>;
+ def : Pat<(select (i32 (setne CRC:$lhs, 0)),DRC:$T, DRC:$F),
+ (MOVNInst DRC:$T, CRC:$lhs, DRC:$F)>;
+}
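
As a cross-check of the select patterns above (an illustrative C++ model, not LLVM code): movz $rd, $rs, $rt copies $rs into $rd only when $rt is zero, and the $F = $rd constraint seeds $rd with the false value, so the first MovzPats0 pattern really does compute a select:

    // select (setge a, b), T, F  ==>  MOVZ T, (SLT a, b), F   with $F = $rd
    unsigned selectSetGE(int A, int B, unsigned T, unsigned F) {
      unsigned Cond = (A < B) ? 1 : 0; // slt $c, a, b
      unsigned Rd = F;                 // $rd is tied to the false value
      if (Cond == 0)                   // movz: move only when condition is zero
        Rd = T;
      return Rd;                       // == (A >= B) ? T : F
    }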
+
+// Instantiation of instructions.
+def MOVZ_I_I : CondMovIntInt<CPURegs, CPURegs, 0x0a, "movz">;
+let Predicates = [HasMips64] in {
+ def MOVZ_I_I64 : CondMovIntInt<CPURegs, CPU64Regs, 0x0a, "movz">;
+ def MOVZ_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0a, "movz">;
+ def MOVZ_I64_I64 : CondMovIntInt<CPU64Regs, CPU64Regs, 0x0a, "movz">;
+}
+
+def MOVN_I_I : CondMovIntInt<CPURegs, CPURegs, 0x0b, "movn">;
+let Predicates = [HasMips64] in {
+ def MOVN_I_I64 : CondMovIntInt<CPURegs, CPU64Regs, 0x0b, "movn">;
+ def MOVN_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0b, "movn">;
+ def MOVN_I64_I64 : CondMovIntInt<CPU64Regs, CPU64Regs, 0x0b, "movn">;
+}
+
+def MOVZ_I_S : CondMovIntFP<CPURegs, FGR32, 16, 18, "movz.s">;
+def MOVZ_I64_S : CondMovIntFP<CPU64Regs, FGR32, 16, 18, "movz.s">,
+ Requires<[HasMips64]>;
+
+def MOVN_I_S : CondMovIntFP<CPURegs, FGR32, 16, 19, "movn.s">;
+def MOVN_I64_S : CondMovIntFP<CPU64Regs, FGR32, 16, 19, "movn.s">,
+ Requires<[HasMips64]>;
+
+let Predicates = [NotFP64bit] in {
+ def MOVZ_I_D32 : CondMovIntFP<CPURegs, AFGR64, 17, 18, "movz.d">;
+ def MOVN_I_D32 : CondMovIntFP<CPURegs, AFGR64, 17, 19, "movn.d">;
+}
+let Predicates = [IsFP64bit] in {
+ def MOVZ_I_D64 : CondMovIntFP<CPURegs, FGR64, 17, 18, "movz.d">;
+ def MOVZ_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 18, "movz.d">;
+ def MOVN_I_D64 : CondMovIntFP<CPURegs, FGR64, 17, 19, "movn.d">;
+ def MOVN_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 19, "movn.d">;
+}
+
+def MOVT_I : CondMovFPInt<CPURegs, MipsCMovFP_T, 1, "movt">;
+def MOVT_I64 : CondMovFPInt<CPU64Regs, MipsCMovFP_T, 1, "movt">,
+ Requires<[HasMips64]>;
+
+def MOVF_I : CondMovFPInt<CPURegs, MipsCMovFP_F, 0, "movf">;
+def MOVF_I64 : CondMovFPInt<CPU64Regs, MipsCMovFP_F, 0, "movf">,
+ Requires<[HasMips64]>;
+
+def MOVT_S : CondMovFPFP<FGR32, MipsCMovFP_T, 16, 1, "movt.s">;
+def MOVF_S : CondMovFPFP<FGR32, MipsCMovFP_F, 16, 0, "movf.s">;
+
+let Predicates = [NotFP64bit] in {
+ def MOVT_D32 : CondMovFPFP<AFGR64, MipsCMovFP_T, 17, 1, "movt.d">;
+ def MOVF_D32 : CondMovFPFP<AFGR64, MipsCMovFP_F, 17, 0, "movf.d">;
+}
+let Predicates = [IsFP64bit] in {
+ def MOVT_D64 : CondMovFPFP<FGR64, MipsCMovFP_T, 17, 1, "movt.d">;
+ def MOVF_D64 : CondMovFPFP<FGR64, MipsCMovFP_F, 17, 0, "movf.d">;
+}
+
+// Instantiation of conditional move patterns.
+defm : MovzPats0<CPURegs, CPURegs, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>;
+defm : MovzPats1<CPURegs, CPURegs, MOVZ_I_I, XOR>;
+let Predicates = [HasMips64] in {
+ defm : MovzPats0<CPURegs, CPU64Regs, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats0<CPU64Regs, CPURegs, MOVZ_I_I, SLT64, SLTu64, SLTi64,
+ SLTiu64>;
+ defm : MovzPats0<CPU64Regs, CPU64Regs, MOVZ_I_I64, SLT64, SLTu64, SLTi64,
+ SLTiu64>;
+ defm : MovzPats1<CPURegs, CPU64Regs, MOVZ_I_I64, XOR>;
+ defm : MovzPats1<CPU64Regs, CPURegs, MOVZ_I64_I, XOR64>;
+ defm : MovzPats1<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XOR64>;
+}
+
+defm : MovnPats<CPURegs, CPURegs, MOVN_I_I, XOR>;
+let Predicates = [HasMips64] in {
+ defm : MovnPats<CPURegs, CPU64Regs, MOVN_I_I64, XOR>;
+ defm : MovnPats<CPU64Regs, CPURegs, MOVN_I64_I, XOR64>;
+ defm : MovnPats<CPU64Regs, CPU64Regs, MOVN_I64_I64, XOR64>;
+}
+
+defm : MovzPats0<CPURegs, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>;
+defm : MovzPats1<CPURegs, FGR32, MOVZ_I_S, XOR>;
+defm : MovnPats<CPURegs, FGR32, MOVN_I_S, XOR>;
+let Predicates = [HasMips64] in {
+ defm : MovzPats0<CPU64Regs, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64,
+ SLTiu64>;
+ defm : MovzPats1<CPU64Regs, FGR32, MOVZ_I64_S, XOR64>;
+ defm : MovnPats<CPU64Regs, FGR32, MOVN_I64_S, XOR64>;
+}
+
+let Predicates = [NotFP64bit] in {
+ defm : MovzPats0<CPURegs, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats1<CPURegs, AFGR64, MOVZ_I_D32, XOR>;
+ defm : MovnPats<CPURegs, AFGR64, MOVN_I_D32, XOR>;
+}
+let Predicates = [IsFP64bit] in {
+ defm : MovzPats0<CPURegs, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats0<CPU64Regs, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64,
+ SLTiu64>;
+ defm : MovzPats1<CPURegs, FGR64, MOVZ_I_D64, XOR>;
+ defm : MovzPats1<CPU64Regs, FGR64, MOVZ_I64_D64, XOR64>;
+ defm : MovnPats<CPURegs, FGR64, MOVN_I_D64, XOR>;
+ defm : MovnPats<CPU64Regs, FGR64, MOVN_I64_D64, XOR64>;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index be3b7a0..debf2f1 100644
--- a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -1,4 +1,4 @@
-//===-- DelaySlotFiller.cpp - Mips delay slot filler ---------------------===//
+//===-- DelaySlotFiller.cpp - Mips Delay Slot Filler ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -96,7 +96,7 @@ runOnMachineBasicBlock(MachineBasicBlock &MBB) {
LastFiller = MBB.end();
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
- if (I->getDesc().hasDelaySlot()) {
+ if (I->hasDelaySlot()) {
++FilledSlots;
Changed = true;
@@ -105,8 +105,7 @@ runOnMachineBasicBlock(MachineBasicBlock &MBB) {
if (EnableDelaySlotFiller && findDelayInstr(MBB, I, D)) {
MBB.splice(llvm::next(I), &MBB, D);
++UsefulSlots;
- }
- else
+ } else
BuildMI(MBB, llvm::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
// Record the filler instruction that filled the delay slot.
@@ -146,7 +145,7 @@ bool Filler::findDelayInstr(MachineBasicBlock &MBB,
|| I->isInlineAsm()
|| I->isLabel()
|| FI == LastFiller
- || I->getDesc().isPseudo()
+ || I->isPseudo()
//
// Should not allow:
// ERET, DERET or WAIT, PAUSE. Need to add these to instruction
@@ -167,23 +166,21 @@ bool Filler::findDelayInstr(MachineBasicBlock &MBB,
}
bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
- bool &sawLoad,
- bool &sawStore,
+ bool &sawLoad, bool &sawStore,
SmallSet<unsigned, 32> &RegDefs,
SmallSet<unsigned, 32> &RegUses) {
if (candidate->isImplicitDef() || candidate->isKill())
return true;
- MCInstrDesc MCID = candidate->getDesc();
// Loads or stores cannot be moved past a store to the delay slot
- // and stores cannot be moved past a load.
- if (MCID.mayLoad()) {
+ // and stores cannot be moved past a load.
+ if (candidate->mayLoad()) {
if (sawStore)
return true;
sawLoad = true;
}
- if (MCID.mayStore()) {
+ if (candidate->mayStore()) {
if (sawStore)
return true;
sawStore = true;
@@ -191,7 +188,7 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
return true;
}
- assert((!MCID.isCall() && !MCID.isReturn()) &&
+ assert((!candidate->isCall() && !candidate->isReturn()) &&
"Cannot put calls or returns in delay slot.");
for (unsigned i = 0, e = candidate->getNumOperands(); i!= e; ++i) {
@@ -221,11 +218,11 @@ void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
SmallSet<unsigned, 32>& RegUses) {
// If MI is a call or return, just examine the explicit non-variadic operands.
MCInstrDesc MCID = MI->getDesc();
- unsigned e = MCID.isCall() || MCID.isReturn() ? MCID.getNumOperands() :
- MI->getNumOperands();
-
- // Add RA to RegDefs to prevent users of RA from going into delay slot.
- if (MCID.isCall())
+ unsigned e = MI->isCall() || MI->isReturn() ? MCID.getNumOperands() :
+ MI->getNumOperands();
+
+ // Add RA to RegDefs to prevent users of RA from going into delay slot.
+ if (MI->isCall())
RegDefs.insert(Mips::RA);
for (unsigned i = 0; i != e; ++i) {
@@ -247,7 +244,7 @@ bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg) {
if (RegSet.count(Reg))
return true;
// check Aliased Registers
- for (const unsigned *Alias = TM.getRegisterInfo()->getAliasSet(Reg);
+ for (const uint16_t *Alias = TM.getRegisterInfo()->getAliasSet(Reg);
*Alias; ++Alias)
if (RegSet.count(*Alias))
return true;
diff --git a/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp b/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp
index 03d922f..119d1a8 100644
--- a/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp
@@ -1,4 +1,4 @@
-//===-- MipsEmitGPRestore.cpp - Emit GP restore instruction----------------===//
+//===-- MipsEmitGPRestore.cpp - Emit GP Restore Instruction ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -44,11 +44,14 @@ namespace {
} // end of anonymous namespace
bool Inserter::runOnMachineFunction(MachineFunction &F) {
- if (TM.getRelocationModel() != Reloc::PIC_)
+ MipsFunctionInfo *MipsFI = F.getInfo<MipsFunctionInfo>();
+
+ if ((TM.getRelocationModel() != Reloc::PIC_) ||
+ (!MipsFI->globalBaseRegFixed()))
return false;
bool Changed = false;
- int FI = F.getInfo<MipsFunctionInfo>()->getGPFI();
+ int FI = MipsFI->getGPFI();
for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
MFI != MFE; ++MFI) {
@@ -60,7 +63,7 @@ bool Inserter::runOnMachineFunction(MachineFunction &F) {
if (MBB.isLandingPad()) {
// Find EH_LABEL first.
for (; I->getOpcode() != TargetOpcode::EH_LABEL; ++I) ;
-
+
// Insert lw.
++I;
DebugLoc dl = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
@@ -81,7 +84,7 @@ bool Inserter::runOnMachineFunction(MachineFunction &F) {
.addImm(0);
Changed = true;
}
- }
+ }
return Changed;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
index a622258..baeae97 100644
--- a/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
@@ -1,4 +1,4 @@
-//===-- MipsExpandPseudo.cpp - Expand pseudo instructions ----------------===//
+//===-- MipsExpandPseudo.cpp - Expand Pseudo Instructions ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -64,16 +64,22 @@ bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
const MCInstrDesc& MCid = I->getDesc();
switch(MCid.getOpcode()) {
- default:
+ default:
++I;
continue;
+ case Mips::SETGP2:
+ // Convert "setgp2 $globalreg, $t9" to "addu $globalreg, $v0, $t9"
+ BuildMI(MBB, I, I->getDebugLoc(), TII->get(Mips::ADDu),
+ I->getOperand(0).getReg())
+ .addReg(Mips::V0).addReg(I->getOperand(1).getReg());
+ break;
case Mips::BuildPairF64:
ExpandBuildPairF64(MBB, I);
break;
case Mips::ExtractElementF64:
ExpandExtractElementF64(MBB, I);
break;
- }
+ }
// delete original instr
MBB.erase(I++);
@@ -84,12 +90,12 @@ bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
}
void MipsExpandPseudo::ExpandBuildPairF64(MachineBasicBlock& MBB,
- MachineBasicBlock::iterator I) {
+ MachineBasicBlock::iterator I) {
unsigned DstReg = I->getOperand(0).getReg();
unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
const MCInstrDesc& Mtc1Tdd = TII->get(Mips::MTC1);
DebugLoc dl = I->getDebugLoc();
- const unsigned* SubReg =
+ const uint16_t* SubReg =
TM.getRegisterInfo()->getSubRegisters(DstReg);
// mtc1 Lo, $fp
@@ -105,12 +111,12 @@ void MipsExpandPseudo::ExpandExtractElementF64(MachineBasicBlock& MBB,
unsigned N = I->getOperand(2).getImm();
const MCInstrDesc& Mfc1Tdd = TII->get(Mips::MFC1);
DebugLoc dl = I->getDebugLoc();
- const unsigned* SubReg = TM.getRegisterInfo()->getSubRegisters(SrcReg);
+ const uint16_t* SubReg = TM.getRegisterInfo()->getSubRegisters(SrcReg);
BuildMI(MBB, I, dl, Mfc1Tdd, DstReg).addReg(*(SubReg + N));
}
-/// createMipsMipsExpandPseudoPass - Returns a pass that expands pseudo
+/// createMipsExpandPseudoPass - Returns a pass that expands pseudo
/// instrs into real instrs
FunctionPass *llvm::createMipsExpandPseudoPass(MipsTargetMachine &tm) {
return new MipsExpandPseudo(tm);
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
index 22d1e47..f8ea3d0 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp
@@ -1,4 +1,4 @@
-//=======- MipsFrameLowering.cpp - Mips Frame Information ------*- C++ -*-====//
+//===-- MipsFrameLowering.cpp - Mips Frame Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,8 +12,10 @@
//===----------------------------------------------------------------------===//
#include "MipsFrameLowering.h"
+#include "MipsAnalyzeImmediate.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -84,55 +86,44 @@ using namespace llvm;
// if frame pointer elimination is disabled.
bool MipsFrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects()
- || MFI->isFrameAddressTaken();
+ return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken();
}
bool MipsFrameLowering::targetHandlesStackFrameRounding() const {
return true;
}
-static unsigned AlignOffset(unsigned Offset, unsigned Align) {
- return (Offset + Align - 1) / Align * Align;
-}
-
-// expand pair of register and immediate if the immediate doesn't fit in the
-// 16-bit offset field.
-// e.g.
-// if OrigImm = 0x10000, OrigReg = $sp:
-// generate the following sequence of instrs:
-// lui $at, hi(0x10000)
-// addu $at, $sp, $at
-//
-// (NewReg, NewImm) = ($at, lo(Ox10000))
-// return true
-static bool expandRegLargeImmPair(unsigned OrigReg, int OrigImm,
- unsigned& NewReg, int& NewImm,
- MachineBasicBlock& MBB,
- MachineBasicBlock::iterator I) {
- // OrigImm fits in the 16-bit field
- if (OrigImm < 0x8000 && OrigImm >= -0x8000) {
- NewReg = OrigReg;
- NewImm = OrigImm;
- return false;
- }
+// Build an instruction sequence to load an immediate that is too large to
+// fit in a 16-bit field and add the result to Reg.
+static void expandLargeImm(unsigned Reg, int64_t Imm, bool IsN64,
+ const MipsInstrInfo &TII, MachineBasicBlock& MBB,
+ MachineBasicBlock::iterator II, DebugLoc DL) {
+ unsigned LUi = IsN64 ? Mips::LUi64 : Mips::LUi;
+ unsigned ADDu = IsN64 ? Mips::DADDu : Mips::ADDu;
+ unsigned ZEROReg = IsN64 ? Mips::ZERO_64 : Mips::ZERO;
+ unsigned ATReg = IsN64 ? Mips::AT_64 : Mips::AT;
+ MipsAnalyzeImmediate AnalyzeImm;
+ const MipsAnalyzeImmediate::InstSeq &Seq =
+ AnalyzeImm.Analyze(Imm, IsN64 ? 64 : 32, false /* LastInstrIsADDiu */);
+ MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
+
+ // The first instruction can be a LUi, which is different from other
+ // instructions (ADDiu, ORI and SLL) in that it does not have a register
+ // operand.
+ if (Inst->Opc == LUi)
+ BuildMI(MBB, II, DL, TII.get(LUi), ATReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+ else
+ BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
- MachineFunction* MF = MBB.getParent();
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
- DebugLoc DL = I->getDebugLoc();
- int ImmLo = (short)(OrigImm & 0xffff);
- int ImmHi = (((unsigned)OrigImm & 0xffff0000) >> 16) +
- ((OrigImm & 0x8000) != 0);
-
- // FIXME: change this when mips goes MC".
- BuildMI(MBB, I, DL, TII->get(Mips::NOAT));
- BuildMI(MBB, I, DL, TII->get(Mips::LUi), Mips::AT).addImm(ImmHi);
- BuildMI(MBB, I, DL, TII->get(Mips::ADDu), Mips::AT).addReg(OrigReg)
- .addReg(Mips::AT);
- NewReg = Mips::AT;
- NewImm = ImmLo;
+ // Build the remaining instructions in Seq.
+ for (++Inst; Inst != Seq.end(); ++Inst)
+ BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
- return true;
+ BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(Reg).addReg(ATReg);
}
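
For the common case where one LUi plus the final ADDu suffices, the split that the deleted expandRegLargeImmPair computed by hand (and that MipsAnalyzeImmediate must reproduce) can be sketched as:

    // Split Imm so that (Hi << 16) + SignExtend<16>(Lo) == Imm; the +1 carry
    // compensates for a Lo that gets sign-extended as negative.
    static void splitImm32(int32_t Imm, uint16_t &Hi, uint16_t &Lo) {
      Lo = static_cast<uint16_t>(Imm & 0xffff);
      Hi = static_cast<uint16_t>((static_cast<uint32_t>(Imm) >> 16) +
                                 ((Imm & 0x8000) ? 1 : 0));
    }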
void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
@@ -146,29 +137,41 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
bool isPIC = (MF.getTarget().getRelocationModel() == Reloc::PIC_);
- unsigned NewReg = 0;
- int NewImm = 0;
- bool ATUsed;
+ unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
+ unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
+ unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
+ unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
// First, compute final stack size.
unsigned RegSize = STI.isGP32bit() ? 4 : 8;
unsigned StackAlign = getStackAlignment();
- unsigned LocalVarAreaOffset = MipsFI->needGPSaveRestore() ?
+ unsigned LocalVarAreaOffset = MipsFI->needGPSaveRestore() ?
(MFI->getObjectOffset(MipsFI->getGPFI()) + RegSize) :
MipsFI->getMaxCallFrameSize();
- unsigned StackSize = AlignOffset(LocalVarAreaOffset, StackAlign) +
- AlignOffset(MFI->getStackSize(), StackAlign);
+ uint64_t StackSize = RoundUpToAlignment(LocalVarAreaOffset, StackAlign) +
+ RoundUpToAlignment(MFI->getStackSize(), StackAlign);
// Update stack size
- MFI->setStackSize(StackSize);
-
- BuildMI(MBB, MBBI, dl, TII.get(Mips::NOREORDER));
-
- // TODO: check need from GP here.
- if (isPIC && STI.isABI_O32())
- BuildMI(MBB, MBBI, dl, TII.get(Mips::CPLOAD))
- .addReg(RegInfo->getPICCallReg());
- BuildMI(MBB, MBBI, dl, TII.get(Mips::NOMACRO));
+ MFI->setStackSize(StackSize);
+
+ // Emit instructions that set the global base register if the target ABI is
+ // O32.
+ if (isPIC && MipsFI->globalBaseRegSet() && STI.isABI_O32() &&
+ !MipsFI->globalBaseRegFixed()) {
+ // See MipsInstrInfo.td for explanation.
+ MachineBasicBlock *NewEntry = MF.CreateMachineBasicBlock();
+ MF.insert(&MBB, NewEntry);
+ NewEntry->addSuccessor(&MBB);
+
+ // Copy live in registers.
+ for (MachineBasicBlock::livein_iterator R = MBB.livein_begin();
+ R != MBB.livein_end(); ++R)
+ NewEntry->addLiveIn(*R);
+
+    BuildMI(*NewEntry, NewEntry->begin(), dl, TII.get(Mips::SETGP01),
+ Mips::V0);
+ }
// No need to allocate space on the stack.
if (StackSize == 0 && !MFI->adjustsStack()) return;
@@ -177,15 +180,13 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
std::vector<MachineMove> &Moves = MMI.getFrameMoves();
MachineLocation DstML, SrcML;
- // Adjust stack : addi sp, sp, (-imm)
- ATUsed = expandRegLargeImmPair(Mips::SP, -StackSize, NewReg, NewImm, MBB,
- MBBI);
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDiu), Mips::SP)
- .addReg(NewReg).addImm(NewImm);
-
- // FIXME: change this when mips goes MC".
- if (ATUsed)
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ATMACRO));
+ // Adjust stack.
+  if (isInt<16>(-StackSize)) // addiu sp, sp, (-stacksize)
+ BuildMI(MBB, MBBI, dl, TII.get(ADDiu), SP).addReg(SP).addImm(-StackSize);
+  else { // Expand immediate that doesn't fit in a 16-bit field.
+ MipsFI->setEmitNOAT();
+ expandLargeImm(SP, -StackSize, STI.isABI_N64(), TII, MBB, MBBI, dl);
+ }
// emit ".cfi_def_cfa_offset StackSize"
MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
@@ -202,13 +203,13 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
// register to the stack.
for (unsigned i = 0; i < CSI.size(); ++i)
++MBBI;
-
+
// Iterate over list of callee-saved registers and emit .cfi_offset
// directives.
MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);
-
+
for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I) {
int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
@@ -217,7 +218,7 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
// If Reg is a double precision register, emit two cfa_offsets,
// one for each of the paired single precision registers.
if (Mips::AFGR64RegisterClass->contains(Reg)) {
- const unsigned *SubRegs = RegInfo->getSubRegisters(Reg);
+ const uint16_t *SubRegs = RegInfo->getSubRegisters(Reg);
MachineLocation DstML0(MachineLocation::VirtualFP, Offset);
MachineLocation DstML1(MachineLocation::VirtualFP, Offset + 4);
MachineLocation SrcML0(*SubRegs);
@@ -236,19 +237,18 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
}
}
- }
+ }
// if framepointer enabled, set it to point to the stack pointer.
if (hasFP(MF)) {
- // Insert instruction "move $fp, $sp" at this location.
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDu), Mips::FP)
- .addReg(Mips::SP).addReg(Mips::ZERO);
+ // Insert instruction "move $fp, $sp" at this location.
+ BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO);
- // emit ".cfi_def_cfa_register $fp"
+ // emit ".cfi_def_cfa_register $fp"
MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel);
- DstML = MachineLocation(Mips::FP);
+ DstML = MachineLocation(FP);
SrcML = MachineLocation(MachineLocation::VirtualFP);
Moves.push_back(MachineMove(SetFPLabel, DstML, SrcML));
}
@@ -256,12 +256,8 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
// Restore GP from the saved stack location
if (MipsFI->needGPSaveRestore()) {
unsigned Offset = MFI->getObjectOffset(MipsFI->getGPFI());
- BuildMI(MBB, MBBI, dl, TII.get(Mips::CPRESTORE)).addImm(Offset);
-
- if (Offset >= 0x8000) {
- BuildMI(MBB, llvm::prior(MBBI), dl, TII.get(Mips::MACRO));
- BuildMI(MBB, MBBI, dl, TII.get(Mips::NOMACRO));
- }
+ BuildMI(MBB, MBBI, dl, TII.get(Mips::CPRESTORE)).addImm(Offset)
+ .addReg(Mips::GP);
}
}
@@ -272,59 +268,59 @@ void MipsFrameLowering::emitEpilogue(MachineFunction &MF,
const MipsInstrInfo &TII =
*static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo());
DebugLoc dl = MBBI->getDebugLoc();
-
- // Get the number of bytes from FrameInfo
- unsigned StackSize = MFI->getStackSize();
-
- unsigned NewReg = 0;
- int NewImm = 0;
- bool ATUsed = false;
+ unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
+ unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
+ unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
+ unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
// if framepointer enabled, restore the stack pointer.
if (hasFP(MF)) {
// Find the first instruction that restores a callee-saved register.
MachineBasicBlock::iterator I = MBBI;
-
+
for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
--I;
// Insert instruction "move $sp, $fp" at this location.
- BuildMI(MBB, I, dl, TII.get(Mips::ADDu), Mips::SP)
- .addReg(Mips::FP).addReg(Mips::ZERO);
+ BuildMI(MBB, I, dl, TII.get(ADDu), SP).addReg(FP).addReg(ZERO);
}
- // adjust stack : insert addi sp, sp, (imm)
- if (StackSize) {
- ATUsed = expandRegLargeImmPair(Mips::SP, StackSize, NewReg, NewImm, MBB,
- MBBI);
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ADDiu), Mips::SP)
- .addReg(NewReg).addImm(NewImm);
+ // Get the number of bytes from FrameInfo
+ uint64_t StackSize = MFI->getStackSize();
- // FIXME: change this when mips goes MC".
- if (ATUsed)
- BuildMI(MBB, MBBI, dl, TII.get(Mips::ATMACRO));
- }
+ if (!StackSize)
+ return;
+
+ // Adjust stack.
+  if (isInt<16>(StackSize)) // addiu sp, sp, (stacksize)
+    BuildMI(MBB, MBBI, dl, TII.get(ADDiu), SP).addReg(SP).addImm(StackSize);
+  else // Expand immediate that doesn't fit in a 16-bit field.
+ expandLargeImm(SP, StackSize, STI.isABI_N64(), TII, MBB, MBBI, dl);
}
void MipsFrameLowering::
processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
MachineRegisterInfo& MRI = MF.getRegInfo();
+ unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
// FIXME: remove this code if register allocator can correctly mark
// $fp and $ra used or unused.
// Mark $fp and $ra as used or unused.
if (hasFP(MF))
- MRI.setPhysRegUsed(Mips::FP);
+ MRI.setPhysRegUsed(FP);
- // The register allocator might determine $ra is used after seeing
+ // The register allocator might determine $ra is used after seeing
// instruction "jr $ra", but we do not want PrologEpilogInserter to insert
// instructions to save/restore $ra unless there is a function call.
// To correct this, $ra is explicitly marked unused if there is no
// function call.
if (MF.getFrameInfo()->hasCalls())
MRI.setPhysRegUsed(Mips::RA);
- else
+ else {
MRI.setPhysRegUnused(Mips::RA);
+ MRI.setPhysRegUnused(Mips::RA_64);
+ }
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
index c249756..bd1d89f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h
@@ -1,4 +1,4 @@
-//==--- MipsFrameLowering.h - Define frame lowering for Mips --*- C++ -*---===//
+//===-- MipsFrameLowering.h - Define frame lowering for Mips ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 9c831ed..f0651c6 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -1,4 +1,4 @@
-//===-- MipsISelDAGToDAG.cpp - A dag to dag inst selector for Mips --------===//
+//===-- MipsISelDAGToDAG.cpp - A Dag to Dag Inst Selector for Mips --------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,10 +13,12 @@
#define DEBUG_TYPE "mips-isel"
#include "Mips.h"
+#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
@@ -28,6 +30,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -63,6 +66,7 @@ public:
return "MIPS DAG->DAG Pattern Instruction Selection";
}
+ virtual bool runOnMachineFunction(MachineFunction &MF);
private:
// Include the pieces autogenerated from the target description.
@@ -81,17 +85,24 @@ private:
}
SDNode *getGlobalBaseReg();
+
+ std::pair<SDNode*, SDNode*> SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl,
+ EVT Ty, bool HasLo, bool HasHi);
+
SDNode *Select(SDNode *N);
// Complex Pattern.
- bool SelectAddr(SDValue N, SDValue &Base, SDValue &Offset);
+ bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, SDValue &Offset);
- // getI32Imm - Return a target constant with the specified
- // value, of type i32.
- inline SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
+ // getImm - Return a target constant with the specified value.
+ inline SDValue getImm(const SDNode *Node, unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm, Node->getValueType(0));
}
+ void ProcessFunctionAfterISel(MachineFunction &MF);
+ bool ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&);
+ void InitGlobalBaseReg(MachineFunction &MF);
+
virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
std::vector<SDValue> &OutOps);
@@ -99,20 +110,163 @@ private:
}
+// Insert instructions to initialize the global base register in the
+// first MBB of the function. When the ABI is O32 and the relocation model is
+// PIC, the necessary instructions are emitted later to prevent optimization
+// passes from moving them.
+void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) {
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+
+ if (!MipsFI->globalBaseRegSet())
+ return;
+
+ MachineBasicBlock &MBB = MF.front();
+ MachineBasicBlock::iterator I = MBB.begin();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
+ DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc();
+ unsigned V0, V1, GlobalBaseReg = MipsFI->getGlobalBaseReg();
+ bool FixGlobalBaseReg = MipsFI->globalBaseRegFixed();
+
+ if (Subtarget.isABI_O32() && FixGlobalBaseReg)
+ // $gp is the global base register.
+ V0 = V1 = GlobalBaseReg;
+ else {
+ const TargetRegisterClass *RC;
+ RC = Subtarget.isABI_N64() ?
+ Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass;
+
+ V0 = RegInfo.createVirtualRegister(RC);
+ V1 = RegInfo.createVirtualRegister(RC);
+ }
+
+ if (Subtarget.isABI_N64()) {
+ MF.getRegInfo().addLiveIn(Mips::T9_64);
+ MBB.addLiveIn(Mips::T9_64);
+
+ // lui $v0, %hi(%neg(%gp_rel(fname)))
+ // daddu $v1, $v0, $t9
+ // daddiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
+ const GlobalValue *FName = MF.getFunction();
+ BuildMI(MBB, I, DL, TII.get(Mips::LUi64), V0)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
+ BuildMI(MBB, I, DL, TII.get(Mips::DADDu), V1).addReg(V0).addReg(Mips::T9_64);
+ BuildMI(MBB, I, DL, TII.get(Mips::DADDiu), GlobalBaseReg).addReg(V1)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO);
+ } else if (MF.getTarget().getRelocationModel() == Reloc::Static) {
+ // Set global register to __gnu_local_gp.
+ //
+ // lui $v0, %hi(__gnu_local_gp)
+ // addiu $globalbasereg, $v0, %lo(__gnu_local_gp)
+ BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0)
+ .addExternalSymbol("__gnu_local_gp", MipsII::MO_ABS_HI);
+ BuildMI(MBB, I, DL, TII.get(Mips::ADDiu), GlobalBaseReg).addReg(V0)
+ .addExternalSymbol("__gnu_local_gp", MipsII::MO_ABS_LO);
+ } else {
+ MF.getRegInfo().addLiveIn(Mips::T9);
+ MBB.addLiveIn(Mips::T9);
+
+ if (Subtarget.isABI_N32()) {
+ // lui $v0, %hi(%neg(%gp_rel(fname)))
+ // addu $v1, $v0, $t9
+ // addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
+ const GlobalValue *FName = MF.getFunction();
+ BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
+ BuildMI(MBB, I, DL, TII.get(Mips::ADDu), V1).addReg(V0).addReg(Mips::T9);
+ BuildMI(MBB, I, DL, TII.get(Mips::ADDiu), GlobalBaseReg).addReg(V1)
+ .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO);
+ } else if (!MipsFI->globalBaseRegFixed()) {
+ assert(Subtarget.isABI_O32());
+
+ BuildMI(MBB, I, DL, TII.get(Mips::SETGP2), GlobalBaseReg)
+ .addReg(Mips::T9);
+ }
+ }
+}
+
+bool MipsDAGToDAGISel::ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI,
+ const MachineInstr& MI) {
+ unsigned DstReg = 0, ZeroReg = 0;
+
+ // Check if MI is "addiu $dst, $zero, 0" or "daddiu $dst, $zero, 0".
+ if ((MI.getOpcode() == Mips::ADDiu) &&
+ (MI.getOperand(1).getReg() == Mips::ZERO) &&
+ (MI.getOperand(2).getImm() == 0)) {
+ DstReg = MI.getOperand(0).getReg();
+ ZeroReg = Mips::ZERO;
+ } else if ((MI.getOpcode() == Mips::DADDiu) &&
+ (MI.getOperand(1).getReg() == Mips::ZERO_64) &&
+ (MI.getOperand(2).getImm() == 0)) {
+ DstReg = MI.getOperand(0).getReg();
+ ZeroReg = Mips::ZERO_64;
+ }
+
+ if (!DstReg)
+ return false;
+
+ // Replace uses with ZeroReg.
+ for (MachineRegisterInfo::use_iterator U = MRI->use_begin(DstReg),
+ E = MRI->use_end(); U != E; ++U) {
+ MachineOperand &MO = U.getOperand();
+ MachineInstr *MI = MO.getParent();
+
+    // Do not replace if it is a phi's operand or is tied to a def operand.
+ if (MI->isPHI() || MI->isRegTiedToDefOperand(U.getOperandNo()))
+ continue;
+
+ MO.setReg(ZeroReg);
+ }
+
+ return true;
+}
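
The effect, sketched on machine code (virtual register numbers are illustrative):

    // Before:  %v1 = ADDiu %ZERO, 0      After:  %v1 = ADDiu %ZERO, 0  (dead)
    //          %v2 = ADDu  %v0, %v1              %v2 = ADDu  %v0, %ZERO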
+
+void MipsDAGToDAGISel::ProcessFunctionAfterISel(MachineFunction &MF) {
+ InitGlobalBaseReg(MF);
+
+ MachineRegisterInfo *MRI = &MF.getRegInfo();
+
+ for (MachineFunction::iterator MFI = MF.begin(), MFE = MF.end(); MFI != MFE;
+ ++MFI)
+ for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I)
+ ReplaceUsesWithZeroReg(MRI, *I);
+}
+
+bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
+ bool Ret = SelectionDAGISel::runOnMachineFunction(MF);
+
+ ProcessFunctionAfterISel(MF);
+
+ return Ret;
+}
/// getGlobalBaseReg - Output the instructions required to put the
/// GOT address into a register.
SDNode *MipsDAGToDAGISel::getGlobalBaseReg() {
- unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
+ unsigned GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg();
return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
/// ComplexPattern used on MipsInstrInfo
/// Used on Mips Load/Store instructions
bool MipsDAGToDAGISel::
-SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
+SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {
EVT ValTy = Addr.getValueType();
- unsigned GPReg = ValTy == MVT::i32 ? Mips::GP : Mips::GP_64;
+
+ // If Parent is an unaligned f32 load or store, select a (base + index)
+ // floating point load/store instruction (luxc1 or suxc1).
+ const LSBaseSDNode* LS = 0;
+
+ if (Parent && (LS = dyn_cast<LSBaseSDNode>(Parent))) {
+ EVT VT = LS->getMemoryVT();
+
+ if (VT.getSizeInBits() / 8 > LS->getAlignment()) {
+ assert(TLI.allowsUnalignedMemoryAccesses(VT) &&
+ "Unaligned loads/stores not supported for this type.");
+ if (VT == MVT::f32)
+ return false;
+ }
+ }
// if Address is FI, get the TargetFrameIndex.
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
@@ -122,21 +276,16 @@ SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
}
// on PIC code Load GA
- if (TM.getRelocationModel() == Reloc::PIC_) {
- if (Addr.getOpcode() == MipsISD::WrapperPIC) {
- Base = CurDAG->getRegister(GPReg, ValTy);
- Offset = Addr.getOperand(0);
- return true;
- }
- } else {
+ if (Addr.getOpcode() == MipsISD::Wrapper) {
+ Base = Addr.getOperand(0);
+ Offset = Addr.getOperand(1);
+ return true;
+ }
+
+ if (TM.getRelocationModel() != Reloc::PIC_) {
if ((Addr.getOpcode() == ISD::TargetExternalSymbol ||
Addr.getOpcode() == ISD::TargetGlobalAddress))
return false;
- else if (Addr.getOpcode() == ISD::TargetGlobalTLSAddress) {
- Base = CurDAG->getRegister(GPReg, ValTy);
- Offset = Addr;
- return true;
- }
}
// Addresses of the form FI+const or FI|const
@@ -166,17 +315,20 @@ SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
// Generate:
// lui $2, %hi($CPI1_0)
// lwc1 $f0, %lo($CPI1_0)($2)
- if ((Addr.getOperand(0).getOpcode() == MipsISD::Hi ||
- Addr.getOperand(0).getOpcode() == ISD::LOAD) &&
- Addr.getOperand(1).getOpcode() == MipsISD::Lo) {
+ if (Addr.getOperand(1).getOpcode() == MipsISD::Lo) {
SDValue LoVal = Addr.getOperand(1);
- if (isa<ConstantPoolSDNode>(LoVal.getOperand(0)) ||
+ if (isa<ConstantPoolSDNode>(LoVal.getOperand(0)) ||
isa<GlobalAddressSDNode>(LoVal.getOperand(0))) {
Base = Addr.getOperand(0);
Offset = LoVal.getOperand(0);
return true;
}
}
+
+ // If an indexed floating point load/store can be emitted, return false.
+ if (LS && (LS->getMemoryVT() == MVT::f32 || LS->getMemoryVT() == MVT::f64) &&
+ Subtarget.hasMips32r2Or64())
+ return false;
}
Base = Addr;
@@ -184,6 +336,28 @@ SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
return true;
}
+/// Select multiply instructions.
+std::pair<SDNode*, SDNode*>
+MipsDAGToDAGISel::SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty,
+ bool HasLo, bool HasHi) {
+ SDNode *Lo = 0, *Hi = 0;
+ SDNode *Mul = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N->getOperand(0),
+ N->getOperand(1));
+ SDValue InFlag = SDValue(Mul, 0);
+
+ if (HasLo) {
+ Lo = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFLO : Mips::MFLO64, dl,
+ Ty, MVT::Glue, InFlag);
+ InFlag = SDValue(Lo, 1);
+ }
+ if (HasHi)
+ Hi = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFHI : Mips::MFHI64, dl,
+ Ty, InFlag);
+
+ return std::make_pair(Lo, Hi);
+}
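
Behaviorally (an illustrative model, not LLVM code), the MULT/MULTu plus MFLO/MFHI pair selected here computes, for the i32 case:

    // Lo/Hi halves of the 64-bit product, as MFLO/MFHI would read them.
    static void mult32(uint32_t A, uint32_t B, bool IsSigned,
                       uint32_t &Lo, uint32_t &Hi) {
      uint64_t P = IsSigned ? static_cast<uint64_t>(
                                  static_cast<int64_t>(static_cast<int32_t>(A)) *
                                  static_cast<int32_t>(B))
                            : static_cast<uint64_t>(A) * B;
      Lo = static_cast<uint32_t>(P);        // MFLO
      Hi = static_cast<uint32_t>(P >> 32);  // MFHI
    }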
+
+
/// Select instructions not customized! Used for
/// expanded, promoted and normal instructions
SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
@@ -203,123 +377,167 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
// Instruction Selection not handled by the auto-generated
// tablegen selection should be handled here.
///
+ EVT NodeTy = Node->getValueType(0);
+ unsigned MultOpc;
+
switch(Opcode) {
- default: break;
-
- case ISD::SUBE:
- case ISD::ADDE: {
- SDValue InFlag = Node->getOperand(2), CmpLHS;
- unsigned Opc = InFlag.getOpcode(); (void)Opc;
- assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) ||
- (Opc == ISD::SUBC || Opc == ISD::SUBE)) &&
- "(ADD|SUB)E flag operand must come from (ADD|SUB)C/E insn");
-
- unsigned MOp;
- if (Opcode == ISD::ADDE) {
- CmpLHS = InFlag.getValue(0);
- MOp = Mips::ADDu;
- } else {
- CmpLHS = InFlag.getOperand(0);
- MOp = Mips::SUBu;
- }
+ default: break;
+
+ case ISD::SUBE:
+ case ISD::ADDE: {
+ SDValue InFlag = Node->getOperand(2), CmpLHS;
+ unsigned Opc = InFlag.getOpcode(); (void)Opc;
+ assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) ||
+ (Opc == ISD::SUBC || Opc == ISD::SUBE)) &&
+ "(ADD|SUB)E flag operand must come from (ADD|SUB)C/E insn");
+
+ unsigned MOp;
+ if (Opcode == ISD::ADDE) {
+ CmpLHS = InFlag.getValue(0);
+ MOp = Mips::ADDu;
+ } else {
+ CmpLHS = InFlag.getOperand(0);
+ MOp = Mips::SUBu;
+ }
- SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) };
+ SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) };
- SDValue LHS = Node->getOperand(0);
- SDValue RHS = Node->getOperand(1);
+ SDValue LHS = Node->getOperand(0);
+ SDValue RHS = Node->getOperand(1);
- EVT VT = LHS.getValueType();
- SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, dl, VT, Ops, 2);
- SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, dl, VT,
- SDValue(Carry,0), RHS);
+ EVT VT = LHS.getValueType();
+ SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, dl, VT, Ops, 2);
+ SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, dl, VT,
+ SDValue(Carry,0), RHS);
- return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue,
- LHS, SDValue(AddCarry,0));
- }
+ return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue,
+ LHS, SDValue(AddCarry,0));
+ }
- /// Mul with two results
- case ISD::SMUL_LOHI:
- case ISD::UMUL_LOHI: {
- assert(Node->getValueType(0) != MVT::i64 &&
- "64-bit multiplication with two results not handled.");
- SDValue Op1 = Node->getOperand(0);
- SDValue Op2 = Node->getOperand(1);
+ /// Mul with two results
+ case ISD::SMUL_LOHI:
+ case ISD::UMUL_LOHI: {
+ if (NodeTy == MVT::i32)
+ MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT);
+ else
+ MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::DMULTu : Mips::DMULT);
- unsigned Op;
- Op = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT);
+ std::pair<SDNode*, SDNode*> LoHi = SelectMULT(Node, MultOpc, dl, NodeTy,
+ true, true);
- SDNode *Mul = CurDAG->getMachineNode(Op, dl, MVT::Glue, Op1, Op2);
+ if (!SDValue(Node, 0).use_empty())
+ ReplaceUses(SDValue(Node, 0), SDValue(LoHi.first, 0));
- SDValue InFlag = SDValue(Mul, 0);
- SDNode *Lo = CurDAG->getMachineNode(Mips::MFLO, dl, MVT::i32,
- MVT::Glue, InFlag);
- InFlag = SDValue(Lo,1);
- SDNode *Hi = CurDAG->getMachineNode(Mips::MFHI, dl, MVT::i32, InFlag);
+ if (!SDValue(Node, 1).use_empty())
+ ReplaceUses(SDValue(Node, 1), SDValue(LoHi.second, 0));
- if (!SDValue(Node, 0).use_empty())
- ReplaceUses(SDValue(Node, 0), SDValue(Lo,0));
+ return NULL;
+ }
- if (!SDValue(Node, 1).use_empty())
- ReplaceUses(SDValue(Node, 1), SDValue(Hi,0));
+ /// Special Muls
+ case ISD::MUL: {
+ // Mips32 has a 32-bit three operand mul instruction.
+ if (Subtarget.hasMips32() && NodeTy == MVT::i32)
+ break;
+ return SelectMULT(Node, NodeTy == MVT::i32 ? Mips::MULT : Mips::DMULT,
+ dl, NodeTy, true, false).first;
+ }
+ case ISD::MULHS:
+ case ISD::MULHU: {
+ if (NodeTy == MVT::i32)
+ MultOpc = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT);
+ else
+ MultOpc = (Opcode == ISD::MULHU ? Mips::DMULTu : Mips::DMULT);
+
+ return SelectMULT(Node, MultOpc, dl, NodeTy, false, true).second;
+ }
- return NULL;
- }
+ // Get target GOT address.
+ case ISD::GLOBAL_OFFSET_TABLE:
+ return getGlobalBaseReg();
- /// Special Muls
- case ISD::MUL:
- // Mips32 has a 32-bit three operand mul instruction.
- if (Subtarget.hasMips32() && Node->getValueType(0) == MVT::i32)
- break;
- case ISD::MULHS:
- case ISD::MULHU: {
- assert((Opcode == ISD::MUL || Node->getValueType(0) != MVT::i64) &&
- "64-bit MULH* not handled.");
- EVT Ty = Node->getValueType(0);
- SDValue MulOp1 = Node->getOperand(0);
- SDValue MulOp2 = Node->getOperand(1);
-
- unsigned MulOp = (Opcode == ISD::MULHU ?
- Mips::MULTu :
- (Ty == MVT::i32 ? Mips::MULT : Mips::DMULT));
- SDNode *MulNode = CurDAG->getMachineNode(MulOp, dl,
- MVT::Glue, MulOp1, MulOp2);
-
- SDValue InFlag = SDValue(MulNode, 0);
-
- if (Opcode == ISD::MUL) {
- unsigned Opc = (Ty == MVT::i32 ? Mips::MFLO : Mips::MFLO64);
- return CurDAG->getMachineNode(Opc, dl, Ty, InFlag);
+ case ISD::ConstantFP: {
+ ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(Node);
+ if (Node->getValueType(0) == MVT::f64 && CN->isExactlyValue(+0.0)) {
+ if (Subtarget.hasMips64()) {
+ SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ Mips::ZERO_64, MVT::i64);
+ return CurDAG->getMachineNode(Mips::DMTC1, dl, MVT::f64, Zero);
}
- else
- return CurDAG->getMachineNode(Mips::MFHI, dl, MVT::i32, InFlag);
+
+ SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ Mips::ZERO, MVT::i32);
+ return CurDAG->getMachineNode(Mips::BuildPairF64, dl, MVT::f64, Zero,
+ Zero);
}
+ break;
+ }
- // Get target GOT address.
- case ISD::GLOBAL_OFFSET_TABLE:
- return getGlobalBaseReg();
+ case ISD::Constant: {
+ const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node);
+ unsigned Size = CN->getValueSizeInBits(0);
- case ISD::ConstantFP: {
- ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(Node);
- if (Node->getValueType(0) == MVT::f64 && CN->isExactlyValue(+0.0)) {
- SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- Mips::ZERO, MVT::i32);
- return CurDAG->getMachineNode(Mips::BuildPairF64, dl, MVT::f64, Zero,
- Zero);
- }
+ if (Size == 32)
break;
+
+ MipsAnalyzeImmediate AnalyzeImm;
+ int64_t Imm = CN->getSExtValue();
+
+ const MipsAnalyzeImmediate::InstSeq &Seq =
+ AnalyzeImm.Analyze(Imm, Size, false);
+
+ MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
+ DebugLoc DL = CN->getDebugLoc();
+ SDNode *RegOpnd;
+ SDValue ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd),
+ MVT::i64);
+
+ // The first instruction can be a LUi which is different from other
+ // instructions (ADDiu, ORI and SLL) in that it does not have a register
+ // operand.
+ if (Inst->Opc == Mips::LUi64)
+ RegOpnd = CurDAG->getMachineNode(Inst->Opc, DL, MVT::i64, ImmOpnd);
+ else
+ RegOpnd =
+ CurDAG->getMachineNode(Inst->Opc, DL, MVT::i64,
+ CurDAG->getRegister(Mips::ZERO_64, MVT::i64),
+ ImmOpnd);
+
+ // The remaining instructions in the sequence are handled here.
+ for (++Inst; Inst != Seq.end(); ++Inst) {
+ ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd),
+ MVT::i64);
+ RegOpnd = CurDAG->getMachineNode(Inst->Opc, DL, MVT::i64,
+ SDValue(RegOpnd, 0), ImmOpnd);
}
- case MipsISD::ThreadPointer: {
- unsigned SrcReg = Mips::HWR29;
- unsigned DestReg = Mips::V1;
- SDNode *Rdhwr = CurDAG->getMachineNode(Mips::RDHWR, Node->getDebugLoc(),
- Node->getValueType(0), CurDAG->getRegister(SrcReg, MVT::i32));
- SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, DestReg,
- SDValue(Rdhwr, 0));
- SDValue ResNode = CurDAG->getCopyFromReg(Chain, dl, DestReg, MVT::i32);
- ReplaceUses(SDValue(Node, 0), ResNode);
- return ResNode.getNode();
+ return RegOpnd;
+ }
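
A hand-evaluated example of the sequence this case builds (assuming a positive 48-bit constant that analyzes to the usual LUi/ORI/DSLL/ORI chain, e.g. 0x123456789ABC):

    // Model of the four-instruction chain; each line mirrors one Seq entry.
    static uint64_t materialize48(uint64_t Imm) {
      uint64_t R = (Imm >> 32) << 16;   // lui  rd, Imm[47:32]
      R |= (Imm >> 16) & 0xffff;        // ori  rd, rd, Imm[31:16]
      R <<= 16;                         // dsll rd, rd, 16
      R |= Imm & 0xffff;                // ori  rd, rd, Imm[15:0]
      return R;                         // == Imm
    }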
+
+ case MipsISD::ThreadPointer: {
+ EVT PtrVT = TLI.getPointerTy();
+ unsigned RdhwrOpc, SrcReg, DestReg;
+
+ if (PtrVT == MVT::i32) {
+ RdhwrOpc = Mips::RDHWR;
+ SrcReg = Mips::HWR29;
+ DestReg = Mips::V1;
+ } else {
+ RdhwrOpc = Mips::RDHWR64;
+ SrcReg = Mips::HWR29_64;
+ DestReg = Mips::V1_64;
}
+
+ SDNode *Rdhwr =
+ CurDAG->getMachineNode(RdhwrOpc, Node->getDebugLoc(),
+ Node->getValueType(0),
+ CurDAG->getRegister(SrcReg, PtrVT));
+ SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, DestReg,
+ SDValue(Rdhwr, 0));
+ SDValue ResNode = CurDAG->getCopyFromReg(Chain, dl, DestReg, PtrVT);
+ ReplaceUses(SDValue(Node, 0), ResNode);
+ return ResNode.getNode();
+ }
}
// Select the default instruction
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 1932e74..6a23bc3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -18,12 +18,13 @@
#include "MipsTargetMachine.h"
#include "MipsTargetObjectFile.h"
#include "MipsSubtarget.h"
+#include "InstPrinter/MipsInstPrinter.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
-#include "InstPrinter/MipsInstPrinter.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -35,27 +36,29 @@
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-// If I is a shifted mask, set the size (Size) and the first bit of the
+// If I is a shifted mask, set the size (Size) and the first bit of the
// mask (Pos), and return true.
-// For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
+// For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
static bool IsShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
- if (!isUInt<32>(I) || !isShiftedMask_32(I))
+ if (!isShiftedMask_64(I))
return false;
- Size = CountPopulation_32(I);
- Pos = CountTrailingZeros_32(I);
+ Size = CountPopulation_64(I);
+ Pos = CountTrailingZeros_64(I);
return true;
}
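
A self-contained cross-check of the example in the comment above (hypothetical test code, not part of the patch):

    #include <cassert>
    #include <cstdint>
    void checkShiftedMaskExample() {
      uint64_t I = 0x003ff800;
      unsigned Pos = 0, Size = 0;
      while (!(I & (1ULL << Pos))) ++Pos;                  // lowest set bit
      for (uint64_t T = I >> Pos; T & 1; T >>= 1) ++Size;  // run length
      assert(Pos == 11 && Size == 11);  // matches (Pos, Size) = (11, 11)
    }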
+static SDValue GetGlobalReg(SelectionDAG &DAG, EVT Ty) {
+ MipsFunctionInfo *FI = DAG.getMachineFunction().getInfo<MipsFunctionInfo>();
+ return DAG.getRegister(FI->getGlobalBaseReg(), Ty);
+}
+
const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
case MipsISD::JmpLink: return "MipsISD::JmpLink";
case MipsISD::Hi: return "MipsISD::Hi";
case MipsISD::Lo: return "MipsISD::Lo";
case MipsISD::GPRel: return "MipsISD::GPRel";
- case MipsISD::TlsGd: return "MipsISD::TlsGd";
- case MipsISD::TprelHi: return "MipsISD::TprelHi";
- case MipsISD::TprelLo: return "MipsISD::TprelLo";
case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
case MipsISD::Ret: return "MipsISD::Ret";
case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
@@ -71,7 +74,7 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::DivRemU: return "MipsISD::DivRemU";
case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
- case MipsISD::WrapperPIC: return "MipsISD::WrapperPIC";
+ case MipsISD::Wrapper: return "MipsISD::Wrapper";
case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
case MipsISD::Sync: return "MipsISD::Sync";
case MipsISD::Ext: return "MipsISD::Ext";
@@ -84,7 +87,8 @@ MipsTargetLowering::
MipsTargetLowering(MipsTargetMachine &TM)
: TargetLowering(TM, new MipsTargetObjectFile()),
Subtarget(&TM.getSubtarget<MipsSubtarget>()),
- HasMips64(Subtarget->hasMips64()), IsN64(Subtarget->isABI_N64()) {
+ HasMips64(Subtarget->hasMips64()), IsN64(Subtarget->isABI_N64()),
+ IsO32(Subtarget->isABI_O32()) {
// Mips does not have i1 type, so use i32 for
// setcc operations results (slt, sgt, ...).
@@ -93,17 +97,20 @@ MipsTargetLowering(MipsTargetMachine &TM)
// Set up the register classes
addRegisterClass(MVT::i32, Mips::CPURegsRegisterClass);
- addRegisterClass(MVT::f32, Mips::FGR32RegisterClass);
if (HasMips64)
addRegisterClass(MVT::i64, Mips::CPU64RegsRegisterClass);
- // When dealing with single precision only, use libcalls
- if (!Subtarget->isSingleFloat()) {
- if (HasMips64)
- addRegisterClass(MVT::f64, Mips::FGR64RegisterClass);
- else
- addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
+ if (!TM.Options.UseSoftFloat) {
+ addRegisterClass(MVT::f32, Mips::FGR32RegisterClass);
+
+ // When dealing with single precision only, use libcalls
+ if (!Subtarget->isSingleFloat()) {
+ if (HasMips64)
+ addRegisterClass(MVT::f64, Mips::FGR64RegisterClass);
+ else
+ addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
+ }
}
// Load extended operations for i1 types must be promoted
@@ -123,7 +130,6 @@ MipsTargetLowering(MipsTargetMachine &TM)
// Mips Custom Operations
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom);
@@ -131,9 +137,30 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::f32, Custom);
setOperationAction(ISD::SELECT, MVT::f64, Custom);
setOperationAction(ISD::SELECT, MVT::i32, Custom);
+ setOperationAction(ISD::SETCC, MVT::f32, Custom);
+ setOperationAction(ISD::SETCC, MVT::f64, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
+
+ if (!TM.Options.NoNaNsFPMath) {
+ setOperationAction(ISD::FABS, MVT::f32, Custom);
+ setOperationAction(ISD::FABS, MVT::f64, Custom);
+ }
+
+ if (HasMips64) {
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+ setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
+ setOperationAction(ISD::JumpTable, MVT::i64, Custom);
+ setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
+ setOperationAction(ISD::SELECT, MVT::i64, Custom);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
+ }
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i32, Expand);
@@ -149,10 +176,18 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+ setOperationAction(ISD::CTPOP, MVT::i64, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i64, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
setOperationAction(ISD::ROTL, MVT::i32, Expand);
setOperationAction(ISD::ROTL, MVT::i64, Expand);
@@ -165,8 +200,6 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
setOperationAction(ISD::FSIN, MVT::f32, Expand);
setOperationAction(ISD::FSIN, MVT::f64, Expand);
setOperationAction(ISD::FCOS, MVT::f32, Expand);
@@ -180,9 +213,18 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::FEXP, MVT::f32, Expand);
setOperationAction(ISD::FMA, MVT::f32, Expand);
setOperationAction(ISD::FMA, MVT::f64, Expand);
+ setOperationAction(ISD::FREM, MVT::f32, Expand);
+ setOperationAction(ISD::FREM, MVT::f64, Expand);
+
+ if (!TM.Options.NoNaNsFPMath) {
+ setOperationAction(ISD::FNEG, MVT::f32, Expand);
+ setOperationAction(ISD::FNEG, MVT::f64, Expand);
+ }
setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
setOperationAction(ISD::VAARG, MVT::Other, Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
@@ -192,11 +234,10 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
-
- setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
- setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
setInsertFencesForAtomic(true);
@@ -208,32 +249,46 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
}
- if (!Subtarget->hasBitCount())
+ if (!Subtarget->hasBitCount()) {
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ, MVT::i64, Expand);
+ }
- if (!Subtarget->hasSwap())
+ if (!Subtarget->hasSwap()) {
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+ }
setTargetDAGCombine(ISD::ADDE);
setTargetDAGCombine(ISD::SUBE);
setTargetDAGCombine(ISD::SDIVREM);
setTargetDAGCombine(ISD::UDIVREM);
- setTargetDAGCombine(ISD::SETCC);
+ setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::OR);
- setMinFunctionAlignment(2);
+ setMinFunctionAlignment(HasMips64 ? 3 : 2);
- setStackPointerRegisterToSaveRestore(Mips::SP);
+ setStackPointerRegisterToSaveRestore(IsN64 ? Mips::SP_64 : Mips::SP);
computeRegisterProperties();
- setExceptionPointerRegister(Mips::A0);
- setExceptionSelectorRegister(Mips::A1);
+ setExceptionPointerRegister(IsN64 ? Mips::A0_64 : Mips::A0);
+ setExceptionSelectorRegister(IsN64 ? Mips::A1_64 : Mips::A1);
}
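
A side note on the alignment change above: setMinFunctionAlignment takes a log2 value, so the new HasMips64 branch requests 8-byte rather than 4-byte function alignment. A minimal sketch of the encoding, assuming the log2 convention (helper name is hypothetical):

    // 3 -> 8-byte alignment, 2 -> 4-byte alignment.
    static unsigned minFunctionAlignmentInBytes(bool HasMips64) {
      return 1u << (HasMips64 ? 3 : 2);
    }
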
bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
- return SVT == MVT::i64 || SVT == MVT::i32 || SVT == MVT::i16;
+
+ switch (SVT) {
+ case MVT::i64:
+ case MVT::i32:
+ case MVT::i16:
+ return true;
+ case MVT::f32:
+ return Subtarget->hasMips32r2Or64();
+ default:
+ return false;
+ }
}
EVT MipsTargetLowering::getSetCCResultType(EVT VT) const {
@@ -290,8 +345,7 @@ static bool SelectMadd(SDNode* ADDENode, SelectionDAG* CurDAG) {
// create MipsMAdd(u) node
MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd;
- SDValue MAdd = CurDAG->getNode(MultOpc, dl,
- MVT::Glue,
+ SDValue MAdd = CurDAG->getNode(MultOpc, dl, MVT::Glue,
MultNode->getOperand(0),// Factor 0
MultNode->getOperand(1),// Factor 1
ADDCNode->getOperand(1),// Lo0
@@ -364,8 +418,7 @@ static bool SelectMsub(SDNode* SUBENode, SelectionDAG* CurDAG) {
// create MipsMSub(u) node
MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MSubu : MipsISD::MSub;
- SDValue MSub = CurDAG->getNode(MultOpc, dl,
- MVT::Glue,
+ SDValue MSub = CurDAG->getNode(MultOpc, dl, MVT::Glue,
MultNode->getOperand(0),// Factor 0
MultNode->getOperand(1),// Factor 1
SUBCNode->getOperand(0),// Lo0
@@ -394,7 +447,8 @@ static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG,
if (DCI.isBeforeLegalize())
return SDValue();
- if (Subtarget->hasMips32() && SelectMadd(N, &DAG))
+ if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
+ SelectMadd(N, &DAG))
return SDValue(N, 0);
return SDValue();
@@ -406,7 +460,8 @@ static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG,
if (DCI.isBeforeLegalize())
return SDValue();
- if (Subtarget->hasMips32() && SelectMsub(N, &DAG))
+ if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
+ SelectMsub(N, &DAG))
return SDValue(N, 0);
return SDValue();
@@ -419,8 +474,8 @@ static SDValue PerformDivRemCombine(SDNode *N, SelectionDAG& DAG,
return SDValue();
EVT Ty = N->getValueType(0);
- unsigned LO = (Ty == MVT::i32) ? Mips::LO : Mips::LO64;
- unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64;
+ unsigned LO = (Ty == MVT::i32) ? Mips::LO : Mips::LO64;
+ unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64;
unsigned opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem :
MipsISD::DivRemU;
DebugLoc dl = N->getDebugLoc();
@@ -481,11 +536,10 @@ static bool InvertFPCondCode(Mips::CondCode CC) {
if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
return false;
- if (CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT)
- return true;
+ assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
+ "Illegal Condition Code");
- assert(false && "Illegal Condition Code");
- return false;
+ return true;
}
// Creates and returns an FPCmp node from a setcc node.
@@ -522,21 +576,37 @@ static SDValue CreateCMovFP(SelectionDAG& DAG, SDValue Cond, SDValue True,
True.getValueType(), True, False, Cond);
}
-static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG& DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const MipsSubtarget* Subtarget) {
+static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG& DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget* Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
- SDValue Cond = CreateFPCmp(DAG, SDValue(N, 0));
+ SDValue SetCC = N->getOperand(0);
- if (Cond.getOpcode() != MipsISD::FPCmp)
+ if ((SetCC.getOpcode() != ISD::SETCC) ||
+ !SetCC.getOperand(0).getValueType().isInteger())
return SDValue();
- SDValue True = DAG.getConstant(1, MVT::i32);
- SDValue False = DAG.getConstant(0, MVT::i32);
+ SDValue False = N->getOperand(2);
+ EVT FalseTy = False.getValueType();
+
+ if (!FalseTy.isInteger())
+ return SDValue();
+
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(False);
+
+ if (!CN || CN->getZExtValue())
+ return SDValue();
- return CreateCMovFP(DAG, Cond, True, False, N->getDebugLoc());
+ const DebugLoc DL = N->getDebugLoc();
+ ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
+ SDValue True = N->getOperand(1);
+
+ SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
+ SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
+
+ return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
}
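
A source-level sketch of what this new combine rewrites (helper names are illustrative, not from the patch): when the false operand of an integer select is the constant 0, the setcc condition is inverted and the select operands swapped, which lets the backend pick a conditional move against $zero.

    // before: (select (setcc a, b, lt), t, 0)
    static int beforeCombine(int a, int b, int t) { return (a < b) ? t : 0; }
    // after:  (select (setcc a, b, ge), 0, t) -- same result, inverted test
    static int afterCombine(int a, int b, int t) { return (a >= b) ? 0 : t; }
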
static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG,
@@ -549,20 +619,20 @@ static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG,
return SDValue();
SDValue ShiftRight = N->getOperand(0), Mask = N->getOperand(1);
-
+ unsigned ShiftRightOpc = ShiftRight.getOpcode();
+
// Op's first operand must be a shift right.
- if (ShiftRight.getOpcode() != ISD::SRA && ShiftRight.getOpcode() != ISD::SRL)
+ if (ShiftRightOpc != ISD::SRA && ShiftRightOpc != ISD::SRL)
return SDValue();
// The second operand of the shift must be an immediate.
- uint64_t Pos;
ConstantSDNode *CN;
if (!(CN = dyn_cast<ConstantSDNode>(ShiftRight.getOperand(1))))
return SDValue();
-
- Pos = CN->getZExtValue();
+ uint64_t Pos = CN->getZExtValue();
uint64_t SMPos, SMSize;
+
// Op's second operand must be a shifted mask.
if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
!IsShiftedMask(CN->getZExtValue(), SMPos, SMSize))
@@ -570,21 +640,21 @@ static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG,
// Return if the shifted mask does not start at bit 0 or the sum of its size
// and Pos exceeds the word's size.
- if (SMPos != 0 || Pos + SMSize > 32)
+ EVT ValTy = N->getValueType(0);
+ if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
return SDValue();
- return DAG.getNode(MipsISD::Ext, N->getDebugLoc(), MVT::i32,
- ShiftRight.getOperand(0),
- DAG.getConstant(Pos, MVT::i32),
+ return DAG.getNode(MipsISD::Ext, N->getDebugLoc(), ValTy,
+ ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32),
DAG.getConstant(SMSize, MVT::i32));
}
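
For reference, a scalar model of the pattern this combine now matches at any legal width (function name hypothetical): an AND of a right shift with a mask starting at bit 0 is a bit-field extract, which MIPS32r2 can do in one `ext`.

    #include <cstdint>
    // Assumes 0 < Size < 32, mirroring the bounds checks above.
    static uint32_t extEquiv(uint32_t X, unsigned Pos, unsigned Size) {
      return (X >> Pos) & ((1u << Size) - 1); // (srl/sra + and) -> ext
    }
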
-
+
static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
TargetLowering::DAGCombinerInfo &DCI,
const MipsSubtarget* Subtarget) {
// Pattern match INS.
// $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
- // where mask1 = (2**size - 1) << pos, mask0 = ~mask1
+ // where mask1 = (2**size - 1) << pos, mask0 = ~mask1
// => ins $dst, $src, size, pos, $src1
if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())
return SDValue();
@@ -604,7 +674,7 @@ static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
// See if Op's second operand matches (and (shl $src, pos), mask1).
if (And1.getOpcode() != ISD::AND)
return SDValue();
-
+
if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
!IsShiftedMask(CN->getZExtValue(), SMPos1, SMSize1))
return SDValue();
@@ -623,17 +693,16 @@ static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
unsigned Shamt = CN->getZExtValue();
// Return if the shift amount and the first bit position of mask are not the
- // same.
- if (Shamt != SMPos0)
+ // same.
+ EVT ValTy = N->getValueType(0);
+ if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
return SDValue();
-
- return DAG.getNode(MipsISD::Ins, N->getDebugLoc(), MVT::i32,
- Shl.getOperand(0),
+
+ return DAG.getNode(MipsISD::Ins, N->getDebugLoc(), ValTy, Shl.getOperand(0),
DAG.getConstant(SMPos0, MVT::i32),
- DAG.getConstant(SMSize0, MVT::i32),
- And0.getOperand(0));
+ DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
}
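
And the matching scalar model for the INS pattern above (again a hypothetical helper, assuming 0 < Size < 32 and Pos + Size within the value width): clear a field in $dst, then OR in the shifted source.

    #include <cstdint>
    static uint32_t insEquiv(uint32_t Dst, uint32_t Src,
                             unsigned Pos, unsigned Size) {
      uint32_t Mask1 = ((1u << Size) - 1) << Pos;     // (2**size - 1) << pos
      return (Dst & ~Mask1) | ((Src << Pos) & Mask1); // -> ins
    }
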
-
+
SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
const {
SelectionDAG &DAG = DCI.DAG;
@@ -648,8 +717,8 @@ SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
case ISD::SDIVREM:
case ISD::UDIVREM:
return PerformDivRemCombine(N, DAG, DCI, Subtarget);
- case ISD::SETCC:
- return PerformSETCCCombine(N, DAG, DCI, Subtarget);
+ case ISD::SELECT:
+ return PerformSELECTCombine(N, DAG, DCI, Subtarget);
case ISD::AND:
return PerformANDCombine(N, DAG, DCI, Subtarget);
case ISD::OR:
@@ -672,8 +741,10 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
+ case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
+ case ISD::FABS: return LowerFABS(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
@@ -689,7 +760,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
// MachineFunction as a live-in value. It also creates a corresponding
// virtual register for it.
static unsigned
-AddLiveIn(MachineFunction &MF, unsigned PReg, TargetRegisterClass *RC)
+AddLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
{
assert(RC->contains(PReg) && "Not the correct regclass!");
unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
@@ -702,12 +773,13 @@ static Mips::FPBranchCode GetFPBranchCodeFromCond(Mips::CondCode CC) {
if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
return Mips::BRANCH_T;
- if (CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT)
- return Mips::BRANCH_F;
+ assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
+ "Invalid CondCode.");
- return Mips::BRANCH_INVALID;
+ return Mips::BRANCH_F;
}
+/*
static MachineBasicBlock* ExpandCondMov(MachineInstr *MI, MachineBasicBlock *BB,
DebugLoc dl,
const MipsSubtarget* Subtarget,
@@ -783,89 +855,115 @@ static MachineBasicBlock* ExpandCondMov(MachineInstr *MI, MachineBasicBlock *BB,
MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
-
+*/
MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
-
switch (MI->getOpcode()) {
- default:
- assert(false && "Unexpected instr type to insert");
- return NULL;
- case Mips::MOVT:
- case Mips::MOVT_S:
- case Mips::MOVT_D:
- return ExpandCondMov(MI, BB, dl, Subtarget, TII, true, Mips::BC1F);
- case Mips::MOVF:
- case Mips::MOVF_S:
- case Mips::MOVF_D:
- return ExpandCondMov(MI, BB, dl, Subtarget, TII, true, Mips::BC1T);
- case Mips::MOVZ_I:
- case Mips::MOVZ_S:
- case Mips::MOVZ_D:
- return ExpandCondMov(MI, BB, dl, Subtarget, TII, false, Mips::BNE);
- case Mips::MOVN_I:
- case Mips::MOVN_S:
- case Mips::MOVN_D:
- return ExpandCondMov(MI, BB, dl, Subtarget, TII, false, Mips::BEQ);
-
+ default: llvm_unreachable("Unexpected instr type to insert");
case Mips::ATOMIC_LOAD_ADD_I8:
+ case Mips::ATOMIC_LOAD_ADD_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I16:
+ case Mips::ATOMIC_LOAD_ADD_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I32:
+ case Mips::ATOMIC_LOAD_ADD_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::ADDu);
+ case Mips::ATOMIC_LOAD_ADD_I64:
+ case Mips::ATOMIC_LOAD_ADD_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::DADDu);
case Mips::ATOMIC_LOAD_AND_I8:
+ case Mips::ATOMIC_LOAD_AND_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I16:
+ case Mips::ATOMIC_LOAD_AND_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I32:
+ case Mips::ATOMIC_LOAD_AND_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::AND);
+ case Mips::ATOMIC_LOAD_AND_I64:
+ case Mips::ATOMIC_LOAD_AND_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::AND64);
case Mips::ATOMIC_LOAD_OR_I8:
+ case Mips::ATOMIC_LOAD_OR_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I16:
+ case Mips::ATOMIC_LOAD_OR_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I32:
+ case Mips::ATOMIC_LOAD_OR_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::OR);
+ case Mips::ATOMIC_LOAD_OR_I64:
+ case Mips::ATOMIC_LOAD_OR_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::OR64);
case Mips::ATOMIC_LOAD_XOR_I8:
+ case Mips::ATOMIC_LOAD_XOR_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I16:
+ case Mips::ATOMIC_LOAD_XOR_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I32:
+ case Mips::ATOMIC_LOAD_XOR_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::XOR);
+ case Mips::ATOMIC_LOAD_XOR_I64:
+ case Mips::ATOMIC_LOAD_XOR_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::XOR64);
case Mips::ATOMIC_LOAD_NAND_I8:
+ case Mips::ATOMIC_LOAD_NAND_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, 0, true);
case Mips::ATOMIC_LOAD_NAND_I16:
+ case Mips::ATOMIC_LOAD_NAND_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, 0, true);
case Mips::ATOMIC_LOAD_NAND_I32:
+ case Mips::ATOMIC_LOAD_NAND_I32_P8:
return EmitAtomicBinary(MI, BB, 4, 0, true);
+ case Mips::ATOMIC_LOAD_NAND_I64:
+ case Mips::ATOMIC_LOAD_NAND_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, 0, true);
case Mips::ATOMIC_LOAD_SUB_I8:
+ case Mips::ATOMIC_LOAD_SUB_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I16:
+ case Mips::ATOMIC_LOAD_SUB_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I32:
+ case Mips::ATOMIC_LOAD_SUB_I32_P8:
return EmitAtomicBinary(MI, BB, 4, Mips::SUBu);
+ case Mips::ATOMIC_LOAD_SUB_I64:
+ case Mips::ATOMIC_LOAD_SUB_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, Mips::DSUBu);
case Mips::ATOMIC_SWAP_I8:
+ case Mips::ATOMIC_SWAP_I8_P8:
return EmitAtomicBinaryPartword(MI, BB, 1, 0);
case Mips::ATOMIC_SWAP_I16:
+ case Mips::ATOMIC_SWAP_I16_P8:
return EmitAtomicBinaryPartword(MI, BB, 2, 0);
case Mips::ATOMIC_SWAP_I32:
+ case Mips::ATOMIC_SWAP_I32_P8:
return EmitAtomicBinary(MI, BB, 4, 0);
+ case Mips::ATOMIC_SWAP_I64:
+ case Mips::ATOMIC_SWAP_I64_P8:
+ return EmitAtomicBinary(MI, BB, 8, 0);
case Mips::ATOMIC_CMP_SWAP_I8:
+ case Mips::ATOMIC_CMP_SWAP_I8_P8:
return EmitAtomicCmpSwapPartword(MI, BB, 1);
case Mips::ATOMIC_CMP_SWAP_I16:
+ case Mips::ATOMIC_CMP_SWAP_I16_P8:
return EmitAtomicCmpSwapPartword(MI, BB, 2);
case Mips::ATOMIC_CMP_SWAP_I32:
+ case Mips::ATOMIC_CMP_SWAP_I32_P8:
return EmitAtomicCmpSwap(MI, BB, 4);
+ case Mips::ATOMIC_CMP_SWAP_I64:
+ case Mips::ATOMIC_CMP_SWAP_I64_P8:
+ return EmitAtomicCmpSwap(MI, BB, 8);
}
}
@@ -875,13 +973,31 @@ MachineBasicBlock *
MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned Size, unsigned BinOpcode,
bool Nand) const {
- assert(Size == 4 && "Unsupported size for EmitAtomicBinary.");
+ assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicBinary.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL, SC, AND, NOR, ZERO, BEQ;
+
+ if (Size == 4) {
+ LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ AND = Mips::AND;
+ NOR = Mips::NOR;
+ ZERO = Mips::ZERO;
+ BEQ = Mips::BEQ;
+ } else {
+ LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
+ SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ AND = Mips::AND64;
+ NOR = Mips::NOR64;
+ ZERO = Mips::ZERO_64;
+ BEQ = Mips::BEQ64;
+ }
unsigned OldVal = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -919,23 +1035,20 @@ MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// sc success, storeval, 0(ptr)
// beq success, $0, loopMBB
BB = loopMBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
if (Nand) {
// and andres, oldval, incr
// nor storeval, $0, andres
- BuildMI(BB, dl, TII->get(Mips::AND), AndRes).addReg(OldVal).addReg(Incr);
- BuildMI(BB, dl, TII->get(Mips::NOR), StoreVal)
- .addReg(Mips::ZERO).addReg(AndRes);
+ BuildMI(BB, dl, TII->get(AND), AndRes).addReg(OldVal).addReg(Incr);
+ BuildMI(BB, dl, TII->get(NOR), StoreVal).addReg(ZERO).addReg(AndRes);
} else if (BinOpcode) {
// <binop> storeval, oldval, incr
BuildMI(BB, dl, TII->get(BinOpcode), StoreVal).addReg(OldVal).addReg(Incr);
} else {
StoreVal = Incr;
}
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
- .addReg(StoreVal).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BEQ))
- .addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
+ BuildMI(BB, dl, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);
MI->eraseFromParent(); // The instruction is gone now.
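
The ll/sc retry structure built here behaves, at the C++ level, like a compare-exchange loop; a rough analogue for the add case (not the generated code, and ignoring memory ordering):

    #include <atomic>
    static int atomicAddEquiv(std::atomic<int> &Mem, int Incr) {
      int OldVal = Mem.load();
      // sc failure -> beq success, $0, loopMBB -> retry
      while (!Mem.compare_exchange_weak(OldVal, OldVal + Incr))
        ;
      return OldVal; // the value returned by ll
    }
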
@@ -955,6 +1068,8 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -992,8 +1107,7 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(loopMBB);
@@ -1025,7 +1139,6 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
BuildMI(BB, dl, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
BuildMI(BB, dl, TII->get(Mips::SLLV), Incr2).addReg(ShiftAmt).addReg(Incr);
-
// atomic.load.binop
// loopMBB:
// ll oldval,0(alignedaddr)
@@ -1046,7 +1159,7 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
// beq success,$0,loopMBB
BB = loopMBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
// nor binopres, $0, andres
@@ -1064,12 +1177,12 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
// and newval, incr2, mask
BuildMI(BB, dl, TII->get(Mips::AND), NewVal).addReg(Incr2).addReg(Mask);
}
-
+
BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
@@ -1100,13 +1213,29 @@ MachineBasicBlock *
MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned Size) const {
- assert(Size == 4 && "Unsupported size for EmitAtomicCmpSwap.");
+ assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
+ const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL, SC, ZERO, BNE, BEQ;
+
+ if (Size == 4) {
+ LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ ZERO = Mips::ZERO;
+ BNE = Mips::BNE;
+ BEQ = Mips::BEQ;
+ } else {
+ LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
+ SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ ZERO = Mips::ZERO_64;
+ BNE = Mips::BNE64;
+ BEQ = Mips::BEQ64;
+ }
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1128,8 +1257,7 @@ MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
@@ -1145,18 +1273,18 @@ MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
// ll dest, 0(ptr)
// bne dest, oldval, exitMBB
BB = loop1MBB;
- BuildMI(BB, dl, TII->get(Mips::LL), Dest).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BNE))
+ BuildMI(BB, dl, TII->get(LL), Dest).addReg(Ptr).addImm(0);
+ BuildMI(BB, dl, TII->get(BNE))
.addReg(Dest).addReg(OldVal).addMBB(exitMBB);
// loop2MBB:
// sc success, newval, 0(ptr)
// beq success, $0, loop1MBB
BB = loop2MBB;
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(NewVal).addReg(Ptr).addImm(0);
- BuildMI(BB, dl, TII->get(Mips::BEQ))
- .addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
+ BuildMI(BB, dl, TII->get(BEQ))
+ .addReg(Success).addReg(ZERO).addMBB(loop1MBB);
MI->eraseFromParent(); // The instruction is gone now.
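
Likewise, the two-block structure above (loop1: ll plus bne to exit; loop2: sc plus beq back to loop1) is the classic compare-and-swap; a hedged C++ analogue:

    #include <atomic>
    static int cmpSwapEquiv(std::atomic<int> &Mem, int OldVal, int NewVal) {
      int Dest = OldVal;
      Mem.compare_exchange_strong(Dest, NewVal); // Dest receives ll's result
      return Dest;
    }
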
@@ -1175,6 +1303,8 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
+ unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
+ unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1215,8 +1345,7 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(loop1MBB);
@@ -1265,7 +1394,7 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
- BuildMI(BB, dl, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, dl, TII->get(Mips::BNE))
@@ -1281,7 +1410,7 @@ MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
- BuildMI(BB, dl, TII->get(Mips::SC), Success)
+ BuildMI(BB, dl, TII->get(SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, dl, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
@@ -1313,6 +1442,7 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
{
MachineFunction &MF = DAG.getMachineFunction();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+ unsigned SP = IsN64 ? Mips::SP_64 : Mips::SP;
assert(getTargetMachine().getFrameLowering()->getStackAlignment() >=
cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue() &&
@@ -1324,20 +1454,19 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
DebugLoc dl = Op.getDebugLoc();
// Get a reference to the Mips stack pointer
- SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, Mips::SP, MVT::i32);
+ SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SP, getPointerTy());
// Subtract the dynamic size from the actual stack size to
// obtain the new stack size.
- SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, getPointerTy(), StackPointer, Size);
// The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register.
- Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, Mips::SP, Sub,
- SDValue());
+ Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, SP, Sub, SDValue());
// This node always has two return values: a new stack pointer
// value and a chain
- SDVTList VTLs = DAG.getVTList(MVT::i32, MVT::Other);
+ SDVTList VTLs = DAG.getVTList(getPointerTy(), MVT::Other);
SDValue Ptr = DAG.getFrameIndex(MipsFI->getDynAllocFI(), getPointerTy());
SDValue Ops[] = { Chain, Ptr, Chain.getValue(1) };
@@ -1381,11 +1510,23 @@ LowerSELECT(SDValue Op, SelectionDAG &DAG) const
Op.getDebugLoc());
}
+SDValue MipsTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Cond = CreateFPCmp(DAG, Op);
+
+ assert(Cond.getOpcode() == MipsISD::FPCmp &&
+ "Floating point operand expected.");
+
+ SDValue True = DAG.getConstant(1, MVT::i32);
+ SDValue False = DAG.getConstant(0, MVT::i32);
+
+ return CreateCMovFP(DAG, Cond, True, False, Op.getDebugLoc());
+}
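
A source-level picture of what the new LowerSETCC produces for a floating-point compare (illustrative only): the boolean is materialized as 0/1 via a conditional move on the FP condition flag, i.e. a c.cond.s/c.cond.d followed by movt/movf.

    static int fpSetCCEquiv(float A, float B) {
      return (A < B) ? 1 : 0; // CMovFP selecting constants 1 and 0
    }
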
+
SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
SDVTList VTs = DAG.getVTList(MVT::i32);
@@ -1413,21 +1554,20 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
EVT ValTy = Op.getValueType();
bool HasGotOfst = (GV->hasInternalLinkage() ||
(GV->hasLocalLinkage() && !isa<Function>(GV)));
- unsigned GotFlag = IsN64 ?
+ unsigned GotFlag = HasMips64 ?
(HasGotOfst ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT_DISP) :
- MipsII::MO_GOT;
+ (HasGotOfst ? MipsII::MO_GOT : MipsII::MO_GOT16);
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, ValTy, 0, GotFlag);
- GA = DAG.getNode(MipsISD::WrapperPIC, dl, ValTy, GA);
- SDValue ResNode = DAG.getLoad(ValTy, dl,
- DAG.getEntryNode(), GA, MachinePointerInfo(),
- false, false, 0);
+ GA = DAG.getNode(MipsISD::Wrapper, dl, ValTy, GetGlobalReg(DAG, ValTy), GA);
+ SDValue ResNode = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), GA,
+ MachinePointerInfo(), false, false, false, 0);
// For functions and global targets that are not internally linked, a
// single load from the GOT is all that is needed for PIC to work.
if (!HasGotOfst)
return ResNode;
SDValue GALo = DAG.getTargetGlobalAddress(GV, dl, ValTy, 0,
- IsN64 ? MipsII::MO_GOT_OFST :
- MipsII::MO_ABS_LO);
+ HasMips64 ? MipsII::MO_GOT_OFST :
+ MipsII::MO_ABS_LO);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, GALo);
return DAG.getNode(ISD::ADD, dl, ValTy, ResNode, Lo);
}
@@ -1438,35 +1578,34 @@ SDValue MipsTargetLowering::LowerBlockAddress(SDValue Op,
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
// %hi/%lo relocation
- SDValue BAHi = DAG.getBlockAddress(BA, MVT::i32, true,
- MipsII::MO_ABS_HI);
- SDValue BALo = DAG.getBlockAddress(BA, MVT::i32, true,
- MipsII::MO_ABS_LO);
+ SDValue BAHi = DAG.getBlockAddress(BA, MVT::i32, true, MipsII::MO_ABS_HI);
+ SDValue BALo = DAG.getBlockAddress(BA, MVT::i32, true, MipsII::MO_ABS_LO);
SDValue Hi = DAG.getNode(MipsISD::Hi, dl, MVT::i32, BAHi);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, BALo);
return DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, Lo);
}
- SDValue BAGOTOffset = DAG.getBlockAddress(BA, MVT::i32, true,
- MipsII::MO_GOT);
- BAGOTOffset = DAG.getNode(MipsISD::WrapperPIC, dl, MVT::i32, BAGOTOffset);
- SDValue BALOOffset = DAG.getBlockAddress(BA, MVT::i32, true,
- MipsII::MO_ABS_LO);
- SDValue Load = DAG.getLoad(MVT::i32, dl,
- DAG.getEntryNode(), BAGOTOffset,
- MachinePointerInfo(), false, false, 0);
- SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, BALOOffset);
- return DAG.getNode(ISD::ADD, dl, MVT::i32, Load, Lo);
+ EVT ValTy = Op.getValueType();
+ unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
+ unsigned OFSTFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
+ SDValue BAGOTOffset = DAG.getBlockAddress(BA, ValTy, true, GOTFlag);
+ BAGOTOffset = DAG.getNode(MipsISD::Wrapper, dl, ValTy,
+ GetGlobalReg(DAG, ValTy), BAGOTOffset);
+ SDValue BALOOffset = DAG.getBlockAddress(BA, ValTy, true, OFSTFlag);
+ SDValue Load = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), BAGOTOffset,
+ MachinePointerInfo(), false, false, false, 0);
+ SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, BALOOffset);
+ return DAG.getNode(ISD::ADD, dl, ValTy, Load, Lo);
}
SDValue MipsTargetLowering::
LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
- // If the relocation model is PIC, use the General Dynamic TLS Model,
- // otherwise use the Initial Exec or Local Exec TLS Model.
- // TODO: implement Local Dynamic TLS model
+ // If the relocation model is PIC, use the General Dynamic TLS Model or
+ // Local Dynamic TLS model, otherwise use the Initial Exec or
+ // Local Exec TLS Model.
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
DebugLoc dl = GA->getDebugLoc();
@@ -1475,45 +1614,63 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
// General Dynamic TLS Model
- SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32,
- 0, MipsII::MO_TLSGD);
- SDValue Tlsgd = DAG.getNode(MipsISD::TlsGd, dl, MVT::i32, TGA);
- SDValue GP = DAG.getRegister(Mips::GP, MVT::i32);
- SDValue Argument = DAG.getNode(ISD::ADD, dl, MVT::i32, GP, Tlsgd);
+ bool LocalDynamic = GV->hasInternalLinkage();
+ unsigned Flag = LocalDynamic ? MipsII::MO_TLSLDM : MipsII::MO_TLSGD;
+ SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, Flag);
+ SDValue Argument = DAG.getNode(MipsISD::Wrapper, dl, PtrVT,
+ GetGlobalReg(DAG, PtrVT), TGA);
+ unsigned PtrSize = PtrVT.getSizeInBits();
+ IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);
+
+ SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);
ArgListTy Args;
ArgListEntry Entry;
Entry.Node = Argument;
- Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
+ Entry.Ty = PtrTy;
Args.push_back(Entry);
- std::pair<SDValue, SDValue> CallResult =
- LowerCallTo(DAG.getEntryNode(),
- (Type *) Type::getInt32Ty(*DAG.getContext()),
- false, false, false, false, 0, CallingConv::C, false, true,
- DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG,
- dl);
- return CallResult.first;
+ std::pair<SDValue, SDValue> CallResult =
+ LowerCallTo(DAG.getEntryNode(), PtrTy,
+ false, false, false, false, 0, CallingConv::C,
+ /*isTailCall=*/false, /*doesNotRet=*/false,
+ /*isReturnValueUsed=*/true,
+ TlsGetAddr, Args, DAG, dl);
+
+ SDValue Ret = CallResult.first;
+
+ if (!LocalDynamic)
+ return Ret;
+
+ SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
+ MipsII::MO_DTPREL_HI);
+ SDValue Hi = DAG.getNode(MipsISD::Hi, dl, PtrVT, TGAHi);
+ SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
+ MipsII::MO_DTPREL_LO);
+ SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, TGALo);
+ SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Ret);
+ return DAG.getNode(ISD::ADD, dl, PtrVT, Add, Lo);
}
SDValue Offset;
if (GV->isDeclaration()) {
// Initial Exec TLS Model
- SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
+ SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
MipsII::MO_GOTTPREL);
- Offset = DAG.getLoad(MVT::i32, dl,
+ TGA = DAG.getNode(MipsISD::Wrapper, dl, PtrVT, GetGlobalReg(DAG, PtrVT),
+ TGA);
+ Offset = DAG.getLoad(PtrVT, dl,
DAG.getEntryNode(), TGA, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
} else {
// Local Exec TLS Model
- SDVTList VTs = DAG.getVTList(MVT::i32);
- SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
+ SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
MipsII::MO_TPREL_HI);
- SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
+ SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
MipsII::MO_TPREL_LO);
- SDValue Hi = DAG.getNode(MipsISD::TprelHi, dl, VTs, &TGAHi, 1);
- SDValue Lo = DAG.getNode(MipsISD::TprelLo, dl, MVT::i32, TGALo);
- Offset = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, Lo);
+ SDValue Hi = DAG.getNode(MipsISD::Hi, dl, PtrVT, TGAHi);
+ SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, TGALo);
+ Offset = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
}
SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, dl, PtrVT);
@@ -1523,34 +1680,30 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
SDValue MipsTargetLowering::
LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{
- SDValue ResNode;
- SDValue HiPart;
+ SDValue HiPart, JTI, JTILo;
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
- unsigned char OpFlag = IsPIC ? MipsII::MO_GOT : MipsII::MO_ABS_HI;
-
EVT PtrVT = Op.getValueType();
- JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+ JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
-
- if (!IsPIC) {
- SDValue Ops[] = { JTI };
- HiPart = DAG.getNode(MipsISD::Hi, dl, DAG.getVTList(MVT::i32), Ops, 1);
+ if (!IsPIC && !IsN64) {
+ JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MipsII::MO_ABS_HI);
+ HiPart = DAG.getNode(MipsISD::Hi, dl, PtrVT, JTI);
+ JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MipsII::MO_ABS_LO);
} else {// Emit Load from Global Pointer
- JTI = DAG.getNode(MipsISD::WrapperPIC, dl, MVT::i32, JTI);
- HiPart = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(), JTI,
- MachinePointerInfo(),
- false, false, 0);
+ unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
+ unsigned OfstFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
+ JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, GOTFlag);
+ JTI = DAG.getNode(MipsISD::Wrapper, dl, PtrVT, GetGlobalReg(DAG, PtrVT),
+ JTI);
+ HiPart = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), JTI,
+ MachinePointerInfo(), false, false, false, 0);
+ JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OfstFlag);
}
- SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
- MipsII::MO_ABS_LO);
- SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, JTILo);
- ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
-
- return ResNode;
+ SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, JTILo);
+ return DAG.getNode(ISD::ADD, dl, PtrVT, HiPart, Lo);
}
SDValue MipsTargetLowering::
@@ -1572,7 +1725,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
// SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
// ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
- if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
SDValue CPHi = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
N->getOffset(), MipsII::MO_ABS_HI);
SDValue CPLo = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
@@ -1581,16 +1734,19 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CPLo);
ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
} else {
- SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
- N->getOffset(), MipsII::MO_GOT);
- CP = DAG.getNode(MipsISD::WrapperPIC, dl, MVT::i32, CP);
- SDValue Load = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(),
- CP, MachinePointerInfo::getConstantPool(),
+ EVT ValTy = Op.getValueType();
+ unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
+ unsigned OFSTFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
+ SDValue CP = DAG.getTargetConstantPool(C, ValTy, N->getAlignment(),
+ N->getOffset(), GOTFlag);
+ CP = DAG.getNode(MipsISD::Wrapper, dl, ValTy, GetGlobalReg(DAG, ValTy), CP);
+ SDValue Load = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), CP,
+ MachinePointerInfo::getConstantPool(), false,
false, false, 0);
- SDValue CPLo = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
- N->getOffset(), MipsII::MO_ABS_LO);
- SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CPLo);
- ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, Load, Lo);
+ SDValue CPLo = DAG.getTargetConstantPool(C, ValTy, N->getAlignment(),
+ N->getOffset(), OFSTFlag);
+ SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, CPLo);
+ ResNode = DAG.getNode(ISD::ADD, dl, ValTy, Load, Lo);
}
return ResNode;
@@ -1608,62 +1764,165 @@ SDValue MipsTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// memory location argument.
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
- MachinePointerInfo(SV),
- false, false, 0);
+ MachinePointerInfo(SV), false, false, 0);
}
-static SDValue LowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG) {
- // FIXME: Use ext/ins instructions if target architecture is Mips32r2.
- DebugLoc dl = Op.getDebugLoc();
- SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op.getOperand(0));
- SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op.getOperand(1));
- SDValue And0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op0,
- DAG.getConstant(0x7fffffff, MVT::i32));
- SDValue And1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op1,
- DAG.getConstant(0x80000000, MVT::i32));
- SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, And0, And1);
- return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Result);
+static SDValue LowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+ EVT TyX = Op.getOperand(0).getValueType();
+ EVT TyY = Op.getOperand(1).getValueType();
+ SDValue Const1 = DAG.getConstant(1, MVT::i32);
+ SDValue Const31 = DAG.getConstant(31, MVT::i32);
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue Res;
+
+ // If the operand is of type f64, extract its upper 32 bits. Otherwise,
+ // bitcast it to i32.
+ SDValue X = (TyX == MVT::f32) ?
+ DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
+ DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
+ Const1);
+ SDValue Y = (TyY == MVT::f32) ?
+ DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
+ DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
+ Const1);
+
+ if (HasR2) {
+ // ext E, Y, 31, 1 ; extract bit31 of Y
+ // ins X, E, 31, 1 ; insert extracted bit at bit31 of X
+ SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
+ Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
+ } else {
+ // sll SllX, X, 1
+ // srl SrlX, SllX, 1
+ // srl SrlY, Y, 31
+ // sll SllY, SrlY, 31
+ // or Or, SrlX, SllY
+ SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
+ SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
+ SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
+ SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
+ Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
+ }
+
+ if (TyX == MVT::f32)
+ return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
+
+ SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
+ Op.getOperand(0), DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
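
The non-R2 path above is the usual copysign bit trick; in scalar form (hypothetical helper operating on the raw bit patterns):

    #include <cstdint>
    static uint32_t copySignBits(uint32_t X, uint32_t Y) {
      uint32_t Body = (X << 1) >> 1;   // sll SllX, X, 1 ; srl SrlX, SllX, 1
      uint32_t Sign = (Y >> 31) << 31; // srl SrlY, Y, 31 ; sll SllY, SrlY, 31
      return Body | Sign;              // or  Or, SrlX, SllY
    }
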
-static SDValue LowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool isLittle) {
- // FIXME:
- // Use ext/ins instructions if target architecture is Mips32r2.
- // Eliminate redundant mfc1 and mtc1 instructions.
- unsigned LoIdx = 0, HiIdx = 1;
+static SDValue LowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+ unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
+ unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
+ EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
+ SDValue Const1 = DAG.getConstant(1, MVT::i32);
+ DebugLoc DL = Op.getDebugLoc();
+
+ // Bitcast to integer nodes.
+ SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
+ SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
+
+ if (HasR2) {
+ // ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
+ // ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
+ SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
+ DAG.getConstant(WidthY - 1, MVT::i32), Const1);
+
+ if (WidthX > WidthY)
+ E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
+ else if (WidthY > WidthX)
+ E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
+
+ SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
+ DAG.getConstant(WidthX - 1, MVT::i32), Const1, X);
+ return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
+ }
- if (!isLittle)
- std::swap(LoIdx, HiIdx);
+ // (d)sll SllX, X, 1
+ // (d)srl SrlX, SllX, 1
+ // (d)srl SrlY, Y, width(Y)-1
+ // (d)sll SllY, SrlY, width(X)-1
+ // or Or, SrlX, SllY
+ SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
+ SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
+ SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
+ DAG.getConstant(WidthY - 1, MVT::i32));
+
+ if (WidthX > WidthY)
+ SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
+ else if (WidthY > WidthX)
+ SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
+
+ SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
+ DAG.getConstant(WidthX - 1, MVT::i32));
+ SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
+ return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
+}
- DebugLoc dl = Op.getDebugLoc();
- SDValue Word0 = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32,
- Op.getOperand(0),
- DAG.getConstant(LoIdx, MVT::i32));
- SDValue Hi0 = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32,
- Op.getOperand(0), DAG.getConstant(HiIdx, MVT::i32));
- SDValue Hi1 = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32,
- Op.getOperand(1), DAG.getConstant(HiIdx, MVT::i32));
- SDValue And0 = DAG.getNode(ISD::AND, dl, MVT::i32, Hi0,
- DAG.getConstant(0x7fffffff, MVT::i32));
- SDValue And1 = DAG.getNode(ISD::AND, dl, MVT::i32, Hi1,
- DAG.getConstant(0x80000000, MVT::i32));
- SDValue Word1 = DAG.getNode(ISD::OR, dl, MVT::i32, And0, And1);
-
- if (!isLittle)
- std::swap(Word0, Word1);
-
- return DAG.getNode(MipsISD::BuildPairF64, dl, MVT::f64, Word0, Word1);
-}
-
-SDValue MipsTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
- const {
- EVT Ty = Op.getValueType();
+SDValue
+MipsTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
+ if (Subtarget->hasMips64())
+ return LowerFCOPYSIGN64(Op, DAG, Subtarget->hasMips32r2());
- assert(Ty == MVT::f32 || Ty == MVT::f64);
+ return LowerFCOPYSIGN32(Op, DAG, Subtarget->hasMips32r2());
+}
- if (Ty == MVT::f32)
- return LowerFCOPYSIGN32(Op, DAG);
- else
- return LowerFCOPYSIGN64(Op, DAG, Subtarget->isLittle());
+static SDValue LowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+ SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
+ DebugLoc DL = Op.getDebugLoc();
+
+ // If the operand is of type f64, extract its upper 32 bits. Otherwise,
+ // bitcast it to i32.
+ SDValue X = (Op.getValueType() == MVT::f32) ?
+ DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
+ DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
+ Const1);
+
+ // Clear MSB.
+ if (HasR2)
+ Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
+ DAG.getRegister(Mips::ZERO, MVT::i32),
+ DAG.getConstant(31, MVT::i32), Const1, X);
+ else {
+ SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
+ Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
+ }
+
+ if (Op.getValueType() == MVT::f32)
+ return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
+
+ SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
+ Op.getOperand(0), DAG.getConstant(0, MVT::i32));
+ return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
+}
+
+static SDValue LowerFABS64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+ SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
+ DebugLoc DL = Op.getDebugLoc();
+
+ // Bitcast to integer node.
+ SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
+
+ // Clear MSB.
+ if (HasR2)
+ Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
+ DAG.getRegister(Mips::ZERO_64, MVT::i64),
+ DAG.getConstant(63, MVT::i32), Const1, X);
+ else {
+ SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
+ Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
+ }
+
+ return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
+}
+
+SDValue
+MipsTargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const {
+ if (Subtarget->hasMips64() && (Op.getValueType() == MVT::f64))
+ return LowerFABS64(Op, DAG, Subtarget->hasMips32r2());
+
+ return LowerFABS32(Op, DAG, Subtarget->hasMips32r2());
}
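
Both FABS paths reduce to clearing the sign bit of the bit pattern; the shift-based fallback is, in scalar form:

    #include <cstdint>
    static uint64_t fabsBits(uint64_t X) {
      return (X << 1) >> 1; // (d)sll then (d)srl by 1 clears the MSB
    }
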
SDValue MipsTargetLowering::
@@ -1676,13 +1935,14 @@ LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
- SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Mips::FP, VT);
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ IsN64 ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
}
// TODO: set SType according to the desired memory barrier behavior.
-SDValue MipsTargetLowering::LowerMEMBARRIER(SDValue Op,
- SelectionDAG& DAG) const {
+SDValue
+MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const {
unsigned SType = 0;
DebugLoc dl = Op.getDebugLoc();
return DAG.getNode(MipsISD::Sync, dl, MVT::Other, Op.getOperand(0),
@@ -1703,8 +1963,6 @@ SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op,
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
-#include "MipsGenCallingConv.inc"
-
//===----------------------------------------------------------------------===//
// TODO: Implement a generic logic using tblgen that can support this.
// Mips O32 ABI rules:
@@ -1726,13 +1984,13 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
static const unsigned IntRegsSize=4, FloatRegsSize=2;
- static const unsigned IntRegs[] = {
+ static const uint16_t IntRegs[] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3
};
- static const unsigned F32Regs[] = {
+ static const uint16_t F32Regs[] = {
Mips::F12, Mips::F14
};
- static const unsigned F64Regs[] = {
+ static const uint16_t F64Regs[] = {
Mips::D6, Mips::D7
};
@@ -1811,13 +2069,77 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
return false; // CC must always match
}
+static const uint16_t Mips64IntRegs[8] =
+ {Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
+ Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
+static const uint16_t Mips64DPRegs[8] =
+ {Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
+ Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64};
+
+static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ unsigned Align = std::max(ArgFlags.getByValAlign(), (unsigned)8);
+ unsigned Size = (ArgFlags.getByValSize() + 7) / 8 * 8;
+ unsigned FirstIdx = State.getFirstUnallocated(Mips64IntRegs, 8);
+
+ assert(Align <= 16 && "Cannot handle alignments larger than 16.");
+
+ // If byval is 16-byte aligned, the first arg register must be even.
+ if ((Align == 16) && (FirstIdx % 2)) {
+ State.AllocateReg(Mips64IntRegs[FirstIdx], Mips64DPRegs[FirstIdx]);
+ ++FirstIdx;
+ }
+
+ // Mark the registers allocated.
+ for (unsigned I = FirstIdx; Size && (I < 8); Size -= 8, ++I)
+ State.AllocateReg(Mips64IntRegs[I], Mips64DPRegs[I]);
+
+ // Allocate space on caller's stack.
+ unsigned Offset = State.AllocateStack(Size, Align);
+
+ if (FirstIdx < 8)
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Mips64IntRegs[FirstIdx],
+ LocVT, LocInfo));
+ else
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+
+ return true;
+}
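
Two pieces of arithmetic above are worth spelling out (helper name is hypothetical): byval sizes are rounded up to whole doublewords, and a 16-byte-aligned byval must begin in an even-numbered argument register, so an odd FirstIdx burns one register.

    // Size = (ArgFlags.getByValSize() + 7) / 8 * 8, i.e.:
    static unsigned roundedByValSize(unsigned ByValSize) {
      return (ByValSize + 7) / 8 * 8; // round up to a multiple of 8
    }
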
+
+#include "MipsGenCallingConv.inc"
+
+static void
+AnalyzeMips64CallOperands(CCState &CCInfo,
+ const SmallVectorImpl<ISD::OutputArg> &Outs) {
+ unsigned NumOps = Outs.size();
+ for (unsigned i = 0; i != NumOps; ++i) {
+ MVT ArgVT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ bool R;
+
+ if (Outs[i].IsFixed)
+ R = CC_MipsN(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+ else
+ R = CC_MipsN_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+
+ if (R) {
+#ifndef NDEBUG
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << EVT(ArgVT).getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
static const unsigned O32IntRegsSize = 4;
-static const unsigned O32IntRegs[] = {
+static const uint16_t O32IntRegs[] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3
};
@@ -1848,9 +2170,8 @@ WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
DAG.getConstant(Offset, MVT::i32));
SDValue LoadVal = DAG.getLoad(MVT::i32, dl, Chain, LoadPtr,
- MachinePointerInfo(),
- false, false, std::min(ByValAlign,
- (unsigned )4));
+ MachinePointerInfo(), false, false, false,
+ std::min(ByValAlign, (unsigned )4));
MemOpChains.push_back(LoadVal.getValue(1));
unsigned DstReg = O32IntRegs[LocMemOffset / 4];
RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
@@ -1886,7 +2207,7 @@ WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
// Read second subword if necessary.
if (RemainingSize != 0) {
assert(RemainingSize == 1 && "There must be one byte remaining.");
- LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
+ LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
DAG.getConstant(Offset, MVT::i32));
unsigned Alignment = std::min(ByValAlign, (unsigned )2);
SDValue Subword = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
@@ -1919,13 +2240,101 @@ WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
MachinePointerInfo(0), MachinePointerInfo(0));
}
+// Copy Mips64 byVal arg to registers and stack.
+void static
+PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
+ SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass,
+ SmallVector<SDValue, 8>& MemOpChains, int& LastFI,
+ MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
+ const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ EVT PtrTy, bool isLittle) {
+ unsigned ByValSize = Flags.getByValSize();
+ unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8);
+ bool IsRegLoc = VA.isRegLoc();
+ unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
+ unsigned LocMemOffset = 0;
+ unsigned MemCpySize = ByValSize;
+
+ if (!IsRegLoc)
+ LocMemOffset = VA.getLocMemOffset();
+ else {
+ const uint16_t *Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8,
+ VA.getLocReg());
+ const uint16_t *RegEnd = Mips64IntRegs + 8;
+
+ // Copy double words to registers.
+ for (; (Reg != RegEnd) && (ByValSize >= Offset + 8); ++Reg, Offset += 8) {
+ SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue LoadVal = DAG.getLoad(MVT::i64, dl, Chain, LoadPtr,
+ MachinePointerInfo(), false, false, false,
+ Alignment);
+ MemOpChains.push_back(LoadVal.getValue(1));
+ RegsToPass.push_back(std::make_pair(*Reg, LoadVal));
+ }
+
+ // Return if the struct has been fully copied.
+ if (!(MemCpySize = ByValSize - Offset))
+ return;
+
+ // If there is an argument register available, copy the remainder of the
+ // byval argument with sub-doubleword loads and shifts.
+ if (Reg != RegEnd) {
+ assert((ByValSize < Offset + 8) &&
+ "Size of the remainder should be smaller than 8 bytes.");
+ SDValue Val;
+ for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) {
+ unsigned RemSize = ByValSize - Offset;
+
+ if (RemSize < LoadSize)
+ continue;
+
+ SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ SDValue LoadVal =
+ DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i64, Chain, LoadPtr,
+ MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
+ false, false, Alignment);
+ MemOpChains.push_back(LoadVal.getValue(1));
+
+ // Offset in number of bits from double word boundary.
+ unsigned OffsetDW = (Offset % 8) * 8;
+ unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
+ SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i64, LoadVal,
+ DAG.getConstant(Shamt, MVT::i32));
+
+ Val = Val.getNode() ? DAG.getNode(ISD::OR, dl, MVT::i64, Val, Shift) :
+ Shift;
+ Offset += LoadSize;
+ Alignment = std::min(Alignment, LoadSize);
+ }
+
+ RegsToPass.push_back(std::make_pair(*Reg, Val));
+ return;
+ }
+ }
+
+ assert(MemCpySize && "MemCpySize must not be zero.");
+
+ // Create a fixed object on stack at offset LocMemOffset and copy
+ // remainder of byval arg to it with memcpy.
+ SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
+ DAG.getConstant(Offset, PtrTy));
+ LastFI = MFI->CreateFixedObject(MemCpySize, LocMemOffset, true);
+ SDValue Dst = DAG.getFrameIndex(LastFI, PtrTy);
+ ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src,
+ DAG.getConstant(MemCpySize, PtrTy), Alignment,
+ /*isVolatile=*/false, /*AlwaysInline=*/false,
+ MachinePointerInfo(0), MachinePointerInfo(0));
+}
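
A hedged model of the sub-doubleword tail handling above: the remainder of the struct is assembled into a single i64 from progressively smaller zero-extended loads, each shifted to its byte offset within the doubleword (the real code uses 4/2/1-byte loads; this byte-wise sketch shows the little-endian shift math only).

    #include <cstdint>
    static uint64_t packTailLE(const uint8_t *P, unsigned Size) {
      uint64_t Val = 0;
      for (unsigned Off = 0; Off < Size; ++Off)
        Val |= uint64_t(P[Off]) << (Off * 8); // Shamt = OffsetDW when little-endian
      return Val;
    }
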
+
/// LowerCall - function arguments are copied from virtual regs to
/// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: isTailCall.
SDValue
MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -1943,10 +2352,12 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
- if (Subtarget->isABI_O32())
+ if (IsO32)
CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
+ else if (HasMips64)
+ AnalyzeMips64CallOperands(CCInfo, Outs);
else
CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
@@ -1963,7 +2374,7 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// If this is the first call, create a stack frame object that points to
// a location to which .cprestore saves $gp.
- if (IsPIC && !MipsFI->getGPFI())
+ if (IsO32 && IsPIC && MipsFI->globalBaseRegFixed() && !MipsFI->getGPFI())
MipsFI->setGPFI(MFI->CreateFixedObject(4, 0, true));
// Get the frame index of the stack frame object that points to the location
@@ -1973,7 +2384,7 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// Update size of the maximum argument space.
// For O32, a minimum of four words (16 bytes) of argument space is
// allocated.
- if (Subtarget->isABI_O32())
+ if (IsO32)
NextStackOffset = std::max(NextStackOffset, (unsigned)16);
unsigned MaxCallFrameSize = MipsFI->getMaxCallFrameSize();
@@ -1988,7 +2399,7 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
NextStackOffset = (NextStackOffset + StackAlignment - 1) /
StackAlignment * StackAlignment;
- if (IsPIC)
+ if (MipsFI->needGPSaveRestore())
MFI->setObjectOffset(MipsFI->getGPFI(), NextStackOffset);
MFI->setObjectOffset(DynAllocFI, NextStackOffset);
@@ -2004,22 +2415,40 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
SDValue Arg = OutVals[i];
CCValAssign &VA = ArgLocs[i];
+ MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+ // ByVal Arg.
+ if (Flags.isByVal()) {
+ assert(Flags.getByValSize() &&
+ "ByVal args of size 0 should have been ignored by front-end.");
+ if (IsO32)
+ WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
+ MFI, DAG, Arg, VA, Flags, getPointerTy(),
+ Subtarget->isLittle());
+ else
+ PassByValArg64(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
+ MFI, DAG, Arg, VA, Flags, getPointerTy(),
+ Subtarget->isLittle());
+ continue;
+ }
// Promote the value if needed.
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full:
- if (Subtarget->isABI_O32() && VA.isRegLoc()) {
- if (VA.getValVT() == MVT::f32 && VA.getLocVT() == MVT::i32)
- Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
- if (VA.getValVT() == MVT::f64 && VA.getLocVT() == MVT::i32) {
+ if (VA.isRegLoc()) {
+ if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
+ (ValVT == MVT::f64 && LocVT == MVT::i64))
+ Arg = DAG.getNode(ISD::BITCAST, dl, LocVT, Arg);
+ else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32,
Arg, DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32,
Arg, DAG.getConstant(1, MVT::i32));
if (!Subtarget->isLittle())
std::swap(Lo, Hi);
- unsigned LocRegLo = VA.getLocReg();
+ unsigned LocRegLo = VA.getLocReg();
unsigned LocRegHigh = getNextIntArgReg(LocRegLo);
RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
@@ -2028,13 +2457,13 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
}
break;
case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, LocVT, Arg);
break;
case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, LocVT, Arg);
break;
case CCValAssign::AExt:
- Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
+ Arg = DAG.getNode(ISD::ANY_EXTEND, dl, LocVT, Arg);
break;
}
@@ -2048,28 +2477,15 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// Register can't get to this point...
assert(VA.isMemLoc());
- // ByVal Arg.
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
- if (Flags.isByVal()) {
- assert(Subtarget->isABI_O32() &&
- "No support for ByVal args by ABIs other than O32 yet.");
- assert(Flags.getByValSize() &&
- "ByVal args of size 0 should have been ignored by front-end.");
- WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI, MFI,
- DAG, Arg, VA, Flags, getPointerTy(), Subtarget->isLittle());
- continue;
- }
-
// Create the frame index object for this incoming parameter
- LastFI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
+ LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), true);
SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
// Emit ISD::STORE, which stores the
// parameter value to a stack location.
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MachinePointerInfo(), false, false, 0));
}
// Extend range of indices of frame objects for outgoing arguments that were
@@ -2093,52 +2509,68 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
- unsigned char OpFlag = IsPIC ? MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG;
- bool LoadSymAddr = false;
+ unsigned char OpFlag;
+ bool IsPICCall = (IsN64 || IsPIC); // true if calls are translated to jalr $25
+ bool GlobalOrExternal = false;
SDValue CalleeLo;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- if (IsPIC && G->getGlobal()->hasInternalLinkage()) {
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
- getPointerTy(), 0,MipsII:: MO_GOT);
+ if (IsPICCall && G->getGlobal()->hasInternalLinkage()) {
+ OpFlag = IsO32 ? MipsII::MO_GOT : MipsII::MO_GOT_PAGE;
+ unsigned char LoFlag = IsO32 ? MipsII::MO_ABS_LO : MipsII::MO_GOT_OFST;
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy(), 0,
+ OpFlag);
CalleeLo = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy(),
- 0, MipsII::MO_ABS_LO);
+ 0, LoFlag);
} else {
+ OpFlag = IsPICCall ? MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG;
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
getPointerTy(), 0, OpFlag);
}
- LoadSymAddr = true;
+ GlobalOrExternal = true;
}
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
- getPointerTy(), OpFlag);
- LoadSymAddr = true;
+ if (IsN64 || (!IsO32 && IsPIC))
+ OpFlag = MipsII::MO_GOT_DISP;
+ else if (!IsPIC) // !N64 && static
+ OpFlag = MipsII::MO_NO_FLAG;
+ else // O32 & PIC
+ OpFlag = MipsII::MO_GOT_CALL;
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
+ OpFlag);
+ GlobalOrExternal = true;
}
SDValue InFlag;
// Create nodes that load address of callee and copy it to T9
- if (IsPIC) {
- if (LoadSymAddr) {
+ if (IsPICCall) {
+ if (GlobalOrExternal) {
// Load callee address
- Callee = DAG.getNode(MipsISD::WrapperPIC, dl, MVT::i32, Callee);
- SDValue LoadValue = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(), Callee,
- MachinePointerInfo::getGOT(),
- false, false, 0);
+ Callee = DAG.getNode(MipsISD::Wrapper, dl, getPointerTy(),
+ GetGlobalReg(DAG, getPointerTy()), Callee);
+ SDValue LoadValue = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
+ Callee, MachinePointerInfo::getGOT(),
+ false, false, false, 0);
// Use GOT+LO if callee has internal linkage.
if (CalleeLo.getNode()) {
- SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CalleeLo);
- Callee = DAG.getNode(ISD::ADD, dl, MVT::i32, LoadValue, Lo);
+ SDValue Lo = DAG.getNode(MipsISD::Lo, dl, getPointerTy(), CalleeLo);
+ Callee = DAG.getNode(ISD::ADD, dl, getPointerTy(), LoadValue, Lo);
} else
Callee = LoadValue;
}
+ }
+ // T9 should contain the address of the callee function if
+ // -relocation-model=pic or this is an indirect call.
+ if (IsPICCall || !GlobalOrExternal) {
// copy to T9
- Chain = DAG.getCopyToReg(Chain, dl, Mips::T9, Callee, SDValue(0, 0));
+ unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
+ Chain = DAG.getCopyToReg(Chain, dl, T9Reg, Callee, SDValue(0, 0));
InFlag = Chain.getValue(1);
- Callee = DAG.getRegister(Mips::T9, MVT::i32);
+ Callee = DAG.getRegister(T9Reg, getPointerTy());
}
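The relocation-operand selection above amounts to a small decision table. The following sketch restates it in plain C++ for reference; the enum names are stand-ins for the MipsII::MO_* flags, not the actual LLVM API.

    #include <cstdio>

    enum Flag { NO_FLAG, GOT, GOT_PAGE, GOT_CALL, GOT_DISP };

    // Mirrors the GlobalAddress/ExternalSymbol flag choice in LowerCall.
    // IsPICCall = IsN64 || IsPIC; Internal = callee has internal linkage.
    static Flag calleeFlag(bool IsO32, bool IsN64, bool IsPIC,
                           bool IsGlobal, bool Internal) {
      bool IsPICCall = IsN64 || IsPIC;
      if (IsGlobal) { // GlobalAddress callee.
        if (IsPICCall && Internal)
          return IsO32 ? GOT : GOT_PAGE; // Paired with ABS_LO / GOT_OFST.
        return IsPICCall ? GOT_CALL : NO_FLAG;
      }
      // ExternalSymbol callee.
      if (IsN64 || (!IsO32 && IsPIC))
        return GOT_DISP;
      return IsPIC ? GOT_CALL : NO_FLAG; // O32 PIC vs. static.
    }

    int main() {
      std::printf("O32 PIC extern sym -> %d (GOT_CALL)\n",
                  calleeFlag(/*IsO32=*/true, /*IsN64=*/false,
                             /*IsPIC=*/true, /*IsGlobal=*/false,
                             /*Internal=*/false));
    }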
// Build a sequence of copy-to-reg nodes chained together with token
@@ -2166,6 +2598,12 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
if (InFlag.getNode())
Ops.push_back(InFlag);
@@ -2216,7 +2654,8 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
std::vector<SDValue>& OutChains,
SelectionDAG &DAG, unsigned NumWords, SDValue FIN,
- const CCValAssign &VA, const ISD::ArgFlagsTy& Flags) {
+ const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ const Argument *FuncArg) {
unsigned LocMem = VA.getLocMemOffset();
unsigned FirstWord = LocMem / 4;
@@ -2231,20 +2670,58 @@ static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
SDValue StorePtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN,
DAG.getConstant(i * 4, MVT::i32));
SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(Reg, MVT::i32),
- StorePtr, MachinePointerInfo(), false,
- false, 0);
+ StorePtr, MachinePointerInfo(FuncArg, i * 4),
+ false, false, 0);
OutChains.push_back(Store);
}
}
+// Create a frame object on the stack and copy registers used for byval
+// passing to it.
+static unsigned
+CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
+ std::vector<SDValue>& OutChains, SelectionDAG &DAG,
+ const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+ MachineFrameInfo *MFI, bool IsRegLoc,
+ SmallVectorImpl<SDValue> &InVals, MipsFunctionInfo *MipsFI,
+ EVT PtrTy, const Argument *FuncArg) {
+ const uint16_t *Reg = Mips64IntRegs + 8;
+ int FOOffset; // Frame object offset from virtual frame pointer.
+
+ if (IsRegLoc) {
+ Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8, VA.getLocReg());
+ FOOffset = (Reg - Mips64IntRegs) * 8 - 8 * 8;
+ }
+ else
+ FOOffset = VA.getLocMemOffset();
+
+ // Create frame object.
+ unsigned NumRegs = (Flags.getByValSize() + 7) / 8;
+ unsigned LastFI = MFI->CreateFixedObject(NumRegs * 8, FOOffset, true);
+ SDValue FIN = DAG.getFrameIndex(LastFI, PtrTy);
+ InVals.push_back(FIN);
+
+ // Copy arg registers.
+ for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs);
+ ++Reg, ++I) {
+ unsigned VReg = AddLiveIn(MF, *Reg, Mips::CPU64RegsRegisterClass);
+ SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN,
+ DAG.getConstant(I * 8, PtrTy));
+ SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64),
+ StorePtr, MachinePointerInfo(FuncArg, I * 8),
+ false, false, 0);
+ OutChains.push_back(Store);
+ }
+
+ return LastFI;
+}
+
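The FOOffset arithmetic places the byval frame object so that it overlays the eight 8-byte argument-register save slots below the virtual frame pointer (e.g., a struct whose first piece arrives in $a3 lands at 3 * 8 - 64 = -40). A standalone restatement of that computation, under the N64 layout the code assumes:

    #include <cstdio>

    // Offset of the byval frame object from the virtual frame pointer.
    // RegIdx is the index of the first integer arg register holding the
    // struct ($a0 == 0); a struct passed entirely on the stack uses its
    // LocMemOffset instead.
    static int frameObjectOffset(bool InReg, int RegIdx, int LocMemOffset) {
      if (InReg)
        return RegIdx * 8 - 8 * 8; // Overlay the 64-byte reg save area.
      return LocMemOffset;
    }

    int main() {
      std::printf("%d\n", frameObjectOffset(true, 3, 0));   // -40
      std::printf("%d\n", frameObjectOffset(false, 0, 16)); // 16
    }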
/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments placed on the stack.
SDValue
MipsTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
@@ -2260,23 +2737,46 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
- if (Subtarget->isABI_O32())
+ if (IsO32)
CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32);
else
CCInfo.AnalyzeFormalArguments(Ins, CC_Mips);
+ Function::const_arg_iterator FuncArg =
+ DAG.getMachineFunction().getFunction()->arg_begin();
int LastFI = 0; // MipsFI->LastInArgFI is 0 at the entry of this function.
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++FuncArg) {
CCValAssign &VA = ArgLocs[i];
+ EVT ValVT = VA.getValVT();
+ ISD::ArgFlagsTy Flags = Ins[i].Flags;
+ bool IsRegLoc = VA.isRegLoc();
+
+ if (Flags.isByVal()) {
+ assert(Flags.getByValSize() &&
+ "ByVal args of size 0 should have been ignored by front-end.");
+ if (IsO32) {
+ unsigned NumWords = (Flags.getByValSize() + 3) / 4;
+ LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(),
+ true);
+ SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
+ InVals.push_back(FIN);
+ ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags,
+ &*FuncArg);
+ } else // N32/64
+ LastFI = CopyMips64ByValRegs(MF, Chain, dl, OutChains, DAG, VA, Flags,
+ MFI, IsRegLoc, InVals, MipsFI,
+ getPointerTy(), &*FuncArg);
+ continue;
+ }
// Arguments passed in registers
- if (VA.isRegLoc()) {
+ if (IsRegLoc) {
EVT RegVT = VA.getLocVT();
unsigned ArgReg = VA.getLocReg();
- TargetRegisterClass *RC = 0;
+ const TargetRegisterClass *RC;
if (RegVT == MVT::i32)
RC = Mips::CPURegsRegisterClass;
@@ -2305,23 +2805,22 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
Opcode = ISD::AssertZext;
if (Opcode)
ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
+ DAG.getValueType(ValVT));
+ ArgValue = DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
}
- // Handle O32 ABI cases: i32->f32 and (i32,i32)->f64
- if (Subtarget->isABI_O32()) {
- if (RegVT == MVT::i32 && VA.getValVT() == MVT::f32)
- ArgValue = DAG.getNode(ISD::BITCAST, dl, MVT::f32, ArgValue);
- if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) {
- unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
- getNextIntArgReg(ArgReg), RC);
- SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
- if (!Subtarget->isLittle())
- std::swap(ArgValue, ArgValue2);
- ArgValue = DAG.getNode(MipsISD::BuildPairF64, dl, MVT::f64,
- ArgValue, ArgValue2);
- }
+ // Handle floating point arguments passed in integer registers.
+ if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
+ (RegVT == MVT::i64 && ValVT == MVT::f64))
+ ArgValue = DAG.getNode(ISD::BITCAST, dl, ValVT, ArgValue);
+ else if (IsO32 && RegVT == MVT::i32 && ValVT == MVT::f64) {
+ unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
+ getNextIntArgReg(ArgReg), RC);
+ SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
+ if (!Subtarget->isLittle())
+ std::swap(ArgValue, ArgValue2);
+ ArgValue = DAG.getNode(MipsISD::BuildPairF64, dl, MVT::f64,
+ ArgValue, ArgValue2);
}
InVals.push_back(ArgValue);
@@ -2330,32 +2829,15 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// sanity check
assert(VA.isMemLoc());
- ISD::ArgFlagsTy Flags = Ins[i].Flags;
-
- if (Flags.isByVal()) {
- assert(Subtarget->isABI_O32() &&
- "No support for ByVal args by ABIs other than O32 yet.");
- assert(Flags.getByValSize() &&
- "ByVal args of size 0 should have been ignored by front-end.");
- unsigned NumWords = (Flags.getByValSize() + 3) / 4;
- LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(),
- true);
- SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
- InVals.push_back(FIN);
- ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags);
-
- continue;
- }
-
// The stack pointer offset is relative to the caller stack frame.
- LastFI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
+ LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), true);
// Create load nodes to retrieve arguments from the stack
SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
+ InVals.push_back(DAG.getLoad(ValVT, dl, Chain, FIN,
MachinePointerInfo::getFixedStack(LastFI),
- false, false, 0));
+ false, false, false, 0));
}
}
@@ -2372,28 +2854,43 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
}
- if (isVarArg && Subtarget->isABI_O32()) {
+ if (isVarArg) {
+ unsigned NumOfRegs = IsO32 ? 4 : 8;
+ const uint16_t *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs;
+ unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs);
+ int FirstRegSlotOffset = IsO32 ? 0 : -64; // Offset of $a0's slot.
+ const TargetRegisterClass *RC
+ = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass;
+ unsigned RegSize = RC->getSize();
+ int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize;
+
+ // Offset of the first variable argument from stack pointer.
+ int FirstVaArgOffset;
+
+ if (IsO32 || (Idx == NumOfRegs)) {
+ FirstVaArgOffset =
+ (CCInfo.getNextStackOffset() + RegSize - 1) / RegSize * RegSize;
+ } else
+ FirstVaArgOffset = RegSlotOffset;
+
// Record the frame index of the first variable argument
// which is needed by VASTART.
- unsigned NextStackOffset = CCInfo.getNextStackOffset();
- assert(NextStackOffset % 4 == 0 &&
- "NextStackOffset must be aligned to 4-byte boundaries.");
- LastFI = MFI->CreateFixedObject(4, NextStackOffset, true);
+ LastFI = MFI->CreateFixedObject(RegSize, FirstVaArgOffset, true);
MipsFI->setVarArgsFrameIndex(LastFI);
- // If NextStackOffset is smaller than o32's 16-byte reserved argument area,
- // copy the integer registers that have not been used for argument passing
- // to the caller's stack frame.
- for (; NextStackOffset < 16; NextStackOffset += 4) {
- TargetRegisterClass *RC = Mips::CPURegsRegisterClass;
- unsigned Idx = NextStackOffset / 4;
- unsigned Reg = AddLiveIn(DAG.getMachineFunction(), O32IntRegs[Idx], RC);
- SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, MVT::i32);
- LastFI = MFI->CreateFixedObject(4, NextStackOffset, true);
+ // Copy the integer registers that have not been used for argument passing
+ // to the argument register save area. For O32, the save area is allocated
+ // in the caller's stack frame, while for N32/64, it is allocated in the
+ // callee's stack frame.
+ for (int StackOffset = RegSlotOffset;
+ Idx < NumOfRegs; ++Idx, StackOffset += RegSize) {
+ unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegs[Idx], RC);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
+ MVT::getIntegerVT(RegSize * 8));
+ LastFI = MFI->CreateFixedObject(RegSize, StackOffset, true);
SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
+ MachinePointerInfo(), false, false, 0));
}
}
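The variable-argument offsets can likewise be checked in isolation. The sketch below reproduces the FirstVaArgOffset selection: O32 (or an N32/64 call that used all eight registers) rounds the next stack offset up to the register size, while N32/64 otherwise points at the save slot of the first unused register, 64 bytes below the virtual frame pointer. Plain C++, with the same names as above:

    #include <cstdio>

    // First variable argument's offset from the stack/frame pointer.
    // NextStackOffset comes from CCState; Idx is the first unused arg reg.
    static int firstVaArgOffset(bool IsO32, unsigned Idx,
                                unsigned NextStackOffset) {
      unsigned NumRegs = IsO32 ? 4 : 8;
      unsigned RegSize = IsO32 ? 4 : 8;
      int FirstRegSlotOffset = IsO32 ? 0 : -64; // Offset of $a0's slot.
      int RegSlotOffset = FirstRegSlotOffset + (int)(Idx * RegSize);

      if (IsO32 || Idx == NumRegs) // All regs used: next slot, aligned.
        return (int)((NextStackOffset + RegSize - 1) / RegSize * RegSize);
      return RegSlotOffset; // N32/64: slot of the first unused register.
    }

    int main() {
      std::printf("O32, 2 regs used: %d\n", firstVaArgOffset(true, 2, 8));
      std::printf("N64, 3 regs used: %d\n", firstVaArgOffset(false, 3, 0));
    }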
@@ -2447,8 +2944,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- OutVals[i], Flag);
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
// guarantee that all emitted copies are
// stuck together, avoiding something bad
@@ -2505,7 +3001,6 @@ getConstraintType(const std::string &Constraint) const
case 'y':
case 'f':
return C_RegisterClass;
- break;
}
}
return TargetLowering::getConstraintType(Constraint);
@@ -2553,14 +3048,19 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
case 'y': // Same as 'r'. Exists for compatibility.
case 'r':
- return std::make_pair(0U, Mips::CPURegsRegisterClass);
+ if (VT == MVT::i32)
+ return std::make_pair(0U, Mips::CPURegsRegisterClass);
+ assert(VT == MVT::i64 && "Unexpected type.");
+ return std::make_pair(0U, Mips::CPU64RegsRegisterClass);
case 'f':
if (VT == MVT::f32)
return std::make_pair(0U, Mips::FGR32RegisterClass);
- if (VT == MVT::f64)
- if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
+ if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
+ if (Subtarget->isFP64bit())
+ return std::make_pair(0U, Mips::FGR64RegisterClass);
+ else
return std::make_pair(0U, Mips::AFGR64RegisterClass);
- break;
+ }
}
}
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
@@ -2579,3 +3079,10 @@ bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
return false;
return Imm.isZero();
}
+
+unsigned MipsTargetLowering::getJumpTableEncoding() const {
+ if (IsN64)
+ return MachineJumpTableInfo::EK_GPRel64BlockAddress;
+
+ return TargetLowering::getJumpTableEncoding();
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
index 4be3fed5..c36f40f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -15,10 +15,10 @@
#ifndef MipsISELLOWERING_H
#define MipsISELLOWERING_H
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetLowering.h"
#include "Mips.h"
#include "MipsSubtarget.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetLowering.h"
namespace llvm {
namespace MipsISD {
@@ -40,13 +40,6 @@ namespace llvm {
// Handle gp_rel (small data/bss sections) relocation.
GPRel,
- // General Dynamic TLS
- TlsGd,
-
- // Local Exec TLS
- TprelHi,
- TprelLo,
-
// Thread Pointer
ThreadPointer,
@@ -79,7 +72,7 @@ namespace llvm {
BuildPairF64,
ExtractElementF64,
- WrapperPIC,
+ Wrapper,
DynAlloc,
@@ -98,6 +91,8 @@ namespace llvm {
public:
explicit MipsTargetLowering(MipsTargetMachine &TM);
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+
virtual bool allowsUnalignedMemoryAccesses (EVT VT) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
@@ -114,8 +109,8 @@ namespace llvm {
private:
// Subtarget Info
const MipsSubtarget *Subtarget;
-
- bool HasMips64, IsN64;
+
+ bool HasMips64, IsN64, IsO32;
// Lower Operand helpers
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
@@ -133,8 +128,10 @@ namespace llvm {
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const;
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
@@ -149,7 +146,7 @@ namespace llvm {
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -186,6 +183,8 @@ namespace llvm {
/// materialize the FP immediate as a load from a constant pool.
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ virtual unsigned getJumpTableEncoding() const;
+
MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned Size, unsigned BinOpcode, bool Nand = false) const;
MachineBasicBlock *EmitAtomicBinaryPartword(MachineInstr *MI,
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
index 1fb779d..b655945 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td
@@ -1,4 +1,4 @@
-//===- MipsInstrFPU.td - Mips FPU Instruction Information --*- tablegen -*-===//
+//===-- MipsInstrFPU.td - Mips FPU Instruction Information -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -59,6 +59,15 @@ def NotFP64bit : Predicate<"!Subtarget.isFP64bit()">;
def IsSingleFloat : Predicate<"Subtarget.isSingleFloat()">;
def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">;
+// FP immediate patterns.
+def fpimm0 : PatLeaf<(fpimm), [{
+ return N->isExactlyValue(+0.0);
+}]>;
+
+def fpimm0neg : PatLeaf<(fpimm), [{
+ return N->isExactlyValue(-0.0);
+}]>;
+
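fpimm0 and fpimm0neg deliberately match the two IEEE-754 zeros, which compare equal but differ in the sign bit; the patterns further down select MTC1 ZERO for +0.0 and add an FNEG for -0.0. A standalone check of the distinction being relied on:

    #include <cmath>
    #include <cstdio>

    int main() {
      double pz = +0.0, nz = -0.0;
      // +0.0 and -0.0 compare equal but differ in the sign bit, so a
      // pattern that must emit FNEG for -0.0 cannot test with ==.
      std::printf("pz == nz: %d\n", pz == nz);            // 1
      std::printf("signbit(pz): %d\n", std::signbit(pz)); // 0
      std::printf("signbit(nz): %d\n", std::signbit(nz)); // 1
    }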
//===----------------------------------------------------------------------===//
// Instruction Class Templates
//
@@ -74,19 +83,35 @@ def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">;
//===----------------------------------------------------------------------===//
// FP load.
-class FPLoad<bits<6> op, string opstr, PatFrag FOp, RegisterClass RC,
- Operand MemOpnd>:
+class FPLoad<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>:
FMem<op, (outs RC:$ft), (ins MemOpnd:$addr),
- !strconcat(opstr, "\t$ft, $addr"), [(set RC:$ft, (FOp addr:$addr))],
+ !strconcat(opstr, "\t$ft, $addr"), [(set RC:$ft, (load_a addr:$addr))],
IILoad>;
// FP store.
-class FPStore<bits<6> op, string opstr, PatFrag FOp, RegisterClass RC,
- Operand MemOpnd>:
+class FPStore<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>:
FMem<op, (outs), (ins RC:$ft, MemOpnd:$addr),
- !strconcat(opstr, "\t$ft, $addr"), [(store RC:$ft, addr:$addr)],
+ !strconcat(opstr, "\t$ft, $addr"), [(store_a RC:$ft, addr:$addr)],
IIStore>;
+// FP indexed load.
+class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC,
+ RegisterClass PRC, PatFrag FOp>:
+ FFMemIdx<funct, (outs DRC:$fd), (ins PRC:$base, PRC:$index),
+ !strconcat(opstr, "\t$fd, $index($base)"),
+ [(set DRC:$fd, (FOp (add PRC:$base, PRC:$index)))]> {
+ let fs = 0;
+}
+
+// FP indexed store.
+class FPIdxStore<bits<6> funct, string opstr, RegisterClass DRC,
+ RegisterClass PRC, PatFrag FOp>:
+ FFMemIdx<funct, (outs), (ins DRC:$fs, PRC:$base, PRC:$index),
+ !strconcat(opstr, "\t$fs, $index($base)"),
+ [(FOp DRC:$fs, (add PRC:$base, PRC:$index))]> {
+ let fd = 0;
+}
+
// Instructions that convert an FP value to 32-bit fixed point.
multiclass FFR1_W_M<bits<6> funct, string opstr> {
def _S : FFR1<funct, 16, opstr, "w.s", FGR32, FGR32>;
@@ -122,6 +147,19 @@ multiclass FFR2P_M<bits<6> funct, string opstr, SDNode OpNode, bit isComm = 0> {
}
}
+// FP madd/msub/nmadd/nmsub instruction classes.
+class FMADDSUB<bits<3> funct, bits<3> fmt, string opstr, string fmtstr,
+ SDNode OpNode, RegisterClass RC> :
+ FFMADDSUB<funct, fmt, (outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
+ !strconcat(opstr, ".", fmtstr, "\t$fd, $fr, $fs, $ft"),
+ [(set RC:$fd, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr))]>;
+
+class FNMADDSUB<bits<3> funct, bits<3> fmt, string opstr, string fmtstr,
+ SDNode OpNode, RegisterClass RC> :
+ FFMADDSUB<funct, fmt, (outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
+ !strconcat(opstr, ".", fmtstr, "\t$fd, $fr, $fs, $ft"),
+ [(set RC:$fd, (fsub fpimm0, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr)))]>;
+
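nmadd.fmt computes -((fs * ft) + fr) and nmsub.fmt computes -((fs * ft) - fr); the patterns express the final negation as a subtraction from fpimm0. A scalar restatement is below. Note that 0.0 - x is not a faithful fneg for signed zeros or NaN payloads, which is presumably why these definitions sit behind the NoNaNsFPMath predicate.

    #include <cstdio>

    // Models the DAG patterns: nmadd = 0 - (a*b + c),
    //                          nmsub = 0 - (a*b - c).
    static double nmadd(double a, double b, double c) {
      return 0.0 - (a * b + c);
    }
    static double nmsub(double a, double b, double c) {
      return 0.0 - (a * b - c);
    }

    int main() {
      std::printf("nmadd(2,3,1) = %f\n", nmadd(2, 3, 1)); // -7
      std::printf("nmsub(2,3,1) = %f\n", nmsub(2, 3, 1)); // -5
      // 0 - (+0.0) yields +0.0, while a true fneg would give -0.0:
      // subtraction from zero is only a negation under relaxed FP rules.
      std::printf("0 - (+0.0) = %f\n", nmadd(0, 0, 0));
    }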
//===----------------------------------------------------------------------===//
// Floating Point Instructions
//===----------------------------------------------------------------------===//
@@ -152,8 +190,10 @@ let Predicates = [IsFP64bit] in {
def CVT_D64_L : FFR1<0x21, 21, "cvt", "d.l", FGR64, FGR64>;
}
-defm FABS : FFR1P_M<0x5, "abs", fabs>;
-defm FNEG : FFR1P_M<0x7, "neg", fneg>;
+let Predicates = [NoNaNsFPMath] in {
+ defm FABS : FFR1P_M<0x5, "abs", fabs>;
+ defm FNEG : FFR1P_M<0x7, "neg", fneg>;
+}
defm FSQRT : FFR1P_M<0x4, "sqrt", fsqrt>;
// The odd-numbered registers are only referenced when doing loads,
@@ -183,6 +223,14 @@ def MTC1 : FFRGPR<0x04, (outs FGR32:$fs), (ins CPURegs:$rt),
"mtc1\t$rt, $fs",
[(set FGR32:$fs, (bitconvert CPURegs:$rt))]>;
+def DMFC1 : FFRGPR<0x01, (outs CPU64Regs:$rt), (ins FGR64:$fs),
+ "dmfc1\t$rt, $fs",
+ [(set CPU64Regs:$rt, (bitconvert FGR64:$fs))]>;
+
+def DMTC1 : FFRGPR<0x05, (outs FGR64:$fs), (ins CPU64Regs:$rt),
+ "dmtc1\t$rt, $fs",
+ [(set FGR64:$fs, (bitconvert CPU64Regs:$rt))]>;
+
def FMOV_S : FFR1<0x6, 16, "mov", "s", FGR32, FGR32>;
def FMOV_D32 : FFR1<0x6, 17, "mov", "d", AFGR64, AFGR64>,
Requires<[NotFP64bit]>;
@@ -191,23 +239,53 @@ def FMOV_D64 : FFR1<0x6, 17, "mov", "d", FGR64, FGR64>,
/// Floating Point Memory Instructions
let Predicates = [IsN64] in {
- def LWC1_P8 : FPLoad<0x31, "lwc1", load, FGR32, mem64>;
- def SWC1_P8 : FPStore<0x39, "swc1", store, FGR32, mem64>;
- def LDC164_P8 : FPLoad<0x35, "ldc1", load, FGR64, mem64>;
- def SDC164_P8 : FPStore<0x3d, "sdc1", store, FGR64, mem64>;
+ def LWC1_P8 : FPLoad<0x31, "lwc1", FGR32, mem64>;
+ def SWC1_P8 : FPStore<0x39, "swc1", FGR32, mem64>;
+ def LDC164_P8 : FPLoad<0x35, "ldc1", FGR64, mem64>;
+ def SDC164_P8 : FPStore<0x3d, "sdc1", FGR64, mem64>;
}
let Predicates = [NotN64] in {
- def LWC1 : FPLoad<0x31, "lwc1", load, FGR32, mem>;
- def SWC1 : FPStore<0x39, "swc1", store, FGR32, mem>;
- let Predicates = [HasMips64] in {
- def LDC164 : FPLoad<0x35, "ldc1", load, FGR64, mem>;
- def SDC164 : FPStore<0x3d, "sdc1", store, FGR64, mem>;
- }
- let Predicates = [NotMips64] in {
- def LDC1 : FPLoad<0x35, "ldc1", load, AFGR64, mem>;
- def SDC1 : FPStore<0x3d, "sdc1", store, AFGR64, mem>;
- }
+ def LWC1 : FPLoad<0x31, "lwc1", FGR32, mem>;
+ def SWC1 : FPStore<0x39, "swc1", FGR32, mem>;
+}
+
+let Predicates = [NotN64, HasMips64] in {
+ def LDC164 : FPLoad<0x35, "ldc1", FGR64, mem>;
+ def SDC164 : FPStore<0x3d, "sdc1", FGR64, mem>;
+}
+
+let Predicates = [NotN64, NotMips64] in {
+ def LDC1 : FPLoad<0x35, "ldc1", AFGR64, mem>;
+ def SDC1 : FPStore<0x3d, "sdc1", AFGR64, mem>;
+}
+
+// Indexed loads and stores.
+let Predicates = [HasMips32r2Or64] in {
+ def LWXC1 : FPIdxLoad<0x0, "lwxc1", FGR32, CPURegs, load_a>;
+ def LUXC1 : FPIdxLoad<0x5, "luxc1", FGR32, CPURegs, load_u>;
+ def SWXC1 : FPIdxStore<0x8, "swxc1", FGR32, CPURegs, store_a>;
+ def SUXC1 : FPIdxStore<0xd, "suxc1", FGR32, CPURegs, store_u>;
+}
+
+let Predicates = [HasMips32r2, NotMips64] in {
+ def LDXC1 : FPIdxLoad<0x1, "ldxc1", AFGR64, CPURegs, load_a>;
+ def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store_a>;
+}
+
+let Predicates = [HasMips64, NotN64] in {
+ def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load_a>;
+ def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, store_a>;
+}
+
+// n64
+let Predicates = [IsN64] in {
+ def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load_a>;
+ def LUXC1_P8 : FPIdxLoad<0x5, "luxc1", FGR32, CPU64Regs, load_u>;
+ def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load_a>;
+ def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store_a>;
+ def SUXC1_P8 : FPIdxStore<0xd, "suxc1", FGR32, CPU64Regs, store_u>;
+ def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store_a>;
}
/// Floating-point Arithmetic
@@ -216,6 +294,36 @@ defm FDIV : FFR2P_M<0x03, "div", fdiv>;
defm FMUL : FFR2P_M<0x02, "mul", fmul, 1>;
defm FSUB : FFR2P_M<0x01, "sub", fsub>;
+let Predicates = [HasMips32r2] in {
+ def MADD_S : FMADDSUB<0x4, 0, "madd", "s", fadd, FGR32>;
+ def MSUB_S : FMADDSUB<0x5, 0, "msub", "s", fsub, FGR32>;
+}
+
+let Predicates = [HasMips32r2, NoNaNsFPMath] in {
+ def NMADD_S : FNMADDSUB<0x6, 0, "nmadd", "s", fadd, FGR32>;
+ def NMSUB_S : FNMADDSUB<0x7, 0, "nmsub", "s", fsub, FGR32>;
+}
+
+let Predicates = [HasMips32r2, NotFP64bit] in {
+ def MADD_D32 : FMADDSUB<0x4, 1, "madd", "d", fadd, AFGR64>;
+ def MSUB_D32 : FMADDSUB<0x5, 1, "msub", "d", fsub, AFGR64>;
+}
+
+let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath] in {
+ def NMADD_D32 : FNMADDSUB<0x6, 1, "nmadd", "d", fadd, AFGR64>;
+ def NMSUB_D32 : FNMADDSUB<0x7, 1, "nmsub", "d", fsub, AFGR64>;
+}
+
+let Predicates = [HasMips32r2, IsFP64bit] in {
+ def MADD_D64 : FMADDSUB<0x4, 1, "madd", "d", fadd, FGR64>;
+ def MSUB_D64 : FMADDSUB<0x5, 1, "msub", "d", fsub, FGR64>;
+}
+
+let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath] in {
+ def NMADD_D64 : FNMADDSUB<0x6, 1, "nmadd", "d", fadd, FGR64>;
+ def NMSUB_D64 : FNMADDSUB<0x7, 1, "nmsub", "d", fsub, FGR64>;
+}
+
//===----------------------------------------------------------------------===//
// Floating Point Branch Codes
//===----------------------------------------------------------------------===//
@@ -259,71 +367,16 @@ def MIPS_FCOND_NGE : PatLeaf<(i32 13)>;
def MIPS_FCOND_LE : PatLeaf<(i32 14)>;
def MIPS_FCOND_NGT : PatLeaf<(i32 15)>;
+class FCMP<bits<5> fmt, RegisterClass RC, string typestr> :
+ FCC<fmt, (outs), (ins RC:$fs, RC:$ft, condcode:$cc),
+ !strconcat("c.$cc.", typestr, "\t$fs, $ft"),
+ [(MipsFPCmp RC:$fs, RC:$ft, imm:$cc)]>;
+
/// Floating Point Compare
let Defs=[FCR31] in {
- def FCMP_S32 : FCC<0x10, (outs), (ins FGR32:$fs, FGR32:$ft, condcode:$cc),
- "c.$cc.s\t$fs, $ft",
- [(MipsFPCmp FGR32:$fs, FGR32:$ft, imm:$cc)]>;
-
- def FCMP_D32 : FCC<0x11, (outs), (ins AFGR64:$fs, AFGR64:$ft, condcode:$cc),
- "c.$cc.d\t$fs, $ft",
- [(MipsFPCmp AFGR64:$fs, AFGR64:$ft, imm:$cc)]>,
- Requires<[NotFP64bit]>;
-}
-
-
-// Conditional moves:
-// These instructions are expanded in
-// MipsISelLowering::EmitInstrWithCustomInserter if target does not have
-// conditional move instructions.
-// flag:int, data:float
-let usesCustomInserter = 1, Constraints = "$F = $dst" in
-class CondMovIntFP<RegisterClass RC, bits<5> fmt, bits<6> func,
- string instr_asm> :
- FFR<0x11, func, fmt, (outs RC:$dst), (ins RC:$T, CPURegs:$cond, RC:$F),
- !strconcat(instr_asm, "\t$dst, $T, $cond"), []>;
-
-def MOVZ_S : CondMovIntFP<FGR32, 16, 18, "movz.s">;
-def MOVN_S : CondMovIntFP<FGR32, 16, 19, "movn.s">;
-
-let Predicates = [NotFP64bit] in {
- def MOVZ_D : CondMovIntFP<AFGR64, 17, 18, "movz.d">;
- def MOVN_D : CondMovIntFP<AFGR64, 17, 19, "movn.d">;
-}
-
-defm : MovzPats<FGR32, MOVZ_S>;
-defm : MovnPats<FGR32, MOVN_S>;
-
-let Predicates = [NotFP64bit] in {
- defm : MovzPats<AFGR64, MOVZ_D>;
- defm : MovnPats<AFGR64, MOVN_D>;
-}
-
-let cc = 0, usesCustomInserter = 1, Uses = [FCR31],
- Constraints = "$F = $dst" in {
-// flag:float, data:int
-class CondMovFPInt<SDNode cmov, bits<1> tf, string instr_asm> :
- FCMOV<tf, (outs CPURegs:$dst), (ins CPURegs:$T, CPURegs:$F),
- !strconcat(instr_asm, "\t$dst, $T, $$fcc0"),
- [(set CPURegs:$dst, (cmov CPURegs:$T, CPURegs:$F))]>;
-
-// flag:float, data:float
-let cc = 0 in
-class CondMovFPFP<RegisterClass RC, SDNode cmov, bits<5> fmt, bits<1> tf,
- string instr_asm> :
- FFCMOV<fmt, tf, (outs RC:$dst), (ins RC:$T, RC:$F),
- !strconcat(instr_asm, "\t$dst, $T, $$fcc0"),
- [(set RC:$dst, (cmov RC:$T, RC:$F))]>;
-}
-
-def MOVT : CondMovFPInt<MipsCMovFP_T, 1, "movt">;
-def MOVF : CondMovFPInt<MipsCMovFP_F, 0, "movf">;
-def MOVT_S : CondMovFPFP<FGR32, MipsCMovFP_T, 16, 1, "movt.s">;
-def MOVF_S : CondMovFPFP<FGR32, MipsCMovFP_F, 16, 0, "movf.s">;
-
-let Predicates = [NotFP64bit] in {
- def MOVT_D : CondMovFPFP<AFGR64, MipsCMovFP_T, 17, 1, "movt.d">;
- def MOVF_D : CondMovFPFP<AFGR64, MipsCMovFP_F, 17, 0, "movf.d">;
+ def FCMP_S32 : FCMP<0x10, FGR32, "s">;
+ def FCMP_D32 : FCMP<0x11, AFGR64, "d">, Requires<[NotFP64bit]>;
+ def FCMP_D64 : FCMP<0x11, FGR64, "d">, Requires<[IsFP64bit]>;
}
//===----------------------------------------------------------------------===//
@@ -352,25 +405,46 @@ def ExtractElementF64 :
//===----------------------------------------------------------------------===//
// Floating Point Patterns
//===----------------------------------------------------------------------===//
-def fpimm0 : PatLeaf<(fpimm), [{
- return N->isExactlyValue(+0.0);
-}]>;
-
-def fpimm0neg : PatLeaf<(fpimm), [{
- return N->isExactlyValue(-0.0);
-}]>;
-
def : Pat<(f32 fpimm0), (MTC1 ZERO)>;
def : Pat<(f32 fpimm0neg), (FNEG_S (MTC1 ZERO))>;
def : Pat<(f32 (sint_to_fp CPURegs:$src)), (CVT_S_W (MTC1 CPURegs:$src))>;
-def : Pat<(f64 (sint_to_fp CPURegs:$src)), (CVT_D32_W (MTC1 CPURegs:$src))>;
-
def : Pat<(i32 (fp_to_sint FGR32:$src)), (MFC1 (TRUNC_W_S FGR32:$src))>;
-def : Pat<(i32 (fp_to_sint AFGR64:$src)), (MFC1 (TRUNC_W_D32 AFGR64:$src))>;
let Predicates = [NotFP64bit] in {
+ def : Pat<(f64 (sint_to_fp CPURegs:$src)), (CVT_D32_W (MTC1 CPURegs:$src))>;
+ def : Pat<(i32 (fp_to_sint AFGR64:$src)), (MFC1 (TRUNC_W_D32 AFGR64:$src))>;
def : Pat<(f32 (fround AFGR64:$src)), (CVT_S_D32 AFGR64:$src)>;
def : Pat<(f64 (fextend FGR32:$src)), (CVT_D32_S FGR32:$src)>;
}
+let Predicates = [IsFP64bit] in {
+ def : Pat<(f64 fpimm0), (DMTC1 ZERO_64)>;
+ def : Pat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>;
+
+ def : Pat<(f64 (sint_to_fp CPURegs:$src)), (CVT_D64_W (MTC1 CPURegs:$src))>;
+ def : Pat<(f32 (sint_to_fp CPU64Regs:$src)),
+ (CVT_S_L (DMTC1 CPU64Regs:$src))>;
+ def : Pat<(f64 (sint_to_fp CPU64Regs:$src)),
+ (CVT_D64_L (DMTC1 CPU64Regs:$src))>;
+
+ def : Pat<(i32 (fp_to_sint FGR64:$src)), (MFC1 (TRUNC_W_D64 FGR64:$src))>;
+ def : Pat<(i64 (fp_to_sint FGR32:$src)), (DMFC1 (TRUNC_L_S FGR32:$src))>;
+ def : Pat<(i64 (fp_to_sint FGR64:$src)), (DMFC1 (TRUNC_L_D64 FGR64:$src))>;
+
+ def : Pat<(f32 (fround FGR64:$src)), (CVT_S_D64 FGR64:$src)>;
+ def : Pat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>;
+}
+
+// Patterns for unaligned floating point loads and stores.
+let Predicates = [HasMips32r2Or64, NotN64] in {
+ def : Pat<(f32 (load_u CPURegs:$addr)), (LUXC1 CPURegs:$addr, ZERO)>;
+ def : Pat<(store_u FGR32:$src, CPURegs:$addr),
+ (SUXC1 FGR32:$src, CPURegs:$addr, ZERO)>;
+}
+
+let Predicates = [IsN64] in {
+ def : Pat<(f32 (load_u CPU64Regs:$addr)), (LUXC1_P8 CPU64Regs:$addr, ZERO_64)>;
+ def : Pat<(store_u FGR32:$src, CPU64Regs:$addr),
+ (SUXC1_P8 FGR32:$src, CPU64Regs:$addr, ZERO_64)>;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
index e1725fa..4555303 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
@@ -1,4 +1,4 @@
-//===- MipsInstrFormats.td - Mips Instruction Formats ------*- tablegen -*-===//
+//===-- MipsInstrFormats.td - Mips Instruction Formats -----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -115,7 +115,7 @@ class FI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
let Inst{15-0} = imm16;
}
-class CBranchBase<bits<6> op, dag outs, dag ins, string asmstr,
+class BranchBase<bits<6> op, dag outs, dag ins, string asmstr,
list<dag> pattern, InstrItinClass itin>:
MipsInst<outs, ins, asmstr, pattern, itin, FrmI>
{
@@ -290,3 +290,40 @@ class FFR2P<bits<6> funct, bits<5> fmt, string opstr,
FFR<0x11, funct, fmt, (outs RC:$fd), (ins RC:$fs, RC:$ft),
!strconcat(opstr, ".", fmtstr, "\t$fd, $fs, $ft"),
[(set RC:$fd, (OpNode RC:$fs, RC:$ft))]>;
+
+// Floating point madd/msub/nmadd/nmsub.
+class FFMADDSUB<bits<3> funct, bits<3> fmt, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther> {
+ bits<5> fd;
+ bits<5> fr;
+ bits<5> fs;
+ bits<5> ft;
+
+ let Opcode = 0x13;
+ let Inst{25-21} = fr;
+ let Inst{20-16} = ft;
+ let Inst{15-11} = fs;
+ let Inst{10-6} = fd;
+ let Inst{5-3} = funct;
+ let Inst{2-0} = fmt;
+}
+
+// FP indexed load/store instructions.
+class FFMemIdx<bits<6> funct, dag outs, dag ins, string asmstr,
+ list<dag> pattern> :
+ MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+{
+ bits<5> base;
+ bits<5> index;
+ bits<5> fs;
+ bits<5> fd;
+
+ let Opcode = 0x13;
+
+ let Inst{25-21} = base;
+ let Inst{20-16} = index;
+ let Inst{15-11} = fs;
+ let Inst{10-6} = fd;
+ let Inst{5-0} = funct;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 559943a..a3a18bf 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- MipsInstrInfo.cpp - Mips Instruction Information ---------*- C++ -*-===//
+//===-- MipsInstrInfo.cpp - Mips Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -29,10 +29,10 @@ using namespace llvm;
MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm)
: MipsGenInstrInfo(Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
TM(tm), IsN64(TM.getSubtarget<MipsSubtarget>().isABI_N64()),
- RI(*TM.getSubtargetImpl(), *this) {}
+ RI(*TM.getSubtargetImpl(), *this),
+ UncondBrOpc(TM.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J) {}
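Keying the unconditional-branch opcode on the relocation model reflects the reach of the two encodings: B is PC-relative with a signed 16-bit word offset, while J splices a 26-bit instruction index into the current 256 MB region, which does not survive relocation under PIC. A rough sketch of the two target computations, ignoring delay-slot scheduling:

    #include <cstdint>
    #include <cstdio>

    // B: signed 16-bit word offset, relative to the instruction after
    // the branch (its delay slot).
    static uint32_t branchTarget(uint32_t pc, int16_t wordOff) {
      return pc + 4 + ((int32_t)wordOff << 2);
    }

    // J: 26-bit instruction index; the region bits come from the address
    // of the delay-slot instruction.
    static uint32_t jumpTarget(uint32_t pc, uint32_t instrIndex) {
      return ((pc + 4) & 0xF0000000u) | ((instrIndex & 0x03FFFFFFu) << 2);
    }

    int main() {
      std::printf("B from 0x1000, off 16:    0x%08x\n",
                  branchTarget(0x1000, 16));
      std::printf("J from 0x1000, idx 0x100: 0x%08x\n",
                  jumpTarget(0x1000, 0x100));
    }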
-
-const MipsRegisterInfo &MipsInstrInfo::getRegisterInfo() const {
+const MipsRegisterInfo &MipsInstrInfo::getRegisterInfo() const {
return RI;
}
@@ -131,6 +131,8 @@ copyPhysReg(MachineBasicBlock &MBB,
Opc = Mips::FMOV_S;
else if (Mips::AFGR64RegClass.contains(DestReg, SrcReg))
Opc = Mips::FMOV_D32;
+ else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
+ Opc = Mips::FMOV_D64;
else if (Mips::CCRRegClass.contains(DestReg, SrcReg))
Opc = Mips::MOVCCRToCCR;
else if (Mips::CPU64RegsRegClass.contains(DestReg)) { // Copy to CPU64 Reg.
@@ -140,18 +142,22 @@ copyPhysReg(MachineBasicBlock &MBB,
Opc = Mips::MFHI64, SrcReg = 0;
else if (SrcReg == Mips::LO64)
Opc = Mips::MFLO64, SrcReg = 0;
+ else if (Mips::FGR64RegClass.contains(SrcReg))
+ Opc = Mips::DMFC1;
}
else if (Mips::CPU64RegsRegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
if (DestReg == Mips::HI64)
Opc = Mips::MTHI64, DestReg = 0;
else if (DestReg == Mips::LO64)
Opc = Mips::MTLO64, DestReg = 0;
+ else if (Mips::FGR64RegClass.contains(DestReg))
+ Opc = Mips::DMTC1;
}
assert(Opc && "Cannot copy registers");
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));
-
+
if (DestReg)
MIB.addReg(DestReg, RegState::Define);
@@ -162,6 +168,16 @@ copyPhysReg(MachineBasicBlock &MBB,
MIB.addReg(SrcReg, getKillRegState(KillSrc));
}
+static MachineMemOperand* GetMemOperand(MachineBasicBlock &MBB, int FI,
+ unsigned Flag) {
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo &MFI = *MF.getFrameInfo();
+ unsigned Align = MFI.getObjectAlignment(FI);
+
+ return MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), Flag,
+ MFI.getObjectSize(FI), Align);
+}
+
void MipsInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, bool isKill, int FI,
@@ -169,6 +185,8 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const TargetRegisterInfo *TRI) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
+ MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);
+
unsigned Opc = 0;
if (RC == Mips::CPURegsRegisterClass)
@@ -184,7 +202,7 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0);
+ .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
}
void MipsInstrInfo::
@@ -195,6 +213,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
{
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
+ MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
unsigned Opc = 0;
if (RC == Mips::CPURegsRegisterClass)
@@ -209,7 +228,8 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
Opc = IsN64 ? Mips::LDC164_P8 : Mips::LDC164;
assert(Opc && "Register class not handled!");
- BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0);
+ BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
}
MachineInstr*
@@ -230,7 +250,8 @@ static unsigned GetAnalyzableBrOpc(unsigned Opc) {
Opc == Mips::BGEZ || Opc == Mips::BLTZ || Opc == Mips::BLEZ ||
Opc == Mips::BEQ64 || Opc == Mips::BNE64 || Opc == Mips::BGTZ64 ||
Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
- Opc == Mips::BC1T || Opc == Mips::BC1F || Opc == Mips::J) ?
+ Opc == Mips::BC1T || Opc == Mips::BC1F || Opc == Mips::B ||
+ Opc == Mips::J) ?
Opc : 0;
}
@@ -239,21 +260,21 @@ static unsigned GetAnalyzableBrOpc(unsigned Opc) {
unsigned Mips::GetOppositeBranchOpc(unsigned Opc)
{
switch (Opc) {
- default: llvm_unreachable("Illegal opcode!");
- case Mips::BEQ : return Mips::BNE;
- case Mips::BNE : return Mips::BEQ;
- case Mips::BGTZ : return Mips::BLEZ;
- case Mips::BGEZ : return Mips::BLTZ;
- case Mips::BLTZ : return Mips::BGEZ;
- case Mips::BLEZ : return Mips::BGTZ;
- case Mips::BEQ64 : return Mips::BNE64;
- case Mips::BNE64 : return Mips::BEQ64;
- case Mips::BGTZ64 : return Mips::BLEZ64;
- case Mips::BGEZ64 : return Mips::BLTZ64;
- case Mips::BLTZ64 : return Mips::BGEZ64;
- case Mips::BLEZ64 : return Mips::BGTZ64;
- case Mips::BC1T : return Mips::BC1F;
- case Mips::BC1F : return Mips::BC1T;
+ default: llvm_unreachable("Illegal opcode!");
+ case Mips::BEQ: return Mips::BNE;
+ case Mips::BNE: return Mips::BEQ;
+ case Mips::BGTZ: return Mips::BLEZ;
+ case Mips::BGEZ: return Mips::BLTZ;
+ case Mips::BLTZ: return Mips::BGEZ;
+ case Mips::BLEZ: return Mips::BGTZ;
+ case Mips::BEQ64: return Mips::BNE64;
+ case Mips::BNE64: return Mips::BEQ64;
+ case Mips::BGTZ64: return Mips::BLEZ64;
+ case Mips::BGEZ64: return Mips::BLTZ64;
+ case Mips::BLTZ64: return Mips::BGEZ64;
+ case Mips::BLEZ64: return Mips::BGTZ64;
+ case Mips::BC1T: return Mips::BC1F;
+ case Mips::BC1F: return Mips::BC1T;
}
}
@@ -262,7 +283,7 @@ static void AnalyzeCondBr(const MachineInstr* Inst, unsigned Opc,
SmallVectorImpl<MachineOperand>& Cond) {
assert(GetAnalyzableBrOpc(Opc) && "Not an analyzable branch");
int NumOp = Inst->getNumExplicitOperands();
-
+
// for both int and fp branches, the last explicit operand is the
// MBB.
BB = Inst->getOperand(NumOp-1).getMBB();
@@ -314,7 +335,7 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// If there is only one terminator instruction, process it.
if (!SecondLastOpc) {
// Unconditional branch
- if (LastOpc == Mips::J) {
+ if (LastOpc == UncondBrOpc) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
@@ -331,7 +352,7 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// If second to last instruction is an unconditional branch,
// analyze it and remove the last instruction.
- if (SecondLastOpc == Mips::J) {
+ if (SecondLastOpc == UncondBrOpc) {
// Return if the last instruction cannot be removed.
if (!AllowModify)
return true;
@@ -343,15 +364,15 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Conditional branch followed by an unconditional branch.
// The last one must be unconditional.
- if (LastOpc != Mips::J)
+ if (LastOpc != UncondBrOpc)
return true;
AnalyzeCondBr(SecondLastInst, SecondLastOpc, TBB, Cond);
FBB = LastInst->getOperand(0).getMBB();
return false;
-}
-
+}
+
void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB,
MachineBasicBlock *TBB, DebugLoc DL,
const SmallVectorImpl<MachineOperand>& Cond)
@@ -385,14 +406,14 @@ InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
// Two-way Conditional branch.
if (FBB) {
BuildCondBr(MBB, TBB, DL, Cond);
- BuildMI(&MBB, DL, get(Mips::J)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(UncondBrOpc)).addMBB(FBB);
return 2;
}
// One way branch.
// Unconditional branch.
if (Cond.empty())
- BuildMI(&MBB, DL, get(Mips::J)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(UncondBrOpc)).addMBB(TBB);
else // Conditional branch.
BuildCondBr(MBB, TBB, DL, Cond);
return 1;
@@ -433,27 +454,3 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
return false;
}
-/// getGlobalBaseReg - Return a virtual register initialized with the
-/// the global base register value. Output instructions required to
-/// initialize the register in the function entry block, if necessary.
-///
-unsigned MipsInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
- MipsFunctionInfo *MipsFI = MF->getInfo<MipsFunctionInfo>();
- unsigned GlobalBaseReg = MipsFI->getGlobalBaseReg();
- if (GlobalBaseReg != 0)
- return GlobalBaseReg;
-
- // Insert the set of GlobalBaseReg into the first MBB of the function
- MachineBasicBlock &FirstMBB = MF->front();
- MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- MachineRegisterInfo &RegInfo = MF->getRegInfo();
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
-
- GlobalBaseReg = RegInfo.createVirtualRegister(Mips::CPURegsRegisterClass);
- BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
- GlobalBaseReg).addReg(Mips::GP);
- RegInfo.addLiveIn(Mips::GP);
-
- MipsFI->setGlobalBaseReg(GlobalBaseReg);
- return GlobalBaseReg;
-}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
index 271d248..4be727d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -1,4 +1,4 @@
-//===- MipsInstrInfo.h - Mips Instruction Information -----------*- C++ -*-===//
+//===-- MipsInstrInfo.h - Mips Instruction Information ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,9 +15,9 @@
#define MIPSINSTRUCTIONINFO_H
#include "Mips.h"
+#include "MipsRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "MipsRegisterInfo.h"
#define GET_INSTRINFO_HEADER
#include "MipsGenInstrInfo.inc"
@@ -30,90 +30,11 @@ namespace Mips {
unsigned GetOppositeBranchOpc(unsigned Opc);
}
-/// MipsII - This namespace holds all of the target specific flags that
-/// instruction info tracks.
-///
-namespace MipsII {
- /// Target Operand Flag enum.
- enum TOF {
- //===------------------------------------------------------------------===//
- // Mips Specific MachineOperand flags.
-
- MO_NO_FLAG,
-
- /// MO_GOT - Represents the offset into the global offset table at which
- /// the address the relocation entry symbol resides during execution.
- MO_GOT,
-
- /// MO_GOT_CALL - Represents the offset into the global offset table at
- /// which the address of a call site relocation entry symbol resides
- /// during execution. This is different from the above since this flag
- /// can only be present in call instructions.
- MO_GOT_CALL,
-
- /// MO_GPREL - Represents the offset from the current gp value to be used
- /// for the relocatable object file being produced.
- MO_GPREL,
-
- /// MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol
- /// address.
- MO_ABS_HI,
- MO_ABS_LO,
-
- /// MO_TLSGD - Represents the offset into the global offset table at which
- // the module ID and TSL block offset reside during execution (General
- // Dynamic TLS).
- MO_TLSGD,
-
- /// MO_GOTTPREL - Represents the offset from the thread pointer (Initial
- // Exec TLS).
- MO_GOTTPREL,
-
- /// MO_TPREL_HI/LO - Represents the hi and low part of the offset from
- // the thread pointer (Local Exec TLS).
- MO_TPREL_HI,
- MO_TPREL_LO,
-
- // N32/64 Flags.
- MO_GPOFF_HI,
- MO_GPOFF_LO,
- MO_GOT_DISP,
- MO_GOT_PAGE,
- MO_GOT_OFST
- };
-
- enum {
- //===------------------------------------------------------------------===//
- // Instruction encodings. These are the standard/most common forms for
- // Mips instructions.
- //
-
- // Pseudo - This represents an instruction that is a pseudo instruction
- // or one that has not been implemented yet. It is illegal to code generate
- // it, but tolerated for intermediate implementation stages.
- Pseudo = 0,
-
- /// FrmR - This form is for instructions of the format R.
- FrmR = 1,
- /// FrmI - This form is for instructions of the format I.
- FrmI = 2,
- /// FrmJ - This form is for instructions of the format J.
- FrmJ = 3,
- /// FrmFR - This form is for instructions of the format FR.
- FrmFR = 4,
- /// FrmFI - This form is for instructions of the format FI.
- FrmFI = 5,
- /// FrmOther - This form is for instructions that have no specific format.
- FrmOther = 6,
-
- FormMask = 15
- };
-}
-
class MipsInstrInfo : public MipsGenInstrInfo {
MipsTargetMachine &TM;
bool IsN64;
const MipsRegisterInfo RI;
+ unsigned UncondBrOpc;
public:
explicit MipsInstrInfo(MipsTargetMachine &TM);
@@ -182,12 +103,6 @@ public:
/// Insert nop instruction when hazard condition is found
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
-
- /// getGlobalBaseReg - Return a virtual register initialized with the
- /// the global base register value. Output instructions required to
- /// initialize the register in the function entry block, if necessary.
- ///
- unsigned getGlobalBaseReg(MachineFunction *MF) const;
};
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
index 3fbd41e..be74f8e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -39,8 +39,8 @@ def SDT_MipsDivRem : SDTypeProfile<0, 2,
def SDT_MipsThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
-def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
- SDTCisVT<1, iPTR>]>;
+def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>,
+ SDTCisSameAs<0, 1>]>;
def SDT_Sync : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
def SDT_Ext : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
@@ -103,11 +103,11 @@ def MipsDivRemU : SDNode<"MipsISD::DivRemU", SDT_MipsDivRem,
// target constant nodes that would otherwise remain unchanged with ADDiu
// nodes. Without these wrapper node patterns, the following conditional move
// instruction is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
-// compiled:
+// compiled:
// movn %got(d)($gp), %got(c)($gp), $4
// This instruction is illegal since movn can take only register operands.
-def MipsWrapperPIC : SDNode<"MipsISD::WrapperPIC", SDTIntUnaryOp>;
+def MipsWrapper : SDNode<"MipsISD::Wrapper", SDTIntBinOp>;
// Pointer to dynamically allocated stack area.
def MipsDynAlloc : SDNode<"MipsISD::DynAlloc", SDT_MipsDynAlloc,
@@ -128,18 +128,31 @@ def HasCondMov : Predicate<"Subtarget.hasCondMov()">;
def HasMips32 : Predicate<"Subtarget.hasMips32()">;
def HasMips32r2 : Predicate<"Subtarget.hasMips32r2()">;
def HasMips64 : Predicate<"Subtarget.hasMips64()">;
+def HasMips32r2Or64 : Predicate<"Subtarget.hasMips32r2Or64()">;
def NotMips64 : Predicate<"!Subtarget.hasMips64()">;
def HasMips64r2 : Predicate<"Subtarget.hasMips64r2()">;
def IsN64 : Predicate<"Subtarget.isABI_N64()">;
def NotN64 : Predicate<"!Subtarget.isABI_N64()">;
+def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
+def RelocPIC : Predicate<"TM.getRelocationModel() == Reloc::PIC_">;
+def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">;
//===----------------------------------------------------------------------===//
// Mips Operand, Complex Patterns and Transformations Definitions.
//===----------------------------------------------------------------------===//
// Instruction operand types
-def brtarget : Operand<OtherVT>;
-def calltarget : Operand<i32>;
+def jmptarget : Operand<OtherVT> {
+ let EncoderMethod = "getJumpTargetOpValue";
+}
+def brtarget : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValue";
+ let OperandType = "OPERAND_PCREL";
+}
+def calltarget : Operand<iPTR> {
+ let EncoderMethod = "getJumpTargetOpValue";
+}
+def calltarget64: Operand<i64>;
def simm16 : Operand<i32>;
def simm16_64 : Operand<i64>;
def shamt : Operand<i32>;
@@ -167,6 +180,12 @@ def mem_ea : Operand<i32> {
let EncoderMethod = "getMemEncoding";
}
+def mem_ea_64 : Operand<i64> {
+ let PrintMethod = "printMemOperandEA";
+ let MIOperandInfo = (ops CPU64Regs, simm16_64);
+ let EncoderMethod = "getMemEncoding";
+}
+
// size operand of ext instruction
def size_ext : Operand<i32> {
let EncoderMethod = "getSizeExtEncoding";
@@ -179,12 +198,12 @@ def size_ins : Operand<i32> {
// Transformation Function - get the lower 16 bits.
def LO16 : SDNodeXForm<imm, [{
- return getI32Imm((unsigned)N->getZExtValue() & 0xFFFF);
+ return getImm(N, N->getZExtValue() & 0xFFFF);
}]>;
// Transformation Function - get the higher 16 bits.
def HI16 : SDNodeXForm<imm, [{
- return getI32Imm((unsigned)N->getZExtValue() >> 16);
+ return getImm(N, (N->getZExtValue() >> 16) & 0xFFFF);
}]>;
// Node immediate fits as 16-bit sign extended on target immediate.
@@ -202,36 +221,42 @@ def immZExt16 : PatLeaf<(imm), [{
return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
}], LO16>;
-// shamt field must fit in 5 bits.
-def immZExt5 : PatLeaf<(imm), [{
- return N->getZExtValue() == ((N->getZExtValue()) & 0x1f) ;
+// Immediate can be loaded with LUi (32-bit int with lower 16-bit cleared).
+def immLow16Zero : PatLeaf<(imm), [{
+ int64_t Val = N->getSExtValue();
+ return isInt<32>(Val) && !(Val & 0xffff);
}]>;
+// shamt field must fit in 5 bits.
+def immZExt5 : ImmLeaf<i32, [{return Imm == (Imm & 0x1f);}]>;
+
// Mips Address Mode! SDNode frameindex could possibly be a match
// since load and store instructions from the stack use it.
-def addr : ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], []>;
+def addr : ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], [SDNPWantParent]>;
//===----------------------------------------------------------------------===//
// Pattern fragment for load/store
//===----------------------------------------------------------------------===//
-class UnalignedLoad<PatFrag Node> : PatFrag<(ops node:$ptr), (Node node:$ptr), [{
+class UnalignedLoad<PatFrag Node> :
+ PatFrag<(ops node:$ptr), (Node node:$ptr), [{
LoadSDNode *LD = cast<LoadSDNode>(N);
return LD->getMemoryVT().getSizeInBits()/8 > LD->getAlignment();
}]>;
-class AlignedLoad<PatFrag Node> : PatFrag<(ops node:$ptr), (Node node:$ptr), [{
+class AlignedLoad<PatFrag Node> :
+ PatFrag<(ops node:$ptr), (Node node:$ptr), [{
LoadSDNode *LD = cast<LoadSDNode>(N);
return LD->getMemoryVT().getSizeInBits()/8 <= LD->getAlignment();
}]>;
-class UnalignedStore<PatFrag Node> : PatFrag<(ops node:$val, node:$ptr),
- (Node node:$val, node:$ptr), [{
+class UnalignedStore<PatFrag Node> :
+ PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
StoreSDNode *SD = cast<StoreSDNode>(N);
return SD->getMemoryVT().getSizeInBits()/8 > SD->getAlignment();
}]>;
-class AlignedStore<PatFrag Node> : PatFrag<(ops node:$val, node:$ptr),
- (Node node:$val, node:$ptr), [{
+class AlignedStore<PatFrag Node> :
+ PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
StoreSDNode *SD = cast<StoreSDNode>(N);
return SD->getMemoryVT().getSizeInBits()/8 <= SD->getAlignment();
}]>;
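
All four fragments share one comparison: the access counts as unaligned
exactly when its size in bytes exceeds the alignment recorded on the memory
node. As a sketch:

// Shared predicate of the (Un)AlignedLoad/Store fragments above.
static bool isUnalignedAccess(unsigned SizeInBits, unsigned Alignment) {
  return SizeInBits / 8 > Alignment;
}
// isUnalignedAccess(32, 4) -> false: plain lw/sw is selected
// isUnalignedAccess(32, 1) -> true:  the ulw/usw pseudos are selected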
@@ -313,27 +338,34 @@ class LogicNOR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>:
}
// Shifts
-class LogicR_shift_rotate_imm<bits<6> func, bits<5> _rs, string instr_asm,
- SDNode OpNode>:
- FR<0x00, func, (outs CPURegs:$rd), (ins CPURegs:$rt, shamt:$shamt),
+class shift_rotate_imm<bits<6> func, bits<5> isRotate, string instr_asm,
+ SDNode OpNode, PatFrag PF, Operand ImmOpnd,
+ RegisterClass RC>:
+ FR<0x00, func, (outs RC:$rd), (ins RC:$rt, ImmOpnd:$shamt),
!strconcat(instr_asm, "\t$rd, $rt, $shamt"),
- [(set CPURegs:$rd, (OpNode CPURegs:$rt, (i32 immZExt5:$shamt)))], IIAlu> {
- let rs = _rs;
+ [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu> {
+ let rs = isRotate;
}
-class LogicR_shift_rotate_reg<bits<6> func, bits<5> isRotate, string instr_asm,
- SDNode OpNode>:
- FR<0x00, func, (outs CPURegs:$rd), (ins CPURegs:$rs, CPURegs:$rt),
+// 32-bit shift instructions.
+class shift_rotate_imm32<bits<6> func, bits<5> isRotate, string instr_asm,
+ SDNode OpNode>:
+ shift_rotate_imm<func, isRotate, instr_asm, OpNode, immZExt5, shamt, CPURegs>;
+
+class shift_rotate_reg<bits<6> func, bits<5> isRotate, string instr_asm,
+ SDNode OpNode, RegisterClass RC>:
+ FR<0x00, func, (outs RC:$rd), (ins CPURegs:$rs, RC:$rt),
!strconcat(instr_asm, "\t$rd, $rt, $rs"),
- [(set CPURegs:$rd, (OpNode CPURegs:$rt, CPURegs:$rs))], IIAlu> {
+ [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs))], IIAlu> {
let shamt = isRotate;
}
// Load Upper Immediate
-class LoadUpper<bits<6> op, string instr_asm>:
- FI<op, (outs CPURegs:$rt), (ins uimm16:$imm16),
+class LoadUpper<bits<6> op, string instr_asm, RegisterClass RC, Operand Imm>:
+ FI<op, (outs RC:$rt), (ins Imm:$imm16),
!strconcat(instr_asm, "\t$rt, $imm16"), [], IIAlu> {
let rs = 0;
+ let neverHasSideEffects = 1;
}
class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
@@ -361,6 +393,14 @@ class StoreM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
let isPseudo = Pseudo;
}
+// Unaligned Memory Load/Store
+let canFoldAsLoad = 1 in
+class LoadUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
+ FMem<op, (outs RC:$rt), (ins MemOpnd:$addr), "", [], IILoad> {}
+
+class StoreUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
+ FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr), "", [], IIStore> {}
+
// 32-bit load.
multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
bit Pseudo = 0> {
@@ -368,7 +408,7 @@ multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
Requires<[NotN64]>;
def _P8 : LoadM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
Requires<[IsN64]>;
-}
+}
// 64-bit load.
multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,
@@ -377,8 +417,15 @@ multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,
Requires<[NotN64]>;
def _P8 : LoadM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
Requires<[IsN64]>;
-}
+}
+// 32-bit load.
+multiclass LoadUnAlign32<bits<6> op> {
+ def #NAME# : LoadUnAlign<op, CPURegs, mem>,
+ Requires<[NotN64]>;
+ def _P8 : LoadUnAlign<op, CPURegs, mem64>,
+ Requires<[IsN64]>;
+}
// 32-bit store.
multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode,
bit Pseudo = 0> {
@@ -397,11 +444,19 @@ multiclass StoreM64<bits<6> op, string instr_asm, PatFrag OpNode,
Requires<[IsN64]>;
}
+// 32-bit store.
+multiclass StoreUnAlign32<bits<6> op> {
+ def #NAME# : StoreUnAlign<op, CPURegs, mem>,
+ Requires<[NotN64]>;
+ def _P8 : StoreUnAlign<op, CPURegs, mem64>,
+ Requires<[IsN64]>;
+}
+
// Conditional Branch
class CBranch<bits<6> op, string instr_asm, PatFrag cond_op, RegisterClass RC>:
- CBranchBase<op, (outs), (ins RC:$rs, RC:$rt, brtarget:$imm16),
- !strconcat(instr_asm, "\t$rs, $rt, $imm16"),
- [(brcond (i32 (cond_op RC:$rs, RC:$rt)), bb:$imm16)], IIBranch> {
+ BranchBase<op, (outs), (ins RC:$rs, RC:$rt, brtarget:$imm16),
+ !strconcat(instr_asm, "\t$rs, $rt, $imm16"),
+ [(brcond (i32 (cond_op RC:$rs, RC:$rt)), bb:$imm16)], IIBranch> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
@@ -409,9 +464,9 @@ class CBranch<bits<6> op, string instr_asm, PatFrag cond_op, RegisterClass RC>:
class CBranchZero<bits<6> op, bits<5> _rt, string instr_asm, PatFrag cond_op,
RegisterClass RC>:
- CBranchBase<op, (outs), (ins RC:$rs, brtarget:$imm16),
- !strconcat(instr_asm, "\t$rs, $imm16"),
- [(brcond (i32 (cond_op RC:$rs, 0)), bb:$imm16)], IIBranch> {
+ BranchBase<op, (outs), (ins RC:$rs, brtarget:$imm16),
+ !strconcat(instr_asm, "\t$rs, $imm16"),
+ [(brcond (i32 (cond_op RC:$rs, 0)), bb:$imm16)], IIBranch> {
let rt = _rt;
let isBranch = 1;
let isTerminator = 1;
@@ -435,146 +490,228 @@ class SetCC_I<bits<6> op, string instr_asm, PatFrag cond_op, Operand Od,
[(set CPURegs:$rt, (cond_op RC:$rs, imm_type:$imm16))],
IIAlu>;
-// Unconditional branch
-let isBranch=1, isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
+// Jump
class JumpFJ<bits<6> op, string instr_asm>:
- FJ<op, (outs), (ins brtarget:$target),
- !strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch>;
+ FJ<op, (outs), (ins jmptarget:$target),
+ !strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch> {
+ let isBranch=1;
+ let isTerminator=1;
+ let isBarrier=1;
+ let hasDelaySlot = 1;
+ let Predicates = [RelocStatic];
+}
+
+// Unconditional branch
+class UncondBranch<bits<6> op, string instr_asm>:
+ BranchBase<op, (outs), (ins brtarget:$imm16),
+ !strconcat(instr_asm, "\t$imm16"), [(br bb:$imm16)], IIBranch> {
+ let rs = 0;
+ let rt = 0;
+ let isBranch = 1;
+ let isTerminator = 1;
+ let isBarrier = 1;
+ let hasDelaySlot = 1;
+ let Predicates = [RelocPIC];
+}
-let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1 in
-class JumpFR<bits<6> op, bits<6> func, string instr_asm>:
- FR<op, func, (outs), (ins CPURegs:$rs),
- !strconcat(instr_asm, "\t$rs"), [(brind CPURegs:$rs)], IIBranch> {
+let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1,
+ isIndirectBranch = 1 in
+class JumpFR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>:
+ FR<op, func, (outs), (ins RC:$rs),
+ !strconcat(instr_asm, "\t$rs"), [(brind RC:$rs)], IIBranch> {
let rt = 0;
let rd = 0;
let shamt = 0;
}
// Jump and Link (Call)
-let isCall=1, hasDelaySlot=1,
- // All calls clobber the non-callee saved registers...
- Defs = [AT, V0, V1, A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9,
- K0, K1, D0, D1, D2, D3, D4, D5, D6, D7, D8, D9], Uses = [GP] in {
+let isCall=1, hasDelaySlot=1 in {
class JumpLink<bits<6> op, string instr_asm>:
FJ<op, (outs), (ins calltarget:$target, variable_ops),
!strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)],
IIBranch>;
- class JumpLinkReg<bits<6> op, bits<6> func, string instr_asm>:
- FR<op, func, (outs), (ins CPURegs:$rs, variable_ops),
- !strconcat(instr_asm, "\t$rs"), [(MipsJmpLink CPURegs:$rs)], IIBranch> {
+ class JumpLinkReg<bits<6> op, bits<6> func, string instr_asm,
+ RegisterClass RC>:
+ FR<op, func, (outs), (ins RC:$rs, variable_ops),
+ !strconcat(instr_asm, "\t$rs"), [(MipsJmpLink RC:$rs)], IIBranch> {
let rt = 0;
let rd = 31;
let shamt = 0;
}
- class BranchLink<string instr_asm>:
- FI<0x1, (outs), (ins CPURegs:$rs, brtarget:$imm16, variable_ops),
- !strconcat(instr_asm, "\t$rs, $imm16"), [], IIBranch>;
+ class BranchLink<string instr_asm, bits<5> _rt, RegisterClass RC>:
+ FI<0x1, (outs), (ins RC:$rs, brtarget:$imm16, variable_ops),
+ !strconcat(instr_asm, "\t$rs, $imm16"), [], IIBranch> {
+ let rt = _rt;
+ }
}
// Mul, Div
-class Mul<bits<6> func, string instr_asm, InstrItinClass itin>:
- FR<0x00, func, (outs), (ins CPURegs:$rs, CPURegs:$rt),
+class Mult<bits<6> func, string instr_asm, InstrItinClass itin,
+ RegisterClass RC, list<Register> DefRegs>:
+ FR<0x00, func, (outs), (ins RC:$rs, RC:$rt),
!strconcat(instr_asm, "\t$rs, $rt"), [], itin> {
let rd = 0;
let shamt = 0;
let isCommutable = 1;
- let Defs = [HI, LO];
+ let Defs = DefRegs;
+ let neverHasSideEffects = 1;
}
-class Div<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
- FR<0x00, func, (outs), (ins CPURegs:$rs, CPURegs:$rt),
- !strconcat(instr_asm, "\t$$zero, $rs, $rt"),
- [(op CPURegs:$rs, CPURegs:$rt)], itin> {
+class Mult32<bits<6> func, string instr_asm, InstrItinClass itin>:
+ Mult<func, instr_asm, itin, CPURegs, [HI, LO]>;
+
+class Div<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin,
+ RegisterClass RC, list<Register> DefRegs>:
+ FR<0x00, func, (outs), (ins RC:$rs, RC:$rt),
+ !strconcat(instr_asm, "\t$$zero, $rs, $rt"),
+ [(op RC:$rs, RC:$rt)], itin> {
let rd = 0;
let shamt = 0;
- let Defs = [HI, LO];
+ let Defs = DefRegs;
}
+class Div32<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>:
+ Div<op, func, instr_asm, itin, CPURegs, [HI, LO]>;
+
// Move from Hi/Lo
-class MoveFromLOHI<bits<6> func, string instr_asm>:
- FR<0x00, func, (outs CPURegs:$rd), (ins),
+class MoveFromLOHI<bits<6> func, string instr_asm, RegisterClass RC,
+ list<Register> UseRegs>:
+ FR<0x00, func, (outs RC:$rd), (ins),
!strconcat(instr_asm, "\t$rd"), [], IIHiLo> {
let rs = 0;
let rt = 0;
let shamt = 0;
+ let Uses = UseRegs;
+ let neverHasSideEffects = 1;
}
-class MoveToLOHI<bits<6> func, string instr_asm>:
- FR<0x00, func, (outs), (ins CPURegs:$rs),
+class MoveToLOHI<bits<6> func, string instr_asm, RegisterClass RC,
+ list<Register> DefRegs>:
+ FR<0x00, func, (outs), (ins RC:$rs),
!strconcat(instr_asm, "\t$rs"), [], IIHiLo> {
let rt = 0;
let rd = 0;
let shamt = 0;
+ let Defs = DefRegs;
+ let neverHasSideEffects = 1;
}
-class EffectiveAddress<string instr_asm> :
- FMem<0x09, (outs CPURegs:$rt), (ins mem_ea:$addr),
- instr_asm, [(set CPURegs:$rt, addr:$addr)], IIAlu>;
+class EffectiveAddress<string instr_asm, RegisterClass RC, Operand Mem> :
+ FMem<0x09, (outs RC:$rt), (ins Mem:$addr),
+ instr_asm, [(set RC:$rt, addr:$addr)], IIAlu>;
// Count Leading Ones/Zeros in Word
-class CountLeading<bits<6> func, string instr_asm, list<dag> pattern>:
- FR<0x1c, func, (outs CPURegs:$rd), (ins CPURegs:$rs),
- !strconcat(instr_asm, "\t$rd, $rs"), pattern, IIAlu>,
+class CountLeading0<bits<6> func, string instr_asm, RegisterClass RC>:
+ FR<0x1c, func, (outs RC:$rd), (ins RC:$rs),
+ !strconcat(instr_asm, "\t$rd, $rs"),
+ [(set RC:$rd, (ctlz RC:$rs))], IIAlu>,
+ Requires<[HasBitCount]> {
+ let shamt = 0;
+ let rt = rd;
+}
+
+class CountLeading1<bits<6> func, string instr_asm, RegisterClass RC>:
+ FR<0x1c, func, (outs RC:$rd), (ins RC:$rs),
+ !strconcat(instr_asm, "\t$rd, $rs"),
+ [(set RC:$rd, (ctlz (not RC:$rs)))], IIAlu>,
Requires<[HasBitCount]> {
let shamt = 0;
let rt = rd;
}
// Sign Extend in Register.
-class SignExtInReg<bits<5> sa, string instr_asm, ValueType vt>:
- FR<0x1f, 0x20, (outs CPURegs:$rd), (ins CPURegs:$rt),
+class SignExtInReg<bits<5> sa, string instr_asm, ValueType vt,
+ RegisterClass RC>:
+ FR<0x1f, 0x20, (outs RC:$rd), (ins RC:$rt),
!strconcat(instr_asm, "\t$rd, $rt"),
- [(set CPURegs:$rd, (sext_inreg CPURegs:$rt, vt))], NoItinerary> {
+ [(set RC:$rd, (sext_inreg RC:$rt, vt))], NoItinerary> {
let rs = 0;
let shamt = sa;
let Predicates = [HasSEInReg];
}
-// Byte Swap
-class ByteSwap<bits<6> func, bits<5> sa, string instr_asm>:
- FR<0x1f, func, (outs CPURegs:$rd), (ins CPURegs:$rt),
- !strconcat(instr_asm, "\t$rd, $rt"),
- [(set CPURegs:$rd, (bswap CPURegs:$rt))], NoItinerary> {
+// Subword Swap
+class SubwordSwap<bits<6> func, bits<5> sa, string instr_asm, RegisterClass RC>:
+ FR<0x1f, func, (outs RC:$rd), (ins RC:$rt),
+ !strconcat(instr_asm, "\t$rd, $rt"), [], NoItinerary> {
let rs = 0;
let shamt = sa;
let Predicates = [HasSwap];
+ let neverHasSideEffects = 1;
}
// Read Hardware
-class ReadHardware: FR<0x1f, 0x3b, (outs CPURegs:$rt), (ins HWRegs:$rd),
- "rdhwr\t$rt, $rd", [], IIAlu> {
+class ReadHardware<RegisterClass CPURegClass, RegisterClass HWRegClass>
+ : FR<0x1f, 0x3b, (outs CPURegClass:$rt), (ins HWRegClass:$rd),
+ "rdhwr\t$rt, $rd", [], IIAlu> {
let rs = 0;
let shamt = 0;
}
// Ext and Ins
-class ExtIns<bits<6> _funct, string instr_asm, dag outs, dag ins,
- list<dag> pattern, InstrItinClass itin>:
- FR<0x1f, _funct, outs, ins, !strconcat(instr_asm, " $rt, $rs, $pos, $sz"),
- pattern, itin>, Requires<[HasMips32r2]> {
+class ExtBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
+ FR<0x1f, _funct, (outs RC:$rt), (ins RC:$rs, uimm16:$pos, size_ext:$sz),
+ !strconcat(instr_asm, " $rt, $rs, $pos, $sz"),
+ [(set RC:$rt, (MipsExt RC:$rs, imm:$pos, imm:$sz))], NoItinerary> {
+ bits<5> pos;
+ bits<5> sz;
+ let rd = sz;
+ let shamt = pos;
+ let Predicates = [HasMips32r2];
+}
+
+class InsBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
+ FR<0x1f, _funct, (outs RC:$rt),
+ (ins RC:$rs, uimm16:$pos, size_ins:$sz, RC:$src),
+ !strconcat(instr_asm, " $rt, $rs, $pos, $sz"),
+ [(set RC:$rt, (MipsIns RC:$rs, imm:$pos, imm:$sz, RC:$src))],
+ NoItinerary> {
bits<5> pos;
bits<5> sz;
let rd = sz;
let shamt = pos;
+ let Predicates = [HasMips32r2];
+ let Constraints = "$src = $rt";
}
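
ExtBase/InsBase stash the bitfield position in the shamt field and the size
in rd. Their operational behavior, sketched in C++ (assuming 0 < Sz < 32;
the size-encoding details handled by getSizeExtEncoding/getSizeInsEncoding
are glossed over here):

#include <cstdint>

// ext $rt, $rs, pos, sz : extract sz bits of rs starting at bit pos.
static uint32_t mipsExt(uint32_t Rs, unsigned Pos, unsigned Sz) {
  return (Rs >> Pos) & ((1u << Sz) - 1);
}

// ins $rt, $rs, pos, sz : insert the low sz bits of rs into rt at pos.
static uint32_t mipsIns(uint32_t Rt, uint32_t Rs, unsigned Pos, unsigned Sz) {
  uint32_t Mask = ((1u << Sz) - 1) << Pos;
  return (Rt & ~Mask) | ((Rs << Pos) & Mask);
}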
// Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
-class Atomic2Ops<PatFrag Op, string Opstr> :
- MipsPseudo<(outs CPURegs:$dst), (ins CPURegs:$ptr, CPURegs:$incr),
+class Atomic2Ops<PatFrag Op, string Opstr, RegisterClass DRC,
+ RegisterClass PRC> :
+ MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
!strconcat("atomic_", Opstr, "\t$dst, $ptr, $incr"),
- [(set CPURegs:$dst,
- (Op CPURegs:$ptr, CPURegs:$incr))]>;
+ [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
+
+multiclass Atomic2Ops32<PatFrag Op, string Opstr> {
+ def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]>;
+}
// Atomic Compare & Swap.
-class AtomicCmpSwap<PatFrag Op, string Width> :
- MipsPseudo<(outs CPURegs:$dst),
- (ins CPURegs:$ptr, CPURegs:$cmp, CPURegs:$swap),
- !strconcat("atomic_cmp_swap_", Width,
- "\t$dst, $ptr, $cmp, $swap"),
- [(set CPURegs:$dst,
- (Op CPURegs:$ptr, CPURegs:$cmp, CPURegs:$swap))]>;
+class AtomicCmpSwap<PatFrag Op, string Width, RegisterClass DRC,
+ RegisterClass PRC> :
+ MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
+ !strconcat("atomic_cmp_swap_", Width, "\t$dst, $ptr, $cmp, $swap"),
+ [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
+
+multiclass AtomicCmpSwap32<PatFrag Op, string Width> {
+ def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>, Requires<[NotN64]>;
+ def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]>;
+}
+
+class LLBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
+ FMem<Opc, (outs RC:$rt), (ins Mem:$addr),
+ !strconcat(opstring, "\t$rt, $addr"), [], IILoad> {
+ let mayLoad = 1;
+}
+
+class SCBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> :
+ FMem<Opc, (outs RC:$dst), (ins RC:$rt, Mem:$addr),
+ !strconcat(opstring, "\t$rt, $addr"), [], IIStore> {
+ let mayStore = 1;
+ let Constraints = "$rt = $dst";
+}
//===----------------------------------------------------------------------===//
// Pseudo instructions
@@ -590,52 +727,64 @@ def ADJCALLSTACKUP : MipsPseudo<(outs), (ins uimm16:$amt1, uimm16:$amt2),
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
-// Some assembly macros need to avoid pseudoinstructions and assembler
-// automatic reodering, we should reorder ourselves.
-def MACRO : MipsPseudo<(outs), (ins), ".set\tmacro", []>;
-def REORDER : MipsPseudo<(outs), (ins), ".set\treorder", []>;
-def NOMACRO : MipsPseudo<(outs), (ins), ".set\tnomacro", []>;
-def NOREORDER : MipsPseudo<(outs), (ins), ".set\tnoreorder", []>;
-
-// These macros are inserted to prevent GAS from complaining
-// when using the AT register.
-def NOAT : MipsPseudo<(outs), (ins), ".set\tnoat", []>;
-def ATMACRO : MipsPseudo<(outs), (ins), ".set\tat", []>;
-
// When handling PIC code the assembler needs .cpload and .cprestore
// directives. If the real instructions corresponding to these directives
// are used, we get the same behavior, but also a bunch of warnings
// from the assembler.
-def CPLOAD : MipsPseudo<(outs), (ins CPURegs:$picreg), ".cpload\t$picreg", []>;
-def CPRESTORE : MipsPseudo<(outs), (ins i32imm:$loc), ".cprestore\t$loc", []>;
+let neverHasSideEffects = 1 in
+def CPRESTORE : MipsPseudo<(outs), (ins i32imm:$loc, CPURegs:$gp),
+ ".cprestore\t$loc", []>;
+
+// For O32 ABI & PIC & non-fixed global base register, the following instruction
+// sequence is emitted to set the global base register:
+//
+// 0. lui $2, %hi(_gp_disp)
+// 1. addiu $2, $2, %lo(_gp_disp)
+// 2. addu $globalbasereg, $2, $t9
+//
+// SETGP01 is emitted during Prologue/Epilogue insertion and then converted to
+// instructions 0 and 1 in the sequence above during MC lowering.
+// SETGP2 is emitted just before register allocation and converted to
+// instruction 2 just prior to post-RA scheduling.
+//
+// These pseudo instructions are needed to ensure no instructions are inserted
+// before or between instructions 0 and 1, which is a limitation imposed by
+// the GNU linker.
+
+let isTerminator = 1, isBarrier = 1 in
+def SETGP01 : MipsPseudo<(outs CPURegs:$dst), (ins), "", []>;
+
+let neverHasSideEffects = 1 in
+def SETGP2 : MipsPseudo<(outs CPURegs:$globalreg), (ins CPURegs:$picreg), "",
+ []>;
let usesCustomInserter = 1 in {
- def ATOMIC_LOAD_ADD_I8 : Atomic2Ops<atomic_load_add_8, "load_add_8">;
- def ATOMIC_LOAD_ADD_I16 : Atomic2Ops<atomic_load_add_16, "load_add_16">;
- def ATOMIC_LOAD_ADD_I32 : Atomic2Ops<atomic_load_add_32, "load_add_32">;
- def ATOMIC_LOAD_SUB_I8 : Atomic2Ops<atomic_load_sub_8, "load_sub_8">;
- def ATOMIC_LOAD_SUB_I16 : Atomic2Ops<atomic_load_sub_16, "load_sub_16">;
- def ATOMIC_LOAD_SUB_I32 : Atomic2Ops<atomic_load_sub_32, "load_sub_32">;
- def ATOMIC_LOAD_AND_I8 : Atomic2Ops<atomic_load_and_8, "load_and_8">;
- def ATOMIC_LOAD_AND_I16 : Atomic2Ops<atomic_load_and_16, "load_and_16">;
- def ATOMIC_LOAD_AND_I32 : Atomic2Ops<atomic_load_and_32, "load_and_32">;
- def ATOMIC_LOAD_OR_I8 : Atomic2Ops<atomic_load_or_8, "load_or_8">;
- def ATOMIC_LOAD_OR_I16 : Atomic2Ops<atomic_load_or_16, "load_or_16">;
- def ATOMIC_LOAD_OR_I32 : Atomic2Ops<atomic_load_or_32, "load_or_32">;
- def ATOMIC_LOAD_XOR_I8 : Atomic2Ops<atomic_load_xor_8, "load_xor_8">;
- def ATOMIC_LOAD_XOR_I16 : Atomic2Ops<atomic_load_xor_16, "load_xor_16">;
- def ATOMIC_LOAD_XOR_I32 : Atomic2Ops<atomic_load_xor_32, "load_xor_32">;
- def ATOMIC_LOAD_NAND_I8 : Atomic2Ops<atomic_load_nand_8, "load_nand_8">;
- def ATOMIC_LOAD_NAND_I16 : Atomic2Ops<atomic_load_nand_16, "load_nand_16">;
- def ATOMIC_LOAD_NAND_I32 : Atomic2Ops<atomic_load_nand_32, "load_nand_32">;
-
- def ATOMIC_SWAP_I8 : Atomic2Ops<atomic_swap_8, "swap_8">;
- def ATOMIC_SWAP_I16 : Atomic2Ops<atomic_swap_16, "swap_16">;
- def ATOMIC_SWAP_I32 : Atomic2Ops<atomic_swap_32, "swap_32">;
-
- def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<atomic_cmp_swap_8, "8">;
- def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<atomic_cmp_swap_16, "16">;
- def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, "32">;
+ defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8, "load_add_8">;
+ defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16, "load_add_16">;
+ defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32, "load_add_32">;
+ defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8, "load_sub_8">;
+ defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16, "load_sub_16">;
+ defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32, "load_sub_32">;
+ defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8, "load_and_8">;
+ defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16, "load_and_16">;
+ defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32, "load_and_32">;
+ defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8, "load_or_8">;
+ defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16, "load_or_16">;
+ defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32, "load_or_32">;
+ defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8, "load_xor_8">;
+ defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16, "load_xor_16">;
+ defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32, "load_xor_32">;
+ defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8, "load_nand_8">;
+ defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16, "load_nand_16">;
+ defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32, "load_nand_32">;
+
+ defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8, "swap_8">;
+ defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16, "swap_16">;
+ defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32, "swap_32">;
+
+ defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8, "8">;
+ defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16, "16">;
+ defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32, "32">;
}
//===----------------------------------------------------------------------===//
@@ -654,7 +803,7 @@ def SLTiu : SetCC_I<0x0b, "sltiu", setult, simm16, immSExt16, CPURegs>;
def ANDi : ArithLogicI<0x0c, "andi", and, uimm16, immZExt16, CPURegs>;
def ORi : ArithLogicI<0x0d, "ori", or, uimm16, immZExt16, CPURegs>;
def XORi : ArithLogicI<0x0e, "xori", xor, uimm16, immZExt16, CPURegs>;
-def LUi : LoadUpper<0x0f, "lui">;
+def LUi : LoadUpper<0x0f, "lui", CPURegs, uimm16>;
/// Arithmetic Instructions (3-Operand, R-Type)
def ADDu : ArithLogicR<0x00, 0x21, "addu", add, IIAlu, CPURegs, 1>;
@@ -669,17 +818,17 @@ def XOR : ArithLogicR<0x00, 0x26, "xor", xor, IIAlu, CPURegs, 1>;
def NOR : LogicNOR<0x00, 0x27, "nor", CPURegs>;
/// Shift Instructions
-def SLL : LogicR_shift_rotate_imm<0x00, 0x00, "sll", shl>;
-def SRL : LogicR_shift_rotate_imm<0x02, 0x00, "srl", srl>;
-def SRA : LogicR_shift_rotate_imm<0x03, 0x00, "sra", sra>;
-def SLLV : LogicR_shift_rotate_reg<0x04, 0x00, "sllv", shl>;
-def SRLV : LogicR_shift_rotate_reg<0x06, 0x00, "srlv", srl>;
-def SRAV : LogicR_shift_rotate_reg<0x07, 0x00, "srav", sra>;
+def SLL : shift_rotate_imm32<0x00, 0x00, "sll", shl>;
+def SRL : shift_rotate_imm32<0x02, 0x00, "srl", srl>;
+def SRA : shift_rotate_imm32<0x03, 0x00, "sra", sra>;
+def SLLV : shift_rotate_reg<0x04, 0x00, "sllv", shl, CPURegs>;
+def SRLV : shift_rotate_reg<0x06, 0x00, "srlv", srl, CPURegs>;
+def SRAV : shift_rotate_reg<0x07, 0x00, "srav", sra, CPURegs>;
// Rotate Instructions
let Predicates = [HasMips32r2] in {
- def ROTR : LogicR_shift_rotate_imm<0x02, 0x01, "rotr", rotr>;
- def ROTRV : LogicR_shift_rotate_reg<0x06, 0x01, "rotrv", rotr>;
+ def ROTR : shift_rotate_imm32<0x02, 0x01, "rotr", rotr>;
+ def ROTRV : shift_rotate_reg<0x06, 0x01, "rotrv", rotr, CPURegs>;
}
/// Load and Store Instructions
@@ -700,6 +849,12 @@ defm ULW : LoadM32<0x23, "ulw", load_u, 1>;
defm USH : StoreM32<0x29, "ush", truncstorei16_u, 1>;
defm USW : StoreM32<0x2b, "usw", store_u, 1>;
+/// Primitives for unaligned load/store
+defm LWL : LoadUnAlign32<0x22>;
+defm LWR : LoadUnAlign32<0x26>;
+defm SWL : StoreUnAlign32<0x2A>;
+defm SWR : StoreUnAlign32<0x2E>;
+
let hasSideEffects = 1 in
def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
[(MipsSync imm:$stype)], NoItinerary, FrmOther>
@@ -712,19 +867,15 @@ def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype",
}
/// Load-linked, Store-conditional
-let mayLoad = 1 in
- def LL : FMem<0x30, (outs CPURegs:$rt), (ins mem:$addr),
- "ll\t$rt, $addr", [], IILoad>;
-let mayStore = 1, Constraints = "$rt = $dst" in
- def SC : FMem<0x38, (outs CPURegs:$dst), (ins CPURegs:$rt, mem:$addr),
- "sc\t$rt, $addr", [], IIStore>;
+def LL : LLBase<0x30, "ll", CPURegs, mem>, Requires<[NotN64]>;
+def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]>;
+def SC : SCBase<0x38, "sc", CPURegs, mem>, Requires<[NotN64]>;
+def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]>;
/// Jump and Branch Instructions
def J : JumpFJ<0x02, "j">;
-let isIndirectBranch = 1 in
- def JR : JumpFR<0x00, 0x08, "jr">;
-def JAL : JumpLink<0x03, "jal">;
-def JALR : JumpLinkReg<0x00, 0x09, "jalr">;
+def JR : JumpFR<0x00, 0x08, "jr", CPURegs>;
+def B : UncondBranch<0x04, "b">;
def BEQ : CBranch<0x04, "beq", seteq, CPURegs>;
def BNE : CBranch<0x05, "bne", setne, CPURegs>;
def BGEZ : CBranchZero<0x01, 1, "bgez", setge, CPURegs>;
@@ -732,10 +883,10 @@ def BGTZ : CBranchZero<0x07, 0, "bgtz", setgt, CPURegs>;
def BLEZ : CBranchZero<0x06, 0, "blez", setle, CPURegs>;
def BLTZ : CBranchZero<0x01, 0, "bltz", setlt, CPURegs>;
-let rt=0x11 in
- def BGEZAL : BranchLink<"bgezal">;
-let rt=0x10 in
- def BLTZAL : BranchLink<"bltzal">;
+def JAL : JumpLink<0x03, "jal">;
+def JALR : JumpLinkReg<0x00, 0x09, "jalr", CPURegs>;
+def BGEZAL : BranchLink<"bgezal", 0x11, CPURegs>;
+def BLTZAL : BranchLink<"bltzal", 0x10, CPURegs>;
let isReturn=1, isTerminator=1, hasDelaySlot=1,
isBarrier=1, hasCtrlDep=1, rd=0, rt=0, shamt=0 in
@@ -743,50 +894,26 @@ let isReturn=1, isTerminator=1, hasDelaySlot=1,
"jr\t$target", [(MipsRet CPURegs:$target)], IIBranch>;
/// Multiply and Divide Instructions.
-def MULT : Mul<0x18, "mult", IIImul>;
-def MULTu : Mul<0x19, "multu", IIImul>;
-def SDIV : Div<MipsDivRem, 0x1a, "div", IIIdiv>;
-def UDIV : Div<MipsDivRemU, 0x1b, "divu", IIIdiv>;
+def MULT : Mult32<0x18, "mult", IIImul>;
+def MULTu : Mult32<0x19, "multu", IIImul>;
+def SDIV : Div32<MipsDivRem, 0x1a, "div", IIIdiv>;
+def UDIV : Div32<MipsDivRemU, 0x1b, "divu", IIIdiv>;
-let Defs = [HI] in
- def MTHI : MoveToLOHI<0x11, "mthi">;
-let Defs = [LO] in
- def MTLO : MoveToLOHI<0x13, "mtlo">;
-
-let Uses = [HI] in
- def MFHI : MoveFromLOHI<0x10, "mfhi">;
-let Uses = [LO] in
- def MFLO : MoveFromLOHI<0x12, "mflo">;
+def MTHI : MoveToLOHI<0x11, "mthi", CPURegs, [HI]>;
+def MTLO : MoveToLOHI<0x13, "mtlo", CPURegs, [LO]>;
+def MFHI : MoveFromLOHI<0x10, "mfhi", CPURegs, [HI]>;
+def MFLO : MoveFromLOHI<0x12, "mflo", CPURegs, [LO]>;
/// Sign Ext In Register Instructions.
-def SEB : SignExtInReg<0x10, "seb", i8>;
-def SEH : SignExtInReg<0x18, "seh", i16>;
+def SEB : SignExtInReg<0x10, "seb", i8, CPURegs>;
+def SEH : SignExtInReg<0x18, "seh", i16, CPURegs>;
/// Count Leading
-def CLZ : CountLeading<0x20, "clz",
- [(set CPURegs:$rd, (ctlz CPURegs:$rs))]>;
-def CLO : CountLeading<0x21, "clo",
- [(set CPURegs:$rd, (ctlz (not CPURegs:$rs)))]>;
-
-/// Byte Swap
-def WSBW : ByteSwap<0x20, 0x2, "wsbw">;
-
-// Conditional moves:
-// These instructions are expanded in
-// MipsISelLowering::EmitInstrWithCustomInserter if target does not have
-// conditional move instructions.
-// flag:int, data:int
-class CondMovIntInt<bits<6> funct, string instr_asm> :
- FR<0, funct, (outs CPURegs:$rd),
- (ins CPURegs:$rs, CPURegs:$rt, CPURegs:$F),
- !strconcat(instr_asm, "\t$rd, $rs, $rt"), [], NoItinerary> {
- let shamt = 0;
- let usesCustomInserter = 1;
- let Constraints = "$F = $rd";
-}
+def CLZ : CountLeading0<0x20, "clz", CPURegs>;
+def CLO : CountLeading1<0x21, "clo", CPURegs>;
-def MOVZ_I : CondMovIntInt<0x0a, "movz">;
-def MOVN_I : CondMovIntInt<0x0b, "movn">;
+/// Word Swap Bytes Within Halfwords
+def WSBH : SubwordSwap<0x20, 0x2, "wsbh", CPURegs>;
/// No operation
let addr=0 in
@@ -796,13 +923,13 @@ let addr=0 in
// instructions. The same does not happen for stack address copies, so an
// add op with mem ComplexPattern is used and the stack address copy
// can be matched. It's similar to Sparc LEA_ADDRi
-def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr">;
+def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>;
// DynAlloc node points to dynamically allocated stack space.
// $sp is added to the list of implicitly used registers to prevent dead code
// elimination from removing instructions that modify $sp.
let Uses = [SP] in
-def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr">;
+def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>;
// MADD*/MSUB*
def MADD : MArithR<0, "madd", MipsMAdd, 1>;
@@ -815,21 +942,10 @@ def MSUBU : MArithR<5, "msubu", MipsMSubu>;
def MUL : ArithLogicR<0x1c, 0x02, "mul", mul, IIImul, CPURegs, 1>,
Requires<[HasMips32]>;
-def RDHWR : ReadHardware;
+def RDHWR : ReadHardware<CPURegs, HWRegs>;
-def EXT : ExtIns<0, "ext", (outs CPURegs:$rt),
- (ins CPURegs:$rs, uimm16:$pos, size_ext:$sz),
- [(set CPURegs:$rt,
- (MipsExt CPURegs:$rs, immZExt5:$pos, immZExt5:$sz))],
- NoItinerary>;
-
-let Constraints = "$src = $rt" in
-def INS : ExtIns<4, "ins", (outs CPURegs:$rt),
- (ins CPURegs:$rs, uimm16:$pos, size_ins:$sz, CPURegs:$src),
- [(set CPURegs:$rt,
- (MipsIns CPURegs:$rs, immZExt5:$pos, immZExt5:$sz,
- CPURegs:$src))],
- NoItinerary>;
+def EXT : ExtBase<0, "ext", CPURegs>;
+def INS : InsBase<4, "ins", CPURegs>;
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
@@ -840,6 +956,8 @@ def : Pat<(i32 immSExt16:$in),
(ADDiu ZERO, imm:$in)>;
def : Pat<(i32 immZExt16:$in),
(ORi ZERO, imm:$in)>;
+def : Pat<(i32 immLow16Zero:$in),
+ (LUi (HI16 imm:$in))>;
// Arbitrary immediates
def : Pat<(i32 imm:$imm),
@@ -864,22 +982,26 @@ def : Pat<(MipsJmpLink (i32 texternalsym:$dst)),
// hi/lo relocs
def : Pat<(MipsHi tglobaladdr:$in), (LUi tglobaladdr:$in)>;
def : Pat<(MipsHi tblockaddress:$in), (LUi tblockaddress:$in)>;
+def : Pat<(MipsHi tjumptable:$in), (LUi tjumptable:$in)>;
+def : Pat<(MipsHi tconstpool:$in), (LUi tconstpool:$in)>;
+def : Pat<(MipsHi tglobaltlsaddr:$in), (LUi tglobaltlsaddr:$in)>;
+
def : Pat<(MipsLo tglobaladdr:$in), (ADDiu ZERO, tglobaladdr:$in)>;
def : Pat<(MipsLo tblockaddress:$in), (ADDiu ZERO, tblockaddress:$in)>;
+def : Pat<(MipsLo tjumptable:$in), (ADDiu ZERO, tjumptable:$in)>;
+def : Pat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>;
+def : Pat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>;
+
def : Pat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)),
(ADDiu CPURegs:$hi, tglobaladdr:$lo)>;
def : Pat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)),
(ADDiu CPURegs:$hi, tblockaddress:$lo)>;
-
-def : Pat<(MipsHi tjumptable:$in), (LUi tjumptable:$in)>;
-def : Pat<(MipsLo tjumptable:$in), (ADDiu ZERO, tjumptable:$in)>;
def : Pat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)),
(ADDiu CPURegs:$hi, tjumptable:$lo)>;
-
-def : Pat<(MipsHi tconstpool:$in), (LUi tconstpool:$in)>;
-def : Pat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>;
def : Pat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)),
(ADDiu CPURegs:$hi, tconstpool:$lo)>;
+def : Pat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>;
// gp_rel relocs
def : Pat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
@@ -887,39 +1009,45 @@ def : Pat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
def : Pat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
(ADDiu CPURegs:$gp, tconstpool:$in)>;
-// tlsgd
-def : Pat<(add CPURegs:$gp, (MipsTlsGd tglobaltlsaddr:$in)),
- (ADDiu CPURegs:$gp, tglobaltlsaddr:$in)>;
-
-// tprel hi/lo
-def : Pat<(MipsTprelHi tglobaltlsaddr:$in), (LUi tglobaltlsaddr:$in)>;
-def : Pat<(MipsTprelLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>;
-def : Pat<(add CPURegs:$hi, (MipsTprelLo tglobaltlsaddr:$lo)),
- (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>;
-
// wrapper_pic
-class WrapperPICPat<SDNode node>:
- Pat<(MipsWrapperPIC node:$in),
- (ADDiu GP, node:$in)>;
+class WrapperPat<SDNode node, Instruction ADDiuOp, RegisterClass RC>:
+ Pat<(MipsWrapper RC:$gp, node:$in),
+ (ADDiuOp RC:$gp, node:$in)>;
-def : WrapperPICPat<tglobaladdr>;
-def : WrapperPICPat<tconstpool>;
-def : WrapperPICPat<texternalsym>;
-def : WrapperPICPat<tblockaddress>;
-def : WrapperPICPat<tjumptable>;
+def : WrapperPat<tglobaladdr, ADDiu, CPURegs>;
+def : WrapperPat<tconstpool, ADDiu, CPURegs>;
+def : WrapperPat<texternalsym, ADDiu, CPURegs>;
+def : WrapperPat<tblockaddress, ADDiu, CPURegs>;
+def : WrapperPat<tjumptable, ADDiu, CPURegs>;
+def : WrapperPat<tglobaltlsaddr, ADDiu, CPURegs>;
// Mips does not have "not", so we expand our way
def : Pat<(not CPURegs:$in),
(NOR CPURegs:$in, ZERO)>;
-// extended load and stores
-def : Pat<(extloadi1 addr:$src), (LBu addr:$src)>;
-def : Pat<(extloadi8 addr:$src), (LBu addr:$src)>;
-def : Pat<(extloadi16_a addr:$src), (LHu addr:$src)>;
-def : Pat<(extloadi16_u addr:$src), (ULHu addr:$src)>;
+// extended loads
+let Predicates = [NotN64] in {
+ def : Pat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
+ def : Pat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
+ def : Pat<(i32 (extloadi16_a addr:$src)), (LHu addr:$src)>;
+ def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu addr:$src)>;
+}
+let Predicates = [IsN64] in {
+ def : Pat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
+ def : Pat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
+ def : Pat<(i32 (extloadi16_a addr:$src)), (LHu_P8 addr:$src)>;
+ def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu_P8 addr:$src)>;
+}
// peepholes
-def : Pat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
+let Predicates = [NotN64] in {
+ def : Pat<(store_a (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
+ def : Pat<(store_u (i32 0), addr:$dst), (USW ZERO, addr:$dst)>;
+}
+let Predicates = [IsN64] in {
+ def : Pat<(store_a (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
+ def : Pat<(store_u (i32 0), addr:$dst), (USW_P8 ZERO, addr:$dst)>;
+}
// brcond patterns
multiclass BrcondPats<RegisterClass RC, Instruction BEQOp, Instruction BNEOp,
@@ -950,38 +1078,6 @@ def : Pat<(brcond RC:$cond, bb:$dst),
defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
-// select patterns
-multiclass MovzPats<RegisterClass RC, Instruction MOVZInst> {
- def : Pat<(select (i32 (setge CPURegs:$lhs, CPURegs:$rhs)), RC:$T, RC:$F),
- (MOVZInst RC:$T, (SLT CPURegs:$lhs, CPURegs:$rhs), RC:$F)>;
- def : Pat<(select (i32 (setuge CPURegs:$lhs, CPURegs:$rhs)), RC:$T, RC:$F),
- (MOVZInst RC:$T, (SLTu CPURegs:$lhs, CPURegs:$rhs), RC:$F)>;
- def : Pat<(select (i32 (setge CPURegs:$lhs, immSExt16:$rhs)), RC:$T, RC:$F),
- (MOVZInst RC:$T, (SLTi CPURegs:$lhs, immSExt16:$rhs), RC:$F)>;
- def : Pat<(select (i32 (setuge CPURegs:$lh, immSExt16:$rh)), RC:$T, RC:$F),
- (MOVZInst RC:$T, (SLTiu CPURegs:$lh, immSExt16:$rh), RC:$F)>;
- def : Pat<(select (i32 (setle CPURegs:$lhs, CPURegs:$rhs)), RC:$T, RC:$F),
- (MOVZInst RC:$T, (SLT CPURegs:$rhs, CPURegs:$lhs), RC:$F)>;
- def : Pat<(select (i32 (setule CPURegs:$lhs, CPURegs:$rhs)), RC:$T, RC:$F),
- (MOVZInst RC:$T, (SLTu CPURegs:$rhs, CPURegs:$lhs), RC:$F)>;
- def : Pat<(select (i32 (seteq CPURegs:$lhs, CPURegs:$rhs)), RC:$T, RC:$F),
- (MOVZInst RC:$T, (XOR CPURegs:$lhs, CPURegs:$rhs), RC:$F)>;
- def : Pat<(select (i32 (seteq CPURegs:$lhs, 0)), RC:$T, RC:$F),
- (MOVZInst RC:$T, CPURegs:$lhs, RC:$F)>;
-}
-
-multiclass MovnPats<RegisterClass RC, Instruction MOVNInst> {
- def : Pat<(select (i32 (setne CPURegs:$lhs, CPURegs:$rhs)), RC:$T, RC:$F),
- (MOVNInst RC:$T, (XOR CPURegs:$lhs, CPURegs:$rhs), RC:$F)>;
- def : Pat<(select CPURegs:$cond, RC:$T, RC:$F),
- (MOVNInst RC:$T, CPURegs:$cond, RC:$F)>;
- def : Pat<(select (i32 (setne CPURegs:$lhs, 0)), RC:$T, RC:$F),
- (MOVNInst RC:$T, CPURegs:$lhs, RC:$F)>;
-}
-
-defm : MovzPats<CPURegs, MOVZ_I>;
-defm : MovnPats<CPURegs, MOVN_I>;
-
// setcc patterns
multiclass SeteqPats<RegisterClass RC, Instruction SLTiuOp, Instruction XOROp,
Instruction SLTuOp, Register ZEROReg> {
@@ -1029,10 +1125,14 @@ defm : SetgeImmPats<CPURegs, SLTi, SLTiu>;
// select MipsDynAlloc
def : Pat<(MipsDynAlloc addr:$f), (DynAlloc addr:$f)>;
+// bswap pattern
+def : Pat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>;
+
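The new pattern composes two halfword-level swaps: WSBH swaps the bytes
within each halfword, and rotating by 16 then swaps the two halfwords, which
together amount to a full byte swap. A quick C++ check of the identity:

#include <cassert>
#include <cstdint>

static uint32_t wsbh(uint32_t X) {   // swap bytes within each halfword
  return ((X & 0x00ff00ffu) << 8) | ((X & 0xff00ff00u) >> 8);
}
static uint32_t rotr16(uint32_t X) { // rotate by 16 == swap halfwords
  return (X >> 16) | (X << 16);
}

int main() {
  assert(rotr16(wsbh(0x12345678u)) == 0x78563412u); // bswap
  return 0;
}
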
//===----------------------------------------------------------------------===//
// Floating Point Support
//===----------------------------------------------------------------------===//
include "MipsInstrFPU.td"
include "Mips64InstrInfo.td"
+include "MipsCondMov.td"
diff --git a/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
index e3f6a75..76ca3e1 100644
--- a/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp
@@ -1,4 +1,4 @@
-//===- MipsJITInfo.cpp - Implement the JIT interfaces for the Mips target -===//
+//===-- MipsJITInfo.cpp - Implement the Mips JIT Interface ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -200,7 +200,7 @@ void MipsJITInfo::relocate(void *Function, MachineRelocation *MR,
intptr_t ResultPtr = (intptr_t) MR->getResultPointer();
switch ((Mips::RelocationType) MR->getRelocationType()) {
- case Mips::reloc_mips_branch:
+ case Mips::reloc_mips_pc16:
ResultPtr = (((ResultPtr - (intptr_t) RelocPos) - 4) >> 2) & 0xffff;
*((unsigned*) RelocPos) |= (unsigned) ResultPtr;
break;
@@ -218,13 +218,16 @@ void MipsJITInfo::relocate(void *Function, MachineRelocation *MR,
*((unsigned*) RelocPos) |= (unsigned) ResultPtr;
break;
- case Mips::reloc_mips_lo:
- ResultPtr = ResultPtr & 0xffff;
+ case Mips::reloc_mips_lo: {
+ // Addend is needed for unaligned load/store instructions, where the offset
+ // for the second load/store in the expanded instruction sequence must
+ // be modified by +1 or +3. Otherwise, Addend is 0.
+ int Addend = *((unsigned*) RelocPos) & 0xffff;
+ ResultPtr = (ResultPtr + Addend) & 0xffff;
+ *((unsigned*) RelocPos) &= 0xffff0000;
*((unsigned*) RelocPos) |= (unsigned) ResultPtr;
break;
-
- default:
- llvm_unreachable("ERROR: Unknown Mips relocation.");
+ }
}
}
}
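
The reworked reloc_mips_lo case first reads back the addend that the
unaligned-access expansion planted in the immediate field, then rewrites the
field. A condensed sketch of that fix-up, under the same assumptions as the
code above:

#include <cstdint>

// Patch the 16-bit immediate at InsnPos with the low half of Target plus
// the small addend (0, +1, or +3) already encoded there by the ulw/ulh
// expansion.
static void applyLo16(uint32_t *InsnPos, intptr_t Target) {
  int Addend = *InsnPos & 0xffff;
  uint32_t Lo = (uint32_t)(Target + Addend) & 0xffff;
  *InsnPos = (*InsnPos & 0xffff0000u) | Lo;
}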
diff --git a/contrib/llvm/lib/Target/Mips/MipsJITInfo.h b/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
index 41f32a3..f4c4ae8 100644
--- a/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsJITInfo.h
@@ -1,4 +1,4 @@
-//===- MipsJITInfo.h - Mips implementation of the JIT interface -*- C++ -*-===//
+//===- MipsJITInfo.h - Mips Implementation of the JIT Interface -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,8 +19,6 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetJITInfo.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
namespace llvm {
class MipsTargetMachine;
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
index 608a7d2..1597b93 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp
@@ -15,99 +15,179 @@
#include "MipsMCInstLower.h"
#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
-#include "MipsMCSymbolRefExpr.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Target/Mangler.h"
+
using namespace llvm;
-MipsMCInstLower::MipsMCInstLower(Mangler *mang, const MachineFunction &mf,
- MipsAsmPrinter &asmprinter)
- : Ctx(mf.getContext()), Mang(mang), AsmPrinter(asmprinter) {}
+MipsMCInstLower::MipsMCInstLower(MipsAsmPrinter &asmprinter)
+ : AsmPrinter(asmprinter) {}
+
+void MipsMCInstLower::Initialize(Mangler *M, MCContext* C) {
+ Mang = M;
+ Ctx = C;
+}
MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy,
unsigned Offset) const {
- MipsMCSymbolRefExpr::VariantKind Kind;
+ MCSymbolRefExpr::VariantKind Kind;
const MCSymbol *Symbol;
switch(MO.getTargetFlags()) {
- default: assert(0 && "Invalid target flag!");
- case MipsII::MO_NO_FLAG: Kind = MipsMCSymbolRefExpr::VK_Mips_None; break;
- case MipsII::MO_GPREL: Kind = MipsMCSymbolRefExpr::VK_Mips_GPREL; break;
- case MipsII::MO_GOT_CALL: Kind = MipsMCSymbolRefExpr::VK_Mips_GOT_CALL; break;
- case MipsII::MO_GOT: Kind = MipsMCSymbolRefExpr::VK_Mips_GOT; break;
- case MipsII::MO_ABS_HI: Kind = MipsMCSymbolRefExpr::VK_Mips_ABS_HI; break;
- case MipsII::MO_ABS_LO: Kind = MipsMCSymbolRefExpr::VK_Mips_ABS_LO; break;
- case MipsII::MO_TLSGD: Kind = MipsMCSymbolRefExpr::VK_Mips_TLSGD; break;
- case MipsII::MO_GOTTPREL: Kind = MipsMCSymbolRefExpr::VK_Mips_GOTTPREL; break;
- case MipsII::MO_TPREL_HI: Kind = MipsMCSymbolRefExpr::VK_Mips_TPREL_HI; break;
- case MipsII::MO_TPREL_LO: Kind = MipsMCSymbolRefExpr::VK_Mips_TPREL_LO; break;
- case MipsII::MO_GPOFF_HI: Kind = MipsMCSymbolRefExpr::VK_Mips_GPOFF_HI; break;
- case MipsII::MO_GPOFF_LO: Kind = MipsMCSymbolRefExpr::VK_Mips_GPOFF_LO; break;
- case MipsII::MO_GOT_DISP: Kind = MipsMCSymbolRefExpr::VK_Mips_GOT_DISP; break;
- case MipsII::MO_GOT_PAGE: Kind = MipsMCSymbolRefExpr::VK_Mips_GOT_PAGE; break;
- case MipsII::MO_GOT_OFST: Kind = MipsMCSymbolRefExpr::VK_Mips_GOT_OFST; break;
+ default: llvm_unreachable("Invalid target flag!");
+ case MipsII::MO_NO_FLAG: Kind = MCSymbolRefExpr::VK_None; break;
+ case MipsII::MO_GPREL: Kind = MCSymbolRefExpr::VK_Mips_GPREL; break;
+ case MipsII::MO_GOT_CALL: Kind = MCSymbolRefExpr::VK_Mips_GOT_CALL; break;
+ case MipsII::MO_GOT16: Kind = MCSymbolRefExpr::VK_Mips_GOT16; break;
+ case MipsII::MO_GOT: Kind = MCSymbolRefExpr::VK_Mips_GOT; break;
+ case MipsII::MO_ABS_HI: Kind = MCSymbolRefExpr::VK_Mips_ABS_HI; break;
+ case MipsII::MO_ABS_LO: Kind = MCSymbolRefExpr::VK_Mips_ABS_LO; break;
+ case MipsII::MO_TLSGD: Kind = MCSymbolRefExpr::VK_Mips_TLSGD; break;
+ case MipsII::MO_TLSLDM: Kind = MCSymbolRefExpr::VK_Mips_TLSLDM; break;
+ case MipsII::MO_DTPREL_HI: Kind = MCSymbolRefExpr::VK_Mips_DTPREL_HI; break;
+ case MipsII::MO_DTPREL_LO: Kind = MCSymbolRefExpr::VK_Mips_DTPREL_LO; break;
+ case MipsII::MO_GOTTPREL: Kind = MCSymbolRefExpr::VK_Mips_GOTTPREL; break;
+ case MipsII::MO_TPREL_HI: Kind = MCSymbolRefExpr::VK_Mips_TPREL_HI; break;
+ case MipsII::MO_TPREL_LO: Kind = MCSymbolRefExpr::VK_Mips_TPREL_LO; break;
+ case MipsII::MO_GPOFF_HI: Kind = MCSymbolRefExpr::VK_Mips_GPOFF_HI; break;
+ case MipsII::MO_GPOFF_LO: Kind = MCSymbolRefExpr::VK_Mips_GPOFF_LO; break;
+ case MipsII::MO_GOT_DISP: Kind = MCSymbolRefExpr::VK_Mips_GOT_DISP; break;
+ case MipsII::MO_GOT_PAGE: Kind = MCSymbolRefExpr::VK_Mips_GOT_PAGE; break;
+ case MipsII::MO_GOT_OFST: Kind = MCSymbolRefExpr::VK_Mips_GOT_OFST; break;
}
switch (MOTy) {
- case MachineOperand::MO_MachineBasicBlock:
- Symbol = MO.getMBB()->getSymbol();
- break;
-
- case MachineOperand::MO_GlobalAddress:
- Symbol = Mang->getSymbol(MO.getGlobal());
- break;
-
- case MachineOperand::MO_BlockAddress:
- Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress());
- break;
-
- case MachineOperand::MO_ExternalSymbol:
- Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName());
- break;
-
- case MachineOperand::MO_JumpTableIndex:
- Symbol = AsmPrinter.GetJTISymbol(MO.getIndex());
- break;
-
- case MachineOperand::MO_ConstantPoolIndex:
- Symbol = AsmPrinter.GetCPISymbol(MO.getIndex());
- if (MO.getOffset())
- Offset += MO.getOffset();
- break;
-
- default:
- llvm_unreachable("<unknown operand type>");
+ case MachineOperand::MO_MachineBasicBlock:
+ Symbol = MO.getMBB()->getSymbol();
+ break;
+
+ case MachineOperand::MO_GlobalAddress:
+ Symbol = Mang->getSymbol(MO.getGlobal());
+ break;
+
+ case MachineOperand::MO_BlockAddress:
+ Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress());
+ break;
+
+ case MachineOperand::MO_ExternalSymbol:
+ Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName());
+ break;
+
+ case MachineOperand::MO_JumpTableIndex:
+ Symbol = AsmPrinter.GetJTISymbol(MO.getIndex());
+ break;
+
+ case MachineOperand::MO_ConstantPoolIndex:
+ Symbol = AsmPrinter.GetCPISymbol(MO.getIndex());
+ if (MO.getOffset())
+ Offset += MO.getOffset();
+ break;
+
+ default:
+ llvm_unreachable("<unknown operand type>");
+ }
+
+ const MCSymbolRefExpr *MCSym = MCSymbolRefExpr::Create(Symbol, Kind, *Ctx);
+
+ if (!Offset)
+ return MCOperand::CreateExpr(MCSym);
+
+ // Assume offset is never negative.
+ assert(Offset > 0);
+
+ const MCConstantExpr *OffsetExpr = MCConstantExpr::Create(Offset, *Ctx);
+ const MCBinaryExpr *AddExpr = MCBinaryExpr::CreateAdd(MCSym, OffsetExpr, *Ctx);
+ return MCOperand::CreateExpr(AddExpr);
+}
+
+static void CreateMCInst(MCInst& Inst, unsigned Opc, const MCOperand& Opnd0,
+ const MCOperand& Opnd1,
+ const MCOperand& Opnd2 = MCOperand()) {
+ Inst.setOpcode(Opc);
+ Inst.addOperand(Opnd0);
+ Inst.addOperand(Opnd1);
+ if (Opnd2.isValid())
+ Inst.addOperand(Opnd2);
+}
+
+// Lower ".cpload $reg" to
+// "lui $gp, %hi(_gp_disp)"
+// "addiu $gp, $gp, %lo(_gp_disp)"
+// "addu $gp, $gp, $t9"
+void MipsMCInstLower::LowerCPLOAD(SmallVector<MCInst, 4>& MCInsts) {
+ MCOperand GPReg = MCOperand::CreateReg(Mips::GP);
+ MCOperand T9Reg = MCOperand::CreateReg(Mips::T9);
+ StringRef SymName("_gp_disp");
+ const MCSymbol *Sym = Ctx->GetOrCreateSymbol(SymName);
+ const MCSymbolRefExpr *MCSym;
+
+ MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_HI, *Ctx);
+ MCOperand SymHi = MCOperand::CreateExpr(MCSym);
+ MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_LO, *Ctx);
+ MCOperand SymLo = MCOperand::CreateExpr(MCSym);
+
+ MCInsts.resize(3);
+
+ CreateMCInst(MCInsts[0], Mips::LUi, GPReg, SymHi);
+ CreateMCInst(MCInsts[1], Mips::ADDiu, GPReg, GPReg, SymLo);
+ CreateMCInst(MCInsts[2], Mips::ADDu, GPReg, GPReg, T9Reg);
+}
+
+// Lower ".cprestore offset" to "sw $gp, offset($sp)".
+void MipsMCInstLower::LowerCPRESTORE(int64_t Offset,
+ SmallVector<MCInst, 4>& MCInsts) {
+ assert(isInt<32>(Offset) && (Offset >= 0) &&
+ "Imm operand of .cprestore must be a non-negative 32-bit value.");
+
+ MCOperand SPReg = MCOperand::CreateReg(Mips::SP), BaseReg = SPReg;
+ MCOperand GPReg = MCOperand::CreateReg(Mips::GP);
+
+ if (!isInt<16>(Offset)) {
+ unsigned Hi = ((Offset + 0x8000) >> 16) & 0xffff;
+ Offset &= 0xffff;
+ MCOperand ATReg = MCOperand::CreateReg(Mips::AT);
+ BaseReg = ATReg;
+
+ // lui at,hi
+ // addu at,at,sp
+ MCInsts.resize(2);
+ CreateMCInst(MCInsts[0], Mips::LUi, ATReg, MCOperand::CreateImm(Hi));
+ CreateMCInst(MCInsts[1], Mips::ADDu, ATReg, ATReg, SPReg);
}
-
- return MCOperand::CreateExpr(MipsMCSymbolRefExpr::Create(Kind, Symbol, Offset,
- Ctx));
+
+ MCInst Sw;
+ CreateMCInst(Sw, Mips::SW, GPReg, BaseReg, MCOperand::CreateImm(Offset));
+ MCInsts.push_back(Sw);
}
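
For instance, ".cprestore 8" comes out as a single "sw $gp, 8($sp)", while an
offset such as 0x10000 that does not fit in 16 bits first builds the base in
$at ("lui $at, 1" then "addu $at, $at, $sp") and stores "sw $gp, 0($at)"
(offsets here are illustrative).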
-MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO) const {
+MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO,
+ unsigned offset) const {
MachineOperandType MOTy = MO.getType();
-
+
switch (MOTy) {
- default:
- assert(0 && "unknown operand type");
- break;
+ default: llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
// Ignore all implicit register operands.
if (MO.isImplicit()) break;
return MCOperand::CreateReg(MO.getReg());
case MachineOperand::MO_Immediate:
- return MCOperand::CreateImm(MO.getImm());
+ return MCOperand::CreateImm(MO.getImm() + offset);
case MachineOperand::MO_MachineBasicBlock:
case MachineOperand::MO_GlobalAddress:
case MachineOperand::MO_ExternalSymbol:
case MachineOperand::MO_JumpTableIndex:
case MachineOperand::MO_ConstantPoolIndex:
case MachineOperand::MO_BlockAddress:
- return LowerSymbolOperand(MO, MOTy, 0);
+ return LowerSymbolOperand(MO, MOTy, offset);
+ case MachineOperand::MO_RegisterMask:
+ break;
}
return MCOperand();
@@ -115,7 +195,7 @@ MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO) const {
void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());
-
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
MCOperand MCOp = LowerOperand(MO);
@@ -124,3 +204,140 @@ void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.addOperand(MCOp);
}
}
+
+void MipsMCInstLower::LowerUnalignedLoadStore(const MachineInstr *MI,
+ SmallVector<MCInst,
+ 4>& MCInsts) {
+ unsigned Opc = MI->getOpcode();
+ MCInst Instr1, Instr2, Instr3, Move;
+
+ bool TwoInstructions = false;
+
+ assert(MI->getNumOperands() == 3);
+ assert(MI->getOperand(0).isReg());
+ assert(MI->getOperand(1).isReg());
+
+ MCOperand Target = LowerOperand(MI->getOperand(0));
+ MCOperand Base = LowerOperand(MI->getOperand(1));
+ MCOperand ATReg = MCOperand::CreateReg(Mips::AT);
+ MCOperand ZeroReg = MCOperand::CreateReg(Mips::ZERO);
+
+ MachineOperand UnLoweredName = MI->getOperand(2);
+ MCOperand Name = LowerOperand(UnLoweredName);
+
+ Move.setOpcode(Mips::ADDu);
+ Move.addOperand(Target);
+ Move.addOperand(ATReg);
+ Move.addOperand(ZeroReg);
+
+ switch (Opc) {
+ case Mips::ULW: {
+ // FIXME: only works for little endian right now
+ MCOperand AdjName = LowerOperand(UnLoweredName, 3);
+ if (Base.getReg() == (Target.getReg())) {
+ Instr1.setOpcode(Mips::LWL);
+ Instr1.addOperand(ATReg);
+ Instr1.addOperand(Base);
+ Instr1.addOperand(AdjName);
+ Instr2.setOpcode(Mips::LWR);
+ Instr2.addOperand(ATReg);
+ Instr2.addOperand(Base);
+ Instr2.addOperand(Name);
+ Instr3 = Move;
+ } else {
+ TwoInstructions = true;
+ Instr1.setOpcode(Mips::LWL);
+ Instr1.addOperand(Target);
+ Instr1.addOperand(Base);
+ Instr1.addOperand(AdjName);
+ Instr2.setOpcode(Mips::LWR);
+ Instr2.addOperand(Target);
+ Instr2.addOperand(Base);
+ Instr2.addOperand(Name);
+ }
+ break;
+ }
+ case Mips::ULHu: {
+ // FIXME: only works for little endian right now
+ MCOperand AdjName = LowerOperand(UnLoweredName, 1);
+ Instr1.setOpcode(Mips::LBu);
+ Instr1.addOperand(ATReg);
+ Instr1.addOperand(Base);
+ Instr1.addOperand(AdjName);
+ Instr2.setOpcode(Mips::LBu);
+ Instr2.addOperand(Target);
+ Instr2.addOperand(Base);
+ Instr2.addOperand(Name);
+ Instr3.setOpcode(Mips::INS);
+ Instr3.addOperand(Target);
+ Instr3.addOperand(ATReg);
+ Instr3.addOperand(MCOperand::CreateImm(0x8));
+ Instr3.addOperand(MCOperand::CreateImm(0x18));
+ break;
+ }
+
+ case Mips::USW: {
+ // FIXME: only works for little endian right now
+ assert (Base.getReg() != Target.getReg());
+ TwoInstructions = true;
+ MCOperand AdjName = LowerOperand(UnLoweredName, 3);
+ Instr1.setOpcode(Mips::SWL);
+ Instr1.addOperand(Target);
+ Instr1.addOperand(Base);
+ Instr1.addOperand(AdjName);
+ Instr2.setOpcode(Mips::SWR);
+ Instr2.addOperand(Target);
+ Instr2.addOperand(Base);
+ Instr2.addOperand(Name);
+ break;
+ }
+ case Mips::USH: {
+ MCOperand AdjName = LowerOperand(UnLoweredName, 1);
+ Instr1.setOpcode(Mips::SB);
+ Instr1.addOperand(Target);
+ Instr1.addOperand(Base);
+ Instr1.addOperand(Name);
+ Instr2.setOpcode(Mips::SRL);
+ Instr2.addOperand(ATReg);
+ Instr2.addOperand(Target);
+ Instr2.addOperand(MCOperand::CreateImm(8));
+ Instr3.setOpcode(Mips::SB);
+ Instr3.addOperand(ATReg);
+ Instr3.addOperand(Base);
+ Instr3.addOperand(AdjName);
+ break;
+ }
+ default:
+ // FIXME: need to add others
+ llvm_unreachable("unaligned instruction not processed");
+ }
+
+ MCInsts.push_back(Instr1);
+ MCInsts.push_back(Instr2);
+ if (!TwoInstructions) MCInsts.push_back(Instr3);
+}
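
For example, on a little-endian target "ulw $2, 0($3)" is expected to lower
to "lwl $2, 3($3)" followed by "lwr $2, 0($3)" when the target and base
registers differ; when they coincide, the pair loads into $at and a final
"addu" copies the result over (register names illustrative).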
+
+// Convert
+// "setgp01 $reg"
+// to
+// "lui $reg, %hi(_gp_disp)"
+// "addiu $reg, $reg, %lo(_gp_disp)"
+void MipsMCInstLower::LowerSETGP01(const MachineInstr *MI,
+ SmallVector<MCInst, 4>& MCInsts) {
+ const MachineOperand &MO = MI->getOperand(0);
+ assert(MO.isReg());
+ MCOperand RegOpnd = MCOperand::CreateReg(MO.getReg());
+ StringRef SymName("_gp_disp");
+ const MCSymbol *Sym = Ctx->GetOrCreateSymbol(SymName);
+ const MCSymbolRefExpr *MCSym;
+
+ MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_HI, *Ctx);
+ MCOperand SymHi = MCOperand::CreateExpr(MCSym);
+ MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_LO, *Ctx);
+ MCOperand SymLo = MCOperand::CreateExpr(MCSym);
+
+ MCInsts.resize(2);
+
+ CreateMCInst(MCInsts[0], Mips::LUi, RegOpnd, SymHi);
+ CreateMCInst(MCInsts[1], Mips::ADDiu, RegOpnd, RegOpnd, SymLo);
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
index 223f23a..c1d007d 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h
@@ -1,4 +1,4 @@
-//===-- MipsMCInstLower.h - Lower MachineInstr to MCInst -------------------==//
+//===-- MipsMCInstLower.h - Lower MachineInstr to MCInst -------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -9,35 +9,39 @@
#ifndef MIPSMCINSTLOWER_H
#define MIPSMCINSTLOWER_H
+#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
- class MCAsmInfo;
class MCContext;
class MCInst;
class MCOperand;
- class MCSymbol;
class MachineInstr;
class MachineFunction;
class Mangler;
class MipsAsmPrinter;
-
+
/// MipsMCInstLower - This class is used to lower a MachineInstr into an
/// MCInst.
class LLVM_LIBRARY_VISIBILITY MipsMCInstLower {
typedef MachineOperand::MachineOperandType MachineOperandType;
- MCContext &Ctx;
+ MCContext *Ctx;
Mangler *Mang;
MipsAsmPrinter &AsmPrinter;
public:
- MipsMCInstLower(Mangler *mang, const MachineFunction &MF,
- MipsAsmPrinter &asmprinter);
+ MipsMCInstLower(MipsAsmPrinter &asmprinter);
+ void Initialize(Mangler *mang, MCContext* C);
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+ void LowerCPLOAD(SmallVector<MCInst, 4>& MCInsts);
+ void LowerCPRESTORE(int64_t Offset, SmallVector<MCInst, 4>& MCInsts);
+ void LowerUnalignedLoadStore(const MachineInstr *MI,
+ SmallVector<MCInst, 4>& MCInsts);
+ void LowerSETGP01(const MachineInstr *MI, SmallVector<MCInst, 4>& MCInsts);
private:
MCOperand LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy, unsigned Offset) const;
- MCOperand LowerOperand(const MachineOperand& MO) const;
+ MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const;
};
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCSymbolRefExpr.cpp b/contrib/llvm/lib/Target/Mips/MipsMCSymbolRefExpr.cpp
deleted file mode 100644
index a0a242c..0000000
--- a/contrib/llvm/lib/Target/Mips/MipsMCSymbolRefExpr.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-//===-- MipsMCSymbolRefExpr.cpp - Mips specific MC expression classes -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "mipsmcsymbolrefexpr"
-#include "MipsMCSymbolRefExpr.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSymbol.h"
-using namespace llvm;
-
-const MipsMCSymbolRefExpr*
-MipsMCSymbolRefExpr::Create(VariantKind Kind, const MCSymbol *Symbol,
- int Offset, MCContext &Ctx) {
- return new (Ctx) MipsMCSymbolRefExpr(Kind, Symbol, Offset);
-}
-
-void MipsMCSymbolRefExpr::PrintImpl(raw_ostream &OS) const {
- switch (Kind) {
- default: assert(0 && "Invalid kind!");
- case VK_Mips_None: break;
- case VK_Mips_GPREL: OS << "%gp_rel("; break;
- case VK_Mips_GOT_CALL: OS << "%call16("; break;
- case VK_Mips_GOT: OS << "%got("; break;
- case VK_Mips_ABS_HI: OS << "%hi("; break;
- case VK_Mips_ABS_LO: OS << "%lo("; break;
- case VK_Mips_TLSGD: OS << "%tlsgd("; break;
- case VK_Mips_GOTTPREL: OS << "%gottprel("; break;
- case VK_Mips_TPREL_HI: OS << "%tprel_hi("; break;
- case VK_Mips_TPREL_LO: OS << "%tprel_lo("; break;
- case VK_Mips_GPOFF_HI: OS << "%hi(%neg(%gp_rel("; break;
- case VK_Mips_GPOFF_LO: OS << "%lo(%neg(%gp_rel("; break;
- case VK_Mips_GOT_DISP: OS << "%got_disp("; break;
- case VK_Mips_GOT_PAGE: OS << "%got_page("; break;
- case VK_Mips_GOT_OFST: OS << "%got_ofst("; break;
- }
-
- OS << *Symbol;
-
- if (Offset) {
- if (Offset > 0)
- OS << '+';
- OS << Offset;
- }
-
- if (Kind == VK_Mips_GPOFF_HI || Kind == VK_Mips_GPOFF_LO)
- OS << ")))";
- else if (Kind != VK_Mips_None)
- OS << ')';
-}
-
-bool
-MipsMCSymbolRefExpr::EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const {
- return false;
-}
-
-void MipsMCSymbolRefExpr::AddValueSymbols(MCAssembler *Asm) const {
- Asm->getOrCreateSymbolData(*Symbol);
-}
-
-const MCSection *MipsMCSymbolRefExpr::FindAssociatedSection() const {
- return Symbol->isDefined() ? &Symbol->getSection() : NULL;
-}
-
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCSymbolRefExpr.h b/contrib/llvm/lib/Target/Mips/MipsMCSymbolRefExpr.h
deleted file mode 100644
index 55e85a7..0000000
--- a/contrib/llvm/lib/Target/Mips/MipsMCSymbolRefExpr.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//===-- MipsMCSymbolRefExpr.h - Mips specific MCSymbolRefExpr class -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MIPSMCSYMBOLREFEXPR_H
-#define MIPSMCSYMBOLREFEXPR_H
-#include "llvm/MC/MCExpr.h"
-
-namespace llvm {
-
-class MipsMCSymbolRefExpr : public MCTargetExpr {
-public:
- enum VariantKind {
- VK_Mips_None,
- VK_Mips_GPREL,
- VK_Mips_GOT_CALL,
- VK_Mips_GOT,
- VK_Mips_ABS_HI,
- VK_Mips_ABS_LO,
- VK_Mips_TLSGD,
- VK_Mips_GOTTPREL,
- VK_Mips_TPREL_HI,
- VK_Mips_TPREL_LO,
- VK_Mips_GPOFF_HI,
- VK_Mips_GPOFF_LO,
- VK_Mips_GOT_DISP,
- VK_Mips_GOT_PAGE,
- VK_Mips_GOT_OFST
- };
-
-private:
- const VariantKind Kind;
- const MCSymbol *Symbol;
- int Offset;
-
- explicit MipsMCSymbolRefExpr(VariantKind _Kind, const MCSymbol *_Symbol,
- int _Offset)
- : Kind(_Kind), Symbol(_Symbol), Offset(_Offset) {}
-
-public:
- static const MipsMCSymbolRefExpr *Create(VariantKind Kind,
- const MCSymbol *Symbol, int Offset,
- MCContext &Ctx);
-
- void PrintImpl(raw_ostream &OS) const;
- bool EvaluateAsRelocatableImpl(MCValue &Res,
- const MCAsmLayout *Layout) const;
- void AddValueSymbols(MCAssembler *) const;
- const MCSection *FindAssociatedSection() const;
-
- static bool classof(const MCExpr *E) {
- return E->getKind() == MCExpr::Target;
- }
-
- static bool classof(const MipsMCSymbolRefExpr *) { return true; }
-
- int getOffset() const { return Offset; }
- void setOffset(int O) { Offset = O; }
-};
-} // end namespace llvm
-
-#endif
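
Before deletion, PrintImpl fixed the concrete assembly spelling of each VariantKind: an opening relocation operator (three nested ones for the %gp_rel forms), the symbol, an optional signed offset, and the matching closing parentheses. A standalone reconstruction of one such output, following the deleted logic; the symbol name and offset are invented for the demo:

// Editor's illustration: the syntax the deleted PrintImpl produced for a
// VK_Mips_GPOFF_HI reference with a nonzero offset. %+d mirrors the code's
// "print '+' only for positive offsets" rule.
#include <cstdio>

int main() {
  const char *Sym = "foo";
  int Offset = -4;
  // GPOFF_HI opens three operators and therefore closes three parens.
  std::printf("%%hi(%%neg(%%gp_rel(%s%+d)))\n", Sym, Offset);
  return 0;
}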
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
new file mode 100644
index 0000000..b00c62b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp
@@ -0,0 +1,50 @@
+//===-- MipsMachineFunctionInfo.cpp - Private data used for Mips ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsMachineFunction.h"
+#include "MipsInstrInfo.h"
+#include "MipsSubtarget.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
+#include "llvm/Function.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+FixGlobalBaseReg("mips-fix-global-base-reg", cl::Hidden, cl::init(true),
+ cl::desc("Always use $gp as the global base register."));
+
+bool MipsFunctionInfo::globalBaseRegFixed() const {
+ return FixGlobalBaseReg;
+}
+
+bool MipsFunctionInfo::globalBaseRegSet() const {
+ return GlobalBaseReg;
+}
+
+unsigned MipsFunctionInfo::getGlobalBaseReg() {
+ // Return if it has already been initialized.
+ if (GlobalBaseReg)
+ return GlobalBaseReg;
+
+ const MipsSubtarget &ST = MF.getTarget().getSubtarget<MipsSubtarget>();
+
+ if (FixGlobalBaseReg) // $gp is the global base register.
+ return GlobalBaseReg = ST.isABI_N64() ? Mips::GP_64 : Mips::GP;
+
+ const TargetRegisterClass *RC;
+ RC = ST.isABI_N64() ?
+ Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass;
+
+ return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
+}
+
+void MipsFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
index bc30b6b..0fde55c 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h
@@ -14,19 +14,17 @@
#ifndef MIPS_MACHINE_FUNCTION_INFO_H
#define MIPS_MACHINE_FUNCTION_INFO_H
-#include <utility>
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include <utility>
namespace llvm {
/// MipsFunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private Mips target-specific information for each MachineFunction.
class MipsFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
-private:
MachineFunction& MF;
/// SRetReturnReg - Some subtargets require that sret lowering includes
/// returning the value of the returned struct in a register. This field
@@ -45,18 +43,20 @@ private:
// InArgFIRange: Range of indices of all frame objects created during call to
// LowerFormalArguments.
// OutArgFIRange: Range of indices of all frame objects created during call to
- // LowerCall except for the frame object for restoring $gp.
+ // LowerCall except for the frame object for restoring $gp.
std::pair<int, int> InArgFIRange, OutArgFIRange;
- int GPFI; // Index of the frame object for restoring $gp
- mutable int DynAllocFI; // Frame index of dynamically allocated stack area.
+ int GPFI; // Index of the frame object for restoring $gp
+ mutable int DynAllocFI; // Frame index of dynamically allocated stack area.
unsigned MaxCallFrameSize;
+ bool EmitNOAT;
+
public:
MipsFunctionInfo(MachineFunction& MF)
: MF(MF), SRetReturnReg(0), GlobalBaseReg(0),
VarArgsFrameIndex(0), InArgFIRange(std::make_pair(-1, 0)),
OutArgFIRange(std::make_pair(-1, 0)), GPFI(0), DynAllocFI(0),
- MaxCallFrameSize(0)
+ MaxCallFrameSize(0), EmitNOAT(false)
{}
bool isInArgFI(int FI) const {
@@ -64,7 +64,7 @@ public:
}
void setLastInArgFI(int FI) { InArgFIRange.second = FI; }
- bool isOutArgFI(int FI) const {
+ bool isOutArgFI(int FI) const {
return FI <= OutArgFIRange.first && FI >= OutArgFIRange.second;
}
void extendOutArgFIRange(int FirstFI, int LastFI) {
@@ -92,14 +92,18 @@ public:
unsigned getSRetReturnReg() const { return SRetReturnReg; }
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
- unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
- void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
+ bool globalBaseRegFixed() const;
+ bool globalBaseRegSet() const;
+ unsigned getGlobalBaseReg();
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
+
+ bool getEmitNOAT() const { return EmitNOAT; }
+ void setEmitNOAT() { EmitNOAT = true; }
};
} // end of namespace llvm
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index f8c0fda..f30de44 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- MipsRegisterInfo.cpp - MIPS Register Information -== -----*- C++ -*-===//
+//===-- MipsRegisterInfo.cpp - MIPS Register Information -== --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,9 +13,10 @@
#define DEBUG_TYPE "mips-reg-info"
+#include "MipsRegisterInfo.h"
#include "Mips.h"
+#include "MipsAnalyzeImmediate.h"
#include "MipsSubtarget.h"
-#include "MipsRegisterInfo.h"
#include "MipsMachineFunction.h"
#include "llvm/Constants.h"
#include "llvm/Type.h"
@@ -45,97 +46,6 @@ MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST,
const TargetInstrInfo &tii)
: MipsGenRegisterInfo(Mips::RA), Subtarget(ST), TII(tii) {}
-/// getRegisterNumbering - Given the enum value for some register, e.g.
-/// Mips::RA, return the number that it corresponds to (e.g. 31).
-unsigned MipsRegisterInfo::
-getRegisterNumbering(unsigned RegEnum)
-{
- switch (RegEnum) {
- case Mips::ZERO: case Mips::ZERO_64: case Mips::F0: case Mips::D0_64:
- case Mips::D0:
- return 0;
- case Mips::AT: case Mips::AT_64: case Mips::F1: case Mips::D1_64:
- return 1;
- case Mips::V0: case Mips::V0_64: case Mips::F2: case Mips::D2_64:
- case Mips::D1:
- return 2;
- case Mips::V1: case Mips::V1_64: case Mips::F3: case Mips::D3_64:
- return 3;
- case Mips::A0: case Mips::A0_64: case Mips::F4: case Mips::D4_64:
- case Mips::D2:
- return 4;
- case Mips::A1: case Mips::A1_64: case Mips::F5: case Mips::D5_64:
- return 5;
- case Mips::A2: case Mips::A2_64: case Mips::F6: case Mips::D6_64:
- case Mips::D3:
- return 6;
- case Mips::A3: case Mips::A3_64: case Mips::F7: case Mips::D7_64:
- return 7;
- case Mips::T0: case Mips::T0_64: case Mips::F8: case Mips::D8_64:
- case Mips::D4:
- return 8;
- case Mips::T1: case Mips::T1_64: case Mips::F9: case Mips::D9_64:
- return 9;
- case Mips::T2: case Mips::T2_64: case Mips::F10: case Mips::D10_64:
- case Mips::D5:
- return 10;
- case Mips::T3: case Mips::T3_64: case Mips::F11: case Mips::D11_64:
- return 11;
- case Mips::T4: case Mips::T4_64: case Mips::F12: case Mips::D12_64:
- case Mips::D6:
- return 12;
- case Mips::T5: case Mips::T5_64: case Mips::F13: case Mips::D13_64:
- return 13;
- case Mips::T6: case Mips::T6_64: case Mips::F14: case Mips::D14_64:
- case Mips::D7:
- return 14;
- case Mips::T7: case Mips::T7_64: case Mips::F15: case Mips::D15_64:
- return 15;
- case Mips::S0: case Mips::S0_64: case Mips::F16: case Mips::D16_64:
- case Mips::D8:
- return 16;
- case Mips::S1: case Mips::S1_64: case Mips::F17: case Mips::D17_64:
- return 17;
- case Mips::S2: case Mips::S2_64: case Mips::F18: case Mips::D18_64:
- case Mips::D9:
- return 18;
- case Mips::S3: case Mips::S3_64: case Mips::F19: case Mips::D19_64:
- return 19;
- case Mips::S4: case Mips::S4_64: case Mips::F20: case Mips::D20_64:
- case Mips::D10:
- return 20;
- case Mips::S5: case Mips::S5_64: case Mips::F21: case Mips::D21_64:
- return 21;
- case Mips::S6: case Mips::S6_64: case Mips::F22: case Mips::D22_64:
- case Mips::D11:
- return 22;
- case Mips::S7: case Mips::S7_64: case Mips::F23: case Mips::D23_64:
- return 23;
- case Mips::T8: case Mips::T8_64: case Mips::F24: case Mips::D24_64:
- case Mips::D12:
- return 24;
- case Mips::T9: case Mips::T9_64: case Mips::F25: case Mips::D25_64:
- return 25;
- case Mips::K0: case Mips::K0_64: case Mips::F26: case Mips::D26_64:
- case Mips::D13:
- return 26;
- case Mips::K1: case Mips::K1_64: case Mips::F27: case Mips::D27_64:
- return 27;
- case Mips::GP: case Mips::GP_64: case Mips::F28: case Mips::D28_64:
- case Mips::D14:
- return 28;
- case Mips::SP: case Mips::SP_64: case Mips::F29: case Mips::D29_64:
- return 29;
- case Mips::FP: case Mips::FP_64: case Mips::F30: case Mips::D30_64:
- case Mips::D15:
- return 30;
- case Mips::RA: case Mips::RA_64: case Mips::F31: case Mips::D31_64:
- return 31;
- default: llvm_unreachable("Unknown register number!");
- }
- return 0; // Not reached
-}
-
unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; }
//===----------------------------------------------------------------------===//
@@ -143,71 +53,55 @@ unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; }
//===----------------------------------------------------------------------===//
/// Mips Callee Saved Registers
-const unsigned* MipsRegisterInfo::
+const uint16_t* MipsRegisterInfo::
getCalleeSavedRegs(const MachineFunction *MF) const
{
- // Mips callee-save register range is $16-$23, $f20-$f30
- static const unsigned SingleFloatOnlyCalleeSavedRegs[] = {
- Mips::F31, Mips::F30, Mips::F29, Mips::F28, Mips::F27, Mips::F26,
- Mips::F25, Mips::F24, Mips::F23, Mips::F22, Mips::F21, Mips::F20,
- Mips::RA, Mips::FP, Mips::S7, Mips::S6, Mips::S5, Mips::S4,
- Mips::S3, Mips::S2, Mips::S1, Mips::S0, 0
- };
-
- static const unsigned Mips32CalleeSavedRegs[] = {
- Mips::D15, Mips::D14, Mips::D13, Mips::D12, Mips::D11, Mips::D10,
- Mips::RA, Mips::FP, Mips::S7, Mips::S6, Mips::S5, Mips::S4,
- Mips::S3, Mips::S2, Mips::S1, Mips::S0, 0
- };
-
- static const unsigned N32CalleeSavedRegs[] = {
- Mips::D31_64, Mips::D29_64, Mips::D27_64, Mips::D25_64, Mips::D23_64,
- Mips::D21_64,
- Mips::RA_64, Mips::FP_64, Mips::GP_64, Mips::S7_64, Mips::S6_64,
- Mips::S5_64, Mips::S4_64, Mips::S3_64, Mips::S2_64, Mips::S1_64,
- Mips::S0_64, 0
- };
+ if (Subtarget.isSingleFloat())
+ return CSR_SingleFloatOnly_SaveList;
+ else if (!Subtarget.hasMips64())
+ return CSR_O32_SaveList;
+ else if (Subtarget.isABI_N32())
+ return CSR_N32_SaveList;
- static const unsigned N64CalleeSavedRegs[] = {
- Mips::D31_64, Mips::D30_64, Mips::D29_64, Mips::D28_64, Mips::D27_64,
- Mips::D26_64, Mips::D25_64, Mips::D24_64,
- Mips::RA_64, Mips::FP_64, Mips::GP_64, Mips::S7_64, Mips::S6_64,
- Mips::S5_64, Mips::S4_64, Mips::S3_64, Mips::S2_64, Mips::S1_64,
- Mips::S0_64, 0
- };
+ assert(Subtarget.isABI_N64());
+ return CSR_N64_SaveList;
+}
+const uint32_t*
+MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const
+{
if (Subtarget.isSingleFloat())
- return SingleFloatOnlyCalleeSavedRegs;
+ return CSR_SingleFloatOnly_RegMask;
else if (!Subtarget.hasMips64())
- return Mips32CalleeSavedRegs;
+ return CSR_O32_RegMask;
else if (Subtarget.isABI_N32())
- return N32CalleeSavedRegs;
-
+ return CSR_N32_RegMask;
+
assert(Subtarget.isABI_N64());
- return N64CalleeSavedRegs;
+ return CSR_N64_RegMask;
}
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
- static const unsigned ReservedCPURegs[] = {
- Mips::ZERO, Mips::AT, Mips::K0, Mips::K1,
- Mips::GP, Mips::SP, Mips::FP, Mips::RA, 0
+ static const uint16_t ReservedCPURegs[] = {
+ Mips::ZERO, Mips::AT, Mips::K0, Mips::K1,
+ Mips::SP, Mips::FP, Mips::RA
};
- static const unsigned ReservedCPU64Regs[] = {
- Mips::ZERO_64, Mips::AT_64, Mips::K0_64, Mips::K1_64,
- Mips::GP_64, Mips::SP_64, Mips::FP_64, Mips::RA_64, 0
+ static const uint16_t ReservedCPU64Regs[] = {
+ Mips::ZERO_64, Mips::AT_64, Mips::K0_64, Mips::K1_64,
+ Mips::SP_64, Mips::FP_64, Mips::RA_64
};
BitVector Reserved(getNumRegs());
typedef TargetRegisterClass::iterator RegIter;
- for (const unsigned *Reg = ReservedCPURegs; *Reg; ++Reg)
- Reserved.set(*Reg);
+ for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I)
+ Reserved.set(ReservedCPURegs[I]);
if (Subtarget.hasMips64()) {
- for (const unsigned *Reg = ReservedCPU64Regs; *Reg; ++Reg)
- Reserved.set(*Reg);
+ for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I)
+ Reserved.set(ReservedCPU64Regs[I]);
// Reserve all registers in AFGR64.
for (RegIter Reg = Mips::AFGR64RegisterClass->begin();
@@ -224,10 +118,25 @@ getReservedRegs(const MachineFunction &MF) const {
Reg != Mips::FGR64RegisterClass->end(); ++Reg)
Reserved.set(*Reg);
}
-
+
+ // If GP is dedicated as a global base register, reserve it.
+ if (MF.getInfo<MipsFunctionInfo>()->globalBaseRegFixed()) {
+ Reserved.set(Mips::GP);
+ Reserved.set(Mips::GP_64);
+ }
+
+ // Reserve hardware registers.
+ Reserved.set(Mips::HWR29);
+ Reserved.set(Mips::HWR29_64);
+
return Reserved;
}
+bool
+MipsRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
+ return true;
+}
+
// This function eliminates ADJCALLSTACKDOWN and
// ADJCALLSTACKUP pseudo instructions.
void MipsRegisterInfo::
@@ -259,8 +168,8 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
errs() << "<--------->\n" << MI);
int FrameIndex = MI.getOperand(i).getIndex();
- int stackSize = MF.getFrameInfo()->getStackSize();
- int spOffset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
+ uint64_t stackSize = MF.getFrameInfo()->getStackSize();
+ int64_t spOffset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"
<< "spOffset : " << spOffset << "\n"
@@ -279,52 +188,71 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
// 1. Outgoing arguments.
// 2. Pointer to dynamically allocated stack space.
// 3. Locations for callee-saved registers.
- // Everything else is referenced relative to whatever register
+ // Everything else is referenced relative to whatever register
// getFrameRegister() returns.
unsigned FrameReg;
if (MipsFI->isOutArgFI(FrameIndex) || MipsFI->isDynAllocFI(FrameIndex) ||
(FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI))
- FrameReg = Mips::SP;
+ FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP;
else
- FrameReg = getFrameRegister(MF);
-
+ FrameReg = getFrameRegister(MF);
+
// Calculate final offset.
// - There is no need to change the offset if the frame object is one of the
// following: an outgoing argument, pointer to a dynamically allocated
// stack space or a $gp restore location,
// - If the frame object is any of the following, its offset must be adjusted
// by adding the size of the stack:
- // incoming argument, callee-saved register location or local variable.
- int Offset;
+ // incoming argument, callee-saved register location or local variable.
+ int64_t Offset;
if (MipsFI->isOutArgFI(FrameIndex) || MipsFI->isGPFI(FrameIndex) ||
MipsFI->isDynAllocFI(FrameIndex))
Offset = spOffset;
else
- Offset = spOffset + stackSize;
+ Offset = spOffset + (int64_t)stackSize;
Offset += MI.getOperand(i+1).getImm();
DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
// If MI is not a debug value, make sure Offset fits in the 16-bit immediate
- // field.
- if (!MI.isDebugValue() && (Offset >= 0x8000 || Offset < -0x8000)) {
+ // field.
+ if (!MI.isDebugValue() && !isInt<16>(Offset)) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = II->getDebugLoc();
- int ImmHi = (((unsigned)Offset & 0xffff0000) >> 16) +
- ((Offset & 0x8000) != 0);
-
- // FIXME: change this when mips goes MC".
- BuildMI(MBB, II, DL, TII.get(Mips::NOAT));
- BuildMI(MBB, II, DL, TII.get(Mips::LUi), Mips::AT).addImm(ImmHi);
- BuildMI(MBB, II, DL, TII.get(Mips::ADDu), Mips::AT).addReg(FrameReg)
- .addReg(Mips::AT);
- FrameReg = Mips::AT;
- Offset = (short)(Offset & 0xffff);
-
- BuildMI(MBB, ++II, MI.getDebugLoc(), TII.get(Mips::ATMACRO));
+ MipsAnalyzeImmediate AnalyzeImm;
+ unsigned Size = Subtarget.isABI_N64() ? 64 : 32;
+ unsigned LUi = Subtarget.isABI_N64() ? Mips::LUi64 : Mips::LUi;
+ unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned ZEROReg = Subtarget.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
+ unsigned ATReg = Subtarget.isABI_N64() ? Mips::AT_64 : Mips::AT;
+ const MipsAnalyzeImmediate::InstSeq &Seq =
+ AnalyzeImm.Analyze(Offset, Size, true /* LastInstrIsADDiu */);
+ MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
+
+ MipsFI->setEmitNOAT();
+
+ // The first instruction can be a LUi, which is different from other
+ // instructions (ADDiu, ORI and SLL) in that it does not have a register
+ // operand.
+ if (Inst->Opc == LUi)
+ BuildMI(MBB, II, DL, TII.get(LUi), ATReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+ else
+ BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+
+ // Build the remaining instructions in Seq except for the last one.
+ for (++Inst; Inst != Seq.end() - 1; ++Inst)
+ BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg)
+ .addImm(SignExtend64<16>(Inst->ImmOpnd));
+
+ BuildMI(MBB, II, DL, TII.get(ADDu), ATReg).addReg(FrameReg).addReg(ATReg);
+
+ FrameReg = ATReg;
+ Offset = SignExtend64<16>(Inst->ImmOpnd);
}
MI.getOperand(i).ChangeToRegister(FrameReg, false);
@@ -334,18 +262,18 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
unsigned MipsRegisterInfo::
getFrameRegister(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ bool IsN64 = Subtarget.isABI_N64();
- return TFI->hasFP(MF) ? Mips::FP : Mips::SP;
+ return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) :
+ (IsN64 ? Mips::SP_64 : Mips::SP);
}
unsigned MipsRegisterInfo::
getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
- return 0;
}
unsigned MipsRegisterInfo::
getEHHandlerRegister() const {
llvm_unreachable("What is the exception handler register");
- return 0;
}
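
The rewritten large-offset path above replaces the fixed LUi/ADDu pair, which only handled 32-bit offsets, with MipsAnalyzeImmediate, so N64 frame offsets wider than 16 bits are also handled. The arithmetic being protected is the same in both versions: the low half is sign-extended when the final instruction consumes it, so the high half must be compensated. A standalone illustration of the old two-instruction split; the offset value is arbitrary:

// Editor's illustration: splitting an offset that fails isInt<16>() into a
// %hi/%lo pair. Adding 1 to the high half when bit 15 is set compensates
// for the sign extension of the low half, exactly like the removed ImmHi
// computation did.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  int32_t Offset = 0x12348000;                        // not representable in int16_t
  int32_t Hi = (Offset >> 16) + ((Offset & 0x8000) != 0);
  int16_t Lo = static_cast<int16_t>(Offset & 0xffff); // sign-extended on use
  // Emitted as: lui $at, Hi ; addu $at, $fp, $at ; Lo folds into the final
  // memory operand.
  assert((Hi << 16) + Lo == Offset);
  std::printf("hi=0x%x lo=%d\n", static_cast<unsigned>(Hi), Lo);
  return 0;
}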
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
index 67e57dd..0716d29 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
@@ -1,4 +1,4 @@
-//===- MipsRegisterInfo.h - Mips Register Information Impl ------*- C++ -*-===//
+//===-- MipsRegisterInfo.h - Mips Register Information Impl -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -42,10 +42,13 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
void adjustMipsStackFrame(MachineFunction &MF) const;
/// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+ const uint32_t *getCallPreservedMask(CallingConv::ID) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
+ virtual bool requiresRegisterScavenging(const MachineFunction &MF) const;
+
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
index 925ad9e..ce399a0 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td
@@ -1,4 +1,4 @@
-//===- MipsRegisterInfo.td - Mips Register defs ------------*- tablegen -*-===//
+//===-- MipsRegisterInfo.td - Mips Register defs -----------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -50,6 +50,7 @@ class AFPR<bits<5> num, string n, list<Register> subregs>
: MipsRegWithSubRegs<n, subregs> {
let Num = num;
let SubRegIndices = [sub_fpeven, sub_fpodd];
+ let CoveredBySubRegs = 1;
}
class AFPR64<bits<5> num, string n, list<Register> subregs>
@@ -68,8 +69,6 @@ class HWR<bits<5> num, string n> : MipsReg<n> {
//===----------------------------------------------------------------------===//
let Namespace = "Mips" in {
- // FIXME: Fix DwarfRegNum.
-
// General Purpose Registers
def ZERO : MipsGPRReg< 0, "ZERO">, DwarfRegNum<[0]>;
def AT : MipsGPRReg< 1, "AT">, DwarfRegNum<[1]>;
@@ -105,38 +104,38 @@ let Namespace = "Mips" in {
def RA : MipsGPRReg< 31, "RA">, DwarfRegNum<[31]>;
// General Purpose 64-bit Registers
- def ZERO_64 : Mips64GPRReg< 0, "ZERO", [ZERO]>;
- def AT_64 : Mips64GPRReg< 1, "AT", [AT]>;
- def V0_64 : Mips64GPRReg< 2, "2", [V0]>;
- def V1_64 : Mips64GPRReg< 3, "3", [V1]>;
- def A0_64 : Mips64GPRReg< 4, "4", [A0]>;
- def A1_64 : Mips64GPRReg< 5, "5", [A1]>;
- def A2_64 : Mips64GPRReg< 6, "6", [A2]>;
- def A3_64 : Mips64GPRReg< 7, "7", [A3]>;
- def T0_64 : Mips64GPRReg< 8, "8", [T0]>;
- def T1_64 : Mips64GPRReg< 9, "9", [T1]>;
- def T2_64 : Mips64GPRReg< 10, "10", [T2]>;
- def T3_64 : Mips64GPRReg< 11, "11", [T3]>;
- def T4_64 : Mips64GPRReg< 12, "12", [T4]>;
- def T5_64 : Mips64GPRReg< 13, "13", [T5]>;
- def T6_64 : Mips64GPRReg< 14, "14", [T6]>;
- def T7_64 : Mips64GPRReg< 15, "15", [T7]>;
- def S0_64 : Mips64GPRReg< 16, "16", [S0]>;
- def S1_64 : Mips64GPRReg< 17, "17", [S1]>;
- def S2_64 : Mips64GPRReg< 18, "18", [S2]>;
- def S3_64 : Mips64GPRReg< 19, "19", [S3]>;
- def S4_64 : Mips64GPRReg< 20, "20", [S4]>;
- def S5_64 : Mips64GPRReg< 21, "21", [S5]>;
- def S6_64 : Mips64GPRReg< 22, "22", [S6]>;
- def S7_64 : Mips64GPRReg< 23, "23", [S7]>;
- def T8_64 : Mips64GPRReg< 24, "24", [T8]>;
- def T9_64 : Mips64GPRReg< 25, "25", [T9]>;
- def K0_64 : Mips64GPRReg< 26, "26", [K0]>;
- def K1_64 : Mips64GPRReg< 27, "27", [K1]>;
- def GP_64 : Mips64GPRReg< 28, "GP", [GP]>;
- def SP_64 : Mips64GPRReg< 29, "SP", [SP]>;
- def FP_64 : Mips64GPRReg< 30, "FP", [FP]>;
- def RA_64 : Mips64GPRReg< 31, "RA", [RA]>;
+ def ZERO_64 : Mips64GPRReg< 0, "ZERO", [ZERO]>, DwarfRegNum<[0]>;
+ def AT_64 : Mips64GPRReg< 1, "AT", [AT]>, DwarfRegNum<[1]>;
+ def V0_64 : Mips64GPRReg< 2, "2", [V0]>, DwarfRegNum<[2]>;
+ def V1_64 : Mips64GPRReg< 3, "3", [V1]>, DwarfRegNum<[3]>;
+ def A0_64 : Mips64GPRReg< 4, "4", [A0]>, DwarfRegNum<[4]>;
+ def A1_64 : Mips64GPRReg< 5, "5", [A1]>, DwarfRegNum<[5]>;
+ def A2_64 : Mips64GPRReg< 6, "6", [A2]>, DwarfRegNum<[6]>;
+ def A3_64 : Mips64GPRReg< 7, "7", [A3]>, DwarfRegNum<[7]>;
+ def T0_64 : Mips64GPRReg< 8, "8", [T0]>, DwarfRegNum<[8]>;
+ def T1_64 : Mips64GPRReg< 9, "9", [T1]>, DwarfRegNum<[9]>;
+ def T2_64 : Mips64GPRReg< 10, "10", [T2]>, DwarfRegNum<[10]>;
+ def T3_64 : Mips64GPRReg< 11, "11", [T3]>, DwarfRegNum<[11]>;
+ def T4_64 : Mips64GPRReg< 12, "12", [T4]>, DwarfRegNum<[12]>;
+ def T5_64 : Mips64GPRReg< 13, "13", [T5]>, DwarfRegNum<[13]>;
+ def T6_64 : Mips64GPRReg< 14, "14", [T6]>, DwarfRegNum<[14]>;
+ def T7_64 : Mips64GPRReg< 15, "15", [T7]>, DwarfRegNum<[15]>;
+ def S0_64 : Mips64GPRReg< 16, "16", [S0]>, DwarfRegNum<[16]>;
+ def S1_64 : Mips64GPRReg< 17, "17", [S1]>, DwarfRegNum<[17]>;
+ def S2_64 : Mips64GPRReg< 18, "18", [S2]>, DwarfRegNum<[18]>;
+ def S3_64 : Mips64GPRReg< 19, "19", [S3]>, DwarfRegNum<[19]>;
+ def S4_64 : Mips64GPRReg< 20, "20", [S4]>, DwarfRegNum<[20]>;
+ def S5_64 : Mips64GPRReg< 21, "21", [S5]>, DwarfRegNum<[21]>;
+ def S6_64 : Mips64GPRReg< 22, "22", [S6]>, DwarfRegNum<[22]>;
+ def S7_64 : Mips64GPRReg< 23, "23", [S7]>, DwarfRegNum<[23]>;
+ def T8_64 : Mips64GPRReg< 24, "24", [T8]>, DwarfRegNum<[24]>;
+ def T9_64 : Mips64GPRReg< 25, "25", [T9]>, DwarfRegNum<[25]>;
+ def K0_64 : Mips64GPRReg< 26, "26", [K0]>, DwarfRegNum<[26]>;
+ def K1_64 : Mips64GPRReg< 27, "27", [K1]>, DwarfRegNum<[27]>;
+ def GP_64 : Mips64GPRReg< 28, "GP", [GP]>, DwarfRegNum<[28]>;
+ def SP_64 : Mips64GPRReg< 29, "SP", [SP]>, DwarfRegNum<[29]>;
+ def FP_64 : Mips64GPRReg< 30, "FP", [FP]>, DwarfRegNum<[30]>;
+ def RA_64 : Mips64GPRReg< 31, "RA", [RA]>, DwarfRegNum<[31]>;
/// Mips single-precision FPU Registers
def F0 : FPR< 0, "F0">, DwarfRegNum<[32]>;
@@ -192,38 +191,38 @@ let Namespace = "Mips" in {
def D15 : AFPR<30, "F30", [F30, F31]>;
/// Mips double-precision FPU Registers in MFP64 mode.
- def D0_64 : AFPR64<0, "F0", [F0]>;
- def D1_64 : AFPR64<1, "F1", [F1]>;
- def D2_64 : AFPR64<2, "F2", [F2]>;
- def D3_64 : AFPR64<3, "F3", [F3]>;
- def D4_64 : AFPR64<4, "F4", [F4]>;
- def D5_64 : AFPR64<5, "F5", [F5]>;
- def D6_64 : AFPR64<6, "F6", [F6]>;
- def D7_64 : AFPR64<7, "F7", [F7]>;
- def D8_64 : AFPR64<8, "F8", [F8]>;
- def D9_64 : AFPR64<9, "F9", [F9]>;
- def D10_64 : AFPR64<10, "F10", [F10]>;
- def D11_64 : AFPR64<11, "F11", [F11]>;
- def D12_64 : AFPR64<12, "F12", [F12]>;
- def D13_64 : AFPR64<13, "F13", [F13]>;
- def D14_64 : AFPR64<14, "F14", [F14]>;
- def D15_64 : AFPR64<15, "F15", [F15]>;
- def D16_64 : AFPR64<16, "F16", [F16]>;
- def D17_64 : AFPR64<17, "F17", [F17]>;
- def D18_64 : AFPR64<18, "F18", [F18]>;
- def D19_64 : AFPR64<19, "F19", [F19]>;
- def D20_64 : AFPR64<20, "F20", [F20]>;
- def D21_64 : AFPR64<21, "F21", [F21]>;
- def D22_64 : AFPR64<22, "F22", [F22]>;
- def D23_64 : AFPR64<23, "F23", [F23]>;
- def D24_64 : AFPR64<24, "F24", [F24]>;
- def D25_64 : AFPR64<25, "F25", [F25]>;
- def D26_64 : AFPR64<26, "F26", [F26]>;
- def D27_64 : AFPR64<27, "F27", [F27]>;
- def D28_64 : AFPR64<28, "F28", [F28]>;
- def D29_64 : AFPR64<29, "F29", [F29]>;
- def D30_64 : AFPR64<30, "F30", [F30]>;
- def D31_64 : AFPR64<31, "F31", [F31]>;
+ def D0_64 : AFPR64<0, "F0", [F0]>, DwarfRegNum<[32]>;
+ def D1_64 : AFPR64<1, "F1", [F1]>, DwarfRegNum<[33]>;
+ def D2_64 : AFPR64<2, "F2", [F2]>, DwarfRegNum<[34]>;
+ def D3_64 : AFPR64<3, "F3", [F3]>, DwarfRegNum<[35]>;
+ def D4_64 : AFPR64<4, "F4", [F4]>, DwarfRegNum<[36]>;
+ def D5_64 : AFPR64<5, "F5", [F5]>, DwarfRegNum<[37]>;
+ def D6_64 : AFPR64<6, "F6", [F6]>, DwarfRegNum<[38]>;
+ def D7_64 : AFPR64<7, "F7", [F7]>, DwarfRegNum<[39]>;
+ def D8_64 : AFPR64<8, "F8", [F8]>, DwarfRegNum<[40]>;
+ def D9_64 : AFPR64<9, "F9", [F9]>, DwarfRegNum<[41]>;
+ def D10_64 : AFPR64<10, "F10", [F10]>, DwarfRegNum<[42]>;
+ def D11_64 : AFPR64<11, "F11", [F11]>, DwarfRegNum<[43]>;
+ def D12_64 : AFPR64<12, "F12", [F12]>, DwarfRegNum<[44]>;
+ def D13_64 : AFPR64<13, "F13", [F13]>, DwarfRegNum<[45]>;
+ def D14_64 : AFPR64<14, "F14", [F14]>, DwarfRegNum<[46]>;
+ def D15_64 : AFPR64<15, "F15", [F15]>, DwarfRegNum<[47]>;
+ def D16_64 : AFPR64<16, "F16", [F16]>, DwarfRegNum<[48]>;
+ def D17_64 : AFPR64<17, "F17", [F17]>, DwarfRegNum<[49]>;
+ def D18_64 : AFPR64<18, "F18", [F18]>, DwarfRegNum<[50]>;
+ def D19_64 : AFPR64<19, "F19", [F19]>, DwarfRegNum<[51]>;
+ def D20_64 : AFPR64<20, "F20", [F20]>, DwarfRegNum<[52]>;
+ def D21_64 : AFPR64<21, "F21", [F21]>, DwarfRegNum<[53]>;
+ def D22_64 : AFPR64<22, "F22", [F22]>, DwarfRegNum<[54]>;
+ def D23_64 : AFPR64<23, "F23", [F23]>, DwarfRegNum<[55]>;
+ def D24_64 : AFPR64<24, "F24", [F24]>, DwarfRegNum<[56]>;
+ def D25_64 : AFPR64<25, "F25", [F25]>, DwarfRegNum<[57]>;
+ def D26_64 : AFPR64<26, "F26", [F26]>, DwarfRegNum<[58]>;
+ def D27_64 : AFPR64<27, "F27", [F27]>, DwarfRegNum<[59]>;
+ def D28_64 : AFPR64<28, "F28", [F28]>, DwarfRegNum<[60]>;
+ def D29_64 : AFPR64<29, "F29", [F29]>, DwarfRegNum<[61]>;
+ def D30_64 : AFPR64<30, "F30", [F30]>, DwarfRegNum<[62]>;
+ def D31_64 : AFPR64<31, "F31", [F31]>, DwarfRegNum<[63]>;
// Hi/Lo registers
def HI : Register<"hi">, DwarfRegNum<[64]>;
@@ -239,6 +238,7 @@ let Namespace = "Mips" in {
// Hardware register $29
def HWR29 : Register<"29">;
+ def HWR29_64 : Register<"29">;
}
//===----------------------------------------------------------------------===//
@@ -301,3 +301,5 @@ def HILO64 : RegisterClass<"Mips", [i64], 64, (add HI64, LO64)> {
// Hardware registers
def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>;
+def HWRegs64 : RegisterClass<"Mips", [i64], 32, (add HWR29_64)>;
+
diff --git a/contrib/llvm/lib/Target/Mips/MipsRelocations.h b/contrib/llvm/lib/Target/Mips/MipsRelocations.h
index 66d1bfd..0787ed3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRelocations.h
+++ b/contrib/llvm/lib/Target/Mips/MipsRelocations.h
@@ -1,16 +1,16 @@
-//===- MipsRelocations.h - Mips Code Relocations ---------------*- C++ -*-===//
+//===-- MipsRelocations.h - Mips Code Relocations ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
-//===---------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
//
// This file defines the Mips target-specific relocation types
// (for relocation-model=static).
//
-//===---------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
#ifndef MIPSRELOCATIONS_H_
#define MIPSRELOCATIONS_H_
@@ -20,10 +20,10 @@
namespace llvm {
namespace Mips{
enum RelocationType {
- // reloc_mips_branch - pc relative relocation for branches. The lower 18
+ // reloc_mips_pc16 - pc relative relocation for branches. The lower 18
// bits of the difference between the branch target and the branch
// instruction, shifted right by 2.
- reloc_mips_branch = 1,
+ reloc_mips_pc16 = 1,
// reloc_mips_hi - upper 16 bits of the address (modified by +1 if the
// lower 16 bits of the address, taken as a signed value, are negative).
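
The renamed reloc_mips_pc16 above is documented as the lower 18 bits of the byte distance from the branch instruction to its target, shifted right by 2 (branch targets are word-aligned, so the low two bits carry no information). A standalone computation of the field following that description; the addresses are invented for the demo:

// Editor's illustration of the field described in the comment above.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t Branch = 0x400100;                    // address of the branch
  uint64_t Target = 0x400040;                    // backward, word-aligned
  int64_t Delta = static_cast<int64_t>(Target - Branch);
  uint32_t Field = (static_cast<uint64_t>(Delta) & 0x3ffff) >> 2;
  std::printf("pc16 field = 0x%04x\n", Field);   // prints 0xffd0
  return 0;
}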
diff --git a/contrib/llvm/lib/Target/Mips/MipsSchedule.td b/contrib/llvm/lib/Target/Mips/MipsSchedule.td
index 00be8ee..1add02f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSchedule.td
+++ b/contrib/llvm/lib/Target/Mips/MipsSchedule.td
@@ -1,4 +1,4 @@
-//===- MipsSchedule.td - Mips Scheduling Definitions -------*- tablegen -*-===//
+//===-- MipsSchedule.td - Mips Scheduling Definitions ------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
index 016d449..00347df 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp
@@ -1,4 +1,4 @@
-//===- MipsSubtarget.cpp - Mips Subtarget Information -----------*- C++ -*-===//
+//===-- MipsSubtarget.cpp - Mips Subtarget Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,6 +13,7 @@
#include "MipsSubtarget.h"
#include "Mips.h"
+#include "MipsRegisterInfo.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_SUBTARGETINFO_TARGET_DESC
@@ -21,17 +22,19 @@
using namespace llvm;
+void MipsSubtarget::anchor() { }
+
MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool little) :
MipsGenSubtargetInfo(TT, CPU, FS),
- MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little),
+ MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little),
IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false),
IsLinux(true), HasSEInReg(false), HasCondMov(false), HasMulDivAdd(false),
HasMinMax(false), HasSwap(false), HasBitCount(false)
{
std::string CPUName = CPU;
if (CPUName.empty())
- CPUName = "mips32r1";
+ CPUName = "mips32";
// Parse features string.
ParseSubtargetFeatures(CPUName, FS);
@@ -41,7 +44,7 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
// Set MipsABI if it hasn't been set yet.
if (MipsABI == UnknownABI)
- MipsABI = hasMips64() ? N64 : O32;
+ MipsABI = hasMips64() ? N64 : O32;
// Check if Architecture and ABI are compatible.
assert(((!hasMips64() && (isABI_O32() || isABI_EABI())) ||
@@ -52,3 +55,14 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
if (TT.find("linux") == std::string::npos)
IsLinux = false;
}
+
+bool
+MipsSubtarget::enablePostRAScheduler(CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode& Mode,
+ RegClassVector& CriticalPathRCs) const {
+ Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
+ CriticalPathRCs.clear();
+ CriticalPathRCs.push_back(hasMips64() ?
+ &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass);
+ return OptLevel >= CodeGenOpt::Aggressive;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
index d9dddad..7faf77b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
+++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h
@@ -1,4 +1,4 @@
-//=====-- MipsSubtarget.h - Define Subtarget for the Mips -----*- C++ -*--====//
+//===-- MipsSubtarget.h - Define Subtarget for the Mips ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,6 +25,7 @@ namespace llvm {
class StringRef;
class MipsSubtarget : public MipsGenSubtargetInfo {
+ virtual void anchor();
public:
// NOTE: O64 will not be supported.
@@ -88,6 +89,9 @@ protected:
InstrItineraryData InstrItins;
public:
+ virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
+ AntiDepBreakMode& Mode,
+ RegClassVector& CriticalPathRCs) const;
/// Only O32 and EABI supported right now.
bool isABI_EABI() const { return MipsABI == EABI; }
@@ -111,6 +115,8 @@ public:
bool hasMips64() const { return MipsArchVersion >= Mips64; }
bool hasMips64r2() const { return MipsArchVersion == Mips64r2; }
+ bool hasMips32r2Or64() const { return hasMips32r2() || hasMips64(); }
+
bool isLittle() const { return IsLittle; }
bool isFP64bit() const { return IsFP64bit; }
bool isGP64bit() const { return IsGP64bit; }
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
index 6480da3..ad02231 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Mips.h"
#include "MipsTargetMachine.h"
+#include "Mips.h"
#include "llvm/PassManager.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -34,86 +35,119 @@ extern "C" void LLVMInitializeMipsTarget() {
// Using CodeModel::Large enables different CALL behavior.
MipsTargetMachine::
MipsTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- bool isLittle):
- LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
- Subtarget(TT, CPU, FS, isLittle),
- DataLayout(isLittle ?
- (Subtarget.isABI_N64() ?
- "e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" :
- "e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32") :
- (Subtarget.isABI_N64() ?
- "E-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" :
- "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32")),
- InstrInfo(*this),
- FrameLowering(Subtarget),
- TLInfo(*this), TSInfo(*this), JITInfo() {
+ CodeGenOpt::Level OL,
+ bool isLittle)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, isLittle),
+ DataLayout(isLittle ?
+ (Subtarget.isABI_N64() ?
+ "e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" :
+ "e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32") :
+ (Subtarget.isABI_N64() ?
+ "E-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" :
+ "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32")),
+ InstrInfo(*this),
+ FrameLowering(Subtarget),
+ TLInfo(*this), TSInfo(*this), JITInfo() {
}
+void MipsebTargetMachine::anchor() { }
+
MipsebTargetMachine::
MipsebTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM) :
- MipsTargetMachine(T, TT, CPU, FS, RM, CM, false) {}
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+
+void MipselTargetMachine::anchor() { }
MipselTargetMachine::
MipselTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM) :
- MipsTargetMachine(T, TT, CPU, FS, RM, CM, true) {}
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+
+void Mips64ebTargetMachine::anchor() { }
Mips64ebTargetMachine::
Mips64ebTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM) :
- MipsTargetMachine(T, TT, CPU, FS, RM, CM, false) {}
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+
+void Mips64elTargetMachine::anchor() { }
Mips64elTargetMachine::
Mips64elTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM) :
- MipsTargetMachine(T, TT, CPU, FS, RM, CM, true) {}
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+
+namespace {
+/// Mips Code Generator Pass Configuration Options.
+class MipsPassConfig : public TargetPassConfig {
+public:
+ MipsPassConfig(MipsTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ MipsTargetMachine &getMipsTargetMachine() const {
+ return getTM<MipsTargetMachine>();
+ }
+
+ const MipsSubtarget &getMipsSubtarget() const {
+ return *getMipsTargetMachine().getSubtargetImpl();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreRegAlloc();
+ virtual bool addPreSched2();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *MipsTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new MipsPassConfig(this, PM);
+}
// Install an instruction selector pass using
// the ISelDag to gen Mips code.
-bool MipsTargetMachine::
-addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel)
+bool MipsPassConfig::addInstSelector()
{
- PM.add(createMipsISelDag(*this));
+ PM.add(createMipsISelDag(getMipsTargetMachine()));
return false;
}
// Implemented by targets that want to run passes immediately before
// machine code is emitted. Return true if -print-machineinstrs should
// print out the code after the passes.
-bool MipsTargetMachine::
-addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel)
+bool MipsPassConfig::addPreEmitPass()
{
- PM.add(createMipsDelaySlotFillerPass(*this));
+ PM.add(createMipsDelaySlotFillerPass(getMipsTargetMachine()));
return true;
}
-bool MipsTargetMachine::
-addPreRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel) {
+bool MipsPassConfig::addPreRegAlloc() {
// Do not restore $gp if target is Mips64.
// In N32/64, $gp is a callee-saved register.
- if (!Subtarget.hasMips64())
- PM.add(createMipsEmitGPRestorePass(*this));
+ if (!getMipsSubtarget().hasMips64())
+ PM.add(createMipsEmitGPRestorePass(getMipsTargetMachine()));
return true;
}
-bool MipsTargetMachine::
-addPostRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel) {
- PM.add(createMipsExpandPseudoPass(*this));
+bool MipsPassConfig::addPreSched2() {
+ PM.add(createMipsExpandPseudoPass(getMipsTargetMachine()));
return true;
}
bool MipsTargetMachine::addCodeEmitter(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
- JITCodeEmitter &JCE) {
+ JITCodeEmitter &JCE) {
// Machine code emitter pass for Mips.
PM.add(createMipsJITCodeEmitterPass(*this, JCE));
return false;
}
-
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
index 118ed10..80c00e8 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
@@ -1,4 +1,4 @@
-//===-- MipsTargetMachine.h - Define TargetMachine for Mips -00--*- C++ -*-===//
+//===-- MipsTargetMachine.h - Define TargetMachine for Mips -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,15 +14,15 @@
#ifndef MIPSTARGETMACHINE_H
#define MIPSTARGETMACHINE_H
-#include "MipsSubtarget.h"
+#include "MipsFrameLowering.h"
#include "MipsInstrInfo.h"
#include "MipsISelLowering.h"
-#include "MipsFrameLowering.h"
+#include "MipsJITInfo.h"
#include "MipsSelectionDAGInfo.h"
+#include "MipsSubtarget.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameLowering.h"
-#include "MipsJITInfo.h"
namespace llvm {
class formatted_raw_ostream;
@@ -38,8 +38,9 @@ namespace llvm {
public:
MipsTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL,
bool isLittle);
virtual const MipsInstrInfo *getInstrInfo() const
@@ -67,15 +68,8 @@ namespace llvm {
}
// Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
- virtual bool addPreRegAlloc(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
- virtual bool addPostRegAlloc(PassManagerBase &, CodeGenOpt::Level);
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
virtual bool addCodeEmitter(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
JITCodeEmitter &JCE);
};
@@ -83,37 +77,47 @@ namespace llvm {
/// MipsebTargetMachine - Mips32 big endian target machine.
///
class MipsebTargetMachine : public MipsTargetMachine {
+ virtual void anchor();
public:
MipsebTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
/// MipselTargetMachine - Mips32 little endian target machine.
///
class MipselTargetMachine : public MipsTargetMachine {
+ virtual void anchor();
public:
MipselTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
/// Mips64ebTargetMachine - Mips64 big endian target machine.
///
class Mips64ebTargetMachine : public MipsTargetMachine {
+ virtual void anchor();
public:
Mips64ebTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
/// Mips64elTargetMachine - Mips64 little endian target machine.
///
class Mips64elTargetMachine : public MipsTargetMachine {
+ virtual void anchor();
public:
Mips64elTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
index 05c46f5..04dc60a 100644
--- a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
@@ -1,4 +1,4 @@
-//===-- MipsTargetObjectFile.cpp - Mips object files ----------------------===//
+//===-- MipsTargetObjectFile.cpp - Mips Object Files ----------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp b/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp
index aabb404..1830213 100644
--- a/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.cpp
@@ -18,27 +18,69 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-#define GET_INSTRUCTION_NAME
#include "PTXGenAsmWriter.inc"
PTXInstPrinter::PTXInstPrinter(const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) :
- MCInstPrinter(MAI) {
+ MCInstPrinter(MAI, MII, MRI) {
// Initialize the set of available features.
setAvailableFeatures(STI.getFeatureBits());
}
-StringRef PTXInstPrinter::getOpcodeName(unsigned Opcode) const {
- return getInstructionName(Opcode);
-}
-
void PTXInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
- OS << getRegisterName(RegNo);
+ // Decode the register number into type and offset
+ unsigned RegSpace = RegNo & 0x7;
+ unsigned RegType = (RegNo >> 3) & 0x7;
+ unsigned RegOffset = RegNo >> 6;
+
+ // Print the register
+ OS << "%";
+
+ switch (RegSpace) {
+ default:
+ llvm_unreachable("Unknown register space!");
+ case PTXRegisterSpace::Reg:
+ switch (RegType) {
+ default:
+ llvm_unreachable("Unknown register type!");
+ case PTXRegisterType::Pred:
+ OS << "p";
+ break;
+ case PTXRegisterType::B16:
+ OS << "rh";
+ break;
+ case PTXRegisterType::B32:
+ OS << "r";
+ break;
+ case PTXRegisterType::B64:
+ OS << "rd";
+ break;
+ case PTXRegisterType::F32:
+ OS << "f";
+ break;
+ case PTXRegisterType::F64:
+ OS << "fd";
+ break;
+ }
+ break;
+ case PTXRegisterSpace::Return:
+ OS << "ret";
+ break;
+ case PTXRegisterSpace::Argument:
+ OS << "arg";
+ break;
+ }
+
+ OS << RegOffset;
}
void PTXInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
@@ -96,9 +138,23 @@ void PTXInstPrinter::printCall(const MCInst *MI, raw_ostream &O) {
O << "), ";
}
- O << *(MI->getOperand(Index++).getExpr()) << ", (";
-
+ const MCExpr* Expr = MI->getOperand(Index++).getExpr();
unsigned NumArgs = MI->getOperand(Index++).getImm();
+
+  // If the function call is to printf or puts, change it to vprintf.
+ if (const MCSymbolRefExpr *SymRefExpr = dyn_cast<MCSymbolRefExpr>(Expr)) {
+ const MCSymbol &Sym = SymRefExpr->getSymbol();
+ if (Sym.getName() == "printf" || Sym.getName() == "puts") {
+ O << "vprintf";
+ } else {
+ O << Sym.getName();
+ }
+ } else {
+ O << *Expr;
+ }
+
+ O << ", (";
+
if (NumArgs > 0) {
printOperand(MI, Index++, O);
for (unsigned i = 1; i < NumArgs; ++i) {
@@ -125,6 +181,8 @@ void PTXInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
} else {
O << "0000000000000000";
}
+ } else if (Op.isReg()) {
+ printRegName(O, Op.getReg());
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
const MCExpr *Expr = Op.getExpr();
@@ -156,7 +214,6 @@ void PTXInstPrinter::printRoundingMode(const MCInst *MI, unsigned OpNo,
llvm_unreachable("Unknown rounding mode!");
case PTXRoundingMode::RndDefault:
llvm_unreachable("FP rounding-mode pass did not handle instruction!");
- break;
case PTXRoundingMode::RndNone:
// Do not print anything.
break;
diff --git a/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h b/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h
index 86dfd48..ea4d504 100644
--- a/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h
+++ b/contrib/llvm/lib/Target/PTX/InstPrinter/PTXInstPrinter.h
@@ -1,4 +1,4 @@
-//===-- PTXInstPrinter.h - Convert PTX MCInst to assembly syntax ----------===//
+//===- PTXInstPrinter.h - Convert PTX MCInst to assembly syntax -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,14 +23,12 @@ class MCOperand;
class PTXInstPrinter : public MCInstPrinter {
public:
- PTXInstPrinter(const MCAsmInfo &MAI, const MCSubtargetInfo &STI);
+ PTXInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI, const MCSubtargetInfo &STI);
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
- virtual StringRef getOpcodeName(unsigned Opcode) const;
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
- static const char *getInstructionName(unsigned Opcode);
-
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h
index c6094be..a3e0f32 100644
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h
+++ b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXBaseInfo.h
@@ -18,6 +18,8 @@
#define PTXBASEINFO_H
#include "PTXMCTargetDesc.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
namespace llvm {
namespace PTXStateSpace {
@@ -57,6 +59,75 @@ namespace llvm {
RndPosInfInt = 10 // .rpi
};
} // namespace PTXII
+
+ namespace PTXRegisterType {
+ // Register type encoded in MCOperands
+ enum {
+ Pred = 0,
+ B16,
+ B32,
+ B64,
+ F32,
+ F64
+ };
+ } // namespace PTXRegisterType
+
+ namespace PTXRegisterSpace {
+ // Register space encoded in MCOperands
+ enum {
+ Reg = 0,
+ Local,
+ Param,
+ Argument,
+ Return
+ };
+ }
+
+ inline static void decodeRegisterName(raw_ostream &OS,
+ unsigned EncodedReg) {
+ OS << "%";
+
+ unsigned RegSpace = EncodedReg & 0x7;
+ unsigned RegType = (EncodedReg >> 3) & 0x7;
+ unsigned RegOffset = EncodedReg >> 6;
+
+ switch (RegSpace) {
+ default:
+ llvm_unreachable("Unknown register space!");
+ case PTXRegisterSpace::Reg:
+ switch (RegType) {
+ default:
+ llvm_unreachable("Unknown register type!");
+ case PTXRegisterType::Pred:
+ OS << "p";
+ break;
+ case PTXRegisterType::B16:
+ OS << "rh";
+ break;
+ case PTXRegisterType::B32:
+ OS << "r";
+ break;
+ case PTXRegisterType::B64:
+ OS << "rd";
+ break;
+ case PTXRegisterType::F32:
+ OS << "f";
+ break;
+ case PTXRegisterType::F64:
+ OS << "fd";
+ break;
+ }
+ break;
+ case PTXRegisterSpace::Return:
+ OS << "ret";
+ break;
+ case PTXRegisterSpace::Argument:
+ OS << "arg";
+ break;
+ }
+
+ OS << RegOffset;
+ }
} // namespace llvm
#endif
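
decodeRegisterName above fixes the bit layout of PTX register numbers as they travel through MCOperands: space in bits 2..0, type in bits 5..3, and the per-class offset in the remaining high bits. A standalone round-trip of that encoding, using values taken from the PTXRegisterSpace and PTXRegisterType enums defined in this header:

// Editor's illustration: encode/decode round-trip for the register-number
// layout used by decodeRegisterName (space = bits 2..0, type = bits 5..3,
// offset = bits 6 and up).
#include <cassert>
#include <cstdio>

static unsigned encodeReg(unsigned Space, unsigned Type, unsigned Offset) {
  return (Space & 0x7) | ((Type & 0x7) << 3) | (Offset << 6);
}

int main() {
  const unsigned SpaceReg = 0;                   // PTXRegisterSpace::Reg
  const unsigned TypeB32 = 2;                    // PTXRegisterType::B32
  unsigned R = encodeReg(SpaceReg, TypeB32, 7);  // should decode as "%r7"
  assert((R & 0x7) == SpaceReg);
  assert(((R >> 3) & 0x7) == TypeB32);
  assert(R >> 6 == 7);
  std::printf("encoded 0x%x -> %%r%u\n", R, R >> 6);
  return 0;
}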
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp
index efefead..cdfbc80 100644
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.cpp
@@ -16,6 +16,8 @@
using namespace llvm;
+void PTXMCAsmInfo::anchor() { }
+
PTXMCAsmInfo::PTXMCAsmInfo(const Target &T, const StringRef &TT) {
Triple TheTriple(TT);
if (TheTriple.getArch() == Triple::ptx64)
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.h b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.h
index 03f5d66..32ca069 100644
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- PTXMCAsmInfo.h - PTX asm properties -----------------*- C++ -*--====//
+//===-- PTXMCAsmInfo.h - PTX asm properties --------------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,7 +20,9 @@ namespace llvm {
class Target;
class StringRef;
- struct PTXMCAsmInfo : public MCAsmInfo {
+ class PTXMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
+ public:
explicit PTXMCAsmInfo(const Target &T, const StringRef &TT);
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp
index a5af3b8..08fb970 100644
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- PTXMCTargetDesc.cpp - PTX Target Descriptions -----------*- C++ -*-===//
+//===-- PTXMCTargetDesc.cpp - PTX Target Descriptions ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -52,18 +52,21 @@ static MCSubtargetInfo *createPTXMCSubtargetInfo(StringRef TT, StringRef CPU,
}
static MCCodeGenInfo *createPTXMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
static MCInstPrinter *createPTXMCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
assert(SyntaxVariant == 0 && "We only have one syntax variant");
- return new PTXInstPrinter(MAI, STI);
+ return new PTXInstPrinter(MAI, MII, MRI, STI);
}
extern "C" void LLVMInitializePTXTargetMC() {
diff --git a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.h b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.h
index 1003b0b..542638a 100644
--- a/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/PTX/MCTargetDesc/PTXMCTargetDesc.h
@@ -15,9 +15,7 @@
#define PTXMCTARGETDESC_H
namespace llvm {
-class MCSubtargetInfo;
class Target;
-class StringRef;
extern Target ThePTX32Target;
extern Target ThePTX64Target;
diff --git a/contrib/llvm/lib/Target/PTX/PTX.h b/contrib/llvm/lib/Target/PTX/PTX.h
index 7d46cce..ffb92cb 100644
--- a/contrib/llvm/lib/Target/PTX/PTX.h
+++ b/contrib/llvm/lib/Target/PTX/PTX.h
@@ -1,4 +1,3 @@
-//===-- PTX.h - Top-level interface for PTX representation ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTX.td b/contrib/llvm/lib/Target/PTX/PTX.td
index 693bb9c..994a68e 100644
--- a/contrib/llvm/lib/Target/PTX/PTX.td
+++ b/contrib/llvm/lib/Target/PTX/PTX.td
@@ -1,4 +1,4 @@
-//===- PTX.td - Describe the PTX Target Machine ---------------*- tblgen -*-==//
+//===-- PTX.td - Describe the PTX Target Machine -----------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp b/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp
index 733744b..0b6ac7b 100644
--- a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.cpp
@@ -14,8 +14,8 @@
#define DEBUG_TYPE "ptx-asm-printer"
-#include "PTX.h"
#include "PTXAsmPrinter.h"
+#include "PTX.h"
#include "PTXMachineFunctionInfo.h"
#include "PTXParamManager.h"
#include "PTXRegisterInfo.h"
@@ -25,7 +25,6 @@
#include "llvm/Function.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/AsmPrinter.h"
@@ -52,23 +51,23 @@ using namespace llvm;
static const char PARAM_PREFIX[] = "__param_";
static const char RETURN_PREFIX[] = "__ret_";
-static const char *getRegisterTypeName(unsigned RegNo,
- const MachineRegisterInfo& MRI) {
- const TargetRegisterClass *TRC = MRI.getRegClass(RegNo);
-
-#define TEST_REGCLS(cls, clsstr) \
- if (PTX::cls ## RegisterClass == TRC) return # clsstr;
-
- TEST_REGCLS(RegPred, pred);
- TEST_REGCLS(RegI16, b16);
- TEST_REGCLS(RegI32, b32);
- TEST_REGCLS(RegI64, b64);
- TEST_REGCLS(RegF32, b32);
- TEST_REGCLS(RegF64, b64);
-#undef TEST_REGCLS
-
- llvm_unreachable("Not in any register class!");
- return NULL;
+static const char *getRegisterTypeName(unsigned RegType) {
+ switch (RegType) {
+ default:
+ llvm_unreachable("Unknown register type");
+ case PTXRegisterType::Pred:
+ return ".pred";
+ case PTXRegisterType::B16:
+ return ".b16";
+ case PTXRegisterType::B32:
+ return ".b32";
+ case PTXRegisterType::B64:
+ return ".b64";
+ case PTXRegisterType::F32:
+ return ".f32";
+ case PTXRegisterType::F64:
+ return ".f64";
+ }
}
static const char *getStateSpaceName(unsigned addressSpace) {
@@ -80,7 +79,6 @@ static const char *getStateSpaceName(unsigned addressSpace) {
case PTXStateSpace::Parameter: return "param";
case PTXStateSpace::Shared: return "shared";
}
- return NULL;
}
static const char *getTypeName(Type* type) {
@@ -139,15 +137,15 @@ void PTXAsmPrinter::EmitStartOfAsmFile(Module &M)
const PTXSubtarget& ST = TM.getSubtarget<PTXSubtarget>();
// Emit the PTX .version and .target attributes
- OutStreamer.EmitRawText(Twine("\t.version " + ST.getPTXVersionString()));
- OutStreamer.EmitRawText(Twine("\t.target " + ST.getTargetString() +
+ OutStreamer.EmitRawText(Twine("\t.version ") + ST.getPTXVersionString());
+ OutStreamer.EmitRawText(Twine("\t.target ") + ST.getTargetString() +
(ST.supportsDouble() ? ""
- : ", map_f64_to_f32")));
+ : ", map_f64_to_f32"));
// .address_size directive is optional, but it must immediately follow
// the .target directive if present within a module
if (ST.supportsPTX23()) {
- std::string addrSize = ST.is64Bit() ? "64" : "32";
- OutStreamer.EmitRawText(Twine("\t.address_size " + addrSize));
+ const char *addrSize = ST.is64Bit() ? "64" : "32";
+ OutStreamer.EmitRawText(Twine("\t.address_size ") + addrSize);
}
OutStreamer.AddBlankLine();
@@ -166,6 +164,11 @@ void PTXAsmPrinter::EmitStartOfAsmFile(Module &M)
OutStreamer.AddBlankLine();
+ // declare external functions
+ for (Module::const_iterator i = M.begin(), e = M.end();
+ i != e; ++i)
+ EmitFunctionDeclaration(i);
+
// declare global variables
for (Module::const_global_iterator i = M.global_begin(), e = M.global_end();
i != e; ++i)
@@ -179,68 +182,47 @@ void PTXAsmPrinter::EmitFunctionBodyStart() {
const PTXParamManager &PM = MFI->getParamManager();
// Print register definitions
- std::string regDefs;
+ SmallString<128> regDefs;
+ raw_svector_ostream os(regDefs);
unsigned numRegs;
// pred
- numRegs = MFI->getNumRegistersForClass(PTX::RegPredRegisterClass);
- if(numRegs > 0) {
- regDefs += "\t.reg .pred %p<";
- regDefs += utostr(numRegs);
- regDefs += ">;\n";
- }
+ numRegs = MFI->countRegisters(PTXRegisterType::Pred, PTXRegisterSpace::Reg);
+ if(numRegs > 0)
+ os << "\t.reg .pred %p<" << numRegs << ">;\n";
// i16
- numRegs = MFI->getNumRegistersForClass(PTX::RegI16RegisterClass);
- if(numRegs > 0) {
- regDefs += "\t.reg .b16 %rh<";
- regDefs += utostr(numRegs);
- regDefs += ">;\n";
- }
+ numRegs = MFI->countRegisters(PTXRegisterType::B16, PTXRegisterSpace::Reg);
+ if(numRegs > 0)
+ os << "\t.reg .b16 %rh<" << numRegs << ">;\n";
// i32
- numRegs = MFI->getNumRegistersForClass(PTX::RegI32RegisterClass);
- if(numRegs > 0) {
- regDefs += "\t.reg .b32 %r<";
- regDefs += utostr(numRegs);
- regDefs += ">;\n";
- }
+ numRegs = MFI->countRegisters(PTXRegisterType::B32, PTXRegisterSpace::Reg);
+ if(numRegs > 0)
+ os << "\t.reg .b32 %r<" << numRegs << ">;\n";
// i64
- numRegs = MFI->getNumRegistersForClass(PTX::RegI64RegisterClass);
- if(numRegs > 0) {
- regDefs += "\t.reg .b64 %rd<";
- regDefs += utostr(numRegs);
- regDefs += ">;\n";
- }
+ numRegs = MFI->countRegisters(PTXRegisterType::B64, PTXRegisterSpace::Reg);
+ if(numRegs > 0)
+ os << "\t.reg .b64 %rd<" << numRegs << ">;\n";
// f32
- numRegs = MFI->getNumRegistersForClass(PTX::RegF32RegisterClass);
- if(numRegs > 0) {
- regDefs += "\t.reg .f32 %f<";
- regDefs += utostr(numRegs);
- regDefs += ">;\n";
- }
+ numRegs = MFI->countRegisters(PTXRegisterType::F32, PTXRegisterSpace::Reg);
+ if(numRegs > 0)
+ os << "\t.reg .f32 %f<" << numRegs << ">;\n";
// f64
- numRegs = MFI->getNumRegistersForClass(PTX::RegF64RegisterClass);
- if(numRegs > 0) {
- regDefs += "\t.reg .f64 %fd<";
- regDefs += utostr(numRegs);
- regDefs += ">;\n";
- }
+ numRegs = MFI->countRegisters(PTXRegisterType::F64, PTXRegisterSpace::Reg);
+ if(numRegs > 0)
+ os << "\t.reg .f64 %fd<" << numRegs << ">;\n";
// Local params
for (PTXParamManager::param_iterator i = PM.local_begin(), e = PM.local_end();
- i != e; ++i) {
- regDefs += "\t.param .b";
- regDefs += utostr(PM.getParamSize(*i));
- regDefs += " ";
- regDefs += PM.getParamName(*i);
- regDefs += ";\n";
- }
+ i != e; ++i)
+ os << "\t.param .b" << PM.getParamSize(*i) << ' ' << PM.getParamName(*i)
+ << ";\n";
- OutStreamer.EmitRawText(Twine(regDefs));
+ OutStreamer.EmitRawText(os.str());
const MachineFrameInfo* FrameInfo = MF->getFrameInfo();
@@ -249,16 +231,13 @@ void PTXAsmPrinter::EmitFunctionBodyStart() {
for (unsigned i = 0, e = FrameInfo->getNumObjects(); i != e; ++i) {
DEBUG(dbgs() << "Size of object: " << FrameInfo->getObjectSize(i) << "\n");
if (FrameInfo->getObjectSize(i) > 0) {
- std::string def = "\t.local .align ";
- def += utostr(FrameInfo->getObjectAlignment(i));
- def += " .b8";
- def += " __local";
- def += utostr(i);
- def += "[";
- def += utostr(FrameInfo->getObjectSize(i)); // Convert to bits
- def += "]";
- def += ";";
- OutStreamer.EmitRawText(Twine(def));
+ OutStreamer.EmitRawText("\t.local .align " +
+ Twine(FrameInfo->getObjectAlignment(i)) +
+ " .b8 __local" +
+ Twine(i) +
+ "[" +
+ Twine(FrameInfo->getObjectSize(i)) +
+ "];");
}
}
@@ -295,36 +274,27 @@ void PTXAsmPrinter::EmitVariableDeclaration(const GlobalVariable *gv) {
assert(gvsym->isUndefined() && "Cannot define a symbol twice!");
- std::string decl;
+ SmallString<128> decl;
+ raw_svector_ostream os(decl);
// check if it is defined in some other translation unit
if (gv->isDeclaration())
- decl += ".extern ";
+ os << ".extern ";
// state space: e.g., .global
- decl += ".";
- decl += getStateSpaceName(gv->getType()->getAddressSpace());
- decl += " ";
+ os << '.' << getStateSpaceName(gv->getType()->getAddressSpace()) << ' ';
// alignment (optional)
unsigned alignment = gv->getAlignment();
- if (alignment != 0) {
- decl += ".align ";
- decl += utostr(gv->getAlignment());
- decl += " ";
- }
+ if (alignment != 0)
+ os << ".align " << gv->getAlignment() << ' ';
if (PointerType::classof(gv->getType())) {
PointerType* pointerTy = dyn_cast<PointerType>(gv->getType());
Type* elementTy = pointerTy->getElementType();
- decl += ".b8 ";
- decl += gvsym->getName();
- decl += "[";
-
- if (elementTy->isArrayTy())
- {
+ if (elementTy->isArrayTy()) {
assert(elementTy->isArrayTy() && "Only pointers to arrays are supported");
ArrayType* arrayTy = dyn_cast<ArrayType>(elementTy);
@@ -333,7 +303,6 @@ void PTXAsmPrinter::EmitVariableDeclaration(const GlobalVariable *gv) {
unsigned numElements = arrayTy->getNumElements();
while (elementTy->isArrayTy()) {
-
arrayTy = dyn_cast<ArrayType>(elementTy);
elementTy = arrayTy->getElementType();
@@ -342,110 +311,91 @@ void PTXAsmPrinter::EmitVariableDeclaration(const GlobalVariable *gv) {
// FIXME: isPrimitiveType() == false for i16?
assert(elementTy->isSingleValueType() &&
- "Non-primitive types are not handled");
+ "Non-primitive types are not handled");
- // Compute the size of the array, in bytes.
- uint64_t arraySize = (elementTy->getPrimitiveSizeInBits() >> 3)
- * numElements;
+ // Find the size of the element in bits
+ unsigned elementSize = elementTy->getPrimitiveSizeInBits();
- decl += utostr(arraySize);
+ os << ".b" << elementSize << ' ' << gvsym->getName()
+ << '[' << numElements << ']';
+ } else {
+ os << ".b8 " << gvsym->getName() << "[]";
}
- decl += "]";
-
// handle string constants (assume ConstantArray means string)
-
- if (gv->hasInitializer())
- {
+ if (gv->hasInitializer()) {
const Constant *C = gv->getInitializer();
- if (const ConstantArray *CA = dyn_cast<ConstantArray>(C))
- {
- decl += " = {";
+ if (const ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
+ os << " = {";
- for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
- {
- if (i > 0) decl += ",";
+ for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
+ if (i > 0)
+ os << ',';
- decl += "0x" +
- utohexstr(cast<ConstantInt>(CA->getOperand(i))->getZExtValue());
+ os << "0x";
+ os.write_hex(cast<ConstantInt>(CA->getOperand(i))->getZExtValue());
}
- decl += "}";
+ os << '}';
}
}
- }
- else {
+ } else {
// Note: this is currently the fall-through case and most likely generates
// incorrect code.
- decl += getTypeName(gv->getType());
- decl += " ";
-
- decl += gvsym->getName();
+ os << getTypeName(gv->getType()) << ' ' << gvsym->getName();
- if (ArrayType::classof(gv->getType()) ||
- PointerType::classof(gv->getType()))
- decl += "[]";
+ if (isa<ArrayType>(gv->getType()) || isa<PointerType>(gv->getType()))
+ os << "[]";
}
- decl += ";";
-
- OutStreamer.EmitRawText(Twine(decl));
+ os << ';';
+ OutStreamer.EmitRawText(os.str());
OutStreamer.AddBlankLine();
}
void PTXAsmPrinter::EmitFunctionEntryLabel() {
// The function label could have already been emitted if two symbols end up
// conflicting due to asm renaming. Detect this and emit an error.
- if (!CurrentFnSym->isUndefined()) {
+ if (!CurrentFnSym->isUndefined())
report_fatal_error("'" + Twine(CurrentFnSym->getName()) +
"' label emitted multiple times to assembly file");
- return;
- }
const PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
const PTXParamManager &PM = MFI->getParamManager();
const bool isKernel = MFI->isKernel();
const PTXSubtarget& ST = TM.getSubtarget<PTXSubtarget>();
- const MachineRegisterInfo& MRI = MF->getRegInfo();
- std::string decl = isKernel ? ".entry" : ".func";
+ SmallString<128> decl;
+ raw_svector_ostream os(decl);
+ os << (isKernel ? ".entry" : ".func");
if (!isKernel) {
- decl += " (";
+ os << " (";
if (ST.useParamSpaceForDeviceArgs()) {
for (PTXParamManager::param_iterator i = PM.ret_begin(), e = PM.ret_end(),
b = i; i != e; ++i) {
- if (i != b) {
- decl += ", ";
- }
+ if (i != b)
+ os << ", ";
- decl += ".param .b";
- decl += utostr(PM.getParamSize(*i));
- decl += " ";
- decl += PM.getParamName(*i);
+ os << ".param .b" << PM.getParamSize(*i) << ' ' << PM.getParamName(*i);
}
} else {
for (PTXMachineFunctionInfo::reg_iterator
i = MFI->retreg_begin(), e = MFI->retreg_end(), b = i;
i != e; ++i) {
- if (i != b) {
- decl += ", ";
- }
- decl += ".reg .";
- decl += getRegisterTypeName(*i, MRI);
- decl += " ";
- decl += MFI->getRegisterName(*i);
+ if (i != b)
+ os << ", ";
+
+ os << ".reg " << getRegisterTypeName(MFI->getRegisterType(*i)) << ' '
+ << MFI->getRegisterName(*i);
}
}
- decl += ")";
+ os << ')';
}
// Print function name
- decl += " ";
- decl += CurrentFnSym->getName().str();
-
- decl += " (";
+ os << ' ' << CurrentFnSym->getName() << " (";
const Function *F = MF->getFunction();
@@ -453,63 +403,80 @@ void PTXAsmPrinter::EmitFunctionEntryLabel() {
if (isKernel || ST.useParamSpaceForDeviceArgs()) {
/*for (PTXParamManager::param_iterator i = PM.arg_begin(), e = PM.arg_end(),
b = i; i != e; ++i) {
- if (i != b) {
- decl += ", ";
- }
+ if (i != b)
+ os << ", ";
- decl += ".param .b";
- decl += utostr(PM.getParamSize(*i));
- decl += " ";
- decl += PM.getParamName(*i);
+ os << ".param .b" << PM.getParamSize(*i) << ' ' << PM.getParamName(*i);
}*/
int Counter = 1;
for (Function::const_arg_iterator i = F->arg_begin(), e = F->arg_end(),
b = i; i != e; ++i) {
if (i != b)
- decl += ", ";
+ os << ", ";
const Type *ArgType = (*i).getType();
- decl += ".param .b";
+ os << ".param .b";
if (ArgType->isPointerTy()) {
if (ST.is64Bit())
- decl += "64";
+ os << "64";
else
- decl += "32";
+ os << "32";
} else {
- decl += utostr(ArgType->getPrimitiveSizeInBits());
+ os << ArgType->getPrimitiveSizeInBits();
}
if (ArgType->isPointerTy() && ST.emitPtrAttribute()) {
const PointerType *PtrType = dyn_cast<const PointerType>(ArgType);
- decl += " .ptr";
+ os << " .ptr";
switch (PtrType->getAddressSpace()) {
default:
llvm_unreachable("Unknown address space in argument");
case PTXStateSpace::Global:
- decl += " .global";
+ os << " .global";
break;
case PTXStateSpace::Shared:
- decl += " .shared";
+ os << " .shared";
break;
}
}
- decl += " __param_";
- decl += utostr(Counter++);
+ os << " __param_" << Counter++;
}
} else {
for (PTXMachineFunctionInfo::reg_iterator
i = MFI->argreg_begin(), e = MFI->argreg_end(), b = i;
i != e; ++i) {
- if (i != b) {
- decl += ", ";
- }
+ if (i != b)
+ os << ", ";
- decl += ".reg .";
- decl += getRegisterTypeName(*i, MRI);
- decl += " ";
- decl += MFI->getRegisterName(*i);
+ os << ".reg " << getRegisterTypeName(MFI->getRegisterType(*i)) << ' '
+ << MFI->getRegisterName(*i);
}
}
- decl += ")";
+ os << ')';
+ OutStreamer.EmitRawText(os.str());
+}
+
+void PTXAsmPrinter::EmitFunctionDeclaration(const Function* func)
+{
+ const PTXSubtarget& ST = TM.getSubtarget<PTXSubtarget>();
+
+ std::string decl = "";
+
+ // hard-coded emission of extern vprintf function
+
+ if (func->getName() == "printf" || func->getName() == "puts") {
+ decl += ".extern .func (.param .b32 __param_1) vprintf (.param .b";
+ if (ST.is64Bit())
+ decl += "64";
+ else
+ decl += "32";
+ decl += " __param_2, .param .b";
+ if (ST.is64Bit())
+ decl += "64";
+ else
+ decl += "32";
+ decl += " __param_3)\n";
+ }
+
OutStreamer.EmitRawText(Twine(decl));
}
@@ -535,7 +502,7 @@ unsigned PTXAsmPrinter::GetOrCreateSourceID(StringRef FileName,
Entry.setValue(SrcId);
// Print out a .file directive to specify files for .loc directives.
- OutStreamer.EmitDwarfFileDirective(SrcId, Entry.getKey());
+ OutStreamer.EmitDwarfFileDirective(SrcId, "", Entry.getKey());
return SrcId;
}
@@ -550,20 +517,18 @@ MCOperand PTXAsmPrinter::GetSymbolRef(const MachineOperand &MO,
MCOperand PTXAsmPrinter::lowerOperand(const MachineOperand &MO) {
MCOperand MCOp;
const PTXMachineFunctionInfo *MFI = MF->getInfo<PTXMachineFunctionInfo>();
- const MCExpr *Expr;
- const char *RegSymbolName;
+ unsigned EncodedReg;
switch (MO.getType()) {
default:
llvm_unreachable("Unknown operand type");
case MachineOperand::MO_Register:
- // We create register operands as symbols, since the PTXInstPrinter class
- // has no way to map virtual registers back to a name without some ugly
- // hacks.
- // FIXME: Figure out a better way to handle virtual register naming.
- RegSymbolName = MFI->getRegisterName(MO.getReg());
- Expr = MCSymbolRefExpr::Create(RegSymbolName, MCSymbolRefExpr::VK_None,
- OutContext);
- MCOp = MCOperand::CreateExpr(Expr);
+ if (MO.getReg() > 0) {
+ // Encode the register
+ EncodedReg = MFI->getEncodedRegister(MO.getReg());
+ } else {
+ EncodedReg = 0;
+ }
+ MCOp = MCOperand::CreateReg(EncodedReg);
break;
case MachineOperand::MO_Immediate:
MCOp = MCOperand::CreateImm(MO.getImm());
@@ -594,4 +559,3 @@ extern "C" void LLVMInitializePTXAsmPrinter() {
RegisterAsmPrinter<PTXAsmPrinter> X(ThePTX32Target);
RegisterAsmPrinter<PTXAsmPrinter> Y(ThePTX64Target);
}
-
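The PTXAsmPrinter hunks above repeatedly replace piecewise std::string concatenation (built with utostr) by a stack-backed SmallString written through raw_svector_ostream, so the text is formatted in one stream and handed to the streamer once. A minimal sketch of the pattern against the LLVM 3.x-era API; the function and parameter names are illustrative, not from the patch:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/MC/MCStreamer.h"
    #include "llvm/Support/raw_ostream.h"

    // Emit ".reg" declarations for one register class, buffered on the stack.
    static void emitPredRegDefs(llvm::MCStreamer &Streamer, unsigned NumPreds) {
      llvm::SmallString<128> Buf;          // no heap allocation until >128 bytes
      llvm::raw_svector_ostream OS(Buf);   // stream adaptor over the buffer
      if (NumPreds > 0)
        OS << "\t.reg .pred %p<" << NumPreds << ">;\n";  // operator<< replaces utostr
      Streamer.EmitRawText(OS.str());      // single emission at the end
    }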
diff --git a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h b/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h
index 538c080..74c8d58 100644
--- a/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h
+++ b/contrib/llvm/lib/Target/PTX/PTXAsmPrinter.h
@@ -1,4 +1,4 @@
-//===-- PTXAsmPrinter.h - Print machine code to a PTX file ----------------===//
+//===-- PTXAsmPrinter.h - Print machine code to a PTX file ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -47,7 +47,7 @@ public:
private:
void EmitVariableDeclaration(const GlobalVariable *gv);
- void EmitFunctionDeclaration();
+ void EmitFunctionDeclaration(const Function* func);
StringMap<unsigned> SourceIdMap;
}; // class PTXAsmPrinter
diff --git a/contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp b/contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp
index 0b653e0..a21d172 100644
--- a/contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXFPRoundingModePass.cpp
@@ -23,9 +23,11 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
// NOTE: PTXFPRoundingModePass should be executed just before emission.
-namespace llvm {
+namespace {
/// PTXFPRoundingModePass - Pass to assign appropriate FP rounding modes to
/// all FP instructions. Essentially, this pass just looks for all FP
/// instructions that have a rounding mode set to RndDefault, and sets an
@@ -58,7 +60,7 @@ namespace llvm {
void initializeMap();
void processInstruction(MachineInstr &MI);
}; // class PTXFPRoundingModePass
-} // namespace llvm
+} // end anonymous namespace
using namespace llvm;
diff --git a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp
index b621b9d..e6e268e 100644
--- a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.cpp
@@ -1,4 +1,4 @@
-//=======- PTXFrameLowering.cpp - PTX Frame Information -------*- C++ -*-=====//
+//===-- PTXFrameLowering.cpp - PTX Frame Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h
index 9320676..831e818 100644
--- a/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h
+++ b/contrib/llvm/lib/Target/PTX/PTXFrameLowering.h
@@ -1,4 +1,4 @@
-//===--- PTXFrameLowering.h - Define frame lowering for PTX --*- C++ -*----===//
+//===-- PTXFrameLowering.h - Define frame lowering for PTX -----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp b/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp
index 3307d91..ef4455b 100644
--- a/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXISelLowering.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "PTX.h"
#include "PTXISelLowering.h"
+#include "PTX.h"
#include "PTXMachineFunctionInfo.h"
#include "PTXRegisterInfo.h"
#include "PTXSubtarget.h"
@@ -20,6 +20,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
@@ -46,6 +47,11 @@ PTXTargetLowering::PTXTargetLowering(TargetMachine &TM)
setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
setMinFunctionAlignment(2);
+ // Let LLVM use loads/stores for all mem* operations
+ maxStoresPerMemcpy = 4096;
+ maxStoresPerMemmove = 4096;
+ maxStoresPerMemset = 4096;
+
////////////////////////////////////
/////////// Expansion //////////////
////////////////////////////////////
@@ -91,7 +97,8 @@ PTXTargetLowering::PTXTargetLowering(TargetMachine &TM)
// customise setcc to use bitwise logic if possible
- setOperationAction(ISD::SETCC, MVT::i1, Custom);
+ //setOperationAction(ISD::SETCC, MVT::i1, Custom);
+ setOperationAction(ISD::SETCC, MVT::i1, Legal);
// customize translation of memory addresses
@@ -150,18 +157,27 @@ SDValue PTXTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue Op1 = Op.getOperand(1);
SDValue Op2 = Op.getOperand(2);
DebugLoc dl = Op.getDebugLoc();
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ //ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
// Look for X == 0, X == 1, X != 0, or X != 1
// We can simplify these to bitwise logic
- if (Op1.getOpcode() == ISD::Constant &&
- (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
- cast<ConstantSDNode>(Op1)->isNullValue()) &&
- (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+ //if (Op1.getOpcode() == ISD::Constant &&
+ // (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
+ // cast<ConstantSDNode>(Op1)->isNullValue()) &&
+ // (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+ //
+ // return DAG.getNode(ISD::AND, dl, MVT::i1, Op0, Op1);
+ //}
- return DAG.getNode(ISD::AND, dl, MVT::i1, Op0, Op1);
- }
+ //ConstantSDNode* COp1 = cast<ConstantSDNode>(Op1);
+ //if(COp1 && COp1->getZExtValue() == 1) {
+ // if(CC == ISD::SETNE) {
+ // return DAG.getNode(PTX::XORripreds, dl, MVT::i1, Op0);
+ // }
+ //}
+
+ llvm_unreachable("setcc was not matched by a pattern!");
return DAG.getNode(ISD::SETCC, dl, MVT::i1, Op0, Op1, Op2);
}
@@ -205,7 +221,6 @@ SDValue PTXTargetLowering::
switch (CallConv) {
default:
llvm_unreachable("Unsupported calling convention");
- break;
case CallingConv::PTX_Kernel:
MFI->setKernel(true);
break;
@@ -235,8 +250,25 @@ SDValue PTXTargetLowering::
}
else {
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- EVT RegVT = Ins[i].VT;
- TargetRegisterClass* TRC = getRegClassFor(RegVT);
+ EVT RegVT = Ins[i].VT;
+ const TargetRegisterClass* TRC = getRegClassFor(RegVT);
+ unsigned RegType;
+
+ // Determine which register class we need
+ if (RegVT == MVT::i1)
+ RegType = PTXRegisterType::Pred;
+ else if (RegVT == MVT::i16)
+ RegType = PTXRegisterType::B16;
+ else if (RegVT == MVT::i32)
+ RegType = PTXRegisterType::B32;
+ else if (RegVT == MVT::i64)
+ RegType = PTXRegisterType::B64;
+ else if (RegVT == MVT::f32)
+ RegType = PTXRegisterType::F32;
+ else if (RegVT == MVT::f64)
+ RegType = PTXRegisterType::F64;
+ else
+ llvm_unreachable("Unknown parameter type");
// Use a unique index in the instruction to prevent instruction folding.
// Yes, this is a hack.
@@ -247,7 +279,7 @@ SDValue PTXTargetLowering::
InVals.push_back(ArgValue);
- MFI->addArgReg(Reg);
+ MFI->addRegister(Reg, RegType, PTXRegisterSpace::Argument);
}
}
@@ -297,26 +329,33 @@ SDValue PTXTargetLowering::
} else {
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
EVT RegVT = Outs[i].VT;
- TargetRegisterClass* TRC = 0;
+ const TargetRegisterClass* TRC;
+ unsigned RegType;
// Determine which register class we need
if (RegVT == MVT::i1) {
TRC = PTX::RegPredRegisterClass;
+ RegType = PTXRegisterType::Pred;
}
else if (RegVT == MVT::i16) {
TRC = PTX::RegI16RegisterClass;
+ RegType = PTXRegisterType::B16;
}
else if (RegVT == MVT::i32) {
TRC = PTX::RegI32RegisterClass;
+ RegType = PTXRegisterType::B32;
}
else if (RegVT == MVT::i64) {
TRC = PTX::RegI64RegisterClass;
+ RegType = PTXRegisterType::B64;
}
else if (RegVT == MVT::f32) {
TRC = PTX::RegF32RegisterClass;
+ RegType = PTXRegisterType::F32;
}
else if (RegVT == MVT::f64) {
TRC = PTX::RegF64RegisterClass;
+ RegType = PTXRegisterType::F64;
}
else {
llvm_unreachable("Unknown parameter type");
@@ -329,7 +368,7 @@ SDValue PTXTargetLowering::
Chain = DAG.getNode(PTXISD::WRITE_PARAM, dl, MVT::Other, Copy, OutReg);
- MFI->addRetReg(Reg);
+ MFI->addRegister(Reg, RegType, PTXRegisterSpace::Return);
}
}
@@ -344,7 +383,7 @@ SDValue PTXTargetLowering::
SDValue
PTXTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -352,38 +391,99 @@ PTXTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction& MF = DAG.getMachineFunction();
- PTXMachineFunctionInfo *MFI = MF.getInfo<PTXMachineFunctionInfo>();
- PTXParamManager &PM = MFI->getParamManager();
+ PTXMachineFunctionInfo *PTXMFI = MF.getInfo<PTXMachineFunctionInfo>();
+ PTXParamManager &PM = PTXMFI->getParamManager();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
assert(getTargetMachine().getSubtarget<PTXSubtarget>().callsAreHandled() &&
"Calls are not handled for the target device");
+ // Identify the callee function
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
+ const Function *function = cast<Function>(GV);
+
+ // allow non-device calls only for printf
+ bool isPrintf = function->getName() == "printf" || function->getName() == "puts";
+
+ assert((isPrintf || function->getCallingConv() == CallingConv::PTX_Device) &&
+ "PTX function calls must be to PTX device functions");
+
+ unsigned outSize = isPrintf ? 2 : Outs.size();
+
std::vector<SDValue> Ops;
// The layout of the ops will be [Chain, #Ins, Ins, Callee, #Outs, Outs]
- Ops.resize(Outs.size() + Ins.size() + 4);
+ Ops.resize(outSize + Ins.size() + 4);
Ops[0] = Chain;
// Identify the callee function
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
- assert(cast<Function>(GV)->getCallingConv() == CallingConv::PTX_Device &&
- "PTX function calls must be to PTX device functions");
Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
Ops[Ins.size()+2] = Callee;
- // Generate STORE_PARAM nodes for each function argument. In PTX, function
- // arguments are explicitly stored into .param variables and passed as
- // arguments. There is no register/stack-based calling convention in PTX.
- Ops[Ins.size()+3] = DAG.getTargetConstant(OutVals.size(), MVT::i32);
- for (unsigned i = 0; i != OutVals.size(); ++i) {
- unsigned Size = OutVals[i].getValueType().getSizeInBits();
- unsigned Param = PM.addLocalParam(Size);
- const std::string &ParamName = PM.getParamName(Param);
- SDValue ParamValue = DAG.getTargetExternalSymbol(ParamName.c_str(),
- MVT::Other);
+ // #Outs
+ Ops[Ins.size()+3] = DAG.getTargetConstant(outSize, MVT::i32);
+
+ if (isPrintf) {
+ // first argument is the address of the global string variable in memory
+ unsigned Param0 = PM.addLocalParam(getPointerTy().getSizeInBits());
+ SDValue ParamValue0 = DAG.getTargetExternalSymbol(PM.getParamName(Param0).c_str(),
+ MVT::Other);
Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain,
- ParamValue, OutVals[i]);
- Ops[i+Ins.size()+4] = ParamValue;
+ ParamValue0, OutVals[0]);
+ Ops[Ins.size()+4] = ParamValue0;
+
+ // alignment is the maximum size of all the arguments
+ unsigned alignment = 0;
+ for (unsigned i = 1; i < OutVals.size(); ++i) {
+ alignment = std::max(alignment,
+ OutVals[i].getValueType().getSizeInBits());
+ }
+
+ // size is the alignment multiplied by the number of arguments
+ unsigned size = alignment * (OutVals.size() - 1);
+
+ // second argument is the address of the stack object (unless no arguments)
+ unsigned Param1 = PM.addLocalParam(getPointerTy().getSizeInBits());
+ SDValue ParamValue1 = DAG.getTargetExternalSymbol(PM.getParamName(Param1).c_str(),
+ MVT::Other);
+ Ops[Ins.size()+5] = ParamValue1;
+
+ if (size > 0)
+ {
+ // create a local stack object to store the arguments
+ unsigned StackObject = MFI->CreateStackObject(size / 8, alignment / 8, false);
+ SDValue FrameIndex = DAG.getFrameIndex(StackObject, getPointerTy());
+
+ // store each of the arguments to the stack in turn
+ for (unsigned int i = 1; i != OutVals.size(); i++) {
+ SDValue FrameAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FrameIndex, DAG.getTargetConstant((i - 1) * 8, getPointerTy()));
+ Chain = DAG.getStore(Chain, dl, OutVals[i], FrameAddr,
+ MachinePointerInfo(),
+ false, false, 0);
+ }
+
+ // copy the address of the local frame index to get the address in non-local space
+ SDValue genericAddr = DAG.getNode(PTXISD::COPY_ADDRESS, dl, getPointerTy(), FrameIndex);
+
+ // store this address in the second argument
+ Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain, ParamValue1, genericAddr);
+ }
+ }
+ else
+ {
+ // Generate STORE_PARAM nodes for each function argument. In PTX, function
+ // arguments are explicitly stored into .param variables and passed as
+ // arguments. There is no register/stack-based calling convention in PTX.
+ for (unsigned i = 0; i != OutVals.size(); ++i) {
+ unsigned Size = OutVals[i].getValueType().getSizeInBits();
+ unsigned Param = PM.addLocalParam(Size);
+ const std::string &ParamName = PM.getParamName(Param);
+ SDValue ParamValue = DAG.getTargetExternalSymbol(ParamName.c_str(),
+ MVT::Other);
+ Chain = DAG.getNode(PTXISD::STORE_PARAM, dl, MVT::Other, Chain,
+ ParamValue, OutVals[i]);
+ Ops[i+Ins.size()+4] = ParamValue;
+ }
}
std::vector<SDValue> InParams;
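The printf/puts path above packs all variadic arguments into one .local stack object whose slot width is the largest argument, then stores the object's generic address (obtained via PTXISD::COPY_ADDRESS, i.e. cvta.local) into vprintf's second parameter. A self-contained sketch of just the sizing arithmetic; argBits stands in for OutVals[i].getValueType().getSizeInBits() and the example values are assumed, not from the patch:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Slot 0 holds the format string; the rest are the variadic arguments.
    static unsigned vprintfBufferBytes(const std::vector<unsigned> &argBits) {
      unsigned alignBits = 0;
      for (std::size_t i = 1; i < argBits.size(); ++i)
        alignBits = std::max<unsigned>(alignBits, argBits[i]);  // widest argument
      return alignBits / 8 * unsigned(argBits.size() - 1);  // one max-width slot per arg
    }

    int main() {
      // printf("%d %f", i, x): i32 + f64 arguments -> 8-byte slots, 16 bytes.
      std::printf("%u\n", vprintfBufferBytes({64, 32, 64}));  // prints 16
    }

Note that the stores in the hunk use a fixed stride of (i - 1) * 8 bytes, which matches this sizing only while the widest argument is 64 bits or narrower.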
diff --git a/contrib/llvm/lib/Target/PTX/PTXISelLowering.h b/contrib/llvm/lib/Target/PTX/PTXISelLowering.h
index 4d25665..33220f4 100644
--- a/contrib/llvm/lib/Target/PTX/PTXISelLowering.h
+++ b/contrib/llvm/lib/Target/PTX/PTXISelLowering.h
@@ -1,4 +1,4 @@
-//==-- PTXISelLowering.h - PTX DAG Lowering Interface ------------*- C++ -*-==//
+//===-- PTXISelLowering.h - PTX DAG Lowering Interface ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,8 +18,6 @@
#include "llvm/Target/TargetLowering.h"
namespace llvm {
-class PTXSubtarget;
-class PTXTargetMachine;
namespace PTXISD {
enum NodeType {
@@ -64,9 +62,8 @@ class PTXTargetLowering : public TargetLowering {
SelectionDAG &DAG) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td b/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td
index 397fdc3..267e834 100644
--- a/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrFormats.td
@@ -1,4 +1,4 @@
-//===- PTXInstrFormats.td - PTX Instruction Formats ----------*- tblgen -*-===//
+//===-- PTXInstrFormats.td - PTX Instruction Formats -------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp
index 1b947a5..443cd54 100644
--- a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- PTXInstrInfo.cpp - PTX Instruction Information ---------------------===//
+//===-- PTXInstrInfo.cpp - PTX Instruction Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,8 +13,8 @@
#define DEBUG_TYPE "ptx-instrinfo"
-#include "PTX.h"
#include "PTXInstrInfo.h"
+#include "PTX.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
@@ -116,7 +116,7 @@ bool PTXInstrInfo::isPredicated(const MachineInstr *MI) const {
}
bool PTXInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- return !isPredicated(MI) && get(MI->getOpcode()).isTerminator();
+ return !isPredicated(MI) && MI->isTerminator();
}
bool PTXInstrInfo::
@@ -184,15 +184,13 @@ AnalyzeBranch(MachineBasicBlock &MBB,
if (MBB.empty())
return true;
- MachineBasicBlock::const_iterator iter = MBB.end();
+ MachineBasicBlock::iterator iter = MBB.end();
const MachineInstr& instLast1 = *--iter;
- const MCInstrDesc &desc1 = instLast1.getDesc();
// for special case that MBB has only 1 instruction
const bool IsSizeOne = MBB.size() == 1;
// if IsSizeOne is true, *--iter and instLast2 are invalid
// we put a dummy value in instLast2 and desc2 since they are used
const MachineInstr& instLast2 = IsSizeOne ? instLast1 : *--iter;
- const MCInstrDesc &desc2 = IsSizeOne ? desc1 : instLast2.getDesc();
DEBUG(dbgs() << "\n");
DEBUG(dbgs() << "AnalyzeBranch: opcode: " << instLast1.getOpcode() << "\n");
@@ -207,7 +205,7 @@ AnalyzeBranch(MachineBasicBlock &MBB,
}
// this block ends with only an unconditional branch
- if (desc1.isUnconditionalBranch() &&
+ if (instLast1.isUnconditionalBranch() &&
// when IsSizeOne is true, it "absorbs" the evaluation of instLast2
(IsSizeOne || !IsAnyKindOfBranch(instLast2))) {
DEBUG(dbgs() << "AnalyzeBranch: ends with only uncond branch\n");
@@ -217,7 +215,7 @@ AnalyzeBranch(MachineBasicBlock &MBB,
// this block ends with a conditional branch and
// it falls through to a successor block
- if (desc1.isConditionalBranch() &&
+ if (instLast1.isConditionalBranch() &&
IsAnySuccessorAlsoLayoutSuccessor(MBB)) {
DEBUG(dbgs() << "AnalyzeBranch: ends with cond branch and fall through\n");
TBB = GetBranchTarget(instLast1);
@@ -233,8 +231,8 @@ AnalyzeBranch(MachineBasicBlock &MBB,
// this block ends with a conditional branch
// followed by an unconditional branch
- if (desc2.isConditionalBranch() &&
- desc1.isUnconditionalBranch()) {
+ if (instLast2.isConditionalBranch() &&
+ instLast1.isUnconditionalBranch()) {
DEBUG(dbgs() << "AnalyzeBranch: ends with cond and uncond branch\n");
TBB = GetBranchTarget(instLast2);
FBB = GetBranchTarget(instLast1);
@@ -302,7 +300,7 @@ void PTXInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(false && "storeRegToStackSlot should not be called for PTX");
+ llvm_unreachable("storeRegToStackSlot should not be called for PTX");
}
void PTXInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
@@ -310,7 +308,7 @@ void PTXInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(false && "loadRegFromStackSlot should not be called for PTX");
+ llvm_unreachable("loadRegFromStackSlot should not be called for PTX");
}
// static helper routines
@@ -341,8 +339,7 @@ void PTXInstrInfo::AddDefaultPredicate(MachineInstr *MI) {
}
bool PTXInstrInfo::IsAnyKindOfBranch(const MachineInstr& inst) {
- const MCInstrDesc &desc = inst.getDesc();
- return desc.isTerminator() || desc.isBranch() || desc.isIndirectBranch();
+ return inst.isTerminator() || inst.isBranch() || inst.isIndirectBranch();
}
bool PTXInstrInfo::
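The assert(false && ...) to llvm_unreachable(...) rewrites above (and the earlier removal of unreachable return statements) follow the stock LLVM idiom: llvm_unreachable prints the message and aborts in +Asserts builds, while in release builds it tells the optimizer the point cannot be reached, so no dead return value is needed after a fully-covered switch. Illustrative only; the function below is hypothetical:

    #include "llvm/Support/ErrorHandling.h"

    static const char *spaceName(unsigned Space) {
      switch (Space) {
      case 0: return "global";
      case 1: return "shared";
      }
      llvm_unreachable("unhandled state space");  // no trailing 'return 0' required
    }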
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h
index 871f1ac..fba89c0 100644
--- a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.h
@@ -1,4 +1,4 @@
-//===- PTXInstrInfo.h - PTX Instruction Information -------------*- C++ -*-===//
+//===-- PTXInstrInfo.h - PTX Instruction Information ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td
index a3fcea9..bead428 100644
--- a/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrInfo.td
@@ -1,4 +1,4 @@
-//===- PTXInstrInfo.td - PTX Instruction defs -----------------*- tblgen-*-===//
+//===-- PTXInstrInfo.td - PTX Instruction defs --------------*- tablegen-*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -680,6 +680,12 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MOVaddr64
: InstPTX<(outs RegI64:$d), (ins i64imm:$a), "mov.u64\t$d, $a",
[(set RegI64:$d, (PTXcopyaddress tglobaladdr:$a))]>;
+ def MOVframe32
+ : InstPTX<(outs RegI32:$d), (ins i32imm:$a), "cvta.local.u32\t$d, $a",
+ [(set RegI32:$d, (PTXcopyaddress frameindex:$a))]>;
+ def MOVframe64
+ : InstPTX<(outs RegI64:$d), (ins i64imm:$a), "cvta.local.u64\t$d, $a",
+ [(set RegI64:$d, (PTXcopyaddress frameindex:$a))]>;
}
// PTX cvt instructions
@@ -802,6 +808,8 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
let isBranch = 1, isTerminator = 1 in {
// FIXME: The pattern part is blank because I cannot (or do not yet know
// how to) use the first operand of PredicateOperand (a RegPred register) here
+ // When this is revisited, make sure to also look at LowerSETCC and try to
+ // fold it into negated predicates, if possible.
def BRAdp
: InstPTX<(outs), (ins brtarget:$d), "bra\t$d",
[/*(brcond pred:$_p, bb:$d)*/]>;
@@ -819,17 +827,17 @@ let hasSideEffects = 1 in {
///===- Parameter Passing Pseudo-Instructions -----------------------------===//
def READPARAMPRED : InstPTX<(outs RegPred:$a), (ins i32imm:$b),
- "mov.pred\t$a, %param$b", []>;
+ "mov.pred\t$a, %arg$b", []>;
def READPARAMI16 : InstPTX<(outs RegI16:$a), (ins i32imm:$b),
- "mov.b16\t$a, %param$b", []>;
+ "mov.b16\t$a, %arg$b", []>;
def READPARAMI32 : InstPTX<(outs RegI32:$a), (ins i32imm:$b),
- "mov.b32\t$a, %param$b", []>;
+ "mov.b32\t$a, %arg$b", []>;
def READPARAMI64 : InstPTX<(outs RegI64:$a), (ins i32imm:$b),
- "mov.b64\t$a, %param$b", []>;
+ "mov.b64\t$a, %arg$b", []>;
def READPARAMF32 : InstPTX<(outs RegF32:$a), (ins i32imm:$b),
- "mov.f32\t$a, %param$b", []>;
+ "mov.f32\t$a, %arg$b", []>;
def READPARAMF64 : InstPTX<(outs RegF64:$a), (ins i32imm:$b),
- "mov.f64\t$a, %param$b", []>;
+ "mov.f64\t$a, %arg$b", []>;
def WRITEPARAMPRED : InstPTX<(outs), (ins RegPred:$a), "//w", []>;
def WRITEPARAMI16 : InstPTX<(outs), (ins RegI16:$a), "//w", []>;
@@ -885,19 +893,26 @@ def : Pat<(f64 (fdiv RegF64:$a, fpimm:$b)),
// FMUL+FADD
def : Pat<(f32 (fadd (fmul RegF32:$a, RegF32:$b), RegF32:$c)),
- (FMADrrr32 RndDefault, RegF32:$a, RegF32:$b, RegF32:$c)>;
+ (FMADrrr32 RndDefault, RegF32:$a, RegF32:$b, RegF32:$c)>,
+ Requires<[SupportsFMA]>;
def : Pat<(f32 (fadd (fmul RegF32:$a, RegF32:$b), fpimm:$c)),
- (FMADrri32 RndDefault, RegF32:$a, RegF32:$b, fpimm:$c)>;
+ (FMADrri32 RndDefault, RegF32:$a, RegF32:$b, fpimm:$c)>,
+ Requires<[SupportsFMA]>;
def : Pat<(f32 (fadd (fmul RegF32:$a, fpimm:$b), fpimm:$c)),
- (FMADrrr32 RndDefault, RegF32:$a, fpimm:$b, fpimm:$c)>;
+ (FMADrrr32 RndDefault, RegF32:$a, fpimm:$b, fpimm:$c)>,
+ Requires<[SupportsFMA]>;
def : Pat<(f32 (fadd (fmul RegF32:$a, RegF32:$b), fpimm:$c)),
- (FMADrri32 RndDefault, RegF32:$a, RegF32:$b, fpimm:$c)>;
+ (FMADrri32 RndDefault, RegF32:$a, RegF32:$b, fpimm:$c)>,
+ Requires<[SupportsFMA]>;
def : Pat<(f64 (fadd (fmul RegF64:$a, RegF64:$b), RegF64:$c)),
- (FMADrrr64 RndDefault, RegF64:$a, RegF64:$b, RegF64:$c)>;
+ (FMADrrr64 RndDefault, RegF64:$a, RegF64:$b, RegF64:$c)>,
+ Requires<[SupportsFMA]>;
def : Pat<(f64 (fadd (fmul RegF64:$a, RegF64:$b), fpimm:$c)),
- (FMADrri64 RndDefault, RegF64:$a, RegF64:$b, fpimm:$c)>;
+ (FMADrri64 RndDefault, RegF64:$a, RegF64:$b, fpimm:$c)>,
+ Requires<[SupportsFMA]>;
def : Pat<(f64 (fadd (fmul RegF64:$a, fpimm:$b), fpimm:$c)),
- (FMADrri64 RndDefault, RegF64:$a, fpimm:$b, fpimm:$c)>;
+ (FMADrri64 RndDefault, RegF64:$a, fpimm:$b, fpimm:$c)>,
+ Requires<[SupportsFMA]>;
// FNEG
def : Pat<(f32 (fneg RegF32:$a)), (FNEGrr32 RndDefault, RegF32:$a)>;
@@ -1004,6 +1019,9 @@ def : Pat<(f64 (sint_to_fp RegI64:$a)), (CVTf64s64 RndDefault, RegI64:$a)>;
def : Pat<(f64 (fextend RegF32:$a)), (CVTf64f32 RegF32:$a)>;
def : Pat<(f64 (bitconvert RegI64:$a)), (MOVf64i64 RegI64:$a)>;
+// setcc - predicate inversion for branch conditions
+def : Pat<(i1 (setcc RegPred:$a, imm:$b, SETNE)),
+ (XORripreds RegPred:$a, imm:$b)>;
///===- Intrinsic Instructions --------------------------------------------===//
include "PTXIntrinsicInstrInfo.td"
diff --git a/contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td b/contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td
index 9b4f56c..7a62684 100644
--- a/contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td
+++ b/contrib/llvm/lib/Target/PTX/PTXInstrLoadStore.td
@@ -1,4 +1,4 @@
-//===- PTXInstrLoadStore.td - PTX Load/Store Instruction Defs -*- tblgen-*-===//
+//===- PTXInstrLoadStore.td - PTX Load/Store Instruction Defs -*- tablegen-*-=//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td b/contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td
index 9de1cb6..3416f1c 100644
--- a/contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td
+++ b/contrib/llvm/lib/Target/PTX/PTXIntrinsicInstrInfo.td
@@ -1,4 +1,4 @@
-//===- PTXIntrinsicInstrInfo.td - Defines PTX intrinsics ---*- tablegen -*-===//
+//===-- PTXIntrinsicInstrInfo.td - Defines PTX intrinsics --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp b/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp
index 468ce93..3ed67a6 100644
--- a/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXMCAsmStreamer.cpp
@@ -1,4 +1,4 @@
-//===- lib/Target/PTX/PTXMCAsmStreamer.cpp - PTX Text Assembly Output -----===//
+//===-- PTXMCAsmStreamer.cpp - PTX Text Assembly Output -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -22,6 +22,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/PathV2.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -161,11 +162,12 @@ public:
virtual void EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit = 0);
- virtual void EmitValueToOffset(const MCExpr *Offset,
+ virtual bool EmitValueToOffset(const MCExpr *Offset,
unsigned char Value = 0);
virtual void EmitFileDirective(StringRef Filename);
- virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Filename);
+ virtual bool EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+ StringRef Filename);
virtual void EmitInstruction(const MCInst &Inst);
@@ -174,7 +176,7 @@ public:
/// indicated by the hasRawTextSupport() predicate.
virtual void EmitRawText(StringRef String);
- virtual void Finish();
+ virtual void FinishImpl();
/// @}
@@ -476,8 +478,8 @@ void PTXMCAsmStreamer::EmitValueToAlignment(unsigned ByteAlignment,
void PTXMCAsmStreamer::EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit) {}
-void PTXMCAsmStreamer::EmitValueToOffset(const MCExpr *Offset,
- unsigned char Value) {}
+bool PTXMCAsmStreamer::EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value) {return false;}
void PTXMCAsmStreamer::EmitFileDirective(StringRef Filename) {
@@ -489,11 +491,20 @@ void PTXMCAsmStreamer::EmitFileDirective(StringRef Filename) {
// FIXME: should we inherit from MCAsmStreamer?
bool PTXMCAsmStreamer::EmitDwarfFileDirective(unsigned FileNo,
- StringRef Filename){
+ StringRef Directory,
+ StringRef Filename) {
+ if (!Directory.empty()) {
+ if (sys::path::is_absolute(Filename))
+ return EmitDwarfFileDirective(FileNo, "", Filename);
+ SmallString<128> FullPathName = Directory;
+ sys::path::append(FullPathName, Filename);
+ return EmitDwarfFileDirective(FileNo, "", FullPathName);
+ }
+
OS << "\t.file\t" << FileNo << ' ';
PrintQuotedString(Filename, OS);
EmitEOL();
- return this->MCStreamer::EmitDwarfFileDirective(FileNo, Filename);
+ return this->MCStreamer::EmitDwarfFileDirective(FileNo, Directory, Filename);
}
void PTXMCAsmStreamer::AddEncodingComment(const MCInst &Inst) {}
@@ -529,12 +540,13 @@ void PTXMCAsmStreamer::EmitRawText(StringRef String) {
EmitEOL();
}
-void PTXMCAsmStreamer::Finish() {}
+void PTXMCAsmStreamer::FinishImpl() {}
namespace llvm {
MCStreamer *createPTXAsmStreamer(MCContext &Context,
formatted_raw_ostream &OS,
bool isVerboseAsm, bool useLoc, bool useCFI,
+ bool useDwarfDirectory,
MCInstPrinter *IP,
MCCodeEmitter *CE, MCAsmBackend *MAB,
bool ShowInst) {
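The widened EmitDwarfFileDirective override above takes the directory operand added in this LLVM drop and folds it into the filename unless the filename is already absolute. The joining rule, sketched with the same PathV2 helpers the patch includes; the wrapper name is illustrative:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/PathV2.h"

    // Join a .file directory and filename the way the streamer override does.
    static llvm::SmallString<128> dwarfFilePath(llvm::StringRef Dir,
                                                llvm::StringRef File) {
      llvm::SmallString<128> Path;
      if (Dir.empty() || llvm::sys::path::is_absolute(File)) {
        Path.append(File.begin(), File.end());  // an absolute name wins outright
      } else {
        Path.append(Dir.begin(), Dir.end());
        llvm::sys::path::append(Path, File);    // inserts a separator as needed
      }
      return Path;
    }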
diff --git a/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp b/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp
index b33a273..172a0e0 100644
--- a/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXMFInfoExtract.cpp
@@ -22,9 +22,11 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
// NOTE: PTXMFInfoExtract must run after register allocation!
-namespace llvm {
+namespace {
/// PTXMFInfoExtract - PTX-specific code to extract PTX machine
/// function information for PTXAsmPrinter
///
@@ -42,7 +44,7 @@ namespace llvm {
return "PTX Machine Function Info Extractor";
}
}; // class PTXMFInfoExtract
-} // namespace llvm
+} // end anonymous namespace
using namespace llvm;
@@ -56,7 +58,22 @@ bool PTXMFInfoExtract::runOnMachineFunction(MachineFunction &MF) {
for (unsigned i = 0; i < MRI.getNumVirtRegs(); ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
- MFI->addVirtualRegister(TRC, Reg);
+ unsigned RegType;
+ if (TRC == PTX::RegPredRegisterClass)
+ RegType = PTXRegisterType::Pred;
+ else if (TRC == PTX::RegI16RegisterClass)
+ RegType = PTXRegisterType::B16;
+ else if (TRC == PTX::RegI32RegisterClass)
+ RegType = PTXRegisterType::B32;
+ else if (TRC == PTX::RegI64RegisterClass)
+ RegType = PTXRegisterType::B64;
+ else if (TRC == PTX::RegF32RegisterClass)
+ RegType = PTXRegisterType::F32;
+ else if (TRC == PTX::RegF64RegisterClass)
+ RegType = PTXRegisterType::F64;
+ else
+ llvm_unreachable("Unknown register class.");
+ MFI->addRegister(Reg, RegType, PTXRegisterSpace::Reg);
}
return false;
diff --git a/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.cpp
new file mode 100644
index 0000000..60acfc7
--- /dev/null
+++ b/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.cpp
@@ -0,0 +1,14 @@
+//===-- PTXMachineFunctionInfo.cpp - PTX machine function info ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PTXMachineFunctionInfo.h"
+
+using namespace llvm;
+
+void PTXMachineFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h b/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h
index 3b985f7..bb7574c 100644
--- a/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/PTX/PTXMachineFunctionInfo.h
@@ -1,4 +1,4 @@
-//===- PTXMachineFuctionInfo.h - PTX machine function info -------*- C++ -*-==//
+//===-- PTXMachineFunctionInfo.h - PTX machine function info -----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -30,20 +30,27 @@ namespace llvm {
/// contains private PTX target-specific information for each MachineFunction.
///
class PTXMachineFunctionInfo : public MachineFunctionInfo {
-private:
+ virtual void anchor();
bool IsKernel;
DenseSet<unsigned> RegArgs;
DenseSet<unsigned> RegRets;
- typedef std::vector<unsigned> RegisterList;
- typedef DenseMap<const TargetRegisterClass*, RegisterList> RegisterMap;
- typedef DenseMap<unsigned, std::string> RegisterNameMap;
typedef DenseMap<int, std::string> FrameMap;
- RegisterMap UsedRegs;
- RegisterNameMap RegNames;
FrameMap FrameSymbols;
+ struct RegisterInfo {
+ unsigned Reg;
+ unsigned Type;
+ unsigned Space;
+ unsigned Offset;
+ unsigned Encoded;
+ };
+
+ typedef DenseMap<unsigned, RegisterInfo> RegisterInfoMap;
+
+ RegisterInfoMap RegInfo;
+
PTXParamManager ParamManager;
public:
@@ -51,13 +58,7 @@ public:
PTXMachineFunctionInfo(MachineFunction &MF)
: IsKernel(false) {
- UsedRegs[PTX::RegPredRegisterClass] = RegisterList();
- UsedRegs[PTX::RegI16RegisterClass] = RegisterList();
- UsedRegs[PTX::RegI32RegisterClass] = RegisterList();
- UsedRegs[PTX::RegI64RegisterClass] = RegisterList();
- UsedRegs[PTX::RegF32RegisterClass] = RegisterList();
- UsedRegs[PTX::RegF64RegisterClass] = RegisterList();
- }
+ }
/// getParamManager - Returns the PTXParamManager instance for this function.
PTXParamManager& getParamManager() { return ParamManager; }
@@ -78,69 +79,106 @@ public:
reg_iterator retreg_begin() const { return RegRets.begin(); }
reg_iterator retreg_end() const { return RegRets.end(); }
+ /// addRegister - Adds a virtual register to the set of all used registers
+ void addRegister(unsigned Reg, unsigned RegType, unsigned RegSpace) {
+ if (!RegInfo.count(Reg)) {
+ RegisterInfo Info;
+ Info.Reg = Reg;
+ Info.Type = RegType;
+ Info.Space = RegSpace;
+
+ // Determine register offset
+ Info.Offset = 0;
+ for(RegisterInfoMap::const_iterator i = RegInfo.begin(),
+ e = RegInfo.end(); i != e; ++i) {
+ const RegisterInfo& RI = i->second;
+ if (RI.Space == RegSpace)
+ if (RI.Space != PTXRegisterSpace::Reg || RI.Type == Info.Type)
+ Info.Offset++;
+ }
+
+ // Encode the register data into a single register number
+ Info.Encoded = (Info.Offset << 6) | (Info.Type << 3) | Info.Space;
+
+ RegInfo[Reg] = Info;
+
+ if (RegSpace == PTXRegisterSpace::Argument)
+ RegArgs.insert(Reg);
+ else if (RegSpace == PTXRegisterSpace::Return)
+ RegRets.insert(Reg);
+ }
+ }
+
+ /// countRegisters - Returns the number of registers of the given type and
+ /// space.
+ unsigned countRegisters(unsigned RegType, unsigned RegSpace) const {
+ unsigned Count = 0;
+ for(RegisterInfoMap::const_iterator i = RegInfo.begin(), e = RegInfo.end();
+ i != e; ++i) {
+ const RegisterInfo& RI = i->second;
+ if (RI.Type == RegType && RI.Space == RegSpace)
+ Count++;
+ }
+ return Count;
+ }
+
+ /// getEncodedRegister - Returns the encoded value of the register.
+ unsigned getEncodedRegister(unsigned Reg) const {
+ return RegInfo.lookup(Reg).Encoded;
+ }
+
/// addRetReg - Adds a register to the set of return-value registers.
void addRetReg(unsigned Reg) {
if (!RegRets.count(Reg)) {
RegRets.insert(Reg);
- std::string name;
- name = "%ret";
- name += utostr(RegRets.size() - 1);
- RegNames[Reg] = name;
}
}
/// addArgReg - Adds a register to the set of function argument registers.
void addArgReg(unsigned Reg) {
RegArgs.insert(Reg);
- std::string name;
- name = "%param";
- name += utostr(RegArgs.size() - 1);
- RegNames[Reg] = name;
- }
-
- /// addVirtualRegister - Adds a virtual register to the set of all used
- /// registers in the function.
- void addVirtualRegister(const TargetRegisterClass *TRC, unsigned Reg) {
- std::string name;
-
- // Do not count registers that are argument/return registers.
- if (!RegRets.count(Reg) && !RegArgs.count(Reg)) {
- UsedRegs[TRC].push_back(Reg);
- if (TRC == PTX::RegPredRegisterClass)
- name = "%p";
- else if (TRC == PTX::RegI16RegisterClass)
- name = "%rh";
- else if (TRC == PTX::RegI32RegisterClass)
- name = "%r";
- else if (TRC == PTX::RegI64RegisterClass)
- name = "%rd";
- else if (TRC == PTX::RegF32RegisterClass)
- name = "%f";
- else if (TRC == PTX::RegF64RegisterClass)
- name = "%fd";
- else
- llvm_unreachable("Invalid register class");
-
- name += utostr(UsedRegs[TRC].size() - 1);
- RegNames[Reg] = name;
- }
}
/// getRegisterName - Returns the name of the specified virtual register. This
/// name is used during PTX emission.
- const char *getRegisterName(unsigned Reg) const {
- if (RegNames.count(Reg))
- return RegNames.find(Reg)->second.c_str();
+ std::string getRegisterName(unsigned Reg) const {
+ if (RegInfo.count(Reg)) {
+ const RegisterInfo& RI = RegInfo.lookup(Reg);
+ std::string Name;
+ raw_string_ostream NameStr(Name);
+ decodeRegisterName(NameStr, RI.Encoded);
+ NameStr.flush();
+ return Name;
+ }
else if (Reg == PTX::NoRegister)
return "%noreg";
else
llvm_unreachable("Register not in register name map");
}
- /// getNumRegistersForClass - Returns the number of virtual registers that are
- /// used for the specified register class.
- unsigned getNumRegistersForClass(const TargetRegisterClass *TRC) const {
- return UsedRegs.lookup(TRC).size();
+ /// getEncodedRegisterName - Returns the name of the encoded register.
+ std::string getEncodedRegisterName(unsigned EncodedReg) const {
+ std::string Name;
+ raw_string_ostream NameStr(Name);
+ decodeRegisterName(NameStr, EncodedReg);
+ NameStr.flush();
+ return Name;
+ }
+
+ /// getRegisterType - Returns the type of the specified virtual register.
+ unsigned getRegisterType(unsigned Reg) const {
+ if (RegInfo.count(Reg))
+ return RegInfo.lookup(Reg).Type;
+ else
+ llvm_unreachable("Unknown register");
+ }
+
+ /// getOffsetForRegister - Returns the offset of the virtual register
+ unsigned getOffsetForRegister(unsigned Reg) const {
+ if (RegInfo.count(Reg))
+ return RegInfo.lookup(Reg).Offset;
+ else
+ return 0;
}
/// getFrameSymbol - Returns the symbol name for the given FrameIndex.
@@ -148,13 +186,13 @@ public:
if (FrameSymbols.count(FrameIndex)) {
return FrameSymbols.lookup(FrameIndex).c_str();
} else {
- std::string Name = "__local";
- Name += utostr(FrameIndex);
+ std::string Name = "__local";
+ Name += utostr(FrameIndex);
// The whole point of caching this name is to ensure the pointer we pass
// to any getExternalSymbol() calls will remain valid for the lifetime of
// the back-end instance. This is to work around an issue in SelectionDAG
// where symbol names are expected to be life-long strings.
- FrameSymbols[FrameIndex] = Name;
+ FrameSymbols[FrameIndex] = Name;
return FrameSymbols[FrameIndex].c_str();
}
}
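PTXMachineFunctionInfo now tracks each virtual register in a RegisterInfo record and packs it into a single number, Encoded = (Offset << 6) | (Type << 3) | Space. Reading the shift amounts as field widths (3 bits for the space, 3 bits for the type, the offset above them), the inverse mapping is, as a sketch:

    // Hypothetical decoder for the packed register number used above; the
    // 3-bit field widths are inferred from the shifts in addRegister.
    struct DecodedReg {
      unsigned Offset, Type, Space;
    };

    static DecodedReg decodeRegister(unsigned Encoded) {
      DecodedReg R;
      R.Space  = Encoded & 0x7;         // bits 0-2: PTXRegisterSpace
      R.Type   = (Encoded >> 3) & 0x7;  // bits 3-5: PTXRegisterType
      R.Offset = Encoded >> 6;          // bits 6+ : index within type/space
      return R;
    }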
diff --git a/contrib/llvm/lib/Target/PTX/PTXParamManager.cpp b/contrib/llvm/lib/Target/PTX/PTXParamManager.cpp
index 7753787..cc1cc71 100644
--- a/contrib/llvm/lib/Target/PTX/PTXParamManager.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXParamManager.cpp
@@ -1,4 +1,4 @@
-//===- PTXParamManager.cpp - Manager for .param variables -------*- C++ -*-===//
+//===-- PTXParamManager.cpp - Manager for .param variables ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "PTX.h"
#include "PTXParamManager.h"
+#include "PTX.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Target/PTX/PTXParamManager.h b/contrib/llvm/lib/Target/PTX/PTXParamManager.h
index 9fd2de5..92e7728 100644
--- a/contrib/llvm/lib/Target/PTX/PTXParamManager.h
+++ b/contrib/llvm/lib/Target/PTX/PTXParamManager.h
@@ -1,4 +1,4 @@
-//===- PTXParamManager.h - Manager for .param variables ----------*- C++ -*-==//
+//===-- PTXParamManager.h - Manager for .param variables --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,6 +17,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include <string>
namespace llvm {
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp b/contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp
index 2d2d5c3..7fd5375 100644
--- a/contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXRegAlloc.cpp
@@ -24,10 +24,7 @@ namespace {
class PTXRegAlloc : public MachineFunctionPass {
public:
static char ID;
- PTXRegAlloc() : MachineFunctionPass(ID) {
- initializePHIEliminationPass(*PassRegistry::getPassRegistry());
- initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
- }
+ PTXRegAlloc() : MachineFunctionPass(ID) {}
virtual const char* getPassName() const {
return "PTX Register Allocator";
@@ -35,8 +32,6 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
- AU.addRequiredID(PHIEliminationID);
- AU.addRequiredID(TwoAddressInstructionPassID);
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp
index c806266..b6ffd38 100644
--- a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- PTXRegisterInfo.cpp - PTX Register Information ---------------------===//
+//===-- PTXRegisterInfo.cpp - PTX Register Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "PTX.h"
#include "PTXRegisterInfo.h"
+#include "PTX.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -31,44 +31,8 @@ PTXRegisterInfo::PTXRegisterInfo(PTXTargetMachine &TM,
: PTXGenRegisterInfo(0), TII(tii) {
}
-void PTXRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj,
- RegScavenger *RS) const {
- unsigned Index;
- MachineInstr &MI = *II;
- //MachineBasicBlock &MBB = *MI.getParent();
- //DebugLoc dl = MI.getDebugLoc();
- //MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
-
- //unsigned Reg = MRI.createVirtualRegister(PTX::RegF32RegisterClass);
-
+void PTXRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator /*II*/,
+ int /*SPAdj*/,
+ RegScavenger * /*RS*/) const {
llvm_unreachable("FrameIndex should have been previously eliminated!");
-
- Index = 0;
- while (!MI.getOperand(Index).isFI()) {
- ++Index;
- assert(Index < MI.getNumOperands() &&
- "Instr does not have a FrameIndex operand!");
- }
-
- int FrameIndex = MI.getOperand(Index).getIndex();
-
- DEBUG(dbgs() << "eliminateFrameIndex: " << MI);
- DEBUG(dbgs() << "- SPAdj: " << SPAdj << "\n");
- DEBUG(dbgs() << "- FrameIndex: " << FrameIndex << "\n");
-
- //MachineInstr* MI2 = BuildMI(MBB, II, dl, TII.get(PTX::LOAD_LOCAL_F32))
- //.addReg(Reg, RegState::Define).addImm(FrameIndex);
- //if (MI2->findFirstPredOperandIdx() == -1) {
- // MI2->addOperand(MachineOperand::CreateReg(PTX::NoRegister, /*IsDef=*/false));
- // MI2->addOperand(MachineOperand::CreateImm(PTX::PRED_NORMAL));
- //}
- //MI2->dump();
-
- //MachineOperand ESOp = MachineOperand::CreateES("__local__");
-
- // This frame index is post stack slot re-use assignments
- //MI.getOperand(Index).ChangeToRegister(Reg, false);
- MI.getOperand(Index).ChangeToImmediate(FrameIndex);
- //MI.getOperand(Index) = ESOp;
}
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h
index 55fafe4..5614ce7 100644
--- a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h
+++ b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.h
@@ -1,4 +1,4 @@
-//===- PTXRegisterInfo.h - PTX Register Information Impl --------*- C++ -*-===//
+//===-- PTXRegisterInfo.h - PTX Register Information Impl -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -32,9 +32,9 @@ public:
PTXRegisterInfo(PTXTargetMachine &TM,
const TargetInstrInfo &tii);
- virtual const unsigned
+ virtual const uint16_t
*getCalleeSavedRegs(const MachineFunction *MF = 0) const {
- static const unsigned CalleeSavedRegs[] = { 0 };
+ static const uint16_t CalleeSavedRegs[] = { 0 };
return CalleeSavedRegs; // save nothing
}
@@ -49,7 +49,6 @@ public:
virtual unsigned getFrameRegister(const MachineFunction &MF) const {
llvm_unreachable("PTX does not have a frame register");
- return 0;
}
}; // struct PTXRegisterInfo
} // namespace llvm
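
For context, the getCalleeSavedRegs hunk above tracks an LLVM-wide change that narrowed callee-saved register lists from unsigned to uint16_t, shrinking the static tables. A minimal sketch of the convention, assuming a hypothetical target that, like PTX, saves nothing (the list is terminated by a 0 sentinel):

    #include <stdint.h>

    // Hypothetical stand-in for a *GenRegisterInfo callee-saved list.
    static const uint16_t SketchCalleeSavedRegs[] = { 0 }; // 0 terminates the list

    const uint16_t *getCalleeSavedRegsSketch() {
      return SketchCalleeSavedRegs; // callers iterate until the 0 sentinel
    }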
diff --git a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td
index 6ed6d3f..e8b262e 100644
--- a/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td
+++ b/contrib/llvm/lib/Target/PTX/PTXRegisterInfo.td
@@ -1,5 +1,4 @@
-
-//===- PTXRegisterInfo.td - PTX Register defs ----------------*- tblgen -*-===//
+//===-- PTXRegisterInfo.td - PTX Register defs -------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp
index 50ef14a..a116fab 100644
--- a/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXSelectionDAGInfo.cpp
@@ -70,7 +70,7 @@ PTXSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
DAG.getNode(ISD::ADD, dl, PointerType, Src,
DAG.getConstant(SrcOff, PointerType)),
SrcPtrInfo.getWithOffset(SrcOff), isVolatile,
- false, 0);
+ false, false, 0);
TFOps[i] = Loads[i].getValue(1);
SrcOff += VTSize;
}
@@ -108,7 +108,8 @@ PTXSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
Loads[i] = DAG.getLoad(VT, dl, Chain,
DAG.getNode(ISD::ADD, dl, PointerType, Src,
DAG.getConstant(SrcOff, PointerType)),
- SrcPtrInfo.getWithOffset(SrcOff), false, false, 0);
+ SrcPtrInfo.getWithOffset(SrcOff), false, false,
+ false, 0);
TFOps[i] = Loads[i].getValue(1);
++i;
SrcOff += VTSize;
diff --git a/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp b/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp
index 1eb57d2..454f64e 100644
--- a/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXSubtarget.cpp
@@ -1,4 +1,4 @@
-//===- PTXSubtarget.cpp - PTX Subtarget Information ---------------*- C++ -*-=//
+//===-- PTXSubtarget.cpp - PTX Subtarget Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -22,6 +22,8 @@
using namespace llvm;
+void PTXSubtarget::anchor() { }
+
PTXSubtarget::PTXSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool is64Bit)
: PTXGenSubtargetInfo(TT, CPU, FS),
@@ -57,10 +59,10 @@ std::string PTXSubtarget::getTargetString() const {
std::string PTXSubtarget::getPTXVersionString() const {
switch(PTXVersion) {
- default: llvm_unreachable("Unknown PTX version");
case PTX_VERSION_2_0: return "2.0";
case PTX_VERSION_2_1: return "2.1";
case PTX_VERSION_2_2: return "2.2";
case PTX_VERSION_2_3: return "2.3";
}
+ llvm_unreachable("Invalid PTX version");
}
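
The PTXSubtarget.cpp hunk above moves llvm_unreachable out of the switch's default case and places it after a switch that now covers every enumerator. The point of the idiom: without a default, the compiler's -Wswitch warning fires if a new PTX_VERSION_* value is added but not handled, while the trailing llvm_unreachable still marks the impossible fall-through and silences missing-return warnings. A small sketch with hypothetical enumerators:

    #include "llvm/Support/ErrorHandling.h" // for llvm_unreachable

    enum SketchVersion { V2_0, V2_1 }; // hypothetical

    const char *versionString(SketchVersion V) {
      switch (V) { // no default: -Wswitch flags any unhandled enumerator
      case V2_0: return "2.0";
      case V2_1: return "2.1";
      }
      llvm_unreachable("Invalid version"); // unreachable fall-through
    }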
diff --git a/contrib/llvm/lib/Target/PTX/PTXSubtarget.h b/contrib/llvm/lib/Target/PTX/PTXSubtarget.h
index b946d7c..ce93fef 100644
--- a/contrib/llvm/lib/Target/PTX/PTXSubtarget.h
+++ b/contrib/llvm/lib/Target/PTX/PTXSubtarget.h
@@ -1,4 +1,4 @@
-//====-- PTXSubtarget.h - Define Subtarget for the PTX ---------*- C++ -*--===//
+//===-- PTXSubtarget.h - Define Subtarget for the PTX -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,6 +23,7 @@ namespace llvm {
class StringRef;
class PTXSubtarget : public PTXGenSubtargetInfo {
+ virtual void anchor();
public:
/**
diff --git a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp
index 449a3d9..c55a658 100644
--- a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.cpp
@@ -11,13 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "PTX.h"
#include "PTXTargetMachine.h"
+#include "PTX.h"
#include "llvm/PassManager.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
@@ -26,6 +25,7 @@
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
@@ -37,8 +37,6 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -46,7 +44,7 @@ using namespace llvm;
namespace llvm {
MCStreamer *createPTXAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
bool isVerboseAsm, bool useLoc,
- bool useCFI,
+ bool useCFI, bool useDwarfDirectory,
MCInstPrinter *InstPrint,
MCCodeEmitter *CE,
MCAsmBackend *MAB,
@@ -67,29 +65,16 @@ namespace {
"e-p:32:32-i64:32:32-f64:32:32-v128:32:128-v64:32:64-n32:64";
const char* DataLayout64 =
"e-p:64:64-i64:32:32-f64:32:32-v128:32:128-v64:32:64-n32:64";
-
- // Copied from LLVMTargetMachine.cpp
- void printNoVerify(PassManagerBase &PM, const char *Banner) {
- if (PrintMachineCode)
- PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
- }
-
- void printAndVerify(PassManagerBase &PM,
- const char *Banner) {
- if (PrintMachineCode)
- PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
-
- //if (VerifyMachineCode)
- // PM.add(createMachineVerifierPass(Banner));
- }
}
// DataLayout and FrameLowering are filled with dummy data
PTXTargetMachine::PTXTargetMachine(const Target &T,
StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL,
bool is64Bit)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
DataLayout(is64Bit ? DataLayout64 : DataLayout32),
Subtarget(TT, CPU, FS, is64Bit),
FrameLowering(Subtarget),
@@ -98,276 +83,83 @@ PTXTargetMachine::PTXTargetMachine(const Target &T,
TLInfo(*this) {
}
+void PTX32TargetMachine::anchor() { }
+
PTX32TargetMachine::PTX32TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : PTXTargetMachine(T, TT, CPU, FS, RM, CM, false) {
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : PTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {
}
+void PTX64TargetMachine::anchor() { }
+
PTX64TargetMachine::PTX64TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : PTXTargetMachine(T, TT, CPU, FS, RM, CM, true) {
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : PTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {
}
-bool PTXTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createPTXISelDag(*this, OptLevel));
- return false;
+namespace llvm {
+/// PTX Code Generator Pass Configuration Options.
+class PTXPassConfig : public TargetPassConfig {
+public:
+ PTXPassConfig(PTXTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ PTXTargetMachine &getPTXTargetMachine() const {
+ return getTM<PTXTargetMachine>();
+ }
+
+ bool addInstSelector();
+ FunctionPass *createTargetRegisterAllocator(bool);
+ void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
+ bool addPostRegAlloc();
+ void addMachineLateOptimization();
+ bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *PTXTargetMachine::createPassConfig(PassManagerBase &PM) {
+ PTXPassConfig *PassConfig = new PTXPassConfig(this, PM);
+ PassConfig->disablePass(PrologEpilogCodeInserterID);
+ return PassConfig;
}
-bool PTXTargetMachine::addPostRegAlloc(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- // PTXMFInfoExtract must after register allocation!
- //PM.add(createPTXMFInfoExtract(*this, OptLevel));
+bool PTXPassConfig::addInstSelector() {
+ PM.add(createPTXISelDag(getPTXTargetMachine(), getOptLevel()));
return false;
}
-bool PTXTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
- formatted_raw_ostream &Out,
- CodeGenFileType FileType,
- CodeGenOpt::Level OptLevel,
- bool DisableVerify) {
- // This is mostly based on LLVMTargetMachine::addPassesToEmitFile
-
- // Add common CodeGen passes.
- MCContext *Context = 0;
- if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify, Context))
- return true;
- assert(Context != 0 && "Failed to get MCContext");
-
- if (hasMCSaveTempLabels())
- Context->setAllowTemporaryLabels(false);
-
- const MCAsmInfo &MAI = *getMCAsmInfo();
- const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
- OwningPtr<MCStreamer> AsmStreamer;
-
- switch (FileType) {
- default: return true;
- case CGFT_AssemblyFile: {
- MCInstPrinter *InstPrinter =
- getTarget().createMCInstPrinter(MAI.getAssemblerDialect(), MAI, STI);
-
- // Create a code emitter if asked to show the encoding.
- MCCodeEmitter *MCE = 0;
- MCAsmBackend *MAB = 0;
-
- MCStreamer *S = getTarget().createAsmStreamer(*Context, Out,
- true, /* verbose asm */
- hasMCUseLoc(),
- hasMCUseCFI(),
- InstPrinter,
- MCE, MAB,
- false /* show MC encoding */);
- AsmStreamer.reset(S);
- break;
- }
- case CGFT_ObjectFile: {
- llvm_unreachable("Object file emission is not supported with PTX");
- }
- case CGFT_Null:
- // The Null output is intended for use for performance analysis and testing,
- // not real users.
- AsmStreamer.reset(createNullStreamer(*Context));
- break;
- }
-
- // MC Logging
- //AsmStreamer.reset(createLoggingStreamer(AsmStreamer.take(), errs()));
-
- // Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
- FunctionPass *Printer = getTarget().createAsmPrinter(*this, *AsmStreamer);
- if (Printer == 0)
- return true;
-
- // If successful, createAsmPrinter took ownership of AsmStreamer.
- AsmStreamer.take();
+FunctionPass *PTXPassConfig::createTargetRegisterAllocator(bool /*Optimized*/) {
+ return createPTXRegisterAllocator();
+}
- PM.add(Printer);
+// Modify the optimized compilation path to bypass optimized register allocation.
+void PTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
+ addFastRegAlloc(RegAllocPass);
+}
- PM.add(createGCInfoDeleter());
+bool PTXPassConfig::addPostRegAlloc() {
+ // PTXMFInfoExtract must run after register allocation!
+ //PM.add(createPTXMFInfoExtract(getPTXTargetMachine()));
return false;
}
-bool PTXTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
- bool DisableVerify,
- MCContext *&OutContext) {
- // Add standard LLVM codegen passes.
- // This is derived from LLVMTargetMachine::addCommonCodeGenPasses, with some
- // modifications for the PTX target.
-
- // Standard LLVM-Level Passes.
-
- // Basic AliasAnalysis support.
- // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
- // BasicAliasAnalysis wins if they disagree. This is intended to help
- // support "obvious" type-punning idioms.
- PM.add(createTypeBasedAliasAnalysisPass());
- PM.add(createBasicAliasAnalysisPass());
-
- // Before running any passes, run the verifier to determine if the input
- // coming from the front-end and/or optimizer is valid.
- if (!DisableVerify)
- PM.add(createVerifierPass());
-
- // Run loop strength reduction before anything else.
- if (OptLevel != CodeGenOpt::None) {
- PM.add(createLoopStrengthReducePass(getTargetLowering()));
- //PM.add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
- }
-
- PM.add(createGCLoweringPass());
-
- // Make sure that no unreachable blocks are instruction selected.
- PM.add(createUnreachableBlockEliminationPass());
-
- PM.add(createLowerInvokePass(getTargetLowering()));
- // The lower invoke pass may create unreachable code. Remove it.
- PM.add(createUnreachableBlockEliminationPass());
-
- if (OptLevel != CodeGenOpt::None)
- PM.add(createCodeGenPreparePass(getTargetLowering()));
+/// Add passes that optimize machine instructions after register allocation.
+void PTXPassConfig::addMachineLateOptimization() {
+ if (addPass(BranchFolderPassID) != &NoPassID)
+ printAndVerify("After BranchFolding");
- PM.add(createStackProtectorPass(getTargetLowering()));
-
- addPreISel(PM, OptLevel);
-
- //PM.add(createPrintFunctionPass("\n\n"
- // "*** Final LLVM Code input to ISel ***\n",
- // &dbgs()));
-
- // All passes which modify the LLVM IR are now complete; run the verifier
- // to ensure that the IR is valid.
- if (!DisableVerify)
- PM.add(createVerifierPass());
-
- // Standard Lower-Level Passes.
-
- // Install a MachineModuleInfo class, which is an immutable pass that holds
- // all the per-module stuff we're generating, including MCContext.
- MachineModuleInfo *MMI = new MachineModuleInfo(*getMCAsmInfo(),
- *getRegisterInfo(),
- &getTargetLowering()->getObjFileLowering());
- PM.add(MMI);
- OutContext = &MMI->getContext(); // Return the MCContext specifically by-ref.
-
- // Set up a MachineFunction for the rest of CodeGen to work on.
- PM.add(new MachineFunctionAnalysis(*this, OptLevel));
-
- // Ask the target for an isel.
- if (addInstSelector(PM, OptLevel))
- return true;
-
- // Print the instruction selected machine code...
- printAndVerify(PM, "After Instruction Selection");
-
- // Expand pseudo-instructions emitted by ISel.
- PM.add(createExpandISelPseudosPass());
-
- // Pre-ra tail duplication.
- if (OptLevel != CodeGenOpt::None) {
- PM.add(createTailDuplicatePass(true));
- printAndVerify(PM, "After Pre-RegAlloc TailDuplicate");
- }
-
- // Optimize PHIs before DCE: removing dead PHI cycles may make more
- // instructions dead.
- if (OptLevel != CodeGenOpt::None)
- PM.add(createOptimizePHIsPass());
-
- // If the target requests it, assign local variables to stack slots relative
- // to one another and simplify frame index references where possible.
- PM.add(createLocalStackSlotAllocationPass());
-
- if (OptLevel != CodeGenOpt::None) {
- // With optimization, dead code should already be eliminated. However
- // there is one known exception: lowered code for arguments that are only
- // used by tail calls, where the tail calls reuse the incoming stack
- // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
- PM.add(createDeadMachineInstructionElimPass());
- printAndVerify(PM, "After codegen DCE pass");
-
- PM.add(createMachineLICMPass());
- PM.add(createMachineCSEPass());
- PM.add(createMachineSinkingPass());
- printAndVerify(PM, "After Machine LICM, CSE and Sinking passes");
-
- PM.add(createPeepholeOptimizerPass());
- printAndVerify(PM, "After codegen peephole optimization pass");
- }
-
- // Run pre-ra passes.
- if (addPreRegAlloc(PM, OptLevel))
- printAndVerify(PM, "After PreRegAlloc passes");
-
- // Perform register allocation.
- PM.add(createPTXRegisterAllocator());
- printAndVerify(PM, "After Register Allocation");
-
- // Perform stack slot coloring and post-ra machine LICM.
- if (OptLevel != CodeGenOpt::None) {
- // FIXME: Re-enable coloring with register when it's capable of adding
- // kill markers.
- PM.add(createStackSlotColoringPass(false));
-
- // FIXME: Post-RA LICM has asserts that fire on virtual registers.
- // Run post-ra machine LICM to hoist reloads / remats.
- //if (!DisablePostRAMachineLICM)
- // PM.add(createMachineLICMPass(false));
-
- printAndVerify(PM, "After StackSlotColoring and postra Machine LICM");
- }
-
- // Run post-ra passes.
- if (addPostRegAlloc(PM, OptLevel))
- printAndVerify(PM, "After PostRegAlloc passes");
-
- PM.add(createExpandPostRAPseudosPass());
- printAndVerify(PM, "After ExpandPostRAPseudos");
-
- // Insert prolog/epilog code. Eliminate abstract frame index references...
- PM.add(createPrologEpilogCodeInserter());
- printAndVerify(PM, "After PrologEpilogCodeInserter");
-
- // Run pre-sched2 passes.
- if (addPreSched2(PM, OptLevel))
- printAndVerify(PM, "After PreSched2 passes");
-
- // Second pass scheduler.
- if (OptLevel != CodeGenOpt::None) {
- PM.add(createPostRAScheduler(OptLevel));
- printAndVerify(PM, "After PostRAScheduler");
- }
-
- // Branch folding must be run after regalloc and prolog/epilog insertion.
- if (OptLevel != CodeGenOpt::None) {
- PM.add(createBranchFoldingPass(getEnableTailMergeDefault()));
- printNoVerify(PM, "After BranchFolding");
- }
-
- // Tail duplication.
- if (OptLevel != CodeGenOpt::None) {
- PM.add(createTailDuplicatePass(false));
- printNoVerify(PM, "After TailDuplicate");
- }
-
- PM.add(createGCMachineCodeAnalysisPass());
-
- //if (PrintGCInfo)
- // PM.add(createGCInfoPrinter(dbgs()));
-
- if (OptLevel != CodeGenOpt::None) {
- PM.add(createCodePlacementOptPass());
- printNoVerify(PM, "After CodePlacementOpt");
- }
-
- if (addPreEmitPass(PM, OptLevel))
- printNoVerify(PM, "After PreEmit passes");
-
- PM.add(createPTXMFInfoExtract(*this, OptLevel));
- PM.add(createPTXFPRoundingModePass(*this, OptLevel));
+ if (addPass(TailDuplicateID) != &NoPassID)
+ printAndVerify("After TailDuplicate");
+}
- return false;
+bool PTXPassConfig::addPreEmitPass() {
+ PM.add(createPTXMFInfoExtract(getPTXTargetMachine(), getOptLevel()));
+ PM.add(createPTXFPRoundingModePass(getPTXTargetMachine(), getOptLevel()));
+ return true;
}
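
The PTXTargetMachine.cpp rewrite above is part of the move from per-target addPassesToEmitFile overrides to the then-new TargetPassConfig API: the target publishes a small configuration object with hooks (addInstSelector, addPostRegAlloc, addPreEmitPass, ...) and the generic driver owns the pipeline. A minimal sketch of the pattern, with Sketch* names standing in for a hypothetical target:

    class SketchPassConfig : public TargetPassConfig {
    public:
      SketchPassConfig(SketchTargetMachine *TM, PassManagerBase &PM)
        : TargetPassConfig(TM, PM) {}

      // Hook: supply the target's instruction selector.
      bool addInstSelector() {
        PM.add(createSketchISelDag(getTM<SketchTargetMachine>(), getOptLevel()));
        return false; // false == success, keep going
      }
    };

    TargetPassConfig *SketchTargetMachine::createPassConfig(PassManagerBase &PM) {
      // Generic code builds the standard pipeline around these hooks; passes
      // can be suppressed, as PTX does with PrologEpilogCodeInserterID above.
      return new SketchPassConfig(this, PM);
    }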
diff --git a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h
index 5b7c82b..278d155 100644
--- a/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h
+++ b/contrib/llvm/lib/Target/PTX/PTXTargetMachine.h
@@ -35,8 +35,9 @@ class PTXTargetMachine : public LLVMTargetMachine {
public:
PTXTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL,
bool is64Bit);
virtual const TargetData *getTargetData() const { return &DataLayout; }
@@ -58,22 +59,9 @@ class PTXTargetMachine : public LLVMTargetMachine {
virtual const PTXSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual bool addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
- virtual bool addPostRegAlloc(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel);
-
- // We override this method to supply our own set of codegen passes.
- virtual bool addPassesToEmitFile(PassManagerBase &,
- formatted_raw_ostream &,
- CodeGenFileType,
- CodeGenOpt::Level,
- bool = true);
-
// Emission of machine code through JITCodeEmitter is not supported.
virtual bool addPassesToEmitMachineCode(PassManagerBase &,
JITCodeEmitter &,
- CodeGenOpt::Level,
bool = true) {
return true;
}
@@ -82,32 +70,33 @@ class PTXTargetMachine : public LLVMTargetMachine {
virtual bool addPassesToEmitMC(PassManagerBase &,
MCContext *&,
raw_ostream &,
- CodeGenOpt::Level,
bool = true) {
return true;
}
- private:
-
- bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level,
- bool DisableVerify, MCContext *&OutCtx);
+ // Pass Pipeline Configuration
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
}; // class PTXTargetMachine
class PTX32TargetMachine : public PTXTargetMachine {
+ virtual void anchor();
public:
PTX32TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
}; // class PTX32TargetMachine
class PTX64TargetMachine : public PTXTargetMachine {
+ virtual void anchor();
public:
PTX64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
}; // class PTX64TargetMachine
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
index b6a0835..61d23ce 100644
--- a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
@@ -17,16 +17,12 @@
#include "MCTargetDesc/PPCPredicates.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-#define GET_INSTRUCTION_NAME
#include "PPCGenAsmWriter.inc"
-StringRef PPCInstPrinter::getOpcodeName(unsigned Opcode) const {
- return getInstructionName(Opcode);
-}
-
void PPCInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
OS << getRegisterName(RegNo);
}
@@ -94,7 +90,6 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo,
unsigned Code = MI->getOperand(OpNo).getImm();
if (StringRef(Modifier) == "cc") {
switch ((PPC::Predicate)Code) {
- default: assert(0 && "Invalid predicate");
case PPC::PRED_ALWAYS: return; // Don't print anything for always.
case PPC::PRED_LT: O << "lt"; return;
case PPC::PRED_LE: O << "le"; return;
@@ -175,7 +170,7 @@ void PPCInstPrinter::printcrbitm(const MCInst *MI, unsigned OpNo,
unsigned CCReg = MI->getOperand(OpNo).getReg();
unsigned RegNo;
switch (CCReg) {
- default: assert(0 && "Unknown CR register");
+ default: llvm_unreachable("Unknown CR register");
case PPC::CR0: RegNo = 0; break;
case PPC::CR1: RegNo = 1; break;
case PPC::CR2: RegNo = 2; break;
diff --git a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
index 4ed4b76..73fd534 100644
--- a/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
+++ b/contrib/llvm/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.h
@@ -1,4 +1,4 @@
-//===-- PPCInstPrinter.h - Convert PPC MCInst to assembly syntax ----------===//
+//===- PPCInstPrinter.h - Convert PPC MCInst to assembly syntax -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -24,8 +24,9 @@ class PPCInstPrinter : public MCInstPrinter {
// 0 -> AIX, 1 -> Darwin.
unsigned SyntaxVariant;
public:
- PPCInstPrinter(const MCAsmInfo &MAI, unsigned syntaxVariant)
- : MCInstPrinter(MAI), SyntaxVariant(syntaxVariant) {}
+ PPCInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI, unsigned syntaxVariant)
+ : MCInstPrinter(MAI, MII, MRI), SyntaxVariant(syntaxVariant) {}
bool isDarwinSyntax() const {
return SyntaxVariant == 1;
@@ -33,9 +34,6 @@ public:
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
- virtual StringRef getOpcodeName(unsigned Opcode) const;
-
- static const char *getInstructionName(unsigned Opcode);
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index 9f2fd6d..48de583 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -7,10 +7,11 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MCAsmBackend.h"
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCFixupKinds.h"
+#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCObjectWriter.h"
@@ -57,13 +58,6 @@ public:
MCValue Target, uint64_t &FixedValue) {}
};
-class PPCELFObjectWriter : public MCELFObjectTargetWriter {
-public:
- PPCELFObjectWriter(bool Is64Bit, Triple::OSType OSType, uint16_t EMachine,
- bool HasRelocationAddend, bool isLittleEndian)
- : MCELFObjectTargetWriter(Is64Bit, OSType, EMachine, HasRelocationAddend) {}
-};
-
class PPCAsmBackend : public MCAsmBackend {
const Target &TheTarget;
public:
@@ -80,33 +74,42 @@ public:
{ "fixup_ppc_ha16", 16, 16, 0 },
{ "fixup_ppc_lo14", 16, 14, 0 }
};
-
+
if (Kind < FirstTargetFixupKind)
return MCAsmBackend::getFixupKindInfo(Kind);
-
+
assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
"Invalid kind!");
return Infos[Kind - FirstTargetFixupKind];
}
-
- bool MayNeedRelaxation(const MCInst &Inst) const {
+
+ bool mayNeedRelaxation(const MCInst &Inst) const {
// FIXME.
return false;
}
-
- void RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
+
+ bool fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const {
// FIXME.
- assert(0 && "RelaxInstruction() unimplemented");
+ llvm_unreachable("relaxInstruction() unimplemented");
}
-
- bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
+
+
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ // FIXME.
+ llvm_unreachable("relaxInstruction() unimplemented");
+ }
+
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const {
// FIXME: Zero fill for now. That's not right, but at least will get the
// section size right.
for (uint64_t i = 0; i != Count; ++i)
OW->Write8(0);
return true;
- }
-
+ }
+
unsigned getPointerSize() const {
StringRef Name = TheTarget.getName();
if (Name == "ppc64") return 8;
@@ -122,12 +125,12 @@ namespace {
class DarwinPPCAsmBackend : public PPCAsmBackend {
public:
DarwinPPCAsmBackend(const Target &T) : PPCAsmBackend(T) { }
-
- void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const {
- assert(0 && "UNIMP");
+ llvm_unreachable("UNIMP");
}
-
+
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
bool is64 = getPointerSize() == 8;
return createMachObjectWriter(new PPCMachObjectWriter(
@@ -137,19 +140,19 @@ namespace {
object::mach::CSPPC_ALL),
OS, /*IsLittleEndian=*/false);
}
-
+
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
return false;
}
};
class ELFPPCAsmBackend : public PPCAsmBackend {
- Triple::OSType OSType;
+ uint8_t OSABI;
public:
- ELFPPCAsmBackend(const Target &T, Triple::OSType OSType) :
- PPCAsmBackend(T), OSType(OSType) { }
-
- void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ ELFPPCAsmBackend(const Target &T, uint8_t OSABI) :
+ PPCAsmBackend(T), OSABI(OSABI) { }
+
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const {
Value = adjustFixupValue(Fixup.getKind(), Value);
if (!Value) return; // Doesn't change encoding.
@@ -162,17 +165,12 @@ namespace {
for (unsigned i = 0; i != 4; ++i)
Data[Offset + i] |= uint8_t((Value >> ((4 - i - 1)*8)) & 0xff);
}
-
+
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
bool is64 = getPointerSize() == 8;
- return createELFObjectWriter(new PPCELFObjectWriter(
- /*Is64Bit=*/is64,
- OSType,
- is64 ? ELF::EM_PPC64 : ELF::EM_PPC,
- /*addend*/ true, /*isLittleEndian*/ false),
- OS, /*IsLittleEndian=*/false);
+ return createPPCELFObjectWriter(OS, is64, OSABI);
}
-
+
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
return false;
}
@@ -187,5 +185,6 @@ MCAsmBackend *llvm::createPPCAsmBackend(const Target &T, StringRef TT) {
if (Triple(TT).isOSDarwin())
return new DarwinPPCAsmBackend(T);
- return new ELFPPCAsmBackend(T, Triple(TT).getOS());
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
+ return new ELFPPCAsmBackend(T, OSABI);
}
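
Besides the MCAsmBackend method renames (ApplyFixup to applyFixup and so on), the hunk above stops threading a Triple::OSType into the ELF writer and instead passes the one byte the ELF header actually stores, derived up front. Illustrative only (the FreeBSD triple is an example, not taken from the patch):

    // How the backend above now derives the ELF header's OSABI byte:
    uint8_t OSABI =
        MCELFObjectTargetWriter::getOSABI(Triple("powerpc-unknown-freebsd").getOS());
    // FreeBSD maps to ELF::ELFOSABI_FREEBSD; most other OSes map to ELFOSABI_NONE.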
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCBaseInfo.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCBaseInfo.h
index 369bbdc..9c975c0 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCBaseInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCBaseInfo.h
@@ -1,4 +1,4 @@
-//===-- PPCBaseInfo.h - Top level definitions for PPC -------- --*- C++ -*-===//
+//===-- PPCBaseInfo.h - Top level definitions for PPC -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
new file mode 100644
index 0000000..a197981
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
@@ -0,0 +1,103 @@
+//===-- PPCELFObjectWriter.cpp - PPC ELF Writer ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/PPCFixupKinds.h"
+#include "MCTargetDesc/PPCMCTargetDesc.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+ class PPCELFObjectWriter : public MCELFObjectTargetWriter {
+ public:
+ PPCELFObjectWriter(bool Is64Bit, uint8_t OSABI);
+
+ virtual ~PPCELFObjectWriter();
+ protected:
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) const;
+ virtual void adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset);
+ };
+}
+
+PPCELFObjectWriter::PPCELFObjectWriter(bool Is64Bit, uint8_t OSABI)
+ : MCELFObjectTargetWriter(Is64Bit, OSABI,
+ Is64Bit ? ELF::EM_PPC64 : ELF::EM_PPC,
+ /*HasRelocationAddend*/ true) {}
+
+PPCELFObjectWriter::~PPCELFObjectWriter() {
+}
+
+unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) const {
+ // determine the type of the relocation
+ unsigned Type;
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case PPC::fixup_ppc_br24:
+ Type = ELF::R_PPC_REL24;
+ break;
+ case FK_PCRel_4:
+ Type = ELF::R_PPC_REL32;
+ break;
+ }
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case PPC::fixup_ppc_br24:
+ Type = ELF::R_PPC_ADDR24;
+ break;
+ case PPC::fixup_ppc_brcond14:
+ Type = ELF::R_PPC_ADDR14_BRTAKEN; // XXX: or BRNTAKEN?
+ break;
+ case PPC::fixup_ppc_ha16:
+ Type = ELF::R_PPC_ADDR16_HA;
+ break;
+ case PPC::fixup_ppc_lo16:
+ Type = ELF::R_PPC_ADDR16_LO;
+ break;
+ case PPC::fixup_ppc_lo14:
+ Type = ELF::R_PPC_ADDR14;
+ break;
+ case FK_Data_4:
+ Type = ELF::R_PPC_ADDR32;
+ break;
+ case FK_Data_2:
+ Type = ELF::R_PPC_ADDR16;
+ break;
+ }
+ }
+ return Type;
+}
+
+void PPCELFObjectWriter::
+adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset) {
+ switch ((unsigned)Fixup.getKind()) {
+ case PPC::fixup_ppc_ha16:
+ case PPC::fixup_ppc_lo16:
+ RelocOffset += 2;
+ break;
+ default:
+ break;
+ }
+}
+
+MCObjectWriter *llvm::createPPCELFObjectWriter(raw_ostream &OS,
+ bool Is64Bit,
+ uint8_t OSABI) {
+ MCELFObjectTargetWriter *MOTW = new PPCELFObjectWriter(Is64Bit, OSABI);
+ return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/false);
+}
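
One non-obvious detail in the new file above: adjustFixupOffset bumps ha16/lo16 relocations by 2 because the fixup is recorded at the start of the 4-byte instruction, while the ELF relocation must address the 16-bit immediate field, which on big-endian PowerPC occupies the instruction's last two bytes. The @ha/@l arithmetic these relocation types implement is, as a sketch of the standard PPC idiom:

    // Split a 32-bit address for addis/addi pairs:
    uint16_t lo(uint32_t Addr) { return Addr & 0xFFFF; }
    uint16_t ha(uint32_t Addr) {
      return (Addr + 0x8000) >> 16; // +0x8000 compensates for lo() being sign-extended
    }
    // Invariant: (ha(A) << 16) + (int16_t)lo(A) == A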
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
index e9424d8..245b457 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp
@@ -1,4 +1,4 @@
-//===-- PPCMCAsmInfo.cpp - PPC asm properties -------------------*- C++ -*-===//
+//===-- PPCMCAsmInfo.cpp - PPC asm properties -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,6 +14,8 @@
#include "PPCMCAsmInfo.h"
using namespace llvm;
+void PPCMCAsmInfoDarwin::anchor() { }
+
PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit) {
if (is64Bit)
PointerSize = 8;
@@ -30,6 +32,8 @@ PPCMCAsmInfoDarwin::PPCMCAsmInfoDarwin(bool is64Bit) {
SupportsDebugInformation= true; // Debug information.
}
+void PPCLinuxMCAsmInfo::anchor() { }
+
PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) {
if (is64Bit)
PointerSize = 8;
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
index 96ae6fb..7b4ed9f 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- PPCMCAsmInfo.h - PPC asm properties -----------------*- C++ -*--====//
+//===-- PPCMCAsmInfo.h - PPC asm properties --------------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,11 +18,15 @@
namespace llvm {
- struct PPCMCAsmInfoDarwin : public MCAsmInfoDarwin {
+ class PPCMCAsmInfoDarwin : public MCAsmInfoDarwin {
+ virtual void anchor();
+ public:
explicit PPCMCAsmInfoDarwin(bool is64Bit);
};
- struct PPCLinuxMCAsmInfo : public MCAsmInfo {
+ class PPCLinuxMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
+ public:
explicit PPCLinuxMCAsmInfo(bool is64Bit);
};
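
The struct-to-class change plus the out-of-line anchor() added above is LLVM's vtable-anchoring idiom, which also accounts for the anchor() additions in the PTX hunks earlier: a class with virtual functions gets one deliberately out-of-line virtual method so its vtable (and type info) are emitted once, in the .cpp that defines the anchor, rather than weakly in every translation unit that uses the class. Sketched with a hypothetical name:

    // Header:
    class SketchAsmInfo : public MCAsmInfo {
      virtual void anchor(); // key function: pins the vtable to one object file
    public:
      explicit SketchAsmInfo(bool is64Bit);
    };

    // Exactly one .cpp:
    void SketchAsmInfo::anchor() { } // vtable and RTTI emitted here only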
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index 262f97c3..5a6827f 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -57,7 +57,7 @@ public:
// getBinaryCodeForInstr - TableGen'erated function for getting the
// binary encoding for an instruction.
- unsigned getBinaryCodeForInstr(const MCInst &MI,
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
SmallVectorImpl<MCFixup> &Fixups) const;
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const {
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index d5c8a9e..6568e82 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- PPCMCTargetDesc.cpp - PowerPC Target Descriptions -------*- C++ -*-===//
+//===-- PPCMCTargetDesc.cpp - PowerPC Target Descriptions -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,6 +20,7 @@
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_MC_DESC
@@ -76,7 +77,8 @@ static MCAsmInfo *createPPCMCAsmInfo(const Target &T, StringRef TT) {
}
static MCCodeGenInfo *createPPCMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
if (RM == Reloc::Default) {
@@ -86,7 +88,7 @@ static MCCodeGenInfo *createPPCMCCodeGenInfo(StringRef TT, Reloc::Model RM,
else
RM = Reloc::Static;
}
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
@@ -106,8 +108,10 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
static MCInstPrinter *createPPCMCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
- return new PPCInstPrinter(MAI, SyntaxVariant);
+ return new PPCInstPrinter(MAI, MII, MRI, SyntaxVariant);
}
extern "C" void LLVMInitializePowerPCTargetMC() {
diff --git a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
index e5bf2a9..b7fa064 100644
--- a/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
@@ -14,14 +14,18 @@
#ifndef PPCMCTARGETDESC_H
#define PPCMCTARGETDESC_H
+#include "llvm/Support/DataTypes.h"
+
namespace llvm {
class MCAsmBackend;
class MCCodeEmitter;
class MCContext;
class MCInstrInfo;
+class MCObjectWriter;
class MCSubtargetInfo;
class Target;
class StringRef;
+class raw_ostream;
extern Target ThePPC32Target;
extern Target ThePPC64Target;
@@ -31,7 +35,11 @@ MCCodeEmitter *createPPCMCCodeEmitter(const MCInstrInfo &MCII,
MCContext &Ctx);
MCAsmBackend *createPPCAsmBackend(const Target &T, StringRef TT);
-
+
+/// createPPCELFObjectWriter - Construct a PPC ELF object writer.
+MCObjectWriter *createPPCELFObjectWriter(raw_ostream &OS,
+ bool Is64Bit,
+ uint8_t OSABI);
} // End llvm namespace
// Defines symbolic names for PowerPC registers. This defines a mapping from
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.h b/contrib/llvm/lib/Target/PowerPC/PPC.h
index 5dc1863..24a7178 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.h
@@ -25,14 +25,11 @@
namespace llvm {
class PPCTargetMachine;
class FunctionPass;
- class formatted_raw_ostream;
class JITCodeEmitter;
- class Target;
class MachineInstr;
class AsmPrinter;
class MCInst;
- class TargetMachine;
-
+
FunctionPass *createPPCBranchSelectionPass();
FunctionPass *createPPCISelDag(PPCTargetMachine &TM);
FunctionPass *createPPCJITCodeEmitterPass(PPCTargetMachine &TM,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPC.td b/contrib/llvm/lib/Target/PowerPC/PPC.td
index 2d5d302..c554d39 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPC.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPC.td
@@ -1,10 +1,10 @@
-//===- PPC.td - Describe the PowerPC Target Machine --------*- tablegen -*-===//
-//
+//===-- PPC.td - Describe the PowerPC Target Machine -------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This is the top level entry point for the PowerPC target.
@@ -23,6 +23,7 @@ include "llvm/Target/Target.td"
// CPU Directives //
//===----------------------------------------------------------------------===//
+def Directive440 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_440", "">;
def Directive601 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_601", "">;
def Directive602 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_602", "">;
def Directive603 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_603", "">;
@@ -33,6 +34,7 @@ def Directive750 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_750", "">;
def Directive970 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_970", "">;
def Directive32 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_32", "">;
def Directive64 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_64", "">;
+def DirectiveA2 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_A2", "">;
def Feature64Bit : SubtargetFeature<"64bit","Has64BitSupport", "true",
"Enable 64-bit instructions">;
@@ -46,6 +48,8 @@ def FeatureFSqrt : SubtargetFeature<"fsqrt","HasFSQRT", "true",
"Enable the fsqrt instruction">;
def FeatureSTFIWX : SubtargetFeature<"stfiwx","HasSTFIWX", "true",
"Enable the stfiwx instruction">;
+def FeatureBookE : SubtargetFeature<"booke", "IsBookE", "true",
+ "Enable Book E instructions">;
//===----------------------------------------------------------------------===//
// Register File Description
@@ -60,6 +64,8 @@ include "PPCInstrInfo.td"
//
def : Processor<"generic", G3Itineraries, [Directive32]>;
+def : Processor<"440", PPC440Itineraries, [Directive440, FeatureBookE]>;
+def : Processor<"450", PPC440Itineraries, [Directive440, FeatureBookE]>;
def : Processor<"601", G3Itineraries, [Directive601]>;
def : Processor<"602", G3Itineraries, [Directive602]>;
def : Processor<"603", G3Itineraries, [Directive603]>;
@@ -82,6 +88,10 @@ def : Processor<"g5", G5Itineraries,
[Directive970, FeatureAltivec,
FeatureGPUL, FeatureFSqrt, FeatureSTFIWX,
Feature64Bit /*, Feature64BitRegs */]>;
+def : Processor<"a2", PPCA2Itineraries, [DirectiveA2, FeatureBookE,
+ FeatureFSqrt, FeatureSTFIWX,
+ Feature64Bit
+ /*, Feature64BitRegs */]>;
def : Processor<"ppc", G3Itineraries, [Directive32]>;
def : Processor<"ppc64", G5Itineraries,
[Directive64, FeatureAltivec,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 9528459..fb7aa71 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -1,4 +1,4 @@
-//===-- PPCAsmPrinter.cpp - Print machine instrs to PowerPC assembly --------=//
+//===-- PPCAsmPrinter.cpp - Print machine instrs to PowerPC assembly ------===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,6 +20,7 @@
#include "PPC.h"
#include "PPCTargetMachine.h"
#include "PPCSubtarget.h"
+#include "InstPrinter/PPCInstPrinter.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Constants.h"
@@ -39,6 +40,7 @@
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSectionELF.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -49,10 +51,9 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ELF.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/SmallString.h"
-#include "InstPrinter/PPCInstPrinter.h"
using namespace llvm;
namespace {
@@ -366,14 +367,21 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
case PPC::MFCRpseud:
+ case PPC::MFCR8pseud:
// Transform: %R3 = MFCRpseud %CR7
// Into: %R3 = MFCR ;; cr7
OutStreamer.AddComment(PPCInstPrinter::
getRegisterName(MI->getOperand(1).getReg()));
- TmpInst.setOpcode(PPC::MFCR);
+ TmpInst.setOpcode(Subtarget.isPPC64() ? PPC::MFCR8 : PPC::MFCR);
TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
OutStreamer.EmitInstruction(TmpInst);
return;
+ case PPC::SYNC:
+ // In Book E, sync is called msync; handle this special case here.
+ if (Subtarget.isBookE()) {
+ OutStreamer.EmitRawText(StringRef("\tmsync"));
+ return;
+ }
}
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());
@@ -385,14 +393,26 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
return AsmPrinter::EmitFunctionEntryLabel();
// Emit an official procedure descriptor.
- // FIXME 64-bit SVR4: Use MCSection here!
- OutStreamer.EmitRawText(StringRef("\t.section\t\".opd\",\"aw\""));
- OutStreamer.EmitRawText(StringRef("\t.align 3"));
+ const MCSection *Current = OutStreamer.getCurrentSection();
+ const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".opd",
+ ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
+ SectionKind::getReadOnly());
+ OutStreamer.SwitchSection(Section);
OutStreamer.EmitLabel(CurrentFnSym);
- OutStreamer.EmitRawText("\t.quad .L." + Twine(CurrentFnSym->getName()) +
- ",.TOC.@tocbase");
- OutStreamer.EmitRawText(StringRef("\t.previous"));
- OutStreamer.EmitRawText(".L." + Twine(CurrentFnSym->getName()) + ":");
+ OutStreamer.EmitValueToAlignment(8);
+ MCSymbol *Symbol1 =
+ OutContext.GetOrCreateSymbol(".L." + Twine(CurrentFnSym->getName()));
+ MCSymbol *Symbol2 = OutContext.GetOrCreateSymbol(StringRef(".TOC.@tocbase"));
+ OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol1, OutContext),
+ Subtarget.isPPC64() ? 8 : 4/*size*/, 0/*addrspace*/);
+ OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol2, OutContext),
+ Subtarget.isPPC64() ? 8 : 4/*size*/, 0/*addrspace*/);
+ OutStreamer.SwitchSection(Current);
+
+ MCSymbol *RealFnSym = OutContext.GetOrCreateSymbol(
+ ".L." + Twine(CurrentFnSym->getName()));
+ OutStreamer.EmitLabel(RealFnSym);
+ CurrentFnSymForSize = RealFnSym;
}
@@ -402,8 +422,10 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
bool isPPC64 = TD->getPointerSizeInBits() == 64;
if (isPPC64 && !TOC.empty()) {
- // FIXME 64-bit SVR4: Use MCSection here?
- OutStreamer.EmitRawText(StringRef("\t.section\t\".toc\",\"aw\""));
+ const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".toc",
+ ELF::SHT_PROGBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC,
+ SectionKind::getReadOnly());
+ OutStreamer.SwitchSection(Section);
// FIXME: This is nondeterministic!
for (DenseMap<MCSymbol*, MCSymbol*>::iterator I = TOC.begin(),
@@ -421,12 +443,14 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
static const char *const CPUDirectives[] = {
"",
"ppc",
+ "ppc440",
"ppc601",
"ppc602",
"ppc603",
"ppc7400",
"ppc750",
"ppc970",
+ "ppcA2",
"ppc64"
};
@@ -435,7 +459,7 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
Directive = PPC::DIR_970;
if (Subtarget.hasAltivec() && Directive < PPC::DIR_7400)
Directive = PPC::DIR_7400;
- if (Subtarget.isPPC64() && Directive < PPC::DIR_970)
+ if (Subtarget.isPPC64() && Directive < PPC::DIR_64)
Directive = PPC::DIR_64;
assert(Directive <= PPC::DIR_64 && "Directive out of range.");
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
index 475edf3..5f775e1 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -1,4 +1,4 @@
-//===-- PPCBranchSelector.cpp - Emit long conditional branches-----*- C++ -*-=//
+//===-- PPCBranchSelector.cpp - Emit long conditional branches ------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td b/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
index 441db94..9883c2e 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCCallingConv.td
@@ -1,10 +1,10 @@
//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the PowerPC 32- and 64-bit
@@ -130,3 +130,34 @@ def CC_PPC_SVR4_ByVal : CallingConv<[
CCCustom<"CC_PPC_SVR4_Custom_Dummy">
]>;
+def CSR_Darwin32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
+ R21, R22, R23, R24, R25, R26, R27, R28,
+ R29, R30, R31, F14, F15, F16, F17, F18,
+ F19, F20, F21, F22, F23, F24, F25, F26,
+ F27, F28, F29, F30, F31, CR2, CR3, CR4,
+ V20, V21, V22, V23, V24, V25, V26, V27,
+ V28, V29, V30, V31)>;
+
+def CSR_SVR432 : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20, VRSAVE,
+ R21, R22, R23, R24, R25, R26, R27, R28,
+ R29, R30, R31, F14, F15, F16, F17, F18,
+ F19, F20, F21, F22, F23, F24, F25, F26,
+ F27, F28, F29, F30, F31, CR2, CR3, CR4,
+ V20, V21, V22, V23, V24, V25, V26, V27,
+ V28, V29, V30, V31)>;
+
+def CSR_Darwin64 : CalleeSavedRegs<(add X13, X14, X15, X16, X17, X18, X19, X20,
+ X21, X22, X23, X24, X25, X26, X27, X28,
+ X29, X30, X31, F14, F15, F16, F17, F18,
+ F19, F20, F21, F22, F23, F24, F25, F26,
+ F27, F28, F29, F30, F31, CR2, CR3, CR4,
+ V20, V21, V22, V23, V24, V25, V26, V27,
+ V28, V29, V30, V31)>;
+
+def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20, VRSAVE,
+ X21, X22, X23, X24, X25, X26, X27, X28,
+ X29, X30, X31, F14, F15, F16, F17, F18,
+ F19, F20, F21, F22, F23, F24, F25, F26,
+ F27, F28, F29, F30, F31, CR2, CR3, CR4,
+ V20, V21, V22, V23, V24, V25, V26, V27,
+ V28, V29, V30, V31)>;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp b/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
index 4a1f182..252a2d1 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
@@ -1,4 +1,4 @@
-//===-- PPCCodeEmitter.cpp - JIT Code Emitter for PowerPC32 -------*- C++ -*-=//
+//===-- PPCCodeEmitter.cpp - JIT Code Emitter for PowerPC -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -50,7 +50,7 @@ namespace {
/// getBinaryCodeForInstr - This function, generated by the
/// CodeEmitterGenerator using TableGen, produces the binary encoding for
/// machine instructions.
- unsigned getBinaryCodeForInstr(const MachineInstr &MI) const;
+ uint64_t getBinaryCodeForInstr(const MachineInstr &MI) const;
MachineRelocation GetRelocation(const MachineOperand &MO,
@@ -138,7 +138,8 @@ void PPCCodeEmitter::emitBasicBlock(MachineBasicBlock &MBB) {
unsigned PPCCodeEmitter::get_crbitm_encoding(const MachineInstr &MI,
unsigned OpNo) const {
const MachineOperand &MO = MI.getOperand(OpNo);
- assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MFOCRF) &&
+ assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MTCRF8 ||
+ MI.getOpcode() == PPC::MFOCRF) &&
(MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7));
return 0x80 >> getPPCRegisterNumbering(MO.getReg());
}
@@ -248,7 +249,8 @@ unsigned PPCCodeEmitter::getMachineOpValue(const MachineInstr &MI,
if (MO.isReg()) {
// MTCRF/MFOCRF should go through get_crbitm_encoding for the CR operand.
// The GPR operand should come through here though.
- assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MFOCRF) ||
+ assert((MI.getOpcode() != PPC::MTCRF && MI.getOpcode() != PPC::MTCRF8 &&
+ MI.getOpcode() != PPC::MFOCRF) ||
MO.getReg() < PPC::CR0 || MO.getReg() > PPC::CR7);
return getPPCRegisterNumbering(MO.getReg());
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 0b85fea..b77a80b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -1,4 +1,4 @@
-//=====- PPCFrameLowering.cpp - PPC Frame Information -----------*- C++ -*-===//
+//===-- PPCFrameLowering.cpp - PPC Frame Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -38,7 +38,7 @@ using namespace llvm;
/// VRRegNo - Map from a numbered VR register to its enum value.
///
-static const unsigned short VRRegNo[] = {
+static const uint16_t VRRegNo[] = {
PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 , PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
PPC::V8 , PPC::V9 , PPC::V10, PPC::V11, PPC::V12, PPC::V13, PPC::V14, PPC::V15,
PPC::V16, PPC::V17, PPC::V18, PPC::V19, PPC::V20, PPC::V21, PPC::V22, PPC::V23,
@@ -64,7 +64,7 @@ static void RemoveVRSaveCode(MachineInstr *MI) {
// epilog blocks.
for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
// If last instruction is a return instruction, add an epilogue
- if (!I->empty() && I->back().getDesc().isReturn()) {
+ if (!I->empty() && I->back().isReturn()) {
bool FoundIt = false;
for (MBBI = I->end(); MBBI != I->begin(); ) {
--MBBI;
@@ -244,8 +244,10 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
if (MF.getFunction()->hasFnAttr(Attribute::Naked))
return false;
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects() ||
- (GuaranteedTailCallOpt && MF.getInfo<PPCFunctionInfo>()->hasFastCall());
+ return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ MFI->hasVarSizedObjects() ||
+ (MF.getTarget().Options.GuaranteedTailCallOpt &&
+ MF.getInfo<PPCFunctionInfo>()->hasFastCall());
}
@@ -365,8 +367,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(PPC::R0, RegState::Kill)
.addImm(NegFrameSize);
BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX))
- .addReg(PPC::R1)
- .addReg(PPC::R1)
+ .addReg(PPC::R1, RegState::Kill)
+ .addReg(PPC::R1, RegState::Define)
.addReg(PPC::R0);
} else if (isInt<16>(NegFrameSize)) {
BuildMI(MBB, MBBI, dl, TII.get(PPC::STWU), PPC::R1)
@@ -380,8 +382,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(PPC::R0, RegState::Kill)
.addImm(NegFrameSize & 0xFFFF);
BuildMI(MBB, MBBI, dl, TII.get(PPC::STWUX))
- .addReg(PPC::R1)
- .addReg(PPC::R1)
+ .addReg(PPC::R1, RegState::Kill)
+ .addReg(PPC::R1, RegState::Define)
.addReg(PPC::R0);
}
} else { // PPC64.
@@ -398,8 +400,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(PPC::X0)
.addImm(NegFrameSize);
BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX))
- .addReg(PPC::X1)
- .addReg(PPC::X1)
+ .addReg(PPC::X1, RegState::Kill)
+ .addReg(PPC::X1, RegState::Define)
.addReg(PPC::X0);
} else if (isInt<16>(NegFrameSize)) {
BuildMI(MBB, MBBI, dl, TII.get(PPC::STDU), PPC::X1)
@@ -413,8 +415,8 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(PPC::X0, RegState::Kill)
.addImm(NegFrameSize & 0xFFFF);
BuildMI(MBB, MBBI, dl, TII.get(PPC::STDUX))
- .addReg(PPC::X1)
- .addReg(PPC::X1)
+ .addReg(PPC::X1, RegState::Kill)
+ .addReg(PPC::X1, RegState::Define)
.addReg(PPC::X0);
}
}
@@ -655,7 +657,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
// Callee pop calling convention. Pop parameter/linkage area. Used for tail
// call optimization
- if (GuaranteedTailCallOpt && RetOpcode == PPC::BLR &&
+ if (MF.getTarget().Options.GuaranteedTailCallOpt && RetOpcode == PPC::BLR &&
MF.getFunction()->getCallingConv() == CallingConv::Fast) {
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
unsigned CallerAllocatedAmt = FI->getMinReservedArea();
@@ -758,7 +760,8 @@ PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Reserve stack space to move the linkage area to in case of a tail call.
int TCSPDelta = 0;
- if (GuaranteedTailCallOpt && (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
+ if (MF.getTarget().Options.GuaranteedTailCallOpt &&
+ (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
MFI->CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
}
@@ -769,7 +772,7 @@ PPCFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// FIXME: doesn't detect whether or not we need to spill vXX, which requires
// r0 for now.
- if (RegInfo->requiresRegisterScavenging(MF)) // FIXME (64-bit): Enable.
+ if (RegInfo->requiresRegisterScavenging(MF))
if (needsFP(MF) || spillsCR(MF)) {
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
@@ -863,7 +866,8 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF)
// Take into account stack space reserved for tail calls.
int TCSPDelta = 0;
- if (GuaranteedTailCallOpt && (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
+ if (MF.getTarget().Options.GuaranteedTailCallOpt &&
+ (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
LowerBound = TCSPDelta;
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
index 20faa71..d708541 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.h
@@ -1,4 +1,4 @@
-//==-- PPCFrameLowering.h - Define frame lowering for PowerPC ----*- C++ -*-==//
+//===-- PPCFrameLowering.h - Define frame lowering for PowerPC --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index cddc9d8..6ed1fb9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -22,6 +22,30 @@
using namespace llvm;
//===----------------------------------------------------------------------===//
+// PowerPC Scoreboard Hazard Recognizer
+void PPCScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
+ const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
+ if (!MCID)
+ // This is a PPC pseudo-instruction.
+ return;
+
+ ScoreboardHazardRecognizer::EmitInstruction(SU);
+}
+
+ScheduleHazardRecognizer::HazardType
+PPCScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
+ return ScoreboardHazardRecognizer::getHazardType(SU, Stalls);
+}
+
+void PPCScoreboardHazardRecognizer::AdvanceCycle() {
+ ScoreboardHazardRecognizer::AdvanceCycle();
+}
+
+void PPCScoreboardHazardRecognizer::Reset() {
+ ScoreboardHazardRecognizer::Reset();
+}
+
+//===----------------------------------------------------------------------===//
// PowerPC 970 Hazard Recognizer
//
// This models the dispatch group formation of the PPC970 processor. Dispatch
@@ -67,12 +91,6 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
bool &isFirst, bool &isSingle,
bool &isCracked,
bool &isLoad, bool &isStore) {
- if ((int)Opcode >= 0) {
- isFirst = isSingle = isCracked = isLoad = isStore = false;
- return PPCII::PPC970_Pseudo;
- }
- Opcode = ~Opcode;
-
const MCInstrDesc &MCID = TII.get(Opcode);
isLoad = MCID.mayLoad();
@@ -89,29 +107,23 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
/// isLoadOfStoredAddress - If we have a load from the previously stored pointer
/// as indicated by StorePtr1/StorePtr2/StoreSize, return true.
bool PPCHazardRecognizer970::
-isLoadOfStoredAddress(unsigned LoadSize, SDValue Ptr1, SDValue Ptr2) const {
+isLoadOfStoredAddress(uint64_t LoadSize, int64_t LoadOffset,
+ const Value *LoadValue) const {
for (unsigned i = 0, e = NumStores; i != e; ++i) {
// Handle exact and commuted addresses.
- if (Ptr1 == StorePtr1[i] && Ptr2 == StorePtr2[i])
- return true;
- if (Ptr2 == StorePtr1[i] && Ptr1 == StorePtr2[i])
+ if (LoadValue == StoreValue[i] && LoadOffset == StoreOffset[i])
return true;
// Okay, we don't have an exact match, if this is an indexed offset, see if
// we have overlap (which happens during fp->int conversion for example).
- if (StorePtr2[i] == Ptr2) {
- if (ConstantSDNode *StoreOffset = dyn_cast<ConstantSDNode>(StorePtr1[i]))
- if (ConstantSDNode *LoadOffset = dyn_cast<ConstantSDNode>(Ptr1)) {
- // Okay the base pointers match, so we have [c1+r] vs [c2+r]. Check
- // to see if the load and store actually overlap.
- int StoreOffs = StoreOffset->getZExtValue();
- int LoadOffs = LoadOffset->getZExtValue();
- if (StoreOffs < LoadOffs) {
- if (int(StoreOffs+StoreSize[i]) > LoadOffs) return true;
- } else {
- if (int(LoadOffs+LoadSize) > StoreOffs) return true;
- }
- }
+ if (StoreValue[i] == LoadValue) {
+ // Okay the base pointers match, so we have [c1+r] vs [c2+r]. Check
+ // to see if the load and store actually overlap.
+ if (StoreOffset[i] < LoadOffset) {
+ if (int64_t(StoreOffset[i]+StoreSize[i]) > LoadOffset) return true;
+ } else {
+ if (int64_t(LoadOffset+LoadSize) > StoreOffset[i]) return true;
+ }
}
}
return false;
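
The rewritten check reduces to a half-open interval intersection between byte ranges that share a base Value. A minimal standalone sketch of the same test (illustrative names, not part of the patch):

#include <cstdint>

// Two accesses to the same base value alias when the ranges
// [LoadOff, LoadOff+LoadSize) and [StoreOff, StoreOff+StoreSize) intersect.
static bool rangesOverlap(int64_t LoadOff, uint64_t LoadSize,
                          int64_t StoreOff, uint64_t StoreSize) {
  if (StoreOff < LoadOff)
    return StoreOff + int64_t(StoreSize) > LoadOff;
  return LoadOff + int64_t(LoadSize) > StoreOff;
}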
@@ -125,13 +137,17 @@ ScheduleHazardRecognizer::HazardType PPCHazardRecognizer970::
getHazardType(SUnit *SU, int Stalls) {
assert(Stalls == 0 && "PPC hazards don't support scoreboard lookahead");
- const SDNode *Node = SU->getNode()->getGluedMachineNode();
+ MachineInstr *MI = SU->getInstr();
+
+ if (MI->isDebugValue())
+ return NoHazard;
+
+ unsigned Opcode = MI->getOpcode();
bool isFirst, isSingle, isCracked, isLoad, isStore;
PPCII::PPC970_Unit InstrType =
- GetInstrType(Node->getOpcode(), isFirst, isSingle, isCracked,
+ GetInstrType(Opcode, isFirst, isSingle, isCracked,
isLoad, isStore);
if (InstrType == PPCII::PPC970_Pseudo) return NoHazard;
- unsigned Opcode = Node->getMachineOpcode();
// We can only issue a PPC970_First/PPC970_Single instruction (such as
// crand/mtspr/etc) if this is the first cycle of the dispatch group.
@@ -168,55 +184,10 @@ getHazardType(SUnit *SU, int Stalls) {
// If this is a load following a store, make sure it's not to the same or
// overlapping address.
- if (isLoad && NumStores) {
- unsigned LoadSize;
- switch (Opcode) {
- default: llvm_unreachable("Unknown load!");
- case PPC::LBZ: case PPC::LBZU:
- case PPC::LBZX:
- case PPC::LBZ8: case PPC::LBZU8:
- case PPC::LBZX8:
- case PPC::LVEBX:
- LoadSize = 1;
- break;
- case PPC::LHA: case PPC::LHAU:
- case PPC::LHAX:
- case PPC::LHZ: case PPC::LHZU:
- case PPC::LHZX:
- case PPC::LVEHX:
- case PPC::LHBRX:
- case PPC::LHA8: case PPC::LHAU8:
- case PPC::LHAX8:
- case PPC::LHZ8: case PPC::LHZU8:
- case PPC::LHZX8:
- LoadSize = 2;
- break;
- case PPC::LFS: case PPC::LFSU:
- case PPC::LFSX:
- case PPC::LWZ: case PPC::LWZU:
- case PPC::LWZX:
- case PPC::LWA:
- case PPC::LWAX:
- case PPC::LVEWX:
- case PPC::LWBRX:
- case PPC::LWZ8:
- case PPC::LWZX8:
- LoadSize = 4;
- break;
- case PPC::LFD: case PPC::LFDU:
- case PPC::LFDX:
- case PPC::LD: case PPC::LDU:
- case PPC::LDX:
- LoadSize = 8;
- break;
- case PPC::LVX:
- case PPC::LVXL:
- LoadSize = 16;
- break;
- }
-
- if (isLoadOfStoredAddress(LoadSize,
- Node->getOperand(0), Node->getOperand(1)))
+ if (isLoad && NumStores && !MI->memoperands_empty()) {
+ MachineMemOperand *MO = *MI->memoperands_begin();
+ if (isLoadOfStoredAddress(MO->getSize(),
+ MO->getOffset(), MO->getValue()))
return NoopHazard;
}
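
The hunk above replaces the per-opcode load-size table with a query of the instruction's first memoperand. A hedged sketch of that access pattern, assuming the 3.1-era MachineInstr API this patch uses:

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Illustrative helper (not in the patch): fetch the alias-check inputs
// from an instruction's first memoperand, as the new code does inline.
static bool getFirstMemOp(const MachineInstr *MI, uint64_t &Size,
                          int64_t &Offset, const Value *&V) {
  if (MI->memoperands_empty())
    return false;                     // no info: treat conservatively
  const MachineMemOperand *MO = *MI->memoperands_begin();
  Size   = MO->getSize();            // access width in bytes
  Offset = MO->getOffset();          // offset from the IR base value
  V      = MO->getValue();           // may be null (e.g. spill slots)
  return true;
}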
@@ -224,66 +195,27 @@ getHazardType(SUnit *SU, int Stalls) {
}
void PPCHazardRecognizer970::EmitInstruction(SUnit *SU) {
- const SDNode *Node = SU->getNode()->getGluedMachineNode();
+ MachineInstr *MI = SU->getInstr();
+
+ if (MI->isDebugValue())
+ return;
+
+ unsigned Opcode = MI->getOpcode();
bool isFirst, isSingle, isCracked, isLoad, isStore;
PPCII::PPC970_Unit InstrType =
- GetInstrType(Node->getOpcode(), isFirst, isSingle, isCracked,
+ GetInstrType(Opcode, isFirst, isSingle, isCracked,
isLoad, isStore);
if (InstrType == PPCII::PPC970_Pseudo) return;
- unsigned Opcode = Node->getMachineOpcode();
// Update structural hazard information.
if (Opcode == PPC::MTCTR || Opcode == PPC::MTCTR8) HasCTRSet = true;
// Track the address stored to.
- if (isStore) {
- unsigned ThisStoreSize;
- switch (Opcode) {
- default: llvm_unreachable("Unknown store instruction!");
- case PPC::STB: case PPC::STB8:
- case PPC::STBU: case PPC::STBU8:
- case PPC::STBX: case PPC::STBX8:
- case PPC::STVEBX:
- ThisStoreSize = 1;
- break;
- case PPC::STH: case PPC::STH8:
- case PPC::STHU: case PPC::STHU8:
- case PPC::STHX: case PPC::STHX8:
- case PPC::STVEHX:
- case PPC::STHBRX:
- ThisStoreSize = 2;
- break;
- case PPC::STFS:
- case PPC::STFSU:
- case PPC::STFSX:
- case PPC::STWX: case PPC::STWX8:
- case PPC::STWUX:
- case PPC::STW: case PPC::STW8:
- case PPC::STWU:
- case PPC::STVEWX:
- case PPC::STFIWX:
- case PPC::STWBRX:
- ThisStoreSize = 4;
- break;
- case PPC::STD_32:
- case PPC::STDX_32:
- case PPC::STD:
- case PPC::STDU:
- case PPC::STFD:
- case PPC::STFDX:
- case PPC::STDX:
- case PPC::STDUX:
- ThisStoreSize = 8;
- break;
- case PPC::STVX:
- case PPC::STVXL:
- ThisStoreSize = 16;
- break;
- }
-
- StoreSize[NumStores] = ThisStoreSize;
- StorePtr1[NumStores] = Node->getOperand(1);
- StorePtr2[NumStores] = Node->getOperand(2);
+ if (isStore && NumStores < 4 && !MI->memoperands_empty()) {
+ MachineMemOperand *MO = *MI->memoperands_begin();
+ StoreSize[NumStores] = MO->getSize();
+ StoreOffset[NumStores] = MO->getOffset();
+ StoreValue[NumStores] = MO->getValue();
++NumStores;
}
@@ -306,3 +238,8 @@ void PPCHazardRecognizer970::AdvanceCycle() {
if (NumIssued == 5)
EndDispatchGroup();
}
+
+void PPCHazardRecognizer970::Reset() {
+ EndDispatchGroup();
+}
+
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
index 2f81f0f..55b45d0 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.h
@@ -14,12 +14,28 @@
#ifndef PPCHAZRECS_H
#define PPCHAZRECS_H
+#include "PPCInstrInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "PPCInstrInfo.h"
namespace llvm {
+/// PPCScoreboardHazardRecognizer - This class implements a scoreboard-based
+/// hazard recognizer for generic PPC processors.
+class PPCScoreboardHazardRecognizer : public ScoreboardHazardRecognizer {
+ const ScheduleDAG *DAG;
+public:
+ PPCScoreboardHazardRecognizer(const InstrItineraryData *ItinData,
+ const ScheduleDAG *DAG_) :
+ ScoreboardHazardRecognizer(ItinData, DAG_), DAG(DAG_) {}
+
+ virtual HazardType getHazardType(SUnit *SU, int Stalls);
+ virtual void EmitInstruction(SUnit *SU);
+ virtual void AdvanceCycle();
+ virtual void Reset();
+};
+
/// PPCHazardRecognizer970 - This class defines a finite state automata that
/// models the dispatch logic on the PowerPC 970 (aka G5) processor. This
/// promotes good dispatch group formation and implements noop insertion to
@@ -42,8 +58,9 @@ class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
//
// This is null if we haven't seen a store yet. We keep track of both
// operands of the store here, since we support [r+r] and [r+i] addressing.
- SDValue StorePtr1[4], StorePtr2[4];
- unsigned StoreSize[4];
+ const Value *StoreValue[4];
+ int64_t StoreOffset[4];
+ uint64_t StoreSize[4];
unsigned NumStores;
public:
@@ -51,6 +68,7 @@ public:
virtual HazardType getHazardType(SUnit *SU, int Stalls);
virtual void EmitInstruction(SUnit *SU);
virtual void AdvanceCycle();
+ virtual void Reset();
private:
/// EndDispatchGroup - Called when we are finishing a new dispatch group.
@@ -63,8 +81,8 @@ private:
bool &isFirst, bool &isSingle,bool &isCracked,
bool &isLoad, bool &isStore);
- bool isLoadOfStoredAddress(unsigned LoadSize,
- SDValue Ptr1, SDValue Ptr2) const;
+ bool isLoadOfStoredAddress(uint64_t LoadSize, int64_t LoadOffset,
+ const Value *LoadValue) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 6f204cc..5a04888 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -18,7 +18,6 @@
#include "MCTargetDesc/PPCPredicates.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
@@ -211,13 +210,13 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
// Find all return blocks, outputting a restore in each epilog.
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
- if (!BB->empty() && BB->back().getDesc().isReturn()) {
+ if (!BB->empty() && BB->back().isReturn()) {
IP = BB->end(); --IP;
// Skip over all terminator instructions, which are part of the return
// sequence.
MachineBasicBlock::iterator I2 = IP;
- while (I2 != BB->begin() && (--I2)->getDesc().isTerminator())
+ while (I2 != BB->begin() && (--I2)->isTerminator())
IP = I2;
// Emit: MTVRSAVE InVRSave
@@ -378,8 +377,8 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
APInt LKZ, LKO, RKZ, RKO;
- CurDAG->ComputeMaskedBits(Op0, APInt::getAllOnesValue(32), LKZ, LKO);
- CurDAG->ComputeMaskedBits(Op1, APInt::getAllOnesValue(32), RKZ, RKO);
+ CurDAG->ComputeMaskedBits(Op0, LKZ, LKO);
+ CurDAG->ComputeMaskedBits(Op1, RKZ, RKO);
unsigned TargetMask = LKZ.getZExtValue();
unsigned InsertMask = RKZ.getZExtValue();
@@ -603,7 +602,6 @@ static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert, int &Other) {
case ISD::SETULT: return 0;
case ISD::SETUGT: return 1;
}
- return 0;
}
SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
@@ -1067,7 +1065,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDValue Target = N->getOperand(1);
unsigned Opc = Target.getValueType() == MVT::i32 ? PPC::MTCTR : PPC::MTCTR8;
unsigned Reg = Target.getValueType() == MVT::i32 ? PPC::BCTR : PPC::BCTR8;
- Chain = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Target,
+ Chain = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Target,
Chain), 0);
return CurDAG->SelectNodeTo(N, Reg, MVT::Other, Chain);
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index d6b8a9e..3b24951 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -16,26 +16,24 @@
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "MCTargetDesc/PPCPredicates.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Intrinsics.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Constants.h"
-#include "llvm/Function.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/DerivedTypes.h"
+#include "llvm/Target/TargetOptions.h"
using namespace llvm;
static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
@@ -104,6 +102,13 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// from FP_ROUND: that rounds to nearest, this rounds to zero.
setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
+  // We do not currently implement these libm operations for PowerPC.
+ setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
+ setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
+ setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
+ setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
+ setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
+
// PowerPC has no SREM/UREM instructions
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
@@ -147,9 +152,13 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
// PowerPC does not have ROTR
setOperationAction(ISD::ROTR, MVT::i32 , Expand);
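
The *_ZERO_UNDEF opcodes marked Expand above are variants whose result is undefined for a zero input; expanding them falls back to the fully defined count. A standalone sketch of the semantics being assumed:

#include <cstdint>

// cttz(0) is defined as the bit width, while cttz_zero_undef(0) is
// undefined -- letting targets pick a cheaper sequence when the input is
// known nonzero. This is the defined fallback:
static unsigned cttz32(uint32_t X) {
  if (X == 0) return 32;
  unsigned N = 0;
  for (; (X & 1) == 0; X >>= 1) ++N;
  return N;
}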
@@ -217,11 +226,23 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
- // VAARG is custom lowered with the 32-bit SVR4 ABI.
- if (TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
- && !TM.getSubtarget<PPCSubtarget>().isPPC64()) {
- setOperationAction(ISD::VAARG, MVT::Other, Custom);
- setOperationAction(ISD::VAARG, MVT::i64, Custom);
+ if (TM.getSubtarget<PPCSubtarget>().isSVR4ABI()) {
+ if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
+ // VAARG always uses double-word chunks, so promote anything smaller.
+ setOperationAction(ISD::VAARG, MVT::i1, Promote);
+ AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
+ setOperationAction(ISD::VAARG, MVT::i8, Promote);
+ AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
+ setOperationAction(ISD::VAARG, MVT::i16, Promote);
+ AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
+ setOperationAction(ISD::VAARG, MVT::i32, Promote);
+ AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ } else {
+ // VAARG is custom lowered with the 32-bit SVR4 ABI.
+ setOperationAction(ISD::VAARG, MVT::Other, Custom);
+ setOperationAction(ISD::VAARG, MVT::i64, Custom);
+ }
} else
setOperationAction(ISD::VAARG, MVT::Other, Expand);
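
The PPC64 branch exists because the 64-bit SVR4 va_arg area is laid out in 8-byte doublewords, so promoting i1/i8/i16/i32 reads to i64 makes each one consume exactly one slot. A hypothetical illustration of the behavior this preserves:

#include <cstdarg>

// Each small integer pulled off the list advances it by one doubleword
// on PPC64 SVR4; values are promoted at the call site and re-narrowed.
static int sumBytes(int N, ...) {
  va_list AP;
  va_start(AP, N);
  int Sum = 0;
  for (int I = 0; I < N; ++I)
    Sum += static_cast<char>(va_arg(AP, int)); // read promoted, narrow back
  va_end(AP);
  return Sum;
}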
@@ -333,7 +354,9 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::CTPOP, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
}
// We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
@@ -366,6 +389,9 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
}
+ if (TM.getSubtarget<PPCSubtarget>().has64BitSupport())
+ setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
+
setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
@@ -408,6 +434,8 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setInsertFencesForAtomic(true);
+ setSchedulingPreference(Sched::Hybrid);
+
computeRegisterProperties();
}
@@ -418,7 +446,16 @@ unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
// Darwin passes everything on 4 byte boundary.
if (TM.getSubtarget<PPCSubtarget>().isDarwin())
return 4;
- // FIXME SVR4 TBD
+
+ // 16byte and wider vectors are passed on 16byte boundary.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ if (VTy->getBitWidth() >= 128)
+ return 16;
+
+ // The rest is 8 on PPC64 and 4 on PPC32 boundary.
+ if (PPCSubTarget.isPPC64())
+ return 8;
+
return 4;
}
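
Stated as a standalone rule (illustrative helper, not patch code), the SVR4 by-value alignment introduced above is:

// 16-byte and wider vectors land on a 16-byte boundary; everything else
// follows the pointer size of the subtarget.
static unsigned byValTypeAlignment(bool IsDarwin, bool IsPPC64,
                                   unsigned VecBits /* 0 if not a vector */) {
  if (IsDarwin) return 4;               // Darwin: 4-byte boundary
  if (VecBits >= 128) return 16;
  return IsPPC64 ? 8 : 4;
}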
@@ -447,6 +484,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32";
case PPCISD::STD_32: return "PPCISD::STD_32";
case PPCISD::CALL_SVR4: return "PPCISD::CALL_SVR4";
+ case PPCISD::CALL_NOP_SVR4: return "PPCISD::CALL_NOP_SVR4";
case PPCISD::CALL_Darwin: return "PPCISD::CALL_Darwin";
case PPCISD::NOP: return "PPCISD::NOP";
case PPCISD::MTCTR: return "PPCISD::MTCTR";
@@ -822,14 +860,10 @@ bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
APInt LHSKnownZero, LHSKnownOne;
APInt RHSKnownZero, RHSKnownOne;
DAG.ComputeMaskedBits(N.getOperand(0),
- APInt::getAllOnesValue(N.getOperand(0)
- .getValueSizeInBits()),
LHSKnownZero, LHSKnownOne);
if (LHSKnownZero.getBoolValue()) {
DAG.ComputeMaskedBits(N.getOperand(1),
- APInt::getAllOnesValue(N.getOperand(1)
- .getValueSizeInBits()),
RHSKnownZero, RHSKnownOne);
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
@@ -884,10 +918,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
// (for better address arithmetic) if the LHS and RHS of the OR are
// provably disjoint.
APInt LHSKnownZero, LHSKnownOne;
- DAG.ComputeMaskedBits(N.getOperand(0),
- APInt::getAllOnesValue(N.getOperand(0)
- .getValueSizeInBits()),
- LHSKnownZero, LHSKnownOne);
+ DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
// If all of the bits are known zero on the LHS or RHS, the add won't
@@ -1000,10 +1031,7 @@ bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
// (for better address arithmetic) if the LHS and RHS of the OR are
// provably disjoint.
APInt LHSKnownZero, LHSKnownOne;
- DAG.ComputeMaskedBits(N.getOperand(0),
- APInt::getAllOnesValue(N.getOperand(0)
- .getValueSizeInBits()),
- LHSKnownZero, LHSKnownOne);
+ DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
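
The check above (here and in the matching hunks nearby) verifies that every bit set in imm is known zero on the LHS, so the OR can be treated as a carry-free add. Reduced to its essentials in a fixed-width sketch:

#include <cstdint>

// True iff imm's set bits all fall where the LHS is provably zero; then
// LHS + imm == LHS | imm and the addressing fold is safe.
static bool orIsCarryFreeAdd(uint64_t LHSKnownZero, uint64_t Imm) {
  return (LHSKnownZero | ~Imm) == ~0ULL;
}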
@@ -1223,7 +1251,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
// extra load to get the address of the global.
if (MOHiFlag & PPCII::MO_NLP_FLAG)
Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
return Ptr;
}
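
The recurring "false, false, 0" -> "false, false, false, 0" edits throughout this file track a widened SelectionDAG::getLoad in this LLVM version, which gained an isInvariant flag ahead of the alignment argument. The call shape assumed by these sites (3.1-era signature, default arguments omitted):

// Declaration sketch only; see llvm/CodeGen/SelectionDAG.h of this import.
SDValue getLoad(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
                MachinePointerInfo PtrInfo, bool isVolatile,
                bool isNonTemporal, bool isInvariant, unsigned Alignment);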
@@ -1319,11 +1347,13 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
// areas
SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false,
+ false, 0);
InChain = OverflowArea.getValue(1);
SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false,
+ false, 0);
InChain = RegSaveArea.getValue(1);
// select overflow_area if index > 8
@@ -1372,7 +1402,8 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
MachinePointerInfo(),
MVT::i32, false, false, 0);
- return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), false, false, 0);
+ return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
+ false, false, false, 0);
}
SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
@@ -1411,8 +1442,9 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
// Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
- false, false, false, false, 0, CallingConv::C, false,
- /*isReturnValueUsed=*/true,
+ false, false, false, false, 0, CallingConv::C,
+ /*isTailCall=*/false,
+ /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__trampoline_setup", PtrVT),
Args, DAG, dl);
@@ -1530,7 +1562,7 @@ static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State) {
- static const unsigned ArgRegs[] = {
+ static const uint16_t ArgRegs[] = {
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
@@ -1557,7 +1589,7 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State) {
- static const unsigned ArgRegs[] = {
+ static const uint16_t ArgRegs[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8
};
@@ -1581,8 +1613,8 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
/// GetFPR - Get the set of FP registers that should be allocated for arguments,
/// on Darwin.
-static const unsigned *GetFPR() {
- static const unsigned FPR[] = {
+static const uint16_t *GetFPR() {
+ static const uint16_t FPR[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
};
@@ -1663,7 +1695,8 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Potential tail calls could cause overwriting of argument stack slots.
- bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast));
+ bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
+ (CallConv == CallingConv::Fast));
unsigned PtrByteSize = 4;
// Assign locations to all of the incoming arguments.
@@ -1681,7 +1714,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// Arguments stored in registers.
if (VA.isRegLoc()) {
- TargetRegisterClass *RC;
+ const TargetRegisterClass *RC;
EVT ValVT = VA.getValVT();
switch (ValVT.getSimpleVT().SimpleTy) {
@@ -1721,7 +1754,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
MachinePointerInfo(),
- false, false, 0));
+ false, false, false, 0));
}
}
@@ -1762,13 +1795,13 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
- static const unsigned GPArgRegs[] = {
+ static const uint16_t GPArgRegs[] = {
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
- static const unsigned FPArgRegs[] = {
+ static const uint16_t FPArgRegs[] = {
PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
PPC::F8
};
@@ -1853,25 +1886,26 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
// Potential tail calls could cause overwriting of argument stack slots.
- bool isImmutable = !(GuaranteedTailCallOpt && (CallConv==CallingConv::Fast));
+ bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
+ (CallConv == CallingConv::Fast));
unsigned PtrByteSize = isPPC64 ? 8 : 4;
unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
// Area that is at least reserved in caller of this function.
unsigned MinReservedArea = ArgOffset;
- static const unsigned GPR_32[] = { // 32-bit registers.
+ static const uint16_t GPR_32[] = { // 32-bit registers.
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
- static const unsigned GPR_64[] = { // 64-bit registers.
+ static const uint16_t GPR_64[] = { // 64-bit registers.
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
- static const unsigned *FPR = GetFPR();
+ static const uint16_t *FPR = GetFPR();
- static const unsigned VR[] = {
+ static const uint16_t VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
@@ -1882,7 +1916,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
- const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
+ const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;
// In 32-bit non-varargs functions, the stack space for vectors is after the
// stack space for non-vectors. We do not use this space unless we have
@@ -1896,12 +1930,11 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
++ArgNo) {
EVT ObjectVT = Ins[ArgNo].VT;
- unsigned ObjSize = ObjectVT.getSizeInBits()/8;
ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
if (Flags.isByVal()) {
// ObjSize is the true size, ArgSize rounded up to multiple of regs.
- ObjSize = Flags.getByValSize();
+ unsigned ObjSize = Flags.getByValSize();
unsigned ArgSize =
((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
VecArgOffset += ArgSize;
@@ -2138,7 +2171,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
}
InVals.push_back(ArgVal);
@@ -2259,9 +2292,9 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
// Tail call needs the stack to be aligned.
- if (CC==CallingConv::Fast && GuaranteedTailCallOpt) {
- unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
- getStackAlignment();
+ if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){
+ unsigned TargetAlign = DAG.getMachineFunction().getTarget().
+ getFrameLowering()->getStackAlignment();
unsigned AlignMask = TargetAlign-1;
NumBytes = (NumBytes + AlignMask) & ~AlignMask;
}
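
The masking idiom above rounds NumBytes up to the stack alignment; with a power-of-two TargetAlign, e.g. NumBytes = 52 and TargetAlign = 16 yields 64. In isolation:

// Round Bytes up to the next multiple of Align (Align must be a power of
// two): add Align-1, then clear the low bits.
static unsigned alignTo(unsigned Bytes, unsigned Align) {
  unsigned Mask = Align - 1;
  return (Bytes + Mask) & ~Mask;
}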
@@ -2295,7 +2328,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
- if (!GuaranteedTailCallOpt)
+ if (!getTargetMachine().Options.GuaranteedTailCallOpt)
return false;
// Variable argument functions are not supported.
@@ -2443,7 +2476,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
LROpOut = getReturnAddrFrameIndex(DAG);
LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
Chain = SDValue(LROpOut.getNode(), 1);
// When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
@@ -2451,7 +2484,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
if (isDarwinABI) {
FPOpOut = getFramePointerFrameIndex(DAG);
FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
Chain = SDValue(FPOpOut.getNode(), 1);
}
}
@@ -2748,7 +2781,14 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
// the stack. Account for this here so these bytes can be pushed back on in
// PPCRegisterInfo::eliminateCallFramePseudoInstr.
int BytesCalleePops =
- (CallConv==CallingConv::Fast && GuaranteedTailCallOpt) ? NumBytes : 0;
+ (CallConv == CallingConv::Fast &&
+ getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
+
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
if (InFlag.getNode())
Ops.push_back(InFlag);
@@ -2776,9 +2816,6 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size());
}
- Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
- InFlag = Chain.getValue(1);
-
// Add a NOP immediately after the branch instruction when using the 64-bit
// SVR4 ABI. At link time, if caller and callee are in different modules and
// thus have different TOCs, the call will be replaced with a call to a stub
@@ -2787,8 +2824,9 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
// which restores the TOC of the caller from the TOC save slot of the current
// stack frame. If caller and callee belong to the same module (and have the
// same TOC), the NOP will remain unchanged.
+
+ bool needsTOCRestore = false;
if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) {
- SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
if (CallOpc == PPCISD::BCTRL_SVR4) {
// This is a call through a function pointer.
// Restore the caller TOC from the save area into R2.
@@ -2799,14 +2837,22 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
// since r2 is a reserved register (which prevents the register allocator
// from allocating it), resulting in an additional register being
// allocated and an unnecessary move instruction being generated.
- Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag);
- InFlag = Chain.getValue(1);
- } else {
+ needsTOCRestore = true;
+ } else if (CallOpc == PPCISD::CALL_SVR4) {
// Otherwise insert NOP.
- InFlag = DAG.getNode(PPCISD::NOP, dl, MVT::Glue, InFlag);
+ CallOpc = PPCISD::CALL_NOP_SVR4;
}
}
+ Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
+ InFlag = Chain.getValue(1);
+
+ if (needsTOCRestore) {
+ SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(BytesCalleePops, true),
InFlag);
@@ -2820,7 +2866,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
SDValue
PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -2864,7 +2910,8 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// and restoring the caller's stack pointer in this function's epilog. This is
// done because, by tail calling, the called function might overwrite the value
// in this function's (MF) stack pointer stack slot 0(SP).
- if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
+ if (getTargetMachine().Options.GuaranteedTailCallOpt &&
+ CallConv == CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
// Count how many bytes are to be pushed on the stack, including the linkage
@@ -3071,7 +3118,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// and restoring the caller's stack pointer in this function's epilog. This is
// done because, by tail calling, the called function might overwrite the value
// in this function's (MF) stack pointer stack slot 0(SP).
- if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
+ if (getTargetMachine().Options.GuaranteedTailCallOpt &&
+ CallConv == CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
unsigned nAltivecParamsAtEnd = 0;
@@ -3120,17 +3168,17 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
- static const unsigned GPR_32[] = { // 32-bit registers.
+ static const uint16_t GPR_32[] = { // 32-bit registers.
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
- static const unsigned GPR_64[] = { // 64-bit registers.
+ static const uint16_t GPR_64[] = { // 64-bit registers.
PPC::X3, PPC::X4, PPC::X5, PPC::X6,
PPC::X7, PPC::X8, PPC::X9, PPC::X10,
};
- static const unsigned *FPR = GetFPR();
+ static const uint16_t *FPR = GetFPR();
- static const unsigned VR[] = {
+ static const uint16_t VR[] = {
PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
};
@@ -3138,7 +3186,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
const unsigned NumFPRs = 13;
const unsigned NumVRs = array_lengthof(VR);
- const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
+ const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
@@ -3212,7 +3260,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
if (GPR_idx != NumGPRs) {
SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
ArgOffset += PtrByteSize;
@@ -3250,7 +3298,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Float varargs are always shadowed in available integer registers
if (GPR_idx != NumGPRs) {
SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false,
+ false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
@@ -3259,7 +3308,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
@@ -3308,7 +3357,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
if (VR_idx != NumVRs) {
SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
}
@@ -3319,7 +3368,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
DAG.getConstant(i, PtrVT));
SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
}
@@ -3483,7 +3532,7 @@ SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
// Load the old link SP.
SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
// Restore the stack pointer.
Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
@@ -3674,7 +3723,7 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
DAG.getConstant(4, FIPtr.getValueType()));
return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
}
SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
@@ -3718,7 +3767,7 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
Ops, 4, MVT::i64, MMO);
// Load the value as a double.
SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
// FCFID it and return it.
SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld);
@@ -3770,7 +3819,7 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SDValue Four = DAG.getConstant(4, PtrVT);
SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
// Transform as necessary
SDValue CWD1 =
@@ -4236,8 +4285,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// Check to see if this is a shuffle of 4-byte values. If so, we can use our
// perfect shuffle table to emit an optimal matching sequence.
- SmallVector<int, 16> PermMask;
- SVOp->getMask(PermMask);
+ ArrayRef<int> PermMask = SVOp->getMask();
unsigned PFIndexes[4];
bool isFourElementShuffle = true;
@@ -4441,7 +4489,7 @@ SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
false, false, 0);
// Load it out.
return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
}
SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
@@ -4549,7 +4597,6 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
}
- return SDValue();
}
void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
@@ -4559,8 +4606,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
DebugLoc dl = N->getDebugLoc();
switch (N->getOpcode()) {
default:
- assert(false && "Do not know how to custom type legalize this operation!");
- return;
+ llvm_unreachable("Do not know how to custom type legalize this operation!");
case ISD::VAARG: {
if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
|| TM.getSubtarget<PPCSubtarget>().isPPC64())
@@ -5461,12 +5507,11 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
//===----------------------------------------------------------------------===//
void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
- KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
+ KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
case PPCISD::LBRX: {
@@ -5700,7 +5745,7 @@ bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,Type *Ty) const{
return (V > -(1 << 16) && V < (1 << 16)-1);
}
-bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
+bool PPCTargetLowering::isLegalAddressImmediate(GlobalValue* GV) const {
return false;
}
@@ -5729,13 +5774,13 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
}
// Just load the return address off the stack.
SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- RetAddrFI, MachinePointerInfo(), false, false, 0);
+ RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
@@ -5749,7 +5794,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MFI->setFrameAddressIsTaken(true);
- bool is31 = (DisableFramePointerElim(MF) || MFI->hasVarSizedObjects()) &&
+ bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
+ MFI->hasVarSizedObjects()) &&
MFI->getStackSize() &&
!MF.getFunction()->hasFnAttr(Attribute::Naked);
unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
@@ -5758,7 +5804,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
PtrVT);
while (Depth--)
FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
- FrameAddr, MachinePointerInfo(), false, false, 0);
+ FrameAddr, MachinePointerInfo(), false, false,
+ false, 0);
return FrameAddr;
}
@@ -5774,7 +5821,7 @@ PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
/// means there isn't a need to check it against alignment requirement,
/// probably because the source does not need to be loaded. If
-/// 'NonScalarIntSafe' is true, that means it's safe to return a
+/// 'IsZeroVal' is true, that means it's safe to return a
/// non-scalar-integer type, e.g. empty string source, constant, or loaded
/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
@@ -5782,7 +5829,7 @@ PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
/// target-independent logic.
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
- bool NonScalarIntSafe,
+ bool IsZeroVal,
bool MemcpyStrSrc,
MachineFunction &MF) const {
if (this->PPCSubTarget.isPPC64()) {
@@ -5791,3 +5838,12 @@ EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
return MVT::i32;
}
}
+
+Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
+ unsigned Directive = PPCSubTarget.getDarwinDirective();
+ if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2)
+ return Sched::ILP;
+
+ return TargetLowering::getSchedulingPreference(N);
+}
+
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 430e45e..18eb072 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -15,10 +15,10 @@
#ifndef LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
#define LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/CodeGen/SelectionDAG.h"
#include "PPC.h"
#include "PPCSubtarget.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/CodeGen/SelectionDAG.h"
namespace llvm {
namespace PPCISD {
@@ -95,7 +95,9 @@ namespace llvm {
EXTSW_32,
/// CALL - A direct function call.
- CALL_Darwin, CALL_SVR4,
+ /// CALL_NOP_SVR4 is a call with the special NOP which follows 64-bit
+ /// SVR4 calls.
+ CALL_Darwin, CALL_SVR4, CALL_NOP_SVR4,
/// NOP - Special NOP which follows 64-bit SVR4 calls.
NOP,
@@ -279,6 +281,7 @@ namespace llvm {
bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
SelectionDAG &DAG) const;
+ Sched::Preference getSchedulingPreference(SDNode *N) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
@@ -293,7 +296,6 @@ namespace llvm {
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -353,7 +355,7 @@ namespace llvm {
/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
/// means there isn't a need to check it against alignment requirement,
/// probably because the source does not need to be loaded. If
- /// 'NonScalarIntSafe' is true, that means it's safe to return a
+ /// 'IsZeroVal' is true, that means it's safe to return a
/// non-scalar-integer type, e.g. empty string source, constant, or loaded
/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
@@ -361,7 +363,7 @@ namespace llvm {
/// target-independent logic.
virtual EVT
getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
- bool NonScalarIntSafe, bool MemcpyStrSrc,
+ bool IsZeroVal, bool MemcpyStrSrc,
MachineFunction &MF) const;
private:
@@ -437,8 +439,8 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
+ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -472,21 +474,21 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
SDValue
- LowerCall_Darwin(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
+ LowerCall_Darwin(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue
- LowerCall_SVR4(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
+ LowerCall_SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool isTailCall,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
};
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index e88ad37..7f67a41 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -1,10 +1,10 @@
-//===- PPCInstr64Bit.td - The PowerPC 64-bit Support -------*- tablegen -*-===//
-//
+//===-- PPCInstr64Bit.td - The PowerPC 64-bit Support ------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the PowerPC 64-bit instructions. These patterns are used
@@ -64,13 +64,7 @@ let Defs = [LR8] in
PPC970_Unit_BRU;
// Darwin ABI Calls.
-let isCall = 1, PPC970_Unit = 7,
- // All calls clobber the PPC64 non-callee saved registers.
- Defs = [X0,X2,X3,X4,X5,X6,X7,X8,X9,X10,X11,X12,
- F0,F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11,F12,F13,
- V0,V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16,V17,V18,V19,
- LR8,CTR8,
- CR0,CR1,CR5,CR6,CR7,CARRY] in {
+let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL8_Darwin : IForm<18, 0, 1,
@@ -90,23 +84,29 @@ let isCall = 1, PPC970_Unit = 7,
// ELF 64 ABI Calls = Darwin ABI Calls
// Used to define BL8_ELF and BLA8_ELF
-let isCall = 1, PPC970_Unit = 7,
- // All calls clobber the PPC64 non-callee saved registers.
- Defs = [X0,X2,X3,X4,X5,X6,X7,X8,X9,X10,X11,X12,
- F0,F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11,F12,F13,
- V0,V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16,V17,V18,V19,
- LR8,CTR8,
- CR0,CR1,CR5,CR6,CR7,CARRY] in {
+let isCall = 1, PPC970_Unit = 7, Defs = [LR8] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL8_ELF : IForm<18, 0, 1,
(outs), (ins calltarget:$func, variable_ops),
- "bl $func", BrB, []>; // See Pat patterns below.
+ "bl $func", BrB, []>; // See Pat patterns below.
+
+ let isCodeGenOnly = 1 in
+ def BL8_NOP_ELF : IForm_and_DForm_4_zero<18, 0, 1, 24,
+ (outs), (ins calltarget:$func, variable_ops),
+ "bl $func\n\tnop", BrB, []>;
+
def BLA8_ELF : IForm<18, 1, 1,
(outs), (ins aaddr:$func, variable_ops),
"bla $func", BrB, [(PPCcall_SVR4 (i64 imm:$func))]>;
+
+ let isCodeGenOnly = 1 in
+ def BLA8_NOP_ELF : IForm_and_DForm_4_zero<18, 1, 1, 24,
+ (outs), (ins aaddr:$func, variable_ops),
+ "bla $func\n\tnop", BrB,
+ [(PPCcall_nop_SVR4 (i64 imm:$func))]>;
}
- let Uses = [CTR8, RM] in {
+ let Uses = [X11, CTR8, RM] in {
def BCTRL8_ELF : XLForm_2_ext<19, 528, 20, 0, 1,
(outs), (ins variable_ops),
"bctrl", BrB,
@@ -123,8 +123,14 @@ def : Pat<(PPCcall_Darwin (i64 texternalsym:$dst)),
def : Pat<(PPCcall_SVR4 (i64 tglobaladdr:$dst)),
(BL8_ELF tglobaladdr:$dst)>;
+def : Pat<(PPCcall_nop_SVR4 (i64 tglobaladdr:$dst)),
+ (BL8_NOP_ELF tglobaladdr:$dst)>;
+
def : Pat<(PPCcall_SVR4 (i64 texternalsym:$dst)),
(BL8_ELF texternalsym:$dst)>;
+def : Pat<(PPCcall_nop_SVR4 (i64 texternalsym:$dst)),
+ (BL8_NOP_ELF texternalsym:$dst)>;
+
def : Pat<(PPCnop),
(NOP)>;
@@ -223,6 +229,18 @@ def : Pat<(PPCtc_return (i64 texternalsym:$dst), imm:$imm),
def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm),
(TCRETURNri8 CTRRC8:$dst, imm:$imm)>;
+// 64-bit CR instructions
+def MTCRF8 : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins G8RC:$rS),
+ "mtcrf $FXM, $rS", BrMCRX>,
+ PPC970_MicroCode, PPC970_Unit_CRU;
+
+def MFCR8pseud: XFXForm_3<31, 19, (outs G8RC:$rT), (ins crbitm:$FXM),
+ "", SprMFCR>,
+ PPC970_MicroCode, PPC970_Unit_CRU;
+
+def MFCR8 : XFXForm_3<31, 19, (outs G8RC:$rT), (ins),
+ "mfcr $rT", SprMFCR>,
+ PPC970_MicroCode, PPC970_Unit_CRU;
//===----------------------------------------------------------------------===//
// 64-bit SPR manipulation instrs.
@@ -469,6 +487,12 @@ def RLDICR : MDForm_1<30, 1,
(outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$ME),
"rldicr $rA, $rS, $SH, $ME", IntRotateD,
[]>, isPPC64;
+
+def RLWINM8 : MForm_2<21,
+ (outs G8RC:$rA), (ins G8RC:$rS, u5imm:$SH, u5imm:$MB, u5imm:$ME),
+ "rlwinm $rA, $rS, $SH, $MB, $ME", IntGeneral,
+ []>;
+
} // End FXU Operations.
@@ -500,7 +524,7 @@ def LWAX : XForm_1<31, 341, (outs G8RC:$rD), (ins memrr:$src),
let mayLoad = 1 in
def LHAU8 : DForm_1a<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp,
ptr_rc:$rA),
- "lhau $rD, $disp($rA)", LdStGeneral,
+ "lhau $rD, $disp($rA)", LdStLoad,
[]>, RegConstraint<"$rA = $ea_result">,
NoEncode<"$ea_result">;
// NO LWAU!
@@ -510,38 +534,38 @@ def LHAU8 : DForm_1a<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp
// Zero extending loads.
let canFoldAsLoad = 1, PPC970_Unit = 2 in {
def LBZ8 : DForm_1<34, (outs G8RC:$rD), (ins memri:$src),
- "lbz $rD, $src", LdStGeneral,
+ "lbz $rD, $src", LdStLoad,
[(set G8RC:$rD, (zextloadi8 iaddr:$src))]>;
def LHZ8 : DForm_1<40, (outs G8RC:$rD), (ins memri:$src),
- "lhz $rD, $src", LdStGeneral,
+ "lhz $rD, $src", LdStLoad,
[(set G8RC:$rD, (zextloadi16 iaddr:$src))]>;
def LWZ8 : DForm_1<32, (outs G8RC:$rD), (ins memri:$src),
- "lwz $rD, $src", LdStGeneral,
+ "lwz $rD, $src", LdStLoad,
[(set G8RC:$rD, (zextloadi32 iaddr:$src))]>, isPPC64;
def LBZX8 : XForm_1<31, 87, (outs G8RC:$rD), (ins memrr:$src),
- "lbzx $rD, $src", LdStGeneral,
+ "lbzx $rD, $src", LdStLoad,
[(set G8RC:$rD, (zextloadi8 xaddr:$src))]>;
def LHZX8 : XForm_1<31, 279, (outs G8RC:$rD), (ins memrr:$src),
- "lhzx $rD, $src", LdStGeneral,
+ "lhzx $rD, $src", LdStLoad,
[(set G8RC:$rD, (zextloadi16 xaddr:$src))]>;
def LWZX8 : XForm_1<31, 23, (outs G8RC:$rD), (ins memrr:$src),
- "lwzx $rD, $src", LdStGeneral,
+ "lwzx $rD, $src", LdStLoad,
[(set G8RC:$rD, (zextloadi32 xaddr:$src))]>;
// Update forms.
let mayLoad = 1 in {
def LBZU8 : DForm_1<35, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lbzu $rD, $addr", LdStGeneral,
+ "lbzu $rD, $addr", LdStLoad,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHZU8 : DForm_1<41, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lhzu $rD, $addr", LdStGeneral,
+ "lhzu $rD, $addr", LdStLoad,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LWZU8 : DForm_1<33, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lwzu $rD, $addr", LdStGeneral,
+ "lwzu $rD, $addr", LdStLoad,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
}
@@ -557,7 +581,8 @@ def LDtoc: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg),
"",
[(set G8RC:$rD,
(PPCtoc_entry tglobaladdr:$disp, G8RC:$reg))]>, isPPC64;
-
+
+let hasSideEffects = 1 in {
let RST = 2, DS_RA = 0 in // FIXME: Should be a pseudo.
def LDinto_toc: DSForm_1<58, 0, (outs), (ins G8RC:$reg),
"ld 2, 8($reg)", LdStLD,
@@ -567,6 +592,7 @@ let RST = 2, DS_RA = 0 in // FIXME: Should be a pseudo.
def LDtoc_restore : DSForm_1<58, 0, (outs), (ins),
"ld 2, 40(1)", LdStLD,
[(PPCtoc_restore)]>, isPPC64;
+}
def LDX : XForm_1<31, 21, (outs G8RC:$rD), (ins memrr:$src),
"ldx $rD, $src", LdStLD,
[(set G8RC:$rD, (load xaddr:$src))]>, isPPC64;
@@ -587,24 +613,24 @@ def : Pat<(PPCload xaddr:$src),
let PPC970_Unit = 2 in {
// Truncating stores.
def STB8 : DForm_1<38, (outs), (ins G8RC:$rS, memri:$src),
- "stb $rS, $src", LdStGeneral,
+ "stb $rS, $src", LdStStore,
[(truncstorei8 G8RC:$rS, iaddr:$src)]>;
def STH8 : DForm_1<44, (outs), (ins G8RC:$rS, memri:$src),
- "sth $rS, $src", LdStGeneral,
+ "sth $rS, $src", LdStStore,
[(truncstorei16 G8RC:$rS, iaddr:$src)]>;
def STW8 : DForm_1<36, (outs), (ins G8RC:$rS, memri:$src),
- "stw $rS, $src", LdStGeneral,
+ "stw $rS, $src", LdStStore,
[(truncstorei32 G8RC:$rS, iaddr:$src)]>;
def STBX8 : XForm_8<31, 215, (outs), (ins G8RC:$rS, memrr:$dst),
- "stbx $rS, $dst", LdStGeneral,
+ "stbx $rS, $dst", LdStStore,
[(truncstorei8 G8RC:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STHX8 : XForm_8<31, 407, (outs), (ins G8RC:$rS, memrr:$dst),
- "sthx $rS, $dst", LdStGeneral,
+ "sthx $rS, $dst", LdStStore,
[(truncstorei16 G8RC:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STWX8 : XForm_8<31, 151, (outs), (ins G8RC:$rS, memrr:$dst),
- "stwx $rS, $dst", LdStGeneral,
+ "stwx $rS, $dst", LdStStore,
[(truncstorei32 G8RC:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
// Normal 8-byte stores.
@@ -621,14 +647,14 @@ let PPC970_Unit = 2 in {
def STBU8 : DForm_1a<38, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stbu $rS, $ptroff($ptrreg)", LdStGeneral,
+ "stbu $rS, $ptroff($ptrreg)", LdStStore,
[(set ptr_rc:$ea_res,
(pre_truncsti8 G8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STHU8 : DForm_1a<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "sthu $rS, $ptroff($ptrreg)", LdStGeneral,
+ "sthu $rS, $ptroff($ptrreg)", LdStStore,
[(set ptr_rc:$ea_res,
(pre_truncsti16 G8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index 256370f..6c0f3d3 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -1,10 +1,10 @@
-//===- PPCInstrAltivec.td - The PowerPC Altivec Extension --*- tablegen -*-===//
-//
+//===-- PPCInstrAltivec.td - The PowerPC Altivec Extension -*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
@@ -188,85 +188,85 @@ class VX2_Int<bits<11> xo, string opc, Intrinsic IntID>
def DSS : DSS_Form<822, (outs),
(ins u5imm:$ZERO0, u5imm:$STRM,u5imm:$ZERO1,u5imm:$ZERO2),
- "dss $STRM", LdStGeneral /*FIXME*/, []>;
+ "dss $STRM", LdStLoad /*FIXME*/, []>;
def DSSALL : DSS_Form<822, (outs),
(ins u5imm:$ONE, u5imm:$ZERO0,u5imm:$ZERO1,u5imm:$ZERO2),
- "dssall", LdStGeneral /*FIXME*/, []>;
+ "dssall", LdStLoad /*FIXME*/, []>;
def DST : DSS_Form<342, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
- "dst $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+ "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
def DSTT : DSS_Form<342, (outs),
(ins u5imm:$ONE, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
- "dstt $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+ "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
def DSTST : DSS_Form<374, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
- "dstst $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+ "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
def DSTSTT : DSS_Form<374, (outs),
(ins u5imm:$ONE, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
- "dststt $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+ "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
def DST64 : DSS_Form<342, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, G8RC:$rA, GPRC:$rB),
- "dst $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+ "dst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
def DSTT64 : DSS_Form<342, (outs),
(ins u5imm:$ONE, u5imm:$STRM, G8RC:$rA, GPRC:$rB),
- "dstt $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+ "dstt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
def DSTST64 : DSS_Form<374, (outs),
(ins u5imm:$ZERO, u5imm:$STRM, G8RC:$rA, GPRC:$rB),
- "dstst $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+ "dstst $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
def DSTSTT64 : DSS_Form<374, (outs),
(ins u5imm:$ONE, u5imm:$STRM, G8RC:$rA, GPRC:$rB),
- "dststt $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+ "dststt $rA, $rB, $STRM", LdStLoad /*FIXME*/, []>;
def MFVSCR : VXForm_4<1540, (outs VRRC:$vD), (ins),
- "mfvscr $vD", LdStGeneral,
+ "mfvscr $vD", LdStStore,
[(set VRRC:$vD, (int_ppc_altivec_mfvscr))]>;
def MTVSCR : VXForm_5<1604, (outs), (ins VRRC:$vB),
- "mtvscr $vB", LdStGeneral,
+ "mtvscr $vB", LdStLoad,
[(int_ppc_altivec_mtvscr VRRC:$vB)]>;
let canFoldAsLoad = 1, PPC970_Unit = 2 in { // Loads.
def LVEBX: XForm_1<31, 7, (outs VRRC:$vD), (ins memrr:$src),
- "lvebx $vD, $src", LdStGeneral,
+ "lvebx $vD, $src", LdStLoad,
[(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31, 39, (outs VRRC:$vD), (ins memrr:$src),
- "lvehx $vD, $src", LdStGeneral,
+ "lvehx $vD, $src", LdStLoad,
[(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31, 71, (outs VRRC:$vD), (ins memrr:$src),
- "lvewx $vD, $src", LdStGeneral,
+ "lvewx $vD, $src", LdStLoad,
[(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX : XForm_1<31, 103, (outs VRRC:$vD), (ins memrr:$src),
- "lvx $vD, $src", LdStGeneral,
+ "lvx $vD, $src", LdStLoad,
[(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (outs VRRC:$vD), (ins memrr:$src),
- "lvxl $vD, $src", LdStGeneral,
+ "lvxl $vD, $src", LdStLoad,
[(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}
def LVSL : XForm_1<31, 6, (outs VRRC:$vD), (ins memrr:$src),
- "lvsl $vD, $src", LdStGeneral,
+ "lvsl $vD, $src", LdStLoad,
[(set VRRC:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
PPC970_Unit_LSU;
def LVSR : XForm_1<31, 38, (outs VRRC:$vD), (ins memrr:$src),
- "lvsr $vD, $src", LdStGeneral,
+ "lvsr $vD, $src", LdStLoad,
[(set VRRC:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
PPC970_Unit_LSU;
let PPC970_Unit = 2 in { // Stores.
def STVEBX: XForm_8<31, 135, (outs), (ins VRRC:$rS, memrr:$dst),
- "stvebx $rS, $dst", LdStGeneral,
+ "stvebx $rS, $dst", LdStStore,
[(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (outs), (ins VRRC:$rS, memrr:$dst),
- "stvehx $rS, $dst", LdStGeneral,
+ "stvehx $rS, $dst", LdStStore,
[(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (outs), (ins VRRC:$rS, memrr:$dst),
- "stvewx $rS, $dst", LdStGeneral,
+ "stvewx $rS, $dst", LdStStore,
[(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>;
def STVX : XForm_8<31, 231, (outs), (ins VRRC:$rS, memrr:$dst),
- "stvx $rS, $dst", LdStGeneral,
+ "stvx $rS, $dst", LdStStore,
[(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (outs), (ins VRRC:$rS, memrr:$dst),
- "stvxl $rS, $dst", LdStGeneral,
+ "stvxl $rS, $dst", LdStStore,
[(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>;
}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
index 84a15b1..d8e4b2b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrFormats.td
@@ -1,10 +1,10 @@
//===- PowerPCInstrFormats.td - PowerPC Instruction Formats --*- tablegen -*-=//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -51,6 +51,36 @@ class PPC970_Unit_VALU { bits<3> PPC970_Unit = 5; }
class PPC970_Unit_VPERM { bits<3> PPC970_Unit = 6; }
class PPC970_Unit_BRU { bits<3> PPC970_Unit = 7; }
+// Two joined instructions; used to emit two adjacent instructions as one.
+// The itinerary from the first instruction is used for scheduling and
+// classification.
+class I2<bits<6> opcode1, bits<6> opcode2, dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin>
+ : Instruction {
+ field bits<64> Inst;
+
+ bit PPC64 = 0; // Default value, override with isPPC64
+
+ let Namespace = "PPC";
+ let Inst{0-5} = opcode1;
+ let Inst{32-37} = opcode2;
+ let OutOperandList = OOL;
+ let InOperandList = IOL;
+ let AsmString = asmstr;
+ let Itinerary = itin;
+
+ bits<1> PPC970_First = 0;
+ bits<1> PPC970_Single = 0;
+ bits<1> PPC970_Cracked = 0;
+ bits<3> PPC970_Unit = 0;
+
+ /// These fields correspond to the fields in PPCInstrInfo.h. Any changes to
+ /// these must be reflected there! See comments there for what these are.
+ let TSFlags{0} = PPC970_First;
+ let TSFlags{1} = PPC970_Single;
+ let TSFlags{2} = PPC970_Cracked;
+ let TSFlags{5-3} = PPC970_Unit;
+}
// 1.7.1 I-Form
class IForm<bits<6> opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr,
@@ -164,6 +194,35 @@ class DForm_4_zero<bits<6> opcode, dag OOL, dag IOL, string asmstr,
let Addr = 0;
}
+class IForm_and_DForm_1<bits<6> opcode1, bit aa, bit lk, bits<6> opcode2,
+ dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : I2<opcode1, opcode2, OOL, IOL, asmstr, itin> {
+ bits<5> A;
+ bits<21> Addr;
+
+ let Pattern = pattern;
+ bits<24> LI;
+
+ let Inst{6-29} = LI;
+ let Inst{30} = aa;
+ let Inst{31} = lk;
+
+ let Inst{38-42} = A;
+ let Inst{43-47} = Addr{20-16}; // Base Reg
+ let Inst{48-63} = Addr{15-0}; // Displacement
+}
+
+// This is used to emit BL8+NOP.
+class IForm_and_DForm_4_zero<bits<6> opcode1, bit aa, bit lk, bits<6> opcode2,
+ dag OOL, dag IOL, string asmstr,
+ InstrItinClass itin, list<dag> pattern>
+ : IForm_and_DForm_1<opcode1, aa, lk, opcode2,
+ OOL, IOL, asmstr, itin, pattern> {
+ let A = 0;
+ let Addr = 0;
+}
+
class DForm_5<bits<6> opcode, dag OOL, dag IOL, string asmstr,
InstrItinClass itin>
: I<opcode, OOL, IOL, asmstr, itin> {
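
A note on the joined I2 form added above: PowerPC numbers bits from the most significant end, so Inst{0-5} and Inst{32-37} are the primary opcode fields of the first and second 32-bit words of the pair. Below is a standalone sketch of that layout, using bl (18) and ori (24, the nop encoding) purely as illustrative opcodes; it is not part of the patch.

#include <cassert>
#include <cstdint>

// Pack two 6-bit primary opcodes into a 64-bit instruction pair using
// big-endian bit numbering (PPC bit 0 == MSB), as the I2 class describes.
static uint64_t packI2(uint64_t opcode1, uint64_t opcode2) {
  uint64_t Inst = 0;
  Inst |= (opcode1 & 0x3F) << (63 - 5);  // Inst{0-5}: first word's opcode
  Inst |= (opcode2 & 0x3F) << (63 - 37); // Inst{32-37}: second word's opcode
  return Inst;
}

int main() {
  uint64_t Inst = packI2(18, 24);        // bl + ori (nop)
  assert((Inst >> 58) == 18);            // top 6 bits of the first word
  assert(((Inst >> 26) & 0x3F) == 24);   // top 6 bits of the second word
  return 0;
}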
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 2bc109c..b45ada9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- PPCInstrInfo.cpp - PowerPC32 Instruction Information -----*- C++ -*-===//
+//===-- PPCInstrInfo.cpp - PowerPC Instruction Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -34,8 +34,8 @@
#include "PPCGenInstrInfo.inc"
namespace llvm {
-extern cl::opt<bool> EnablePPC32RS; // FIXME (64-bit): See PPCRegisterInfo.cpp.
-extern cl::opt<bool> EnablePPC64RS; // FIXME (64-bit): See PPCRegisterInfo.cpp.
+extern cl::opt<bool> DisablePPC32RS;
+extern cl::opt<bool> DisablePPC64RS;
}
using namespace llvm;
@@ -49,13 +49,32 @@ PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer(
const TargetMachine *TM,
const ScheduleDAG *DAG) const {
- // Should use subtarget info to pick the right hazard recognizer. For
- // now, always return a PPC970 recognizer.
- const TargetInstrInfo *TII = TM->getInstrInfo();
- assert(TII && "No InstrInfo?");
- return new PPCHazardRecognizer970(*TII);
+ unsigned Directive = TM->getSubtarget<PPCSubtarget>().getDarwinDirective();
+ if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2) {
+ const InstrItineraryData *II = TM->getInstrItineraryData();
+ return new PPCScoreboardHazardRecognizer(II, DAG);
+ }
+
+ return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
}
+/// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
+/// to use for this target when scheduling the DAG.
+ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
+ const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const {
+ unsigned Directive = TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+
+ // Most subtargets use a PPC970 recognizer.
+ if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2) {
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ assert(TII && "No InstrInfo?");
+
+ return new PPCHazardRecognizer970(*TII);
+ }
+
+ return new PPCScoreboardHazardRecognizer(II, DAG);
+}
unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
@@ -327,6 +346,7 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
BuildMI(MBB, I, DL, MCID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
}
+// This function returns true if a CR spill is necessary and false otherwise.
bool
PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
unsigned SrcReg, bool isKill,
@@ -358,7 +378,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
FrameIdx));
} else {
// FIXME: this spills LR immediately to memory in one step. To do this,
- // we use R11, which we know cannot be used in the prolog/epilog. This is
+ // we use X11, which we know cannot be used in the prolog/epilog. This is
// a hack.
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MFLR8), PPC::X11));
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD))
@@ -377,9 +397,8 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
getKillRegState(isKill)),
FrameIdx));
} else if (PPC::CRRCRegisterClass->hasSubClassEq(RC)) {
- if ((EnablePPC32RS && !TM.getSubtargetImpl()->isPPC64()) ||
- (EnablePPC64RS && TM.getSubtargetImpl()->isPPC64())) {
- // FIXME (64-bit): Enable
+ if ((!DisablePPC32RS && !TM.getSubtargetImpl()->isPPC64()) ||
+ (!DisablePPC64RS && TM.getSubtargetImpl()->isPPC64())) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_CR))
.addReg(SrcReg,
getKillRegState(isKill)),
@@ -392,11 +411,14 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
// We hack this on Darwin by reserving R2. It's probably broken on Linux
// at the moment.
+ bool is64Bit = TM.getSubtargetImpl()->isPPC64();
// We need to store the CR in the low 4-bits of the saved value. First,
// issue a MFCR to save all of the CRBits.
unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
- PPC::R2 : PPC::R0;
- NewMIs.push_back(BuildMI(MF, DL, get(PPC::MFCRpseud), ScratchReg)
+ (is64Bit ? PPC::X2 : PPC::R2) :
+ (is64Bit ? PPC::X0 : PPC::R0);
+ NewMIs.push_back(BuildMI(MF, DL, get(is64Bit ? PPC::MFCR8pseud :
+ PPC::MFCRpseud), ScratchReg)
.addReg(SrcReg, getKillRegState(isKill)));
// If the saved register wasn't CR0, shift the bits left so that they are
@@ -404,12 +426,14 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
if (SrcReg != PPC::CR0) {
unsigned ShiftBits = getPPCRegisterNumbering(SrcReg)*4;
// rlwinm scratch, scratch, ShiftBits, 0, 31.
- NewMIs.push_back(BuildMI(MF, DL, get(PPC::RLWINM), ScratchReg)
+ NewMIs.push_back(BuildMI(MF, DL, get(is64Bit ? PPC::RLWINM8 :
+ PPC::RLWINM), ScratchReg)
.addReg(ScratchReg).addImm(ShiftBits)
.addImm(0).addImm(31));
}
- NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(is64Bit ?
+ PPC::STW8 : PPC::STW))
.addReg(ScratchReg,
getKillRegState(isKill)),
FrameIdx));
@@ -486,15 +510,14 @@ PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
const MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FrameIdx)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
MachineMemOperand::MOStore,
MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
NewMIs.back()->addMemOperand(MF, MMO);
}
-void
+bool
PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
@@ -514,8 +537,8 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
FrameIdx));
} else {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD),
- PPC::R11), FrameIdx));
- NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTLR8)).addReg(PPC::R11));
+ PPC::X11), FrameIdx));
+ NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTLR8)).addReg(PPC::X11));
}
} else if (PPC::F8RCRegisterClass->hasSubClassEq(RC)) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFD), DestReg),
@@ -524,28 +547,37 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFS), DestReg),
FrameIdx));
} else if (PPC::CRRCRegisterClass->hasSubClassEq(RC)) {
- // FIXME: We need a scatch reg here. The trouble with using R0 is that
- // it's possible for the stack frame to be so big the save location is
- // out of range of immediate offsets, necessitating another register.
- // We hack this on Darwin by reserving R2. It's probably broken on Linux
- // at the moment.
- unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
- PPC::R2 : PPC::R0;
- NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
- ScratchReg), FrameIdx));
-
- // If the reloaded register isn't CR0, shift the bits right so that they are
- // in the right CR's slot.
- if (DestReg != PPC::CR0) {
- unsigned ShiftBits = getPPCRegisterNumbering(DestReg)*4;
- // rlwinm r11, r11, 32-ShiftBits, 0, 31.
- NewMIs.push_back(BuildMI(MF, DL, get(PPC::RLWINM), ScratchReg)
- .addReg(ScratchReg).addImm(32-ShiftBits).addImm(0)
- .addImm(31));
+ if ((!DisablePPC32RS && !TM.getSubtargetImpl()->isPPC64()) ||
+ (!DisablePPC64RS && TM.getSubtargetImpl()->isPPC64())) {
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
+ get(PPC::RESTORE_CR), DestReg)
+ , FrameIdx));
+ return true;
+ } else {
+ // FIXME: We need a scratch reg here. The trouble with using R0 is that
+ // it's possible for the stack frame to be so big the save location is
+ // out of range of immediate offsets, necessitating another register.
+ // We hack this on Darwin by reserving R2. It's probably broken on Linux
+ // at the moment.
+ unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
+ PPC::R2 : PPC::R0;
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
+ ScratchReg), FrameIdx));
+
+ // If the reloaded register isn't CR0, shift the bits right so that they are
+ // in the right CR's slot.
+ if (DestReg != PPC::CR0) {
+ unsigned ShiftBits = getPPCRegisterNumbering(DestReg)*4;
+ // rlwinm r11, r11, 32-ShiftBits, 0, 31.
+ NewMIs.push_back(BuildMI(MF, DL, get(PPC::RLWINM), ScratchReg)
+ .addReg(ScratchReg).addImm(32-ShiftBits).addImm(0)
+ .addImm(31));
+ }
+
+ NewMIs.push_back(BuildMI(MF, DL, get(TM.getSubtargetImpl()->isPPC64() ?
+ PPC::MTCRF8 : PPC::MTCRF), DestReg)
+ .addReg(ScratchReg));
}
-
- NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTCRF), DestReg)
- .addReg(ScratchReg));
} else if (PPC::CRBITRCRegisterClass->hasSubClassEq(RC)) {
unsigned Reg = 0;
@@ -590,6 +622,8 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
} else {
llvm_unreachable("Unknown regclass!");
}
+
+ return false;
}
void
@@ -602,14 +636,16 @@ PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
SmallVector<MachineInstr*, 4> NewMIs;
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
- LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);
+ if (LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs)) {
+ PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ FuncInfo->setSpillsCR();
+ }
for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
MBB.insert(MI, NewMIs[i]);
const MachineFrameInfo &MFI = *MF.getFrameInfo();
MachineMemOperand *MMO =
- MF.getMachineMemOperand(
- MachinePointerInfo(PseudoSourceValue::getFixedStack(FrameIdx)),
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
MachineMemOperand::MOLoad,
MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
@@ -649,6 +685,9 @@ unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
case PPC::GC_LABEL:
case PPC::DBG_VALUE:
return 0;
+ case PPC::BL8_NOP_ELF:
+ case PPC::BLA8_NOP_ELF:
+ return 8;
default:
return 4; // PowerPC instructions are all 4 bytes
}
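
An aside on the rlwinm arithmetic used in both paths above: spilling rotates CRn's 4-bit field left by 4*n into CR0's slot, and restoring rotates right by the same amount (rlwinm with mask 0,31 is a plain 32-bit rotate). The following self-contained sketch checks that round trip; crFieldNumber is a hypothetical stand-in for getPPCRegisterNumbering.

#include <cassert>
#include <cstdint>

// Stand-in for getPPCRegisterNumbering(PPC::CRn): the CR field index n.
static unsigned crFieldNumber(unsigned n) { return n; }

// rlwinm rA, rS, Amt, 0, 31 -- a 32-bit rotate left.
static uint32_t rotl32(uint32_t V, unsigned Amt) {
  Amt &= 31;
  return Amt ? (V << Amt) | (V >> (32 - Amt)) : V;
}

int main() {
  for (unsigned n = 0; n < 8; ++n) {
    unsigned ShiftBits = crFieldNumber(n) * 4;
    // Place a recognizable nibble in CRn's slot of the mfcr image.
    uint32_t mfcr = 0xAu << (28 - ShiftBits);
    uint32_t spilled = rotl32(mfcr, ShiftBits);          // spill path
    assert((spilled >> 28) == 0xA);                      // now in CR0's slot
    uint32_t restored = rotl32(spilled, 32 - ShiftBits); // restore path
    assert(restored == mfcr);                            // back in CRn's slot
  }
  return 0;
}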
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index 90bacc9..7d49aa1 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -1,4 +1,4 @@
-//===- PPCInstrInfo.h - PowerPC Instruction Information ---------*- C++ -*-===//
+//===-- PPCInstrInfo.h - PowerPC Instruction Information --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef POWERPC32_INSTRUCTIONINFO_H
-#define POWERPC32_INSTRUCTIONINFO_H
+#ifndef POWERPC_INSTRUCTIONINFO_H
+#define POWERPC_INSTRUCTIONINFO_H
#include "PPC.h"
-#include "llvm/Target/TargetInstrInfo.h"
#include "PPCRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "PPCGenInstrInfo.inc"
@@ -72,7 +72,7 @@ class PPCInstrInfo : public PPCGenInstrInfo {
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
- void LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
+ bool LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
@@ -88,6 +88,9 @@ public:
ScheduleHazardRecognizer *
CreateTargetHazardRecognizer(const TargetMachine *TM,
const ScheduleDAG *DAG) const;
+ ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+ const ScheduleDAG *DAG) const;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index f248b5b..748486c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -1,10 +1,10 @@
-//===- PPCInstrInfo.td - The PowerPC Instruction Set -------*- tablegen -*-===//
-//
+//===-- PPCInstrInfo.td - The PowerPC Instruction Set ------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the subset of the 32-bit PowerPC instruction set, as used
@@ -116,6 +116,9 @@ def PPCcall_Darwin : SDNode<"PPCISD::CALL_Darwin", SDT_PPCCall,
def PPCcall_SVR4 : SDNode<"PPCISD::CALL_SVR4", SDT_PPCCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
+def PPCcall_nop_SVR4 : SDNode<"PPCISD::CALL_NOP_SVR4", SDT_PPCCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
def PPCnop : SDNode<"PPCISD::NOP", SDT_PPCnop, [SDNPInGlue, SDNPOutGlue]>;
def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
@@ -349,10 +352,10 @@ def iaddroff : ComplexPattern<iPTR, 1, "SelectAddrImmOffs", [], []>;
//===----------------------------------------------------------------------===//
// PowerPC Instruction Predicate Definitions.
-def FPContractions : Predicate<"!NoExcessFPPrecision">;
+def FPContractions : Predicate<"!TM.Options.NoExcessFPPrecision">;
def In32BitMode : Predicate<"!PPCSubTarget.isPPC64()">;
def In64BitMode : Predicate<"PPCSubTarget.isPPC64()">;
-
+def IsBookE : Predicate<"PPCSubTarget.isBookE()">;
//===----------------------------------------------------------------------===//
// PowerPC Instruction Definitions.
@@ -399,7 +402,14 @@ let usesCustomInserter = 1, // Expanded after instruction selection.
// SPILL_CR - Indicate that we're dumping the CR register, so we'll need to
// scavenge a register for it.
-def SPILL_CR : Pseudo<(outs), (ins GPRC:$cond, memri:$F),
+let mayStore = 1 in
+def SPILL_CR : Pseudo<(outs), (ins CRRC:$cond, memri:$F),
+ "", []>;
+
+// RESTORE_CR - Indicate that we're restoring the CR register (previously
+// spilled), so we'll need to scavenge a register for it.
+let mayLoad = 1 in
+def RESTORE_CR : Pseudo<(outs CRRC:$cond), (ins memri:$F),
"", []>;
let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in {
@@ -431,13 +441,7 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
}
// Darwin ABI Calls.
-let isCall = 1, PPC970_Unit = 7,
- // All calls clobber the non-callee saved registers...
- Defs = [R0,R2,R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,
- F0,F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11,F12,F13,
- V0,V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16,V17,V18,V19,
- LR,CTR,
- CR0,CR1,CR5,CR6,CR7,CARRY] in {
+let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL_Darwin : IForm<18, 0, 1,
@@ -456,13 +460,7 @@ let isCall = 1, PPC970_Unit = 7,
}
// SVR4 ABI Calls.
-let isCall = 1, PPC970_Unit = 7,
- // All calls clobber the non-callee saved registers...
- Defs = [R0,R3,R4,R5,R6,R7,R8,R9,R10,R11,R12,
- F0,F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11,F12,F13,
- V0,V1,V2,V3,V4,V5,V6,V7,V8,V9,V10,V11,V12,V13,V14,V15,V16,V17,V18,V19,
- LR,CTR,
- CR0,CR1,CR5,CR6,CR7,CARRY] in {
+let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
// Convenient aliases for call instructions
let Uses = [RM] in {
def BL_SVR4 : IForm<18, 0, 1,
@@ -547,6 +545,9 @@ def DCBZL : DCB_Form<1014, 1, (outs), (ins memrr:$dst),
"dcbzl $dst", LdStDCBF, [(int_ppc_dcbzl xoaddr:$dst)]>,
PPC970_DGroup_Single;
+def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 1)),
+ (DCBT xoaddr:$dst)>;
+
// Atomic operations
let usesCustomInserter = 1 in {
let Defs = [CR0] in {
@@ -642,7 +643,7 @@ def STWCX : XForm_1<31, 150, (outs), (ins GPRC:$rS, memrr:$dst),
isDOT;
let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
-def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStGeneral, [(trap)]>;
+def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStLoad, [(trap)]>;
//===----------------------------------------------------------------------===//
// PPC32 Load Instructions.
@@ -651,17 +652,17 @@ def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStGeneral, [(trap)]>;
// Unindexed (r+i) Loads.
let canFoldAsLoad = 1, PPC970_Unit = 2 in {
def LBZ : DForm_1<34, (outs GPRC:$rD), (ins memri:$src),
- "lbz $rD, $src", LdStGeneral,
+ "lbz $rD, $src", LdStLoad,
[(set GPRC:$rD, (zextloadi8 iaddr:$src))]>;
def LHA : DForm_1<42, (outs GPRC:$rD), (ins memri:$src),
"lha $rD, $src", LdStLHA,
[(set GPRC:$rD, (sextloadi16 iaddr:$src))]>,
PPC970_DGroup_Cracked;
def LHZ : DForm_1<40, (outs GPRC:$rD), (ins memri:$src),
- "lhz $rD, $src", LdStGeneral,
+ "lhz $rD, $src", LdStLoad,
[(set GPRC:$rD, (zextloadi16 iaddr:$src))]>;
def LWZ : DForm_1<32, (outs GPRC:$rD), (ins memri:$src),
- "lwz $rD, $src", LdStGeneral,
+ "lwz $rD, $src", LdStLoad,
[(set GPRC:$rD, (load iaddr:$src))]>;
def LFS : DForm_1<48, (outs F4RC:$rD), (ins memri:$src),
@@ -675,22 +676,22 @@ def LFD : DForm_1<50, (outs F8RC:$rD), (ins memri:$src),
// Unindexed (r+i) Loads with Update (preinc).
let mayLoad = 1 in {
def LBZU : DForm_1<35, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lbzu $rD, $addr", LdStGeneral,
+ "lbzu $rD, $addr", LdStLoad,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHAU : DForm_1<43, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lhau $rD, $addr", LdStGeneral,
+ "lhau $rD, $addr", LdStLoad,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LHZU : DForm_1<41, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lhzu $rD, $addr", LdStGeneral,
+ "lhzu $rD, $addr", LdStLoad,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
def LWZU : DForm_1<33, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
- "lwzu $rD, $addr", LdStGeneral,
+ "lwzu $rD, $addr", LdStLoad,
[]>, RegConstraint<"$addr.reg = $ea_result">,
NoEncode<"$ea_result">;
@@ -710,25 +711,25 @@ def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr),
//
let canFoldAsLoad = 1, PPC970_Unit = 2 in {
def LBZX : XForm_1<31, 87, (outs GPRC:$rD), (ins memrr:$src),
- "lbzx $rD, $src", LdStGeneral,
+ "lbzx $rD, $src", LdStLoad,
[(set GPRC:$rD, (zextloadi8 xaddr:$src))]>;
def LHAX : XForm_1<31, 343, (outs GPRC:$rD), (ins memrr:$src),
"lhax $rD, $src", LdStLHA,
[(set GPRC:$rD, (sextloadi16 xaddr:$src))]>,
PPC970_DGroup_Cracked;
def LHZX : XForm_1<31, 279, (outs GPRC:$rD), (ins memrr:$src),
- "lhzx $rD, $src", LdStGeneral,
+ "lhzx $rD, $src", LdStLoad,
[(set GPRC:$rD, (zextloadi16 xaddr:$src))]>;
def LWZX : XForm_1<31, 23, (outs GPRC:$rD), (ins memrr:$src),
- "lwzx $rD, $src", LdStGeneral,
+ "lwzx $rD, $src", LdStLoad,
[(set GPRC:$rD, (load xaddr:$src))]>;
def LHBRX : XForm_1<31, 790, (outs GPRC:$rD), (ins memrr:$src),
- "lhbrx $rD, $src", LdStGeneral,
+ "lhbrx $rD, $src", LdStLoad,
[(set GPRC:$rD, (PPClbrx xoaddr:$src, i16))]>;
def LWBRX : XForm_1<31, 534, (outs GPRC:$rD), (ins memrr:$src),
- "lwbrx $rD, $src", LdStGeneral,
+ "lwbrx $rD, $src", LdStLoad,
[(set GPRC:$rD, (PPClbrx xoaddr:$src, i32))]>;
def LFSX : XForm_25<31, 535, (outs F4RC:$frD), (ins memrr:$src),
@@ -746,13 +747,13 @@ def LFDX : XForm_25<31, 599, (outs F8RC:$frD), (ins memrr:$src),
// Unindexed (r+i) Stores.
let PPC970_Unit = 2 in {
def STB : DForm_1<38, (outs), (ins GPRC:$rS, memri:$src),
- "stb $rS, $src", LdStGeneral,
+ "stb $rS, $src", LdStStore,
[(truncstorei8 GPRC:$rS, iaddr:$src)]>;
def STH : DForm_1<44, (outs), (ins GPRC:$rS, memri:$src),
- "sth $rS, $src", LdStGeneral,
+ "sth $rS, $src", LdStStore,
[(truncstorei16 GPRC:$rS, iaddr:$src)]>;
def STW : DForm_1<36, (outs), (ins GPRC:$rS, memri:$src),
- "stw $rS, $src", LdStGeneral,
+ "stw $rS, $src", LdStStore,
[(store GPRC:$rS, iaddr:$src)]>;
def STFS : DForm_1<52, (outs), (ins F4RC:$rS, memri:$dst),
"stfs $rS, $dst", LdStUX,
@@ -766,33 +767,33 @@ def STFD : DForm_1<54, (outs), (ins F8RC:$rS, memri:$dst),
let PPC970_Unit = 2 in {
def STBU : DForm_1a<39, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stbu $rS, $ptroff($ptrreg)", LdStGeneral,
+ "stbu $rS, $ptroff($ptrreg)", LdStStore,
[(set ptr_rc:$ea_res,
(pre_truncsti8 GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STHU : DForm_1a<45, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "sthu $rS, $ptroff($ptrreg)", LdStGeneral,
+ "sthu $rS, $ptroff($ptrreg)", LdStStore,
[(set ptr_rc:$ea_res,
(pre_truncsti16 GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STWU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins GPRC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stwu $rS, $ptroff($ptrreg)", LdStGeneral,
+ "stwu $rS, $ptroff($ptrreg)", LdStStore,
[(set ptr_rc:$ea_res, (pre_store GPRC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STFSU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F4RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stfsu $rS, $ptroff($ptrreg)", LdStGeneral,
+ "stfsu $rS, $ptroff($ptrreg)", LdStStore,
[(set ptr_rc:$ea_res, (pre_store F4RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
def STFDU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F8RC:$rS,
symbolLo:$ptroff, ptr_rc:$ptrreg),
- "stfdu $rS, $ptroff($ptrreg)", LdStGeneral,
+ "stfdu $rS, $ptroff($ptrreg)", LdStStore,
[(set ptr_rc:$ea_res, (pre_store F8RC:$rS, ptr_rc:$ptrreg,
iaddroff:$ptroff))]>,
RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">;
@@ -803,29 +804,29 @@ def STFDU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F8RC:$rS,
//
let PPC970_Unit = 2 in {
def STBX : XForm_8<31, 215, (outs), (ins GPRC:$rS, memrr:$dst),
- "stbx $rS, $dst", LdStGeneral,
+ "stbx $rS, $dst", LdStStore,
[(truncstorei8 GPRC:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STHX : XForm_8<31, 407, (outs), (ins GPRC:$rS, memrr:$dst),
- "sthx $rS, $dst", LdStGeneral,
+ "sthx $rS, $dst", LdStStore,
[(truncstorei16 GPRC:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
def STWX : XForm_8<31, 151, (outs), (ins GPRC:$rS, memrr:$dst),
- "stwx $rS, $dst", LdStGeneral,
+ "stwx $rS, $dst", LdStStore,
[(store GPRC:$rS, xaddr:$dst)]>,
PPC970_DGroup_Cracked;
let mayStore = 1 in {
def STWUX : XForm_8<31, 183, (outs), (ins GPRC:$rS, GPRC:$rA, GPRC:$rB),
- "stwux $rS, $rA, $rB", LdStGeneral,
+ "stwux $rS, $rA, $rB", LdStStore,
[]>;
}
def STHBRX: XForm_8<31, 918, (outs), (ins GPRC:$rS, memrr:$dst),
- "sthbrx $rS, $dst", LdStGeneral,
+ "sthbrx $rS, $dst", LdStStore,
[(PPCstbrx GPRC:$rS, xoaddr:$dst, i16)]>,
PPC970_DGroup_Cracked;
def STWBRX: XForm_8<31, 662, (outs), (ins GPRC:$rS, memrr:$dst),
- "stwbrx $rS, $dst", LdStGeneral,
+ "stwbrx $rS, $dst", LdStStore,
[(PPCstbrx GPRC:$rS, xoaddr:$dst, i32)]>,
PPC970_DGroup_Cracked;
@@ -1091,7 +1092,7 @@ def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs GPRC:$rT), (ins),
"mfspr $rT, 256", IntGeneral>,
PPC970_DGroup_First, PPC970_Unit_FXU;
-def MTCRF : XFXForm_5<31, 144, (outs), (ins crbitm:$FXM, GPRC:$rS),
+def MTCRF : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins GPRC:$rS),
"mtcrf $FXM, $rS", BrMCRX>,
PPC970_MicroCode, PPC970_Unit_CRU;
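
One functional change in this file deserves a note: the new pattern selects the generic prefetch node with rw=0 (read) and cache type 1 (data) to dcbt. As a rough illustration, a read prefetch in source code like the following should now lower to dcbt; the exact builtin arguments that produce those node operands are an assumption, not taken from the patch.

#include <cstddef>

long sumWithPrefetch(const int *a, std::size_t n) {
  long sum = 0;
  for (std::size_t i = 0; i < n; ++i) {
    // Read prefetch of data a few iterations ahead; expected to map to dcbt.
    __builtin_prefetch(&a[i + 16], /*rw=*/0, /*locality=*/1);
    sum += a[i];
  }
  return sum;
}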
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
index 4590f00..a6528c0 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.cpp
@@ -291,9 +291,10 @@ void PPC64CompilationCallback() {
}
#endif
-extern "C" void *PPCCompilationCallbackC(unsigned *StubCallAddrPlus4,
- unsigned *OrigCallAddrPlus4,
- bool is64Bit) {
+extern "C" {
+static void* LLVM_ATTRIBUTE_USED PPCCompilationCallbackC(unsigned *StubCallAddrPlus4,
+ unsigned *OrigCallAddrPlus4,
+ bool is64Bit) {
// Adjust the pointer to the address of the call instruction in the stub
// emitted by emitFunctionStub, rather than the instruction after it.
unsigned *StubCallAddr = StubCallAddrPlus4 - 1;
@@ -337,6 +338,7 @@ extern "C" void *PPCCompilationCallbackC(unsigned *StubCallAddrPlus4,
// stack after we restore all regs.
return Target;
}
+}
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h
index 47ead59..2f8243a 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCJITInfo.h
@@ -1,4 +1,4 @@
-//===- PPCJITInfo.h - PowerPC impl. of the JIT interface --------*- C++ -*-===//
+//===-- PPCJITInfo.h - PowerPC impl. of the JIT interface -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp b/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
index 33af426..276edcb 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -140,7 +140,7 @@ void llvm::LowerPPCMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
switch (MO.getType()) {
default:
MI->dump();
- assert(0 && "unknown operand type");
+ llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
assert(!MO.getSubReg() && "Subregs should be eliminated!");
MCOp = MCOperand::CreateReg(MO.getReg());
@@ -166,6 +166,8 @@ void llvm::LowerPPCMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
MCOp = GetSymbolRef(MO,AP.GetBlockAddressSymbol(MO.getBlockAddress()),AP,
isDarwin);
break;
+ case MachineOperand::MO_RegisterMask:
+ continue;
}
OutMI.addOperand(MCOp);
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp
new file mode 100644
index 0000000..6a0aec8
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.cpp
@@ -0,0 +1,15 @@
+//===-- PPCMachineFunctionInfo.cpp - Private data used for PowerPC --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCMachineFunctionInfo.h"
+
+using namespace llvm;
+
+void PPCFunctionInfo::anchor() { }
+
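
The new file above exists only to give PPCFunctionInfo an out-of-line virtual method, the usual vtable-anchor idiom. A generic standalone sketch of the pattern follows (the names are illustrative, not from the patch):

// Header: declare one virtual member that is never defined inline.
struct Info {
  virtual void anchor();   // pins the vtable to one object file
  virtual ~Info() {}
};

// Exactly one .cpp file: define it, so the vtable and type info are emitted
// here once rather than as weak copies in every translation unit that
// includes the header.
void Info::anchor() {}

int main() { Info I; (void)I; return 0; }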
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
index e2649c8..24caffa 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
@@ -21,7 +21,8 @@ namespace llvm {
/// PPCFunctionInfo - This class is derived from MachineFunctionInfo and holds
/// private PowerPC target-specific information for each MachineFunction.
class PPCFunctionInfo : public MachineFunctionInfo {
-private:
+ virtual void anchor();
+
/// FramePointerSaveIndex - Frame index of where the old frame pointer is
/// stored. Also used as an anchor for instructions that need to be altered
/// when using frame pointers (dyna_add, dyna_sub.)
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCPerfectShuffle.h b/contrib/llvm/lib/Target/PowerPC/PPCPerfectShuffle.h
index 3164e33..17b836d 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCPerfectShuffle.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCPerfectShuffle.h
@@ -1,4 +1,4 @@
-//===-- PPCPerfectShuffle.h - Altivec Perfect Shuffle Table ---------------===//
+//===-- PPCPerfectShuffle.h - Altivec Perfect Shuffle Table -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 2e90b7a..ef13571 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- PPCRegisterInfo.cpp - PowerPC Register Information -------*- C++ -*-===//
+//===-- PPCRegisterInfo.cpp - PowerPC Register Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,10 +13,10 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "reginfo"
+#include "PPCRegisterInfo.h"
#include "PPC.h"
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
-#include "PPCRegisterInfo.h"
#include "PPCFrameLowering.h"
#include "PPCSubtarget.h"
#include "llvm/CallingConv.h"
@@ -46,15 +46,14 @@
#define GET_REGINFO_TARGET_DESC
#include "PPCGenRegisterInfo.inc"
-// FIXME (64-bit): Eventually enable by default.
namespace llvm {
-cl::opt<bool> EnablePPC32RS("enable-ppc32-regscavenger",
+cl::opt<bool> DisablePPC32RS("disable-ppc32-regscavenger",
cl::init(false),
- cl::desc("Enable PPC32 register scavenger"),
+ cl::desc("Disable PPC32 register scavenger"),
cl::Hidden);
-cl::opt<bool> EnablePPC64RS("enable-ppc64-regscavenger",
+cl::opt<bool> DisablePPC64RS("disable-ppc64-regscavenger",
cl::init(false),
- cl::desc("Enable PPC64 register scavenger"),
+ cl::desc("Disable PPC64 register scavenger"),
cl::Hidden);
}
@@ -63,8 +62,8 @@ using namespace llvm;
// FIXME (64-bit): Should be inlined.
bool
PPCRegisterInfo::requiresRegisterScavenging(const MachineFunction &) const {
- return ((EnablePPC32RS && !Subtarget.isPPC64()) ||
- (EnablePPC64RS && Subtarget.isPPC64()));
+ return ((!DisablePPC32RS && !Subtarget.isPPC64()) ||
+ (!DisablePPC64RS && Subtarget.isPPC64()));
}
PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
@@ -99,122 +98,22 @@ PPCRegisterInfo::getPointerRegClass(unsigned Kind) const {
return &PPC::GPRCRegClass;
}
-const unsigned*
+const uint16_t*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- // 32-bit Darwin calling convention.
- static const unsigned Darwin32_CalleeSavedRegs[] = {
- PPC::R13, PPC::R14, PPC::R15,
- PPC::R16, PPC::R17, PPC::R18, PPC::R19,
- PPC::R20, PPC::R21, PPC::R22, PPC::R23,
- PPC::R24, PPC::R25, PPC::R26, PPC::R27,
- PPC::R28, PPC::R29, PPC::R30, PPC::R31,
-
- PPC::F14, PPC::F15, PPC::F16, PPC::F17,
- PPC::F18, PPC::F19, PPC::F20, PPC::F21,
- PPC::F22, PPC::F23, PPC::F24, PPC::F25,
- PPC::F26, PPC::F27, PPC::F28, PPC::F29,
- PPC::F30, PPC::F31,
-
- PPC::CR2, PPC::CR3, PPC::CR4,
- PPC::V20, PPC::V21, PPC::V22, PPC::V23,
- PPC::V24, PPC::V25, PPC::V26, PPC::V27,
- PPC::V28, PPC::V29, PPC::V30, PPC::V31,
-
- PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
- PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
- PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
-
- PPC::LR, 0
- };
-
- // 32-bit SVR4 calling convention.
- static const unsigned SVR4_CalleeSavedRegs[] = {
- PPC::R14, PPC::R15,
- PPC::R16, PPC::R17, PPC::R18, PPC::R19,
- PPC::R20, PPC::R21, PPC::R22, PPC::R23,
- PPC::R24, PPC::R25, PPC::R26, PPC::R27,
- PPC::R28, PPC::R29, PPC::R30, PPC::R31,
-
- PPC::F14, PPC::F15, PPC::F16, PPC::F17,
- PPC::F18, PPC::F19, PPC::F20, PPC::F21,
- PPC::F22, PPC::F23, PPC::F24, PPC::F25,
- PPC::F26, PPC::F27, PPC::F28, PPC::F29,
- PPC::F30, PPC::F31,
-
- PPC::CR2, PPC::CR3, PPC::CR4,
-
- PPC::VRSAVE,
-
- PPC::V20, PPC::V21, PPC::V22, PPC::V23,
- PPC::V24, PPC::V25, PPC::V26, PPC::V27,
- PPC::V28, PPC::V29, PPC::V30, PPC::V31,
-
- PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
- PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
- PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
-
- 0
- };
- // 64-bit Darwin calling convention.
- static const unsigned Darwin64_CalleeSavedRegs[] = {
- PPC::X14, PPC::X15,
- PPC::X16, PPC::X17, PPC::X18, PPC::X19,
- PPC::X20, PPC::X21, PPC::X22, PPC::X23,
- PPC::X24, PPC::X25, PPC::X26, PPC::X27,
- PPC::X28, PPC::X29, PPC::X30, PPC::X31,
-
- PPC::F14, PPC::F15, PPC::F16, PPC::F17,
- PPC::F18, PPC::F19, PPC::F20, PPC::F21,
- PPC::F22, PPC::F23, PPC::F24, PPC::F25,
- PPC::F26, PPC::F27, PPC::F28, PPC::F29,
- PPC::F30, PPC::F31,
-
- PPC::CR2, PPC::CR3, PPC::CR4,
- PPC::V20, PPC::V21, PPC::V22, PPC::V23,
- PPC::V24, PPC::V25, PPC::V26, PPC::V27,
- PPC::V28, PPC::V29, PPC::V30, PPC::V31,
-
- PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
- PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
- PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
-
- PPC::LR8, 0
- };
-
- // 64-bit SVR4 calling convention.
- static const unsigned SVR4_64_CalleeSavedRegs[] = {
- PPC::X14, PPC::X15,
- PPC::X16, PPC::X17, PPC::X18, PPC::X19,
- PPC::X20, PPC::X21, PPC::X22, PPC::X23,
- PPC::X24, PPC::X25, PPC::X26, PPC::X27,
- PPC::X28, PPC::X29, PPC::X30, PPC::X31,
-
- PPC::F14, PPC::F15, PPC::F16, PPC::F17,
- PPC::F18, PPC::F19, PPC::F20, PPC::F21,
- PPC::F22, PPC::F23, PPC::F24, PPC::F25,
- PPC::F26, PPC::F27, PPC::F28, PPC::F29,
- PPC::F30, PPC::F31,
-
- PPC::CR2, PPC::CR3, PPC::CR4,
-
- PPC::VRSAVE,
-
- PPC::V20, PPC::V21, PPC::V22, PPC::V23,
- PPC::V24, PPC::V25, PPC::V26, PPC::V27,
- PPC::V28, PPC::V29, PPC::V30, PPC::V31,
+ if (Subtarget.isDarwinABI())
+ return Subtarget.isPPC64() ? CSR_Darwin64_SaveList :
+ CSR_Darwin32_SaveList;
- PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN,
- PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN,
- PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN,
+ return Subtarget.isPPC64() ? CSR_SVR464_SaveList : CSR_SVR432_SaveList;
+}
- 0
- };
-
+const unsigned*
+PPCRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
if (Subtarget.isDarwinABI())
- return Subtarget.isPPC64() ? Darwin64_CalleeSavedRegs :
- Darwin32_CalleeSavedRegs;
+ return Subtarget.isPPC64() ? CSR_Darwin64_RegMask :
+ CSR_Darwin32_RegMask;
- return Subtarget.isPPC64() ? SVR4_64_CalleeSavedRegs : SVR4_CalleeSavedRegs;
+ return Subtarget.isPPC64() ? CSR_SVR464_RegMask : CSR_SVR432_RegMask;
}
BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
@@ -247,9 +146,6 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
Reserved.set(PPC::R13);
Reserved.set(PPC::R31);
- if (!requiresRegisterScavenging(MF))
- Reserved.set(PPC::R0); // FIXME (64-bit): Remove
-
Reserved.set(PPC::X0);
Reserved.set(PPC::X1);
Reserved.set(PPC::X13);
@@ -259,7 +155,7 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
if (Subtarget.isSVR4ABI()) {
Reserved.set(PPC::X2);
}
- // Reserve R2 on Darwin to hack around the problem of save/restore of CR
+ // Reserve X2 on Darwin to hack around the problem of save/restore of CR
// when the stack frame is too big to address directly; we need two regs.
// This is a hack.
if (Subtarget.isDarwinABI()) {
@@ -273,6 +169,29 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
+unsigned
+PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ const unsigned DefaultSafety = 1;
+
+ switch (RC->getID()) {
+ default:
+ return 0;
+ case PPC::G8RCRegClassID:
+ case PPC::GPRCRegClassID: {
+ unsigned FP = TFI->hasFP(MF) ? 1 : 0;
+ return 32 - FP - DefaultSafety;
+ }
+ case PPC::F8RCRegClassID:
+ case PPC::F4RCRegClassID:
+ case PPC::VRRCRegClassID:
+ return 32 - DefaultSafety;
+ case PPC::CRRCRegClassID:
+ return 8 - DefaultSafety;
+ }
+}
+
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
@@ -280,7 +199,8 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
void PPCRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (GuaranteedTailCallOpt && I->getOpcode() == PPC::ADJCALLSTACKUP) {
+ if (MF.getTarget().Options.GuaranteedTailCallOpt &&
+ I->getOpcode() == PPC::ADJCALLSTACKUP) {
// Add (actually subtract) back the amount the callee popped on return.
if (int CalleeAmt = I->getOperand(1).getImm()) {
bool is64Bit = Subtarget.isPPC64();
@@ -295,8 +215,9 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
DebugLoc dl = MI->getDebugLoc();
if (isInt<16>(CalleeAmt)) {
- BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg).addReg(StackReg).
- addImm(CalleeAmt);
+ BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg)
+ .addReg(StackReg, RegState::Kill)
+ .addImm(CalleeAmt);
} else {
MachineBasicBlock::iterator MBBI = I;
BuildMI(MBB, MBBI, dl, TII.get(LISInstr), TmpReg)
@@ -304,9 +225,8 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
BuildMI(MBB, MBBI, dl, TII.get(ORIInstr), TmpReg)
.addReg(TmpReg, RegState::Kill)
.addImm(CalleeAmt & 0xFFFF);
- BuildMI(MBB, MBBI, dl, TII.get(ADDInstr))
- .addReg(StackReg)
- .addReg(StackReg)
+ BuildMI(MBB, MBBI, dl, TII.get(ADDInstr), StackReg)
+ .addReg(StackReg, RegState::Kill)
.addReg(TmpReg);
}
}
@@ -403,12 +323,12 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
if (requiresRegisterScavenging(MF)) // FIXME (64-bit): Use "true" part.
BuildMI(MBB, II, dl, TII.get(PPC::STDUX))
.addReg(Reg, RegState::Kill)
- .addReg(PPC::X1)
+ .addReg(PPC::X1, RegState::Define)
.addReg(MI.getOperand(1).getReg());
else
BuildMI(MBB, II, dl, TII.get(PPC::STDUX))
.addReg(PPC::X0, RegState::Kill)
- .addReg(PPC::X1)
+ .addReg(PPC::X1, RegState::Define)
.addReg(MI.getOperand(1).getReg());
if (!MI.getOperand(1).isKill())
@@ -424,7 +344,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
} else {
BuildMI(MBB, II, dl, TII.get(PPC::STWUX))
.addReg(Reg, RegState::Kill)
- .addReg(PPC::R1)
+ .addReg(PPC::R1, RegState::Define)
.addReg(MI.getOperand(1).getReg());
if (!MI.getOperand(1).isKill())
@@ -455,28 +375,32 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
unsigned FrameIndex, int SPAdj,
RegScavenger *RS) const {
// Get the instruction.
- MachineInstr &MI = *II; // ; SPILL_CR <SrcReg>, <offset>, <FI>
+ MachineInstr &MI = *II; // ; SPILL_CR <SrcReg>, <offset>
// Get the instruction's basic block.
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc dl = MI.getDebugLoc();
- const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
- const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
- const TargetRegisterClass *RC = Subtarget.isPPC64() ? G8RC : GPRC;
- unsigned Reg = findScratchRegister(II, RS, RC, SPAdj);
- unsigned SrcReg = MI.getOperand(0).getReg();
+ // FIXME: Once LLVM supports creating virtual registers here, or the register
+ // scavenger can return multiple registers, stop using reserved registers
+ // here.
+ (void) SPAdj;
+ (void) RS;
+
bool LP64 = Subtarget.isPPC64();
+ unsigned Reg = Subtarget.isDarwinABI() ? (LP64 ? PPC::X2 : PPC::R2) :
+ (LP64 ? PPC::X0 : PPC::R0);
+ unsigned SrcReg = MI.getOperand(0).getReg();
// We need to store the CR in the low 4-bits of the saved value. First, issue
// an MFCRpseud to save all of the CRBits and, if needed, kill the SrcReg.
- BuildMI(MBB, II, dl, TII.get(PPC::MFCRpseud), Reg)
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFCR8pseud : PPC::MFCRpseud), Reg)
.addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
// If the saved register wasn't CR0, shift the bits left so that they are in
// CR0's slot.
if (SrcReg != PPC::CR0)
// rlwinm rA, rA, ShiftBits, 0, 31.
- BuildMI(MBB, II, dl, TII.get(PPC::RLWINM), Reg)
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
.addReg(Reg, RegState::Kill)
.addImm(getPPCRegisterNumbering(SrcReg) * 4)
.addImm(0)
@@ -490,6 +414,48 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
MBB.erase(II);
}
+void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
+ unsigned FrameIndex, int SPAdj,
+ RegScavenger *RS) const {
+ // Get the instruction.
+ MachineInstr &MI = *II; // ; <DestReg> = RESTORE_CR <offset>
+ // Get the instruction's basic block.
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc dl = MI.getDebugLoc();
+
+ // FIXME: Once LLVM supports creating virtual registers here, or the register
+ // scavenger can return multiple registers, stop using reserved registers
+ // here.
+ (void) SPAdj;
+ (void) RS;
+
+ bool LP64 = Subtarget.isPPC64();
+ unsigned Reg = Subtarget.isDarwinABI() ? (LP64 ? PPC::X2 : PPC::R2) :
+ (LP64 ? PPC::X0 : PPC::R0);
+ unsigned DestReg = MI.getOperand(0).getReg();
+ assert(MI.definesRegister(DestReg) &&
+ "RESTORE_CR does not define its destination");
+
+ addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
+ Reg), FrameIndex);
+
+ // If the reloaded register isn't CR0, shift the bits right so that they are
+ // in the right CR's slot.
+ if (DestReg != PPC::CR0) {
+ unsigned ShiftBits = getPPCRegisterNumbering(DestReg)*4;
+ // rlwinm r11, r11, 32-ShiftBits, 0, 31.
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
+ .addReg(Reg).addImm(32-ShiftBits).addImm(0)
+ .addImm(31);
+ }
+
+ BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTCRF8 : PPC::MTCRF), DestReg)
+ .addReg(Reg);
+
+ // Discard the pseudo instruction.
+ MBB.erase(II);
+}
+
void
PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS) const {
@@ -535,16 +501,23 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
return;
}
- // Special case for pseudo-op SPILL_CR.
- if (requiresRegisterScavenging(MF)) // FIXME (64-bit): Enable by default.
+ // Special case for pseudo-ops SPILL_CR and RESTORE_CR.
+ if (requiresRegisterScavenging(MF)) {
if (OpC == PPC::SPILL_CR) {
lowerCRSpilling(II, FrameIndex, SPAdj, RS);
return;
+ } else if (OpC == PPC::RESTORE_CR) {
+ lowerCRRestore(II, FrameIndex, SPAdj, RS);
+ return;
}
+ }
// Replace the FrameIndex with base register with GPR1 (SP) or GPR31 (FP).
+
+ bool is64Bit = Subtarget.isPPC64();
MI.getOperand(FIOperandNo).ChangeToRegister(TFI->hasFP(MF) ?
- PPC::R31 : PPC::R1,
+ (is64Bit ? PPC::X31 : PPC::R31) :
+ (is64Bit ? PPC::X1 : PPC::R1),
false);
// Figure out if the offset in the instruction is shifted right two bits. This
@@ -581,7 +554,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// clear can be encoded. This is extremely uncommon, because normally you
// only "std" to a stack slot that is at least 4-byte aligned, but it can
// happen in invalid code.
- if (isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
+ if (OpC == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
+ (isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0))) {
if (isIXAddr)
Offset >>= 2; // The actual encoded value has the low two bits zero.
MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
@@ -590,19 +564,19 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// The offset doesn't fit into a single register, scavenge one to build the
// offset in.
- // FIXME: figure out what SPAdj is doing here.
- // FIXME (64-bit): Use "findScratchRegister".
unsigned SReg;
- if (requiresRegisterScavenging(MF))
- SReg = findScratchRegister(II, RS, &PPC::GPRCRegClass, SPAdj);
- else
- SReg = PPC::R0;
+ if (requiresRegisterScavenging(MF)) {
+ const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
+ const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
+ SReg = findScratchRegister(II, RS, is64Bit ? G8RC : GPRC, SPAdj);
+ } else
+ SReg = is64Bit ? PPC::X0 : PPC::R0;
// Insert a set of rA with the full offset value before the ld, st, or add
- BuildMI(MBB, II, dl, TII.get(PPC::LIS), SReg)
+ BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SReg)
.addImm(Offset >> 16);
- BuildMI(MBB, II, dl, TII.get(PPC::ORI), SReg)
+ BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg)
.addReg(SReg, RegState::Kill)
.addImm(Offset);
@@ -624,7 +598,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned StackReg = MI.getOperand(FIOperandNo).getReg();
MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
- MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false);
+ MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
}
unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
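
For reference, the lis/ori sequence above reconstructs the full offset exactly: lis installs the arithmetically shifted high half (sign bits included) and ori ORs in the low 16 bits, whose positions in the lis result are zero. The following is a self-contained check of that identity, assuming 32-bit offsets as in the code above; it is a sketch, not part of the patch.

#include <cassert>
#include <cstdint>

// Value left in SReg after:
//   lis SReg, (Offset >> 16)
//   ori SReg, SReg, (Offset & 0xFFFF)
static int32_t materializeLisOri(int32_t Offset) {
  uint32_t hi = (uint32_t)(Offset >> 16) << 16; // lis: sign-extended high half
  uint32_t lo = (uint32_t)Offset & 0xFFFFu;     // ori: low half
  return (int32_t)(hi | lo);
}

int main() {
  const int32_t samples[] = {0, 70000, -70000, 32767, -32768,
                             0x7FFFFFFF, -0x7FFFFFFF - 1};
  for (int32_t Offset : samples)
    assert(materializeLisOri(Offset) == Offset);
  return 0;
}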
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index 1cc7213..b1e6a72 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -1,4 +1,4 @@
-//===- PPCRegisterInfo.h - PowerPC Register Information Impl -----*- C++ -*-==//
+//===-- PPCRegisterInfo.h - PowerPC Register Information Impl ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -37,8 +37,12 @@ public:
/// This is used for addressing modes.
virtual const TargetRegisterClass *getPointerRegClass(unsigned Kind=0) const;
+ unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const;
+
/// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+ const unsigned *getCallPreservedMask(CallingConv::ID CC) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -54,6 +58,8 @@ public:
int SPAdj, RegScavenger *RS) const;
void lowerCRSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex,
int SPAdj, RegScavenger *RS) const;
+ void lowerCRRestore(MachineBasicBlock::iterator II, unsigned FrameIndex,
+ int SPAdj, RegScavenger *RS) const;
void eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, RegScavenger *RS = NULL) const;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
index 1acdf4e..0e55313 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -1,10 +1,10 @@
-//===- PPCRegisterInfo.td - The PowerPC Register File ------*- tablegen -*-===//
-//
+//===-- PPCRegisterInfo.td - The PowerPC Register File -----*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
//
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRelocations.h b/contrib/llvm/lib/Target/PowerPC/PPCRelocations.h
index a33e7e0..0b392f9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRelocations.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRelocations.h
@@ -1,4 +1,4 @@
-//===- PPCRelocations.h - PPC32 Code Relocations ----------------*- C++ -*-===//
+//===-- PPCRelocations.h - PPC Code Relocations -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef PPC32RELOCATIONS_H
-#define PPC32RELOCATIONS_H
+#ifndef PPCRELOCATIONS_H
+#define PPCRELOCATIONS_H
#include "llvm/CodeGen/MachineRelocation.h"
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td b/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
index 9664f14..8c0a858 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSchedule.td
@@ -1,10 +1,10 @@
-//===- PPCSchedule.td - PowerPC Scheduling Definitions -----*- tablegen -*-===//
-//
+//===-- PPCSchedule.td - PowerPC Scheduling Definitions ----*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -50,7 +50,8 @@ def BrMCRX : InstrItinClass;
def LdStDCBA : InstrItinClass;
def LdStDCBF : InstrItinClass;
def LdStDCBI : InstrItinClass;
-def LdStGeneral : InstrItinClass;
+def LdStLoad : InstrItinClass;
+def LdStStore : InstrItinClass;
def LdStDSS : InstrItinClass;
def LdStICBI : InstrItinClass;
def LdStUX : InstrItinClass;
@@ -103,9 +104,11 @@ def VecVSR : InstrItinClass;
// Processor instruction itineraries.
include "PPCScheduleG3.td"
+include "PPCSchedule440.td"
include "PPCScheduleG4.td"
include "PPCScheduleG4Plus.td"
include "PPCScheduleG5.td"
+include "PPCScheduleA2.td"
//===----------------------------------------------------------------------===//
// Instruction to itinerary class map - When add new opcodes to the supported
@@ -149,8 +152,8 @@ include "PPCScheduleG5.td"
// dcbf LdStDCBF
// dcbi LdStDCBI
// dcbst LdStDCBF
-// dcbt LdStGeneral
-// dcbtst LdStGeneral
+// dcbt LdStLoad
+// dcbtst LdStLoad
// dcbz LdStDCBF
// divd IntDivD
// divdu IntDivD
@@ -159,9 +162,9 @@ include "PPCScheduleG5.td"
// dss LdStDSS
// dst LdStDSS
// dstst LdStDSS
-// eciwx LdStGeneral
-// ecowx LdStGeneral
-// eieio LdStGeneral
+// eciwx LdStLoad
+// ecowx LdStLoad
+// eieio LdStLoad
// eqv IntGeneral
// extsb IntGeneral
// extsh IntGeneral
@@ -201,10 +204,10 @@ include "PPCScheduleG5.td"
// fsubs FPGeneral
// icbi LdStICBI
// isync SprISYNC
-// lbz LdStGeneral
-// lbzu LdStGeneral
+// lbz LdStLoad
+// lbzu LdStLoad
// lbzux LdStUX
-// lbzx LdStGeneral
+// lbzx LdStLoad
// ld LdStLD
// ldarx LdStLDARX
// ldu LdStLD
@@ -222,11 +225,11 @@ include "PPCScheduleG5.td"
// lhau LdStLHA
// lhaux LdStLHA
// lhax LdStLHA
-// lhbrx LdStGeneral
-// lhz LdStGeneral
-// lhzu LdStGeneral
+// lhbrx LdStLoad
+// lhz LdStLoad
+// lhzu LdStLoad
// lhzux LdStUX
-// lhzx LdStGeneral
+// lhzx LdStLoad
// lmw LdStLMW
// lswi LdStLMW
// lswx LdStLMW
@@ -241,11 +244,11 @@ include "PPCScheduleG5.td"
// lwarx LdStLWARX
// lwaux LdStLHA
// lwax LdStLHA
-// lwbrx LdStGeneral
-// lwz LdStGeneral
-// lwzu LdStGeneral
+// lwbrx LdStLoad
+// lwz LdStLoad
+// lwzu LdStLoad
// lwzux LdStUX
-// lwzx LdStGeneral
+// lwzx LdStLoad
// mcrf BrMCR
// mcrfs FPGeneral
// mcrxr BrMCRX
@@ -306,10 +309,10 @@ include "PPCScheduleG5.td"
// srawi IntShift
// srd IntRotateD
// srw IntGeneral
-// stb LdStGeneral
-// stbu LdStGeneral
-// stbux LdStGeneral
-// stbx LdStGeneral
+// stb LdStStore
+// stbu LdStStore
+// stbux LdStStore
+// stbx LdStStore
// std LdStSTD
// stdcx. LdStSTDCX
// stdu LdStSTD
@@ -324,11 +327,11 @@ include "PPCScheduleG5.td"
// stfsu LdStUX
// stfsux LdStUX
// stfsx LdStUX
-// sth LdStGeneral
-// sthbrx LdStGeneral
-// sthu LdStGeneral
-// sthux LdStGeneral
-// sthx LdStGeneral
+// sth LdStStore
+// sthbrx LdStStore
+// sthu LdStStore
+// sthux LdStStore
+// sthx LdStStore
// stmw LdStLMW
// stswi LdStLMW
// stswx LdStLMW
@@ -337,12 +340,12 @@ include "PPCScheduleG5.td"
// stvewx LdStSTVEBX
// stvx LdStSTVEBX
// stvxl LdStSTVEBX
-// stw LdStGeneral
-// stwbrx LdStGeneral
+// stw LdStStore
+// stwbrx LdStStore
// stwcx. LdStSTWCX
-// stwu LdStGeneral
-// stwux LdStGeneral
-// stwx LdStGeneral
+// stwu LdStStore
+// stwux LdStStore
+// stwx LdStStore
// subf IntGeneral
// subfc IntGeneral
// subfe IntGeneral
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td b/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
new file mode 100644
index 0000000..419faea
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSchedule440.td
@@ -0,0 +1,616 @@
+//===-- PPCSchedule440.td - PPC 440 Scheduling Definitions -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Primary reference:
+// PowerPC 440x6 Embedded Processor Core User's Manual.
+// IBM (as updated in) 2010.
+
+// The basic PPC 440 does not include a floating-point unit; the pipeline
+// timings here are constructed to match the FP2 unit shipped with the
+// PPC-440- and PPC-450-based Blue Gene (L and P) supercomputers.
+// References:
+// S. Chatterjee, et al. Design and exploitation of a high-performance
+// SIMD floating-point unit for Blue Gene/L.
+// IBM J. Res. & Dev. 49 (2/3) March/May 2005.
+// also:
+// Carlos Sosa and Brant Knudson. IBM System Blue Gene Solution:
+// Blue Gene/P Application Development.
+// IBM (as updated in) 2009.
+
+//===----------------------------------------------------------------------===//
+// Functional units on the PowerPC 440/450 chip sets
+//
+def IFTH1 : FuncUnit; // Fetch unit 1
+def IFTH2 : FuncUnit; // Fetch unit 2
+def PDCD1 : FuncUnit; // Decode unit 1
+def PDCD2 : FuncUnit; // Decode unit 2
+def DISS1 : FuncUnit; // Issue unit 1
+def DISS2 : FuncUnit; // Issue unit 2
+def LRACC : FuncUnit; // Register access and dispatch for
+ // the simple integer (J-pipe) and
+ // load/store (L-pipe) pipelines
+def IRACC : FuncUnit; // Register access and dispatch for
+ // the complex integer (I-pipe) pipeline
+def FRACC : FuncUnit; // Register access and dispatch for
+ // the floating-point execution (F-pipe) pipeline
+def IEXE1 : FuncUnit; // Execution stage 1 for the I pipeline
+def IEXE2 : FuncUnit; // Execution stage 2 for the I pipeline
+def IWB : FuncUnit; // Write-back unit for the I pipeline
+def JEXE1 : FuncUnit; // Execution stage 1 for the J pipeline
+def JEXE2 : FuncUnit; // Execution stage 2 for the J pipeline
+def JWB : FuncUnit; // Write-back unit for the J pipeline
+def AGEN : FuncUnit; // Address generation for the L pipeline
+def CRD : FuncUnit; // D-cache access for the L pipeline
+def LWB : FuncUnit; // Write-back unit for the L pipeline
+def FEXE1 : FuncUnit; // Execution stage 1 for the F pipeline
+def FEXE2 : FuncUnit; // Execution stage 2 for the F pipeline
+def FEXE3 : FuncUnit; // Execution stage 3 for the F pipeline
+def FEXE4 : FuncUnit; // Execution stage 4 for the F pipeline
+def FEXE5 : FuncUnit; // Execution stage 5 for the F pipeline
+def FEXE6 : FuncUnit; // Execution stage 6 for the F pipeline
+def FWB : FuncUnit; // Write-back unit for the F pipeline
+
+def LWARX_Hold : FuncUnit; // This is a pseudo-unit which is used
+ // to make sure that no lwarx/stwcx.
+ // instructions are issued while another
+ // lwarx/stwcx. is in the L pipe.
+
+def GPR_Bypass : Bypass; // The bypass for general-purpose regs.
+def FPR_Bypass : Bypass; // The bypass for floating-point regs.
+
+// Notes:
+// Instructions are held in the FRACC, LRACC and IRACC pipeline
+// stages until their source operands become ready. Exceptions:
+// - Store instructions will hold in the AGEN stage
+// - The integer multiply-accumulate instruction will hold in
+// the IEXE1 stage
+//
+// For most I-pipe operations, the result is available at the end of
+// the IEXE1 stage. Operations such as multiply and divide must
+// continue to execute in IEXE2 and IWB. Divide resides in IWB for
+// 33 cycles (multiply also calculates its result in IWB). For all
+// J-pipe instructions, the result is available
+// at the end of the JEXE1 stage. Loads have a 3-cycle latency
+// (data is not available until after the LWB stage).
+//
+// The L1 cache hit latency is four cycles for floating point loads
+// and three cycles for integer loads.
+//
+// The stwcx. instruction requires both the LRACC and the IRACC
+// dispatch stages. It must be issued from DISS1 (the first issue slot).
+//
+// All lwarx/stwcx. instructions hold in LRACC if another
+// uncommitted lwarx/stwcx. is in AGEN, CRD, or LWB.
+//
+// msync (a.k.a. sync) and mbar will hold in LWB until all load/store
+// resources are empty. AGEN and CRD are held empty until the msync/mbar
+// commits.
+//
+// Most floating-point instructions, computational and move,
+// have a 5-cycle latency. Divide takes longer (30 cycles). Instructions that
+// update the CR take 2 cycles. Stores take 3 cycles and, as mentioned above,
+// loads take 4 cycles (for L1 hit).
+
+//
+// This file defines the itinerary class data for the PPC 440 processor.
+//
+//===----------------------------------------------------------------------===//
+
+
+def PPC440Itineraries : ProcessorItineraries<
+ [IFTH1, IFTH2, PDCD1, PDCD2, DISS1, DISS2, FRACC,
+ IRACC, IEXE1, IEXE2, IWB, LRACC, JEXE1, JEXE2, JWB, AGEN, CRD, LWB,
+ FEXE1, FEXE2, FEXE3, FEXE4, FEXE5, FEXE6, FWB, LWARX_Hold],
+ [GPR_Bypass, FPR_Bypass], [
+ InstrItinData<IntGeneral , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC, LRACC]>,
+ InstrStage<1, [IEXE1, JEXE1]>,
+ InstrStage<1, [IEXE2, JEXE2]>,
+ InstrStage<1, [IWB, JWB]>],
+ [6, 4, 4],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntCompare , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC, LRACC]>,
+ InstrStage<1, [IEXE1, JEXE1]>,
+ InstrStage<1, [IEXE2, JEXE2]>,
+ InstrStage<1, [IWB, JWB]>],
+ [6, 4, 4],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntDivW , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<33, [IWB]>],
+ [40, 4, 4],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMFFS , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [7, 4, 4],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMTFSB0 , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [7, 4, 4],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulHW , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4, 4],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulHWU , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4, 4],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulLI , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4, 4],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntRotate , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC, LRACC]>,
+ InstrStage<1, [IEXE1, JEXE1]>,
+ InstrStage<1, [IEXE2, JEXE2]>,
+ InstrStage<1, [IWB, JWB]>],
+ [6, 4, 4],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntShift , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC, LRACC]>,
+ InstrStage<1, [IEXE1, JEXE1]>,
+ InstrStage<1, [IEXE2, JEXE2]>,
+ InstrStage<1, [IWB, JWB]>],
+ [6, 4, 4],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntTrapW , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [6, 4],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<BrB , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<BrCR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4, 4],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<BrMCR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4, 4],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<BrMCRX , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4, 4],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBA , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBF , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBI , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLoad , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<2, [LWB]>],
+ [9, 5],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStStore , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<2, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStICBI , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStUX , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5, 5],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLFD , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<2, [LWB]>],
+ [9, 5, 5],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLFDU , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [9, 5, 5],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLHA , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLMW , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLWARX , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1]>,
+ InstrStage<1, [IRACC], 0>,
+ InstrStage<4, [LWARX_Hold], 0>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTD , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<2, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTDCX , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1]>,
+ InstrStage<1, [IRACC], 0>,
+ InstrStage<4, [LWARX_Hold], 0>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTWCX , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1]>,
+ InstrStage<1, [IRACC], 0>,
+ InstrStage<4, [LWARX_Hold], 0>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<1, [AGEN]>,
+ InstrStage<1, [CRD]>,
+ InstrStage<1, [LWB]>],
+ [8, 5],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSync , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [LRACC]>,
+ InstrStage<3, [AGEN], 1>,
+ InstrStage<2, [CRD], 1>,
+ InstrStage<1, [LWB]>]>,
+ InstrItinData<SprISYNC , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [FRACC], 0>,
+ InstrStage<1, [LRACC], 0>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [FEXE1], 0>,
+ InstrStage<1, [AGEN], 0>,
+ InstrStage<1, [JEXE1], 0>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [FEXE2], 0>,
+ InstrStage<1, [CRD], 0>,
+ InstrStage<1, [JEXE2], 0>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<6, [FEXE3], 0>,
+ InstrStage<6, [LWB], 0>,
+ InstrStage<6, [JWB], 0>,
+ InstrStage<6, [IWB]>]>,
+ InstrItinData<SprMFSR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [6, 4],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprMTMSR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [6, 4],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprMTSR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<3, [IWB]>],
+ [9, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprTLBSYNC , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>]>,
+ InstrItinData<SprMFCR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMFMSR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [7, 4],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<SprMFSPR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<3, [IWB]>],
+ [10, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMFTB , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<3, [IWB]>],
+ [10, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMTSPR , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<3, [IWB]>],
+ [10, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMTSRIN , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<3, [IWB]>],
+ [10, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprRFI , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprSC , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [IRACC]>,
+ InstrStage<1, [IEXE1]>,
+ InstrStage<1, [IEXE2]>,
+ InstrStage<1, [IWB]>],
+ [8, 4],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<FPGeneral , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [FRACC]>,
+ InstrStage<1, [FEXE1]>,
+ InstrStage<1, [FEXE2]>,
+ InstrStage<1, [FEXE3]>,
+ InstrStage<1, [FEXE4]>,
+ InstrStage<1, [FEXE5]>,
+ InstrStage<1, [FEXE6]>,
+ InstrStage<1, [FWB]>],
+ [10, 4, 4],
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPCompare , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [FRACC]>,
+ InstrStage<1, [FEXE1]>,
+ InstrStage<1, [FEXE2]>,
+ InstrStage<1, [FEXE3]>,
+ InstrStage<1, [FEXE4]>,
+ InstrStage<1, [FEXE5]>,
+ InstrStage<1, [FEXE6]>,
+ InstrStage<1, [FWB]>],
+ [10, 4, 4],
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPDivD , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [FRACC]>,
+ InstrStage<1, [FEXE1]>,
+ InstrStage<1, [FEXE2]>,
+ InstrStage<1, [FEXE3]>,
+ InstrStage<1, [FEXE4]>,
+ InstrStage<1, [FEXE5]>,
+ InstrStage<1, [FEXE6]>,
+ InstrStage<25, [FWB]>],
+ [35, 4, 4],
+ [NoBypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPDivS , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [FRACC]>,
+ InstrStage<1, [FEXE1]>,
+ InstrStage<1, [FEXE2]>,
+ InstrStage<1, [FEXE3]>,
+ InstrStage<1, [FEXE4]>,
+ InstrStage<1, [FEXE5]>,
+ InstrStage<1, [FEXE6]>,
+ InstrStage<13, [FWB]>],
+ [23, 4, 4],
+ [NoBypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPFused , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [FRACC]>,
+ InstrStage<1, [FEXE1]>,
+ InstrStage<1, [FEXE2]>,
+ InstrStage<1, [FEXE3]>,
+ InstrStage<1, [FEXE4]>,
+ InstrStage<1, [FEXE5]>,
+ InstrStage<1, [FEXE6]>,
+ InstrStage<1, [FWB]>],
+ [10, 4, 4, 4],
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPRes , [InstrStage<1, [IFTH1, IFTH2]>,
+ InstrStage<1, [PDCD1, PDCD2]>,
+ InstrStage<1, [DISS1, DISS2]>,
+ InstrStage<1, [FRACC]>,
+ InstrStage<1, [FEXE1]>,
+ InstrStage<1, [FEXE2]>,
+ InstrStage<1, [FEXE3]>,
+ InstrStage<1, [FEXE4]>,
+ InstrStage<1, [FEXE5]>,
+ InstrStage<1, [FEXE6]>,
+ InstrStage<1, [FWB]>],
+ [10, 4],
+ [FPR_Bypass, FPR_Bypass]>
+]>;
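An itinerary set like this only takes effect once a processor definition
selects it. The hookup lives in PPC.td, which this excerpt does not show; a
plausible sketch, with the directive/feature list treated as an assumption:

  // Hypothetical processor record wiring these itineraries to -mcpu=440.
  def : Processor<"440", PPC440Itineraries, [Directive440]>;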
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
new file mode 100644
index 0000000..857ba40
--- /dev/null
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleA2.td
@@ -0,0 +1,630 @@
+//===-- PPCScheduleA2.td - PPC A2 Scheduling Definitions -*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Primary reference:
+// A2 Processor User's Manual.
+// IBM (as updated in) 2010.
+
+//===----------------------------------------------------------------------===//
+// Functional units on the PowerPC A2 chip sets
+//
+def IU0to3_0 : FuncUnit; // Fetch unit 1 to 4 slot 1
+def IU0to3_1 : FuncUnit; // Fetch unit 1 to 4 slot 2
+def IU0to3_2 : FuncUnit; // Fetch unit 1 to 4 slot 3
+def IU0to3_3 : FuncUnit; // Fetch unit 1 to 4 slot 4
+def IU4_0 : FuncUnit; // Instruction buffer slot 1
+def IU4_1 : FuncUnit; // Instruction buffer slot 2
+def IU4_2 : FuncUnit; // Instruction buffer slot 3
+def IU4_3 : FuncUnit; // Instruction buffer slot 4
+def IU4_4 : FuncUnit; // Instruction buffer slot 5
+def IU4_5 : FuncUnit; // Instruction buffer slot 6
+def IU4_6 : FuncUnit; // Instruction buffer slot 7
+def IU4_7 : FuncUnit; // Instruction buffer slot 8
+def IU5 : FuncUnit; // Dependency resolution
+def IU6 : FuncUnit; // Instruction issue
+def RF0 : FuncUnit;
+def XRF1 : FuncUnit;
+def XEX1 : FuncUnit; // Execution stage 1 for the XU pipeline
+def XEX2 : FuncUnit; // Execution stage 2 for the XU pipeline
+def XEX3 : FuncUnit; // Execution stage 3 for the XU pipeline
+def XEX4 : FuncUnit; // Execution stage 4 for the XU pipeline
+def XEX5 : FuncUnit; // Execution stage 5 for the XU pipeline
+def XEX6 : FuncUnit; // Execution stage 6 for the XU pipeline
+def FRF1 : FuncUnit;
+def FEX1 : FuncUnit; // Execution stage 1 for the FU pipeline
+def FEX2 : FuncUnit; // Execution stage 2 for the FU pipeline
+def FEX3 : FuncUnit; // Execution stage 3 for the FU pipeline
+def FEX4 : FuncUnit; // Execution stage 4 for the FU pipeline
+def FEX5 : FuncUnit; // Execution stage 5 for the FU pipeline
+def FEX6 : FuncUnit; // Execution stage 6 for the FU pipeline
+
+def CR_Bypass : Bypass; // The bypass for condition regs.
+//def GPR_Bypass : Bypass; // The bypass for general-purpose regs.
+//def FPR_Bypass : Bypass; // The bypass for floating-point regs.
+
+//
+// This file defines the itinerary class data for the PPC A2 processor.
+//
+//===----------------------------------------------------------------------===//
+
+
+def PPCA2Itineraries : ProcessorItineraries<
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3,
+ IU4_0, IU4_1, IU4_2, IU4_3, IU4_4, IU4_5, IU4_6, IU4_7,
+ IU5, IU6, RF0, XRF1, XEX1, XEX2, XEX3, XEX4, XEX5, XEX6,
+ FRF1, FEX1, FEX2, FEX3, FEX4, FEX5, FEX6],
+ [CR_Bypass, GPR_Bypass, FPR_Bypass], [
+ InstrItinData<IntGeneral , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntCompare , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [CR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntDivW , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<38, [XEX6]>],
+ [53, 7, 7],
+ [NoBypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMFFS , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMTFSB0 , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulHW , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulHWU , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntMulLI , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [15, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntRotate , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntShift , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<IntTrapW , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<BrB , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [15, 7, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<BrCR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [CR_Bypass, CR_Bypass, CR_Bypass]>,
+ InstrItinData<BrMCR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [CR_Bypass, CR_Bypass, CR_Bypass]>,
+ InstrItinData<BrMCRX , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7, 7],
+ [CR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBA , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [13, 11],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBF , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [13, 11],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStDCBI , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [13, 11],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLoad , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStStore , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [13, 7],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStICBI , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStUX , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7, 7],
+ [NoBypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<LdStLFD , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7, 7],
+ [FPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLFDU , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7, 7],
+ [FPR_Bypass, GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStLHA , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLMW , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [14, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStLWARX , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<13, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [26, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTD , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [13, 7],
+ [GPR_Bypass, GPR_Bypass]>,
+ InstrItinData<LdStSTDCX , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<13, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [26, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSTWCX , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<13, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [26, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<LdStSync , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<12, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>]>,
+ InstrItinData<SprISYNC , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>]>,
+ InstrItinData<SprMFSR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [15, 7],
+ [GPR_Bypass, NoBypass]>,
+ InstrItinData<SprMTMSR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [15, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMTSR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [15, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprTLBSYNC , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>]>,
+ InstrItinData<SprMFCR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [10, 7],
+ [GPR_Bypass, CR_Bypass]>,
+ InstrItinData<SprMFMSR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [15, 7],
+ [GPR_Bypass, NoBypass]>,
+ InstrItinData<SprMFSPR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [15, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMFTB , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>],
+ [29, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMTSPR , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>],
+ [15, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprMTSRIN , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>],
+ [29, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprRFI , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>],
+ [29, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<SprSC , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>,
+ InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>,
+ InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>,
+ InstrStage<1, [XEX5]>, InstrStage<14, [XEX6]>],
+ [29, 7],
+ [NoBypass, GPR_Bypass]>,
+ InstrItinData<FPGeneral , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
+ InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
+ InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
+ InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
+ [15, 7, 7],
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPCompare , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
+ InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
+ InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
+ InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
+ [13, 7, 7],
+ [CR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPDivD , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<71, [FRF1], 0>,
+ InstrStage<71, [FEX1], 0>,
+ InstrStage<71, [FEX2], 0>,
+ InstrStage<71, [FEX3], 0>,
+ InstrStage<71, [FEX4], 0>,
+ InstrStage<71, [FEX5], 0>,
+ InstrStage<71, [FEX6]>],
+ [86, 7, 7],
+ [NoBypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPDivS , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<58, [FRF1], 0>,
+ InstrStage<58, [FEX1], 0>,
+ InstrStage<58, [FEX2], 0>,
+ InstrStage<58, [FEX3], 0>,
+ InstrStage<58, [FEX4], 0>,
+ InstrStage<58, [FEX5], 0>,
+ InstrStage<58, [FEX6]>],
+ [73, 7, 7],
+ [NoBypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPSqrt , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<68, [FRF1], 0>,
+ InstrStage<68, [FEX1], 0>,
+ InstrStage<68, [FEX2], 0>,
+ InstrStage<68, [FEX3], 0>,
+ InstrStage<68, [FEX4], 0>,
+ InstrStage<68, [FEX5], 0>,
+ InstrStage<68, [FEX6]>],
+ [86, 7], // FIXME: should be [86, 7] for double
+ // and [82, 7] for single. Likewise,
+ // the FEX? cycle count should be 68
+ // for double and 64 for single.
+ [NoBypass, FPR_Bypass]>,
+ InstrItinData<FPFused , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
+ InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
+ InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
+ InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
+ [15, 7, 7, 7],
+ [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>,
+ InstrItinData<FPRes , [InstrStage<4,
+ [IU0to3_0, IU0to3_1, IU0to3_2, IU0to3_3]>,
+ InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3,
+ IU4_4, IU4_5, IU4_6, IU4_7]>,
+ InstrStage<1, [IU5]>, InstrStage<1, [IU6]>,
+ InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>,
+ InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>,
+ InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>,
+ InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>],
+ [15, 7],
+ [FPR_Bypass, FPR_Bypass]>
+]>;
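One idiom in the FPDivD/FPDivS/FPSqrt entries above is worth spelling out:
the third InstrStage operand sets how many cycles pass before the *next*
stage may start (it defaults to the stage's own cycle count). With 0, the
seven 71-cycle stages overlap instead of summing, modeling a non-pipelined
divider that blocks the whole FU pipe. Reading FPDivD this way; the
arithmetic is an interpretation, not text from the manual:

  InstrStage<71, [FRF1], 0>,  // FRF1 reserved for 71 cycles; FEX1 starts the
  ...                         // same cycle, and likewise through FEX5
  InstrStage<71, [FEX6]>      // net occupancy ~71 cycles, not 7 x 71; with the
                              // ~15-cycle front end this matches the [86, 7, 7]
                              // operand latencies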
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
index ad4da1f..bc926f7 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG3.td
@@ -1,10 +1,10 @@
-//===- PPCScheduleG3.td - PPC G3 Scheduling Definitions ----*- tablegen -*-===//
-//
+//===-- PPCScheduleG3.td - PPC G3 Scheduling Definitions ---*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the itinerary class data for the G3 (750) processor.
@@ -32,7 +32,8 @@ def G3Itineraries : ProcessorItineraries<
InstrItinData<LdStDCBA , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStDCBI , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStGeneral , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStLoad , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStStore , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStICBI , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStUX , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStLFD , [InstrStage<2, [SLU]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
index 03c3b29..f7ec1e0 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4.td
@@ -1,10 +1,10 @@
-//===- PPCScheduleG4.td - PPC G4 Scheduling Definitions ----*- tablegen -*-===//
-//
+//===-- PPCScheduleG4.td - PPC G4 Scheduling Definitions ---*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the itinerary class data for the G4 (7400) processor.
@@ -31,7 +31,8 @@ def G4Itineraries : ProcessorItineraries<
InstrItinData<BrMCRX , [InstrStage<1, [SRU]>]>,
InstrItinData<LdStDCBF , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStDCBI , [InstrStage<2, [SLU]>]>,
- InstrItinData<LdStGeneral , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStLoad , [InstrStage<2, [SLU]>]>,
+ InstrItinData<LdStStore , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStDSS , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStICBI , [InstrStage<2, [SLU]>]>,
InstrItinData<LdStUX , [InstrStage<2, [SLU]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
index 00cac3c..37ebfc5 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
@@ -1,10 +1,10 @@
-//===- PPCScheduleG4Plus.td - PPC G4+ Scheduling Defs. -----*- tablegen -*-===//
-//
+//===-- PPCScheduleG4Plus.td - PPC G4+ Scheduling Defs. ----*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the itinerary class data for the G4+ (7450) processor.
@@ -34,7 +34,8 @@ def G4PlusItineraries : ProcessorItineraries<
InstrItinData<BrMCRX , [InstrStage<2, [IU2]>]>,
InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStDCBI , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStGeneral , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStLoad , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStStore , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStDSS , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStICBI , [InstrStage<3, [IU2]>]>,
InstrItinData<LdStUX , [InstrStage<3, [SLU]>]>,
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
index 1671f22..d1e40ce 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
+++ b/contrib/llvm/lib/Target/PowerPC/PPCScheduleG5.td
@@ -1,10 +1,10 @@
-//===- PPCScheduleG5.td - PPC G5 Scheduling Definitions ----*- tablegen -*-===//
-//
+//===-- PPCScheduleG5.td - PPC G5 Scheduling Definitions ---*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the itinerary class data for the G5 (970) processor.
@@ -35,7 +35,8 @@ def G5Itineraries : ProcessorItineraries<
InstrItinData<BrMCR , [InstrStage<2, [BPU]>]>,
InstrItinData<BrMCRX , [InstrStage<3, [BPU]>]>,
InstrItinData<LdStDCBF , [InstrStage<3, [SLU]>]>,
- InstrItinData<LdStGeneral , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStLoad , [InstrStage<3, [SLU]>]>,
+ InstrItinData<LdStStore , [InstrStage<3, [SLU]>]>,
InstrItinData<LdStDSS , [InstrStage<10, [SLU]>]>,
InstrItinData<LdStICBI , [InstrStage<40, [SLU]>]>,
InstrItinData<LdStUX , [InstrStage<4, [SLU]>]>,
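On G3 through G5 the LdStGeneral split is timing-neutral: LdStLoad and
LdStStore receive identical single-stage entries, so existing schedules are
unchanged. The split only carries information on the in-order cores added
above; contrast, with values copied from this patch:

  // G5 (and similarly G3/G4/G4+): same cost either way
  InstrItinData<LdStLoad  , [InstrStage<3, [SLU]>]>,
  InstrItinData<LdStStore , [InstrStage<3, [SLU]>]>,
  // PPC 440: loads are [9, 5] with GPR_Bypass, stores [8, 5] with NoBypass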
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
index cf194de..f405b47 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -1,4 +1,4 @@
-//===- PowerPCSubtarget.cpp - PPC Subtarget Information -------------------===//
+//===-- PowerPCSubtarget.cpp - PPC Subtarget Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "PPCSubtarget.h"
+#include "PPCRegisterInfo.h"
#include "PPC.h"
#include "llvm/GlobalValue.h"
#include "llvm/Target/TargetMachine.h"
@@ -74,6 +75,7 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
, HasAltivec(false)
, HasFSQRT(false)
, HasSTFIWX(false)
+ , IsBookE(false)
, HasLazyResolverStubs(false)
, IsJITCodeModel(false)
, TargetTriple(TT) {
@@ -139,3 +141,23 @@ bool PPCSubtarget::hasLazyResolverStub(const GlobalValue *GV,
return GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
GV->hasCommonLinkage() || isDecl;
}
+
+bool PPCSubtarget::enablePostRAScheduler(
+ CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode& Mode,
+ RegClassVector& CriticalPathRCs) const {
+ if (DarwinDirective == PPC::DIR_440 || DarwinDirective == PPC::DIR_A2)
+ Mode = TargetSubtargetInfo::ANTIDEP_ALL;
+ else
+ Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
+
+ CriticalPathRCs.clear();
+
+ if (isPPC64())
+ CriticalPathRCs.push_back(&PPC::G8RCRegClass);
+ else
+ CriticalPathRCs.push_back(&PPC::GPRCRegClass);
+
+ return OptLevel >= CodeGenOpt::Default;
+}
+
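The generic post-RA scheduling pass is the consumer of this hook; a rough
caller-side sketch follows (the driver code and the concrete type behind
RegClassVector are assumptions, not part of this patch):

  TargetSubtargetInfo::AntiDepBreakMode Mode;
  SmallVector<TargetRegisterClass*, 2> RCs; // assumed backing for RegClassVector
  if (ST.enablePostRAScheduler(TM.getOptLevel(), Mode, RCs)) {
    // Run the post-RA list scheduler. For 440/A2, ANTIDEP_ALL asks it to
    // rename across all anti-dependences on the critical-path classes.
  }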
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
index e028de6..a275029 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCSubtarget.h
@@ -1,4 +1,4 @@
-//=====-- PPCSubtarget.h - Define Subtarget for the PPC -------*- C++ -*--====//
+//===-- PPCSubtarget.h - Define Subtarget for the PPC ----------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -33,12 +33,14 @@ namespace PPC {
enum {
DIR_NONE,
DIR_32,
+ DIR_440,
DIR_601,
DIR_602,
DIR_603,
DIR_7400,
DIR_750,
DIR_970,
+ DIR_A2,
DIR_64
};
}
@@ -66,6 +68,7 @@ protected:
bool HasAltivec;
bool HasFSQRT;
bool HasSTFIWX;
+ bool IsBookE;
bool HasLazyResolverStubs;
bool IsJITCodeModel;
@@ -136,15 +139,22 @@ public:
bool hasSTFIWX() const { return HasSTFIWX; }
bool hasAltivec() const { return HasAltivec; }
bool isGigaProcessor() const { return IsGigaProcessor; }
+ bool isBookE() const { return IsBookE; }
const Triple &getTargetTriple() const { return TargetTriple; }
/// isDarwin - True if this is any darwin platform.
bool isDarwin() const { return TargetTriple.isMacOSX(); }
+ /// isBGP - True if this is a BG/P platform.
+ bool isBGP() const { return TargetTriple.getVendor() == Triple::BGP; }
bool isDarwinABI() const { return isDarwin(); }
bool isSVR4ABI() const { return !isDarwin(); }
+ /// enablePostRAScheduler - True when optimizing at CodeGenOpt::Default (-O2) or higher.
+ bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode& Mode,
+ RegClassVector& CriticalPathRCs) const;
};
} // End llvm namespace
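The IsBookE flag follows the usual subtarget pattern: TableGen-generated
code sets it from the feature string before the constructor body runs. A
plausible feature definition in PPC.td; it is not visible in this excerpt,
so treat the exact strings as assumptions:

  def FeatureBookE : SubtargetFeature<"booke", "IsBookE", "true",
                                      "Enable Book E instructions">;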
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index f5744b8..d113976 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -11,10 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#include "PPC.h"
#include "PPCTargetMachine.h"
+#include "PPC.h"
#include "llvm/PassManager.h"
#include "llvm/MC/MCStreamer.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
@@ -22,37 +23,46 @@ using namespace llvm;
extern "C" void LLVMInitializePowerPCTarget() {
// Register the targets
- RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
+ RegisterTargetMachine<PPC32TargetMachine> A(ThePPC32Target);
RegisterTargetMachine<PPC64TargetMachine> B(ThePPC64Target);
}
PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL,
bool is64Bit)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS, is64Bit),
DataLayout(Subtarget.getTargetDataString()), InstrInfo(*this),
FrameLowering(Subtarget), JITInfo(*this, is64Bit),
TLInfo(*this), TSInfo(*this),
InstrItins(Subtarget.getInstrItineraryData()) {
+
+ // The binutils for the BG/P are too old for CFI.
+ if (Subtarget.isBGP())
+ setMCUseCFI(false);
}
-/// Override this for PowerPC. Tail merging happily breaks up instruction issue
-/// groups, which typically degrades performance.
-bool PPCTargetMachine::getEnableTailMergeDefault() const { return false; }
+void PPC32TargetMachine::anchor() { }
-PPC32TargetMachine::PPC32TargetMachine(const Target &T, StringRef TT,
+PPC32TargetMachine::PPC32TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : PPCTargetMachine(T, TT, CPU, FS, RM, CM, false) {
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {
}
+void PPC64TargetMachine::anchor() { }
-PPC64TargetMachine::PPC64TargetMachine(const Target &T, StringRef TT,
+PPC64TargetMachine::PPC64TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : PPCTargetMachine(T, TT, CPU, FS, RM, CM, true) {
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {
}
@@ -60,33 +70,56 @@ PPC64TargetMachine::PPC64TargetMachine(const Target &T, StringRef TT,
// Pass Pipeline Configuration
//===----------------------------------------------------------------------===//
-bool PPCTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+namespace {
+/// PPC Code Generator Pass Configuration Options.
+class PPCPassConfig : public TargetPassConfig {
+public:
+ PPCPassConfig(PPCTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ PPCTargetMachine &getPPCTargetMachine() const {
+ return getTM<PPCTargetMachine>();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) {
+ TargetPassConfig *PassConfig = new PPCPassConfig(this, PM);
+
+ // Override this for PowerPC. Tail merging happily breaks up instruction issue
+ // groups, which typically degrades performance.
+ PassConfig->setEnableTailMerge(false);
+
+ return PassConfig;
+}
+
+bool PPCPassConfig::addInstSelector() {
// Install an instruction selector.
- PM.add(createPPCISelDag(*this));
+ PM.add(createPPCISelDag(getPPCTargetMachine()));
return false;
}
-bool PPCTargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+bool PPCPassConfig::addPreEmitPass() {
// Must run branch selection immediately preceding the asm printer.
PM.add(createPPCBranchSelectionPass());
return false;
}
bool PPCTargetMachine::addCodeEmitter(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
JITCodeEmitter &JCE) {
// FIXME: This should be moved to TargetJITInfo!!
if (Subtarget.isPPC64())
// Temporary workaround for the inability of PPC64 JIT to handle jump
// tables.
- DisableJumpTables = true;
-
+ Options.DisableJumpTables = true;
+
// Inform the subtarget that we are in JIT mode. FIXME: does this break macho
// writing?
Subtarget.SetJITMode();
-
+
// Machine code emitter pass for PowerPC.
PM.add(createPPCJITCodeEmitterPass(*this, JCE));
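
[note] This file shows the whole pipeline migration in one place: the addInstSelector/addPreEmitPass overrides move off PPCTargetMachine onto a TargetPassConfig subclass, and per-target defaults such as tail merging are now set on the config object instead of via getEnableTailMergeDefault(). The same pattern applies to any backend; a sketch using invented Foo* names for illustration:

    #include "llvm/CodeGen/Passes.h"   // TargetPassConfig

    namespace {
    // Hypothetical backend: all pipeline hooks live on the pass config.
    class FooPassConfig : public TargetPassConfig {
    public:
      FooPassConfig(FooTargetMachine *TM, PassManagerBase &PM)
        : TargetPassConfig(TM, PM) {}

      FooTargetMachine &getFooTargetMachine() const {
        return getTM<FooTargetMachine>();
      }

      virtual bool addInstSelector() {
        PM.add(createFooISelDag(getFooTargetMachine()));
        return false;                  // false == no error
      }
    };
    } // namespace

    TargetPassConfig *FooTargetMachine::createPassConfig(PassManagerBase &PM) {
      TargetPassConfig *PC = new FooPassConfig(this, PM);
      PC->setEnableTailMerge(false);   // old getEnableTailMergeDefault() knob
      return PC;
    }
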
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
index d06f084..7da2b0c 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCTargetMachine.h
@@ -1,4 +1,4 @@
-//===-- PPCTargetMachine.h - Define TargetMachine for PowerPC -----*- C++ -*-=//
+//===-- PPCTargetMachine.h - Define TargetMachine for PowerPC ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -24,8 +24,6 @@
#include "llvm/Target/TargetData.h"
namespace llvm {
-class PassManager;
-class GlobalValue;
/// PPCTargetMachine - Common code between 32-bit and 64-bit PowerPC targets.
///
@@ -41,15 +39,16 @@ class PPCTargetMachine : public LLVMTargetMachine {
public:
PPCTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM, bool is64Bit);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL, bool is64Bit);
virtual const PPCInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const PPCFrameLowering *getFrameLowering() const {
return &FrameLowering;
}
virtual PPCJITInfo *getJITInfo() { return &JITInfo; }
- virtual const PPCTargetLowering *getTargetLowering() const {
+ virtual const PPCTargetLowering *getTargetLowering() const {
return &TLInfo;
}
virtual const PPCSelectionDAGInfo* getSelectionDAGInfo() const {
@@ -58,37 +57,39 @@ public:
virtual const PPCRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
-
+
virtual const TargetData *getTargetData() const { return &DataLayout; }
virtual const PPCSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const InstrItineraryData *getInstrItineraryData() const {
+ virtual const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
}
// Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addCodeEmitter(PassManagerBase &PM, CodeGenOpt::Level OptLevel,
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+ virtual bool addCodeEmitter(PassManagerBase &PM,
JITCodeEmitter &JCE);
- virtual bool getEnableTailMergeDefault() const;
};
/// PPC32TargetMachine - PowerPC 32-bit target machine.
///
class PPC32TargetMachine : public PPCTargetMachine {
+ virtual void anchor();
public:
PPC32TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
/// PPC64TargetMachine - PowerPC 64-bit target machine.
///
class PPC64TargetMachine : public PPCTargetMachine {
+ virtual void anchor();
public:
PPC64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
index dab35e5..883aa3a 100644
--- a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
+++ b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -100,7 +100,7 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
bool Changed = false;
for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I)
- if (I->getDesc().hasDelaySlot()) {
+ if (I->hasDelaySlot()) {
MachineBasicBlock::iterator D = MBB.end();
MachineBasicBlock::iterator J = I;
@@ -149,7 +149,7 @@ Filler::findDelayInstr(MachineBasicBlock &MBB,
}
//Call's delay filler can def some of call's uses.
- if (slot->getDesc().isCall())
+ if (slot->isCall())
insertCallUses(slot, RegUses);
else
insertDefsUses(slot, RegDefs, RegUses);
@@ -170,7 +170,7 @@ Filler::findDelayInstr(MachineBasicBlock &MBB,
if (I->hasUnmodeledSideEffects()
|| I->isInlineAsm()
|| I->isLabel()
- || I->getDesc().hasDelaySlot()
+ || I->hasDelaySlot()
|| isDelayFiller(MBB, I))
break;
@@ -194,13 +194,13 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
if (candidate->isImplicitDef() || candidate->isKill())
return true;
- if (candidate->getDesc().mayLoad()) {
+ if (candidate->mayLoad()) {
sawLoad = true;
if (sawStore)
return true;
}
- if (candidate->getDesc().mayStore()) {
+ if (candidate->mayStore()) {
if (sawStore)
return true;
sawStore = true;
@@ -282,7 +282,7 @@ bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg)
if (RegSet.count(Reg))
return true;
// check Aliased Registers
- for (const unsigned *Alias = TM.getRegisterInfo()->getAliasSet(Reg);
+ for (const uint16_t *Alias = TM.getRegisterInfo()->getAliasSet(Reg);
*Alias; ++ Alias)
if (RegSet.count(*Alias))
return true;
@@ -298,13 +298,13 @@ bool Filler::isDelayFiller(MachineBasicBlock &MBB,
return false;
if (candidate->getOpcode() == SP::UNIMP)
return true;
- const MCInstrDesc &prevdesc = (--candidate)->getDesc();
- return prevdesc.hasDelaySlot();
+ --candidate;
+ return candidate->hasDelaySlot();
}
bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
{
- if (!I->getDesc().isCall())
+ if (!I->isCall())
return false;
unsigned structSizeOpNum = 0;
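
[note] All of the getDesc().isCall(), getDesc().mayLoad() and similar chains collapse to direct MachineInstr calls here because MachineInstr grew forwarding predicates. Conceptually (a simplified sketch; the in-tree versions also take a bundle query argument):

    // MachineInstr.h (sketch): thin wrappers over the MCInstrDesc flags.
    bool MachineInstr::isCall() const       { return getDesc().isCall(); }
    bool MachineInstr::mayLoad() const      { return getDesc().mayLoad(); }
    bool MachineInstr::mayStore() const     { return getDesc().mayStore(); }
    bool MachineInstr::hasDelaySlot() const { return getDesc().hasDelaySlot(); }
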
diff --git a/contrib/llvm/lib/Target/Sparc/FPMover.cpp b/contrib/llvm/lib/Target/Sparc/FPMover.cpp
index 1423b1e..9a729bd 100644
--- a/contrib/llvm/lib/Target/Sparc/FPMover.cpp
+++ b/contrib/llvm/lib/Target/Sparc/FPMover.cpp
@@ -59,19 +59,19 @@ FunctionPass *llvm::createSparcFPMoverPass(TargetMachine &tm) {
/// registers that correspond to it.
static void getDoubleRegPair(unsigned DoubleReg, unsigned &EvenReg,
unsigned &OddReg) {
- static const unsigned EvenHalvesOfPairs[] = {
+ static const uint16_t EvenHalvesOfPairs[] = {
SP::F0, SP::F2, SP::F4, SP::F6, SP::F8, SP::F10, SP::F12, SP::F14,
SP::F16, SP::F18, SP::F20, SP::F22, SP::F24, SP::F26, SP::F28, SP::F30
};
- static const unsigned OddHalvesOfPairs[] = {
+ static const uint16_t OddHalvesOfPairs[] = {
SP::F1, SP::F3, SP::F5, SP::F7, SP::F9, SP::F11, SP::F13, SP::F15,
SP::F17, SP::F19, SP::F21, SP::F23, SP::F25, SP::F27, SP::F29, SP::F31
};
- static const unsigned DoubleRegsInOrder[] = {
+ static const uint16_t DoubleRegsInOrder[] = {
SP::D0, SP::D1, SP::D2, SP::D3, SP::D4, SP::D5, SP::D6, SP::D7, SP::D8,
SP::D9, SP::D10, SP::D11, SP::D12, SP::D13, SP::D14, SP::D15
};
- for (unsigned i = 0; i < sizeof(DoubleRegsInOrder)/sizeof(unsigned); ++i)
+ for (unsigned i = 0; i < array_lengthof(DoubleRegsInOrder); ++i)
if (DoubleRegsInOrder[i] == DoubleReg) {
EvenReg = EvenHalvesOfPairs[i];
OddReg = OddHalvesOfPairs[i];
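
[note] Two related cleanups in this hunk: the static register tables shrink from unsigned to uint16_t, and the element count is computed with array_lengthof instead of sizeof arithmetic. The latter matters precisely because of the former: sizeof(DoubleRegsInOrder)/sizeof(unsigned) would silently become wrong once the element type changed (30 bytes / 4 = 7, not 15). array_lengthof deduces the count from the array type itself, roughly:

    #include <cstddef>

    // llvm::array_lengthof (STLExtras.h) is essentially this template:
    // N is deduced from the array's declared type, so it can never drift
    // out of sync with the element type.
    template <class T, std::size_t N>
    inline std::size_t array_lengthof(T (&)[N]) { return N; }
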
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
index 6a7e090..f5e10fc 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
@@ -16,6 +16,8 @@
using namespace llvm;
+void SparcELFMCAsmInfo::anchor() { }
+
SparcELFMCAsmInfo::SparcELFMCAsmInfo(const Target &T, StringRef TT) {
IsLittleEndian = false;
Triple TheTriple(TT);
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
index 0cb6827..f0e1354 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- SparcMCAsmInfo.h - Sparc asm properties -------------*- C++ -*--====//
+//===-- SparcMCAsmInfo.h - Sparc asm properties ----------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,13 +14,15 @@
#ifndef SPARCTARGETASMINFO_H
#define SPARCTARGETASMINFO_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
+ class StringRef;
class Target;
- struct SparcELFMCAsmInfo : public MCAsmInfo {
+ class SparcELFMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
+ public:
explicit SparcELFMCAsmInfo(const Target &T, StringRef TT);
};
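
[note] The struct-to-class change plus the out-of-line anchor() is the weak-vtable idiom: a class whose first virtual member is defined in exactly one .cpp gets its vtable (and type info) emitted once, in that object file, instead of as a weak copy in every translation unit that includes the header. The whole pattern is just:

    // Header: declare one never-inlined virtual method.
    class SparcELFMCAsmInfo : public MCAsmInfo {
      virtual void anchor();   // pins the vtable to one object file
    public:
      explicit SparcELFMCAsmInfo(const Target &T, StringRef TT);
    };

    // Exactly one .cpp: the empty definition anchors the vtable there.
    void SparcELFMCAsmInfo::anchor() { }
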
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
index cb2a7df..7fdb0c3 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- SparcMCTargetDesc.cpp - Sparc Target Descriptions --------*- C++ -*-===//
+//===-- SparcMCTargetDesc.cpp - Sparc Target Descriptions -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,6 +17,7 @@
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_MC_DESC
@@ -50,9 +51,10 @@ static MCSubtargetInfo *createSparcMCSubtargetInfo(StringRef TT, StringRef CPU,
}
static MCCodeGenInfo *createSparcMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
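
[note] createSparcMCCodeGenInfo now receives and stores the codegen optimization level alongside the relocation and code models, so MC-layer clients can query all three from one object. For a hypothetical target the registered factory takes the same shape (Foo names invented):

    static MCCodeGenInfo *createFooMCCodeGenInfo(StringRef TT, Reloc::Model RM,
                                                 CodeModel::Model CM,
                                                 CodeGenOpt::Level OL) {
      MCCodeGenInfo *X = new MCCodeGenInfo();
      X->InitMCCodeGenInfo(RM, CM, OL);  // OL is the newly stored field
      return X;
    }
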
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
index 2fd9e3f..cba775a 100644
--- a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
@@ -15,9 +15,7 @@
#define SPARCMCTARGETDESC_H
namespace llvm {
-class MCSubtargetInfo;
class Target;
-class StringRef;
extern Target TheSparcTarget;
extern Target TheSparcV9Target;
diff --git a/contrib/llvm/lib/Target/Sparc/Sparc.h b/contrib/llvm/lib/Target/Sparc/Sparc.h
index 7b2c614..ce6ae17 100644
--- a/contrib/llvm/lib/Target/Sparc/Sparc.h
+++ b/contrib/llvm/lib/Target/Sparc/Sparc.h
@@ -18,7 +18,6 @@
#include "MCTargetDesc/SparcMCTargetDesc.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
-#include <cassert>
namespace llvm {
class FunctionPass;
@@ -74,7 +73,6 @@ namespace llvm {
inline static const char *SPARCCondCodeToString(SPCC::CondCodes CC) {
switch (CC) {
- default: llvm_unreachable("Unknown condition code");
case SPCC::ICC_NE: return "ne";
case SPCC::ICC_E: return "e";
case SPCC::ICC_G: return "g";
@@ -103,7 +101,8 @@ namespace llvm {
case SPCC::FCC_LE: return "le";
case SPCC::FCC_ULE: return "ule";
case SPCC::FCC_O: return "o";
- }
+ }
+ llvm_unreachable("Invalid cond code");
}
} // end namespace llvm
#endif
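
[note] Hoisting llvm_unreachable out of the switch and dropping the default case is deliberate: with every enumerator handled and no default, the compiler can warn if a new SPCC::CondCodes value is ever added but not handled, and the unreachable after the switch satisfies the "control reaches end of non-void function" diagnostic. The shape is:

    switch (CC) {
    case SPCC::ICC_NE: return "ne";
    case SPCC::ICC_E:  return "e";
    // ... every remaining enumerator returns its string ...
    }
    // Only reached if CC holds a value outside the enum; also keeps the
    // compiler happy about the function end without a default: case.
    llvm_unreachable("Invalid cond code");
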
diff --git a/contrib/llvm/lib/Target/Sparc/Sparc.td b/contrib/llvm/lib/Target/Sparc/Sparc.td
index 7643366..611f8e8 100644
--- a/contrib/llvm/lib/Target/Sparc/Sparc.td
+++ b/contrib/llvm/lib/Target/Sparc/Sparc.td
@@ -1,10 +1,10 @@
-//===- Sparc.td - Describe the Sparc Target Machine --------*- tablegen -*-===//
-//
+//===-- Sparc.td - Describe the Sparc Target Machine -------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
//
diff --git a/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
index 345e1bc..c14b3d4 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -23,7 +23,6 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -63,6 +62,8 @@ namespace {
virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB)
const;
+
+ virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
};
} // end of anonymous namespace
@@ -82,7 +83,7 @@ void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
}
switch (MO.getType()) {
case MachineOperand::MO_Register:
- O << "%" << LowercaseString(getRegisterName(MO.getReg()));
+ O << "%" << StringRef(getRegisterName(MO.getReg())).lower();
break;
case MachineOperand::MO_Immediate:
@@ -141,13 +142,13 @@ bool SparcAsmPrinter::printGetPCX(const MachineInstr *MI, unsigned opNum,
std::string operand = "";
const MachineOperand &MO = MI->getOperand(opNum);
switch (MO.getType()) {
- default: assert(0 && "Operand is not a register ");
+ default: llvm_unreachable("Operand is not a register");
case MachineOperand::MO_Register:
assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
"Operand is not a physical register ");
assert(MO.getReg() != SP::O7 &&
"%o7 is assigned as destination for getpcx!");
- operand = "%" + LowercaseString(getRegisterName(MO.getReg()));
+ operand = "%" + StringRef(getRegisterName(MO.getReg())).lower();
break;
}
@@ -237,12 +238,19 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
// Check if the last terminator is an unconditional branch.
MachineBasicBlock::const_iterator I = Pred->end();
- while (I != Pred->begin() && !(--I)->getDesc().isTerminator())
+ while (I != Pred->begin() && !(--I)->isTerminator())
; // Noop
- return I == Pred->end() || !I->getDesc().isBarrier();
+ return I == Pred->end() || !I->isBarrier();
}
-
+MachineLocation SparcAsmPrinter::
+getDebugValueLocation(const MachineInstr *MI) const {
+ assert(MI->getNumOperands() == 4 && "Invalid number of operands!");
+ assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() &&
+ "Unexpected MachineOperand types");
+ return MachineLocation(MI->getOperand(0).getReg(),
+ MI->getOperand(1).getImm());
+}
// Force static initialization.
extern "C" void LLVMInitializeSparcAsmPrinter() {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td b/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td
index 856f87a..d471220 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td
@@ -1,10 +1,10 @@
-//===- SparcCallingConv.td - Calling Conventions Sparc -----*- tablegen -*-===//
-//
+//===-- SparcCallingConv.td - Calling Conventions Sparc ----*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the Sparc architectures.
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
index 320c8ca..1c5c89e 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -1,4 +1,4 @@
-//====- SparcFrameLowering.cpp - Sparc Frame Information -------*- C++ -*-====//
+//===-- SparcFrameLowering.cpp - Sparc Frame Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
index 9a2ddc8..210705e 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
@@ -1,4 +1,4 @@
-//===- SparcFrameLowering.h - Define frame lowering for Sparc --*- C++ -*--===//
+//===-- SparcFrameLowering.h - Define frame lowering for Sparc --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index 8c6103d..93710c4 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -176,7 +176,6 @@ SDNode *SparcDAGToDAGISel::Select(SDNode *N) {
MulLHS, MulRHS);
// The high part is in the Y register.
return CurDAG->SelectNodeTo(N, SP::RDY, MVT::i32, SDValue(Mul, 1));
- return NULL;
}
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index d70b163..c3e6f16 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -25,7 +25,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
@@ -51,7 +50,7 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
MVT &LocVT, CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
- static const unsigned RegList[] = {
+ static const uint16_t RegList[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
//Try to get first reg
@@ -175,7 +174,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
InVals.push_back(Arg);
continue;
}
@@ -197,7 +196,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
} else {
unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
&SP::IntRegsRegClass);
@@ -237,7 +236,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
MachinePointerInfo(),
- false,false, 0);
+ false,false, false, 0);
InVals.push_back(Load);
continue;
}
@@ -248,7 +247,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy());
SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
int FI2 = MF.getFrameInfo()->CreateFixedObject(4,
Offset+4,
true);
@@ -256,7 +255,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
SDValue WholeValue =
DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
@@ -273,7 +272,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
} else {
ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
// Sparc is big endian, so add an offset based on the ObjectVT.
@@ -302,11 +301,11 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
// Store remaining ArgRegs to the stack if this is a varargs function.
if (isVarArg) {
- static const unsigned ArgRegs[] = {
+ static const uint16_t ArgRegs[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
};
unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs, 6);
- const unsigned *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
+ const uint16_t *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
unsigned ArgOffset = CCInfo.getNextStackOffset();
if (NumAllocated == 6)
ArgOffset += StackOffset;
@@ -348,7 +347,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue
SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -467,13 +466,13 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
false, false, 0);
// Sparc is big-endian, so the high part comes first.
SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
// Increment the pointer to the other half.
StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
DAG.getIntPtrConstant(4));
// Load the low part.
SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Hi));
@@ -763,7 +762,9 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FMA , MVT::f32, Expand);
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::CTTZ , MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::CTLZ , MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
@@ -831,22 +832,19 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
APInt KnownZero2, KnownOne2;
- KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
+ KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
case SPISD::SELECT_ICC:
case SPISD::SELECT_FCC:
- DAG.ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne,
- Depth+1);
- DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2,
- Depth+1);
+ DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
@@ -897,7 +895,7 @@ SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
GlobalBase, RelAddr);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- AbsAddr, MachinePointerInfo(), false, false, 0);
+ AbsAddr, MachinePointerInfo(), false, false, false, 0);
}
SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
@@ -918,7 +916,7 @@ SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
GlobalBase, RelAddr);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- AbsAddr, MachinePointerInfo(), false, false, 0);
+ AbsAddr, MachinePointerInfo(), false, false, false, 0);
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
@@ -1026,7 +1024,7 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
DebugLoc dl = Node->getDebugLoc();
SDValue VAList = DAG.getLoad(MVT::i32, dl, InChain, VAListPtr,
- MachinePointerInfo(SV), false, false, 0);
+ MachinePointerInfo(SV), false, false, false, 0);
// Increment the pointer, VAList, to the next vaarg
SDValue NextPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, VAList,
DAG.getConstant(VT.getSizeInBits()/8,
@@ -1038,11 +1036,11 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
// f64 load.
if (VT != MVT::f64)
return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
// Otherwise, load it as i64, then do a bitconvert.
SDValue V = DAG.getLoad(MVT::i64, dl, InChain, VAList, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
// Bit-Convert the value to f64.
SDValue Ops[2] = {
@@ -1103,7 +1101,7 @@ static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
FrameAddr = DAG.getLoad(MVT::i32, dl,
Chain,
Ptr,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
}
}
return FrameAddr;
@@ -1135,7 +1133,7 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
RetAddr = DAG.getLoad(MVT::i32, dl,
Chain,
Ptr,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
}
}
return RetAddr;
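
[note] The recurring "false, false, 0" to "false, false, false, 0" rewrites throughout this file track a widened DAG.getLoad signature: the flags are, in order, isVolatile, isNonTemporal, and the newly inserted isInvariant, followed by the alignment. Reading one call from the diff:

    SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
                              MachinePointerInfo(),
                              false,   // isVolatile
                              false,   // isNonTemporal
                              false,   // isInvariant (the new flag)
                              0);      // alignment (0 = ABI default)
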
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
index 8a1886a..cf43048 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -15,8 +15,8 @@
#ifndef SPARC_ISELLOWERING_H
#define SPARC_ISELLOWERING_H
-#include "llvm/Target/TargetLowering.h"
#include "Sparc.h"
+#include "llvm/Target/TargetLowering.h"
namespace llvm {
namespace SPISD {
@@ -50,7 +50,6 @@ namespace llvm {
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -77,9 +76,8 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
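
[note] computeMaskedBitsForTargetNode (and SelectionDAG::ComputeMaskedBits) lose their APInt Mask parameter; the caller now sizes KnownZero/KnownOne to the value's bit width and the target just fills them in, which is why the Sparc implementation above re-derives the width from KnownZero.getBitWidth(). The hook's shape after the change, with a hypothetical Foo target:

    void FooTargetLowering::computeMaskedBitsForTargetNode(
        const SDValue Op, APInt &KnownZero, APInt &KnownOne,
        const SelectionDAG &DAG, unsigned Depth) const {
      // The caller pre-sized both APInts; start from "nothing known".
      KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
      // ... per-opcode refinement, recursing via DAG.ComputeMaskedBits ...
    }
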
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td b/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td
index 6535259..dce3312 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td
@@ -1,10 +1,10 @@
-//===- SparcInstrFormats.td - Sparc Instruction Formats ----*- tablegen -*-===//
-//
+//===-- SparcInstrFormats.td - Sparc Instruction Formats ---*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
class InstSP<dag outs, dag ins, string asmstr, list<dag> pattern> : Instruction {
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index 7a6bf50..faff468 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- SparcInstrInfo.cpp - Sparc Instruction Information -------*- C++ -*-===//
+//===-- SparcInstrInfo.cpp - Sparc Instruction Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -79,7 +79,6 @@ static bool IsIntegerCC(unsigned CC)
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
{
switch(CC) {
- default: llvm_unreachable("Unknown condition code");
case SPCC::ICC_NE: return SPCC::ICC_E;
case SPCC::ICC_E: return SPCC::ICC_NE;
case SPCC::ICC_G: return SPCC::ICC_LE;
@@ -110,6 +109,18 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
case SPCC::FCC_NE: return SPCC::FCC_E;
case SPCC::FCC_E: return SPCC::FCC_NE;
}
+ llvm_unreachable("Invalid cond code");
+}
+
+MachineInstr *
+SparcInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
+ int FrameIx,
+ uint64_t Offset,
+ const MDNode *MDPtr,
+ DebugLoc dl) const {
+ MachineInstrBuilder MIB = BuildMI(MF, dl, get(SP::DBG_VALUE))
+ .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
+ return &*MIB;
}
@@ -133,7 +144,7 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
break;
//Terminator is not a branch
- if (!I->getDesc().isBranch())
+ if (!I->isBranch())
return true;
//Handle Unconditional branches
@@ -195,7 +206,7 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
.addMBB(UnCondBrIter->getOperand(0).getMBB()).addImm(BranchCode);
BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(SP::BA))
.addMBB(TargetBB);
- MBB.addSuccessor(TargetBB);
+
OldInst->eraseFromParent();
UnCondBrIter->eraseFromParent();
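
[note] The new emitFrameIndexDebugValue hook builds the target form of DBG_VALUE with MachineInstrBuilder; our reading of the operand layout, matching what the SparcAsmPrinter::getDebugValueLocation added earlier in this patch decodes:

    // Operands: (frame index, 0, byte offset, variable metadata).
    // BuildMI with no insertion point creates a detached instruction;
    // the debug-info client inserts it where it belongs.
    MachineInstrBuilder MIB = BuildMI(MF, dl, get(SP::DBG_VALUE))
        .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
    return &*MIB;
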
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
index eda64ef..204f698 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -1,4 +1,4 @@
-//===- SparcInstrInfo.h - Sparc Instruction Information ---------*- C++ -*-===//
+//===-- SparcInstrInfo.h - Sparc Instruction Information --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,8 +14,8 @@
#ifndef SPARCINSTRUCTIONINFO_H
#define SPARCINSTRUCTIONINFO_H
-#include "llvm/Target/TargetInstrInfo.h"
#include "SparcRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "SparcGenInstrInfo.inc"
@@ -62,6 +62,13 @@ public:
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
+ /// emitFrameIndexDebugValue - Emit a target-dependent form of
+ /// DBG_VALUE encoding the address of a frame index.
+ virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
+ int FrameIx,
+ uint64_t Offset,
+ const MDNode *MDPtr,
+ DebugLoc dl) const;
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
index cf5c48f..15541ef 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -1,10 +1,10 @@
-//===- SparcInstrInfo.td - Target Description for Sparc Target ------------===//
-//
+//===-- SparcInstrInfo.td - Target Description for Sparc Target -----------===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the Sparc instructions in TableGen format.
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp
new file mode 100644
index 0000000..e744282
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp
@@ -0,0 +1,14 @@
+//===-- SparcMachineFunctionInfo.cpp - Sparc Machine Function Info --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcMachineFunctionInfo.h"
+
+using namespace llvm;
+
+void SparcMachineFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h
index 0b74308..90c27a4 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h
@@ -18,6 +18,7 @@
namespace llvm {
class SparcMachineFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
private:
unsigned GlobalBaseReg;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
index 8c16251..6357468 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- SparcRegisterInfo.cpp - SPARC Register Information -------*- C++ -*-===//
+//===-- SparcRegisterInfo.cpp - SPARC Register Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,15 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#include "Sparc.h"
#include "SparcRegisterInfo.h"
+#include "Sparc.h"
#include "SparcSubtarget.h"
+#include "llvm/Type.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Type.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
@@ -33,9 +33,9 @@ SparcRegisterInfo::SparcRegisterInfo(SparcSubtarget &st,
: SparcGenRegisterInfo(SP::I7), Subtarget(st), TII(tii) {
}
-const unsigned* SparcRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
+const uint16_t* SparcRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
const {
- static const unsigned CalleeSavedRegs[] = { 0 };
+ static const uint16_t CalleeSavedRegs[] = { 0 };
return CalleeSavedRegs;
}
@@ -118,10 +118,8 @@ unsigned SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
unsigned SparcRegisterInfo::getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
- return 0;
}
unsigned SparcRegisterInfo::getEHHandlerRegister() const {
llvm_unreachable("What is the exception handler register");
- return 0;
}
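
[note] The dropped "return 0;" statements were dead code: llvm_unreachable expands to a noreturn construct (an internal trap-and-report call in asserts builds, roughly __builtin_unreachable in release builds), so nothing after it executes and no placeholder return value is needed:

    unsigned SparcRegisterInfo::getEHExceptionRegister() const {
      llvm_unreachable("What is the exception register");
      // no return needed: llvm_unreachable is marked noreturn
    }
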
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
index f845667..9515ad3 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
@@ -1,4 +1,4 @@
-//===- SparcRegisterInfo.h - Sparc Register Information Impl ----*- C++ -*-===//
+//===-- SparcRegisterInfo.h - Sparc Register Information Impl ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -32,7 +32,7 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
SparcRegisterInfo(SparcSubtarget &st, const TargetInstrInfo &tii);
/// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
index cf92829..81bff6c 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
@@ -1,10 +1,10 @@
-//===- SparcRegisterInfo.td - Sparc Register defs ----------*- tablegen -*-===//
-//
+//===-- SparcRegisterInfo.td - Sparc Register defs ---------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -39,6 +39,7 @@ class Rd<bits<5> num, string n, list<Register> subregs> : SparcReg<n> {
let Num = num;
let SubRegs = subregs;
let SubRegIndices = [sub_even, sub_odd];
+ let CoveredBySubRegs = 1;
}
// Control Registers
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp
index 6c501cf..e5b2aeb 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp
@@ -1,4 +1,4 @@
-//===- SparcSubtarget.cpp - SPARC Subtarget Information -------------------===//
+//===-- SparcSubtarget.cpp - SPARC Subtarget Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,6 +21,8 @@
using namespace llvm;
+void SparcSubtarget::anchor() { }
+
SparcSubtarget::SparcSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool is64Bit) :
SparcGenSubtargetInfo(TT, CPU, FS),
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h
index 00a04c3..a81931b 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h
@@ -1,4 +1,4 @@
-//=====-- SparcSubtarget.h - Define Subtarget for the SPARC ----*- C++ -*-====//
+//===-- SparcSubtarget.h - Define Subtarget for the SPARC -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -24,6 +24,7 @@ namespace llvm {
class StringRef;
class SparcSubtarget : public SparcGenSubtargetInfo {
+ virtual void anchor();
bool IsV9;
bool V8DeprecatedInsts;
bool IsVIS;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index 3d7b4a4..6f31356 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -10,9 +10,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Sparc.h"
#include "SparcTargetMachine.h"
+#include "Sparc.h"
#include "llvm/PassManager.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -24,43 +25,73 @@ extern "C" void LLVMInitializeSparcTarget() {
/// SparcTargetMachine ctor - Create an ILP32 architecture model
///
-SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
+SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL,
bool is64bit)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS, is64bit),
DataLayout(Subtarget.getDataLayout()),
TLInfo(*this), TSInfo(*this), InstrInfo(Subtarget),
FrameLowering(Subtarget) {
}
-bool SparcTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createSparcISelDag(*this));
+namespace {
+/// Sparc Code Generator Pass Configuration Options.
+class SparcPassConfig : public TargetPassConfig {
+public:
+ SparcPassConfig(SparcTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ SparcTargetMachine &getSparcTargetMachine() const {
+ return getTM<SparcTargetMachine>();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new SparcPassConfig(this, PM);
+}
+
+bool SparcPassConfig::addInstSelector() {
+ PM.add(createSparcISelDag(getSparcTargetMachine()));
return false;
}
/// addPreEmitPass - This pass may be implemented by targets that want to run
/// passes immediately before machine code is emitted. This should return
/// true if -print-machineinstrs should print out the code after the passes.
-bool SparcTargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel){
- PM.add(createSparcFPMoverPass(*this));
- PM.add(createSparcDelaySlotFillerPass(*this));
+bool SparcPassConfig::addPreEmitPass(){
+ PM.add(createSparcFPMoverPass(getSparcTargetMachine()));
+ PM.add(createSparcDelaySlotFillerPass(getSparcTargetMachine()));
return true;
}
+void SparcV8TargetMachine::anchor() { }
+
SparcV8TargetMachine::SparcV8TargetMachine(const Target &T,
StringRef TT, StringRef CPU,
- StringRef FS, Reloc::Model RM,
- CodeModel::Model CM)
- : SparcTargetMachine(T, TT, CPU, FS, RM, CM, false) {
+ StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {
}
-SparcV9TargetMachine::SparcV9TargetMachine(const Target &T,
+void SparcV9TargetMachine::anchor() { }
+
+SparcV9TargetMachine::SparcV9TargetMachine(const Target &T,
StringRef TT, StringRef CPU,
- StringRef FS, Reloc::Model RM,
- CodeModel::Model CM)
- : SparcTargetMachine(T, TT, CPU, FS, RM, CM, true) {
+ StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {
}
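
[note] With both Sparc machines converted, every target constructor in this patch now threads a TargetOptions reference and a CodeGenOpt::Level through to LLVMTargetMachine. On the client side that plausibly corresponds to supplying both at creation time; a hedged sketch, since the exact createTargetMachine parameter list is an assumption rather than something shown in this diff:

    TargetOptions Options;                 // e.g. Options.DisableJumpTables
    TargetMachine *TM = TheTarget->createTargetMachine(
        TripleStr, CPU, FeatureStr, Options,
        Reloc::Default, CodeModel::Default, CodeGenOpt::Default);
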
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
index 3c907dd..b203dfa 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
@@ -34,8 +34,9 @@ class SparcTargetMachine : public LLVMTargetMachine {
SparcFrameLowering FrameLowering;
public:
SparcTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM, bool is64bit);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL, bool is64bit);
virtual const SparcInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const TargetFrameLowering *getFrameLowering() const {
@@ -54,26 +55,31 @@ public:
virtual const TargetData *getTargetData() const { return &DataLayout; }
// Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
};
/// SparcV8TargetMachine - Sparc 32-bit target machine
///
class SparcV8TargetMachine : public SparcTargetMachine {
+ virtual void anchor();
public:
SparcV8TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
/// SparcV9TargetMachine - Sparc 64-bit target machine
///
class SparcV9TargetMachine : public SparcTargetMachine {
+ virtual void anchor();
public:
SparcV9TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
deleted file mode 100644
index 8540546..0000000
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- SystemZMCAsmInfo.cpp - SystemZ asm properties ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the SystemZMCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SystemZMCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSectionELF.h"
-#include "llvm/Support/ELF.h"
-using namespace llvm;
-
-SystemZMCAsmInfo::SystemZMCAsmInfo(const Target &T, StringRef TT) {
- IsLittleEndian = false;
- PointerSize = 8;
- PrivateGlobalPrefix = ".L";
- WeakRefDirective = "\t.weak\t";
- PCSymbol = ".";
-}
-
-const MCSection *SystemZMCAsmInfo::
-getNonexecutableStackSection(MCContext &Ctx) const{
- return Ctx.getELFSection(".note.GNU-stack", ELF::SHT_PROGBITS,
- 0, SectionKind::getMetadata());
-}
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
deleted file mode 100644
index a6a27e2..0000000
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//====-- SystemZMCAsmInfo.h - SystemZ asm properties -----------*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the SystemZMCAsmInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SystemZTARGETASMINFO_H
-#define SystemZTARGETASMINFO_H
-
-#include "llvm/MC/MCAsmInfo.h"
-
-namespace llvm {
- class Target;
- class StringRef;
-
- struct SystemZMCAsmInfo : public MCAsmInfo {
- explicit SystemZMCAsmInfo(const Target &T, StringRef TT);
- virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const;
- };
-
-} // namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
deleted file mode 100644
index 23fb1e0..0000000
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//===-- SystemZMCTargetDesc.cpp - SystemZ Target Descriptions ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides SystemZ specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SystemZMCTargetDesc.h"
-#include "SystemZMCAsmInfo.h"
-#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_INSTRINFO_MC_DESC
-#include "SystemZGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_MC_DESC
-#include "SystemZGenSubtargetInfo.inc"
-
-#define GET_REGINFO_MC_DESC
-#include "SystemZGenRegisterInfo.inc"
-
-using namespace llvm;
-
-static MCInstrInfo *createSystemZMCInstrInfo() {
- MCInstrInfo *X = new MCInstrInfo();
- InitSystemZMCInstrInfo(X);
- return X;
-}
-
-static MCRegisterInfo *createSystemZMCRegisterInfo(StringRef TT) {
- MCRegisterInfo *X = new MCRegisterInfo();
- InitSystemZMCRegisterInfo(X, 0);
- return X;
-}
-
-static MCSubtargetInfo *createSystemZMCSubtargetInfo(StringRef TT,
- StringRef CPU,
- StringRef FS) {
- MCSubtargetInfo *X = new MCSubtargetInfo();
- InitSystemZMCSubtargetInfo(X, TT, CPU, FS);
- return X;
-}
-
-static MCCodeGenInfo *createSystemZMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
- if (RM == Reloc::Default)
- RM = Reloc::Static;
- X->InitMCCodeGenInfo(RM, CM);
- return X;
-}
-
-extern "C" void LLVMInitializeSystemZTargetMC() {
- // Register the MC asm info.
- RegisterMCAsmInfo<SystemZMCAsmInfo> X(TheSystemZTarget);
-
- // Register the MC codegen info.
- TargetRegistry::RegisterMCCodeGenInfo(TheSystemZTarget,
- createSystemZMCCodeGenInfo);
-
- // Register the MC instruction info.
- TargetRegistry::RegisterMCInstrInfo(TheSystemZTarget,
- createSystemZMCInstrInfo);
-
- // Register the MC register info.
- TargetRegistry::RegisterMCRegInfo(TheSystemZTarget,
- createSystemZMCRegisterInfo);
-
- // Register the MC subtarget info.
- TargetRegistry::RegisterMCSubtargetInfo(TheSystemZTarget,
- createSystemZMCSubtargetInfo);
-}
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
deleted file mode 100644
index e2ad5af..0000000
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- SystemZMCTargetDesc.h - SystemZ Target Descriptions -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides SystemZ specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SYSTEMZMCTARGETDESC_H
-#define SYSTEMZMCTARGETDESC_H
-
-namespace llvm {
-class MCSubtargetInfo;
-class Target;
-class StringRef;
-
-extern Target TheSystemZTarget;
-
-} // End llvm namespace
-
-// Defines symbolic names for SystemZ registers.
-// This defines a mapping from register name to register number.
-#define GET_REGINFO_ENUM
-#include "SystemZGenRegisterInfo.inc"
-
-// Defines symbolic names for the SystemZ instructions.
-#define GET_INSTRINFO_ENUM
-#include "SystemZGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_ENUM
-#include "SystemZGenSubtargetInfo.inc"
-
-#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.h b/contrib/llvm/lib/Target/SystemZ/SystemZ.h
deleted file mode 100644
index 88960b9..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZ.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//=-- SystemZ.h - Top-level interface for SystemZ representation -*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the entry points for global functions defined in
-// the LLVM SystemZ backend.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_SystemZ_H
-#define LLVM_TARGET_SystemZ_H
-
-#include "MCTargetDesc/SystemZMCTargetDesc.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
- class SystemZTargetMachine;
- class FunctionPass;
- class formatted_raw_ostream;
-
- namespace SystemZCC {
- // SystemZ specific condition code. These correspond to SYSTEMZ_*_COND in
- // SystemZInstrInfo.td. They must be kept in synch.
- enum CondCodes {
- O = 0,
- H = 1,
- NLE = 2,
- L = 3,
- NHE = 4,
- LH = 5,
- NE = 6,
- E = 7,
- NLH = 8,
- HE = 9,
- NL = 10,
- LE = 11,
- NH = 12,
- NO = 13,
- INVALID = -1
- };
- }
-
- FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
-
-} // end namespace llvm;
-#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.td b/contrib/llvm/lib/Target/SystemZ/SystemZ.td
deleted file mode 100644
index 4c08c08..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZ.td
+++ /dev/null
@@ -1,61 +0,0 @@
-//===- SystemZ.td - Describe the SystemZ Target Machine ------*- tblgen -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This is the top level entry point for the SystemZ target.
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Target-independent interfaces
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-
-//===----------------------------------------------------------------------===//
-// Subtarget Features.
-//===----------------------------------------------------------------------===//
-def FeatureZ10 : SubtargetFeature<"z10", "HasZ10Insts", "true",
- "Support Z10 instructions">;
-
-//===----------------------------------------------------------------------===//
-// SystemZ supported processors.
-//===----------------------------------------------------------------------===//
-class Proc<string Name, list<SubtargetFeature> Features>
- : Processor<Name, NoItineraries, Features>;
-
-def : Proc<"z9", []>;
-def : Proc<"z10", [FeatureZ10]>;
-
-//===----------------------------------------------------------------------===//
-// Register File Description
-//===----------------------------------------------------------------------===//
-
-include "SystemZRegisterInfo.td"
-
-//===----------------------------------------------------------------------===//
-// Calling Convention Description
-//===----------------------------------------------------------------------===//
-
-include "SystemZCallingConv.td"
-
-//===----------------------------------------------------------------------===//
-// Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-include "SystemZInstrInfo.td"
-include "SystemZInstrFP.td"
-
-def SystemZInstrInfo : InstrInfo {}
-
-//===----------------------------------------------------------------------===//
-// Target Declaration
-//===----------------------------------------------------------------------===//
-
-def SystemZ : Target {
- let InstructionSet = SystemZInstrInfo;
-}
-
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
deleted file mode 100644
index 43dcdfc..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-//===-- SystemZAsmPrinter.cpp - SystemZ LLVM assembly writer ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to the SystemZ assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "SystemZ.h"
-#include "SystemZInstrInfo.h"
-#include "SystemZTargetMachine.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
- class SystemZAsmPrinter : public AsmPrinter {
- public:
- SystemZAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {}
-
- virtual const char *getPassName() const {
- return "SystemZ Assembly Printer";
- }
-
- void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &O,
- const char* Modifier = 0);
- void printPCRelImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O);
- void printRIAddrOperand(const MachineInstr *MI, int OpNum, raw_ostream &O,
- const char* Modifier = 0);
- void printRRIAddrOperand(const MachineInstr *MI, int OpNum, raw_ostream &O,
- const char* Modifier = 0);
- void printS16ImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O) {
- O << (int16_t)MI->getOperand(OpNum).getImm();
- }
- void printU16ImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O) {
- O << (uint16_t)MI->getOperand(OpNum).getImm();
- }
- void printS32ImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O) {
- O << (int32_t)MI->getOperand(OpNum).getImm();
- }
- void printU32ImmOperand(const MachineInstr *MI, int OpNum, raw_ostream &O) {
- O << (uint32_t)MI->getOperand(OpNum).getImm();
- }
-
- void printInstruction(const MachineInstr *MI, raw_ostream &O);
- static const char *getRegisterName(unsigned RegNo);
-
- void EmitInstruction(const MachineInstr *MI);
- };
-} // end of anonymous namespace
-
-#include "SystemZGenAsmWriter.inc"
-
-void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- SmallString<128> Str;
- raw_svector_ostream OS(Str);
- printInstruction(MI, OS);
- OutStreamer.EmitRawText(OS.str());
-}
-
-void SystemZAsmPrinter::printPCRelImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- const MachineOperand &MO = MI->getOperand(OpNum);
- switch (MO.getType()) {
- case MachineOperand::MO_Immediate:
- O << MO.getImm();
- return;
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- return;
- case MachineOperand::MO_GlobalAddress: {
- const GlobalValue *GV = MO.getGlobal();
- O << *Mang->getSymbol(GV);
-
- // Assemble calls via PLT for externally visible symbols if PIC.
- if (TM.getRelocationModel() == Reloc::PIC_ &&
- !GV->hasHiddenVisibility() && !GV->hasProtectedVisibility() &&
- !GV->hasLocalLinkage())
- O << "@PLT";
-
- printOffset(MO.getOffset(), O);
- return;
- }
- case MachineOperand::MO_ExternalSymbol: {
- std::string Name(MAI->getGlobalPrefix());
- Name += MO.getSymbolName();
- O << Name;
-
- if (TM.getRelocationModel() == Reloc::PIC_)
- O << "@PLT";
-
- return;
- }
- default:
- assert(0 && "Not implemented yet!");
- }
-}
-
-
-void SystemZAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O, const char *Modifier) {
- const MachineOperand &MO = MI->getOperand(OpNum);
- switch (MO.getType()) {
- case MachineOperand::MO_Register: {
- assert (TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
- "Virtual registers should be already mapped!");
- unsigned Reg = MO.getReg();
- if (Modifier && strncmp(Modifier, "subreg", 6) == 0) {
- if (strncmp(Modifier + 7, "even", 4) == 0)
- Reg = TM.getRegisterInfo()->getSubReg(Reg, SystemZ::subreg_32bit);
- else if (strncmp(Modifier + 7, "odd", 3) == 0)
- Reg = TM.getRegisterInfo()->getSubReg(Reg, SystemZ::subreg_odd32);
- else
- assert(0 && "Invalid subreg modifier");
- }
-
- O << '%' << getRegisterName(Reg);
- return;
- }
- case MachineOperand::MO_Immediate:
- O << MO.getImm();
- return;
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol();
- return;
- case MachineOperand::MO_JumpTableIndex:
- O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() << '_'
- << MO.getIndex();
-
- return;
- case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << '_'
- << MO.getIndex();
-
- printOffset(MO.getOffset(), O);
- break;
- case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
- break;
- case MachineOperand::MO_ExternalSymbol: {
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
- break;
- }
- default:
- assert(0 && "Not implemented yet!");
- }
-
- switch (MO.getTargetFlags()) {
- default: assert(0 && "Unknown target flag on GV operand");
- case SystemZII::MO_NO_FLAG:
- break;
- case SystemZII::MO_GOTENT: O << "@GOTENT"; break;
- case SystemZII::MO_PLT: O << "@PLT"; break;
- }
-
- printOffset(MO.getOffset(), O);
-}
-
-void SystemZAsmPrinter::printRIAddrOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O,
- const char *Modifier) {
- const MachineOperand &Base = MI->getOperand(OpNum);
-
- // Print displacement operand.
- printOperand(MI, OpNum+1, O);
-
- // Print base operand (if any)
- if (Base.getReg()) {
- O << '(';
- printOperand(MI, OpNum, O);
- O << ')';
- }
-}
-
-void SystemZAsmPrinter::printRRIAddrOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O,
- const char *Modifier) {
- const MachineOperand &Base = MI->getOperand(OpNum);
- const MachineOperand &Index = MI->getOperand(OpNum+2);
-
- // Print displacement operand.
- printOperand(MI, OpNum+1, O);
-
- // Print base operand (if any)
- if (Base.getReg()) {
- O << '(';
- printOperand(MI, OpNum, O);
- if (Index.getReg()) {
- O << ',';
- printOperand(MI, OpNum+2, O);
- }
- O << ')';
- } else
- assert(!Index.getReg() && "Should allocate base register first!");
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeSystemZAsmPrinter() {
- RegisterAsmPrinter<SystemZAsmPrinter> X(TheSystemZTarget);
-}
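
The address printers above emit the textual operand forms disp, disp(base)
and disp(base,index). A self-contained sketch of that formatting, assuming
the illustrative helper printRRIAddr and made-up register names:

#include <cstdio>

// Mirrors printRRIAddrOperand: the displacement always prints; the base
// register (and the optional index) print as a parenthesized suffix.
static void printRRIAddr(long Disp, const char *Base, const char *Index) {
  std::printf("%ld", Disp);
  if (Base) {
    std::printf("(%%%s", Base);
    if (Index)
      std::printf(",%%%s", Index);
    std::printf(")");
  }
}

int main() {
  printRRIAddr(160, "r15", 0); std::printf("\n"); // prints 160(%r15)
  printRRIAddr(8, "r2", "r3"); std::printf("\n"); // prints 8(%r2,%r3)
}
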
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
deleted file mode 100644
index c799a9e..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
+++ /dev/null
@@ -1,46 +0,0 @@
-//=- SystemZCallingConv.td - Calling Conventions for SystemZ -*- tablegen -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This describes the calling conventions for SystemZ architecture.
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// SystemZ Return Value Calling Convention
-//===----------------------------------------------------------------------===//
-def RetCC_SystemZ : CallingConv<[
- // Promote i8/i16/i32 return values to i64.
- CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
-
- // i64 is returned in register R2
- CCIfType<[i64], CCAssignToReg<[R2D, R3D, R4D, R5D]>>,
-
- // f32 / f64 are returned in F0
- CCIfType<[f32], CCAssignToReg<[F0S, F2S, F4S, F6S]>>,
- CCIfType<[f64], CCAssignToReg<[F0L, F2L, F4L, F6L]>>
-]>;
-
-//===----------------------------------------------------------------------===//
-// SystemZ Argument Calling Conventions
-//===----------------------------------------------------------------------===//
-def CC_SystemZ : CallingConv<[
- // Promote i8/i16/i32 arguments to i64.
- CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
-
- // The first 5 integer arguments of non-varargs functions are passed in
- // integer registers.
- CCIfType<[i64], CCAssignToReg<[R2D, R3D, R4D, R5D, R6D]>>,
-
- // The first 4 floating point arguments of non-varargs functions are passed
- // in FP registers.
- CCIfType<[f32], CCAssignToReg<[F0S, F2S, F4S, F6S]>>,
- CCIfType<[f64], CCAssignToReg<[F0L, F2L, F4L, F6L]>>,
-
- // Integer values get stored in stack slots that are 8 bytes in
- // size and 8-byte aligned.
- CCIfType<[i64, f32, f64], CCAssignToStack<8, 8>>
-]>;
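
In prose, CC_SystemZ above says: promote small integers to i64, pass the
first five integer arguments in r2-r6 and the first four FP arguments in
f0/f2/f4/f6, and put everything else in 8-byte, 8-aligned stack slots. A
rough standalone C++ sketch of that assignment loop (assignArgs and ArgKind
are illustrative, not generated code):

#include <cstdio>

enum ArgKind { Int64, Float32, Float64 };

static void assignArgs(const ArgKind *Args, unsigned N) {
  static const char *GPRs[] = { "r2", "r3", "r4", "r5", "r6" };
  static const char *FPRs[] = { "f0", "f2", "f4", "f6" };
  unsigned NextGPR = 0, NextFPR = 0, StackOffset = 0;
  for (unsigned i = 0; i != N; ++i) {
    if (Args[i] == Int64 && NextGPR < 5)
      std::printf("arg %u -> %s\n", i, GPRs[NextGPR++]);
    else if (Args[i] != Int64 && NextFPR < 4)
      std::printf("arg %u -> %s\n", i, FPRs[NextFPR++]);
    else {
      // 8 bytes, 8-byte aligned, mirroring CCAssignToStack<8, 8>.
      std::printf("arg %u -> stack+%u\n", i, StackOffset);
      StackOffset += 8;
    }
  }
}

int main() {
  ArgKind Args[] = { Int64, Float64, Int64, Int64, Int64, Int64, Int64 };
  assignArgs(Args, sizeof(Args) / sizeof(Args[0])); // last i64 -> stack+0
}
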
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
deleted file mode 100644
index 2ad84a2..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ /dev/null
@@ -1,386 +0,0 @@
-//===-- SystemZFrameLowering.cpp - SystemZ Frame Information -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the SystemZ implementation of the TargetFrameLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SystemZFrameLowering.h"
-#include "SystemZInstrBuilder.h"
-#include "SystemZInstrInfo.h"
-#include "SystemZMachineFunctionInfo.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/CommandLine.h"
-
-using namespace llvm;
-
-SystemZFrameLowering::SystemZFrameLowering(const SystemZSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, -160), STI(sti) {
- // Fill the spill offsets map
- static const unsigned SpillOffsTab[][2] = {
- { SystemZ::R2D, 0x10 },
- { SystemZ::R3D, 0x18 },
- { SystemZ::R4D, 0x20 },
- { SystemZ::R5D, 0x28 },
- { SystemZ::R6D, 0x30 },
- { SystemZ::R7D, 0x38 },
- { SystemZ::R8D, 0x40 },
- { SystemZ::R9D, 0x48 },
- { SystemZ::R10D, 0x50 },
- { SystemZ::R11D, 0x58 },
- { SystemZ::R12D, 0x60 },
- { SystemZ::R13D, 0x68 },
- { SystemZ::R14D, 0x70 },
- { SystemZ::R15D, 0x78 }
- };
-
- RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
-
- for (unsigned i = 0, e = array_lengthof(SpillOffsTab); i != e; ++i)
- RegSpillOffsets[SpillOffsTab[i][0]] = SpillOffsTab[i][1];
-}
-
-/// hasFP - Return true if the specified function should have a dedicated
-/// frame pointer register. This is true if the function has variable-sized
-/// allocas or if frame pointer elimination is disabled.
-bool SystemZFrameLowering::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects();
-}
-
-/// emitSPUpdate - Emit a series of instructions to increment / decrement the
-/// stack pointer by a constant value.
-static
-void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- int64_t NumBytes, const TargetInstrInfo &TII) {
- unsigned Opc; uint64_t Chunk;
- bool isSub = NumBytes < 0;
- uint64_t Offset = isSub ? -NumBytes : NumBytes;
-
- if (Offset >= (1LL << 15) - 1) {
- Opc = SystemZ::ADD64ri32;
- Chunk = (1LL << 31) - 1;
- } else {
- Opc = SystemZ::ADD64ri16;
- Chunk = (1LL << 15) - 1;
- }
-
- DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- while (Offset) {
- uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(Opc), SystemZ::R15D)
- .addReg(SystemZ::R15D).addImm(isSub ? -ThisVal : ThisVal);
- // The PSW implicit def is dead.
- MI->getOperand(3).setIsDead();
- Offset -= ThisVal;
- }
-}
-
-void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineFrameInfo *MFI = MF.getFrameInfo();
- const SystemZInstrInfo &TII =
- *static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
- SystemZMachineFunctionInfo *SystemZMFI =
- MF.getInfo<SystemZMachineFunctionInfo>();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
-
- // Get the number of bytes to allocate from the FrameInfo.
- // Note that the area for callee-saved registers is already allocated, so
- // we need to 'undo' that part of the stack movement.
- uint64_t StackSize = MFI->getStackSize();
- StackSize -= SystemZMFI->getCalleeSavedFrameSize();
-
- uint64_t NumBytes = StackSize - getOffsetOfLocalArea();
-
- // Skip the callee-saved push instructions.
- while (MBBI != MBB.end() &&
- (MBBI->getOpcode() == SystemZ::MOV64mr ||
- MBBI->getOpcode() == SystemZ::MOV64mrm))
- ++MBBI;
-
- if (MBBI != MBB.end())
- DL = MBBI->getDebugLoc();
-
- // adjust stack pointer: R15 -= numbytes
- if (StackSize || MFI->hasCalls()) {
- assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
- "Invalid stack frame calculation!");
- emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, TII);
- }
-
- if (hasFP(MF)) {
- // Update R11 with the new base value...
- BuildMI(MBB, MBBI, DL, TII.get(SystemZ::MOV64rr), SystemZ::R11D)
- .addReg(SystemZ::R15D);
-
- // Mark the FramePtr as live-in in every block except the entry.
- for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
- I != E; ++I)
- I->addLiveIn(SystemZ::R11D);
-
- }
-}
-
-void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- const SystemZInstrInfo &TII =
- *static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
- SystemZMachineFunctionInfo *SystemZMFI =
- MF.getInfo<SystemZMachineFunctionInfo>();
- unsigned RetOpcode = MBBI->getOpcode();
-
- switch (RetOpcode) {
- case SystemZ::RET: break; // These are ok
- default:
- assert(0 && "Can only insert epilog into returning blocks");
- }
-
- // Get the number of bytes to allocate from the FrameInfo.
- // Note that the area for callee-saved registers is already allocated, so
- // we need to 'undo' that part of the stack movement.
- uint64_t StackSize =
- MFI->getStackSize() - SystemZMFI->getCalleeSavedFrameSize();
- uint64_t NumBytes = StackSize - getOffsetOfLocalArea();
-
- // Skip the final terminator instruction.
- while (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PI = prior(MBBI);
- --MBBI;
- if (!PI->getDesc().isTerminator())
- break;
- }
-
- // When the callee-saved restores were emitted, the stack frame had not yet
- // been finalized (and thus the stack size was unknown). Fix up the offset
- // now that the full stack size is known.
- if (StackSize || MFI->hasCalls()) {
- assert((MBBI->getOpcode() == SystemZ::MOV64rmm ||
- MBBI->getOpcode() == SystemZ::MOV64rm) &&
- "Expected to see callee-save register restore code");
- assert(MF.getRegInfo().isPhysRegUsed(SystemZ::R15D) &&
- "Invalid stack frame calculation!");
-
- unsigned i = 0;
- MachineInstr &MI = *MBBI;
- while (!MI.getOperand(i).isImm()) {
- ++i;
- assert(i < MI.getNumOperands() && "Unexpected restore code!");
- }
-
- uint64_t Offset = NumBytes + MI.getOperand(i).getImm();
- // If Offset does not fit into the 20-bit signed displacement field, we
- // need to emit some additional code...
- if (Offset > 524287) {
- // Fold the displacement into load instruction as much as possible.
- NumBytes = Offset - 524287;
- Offset = 524287;
- emitSPUpdate(MBB, MBBI, NumBytes, TII);
- }
-
- MI.getOperand(i).ChangeToImmediate(Offset);
- }
-}
-
-int SystemZFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
- int FI) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const SystemZMachineFunctionInfo *SystemZMFI =
- MF.getInfo<SystemZMachineFunctionInfo>();
- int Offset = MFI->getObjectOffset(FI) + MFI->getOffsetAdjustment();
- uint64_t StackSize = MFI->getStackSize();
-
- // Fixed objects are really located in the "previous" frame.
- if (FI < 0)
- StackSize -= SystemZMFI->getCalleeSavedFrameSize();
-
- Offset += StackSize - getOffsetOfLocalArea();
-
- // Skip the register save area if we generated the stack frame.
- if (StackSize || MFI->hasCalls())
- Offset -= getOffsetOfLocalArea();
-
- return Offset;
-}
-
-bool
-SystemZFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
- SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
- unsigned CalleeFrameSize = 0;
-
- // Scan the callee-saved registers and find the bounds of the register
- // spill area.
- unsigned LowReg = 0, HighReg = 0, StartOffset = -1U, EndOffset = 0;
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (!SystemZ::FP64RegClass.contains(Reg)) {
- unsigned Offset = RegSpillOffsets[Reg];
- CalleeFrameSize += 8;
- if (StartOffset > Offset) {
- LowReg = Reg; StartOffset = Offset;
- }
- if (EndOffset < Offset) {
- HighReg = Reg; EndOffset = RegSpillOffsets[Reg];
- }
- }
- }
-
- // Save information for epilogue inserter.
- MFI->setCalleeSavedFrameSize(CalleeFrameSize);
- MFI->setLowReg(LowReg); MFI->setHighReg(HighReg);
-
- // Save GPRs
- if (StartOffset) {
- // Build a store instruction. Use the STORE MULTIPLE instruction if there
- // is more than one register to store; otherwise use a plain STORE.
- MachineInstrBuilder MIB =
- BuildMI(MBB, MI, DL, TII.get((LowReg == HighReg ?
- SystemZ::MOV64mr : SystemZ::MOV64mrm)));
-
- // Add store operands.
- MIB.addReg(SystemZ::R15D).addImm(StartOffset);
- if (LowReg == HighReg)
- MIB.addReg(0);
- MIB.addReg(LowReg, RegState::Kill);
- if (LowReg != HighReg)
- MIB.addReg(HighReg, RegState::Kill);
-
- // Do a second scan, adding regs as being killed by the instruction.
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(Reg);
- if (Reg != LowReg && Reg != HighReg)
- MIB.addReg(Reg, RegState::ImplicitKill);
- }
- }
-
- // Save FPRs
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (SystemZ::FP64RegClass.contains(Reg)) {
- MBB.addLiveIn(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i].getFrameIdx(),
- &SystemZ::FP64RegClass, TRI);
- }
- }
-
- return true;
-}
-
-bool
-SystemZFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- MachineFunction &MF = *MBB.getParent();
- const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
- SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
-
- // Restore FP registers
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (SystemZ::FP64RegClass.contains(Reg))
- TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
- &SystemZ::FP64RegClass, TRI);
- }
-
- // Restore GP registers
- unsigned LowReg = MFI->getLowReg(), HighReg = MFI->getHighReg();
- unsigned StartOffset = RegSpillOffsets[LowReg];
-
- if (StartOffset) {
- // Build a load instruction. Use the LOAD MULTIPLE instruction if there
- // is more than one register to load; otherwise use a plain LOAD.
- MachineInstrBuilder MIB =
- BuildMI(MBB, MI, DL, TII.get((LowReg == HighReg ?
- SystemZ::MOV64rm : SystemZ::MOV64rmm)));
- // Add load operands.
- MIB.addReg(LowReg, RegState::Define);
- if (LowReg != HighReg)
- MIB.addReg(HighReg, RegState::Define);
-
- MIB.addReg(hasFP(MF) ? SystemZ::R11D : SystemZ::R15D);
- MIB.addImm(StartOffset);
- if (LowReg == HighReg)
- MIB.addReg(0);
-
- // Do a second scan, adding regs as being defined by the instruction.
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- if (Reg != LowReg && Reg != HighReg)
- MIB.addReg(Reg, RegState::ImplicitDefine);
- }
- }
-
- return true;
-}
-
-void
-SystemZFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- // Determine whether R15/R14 will ever be clobbered inside the function
- // and, if so, mark them as callee-saved.
- MachineFrameInfo *FFI = MF.getFrameInfo();
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- // Check whether high FPRs are ever used; if so, we need to save R15 as
- // well.
- static const unsigned HighFPRs[] = {
- SystemZ::F8L, SystemZ::F9L, SystemZ::F10L, SystemZ::F11L,
- SystemZ::F12L, SystemZ::F13L, SystemZ::F14L, SystemZ::F15L,
- SystemZ::F8S, SystemZ::F9S, SystemZ::F10S, SystemZ::F11S,
- SystemZ::F12S, SystemZ::F13S, SystemZ::F14S, SystemZ::F15S,
- };
-
- bool HighFPRsUsed = false;
- for (unsigned i = 0, e = array_lengthof(HighFPRs); i != e; ++i)
- HighFPRsUsed |= MRI.isPhysRegUsed(HighFPRs[i]);
-
- if (FFI->hasCalls())
- /* FIXME: function is varargs */
- /* FIXME: function grabs RA */
- /* FIXME: function calls eh_return */
- MRI.setPhysRegUsed(SystemZ::R14D);
-
- if (HighFPRsUsed ||
- FFI->hasCalls() ||
- FFI->getObjectIndexEnd() != 0 || // Contains automatic variables
- FFI->hasVarSizedObjects() // Function uses dynamic allocas
- /* FIXME: function is varargs */)
- MRI.setPhysRegUsed(SystemZ::R15D);
-}
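
The chunking loop in emitSPUpdate above is worth reading in isolation: an
adjustment too large for one immediate is split into maximal chunks that fit
the chosen add instruction's immediate field. A standalone sketch, with the
opcode strings standing in for the real MachineInstr builders:

#include <cstdint>
#include <cstdio>

static void emitSPUpdate(int64_t NumBytes) {
  bool IsSub = NumBytes < 0;
  uint64_t Offset = IsSub ? -NumBytes : NumBytes;
  const char *Opc;
  uint64_t Chunk;
  if (Offset >= (1ULL << 15) - 1) { // needs the 32-bit immediate form
    Opc = "ADD64ri32";
    Chunk = (1ULL << 31) - 1;
  } else {                          // fits the 16-bit immediate form
    Opc = "ADD64ri16";
    Chunk = (1ULL << 15) - 1;
  }
  while (Offset) {
    uint64_t ThisVal = Offset > Chunk ? Chunk : Offset;
    std::printf("%s r15, r15, %lld\n", Opc,
                IsSub ? -(long long)ThisVal : (long long)ThisVal);
    Offset -= ThisVal;
  }
}

int main() {
  emitSPUpdate(-168);   // one ADD64ri16 r15, r15, -168
  emitSPUpdate(-40000); // one ADD64ri32 r15, r15, -40000
}
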
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
deleted file mode 100644
index 1284b68..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//=- SystemZFrameLowering.h - Define frame lowering for z/System -*- C++ -*--=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the SystemZ implementation of TargetFrameLowering.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SYSTEMZ_FRAMEINFO_H
-#define SYSTEMZ_FRAMEINFO_H
-
-#include "SystemZ.h"
-#include "SystemZSubtarget.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/ADT/IndexedMap.h"
-
-namespace llvm {
- class SystemZSubtarget;
-
-class SystemZFrameLowering : public TargetFrameLowering {
- IndexedMap<unsigned> RegSpillOffsets;
-protected:
- const SystemZSubtarget &STI;
-
-public:
- explicit SystemZFrameLowering(const SystemZSubtarget &sti);
-
- /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
- /// the function.
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
- bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
- bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const;
-
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const;
-
- bool hasReservedCallFrame(const MachineFunction &MF) const { return true; }
- bool hasFP(const MachineFunction &MF) const;
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
-};
-
-} // End llvm namespace
-
-#endif
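
For reference, the spill-offset table built in the SystemZFrameLowering
constructor (see the .cpp above) follows a regular pattern: GPR n, for
n = 2..15, is saved at offset 0x10 + 8 * (n - 2) in the register save area.
A sketch that reproduces the table (gprSpillOffset is an illustrative
helper, not part of this header):

#include <cstdio>

static unsigned gprSpillOffset(unsigned N) {
  return 0x10 + 8 * (N - 2); // R2D -> 0x10, R3D -> 0x18, ..., R15D -> 0x78
}

int main() {
  for (unsigned N = 2; N <= 15; ++N)
    std::printf("R%uD -> 0x%x\n", N, gprSpillOffset(N));
}
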
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
deleted file mode 100644
index 2186ff1..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ /dev/null
@@ -1,779 +0,0 @@
-//===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an instruction selector for the SystemZ target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SystemZ.h"
-#include "SystemZTargetMachine.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Constants.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
- /// SystemZRRIAddressMode - This corresponds to rriaddr, but uses SDValues
- /// instead of register numbers for the leaves of the matched tree.
- struct SystemZRRIAddressMode {
- enum {
- RegBase,
- FrameIndexBase
- } BaseType;
-
- struct { // This is really a union, discriminated by BaseType!
- SDValue Reg;
- int FrameIndex;
- } Base;
-
- SDValue IndexReg;
- int64_t Disp;
- bool isRI;
-
- SystemZRRIAddressMode(bool RI = false)
- : BaseType(RegBase), IndexReg(), Disp(0), isRI(RI) {
- }
-
- void dump() {
- errs() << "SystemZRRIAddressMode " << this << '\n';
- if (BaseType == RegBase) {
- errs() << "Base.Reg ";
- if (Base.Reg.getNode() != 0)
- Base.Reg.getNode()->dump();
- else
- errs() << "nul";
- errs() << '\n';
- } else {
- errs() << " Base.FrameIndex " << Base.FrameIndex << '\n';
- }
- if (!isRI) {
- errs() << "IndexReg ";
- if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
- else errs() << "nul";
- }
- errs() << " Disp " << Disp << '\n';
- }
- };
-}
-
-/// SystemZDAGToDAGISel - SystemZ specific code to select SystemZ machine
-/// instructions for SelectionDAG operations.
-///
-namespace {
- class SystemZDAGToDAGISel : public SelectionDAGISel {
- const SystemZTargetLowering &Lowering;
- const SystemZSubtarget &Subtarget;
-
- void getAddressOperandsRI(const SystemZRRIAddressMode &AM,
- SDValue &Base, SDValue &Disp);
- void getAddressOperands(const SystemZRRIAddressMode &AM,
- SDValue &Base, SDValue &Disp,
- SDValue &Index);
-
- public:
- SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(TM, OptLevel),
- Lowering(*TM.getTargetLowering()),
- Subtarget(*TM.getSubtargetImpl()) { }
-
- virtual const char *getPassName() const {
- return "SystemZ DAG->DAG Pattern Instruction Selection";
- }
-
- /// getI8Imm - Return a target constant with the specified value, of type
- /// i8.
- inline SDValue getI8Imm(uint64_t Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i8);
- }
-
- /// getI16Imm - Return a target constant with the specified value, of type
- /// i16.
- inline SDValue getI16Imm(uint64_t Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i16);
- }
-
- /// getI32Imm - Return a target constant with the specified value, of type
- /// i32.
- inline SDValue getI32Imm(uint64_t Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
- }
-
- // Include the pieces autogenerated from the target description.
- #include "SystemZGenDAGISel.inc"
-
- private:
- bool SelectAddrRI12Only(SDValue& Addr,
- SDValue &Base, SDValue &Disp);
- bool SelectAddrRI12(SDValue& Addr,
- SDValue &Base, SDValue &Disp,
- bool is12BitOnly = false);
- bool SelectAddrRI(SDValue& Addr, SDValue &Base, SDValue &Disp);
- bool SelectAddrRRI12(SDValue Addr,
- SDValue &Base, SDValue &Disp, SDValue &Index);
- bool SelectAddrRRI20(SDValue Addr,
- SDValue &Base, SDValue &Disp, SDValue &Index);
- bool SelectLAAddr(SDValue Addr,
- SDValue &Base, SDValue &Disp, SDValue &Index);
-
- SDNode *Select(SDNode *Node);
-
- bool TryFoldLoad(SDNode *P, SDValue N,
- SDValue &Base, SDValue &Disp, SDValue &Index);
-
- bool MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
- bool is12Bit, unsigned Depth = 0);
- bool MatchAddressBase(SDValue N, SystemZRRIAddressMode &AM);
- };
-} // end anonymous namespace
-
-/// createSystemZISelDag - This pass converts a legalized DAG into a
-/// SystemZ-specific DAG, ready for instruction scheduling.
-///
-FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
- CodeGenOpt::Level OptLevel) {
- return new SystemZDAGToDAGISel(TM, OptLevel);
-}
-
-/// isImmSExt20 - Return true if the value can be accurately represented as a
-/// sign extension from a 20-bit value, storing the result in Imm.
-static bool isImmSExt20(int64_t Val, int64_t &Imm) {
- if (Val >= -524288 && Val <= 524287) {
- Imm = Val;
- return true;
- }
- return false;
-}
-
-/// isImmZExt12 - Return true if the value can be accurately represented as a
-/// zero extension from a 12-bit value, storing the result in Imm.
-static bool isImmZExt12(int64_t Val, int64_t &Imm) {
- if (Val >= 0 && Val <= 0xFFF) {
- Imm = Val;
- return true;
- }
- return false;
-}
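
Both predicates are plain range checks; together they decide whether an
address can use the short unsigned 12-bit displacement encoding or needs the
long signed 20-bit form. A standalone restatement with a couple of sanity
checks (fitsZExt12/fitsSExt20 are illustrative renamings that drop the
out-parameter):

#include <cassert>
#include <cstdint>

static bool fitsZExt12(int64_t Val) { return Val >= 0 && Val <= 0xFFF; }
static bool fitsSExt20(int64_t Val) {
  return Val >= -524288 && Val <= 524287;
}

int main() {
  assert(fitsZExt12(4095) && !fitsZExt12(4096));
  assert(fitsSExt20(-524288) && !fitsSExt20(524288));
  return 0;
}
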
-
-/// MatchAddress - Add the specified node to the specified addressing mode,
-/// returning true if it cannot be done. This just pattern matches for the
-/// addressing mode.
-bool SystemZDAGToDAGISel::MatchAddress(SDValue N, SystemZRRIAddressMode &AM,
- bool is12Bit, unsigned Depth) {
- DebugLoc dl = N.getDebugLoc();
- DEBUG(errs() << "MatchAddress: "; AM.dump());
- // Limit recursion.
- if (Depth > 5)
- return MatchAddressBase(N, AM);
-
- // FIXME: We can perform better here. If we have something like
- // (shift (add A, imm), N), we can try to reassociate stuff and fold shift of
- // imm into addressing mode.
- switch (N.getOpcode()) {
- default: break;
- case ISD::Constant: {
- int64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
- int64_t Imm = 0;
- bool Match = (is12Bit ?
- isImmZExt12(AM.Disp + Val, Imm) :
- isImmSExt20(AM.Disp + Val, Imm));
- if (Match) {
- AM.Disp = Imm;
- return false;
- }
- break;
- }
-
- case ISD::FrameIndex:
- if (AM.BaseType == SystemZRRIAddressMode::RegBase &&
- AM.Base.Reg.getNode() == 0) {
- AM.BaseType = SystemZRRIAddressMode::FrameIndexBase;
- AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
- return false;
- }
- break;
-
- case ISD::SUB: {
- // Given A-B, if A can be completely folded into the address (leaving the
- // index field unused), use -B as the index. This is a win if A has
- // multiple parts that can be folded into the address. It also saves a mov
- // if the base register has other uses, since it avoids a two-address sub
- // instruction; however, it costs an additional mov if the index register
- // has other uses.
-
- // Test if the LHS of the sub can be folded.
- SystemZRRIAddressMode Backup = AM;
- if (MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1)) {
- AM = Backup;
- break;
- }
- // Test if the index field is free for use.
- if (AM.IndexReg.getNode() || AM.isRI) {
- AM = Backup;
- break;
- }
-
- // If the base is a register with multiple uses, this transformation may
- // save a mov. Otherwise it's probably better not to do it.
- if (AM.BaseType == SystemZRRIAddressMode::RegBase &&
- (!AM.Base.Reg.getNode() || AM.Base.Reg.getNode()->hasOneUse())) {
- AM = Backup;
- break;
- }
-
- // Ok, the transformation is legal and appears profitable. Go for it.
- SDValue RHS = N.getNode()->getOperand(1);
- SDValue Zero = CurDAG->getConstant(0, N.getValueType());
- SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
- AM.IndexReg = Neg;
-
- // Insert the new nodes into the topological ordering.
- if (Zero.getNode()->getNodeId() == -1 ||
- Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
- CurDAG->RepositionNode(N.getNode(), Zero.getNode());
- Zero.getNode()->setNodeId(N.getNode()->getNodeId());
- }
- if (Neg.getNode()->getNodeId() == -1 ||
- Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
- CurDAG->RepositionNode(N.getNode(), Neg.getNode());
- Neg.getNode()->setNodeId(N.getNode()->getNodeId());
- }
- return false;
- }
-
- case ISD::ADD: {
- SystemZRRIAddressMode Backup = AM;
- if (!MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1) &&
- !MatchAddress(N.getNode()->getOperand(1), AM, is12Bit, Depth+1))
- return false;
- AM = Backup;
- if (!MatchAddress(N.getNode()->getOperand(1), AM, is12Bit, Depth+1) &&
- !MatchAddress(N.getNode()->getOperand(0), AM, is12Bit, Depth+1))
- return false;
- AM = Backup;
-
- // If we couldn't fold both operands into the address at the same time,
- // see if we can just put each operand into a register and fold at least
- // the add.
- if (!AM.isRI &&
- AM.BaseType == SystemZRRIAddressMode::RegBase &&
- !AM.Base.Reg.getNode() && !AM.IndexReg.getNode()) {
- AM.Base.Reg = N.getNode()->getOperand(0);
- AM.IndexReg = N.getNode()->getOperand(1);
- return false;
- }
- break;
- }
-
- case ISD::OR:
- // Handle "X | C" as "X + C" iff X is known to have C bits clear.
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- SystemZRRIAddressMode Backup = AM;
- int64_t Offset = CN->getSExtValue();
- int64_t Imm = 0;
- bool MatchOffset = (is12Bit ?
- isImmZExt12(AM.Disp + Offset, Imm) :
- isImmSExt20(AM.Disp + Offset, Imm));
- // The resulting displacement must fit in 12 or 20 bits.
- if (MatchOffset &&
- // LHS should be an addr mode.
- !MatchAddress(N.getOperand(0), AM, is12Bit, Depth+1) &&
- // Check to see if the LHS & C is zero.
- CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
- AM.Disp = Imm;
- return false;
- }
- AM = Backup;
- }
- break;
- }
-
- return MatchAddressBase(N, AM);
-}
-
-/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
-/// specified addressing mode without any further recursion.
-bool SystemZDAGToDAGISel::MatchAddressBase(SDValue N,
- SystemZRRIAddressMode &AM) {
- // Is the base register already occupied?
- if (AM.BaseType != SystemZRRIAddressMode::RegBase || AM.Base.Reg.getNode()) {
- // If so, check to see if the index register is set.
- if (AM.IndexReg.getNode() == 0 && !AM.isRI) {
- AM.IndexReg = N;
- return false;
- }
-
- // Otherwise, we cannot select it.
- return true;
- }
-
- // Default, generate it as a register.
- AM.BaseType = SystemZRRIAddressMode::RegBase;
- AM.Base.Reg = N;
- return false;
-}
-
-void SystemZDAGToDAGISel::getAddressOperandsRI(const SystemZRRIAddressMode &AM,
- SDValue &Base, SDValue &Disp) {
- if (AM.BaseType == SystemZRRIAddressMode::RegBase)
- Base = AM.Base.Reg;
- else
- Base = CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy());
- Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i64);
-}
-
-void SystemZDAGToDAGISel::getAddressOperands(const SystemZRRIAddressMode &AM,
- SDValue &Base, SDValue &Disp,
- SDValue &Index) {
- getAddressOperandsRI(AM, Base, Disp);
- Index = AM.IndexReg;
-}
-
-/// Returns true if the address can be represented by a base register plus
-/// an unsigned 12-bit displacement [r+imm].
-bool SystemZDAGToDAGISel::SelectAddrRI12Only(SDValue &Addr,
- SDValue &Base, SDValue &Disp) {
- return SelectAddrRI12(Addr, Base, Disp, /*is12BitOnly*/true);
-}
-
-bool SystemZDAGToDAGISel::SelectAddrRI12(SDValue &Addr,
- SDValue &Base, SDValue &Disp,
- bool is12BitOnly) {
- SystemZRRIAddressMode AM20(/*isRI*/true), AM12(/*isRI*/true);
- bool Done = false;
-
- if (!Addr.hasOneUse()) {
- unsigned Opcode = Addr.getOpcode();
- if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) {
- // If we are able to fold N into the addressing mode, we allow it even if
- // N has multiple uses. In general, an address computation is used as an
- // address by all of its uses. But watch out for CopyToReg uses: they mean
- // the address computation is live-out and will be materialized by an LA,
- // so we want to avoid computing the address twice.
- for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
- UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
- if (UI->getOpcode() == ISD::CopyToReg) {
- MatchAddressBase(Addr, AM12);
- Done = true;
- break;
- }
- }
- }
- }
- if (!Done && MatchAddress(Addr, AM12, /* is12Bit */ true))
- return false;
-
- // Check whether we can also match using 20-bit displacements.
- if (!Done && !is12BitOnly &&
- !MatchAddress(Addr, AM20, /* is12Bit */ false))
- if (AM12.Disp == 0 && AM20.Disp != 0)
- return false;
-
- DEBUG(errs() << "MatchAddress (final): "; AM12.dump());
-
- EVT VT = Addr.getValueType();
- if (AM12.BaseType == SystemZRRIAddressMode::RegBase) {
- if (!AM12.Base.Reg.getNode())
- AM12.Base.Reg = CurDAG->getRegister(0, VT);
- }
-
- assert(AM12.IndexReg.getNode() == 0 && "Invalid reg-imm address mode!");
-
- getAddressOperandsRI(AM12, Base, Disp);
-
- return true;
-}
-
-/// Returns true if the address can be represented by a base register plus
-/// a signed 20-bit displacement [r+imm].
-bool SystemZDAGToDAGISel::SelectAddrRI(SDValue& Addr,
- SDValue &Base, SDValue &Disp) {
- SystemZRRIAddressMode AM(/*isRI*/true);
- bool Done = false;
-
- if (!Addr.hasOneUse()) {
- unsigned Opcode = Addr.getOpcode();
- if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) {
- // If we are able to fold N into the addressing mode, we allow it even if
- // N has multiple uses. In general, an address computation is used as an
- // address by all of its uses. But watch out for CopyToReg uses: they mean
- // the address computation is live-out and will be materialized by an LA,
- // so we want to avoid computing the address twice.
- for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
- UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
- if (UI->getOpcode() == ISD::CopyToReg) {
- MatchAddressBase(Addr, AM);
- Done = true;
- break;
- }
- }
- }
- }
- if (!Done && MatchAddress(Addr, AM, /* is12Bit */ false))
- return false;
-
- DEBUG(errs() << "MatchAddress (final): "; AM.dump());
-
- EVT VT = Addr.getValueType();
- if (AM.BaseType == SystemZRRIAddressMode::RegBase) {
- if (!AM.Base.Reg.getNode())
- AM.Base.Reg = CurDAG->getRegister(0, VT);
- }
-
- assert(AM.IndexReg.getNode() == 0 && "Invalid reg-imm address mode!");
-
- getAddressOperandsRI(AM, Base, Disp);
-
- return true;
-}
-
-/// Returns true if the address can be represented by a base register plus
-/// index register plus an unsigned 12-bit displacement [base + idx + imm].
-bool SystemZDAGToDAGISel::SelectAddrRRI12(SDValue Addr,
- SDValue &Base, SDValue &Disp, SDValue &Index) {
- SystemZRRIAddressMode AM20, AM12;
- bool Done = false;
-
- if (!Addr.hasOneUse()) {
- unsigned Opcode = Addr.getOpcode();
- if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) {
- // If we are able to fold N into the addressing mode, we allow it even if
- // N has multiple uses. In general, an address computation is used as an
- // address by all of its uses. But watch out for CopyToReg uses: they mean
- // the address computation is live-out and will be materialized by an LA,
- // so we want to avoid computing the address twice.
- for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
- UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
- if (UI->getOpcode() == ISD::CopyToReg) {
- MatchAddressBase(Addr, AM12);
- Done = true;
- break;
- }
- }
- }
- }
- if (!Done && MatchAddress(Addr, AM12, /* is12Bit */ true))
- return false;
-
- // Check whether we can also match using 20-bit displacements.
- if (!Done && !MatchAddress(Addr, AM20, /* is12Bit */ false))
- if (AM12.Disp == 0 && AM20.Disp != 0)
- return false;
-
- DEBUG(errs() << "MatchAddress (final): "; AM12.dump());
-
- EVT VT = Addr.getValueType();
- if (AM12.BaseType == SystemZRRIAddressMode::RegBase) {
- if (!AM12.Base.Reg.getNode())
- AM12.Base.Reg = CurDAG->getRegister(0, VT);
- }
-
- if (!AM12.IndexReg.getNode())
- AM12.IndexReg = CurDAG->getRegister(0, VT);
-
- getAddressOperands(AM12, Base, Disp, Index);
-
- return true;
-}
-
-/// Returns true if the address can be represented by a base register plus
-/// index register plus a signed 20-bit displacement [base + idx + imm].
-bool SystemZDAGToDAGISel::SelectAddrRRI20(SDValue Addr,
- SDValue &Base, SDValue &Disp, SDValue &Index) {
- SystemZRRIAddressMode AM;
- bool Done = false;
-
- if (!Addr.hasOneUse()) {
- unsigned Opcode = Addr.getOpcode();
- if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex) {
- // If we are able to fold N into the addressing mode, we allow it even if
- // N has multiple uses. In general, an address computation is used as an
- // address by all of its uses. But watch out for CopyToReg uses: they mean
- // the address computation is live-out and will be materialized by an LA,
- // so we want to avoid computing the address twice.
- for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
- UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
- if (UI->getOpcode() == ISD::CopyToReg) {
- MatchAddressBase(Addr, AM);
- Done = true;
- break;
- }
- }
- }
- }
- if (!Done && MatchAddress(Addr, AM, /* is12Bit */ false))
- return false;
-
- DEBUG(errs() << "MatchAddress (final): "; AM.dump());
-
- EVT VT = Addr.getValueType();
- if (AM.BaseType == SystemZRRIAddressMode::RegBase) {
- if (!AM.Base.Reg.getNode())
- AM.Base.Reg = CurDAG->getRegister(0, VT);
- }
-
- if (!AM.IndexReg.getNode())
- AM.IndexReg = CurDAG->getRegister(0, VT);
-
- getAddressOperands(AM, Base, Disp, Index);
-
- return true;
-}
-
-/// SelectLAAddr - This calls SelectAddr and determines if the maximal
-/// addressing mode it matches can be cost-effectively emitted as an LA/LAY
-/// instruction.
-bool SystemZDAGToDAGISel::SelectLAAddr(SDValue Addr,
- SDValue &Base, SDValue &Disp, SDValue &Index) {
- SystemZRRIAddressMode AM;
-
- if (MatchAddress(Addr, AM, false))
- return false;
-
- EVT VT = Addr.getValueType();
- unsigned Complexity = 0;
- if (AM.BaseType == SystemZRRIAddressMode::RegBase)
- if (AM.Base.Reg.getNode())
- Complexity = 1;
- else
- AM.Base.Reg = CurDAG->getRegister(0, VT);
- else if (AM.BaseType == SystemZRRIAddressMode::FrameIndexBase)
- Complexity = 4;
-
- if (AM.IndexReg.getNode())
- Complexity += 1;
- else
- AM.IndexReg = CurDAG->getRegister(0, VT);
-
- if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
- Complexity += 1;
-
- if (Complexity > 2) {
- getAddressOperands(AM, Base, Disp, Index);
- return true;
- }
-
- return false;
-}
-
-bool SystemZDAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
- SDValue &Base, SDValue &Disp, SDValue &Index) {
- if (ISD::isNON_EXTLoad(N.getNode()) &&
- IsLegalToFold(N, P, P, OptLevel))
- return SelectAddrRRI20(N.getOperand(1), Base, Disp, Index);
- return false;
-}
-
-SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
- EVT NVT = Node->getValueType(0);
- DebugLoc dl = Node->getDebugLoc();
- unsigned Opcode = Node->getOpcode();
-
- // Dump information about the Node being selected
- DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n");
-
- // If the node already has a machine opcode, it has already been selected.
- if (Node->isMachineOpcode()) {
- DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
- return NULL; // Already selected.
- }
-
- switch (Opcode) {
- default: break;
- case ISD::SDIVREM: {
- unsigned Opc, MOpc;
- SDValue N0 = Node->getOperand(0);
- SDValue N1 = Node->getOperand(1);
-
- EVT ResVT;
- bool is32Bit = false;
- switch (NVT.getSimpleVT().SimpleTy) {
- default: assert(0 && "Unsupported VT!");
- case MVT::i32:
- Opc = SystemZ::SDIVREM32r; MOpc = SystemZ::SDIVREM32m;
- ResVT = MVT::v2i64;
- is32Bit = true;
- break;
- case MVT::i64:
- Opc = SystemZ::SDIVREM64r; MOpc = SystemZ::SDIVREM64m;
- ResVT = MVT::v2i64;
- break;
- }
-
- SDValue Tmp0, Tmp1, Tmp2;
- bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2);
-
- // Prepare the dividend
- SDNode *Dividend;
- if (is32Bit)
- Dividend = CurDAG->getMachineNode(SystemZ::MOVSX64rr32, dl, MVT::i64, N0);
- else
- Dividend = N0.getNode();
-
- // Insert prepared dividend into suitable 'subreg'
- SDNode *Tmp = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, ResVT);
- Dividend =
- CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl, ResVT,
- SDValue(Tmp, 0), SDValue(Dividend, 0),
- CurDAG->getTargetConstant(SystemZ::subreg_odd, MVT::i32));
-
- SDNode *Result;
- SDValue DivVal = SDValue(Dividend, 0);
- if (foldedLoad) {
- SDValue Ops[] = { DivVal, Tmp0, Tmp1, Tmp2, N1.getOperand(0) };
- Result = CurDAG->getMachineNode(MOpc, dl, ResVT, MVT::Other,
- Ops, array_lengthof(Ops));
- // Update the chain.
- ReplaceUses(N1.getValue(1), SDValue(Result, 1));
- } else {
- Result = CurDAG->getMachineNode(Opc, dl, ResVT, SDValue(Dividend, 0), N1);
- }
-
- // Copy the division (odd subreg) result, if it is needed.
- if (!SDValue(Node, 0).use_empty()) {
- unsigned SubRegIdx = (is32Bit ?
- SystemZ::subreg_odd32 : SystemZ::subreg_odd);
- SDNode *Div = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
- dl, NVT,
- SDValue(Result, 0),
- CurDAG->getTargetConstant(SubRegIdx,
- MVT::i32));
-
- ReplaceUses(SDValue(Node, 0), SDValue(Div, 0));
- DEBUG(errs() << "=> "; Result->dump(CurDAG); errs() << "\n");
- }
-
- // Copy the remainder (even subreg) result, if it is needed.
- if (!SDValue(Node, 1).use_empty()) {
- unsigned SubRegIdx = (is32Bit ?
- SystemZ::subreg_32bit : SystemZ::subreg_even);
- SDNode *Rem = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
- dl, NVT,
- SDValue(Result, 0),
- CurDAG->getTargetConstant(SubRegIdx,
- MVT::i32));
-
- ReplaceUses(SDValue(Node, 1), SDValue(Rem, 0));
- DEBUG(errs() << "=> "; Result->dump(CurDAG); errs() << "\n");
- }
-
- return NULL;
- }
- case ISD::UDIVREM: {
- unsigned Opc, MOpc, ClrOpc;
- SDValue N0 = Node->getOperand(0);
- SDValue N1 = Node->getOperand(1);
- EVT ResVT;
-
- bool is32Bit = false;
- switch (NVT.getSimpleVT().SimpleTy) {
- default: assert(0 && "Unsupported VT!");
- case MVT::i32:
- Opc = SystemZ::UDIVREM32r; MOpc = SystemZ::UDIVREM32m;
- ClrOpc = SystemZ::MOV64Pr0_even;
- ResVT = MVT::v2i32;
- is32Bit = true;
- break;
- case MVT::i64:
- Opc = SystemZ::UDIVREM64r; MOpc = SystemZ::UDIVREM64m;
- ClrOpc = SystemZ::MOV128r0_even;
- ResVT = MVT::v2i64;
- break;
- }
-
- SDValue Tmp0, Tmp1, Tmp2;
- bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2);
-
- // Prepare the dividend
- SDNode *Dividend = N0.getNode();
-
- // Insert prepared dividend into suitable 'subreg'
- SDNode *Tmp = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, ResVT);
- {
- unsigned SubRegIdx = (is32Bit ?
- SystemZ::subreg_odd32 : SystemZ::subreg_odd);
- Dividend =
- CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl, ResVT,
- SDValue(Tmp, 0), SDValue(Dividend, 0),
- CurDAG->getTargetConstant(SubRegIdx, MVT::i32));
- }
-
- // Zero out even subreg
- Dividend = CurDAG->getMachineNode(ClrOpc, dl, ResVT, SDValue(Dividend, 0));
-
- SDValue DivVal = SDValue(Dividend, 0);
- SDNode *Result;
- if (foldedLoad) {
- SDValue Ops[] = { DivVal, Tmp0, Tmp1, Tmp2, N1.getOperand(0) };
- Result = CurDAG->getMachineNode(MOpc, dl, ResVT, MVT::Other,
- Ops, array_lengthof(Ops));
- // Update the chain.
- ReplaceUses(N1.getValue(1), SDValue(Result, 1));
- } else {
- Result = CurDAG->getMachineNode(Opc, dl, ResVT, DivVal, N1);
- }
-
- // Copy the division (odd subreg) result, if it is needed.
- if (!SDValue(Node, 0).use_empty()) {
- unsigned SubRegIdx = (is32Bit ?
- SystemZ::subreg_odd32 : SystemZ::subreg_odd);
- SDNode *Div = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
- dl, NVT,
- SDValue(Result, 0),
- CurDAG->getTargetConstant(SubRegIdx,
- MVT::i32));
- ReplaceUses(SDValue(Node, 0), SDValue(Div, 0));
- DEBUG(errs() << "=> "; Result->dump(CurDAG); errs() << "\n");
- }
-
- // Copy the remainder (even subreg) result, if it is needed.
- if (!SDValue(Node, 1).use_empty()) {
- unsigned SubRegIdx = (is32Bit ?
- SystemZ::subreg_32bit : SystemZ::subreg_even);
- SDNode *Rem = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
- dl, NVT,
- SDValue(Result, 0),
- CurDAG->getTargetConstant(SubRegIdx,
- MVT::i32));
- ReplaceUses(SDValue(Node, 1), SDValue(Rem, 0));
- DEBUG(errs() << "=> "; Result->dump(CurDAG); errs() << "\n");
- }
-
- return NULL;
- }
- }
-
- // Select the default instruction
- SDNode *ResNode = SelectCode(Node);
-
- DEBUG(errs() << "=> ";
- if (ResNode == NULL || ResNode == Node)
- Node->dump(CurDAG);
- else
- ResNode->dump(CurDAG);
- errs() << "\n";
- );
- return ResNode;
-}
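
One technique in Select() above deserves a note: both divrem cases model the
hardware's even/odd register pair, placing the dividend in the odd half and
reading the quotient back from the odd subreg and the remainder from the
even subreg. A plain-C++ model of that contract (RegPair and sdivrem are
illustrative, not the emitted code):

#include <cstdio>

struct RegPair { long long Even, Odd; }; // Even = remainder, Odd = quotient

static RegPair sdivrem(long long Dividend, long long Divisor) {
  RegPair P;
  P.Odd = Dividend / Divisor;  // quotient  -> odd subreg
  P.Even = Dividend % Divisor; // remainder -> even subreg
  return P;
}

int main() {
  RegPair P = sdivrem(17, 5);
  std::printf("quot=%lld rem=%lld\n", P.Odd, P.Even); // quot=3 rem=2
}
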
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
deleted file mode 100644
index 48ca99f..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ /dev/null
@@ -1,868 +0,0 @@
-//===-- SystemZISelLowering.cpp - SystemZ DAG Lowering Implementation ----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the SystemZTargetLowering class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "systemz-lower"
-
-#include "SystemZISelLowering.h"
-#include "SystemZ.h"
-#include "SystemZTargetMachine.h"
-#include "SystemZSubtarget.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/CallingConv.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/GlobalAlias.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/VectorExtras.h"
-using namespace llvm;
-
-SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
- TargetLowering(tm, new TargetLoweringObjectFileELF()),
- Subtarget(*tm.getSubtargetImpl()), TM(tm) {
-
- RegInfo = TM.getRegisterInfo();
-
- // Set up the register classes.
- addRegisterClass(MVT::i32, SystemZ::GR32RegisterClass);
- addRegisterClass(MVT::i64, SystemZ::GR64RegisterClass);
- addRegisterClass(MVT::v2i32, SystemZ::GR64PRegisterClass);
- addRegisterClass(MVT::v2i64, SystemZ::GR128RegisterClass);
-
- if (!UseSoftFloat) {
- addRegisterClass(MVT::f32, SystemZ::FP32RegisterClass);
- addRegisterClass(MVT::f64, SystemZ::FP64RegisterClass);
- }
-
- // Compute derived properties from the register classes
- computeRegisterProperties();
-
- // Provide all sorts of operation actions
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
-
- setLoadExtAction(ISD::SEXTLOAD, MVT::f32, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
-
- setLoadExtAction(ISD::SEXTLOAD, MVT::f64, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::f64, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
-
- setStackPointerRegisterToSaveRestore(SystemZ::R15D);
-
- // TODO: It may be better to default to latency-oriented scheduling;
- // however, LLVM's current latency-oriented scheduler can't handle physreg
- // definitions such as SystemZ's PSW, so use the register-pressure
- // scheduler, which can.
- setSchedulingPreference(Sched::RegPressure);
-
- setBooleanContents(ZeroOrOneBooleanContent);
- setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
-
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BRCOND, MVT::Other, Expand);
- setOperationAction(ISD::BR_CC, MVT::i32, Custom);
- setOperationAction(ISD::BR_CC, MVT::i64, Custom);
- setOperationAction(ISD::BR_CC, MVT::f32, Custom);
- setOperationAction(ISD::BR_CC, MVT::f64, Custom);
- setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
- setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
- setOperationAction(ISD::JumpTable, MVT::i64, Custom);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
-
- setOperationAction(ISD::SDIV, MVT::i32, Expand);
- setOperationAction(ISD::UDIV, MVT::i32, Expand);
- setOperationAction(ISD::SDIV, MVT::i64, Expand);
- setOperationAction(ISD::UDIV, MVT::i64, Expand);
- setOperationAction(ISD::SREM, MVT::i32, Expand);
- setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::SREM, MVT::i64, Expand);
- setOperationAction(ISD::UREM, MVT::i64, Expand);
-
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- setOperationAction(ISD::CTPOP, MVT::i32, Expand);
- setOperationAction(ISD::CTPOP, MVT::i64, Expand);
- setOperationAction(ISD::CTTZ, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ, MVT::i64, Expand);
- setOperationAction(ISD::CTLZ, MVT::i32, Promote);
- setOperationAction(ISD::CTLZ, MVT::i64, Legal);
-
- // FIXME: Can we lower these 2 efficiently?
- setOperationAction(ISD::SETCC, MVT::i32, Expand);
- setOperationAction(ISD::SETCC, MVT::i64, Expand);
- setOperationAction(ISD::SETCC, MVT::f32, Expand);
- setOperationAction(ISD::SETCC, MVT::f64, Expand);
- setOperationAction(ISD::SELECT, MVT::i32, Expand);
- setOperationAction(ISD::SELECT, MVT::i64, Expand);
- setOperationAction(ISD::SELECT, MVT::f32, Expand);
- setOperationAction(ISD::SELECT, MVT::f64, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
-
- setOperationAction(ISD::MULHS, MVT::i64, Expand);
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
-
- // FIXME: Can we support these natively?
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
- setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
- setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
-
- // Lower some FP stuff
- setOperationAction(ISD::FSIN, MVT::f32, Expand);
- setOperationAction(ISD::FSIN, MVT::f64, Expand);
- setOperationAction(ISD::FCOS, MVT::f32, Expand);
- setOperationAction(ISD::FCOS, MVT::f64, Expand);
- setOperationAction(ISD::FREM, MVT::f32, Expand);
- setOperationAction(ISD::FREM, MVT::f64, Expand);
- setOperationAction(ISD::FMA, MVT::f32, Expand);
- setOperationAction(ISD::FMA, MVT::f64, Expand);
-
- // We have only 64-bit bitconverts
- setOperationAction(ISD::BITCAST, MVT::f32, Expand);
- setOperationAction(ISD::BITCAST, MVT::i32, Expand);
-
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
- setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
-
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
- setMinFunctionAlignment(1);
-}
-
-SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
- SelectionDAG &DAG) const {
- switch (Op.getOpcode()) {
- case ISD::BR_CC: return LowerBR_CC(Op, DAG);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
- case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
- case ISD::JumpTable: return LowerJumpTable(Op, DAG);
- case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
- default:
- llvm_unreachable("Should not custom lower this!");
- return SDValue();
- }
-}
-
-bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
- if (UseSoftFloat || (VT != MVT::f32 && VT != MVT::f64))
- return false;
-
- // +0.0f lzer
- // +0.0 lzdr
- // -0.0f lzer + lner
- // -0.0 lzdr + lndr
- return Imm.isZero() || Imm.isNegZero();
-}
-
-//===----------------------------------------------------------------------===//
-// SystemZ Inline Assembly Support
-//===----------------------------------------------------------------------===//
-
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
-TargetLowering::ConstraintType
-SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- case 'r':
- return C_RegisterClass;
- default:
- break;
- }
- }
- return TargetLowering::getConstraintType(Constraint);
-}
-
-std::pair<unsigned, const TargetRegisterClass*>
-SystemZTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
- if (Constraint.size() == 1) {
- // GCC Constraint Letters
- switch (Constraint[0]) {
- default: break;
- case 'r': // GENERAL_REGS
- if (VT == MVT::i32)
- return std::make_pair(0U, SystemZ::GR32RegisterClass);
- else if (VT == MVT::i128)
- return std::make_pair(0U, SystemZ::GR128RegisterClass);
-
- return std::make_pair(0U, SystemZ::GR64RegisterClass);
- }
- }
-
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
-}
-
-//===----------------------------------------------------------------------===//
-// Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-#include "SystemZGenCallingConv.inc"
-
-SDValue
-SystemZTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- DebugLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
-
- switch (CallConv) {
- default:
- llvm_unreachable("Unsupported calling convention");
- case CallingConv::C:
- case CallingConv::Fast:
- return LowerCCCArguments(Chain, CallConv, isVarArg, Ins, dl, DAG, InVals);
- }
-}
-
-SDValue
-SystemZTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- // SystemZ target does not yet support tail call optimization.
- isTailCall = false;
-
- switch (CallConv) {
- default:
- llvm_unreachable("Unsupported calling convention");
- case CallingConv::Fast:
- case CallingConv::C:
- return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
- Outs, OutVals, Ins, dl, DAG, InVals);
- }
-}
-
-/// LowerCCCArguments - transform physical registers into virtual registers and
-/// generate load operations for arguments placed on the stack.
-// FIXME: struct return stuff
-// FIXME: varargs
-SDValue
-SystemZTargetLowering::LowerCCCArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- DebugLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals)
- const {
-
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineRegisterInfo &RegInfo = MF.getRegInfo();
-
- // Assign locations to all of the incoming arguments.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
- CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
-
- if (isVarArg)
- report_fatal_error("Varargs not supported yet");
-
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- SDValue ArgValue;
- CCValAssign &VA = ArgLocs[i];
- EVT LocVT = VA.getLocVT();
- if (VA.isRegLoc()) {
- // Arguments passed in registers
- TargetRegisterClass *RC;
- switch (LocVT.getSimpleVT().SimpleTy) {
- default:
-#ifndef NDEBUG
- errs() << "LowerFormalArguments Unhandled argument type: "
- << LocVT.getSimpleVT().SimpleTy
- << "\n";
-#endif
- llvm_unreachable(0);
- case MVT::i64:
- RC = SystemZ::GR64RegisterClass;
- break;
- case MVT::f32:
- RC = SystemZ::FP32RegisterClass;
- break;
- case MVT::f64:
- RC = SystemZ::FP64RegisterClass;
- break;
- }
-
- unsigned VReg = RegInfo.createVirtualRegister(RC);
- RegInfo.addLiveIn(VA.getLocReg(), VReg);
- ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
- } else {
- // Sanity check
- assert(VA.isMemLoc());
-
- // Create the nodes corresponding to a load from this parameter slot.
- // Create the frame index object for this incoming parameter...
- int FI = MFI->CreateFixedObject(LocVT.getSizeInBits()/8,
- VA.getLocMemOffset(), true);
-
- // Create the SelectionDAG nodes corresponding to a load
- // from this parameter
- SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValue = DAG.getLoad(LocVT, dl, Chain, FIN,
- MachinePointerInfo::getFixedStack(FI),
- false, false, 0);
- }
-
- // If this is an 8/16/32-bit value, it is really passed promoted to 64
- // bits. Insert an assert[sz]ext to capture this, then truncate to the
- // right size.
- if (VA.getLocInfo() == CCValAssign::SExt)
- ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
- else if (VA.getLocInfo() == CCValAssign::ZExt)
- ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
-
- if (VA.getLocInfo() != CCValAssign::Full)
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
-
- InVals.push_back(ArgValue);
- }
-
- return Chain;
-}
-
-/// LowerCCCCallTo - function arguments are copied from virtual regs to
-/// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
-/// TODO: sret.
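-/// The emitted sequence is: CALLSEQ_START, the argument stores and register
-/// copies, the SystemZISD::CALL node, CALLSEQ_END, and finally LowerCallResult
-/// to copy the results out of their physical registers.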
-SDValue
-SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
- const SmallVectorImpl<ISD::OutputArg>
- &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
- MachineFunction &MF = DAG.getMachineFunction();
- const TargetFrameLowering *TFI = TM.getFrameLowering();
-
- // Offset to the first argument stack slot: the zSeries ABI reserves a
- // 160-byte register save area at the bottom of the caller's frame.
- const unsigned FirstArgOffset = 160;
-
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
-
- CCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
-
- // Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getNextStackOffset();
-
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
- getPointerTy(), true));
-
- SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
- SmallVector<SDValue, 12> MemOpChains;
- SDValue StackPtr;
-
- // Walk the register/memloc assignments, inserting copies/loads.
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
-
- SDValue Arg = OutVals[i];
-
- // Promote the value if needed.
- switch (VA.getLocInfo()) {
- default: assert(0 && "Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::AExt:
- Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- }
-
- // Arguments that are passed in registers must be kept in the RegsToPass
- // vector.
- if (VA.isRegLoc()) {
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- } else {
- assert(VA.isMemLoc());
-
- if (StackPtr.getNode() == 0)
- StackPtr =
- DAG.getCopyFromReg(Chain, dl,
- (TFI->hasFP(MF) ?
- SystemZ::R11D : SystemZ::R15D),
- getPointerTy());
-
- unsigned Offset = FirstArgOffset + VA.getLocMemOffset();
- SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(),
- StackPtr,
- DAG.getIntPtrConstant(Offset));
-
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
- MachinePointerInfo(),
- false, false, 0));
- }
- }
-
- // Combine all of the store nodes into one single TokenFactor node, since
- // the stores are independent of each other.
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
-
- // Build a sequence of copy-to-reg nodes chained together with token chain and
- // flag operands which copy the outgoing args into registers. The InFlag is
- // necessary since all emitted instructions must be glued together.
- SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
-
- // If the callee is a GlobalAddress node (quite common, every direct call is)
- // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
- // Likewise ExternalSymbol -> TargetExternalSymbol.
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
- else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
- Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());
-
- // Returns a chain & a flag for retval copy to use.
- SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
-
- // Add argument registers to the end of the list so that they are
- // known live into the call.
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
- Ops.push_back(DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
-
- if (InFlag.getNode())
- Ops.push_back(InFlag);
-
- Chain = DAG.getNode(SystemZISD::CALL, dl, NodeTys, &Ops[0], Ops.size());
- InFlag = Chain.getValue(1);
-
- // Create the CALLSEQ_END node.
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getConstant(NumBytes, getPointerTy(), true),
- DAG.getConstant(0, getPointerTy(), true),
- InFlag);
- InFlag = Chain.getValue(1);
-
- // Handle result values, copying them out of physregs into vregs that we
- // return.
- return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl,
- DAG, InVals);
-}
-
-/// LowerCallResult - Lower the result values of a call into the
-/// appropriate copies out of appropriate physical registers.
-///
-SDValue
-SystemZTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
-
- // Assign locations to each value returned by this call.
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
-
- CCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
-
- // Copy all of the result registers out of their specified physreg.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign &VA = RVLocs[i];
-
- Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
- VA.getLocVT(), InFlag).getValue(1);
- SDValue RetValue = Chain.getValue(0);
- InFlag = Chain.getValue(2);
-
- // If this is an 8/16/32-bit value, it is really passed promoted to 64
- // bits. Insert an assert[sz]ext to capture this, then truncate to the
- // right size.
- if (VA.getLocInfo() == CCValAssign::SExt)
- RetValue = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), RetValue,
- DAG.getValueType(VA.getValVT()));
- else if (VA.getLocInfo() == CCValAssign::ZExt)
- RetValue = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), RetValue,
- DAG.getValueType(VA.getValVT()));
-
- if (VA.getLocInfo() != CCValAssign::Full)
- RetValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), RetValue);
-
- InVals.push_back(RetValue);
- }
-
- return Chain;
-}
-
-
-SDValue
-SystemZTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
-
- // CCValAssign - represents the assignment of the return value to a location
- SmallVector<CCValAssign, 16> RVLocs;
-
- // CCState - Info about the registers and stack slot.
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
-
- // Analyze return values.
- CCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
-
- // If this is the first return lowered for this function, add the regs to the
- // liveout set for the function.
- if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
- for (unsigned i = 0; i != RVLocs.size(); ++i)
- if (RVLocs[i].isRegLoc())
- DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
- }
-
- SDValue Flag;
-
- // Copy the result values into the output registers.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign &VA = RVLocs[i];
- SDValue ResValue = OutVals[i];
- assert(VA.isRegLoc() && "Can only return in registers!");
-
- // If this is an 8/16/32-bit value, it should really be passed promoted
- // to 64 bits.
- if (VA.getLocInfo() == CCValAssign::SExt)
- ResValue = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ResValue);
- else if (VA.getLocInfo() == CCValAssign::ZExt)
- ResValue = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ResValue);
- else if (VA.getLocInfo() == CCValAssign::AExt)
- ResValue = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ResValue);
-
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ResValue, Flag);
-
- // Guarantee that all emitted copies are glued together so that nothing
- // can be scheduled in between them.
- Flag = Chain.getValue(1);
- }
-
- if (Flag.getNode())
- return DAG.getNode(SystemZISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
-
- // Return Void
- return DAG.getNode(SystemZISD::RET_FLAG, dl, MVT::Other, Chain);
-}
-
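-/// EmitCmp - Map the given ISD condition code onto a SystemZ condition code
-/// (returned through SystemZCC) and emit the signed or unsigned compare node
-/// whose glue result the conditional branch or select will consume.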
-SDValue SystemZTargetLowering::EmitCmp(SDValue LHS, SDValue RHS,
- ISD::CondCode CC, SDValue &SystemZCC,
- SelectionDAG &DAG) const {
- // FIXME: Emit a test if RHS is zero
-
- bool isUnsigned = false;
- SystemZCC::CondCodes TCC;
- switch (CC) {
- default:
- llvm_unreachable("Invalid integer condition!");
- case ISD::SETEQ:
- case ISD::SETOEQ:
- TCC = SystemZCC::E;
- break;
- case ISD::SETUEQ:
- TCC = SystemZCC::NLH;
- break;
- case ISD::SETNE:
- case ISD::SETONE:
- TCC = SystemZCC::NE;
- break;
- case ISD::SETUNE:
- TCC = SystemZCC::LH;
- break;
- case ISD::SETO:
- TCC = SystemZCC::O;
- break;
- case ISD::SETUO:
- TCC = SystemZCC::NO;
- break;
- case ISD::SETULE:
- if (LHS.getValueType().isFloatingPoint()) {
- TCC = SystemZCC::NH;
- break;
- }
- isUnsigned = true; // FALLTHROUGH
- case ISD::SETLE:
- case ISD::SETOLE:
- TCC = SystemZCC::LE;
- break;
- case ISD::SETUGE:
- if (LHS.getValueType().isFloatingPoint()) {
- TCC = SystemZCC::NL;
- break;
- }
- isUnsigned = true; // FALLTHROUGH
- case ISD::SETGE:
- case ISD::SETOGE:
- TCC = SystemZCC::HE;
- break;
- case ISD::SETUGT:
- if (LHS.getValueType().isFloatingPoint()) {
- TCC = SystemZCC::NLE;
- break;
- }
- isUnsigned = true; // FALLTHROUGH
- case ISD::SETGT:
- case ISD::SETOGT:
- TCC = SystemZCC::H;
- break;
- case ISD::SETULT:
- if (LHS.getValueType().isFloatingPoint()) {
- TCC = SystemZCC::NHE;
- break;
- }
- isUnsigned = true; // FALLTHROUGH
- case ISD::SETLT:
- case ISD::SETOLT:
- TCC = SystemZCC::L;
- break;
- }
-
- SystemZCC = DAG.getConstant(TCC, MVT::i32);
-
- DebugLoc dl = LHS.getDebugLoc();
- return DAG.getNode((isUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
- dl, MVT::i64, LHS, RHS);
-}
-
-
-SDValue SystemZTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
- SDValue Chain = Op.getOperand(0);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
- SDValue LHS = Op.getOperand(2);
- SDValue RHS = Op.getOperand(3);
- SDValue Dest = Op.getOperand(4);
- DebugLoc dl = Op.getDebugLoc();
-
- SDValue SystemZCC;
- SDValue Flag = EmitCmp(LHS, RHS, CC, SystemZCC, DAG);
- return DAG.getNode(SystemZISD::BRCOND, dl, Op.getValueType(),
- Chain, Dest, SystemZCC, Flag);
-}
-
-SDValue SystemZTargetLowering::LowerSELECT_CC(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- SDValue TrueV = Op.getOperand(2);
- SDValue FalseV = Op.getOperand(3);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
- DebugLoc dl = Op.getDebugLoc();
-
- SDValue SystemZCC;
- SDValue Flag = EmitCmp(LHS, RHS, CC, SystemZCC, DAG);
-
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
- SmallVector<SDValue, 4> Ops;
- Ops.push_back(TrueV);
- Ops.push_back(FalseV);
- Ops.push_back(SystemZCC);
- Ops.push_back(Flag);
-
- return DAG.getNode(SystemZISD::SELECT, dl, VTs, &Ops[0], Ops.size());
-}
-
-SDValue SystemZTargetLowering::LowerGlobalAddress(SDValue Op,
- SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
-
- bool IsPic = getTargetMachine().getRelocationModel() == Reloc::PIC_;
- bool ExtraLoadRequired =
- Subtarget.GVRequiresExtraLoad(GV, getTargetMachine(), false);
-
- SDValue Result;
- if (!IsPic && !ExtraLoadRequired) {
- Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
- Offset = 0;
- } else {
- unsigned char OpFlags = 0;
- if (ExtraLoadRequired)
- OpFlags = SystemZII::MO_GOTENT;
-
- Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
- }
-
- Result = DAG.getNode(SystemZISD::PCRelativeWrapper, dl,
- getPointerTy(), Result);
-
- if (ExtraLoadRequired)
- Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(), false, false, 0);
-
- // If there was a non-zero offset that we didn't fold, create an explicit
- // addition for it.
- if (Offset != 0)
- Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
- DAG.getConstant(Offset, getPointerTy()));
-
- return Result;
-}
-
-// FIXME: PIC here
-SDValue SystemZTargetLowering::LowerJumpTable(SDValue Op,
- SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
- JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
-
- return DAG.getNode(SystemZISD::PCRelativeWrapper, dl, getPointerTy(), Result);
-}
-
-
-// FIXME: PIC here
-// FIXME: This is just a dirty hack. We need to lower the constant pool properly
-SDValue SystemZTargetLowering::LowerConstantPool(SDValue Op,
- SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
- ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
-
- SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
- CP->getAlignment(),
- CP->getOffset());
-
- return DAG.getNode(SystemZISD::PCRelativeWrapper, dl, getPointerTy(), Result);
-}
-
-const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- case SystemZISD::RET_FLAG: return "SystemZISD::RET_FLAG";
- case SystemZISD::CALL: return "SystemZISD::CALL";
- case SystemZISD::BRCOND: return "SystemZISD::BRCOND";
- case SystemZISD::CMP: return "SystemZISD::CMP";
- case SystemZISD::UCMP: return "SystemZISD::UCMP";
- case SystemZISD::SELECT: return "SystemZISD::SELECT";
- case SystemZISD::PCRelativeWrapper: return "SystemZISD::PCRelativeWrapper";
- default: return NULL;
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Other Lowering Code
-//===----------------------------------------------------------------------===//
-
-MachineBasicBlock*
-SystemZTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const {
- const SystemZInstrInfo &TII = *TM.getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
- assert((MI->getOpcode() == SystemZ::Select32 ||
- MI->getOpcode() == SystemZ::SelectF32 ||
- MI->getOpcode() == SystemZ::Select64 ||
- MI->getOpcode() == SystemZ::SelectF64) &&
- "Unexpected instr type to insert");
-
- // To "insert" a SELECT instruction, we actually have to insert the diamond
- // control-flow pattern. The incoming instruction knows the destination vreg
- // to set, the condition code register to branch on, the true/false values to
- // select between, and a branch opcode to use.
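- //
- // Schematically, the emitted control flow is:
- //
- //   thisMBB:   jCC copy1MBB          ; branch on the condition code
- //   copy0MBB:  (FalseValue path)     ; falls through to copy1MBB
- //   copy1MBB:  %Result = phi [FalseValue, copy0MBB], [TrueValue, thisMBB]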
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator I = BB;
- ++I;
-
- // thisMBB:
- // ...
- // TrueVal = ...
- // cmpTY ccX, r1, r2
- // jCC copy1MBB
- // fallthrough --> copy0MBB
- MachineBasicBlock *thisMBB = BB;
- MachineFunction *F = BB->getParent();
- MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
- SystemZCC::CondCodes CC = (SystemZCC::CondCodes)MI->getOperand(3).getImm();
- F->insert(I, copy0MBB);
- F->insert(I, copy1MBB);
- // Update machine-CFG edges by transferring all successors of the current
- // block to the new block which will contain the Phi node for the select.
- copy1MBB->splice(copy1MBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
- copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
- // Next, add the true and fallthrough blocks as its successors.
- BB->addSuccessor(copy0MBB);
- BB->addSuccessor(copy1MBB);
-
- BuildMI(BB, dl, TII.getBrCond(CC)).addMBB(copy1MBB);
-
- // copy0MBB:
- // %FalseValue = ...
- // # fallthrough to copy1MBB
- BB = copy0MBB;
-
- // Update machine-CFG edges
- BB->addSuccessor(copy1MBB);
-
- // copy1MBB:
- // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
- // ...
- BB = copy1MBB;
- BuildMI(*BB, BB->begin(), dl, TII.get(SystemZ::PHI),
- MI->getOperand(0).getReg())
- .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
-
- MI->eraseFromParent(); // The pseudo instruction is gone now.
- return BB;
-}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
deleted file mode 100644
index bab3dc2..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ /dev/null
@@ -1,145 +0,0 @@
-//==-- SystemZISelLowering.h - SystemZ DAG Lowering Interface ----*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that SystemZ uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_SystemZ_ISELLOWERING_H
-#define LLVM_TARGET_SystemZ_ISELLOWERING_H
-
-#include "SystemZ.h"
-#include "SystemZRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetLowering.h"
-
-namespace llvm {
- namespace SystemZISD {
- enum {
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
-
- /// Return with a flag operand. Operand 0 is the chain operand.
- RET_FLAG,
-
- /// CALL - These operations represent an abstract call instruction, which
- /// carries the callee and its argument information.
- CALL,
-
- /// PCRelativeWrapper - PC relative address
- PCRelativeWrapper,
-
- /// CMP, UCMP - Compare instruction
- CMP,
- UCMP,
-
- /// BRCOND - Conditional branch. Operand 0 is the chain operand, operand 1
- /// is the block to branch to if the condition is true, operand 2 is the
- /// condition code and operand 3 is the flag operand produced by a CMP
- /// instruction.
- BRCOND,
-
- /// SELECT - Operands 0 and 1 are selection variables, operand 2 is
- /// condition code and operand 3 is the flag operand.
- SELECT
- };
- }
-
- class SystemZSubtarget;
- class SystemZTargetMachine;
-
- class SystemZTargetLowering : public TargetLowering {
- public:
- explicit SystemZTargetLowering(SystemZTargetMachine &TM);
-
- virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i64; }
-
- /// LowerOperation - Provide custom lowering hooks for some operations.
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
-
- /// getTargetNodeName - This method returns the name of a target specific
- /// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
-
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
- TargetLowering::ConstraintType
- getConstraintType(const std::string &Constraint) const;
-
- SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
-
- SDValue EmitCmp(SDValue LHS, SDValue RHS,
- ISD::CondCode CC, SDValue &SystemZCC,
- SelectionDAG &DAG) const;
-
-
- MachineBasicBlock* EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const;
-
- /// isFPImmLegal - Returns true if the target can instruction select the
- /// specified FP immediate natively. If false, the legalizer will
- /// materialize the FP immediate as a load from a constant pool.
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
-
- private:
- SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- SDValue LowerCCCArguments(SDValue Chain,
- CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl,
- SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
-
- const SystemZSubtarget &Subtarget;
- const SystemZTargetMachine &TM;
- const SystemZRegisterInfo *RegInfo;
- };
-} // namespace llvm
-
-#endif // LLVM_TARGET_SystemZ_ISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
deleted file mode 100644
index ab45ec5..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
+++ /dev/null
@@ -1,128 +0,0 @@
-//===- SystemZInstrBuilder.h - Functions to aid building insts -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file exposes functions that may be used with BuildMI from the
-// MachineInstrBuilder.h file to handle SystemZ'isms in a clean way.
-//
-// The BuildMem function may be used with the BuildMI function to add entire
-// memory references in a single, typed, function call.
-//
-// For reference, the order of operands for memory references is:
-// (Operand), Base, Displacement, Index.
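-//
-// As a sketch (assuming a valid insertion point MI, a frame index FI, and
-// the target's TII), a 64-bit spill could be built as:
-//   addFrameReference(BuildMI(MBB, MI, DL, TII.get(SystemZ::MOV64mr)), FI)
-//     .addReg(SrcReg, getKillRegState(isKill));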
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SYSTEMZINSTRBUILDER_H
-#define SYSTEMZINSTRBUILDER_H
-
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-
-namespace llvm {
-
-/// SystemZAddressMode - This struct holds a generalized full SystemZ address
-/// mode. The base register can be a frame index, which will eventually be
-/// replaced with R15 or R11, with Disp adjusted accordingly.
-struct SystemZAddressMode {
- enum {
- RegBase,
- FrameIndexBase
- } BaseType;
-
- union {
- unsigned Reg;
- int FrameIndex;
- } Base;
-
- unsigned IndexReg;
- int32_t Disp;
- const GlobalValue *GV;
-
- SystemZAddressMode() : BaseType(RegBase), IndexReg(0), Disp(0) {
- Base.Reg = 0;
- }
-};
-
-/// addDirectMem - This function is used to add a direct memory reference to the
-/// current instruction -- that is, a dereference of an address in a register,
-/// with no index or displacement.
-///
-static inline const MachineInstrBuilder &
-addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg) {
- // Because memory references are always represented with 3
- // values, this adds: Reg, [0, NoReg] to the instruction.
- return MIB.addReg(Reg).addImm(0).addReg(0);
-}
-
-static inline const MachineInstrBuilder &
-addOffset(const MachineInstrBuilder &MIB, int Offset) {
- return MIB.addImm(Offset).addReg(0);
-}
-
-/// addRegOffset - This function is used to add a memory reference of the form
-/// [Reg + Offset], i.e., one with no index, but with a
-/// displacement. An example is: 10(%r15).
-///
-static inline const MachineInstrBuilder &
-addRegOffset(const MachineInstrBuilder &MIB,
- unsigned Reg, bool isKill, int Offset) {
- return addOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
-}
-
-/// addRegReg - This function is used to add a memory reference of the form:
-/// [Reg + Reg].
-static inline const MachineInstrBuilder &
-addRegReg(const MachineInstrBuilder &MIB,
- unsigned Reg1, bool isKill1, unsigned Reg2, bool isKill2) {
- return MIB.addReg(Reg1, getKillRegState(isKill1)).addImm(0)
- .addReg(Reg2, getKillRegState(isKill2));
-}
-
-static inline const MachineInstrBuilder &
-addFullAddress(const MachineInstrBuilder &MIB, const SystemZAddressMode &AM) {
- if (AM.BaseType == SystemZAddressMode::RegBase)
- MIB.addReg(AM.Base.Reg);
- else if (AM.BaseType == SystemZAddressMode::FrameIndexBase)
- MIB.addFrameIndex(AM.Base.FrameIndex);
- else
- assert(0 && "Unknown base type!");
-
- return MIB.addImm(AM.Disp).addReg(AM.IndexReg);
-}
-
-/// addFrameReference - This function is used to add a reference to the base of
-/// an abstract object on the stack frame of the current function. The
-/// reference keeps the frame index as its base register until it is resolved;
-/// a constant offset can be specified as well.
-///
-static inline const MachineInstrBuilder &
-addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
- MachineInstr *MI = MIB;
- MachineFunction &MF = *MI->getParent()->getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- const MCInstrDesc &MCID = MI->getDesc();
- unsigned Flags = 0;
- if (MCID.mayLoad())
- Flags |= MachineMemOperand::MOLoad;
- if (MCID.mayStore())
- Flags |= MachineMemOperand::MOStore;
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo(
- PseudoSourceValue::getFixedStack(FI), Offset),
- Flags, MFI.getObjectSize(FI),
- MFI.getObjectAlignment(FI));
- return addOffset(MIB.addFrameIndex(FI), Offset)
- .addMemOperand(MMO);
-}
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
deleted file mode 100644
index a658280..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
+++ /dev/null
@@ -1,340 +0,0 @@
-//===- SystemZInstrFP.td - SystemZ FP Instruction defs ------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the SystemZ (binary) floating point instructions in
-// TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-// FIXME: multiclassify!
-
-//===----------------------------------------------------------------------===//
-// FP Pattern fragments
-
-def fpimm0 : PatLeaf<(fpimm), [{
- return N->isExactlyValue(+0.0);
-}]>;
-
-def fpimmneg0 : PatLeaf<(fpimm), [{
- return N->isExactlyValue(-0.0);
-}]>;
-
-let Uses = [PSW], usesCustomInserter = 1 in {
- def SelectF32 : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2, i8imm:$cc),
- "# SelectF32 PSEUDO",
- [(set FP32:$dst,
- (SystemZselect FP32:$src1, FP32:$src2, imm:$cc, PSW))]>;
- def SelectF64 : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2, i8imm:$cc),
- "# SelectF64 PSEUDO",
- [(set FP64:$dst,
- (SystemZselect FP64:$src1, FP64:$src2, imm:$cc, PSW))]>;
-}
-
-//===----------------------------------------------------------------------===//
-// Move Instructions
-
-// Floating point constant loads.
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
-def LD_Fp032 : Pseudo<(outs FP32:$dst), (ins),
- "lzer\t{$dst}",
- [(set FP32:$dst, fpimm0)]>;
-def LD_Fp064 : Pseudo<(outs FP64:$dst), (ins),
- "lzdr\t{$dst}",
- [(set FP64:$dst, fpimm0)]>;
-}
-
-let neverHasSideEffects = 1 in {
-def FMOV32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src),
- "ler\t{$dst, $src}",
- []>;
-def FMOV64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src),
- "ldr\t{$dst, $src}",
- []>;
-}
-
-let canFoldAsLoad = 1, isReMaterializable = 1 in {
-def FMOV32rm : Pseudo<(outs FP32:$dst), (ins rriaddr12:$src),
- "le\t{$dst, $src}",
- [(set FP32:$dst, (load rriaddr12:$src))]>;
-def FMOV32rmy : Pseudo<(outs FP32:$dst), (ins rriaddr:$src),
- "ley\t{$dst, $src}",
- [(set FP32:$dst, (load rriaddr:$src))]>;
-def FMOV64rm : Pseudo<(outs FP64:$dst), (ins rriaddr12:$src),
- "ld\t{$dst, $src}",
- [(set FP64:$dst, (load rriaddr12:$src))]>;
-def FMOV64rmy : Pseudo<(outs FP64:$dst), (ins rriaddr:$src),
- "ldy\t{$dst, $src}",
- [(set FP64:$dst, (load rriaddr:$src))]>;
-}
-
-def FMOV32mr : Pseudo<(outs), (ins rriaddr12:$dst, FP32:$src),
- "ste\t{$src, $dst}",
- [(store FP32:$src, rriaddr12:$dst)]>;
-def FMOV32mry : Pseudo<(outs), (ins rriaddr:$dst, FP32:$src),
- "stey\t{$src, $dst}",
- [(store FP32:$src, rriaddr:$dst)]>;
-def FMOV64mr : Pseudo<(outs), (ins rriaddr12:$dst, FP64:$src),
- "std\t{$src, $dst}",
- [(store FP64:$src, rriaddr12:$dst)]>;
-def FMOV64mry : Pseudo<(outs), (ins rriaddr:$dst, FP64:$src),
- "stdy\t{$src, $dst}",
- [(store FP64:$src, rriaddr:$dst)]>;
-
-def FCOPYSIGN32 : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
- "cpsdr\t{$dst, $src2, $src1}",
- [(set FP32:$dst, (fcopysign FP32:$src1, FP32:$src2))]>;
-def FCOPYSIGN64 : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2),
- "cpsdr\t{$dst, $src2, $src1}",
- [(set FP64:$dst, (fcopysign FP64:$src1, FP64:$src2))]>;
-
-//===----------------------------------------------------------------------===//
-// Arithmetic Instructions
-
-
-let Defs = [PSW] in {
-def FNEG32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src),
- "lcebr\t{$dst, $src}",
- [(set FP32:$dst, (fneg FP32:$src)),
- (implicit PSW)]>;
-def FNEG64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src),
- "lcdbr\t{$dst, $src}",
- [(set FP64:$dst, (fneg FP64:$src)),
- (implicit PSW)]>;
-
-def FABS32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src),
- "lpebr\t{$dst, $src}",
- [(set FP32:$dst, (fabs FP32:$src)),
- (implicit PSW)]>;
-def FABS64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src),
- "lpdbr\t{$dst, $src}",
- [(set FP64:$dst, (fabs FP64:$src)),
- (implicit PSW)]>;
-
-def FNABS32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src),
- "lnebr\t{$dst, $src}",
- [(set FP32:$dst, (fneg(fabs FP32:$src))),
- (implicit PSW)]>;
-def FNABS64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src),
- "lndbr\t{$dst, $src}",
- [(set FP64:$dst, (fneg(fabs FP64:$src))),
- (implicit PSW)]>;
-}
-
-let Constraints = "$src1 = $dst" in {
-let Defs = [PSW] in {
-let isCommutable = 1 in { // X = ADD Y, Z == X = ADD Z, Y
-def FADD32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
- "aebr\t{$dst, $src2}",
- [(set FP32:$dst, (fadd FP32:$src1, FP32:$src2)),
- (implicit PSW)]>;
-def FADD64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2),
- "adbr\t{$dst, $src2}",
- [(set FP64:$dst, (fadd FP64:$src1, FP64:$src2)),
- (implicit PSW)]>;
-}
-
-def FADD32rm : Pseudo<(outs FP32:$dst), (ins FP32:$src1, rriaddr12:$src2),
- "aeb\t{$dst, $src2}",
- [(set FP32:$dst, (fadd FP32:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-def FADD64rm : Pseudo<(outs FP64:$dst), (ins FP64:$src1, rriaddr12:$src2),
- "adb\t{$dst, $src2}",
- [(set FP64:$dst, (fadd FP64:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-
-def FSUB32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
- "sebr\t{$dst, $src2}",
- [(set FP32:$dst, (fsub FP32:$src1, FP32:$src2)),
- (implicit PSW)]>;
-def FSUB64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2),
- "sdbr\t{$dst, $src2}",
- [(set FP64:$dst, (fsub FP64:$src1, FP64:$src2)),
- (implicit PSW)]>;
-
-def FSUB32rm : Pseudo<(outs FP32:$dst), (ins FP32:$src1, rriaddr12:$src2),
- "seb\t{$dst, $src2}",
- [(set FP32:$dst, (fsub FP32:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-def FSUB64rm : Pseudo<(outs FP64:$dst), (ins FP64:$src1, rriaddr12:$src2),
- "sdb\t{$dst, $src2}",
- [(set FP64:$dst, (fsub FP64:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-} // Defs = [PSW]
-
-let isCommutable = 1 in { // X = MUL Y, Z == X = MUL Z, Y
-def FMUL32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
- "meebr\t{$dst, $src2}",
- [(set FP32:$dst, (fmul FP32:$src1, FP32:$src2))]>;
-def FMUL64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2),
- "mdbr\t{$dst, $src2}",
- [(set FP64:$dst, (fmul FP64:$src1, FP64:$src2))]>;
-}
-
-def FMUL32rm : Pseudo<(outs FP32:$dst), (ins FP32:$src1, rriaddr12:$src2),
- "meeb\t{$dst, $src2}",
- [(set FP32:$dst, (fmul FP32:$src1, (load rriaddr12:$src2)))]>;
-def FMUL64rm : Pseudo<(outs FP64:$dst), (ins FP64:$src1, rriaddr12:$src2),
- "mdb\t{$dst, $src2}",
- [(set FP64:$dst, (fmul FP64:$src1, (load rriaddr12:$src2)))]>;
-
-def FMADD32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2, FP32:$src3),
- "maebr\t{$dst, $src3, $src2}",
- [(set FP32:$dst, (fadd (fmul FP32:$src2, FP32:$src3),
- FP32:$src1))]>;
-def FMADD32rm : Pseudo<(outs FP32:$dst), (ins FP32:$src1, rriaddr12:$src2, FP32:$src3),
- "maeb\t{$dst, $src3, $src2}",
- [(set FP32:$dst, (fadd (fmul (load rriaddr12:$src2),
- FP32:$src3),
- FP32:$src1))]>;
-
-def FMADD64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2, FP64:$src3),
- "madbr\t{$dst, $src3, $src2}",
- [(set FP64:$dst, (fadd (fmul FP64:$src2, FP64:$src3),
- FP64:$src1))]>;
-def FMADD64rm : Pseudo<(outs FP64:$dst), (ins FP64:$src1, rriaddr12:$src2, FP64:$src3),
- "madb\t{$dst, $src3, $src2}",
- [(set FP64:$dst, (fadd (fmul (load rriaddr12:$src2),
- FP64:$src3),
- FP64:$src1))]>;
-
-def FMSUB32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2, FP32:$src3),
- "msebr\t{$dst, $src3, $src2}",
- [(set FP32:$dst, (fsub (fmul FP32:$src2, FP32:$src3),
- FP32:$src1))]>;
-def FMSUB32rm : Pseudo<(outs FP32:$dst), (ins FP32:$src1, rriaddr12:$src2, FP32:$src3),
- "mseb\t{$dst, $src3, $src2}",
- [(set FP32:$dst, (fsub (fmul (load rriaddr12:$src2),
- FP32:$src3),
- FP32:$src1))]>;
-
-def FMSUB64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2, FP64:$src3),
- "msdbr\t{$dst, $src3, $src2}",
- [(set FP64:$dst, (fsub (fmul FP64:$src2, FP64:$src3),
- FP64:$src1))]>;
-def FMSUB64rm : Pseudo<(outs FP64:$dst), (ins FP64:$src1, rriaddr12:$src2, FP64:$src3),
- "msdb\t{$dst, $src3, $src2}",
- [(set FP64:$dst, (fsub (fmul (load rriaddr12:$src2),
- FP64:$src3),
- FP64:$src1))]>;
-
-def FDIV32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
- "debr\t{$dst, $src2}",
- [(set FP32:$dst, (fdiv FP32:$src1, FP32:$src2))]>;
-def FDIV64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src1, FP64:$src2),
- "ddbr\t{$dst, $src2}",
- [(set FP64:$dst, (fdiv FP64:$src1, FP64:$src2))]>;
-
-def FDIV32rm : Pseudo<(outs FP32:$dst), (ins FP32:$src1, rriaddr12:$src2),
- "deb\t{$dst, $src2}",
- [(set FP32:$dst, (fdiv FP32:$src1, (load rriaddr12:$src2)))]>;
-def FDIV64rm : Pseudo<(outs FP64:$dst), (ins FP64:$src1, rriaddr12:$src2),
- "ddb\t{$dst, $src2}",
- [(set FP64:$dst, (fdiv FP64:$src1, (load rriaddr12:$src2)))]>;
-
-} // Constraints = "$src1 = $dst"
-
-def FSQRT32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src),
- "sqebr\t{$dst, $src}",
- [(set FP32:$dst, (fsqrt FP32:$src))]>;
-def FSQRT64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src),
- "sqdbr\t{$dst, $src}",
- [(set FP64:$dst, (fsqrt FP64:$src))]>;
-
-def FSQRT32rm : Pseudo<(outs FP32:$dst), (ins rriaddr12:$src),
- "sqeb\t{$dst, $src}",
- [(set FP32:$dst, (fsqrt (load rriaddr12:$src)))]>;
-def FSQRT64rm : Pseudo<(outs FP64:$dst), (ins rriaddr12:$src),
- "sqdb\t{$dst, $src}",
- [(set FP64:$dst, (fsqrt (load rriaddr12:$src)))]>;
-
-def FROUND64r32 : Pseudo<(outs FP32:$dst), (ins FP64:$src),
- "ledbr\t{$dst, $src}",
- [(set FP32:$dst, (fround FP64:$src))]>;
-
-def FEXT32r64 : Pseudo<(outs FP64:$dst), (ins FP32:$src),
- "ldebr\t{$dst, $src}",
- [(set FP64:$dst, (fextend FP32:$src))]>;
-def FEXT32m64 : Pseudo<(outs FP64:$dst), (ins rriaddr12:$src),
- "ldeb\t{$dst, $src}",
- [(set FP64:$dst, (fextend (load rriaddr12:$src)))]>;
-
-let Defs = [PSW] in {
-def FCONVFP32 : Pseudo<(outs FP32:$dst), (ins GR32:$src),
- "cefbr\t{$dst, $src}",
- [(set FP32:$dst, (sint_to_fp GR32:$src)),
- (implicit PSW)]>;
-def FCONVFP32r64: Pseudo<(outs FP32:$dst), (ins GR64:$src),
- "cegbr\t{$dst, $src}",
- [(set FP32:$dst, (sint_to_fp GR64:$src)),
- (implicit PSW)]>;
-
-def FCONVFP64r32: Pseudo<(outs FP64:$dst), (ins GR32:$src),
- "cdfbr\t{$dst, $src}",
- [(set FP64:$dst, (sint_to_fp GR32:$src)),
- (implicit PSW)]>;
-def FCONVFP64 : Pseudo<(outs FP64:$dst), (ins GR64:$src),
- "cdgbr\t{$dst, $src}",
- [(set FP64:$dst, (sint_to_fp GR64:$src)),
- (implicit PSW)]>;
-
-def FCONVGR32 : Pseudo<(outs GR32:$dst), (ins FP32:$src),
- "cfebr\t{$dst, 5, $src}",
- [(set GR32:$dst, (fp_to_sint FP32:$src)),
- (implicit PSW)]>;
-def FCONVGR32r64: Pseudo<(outs GR32:$dst), (ins FP64:$src),
- "cfdbr\t{$dst, 5, $src}",
- [(set GR32:$dst, (fp_to_sint FP64:$src)),
- (implicit PSW)]>;
-
-def FCONVGR64r32: Pseudo<(outs GR64:$dst), (ins FP32:$src),
- "cgebr\t{$dst, 5, $src}",
- [(set GR64:$dst, (fp_to_sint FP32:$src)),
- (implicit PSW)]>;
-def FCONVGR64 : Pseudo<(outs GR64:$dst), (ins FP64:$src),
- "cgdbr\t{$dst, 5, $src}",
- [(set GR64:$dst, (fp_to_sint FP64:$src)),
- (implicit PSW)]>;
-} // Defs = [PSW]
-
-def FBCONVG64 : Pseudo<(outs GR64:$dst), (ins FP64:$src),
- "lgdr\t{$dst, $src}",
- [(set GR64:$dst, (bitconvert FP64:$src))]>;
-def FBCONVF64 : Pseudo<(outs FP64:$dst), (ins GR64:$src),
- "ldgr\t{$dst, $src}",
- [(set FP64:$dst, (bitconvert GR64:$src))]>;
-
-//===----------------------------------------------------------------------===//
-// Compare instructions (set PSW but do not produce any other result)
-
-// Floating point comparisons
-let Defs = [PSW] in {
-def FCMP32rr : Pseudo<(outs), (ins FP32:$src1, FP32:$src2),
- "cebr\t$src1, $src2",
- [(set PSW, (SystemZcmp FP32:$src1, FP32:$src2))]>;
-def FCMP64rr : Pseudo<(outs), (ins FP64:$src1, FP64:$src2),
- "cdbr\t$src1, $src2",
- [(set PSW, (SystemZcmp FP64:$src1, FP64:$src2))]>;
-
-def FCMP32rm : Pseudo<(outs), (ins FP32:$src1, rriaddr12:$src2),
- "ceb\t$src1, $src2",
- [(set PSW, (SystemZcmp FP32:$src1,
- (load rriaddr12:$src2)))]>;
-def FCMP64rm : Pseudo<(outs), (ins FP64:$src1, rriaddr12:$src2),
- "cdb\t$src1, $src2",
- [(set PSW, (SystemZcmp FP64:$src1,
- (load rriaddr12:$src2)))]>;
-} // Defs = [PSW]
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//===----------------------------------------------------------------------===//
-
-// Floating point constant -0.0
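-// (There is no load-negative-zero instruction, so the patterns below load
-// +0.0 with lzer/lzdr and negate it via FNEG32rr/FNEG64rr, i.e. lcebr/lcdbr.)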
-def : Pat<(f32 fpimmneg0), (FNEG32rr (LD_Fp032))>;
-def : Pat<(f64 fpimmneg0), (FNEG64rr (LD_Fp064))>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
deleted file mode 100644
index b4a8993..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
+++ /dev/null
@@ -1,133 +0,0 @@
-//===- SystemZInstrFormats.td - SystemZ Instruction Formats ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-// Format specifies the encoding used by the instruction. This is part of the
-// ad-hoc solution used to emit machine instruction encodings by our machine
-// code emitter.
-class Format<bits<5> val> {
- bits<5> Value = val;
-}
-
-def Pseudo : Format<0>;
-def EForm : Format<1>;
-def IForm : Format<2>;
-def RIForm : Format<3>;
-def RIEForm : Format<4>;
-def RILForm : Format<5>;
-def RISForm : Format<6>;
-def RRForm : Format<7>;
-def RREForm : Format<8>;
-def RRFForm : Format<9>;
-def RRRForm : Format<10>;
-def RRSForm : Format<11>;
-def RSForm : Format<12>;
-def RSIForm : Format<13>;
-def RSILForm : Format<14>;
-def RSYForm : Format<15>;
-def RXForm : Format<16>;
-def RXEForm : Format<17>;
-def RXFForm : Format<18>;
-def RXYForm : Format<19>;
-def SForm : Format<20>;
-def SIForm : Format<21>;
-def SILForm : Format<22>;
-def SIYForm : Format<23>;
-def SSForm : Format<24>;
-def SSEForm : Format<25>;
-def SSFForm : Format<26>;
-
-class InstSystemZ<bits<16> op, Format f, dag outs, dag ins> : Instruction {
- let Namespace = "SystemZ";
-
- bits<16> Opcode = op;
-
- Format Form = f;
- bits<5> FormBits = Form.Value;
-
- dag OutOperandList = outs;
- dag InOperandList = ins;
-}
-
-class I8<bits<8> op, Format f, dag outs, dag ins, string asmstr,
- list<dag> pattern>
- : InstSystemZ<0, f, outs, ins> {
- let Opcode{0-7} = op;
- let Opcode{8-15} = 0;
-
- let Pattern = pattern;
- let AsmString = asmstr;
-}
-
-class I12<bits<12> op, Format f, dag outs, dag ins, string asmstr,
- list<dag> pattern>
- : InstSystemZ<0, f, outs, ins> {
- let Opcode{0-11} = op;
- let Opcode{12-15} = 0;
-
- let Pattern = pattern;
- let AsmString = asmstr;
-}
-
-class I16<bits<16> op, Format f, dag outs, dag ins, string asmstr,
- list<dag> pattern>
- : InstSystemZ<op, f, outs, ins> {
- let Pattern = pattern;
- let AsmString = asmstr;
-}
-
-class RRI<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I8<op, RRForm, outs, ins, asmstr, pattern>;
-
-class RII<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I12<op, RIForm, outs, ins, asmstr, pattern>;
-
-class RILI<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I12<op, RILForm, outs, ins, asmstr, pattern>;
-
-class RREI<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I16<op, RREForm, outs, ins, asmstr, pattern>;
-
-class RXI<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I8<op, RXForm, outs, ins, asmstr, pattern> {
- let AddedComplexity = 1;
-}
-
-class RXYI<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I16<op, RXYForm, outs, ins, asmstr, pattern>;
-
-class RSI<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I8<op, RSForm, outs, ins, asmstr, pattern> {
- let AddedComplexity = 1;
-}
-
-class RSYI<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I16<op, RSYForm, outs, ins, asmstr, pattern>;
-
-class SII<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I8<op, SIForm, outs, ins, asmstr, pattern> {
- let AddedComplexity = 1;
-}
-
-class SIYI<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I16<op, SIYForm, outs, ins, asmstr, pattern>;
-
-class SILI<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
- : I16<op, SILForm, outs, ins, asmstr, pattern>;
-
-
-//===----------------------------------------------------------------------===//
-// Pseudo instructions
-//===----------------------------------------------------------------------===//
-
-class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
- : InstSystemZ<0, Pseudo, outs, ins> {
-
- let Pattern = pattern;
- let AsmString = asmstr;
-}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
deleted file mode 100644
index 5f3dd80..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ /dev/null
@@ -1,439 +0,0 @@
-//===- SystemZInstrInfo.cpp - SystemZ Instruction Information --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the SystemZ implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SystemZ.h"
-#include "SystemZInstrBuilder.h"
-#include "SystemZInstrInfo.h"
-#include "SystemZMachineFunctionInfo.h"
-#include "SystemZTargetMachine.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_INSTRINFO_CTOR
-#include "SystemZGenInstrInfo.inc"
-
-using namespace llvm;
-
-SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
- : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKUP, SystemZ::ADJCALLSTACKDOWN),
- RI(tm, *this), TM(tm) {
-}
-
-/// isGVStub - Return true if the GV requires an extra load to get the
-/// real address.
-static inline bool isGVStub(GlobalValue *GV, SystemZTargetMachine &TM) {
- return TM.getSubtarget<SystemZSubtarget>().GVRequiresExtraLoad(GV, TM, false);
-}
-
-void SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- unsigned Opc = 0;
- if (RC == &SystemZ::GR32RegClass ||
- RC == &SystemZ::ADDR32RegClass)
- Opc = SystemZ::MOV32mr;
- else if (RC == &SystemZ::GR64RegClass ||
- RC == &SystemZ::ADDR64RegClass) {
- Opc = SystemZ::MOV64mr;
- } else if (RC == &SystemZ::FP32RegClass) {
- Opc = SystemZ::FMOV32mr;
- } else if (RC == &SystemZ::FP64RegClass) {
- Opc = SystemZ::FMOV64mr;
- } else if (RC == &SystemZ::GR64PRegClass) {
- Opc = SystemZ::MOV64Pmr;
- } else if (RC == &SystemZ::GR128RegClass) {
- Opc = SystemZ::MOV128mr;
- } else
- llvm_unreachable("Unsupported regclass to store");
-
- addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
- .addReg(SrcReg, getKillRegState(isKill));
-}
-
-void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const{
- DebugLoc DL;
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- unsigned Opc = 0;
- if (RC == &SystemZ::GR32RegClass ||
- RC == &SystemZ::ADDR32RegClass)
- Opc = SystemZ::MOV32rm;
- else if (RC == &SystemZ::GR64RegClass ||
- RC == &SystemZ::ADDR64RegClass) {
- Opc = SystemZ::MOV64rm;
- } else if (RC == &SystemZ::FP32RegClass) {
- Opc = SystemZ::FMOV32rm;
- } else if (RC == &SystemZ::FP64RegClass) {
- Opc = SystemZ::FMOV64rm;
- } else if (RC == &SystemZ::GR64PRegClass) {
- Opc = SystemZ::MOV64Prm;
- } else if (RC == &SystemZ::GR128RegClass) {
- Opc = SystemZ::MOV128rm;
- } else
- llvm_unreachable("Unsupported regclass to load");
-
- addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
-}
-
-void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const {
- unsigned Opc;
- if (SystemZ::GR64RegClass.contains(DestReg, SrcReg))
- Opc = SystemZ::MOV64rr;
- else if (SystemZ::GR32RegClass.contains(DestReg, SrcReg))
- Opc = SystemZ::MOV32rr;
- else if (SystemZ::GR64PRegClass.contains(DestReg, SrcReg))
- Opc = SystemZ::MOV64rrP;
- else if (SystemZ::GR128RegClass.contains(DestReg, SrcReg))
- Opc = SystemZ::MOV128rr;
- else if (SystemZ::FP32RegClass.contains(DestReg, SrcReg))
- Opc = SystemZ::FMOV32rr;
- else if (SystemZ::FP64RegClass.contains(DestReg, SrcReg))
- Opc = SystemZ::FMOV64rr;
- else
- llvm_unreachable("Impossible reg-to-reg copy");
-
- BuildMI(MBB, I, DL, get(Opc), DestReg)
- .addReg(SrcReg, getKillRegState(KillSrc));
-}
-
-unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case SystemZ::MOV32rm:
- case SystemZ::MOV32rmy:
- case SystemZ::MOV64rm:
- case SystemZ::MOVSX32rm8:
- case SystemZ::MOVSX32rm16y:
- case SystemZ::MOVSX64rm8:
- case SystemZ::MOVSX64rm16:
- case SystemZ::MOVSX64rm32:
- case SystemZ::MOVZX32rm8:
- case SystemZ::MOVZX32rm16:
- case SystemZ::MOVZX64rm8:
- case SystemZ::MOVZX64rm16:
- case SystemZ::MOVZX64rm32:
- case SystemZ::FMOV32rm:
- case SystemZ::FMOV32rmy:
- case SystemZ::FMOV64rm:
- case SystemZ::FMOV64rmy:
- case SystemZ::MOV64Prm:
- case SystemZ::MOV64Prmy:
- case SystemZ::MOV128rm:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() && MI->getOperand(3).isReg() &&
- MI->getOperand(2).getImm() == 0 && MI->getOperand(3).getReg() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
- return 0;
-}
-
-unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case SystemZ::MOV32mr:
- case SystemZ::MOV32mry:
- case SystemZ::MOV64mr:
- case SystemZ::MOV32m8r:
- case SystemZ::MOV32m8ry:
- case SystemZ::MOV32m16r:
- case SystemZ::MOV32m16ry:
- case SystemZ::MOV64m8r:
- case SystemZ::MOV64m8ry:
- case SystemZ::MOV64m16r:
- case SystemZ::MOV64m16ry:
- case SystemZ::MOV64m32r:
- case SystemZ::MOV64m32ry:
- case SystemZ::FMOV32mr:
- case SystemZ::FMOV32mry:
- case SystemZ::FMOV64mr:
- case SystemZ::FMOV64mry:
- case SystemZ::MOV64Pmr:
- case SystemZ::MOV64Pmry:
- case SystemZ::MOV128mr:
- if (MI->getOperand(0).isFI() &&
- MI->getOperand(1).isImm() && MI->getOperand(2).isReg() &&
- MI->getOperand(1).getImm() == 0 && MI->getOperand(2).getReg() == 0) {
- FrameIndex = MI->getOperand(0).getIndex();
- return MI->getOperand(3).getReg();
- }
- break;
- }
- return 0;
-}
-
-bool SystemZInstrInfo::
-ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
- assert(Cond.size() == 1 && "Invalid Xbranch condition!");
-
- SystemZCC::CondCodes CC = static_cast<SystemZCC::CondCodes>(Cond[0].getImm());
- Cond[0].setImm(getOppositeCondition(CC));
- return false;
-}
-
-bool SystemZInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.isTerminator()) return false;
-
- // Conditional branch is a special case.
- if (MCID.isBranch() && !MCID.isBarrier())
- return true;
- if (!MCID.isPredicable())
- return true;
- return !isPredicated(MI);
-}
-
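-/// AnalyzeBranch - Walk the terminators of MBB from the bottom up. On
-/// success this returns false, with TBB/FBB set to the taken and fallthrough
-/// blocks and Cond holding a single SystemZCC condition-code immediate;
-/// returning true means the branch sequence could not be analyzed.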
-bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
- // Start from the bottom of the block and work up, examining the
- // terminator instructions.
- MachineBasicBlock::iterator I = MBB.end();
- while (I != MBB.begin()) {
- --I;
- if (I->isDebugValue())
- continue;
- // Working from the bottom, when we see a non-terminator
- // instruction, we're done.
- if (!isUnpredicatedTerminator(I))
- break;
-
- // A terminator that isn't a branch can't easily be handled
- // by this analysis.
- if (!I->getDesc().isBranch())
- return true;
-
- // Handle unconditional branches.
- if (I->getOpcode() == SystemZ::JMP) {
- if (!AllowModify) {
- TBB = I->getOperand(0).getMBB();
- continue;
- }
-
- // If the block has any instructions after a JMP, delete them.
- while (llvm::next(I) != MBB.end())
- llvm::next(I)->eraseFromParent();
- Cond.clear();
- FBB = 0;
-
- // Delete the JMP if it's equivalent to a fall-through.
- if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
- TBB = 0;
- I->eraseFromParent();
- I = MBB.end();
- continue;
- }
-
- // TBB is used to indicate the unconditional destination.
- TBB = I->getOperand(0).getMBB();
- continue;
- }
-
- // Handle conditional branches.
- SystemZCC::CondCodes BranchCode = getCondFromBranchOpc(I->getOpcode());
- if (BranchCode == SystemZCC::INVALID)
- return true; // Can't handle indirect branch.
-
- // Working from the bottom, handle the first conditional branch.
- if (Cond.empty()) {
- FBB = TBB;
- TBB = I->getOperand(0).getMBB();
- Cond.push_back(MachineOperand::CreateImm(BranchCode));
- continue;
- }
-
- // Handle subsequent conditional branches.
- assert(Cond.size() == 1);
- assert(TBB);
-
- // Only handle the case where all conditional branches branch to
- // the same destination.
- if (TBB != I->getOperand(0).getMBB())
- return true;
-
- SystemZCC::CondCodes OldBranchCode = (SystemZCC::CondCodes)Cond[0].getImm();
- // If the conditions are the same, we can leave them alone.
- if (OldBranchCode == BranchCode)
- continue;
-
- return true;
- }
-
- return false;
-}
-
-unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator I = MBB.end();
- unsigned Count = 0;
-
- while (I != MBB.begin()) {
- --I;
- if (I->isDebugValue())
- continue;
- if (I->getOpcode() != SystemZ::JMP &&
- getCondFromBranchOpc(I->getOpcode()) == SystemZCC::INVALID)
- break;
- // Remove the branch.
- I->eraseFromParent();
- I = MBB.end();
- ++Count;
- }
-
- return Count;
-}
-
-unsigned
-SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const {
- // Shouldn't be a fall through.
- assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- assert((Cond.size() == 1 || Cond.size() == 0) &&
- "SystemZ branch conditions have one component!");
-
- if (Cond.empty()) {
- // Unconditional branch?
- assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, DL, get(SystemZ::JMP)).addMBB(TBB);
- return 1;
- }
-
- // Conditional branch.
- unsigned Count = 0;
- SystemZCC::CondCodes CC = (SystemZCC::CondCodes)Cond[0].getImm();
- BuildMI(&MBB, DL, getBrCond(CC)).addMBB(TBB);
- ++Count;
-
- if (FBB) {
- // Two-way Conditional branch. Insert the second branch.
- BuildMI(&MBB, DL, get(SystemZ::JMP)).addMBB(FBB);
- ++Count;
- }
- return Count;
-}
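-
-// e.g. InsertBranch(MBB, TBB, FBB, { Imm(SystemZCC::E) }, DL) emits
-//   je TBB
-//   j  FBB
-// and returns 2; with an empty Cond it emits a single "j TBB" and returns 1.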
-
-const MCInstrDesc&
-SystemZInstrInfo::getBrCond(SystemZCC::CondCodes CC) const {
- switch (CC) {
- default:
- llvm_unreachable("Unknown condition code!");
- case SystemZCC::O: return get(SystemZ::JO);
- case SystemZCC::H: return get(SystemZ::JH);
- case SystemZCC::NLE: return get(SystemZ::JNLE);
- case SystemZCC::L: return get(SystemZ::JL);
- case SystemZCC::NHE: return get(SystemZ::JNHE);
- case SystemZCC::LH: return get(SystemZ::JLH);
- case SystemZCC::NE: return get(SystemZ::JNE);
- case SystemZCC::E: return get(SystemZ::JE);
- case SystemZCC::NLH: return get(SystemZ::JNLH);
- case SystemZCC::HE: return get(SystemZ::JHE);
- case SystemZCC::NL: return get(SystemZ::JNL);
- case SystemZCC::LE: return get(SystemZ::JLE);
- case SystemZCC::NH: return get(SystemZ::JNH);
- case SystemZCC::NO: return get(SystemZ::JNO);
- }
-}
-
-SystemZCC::CondCodes
-SystemZInstrInfo::getCondFromBranchOpc(unsigned Opc) const {
- switch (Opc) {
- default: return SystemZCC::INVALID;
- case SystemZ::JO: return SystemZCC::O;
- case SystemZ::JH: return SystemZCC::H;
- case SystemZ::JNLE: return SystemZCC::NLE;
- case SystemZ::JL: return SystemZCC::L;
- case SystemZ::JNHE: return SystemZCC::NHE;
- case SystemZ::JLH: return SystemZCC::LH;
- case SystemZ::JNE: return SystemZCC::NE;
- case SystemZ::JE: return SystemZCC::E;
- case SystemZ::JNLH: return SystemZCC::NLH;
- case SystemZ::JHE: return SystemZCC::HE;
- case SystemZ::JNL: return SystemZCC::NL;
- case SystemZ::JLE: return SystemZCC::LE;
- case SystemZ::JNH: return SystemZCC::NH;
- case SystemZ::JNO: return SystemZCC::NO;
- }
-}
-
-SystemZCC::CondCodes
-SystemZInstrInfo::getOppositeCondition(SystemZCC::CondCodes CC) const {
- switch (CC) {
- default:
- llvm_unreachable("Invalid condition!");
- case SystemZCC::O: return SystemZCC::NO;
- case SystemZCC::H: return SystemZCC::NH;
- case SystemZCC::NLE: return SystemZCC::LE;
- case SystemZCC::L: return SystemZCC::NL;
- case SystemZCC::NHE: return SystemZCC::HE;
- case SystemZCC::LH: return SystemZCC::NLH;
- case SystemZCC::NE: return SystemZCC::E;
- case SystemZCC::E: return SystemZCC::NE;
- case SystemZCC::NLH: return SystemZCC::LH;
- case SystemZCC::HE: return SystemZCC::NHE;
- case SystemZCC::NL: return SystemZCC::L;
- case SystemZCC::LE: return SystemZCC::NLE;
- case SystemZCC::NH: return SystemZCC::H;
- case SystemZCC::NO: return SystemZCC::O;
- }
-}
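-
-// Taken together, these tables let ReverseBranchCondition round-trip a
-// branch condition; e.g. for a branch on "equal":
-//   getCondFromBranchOpc(SystemZ::JE)  == SystemZCC::E
-//   getOppositeCondition(SystemZCC::E) == SystemZCC::NE
-//   getBrCond(SystemZCC::NE)           == get(SystemZ::JNE)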
-
-const MCInstrDesc&
-SystemZInstrInfo::getLongDispOpc(unsigned Opc) const {
- switch (Opc) {
- default:
- llvm_unreachable("Don't have long disp version of this instruction");
- case SystemZ::MOV32mr: return get(SystemZ::MOV32mry);
- case SystemZ::MOV32rm: return get(SystemZ::MOV32rmy);
- case SystemZ::MOVSX32rm16: return get(SystemZ::MOVSX32rm16y);
- case SystemZ::MOV32m8r: return get(SystemZ::MOV32m8ry);
- case SystemZ::MOV32m16r: return get(SystemZ::MOV32m16ry);
- case SystemZ::MOV64m8r: return get(SystemZ::MOV64m8ry);
- case SystemZ::MOV64m16r: return get(SystemZ::MOV64m16ry);
- case SystemZ::MOV64m32r: return get(SystemZ::MOV64m32ry);
- case SystemZ::MOV8mi: return get(SystemZ::MOV8miy);
- case SystemZ::MUL32rm: return get(SystemZ::MUL32rmy);
- case SystemZ::CMP32rm: return get(SystemZ::CMP32rmy);
- case SystemZ::UCMP32rm: return get(SystemZ::UCMP32rmy);
- case SystemZ::FMOV32mr: return get(SystemZ::FMOV32mry);
- case SystemZ::FMOV64mr: return get(SystemZ::FMOV64mry);
- case SystemZ::FMOV32rm: return get(SystemZ::FMOV32rmy);
- case SystemZ::FMOV64rm: return get(SystemZ::FMOV64rmy);
- case SystemZ::MOV64Pmr: return get(SystemZ::MOV64Pmry);
- case SystemZ::MOV64Prm: return get(SystemZ::MOV64Prmy);
- }
-}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
deleted file mode 100644
index 6a31e94..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ /dev/null
@@ -1,113 +0,0 @@
-//===-- SystemZInstrInfo.h - SystemZ Instruction Information ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the SystemZ implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_SYSTEMZINSTRINFO_H
-#define LLVM_TARGET_SYSTEMZINSTRINFO_H
-
-#include "SystemZ.h"
-#include "SystemZRegisterInfo.h"
-#include "llvm/ADT/IndexedMap.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-#define GET_INSTRINFO_HEADER
-#include "SystemZGenInstrInfo.inc"
-
-namespace llvm {
-
-class SystemZTargetMachine;
-
-/// SystemZII - This namespace holds all of the target specific flags that
-/// instruction info tracks.
-///
-namespace SystemZII {
- enum {
- //===------------------------------------------------------------------===//
- // SystemZ Specific MachineOperand flags.
-
- MO_NO_FLAG = 0,
-
- /// MO_GOTENT - On a symbol operand this indicates that the immediate is
- /// the offset to the location of the symbol name from the base of the GOT.
- ///
- /// SYMBOL_LABEL @GOTENT
- MO_GOTENT = 1,
-
- /// MO_PLT - On a symbol operand this indicates that the immediate is
- /// offset to the PLT entry of symbol name from the current code location.
- ///
- /// SYMBOL_LABEL @PLT
- MO_PLT = 2
- };
-}
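-
-// As an illustration, a call to an external function lowered with MO_PLT is
-// printed with the @PLT modifier, e.g. "brasl %r14, foo@PLT" (the function
-// name here is arbitrary).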
-
-class SystemZInstrInfo : public SystemZGenInstrInfo {
- const SystemZRegisterInfo RI;
- SystemZTargetMachine &TM;
-public:
- explicit SystemZInstrInfo(SystemZTargetMachine &TM);
-
- /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- /// such, whenever a client has an instance of instruction info, it should
- /// always be able to get register info as well (through this method).
- ///
- virtual const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
-
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const;
-
- unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
- unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned SrcReg, bool isKill,
- int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
-
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-
- SystemZCC::CondCodes getOppositeCondition(SystemZCC::CondCodes CC) const;
- SystemZCC::CondCodes getCondFromBranchOpc(unsigned Opc) const;
- const MCInstrDesc& getBrCond(SystemZCC::CondCodes CC) const;
- const MCInstrDesc& getLongDispOpc(unsigned Opc) const;
-
- const MCInstrDesc& getMemoryInstr(unsigned Opc, int64_t Offset = 0) const {
- if (Offset < 0 || Offset >= 4096)
- return getLongDispOpc(Opc);
- else
- return get(Opc);
- }
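-
- // For example, per the 12-bit unsigned displacement check above:
- //   getMemoryInstr(SystemZ::MOV32mr, 4000) yields ST  (the short form),
- //   getMemoryInstr(SystemZ::MOV32mr, 4096) yields STY (MOV32mry),
- //   getMemoryInstr(SystemZ::MOV32mr, -4)   yields STY (MOV32mry).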
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
deleted file mode 100644
index 580d65b..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ /dev/null
@@ -1,1147 +0,0 @@
-//===- SystemZInstrInfo.td - SystemZ Instruction defs --------*- tblgen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the SystemZ instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// SystemZ Instruction Predicate Definitions.
-def IsZ10 : Predicate<"Subtarget.isZ10()">;
-
-include "SystemZInstrFormats.td"
-
-//===----------------------------------------------------------------------===//
-// Type Constraints.
-//===----------------------------------------------------------------------===//
-class SDTCisI8<int OpNum> : SDTCisVT<OpNum, i8>;
-class SDTCisI16<int OpNum> : SDTCisVT<OpNum, i16>;
-class SDTCisI32<int OpNum> : SDTCisVT<OpNum, i32>;
-class SDTCisI64<int OpNum> : SDTCisVT<OpNum, i64>;
-
-//===----------------------------------------------------------------------===//
-// Type Profiles.
-//===----------------------------------------------------------------------===//
-def SDT_SystemZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
-def SDT_SystemZCallSeqStart : SDCallSeqStart<[SDTCisI64<0>]>;
-def SDT_SystemZCallSeqEnd : SDCallSeqEnd<[SDTCisI64<0>, SDTCisI64<1>]>;
-def SDT_CmpTest : SDTypeProfile<1, 2, [SDTCisI64<0>,
- SDTCisSameAs<1, 2>]>;
-def SDT_BrCond : SDTypeProfile<0, 3,
- [SDTCisVT<0, OtherVT>,
- SDTCisI8<1>, SDTCisVT<2, i64>]>;
-def SDT_SelectCC : SDTypeProfile<1, 4,
- [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
- SDTCisI8<3>, SDTCisVT<4, i64>]>;
-def SDT_Address : SDTypeProfile<1, 1,
- [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
-
-//===----------------------------------------------------------------------===//
-// SystemZ Specific Node Definitions.
-//===----------------------------------------------------------------------===//
-def SystemZretflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue]>;
-def SystemZcall : SDNode<"SystemZISD::CALL", SDT_SystemZCall,
- [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>;
-def SystemZcallseq_start :
- SDNode<"ISD::CALLSEQ_START", SDT_SystemZCallSeqStart,
- [SDNPHasChain, SDNPOutGlue]>;
-def SystemZcallseq_end :
- SDNode<"ISD::CALLSEQ_END", SDT_SystemZCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-def SystemZcmp : SDNode<"SystemZISD::CMP", SDT_CmpTest>;
-def SystemZucmp : SDNode<"SystemZISD::UCMP", SDT_CmpTest>;
-def SystemZbrcond : SDNode<"SystemZISD::BRCOND", SDT_BrCond,
- [SDNPHasChain]>;
-def SystemZselect : SDNode<"SystemZISD::SELECT", SDT_SelectCC>;
-def SystemZpcrelwrapper : SDNode<"SystemZISD::PCRelativeWrapper", SDT_Address, []>;
-
-
-include "SystemZOperands.td"
-
-//===----------------------------------------------------------------------===//
-// Instruction list...
-
-def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt),
- "#ADJCALLSTACKDOWN",
- [(SystemZcallseq_start timm:$amt)]>;
-def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
- "#ADJCALLSTACKUP",
- [(SystemZcallseq_end timm:$amt1, timm:$amt2)]>;
-
-let Uses = [PSW], usesCustomInserter = 1 in {
- def Select32 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cc),
- "# Select32 PSEUDO",
- [(set GR32:$dst,
- (SystemZselect GR32:$src1, GR32:$src2, imm:$cc, PSW))]>;
- def Select64 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$cc),
- "# Select64 PSEUDO",
- [(set GR64:$dst,
- (SystemZselect GR64:$src1, GR64:$src2, imm:$cc, PSW))]>;
-}
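-
-// Select32/Select64 are expanded after instruction selection (via
-// EmitInstrWithCustomInserter) into a conditional-branch diamond feeding a
-// PHI, since no conditional-move pattern is provided here.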
-
-
-//===----------------------------------------------------------------------===//
-// Control Flow Instructions...
-//
-
-// FIXME: Provide proper encoding!
-let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in {
- def RET : Pseudo<(outs), (ins), "br\t%r14", [(SystemZretflag)]>;
-}
-
-let isBranch = 1, isTerminator = 1 in {
- let isBarrier = 1 in {
- def JMP : Pseudo<(outs), (ins brtarget:$dst), "j\t{$dst}", [(br bb:$dst)]>;
-
- let isIndirectBranch = 1 in
- def JMPr : Pseudo<(outs), (ins GR64:$dst), "br\t{$dst}", [(brind GR64:$dst)]>;
- }
-
- let Uses = [PSW] in {
- def JO : Pseudo<(outs), (ins brtarget:$dst),
- "jo\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_O, PSW)]>;
- def JH : Pseudo<(outs), (ins brtarget:$dst),
- "jh\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_H, PSW)]>;
- def JNLE: Pseudo<(outs), (ins brtarget:$dst),
- "jnle\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NLE, PSW)]>;
- def JL : Pseudo<(outs), (ins brtarget:$dst),
- "jl\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_L, PSW)]>;
- def JNHE: Pseudo<(outs), (ins brtarget:$dst),
- "jnhe\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NHE, PSW)]>;
- def JLH : Pseudo<(outs), (ins brtarget:$dst),
- "jlh\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_LH, PSW)]>;
- def JNE : Pseudo<(outs), (ins brtarget:$dst),
- "jne\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NE, PSW)]>;
- def JE : Pseudo<(outs), (ins brtarget:$dst),
- "je\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_E, PSW)]>;
- def JNLH: Pseudo<(outs), (ins brtarget:$dst),
- "jnlh\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NLH, PSW)]>;
- def JHE : Pseudo<(outs), (ins brtarget:$dst),
- "jhe\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_HE, PSW)]>;
- def JNL : Pseudo<(outs), (ins brtarget:$dst),
- "jnl\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NL, PSW)]>;
- def JLE : Pseudo<(outs), (ins brtarget:$dst),
- "jle\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_LE, PSW)]>;
- def JNH : Pseudo<(outs), (ins brtarget:$dst),
- "jnh\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NH, PSW)]>;
- def JNO : Pseudo<(outs), (ins brtarget:$dst),
- "jno\t$dst",
- [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NO, PSW)]>;
- } // Uses = [PSW]
-} // isBranch = 1
-
-//===----------------------------------------------------------------------===//
-// Call Instructions...
-//
-
-let isCall = 1 in
- // All calls clobber the non-callee saved registers. Uses for argument
- // registers are added manually.
- let Defs = [R0D, R1D, R2D, R3D, R4D, R5D, R14D,
- F0L, F1L, F2L, F3L, F4L, F5L, F6L, F7L] in {
- def CALLi : Pseudo<(outs), (ins imm_pcrel:$dst, variable_ops),
- "brasl\t%r14, $dst", [(SystemZcall imm:$dst)]>;
- def CALLr : Pseudo<(outs), (ins ADDR64:$dst, variable_ops),
- "basr\t%r14, $dst", [(SystemZcall ADDR64:$dst)]>;
- }
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous Instructions.
-//
-
-let isReMaterializable = 1 in
-// FIXME: Provide imm12 variant
-// FIXME: Address should be halfword aligned...
-def LA64r : RXI<0x47,
- (outs GR64:$dst), (ins laaddr:$src),
- "lay\t{$dst, $src}",
- [(set GR64:$dst, laaddr:$src)]>;
-def LA64rm : RXYI<0x71E3,
- (outs GR64:$dst), (ins i64imm:$src),
- "larl\t{$dst, $src}",
- [(set GR64:$dst,
- (SystemZpcrelwrapper tglobaladdr:$src))]>;
-
-let neverHasSideEffects = 1 in
-def NOP : Pseudo<(outs), (ins), "# no-op", []>;
-
-//===----------------------------------------------------------------------===//
-// Move Instructions
-
-let neverHasSideEffects = 1 in {
-def MOV32rr : RRI<0x18,
- (outs GR32:$dst), (ins GR32:$src),
- "lr\t{$dst, $src}",
- []>;
-def MOV64rr : RREI<0xB904,
- (outs GR64:$dst), (ins GR64:$src),
- "lgr\t{$dst, $src}",
- []>;
-def MOV128rr : Pseudo<(outs GR128:$dst), (ins GR128:$src),
- "# MOV128 PSEUDO!\n"
- "\tlgr\t${dst:subreg_odd}, ${src:subreg_odd}\n"
- "\tlgr\t${dst:subreg_even}, ${src:subreg_even}",
- []>;
-def MOV64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src),
- "# MOV64P PSEUDO!\n"
- "\tlr\t${dst:subreg_odd}, ${src:subreg_odd}\n"
- "\tlr\t${dst:subreg_even}, ${src:subreg_even}",
- []>;
-}
-
-def MOVSX64rr32 : RREI<0xB914,
- (outs GR64:$dst), (ins GR32:$src),
- "lgfr\t{$dst, $src}",
- [(set GR64:$dst, (sext GR32:$src))]>;
-def MOVZX64rr32 : RREI<0xB916,
- (outs GR64:$dst), (ins GR32:$src),
- "llgfr\t{$dst, $src}",
- [(set GR64:$dst, (zext GR32:$src))]>;
-
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
-def MOV32ri16 : RII<0x8A7,
- (outs GR32:$dst), (ins s16imm:$src),
- "lhi\t{$dst, $src}",
- [(set GR32:$dst, immSExt16:$src)]>;
-def MOV64ri16 : RII<0x9A7,
- (outs GR64:$dst), (ins s16imm64:$src),
- "lghi\t{$dst, $src}",
- [(set GR64:$dst, immSExt16:$src)]>;
-
-def MOV64rill16 : RII<0xFA5,
- (outs GR64:$dst), (ins u16imm:$src),
- "llill\t{$dst, $src}",
- [(set GR64:$dst, i64ll16:$src)]>;
-def MOV64rilh16 : RII<0xEA5,
- (outs GR64:$dst), (ins u16imm:$src),
- "llilh\t{$dst, $src}",
- [(set GR64:$dst, i64lh16:$src)]>;
-def MOV64rihl16 : RII<0xDA5,
- (outs GR64:$dst), (ins u16imm:$src),
- "llihl\t{$dst, $src}",
- [(set GR64:$dst, i64hl16:$src)]>;
-def MOV64rihh16 : RII<0xCA5,
- (outs GR64:$dst), (ins u16imm:$src),
- "llihh\t{$dst, $src}",
- [(set GR64:$dst, i64hh16:$src)]>;
-
-def MOV64ri32 : RILI<0x1C0,
- (outs GR64:$dst), (ins s32imm64:$src),
- "lgfi\t{$dst, $src}",
- [(set GR64:$dst, immSExt32:$src)]>;
-def MOV64rilo32 : RILI<0xFC0,
- (outs GR64:$dst), (ins u32imm:$src),
- "llilf\t{$dst, $src}",
- [(set GR64:$dst, i64lo32:$src)]>;
-def MOV64rihi32 : RILI<0xEC0, (outs GR64:$dst), (ins u32imm:$src),
- "llihf\t{$dst, $src}",
- [(set GR64:$dst, i64hi32:$src)]>;
-}
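-
-// For example, small constants pick the cheapest of the forms above
-// (register choice is arbitrary):
-//   lghi  %r1, -5            ; MOV64ri16, any signed 16-bit value
-//   llilh %r1, 0x1234        ; MOV64rilh16, yields 0x0000000012340000
-//   lgfi  %r1, -100000       ; MOV64ri32, any signed 32-bit value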
-
-let canFoldAsLoad = 1, isReMaterializable = 1 in {
-def MOV32rm : RXI<0x58,
- (outs GR32:$dst), (ins rriaddr12:$src),
- "l\t{$dst, $src}",
- [(set GR32:$dst, (load rriaddr12:$src))]>;
-def MOV32rmy : RXYI<0x58E3,
- (outs GR32:$dst), (ins rriaddr:$src),
- "ly\t{$dst, $src}",
- [(set GR32:$dst, (load rriaddr:$src))]>;
-def MOV64rm : RXYI<0x04E3,
- (outs GR64:$dst), (ins rriaddr:$src),
- "lg\t{$dst, $src}",
- [(set GR64:$dst, (load rriaddr:$src))]>;
-def MOV64Prm : Pseudo<(outs GR64P:$dst), (ins rriaddr12:$src),
- "# MOV64P PSEUDO!\n"
- "\tl\t${dst:subreg_odd}, $src\n"
- "\tl\t${dst:subreg_even}, 4+$src",
- [(set GR64P:$dst, (load rriaddr12:$src))]>;
-def MOV64Prmy : Pseudo<(outs GR64P:$dst), (ins rriaddr:$src),
- "# MOV64P PSEUDO!\n"
- "\tly\t${dst:subreg_odd}, $src\n"
- "\tly\t${dst:subreg_even}, 4+$src",
- [(set GR64P:$dst, (load rriaddr:$src))]>;
-def MOV128rm : Pseudo<(outs GR128:$dst), (ins rriaddr:$src),
- "# MOV128 PSEUDO!\n"
- "\tlg\t${dst:subreg_odd}, $src\n"
- "\tlg\t${dst:subreg_even}, 8+$src",
- [(set GR128:$dst, (load rriaddr:$src))]>;
-}
-
-def MOV32mr : RXI<0x50,
- (outs), (ins rriaddr12:$dst, GR32:$src),
- "st\t{$src, $dst}",
- [(store GR32:$src, rriaddr12:$dst)]>;
-def MOV32mry : RXYI<0x50E3,
- (outs), (ins rriaddr:$dst, GR32:$src),
- "sty\t{$src, $dst}",
- [(store GR32:$src, rriaddr:$dst)]>;
-def MOV64mr : RXYI<0x24E3,
- (outs), (ins rriaddr:$dst, GR64:$src),
- "stg\t{$src, $dst}",
- [(store GR64:$src, rriaddr:$dst)]>;
-def MOV64Pmr : Pseudo<(outs), (ins rriaddr12:$dst, GR64P:$src),
- "# MOV64P PSEUDO!\n"
- "\tst\t${src:subreg_odd}, $dst\n"
- "\tst\t${src:subreg_even}, 4+$dst",
- [(store GR64P:$src, rriaddr12:$dst)]>;
-def MOV64Pmry : Pseudo<(outs), (ins rriaddr:$dst, GR64P:$src),
- "# MOV64P PSEUDO!\n"
- "\tsty\t${src:subreg_odd}, $dst\n"
- "\tsty\t${src:subreg_even}, 4+$dst",
- [(store GR64P:$src, rriaddr:$dst)]>;
-def MOV128mr : Pseudo<(outs), (ins rriaddr:$dst, GR128:$src),
- "# MOV128 PSEUDO!\n"
- "\tstg\t${src:subreg_odd}, $dst\n"
- "\tstg\t${src:subreg_even}, 8+$dst",
- [(store GR128:$src, rriaddr:$dst)]>;
-
-def MOV8mi : SII<0x92,
- (outs), (ins riaddr12:$dst, i32i8imm:$src),
- "mvi\t{$dst, $src}",
- [(truncstorei8 (i32 i32immSExt8:$src), riaddr12:$dst)]>;
-def MOV8miy : SIYI<0x52EB,
- (outs), (ins riaddr:$dst, i32i8imm:$src),
- "mviy\t{$dst, $src}",
- [(truncstorei8 (i32 i32immSExt8:$src), riaddr:$dst)]>;
-
-let AddedComplexity = 2 in {
-def MOV16mi : SILI<0xE544,
- (outs), (ins riaddr12:$dst, s16imm:$src),
- "mvhhi\t{$dst, $src}",
- [(truncstorei16 (i32 i32immSExt16:$src), riaddr12:$dst)]>,
- Requires<[IsZ10]>;
-def MOV32mi16 : SILI<0xE54C,
- (outs), (ins riaddr12:$dst, s32imm:$src),
- "mvhi\t{$dst, $src}",
- [(store (i32 immSExt16:$src), riaddr12:$dst)]>,
- Requires<[IsZ10]>;
-def MOV64mi16 : SILI<0xE548,
- (outs), (ins riaddr12:$dst, s32imm64:$src),
- "mvghi\t{$dst, $src}",
- [(store (i64 immSExt16:$src), riaddr12:$dst)]>,
- Requires<[IsZ10]>;
-}
-
-// sexts
-def MOVSX32rr8 : RREI<0xB926,
- (outs GR32:$dst), (ins GR32:$src),
- "lbr\t{$dst, $src}",
- [(set GR32:$dst, (sext_inreg GR32:$src, i8))]>;
-def MOVSX64rr8 : RREI<0xB906,
- (outs GR64:$dst), (ins GR64:$src),
- "lgbr\t{$dst, $src}",
- [(set GR64:$dst, (sext_inreg GR64:$src, i8))]>;
-def MOVSX32rr16 : RREI<0xB927,
- (outs GR32:$dst), (ins GR32:$src),
- "lhr\t{$dst, $src}",
- [(set GR32:$dst, (sext_inreg GR32:$src, i16))]>;
-def MOVSX64rr16 : RREI<0xB907,
- (outs GR64:$dst), (ins GR64:$src),
- "lghr\t{$dst, $src}",
- [(set GR64:$dst, (sext_inreg GR64:$src, i16))]>;
-
-// extloads
-def MOVSX32rm8 : RXYI<0x76E3,
- (outs GR32:$dst), (ins rriaddr:$src),
- "lb\t{$dst, $src}",
- [(set GR32:$dst, (sextloadi32i8 rriaddr:$src))]>;
-def MOVSX32rm16 : RXI<0x48,
- (outs GR32:$dst), (ins rriaddr12:$src),
- "lh\t{$dst, $src}",
- [(set GR32:$dst, (sextloadi32i16 rriaddr12:$src))]>;
-def MOVSX32rm16y : RXYI<0x78E3,
- (outs GR32:$dst), (ins rriaddr:$src),
- "lhy\t{$dst, $src}",
- [(set GR32:$dst, (sextloadi32i16 rriaddr:$src))]>;
-def MOVSX64rm8 : RXYI<0x77E3,
- (outs GR64:$dst), (ins rriaddr:$src),
- "lgb\t{$dst, $src}",
- [(set GR64:$dst, (sextloadi64i8 rriaddr:$src))]>;
-def MOVSX64rm16 : RXYI<0x15E3,
- (outs GR64:$dst), (ins rriaddr:$src),
- "lgh\t{$dst, $src}",
- [(set GR64:$dst, (sextloadi64i16 rriaddr:$src))]>;
-def MOVSX64rm32 : RXYI<0x14E3,
- (outs GR64:$dst), (ins rriaddr:$src),
- "lgf\t{$dst, $src}",
- [(set GR64:$dst, (sextloadi64i32 rriaddr:$src))]>;
-
-def MOVZX32rm8 : RXYI<0x94E3,
- (outs GR32:$dst), (ins rriaddr:$src),
- "llc\t{$dst, $src}",
- [(set GR32:$dst, (zextloadi32i8 rriaddr:$src))]>;
-def MOVZX32rm16 : RXYI<0x95E3,
- (outs GR32:$dst), (ins rriaddr:$src),
- "llh\t{$dst, $src}",
- [(set GR32:$dst, (zextloadi32i16 rriaddr:$src))]>;
-def MOVZX64rm8 : RXYI<0x90E3,
- (outs GR64:$dst), (ins rriaddr:$src),
- "llgc\t{$dst, $src}",
- [(set GR64:$dst, (zextloadi64i8 rriaddr:$src))]>;
-def MOVZX64rm16 : RXYI<0x91E3,
- (outs GR64:$dst), (ins rriaddr:$src),
- "llgh\t{$dst, $src}",
- [(set GR64:$dst, (zextloadi64i16 rriaddr:$src))]>;
-def MOVZX64rm32 : RXYI<0x16E3,
- (outs GR64:$dst), (ins rriaddr:$src),
- "llgf\t{$dst, $src}",
- [(set GR64:$dst, (zextloadi64i32 rriaddr:$src))]>;
-
-// truncstores
-def MOV32m8r : RXI<0x42,
- (outs), (ins rriaddr12:$dst, GR32:$src),
- "stc\t{$src, $dst}",
- [(truncstorei8 GR32:$src, rriaddr12:$dst)]>;
-
-def MOV32m8ry : RXYI<0x72E3,
- (outs), (ins rriaddr:$dst, GR32:$src),
- "stcy\t{$src, $dst}",
- [(truncstorei8 GR32:$src, rriaddr:$dst)]>;
-
-def MOV32m16r : RXI<0x40,
- (outs), (ins rriaddr12:$dst, GR32:$src),
- "sth\t{$src, $dst}",
- [(truncstorei16 GR32:$src, rriaddr12:$dst)]>;
-
-def MOV32m16ry : RXYI<0x70E3,
- (outs), (ins rriaddr:$dst, GR32:$src),
- "sthy\t{$src, $dst}",
- [(truncstorei16 GR32:$src, rriaddr:$dst)]>;
-
-def MOV64m8r : RXI<0x42,
- (outs), (ins rriaddr12:$dst, GR64:$src),
- "stc\t{$src, $dst}",
- [(truncstorei8 GR64:$src, rriaddr12:$dst)]>;
-
-def MOV64m8ry : RXYI<0x72E3,
- (outs), (ins rriaddr:$dst, GR64:$src),
- "stcy\t{$src, $dst}",
- [(truncstorei8 GR64:$src, rriaddr:$dst)]>;
-
-def MOV64m16r : RXI<0x40,
- (outs), (ins rriaddr12:$dst, GR64:$src),
- "sth\t{$src, $dst}",
- [(truncstorei16 GR64:$src, rriaddr12:$dst)]>;
-
-def MOV64m16ry : RXYI<0x70E3,
- (outs), (ins rriaddr:$dst, GR64:$src),
- "sthy\t{$src, $dst}",
- [(truncstorei16 GR64:$src, rriaddr:$dst)]>;
-
-def MOV64m32r : RXI<0x50,
- (outs), (ins rriaddr12:$dst, GR64:$src),
- "st\t{$src, $dst}",
- [(truncstorei32 GR64:$src, rriaddr12:$dst)]>;
-
-def MOV64m32ry : RXYI<0x50E3,
- (outs), (ins rriaddr:$dst, GR64:$src),
- "sty\t{$src, $dst}",
- [(truncstorei32 GR64:$src, rriaddr:$dst)]>;
-
-// multiple-register moves
-// FIXME: should we use multiple arg nodes?
-def MOV32mrm : RSYI<0x90EB,
- (outs), (ins riaddr:$dst, GR32:$from, GR32:$to),
- "stmy\t{$from, $to, $dst}",
- []>;
-def MOV64mrm : RSYI<0x24EB,
- (outs), (ins riaddr:$dst, GR64:$from, GR64:$to),
- "stmg\t{$from, $to, $dst}",
- []>;
-def MOV32rmm : RSYI<0x90EB,
- (outs GR32:$from, GR32:$to), (ins riaddr:$dst),
- "lmy\t{$from, $to, $dst}",
- []>;
-def MOV64rmm : RSYI<0x04EB,
- (outs GR64:$from, GR64:$to), (ins riaddr:$dst),
- "lmg\t{$from, $to, $dst}",
- []>;
-
-let isReMaterializable = 1, neverHasSideEffects = 1, isAsCheapAsAMove = 1,
- Constraints = "$src = $dst" in {
-def MOV64Pr0_even : Pseudo<(outs GR64P:$dst), (ins GR64P:$src),
- "lhi\t${dst:subreg_even}, 0",
- []>;
-def MOV128r0_even : Pseudo<(outs GR128:$dst), (ins GR128:$src),
- "lghi\t${dst:subreg_even}, 0",
- []>;
-}
-
-// Byte swaps
-def BSWAP32rr : RREI<0xB91F,
- (outs GR32:$dst), (ins GR32:$src),
- "lrvr\t{$dst, $src}",
- [(set GR32:$dst, (bswap GR32:$src))]>;
-def BSWAP64rr : RREI<0xB90F,
- (outs GR64:$dst), (ins GR64:$src),
- "lrvgr\t{$dst, $src}",
- [(set GR64:$dst, (bswap GR64:$src))]>;
-
-// FIXME: this is an invalid pattern for big-endian
-//def BSWAP16rm : RXYI<0x1FE3, (outs GR32:$dst), (ins rriaddr:$src),
-// "lrvh\t{$dst, $src}",
-// [(set GR32:$dst, (bswap (extloadi32i16 rriaddr:$src)))]>;
-def BSWAP32rm : RXYI<0x1EE3, (outs GR32:$dst), (ins rriaddr:$src),
- "lrv\t{$dst, $src}",
- [(set GR32:$dst, (bswap (load rriaddr:$src)))]>;
-def BSWAP64rm : RXYI<0x0FE3, (outs GR64:$dst), (ins rriaddr:$src),
- "lrvg\t{$dst, $src}",
- [(set GR64:$dst, (bswap (load rriaddr:$src)))]>;
-
-//def BSWAP16mr : RXYI<0xE33F, (outs), (ins rriaddr:$dst, GR32:$src),
-// "strvh\t{$src, $dst}",
-// [(truncstorei16 (bswap GR32:$src), rriaddr:$dst)]>;
-def BSWAP32mr : RXYI<0xE33E, (outs), (ins rriaddr:$dst, GR32:$src),
- "strv\t{$src, $dst}",
- [(store (bswap GR32:$src), rriaddr:$dst)]>;
-def BSWAP64mr : RXYI<0xE32F, (outs), (ins rriaddr:$dst, GR64:$src),
- "strvg\t{$src, $dst}",
- [(store (bswap GR64:$src), rriaddr:$dst)]>;
-
-//===----------------------------------------------------------------------===//
-// Arithmetic Instructions
-
-let Defs = [PSW] in {
-def NEG32rr : RRI<0x13,
- (outs GR32:$dst), (ins GR32:$src),
- "lcr\t{$dst, $src}",
- [(set GR32:$dst, (ineg GR32:$src)),
- (implicit PSW)]>;
-def NEG64rr : RREI<0xB903, (outs GR64:$dst), (ins GR64:$src),
- "lcgr\t{$dst, $src}",
- [(set GR64:$dst, (ineg GR64:$src)),
- (implicit PSW)]>;
-def NEG64rr32 : RREI<0xB913, (outs GR64:$dst), (ins GR32:$src),
- "lcgfr\t{$dst, $src}",
- [(set GR64:$dst, (ineg (sext GR32:$src))),
- (implicit PSW)]>;
-}
-
-let Constraints = "$src1 = $dst" in {
-
-let Defs = [PSW] in {
-
-let isCommutable = 1 in { // X = ADD Y, Z == X = ADD Z, Y
-def ADD32rr : RRI<0x1A, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "ar\t{$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, GR32:$src2)),
- (implicit PSW)]>;
-def ADD64rr : RREI<0xB908, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "agr\t{$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
- (implicit PSW)]>;
-}
-
-def ADD32rm : RXI<0x5A, (outs GR32:$dst), (ins GR32:$src1, rriaddr12:$src2),
- "a\t{$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-def ADD32rmy : RXYI<0xE35A, (outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
- "ay\t{$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-def ADD64rm : RXYI<0xE308, (outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
- "ag\t{$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-
-
-def ADD32ri16 : RII<0xA7A,
- (outs GR32:$dst), (ins GR32:$src1, s16imm:$src2),
- "ahi\t{$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, immSExt16:$src2)),
- (implicit PSW)]>;
-def ADD32ri : RILI<0xC29,
- (outs GR32:$dst), (ins GR32:$src1, s32imm:$src2),
- "afi\t{$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, imm:$src2)),
- (implicit PSW)]>;
-def ADD64ri16 : RILI<0xA7B,
- (outs GR64:$dst), (ins GR64:$src1, s16imm64:$src2),
- "aghi\t{$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, immSExt16:$src2)),
- (implicit PSW)]>;
-def ADD64ri32 : RILI<0xC28,
- (outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
- "agfi\t{$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, immSExt32:$src2)),
- (implicit PSW)]>;
-
-let isCommutable = 1 in { // X = ADC Y, Z == X = ADC Z, Y
-def ADC32rr : RRI<0x1E, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "alr\t{$dst, $src2}",
- [(set GR32:$dst, (addc GR32:$src1, GR32:$src2))]>;
-def ADC64rr : RREI<0xB90A, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "algr\t{$dst, $src2}",
- [(set GR64:$dst, (addc GR64:$src1, GR64:$src2))]>;
-}
-
-def ADC32ri : RILI<0xC2B,
- (outs GR32:$dst), (ins GR32:$src1, s32imm:$src2),
- "alfi\t{$dst, $src2}",
- [(set GR32:$dst, (addc GR32:$src1, imm:$src2))]>;
-def ADC64ri32 : RILI<0xC2A,
- (outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
- "algfi\t{$dst, $src2}",
- [(set GR64:$dst, (addc GR64:$src1, immSExt32:$src2))]>;
-
-let Uses = [PSW] in {
-def ADDE32rr : RREI<0xB998, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "alcr\t{$dst, $src2}",
- [(set GR32:$dst, (adde GR32:$src1, GR32:$src2)),
- (implicit PSW)]>;
-def ADDE64rr : RREI<0xB988, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "alcgr\t{$dst, $src2}",
- [(set GR64:$dst, (adde GR64:$src1, GR64:$src2)),
- (implicit PSW)]>;
-}
-
-let isCommutable = 1 in { // X = AND Y, Z == X = AND Z, Y
-def AND32rr : RRI<0x14,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "nr\t{$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, GR32:$src2))]>;
-def AND64rr : RREI<0xB980,
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "ngr\t{$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
-}
-
-def AND32rm : RXI<0x54, (outs GR32:$dst), (ins GR32:$src1, rriaddr12:$src2),
- "n\t{$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-def AND32rmy : RXYI<0xE354, (outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
- "ny\t{$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-def AND64rm : RXYI<0xE360, (outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
- "ng\t{$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-
-def AND32rill16 : RII<0xA57,
- (outs GR32:$dst), (ins GR32:$src1, u16imm:$src2),
- "nill\t{$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, i32ll16c:$src2))]>;
-def AND64rill16 : RII<0xA57,
- (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
- "nill\t{$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64ll16c:$src2))]>;
-
-def AND32rilh16 : RII<0xA56,
- (outs GR32:$dst), (ins GR32:$src1, u16imm:$src2),
- "nilh\t{$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, i32lh16c:$src2))]>;
-def AND64rilh16 : RII<0xA56,
- (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
- "nilh\t{$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64lh16c:$src2))]>;
-
-def AND64rihl16 : RII<0xA55,
- (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
- "nihl\t{$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64hl16c:$src2))]>;
-def AND64rihh16 : RII<0xA54,
- (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
- "nihh\t{$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64hh16c:$src2))]>;
-
-def AND32ri : RILI<0xC0B,
- (outs GR32:$dst), (ins GR32:$src1, u32imm:$src2),
- "nilf\t{$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, imm:$src2))]>;
-def AND64rilo32 : RILI<0xC0B,
- (outs GR64:$dst), (ins GR64:$src1, u32imm:$src2),
- "nilf\t{$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64lo32c:$src2))]>;
-def AND64rihi32 : RILI<0xC0A,
- (outs GR64:$dst), (ins GR64:$src1, u32imm:$src2),
- "nihf\t{$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64hi32c:$src2))]>;
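-
-// For instance, clearing the low bit of a 64-bit value matches i64ll16c,
-// because every bit outside the rightmost 16 is set in the mask:
-//   (and GR64:$x, 0xFFFFFFFFFFFFFFFE) -> nill $x, 0xfffe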
-
-let isCommutable = 1 in { // X = OR Y, Z == X = OR Z, Y
-def OR32rr : RRI<0x16,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "or\t{$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, GR32:$src2))]>;
-def OR64rr : RREI<0xB981,
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "ogr\t{$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
-}
-
-def OR32rm : RXI<0x56, (outs GR32:$dst), (ins GR32:$src1, rriaddr12:$src2),
- "o\t{$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-def OR32rmy : RXYI<0xE356, (outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
- "oy\t{$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-def OR64rm : RXYI<0xE381, (outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
- "og\t{$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-
- // FIXME: Provide proper encoding!
-def OR32ri16 : RII<0xA5B,
- (outs GR32:$dst), (ins GR32:$src1, u32imm:$src2),
- "oill\t{$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, i32ll16:$src2))]>;
-def OR32ri16h : RII<0xA5A,
- (outs GR32:$dst), (ins GR32:$src1, u32imm:$src2),
- "oilh\t{$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, i32lh16:$src2))]>;
-def OR32ri : RILI<0xC0D,
- (outs GR32:$dst), (ins GR32:$src1, u32imm:$src2),
- "oilf\t{$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, imm:$src2))]>;
-
-def OR64rill16 : RII<0xA5B,
- (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
- "oill\t{$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64ll16:$src2))]>;
-def OR64rilh16 : RII<0xA5A,
- (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
- "oilh\t{$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64lh16:$src2))]>;
-def OR64rihl16 : RII<0xA59,
- (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
- "oihl\t{$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64hl16:$src2))]>;
-def OR64rihh16 : RII<0xA58,
- (outs GR64:$dst), (ins GR64:$src1, u16imm:$src2),
- "oihh\t{$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64hh16:$src2))]>;
-
-def OR64rilo32 : RILI<0xC0D,
- (outs GR64:$dst), (ins GR64:$src1, u32imm:$src2),
- "oilf\t{$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64lo32:$src2))]>;
-def OR64rihi32 : RILI<0xC0C,
- (outs GR64:$dst), (ins GR64:$src1, u32imm:$src2),
- "oihf\t{$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64hi32:$src2))]>;
-
-def SUB32rr : RRI<0x1B,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "sr\t{$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, GR32:$src2))]>;
-def SUB64rr : RREI<0xB909,
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "sgr\t{$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
-
-def SUB32rm : RXI<0x5B, (outs GR32:$dst), (ins GR32:$src1, rriaddr12:$src2),
- "s\t{$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-def SUB32rmy : RXYI<0xE35B, (outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
- "sy\t{$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-def SUB64rm : RXYI<0xE309, (outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
- "sg\t{$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-
-def SBC32rr : RRI<0x1F,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "slr\t{$dst, $src2}",
- [(set GR32:$dst, (subc GR32:$src1, GR32:$src2))]>;
-def SBC64rr : RREI<0xB90B,
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "slgr\t{$dst, $src2}",
- [(set GR64:$dst, (subc GR64:$src1, GR64:$src2))]>;
-
-def SBC32ri : RILI<0xC25,
- (outs GR32:$dst), (ins GR32:$src1, s32imm:$src2),
- "sllfi\t{$dst, $src2}",
- [(set GR32:$dst, (subc GR32:$src1, imm:$src2))]>;
-def SBC64ri32 : RILI<0xC24,
- (outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
- "slgfi\t{$dst, $src2}",
- [(set GR64:$dst, (subc GR64:$src1, immSExt32:$src2))]>;
-
-let Uses = [PSW] in {
-def SUBE32rr : RREI<0xB999, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "slbr\t{$dst, $src2}",
- [(set GR32:$dst, (sube GR32:$src1, GR32:$src2)),
- (implicit PSW)]>;
-def SUBE64rr : RREI<0xB989, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "slbgr\t{$dst, $src2}",
- [(set GR64:$dst, (sube GR64:$src1, GR64:$src2)),
- (implicit PSW)]>;
-}
-
-let isCommutable = 1 in { // X = XOR Y, Z == X = XOR Z, Y
-def XOR32rr : RRI<0x17,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "xr\t{$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, GR32:$src2))]>;
-def XOR64rr : RREI<0xB982,
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "xgr\t{$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
-}
-
-def XOR32rm : RXI<0x57,(outs GR32:$dst), (ins GR32:$src1, rriaddr12:$src2),
- "x\t{$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, (load rriaddr12:$src2))),
- (implicit PSW)]>;
-def XOR32rmy : RXYI<0xE357, (outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
- "xy\t{$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-def XOR64rm : RXYI<0xE382, (outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
- "xg\t{$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, (load rriaddr:$src2))),
- (implicit PSW)]>;
-
-def XOR32ri : RILI<0xC07,
- (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
- "xilf\t{$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, imm:$src2))]>;
-
-} // Defs = [PSW]
-
-let isCommutable = 1 in { // X = MUL Y, Z == X = MUL Z, Y
-def MUL32rr : RREI<0xB252,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "msr\t{$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, GR32:$src2))]>;
-def MUL64rr : RREI<0xB90C,
- (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
- "msgr\t{$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>;
-}
-
-def MUL64rrP : RRI<0x1C,
- (outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
- "mr\t{$dst, $src2}",
- []>;
-def UMUL64rrP : RREI<0xB996,
- (outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
- "mlr\t{$dst, $src2}",
- []>;
-def UMUL128rrP : RREI<0xB986,
- (outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
- "mlgr\t{$dst, $src2}",
- []>;
-
-def MUL32ri16 : RII<0xA7C,
- (outs GR32:$dst), (ins GR32:$src1, s16imm:$src2),
- "mhi\t{$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, i32immSExt16:$src2))]>;
-def MUL64ri16 : RII<0xA7D,
- (outs GR64:$dst), (ins GR64:$src1, s16imm64:$src2),
- "mghi\t{$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, immSExt16:$src2))]>;
-
-let AddedComplexity = 2 in {
-def MUL32ri : RILI<0xC21,
- (outs GR32:$dst), (ins GR32:$src1, s32imm:$src2),
- "msfi\t{$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, imm:$src2))]>,
- Requires<[IsZ10]>;
-def MUL64ri32 : RILI<0xC20,
- (outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
- "msgfi\t{$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>,
- Requires<[IsZ10]>;
-}
-
-def MUL32rm : RXI<0x71,
- (outs GR32:$dst), (ins GR32:$src1, rriaddr12:$src2),
- "ms\t{$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, (load rriaddr12:$src2)))]>;
-def MUL32rmy : RXYI<0xE351,
- (outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
- "msy\t{$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, (load rriaddr:$src2)))]>;
-def MUL64rm : RXYI<0xE30C,
- (outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
- "msg\t{$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, (load rriaddr:$src2)))]>;
-
-def MULSX64rr32 : RREI<0xB91C,
- (outs GR64:$dst), (ins GR64:$src1, GR32:$src2),
- "msgfr\t{$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, (sext GR32:$src2)))]>;
-
-def SDIVREM32r : RREI<0xB91D,
- (outs GR128:$dst), (ins GR128:$src1, GR32:$src2),
- "dsgfr\t{$dst, $src2}",
- []>;
-def SDIVREM64r : RREI<0xB90D,
- (outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
- "dsgr\t{$dst, $src2}",
- []>;
-
-def UDIVREM32r : RREI<0xB997,
- (outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
- "dlr\t{$dst, $src2}",
- []>;
-def UDIVREM64r : RREI<0xB987,
- (outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
- "dlgr\t{$dst, $src2}",
- []>;
-let mayLoad = 1 in {
-def SDIVREM32m : RXYI<0xE31D,
- (outs GR128:$dst), (ins GR128:$src1, rriaddr:$src2),
- "dsgf\t{$dst, $src2}",
- []>;
-def SDIVREM64m : RXYI<0xE30D,
- (outs GR128:$dst), (ins GR128:$src1, rriaddr:$src2),
- "dsg\t{$dst, $src2}",
- []>;
-
-def UDIVREM32m : RXYI<0xE397,
- (outs GR64P:$dst), (ins GR64P:$src1, rriaddr:$src2),
- "dl\t{$dst, $src2}",
- []>;
-def UDIVREM64m : RXYI<0xE387,
- (outs GR128:$dst), (ins GR128:$src1, rriaddr:$src2),
- "dlg\t{$dst, $src2}",
- []>;
-} // mayLoad
-} // Constraints = "$src1 = $dst"
-
-//===----------------------------------------------------------------------===//
-// Shifts
-
-let Constraints = "$src = $dst" in
-def SRL32rri : RSI<0x88,
- (outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
- "srl\t{$src, $amt}",
- [(set GR32:$dst, (srl GR32:$src, riaddr32:$amt))]>;
-def SRL64rri : RSYI<0xEB0C,
- (outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
- "srlg\t{$dst, $src, $amt}",
- [(set GR64:$dst, (srl GR64:$src, riaddr:$amt))]>;
-
-let Constraints = "$src = $dst" in
-def SHL32rri : RSI<0x89,
- (outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
- "sll\t{$src, $amt}",
- [(set GR32:$dst, (shl GR32:$src, riaddr32:$amt))]>;
-def SHL64rri : RSYI<0xEB0D,
- (outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
- "sllg\t{$dst, $src, $amt}",
- [(set GR64:$dst, (shl GR64:$src, riaddr:$amt))]>;
-
-let Defs = [PSW] in {
-let Constraints = "$src = $dst" in
-def SRA32rri : RSI<0x8A,
- (outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
- "sra\t{$src, $amt}",
- [(set GR32:$dst, (sra GR32:$src, riaddr32:$amt)),
- (implicit PSW)]>;
-
-def SRA64rri : RSYI<0xEB0A,
- (outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
- "srag\t{$dst, $src, $amt}",
- [(set GR64:$dst, (sra GR64:$src, riaddr:$amt)),
- (implicit PSW)]>;
-} // Defs = [PSW]
-
-def ROTL32rri : RSYI<0xEB1D,
- (outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
- "rll\t{$dst, $src, $amt}",
- [(set GR32:$dst, (rotl GR32:$src, riaddr32:$amt))]>;
-def ROTL64rri : RSYI<0xEB1C,
- (outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
- "rllg\t{$dst, $src, $amt}",
- [(set GR64:$dst, (rotl GR64:$src, riaddr:$amt))]>;
-
-//===----------------------------------------------------------------------===//
-// Test instructions (like AND but do not produce any result)
-
-// Integer comparisons
-let Defs = [PSW] in {
-def CMP32rr : RRI<0x19,
- (outs), (ins GR32:$src1, GR32:$src2),
- "cr\t$src1, $src2",
- [(set PSW, (SystemZcmp GR32:$src1, GR32:$src2))]>;
-def CMP64rr : RREI<0xB920,
- (outs), (ins GR64:$src1, GR64:$src2),
- "cgr\t$src1, $src2",
- [(set PSW, (SystemZcmp GR64:$src1, GR64:$src2))]>;
-
-def CMP32ri : RILI<0xC2D,
- (outs), (ins GR32:$src1, s32imm:$src2),
- "cfi\t$src1, $src2",
- [(set PSW, (SystemZcmp GR32:$src1, imm:$src2))]>;
-def CMP64ri32 : RILI<0xC2C,
- (outs), (ins GR64:$src1, s32imm64:$src2),
- "cgfi\t$src1, $src2",
- [(set PSW, (SystemZcmp GR64:$src1, i64immSExt32:$src2))]>;
-
-def CMP32rm : RXI<0x59,
- (outs), (ins GR32:$src1, rriaddr12:$src2),
- "c\t$src1, $src2",
- [(set PSW, (SystemZcmp GR32:$src1, (load rriaddr12:$src2)))]>;
-def CMP32rmy : RXYI<0xE359,
- (outs), (ins GR32:$src1, rriaddr:$src2),
- "cy\t$src1, $src2",
- [(set PSW, (SystemZcmp GR32:$src1, (load rriaddr:$src2)))]>;
-def CMP64rm : RXYI<0xE320,
- (outs), (ins GR64:$src1, rriaddr:$src2),
- "cg\t$src1, $src2",
- [(set PSW, (SystemZcmp GR64:$src1, (load rriaddr:$src2)))]>;
-
-def UCMP32rr : RRI<0x15,
- (outs), (ins GR32:$src1, GR32:$src2),
- "clr\t$src1, $src2",
- [(set PSW, (SystemZucmp GR32:$src1, GR32:$src2))]>;
-def UCMP64rr : RREI<0xB921,
- (outs), (ins GR64:$src1, GR64:$src2),
- "clgr\t$src1, $src2",
- [(set PSW, (SystemZucmp GR64:$src1, GR64:$src2))]>;
-
-def UCMP32ri : RILI<0xC2F,
- (outs), (ins GR32:$src1, i32imm:$src2),
- "clfi\t$src1, $src2",
- [(set PSW, (SystemZucmp GR32:$src1, imm:$src2))]>;
-def UCMP64ri32 : RILI<0xC2E,
- (outs), (ins GR64:$src1, i64i32imm:$src2),
- "clgfi\t$src1, $src2",
- [(set PSW,(SystemZucmp GR64:$src1, i64immZExt32:$src2))]>;
-
-def UCMP32rm : RXI<0x55,
- (outs), (ins GR32:$src1, rriaddr12:$src2),
- "cl\t$src1, $src2",
- [(set PSW, (SystemZucmp GR32:$src1,
- (load rriaddr12:$src2)))]>;
-def UCMP32rmy : RXYI<0xE355,
- (outs), (ins GR32:$src1, rriaddr:$src2),
- "cly\t$src1, $src2",
- [(set PSW, (SystemZucmp GR32:$src1,
- (load rriaddr:$src2)))]>;
-def UCMP64rm : RXYI<0xE321,
- (outs), (ins GR64:$src1, rriaddr:$src2),
- "clg\t$src1, $src2",
- [(set PSW, (SystemZucmp GR64:$src1,
- (load rriaddr:$src2)))]>;
-
-def CMPSX64rr32 : RREI<0xB930,
- (outs), (ins GR64:$src1, GR32:$src2),
- "cgfr\t$src1, $src2",
- [(set PSW, (SystemZcmp GR64:$src1,
- (sext GR32:$src2)))]>;
-def UCMPZX64rr32 : RREI<0xB931,
- (outs), (ins GR64:$src1, GR32:$src2),
- "clgfr\t$src1, $src2",
- [(set PSW, (SystemZucmp GR64:$src1,
- (zext GR32:$src2)))]>;
-
-def CMPSX64rm32 : RXYI<0xE330,
- (outs), (ins GR64:$src1, rriaddr:$src2),
- "cgf\t$src1, $src2",
- [(set PSW, (SystemZcmp GR64:$src1,
- (sextloadi64i32 rriaddr:$src2)))]>;
-def UCMPZX64rm32 : RXYI<0xE331,
- (outs), (ins GR64:$src1, rriaddr:$src2),
- "clgf\t$src1, $src2",
- [(set PSW, (SystemZucmp GR64:$src1,
- (zextloadi64i32 rriaddr:$src2)))]>;
-
-// FIXME: Add other crazy ucmp forms
-
-} // Defs = [PSW]
-
-//===----------------------------------------------------------------------===//
-// Other crazy stuff
-let Defs = [PSW] in {
-def FLOGR64 : RREI<0xB983,
- (outs GR128:$dst), (ins GR64:$src),
- "flogr\t{$dst, $src}",
- []>;
-} // Defs = [PSW]
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns.
-//===----------------------------------------------------------------------===//
-
-// ConstPools, JumpTables
-def : Pat<(SystemZpcrelwrapper tjumptable:$src), (LA64rm tjumptable:$src)>;
-def : Pat<(SystemZpcrelwrapper tconstpool:$src), (LA64rm tconstpool:$src)>;
-
-// anyext
-def : Pat<(i64 (anyext GR32:$src)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_32bit)>;
-
-// calls
-def : Pat<(SystemZcall (i64 tglobaladdr:$dst)), (CALLi tglobaladdr:$dst)>;
-def : Pat<(SystemZcall (i64 texternalsym:$dst)), (CALLi texternalsym:$dst)>;
-
-//===----------------------------------------------------------------------===//
-// Peepholes.
-//===----------------------------------------------------------------------===//
-
-// FIXME: use add/sub tricks with 32768/-32768
-
-// Arbitrary immediate support.
-def : Pat<(i32 imm:$src),
- (EXTRACT_SUBREG (MOV64ri32 (GetI64FromI32 (i32 imm:$src))),
- subreg_32bit)>;
-
-// Implement in terms of LLIHF/OILF.
-def : Pat<(i64 imm:$imm),
- (OR64rilo32 (MOV64rihi32 (HI32 imm:$imm)), (LO32 imm:$imm))>;
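-
-// e.g. materializing the i64 constant 0x123456789ABCDEF0 with this pattern
-// emits (destination register chosen by the allocator):
-//   llihf %rX, 0x12345678
-//   oilf  %rX, 0x9abcdef0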
-
-// trunc patterns
-def : Pat<(i32 (trunc GR64:$src)),
- (EXTRACT_SUBREG GR64:$src, subreg_32bit)>;
-
-// sext_inreg patterns
-def : Pat<(sext_inreg GR64:$src, i32),
- (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, subreg_32bit))>;
-
-// extload patterns
-def : Pat<(extloadi32i8 rriaddr:$src), (MOVZX32rm8 rriaddr:$src)>;
-def : Pat<(extloadi32i16 rriaddr:$src), (MOVZX32rm16 rriaddr:$src)>;
-def : Pat<(extloadi64i8 rriaddr:$src), (MOVZX64rm8 rriaddr:$src)>;
-def : Pat<(extloadi64i16 rriaddr:$src), (MOVZX64rm16 rriaddr:$src)>;
-def : Pat<(extloadi64i32 rriaddr:$src), (MOVZX64rm32 rriaddr:$src)>;
-
-// muls
-def : Pat<(mulhs GR32:$src1, GR32:$src2),
- (EXTRACT_SUBREG (MUL64rrP (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
- GR32:$src1, subreg_odd32),
- GR32:$src2),
- subreg_32bit)>;
-
-def : Pat<(mulhu GR32:$src1, GR32:$src2),
- (EXTRACT_SUBREG (UMUL64rrP (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
- GR32:$src1, subreg_odd32),
- GR32:$src2),
- subreg_32bit)>;
-def : Pat<(mulhu GR64:$src1, GR64:$src2),
- (EXTRACT_SUBREG (UMUL128rrP (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
- GR64:$src1, subreg_odd),
- GR64:$src2),
- subreg_even)>;
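-
-// The mulh patterns above exploit the even/odd register-pair semantics of
-// MR/MLR/MLGR: the operand is inserted into the odd half of a wider pair,
-// the instruction writes the double-width product across the pair, and the
-// high half of the product is then extracted as the result.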
-
-def : Pat<(ctlz GR64:$src),
- (EXTRACT_SUBREG (FLOGR64 GR64:$src), subreg_even)>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
deleted file mode 100644
index fd6e330..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//==- SystemZMachineFunctionInfo.h - SystemZ machine function info -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares SystemZ-specific per-machine-function information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SYSTEMZMACHINEFUNCTIONINFO_H
-#define SYSTEMZMACHINEFUNCTIONINFO_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-
-namespace llvm {
-
-/// SystemZMachineFunctionInfo - This class is derived from MachineFunctionInfo
-/// and contains private SystemZ target-specific information for each
-/// MachineFunction.
-class SystemZMachineFunctionInfo : public MachineFunctionInfo {
- /// CalleeSavedFrameSize - Size of the callee-saved register portion of the
- /// stack frame in bytes.
- unsigned CalleeSavedFrameSize;
-
- /// LowReg - Low register of range of callee-saved registers to store.
- unsigned LowReg;
-
- /// HighReg - High register of range of callee-saved registers to store.
- unsigned HighReg;
-public:
- SystemZMachineFunctionInfo()
- : CalleeSavedFrameSize(0), LowReg(0), HighReg(0) {}
-
- explicit SystemZMachineFunctionInfo(MachineFunction &MF)
- : CalleeSavedFrameSize(0), LowReg(0), HighReg(0) {}
-
- unsigned getCalleeSavedFrameSize() const { return CalleeSavedFrameSize; }
- void setCalleeSavedFrameSize(unsigned bytes) { CalleeSavedFrameSize = bytes; }
-
- unsigned getLowReg() const { return LowReg; }
- void setLowReg(unsigned Reg) { LowReg = Reg; }
-
- unsigned getHighReg() const { return HighReg; }
- void setHighReg(unsigned Reg) { HighReg = Reg; }
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
deleted file mode 100644
index 8b835cc..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
+++ /dev/null
@@ -1,325 +0,0 @@
-//===- SystemZOperands.td - SystemZ Operands defs ------------*- tblgen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the various SystemZ instruction operands.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Instruction Pattern Stuff.
-//===----------------------------------------------------------------------===//
-
-// SystemZ-specific condition codes. These correspond to CondCode in
-// SystemZ.h and must be kept in sync with it.
-def SYSTEMZ_COND_O : PatLeaf<(i8 0)>;
-def SYSTEMZ_COND_H : PatLeaf<(i8 1)>;
-def SYSTEMZ_COND_NLE : PatLeaf<(i8 2)>;
-def SYSTEMZ_COND_L : PatLeaf<(i8 3)>;
-def SYSTEMZ_COND_NHE : PatLeaf<(i8 4)>;
-def SYSTEMZ_COND_LH : PatLeaf<(i8 5)>;
-def SYSTEMZ_COND_NE : PatLeaf<(i8 6)>;
-def SYSTEMZ_COND_E : PatLeaf<(i8 7)>;
-def SYSTEMZ_COND_NLH : PatLeaf<(i8 8)>;
-def SYSTEMZ_COND_HE : PatLeaf<(i8 9)>;
-def SYSTEMZ_COND_NL : PatLeaf<(i8 10)>;
-def SYSTEMZ_COND_LE : PatLeaf<(i8 11)>;
-def SYSTEMZ_COND_NH : PatLeaf<(i8 12)>;
-def SYSTEMZ_COND_NO : PatLeaf<(i8 13)>;
-
-def LO8 : SDNodeXForm<imm, [{
- // Transformation function: return low 8 bits.
- return getI8Imm(N->getZExtValue() & 0x00000000000000FFULL);
-}]>;
-
-def LL16 : SDNodeXForm<imm, [{
- // Transformation function: return low 16 bits.
- return getI16Imm(N->getZExtValue() & 0x000000000000FFFFULL);
-}]>;
-
-def LH16 : SDNodeXForm<imm, [{
- // Transformation function: return bits 16-31.
- return getI16Imm((N->getZExtValue() & 0x00000000FFFF0000ULL) >> 16);
-}]>;
-
-def HL16 : SDNodeXForm<imm, [{
- // Transformation function: return bits 32-47.
- return getI16Imm((N->getZExtValue() & 0x0000FFFF00000000ULL) >> 32);
-}]>;
-
-def HH16 : SDNodeXForm<imm, [{
- // Transformation function: return bits 48-63.
- return getI16Imm((N->getZExtValue() & 0xFFFF000000000000ULL) >> 48);
-}]>;
-
-def LO32 : SDNodeXForm<imm, [{
- // Transformation function: return low 32 bits.
- return getI32Imm(N->getZExtValue() & 0x00000000FFFFFFFFULL);
-}]>;
-
-def HI32 : SDNodeXForm<imm, [{
- // Transformation function: return bits 32-63.
- return getI32Imm(N->getZExtValue() >> 32);
-}]>;
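-
-// Worked example: for N = 0x123456789ABCDEF0 the transformations above give
-// LL16 = 0xDEF0, LH16 = 0x9ABC, HL16 = 0x5678, HH16 = 0x1234,
-// LO32 = 0x9ABCDEF0 and HI32 = 0x12345678.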
-
-def GetI64FromI32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i64);
-}]>;
-
-def i32ll16 : PatLeaf<(i32 imm), [{
- // i32ll16 predicate - true if the 32-bit immediate has only rightmost 16
- // bits set.
- return ((N->getZExtValue() & 0x000000000000FFFFULL) == N->getZExtValue());
-}], LL16>;
-
-def i32lh16 : PatLeaf<(i32 imm), [{
- // i32lh16 predicate - true if the 32-bit immediate has only bits 16-31 set.
- return ((N->getZExtValue() & 0x00000000FFFF0000ULL) == N->getZExtValue());
-}], LH16>;
-
-def i32ll16c : PatLeaf<(i32 imm), [{
- // i32ll16c predicate - true if the 32-bit immediate has all bits 16-31 set.
- return ((N->getZExtValue() | 0x00000000FFFF0000ULL) == N->getZExtValue());
-}], LL16>;
-
-def i32lh16c : PatLeaf<(i32 imm), [{
- // i32lh16c predicate - true if the 32-bit immediate has all rightmost 16
- // bits set.
- return ((N->getZExtValue() | 0x000000000000FFFFULL) == N->getZExtValue());
-}], LH16>;
-
-def i64ll16 : PatLeaf<(i64 imm), [{
- // i64ll16 predicate - true if the 64-bit immediate has only rightmost 16
- // bits set.
- return ((N->getZExtValue() & 0x000000000000FFFFULL) == N->getZExtValue());
-}], LL16>;
-
-def i64lh16 : PatLeaf<(i64 imm), [{
- // i64lh16 predicate - true if the 64-bit immediate has only bits 16-31 set.
- return ((N->getZExtValue() & 0x00000000FFFF0000ULL) == N->getZExtValue());
-}], LH16>;
-
-def i64hl16 : PatLeaf<(i64 imm), [{
- // i64hl16 predicate - true if the 64-bit immediate has only bits 32-47 set.
- return ((N->getZExtValue() & 0x0000FFFF00000000ULL) == N->getZExtValue());
-}], HL16>;
-
-def i64hh16 : PatLeaf<(i64 imm), [{
- // i64hh16 predicate - true if the 64-bit immediate has only bits 48-63 set.
- return ((N->getZExtValue() & 0xFFFF000000000000ULL) == N->getZExtValue());
-}], HH16>;
-
-def i64ll16c : PatLeaf<(i64 imm), [{
- // i64ll16c predicate - true if the 64-bit immediate has all bits other than
- // the rightmost 16 set.
- return ((N->getZExtValue() | 0xFFFFFFFFFFFF0000ULL) == N->getZExtValue());
-}], LL16>;
-
-def i64lh16c : PatLeaf<(i64 imm), [{
- // i64lh16c predicate - true if the 64-bit immediate has all bits other than
- // bits 16-31 set.
- return ((N->getZExtValue() | 0xFFFFFFFF0000FFFFULL) == N->getZExtValue());
-}], LH16>;
-
-def i64hl16c : PatLeaf<(i64 imm), [{
- // i64hl16c predicate - true if the 64-bit immediate has all bits other than
- // bits 32-47 set.
- return ((N->getZExtValue() | 0xFFFF0000FFFFFFFFULL) == N->getZExtValue());
-}], HL16>;
-
-def i64hh16c : PatLeaf<(i64 imm), [{
- // i64hh16c predicate - true if the 64-bit immediate has all bits other than
- // bits 48-63 set.
- return ((N->getZExtValue() | 0x0000FFFFFFFFFFFFULL) == N->getZExtValue());
-}], HH16>;
-
-def immSExt16 : PatLeaf<(imm), [{
- // immSExt16 predicate - true if the immediate fits in a 16-bit sign extended
- // field.
- if (N->getValueType(0) == MVT::i64) {
- uint64_t val = N->getZExtValue();
- return ((int64_t)val == (int16_t)val);
- } else if (N->getValueType(0) == MVT::i32) {
- uint32_t val = N->getZExtValue();
- return ((int32_t)val == (int16_t)val);
- }
-
- return false;
-}], LL16>;
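-
-// e.g. the i32 value 0xFFFF8000 (-32768) satisfies immSExt16, while
-// 0x00008000 (32768) does not.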
-
-def immSExt32 : PatLeaf<(i64 imm), [{
- // immSExt32 predicate - true if the immediate fits in a 32-bit sign extended
- // field.
- uint64_t val = N->getZExtValue();
- return ((int64_t)val == (int32_t)val);
-}], LO32>;
-
-def i64lo32 : PatLeaf<(i64 imm), [{
- // i64lo32 predicate - true if the 64-bit immediate has only rightmost 32
- // bits set.
- return ((N->getZExtValue() & 0x00000000FFFFFFFFULL) == N->getZExtValue());
-}], LO32>;
-
-def i64hi32 : PatLeaf<(i64 imm), [{
- // i64hi32 predicate - true if the 64-bit immediate has only bits 32-63 set.
- return ((N->getZExtValue() & 0xFFFFFFFF00000000ULL) == N->getZExtValue());
-}], HI32>;
-
-def i64lo32c : PatLeaf<(i64 imm), [{
- // i64lo32c predicate - true if the 64-bit immediate has all bits other than
- // the rightmost 32 set.
- return ((N->getZExtValue() | 0xFFFFFFFF00000000ULL) == N->getZExtValue());
-}], LO32>;
-
-def i64hi32c : PatLeaf<(i64 imm), [{
- // i64hi32c predicate - true if the 64-bit immediate has all bits other than
- // bits 32-63 set.
- return ((N->getZExtValue() | 0x00000000FFFFFFFFULL) == N->getZExtValue());
-}], HI32>;
-
-def i32immSExt8 : PatLeaf<(i32 imm), [{
- // i32immSExt8 predicate - True if the 32-bit immediate fits in an 8-bit
- // sign extended field.
- return (int32_t)N->getZExtValue() == (int8_t)N->getZExtValue();
-}], LO8>;
-
-def i32immSExt16 : PatLeaf<(i32 imm), [{
- // i32immSExt16 predicate - True if the 32-bit immediate fits in a 16-bit
- // sign extended field.
- return (int32_t)N->getZExtValue() == (int16_t)N->getZExtValue();
-}], LL16>;
-
-def i64immSExt32 : PatLeaf<(i64 imm), [{
- // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
- // sign extended field.
- return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
-}], LO32>;
-
-def i64immZExt32 : PatLeaf<(i64 imm), [{
- // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
- // zero extended field.
- return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
-}], LO32>;
-
-// extloads
-def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
-def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
-def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
-def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
-def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
-
-def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
-def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
-def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
-def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
-def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
-
-def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
-def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
-def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
-def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
-def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
-
-// A couple of more descriptive operand definitions.
-// 32-bits but only 8 bits are significant.
-def i32i8imm : Operand<i32>;
-// 32-bits but only 16 bits are significant.
-def i32i16imm : Operand<i32>;
-// 64-bits but only 32 bits are significant.
-def i64i32imm : Operand<i64>;
-// Branch targets have OtherVT type.
-def brtarget : Operand<OtherVT>;
-
-// Unsigned i12
-def u12imm : Operand<i32> {
- let PrintMethod = "printU12ImmOperand";
-}
-def u12imm64 : Operand<i64> {
- let PrintMethod = "printU12ImmOperand";
-}
-
-// Signed i16
-def s16imm : Operand<i32> {
- let PrintMethod = "printS16ImmOperand";
-}
-def s16imm64 : Operand<i64> {
- let PrintMethod = "printS16ImmOperand";
-}
-// Unsigned i16
-def u16imm : Operand<i32> {
- let PrintMethod = "printU16ImmOperand";
-}
-def u16imm64 : Operand<i64> {
- let PrintMethod = "printU16ImmOperand";
-}
-
-// Signed i20
-def s20imm : Operand<i32> {
- let PrintMethod = "printS20ImmOperand";
-}
-def s20imm64 : Operand<i64> {
- let PrintMethod = "printS20ImmOperand";
-}
-// Signed i32
-def s32imm : Operand<i32> {
- let PrintMethod = "printS32ImmOperand";
-}
-def s32imm64 : Operand<i64> {
- let PrintMethod = "printS32ImmOperand";
-}
-// Unsigned i32
-def u32imm : Operand<i32> {
- let PrintMethod = "printU32ImmOperand";
-}
-def u32imm64 : Operand<i64> {
- let PrintMethod = "printU32ImmOperand";
-}
-
-def imm_pcrel : Operand<i64> {
- let PrintMethod = "printPCRelImmOperand";
-}
-
-//===----------------------------------------------------------------------===//
-// SystemZ Operand Definitions.
-//===----------------------------------------------------------------------===//
-
-// Address operands
-
-// riaddr := reg + imm
-def riaddr32 : Operand<i64>,
- ComplexPattern<i64, 2, "SelectAddrRI12Only", []> {
- let PrintMethod = "printRIAddrOperand";
- let MIOperandInfo = (ops ADDR64:$base, u12imm:$disp);
-}
-
-def riaddr12 : Operand<i64>,
- ComplexPattern<i64, 2, "SelectAddrRI12", []> {
- let PrintMethod = "printRIAddrOperand";
- let MIOperandInfo = (ops ADDR64:$base, u12imm64:$disp);
-}
-
-def riaddr : Operand<i64>,
- ComplexPattern<i64, 2, "SelectAddrRI", []> {
- let PrintMethod = "printRIAddrOperand";
- let MIOperandInfo = (ops ADDR64:$base, s20imm64:$disp);
-}
-
-//===----------------------------------------------------------------------===//
-
-// rriaddr := reg + reg + imm
-def rriaddr12 : Operand<i64>,
- ComplexPattern<i64, 3, "SelectAddrRRI12", [], []> {
- let PrintMethod = "printRRIAddrOperand";
- let MIOperandInfo = (ops ADDR64:$base, u12imm64:$disp, ADDR64:$index);
-}
-def rriaddr : Operand<i64>,
- ComplexPattern<i64, 3, "SelectAddrRRI20", [], []> {
- let PrintMethod = "printRRIAddrOperand";
- let MIOperandInfo = (ops ADDR64:$base, s20imm64:$disp, ADDR64:$index);
-}
-def laaddr : Operand<i64>,
- ComplexPattern<i64, 3, "SelectLAAddr", [add, sub, or, frameindex], []> {
- let PrintMethod = "printRRIAddrOperand";
- let MIOperandInfo = (ops ADDR64:$base, s20imm64:$disp, ADDR64:$index);
-}
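
A note on the immediate predicates deleted above: each one reduces to one of two bit tests. The plain forms accept a value whose significant bits lie entirely inside a single 16- or 32-bit field, (Val & FieldMask) == Val; the *c forms accept a value whose bits outside the field are all ones, (Val | ~FieldMask) == Val, the shape the AND-with-one-field instructions these predicates feed can produce. A minimal standalone sketch, with helper names that are illustrative rather than LLVM API:

#include <cassert>
#include <cstdint>

// Plain form: no bits set outside the field (e.g. i64ll16, FieldMask 0xFFFF).
static bool onlyBitsInField(uint64_t Val, uint64_t FieldMask) {
  return (Val & FieldMask) == Val;
}

// Complemented form: every bit outside the field is set (e.g. i64ll16c,
// whose OR mask 0xFFFFFFFFFFFF0000 is the complement of the low-16 field).
static bool allOnesOutsideField(uint64_t Val, uint64_t FieldMask) {
  return (Val | ~FieldMask) == Val;
}

int main() {
  assert(onlyBitsInField(0x1234ULL, 0xFFFFULL));                   // i64ll16
  assert(!onlyBitsInField(0x11234ULL, 0xFFFFULL));
  assert(allOnesOutsideField(0xFFFFFFFFFFFF1234ULL, 0xFFFFULL));   // i64ll16c
  assert(!allOnesOutsideField(0x0FFFFFFFFFFF1234ULL, 0xFFFFULL));
  return 0;
}
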
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
deleted file mode 100644
index b1050d4..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-//===- SystemZRegisterInfo.cpp - SystemZ Register Information -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the SystemZ implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SystemZ.h"
-#include "SystemZInstrInfo.h"
-#include "SystemZMachineFunctionInfo.h"
-#include "SystemZRegisterInfo.h"
-#include "SystemZSubtarget.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/BitVector.h"
-
-#define GET_REGINFO_TARGET_DESC
-#include "SystemZGenRegisterInfo.inc"
-
-using namespace llvm;
-
-SystemZRegisterInfo::SystemZRegisterInfo(SystemZTargetMachine &tm,
- const SystemZInstrInfo &tii)
- : SystemZGenRegisterInfo(0), TM(tm), TII(tii) {
-}
-
-const unsigned*
-SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- static const unsigned CalleeSavedRegs[] = {
- SystemZ::R6D, SystemZ::R7D, SystemZ::R8D, SystemZ::R9D,
- SystemZ::R10D, SystemZ::R11D, SystemZ::R12D, SystemZ::R13D,
- SystemZ::R14D, SystemZ::R15D,
- SystemZ::F8L, SystemZ::F9L, SystemZ::F10L, SystemZ::F11L,
- SystemZ::F12L, SystemZ::F13L, SystemZ::F14L, SystemZ::F15L,
- 0
- };
-
- return CalleeSavedRegs;
-}
-
-BitVector SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
- BitVector Reserved(getNumRegs());
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- if (TFI->hasFP(MF)) {
- // R11D is the frame pointer. Reserve all aliases.
- Reserved.set(SystemZ::R11D);
- Reserved.set(SystemZ::R11W);
- Reserved.set(SystemZ::R10P);
- Reserved.set(SystemZ::R10Q);
- }
-
- Reserved.set(SystemZ::R14D);
- Reserved.set(SystemZ::R15D);
- Reserved.set(SystemZ::R14W);
- Reserved.set(SystemZ::R15W);
- Reserved.set(SystemZ::R14P);
- Reserved.set(SystemZ::R14Q);
- return Reserved;
-}
-
-const TargetRegisterClass*
-SystemZRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B,
- unsigned Idx) const {
- switch(Idx) {
- // Exact sub-classes don't exist for the other sub-register indexes.
- default: return 0;
- case SystemZ::subreg_32bit:
- if (B == SystemZ::ADDR32RegisterClass)
- return A->getSize() == 8 ? SystemZ::ADDR64RegisterClass : 0;
- return A;
- }
-}
-
-void SystemZRegisterInfo::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
- MBB.erase(I);
-}
-
-void
-SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, RegScavenger *RS) const {
- assert(SPAdj == 0 && "Unexpected");
-
- unsigned i = 0;
- MachineInstr &MI = *II;
- MachineFunction &MF = *MI.getParent()->getParent();
- const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
-
- while (!MI.getOperand(i).isFI()) {
- ++i;
- assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
- }
-
- int FrameIndex = MI.getOperand(i).getIndex();
-
- unsigned BasePtr = (TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D);
-
- // This must be part of an rri or ri operand memory reference. Replace the
- // FrameIndex with the base register BasePtr and add the frame offset to
- // the displacement field.
- MI.getOperand(i).ChangeToRegister(BasePtr, false);
-
- // Offset is either a 12-bit unsigned or a 20-bit signed integer.
- // FIXME: handle "too long" displacements.
- int Offset =
- TFI->getFrameIndexOffset(MF, FrameIndex) + MI.getOperand(i+1).getImm();
-
- // Check whether the displacement is too long to fit into a 12-bit zext field.
- MI.setDesc(TII.getMemoryInstr(MI.getOpcode(), Offset));
-
- MI.getOperand(i+1).ChangeToImmediate(Offset);
-}
-
-unsigned
-SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- assert(0 && "What is the frame register");
- return 0;
-}
-
-unsigned SystemZRegisterInfo::getEHExceptionRegister() const {
- assert(0 && "What is the exception register");
- return 0;
-}
-
-unsigned SystemZRegisterInfo::getEHHandlerRegister() const {
- assert(0 && "What is the exception handler register");
- return 0;
-}
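
The eliminateFrameIndex routine deleted above rewrites a frame-index operand into base register plus displacement, and its comment notes that a SystemZ displacement is either a 12-bit unsigned or a 20-bit signed integer. The two encodable ranges, as a small self-contained check (hypothetical helpers, not the deleted TII.getMemoryInstr):

#include <cassert>
#include <cstdint>

// 12-bit unsigned displacements cover [0, 4095]; 20-bit signed
// displacements cover [-524288, 524287].
static bool fitsU12(int64_t Disp) { return Disp >= 0 && Disp <= 4095; }
static bool fitsS20(int64_t Disp) {
  return Disp >= -524288 && Disp <= 524287;
}

int main() {
  assert(fitsU12(4095) && !fitsU12(4096) && !fitsU12(-8));
  assert(fitsS20(-524288) && fitsS20(524287) && !fitsS20(524288));
  return 0;
}
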
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
deleted file mode 100644
index 03935b2..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//===-- SystemZRegisterInfo.h - SystemZ Register Information ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the SystemZ implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SystemZREGISTERINFO_H
-#define SystemZREGISTERINFO_H
-
-#include "llvm/Target/TargetRegisterInfo.h"
-
-#define GET_REGINFO_HEADER
-#include "SystemZGenRegisterInfo.inc"
-
-namespace llvm {
-
-class SystemZSubtarget;
-class SystemZInstrInfo;
-class Type;
-
-struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
- SystemZTargetMachine &TM;
- const SystemZInstrInfo &TII;
-
- SystemZRegisterInfo(SystemZTargetMachine &tm, const SystemZInstrInfo &tii);
-
- /// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
-
- BitVector getReservedRegs(const MachineFunction &MF) const;
-
- const TargetRegisterClass*
- getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B, unsigned Idx) const;
-
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
-
- void eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, RegScavenger *RS = NULL) const;
-
- // Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const;
-
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
deleted file mode 100644
index a24cbcf..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
+++ /dev/null
@@ -1,205 +0,0 @@
-//===- SystemZRegisterInfo.td - The SystemZ Register File ------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-class SystemZReg<string n> : Register<n> {
- let Namespace = "SystemZ";
-}
-
-class SystemZRegWithSubregs<string n, list<Register> subregs>
- : RegisterWithSubRegs<n, subregs> {
- let Namespace = "SystemZ";
-}
-
-// We identify all our registers with a 4-bit ID, for consistency's sake.
-
-// GPR32 - Lower 32 bits of one of the 16 64-bit general-purpose registers
-class GPR32<bits<4> num, string n> : SystemZReg<n> {
- field bits<4> Num = num;
-}
-
-// GPR64 - One of the 16 64-bit general-purpose registers
-class GPR64<bits<4> num, string n, list<Register> subregs,
- list<Register> aliases = []>
- : SystemZRegWithSubregs<n, subregs> {
- field bits<4> Num = num;
- let Aliases = aliases;
-}
-
-// GPR128 - 8 even-odd register pairs
-class GPR128<bits<4> num, string n, list<Register> subregs,
- list<Register> aliases = []>
- : SystemZRegWithSubregs<n, subregs> {
- field bits<4> Num = num;
- let Aliases = aliases;
-}
-
-// FPRS - Lower 32 bits of one of the 16 64-bit floating-point registers
-class FPRS<bits<4> num, string n> : SystemZReg<n> {
- field bits<4> Num = num;
-}
-
-// FPRL - One of the 16 64-bit floating-point registers
-class FPRL<bits<4> num, string n, list<Register> subregs>
- : SystemZRegWithSubregs<n, subregs> {
- field bits<4> Num = num;
-}
-
-let Namespace = "SystemZ" in {
-def subreg_32bit : SubRegIndex;
-def subreg_odd32 : SubRegIndex;
-def subreg_even : SubRegIndex;
-def subreg_odd : SubRegIndex;
-}
-
-// General-purpose registers
-def R0W : GPR32< 0, "r0">;
-def R1W : GPR32< 1, "r1">;
-def R2W : GPR32< 2, "r2">;
-def R3W : GPR32< 3, "r3">;
-def R4W : GPR32< 4, "r4">;
-def R5W : GPR32< 5, "r5">;
-def R6W : GPR32< 6, "r6">;
-def R7W : GPR32< 7, "r7">;
-def R8W : GPR32< 8, "r8">;
-def R9W : GPR32< 9, "r9">;
-def R10W : GPR32<10, "r10">;
-def R11W : GPR32<11, "r11">;
-def R12W : GPR32<12, "r12">;
-def R13W : GPR32<13, "r13">;
-def R14W : GPR32<14, "r14">;
-def R15W : GPR32<15, "r15">;
-
-let SubRegIndices = [subreg_32bit] in {
-def R0D : GPR64< 0, "r0", [R0W]>, DwarfRegNum<[0]>;
-def R1D : GPR64< 1, "r1", [R1W]>, DwarfRegNum<[1]>;
-def R2D : GPR64< 2, "r2", [R2W]>, DwarfRegNum<[2]>;
-def R3D : GPR64< 3, "r3", [R3W]>, DwarfRegNum<[3]>;
-def R4D : GPR64< 4, "r4", [R4W]>, DwarfRegNum<[4]>;
-def R5D : GPR64< 5, "r5", [R5W]>, DwarfRegNum<[5]>;
-def R6D : GPR64< 6, "r6", [R6W]>, DwarfRegNum<[6]>;
-def R7D : GPR64< 7, "r7", [R7W]>, DwarfRegNum<[7]>;
-def R8D : GPR64< 8, "r8", [R8W]>, DwarfRegNum<[8]>;
-def R9D : GPR64< 9, "r9", [R9W]>, DwarfRegNum<[9]>;
-def R10D : GPR64<10, "r10", [R10W]>, DwarfRegNum<[10]>;
-def R11D : GPR64<11, "r11", [R11W]>, DwarfRegNum<[11]>;
-def R12D : GPR64<12, "r12", [R12W]>, DwarfRegNum<[12]>;
-def R13D : GPR64<13, "r13", [R13W]>, DwarfRegNum<[13]>;
-def R14D : GPR64<14, "r14", [R14W]>, DwarfRegNum<[14]>;
-def R15D : GPR64<15, "r15", [R15W]>, DwarfRegNum<[15]>;
-}
-
-// Register pairs
-let SubRegIndices = [subreg_32bit, subreg_odd32] in {
-def R0P : GPR64< 0, "r0", [R0W, R1W], [R0D, R1D]>;
-def R2P : GPR64< 2, "r2", [R2W, R3W], [R2D, R3D]>;
-def R4P : GPR64< 4, "r4", [R4W, R5W], [R4D, R5D]>;
-def R6P : GPR64< 6, "r6", [R6W, R7W], [R6D, R7D]>;
-def R8P : GPR64< 8, "r8", [R8W, R9W], [R8D, R9D]>;
-def R10P : GPR64<10, "r10", [R10W, R11W], [R10D, R11D]>;
-def R12P : GPR64<12, "r12", [R12W, R13W], [R12D, R13D]>;
-def R14P : GPR64<14, "r14", [R14W, R15W], [R14D, R15D]>;
-}
-
-let SubRegIndices = [subreg_even, subreg_odd],
- CompositeIndices = [(subreg_odd32 subreg_odd, subreg_32bit)] in {
-def R0Q : GPR128< 0, "r0", [R0D, R1D], [R0P]>;
-def R2Q : GPR128< 2, "r2", [R2D, R3D], [R2P]>;
-def R4Q : GPR128< 4, "r4", [R4D, R5D], [R4P]>;
-def R6Q : GPR128< 6, "r6", [R6D, R7D], [R6P]>;
-def R8Q : GPR128< 8, "r8", [R8D, R9D], [R8P]>;
-def R10Q : GPR128<10, "r10", [R10D, R11D], [R10P]>;
-def R12Q : GPR128<12, "r12", [R12D, R13D], [R12P]>;
-def R14Q : GPR128<14, "r14", [R14D, R15D], [R14P]>;
-}
-
-// Floating-point registers
-def F0S : FPRS< 0, "f0">, DwarfRegNum<[16]>;
-def F1S : FPRS< 1, "f1">, DwarfRegNum<[17]>;
-def F2S : FPRS< 2, "f2">, DwarfRegNum<[18]>;
-def F3S : FPRS< 3, "f3">, DwarfRegNum<[19]>;
-def F4S : FPRS< 4, "f4">, DwarfRegNum<[20]>;
-def F5S : FPRS< 5, "f5">, DwarfRegNum<[21]>;
-def F6S : FPRS< 6, "f6">, DwarfRegNum<[22]>;
-def F7S : FPRS< 7, "f7">, DwarfRegNum<[23]>;
-def F8S : FPRS< 8, "f8">, DwarfRegNum<[24]>;
-def F9S : FPRS< 9, "f9">, DwarfRegNum<[25]>;
-def F10S : FPRS<10, "f10">, DwarfRegNum<[26]>;
-def F11S : FPRS<11, "f11">, DwarfRegNum<[27]>;
-def F12S : FPRS<12, "f12">, DwarfRegNum<[28]>;
-def F13S : FPRS<13, "f13">, DwarfRegNum<[29]>;
-def F14S : FPRS<14, "f14">, DwarfRegNum<[30]>;
-def F15S : FPRS<15, "f15">, DwarfRegNum<[31]>;
-
-let SubRegIndices = [subreg_32bit] in {
-def F0L : FPRL< 0, "f0", [F0S]>;
-def F1L : FPRL< 1, "f1", [F1S]>;
-def F2L : FPRL< 2, "f2", [F2S]>;
-def F3L : FPRL< 3, "f3", [F3S]>;
-def F4L : FPRL< 4, "f4", [F4S]>;
-def F5L : FPRL< 5, "f5", [F5S]>;
-def F6L : FPRL< 6, "f6", [F6S]>;
-def F7L : FPRL< 7, "f7", [F7S]>;
-def F8L : FPRL< 8, "f8", [F8S]>;
-def F9L : FPRL< 9, "f9", [F9S]>;
-def F10L : FPRL<10, "f10", [F10S]>;
-def F11L : FPRL<11, "f11", [F11S]>;
-def F12L : FPRL<12, "f12", [F12S]>;
-def F13L : FPRL<13, "f13", [F13S]>;
-def F14L : FPRL<14, "f14", [F14S]>;
-def F15L : FPRL<15, "f15", [F15S]>;
-}
-
-// Status register
-def PSW : SystemZReg<"psw">;
-
-/// Register classes.
-/// Allocate the callee-saved R6-R12 backwards. That way they can be saved
-/// together with R14 and R15 in one prolog instruction.
-def GR32 : RegisterClass<"SystemZ", [i32], 32, (add (sequence "R%uW", 0, 5),
- (sequence "R%uW", 15, 6))>;
-
-/// Registers used to generate addresses. Everything except R0.
-def ADDR32 : RegisterClass<"SystemZ", [i32], 32, (sub GR32, R0W)>;
-
-def GR64 : RegisterClass<"SystemZ", [i64], 64, (add (sequence "R%uD", 0, 5),
- (sequence "R%uD", 15, 6))> {
- let SubRegClasses = [(GR32 subreg_32bit)];
-}
-
-def ADDR64 : RegisterClass<"SystemZ", [i64], 64, (sub GR64, R0D)> {
- let SubRegClasses = [(ADDR32 subreg_32bit)];
-}
-
-// Even-odd register pairs
-def GR64P : RegisterClass<"SystemZ", [v2i32], 64, (add R0P, R2P, R4P,
- R12P, R10P, R8P, R6P,
- R14P)> {
- let SubRegClasses = [(GR32 subreg_32bit, subreg_odd32)];
-}
-
-def GR128 : RegisterClass<"SystemZ", [v2i64], 128, (add R0Q, R2Q, R4Q,
- R12Q, R10Q, R8Q, R6Q,
- R14Q)> {
- let SubRegClasses = [(GR32 subreg_32bit, subreg_odd32),
- (GR64 subreg_even, subreg_odd)];
-}
-
-def FP32 : RegisterClass<"SystemZ", [f32], 32, (sequence "F%uS", 0, 15)>;
-
-def FP64 : RegisterClass<"SystemZ", [f64], 64, (sequence "F%uL", 0, 15)> {
- let SubRegClasses = [(FP32 subreg_32bit)];
-}
-
-// Status flags registers.
-def CCR : RegisterClass<"SystemZ", [i64], 64, (add PSW)> {
- let CopyCost = -1; // Don't allow copying of status registers.
-}
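
The GR64P and GR128 classes deleted above build register pairs from an even register and its odd successor (R0Q = r0:r1, ..., R14Q = r14:r15). The numbering rule, as a standalone sketch with an illustrative type:

#include <cassert>

struct RegPair { unsigned Even, Odd; };

// A pair register numbered N (N even) occupies GPRs N and N+1, matching
// the R0P..R14P and R0Q..R14Q definitions above.
static RegPair pairRegs(unsigned N) {
  assert(N % 2 == 0 && N < 16 && "pairs start at an even GPR below 16");
  return RegPair{N, N + 1};
}

int main() {
  RegPair P = pairRegs(10);           // R10Q -> r10:r11
  assert(P.Even == 10 && P.Odd == 11);
  return 0;
}
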
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
deleted file mode 100644
index 3eabcd2..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- SystemZSelectionDAGInfo.cpp - SystemZ SelectionDAG Info -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the SystemZSelectionDAGInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "systemz-selectiondag-info"
-#include "SystemZTargetMachine.h"
-using namespace llvm;
-
-SystemZSelectionDAGInfo::SystemZSelectionDAGInfo(const SystemZTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
-
-SystemZSelectionDAGInfo::~SystemZSelectionDAGInfo() {
-}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
deleted file mode 100644
index 1450401..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//===-- SystemZSelectionDAGInfo.h - SystemZ SelectionDAG Info ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the SystemZ subclass for TargetSelectionDAGInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SYSTEMZSELECTIONDAGINFO_H
-#define SYSTEMZSELECTIONDAGINFO_H
-
-#include "llvm/Target/TargetSelectionDAGInfo.h"
-
-namespace llvm {
-
-class SystemZTargetMachine;
-
-class SystemZSelectionDAGInfo : public TargetSelectionDAGInfo {
-public:
- explicit SystemZSelectionDAGInfo(const SystemZTargetMachine &TM);
- ~SystemZSelectionDAGInfo();
-};
-
-}
-
-#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
deleted file mode 100644
index 0845510..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-//===- SystemZSubtarget.cpp - SystemZ Subtarget Information -------*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the SystemZ specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SystemZSubtarget.h"
-#include "SystemZ.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/TargetRegistry.h"
-
-#define GET_SUBTARGETINFO_TARGET_DESC
-#define GET_SUBTARGETINFO_CTOR
-#include "SystemZGenSubtargetInfo.inc"
-
-using namespace llvm;
-
-SystemZSubtarget::SystemZSubtarget(const std::string &TT,
- const std::string &CPU,
- const std::string &FS):
- SystemZGenSubtargetInfo(TT, CPU, FS), HasZ10Insts(false) {
- std::string CPUName = CPU;
- if (CPUName.empty())
- CPUName = "z9";
-
- // Parse features string.
- ParseSubtargetFeatures(CPUName, FS);
-}
-
-/// True if accessing the GV requires an extra load.
-bool SystemZSubtarget::GVRequiresExtraLoad(const GlobalValue* GV,
- const TargetMachine& TM,
- bool isDirectCall) const {
- if (TM.getRelocationModel() == Reloc::PIC_) {
- // An extra load is needed for all externally visible globals.
- if (isDirectCall)
- return false;
-
- if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
- return false;
-
- return true;
- }
-
- return false;
-}
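
GVRequiresExtraLoad above encodes the usual PIC rule: direct calls and symbols that cannot be preempted (local linkage or hidden visibility) bind directly, while other externally visible globals go through an extra load (a GOT slot). Restated as a standalone predicate; the struct and names are illustrative:

#include <cassert>

struct GlobalDesc {
  bool HasLocalLinkage;
  bool HasHiddenVisibility;
};

static bool requiresExtraLoad(const GlobalDesc &GV, bool IsPIC,
                              bool IsDirectCall) {
  if (!IsPIC)
    return false;                      // static code binds directly
  if (IsDirectCall)
    return false;                      // direct calls need no GOT load
  return !(GV.HasLocalLinkage || GV.HasHiddenVisibility);
}

int main() {
  assert(requiresExtraLoad({false, false}, true, false));   // visible data
  assert(!requiresExtraLoad({true, false}, true, false));   // local symbol
  assert(!requiresExtraLoad({false, false}, false, false)); // non-PIC
  return 0;
}
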
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
deleted file mode 100644
index 55cfd80..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
+++ /dev/null
@@ -1,48 +0,0 @@
-//==-- SystemZSubtarget.h - Define Subtarget for the SystemZ ---*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the SystemZ specific subclass of TargetSubtargetInfo.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_SystemZ_SUBTARGET_H
-#define LLVM_TARGET_SystemZ_SUBTARGET_H
-
-#include "llvm/Target/TargetSubtargetInfo.h"
-#include <string>
-
-#define GET_SUBTARGETINFO_HEADER
-#include "SystemZGenSubtargetInfo.inc"
-
-namespace llvm {
-class GlobalValue;
-class StringRef;
-class TargetMachine;
-
-class SystemZSubtarget : public SystemZGenSubtargetInfo {
- bool HasZ10Insts;
-public:
- /// This constructor initializes the data members to match that
- /// of the specified triple.
- ///
- SystemZSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS);
-
- /// ParseSubtargetFeatures - Parses features string setting specified
- /// subtarget options. Definition of function is auto generated by tblgen.
- void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
-
- bool isZ10() const { return HasZ10Insts; }
-
- bool GVRequiresExtraLoad(const GlobalValue* GV, const TargetMachine& TM,
- bool isDirectCall) const;
-};
-} // End llvm namespace
-
-#endif // LLVM_TARGET_SystemZ_SUBTARGET_H
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
deleted file mode 100644
index e390f06..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- SystemZTargetMachine.cpp - Define TargetMachine for SystemZ -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "SystemZTargetMachine.h"
-#include "SystemZ.h"
-#include "llvm/PassManager.h"
-#include "llvm/Support/TargetRegistry.h"
-using namespace llvm;
-
-extern "C" void LLVMInitializeSystemZTarget() {
- // Register the target.
- RegisterTargetMachine<SystemZTargetMachine> X(TheSystemZTarget);
-}
-
-/// SystemZTargetMachine ctor - Create an LP64 architecture model
-///
-SystemZTargetMachine::SystemZTargetMachine(const Target &T,
- StringRef TT, StringRef CPU,
- StringRef FS, Reloc::Model RM,
- CodeModel::Model CM)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
- Subtarget(TT, CPU, FS),
- DataLayout("E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32"
- "-f64:64:64-f128:128:128-a0:16:16-n32:64"),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameLowering(Subtarget) {
-}
-
-bool SystemZTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- // Install an instruction selector.
- PM.add(createSystemZISelDag(*this, OptLevel));
- return false;
-}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
deleted file mode 100644
index 43dce4b..0000000
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
+++ /dev/null
@@ -1,68 +0,0 @@
-//==- SystemZTargetMachine.h - Define TargetMachine for SystemZ ---*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the SystemZ specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-
-#ifndef LLVM_TARGET_SYSTEMZ_TARGETMACHINE_H
-#define LLVM_TARGET_SYSTEMZ_TARGETMACHINE_H
-
-#include "SystemZInstrInfo.h"
-#include "SystemZISelLowering.h"
-#include "SystemZFrameLowering.h"
-#include "SystemZSelectionDAGInfo.h"
-#include "SystemZRegisterInfo.h"
-#include "SystemZSubtarget.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameLowering.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-
-/// SystemZTargetMachine
-///
-class SystemZTargetMachine : public LLVMTargetMachine {
- SystemZSubtarget Subtarget;
- const TargetData DataLayout; // Calculates type size & alignment
- SystemZInstrInfo InstrInfo;
- SystemZTargetLowering TLInfo;
- SystemZSelectionDAGInfo TSInfo;
- SystemZFrameLowering FrameLowering;
-public:
- SystemZTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
-
- virtual const TargetFrameLowering *getFrameLowering() const {
- return &FrameLowering;
- }
- virtual const SystemZInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetData *getTargetData() const { return &DataLayout;}
- virtual const SystemZSubtarget *getSubtargetImpl() const { return &Subtarget; }
-
- virtual const SystemZRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
- }
-
- virtual const SystemZTargetLowering *getTargetLowering() const {
- return &TLInfo;
- }
-
- virtual const SystemZSelectionDAGInfo* getSelectionDAGInfo() const {
- return &TSInfo;
- }
-
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
-}; // SystemZTargetMachine.
-
-} // end namespace llvm
-
-#endif // LLVM_TARGET_SystemZ_TARGETMACHINE_H
diff --git a/contrib/llvm/lib/Target/TargetData.cpp b/contrib/llvm/lib/Target/TargetData.cpp
index bd6a6b6..acb7476 100644
--- a/contrib/llvm/lib/Target/TargetData.cpp
+++ b/contrib/llvm/lib/Target/TargetData.cpp
@@ -125,15 +125,15 @@ const TargetAlignElem TargetData::InvalidAlignmentElem =
//===----------------------------------------------------------------------===//
/// getInt - Get an integer ignoring errors.
-static unsigned getInt(StringRef R) {
- unsigned Result = 0;
+static int getInt(StringRef R) {
+ int Result = 0;
R.getAsInteger(10, Result);
return Result;
}
-void TargetData::init(StringRef Desc) {
+void TargetData::init() {
initializeTargetDataPass(*PassRegistry::getPassRegistry());
-
+
LayoutMap = 0;
LittleEndian = false;
PointerMemSize = 8;
@@ -147,11 +147,19 @@ void TargetData::init(StringRef Desc) {
setAlignment(INTEGER_ALIGN, 2, 2, 16); // i16
setAlignment(INTEGER_ALIGN, 4, 4, 32); // i32
setAlignment(INTEGER_ALIGN, 4, 8, 64); // i64
+ setAlignment(FLOAT_ALIGN, 2, 2, 16); // half
setAlignment(FLOAT_ALIGN, 4, 4, 32); // float
setAlignment(FLOAT_ALIGN, 8, 8, 64); // double
+ setAlignment(FLOAT_ALIGN, 16, 16, 128); // ppcf128, quad, ...
setAlignment(VECTOR_ALIGN, 8, 8, 64); // v2i32, v1i64, ...
setAlignment(VECTOR_ALIGN, 16, 16, 128); // v16i8, v8i16, v4i32, ...
setAlignment(AGGREGATE_ALIGN, 0, 8, 0); // struct
+}
+
+std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
+
+ if (td)
+ td->init();
while (!Desc.empty()) {
std::pair<StringRef, StringRef> Split = Desc.split('-');
@@ -169,28 +177,54 @@ void TargetData::init(StringRef Desc) {
switch (Specifier[0]) {
case 'E':
- LittleEndian = false;
+ if (td)
+ td->LittleEndian = false;
break;
case 'e':
- LittleEndian = true;
+ if (td)
+ td->LittleEndian = true;
break;
- case 'p':
+ case 'p': {
+ // Pointer size.
Split = Token.split(':');
- PointerMemSize = getInt(Split.first) / 8;
+ int PointerMemSizeBits = getInt(Split.first);
+ if (PointerMemSizeBits < 0 || PointerMemSizeBits % 8 != 0)
+ return "invalid pointer size, must be a positive 8-bit multiple";
+ if (td)
+ td->PointerMemSize = PointerMemSizeBits / 8;
+
+ // Pointer ABI alignment.
Split = Split.second.split(':');
- PointerABIAlign = getInt(Split.first) / 8;
+ int PointerABIAlignBits = getInt(Split.first);
+ if (PointerABIAlignBits < 0 || PointerABIAlignBits % 8 != 0) {
+ return "invalid pointer ABI alignment, "
+ "must be a positive 8-bit multiple";
+ }
+ if (td)
+ td->PointerABIAlign = PointerABIAlignBits / 8;
+
+ // Pointer preferred alignment.
Split = Split.second.split(':');
- PointerPrefAlign = getInt(Split.first) / 8;
- if (PointerPrefAlign == 0)
- PointerPrefAlign = PointerABIAlign;
+ int PointerPrefAlignBits = getInt(Split.first);
+ if (PointerPrefAlignBits < 0 || PointerPrefAlignBits % 8 != 0) {
+ return "invalid pointer preferred alignment, "
+ "must be a positive 8-bit multiple";
+ }
+ if (td) {
+ td->PointerPrefAlign = PointerPrefAlignBits / 8;
+ if (td->PointerPrefAlign == 0)
+ td->PointerPrefAlign = td->PointerABIAlign;
+ }
break;
+ }
case 'i':
case 'v':
case 'f':
case 'a':
case 's': {
AlignTypeEnum AlignType;
- switch (Specifier[0]) {
+ char field = Specifier[0];
+ switch (field) {
default:
case 'i': AlignType = INTEGER_ALIGN; break;
case 'v': AlignType = VECTOR_ALIGN; break;
@@ -198,37 +232,66 @@ void TargetData::init(StringRef Desc) {
case 'a': AlignType = AGGREGATE_ALIGN; break;
case 's': AlignType = STACK_ALIGN; break;
}
- unsigned Size = getInt(Specifier.substr(1));
+ int Size = getInt(Specifier.substr(1));
+ if (Size < 0) {
+ return std::string("invalid ") + field + "-size field, "
+ "must be positive";
+ }
+
Split = Token.split(':');
- unsigned ABIAlign = getInt(Split.first) / 8;
+ int ABIAlignBits = getInt(Split.first);
+ if (ABIAlignBits < 0 || ABIAlignBits % 8 != 0) {
+ return std::string("invalid ") + field +"-abi-alignment field, "
+ "must be a positive 8-bit multiple";
+ }
+ unsigned ABIAlign = ABIAlignBits / 8;
Split = Split.second.split(':');
- unsigned PrefAlign = getInt(Split.first) / 8;
+
+ int PrefAlignBits = getInt(Split.first);
+ if (PrefAlignBits < 0 || PrefAlignBits % 8 != 0) {
+ return std::string("invalid ") + field +"-preferred-alignment field, "
+ "must be a positive 8-bit multiple";
+ }
+ unsigned PrefAlign = PrefAlignBits / 8;
if (PrefAlign == 0)
PrefAlign = ABIAlign;
- setAlignment(AlignType, ABIAlign, PrefAlign, Size);
+
+ if (td)
+ td->setAlignment(AlignType, ABIAlign, PrefAlign, Size);
break;
}
case 'n': // Native integer types.
Specifier = Specifier.substr(1);
do {
- if (unsigned Width = getInt(Specifier))
- LegalIntWidths.push_back(Width);
+ int Width = getInt(Specifier);
+ if (Width <= 0) {
+ return std::string("invalid native integer size \'") + Specifier.str() +
+ "\', must be a positive integer.";
+ }
+ if (td && Width != 0)
+ td->LegalIntWidths.push_back(Width);
Split = Token.split(':');
Specifier = Split.first;
Token = Split.second;
} while (!Specifier.empty() || !Token.empty());
break;
- case 'S': // Stack natural alignment.
- StackNaturalAlign = getInt(Specifier.substr(1));
- StackNaturalAlign /= 8;
- // FIXME: Should we really be truncating these alingments and
- // sizes silently?
+ case 'S': { // Stack natural alignment.
+ int StackNaturalAlignBits = getInt(Specifier.substr(1));
+ if (StackNaturalAlignBits < 0 || StackNaturalAlignBits % 8 != 0) {
+ return "invalid natural stack alignment (S-field), "
+ "must be a positive 8-bit multiple";
+ }
+ if (td)
+ td->StackNaturalAlign = StackNaturalAlignBits / 8;
break;
+ }
default:
break;
}
}
+
+ return "";
}
/// Default ctor.
@@ -242,7 +305,9 @@ TargetData::TargetData() : ImmutablePass(ID) {
TargetData::TargetData(const Module *M)
: ImmutablePass(ID) {
- init(M->getDataLayout());
+ std::string errMsg = parseSpecifier(M->getDataLayout(), this);
+ assert(errMsg == "" && "Module M has malformed target data layout string.");
+ (void)errMsg;
}
void
@@ -308,7 +373,7 @@ unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
// If the alignment is not a power of 2, round up to the next power of 2.
// This happens for non-power-of-2 length vectors.
if (Align & (Align-1))
- Align = llvm::NextPowerOf2(Align);
+ Align = NextPowerOf2(Align);
return Align;
}
}
@@ -414,6 +479,8 @@ uint64_t TargetData::getTypeSizeInBits(Type *Ty) const {
return cast<IntegerType>(Ty)->getBitWidth();
case Type::VoidTyID:
return 8;
+ case Type::HalfTyID:
+ return 16;
case Type::FloatTyID:
return 32;
case Type::DoubleTyID:
@@ -430,9 +497,7 @@ uint64_t TargetData::getTypeSizeInBits(Type *Ty) const {
return cast<VectorType>(Ty)->getBitWidth();
default:
llvm_unreachable("TargetData::getTypeSizeInBits(): Unsupported type");
- break;
}
- return 0;
}
/*!
@@ -471,6 +536,7 @@ unsigned TargetData::getAlignment(Type *Ty, bool abi_or_pref) const {
case Type::VoidTyID:
AlignType = INTEGER_ALIGN;
break;
+ case Type::HalfTyID:
case Type::FloatTyID:
case Type::DoubleTyID:
// PPC_FP128TyID and FP128TyID have different data contents, but the
@@ -486,7 +552,6 @@ unsigned TargetData::getAlignment(Type *Ty, bool abi_or_pref) const {
break;
default:
llvm_unreachable("Bad type for getAlignment!!!");
- break;
}
return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
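
The rewritten parseSpecifier consumes the target description string one '-'-separated token at a time, then splits each token on ':', and now returns an error string instead of silently accepting malformed input. The tokenization discipline, reimplemented with std::string for illustration (LLVM itself uses StringRef::split):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

static std::pair<std::string, std::string> splitOnce(const std::string &S,
                                                     char Sep) {
  std::string::size_type Pos = S.find(Sep);
  if (Pos == std::string::npos)
    return std::make_pair(S, std::string());
  return std::make_pair(S.substr(0, Pos), S.substr(Pos + 1));
}

int main() {
  // A fragment of the SystemZ layout string seen earlier in this diff.
  std::string Desc = "E-p:64:64:64-i32:32:32";
  std::vector<std::string> Tokens;
  while (!Desc.empty()) {
    std::pair<std::string, std::string> Split = splitOnce(Desc, '-');
    Tokens.push_back(Split.first);
    Desc = Split.second;
  }
  assert(Tokens.size() == 3 && Tokens[0] == "E");     // endianness token
  std::pair<std::string, std::string> P = splitOnce(Tokens[1], ':');
  assert(P.first == "p" && P.second == "64:64:64");   // pointer spec
  return 0;
}
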
diff --git a/contrib/llvm/lib/Target/TargetInstrInfo.cpp b/contrib/llvm/lib/Target/TargetInstrInfo.cpp
index d52ecb3..440f9ad 100644
--- a/contrib/llvm/lib/Target/TargetInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetInstrInfo.cpp
@@ -13,7 +13,6 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/ErrorHandling.h"
@@ -73,23 +72,6 @@ TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
-int
-TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
- SDNode *DefNode, unsigned DefIdx,
- SDNode *UseNode, unsigned UseIdx) const {
- if (!ItinData || ItinData->isEmpty())
- return -1;
-
- if (!DefNode->isMachineOpcode())
- return -1;
-
- unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
- if (!UseNode->isMachineOpcode())
- return ItinData->getOperandCycle(DefClass, DefIdx);
- unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
- return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
-}
-
int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost) const {
@@ -99,17 +81,6 @@ int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}
-int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
- SDNode *N) const {
- if (!ItinData || ItinData->isEmpty())
- return 1;
-
- if (!N->isMachineOpcode())
- return 1;
-
- return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
-}
-
bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI,
unsigned DefIdx) const {
@@ -129,19 +100,6 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
}
-bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.isTerminator()) return false;
-
- // Conditional branch is a special case.
- if (MCID.isBranch() && !MCID.isBarrier())
- return true;
- if (!MCID.isPredicable())
- return true;
- return !isPredicated(MI);
-}
-
-
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
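
The isUnpredicatedTerminator removed above is a compact decision tree: non-terminators are rejected, a conditional branch (branch but not barrier) always counts, an unpredicable terminator always counts, and otherwise the answer is whether the instruction is currently unpredicated. Restated standalone, with an illustrative flags struct in place of MCInstrDesc:

#include <cassert>

struct InstrFlags {
  bool IsTerminator, IsBranch, IsBarrier, IsPredicable;
};

static bool isUnpredicatedTerminator(const InstrFlags &D,
                                     bool IsPredicated) {
  if (!D.IsTerminator)
    return false;
  if (D.IsBranch && !D.IsBarrier)
    return true;                 // conditional branch: special case
  if (!D.IsPredicable)
    return true;
  return !IsPredicated;
}

int main() {
  // A conditional branch is always an unpredicated terminator here.
  assert(isUnpredicatedTerminator({true, true, false, true}, true));
  // A predicated, predicable non-branch terminator is not.
  assert(!isUnpredicatedTerminator({true, false, true, true}, true));
  return 0;
}
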
diff --git a/contrib/llvm/lib/Target/TargetJITInfo.cpp b/contrib/llvm/lib/Target/TargetJITInfo.cpp
new file mode 100644
index 0000000..aafedf8
--- /dev/null
+++ b/contrib/llvm/lib/Target/TargetJITInfo.cpp
@@ -0,0 +1,14 @@
+//===- Target/TargetJITInfo.cpp - Target Information for JIT ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Target/TargetJITInfo.h"
+
+using namespace llvm;
+
+void TargetJITInfo::anchor() { }
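
This new file exists only to give TargetJITInfo an out-of-line virtual method: when every virtual function of a class is defined inline in its header, the compiler must emit the vtable in each translation unit that uses the class, so defining one dummy virtual in a single .cpp pins the vtable (and type info) to one object file. A minimal illustration of the idiom, with a made-up Widget class:

// In the header: declare one never-inline virtual "anchor".
struct Widget {
  virtual ~Widget() {}
  virtual int size() const { return 0; }
protected:
  virtual void anchor();  // declared here, defined in exactly one .cpp
};

// In a single Widget.cpp: the vtable gets emitted alongside this definition.
void Widget::anchor() {}

int main() {
  Widget W;
  return W.size();
}
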
diff --git a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
index 709dfd2..269958f 100644
--- a/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetLibraryInfo.cpp
@@ -20,6 +20,106 @@ INITIALIZE_PASS(TargetLibraryInfo, "targetlibinfo",
"Target Library Information", false, true)
char TargetLibraryInfo::ID = 0;
+void TargetLibraryInfo::anchor() { }
+
+const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
+ {
+ "acos",
+ "acosl",
+ "acosf",
+ "asin",
+ "asinl",
+ "asinf",
+ "atan",
+ "atanl",
+ "atanf",
+ "atan2",
+ "atan2l",
+ "atan2f",
+ "ceil",
+ "ceill",
+ "ceilf",
+ "copysign",
+ "copysignf",
+ "copysignl",
+ "cos",
+ "cosl",
+ "cosf",
+ "cosh",
+ "coshl",
+ "coshf",
+ "exp",
+ "expl",
+ "expf",
+ "exp2",
+ "exp2l",
+ "exp2f",
+ "expm1",
+ "expm1l",
+ "expl1f",
+ "fabs",
+ "fabsl",
+ "fabsf",
+ "floor",
+ "floorl",
+ "floorf",
+ "fiprintf",
+ "fmod",
+ "fmodl",
+ "fmodf",
+ "fputs",
+ "fwrite",
+ "iprintf",
+ "log",
+ "logl",
+ "logf",
+ "log2",
+ "log2l",
+ "log2f",
+ "log10",
+ "log10l",
+ "log10f",
+ "log1p",
+ "log1pl",
+ "log1pf",
+ "memcpy",
+ "memmove",
+ "memset",
+ "memset_pattern16",
+ "nearbyint",
+ "nearbyintf",
+ "nearbyintl",
+ "pow",
+ "powf",
+ "powl",
+ "rint",
+ "rintf",
+ "rintl",
+ "sin",
+ "sinl",
+ "sinf",
+ "sinh",
+ "sinhl",
+ "sinhf",
+ "siprintf",
+ "sqrt",
+ "sqrtl",
+ "sqrtf",
+ "tan",
+ "tanl",
+ "tanf",
+ "tanh",
+ "tanhl",
+ "tanhf",
+ "trunc",
+ "truncf",
+ "truncl",
+ "__cxa_atexit",
+ "__cxa_guard_abort",
+ "__cxa_guard_acquire",
+ "__cxa_guard_release"
+ };
+
/// initialize - Initialize the set of available library functions based on the
/// specified target triple. This should be carefully written so that a missing
/// target triple gets a sane set of defaults.
@@ -38,6 +138,17 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T) {
TLI.setUnavailable(LibFunc::memset_pattern16);
}
+ if (T.isMacOSX() && T.getArch() == Triple::x86 &&
+ !T.isMacOSXVersionLT(10, 7)) {
+ // x86-32 OSX has a scheme where fwrite and fputs (and some other functions
+ // we don't care about) have two versions; on recent OSX, the one we want
+ // has a $UNIX2003 suffix. The two implementations are identical except
+ // for the return value in some edge cases. However, we don't want to
+ // generate code that depends on the old symbols.
+ TLI.setAvailableWithName(LibFunc::fwrite, "fwrite$UNIX2003");
+ TLI.setAvailableWithName(LibFunc::fputs, "fputs$UNIX2003");
+ }
+
// iprintf and friends are only available on XCore and TCE.
if (T.getArch() != Triple::xcore && T.getArch() != Triple::tce) {
TLI.setUnavailable(LibFunc::iprintf);
@@ -64,6 +175,7 @@ TargetLibraryInfo::TargetLibraryInfo(const Triple &T) : ImmutablePass(ID) {
TargetLibraryInfo::TargetLibraryInfo(const TargetLibraryInfo &TLI)
: ImmutablePass(ID) {
memcpy(AvailableArray, TLI.AvailableArray, sizeof(AvailableArray));
+ CustomNames = TLI.CustomNames;
}
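
setAvailableWithName, used above for the $UNIX2003 variants, marks a library function as available but emitted under a different symbol name, which is why the copy constructor now has to copy CustomNames alongside AvailableArray. A sketch of such an availability-plus-rename table; the enum and field names are illustrative, not the LLVM internals:

#include <cassert>
#include <map>
#include <string>

enum class Avail { Unavailable, Available, AvailableWithName };

struct FuncRecord {
  Avail State;
  std::string CustomName;   // only meaningful for AvailableWithName
};

int main() {
  std::map<std::string, FuncRecord> Table;
  Table["fwrite"] = {Avail::AvailableWithName, "fwrite$UNIX2003"};
  Table["iprintf"] = {Avail::Unavailable, ""};

  const FuncRecord &FW = Table["fwrite"];
  assert(FW.State == Avail::AvailableWithName);
  assert(FW.CustomName == "fwrite$UNIX2003");  // symbol actually emitted
  return 0;
}
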
diff --git a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
index 56b7b69..2570e0d 100644
--- a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
+++ b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
@@ -28,7 +28,6 @@
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SmallString.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -48,7 +47,7 @@ void TargetLoweringObjectFile::Initialize(MCContext &ctx,
TargetLoweringObjectFile::~TargetLoweringObjectFile() {
}
-static bool isSuitableForBSS(const GlobalVariable *GV) {
+static bool isSuitableForBSS(const GlobalVariable *GV, bool NoZerosInBSS) {
const Constant *C = GV->getInitializer();
// Must have zero initializer.
@@ -73,31 +72,27 @@ static bool isSuitableForBSS(const GlobalVariable *GV) {
/// IsNullTerminatedString - Return true if the specified constant (which is
/// known to have a type that is an array of 1/2/4 byte elements) ends with a
-/// nul value and contains no other nuls in it.
+/// nul value and contains no other nuls in it. Note that this is more general
+/// than ConstantDataSequential::isString because we allow 2 & 4 byte strings.
static bool IsNullTerminatedString(const Constant *C) {
- ArrayType *ATy = cast<ArrayType>(C->getType());
-
- // First check: is we have constant array of i8 terminated with zero
- if (const ConstantArray *CVA = dyn_cast<ConstantArray>(C)) {
- if (ATy->getNumElements() == 0) return false;
-
- ConstantInt *Null =
- dyn_cast<ConstantInt>(CVA->getOperand(ATy->getNumElements()-1));
- if (Null == 0 || !Null->isZero())
+ // First check: is we have constant array terminated with zero
+ if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(C)) {
+ unsigned NumElts = CDS->getNumElements();
+ assert(NumElts != 0 && "Can't have an empty CDS");
+
+ if (CDS->getElementAsInteger(NumElts-1) != 0)
return false; // Not null terminated.
-
+
// Verify that the null doesn't occur anywhere else in the string.
- for (unsigned i = 0, e = ATy->getNumElements()-1; i != e; ++i)
- // Reject constantexpr elements etc.
- if (!isa<ConstantInt>(CVA->getOperand(i)) ||
- CVA->getOperand(i) == Null)
+ for (unsigned i = 0; i != NumElts-1; ++i)
+ if (CDS->getElementAsInteger(i) == 0)
return false;
return true;
}
// Another possibility: [1 x i8] zeroinitializer
if (isa<ConstantAggregateZero>(C))
- return ATy->getNumElements() == 1;
+ return cast<ArrayType>(C->getType())->getNumElements() == 1;
return false;
}
@@ -133,7 +128,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
// Handle thread-local data first.
if (GVar->isThreadLocal()) {
- if (isSuitableForBSS(GVar))
+ if (isSuitableForBSS(GVar, TM.Options.NoZerosInBSS))
return SectionKind::getThreadBSS();
return SectionKind::getThreadData();
}
@@ -143,7 +138,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
return SectionKind::getCommon();
// Variable can be easily put to BSS section.
- if (isSuitableForBSS(GVar)) {
+ if (isSuitableForBSS(GVar, TM.Options.NoZerosInBSS)) {
if (GVar->hasLocalLinkage())
return SectionKind::getBSSLocal();
else if (GVar->hasExternalLinkage())
@@ -160,7 +155,6 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
 // relocation, then we may have to drop this into a writable data section
// even though it is marked const.
switch (C->getRelocationInfo()) {
- default: assert(0 && "unknown relocation info kind");
case Constant::NoRelocation:
// If the global is required to have a unique address, it can't be put
// into a mergable section: just drop it into the general read-only
@@ -234,7 +228,6 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
return SectionKind::getDataNoRel();
switch (C->getRelocationInfo()) {
- default: assert(0 && "unknown relocation info kind");
case Constant::NoRelocation:
return SectionKind::getDataNoRel();
case Constant::LocalRelocation:
@@ -242,6 +235,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV,
case Constant::GlobalRelocations:
return SectionKind::getDataRel();
}
+ llvm_unreachable("Invalid relocation");
}
/// SectionForGlobal - This method computes the appropriate section to emit
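
The rewritten IsNullTerminatedString above applies to ConstantDataSequential the same rule the ConstantArray version enforced: the last element must be zero and no interior element may be. The element-width-agnostic check, restated over a plain buffer:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Works for 1-, 2- or 4-byte element strings alike, which is why the new
// code is more general than an i8-only isString test.
static bool isNullTerminated(const std::vector<uint32_t> &Elts) {
  if (Elts.empty() || Elts.back() != 0)
    return false;                        // not nul terminated
  for (std::size_t I = 0; I + 1 < Elts.size(); ++I)
    if (Elts[I] == 0)
      return false;                      // interior nul
  return true;
}

int main() {
  assert(isNullTerminated({'h', 'i', 0}));
  assert(!isNullTerminated({'h', 0, 'i', 0}));
  assert(!isNullTerminated({'h', 'i'}));
  return 0;
}
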
diff --git a/contrib/llvm/lib/Target/TargetMachine.cpp b/contrib/llvm/lib/Target/TargetMachine.cpp
index fe8a7ce..b9b2526 100644
--- a/contrib/llvm/lib/Target/TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/TargetMachine.cpp
@@ -11,11 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
@@ -24,153 +23,10 @@ using namespace llvm;
//
namespace llvm {
- bool LessPreciseFPMADOption;
- bool PrintMachineCode;
- bool NoFramePointerElim;
- bool NoFramePointerElimNonLeaf;
- bool NoExcessFPPrecision;
- bool UnsafeFPMath;
- bool NoInfsFPMath;
- bool NoNaNsFPMath;
- bool HonorSignDependentRoundingFPMathOption;
- bool UseSoftFloat;
- FloatABI::ABIType FloatABIType;
- bool NoImplicitFloat;
- bool NoZerosInBSS;
- bool JITExceptionHandling;
- bool JITEmitDebugInfo;
- bool JITEmitDebugInfoToDisk;
- bool GuaranteedTailCallOpt;
- unsigned StackAlignmentOverride;
- bool RealignStack;
- bool DisableJumpTables;
- bool StrongPHIElim;
bool HasDivModLibcall;
bool AsmVerbosityDefault(false);
- bool EnableSegmentedStacks;
}
-static cl::opt<bool, true>
-PrintCode("print-machineinstrs",
- cl::desc("Print generated machine code"),
- cl::location(PrintMachineCode), cl::init(false));
-static cl::opt<bool, true>
-DisableFPElim("disable-fp-elim",
- cl::desc("Disable frame pointer elimination optimization"),
- cl::location(NoFramePointerElim),
- cl::init(false));
-static cl::opt<bool, true>
-DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
- cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
- cl::location(NoFramePointerElimNonLeaf),
- cl::init(false));
-static cl::opt<bool, true>
-DisableExcessPrecision("disable-excess-fp-precision",
- cl::desc("Disable optimizations that may increase FP precision"),
- cl::location(NoExcessFPPrecision),
- cl::init(false));
-static cl::opt<bool, true>
-EnableFPMAD("enable-fp-mad",
- cl::desc("Enable less precise MAD instructions to be generated"),
- cl::location(LessPreciseFPMADOption),
- cl::init(false));
-static cl::opt<bool, true>
-EnableUnsafeFPMath("enable-unsafe-fp-math",
- cl::desc("Enable optimizations that may decrease FP precision"),
- cl::location(UnsafeFPMath),
- cl::init(false));
-static cl::opt<bool, true>
-EnableNoInfsFPMath("enable-no-infs-fp-math",
- cl::desc("Enable FP math optimizations that assume no +-Infs"),
- cl::location(NoInfsFPMath),
- cl::init(false));
-static cl::opt<bool, true>
-EnableNoNaNsFPMath("enable-no-nans-fp-math",
- cl::desc("Enable FP math optimizations that assume no NaNs"),
- cl::location(NoNaNsFPMath),
- cl::init(false));
-static cl::opt<bool, true>
-EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
- cl::Hidden,
- cl::desc("Force codegen to assume rounding mode can change dynamically"),
- cl::location(HonorSignDependentRoundingFPMathOption),
- cl::init(false));
-static cl::opt<bool, true>
-GenerateSoftFloatCalls("soft-float",
- cl::desc("Generate software floating point library calls"),
- cl::location(UseSoftFloat),
- cl::init(false));
-static cl::opt<llvm::FloatABI::ABIType, true>
-FloatABIForCalls("float-abi",
- cl::desc("Choose float ABI type"),
- cl::location(FloatABIType),
- cl::init(FloatABI::Default),
- cl::values(
- clEnumValN(FloatABI::Default, "default",
- "Target default float ABI type"),
- clEnumValN(FloatABI::Soft, "soft",
- "Soft float ABI (implied by -soft-float)"),
- clEnumValN(FloatABI::Hard, "hard",
- "Hard float ABI (uses FP registers)"),
- clEnumValEnd));
-static cl::opt<bool, true>
-DontPlaceZerosInBSS("nozero-initialized-in-bss",
- cl::desc("Don't place zero-initialized symbols into bss section"),
- cl::location(NoZerosInBSS),
- cl::init(false));
-static cl::opt<bool, true>
-EnableJITExceptionHandling("jit-enable-eh",
- cl::desc("Emit exception handling information"),
- cl::location(JITExceptionHandling),
- cl::init(false));
-// In debug builds, make this default to true.
-#ifdef NDEBUG
-#define EMIT_DEBUG false
-#else
-#define EMIT_DEBUG true
-#endif
-static cl::opt<bool, true>
-EmitJitDebugInfo("jit-emit-debug",
- cl::desc("Emit debug information to debugger"),
- cl::location(JITEmitDebugInfo),
- cl::init(EMIT_DEBUG));
-#undef EMIT_DEBUG
-static cl::opt<bool, true>
-EmitJitDebugInfoToDisk("jit-emit-debug-to-disk",
- cl::Hidden,
- cl::desc("Emit debug info objfiles to disk"),
- cl::location(JITEmitDebugInfoToDisk),
- cl::init(false));
-
-static cl::opt<bool, true>
-EnableGuaranteedTailCallOpt("tailcallopt",
- cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
- cl::location(GuaranteedTailCallOpt),
- cl::init(false));
-static cl::opt<unsigned, true>
-OverrideStackAlignment("stack-alignment",
- cl::desc("Override default stack alignment"),
- cl::location(StackAlignmentOverride),
- cl::init(0));
-static cl::opt<bool, true>
-EnableRealignStack("realign-stack",
- cl::desc("Realign stack if needed"),
- cl::location(RealignStack),
- cl::init(true));
-static cl::opt<bool, true>
-DisableSwitchTables(cl::Hidden, "disable-jump-tables",
- cl::desc("Do not generate jump tables."),
- cl::location(DisableJumpTables),
- cl::init(false));
-static cl::opt<bool, true>
-EnableStrongPHIElim(cl::Hidden, "strong-phi-elim",
- cl::desc("Use strong PHI elimination."),
- cl::location(StrongPHIElim),
- cl::init(false));
-static cl::opt<std::string>
-TrapFuncName("trap-func", cl::Hidden,
- cl::desc("Emit a call to trap function rather than a trap instruction"),
- cl::init(""));
static cl::opt<bool>
DataSections("fdata-sections",
cl::desc("Emit data into separate sections"),
@@ -179,29 +35,23 @@ static cl::opt<bool>
FunctionSections("ffunction-sections",
cl::desc("Emit functions into separate sections"),
cl::init(false));
-static cl::opt<bool, true>
-SegmentedStacks("segmented-stacks",
- cl::desc("Use segmented stacks if possible."),
- cl::location(EnableSegmentedStacks),
- cl::init(false));
-
+
//---------------------------------------------------------------------------
// TargetMachine Class
//
TargetMachine::TargetMachine(const Target &T,
- StringRef TT, StringRef CPU, StringRef FS)
+ StringRef TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options)
: TheTarget(T), TargetTriple(TT), TargetCPU(CPU), TargetFS(FS),
CodeGenInfo(0), AsmInfo(0),
MCRelaxAll(false),
MCNoExecStack(false),
MCSaveTempLabels(false),
MCUseLoc(true),
- MCUseCFI(true) {
- // Typically it will be subtargets that will adjust FloatABIType from Default
- // to Soft or Hard.
- if (UseSoftFloat)
- FloatABIType = FloatABI::Soft;
+ MCUseCFI(true),
+ MCUseDwarfDirectory(false),
+ Options(Options) {
}
TargetMachine::~TargetMachine() {
@@ -225,6 +75,35 @@ CodeModel::Model TargetMachine::getCodeModel() const {
return CodeGenInfo->getCodeModel();
}
+TLSModel::Model TargetMachine::getTLSModel(const GlobalValue *GV) const {
+ bool isLocal = GV->hasLocalLinkage();
+ bool isDeclaration = GV->isDeclaration();
+ // FIXME: what should we do for protected and internal visibility?
+ // For variables, is internal different from hidden?
+ bool isHidden = GV->hasHiddenVisibility();
+
+ if (getRelocationModel() == Reloc::PIC_ &&
+ !Options.PositionIndependentExecutable) {
+ if (isLocal || isHidden)
+ return TLSModel::LocalDynamic;
+ else
+ return TLSModel::GeneralDynamic;
+ } else {
+ if (!isDeclaration || isHidden)
+ return TLSModel::LocalExec;
+ else
+ return TLSModel::InitialExec;
+ }
+}
+
+/// getOptLevel - Returns the optimization level: None, Less,
+/// Default, or Aggressive.
+CodeGenOpt::Level TargetMachine::getOptLevel() const {
+ if (!CodeGenInfo)
+ return CodeGenOpt::Default;
+ return CodeGenInfo->getOptLevel();
+}
+
bool TargetMachine::getAsmVerbosityDefault() {
return AsmVerbosityDefault;
}
@@ -249,36 +128,3 @@ void TargetMachine::setDataSections(bool V) {
DataSections = V;
}
-namespace llvm {
- /// DisableFramePointerElim - This returns true if frame pointer elimination
- /// optimization should be disabled for the given machine function.
- bool DisableFramePointerElim(const MachineFunction &MF) {
- // Check to see if we should eliminate non-leaf frame pointers and then
- // check to see if we should eliminate all frame pointers.
- if (NoFramePointerElimNonLeaf && !NoFramePointerElim) {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return MFI->hasCalls();
- }
-
- return NoFramePointerElim;
- }
-
- /// LessPreciseFPMAD - This flag return true when -enable-fp-mad option
- /// is specified on the command line. When this flag is off(default), the
- /// code generator is not allowed to generate mad (multiply add) if the
- /// result is "less precise" than doing those operations individually.
- bool LessPreciseFPMAD() { return UnsafeFPMath || LessPreciseFPMADOption; }
-
- /// HonorSignDependentRoundingFPMath - Return true if the codegen must assume
- /// that the rounding mode of the FPU can change from its default.
- bool HonorSignDependentRoundingFPMath() {
- return !UnsafeFPMath && HonorSignDependentRoundingFPMathOption;
- }
-
- /// getTrapFunctionName - If this returns a non-empty string, this means isel
- /// should lower Intrinsic::trap to a call to the specified function name
- /// instead of an ISD::TRAP node.
- StringRef getTrapFunctionName() {
- return TrapFuncName;
- }
-}
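
The new getTLSModel chooses among the four standard ELF TLS models: PIC code gets a dynamic model (local-dynamic when the symbol is local or hidden), and non-PIC code gets an exec model (local-exec when the variable is defined in this module or hidden). Restated as a standalone table, leaving aside the PositionIndependentExecutable refinement; the enum is illustrative:

#include <cassert>

enum class TLSModel { GeneralDynamic, LocalDynamic, InitialExec, LocalExec };

static TLSModel pickTLSModel(bool IsPIC, bool IsLocal, bool IsHidden,
                             bool IsDeclaration) {
  if (IsPIC)
    return (IsLocal || IsHidden) ? TLSModel::LocalDynamic
                                 : TLSModel::GeneralDynamic;
  return (!IsDeclaration || IsHidden) ? TLSModel::LocalExec
                                      : TLSModel::InitialExec;
}

int main() {
  assert(pickTLSModel(true, true, false, false) == TLSModel::LocalDynamic);
  assert(pickTLSModel(true, false, false, true) == TLSModel::GeneralDynamic);
  assert(pickTLSModel(false, false, false, true) == TLSModel::InitialExec);
  assert(pickTLSModel(false, false, false, false) == TLSModel::LocalExec);
  return 0;
}
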
diff --git a/contrib/llvm/lib/Target/TargetMachineC.cpp b/contrib/llvm/lib/Target/TargetMachineC.cpp
new file mode 100644
index 0000000..d6bba8b
--- /dev/null
+++ b/contrib/llvm/lib/Target/TargetMachineC.cpp
@@ -0,0 +1,197 @@
+//===-- TargetMachineC.cpp ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LLVM-C part of TargetMachine.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/Core.h"
+#include "llvm-c/Target.h"
+#include "llvm-c/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+
+using namespace llvm;
+
+
+
+LLVMTargetRef LLVMGetFirstTarget() {
+ const Target* target = &*TargetRegistry::begin();
+ return wrap(target);
+}
+LLVMTargetRef LLVMGetNextTarget(LLVMTargetRef T) {
+ return wrap(unwrap(T)->getNext());
+}
+
+const char * LLVMGetTargetName(LLVMTargetRef T) {
+ return unwrap(T)->getName();
+}
+
+const char * LLVMGetTargetDescription(LLVMTargetRef T) {
+ return unwrap(T)->getShortDescription();
+}
+
+LLVMBool LLVMTargetHasJIT(LLVMTargetRef T) {
+ return unwrap(T)->hasJIT();
+}
+
+LLVMBool LLVMTargetHasTargetMachine(LLVMTargetRef T) {
+ return unwrap(T)->hasTargetMachine();
+}
+
+LLVMBool LLVMTargetHasAsmBackend(LLVMTargetRef T) {
+ return unwrap(T)->hasMCAsmBackend();
+}
+
+LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T, char* Triple,
+ char* CPU, char* Features, LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc,
+ LLVMCodeModel CodeModel) {
+ Reloc::Model RM;
+ switch (Reloc){
+ case LLVMRelocStatic:
+ RM = Reloc::Static;
+ break;
+ case LLVMRelocPIC:
+ RM = Reloc::PIC_;
+ break;
+ case LLVMRelocDynamicNoPic:
+ RM = Reloc::DynamicNoPIC;
+ break;
+ default:
+ RM = Reloc::Default;
+ break;
+ }
+
+ CodeModel::Model CM;
+ switch (CodeModel) {
+ case LLVMCodeModelJITDefault:
+ CM = CodeModel::JITDefault;
+ break;
+ case LLVMCodeModelSmall:
+ CM = CodeModel::Small;
+ break;
+ case LLVMCodeModelKernel:
+ CM = CodeModel::Kernel;
+ break;
+ case LLVMCodeModelMedium:
+ CM = CodeModel::Medium;
+ break;
+ case LLVMCodeModelLarge:
+ CM = CodeModel::Large;
+ break;
+ default:
+ CM = CodeModel::Default;
+ break;
+ }
+ CodeGenOpt::Level OL;
+
+ switch (Level) {
+ case LLVMCodeGenLevelNone:
+ OL = CodeGenOpt::None;
+ break;
+ case LLVMCodeGenLevelLess:
+ OL = CodeGenOpt::Less;
+ break;
+ case LLVMCodeGenLevelAggressive:
+ OL = CodeGenOpt::Aggressive;
+ break;
+ default:
+ OL = CodeGenOpt::Default;
+ break;
+ }
+
+ TargetOptions opt;
+ return wrap(unwrap(T)->createTargetMachine(Triple, CPU, Features, opt, RM,
+ CM, OL));
+}
+
+
+void LLVMDisposeTargetMachine(LLVMTargetMachineRef T) {
+ delete unwrap(T);
+}
+
+LLVMTargetRef LLVMGetTargetMachineTarget(LLVMTargetMachineRef T) {
+ const Target* target = &(unwrap(T)->getTarget());
+ return wrap(target);
+}
+
+char* LLVMGetTargetMachineTriple(LLVMTargetMachineRef T) {
+ std::string StringRep = unwrap(T)->getTargetTriple();
+ return strdup(StringRep.c_str());
+}
+
+char* LLVMGetTargetMachineCPU(LLVMTargetMachineRef T) {
+ std::string StringRep = unwrap(T)->getTargetCPU();
+ return strdup(StringRep.c_str());
+}
+
+char* LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T) {
+ std::string StringRep = unwrap(T)->getTargetFeatureString();
+ return strdup(StringRep.c_str());
+}
+
+LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) {
+ return wrap(unwrap(T)->getTargetData());
+}
+
+LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
+ char* Filename, LLVMCodeGenFileType codegen, char** ErrorMessage) {
+ TargetMachine* TM = unwrap(T);
+ Module* Mod = unwrap(M);
+
+ PassManager pass;
+
+ std::string error;
+
+ const TargetData* td = TM->getTargetData();
+
+ if (!td) {
+ error = "No TargetData in TargetMachine";
+ *ErrorMessage = strdup(error.c_str());
+ return true;
+ }
+ pass.add(new TargetData(*td));
+
+ TargetMachine::CodeGenFileType ft;
+ switch (codegen) {
+ case LLVMAssemblyFile:
+ ft = TargetMachine::CGFT_AssemblyFile;
+ break;
+ default:
+ ft = TargetMachine::CGFT_ObjectFile;
+ break;
+ }
+ raw_fd_ostream dest(Filename, error, raw_fd_ostream::F_Binary);
+ formatted_raw_ostream destf(dest);
+ if (!error.empty()) {
+ *ErrorMessage = strdup(error.c_str());
+ return true;
+ }
+
+ if (TM->addPassesToEmitFile(pass, destf, ft)) {
+ error = "No TargetData in TargetMachine";
+ *ErrorMessage = strdup(error.c_str());
+ return true;
+ }
+
+ pass.run(*Mod);
+
+ destf.flush();
+ dest.flush();
+ return false;
+}
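A minimal sketch of driving this new C API end to end, assuming the native
target and its MC components are already registered, Mod is an existing
LLVMModuleRef, and the stdio/stdlib headers are included; error handling is
trimmed to the essentials:

  char TripleStr[] = "x86_64-unknown-freebsd";
  char CPUStr[] = "generic", FeatStr[] = "";
  LLVMTargetRef Tgt = LLVMGetFirstTarget();   // first registered target
  LLVMTargetMachineRef TM = LLVMCreateTargetMachine(Tgt, TripleStr, CPUStr,
      FeatStr, LLVMCodeGenLevelDefault, LLVMRelocDefault,
      LLVMCodeModelDefault);
  char OutName[] = "out.o";
  char *Err = 0;
  if (LLVMTargetMachineEmitToFile(TM, Mod, OutName, LLVMObjectFile, &Err)) {
    fprintf(stderr, "emit failed: %s\n", Err);  // message was strdup'd above
    free(Err);
  }
  LLVMDisposeTargetMachine(TM);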
diff --git a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
index 67239b8..1716423 100644
--- a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
@@ -13,8 +13,6 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Support/raw_ostream.h"
@@ -73,7 +71,7 @@ TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, EVT VT) const {
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
const TargetRegisterClass *RC, BitVector &R){
- ArrayRef<unsigned> Order = RC->getRawAllocationOrder(MF);
+ ArrayRef<uint16_t> Order = RC->getRawAllocationOrder(MF);
for (unsigned i = 0; i != Order.size(); ++i)
R.set(Order[i]);
}
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
index 1eaccff..2794e60 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
@@ -14,7 +14,6 @@
#include "llvm/MC/MCTargetAsmLexer.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
using namespace llvm;
@@ -144,11 +143,7 @@ AsmToken X86AsmLexer::LexTokenIntel() {
SetError(Lexer->getErrLoc(), Lexer->getErr());
return lexedToken;
case AsmToken::Identifier: {
- std::string upperCase = lexedToken.getString().str();
- std::string lowerCase = LowercaseString(upperCase);
- StringRef lowerRef(lowerCase);
-
- unsigned regID = MatchRegisterName(lowerRef);
+ unsigned regID = MatchRegisterName(lexedToken.getString().lower());
if (regID)
return AsmToken(AsmToken::Register,
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index cb4f15f..08c732c 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -17,10 +17,8 @@
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/SourceMgr.h"
@@ -32,33 +30,47 @@ using namespace llvm;
namespace {
struct X86Operand;
-class X86ATTAsmParser : public MCTargetAsmParser {
+class X86AsmParser : public MCTargetAsmParser {
MCSubtargetInfo &STI;
MCAsmParser &Parser;
-
private:
MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
- bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
+ bool Error(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
+ return Parser.Error(L, Msg, Ranges);
+ }
+
+ X86Operand *ErrorOperand(SMLoc Loc, StringRef Msg) {
+ Error(Loc, Msg);
+ return 0;
+ }
X86Operand *ParseOperand();
+ X86Operand *ParseATTOperand();
+ X86Operand *ParseIntelOperand();
+ X86Operand *ParseIntelMemOperand();
+ X86Operand *ParseIntelBracExpression(unsigned SegReg, unsigned Size);
X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc);
bool ParseDirectiveWord(unsigned Size, SMLoc L);
bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
+ bool processInstruction(MCInst &Inst,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
+
bool MatchAndEmitInstruction(SMLoc IDLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCStreamer &Out);
/// isSrcOp - Returns true if operand is either (%rsi) or %ds:(%rsi)
- /// in 64bit mode or (%edi) or %es:(%edi) in 32bit mode.
+ /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode.
bool isSrcOp(X86Operand &Op);
- /// isDstOp - Returns true if operand is either %es:(%rdi) in 64bit mode
- /// or %es:(%edi) in 32bit mode.
+ /// isDstOp - Returns true if operand is either (%rdi) or %es:(%rdi)
+ /// in 64bit mode or (%edi) or %es:(%edi) in 32bit mode.
bool isDstOp(X86Operand &Op);
bool is64BitMode() const {
@@ -79,7 +91,7 @@ private:
/// }
public:
- X86ATTAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
+ X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
: MCTargetAsmParser(), STI(sti), Parser(parser) {
// Initialize the set of available features.
@@ -91,6 +103,10 @@ public:
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
virtual bool ParseDirective(AsmToken DirectiveID);
+
+ bool isParsingIntelSyntax() {
+ return getParser().getAssemblerDialect();
+ }
};
} // end anonymous namespace
@@ -101,6 +117,31 @@ static unsigned MatchRegisterName(StringRef Name);
/// }
+static bool isImmSExti16i8Value(uint64_t Value) {
+ return (( Value <= 0x000000000000007FULL)||
+ (0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)||
+ (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
+
+static bool isImmSExti32i8Value(uint64_t Value) {
+ return (( Value <= 0x000000000000007FULL)||
+ (0x00000000FFFFFF80ULL <= Value && Value <= 0x00000000FFFFFFFFULL)||
+ (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
+
+static bool isImmZExtu32u8Value(uint64_t Value) {
+ return (Value <= 0x00000000000000FFULL);
+}
+
+static bool isImmSExti64i8Value(uint64_t Value) {
+ return (( Value <= 0x000000000000007FULL)||
+ (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
+
+static bool isImmSExti64i32Value(uint64_t Value) {
+ return (( Value <= 0x000000007FFFFFFFULL)||
+ (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+}
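A worked example of the range trick above: immediates arrive as raw 64-bit
values, so each helper accepts both the truncated and the fully sign-extended
spelling of a negative number.

  // isImmSExti16i8Value(0xFFF0)             -> true  (-16 typed as 16 bits)
  // isImmSExti16i8Value(0xFFFFFFFFFFFFFFF0) -> true  (-16 sign-extended)
  // isImmSExti16i8Value(0x0080)             -> false (byte 0x80 extends to
  //                                                   0xFF80, not 0x0080)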
namespace {
/// X86Operand - Instances of this class represent a parsed X86 machine
@@ -135,6 +176,7 @@ struct X86Operand : public MCParsedAsmOperand {
unsigned BaseReg;
unsigned IndexReg;
unsigned Scale;
+ unsigned Size;
} Mem;
};
@@ -145,6 +187,8 @@ struct X86Operand : public MCParsedAsmOperand {
SMLoc getStartLoc() const { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const { return EndLoc; }
+
+ SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
virtual void print(raw_ostream &OS) const {}
@@ -205,10 +249,7 @@ struct X86Operand : public MCParsedAsmOperand {
// Otherwise, check the value is in a range that makes sense for this
// extension.
- uint64_t Value = CE->getValue();
- return (( Value <= 0x000000000000007FULL)||
- (0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isImmSExti16i8Value(CE->getValue());
}
bool isImmSExti32i8() const {
if (!isImm())
@@ -222,10 +263,7 @@ struct X86Operand : public MCParsedAsmOperand {
// Otherwise, check the value is in a range that makes sense for this
// extension.
- uint64_t Value = CE->getValue();
- return (( Value <= 0x000000000000007FULL)||
- (0x00000000FFFFFF80ULL <= Value && Value <= 0x00000000FFFFFFFFULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isImmSExti32i8Value(CE->getValue());
}
bool isImmZExtu32u8() const {
if (!isImm())
@@ -239,8 +277,7 @@ struct X86Operand : public MCParsedAsmOperand {
// Otherwise, check the value is in a range that makes sense for this
// extension.
- uint64_t Value = CE->getValue();
- return (Value <= 0x00000000000000FFULL);
+ return isImmZExtu32u8Value(CE->getValue());
}
bool isImmSExti64i8() const {
if (!isImm())
@@ -254,9 +291,7 @@ struct X86Operand : public MCParsedAsmOperand {
// Otherwise, check the value is in a range that makes sense for this
// extension.
- uint64_t Value = CE->getValue();
- return (( Value <= 0x000000000000007FULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isImmSExti64i8Value(CE->getValue());
}
bool isImmSExti64i32() const {
if (!isImm())
@@ -270,12 +305,31 @@ struct X86Operand : public MCParsedAsmOperand {
// Otherwise, check the value is in a range that makes sense for this
// extension.
- uint64_t Value = CE->getValue();
- return (( Value <= 0x000000007FFFFFFFULL)||
- (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isImmSExti64i32Value(CE->getValue());
}
bool isMem() const { return Kind == Memory; }
+ bool isMem8() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 8);
+ }
+ bool isMem16() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 16);
+ }
+ bool isMem32() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 32);
+ }
+ bool isMem64() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 64);
+ }
+ bool isMem80() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 80);
+ }
+ bool isMem128() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 128);
+ }
+ bool isMem256() const {
+ return Kind == Memory && (!Mem.Size || Mem.Size == 256);
+ }
bool isAbsMem() const {
return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
@@ -302,6 +356,28 @@ struct X86Operand : public MCParsedAsmOperand {
addExpr(Inst, getImm());
}
+ void addMem8Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem16Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem32Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem64Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem80Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem128Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+ void addMem256Operands(MCInst &Inst, unsigned N) const {
+ addMemOperands(Inst, N);
+ }
+
void addMemOperands(MCInst &Inst, unsigned N) const {
assert((N == 5) && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
@@ -313,11 +389,16 @@ struct X86Operand : public MCParsedAsmOperand {
void addAbsMemOperands(MCInst &Inst, unsigned N) const {
assert((N == 1) && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
+ // Add as immediates when possible.
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
}
static X86Operand *CreateToken(StringRef Str, SMLoc Loc) {
- X86Operand *Res = new X86Operand(Token, Loc, Loc);
+ SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size() - 1);
+ X86Operand *Res = new X86Operand(Token, Loc, EndLoc);
Res->Tok.Data = Str.data();
Res->Tok.Length = Str.size();
return Res;
@@ -337,20 +418,22 @@ struct X86Operand : public MCParsedAsmOperand {
/// Create an absolute memory operand.
static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc,
- SMLoc EndLoc) {
+ SMLoc EndLoc, unsigned Size = 0) {
X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
Res->Mem.SegReg = 0;
Res->Mem.Disp = Disp;
Res->Mem.BaseReg = 0;
Res->Mem.IndexReg = 0;
Res->Mem.Scale = 1;
+ Res->Mem.Size = Size;
return Res;
}
/// Create a generalized memory operand.
static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp,
unsigned BaseReg, unsigned IndexReg,
- unsigned Scale, SMLoc StartLoc, SMLoc EndLoc) {
+ unsigned Scale, SMLoc StartLoc, SMLoc EndLoc,
+ unsigned Size = 0) {
// We should never just have a displacement, that should be parsed as an
// absolute memory operand.
assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
@@ -364,13 +447,14 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.BaseReg = BaseReg;
Res->Mem.IndexReg = IndexReg;
Res->Mem.Scale = Scale;
+ Res->Mem.Size = Size;
return Res;
}
};
} // end anonymous namespace.
-bool X86ATTAsmParser::isSrcOp(X86Operand &Op) {
+bool X86AsmParser::isSrcOp(X86Operand &Op) {
unsigned basereg = is64BitMode() ? X86::RSI : X86::ESI;
return (Op.isMem() &&
@@ -380,32 +464,38 @@ bool X86ATTAsmParser::isSrcOp(X86Operand &Op) {
Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0);
}
-bool X86ATTAsmParser::isDstOp(X86Operand &Op) {
+bool X86AsmParser::isDstOp(X86Operand &Op) {
unsigned basereg = is64BitMode() ? X86::RDI : X86::EDI;
- return Op.isMem() && Op.Mem.SegReg == X86::ES &&
+ return Op.isMem() &&
+ (Op.Mem.SegReg == 0 || Op.Mem.SegReg == X86::ES) &&
isa<MCConstantExpr>(Op.Mem.Disp) &&
cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
Op.Mem.BaseReg == basereg && Op.Mem.IndexReg == 0;
}
-bool X86ATTAsmParser::ParseRegister(unsigned &RegNo,
- SMLoc &StartLoc, SMLoc &EndLoc) {
+bool X86AsmParser::ParseRegister(unsigned &RegNo,
+ SMLoc &StartLoc, SMLoc &EndLoc) {
RegNo = 0;
- const AsmToken &TokPercent = Parser.getTok();
- assert(TokPercent.is(AsmToken::Percent) && "Invalid token kind!");
- StartLoc = TokPercent.getLoc();
- Parser.Lex(); // Eat percent token.
+ if (!isParsingIntelSyntax()) {
+ const AsmToken &TokPercent = Parser.getTok();
+ assert(TokPercent.is(AsmToken::Percent) && "Invalid token kind!");
+ StartLoc = TokPercent.getLoc();
+ Parser.Lex(); // Eat percent token.
+ }
const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Identifier))
- return Error(Tok.getLoc(), "invalid register name");
+ if (Tok.isNot(AsmToken::Identifier)) {
+ if (isParsingIntelSyntax()) return true;
+ return Error(StartLoc, "invalid register name",
+ SMRange(StartLoc, Tok.getEndLoc()));
+ }
RegNo = MatchRegisterName(Tok.getString());
// If the match failed, try the register name as lowercase.
if (RegNo == 0)
- RegNo = MatchRegisterName(LowercaseString(Tok.getString()));
+ RegNo = MatchRegisterName(Tok.getString().lower());
if (!is64BitMode()) {
// FIXME: This should be done using Requires<In32BitMode> and
@@ -417,8 +507,9 @@ bool X86ATTAsmParser::ParseRegister(unsigned &RegNo,
X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
X86II::isX86_64NonExtLowByteReg(RegNo) ||
X86II::isX86_64ExtendedReg(RegNo))
- return Error(Tok.getLoc(), "register %"
- + Tok.getString() + " is only available in 64-bit mode");
+ return Error(StartLoc, "register %"
+ + Tok.getString() + " is only available in 64-bit mode",
+ SMRange(StartLoc, Tok.getEndLoc()));
}
// Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
@@ -478,15 +569,182 @@ bool X86ATTAsmParser::ParseRegister(unsigned &RegNo,
}
}
- if (RegNo == 0)
- return Error(Tok.getLoc(), "invalid register name");
+ if (RegNo == 0) {
+ if (isParsingIntelSyntax()) return true;
+ return Error(StartLoc, "invalid register name",
+ SMRange(StartLoc, Tok.getEndLoc()));
+ }
- EndLoc = Tok.getLoc();
+ EndLoc = Tok.getEndLoc();
Parser.Lex(); // Eat identifier token.
return false;
}
-X86Operand *X86ATTAsmParser::ParseOperand() {
+X86Operand *X86AsmParser::ParseOperand() {
+ if (isParsingIntelSyntax())
+ return ParseIntelOperand();
+ return ParseATTOperand();
+}
+
+/// getIntelMemOperandSize - Return the Intel memory operand size in bits.
+static unsigned getIntelMemOperandSize(StringRef OpStr) {
+ unsigned Size = 0;
+ if (OpStr == "BYTE") Size = 8;
+ if (OpStr == "WORD") Size = 16;
+ if (OpStr == "DWORD") Size = 32;
+ if (OpStr == "QWORD") Size = 64;
+ if (OpStr == "XWORD") Size = 80;
+ if (OpStr == "XMMWORD") Size = 128;
+ if (OpStr == "YMMWORD") Size = 256;
+ return Size;
+}
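The width recorded here feeds the new Mem.Size field, which the isMemN
predicates above use to keep differently sized Intel operands from matching
the same instruction form:

  // "BYTE PTR [eax]"  -> Size 8   (satisfies isMem8 only)
  // "DWORD PTR [eax]" -> Size 32  (satisfies isMem32 only)
  // "[eax]"           -> Size 0   (unsized: satisfies every isMemN predicate)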
+
+X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg,
+ unsigned Size) {
+ unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
+ SMLoc Start = Parser.getTok().getLoc(), End;
+
+ const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
+ // Parse [ BaseReg + Scale*IndexReg + Disp ] or [ symbol ]
+
+ // Eat '['
+ if (getLexer().isNot(AsmToken::LBrac))
+ return ErrorOperand(Start, "Expected '[' token!");
+ Parser.Lex();
+
+ if (getLexer().is(AsmToken::Identifier)) {
+ // Parse BaseReg
+ if (ParseRegister(BaseReg, Start, End)) {
+ // Handle '[' 'symbol' ']'
+ if (getParser().ParseExpression(Disp, End)) return 0;
+ if (getLexer().isNot(AsmToken::RBrac))
+ return ErrorOperand(Start, "Expected ']' token!");
+ Parser.Lex();
+ return X86Operand::CreateMem(Disp, Start, End, Size);
+ }
+ } else if (getLexer().is(AsmToken::Integer)) {
+ int64_t Val = Parser.getTok().getIntVal();
+ Parser.Lex();
+ SMLoc Loc = Parser.getTok().getLoc();
+ if (getLexer().is(AsmToken::RBrac)) {
+ // Handle '[' number ']'
+ Parser.Lex();
+ const MCExpr *Disp = MCConstantExpr::Create(Val, getContext());
+ if (SegReg)
+ return X86Operand::CreateMem(SegReg, Disp, 0, 0, Scale,
+ Start, End, Size);
+ return X86Operand::CreateMem(Disp, Start, End, Size);
+ } else if (getLexer().is(AsmToken::Star)) {
+ // Handle '[' Scale*IndexReg ']'
+ Parser.Lex();
+ SMLoc IdxRegLoc = Parser.getTok().getLoc();
+ if (ParseRegister(IndexReg, IdxRegLoc, End))
+ return ErrorOperand(IdxRegLoc, "Expected register");
+ Scale = Val;
+ } else
+ return ErrorOperand(Loc, "Unexpected token");
+ }
+
+ if (getLexer().is(AsmToken::Plus) || getLexer().is(AsmToken::Minus)) {
+ bool isPlus = getLexer().is(AsmToken::Plus);
+ Parser.Lex();
+ SMLoc PlusLoc = Parser.getTok().getLoc();
+ if (getLexer().is(AsmToken::Integer)) {
+ int64_t Val = Parser.getTok().getIntVal();
+ Parser.Lex();
+ if (getLexer().is(AsmToken::Star)) {
+ Parser.Lex();
+ SMLoc IdxRegLoc = Parser.getTok().getLoc();
+ if (ParseRegister(IndexReg, IdxRegLoc, End))
+ return ErrorOperand(IdxRegLoc, "Expected register");
+ Scale = Val;
+ } else if (getLexer().is(AsmToken::RBrac)) {
+ const MCExpr *ValExpr = MCConstantExpr::Create(Val, getContext());
+ Disp = isPlus ? ValExpr : MCConstantExpr::Create(0-Val, getContext());
+ } else
+ return ErrorOperand(PlusLoc, "unexpected token after +");
+ } else if (getLexer().is(AsmToken::Identifier)) {
+ // This could be an index register or a displacement expression.
+ End = Parser.getTok().getLoc();
+ if (!IndexReg)
+ ParseRegister(IndexReg, Start, End);
+ else if (getParser().ParseExpression(Disp, End)) return 0;
+ }
+ }
+
+ if (getLexer().isNot(AsmToken::RBrac))
+ if (getParser().ParseExpression(Disp, End)) return 0;
+
+ End = Parser.getTok().getLoc();
+ if (getLexer().isNot(AsmToken::RBrac))
+ return ErrorOperand(End, "Expected ']' token!");
+ Parser.Lex();
+ End = Parser.getTok().getLoc();
+
+ // handle [-42]
+ if (!BaseReg && !IndexReg)
+ return X86Operand::CreateMem(Disp, Start, End, Size);
+
+ return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
+ Start, End, Size);
+}
+
+/// ParseIntelMemOperand - Parse an Intel-syntax memory operand.
+X86Operand *X86AsmParser::ParseIntelMemOperand() {
+ const AsmToken &Tok = Parser.getTok();
+ SMLoc Start = Parser.getTok().getLoc(), End;
+ unsigned SegReg = 0;
+
+ unsigned Size = getIntelMemOperandSize(Tok.getString());
+ if (Size) {
+ Parser.Lex();
+ assert (Tok.getString() == "PTR" && "Unexpected token!");
+ Parser.Lex();
+ }
+
+ if (getLexer().is(AsmToken::LBrac))
+ return ParseIntelBracExpression(SegReg, Size);
+
+ if (!ParseRegister(SegReg, Start, End)) {
+ // Handle SegReg : [ ... ]
+ if (getLexer().isNot(AsmToken::Colon))
+ return ErrorOperand(Start, "Expected ':' token!");
+ Parser.Lex(); // Eat :
+ if (getLexer().isNot(AsmToken::LBrac))
+ return ErrorOperand(Start, "Expected '[' token!");
+ return ParseIntelBracExpression(SegReg, Size);
+ }
+
+ const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
+ if (getParser().ParseExpression(Disp, End)) return 0;
+ return X86Operand::CreateMem(Disp, Start, End, Size);
+}
+
+X86Operand *X86AsmParser::ParseIntelOperand() {
+ SMLoc Start = Parser.getTok().getLoc(), End;
+
+ // immediate.
+ if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) ||
+ getLexer().is(AsmToken::Minus)) {
+ const MCExpr *Val;
+ if (!getParser().ParseExpression(Val, End)) {
+ End = Parser.getTok().getLoc();
+ return X86Operand::CreateImm(Val, Start, End);
+ }
+ }
+
+ // register
+ unsigned RegNo = 0;
+ if (!ParseRegister(RegNo, Start, End)) {
+ End = Parser.getTok().getLoc();
+ return X86Operand::CreateReg(RegNo, Start, End);
+ }
+
+ // mem operand
+ return ParseIntelMemOperand();
+}
+
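Hypothetical Intel-syntax operands, traced through the three branches above:

  // mov eax, 42                        ; integer -> immediate operand
  // mov eax, ebx                       ; ParseRegister -> register operand
  // mov eax, DWORD PTR [ebx+4*ecx+8]   ; ParseIntelMemOperand -> Base=EBX,
  //                                    ; Index=ECX, Scale=4, Disp=8, Size=32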
+X86Operand *X86AsmParser::ParseATTOperand() {
switch (getLexer().getKind()) {
default:
// Parse a memory operand with no segment register.
@@ -497,7 +755,8 @@ X86Operand *X86ATTAsmParser::ParseOperand() {
SMLoc Start, End;
if (ParseRegister(RegNo, Start, End)) return 0;
if (RegNo == X86::EIZ || RegNo == X86::RIZ) {
- Error(Start, "%eiz and %riz can only be used as index registers");
+ Error(Start, "%eiz and %riz can only be used as index registers",
+ SMRange(Start, End));
return 0;
}
@@ -524,7 +783,7 @@ X86Operand *X86ATTAsmParser::ParseOperand() {
/// ParseMemOperand: segment: disp(basereg, indexreg, scale). The '%ds:' prefix
/// has already been parsed if present.
-X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
+X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
// We have to disambiguate a parenthesized expression "(4+5)" from the start
// of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The
@@ -579,18 +838,21 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
// If we reached here, then we just ate the ( of the memory operand. Process
// the rest of the memory operand.
unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
+ SMLoc IndexLoc;
if (getLexer().is(AsmToken::Percent)) {
- SMLoc L;
- if (ParseRegister(BaseReg, L, L)) return 0;
+ SMLoc StartLoc, EndLoc;
+ if (ParseRegister(BaseReg, StartLoc, EndLoc)) return 0;
if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) {
- Error(L, "eiz and riz can only be used as index registers");
+ Error(StartLoc, "eiz and riz can only be used as index registers",
+ SMRange(StartLoc, EndLoc));
return 0;
}
}
if (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
+ IndexLoc = Parser.getTok().getLoc();
// Following the comma we should have either an index register, or a scale
// value. We don't support the latter form, but we want to parse it
@@ -616,8 +878,10 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
SMLoc Loc = Parser.getTok().getLoc();
int64_t ScaleVal;
- if (getParser().ParseAbsoluteExpression(ScaleVal))
+ if (getParser().ParseAbsoluteExpression(ScaleVal)){
+ Error(Loc, "expected scale expression");
return 0;
+ }
// Validate the scale amount.
if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
@@ -650,11 +914,28 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
SMLoc MemEnd = Parser.getTok().getLoc();
Parser.Lex(); // Eat the ')'.
+ // If we have both a base register and an index register make sure they are
+ // both 64-bit or 32-bit registers.
+ if (BaseReg != 0 && IndexReg != 0) {
+ if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
+ !X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) &&
+ IndexReg != X86::RIZ) {
+ Error(IndexLoc, "index register is 32-bit, but base register is 64-bit");
+ return 0;
+ }
+ if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
+ !X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) &&
+ IndexReg != X86::EIZ){
+ Error(IndexLoc, "index register is 64-bit, but base register is 32-bit");
+ return 0;
+ }
+ }
+
return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
MemStart, MemEnd);
}
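The new width check rejects AT&T operands that mix 32- and 64-bit addressing
registers, for example:

  // movl (%rax,%ecx,4), %eax  ; error: index register is 32-bit, but base
  //                           ;        register is 64-bit
  // movl (%rax,%riz), %eax    ; still accepted: %riz exists only to force a
  //                           ; SIB byte and is exempted above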
-bool X86ATTAsmParser::
+bool X86AsmParser::
ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
StringRef PatchedName = Name;
@@ -669,20 +950,21 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
(PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
- bool IsVCMP = PatchedName.startswith("vcmp");
+ bool IsVCMP = PatchedName[0] == 'v';
unsigned SSECCIdx = IsVCMP ? 4 : 3;
unsigned SSEComparisonCode = StringSwitch<unsigned>(
PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
- .Case("eq", 0)
- .Case("lt", 1)
- .Case("le", 2)
- .Case("unord", 3)
- .Case("neq", 4)
- .Case("nlt", 5)
- .Case("nle", 6)
- .Case("ord", 7)
- .Case("eq_uq", 8)
- .Case("nge", 9)
+ .Case("eq", 0x00)
+ .Case("lt", 0x01)
+ .Case("le", 0x02)
+ .Case("unord", 0x03)
+ .Case("neq", 0x04)
+ .Case("nlt", 0x05)
+ .Case("nle", 0x06)
+ .Case("ord", 0x07)
+ /* AVX only from here */
+ .Case("eq_uq", 0x08)
+ .Case("nge", 0x09)
.Case("ngt", 0x0A)
.Case("false", 0x0B)
.Case("neq_oq", 0x0C)
@@ -706,7 +988,7 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
.Case("gt_oq", 0x1E)
.Case("true_us", 0x1F)
.Default(~0U);
- if (SSEComparisonCode != ~0U) {
+ if (SSEComparisonCode != ~0U && (IsVCMP || SSEComparisonCode < 8)) {
ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
getParser().getContext());
if (PatchedName.endswith("ss")) {
@@ -724,10 +1006,9 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
- if (ExtraImmOp)
+ if (ExtraImmOp && !isParsingIntelSyntax())
Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
-
// Determine whether this is an instruction prefix.
bool isPrefix =
Name == "lock" || Name == "rep" ||
@@ -781,6 +1062,9 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
else if (isPrefix && getLexer().is(AsmToken::Slash))
Parser.Lex(); // Consume the prefix separator Slash
+ if (ExtraImmOp && isParsingIntelSyntax())
+ Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
+
// This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
// "outb %al, %dx". Out doesn't take a memory form, but this is a widely
// documented form in various unofficial manuals, so a lot of code uses it.
@@ -916,11 +1200,21 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
Name.startswith("rcl") || Name.startswith("rcr") ||
Name.startswith("rol") || Name.startswith("ror")) &&
Operands.size() == 3) {
- X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
- if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
- cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
- delete Operands[1];
- Operands.erase(Operands.begin() + 1);
+ if (isParsingIntelSyntax()) {
+ // Intel syntax
+ X86Operand *Op1 = static_cast<X86Operand*>(Operands[2]);
+ if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
+ cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
+ delete Operands[2];
+ Operands.pop_back();
+ }
+ } else {
+ X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
+ if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
+ cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
+ delete Operands[1];
+ Operands.erase(Operands.begin() + 1);
+ }
}
}
@@ -939,7 +1233,246 @@ ParseInstruction(StringRef Name, SMLoc NameLoc,
return false;
}
-bool X86ATTAsmParser::
+bool X86AsmParser::
+processInstruction(MCInst &Inst,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Ops) {
+ switch (Inst.getOpcode()) {
+ default: return false;
+ case X86::AND16i16: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::AND16ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::AND32i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::AND32ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::AND64i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::AND64ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::XOR16i16: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::XOR16ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::XOR32i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::XOR32ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::XOR64i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::XOR64ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::OR16i16: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::OR16ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::OR32i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::OR32ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::OR64i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::OR64ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::CMP16i16: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::CMP16ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::CMP32i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::CMP32ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::CMP64i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::CMP64ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::ADD16i16: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::ADD16ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::ADD32i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::ADD32ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::ADD64i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::ADD64ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::SUB16i16: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::SUB16ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::AX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::SUB32i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::SUB32ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::EAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ case X86::SUB64i32: {
+ if (!Inst.getOperand(0).isImm() ||
+ !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
+ return false;
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::SUB64ri8);
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(MCOperand::CreateReg(X86::RAX));
+ TmpInst.addOperand(Inst.getOperand(0));
+ Inst = TmpInst;
+ return true;
+ }
+ }
+}
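The effect, taking AND32i32 as an example: "and $12, %eax" first matches the
eax/imm32 form, and since 12 passes isImmSExti32i8Value the case above swaps
in the shorter imm8 encoding. MatchAndEmitInstruction loops on
processInstruction, so one rewrite can enable another.

  // and $12, %eax   ; matched as AND32i32:    25 0c 00 00 00  (5 bytes)
  //                 ; rewritten to AND32ri8:  83 e0 0c        (3 bytes)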
+
+bool X86AsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCStreamer &Out) {
@@ -957,6 +1490,7 @@ MatchAndEmitInstruction(SMLoc IDLoc,
Op->getToken() == "fstenv" || Op->getToken() == "fclex") {
MCInst Inst;
Inst.setOpcode(X86::WAIT);
+ Inst.setLoc(IDLoc);
Out.EmitInstruction(Inst);
const char *Repl =
@@ -980,9 +1514,17 @@ MatchAndEmitInstruction(SMLoc IDLoc,
MCInst Inst;
// First, try a direct match.
- switch (MatchInstructionImpl(Operands, Inst, OrigErrorInfo)) {
+ switch (MatchInstructionImpl(Operands, Inst, OrigErrorInfo,
+ isParsingIntelSyntax())) {
default: break;
case Match_Success:
+ // Some instructions need post-processing to, for example, tweak which
+ // encoding is selected. Loop on it while changes happen so the
+ // individual transformations can chain off each other.
+ while (processInstruction(Inst, Operands))
+ ;
+
+ Inst.setLoc(IDLoc);
Out.EmitInstruction(Inst);
return false;
case Match_MissingFeature:
@@ -1040,6 +1582,7 @@ MatchAndEmitInstruction(SMLoc IDLoc,
(Match1 == Match_Success) + (Match2 == Match_Success) +
(Match3 == Match_Success) + (Match4 == Match_Success);
if (NumSuccessfulMatches == 1) {
+ Inst.setLoc(IDLoc);
Out.EmitInstruction(Inst);
return false;
}
@@ -1078,21 +1621,24 @@ MatchAndEmitInstruction(SMLoc IDLoc,
if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) &&
(Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) {
if (!WasOriginallyInvalidOperand) {
- Error(IDLoc, "invalid instruction mnemonic '" + Base + "'");
- return true;
+ return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
+ Op->getLocRange());
}
// Recover location info for the operand if we know which was the problem.
- SMLoc ErrorLoc = IDLoc;
if (OrigErrorInfo != ~0U) {
if (OrigErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((X86Operand*)Operands[OrigErrorInfo])->getStartLoc();
- if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ X86Operand *Operand = (X86Operand*)Operands[OrigErrorInfo];
+ if (Operand->getStartLoc().isValid()) {
+ SMRange OperandRange = Operand->getLocRange();
+ return Error(Operand->getStartLoc(), "invalid operand for instruction",
+ OperandRange);
+ }
}
- return Error(ErrorLoc, "invalid operand for instruction");
+ return Error(IDLoc, "invalid operand for instruction");
}
// If one instruction matched with a missing feature, report this as a
@@ -1112,24 +1658,34 @@ MatchAndEmitInstruction(SMLoc IDLoc,
}
// If all of these were an outright failure, report it in a useless way.
- // FIXME: We should give nicer diagnostics about the exact failure.
Error(IDLoc, "unknown use of instruction mnemonic without a size suffix");
return true;
}
-bool X86ATTAsmParser::ParseDirective(AsmToken DirectiveID) {
+bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getIdentifier();
if (IDVal == ".word")
return ParseDirectiveWord(2, DirectiveID.getLoc());
else if (IDVal.startswith(".code"))
return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
+ else if (IDVal.startswith(".intel_syntax")) {
+ getParser().setAssemblerDialect(1);
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if(Parser.getTok().getString() == "noprefix") {
+ // FIXME : Handle noprefix
+ Parser.Lex();
+ } else
+ return true;
+ }
+ return false;
+ }
return true;
}
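A hypothetical input exercising the new directive; the first line flips the
assembler dialect, so later operands take the Intel path in ParseOperand:

  .intel_syntax noprefix       # "noprefix" is consumed (handling is a FIXME)
  mov eax, DWORD PTR [ebx+8]   # parsed by ParseIntelOperand, not the ATT path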
/// ParseDirectiveWord
/// ::= .word [ expression (, expression)* ]
-bool X86ATTAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
+bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
const MCExpr *Value;
@@ -1154,7 +1710,7 @@ bool X86ATTAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
/// ParseDirectiveCode
/// ::= .code32 | .code64
-bool X86ATTAsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
+bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
if (IDVal == ".code32") {
Parser.Lex();
if (is64BitMode()) {
@@ -1179,8 +1735,8 @@ extern "C" void LLVMInitializeX86AsmLexer();
// Force static initialization.
extern "C" void LLVMInitializeX86AsmParser() {
- RegisterMCAsmParser<X86ATTAsmParser> X(TheX86_32Target);
- RegisterMCAsmParser<X86ATTAsmParser> Y(TheX86_64Target);
+ RegisterMCAsmParser<X86AsmParser> X(TheX86_32Target);
+ RegisterMCAsmParser<X86AsmParser> Y(TheX86_64Target);
LLVMInitializeX86AsmLexer();
}
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 3aacb20..8278bde 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -1,4 +1,4 @@
-//===- X86Disassembler.cpp - Disassembler for x86 and x86_64 ----*- C++ -*-===//
+//===-- X86Disassembler.cpp - Disassembler for x86 and x86_64 -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -18,9 +18,11 @@
#include "X86DisassemblerDecoder.h"
#include "llvm/MC/EDInstInfo.h"
-#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MemoryObject.h"
@@ -42,6 +44,11 @@ void x86DisassemblerDebug(const char *file,
dbgs() << file << ":" << line << ": " << s;
}
+const char *x86DisassemblerGetInstrName(unsigned Opcode, void *mii) {
+ const MCInstrInfo *MII = static_cast<const MCInstrInfo *>(mii);
+ return MII->getName(Opcode);
+}
+
#define debug(s) DEBUG(x86DisassemblerDebug(__FILE__, __LINE__, s));
namespace llvm {
@@ -65,17 +72,19 @@ extern Target TheX86_32Target, TheX86_64Target;
}
static bool translateInstruction(MCInst &target,
- InternalInstruction &source);
+ InternalInstruction &source,
+ const MCDisassembler *Dis);
-X86GenericDisassembler::X86GenericDisassembler(const MCSubtargetInfo &STI, DisassemblerMode mode) :
- MCDisassembler(STI),
- fMode(mode) {
-}
+X86GenericDisassembler::X86GenericDisassembler(const MCSubtargetInfo &STI,
+ DisassemblerMode mode,
+ const MCInstrInfo *MII)
+ : MCDisassembler(STI), MII(MII), fMode(mode) {}
X86GenericDisassembler::~X86GenericDisassembler() {
+ delete MII;
}
-EDInstInfo *X86GenericDisassembler::getEDInfo() const {
+const EDInstInfo *X86GenericDisassembler::getEDInfo() const {
return instInfoX86;
}
@@ -116,6 +125,8 @@ X86GenericDisassembler::getInstruction(MCInst &instr,
uint64_t address,
raw_ostream &vStream,
raw_ostream &cStream) const {
+ CommentStream = &cStream;
+
InternalInstruction internalInstr;
dlog_t loggerFn = logger;
@@ -127,6 +138,7 @@ X86GenericDisassembler::getInstruction(MCInst &instr,
(void*)&region,
loggerFn,
(void*)&vStream,
+ (void*)MII,
address,
fMode);
@@ -136,7 +148,8 @@ X86GenericDisassembler::getInstruction(MCInst &instr,
}
else {
size = internalInstr.length;
- return (!translateInstruction(instr, internalInstr)) ? Success : Fail;
+ return (!translateInstruction(instr, internalInstr, this)) ?
+ Success : Fail;
}
}
@@ -161,6 +174,140 @@ static void translateRegister(MCInst &mcInst, Reg reg) {
mcInst.addOperand(MCOperand::CreateReg(llvmRegnum));
}
+/// tryAddingSymbolicOperand - tries to add a symbolic operand in place of the
+/// immediate Value in the MCInst.
+///
+/// @param Value - The immediate Value, has had any PC adjustment made by
+/// the caller.
+/// @param isBranch - If the instruction is a branch instruction
+/// @param Address - The starting address of the instruction
+/// @param Offset - The byte offset to this immediate in the instruction
+/// @param Width - The byte width of this immediate in the instruction
+///
+/// If the getOpInfo() function was set when setupForSymbolicDisassembly() was
+/// called then that function is called to get any symbolic information for the
+/// immediate in the instruction using the Address, Offset and Width. If that
+/// returns non-zero then the symbolic information it returns is used to create
+/// an MCExpr and that is added as an operand to the MCInst. If getOpInfo()
+/// returns zero and isBranch is true then a symbol look up for immediate Value
+/// is done and if a symbol is found an MCExpr is created with that, else
+/// an MCExpr with the immediate Value is created. This function returns true
+/// if it adds an operand to the MCInst and false otherwise.
+static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
+ uint64_t Address, uint64_t Offset,
+ uint64_t Width, MCInst &MI,
+ const MCDisassembler *Dis) {
+ LLVMOpInfoCallback getOpInfo = Dis->getLLVMOpInfoCallback();
+ struct LLVMOpInfo1 SymbolicOp;
+ memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
+ SymbolicOp.Value = Value;
+ void *DisInfo = Dis->getDisInfoBlock();
+
+ if (!getOpInfo ||
+ !getOpInfo(DisInfo, Address, Offset, Width, 1, &SymbolicOp)) {
+ // Clear SymbolicOp.Value from above and also all other fields.
+ memset(&SymbolicOp, '\0', sizeof(struct LLVMOpInfo1));
+ LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback();
+ if (!SymbolLookUp)
+ return false;
+ uint64_t ReferenceType;
+ if (isBranch)
+ ReferenceType = LLVMDisassembler_ReferenceType_In_Branch;
+ else
+ ReferenceType = LLVMDisassembler_ReferenceType_InOut_None;
+ const char *ReferenceName;
+ const char *Name = SymbolLookUp(DisInfo, Value, &ReferenceType, Address,
+ &ReferenceName);
+ if (Name) {
+ SymbolicOp.AddSymbol.Name = Name;
+ SymbolicOp.AddSymbol.Present = true;
+ }
+ // For branches always create an MCExpr so it gets printed as hex address.
+ else if (isBranch) {
+ SymbolicOp.Value = Value;
+ }
+ if(ReferenceType == LLVMDisassembler_ReferenceType_Out_SymbolStub)
+ (*Dis->CommentStream) << "symbol stub for: " << ReferenceName;
+ if (!Name && !isBranch)
+ return false;
+ }
+
+ MCContext *Ctx = Dis->getMCContext();
+ const MCExpr *Add = NULL;
+ if (SymbolicOp.AddSymbol.Present) {
+ if (SymbolicOp.AddSymbol.Name) {
+ StringRef Name(SymbolicOp.AddSymbol.Name);
+ MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
+ Add = MCSymbolRefExpr::Create(Sym, *Ctx);
+ } else {
+ Add = MCConstantExpr::Create((int)SymbolicOp.AddSymbol.Value, *Ctx);
+ }
+ }
+
+ const MCExpr *Sub = NULL;
+ if (SymbolicOp.SubtractSymbol.Present) {
+ if (SymbolicOp.SubtractSymbol.Name) {
+ StringRef Name(SymbolicOp.SubtractSymbol.Name);
+ MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
+ Sub = MCSymbolRefExpr::Create(Sym, *Ctx);
+ } else {
+ Sub = MCConstantExpr::Create((int)SymbolicOp.SubtractSymbol.Value, *Ctx);
+ }
+ }
+
+ const MCExpr *Off = NULL;
+ if (SymbolicOp.Value != 0)
+ Off = MCConstantExpr::Create(SymbolicOp.Value, *Ctx);
+
+ const MCExpr *Expr;
+ if (Sub) {
+ const MCExpr *LHS;
+ if (Add)
+ LHS = MCBinaryExpr::CreateSub(Add, Sub, *Ctx);
+ else
+ LHS = MCUnaryExpr::CreateMinus(Sub, *Ctx);
+ if (Off != 0)
+ Expr = MCBinaryExpr::CreateAdd(LHS, Off, *Ctx);
+ else
+ Expr = LHS;
+ } else if (Add) {
+ if (Off != 0)
+ Expr = MCBinaryExpr::CreateAdd(Add, Off, *Ctx);
+ else
+ Expr = Add;
+ } else {
+ if (Off != 0)
+ Expr = Off;
+ else
+ Expr = MCConstantExpr::Create(0, *Ctx);
+ }
+
+ MI.addOperand(MCOperand::CreateExpr(Expr));
+
+ return true;
+}
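The expression assembled above, case by case (Add and Sub are the symbol
references, Off the residual constant):

  // Add, Sub, Off  ->  (Add - Sub) + Off
  // Sub, Off       ->  (-Sub) + Off
  // Add only       ->  Add
  // Off only       ->  Off
  // none           ->  0
  // e.g. a branch target resolving to "_foo" becomes MCSymbolRefExpr(_foo),
  // so the disassembly prints the symbol name instead of a raw address.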
+
+/// tryAddingPcLoadReferenceComment - tries to add a comment as to what is being
+/// referenced by a load instruction with the base register that is the rip.
+/// These can often be addresses in a literal pool. The Address of the
+/// instruction and its immediate Value are used to determine the address
+/// being referenced in the literal pool entry. The SymbolLookUp callback will
+/// return a pointer to a literal 'C' string if the referenced address is an
+/// address into a section with 'C' string literals.
+static void tryAddingPcLoadReferenceComment(uint64_t Address, uint64_t Value,
+ const void *Decoder) {
+ const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
+ LLVMSymbolLookupCallback SymbolLookUp = Dis->getLLVMSymbolLookupCallback();
+ if (SymbolLookUp) {
+ void *DisInfo = Dis->getDisInfoBlock();
+ uint64_t ReferenceType = LLVMDisassembler_ReferenceType_In_PCrel_Load;
+ const char *ReferenceName;
+ (void)SymbolLookUp(DisInfo, Value, &ReferenceType, Address, &ReferenceName);
+ if(ReferenceType == LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr)
+ (*Dis->CommentStream) << "literal pool for: " << ReferenceName;
+ }
+}
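A sketch of the arithmetic its caller performs (see translateRMMemory below),
assuming a 7-byte rip-relative load disassembled at address A:

  // movq 0x17(%rip), %rax   ; 48 8b 05 17 00 00 00
  // pcrel  = A + 3 + 4      ; startLocation + displacementOffset + size
  // lookup = A + 7 + 0x17   ; next instruction plus displacement; if this
  //                         ; lands in a C-string section the output gets
  //                         ; "literal pool for: <the string>"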
+
/// translateImmediate - Appends an immediate operand to an MCInst.
///
/// @param mcInst - The MCInst to append to.
@@ -169,10 +316,11 @@ static void translateRegister(MCInst &mcInst, Reg reg) {
/// @param insn - The internal instruction.
static void translateImmediate(MCInst &mcInst, uint64_t immediate,
const OperandSpecifier &operand,
- InternalInstruction &insn) {
+ InternalInstruction &insn,
+ const MCDisassembler *Dis) {
// Sign-extend the immediate if necessary.
- OperandType type = operand.type;
+ OperandType type = (OperandType)operand.type;
if (type == TYPE_RELv) {
switch (insn.displacementSize) {
@@ -225,6 +373,8 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
}
}
+ bool isBranch = false;
+ uint64_t pcrel = 0;
switch (type) {
case TYPE_XMM128:
mcInst.addOperand(MCOperand::CreateReg(X86::XMM0 + (immediate >> 4)));
@@ -232,8 +382,11 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
case TYPE_XMM256:
mcInst.addOperand(MCOperand::CreateReg(X86::YMM0 + (immediate >> 4)));
return;
- case TYPE_MOFFS8:
case TYPE_REL8:
+ isBranch = true;
+ pcrel = insn.startLocation + insn.immediateOffset + insn.immediateSize;
+ // fall through to sign extend the immediate if needed.
+ case TYPE_MOFFS8:
if(immediate & 0x80)
immediate |= ~(0xffull);
break;
@@ -241,9 +394,12 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
if(immediate & 0x8000)
immediate |= ~(0xffffull);
break;
- case TYPE_MOFFS32:
case TYPE_REL32:
case TYPE_REL64:
+ isBranch = true;
+ pcrel = insn.startLocation + insn.immediateOffset + insn.immediateSize;
+ // fall through to sign extend the immediate if needed.
+ case TYPE_MOFFS32:
if(immediate & 0x80000000)
immediate |= ~(0xffffffffull);
break;
@@ -253,7 +409,10 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate,
break;
}
- mcInst.addOperand(MCOperand::CreateImm(immediate));
+ if(!tryAddingSymbolicOperand(immediate + pcrel, isBranch, insn.startLocation,
+ insn.immediateOffset, insn.immediateSize,
+ mcInst, Dis))
+ mcInst.addOperand(MCOperand::CreateImm(immediate));
}
/// translateRMRegister - Translates a register stored in the R/M field of the
@@ -300,7 +459,8 @@ static bool translateRMRegister(MCInst &mcInst,
/// @param insn - The instruction to extract Mod, R/M, and SIB fields
/// from.
/// @return - 0 on success; nonzero otherwise
-static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn) {
+static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn,
+ const MCDisassembler *Dis) {
// Addresses in an MCInst are represented as five operands:
// 1. basereg (register) The R/M base, or (if there is a SIB) the
// SIB base
@@ -318,6 +478,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn) {
MCOperand indexReg;
MCOperand displacement;
MCOperand segmentReg;
+ uint64_t pcrel = 0;
if (insn.eaBase == EA_BASE_sib || insn.eaBase == EA_BASE_sib64) {
if (insn.sibBase != SIB_BASE_NONE) {
@@ -359,8 +520,14 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn) {
debug("EA_BASE_NONE and EA_DISP_NONE for ModR/M base");
return true;
}
- if (insn.mode == MODE_64BIT)
+ if (insn.mode == MODE_64BIT){
+ pcrel = insn.startLocation +
+ insn.displacementOffset + insn.displacementSize;
+ tryAddingPcLoadReferenceComment(insn.startLocation +
+ insn.displacementOffset,
+ insn.displacement + pcrel, Dis);
baseReg = MCOperand::CreateReg(X86::RIP); // Section 2.2.1.6
+ }
else
baseReg = MCOperand::CreateReg(0);
@@ -426,7 +593,10 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn) {
mcInst.addOperand(baseReg);
mcInst.addOperand(scaleAmount);
mcInst.addOperand(indexReg);
- mcInst.addOperand(displacement);
+ if(!tryAddingSymbolicOperand(insn.displacement + pcrel, false,
+ insn.startLocation, insn.displacementOffset,
+ insn.displacementSize, mcInst, Dis))
+ mcInst.addOperand(displacement);
mcInst.addOperand(segmentReg);
return false;
}
@@ -440,7 +610,7 @@ static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn) {
/// from.
/// @return - 0 on success; nonzero otherwise
static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
- InternalInstruction &insn) {
+ InternalInstruction &insn, const MCDisassembler *Dis) {
switch (operand.type) {
default:
debug("Unexpected type for a R/M operand");
@@ -480,7 +650,7 @@ static bool translateRM(MCInst &mcInst, const OperandSpecifier &operand,
case TYPE_M1632:
case TYPE_M1664:
case TYPE_LEA:
- return translateRMMemory(mcInst, insn);
+ return translateRMMemory(mcInst, insn, Dis);
}
}
@@ -510,7 +680,8 @@ static bool translateFPRegister(MCInst &mcInst,
/// @param insn - The internal instruction.
/// @return - false on success; true otherwise.
static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
- InternalInstruction &insn) {
+ InternalInstruction &insn,
+ const MCDisassembler *Dis) {
switch (operand.encoding) {
default:
debug("Unhandled operand encoding during translation");
@@ -519,7 +690,7 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
translateRegister(mcInst, insn.reg);
return false;
case ENCODING_RM:
- return translateRM(mcInst, operand, insn);
+ return translateRM(mcInst, operand, insn, Dis);
case ENCODING_CB:
case ENCODING_CW:
case ENCODING_CD:
@@ -537,7 +708,8 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
translateImmediate(mcInst,
insn.immediates[insn.numImmediatesTranslated++],
operand,
- insn);
+ insn,
+ Dis);
return false;
case ENCODING_RB:
case ENCODING_RW:
@@ -556,7 +728,7 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
case ENCODING_DUP:
return translateOperand(mcInst,
insn.spec->operands[operand.type - TYPE_DUP0],
- insn);
+ insn, Dis);
}
}
@@ -567,7 +739,8 @@ static bool translateOperand(MCInst &mcInst, const OperandSpecifier &operand,
/// @param insn - The internal instruction.
/// @return - false on success; true otherwise.
static bool translateInstruction(MCInst &mcInst,
- InternalInstruction &insn) {
+ InternalInstruction &insn,
+ const MCDisassembler *Dis) {
if (!insn.spec) {
debug("Instruction has no specification");
return true;
@@ -581,7 +754,7 @@ static bool translateInstruction(MCInst &mcInst,
for (index = 0; index < X86_MAX_OPERANDS; ++index) {
if (insn.spec->operands[index].encoding != ENCODING_NONE) {
- if (translateOperand(mcInst, insn.spec->operands[index], insn)) {
+ if (translateOperand(mcInst, insn.spec->operands[index], insn, Dis)) {
return true;
}
}
@@ -590,12 +763,16 @@ static bool translateInstruction(MCInst &mcInst,
return false;
}
-static MCDisassembler *createX86_32Disassembler(const Target &T, const MCSubtargetInfo &STI) {
- return new X86Disassembler::X86_32Disassembler(STI);
+static MCDisassembler *createX86_32Disassembler(const Target &T,
+ const MCSubtargetInfo &STI) {
+ return new X86Disassembler::X86GenericDisassembler(STI, MODE_32BIT,
+ T.createMCInstrInfo());
}
-static MCDisassembler *createX86_64Disassembler(const Target &T, const MCSubtargetInfo &STI) {
- return new X86Disassembler::X86_64Disassembler(STI);
+static MCDisassembler *createX86_64Disassembler(const Target &T,
+ const MCSubtargetInfo &STI) {
+ return new X86Disassembler::X86GenericDisassembler(STI, MODE_64BIT,
+ T.createMCInstrInfo());
}
extern "C" void LLVMInitializeX86Disassembler() {
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
index 6ac9a0f..c11f51c 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
@@ -1,4 +1,4 @@
-//===- X86Disassembler.h - Disassembler for x86 and x86_64 ------*- C++ -*-===//
+//===-- X86Disassembler.h - Disassembler for x86 and x86_64 -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -78,7 +78,7 @@
const char* name;
#define INSTRUCTION_IDS \
- const InstrUID *instructionIDs;
+ unsigned instructionIDs;
#include "X86DisassemblerDecoderCommon.h"
@@ -87,11 +87,10 @@
#include "llvm/MC/MCDisassembler.h"
-struct InternalInstruction;
-
namespace llvm {
class MCInst;
+class MCInstrInfo;
class MCSubtargetInfo;
class MemoryObject;
class raw_ostream;
@@ -104,13 +103,16 @@ namespace X86Disassembler {
/// All that each platform class should have to do is subclass the constructor, and
/// provide a different disassemblerMode value.
class X86GenericDisassembler : public MCDisassembler {
-protected:
+ const MCInstrInfo *MII;
+public:
/// Constructor - Initializes the disassembler.
///
/// @param mode - The X86 architecture mode to decode for.
- X86GenericDisassembler(const MCSubtargetInfo &STI, DisassemblerMode mode);
-public:
+ X86GenericDisassembler(const MCSubtargetInfo &STI, DisassemblerMode mode,
+ const MCInstrInfo *MII);
+private:
~X86GenericDisassembler();
+public:
/// getInstruction - See MCDisassembler.
DecodeStatus getInstruction(MCInst &instr,
@@ -121,37 +123,13 @@ public:
raw_ostream &cStream) const;
/// getEDInfo - See MCDisassembler.
- EDInstInfo *getEDInfo() const;
+ const EDInstInfo *getEDInfo() const;
private:
DisassemblerMode fMode;
};
-/// X86_16Disassembler - 16-bit X86 disassembler.
-class X86_16Disassembler : public X86GenericDisassembler {
-public:
- X86_16Disassembler(const MCSubtargetInfo &STI) :
- X86GenericDisassembler(STI, MODE_16BIT) {
- }
-};
-
-/// X86_16Disassembler - 32-bit X86 disassembler.
-class X86_32Disassembler : public X86GenericDisassembler {
-public:
- X86_32Disassembler(const MCSubtargetInfo &STI) :
- X86GenericDisassembler(STI, MODE_32BIT) {
- }
-};
-
-/// X86_16Disassembler - 64-bit X86 disassembler.
-class X86_64Disassembler : public X86GenericDisassembler {
-public:
- X86_64Disassembler(const MCSubtargetInfo &STI) :
- X86GenericDisassembler(STI, MODE_64BIT) {
- }
-};
-
} // namespace X86Disassembler
-
+
} // namespace llvm
-
+
#endif
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
index f9b0fe5..6020877 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
@@ -1,4 +1,4 @@
-/*===- X86DisassemblerDecoder.c - Disassembler decoder -------------*- C -*-==*
+/*===-- X86DisassemblerDecoder.c - Disassembler decoder ------------*- C -*-===*
*
* The LLVM Compiler Infrastructure
*
@@ -82,11 +82,9 @@ static int modRMRequired(OpcodeType type,
decision = &THREEBYTEA7_SYM;
break;
}
-
+
return decision->opcodeDecisions[insnContext].modRMDecisions[opcode].
modrm_type != MODRM_ONEENTRY;
-
- return 0;
}
/*
@@ -103,12 +101,9 @@ static InstrUID decode(OpcodeType type,
InstructionContext insnContext,
uint8_t opcode,
uint8_t modRM) {
- const struct ModRMDecision* dec;
+ const struct ModRMDecision* dec = 0;
switch (type) {
- default:
- debug("Unknown opcode type");
- return 0;
case ONEBYTE:
dec = &ONEBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
break;
@@ -134,14 +129,17 @@ static InstrUID decode(OpcodeType type,
debug("Corrupt table! Unknown modrm_type");
return 0;
case MODRM_ONEENTRY:
- return dec->instructionIDs[0];
+ return modRMTable[dec->instructionIDs];
case MODRM_SPLITRM:
if (modFromModRM(modRM) == 0x3)
- return dec->instructionIDs[1];
- else
- return dec->instructionIDs[0];
+ return modRMTable[dec->instructionIDs+1];
+ return modRMTable[dec->instructionIDs];
+ case MODRM_SPLITREG:
+ if (modFromModRM(modRM) == 0x3)
+ return modRMTable[dec->instructionIDs+((modRM & 0x38) >> 3)+8];
+ return modRMTable[dec->instructionIDs+((modRM & 0x38) >> 3)];
case MODRM_FULL:
- return dec->instructionIDs[modRM];
+ return modRMTable[dec->instructionIDs+modRM];
}
}
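
The rewritten decode() above replaces per-decision pointer arrays with base indices into a single flattened modRMTable. A small self-contained sketch of the MODRM_SPLITREG lookup, assuming the layout the patch implies (eight memory-form slots followed by eight register-form slots per decision; the table contents here are invented):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 16-slot region for one MODRM_SPLITREG decision:
   slots 0-7 hold memory forms, slots 8-15 hold register forms. */
static const uint16_t modRMTable[16] = {
  100, 101, 102, 103, 104, 105, 106, 107,  /* mod != 0b11 */
  200, 201, 202, 203, 204, 205, 206, 207   /* mod == 0b11 */
};

static uint16_t lookupSplitReg(unsigned base, uint8_t modRM) {
  unsigned reg = (modRM & 0x38) >> 3;  /* reg field picks the instruction */
  if ((modRM & 0xC0) == 0xC0)          /* modFromModRM(modRM) == 0x3      */
    return modRMTable[base + reg + 8];
  return modRMTable[base + reg];
}

int main(void) {
  printf("%u\n", (unsigned)lookupSplitReg(0, 0xD1)); /* mod=3, reg=2 -> 202 */
  printf("%u\n", (unsigned)lookupSplitReg(0, 0x11)); /* mod=0, reg=2 -> 102 */
  return 0;
}
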
@@ -314,6 +312,15 @@ static int readPrefixes(struct InternalInstruction* insn) {
if (consumeByte(insn, &byte))
return -1;
+
+ /*
+ * If the first byte is a LOCK prefix, break and let it be disassembled
+ * as a lock "instruction", by creating an <MCInst #xxxx LOCK_PREFIX>.
+ * FIXME: there is currently no way to get the disassembler to print the
+ * lock prefix if it is not the first byte.
+ */
+ if (insn->readerCursor - 1 == insn->startLocation && byte == 0xf0)
+ break;
switch (byte) {
case 0xf0: /* LOCK */
@@ -712,7 +719,7 @@ static BOOL is16BitEquvalent(const char* orig, const char* equiv) {
* @return - 0 if the ModR/M could be read when needed or was not needed;
* nonzero otherwise.
*/
-static int getID(struct InternalInstruction* insn) {
+static int getID(struct InternalInstruction* insn, void *miiArg) {
uint8_t attrMask;
uint16_t instructionID;
@@ -765,6 +772,8 @@ static int getID(struct InternalInstruction* insn) {
else {
if (isPrefixAtLocation(insn, 0x66, insn->necessaryPrefixLocation))
attrMask |= ATTR_OPSIZE;
+ else if (isPrefixAtLocation(insn, 0x67, insn->necessaryPrefixLocation))
+ attrMask |= ATTR_ADSIZE;
else if (isPrefixAtLocation(insn, 0xf3, insn->necessaryPrefixLocation))
attrMask |= ATTR_XS;
else if (isPrefixAtLocation(insn, 0xf2, insn->necessaryPrefixLocation))
@@ -773,17 +782,20 @@ static int getID(struct InternalInstruction* insn) {
if (insn->rexPrefix & 0x08)
attrMask |= ATTR_REXW;
-
+
if (getIDWithAttrMask(&instructionID, insn, attrMask))
return -1;
-
+
/* The following clauses compensate for limitations of the tables. */
-
- if ((attrMask & ATTR_VEXL) && (attrMask & ATTR_REXW)) {
+
+ if ((attrMask & ATTR_VEXL) && (attrMask & ATTR_REXW) &&
+ !(attrMask & ATTR_OPSIZE)) {
/*
* Some VEX instructions ignore the L-bit, but use the W-bit. Normally the L-bit
* has precedence, since the tables have no entries with both the L-bit and W-bit set.
* So if the L-bit isn't significant we should use the W-bit instead.
+ * We only need to do this if the instruction doesn't specify OpSize since
+ * there is a VEX_L_W_OPSIZE table.
*/
const struct InstructionSpecifier *spec;
@@ -823,7 +835,7 @@ static int getID(struct InternalInstruction* insn) {
const struct InstructionSpecifier *spec;
uint16_t instructionIDWithOpsize;
- const struct InstructionSpecifier *specWithOpsize;
+ const char *specName, *specWithOpSizeName;
spec = specifierForUID(instructionID);
@@ -840,11 +852,13 @@ static int getID(struct InternalInstruction* insn) {
return 0;
}
- specWithOpsize = specifierForUID(instructionIDWithOpsize);
-
- if (is16BitEquvalent(spec->name, specWithOpsize->name)) {
+ specName = x86DisassemblerGetInstrName(instructionID, miiArg);
+ specWithOpSizeName =
+ x86DisassemblerGetInstrName(instructionIDWithOpsize, miiArg);
+
+ if (is16BitEquvalent(specName, specWithOpSizeName)) {
insn->instructionID = instructionIDWithOpsize;
- insn->spec = specWithOpsize;
+ insn->spec = specifierForUID(instructionIDWithOpsize);
} else {
insn->instructionID = instructionID;
insn->spec = spec;
@@ -1011,6 +1025,7 @@ static int readDisplacement(struct InternalInstruction* insn) {
return 0;
insn->consumedDisplacement = TRUE;
+ insn->displacementOffset = insn->readerCursor - insn->startLocation;
switch (insn->eaDisplacement) {
case EA_DISP_NONE:
@@ -1407,6 +1422,7 @@ static int readImmediate(struct InternalInstruction* insn, uint8_t size) {
size = insn->immediateSize;
else
insn->immediateSize = size;
+ insn->immediateOffset = insn->readerCursor - insn->startLocation;
switch (size) {
case 1:
@@ -1469,6 +1485,7 @@ static int readVVVV(struct InternalInstruction* insn) {
static int readOperands(struct InternalInstruction* insn) {
int index;
int hasVVVV, needVVVV;
+ int sawRegImm = 0;
dbgprintf(insn, "readOperands()");
@@ -1497,11 +1514,25 @@ static int readOperands(struct InternalInstruction* insn) {
dbgprintf(insn, "We currently don't hande code-offset encodings");
return -1;
case ENCODING_IB:
+ if (sawRegImm) {
+ /* Saw a register immediate so don't read again and instead split the
+ previous immediate. FIXME: This is a hack. */
+ insn->immediates[insn->numImmediatesConsumed] =
+ insn->immediates[insn->numImmediatesConsumed - 1] & 0xf;
+ ++insn->numImmediatesConsumed;
+ break;
+ }
if (readImmediate(insn, 1))
return -1;
if (insn->spec->operands[index].type == TYPE_IMM3 &&
insn->immediates[insn->numImmediatesConsumed - 1] > 7)
return -1;
+ if (insn->spec->operands[index].type == TYPE_IMM5 &&
+ insn->immediates[insn->numImmediatesConsumed - 1] > 31)
+ return -1;
+ if (insn->spec->operands[index].type == TYPE_XMM128 ||
+ insn->spec->operands[index].type == TYPE_XMM256)
+ sawRegImm = 1;
break;
case ENCODING_IW:
if (readImmediate(insn, 2))
@@ -1593,6 +1624,7 @@ int decodeInstruction(struct InternalInstruction* insn,
void* readerArg,
dlog_t logger,
void* loggerArg,
+ void* miiArg,
uint64_t startLoc,
DisassemblerMode mode) {
memset(insn, 0, sizeof(struct InternalInstruction));
@@ -1608,7 +1640,7 @@ int decodeInstruction(struct InternalInstruction* insn,
if (readPrefixes(insn) ||
readOpcode(insn) ||
- getID(insn) ||
+ getID(insn, miiArg) ||
insn->instructionID == 0 ||
readOperands(insn))
return -1;
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index a9c90f8..fae309b 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -1,4 +1,4 @@
-/*===- X86DisassemblerDecoderInternal.h - Disassembler decoder -----*- C -*-==*
+/*===-- X86DisassemblerDecoderInternal.h - Disassembler decoder ---*- C -*-===*
*
* The LLVM Compiler Infrastructure
*
@@ -20,11 +20,10 @@
extern "C" {
#endif
-#define INSTRUCTION_SPECIFIER_FIELDS \
- const char* name;
+#define INSTRUCTION_SPECIFIER_FIELDS
#define INSTRUCTION_IDS \
- const InstrUID *instructionIDs;
+ unsigned instructionIDs;
#include "X86DisassemblerDecoderCommon.h"
@@ -460,6 +459,11 @@ struct InternalInstruction {
uint8_t addressSize;
uint8_t displacementSize;
uint8_t immediateSize;
+
+ /* Offsets from the start of the instruction to the pieces of data, which are
+ needed to find relocation entries for adding symbolic operands. */
+ uint8_t displacementOffset;
+ uint8_t immediateOffset;
/* opcode state */
@@ -554,6 +558,7 @@ int decodeInstruction(struct InternalInstruction* insn,
void* readerArg,
dlog_t logger,
void* loggerArg,
+ void* miiArg,
uint64_t startLoc,
DisassemblerMode mode);
@@ -568,6 +573,8 @@ void x86DisassemblerDebug(const char *file,
unsigned line,
const char *s);
+const char *x86DisassemblerGetInstrName(unsigned Opcode, void *mii);
+
#ifdef __cplusplus
}
#endif
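
decodeInstruction() now takes an opaque miiArg so the C decoder can hand instruction names back through x86DisassemblerGetInstrName() without seeing the C++ MCInstrInfo type. A toy illustration of that void*-context pattern (the names and table below are invented for the sketch, not the real API):

#include <stdio.h>

struct NameTable { const char *const *names; };

/* Plays the role of x86DisassemblerGetInstrName(): the context comes in
   as void* and is cast back on the side that knows the real type. */
static const char *getInstrName(unsigned opcode, void *ctx) {
  const struct NameTable *t = (const struct NameTable *)ctx;
  return t->names[opcode];
}

int main(void) {
  const char *names[] = { "invalid", "ADD32rr", "MOV64rm" };
  struct NameTable t = { names };
  void *miiArg = &t;                        /* opaque, like the patch's miiArg */
  printf("%s\n", getInstrName(2, miiArg));  /* prints MOV64rm */
  return 0;
}
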
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
index 8b79335..13e1136 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
@@ -1,4 +1,4 @@
-/*===- X86DisassemblerDecoderCommon.h - Disassembler decoder -------*- C -*-==*
+/*===-- X86DisassemblerDecoderCommon.h - Disassembler decoder -----*- C -*-===*
*
* The LLVM Compiler Infrastructure
*
@@ -54,8 +54,9 @@
ENUM_ENTRY(ATTR_XD, 0x04) \
ENUM_ENTRY(ATTR_REXW, 0x08) \
ENUM_ENTRY(ATTR_OPSIZE, 0x10) \
- ENUM_ENTRY(ATTR_VEX, 0x20) \
- ENUM_ENTRY(ATTR_VEXL, 0x40)
+ ENUM_ENTRY(ATTR_ADSIZE, 0x20) \
+ ENUM_ENTRY(ATTR_VEX, 0x40) \
+ ENUM_ENTRY(ATTR_VEXL, 0x80)
#define ENUM_ENTRY(n, v) n = v,
enum attributeBits {
@@ -77,6 +78,8 @@ enum attributeBits {
"64-bit mode but no more") \
ENUM_ENTRY(IC_OPSIZE, 3, "requires an OPSIZE prefix, so " \
"operands change width") \
+ ENUM_ENTRY(IC_ADSIZE, 3, "requires an ADSIZE prefix, so " \
+ "addresses change width") \
ENUM_ENTRY(IC_XD, 2, "may say something about the opcode " \
"but not the operands") \
ENUM_ENTRY(IC_XS, 2, "may say something about the opcode " \
@@ -88,6 +91,7 @@ enum attributeBits {
ENUM_ENTRY(IC_64BIT_REXW, 4, "requires a REX.W prefix, so operands "\
"change width; overrides IC_OPSIZE") \
ENUM_ENTRY(IC_64BIT_OPSIZE, 3, "Just as meaningful as IC_OPSIZE") \
+ ENUM_ENTRY(IC_64BIT_ADSIZE, 3, "Just as meaningful as IC_ADSIZE") \
ENUM_ENTRY(IC_64BIT_XD, 5, "XD instructions are SSE; REX.W is " \
"secondary") \
ENUM_ENTRY(IC_64BIT_XS, 5, "Just as meaningful as IC_64BIT_XD") \
@@ -111,7 +115,8 @@ enum attributeBits {
ENUM_ENTRY(IC_VEX_L, 3, "requires VEX and the L prefix") \
ENUM_ENTRY(IC_VEX_L_XS, 4, "requires VEX and the L and XS prefix")\
ENUM_ENTRY(IC_VEX_L_XD, 4, "requires VEX and the L and XD prefix")\
- ENUM_ENTRY(IC_VEX_L_OPSIZE, 4, "requires VEX, L, and OpSize")
+ ENUM_ENTRY(IC_VEX_L_OPSIZE, 4, "requires VEX, L, and OpSize") \
+ ENUM_ENTRY(IC_VEX_L_W_OPSIZE, 5, "requires VEX, L, W and OpSize")
#define ENUM_ENTRY(n, r, d) n,
@@ -155,6 +160,8 @@ typedef uint16_t InstrUID;
* MODRM_SPLITRM - If the ModR/M byte is between 0x00 and 0xbf, the opcode
* corresponds to one instruction; otherwise, it corresponds to
* a different instruction.
+ * MODRM_SPLITREG - ModR/M byte divided by 8 is used to select instruction. This
+ * corresponds to instructions that use the reg field as part of the opcode.
* MODRM_FULL - Potentially, each value of the ModR/M byte could correspond
* to a different instruction.
*/
@@ -162,6 +169,7 @@ typedef uint16_t InstrUID;
#define MODRMTYPES \
ENUM_ENTRY(MODRM_ONEENTRY) \
ENUM_ENTRY(MODRM_SPLITRM) \
+ ENUM_ENTRY(MODRM_SPLITREG) \
ENUM_ENTRY(MODRM_FULL)
#define ENUM_ENTRY(n) n,
@@ -265,6 +273,7 @@ struct ContextDecision {
ENUM_ENTRY(TYPE_IMM32, "4-byte") \
ENUM_ENTRY(TYPE_IMM64, "8-byte") \
ENUM_ENTRY(TYPE_IMM3, "1-byte immediate operand between 0 and 7") \
+ ENUM_ENTRY(TYPE_IMM5, "1-byte immediate operand between 0 and 31") \
ENUM_ENTRY(TYPE_RM8, "1-byte register or memory operand") \
ENUM_ENTRY(TYPE_RM16, "2-byte") \
ENUM_ENTRY(TYPE_RM32, "4-byte") \
@@ -335,8 +344,8 @@ typedef enum {
* operand.
*/
struct OperandSpecifier {
- OperandEncoding encoding;
- OperandType type;
+ uint8_t encoding;
+ uint8_t type;
};
/*
@@ -363,7 +372,7 @@ typedef enum {
* its operands.
*/
struct InstructionSpecifier {
- ModifierType modifierType;
+ uint8_t modifierType;
uint8_t modifierBase;
struct OperandSpecifier operands[X86_MAX_OPERANDS];
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
index 029d491..5118e4c 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
@@ -19,6 +19,8 @@
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"
@@ -26,14 +28,9 @@
using namespace llvm;
// Include the auto-generated portion of the assembly writer.
-#define GET_INSTRUCTION_NAME
#define PRINT_ALIAS_INSTR
#include "X86GenAsmWriter.inc"
-X86ATTInstPrinter::X86ATTInstPrinter(const MCAsmInfo &MAI)
- : MCInstPrinter(MAI) {
-}
-
void X86ATTInstPrinter::printRegName(raw_ostream &OS,
unsigned RegNo) const {
OS << '%' << getRegisterName(RegNo);
@@ -45,29 +42,50 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
if (!printAliasInstr(MI, OS))
printInstruction(MI, OS);
+ // Next, always print the annotation.
+ printAnnotation(OS, Annot);
+
// If verbose assembly is enabled, we can print some informative comments.
- if (CommentStream) {
- printAnnotation(OS, Annot);
+ if (CommentStream)
EmitAnyX86InstComments(MI, *CommentStream, getRegisterName);
- }
-}
-
-StringRef X86ATTInstPrinter::getOpcodeName(unsigned Opcode) const {
- return getInstructionName(Opcode);
}
void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
raw_ostream &O) {
switch (MI->getOperand(Op).getImm()) {
- default: assert(0 && "Invalid ssecc argument!");
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
+ default: llvm_unreachable("Invalid ssecc argument!");
+ case 0: O << "eq"; break;
+ case 1: O << "lt"; break;
+ case 2: O << "le"; break;
+ case 3: O << "unord"; break;
+ case 4: O << "neq"; break;
+ case 5: O << "nlt"; break;
+ case 6: O << "nle"; break;
+ case 7: O << "ord"; break;
+ case 8: O << "eq_uq"; break;
+ case 9: O << "nge"; break;
+ case 0xa: O << "ngt"; break;
+ case 0xb: O << "false"; break;
+ case 0xc: O << "neq_oq"; break;
+ case 0xd: O << "ge"; break;
+ case 0xe: O << "gt"; break;
+ case 0xf: O << "true"; break;
+ case 0x10: O << "eq_os"; break;
+ case 0x11: O << "lt_oq"; break;
+ case 0x12: O << "le_oq"; break;
+ case 0x13: O << "unord_s"; break;
+ case 0x14: O << "neq_us"; break;
+ case 0x15: O << "nlt_uq"; break;
+ case 0x16: O << "nle_uq"; break;
+ case 0x17: O << "ord_s"; break;
+ case 0x18: O << "eq_us"; break;
+ case 0x19: O << "nge_uq"; break;
+ case 0x1a: O << "ngt_uq"; break;
+ case 0x1b: O << "false_os"; break;
+ case 0x1c: O << "neq_os"; break;
+ case 0x1d: O << "ge_oq"; break;
+ case 0x1e: O << "gt_oq"; break;
+ case 0x1f: O << "true_us"; break;
}
}
@@ -79,11 +97,21 @@ void X86ATTInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
if (Op.isImm())
- // Print this as a signed 32-bit value.
- O << (int)Op.getImm();
+ O << Op.getImm();
else {
assert(Op.isExpr() && "unknown pcrel immediate operand");
- O << *Op.getExpr();
+ // If a symbolic branch target was added as a constant expression, then print
+ // that address in hex.
+ const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr());
+ int64_t Address;
+ if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
+ O << "0x";
+ O.write_hex(Address);
+ }
+ else {
+ // Otherwise, just print the expression.
+ O << *Op.getExpr();
+ }
}
}
@@ -97,7 +125,7 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
O << '$' << (int64_t)Op.getImm();
if (CommentStream && (Op.getImm() > 255 || Op.getImm() < -256))
- *CommentStream << format("imm = 0x%llX\n", (long long)Op.getImm());
+ *CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Op.getImm());
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
index 0293869..2e00bff 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
@@ -1,4 +1,4 @@
-//===-- X86ATTInstPrinter.h - Convert X86 MCInst to assembly syntax -------===//
+//==- X86ATTInstPrinter.h - Convert X86 MCInst to assembly syntax -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -22,11 +22,12 @@ class MCOperand;
class X86ATTInstPrinter : public MCInstPrinter {
public:
- X86ATTInstPrinter(const MCAsmInfo &MAI);
-
+ X86ATTInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot);
- virtual StringRef getOpcodeName(unsigned Opcode) const;
// Autogenerated by tblgen, returns true if we successfully printed an
// alias.
@@ -35,7 +36,6 @@ public:
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &OS);
static const char *getRegisterName(unsigned RegNo);
- static const char *getInstructionName(unsigned Opcode);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
index 8d85b95..f532019 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -29,11 +29,17 @@ using namespace llvm;
void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
const char *(*getRegName)(unsigned)) {
// If this is a shuffle operation, the switch should fill in this state.
- SmallVector<unsigned, 8> ShuffleMask;
+ SmallVector<int, 8> ShuffleMask;
const char *DestName = 0, *Src1Name = 0, *Src2Name = 0;
switch (MI->getOpcode()) {
case X86::INSERTPSrr:
+ Src1Name = getRegName(MI->getOperand(0).getReg());
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ DecodeINSERTPSMask(MI->getOperand(3).getImm(), ShuffleMask);
+ break;
+ case X86::VINSERTPSrr:
+ DestName = getRegName(MI->getOperand(0).getReg());
Src1Name = getRegName(MI->getOperand(1).getReg());
Src2Name = getRegName(MI->getOperand(2).getReg());
DecodeINSERTPSMask(MI->getOperand(3).getImm(), ShuffleMask);
@@ -44,34 +50,61 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
Src1Name = getRegName(MI->getOperand(0).getReg());
DecodeMOVLHPSMask(2, ShuffleMask);
break;
+ case X86::VMOVLHPSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVLHPSMask(2, ShuffleMask);
+ break;
case X86::MOVHLPSrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
Src1Name = getRegName(MI->getOperand(0).getReg());
DecodeMOVHLPSMask(2, ShuffleMask);
break;
+ case X86::VMOVHLPSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeMOVHLPSMask(2, ShuffleMask);
+ break;
case X86::PSHUFDri:
+ case X86::VPSHUFDri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::PSHUFDmi:
+ case X86::VPSHUFDmi:
DestName = getRegName(MI->getOperand(0).getReg());
- DecodePSHUFMask(4, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ DecodePSHUFMask(MVT::v4i32, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ break;
+ case X86::VPSHUFDYri:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPSHUFDYmi:
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodePSHUFMask(MVT::v8i32, MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
+
case X86::PSHUFHWri:
+ case X86::VPSHUFHWri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::PSHUFHWmi:
+ case X86::VPSHUFHWmi:
DestName = getRegName(MI->getOperand(0).getReg());
DecodePSHUFHWMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
break;
case X86::PSHUFLWri:
+ case X86::VPSHUFLWri:
Src1Name = getRegName(MI->getOperand(1).getReg());
// FALL THROUGH.
case X86::PSHUFLWmi:
+ case X86::VPSHUFLWmi:
DestName = getRegName(MI->getOperand(0).getReg());
DecodePSHUFLWMask(MI->getOperand(MI->getNumOperands()-1).getImm(),
ShuffleMask);
@@ -82,28 +115,92 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::PUNPCKHBWrm:
Src1Name = getRegName(MI->getOperand(0).getReg());
- DecodePUNPCKHMask(16, ShuffleMask);
+ DecodeUNPCKHMask(MVT::v16i8, ShuffleMask);
+ break;
+ case X86::VPUNPCKHBWrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHBWrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v16i8, ShuffleMask);
+ break;
+ case X86::VPUNPCKHBWYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHBWYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v32i8, ShuffleMask);
break;
case X86::PUNPCKHWDrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::PUNPCKHWDrm:
Src1Name = getRegName(MI->getOperand(0).getReg());
- DecodePUNPCKHMask(8, ShuffleMask);
+ DecodeUNPCKHMask(MVT::v8i16, ShuffleMask);
+ break;
+ case X86::VPUNPCKHWDrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHWDrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v8i16, ShuffleMask);
+ break;
+ case X86::VPUNPCKHWDYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHWDYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v16i16, ShuffleMask);
break;
case X86::PUNPCKHDQrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::PUNPCKHDQrm:
Src1Name = getRegName(MI->getOperand(0).getReg());
- DecodePUNPCKHMask(4, ShuffleMask);
+ DecodeUNPCKHMask(MVT::v4i32, ShuffleMask);
+ break;
+ case X86::VPUNPCKHDQrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHDQrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v4i32, ShuffleMask);
+ break;
+ case X86::VPUNPCKHDQYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHDQYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v8i32, ShuffleMask);
break;
case X86::PUNPCKHQDQrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::PUNPCKHQDQrm:
Src1Name = getRegName(MI->getOperand(0).getReg());
- DecodePUNPCKHMask(2, ShuffleMask);
+ DecodeUNPCKHMask(MVT::v2i64, ShuffleMask);
+ break;
+ case X86::VPUNPCKHQDQrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHQDQrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v2i64, ShuffleMask);
+ break;
+ case X86::VPUNPCKHQDQYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKHQDQYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKHMask(MVT::v4i64, ShuffleMask);
break;
case X86::PUNPCKLBWrr:
@@ -111,126 +208,284 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// FALL THROUGH.
case X86::PUNPCKLBWrm:
Src1Name = getRegName(MI->getOperand(0).getReg());
- DecodePUNPCKLBWMask(16, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v16i8, ShuffleMask);
+ break;
+ case X86::VPUNPCKLBWrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLBWrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v16i8, ShuffleMask);
+ break;
+ case X86::VPUNPCKLBWYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLBWYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v32i8, ShuffleMask);
break;
case X86::PUNPCKLWDrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::PUNPCKLWDrm:
Src1Name = getRegName(MI->getOperand(0).getReg());
- DecodePUNPCKLWDMask(8, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v8i16, ShuffleMask);
+ break;
+ case X86::VPUNPCKLWDrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLWDrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v8i16, ShuffleMask);
+ break;
+ case X86::VPUNPCKLWDYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLWDYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v16i16, ShuffleMask);
break;
case X86::PUNPCKLDQrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::PUNPCKLDQrm:
Src1Name = getRegName(MI->getOperand(0).getReg());
- DecodePUNPCKLDQMask(4, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v4i32, ShuffleMask);
+ break;
+ case X86::VPUNPCKLDQrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLDQrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v4i32, ShuffleMask);
+ break;
+ case X86::VPUNPCKLDQYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLDQYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v8i32, ShuffleMask);
break;
case X86::PUNPCKLQDQrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::PUNPCKLQDQrm:
Src1Name = getRegName(MI->getOperand(0).getReg());
- DecodePUNPCKLQDQMask(2, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v2i64, ShuffleMask);
+ break;
+ case X86::VPUNPCKLQDQrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLQDQrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v2i64, ShuffleMask);
+ break;
+ case X86::VPUNPCKLQDQYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPUNPCKLQDQYrm:
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ DecodeUNPCKLMask(MVT::v4i64, ShuffleMask);
break;
case X86::SHUFPDrri:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::SHUFPDrmi:
- DecodeSHUFPSMask(2, MI->getOperand(3).getImm(), ShuffleMask);
+ DecodeSHUFPMask(MVT::v2f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
Src1Name = getRegName(MI->getOperand(0).getReg());
break;
+ case X86::VSHUFPDrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VSHUFPDrmi:
+ DecodeSHUFPMask(MVT::v2f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VSHUFPDYrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VSHUFPDYrmi:
+ DecodeSHUFPMask(MVT::v4f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
case X86::SHUFPSrri:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::SHUFPSrmi:
- DecodeSHUFPSMask(4, MI->getOperand(3).getImm(), ShuffleMask);
+ DecodeSHUFPMask(MVT::v4f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
Src1Name = getRegName(MI->getOperand(0).getReg());
break;
+ case X86::VSHUFPSrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VSHUFPSrmi:
+ DecodeSHUFPMask(MVT::v4f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VSHUFPSYrri:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VSHUFPSYrmi:
+ DecodeSHUFPMask(MVT::v8f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
case X86::UNPCKLPDrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::UNPCKLPDrm:
- DecodeUNPCKLPDMask(2, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v2f64, ShuffleMask);
Src1Name = getRegName(MI->getOperand(0).getReg());
break;
case X86::VUNPCKLPDrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::VUNPCKLPDrm:
- DecodeUNPCKLPDMask(2, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v2f64, ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VUNPCKLPDYrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::VUNPCKLPDYrm:
- DecodeUNPCKLPDMask(4, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v4f64, ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::UNPCKLPSrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::UNPCKLPSrm:
- DecodeUNPCKLPSMask(4, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v4f32, ShuffleMask);
Src1Name = getRegName(MI->getOperand(0).getReg());
break;
case X86::VUNPCKLPSrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::VUNPCKLPSrm:
- DecodeUNPCKLPSMask(4, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v4f32, ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VUNPCKLPSYrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::VUNPCKLPSYrm:
- DecodeUNPCKLPSMask(8, ShuffleMask);
+ DecodeUNPCKLMask(MVT::v8f32, ShuffleMask);
Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::UNPCKHPDrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::UNPCKHPDrm:
- DecodeUNPCKHPMask(2, ShuffleMask);
+ DecodeUNPCKHMask(MVT::v2f64, ShuffleMask);
Src1Name = getRegName(MI->getOperand(0).getReg());
break;
+ case X86::VUNPCKHPDrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKHPDrm:
+ DecodeUNPCKHMask(MVT::v2f64, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKHPDYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKHPDYrm:
+ DecodeUNPCKHMask(MVT::v4f64, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
case X86::UNPCKHPSrr:
Src2Name = getRegName(MI->getOperand(2).getReg());
// FALL THROUGH.
case X86::UNPCKHPSrm:
- DecodeUNPCKHPMask(4, ShuffleMask);
+ DecodeUNPCKHMask(MVT::v4f32, ShuffleMask);
Src1Name = getRegName(MI->getOperand(0).getReg());
break;
+ case X86::VUNPCKHPSrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKHPSrm:
+ DecodeUNPCKHMask(MVT::v4f32, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
+ case X86::VUNPCKHPSYrr:
+ Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VUNPCKHPSYrm:
+ DecodeUNPCKHMask(MVT::v8f32, ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
+ break;
case X86::VPERMILPSri:
- DecodeVPERMILPSMask(4, MI->getOperand(2).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(0).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMILPSmi:
+ DecodePSHUFMask(MVT::v4f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VPERMILPSYri:
- DecodeVPERMILPSMask(8, MI->getOperand(2).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(0).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMILPSYmi:
+ DecodePSHUFMask(MVT::v8f32, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VPERMILPDri:
- DecodeVPERMILPDMask(2, MI->getOperand(2).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(0).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMILPDmi:
+ DecodePSHUFMask(MVT::v2f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VPERMILPDYri:
- DecodeVPERMILPDMask(4, MI->getOperand(2).getImm(),
- ShuffleMask);
- Src1Name = getRegName(MI->getOperand(0).getReg());
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ // FALL THROUGH.
+ case X86::VPERMILPDYmi:
+ DecodePSHUFMask(MVT::v4f64, MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
case X86::VPERM2F128rr:
- DecodeVPERM2F128Mask(MI->getOperand(3).getImm(), ShuffleMask);
- Src1Name = getRegName(MI->getOperand(1).getReg());
+ case X86::VPERM2I128rr:
Src2Name = getRegName(MI->getOperand(2).getReg());
+ // FALL THROUGH.
+ case X86::VPERM2F128rm:
+ case X86::VPERM2I128rm:
+ // For instruction comment purposes, assume the 256-bit vector is v4i64.
+ DecodeVPERM2X128Mask(MVT::v4i64,
+ MI->getOperand(MI->getNumOperands()-1).getImm(),
+ ShuffleMask);
+ Src1Name = getRegName(MI->getOperand(1).getReg());
+ DestName = getRegName(MI->getOperand(0).getReg());
break;
}
@@ -245,7 +500,7 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
if (Src1Name == Src2Name) {
for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) {
if ((int)ShuffleMask[i] >= 0 && // Not sentinel.
- ShuffleMask[i] >= e) // From second mask.
+ ShuffleMask[i] >= (int)e) // From second mask.
ShuffleMask[i] -= e;
}
}
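
The comment printer's mask convention, which the canonicalization above relies on: entries 0..e-1 select from the first source, e..2e-1 from the second, and negative entries are sentinels. A small sketch (the mask values are invented):

#include <stdio.h>

int main(void) {
  int ShuffleMask[4] = { 0, 5, -1, 3 };  /* e = 4 */
  int e = 4, i;
  for (i = 0; i < 4; ++i) {
    if (ShuffleMask[i] < 0) { printf("zero ");  continue; }  /* sentinel */
    if (ShuffleMask[i] < e)   printf("src1[%d] ", ShuffleMask[i]);
    else                      printf("src2[%d] ", ShuffleMask[i] - e);
  }
  printf("\n");  /* prints: src1[0] src2[1] zero src1[3] */
  return 0;
}
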
@@ -263,13 +518,13 @@ void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
// Otherwise, it must come from src1 or src2. Print the span of elements
// that comes from this src.
- bool isSrc1 = ShuffleMask[i] < ShuffleMask.size();
+ bool isSrc1 = ShuffleMask[i] < (int)ShuffleMask.size();
const char *SrcName = isSrc1 ? Src1Name : Src2Name;
OS << (SrcName ? SrcName : "mem") << '[';
bool IsFirst = true;
while (i != e &&
(int)ShuffleMask[i] >= 0 &&
- (ShuffleMask[i] < ShuffleMask.size()) == isSrc1) {
+ (ShuffleMask[i] < (int)ShuffleMask.size()) == isSrc1) {
if (!IsFirst)
OS << ',';
else
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.h
index 6b86db4..13fdf9a 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86InstComments.h
@@ -1,4 +1,4 @@
-//===-- X86InstComments.h - Generate verbose-asm comments for instrs ------===//
+//=- X86InstComments.h - Generate verbose-asm comments for instrs -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
index f9ab5ae..4ea662c 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
@@ -17,15 +17,13 @@
#include "X86InstComments.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include <cctype>
using namespace llvm;
-// Include the auto-generated portion of the assembly writer.
-#define GET_INSTRUCTION_NAME
#include "X86GenAsmWriter1.inc"
void X86IntelInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
@@ -35,29 +33,52 @@ void X86IntelInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const {
void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
StringRef Annot) {
printInstruction(MI, OS);
-
+
+ // Next, always print the annotation.
+ printAnnotation(OS, Annot);
+
// If verbose assembly is enabled, we can print some informative comments.
- if (CommentStream) {
- printAnnotation(OS, Annot);
+ if (CommentStream)
EmitAnyX86InstComments(MI, *CommentStream, getRegisterName);
- }
-}
-StringRef X86IntelInstPrinter::getOpcodeName(unsigned Opcode) const {
- return getInstructionName(Opcode);
}
void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op,
raw_ostream &O) {
switch (MI->getOperand(Op).getImm()) {
- default: assert(0 && "Invalid ssecc argument!");
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
+ default: llvm_unreachable("Invalid ssecc argument!");
+ case 0: O << "eq"; break;
+ case 1: O << "lt"; break;
+ case 2: O << "le"; break;
+ case 3: O << "unord"; break;
+ case 4: O << "neq"; break;
+ case 5: O << "nlt"; break;
+ case 6: O << "nle"; break;
+ case 7: O << "ord"; break;
+ case 8: O << "eq_uq"; break;
+ case 9: O << "nge"; break;
+ case 0xa: O << "ngt"; break;
+ case 0xb: O << "false"; break;
+ case 0xc: O << "neq_oq"; break;
+ case 0xd: O << "ge"; break;
+ case 0xe: O << "gt"; break;
+ case 0xf: O << "true"; break;
+ case 0x10: O << "eq_os"; break;
+ case 0x11: O << "lt_oq"; break;
+ case 0x12: O << "le_oq"; break;
+ case 0x13: O << "unord_s"; break;
+ case 0x14: O << "neq_us"; break;
+ case 0x15: O << "nlt_uq"; break;
+ case 0x16: O << "nle_uq"; break;
+ case 0x17: O << "ord_s"; break;
+ case 0x18: O << "eq_us"; break;
+ case 0x19: O << "nge_uq"; break;
+ case 0x1a: O << "ngt_uq"; break;
+ case 0x1b: O << "false_os"; break;
+ case 0x1c: O << "neq_os"; break;
+ case 0x1d: O << "ge_oq"; break;
+ case 0x1e: O << "gt_oq"; break;
+ case 0x1f: O << "true_us"; break;
}
}
@@ -70,7 +91,18 @@ void X86IntelInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo,
O << Op.getImm();
else {
assert(Op.isExpr() && "unknown pcrel immediate operand");
- O << *Op.getExpr();
+ // If a symbolic branch target was added as a constant expression, then print
+ // that address in hex.
+ const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr());
+ int64_t Address;
+ if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
+ O << "0x";
+ O.write_hex(Address);
+ }
+ else {
+ // Otherwise, just print the expression.
+ O << *Op.getExpr();
+ }
}
}
diff --git a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
index 6d5ec62..4f5938d 100644
--- a/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
@@ -1,4 +1,4 @@
-//===-- X86IntelInstPrinter.h - Convert X86 MCInst to assembly syntax -----===//
+//= X86IntelInstPrinter.h - Convert X86 MCInst to assembly syntax -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
@@ -23,17 +23,16 @@ class MCOperand;
class X86IntelInstPrinter : public MCInstPrinter {
public:
- X86IntelInstPrinter(const MCAsmInfo &MAI)
- : MCInstPrinter(MAI) {}
+ X86IntelInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot);
- virtual StringRef getOpcodeName(unsigned Opcode) const;
// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
- static const char *getInstructionName(unsigned Opcode);
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 69ad7d7..32e40fe 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -7,10 +7,9 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/MC/MCAsmBackend.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
-#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
@@ -37,18 +36,22 @@ MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
static unsigned getFixupKindLog2Size(unsigned Kind) {
switch (Kind) {
- default: assert(0 && "invalid fixup kind!");
+ default: llvm_unreachable("invalid fixup kind!");
case FK_PCRel_1:
+ case FK_SecRel_1:
case FK_Data_1: return 0;
case FK_PCRel_2:
+ case FK_SecRel_2:
case FK_Data_2: return 1;
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_signed_4byte:
case X86::reloc_global_offset_table:
+ case FK_SecRel_4:
case FK_Data_4: return 2;
case FK_PCRel_8:
+ case FK_SecRel_8:
case FK_Data_8: return 3;
}
}
@@ -57,9 +60,9 @@ namespace {
class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
- X86ELFObjectWriter(bool is64Bit, Triple::OSType OSType, uint16_t EMachine,
- bool HasRelocationAddend)
- : MCELFObjectTargetWriter(is64Bit, OSType, EMachine, HasRelocationAddend) {}
+ X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
+ bool HasRelocationAddend, bool foobar)
+ : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};
class X86AsmBackend : public MCAsmBackend {
@@ -87,7 +90,7 @@ public:
return Infos[Kind - FirstTargetFixupKind];
}
- void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
uint64_t Value) const {
unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
@@ -105,11 +108,16 @@ public:
Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}
- bool MayNeedRelaxation(const MCInst &Inst) const;
+ bool mayNeedRelaxation(const MCInst &Inst) const;
+
+ bool fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const;
- void RelaxInstruction(const MCInst &Inst, MCInst &Res) const;
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const;
- bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
};
} // end anonymous namespace
@@ -214,7 +222,7 @@ static unsigned getRelaxedOpcode(unsigned Op) {
return getRelaxedOpcodeBranch(Op);
}
-bool X86AsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
+bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
// Branches can always be relaxed.
if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
return true;
@@ -244,9 +252,17 @@ bool X86AsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
return hasExp && !hasRIP;
}
+bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCInstFragment *DF,
+ const MCAsmLayout &Layout) const {
+ // Relax if the value is too big for a (signed) i8.
+ return int64_t(Value) != int64_t(int8_t(Value));
+}
+
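
The new fixupNeedsRelaxation() uses a sign-extension round trip to test whether a fixup value still fits in a signed 8-bit field. A standalone check of the idiom:

#include <assert.h>
#include <stdint.h>

static int fitsInSignedI8(uint64_t Value) {
  /* Truncate to int8_t, widen back, and compare: equality holds exactly
     when the value is representable in [-128, 127]. */
  return (int64_t)Value == (int64_t)(int8_t)Value;
}

int main(void) {
  assert(fitsInSignedI8(127));             /* in range                 */
  assert(fitsInSignedI8((uint64_t)-128));  /* in range, sign-extended  */
  assert(!fitsInSignedI8(128));            /* needs the 4-byte form    */
  return 0;
}
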
// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
-void X86AsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
+void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
// The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
@@ -262,10 +278,10 @@ void X86AsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
Res.setOpcode(RelaxedOp);
}
-/// WriteNopData - Write optimal nops to the output file for the \arg Count
+/// writeNopData - Write optimal nops to the output file for the \arg Count
/// bytes. This returns the number of bytes written. It may return 0 if
/// the \arg Count is larger than the maximum number of optimal nops.
-bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
+bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
static const uint8_t Nops[10][10] = {
// nop
{0x90},
@@ -310,9 +326,9 @@ bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
namespace {
class ELFX86AsmBackend : public X86AsmBackend {
public:
- Triple::OSType OSType;
- ELFX86AsmBackend(const Target &T, Triple::OSType _OSType)
- : X86AsmBackend(T), OSType(_OSType) {
+ uint8_t OSABI;
+ ELFX86AsmBackend(const Target &T, uint8_t _OSABI)
+ : X86AsmBackend(T), OSABI(_OSABI) {
HasReliableSymbolDifference = true;
}
@@ -324,31 +340,21 @@ public:
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
- ELFX86_32AsmBackend(const Target &T, Triple::OSType OSType)
- : ELFX86AsmBackend(T, OSType) {}
+ ELFX86_32AsmBackend(const Target &T, uint8_t OSABI)
+ : ELFX86AsmBackend(T, OSABI) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createELFObjectWriter(createELFObjectTargetWriter(),
- OS, /*IsLittleEndian*/ true);
- }
-
- MCELFObjectTargetWriter *createELFObjectTargetWriter() const {
- return new X86ELFObjectWriter(false, OSType, ELF::EM_386, false);
+ return createX86ELFObjectWriter(OS, /*Is64Bit*/ false, OSABI);
}
};
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
- ELFX86_64AsmBackend(const Target &T, Triple::OSType OSType)
- : ELFX86AsmBackend(T, OSType) {}
+ ELFX86_64AsmBackend(const Target &T, uint8_t OSABI)
+ : ELFX86AsmBackend(T, OSABI) {}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createELFObjectWriter(createELFObjectTargetWriter(),
- OS, /*IsLittleEndian*/ true);
- }
-
- MCELFObjectTargetWriter *createELFObjectTargetWriter() const {
- return new X86ELFObjectWriter(true, OSType, ELF::EM_X86_64, true);
+ return createX86ELFObjectWriter(OS, /*Is64Bit*/ true, OSABI);
}
};
@@ -362,7 +368,7 @@ public:
}
MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
- return createWinCOFFObjectWriter(OS, Is64Bit);
+ return createX86WinCOFFObjectWriter(OS, Is64Bit);
}
};
@@ -442,7 +448,8 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, StringRef TT) {
if (TheTriple.isOSWindows())
return new WindowsX86AsmBackend(T, false);
- return new ELFX86_32AsmBackend(T, TheTriple.getOS());
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
+ return new ELFX86_32AsmBackend(T, OSABI);
}
MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT) {
@@ -454,5 +461,6 @@ MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT) {
if (TheTriple.isOSWindows())
return new WindowsX86AsmBackend(T, true);
- return new ELFX86_64AsmBackend(T, TheTriple.getOS());
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
+ return new ELFX86_64AsmBackend(T, OSABI);
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index e6ba705..a0bb6dc 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -19,7 +19,7 @@
#include "X86MCTargetDesc.h"
#include "llvm/Support/DataTypes.h"
-#include <cassert>
+#include "llvm/Support/ErrorHandling.h"
namespace llvm {
@@ -164,7 +164,13 @@ namespace X86II {
/// is some TLS offset from the picbase.
///
/// This is the 32-bit TLS offset for Darwin TLS in PIC mode.
- MO_TLVP_PIC_BASE
+ MO_TLVP_PIC_BASE,
+
+ /// MO_SECREL - On a symbol operand this indicates that the immediate is
+ /// the offset from the beginning of the section.
+ ///
+ /// This is the TLS offset for the COFF/Windows TLS mechanism.
+ MO_SECREL
};
enum {
@@ -223,19 +229,13 @@ namespace X86II {
// destinations are the same register.
MRMInitReg = 32,
- //// MRM_C1 - A mod/rm byte of exactly 0xC1.
- MRM_C1 = 33,
- MRM_C2 = 34,
- MRM_C3 = 35,
- MRM_C4 = 36,
- MRM_C8 = 37,
- MRM_C9 = 38,
- MRM_E8 = 39,
- MRM_F0 = 40,
- MRM_F8 = 41,
- MRM_F9 = 42,
- MRM_D0 = 45,
- MRM_D1 = 46,
+ //// MRM_XX - A mod/rm byte of exactly 0xXX.
+ MRM_C1 = 33, MRM_C2 = 34, MRM_C3 = 35, MRM_C4 = 36,
+ MRM_C8 = 37, MRM_C9 = 38, MRM_E8 = 39, MRM_F0 = 40,
+ MRM_F8 = 41, MRM_F9 = 42, MRM_D0 = 45, MRM_D1 = 46,
+ MRM_D4 = 47, MRM_D8 = 48, MRM_D9 = 49, MRM_DA = 50,
+ MRM_DB = 51, MRM_DC = 52, MRM_DD = 53, MRM_DE = 54,
+ MRM_DF = 55,
/// RawFrmImm8 - This is used for the ENTER instruction, which has two
/// immediates, the first of which is a 16-bit immediate (specified by
@@ -295,8 +295,20 @@ namespace X86II {
T8 = 13 << Op0Shift, TA = 14 << Op0Shift,
A6 = 15 << Op0Shift, A7 = 16 << Op0Shift,
- // TF - Prefix before and after 0x0F
- TF = 17 << Op0Shift,
+ // T8XD - Prefix before and after 0x0F. Combination of T8 and XD.
+ T8XD = 17 << Op0Shift,
+
+ // T8XS - Prefix before and after 0x0F. Combination of T8 and XS.
+ T8XS = 18 << Op0Shift,
+
+ // TAXD - Prefix before and after 0x0F. Combination of TA and XD.
+ TAXD = 19 << Op0Shift,
+
+ // XOP8 - Prefix to include use of imm byte.
+ XOP8 = 20 << Op0Shift,
+
+ // XOP9 - Prefix to exclude use of imm byte.
+ XOP9 = 21 << Op0Shift,
//===------------------------------------------------------------------===//
// REX_W - REX prefixes are instruction prefixes used in 64-bit mode.
@@ -387,20 +399,24 @@ namespace X86II {
/// and the additional register is encoded in VEX_VVVV prefix.
VEX_4V = 1U << 2,
+ /// VEX_4VOp3 - Similar to VEX_4V, but used on instructions that encode
+ /// operand 3 with VEX.vvvv.
+ VEX_4VOp3 = 1U << 3,
+
/// VEX_I8IMM - Specifies that the last register used in an AVX instruction
/// must be encoded in the i8 immediate field. This usually happens in
/// instructions with 4 operands.
- VEX_I8IMM = 1U << 3,
+ VEX_I8IMM = 1U << 4,
/// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
/// instruction uses 256-bit wide registers. This is usually auto detected
/// if a VR256 register is used, but some AVX instructions also have this
/// field marked when using an f256 memory reference.
- VEX_L = 1U << 4,
+ VEX_L = 1U << 5,
// VEX_LIG - Specifies that this instruction ignores the L-bit in the VEX
// prefix. Usually used for scalar instructions. Needed by disassembler.
- VEX_LIG = 1U << 5,
+ VEX_LIG = 1U << 6,
/// Has3DNow0F0FOpcode - This flag indicates that the instruction uses the
/// wacky 0x0F 0x0F prefix for 3DNow! instructions. The manual documents
@@ -408,7 +424,15 @@ namespace X86II {
/// storing a classifier in the imm8 field. To simplify our implementation,
/// we handle this by storing the classifier in the opcode field and using
/// this flag to indicate that the encoder should do the wacky 3DNow! thing.
- Has3DNow0F0FOpcode = 1U << 6
+ Has3DNow0F0FOpcode = 1U << 7,
+
+ /// MemOp4 - Used to indicate swapping of operand 3 and 4 to be encoded in
+ /// ModRM or I8IMM. This is used for FMA4 and XOP instructions.
+ MemOp4 = 1U << 8,
+
+ /// XOP - Opcode prefix used by XOP instructions.
+ XOP = 1U << 9
+
};
// getBaseOpcodeFor - This function returns the "base" X86 opcode for the
@@ -426,7 +450,7 @@ namespace X86II {
/// of the specified instruction.
static inline unsigned getSizeOfImm(uint64_t TSFlags) {
switch (TSFlags & X86II::ImmMask) {
- default: assert(0 && "Unknown immediate size");
+ default: llvm_unreachable("Unknown immediate size");
case X86II::Imm8:
case X86II::Imm8PCRel: return 1;
case X86II::Imm16:
@@ -441,7 +465,7 @@ namespace X86II {
/// TSFlags indicates that it is pc relative.
static inline unsigned isImmPCRel(uint64_t TSFlags) {
switch (TSFlags & X86II::ImmMask) {
- default: assert(0 && "Unknown immediate size");
+ default: llvm_unreachable("Unknown immediate size");
case X86II::Imm8PCRel:
case X86II::Imm16PCRel:
case X86II::Imm32PCRel:
@@ -462,10 +486,10 @@ namespace X86II {
/// is duplicated in the MCInst (e.g. "EAX = addl EAX, [mem]") it is only
/// counted as one operand.
///
- static inline int getMemoryOperandNo(uint64_t TSFlags) {
+ static inline int getMemoryOperandNo(uint64_t TSFlags, unsigned Opcode) {
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg: assert(0 && "FIXME: Remove this form");
- default: assert(0 && "Unknown FormMask value in getMemoryOperandNo!");
+ case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this form");
+ default: llvm_unreachable("Unknown FormMask value in getMemoryOperandNo!");
case X86II::Pseudo:
case X86II::RawFrm:
case X86II::AddRegFrm:
@@ -478,9 +502,12 @@ namespace X86II {
return 0;
case X86II::MRMSrcMem: {
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
+ bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
unsigned FirstMemOp = 1;
if (HasVEX_4V)
++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV).
+ if (HasMemOp4)
+ ++FirstMemOp;// Skip the register source (which is encoded in I8IMM).
// FIXME: Maybe lea should have its own form? This is a horrible hack.
//if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
@@ -495,20 +522,24 @@ namespace X86II {
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
- case X86II::MRM6m: case X86II::MRM7m:
- return 0;
- case X86II::MRM_C1:
- case X86II::MRM_C2:
- case X86II::MRM_C3:
- case X86II::MRM_C4:
- case X86II::MRM_C8:
- case X86II::MRM_C9:
- case X86II::MRM_E8:
- case X86II::MRM_F0:
- case X86II::MRM_F8:
- case X86II::MRM_F9:
- case X86II::MRM_D0:
- case X86II::MRM_D1:
+ case X86II::MRM6m: case X86II::MRM7m: {
+ bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
+ unsigned FirstMemOp = 0;
+ if (HasVEX_4V)
+ ++FirstMemOp;// Skip the register dest (which is encoded in VEX_VVVV).
+ return FirstMemOp;
+ }
+ case X86II::MRM_C1: case X86II::MRM_C2:
+ case X86II::MRM_C3: case X86II::MRM_C4:
+ case X86II::MRM_C8: case X86II::MRM_C9:
+ case X86II::MRM_E8: case X86II::MRM_F0:
+ case X86II::MRM_F8: case X86II::MRM_F9:
+ case X86II::MRM_D0: case X86II::MRM_D1:
+ case X86II::MRM_D4: case X86II::MRM_D8:
+ case X86II::MRM_D9: case X86II::MRM_DA:
+ case X86II::MRM_DB: case X86II::MRM_DC:
+ case X86II::MRM_DD: case X86II::MRM_DE:
+ case X86II::MRM_DF:
return -1;
}
}
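To make the operand-skipping logic above concrete, a small sketch (assuming an FMA4-style MRMSrcMem instruction with both VEX_4V and MemOp4 set):

    // Operand layout: dst(ModRM.reg), src1(VEX.vvvv), src2(imm8[7:4]), mem.
    unsigned FirstMemOp = 1;      // step past the ModRM.reg destination
    if (HasVEX_4V) ++FirstMemOp;  // src1 is encoded in VEX.vvvv
    if (HasMemOp4) ++FirstMemOp;  // src2 is encoded in imm8[7:4]
    // FirstMemOp == 3: the memory operand starts at MI operand index 3.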
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
new file mode 100644
index 0000000..5a42a80
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -0,0 +1,224 @@
+//===-- X86ELFObjectWriter.cpp - X86 ELF Writer ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/X86FixupKinds.h"
+#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+ class X86ELFObjectWriter : public MCELFObjectTargetWriter {
+ public:
+ X86ELFObjectWriter(bool is64Bit, uint8_t OSABI);
+
+ virtual ~X86ELFObjectWriter();
+ protected:
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) const;
+ };
+}
+
+X86ELFObjectWriter::X86ELFObjectWriter(bool Is64Bit, uint8_t OSABI)
+ : MCELFObjectTargetWriter(Is64Bit, OSABI,
+ Is64Bit ? ELF::EM_X86_64 : ELF::EM_386,
+ /*HasRelocationAddend*/ Is64Bit) {}
+
+X86ELFObjectWriter::~X86ELFObjectWriter()
+{}
+
+unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) const {
+ // Determine the type of the relocation.
+
+ MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ?
+ MCSymbolRefExpr::VK_None : Target.getSymA()->getKind();
+ unsigned Type;
+ if (is64Bit()) {
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+
+ case FK_Data_8: Type = ELF::R_X86_64_PC64; break;
+ case FK_Data_4: Type = ELF::R_X86_64_PC32; break;
+ case FK_Data_2: Type = ELF::R_X86_64_PC16; break;
+
+ case FK_PCRel_8:
+ assert(Modifier == MCSymbolRefExpr::VK_None);
+ Type = ELF::R_X86_64_PC64;
+ break;
+ case X86::reloc_signed_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_riprel_4byte:
+ case FK_PCRel_4:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_X86_64_PC32;
+ break;
+ case MCSymbolRefExpr::VK_PLT:
+ Type = ELF::R_X86_64_PLT32;
+ break;
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ Type = ELF::R_X86_64_GOTPCREL;
+ break;
+ case MCSymbolRefExpr::VK_GOTTPOFF:
+ Type = ELF::R_X86_64_GOTTPOFF;
+ break;
+ case MCSymbolRefExpr::VK_TLSGD:
+ Type = ELF::R_X86_64_TLSGD;
+ break;
+ case MCSymbolRefExpr::VK_TLSLD:
+ Type = ELF::R_X86_64_TLSLD;
+ break;
+ }
+ break;
+ case FK_PCRel_2:
+ assert(Modifier == MCSymbolRefExpr::VK_None);
+ Type = ELF::R_X86_64_PC16;
+ break;
+ case FK_PCRel_1:
+ assert(Modifier == MCSymbolRefExpr::VK_None);
+ Type = ELF::R_X86_64_PC8;
+ break;
+ }
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case FK_Data_8: Type = ELF::R_X86_64_64; break;
+ case X86::reloc_signed_4byte:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_X86_64_32S;
+ break;
+ case MCSymbolRefExpr::VK_GOT:
+ Type = ELF::R_X86_64_GOT32;
+ break;
+ case MCSymbolRefExpr::VK_GOTPCREL:
+ Type = ELF::R_X86_64_GOTPCREL;
+ break;
+ case MCSymbolRefExpr::VK_TPOFF:
+ Type = ELF::R_X86_64_TPOFF32;
+ break;
+ case MCSymbolRefExpr::VK_DTPOFF:
+ Type = ELF::R_X86_64_DTPOFF32;
+ break;
+ }
+ break;
+ case FK_Data_4:
+ Type = ELF::R_X86_64_32;
+ break;
+ case FK_Data_2: Type = ELF::R_X86_64_16; break;
+ case FK_PCRel_1:
+ case FK_Data_1: Type = ELF::R_X86_64_8; break;
+ }
+ }
+ } else {
+ if (IsPCRel) {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+
+ case X86::reloc_global_offset_table:
+ Type = ELF::R_386_GOTPC;
+ break;
+
+ case X86::reloc_signed_4byte:
+ case FK_PCRel_4:
+ case FK_Data_4:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_386_PC32;
+ break;
+ case MCSymbolRefExpr::VK_PLT:
+ Type = ELF::R_386_PLT32;
+ break;
+ }
+ break;
+ }
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+
+ case X86::reloc_global_offset_table:
+ Type = ELF::R_386_GOTPC;
+ break;
+
+ // FIXME: Should we avoid selecting reloc_signed_4byte in 32 bit mode
+ // instead?
+ case X86::reloc_signed_4byte:
+ case FK_PCRel_4:
+ case FK_Data_4:
+ switch (Modifier) {
+ default:
+ llvm_unreachable("Unimplemented");
+ case MCSymbolRefExpr::VK_None:
+ Type = ELF::R_386_32;
+ break;
+ case MCSymbolRefExpr::VK_GOT:
+ Type = ELF::R_386_GOT32;
+ break;
+ case MCSymbolRefExpr::VK_GOTOFF:
+ Type = ELF::R_386_GOTOFF;
+ break;
+ case MCSymbolRefExpr::VK_TLSGD:
+ Type = ELF::R_386_TLS_GD;
+ break;
+ case MCSymbolRefExpr::VK_TPOFF:
+ Type = ELF::R_386_TLS_LE_32;
+ break;
+ case MCSymbolRefExpr::VK_INDNTPOFF:
+ Type = ELF::R_386_TLS_IE;
+ break;
+ case MCSymbolRefExpr::VK_NTPOFF:
+ Type = ELF::R_386_TLS_LE;
+ break;
+ case MCSymbolRefExpr::VK_GOTNTPOFF:
+ Type = ELF::R_386_TLS_GOTIE;
+ break;
+ case MCSymbolRefExpr::VK_TLSLDM:
+ Type = ELF::R_386_TLS_LDM;
+ break;
+ case MCSymbolRefExpr::VK_DTPOFF:
+ Type = ELF::R_386_TLS_LDO_32;
+ break;
+ case MCSymbolRefExpr::VK_GOTTPOFF:
+ Type = ELF::R_386_TLS_IE_32;
+ break;
+ }
+ break;
+ case FK_Data_2: Type = ELF::R_386_16; break;
+ case FK_PCRel_1:
+ case FK_Data_1: Type = ELF::R_386_8; break;
+ }
+ }
+ }
+
+ return Type;
+}
+
+MCObjectWriter *llvm::createX86ELFObjectWriter(raw_ostream &OS,
+ bool Is64Bit,
+ uint8_t OSABI) {
+ MCELFObjectTargetWriter *MOTW =
+ new X86ELFObjectWriter(Is64Bit, OSABI);
+ return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true);
+}
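A hypothetical caller of the new factory (backend glue; the FreeBSD OSABI constant is only an illustrative choice):

    MCObjectWriter *makeAmd64ELFWriter(raw_ostream &OS) {
      return llvm::createX86ELFObjectWriter(OS, /*Is64Bit=*/true,
                                            ELF::ELFOSABI_FREEBSD);
    }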
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
index 17d242a..f2e34cb 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
@@ -1,4 +1,4 @@
-//===-- X86/X86FixupKinds.h - X86 Specific Fixup Entries --------*- C++ -*-===//
+//===-- X86FixupKinds.h - X86 Specific Fixup Entries ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index 2703100..afa545c 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -48,6 +48,8 @@ static const char *const x86_asm_table[] = {
"{cc}", "cc",
0,0};
+void X86MCAsmInfoDarwin::anchor() { }
+
X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
bool is64Bit = T.getArch() == Triple::x86_64;
if (is64Bit)
@@ -80,6 +82,8 @@ X86_64MCAsmInfoDarwin::X86_64MCAsmInfoDarwin(const Triple &Triple)
: X86MCAsmInfoDarwin(Triple) {
}
+void X86ELFMCAsmInfo::anchor() { }
+
X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
if (T.getArch() == Triple::x86_64)
PointerSize = 8;
@@ -125,7 +129,23 @@ getNonexecutableStackSection(MCContext &Ctx) const {
0, SectionKind::getMetadata());
}
-X86MCAsmInfoCOFF::X86MCAsmInfoCOFF(const Triple &Triple) {
+void X86MCAsmInfoMicrosoft::anchor() { }
+
+X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
+ if (Triple.getArch() == Triple::x86_64) {
+ GlobalPrefix = "";
+ PrivateGlobalPrefix = ".L";
+ }
+
+ AsmTransCBE = x86_asm_table;
+ AssemblerDialect = AsmWriterFlavor;
+
+ TextAlignFillValue = 0x90;
+}
+
+void X86MCAsmInfoGNUCOFF::anchor() { }
+
+X86MCAsmInfoGNUCOFF::X86MCAsmInfoGNUCOFF(const Triple &Triple) {
if (Triple.getArch() == Triple::x86_64) {
GlobalPrefix = "";
PrivateGlobalPrefix = ".L";
@@ -135,4 +155,7 @@ X86MCAsmInfoCOFF::X86MCAsmInfoCOFF(const Triple &Triple) {
AssemblerDialect = AsmWriterFlavor;
TextAlignFillValue = 0x90;
+
+ // Exceptions handling
+ ExceptionsType = ExceptionHandling::DwarfCFI;
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
index 2cd4c8e..b6b70fd 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- X86MCAsmInfo.h - X86 asm properties -----------------*- C++ -*--====//
+//===-- X86MCAsmInfo.h - X86 asm properties --------------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,7 +21,9 @@
namespace llvm {
class Triple;
- struct X86MCAsmInfoDarwin : public MCAsmInfoDarwin {
+ class X86MCAsmInfoDarwin : public MCAsmInfoDarwin {
+ virtual void anchor();
+ public:
explicit X86MCAsmInfoDarwin(const Triple &Triple);
};
@@ -33,13 +35,23 @@ namespace llvm {
MCStreamer &Streamer) const;
};
- struct X86ELFMCAsmInfo : public MCAsmInfo {
+ class X86ELFMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
+ public:
explicit X86ELFMCAsmInfo(const Triple &Triple);
virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const;
};
- struct X86MCAsmInfoCOFF : public MCAsmInfoCOFF {
- explicit X86MCAsmInfoCOFF(const Triple &Triple);
+ class X86MCAsmInfoMicrosoft : public MCAsmInfoMicrosoft {
+ virtual void anchor();
+ public:
+ explicit X86MCAsmInfoMicrosoft(const Triple &Triple);
+ };
+
+ class X86MCAsmInfoGNUCOFF : public MCAsmInfoGNUCOFF {
+ virtual void anchor();
+ public:
+ explicit X86MCAsmInfoGNUCOFF(const Triple &Triple);
};
} // namespace llvm
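The anchor() members added above are the usual LLVM vtable-anchor idiom; in general form (a sketch, class name hypothetical):

    class SomeAsmInfo : public MCAsmInfo {
      virtual void anchor();  // declared out-of-line in the header...
    public:
      explicit SomeAsmInfo();
    };
    void SomeAsmInfo::anchor() {}  // ...defined in exactly one .cpp,
                                   // pinning the vtable to that object file.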
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 2eee112..80990e5 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -1,4 +1,4 @@
-//===-- X86/X86MCCodeEmitter.cpp - Convert X86 code to machine code -------===//
+//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -46,6 +46,11 @@ public:
return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
}
+ bool is32BitMode() const {
+ // FIXME: Can tablegen auto-generate this?
+ return (STI.getFeatureBits() & X86::Mode64Bit) == 0;
+ }
+
static unsigned GetX86RegNum(const MCOperand &MO) {
return X86_MC::getX86RegNum(MO.getReg());
}
@@ -63,9 +68,8 @@ public:
unsigned OpNum) {
unsigned SrcReg = MI.getOperand(OpNum).getReg();
unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
- if ((SrcReg >= X86::XMM8 && SrcReg <= X86::XMM15) ||
- (SrcReg >= X86::YMM8 && SrcReg <= X86::YMM15))
- SrcRegNum += 8;
+ if (X86II::isX86_64ExtendedReg(SrcReg))
+ SrcRegNum |= 8;
// The registers represented through VEX_VVVV should
// be encoded in 1's complement form.
@@ -86,7 +90,7 @@ public:
}
}
- void EmitImmediate(const MCOperand &Disp,
+ void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
unsigned ImmSize, MCFixupKind FixupKind,
unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
@@ -155,9 +159,8 @@ static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
return MCFixup::getKindForSize(Size, isPCRel);
}
-/// Is32BitMemOperand - Return true if the specified instruction with a memory
-/// operand should emit the 0x67 prefix byte in 64-bit mode due to a 32-bit
-/// memory operand. Op specifies the operand # of the memoperand.
+/// Is32BitMemOperand - Return true if the specified instruction has
+/// a 32-bit memory operand. Op specifies the operand # of the memoperand.
static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
@@ -170,28 +173,71 @@ static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
return false;
}
-/// StartsWithGlobalOffsetTable - Return true for the simple cases where this
-/// expression starts with _GLOBAL_OFFSET_TABLE_. This is a needed to support
-/// PIC on ELF i386 as that symbol is magic. We check only simple case that
+/// Is64BitMemOperand - Return true if the specified instruction has
+/// a 64-bit memory operand. Op specifies the operand # of the memoperand.
+#ifndef NDEBUG
+static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
+ const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
+ const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
+
+ if ((BaseReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
+ (IndexReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
+ return true;
+ return false;
+}
+#endif
+
+/// Is16BitMemOperand - Return true if the specified instruction has
+/// a 16-bit memory operand. Op specifies the operand # of the memoperand.
+static bool Is16BitMemOperand(const MCInst &MI, unsigned Op) {
+ const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
+ const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
+
+ if ((BaseReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
+ (IndexReg.getReg() != 0 &&
+ X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
+ return true;
+ return false;
+}
+
+/// StartsWithGlobalOffsetTable - Check if this expression starts with
+/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
+/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
+/// i386, as _GLOBAL_OFFSET_TABLE_ is magical. We check only the simple cases that
/// are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
/// of a binary expression.
-static bool StartsWithGlobalOffsetTable(const MCExpr *Expr) {
+enum GlobalOffsetTableExprKind {
+ GOT_None,
+ GOT_Normal,
+ GOT_SymDiff
+};
+static GlobalOffsetTableExprKind
+StartsWithGlobalOffsetTable(const MCExpr *Expr) {
+ const MCExpr *RHS = 0;
if (Expr->getKind() == MCExpr::Binary) {
const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
Expr = BE->getLHS();
+ RHS = BE->getRHS();
}
if (Expr->getKind() != MCExpr::SymbolRef)
- return false;
+ return GOT_None;
const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
const MCSymbol &S = Ref->getSymbol();
- return S.getName() == "_GLOBAL_OFFSET_TABLE_";
+ if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
+ return GOT_None;
+ if (RHS && RHS->getKind() == MCExpr::SymbolRef)
+ return GOT_SymDiff;
+ return GOT_Normal;
}
void X86MCCodeEmitter::
-EmitImmediate(const MCOperand &DispOp, unsigned Size, MCFixupKind FixupKind,
- unsigned &CurByte, raw_ostream &OS,
+EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
+ MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
const MCExpr *Expr = NULL;
if (DispOp.isImm()) {
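The two expression shapes the classifier distinguishes can be built like this (a sketch against the MC expression API of this era; the symbol foo is hypothetical):

    static const MCExpr *makeGOTExpr(MCContext &Ctx, bool SymDiff) {
      const MCExpr *GOT = MCSymbolRefExpr::Create(
          Ctx.GetOrCreateSymbol("_GLOBAL_OFFSET_TABLE_"), Ctx); // GOT_Normal
      if (!SymDiff)
        return GOT;
      const MCExpr *Sym =
          MCSymbolRefExpr::Create(Ctx.GetOrCreateSymbol("foo"), Ctx);
      return MCBinaryExpr::CreateSub(GOT, Sym, Ctx);            // GOT_SymDiff
    }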
@@ -210,12 +256,21 @@ EmitImmediate(const MCOperand &DispOp, unsigned Size, MCFixupKind FixupKind,
// If we have an immoffset, add it to the expression.
if ((FixupKind == FK_Data_4 ||
- FixupKind == MCFixupKind(X86::reloc_signed_4byte)) &&
- StartsWithGlobalOffsetTable(Expr)) {
- assert(ImmOffset == 0);
-
- FixupKind = MCFixupKind(X86::reloc_global_offset_table);
- ImmOffset = CurByte;
+ FixupKind == FK_Data_8 ||
+ FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
+ GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
+ if (Kind != GOT_None) {
+ assert(ImmOffset == 0);
+
+ FixupKind = MCFixupKind(X86::reloc_global_offset_table);
+ if (Kind == GOT_Normal)
+ ImmOffset = CurByte;
+ } else if (Expr->getKind() == MCExpr::SymbolRef) {
+ const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
+ if (Ref->getKind() == MCSymbolRefExpr::VK_SECREL) {
+ FixupKind = MCFixupKind(FK_SecRel_4);
+ }
+ }
}
// If the fixup is pc-relative, we need to bias the value to be relative to
@@ -234,7 +289,7 @@ EmitImmediate(const MCOperand &DispOp, unsigned Size, MCFixupKind FixupKind,
Ctx);
// Emit a symbolic constant as a fixup and 4 zeros.
- Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind));
+ Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind, Loc));
EmitConstant(0, Size, CurByte, OS);
}
@@ -270,7 +325,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// expression to emit.
int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
- EmitImmediate(Disp, 4, MCFixupKind(FixupKind),
+ EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
CurByte, OS, Fixups, -ImmSize);
return;
}
@@ -294,7 +349,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
if (BaseReg == 0) { // [disp32] in X86-32 mode
EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
- EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
+ EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
return;
}
@@ -310,13 +365,13 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
if (Disp.isImm() && isDisp8(Disp.getImm())) {
EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
- EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
+ EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
return;
}
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
- EmitImmediate(Disp, 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
+ EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
Fixups);
return;
}
@@ -375,10 +430,10 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Do we need to output a displacement?
if (ForceDisp8)
- EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
+ EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
else if (ForceDisp32 || Disp.getImm() != 0)
- EmitImmediate(Disp, 4, MCFixupKind(X86::reloc_signed_4byte), CurByte, OS,
- Fixups);
+ EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
+ CurByte, OS, Fixups);
}
/// EmitVEXOpcodePrefix - AVX instructions are encoded using a opcode prefix
@@ -387,9 +442,8 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
raw_ostream &OS) const {
- bool HasVEX_4V = false;
- if ((TSFlags >> X86II::VEXShift) & X86II::VEX_4V)
- HasVEX_4V = true;
+ bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
+ bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
// VEX_R: opcode extension equivalent to REX.R in
// 1's complement (inverted) form
@@ -417,6 +471,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// opcode extension, or ignored, depending on the opcode byte)
unsigned char VEX_W = 0;
+ // XOP: Use XOP prefix byte 0x8f instead of VEX.
+ unsigned char XOP = 0;
+
// VEX_5M (VEX m-mmmmm field):
//
// 0b00000: Reserved for future use
@@ -424,7 +481,8 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b00010: implied 0F 38 leading opcode bytes
// 0b00011: implied 0F 3A leading opcode bytes
// 0b00100-0b11111: Reserved for future use
- //
+ // 0b01000: XOP map select - 08h instructions with imm byte
+ // 0b01001: XOP map select - 09h instructions with no imm byte
unsigned char VEX_5M = 0x1;
// VEX_4V (VEX vvvv field): a register specifier
@@ -455,27 +513,44 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
VEX_W = 1;
+ if ((TSFlags >> X86II::VEXShift) & X86II::XOP)
+ XOP = 1;
+
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
VEX_L = 1;
switch (TSFlags & X86II::Op0Mask) {
- default: assert(0 && "Invalid prefix!");
+ default: llvm_unreachable("Invalid prefix!");
case X86II::T8: // 0F 38
VEX_5M = 0x2;
break;
case X86II::TA: // 0F 3A
VEX_5M = 0x3;
break;
- case X86II::TF: // F2 0F 38
+ case X86II::T8XS: // F3 0F 38
+ VEX_PP = 0x2;
+ VEX_5M = 0x2;
+ break;
+ case X86II::T8XD: // F2 0F 38
VEX_PP = 0x3;
VEX_5M = 0x2;
break;
+ case X86II::TAXD: // F2 0F 3A
+ VEX_PP = 0x3;
+ VEX_5M = 0x3;
+ break;
case X86II::XS: // F3 0F
VEX_PP = 0x2;
break;
case X86II::XD: // F2 0F
VEX_PP = 0x3;
break;
+ case X86II::XOP8:
+ VEX_5M = 0x8;
+ break;
+ case X86II::XOP9:
+ VEX_5M = 0x9;
+ break;
case X86II::A6: // Bypass: Not used by VEX
case X86II::A7: // Bypass: Not used by VEX
case X86II::TB: // Bypass: Not used by VEX
@@ -483,6 +558,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
break; // No prefix!
}
+
// Set the vector length to 256-bit if YMM0-YMM15 is used
for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
if (!MI.getOperand(i).isReg())
@@ -495,7 +571,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
unsigned CurOp = 0;
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
+ case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
case X86II::MRMDestMem: {
// MRMDestMem instructions forms:
// MemAddr, src1(ModR/M)
@@ -516,41 +592,50 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
VEX_R = 0x0;
break;
}
- case X86II::MRMSrcMem: {
+ case X86II::MRMSrcMem:
// MRMSrcMem instructions forms:
// src1(ModR/M), MemAddr
// src1(ModR/M), src2(VEX_4V), MemAddr
// src1(ModR/M), MemAddr, imm8
// src1(ModR/M), MemAddr, src2(VEX_I8IMM)
//
+ // FMA4:
+ // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
+ // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M)
if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
VEX_R = 0x0;
- unsigned MemAddrOffset = 1;
- if (HasVEX_4V) {
+ if (HasVEX_4V)
VEX_4V = getVEXRegisterEncoding(MI, 1);
- MemAddrOffset++;
- }
if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemAddrOffset+X86::AddrBaseReg).getReg()))
+ MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
VEX_B = 0x0;
if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemAddrOffset+X86::AddrIndexReg).getReg()))
+ MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
VEX_X = 0x0;
+
+ if (HasVEX_4VOp3)
+ VEX_4V = getVEXRegisterEncoding(MI, X86::AddrNumOperands+1);
break;
- }
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
- case X86II::MRM6m: case X86II::MRM7m:
+ case X86II::MRM6m: case X86II::MRM7m: {
// MRM[0-9]m instructions forms:
// MemAddr
- if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrBaseReg).getReg()))
+ // src1(VEX_4V), MemAddr
+ if (HasVEX_4V)
+ VEX_4V = getVEXRegisterEncoding(MI, 0);
+
+ if (X86II::isX86_64ExtendedReg(
+ MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrIndexReg).getReg()))
+ if (X86II::isX86_64ExtendedReg(
+ MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
VEX_X = 0x0;
break;
+ }
case X86II::MRMSrcReg:
// MRMSrcReg instructions forms:
// dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
@@ -565,6 +650,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
VEX_B = 0x0;
+ CurOp++;
+ if (HasVEX_4VOp3)
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
break;
case X86II::MRMDestReg:
// MRMDestReg instructions forms:
@@ -605,14 +693,14 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
//
unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
- if (VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) { // 2 byte VEX prefix
+ if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
EmitByte(0xC5, CurByte, OS);
EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
return;
}
// 3 byte VEX prefix
- EmitByte(0xC4, CurByte, OS);
+ EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
}
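A worked example of the bytes produced above (a sketch; assume an XOP9 instruction with no extended registers, so VEX_R=VEX_X=VEX_B=1, VEX_W=0, unused vvvv, VEX_L=0, VEX_PP=0):

    unsigned char Byte0 = 0x8F;                           // XOP escape, not 0xC4
    unsigned char Byte1 = 1 << 7 | 1 << 6 | 1 << 5 | 0x9; // R,X,B,mmmmm = 0xE9
    unsigned char Byte2 = 0 << 7 | 0xF << 3 | 0 << 2 | 0; // W,vvvv,L,pp = 0x78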
@@ -647,7 +735,7 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
}
switch (TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
+ case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
case X86II::MRMSrcReg:
if (MI.getOperand(0).isReg() &&
X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
@@ -717,12 +805,12 @@ void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
const MCInst &MI,
raw_ostream &OS) const {
switch (TSFlags & X86II::SegOvrMask) {
- default: assert(0 && "Invalid segment!");
+ default: llvm_unreachable("Invalid segment!");
case 0:
// No segment override, check for explicit one on memory operand.
if (MemOperand != -1) { // If the instruction has a memory operand.
switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
- default: assert(0 && "Unknown segment register!");
+ default: llvm_unreachable("Unknown segment register!");
case 0: break;
case X86::CS: EmitByte(0x2E, CurByte, OS); break;
case X86::SS: EmitByte(0x36, CurByte, OS); break;
@@ -763,8 +851,22 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
EmitByte(0xF3, CurByte, OS);
// Emit the address size opcode prefix as needed.
- if ((TSFlags & X86II::AdSize) ||
- (MemOperand != -1 && is64BitMode() && Is32BitMemOperand(MI, MemOperand)))
+ bool need_address_override;
+ if (TSFlags & X86II::AdSize) {
+ need_address_override = true;
+ } else if (MemOperand == -1) {
+ need_address_override = false;
+ } else if (is64BitMode()) {
+ assert(!Is16BitMemOperand(MI, MemOperand));
+ need_address_override = Is32BitMemOperand(MI, MemOperand);
+ } else if (is32BitMode()) {
+ assert(!Is64BitMemOperand(MI, MemOperand));
+ need_address_override = Is16BitMemOperand(MI, MemOperand);
+ } else {
+ need_address_override = false;
+ }
+
+ if (need_address_override)
EmitByte(0x67, CurByte, OS);
// Emit the operand size opcode prefix as needed.
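Assumed illustrations of the address-size decision table above: in 64-bit mode, mov (%eax),%ebx uses a 32-bit base register and now gets the 0x67 override; in 32-bit mode, mov (%bx,%si),%al uses 16-bit addressing and likewise gets 0x67; a native-width address in either mode emits no prefix.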
@@ -773,7 +875,7 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
bool Need0FPrefix = false;
switch (TSFlags & X86II::Op0Mask) {
- default: assert(0 && "Invalid prefix!");
+ default: llvm_unreachable("Invalid prefix!");
case 0: break; // No prefix!
case X86II::REP: break; // already handled.
case X86II::TB: // Two-byte opcode prefix
@@ -783,7 +885,15 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
case X86II::A7: // 0F A7
Need0FPrefix = true;
break;
- case X86II::TF: // F2 0F 38
+ case X86II::T8XS: // F3 0F 38
+ EmitByte(0xF3, CurByte, OS);
+ Need0FPrefix = true;
+ break;
+ case X86II::T8XD: // F2 0F 38
+ EmitByte(0xF2, CurByte, OS);
+ Need0FPrefix = true;
+ break;
+ case X86II::TAXD: // F2 0F 3A
EmitByte(0xF2, CurByte, OS);
Need0FPrefix = true;
break;
@@ -818,10 +928,12 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FIXME: Pull this up into previous switch if REX can be moved earlier.
switch (TSFlags & X86II::Op0Mask) {
- case X86II::TF: // F2 0F 38
+ case X86II::T8XS: // F3 0F 38
+ case X86II::T8XD: // F2 0F 38
case X86II::T8: // 0F 38
EmitByte(0x38, CurByte, OS);
break;
+ case X86II::TAXD: // F2 0F 3A
case X86II::TA: // 0F 3A
EmitByte(0x3A, CurByte, OS);
break;
@@ -859,18 +971,16 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
unsigned CurByte = 0;
// Is this instruction encoded using the AVX VEX prefix?
- bool HasVEXPrefix = false;
+ bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX;
// It uses the VEX.VVVV field?
- bool HasVEX_4V = false;
-
- if ((TSFlags >> X86II::VEXShift) & X86II::VEX)
- HasVEXPrefix = true;
- if ((TSFlags >> X86II::VEXShift) & X86II::VEX_4V)
- HasVEX_4V = true;
+ bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
+ bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
+ bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
+ const unsigned MemOp4_I8IMMOperand = 2;
// Determine where the memory operand starts, if present.
- int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
+ int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
if (MemoryOperand != -1) MemoryOperand += CurOp;
if (!HasVEXPrefix)
@@ -886,27 +996,29 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
unsigned SrcRegNum = 0;
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg:
- assert(0 && "FIXME: Remove this form when the JIT moves to MCCodeEmitter!");
+ llvm_unreachable("FIXME: Remove this form when the JIT moves to MCCodeEmitter!");
default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
- assert(0 && "Unknown FormMask value in X86MCCodeEmitter!");
+ llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
case X86II::Pseudo:
- assert(0 && "Pseudo instruction shouldn't be emitted");
+ llvm_unreachable("Pseudo instruction shouldn't be emitted");
case X86II::RawFrm:
EmitByte(BaseOpcode, CurByte, OS);
break;
case X86II::RawFrmImm8:
EmitByte(BaseOpcode, CurByte, OS);
- EmitImmediate(MI.getOperand(CurOp++),
+ EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
CurByte, OS, Fixups);
- EmitImmediate(MI.getOperand(CurOp++), 1, FK_Data_1, CurByte, OS, Fixups);
+ EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
+ OS, Fixups);
break;
case X86II::RawFrmImm16:
EmitByte(BaseOpcode, CurByte, OS);
- EmitImmediate(MI.getOperand(CurOp++),
+ EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
CurByte, OS, Fixups);
- EmitImmediate(MI.getOperand(CurOp++), 2, FK_Data_2, CurByte, OS, Fixups);
+ EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
+ OS, Fixups);
break;
case X86II::AddRegFrm:
@@ -940,9 +1052,16 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
SrcRegNum++;
+ if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
+ SrcRegNum++;
+
EmitRegModRMByte(MI.getOperand(SrcRegNum),
GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
- CurOp = SrcRegNum + 1;
+
+ // Two operands were skipped with HasMemOp4; compensate accordingly.
+ CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
+ if (HasVEX_4VOp3)
+ ++CurOp;
break;
case X86II::MRMSrcMem: {
@@ -952,12 +1071,16 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
++AddrOperands;
++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
}
+ if (HasMemOp4) // Skip second register source (encoded in I8IMM)
+ ++FirstMemOp;
EmitByte(BaseOpcode, CurByte, OS);
EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
TSFlags, CurByte, OS, Fixups);
CurOp += AddrOperands + 1;
+ if (HasVEX_4VOp3)
+ ++CurOp;
break;
}
@@ -976,58 +1099,52 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
+ if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
+ CurOp++;
EmitByte(BaseOpcode, CurByte, OS);
EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
TSFlags, CurByte, OS, Fixups);
CurOp += X86::AddrNumOperands;
break;
- case X86II::MRM_C1:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xC1, CurByte, OS);
- break;
- case X86II::MRM_C2:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xC2, CurByte, OS);
- break;
- case X86II::MRM_C3:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xC3, CurByte, OS);
- break;
- case X86II::MRM_C4:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xC4, CurByte, OS);
- break;
- case X86II::MRM_C8:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xC8, CurByte, OS);
- break;
- case X86II::MRM_C9:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xC9, CurByte, OS);
- break;
- case X86II::MRM_E8:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xE8, CurByte, OS);
- break;
- case X86II::MRM_F0:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xF0, CurByte, OS);
- break;
- case X86II::MRM_F8:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xF8, CurByte, OS);
- break;
+ case X86II::MRM_C1: case X86II::MRM_C2:
+ case X86II::MRM_C3: case X86II::MRM_C4:
+ case X86II::MRM_C8: case X86II::MRM_C9:
+ case X86II::MRM_D0: case X86II::MRM_D1:
+ case X86II::MRM_D4: case X86II::MRM_D8:
+ case X86II::MRM_D9: case X86II::MRM_DA:
+ case X86II::MRM_DB: case X86II::MRM_DC:
+ case X86II::MRM_DD: case X86II::MRM_DE:
+ case X86II::MRM_DF: case X86II::MRM_E8:
+ case X86II::MRM_F0: case X86II::MRM_F8:
case X86II::MRM_F9:
EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xF9, CurByte, OS);
- break;
- case X86II::MRM_D0:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xD0, CurByte, OS);
- break;
- case X86II::MRM_D1:
- EmitByte(BaseOpcode, CurByte, OS);
- EmitByte(0xD1, CurByte, OS);
+
+ unsigned char MRM;
+ switch (TSFlags & X86II::FormMask) {
+ default: llvm_unreachable("Invalid Form");
+ case X86II::MRM_C1: MRM = 0xC1; break;
+ case X86II::MRM_C2: MRM = 0xC2; break;
+ case X86II::MRM_C3: MRM = 0xC3; break;
+ case X86II::MRM_C4: MRM = 0xC4; break;
+ case X86II::MRM_C8: MRM = 0xC8; break;
+ case X86II::MRM_C9: MRM = 0xC9; break;
+ case X86II::MRM_D0: MRM = 0xD0; break;
+ case X86II::MRM_D1: MRM = 0xD1; break;
+ case X86II::MRM_D4: MRM = 0xD4; break;
+ case X86II::MRM_D8: MRM = 0xD8; break;
+ case X86II::MRM_D9: MRM = 0xD9; break;
+ case X86II::MRM_DA: MRM = 0xDA; break;
+ case X86II::MRM_DB: MRM = 0xDB; break;
+ case X86II::MRM_DC: MRM = 0xDC; break;
+ case X86II::MRM_DD: MRM = 0xDD; break;
+ case X86II::MRM_DE: MRM = 0xDE; break;
+ case X86II::MRM_DF: MRM = 0xDF; break;
+ case X86II::MRM_E8: MRM = 0xE8; break;
+ case X86II::MRM_F0: MRM = 0xF0; break;
+ case X86II::MRM_F8: MRM = 0xF8; break;
+ case X86II::MRM_F9: MRM = 0xF9; break;
+ }
+ EmitByte(MRM, CurByte, OS);
break;
}
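For instance (a sketch; MWAIT encodes as 0F 01 C9 with form MRM_C9), the consolidated path reduces to:

    EmitByte(BaseOpcode, CurByte, OS); // 0x01 for MWAIT (0F escape emitted earlier)
    unsigned char MRM = 0xC9;          // selected by the MRM_C9 case above
    EmitByte(MRM, CurByte, OS);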
@@ -1035,14 +1152,26 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
// according to the right size for the instruction.
if (CurOp != NumOps) {
// The last source register of a 4 operand instruction in AVX is encoded
- // in bits[7:4] of a immediate byte, and bits[3:0] are ignored.
+ // in bits[7:4] of an immediate byte.
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
- const MCOperand &MO = MI.getOperand(CurOp++);
+ const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
+ : CurOp);
+ CurOp++;
bool IsExtReg = X86II::isX86_64ExtendedReg(MO.getReg());
unsigned RegNum = (IsExtReg ? (1 << 7) : 0);
RegNum |= GetX86RegNum(MO) << 4;
- EmitImmediate(MCOperand::CreateImm(RegNum), 1, FK_Data_1, CurByte, OS,
- Fixups);
+ // If there is an additional 5th operand, it must be an immediate, which
+ // is encoded in bits[3:0].
+ if (CurOp != NumOps) {
+ const MCOperand &MIMM = MI.getOperand(CurOp++);
+ if (MIMM.isImm()) {
+ unsigned Val = MIMM.getImm();
+ assert(Val < 16 && "Immediate operand value out of range");
+ RegNum |= Val;
+ }
+ }
+ EmitImmediate(MCOperand::CreateImm(RegNum), MI.getLoc(), 1, FK_Data_1,
+ CurByte, OS, Fixups);
} else {
unsigned FixupKind;
// FIXME: Is there a better way to know that we need a signed relocation?
@@ -1053,7 +1182,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
FixupKind = X86::reloc_signed_4byte;
else
FixupKind = getImmFixupKind(TSFlags);
- EmitImmediate(MI.getOperand(CurOp++),
+ EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
X86II::getSizeOfImm(TSFlags), MCFixupKind(FixupKind),
CurByte, OS, Fixups);
}
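The imm8 packing performed in the VEX_I8IMM branch above, as a standalone sketch (helper name hypothetical):

    // Bit 7: extended-register bit; bits[6:4]: low three bits of the register
    // number; bits[3:0]: optional classifier immediate (FMA4/XOP).
    static unsigned char packVEXI8IMM(unsigned LowRegNum, bool IsExtReg,
                                      unsigned Imm4) {
      assert(LowRegNum < 8 && Imm4 < 16 && "field out of range");
      return (IsExtReg ? (1u << 7) : 0) | (LowRegNum << 4) | Imm4;
    }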
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index f98d5e3..3482363 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- X86MCTargetDesc.cpp - X86 Target Descriptions -----------*- C++ -*-===//
+//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -24,6 +24,7 @@
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Host.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_REGINFO_MC_DESC
@@ -35,6 +36,10 @@
#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"
+#if _MSC_VER
+#include <intrin.h>
+#endif
+
using namespace llvm;
@@ -45,10 +50,6 @@ std::string X86_MC::ParseX86Triple(StringRef TT) {
FS = "+64bit-mode";
else
FS = "-64bit-mode";
- if (TheTriple.getOS() == Triple::NativeClient)
- FS += ",+nacl-mode";
- else
- FS += ",-nacl-mode";
return FS;
}
@@ -76,6 +77,8 @@ bool X86_MC::GetCpuIDAndInfo(unsigned value, unsigned *rEAX,
*rECX = registers[2];
*rEDX = registers[3];
return false;
+ #else
+ return true;
#endif
#elif defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)
#if defined(__GNUC__)
@@ -102,9 +105,81 @@ bool X86_MC::GetCpuIDAndInfo(unsigned value, unsigned *rEAX,
mov dword ptr [esi],edx
}
return false;
+ #else
+ return true;
#endif
+#else
+ return true;
#endif
+}
+
+/// GetCpuIDAndInfoEx - Execute the specified cpuid with subleaf and return the
+/// 4 values in the specified arguments. If we can't run cpuid on the host,
+/// return true.
+bool X86_MC::GetCpuIDAndInfoEx(unsigned value, unsigned subleaf, unsigned *rEAX,
+ unsigned *rEBX, unsigned *rECX, unsigned *rEDX) {
+#if defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
+ #if defined(__GNUC__)
+ // gcc doesn't know cpuid clobbers ebx/rbx. Preserve it manually.
+ asm ("movq\t%%rbx, %%rsi\n\t"
+ "cpuid\n\t"
+ "xchgq\t%%rbx, %%rsi\n\t"
+ : "=a" (*rEAX),
+ "=S" (*rEBX),
+ "=c" (*rECX),
+ "=d" (*rEDX)
+ : "a" (value),
+ "c" (subleaf));
+ return false;
+ #elif defined(_MSC_VER)
+ // __cpuidex was added in MSVC++ 9.0 SP1
+ #if (_MSC_VER > 1500) || (_MSC_VER == 1500 && _MSC_FULL_VER >= 150030729)
+ int registers[4];
+ __cpuidex(registers, value, subleaf);
+ *rEAX = registers[0];
+ *rEBX = registers[1];
+ *rECX = registers[2];
+ *rEDX = registers[3];
+ return false;
+ #else
+ return true;
+ #endif
+ #else
+ return true;
+ #endif
+#elif defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)
+ #if defined(__GNUC__)
+ asm ("movl\t%%ebx, %%esi\n\t"
+ "cpuid\n\t"
+ "xchgl\t%%ebx, %%esi\n\t"
+ : "=a" (*rEAX),
+ "=S" (*rEBX),
+ "=c" (*rECX),
+ "=d" (*rEDX)
+ : "a" (value),
+ "c" (subleaf));
+ return false;
+ #elif defined(_MSC_VER)
+ __asm {
+ mov eax,value
+ mov ecx,subleaf
+ cpuid
+ mov esi,rEAX
+ mov dword ptr [esi],eax
+ mov esi,rEBX
+ mov dword ptr [esi],ebx
+ mov esi,rECX
+ mov dword ptr [esi],ecx
+ mov esi,rEDX
+ mov dword ptr [esi],edx
+ }
+ return false;
+ #else
+ return true;
+ #endif
+#else
return true;
+#endif
}
void X86_MC::DetectFamilyModel(unsigned EAX, unsigned &Family,
@@ -261,7 +336,8 @@ MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(StringRef TT, StringRef CPU,
std::string CPUName = CPU;
if (CPUName.empty()) {
-#if defined (__x86_64__) || defined(__i386__)
+#if defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)\
+ || defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
CPUName = sys::getHostCPUName();
#else
CPUName = "generic";
@@ -303,8 +379,10 @@ static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) {
MAI = new X86_64MCAsmInfoDarwin(TheTriple);
else
MAI = new X86MCAsmInfoDarwin(TheTriple);
- } else if (TheTriple.isOSWindows()) {
- MAI = new X86MCAsmInfoCOFF(TheTriple);
+ } else if (TheTriple.getOS() == Triple::Win32) {
+ MAI = new X86MCAsmInfoMicrosoft(TheTriple);
+ } else if (TheTriple.getOS() == Triple::MinGW32 || TheTriple.getOS() == Triple::Cygwin) {
+ MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
} else {
MAI = new X86ELFMCAsmInfo(TheTriple);
}
@@ -327,7 +405,8 @@ static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) {
}
static MCCodeGenInfo *createX86MCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
Triple T(TT);
@@ -371,7 +450,7 @@ static MCCodeGenInfo *createX86MCCodeGenInfo(StringRef TT, Reloc::Model RM,
// 64-bit JIT places everything in the same buffer except external funcs.
CM = is64Bit ? CodeModel::Large : CodeModel::Small;
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
@@ -395,11 +474,13 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
static MCInstPrinter *createX86MCInstPrinter(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) {
if (SyntaxVariant == 0)
- return new X86ATTInstPrinter(MAI);
+ return new X86ATTInstPrinter(MAI, MII, MRI);
if (SyntaxVariant == 1)
- return new X86IntelInstPrinter(MAI);
+ return new X86IntelInstPrinter(MAI, MII, MRI);
return 0;
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index c144c51..9896cbe 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -54,6 +54,11 @@ namespace X86_MC {
/// the specified arguments. If we can't run cpuid on the host, return true.
bool GetCpuIDAndInfo(unsigned value, unsigned *rEAX,
unsigned *rEBX, unsigned *rECX, unsigned *rEDX);
+ /// GetCpuIDAndInfoEx - Execute the specified cpuid with subleaf and return
+ /// the 4 values in the specified arguments. If we can't run cpuid on the
+ /// host, return true.
+ bool GetCpuIDAndInfoEx(unsigned value, unsigned subleaf, unsigned *rEAX,
+ unsigned *rEBX, unsigned *rECX, unsigned *rEDX);
void DetectFamilyModel(unsigned EAX, unsigned &Family, unsigned &Model);
@@ -83,6 +88,12 @@ MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS,
uint32_t CPUType,
uint32_t CPUSubtype);
+/// createX86ELFObjectWriter - Construct an X86 ELF object writer.
+MCObjectWriter *createX86ELFObjectWriter(raw_ostream &OS,
+ bool Is64Bit,
+ uint8_t OSABI);
+/// createX86WinCOFFObjectWriter - Construct an X86 Win COFF object writer.
+MCObjectWriter *createX86WinCOFFObjectWriter(raw_ostream &OS, bool Is64Bit);
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
new file mode 100644
index 0000000..bc272ef
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -0,0 +1,65 @@
+//===-- X86WinCOFFObjectWriter.cpp - X86 Win COFF Writer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/X86FixupKinds.h"
+#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "llvm/MC/MCWinCOFFObjectWriter.h"
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace llvm {
+ class MCObjectWriter;
+}
+
+namespace {
+ class X86WinCOFFObjectWriter : public MCWinCOFFObjectTargetWriter {
+ const bool Is64Bit;
+
+ public:
+ X86WinCOFFObjectWriter(bool Is64Bit_);
+ ~X86WinCOFFObjectWriter();
+
+ virtual unsigned getRelocType(unsigned FixupKind) const;
+ };
+}
+
+X86WinCOFFObjectWriter::X86WinCOFFObjectWriter(bool Is64Bit_)
+ : MCWinCOFFObjectTargetWriter(Is64Bit_ ? COFF::IMAGE_FILE_MACHINE_AMD64 :
+ COFF::IMAGE_FILE_MACHINE_I386),
+ Is64Bit(Is64Bit_) {}
+
+X86WinCOFFObjectWriter::~X86WinCOFFObjectWriter() {}
+
+unsigned X86WinCOFFObjectWriter::getRelocType(unsigned FixupKind) const {
+ switch (FixupKind) {
+ case FK_PCRel_4:
+ case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ return Is64Bit ? COFF::IMAGE_REL_AMD64_REL32 : COFF::IMAGE_REL_I386_REL32;
+ case FK_Data_4:
+ case X86::reloc_signed_4byte:
+ return Is64Bit ? COFF::IMAGE_REL_AMD64_ADDR32 : COFF::IMAGE_REL_I386_DIR32;
+ case FK_Data_8:
+ if (Is64Bit)
+ return COFF::IMAGE_REL_AMD64_ADDR64;
+ llvm_unreachable("unsupported relocation type");
+ case FK_SecRel_4:
+ return Is64Bit ? COFF::IMAGE_REL_AMD64_SECREL : COFF::IMAGE_REL_I386_SECREL;
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+}
+
+MCObjectWriter *llvm::createX86WinCOFFObjectWriter(raw_ostream &OS,
+ bool Is64Bit) {
+ MCWinCOFFObjectTargetWriter *MOTW = new X86WinCOFFObjectWriter(Is64Bit);
+ return createWinCOFFObjectWriter(MOTW, OS);
+}
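Hypothetical glue for the COFF writer, mirroring the ELF factory earlier in this commit:

    MCObjectWriter *makeWin64COFFWriter(raw_ostream &OS) {
      return llvm::createX86WinCOFFObjectWriter(OS, /*Is64Bit=*/true);
    }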
diff --git a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index aeb3309..32c722a 100644
--- a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -20,7 +20,7 @@
namespace llvm {
-void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
// Default to copying the dest value.
ShuffleMask.push_back(0);
ShuffleMask.push_back(1);
@@ -44,8 +44,7 @@ void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<unsigned> &ShuffleMask) {
}
// <3,1> or <6,7,2,3>
-void DecodeMOVHLPSMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
for (unsigned i = NElts/2; i != NElts; ++i)
ShuffleMask.push_back(NElts+i);
@@ -54,8 +53,7 @@ void DecodeMOVHLPSMask(unsigned NElts,
}
// <0,2> or <0,1,4,5>
-void DecodeMOVLHPSMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask) {
for (unsigned i = 0; i != NElts/2; ++i)
ShuffleMask.push_back(i);
@@ -63,16 +61,26 @@ void DecodeMOVLHPSMask(unsigned NElts,
ShuffleMask.push_back(NElts+i);
}
-void DecodePSHUFMask(unsigned NElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- for (unsigned i = 0; i != NElts; ++i) {
- ShuffleMask.push_back(Imm % NElts);
- Imm /= NElts;
+/// DecodePSHUFMask - This decodes the shuffle masks for pshufd and vpermilp*.
+/// VT indicates the type of the vector allowing it to handle different
+/// datatypes and vector widths.
+void DecodePSHUFMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ unsigned NumLanes = VT.getSizeInBits() / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ int NewImm = Imm;
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ ShuffleMask.push_back(NewImm % NumLaneElts + l);
+ NewImm /= NumLaneElts;
+ }
+ if (NumLaneElts == 4) NewImm = Imm; // reload imm
}
}
-void DecodePSHUFHWMask(unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodePSHUFHWMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
ShuffleMask.push_back(0);
ShuffleMask.push_back(1);
ShuffleMask.push_back(2);
@@ -83,8 +91,7 @@ void DecodePSHUFHWMask(unsigned Imm,
}
}
-void DecodePSHUFLWMask(unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodePSHUFLWMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
for (unsigned i = 0; i != 4; ++i) {
ShuffleMask.push_back((Imm & 3));
Imm >>= 2;
@@ -95,76 +102,35 @@ void DecodePSHUFLWMask(unsigned Imm,
ShuffleMask.push_back(7);
}
-void DecodePUNPCKLBWMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- DecodeUNPCKLPMask(MVT::getVectorVT(MVT::i8, NElts), ShuffleMask);
-}
-
-void DecodePUNPCKLWDMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- DecodeUNPCKLPMask(MVT::getVectorVT(MVT::i16, NElts), ShuffleMask);
-}
-
-void DecodePUNPCKLDQMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- DecodeUNPCKLPMask(MVT::getVectorVT(MVT::i32, NElts), ShuffleMask);
-}
-
-void DecodePUNPCKLQDQMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- DecodeUNPCKLPMask(MVT::getVectorVT(MVT::i64, NElts), ShuffleMask);
-}
-
-void DecodePUNPCKLMask(EVT VT,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- DecodeUNPCKLPMask(VT, ShuffleMask);
-}
-
-void DecodePUNPCKHMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- for (unsigned i = 0; i != NElts/2; ++i) {
- ShuffleMask.push_back(i+NElts/2);
- ShuffleMask.push_back(i+NElts+NElts/2);
- }
-}
+/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
+/// the type of the vector allowing it to handle different datatypes and vector
+/// widths.
+void DecodeSHUFPMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
-void DecodeSHUFPSMask(unsigned NElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- // Part that reads from dest.
- for (unsigned i = 0; i != NElts/2; ++i) {
- ShuffleMask.push_back(Imm % NElts);
- Imm /= NElts;
- }
- // Part that reads from src.
- for (unsigned i = 0; i != NElts/2; ++i) {
- ShuffleMask.push_back(Imm % NElts + NElts);
- Imm /= NElts;
- }
-}
+ unsigned NumLanes = VT.getSizeInBits() / 128;
+ unsigned NumLaneElts = NumElts / NumLanes;
-void DecodeUNPCKHPMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- for (unsigned i = 0; i != NElts/2; ++i) {
- ShuffleMask.push_back(i+NElts/2); // Reads from dest
- ShuffleMask.push_back(i+NElts+NElts/2); // Reads from src
+ int NewImm = Imm;
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ // Part that reads from dest.
+ for (unsigned i = 0; i != NumLaneElts/2; ++i) {
+ ShuffleMask.push_back(NewImm % NumLaneElts + l);
+ NewImm /= NumLaneElts;
+ }
+ // Part that reads from src.
+ for (unsigned i = 0; i != NumLaneElts/2; ++i) {
+ ShuffleMask.push_back(NewImm % NumLaneElts + NumElts + l);
+ NewImm /= NumLaneElts;
+ }
+ if (NumLaneElts == 4) NewImm = Imm; // reload imm
}
}
-void DecodeUNPCKLPSMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- DecodeUNPCKLPMask(MVT::getVectorVT(MVT::i32, NElts), ShuffleMask);
-}
-
-void DecodeUNPCKLPDMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- DecodeUNPCKLPMask(MVT::getVectorVT(MVT::i64, NElts), ShuffleMask);
-}
-
-/// DecodeUNPCKLPMask - This decodes the shuffle masks for unpcklps/unpcklpd
-/// etc. VT indicates the type of the vector allowing it to handle different
-/// datatypes and vector widths.
-void DecodeUNPCKLPMask(EVT VT,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
+/// and punpckh*. VT indicates the type of the vector allowing it to handle
+/// different datatypes and vector widths.
+void DecodeUNPCKHMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
unsigned NumElts = VT.getVectorNumElements();
// Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
@@ -173,55 +139,36 @@ void DecodeUNPCKLPMask(EVT VT,
if (NumLanes == 0 ) NumLanes = 1; // Handle MMX
unsigned NumLaneElts = NumElts / NumLanes;
- unsigned Start = 0;
- unsigned End = NumLaneElts / 2;
- for (unsigned s = 0; s < NumLanes; ++s) {
- for (unsigned i = Start; i != End; ++i) {
- ShuffleMask.push_back(i); // Reads from dest/src1
- ShuffleMask.push_back(i+NumLaneElts); // Reads from src/src2
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = l + NumLaneElts/2, e = l + NumLaneElts; i != e; ++i) {
+ ShuffleMask.push_back(i); // Reads from dest/src1
+ ShuffleMask.push_back(i+NumElts); // Reads from src/src2
}
- // Process the next 128 bits.
- Start += NumLaneElts;
- End += NumLaneElts;
}
}
-// DecodeVPERMILPSMask - Decodes VPERMILPS permutes for any 128-bit 32-bit
-// elements. For 256-bit vectors, it's considered as two 128 lanes, the
-// referenced elements can't cross lanes and the mask of the first lane must
-// be the same of the second.
-void DecodeVPERMILPSMask(unsigned NumElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- unsigned NumLanes = (NumElts*32)/128;
- unsigned LaneSize = NumElts/NumLanes;
-
- for (unsigned l = 0; l != NumLanes; ++l) {
- for (unsigned i = 0; i != LaneSize; ++i) {
- unsigned Idx = (Imm >> (i*2)) & 0x3 ;
- ShuffleMask.push_back(Idx+(l*LaneSize));
- }
- }
-}
+/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
+/// and punpckl*. VT indicates the type of the vector allowing it to handle
+/// different datatypes and vector widths.
+void DecodeUNPCKLMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
+ unsigned NumElts = VT.getVectorNumElements();
-// DecodeVPERMILPDMask - Decodes VPERMILPD permutes for any 128-bit 64-bit
-// elements. For 256-bit vectors, it's considered as two 128 lanes, the
-// referenced elements can't cross lanes but the mask of the first lane can
-// be the different of the second (not like VPERMILPS).
-void DecodeVPERMILPDMask(unsigned NumElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- unsigned NumLanes = (NumElts*64)/128;
- unsigned LaneSize = NumElts/NumLanes;
-
- for (unsigned l = 0; l < NumLanes; ++l) {
- for (unsigned i = l*LaneSize; i < LaneSize*(l+1); ++i) {
- unsigned Idx = (Imm >> i) & 0x1;
- ShuffleMask.push_back(Idx+(l*LaneSize));
+ // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
+ // independently on 128-bit lanes.
+ unsigned NumLanes = VT.getSizeInBits() / 128;
+ if (NumLanes == 0 ) NumLanes = 1; // Handle MMX
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ for (unsigned i = l, e = l + NumLaneElts/2; i != e; ++i) {
+ ShuffleMask.push_back(i); // Reads from dest/src1
+ ShuffleMask.push_back(i+NumElts); // Reads from src/src2
}
}
}
-void DecodeVPERM2F128Mask(EVT VT, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
+void DecodeVPERM2X128Mask(EVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask) {
unsigned HalfSize = VT.getVectorNumElements()/2;
unsigned FstHalfBegin = (Imm & 0x3) * HalfSize;
unsigned SndHalfBegin = ((Imm >> 4) & 0x3) * HalfSize;
@@ -232,12 +179,4 @@ void DecodeVPERM2F128Mask(EVT VT, unsigned Imm,
ShuffleMask.push_back(i);
}
-void DecodeVPERM2F128Mask(unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask) {
- // VPERM2F128 is used by any 256-bit EVT, but X86InstComments only
- // has information about the instruction and not the types. So for
- // instruction comments purpose, assume the 256-bit vector is v4i64.
- return DecodeVPERM2F128Mask(MVT::v4i64, Imm, ShuffleMask);
-}
-
} // llvm namespace
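Two worked examples of the rewritten decoders (a sketch; the mask values follow from the loops above):

    SmallVector<int, 8> Mask;
    DecodePSHUFMask(MVT::v4i32, 0x1B, Mask); // pshufd $0x1B: <3,2,1,0>
    Mask.clear();
    DecodeUNPCKLMask(MVT::v8i32, Mask);      // lane-aware: <0,8,1,9,4,12,5,13>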
diff --git a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
index 58193e6..5b8c6ef 100644
--- a/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/contrib/llvm/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -24,83 +24,41 @@
namespace llvm {
enum {
- SM_SentinelZero = ~0U
+ SM_SentinelZero = -1
};
-void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<unsigned> &ShuffleMask);
+void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
// <3,1> or <6,7,2,3>
-void DecodeMOVHLPSMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
+void DecodeMOVHLPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
// <0,2> or <0,1,4,5>
-void DecodeMOVLHPSMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
+void DecodeMOVLHPSMask(unsigned NElts, SmallVectorImpl<int> &ShuffleMask);
-void DecodePSHUFMask(unsigned NElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask);
+void DecodePSHUFMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-void DecodePSHUFHWMask(unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask);
+void DecodePSHUFHWMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-void DecodePSHUFLWMask(unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask);
+void DecodePSHUFLWMask(unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-void DecodePUNPCKLBWMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
+/// DecodeSHUFPMask - This decodes the shuffle masks for shufp*. VT indicates
+/// the type of the vector allowing it to handle different datatypes and vector
+/// widths.
+void DecodeSHUFPMask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
-void DecodePUNPCKLWDMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
+/// DecodeUNPCKHMask - This decodes the shuffle masks for unpckhps/unpckhpd
+/// and punpckh*. VT indicates the type of the vector allowing it to handle
+/// different datatypes and vector widths.
+void DecodeUNPCKHMask(EVT VT, SmallVectorImpl<int> &ShuffleMask);
-void DecodePUNPCKLDQMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
+/// DecodeUNPCKLMask - This decodes the shuffle masks for unpcklps/unpcklpd
+/// and punpckl*. VT indicates the type of the vector allowing it to handle
+/// different datatypes and vector widths.
+void DecodeUNPCKLMask(EVT VT, SmallVectorImpl<int> &ShuffleMask);
-void DecodePUNPCKLQDQMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
-void DecodePUNPCKLMask(EVT VT,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-void DecodePUNPCKHMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-void DecodeSHUFPSMask(unsigned NElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-void DecodeUNPCKHPMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-void DecodeUNPCKLPSMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-void DecodeUNPCKLPDMask(unsigned NElts,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-/// DecodeUNPCKLPMask - This decodes the shuffle masks for unpcklps/unpcklpd
-/// etc. VT indicates the type of the vector allowing it to handle different
-/// datatypes and vector widths.
-void DecodeUNPCKLPMask(EVT VT,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-
-// DecodeVPERMILPSMask - Decodes VPERMILPS permutes for any 128-bit 32-bit
-// elements. For 256-bit vectors, it's considered as two 128 lanes, the
-// referenced elements can't cross lanes and the mask of the first lane must
-// be the same of the second.
-void DecodeVPERMILPSMask(unsigned NElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-// DecodeVPERMILPDMask - Decodes VPERMILPD permutes for any 128-bit 64-bit
-// elements. For 256-bit vectors, it's considered as two 128 lanes, the
-// referenced elements can't cross lanes but the mask of the first lane can
-// be the different of the second (not like VPERMILPS).
-void DecodeVPERMILPDMask(unsigned NElts, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask);
-
-void DecodeVPERM2F128Mask(unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask);
-void DecodeVPERM2F128Mask(EVT VT, unsigned Imm,
- SmallVectorImpl<unsigned> &ShuffleMask);
+void DecodeVPERM2X128Mask(EVT VT, unsigned Imm,
+ SmallVectorImpl<int> &ShuffleMask);
} // llvm namespace
diff --git a/contrib/llvm/lib/Target/X86/X86.h b/contrib/llvm/lib/Target/X86/X86.h
index 81e9422..ecc7b59 100644
--- a/contrib/llvm/lib/Target/X86/X86.h
+++ b/contrib/llvm/lib/Target/X86/X86.h
@@ -24,8 +24,6 @@ namespace llvm {
class FunctionPass;
class JITCodeEmitter;
-class MachineCodeEmitter;
-class Target;
class X86TargetMachine;
/// createX86ISelDag - This pass converts a legalized DAG into a
diff --git a/contrib/llvm/lib/Target/X86/X86.td b/contrib/llvm/lib/Target/X86/X86.td
index 104b91f..b6591d4 100644
--- a/contrib/llvm/lib/Target/X86/X86.td
+++ b/contrib/llvm/lib/Target/X86/X86.td
@@ -1,4 +1,4 @@
-//===- X86.td - Target definition file for the Intel X86 ---*- tablegen -*-===//
+//===-- X86.td - Target definition file for the Intel X86 --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,9 +23,6 @@ include "llvm/Target/Target.td"
def Mode64Bit : SubtargetFeature<"64bit-mode", "In64BitMode", "true",
"64-bit mode (x86_64)">;
-def ModeNaCl : SubtargetFeature<"nacl-mode", "InNaClMode", "true",
- "Native Client mode">;
-
//===----------------------------------------------------------------------===//
// X86 Subtarget features.
//===----------------------------------------------------------------------===//
@@ -58,7 +55,7 @@ def FeatureSSE41 : SubtargetFeature<"sse41", "X86SSELevel", "SSE41",
[FeatureSSSE3]>;
def FeatureSSE42 : SubtargetFeature<"sse42", "X86SSELevel", "SSE42",
"Enable SSE 4.2 instructions",
- [FeatureSSE41, FeaturePOPCNT]>;
+ [FeatureSSE41]>;
def Feature3DNow : SubtargetFeature<"3dnow", "X863DNowLevel", "ThreeDNow",
"Enable 3DNow! instructions",
[FeatureMMX]>;
@@ -81,16 +78,24 @@ def FeatureFastUAMem : SubtargetFeature<"fast-unaligned-mem",
"Fast unaligned memory access">;
def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true",
"Support SSE 4a instructions",
- [FeaturePOPCNT]>;
+ [FeatureSSE3]>;
-def FeatureAVX : SubtargetFeature<"avx", "HasAVX", "true",
- "Enable AVX instructions">;
+def FeatureAVX : SubtargetFeature<"avx", "X86SSELevel", "AVX",
+ "Enable AVX instructions",
+ [FeatureSSE42]>;
+def FeatureAVX2 : SubtargetFeature<"avx2", "X86SSELevel", "AVX2",
+ "Enable AVX2 instructions",
+ [FeatureAVX]>;
def FeatureCLMUL : SubtargetFeature<"clmul", "HasCLMUL", "true",
"Enable carry-less multiplication instructions">;
def FeatureFMA3 : SubtargetFeature<"fma3", "HasFMA3", "true",
- "Enable three-operand fused multiple-add">;
+ "Enable three-operand fused multiple-add",
+ [FeatureAVX]>;
def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true",
- "Enable four-operand fused multiple-add">;
+ "Enable four-operand fused multiple-add",
+ [FeatureAVX]>;
+def FeatureXOP : SubtargetFeature<"xop", "HasXOP", "true",
+ "Enable XOP instructions">;
def FeatureVectorUAMem : SubtargetFeature<"vector-unaligned-mem",
"HasVectorUAMem", "true",
"Allow unaligned memory operands on vector/SIMD instructions">;
@@ -102,17 +107,31 @@ def FeatureRDRAND : SubtargetFeature<"rdrand", "HasRDRAND", "true",
"Support RDRAND instruction">;
def FeatureF16C : SubtargetFeature<"f16c", "HasF16C", "true",
"Support 16-bit floating point conversion instructions">;
+def FeatureFSGSBase : SubtargetFeature<"fsgsbase", "HasFSGSBase", "true",
+ "Support FS/GS Base instructions">;
def FeatureLZCNT : SubtargetFeature<"lzcnt", "HasLZCNT", "true",
"Support LZCNT instruction">;
def FeatureBMI : SubtargetFeature<"bmi", "HasBMI", "true",
"Support BMI instructions">;
+def FeatureBMI2 : SubtargetFeature<"bmi2", "HasBMI2", "true",
+ "Support BMI2 instructions">;
+def FeatureLeaForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
+ "Use LEA for adjusting the stack pointer">;
//===----------------------------------------------------------------------===//
// X86 processors supported.
//===----------------------------------------------------------------------===//
+include "X86Schedule.td"
+
+def ProcIntelAtom : SubtargetFeature<"atom", "X86ProcFamily", "IntelAtom",
+ "Intel Atom processors">;
+
class Proc<string Name, list<SubtargetFeature> Features>
- : Processor<Name, NoItineraries, Features>;
+ : Processor<Name, GenericItineraries, Features>;
+
+class AtomProc<string Name, list<SubtargetFeature> Features>
+ : Processor<Name, AtomItineraries, Features>;
def : Proc<"generic", []>;
def : Proc<"i386", []>;
@@ -137,34 +156,38 @@ def : Proc<"core2", [FeatureSSSE3, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
def : Proc<"penryn", [FeatureSSE41, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
-def : Proc<"atom", [FeatureSSE3, FeatureCMPXCHG16B, FeatureMOVBE,
- FeatureSlowBTMem]>;
+def : AtomProc<"atom", [ProcIntelAtom, FeatureSSE3, FeatureCMPXCHG16B,
+ FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP]>;
// "Arrandale" along with corei3 and corei5
def : Proc<"corei7", [FeatureSSE42, FeatureCMPXCHG16B,
- FeatureSlowBTMem, FeatureFastUAMem, FeatureAES]>;
+ FeatureSlowBTMem, FeatureFastUAMem,
+ FeaturePOPCNT, FeatureAES]>;
def : Proc<"nehalem", [FeatureSSE42, FeatureCMPXCHG16B,
- FeatureSlowBTMem, FeatureFastUAMem]>;
+ FeatureSlowBTMem, FeatureFastUAMem,
+ FeaturePOPCNT]>;
// Westmere is a similar machine to nehalem with some additional features.
// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
def : Proc<"westmere", [FeatureSSE42, FeatureCMPXCHG16B,
- FeatureSlowBTMem, FeatureFastUAMem, FeatureAES,
- FeatureCLMUL]>;
+ FeatureSlowBTMem, FeatureFastUAMem,
+ FeaturePOPCNT, FeatureAES, FeatureCLMUL]>;
// Sandy Bridge
// SSE is not listed here since llvm treats AVX as a reimplementation of SSE,
// rather than a superset.
// FIXME: Disabling AVX for now since it's not ready.
-def : Proc<"corei7-avx", [FeatureSSE42, FeatureCMPXCHG16B,
+def : Proc<"corei7-avx", [FeatureSSE42, FeatureCMPXCHG16B, FeaturePOPCNT,
FeatureAES, FeatureCLMUL]>;
// Ivy Bridge
-def : Proc<"core-avx-i", [FeatureSSE42, FeatureCMPXCHG16B,
+def : Proc<"core-avx-i", [FeatureSSE42, FeatureCMPXCHG16B, FeaturePOPCNT,
FeatureAES, FeatureCLMUL,
- FeatureRDRAND, FeatureF16C]>;
+ FeatureRDRAND, FeatureF16C, FeatureFSGSBase]>;
// Haswell
-def : Proc<"core-avx2", [FeatureSSE42, FeatureCMPXCHG16B, FeatureAES,
- FeatureCLMUL, FeatureRDRAND, FeatureF16C,
- FeatureFMA3, FeatureMOVBE, FeatureLZCNT,
- FeatureBMI]>;
+// FIXME: Disabling AVX/AVX2/FMA3 for now since it's not ready.
+def : Proc<"core-avx2", [FeatureSSE42, FeatureCMPXCHG16B, FeaturePOPCNT,
+ FeatureAES, FeatureCLMUL, FeatureRDRAND,
+ FeatureF16C, FeatureFSGSBase,
+ FeatureMOVBE, FeatureLZCNT, FeatureBMI,
+ FeatureBMI2]>;
def : Proc<"k6", [FeatureMMX]>;
def : Proc<"k6-2", [Feature3DNow]>;
@@ -189,15 +212,21 @@ def : Proc<"opteron-sse3", [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
def : Proc<"athlon64-sse3", [FeatureSSE3, Feature3DNowA, FeatureCMPXCHG16B,
FeatureSlowBTMem]>;
def : Proc<"amdfam10", [FeatureSSE3, FeatureSSE4A,
- Feature3DNowA, FeatureCMPXCHG16B,
- FeatureSlowBTMem]>;
-def : Proc<"barcelona", [FeatureSSE3, FeatureSSE4A,
- Feature3DNowA, FeatureCMPXCHG16B,
- FeatureSlowBTMem]>;
-def : Proc<"istanbul", [Feature3DNowA, FeatureCMPXCHG16B,
- FeatureSSE4A, Feature3DNowA]>;
-def : Proc<"shanghai", [Feature3DNowA, FeatureCMPXCHG16B, FeatureSSE4A,
- Feature3DNowA]>;
+ Feature3DNowA, FeatureCMPXCHG16B, FeatureLZCNT,
+ FeaturePOPCNT, FeatureSlowBTMem]>;
+// Bobcat
+def : Proc<"btver1", [FeatureSSSE3, FeatureSSE4A, FeatureCMPXCHG16B,
+ FeatureLZCNT, FeaturePOPCNT]>;
+// FIXME: Disabling AVX/FMA4 for now since it's not ready.
+// Bulldozer
+def : Proc<"bdver1", [FeatureSSE42, FeatureSSE4A, FeatureCMPXCHG16B,
+ FeatureAES, FeatureCLMUL,
+ FeatureXOP, FeatureLZCNT, FeaturePOPCNT]>;
+// Enhanced Bulldozer
+def : Proc<"bdver2", [FeatureSSE42, FeatureSSE4A, FeatureCMPXCHG16B,
+ FeatureAES, FeatureCLMUL,
+ FeatureXOP, FeatureF16C, FeatureLZCNT,
+ FeaturePOPCNT, FeatureBMI]>;
def : Proc<"winchip-c6", [FeatureMMX]>;
def : Proc<"winchip2", [Feature3DNow]>;
@@ -229,9 +258,11 @@ include "X86CallingConv.td"
// Assembly Parser
//===----------------------------------------------------------------------===//
-// Currently the X86 assembly parser only supports ATT syntax.
def ATTAsmParser : AsmParser {
- string AsmParserClassName = "ATTAsmParser";
+ string AsmParserClassName = "AsmParser";
+}
+
+def ATTAsmParserVariant : AsmParserVariant {
int Variant = 0;
// Discard comments in assembly strings.
@@ -241,6 +272,16 @@ def ATTAsmParser : AsmParser {
string RegisterPrefix = "%";
}
+def IntelAsmParserVariant : AsmParserVariant {
+ int Variant = 1;
+
+ // Discard comments in assembly strings.
+ string CommentDelimiter = ";";
+
+ // Recognize hard coded registers.
+ string RegisterPrefix = "";
+}
+
//===----------------------------------------------------------------------===//
// Assembly Printers
//===----------------------------------------------------------------------===//
@@ -261,8 +302,7 @@ def IntelAsmWriter : AsmWriter {
def X86 : Target {
// Information about the instructions...
let InstructionSet = X86InstrInfo;
-
let AssemblyParsers = [ATTAsmParser];
-
+ let AssemblyParserVariants = [ATTAsmParserVariant, IntelAsmParserVariant];
let AssemblyWriters = [ATTAsmWriter, IntelAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 4c3ff02..7db7ccb 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -13,13 +13,12 @@
//===----------------------------------------------------------------------===//
#include "X86AsmPrinter.h"
-#include "InstPrinter/X86ATTInstPrinter.h"
-#include "InstPrinter/X86IntelInstPrinter.h"
#include "X86MCInstLower.h"
#include "X86.h"
#include "X86COFFMachineModuleInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
+#include "InstPrinter/X86ATTInstPrinter.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
@@ -199,6 +198,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
case X86II::MO_TLVP_PIC_BASE:
O << "@TLVP" << '-' << *MF->getPICBaseSymbol();
break;
+ case X86II::MO_SECREL: O << "@SECREL"; break;
}
}
@@ -264,16 +264,40 @@ void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
void X86AsmPrinter::printSSECC(const MachineInstr *MI, unsigned Op,
raw_ostream &O) {
unsigned char value = MI->getOperand(Op).getImm();
- assert(value <= 7 && "Invalid ssecc argument!");
switch (value) {
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
+ default: llvm_unreachable("Invalid ssecc argument!");
+ case 0: O << "eq"; break;
+ case 1: O << "lt"; break;
+ case 2: O << "le"; break;
+ case 3: O << "unord"; break;
+ case 4: O << "neq"; break;
+ case 5: O << "nlt"; break;
+ case 6: O << "nle"; break;
+ case 7: O << "ord"; break;
+ case 8: O << "eq_uq"; break;
+ case 9: O << "nge"; break;
+ case 0xa: O << "ngt"; break;
+ case 0xb: O << "false"; break;
+ case 0xc: O << "neq_oq"; break;
+ case 0xd: O << "ge"; break;
+ case 0xe: O << "gt"; break;
+ case 0xf: O << "true"; break;
+ case 0x10: O << "eq_os"; break;
+ case 0x11: O << "lt_oq"; break;
+ case 0x12: O << "le_oq"; break;
+ case 0x13: O << "unord_s"; break;
+ case 0x14: O << "neq_us"; break;
+ case 0x15: O << "nlt_uq"; break;
+ case 0x16: O << "nle_uq"; break;
+ case 0x17: O << "ord_s"; break;
+ case 0x18: O << "eq_us"; break;
+ case 0x19: O << "nge_uq"; break;
+ case 0x1a: O << "ngt_uq"; break;
+ case 0x1b: O << "false_os"; break;
+ case 0x1c: O << "neq_os"; break;
+ case 0x1d: O << "ge_oq"; break;
+ case 0x1e: O << "gt_oq"; break;
+ case 0x1f: O << "true_us"; break;
}
}
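The switch grows from the 3-bit SSE comparison predicates to the full 5-bit
space AVX's vcmp* instructions define. A table-driven sketch of the same
mapping (helper name ours):

#include <cassert>

static const char *const SSECCNames[32] = {
  "eq",     "lt",      "le",      "unord",    "neq",    "nlt",
  "nle",    "ord",     "eq_uq",   "nge",      "ngt",    "false",
  "neq_oq", "ge",      "gt",      "true",     "eq_os",  "lt_oq",
  "le_oq",  "unord_s", "neq_us",  "nlt_uq",   "nle_uq", "ord_s",
  "eq_us",  "nge_uq",  "ngt_uq",  "false_os", "neq_os", "ge_oq",
  "gt_oq",  "true_us"
};

static const char *sseCCName(unsigned Imm) {
  assert(Imm < 32 && "Invalid ssecc argument!");
  return SSECCNames[Imm];
}
// e.g. an immediate of 0xd prints the "ge" suffix: vcmpps $0xd -> vcmpgeps.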
@@ -575,7 +599,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing() &&
- MMI->callsExternalVAFunctionWithFloatingPointArguments()) {
+ MMI->usesVAFloatArgument()) {
StringRef SymbolName = Subtarget->is64Bit() ? "_fltused" : "__fltused";
MCSymbol *S = MMI->getContext().GetOrCreateSymbol(SymbolName);
OutStreamer.EmitSymbolAttribute(S, MCSA_Global);
diff --git a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
index 3a50435..a6ed9ba 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/contrib/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -17,7 +17,6 @@
#include "X86.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -25,11 +24,7 @@
namespace llvm {
-class MachineJumpTableInfo;
-class MCContext;
-class MCInst;
class MCStreamer;
-class MCSymbol;
class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
diff --git a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
index 4326814..e01ff41 100644
--- a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/X86COFFMachineModuleInfo.cpp -------------------------===//
+//===-- X86COFFMachineModuleInfo.cpp - X86 COFF MMI Impl ------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
index 98ab2a6..0cec95a 100644
--- a/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/X86COFFMachineModuleInfo.h -----------------*- C++ -*-===//
+//===-- X86COFFMachineModuleInfo.h - X86 COFF MMI Impl ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,9 +14,9 @@
#ifndef X86COFF_MACHINEMODULEINFO_H
#define X86COFF_MACHINEMODULEINFO_H
+#include "X86MachineFunctionInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/ADT/DenseSet.h"
-#include "X86MachineFunctionInfo.h"
namespace llvm {
class X86MachineFunctionInfo;
diff --git a/contrib/llvm/lib/Target/X86/X86CallingConv.td b/contrib/llvm/lib/Target/X86/X86CallingConv.td
index 77b9905..d148989 100644
--- a/contrib/llvm/lib/Target/X86/X86CallingConv.td
+++ b/contrib/llvm/lib/Target/X86/X86CallingConv.td
@@ -1,10 +1,10 @@
-//===- X86CallingConv.td - Calling Conventions X86 32/64 ---*- tablegen -*-===//
-//
+//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the X86-32 and X86-64
@@ -61,7 +61,7 @@ def RetCC_X86_32_C : CallingConv<[
// weirdly; this is really the sse-regparm calling convention) in which
// case they use XMM0, otherwise it is the same as the common X86 calling
// conv.
- CCIfInReg<CCIfSubtarget<"hasXMMInt()",
+ CCIfInReg<CCIfSubtarget<"hasSSE2()",
CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
CCDelegateTo<RetCC_X86Common>
@@ -73,8 +73,8 @@ def RetCC_X86_32_Fast : CallingConv<[
// SSE2.
// This can happen when a float, 2 x float, or 3 x float vector is split by
// target lowering, and is returned in 1-3 sse regs.
- CCIfType<[f32], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
- CCIfType<[f64], CCIfSubtarget<"hasXMMInt()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
+ CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
+ CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
// For integers, ECX can be used as an extra return register
CCIfType<[i8], CCAssignToReg<[AL, DL, CL]>>,
@@ -150,18 +150,23 @@ def CC_X86_64_C : CallingConv<[
// The first 8 MMX vector arguments are passed in XMM registers on Darwin.
CCIfType<[x86mmx],
CCIfSubtarget<"isTargetDarwin()",
- CCIfSubtarget<"hasXMMInt()",
+ CCIfSubtarget<"hasSSE2()",
CCPromoteToType<v2i64>>>>,
// The first 8 FP/Vector arguments are passed in XMM registers.
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfSubtarget<"hasXMM()",
+ CCIfSubtarget<"hasSSE1()",
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
- // The first 8 256-bit vector arguments are passed in YMM registers.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
- CCIfSubtarget<"hasAVX()",
- CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7]>>>,
+ // The first 8 256-bit vector arguments are passed in YMM registers, unless
+ // this is a vararg function.
+ // FIXME: This isn't precisely correct; the x86-64 ABI document says that
+ // fixed arguments to vararg functions are supposed to be passed in
+ // registers. Actually modeling that would be a lot of work, though.
+ CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+ CCIfSubtarget<"hasAVX()",
+ CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
+ YMM4, YMM5, YMM6, YMM7]>>>>,
// Integer/FP values get stored in stack slots that are 8 bytes in size and
// 8-byte aligned if there are no more registers to hold them.
@@ -193,6 +198,10 @@ def CC_X86_Win64_C : CallingConv<[
// 128 bit vectors are passed by pointer
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
+
+ // 256 bit vectors are passed by pointer
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,
+
// The first 4 MMX vector arguments are passed in GPRs.
CCIfType<[x86mmx], CCBitConvertToType<i64>>,
@@ -233,7 +242,7 @@ def CC_X86_64_GHC : CallingConv<[
// Pass in STG registers: F1, F2, F3, F4, D1, D2
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCIfSubtarget<"hasXMM()",
+ CCIfSubtarget<"hasSSE1()",
CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;
@@ -251,7 +260,7 @@ def CC_X86_32_Common : CallingConv<[
// The first 3 float or double arguments, if marked 'inreg' and if the call
// is not a vararg call and if SSE2 is available, are passed in SSE registers.
CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
- CCIfSubtarget<"hasXMMInt()",
+ CCIfSubtarget<"hasSSE2()",
CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,
// The first 3 __m64 vector arguments are passed in mmx registers if the
@@ -322,8 +331,8 @@ def CC_X86_32_ThisCall : CallingConv<[
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
- // The 'nest' parameter, if any, is passed in EAX.
- CCIfNest<CCAssignToReg<[EAX]>>,
+ // Pass sret arguments indirectly through EAX
+ CCIfSRet<CCAssignToReg<[EAX]>>,
// The first integer argument is passed in ECX
CCIfType<[i32], CCAssignToReg<[ECX]>>,
@@ -350,7 +359,7 @@ def CC_X86_32_FastCC : CallingConv<[
// The first 3 float or double arguments, if the call is not a vararg
// call and if SSE2 is available, are passed in SSE registers.
CCIfNotVarArg<CCIfType<[f32,f64],
- CCIfSubtarget<"hasXMMInt()",
+ CCIfSubtarget<"hasSSE2()",
CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
// Doubles get 8-byte slots that are 8-byte aligned.
@@ -399,3 +408,18 @@ def CC_X86 : CallingConv<[
CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
CCDelegateTo<CC_X86_32>
]>;
+
+//===----------------------------------------------------------------------===//
+// Callee-saved Registers.
+//===----------------------------------------------------------------------===//
+
+def CSR_Ghc : CalleeSavedRegs<(add)>;
+
+def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
+def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;
+
+def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
+def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;
+
+def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
+ (sequence "XMM%u", 6, 15))>;
diff --git a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
index f939510..ee3de9a 100644
--- a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
@@ -1,4 +1,4 @@
-//===-- X86/X86CodeEmitter.cpp - Convert X86 code to machine code ---------===//
+//===-- X86CodeEmitter.cpp - Convert X86 code to machine code -------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -664,15 +664,14 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
case X86II::A7: // 0F A7
Need0FPrefix = true;
break;
- case X86II::TF: // F2 0F 38
- MCE.emitByte(0xF2);
- Need0FPrefix = true;
- break;
case X86II::REP: break; // already handled.
+ case X86II::T8XS: // F3 0F 38
case X86II::XS: // F3 0F
MCE.emitByte(0xF3);
Need0FPrefix = true;
break;
+ case X86II::T8XD: // F2 0F 38
+ case X86II::TAXD: // F2 0F 3A
case X86II::XD: // F2 0F
MCE.emitByte(0xF2);
Need0FPrefix = true;
@@ -698,10 +697,12 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
MCE.emitByte(0x0F);
switch (Desc->TSFlags & X86II::Op0Mask) {
- case X86II::TF: // F2 0F 38
+ case X86II::T8XD: // F2 0F 38
+ case X86II::T8XS: // F3 0F 38
case X86II::T8: // 0F 38
MCE.emitByte(0x38);
break;
+ case X86II::TAXD: // F2 0F 3A
case X86II::TA: // 0F 3A
MCE.emitByte(0x3A);
break;
@@ -805,8 +806,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
}
assert(MO.isImm() && "Unknown RawFrm operand!");
- if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32 ||
- Opcode == X86::WINCALL64pcrel32) {
+ if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32) {
// Fix up immediate operand for pc relative calls.
intptr_t Imm = (intptr_t)MO.getImm();
Imm = Imm - MCE.getCurrentPCValue() - 4;
@@ -1003,7 +1003,7 @@ void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
break;
}
- if (!Desc->isVariadic() && CurOp != NumOps) {
+ if (!MI.isVariadic() && CurOp != NumOps) {
#ifndef NDEBUG
dbgs() << "Cannot encode all operands of: " << MI << "\n";
#endif
diff --git a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp b/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
index 4a72d15..c1a49a7 100644
--- a/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
@@ -60,7 +60,6 @@ unsigned X86ELFWriterInfo::getRelocationType(unsigned MachineRelTy) const {
llvm_unreachable("unknown x86 machine relocation type");
}
}
- return 0;
}
long int X86ELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
@@ -83,7 +82,6 @@ long int X86ELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy,
llvm_unreachable("unknown x86 relocation type");
}
}
- return 0;
}
unsigned X86ELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
@@ -107,7 +105,6 @@ unsigned X86ELFWriterInfo::getRelocationTySize(unsigned RelTy) const {
llvm_unreachable("unknown x86 relocation type");
}
}
- return 0;
}
bool X86ELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
@@ -132,7 +129,6 @@ bool X86ELFWriterInfo::isPCRelativeRel(unsigned RelTy) const {
llvm_unreachable("unknown x86 relocation type");
}
}
- return 0;
}
unsigned X86ELFWriterInfo::getAbsoluteLabelMachineRelTy() const {
@@ -146,8 +142,6 @@ long int X86ELFWriterInfo::computeRelocation(unsigned SymOffset,
if (RelTy == ELF::R_X86_64_PC32 || RelTy == ELF::R_386_PC32)
return SymOffset - (RelOffset + 4);
- else
- assert(0 && "computeRelocation unknown for this relocation type");
- return 0;
+ llvm_unreachable("computeRelocation unknown for this relocation type");
}
diff --git a/contrib/llvm/lib/Target/X86/X86FastISel.cpp b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
index f912b28..69752c5 100644
--- a/contrib/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
@@ -60,8 +60,8 @@ public:
explicit X86FastISel(FunctionLoweringInfo &funcInfo) : FastISel(funcInfo) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
- X86ScalarSSEf64 = Subtarget->hasSSE2() || Subtarget->hasAVX();
- X86ScalarSSEf32 = Subtarget->hasSSE1() || Subtarget->hasAVX();
+ X86ScalarSSEf64 = Subtarget->hasSSE2();
+ X86ScalarSSEf32 = Subtarget->hasSSE1();
}
virtual bool TargetSelectInstruction(const Instruction *I);
@@ -258,6 +258,18 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
Opc = X86ScalarSSEf64 ?
(Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
break;
+ case MVT::v4f32:
+ Opc = X86::MOVAPSmr;
+ break;
+ case MVT::v2f64:
+ Opc = X86::MOVAPDmr;
+ break;
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v8i16:
+ case MVT::v16i8:
+ Opc = X86::MOVDQAmr;
+ break;
}
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
@@ -671,7 +683,14 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(const Instruction *I) {
// Atomic stores need special handling.
- if (cast<StoreInst>(I)->isAtomic())
+ const StoreInst *S = cast<StoreInst>(I);
+
+ if (S->isAtomic())
+ return false;
+
+ unsigned SABIAlignment =
+ TD.getABITypeAlignment(S->getValueOperand()->getType());
+ if (S->getAlignment() != 0 && S->getAlignment() < SABIAlignment)
return false;
MVT VT;
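The new bail-out rejects stores whose stated alignment is below the type's
ABI alignment, since the aligned vector moves chosen above (MOVAPS, MOVAPD,
MOVDQA) fault on misaligned addresses. A sketch of the predicate; an explicit
alignment of 0 means "use the ABI alignment" and is fine:

#include <cstdint>

static bool fastIselStoreAlignmentOK(uint64_t StoreAlign, uint64_t ABIAlign) {
  return StoreAlign == 0 || StoreAlign >= ABIAlign;
}
// e.g. a <4 x float> store aligned to 4 (ABI alignment 16) is rejected and
// left to SelectionDAG, which knows how to emit unaligned moves.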
@@ -709,7 +728,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
// fastcc with -tailcallopt is intended to provide a guaranteed
// tail call optimization. Fastisel doesn't know how to do that.
- if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
+ if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
return false;
// Let SDISel handle vararg functions.
@@ -818,8 +837,8 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
bool HasAVX = Subtarget->hasAVX();
- bool X86ScalarSSEf32 = HasAVX || Subtarget->hasSSE1();
- bool X86ScalarSSEf64 = HasAVX || Subtarget->hasSSE2();
+ bool X86ScalarSSEf32 = Subtarget->hasSSE1();
+ bool X86ScalarSSEf64 = Subtarget->hasSSE2();
switch (VT.getSimpleVT().SimpleTy) {
default: return 0;
@@ -1510,7 +1529,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// fastcc with -tailcallopt is intended to provide a guaranteed
// tail call optimization. Fastisel doesn't know how to do that.
- if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
+ if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
return false;
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
@@ -1524,7 +1543,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Fast-isel doesn't know about callee-pop yet.
if (X86::isCalleePop(CC, Subtarget->is64Bit(), isVarArg,
- GuaranteedTailCallOpt))
+ TM.Options.GuaranteedTailCallOpt))
return false;
// Check whether the function can return without sret-demotion.
@@ -1557,10 +1576,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
SmallVector<unsigned, 8> Args;
SmallVector<MVT, 8> ArgVTs;
SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
- Args.reserve(CS.arg_size());
- ArgVals.reserve(CS.arg_size());
- ArgVTs.reserve(CS.arg_size());
- ArgFlags.reserve(CS.arg_size());
+ unsigned arg_size = CS.arg_size();
+ Args.reserve(arg_size);
+ ArgVals.reserve(arg_size);
+ ArgVTs.reserve(arg_size);
+ ArgFlags.reserve(arg_size);
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
// If we're lowering a mem intrinsic instead of a regular call, skip the
@@ -1740,9 +1760,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// If this is a really simple value, emit this with the Value* version
// of X86FastEmitStore. If it isn't simple, we don't want to do this,
// as it can cause us to reevaluate the argument.
- X86FastEmitStore(ArgVT, ArgVal, AM);
+ if (!X86FastEmitStore(ArgVT, ArgVal, AM))
+ return false;
} else {
- X86FastEmitStore(ArgVT, Arg, AM);
+ if (!X86FastEmitStore(ArgVT, Arg, AM))
+ return false;
}
}
}
@@ -1757,7 +1779,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64()) {
// Count the number of XMM registers allocated.
- static const unsigned XMMArgRegs[] = {
+ static const uint16_t XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
@@ -1771,9 +1793,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (CalleeOp) {
// Register-indirect call.
unsigned CallOpc;
- if (Subtarget->isTargetWin64())
- CallOpc = X86::WINCALL64r;
- else if (Subtarget->is64Bit())
+ if (Subtarget->is64Bit())
CallOpc = X86::CALL64r;
else
CallOpc = X86::CALL32r;
@@ -1784,9 +1804,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Direct call.
assert(GV && "Not a direct call");
unsigned CallOpc;
- if (Subtarget->isTargetWin64())
- CallOpc = X86::WINCALL64pcrel32;
- else if (Subtarget->is64Bit())
+ if (Subtarget->is64Bit())
CallOpc = X86::CALL64pcrel32;
else
CallOpc = X86::CALLpcrel32;
@@ -1831,10 +1849,15 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
MIB.addReg(RegArgs[i]);
+ // Add a register mask with the call-preserved registers.
+ // Proper defs for return values will be added by setPhysRegsDeadExcept().
+ MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+
// Issue CALLSEQ_END
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
unsigned NumBytesCallee = 0;
- if (!Subtarget->is64Bit() && CS.paramHasAttr(1, Attribute::StructRet))
+ if (!Subtarget->is64Bit() && !Subtarget->isTargetWindows() &&
+ CS.paramHasAttr(1, Attribute::StructRet))
NumBytesCallee = 4;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
.addImm(NumBytes).addImm(NumBytesCallee);
@@ -2081,7 +2104,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
if (!X86SelectAddress(C, AM))
return 0;
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
- TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
+ const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(Opc), ResultReg), AM);
@@ -2100,7 +2123,7 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
default: return false;
case MVT::f32:
if (X86ScalarSSEf32) {
- Opc = Subtarget->hasAVX() ? X86::VFsFLD0SS : X86::FsFLD0SS;
+ Opc = X86::FsFLD0SS;
RC = X86::FR32RegisterClass;
} else {
Opc = X86::LD_Fp032;
@@ -2109,7 +2132,7 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
break;
case MVT::f64:
if (X86ScalarSSEf64) {
- Opc = Subtarget->hasAVX() ? X86::VFsFLD0SD : X86::FsFLD0SD;
+ Opc = X86::FsFLD0SD;
RC = X86::FR64RegisterClass;
} else {
Opc = X86::LD_Fp064;
@@ -2156,7 +2179,7 @@ bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
namespace llvm {
- llvm::FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) {
+ FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) {
return new X86FastISel(funcInfo);
}
}
diff --git a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
index e3461c8..ed1707d 100644
--- a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -26,8 +26,8 @@
#define DEBUG_TYPE "x86-codegen"
#include "X86.h"
#include "X86InstrInfo.h"
+#include "llvm/InlineAsm.h"
#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
@@ -37,7 +37,6 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -219,7 +218,7 @@ namespace {
/// getSTReg - Return the X86::ST(i) register which contains the specified
/// FP<RegNo> register.
unsigned getSTReg(unsigned RegNo) const {
- return StackTop - 1 - getSlot(RegNo) + llvm::X86::ST0;
+ return StackTop - 1 - getSlot(RegNo) + X86::ST0;
}
// pushReg - Push the specified FP<n> register onto the stack.
@@ -570,8 +569,8 @@ void FPS::finishBlockStack() {
namespace {
struct TableEntry {
- unsigned from;
- unsigned to;
+ uint16_t from;
+ uint16_t to;
bool operator<(const TableEntry &TE) const { return from < TE.from; }
friend bool operator<(const TableEntry &TE, unsigned V) {
return TE.from < V;
@@ -1644,6 +1643,30 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
return;
}
+ case X86::WIN_FTOL_32:
+ case X86::WIN_FTOL_64: {
+ // Push the operand into ST0.
+ MachineOperand &Op = MI->getOperand(0);
+ assert(Op.isUse() && Op.isReg() &&
+ Op.getReg() >= X86::FP0 && Op.getReg() <= X86::FP6);
+ unsigned FPReg = getFPReg(Op);
+ if (Op.isKill())
+ moveToTop(FPReg, I);
+ else
+ duplicateToTop(FPReg, FPReg, I);
+
+ // Emit the call. This will pop the operand.
+ BuildMI(*MBB, I, MI->getDebugLoc(), TII->get(X86::CALLpcrel32))
+ .addExternalSymbol("_ftol2")
+ .addReg(X86::ST0, RegState::ImplicitKill)
+ .addReg(X86::EAX, RegState::Define | RegState::Implicit)
+ .addReg(X86::EDX, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
+ --StackTop;
+
+ break;
+ }
+
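WIN_FTOL_* models MSVC's _ftol2 runtime helper: the operand is arranged into
ST(0), and the call pops it and returns the truncated 64-bit result in
EDX:EAX. The observable semantics, as a hedged plain-C++ sketch (not the
helper's actual implementation):

#include <cstdint>

// _ftol2 performs the C-style truncate-toward-zero conversion regardless of
// the FPU's current rounding mode, which is why a plain cast matches it.
static int64_t ftol2Semantics(long double ST0) {
  return static_cast<int64_t>(ST0);
}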
case X86::RET:
case X86::RETI:
// If RET has an FP register use operand, pass the first one in ST(0) and
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
index d54f4ae..000e375 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1,4 +1,4 @@
-//=======- X86FrameLowering.cpp - X86 Frame Information --------*- C++ -*-====//
+//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -47,7 +47,7 @@ bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineModuleInfo &MMI = MF.getMMI();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
- return (DisableFramePointerElim(MF) ||
+ return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
RI->needsStackRealignment(MF) ||
MFI->hasVarSizedObjects() ||
MFI->isFrameAddressTaken() ||
@@ -79,6 +79,10 @@ static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
}
}
+static unsigned getLEArOpcode(unsigned is64Bit) {
+ return is64Bit ? X86::LEA64r : X86::LEA32r;
+}
+
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worry about clobbering it.
@@ -91,11 +95,11 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
if (!F || MF->getMMI().callsEHReturn())
return 0;
- static const unsigned CallerSavedRegs32Bit[] = {
+ static const uint16_t CallerSavedRegs32Bit[] = {
X86::EAX, X86::EDX, X86::ECX, 0
};
- static const unsigned CallerSavedRegs64Bit[] = {
+ static const uint16_t CallerSavedRegs64Bit[] = {
X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
X86::R8, X86::R9, X86::R10, X86::R11, 0
};
@@ -113,7 +117,7 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
case X86::TCRETURNmi64:
case X86::EH_RETURN:
case X86::EH_RETURN64: {
- SmallSet<unsigned, 8> Uses;
+ SmallSet<uint16_t, 8> Uses;
for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MBBI->getOperand(i);
if (!MO.isReg() || MO.isDef())
@@ -121,11 +125,11 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
unsigned Reg = MO.getReg();
if (!Reg)
continue;
- for (const unsigned *AsI = TRI.getOverlaps(Reg); *AsI; ++AsI)
+ for (const uint16_t *AsI = TRI.getOverlaps(Reg); *AsI; ++AsI)
Uses.insert(*AsI);
}
- const unsigned *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
+ const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
for (; *CS; ++CS)
if (!Uses.count(*CS))
return *CS;
@@ -141,13 +145,18 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
unsigned StackPtr, int64_t NumBytes,
- bool Is64Bit, const TargetInstrInfo &TII,
- const TargetRegisterInfo &TRI) {
+ bool Is64Bit, bool UseLEA,
+ const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
bool isSub = NumBytes < 0;
uint64_t Offset = isSub ? -NumBytes : NumBytes;
- unsigned Opc = isSub ?
- getSUBriOpcode(Is64Bit, Offset) :
- getADDriOpcode(Is64Bit, Offset);
+ unsigned Opc;
+ if (UseLEA)
+ Opc = getLEArOpcode(Is64Bit);
+ else
+ Opc = isSub
+ ? getSUBriOpcode(Is64Bit, Offset)
+ : getADDriOpcode(Is64Bit, Offset);
+
uint64_t Chunk = (1LL << 31) - 1;
DebugLoc DL = MBB.findDebugLoc(MBBI);
@@ -171,13 +180,21 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
}
}
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
- .addReg(StackPtr)
- .addImm(ThisVal);
+ MachineInstr *MI = NULL;
+
+ if (UseLEA) {
+ MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
+ StackPtr, false, isSub ? -ThisVal : ThisVal);
+ } else {
+ MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(ThisVal);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ }
+
if (isSub)
MI->setFlag(MachineInstr::FrameSetup);
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+
Offset -= ThisVal;
}
}
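The UseLEA path exists because LEA executes on the AGU and leaves EFLAGS
untouched, while ADD/SUB clobber EFLAGS (hence the dead implicit-def marking
on that path); Atom's in-order pipeline prefers the LEA form, which is what
FeatureLeaForSP selects. A sketch of the opcode choice, with local stand-in
enum names and ignoring the ri8 forms the real helpers pick for small
immediates:

enum SPUpdateOpc { SUB32ri, ADD32ri, SUB64ri32, ADD64ri32, LEA32r, LEA64r };

static SPUpdateOpc pickSPUpdateOpcode(bool UseLEA, bool Is64Bit, bool IsSub) {
  if (UseLEA)
    return Is64Bit ? LEA64r : LEA32r;  // same opcode either direction; the
                                       // displacement carries the sign
  if (Is64Bit)
    return IsSub ? SUB64ri32 : ADD64ri32;
  return IsSub ? SUB32ri : ADD32ri;
}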
@@ -191,7 +208,8 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
MachineBasicBlock::iterator PI = prior(MBBI);
unsigned Opc = PI->getOpcode();
if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
+ Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
PI->getOperand(0).getReg() == StackPtr) {
if (NumBytes)
*NumBytes += PI->getOperand(2).getImm();
@@ -210,7 +228,7 @@ static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned StackPtr, uint64_t *NumBytes = NULL) {
- // FIXME: THIS ISN'T RUN!!!
+ // FIXME: THIS ISN'T RUN!!!
return;
if (MBBI == MBB.end()) return;
@@ -237,8 +255,8 @@ void mergeSPUpdatesDown(MachineBasicBlock &MBB,
}
/// mergeSPUpdates - Checks the instruction before/after the passed
-/// instruction. If it is an ADD/SUB instruction it is deleted argument and the
-/// stack adjustment is returned as a positive value for ADD and a negative for
+/// instruction. If it is an ADD/SUB/LEA instruction it is deleted and the
+/// stack adjustment is returned as a positive value for ADD/LEA and a negative for
/// SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
@@ -254,7 +272,8 @@ static int mergeSPUpdates(MachineBasicBlock &MBB,
int Offset = 0;
if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
+ Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
PI->getOperand(0).getReg() == StackPtr){
Offset += PI->getOperand(2).getImm();
MBB.erase(PI);
@@ -351,20 +370,22 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
/// register. The number corresponds to the enum lists in
/// compact_unwind_encoding.h.
static int getCompactUnwindRegNum(const unsigned *CURegs, unsigned Reg) {
- int Idx = 1;
- for (; *CURegs; ++CURegs, ++Idx)
+ for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
if (*CURegs == Reg)
return Idx;
return -1;
}
+// Number of registers that can be saved in a compact unwind encoding.
+#define CU_NUM_SAVED_REGS 6
+
/// encodeCompactUnwindRegistersWithoutFrame - Create the permutation encoding
/// used with frameless stacks. It is passed the number of registers to be saved
/// and an array of the registers saved.
-static uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[6],
- unsigned RegCount,
- bool Is64Bit) {
+static uint32_t
+encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
+ unsigned RegCount, bool Is64Bit) {
// The saved registers are numbered from 1 to 6. In order to encode the order
// in which they were saved, we re-number them according to their place in the
// register order. The re-numbering is relative to the last re-numbered
@@ -385,14 +406,21 @@ static uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[6],
};
const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);
- uint32_t RenumRegs[6];
- for (unsigned i = 6 - RegCount; i < 6; ++i) {
+ for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
int CUReg = getCompactUnwindRegNum(CURegs, SavedRegs[i]);
if (CUReg == -1) return ~0U;
SavedRegs[i] = CUReg;
+ }
+ // Reverse the list.
+ std::swap(SavedRegs[0], SavedRegs[5]);
+ std::swap(SavedRegs[1], SavedRegs[4]);
+ std::swap(SavedRegs[2], SavedRegs[3]);
+
+ uint32_t RenumRegs[CU_NUM_SAVED_REGS];
+ for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
unsigned Countless = 0;
- for (unsigned j = 6 - RegCount; j < i; ++j)
+ for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
if (SavedRegs[j] < SavedRegs[i])
++Countless;
@@ -435,8 +463,9 @@ static uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[6],
/// encodeCompactUnwindRegistersWithFrame - Return the registers encoded for a
/// compact encoding with a frame pointer.
-static uint32_t encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[6],
- bool Is64Bit) {
+static uint32_t
+encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
+ bool Is64Bit) {
static const unsigned CU32BitRegs[] = {
X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
};
@@ -446,18 +475,21 @@ static uint32_t encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[6],
const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);
// Encode the registers in the order they were saved, 3-bits per register. The
- // registers are numbered from 1 to 6.
+ // registers are numbered from 1 to CU_NUM_SAVED_REGS.
uint32_t RegEnc = 0;
- for (int I = 5; I >= 0; --I) {
+ for (int I = CU_NUM_SAVED_REGS - 1, Idx = 0; I != -1; --I) {
unsigned Reg = SavedRegs[I];
- if (Reg == 0) break;
+ if (Reg == 0) continue;
+
int CURegNum = getCompactUnwindRegNum(CURegs, Reg);
- if (CURegNum == -1)
- return ~0U;
- RegEnc |= (CURegNum & 0x7) << (5 - I);
+ if (CURegNum == -1) return ~0U;
+
+ // Encode the 3-bit register number in order, skipping over 3 bits for
+ // each register.
+ RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
}
- assert((RegEnc & 0x7FFF) == RegEnc && "Invalid compact register encoding!");
+ assert((RegEnc & 0x3FFFF) == RegEnc && "Invalid compact register encoding!");
return RegEnc;
}
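With a frame pointer, each saved register is encoded by its 1-based position
in the CU register table, packed 3 bits per register in the order the loop
above visits the save slots. A worked standalone sketch of the 32-bit case;
the table entries are stand-in values, not the real X86 register enums:

#include <cstdint>

static const unsigned CU32BitRegs[] = { 11, 12, 13, 14, 15, 16, 0 };
                                     // EBX, ECX, EDX, EDI, ESI, EBP

static uint32_t encodeWithFrame(const unsigned (&SavedRegs)[6]) {
  uint32_t RegEnc = 0;
  unsigned Idx = 0;
  for (int I = 5; I >= 0; --I) {
    unsigned Reg = SavedRegs[I];
    if (Reg == 0) continue;            // empty save slot, skip it
    int CURegNum = -1;                 // 1-based index into the CU table
    for (int N = 1; CU32BitRegs[N - 1]; ++N)
      if (CU32BitRegs[N - 1] == Reg) { CURegNum = N; break; }
    if (CURegNum == -1) return ~0U;    // register has no compact encoding
    RegEnc |= (uint32_t(CURegNum) & 0x7) << (Idx++ * 3);
  }
  return RegEnc;                       // at most 18 bits: 6 regs x 3 bits
}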
@@ -466,14 +498,11 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
unsigned FramePtr = RegInfo->getFrameRegister(MF);
unsigned StackPtr = RegInfo->getStackRegister();
- X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
-
bool Is64Bit = STI.is64Bit();
bool HasFP = hasFP(MF);
- unsigned SavedRegs[6] = { 0, 0, 0, 0, 0, 0 };
- int SavedRegIdx = 6;
+ unsigned SavedRegs[CU_NUM_SAVED_REGS] = { 0, 0, 0, 0, 0, 0 };
+ unsigned SavedRegIdx = 0;
unsigned OffsetSize = (Is64Bit ? 8 : 4);
@@ -481,14 +510,13 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
unsigned PushInstrSize = 1;
unsigned MoveInstr = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
unsigned MoveInstrSize = (Is64Bit ? 3 : 2);
- unsigned SubtractInstr = getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta);
unsigned SubtractInstrIdx = (Is64Bit ? 3 : 2);
unsigned StackDivide = (Is64Bit ? 8 : 4);
unsigned InstrOffset = 0;
- unsigned CFAOffset = 0;
unsigned StackAdjust = 0;
+ unsigned StackSize = 0;
MachineBasicBlock &MBB = MF.front(); // Prologue is in entry BB.
bool ExpectEnd = false;
@@ -504,10 +532,10 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
if (Opc == PushInstr) {
// If there are too many saved registers, we cannot use compact encoding.
- if (--SavedRegIdx < 0) return 0;
+ if (SavedRegIdx >= CU_NUM_SAVED_REGS) return 0;
- SavedRegs[SavedRegIdx] = MI.getOperand(0).getReg();
- CFAOffset += OffsetSize;
+ SavedRegs[SavedRegIdx++] = MI.getOperand(0).getReg();
+ StackAdjust += OffsetSize;
InstrOffset += PushInstrSize;
} else if (Opc == MoveInstr) {
unsigned SrcReg = MI.getOperand(1).getReg();
@@ -516,12 +544,14 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
if (DstReg != FramePtr || SrcReg != StackPtr)
return 0;
- CFAOffset = 0;
+ StackAdjust = 0;
memset(SavedRegs, 0, sizeof(SavedRegs));
+ SavedRegIdx = 0;
InstrOffset += MoveInstrSize;
- } else if (Opc == SubtractInstr) {
- if (StackAdjust)
- // We all ready have a stack pointer adjustment.
+ } else if (Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) {
+ if (StackSize)
+ // We already have a stack size.
return 0;
if (!MI.getOperand(0).isReg() ||
@@ -532,7 +562,7 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
// %RSP<def> = SUB64ri8 %RSP, 48
return 0;
- StackAdjust = MI.getOperand(2).getImm() / StackDivide;
+ StackSize = MI.getOperand(2).getImm() / StackDivide;
SubtractInstrIdx += InstrOffset;
ExpectEnd = true;
}
@@ -540,28 +570,30 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
// Encode that we are using EBP/RBP as the frame pointer.
uint32_t CompactUnwindEncoding = 0;
- CFAOffset /= StackDivide;
+ StackAdjust /= StackDivide;
if (HasFP) {
- if ((CFAOffset & 0xFF) != CFAOffset)
+ if ((StackAdjust & 0xFF) != StackAdjust)
// Offset was too big for compact encoding.
return 0;
// Get the encoding of the saved registers when we have a frame pointer.
uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(SavedRegs, Is64Bit);
- if (RegEnc == ~0U)
- return 0;
+ if (RegEnc == ~0U) return 0;
CompactUnwindEncoding |= 0x01000000;
- CompactUnwindEncoding |= (CFAOffset & 0xFF) << 16;
+ CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
CompactUnwindEncoding |= RegEnc & 0x7FFF;
} else {
- unsigned FullOffset = CFAOffset + StackAdjust;
- if ((FullOffset & 0xFF) == FullOffset) {
- // Frameless stack.
+ ++StackAdjust;
+ uint32_t TotalStackSize = StackAdjust + StackSize;
+ if ((TotalStackSize & 0xFF) == TotalStackSize) {
+ // Frameless stack with a small stack size.
CompactUnwindEncoding |= 0x02000000;
- CompactUnwindEncoding |= (FullOffset & 0xFF) << 16;
+
+ // Encode the stack size.
+ CompactUnwindEncoding |= (TotalStackSize & 0xFF) << 16;
} else {
- if ((CFAOffset & 0x7) != CFAOffset)
+ if ((StackAdjust & 0x7) != StackAdjust)
// The extra stack adjustments are too big for us to handle.
return 0;
@@ -572,16 +604,21 @@ uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
// instruction.
CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
- // Encode any extra stack stack changes (done via push instructions).
- CompactUnwindEncoding |= (CFAOffset & 0x7) << 13;
+ // Encode any extra stack adjustments (done via push instructions).
+ CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
}
+ // Encode the number of registers saved.
+ CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
+
// Get the encoding of the saved registers when we don't have a frame
// pointer.
- uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegs,
- 6 - SavedRegIdx,
- Is64Bit);
+ uint32_t RegEnc =
+ encodeCompactUnwindRegistersWithoutFrame(SavedRegs, SavedRegIdx,
+ Is64Bit);
if (RegEnc == ~0U) return 0;
+
+ // Merge in the register encoding.
CompactUnwindEncoding |= RegEnc & 0x3FF;
}
@@ -608,6 +645,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
bool HasFP = hasFP(MF);
bool Is64Bit = STI.is64Bit();
bool IsWin64 = STI.isTargetWin64();
+ bool UseLEA = STI.useLeaForSP();
unsigned StackAlign = getStackAlignment();
unsigned SlotSize = RegInfo->getSlotSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
@@ -637,10 +675,10 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// stack pointer (we fit in the Red Zone).
if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
!RegInfo->needsStackRealignment(MF) &&
- !MFI->hasVarSizedObjects() && // No dynamic alloca.
- !MFI->adjustsStack() && // No calls.
- !IsWin64 && // Win64 has no Red Zone
- !EnableSegmentedStacks) { // Regular stack
+ !MFI->hasVarSizedObjects() && // No dynamic alloca.
+ !MFI->adjustsStack() && // No calls.
+ !IsWin64 && // Win64 has no Red Zone
+ !MF.getTarget().Options.EnableSegmentedStacks) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
@@ -861,7 +899,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// FIXME: %rax preserves the offset and should be available.
if (isSPUpdateNeeded)
emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
- TII, *RegInfo);
+ UseLEA, TII, *RegInfo);
if (isEAXAlive) {
// Restore EAX
@@ -873,7 +911,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
}
} else if (NumBytes)
emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
- TII, *RegInfo);
+ UseLEA, TII, *RegInfo);
if (( (!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
// Mark end of stack pointer adjustment.
@@ -917,6 +955,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc DL = MBBI->getDebugLoc();
bool Is64Bit = STI.is64Bit();
+ bool UseLEA = STI.useLeaForSP();
unsigned StackAlign = getStackAlignment();
unsigned SlotSize = RegInfo->getSlotSize();
unsigned FramePtr = RegInfo->getFrameRegister(MF);
@@ -977,7 +1016,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
unsigned Opc = PI->getOpcode();
if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
- !PI->getDesc().isTerminator())
+ !PI->isTerminator())
break;
--MBBI;
@@ -997,7 +1036,8 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// We cannot use LEA here, because stack pointer was realigned. We need to
// deallocate local frame back.
if (CSSize) {
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo);
+ emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, UseLEA, TII,
+ *RegInfo);
MBBI = prior(LastCSPop);
}
@@ -1018,7 +1058,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
}
} else if (NumBytes) {
// Adjust stack pointer back: ESP += numbytes.
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII, *RegInfo);
+ emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, UseLEA, TII, *RegInfo);
}
// We're returning from function via eh_return.
@@ -1053,7 +1093,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
if (Offset) {
// Check for possible merge with preceding ADD instruction.
Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
- emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII, *RegInfo);
+ emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, UseLEA, TII, *RegInfo);
}
// Jump to label or value in register.
@@ -1097,7 +1137,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Check for possible merge with preceding ADD instruction.
delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
- emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII, *RegInfo);
+ emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, UseLEA, TII, *RegInfo);
}
}
@@ -1280,31 +1320,35 @@ HasNestArgument(const MachineFunction *MF) {
return false;
}
+
+/// GetScratchRegister - Get a register for performing work in the segmented
+/// stack prologue. Depending on platform and the properties of the function
+/// either one or two registers will be needed. Set primary to true for
+/// the first register, false for the second.
static unsigned
-GetScratchRegister(bool Is64Bit, const MachineFunction &MF) {
- if (Is64Bit) {
- return X86::R11;
- } else {
- CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
- bool IsNested = HasNestArgument(&MF);
-
- if (CallingConvention == CallingConv::X86_FastCall) {
- if (IsNested) {
- report_fatal_error("Segmented stacks does not support fastcall with "
- "nested function.");
- return -1;
- } else {
- return X86::EAX;
- }
- } else {
- if (IsNested)
- return X86::EDX;
- else
- return X86::ECX;
- }
+GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary) {
+ if (Is64Bit)
+ return Primary ? X86::R11 : X86::R12;
+
+ CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
+ bool IsNested = HasNestArgument(&MF);
+
+ if (CallingConvention == CallingConv::X86_FastCall ||
+ CallingConvention == CallingConv::Fast) {
+ if (IsNested)
+ report_fatal_error("Segmented stacks does not support fastcall with "
+ "nested function.");
+ return Primary ? X86::EAX : X86::ECX;
}
+ if (IsNested)
+ return Primary ? X86::EDX : X86::EAX;
+ return Primary ? X86::ECX : X86::EAX;
}
+// The stack limit in the TCB is set to this many bytes above the actual stack
+// limit.
+static const uint64_t kSplitStackAvailable = 256;
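// Illustrative sketch (x86-64/Linux case): with kSplitStackAvailable bytes
// of slack built into the recorded limit, frames smaller than that can
// compare the stack pointer against the TLS limit directly,
//
//   cmpq %fs:0x70, %rsp            // StackSize < kSplitStackAvailable
//
// while larger frames first materialize SP - StackSize in a scratch register:
//
//   leaq -StackSize(%rsp), %r11
//   cmpq %fs:0x70, %r11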
+
void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
MachineBasicBlock &prologueMBB = MF.front();
@@ -1316,14 +1360,15 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
DebugLoc DL;
const X86Subtarget *ST = &MF.getTarget().getSubtarget<X86Subtarget>();
- unsigned ScratchReg = GetScratchRegister(Is64Bit, MF);
+ unsigned ScratchReg = GetScratchRegister(Is64Bit, MF, true);
assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
"Scratch register is live-in");
if (MF.getFunction()->isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
- if (!ST->isTargetLinux())
- report_fatal_error("Segmented stacks supported only on linux.");
+ if (!ST->isTargetLinux() && !ST->isTargetDarwin() &&
+ !ST->isTargetWin32() && !ST->isTargetFreeBSD())
+ report_fatal_error("Segmented stacks not supported on this platform.");
MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
@@ -1336,26 +1381,16 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  // The MOV R10, RAX needs to be in a different block, since the RET we emit
  // in allocMBB needs to be the last (terminating) instruction.
- MachineBasicBlock *restoreR10MBB = NULL;
- if (IsNested)
- restoreR10MBB = MF.CreateMachineBasicBlock();
for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
e = prologueMBB.livein_end(); i != e; i++) {
allocMBB->addLiveIn(*i);
checkMBB->addLiveIn(*i);
-
- if (IsNested)
- restoreR10MBB->addLiveIn(*i);
}
- if (IsNested) {
+ if (IsNested)
allocMBB->addLiveIn(X86::R10);
- restoreR10MBB->addLiveIn(X86::RAX);
- }
- if (IsNested)
- MF.push_front(restoreR10MBB);
MF.push_front(allocMBB);
MF.push_front(checkMBB);
@@ -1364,28 +1399,99 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
// prologue.
StackSize = MFI->getStackSize();
+ // When the frame size is less than kSplitStackAvailable (256 bytes) we
+ // just compare the stack boundary directly to the value of the stack
+ // pointer, per gcc.
+ bool CompareStackPointer = StackSize < kSplitStackAvailable;
+
  // Read the limit of the current stacklet from the stack_guard location.
if (Is64Bit) {
- TlsReg = X86::FS;
- TlsOffset = 0x70;
+ if (ST->isTargetLinux()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x70;
+ } else if (ST->isTargetDarwin()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
+ } else if (ST->isTargetFreeBSD()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x18;
+ } else {
+ report_fatal_error("Segmented stacks not supported on this platform.");
+ }
+
+ if (CompareStackPointer)
+ ScratchReg = X86::RSP;
+ else
+ BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
+ .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
- BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
- .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
- .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
+ .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
} else {
- TlsReg = X86::GS;
- TlsOffset = 0x30;
+ if (ST->isTargetLinux()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x30;
+ } else if (ST->isTargetDarwin()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x48 + 90*4;
+ } else if (ST->isTargetWin32()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x14; // pvArbitrary, reserved for application use
+ } else if (ST->isTargetFreeBSD()) {
+ report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
+ } else {
+ report_fatal_error("Segmented stacks not supported on this platform.");
+ }
+
+ if (CompareStackPointer)
+ ScratchReg = X86::ESP;
+ else
+ BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
+ .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
+
+ if (ST->isTargetLinux() || ST->isTargetWin32()) {
+ BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
+ .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
+ } else if (ST->isTargetDarwin()) {
+
+ // TlsOffset doesn't fit into a mod r/m byte so we need an extra register
+ unsigned ScratchReg2;
+ bool SaveScratch2;
+ if (CompareStackPointer) {
+ // The primary scratch register is available for holding the TLS offset
+ ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
+ SaveScratch2 = false;
+ } else {
+ // Need to use a second register to hold the TLS offset
+ ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);
+
+ // Unfortunately, with fastcc the second scratch register may hold an arg
+ SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
+ }
- BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
- .addImm(0).addReg(0).addImm(-StackSize).addReg(0);
- BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
- .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
+ // If Scratch2 is live-in then it needs to be saved
+ assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
+ "Scratch register is live-in and not saved");
+
+ if (SaveScratch2)
+ BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
+ .addReg(ScratchReg2, RegState::Kill);
+
+ BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
+ .addImm(TlsOffset);
+ BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
+ .addReg(ScratchReg)
+ .addReg(ScratchReg2).addImm(1).addReg(0)
+ .addImm(0)
+ .addReg(TlsReg);
+
+ if (SaveScratch2)
+ BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
+ }
}
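// Roughly, the Darwin/i386 path above emits (sketch; %eax assumes the
// non-fastcc secondary scratch register):
//
//   pushl %eax                  // only if ScratchReg2 was live-in
//   movl  $TlsOffset, %eax
//   cmpl  %gs:(%eax), <ScratchReg>
//   popl  %eax                  // restore the saved register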
// This jump is taken if SP >= (Stacklet Limit + Stack Space required).
// It jumps to normal execution of the function body.
- BuildMI(checkMBB, DL, TII.get(X86::JG_4)).addMBB(&prologueMBB);
+ BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);
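// (JA rather than the old JG: stack addresses are unsigned, so an unsigned
// above-compare is the correct ordering for SP versus the stacklet limit.)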
  // On 32 bit we first push the argument size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
@@ -1403,9 +1509,6 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
MF.getRegInfo().setPhysRegUsed(X86::R10);
MF.getRegInfo().setPhysRegUsed(X86::R11);
} else {
- // Since we'll call __morestack, stack alignment needs to be preserved.
- BuildMI(allocMBB, DL, TII.get(X86::SUB32ri), X86::ESP).addReg(X86::ESP)
- .addImm(8);
BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
.addImm(X86FI->getArgumentStackSize());
BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
@@ -1420,23 +1523,12 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
.addExternalSymbol("__morestack");
- // __morestack only seems to remove 8 bytes off the stack. Add back the
- // additional 8 bytes we added before pushing the arguments.
- if (!Is64Bit)
- BuildMI(allocMBB, DL, TII.get(X86::ADD32ri), X86::ESP).addReg(X86::ESP)
- .addImm(8);
- BuildMI(allocMBB, DL, TII.get(X86::RET));
-
if (IsNested)
- BuildMI(restoreR10MBB, DL, TII.get(X86::MOV64rr), X86::R10)
- .addReg(X86::RAX);
+ BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
+ else
+ BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
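// (Assumed expansion: MORESTACK_RET_RESTORE_R10 is a pseudo that later
// lowers to "movq %rax, %r10; ret", folding the old restoreR10MBB block
// into the return itself.)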
- if (IsNested) {
- allocMBB->addSuccessor(restoreR10MBB);
- restoreR10MBB->addSuccessor(&prologueMBB);
- } else {
- allocMBB->addSuccessor(&prologueMBB);
- }
+ allocMBB->addSuccessor(&prologueMBB);
checkMBB->addSuccessor(allocMBB);
checkMBB->addSuccessor(&prologueMBB);
diff --git a/contrib/llvm/lib/Target/X86/X86FrameLowering.h b/contrib/llvm/lib/Target/X86/X86FrameLowering.h
index 6f49064..d55a497 100644
--- a/contrib/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86FrameLowering.h
@@ -1,4 +1,4 @@
-//=-- X86TargetFrameLowering.h - Define frame lowering for X86 ---*- C++ -*-===//
+//===-- X86TargetFrameLowering.h - Define frame lowering for X86 -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 02b0ff2..8e2b1d6 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -21,7 +21,6 @@
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -32,11 +31,11 @@
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
@@ -540,7 +539,7 @@ void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
const TargetInstrInfo *TII = TM.getInstrInfo();
if (Subtarget->isTargetCygMing()) {
unsigned CallOp =
- Subtarget->is64Bit() ? X86::WINCALL64pcrel32 : X86::CALLpcrel32;
+ Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
BuildMI(BB, DebugLoc(),
TII->get(CallOp)).addExternalSymbol("__main");
}
@@ -621,14 +620,14 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
// Handle X86-64 rip-relative addresses. We check this before checking direct
// folding because RIP is preferable to non-RIP accesses.
- if (Subtarget->is64Bit() &&
+ if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
// Under X86-64 non-small code model, GV (and friends) are 64-bits, so
// they cannot be folded into immediate fields.
// FIXME: This can be improved for kernel and other models?
- (M == CodeModel::Small || M == CodeModel::Kernel) &&
- // Base and index reg must be 0 in order to use %rip as base and lowering
- // must allow RIP.
- !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
+ (M == CodeModel::Small || M == CodeModel::Kernel)) {
+ // Base and index reg must be 0 in order to use %rip as base.
+ if (AM.hasBaseOrIndexReg())
+ return true;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
X86ISelAddressMode Backup = AM;
AM.GV = G->getGlobal();
@@ -663,11 +662,12 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
}
// Handle the case when globals fit in our immediate field: This is true for
- // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
- // mode, this results in a non-RIP-relative computation.
+ // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
+ // mode, this only applies to a non-RIP-relative computation.
if (!Subtarget->is64Bit() ||
- ((M == CodeModel::Small || M == CodeModel::Kernel) &&
- TM.getRelocationModel() == Reloc::Static)) {
+ M == CodeModel::Small || M == CodeModel::Kernel) {
+ assert(N.getOpcode() != X86ISD::WrapperRIP &&
+ "RIP-relative addressing already handled");
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
AM.GV = G->getGlobal();
AM.Disp += G->getOffset();
@@ -725,6 +725,213 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
return false;
}
+// Insert a node into the DAG at least before the Pos node's position. This
+// will reposition the node as needed, and will assign it a node ID that is <=
+// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
+// IDs! The selection DAG must no longer depend on their uniqueness when this
+// is used.
+static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
+ if (N.getNode()->getNodeId() == -1 ||
+ N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
+ DAG.RepositionNode(Pos.getNode(), N.getNode());
+ N.getNode()->setNodeId(Pos.getNode()->getNodeId());
+ }
+}
+
+// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This
+// allows us to convert the shift and AND into an h-register extract and
+// a scaled index. Returns false if the simplification is performed.
+static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
+ uint64_t Mask,
+ SDValue Shift, SDValue X,
+ X86ISelAddressMode &AM) {
+ if (Shift.getOpcode() != ISD::SRL ||
+ !isa<ConstantSDNode>(Shift.getOperand(1)) ||
+ !Shift.hasOneUse())
+ return true;
+
+ int ScaleLog = 8 - Shift.getConstantOperandVal(1);
+ if (ScaleLog <= 0 || ScaleLog >= 4 ||
+ Mask != (0xffu << ScaleLog))
+ return true;
+
+ EVT VT = N.getValueType();
+ DebugLoc DL = N.getDebugLoc();
+ SDValue Eight = DAG.getConstant(8, MVT::i8);
+ SDValue NewMask = DAG.getConstant(0xff, VT);
+ SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
+ SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
+ SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
+
+ // Insert the new nodes into the topological ordering. We must do this in
+ // a valid topological ordering as nothing is going to go back and re-sort
+ // these nodes. We continually insert before 'N' in sequence as this is
+ // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
+ // hierarchy left to express.
+ InsertDAGNode(DAG, N, Eight);
+ InsertDAGNode(DAG, N, Srl);
+ InsertDAGNode(DAG, N, NewMask);
+ InsertDAGNode(DAG, N, And);
+ InsertDAGNode(DAG, N, ShlCount);
+ InsertDAGNode(DAG, N, Shl);
+ DAG.ReplaceAllUsesWith(N, Shl);
+ AM.IndexReg = And;
+ AM.Scale = (1 << ScaleLog);
+ return false;
+}
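// A hypothetical instance of the fold above: for "(x >> 5) & (0xff << 3)",
// ScaleLog is 3 and the replacement is equivalent to
//
//   ((x >> 8) & 0xff) << 3
//
// so the AND becomes AM.IndexReg with AM.Scale = 8, which isel can match
// as an h-register extract plus a scaled index.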
+
+// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
+// allows us to fold the shift into this addressing mode. Returns false if the
+// transform succeeded.
+static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
+ uint64_t Mask,
+ SDValue Shift, SDValue X,
+ X86ISelAddressMode &AM) {
+ if (Shift.getOpcode() != ISD::SHL ||
+ !isa<ConstantSDNode>(Shift.getOperand(1)))
+ return true;
+
+ // Not likely to be profitable if either the AND or SHIFT node has more
+ // than one use (unless all uses are for address computation). Besides,
+ // isel mechanism requires their node ids to be reused.
+ if (!N.hasOneUse() || !Shift.hasOneUse())
+ return true;
+
+ // Verify that the shift amount is something we can fold.
+ unsigned ShiftAmt = Shift.getConstantOperandVal(1);
+ if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
+ return true;
+
+ EVT VT = N.getValueType();
+ DebugLoc DL = N.getDebugLoc();
+ SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
+ SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
+ SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
+
+ // Insert the new nodes into the topological ordering. We must do this in
+ // a valid topological ordering as nothing is going to go back and re-sort
+ // these nodes. We continually insert before 'N' in sequence as this is
+ // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
+ // hierarchy left to express.
+ InsertDAGNode(DAG, N, NewMask);
+ InsertDAGNode(DAG, N, NewAnd);
+ InsertDAGNode(DAG, N, NewShift);
+ DAG.ReplaceAllUsesWith(N, NewShift);
+
+ AM.Scale = 1 << ShiftAmt;
+ AM.IndexReg = NewAnd;
+ return false;
+}
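// A hypothetical instance: "(x << 2) & 0x3fc" becomes "(x & 0xff) << 2",
// i.e. the narrower AND is the index register and the shift is absorbed
// into AM.Scale = 4.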
+
+// Implement some heroics to detect shifts of masked values where the mask can
+// be replaced by extending the shift and undoing that in the addressing mode
+// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
+// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
+// the addressing mode. This results in code such as:
+//
+// int f(short *y, int *lookup_table) {
+// ...
+// return *y + lookup_table[*y >> 11];
+// }
+//
+// Turning into:
+// movzwl (%rdi), %eax
+// movl %eax, %ecx
+// shrl $11, %ecx
+// addl (%rsi,%rcx,4), %eax
+//
+// Instead of:
+// movzwl (%rdi), %eax
+// movl %eax, %ecx
+// shrl $9, %ecx
+// andl $124, %rcx
+// addl (%rsi,%rcx), %eax
+//
+// Note that this function assumes the mask is provided as a mask *after* the
+// value is shifted. The input chain may or may not match that, but computing
+// such a mask is trivial.
+static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
+ uint64_t Mask,
+ SDValue Shift, SDValue X,
+ X86ISelAddressMode &AM) {
+ if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
+ !isa<ConstantSDNode>(Shift.getOperand(1)))
+ return true;
+
+ unsigned ShiftAmt = Shift.getConstantOperandVal(1);
+ unsigned MaskLZ = CountLeadingZeros_64(Mask);
+ unsigned MaskTZ = CountTrailingZeros_64(Mask);
+
+ // The amount of shift we're trying to fit into the addressing mode is taken
+ // from the trailing zeros of the mask.
+ unsigned AMShiftAmt = MaskTZ;
+
+ // There is nothing we can do here unless the mask is removing some bits.
+ // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
+ if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
+
+ // We also need to ensure that mask is a continuous run of bits.
+ if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
+
+ // Scale the leading zero count down based on the actual size of the value.
+ // Also scale it down based on the size of the shift.
+ MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;
+
+ // The final check is to ensure that any masked out high bits of X are
+ // already known to be zero. Otherwise, the mask has a semantic impact
+ // other than masking out a couple of low bits. Unfortunately, because of
+ // the mask, zero extensions will be removed from operands in some cases.
+ // This code works extra hard to look through extensions because we can
+ // replace them with zero extensions cheaply if necessary.
+ bool ReplacingAnyExtend = false;
+ if (X.getOpcode() == ISD::ANY_EXTEND) {
+ unsigned ExtendBits =
+ X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
+ // Assume that we'll replace the any-extend with a zero-extend, and
+ // narrow the search to the extended value.
+ X = X.getOperand(0);
+ MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
+ ReplacingAnyExtend = true;
+ }
+ APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
+ MaskLZ);
+ APInt KnownZero, KnownOne;
+ DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
+ if (MaskedHighBits != KnownZero) return true;
+
+ // We've identified a pattern that can be transformed into a single shift
+ // and an addressing mode. Make it so.
+ EVT VT = N.getValueType();
+ if (ReplacingAnyExtend) {
+ assert(X.getValueType() != VT);
+ // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
+ SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
+ InsertDAGNode(DAG, N, NewX);
+ X = NewX;
+ }
+ DebugLoc DL = N.getDebugLoc();
+ SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
+ SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
+ SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
+ SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
+
+ // Insert the new nodes into the topological ordering. We must do this in
+ // a valid topological ordering as nothing is going to go back and re-sort
+ // these nodes. We continually insert before 'N' in sequence as this is
+ // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
+ // hierarchy left to express.
+ InsertDAGNode(DAG, N, NewSRLAmt);
+ InsertDAGNode(DAG, N, NewSRL);
+ InsertDAGNode(DAG, N, NewSHLAmt);
+ InsertDAGNode(DAG, N, NewSHL);
+ DAG.ReplaceAllUsesWith(N, NewSHL);
+
+ AM.Scale = 1 << AMShiftAmt;
+ AM.IndexReg = NewSRL;
+ return false;
+}
+
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
unsigned Depth) {
DebugLoc dl = N.getDebugLoc();
@@ -814,6 +1021,33 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
break;
}
+ case ISD::SRL: {
+ // Scale must not be used already.
+ if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
+
+ SDValue And = N.getOperand(0);
+ if (And.getOpcode() != ISD::AND) break;
+ SDValue X = And.getOperand(0);
+
+ // We only handle up to 64-bit values here as those are what matter for
+ // addressing mode optimizations.
+ if (X.getValueSizeInBits() > 64) break;
+
+ // The mask used for the transform is expected to be post-shift, but we
+ // found the shift first so just apply the shift to the mask before passing
+ // it down.
+ if (!isa<ConstantSDNode>(N.getOperand(1)) ||
+ !isa<ConstantSDNode>(And.getOperand(1)))
+ break;
+ uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
+
+ // Try to fold the mask and shift into the scale, and return false if we
+ // succeed.
+ if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
+ return false;
+ break;
+ }
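// A hypothetical instance of the SRL case above: for "(x & 0xff0) >> 2" the
// post-shift mask is 0xff0 >> 2 == 0x3fc and, provided the masked-out high
// bits of x are already known zero, FoldMaskAndShiftToScale rewrites it as
// "(x >> 4) << 2", i.e. index register (x >> 4) with AM.Scale = 4.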
+
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
// A mul_lohi where we need the low part can be folded as a plain multiply.
@@ -917,16 +1151,8 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
AM.Scale = 1;
// Insert the new nodes into the topological ordering.
- if (Zero.getNode()->getNodeId() == -1 ||
- Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
- CurDAG->RepositionNode(N.getNode(), Zero.getNode());
- Zero.getNode()->setNodeId(N.getNode()->getNodeId());
- }
- if (Neg.getNode()->getNodeId() == -1 ||
- Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
- CurDAG->RepositionNode(N.getNode(), Neg.getNode());
- Neg.getNode()->setNodeId(N.getNode()->getNodeId());
- }
+ InsertDAGNode(*CurDAG, N, Zero);
+ InsertDAGNode(*CurDAG, N, Neg);
return false;
}
@@ -981,121 +1207,34 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// Perform some heroic transforms on an and of a constant-count shift
// with a constant to enable use of the scaled offset field.
- SDValue Shift = N.getOperand(0);
- if (Shift.getNumOperands() != 2) break;
-
// Scale must not be used already.
if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
+ SDValue Shift = N.getOperand(0);
+ if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
SDValue X = Shift.getOperand(0);
- ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
- ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
- if (!C1 || !C2) break;
-
- // Handle "(X >> (8-C1)) & C2" as "(X >> 8) & 0xff)" if safe. This
- // allows us to convert the shift and and into an h-register extract and
- // a scaled index.
- if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
- unsigned ScaleLog = 8 - C1->getZExtValue();
- if (ScaleLog > 0 && ScaleLog < 4 &&
- C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
- SDValue Eight = CurDAG->getConstant(8, MVT::i8);
- SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
- SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
- X, Eight);
- SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
- Srl, Mask);
- SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
- SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
- And, ShlCount);
-
- // Insert the new nodes into the topological ordering.
- if (Eight.getNode()->getNodeId() == -1 ||
- Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
- CurDAG->RepositionNode(X.getNode(), Eight.getNode());
- Eight.getNode()->setNodeId(X.getNode()->getNodeId());
- }
- if (Mask.getNode()->getNodeId() == -1 ||
- Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
- CurDAG->RepositionNode(X.getNode(), Mask.getNode());
- Mask.getNode()->setNodeId(X.getNode()->getNodeId());
- }
- if (Srl.getNode()->getNodeId() == -1 ||
- Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
- CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
- Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
- }
- if (And.getNode()->getNodeId() == -1 ||
- And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
- CurDAG->RepositionNode(N.getNode(), And.getNode());
- And.getNode()->setNodeId(N.getNode()->getNodeId());
- }
- if (ShlCount.getNode()->getNodeId() == -1 ||
- ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
- CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
- ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
- }
- if (Shl.getNode()->getNodeId() == -1 ||
- Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
- CurDAG->RepositionNode(N.getNode(), Shl.getNode());
- Shl.getNode()->setNodeId(N.getNode()->getNodeId());
- }
- CurDAG->ReplaceAllUsesWith(N, Shl);
- AM.IndexReg = And;
- AM.Scale = (1 << ScaleLog);
- return false;
- }
- }
- // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
- // allows us to fold the shift into this addressing mode.
- if (Shift.getOpcode() != ISD::SHL) break;
+ // We only handle up to 64-bit values here as those are what matter for
+ // addressing mode optimizations.
+ if (X.getValueSizeInBits() > 64) break;
- // Not likely to be profitable if either the AND or SHIFT node has more
- // than one use (unless all uses are for address computation). Besides,
- // isel mechanism requires their node ids to be reused.
- if (!N.hasOneUse() || !Shift.hasOneUse())
- break;
-
- // Verify that the shift amount is something we can fold.
- unsigned ShiftCst = C1->getZExtValue();
- if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
+ if (!isa<ConstantSDNode>(N.getOperand(1)))
break;
-
- // Get the new AND mask, this folds to a constant.
- SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
- SDValue(C2, 0), SDValue(C1, 0));
- SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
- NewANDMask);
- SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
- NewAND, SDValue(C1, 0));
+ uint64_t Mask = N.getConstantOperandVal(1);
- // Insert the new nodes into the topological ordering.
- if (C1->getNodeId() > X.getNode()->getNodeId()) {
- CurDAG->RepositionNode(X.getNode(), C1);
- C1->setNodeId(X.getNode()->getNodeId());
- }
- if (NewANDMask.getNode()->getNodeId() == -1 ||
- NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
- CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
- NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
- }
- if (NewAND.getNode()->getNodeId() == -1 ||
- NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
- CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
- NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
- }
- if (NewSHIFT.getNode()->getNodeId() == -1 ||
- NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
- CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
- NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
- }
+ // Try to fold the mask and shift into an extract and scale.
+ if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
+ return false;
- CurDAG->ReplaceAllUsesWith(N, NewSHIFT);
-
- AM.Scale = 1 << ShiftCst;
- AM.IndexReg = NewAND;
- return false;
+ // Try to fold the mask and shift directly into the scale.
+ if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
+ return false;
+
+ // Try to swap the mask and shift to place shifts which can be done as
+ // a scale on the outside of the mask.
+ if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
+ return false;
+ break;
}
}
@@ -1515,7 +1654,7 @@ enum AtomicSz {
AtomicSzEnd
};
-static const unsigned int AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
+static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
{
X86::LOCK_OR8mi,
X86::LOCK_OR8mr,
@@ -1709,6 +1848,96 @@ static bool HasNoSignedComparisonUses(SDNode *N) {
return true;
}
+/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
+/// is suitable for the {load; increment or decrement; store} read-modify-
+/// write transformation.
+static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
+ SDValue StoredVal, SelectionDAG *CurDAG,
+ LoadSDNode* &LoadNode, SDValue &InputChain) {
+
+ // Is the stored value the result of a DEC or INC?
+ if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;
+
+ // Is the stored value result 0 of the INC/DEC (not its EFLAGS result)?
+ if (StoredVal.getResNo() != 0) return false;
+
+ // Is the INC/DEC result used by anything other than this store?
+ if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
+
+ // Is the store non-extending and non-indexed?
+ if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
+ return false;
+
+ SDValue Load = StoredVal->getOperand(0);
+ // Is the stored value a non-extending and non-indexed load?
+ if (!ISD::isNormalLoad(Load.getNode())) return false;
+
+ // Return LoadNode by reference.
+ LoadNode = cast<LoadSDNode>(Load);
+ // Is the size of the value one that we can handle (i.e. 64, 32, 16, or
+ // 8 bits)?
+ EVT LdVT = LoadNode->getMemoryVT();
+ if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
+ LdVT != MVT::i8)
+ return false;
+
+ // Is store the only read of the loaded value?
+ if (!Load.hasOneUse())
+ return false;
+
+ // Is the address of the store the same as the load?
+ if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
+ LoadNode->getOffset() != StoreNode->getOffset())
+ return false;
+
+ // Check if the chain is produced by the load or is a TokenFactor with
+ // the load output chain as an operand. Return InputChain by reference.
+ SDValue Chain = StoreNode->getChain();
+
+ bool ChainCheck = false;
+ if (Chain == Load.getValue(1)) {
+ ChainCheck = true;
+ InputChain = LoadNode->getChain();
+ } else if (Chain.getOpcode() == ISD::TokenFactor) {
+ SmallVector<SDValue, 4> ChainOps;
+ for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
+ SDValue Op = Chain.getOperand(i);
+ if (Op == Load.getValue(1)) {
+ ChainCheck = true;
+ continue;
+ }
+ ChainOps.push_back(Op);
+ }
+
+ if (ChainCheck)
+ // Make a new TokenFactor with all the other input chains except
+ // for the load.
+ InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
+ MVT::Other, &ChainOps[0], ChainOps.size());
+ }
+ if (!ChainCheck)
+ return false;
+
+ return true;
+}
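// The chain matched above corresponds to source such as
//
//   void bump(long *p) { ++*p; }
//
// where a load feeds an INC whose only user is a store back to the same
// address; the ISD::STORE case in Select() below then emits a single
// INC64m (incq (%rdi)).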
+
+/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in memory
+/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
+static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
+ if (Opc == X86ISD::DEC) {
+ if (LdVT == MVT::i64) return X86::DEC64m;
+ if (LdVT == MVT::i32) return X86::DEC32m;
+ if (LdVT == MVT::i16) return X86::DEC16m;
+ if (LdVT == MVT::i8) return X86::DEC8m;
+ } else {
+ assert(Opc == X86ISD::INC && "unrecognized opcode");
+ if (LdVT == MVT::i64) return X86::INC64m;
+ if (LdVT == MVT::i32) return X86::INC32m;
+ if (LdVT == MVT::i16) return X86::INC16m;
+ if (LdVT == MVT::i8) return X86::INC8m;
+ }
+ llvm_unreachable("unrecognized size for LdVT");
+}
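// For example, with LdVT == MVT::i32 and Opc == X86ISD::INC this returns
// X86::INC32m, the read-modify-write form "incl (mem)".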
+
SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
EVT NVT = Node->getValueType(0);
unsigned Opc, MOpc;
@@ -1829,7 +2058,6 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
getI8Imm(ShlVal));
- break;
}
case X86ISD::UMUL: {
SDValue N0 = Node->getOperand(0);
@@ -2114,7 +2342,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
HasNoSignedComparisonUses(Node))
// Look past the truncate if CMP is the only use of it.
N0 = N0.getOperand(0);
- if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
+ if ((N0.getNode()->getOpcode() == ISD::AND ||
+ (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
+ N0.getNode()->hasOneUse() &&
N0.getValueType() != MVT::i8 &&
X86::isZeroNode(N1)) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
@@ -2129,7 +2359,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// On x86-32, only the ABCD registers have 8-bit subregisters.
if (!Subtarget->is64Bit()) {
- TargetRegisterClass *TRC = 0;
+ const TargetRegisterClass *TRC;
switch (N0.getValueType().getSimpleVT().SimpleTy) {
case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
@@ -2158,7 +2388,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Reg = N0.getNode()->getOperand(0);
// Put the value in an ABCD register.
- TargetRegisterClass *TRC = 0;
+ const TargetRegisterClass *TRC;
switch (N0.getValueType().getSimpleVT().SimpleTy) {
case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
@@ -2214,6 +2444,56 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
break;
}
+ case ISD::STORE: {
+ // Change a chain of {load; incr or dec; store} of the same value into
+ // a simple increment or decrement through memory of that value, if the
+ // uses of the modified value and its address are suitable.
+ // The DEC64m tablegen pattern is currently not able to match the case where
+ // the EFLAGS on the original DEC are used. (This also applies to
+ // {INC,DEC}X{64,32,16,8}.)
+ // We'll need to improve tablegen to allow flags to be transferred from a
+ // node in the pattern to the result node, probably with a new keyword.
+ // For example, we currently have this:
+ // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
+ // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
+ // (implicit EFLAGS)]>;
+ // but may need something like this:
+ // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
+ // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
+ // (transferrable EFLAGS)]>;
+
+ StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
+ SDValue StoredVal = StoreNode->getOperand(1);
+ unsigned Opc = StoredVal->getOpcode();
+
+ LoadSDNode *LoadNode = 0;
+ SDValue InputChain;
+ if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
+ LoadNode, InputChain))
+ break;
+
+ SDValue Base, Scale, Index, Disp, Segment;
+ if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
+ Base, Scale, Index, Disp, Segment))
+ break;
+
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
+ MemOp[0] = StoreNode->getMemOperand();
+ MemOp[1] = LoadNode->getMemOperand();
+ const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
+ EVT LdVT = LoadNode->getMemoryVT();
+ unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
+ MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
+ Node->getDebugLoc(),
+ MVT::i32, MVT::Other, Ops,
+ array_lengthof(Ops));
+ Result->setMemRefs(MemOp, MemOp + 2);
+
+ ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
+ ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
+
+ return Result;
+ }
}
SDNode *ResNode = SelectCode(Node);
@@ -2254,6 +2534,6 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
- llvm::CodeGenOpt::Level OptLevel) {
+ CodeGenOpt::Level OptLevel) {
return new X86DAGToDAGISel(TM, OptLevel);
}
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7c8ce17..9b83aad 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -13,9 +13,9 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
+#include "X86ISelLowering.h"
#include "X86.h"
#include "X86InstrBuilder.h"
-#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "Utils/X86ShuffleDecode.h"
@@ -35,25 +35,21 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/VectorExtras.h"
+#include "llvm/ADT/VariadicFunction.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
+#include <bitset>
using namespace llvm;
-using namespace dwarf;
STATISTIC(NumTailCalls, "Number of tail calls");
@@ -61,17 +57,6 @@ STATISTIC(NumTailCalls, "Number of tail calls");
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2);
-static SDValue Insert128BitVector(SDValue Result,
- SDValue Vec,
- SDValue Idx,
- SelectionDAG &DAG,
- DebugLoc dl);
-
-static SDValue Extract128BitVector(SDValue Vec,
- SDValue Idx,
- SelectionDAG &DAG,
- DebugLoc dl);
-
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 instruction or a
/// simple subregister reference. Idx is an index in the 128 bits we
@@ -169,8 +154,8 @@ static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
: TargetLowering(TM, createTLOF(TM)) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
- X86ScalarSSEf64 = Subtarget->hasXMMInt();
- X86ScalarSSEf32 = Subtarget->hasXMM();
+ X86ScalarSSEf64 = Subtarget->hasSSE2();
+ X86ScalarSSEf32 = Subtarget->hasSSE1();
X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
RegInfo = TM.getRegisterInfo();
@@ -186,8 +171,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// For 64-bit since we have so many registers use the ILP scheduler, for
// 32-bit code use the register pressure specific scheduling.
+ // For 32 bit Atom, use Hybrid (register pressure + latency) scheduling.
if (Subtarget->is64Bit())
setSchedulingPreference(Sched::ILP);
+ else if (Subtarget->isAtom())
+ setSchedulingPreference(Sched::Hybrid);
else
setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(X86StackPtr);
@@ -199,15 +187,18 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setLibcallName(RTLIB::SREM_I64, "_allrem");
setLibcallName(RTLIB::UREM_I64, "_aullrem");
setLibcallName(RTLIB::MUL_I64, "_allmul");
- setLibcallName(RTLIB::FPTOUINT_F64_I64, "_ftol2");
- setLibcallName(RTLIB::FPTOUINT_F32_I64, "_ftol2");
setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
- setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::C);
- setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::C);
+
+ // The _ftol2 runtime function has an unusual calling conv, which
+ // is modeled by a special pseudo-instruction.
+ setLibcallName(RTLIB::FPTOUINT_F64_I64, 0);
+ setLibcallName(RTLIB::FPTOUINT_F32_I64, 0);
+ setLibcallName(RTLIB::FPTOUINT_F64_I32, 0);
+ setLibcallName(RTLIB::FPTOUINT_F32_I32, 0);
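// (The convention being modeled, roughly: _ftol2 takes its operand in
// ST(0) and returns the truncated result in EDX:EAX, which RTLIB's
// ordinary C calling conventions cannot describe.)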
}
if (Subtarget->isTargetDarwin()) {
@@ -256,8 +247,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
if (Subtarget->is64Bit()) {
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
- setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
- } else if (!UseSoftFloat) {
+ setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
+ } else if (!TM.Options.UseSoftFloat) {
// We have an algorithm for SSE2->double, and we turn this into a
// 64-bit FILD followed by conditional FADD for other targets.
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
@@ -271,7 +262,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
- if (!UseSoftFloat) {
+ if (!TM.Options.UseSoftFloat) {
// SSE has no i16 to fp conversion, only i32
if (X86ScalarSSEf32) {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
@@ -314,7 +305,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
if (Subtarget->is64Bit()) {
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
- } else if (!UseSoftFloat) {
+ } else if (!TM.Options.UseSoftFloat) {
// Since AVX is a superset of SSE3, only check for SSE here.
if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
// Expand FP_TO_UINT into a select.
@@ -327,6 +318,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
}
+ if (isTargetFTOL()) {
+ // Use the _ftol2 runtime function, which has a pseudo-instruction
+ // to handle its weird calling convention.
+ setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
+ }
+
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
if (!X86ScalarSSEf64) {
setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
@@ -379,10 +376,18 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FREM , MVT::f80 , Expand);
setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
+ // Promote the i8 variants and force them on up to i32 which has a shorter
+ // encoding.
+ setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
+ AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
+ AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
if (Subtarget->hasBMI()) {
- setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
+ if (Subtarget->is64Bit())
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
} else {
- setOperationAction(ISD::CTTZ , MVT::i8 , Custom);
setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
if (Subtarget->is64Bit())
@@ -390,13 +395,27 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
if (Subtarget->hasLZCNT()) {
+ // When promoting the i8 variants, force them to i32 for a shorter
+ // encoding.
setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
+ AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
+ AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
+ if (Subtarget->is64Bit())
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
} else {
setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
- if (Subtarget->is64Bit())
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
+ if (Subtarget->is64Bit()) {
setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
+ }
}
if (Subtarget->hasPOPCNT()) {
@@ -459,7 +478,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
}
- if (Subtarget->hasXMM())
+ if (Subtarget->hasSSE1())
setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom);
@@ -538,14 +557,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho())
setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
MVT::i64 : MVT::i32, Custom);
- else if (EnableSegmentedStacks)
+ else if (TM.Options.EnableSegmentedStacks)
setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
MVT::i64 : MVT::i32, Custom);
else
setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ?
MVT::i64 : MVT::i32, Expand);
- if (!UseSoftFloat && X86ScalarSSEf64) {
+ if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
// f32 and f64 use SSE.
// Set up the FP register classes.
addRegisterClass(MVT::f32, X86::FR32RegisterClass);
@@ -577,7 +596,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// cases we handle.
addLegalFPImmediate(APFloat(+0.0)); // xorpd
addLegalFPImmediate(APFloat(+0.0f)); // xorps
- } else if (!UseSoftFloat && X86ScalarSSEf32) {
+ } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
// Use SSE for f32, x87 for f64.
// Set up the FP register classes.
addRegisterClass(MVT::f32, X86::FR32RegisterClass);
@@ -606,11 +625,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
- if (!UnsafeFPMath) {
+ if (!TM.Options.UnsafeFPMath) {
setOperationAction(ISD::FSIN , MVT::f64 , Expand);
setOperationAction(ISD::FCOS , MVT::f64 , Expand);
}
- } else if (!UseSoftFloat) {
+ } else if (!TM.Options.UseSoftFloat) {
// f32 and f64 in x87.
// Set up the FP register classes.
addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
@@ -621,7 +640,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
- if (!UnsafeFPMath) {
+ if (!TM.Options.UnsafeFPMath) {
setOperationAction(ISD::FSIN , MVT::f64 , Expand);
setOperationAction(ISD::FCOS , MVT::f64 , Expand);
}
@@ -640,7 +659,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FMA, MVT::f32, Expand);
// Long double always uses X87.
- if (!UseSoftFloat) {
+ if (!TM.Options.UseSoftFloat) {
addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
setOperationAction(ISD::UNDEF, MVT::f80, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
@@ -659,11 +678,16 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
}
- if (!UnsafeFPMath) {
+ if (!TM.Options.UnsafeFPMath) {
setOperationAction(ISD::FSIN , MVT::f80 , Expand);
setOperationAction(ISD::FCOS , MVT::f80 , Expand);
}
+ setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
+ setOperationAction(ISD::FCEIL, MVT::f80, Expand);
+ setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
+ setOperationAction(ISD::FRINT, MVT::f80, Expand);
+ setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
setOperationAction(ISD::FMA, MVT::f80, Expand);
}
@@ -715,7 +739,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
@@ -749,7 +775,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
- if (!UseSoftFloat && Subtarget->hasMMX()) {
+ if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
addRegisterClass(MVT::x86mmx, X86::VR64RegisterClass);
// No operations on x86mmx supported, everything uses intrinsics.
}
@@ -786,7 +812,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
- if (!UseSoftFloat && Subtarget->hasXMM()) {
+ if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
setOperationAction(ISD::FADD, MVT::v4f32, Legal);
@@ -803,7 +829,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
}
- if (!UseSoftFloat && Subtarget->hasXMMInt()) {
+ if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
// FIXME: Unfortunately -soft-float and -no-implicit-float means XMM
@@ -909,7 +935,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
}
- if (Subtarget->hasSSE41() || Subtarget->hasAVX()) {
+ if (Subtarget->hasSSE41()) {
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FCEIL, MVT::f32, Legal);
setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
@@ -924,10 +950,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
- // Can turn SHL into an integer multiply.
- setOperationAction(ISD::SHL, MVT::v4i32, Custom);
- setOperationAction(ISD::SHL, MVT::v16i8, Custom);
-
setOperationAction(ISD::VSELECT, MVT::v2f64, Legal);
setOperationAction(ISD::VSELECT, MVT::v2i64, Legal);
setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
@@ -948,30 +970,47 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
+ // FIXME: these should be Legal, but that's only for the case where
+ // the index is constant. For now, custom expand to deal with that.
if (Subtarget->is64Bit()) {
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
}
}
- if (Subtarget->hasXMMInt()) {
- setOperationAction(ISD::SRL, MVT::v2i64, Custom);
- setOperationAction(ISD::SRL, MVT::v4i32, Custom);
- setOperationAction(ISD::SRL, MVT::v16i8, Custom);
+ if (Subtarget->hasSSE2()) {
setOperationAction(ISD::SRL, MVT::v8i16, Custom);
+ setOperationAction(ISD::SRL, MVT::v16i8, Custom);
- setOperationAction(ISD::SHL, MVT::v2i64, Custom);
- setOperationAction(ISD::SHL, MVT::v4i32, Custom);
setOperationAction(ISD::SHL, MVT::v8i16, Custom);
+ setOperationAction(ISD::SHL, MVT::v16i8, Custom);
- setOperationAction(ISD::SRA, MVT::v4i32, Custom);
setOperationAction(ISD::SRA, MVT::v8i16, Custom);
+ setOperationAction(ISD::SRA, MVT::v16i8, Custom);
+
+ if (Subtarget->hasAVX2()) {
+ setOperationAction(ISD::SRL, MVT::v2i64, Legal);
+ setOperationAction(ISD::SRL, MVT::v4i32, Legal);
+
+ setOperationAction(ISD::SHL, MVT::v2i64, Legal);
+ setOperationAction(ISD::SHL, MVT::v4i32, Legal);
+
+ setOperationAction(ISD::SRA, MVT::v4i32, Legal);
+ } else {
+ setOperationAction(ISD::SRL, MVT::v2i64, Custom);
+ setOperationAction(ISD::SRL, MVT::v4i32, Custom);
+
+ setOperationAction(ISD::SHL, MVT::v2i64, Custom);
+ setOperationAction(ISD::SHL, MVT::v4i32, Custom);
+
+ setOperationAction(ISD::SRA, MVT::v4i32, Custom);
+ }
}
- if (Subtarget->hasSSE42() || Subtarget->hasAVX())
+ if (Subtarget->hasSSE42())
setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
- if (!UseSoftFloat && Subtarget->hasAVX()) {
+ if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) {
addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);
addRegisterClass(MVT::v16i16, X86::VR256RegisterClass);
addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
@@ -1008,18 +1047,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i8, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i16, Custom);
- setOperationAction(ISD::SRL, MVT::v4i64, Custom);
- setOperationAction(ISD::SRL, MVT::v8i32, Custom);
setOperationAction(ISD::SRL, MVT::v16i16, Custom);
setOperationAction(ISD::SRL, MVT::v32i8, Custom);
- setOperationAction(ISD::SHL, MVT::v4i64, Custom);
- setOperationAction(ISD::SHL, MVT::v8i32, Custom);
setOperationAction(ISD::SHL, MVT::v16i16, Custom);
setOperationAction(ISD::SHL, MVT::v32i8, Custom);
- setOperationAction(ISD::SRA, MVT::v8i32, Custom);
setOperationAction(ISD::SRA, MVT::v16i16, Custom);
+ setOperationAction(ISD::SRA, MVT::v32i8, Custom);
setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
@@ -1030,25 +1065,60 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
- setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
- setOperationAction(ISD::VSELECT, MVT::v4i64, Legal);
- setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
- setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v4i64, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v8i32, Legal);
+ setOperationAction(ISD::VSELECT, MVT::v8f32, Legal);
+
+ if (Subtarget->hasAVX2()) {
+ setOperationAction(ISD::ADD, MVT::v4i64, Legal);
+ setOperationAction(ISD::ADD, MVT::v8i32, Legal);
+ setOperationAction(ISD::ADD, MVT::v16i16, Legal);
+ setOperationAction(ISD::ADD, MVT::v32i8, Legal);
+
+ setOperationAction(ISD::SUB, MVT::v4i64, Legal);
+ setOperationAction(ISD::SUB, MVT::v8i32, Legal);
+ setOperationAction(ISD::SUB, MVT::v16i16, Legal);
+ setOperationAction(ISD::SUB, MVT::v32i8, Legal);
+
+ setOperationAction(ISD::MUL, MVT::v4i64, Custom);
+ setOperationAction(ISD::MUL, MVT::v8i32, Legal);
+ setOperationAction(ISD::MUL, MVT::v16i16, Legal);
+ // Don't lower v32i8 because there is no 128-bit byte mul
+
+ setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
+
+ setOperationAction(ISD::SRL, MVT::v4i64, Legal);
+ setOperationAction(ISD::SRL, MVT::v8i32, Legal);
+
+ setOperationAction(ISD::SHL, MVT::v4i64, Legal);
+ setOperationAction(ISD::SHL, MVT::v8i32, Legal);
+
+ setOperationAction(ISD::SRA, MVT::v8i32, Legal);
+ } else {
+ setOperationAction(ISD::ADD, MVT::v4i64, Custom);
+ setOperationAction(ISD::ADD, MVT::v8i32, Custom);
+ setOperationAction(ISD::ADD, MVT::v16i16, Custom);
+ setOperationAction(ISD::ADD, MVT::v32i8, Custom);
+
+ setOperationAction(ISD::SUB, MVT::v4i64, Custom);
+ setOperationAction(ISD::SUB, MVT::v8i32, Custom);
+ setOperationAction(ISD::SUB, MVT::v16i16, Custom);
+ setOperationAction(ISD::SUB, MVT::v32i8, Custom);
+
+ setOperationAction(ISD::MUL, MVT::v4i64, Custom);
+ setOperationAction(ISD::MUL, MVT::v8i32, Custom);
+ setOperationAction(ISD::MUL, MVT::v16i16, Custom);
+ // Don't lower v32i8 because there is no 128-bit byte mul
- setOperationAction(ISD::ADD, MVT::v4i64, Custom);
- setOperationAction(ISD::ADD, MVT::v8i32, Custom);
- setOperationAction(ISD::ADD, MVT::v16i16, Custom);
- setOperationAction(ISD::ADD, MVT::v32i8, Custom);
+ setOperationAction(ISD::SRL, MVT::v4i64, Custom);
+ setOperationAction(ISD::SRL, MVT::v8i32, Custom);
- setOperationAction(ISD::SUB, MVT::v4i64, Custom);
- setOperationAction(ISD::SUB, MVT::v8i32, Custom);
- setOperationAction(ISD::SUB, MVT::v16i16, Custom);
- setOperationAction(ISD::SUB, MVT::v32i8, Custom);
+ setOperationAction(ISD::SHL, MVT::v4i64, Custom);
+ setOperationAction(ISD::SHL, MVT::v8i32, Custom);
- setOperationAction(ISD::MUL, MVT::v4i64, Custom);
- setOperationAction(ISD::MUL, MVT::v8i32, Custom);
- setOperationAction(ISD::MUL, MVT::v16i16, Custom);
- // Don't lower v32i8 because there is no 128-bit byte mul
+ setOperationAction(ISD::SRA, MVT::v8i32, Custom);
+ }
// Custom lower several nodes for 256-bit types.
for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
@@ -1099,7 +1169,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// of this type with custom code.
for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; VT++) {
- setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,
+ Custom);
}
// We want to custom lower some of our intrinsics.
@@ -1137,7 +1208,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
- setTargetDAGCombine(ISD::BUILD_VECTOR);
setTargetDAGCombine(ISD::VSELECT);
setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::SHL);
@@ -1152,9 +1222,13 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setTargetDAGCombine(ISD::LOAD);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::ZERO_EXTEND);
+ setTargetDAGCombine(ISD::SIGN_EXTEND);
+ setTargetDAGCombine(ISD::TRUNCATE);
setTargetDAGCombine(ISD::SINT_TO_FP);
if (Subtarget->is64Bit())
setTargetDAGCombine(ISD::MUL);
+ if (Subtarget->hasBMI())
+ setTargetDAGCombine(ISD::XOR);
computeRegisterProperties();
@@ -1166,10 +1240,10 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
- setPrefLoopAlignment(16);
+ setPrefLoopAlignment(4); // 2^4 bytes.
benefitFromCodePlacementOpt = true;
- setPrefFunctionAlignment(4);
+ setPrefFunctionAlignment(4); // 2^4 bytes.
}
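// Note: these preferred alignments are log2 values, so the old call
// setPrefLoopAlignment(16) accidentally asked for 2^16-byte alignment;
// 4 requests the intended 16 bytes:
//
//   unsigned PrefLoopBytes = 1u << getPrefLoopAlignment();  // 1 << 4 == 16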
@@ -1219,7 +1293,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
}
unsigned Align = 4;
- if (Subtarget->hasXMM())
+ if (Subtarget->hasSSE1())
getMaxByValAlign(Ty, Align);
return Align;
}
@@ -1230,7 +1304,7 @@ unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
/// means there isn't a need to check it against alignment requirement,
/// probably because the source does not need to be loaded. If
-/// 'NonScalarIntSafe' is true, that means it's safe to return a
+/// 'IsZeroVal' is true, that means it's safe to return a
/// non-scalar-integer type, e.g. empty string source, constant, or loaded
/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
@@ -1239,31 +1313,34 @@ unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
- bool NonScalarIntSafe,
+ bool IsZeroVal,
bool MemcpyStrSrc,
MachineFunction &MF) const {
// FIXME: This turns off use of xmm stores for memset/memcpy on targets like
// linux. This is because the stack realignment code can't handle certain
// cases like PR2962. This should be removed when PR2962 is fixed.
const Function *F = MF.getFunction();
- if (NonScalarIntSafe &&
+ if (IsZeroVal &&
!F->hasFnAttr(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
(Subtarget->isUnalignedMemAccessFast() ||
((DstAlign == 0 || DstAlign >= 16) &&
(SrcAlign == 0 || SrcAlign >= 16))) &&
Subtarget->getStackAlignment() >= 16) {
- if (Subtarget->hasAVX() &&
- Subtarget->getStackAlignment() >= 32)
- return MVT::v8f32;
- if (Subtarget->hasXMMInt())
+ if (Subtarget->getStackAlignment() >= 32) {
+ if (Subtarget->hasAVX2())
+ return MVT::v8i32;
+ if (Subtarget->hasAVX())
+ return MVT::v8f32;
+ }
+ if (Subtarget->hasSSE2())
return MVT::v4i32;
- if (Subtarget->hasXMM())
+ if (Subtarget->hasSSE1())
return MVT::v4f32;
} else if (!MemcpyStrSrc && Size >= 8 &&
!Subtarget->is64Bit() &&
Subtarget->getStackAlignment() >= 8 &&
- Subtarget->hasXMMInt()) {
+ Subtarget->hasSSE2()) {
// Do not use f64 to lower memcpy if source is string constant. It's
// better to use i32 to avoid the loads.
return MVT::f64;
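// Illustration (behavior assumed, not asserted by this commit): with AVX2
// and a 32-byte-aligned stack, a 32-byte memset of zero can now be emitted
// as a single v8i32 store:
//
//   char Buf[32];
//   memset(Buf, 0, sizeof(Buf));   // one 256-bit store when v8i32 is picked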
@@ -1428,14 +1505,14 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// or SSE or MMX vectors.
if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
- (Subtarget->is64Bit() && !Subtarget->hasXMM())) {
+ (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
report_fatal_error("SSE register return with SSE disabled");
}
// Likewise we can't return F64 values with SSE1 only. gcc does so, but
// llvm-gcc has never done it right and no one has noticed, so this
// should be OK for now.
if (ValVT == MVT::f64 &&
- (Subtarget->is64Bit() && !Subtarget->hasXMMInt()))
+ (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
report_fatal_error("SSE2 register return with SSE2 disabled");
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
@@ -1461,7 +1538,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
ValToCopy);
// If we don't have SSE2 available, convert to v4f32 so the generated
// register is legal.
- if (!Subtarget->hasXMMInt())
+ if (!Subtarget->hasSSE2())
ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
}
}
@@ -1501,15 +1578,21 @@ X86TargetLowering::LowerReturn(SDValue Chain,
MVT::Other, &RetOps[0], RetOps.size());
}
-bool X86TargetLowering::isUsedByReturnOnly(SDNode *N) const {
+bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
if (N->getNumValues() != 1)
return false;
if (!N->hasNUsesOfValue(1, 0))
return false;
+ SDValue TCChain = Chain;
SDNode *Copy = *N->use_begin();
- if (Copy->getOpcode() != ISD::CopyToReg &&
- Copy->getOpcode() != ISD::FP_EXTEND)
+ if (Copy->getOpcode() == ISD::CopyToReg) {
+ // If the copy has a glue operand, we conservatively assume it isn't safe to
+ // perform a tail call.
+ if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
+ return false;
+ TCChain = Copy->getOperand(0);
+ } else if (Copy->getOpcode() != ISD::FP_EXTEND)
return false;
bool HasRet = false;
@@ -1520,7 +1603,11 @@ bool X86TargetLowering::isUsedByReturnOnly(SDNode *N) const {
HasRet = true;
}
- return HasRet;
+ if (!HasRet)
+ return false;
+
+ Chain = TCChain;
+ return true;
}
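// A hypothetical IR shape the rewritten check accepts (sketch): the copy's
// chain is threaded out so the call below can become "jmp callee":
//
//   define i32 @caller(i32 %x) {
//     %r = tail call i32 @callee(i32 %x)
//     ret i32 %r                   ; the return is the sole user of %r
//   }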
EVT
@@ -1561,7 +1648,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// If this is x86-64, and we disabled SSE, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
- ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasXMM())) {
+ ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
report_fatal_error("SSE register return with SSE disabled");
}
@@ -1651,7 +1738,7 @@ static bool IsTailCallConvention(CallingConv::ID CC) {
}
bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
- if (!CI->isTailCall())
+ if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
return false;
CallSite CS(CI);
@@ -1664,7 +1751,8 @@ bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
/// FuncIsMadeTailCallSafe - Return true if the function is being made into
/// a tailcall target by changing its ABI.
-static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) {
+static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
+ bool GuaranteedTailCallOpt) {
return GuaranteedTailCallOpt && IsTailCallConvention(CC);
}
@@ -1678,7 +1766,8 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
unsigned i) const {
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
- bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv);
+ bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv,
+ getTargetMachine().Options.GuaranteedTailCallOpt);
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
EVT ValVT;
@@ -1704,7 +1793,7 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
return DAG.getLoad(ValVT, dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
- false, false, 0);
+ false, false, false, 0);
}
}
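// The extra 'false' threaded through these getLoad calls matches a new
// flag inserted before the alignment argument; the assumed signature after
// this change is roughly:
//
//   SDValue getLoad(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
//                   MachinePointerInfo PtrInfo, bool isVolatile,
//                   bool isNonTemporal, bool isInvariant, unsigned Align);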
@@ -1728,6 +1817,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
MachineFrameInfo *MFI = MF.getFrameInfo();
bool Is64Bit = Subtarget->is64Bit();
+ bool IsWindows = Subtarget->isTargetWindows();
bool IsWin64 = Subtarget->isTargetWin64();
assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
@@ -1758,7 +1848,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
if (VA.isRegLoc()) {
EVT RegVT = VA.getLocVT();
- TargetRegisterClass *RC = NULL;
+ const TargetRegisterClass *RC;
if (RegVT == MVT::i32)
RC = X86::GR32RegisterClass;
else if (Is64Bit && RegVT == MVT::i64)
@@ -1807,7 +1897,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
// If value is passed via pointer - do a load.
if (VA.getLocInfo() == CCValAssign::Indirect)
ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
InVals.push_back(ArgValue);
}
@@ -1828,7 +1918,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned StackSize = CCInfo.getNextStackOffset();
// Align stack specially for tail calls.
- if (FuncIsMadeTailCallSafe(CallConv))
+ if (FuncIsMadeTailCallSafe(CallConv,
+ MF.getTarget().Options.GuaranteedTailCallOpt))
StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
// If the function takes variable number of arguments, make a frame index for
@@ -1842,17 +1933,17 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;
// FIXME: We should really autogenerate these arrays
- static const unsigned GPR64ArgRegsWin64[] = {
+ static const uint16_t GPR64ArgRegsWin64[] = {
X86::RCX, X86::RDX, X86::R8, X86::R9
};
- static const unsigned GPR64ArgRegs64Bit[] = {
+ static const uint16_t GPR64ArgRegs64Bit[] = {
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
};
- static const unsigned XMMArgRegs64Bit[] = {
+ static const uint16_t XMMArgRegs64Bit[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
- const unsigned *GPR64ArgRegs;
+ const uint16_t *GPR64ArgRegs;
unsigned NumXMMRegs = 0;
if (IsWin64) {
@@ -1865,17 +1956,20 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
GPR64ArgRegs = GPR64ArgRegs64Bit;
- NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, TotalNumXMMRegs);
+ NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit,
+ TotalNumXMMRegs);
}
unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
TotalNumIntRegs);
bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat);
- assert(!(NumXMMRegs && !Subtarget->hasXMM()) &&
+ assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
- assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloatOps) &&
+ assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat &&
+ NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
- if (UseSoftFloat || NoImplicitFloatOps || !Subtarget->hasXMM())
+ if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
+ !Subtarget->hasSSE1())
// Kernel mode asks for SSE to be disabled, so don't push them
// on the stack.
TotalNumXMMRegs = 0;
@@ -1892,8 +1986,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
} else {
// For X86-64, if there are vararg parameters that are passed via
- // registers, then we must store them to their spots on the stack so they
- // may be loaded by deferencing the result of va_next.
+ // registers, then we must store them to their spots on the stack so
+ // they may be loaded by dereferencing the result of va_next.
FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
FuncInfo->setRegSaveFrameIndex(
@@ -1953,12 +2047,14 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
}
// Some CCs need callee pop.
- if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt)) {
+ if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
+ MF.getTarget().Options.GuaranteedTailCallOpt)) {
FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
} else {
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
- if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins))
+ if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
+ ArgsAreStructReturn(Ins))
FuncInfo->setBytesToPopOnReturn(4);
}
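// Worked example for the !IsWindows condition above (illustrative): given
//
//   struct Big { int v[8]; };
//   Big make();                    // returned via hidden sret pointer
//
// a SysV/Darwin i386 callee pops the pointer itself ("ret $4"), whereas an
// MSVC-style Win32 callee uses a plain "ret" and the caller adjusts ESP.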
@@ -2006,7 +2102,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
// Load the "old" Return address.
OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
return SDValue(OutRetAddr.getNode(), 1);
}
@@ -2033,7 +2129,7 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
SDValue
X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -2042,9 +2138,13 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget->is64Bit();
bool IsWin64 = Subtarget->isTargetWin64();
+ bool IsWindows = Subtarget->isTargetWindows();
bool IsStructRet = CallIsStructReturn(Outs);
bool IsSibcall = false;
+ if (MF.getTarget().Options.DisableTailCalls)
+ isTailCall = false;
+
if (isTailCall) {
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
@@ -2053,7 +2153,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
- if (!GuaranteedTailCallOpt && isTailCall)
+ if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
IsSibcall = true;
if (isTailCall)
@@ -2081,7 +2181,8 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// This is a sibcall. The memory operands are available in caller's
// own caller's stack.
NumBytes = 0;
- else if (GuaranteedTailCallOpt && IsTailCallConvention(CallConv))
+ else if (getTargetMachine().Options.GuaranteedTailCallOpt &&
+ IsTailCallConvention(CallConv))
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0;
@@ -2231,12 +2332,12 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// registers used and is in the range 0 - 8 inclusive.
// Count the number of XMM registers allocated.
- static const unsigned XMMArgRegs[] = {
+ static const uint16_t XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
- assert((Subtarget->hasXMM() || !NumXMMRegs)
+ assert((Subtarget->hasSSE1() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
Chain = DAG.getCopyToReg(Chain, dl, X86::AL,
@@ -2260,7 +2361,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
int FI = 0;
// Do not flag preceding copytoreg stuff together with the following stuff.
InFlag = SDValue();
- if (GuaranteedTailCallOpt) {
+ if (getTargetMachine().Options.GuaranteedTailCallOpt) {
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
if (VA.isRegLoc())
@@ -2368,7 +2469,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (ExtraLoad)
Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
MachinePointerInfo::getGOT(),
- false, false, 0);
+ false, false, false, 0);
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
unsigned char OpFlags = 0;
@@ -2421,6 +2522,12 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (Is64Bit && isVarArg && !IsWin64)
Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
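// The mask pushed above is a bit vector indexed by physical register
// number, with a set bit meaning the register is preserved across the
// call; a sketch of how such a mask is tested:
//
//   static bool isPreserved(const uint32_t *Mask, unsigned PhysReg) {
//     return Mask[PhysReg / 32] & (1u << (PhysReg % 32));
//   }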
if (InFlag.getNode())
Ops.push_back(InFlag);
@@ -2440,12 +2547,15 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush;
- if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, GuaranteedTailCallOpt))
+ if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
+ getTargetMachine().Options.GuaranteedTailCallOpt))
NumBytesForCalleeToPush = NumBytes; // Callee pops everything
- else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet)
+ else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
+ IsStructRet)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets.
+ // For MSVC Win32 targets, the caller pops the hidden struct pointer.
NumBytesForCalleeToPush = 4;
else
NumBytesForCalleeToPush = 0; // Callee pops nothing.
@@ -2598,7 +2708,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
CallingConv::ID CallerCC = CallerF->getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
- if (GuaranteedTailCallOpt) {
+ if (getTargetMachine().Options.GuaranteedTailCallOpt) {
if (IsTailCallConvention(CalleeCC) && CCMatch)
return true;
return false;
@@ -2641,9 +2751,9 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
return false;
}
- // If the call result is in ST0 / ST1, it needs to be popped off the x87 stack.
- // Therefore if it's not used by the call it is not safe to optimize this into
- // a sibcall.
+ // If the call result is in ST0 / ST1, it needs to be popped off the x87
+ // stack. Therefore, if it's not used by the call it is not safe to optimize
+ // this into a sibcall.
bool Unused = false;
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
if (!Ins[i].Used) {
@@ -2785,9 +2895,8 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::PSHUFD:
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
- case X86ISD::SHUFPD:
+ case X86ISD::SHUFP:
case X86ISD::PALIGN:
- case X86ISD::SHUFPS:
case X86ISD::MOVLHPS:
case X86ISD::MOVLHPD:
case X86ISD::MOVHLPS:
@@ -2798,34 +2907,16 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::MOVDDUP:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
- case X86ISD::UNPCKLPS:
- case X86ISD::UNPCKLPD:
- case X86ISD::VUNPCKLPSY:
- case X86ISD::VUNPCKLPDY:
- case X86ISD::PUNPCKLWD:
- case X86ISD::PUNPCKLBW:
- case X86ISD::PUNPCKLDQ:
- case X86ISD::PUNPCKLQDQ:
- case X86ISD::UNPCKHPS:
- case X86ISD::UNPCKHPD:
- case X86ISD::VUNPCKHPSY:
- case X86ISD::VUNPCKHPDY:
- case X86ISD::PUNPCKHWD:
- case X86ISD::PUNPCKHBW:
- case X86ISD::PUNPCKHDQ:
- case X86ISD::PUNPCKHQDQ:
- case X86ISD::VPERMILPS:
- case X86ISD::VPERMILPSY:
- case X86ISD::VPERMILPD:
- case X86ISD::VPERMILPDY:
- case X86ISD::VPERM2F128:
+ case X86ISD::UNPCKL:
+ case X86ISD::UNPCKH:
+ case X86ISD::VPERMILP:
+ case X86ISD::VPERM2X128:
return true;
}
- return false;
}
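// This commit repeatedly deletes returns that followed fully covered
// switches, as here; the shape relied on (assuming the elided cases above
// include a 'default' that returns) is:
//
//   switch (Opcode) {
//   default: return false;
//   case X86ISD::UNPCKL:
//   case X86ISD::UNPCKH: return true;
//   }                              // every path already returned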
static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
- SDValue V1, SelectionDAG &DAG) {
+ SDValue V1, SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
case X86ISD::MOVSHDUP:
@@ -2833,39 +2924,32 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
case X86ISD::MOVDDUP:
return DAG.getNode(Opc, dl, VT, V1);
}
-
- return SDValue();
}
static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
- SDValue V1, unsigned TargetMask, SelectionDAG &DAG) {
+ SDValue V1, unsigned TargetMask,
+ SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
case X86ISD::PSHUFD:
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
- case X86ISD::VPERMILPS:
- case X86ISD::VPERMILPSY:
- case X86ISD::VPERMILPD:
- case X86ISD::VPERMILPDY:
+ case X86ISD::VPERMILP:
return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
}
-
- return SDValue();
}
static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
- SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) {
+ SDValue V1, SDValue V2, unsigned TargetMask,
+ SelectionDAG &DAG) {
switch(Opc) {
default: llvm_unreachable("Unknown x86 shuffle node");
case X86ISD::PALIGN:
- case X86ISD::SHUFPD:
- case X86ISD::SHUFPS:
- case X86ISD::VPERM2F128:
+ case X86ISD::SHUFP:
+ case X86ISD::VPERM2X128:
return DAG.getNode(Opc, dl, VT, V1, V2,
DAG.getConstant(TargetMask, MVT::i8));
}
- return SDValue();
}
static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
@@ -2879,25 +2963,10 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
case X86ISD::MOVLPD:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
- case X86ISD::UNPCKLPS:
- case X86ISD::UNPCKLPD:
- case X86ISD::VUNPCKLPSY:
- case X86ISD::VUNPCKLPDY:
- case X86ISD::PUNPCKLWD:
- case X86ISD::PUNPCKLBW:
- case X86ISD::PUNPCKLDQ:
- case X86ISD::PUNPCKLQDQ:
- case X86ISD::UNPCKHPS:
- case X86ISD::UNPCKHPD:
- case X86ISD::VUNPCKHPSY:
- case X86ISD::VUNPCKHPDY:
- case X86ISD::PUNPCKHWD:
- case X86ISD::PUNPCKHBW:
- case X86ISD::PUNPCKHDQ:
- case X86ISD::PUNPCKHQDQ:
+ case X86ISD::UNPCKL:
+ case X86ISD::UNPCKH:
return DAG.getNode(Opc, dl, VT, V1, V2);
}
- return SDValue();
}
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
@@ -3092,17 +3161,6 @@ static bool isUndefOrInRange(int Val, int Low, int Hi) {
return (Val < 0) || (Val >= Low && Val < Hi);
}
-/// isUndefOrInRange - Return true if every element in Mask, begining
-/// from position Pos and ending in Pos+Size, falls within the specified
-/// range (L, L+Pos]. or is undef.
-static bool isUndefOrInRange(const SmallVectorImpl<int> &Mask,
- int Pos, int Size, int Low, int Hi) {
- for (int i = Pos, e = Pos+Size; i != e; ++i)
- if (!isUndefOrInRange(Mask[i], Low, Hi))
- return false;
- return true;
-}
-
/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
/// specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
@@ -3114,7 +3172,7 @@ static bool isUndefOrEqual(int Val, int CmpVal) {
/// isSequentialOrUndefInRange - Return true if every element in Mask,
/// beginning at position Pos and ending at Pos+Size, either is undef or
/// matches the sequential values Low, Low+1, and so on.
-static bool isSequentialOrUndefInRange(const SmallVectorImpl<int> &Mask,
+static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
int Pos, int Size, int Low) {
for (int i = Pos, e = Pos+Size; i != e; ++i, ++Low)
if (!isUndefOrEqual(Mask[i], Low))
@@ -3125,7 +3183,7 @@ static bool isSequentialOrUndefInRange(const SmallVectorImpl<int> &Mask,
/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
/// the second operand.
-static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
+static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) {
if (VT == MVT::v4f32 || VT == MVT::v4i32 )
return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
if (VT == MVT::v2f64 || VT == MVT::v2i64)
@@ -3133,302 +3191,188 @@ static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
return false;
}
-bool X86::isPSHUFDMask(ShuffleVectorSDNode *N) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isPSHUFDMask(M, N->getValueType(0));
-}
-
/// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFHW.
-static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
+static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT) {
if (VT != MVT::v8i16)
return false;
// Lower quadword copied in order or undef.
- for (int i = 0; i != 4; ++i)
- if (Mask[i] >= 0 && Mask[i] != i)
- return false;
+ if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
+ return false;
// Upper quadword shuffled.
- for (int i = 4; i != 8; ++i)
+ for (unsigned i = 4; i != 8; ++i)
if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7))
return false;
return true;
}
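// Example v8i16 masks (values illustrative):
//
//   int Ok[]  = {0, 1, 2, 3, 7, 6, 5, 4};  // low quad in place, high quad
//                                          // permuted within 4..7: accepted
//   int Bad[] = {1, 0, 2, 3, 4, 5, 6, 7};  // Mask[0] != 0: rejected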
-bool X86::isPSHUFHWMask(ShuffleVectorSDNode *N) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isPSHUFHWMask(M, N->getValueType(0));
-}
-
/// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFLW.
-static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
+static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT) {
if (VT != MVT::v8i16)
return false;
// Upper quadword copied in order.
- for (int i = 4; i != 8; ++i)
- if (Mask[i] >= 0 && Mask[i] != i)
- return false;
+ if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
+ return false;
// Lower quadword shuffled.
- for (int i = 0; i != 4; ++i)
+ for (unsigned i = 0; i != 4; ++i)
if (Mask[i] >= 4)
return false;
return true;
}
-bool X86::isPSHUFLWMask(ShuffleVectorSDNode *N) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isPSHUFLWMask(M, N->getValueType(0));
-}
-
/// isPALIGNRMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PALIGNR.
-static bool isPALIGNRMask(const SmallVectorImpl<int> &Mask, EVT VT,
- bool hasSSSE3OrAVX) {
- int i, e = VT.getVectorNumElements();
- if (VT.getSizeInBits() != 128 && VT.getSizeInBits() != 64)
+static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT,
+ const X86Subtarget *Subtarget) {
+ if ((VT.getSizeInBits() == 128 && !Subtarget->hasSSSE3()) ||
+ (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2()))
return false;
- // Do not handle v2i64 / v2f64 shuffles with palignr.
- if (e < 4 || !hasSSSE3OrAVX)
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLaneElts = NumElts/NumLanes;
+
+ // Do not handle 64-bit element shuffles with palignr.
+ if (NumLaneElts == 2)
return false;
- for (i = 0; i != e; ++i)
- if (Mask[i] >= 0)
- break;
+ for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
+ unsigned i;
+ for (i = 0; i != NumLaneElts; ++i) {
+ if (Mask[i+l] >= 0)
+ break;
+ }
- // All undef, not a palignr.
- if (i == e)
- return false;
+ // Lane is all undef, go to next lane
+ if (i == NumLaneElts)
+ continue;
- // Make sure we're shifting in the right direction.
- if (Mask[i] <= i)
- return false;
+ int Start = Mask[i+l];
- int s = Mask[i] - i;
+ // Make sure it's in this lane in one of the sources
+ if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
+ !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
+ return false;
- // Check the rest of the elements to see if they are consecutive.
- for (++i; i != e; ++i) {
- int m = Mask[i];
- if (m >= 0 && m != s+i)
+ // If not lane 0, then we must match lane 0
+ if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
return false;
- }
- return true;
-}
-/// isVSHUFPSYMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to 256-bit
-/// VSHUFPSY.
-static bool isVSHUFPSYMask(const SmallVectorImpl<int> &Mask, EVT VT,
- const X86Subtarget *Subtarget) {
- int NumElems = VT.getVectorNumElements();
+ // Correct second source to be contiguous with first source
+ if (Start >= (int)NumElts)
+ Start -= NumElts - NumLaneElts;
- if (!Subtarget->hasAVX() || VT.getSizeInBits() != 256)
- return false;
+ // Make sure we're shifting in the right direction.
+ if (Start <= (int)(i+l))
+ return false;
- if (NumElems != 8)
- return false;
+ Start -= i;
- // VSHUFPSY divides the resulting vector into 4 chunks.
- // The sources are also splitted into 4 chunks, and each destination
- // chunk must come from a different source chunk.
- //
- // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
- // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y9
- //
- // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
- // Y3..Y0, Y3..Y0, X3..X0, X3..X0
- //
- int QuarterSize = NumElems/4;
- int HalfSize = QuarterSize*2;
- for (int i = 0; i < QuarterSize; ++i)
- if (!isUndefOrInRange(Mask[i], 0, HalfSize))
- return false;
- for (int i = QuarterSize; i < QuarterSize*2; ++i)
- if (!isUndefOrInRange(Mask[i], NumElems, NumElems+HalfSize))
- return false;
+ // Check the rest of the elements to see if they are consecutive.
+ for (++i; i != NumLaneElts; ++i) {
+ int Idx = Mask[i+l];
- // The mask of the second half must be the same as the first but with
- // the appropriate offsets. This works in the same way as VPERMILPS
- // works with masks.
- for (int i = QuarterSize*2; i < QuarterSize*3; ++i) {
- if (!isUndefOrInRange(Mask[i], HalfSize, NumElems))
- return false;
- int FstHalfIdx = i-HalfSize;
- if (Mask[FstHalfIdx] < 0)
- continue;
- if (!isUndefOrEqual(Mask[i], Mask[FstHalfIdx]+HalfSize))
- return false;
- }
- for (int i = QuarterSize*3; i < NumElems; ++i) {
- if (!isUndefOrInRange(Mask[i], NumElems+HalfSize, NumElems*2))
- return false;
- int FstHalfIdx = i-HalfSize;
- if (Mask[FstHalfIdx] < 0)
- continue;
- if (!isUndefOrEqual(Mask[i], Mask[FstHalfIdx]+HalfSize))
- return false;
+ // Make sure it's in this lane
+ if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
+ !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
+ return false;
+
+ // If not lane 0, then we must match lane 0
+ if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
+ return false;
+
+ if (Idx >= (int)NumElts)
+ Idx -= NumElts - NumLaneElts;
+ if (!isUndefOrEqual(Idx, Start+i))
+ return false;
+
+ }
}
return true;
}
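// Illustrative v8i16 mask accepted by the per-lane logic above:
//
//   int M[] = {1, 2, 3, 4, 5, 6, 7, 8};    // consecutive, crossing into V2
//
// Start == 1, i.e. PALIGNR by one i16 element (a 2-byte immediate); with
// AVX2, each additional 128-bit lane must repeat lane 0's pattern.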
-/// getShuffleVSHUFPSYImmediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_MASK mask with VSHUFPSY instruction.
-static unsigned getShuffleVSHUFPSYImmediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- EVT VT = SVOp->getValueType(0);
- int NumElems = VT.getVectorNumElements();
-
- assert(NumElems == 8 && VT.getSizeInBits() == 256 &&
- "Only supports v8i32 and v8f32 types");
-
- int HalfSize = NumElems/2;
- unsigned Mask = 0;
- for (int i = 0; i != NumElems ; ++i) {
- if (SVOp->getMaskElt(i) < 0)
+/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
+/// the two vector operands have swapped position.
+static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
+ unsigned NumElems) {
+ for (unsigned i = 0; i != NumElems; ++i) {
+ int idx = Mask[i];
+ if (idx < 0)
continue;
- // The mask of the first half must be equal to the second one.
- unsigned Shamt = (i%HalfSize)*2;
- unsigned Elt = SVOp->getMaskElt(i) % HalfSize;
- Mask |= Elt << Shamt;
+ else if (idx < (int)NumElems)
+ Mask[i] = idx + NumElems;
+ else
+ Mask[i] = idx - NumElems;
}
-
- return Mask;
}
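// Example (NumElems == 4): commuting <0, 5, 2, 7> yields <4, 1, 6, 3>,
// so indices that named V1 now name V2 and vice versa:
//
//   int M[] = {0, 5, 2, 7};
//   // after CommuteVectorShuffleMask(M, 4): {4, 1, 6, 3}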
-/// isVSHUFPDYMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to 256-bit
-/// VSHUFPDY. This shuffle doesn't have the same restriction as the PS
-/// version and the mask of the second half isn't binded with the first
-/// one.
-static bool isVSHUFPDYMask(const SmallVectorImpl<int> &Mask, EVT VT,
- const X86Subtarget *Subtarget) {
- int NumElems = VT.getVectorNumElements();
-
- if (!Subtarget->hasAVX() || VT.getSizeInBits() != 256)
+/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
+/// specifies a shuffle of elements that is suitable for input to 128/256-bit
+/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
+/// the reverse of what x86 shuffles want.
+static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX,
+ bool Commuted = false) {
+ if (!HasAVX && VT.getSizeInBits() == 256)
return false;
- if (NumElems != 4)
+ unsigned NumElems = VT.getVectorNumElements();
+ unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLaneElems = NumElems/NumLanes;
+
+ if (NumLaneElems != 2 && NumLaneElems != 4)
return false;
// VSHUFPSY divides the resulting vector into 4 chunks.
// The sources are also split into 4 chunks, and each destination
// chunk must come from a different source chunk.
//
+ // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
+ // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ //
+ // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
+ // Y3..Y0, Y3..Y0, X3..X0, X3..X0
+ //
+ // VSHUFPDY divides the resulting vector into 4 chunks.
+ // The sources are also split into 4 chunks, and each destination
+ // chunk must come from a different source chunk.
+ //
// SRC1 => X3 X2 X1 X0
// SRC2 => Y3 Y2 Y1 Y0
//
- // DST => Y2..Y3, X2..X3, Y1..Y0, X1..X0
+ // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
//
- int QuarterSize = NumElems/4;
- int HalfSize = QuarterSize*2;
- for (int i = 0; i < QuarterSize; ++i)
- if (!isUndefOrInRange(Mask[i], 0, HalfSize))
- return false;
- for (int i = QuarterSize; i < QuarterSize*2; ++i)
- if (!isUndefOrInRange(Mask[i], NumElems, NumElems+HalfSize))
- return false;
- for (int i = QuarterSize*2; i < QuarterSize*3; ++i)
- if (!isUndefOrInRange(Mask[i], HalfSize, NumElems))
- return false;
- for (int i = QuarterSize*3; i < NumElems; ++i)
- if (!isUndefOrInRange(Mask[i], NumElems+HalfSize, NumElems*2))
- return false;
-
- return true;
-}
-
-/// getShuffleVSHUFPDYImmediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_MASK mask with VSHUFPDY instruction.
-static unsigned getShuffleVSHUFPDYImmediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- EVT VT = SVOp->getValueType(0);
- int NumElems = VT.getVectorNumElements();
-
- assert(NumElems == 4 && VT.getSizeInBits() == 256 &&
- "Only supports v4i64 and v4f64 types");
-
- int HalfSize = NumElems/2;
- unsigned Mask = 0;
- for (int i = 0; i != NumElems ; ++i) {
- if (SVOp->getMaskElt(i) < 0)
- continue;
- int Elt = SVOp->getMaskElt(i) % HalfSize;
- Mask |= Elt << i;
+ unsigned HalfLaneElems = NumLaneElems/2;
+ for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
+ for (unsigned i = 0; i != NumLaneElems; ++i) {
+ int Idx = Mask[i+l];
+ unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
+ if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
+ return false;
+ // For VSHUFPSY, the mask of the second half must be the same as the
+ // first but with the appropriate offsets. This works in the same way as
+ // VPERMILPS works with masks.
+ if (NumElems != 8 || l == 0 || Mask[i] < 0)
+ continue;
+ if (!isUndefOrEqual(Idx, Mask[i]+l))
+ return false;
+ }
}
- return Mask;
-}
-
-/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to 128-bit
-/// SHUFPS and SHUFPD.
-static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
- int NumElems = VT.getVectorNumElements();
-
- if (VT.getSizeInBits() != 128)
- return false;
-
- if (NumElems != 2 && NumElems != 4)
- return false;
-
- int Half = NumElems / 2;
- for (int i = 0; i < Half; ++i)
- if (!isUndefOrInRange(Mask[i], 0, NumElems))
- return false;
- for (int i = Half; i < NumElems; ++i)
- if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2))
- return false;
-
- return true;
-}
-
-bool X86::isSHUFPMask(ShuffleVectorSDNode *N) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isSHUFPMask(M, N->getValueType(0));
-}
-
-/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
-/// the reverse of what x86 shuffles want. x86 shuffles requires the lower
-/// half elements to come from vector 1 (which would equal the dest.) and
-/// the upper half to come from vector 2.
-static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
- int NumElems = VT.getVectorNumElements();
-
- if (NumElems != 2 && NumElems != 4)
- return false;
-
- int Half = NumElems / 2;
- for (int i = 0; i < Half; ++i)
- if (!isUndefOrInRange(Mask[i], NumElems, NumElems*2))
- return false;
- for (int i = Half; i < NumElems; ++i)
- if (!isUndefOrInRange(Mask[i], 0, NumElems))
- return false;
return true;
}
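// Illustrative v4f32 mask accepted above (Commuted == false):
//
//   int M[] = {2, 3, 4, 5};        // low half from V1, high half from V2
//
// which corresponds to SHUFPS with immediate 0x4E
// (dst = V1[2], V1[3], V2[0], V2[1]).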
-static bool isCommutedSHUFP(ShuffleVectorSDNode *N) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return isCommutedSHUFPMask(M, N->getValueType(0));
-}
-
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
-bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) {
- EVT VT = N->getValueType(0);
+static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) {
unsigned NumElems = VT.getVectorNumElements();
if (VT.getSizeInBits() != 128)
@@ -3438,17 +3382,16 @@ bool X86::isMOVHLPSMask(ShuffleVectorSDNode *N) {
return false;
// Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
- return isUndefOrEqual(N->getMaskElt(0), 6) &&
- isUndefOrEqual(N->getMaskElt(1), 7) &&
- isUndefOrEqual(N->getMaskElt(2), 2) &&
- isUndefOrEqual(N->getMaskElt(3), 3);
+ return isUndefOrEqual(Mask[0], 6) &&
+ isUndefOrEqual(Mask[1], 7) &&
+ isUndefOrEqual(Mask[2], 2) &&
+ isUndefOrEqual(Mask[3], 3);
}
/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>
-bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) {
- EVT VT = N->getValueType(0);
+static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) {
unsigned NumElems = VT.getVectorNumElements();
if (VT.getSizeInBits() != 128)
@@ -3457,26 +3400,29 @@ bool X86::isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N) {
if (NumElems != 4)
return false;
- return isUndefOrEqual(N->getMaskElt(0), 2) &&
- isUndefOrEqual(N->getMaskElt(1), 3) &&
- isUndefOrEqual(N->getMaskElt(2), 2) &&
- isUndefOrEqual(N->getMaskElt(3), 3);
+ return isUndefOrEqual(Mask[0], 2) &&
+ isUndefOrEqual(Mask[1], 3) &&
+ isUndefOrEqual(Mask[2], 2) &&
+ isUndefOrEqual(Mask[3], 3);
}
/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
-bool X86::isMOVLPMask(ShuffleVectorSDNode *N) {
- unsigned NumElems = N->getValueType(0).getVectorNumElements();
+static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) {
+ if (VT.getSizeInBits() != 128)
+ return false;
+
+ unsigned NumElems = VT.getVectorNumElements();
if (NumElems != 2 && NumElems != 4)
return false;
- for (unsigned i = 0; i < NumElems/2; ++i)
- if (!isUndefOrEqual(N->getMaskElt(i), i + NumElems))
+ for (unsigned i = 0; i != NumElems/2; ++i)
+ if (!isUndefOrEqual(Mask[i], i + NumElems))
return false;
- for (unsigned i = NumElems/2; i < NumElems; ++i)
- if (!isUndefOrEqual(N->getMaskElt(i), i))
+ for (unsigned i = NumElems/2; i != NumElems; ++i)
+ if (!isUndefOrEqual(Mask[i], i))
return false;
return true;
@@ -3484,19 +3430,19 @@ bool X86::isMOVLPMask(ShuffleVectorSDNode *N) {
/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLHPS.
-bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) {
- unsigned NumElems = N->getValueType(0).getVectorNumElements();
+static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) {
+ unsigned NumElems = VT.getVectorNumElements();
if ((NumElems != 2 && NumElems != 4)
- || N->getValueType(0).getSizeInBits() > 128)
+ || VT.getSizeInBits() > 128)
return false;
- for (unsigned i = 0; i < NumElems/2; ++i)
- if (!isUndefOrEqual(N->getMaskElt(i), i))
+ for (unsigned i = 0; i != NumElems/2; ++i)
+ if (!isUndefOrEqual(Mask[i], i))
return false;
- for (unsigned i = 0; i < NumElems/2; ++i)
- if (!isUndefOrEqual(N->getMaskElt(i + NumElems/2), i + NumElems))
+ for (unsigned i = 0; i != NumElems/2; ++i)
+ if (!isUndefOrEqual(Mask[i + NumElems/2], i + NumElems))
return false;
return true;
@@ -3504,14 +3450,15 @@ bool X86::isMOVLHPSMask(ShuffleVectorSDNode *N) {
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
-static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT,
- bool V2IsSplat = false) {
- int NumElts = VT.getVectorNumElements();
+static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT,
+ bool HasAVX2, bool V2IsSplat = false) {
+ unsigned NumElts = VT.getVectorNumElements();
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Unsupported vector type for unpckh");
- if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8)
+ if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 &&
+ (!HasAVX2 || (NumElts != 16 && NumElts != 32)))
return false;
// Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
@@ -3519,11 +3466,9 @@ static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT,
unsigned NumLanes = VT.getSizeInBits()/128;
unsigned NumLaneElts = NumElts/NumLanes;
- unsigned Start = 0;
- unsigned End = NumLaneElts;
- for (unsigned s = 0; s < NumLanes; ++s) {
- for (unsigned i = Start, j = s * NumLaneElts;
- i != End;
+ for (unsigned l = 0; l != NumLanes; ++l) {
+ for (unsigned i = l*NumLaneElts, j = l*NumLaneElts;
+ i != (l+1)*NumLaneElts;
i += 2, ++j) {
int BitI = Mask[i];
int BitI1 = Mask[i+1];
@@ -3537,30 +3482,22 @@ static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT,
return false;
}
}
- // Process the next 128 bits.
- Start += NumLaneElts;
- End += NumLaneElts;
}
return true;
}
-bool X86::isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isUNPCKLMask(M, N->getValueType(0), V2IsSplat);
-}
-
/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
-static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT,
- bool V2IsSplat = false) {
- int NumElts = VT.getVectorNumElements();
+static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT,
+ bool HasAVX2, bool V2IsSplat = false) {
+ unsigned NumElts = VT.getVectorNumElements();
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Unsupported vector type for unpckh");
- if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8)
+ if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 &&
+ (!HasAVX2 || (NumElts != 16 && NumElts != 32)))
return false;
// Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
@@ -3568,11 +3505,9 @@ static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT,
unsigned NumLanes = VT.getSizeInBits()/128;
unsigned NumLaneElts = NumElts/NumLanes;
- unsigned Start = 0;
- unsigned End = NumLaneElts;
for (unsigned l = 0; l != NumLanes; ++l) {
- for (unsigned i = Start, j = (l*NumLaneElts)+NumLaneElts/2;
- i != End; i += 2, ++j) {
+ for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2;
+ i != (l+1)*NumLaneElts; i += 2, ++j) {
int BitI = Mask[i];
int BitI1 = Mask[i+1];
if (!isUndefOrEqual(BitI, j))
@@ -3585,42 +3520,39 @@ static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT,
return false;
}
}
- // Process the next 128 bits.
- Start += NumLaneElts;
- End += NumLaneElts;
}
return true;
}
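// Example interleave masks (illustrative): v4i32 UNPCKL accepts <0, 4, 1, 5>;
// with AVX2, the v8i32 form repeats the pattern independently per lane:
//
//   int M[] = {0, 8, 1, 9, 4, 12, 5, 13};  // unpacklo in lane 0 and lane 1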
-bool X86::isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isUNPCKHMask(M, N->getValueType(0), V2IsSplat);
-}
-
/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>
-static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
- int NumElems = VT.getVectorNumElements();
- if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
+static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT,
+ bool HasAVX2) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ "Unsupported vector type for unpckh");
+
+ if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 &&
+ (!HasAVX2 || (NumElts != 16 && NumElts != 32)))
return false;
// For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
// FIXME: Need a better way to get rid of this, there's no latency difference
// between UNPCKLPD and MOVDDUP, the latter should always be checked first and
// the former later. We should also remove the "_undef" special mask.
- if (NumElems == 4 && VT.getSizeInBits() == 256)
+ if (NumElts == 4 && VT.getSizeInBits() == 256)
return false;
// Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
// independently on 128-bit lanes.
- unsigned NumLanes = VT.getSizeInBits() / 128;
- unsigned NumLaneElts = NumElems / NumLanes;
+ unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLaneElts = NumElts/NumLanes;
- for (unsigned s = 0; s < NumLanes; ++s) {
- for (unsigned i = s * NumLaneElts, j = s * NumLaneElts;
- i != NumLaneElts * (s + 1);
+ for (unsigned l = 0; l != NumLanes; ++l) {
+ for (unsigned i = l*NumLaneElts, j = l*NumLaneElts;
+ i != (l+1)*NumLaneElts;
i += 2, ++j) {
int BitI = Mask[i];
int BitI1 = Mask[i+1];
@@ -3635,81 +3567,77 @@ static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
return true;
}
-bool X86::isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isUNPCKL_v_undef_Mask(M, N->getValueType(0));
-}
-
/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>
-static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
- int NumElems = VT.getVectorNumElements();
- if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
+static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) {
+ unsigned NumElts = VT.getVectorNumElements();
+
+ assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ "Unsupported vector type for unpckh");
+
+ if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 &&
+ (!HasAVX2 || (NumElts != 16 && NumElts != 32)))
return false;
- for (int i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
- int BitI = Mask[i];
- int BitI1 = Mask[i+1];
- if (!isUndefOrEqual(BitI, j))
- return false;
- if (!isUndefOrEqual(BitI1, j))
- return false;
+ // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
+ // independently on 128-bit lanes.
+ unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLaneElts = NumElts/NumLanes;
+
+ for (unsigned l = 0; l != NumLanes; ++l) {
+ for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2;
+ i != (l+1)*NumLaneElts; i += 2, ++j) {
+ int BitI = Mask[i];
+ int BitI1 = Mask[i+1];
+ if (!isUndefOrEqual(BitI, j))
+ return false;
+ if (!isUndefOrEqual(BitI1, j))
+ return false;
+ }
}
return true;
}
-bool X86::isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isUNPCKH_v_undef_Mask(M, N->getValueType(0));
-}
-
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
-static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) {
+static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
if (VT.getVectorElementType().getSizeInBits() < 32)
return false;
+ if (VT.getSizeInBits() == 256)
+ return false;
- int NumElts = VT.getVectorNumElements();
+ unsigned NumElts = VT.getVectorNumElements();
if (!isUndefOrEqual(Mask[0], NumElts))
return false;
- for (int i = 1; i < NumElts; ++i)
+ for (unsigned i = 1; i != NumElts; ++i)
if (!isUndefOrEqual(Mask[i], i))
return false;
return true;
}
-bool X86::isMOVLMask(ShuffleVectorSDNode *N) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return ::isMOVLMask(M, N->getValueType(0));
-}
-
-/// isVPERM2F128Mask - Match 256-bit shuffles where the elements are considered
+/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
/// as permutations between 128-bit chunks or halves. As an example: this
/// shuffle bellow:
/// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
/// The first half comes from the second half of V1 and the second half from the
/// the second half of V2.
-static bool isVPERM2F128Mask(const SmallVectorImpl<int> &Mask, EVT VT,
- const X86Subtarget *Subtarget) {
- if (!Subtarget->hasAVX() || VT.getSizeInBits() != 256)
+static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
+ if (!HasAVX || VT.getSizeInBits() != 256)
return false;
// The shuffle result is divided into half A and half B. In total the two
// sources have 4 halves, namely: C, D, E, F. The final values of A and
// B must come from C, D, E or F.
- int HalfSize = VT.getVectorNumElements()/2;
+ unsigned HalfSize = VT.getVectorNumElements()/2;
bool MatchA = false, MatchB = false;
// Check if A comes from one of C, D, E, F.
- for (int Half = 0; Half < 4; ++Half) {
+ for (unsigned Half = 0; Half != 4; ++Half) {
if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
MatchA = true;
break;
@@ -3717,7 +3645,7 @@ static bool isVPERM2F128Mask(const SmallVectorImpl<int> &Mask, EVT VT,
}
// Check if B comes from one of C, D, E, F.
- for (int Half = 0; Half < 4; ++Half) {
+ for (unsigned Half = 0; Half != 4; ++Half) {
if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
MatchB = true;
break;
@@ -3727,22 +3655,21 @@ static bool isVPERM2F128Mask(const SmallVectorImpl<int> &Mask, EVT VT,
return MatchA && MatchB;
}
-/// getShuffleVPERM2F128Immediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_MASK mask with VPERM2F128 instructions.
-static unsigned getShuffleVPERM2F128Immediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
+/// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
+static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
EVT VT = SVOp->getValueType(0);
- int HalfSize = VT.getVectorNumElements()/2;
+ unsigned HalfSize = VT.getVectorNumElements()/2;
- int FstHalf = 0, SndHalf = 0;
- for (int i = 0; i < HalfSize; ++i) {
+ unsigned FstHalf = 0, SndHalf = 0;
+ for (unsigned i = 0; i < HalfSize; ++i) {
if (SVOp->getMaskElt(i) > 0) {
FstHalf = SVOp->getMaskElt(i)/HalfSize;
break;
}
}
- for (int i = HalfSize; i < HalfSize*2; ++i) {
+ for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
if (SVOp->getMaskElt(i) > 0) {
SndHalf = SVOp->getMaskElt(i)/HalfSize;
break;
@@ -3752,141 +3679,56 @@ static unsigned getShuffleVPERM2F128Immediate(SDNode *N) {
return (FstHalf | (SndHalf << 4));
}
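// Worked example tying the two routines above together: for the v8i32 mask
// <4, 5, 6, 7, 12, 13, 14, 15>, half A is V1's high 128 bits (chunk 1) and
// half B is V2's high 128 bits (chunk 3), so
//
//   unsigned Imm = FstHalf | (SndHalf << 4);   // 1 | (3 << 4) == 0x31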
-/// isVPERMILPDMask - Return true if the specified VECTOR_SHUFFLE operand
+/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
/// Note that VPERMIL mask matching is different depending on whether the underlying
/// type is 32 or 64. In the VPERMILPS the high half of the mask should point
/// to the same elements of the low, but to the higher half of the source.
/// In VPERMILPD the two lanes could be shuffled independently of each other
-/// with the same restriction that lanes can't be crossed.
-static bool isVPERMILPDMask(const SmallVectorImpl<int> &Mask, EVT VT,
- const X86Subtarget *Subtarget) {
- int NumElts = VT.getVectorNumElements();
- int NumLanes = VT.getSizeInBits()/128;
-
- if (!Subtarget->hasAVX())
- return false;
-
- // Only match 256-bit with 64-bit types
- if (VT.getSizeInBits() != 256 || NumElts != 4)
+/// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
+static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
+ if (!HasAVX)
return false;
- // The mask on the high lane is independent of the low. Both can match
- // any element in inside its own lane, but can't cross.
- int LaneSize = NumElts/NumLanes;
- for (int l = 0; l < NumLanes; ++l)
- for (int i = l*LaneSize; i < LaneSize*(l+1); ++i) {
- int LaneStart = l*LaneSize;
- if (!isUndefOrInRange(Mask[i], LaneStart, LaneStart+LaneSize))
- return false;
- }
-
- return true;
-}
-
-/// isVPERMILPSMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to VPERMILPS*.
-/// Note that VPERMIL mask matching is different depending whether theunderlying
-/// type is 32 or 64. In the VPERMILPS the high half of the mask should point
-/// to the same elements of the low, but to the higher half of the source.
-/// In VPERMILPD the two lanes could be shuffled independently of each other
-/// with the same restriction that lanes can't be crossed.
-static bool isVPERMILPSMask(const SmallVectorImpl<int> &Mask, EVT VT,
- const X86Subtarget *Subtarget) {
unsigned NumElts = VT.getVectorNumElements();
- unsigned NumLanes = VT.getSizeInBits()/128;
-
- if (!Subtarget->hasAVX())
+ // Only match 256-bit with 32/64-bit types
+ if (VT.getSizeInBits() != 256 || (NumElts != 4 && NumElts != 8))
return false;
- // Only match 256-bit with 32-bit types
- if (VT.getSizeInBits() != 256 || NumElts != 8)
- return false;
-
- // The mask on the high lane should be the same as the low. Actually,
- // they can differ if any of the corresponding index in a lane is undef
- // and the other stays in range.
- int LaneSize = NumElts/NumLanes;
- for (int i = 0; i < LaneSize; ++i) {
- int HighElt = i+LaneSize;
- bool HighValid = isUndefOrInRange(Mask[HighElt], LaneSize, NumElts);
- bool LowValid = isUndefOrInRange(Mask[i], 0, LaneSize);
-
- if (!HighValid || !LowValid)
- return false;
- if (Mask[i] < 0 || Mask[HighElt] < 0)
- continue;
- if (Mask[HighElt]-Mask[i] != LaneSize)
- return false;
- }
-
- return true;
-}
-
-/// getShuffleVPERMILPSImmediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_MASK mask with VPERMILPS* instructions.
-static unsigned getShuffleVPERMILPSImmediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- EVT VT = SVOp->getValueType(0);
-
- int NumElts = VT.getVectorNumElements();
- int NumLanes = VT.getSizeInBits()/128;
- int LaneSize = NumElts/NumLanes;
-
- // Although the mask is equal for both lanes do it twice to get the cases
- // where a mask will match because the same mask element is undef on the
- // first half but valid on the second. This would get pathological cases
- // such as: shuffle <u, 0, 1, 2, 4, 4, 5, 6>, which is completely valid.
- unsigned Mask = 0;
- for (int l = 0; l < NumLanes; ++l) {
- for (int i = 0; i < LaneSize; ++i) {
- int MaskElt = SVOp->getMaskElt(i+(l*LaneSize));
- if (MaskElt < 0)
+ unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned LaneSize = NumElts/NumLanes;
+ for (unsigned l = 0; l != NumElts; l += LaneSize) {
+ for (unsigned i = 0; i != LaneSize; ++i) {
+ if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
+ return false;
+ if (NumElts != 8 || l == 0)
continue;
- if (MaskElt >= LaneSize)
- MaskElt -= LaneSize;
- Mask |= MaskElt << (i*2);
- }
- }
-
- return Mask;
-}
-
-/// getShuffleVPERMILPDImmediate - Return the appropriate immediate to shuffle
-/// the specified VECTOR_MASK mask with VPERMILPD* instructions.
-static unsigned getShuffleVPERMILPDImmediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- EVT VT = SVOp->getValueType(0);
-
- int NumElts = VT.getVectorNumElements();
- int NumLanes = VT.getSizeInBits()/128;
-
- unsigned Mask = 0;
- int LaneSize = NumElts/NumLanes;
- for (int l = 0; l < NumLanes; ++l)
- for (int i = l*LaneSize; i < LaneSize*(l+1); ++i) {
- int MaskElt = SVOp->getMaskElt(i);
- if (MaskElt < 0)
+ // VPERMILPS handling
+ if (Mask[i] < 0)
continue;
- Mask |= (MaskElt-l*LaneSize) << i;
+ if (!isUndefOrEqual(Mask[i+l], Mask[i]+l))
+ return false;
}
+ }
- return Mask;
+ return true;
}
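// Illustrative v8f32 mask accepted above: elements stay inside their own
// 128-bit lane and the high lane mirrors the low one (Mask[i+4] == Mask[i]+4):
//
//   int M[] = {1, 0, 3, 2, 5, 4, 7, 6};    // VPERMILPS immediate 0xB1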
-/// isCommutedMOVL - Returns true if the shuffle mask is except the reverse
+/// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
/// what x86 movss wants. X86 movss requires the lowest element to be the
/// lowest element of vector 2 and the other elements to come from vector 1
/// in order.
-static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT,
+static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT,
bool V2IsSplat = false, bool V2IsUndef = false) {
- int NumOps = VT.getVectorNumElements();
+ unsigned NumOps = VT.getVectorNumElements();
+ if (VT.getSizeInBits() == 256)
+ return false;
if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
return false;
if (!isUndefOrEqual(Mask[0], 0))
return false;
- for (int i = 1; i < NumOps; ++i)
+ for (unsigned i = 1; i != NumOps; ++i)
if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
(V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
(V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
@@ -3895,26 +3737,14 @@ static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT,
return true;
}
-static bool isCommutedMOVL(ShuffleVectorSDNode *N, bool V2IsSplat = false,
- bool V2IsUndef = false) {
- SmallVector<int, 8> M;
- N->getMask(M);
- return isCommutedMOVLMask(M, N->getValueType(0), V2IsSplat, V2IsUndef);
-}
-
/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
/// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
-bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N,
- const X86Subtarget *Subtarget) {
- if (!Subtarget->hasSSE3() && !Subtarget->hasAVX())
- return false;
-
- // The second vector must be undef
- if (N->getOperand(1).getOpcode() != ISD::UNDEF)
+static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT,
+ const X86Subtarget *Subtarget) {
+ if (!Subtarget->hasSSE3())
return false;
- EVT VT = N->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
@@ -3922,9 +3752,9 @@ bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N,
return false;
// "i+1" is the value the indexed mask element must have
- for (unsigned i = 0; i < NumElems; i += 2)
- if (!isUndefOrEqual(N->getMaskElt(i), i+1) ||
- !isUndefOrEqual(N->getMaskElt(i+1), i+1))
+ for (unsigned i = 0; i != NumElems; i += 2)
+ if (!isUndefOrEqual(Mask[i], i+1) ||
+ !isUndefOrEqual(Mask[i+1], i+1))
return false;
return true;
@@ -3933,16 +3763,11 @@ bool X86::isMOVSHDUPMask(ShuffleVectorSDNode *N,
/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
/// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
-bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N,
- const X86Subtarget *Subtarget) {
- if (!Subtarget->hasSSE3() && !Subtarget->hasAVX())
- return false;
-
- // The second vector must be undef
- if (N->getOperand(1).getOpcode() != ISD::UNDEF)
+static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT,
+ const X86Subtarget *Subtarget) {
+ if (!Subtarget->hasSSE3())
return false;
- EVT VT = N->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
if ((VT.getSizeInBits() == 128 && NumElems != 4) ||
@@ -3950,9 +3775,9 @@ bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N,
return false;
// "i" is the value the indexed mask element must have
- for (unsigned i = 0; i < NumElems; i += 2)
- if (!isUndefOrEqual(N->getMaskElt(i), i) ||
- !isUndefOrEqual(N->getMaskElt(i+1), i))
+ for (unsigned i = 0; i != NumElems; i += 2)
+ if (!isUndefOrEqual(Mask[i], i) ||
+ !isUndefOrEqual(Mask[i+1], i))
return false;
return true;
@@ -3961,21 +3786,17 @@ bool X86::isMOVSLDUPMask(ShuffleVectorSDNode *N,
/// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to the 256-bit
/// version of MOVDDUP.
-static bool isMOVDDUPYMask(ShuffleVectorSDNode *N,
- const X86Subtarget *Subtarget) {
- EVT VT = N->getValueType(0);
- int NumElts = VT.getVectorNumElements();
- bool V2IsUndef = N->getOperand(1).getOpcode() == ISD::UNDEF;
+static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
+ unsigned NumElts = VT.getVectorNumElements();
- if (!Subtarget->hasAVX() || VT.getSizeInBits() != 256 ||
- !V2IsUndef || NumElts != 4)
+ if (!HasAVX || VT.getSizeInBits() != 256 || NumElts != 4)
return false;
- for (int i = 0; i != NumElts/2; ++i)
- if (!isUndefOrEqual(N->getMaskElt(i), 0))
+ for (unsigned i = 0; i != NumElts/2; ++i)
+ if (!isUndefOrEqual(Mask[i], 0))
return false;
- for (int i = NumElts/2; i != NumElts; ++i)
- if (!isUndefOrEqual(N->getMaskElt(i), NumElts/2))
+ for (unsigned i = NumElts/2; i != NumElts; ++i)
+ if (!isUndefOrEqual(Mask[i], NumElts/2))
return false;
return true;
}
@@ -3983,18 +3804,16 @@ static bool isMOVDDUPYMask(ShuffleVectorSDNode *N,
/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to the 128-bit
/// version of MOVDDUP.
-bool X86::isMOVDDUPMask(ShuffleVectorSDNode *N) {
- EVT VT = N->getValueType(0);
-
+static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) {
if (VT.getSizeInBits() != 128)
return false;
- int e = VT.getVectorNumElements() / 2;
- for (int i = 0; i < e; ++i)
- if (!isUndefOrEqual(N->getMaskElt(i), i))
+ unsigned e = VT.getVectorNumElements() / 2;
+ for (unsigned i = 0; i != e; ++i)
+ if (!isUndefOrEqual(Mask[i], i))
return false;
- for (int i = 0; i < e; ++i)
- if (!isUndefOrEqual(N->getMaskElt(e+i), i))
+ for (unsigned i = 0; i != e; ++i)
+ if (!isUndefOrEqual(Mask[e+i], i))
return false;
return true;
}
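
All of these mask predicates lean on the same small helper, which treats a
negative mask value as an undef (don't-care) element. A minimal sketch,
matching the semantics the checks above assume (-1 marks undef):

static bool isUndefOrEqual(int Val, int CmpVal) {
  // Undef elements match anything; otherwise require an exact index match.
  return Val < 0 || Val == CmpVal;
}

Under that convention the canonical 128-bit MOVDDUP mask for v2f64 is <0, 0>,
and variants such as <-1, 0> are accepted as well.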
@@ -4039,31 +3858,43 @@ bool X86::isVINSERTF128Index(SDNode *N) {
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
-unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- int NumOperands = SVOp->getValueType(0).getVectorNumElements();
+/// Handles 128-bit and 256-bit.
+static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
+ EVT VT = N->getValueType(0);
+
+ assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ "Unsupported vector type for PSHUF/SHUFP");
- unsigned Shift = (NumOperands == 4) ? 2 : 1;
+ // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
+ // independently on 128-bit lanes.
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLaneElts = NumElts/NumLanes;
+
+ assert((NumLaneElts == 2 || NumLaneElts == 4) &&
+ "Only supports 2 or 4 elements per lane");
+
+ unsigned Shift = (NumLaneElts == 4) ? 1 : 0;
unsigned Mask = 0;
- for (int i = 0; i < NumOperands; ++i) {
- int Val = SVOp->getMaskElt(NumOperands-i-1);
- if (Val < 0) Val = 0;
- if (Val >= NumOperands) Val -= NumOperands;
- Mask |= Val;
- if (i != NumOperands - 1)
- Mask <<= Shift;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int Elt = N->getMaskElt(i);
+ if (Elt < 0) continue;
+ Elt %= NumLaneElts;
+ unsigned ShAmt = i << Shift;
+ if (ShAmt >= 8) ShAmt -= 8;
+ Mask |= Elt << ShAmt;
}
+
return Mask;
}
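
To make the bit packing concrete, here is a standalone rendition of the
per-lane computation (packShufImm4 is an illustrative name, not from the
tree): each element of a 4-element lane contributes two bits, with element i
landing at bit position 2*i.

static unsigned packShufImm4(const int Mask[4]) {
  unsigned Imm = 0;
  for (unsigned i = 0; i != 4; ++i)
    if (Mask[i] >= 0)
      Imm |= (Mask[i] & 3) << (i * 2);  // undef elements contribute nothing
  return Imm;
}

For the mask <2, 1, 0, 3> this yields 0xC6 (binary 11000110), the classic
shufps immediate.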
/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
-unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
unsigned Mask = 0;
// 8 nodes, but we only care about the last 4.
for (unsigned i = 7; i >= 4; --i) {
- int Val = SVOp->getMaskElt(i);
+ int Val = N->getMaskElt(i);
if (Val >= 0)
Mask |= (Val - 4);
if (i != 4)
@@ -4074,12 +3905,11 @@ unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
-unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
unsigned Mask = 0;
// 8 nodes, but we only care about the first 4.
for (int i = 3; i >= 0; --i) {
- int Val = SVOp->getMaskElt(i);
+ int Val = N->getMaskElt(i);
if (Val >= 0)
Mask |= Val;
if (i != 0)
@@ -4090,18 +3920,24 @@ unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
/// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
-unsigned X86::getShufflePALIGNRImmediate(SDNode *N) {
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- EVT VVT = N->getValueType(0);
- unsigned EltSize = VVT.getVectorElementType().getSizeInBits() >> 3;
- int Val = 0;
+static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
+ EVT VT = SVOp->getValueType(0);
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3;
+
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLaneElts = NumElts/NumLanes;
- unsigned i, e;
- for (i = 0, e = VVT.getVectorNumElements(); i != e; ++i) {
+ int Val = 0;
+ unsigned i;
+ for (i = 0; i != NumElts; ++i) {
Val = SVOp->getMaskElt(i);
if (Val >= 0)
break;
}
+ if (Val >= (int)NumElts)
+ Val -= NumElts - NumLaneElts;
+
assert(Val - i > 0 && "PALIGNR imm should be positive");
return (Val - i) * EltSize;
}
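
The PALIGNR immediate is a byte count: the value of the first defined mask
element, minus its position, times the element size. A toy rendition under
the same assumption as the assert above (at least one defined element), and
ignoring the 256-bit lane adjustment:

static unsigned palignrImm(const int *Mask, unsigned EltBytes) {
  unsigned i = 0;
  while (Mask[i] < 0)
    ++i;                               // skip leading undef elements
  return (Mask[i] - i) * EltBytes;     // <1,2,...,8> on v8i16 gives 2 bytes
}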
@@ -4170,36 +4006,20 @@ static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
SVOp->getOperand(0), &MaskVec[0]);
}
-/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
-/// the two vector operands have swapped position.
-static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, EVT VT) {
- unsigned NumElems = VT.getVectorNumElements();
- for (unsigned i = 0; i != NumElems; ++i) {
- int idx = Mask[i];
- if (idx < 0)
- continue;
- else if (idx < (int)NumElems)
- Mask[i] = idx + NumElems;
- else
- Mask[i] = idx - NumElems;
- }
-}
-
/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from the upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
-static bool ShouldXformToMOVHLPS(ShuffleVectorSDNode *Op) {
- EVT VT = Op->getValueType(0);
+static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) {
if (VT.getSizeInBits() != 128)
return false;
if (VT.getVectorNumElements() != 4)
return false;
for (unsigned i = 0, e = 2; i != e; ++i)
- if (!isUndefOrEqual(Op->getMaskElt(i), i+2))
+ if (!isUndefOrEqual(Mask[i], i+2))
return false;
for (unsigned i = 2; i != 4; ++i)
- if (!isUndefOrEqual(Op->getMaskElt(i), i+4))
+ if (!isUndefOrEqual(Mask[i], i+4))
return false;
return true;
}
@@ -4218,14 +4038,36 @@ static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
return true;
}
+// Test whether the given value is a vector value which will be legalized
+// into a load.
+static bool WillBeConstantPoolLoad(SDNode *N) {
+ if (N->getOpcode() != ISD::BUILD_VECTOR)
+ return false;
+
+ // Check for any non-constant elements.
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ switch (N->getOperand(i).getNode()->getOpcode()) {
+ case ISD::UNDEF:
+ case ISD::ConstantFP:
+ case ISD::Constant:
+ break;
+ default:
+ return false;
+ }
+
+ // Vectors of all-zeros and all-ones are materialized with special
+ // instructions rather than being loaded.
+ return !ISD::isBuildVectorAllZeros(N) &&
+ !ISD::isBuildVectorAllOnes(N);
+}
+
/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from the lower half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order). And since V1 will become the source of the
/// MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
- ShuffleVectorSDNode *Op) {
- EVT VT = Op->getValueType(0);
+ ArrayRef<int> Mask, EVT VT) {
if (VT.getSizeInBits() != 128)
return false;
@@ -4233,7 +4075,7 @@ static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
return false;
// If V2 is a vector load, don't do this transformation. We will try to use
// a load-folding shufps op instead.
- if (ISD::isNON_EXTLoad(V2))
+ if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
return false;
unsigned NumElems = VT.getVectorNumElements();
@@ -4241,10 +4083,10 @@ static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
if (NumElems != 2 && NumElems != 4)
return false;
for (unsigned i = 0, e = NumElems/2; i != e; ++i)
- if (!isUndefOrEqual(Op->getMaskElt(i), i))
+ if (!isUndefOrEqual(Mask[i], i))
return false;
for (unsigned i = NumElems/2; i != NumElems; ++i)
- if (!isUndefOrEqual(Op->getMaskElt(i), i+NumElems))
+ if (!isUndefOrEqual(Mask[i], i+NumElems))
return false;
return true;
}
@@ -4292,15 +4134,15 @@ static bool isZeroShuffle(ShuffleVectorSDNode *N) {
/// getZeroVector - Returns a vector of specified type with all zero elements.
///
-static SDValue getZeroVector(EVT VT, bool HasXMMInt, SelectionDAG &DAG,
- DebugLoc dl) {
+static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG, DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
// Always build SSE zero vectors as <4 x i32> bitcasted
// to their dest type. This ensures they get CSE'd.
SDValue Vec;
if (VT.getSizeInBits() == 128) { // SSE
- if (HasXMMInt) { // SSE2
+ if (Subtarget->hasSSE2()) { // SSE2
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
} else { // SSE1
@@ -4308,34 +4150,46 @@ static SDValue getZeroVector(EVT VT, bool HasXMMInt, SelectionDAG &DAG,
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
}
} else if (VT.getSizeInBits() == 256) { // AVX
- // 256-bit logic and arithmetic instructions in AVX are
- // all floating-point, no support for integer ops. Default
- // to emitting fp zeroed vectors then.
- SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
- SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
+ if (Subtarget->hasAVX2()) { // AVX2
+ SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
+ SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8);
+ } else {
+ // 256-bit logic and arithmetic instructions in AVX are all
+ // floating-point, no support for integer ops. Emit fp zeroed vectors.
+ SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
+ SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
+ }
}
return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}
/// getOnesVector - Returns a vector of specified type with all bits set.
-/// Always build ones vectors as <4 x i32>. For 256-bit types, use two
-/// <4 x i32> inserted in a <8 x i32> appropriately. Then bitcast to their
-/// original type, ensuring they get CSE'd.
-static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
+/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
+/// no AVX2 support, use two <4 x i32> values inserted into an <8 x i32> appropriately.
+/// Then bitcast to their original type, ensuring they get CSE'd.
+static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG,
+ DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
assert((VT.is128BitVector() || VT.is256BitVector())
&& "Expected a 128-bit or 256-bit vector type");
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
- SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
- Cst, Cst, Cst, Cst);
-
- if (VT.is256BitVector()) {
- SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, MVT::v8i32),
- Vec, DAG.getConstant(0, MVT::i32), DAG, dl);
- Vec = Insert128BitVector(InsV, Vec,
- DAG.getConstant(4 /* NumElems/2 */, MVT::i32), DAG, dl);
+ SDValue Vec;
+ if (VT.getSizeInBits() == 256) {
+ if (HasAVX2) { // AVX2
+ SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8);
+ } else { // AVX
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
+ SDValue InsV = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, MVT::v8i32),
+ Vec, DAG.getConstant(0, MVT::i32), DAG, dl);
+ Vec = Insert128BitVector(InsV, Vec,
+ DAG.getConstant(4 /* NumElems/2 */, MVT::i32), DAG, dl);
+ }
+ } else {
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
}
return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
@@ -4343,24 +4197,12 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 points to its first element.
-static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
- EVT VT = SVOp->getValueType(0);
- unsigned NumElems = VT.getVectorNumElements();
-
- bool Changed = false;
- SmallVector<int, 8> MaskVec;
- SVOp->getMask(MaskVec);
-
+static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
for (unsigned i = 0; i != NumElems; ++i) {
- if (MaskVec[i] > (int)NumElems) {
- MaskVec[i] = NumElems;
- Changed = true;
+ if (Mask[i] > (int)NumElems) {
+ Mask[i] = NumElems;
}
}
- if (Changed)
- return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(0),
- SVOp->getOperand(1), &MaskVec[0]);
- return SDValue(SVOp, 0);
}
/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
@@ -4464,7 +4306,7 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
// Extract the 128-bit part containing the splat element and update
// the splat element index when it refers to the higher register.
if (Size == 256) {
- unsigned Idx = (EltNo > NumElems/2) ? NumElems/2 : 0;
+ unsigned Idx = (EltNo >= NumElems/2) ? NumElems/2 : 0;
V1 = Extract128BitVector(V1, DAG.getConstant(Idx, MVT::i32), DAG, dl);
if (Idx > 0)
EltNo -= NumElems/2;
@@ -4496,11 +4338,12 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
- bool isZero, bool HasXMMInt,
+ bool IsZero,
+ const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
EVT VT = V2.getValueType();
- SDValue V1 = isZero
- ? getZeroVector(VT, HasXMMInt, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
+ SDValue V1 = IsZero
+ ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 16> MaskVec;
for (unsigned i = 0; i != NumElems; ++i)
@@ -4509,9 +4352,81 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
}
+/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
+/// target specific opcode. Returns true if the Mask could be calculated.
+/// Sets IsUnary to true if only uses one source.
+static bool getTargetShuffleMask(SDNode *N, EVT VT,
+ SmallVectorImpl<int> &Mask, bool &IsUnary) {
+ unsigned NumElems = VT.getVectorNumElements();
+ SDValue ImmN;
+
+ IsUnary = false;
+ switch(N->getOpcode()) {
+ case X86ISD::SHUFP:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ break;
+ case X86ISD::UNPCKH:
+ DecodeUNPCKHMask(VT, Mask);
+ break;
+ case X86ISD::UNPCKL:
+ DecodeUNPCKLMask(VT, Mask);
+ break;
+ case X86ISD::MOVHLPS:
+ DecodeMOVHLPSMask(NumElems, Mask);
+ break;
+ case X86ISD::MOVLHPS:
+ DecodeMOVLHPSMask(NumElems, Mask);
+ break;
+ case X86ISD::PSHUFD:
+ case X86ISD::VPERMILP:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ IsUnary = true;
+ break;
+ case X86ISD::PSHUFHW:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ IsUnary = true;
+ break;
+ case X86ISD::PSHUFLW:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ IsUnary = true;
+ break;
+ case X86ISD::MOVSS:
+ case X86ISD::MOVSD: {
+ // Index 0 always comes from the first element of the second source;
+ // this is why MOVSS and MOVSD are used in the first place. The other
+ // elements come from the corresponding positions of the first source.
+ Mask.push_back(NumElems);
+ for (unsigned i = 1; i != NumElems; ++i) {
+ Mask.push_back(i);
+ }
+ break;
+ }
+ case X86ISD::VPERM2X128:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
+ break;
+ case X86ISD::MOVDDUP:
+ case X86ISD::MOVLHPD:
+ case X86ISD::MOVLPD:
+ case X86ISD::MOVLPS:
+ case X86ISD::MOVSHDUP:
+ case X86ISD::MOVSLDUP:
+ case X86ISD::PALIGN:
+ // Not yet implemented
+ return false;
+ default: llvm_unreachable("unknown target shuffle node");
+ }
+
+ return true;
+}
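
As a worked example of the immediate-driven cases: for a v4f32 X86ISD::SHUFP
node the decoder expands the 8-bit immediate with the low two result elements
selecting from the first source and the high two from the second. A sketch of
that expansion (decodeShufp4 is illustrative; the real logic lives in
DecodeSHUFPMask):

static void decodeShufp4(unsigned Imm, int Mask[4]) {
  Mask[0] = (Imm >> 0) & 3;        // from V1
  Mask[1] = (Imm >> 2) & 3;        // from V1
  Mask[2] = ((Imm >> 4) & 3) + 4;  // from V2, offset by NumElts
  Mask[3] = ((Imm >> 6) & 3) + 4;  // from V2
}

decodeShufp4(0xC6, M) produces <2, 1, 4, 7>, the inverse of the packing shown
earlier for getShuffleSHUFImmediate.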
+
/// getShuffleScalarElt - Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
-static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
+static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
unsigned Depth) {
if (Depth == 6)
return SDValue(); // Limit search depth.
@@ -4522,129 +4437,34 @@ static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
// Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
- Index = SV->getMaskElt(Index);
+ int Elt = SV->getMaskElt(Index);
- if (Index < 0)
+ if (Elt < 0)
return DAG.getUNDEF(VT.getVectorElementType());
- int NumElems = VT.getVectorNumElements();
- SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1);
- return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1);
+ unsigned NumElems = VT.getVectorNumElements();
+ SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
+ : SV->getOperand(1);
+ return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
}
// Recurse into target specific vector shuffles to find scalars.
if (isTargetShuffle(Opcode)) {
- int NumElems = VT.getVectorNumElements();
- SmallVector<unsigned, 16> ShuffleMask;
+ unsigned NumElems = VT.getVectorNumElements();
+ SmallVector<int, 16> ShuffleMask;
SDValue ImmN;
+ bool IsUnary;
- switch(Opcode) {
- case X86ISD::SHUFPS:
- case X86ISD::SHUFPD:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodeSHUFPSMask(NumElems,
- cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::PUNPCKHBW:
- case X86ISD::PUNPCKHWD:
- case X86ISD::PUNPCKHDQ:
- case X86ISD::PUNPCKHQDQ:
- DecodePUNPCKHMask(NumElems, ShuffleMask);
- break;
- case X86ISD::UNPCKHPS:
- case X86ISD::UNPCKHPD:
- case X86ISD::VUNPCKHPSY:
- case X86ISD::VUNPCKHPDY:
- DecodeUNPCKHPMask(NumElems, ShuffleMask);
- break;
- case X86ISD::PUNPCKLBW:
- case X86ISD::PUNPCKLWD:
- case X86ISD::PUNPCKLDQ:
- case X86ISD::PUNPCKLQDQ:
- DecodePUNPCKLMask(VT, ShuffleMask);
- break;
- case X86ISD::UNPCKLPS:
- case X86ISD::UNPCKLPD:
- case X86ISD::VUNPCKLPSY:
- case X86ISD::VUNPCKLPDY:
- DecodeUNPCKLPMask(VT, ShuffleMask);
- break;
- case X86ISD::MOVHLPS:
- DecodeMOVHLPSMask(NumElems, ShuffleMask);
- break;
- case X86ISD::MOVLHPS:
- DecodeMOVLHPSMask(NumElems, ShuffleMask);
- break;
- case X86ISD::PSHUFD:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodePSHUFMask(NumElems,
- cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::PSHUFHW:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::PSHUFLW:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::MOVSS:
- case X86ISD::MOVSD: {
- // The index 0 always comes from the first element of the second source,
- // this is why MOVSS and MOVSD are used in the first place. The other
- // elements come from the other positions of the first source vector.
- unsigned OpNum = (Index == 0) ? 1 : 0;
- return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG,
- Depth+1);
- }
- case X86ISD::VPERMILPS:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodeVPERMILPSMask(4, cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::VPERMILPSY:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodeVPERMILPSMask(8, cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::VPERMILPD:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodeVPERMILPDMask(2, cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::VPERMILPDY:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodeVPERMILPDMask(4, cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::VPERM2F128:
- ImmN = N->getOperand(N->getNumOperands()-1);
- DecodeVPERM2F128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(),
- ShuffleMask);
- break;
- case X86ISD::MOVDDUP:
- case X86ISD::MOVLHPD:
- case X86ISD::MOVLPD:
- case X86ISD::MOVLPS:
- case X86ISD::MOVSHDUP:
- case X86ISD::MOVSLDUP:
- case X86ISD::PALIGN:
- return SDValue(); // Not yet implemented.
- default:
- assert(0 && "unknown target shuffle node");
+ if (!getTargetShuffleMask(N, VT, ShuffleMask, IsUnary))
return SDValue();
- }
- Index = ShuffleMask[Index];
- if (Index < 0)
+ int Elt = ShuffleMask[Index];
+ if (Elt < 0)
return DAG.getUNDEF(VT.getVectorElementType());
- SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
- return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG,
+ SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
+ : N->getOperand(1);
+ return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
Depth+1);
}
@@ -4660,7 +4480,7 @@ static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
return (Index == 0) ? V.getOperand(0)
- : DAG.getUNDEF(VT.getVectorElementType());
+ : DAG.getUNDEF(VT.getVectorElementType());
if (V.getOpcode() == ISD::BUILD_VECTOR)
return V.getOperand(Index);
@@ -4672,38 +4492,37 @@ static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
/// shuffle operation which come consecutively from zero. The
/// search can start in two different directions, from left or right.
static
-unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems,
+unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, unsigned NumElems,
bool ZerosFromLeft, SelectionDAG &DAG) {
- int i = 0;
-
- while (i < NumElems) {
+ unsigned i;
+ for (i = 0; i != NumElems; ++i) {
unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
- SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0);
+ SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
if (!(Elt.getNode() &&
(Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
break;
- ++i;
}
return i;
}
-/// isShuffleMaskConsecutive - Check if the shuffle mask indicies from MaskI to
-/// MaskE correspond consecutively to elements from one of the vector operands,
+/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
+/// correspond consecutively to elements from one of the vector operands,
/// starting from its index OpIdx. Also tell OpNum which source vector operand.
static
-bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE,
- int OpIdx, int NumElems, unsigned &OpNum) {
+bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
+ unsigned MaskI, unsigned MaskE, unsigned OpIdx,
+ unsigned NumElems, unsigned &OpNum) {
bool SeenV1 = false;
bool SeenV2 = false;
- for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) {
+ for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
int Idx = SVOp->getMaskElt(i);
// Ignore undef indices
if (Idx < 0)
continue;
- if (Idx < NumElems)
+ if (Idx < (int)NumElems)
SeenV1 = true;
else
SeenV2 = true;
@@ -4738,7 +4557,7 @@ static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
//
if (!isShuffleMaskConsecutive(SVOp,
0, // Mask Start Index
- NumElems-NumZeros-1, // Mask End Index
+ NumElems-NumZeros, // Mask End Index(exclusive)
NumZeros, // Where to start looking in the src vector
NumElems, // Number of elements in vector
OpSrc)) // Which source operand ?
@@ -4771,7 +4590,7 @@ static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
//
if (!isShuffleMaskConsecutive(SVOp,
NumZeros, // Mask Start Index
- NumElems-1, // Mask End Index
+ NumElems, // Mask End Index(exclusive)
0, // Where to start looking in the src vector
NumElems, // Number of elements in vector
OpSrc)) // Which source operand ?
@@ -4804,6 +4623,7 @@ static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
+ const X86Subtarget* Subtarget,
const TargetLowering &TLI) {
if (NumNonZero > 8)
return SDValue();
@@ -4815,7 +4635,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
if (ThisIsNonZero && First) {
if (NumZero)
- V = getZeroVector(MVT::v8i16, true, DAG, dl);
+ V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
else
V = DAG.getUNDEF(MVT::v8i16);
First = false;
@@ -4851,6 +4671,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
+ const X86Subtarget* Subtarget,
const TargetLowering &TLI) {
if (NumNonZero > 4)
return SDValue();
@@ -4863,7 +4684,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
if (isNonZero) {
if (First) {
if (NumZero)
- V = getZeroVector(MVT::v8i16, true, DAG, dl);
+ V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
else
V = DAG.getUNDEF(MVT::v8i16);
First = false;
@@ -4884,7 +4705,7 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
const TargetLowering &TLI, DebugLoc dl) {
assert(VT.getSizeInBits() == 128 && "Unknown type for VShift");
EVT ShVT = MVT::v2i64;
- unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
+ unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(Opc, dl, ShVT, SrcOp,
@@ -4952,21 +4773,16 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
int EltNo = (Offset - StartOffset) >> 2;
int NumElems = VT.getVectorNumElements();
- EVT CanonVT = VT.getSizeInBits() == 128 ? MVT::v4i32 : MVT::v8i32;
EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
LD->getPointerInfo().getWithOffset(StartOffset),
- false, false, 0);
+ false, false, false, 0);
- // Canonicalize it to a v4i32 or v8i32 shuffle.
SmallVector<int, 8> Mask;
for (int i = 0; i < NumElems; ++i)
Mask.push_back(EltNo);
- V1 = DAG.getNode(ISD::BITCAST, dl, CanonVT, V1);
- return DAG.getNode(ISD::BITCAST, dl, NVT,
- DAG.getVectorShuffle(CanonVT, dl, V1,
- DAG.getUNDEF(CanonVT),&Mask[0]));
+ return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
}
return SDValue();
@@ -5021,11 +4837,12 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
LDBase->getPointerInfo(),
- LDBase->isVolatile(), LDBase->isNonTemporal(), 0);
+ LDBase->isVolatile(), LDBase->isNonTemporal(),
+ LDBase->isInvariant(), 0);
return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
LDBase->getPointerInfo(),
LDBase->isVolatile(), LDBase->isNonTemporal(),
- LDBase->getAlignment());
+ LDBase->isInvariant(), LDBase->getAlignment());
} else if (NumElems == 4 && LastLoadedElt == 1 &&
DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
@@ -5041,6 +4858,137 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
return SDValue();
}
+/// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
+/// to generate a splat value for the following cases:
+/// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
+/// 2. A splat shuffle that uses a scalar_to_vector node which comes from
+/// a scalar load, or a constant.
+/// The VBROADCAST node is returned when a pattern is found,
+/// or SDValue() otherwise.
+SDValue
+X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const {
+ if (!Subtarget->hasAVX())
+ return SDValue();
+
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+
+ SDValue Ld;
+ bool ConstSplatVal;
+
+ switch (Op.getOpcode()) {
+ default:
+ // Unknown pattern found.
+ return SDValue();
+
+ case ISD::BUILD_VECTOR: {
+ // The BUILD_VECTOR node must be a splat.
+ if (!isSplatVector(Op.getNode()))
+ return SDValue();
+
+ Ld = Op.getOperand(0);
+ ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
+ Ld.getOpcode() == ISD::ConstantFP);
+
+ // The suspected load node has several users. Make sure that all
+ // of its users are from the BUILD_VECTOR node.
+ // Constants may have multiple users.
+ if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0))
+ return SDValue();
+ break;
+ }
+
+ case ISD::VECTOR_SHUFFLE: {
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+
+ // Shuffles must have a splat mask where the first element is
+ // broadcasted.
+ if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
+ return SDValue();
+
+ SDValue Sc = Op.getOperand(0);
+ if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR)
+ return SDValue();
+
+ Ld = Sc.getOperand(0);
+ ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
+ Ld.getOpcode() == ISD::ConstantFP);
+
+ // The scalar_to_vector node and the suspected
+ // load node must have exactly one user.
+ // Constants may have multiple users.
+ if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse()))
+ return SDValue();
+ break;
+ }
+ }
+
+ bool Is256 = VT.getSizeInBits() == 256;
+ bool Is128 = VT.getSizeInBits() == 128;
+
+ // Handle broadcasting a single constant scalar from the constant pool
+ // into a vector. On Sandybridge it is still better to load a constant vector
+ // from the constant pool and not to broadcast it from a scalar.
+ if (ConstSplatVal && Subtarget->hasAVX2()) {
+ EVT CVT = Ld.getValueType();
+ assert(!CVT.isVector() && "Must not broadcast a vector type");
+ unsigned ScalarSize = CVT.getSizeInBits();
+
+ if ((Is256 && (ScalarSize == 32 || ScalarSize == 64)) ||
+ (Is128 && (ScalarSize == 32))) {
+
+ const Constant *C = 0;
+ if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
+ C = CI->getConstantIntValue();
+ else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
+ C = CF->getConstantFPValue();
+
+ assert(C && "Invalid constant type");
+
+ SDValue CP = DAG.getConstantPool(C, getPointerTy());
+ unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
+ Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
+ MachinePointerInfo::getConstantPool(),
+ false, false, false, Alignment);
+
+ return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
+ }
+ }
+
+ // The scalar source must be a normal load.
+ if (!ISD::isNormalLoad(Ld.getNode()))
+ return SDValue();
+
+ // Reject loads that have uses of the chain result
+ if (Ld->hasAnyUseOfValue(1))
+ return SDValue();
+
+ unsigned ScalarSize = Ld.getValueType().getSizeInBits();
+
+ // VBroadcast to YMM
+ if (Is256 && (ScalarSize == 32 || ScalarSize == 64))
+ return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
+
+ // VBroadcast to XMM
+ if (Is128 && (ScalarSize == 32))
+ return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
+
+ // The integer check is needed for the 64-bit into 128-bit case, so that it
+ // doesn't match double: there is no vbroadcastsd with an xmm destination.
+ if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) {
+ // VBroadcast to YMM
+ if (Is256 && (ScalarSize == 8 || ScalarSize == 16))
+ return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
+
+ // VBroadcast to XMM
+ if (Is128 && (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64))
+ return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
+ }
+
+ // Unsupported broadcast.
+ return SDValue();
+}
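
The size gating above condenses to a small table. A sketch of the decision,
with broadcastSupported an illustrative name, Is256 distinguishing ymm from
xmm destinations, and IsInteger reflecting Ld's value type:

static bool broadcastSupported(unsigned ScalarSize, bool Is256,
                               bool HasAVX2, bool IsInteger) {
  if (Is256 && (ScalarSize == 32 || ScalarSize == 64))
    return true;                       // vbroadcastss/vbroadcastsd to ymm
  if (!Is256 && ScalarSize == 32)
    return true;                       // vbroadcastss to xmm
  if (HasAVX2 && IsInteger) {
    if (Is256 && (ScalarSize == 8 || ScalarSize == 16))
      return true;                     // vpbroadcastb/w to ymm
    if (!Is256 && (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64))
      return true;                     // vpbroadcastb/w/q to xmm
  }
  return false;                        // fall back to other lowerings
}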
+
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
@@ -5053,22 +5001,26 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
if (ISD::isBuildVectorAllZeros(Op.getNode())) {
// Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
// and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
- if (Op.getValueType() == MVT::v4i32 ||
- Op.getValueType() == MVT::v8i32)
+ if (VT == MVT::v4i32 || VT == MVT::v8i32)
return Op;
- return getZeroVector(Op.getValueType(), Subtarget->hasXMMInt(), DAG, dl);
+ return getZeroVector(VT, Subtarget, DAG, dl);
}
// Vectors containing all ones can be matched by pcmpeqd on 128-bit width
- // vectors or broken into v4i32 operations on 256-bit vectors.
+ // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
+ // vpcmpeqd on 256-bit vectors.
if (ISD::isBuildVectorAllOnes(Op.getNode())) {
- if (Op.getValueType() == MVT::v4i32)
+ if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasAVX2()))
return Op;
- return getOnesVector(Op.getValueType(), DAG, dl);
+ return getOnesVector(VT, Subtarget->hasAVX2(), DAG, dl);
}
+ SDValue Broadcast = LowerVectorBroadcast(Op, DAG);
+ if (Broadcast.getNode())
+ return Broadcast;
+
unsigned EVTBits = ExtVT.getSizeInBits();
unsigned NumZero = 0;
@@ -5118,8 +5070,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// convert it to a vector with movd (S2V+shuffle to zero extend).
Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
- Item = getShuffleVectorZeroOrUndef(Item, 0, true,
- Subtarget->hasXMMInt(), DAG);
+ Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
// Now we have our 32-bit value zero extended in the low element of
// a vector. If Idx != 0, swizzle it into place.
@@ -5132,7 +5083,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DAG.getUNDEF(Item.getValueType()),
&Mask[0]);
}
- return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Item);
+ return DAG.getNode(ISD::BITCAST, dl, VT, Item);
}
}
@@ -5141,21 +5092,33 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// the rest of the elements. This will be matched as movd/movq/movss/movsd
// depending on what the source datatype is.
if (Idx == 0) {
- if (NumZero == 0) {
+ if (NumZero == 0)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
- } else if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
+
+ if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
(ExtVT == MVT::i64 && Subtarget->is64Bit())) {
+ if (VT.getSizeInBits() == 256) {
+ SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
+ return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
+ Item, DAG.getIntPtrConstant(0));
+ }
+ assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
// Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
- return getShuffleVectorZeroOrUndef(Item, 0, true,Subtarget->hasXMMInt(),
- DAG);
- } else if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
+ return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
+ }
+
+ if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
- assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
- EVT MiddleVT = MVT::v4i32;
- Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
- Item = getShuffleVectorZeroOrUndef(Item, 0, true,
- Subtarget->hasXMMInt(), DAG);
+ Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
+ if (VT.getSizeInBits() == 256) {
+ SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
+ Item = Insert128BitVector(ZeroVec, Item, DAG.getConstant(0, MVT::i32),
+ DAG, dl);
+ } else {
+ assert(VT.getSizeInBits() == 128 && "Expected an SSE value type!");
+ Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
+ }
return DAG.getNode(ISD::BITCAST, dl, VT, Item);
}
}
@@ -5183,8 +5146,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
// Turn it into a shuffle of zero and zero-extended scalar to vector.
- Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
- Subtarget->hasXMMInt(), DAG);
+ Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
SmallVector<int, 8> MaskVec;
for (unsigned i = 0; i < NumElems; i++)
MaskVec.push_back(i == Idx ? 0 : 1);
@@ -5214,9 +5176,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// For AVX-length vectors, build the individual 128-bit pieces and use
// shuffles to put them in place.
- if (VT.getSizeInBits() == 256 && !ISD::isBuildVectorAllZeros(Op.getNode())) {
+ if (VT.getSizeInBits() == 256) {
SmallVector<SDValue, 32> V;
- for (unsigned i = 0; i < NumElems; ++i)
+ for (unsigned i = 0; i != NumElems; ++i)
V.push_back(Op.getOperand(i));
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
@@ -5240,8 +5202,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
unsigned Idx = CountTrailingZeros_32(NonZeros);
SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
Op.getOperand(Idx));
- return getShuffleVectorZeroOrUndef(V2, Idx, true,
- Subtarget->hasXMMInt(), DAG);
+ return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
}
return SDValue();
}
@@ -5249,24 +5210,23 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// If element VT is < 32 bits, convert it to inserts into a zero vector.
if (EVTBits == 8 && NumElems == 16) {
SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
- *this);
+ Subtarget, *this);
if (V.getNode()) return V;
}
if (EVTBits == 16 && NumElems == 8) {
SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
- *this);
+ Subtarget, *this);
if (V.getNode()) return V;
}
// If element VT is == 32 bits, turn it into a number of shuffles.
- SmallVector<SDValue, 8> V;
- V.resize(NumElems);
+ SmallVector<SDValue, 8> V(NumElems);
if (NumElems == 4 && NumZero > 0) {
for (unsigned i = 0; i < 4; ++i) {
bool isZero = !(NonZeros & (1 << i));
if (isZero)
- V[i] = getZeroVector(VT, Subtarget->hasXMMInt(), DAG, dl);
+ V[i] = getZeroVector(VT, Subtarget, DAG, dl);
else
V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
}
@@ -5289,13 +5249,14 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
}
}
- SmallVector<int, 8> MaskVec;
- bool Reverse = (NonZeros & 0x3) == 2;
- for (unsigned i = 0; i < 2; ++i)
- MaskVec.push_back(Reverse ? 1-i : i);
- Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
- for (unsigned i = 0; i < 2; ++i)
- MaskVec.push_back(Reverse ? 1-i+NumElems : i+NumElems);
+ bool Reverse1 = (NonZeros & 0x3) == 2;
+ bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
+ int MaskVec[] = {
+ Reverse1 ? 1 : 0,
+ Reverse1 ? 0 : 1,
+ static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
+ static_cast<int>(Reverse2 ? NumElems : NumElems+1)
+ };
return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
}
@@ -5310,7 +5271,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
return LD;
// For SSE 4.1, use insertps to put the high elements into the low element.
- if (getSubtarget()->hasSSE41() || getSubtarget()->hasAVX()) {
+ if (getSubtarget()->hasSSE41()) {
SDValue Result;
if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
@@ -5422,6 +5383,85 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
return LowerAVXCONCAT_VECTORS(Op, DAG);
}
+// Try to lower a shuffle node into a simple blend instruction.
+static SDValue LowerVECTOR_SHUFFLEtoBlend(SDValue Op,
+ const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ SDValue V1 = SVOp->getOperand(0);
+ SDValue V2 = SVOp->getOperand(1);
+ DebugLoc dl = SVOp->getDebugLoc();
+ EVT VT = Op.getValueType();
+ EVT InVT = V1.getValueType();
+ int MaskSize = VT.getVectorNumElements();
+ int InSize = InVT.getVectorNumElements();
+
+ if (!Subtarget->hasSSE41())
+ return SDValue();
+
+ if (MaskSize != InSize)
+ return SDValue();
+
+ int ISDNo = 0;
+ MVT OpTy;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return SDValue();
+ case MVT::v8i16:
+ ISDNo = X86ISD::BLENDPW;
+ OpTy = MVT::v8i16;
+ break;
+ case MVT::v4i32:
+ case MVT::v4f32:
+ ISDNo = X86ISD::BLENDPS;
+ OpTy = MVT::v4f32;
+ break;
+ case MVT::v2i64:
+ case MVT::v2f64:
+ ISDNo = X86ISD::BLENDPD;
+ OpTy = MVT::v2f64;
+ break;
+ case MVT::v8i32:
+ case MVT::v8f32:
+ if (!Subtarget->hasAVX())
+ return SDValue();
+ ISDNo = X86ISD::BLENDPS;
+ OpTy = MVT::v8f32;
+ break;
+ case MVT::v4i64:
+ case MVT::v4f64:
+ if (!Subtarget->hasAVX())
+ return SDValue();
+ ISDNo = X86ISD::BLENDPD;
+ OpTy = MVT::v4f64;
+ break;
+ case MVT::v16i16:
+ if (!Subtarget->hasAVX2())
+ return SDValue();
+ ISDNo = X86ISD::BLENDPW;
+ OpTy = MVT::v16i16;
+ break;
+ }
+ assert(ISDNo && "Invalid Op Number");
+
+ unsigned MaskVals = 0;
+
+ for (int i = 0; i < MaskSize; ++i) {
+ int EltIdx = SVOp->getMaskElt(i);
+ if (EltIdx == i || EltIdx == -1)
+ MaskVals |= (1<<i);
+ else if (EltIdx == (i + MaskSize))
+ continue; // Bit stays zero: this element comes from V2.
+ else return SDValue();
+ }
+
+ V1 = DAG.getNode(ISD::BITCAST, dl, OpTy, V1);
+ V2 = DAG.getNode(ISD::BITCAST, dl, OpTy, V2);
+ SDValue Ret = DAG.getNode(ISDNo, dl, OpTy, V1, V2,
+ DAG.getConstant(MaskVals, MVT::i32));
+ return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
+}
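
The blend immediate packs one bit per result element: set when the element
comes from V1 (or is undef), clear when it comes from the matching position
of V2; any other mask value defeats the transform. A compact sketch
(blendImm is an illustrative name):

static int blendImm(const int *Mask, int Size) {
  unsigned Bits = 0;
  for (int i = 0; i != Size; ++i) {
    if (Mask[i] == i || Mask[i] == -1)
      Bits |= 1u << i;                 // element i taken from V1 (or undef)
    else if (Mask[i] != i + Size)
      return -1;                       // not expressible as a blend
  }
  return (int)Bits;
}

For a v4f32 mask <0, 5, 2, 7> this gives binary 0101, i.e. 5.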
+
// v8i16 shuffles - Prefer shuffles in the following order:
// 1. [all] pshuflw, pshufhw, optional move
// 2. [ssse3] 1 x pshufb
@@ -5439,11 +5479,11 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
// Determine if more than 1 of the words in each of the low and high quadwords
// of the result come from the same quadword of one of the two inputs. Undef
// mask values count as coming from any quadword, for better codegen.
- SmallVector<unsigned, 4> LoQuad(4);
- SmallVector<unsigned, 4> HiQuad(4);
- BitVector InputQuads(4);
+ unsigned LoQuad[] = { 0, 0, 0, 0 };
+ unsigned HiQuad[] = { 0, 0, 0, 0 };
+ std::bitset<4> InputQuads;
for (unsigned i = 0; i < 8; ++i) {
- SmallVectorImpl<unsigned> &Quad = i < 4 ? LoQuad : HiQuad;
+ unsigned *Quad = i < 4 ? LoQuad : HiQuad;
int EltIdx = SVOp->getMaskElt(i);
MaskVals.push_back(EltIdx);
if (EltIdx < 0) {
@@ -5481,10 +5521,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
// quads, disable the next transformation since it does not help SSSE3.
bool V1Used = InputQuads[0] || InputQuads[1];
bool V2Used = InputQuads[2] || InputQuads[3];
- if (Subtarget->hasSSSE3() || Subtarget->hasAVX()) {
+ if (Subtarget->hasSSSE3()) {
if (InputQuads.count() == 2 && V1Used && V2Used) {
- BestLoQuad = InputQuads.find_first();
- BestHiQuad = InputQuads.find_next(BestLoQuad);
+ BestLoQuad = InputQuads[0] ? 0 : 1;
+ BestHiQuad = InputQuads[2] ? 2 : 3;
}
if (InputQuads.count() > 2) {
BestLoQuad = -1;
@@ -5497,9 +5537,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
// words from all 4 input quadwords.
SDValue NewV;
if (BestLoQuad >= 0 || BestHiQuad >= 0) {
- SmallVector<int, 8> MaskV;
- MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
- MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
+ int MaskV[] = {
+ BestLoQuad < 0 ? 0 : BestLoQuad,
+ BestHiQuad < 0 ? 1 : BestHiQuad
+ };
NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
@@ -5544,8 +5585,9 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
unsigned TargetMask = 0;
NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
- TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()):
- X86::getShufflePSHUFLWImmediate(NewV.getNode());
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
+ TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
+ getShufflePSHUFLWImmediate(SVOp);
V1 = NewV.getOperand(0);
return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
}
@@ -5554,7 +5596,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
// If we have SSSE3, and all words of the result are from 1 input vector,
// case 2 is generated, otherwise case 3 is generated. If no SSSE3
// is present, fall back to case 4.
- if (Subtarget->hasSSSE3() || Subtarget->hasAVX()) {
+ if (Subtarget->hasSSSE3()) {
SmallVector<SDValue,16> pshufbMask;
// If we have elements from both input vectors, set the high bit of the
@@ -5602,61 +5644,51 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
// If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
// and update MaskVals with new element order.
- BitVector InOrder(8);
+ std::bitset<8> InOrder;
if (BestLoQuad >= 0) {
- SmallVector<int, 8> MaskV;
+ int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
for (int i = 0; i != 4; ++i) {
int idx = MaskVals[i];
if (idx < 0) {
- MaskV.push_back(-1);
InOrder.set(i);
} else if ((idx / 4) == BestLoQuad) {
- MaskV.push_back(idx & 3);
+ MaskV[i] = idx & 3;
InOrder.set(i);
- } else {
- MaskV.push_back(-1);
}
}
- for (unsigned i = 4; i != 8; ++i)
- MaskV.push_back(i);
NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
&MaskV[0]);
- if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE &&
- (Subtarget->hasSSSE3() || Subtarget->hasAVX()))
+ if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) {
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
- NewV.getOperand(0),
- X86::getShufflePSHUFLWImmediate(NewV.getNode()),
- DAG);
+ NewV.getOperand(0),
+ getShufflePSHUFLWImmediate(SVOp), DAG);
+ }
}
// If BestHi >= 0, generate a pshufhw to put the high elements in order,
// and update MaskVals with the new element order.
if (BestHiQuad >= 0) {
- SmallVector<int, 8> MaskV;
- for (unsigned i = 0; i != 4; ++i)
- MaskV.push_back(i);
+ int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
for (unsigned i = 4; i != 8; ++i) {
int idx = MaskVals[i];
if (idx < 0) {
- MaskV.push_back(-1);
InOrder.set(i);
} else if ((idx / 4) == BestHiQuad) {
- MaskV.push_back((idx & 3) + 4);
+ MaskV[i] = (idx & 3) + 4;
InOrder.set(i);
- } else {
- MaskV.push_back(-1);
}
}
NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
&MaskV[0]);
- if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE &&
- (Subtarget->hasSSSE3() || Subtarget->hasAVX()))
+ if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) {
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
- NewV.getOperand(0),
- X86::getShufflePSHUFHWImmediate(NewV.getNode()),
- DAG);
+ NewV.getOperand(0),
+ getShufflePSHUFHWImmediate(SVOp), DAG);
+ }
}
// In case BestHi & BestLo were both -1, which means each quadword has a word
@@ -5698,8 +5730,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
DebugLoc dl = SVOp->getDebugLoc();
- SmallVector<int, 16> MaskVals;
- SVOp->getMask(MaskVals);
+ ArrayRef<int> MaskVals = SVOp->getMask();
// If we have SSSE3, case 1 is generated when all result bytes come from
// one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
@@ -5718,7 +5749,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
}
// If SSSE3, use 1 pshufb instruction per vector with elements in the result.
- if (TLI.getSubtarget()->hasSSSE3() || TLI.getSubtarget()->hasAVX()) {
+ if (TLI.getSubtarget()->hasSSSE3()) {
SmallVector<SDValue,16> pshufbMask;
// If all result elements are from one input vector, then only translate
@@ -5849,7 +5880,7 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
unsigned NewWidth = (NumElems == 4) ? 2 : 4;
EVT NewVT;
switch (VT.getSimpleVT().SimpleTy) {
- default: assert(false && "Unexpected!");
+ default: llvm_unreachable("Unexpected!");
case MVT::v4f32: NewVT = MVT::v2f64; break;
case MVT::v4i32: NewVT = MVT::v2i64; break;
case MVT::v8i16: NewVT = MVT::v4i32; break;
@@ -5915,96 +5946,89 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
OpVT, SrcOp)));
}
-/// areShuffleHalvesWithinDisjointLanes - Check whether each half of a vector
-/// shuffle node referes to only one lane in the sources.
-static bool areShuffleHalvesWithinDisjointLanes(ShuffleVectorSDNode *SVOp) {
- EVT VT = SVOp->getValueType(0);
- int NumElems = VT.getVectorNumElements();
- int HalfSize = NumElems/2;
- SmallVector<int, 16> M;
- SVOp->getMask(M);
- bool MatchA = false, MatchB = false;
-
- for (int l = 0; l < NumElems*2; l += HalfSize) {
- if (isUndefOrInRange(M, 0, HalfSize, l, l+HalfSize)) {
- MatchA = true;
- break;
- }
- }
-
- for (int l = 0; l < NumElems*2; l += HalfSize) {
- if (isUndefOrInRange(M, HalfSize, HalfSize, l, l+HalfSize)) {
- MatchB = true;
- break;
- }
- }
-
- return MatchA && MatchB;
-}
-
/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
/// which could not be matched by any known target specific shuffle
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
- if (areShuffleHalvesWithinDisjointLanes(SVOp)) {
- // If each half of a vector shuffle node referes to only one lane in the
- // source vectors, extract each used 128-bit lane and shuffle them using
- // 128-bit shuffles. Then, concatenate the results. Otherwise leave
- // the work to the legalizer.
- DebugLoc dl = SVOp->getDebugLoc();
- EVT VT = SVOp->getValueType(0);
- int NumElems = VT.getVectorNumElements();
- int HalfSize = NumElems/2;
-
- // Extract the reference for each half
- int FstVecExtractIdx = 0, SndVecExtractIdx = 0;
- int FstVecOpNum = 0, SndVecOpNum = 0;
- for (int i = 0; i < HalfSize; ++i) {
- int Elt = SVOp->getMaskElt(i);
- if (SVOp->getMaskElt(i) < 0)
- continue;
- FstVecOpNum = Elt/NumElems;
- FstVecExtractIdx = Elt % NumElems < HalfSize ? 0 : HalfSize;
- break;
- }
- for (int i = HalfSize; i < NumElems; ++i) {
- int Elt = SVOp->getMaskElt(i);
- if (SVOp->getMaskElt(i) < 0)
+ EVT VT = SVOp->getValueType(0);
+
+ unsigned NumElems = VT.getVectorNumElements();
+ unsigned NumLaneElems = NumElems / 2;
+
+ DebugLoc dl = SVOp->getDebugLoc();
+ MVT EltVT = VT.getVectorElementType().getSimpleVT();
+ EVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
+ SDValue Shufs[2];
+
+ SmallVector<int, 16> Mask;
+ for (unsigned l = 0; l < 2; ++l) {
+ // Build a shuffle mask for the output, discovering on the fly which
+ // input vectors to use as shuffle operands (recorded in InputUsed).
+ // If building a suitable shuffle vector proves too hard, then bail
+ // out and return SDValue().
+ int InputUsed[2] = { -1, -1 }; // Not yet discovered.
+ unsigned LaneStart = l * NumLaneElems;
+ for (unsigned i = 0; i != NumLaneElems; ++i) {
+ // The mask element. This indexes into the input.
+ int Idx = SVOp->getMaskElt(i+LaneStart);
+ if (Idx < 0) {
+ // The mask element does not index into any input vector.
+ Mask.push_back(-1);
continue;
- SndVecOpNum = Elt/NumElems;
- SndVecExtractIdx = Elt % NumElems < HalfSize ? 0 : HalfSize;
- break;
- }
+ }
- // Extract the subvectors
- SDValue V1 = Extract128BitVector(SVOp->getOperand(FstVecOpNum),
- DAG.getConstant(FstVecExtractIdx, MVT::i32), DAG, dl);
- SDValue V2 = Extract128BitVector(SVOp->getOperand(SndVecOpNum),
- DAG.getConstant(SndVecExtractIdx, MVT::i32), DAG, dl);
+ // The input vector this mask element indexes into.
+ int Input = Idx / NumLaneElems;
- // Generate 128-bit shuffles
- SmallVector<int, 16> MaskV1, MaskV2;
- for (int i = 0; i < HalfSize; ++i) {
- int Elt = SVOp->getMaskElt(i);
- MaskV1.push_back(Elt < 0 ? Elt : Elt % HalfSize);
- }
- for (int i = HalfSize; i < NumElems; ++i) {
- int Elt = SVOp->getMaskElt(i);
- MaskV2.push_back(Elt < 0 ? Elt : Elt % HalfSize);
+ // Turn the index into an offset from the start of the input vector.
+ Idx -= Input * NumLaneElems;
+
+ // Find or create a shuffle vector operand to hold this input.
+ unsigned OpNo;
+ for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
+ if (InputUsed[OpNo] == Input)
+ // This input vector is already an operand.
+ break;
+ if (InputUsed[OpNo] < 0) {
+ // Create a new operand for this input vector.
+ InputUsed[OpNo] = Input;
+ break;
+ }
+ }
+
+ if (OpNo >= array_lengthof(InputUsed)) {
+ // More than two input vectors used! Give up.
+ return SDValue();
+ }
+
+ // Add the mask index for the new shuffle vector.
+ Mask.push_back(Idx + OpNo * NumLaneElems);
}
- EVT NVT = V1.getValueType();
- V1 = DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &MaskV1[0]);
- V2 = DAG.getVectorShuffle(NVT, dl, V2, DAG.getUNDEF(NVT), &MaskV2[0]);
+ if (InputUsed[0] < 0) {
+ // No input vectors were used! The result is undefined.
+ Shufs[l] = DAG.getUNDEF(NVT);
+ } else {
+ SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
+ DAG.getConstant((InputUsed[0] % 2) * NumLaneElems, MVT::i32),
+ DAG, dl);
+ // If only one input was used, use an undefined vector for the other.
+ SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
+ Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
+ DAG.getConstant((InputUsed[1] % 2) * NumLaneElems, MVT::i32),
+ DAG, dl);
+ // At least one input vector was used. Create a new shuffle vector.
+ Shufs[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
+ }
- // Concatenate the result back
- SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), V1,
- DAG.getConstant(0, MVT::i32), DAG, dl);
- return Insert128BitVector(V, V2, DAG.getConstant(NumElems/2, MVT::i32),
- DAG, dl);
+ Mask.clear();
}
- return SDValue();
+ // Concatenate the result back
+ SDValue V = Insert128BitVector(DAG.getNode(ISD::UNDEF, dl, VT), Shufs[0],
+ DAG.getConstant(0, MVT::i32), DAG, dl);
+ return Insert128BitVector(V, Shufs[1],DAG.getConstant(NumLaneElems, MVT::i32),
+ DAG, dl);
}
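
A toy walk-through of the per-lane decomposition (plain C++, not DAG code; it
assumes at most two distinct 128-bit input halves per lane, which is exactly
the condition the code above enforces):

#include <cstdio>

int main() {
  const int Mask[8] = {0, 1, 8, 9, 4, 5, 12, 13}; // v8f32 shuffle mask
  for (int l = 0; l != 2; ++l) {                  // one pass per output lane
    int InputUsed[2] = {-1, -1};
    std::printf("lane %d:", l);
    for (int i = 0; i != 4; ++i) {
      int Idx = Mask[l * 4 + i];
      int Input = Idx / 4;                        // which 128-bit input half
      int OpNo = (InputUsed[0] < 0 || InputUsed[0] == Input) ? 0 : 1;
      InputUsed[OpNo] = Input;
      std::printf(" %d", Idx % 4 + OpNo * 4);     // per-lane mask element
    }
    std::printf("  (input halves %d and %d)\n", InputUsed[0], InputUsed[1]);
  }
}

Both lanes come out as the 128-bit mask <0, 1, 4, 5>: lane 0 shuffles the low
halves of V1 and V2, lane 1 their high halves, and the two results are then
concatenated back into the v8f32.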
/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
@@ -6018,11 +6042,9 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
assert(VT.getSizeInBits() == 128 && "Unsupported vector size");
- SmallVector<std::pair<int, int>, 8> Locs;
- Locs.resize(4);
- SmallVector<int, 8> Mask1(4U, -1);
- SmallVector<int, 8> PermMask;
- SVOp->getMask(PermMask);
+ std::pair<int, int> Locs[4];
+ int Mask1[] = { -1, -1, -1, -1 };
+ SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
unsigned NumHi = 0;
unsigned NumLo = 0;
@@ -6052,17 +6074,14 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
// vector operands, put the elements into the right order.
V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
- SmallVector<int, 8> Mask2(4U, -1);
+ int Mask2[] = { -1, -1, -1, -1 };
- for (unsigned i = 0; i != 4; ++i) {
- if (Locs[i].first == -1)
- continue;
- else {
+ for (unsigned i = 0; i != 4; ++i)
+ if (Locs[i].first != -1) {
unsigned Idx = (i < 2) ? 0 : 4;
Idx += Locs[i].first * 2 + Locs[i].second;
Mask2[i] = Idx;
}
- }
return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
} else if (NumLo == 3 || NumHi == 3) {
@@ -6075,7 +6094,7 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
// from X.
if (NumHi == 3) {
// Normalize it so the 3 elements come from V1.
- CommuteVectorShuffleMask(PermMask, VT);
+ CommuteVectorShuffleMask(PermMask, 4);
std::swap(V1, V2);
}
@@ -6115,18 +6134,16 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
}
// Break it into (shuffle shuffle_hi, shuffle_lo).
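+ // Each intermediate shuffle gathers the source elements destined for one
+ // half of the result (V1 elements into its low pair, V2 elements into its
+ // high pair); the final shuffle below then picks them into place.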
- Locs.clear();
- Locs.resize(4);
- SmallVector<int,8> LoMask(4U, -1);
- SmallVector<int,8> HiMask(4U, -1);
+ int LoMask[] = { -1, -1, -1, -1 };
+ int HiMask[] = { -1, -1, -1, -1 };
- SmallVector<int,8> *MaskPtr = &LoMask;
+ int *MaskPtr = LoMask;
unsigned MaskIdx = 0;
unsigned LoIdx = 0;
unsigned HiIdx = 2;
for (unsigned i = 0; i != 4; ++i) {
if (i == 2) {
- MaskPtr = &HiMask;
+ MaskPtr = HiMask;
MaskIdx = 1;
LoIdx = 0;
HiIdx = 2;
@@ -6136,26 +6153,21 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
Locs[i] = std::make_pair(-1, -1);
} else if (Idx < 4) {
Locs[i] = std::make_pair(MaskIdx, LoIdx);
- (*MaskPtr)[LoIdx] = Idx;
+ MaskPtr[LoIdx] = Idx;
LoIdx++;
} else {
Locs[i] = std::make_pair(MaskIdx, HiIdx);
- (*MaskPtr)[HiIdx] = Idx;
+ MaskPtr[HiIdx] = Idx;
HiIdx++;
}
}
SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
- SmallVector<int, 8> MaskOps;
- for (unsigned i = 0; i != 4; ++i) {
- if (Locs[i].first == -1) {
- MaskOps.push_back(-1);
- } else {
- unsigned Idx = Locs[i].first * 4 + Locs[i].second;
- MaskOps.push_back(Idx);
- }
- }
+ int MaskOps[] = { -1, -1, -1, -1 };
+ for (unsigned i = 0; i != 4; ++i)
+ if (Locs[i].first != -1)
+ MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}
@@ -6164,6 +6176,10 @@ static bool MayFoldVectorLoad(SDValue V) {
V = V.getOperand(0);
if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
V = V.getOperand(0);
+ if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
+ V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
+ // BUILD_VECTOR (load), undef
+ V = V.getOperand(0);
if (MayFoldLoad(V))
return true;
return false;
@@ -6186,82 +6202,6 @@ static bool RelaxedMayFoldVectorLoad(SDValue V) {
return false;
}
-/// CanFoldShuffleIntoVExtract - Check if the current shuffle is used by
-/// a vector extract, and if both can be later optimized into a single load.
-/// This is done in visitEXTRACT_VECTOR_ELT and the conditions are checked
-/// here because otherwise a target specific shuffle node is going to be
-/// emitted for this shuffle, and the optimization not done.
-/// FIXME: This is probably not the best approach, but fix the problem
-/// until the right path is decided.
-static
-bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
- const TargetLowering &TLI) {
- EVT VT = V.getValueType();
- ShuffleVectorSDNode *SVOp = dyn_cast<ShuffleVectorSDNode>(V);
-
- // Be sure that the vector shuffle is present in a pattern like this:
- // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), c) -> (f32 load $addr)
- if (!V.hasOneUse())
- return false;
-
- SDNode *N = *V.getNode()->use_begin();
- if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- return false;
-
- SDValue EltNo = N->getOperand(1);
- if (!isa<ConstantSDNode>(EltNo))
- return false;
-
- // If the bit convert changed the number of elements, it is unsafe
- // to examine the mask.
- bool HasShuffleIntoBitcast = false;
- if (V.getOpcode() == ISD::BITCAST) {
- EVT SrcVT = V.getOperand(0).getValueType();
- if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
- return false;
- V = V.getOperand(0);
- HasShuffleIntoBitcast = true;
- }
-
- // Select the input vector, guarding against out of range extract vector.
- unsigned NumElems = VT.getVectorNumElements();
- unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
- int Idx = (Elt > NumElems) ? -1 : SVOp->getMaskElt(Elt);
- V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);
-
- // Skip one more bit_convert if necessary
- if (V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
-
- if (ISD::isNormalLoad(V.getNode())) {
- // Is the original load suitable?
- LoadSDNode *LN0 = cast<LoadSDNode>(V);
-
- // FIXME: avoid the multi-use bug that is preventing lots of
- // of foldings to be detected, this is still wrong of course, but
- // give the temporary desired behavior, and if it happens that
- // the load has real more uses, during isel it will not fold, and
- // will generate poor code.
- if (!LN0 || LN0->isVolatile()) // || !LN0->hasOneUse()
- return false;
-
- if (!HasShuffleIntoBitcast)
- return true;
-
- // If there's a bitcast before the shuffle, check if the load type and
- // alignment is valid.
- unsigned Align = LN0->getAlignment();
- unsigned NewAlign =
- TLI.getTargetData()->getABITypeAlignment(
- VT.getTypeForEVT(*DAG.getContext()));
-
- if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
- return false;
- }
-
- return true;
-}
-
static
SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
@@ -6275,14 +6215,14 @@ SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
static
SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
- bool HasXMMInt) {
+ bool HasSSE2) {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
EVT VT = Op.getValueType();
assert(VT != MVT::v2i64 && "unsupported shuffle type");
- if (HasXMMInt && VT == MVT::v2f64)
+ if (HasSSE2 && VT == MVT::v2f64)
return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
// v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
@@ -6308,24 +6248,8 @@ SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}
-static inline unsigned getSHUFPOpcode(EVT VT) {
- switch(VT.getSimpleVT().SimpleTy) {
- case MVT::v8i32: // Use fp unit for int unpack.
- case MVT::v8f32:
- case MVT::v4i32: // Use fp unit for int unpack.
- case MVT::v4f32: return X86ISD::SHUFPS;
- case MVT::v4i64: // Use fp unit for int unpack.
- case MVT::v4f64:
- case MVT::v2i64: // Use fp unit for int unpack.
- case MVT::v2f64: return X86ISD::SHUFPD;
- default:
- llvm_unreachable("Unknown type for shufp*");
- }
- return 0;
-}
-
static
-SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasXMMInt) {
+SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
EVT VT = Op.getValueType();
@@ -6346,32 +6270,30 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasXMMInt) {
// turns into:
// (MOVLPSmr addr:$src1, VR128:$src2)
// So, recognize this potential and also use MOVLPS or MOVLPD
- if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
+ else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
CanFoldLoad = true;
- // Both of them can't be memory operations though.
- if (MayFoldVectorLoad(V1) && MayFoldVectorLoad(V2))
- CanFoldLoad = false;
-
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
if (CanFoldLoad) {
- if (HasXMMInt && NumElems == 2)
+ if (HasSSE2 && NumElems == 2)
return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
if (NumElems == 4)
- return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
+ // If we don't care about the second element, proceed to use movss.
+ if (SVOp->getMaskElt(1) != -1)
+ return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
}
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
// movl and movlp will both match v2i64, but v2i64 is never matched by
// movl earlier because we make it strict to avoid messing with the movlp load
// folding logic (see the code above getMOVLP call). Match it here then,
// this is horrible, but will stay like this until we move all shuffle
// matching to x86 specific nodes. Note that for the 1st condition all
// types are matched with movsd.
- if (HasXMMInt) {
+ if (HasSSE2) {
// FIXME: isMOVLMask should be checked and matched before getMOVLP,
// as to remove this logic from here, as much as possible
- if (NumElems == 2 || !X86::isMOVLMask(SVOp))
+ if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
}
@@ -6379,112 +6301,12 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasXMMInt) {
assert(VT != MVT::v4i32 && "unsupported shuffle type");
// Invert the operand order and use SHUFPS to match it.
- return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V2, V1,
- X86::getShuffleSHUFImmediate(SVOp), DAG);
-}
-
-static inline unsigned getUNPCKLOpcode(EVT VT) {
- switch(VT.getSimpleVT().SimpleTy) {
- case MVT::v4i32: return X86ISD::PUNPCKLDQ;
- case MVT::v2i64: return X86ISD::PUNPCKLQDQ;
- case MVT::v4f32: return X86ISD::UNPCKLPS;
- case MVT::v2f64: return X86ISD::UNPCKLPD;
- case MVT::v8i32: // Use fp unit for int unpack.
- case MVT::v8f32: return X86ISD::VUNPCKLPSY;
- case MVT::v4i64: // Use fp unit for int unpack.
- case MVT::v4f64: return X86ISD::VUNPCKLPDY;
- case MVT::v16i8: return X86ISD::PUNPCKLBW;
- case MVT::v8i16: return X86ISD::PUNPCKLWD;
- default:
- llvm_unreachable("Unknown type for unpckl");
- }
- return 0;
-}
-
-static inline unsigned getUNPCKHOpcode(EVT VT) {
- switch(VT.getSimpleVT().SimpleTy) {
- case MVT::v4i32: return X86ISD::PUNPCKHDQ;
- case MVT::v2i64: return X86ISD::PUNPCKHQDQ;
- case MVT::v4f32: return X86ISD::UNPCKHPS;
- case MVT::v2f64: return X86ISD::UNPCKHPD;
- case MVT::v8i32: // Use fp unit for int unpack.
- case MVT::v8f32: return X86ISD::VUNPCKHPSY;
- case MVT::v4i64: // Use fp unit for int unpack.
- case MVT::v4f64: return X86ISD::VUNPCKHPDY;
- case MVT::v16i8: return X86ISD::PUNPCKHBW;
- case MVT::v8i16: return X86ISD::PUNPCKHWD;
- default:
- llvm_unreachable("Unknown type for unpckh");
- }
- return 0;
-}
-
-static inline unsigned getVPERMILOpcode(EVT VT) {
- switch(VT.getSimpleVT().SimpleTy) {
- case MVT::v4i32:
- case MVT::v4f32: return X86ISD::VPERMILPS;
- case MVT::v2i64:
- case MVT::v2f64: return X86ISD::VPERMILPD;
- case MVT::v8i32:
- case MVT::v8f32: return X86ISD::VPERMILPSY;
- case MVT::v4i64:
- case MVT::v4f64: return X86ISD::VPERMILPDY;
- default:
- llvm_unreachable("Unknown type for vpermil");
- }
- return 0;
-}
-
-/// isVectorBroadcast - Check if the node chain is suitable to be xformed to
-/// a vbroadcast node. The nodes are suitable whenever we can fold a load coming
-/// from a 32 or 64 bit scalar. Update Op to the desired load to be folded.
-static bool isVectorBroadcast(SDValue &Op) {
- EVT VT = Op.getValueType();
- bool Is256 = VT.getSizeInBits() == 256;
-
- assert((VT.getSizeInBits() == 128 || Is256) &&
- "Unsupported type for vbroadcast node");
-
- SDValue V = Op;
- if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
- V = V.getOperand(0);
-
- if (Is256 && !(V.hasOneUse() &&
- V.getOpcode() == ISD::INSERT_SUBVECTOR &&
- V.getOperand(0).getOpcode() == ISD::UNDEF))
- return false;
-
- if (Is256)
- V = V.getOperand(1);
-
- if (!V.hasOneUse())
- return false;
-
- // Check the source scalar_to_vector type. 256-bit broadcasts are
- // supported for 32/64-bit sizes, while 128-bit ones are only supported
- // for 32-bit scalars.
- if (V.getOpcode() != ISD::SCALAR_TO_VECTOR)
- return false;
-
- unsigned ScalarSize = V.getOperand(0).getValueType().getSizeInBits();
- if (ScalarSize != 32 && ScalarSize != 64)
- return false;
- if (!Is256 && ScalarSize == 64)
- return false;
-
- V = V.getOperand(0);
- if (!MayFoldLoad(V))
- return false;
-
- // Return the load node
- Op = V;
- return true;
+ return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
+ getShuffleSHUFImmediate(SVOp), DAG);
}
-static
-SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
- const TargetLowering &TLI,
- const X86Subtarget *Subtarget) {
+SDValue
+X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
@@ -6492,22 +6314,17 @@ SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
SDValue V2 = Op.getOperand(1);
if (isZeroShuffle(SVOp))
- return getZeroVector(VT, Subtarget->hasXMMInt(), DAG, dl);
+ return getZeroVector(VT, Subtarget, DAG, dl);
// Handle splat operations
if (SVOp->isSplat()) {
unsigned NumElem = VT.getVectorNumElements();
int Size = VT.getSizeInBits();
- // Special case, this is the only place now where it's allowed to return
- // a vector_shuffle operation without using a target specific node, because
- // *hopefully* it will be optimized away by the dag combiner. FIXME: should
- // this be moved to DAGCombine instead?
- if (NumElem <= 4 && CanXFormVExtractWithShuffleIntoLoad(Op, DAG, TLI))
- return Op;
// Use vbroadcast whenever the splat comes from a foldable load
- if (Subtarget->hasAVX() && isVectorBroadcast(V1))
- return DAG.getNode(X86ISD::VBROADCAST, dl, VT, V1);
+ SDValue Broadcast = LowerVectorBroadcast(Op, DAG);
+ if (Broadcast.getNode())
+ return Broadcast;
// Handle splats by matching through known shuffle masks
if ((Size == 128 && NumElem <= 4) ||
@@ -6525,21 +6342,26 @@ SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
if (NewOp.getNode())
return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
} else if ((VT == MVT::v4i32 ||
- (VT == MVT::v4f32 && Subtarget->hasXMMInt()))) {
+ (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
// FIXME: Figure out a cleaner way to do this.
// Try to make use of movq to zero out the top part.
if (ISD::isBuildVectorAllZeros(V2.getNode())) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
if (NewOp.getNode()) {
- if (isCommutedMOVL(cast<ShuffleVectorSDNode>(NewOp), true, false))
- return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(0),
+ EVT NewVT = NewOp.getValueType();
+ if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
+ NewVT, true, false))
+ return getVZextMovL(VT, NewVT, NewOp.getOperand(0),
DAG, Subtarget, dl);
}
} else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
- if (NewOp.getNode() && X86::isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)))
- return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
- DAG, Subtarget, dl);
+ if (NewOp.getNode()) {
+ EVT NewVT = NewOp.getValueType();
+ if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
+ return getVZextMovL(VT, NewVT, NewOp.getOperand(1),
+ DAG, Subtarget, dl);
+ }
}
}
return SDValue();
@@ -6553,18 +6375,22 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
unsigned NumElems = VT.getVectorNumElements();
- bool isMMX = VT.getSizeInBits() == 64;
bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
bool V1IsSplat = false;
bool V2IsSplat = false;
- bool HasXMMInt = Subtarget->hasXMMInt();
+ bool HasSSE2 = Subtarget->hasSSE2();
+ bool HasAVX = Subtarget->hasAVX();
+ bool HasAVX2 = Subtarget->hasAVX2();
MachineFunction &MF = DAG.getMachineFunction();
bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
- // Shuffle operations on MMX not supported.
- if (isMMX)
- return Op;
+ assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
+
+ if (V1IsUndef && V2IsUndef)
+ return DAG.getUNDEF(VT);
+
+ assert(!V1IsUndef && "Op 1 of shuffle should not be undef");
// Vector shuffle lowering takes 3 steps:
//
@@ -6576,50 +6402,54 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
// so the shuffle can be broken into other shuffles and the legalizer can
// try the lowering again.
//
- // The general ideia is that no vector_shuffle operation should be left to
+ // The general idea is that no vector_shuffle operation should be left to
// be matched during isel, all of them must be converted to a target specific
// node here.
// Normalize the input vectors. Here splats, zeroed vectors, profitable
// narrowing and commutation of operands should be handled. The actual code
// doesn't include all of those, work in progress...
- SDValue NewOp = NormalizeVectorShuffle(Op, DAG, *this, Subtarget);
+ SDValue NewOp = NormalizeVectorShuffle(Op, DAG);
if (NewOp.getNode())
return NewOp;
+ SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
+
// NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
// unpckh_undef). Only use pshufd if speed is more important than size.
- if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp))
- return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
- if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp))
- return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
+ if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasAVX2))
+ return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
+ if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasAVX2))
+ return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
- if (X86::isMOVDDUPMask(SVOp) &&
- (Subtarget->hasSSE3() || Subtarget->hasAVX()) &&
+ if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
V2IsUndef && RelaxedMayFoldVectorLoad(V1))
return getMOVDDup(Op, dl, V1, DAG);
- if (X86::isMOVHLPS_v_undef_Mask(SVOp))
+ if (isMOVHLPS_v_undef_Mask(M, VT))
return getMOVHighToLow(Op, dl, DAG);
// Use to match splats
- if (HasXMMInt && X86::isUNPCKHMask(SVOp) && V2IsUndef &&
+ if (HasSSE2 && isUNPCKHMask(M, VT, HasAVX2) && V2IsUndef &&
(VT == MVT::v2f64 || VT == MVT::v2i64))
- return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
+ return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
- if (X86::isPSHUFDMask(SVOp)) {
+ if (isPSHUFDMask(M, VT)) {
// The actual implementation will match the mask in the if above and then
// during isel it can match several different instructions, not only pshufd
// as its name says, sad but true, emulate the behavior for now...
- if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
- return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
+ if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
+ return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
+
+ unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
- unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);
+ if (HasAVX && (VT == MVT::v4f32 || VT == MVT::v2f64))
+ return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1, TargetMask, DAG);
- if (HasXMMInt && (VT == MVT::v4f32 || VT == MVT::v4i32))
+ if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
- return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V1, V1,
+ return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
TargetMask, DAG);
}
@@ -6627,8 +6457,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
bool isLeft = false;
unsigned ShAmt = 0;
SDValue ShVal;
- bool isShift = getSubtarget()->hasXMMInt() &&
- isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
+ bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
if (isShift && ShVal.hasOneUse()) {
// If the shifted value has multiple uses, it may be cheaper to use
// v_set0 + movlhps or movhlps, etc.
@@ -6637,13 +6466,11 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
}
- if (X86::isMOVLMask(SVOp)) {
- if (V1IsUndef)
- return V2;
+ if (isMOVLMask(M, VT)) {
if (ISD::isBuildVectorAllZeros(V1.getNode()))
return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
- if (!X86::isMOVLPMask(SVOp)) {
- if (HasXMMInt && (VT == MVT::v2i64 || VT == MVT::v2f64))
+ if (!isMOVLPMask(M, VT)) {
+ if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
if (VT == MVT::v4i32 || VT == MVT::v4f32)
@@ -6652,27 +6479,27 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
}
// FIXME: fold these into legal mask.
- if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
- return getMOVLowToHigh(Op, dl, DAG, HasXMMInt);
+ if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasAVX2))
+ return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
- if (X86::isMOVHLPSMask(SVOp))
+ if (isMOVHLPSMask(M, VT))
return getMOVHighToLow(Op, dl, DAG);
- if (X86::isMOVSHDUPMask(SVOp, Subtarget))
+ if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
- if (X86::isMOVSLDUPMask(SVOp, Subtarget))
+ if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
- if (X86::isMOVLPMask(SVOp))
- return getMOVLP(Op, dl, DAG, HasXMMInt);
+ if (isMOVLPMask(M, VT))
+ return getMOVLP(Op, dl, DAG, HasSSE2);
- if (ShouldXformToMOVHLPS(SVOp) ||
- ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
+ if (ShouldXformToMOVHLPS(M, VT) ||
+ ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
return CommuteVectorShuffle(SVOp, DAG);
if (isShift) {
- // No better options. Use a vshl / vsrl.
+ // No better options. Use a vshldq / vsrldq.
EVT EltVT = VT.getVectorElementType();
ShAmt *= EltVT.getSizeInBits();
return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
@@ -6685,17 +6512,14 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
V2IsSplat = isSplatVector(V2.getNode());
// Canonicalize the splat or undef, if present, to be on the RHS.
- if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
- Op = CommuteVectorShuffle(SVOp, DAG);
- SVOp = cast<ShuffleVectorSDNode>(Op);
- V1 = SVOp->getOperand(0);
- V2 = SVOp->getOperand(1);
+ if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
+ CommuteVectorShuffleMask(M, NumElems);
+ std::swap(V1, V2);
std::swap(V1IsSplat, V2IsSplat);
- std::swap(V1IsUndef, V2IsUndef);
Commuted = true;
}
- if (isCommutedMOVL(SVOp, V2IsSplat, V2IsUndef)) {
+ if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
// Shuffling low element of v1 into undef, just return v1.
if (V2IsUndef)
return V1;
@@ -6705,81 +6529,77 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
return getMOVL(DAG, dl, VT, V2, V1);
}
- if (X86::isUNPCKLMask(SVOp))
- return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);
+ if (isUNPCKLMask(M, VT, HasAVX2))
+ return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
- if (X86::isUNPCKHMask(SVOp))
- return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);
+ if (isUNPCKHMask(M, VT, HasAVX2))
+ return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
if (V2IsSplat) {
// Normalize mask so all entries that point to V2 points to its first
// element then try to match unpck{h|l} again. If match, return a
- // new vector_shuffle with the corrected mask.
- SDValue NewMask = NormalizeMask(SVOp, DAG);
- ShuffleVectorSDNode *NSVOp = cast<ShuffleVectorSDNode>(NewMask);
- if (NSVOp != SVOp) {
- if (X86::isUNPCKLMask(NSVOp, true)) {
- return NewMask;
- } else if (X86::isUNPCKHMask(NSVOp, true)) {
- return NewMask;
- }
+ // new vector_shuffle with the corrected mask.
+ SmallVector<int, 8> NewMask(M.begin(), M.end());
+ NormalizeMask(NewMask, NumElems);
+ if (isUNPCKLMask(NewMask, VT, HasAVX2, true)) {
+ return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
+ } else if (isUNPCKHMask(NewMask, VT, HasAVX2, true)) {
+ return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
}
}
if (Commuted) {
// Commute is back and try unpck* again.
// FIXME: this seems wrong.
- SDValue NewOp = CommuteVectorShuffle(SVOp, DAG);
- ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp);
+ CommuteVectorShuffleMask(M, NumElems);
+ std::swap(V1, V2);
+ std::swap(V1IsSplat, V2IsSplat);
+ Commuted = false;
- if (X86::isUNPCKLMask(NewSVOp))
- return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);
+ if (isUNPCKLMask(M, VT, HasAVX2))
+ return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
- if (X86::isUNPCKHMask(NewSVOp))
- return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
+ if (isUNPCKHMask(M, VT, HasAVX2))
+ return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
}
// Normalize the node to match x86 shuffle ops if needed
- if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
+ if (!V2IsUndef && (isSHUFPMask(M, VT, HasAVX, /* Commuted */ true)))
return CommuteVectorShuffle(SVOp, DAG);
// The checks below are all present in isShuffleMaskLegal, but they are
// inlined here right now to enable us to directly emit target specific
// nodes, and remove one by one until they don't return Op anymore.
- SmallVector<int, 16> M;
- SVOp->getMask(M);
- if (isPALIGNRMask(M, VT, Subtarget->hasSSSE3() || Subtarget->hasAVX()))
+ if (isPALIGNRMask(M, VT, Subtarget))
return getTargetShuffleNode(X86ISD::PALIGN, dl, VT, V1, V2,
- X86::getShufflePALIGNRImmediate(SVOp),
+ getShufflePALIGNRImmediate(SVOp),
DAG);
if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
SVOp->getSplatIndex() == 0 && V2IsUndef) {
- if (VT == MVT::v2f64)
- return getTargetShuffleNode(X86ISD::UNPCKLPD, dl, VT, V1, V1, DAG);
- if (VT == MVT::v2i64)
- return getTargetShuffleNode(X86ISD::PUNPCKLQDQ, dl, VT, V1, V1, DAG);
+ if (VT == MVT::v2f64 || VT == MVT::v2i64)
+ return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
}
if (isPSHUFHWMask(M, VT))
return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
- X86::getShufflePSHUFHWImmediate(SVOp),
+ getShufflePSHUFHWImmediate(SVOp),
DAG);
if (isPSHUFLWMask(M, VT))
return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
- X86::getShufflePSHUFLWImmediate(SVOp),
+ getShufflePSHUFLWImmediate(SVOp),
DAG);
- if (isSHUFPMask(M, VT))
- return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V1, V2,
- X86::getShuffleSHUFImmediate(SVOp), DAG);
+ if (isSHUFPMask(M, VT, HasAVX))
+ return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
+ getShuffleSHUFImmediate(SVOp), DAG);
- if (X86::isUNPCKL_v_undef_Mask(SVOp))
- return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
- if (X86::isUNPCKH_v_undef_Mask(SVOp))
- return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
+ if (isUNPCKL_v_undef_Mask(M, VT, HasAVX2))
+ return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
+ if (isUNPCKH_v_undef_Mask(M, VT, HasAVX2))
+ return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
//===--------------------------------------------------------------------===//
// Generate target specific nodes for 128 or 256-bit shuffles only
@@ -6787,33 +6607,26 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
//
// Handle VMOVDDUPY permutations
- if (isMOVDDUPYMask(SVOp, Subtarget))
+ if (V2IsUndef && isMOVDDUPYMask(M, VT, HasAVX))
return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
- // Handle VPERMILPS* permutations
- if (isVPERMILPSMask(M, VT, Subtarget))
- return getTargetShuffleNode(getVPERMILOpcode(VT), dl, VT, V1,
- getShuffleVPERMILPSImmediate(SVOp), DAG);
-
- // Handle VPERMILPD* permutations
- if (isVPERMILPDMask(M, VT, Subtarget))
- return getTargetShuffleNode(getVPERMILOpcode(VT), dl, VT, V1,
- getShuffleVPERMILPDImmediate(SVOp), DAG);
-
- // Handle VPERM2F128 permutations
- if (isVPERM2F128Mask(M, VT, Subtarget))
- return getTargetShuffleNode(X86ISD::VPERM2F128, dl, VT, V1, V2,
- getShuffleVPERM2F128Immediate(SVOp), DAG);
+ // Handle VPERMILPS/D* permutations
+ if (isVPERMILPMask(M, VT, HasAVX)) {
+ if (HasAVX2 && VT == MVT::v8i32)
+ return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
+ getShuffleSHUFImmediate(SVOp), DAG);
+ return getTargetShuffleNode(X86ISD::VPERMILP, dl, VT, V1,
+ getShuffleSHUFImmediate(SVOp), DAG);
+ }
- // Handle VSHUFPSY permutations
- if (isVSHUFPSYMask(M, VT, Subtarget))
- return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V1, V2,
- getShuffleVSHUFPSYImmediate(SVOp), DAG);
+ // Handle VPERM2F128/VPERM2I128 permutations
+ if (isVPERM2X128Mask(M, VT, HasAVX))
+ return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
+ V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
- // Handle VSHUFPDY permutations
- if (isVSHUFPDYMask(M, VT, Subtarget))
- return getTargetShuffleNode(getSHUFPOpcode(VT), dl, VT, V1, V2,
- getShuffleVSHUFPDYImmediate(SVOp), DAG);
+ SDValue BlendOp = LowerVECTOR_SHUFFLEtoBlend(Op, Subtarget, DAG);
+ if (BlendOp.getNode())
+ return BlendOp;
//===--------------------------------------------------------------------===//
// Since no target specific shuffle was selected for this generic one,
@@ -6896,8 +6709,8 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
Op.getOperand(0)),
Op.getOperand(1));
return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
- } else if (VT == MVT::i32) {
- // ExtractPS works with constant index.
+ } else if (VT == MVT::i32 || VT == MVT::i64) {
+ // ExtractPS/pextrq work with a constant index.
if (isa<ConstantSDNode>(Op.getOperand(1)))
return Op;
}
@@ -6933,7 +6746,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
assert(Vec.getValueSizeInBits() <= 128 && "Unexpected vector length");
- if (Subtarget->hasSSE41() || Subtarget->hasAVX()) {
+ if (Subtarget->hasSSE41()) {
SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
if (Res.getNode())
return Res;
@@ -7036,7 +6849,8 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
// Create this as a scalar to vector..
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
- } else if (EltVT == MVT::i32 && isa<ConstantSDNode>(N2)) {
+ } else if ((EltVT == MVT::i32 || EltVT == MVT::i64) &&
+ isa<ConstantSDNode>(N2)) {
// PINSR* works with constant index.
return Op;
}
@@ -7074,7 +6888,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
return Insert128BitVector(N0, V, Ins128Idx, DAG, dl);
}
- if (Subtarget->hasSSE41() || Subtarget->hasAVX())
+ if (Subtarget->hasSSE41())
return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
if (EltVT == MVT::i8)
@@ -7276,7 +7090,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
// load.
if (isGlobalStubReference(OpFlag))
Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(), false, false, 0);
+ MachinePointerInfo::getGOT(), false, false, false, 0);
return Result;
}
@@ -7344,7 +7158,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
// load.
if (isGlobalStubReference(OpFlags))
Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
- MachinePointerInfo::getGOT(), false, false, 0);
+ MachinePointerInfo::getGOT(), false, false, false, 0);
// If there was a non-zero offset that we didn't fold, create an explicit
// addition for it.
@@ -7423,7 +7237,8 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getIntPtrConstant(0),
- MachinePointerInfo(Ptr), false, false, 0);
+ MachinePointerInfo(Ptr),
+ false, false, false, 0);
unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP relative, even on x86-64. One exception is
@@ -7449,7 +7264,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
if (model == TLSModel::InitialExec)
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
- MachinePointerInfo::getGOT(), false, false, 0);
+ MachinePointerInfo::getGOT(), false, false, false, 0);
// The address of the thread local variable is the add of the thread
// pointer with the offset of the variable.
@@ -7471,8 +7286,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
GV = GA->resolveAliasedGlobal(false);
- TLSModel::Model model
- = getTLSModel(GV, getTargetMachine().getRelocationModel());
+ TLSModel::Model model = getTargetMachine().getTLSModel(GV);
switch (model) {
case TLSModel::GeneralDynamic:
@@ -7529,19 +7343,77 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
Chain.getValue(1));
- }
+ } else if (Subtarget->isTargetWindows()) {
+ // Just use the implicit TLS architecture
+ // Need to generate something similar to:
+ // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
+ // ; from TEB
+ // mov ecx, dword [rel _tls_index]; Load index (from C runtime)
+ // mov rcx, qword [rdx+rcx*8]
+ // mov eax, .tls$:tlsvar
+ // [rax+rcx] contains the address
+ // Windows 64bit: gs:0x58
+ // Windows 32bit: fs:__tls_array
- assert(false &&
- "TLS not implemented for this target.");
+ // If GV is an alias then use the aliasee for determining
+ // thread-localness.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ GV = GA->resolveAliasedGlobal(false);
+ DebugLoc dl = GA->getDebugLoc();
+ SDValue Chain = DAG.getEntryNode();
- llvm_unreachable("Unreachable");
- return SDValue();
+ // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
+ // %gs:0x58 (64-bit).
+ Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
+ ? Type::getInt8PtrTy(*DAG.getContext(),
+ 256)
+ : Type::getInt32PtrTy(*DAG.getContext(),
+ 257));
+
+ SDValue ThreadPointer = DAG.getLoad(getPointerTy(), dl, Chain,
+ Subtarget->is64Bit()
+ ? DAG.getIntPtrConstant(0x58)
+ : DAG.getExternalSymbol("_tls_array",
+ getPointerTy()),
+ MachinePointerInfo(Ptr),
+ false, false, false, 0);
+
+ // Load the _tls_index variable
+ SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
+ if (Subtarget->is64Bit())
+ IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
+ IDX, MachinePointerInfo(), MVT::i32,
+ false, false, 0);
+ else
+ IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
+ false, false, false, 0);
+
+ SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
+ getPointerTy());
+ IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
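+
+ // IDX now holds _tls_index * sizeof(void*): the byte offset of this
+ // module's TLS block pointer within the ThreadLocalStoragePointer array.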
+
+ SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
+ res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
+ false, false, false, 0);
+
+ // Get the offset of start of .tls section
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
+ GA->getValueType(0),
+ GA->getOffset(), X86II::MO_SECREL);
+ SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
+
+ // The address of the thread local variable is the add of the thread
+ // pointer with the offset of the variable.
+ return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
+ }
+
+ llvm_unreachable("TLS not implemented for this target.");
}
-/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values and
-/// take a 2 x i32 value to shift plus a shift amount.
-SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const {
+/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
+/// and take a 2 x i32 value to shift plus a shift amount.
+SDValue X86TargetLowering::LowerShiftParts(SDValue Op, SelectionDAG &DAG) const{
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
EVT VT = Op.getValueType();
unsigned VTBits = VT.getSizeInBits();
@@ -7673,7 +7545,7 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
Op.getValueType(), MMO);
Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
MachinePointerInfo::getFixedStack(SSFI),
- false, false, 0);
+ false, false, false, 0);
}
return Result;
@@ -7682,85 +7554,65 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
SelectionDAG &DAG) const {
- // This algorithm is not obvious. Here it is in C code, more or less:
+ // This algorithm is not obvious. Here it is what we're trying to output:
/*
- double uint64_to_double( uint32_t hi, uint32_t lo ) {
- static const __m128i exp = { 0x4330000045300000ULL, 0 };
- static const __m128d bias = { 0x1.0p84, 0x1.0p52 };
-
- // Copy ints to xmm registers.
- __m128i xh = _mm_cvtsi32_si128( hi );
- __m128i xl = _mm_cvtsi32_si128( lo );
-
- // Combine into low half of a single xmm register.
- __m128i x = _mm_unpacklo_epi32( xh, xl );
- __m128d d;
- double sd;
-
- // Merge in appropriate exponents to give the integer bits the right
- // magnitude.
- x = _mm_unpacklo_epi32( x, exp );
-
- // Subtract away the biases to deal with the IEEE-754 double precision
- // implicit 1.
- d = _mm_sub_pd( (__m128d) x, bias );
-
- // All conversions up to here are exact. The correctly rounded result is
- // calculated using the current rounding mode using the following
- // horizontal add.
- d = _mm_add_sd( d, _mm_unpackhi_pd( d, d ) );
- _mm_store_sd( &sd, d ); // Because we are returning doubles in XMM, this
- // store doesn't really need to be here (except
- // maybe to zero the other double)
- return sd;
- }
+ movq %rax, %xmm0
+ punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
+ subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
+ #ifdef __SSE3__
+ haddpd %xmm0, %xmm0
+ #else
+ pshufd $0x4e, %xmm0, %xmm1
+ addpd %xmm1, %xmm0
+ #endif
*/
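+ // Why the magic constants work (a sketch): a double whose high word is
+ // 0x43300000 has the value 2^52 + (low 32 bits), and one whose high word
+ // is 0x45300000 has the value 2^84 + (high 32 bits) * 2^32. Subtracting
+ // the biases 2^52 and 2^84 leaves lo and hi * 2^32 exactly; their sum,
+ // rounded once by the final add, is the converted value.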
DebugLoc dl = Op.getDebugLoc();
LLVMContext *Context = DAG.getContext();
// Build some magic constants.
- std::vector<Constant*> CV0;
- CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x45300000)));
- CV0.push_back(ConstantInt::get(*Context, APInt(32, 0x43300000)));
- CV0.push_back(ConstantInt::get(*Context, APInt(32, 0)));
- CV0.push_back(ConstantInt::get(*Context, APInt(32, 0)));
- Constant *C0 = ConstantVector::get(CV0);
+ const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
+ Constant *C0 = ConstantDataVector::get(*Context, CV0);
SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
- std::vector<Constant*> CV1;
+ SmallVector<Constant*,2> CV1;
CV1.push_back(
- ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL))));
+ ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL))));
CV1.push_back(
- ConstantFP::get(*Context, APFloat(APInt(64, 0x4330000000000000ULL))));
+ ConstantFP::get(*Context, APFloat(APInt(64, 0x4530000000000000ULL))));
Constant *C1 = ConstantVector::get(CV1);
SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
- SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
- DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Op.getOperand(0),
- DAG.getIntPtrConstant(1)));
- SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
- DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
- Op.getOperand(0),
- DAG.getIntPtrConstant(0)));
- SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2);
+ // Load the 64-bit value into an XMM register.
+ SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
+ Op.getOperand(0));
SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
MachinePointerInfo::getConstantPool(),
- false, false, 16);
- SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0);
- SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck2);
+ false, false, false, 16);
+ SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
+ DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
+ CLod0);
+
SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
MachinePointerInfo::getConstantPool(),
- false, false, 16);
+ false, false, false, 16);
+ SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
+ SDValue Result;
+
+ if (Subtarget->hasSSE3()) {
+ // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
+ Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
+ } else {
+ SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
+ SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
+ S2F, 0x4E, DAG);
+ Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
+ DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
+ Sub);
+ }
- // Add the halves; easiest way is to swap them into another reg first.
- int ShufMask[2] = { 1, -1 };
- SDValue Shuf = DAG.getVectorShuffle(MVT::v2f64, dl, Sub,
- DAG.getUNDEF(MVT::v2f64), ShufMask);
- SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuf, Sub);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Add,
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
DAG.getIntPtrConstant(0));
}
@@ -7777,8 +7629,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
Op.getOperand(0));
// Zero out the upper parts of the register.
- Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget->hasXMMInt(),
- DAG);
+ Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
@@ -7830,6 +7681,9 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
return LowerUINT_TO_FP_i64(Op, DAG);
else if (SrcVT == MVT::i32 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i32(Op, DAG);
+ else if (Subtarget->is64Bit() &&
+ SrcVT == MVT::i64 && DstVT == MVT::f32)
+ return SDValue();
// Make a 64-bit buffer, and use it to build an FILD.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
@@ -7849,7 +7703,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
- StackSlot, MachinePointerInfo(),
+ StackSlot, MachinePointerInfo(),
false, false, 0);
// For i64 source, we need to add the appropriate power of 2 if the input
// was negative. This is the same as the optimization in
@@ -7897,19 +7751,19 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
}
std::pair<SDValue,SDValue> X86TargetLowering::
-FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
+FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned,
+                bool IsReplace) const {
DebugLoc DL = Op.getDebugLoc();
EVT DstTy = Op.getValueType();
- if (!IsSigned) {
+ if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
DstTy = MVT::i64;
}
assert(DstTy.getSimpleVT() <= MVT::i64 &&
DstTy.getSimpleVT() >= MVT::i16 &&
- "Unknown FP_TO_SINT to lower!");
+ "Unknown FP_TO_INT to lower!");
// These are really Legal.
if (DstTy == MVT::i32 &&
@@ -7920,26 +7774,29 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
- // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
- // stack slot.
+ // We lower FP->int64 either into FISTP64 followed by a load from a temporary
+ // stack slot, or into the FTOL runtime function.
MachineFunction &MF = DAG.getMachineFunction();
unsigned MemSize = DstTy.getSizeInBits()/8;
int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
-
-
unsigned Opc;
- switch (DstTy.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
- case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
- case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
- case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
- }
+ if (!IsSigned && isIntegerTypeFTOL(DstTy))
+ Opc = X86ISD::WIN_FTOL;
+ else
+ switch (DstTy.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
+ case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
+ case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
+ case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
+ }
SDValue Chain = DAG.getEntryNode();
SDValue Value = Op.getOperand(0);
EVT TheVT = Op.getOperand(0).getValueType();
+ // FIXME: This causes a redundant load/store if the SSE-class value is already
+ // in memory, such as if it is on the callstack.
if (isScalarFPTypeInSSEReg(TheVT)) {
assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, DL, Value, StackSlot,
@@ -7964,12 +7821,26 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
MachineMemOperand::MOStore, MemSize, MemSize);
- // Build the FP_TO_INT*_IN_MEM
- SDValue Ops[] = { Chain, Value, StackSlot };
- SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
- Ops, 3, DstTy, MMO);
-
- return std::make_pair(FIST, StackSlot);
+ if (Opc != X86ISD::WIN_FTOL) {
+ // Build the FP_TO_INT*_IN_MEM
+ SDValue Ops[] = { Chain, Value, StackSlot };
+ SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
+ Ops, 3, DstTy, MMO);
+ return std::make_pair(FIST, StackSlot);
+ } else {
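+ // On 32-bit Windows, unsigned FP-to-i64 goes through the _ftol2
+ // runtime routine, which (as we understand the ABI) returns the
+ // 64-bit result in EDX:EAX; model that with two CopyFromRegs.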
+ SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
+ DAG.getVTList(MVT::Other, MVT::Glue),
+ Chain, Value);
+ SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
+ MVT::i32, ftol.getValue(1));
+ SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
+ MVT::i32, eax.getValue(2));
+ SDValue Ops[] = { eax, edx };
+ SDValue pair = IsReplace
+ ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops, 2)
+ : DAG.getMergeValues(Ops, 2, DL);
+ return std::make_pair(pair, SDValue());
+ }
}
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
@@ -7977,25 +7848,37 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
if (Op.getValueType().isVector())
return SDValue();
- std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, true);
+ std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
+ /*IsSigned=*/ true, /*IsReplace=*/ false);
SDValue FIST = Vals.first, StackSlot = Vals.second;
// If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
if (FIST.getNode() == 0) return Op;
- // Load the result.
- return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
- FIST, StackSlot, MachinePointerInfo(), false, false, 0);
+ if (StackSlot.getNode())
+ // Load the result.
+ return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
+ FIST, StackSlot, MachinePointerInfo(),
+ false, false, false, 0);
+ else
+ // The node is the result.
+ return FIST;
}
SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
SelectionDAG &DAG) const {
- std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false);
+ std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
+ /*IsSigned=*/ false, /*IsReplace=*/ false);
SDValue FIST = Vals.first, StackSlot = Vals.second;
assert(FIST.getNode() && "Unexpected failure");
- // Load the result.
- return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
- FIST, StackSlot, MachinePointerInfo(), false, false, 0);
+ if (StackSlot.getNode())
+ // Load the result.
+ return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
+ FIST, StackSlot, MachinePointerInfo(),
+ false, false, false, 0);
+ else
+ // The node is the result.
+ return FIST;
}
SDValue X86TargetLowering::LowerFABS(SDValue Op,
@@ -8006,23 +7889,18 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op,
EVT EltVT = VT;
if (VT.isVector())
EltVT = VT.getVectorElementType();
- std::vector<Constant*> CV;
+ Constant *C;
if (EltVT == MVT::f64) {
- Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))));
- CV.push_back(C);
- CV.push_back(C);
+ C = ConstantVector::getSplat(2,
+ ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
} else {
- Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))));
- CV.push_back(C);
- CV.push_back(C);
- CV.push_back(C);
- CV.push_back(C);
+ C = ConstantVector::getSplat(4,
+ ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))));
}
- Constant *C = ConstantVector::get(CV);
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
- false, false, 16);
+ false, false, false, 16);
return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
}
@@ -8031,31 +7909,28 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
EVT EltVT = VT;
- if (VT.isVector())
+ unsigned NumElts = VT == MVT::f64 ? 2 : 4;
+ if (VT.isVector()) {
EltVT = VT.getVectorElementType();
- std::vector<Constant*> CV;
- if (EltVT == MVT::f64) {
- Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)));
- CV.push_back(C);
- CV.push_back(C);
- } else {
- Constant *C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)));
- CV.push_back(C);
- CV.push_back(C);
- CV.push_back(C);
- CV.push_back(C);
+ NumElts = VT.getVectorNumElements();
}
- Constant *C = ConstantVector::get(CV);
+ Constant *C;
+ if (EltVT == MVT::f64)
+ C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)));
+ else
+ C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31)));
+ C = ConstantVector::getSplat(NumElts, C);
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
- false, false, 16);
+ false, false, false, 16);
if (VT.isVector()) {
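+ // Flip the sign bits with an integer XOR against the splatted mask,
+ // bitcasting through a 64-bit-element vector of the operand's width.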
+ MVT XORVT = VT.getSizeInBits() == 128 ? MVT::v2i64 : MVT::v4i64;
return DAG.getNode(ISD::BITCAST, dl, VT,
- DAG.getNode(ISD::XOR, dl, MVT::v2i64,
- DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
+ DAG.getNode(ISD::XOR, dl, XORVT,
+ DAG.getNode(ISD::BITCAST, dl, XORVT,
Op.getOperand(0)),
- DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Mask)));
+ DAG.getNode(ISD::BITCAST, dl, XORVT, Mask)));
} else {
return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
}
@@ -8084,7 +7959,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
// type, and that won't be f80 since that is not custom lowered.
// First get the sign bit of second operand.
- std::vector<Constant*> CV;
+ SmallVector<Constant*,4> CV;
if (SrcVT == MVT::f64) {
CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))));
CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
@@ -8098,7 +7973,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
- false, false, 16);
+ false, false, false, 16);
SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
// Shift sign bit right or left if the two operands have different types.
@@ -8127,7 +8002,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
MachinePointerInfo::getConstantPool(),
- false, false, 16);
+ false, false, false, 16);
SDValue Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Mask2);
// Or the value with the sign bit.
@@ -8191,8 +8066,10 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
// climbing the DAG back to the root, and it doesn't seem to be worth the
// effort.
for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
- UE = Op.getNode()->use_end(); UI != UE; ++UI)
- if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC)
+ UE = Op.getNode()->use_end(); UI != UE; ++UI)
+ if (UI->getOpcode() != ISD::CopyToReg &&
+ UI->getOpcode() != ISD::SETCC &&
+ UI->getOpcode() != ISD::STORE)
goto default_case;
if (ConstantSDNode *C =
@@ -8325,8 +8202,8 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
unsigned BitWidth = Op0.getValueSizeInBits();
unsigned AndBitWidth = And.getValueSizeInBits();
if (BitWidth > AndBitWidth) {
- APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones;
- DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones);
+ APInt Zeros, Ones;
+ DAG.ComputeMaskedBits(Op0, Zeros, Ones);
if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
return SDValue();
}
@@ -8335,11 +8212,19 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
}
} else if (Op1.getOpcode() == ISD::Constant) {
ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
+ uint64_t AndRHSVal = AndRHS->getZExtValue();
SDValue AndLHS = Op0;
- if (AndRHS->getZExtValue() == 1 && AndLHS.getOpcode() == ISD::SRL) {
+
+ if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
LHS = AndLHS.getOperand(0);
RHS = AndLHS.getOperand(1);
}
+
+ // Use BT if the immediate can't be encoded in a TEST instruction.
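+ // For example, (x & (1ULL << 40)) != 0 has no 64-bit TEST immediate
+ // encoding, but maps to a single BT with bit index 40.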
+ if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
+ LHS = AndLHS;
+ RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
+ }
}
if (LHS.getNode()) {
@@ -8466,9 +8351,8 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
if (isFP) {
unsigned SSECC = 8;
EVT EltVT = Op0.getValueType().getVectorElementType();
- assert(EltVT == MVT::f32 || EltVT == MVT::f64);
+ assert(EltVT == MVT::f32 || EltVT == MVT::f64); (void)EltVT;
- unsigned Opc = EltVT == MVT::f32 ? X86ISD::CMPPS : X86ISD::CMPPD;
bool Swap = false;
// SSE Condition code mapping:
@@ -8508,61 +8392,57 @@ SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
if (SSECC == 8) {
if (SetCCOpcode == ISD::SETUEQ) {
SDValue UNORD, EQ;
- UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8));
- EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8));
+ UNORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ DAG.getConstant(3, MVT::i8));
+ EQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ DAG.getConstant(0, MVT::i8));
return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ);
- }
- else if (SetCCOpcode == ISD::SETONE) {
+ } else if (SetCCOpcode == ISD::SETONE) {
SDValue ORD, NEQ;
- ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8));
- NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8));
+ ORD = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ DAG.getConstant(7, MVT::i8));
+ NEQ = DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ DAG.getConstant(4, MVT::i8));
return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ);
}
llvm_unreachable("Illegal FP comparison");
}
// Handle all other FP comparisons here.
- return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8));
+ return DAG.getNode(X86ISD::CMPP, dl, VT, Op0, Op1,
+ DAG.getConstant(SSECC, MVT::i8));
}
// Break 256-bit integer vector compare into smaller ones.
- if (!isFP && VT.getSizeInBits() == 256)
+ if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())
return Lower256IntVSETCC(Op, DAG);
// We are handling one of the integer comparisons here. Since SSE only has
// GT and EQ comparisons for integer, swapping operands and multiple
// operations may be required for some comparisons.
- unsigned Opc = 0, EQOpc = 0, GTOpc = 0;
+ unsigned Opc = 0;
bool Swap = false, Invert = false, FlipSigns = false;
- switch (VT.getSimpleVT().SimpleTy) {
- default: break;
- case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
- case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
- case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
- case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
- }
-
switch (SetCCOpcode) {
default: break;
case ISD::SETNE: Invert = true;
- case ISD::SETEQ: Opc = EQOpc; break;
+ case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
case ISD::SETLT: Swap = true;
- case ISD::SETGT: Opc = GTOpc; break;
+ case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
case ISD::SETGE: Swap = true;
- case ISD::SETLE: Opc = GTOpc; Invert = true; break;
+ case ISD::SETLE: Opc = X86ISD::PCMPGT; Invert = true; break;
case ISD::SETULT: Swap = true;
- case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break;
+ case ISD::SETUGT: Opc = X86ISD::PCMPGT; FlipSigns = true; break;
case ISD::SETUGE: Swap = true;
- case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break;
+ case ISD::SETULE: Opc = X86ISD::PCMPGT; FlipSigns = true; Invert = true; break;
}
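+ // FlipSigns turns an unsigned compare into a signed one: x >u y is
+ // equivalent to (x ^ SignBit) >s (y ^ SignBit), which PCMPGT handles.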
if (Swap)
std::swap(Op0, Op1);
// Check that the operation in question is available (most are plain SSE2,
// but PCMPGTQ and PCMPEQQ have different requirements).
- if (Opc == X86ISD::PCMPGTQ && !Subtarget->hasSSE42() && !Subtarget->hasAVX())
+ if (Opc == X86ISD::PCMPGT && VT == MVT::v2i64 && !Subtarget->hasSSE42())
return SDValue();
- if (Opc == X86ISD::PCMPEQQ && !Subtarget->hasSSE41() && !Subtarget->hasAVX())
+ if (Opc == X86ISD::PCMPEQ && VT == MVT::v2i64 && !Subtarget->hasSSE41())
return SDValue();
// Since SSE has no unsigned integer comparisons, we need to flip the sign
@@ -8679,8 +8559,9 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
// If condition flag is set by a X86ISD::CMP, then use it as the condition
// setting operand in place of the X86ISD::SETCC.
- if (Cond.getOpcode() == X86ISD::SETCC ||
- Cond.getOpcode() == X86ISD::SETCC_CARRY) {
+ unsigned CondOpcode = Cond.getOpcode();
+ if (CondOpcode == X86ISD::SETCC ||
+ CondOpcode == X86ISD::SETCC_CARRY) {
CC = Cond.getOperand(0);
SDValue Cmp = Cond.getOperand(1);
@@ -8697,6 +8578,39 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
Cond = Cmp;
addTest = false;
}
+ } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
+ CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
+ ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
+ Cond.getOperand(0).getValueType() != MVT::i8)) {
+ SDValue LHS = Cond.getOperand(0);
+ SDValue RHS = Cond.getOperand(1);
+ unsigned X86Opcode;
+ unsigned X86Cond;
+ SDVTList VTs;
+ switch (CondOpcode) {
+ case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
+ case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
+ case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
+ case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
+ case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
+ case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
+ default: llvm_unreachable("unexpected overflowing operator");
+ }
+ if (CondOpcode == ISD::UMULO)
+ VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
+ MVT::i32);
+ else
+ VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
+
+ SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
+
+ if (CondOpcode == ISD::UMULO)
+ Cond = X86Op.getValue(2);
+ else
+ Cond = X86Op.getValue(1);
+
+ CC = DAG.getConstant(X86Cond, MVT::i8);
+ addTest = false;
}
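// For illustration: an i32 UADDO feeding this select becomes a single
// X86ISD::ADD with result list {i32, i32 (EFLAGS)}, and the select consumes
// X86::COND_B (carry) directly, so the overflow bit never has to round-trip
// through a SETCC/TEST pair.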
if (addTest) {
@@ -8778,11 +8692,27 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Dest = Op.getOperand(2);
DebugLoc dl = Op.getDebugLoc();
SDValue CC;
+ bool Inverted = false;
if (Cond.getOpcode() == ISD::SETCC) {
- SDValue NewCond = LowerSETCC(Cond, DAG);
- if (NewCond.getNode())
- Cond = NewCond;
+ // Check for setcc([su]{add,sub,mul}o == 0).
+ if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
+ isa<ConstantSDNode>(Cond.getOperand(1)) &&
+ cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
+ Cond.getOperand(0).getResNo() == 1 &&
+ (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
+ Cond.getOperand(0).getOpcode() == ISD::UADDO ||
+ Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
+ Cond.getOperand(0).getOpcode() == ISD::USUBO ||
+ Cond.getOperand(0).getOpcode() == ISD::SMULO ||
+ Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
+ Inverted = true;
+ Cond = Cond.getOperand(0);
+ } else {
+ SDValue NewCond = LowerSETCC(Cond, DAG);
+ if (NewCond.getNode())
+ Cond = NewCond;
+ }
}
#if 0
// FIXME: LowerXALUO doesn't handle these!!
@@ -8803,8 +8733,9 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
// If condition flag is set by a X86ISD::CMP, then use it as the condition
// setting operand in place of the X86ISD::SETCC.
- if (Cond.getOpcode() == X86ISD::SETCC ||
- Cond.getOpcode() == X86ISD::SETCC_CARRY) {
+ unsigned CondOpcode = Cond.getOpcode();
+ if (CondOpcode == X86ISD::SETCC ||
+ CondOpcode == X86ISD::SETCC_CARRY) {
CC = Cond.getOperand(0);
SDValue Cmp = Cond.getOperand(1);
@@ -8825,6 +8756,43 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
break;
}
}
+ }
+ CondOpcode = Cond.getOpcode();
+ if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
+ CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
+ ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
+ Cond.getOperand(0).getValueType() != MVT::i8)) {
+ SDValue LHS = Cond.getOperand(0);
+ SDValue RHS = Cond.getOperand(1);
+ unsigned X86Opcode;
+ unsigned X86Cond;
+ SDVTList VTs;
+ switch (CondOpcode) {
+ case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
+ case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
+ case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
+ case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
+ case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
+ case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
+ default: llvm_unreachable("unexpected overflowing operator");
+ }
+ if (Inverted)
+ X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
+ if (CondOpcode == ISD::UMULO)
+ VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
+ MVT::i32);
+ else
+ VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
+
+ SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
+
+ if (CondOpcode == ISD::UMULO)
+ Cond = X86Op.getValue(2);
+ else
+ Cond = X86Op.getValue(1);
+
+ CC = DAG.getConstant(X86Cond, MVT::i8);
+ addTest = false;
} else {
unsigned CondOpc;
if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
@@ -8888,6 +8856,66 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
CC = DAG.getConstant(CCode, MVT::i8);
Cond = Cond.getOperand(0).getOperand(1);
addTest = false;
+ } else if (Cond.getOpcode() == ISD::SETCC &&
+ cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
+ // For FCMP_OEQ, we can emit two branches instead of an explicit AND
+ // instruction with a separate test. However, we only do this if this
+ // block doesn't have a fall-through edge, because this requires an
+ // explicit jmp when the condition is false.
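+ // (With ucomiss/ucomisd semantics, "ordered and equal" is ZF==1 &&
+ //  PF==0; branching away on COND_NE and then on COND_P leaves the
+ //  remaining path as exactly the OEQ case.)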
+ if (Op.getNode()->hasOneUse()) {
+ SDNode *User = *Op.getNode()->use_begin();
+ // Look for an unconditional branch following this conditional branch;
+ // we need it because implementing FCMP_OEQ requires reversing the
+ // successors.
+ if (User->getOpcode() == ISD::BR) {
+ SDValue FalseBB = User->getOperand(1);
+ SDNode *NewBR =
+ DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
+ assert(NewBR == User);
+ (void)NewBR;
+ Dest = FalseBB;
+
+ SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
+ Cond.getOperand(0), Cond.getOperand(1));
+ CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
+ Chain, Dest, CC, Cmp);
+ CC = DAG.getConstant(X86::COND_P, MVT::i8);
+ Cond = Cmp;
+ addTest = false;
+ }
+ }
+ } else if (Cond.getOpcode() == ISD::SETCC &&
+ cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
+ // For FCMP_UNE, we can emit two branches instead of an explicit AND
+ // instruction with a separate test. However, we only do this if this
+ // block doesn't have a fall-through edge, because this requires an
+ // explicit jmp when the condition is false.
+ if (Op.getNode()->hasOneUse()) {
+ SDNode *User = *Op.getNode()->use_begin();
+ // Look for an unconditional branch following this conditional branch;
+ // we need it because implementing FCMP_UNE requires reversing the
+ // successors.
+ if (User->getOpcode() == ISD::BR) {
+ SDValue FalseBB = User->getOperand(1);
+ SDNode *NewBR =
+ DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
+ assert(NewBR == User);
+ (void)NewBR;
+
+ SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
+ Cond.getOperand(0), Cond.getOperand(1));
+ CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
+ Chain, Dest, CC, Cmp);
+ CC = DAG.getConstant(X86::COND_NP, MVT::i8);
+ Cond = Cmp;
+ addTest = false;
+ Dest = FalseBB;
+ }
+ }
}
}
@@ -8926,7 +8954,7 @@ SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
assert((Subtarget->isTargetCygMing() || Subtarget->isTargetWindows() ||
- EnableSegmentedStacks) &&
+ getTargetMachine().Options.EnableSegmentedStacks) &&
"This should be used only on Windows targets or when segmented stacks "
"are being used");
assert(!Subtarget->isTargetEnvMacho() && "Not implemented");
@@ -8940,7 +8968,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
bool Is64Bit = Subtarget->is64Bit();
EVT SPTy = Is64Bit ? MVT::i64 : MVT::i32;
- if (EnableSegmentedStacks) {
+ if (getTargetMachine().Options.EnableSegmentedStacks) {
MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -9076,10 +9104,10 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
- assert(!UseSoftFloat &&
+ assert(!getTargetMachine().Options.UseSoftFloat &&
!(DAG.getMachineFunction()
.getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) &&
- Subtarget->hasXMM());
+ Subtarget->hasSSE1());
}
// Insert VAARG_64 node into the DAG
@@ -9106,7 +9134,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
Chain,
VAARG,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
}
SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
@@ -9125,6 +9153,43 @@ SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
+// getTargetVShiftNode - Handle vector element shifts where the shift amount
+// may or may not be a constant. Takes the immediate version of the shift as
+// input.
+static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT,
+ SDValue SrcOp, SDValue ShAmt,
+ SelectionDAG &DAG) {
+ assert(ShAmt.getValueType() == MVT::i32 && "ShAmt is not i32");
+
+ if (isa<ConstantSDNode>(ShAmt)) {
+ switch (Opc) {
+ default: llvm_unreachable("Unknown target vector shift node");
+ case X86ISD::VSHLI:
+ case X86ISD::VSRLI:
+ case X86ISD::VSRAI:
+ return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
+ }
+ }
+
+ // Change opcode to non-immediate version
+ switch (Opc) {
+ default: llvm_unreachable("Unknown target vector shift node");
+ case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
+ case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
+ case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
+ }
+
+ // Need to build a vector containing the shift amount.
+ // The shift amount is 32 bits, but the SSE instructions read a 64-bit
+ // amount, so fill the upper 32 bits with zero.
+ SDValue ShOps[4];
+ ShOps[0] = ShAmt;
+ ShOps[1] = DAG.getConstant(0, MVT::i32);
+ ShOps[2] = DAG.getUNDEF(MVT::i32);
+ ShOps[3] = DAG.getUNDEF(MVT::i32);
+ ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, &ShOps[0], 4);
+ ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
+ return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
+}
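+// Illustrative use (values assumed for exposition, not from this patch):
+//   SDValue Amt = DAG.getConstant(4, MVT::i32);
+//   SDValue Sh = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v4i32, R, Amt,
+//                                    DAG);
+// With a constant Amt this stays VSHLI; otherwise the amount is widened to
+// <Amt, 0, undef, undef>, bitcast to VT, and the opcode becomes X86ISD::VSHL.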
+
SDValue
X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
@@ -9159,7 +9224,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
unsigned Opc = 0;
ISD::CondCode CC = ISD::SETCC_INVALID;
switch (IntNo) {
- default: break;
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::x86_sse_comieq_ss:
case Intrinsic::x86_sse2_comieq_sd:
Opc = X86ISD::COMI;
@@ -9231,7 +9296,201 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
DAG.getConstant(X86CC, MVT::i8), Cond);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
+ // XOP comparison intrinsics
+ case Intrinsic::x86_xop_vpcomltb:
+ case Intrinsic::x86_xop_vpcomltw:
+ case Intrinsic::x86_xop_vpcomltd:
+ case Intrinsic::x86_xop_vpcomltq:
+ case Intrinsic::x86_xop_vpcomltub:
+ case Intrinsic::x86_xop_vpcomltuw:
+ case Intrinsic::x86_xop_vpcomltud:
+ case Intrinsic::x86_xop_vpcomltuq:
+ case Intrinsic::x86_xop_vpcomleb:
+ case Intrinsic::x86_xop_vpcomlew:
+ case Intrinsic::x86_xop_vpcomled:
+ case Intrinsic::x86_xop_vpcomleq:
+ case Intrinsic::x86_xop_vpcomleub:
+ case Intrinsic::x86_xop_vpcomleuw:
+ case Intrinsic::x86_xop_vpcomleud:
+ case Intrinsic::x86_xop_vpcomleuq:
+ case Intrinsic::x86_xop_vpcomgtb:
+ case Intrinsic::x86_xop_vpcomgtw:
+ case Intrinsic::x86_xop_vpcomgtd:
+ case Intrinsic::x86_xop_vpcomgtq:
+ case Intrinsic::x86_xop_vpcomgtub:
+ case Intrinsic::x86_xop_vpcomgtuw:
+ case Intrinsic::x86_xop_vpcomgtud:
+ case Intrinsic::x86_xop_vpcomgtuq:
+ case Intrinsic::x86_xop_vpcomgeb:
+ case Intrinsic::x86_xop_vpcomgew:
+ case Intrinsic::x86_xop_vpcomged:
+ case Intrinsic::x86_xop_vpcomgeq:
+ case Intrinsic::x86_xop_vpcomgeub:
+ case Intrinsic::x86_xop_vpcomgeuw:
+ case Intrinsic::x86_xop_vpcomgeud:
+ case Intrinsic::x86_xop_vpcomgeuq:
+ case Intrinsic::x86_xop_vpcomeqb:
+ case Intrinsic::x86_xop_vpcomeqw:
+ case Intrinsic::x86_xop_vpcomeqd:
+ case Intrinsic::x86_xop_vpcomeqq:
+ case Intrinsic::x86_xop_vpcomequb:
+ case Intrinsic::x86_xop_vpcomequw:
+ case Intrinsic::x86_xop_vpcomequd:
+ case Intrinsic::x86_xop_vpcomequq:
+ case Intrinsic::x86_xop_vpcomneb:
+ case Intrinsic::x86_xop_vpcomnew:
+ case Intrinsic::x86_xop_vpcomned:
+ case Intrinsic::x86_xop_vpcomneq:
+ case Intrinsic::x86_xop_vpcomneub:
+ case Intrinsic::x86_xop_vpcomneuw:
+ case Intrinsic::x86_xop_vpcomneud:
+ case Intrinsic::x86_xop_vpcomneuq:
+ case Intrinsic::x86_xop_vpcomfalseb:
+ case Intrinsic::x86_xop_vpcomfalsew:
+ case Intrinsic::x86_xop_vpcomfalsed:
+ case Intrinsic::x86_xop_vpcomfalseq:
+ case Intrinsic::x86_xop_vpcomfalseub:
+ case Intrinsic::x86_xop_vpcomfalseuw:
+ case Intrinsic::x86_xop_vpcomfalseud:
+ case Intrinsic::x86_xop_vpcomfalseuq:
+ case Intrinsic::x86_xop_vpcomtrueb:
+ case Intrinsic::x86_xop_vpcomtruew:
+ case Intrinsic::x86_xop_vpcomtrued:
+ case Intrinsic::x86_xop_vpcomtrueq:
+ case Intrinsic::x86_xop_vpcomtrueub:
+ case Intrinsic::x86_xop_vpcomtrueuw:
+ case Intrinsic::x86_xop_vpcomtrueud:
+ case Intrinsic::x86_xop_vpcomtrueuq: {
+ unsigned CC = 0;
+ unsigned Opc = 0;
+
+ switch (IntNo) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
+ case Intrinsic::x86_xop_vpcomltb:
+ case Intrinsic::x86_xop_vpcomltw:
+ case Intrinsic::x86_xop_vpcomltd:
+ case Intrinsic::x86_xop_vpcomltq:
+ CC = 0;
+ Opc = X86ISD::VPCOM;
+ break;
+ case Intrinsic::x86_xop_vpcomltub:
+ case Intrinsic::x86_xop_vpcomltuw:
+ case Intrinsic::x86_xop_vpcomltud:
+ case Intrinsic::x86_xop_vpcomltuq:
+ CC = 0;
+ Opc = X86ISD::VPCOMU;
+ break;
+ case Intrinsic::x86_xop_vpcomleb:
+ case Intrinsic::x86_xop_vpcomlew:
+ case Intrinsic::x86_xop_vpcomled:
+ case Intrinsic::x86_xop_vpcomleq:
+ CC = 1;
+ Opc = X86ISD::VPCOM;
+ break;
+ case Intrinsic::x86_xop_vpcomleub:
+ case Intrinsic::x86_xop_vpcomleuw:
+ case Intrinsic::x86_xop_vpcomleud:
+ case Intrinsic::x86_xop_vpcomleuq:
+ CC = 1;
+ Opc = X86ISD::VPCOMU;
+ break;
+ case Intrinsic::x86_xop_vpcomgtb:
+ case Intrinsic::x86_xop_vpcomgtw:
+ case Intrinsic::x86_xop_vpcomgtd:
+ case Intrinsic::x86_xop_vpcomgtq:
+ CC = 2;
+ Opc = X86ISD::VPCOM;
+ break;
+ case Intrinsic::x86_xop_vpcomgtub:
+ case Intrinsic::x86_xop_vpcomgtuw:
+ case Intrinsic::x86_xop_vpcomgtud:
+ case Intrinsic::x86_xop_vpcomgtuq:
+ CC = 2;
+ Opc = X86ISD::VPCOMU;
+ break;
+ case Intrinsic::x86_xop_vpcomgeb:
+ case Intrinsic::x86_xop_vpcomgew:
+ case Intrinsic::x86_xop_vpcomged:
+ case Intrinsic::x86_xop_vpcomgeq:
+ CC = 3;
+ Opc = X86ISD::VPCOM;
+ break;
+ case Intrinsic::x86_xop_vpcomgeub:
+ case Intrinsic::x86_xop_vpcomgeuw:
+ case Intrinsic::x86_xop_vpcomgeud:
+ case Intrinsic::x86_xop_vpcomgeuq:
+ CC = 3;
+ Opc = X86ISD::VPCOMU;
+ break;
+ case Intrinsic::x86_xop_vpcomeqb:
+ case Intrinsic::x86_xop_vpcomeqw:
+ case Intrinsic::x86_xop_vpcomeqd:
+ case Intrinsic::x86_xop_vpcomeqq:
+ CC = 4;
+ Opc = X86ISD::VPCOM;
+ break;
+ case Intrinsic::x86_xop_vpcomequb:
+ case Intrinsic::x86_xop_vpcomequw:
+ case Intrinsic::x86_xop_vpcomequd:
+ case Intrinsic::x86_xop_vpcomequq:
+ CC = 4;
+ Opc = X86ISD::VPCOMU;
+ break;
+ case Intrinsic::x86_xop_vpcomneb:
+ case Intrinsic::x86_xop_vpcomnew:
+ case Intrinsic::x86_xop_vpcomned:
+ case Intrinsic::x86_xop_vpcomneq:
+ CC = 5;
+ Opc = X86ISD::VPCOM;
+ break;
+ case Intrinsic::x86_xop_vpcomneub:
+ case Intrinsic::x86_xop_vpcomneuw:
+ case Intrinsic::x86_xop_vpcomneud:
+ case Intrinsic::x86_xop_vpcomneuq:
+ CC = 5;
+ Opc = X86ISD::VPCOMU;
+ break;
+ case Intrinsic::x86_xop_vpcomfalseb:
+ case Intrinsic::x86_xop_vpcomfalsew:
+ case Intrinsic::x86_xop_vpcomfalsed:
+ case Intrinsic::x86_xop_vpcomfalseq:
+ CC = 6;
+ Opc = X86ISD::VPCOM;
+ break;
+ case Intrinsic::x86_xop_vpcomfalseub:
+ case Intrinsic::x86_xop_vpcomfalseuw:
+ case Intrinsic::x86_xop_vpcomfalseud:
+ case Intrinsic::x86_xop_vpcomfalseuq:
+ CC = 6;
+ Opc = X86ISD::VPCOMU;
+ break;
+ case Intrinsic::x86_xop_vpcomtrueb:
+ case Intrinsic::x86_xop_vpcomtruew:
+ case Intrinsic::x86_xop_vpcomtrued:
+ case Intrinsic::x86_xop_vpcomtrueq:
+ CC = 7;
+ Opc = X86ISD::VPCOM;
+ break;
+ case Intrinsic::x86_xop_vpcomtrueub:
+ case Intrinsic::x86_xop_vpcomtrueuw:
+ case Intrinsic::x86_xop_vpcomtrueud:
+ case Intrinsic::x86_xop_vpcomtrueuq:
+ CC = 7;
+ Opc = X86ISD::VPCOMU;
+ break;
+ }
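+ // (The 3-bit CC immediate encodes lt, le, gt, ge, eq, ne, false, true as
+ //  0..7; the unsigned variants select X86ISD::VPCOMU instead.)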
+
+ SDValue LHS = Op.getOperand(1);
+ SDValue RHS = Op.getOperand(2);
+ return DAG.getNode(Opc, dl, Op.getValueType(), LHS, RHS,
+ DAG.getConstant(CC, MVT::i8));
+ }
+
// Arithmetic intrinsics.
+ case Intrinsic::x86_sse2_pmulu_dq:
+ case Intrinsic::x86_avx2_pmulu_dq:
+ return DAG.getNode(X86ISD::PMULUDQ, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_sse3_hadd_ps:
case Intrinsic::x86_sse3_hadd_pd:
case Intrinsic::x86_avx_hadd_ps_256:
@@ -9244,6 +9503,62 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
case Intrinsic::x86_avx_hsub_pd_256:
return DAG.getNode(X86ISD::FHSUB, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_ssse3_phadd_w_128:
+ case Intrinsic::x86_ssse3_phadd_d_128:
+ case Intrinsic::x86_avx2_phadd_w:
+ case Intrinsic::x86_avx2_phadd_d:
+ return DAG.getNode(X86ISD::HADD, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_ssse3_phsub_w_128:
+ case Intrinsic::x86_ssse3_phsub_d_128:
+ case Intrinsic::x86_avx2_phsub_w:
+ case Intrinsic::x86_avx2_phsub_d:
+ return DAG.getNode(X86ISD::HSUB, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_avx2_psllv_d:
+ case Intrinsic::x86_avx2_psllv_q:
+ case Intrinsic::x86_avx2_psllv_d_256:
+ case Intrinsic::x86_avx2_psllv_q_256:
+ return DAG.getNode(ISD::SHL, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_avx2_psrlv_d:
+ case Intrinsic::x86_avx2_psrlv_q:
+ case Intrinsic::x86_avx2_psrlv_d_256:
+ case Intrinsic::x86_avx2_psrlv_q_256:
+ return DAG.getNode(ISD::SRL, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_avx2_psrav_d:
+ case Intrinsic::x86_avx2_psrav_d_256:
+ return DAG.getNode(ISD::SRA, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_ssse3_pshuf_b_128:
+ case Intrinsic::x86_avx2_pshuf_b:
+ return DAG.getNode(X86ISD::PSHUFB, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_ssse3_psign_b_128:
+ case Intrinsic::x86_ssse3_psign_w_128:
+ case Intrinsic::x86_ssse3_psign_d_128:
+ case Intrinsic::x86_avx2_psign_b:
+ case Intrinsic::x86_avx2_psign_w:
+ case Intrinsic::x86_avx2_psign_d:
+ return DAG.getNode(X86ISD::PSIGN, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_sse41_insertps:
+ return DAG.getNode(X86ISD::INSERTPS, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+ case Intrinsic::x86_avx_vperm2f128_ps_256:
+ case Intrinsic::x86_avx_vperm2f128_pd_256:
+ case Intrinsic::x86_avx_vperm2f128_si_256:
+ case Intrinsic::x86_avx2_vperm2i128:
+ return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
+ case Intrinsic::x86_avx_vpermil_ps:
+ case Intrinsic::x86_avx_vpermil_pd:
+ case Intrinsic::x86_avx_vpermil_ps_256:
+ case Intrinsic::x86_avx_vpermil_pd_256:
+ return DAG.getNode(X86ISD::VPERMILP, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+
// ptest and testp intrinsics. The intrinsic these come from are designed to
// return an integer value, not just an instruction so lower it to the ptest
// or testp pattern and a setcc for the result.
@@ -9310,16 +9625,53 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
- // Fix vector shift instructions where the last operand is a non-immediate
- // i32 value.
+ // SSE/AVX shift intrinsics
+ case Intrinsic::x86_sse2_psll_w:
+ case Intrinsic::x86_sse2_psll_d:
+ case Intrinsic::x86_sse2_psll_q:
+ case Intrinsic::x86_avx2_psll_w:
+ case Intrinsic::x86_avx2_psll_d:
+ case Intrinsic::x86_avx2_psll_q:
+ return DAG.getNode(X86ISD::VSHL, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_sse2_psrl_w:
+ case Intrinsic::x86_sse2_psrl_d:
+ case Intrinsic::x86_sse2_psrl_q:
+ case Intrinsic::x86_avx2_psrl_w:
+ case Intrinsic::x86_avx2_psrl_d:
+ case Intrinsic::x86_avx2_psrl_q:
+ return DAG.getNode(X86ISD::VSRL, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
+ case Intrinsic::x86_sse2_psra_w:
+ case Intrinsic::x86_sse2_psra_d:
+ case Intrinsic::x86_avx2_psra_w:
+ case Intrinsic::x86_avx2_psra_d:
+ return DAG.getNode(X86ISD::VSRA, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2));
case Intrinsic::x86_sse2_pslli_w:
case Intrinsic::x86_sse2_pslli_d:
case Intrinsic::x86_sse2_pslli_q:
+ case Intrinsic::x86_avx2_pslli_w:
+ case Intrinsic::x86_avx2_pslli_d:
+ case Intrinsic::x86_avx2_pslli_q:
+ return getTargetVShiftNode(X86ISD::VSHLI, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2), DAG);
case Intrinsic::x86_sse2_psrli_w:
case Intrinsic::x86_sse2_psrli_d:
case Intrinsic::x86_sse2_psrli_q:
+ case Intrinsic::x86_avx2_psrli_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ return getTargetVShiftNode(X86ISD::VSRLI, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2), DAG);
case Intrinsic::x86_sse2_psrai_w:
case Intrinsic::x86_sse2_psrai_d:
+ case Intrinsic::x86_avx2_psrai_w:
+ case Intrinsic::x86_avx2_psrai_d:
+ return getTargetVShiftNode(X86ISD::VSRAI, dl, Op.getValueType(),
+ Op.getOperand(1), Op.getOperand(2), DAG);
+ // Fix vector shift instructions where the last operand is a non-immediate
+ // i32 value.
case Intrinsic::x86_mmx_pslli_w:
case Intrinsic::x86_mmx_pslli_d:
case Intrinsic::x86_mmx_pslli_q:
@@ -9333,79 +9685,40 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
return SDValue();
unsigned NewIntNo = 0;
- EVT ShAmtVT = MVT::v4i32;
switch (IntNo) {
- case Intrinsic::x86_sse2_pslli_w:
- NewIntNo = Intrinsic::x86_sse2_psll_w;
- break;
- case Intrinsic::x86_sse2_pslli_d:
- NewIntNo = Intrinsic::x86_sse2_psll_d;
+ case Intrinsic::x86_mmx_pslli_w:
+ NewIntNo = Intrinsic::x86_mmx_psll_w;
break;
- case Intrinsic::x86_sse2_pslli_q:
- NewIntNo = Intrinsic::x86_sse2_psll_q;
+ case Intrinsic::x86_mmx_pslli_d:
+ NewIntNo = Intrinsic::x86_mmx_psll_d;
break;
- case Intrinsic::x86_sse2_psrli_w:
- NewIntNo = Intrinsic::x86_sse2_psrl_w;
+ case Intrinsic::x86_mmx_pslli_q:
+ NewIntNo = Intrinsic::x86_mmx_psll_q;
break;
- case Intrinsic::x86_sse2_psrli_d:
- NewIntNo = Intrinsic::x86_sse2_psrl_d;
+ case Intrinsic::x86_mmx_psrli_w:
+ NewIntNo = Intrinsic::x86_mmx_psrl_w;
break;
- case Intrinsic::x86_sse2_psrli_q:
- NewIntNo = Intrinsic::x86_sse2_psrl_q;
+ case Intrinsic::x86_mmx_psrli_d:
+ NewIntNo = Intrinsic::x86_mmx_psrl_d;
break;
- case Intrinsic::x86_sse2_psrai_w:
- NewIntNo = Intrinsic::x86_sse2_psra_w;
+ case Intrinsic::x86_mmx_psrli_q:
+ NewIntNo = Intrinsic::x86_mmx_psrl_q;
break;
- case Intrinsic::x86_sse2_psrai_d:
- NewIntNo = Intrinsic::x86_sse2_psra_d;
+ case Intrinsic::x86_mmx_psrai_w:
+ NewIntNo = Intrinsic::x86_mmx_psra_w;
break;
- default: {
- ShAmtVT = MVT::v2i32;
- switch (IntNo) {
- case Intrinsic::x86_mmx_pslli_w:
- NewIntNo = Intrinsic::x86_mmx_psll_w;
- break;
- case Intrinsic::x86_mmx_pslli_d:
- NewIntNo = Intrinsic::x86_mmx_psll_d;
- break;
- case Intrinsic::x86_mmx_pslli_q:
- NewIntNo = Intrinsic::x86_mmx_psll_q;
- break;
- case Intrinsic::x86_mmx_psrli_w:
- NewIntNo = Intrinsic::x86_mmx_psrl_w;
- break;
- case Intrinsic::x86_mmx_psrli_d:
- NewIntNo = Intrinsic::x86_mmx_psrl_d;
- break;
- case Intrinsic::x86_mmx_psrli_q:
- NewIntNo = Intrinsic::x86_mmx_psrl_q;
- break;
- case Intrinsic::x86_mmx_psrai_w:
- NewIntNo = Intrinsic::x86_mmx_psra_w;
- break;
- case Intrinsic::x86_mmx_psrai_d:
- NewIntNo = Intrinsic::x86_mmx_psra_d;
- break;
- default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
- }
+ case Intrinsic::x86_mmx_psrai_d:
+ NewIntNo = Intrinsic::x86_mmx_psra_d;
break;
- }
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
}
// The vector shift intrinsics take 32-bit scalar shift amounts, but the
// SSE2/MMX shift instructions read a 64-bit amount. Set the upper 32 bits
// to zero.
- SDValue ShOps[4];
- ShOps[0] = ShAmt;
- ShOps[1] = DAG.getConstant(0, MVT::i32);
- if (ShAmtVT == MVT::v4i32) {
- ShOps[2] = DAG.getUNDEF(MVT::i32);
- ShOps[3] = DAG.getUNDEF(MVT::i32);
- ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 4);
- } else {
- ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
+ ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, ShAmt,
+ DAG.getConstant(0, MVT::i32));
// FIXME: this must be lowered to get rid of the invalid type.
- }
EVT VT = Op.getValueType();
ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
@@ -9432,13 +9745,13 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
- RetAddrFI, MachinePointerInfo(), false, false, 0);
+ RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
@@ -9453,7 +9766,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
return FrameAddr;
}
@@ -9685,7 +9998,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
// Load FP Control Word from stack slot
SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
- MachinePointerInfo(), false, false, 0);
+ MachinePointerInfo(), false, false, false, 0);
// Transform as necessary
SDValue CWD1 =
@@ -9745,7 +10058,8 @@ SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
return Op;
}
-SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
+SDValue X86TargetLowering::LowerCTLZ_ZERO_UNDEF(SDValue Op,
+ SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
@@ -9753,26 +10067,41 @@ SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
Op = Op.getOperand(0);
if (VT == MVT::i8) {
+ // Zero extend to i32 since there is no i8 bsr.
OpVT = MVT::i32;
Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
}
- // Issue a bsf (scan bits forward) which also sets EFLAGS.
+ // Issue a bsr (scan bits in reverse).
SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
+ Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
+
+ // And xor with NumBits-1.
+ Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
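+ // (BSR returns the index of the highest set bit; since NumBits is a power
+ //  of two, (NumBits-1) - idx == (NumBits-1) ^ idx. e.g. for i32, input
+ //  0x00010000 -> BSR = 16 -> 31 ^ 16 = 15 leading zeros.)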
+
+ if (VT == MVT::i8)
+ Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
+ return Op;
+}
+
+SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ unsigned NumBits = VT.getSizeInBits();
+ DebugLoc dl = Op.getDebugLoc();
+ Op = Op.getOperand(0);
+
+ // Issue a bsf (scan bits forward) which also sets EFLAGS.
+ SDVTList VTs = DAG.getVTList(VT, MVT::i32);
Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
// If src is zero (i.e. bsf sets ZF), returns NumBits.
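// (e.g. cttz(0x8) = 3 comes straight from BSF; cttz(0) takes the CMOV arm
//  and yields NumBits.)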
SDValue Ops[] = {
Op,
- DAG.getConstant(NumBits, OpVT),
+ DAG.getConstant(NumBits, VT),
DAG.getConstant(X86::COND_E, MVT::i8),
Op.getValue(1)
};
- Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops));
-
- if (VT == MVT::i8)
- Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
- return Op;
+ return DAG.getNode(X86ISD::CMOV, dl, VT, Ops, array_lengthof(Ops));
}
// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
@@ -9824,49 +10153,49 @@ SDValue X86TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.getSizeInBits() == 256)
+ if (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())
return Lower256IntArith(Op, DAG);
- assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply");
+ assert((VT == MVT::v2i64 || VT == MVT::v4i64) &&
+ "Only know how to lower V2I64/V4I64 multiply");
+
DebugLoc dl = Op.getDebugLoc();
- // ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32);
- // ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32);
- // ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b );
- // ulong2 AloBhi = __builtin_ia32_pmuludq128( a, Bhi );
- // ulong2 AhiBlo = __builtin_ia32_pmuludq128( Ahi, b );
+ // Ahi = psrlqi(a, 32);
+ // Bhi = psrlqi(b, 32);
//
- // AloBhi = __builtin_ia32_psllqi128( AloBhi, 32 );
- // AhiBlo = __builtin_ia32_psllqi128( AhiBlo, 32 );
+ // AloBlo = pmuludq(a, b);
+ // AloBhi = pmuludq(a, Bhi);
+ // AhiBlo = pmuludq(Ahi, b);
+
+ // AloBhi = psllqi(AloBhi, 32);
+ // AhiBlo = psllqi(AhiBlo, 32);
// return AloBlo + AloBhi + AhiBlo;
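// (Writing a = 2^32*Ahi + Alo and b = 2^32*Bhi + Blo, the product mod 2^64
//  is Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo); the 2^64*Ahi*Bhi term drops out,
//  which is exactly what three PMULUDQs plus two 32-bit left shifts give.)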
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
- SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
- A, DAG.getConstant(32, MVT::i32));
- SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
- B, DAG.getConstant(32, MVT::i32));
- SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
- A, B);
- SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
- A, Bhi);
- SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
- Ahi, B);
- AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
- AloBhi, DAG.getConstant(32, MVT::i32));
- AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
- AhiBlo, DAG.getConstant(32, MVT::i32));
+ SDValue ShAmt = DAG.getConstant(32, MVT::i32);
+
+ SDValue Ahi = DAG.getNode(X86ISD::VSRLI, dl, VT, A, ShAmt);
+ SDValue Bhi = DAG.getNode(X86ISD::VSRLI, dl, VT, B, ShAmt);
+
+ // Bit cast to 32-bit vectors for MULUDQ
+ EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 : MVT::v8i32;
+ A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
+ B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
+ Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
+ Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
+
+ SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
+ SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
+ SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
+
+ AloBhi = DAG.getNode(X86ISD::VSHLI, dl, VT, AloBhi, ShAmt);
+ AhiBlo = DAG.getNode(X86ISD::VSHLI, dl, VT, AhiBlo, ShAmt);
+
SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
- Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
- return Res;
+ return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}
SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
@@ -9877,12 +10206,183 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
SDValue Amt = Op.getOperand(1);
LLVMContext *Context = DAG.getContext();
- if (!Subtarget->hasXMMInt())
+ if (!Subtarget->hasSSE2())
return SDValue();
+ // Optimize shl/srl/sra with constant shift amount.
+ if (isSplatVector(Amt.getNode())) {
+ SDValue SclrAmt = Amt->getOperand(0);
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) {
+ uint64_t ShiftAmt = C->getZExtValue();
+
+ if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
+ (Subtarget->hasAVX2() &&
+ (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16))) {
+ if (Op.getOpcode() == ISD::SHL)
+ return DAG.getNode(X86ISD::VSHLI, dl, VT, R,
+ DAG.getConstant(ShiftAmt, MVT::i32));
+ if (Op.getOpcode() == ISD::SRL)
+ return DAG.getNode(X86ISD::VSRLI, dl, VT, R,
+ DAG.getConstant(ShiftAmt, MVT::i32));
+ if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
+ return DAG.getNode(X86ISD::VSRAI, dl, VT, R,
+ DAG.getConstant(ShiftAmt, MVT::i32));
+ }
+
+ if (VT == MVT::v16i8) {
+ if (Op.getOpcode() == ISD::SHL) {
+ // Make a large shift.
+ SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, R,
+ DAG.getConstant(ShiftAmt, MVT::i32));
+ SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
+ // Zero out the rightmost bits.
+ SmallVector<SDValue, 16> V(16,
+ DAG.getConstant(uint8_t(-1U << ShiftAmt),
+ MVT::i8));
+ return DAG.getNode(ISD::AND, dl, VT, SHL,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16));
+ }
+ if (Op.getOpcode() == ISD::SRL) {
+ // Make a large shift.
+ SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v8i16, R,
+ DAG.getConstant(ShiftAmt, MVT::i32));
+ SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
+ // Zero out the leftmost bits.
+ SmallVector<SDValue, 16> V(16,
+ DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
+ MVT::i8));
+ return DAG.getNode(ISD::AND, dl, VT, SRL,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16));
+ }
+ if (Op.getOpcode() == ISD::SRA) {
+ if (ShiftAmt == 7) {
+ // R s>> 7 === R s< 0
+ SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
+ return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
+ }
+
+ // R s>> a === ((R u>> a) ^ m) - m
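+ // (m = 128 >> a isolates the shifted-down sign bit. e.g. for a = 2 and
+ //  R = 0x84 (-124): (0x84 u>> 2) = 0x21, m = 0x20, and
+ //  (0x21 ^ 0x20) - 0x20 = 0xE1 = -31 = -124 s>> 2.)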
+ SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
+ SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
+ MVT::i8));
+ SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 16);
+ Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
+ Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
+ return Res;
+ }
+ }
+
+ if (Subtarget->hasAVX2() && VT == MVT::v32i8) {
+ if (Op.getOpcode() == ISD::SHL) {
+ // Make a large shift.
+ SDValue SHL = DAG.getNode(X86ISD::VSHLI, dl, MVT::v16i16, R,
+ DAG.getConstant(ShiftAmt, MVT::i32));
+ SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
+ // Zero out the rightmost bits.
+ SmallVector<SDValue, 32> V(32,
+ DAG.getConstant(uint8_t(-1U << ShiftAmt),
+ MVT::i8));
+ return DAG.getNode(ISD::AND, dl, VT, SHL,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32));
+ }
+ if (Op.getOpcode() == ISD::SRL) {
+ // Make a large shift.
+ SDValue SRL = DAG.getNode(X86ISD::VSRLI, dl, MVT::v16i16, R,
+ DAG.getConstant(ShiftAmt, MVT::i32));
+ SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
+ // Zero out the leftmost bits.
+ SmallVector<SDValue, 32> V(32,
+ DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
+ MVT::i8));
+ return DAG.getNode(ISD::AND, dl, VT, SRL,
+ DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32));
+ }
+ if (Op.getOpcode() == ISD::SRA) {
+ if (ShiftAmt == 7) {
+ // R s>> 7 === R s< 0
+ SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
+ return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
+ }
+
+ // R s>> a === ((R u>> a) ^ m) - m
+ SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
+ SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
+ MVT::i8));
+ SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &V[0], 32);
+ Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
+ Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
+ return Res;
+ }
+ }
+ }
+ }
+
+ // Lower SHL with variable shift amount.
+ if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
+ Op = DAG.getNode(X86ISD::VSHLI, dl, VT, Op.getOperand(1),
+ DAG.getConstant(23, MVT::i32));
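+ // (The shift amount is synthesized through the FP exponent field:
+ //  (amt << 23) + 0x3f800000 is the IEEE-754 single-precision pattern of
+ //  2^amt, so FP_TO_SINT below recovers 2^amt per lane and the final MUL
+ //  performs the shift. e.g. amt = 3 gives 0x41000000 = 8.0f.)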
+
+ const uint32_t CV[] = { 0x3f800000U, 0x3f800000U, 0x3f800000U, 0x3f800000U};
+ Constant *C = ConstantDataVector::get(*Context, CV);
+ SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
+ SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
+ MachinePointerInfo::getConstantPool(),
+ false, false, false, 16);
+
+ Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
+ Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
+ Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
+ return DAG.getNode(ISD::MUL, dl, VT, Op, R);
+ }
+ if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
+ assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
+
+ // a = a << 5;
+ Op = DAG.getNode(X86ISD::VSHLI, dl, MVT::v8i16, Op.getOperand(1),
+ DAG.getConstant(5, MVT::i32));
+ Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
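+ // (The left shift by 5 moves bit 2 of the 3-bit byte shift amount into
+ //  bit 7; each VSELECT round below tests that bit through the 0x80 mask
+ //  and conditionally shifts by 4, then 2, then 1, while "a += a" exposes
+ //  the next amount bit.)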
+
+ // Turn 'a' into a mask suitable for VSELECT
+ SDValue VSelM = DAG.getConstant(0x80, VT);
+ SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
+ OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
+
+ SDValue CM1 = DAG.getConstant(0x0f, VT);
+ SDValue CM2 = DAG.getConstant(0x3f, VT);
+
+ // r = VSELECT(r, psllw(r & (char16)15, 4), a);
+ SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
+ M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M,
+ DAG.getConstant(4, MVT::i32), DAG);
+ M = DAG.getNode(ISD::BITCAST, dl, VT, M);
+ R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
+
+ // a += a
+ Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
+ OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
+ OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
+
+ // r = VSELECT(r, psllw(r & (char16)63, 2), a);
+ M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
+ M = getTargetVShiftNode(X86ISD::VSHLI, dl, MVT::v8i16, M,
+ DAG.getConstant(2, MVT::i32), DAG);
+ M = DAG.getNode(ISD::BITCAST, dl, VT, M);
+ R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
+
+ // a += a
+ Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
+ OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
+ OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
+
+ // return VSELECT(r, r+r, a);
+ R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
+ DAG.getNode(ISD::ADD, dl, VT, R, R), R);
+ return R;
+ }
+
// Decompose 256-bit shifts into smaller 128-bit shifts.
if (VT.getSizeInBits() == 256) {
- int NumElems = VT.getVectorNumElements();
+ unsigned NumElems = VT.getVectorNumElements();
MVT EltVT = VT.getVectorElementType().getSimpleVT();
EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
@@ -9897,9 +10397,9 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
// Constant shift amount
SmallVector<SDValue, 4> Amt1Csts;
SmallVector<SDValue, 4> Amt2Csts;
- for (int i = 0; i < NumElems/2; ++i)
+ for (unsigned i = 0; i != NumElems/2; ++i)
Amt1Csts.push_back(Amt->getOperand(i));
- for (int i = NumElems/2; i < NumElems; ++i)
+ for (unsigned i = NumElems/2; i != NumElems; ++i)
Amt2Csts.push_back(Amt->getOperand(i));
Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT,
@@ -9921,120 +10421,6 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
}
- // Optimize shl/srl/sra with constant shift amount.
- if (isSplatVector(Amt.getNode())) {
- SDValue SclrAmt = Amt->getOperand(0);
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SclrAmt)) {
- uint64_t ShiftAmt = C->getZExtValue();
-
- if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SHL)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
- R, DAG.getConstant(ShiftAmt, MVT::i32));
-
- if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SHL)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
- R, DAG.getConstant(ShiftAmt, MVT::i32));
-
- if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SHL)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
- R, DAG.getConstant(ShiftAmt, MVT::i32));
-
- if (VT == MVT::v2i64 && Op.getOpcode() == ISD::SRL)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
- R, DAG.getConstant(ShiftAmt, MVT::i32));
-
- if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRL)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
- R, DAG.getConstant(ShiftAmt, MVT::i32));
-
- if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRL)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
- R, DAG.getConstant(ShiftAmt, MVT::i32));
-
- if (VT == MVT::v4i32 && Op.getOpcode() == ISD::SRA)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
- R, DAG.getConstant(ShiftAmt, MVT::i32));
-
- if (VT == MVT::v8i16 && Op.getOpcode() == ISD::SRA)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
- R, DAG.getConstant(ShiftAmt, MVT::i32));
- }
- }
-
- // Lower SHL with variable shift amount.
- if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
- Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
- Op.getOperand(1), DAG.getConstant(23, MVT::i32));
-
- ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U));
-
- std::vector<Constant*> CV(4, CI);
- Constant *C = ConstantVector::get(CV);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
- SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(),
- false, false, 16);
-
- Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
- Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
- Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
- return DAG.getNode(ISD::MUL, dl, VT, Op, R);
- }
- if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
- // a = a << 5;
- Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
- Op.getOperand(1), DAG.getConstant(5, MVT::i32));
-
- ConstantInt *CM1 = ConstantInt::get(*Context, APInt(8, 15));
- ConstantInt *CM2 = ConstantInt::get(*Context, APInt(8, 63));
-
- std::vector<Constant*> CVM1(16, CM1);
- std::vector<Constant*> CVM2(16, CM2);
- Constant *C = ConstantVector::get(CVM1);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
- SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(),
- false, false, 16);
-
- // r = pblendv(r, psllw(r & (char16)15, 4), a);
- M = DAG.getNode(ISD::AND, dl, VT, R, M);
- M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
- DAG.getConstant(4, MVT::i32));
- R = DAG.getNode(ISD::VSELECT, dl, VT, Op, R, M);
- // a += a
- Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
-
- C = ConstantVector::get(CVM2);
- CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
- M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(),
- false, false, 16);
-
- // r = pblendv(r, psllw(r & (char16)63, 2), a);
- M = DAG.getNode(ISD::AND, dl, VT, R, M);
- M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
- DAG.getConstant(2, MVT::i32));
- R = DAG.getNode(ISD::VSELECT, dl, VT, Op, R, M);
- // a += a
- Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
-
- // return pblendv(r, r+r, a);
- R = DAG.getNode(ISD::VSELECT, dl, VT, Op,
- R, DAG.getNode(ISD::ADD, dl, VT, R, R));
- return R;
- }
return SDValue();
}
@@ -10113,46 +10499,58 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
-SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const{
+SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
- SDNode* Node = Op.getNode();
- EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
- EVT VT = Node->getValueType(0);
- if (Subtarget->hasXMMInt() && VT.isVector()) {
- unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
- ExtraVT.getScalarType().getSizeInBits();
- SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32);
-
- unsigned SHLIntrinsicsID = 0;
- unsigned SRAIntrinsicsID = 0;
- switch (VT.getSimpleVT().SimpleTy) {
- default:
- return SDValue();
- case MVT::v4i32: {
- SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_d;
- SRAIntrinsicsID = Intrinsic::x86_sse2_psrai_d;
- break;
- }
- case MVT::v8i16: {
- SHLIntrinsicsID = Intrinsic::x86_sse2_pslli_w;
- SRAIntrinsicsID = Intrinsic::x86_sse2_psrai_w;
- break;
- }
- }
+ EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ EVT VT = Op.getValueType();
- SDValue Tmp1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(SHLIntrinsicsID, MVT::i32),
- Node->getOperand(0), ShAmt);
+ if (!Subtarget->hasSSE2() || !VT.isVector())
+ return SDValue();
- // In case of 1 bit sext, no need to shr
- if (ExtraVT.getScalarType().getSizeInBits() == 1) return Tmp1;
+ unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
+ ExtraVT.getScalarType().getSizeInBits();
+ SDValue ShAmt = DAG.getConstant(BitsDiff, MVT::i32);
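+ // (The sext_inreg is realized below as shl by BitsDiff followed by an
+ //  arithmetic shr by the same amount; e.g. sign-extending i16 lanes inside
+ //  v4i32 shifts by 32 - 16 = 16.)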
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(SRAIntrinsicsID, MVT::i32),
- Tmp1, ShAmt);
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return SDValue();
+ case MVT::v8i32:
+ case MVT::v16i16:
+ if (!Subtarget->hasAVX())
+ return SDValue();
+ if (!Subtarget->hasAVX2()) {
+ // Needs to be split into two 128-bit halves.
+ int NumElems = VT.getVectorNumElements();
+ SDValue Idx0 = DAG.getConstant(0, MVT::i32);
+ SDValue Idx1 = DAG.getConstant(NumElems/2, MVT::i32);
+
+ // Extract the LHS vectors
+ SDValue LHS = Op.getOperand(0);
+ SDValue LHS1 = Extract128BitVector(LHS, Idx0, DAG, dl);
+ SDValue LHS2 = Extract128BitVector(LHS, Idx1, DAG, dl);
+
+ MVT EltVT = VT.getVectorElementType().getSimpleVT();
+ EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
+
+ EVT ExtraEltVT = ExtraVT.getVectorElementType();
+ int ExtraNumElems = ExtraVT.getVectorNumElements();
+ ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
+ ExtraNumElems/2);
+ SDValue Extra = DAG.getValueType(ExtraVT);
+
+ LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
+ LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
+ }
+ // fall through
+ case MVT::v4i32:
+ case MVT::v8i16: {
+ SDValue Tmp1 = getTargetVShiftNode(X86ISD::VSHLI, dl, VT,
+ Op.getOperand(0), ShAmt, DAG);
+ return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, Tmp1, ShAmt, DAG);
+ }
}
-
- return SDValue();
}
@@ -10161,7 +10559,7 @@ SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
// Go ahead and emit the fence on x86-64 even if we asked for no-sse2.
// There isn't any reason to disable it if the target processor supports it.
- if (!Subtarget->hasXMMInt() && !Subtarget->is64Bit()) {
+ if (!Subtarget->hasSSE2() && !Subtarget->is64Bit()) {
SDValue Chain = Op.getOperand(0);
SDValue Zero = DAG.getConstant(0, MVT::i32);
SDValue Ops[] = {
@@ -10215,7 +10613,7 @@ SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op,
// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
// no-sse2). There isn't any reason to disable it if the target processor
// supports it.
- if (Subtarget->hasXMMInt() || Subtarget->is64Bit())
+ if (Subtarget->hasSSE2() || Subtarget->is64Bit())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
SDValue Chain = Op.getOperand(0);
@@ -10246,8 +10644,7 @@ SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
unsigned Reg = 0;
unsigned size = 0;
switch(T.getSimpleVT().SimpleTy) {
- default:
- assert(false && "Invalid value type!");
+ default: llvm_unreachable("Invalid value type!");
case MVT::i8: Reg = X86::AL; size = 1; break;
case MVT::i16: Reg = X86::AX; size = 2; break;
case MVT::i32: Reg = X86::EAX; size = 4; break;
@@ -10295,7 +10692,7 @@ SDValue X86TargetLowering::LowerBITCAST(SDValue Op,
SelectionDAG &DAG) const {
EVT SrcVT = Op.getOperand(0).getValueType();
EVT DstVT = Op.getValueType();
- assert(Subtarget->is64Bit() && !Subtarget->hasXMMInt() &&
+ assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
Subtarget->hasMMX() && "Unexpected custom BITCAST");
assert((DstVT == MVT::i64 ||
(DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
@@ -10365,7 +10762,7 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
unsigned Opc;
bool ExtraOp = false;
switch (Op.getOpcode()) {
- default: assert(0 && "Invalid code");
+ default: llvm_unreachable("Invalid code");
case ISD::ADDC: Opc = X86ISD::ADD; break;
case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
case ISD::SUBC: Opc = X86ISD::SUB; break;
@@ -10432,6 +10829,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
case ISD::CTLZ: return LowerCTLZ(Op, DAG);
+ case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
case ISD::CTTZ: return LowerCTTZ(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
case ISD::SRA:
@@ -10506,8 +10904,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
DebugLoc dl = N->getDebugLoc();
switch (N->getOpcode()) {
default:
- assert(false && "Do not know how to custom type legalize this operation!");
- return;
+ llvm_unreachable("Do not know how to custom type legalize this operation!");
case ISD::SIGN_EXTEND_INREG:
case ISD::ADDC:
case ISD::ADDE:
@@ -10515,15 +10912,25 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::SUBE:
// We don't want to expand or promote these.
return;
- case ISD::FP_TO_SINT: {
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT: {
+ bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
+
+ if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
+ return;
+
std::pair<SDValue,SDValue> Vals =
- FP_TO_INTHelper(SDValue(N, 0), DAG, true);
+ FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
SDValue FIST = Vals.first, StackSlot = Vals.second;
if (FIST.getNode() != 0) {
EVT VT = N->getValueType(0);
// Return a load from the stack slot.
- Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
- MachinePointerInfo(), false, false, 0));
+ if (StackSlot.getNode() != 0)
+ Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
+ MachinePointerInfo(),
+ false, false, false, 0));
+ else
+ Results.push_back(FIST);
}
return;
}
@@ -10657,15 +11064,19 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
case X86ISD::ANDNP: return "X86ISD::ANDNP";
- case X86ISD::PSIGNB: return "X86ISD::PSIGNB";
- case X86ISD::PSIGNW: return "X86ISD::PSIGNW";
- case X86ISD::PSIGND: return "X86ISD::PSIGND";
+ case X86ISD::PSIGN: return "X86ISD::PSIGN";
+ case X86ISD::BLENDV: return "X86ISD::BLENDV";
+ case X86ISD::BLENDPW: return "X86ISD::BLENDPW";
+ case X86ISD::BLENDPS: return "X86ISD::BLENDPS";
+ case X86ISD::BLENDPD: return "X86ISD::BLENDPD";
+ case X86ISD::HADD: return "X86ISD::HADD";
+ case X86ISD::HSUB: return "X86ISD::HSUB";
+ case X86ISD::FHADD: return "X86ISD::FHADD";
+ case X86ISD::FHSUB: return "X86ISD::FHSUB";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
- case X86ISD::FHADD: return "X86ISD::FHADD";
- case X86ISD::FHSUB: return "X86ISD::FHSUB";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
@@ -10681,18 +11092,17 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::ATOMNAND64_DAG: return "X86ISD::ATOMNAND64_DAG";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
+ case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
+ case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
case X86ISD::VSHL: return "X86ISD::VSHL";
case X86ISD::VSRL: return "X86ISD::VSRL";
- case X86ISD::CMPPD: return "X86ISD::CMPPD";
- case X86ISD::CMPPS: return "X86ISD::CMPPS";
- case X86ISD::PCMPEQB: return "X86ISD::PCMPEQB";
- case X86ISD::PCMPEQW: return "X86ISD::PCMPEQW";
- case X86ISD::PCMPEQD: return "X86ISD::PCMPEQD";
- case X86ISD::PCMPEQQ: return "X86ISD::PCMPEQQ";
- case X86ISD::PCMPGTB: return "X86ISD::PCMPGTB";
- case X86ISD::PCMPGTW: return "X86ISD::PCMPGTW";
- case X86ISD::PCMPGTD: return "X86ISD::PCMPGTD";
- case X86ISD::PCMPGTQ: return "X86ISD::PCMPGTQ";
+ case X86ISD::VSRA: return "X86ISD::VSRA";
+ case X86ISD::VSHLI: return "X86ISD::VSHLI";
+ case X86ISD::VSRLI: return "X86ISD::VSRLI";
+ case X86ISD::VSRAI: return "X86ISD::VSRAI";
+ case X86ISD::CMPP: return "X86ISD::CMPP";
+ case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
+ case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
case X86ISD::ADD: return "X86ISD::ADD";
case X86ISD::SUB: return "X86ISD::SUB";
case X86ISD::ADC: return "X86ISD::ADC";
@@ -10705,54 +11115,39 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::XOR: return "X86ISD::XOR";
case X86ISD::AND: return "X86ISD::AND";
case X86ISD::ANDN: return "X86ISD::ANDN";
+ case X86ISD::BLSI: return "X86ISD::BLSI";
+ case X86ISD::BLSMSK: return "X86ISD::BLSMSK";
+ case X86ISD::BLSR: return "X86ISD::BLSR";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::TESTP: return "X86ISD::TESTP";
case X86ISD::PALIGN: return "X86ISD::PALIGN";
case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
- case X86ISD::PSHUFHW_LD: return "X86ISD::PSHUFHW_LD";
case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
- case X86ISD::PSHUFLW_LD: return "X86ISD::PSHUFLW_LD";
- case X86ISD::SHUFPS: return "X86ISD::SHUFPS";
- case X86ISD::SHUFPD: return "X86ISD::SHUFPD";
+ case X86ISD::SHUFP: return "X86ISD::SHUFP";
case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
- case X86ISD::MOVHLPD: return "X86ISD::MOVHLPD";
case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
- case X86ISD::MOVSHDUP_LD: return "X86ISD::MOVSHDUP_LD";
- case X86ISD::MOVSLDUP_LD: return "X86ISD::MOVSLDUP_LD";
case X86ISD::MOVSD: return "X86ISD::MOVSD";
case X86ISD::MOVSS: return "X86ISD::MOVSS";
- case X86ISD::UNPCKLPS: return "X86ISD::UNPCKLPS";
- case X86ISD::UNPCKLPD: return "X86ISD::UNPCKLPD";
- case X86ISD::VUNPCKLPDY: return "X86ISD::VUNPCKLPDY";
- case X86ISD::UNPCKHPS: return "X86ISD::UNPCKHPS";
- case X86ISD::UNPCKHPD: return "X86ISD::UNPCKHPD";
- case X86ISD::PUNPCKLBW: return "X86ISD::PUNPCKLBW";
- case X86ISD::PUNPCKLWD: return "X86ISD::PUNPCKLWD";
- case X86ISD::PUNPCKLDQ: return "X86ISD::PUNPCKLDQ";
- case X86ISD::PUNPCKLQDQ: return "X86ISD::PUNPCKLQDQ";
- case X86ISD::PUNPCKHBW: return "X86ISD::PUNPCKHBW";
- case X86ISD::PUNPCKHWD: return "X86ISD::PUNPCKHWD";
- case X86ISD::PUNPCKHDQ: return "X86ISD::PUNPCKHDQ";
- case X86ISD::PUNPCKHQDQ: return "X86ISD::PUNPCKHQDQ";
+ case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
+ case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
- case X86ISD::VPERMILPS: return "X86ISD::VPERMILPS";
- case X86ISD::VPERMILPSY: return "X86ISD::VPERMILPSY";
- case X86ISD::VPERMILPD: return "X86ISD::VPERMILPD";
- case X86ISD::VPERMILPDY: return "X86ISD::VPERMILPDY";
- case X86ISD::VPERM2F128: return "X86ISD::VPERM2F128";
+ case X86ISD::VPERMILP: return "X86ISD::VPERMILP";
+ case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
+ case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
+ case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
}
}
@@ -10855,21 +11250,21 @@ X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
EVT VT) const {
// Very little shuffling can be done for 64-bit vectors right now.
if (VT.getSizeInBits() == 64)
- return isPALIGNRMask(M, VT, Subtarget->hasSSSE3() || Subtarget->hasAVX());
+ return false;
// FIXME: pshufb, blends, shifts.
return (VT.getVectorNumElements() == 2 ||
ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
isMOVLMask(M, VT) ||
- isSHUFPMask(M, VT) ||
+ isSHUFPMask(M, VT, Subtarget->hasAVX()) ||
isPSHUFDMask(M, VT) ||
isPSHUFHWMask(M, VT) ||
isPSHUFLWMask(M, VT) ||
- isPALIGNRMask(M, VT, Subtarget->hasSSSE3() || Subtarget->hasAVX()) ||
- isUNPCKLMask(M, VT) ||
- isUNPCKHMask(M, VT) ||
- isUNPCKL_v_undef_Mask(M, VT) ||
- isUNPCKH_v_undef_Mask(M, VT));
+ isPALIGNRMask(M, VT, Subtarget) ||
+ isUNPCKLMask(M, VT, Subtarget->hasAVX2()) ||
+ isUNPCKHMask(M, VT, Subtarget->hasAVX2()) ||
+ isUNPCKL_v_undef_Mask(M, VT, Subtarget->hasAVX2()) ||
+ isUNPCKH_v_undef_Mask(M, VT, Subtarget->hasAVX2()));
}
bool
@@ -10882,8 +11277,8 @@ X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
if (NumElts == 4 && VT.getSizeInBits() == 128) {
return (isMOVLMask(Mask, VT) ||
isCommutedMOVLMask(Mask, VT, true) ||
- isSHUFPMask(Mask, VT) ||
- isCommutedSHUFPMask(Mask, VT));
+ isSHUFPMask(Mask, VT, Subtarget->hasAVX()) ||
+ isSHUFPMask(Mask, VT, Subtarget->hasAVX(), /* Commuted */ true));
}
return false;
}
@@ -10902,7 +11297,7 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
unsigned CXchgOpc,
unsigned notOpc,
unsigned EAXreg,
- TargetRegisterClass *RC,
+ const TargetRegisterClass *RC,
bool invSrc) const {
// For the atomic bitwise operator, we generate
// thisMBB:
@@ -11274,7 +11669,7 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
MachineBasicBlock *
X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
unsigned numArgs, bool memArg) const {
- assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) &&
+ assert(Subtarget->hasSSE42() &&
"Target must have SSE4.2 or AVX features enabled");
DebugLoc dl = MI->getDebugLoc();
@@ -11679,6 +12074,42 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
return EndMBB;
}
+// The EFLAGS operand of SelectItr might be missing a kill marker
+// because there were multiple uses of EFLAGS, and ISel didn't know
+// which to mark. Figure out whether SelectItr should have had a
+// kill marker, and set it if it should. Returns the correct kill
+// marker value.
+static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
+ MachineBasicBlock* BB,
+ const TargetRegisterInfo* TRI) {
+ // Scan forward through BB for a use/def of EFLAGS.
+ MachineBasicBlock::iterator miI(llvm::next(SelectItr));
+ for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
+ const MachineInstr& mi = *miI;
+ if (mi.readsRegister(X86::EFLAGS))
+ return false;
+ if (mi.definesRegister(X86::EFLAGS))
+ break; // Should have kill-flag - update below.
+ }
+
+ // If we hit the end of the block, check whether EFLAGS is live into a
+ // successor.
+ if (miI == BB->end()) {
+ for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
+ sEnd = BB->succ_end();
+ sItr != sEnd; ++sItr) {
+ MachineBasicBlock* succ = *sItr;
+ if (succ->isLiveIn(X86::EFLAGS))
+ return false;
+ }
+ }
+
+ // We found a def, or hit the end of the basic block and EFLAGS wasn't live
+  // out. SelectItr should have a kill flag on EFLAGS.
+ SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
+ return true;
+}
+
MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
MachineBasicBlock *BB) const {
@@ -11708,7 +12139,9 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
- if (!MI->killsRegister(X86::EFLAGS)) {
+ const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo();
+ if (!MI->killsRegister(X86::EFLAGS) &&
+ !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
copy0MBB->addLiveIn(X86::EFLAGS);
sinkMBB->addLiveIn(X86::EFLAGS);
}
@@ -11753,7 +12186,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
MachineFunction *MF = BB->getParent();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
- assert(EnableSegmentedStacks);
+ assert(getTargetMachine().Options.EnableSegmentedStacks);
unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
unsigned TlsOffset = Is64Bit ? 0x70 : 0x30;
@@ -11785,6 +12218,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
+ SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
sizeVReg = MI->getOperand(1).getReg(),
physSPReg = Is64Bit ? X86::RSP : X86::ESP;
@@ -11802,33 +12236,39 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
// Add code to the main basic block to check if the stack limit has been hit,
// and if so, jump to mallocMBB otherwise to bumpMBB.
BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
- BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), tmpSPVReg)
+ BuildMI(BB, DL, TII->get(Is64Bit ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
.addReg(tmpSPVReg).addReg(sizeVReg);
BuildMI(BB, DL, TII->get(Is64Bit ? X86::CMP64mr:X86::CMP32mr))
- .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg)
- .addReg(tmpSPVReg);
+ .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
+ .addReg(SPLimitVReg);
BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB);
// bumpMBB simply decreases the stack pointer, since we know the current
// stacklet has enough space.
BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
- .addReg(tmpSPVReg);
+ .addReg(SPLimitVReg);
BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
- .addReg(tmpSPVReg);
+ .addReg(SPLimitVReg);
BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);
// Calls into a routine in libgcc to allocate more space from the heap.
+ const uint32_t *RegMask =
+ getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C);
if (Is64Bit) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
.addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
- .addExternalSymbol("__morestack_allocate_stack_space").addReg(X86::RDI);
+ .addExternalSymbol("__morestack_allocate_stack_space").addReg(X86::RDI)
+ .addRegMask(RegMask)
+ .addReg(X86::RAX, RegState::ImplicitDefine);
} else {
BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
.addImm(12);
BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
- .addExternalSymbol("__morestack_allocate_stack_space");
+ .addExternalSymbol("__morestack_allocate_stack_space")
+ .addRegMask(RegMask)
+ .addReg(X86::EAX, RegState::ImplicitDefine);
}
if (!Is64Bit)
@@ -11926,6 +12366,11 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
assert(MI->getOperand(3).isGlobal() && "This should be a global");
+ // Get a register mask for the lowered call.
+ // FIXME: The 32-bit calls have non-standard calling conventions. Use a
+ // proper register mask.
+ const uint32_t *RegMask =
+ getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C);
if (Subtarget->is64Bit()) {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV64rm), X86::RDI)
@@ -11936,6 +12381,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI);
+ MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
} else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV32rm), X86::EAX)
@@ -11946,6 +12392,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
+ MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
} else {
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
TII->get(X86::MOV32rm), X86::EAX)
@@ -11956,6 +12403,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
+ MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
}
MI->eraseFromParent(); // The pseudo instruction is gone now.
@@ -11966,30 +12414,14 @@ MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
switch (MI->getOpcode()) {
- default: assert(0 && "Unexpected instr type to insert");
+ default: llvm_unreachable("Unexpected instr type to insert");
case X86::TAILJMPd64:
case X86::TAILJMPr64:
case X86::TAILJMPm64:
- assert(0 && "TAILJMP64 would not be touched here.");
+ llvm_unreachable("TAILJMP64 would not be touched here.");
case X86::TCRETURNdi64:
case X86::TCRETURNri64:
case X86::TCRETURNmi64:
- // Defs of TCRETURNxx64 has Win64's callee-saved registers, as subset.
- // On AMD64, additional defs should be added before register allocation.
- if (!Subtarget->isTargetWin64()) {
- MI->addRegisterDefined(X86::RSI);
- MI->addRegisterDefined(X86::RDI);
- MI->addRegisterDefined(X86::XMM6);
- MI->addRegisterDefined(X86::XMM7);
- MI->addRegisterDefined(X86::XMM8);
- MI->addRegisterDefined(X86::XMM9);
- MI->addRegisterDefined(X86::XMM10);
- MI->addRegisterDefined(X86::XMM11);
- MI->addRegisterDefined(X86::XMM12);
- MI->addRegisterDefined(X86::XMM13);
- MI->addRegisterDefined(X86::XMM14);
- MI->addRegisterDefined(X86::XMM15);
- }
return BB;
case X86::WIN_ALLOCA:
return EmitLoweredWinAlloca(MI, BB);
@@ -12294,11 +12726,11 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
//===----------------------------------------------------------------------===//
void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
+ unsigned BitWidth = KnownZero.getBitWidth();
unsigned Opc = Op.getOpcode();
assert((Opc >= ISD::BUILTIN_OP_END ||
Opc == ISD::INTRINSIC_WO_CHAIN ||
@@ -12307,7 +12739,7 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); // Don't know anything.
+ KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
switch (Opc) {
default: break;
case X86ISD::ADD:
@@ -12326,8 +12758,7 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
break;
// Fallthrough
case X86ISD::SETCC:
- KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
- Mask.getBitWidth() - 1);
+ KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
break;
case ISD::INTRINSIC_WO_CHAIN: {
unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -12339,18 +12770,20 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
case Intrinsic::x86_sse2_movmsk_pd:
case Intrinsic::x86_avx_movmsk_pd_256:
case Intrinsic::x86_mmx_pmovmskb:
- case Intrinsic::x86_sse2_pmovmskb_128: {
+ case Intrinsic::x86_sse2_pmovmskb_128:
+ case Intrinsic::x86_avx2_pmovmskb: {
// High bits of movmskp{s|d}, pmovmskb are known zero.
switch (IntId) {
+ default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
+ case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
}
- KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
- Mask.getBitWidth() - NumLoBits);
+ KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
break;
}
}
@@ -12418,7 +12851,8 @@ static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget* Subtarget) {
DebugLoc dl = N->getDebugLoc();
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
SDValue V1 = SVOp->getOperand(0);
@@ -12454,9 +12888,23 @@ static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
!isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
return SDValue();
+ // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
+ if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
+ SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
+ SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
+ SDValue ResNode =
+ DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2,
+ Ld->getMemoryVT(),
+ Ld->getPointerInfo(),
+ Ld->getAlignment(),
+ false/*isVolatile*/, true/*ReadMem*/,
+ false/*WriteMem*/);
+ return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
+ }
+
// Emit a zeroed vector and insert the desired subvector on its
// first half.
- SDValue Zeros = getZeroVector(VT, true /* HasXMMInt */, DAG, dl);
+ SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0),
DAG.getConstant(0, MVT::i32), DAG, dl);
return DCI.CombineTo(N, InsV);
@@ -12501,7 +12949,7 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
// Combine 256-bit vector shuffles. This is only profitable when in AVX mode
if (Subtarget->hasAVX() && VT.getSizeInBits() == 256 &&
N->getOpcode() == ISD::VECTOR_SHUFFLE)
- return PerformShuffleCombine256(N, DAG, DCI);
+ return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
// Only handle 128 wide vector from here on.
if (VT.getSizeInBits() != 128)
@@ -12517,11 +12965,185 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return EltsFromConsecutiveLoads(VT, Elts, dl, DAG);
}
+
+/// PerformTruncateCombine - Converts a truncate operation into
+/// a sequence of vector shuffle operations.
+/// This is possible when we truncate a 256-bit vector to a 128-bit vector.
+SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
+ DAGCombinerInfo &DCI) const {
+ if (!DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ if (!Subtarget->hasAVX()) return SDValue();
+
+ EVT VT = N->getValueType(0);
+ SDValue Op = N->getOperand(0);
+ EVT OpVT = Op.getValueType();
+ DebugLoc dl = N->getDebugLoc();
+
+ if ((VT == MVT::v4i32) && (OpVT == MVT::v4i64)) {
+
+ SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
+ DAG.getIntPtrConstant(0));
+
+ SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
+ DAG.getIntPtrConstant(2));
+
+ OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo);
+ OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi);
+
+ // PSHUFD
+ int ShufMask1[] = {0, 2, 0, 0};
+
+ OpLo = DAG.getVectorShuffle(VT, dl, OpLo, DAG.getUNDEF(VT),
+ ShufMask1);
+ OpHi = DAG.getVectorShuffle(VT, dl, OpHi, DAG.getUNDEF(VT),
+ ShufMask1);
+
+ // MOVLHPS
+ int ShufMask2[] = {0, 1, 4, 5};
+
+ return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2);
+ }
+ if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) {
+
+ SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op,
+ DAG.getIntPtrConstant(0));
+
+ SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op,
+ DAG.getIntPtrConstant(4));
+
+ OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLo);
+ OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi);
+
+ // PSHUFB
+ int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
+ -1, -1, -1, -1, -1, -1, -1, -1};
+
+ OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo,
+ DAG.getUNDEF(MVT::v16i8),
+ ShufMask1);
+ OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi,
+ DAG.getUNDEF(MVT::v16i8),
+ ShufMask1);
+
+ OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo);
+ OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi);
+
+ // MOVLHPS
+ int ShufMask2[] = {0, 1, 4, 5};
+
+ SDValue res = DAG.getVectorShuffle(MVT::v4i32, dl, OpLo, OpHi, ShufMask2);
+ return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, res);
+ }
+
+ return SDValue();
+}
+
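A minimal standalone sketch (not LLVM code) of what the v4i64 -> v4i32 path above computes, with scalar arrays standing in for vector registers; the constants mirror the PSHUFD {0, 2, 0, 0} and MOVLHPS {0, 1, 4, 5} masks:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t In[4] = {0x1111111122222222ULL, 0x3333333344444444ULL,
                    0x5555555566666666ULL, 0x7777777788888888ULL};
  // EXTRACT_SUBVECTOR + BITCAST: view each v2i64 half as v4i32.
  uint32_t Lo[4], Hi[4];
  for (int i = 0; i < 2; ++i) {
    Lo[2*i] = (uint32_t)In[i];    Lo[2*i+1] = (uint32_t)(In[i] >> 32);
    Hi[2*i] = (uint32_t)In[i+2];  Hi[2*i+1] = (uint32_t)(In[i+2] >> 32);
  }
  // PSHUFD {0, 2, 0, 0}: gather the low dword of each qword.
  uint32_t SLo[4] = {Lo[0], Lo[2], Lo[0], Lo[0]};
  uint32_t SHi[4] = {Hi[0], Hi[2], Hi[0], Hi[0]};
  // MOVLHPS {0, 1, 4, 5}: concatenate the two low halves.
  uint32_t Out[4] = {SLo[0], SLo[1], SHi[0], SHi[1]};
  for (int i = 0; i < 4; ++i)
    assert(Out[i] == (uint32_t)In[i]); // element-wise truncation
}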
+/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
+/// specific shuffle of a load can be folded into a single element load.
+/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
+/// shuffles have been custom lowered, so we need to handle those here.
+static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ SDValue InVec = N->getOperand(0);
+ SDValue EltNo = N->getOperand(1);
+
+ if (!isa<ConstantSDNode>(EltNo))
+ return SDValue();
+
+ EVT VT = InVec.getValueType();
+
+ bool HasShuffleIntoBitcast = false;
+ if (InVec.getOpcode() == ISD::BITCAST) {
+ // Don't duplicate a load with other uses.
+ if (!InVec.hasOneUse())
+ return SDValue();
+ EVT BCVT = InVec.getOperand(0).getValueType();
+ if (BCVT.getVectorNumElements() != VT.getVectorNumElements())
+ return SDValue();
+ InVec = InVec.getOperand(0);
+ HasShuffleIntoBitcast = true;
+ }
+
+ if (!isTargetShuffle(InVec.getOpcode()))
+ return SDValue();
+
+ // Don't duplicate a load with other uses.
+ if (!InVec.hasOneUse())
+ return SDValue();
+
+ SmallVector<int, 16> ShuffleMask;
+ bool UnaryShuffle;
+ if (!getTargetShuffleMask(InVec.getNode(), VT, ShuffleMask, UnaryShuffle))
+ return SDValue();
+
+  // Select the input vector, guarding against an out-of-range extract index.
+  unsigned NumElems = VT.getVectorNumElements();
+  int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+  int Idx = (Elt >= (int)NumElems) ? -1 : ShuffleMask[Elt];
+ SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
+ : InVec.getOperand(1);
+
+ // If inputs to shuffle are the same for both ops, then allow 2 uses
+ unsigned AllowedUses = InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
+
+ if (LdNode.getOpcode() == ISD::BITCAST) {
+ // Don't duplicate a load with other uses.
+ if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
+ return SDValue();
+
+ AllowedUses = 1; // only allow 1 load use if we have a bitcast
+ LdNode = LdNode.getOperand(0);
+ }
+
+ if (!ISD::isNormalLoad(LdNode.getNode()))
+ return SDValue();
+
+ LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
+
+  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
+ return SDValue();
+
+ if (HasShuffleIntoBitcast) {
+    // If there's a bitcast before the shuffle, check if the load type and
+    // alignment are valid.
+ unsigned Align = LN0->getAlignment();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ unsigned NewAlign = TLI.getTargetData()->
+ getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+
+ if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
+ return SDValue();
+ }
+
+  // All checks matched, so transform back to vector_shuffle so that the DAG
+  // combiner can finish the job.
+ DebugLoc dl = N->getDebugLoc();
+
+  // Create a shuffle node, taking into account the case that it's a unary
+  // shuffle.
+ SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(VT) : InVec.getOperand(1);
+ Shuffle = DAG.getVectorShuffle(InVec.getValueType(), dl,
+ InVec.getOperand(0), Shuffle,
+ &ShuffleMask[0]);
+ Shuffle = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
+ EltNo);
+}
+
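The index arithmetic above is easier to see in a scalar model; extractOfShuffle is a hypothetical helper for illustration, not an LLVM API:

// extractelement(shuffle(A, B, Mask), Elt) is element Mask[Elt] of A when
// the index is below N, otherwise element Mask[Elt] - N of B -- so when
// that operand is a plain load, the whole expression is one element load.
int extractOfShuffle(const int *A, const int *B, const int *Mask,
                     int N, int Elt) {
  int Idx = Mask[Elt];
  if (Idx < 0)
    return 0; // undef lane: any value is acceptable
  return Idx < N ? A[Idx] : B[Idx - N];
}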
/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
/// to a simple store and scalar loads to extract the elements.
static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
- const TargetLowering &TLI) {
+ TargetLowering::DAGCombinerInfo &DCI) {
+ SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
+ if (NewOp.getNode())
+ return NewOp;
+
SDValue InputVector = N->getOperand(0);
// Only operate on vectors of 4 elements, where the alternative shuffling
@@ -12582,6 +13204,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
unsigned EltSize =
InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
@@ -12590,7 +13213,7 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
// Load the scalar.
SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
ScalarAddr, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
  // Replace the extract with the load.
DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
@@ -12603,7 +13226,10 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
/// nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
DebugLoc DL = N->getDebugLoc();
SDValue Cond = N->getOperand(0);
// Get the LHS/RHS of the select.
@@ -12617,7 +13243,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// ignored in unsafe-math mode).
if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
VT != MVT::f80 && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
- (Subtarget->hasXMMInt() ||
+ (Subtarget->hasSSE2() ||
(Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
@@ -12632,7 +13258,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// the operands would cause it to handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
- if (!UnsafeFPMath &&
+ if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
break;
std::swap(LHS, RHS);
@@ -12642,7 +13268,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
case ISD::SETOLE:
// Converting this to a min would handle comparisons between positive
// and negative zero incorrectly.
- if (!UnsafeFPMath &&
+ if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
break;
Opcode = X86ISD::FMIN;
@@ -12660,7 +13286,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
case ISD::SETOGE:
// Converting this to a max would handle comparisons between positive
// and negative zero incorrectly.
- if (!UnsafeFPMath &&
+ if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
break;
Opcode = X86ISD::FMAX;
@@ -12670,7 +13296,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// the operands would cause it to handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
- if (!UnsafeFPMath &&
+ if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
break;
std::swap(LHS, RHS);
@@ -12696,7 +13322,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Converting this to a min would handle comparisons between positive
// and negative zero incorrectly, and swapping the operands would
// cause it to handle NaNs incorrectly.
- if (!UnsafeFPMath &&
+ if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
@@ -12706,7 +13332,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
break;
case ISD::SETUGT:
// Converting this to a min would handle NaNs incorrectly.
- if (!UnsafeFPMath &&
+ if (!DAG.getTarget().Options.UnsafeFPMath &&
(!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
break;
Opcode = X86ISD::FMIN;
@@ -12731,7 +13357,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Converting this to a max would handle comparisons between positive
// and negative zero incorrectly, and swapping the operands would
// cause it to handle NaNs incorrectly.
- if (!UnsafeFPMath &&
+ if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
@@ -12848,6 +13474,57 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ // Canonicalize max and min:
+ // (x > y) ? x : y -> (x >= y) ? x : y
+ // (x < y) ? x : y -> (x <= y) ? x : y
+  // This allows use of COND_S / COND_NS (see TranslateX86CC), which
+  // eliminates the need for an extra compare against zero. e.g.:
+  // (x - y) > 0 ? (x - y) : 0  ->  (x - y) >= 0 ? (x - y) : 0
+ // subl %esi, %edi
+ // testl %edi, %edi
+ // movl $0, %eax
+ // cmovgl %edi, %eax
+ // =>
+ // xorl %eax, %eax
+  // subl %esi, %edi
+ // cmovsl %eax, %edi
+ if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
+ DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
+ DAG.isEqualTo(RHS, Cond.getOperand(1))) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
+ switch (CC) {
+ default: break;
+ case ISD::SETLT:
+ case ISD::SETGT: {
+ ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
+ Cond = DAG.getSetCC(Cond.getDebugLoc(), Cond.getValueType(),
+ Cond.getOperand(0), Cond.getOperand(1), NewCC);
+ return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
+ }
+ }
+ }
+
+ // If we know that this node is legal then we know that it is going to be
+ // matched by one of the SSE/AVX BLEND instructions. These instructions only
+ // depend on the highest bit in each word. Try to use SimplifyDemandedBits
+ // to simplify previous instructions.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
+ !DCI.isBeforeLegalize() &&
+ TLI.isOperationLegal(ISD::VSELECT, VT)) {
+ unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
+ assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
+ APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
+
+ APInt KnownZero, KnownOne;
+ TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
+ DCI.isBeforeLegalizeOps());
+ if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
+ TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne, TLO))
+ DCI.CommitTargetLoweringOpt(TLO);
+ }
+
return SDValue();
}
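Why a single demanded bit per element suffices can be seen from a scalar model of a blendv-style select (a sketch of the instruction semantics, not LLVM code):

#include <cstdint>

// PBLENDVB-style select: only the top bit of each mask byte participates,
// so SimplifyDemandedBits may freely rewrite the other seven bits of the
// condition without changing the blend result.
uint8_t blendv(uint8_t MaskByte, uint8_t X, uint8_t Y) {
  return (MaskByte & 0x80) ? X : Y;
}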
@@ -13042,7 +13719,8 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
// fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zeros or all ones.
- if (N1C && N0.getOpcode() == ISD::AND &&
+ if (VT.isInteger() && !VT.isVector() &&
+ N1C && N0.getOpcode() == ISD::AND &&
N0.getOperand(1).getOpcode() == ISD::Constant) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
@@ -13058,26 +13736,46 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
}
}
+
+  // Hardware support for vector shifts is sparse, which makes us scalarize
+  // the vector operations in many cases. Also, on Sandy Bridge ADD is faster
+  // than shl.
+  // (shl V, 1) -> (add V, V)
+ if (isSplatVector(N1.getNode())) {
+ assert(N0.getValueType().isVector() && "Invalid vector shift type");
+ ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(0));
+ // We shift all of the values by one. In many cases we do not have
+ // hardware support for this operation. This is better expressed as an ADD
+ // of two values.
+ if (N1C && (1 == N1C->getZExtValue())) {
+ return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N0);
+ }
+ }
+
return SDValue();
}
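A quick scalar check of the identity the rewrite above relies on, including the wrap-around case:

#include <cassert>
#include <cstdint>

int main() {
  // (shl V, 1) == (add V, V) holds per element, even when the top bit
  // shifts out (unsigned wrap-around).
  uint32_t Lanes[3] = {1u, 0x80000000u, 0xDEADBEEFu};
  for (int i = 0; i < 3; ++i)
    assert((Lanes[i] << 1) == Lanes[i] + Lanes[i]);
}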
/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts
/// when possible.
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
EVT VT = N->getValueType(0);
- if (!VT.isVector() && VT.isInteger() &&
- N->getOpcode() == ISD::SHL)
- return PerformSHLCombine(N, DAG);
+ if (N->getOpcode() == ISD::SHL) {
+ SDValue V = PerformSHLCombine(N, DAG);
+ if (V.getNode()) return V;
+ }
// On X86 with SSE2 support, we can transform this to a vector shift if
// all elements are shifted by the same amount. We can't do this in legalize
  // because a constant vector is typically transformed to a constant pool
// so we have no knowledge of the shift amount.
- if (!Subtarget->hasXMMInt())
+ if (!Subtarget->hasSSE2())
return SDValue();
- if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
+ if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
+ (!Subtarget->hasAVX2() ||
+ (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
return SDValue();
SDValue ShAmtOp = N->getOperand(1);
@@ -13093,6 +13791,11 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
BaseShAmt = Arg;
break;
}
+ // Handle the case where the build_vector is all undef
+ // FIXME: Should DAG allow this?
+ if (i == NumElts)
+ return SDValue();
+
for (; i != NumElts; ++i) {
SDValue Arg = ShAmtOp.getOperand(i);
if (Arg.getOpcode() == ISD::UNDEF) continue;
@@ -13119,9 +13822,16 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
BaseShAmt = InVec.getOperand(1);
}
}
- if (BaseShAmt.getNode() == 0)
+ if (BaseShAmt.getNode() == 0) {
+      // Don't create instructions with illegal types after type
+      // legalization has run.
+ if (!DAG.getTargetLoweringInfo().isTypeLegal(EltVT) &&
+ !DCI.isBeforeLegalize())
+ return SDValue();
+
BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp,
DAG.getIntPtrConstant(0));
+ }
} else
return SDValue();
@@ -13136,47 +13846,38 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
switch (N->getOpcode()) {
default:
llvm_unreachable("Unknown shift opcode!");
- break;
case ISD::SHL:
- if (VT == MVT::v2i64)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
- ValOp, BaseShAmt);
- if (VT == MVT::v4i32)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
- ValOp, BaseShAmt);
- if (VT == MVT::v8i16)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
- ValOp, BaseShAmt);
- break;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return SDValue();
+ case MVT::v2i64:
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v4i64:
+ case MVT::v8i32:
+ case MVT::v16i16:
+ return getTargetVShiftNode(X86ISD::VSHLI, DL, VT, ValOp, BaseShAmt, DAG);
+ }
case ISD::SRA:
- if (VT == MVT::v4i32)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
- ValOp, BaseShAmt);
- if (VT == MVT::v8i16)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
- ValOp, BaseShAmt);
- break;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return SDValue();
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v8i32:
+ case MVT::v16i16:
+ return getTargetVShiftNode(X86ISD::VSRAI, DL, VT, ValOp, BaseShAmt, DAG);
+ }
case ISD::SRL:
- if (VT == MVT::v2i64)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
- ValOp, BaseShAmt);
- if (VT == MVT::v4i32)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
- ValOp, BaseShAmt);
- if (VT == MVT::v8i16)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
- ValOp, BaseShAmt);
- break;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: return SDValue();
+ case MVT::v2i64:
+ case MVT::v4i32:
+ case MVT::v8i16:
+ case MVT::v4i64:
+ case MVT::v8i32:
+ case MVT::v16i16:
+ return getTargetVShiftNode(X86ISD::VSRLI, DL, VT, ValOp, BaseShAmt, DAG);
+ }
}
- return SDValue();
}
@@ -13190,7 +13891,7 @@ static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
// SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
// we're requiring SSE2 for both.
- if (Subtarget->hasXMMInt() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
+ if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CMP0 = N0->getOperand(1);
@@ -13300,7 +14001,9 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
EVT VT = N->getValueType(0);
- // Create ANDN instructions
+ // Create ANDN, BLSI, and BLSR instructions
+ // BLSI is X & (-X)
+ // BLSR is X & (X-1)
if (Subtarget->hasBMI() && (VT == MVT::i32 || VT == MVT::i64)) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -13313,6 +14016,26 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
if (N1.getOpcode() == ISD::XOR && isAllOnes(N1.getOperand(1)))
return DAG.getNode(X86ISD::ANDN, DL, VT, N1.getOperand(0), N0);
+ // Check LHS for neg
+ if (N0.getOpcode() == ISD::SUB && N0.getOperand(1) == N1 &&
+ isZero(N0.getOperand(0)))
+ return DAG.getNode(X86ISD::BLSI, DL, VT, N1);
+
+ // Check RHS for neg
+ if (N1.getOpcode() == ISD::SUB && N1.getOperand(1) == N0 &&
+ isZero(N1.getOperand(0)))
+ return DAG.getNode(X86ISD::BLSI, DL, VT, N0);
+
+ // Check LHS for X-1
+ if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
+ isAllOnes(N0.getOperand(1)))
+ return DAG.getNode(X86ISD::BLSR, DL, VT, N1);
+
+ // Check RHS for X-1
+ if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
+ isAllOnes(N1.getOperand(1)))
+ return DAG.getNode(X86ISD::BLSR, DL, VT, N0);
+
return SDValue();
}
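The two bit tricks matched above, checked on a scalar; the combine recognizes the negation as a SUB from zero and the decrement as an ADD of an all-ones constant:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0x2C; // 0b101100
  assert((X & (0u - X)) == 0x04); // BLSI: isolate the lowest set bit
  assert((X & (X - 1u)) == 0x28); // BLSR: clear the lowest set bit
}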
@@ -13353,98 +14076,87 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
return R;
EVT VT = N->getValueType(0);
- if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64 && VT != MVT::v2i64)
- return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// look for psign/blend
- if (Subtarget->hasSSSE3() || Subtarget->hasAVX()) {
- if (VT == MVT::v2i64) {
- // Canonicalize pandn to RHS
- if (N0.getOpcode() == X86ISD::ANDNP)
- std::swap(N0, N1);
- // or (and (m, x), (pandn m, y))
- if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
- SDValue Mask = N1.getOperand(0);
- SDValue X = N1.getOperand(1);
- SDValue Y;
- if (N0.getOperand(0) == Mask)
- Y = N0.getOperand(1);
- if (N0.getOperand(1) == Mask)
- Y = N0.getOperand(0);
-
- // Check to see if the mask appeared in both the AND and ANDNP and
- if (!Y.getNode())
- return SDValue();
-
- // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
- if (Mask.getOpcode() != ISD::BITCAST ||
- X.getOpcode() != ISD::BITCAST ||
- Y.getOpcode() != ISD::BITCAST)
- return SDValue();
-
- // Look through mask bitcast.
- Mask = Mask.getOperand(0);
- EVT MaskVT = Mask.getValueType();
-
- // Validate that the Mask operand is a vector sra node. The sra node
- // will be an intrinsic.
- if (Mask.getOpcode() != ISD::INTRINSIC_WO_CHAIN)
- return SDValue();
-
- // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
- // there is no psrai.b
- switch (cast<ConstantSDNode>(Mask.getOperand(0))->getZExtValue()) {
- case Intrinsic::x86_sse2_psrai_w:
- case Intrinsic::x86_sse2_psrai_d:
- break;
- default: return SDValue();
- }
-
- // Check that the SRA is all signbits.
- SDValue SraC = Mask.getOperand(2);
- unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
- unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
- if ((SraAmt + 1) != EltBits)
- return SDValue();
+ if (VT == MVT::v2i64 || VT == MVT::v4i64) {
+ if (!Subtarget->hasSSSE3() ||
+ (VT == MVT::v4i64 && !Subtarget->hasAVX2()))
+ return SDValue();
- DebugLoc DL = N->getDebugLoc();
+ // Canonicalize pandn to RHS
+ if (N0.getOpcode() == X86ISD::ANDNP)
+ std::swap(N0, N1);
+ // or (and (m, y), (pandn m, x))
+ if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
+ SDValue Mask = N1.getOperand(0);
+ SDValue X = N1.getOperand(1);
+ SDValue Y;
+ if (N0.getOperand(0) == Mask)
+ Y = N0.getOperand(1);
+ if (N0.getOperand(1) == Mask)
+ Y = N0.getOperand(0);
+
+      // Check to see if the mask appeared in both the AND and the ANDNP.
+ if (!Y.getNode())
+ return SDValue();
- // Now we know we at least have a plendvb with the mask val. See if
- // we can form a psignb/w/d.
- // psign = x.type == y.type == mask.type && y = sub(0, x);
+ // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
+ // Look through mask bitcast.
+ if (Mask.getOpcode() == ISD::BITCAST)
+ Mask = Mask.getOperand(0);
+ if (X.getOpcode() == ISD::BITCAST)
X = X.getOperand(0);
+ if (Y.getOpcode() == ISD::BITCAST)
Y = Y.getOperand(0);
- if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
- ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
- X.getValueType() == MaskVT && X.getValueType() == Y.getValueType()){
- unsigned Opc = 0;
- switch (EltBits) {
- case 8: Opc = X86ISD::PSIGNB; break;
- case 16: Opc = X86ISD::PSIGNW; break;
- case 32: Opc = X86ISD::PSIGND; break;
- default: break;
- }
- if (Opc) {
- SDValue Sign = DAG.getNode(Opc, DL, MaskVT, X, Mask.getOperand(1));
- return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Sign);
- }
- }
- // PBLENDVB only available on SSE 4.1
- if (!(Subtarget->hasSSE41() || Subtarget->hasAVX()))
- return SDValue();
-
- X = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, X);
- Y = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Y);
- Mask = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Mask);
- Mask = DAG.getNode(ISD::VSELECT, DL, MVT::v16i8, Mask, X, Y);
- return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Mask);
+
+ EVT MaskVT = Mask.getValueType();
+
+ // Validate that the Mask operand is a vector sra node.
+ // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
+ // there is no psrai.b
+ if (Mask.getOpcode() != X86ISD::VSRAI)
+ return SDValue();
+
+ // Check that the SRA is all signbits.
+ SDValue SraC = Mask.getOperand(1);
+ unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
+ unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
+ if ((SraAmt + 1) != EltBits)
+ return SDValue();
+
+ DebugLoc DL = N->getDebugLoc();
+
+      // Now we know we at least have a pblendvb with the mask value. See if
+      // we can form a psignb/w/d.
+ // psign = x.type == y.type == mask.type && y = sub(0, x);
+ if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
+ ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
+ X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
+ assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
+ "Unsupported VT for PSIGN");
+ Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
+ return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
}
+    // PBLENDVB is only available on SSE 4.1.
+ if (!Subtarget->hasSSE41())
+ return SDValue();
+
+ EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
+
+ X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
+ Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
+ Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
+ Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
+ return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
}
}
+ if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
+ return SDValue();
+
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
std::swap(N0, N1);
@@ -13500,6 +14212,36 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
+static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ EVT VT = N->getValueType(0);
+
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return SDValue();
+
+ assert(Subtarget->hasBMI() && "Creating BLSMSK requires BMI instructions");
+
+ // Create BLSMSK instructions by finding X ^ (X-1)
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ DebugLoc DL = N->getDebugLoc();
+
+ if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1 &&
+ isAllOnes(N0.getOperand(1)))
+ return DAG.getNode(X86ISD::BLSMSK, DL, VT, N1);
+
+ if (N1.getOpcode() == ISD::ADD && N1.getOperand(0) == N0 &&
+ isAllOnes(N1.getOperand(1)))
+ return DAG.getNode(X86ISD::BLSMSK, DL, VT, N0);
+
+ return SDValue();
+}
+
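The matching scalar check for BLSMSK; as above, X - 1 appears in the DAG as an ADD of an all-ones constant:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0x2C; // 0b101100
  // BLSMSK: set every bit from the lowest set bit of X down to bit 0.
  assert((X ^ (X - 1u)) == 0x07);
}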
/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
@@ -13515,7 +14257,8 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
// shuffle. We need SSE4 for the shuffles.
// TODO: It is possible to support ZExt by zeroing the undef values
// during the shuffle phase or after the shuffle.
- if (RegVT.isVector() && Ext == ISD::EXTLOAD && Subtarget->hasSSE41()) {
+ if (RegVT.isVector() && RegVT.isInteger() &&
+ Ext == ISD::EXTLOAD && Subtarget->hasSSE41()) {
assert(MemVT != RegVT && "Cannot extend to the same type");
assert(MemVT.isVector() && "Must load a vector from memory");
@@ -13553,7 +14296,8 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
SDValue ScalarLoad = DAG.getLoad(SclrLoadTy, dl, Ld->getChain(),
Ld->getBasePtr(),
Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->getAlignment());
+ Ld->isNonTemporal(), Ld->isInvariant(),
+ Ld->getAlignment());
// Insert the word loaded into a vector.
SDValue ScalarInVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
@@ -13561,7 +14305,8 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
// Bitcast the loaded value to a vector of the original element type, in
// the size of the target vector type.
- SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, ScalarInVector);
+ SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT,
+ ScalarInVector);
unsigned SizeRatio = RegSz/MemSz;
// Redistribute the loaded elements into the different locations.
@@ -13593,7 +14338,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue StoredVal = St->getOperand(1);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- // If we are saving a concatination of two XMM registers, perform two stores.
+ // If we are saving a concatenation of two XMM registers, perform two stores.
   // This is better on Sandy Bridge because one 256-bit mem op is done via two
   // 128-bit ones. If in the future the cost becomes only one memory access,
   // the first version would be better.
@@ -13703,8 +14448,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
const Function *F = DAG.getMachineFunction().getFunction();
bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
- bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps
- && Subtarget->hasXMMInt();
+ bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
+ && Subtarget->hasSSE2();
if ((VT.isVector() ||
(VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
isa<LoadSDNode>(St->getValue()) &&
@@ -13722,7 +14467,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
Ld = cast<LoadSDNode>(St->getChain());
else if (St->getValue().hasOneUse() &&
ChainVal->getOpcode() == ISD::TokenFactor) {
- for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) {
+ for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
if (ChainVal->getOperand(i).getNode() == LdVal) {
TokenFactorIndex = i;
Ld = cast<LoadSDNode>(St->getValue());
@@ -13749,7 +14494,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
Ld->getPointerInfo(), Ld->isVolatile(),
- Ld->isNonTemporal(), Ld->getAlignment());
+ Ld->isNonTemporal(), Ld->isInvariant(),
+ Ld->getAlignment());
SDValue NewChain = NewLd.getValue(1);
if (TokenFactorIndex != -1) {
Ops.push_back(NewChain);
@@ -13770,10 +14516,11 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
Ld->getPointerInfo(),
Ld->isVolatile(), Ld->isNonTemporal(),
- Ld->getAlignment());
+ Ld->isInvariant(), Ld->getAlignment());
SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
Ld->getPointerInfo().getWithOffset(4),
Ld->isVolatile(), Ld->isNonTemporal(),
+ Ld->isInvariant(),
MinAlign(Ld->getAlignment(), 4));
SDValue NewChain = LoLd.getValue(1);
@@ -13817,7 +14564,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
/// set to A, RHS to B, and the routine returns 'true'.
/// Note that the binary operation should have the property that if one of the
/// operands is UNDEF then the result is UNDEF.
-static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool isCommutative) {
+static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
// Look for the following pattern: if
// A = < float a0, float a1, float a2, float a3 >
// B = < float b0, float b1, float b2, float b3 >
@@ -13833,7 +14580,18 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool isCommutative) {
return false;
EVT VT = LHS.getValueType();
- unsigned N = VT.getVectorNumElements();
+
+ assert((VT.is128BitVector() || VT.is256BitVector()) &&
+ "Unsupported vector type for horizontal add/sub");
+
+ // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
+ // operate independently on 128-bit lanes.
+ unsigned NumElts = VT.getVectorNumElements();
+ unsigned NumLanes = VT.getSizeInBits()/128;
+ unsigned NumLaneElts = NumElts / NumLanes;
+ assert((NumLaneElts % 2 == 0) &&
+ "Vector type should have an even number of elements in each lane");
+ unsigned HalfLaneElts = NumLaneElts/2;
// View LHS in the form
// LHS = VECTOR_SHUFFLE A, B, LMask
@@ -13842,34 +14600,36 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool isCommutative) {
// NOTE: in what follows a default initialized SDValue represents an UNDEF of
// type VT.
SDValue A, B;
- SmallVector<int, 8> LMask(N);
+ SmallVector<int, 16> LMask(NumElts);
if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
A = LHS.getOperand(0);
if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
B = LHS.getOperand(1);
- cast<ShuffleVectorSDNode>(LHS.getNode())->getMask(LMask);
+ ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
+ std::copy(Mask.begin(), Mask.end(), LMask.begin());
} else {
if (LHS.getOpcode() != ISD::UNDEF)
A = LHS;
- for (unsigned i = 0; i != N; ++i)
+ for (unsigned i = 0; i != NumElts; ++i)
LMask[i] = i;
}
// Likewise, view RHS in the form
// RHS = VECTOR_SHUFFLE C, D, RMask
SDValue C, D;
- SmallVector<int, 8> RMask(N);
+ SmallVector<int, 16> RMask(NumElts);
if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
C = RHS.getOperand(0);
if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
D = RHS.getOperand(1);
- cast<ShuffleVectorSDNode>(RHS.getNode())->getMask(RMask);
+ ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
+ std::copy(Mask.begin(), Mask.end(), RMask.begin());
} else {
if (RHS.getOpcode() != ISD::UNDEF)
C = RHS;
- for (unsigned i = 0; i != N; ++i)
+ for (unsigned i = 0; i != NumElts; ++i)
RMask[i] = i;
}
@@ -13884,30 +14644,28 @@ static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool isCommutative) {
// If A and B occur in reverse order in RHS, then "swap" them (which means
// rewriting the mask).
if (A != C)
- for (unsigned i = 0; i != N; ++i) {
- unsigned Idx = RMask[i];
- if (Idx < N)
- RMask[i] += N;
- else if (Idx < 2*N)
- RMask[i] -= N;
- }
+ CommuteVectorShuffleMask(RMask, NumElts);
// At this point LHS and RHS are equivalent to
// LHS = VECTOR_SHUFFLE A, B, LMask
// RHS = VECTOR_SHUFFLE A, B, RMask
// Check that the masks correspond to performing a horizontal operation.
- for (unsigned i = 0; i != N; ++i) {
- unsigned LIdx = LMask[i], RIdx = RMask[i];
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int LIdx = LMask[i], RIdx = RMask[i];
// Ignore any UNDEF components.
- if (LIdx >= 2*N || RIdx >= 2*N || (!A.getNode() && (LIdx < N || RIdx < N))
- || (!B.getNode() && (LIdx >= N || RIdx >= N)))
+ if (LIdx < 0 || RIdx < 0 ||
+ (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
+ (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
continue;
// Check that successive elements are being operated on. If not, this is
// not a horizontal operation.
- if (!(LIdx == 2*i && RIdx == 2*i + 1) &&
- !(isCommutative && LIdx == 2*i + 1 && RIdx == 2*i))
+ unsigned Src = (i/HalfLaneElts) % 2; // each lane is split between srcs
+ unsigned LaneStart = (i/NumLaneElts) * NumLaneElts;
+ int Index = 2*(i%HalfLaneElts) + NumElts*Src + LaneStart;
+ if (!(LIdx == Index && RIdx == Index + 1) &&
+ !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
return false;
}
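The per-lane index formula is easiest to follow on a concrete type. This standalone sketch replays it for v8f32 (two 128-bit lanes) and prints the source pair each output element must combine; indices of 8 and above refer to B:

#include <cstdio>

int main() {
  const unsigned NumElts = 8, NumLanes = 2;
  const unsigned NumLaneElts = NumElts / NumLanes;
  const unsigned HalfLaneElts = NumLaneElts / 2;
  for (unsigned i = 0; i != NumElts; ++i) {
    unsigned Src = (i / HalfLaneElts) % 2;
    unsigned LaneStart = (i / NumLaneElts) * NumLaneElts;
    unsigned Index = 2 * (i % HalfLaneElts) + NumElts * Src + LaneStart;
    printf("out[%u] <- (%u, %u)\n", i, Index, Index + 1);
  }
  // Prints (0,1) (2,3) (8,9) (10,11) (4,5) (6,7) (12,13) (14,15):
  // exactly the per-lane element layout of VHADDPS/VHSUBPS.
}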
@@ -13924,8 +14682,8 @@ static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
SDValue RHS = N->getOperand(1);
// Try to synthesize horizontal adds from adds of shuffles.
- if ((Subtarget->hasSSE3() || Subtarget->hasAVX()) &&
- (VT == MVT::v4f32 || VT == MVT::v2f64) &&
+ if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
+ (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
isHorizontalBinOp(LHS, RHS, true))
return DAG.getNode(X86ISD::FHADD, N->getDebugLoc(), VT, LHS, RHS);
return SDValue();
@@ -13939,8 +14697,8 @@ static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
SDValue RHS = N->getOperand(1);
// Try to synthesize horizontal subs from subs of shuffles.
- if ((Subtarget->hasSSE3() || Subtarget->hasAVX()) &&
- (VT == MVT::v4f32 || VT == MVT::v2f64) &&
+ if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
+ (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
isHorizontalBinOp(LHS, RHS, false))
return DAG.getNode(X86ISD::FHSUB, N->getDebugLoc(), VT, LHS, RHS);
return SDValue();
@@ -14006,7 +14764,58 @@ static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget *Subtarget) {
+ if (!DCI.isBeforeLegalizeOps())
+ return SDValue();
+
+ if (!Subtarget->hasAVX())
+ return SDValue();
+
+  // Optimize vectors in AVX mode:
+  // sign-extend v8i16 to v8i32 and v4i32 to v4i64.
+  //
+  // Divide the input vector into two parts;
+  // for v4i32 the shuffle masks will be {0, 1, -1, -1} and {2, 3, -1, -1}.
+  // Use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
+  // then concatenate the vectors back to the original VT.
+
+ EVT VT = N->getValueType(0);
+ SDValue Op = N->getOperand(0);
+ EVT OpVT = Op.getValueType();
+ DebugLoc dl = N->getDebugLoc();
+
+ if ((VT == MVT::v4i64 && OpVT == MVT::v4i32) ||
+ (VT == MVT::v8i32 && OpVT == MVT::v8i16)) {
+
+ unsigned NumElems = OpVT.getVectorNumElements();
+ SmallVector<int,8> ShufMask1(NumElems, -1);
+ for (unsigned i = 0; i < NumElems/2; i++) ShufMask1[i] = i;
+
+ SDValue OpLo = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT),
+ ShufMask1.data());
+
+ SmallVector<int,8> ShufMask2(NumElems, -1);
+ for (unsigned i = 0; i < NumElems/2; i++) ShufMask2[i] = i + NumElems/2;
+
+ SDValue OpHi = DAG.getVectorShuffle(OpVT, dl, Op, DAG.getUNDEF(OpVT),
+ ShufMask2.data());
+
+ EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
+ VT.getVectorNumElements()/2);
+
+ OpLo = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpLo);
+ OpHi = DAG.getNode(X86ISD::VSEXT_MOVL, dl, HalfVT, OpHi);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
+ }
+ return SDValue();
+}
+
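A scalar model of the v4i32 -> v4i64 case above: the two shuffles peel off the halves, VSEXT_MOVL widens each half, and CONCAT_VECTORS restores the full width (sketch only, not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  int32_t In[4] = {-1, 2, -3, 4};
  int64_t Out[4];
  for (int i = 0; i < 2; ++i) {
    Out[i]     = (int64_t)In[i];     // widen the {0, 1, -1, -1} half
    Out[i + 2] = (int64_t)In[i + 2]; // widen the {2, 3, -1, -1} half
  }
  for (int i = 0; i < 4; ++i)
    assert(Out[i] == (int64_t)In[i]); // sign bits preserved
}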
+static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
// (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
// (and (i32 x86isd::setcc_carry), 1)
// This eliminates the zext. This transformation is necessary because
@@ -14014,6 +14823,8 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
DebugLoc dl = N->getDebugLoc();
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
+ EVT OpVT = N0.getValueType();
+
if (N0.getOpcode() == ISD::AND &&
N0.hasOneUse() &&
N0.getOperand(0).hasOneUse()) {
@@ -14028,6 +14839,37 @@ static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
N00.getOperand(0), N00.getOperand(1)),
DAG.getConstant(1, VT));
}
+ // Optimize vectors in AVX mode:
+ //
+ // v8i16 -> v8i32
+ // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
+ // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
+ // Concat upper and lower parts.
+ //
+ // v4i32 -> v4i64
+ // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
+ // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
+ // Concat upper and lower parts.
+ //
+ if (Subtarget->hasAVX()) {
+
+ if (((VT == MVT::v8i32) && (OpVT == MVT::v8i16)) ||
+ ((VT == MVT::v4i64) && (OpVT == MVT::v4i32))) {
+
+ SDValue ZeroVec = getZeroVector(OpVT, Subtarget, DAG, dl);
+ SDValue OpLo = getTargetShuffleNode(X86ISD::UNPCKL, dl, OpVT, N0, ZeroVec, DAG);
+ SDValue OpHi = getTargetShuffleNode(X86ISD::UNPCKH, dl, OpVT, N0, ZeroVec, DAG);
+
+ EVT HVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
+ VT.getVectorNumElements()/2);
+
+ OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
+ OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
+
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
+ }
+ }
+
return SDValue();
}
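The unpack trick above on scalars: interleaving a 16-bit element with a zero element yields, read as a little-endian i32 lane, exactly the zero-extended element (a sketch of the semantics, not LLVM code):

#include <cassert>
#include <cstdint>

int main() {
  uint16_t In[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  // PUNPCKLWD In, 0 -> words {In[0], 0, In[1], 0, In[2], 0, In[3], 0};
  // PUNPCKHWD In, 0 -> words {In[4], 0, In[5], 0, In[6], 0, In[7], 0}.
  for (int i = 0; i < 8; ++i) {
    uint32_t Lane = (uint32_t)In[i] | ((uint32_t)0 << 16); // word pair
    assert(Lane == (uint32_t)In[i]); // zero-extension for free
  }
}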
@@ -14136,7 +14978,24 @@ static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
}
-static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG) {
+/// PerformAddCombine - Do target-specific DAG combines on integer adds.
+static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ EVT VT = N->getValueType(0);
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ // Try to synthesize horizontal adds from adds of shuffles.
+ if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
+ (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
+ isHorizontalBinOp(Op0, Op1, true))
+ return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1);
+
+ return OptimizeConditionalInDecrement(N, DAG);
+}
+
+static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
@@ -14158,6 +15017,13 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG) {
}
}
+  // Try to synthesize horizontal subs from subs of shuffles.
+  EVT VT = N->getValueType(0);
+  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
+      (Subtarget->hasAVX2() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
+      isHorizontalBinOp(Op0, Op1, false))
+    return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1);
+
return OptimizeConditionalInDecrement(N, DAG);
}
@@ -14167,19 +15033,20 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
switch (N->getOpcode()) {
default: break;
case ISD::EXTRACT_VECTOR_ELT:
- return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
+ return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
case ISD::VSELECT:
- case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
+ case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget);
case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
- case ISD::ADD: return OptimizeConditionalInDecrement(N, DAG);
- case ISD::SUB: return PerformSubCombine(N, DAG);
+ case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
+ case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
case ISD::SHL:
case ISD::SRA:
- case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget);
+ case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
+ case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
case ISD::LOAD: return PerformLOADCombine(N, DAG, Subtarget);
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
@@ -14190,27 +15057,14 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::FAND: return PerformFANDCombine(N, DAG);
case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
- case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG);
+ case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, Subtarget);
+ case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
+ case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI);
case X86ISD::SETCC: return PerformSETCCCombine(N, DAG);
- case X86ISD::SHUFPS: // Handle all target specific shuffles
- case X86ISD::SHUFPD:
+ case X86ISD::SHUFP: // Handle all target specific shuffles
case X86ISD::PALIGN:
- case X86ISD::PUNPCKHBW:
- case X86ISD::PUNPCKHWD:
- case X86ISD::PUNPCKHDQ:
- case X86ISD::PUNPCKHQDQ:
- case X86ISD::UNPCKHPS:
- case X86ISD::UNPCKHPD:
- case X86ISD::VUNPCKHPSY:
- case X86ISD::VUNPCKHPDY:
- case X86ISD::PUNPCKLBW:
- case X86ISD::PUNPCKLWD:
- case X86ISD::PUNPCKLDQ:
- case X86ISD::PUNPCKLQDQ:
- case X86ISD::UNPCKLPS:
- case X86ISD::UNPCKLPD:
- case X86ISD::VUNPCKLPSY:
- case X86ISD::VUNPCKLPDY:
+ case X86ISD::UNPCKH:
+ case X86ISD::UNPCKL:
case X86ISD::MOVHLPS:
case X86ISD::MOVLHPS:
case X86ISD::PSHUFD:
@@ -14218,11 +15072,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::PSHUFLW:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
- case X86ISD::VPERMILPS:
- case X86ISD::VPERMILPSY:
- case X86ISD::VPERMILPD:
- case X86ISD::VPERMILPDY:
- case X86ISD::VPERM2F128:
+ case X86ISD::VPERMILP:
+ case X86ISD::VPERM2X128:
case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
}
@@ -14330,11 +15181,38 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
+namespace {
+ // Helper to match a string against a sequence of whitespace-separated pieces.
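+ // For example, "  bswap\t%eax" matches the pieces ("bswap", "%eax"),
+ // but "bswapl %eax" does not: "bswap" would match only a prefix of the
+ // "bswapl" token, which the pos == 0 check below rejects.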
+ bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
+ s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
+
+ for (unsigned i = 0, e = args.size(); i != e; ++i) {
+ StringRef piece(*args[i]);
+ if (!s.startswith(piece)) // Check if the piece matches.
+ return false;
+
+ s = s.substr(piece.size());
+ StringRef::size_type pos = s.find_first_not_of(" \t");
+ if (pos == 0) // The piece matched only a prefix of a longer token.
+ return false;
+
+ s = s.substr(pos);
+ }
+
+ return s.empty();
+ }
+ const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
+}
+
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
std::string AsmStr = IA->getAsmString();
+ IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+ if (!Ty || Ty->getBitWidth() % 16 != 0)
+ return false;
+
// TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
SmallVector<StringRef, 4> AsmPieces;
SplitString(AsmStr, AsmPieces, ";\n");
@@ -14342,35 +15220,27 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
switch (AsmPieces.size()) {
default: return false;
case 1:
- AsmStr = AsmPieces[0];
- AsmPieces.clear();
- SplitString(AsmStr, AsmPieces, " \t"); // Split with whitespace.
-
// FIXME: this should verify that we are targeting a 486 or better. If not,
- // we will turn this bswap into something that will be lowered to logical ops
- // instead of emitting the bswap asm. For now, we don't support 486 or lower
- // so don't worry about this.
+ // we will turn this bswap into something that will be lowered to logical
+ // ops instead of emitting the bswap asm. For now, we don't support 486 or
+ // lower so don't worry about this.
// bswap $0
- if (AsmPieces.size() == 2 &&
- (AsmPieces[0] == "bswap" ||
- AsmPieces[0] == "bswapq" ||
- AsmPieces[0] == "bswapl") &&
- (AsmPieces[1] == "$0" ||
- AsmPieces[1] == "${0:q}")) {
+ if (matchAsm(AsmPieces[0], "bswap", "$0") ||
+ matchAsm(AsmPieces[0], "bswapl", "$0") ||
+ matchAsm(AsmPieces[0], "bswapq", "$0") ||
+ matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
+ matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
+ matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
// No need to check constraints, nothing other than the equivalent of
// "=r,0" would be valid here.
- IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
- if (!Ty || Ty->getBitWidth() % 16 != 0)
- return false;
return IntrinsicLowering::LowerToByteSwap(CI);
}
+
// rorw $$8, ${0:w} --> llvm.bswap.i16
if (CI->getType()->isIntegerTy(16) &&
- AsmPieces.size() == 3 &&
- (AsmPieces[0] == "rorw" || AsmPieces[0] == "rolw") &&
- AsmPieces[1] == "$$8," &&
- AsmPieces[2] == "${0:w}" &&
- IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
+ IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
+ (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
+ matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
AsmPieces.clear();
const std::string &ConstraintsStr = IA->getConstraintString();
SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
@@ -14379,46 +15249,26 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
AsmPieces[0] == "~{cc}" &&
AsmPieces[1] == "~{dirflag}" &&
AsmPieces[2] == "~{flags}" &&
- AsmPieces[3] == "~{fpsr}") {
- IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
- if (!Ty || Ty->getBitWidth() % 16 != 0)
- return false;
- return IntrinsicLowering::LowerToByteSwap(CI);
- }
+ AsmPieces[3] == "~{fpsr}")
+ return IntrinsicLowering::LowerToByteSwap(CI);
}
break;
case 3:
if (CI->getType()->isIntegerTy(32) &&
- IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
- SmallVector<StringRef, 4> Words;
- SplitString(AsmPieces[0], Words, " \t,");
- if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" &&
- Words[2] == "${0:w}") {
- Words.clear();
- SplitString(AsmPieces[1], Words, " \t,");
- if (Words.size() == 3 && Words[0] == "rorl" && Words[1] == "$$16" &&
- Words[2] == "$0") {
- Words.clear();
- SplitString(AsmPieces[2], Words, " \t,");
- if (Words.size() == 3 && Words[0] == "rorw" && Words[1] == "$$8" &&
- Words[2] == "${0:w}") {
- AsmPieces.clear();
- const std::string &ConstraintsStr = IA->getConstraintString();
- SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
- std::sort(AsmPieces.begin(), AsmPieces.end());
- if (AsmPieces.size() == 4 &&
- AsmPieces[0] == "~{cc}" &&
- AsmPieces[1] == "~{dirflag}" &&
- AsmPieces[2] == "~{flags}" &&
- AsmPieces[3] == "~{fpsr}") {
- IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
- if (!Ty || Ty->getBitWidth() % 16 != 0)
- return false;
- return IntrinsicLowering::LowerToByteSwap(CI);
- }
- }
- }
- }
+ IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
+ matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
+ matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
+ matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
+ AsmPieces.clear();
+ const std::string &ConstraintsStr = IA->getConstraintString();
+ SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
+ std::sort(AsmPieces.begin(), AsmPieces.end());
+ if (AsmPieces.size() == 4 &&
+ AsmPieces[0] == "~{cc}" &&
+ AsmPieces[1] == "~{dirflag}" &&
+ AsmPieces[2] == "~{flags}" &&
+ AsmPieces[3] == "~{fpsr}")
+ return IntrinsicLowering::LowerToByteSwap(CI);
}
if (CI->getType()->isIntegerTy(64)) {
@@ -14427,23 +15277,10 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
// bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
- SmallVector<StringRef, 4> Words;
- SplitString(AsmPieces[0], Words, " \t");
- if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%eax") {
- Words.clear();
- SplitString(AsmPieces[1], Words, " \t");
- if (Words.size() == 2 && Words[0] == "bswap" && Words[1] == "%edx") {
- Words.clear();
- SplitString(AsmPieces[2], Words, " \t,");
- if (Words.size() == 3 && Words[0] == "xchgl" && Words[1] == "%eax" &&
- Words[2] == "%edx") {
- IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
- if (!Ty || Ty->getBitWidth() % 16 != 0)
- return false;
- return IntrinsicLowering::LowerToByteSwap(CI);
- }
- }
- }
+ if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
+ matchAsm(AsmPieces[1], "bswap", "%edx") &&
+ matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
+ return IntrinsicLowering::LowerToByteSwap(CI);
}
}
break;
@@ -14538,7 +15375,8 @@ TargetLowering::ConstraintWeight
break;
case 'x':
case 'Y':
- if ((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasXMM())
+ if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
+ ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasAVX()))
weight = CW_Register;
break;
case 'I':
@@ -14608,9 +15446,9 @@ LowerXConstraint(EVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
if (ConstraintVT.isFloatingPoint()) {
- if (Subtarget->hasXMMInt())
+ if (Subtarget->hasSSE2())
return "Y";
- if (Subtarget->hasXMM())
+ if (Subtarget->hasSSE1())
return "x";
}
@@ -14816,10 +15654,10 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
if (!Subtarget->hasMMX()) break;
return std::make_pair(0U, X86::VR64RegisterClass);
case 'Y': // SSE_REGS if SSE2 allowed
- if (!Subtarget->hasXMMInt()) break;
+ if (!Subtarget->hasSSE2()) break;
// FALL THROUGH.
- case 'x': // SSE_REGS if SSE1 allowed
- if (!Subtarget->hasXMM()) break;
+ case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
+ if (!Subtarget->hasSSE1()) break;
switch (VT.getSimpleVT().SimpleTy) {
default: break;
@@ -14838,6 +15676,15 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case MVT::v4f32:
case MVT::v2f64:
return std::make_pair(0U, X86::VR128RegisterClass);
+ // AVX types.
+ case MVT::v32i8:
+ case MVT::v16i16:
+ case MVT::v8i32:
+ case MVT::v4i64:
+ case MVT::v8f32:
+ case MVT::v4f64:
+ return std::make_pair(0U, X86::VR256RegisterClass);
}
break;
}
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
index 342a5e6..4e00733 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
@@ -172,12 +172,23 @@ namespace llvm {
/// ANDNP - Bitwise Logical AND NOT of Packed FP values.
ANDNP,
- /// PSIGNB/W/D - Copy integer sign.
- PSIGNB, PSIGNW, PSIGND,
+ /// PSIGN - Copy integer sign.
+ PSIGN,
- /// BLEND family of opcodes
+ /// BLENDV - Blend where the selector is an XMM register.
BLENDV,
+ /// BLENDxx - Blend where the selector is an immediate.
+ BLENDPW,
+ BLENDPS,
+ BLENDPD,
+
+ /// HADD - Integer horizontal add.
+ HADD,
+
+ /// HSUB - Integer horizontal sub.
+ HSUB,
+
/// FHADD - Floating point horizontal add.
FHADD,
@@ -213,16 +224,26 @@ namespace llvm {
// VZEXT_MOVL - Vector move low and zero extend.
VZEXT_MOVL,
- // VSHL, VSRL - Vector logical left / right shift.
- VSHL, VSRL,
+ // VSEXT_MOVL - Vector move low and sign extend.
+ VSEXT_MOVL,
+
+ // VSHLDQ, VSRLDQ - 128-bit vector logical left / right shift
+ VSHLDQ, VSRLDQ,
+
+ // VSHL, VSRL, VSRA - Vector shift elements
+ VSHL, VSRL, VSRA,
- // CMPPD, CMPPS - Vector double/float comparison.
- // CMPPD, CMPPS - Vector double/float comparison.
- CMPPD, CMPPS,
+ // VSHLI, VSRLI, VSRAI - Vector shift elements by immediate
+ VSHLI, VSRLI, VSRAI,
+
+ // CMPP - Vector packed double/float comparison.
+ CMPP,
// PCMP* - Vector integer comparisons.
- PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
- PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,
+ PCMPEQ, PCMPGT,
+
+ // VPCOM, VPCOMU - XOP Vector integer comparisons.
+ VPCOM, VPCOMU,
// ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
ADD, SUB, ADC, SBB, SMUL,
@@ -230,6 +251,10 @@ namespace llvm {
ANDN, // ANDN - Bitwise AND NOT with FLAGS results.
+ BLSI, // BLSI - Extract lowest set isolated bit
+ BLSMSK, // BLSMSK - Get mask up to lowest set bit
+ BLSR, // BLSR - Reset lowest set bit
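+ // (In C terms: BLSI(x) = x & -x, BLSMSK(x) = x ^ (x - 1),
+ // BLSR(x) = x & (x - 1).)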
+
UMUL, // LOW, HI, FLAGS = umul LHS, RHS
// MUL_IMM - X86 specific multiply by immediate.
@@ -246,46 +271,26 @@ namespace llvm {
PSHUFD,
PSHUFHW,
PSHUFLW,
- PSHUFHW_LD,
- PSHUFLW_LD,
- SHUFPD,
- SHUFPS,
+ SHUFP,
MOVDDUP,
MOVSHDUP,
MOVSLDUP,
- MOVSHDUP_LD,
- MOVSLDUP_LD,
MOVLHPS,
MOVLHPD,
MOVHLPS,
- MOVHLPD,
MOVLPS,
MOVLPD,
MOVSD,
MOVSS,
- UNPCKLPS,
- UNPCKLPD,
- VUNPCKLPSY,
- VUNPCKLPDY,
- UNPCKHPS,
- UNPCKHPD,
- VUNPCKHPSY,
- VUNPCKHPDY,
- PUNPCKLBW,
- PUNPCKLWD,
- PUNPCKLDQ,
- PUNPCKLQDQ,
- PUNPCKHBW,
- PUNPCKHWD,
- PUNPCKHDQ,
- PUNPCKHQDQ,
- VPERMILPS,
- VPERMILPSY,
- VPERMILPD,
- VPERMILPDY,
- VPERM2F128,
+ UNPCKL,
+ UNPCKH,
+ VPERMILP,
+ VPERM2X128,
VBROADCAST,
+ // PMULUDQ - Vector multiply packed unsigned doubleword integers
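+ // (Multiplies the even-numbered unsigned i32 elements of each operand,
+ // producing full 64-bit products.)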
+ PMULUDQ,
+
// VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
// according to %al. An operator is needed so that this can be expanded
// with control flow.
@@ -299,6 +304,9 @@ namespace llvm {
// falls back to heap allocation if not.
SEG_ALLOCA,
+ // WIN_FTOL - Windows's _ftol2 runtime routine to do fptoui.
+ WIN_FTOL,
+
// Memory barrier
MEMBARRIER,
MFENCE,
@@ -368,75 +376,6 @@ namespace llvm {
/// Define some predicates that are used for node matching.
namespace X86 {
- /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to PSHUFD.
- bool isPSHUFDMask(ShuffleVectorSDNode *N);
-
- /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to PSHUFD.
- bool isPSHUFHWMask(ShuffleVectorSDNode *N);
-
- /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to PSHUFD.
- bool isPSHUFLWMask(ShuffleVectorSDNode *N);
-
- /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to SHUFP*.
- bool isSHUFPMask(ShuffleVectorSDNode *N);
-
- /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
- bool isMOVHLPSMask(ShuffleVectorSDNode *N);
-
- /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
- /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
- /// <2, 3, 2, 3>
- bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N);
-
- /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for MOVLP{S|D}.
- bool isMOVLPMask(ShuffleVectorSDNode *N);
-
- /// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for MOVHP{S|D}.
- /// as well as MOVLHPS.
- bool isMOVLHPSMask(ShuffleVectorSDNode *N);
-
- /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to UNPCKL.
- bool isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);
-
- /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to UNPCKH.
- bool isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat = false);
-
- /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
- /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
- /// <0, 0, 1, 1>
- bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N);
-
- /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
- /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
- /// <2, 2, 3, 3>
- bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N);
-
- /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to MOVSS,
- /// MOVSD, and MOVD, i.e. setting the lowest element.
- bool isMOVLMask(ShuffleVectorSDNode *N);
-
- /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
- bool isMOVSHDUPMask(ShuffleVectorSDNode *N, const X86Subtarget *Subtarget);
-
- /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
- bool isMOVSLDUPMask(ShuffleVectorSDNode *N, const X86Subtarget *Subtarget);
-
- /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
- bool isMOVDDUPMask(ShuffleVectorSDNode *N);
-
/// isVEXTRACTF128Index - Return true if the specified
/// EXTRACT_SUBVECTOR operand specifies a vector extract that is
/// suitable for input to VEXTRACTF128.
@@ -447,23 +386,6 @@ namespace llvm {
/// suitable for input to VINSERTF128.
bool isVINSERTF128Index(SDNode *N);
- /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
- /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
- /// instructions.
- unsigned getShuffleSHUFImmediate(SDNode *N);
-
- /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
- /// the specified VECTOR_SHUFFLE mask with PSHUFHW instruction.
- unsigned getShufflePSHUFHWImmediate(SDNode *N);
-
- /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
- /// the specified VECTOR_SHUFFLE mask with PSHUFLW instruction.
- unsigned getShufflePSHUFLWImmediate(SDNode *N);
-
- /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle
- /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction.
- unsigned getShufflePALIGNRImmediate(SDNode *N);
-
/// getExtractVEXTRACTF128Immediate - Return the appropriate
/// immediate to extract the specified EXTRACT_SUBVECTOR index
/// with VEXTRACTF128 instructions.
@@ -529,7 +451,7 @@ namespace llvm {
/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
/// means there isn't a need to check it against alignment requirement,
/// probably because the source does not need to be loaded. If
- /// 'NonScalarIntSafe' is true, that means it's safe to return a
+ /// 'IsZeroVal' is true, that means it's safe to return a
/// non-scalar-integer type, e.g. empty string source, constant, or loaded
/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
/// constant so it does not need to be loaded.
@@ -537,7 +459,7 @@ namespace llvm {
/// target-independent logic.
virtual EVT
getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
- bool NonScalarIntSafe, bool MemcpyStrSrc,
+ bool IsZeroVal, bool MemcpyStrSrc,
MachineFunction &MF) const;
/// allowsUnalignedMemoryAccesses - Returns true if the target allows
@@ -587,7 +509,6 @@ namespace llvm {
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -697,6 +618,18 @@ namespace llvm {
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
+ /// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
+ /// for fptoui.
+ bool isTargetFTOL() const {
+ return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
+ }
+
+ /// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
+ /// used for fptoui to the given type.
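+ /// For example, fptoui of a double to i64 on 32-bit Windows is lowered
+ /// through WIN_FTOL to a call to _ftol2.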
+ bool isIntegerTypeFTOL(EVT VT) const {
+ return isTargetFTOL() && VT == MVT::i64;
+ }
+
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;
@@ -779,7 +712,8 @@ namespace llvm {
SelectionDAG &DAG) const;
std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
- bool isSigned) const;
+ bool isSigned,
+ bool isReplace) const;
SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
SelectionDAG &DAG) const;
@@ -833,6 +767,7 @@ namespace llvm {
SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) const;
@@ -846,9 +781,12 @@ namespace llvm {
SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue PerformTruncateCombine(SDNode* N, SelectionDAG &DAG, DAGCombinerInfo &DCI) const;
// Utility functions to help LowerVECTOR_SHUFFLE
SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const;
+ SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
@@ -857,8 +795,8 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
+ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -872,7 +810,7 @@ namespace llvm {
const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
- virtual bool isUsedByReturnOnly(SDNode *N) const;
+ virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
@@ -916,7 +854,7 @@ namespace llvm {
unsigned cxchgOpc,
unsigned notOpc,
unsigned EAXreg,
- TargetRegisterClass *RC,
+ const TargetRegisterClass *RC,
bool invSrc = false) const;
MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
diff --git a/contrib/llvm/lib/Target/X86/X86Instr3DNow.td b/contrib/llvm/lib/Target/X86/X86Instr3DNow.td
index dd4f6a5..54b91c3 100644
--- a/contrib/llvm/lib/Target/X86/X86Instr3DNow.td
+++ b/contrib/llvm/lib/Target/X86/X86Instr3DNow.td
@@ -1,4 +1,4 @@
-//====- X86Instr3DNow.td - The 3DNow! Instruction Set ------*- tablegen -*-===//
+//===-- X86Instr3DNow.td - The 3DNow! Instruction Set ------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td b/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
index 74b647a..0eee083 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -1,10 +1,10 @@
-//===- X86InstrArithmetic.td - Integer Arithmetic Instrs ---*- tablegen -*-===//
-//
+//===-- X86InstrArithmetic.td - Integer Arithmetic Instrs --*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the integer arithmetic instructions in the X86
@@ -18,22 +18,24 @@
let neverHasSideEffects = 1 in
def LEA16r : I<0x8D, MRMSrcMem,
(outs GR16:$dst), (ins i32mem:$src),
- "lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
+ "lea{w}\t{$src|$dst}, {$dst|$src}", [], IIC_LEA_16>, OpSize;
let isReMaterializable = 1 in
def LEA32r : I<0x8D, MRMSrcMem,
(outs GR32:$dst), (ins i32mem:$src),
"lea{l}\t{$src|$dst}, {$dst|$src}",
- [(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;
+ [(set GR32:$dst, lea32addr:$src)], IIC_LEA>,
+ Requires<[In32BitMode]>;
def LEA64_32r : I<0x8D, MRMSrcMem,
(outs GR32:$dst), (ins lea64_32mem:$src),
"lea{l}\t{$src|$dst}, {$dst|$src}",
- [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
+ [(set GR32:$dst, lea32addr:$src)], IIC_LEA>,
+ Requires<[In64BitMode]>;
let isReMaterializable = 1 in
def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"lea{q}\t{$src|$dst}, {$dst|$src}",
- [(set GR64:$dst, lea64addr:$src)]>;
+ [(set GR64:$dst, lea64addr:$src)], IIC_LEA>;
@@ -51,21 +53,23 @@ def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
// This probably ought to be moved to a def : Pat<> if the
// syntax can be accepted.
[(set AL, (mul AL, GR8:$src)),
- (implicit EFLAGS)]>; // AL,AH = AL*GR8
+ (implicit EFLAGS)], IIC_MUL8>; // AL,AH = AL*GR8
let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src),
"mul{w}\t$src",
- []>, OpSize; // AX,DX = AX*GR16
+ [], IIC_MUL16_REG>, OpSize; // AX,DX = AX*GR16
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src),
"mul{l}\t$src", // EAX,EDX = EAX*GR32
- [/*(set EAX, EDX, EFLAGS, (X86umul_flag EAX, GR32:$src))*/]>;
+ [/*(set EAX, EDX, EFLAGS, (X86umul_flag EAX, GR32:$src))*/],
+ IIC_MUL32_REG>;
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src),
"mul{q}\t$src", // RAX,RDX = RAX*GR64
- [/*(set RAX, RDX, EFLAGS, (X86umul_flag RAX, GR64:$src))*/]>;
+ [/*(set RAX, RDX, EFLAGS, (X86umul_flag RAX, GR64:$src))*/],
+ IIC_MUL64>;
let Defs = [AL,EFLAGS,AX], Uses = [AL] in
def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src),
@@ -74,50 +78,51 @@ def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src),
// This probably ought to be moved to a def : Pat<> if the
// syntax can be accepted.
[(set AL, (mul AL, (loadi8 addr:$src))),
- (implicit EFLAGS)]>; // AL,AH = AL*[mem8]
+ (implicit EFLAGS)], IIC_MUL8>; // AL,AH = AL*[mem8]
let mayLoad = 1, neverHasSideEffects = 1 in {
let Defs = [AX,DX,EFLAGS], Uses = [AX] in
def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src),
"mul{w}\t$src",
- []>, OpSize; // AX,DX = AX*[mem16]
+ [], IIC_MUL16_MEM>, OpSize; // AX,DX = AX*[mem16]
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
def MUL32m : I<0xF7, MRM4m, (outs), (ins i32mem:$src),
"mul{l}\t$src",
- []>; // EAX,EDX = EAX*[mem32]
-let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
+ [], IIC_MUL32_MEM>; // EAX,EDX = EAX*[mem32]
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src),
- "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
+ "mul{q}\t$src", [], IIC_MUL64>; // RAX,RDX = RAX*[mem64]
}
let neverHasSideEffects = 1 in {
let Defs = [AL,EFLAGS,AX], Uses = [AL] in
-def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", []>;
- // AL,AH = AL*GR8
+def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", [],
+ IIC_IMUL8>; // AL,AH = AL*GR8
let Defs = [AX,DX,EFLAGS], Uses = [AX] in
-def IMUL16r : I<0xF7, MRM5r, (outs), (ins GR16:$src), "imul{w}\t$src", []>,
- OpSize; // AX,DX = AX*GR16
+def IMUL16r : I<0xF7, MRM5r, (outs), (ins GR16:$src), "imul{w}\t$src", [],
+ IIC_IMUL16_RR>, OpSize; // AX,DX = AX*GR16
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
-def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", []>;
- // EAX,EDX = EAX*GR32
-let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
-def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src), "imul{q}\t$src", []>;
- // RAX,RDX = RAX*GR64
+def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", [],
+ IIC_IMUL32_RR>; // EAX,EDX = EAX*GR32
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
+def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src), "imul{q}\t$src", [],
+ IIC_IMUL64_RR>; // RAX,RDX = RAX*GR64
let mayLoad = 1 in {
let Defs = [AL,EFLAGS,AX], Uses = [AL] in
def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src),
- "imul{b}\t$src", []>; // AL,AH = AL*[mem8]
+ "imul{b}\t$src", [], IIC_IMUL8>; // AL,AH = AL*[mem8]
let Defs = [AX,DX,EFLAGS], Uses = [AX] in
def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src),
- "imul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16]
+ "imul{w}\t$src", [], IIC_IMUL16_MEM>, OpSize;
+ // AX,DX = AX*[mem16]
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in
def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src),
- "imul{l}\t$src", []>; // EAX,EDX = EAX*[mem32]
-let Defs = [RAX,RDX,EFLAGS], Uses = [RAX], neverHasSideEffects = 1 in
+ "imul{l}\t$src", [], IIC_IMUL32_MEM>; // EAX,EDX = EAX*[mem32]
+let Defs = [RAX,RDX,EFLAGS], Uses = [RAX] in
def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
- "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64]
+ "imul{q}\t$src", [], IIC_IMUL64>; // RAX,RDX = RAX*[mem64]
}
} // neverHasSideEffects
@@ -130,16 +135,19 @@ let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS,
- (X86smul_flag GR16:$src1, GR16:$src2))]>, TB, OpSize;
+ (X86smul_flag GR16:$src1, GR16:$src2))], IIC_IMUL16_RR>,
+ TB, OpSize;
def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS,
- (X86smul_flag GR32:$src1, GR32:$src2))]>, TB;
+ (X86smul_flag GR32:$src1, GR32:$src2))], IIC_IMUL32_RR>,
+ TB;
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
- (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;
+ (X86smul_flag GR64:$src1, GR64:$src2))], IIC_IMUL64_RR>,
+ TB;
}
// Register-Memory Signed Integer Multiply
@@ -147,18 +155,23 @@ def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, EFLAGS,
- (X86smul_flag GR16:$src1, (load addr:$src2)))]>,
+ (X86smul_flag GR16:$src1, (load addr:$src2)))],
+ IIC_IMUL16_RM>,
TB, OpSize;
def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS,
- (X86smul_flag GR32:$src1, (load addr:$src2)))]>, TB;
+ (X86smul_flag GR32:$src1, (load addr:$src2)))],
+ IIC_IMUL32_RM>,
+ TB;
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
- (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
+ (X86smul_flag GR64:$src1, (load addr:$src2)))],
+ IIC_IMUL64_RM>,
+ TB;
} // Constraints = "$src1 = $dst"
} // Defs = [EFLAGS]
@@ -170,33 +183,39 @@ def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, EFLAGS,
- (X86smul_flag GR16:$src1, imm:$src2))]>, OpSize;
+ (X86smul_flag GR16:$src1, imm:$src2))],
+ IIC_IMUL16_RRI>, OpSize;
def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, EFLAGS,
- (X86smul_flag GR16:$src1, i16immSExt8:$src2))]>,
+ (X86smul_flag GR16:$src1, i16immSExt8:$src2))],
+ IIC_IMUL16_RRI>,
OpSize;
def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, EFLAGS,
- (X86smul_flag GR32:$src1, imm:$src2))]>;
+ (X86smul_flag GR32:$src1, imm:$src2))],
+ IIC_IMUL32_RRI>;
def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, EFLAGS,
- (X86smul_flag GR32:$src1, i32immSExt8:$src2))]>;
+ (X86smul_flag GR32:$src1, i32immSExt8:$src2))],
+ IIC_IMUL32_RRI>;
def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, EFLAGS,
- (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;
+ (X86smul_flag GR64:$src1, i64immSExt32:$src2))],
+ IIC_IMUL64_RRI>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, EFLAGS,
- (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
+ (X86smul_flag GR64:$src1, i64immSExt8:$src2))],
+ IIC_IMUL64_RRI>;
// Memory-Integer Signed Integer Multiply
@@ -204,37 +223,43 @@ def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
(outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, EFLAGS,
- (X86smul_flag (load addr:$src1), imm:$src2))]>,
+ (X86smul_flag (load addr:$src1), imm:$src2))],
+ IIC_IMUL16_RMI>,
OpSize;
def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
(outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR16:$dst, EFLAGS,
(X86smul_flag (load addr:$src1),
- i16immSExt8:$src2))]>, OpSize;
+ i16immSExt8:$src2))], IIC_IMUL16_RMI>,
+ OpSize;
def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
(outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, EFLAGS,
- (X86smul_flag (load addr:$src1), imm:$src2))]>;
+ (X86smul_flag (load addr:$src1), imm:$src2))],
+ IIC_IMUL32_RMI>;
def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
(outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, EFLAGS,
(X86smul_flag (load addr:$src1),
- i32immSExt8:$src2))]>;
+ i32immSExt8:$src2))],
+ IIC_IMUL32_RMI>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
(outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, EFLAGS,
(X86smul_flag (load addr:$src1),
- i64immSExt32:$src2))]>;
+ i64immSExt32:$src2))],
+ IIC_IMUL64_RMI>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
(outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR64:$dst, EFLAGS,
(X86smul_flag (load addr:$src1),
- i64immSExt8:$src2))]>;
+ i64immSExt8:$src2))],
+ IIC_IMUL64_RMI>;
} // Defs = [EFLAGS]
@@ -243,62 +268,62 @@ def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
// unsigned division/remainder
let Defs = [AL,EFLAGS,AX], Uses = [AX] in
def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
- "div{b}\t$src", []>;
+ "div{b}\t$src", [], IIC_DIV8_REG>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
- "div{w}\t$src", []>, OpSize;
+ "div{w}\t$src", [], IIC_DIV16>, OpSize;
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
- "div{l}\t$src", []>;
+ "div{l}\t$src", [], IIC_DIV32>;
// RDX:RAX/r64 = RAX,RDX
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src),
- "div{q}\t$src", []>;
+ "div{q}\t$src", [], IIC_DIV64>;
let mayLoad = 1 in {
let Defs = [AL,EFLAGS,AX], Uses = [AX] in
def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
- "div{b}\t$src", []>;
+ "div{b}\t$src", [], IIC_DIV8_MEM>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
- "div{w}\t$src", []>, OpSize;
+ "div{w}\t$src", [], IIC_DIV16>, OpSize;
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in // EDX:EAX/[mem32] = EAX,EDX
def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src),
- "div{l}\t$src", []>;
+ "div{l}\t$src", [], IIC_DIV32>;
// RDX:RAX/[mem64] = RAX,RDX
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src),
- "div{q}\t$src", []>;
+ "div{q}\t$src", [], IIC_DIV64>;
}
// Signed division/remainder.
let Defs = [AL,EFLAGS,AX], Uses = [AX] in
def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH
- "idiv{b}\t$src", []>;
+ "idiv{b}\t$src", [], IIC_IDIV8>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX
- "idiv{w}\t$src", []>, OpSize;
+ "idiv{w}\t$src", [], IIC_IDIV16>, OpSize;
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in
def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX
- "idiv{l}\t$src", []>;
+ "idiv{l}\t$src", [], IIC_IDIV32>;
// RDX:RAX/r64 = RAX,RDX
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in
def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src),
- "idiv{q}\t$src", []>;
-
-let mayLoad = 1, mayLoad = 1 in {
+ "idiv{q}\t$src", [], IIC_IDIV64>;
+
+let mayLoad = 1 in {
let Defs = [AL,EFLAGS,AX], Uses = [AX] in
def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH
- "idiv{b}\t$src", []>;
+ "idiv{b}\t$src", [], IIC_IDIV8>;
let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in
def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX
- "idiv{w}\t$src", []>, OpSize;
+ "idiv{w}\t$src", [], IIC_IDIV16>, OpSize;
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in // EDX:EAX/[mem32] = EAX,EDX
def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src),
- "idiv{l}\t$src", []>;
+ "idiv{l}\t$src", [], IIC_IDIV32>;
let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in // RDX:RAX/[mem64] = RAX,RDX
def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
- "idiv{q}\t$src", []>;
+ "idiv{q}\t$src", [], IIC_IDIV64>;
}
//===----------------------------------------------------------------------===//
@@ -312,35 +337,35 @@ let Constraints = "$src1 = $dst" in {
def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src1),
"neg{b}\t$dst",
[(set GR8:$dst, (ineg GR8:$src1)),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_REG>;
def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
"neg{w}\t$dst",
[(set GR16:$dst, (ineg GR16:$src1)),
- (implicit EFLAGS)]>, OpSize;
+ (implicit EFLAGS)], IIC_UNARY_REG>, OpSize;
def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
"neg{l}\t$dst",
[(set GR32:$dst, (ineg GR32:$src1)),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_REG>;
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src1), "neg{q}\t$dst",
[(set GR64:$dst, (ineg GR64:$src1)),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_REG>;
} // Constraints = "$src1 = $dst"
def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst),
"neg{b}\t$dst",
[(store (ineg (loadi8 addr:$dst)), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>;
def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst),
"neg{w}\t$dst",
[(store (ineg (loadi16 addr:$dst)), addr:$dst),
- (implicit EFLAGS)]>, OpSize;
+ (implicit EFLAGS)], IIC_UNARY_MEM>, OpSize;
def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst),
"neg{l}\t$dst",
[(store (ineg (loadi32 addr:$dst)), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
[(store (ineg (loadi64 addr:$dst)), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>;
} // Defs = [EFLAGS]
@@ -351,29 +376,30 @@ let Constraints = "$src1 = $dst" in {
let AddedComplexity = 15 in {
def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src1),
"not{b}\t$dst",
- [(set GR8:$dst, (not GR8:$src1))]>;
+ [(set GR8:$dst, (not GR8:$src1))], IIC_UNARY_REG>;
def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
"not{w}\t$dst",
- [(set GR16:$dst, (not GR16:$src1))]>, OpSize;
+ [(set GR16:$dst, (not GR16:$src1))], IIC_UNARY_REG>, OpSize;
def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
"not{l}\t$dst",
- [(set GR32:$dst, (not GR32:$src1))]>;
+ [(set GR32:$dst, (not GR32:$src1))], IIC_UNARY_REG>;
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src1), "not{q}\t$dst",
- [(set GR64:$dst, (not GR64:$src1))]>;
+ [(set GR64:$dst, (not GR64:$src1))], IIC_UNARY_REG>;
}
} // Constraints = "$src1 = $dst"
def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst),
"not{b}\t$dst",
- [(store (not (loadi8 addr:$dst)), addr:$dst)]>;
+ [(store (not (loadi8 addr:$dst)), addr:$dst)], IIC_UNARY_MEM>;
def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst),
"not{w}\t$dst",
- [(store (not (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
+ [(store (not (loadi16 addr:$dst)), addr:$dst)], IIC_UNARY_MEM>,
+ OpSize;
def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst),
"not{l}\t$dst",
- [(store (not (loadi32 addr:$dst)), addr:$dst)]>;
+ [(store (not (loadi32 addr:$dst)), addr:$dst)], IIC_UNARY_MEM>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
- [(store (not (loadi64 addr:$dst)), addr:$dst)]>;
+ [(store (not (loadi64 addr:$dst)), addr:$dst)], IIC_UNARY_MEM>;
} // CodeSize
// TODO: inc/dec is slow for P4, but fast for Pentium-M.
@@ -382,19 +408,22 @@ let Constraints = "$src1 = $dst" in {
let CodeSize = 2 in
def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
"inc{b}\t$dst",
- [(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src1))]>;
+ [(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src1))],
+ IIC_UNARY_REG>;
let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
"inc{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))]>,
+ [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))], IIC_UNARY_REG>,
OpSize, Requires<[In32BitMode]>;
def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
"inc{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))]>,
+ [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))],
+ IIC_UNARY_REG>,
Requires<[In32BitMode]>;
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src1), "inc{q}\t$dst",
- [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src1))]>;
+ [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src1))],
+ IIC_UNARY_REG>;
} // isConvertibleToThreeAddress = 1, CodeSize = 1
@@ -403,19 +432,23 @@ let isConvertibleToThreeAddress = 1, CodeSize = 2 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"inc{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))]>,
+ [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))],
+ IIC_UNARY_REG>,
OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"inc{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))]>,
+ [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))],
+ IIC_UNARY_REG>,
Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"dec{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))]>,
+ [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))],
+ IIC_UNARY_REG>,
OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"dec{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))]>,
+ [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))],
+ IIC_UNARY_REG>,
Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress = 1, CodeSize = 2
@@ -424,37 +457,37 @@ def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
let CodeSize = 2 in {
def INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
[(store (add (loadi8 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>;
def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
[(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
+ (implicit EFLAGS)], IIC_UNARY_MEM>,
OpSize, Requires<[In32BitMode]>;
def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
[(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
+ (implicit EFLAGS)], IIC_UNARY_MEM>,
Requires<[In32BitMode]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
[(store (add (loadi64 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>;
// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
// FIXME: What is this for??
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
[(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
+ (implicit EFLAGS)], IIC_UNARY_MEM>,
OpSize, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
[(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
+ (implicit EFLAGS)], IIC_UNARY_MEM>,
Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
[(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
+ (implicit EFLAGS)], IIC_UNARY_MEM>,
OpSize, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
[(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
+ (implicit EFLAGS)], IIC_UNARY_MEM>,
Requires<[In64BitMode]>;
} // CodeSize = 2
@@ -462,18 +495,22 @@ let Constraints = "$src1 = $dst" in {
let CodeSize = 2 in
def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
"dec{b}\t$dst",
- [(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src1))]>;
+ [(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src1))],
+ IIC_UNARY_REG>;
let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
"dec{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))]>,
+ [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))],
+ IIC_UNARY_REG>,
OpSize, Requires<[In32BitMode]>;
def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
"dec{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))]>,
+ [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))],
+ IIC_UNARY_REG>,
Requires<[In32BitMode]>;
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src1), "dec{q}\t$dst",
- [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src1))]>;
+ [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src1))],
+ IIC_UNARY_REG>;
} // CodeSize = 2
} // Constraints = "$src1 = $dst"
@@ -481,18 +518,18 @@ def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src1), "dec{q}\t$dst",
let CodeSize = 2 in {
def DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
[(store (add (loadi8 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>;
def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
[(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
+ (implicit EFLAGS)], IIC_UNARY_MEM>,
OpSize, Requires<[In32BitMode]>;
def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
[(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
+ (implicit EFLAGS)], IIC_UNARY_MEM>,
Requires<[In32BitMode]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
[(store (add (loadi64 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>;
+ (implicit EFLAGS)], IIC_UNARY_MEM>;
} // CodeSize = 2
} // Defs = [EFLAGS]
@@ -588,11 +625,13 @@ def Xi64 : X86TypeInfo<i64, "q", GR64, loadi64, i64mem,
/// 4. Infers whether the low bit of the opcode should be 0 (for i8 operations)
/// or 1 (for i16,i32,i64 operations).
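/// For example, the "add" opcode byte is 0x00 for the i8 form but 0x01
/// for the i16/i32/i64 forms.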
class ITy<bits<8> opcode, Format f, X86TypeInfo typeinfo, dag outs, dag ins,
- string mnemonic, string args, list<dag> pattern>
+ string mnemonic, string args, list<dag> pattern,
+ InstrItinClass itin = IIC_BIN_NONMEM>
: I<{opcode{7}, opcode{6}, opcode{5}, opcode{4},
opcode{3}, opcode{2}, opcode{1}, typeinfo.HasOddOpcode },
f, outs, ins,
- !strconcat(mnemonic, "{", typeinfo.InstrSuffix, "}\t", args), pattern> {
+ !strconcat(mnemonic, "{", typeinfo.InstrSuffix, "}\t", args), pattern,
+ itin> {
// Infer instruction prefixes from type info.
let hasOpSizePrefix = typeinfo.HasOpSizePrefix;
@@ -601,10 +640,11 @@ class ITy<bits<8> opcode, Format f, X86TypeInfo typeinfo, dag outs, dag ins,
// BinOpRR - Instructions like "add reg, reg, reg".
class BinOpRR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
- dag outlist, list<dag> pattern, Format f = MRMDestReg>
+ dag outlist, list<dag> pattern, InstrItinClass itin,
+ Format f = MRMDestReg>
: ITy<opcode, f, typeinfo, outlist,
(ins typeinfo.RegClass:$src1, typeinfo.RegClass:$src2),
- mnemonic, "{$src2, $src1|$src1, $src2}", pattern>;
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern, itin>;
// BinOpRR_R - Instructions like "add reg, reg, reg", where the pattern has
// just a regclass (no eflags) as a result.
@@ -612,7 +652,8 @@ class BinOpRR_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
SDNode opnode>
: BinOpRR<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst,
- (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))]>;
+ (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))],
+ IIC_BIN_NONMEM>;
// BinOpRR_F - Instructions like "cmp reg, Reg", where the pattern has
// just a EFLAGS as a result.
@@ -621,7 +662,7 @@ class BinOpRR_F<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: BinOpRR<opcode, mnemonic, typeinfo, (outs),
[(set EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))],
- f>;
+ IIC_BIN_NONMEM, f>;
// BinOpRR_RF - Instructions like "add reg, reg, reg", where the pattern has
// both a regclass and EFLAGS as a result.
@@ -629,7 +670,8 @@ class BinOpRR_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
SDNode opnode>
: BinOpRR<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
- (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))]>;
+ (opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2))],
+ IIC_BIN_NONMEM>;
// BinOpRR_RFF - Instructions like "adc reg, reg, reg", where the pattern has
// both a regclass and EFLAGS as a result, and has EFLAGS as input.
@@ -638,14 +680,14 @@ class BinOpRR_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: BinOpRR<opcode, mnemonic, typeinfo, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.RegClass:$src2,
- EFLAGS))]>;
+ EFLAGS))], IIC_BIN_NONMEM>;
// BinOpRR_Rev - Instructions like "add reg, reg, reg" (reversed encoding).
class BinOpRR_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
: ITy<opcode, MRMSrcReg, typeinfo,
(outs typeinfo.RegClass:$dst),
(ins typeinfo.RegClass:$src1, typeinfo.RegClass:$src2),
- mnemonic, "{$src2, $dst|$dst, $src2}", []> {
+ mnemonic, "{$src2, $dst|$dst, $src2}", [], IIC_BIN_NONMEM> {
// The disassembler should know about this, but not the asmparser.
let isCodeGenOnly = 1;
}
@@ -654,7 +696,7 @@ class BinOpRR_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
class BinOpRR_F_Rev<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo>
: ITy<opcode, MRMSrcReg, typeinfo, (outs),
(ins typeinfo.RegClass:$src1, typeinfo.RegClass:$src2),
- mnemonic, "{$src2, $src1|$src1, $src2}", []> {
+ mnemonic, "{$src2, $src1|$src1, $src2}", [], IIC_BIN_NONMEM> {
// The disassembler should know about this, but not the asmparser.
let isCodeGenOnly = 1;
}
@@ -664,7 +706,7 @@ class BinOpRM<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
dag outlist, list<dag> pattern>
: ITy<opcode, MRMSrcMem, typeinfo, outlist,
(ins typeinfo.RegClass:$src1, typeinfo.MemOperand:$src2),
- mnemonic, "{$src2, $src1|$src1, $src2}", pattern>;
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern, IIC_BIN_NONMEM>;
// BinOpRM_R - Instructions like "add reg, reg, [mem]".
class BinOpRM_R<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
@@ -700,7 +742,7 @@ class BinOpRI<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
Format f, dag outlist, list<dag> pattern>
: ITy<opcode, f, typeinfo, outlist,
(ins typeinfo.RegClass:$src1, typeinfo.ImmOperand:$src2),
- mnemonic, "{$src2, $src1|$src1, $src2}", pattern> {
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern, IIC_BIN_NONMEM> {
let ImmT = typeinfo.ImmEncoding;
}
@@ -724,7 +766,6 @@ class BinOpRI_RF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
: BinOpRI<opcode, mnemonic, typeinfo, f, (outs typeinfo.RegClass:$dst),
[(set typeinfo.RegClass:$dst, EFLAGS,
(opnode typeinfo.RegClass:$src1, typeinfo.ImmOperator:$src2))]>;
-
// BinOpRI_RFF - Instructions like "adc reg, reg, imm".
class BinOpRI_RFF<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
SDNode opnode, Format f>
@@ -738,7 +779,7 @@ class BinOpRI8<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
Format f, dag outlist, list<dag> pattern>
: ITy<opcode, f, typeinfo, outlist,
(ins typeinfo.RegClass:$src1, typeinfo.Imm8Operand:$src2),
- mnemonic, "{$src2, $src1|$src1, $src2}", pattern> {
+ mnemonic, "{$src2, $src1|$src1, $src2}", pattern, IIC_BIN_NONMEM> {
let ImmT = Imm8; // Always 8-bit immediate.
}
@@ -776,7 +817,7 @@ class BinOpMR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
list<dag> pattern>
: ITy<opcode, MRMDestMem, typeinfo,
(outs), (ins typeinfo.MemOperand:$dst, typeinfo.RegClass:$src),
- mnemonic, "{$src, $dst|$dst, $src}", pattern>;
+ mnemonic, "{$src, $dst|$dst, $src}", pattern, IIC_BIN_MEM>;
// BinOpMR_RMW - Instructions like "add [mem], reg".
class BinOpMR_RMW<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
@@ -804,7 +845,7 @@ class BinOpMI<string mnemonic, X86TypeInfo typeinfo,
Format f, list<dag> pattern, bits<8> opcode = 0x80>
: ITy<opcode, f, typeinfo,
(outs), (ins typeinfo.MemOperand:$dst, typeinfo.ImmOperand:$src),
- mnemonic, "{$src, $dst|$dst, $src}", pattern> {
+ mnemonic, "{$src, $dst|$dst, $src}", pattern, IIC_BIN_MEM> {
let ImmT = typeinfo.ImmEncoding;
}
@@ -815,7 +856,6 @@ class BinOpMI_RMW<string mnemonic, X86TypeInfo typeinfo,
[(store (opnode (typeinfo.VT (load addr:$dst)),
typeinfo.ImmOperator:$src), addr:$dst),
(implicit EFLAGS)]>;
-
// BinOpMI_RMW_FF - Instructions like "adc [mem], imm".
class BinOpMI_RMW_FF<string mnemonic, X86TypeInfo typeinfo,
SDNode opnode, Format f>
@@ -837,7 +877,7 @@ class BinOpMI8<string mnemonic, X86TypeInfo typeinfo,
Format f, list<dag> pattern>
: ITy<0x82, f, typeinfo,
(outs), (ins typeinfo.MemOperand:$dst, typeinfo.Imm8Operand:$src),
- mnemonic, "{$src, $dst|$dst, $src}", pattern> {
+ mnemonic, "{$src, $dst|$dst, $src}", pattern, IIC_BIN_MEM> {
let ImmT = Imm8; // Always 8-bit immediate.
}
@@ -1150,7 +1190,7 @@ let Defs = [EFLAGS] in {
// register class is constrained to GR8_NOREX.
let isPseudo = 1 in
def TEST8ri_NOREX : I<0, Pseudo, (outs), (ins GR8_NOREX:$src, i8imm:$mask),
- "", []>;
+ "", [], IIC_BIN_NONMEM>;
}
//===----------------------------------------------------------------------===//
@@ -1160,14 +1200,39 @@ multiclass bmi_andn<string mnemonic, RegisterClass RC, X86MemOperand x86memop,
PatFrag ld_frag> {
def rr : I<0xF2, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, EFLAGS, (X86andn_flag RC:$src1, RC:$src2))]>;
+ [(set RC:$dst, EFLAGS, (X86andn_flag RC:$src1, RC:$src2))],
+ IIC_BIN_NONMEM>;
def rm : I<0xF2, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, EFLAGS,
- (X86andn_flag RC:$src1, (ld_frag addr:$src2)))]>;
+ (X86andn_flag RC:$src1, (ld_frag addr:$src2)))], IIC_BIN_MEM>;
}
let Predicates = [HasBMI], Defs = [EFLAGS] in {
defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32>, T8, VEX_4V;
defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64>, T8, VEX_4V, VEX_W;
}
+
+//===----------------------------------------------------------------------===//
+// MULX Instruction
+//
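+// MULX multiplies the implicit EDX/RDX operand by $src without affecting
+// EFLAGS, writing the high half of the product to $dst1 and the low half
+// to $dst2 (hence the Uses = [EDX]/[RDX] below).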
+multiclass bmi_mulx<string mnemonic, RegisterClass RC, X86MemOperand x86memop> {
+let neverHasSideEffects = 1 in {
+ let isCommutable = 1 in
+ def rr : I<0xF6, MRMSrcReg, (outs RC:$dst1, RC:$dst2), (ins RC:$src),
+ !strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
+ [], IIC_MUL8>, T8XD, VEX_4V;
+
+ let mayLoad = 1 in
+ def rm : I<0xF6, MRMSrcMem, (outs RC:$dst1, RC:$dst2), (ins x86memop:$src),
+ !strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
+ [], IIC_MUL8>, T8XD, VEX_4V;
+}
+}
+
+let Predicates = [HasBMI2] in {
+ let Uses = [EDX] in
+ defm MULX32 : bmi_mulx<"mulx{l}", GR32, i32mem>;
+ let Uses = [RDX] in
+ defm MULX64 : bmi_mulx<"mulx{q}", GR64, i64mem>, VEX_W;
+}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
index 0245e5c..fa1d676 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
@@ -27,7 +27,6 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
namespace llvm {
diff --git a/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td b/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td
index 3a43b22..adeaf54 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrCMovSetCC.td
@@ -1,10 +1,10 @@
-//===- X86InstrCMovSetCC.td - Conditional Move and SetCC ---*- tablegen -*-===//
-//
+//===-- X86InstrCMovSetCC.td - Conditional Move and SetCC --*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 conditional move and set on condition
@@ -21,17 +21,20 @@ multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
: I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
!strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
[(set GR16:$dst,
- (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))]>,TB,OpSize;
+ (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))],
+ IIC_CMOV16_RR>,TB,OpSize;
def #NAME#32rr
: I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
!strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
[(set GR32:$dst,
- (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))]>, TB;
+ (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))],
+ IIC_CMOV32_RR>, TB;
def #NAME#64rr
:RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
!strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
[(set GR64:$dst,
- (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>, TB;
+ (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))],
+ IIC_CMOV32_RR>, TB;
}
let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" in {
@@ -39,17 +42,18 @@ multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
: I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
!strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
[(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
- CondNode, EFLAGS))]>, TB, OpSize;
+ CondNode, EFLAGS))], IIC_CMOV16_RM>,
+ TB, OpSize;
def #NAME#32rm
: I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
!strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
[(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
- CondNode, EFLAGS))]>, TB;
+ CondNode, EFLAGS))], IIC_CMOV32_RM>, TB;
def #NAME#64rm
:RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
!strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
[(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- CondNode, EFLAGS))]>, TB;
+ CondNode, EFLAGS))], IIC_CMOV32_RM>, TB;
} // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
} // end multiclass
@@ -78,10 +82,12 @@ multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> {
let Uses = [EFLAGS] in {
def r : I<opc, MRM0r, (outs GR8:$dst), (ins),
!strconcat(Mnemonic, "\t$dst"),
- [(set GR8:$dst, (X86setcc OpNode, EFLAGS))]>, TB;
+ [(set GR8:$dst, (X86setcc OpNode, EFLAGS))],
+ IIC_SET_R>, TB;
def m : I<opc, MRM0m, (outs), (ins i8mem:$dst),
!strconcat(Mnemonic, "\t$dst"),
- [(store (X86setcc OpNode, EFLAGS), addr:$dst)]>, TB;
+ [(store (X86setcc OpNode, EFLAGS), addr:$dst)],
+ IIC_SET_M>, TB;
} // Uses = [EFLAGS]
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
index 612b2fa..6f9e849 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -112,23 +112,39 @@ let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.
-let Defs = [EAX, ESP, EFLAGS], Uses = [ESP, EAX] in
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
"# variable sized alloca for segmented stacks",
[(set GR32:$dst,
(X86SegAlloca GR32:$size))]>,
Requires<[In32BitMode]>;
-let Defs = [RAX, RSP, EFLAGS], Uses = [RSP, RAX] in
+let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
"# variable sized alloca for segmented stacks",
[(set GR64:$dst,
(X86SegAlloca GR64:$size))]>,
Requires<[In64BitMode]>;
-
}
+// The MSVC runtime contains an _ftol2 routine for converting floating-point
+// to integer values. It has a strange calling convention: the input is
+// popped from the x87 stack, and the return value is given in EDX:EAX. No
+// other registers (aside from flags) are touched.
+// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
+// variant is unnecessary.
+
+let Defs = [EAX, EDX, EFLAGS], FPForm = SpecialFP in {
+ def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
+ "# win32 fptoui",
+ [(X86WinFTOL RFP32:$src)]>,
+ Requires<[In32BitMode]>;
+ def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
+ "# win32 fptoui",
+ [(X86WinFTOL RFP64:$src)]>,
+ Requires<[In32BitMode]>;
+}
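
As a value-level sketch only of what the _ftol2 convention computes — a truncating float-to-integer conversion whose 64-bit result the real routine returns split across EDX:EAX — assuming an in-range, non-negative input (the x87 stack pop and register split cannot be modeled in portable C++):

#include <cassert>
#include <cstdint>

// Truncation toward zero, as _ftol2 performs; the real helper leaves
// the 64-bit result in EDX:EAX and touches no other registers.
static uint64_t ftol2_value(double x) {
  return (uint64_t)x;  // assumes 0 <= x < 2^64
}

int main() {
  assert(ftol2_value(3.9) == 3);
  return 0;
}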
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
@@ -137,7 +153,7 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
"ret\t#eh_return, addr: $addr",
- [(X86ehret GR32:$addr)]>;
+ [(X86ehret GR32:$addr)], IIC_RET>;
}
@@ -145,8 +161,26 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
"ret\t#eh_return, addr: $addr",
- [(X86ehret GR64:$addr)]>;
+ [(X86ehret GR64:$addr)], IIC_RET>;
+
+}
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions used by segmented stacks.
+//
+// This is lowered into a RET instruction by MCInstLower. We need
+// this so that we can avoid a MachineBasicBlock which ends
+// with a RET and also has successors.
+let isPseudo = 1 in {
+def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
+ "", []>;
+
+// This instruction is lowered to a RET followed by a MOV. The two
+// instructions are not generated at a higher level because the
+// verifier would then see a MachineBasicBlock ending with a non-terminator.
+def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
+ "", []>;
}
//===----------------------------------------------------------------------===//
@@ -159,7 +193,7 @@ def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
isCodeGenOnly = 1 in {
def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
- [(set GR8:$dst, 0)]>;
+ [(set GR8:$dst, 0)], IIC_ALU_NONMEM>;
// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
// encoding and avoids a partial-register update sometimes, but doing so
@@ -168,11 +202,11 @@ def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
// to an MCInst.
def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
"",
- [(set GR16:$dst, 0)]>, OpSize;
+ [(set GR16:$dst, 0)], IIC_ALU_NONMEM>, OpSize;
// FIXME: Set encoding to pseudo.
def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
- [(set GR32:$dst, 0)]>;
+ [(set GR32:$dst, 0)], IIC_ALU_NONMEM>;
}
// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
@@ -184,7 +218,7 @@ def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
let Defs = [EFLAGS], isCodeGenOnly=1,
AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
- [(set GR64:$dst, 0)]>;
+ [(set GR64:$dst, 0)], IIC_ALU_NONMEM>;
// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
@@ -192,7 +226,8 @@ def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
isCodeGenOnly = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
- "", [(set GR64:$dst, i64immZExt32:$src)]>;
+ "", [(set GR64:$dst, i64immZExt32:$src)],
+ IIC_ALU_NONMEM>;
// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
@@ -202,14 +237,18 @@ let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
- [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+ [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
+ IIC_ALU_NONMEM>;
def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
- [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
+ [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
+ IIC_ALU_NONMEM>,
OpSize;
def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
- [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+ [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
+ IIC_ALU_NONMEM>;
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
- [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
+ [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))],
+ IIC_ALU_NONMEM>;
} // isCodeGenOnly
@@ -262,34 +301,67 @@ def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
// String Pseudo Instructions
//
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
-def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
- [(X86rep_movs i8)]>, REP;
-def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
- [(X86rep_movs i16)]>, REP, OpSize;
-def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
- [(X86rep_movs i32)]>, REP;
+def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
+ [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
+ Requires<[In32BitMode]>;
+def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
+ [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize,
+ Requires<[In32BitMode]>;
+def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
+ [(X86rep_movs i32)], IIC_REP_MOVS>, REP,
+ Requires<[In32BitMode]>;
}
-let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
-def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
- [(X86rep_movs i64)]>, REP;
-
+let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
+def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
+ [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
+ Requires<[In64BitMode]>;
+def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
+ [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize,
+ Requires<[In64BitMode]>;
+def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
+ [(X86rep_movs i32)], IIC_REP_MOVS>, REP,
+ Requires<[In64BitMode]>;
+def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
+ [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
+ Requires<[In64BitMode]>;
+}
// FIXME: Should use "(X86rep_stos AL)" as the pattern.
-let Defs = [ECX,EDI], Uses = [AL,ECX,EDI], isCodeGenOnly = 1 in
-def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
- [(X86rep_stos i8)]>, REP;
-let Defs = [ECX,EDI], Uses = [AX,ECX,EDI], isCodeGenOnly = 1 in
-def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
- [(X86rep_stos i16)]>, REP, OpSize;
-let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI], isCodeGenOnly = 1 in
-def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
- [(X86rep_stos i32)]>, REP;
-
-let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
-def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
- [(X86rep_stos i64)]>, REP;
+let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
+ let Uses = [AL,ECX,EDI] in
+ def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
+ [(X86rep_stos i8)], IIC_REP_STOS>, REP,
+ Requires<[In32BitMode]>;
+ let Uses = [AX,ECX,EDI] in
+ def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
+ [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize,
+ Requires<[In32BitMode]>;
+ let Uses = [EAX,ECX,EDI] in
+ def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
+ [(X86rep_stos i32)], IIC_REP_STOS>, REP,
+ Requires<[In32BitMode]>;
+}
+let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
+ let Uses = [AL,RCX,RDI] in
+ def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
+ [(X86rep_stos i8)], IIC_REP_STOS>, REP,
+ Requires<[In64BitMode]>;
+ let Uses = [AX,RCX,RDI] in
+ def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
+ [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize,
+ Requires<[In64BitMode]>;
+ let Uses = [RAX,RCX,RDI] in
+ def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
+ [(X86rep_stos i32)], IIC_REP_STOS>, REP,
+ Requires<[In64BitMode]>;
+
+ let Uses = [RAX,RCX,RDI] in
+ def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
+ [(X86rep_stos i64)], IIC_REP_STOS>, REP,
+ Requires<[In64BitMode]>;
+}
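
A minimal C++ model of the "rep movsb" semantics these mode-split pseudos select between, assuming the direction flag is clear (register names are used as parameter names purely for illustration):

#include <cassert>
#include <cstdint>

// "rep movsb" with DF clear: copy rcx bytes from [rsi] to [rdi],
// advancing both pointers while counting rcx down to zero.
static void rep_movsb(uint8_t *rdi, const uint8_t *rsi, uint64_t rcx) {
  while (rcx--) *rdi++ = *rsi++;
}

int main() {
  const uint8_t src[4] = {1, 2, 3, 4};
  uint8_t dst[4] = {0};
  rep_movsb(dst, src, 4);
  assert(dst[0] == 1 && dst[3] == 4);
  return 0;
}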
//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
@@ -537,22 +609,13 @@ let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
"lock\n\t"
"or{l}\t{$zero, $dst|$dst, $zero}",
- []>, Requires<[In32BitMode]>, LOCK;
+ [], IIC_ALU_MEM>, Requires<[In32BitMode]>, LOCK;
let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
"#MEMBARRIER",
[(X86MemBarrier)]>;
-// TODO: Get this to fold the constant into the instruction.
-let hasSideEffects = 1, Defs = [ESP], isCodeGenOnly = 1 in
-def Int_MemBarrierNoSSE64 : RI<0x09, MRM1r, (outs), (ins GR64:$zero),
- "lock\n\t"
- "or{q}\t{$zero, (%rsp)|(%rsp), $zero}",
- [(X86MemBarrierNoSSE GR64:$zero)]>,
- Requires<[In64BitMode]>, LOCK;
-
-
// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
@@ -566,72 +629,72 @@ def #NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
!strconcat("lock\n\t", mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_NONMEM>, LOCK;
def #NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
!strconcat("lock\n\t", mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, OpSize, LOCK;
+ [], IIC_ALU_NONMEM>, OpSize, LOCK;
def #NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
!strconcat("lock\n\t", mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_NONMEM>, LOCK;
def #NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
!strconcat("lock\n\t", mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_NONMEM>, LOCK;
def #NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
!strconcat("lock\n\t", mnemonic, "{b}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
def #NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
!strconcat("lock\n\t", mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
def #NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
!strconcat("lock\n\t", mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
def #NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
!strconcat("lock\n\t", mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
def #NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
!strconcat("lock\n\t", mnemonic, "{w}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
def #NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
!strconcat("lock\n\t", mnemonic, "{l}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
def #NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
!strconcat("lock\n\t", mnemonic, "{q}\t",
"{$src2, $dst|$dst, $src2}"),
- []>, LOCK;
+ [], IIC_ALU_MEM>, LOCK;
}
@@ -648,29 +711,29 @@ let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {
def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
"lock\n\t"
- "inc{b}\t$dst", []>, LOCK;
+ "inc{b}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
"lock\n\t"
- "inc{w}\t$dst", []>, OpSize, LOCK;
+ "inc{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
"lock\n\t"
- "inc{l}\t$dst", []>, LOCK;
+ "inc{l}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
"lock\n\t"
- "inc{q}\t$dst", []>, LOCK;
+ "inc{q}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
"lock\n\t"
- "dec{b}\t$dst", []>, LOCK;
+ "dec{b}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
"lock\n\t"
- "dec{w}\t$dst", []>, OpSize, LOCK;
+ "dec{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
"lock\n\t"
- "dec{l}\t$dst", []>, LOCK;
+ "dec{l}\t$dst", [], IIC_UNARY_MEM>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
"lock\n\t"
- "dec{q}\t$dst", []>, LOCK;
+ "dec{q}\t$dst", [], IIC_UNARY_MEM>, LOCK;
}
// Atomic compare and swap.
@@ -679,42 +742,42 @@ let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
"lock\n\t"
"cmpxchg8b\t$ptr",
- [(X86cas8 addr:$ptr)]>, TB, LOCK;
+ [(X86cas8 addr:$ptr)], IIC_CMPX_LOCK_8B>, TB, LOCK;
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
isCodeGenOnly = 1 in
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
"lock\n\t"
"cmpxchg16b\t$ptr",
- [(X86cas16 addr:$ptr)]>, TB, LOCK,
+ [(X86cas16 addr:$ptr)], IIC_CMPX_LOCK_16B>, TB, LOCK,
Requires<[HasCmpxchg16b]>;
let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in {
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
"lock\n\t"
"cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
+ [(X86cas addr:$ptr, GR8:$swap, 1)], IIC_CMPX_LOCK_8>, TB, LOCK;
}
let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in {
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
"lock\n\t"
"cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
+ [(X86cas addr:$ptr, GR16:$swap, 2)], IIC_CMPX_LOCK>, TB, OpSize, LOCK;
}
let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in {
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
"lock\n\t"
"cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
+ [(X86cas addr:$ptr, GR32:$swap, 4)], IIC_CMPX_LOCK>, TB, LOCK;
}
let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
"lock\n\t"
"cmpxchg{q}\t{$swap, $ptr|$ptr, $swap}",
- [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
+ [(X86cas addr:$ptr, GR64:$swap, 8)], IIC_CMPX_LOCK>, TB, LOCK;
}
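
For reference, the cmpxchg primitive these definitions encode maps directly onto std::atomic's compare-exchange: on mismatch the accumulator register (AL/AX/EAX/RAX) receives the current memory value, and ZF reports success. A minimal sketch:

#include <atomic>
#include <cassert>
#include <cstdint>

// "lock cmpxchg [ptr], swap": if *ptr equals the accumulator, store
// swap; otherwise load *ptr into the accumulator. ZF reports success.
int main() {
  std::atomic<uint32_t> mem{5};
  uint32_t eax = 5;                              // expected value
  bool zf = mem.compare_exchange_strong(eax, 7); // swap = 7
  assert(zf && mem.load() == 7);

  eax = 5;                                       // now stale
  zf = mem.compare_exchange_strong(eax, 9);
  assert(!zf && eax == 7);                       // accumulator updated
  return 0;
}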
// Atomic exchange and add
@@ -722,22 +785,26 @@ let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
"lock\n\t"
"xadd{b}\t{$val, $ptr|$ptr, $val}",
- [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
+ [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))],
+ IIC_XADD_LOCK_MEM8>,
TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
"lock\n\t"
"xadd{w}\t{$val, $ptr|$ptr, $val}",
- [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
+ [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))],
+ IIC_XADD_LOCK_MEM>,
TB, OpSize, LOCK;
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
"lock\n\t"
"xadd{l}\t{$val, $ptr|$ptr, $val}",
- [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
+ [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))],
+ IIC_XADD_LOCK_MEM>,
TB, LOCK;
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
"lock\n\t"
"xadd{q}\t{$val, $ptr|$ptr, $val}",
- [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
+ [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))],
+ IIC_XADD_LOCK_MEM>,
TB, LOCK;
}
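
Likewise, "lock xadd" is exactly an atomic fetch-and-add: memory is incremented and the register operand receives the prior value. A minimal sketch:

#include <atomic>
#include <cassert>
#include <cstdint>

// "lock xadd [ptr], val": atomically *ptr += val, with the register
// operand receiving the pre-add value — std::atomic's fetch_add.
int main() {
  std::atomic<uint64_t> mem{40};
  uint64_t old = mem.fetch_add(2);
  assert(old == 40 && mem.load() == 42);
  return 0;
}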
@@ -936,14 +1003,9 @@ def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
- (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
+ (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
- (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;
-
-def : Pat<(X86call (i64 tglobaladdr:$dst)),
- (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
-def : Pat<(X86call (i64 texternalsym:$dst)),
- (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
+ (CALL64pcrel32 texternalsym:$dst)>;
// tailcall stuff
def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
@@ -1105,12 +1167,10 @@ def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
- unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
- APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt KnownZero0, KnownOne0;
- CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
+ CurDAG->ComputeMaskedBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
APInt KnownZero1, KnownOne1;
- CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
+ CurDAG->ComputeMaskedBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
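
The PatFrag above treats an "or" as an "add" whenever no bit position can be set in both operands, since disjoint bits generate no carries. With fully known values the test collapses to (a & b) == 0, which is what (~KnownZero0 & ~KnownZero1) == 0 expresses over possibly-set bits; a minimal sketch:

#include <cassert>
#include <cstdint>

// Disjoint set bits mean no carries, so or and add agree.
static bool or_is_add(uint64_t a, uint64_t b) { return (a & b) == 0; }

int main() {
  uint64_t a = 0xF0, b = 0x0F;
  assert(or_is_add(a, b) && (a | b) == (a + b));
  return 0;
}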
@@ -1440,58 +1500,62 @@ def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
+// Helper imms that check that a mask leaves the significant shift bits
+// unchanged.
+def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
+def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;
+
// (shl x (and y, 31)) ==> (shl x, y)
-def : Pat<(shl GR8:$src1, (and CL, 31)),
+def : Pat<(shl GR8:$src1, (and CL, immShift32)),
(SHL8rCL GR8:$src1)>;
-def : Pat<(shl GR16:$src1, (and CL, 31)),
+def : Pat<(shl GR16:$src1, (and CL, immShift32)),
(SHL16rCL GR16:$src1)>;
-def : Pat<(shl GR32:$src1, (and CL, 31)),
+def : Pat<(shl GR32:$src1, (and CL, immShift32)),
(SHL32rCL GR32:$src1)>;
-def : Pat<(store (shl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (shl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
(SHL8mCL addr:$dst)>;
-def : Pat<(store (shl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (shl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
(SHL16mCL addr:$dst)>;
-def : Pat<(store (shl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (shl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
(SHL32mCL addr:$dst)>;
-def : Pat<(srl GR8:$src1, (and CL, 31)),
+def : Pat<(srl GR8:$src1, (and CL, immShift32)),
(SHR8rCL GR8:$src1)>;
-def : Pat<(srl GR16:$src1, (and CL, 31)),
+def : Pat<(srl GR16:$src1, (and CL, immShift32)),
(SHR16rCL GR16:$src1)>;
-def : Pat<(srl GR32:$src1, (and CL, 31)),
+def : Pat<(srl GR32:$src1, (and CL, immShift32)),
(SHR32rCL GR32:$src1)>;
-def : Pat<(store (srl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (srl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
(SHR8mCL addr:$dst)>;
-def : Pat<(store (srl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (srl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
(SHR16mCL addr:$dst)>;
-def : Pat<(store (srl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (srl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
(SHR32mCL addr:$dst)>;
-def : Pat<(sra GR8:$src1, (and CL, 31)),
+def : Pat<(sra GR8:$src1, (and CL, immShift32)),
(SAR8rCL GR8:$src1)>;
-def : Pat<(sra GR16:$src1, (and CL, 31)),
+def : Pat<(sra GR16:$src1, (and CL, immShift32)),
(SAR16rCL GR16:$src1)>;
-def : Pat<(sra GR32:$src1, (and CL, 31)),
+def : Pat<(sra GR32:$src1, (and CL, immShift32)),
(SAR32rCL GR32:$src1)>;
-def : Pat<(store (sra (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (sra (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
(SAR8mCL addr:$dst)>;
-def : Pat<(store (sra (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (sra (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
(SAR16mCL addr:$dst)>;
-def : Pat<(store (sra (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
+def : Pat<(store (sra (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
(SAR32mCL addr:$dst)>;
// (shl x (and y, 63)) ==> (shl x, y)
-def : Pat<(shl GR64:$src1, (and CL, 63)),
+def : Pat<(shl GR64:$src1, (and CL, immShift64)),
(SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
(SHL64mCL addr:$dst)>;
-def : Pat<(srl GR64:$src1, (and CL, 63)),
+def : Pat<(srl GR64:$src1, (and CL, immShift64)),
(SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
(SHR64mCL addr:$dst)>;
-def : Pat<(sra GR64:$src1, (and CL, 63)),
+def : Pat<(sra GR64:$src1, (and CL, immShift64)),
(SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
(SAR64mCL addr:$dst)>;
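
These patterns rely on the hardware already masking the shift count: 32-bit shifts use only the low 5 bits of CL and 64-bit shifts the low 6, so an explicit "and" whose immediate has at least that many trailing ones is a no-op. A sketch of the invariant immShift32 encodes (the mask 0x3f is chosen for illustration):

#include <cassert>
#include <cstdint>

// x86 masks the shift count to 5 bits for 32-bit shifts (6 for
// 64-bit), so masking CL with any immediate having >= 5 trailing
// ones (e.g. 0x1f, 0x3f, 0xff) cannot change the result.
static uint32_t shl32(uint32_t x, uint8_t cl) { return x << (cl & 31); }

int main() {
  for (unsigned cl = 0; cl < 256; ++cl)
    assert(shl32(1, (uint8_t)(cl & 0x3f)) == shl32(1, (uint8_t)cl));
  return 0;
}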
@@ -1735,3 +1799,11 @@ def : Pat<(and GR64:$src1, i64immSExt8:$src2),
(AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
(AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
+
+// Bit scan instruction patterns to match explicit zero-undef behavior.
+def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
+def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
+def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
+def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
+def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
+def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
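
These fold directly because BSF already has the matching contract: its destination is undefined when the source is zero, exactly like cttz_zero_undef, so no zero-check is needed. A sketch via the equivalent GCC/Clang builtin, which carries the identical caveat:

#include <cassert>
#include <cstdint>

// __builtin_ctz, like BSF, is undefined for a zero input; callers
// must guarantee a nonzero argument.
int main() {
  uint32_t x = 0x48;                 // lowest set bit is bit 3
  assert(x != 0 && __builtin_ctz(x) == 3);
  return 0;
}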
diff --git a/contrib/llvm/lib/Target/X86/X86InstrControl.td b/contrib/llvm/lib/Target/X86/X86InstrControl.td
index c228a0ae..bf11fde 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrControl.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrControl.td
@@ -1,4 +1,4 @@
-//===- X86InstrControl.td - Control Flow Instructions ------*- tablegen -*-===//
+//===-- X86InstrControl.td - Control Flow Instructions -----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,39 +20,47 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, FPForm = SpecialFP in {
def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
"ret",
- [(X86retflag 0)]>;
+ [(X86retflag 0)], IIC_RET>;
+ def RETW : I <0xC3, RawFrm, (outs), (ins variable_ops),
+ "ret{w}",
+ [], IIC_RET>, OpSize;
def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
"ret\t$amt",
- [(X86retflag timm:$amt)]>;
+ [(X86retflag timm:$amt)], IIC_RET_IMM>;
def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
- "retw\t$amt",
- []>, OpSize;
+ "ret{w}\t$amt",
+ [], IIC_RET_IMM>, OpSize;
def LRETL : I <0xCB, RawFrm, (outs), (ins),
- "lretl", []>;
+ "{l}ret{l|f}", [], IIC_RET>;
+ def LRETW : I <0xCB, RawFrm, (outs), (ins),
+ "{l}ret{w|f}", [], IIC_RET>, OpSize;
def LRETQ : RI <0xCB, RawFrm, (outs), (ins),
- "lretq", []>;
+ "{l}ret{q|f}", [], IIC_RET>;
def LRETI : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
- "lret\t$amt", []>;
+ "{l}ret{l|f}\t$amt", [], IIC_RET>;
def LRETIW : Ii16<0xCA, RawFrm, (outs), (ins i16imm:$amt),
- "lretw\t$amt", []>, OpSize;
+ "{l}ret{w|f}\t$amt", [], IIC_RET>, OpSize;
}
// Unconditional branches.
let isBarrier = 1, isBranch = 1, isTerminator = 1 in {
def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
- "jmp\t$dst", [(br bb:$dst)]>;
+ "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>;
def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
- "jmp\t$dst", []>;
+ "jmp\t$dst", [], IIC_JMP_REL>;
+  // FIXME: Pick an Intel syntax for JMP64pcrel32 such that it is not
+  // ambiguous with JMP_1.
def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
- "jmp{q}\t$dst", []>;
+ "jmpq\t$dst", [], IIC_JMP_REL>;
}
// Conditional Branches.
let isBranch = 1, isTerminator = 1, Uses = [EFLAGS] in {
multiclass ICBr<bits<8> opc1, bits<8> opc4, string asm, PatFrag Cond> {
- def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, []>;
+ def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, [],
+ IIC_Jcc>;
def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
- [(X86brcond bb:$dst, Cond, EFLAGS)]>, TB;
+ [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>, TB;
}
}
@@ -74,61 +82,61 @@ defm JLE : ICBr<0x7E, 0x8E, "jle\t$dst", X86_COND_LE>;
defm JG : ICBr<0x7F, 0x8F, "jg\t$dst" , X86_COND_G>;
// jcx/jecx/jrcx instructions.
-let isAsmParserOnly = 1, isBranch = 1, isTerminator = 1 in {
+let isBranch = 1, isTerminator = 1 in {
  // These are the 32-bit versions of this instruction for the asmparser. In
  // 32-bit mode, the form with the address-size prefix is jcxz and the
  // unprefixed form is jecxz.
let Uses = [CX] in
def JCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jcxz\t$dst", []>, AdSize, Requires<[In32BitMode]>;
+ "jcxz\t$dst", [], IIC_JCXZ>, AdSize, Requires<[In32BitMode]>;
let Uses = [ECX] in
def JECXZ_32 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jecxz\t$dst", []>, Requires<[In32BitMode]>;
+ "jecxz\t$dst", [], IIC_JCXZ>, Requires<[In32BitMode]>;
  // J*CXZ instructions: these are the 64-bit versions for the asmparser. In
  // 64-bit mode, the form with the address-size prefix is jecxz and the
  // unprefixed form is jrcxz.
let Uses = [ECX] in
def JECXZ_64 : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jecxz\t$dst", []>, AdSize, Requires<[In64BitMode]>;
+ "jecxz\t$dst", [], IIC_JCXZ>, AdSize, Requires<[In64BitMode]>;
let Uses = [RCX] in
def JRCXZ : Ii8PCRel<0xE3, RawFrm, (outs), (ins brtarget8:$dst),
- "jrcxz\t$dst", []>, Requires<[In64BitMode]>;
+ "jrcxz\t$dst", [], IIC_JCXZ>, Requires<[In64BitMode]>;
}
// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
- [(brind GR32:$dst)]>, Requires<[In32BitMode]>;
+ [(brind GR32:$dst)], IIC_JMP_REG>, Requires<[In32BitMode]>;
def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
- [(brind (loadi32 addr:$dst))]>, Requires<[In32BitMode]>;
+ [(brind (loadi32 addr:$dst))], IIC_JMP_MEM>, Requires<[In32BitMode]>;
def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
- [(brind GR64:$dst)]>, Requires<[In64BitMode]>;
+ [(brind GR64:$dst)], IIC_JMP_REG>, Requires<[In64BitMode]>;
def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
- [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;
+ [(brind (loadi64 addr:$dst))], IIC_JMP_MEM>, Requires<[In64BitMode]>;
def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs),
(ins i16imm:$off, i16imm:$seg),
- "ljmp{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
+ "ljmp{w}\t{$seg, $off|$off, $seg}", [], IIC_JMP_FAR_PTR>, OpSize;
def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs),
(ins i32imm:$off, i16imm:$seg),
- "ljmp{l}\t{$seg, $off|$off, $seg}", []>;
+ "ljmp{l}\t{$seg, $off|$off, $seg}", [], IIC_JMP_FAR_PTR>;
def FARJMP64 : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
- "ljmp{q}\t{*}$dst", []>;
+ "ljmp{q}\t{*}$dst", [], IIC_JMP_FAR_MEM>;
def FARJMP16m : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
- "ljmp{w}\t{*}$dst", []>, OpSize;
+ "ljmp{w}\t{*}$dst", [], IIC_JMP_FAR_MEM>, OpSize;
def FARJMP32m : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst),
- "ljmp{l}\t{*}$dst", []>;
+ "ljmp{l}\t{*}$dst", [], IIC_JMP_FAR_MEM>;
}
// Loop instructions
-def LOOP : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", []>;
-def LOOPE : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", []>;
-def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", []>;
+def LOOP : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", [], IIC_LOOP>;
+def LOOPE : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", [], IIC_LOOPE>;
+def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", [], IIC_LOOPNE>;
//===----------------------------------------------------------------------===//
// Call Instructions...
@@ -138,32 +146,30 @@ let isCall = 1 in
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead. Uses for argument
// registers are added manually.
- let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [ESP] in {
+ let Uses = [ESP] in {
def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
(outs), (ins i32imm_pcrel:$dst,variable_ops),
- "call{l}\t$dst", []>, Requires<[In32BitMode]>;
+ "call{l}\t$dst", [], IIC_CALL_RI>, Requires<[In32BitMode]>;
def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
- "call{l}\t{*}$dst", [(X86call GR32:$dst)]>,
+ "call{l}\t{*}$dst", [(X86call GR32:$dst)], IIC_CALL_RI>,
Requires<[In32BitMode]>;
def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
- "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))]>,
+ "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))], IIC_CALL_MEM>,
Requires<[In32BitMode]>;
def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs),
(ins i16imm:$off, i16imm:$seg),
- "lcall{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
+ "lcall{w}\t{$seg, $off|$off, $seg}", [],
+ IIC_CALL_FAR_PTR>, OpSize;
def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs),
(ins i32imm:$off, i16imm:$seg),
- "lcall{l}\t{$seg, $off|$off, $seg}", []>;
+ "lcall{l}\t{$seg, $off|$off, $seg}", [],
+ IIC_CALL_FAR_PTR>;
def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
- "lcall{w}\t{*}$dst", []>, OpSize;
+ "lcall{w}\t{*}$dst", [], IIC_CALL_FAR_MEM>, OpSize;
def FARCALL32m : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
- "lcall{l}\t{*}$dst", []>;
+ "lcall{l}\t{*}$dst", [], IIC_CALL_FAR_MEM>;
// callw for 16 bit code for the assembler.
let isAsmParserOnly = 1 in
@@ -177,11 +183,7 @@ let isCall = 1 in
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
isCodeGenOnly = 1 in
- let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [ESP] in {
+ let Uses = [ESP] in {
def TCRETURNdi : PseudoI<(outs),
(ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops), []>;
def TCRETURNri : PseudoI<(outs),
@@ -194,74 +196,43 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
// mcinst.
def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
(ins i32imm_pcrel:$dst, variable_ops),
- "jmp\t$dst # TAILCALL",
- []>;
+ "jmp\t$dst # TAILCALL",
+ [], IIC_JMP_REL>;
def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
- "", []>; // FIXME: Remove encoding when JIT is dead.
+ "", [], IIC_JMP_REG>; // FIXME: Remove encoding when JIT is dead.
let mayLoad = 1 in
def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
- "jmp{l}\t{*}$dst # TAILCALL", []>;
+ "jmp{l}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
}
//===----------------------------------------------------------------------===//
// Call Instructions...
//
-let isCall = 1 in
- // All calls clobber the non-callee saved registers. RSP is marked as
- // a use to prevent stack-pointer assignments that appear immediately
- // before calls from potentially appearing dead. Uses for argument
- // registers are added manually.
- let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
- FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
- Uses = [RSP] in {
-
- // NOTE: this pattern doesn't match "X86call imm", because we do not know
- // that the offset between an arbitrary immediate and the call will fit in
- // the 32-bit pcrel field that we have.
- def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
- "call{q}\t$dst", []>,
- Requires<[In64BitMode, NotWin64]>;
- def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
- "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
- Requires<[In64BitMode, NotWin64]>;
- def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
- "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
- Requires<[In64BitMode, NotWin64]>;
- def FARCALL64 : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
- "lcall{q}\t{*}$dst", []>;
- }
+// RSP is marked as a use to prevent stack-pointer assignments that appear
+// immediately before calls from potentially appearing dead. Uses for argument
+// registers are added manually.
+let isCall = 1, Uses = [RSP] in {
+ // NOTE: this pattern doesn't match "X86call imm", because we do not know
+ // that the offset between an arbitrary immediate and the call will fit in
+ // the 32-bit pcrel field that we have.
+ def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
+ (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
+ "call{q}\t$dst", [], IIC_CALL_RI>,
+ Requires<[In64BitMode]>;
+ def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
+ "call{q}\t{*}$dst", [(X86call GR64:$dst)],
+ IIC_CALL_RI>,
+ Requires<[In64BitMode]>;
+ def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
+ "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))],
+ IIC_CALL_MEM>,
+ Requires<[In64BitMode]>;
- // FIXME: We need to teach codegen about single list of call-clobbered
- // registers.
-let isCall = 1, isCodeGenOnly = 1 in
- // All calls clobber the non-callee saved registers. RSP is marked as
- // a use to prevent stack-pointer assignments that appear immediately
- // before calls from potentially appearing dead. Uses for argument
- // registers are added manually.
- let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
- FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
- Uses = [RSP] in {
- def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
- (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
- "call{q}\t$dst", []>,
- Requires<[IsWin64]>;
- def WINCALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
- "call{q}\t{*}$dst",
- [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
- def WINCALL64m : I<0xFF, MRM2m, (outs),
- (ins i64mem:$dst,variable_ops),
- "call{q}\t{*}$dst",
- [(X86call (loadi64 addr:$dst))]>,
- Requires<[IsWin64]>;
- }
+ def FARCALL64 : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
+ "lcall{q}\t{*}$dst", [], IIC_CALL_FAR_MEM>;
+}
let isCall = 1, isCodeGenOnly = 1 in
// __chkstk(MSVC): clobber R10, R11 and EFLAGS.
@@ -270,18 +241,13 @@ let isCall = 1, isCodeGenOnly = 1 in
Uses = [RSP] in {
def W64ALLOCA : Ii32PCRel<0xE8, RawFrm,
(outs), (ins i64i32imm_pcrel:$dst, variable_ops),
- "call{q}\t$dst", []>,
+ "call{q}\t$dst", [], IIC_CALL_RI>,
Requires<[IsWin64]>;
}
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
isCodeGenOnly = 1 in
- // AMD64 cc clobbers RSI, RDI, XMM6-XMM15.
- let Defs = [RAX, RCX, RDX, R8, R9, R10, R11,
- FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
- MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
- Uses = [RSP],
+ let Uses = [RSP],
usesCustomInserter = 1 in {
def TCRETURNdi64 : PseudoI<(outs),
(ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
@@ -294,11 +260,11 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
(ins i64i32imm_pcrel:$dst, variable_ops),
- "jmp\t$dst # TAILCALL", []>;
+ "jmp\t$dst # TAILCALL", [], IIC_JMP_REL>;
def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst, variable_ops),
- "jmp{q}\t{*}$dst # TAILCALL", []>;
+ "jmp{q}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
let mayLoad = 1 in
def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
- "jmp{q}\t{*}$dst # TAILCALL", []>;
+ "jmp{q}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrExtension.td b/contrib/llvm/lib/Target/X86/X86InstrExtension.td
index e62e6b7..0d5490a 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrExtension.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrExtension.td
@@ -1,10 +1,10 @@
-//===- X86InstrExtension.td - Sign and Zero Extensions -----*- tablegen -*-===//
-//
+//===-- X86InstrExtension.td - Sign and Zero Extensions ----*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the sign and zero extension operations.
@@ -37,40 +37,47 @@ let neverHasSideEffects = 1 in {
}
+
// Sign/Zero extenders
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
- "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_R8>,
+ TB, OpSize;
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
- "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "movs{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVSX_R16_M8>,
+ TB, OpSize;
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8:$src),
"movs{bl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (sext GR8:$src))]>, TB;
+ [(set GR32:$dst, (sext GR8:$src))], IIC_MOVSX>, TB;
def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
"movs{bl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (sextloadi32i8 addr:$src))]>, TB;
+ [(set GR32:$dst, (sextloadi32i8 addr:$src))], IIC_MOVSX>, TB;
def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
"movs{wl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (sext GR16:$src))]>, TB;
+ [(set GR32:$dst, (sext GR16:$src))], IIC_MOVSX>, TB;
def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
"movs{wl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (sextloadi32i16 addr:$src))]>, TB;
+ [(set GR32:$dst, (sextloadi32i16 addr:$src))], IIC_MOVSX>,
+ TB;
def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
- "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_R8>,
+ TB, OpSize;
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
- "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+ "movz{bw|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX_R16_M8>,
+ TB, OpSize;
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (zext GR8:$src))]>, TB;
+ [(set GR32:$dst, (zext GR8:$src))], IIC_MOVZX>, TB;
def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (zextloadi32i8 addr:$src))]>, TB;
+ [(set GR32:$dst, (zextloadi32i8 addr:$src))], IIC_MOVZX>, TB;
def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
"movz{wl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (zext GR16:$src))]>, TB;
+ [(set GR32:$dst, (zext GR16:$src))], IIC_MOVZX>, TB;
def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
"movz{wl|x}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB;
+ [(set GR32:$dst, (zextloadi32i16 addr:$src))], IIC_MOVZX>,
+ TB;
// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
// except that they use GR32_NOREX for the output operand register class
@@ -78,12 +85,12 @@ def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
(outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
- []>, TB;
+ [], IIC_MOVZX>, TB;
let mayLoad = 1 in
def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
(outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src),
"movz{bl|x}\t{$src, $dst|$dst, $src}",
- []>, TB;
+ [], IIC_MOVZX>, TB;
// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
// operand, which makes it a rare instruction with an 8-bit register
@@ -91,32 +98,38 @@ def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
// were generalized, this would require a special register class.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
"movs{bq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sext GR8:$src))]>, TB;
+ [(set GR64:$dst, (sext GR8:$src))], IIC_MOVSX>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
"movs{bq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
+ [(set GR64:$dst, (sextloadi64i8 addr:$src))], IIC_MOVSX>,
+ TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
"movs{wq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sext GR16:$src))]>, TB;
+ [(set GR64:$dst, (sext GR16:$src))], IIC_MOVSX>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
"movs{wq|x}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
+ [(set GR64:$dst, (sextloadi64i16 addr:$src))], IIC_MOVSX>,
+ TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
"movs{lq|xd}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sext GR32:$src))]>;
+ [(set GR64:$dst, (sext GR32:$src))], IIC_MOVSX>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
"movs{lq|xd}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;
+ [(set GR64:$dst, (sextloadi64i32 addr:$src))], IIC_MOVSX>;
// movzbq and movzwq encodings for the disassembler
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
- "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+ "movz{bq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
+ TB;
def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
- "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+ "movz{bq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
+ TB;
def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
- "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+ "movz{wq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
+ TB;
def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
+ "movz{wq|x}\t{$src, $dst|$dst, $src}", [], IIC_MOVZX>,
+ TB;
// FIXME: These should be Pat patterns.
let isCodeGenOnly = 1 in {
@@ -124,15 +137,17 @@ let isCodeGenOnly = 1 in {
// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
- "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
+ "", [(set GR64:$dst, (zext GR8:$src))], IIC_MOVZX>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
- "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
+ "", [(set GR64:$dst, (zextloadi64i8 addr:$src))], IIC_MOVZX>,
+ TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
- "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
+ "", [(set GR64:$dst, (zext GR16:$src))], IIC_MOVZX>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
- "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;
+ "", [(set GR64:$dst, (zextloadi64i16 addr:$src))],
+ IIC_MOVZX>, TB;
// There's no movzlq instruction, but movl can be used for this purpose, using
// implicit zero-extension. The preferred way to do 32-bit-to-64-bit zero
@@ -142,10 +157,9 @@ def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
// necessarily all zero. In such cases, we fall back to these explicit zext
// instructions.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
- "", [(set GR64:$dst, (zext GR32:$src))]>;
+ "", [(set GR64:$dst, (zext GR32:$src))], IIC_MOVZX>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
- "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
-
-
+ "", [(set GR64:$dst, (zextloadi64i32 addr:$src))],
+ IIC_MOVZX>;
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFMA.td b/contrib/llvm/lib/Target/X86/X86InstrFMA.td
index d868773..d57937b 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFMA.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFMA.td
@@ -1,4 +1,4 @@
-//====- X86InstrFMA.td - Describe the X86 Instruction Set --*- tablegen -*-===//
+//===-- X86InstrFMA.td - FMA Instruction Set ---------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,7 +15,7 @@
// FMA3 - Intel 3 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//
-multiclass fma_rm<bits<8> opc, string OpcodeStr> {
+multiclass fma3p_rm<bits<8> opc, string OpcodeStr> {
def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -34,27 +34,187 @@ multiclass fma_rm<bits<8> opc, string OpcodeStr> {
[]>;
}
-multiclass fma_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
- string OpcodeStr, string PackTy> {
- defm r132 : fma_rm<opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy))>;
- defm r213 : fma_rm<opc213, !strconcat(OpcodeStr, !strconcat("213", PackTy))>;
- defm r231 : fma_rm<opc231, !strconcat(OpcodeStr, !strconcat("231", PackTy))>;
+multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
+ string OpcodeStr, string PackTy> {
+ defm r132 : fma3p_rm<opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy))>;
+ defm r213 : fma3p_rm<opc213, !strconcat(OpcodeStr, !strconcat("213", PackTy))>;
+ defm r231 : fma3p_rm<opc231, !strconcat(OpcodeStr, !strconcat("231", PackTy))>;
}
-let isAsmParserOnly = 1 in {
- // Fused Multiply-Add
- defm VFMADDPS : fma_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps">;
- defm VFMADDPD : fma_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd">, VEX_W;
- defm VFMADDSUBPS : fma_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps">;
- defm VFMADDSUBPD : fma_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd">, VEX_W;
- defm VFMSUBADDPS : fma_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps">;
- defm VFMSUBADDPD : fma_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd">, VEX_W;
- defm VFMSUBPS : fma_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps">;
- defm VFMSUBPD : fma_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd">, VEX_W;
+// Fused Multiply-Add
+let ExeDomain = SSEPackedSingle in {
+ defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps">;
+ defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps">;
+ defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps">;
+ defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps">;
+}
+
+let ExeDomain = SSEPackedDouble in {
+ defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd">, VEX_W;
+ defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd">, VEX_W;
+ defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd">, VEX_W;
+ defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd">, VEX_W;
+}
+
+// Fused Negative Multiply-Add
+let ExeDomain = SSEPackedSingle in {
+ defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps">;
+ defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps">;
+}
+let ExeDomain = SSEPackedDouble in {
+ defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd">, VEX_W;
+ defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd">, VEX_W;
+}
+
+multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop> {
+ def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+ def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+}
- // Fused Negative Multiply-Add
- defm VFNMADDPS : fma_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps">;
- defm VFNMADDPD : fma_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd">, VEX_W;
- defm VFNMSUBPS : fma_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps">;
- defm VFNMSUBPD : fma_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd">, VEX_W;
+multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
+ string OpcodeStr> {
+ defm SSr132 : fma3s_rm<opc132, !strconcat(OpcodeStr, "132ss"), f32mem>;
+ defm SSr213 : fma3s_rm<opc213, !strconcat(OpcodeStr, "213ss"), f32mem>;
+ defm SSr231 : fma3s_rm<opc231, !strconcat(OpcodeStr, "231ss"), f32mem>;
+ defm SDr132 : fma3s_rm<opc132, !strconcat(OpcodeStr, "132sd"), f64mem>, VEX_W;
+ defm SDr213 : fma3s_rm<opc213, !strconcat(OpcodeStr, "213sd"), f64mem>, VEX_W;
+ defm SDr231 : fma3s_rm<opc231, !strconcat(OpcodeStr, "231sd"), f64mem>, VEX_W;
}
+
+defm VFMADD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd">, VEX_LIG;
+defm VFMSUB : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub">, VEX_LIG;
+
+defm VFNMADD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd">, VEX_LIG;
+defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub">, VEX_LIG;
+
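
The 132/213/231 suffixes follow the standard FMA3 naming: the digits name the three operands, the first two named are multiplied, the third is added, and operand 1 ($dst) is always the one overwritten (e.g. vfmadd213ss computes dst = src2 * dst + src3). A C++ sketch of the three orderings using std::fma:

#include <cassert>
#include <cmath>

// FMA3 operand orderings: digits name operands 1 (dst), 2, 3; the
// first two are multiplied and the third is added.
int main() {
  double op1 = 2, op2 = 3, op3 = 4;
  double r132 = std::fma(op1, op3, op2);  // 132: op1*op3 + op2 = 11
  double r213 = std::fma(op2, op1, op3);  // 213: op2*op1 + op3 = 10
  double r231 = std::fma(op2, op3, op1);  // 231: op2*op3 + op1 = 14
  assert(r132 == 11 && r213 == 10 && r231 == 14);
  return 0;
}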
+//===----------------------------------------------------------------------===//
+// FMA4 - AMD 4 operand Fused Multiply-Add instructions
+//===----------------------------------------------------------------------===//
+
+
+multiclass fma4s<bits<8> opc, string OpcodeStr, Operand memop,
+ ComplexPattern mem_cpat, Intrinsic Int> {
+ def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, MemOp4;
+ def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, memop:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, VR128:$src2, mem_cpat:$src3))]>, VEX_W, MemOp4;
+ def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, memop:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>;
+// For disassembler
+let isCodeGenOnly = 1 in
+ def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
+}
+
+multiclass fma4p<bits<8> opc, string OpcodeStr,
+ Intrinsic Int128, Intrinsic Int256,
+ PatFrag ld_frag128, PatFrag ld_frag256> {
+ def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_W, MemOp4;
+ def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, f128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst, (Int128 VR128:$src1, VR128:$src2,
+ (ld_frag128 addr:$src3)))]>, VEX_W, MemOp4;
+ def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>;
+ def rrY : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, VR256:$src2, VR256:$src3))]>, VEX_W, MemOp4;
+ def rmY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, f256mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst, (Int256 VR256:$src1, VR256:$src2,
+ (ld_frag256 addr:$src3)))]>, VEX_W, MemOp4;
+ def mrY : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, (ld_frag256 addr:$src2), VR256:$src3))]>;
+// For disassembler
+let isCodeGenOnly = 1 in {
+ def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
+ def rrY_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>;
+} // isCodeGenOnly = 1
+}
+
+defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", ssmem, sse_load_f32,
+ int_x86_fma4_vfmadd_ss>;
+defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", sdmem, sse_load_f64,
+ int_x86_fma4_vfmadd_sd>;
+defm VFMADDPS4 : fma4p<0x68, "vfmaddps", int_x86_fma4_vfmadd_ps,
+ int_x86_fma4_vfmadd_ps_256, memopv4f32, memopv8f32>;
+defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", int_x86_fma4_vfmadd_pd,
+ int_x86_fma4_vfmadd_pd_256, memopv2f64, memopv4f64>;
+defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", ssmem, sse_load_f32,
+ int_x86_fma4_vfmsub_ss>;
+defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", sdmem, sse_load_f64,
+ int_x86_fma4_vfmsub_sd>;
+defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", int_x86_fma4_vfmsub_ps,
+ int_x86_fma4_vfmsub_ps_256, memopv4f32, memopv8f32>;
+defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", int_x86_fma4_vfmsub_pd,
+ int_x86_fma4_vfmsub_pd_256, memopv2f64, memopv4f64>;
+defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", ssmem, sse_load_f32,
+ int_x86_fma4_vfnmadd_ss>;
+defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", sdmem, sse_load_f64,
+ int_x86_fma4_vfnmadd_sd>;
+defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", int_x86_fma4_vfnmadd_ps,
+ int_x86_fma4_vfnmadd_ps_256, memopv4f32, memopv8f32>;
+defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", int_x86_fma4_vfnmadd_pd,
+ int_x86_fma4_vfnmadd_pd_256, memopv2f64, memopv4f64>;
+defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", ssmem, sse_load_f32,
+ int_x86_fma4_vfnmsub_ss>;
+defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", sdmem, sse_load_f64,
+ int_x86_fma4_vfnmsub_sd>;
+defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", int_x86_fma4_vfnmsub_ps,
+ int_x86_fma4_vfnmsub_ps_256, memopv4f32, memopv8f32>;
+defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", int_x86_fma4_vfnmsub_pd,
+ int_x86_fma4_vfnmsub_pd_256, memopv2f64, memopv4f64>;
+defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", int_x86_fma4_vfmaddsub_ps,
+ int_x86_fma4_vfmaddsub_ps_256, memopv4f32, memopv8f32>;
+defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", int_x86_fma4_vfmaddsub_pd,
+ int_x86_fma4_vfmaddsub_pd_256, memopv2f64, memopv4f64>;
+defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", int_x86_fma4_vfmsubadd_ps,
+ int_x86_fma4_vfmsubadd_ps_256, memopv4f32, memopv8f32>;
+defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", int_x86_fma4_vfmsubadd_pd,
+ int_x86_fma4_vfmsubadd_pd_256, memopv2f64, memopv4f64>;
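Unlike the FMA3 forms above, FMA4 is non-destructive: the destination is a fourth register distinct from all three sources, and the MemOp4/VEX_W pairing selects whether $src2 or $src3 is the memory operand. A scalar C++ model of the vfmaddsub* semantics instantiated above, assuming AMD's documented behavior (illustrative only, not LLVM code):

#include <array>
#include <cstdio>

using V4 = std::array<float, 4>;

// vfmaddsubps: odd lanes compute a*b + c, even lanes compute a*b - c.
static V4 vfmaddsubps(V4 a, V4 b, V4 c) {
  V4 r{};
  for (int i = 0; i < 4; ++i)
    r[i] = (i & 1) ? a[i] * b[i] + c[i] : a[i] * b[i] - c[i];
  return r;
}

int main() {
  V4 r = vfmaddsubps({1, 2, 3, 4}, {1, 1, 1, 1}, {10, 10, 10, 10});
  std::printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]); // -9 12 -7 14
  return 0;
}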
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
index 7cb870f..a13887e 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -1,10 +1,10 @@
-//==- X86InstrFPStack.td - Describe the X86 Instruction Set --*- tablegen -*-=//
-//
+//===- X86InstrFPStack.td - FPU Instruction Set ------------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 x87 FPU instruction set, defining the
@@ -225,22 +225,22 @@ class FPrST0PInst<bits<8> o, string asm>
// of some of the 'reverse' forms of the fsub and fdiv instructions. As such,
// we have to put some 'r's in and take them out of weird places.
def ADD_FST0r : FPST0rInst <0xC0, "fadd\t$op">;
-def ADD_FrST0 : FPrST0Inst <0xC0, "fadd\t{%st(0), $op|$op, %ST(0)}">;
+def ADD_FrST0 : FPrST0Inst <0xC0, "fadd\t{%st(0), $op|$op, ST(0)}">;
def ADD_FPrST0 : FPrST0PInst<0xC0, "faddp\t$op">;
def SUBR_FST0r : FPST0rInst <0xE8, "fsubr\t$op">;
-def SUB_FrST0 : FPrST0Inst <0xE8, "fsub{r}\t{%st(0), $op|$op, %ST(0)}">;
+def SUB_FrST0 : FPrST0Inst <0xE8, "fsub{r}\t{%st(0), $op|$op, ST(0)}">;
def SUB_FPrST0 : FPrST0PInst<0xE8, "fsub{r}p\t$op">;
def SUB_FST0r : FPST0rInst <0xE0, "fsub\t$op">;
-def SUBR_FrST0 : FPrST0Inst <0xE0, "fsub{|r}\t{%st(0), $op|$op, %ST(0)}">;
+def SUBR_FrST0 : FPrST0Inst <0xE0, "fsub{|r}\t{%st(0), $op|$op, ST(0)}">;
def SUBR_FPrST0 : FPrST0PInst<0xE0, "fsub{|r}p\t$op">;
def MUL_FST0r : FPST0rInst <0xC8, "fmul\t$op">;
-def MUL_FrST0 : FPrST0Inst <0xC8, "fmul\t{%st(0), $op|$op, %ST(0)}">;
+def MUL_FrST0 : FPrST0Inst <0xC8, "fmul\t{%st(0), $op|$op, ST(0)}">;
def MUL_FPrST0 : FPrST0PInst<0xC8, "fmulp\t$op">;
def DIVR_FST0r : FPST0rInst <0xF8, "fdivr\t$op">;
-def DIV_FrST0 : FPrST0Inst <0xF8, "fdiv{r}\t{%st(0), $op|$op, %ST(0)}">;
+def DIV_FrST0 : FPrST0Inst <0xF8, "fdiv{r}\t{%st(0), $op|$op, ST(0)}">;
def DIV_FPrST0 : FPrST0PInst<0xF8, "fdiv{r}p\t$op">;
def DIV_FST0r : FPST0rInst <0xF0, "fdiv\t$op">;
-def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, %ST(0)}">;
+def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, ST(0)}">;
def DIVR_FPrST0 : FPrST0PInst<0xF0, "fdiv{|r}p\t$op">;
def COM_FST0r : FPST0rInst <0xD0, "fcom\t$op">;
@@ -330,21 +330,21 @@ defm CMOVNP : FPCMov<X86_COND_NP>;
let Predicates = [HasCMov] in {
// These are not factored because there's no clean way to pass DA/DB.
def CMOVB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovb\t{$op, %st(0)|%ST(0), $op}">, DA;
+ "fcmovb\t{$op, %st(0)|ST(0), $op}">, DA;
def CMOVBE_F : FPI<0xD0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovbe\t{$op, %st(0)|%ST(0), $op}">, DA;
+ "fcmovbe\t{$op, %st(0)|ST(0), $op}">, DA;
def CMOVE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins),
- "fcmove\t{$op, %st(0)|%ST(0), $op}">, DA;
+ "fcmove\t{$op, %st(0)|ST(0), $op}">, DA;
def CMOVP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovu\t {$op, %st(0)|%ST(0), $op}">, DA;
+ "fcmovu\t {$op, %st(0)|ST(0), $op}">, DA;
def CMOVNB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnb\t{$op, %st(0)|%ST(0), $op}">, DB;
+ "fcmovnb\t{$op, %st(0)|ST(0), $op}">, DB;
def CMOVNBE_F: FPI<0xD0, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnbe\t{$op, %st(0)|%ST(0), $op}">, DB;
+ "fcmovnbe\t{$op, %st(0)|ST(0), $op}">, DB;
def CMOVNE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovne\t{$op, %st(0)|%ST(0), $op}">, DB;
+ "fcmovne\t{$op, %st(0)|ST(0), $op}">, DB;
def CMOVNP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
- "fcmovnu\t{$op, %st(0)|%ST(0), $op}">, DB;
+ "fcmovnu\t{$op, %st(0)|ST(0), $op}">, DB;
} // Predicates = [HasCMov]
// Floating point loads & stores.
@@ -437,33 +437,26 @@ def IST_FP64m : FPI<0xDF, MRM7m, (outs), (ins i64mem:$dst), "fistp{ll}\t$dst">;
}
// FISTTP requires SSE3 even though it's an FPStack op.
+let Predicates = [HasSSE3] in {
def ISTT_Fp16m32 : FpI_<(outs), (ins i16mem:$op, RFP32:$src), OneArgFP,
- [(X86fp_to_i16mem RFP32:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i16mem RFP32:$src, addr:$op)]>;
def ISTT_Fp32m32 : FpI_<(outs), (ins i32mem:$op, RFP32:$src), OneArgFP,
- [(X86fp_to_i32mem RFP32:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i32mem RFP32:$src, addr:$op)]>;
def ISTT_Fp64m32 : FpI_<(outs), (ins i64mem:$op, RFP32:$src), OneArgFP,
- [(X86fp_to_i64mem RFP32:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i64mem RFP32:$src, addr:$op)]>;
def ISTT_Fp16m64 : FpI_<(outs), (ins i16mem:$op, RFP64:$src), OneArgFP,
- [(X86fp_to_i16mem RFP64:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i16mem RFP64:$src, addr:$op)]>;
def ISTT_Fp32m64 : FpI_<(outs), (ins i32mem:$op, RFP64:$src), OneArgFP,
- [(X86fp_to_i32mem RFP64:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i32mem RFP64:$src, addr:$op)]>;
def ISTT_Fp64m64 : FpI_<(outs), (ins i64mem:$op, RFP64:$src), OneArgFP,
- [(X86fp_to_i64mem RFP64:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i64mem RFP64:$src, addr:$op)]>;
def ISTT_Fp16m80 : FpI_<(outs), (ins i16mem:$op, RFP80:$src), OneArgFP,
- [(X86fp_to_i16mem RFP80:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i16mem RFP80:$src, addr:$op)]>;
def ISTT_Fp32m80 : FpI_<(outs), (ins i32mem:$op, RFP80:$src), OneArgFP,
- [(X86fp_to_i32mem RFP80:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i32mem RFP80:$src, addr:$op)]>;
def ISTT_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP,
- [(X86fp_to_i64mem RFP80:$src, addr:$op)]>,
- Requires<[HasSSE3]>;
+ [(X86fp_to_i64mem RFP80:$src, addr:$op)]>;
+} // Predicates = [HasSSE3]
let mayStore = 1 in {
def ISTT_FP16m : FPI<0xDF, MRM1m, (outs), (ins i16mem:$dst), "fisttp{s}\t$dst">;
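Grouping the ISTT_Fp* pseudos under one Predicates = [HasSSE3] block is equivalent to the per-def Requires<[HasSSE3]> it replaces; FISTTP is gated on SSE3 because that is where the instruction appeared. Its point is that it stores with truncation regardless of the x87 rounding mode, which matches C/C++ cast semantics (illustrative only, not LLVM code):

#include <cstdio>

int main() {
  double d = -2.7;
  // A C++ cast truncates toward zero; FISTTP implements exactly this, so a
  // compiler no longer has to save, modify, and restore the FPU control word
  // around a plain FIST.
  long long i = static_cast<long long>(d);
  std::printf("%lld\n", i); // -2
  return 0;
}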
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFormats.td b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
index 0a1590b..b387090 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
@@ -1,10 +1,10 @@
-//===- X86InstrFormats.td - X86 Instruction Formats --------*- tablegen -*-===//
-//
+//===-- X86InstrFormats.td - X86 Instruction Formats -------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -43,6 +43,15 @@ def RawFrmImm8 : Format<43>;
def RawFrmImm16 : Format<44>;
def MRM_D0 : Format<45>;
def MRM_D1 : Format<46>;
+def MRM_D4 : Format<47>;
+def MRM_D8 : Format<48>;
+def MRM_D9 : Format<49>;
+def MRM_DA : Format<50>;
+def MRM_DB : Format<51>;
+def MRM_DC : Format<52>;
+def MRM_DD : Format<53>;
+def MRM_DE : Format<54>;
+def MRM_DF : Format<55>;
// ImmType - This specifies the immediate type used by an instruction. This is
// part of the ad-hoc solution used to emit machine instruction encodings by our
@@ -107,17 +116,25 @@ class T8 { bits<5> Prefix = 13; }
class TA { bits<5> Prefix = 14; }
class A6 { bits<5> Prefix = 15; }
class A7 { bits<5> Prefix = 16; }
-class TF { bits<5> Prefix = 17; }
+class T8XD { bits<5> Prefix = 17; }
+class T8XS { bits<5> Prefix = 18; }
+class TAXD { bits<5> Prefix = 19; }
+class XOP8 { bits<5> Prefix = 20; }
+class XOP9 { bits<5> Prefix = 21; }
class VEX { bit hasVEXPrefix = 1; }
class VEX_W { bit hasVEX_WPrefix = 1; }
class VEX_4V : VEX { bit hasVEX_4VPrefix = 1; }
+class VEX_4VOp3 : VEX { bit hasVEX_4VOp3Prefix = 1; }
class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; }
class VEX_L { bit hasVEX_L = 1; }
class VEX_LIG { bit ignoresVEX_L = 1; }
class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; }
-
+class MemOp4 { bit hasMemOp4Prefix = 1; }
+class XOP { bit hasXOP_Prefix = 1; }
class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
- string AsmStr, Domain d = GenericDomain>
+ string AsmStr,
+ InstrItinClass itin,
+ Domain d = GenericDomain>
: Instruction {
let Namespace = "X86";
@@ -133,6 +150,8 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
// If this is a pseudo instruction, mark it isCodeGenOnly.
let isCodeGenOnly = !eq(!cast<string>(f), "Pseudo");
+ let Itinerary = itin;
+
//
// Attributes specific to X86 instructions...
//
@@ -148,11 +167,15 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bit hasVEXPrefix = 0; // Does this inst require a VEX prefix?
bit hasVEX_WPrefix = 0; // Does this inst set the VEX_W field?
bit hasVEX_4VPrefix = 0; // Does this inst require the VEX.VVVV field?
+ bit hasVEX_4VOp3Prefix = 0; // Does this inst require the VEX.VVVV field to
+ // encode the third operand?
bit hasVEX_i8ImmReg = 0; // Does this inst require the last source register
// to be encoded in an immediate field?
bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
bit ignoresVEX_L = 0; // Does this instruction ignore the L-bit
bit has3DNow0F0FOpcode =0;// Wacky 3dNow! encoding?
+ bit hasMemOp4Prefix = 0; // Same bit as VEX_W, but used for swapping operands
+ bit hasXOP_Prefix = 0; // Does this inst require an XOP prefix?
// TSFlags layout should be kept in sync with X86InstrInfo.h.
let TSFlags{5-0} = FormBits;
@@ -169,58 +192,63 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
let TSFlags{33} = hasVEXPrefix;
let TSFlags{34} = hasVEX_WPrefix;
let TSFlags{35} = hasVEX_4VPrefix;
- let TSFlags{36} = hasVEX_i8ImmReg;
- let TSFlags{37} = hasVEX_L;
- let TSFlags{38} = ignoresVEX_L;
- let TSFlags{39} = has3DNow0F0FOpcode;
+ let TSFlags{36} = hasVEX_4VOp3Prefix;
+ let TSFlags{37} = hasVEX_i8ImmReg;
+ let TSFlags{38} = hasVEX_L;
+ let TSFlags{39} = ignoresVEX_L;
+ let TSFlags{40} = has3DNow0F0FOpcode;
+ let TSFlags{41} = hasMemOp4Prefix;
+ let TSFlags{42} = hasXOP_Prefix;
}
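Because the patch inserts hasVEX_4VOp3Prefix at bit 36, every later field shifts up by one; the reminder that the layout must stay in sync with X86InstrInfo.h is load-bearing, since consumers recover these bits by masking. A sketch of that consumer side, with a hypothetical constant name rather than the real X86II definitions (illustrative only):

#include <cstdint>
#include <cstdio>

// Hypothetical mirror of TSFlags{36} above; the real shift constants live on
// the C++ side and must track this file exactly.
constexpr unsigned kVEX_4VOp3Shift = 36;

constexpr bool hasVEX_4VOp3(uint64_t TSFlags) {
  return (TSFlags >> kVEX_4VOp3Shift) & 1;
}

int main() {
  uint64_t Flags = uint64_t(1) << kVEX_4VOp3Shift;
  std::printf("%d\n", hasVEX_4VOp3(Flags) ? 1 : 0); // 1
  return 0;
}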
class PseudoI<dag oops, dag iops, list<dag> pattern>
- : X86Inst<0, Pseudo, NoImm, oops, iops, ""> {
+ : X86Inst<0, Pseudo, NoImm, oops, iops, "", NoItinerary> {
let Pattern = pattern;
}
class I<bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern, Domain d = GenericDomain>
- : X86Inst<o, f, NoImm, outs, ins, asm, d> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT,
+ Domain d = GenericDomain>
+ : X86Inst<o, f, NoImm, outs, ins, asm, itin, d> {
let Pattern = pattern;
let CodeSize = 3;
}
class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern, Domain d = GenericDomain>
- : X86Inst<o, f, Imm8, outs, ins, asm, d> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT,
+ Domain d = GenericDomain>
+ : X86Inst<o, f, Imm8, outs, ins, asm, itin, d> {
let Pattern = pattern;
let CodeSize = 3;
}
class Ii8PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern>
- : X86Inst<o, f, Imm8PCRel, outs, ins, asm> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<o, f, Imm8PCRel, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern>
- : X86Inst<o, f, Imm16, outs, ins, asm> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<o, f, Imm16, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern>
- : X86Inst<o, f, Imm32, outs, ins, asm> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<o, f, Imm32, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
class Ii16PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern>
- : X86Inst<o, f, Imm16PCRel, outs, ins, asm> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<o, f, Imm16PCRel, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern>
- : X86Inst<o, f, Imm32PCRel, outs, ins, asm> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<o, f, Imm32PCRel, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
@@ -231,8 +259,9 @@ class FPI<bits<8> o, Format F, dag outs, dag ins, string asm>
: I<o, F, outs, ins, asm, []> {}
// FpI_ - Floating Point Pseudo Instruction template. Not Predicated.
-class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern>
- : X86Inst<0, Pseudo, NoImm, outs, ins, ""> {
+class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern,
+ InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<0, Pseudo, NoImm, outs, ins, "", itin> {
let FPForm = fp;
let Pattern = pattern;
}
@@ -244,20 +273,23 @@ class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern>
// Iseg32 - 16-bit segment selector, 32-bit offset
class Iseg16 <bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern> : X86Inst<o, f, Imm16, outs, ins, asm> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<o, f, Imm16, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern> : X86Inst<o, f, Imm32, outs, ins, asm> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<o, f, Imm32, outs, ins, asm, itin> {
let Pattern = pattern;
let CodeSize = 3;
}
// SI - SSE 1 & 2 scalar instructions
-class SI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern> {
+class SI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin> {
let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
!if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2]));
@@ -267,8 +299,8 @@ class SI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
// SIi8 - SSE 1 & 2 scalar instructions
class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern> {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin> {
let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
!if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2]));
@@ -278,8 +310,8 @@ class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// PI - SSE 1 & 2 packed instructions
class PI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
- Domain d>
- : I<o, F, outs, ins, asm, pattern, d> {
+ InstrItinClass itin, Domain d>
+ : I<o, F, outs, ins, asm, pattern, itin, d> {
let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
!if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]));
@@ -289,8 +321,8 @@ class PI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
// PIi8 - SSE 1 & 2 packed instructions with immediate
class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern, Domain d>
- : Ii8<o, F, outs, ins, asm, pattern, d> {
+ list<dag> pattern, InstrItinClass itin, Domain d>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, d> {
let Predicates = !if(hasVEX_4VPrefix /* VEX */, [HasAVX],
!if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]));
@@ -306,25 +338,27 @@ class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// VSSI - SSE1 instructions with XS prefix in AVX form.
// VPSI - SSE1 instructions with TB prefix in AVX form.
-class SSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>;
+class SSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasSSE1]>;
class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>;
-class PSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasSSE1]>;
+class PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
Requires<[HasSSE1]>;
class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
Requires<[HasSSE1]>;
class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XS,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XS,
Requires<[HasAVX]>;
class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedSingle>, TB,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedSingle>, TB,
Requires<[HasAVX]>;
// SSE2 Instruction Templates:
@@ -337,28 +371,30 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
// VSDI - SSE2 instructions with XD prefix in AVX form.
// VPDI - SSE2 instructions with TB and OpSize prefixes in AVX form.
-class SDI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE2]>;
+class SDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>;
class SDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE2]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasSSE2]>;
class SSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
: Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE2]>;
-class PDI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, OpSize,
+class PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
Requires<[HasSSE2]>;
class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, OpSize,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
Requires<[HasSSE2]>;
class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XD,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XD,
Requires<[HasAVX]>;
class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedDouble>, TB,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>, TB,
OpSize, Requires<[HasAVX]>;
// SSE3 Instruction Templates:
@@ -368,15 +404,16 @@ class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
// S3DI - SSE3 instructions with XD prefix.
class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, XS,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, XS,
Requires<[HasSSE3]>;
class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, XD,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, XD,
Requires<[HasSSE3]>;
-class S3I<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, OpSize,
+class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, TB, OpSize,
Requires<[HasSSE3]>;
@@ -386,16 +423,16 @@ class S3I<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
// SS3AI - SSSE3 instructions with TA prefix.
//
// Note: SSSE3 instructions have 64-bit and 128-bit versions. The 64-bit version
-// uses the MMX registers. We put those instructions here because they better
-// fit into the SSSE3 instruction category rather than the MMX category.
+// uses the MMX registers. The 64-bit versions are grouped with the MMX
+// classes. They need to be enabled even if AVX is enabled.
class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
Requires<[HasSSSE3]>;
class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
Requires<[HasSSSE3]>;
// SSE4.1 Instruction Templates:
@@ -404,31 +441,31 @@ class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
// SS41AIi8 - SSE 4.1 instructions with TA prefix and ImmT == Imm8.
//
class SS48I<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
Requires<[HasSSE41]>;
class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
Requires<[HasSSE41]>;
// SSE4.2 Instruction Templates:
//
// SS428I - SSE 4.2 instructions with T8 prefix.
class SS428I<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
Requires<[HasSSE42]>;
-// SS42FI - SSE 4.2 instructions with TF prefix.
+// SS42FI - SSE 4.2 instructions with T8XD prefix.
class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TF, Requires<[HasSSE42]>;
-
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, T8XD, Requires<[HasSSE42]>;
+
// SS42AI - SSE 4.2 instructions with TA prefix
class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
Requires<[HasSSE42]>;
// AVX Instruction Templates:
@@ -437,76 +474,115 @@ class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm,
// AVX8I - AVX instructions with T8 and OpSize prefix.
// AVXAIi8 - AVX instructions with TA, OpSize prefix and ImmT = Imm8.
class AVX8I<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, OpSize,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize,
Requires<[HasAVX]>;
class AVXAIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, OpSize,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
Requires<[HasAVX]>;
+// AVX2 Instruction Templates:
+// Instructions introduced in AVX2 (no SSE equivalent forms)
+//
+// AVX28I - AVX2 instructions with T8 and OpSize prefix.
+// AVX2AIi8 - AVX2 instructions with TA, OpSize prefix and ImmT = Imm8.
+class AVX28I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8, OpSize,
+ Requires<[HasAVX2]>;
+class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA, OpSize,
+ Requires<[HasAVX2]>;
+
// AES Instruction Templates:
//
// AES8I
// These use the same encoding as the SSE4.2 T8 and TA encodings.
class AES8I<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag>pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
- Requires<[HasAES]>;
+ list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8,
+ Requires<[HasSSE2, HasAES]>;
class AESAI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
- Requires<[HasAES]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ Requires<[HasSSE2, HasAES]>;
// CLMUL Instruction Templates
class CLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag>pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
- OpSize, Requires<[HasCLMUL]>;
+ list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ OpSize, Requires<[HasSSE2, HasCLMUL]>;
class AVXCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag>pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
OpSize, VEX_4V, Requires<[HasAVX, HasCLMUL]>;
// FMA3 Instruction Templates
class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag>pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, T8,
OpSize, VEX_4V, Requires<[HasFMA3]>;
+// FMA4 Instruction Templates
+class FMA4<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ OpSize, VEX_4V, VEX_I8IMM, Requires<[HasFMA4]>;
+
+// XOP 2, 3 and 4 Operand Instruction Template
+class IXOP<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>,
+ XOP, XOP9, Requires<[HasXOP]>;
+
+// XOP 2, 3 and 4 Operand Instruction Templates with imm byte
+class IXOPi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>,
+ XOP, XOP8, Requires<[HasXOP]>;
+
+// XOP 5 operand instruction (VEX encoding!)
+class IXOP5<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag>pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TA,
+ OpSize, VEX_4V, VEX_I8IMM, Requires<[HasXOP]>;
+
// X86-64 Instruction templates...
//
-class RI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, REX_W;
+class RI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, REX_W;
class RIi8 <bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, REX_W;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, REX_W;
class RIi32 <bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii32<o, F, outs, ins, asm, pattern>, REX_W;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii32<o, F, outs, ins, asm, pattern, itin>, REX_W;
class RIi64<bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern>
- : X86Inst<o, f, Imm64, outs, ins, asm>, REX_W {
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : X86Inst<o, f, Imm64, outs, ins, asm, itin>, REX_W {
let Pattern = pattern;
let CodeSize = 3;
}
class RSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : SSI<o, F, outs, ins, asm, pattern>, REX_W;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : SSI<o, F, outs, ins, asm, pattern, itin>, REX_W;
class RSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : SDI<o, F, outs, ins, asm, pattern>, REX_W;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : SDI<o, F, outs, ins, asm, pattern, itin>, REX_W;
class RPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : PDI<o, F, outs, ins, asm, pattern>, REX_W;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : PDI<o, F, outs, ins, asm, pattern, itin>, REX_W;
class VRPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : VPDI<o, F, outs, ins, asm, pattern>, VEX_W;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : VPDI<o, F, outs, ins, asm, pattern, itin>, VEX_W;
// MMX Instruction templates
//
@@ -519,23 +595,23 @@ class VRPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
// MMXID - MMX instructions with XD prefix.
// MMXIS - MMX instructions with XS prefix.
class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>;
class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX,In64BitMode]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX,In64BitMode]>;
class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, REX_W, Requires<[HasMMX]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, TB, REX_W, Requires<[HasMMX]>;
class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, OpSize, Requires<[HasMMX]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : I<o, F, outs, ins, asm, pattern, itin>, TB, OpSize, Requires<[HasMMX]>;
class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>;
class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XD, Requires<[HasMMX]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasMMX]>;
class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm,
- list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasMMX]>;
+ list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
+ : Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[HasMMX]>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index af919fb..041a64f 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -1,10 +1,10 @@
-//======- X86InstrFragmentsSIMD.td - x86 ISA -------------*- tablegen -*-=====//
+//===-- X86InstrFragmentsSIMD.td - x86 SIMD ISA ------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
@@ -41,24 +41,20 @@ def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86",SDTFPToIntOp>;
def X86fhadd : SDNode<"X86ISD::FHADD", SDTFPBinOp>;
def X86fhsub : SDNode<"X86ISD::FHSUB", SDTFPBinOp>;
+def X86hadd : SDNode<"X86ISD::HADD", SDTIntBinOp>;
+def X86hsub : SDNode<"X86ISD::HSUB", SDTIntBinOp>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86cmpss : SDNode<"X86ISD::FSETCCss", SDTX86Cmpss>;
def X86cmpsd : SDNode<"X86ISD::FSETCCsd", SDTX86Cmpsd>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
- SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
def X86andnp : SDNode<"X86ISD::ANDNP",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
-def X86psignb : SDNode<"X86ISD::PSIGNB",
- SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>]>>;
-def X86psignw : SDNode<"X86ISD::PSIGNW",
- SDTypeProfile<1, 2, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>]>>;
-def X86psignd : SDNode<"X86ISD::PSIGND",
- SDTypeProfile<1, 2, [SDTCisVT<0, v4i32>, SDTCisSameAs<0,1>,
+def X86psign : SDNode<"X86ISD::PSIGN",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
@@ -75,20 +71,30 @@ def X86insrtps : SDNode<"X86ISD::INSERTPS",
SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
+def X86vsmovl : SDNode<"X86ISD::VSEXT_MOVL",
+ SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>;
+
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
-def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
-def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
-def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
-def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
-def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
-def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
-def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
+def X86vshldq : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>;
+def X86vshrdq : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>;
+def X86cmpp : SDNode<"X86ISD::CMPP", SDTX86VFCMP>;
+def X86pcmpeq : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpgt : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;
+
+def X86vshl : SDNode<"X86ISD::VSHL",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisVec<2>]>>;
+def X86vsrl : SDNode<"X86ISD::VSRL",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisVec<2>]>>;
+def X86vsra : SDNode<"X86ISD::VSRA",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisVec<2>]>>;
+
+def X86vshli : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
+def X86vsrli : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
+def X86vsrai : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;
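The split above separates shifts whose count arrives in a vector register (X86vshl/X86vsrl/X86vsra: hardware reads a single count from the low 64 bits and applies it to every element) from the *I forms whose count is an immediate. A scalar model of the vector-count form, assuming the usual SSE2 PSLLD behavior (illustrative only, not LLVM code):

#include <array>
#include <cstdint>
#include <cstdio>

using V4 = std::array<uint32_t, 4>;

// One count for all lanes; counts >= the element width zero the result.
static V4 vshl_by_vector_count(V4 a, uint64_t countLowQword) {
  V4 r{};
  for (int i = 0; i < 4; ++i)
    r[i] = countLowQword < 32 ? a[i] << countLowQword : 0;
  return r;
}

int main() {
  V4 r = vshl_by_vector_count({1, 2, 3, 4}, 4);
  std::printf("%u %u %u %u\n", r[0], r[1], r[2], r[3]); // 16 32 48 64
  return 0;
}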
def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
SDTCisVec<1>,
@@ -96,6 +102,17 @@ def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
+def X86vpcom : SDNode<"X86ISD::VPCOM",
+ SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>, SDTCisVT<3, i8>]>>;
+def X86vpcomu : SDNode<"X86ISD::VPCOMU",
+ SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>, SDTCisVT<3, i8>]>>;
+
+def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
+ SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
+ SDTCisSameAs<1,2>]>>;
+
// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
@@ -109,6 +126,8 @@ def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
+def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+                                    SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>;
def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>;
@@ -116,8 +135,7 @@ def X86PShufd : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;
-def X86Shufpd : SDNode<"X86ISD::SHUFPD", SDTShuff3OpI>;
-def X86Shufps : SDNode<"X86ISD::SHUFPS", SDTShuff3OpI>;
+def X86Shufp : SDNode<"X86ISD::SHUFP", SDTShuff3OpI>;
def X86Movddup : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
@@ -129,40 +147,23 @@ def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;
def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;
-def X86Movhlpd : SDNode<"X86ISD::MOVHLPD", SDTShuff2Op>;
def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;
-def X86Unpcklps : SDNode<"X86ISD::UNPCKLPS", SDTShuff2Op>;
-def X86Unpcklpd : SDNode<"X86ISD::UNPCKLPD", SDTShuff2Op>;
-def X86Unpcklpsy : SDNode<"X86ISD::VUNPCKLPSY", SDTShuff2Op>;
-def X86Unpcklpdy : SDNode<"X86ISD::VUNPCKLPDY", SDTShuff2Op>;
-
-def X86Unpckhps : SDNode<"X86ISD::UNPCKHPS", SDTShuff2Op>;
-def X86Unpckhpd : SDNode<"X86ISD::UNPCKHPD", SDTShuff2Op>;
-def X86Unpckhpsy : SDNode<"X86ISD::VUNPCKHPSY", SDTShuff2Op>;
-def X86Unpckhpdy : SDNode<"X86ISD::VUNPCKHPDY", SDTShuff2Op>;
-
-def X86Punpcklbw : SDNode<"X86ISD::PUNPCKLBW", SDTShuff2Op>;
-def X86Punpcklwd : SDNode<"X86ISD::PUNPCKLWD", SDTShuff2Op>;
-def X86Punpckldq : SDNode<"X86ISD::PUNPCKLDQ", SDTShuff2Op>;
-def X86Punpcklqdq : SDNode<"X86ISD::PUNPCKLQDQ", SDTShuff2Op>;
-
-def X86Punpckhbw : SDNode<"X86ISD::PUNPCKHBW", SDTShuff2Op>;
-def X86Punpckhwd : SDNode<"X86ISD::PUNPCKHWD", SDTShuff2Op>;
-def X86Punpckhdq : SDNode<"X86ISD::PUNPCKHDQ", SDTShuff2Op>;
-def X86Punpckhqdq : SDNode<"X86ISD::PUNPCKHQDQ", SDTShuff2Op>;
+def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>;
+def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;
-def X86VPermilps : SDNode<"X86ISD::VPERMILPS", SDTShuff2OpI>;
-def X86VPermilpsy : SDNode<"X86ISD::VPERMILPSY", SDTShuff2OpI>;
-def X86VPermilpd : SDNode<"X86ISD::VPERMILPD", SDTShuff2OpI>;
-def X86VPermilpdy : SDNode<"X86ISD::VPERMILPDY", SDTShuff2OpI>;
+def X86VPermilp : SDNode<"X86ISD::VPERMILP", SDTShuff2OpI>;
-def X86VPerm2f128 : SDNode<"X86ISD::VPERM2F128", SDTShuff3OpI>;
+def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;
def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
+def X86Blendpw : SDNode<"X86ISD::BLENDPW", SDTBlend>;
+def X86Blendps : SDNode<"X86ISD::BLENDPS", SDTBlend>;
+def X86Blendpd : SDNode<"X86ISD::BLENDPD", SDTBlend>;
+
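The BLENDP* nodes above all share the SDTBlend profile: two vector sources plus an i32 immediate whose low bits pick, per element, which source wins. A scalar model assuming the SSE4.1 BLENDPS semantics (illustrative only, not LLVM code):

#include <array>
#include <cstdio>

using V4 = std::array<float, 4>;

// Bit i of imm selects b[i]; a cleared bit keeps a[i].
static V4 blendps(V4 a, V4 b, int imm) {
  V4 r{};
  for (int i = 0; i < 4; ++i)
    r[i] = ((imm >> i) & 1) ? b[i] : a[i];
  return r;
}

int main() {
  V4 r = blendps({0, 0, 0, 0}, {1, 1, 1, 1}, 0x5); // mask 0101
  std::printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]); // 1 0 1 0
  return 0;
}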
//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//
@@ -195,15 +196,15 @@ def sdmem : Operand<v2f64> {
//===----------------------------------------------------------------------===//
// 128-bit load pattern fragments
+// NOTE: all 128-bit integer vector loads are promoted to v2i64
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
-def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
// 256-bit load pattern fragments
+// NOTE: all 256-bit integer vector loads are promoted to v4i64
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
-def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
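Dropping loadv4i32/loadv8i32 follows from the NOTE lines: once every 128-bit integer vector load is promoted to v2i64 (and every 256-bit one to v4i64), the narrower-typed fragments can never match; a v4i32 load becomes a v2i64 load plus a bitconvert over the same 16 bytes. A host-side analogue of that retyping (illustrative only, not LLVM code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint32_t v4i32[4] = {1, 2, 3, 4};
  uint64_t v2i64[2];
  // The "promotion" is a pure retype: identical bytes viewed at a wider type.
  std::memcpy(v2i64, v4i32, sizeof v2i64);
  std::printf("%#llx %#llx\n", (unsigned long long)v2i64[0],
              (unsigned long long)v2i64[1]); // 0x200000001 0x400000003 (LE)
  return 0;
}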
// Like 'store', but always requires 128-bit vector alignment.
@@ -223,6 +224,11 @@ def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
+// Like 'X86vzload', but always requires 128-bit vector alignment.
+def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
+ return cast<MemSDNode>(N)->getAlignment() >= 16;
+}]>;
+
// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return cast<LoadSDNode>(N)->getAlignment() >= 32;
@@ -234,22 +240,20 @@ def alignedloadfsf64 : PatFrag<(ops node:$ptr),
(f64 (alignedload node:$ptr))>;
// 128-bit aligned load pattern fragments
+// NOTE: all 128-bit integer vector loads are promoted to v2i64
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
(v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
(v2f64 (alignedload node:$ptr))>;
-def alignedloadv4i32 : PatFrag<(ops node:$ptr),
- (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
(v2i64 (alignedload node:$ptr))>;
// 256-bit aligned load pattern fragments
+// NOTE: all 256-bit integer vector loads are promoted to v4i64
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
(v8f32 (alignedload256 node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
(v4f64 (alignedload256 node:$ptr))>;
-def alignedloadv8i32 : PatFrag<(ops node:$ptr),
- (v8i32 (alignedload256 node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
(v4i64 (alignedload256 node:$ptr))>;
@@ -268,19 +272,16 @@ def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
// 128-bit memop pattern fragments
+// NOTE: all 128-bit integer vector loads are promoted to v2i64
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
-def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
-def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop node:$ptr))>;
-def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
// 256-bit memop pattern fragments
-def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>;
+// NOTE: all 256-bit integer vector loads are promoted to v4i64
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
-def memopv8i32 : PatFrag<(ops node:$ptr), (v8i32 (memop node:$ptr))>;
// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
@@ -326,6 +327,8 @@ def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
// 256-bit bitconvert pattern fragments
+def bc_v32i8 : PatFrag<(ops node:$in), (v32i8 (bitconvert node:$in))>;
+def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64 : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;
@@ -350,30 +353,6 @@ def BYTE_imm : SDNodeXForm<imm, [{
return getI32Imm(N->getZExtValue() >> 3);
}]>;
-// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
-// SHUFP* etc. imm.
-def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShuffleSHUFImmediate(N));
-}]>;
-
-// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
-// PSHUFHW imm.
-def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
-}]>;
-
-// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
-// PSHUFLW imm.
-def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
-}]>;
-
-// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
-// a PALIGNR imm.
-def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePALIGNRImmediate(N));
-}]>;
-
// EXTRACT_get_vextractf128_imm xform function: convert extract_subvector index
// to VEXTRACTF128 imm.
def EXTRACT_get_vextractf128_imm : SDNodeXForm<extract_subvector, [{
@@ -386,72 +365,6 @@ def INSERT_get_vinsertf128_imm : SDNodeXForm<insert_subvector, [{
return getI8Imm(X86::getInsertVINSERTF128Immediate(N));
}]>;
-def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
-}]>;
-
-def movddup : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movlp : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movl : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_shuf_imm>;
-
-def shufp : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_shuf_imm>;
-
-def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_pshufhw_imm>;
-
-def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_pshuflw_imm>;
-
def vextractf128_extract : PatFrag<(ops node:$bigvec, node:$index),
(extract_subvector node:$bigvec,
node:$index), [{
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
index 3a02de0..307c96b 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
+//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,14 +25,13 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/MC/MCAsmInfo.h"
#include <limits>
#define GET_INSTRINFO_CTOR
@@ -83,6 +82,12 @@ enum {
TB_FOLDED_STORE = 1 << 19
};
+struct X86OpTblEntry {
+ uint16_t RegOp;
+ uint16_t MemOp;
+ uint32_t Flags;
+};
+
X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
: X86GenInstrInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
? X86::ADJCALLSTACKDOWN64
@@ -92,7 +97,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
: X86::ADJCALLSTACKUP32)),
TM(tm), RI(tm, *this) {
- static const unsigned OpTbl2Addr[][3] = {
+ static const X86OpTblEntry OpTbl2Addr[] = {
{ X86::ADC32ri, X86::ADC32mi, 0 },
{ X86::ADC32ri8, X86::ADC32mi8, 0 },
{ X86::ADC32rr, X86::ADC32mr, 0 },
@@ -260,22 +265,21 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
};
for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
- unsigned RegOp = OpTbl2Addr[i][0];
- unsigned MemOp = OpTbl2Addr[i][1];
- unsigned Flags = OpTbl2Addr[i][2];
+ unsigned RegOp = OpTbl2Addr[i].RegOp;
+ unsigned MemOp = OpTbl2Addr[i].MemOp;
+ unsigned Flags = OpTbl2Addr[i].Flags;
AddTableEntry(RegOp2MemOpTable2Addr, MemOp2RegOpTable,
RegOp, MemOp,
// Index 0, folded load and store, no alignment requirement.
Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
}
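Replacing the anonymous unsigned[][3] rows with the named X86OpTblEntry struct makes each column self-describing and, since target opcode enums fit in 16 bits, shrinks an entry from 12 to 8 bytes. A standalone sketch of the same pattern with made-up opcode and flag values (illustrative only, not the real tables):

#include <cstdint>
#include <cstdio>

struct OpTblEntry {
  uint16_t RegOp; // register-form opcode (hypothetical values below)
  uint16_t MemOp; // memory/folded-form opcode
  uint32_t Flags; // folding flags, e.g. index, alignment, load/store bits
};

static const OpTblEntry Tbl[] = {
  {0x0011, 0x0021, 1u << 16},
  {0x0012, 0x0022, 0},
};

int main() {
  for (const OpTblEntry &E : Tbl)
    std::printf("reg=%#x mem=%#x flags=%#x\n", E.RegOp, E.MemOp, E.Flags);
  return 0;
}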
- static const unsigned OpTbl0[][3] = {
+ static const X86OpTblEntry OpTbl0[] = {
{ X86::BT16ri8, X86::BT16mi8, TB_FOLDED_LOAD },
{ X86::BT32ri8, X86::BT32mi8, TB_FOLDED_LOAD },
{ X86::BT64ri8, X86::BT64mi8, TB_FOLDED_LOAD },
{ X86::CALL32r, X86::CALL32m, TB_FOLDED_LOAD },
{ X86::CALL64r, X86::CALL64m, TB_FOLDED_LOAD },
- { X86::WINCALL64r, X86::WINCALL64m, TB_FOLDED_LOAD },
{ X86::CMP16ri, X86::CMP16mi, TB_FOLDED_LOAD },
{ X86::CMP16ri8, X86::CMP16mi8, TB_FOLDED_LOAD },
{ X86::CMP16rr, X86::CMP16mr, TB_FOLDED_LOAD },
@@ -352,6 +356,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VEXTRACTPSrr,X86::VEXTRACTPSmr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::FsVMOVAPDrr, X86::VMOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::FsVMOVAPSrr, X86::VMOVSSmr, TB_FOLDED_STORE | TB_NO_REVERSE },
+ { X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::VMOVAPDrr, X86::VMOVAPDmr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::VMOVAPSrr, X86::VMOVAPSmr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::VMOVDQArr, X86::VMOVDQAmr, TB_FOLDED_STORE | TB_ALIGN_16 },
@@ -362,6 +367,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMOVUPDrr, X86::VMOVUPDmr, TB_FOLDED_STORE },
{ X86::VMOVUPSrr, X86::VMOVUPSmr, TB_FOLDED_STORE },
// AVX 256-bit foldable instructions
+ { X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
{ X86::VMOVAPDYrr, X86::VMOVAPDYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
{ X86::VMOVAPSYrr, X86::VMOVAPSYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
{ X86::VMOVDQAYrr, X86::VMOVDQAYmr, TB_FOLDED_STORE | TB_ALIGN_32 },
@@ -370,14 +376,14 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
};
for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
- unsigned RegOp = OpTbl0[i][0];
- unsigned MemOp = OpTbl0[i][1];
- unsigned Flags = OpTbl0[i][2];
+ unsigned RegOp = OpTbl0[i].RegOp;
+ unsigned MemOp = OpTbl0[i].MemOp;
+ unsigned Flags = OpTbl0[i].Flags;
AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable,
RegOp, MemOp, TB_INDEX_0 | Flags);
}
- static const unsigned OpTbl1[][3] = {
+ static const X86OpTblEntry OpTbl1[] = {
{ X86::CMP16rr, X86::CMP16rm, 0 },
{ X86::CMP32rr, X86::CMP32rm, 0 },
{ X86::CMP64rr, X86::CMP64rm, 0 },
@@ -456,6 +462,9 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::MOVZX64rr16, X86::MOVZX64rm16, 0 },
{ X86::MOVZX64rr32, X86::MOVZX64rm32, 0 },
{ X86::MOVZX64rr8, X86::MOVZX64rm8, 0 },
+ { X86::PABSBrr128, X86::PABSBrm128, TB_ALIGN_16 },
+ { X86::PABSDrr128, X86::PABSDrm128, TB_ALIGN_16 },
+ { X86::PABSWrr128, X86::PABSWrm128, TB_ALIGN_16 },
{ X86::PSHUFDri, X86::PSHUFDmi, TB_ALIGN_16 },
{ X86::PSHUFHWri, X86::PSHUFHWmi, TB_ALIGN_16 },
{ X86::PSHUFLWri, X86::PSHUFLWmi, TB_ALIGN_16 },
@@ -508,6 +517,11 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMOVZDI2PDIrr, X86::VMOVZDI2PDIrm, 0 },
{ X86::VMOVZQI2PQIrr, X86::VMOVZQI2PQIrm, 0 },
{ X86::VMOVZPQILo2PQIrr,X86::VMOVZPQILo2PQIrm, TB_ALIGN_16 },
+ { X86::VPABSBrr128, X86::VPABSBrm128, TB_ALIGN_16 },
+ { X86::VPABSDrr128, X86::VPABSDrm128, TB_ALIGN_16 },
+ { X86::VPABSWrr128, X86::VPABSWrm128, TB_ALIGN_16 },
+ { X86::VPERMILPDri, X86::VPERMILPDmi, TB_ALIGN_16 },
+ { X86::VPERMILPSri, X86::VPERMILPSmi, TB_ALIGN_16 },
{ X86::VPSHUFDri, X86::VPSHUFDmi, TB_ALIGN_16 },
{ X86::VPSHUFHWri, X86::VPSHUFHWmi, TB_ALIGN_16 },
{ X86::VPSHUFLWri, X86::VPSHUFLWmi, TB_ALIGN_16 },
@@ -524,22 +538,39 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
// AVX 256-bit foldable instructions
{ X86::VMOVAPDYrr, X86::VMOVAPDYrm, TB_ALIGN_32 },
{ X86::VMOVAPSYrr, X86::VMOVAPSYrm, TB_ALIGN_32 },
- { X86::VMOVDQAYrr, X86::VMOVDQAYrm, TB_ALIGN_16 },
+ { X86::VMOVDQAYrr, X86::VMOVDQAYrm, TB_ALIGN_32 },
{ X86::VMOVUPDYrr, X86::VMOVUPDYrm, 0 },
- { X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 }
+ { X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 },
+ { X86::VPERMILPDYri, X86::VPERMILPDYmi, TB_ALIGN_32 },
+ { X86::VPERMILPSYri, X86::VPERMILPSYmi, TB_ALIGN_32 },
+ // AVX2 foldable instructions
+ { X86::VPABSBrr256, X86::VPABSBrm256, TB_ALIGN_32 },
+ { X86::VPABSDrr256, X86::VPABSDrm256, TB_ALIGN_32 },
+ { X86::VPABSWrr256, X86::VPABSWrm256, TB_ALIGN_32 },
+ { X86::VPSHUFDYri, X86::VPSHUFDYmi, TB_ALIGN_32 },
+ { X86::VPSHUFHWYri, X86::VPSHUFHWYmi, TB_ALIGN_32 },
+ { X86::VPSHUFLWYri, X86::VPSHUFLWYmi, TB_ALIGN_32 },
+ { X86::VRCPPSYr, X86::VRCPPSYm, TB_ALIGN_32 },
+ { X86::VRCPPSYr_Int, X86::VRCPPSYm_Int, TB_ALIGN_32 },
+ { X86::VRSQRTPSYr, X86::VRSQRTPSYm, TB_ALIGN_32 },
+ { X86::VRSQRTPSYr_Int, X86::VRSQRTPSYm_Int, TB_ALIGN_32 },
+ { X86::VSQRTPDYr, X86::VSQRTPDYm, TB_ALIGN_32 },
+ { X86::VSQRTPDYr_Int, X86::VSQRTPDYm_Int, TB_ALIGN_32 },
+ { X86::VSQRTPSYr, X86::VSQRTPSYm, TB_ALIGN_32 },
+ { X86::VSQRTPSYr_Int, X86::VSQRTPSYm_Int, TB_ALIGN_32 },
};
for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
- unsigned RegOp = OpTbl1[i][0];
- unsigned MemOp = OpTbl1[i][1];
- unsigned Flags = OpTbl1[i][2];
+ unsigned RegOp = OpTbl1[i].RegOp;
+ unsigned MemOp = OpTbl1[i].MemOp;
+ unsigned Flags = OpTbl1[i].Flags;
AddTableEntry(RegOp2MemOpTable1, MemOp2RegOpTable,
RegOp, MemOp,
// Index 1, folded load
Flags | TB_INDEX_1 | TB_FOLDED_LOAD);
}
- static const unsigned OpTbl2[][3] = {
+ static const X86OpTblEntry OpTbl2[] = {
{ X86::ADC32rr, X86::ADC32rm, 0 },
{ X86::ADC64rr, X86::ADC64rm, 0 },
{ X86::ADD16rr, X86::ADD16rm, 0 },
@@ -563,6 +594,10 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::ANDNPSrr, X86::ANDNPSrm, TB_ALIGN_16 },
{ X86::ANDPDrr, X86::ANDPDrm, TB_ALIGN_16 },
{ X86::ANDPSrr, X86::ANDPSrm, TB_ALIGN_16 },
+ { X86::BLENDPDrri, X86::BLENDPDrmi, TB_ALIGN_16 },
+ { X86::BLENDPSrri, X86::BLENDPSrmi, TB_ALIGN_16 },
+ { X86::BLENDVPDrr0, X86::BLENDVPDrm0, TB_ALIGN_16 },
+ { X86::BLENDVPSrr0, X86::BLENDVPSrm0, TB_ALIGN_16 },
{ X86::CMOVA16rr, X86::CMOVA16rm, 0 },
{ X86::CMOVA32rr, X86::CMOVA32rm, 0 },
{ X86::CMOVA64rr, X86::CMOVA64rm, 0 },
@@ -652,6 +687,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::MINSDrr_Int, X86::MINSDrm_Int, 0 },
{ X86::MINSSrr, X86::MINSSrm, 0 },
{ X86::MINSSrr_Int, X86::MINSSrm_Int, 0 },
+ { X86::MPSADBWrri, X86::MPSADBWrmi, TB_ALIGN_16 },
{ X86::MULPDrr, X86::MULPDrm, TB_ALIGN_16 },
{ X86::MULPSrr, X86::MULPSrm, TB_ALIGN_16 },
{ X86::MULSDrr, X86::MULSDrm, 0 },
@@ -664,30 +700,45 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::ORPSrr, X86::ORPSrm, TB_ALIGN_16 },
{ X86::PACKSSDWrr, X86::PACKSSDWrm, TB_ALIGN_16 },
{ X86::PACKSSWBrr, X86::PACKSSWBrm, TB_ALIGN_16 },
+ { X86::PACKUSDWrr, X86::PACKUSDWrm, TB_ALIGN_16 },
{ X86::PACKUSWBrr, X86::PACKUSWBrm, TB_ALIGN_16 },
{ X86::PADDBrr, X86::PADDBrm, TB_ALIGN_16 },
{ X86::PADDDrr, X86::PADDDrm, TB_ALIGN_16 },
{ X86::PADDQrr, X86::PADDQrm, TB_ALIGN_16 },
{ X86::PADDSBrr, X86::PADDSBrm, TB_ALIGN_16 },
{ X86::PADDSWrr, X86::PADDSWrm, TB_ALIGN_16 },
+ { X86::PADDUSBrr, X86::PADDUSBrm, TB_ALIGN_16 },
+ { X86::PADDUSWrr, X86::PADDUSWrm, TB_ALIGN_16 },
{ X86::PADDWrr, X86::PADDWrm, TB_ALIGN_16 },
+ { X86::PALIGNR128rr, X86::PALIGNR128rm, TB_ALIGN_16 },
{ X86::PANDNrr, X86::PANDNrm, TB_ALIGN_16 },
{ X86::PANDrr, X86::PANDrm, TB_ALIGN_16 },
{ X86::PAVGBrr, X86::PAVGBrm, TB_ALIGN_16 },
{ X86::PAVGWrr, X86::PAVGWrm, TB_ALIGN_16 },
+ { X86::PBLENDWrri, X86::PBLENDWrmi, TB_ALIGN_16 },
{ X86::PCMPEQBrr, X86::PCMPEQBrm, TB_ALIGN_16 },
{ X86::PCMPEQDrr, X86::PCMPEQDrm, TB_ALIGN_16 },
+ { X86::PCMPEQQrr, X86::PCMPEQQrm, TB_ALIGN_16 },
{ X86::PCMPEQWrr, X86::PCMPEQWrm, TB_ALIGN_16 },
{ X86::PCMPGTBrr, X86::PCMPGTBrm, TB_ALIGN_16 },
{ X86::PCMPGTDrr, X86::PCMPGTDrm, TB_ALIGN_16 },
+ { X86::PCMPGTQrr, X86::PCMPGTQrm, TB_ALIGN_16 },
{ X86::PCMPGTWrr, X86::PCMPGTWrm, TB_ALIGN_16 },
+ { X86::PHADDDrr, X86::PHADDDrm, TB_ALIGN_16 },
+ { X86::PHADDWrr, X86::PHADDWrm, TB_ALIGN_16 },
+ { X86::PHADDSWrr128, X86::PHADDSWrm128, TB_ALIGN_16 },
+ { X86::PHSUBDrr, X86::PHSUBDrm, TB_ALIGN_16 },
+ { X86::PHSUBSWrr128, X86::PHSUBSWrm128, TB_ALIGN_16 },
+ { X86::PHSUBWrr, X86::PHSUBWrm, TB_ALIGN_16 },
{ X86::PINSRWrri, X86::PINSRWrmi, TB_ALIGN_16 },
+ { X86::PMADDUBSWrr128, X86::PMADDUBSWrm128, TB_ALIGN_16 },
{ X86::PMADDWDrr, X86::PMADDWDrm, TB_ALIGN_16 },
{ X86::PMAXSWrr, X86::PMAXSWrm, TB_ALIGN_16 },
{ X86::PMAXUBrr, X86::PMAXUBrm, TB_ALIGN_16 },
{ X86::PMINSWrr, X86::PMINSWrm, TB_ALIGN_16 },
{ X86::PMINUBrr, X86::PMINUBrm, TB_ALIGN_16 },
{ X86::PMULDQrr, X86::PMULDQrm, TB_ALIGN_16 },
+ { X86::PMULHRSWrr128, X86::PMULHRSWrm128, TB_ALIGN_16 },
{ X86::PMULHUWrr, X86::PMULHUWrm, TB_ALIGN_16 },
{ X86::PMULHWrr, X86::PMULHWrm, TB_ALIGN_16 },
{ X86::PMULLDrr, X86::PMULLDrm, TB_ALIGN_16 },
@@ -695,6 +746,10 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::PMULUDQrr, X86::PMULUDQrm, TB_ALIGN_16 },
{ X86::PORrr, X86::PORrm, TB_ALIGN_16 },
{ X86::PSADBWrr, X86::PSADBWrm, TB_ALIGN_16 },
+ { X86::PSHUFBrr, X86::PSHUFBrm, TB_ALIGN_16 },
+ { X86::PSIGNBrr, X86::PSIGNBrm, TB_ALIGN_16 },
+ { X86::PSIGNWrr, X86::PSIGNWrm, TB_ALIGN_16 },
+ { X86::PSIGNDrr, X86::PSIGNDrm, TB_ALIGN_16 },
{ X86::PSLLDrr, X86::PSLLDrm, TB_ALIGN_16 },
{ X86::PSLLQrr, X86::PSLLQrm, TB_ALIGN_16 },
{ X86::PSLLWrr, X86::PSLLWrm, TB_ALIGN_16 },
@@ -778,6 +833,10 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VANDNPSrr, X86::VANDNPSrm, TB_ALIGN_16 },
{ X86::VANDPDrr, X86::VANDPDrm, TB_ALIGN_16 },
{ X86::VANDPSrr, X86::VANDPSrm, TB_ALIGN_16 },
+ { X86::VBLENDPDrri, X86::VBLENDPDrmi, TB_ALIGN_16 },
+ { X86::VBLENDPSrri, X86::VBLENDPSrmi, TB_ALIGN_16 },
+ { X86::VBLENDVPDrr, X86::VBLENDVPDrm, TB_ALIGN_16 },
+ { X86::VBLENDVPSrr, X86::VBLENDVPSrm, TB_ALIGN_16 },
{ X86::VCMPPDrri, X86::VCMPPDrmi, TB_ALIGN_16 },
{ X86::VCMPPSrri, X86::VCMPPSrmi, TB_ALIGN_16 },
{ X86::VCMPSDrr, X86::VCMPSDrm, 0 },
@@ -816,6 +875,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VMINSDrr_Int, X86::VMINSDrm_Int, 0 },
{ X86::VMINSSrr, X86::VMINSSrm, 0 },
{ X86::VMINSSrr_Int, X86::VMINSSrm_Int, 0 },
+ { X86::VMPSADBWrri, X86::VMPSADBWrmi, TB_ALIGN_16 },
{ X86::VMULPDrr, X86::VMULPDrm, TB_ALIGN_16 },
{ X86::VMULPSrr, X86::VMULPSrm, TB_ALIGN_16 },
{ X86::VMULSDrr, X86::VMULSDrm, 0 },
@@ -824,28 +884,47 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VORPSrr, X86::VORPSrm, TB_ALIGN_16 },
{ X86::VPACKSSDWrr, X86::VPACKSSDWrm, TB_ALIGN_16 },
{ X86::VPACKSSWBrr, X86::VPACKSSWBrm, TB_ALIGN_16 },
+ { X86::VPACKUSDWrr, X86::VPACKUSDWrm, TB_ALIGN_16 },
{ X86::VPACKUSWBrr, X86::VPACKUSWBrm, TB_ALIGN_16 },
{ X86::VPADDBrr, X86::VPADDBrm, TB_ALIGN_16 },
{ X86::VPADDDrr, X86::VPADDDrm, TB_ALIGN_16 },
{ X86::VPADDQrr, X86::VPADDQrm, TB_ALIGN_16 },
{ X86::VPADDSBrr, X86::VPADDSBrm, TB_ALIGN_16 },
{ X86::VPADDSWrr, X86::VPADDSWrm, TB_ALIGN_16 },
+ { X86::VPADDUSBrr, X86::VPADDUSBrm, TB_ALIGN_16 },
+ { X86::VPADDUSWrr, X86::VPADDUSWrm, TB_ALIGN_16 },
{ X86::VPADDWrr, X86::VPADDWrm, TB_ALIGN_16 },
+ { X86::VPALIGNR128rr, X86::VPALIGNR128rm, TB_ALIGN_16 },
{ X86::VPANDNrr, X86::VPANDNrm, TB_ALIGN_16 },
{ X86::VPANDrr, X86::VPANDrm, TB_ALIGN_16 },
+ { X86::VPAVGBrr, X86::VPAVGBrm, TB_ALIGN_16 },
+ { X86::VPAVGWrr, X86::VPAVGWrm, TB_ALIGN_16 },
+ { X86::VPBLENDWrri, X86::VPBLENDWrmi, TB_ALIGN_16 },
{ X86::VPCMPEQBrr, X86::VPCMPEQBrm, TB_ALIGN_16 },
{ X86::VPCMPEQDrr, X86::VPCMPEQDrm, TB_ALIGN_16 },
+ { X86::VPCMPEQQrr, X86::VPCMPEQQrm, TB_ALIGN_16 },
{ X86::VPCMPEQWrr, X86::VPCMPEQWrm, TB_ALIGN_16 },
{ X86::VPCMPGTBrr, X86::VPCMPGTBrm, TB_ALIGN_16 },
{ X86::VPCMPGTDrr, X86::VPCMPGTDrm, TB_ALIGN_16 },
+ { X86::VPCMPGTQrr, X86::VPCMPGTQrm, TB_ALIGN_16 },
{ X86::VPCMPGTWrr, X86::VPCMPGTWrm, TB_ALIGN_16 },
+ { X86::VPHADDDrr, X86::VPHADDDrm, TB_ALIGN_16 },
+ { X86::VPHADDSWrr128, X86::VPHADDSWrm128, TB_ALIGN_16 },
+ { X86::VPHADDWrr, X86::VPHADDWrm, TB_ALIGN_16 },
+ { X86::VPHSUBDrr, X86::VPHSUBDrm, TB_ALIGN_16 },
+ { X86::VPHSUBSWrr128, X86::VPHSUBSWrm128, TB_ALIGN_16 },
+ { X86::VPHSUBWrr, X86::VPHSUBWrm, TB_ALIGN_16 },
+ { X86::VPERMILPDrr, X86::VPERMILPDrm, TB_ALIGN_16 },
+ { X86::VPERMILPSrr, X86::VPERMILPSrm, TB_ALIGN_16 },
{ X86::VPINSRWrri, X86::VPINSRWrmi, TB_ALIGN_16 },
+ { X86::VPMADDUBSWrr128, X86::VPMADDUBSWrm128, TB_ALIGN_16 },
{ X86::VPMADDWDrr, X86::VPMADDWDrm, TB_ALIGN_16 },
{ X86::VPMAXSWrr, X86::VPMAXSWrm, TB_ALIGN_16 },
{ X86::VPMAXUBrr, X86::VPMAXUBrm, TB_ALIGN_16 },
{ X86::VPMINSWrr, X86::VPMINSWrm, TB_ALIGN_16 },
{ X86::VPMINUBrr, X86::VPMINUBrm, TB_ALIGN_16 },
{ X86::VPMULDQrr, X86::VPMULDQrm, TB_ALIGN_16 },
+ { X86::VPMULHRSWrr128, X86::VPMULHRSWrm128, TB_ALIGN_16 },
{ X86::VPMULHUWrr, X86::VPMULHUWrm, TB_ALIGN_16 },
{ X86::VPMULHWrr, X86::VPMULHWrm, TB_ALIGN_16 },
{ X86::VPMULLDrr, X86::VPMULLDrm, TB_ALIGN_16 },
@@ -853,6 +932,10 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VPMULUDQrr, X86::VPMULUDQrm, TB_ALIGN_16 },
{ X86::VPORrr, X86::VPORrm, TB_ALIGN_16 },
{ X86::VPSADBWrr, X86::VPSADBWrm, TB_ALIGN_16 },
+ { X86::VPSHUFBrr, X86::VPSHUFBrm, TB_ALIGN_16 },
+ { X86::VPSIGNBrr, X86::VPSIGNBrm, TB_ALIGN_16 },
+ { X86::VPSIGNWrr, X86::VPSIGNWrm, TB_ALIGN_16 },
+ { X86::VPSIGNDrr, X86::VPSIGNDrm, TB_ALIGN_16 },
{ X86::VPSLLDrr, X86::VPSLLDrm, TB_ALIGN_16 },
{ X86::VPSLLQrr, X86::VPSLLQrm, TB_ALIGN_16 },
{ X86::VPSLLWrr, X86::VPSLLWrm, TB_ALIGN_16 },
@@ -886,14 +969,154 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::VUNPCKLPDrr, X86::VUNPCKLPDrm, TB_ALIGN_16 },
{ X86::VUNPCKLPSrr, X86::VUNPCKLPSrm, TB_ALIGN_16 },
{ X86::VXORPDrr, X86::VXORPDrm, TB_ALIGN_16 },
- { X86::VXORPSrr, X86::VXORPSrm, TB_ALIGN_16 }
+ { X86::VXORPSrr, X86::VXORPSrm, TB_ALIGN_16 },
+ // AVX 256-bit foldable instructions
+ { X86::VADDPDYrr, X86::VADDPDYrm, TB_ALIGN_32 },
+ { X86::VADDPSYrr, X86::VADDPSYrm, TB_ALIGN_32 },
+ { X86::VADDSUBPDYrr, X86::VADDSUBPDYrm, TB_ALIGN_32 },
+ { X86::VADDSUBPSYrr, X86::VADDSUBPSYrm, TB_ALIGN_32 },
+ { X86::VANDNPDYrr, X86::VANDNPDYrm, TB_ALIGN_32 },
+ { X86::VANDNPSYrr, X86::VANDNPSYrm, TB_ALIGN_32 },
+ { X86::VANDPDYrr, X86::VANDPDYrm, TB_ALIGN_32 },
+ { X86::VANDPSYrr, X86::VANDPSYrm, TB_ALIGN_32 },
+ { X86::VBLENDPDYrri, X86::VBLENDPDYrmi, TB_ALIGN_32 },
+ { X86::VBLENDPSYrri, X86::VBLENDPSYrmi, TB_ALIGN_32 },
+ { X86::VBLENDVPDYrr, X86::VBLENDVPDYrm, TB_ALIGN_32 },
+ { X86::VBLENDVPSYrr, X86::VBLENDVPSYrm, TB_ALIGN_32 },
+ { X86::VCMPPDYrri, X86::VCMPPDYrmi, TB_ALIGN_32 },
+ { X86::VCMPPSYrri, X86::VCMPPSYrmi, TB_ALIGN_32 },
+ { X86::VDIVPDYrr, X86::VDIVPDYrm, TB_ALIGN_32 },
+ { X86::VDIVPSYrr, X86::VDIVPSYrm, TB_ALIGN_32 },
+ { X86::VHADDPDYrr, X86::VHADDPDYrm, TB_ALIGN_32 },
+ { X86::VHADDPSYrr, X86::VHADDPSYrm, TB_ALIGN_32 },
+ { X86::VHSUBPDYrr, X86::VHSUBPDYrm, TB_ALIGN_32 },
+ { X86::VHSUBPSYrr, X86::VHSUBPSYrm, TB_ALIGN_32 },
+ { X86::VINSERTF128rr, X86::VINSERTF128rm, TB_ALIGN_32 },
+ { X86::VMAXPDYrr, X86::VMAXPDYrm, TB_ALIGN_32 },
+ { X86::VMAXPDYrr_Int, X86::VMAXPDYrm_Int, TB_ALIGN_32 },
+ { X86::VMAXPSYrr, X86::VMAXPSYrm, TB_ALIGN_32 },
+ { X86::VMAXPSYrr_Int, X86::VMAXPSYrm_Int, TB_ALIGN_32 },
+ { X86::VMINPDYrr, X86::VMINPDYrm, TB_ALIGN_32 },
+ { X86::VMINPDYrr_Int, X86::VMINPDYrm_Int, TB_ALIGN_32 },
+ { X86::VMINPSYrr, X86::VMINPSYrm, TB_ALIGN_32 },
+ { X86::VMINPSYrr_Int, X86::VMINPSYrm_Int, TB_ALIGN_32 },
+ { X86::VMULPDYrr, X86::VMULPDYrm, TB_ALIGN_32 },
+ { X86::VMULPSYrr, X86::VMULPSYrm, TB_ALIGN_32 },
+ { X86::VORPDYrr, X86::VORPDYrm, TB_ALIGN_32 },
+ { X86::VORPSYrr, X86::VORPSYrm, TB_ALIGN_32 },
+ { X86::VPERM2F128rr, X86::VPERM2F128rm, TB_ALIGN_32 },
+ { X86::VPERMILPDYrr, X86::VPERMILPDYrm, TB_ALIGN_32 },
+ { X86::VPERMILPSYrr, X86::VPERMILPSYrm, TB_ALIGN_32 },
+ { X86::VSHUFPDYrri, X86::VSHUFPDYrmi, TB_ALIGN_32 },
+ { X86::VSHUFPSYrri, X86::VSHUFPSYrmi, TB_ALIGN_32 },
+ { X86::VSUBPDYrr, X86::VSUBPDYrm, TB_ALIGN_32 },
+ { X86::VSUBPSYrr, X86::VSUBPSYrm, TB_ALIGN_32 },
+ { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrm, TB_ALIGN_32 },
+ { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrm, TB_ALIGN_32 },
+ { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrm, TB_ALIGN_32 },
+ { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrm, TB_ALIGN_32 },
+ { X86::VXORPDYrr, X86::VXORPDYrm, TB_ALIGN_32 },
+ { X86::VXORPSYrr, X86::VXORPSYrm, TB_ALIGN_32 },
+ // AVX2 foldable instructions
+ { X86::VINSERTI128rr, X86::VINSERTI128rm, TB_ALIGN_16 },
+ { X86::VPACKSSDWYrr, X86::VPACKSSDWYrm, TB_ALIGN_32 },
+ { X86::VPACKSSWBYrr, X86::VPACKSSWBYrm, TB_ALIGN_32 },
+ { X86::VPACKUSDWYrr, X86::VPACKUSDWYrm, TB_ALIGN_32 },
+ { X86::VPACKUSWBYrr, X86::VPACKUSWBYrm, TB_ALIGN_32 },
+ { X86::VPADDBYrr, X86::VPADDBYrm, TB_ALIGN_32 },
+ { X86::VPADDDYrr, X86::VPADDDYrm, TB_ALIGN_32 },
+ { X86::VPADDQYrr, X86::VPADDQYrm, TB_ALIGN_32 },
+ { X86::VPADDSBYrr, X86::VPADDSBYrm, TB_ALIGN_32 },
+ { X86::VPADDSWYrr, X86::VPADDSWYrm, TB_ALIGN_32 },
+ { X86::VPADDUSBYrr, X86::VPADDUSBYrm, TB_ALIGN_32 },
+ { X86::VPADDUSWYrr, X86::VPADDUSWYrm, TB_ALIGN_32 },
+ { X86::VPADDWYrr, X86::VPADDWYrm, TB_ALIGN_32 },
+ { X86::VPALIGNR256rr, X86::VPALIGNR256rm, TB_ALIGN_32 },
+ { X86::VPANDNYrr, X86::VPANDNYrm, TB_ALIGN_32 },
+ { X86::VPANDYrr, X86::VPANDYrm, TB_ALIGN_32 },
+ { X86::VPAVGBYrr, X86::VPAVGBYrm, TB_ALIGN_32 },
+ { X86::VPAVGWYrr, X86::VPAVGWYrm, TB_ALIGN_32 },
+ { X86::VPBLENDDrri, X86::VPBLENDDrmi, TB_ALIGN_32 },
+ { X86::VPBLENDDYrri, X86::VPBLENDDYrmi, TB_ALIGN_32 },
+ { X86::VPBLENDWYrri, X86::VPBLENDWYrmi, TB_ALIGN_32 },
+ { X86::VPCMPEQBYrr, X86::VPCMPEQBYrm, TB_ALIGN_32 },
+ { X86::VPCMPEQDYrr, X86::VPCMPEQDYrm, TB_ALIGN_32 },
+ { X86::VPCMPEQQYrr, X86::VPCMPEQQYrm, TB_ALIGN_32 },
+ { X86::VPCMPEQWYrr, X86::VPCMPEQWYrm, TB_ALIGN_32 },
+ { X86::VPCMPGTBYrr, X86::VPCMPGTBYrm, TB_ALIGN_32 },
+ { X86::VPCMPGTDYrr, X86::VPCMPGTDYrm, TB_ALIGN_32 },
+ { X86::VPCMPGTQYrr, X86::VPCMPGTQYrm, TB_ALIGN_32 },
+ { X86::VPCMPGTWYrr, X86::VPCMPGTWYrm, TB_ALIGN_32 },
+ { X86::VPERM2I128rr, X86::VPERM2I128rm, TB_ALIGN_32 },
+ { X86::VPERMDYrr, X86::VPERMDYrm, TB_ALIGN_32 },
+ { X86::VPERMPDYrr, X86::VPERMPDYrm, TB_ALIGN_32 },
+ { X86::VPERMPSYrr, X86::VPERMPSYrm, TB_ALIGN_32 },
+ { X86::VPERMQYrr, X86::VPERMQYrm, TB_ALIGN_32 },
+ { X86::VPHADDDYrr, X86::VPHADDDYrm, TB_ALIGN_32 },
+ { X86::VPHADDSWrr256, X86::VPHADDSWrm256, TB_ALIGN_32 },
+ { X86::VPHADDWYrr, X86::VPHADDWYrm, TB_ALIGN_32 },
+ { X86::VPHSUBDYrr, X86::VPHSUBDYrm, TB_ALIGN_32 },
+ { X86::VPHSUBSWrr256, X86::VPHSUBSWrm256, TB_ALIGN_32 },
+ { X86::VPHSUBWYrr, X86::VPHSUBWYrm, TB_ALIGN_32 },
+ { X86::VPMADDUBSWrr256, X86::VPMADDUBSWrm256, TB_ALIGN_32 },
+ { X86::VPMADDWDYrr, X86::VPMADDWDYrm, TB_ALIGN_32 },
+ { X86::VPMAXSWYrr, X86::VPMAXSWYrm, TB_ALIGN_32 },
+ { X86::VPMAXUBYrr, X86::VPMAXUBYrm, TB_ALIGN_32 },
+ { X86::VPMINSWYrr, X86::VPMINSWYrm, TB_ALIGN_32 },
+ { X86::VPMINUBYrr, X86::VPMINUBYrm, TB_ALIGN_32 },
+ { X86::VMPSADBWYrri, X86::VMPSADBWYrmi, TB_ALIGN_32 },
+ { X86::VPMULDQYrr, X86::VPMULDQYrm, TB_ALIGN_32 },
+ { X86::VPMULHRSWrr256, X86::VPMULHRSWrm256, TB_ALIGN_32 },
+ { X86::VPMULHUWYrr, X86::VPMULHUWYrm, TB_ALIGN_32 },
+ { X86::VPMULHWYrr, X86::VPMULHWYrm, TB_ALIGN_32 },
+ { X86::VPMULLDYrr, X86::VPMULLDYrm, TB_ALIGN_32 },
+ { X86::VPMULLWYrr, X86::VPMULLWYrm, TB_ALIGN_32 },
+ { X86::VPMULUDQYrr, X86::VPMULUDQYrm, TB_ALIGN_32 },
+ { X86::VPORYrr, X86::VPORYrm, TB_ALIGN_32 },
+ { X86::VPSADBWYrr, X86::VPSADBWYrm, TB_ALIGN_32 },
+ { X86::VPSHUFBYrr, X86::VPSHUFBYrm, TB_ALIGN_32 },
+ { X86::VPSIGNBYrr, X86::VPSIGNBYrm, TB_ALIGN_32 },
+ { X86::VPSIGNWYrr, X86::VPSIGNWYrm, TB_ALIGN_32 },
+ { X86::VPSIGNDYrr, X86::VPSIGNDYrm, TB_ALIGN_32 },
+ { X86::VPSLLDYrr, X86::VPSLLDYrm, TB_ALIGN_16 },
+ { X86::VPSLLQYrr, X86::VPSLLQYrm, TB_ALIGN_16 },
+ { X86::VPSLLWYrr, X86::VPSLLWYrm, TB_ALIGN_16 },
+ { X86::VPSLLVDrr, X86::VPSLLVDrm, TB_ALIGN_16 },
+ { X86::VPSLLVDYrr, X86::VPSLLVDYrm, TB_ALIGN_32 },
+ { X86::VPSLLVQrr, X86::VPSLLVQrm, TB_ALIGN_16 },
+ { X86::VPSLLVQYrr, X86::VPSLLVQYrm, TB_ALIGN_32 },
+ { X86::VPSRADYrr, X86::VPSRADYrm, TB_ALIGN_16 },
+ { X86::VPSRAWYrr, X86::VPSRAWYrm, TB_ALIGN_16 },
+ { X86::VPSRAVDrr, X86::VPSRAVDrm, TB_ALIGN_16 },
+ { X86::VPSRAVDYrr, X86::VPSRAVDYrm, TB_ALIGN_32 },
+ { X86::VPSRLDYrr, X86::VPSRLDYrm, TB_ALIGN_16 },
+ { X86::VPSRLQYrr, X86::VPSRLQYrm, TB_ALIGN_16 },
+ { X86::VPSRLWYrr, X86::VPSRLWYrm, TB_ALIGN_16 },
+ { X86::VPSRLVDrr, X86::VPSRLVDrm, TB_ALIGN_16 },
+ { X86::VPSRLVDYrr, X86::VPSRLVDYrm, TB_ALIGN_32 },
+ { X86::VPSRLVQrr, X86::VPSRLVQrm, TB_ALIGN_16 },
+ { X86::VPSRLVQYrr, X86::VPSRLVQYrm, TB_ALIGN_32 },
+ { X86::VPSUBBYrr, X86::VPSUBBYrm, TB_ALIGN_32 },
+ { X86::VPSUBDYrr, X86::VPSUBDYrm, TB_ALIGN_32 },
+ { X86::VPSUBSBYrr, X86::VPSUBSBYrm, TB_ALIGN_32 },
+ { X86::VPSUBSWYrr, X86::VPSUBSWYrm, TB_ALIGN_32 },
+ { X86::VPSUBWYrr, X86::VPSUBWYrm, TB_ALIGN_32 },
+ { X86::VPUNPCKHBWYrr, X86::VPUNPCKHBWYrm, TB_ALIGN_32 },
+ { X86::VPUNPCKHDQYrr, X86::VPUNPCKHDQYrm, TB_ALIGN_32 },
+ { X86::VPUNPCKHQDQYrr, X86::VPUNPCKHQDQYrm, TB_ALIGN_16 },
+ { X86::VPUNPCKHWDYrr, X86::VPUNPCKHWDYrm, TB_ALIGN_32 },
+ { X86::VPUNPCKLBWYrr, X86::VPUNPCKLBWYrm, TB_ALIGN_32 },
+ { X86::VPUNPCKLDQYrr, X86::VPUNPCKLDQYrm, TB_ALIGN_32 },
+ { X86::VPUNPCKLQDQYrr, X86::VPUNPCKLQDQYrm, TB_ALIGN_32 },
+ { X86::VPUNPCKLWDYrr, X86::VPUNPCKLWDYrm, TB_ALIGN_32 },
+ { X86::VPXORYrr, X86::VPXORYrm, TB_ALIGN_32 },
// FIXME: add AVX 256-bit foldable instructions
};
for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
- unsigned RegOp = OpTbl2[i][0];
- unsigned MemOp = OpTbl2[i][1];
- unsigned Flags = OpTbl2[i][2];
+ unsigned RegOp = OpTbl2[i].RegOp;
+ unsigned MemOp = OpTbl2[i].MemOp;
+ unsigned Flags = OpTbl2[i].Flags;
AddTableEntry(RegOp2MemOpTable2, MemOp2RegOpTable,
RegOp, MemOp,
// Index 2, folded load
@@ -946,7 +1169,6 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
switch (MI.getOpcode()) {
default:
llvm_unreachable(0);
- break;
case X86::MOVSX16rr8:
case X86::MOVZX16rr8:
case X86::MOVSX32rr8:
@@ -989,7 +1211,8 @@ bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
static bool isFrameLoadOpcode(int Opcode) {
switch (Opcode) {
- default: break;
+ default:
+ return false;
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
@@ -1011,9 +1234,7 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
return true;
- break;
}
- return false;
}
static bool isFrameStoreOpcode(int Opcode) {
@@ -1203,6 +1424,8 @@ static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
bool SeenDef = false;
for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
MachineOperand &MO = Iter->getOperand(j);
+ if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
+ SeenDef = true;
if (!MO.isReg())
continue;
if (MO.getReg() == X86::EFLAGS) {
@@ -1247,6 +1470,10 @@ static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
bool SawKill = false;
for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
MachineOperand &MO = Iter->getOperand(j);
+ // A register mask may clobber EFLAGS, but we should still look for a
+ // live EFLAGS def.
+ if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
+ SawKill = true;
if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
if (MO.isDef()) return MO.isDead();
if (MO.isKill()) SawKill = true;
@@ -1357,7 +1584,6 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
switch (MIOpc) {
default:
llvm_unreachable(0);
- break;
case X86::SHL16ri: {
unsigned ShAmt = MI->getOperand(2).getImm();
MIB.addReg(0).addImm(1 << ShAmt)
@@ -1392,9 +1618,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// Build an implicit UNDEF value and insert into it. This is OK because
// we'll be shifting and then extracting the lower 16 bits.
- BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
+ BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF),leaInReg2);
InsMI2 =
- BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
+ BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
.addReg(leaInReg2, RegState::Define, X86::sub_16bit)
.addReg(Src2, getKillRegState(isKill2));
addRegReg(MIB, leaInReg, true, leaInReg2, true);
@@ -1469,6 +1695,24 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(B, getKillRegState(isKill)).addImm(M);
break;
}
+ case X86::SHUFPDrri: {
+ assert(MI->getNumOperands() == 4 && "Unknown shufpd instruction!");
+ if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
+
+ unsigned B = MI->getOperand(1).getReg();
+ unsigned C = MI->getOperand(2).getReg();
+ if (B != C) return 0;
+ unsigned A = MI->getOperand(0).getReg();
+ unsigned M = MI->getOperand(3).getImm();
+
+ // Convert to PSHUFD mask.
+ M = ((M & 1) << 1) | ((M & 1) << 3) | ((M & 2) << 4) | ((M & 2) << 6) | 0x44;
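+ // Illustrative check: for M = 1 (shufpd $1, %xmm1, %xmm1, i.e.
+ // low = elt 1, high = elt 0) this yields 0x02 | 0x08 | 0x44 = 0x4e,
+ // a pshufd mask selecting dwords {2,3,0,1}, which swaps the two
+ // 64-bit halves as required.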
+
+ NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
+ .addReg(A, RegState::Define | getDeadRegState(isDead))
+ .addReg(B, getKillRegState(isKill)).addImm(M);
+ break;
+ }
case X86::SHL64ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
// NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
@@ -1597,7 +1841,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD32rr_DB: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc;
- TargetRegisterClass *RC;
+ const TargetRegisterClass *RC;
if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
Opc = X86::LEA64r;
RC = X86::GR64_NOSPRegisterClass;
@@ -1904,13 +2148,12 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
}
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
- const MCInstrDesc &MCID = MI->getDesc();
- if (!MCID.isTerminator()) return false;
+ if (!MI->isTerminator()) return false;
// Conditional branch is a special case.
- if (MCID.isBranch() && !MCID.isBarrier())
+ if (MI->isBranch() && !MI->isBarrier())
return true;
- if (!MCID.isPredicable())
+ if (!MI->isPredicable())
return true;
return !isPredicated(MI);
}
@@ -1936,7 +2179,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// A terminator that isn't a branch can't easily be handled by this
// analysis.
- if (!I->getDesc().isBranch())
+ if (!I->isBranch())
return true;
// Handle unconditional branches.
@@ -2420,7 +2663,9 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
switch (MI->getOpcode()) {
case X86::V_SET0:
- return Expand2AddrUndef(MI, get(HasAVX ? X86::VPXORrr : X86::PXORrr));
+ case X86::FsFLD0SS:
+ case X86::FsFLD0SD:
+ return Expand2AddrUndef(MI, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
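+ // Folding FsFLD0SS/FsFLD0SD into the same expansion works because a
+ // zeroed XMM register reads as 0.0 in every lane; (v)xorps reg,reg is
+ // the canonical dependency-free zeroing idiom in the FP domain.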
case X86::TEST8ri_NOREX:
MI->setDesc(get(X86::TEST8ri));
return true;
@@ -2624,6 +2869,10 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
///
static bool hasPartialRegUpdate(unsigned Opcode) {
switch (Opcode) {
+ case X86::CVTSI2SSrr:
+ case X86::CVTSI2SS64rr:
+ case X86::CVTSI2SDrr:
+ case X86::CVTSI2SD64rr:
case X86::CVTSD2SSrr:
case X86::Int_CVTSD2SSrr:
case X86::CVTSS2SDrr:
@@ -2631,7 +2880,9 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
case X86::RCPSSr:
case X86::RCPSSr_Int:
case X86::ROUNDSDr:
+ case X86::ROUNDSDr_Int:
case X86::ROUNDSSr:
+ case X86::ROUNDSSr_Int:
case X86::RSQRTSSr:
case X86::RSQRTSSr_Int:
case X86::SQRTSSr:
@@ -2643,7 +2894,9 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
case X86::Int_VCVTSS2SDrr:
case X86::VRCPSSr:
case X86::VROUNDSDr:
+ case X86::VROUNDSDr_Int:
case X86::VROUNDSSr:
+ case X86::VROUNDSSr_Int:
case X86::VRSQRTSSr:
case X86::VSQRTSSr:
return true;
@@ -2652,6 +2905,54 @@ static bool hasPartialRegUpdate(unsigned Opcode) {
return false;
}
+/// getPartialRegUpdateClearance - Inform the ExeDepsFix pass how many idle
+/// instructions we would like before a partial register update.
+unsigned X86InstrInfo::
+getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode()))
+ return 0;
+
+ // If MI is marked as reading Reg, the partial register update is wanted.
+ const MachineOperand &MO = MI->getOperand(0);
+ unsigned Reg = MO.getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (MO.readsReg() || MI->readsVirtualRegister(Reg))
+ return 0;
+ } else {
+ if (MI->readsRegister(Reg, TRI))
+ return 0;
+ }
+
+ // If any of the preceding 16 instructions are reading Reg, insert a
+ // dependency breaking instruction. The magic number is based on a few
+ // Nehalem experiments.
+ return 16;
+}
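+// Background sketch (informal): an instruction like cvtsi2ss %eax, %xmm0
+// writes only the low 32 bits of %xmm0, so out-of-order hardware must merge
+// the result with the register's stale upper bits, adding a false
+// dependency on the last full write of %xmm0. Returning 16 roughly tells
+// ExeDepsFix to break that dependency whenever the most recent write of
+// the register is fewer than 16 instructions back.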
+
+void X86InstrInfo::
+breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ unsigned Reg = MI->getOperand(OpNum).getReg();
+ if (X86::VR128RegClass.contains(Reg)) {
+ // These instructions are all floating point domain, so xorps is the best
+ // choice.
+ bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
+ unsigned Opc = HasAVX ? X86::VXORPSrr : X86::XORPSrr;
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg)
+ .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
+ } else if (X86::VR256RegClass.contains(Reg)) {
+ // Use vxorps to clear the full ymm register.
+ // It wants to read and write the xmm sub-register.
+ unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
+ BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg)
+ .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef)
+ .addReg(Reg, RegState::ImplicitDefine);
+ } else
+ return;
+ MI->addRegisterKilled(Reg, TRI, true);
+}
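+// Informally, the dependency-breaking code emitted above looks like
+// xorps %xmm0, %xmm0 ; xmm: zeroing idiom, reads no old value
+// vxorps %xmm0, %xmm0, %xmm0 ; ymm: VEX 128-bit ops zero bits 255:128,
+// ; so the xmm write clears the full ymm reg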
+
MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
@@ -2714,6 +3015,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
switch (LoadMI->getOpcode()) {
case X86::AVX_SET0PSY:
case X86::AVX_SET0PDY:
+ case X86::AVX2_SETALLONES:
+ case X86::AVX2_SET0:
Alignment = 32;
break;
case X86::V_SET0:
@@ -2722,11 +3025,9 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
Alignment = 16;
break;
case X86::FsFLD0SD:
- case X86::VFsFLD0SD:
Alignment = 8;
break;
case X86::FsFLD0SS:
- case X86::VFsFLD0SS:
Alignment = 4;
break;
default:
@@ -2759,10 +3060,10 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
case X86::AVX_SET0PSY:
case X86::AVX_SET0PDY:
case X86::AVX_SETALLONES:
+ case X86::AVX2_SETALLONES:
+ case X86::AVX2_SET0:
case X86::FsFLD0SD:
- case X86::FsFLD0SS:
- case X86::VFsFLD0SD:
- case X86::VFsFLD0SS: {
+ case X86::FsFLD0SS: {
// Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
// Create a constant-pool entry and operands to load from it.
@@ -2788,16 +3089,19 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineConstantPool &MCP = *MF.getConstantPool();
Type *Ty;
unsigned Opc = LoadMI->getOpcode();
- if (Opc == X86::FsFLD0SS || Opc == X86::VFsFLD0SS)
+ if (Opc == X86::FsFLD0SS)
Ty = Type::getFloatTy(MF.getFunction()->getContext());
- else if (Opc == X86::FsFLD0SD || Opc == X86::VFsFLD0SD)
+ else if (Opc == X86::FsFLD0SD)
Ty = Type::getDoubleTy(MF.getFunction()->getContext());
else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY)
Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8);
+ else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX2_SET0)
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
else
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
- bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX_SETALLONES);
+ bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX_SETALLONES ||
+ Opc == X86::AVX2_SETALLONES);
const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
Constant::getNullValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
@@ -3329,7 +3633,7 @@ unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
// These are the replaceable SSE instructions. Some of these have Int variants
// that we don't include here. We don't want to replace instructions selected
// by intrinsics.
-static const unsigned ReplaceableInstrs[][3] = {
+static const uint16_t ReplaceableInstrs[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr },
{ X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm },
@@ -3366,31 +3670,66 @@ static const unsigned ReplaceableInstrs[][3] = {
{ X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr },
{ X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr },
{ X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm },
- { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr },
+ { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr }
+};
+
+static const uint16_t ReplaceableInstrsAVX2[][3] = {
+ //PackedSingle PackedDouble PackedInt
+ { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm },
+ { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr },
+ { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm },
+ { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr },
+ { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm },
+ { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr },
+ { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm },
+ { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr },
+ { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
+ { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
+ { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm },
+ { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr },
+ { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm },
+ { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr }
};
// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.
-static const unsigned *lookup(unsigned opcode, unsigned domain) {
+static const uint16_t *lookup(unsigned opcode, unsigned domain) {
for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
if (ReplaceableInstrs[i][domain-1] == opcode)
return ReplaceableInstrs[i];
return 0;
}
+static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) {
+ for (unsigned i = 0, e = array_lengthof(ReplaceableInstrsAVX2); i != e; ++i)
+ if (ReplaceableInstrsAVX2[i][domain-1] == opcode)
+ return ReplaceableInstrsAVX2[i];
+ return 0;
+}
+
std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const {
uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
- return std::make_pair(domain,
- domain && lookup(MI->getOpcode(), domain) ? 0xe : 0);
+ bool hasAVX2 = TM.getSubtarget<X86Subtarget>().hasAVX2();
+ uint16_t validDomains = 0;
+ if (domain && lookup(MI->getOpcode(), domain))
+ validDomains = 0xe;
+ else if (domain && lookupAVX2(MI->getOpcode(), domain))
+ validDomains = hasAVX2 ? 0xe : 0x6;
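+ // Domain numbering (as encoded via X86II::SSEDomainShift): 1 is
+ // PackedSingle, 2 PackedDouble, 3 PackedInt. So 0xe marks all three
+ // domains legal, while 0x6 allows only PS/PD, since without AVX2 the
+ // 256-bit integer forms listed in ReplaceableInstrsAVX2 are unavailable.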
+ return std::make_pair(domain, validDomains);
}
void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
assert(Domain>0 && Domain<4 && "Invalid execution domain");
uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
assert(dom && "Not an SSE instruction");
- const unsigned *table = lookup(MI->getOpcode(), dom);
+ const uint16_t *table = lookup(MI->getOpcode(), dom);
+ if (!table) { // try the other table
+ assert((TM.getSubtarget<X86Subtarget>().hasAVX2() || Domain < 3) &&
+ "256-bit vector operations only available in AVX2");
+ table = lookupAVX2(MI->getOpcode(), dom);
+ }
assert(table && "Cannot change domain");
MI->setDesc(get(table[Domain-1]));
}
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.h b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
index 97009db..b23d756 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
@@ -1,4 +1,4 @@
-//===- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*- ===//
+//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,10 +14,10 @@
#ifndef X86INSTRUCTIONINFO_H
#define X86INSTRUCTIONINFO_H
-#include "llvm/Target/TargetInstrInfo.h"
#include "X86.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "X86GenInstrInfo.inc"
@@ -345,6 +345,11 @@ public:
void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;
+ unsigned getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const;
+ void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const;
+
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
unsigned OpNum,
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.td b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
index d54bf27..6a25312 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1,4 +1,4 @@
-//===- X86InstrInfo.td - Main X86 Instruction Definition ---*- tablegen -*-===//
+//===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -99,17 +99,16 @@ def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
+def SDT_X86WIN_FTOL : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
+
def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
-def SDT_X86MEMBARRIERNoSSE : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
[SDNPHasChain]>;
-def X86MemBarrierNoSSE : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIERNoSSE,
- [SDNPHasChain]>;
def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
[SDNPHasChain]>;
def X86SFence : SDNode<"X86ISD::SFENCE", SDT_X86MEMBARRIER,
@@ -226,6 +225,10 @@ def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86andn_flag : SDNode<"X86ISD::ANDN", SDTBinaryArithWithFlags>;
+def X86blsi_flag : SDNode<"X86ISD::BLSI", SDTUnaryArithWithFlags>;
+def X86blsmsk_flag : SDNode<"X86ISD::BLSMSK", SDTUnaryArithWithFlags>;
+def X86blsr_flag : SDNode<"X86ISD::BLSR", SDTUnaryArithWithFlags>;
+
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDTX86Void,
@@ -237,6 +240,9 @@ def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def X86WinFTOL : SDNode<"X86ISD::WIN_FTOL", SDT_X86WIN_FTOL,
+ [SDNPHasChain, SDNPOutGlue]>;
+
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
//
@@ -247,10 +253,31 @@ def ptr_rc_nosp : PointerLikeRegClass<1>;
// *mem - Operand definitions for the funky X86 addressing mode operands.
//
-def X86MemAsmOperand : AsmOperandClass {
- let Name = "Mem";
- let SuperClasses = [];
+def X86MemAsmOperand : AsmOperandClass {
+ let Name = "Mem"; let PredicateMethod = "isMem";
+}
+def X86Mem8AsmOperand : AsmOperandClass {
+ let Name = "Mem8"; let PredicateMethod = "isMem8";
+}
+def X86Mem16AsmOperand : AsmOperandClass {
+ let Name = "Mem16"; let PredicateMethod = "isMem16";
+}
+def X86Mem32AsmOperand : AsmOperandClass {
+ let Name = "Mem32"; let PredicateMethod = "isMem32";
+}
+def X86Mem64AsmOperand : AsmOperandClass {
+ let Name = "Mem64"; let PredicateMethod = "isMem64";
+}
+def X86Mem80AsmOperand : AsmOperandClass {
+ let Name = "Mem80"; let PredicateMethod = "isMem80";
+}
+def X86Mem128AsmOperand : AsmOperandClass {
+ let Name = "Mem128"; let PredicateMethod = "isMem128";
}
+def X86Mem256AsmOperand : AsmOperandClass {
+ let Name = "Mem256"; let PredicateMethod = "isMem256";
+}
+
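+// Informal note: splitting the catch-all "Mem" class into sized classes
+// gives the generated asm matcher per-width predicates (isMem8 ...
+// isMem256), presumably so size-qualified memory operands (e.g.
+// Intel-syntax "dword ptr") can be matched against the right forms.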
def X86AbsMemAsmOperand : AsmOperandClass {
let Name = "AbsMem";
let SuperClasses = [X86MemAsmOperand];
@@ -267,17 +294,28 @@ def opaque48mem : X86MemOperand<"printopaquemem">;
def opaque80mem : X86MemOperand<"printopaquemem">;
def opaque512mem : X86MemOperand<"printopaquemem">;
-def i8mem : X86MemOperand<"printi8mem">;
-def i16mem : X86MemOperand<"printi16mem">;
-def i32mem : X86MemOperand<"printi32mem">;
-def i64mem : X86MemOperand<"printi64mem">;
-def i128mem : X86MemOperand<"printi128mem">;
-def i256mem : X86MemOperand<"printi256mem">;
-def f32mem : X86MemOperand<"printf32mem">;
-def f64mem : X86MemOperand<"printf64mem">;
-def f80mem : X86MemOperand<"printf80mem">;
-def f128mem : X86MemOperand<"printf128mem">;
-def f256mem : X86MemOperand<"printf256mem">;
+def i8mem : X86MemOperand<"printi8mem"> {
+ let ParserMatchClass = X86Mem8AsmOperand; }
+def i16mem : X86MemOperand<"printi16mem"> {
+ let ParserMatchClass = X86Mem16AsmOperand; }
+def i32mem : X86MemOperand<"printi32mem"> {
+ let ParserMatchClass = X86Mem32AsmOperand; }
+def i64mem : X86MemOperand<"printi64mem"> {
+ let ParserMatchClass = X86Mem64AsmOperand; }
+def i128mem : X86MemOperand<"printi128mem"> {
+ let ParserMatchClass = X86Mem128AsmOperand; }
+def i256mem : X86MemOperand<"printi256mem"> {
+ let ParserMatchClass = X86Mem256AsmOperand; }
+def f32mem : X86MemOperand<"printf32mem"> {
+ let ParserMatchClass = X86Mem32AsmOperand; }
+def f64mem : X86MemOperand<"printf64mem"> {
+ let ParserMatchClass = X86Mem64AsmOperand; }
+def f80mem : X86MemOperand<"printf80mem"> {
+ let ParserMatchClass = X86Mem80AsmOperand; }
+def f128mem : X86MemOperand<"printf128mem"> {
+ let ParserMatchClass = X86Mem128AsmOperand; }
+def f256mem : X86MemOperand<"printf256mem"> {
+ let ParserMatchClass = X86Mem256AsmOperand; }
}
// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
@@ -285,7 +323,7 @@ def f256mem : X86MemOperand<"printf256mem">;
def i8mem_NOREX : Operand<i64> {
let PrintMethod = "printi8mem";
let MIOperandInfo = (ops GR64_NOREX, i8imm, GR64_NOREX_NOSP, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
+ let ParserMatchClass = X86Mem8AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -299,7 +337,7 @@ def ptr_rc_tailcall : PointerLikeRegClass<2>;
def i32mem_TC : Operand<i32> {
let PrintMethod = "printi32mem";
let MIOperandInfo = (ops GR32_TC, i8imm, GR32_TC, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
+ let ParserMatchClass = X86Mem32AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -310,7 +348,7 @@ def i64mem_TC : Operand<i64> {
let PrintMethod = "printi64mem";
let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
ptr_rc_tailcall, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
+ let ParserMatchClass = X86Mem64AsmOperand;
let OperandType = "OPERAND_MEMORY";
}
@@ -336,6 +374,11 @@ def SSECC : Operand<i8> {
let OperandType = "OPERAND_IMMEDIATE";
}
+def AVXCC : Operand<i8> {
+ let PrintMethod = "printSSECC";
+ let OperandType = "OPERAND_IMMEDIATE";
+}
+
class ImmSExtAsmOperandClass : AsmOperandClass {
let SuperClasses = [ImmAsmOperand];
let RenderMethod = "addImmOperands";
@@ -466,37 +509,32 @@ def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
-
def HasAVX : Predicate<"Subtarget->hasAVX()">;
-def HasXMMInt : Predicate<"Subtarget->hasXMMInt()">;
+def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES : Predicate<"Subtarget->hasAES()">;
def HasCLMUL : Predicate<"Subtarget->hasCLMUL()">;
def HasFMA3 : Predicate<"Subtarget->hasFMA3()">;
def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
+def HasXOP : Predicate<"Subtarget->hasXOP()">;
def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
def HasF16C : Predicate<"Subtarget->hasF16C()">;
+def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
def HasBMI : Predicate<"Subtarget->hasBMI()">;
-def FPStackf32 : Predicate<"!Subtarget->hasXMM()">;
-def FPStackf64 : Predicate<"!Subtarget->hasXMMInt()">;
+def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
+def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
+def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
def In32BitMode : Predicate<"!Subtarget->is64Bit()">,
AssemblerPredicate<"!Mode64Bit">;
def In64BitMode : Predicate<"Subtarget->is64Bit()">,
AssemblerPredicate<"Mode64Bit">;
def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
-def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
-def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">,
- AssemblerPredicate<"ModeNaCl">;
-def IsNaCl32 : Predicate<"Subtarget->isTargetNaCl32()">,
- AssemblerPredicate<"ModeNaCl,!Mode64Bit">;
-def IsNaCl64 : Predicate<"Subtarget->isTargetNaCl64()">,
- AssemblerPredicate<"ModeNaCl,Mode64Bit">;
-def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">,
- AssemblerPredicate<"!ModeNaCl">;
+def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
+def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
def FarData : Predicate<"TM.getCodeModel() != CodeModel::Small &&"
@@ -1375,7 +1413,7 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
}
//===----------------------------------------------------------------------===//
-// TZCNT Instruction
+// BMI Instructions
//
let Predicates = [HasBMI], Defs = [EFLAGS] in {
def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
@@ -1405,6 +1443,83 @@ let Predicates = [HasBMI], Defs = [EFLAGS] in {
(implicit EFLAGS)]>, XS;
}
+multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
+ RegisterClass RC, X86MemOperand x86memop, SDNode OpNode,
+ PatFrag ld_frag> {
+ def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
+ !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, EFLAGS, (OpNode RC:$src))]>, T8, VEX_4V;
+ def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, EFLAGS, (OpNode (ld_frag addr:$src)))]>,
+ T8, VEX_4V;
+}
+
+let Predicates = [HasBMI], Defs = [EFLAGS] in {
+ defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem,
+ X86blsr_flag, loadi32>;
+ defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem,
+ X86blsr_flag, loadi64>, VEX_W;
+ defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem,
+ X86blsmsk_flag, loadi32>;
+ defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem,
+ X86blsmsk_flag, loadi64>, VEX_W;
+ defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem,
+ X86blsi_flag, loadi32>;
+ defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem,
+ X86blsi_flag, loadi64>, VEX_W;
+}
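+// For reference (standard BMI1 semantics, not restated in this file):
+// blsr dst = src & (src - 1) (clear lowest set bit)
+// blsmsk dst = src ^ (src - 1) (mask up to and including lowest set bit)
+// blsi dst = src & -src (isolate lowest set bit)
+// All three share opcode 0xF3; MRM1r/MRM2r/MRM3r select the operation via
+// the ModRM reg field, and VEX_4V puts the destination in VEX.vvvv.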
+
+multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
+ X86MemOperand x86memop, Intrinsic Int,
+ PatFrag ld_frag> {
+ def rr : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
+ T8, VEX_4VOp3;
+ def rm : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
+ !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
+ (implicit EFLAGS)]>, T8, VEX_4VOp3;
+}
+
+let Predicates = [HasBMI], Defs = [EFLAGS] in {
+ defm BEXTR32 : bmi_bextr_bzhi<0xF7, "bextr{l}", GR32, i32mem,
+ int_x86_bmi_bextr_32, loadi32>;
+ defm BEXTR64 : bmi_bextr_bzhi<0xF7, "bextr{q}", GR64, i64mem,
+ int_x86_bmi_bextr_64, loadi64>, VEX_W;
+}
+
+let Predicates = [HasBMI2], Defs = [EFLAGS] in {
+ defm BZHI32 : bmi_bextr_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
+ int_x86_bmi_bzhi_32, loadi32>;
+ defm BZHI64 : bmi_bextr_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
+ int_x86_bmi_bzhi_64, loadi64>, VEX_W;
+}
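+// For reference: bextr dst = (src1 >> start) & ((1 << len) - 1), with
+// start = src2[7:0] and len = src2[15:8]; bzhi dst = src1 with all bits
+// at positions >= src2[7:0] cleared. VEX_4VOp3 here encodes the control
+// register (the third operand) in VEX.vvvv rather than the destination.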
+
+multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
+ X86MemOperand x86memop, Intrinsic Int,
+ PatFrag ld_frag> {
+ def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (Int RC:$src1, RC:$src2))]>,
+ VEX_4V;
+ def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))]>, VEX_4V;
+}
+
+let Predicates = [HasBMI2] in {
+ defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
+ int_x86_bmi_pdep_32, loadi32>, T8XD;
+ defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
+ int_x86_bmi_pdep_64, loadi64>, T8XD, VEX_W;
+ defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
+ int_x86_bmi_pext_32, loadi32>, T8XS;
+ defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
+ int_x86_bmi_pext_64, loadi64>, T8XS, VEX_W;
+}
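+// For reference: pdep scatters the low-order bits of src1 into the set-bit
+// positions of the mask src2; pext does the inverse gather. Worked example:
+// pdep(src = 0b101, mask = 0b11010) places src bits 0,1,2 at mask
+// positions 1,3,4, giving 0b10010.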
+
//===----------------------------------------------------------------------===//
// Subsystems.
//===----------------------------------------------------------------------===//
@@ -1424,12 +1539,16 @@ include "X86InstrFragmentsSIMD.td"
// FMA - Fused Multiply-Add support (requires FMA)
include "X86InstrFMA.td"
+// XOP
+include "X86InstrXOP.td"
+
// SSE, MMX and 3DNow! vector support.
include "X86InstrSSE.td"
include "X86InstrMMX.td"
include "X86Instr3DNow.td"
include "X86InstrVMX.td"
+include "X86InstrSVM.td"
// System instructions.
include "X86InstrSystem.td"
@@ -1445,10 +1564,11 @@ def : MnemonicAlias<"call", "calll">, Requires<[In32BitMode]>;
def : MnemonicAlias<"call", "callq">, Requires<[In64BitMode]>;
def : MnemonicAlias<"cbw", "cbtw">;
+def : MnemonicAlias<"cwde", "cwtl">;
def : MnemonicAlias<"cwd", "cwtd">;
def : MnemonicAlias<"cdq", "cltd">;
-def : MnemonicAlias<"cwde", "cwtl">;
def : MnemonicAlias<"cdqe", "cltq">;
+def : MnemonicAlias<"cqo", "cqto">;
// lret maps to lretl, it is not ambiguous with lretq.
def : MnemonicAlias<"lret", "lretl">;
@@ -1497,6 +1617,7 @@ def : MnemonicAlias<"verrw", "verr">;
// System instruction aliases.
def : MnemonicAlias<"iret", "iretl">;
def : MnemonicAlias<"sysret", "sysretl">;
+def : MnemonicAlias<"sysexit", "sysexitl">;
def : MnemonicAlias<"lgdtl", "lgdt">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lgdtq", "lgdt">, Requires<[In64BitMode]>;
@@ -1516,6 +1637,8 @@ def : MnemonicAlias<"fcmovna", "fcmovbe">;
def : MnemonicAlias<"fcmovae", "fcmovnb">;
def : MnemonicAlias<"fcomip", "fcompi">;
def : MnemonicAlias<"fildq", "fildll">;
+def : MnemonicAlias<"fistpq", "fistpll">;
+def : MnemonicAlias<"fisttpq", "fisttpll">;
def : MnemonicAlias<"fldcww", "fldcw">;
def : MnemonicAlias<"fnstcww", "fnstcw">;
def : MnemonicAlias<"fnstsww", "fnstsw">;
@@ -1737,20 +1860,20 @@ def : InstAlias<"outl $port", (OUT32ir i8imm:$port)>;
// errors, since its encoding is the most compact.
def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem)>;
-// shld/shrd op,op -> shld op, op, 1
-def : InstAlias<"shldw $r1, $r2", (SHLD16rri8 GR16:$r1, GR16:$r2, 1)>;
-def : InstAlias<"shldl $r1, $r2", (SHLD32rri8 GR32:$r1, GR32:$r2, 1)>;
-def : InstAlias<"shldq $r1, $r2", (SHLD64rri8 GR64:$r1, GR64:$r2, 1)>;
-def : InstAlias<"shrdw $r1, $r2", (SHRD16rri8 GR16:$r1, GR16:$r2, 1)>;
-def : InstAlias<"shrdl $r1, $r2", (SHRD32rri8 GR32:$r1, GR32:$r2, 1)>;
-def : InstAlias<"shrdq $r1, $r2", (SHRD64rri8 GR64:$r1, GR64:$r2, 1)>;
-
-def : InstAlias<"shldw $mem, $reg", (SHLD16mri8 i16mem:$mem, GR16:$reg, 1)>;
-def : InstAlias<"shldl $mem, $reg", (SHLD32mri8 i32mem:$mem, GR32:$reg, 1)>;
-def : InstAlias<"shldq $mem, $reg", (SHLD64mri8 i64mem:$mem, GR64:$reg, 1)>;
-def : InstAlias<"shrdw $mem, $reg", (SHRD16mri8 i16mem:$mem, GR16:$reg, 1)>;
-def : InstAlias<"shrdl $mem, $reg", (SHRD32mri8 i32mem:$mem, GR32:$reg, 1)>;
-def : InstAlias<"shrdq $mem, $reg", (SHRD64mri8 i64mem:$mem, GR64:$reg, 1)>;
+// shld/shrd op,op -> shld op, op, CL
+def : InstAlias<"shldw $r2, $r1", (SHLD16rrCL GR16:$r1, GR16:$r2)>;
+def : InstAlias<"shldl $r2, $r1", (SHLD32rrCL GR32:$r1, GR32:$r2)>;
+def : InstAlias<"shldq $r2, $r1", (SHLD64rrCL GR64:$r1, GR64:$r2)>;
+def : InstAlias<"shrdw $r2, $r1", (SHRD16rrCL GR16:$r1, GR16:$r2)>;
+def : InstAlias<"shrdl $r2, $r1", (SHRD32rrCL GR32:$r1, GR32:$r2)>;
+def : InstAlias<"shrdq $r2, $r1", (SHRD64rrCL GR64:$r1, GR64:$r2)>;
+
+def : InstAlias<"shldw $reg, $mem", (SHLD16mrCL i16mem:$mem, GR16:$reg)>;
+def : InstAlias<"shldl $reg, $mem", (SHLD32mrCL i32mem:$mem, GR32:$reg)>;
+def : InstAlias<"shldq $reg, $mem", (SHLD64mrCL i64mem:$mem, GR64:$reg)>;
+def : InstAlias<"shrdw $reg, $mem", (SHRD16mrCL i16mem:$mem, GR16:$reg)>;
+def : InstAlias<"shrdl $reg, $mem", (SHRD32mrCL i32mem:$mem, GR32:$reg)>;
+def : InstAlias<"shrdq $reg, $mem", (SHRD64mrCL i64mem:$mem, GR64:$reg)>;
/* FIXME: This is disabled because the asm matcher is currently incapable of
* matching a fixed immediate like $1.
diff --git a/contrib/llvm/lib/Target/X86/X86InstrMMX.td b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
index b2d9fca..63f96b6 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
@@ -1,4 +1,4 @@
-//====- X86InstrMMX.td - Describe the MMX Instruction Set --*- tablegen -*-===//
+//===-- X86InstrMMX.td - Describe the MMX Instruction Set --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -105,19 +105,23 @@ multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
string asm, Domain d> {
def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
- [(set DstRC:$dst, (Int SrcRC:$src))], d>;
+ [(set DstRC:$dst, (Int SrcRC:$src))],
+ IIC_DEFAULT, d>;
def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
- [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
+ [(set DstRC:$dst, (Int (ld_frag addr:$src)))],
+ IIC_DEFAULT, d>;
}
multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
PatFrag ld_frag, string asm, Domain d> {
def irr : PI<opc, MRMSrcReg, (outs DstRC:$dst),(ins DstRC:$src1, SrcRC:$src2),
- asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
+ asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
+ IIC_DEFAULT, d>;
def irm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
(ins DstRC:$src1, x86memop:$src2), asm,
- [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
+ [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
+ IIC_DEFAULT, d>;
}
//===----------------------------------------------------------------------===//
@@ -175,25 +179,25 @@ def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (x86mmx VR64:$src), addr:$dst)]>;
-def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "movdq2q\t{$src, $dst|$dst, $src}",
+def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
+ (ins VR128:$src), "movdq2q\t{$src, $dst|$dst, $src}",
[(set VR64:$dst,
(x86mmx (bitconvert
(i64 (vector_extract (v2i64 VR128:$src),
(iPTR 0))))))]>;
-def MMX_MOVQ2DQrr : SSDIi8<0xD6, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
- "movq2dq\t{$src, $dst|$dst, $src}",
+def MMX_MOVQ2DQrr : SSDIi8<0xD6, MRMSrcReg, (outs VR128:$dst),
+ (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (scalar_to_vector
(i64 (bitconvert (x86mmx VR64:$src))))))]>;
let neverHasSideEffects = 1 in
-def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst), (ins VR64:$src),
- "movq2dq\t{$src, $dst|$dst, $src}", []>;
+def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst),
+ (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", []>;
-def MMX_MOVFR642Qrr: SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins FR64:$src),
- "movdq2q\t{$src, $dst|$dst, $src}", []>;
+def MMX_MOVFR642Qrr: SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst),
+ (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}", []>;
def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movntq\t{$src, $dst|$dst, $src}",
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
index d3ced23..408ab167 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
@@ -1,4 +1,4 @@
-//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
+//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,6 +13,126 @@
//
//===----------------------------------------------------------------------===//
+class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
+ InstrItinClass rr = arg_rr;
+ InstrItinClass rm = arg_rm;
+}
+
+class SizeItins<OpndItins arg_s, OpndItins arg_d> {
+ OpndItins s = arg_s;
+ OpndItins d = arg_d;
+}
+
+
+class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
+ InstrItinClass arg_ri> {
+ InstrItinClass rr = arg_rr;
+ InstrItinClass rm = arg_rm;
+ InstrItinClass ri = arg_ri;
+}
+
+
+// scalar
+def SSE_ALU_F32S : OpndItins<
+ IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
+>;
+
+def SSE_ALU_F64S : OpndItins<
+ IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
+>;
+
+def SSE_ALU_ITINS_S : SizeItins<
+ SSE_ALU_F32S, SSE_ALU_F64S
+>;
+
+def SSE_MUL_F32S : OpndItins<
+ IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
+>;
+
+def SSE_MUL_F64S : OpndItins<
+ IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
+>;
+
+def SSE_MUL_ITINS_S : SizeItins<
+ SSE_MUL_F32S, SSE_MUL_F64S
+>;
+
+def SSE_DIV_F32S : OpndItins<
+ IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
+>;
+
+def SSE_DIV_F64S : OpndItins<
+ IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
+>;
+
+def SSE_DIV_ITINS_S : SizeItins<
+ SSE_DIV_F32S, SSE_DIV_F64S
+>;
+
+// parallel
+def SSE_ALU_F32P : OpndItins<
+ IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
+>;
+
+def SSE_ALU_F64P : OpndItins<
+ IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
+>;
+
+def SSE_ALU_ITINS_P : SizeItins<
+ SSE_ALU_F32P, SSE_ALU_F64P
+>;
+
+def SSE_MUL_F32P : OpndItins<
+ IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
+>;
+
+def SSE_MUL_F64P : OpndItins<
+ IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
+>;
+
+def SSE_MUL_ITINS_P : SizeItins<
+ SSE_MUL_F32P, SSE_MUL_F64P
+>;
+
+def SSE_DIV_F32P : OpndItins<
+ IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
+>;
+
+def SSE_DIV_F64P : OpndItins<
+ IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
+>;
+
+def SSE_DIV_ITINS_P : SizeItins<
+ SSE_DIV_F32P, SSE_DIV_F64P
+>;
+
+def SSE_BIT_ITINS_P : OpndItins<
+ IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
+>;
+
+def SSE_INTALU_ITINS_P : OpndItins<
+ IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
+>;
+
+def SSE_INTALUQ_ITINS_P : OpndItins<
+ IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
+>;
+
+def SSE_INTMUL_ITINS_P : OpndItins<
+ IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
+>;
+
+def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
+ IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
+>;
+
+def SSE_MOVA_ITINS : OpndItins<
+ IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
+>;
+
+def SSE_MOVU_ITINS : OpndItins<
+ IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
+>;
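+// How these bundles are consumed (informal sketch): multiclasses below take
+// an OpndItins (or SizeItins) parameter and pick the per-form itinerary,
+// roughly
+// def rr : SI<..., [...], itins.rr>;
+// def rm : SI<..., [...], itins.rm>;
+// as sse12_fp_scalar does, with SizeItins supplying itins.s / itins.d for
+// the f32/f64 variants.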
//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
@@ -21,25 +141,27 @@
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
RegisterClass RC, X86MemOperand x86memop,
+ OpndItins itins,
bit Is2Addr = 1> {
let isCommutable = 1 in {
def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
+ [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr>;
}
def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
+ [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm>;
}
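
// As a sketch of how the itineraries thread through (this instantiation is
// invented for illustration and is not a def from this file):
//   defm FOO : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem,
//                              SSE_ALU_F32S>;
// would give FOOrr the IIC_SSE_ALU_F32S_RR itinerary and FOOrm the
// IIC_SSE_ALU_F32S_RM one.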
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
string asm, string SSEVer, string FPSizeStr,
Operand memopr, ComplexPattern mem_cpat,
+ OpndItins itins,
bit Is2Addr = 1> {
def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
@@ -47,72 +169,74 @@ multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (!cast<Intrinsic>(
!strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
- RC:$src1, RC:$src2))]>;
+ RC:$src1, RC:$src2))], itins.rr>;
def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
SSEVer, "_", OpcodeStr, FPSizeStr))
- RC:$src1, mem_cpat:$src2))]>;
+ RC:$src1, mem_cpat:$src2))], itins.rm>;
}
/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
RegisterClass RC, ValueType vt,
X86MemOperand x86memop, PatFrag mem_frag,
- Domain d, bit Is2Addr = 1> {
+ Domain d, OpndItins itins, bit Is2Addr = 1> {
let isCommutable = 1 in
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
+ [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>;
let mayLoad = 1 in
def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
+ [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
+ itins.rm, d>;
}
/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
string OpcodeStr, X86MemOperand x86memop,
list<dag> pat_rr, list<dag> pat_rm,
- bit Is2Addr = 1> {
- let isCommutable = 1 in
+ bit Is2Addr = 1,
+ bit rr_hasSideEffects = 0> {
+ let isCommutable = 1, neverHasSideEffects = rr_hasSideEffects in
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- pat_rr, d>;
+ pat_rr, IIC_DEFAULT, d>;
def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- pat_rm, d>;
+ pat_rm, IIC_DEFAULT, d>;
}
/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
string asm, string SSEVer, string FPSizeStr,
X86MemOperand x86memop, PatFrag mem_frag,
- Domain d, bit Is2Addr = 1> {
+ Domain d, OpndItins itins, bit Is2Addr = 1> {
def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (!cast<Intrinsic>(
!strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
- RC:$src1, RC:$src2))], d>;
+ RC:$src1, RC:$src2))], IIC_DEFAULT, d>;
def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set RC:$dst, (!cast<Intrinsic>(
!strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
- RC:$src1, (mem_frag addr:$src2)))], d>;
+ RC:$src1, (mem_frag addr:$src2)))], IIC_DEFAULT, d>;
}
//===----------------------------------------------------------------------===//
@@ -170,7 +294,7 @@ def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion
-let Predicates = [HasXMMInt] in {
+let Predicates = [HasSSE2] in {
def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
@@ -239,21 +363,13 @@ let Predicates = [HasAVX] in {
}
// Alias instructions that map fld0 to pxor for sse.
-// FIXME: Set encoding to pseudo!
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
- canFoldAsLoad = 1 in {
- def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
- [(set FR32:$dst, fp32imm0)]>,
- Requires<[HasSSE1]>, TB, OpSize;
- def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
- [(set FR64:$dst, fpimm0)]>,
- Requires<[HasSSE2]>, TB, OpSize;
- def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
- [(set FR32:$dst, fp32imm0)]>,
- Requires<[HasAVX]>, TB, OpSize, VEX_4V;
- def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
- [(set FR64:$dst, fpimm0)]>,
- Requires<[HasAVX]>, TB, OpSize, VEX_4V;
+// This is expanded by ExpandPostRAPseudos.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isPseudo = 1 in {
+ def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
+ [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
+ def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
+ [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}
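
// (Illustrative note: after register allocation the pseudo is rewritten by
// ExpandPostRAPseudos into a real zeroing idiom, conceptually something like
//   xorps %xmm0, %xmm0
// so materializing 0.0 costs no constant-pool load.)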
//===----------------------------------------------------------------------===//
@@ -286,16 +402,35 @@ def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1, Predicates = [HasAVX] in {
+ isCodeGenOnly = 1 in {
+let Predicates = [HasAVX] in {
def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
[(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
[(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
}
+let Predicates = [HasAVX2], neverHasSideEffects = 1 in
+def AVX2_SET0 : PDI<0xef, MRMInitReg, (outs VR256:$dst), (ins), "",
+ []>, VEX_4V;
+}
+let Predicates = [HasAVX2], AddedComplexity = 5 in {
+ def : Pat<(v4i64 immAllZerosV), (AVX2_SET0)>;
+ def : Pat<(v8i32 immAllZerosV), (AVX2_SET0)>;
+ def : Pat<(v16i16 immAllZerosV), (AVX2_SET0)>;
+ def : Pat<(v32i8 immAllZerosV), (AVX2_SET0)>;
+}
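+// (Note: the AddedComplexity bump above merely biases instruction selection
+// toward the single AVX2_SET0 and away from the generic SUBREG_TO_REG
+// zeroing patterns below when AVX2 is available.)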
// AVX has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
+def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
+def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
+ (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
+
+def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
+def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
+ (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
+
def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
(SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
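
// (Sketch: the (i32 0) operand of SUBREG_TO_REG asserts that the V_SET0
// placed in sub_xmm already defines the rest of the ymm register as zero,
// which holds because VEX-encoded 128-bit ops clear the upper 128 bits.)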
@@ -310,13 +445,16 @@ def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
- def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v4i32 immAllOnesV))]>;
-let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
+ isCodeGenOnly = 1, ExeDomain = SSEPackedInt in {
+ let Predicates = [HasAVX] in
def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
[(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
+ def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4i32 immAllOnesV))]>;
+ let Predicates = [HasAVX2] in
+ def AVX2_SETALLONES : PDI<0x76, MRMInitReg, (outs VR256:$dst), (ins), "",
+ [(set VR256:$dst, (v8i32 immAllOnesV))]>, VEX_4V;
+}
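+// (For reference: opcode 0x76 is pcmpeqd; comparing a register with itself
+// sets every bit, which is the standard all-ones idiom these defs encode.)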
//===----------------------------------------------------------------------===//
@@ -329,22 +467,25 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
// in terms of a copy, and as just mentioned, we don't use movss/movsd for copies.
//===----------------------------------------------------------------------===//
-class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
+class sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt, string asm> :
SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
- [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
+ [(set VR128:$dst, (vt (OpNode VR128:$src1,
+ (scalar_to_vector RC:$src2))))],
+ IIC_SSE_MOV_S_RR>;
// Loading from memory automatically zeroing upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
PatFrag mem_pat, string OpcodeStr> :
SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (mem_pat addr:$src))]>;
+ [(set RC:$dst, (mem_pat addr:$src))],
+ IIC_SSE_MOV_S_RM>;
// AVX
-def VMOVSSrr : sse12_move_rr<FR32, v4f32,
+def VMOVSSrr : sse12_move_rr<FR32, X86Movss, v4f32,
"movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V,
VEX_LIG;
-def VMOVSDrr : sse12_move_rr<FR64, v2f64,
+def VMOVSDrr : sse12_move_rr<FR64, X86Movsd, v2f64,
"movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V,
VEX_LIG;
@@ -352,11 +493,13 @@ def VMOVSDrr : sse12_move_rr<FR64, v2f64,
let isCodeGenOnly = 1 in {
def VMOVSSrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src1, FR32:$src2),
- "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
+ IIC_SSE_MOV_S_RR>,
XS, VEX_4V, VEX_LIG;
def VMOVSDrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src1, FR64:$src2),
- "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
+ IIC_SSE_MOV_S_RR>,
XD, VEX_4V, VEX_LIG;
}
@@ -370,26 +513,30 @@ let canFoldAsLoad = 1, isReMaterializable = 1 in {
def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
"movss\t{$src, $dst|$dst, $src}",
- [(store FR32:$src, addr:$dst)]>, XS, VEX, VEX_LIG;
+ [(store FR32:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
+ XS, VEX, VEX_LIG;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
"movsd\t{$src, $dst|$dst, $src}",
- [(store FR64:$src, addr:$dst)]>, XD, VEX, VEX_LIG;
+ [(store FR64:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
+ XD, VEX, VEX_LIG;
// SSE1 & 2
let Constraints = "$src1 = $dst" in {
- def MOVSSrr : sse12_move_rr<FR32, v4f32,
+ def MOVSSrr : sse12_move_rr<FR32, X86Movss, v4f32,
"movss\t{$src2, $dst|$dst, $src2}">, XS;
- def MOVSDrr : sse12_move_rr<FR64, v2f64,
+ def MOVSDrr : sse12_move_rr<FR64, X86Movsd, v2f64,
"movsd\t{$src2, $dst|$dst, $src2}">, XD;
// For the disassembler
let isCodeGenOnly = 1 in {
def MOVSSrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src1, FR32:$src2),
- "movss\t{$src2, $dst|$dst, $src2}", []>, XS;
+ "movss\t{$src2, $dst|$dst, $src2}", [],
+ IIC_SSE_MOV_S_RR>, XS;
def MOVSDrr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src1, FR64:$src2),
- "movsd\t{$src2, $dst|$dst, $src2}", []>, XD;
+ "movsd\t{$src2, $dst|$dst, $src2}", [],
+ IIC_SSE_MOV_S_RR>, XD;
}
}
@@ -402,153 +549,14 @@ let canFoldAsLoad = 1, isReMaterializable = 1 in {
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
"movss\t{$src, $dst|$dst, $src}",
- [(store FR32:$src, addr:$dst)]>;
+ [(store FR32:$src, addr:$dst)], IIC_SSE_MOV_S_MR>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
"movsd\t{$src, $dst|$dst, $src}",
- [(store FR64:$src, addr:$dst)]>;
+ [(store FR64:$src, addr:$dst)], IIC_SSE_MOV_S_MR>;
// Patterns
-let Predicates = [HasSSE1] in {
- let AddedComplexity = 15 in {
- // Extract the low 32-bit value from one vector and insert it into another.
- def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
- (MOVSSrr (v4f32 VR128:$src1),
- (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
- def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
- (MOVSSrr (v4i32 VR128:$src1),
- (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
-
- // Move scalar to XMM zero-extended, zeroing a VR128 then do a
- // MOVSS to the lower bits.
- def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
- (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
- def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
- (MOVSSrr (v4f32 (V_SET0)),
- (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
- def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
- (MOVSSrr (v4i32 (V_SET0)),
- (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
- }
-
- let AddedComplexity = 20 in {
- // MOVSSrm zeros the high parts of the register; represent this
- // with SUBREG_TO_REG.
- def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
- def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
- def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
- }
-
- // Extract and store.
- def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
- addr:$dst),
- (MOVSSmr addr:$dst,
- (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
-
- // Shuffle with MOVSS
- def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
- (MOVSSrr VR128:$src1, FR32:$src2)>;
- def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
- (MOVSSrr (v4i32 VR128:$src1),
- (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
- def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
- (MOVSSrr (v4f32 VR128:$src1),
- (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
-}
-
-let Predicates = [HasSSE2] in {
- let AddedComplexity = 15 in {
- // Extract the low 64-bit value from one vector and insert it into another.
- def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
- (MOVSDrr (v2f64 VR128:$src1),
- (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
- def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
- (MOVSDrr (v2i64 VR128:$src1),
- (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
-
- // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
- def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;
- def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;
-
- // Move scalar to XMM zero-extended, zeroing a VR128 then do a
- // MOVSD to the lower bits.
- def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
- (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
- }
-
- let AddedComplexity = 20 in {
- // MOVSDrm zeros the high parts of the register; represent this
- // with SUBREG_TO_REG.
- def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
- def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
- def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
- def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
- def : Pat<(v2f64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
- }
-
- // Extract and store.
- def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
- addr:$dst),
- (MOVSDmr addr:$dst,
- (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
-
- // Shuffle with MOVSD
- def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
- (MOVSDrr VR128:$src1, FR64:$src2)>;
- def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
- (MOVSDrr (v2i64 VR128:$src1),
- (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
- def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
- (MOVSDrr (v2f64 VR128:$src1),
- (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
- def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
- def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),sub_sd))>;
-
- // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
- // is during lowering, where it's not possible to recognize the fold cause
- // it has two uses through a bitcast. One use disappears at isel time and the
- // fold opportunity reappears.
- def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
- def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),sub_sd))>;
-}
-
let Predicates = [HasAVX] in {
let AddedComplexity = 15 in {
- // Extract the low 32-bit value from one vector and insert it into another.
- def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
- (VMOVSSrr (v4f32 VR128:$src1),
- (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
- def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
- (VMOVSSrr (v4i32 VR128:$src1),
- (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
-
- // Extract the low 64-bit value from one vector and insert it into another.
- def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
- (VMOVSDrr (v2f64 VR128:$src1),
- (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
- def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
- (VMOVSDrr (v2i64 VR128:$src1),
- (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
-
- // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
- def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
- (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;
- def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
- (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;
-
// Move scalar to XMM zero-extended, zeroing a VR128 then do a
// MOVS{S,D} to the lower bits.
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
@@ -561,6 +569,16 @@ let Predicates = [HasAVX] in {
(EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
(VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
+
+ // Move low f32 and clear high bits.
+ def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSSrr (v4f32 (V_SET0)),
+ (EXTRACT_SUBREG (v8f32 VR256:$src), sub_ss)), sub_xmm)>;
+ def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSSrr (v4i32 (V_SET0)),
+ (EXTRACT_SUBREG (v8i32 VR256:$src), sub_ss)), sub_xmm)>;
}
let AddedComplexity = 20 in {
@@ -588,6 +606,9 @@ let Predicates = [HasAVX] in {
// Represent the same patterns above but in the form they appear for
// 256-bit types
+ def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))), (i32 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
(v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
(SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
@@ -605,6 +626,20 @@ let Predicates = [HasAVX] in {
(SUBREG_TO_REG (i64 0),
(v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
sub_xmm)>;
+ def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))), (i32 0)))),
+ (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
+
+ // Move low f64 and clear high bits.
+ def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSDrr (v2f64 (V_SET0)),
+ (EXTRACT_SUBREG (v4f64 VR256:$src), sub_sd)), sub_xmm)>;
+
+ def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSDrr (v2i64 (V_SET0)),
+ (EXTRACT_SUBREG (v4i64 VR256:$src), sub_sd)), sub_xmm)>;
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
@@ -617,8 +652,6 @@ let Predicates = [HasAVX] in {
(EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
// Shuffle with VMOVSS
- def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
- (VMOVSSrr VR128:$src1, FR32:$src2)>;
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
(VMOVSSrr (v4i32 VR128:$src1),
(EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
@@ -626,9 +659,17 @@ let Predicates = [HasAVX] in {
(VMOVSSrr (v4f32 VR128:$src1),
(EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
+ // 256-bit variants
+ def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_ss),
+ (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_ss)), sub_xmm)>;
+ def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_ss),
+ (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_ss)), sub_xmm)>;
+
// Shuffle with VMOVSD
- def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
- (VMOVSDrr VR128:$src1, FR64:$src2)>;
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
(VMOVSDrr (v2i64 VR128:$src1),
(EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
@@ -642,10 +683,27 @@ let Predicates = [HasAVX] in {
(VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),
sub_sd))>;
+ // 256-bit variants
+ def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_sd),
+ (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_sd)), sub_xmm)>;
+ def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
+ (SUBREG_TO_REG (i32 0),
+ (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_sd),
+ (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_sd)), sub_xmm)>;
+
+
+ // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
+ // problem is during lowering, where it's not possible to recognize the fold
+ // because it has two uses through a bitcast. One use disappears at isel time
+ // and the fold opportunity reappears.
+ def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
+ (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2f64 VR128:$src2),
+ sub_sd))>;
+ def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
+ (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2i64 VR128:$src2),
+ sub_sd))>;
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
(VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),
sub_sd))>;
@@ -654,6 +712,101 @@ let Predicates = [HasAVX] in {
sub_sd))>;
}
+let Predicates = [HasSSE1] in {
+ let AddedComplexity = 15 in {
+ // Move scalar to XMM zero-extended, zeroing a VR128 then do a
+ // MOVSS to the lower bits.
+ def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
+ (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
+ def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
+ (MOVSSrr (v4f32 (V_SET0)),
+ (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
+ def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
+ (MOVSSrr (v4i32 (V_SET0)),
+ (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
+ }
+
+ let AddedComplexity = 20 in {
+ // MOVSSrm zeros the high parts of the register; represent this
+ // with SUBREG_TO_REG.
+ def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
+ (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
+ def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
+ (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
+ def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
+ (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
+ }
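+
+ // (Sketch: MOVSSrm writes zero to bits 32-127 of the destination, so the
+ // SUBREG_TO_REG wrapper lets isel model the widening for free; a pattern
+ // like (v4f32 (X86vzmovl ...)) then costs a single 32-bit load.)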
+
+ // Extract and store.
+ def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
+ addr:$dst),
+ (MOVSSmr addr:$dst,
+ (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+
+ // Shuffle with MOVSS
+ def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
+ (MOVSSrr (v4i32 VR128:$src1),
+ (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
+ def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
+ (MOVSSrr (v4f32 VR128:$src1),
+ (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
+}
+
+let Predicates = [HasSSE2] in {
+ let AddedComplexity = 15 in {
+ // Move scalar to XMM zero-extended, zeroing a VR128 then do a
+ // MOVSD to the lower bits.
+ def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
+ (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
+ }
+
+ let AddedComplexity = 20 in {
+ // MOVSDrm zeros the high parts of the register; represent this
+ // with SUBREG_TO_REG.
+ def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ def : Pat<(v2f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+ }
+
+ // Extract and store.
+ def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
+ addr:$dst),
+ (MOVSDmr addr:$dst,
+ (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
+
+ // Shuffle with MOVSD
+ def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
+ (MOVSDrr (v2i64 VR128:$src1),
+ (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
+ def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
+ (MOVSDrr (v2f64 VR128:$src1),
+ (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
+ def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
+ def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),sub_sd))>;
+
+ // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
+ // problem is during lowering, where it's not possible to recognize the fold
+ // because it has two uses through a bitcast. One use disappears at isel time
+ // and the fold opportunity reappears.
+ def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2f64 VR128:$src2),sub_sd))>;
+ def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v2i64 VR128:$src2),sub_sd))>;
+ def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
+ def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),sub_sd))>;
+}
+
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//
@@ -661,126 +814,176 @@ let Predicates = [HasAVX] in {
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
X86MemOperand x86memop, PatFrag ld_frag,
string asm, Domain d,
+ OpndItins itins,
bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (ld_frag addr:$src))], d>;
+ [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>;
}
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
- "movaps", SSEPackedSingle>, TB, VEX;
+ "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
+ TB, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
- "movapd", SSEPackedDouble>, TB, OpSize, VEX;
+ "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
+ TB, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
- "movups", SSEPackedSingle>, TB, VEX;
+ "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
+ TB, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
- "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;
+ "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
+ TB, OpSize, VEX;
defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
- "movaps", SSEPackedSingle>, TB, VEX;
+ "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
+ TB, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
- "movapd", SSEPackedDouble>, TB, OpSize, VEX;
+ "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
+ TB, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
- "movups", SSEPackedSingle>, TB, VEX;
+ "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
+ TB, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
- "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;
+ "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
+ TB, OpSize, VEX;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
- "movaps", SSEPackedSingle>, TB;
+ "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
+ TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
- "movapd", SSEPackedDouble>, TB, OpSize;
+ "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
+ TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
- "movups", SSEPackedSingle>, TB;
+ "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
+ TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
- "movupd", SSEPackedDouble, 0>, TB, OpSize;
+ "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
+ TB, OpSize;
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
- [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
+ [(alignedstore (v4f32 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movapd\t{$src, $dst|$dst, $src}",
- [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
+ [(alignedstore (v2f64 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
- [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
+ [(store (v4f32 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movupd\t{$src, $dst|$dst, $src}",
- [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
+ [(store (v2f64 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVU_P_MR>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movaps\t{$src, $dst|$dst, $src}",
- [(alignedstore256 (v8f32 VR256:$src), addr:$dst)]>, VEX;
+ [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
+ IIC_SSE_MOVA_P_MR>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movapd\t{$src, $dst|$dst, $src}",
- [(alignedstore256 (v4f64 VR256:$src), addr:$dst)]>, VEX;
+ [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
+ IIC_SSE_MOVA_P_MR>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movups\t{$src, $dst|$dst, $src}",
- [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
+ [(store (v8f32 VR256:$src), addr:$dst)],
+ IIC_SSE_MOVU_P_MR>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movupd\t{$src, $dst|$dst, $src}",
- [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
+ [(store (v4f64 VR256:$src), addr:$dst)],
+ IIC_SSE_MOVU_P_MR>, VEX;
// For disassembler
let isCodeGenOnly = 1 in {
def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movaps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>, VEX;
def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movapd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>, VEX;
def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
- "movups\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movups\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVU_P_RR>, VEX;
def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movupd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVU_P_RR>, VEX;
def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movaps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>, VEX;
def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movapd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>, VEX;
def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
- "movups\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movups\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVU_P_RR>, VEX;
def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
- "movupd\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movupd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVU_P_RR>, VEX;
+}
+
+let Predicates = [HasAVX] in {
+def : Pat<(v8i32 (X86vzmovl
+ (insert_subvector undef, (v4i32 VR128:$src), (i32 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
+def : Pat<(v4i64 (X86vzmovl
+ (insert_subvector undef, (v2i64 VR128:$src), (i32 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
+def : Pat<(v8f32 (X86vzmovl
+ (insert_subvector undef, (v4f32 VR128:$src), (i32 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
+def : Pat<(v4f64 (X86vzmovl
+ (insert_subvector undef, (v2f64 VR128:$src), (i32 0)))),
+ (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
}
-def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
+
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
-
-def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
(VMOVUPDYmr addr:$dst, VR256:$src)>;
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
- [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
+ [(alignedstore (v4f32 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVA_P_MR>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movapd\t{$src, $dst|$dst, $src}",
- [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
+ [(alignedstore (v2f64 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVA_P_MR>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
- [(store (v4f32 VR128:$src), addr:$dst)]>;
+ [(store (v4f32 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVU_P_MR>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movupd\t{$src, $dst|$dst, $src}",
- [(store (v2f64 VR128:$src), addr:$dst)]>;
+ [(store (v2f64 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVU_P_MR>;
// For disassembler
let isCodeGenOnly = 1 in {
def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>;
+ "movaps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>;
def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>;
+ "movapd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>;
def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movups\t{$src, $dst|$dst, $src}", []>;
+ "movups\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVU_P_RR>;
def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}", []>;
+ "movupd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVU_P_RR>;
}
let Predicates = [HasAVX] in {
@@ -797,44 +1000,9 @@ let Predicates = [HasSSE2] in
def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
(MOVUPDmr addr:$dst, VR128:$src)>;
-// Use movaps / movups for SSE integer load / store (one byte shorter).
-// The instructions selected below are then converted to MOVDQA/MOVDQU
-// during the SSE domain pass.
-let Predicates = [HasSSE1] in {
- def : Pat<(alignedloadv4i32 addr:$src),
- (MOVAPSrm addr:$src)>;
- def : Pat<(loadv4i32 addr:$src),
- (MOVUPSrm addr:$src)>;
- def : Pat<(alignedloadv2i64 addr:$src),
- (MOVAPSrm addr:$src)>;
- def : Pat<(loadv2i64 addr:$src),
- (MOVUPSrm addr:$src)>;
-
- def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, VR128:$src)>;
- def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, VR128:$src)>;
- def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, VR128:$src)>;
- def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (v2i64 VR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (v4i32 VR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (v8i16 VR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (v16i8 VR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, VR128:$src)>;
-}
-
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
// 128-bit load/store
- def : Pat<(alignedloadv4i32 addr:$src),
- (VMOVAPSrm addr:$src)>;
- def : Pat<(loadv4i32 addr:$src),
- (VMOVUPSrm addr:$src)>;
def : Pat<(alignedloadv2i64 addr:$src),
(VMOVAPSrm addr:$src)>;
def : Pat<(loadv2i64 addr:$src),
@@ -862,10 +1030,6 @@ let Predicates = [HasAVX] in {
(VMOVAPSYrm addr:$src)>;
def : Pat<(loadv4i64 addr:$src),
(VMOVUPSYrm addr:$src)>;
- def : Pat<(alignedloadv8i32 addr:$src),
- (VMOVAPSYrm addr:$src)>;
- def : Pat<(loadv8i32 addr:$src),
- (VMOVUPSYrm addr:$src)>;
def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
(VMOVAPSYmr addr:$dst, VR256:$src)>;
def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
@@ -884,36 +1048,71 @@ let Predicates = [HasAVX] in {
(VMOVUPSYmr addr:$dst, VR256:$src)>;
}
+// Use movaps / movups for SSE integer load / store (one byte shorter).
+// The instructions selected below are then converted to MOVDQA/MOVDQU
+// during the SSE domain pass.
+let Predicates = [HasSSE1] in {
+ def : Pat<(alignedloadv2i64 addr:$src),
+ (MOVAPSrm addr:$src)>;
+ def : Pat<(loadv2i64 addr:$src),
+ (MOVUPSrm addr:$src)>;
+
+ def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v2i64 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v4i32 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v8i16 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v16i8 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, VR128:$src)>;
+}
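+
+// (Size sketch: "movaps %xmm1, (%rax)" encodes as 0F 29 08, while the
+// integer-domain "movdqa %xmm1, (%rax)" needs the 66 prefix, 66 0F 7F 08;
+// hence the one-byte saving mentioned above.)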
+
// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let neverHasSideEffects = 1 in {
-def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>;
-def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>;
def FsVMOVAPSrr : VPSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movaps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>, VEX;
def FsVMOVAPDrr : VPDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movapd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>, VEX;
+def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
+ "movaps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>;
+def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
+ "movapd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>;
}
// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1 in {
-def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
- "movaps\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
-def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
- "movapd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
let isCodeGenOnly = 1 in {
def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
"movaps\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>, VEX;
+ [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
+ IIC_SSE_MOVA_P_RM>, VEX;
def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
"movapd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>, VEX;
+ [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
+ IIC_SSE_MOVA_P_RM>, VEX;
}
+def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
+ "movaps\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
+ IIC_SSE_MOVA_P_RM>;
+def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
+ IIC_SSE_MOVA_P_RM>;
}
//===----------------------------------------------------------------------===//
@@ -921,94 +1120,68 @@ let isCodeGenOnly = 1 in {
//===----------------------------------------------------------------------===//
multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
- PatFrag mov_frag, string base_opc,
- string asm_opr> {
+ SDNode psnode, SDNode pdnode, string base_opc,
+ string asm_opr, InstrItinClass itin> {
def PSrm : PI<opc, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
!strconcat(base_opc, "s", asm_opr),
[(set RC:$dst,
- (mov_frag RC:$src1,
+ (psnode RC:$src1,
(bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
- SSEPackedSingle>, TB;
+ itin, SSEPackedSingle>, TB;
def PDrm : PI<opc, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, f64mem:$src2),
!strconcat(base_opc, "d", asm_opr),
- [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
+ [(set RC:$dst, (v2f64 (pdnode RC:$src1,
(scalar_to_vector (loadf64 addr:$src2)))))],
- SSEPackedDouble>, TB, OpSize;
+ itin, SSEPackedDouble>, TB, OpSize;
}
let AddedComplexity = 20 in {
- defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
+ defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, X86Movlps, X86Movlpd, "movlp",
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ IIC_SSE_MOV_LH>, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
- defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
- "\t{$src2, $dst|$dst, $src2}">;
+ defm MOVL : sse12_mov_hilo_packed<0x12, VR128, X86Movlps, X86Movlpd, "movlp",
+ "\t{$src2, $dst|$dst, $src2}",
+ IIC_SSE_MOV_LH>;
}
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
- (iPTR 0))), addr:$dst)]>, VEX;
+ (iPTR 0))), addr:$dst)],
+ IIC_SSE_MOV_LH>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlpd\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (v2f64 VR128:$src),
- (iPTR 0))), addr:$dst)]>, VEX;
+ (iPTR 0))), addr:$dst)],
+ IIC_SSE_MOV_LH>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
- (iPTR 0))), addr:$dst)]>;
+ (iPTR 0))), addr:$dst)],
+ IIC_SSE_MOV_LH>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlpd\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (v2f64 VR128:$src),
- (iPTR 0))), addr:$dst)]>;
+ (iPTR 0))), addr:$dst)],
+ IIC_SSE_MOV_LH>;
let Predicates = [HasAVX] in {
- let AddedComplexity = 20 in {
- // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
- def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
- (VMOVLPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
- (VMOVLPSrm VR128:$src1, addr:$src2)>;
- // vector_shuffle v1, (load v2) <2, 1> using MOVLPS
- def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
- (VMOVLPDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
- (VMOVLPDrm VR128:$src1, addr:$src2)>;
- }
-
- // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
- def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
- (VMOVLPSmr addr:$src1, VR128:$src2)>;
- def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)),
- VR128:$src2)), addr:$src1),
- (VMOVLPSmr addr:$src1, VR128:$src2)>;
-
- // (store (vector_shuffle (load addr), v2, <2, 1>), addr) using MOVLPS
- def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
- (VMOVLPDmr addr:$src1, VR128:$src2)>;
- def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
- (VMOVLPDmr addr:$src1, VR128:$src2)>;
-
// Shuffle with VMOVLPS
def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
(VMOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
(VMOVLPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(X86Movlps VR128:$src1,
- (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
- (VMOVLPSrm VR128:$src1, addr:$src2)>;
// Shuffle with VMOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
(VMOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
(VMOVLPDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2f64 (X86Movlpd VR128:$src1,
- (scalar_to_vector (loadf64 addr:$src2)))),
- (VMOVLPDrm VR128:$src1, addr:$src2)>;
// Store patterns
def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
@@ -1026,19 +1199,9 @@ let Predicates = [HasAVX] in {
}
let Predicates = [HasSSE1] in {
- let AddedComplexity = 20 in {
- // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
- def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
- (MOVLPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
- (MOVLPSrm VR128:$src1, addr:$src2)>;
- }
-
// (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
- def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
- (MOVLPSmr addr:$src1, VR128:$src2)>;
- def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)),
- VR128:$src2)), addr:$src1),
+ def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
+ (iPTR 0))), addr:$src1),
(MOVLPSmr addr:$src1, VR128:$src2)>;
// Shuffle with MOVLPS
@@ -1047,7 +1210,7 @@ let Predicates = [HasSSE1] in {
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
(MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlps VR128:$src1,
- (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
+ (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
(MOVLPSrm VR128:$src1, addr:$src2)>;
// Store patterns
@@ -1061,28 +1224,11 @@ let Predicates = [HasSSE1] in {
}
let Predicates = [HasSSE2] in {
- let AddedComplexity = 20 in {
- // vector_shuffle v1, (load v2) <2, 1> using MOVLPS
- def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
- (MOVLPDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
- (MOVLPDrm VR128:$src1, addr:$src2)>;
- }
-
- // (store (vector_shuffle (load addr), v2, <2, 1>), addr) using MOVLPS
- def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
- (MOVLPDmr addr:$src1, VR128:$src2)>;
- def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
- (MOVLPDmr addr:$src1, VR128:$src2)>;
-
// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
(MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
(MOVLPDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2f64 (X86Movlpd VR128:$src1,
- (scalar_to_vector (loadf64 addr:$src2)))),
- (MOVLPDrm VR128:$src1, addr:$src2)>;
// Store patterns
def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
@@ -1098,12 +1244,14 @@ let Predicates = [HasSSE2] in {
//===----------------------------------------------------------------------===//
let AddedComplexity = 20 in {
- defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
+ defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, X86Movlhps, X86Movlhpd, "movhp",
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ IIC_SSE_MOV_LH>, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
- defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
- "\t{$src2, $dst|$dst, $src2}">;
+ defm MOVH : sse12_mov_hilo_packed<0x16, VR128, X86Movlhps, X86Movlhpd, "movhp",
+ "\t{$src2, $dst|$dst, $src2}",
+ IIC_SSE_MOV_LH>;
}
// v2f64 extract element 1 is always custom lowered to unpack high to low
@@ -1111,94 +1259,62 @@ let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
- (unpckh (bc_v2f64 (v4f32 VR128:$src)),
- (undef)), (iPTR 0))), addr:$dst)]>,
- VEX;
+ (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
+ (bc_v2f64 (v4f32 VR128:$src))),
+ (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhpd\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
- (v2f64 (unpckh VR128:$src, (undef))),
- (iPTR 0))), addr:$dst)]>,
- VEX;
+ (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
+ (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
- (unpckh (bc_v2f64 (v4f32 VR128:$src)),
- (undef)), (iPTR 0))), addr:$dst)]>;
+ (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
+ (bc_v2f64 (v4f32 VR128:$src))),
+ (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhpd\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
- (v2f64 (unpckh VR128:$src, (undef))),
- (iPTR 0))), addr:$dst)]>;
+ (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
+ (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
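+
+// (Sketch: extracting f64 element 1 is modelled as X86Unpckh of the register
+// with itself, which moves the high half down to element 0 where the plain
+// f64 store pattern can reach it.)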
let Predicates = [HasAVX] in {
// VMOVHPS patterns
- def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
- (VMOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
- (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
+ (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
(VMOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
(bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
(VMOVHPSrm VR128:$src1, addr:$src2)>;
- // FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the problem
- // is during lowering, where it's not possible to recognize the load fold cause
- // it has two uses through a bitcast. One use disappears at isel time and the
- // fold opportunity reappears.
- def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
+ // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here; the
+ // problem is during lowering, where it's not possible to recognize the load
+ // fold because it has two uses through a bitcast. One use disappears at isel
+ // time and the fold opportunity reappears.
+ def : Pat<(v2f64 (X86Unpckl VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)))),
(VMOVHPDrm VR128:$src1, addr:$src2)>;
-
- // FIXME: This should be matched by a X86Movhpd instead. Same as above
- def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
- (scalar_to_vector (loadf64 addr:$src2)))),
- (VMOVHPDrm VR128:$src1, addr:$src2)>;
-
- // Store patterns
- def : Pat<(store (f64 (vector_extract
- (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
- (VMOVHPSmr addr:$dst, VR128:$src)>;
- def : Pat<(store (f64 (vector_extract
- (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
- (VMOVHPDmr addr:$dst, VR128:$src)>;
}
let Predicates = [HasSSE1] in {
// MOVHPS patterns
- def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
- (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
- (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
+ (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
(MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
(bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
(MOVHPSrm VR128:$src1, addr:$src2)>;
-
- // Store patterns
- def : Pat<(store (f64 (vector_extract
- (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
- (MOVHPSmr addr:$dst, VR128:$src)>;
}
let Predicates = [HasSSE2] in {
- // FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the problem
- // is during lowering, where it's not possible to recognize the load fold cause
- // it has two uses through a bitcast. One use disappears at isel time and the
- // fold opportunity reappears.
- def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
+ // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here; the
+ // problem is during lowering, where it's not possible to recognize the load
+ // fold because it has two uses through a bitcast. One use disappears at isel
+ // time and the fold opportunity reappears.
+ def : Pat<(v2f64 (X86Unpckl VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)))),
(MOVHPDrm VR128:$src1, addr:$src2)>;
-
- // FIXME: This should be matched by a X86Movhpd instead. Same as above
- def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
- (scalar_to_vector (loadf64 addr:$src2)))),
- (MOVHPDrm VR128:$src1, addr:$src2)>;
-
- // Store patterns
- def : Pat<(store (f64 (vector_extract
- (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
- (MOVHPDmr addr:$dst, VR128:$src)>;
}
//===----------------------------------------------------------------------===//
@@ -1210,13 +1326,15 @@ let AddedComplexity = 20 in {
(ins VR128:$src1, VR128:$src2),
"movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
+ (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
+ IIC_SSE_MOV_LH>,
VEX_4V;
def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
+ (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
+ IIC_SSE_MOV_LH>,
VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
@@ -1224,86 +1342,36 @@ let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
(ins VR128:$src1, VR128:$src2),
"movlhps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
+ (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
+ IIC_SSE_MOV_LH>;
def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movhlps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
+ (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
+ IIC_SSE_MOV_LH>;
}
let Predicates = [HasAVX] in {
// MOVLHPS patterns
- let AddedComplexity = 20 in {
- def : Pat<(v4f32 (movddup VR128:$src, (undef))),
- (VMOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
- def : Pat<(v2i64 (movddup VR128:$src, (undef))),
- (VMOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
-
- // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
- def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
- (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
- }
- def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
- (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
(VMOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
(VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
// MOVHLPS patterns
- let AddedComplexity = 20 in {
- // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
- def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
- (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
-
- // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
- def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
- (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
- def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
- (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
- }
-
- def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
- (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
(VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}
let Predicates = [HasSSE1] in {
// MOVLHPS patterns
- let AddedComplexity = 20 in {
- def : Pat<(v4f32 (movddup VR128:$src, (undef))),
- (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
- def : Pat<(v2i64 (movddup VR128:$src, (undef))),
- (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
-
- // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
- def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
- (MOVLHPSrr VR128:$src1, VR128:$src2)>;
- }
- def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
- (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
(MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
(MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
// MOVHLPS patterns
- let AddedComplexity = 20 in {
- // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
- def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
- (MOVHLPSrr VR128:$src1, VR128:$src2)>;
-
- // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
- def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
- (MOVHLPSrr VR128:$src1, VR128:$src1)>;
- def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
- (MOVHLPSrr VR128:$src1, VR128:$src1)>;
- }
-
- def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
- (MOVHLPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
(MOVHLPSrr VR128:$src1, VR128:$src2)>;
}
@@ -1312,70 +1380,97 @@ let Predicates = [HasSSE1] in {
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//
+def SSE_CVT_PD : OpndItins<
+ IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
+>;
+
+def SSE_CVT_PS : OpndItins<
+ IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
+>;
+
+def SSE_CVT_Scalar : OpndItins<
+ IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
+>;
+
+def SSE_CVT_SS2SI_32 : OpndItins<
+ IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
+>;
+
+def SSE_CVT_SS2SI_64 : OpndItins<
+ IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
+>;
+
+def SSE_CVT_SD2SI : OpndItins<
+ IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
+>;
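For readers unfamiliar with the itinerary plumbing: each OpndItins record above pairs the itinerary for the register form with the one for the memory form, so a single multiclass parameter covers both variants. A minimal sketch of the class shape these defs assume (the actual definition is added elsewhere in this patch and may differ):

class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;  // itinerary for the register-register form
  InstrItinClass rm = arg_rm;  // itinerary for the register-memory form
}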
+
multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
- string asm> {
+ string asm, OpndItins itins> {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
- [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
+ [(set DstRC:$dst, (OpNode SrcRC:$src))],
+ itins.rr>;
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
- [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
-}
-
-multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
- X86MemOperand x86memop, string asm> {
- def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, []>;
- let mayLoad = 1 in
- def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, []>;
+ [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
+ itins.rm>;
}
multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
- string asm, Domain d> {
+ string asm, Domain d, OpndItins itins> {
def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
- [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
+ [(set DstRC:$dst, (OpNode SrcRC:$src))],
+ itins.rr, d>;
def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
- [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
+ [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
+ itins.rm, d>;
}
multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
X86MemOperand x86memop, string asm> {
+let neverHasSideEffects = 1 in {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
let mayLoad = 1 in
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
(ins DstRC:$src1, x86memop:$src),
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
+} // neverHasSideEffects = 1
}
defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
- "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
- VEX_LIG;
+ "cvttss2si\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SS2SI_32>,
+ XS, VEX, VEX_LIG;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
- "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
- VEX_W, VEX_LIG;
+ "cvttss2si\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SS2SI_64>,
+ XS, VEX, VEX_W, VEX_LIG;
defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
- "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX,
- VEX_LIG;
+ "cvttsd2si\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SD2SI>,
+ XD, VEX, VEX_LIG;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
- "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
- VEX, VEX_W, VEX_LIG;
+ "cvttsd2si\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SD2SI>,
+ XD, VEX, VEX_W, VEX_LIG;
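As a sketch of the expansion (names per TableGen's defm-prefix rule; illustrative only), each defm above yields one register and one memory instruction, each tagged with the matching itinerary:

// defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, ...> expands to roughly:
//   VCVTTSS2SIrr : GR32 = fp_to_sint FR32           // itins.rr = IIC_SSE_CVT_SS2SI32_RR
//   VCVTTSS2SIrm : GR32 = fp_to_sint (loadf32 mem)  // itins.rm = IIC_SSE_CVT_SS2SI32_RM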
// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only memory operands are used;
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate.
-defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
- VEX_4V, VEX_LIG;
-defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
- VEX_4V, VEX_W, VEX_LIG;
-defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
- VEX_4V, VEX_LIG;
-defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
- VEX_4V, VEX_LIG;
-defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
- VEX_4V, VEX_W, VEX_LIG;
-
-let Predicates = [HasAVX] in {
+defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">,
+ XS, VEX_4V, VEX_LIG;
+defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
+ XS, VEX_4V, VEX_W, VEX_LIG;
+defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">,
+ XD, VEX_4V, VEX_LIG;
+defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
+ XD, VEX_4V, VEX_LIG;
+defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
+ XD, VEX_4V, VEX_W, VEX_LIG;
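A usage sketch of why the suffixed aliases exist (AT&T syntax, illustrative operands): a 64-bit register operand makes the source size visible to the assembler, while a memory operand does not:

// cvtsi2ss  %rax, %xmm0     // OK: %rax implies the 64-bit integer source
// cvtsi2ssq (%rdi), %xmm0   // memory gives no size; the {q} form is required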
+
+let Predicates = [HasAVX], AddedComplexity = 1 in {
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
(VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
@@ -1396,169 +1491,185 @@ let Predicates = [HasAVX] in {
}
defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
- "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
+ "cvttss2si\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SS2SI_32>, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
- "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
+ "cvttss2si{q}\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SS2SI_64>, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
- "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
+ "cvttsd2si\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SD2SI>, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
- "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
+ "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SD2SI>, XD, REX_W;
defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
- "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
+ "cvtsi2ss\t{$src, $dst|$dst, $src}",
+ SSE_CVT_Scalar>, XS;
defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
- "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
+ "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
+ SSE_CVT_Scalar>, XS, REX_W;
defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
- "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
+ "cvtsi2sd\t{$src, $dst|$dst, $src}",
+ SSE_CVT_Scalar>, XD;
defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
- "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
+ "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
+ SSE_CVT_Scalar>, XD, REX_W;
// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).
multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
- string asm> {
+ string asm, OpndItins itins> {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set DstRC:$dst, (Int SrcRC:$src))]>;
+ [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>;
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
+ [(set DstRC:$dst, (Int (ld_frag addr:$src)))], itins.rm>;
}
multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
- PatFrag ld_frag, string asm, bit Is2Addr = 1> {
+ PatFrag ld_frag, string asm, OpndItins itins,
+ bit Is2Addr = 1> {
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
+ [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
+ itins.rr>;
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
(ins DstRC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
+ [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
+ itins.rm>;
}
-defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
- f128mem, load, "cvtsd2si">, XD, VEX;
-defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
- int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
- XD, VEX, VEX_W;
-
-// FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
-// Get rid of this hack or rename the intrinsics, there are several
-// intructions that only match with the intrinsic form, why create duplicates
-// to let them be recognized by the assembler?
-defm VCVTSD2SI : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
- "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_LIG;
-defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
- "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W,
- VEX_LIG;
+defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
+ f128mem, load, "cvtsd2si", SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
+defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
+ int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si",
+ SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
- f128mem, load, "cvtsd2si{l}">, XD;
+ f128mem, load, "cvtsd2si{l}", SSE_CVT_SD2SI>, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
- f128mem, load, "cvtsd2si{q}">, XD, REX_W;
+ f128mem, load, "cvtsd2si{q}", SSE_CVT_SD2SI>, XD, REX_W;
defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
- int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
+ int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss",
+ SSE_CVT_Scalar, 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
- int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
+ int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss",
+ SSE_CVT_Scalar, 0>, XS, VEX_4V,
VEX_W;
defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
- int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
+ int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd",
+ SSE_CVT_Scalar, 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
- int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
+ int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd",
+ SSE_CVT_Scalar, 0>, XD,
VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in {
defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse_cvtsi2ss, i32mem, loadi32,
- "cvtsi2ss">, XS;
+ "cvtsi2ss", SSE_CVT_Scalar>, XS;
defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse_cvtsi642ss, i64mem, loadi64,
- "cvtsi2ss{q}">, XS, REX_W;
+ "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse2_cvtsi2sd, i32mem, loadi32,
- "cvtsi2sd">, XD;
+ "cvtsi2sd", SSE_CVT_Scalar>, XD;
defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse2_cvtsi642sd, i64mem, loadi64,
- "cvtsi2sd">, XD, REX_W;
+ "cvtsi2sd", SSE_CVT_Scalar>, XD, REX_W;
}
/// SSE 1 Only
// Aliases for intrinsics
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
- f32mem, load, "cvttss2si">, XS, VEX;
+ f32mem, load, "cvttss2si",
+ SSE_CVT_SS2SI_32>, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse_cvttss2si64, f32mem, load,
- "cvttss2si">, XS, VEX, VEX_W;
+ "cvttss2si", SSE_CVT_SS2SI_64>,
+ XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
- f128mem, load, "cvttsd2si">, XD, VEX;
+ f128mem, load, "cvttsd2si", SSE_CVT_SD2SI>,
+ XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse2_cvttsd2si64, f128mem, load,
- "cvttsd2si">, XD, VEX, VEX_W;
+ "cvttsd2si", SSE_CVT_SD2SI>,
+ XD, VEX, VEX_W;
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
- f32mem, load, "cvttss2si">, XS;
+ f32mem, load, "cvttss2si",
+ SSE_CVT_SS2SI_32>, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse_cvttss2si64, f32mem, load,
- "cvttss2si{q}">, XS, REX_W;
+ "cvttss2si{q}", SSE_CVT_SS2SI_64>,
+ XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
- f128mem, load, "cvttsd2si">, XD;
+ f128mem, load, "cvttsd2si", SSE_CVT_SD2SI>,
+ XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse2_cvttsd2si64, f128mem, load,
- "cvttsd2si{q}">, XD, REX_W;
+ "cvttsd2si{q}", SSE_CVT_SD2SI>,
+ XD, REX_W;
let Pattern = []<dag> in {
defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
- "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS,
- VEX, VEX_LIG;
+ "cvtss2si{l}\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
- "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
- VEX_W, VEX_LIG;
+ "cvtss2si\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
- SSEPackedSingle>, TB, VEX;
+ SSEPackedSingle, SSE_CVT_PS>, TB, VEX;
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
- SSEPackedSingle>, TB, VEX;
+ SSEPackedSingle, SSE_CVT_PS>, TB, VEX;
}
let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
- "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
+ "cvtss2si{l}\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SS2SI_32>, XS;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
- "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
+ "cvtss2si{q}\t{$src, $dst|$dst, $src}",
+ SSE_CVT_SS2SI_64>, XS, REX_W;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
- SSEPackedSingle>, TB; /* PD SSE3 form is avaiable */
+ SSEPackedSingle, SSE_CVT_PS>,
+ TB; /* PD SSE3 form is available */
}
-let Predicates = [HasSSE1] in {
+let Predicates = [HasAVX] in {
def : Pat<(int_x86_sse_cvtss2si VR128:$src),
- (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+ (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
- (CVTSS2SIrm addr:$src)>;
+ (VCVTSS2SIrm addr:$src)>;
def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
- (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+ (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
- (CVTSS2SI64rm addr:$src)>;
+ (VCVTSS2SI64rm addr:$src)>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasSSE1] in {
def : Pat<(int_x86_sse_cvtss2si VR128:$src),
- (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+ (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
- (VCVTSS2SIrm addr:$src)>;
+ (CVTSS2SIrm addr:$src)>;
def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
- (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+ (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
- (VCVTSS2SI64rm addr:$src)>;
+ (CVTSS2SI64rm addr:$src)>;
}
/// SSE 2 Only
@@ -1566,43 +1677,51 @@ let Predicates = [HasAVX] in {
// Convert scalar double to scalar single
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
(ins FR64:$src1, FR64:$src2),
- "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- VEX_4V, VEX_LIG;
+ "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
+ IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG;
let mayLoad = 1 in
def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
(ins FR64:$src1, f64mem:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG;
+ [], IIC_SSE_CVT_Scalar_RM>,
+ XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG;
def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
Requires<[HasAVX]>;
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (fround FR64:$src))]>;
+ [(set FR32:$dst, (fround FR64:$src))],
+ IIC_SSE_CVT_Scalar_RR>;
def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
+ [(set FR32:$dst, (fround (loadf64 addr:$src)))],
+ IIC_SSE_CVT_Scalar_RM>,
+ XD,
Requires<[HasSSE2, OptForSize]>;
defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
- int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
+ int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss",
+ SSE_CVT_Scalar, 0>,
XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
- int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
+ int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss",
+ SSE_CVT_Scalar>, XS;
// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
(ins FR32:$src1, FR32:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG;
+ [], IIC_SSE_CVT_Scalar_RR>,
+ XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG;
let mayLoad = 1 in
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
(ins FR32:$src1, f32mem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>;
+ [], IIC_SSE_CVT_Scalar_RM>,
+ XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>;
let Predicates = [HasAVX] in {
def : Pat<(f64 (fextend FR32:$src)),
@@ -1619,11 +1738,13 @@ def : Pat<(extloadf32 addr:$src),
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (fextend FR32:$src))]>, XS,
+ [(set FR64:$dst, (fextend FR32:$src))],
+ IIC_SSE_CVT_Scalar_RR>, XS,
Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
+ [(set FR64:$dst, (extloadf32 addr:$src))],
+ IIC_SSE_CVT_Scalar_RM>, XS,
Requires<[HasSSE2, OptForSize]>;
// extload f32 -> f64. This matches load+fextend because we have a hack in
@@ -1640,26 +1761,30 @@ def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- VR128:$src2))]>, XS, VEX_4V,
+ VR128:$src2))],
+ IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V,
Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- (load addr:$src2)))]>, XS, VEX_4V,
+ (load addr:$src2)))],
+ IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V,
Requires<[HasAVX]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- VR128:$src2))]>, XS,
+ VR128:$src2))],
+ IIC_SSE_CVT_Scalar_RR>, XS,
Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- (load addr:$src2)))]>, XS,
+ (load addr:$src2)))],
+ IIC_SSE_CVT_Scalar_RM>, XS,
Requires<[HasSSE2]>;
}
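The "$src1 = $dst" constraint above models the two-address SSE encoding; a sketch of the difference against the three-address VEX forms (illustrative operands):

// cvtss2sd  %xmm1, %xmm0          // SSE: %xmm0 is tied as both src1 and dst
// vcvtss2sd %xmm1, %xmm2, %xmm0   // AVX: separate src1 (%xmm2) and dst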
@@ -1667,216 +1792,275 @@ def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
// SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))],
+ IIC_SSE_CVT_PS_RR>,
TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2ps
- (bitconvert (memopv2i64 addr:$src))))]>,
+ (bitconvert (memopv2i64 addr:$src))))],
+ IIC_SSE_CVT_PS_RM>,
TB, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtdq2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))],
+ IIC_SSE_CVT_PS_RR>,
TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"cvtdq2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2ps
- (bitconvert (memopv2i64 addr:$src))))]>,
+ (bitconvert (memopv2i64 addr:$src))))],
+ IIC_SSE_CVT_PS_RM>,
TB, Requires<[HasSSE2]>;
// FIXME: why is the non-intrinsic version described as SSE3?
// SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
+ IIC_SSE_CVT_PD_RR>,
XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2pd
- (bitconvert (memopv2i64 addr:$src))))]>,
+ (bitconvert (memopv2i64 addr:$src))))],
+ IIC_SSE_CVT_PD_RM>,
XS, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
+ IIC_SSE_CVT_PD_RR>,
XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2pd
- (bitconvert (memopv2i64 addr:$src))))]>,
+ (bitconvert (memopv2i64 addr:$src))))],
+ IIC_SSE_CVT_PD_RM>,
XS, Requires<[HasSSE2]>;
// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PS_RR>, VEX;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PS_RM>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PS_RR>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PS_RM>, VEX;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
+ "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PS_RR>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
+ "cvtps2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PS_RM>;
def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
+ IIC_SSE_CVT_PS_RR>,
VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
(ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq
- (memop addr:$src)))]>, VEX;
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PS_RM>, VEX;
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
+ [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
+ IIC_SSE_CVT_PS_RR>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq
- (memop addr:$src)))]>;
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PS_RM>;
// SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
+ IIC_SSE_CVT_PD_RR>,
XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"vcvtpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2dq
- (memop addr:$src)))]>,
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PD_RM>,
XD, VEX, Requires<[HasAVX]>;
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
+ IIC_SSE_CVT_PD_RR>,
XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2dq
- (memop addr:$src)))]>,
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PD_RM>,
XD, Requires<[HasSSE2]>;
// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
-let mayLoad = 1 in
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvttps2dq VR128:$src))],
+ IIC_SSE_CVT_PS_RR>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttps2dq
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PS_RM>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
-let mayLoad = 1 in
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst,
+ (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
+ IIC_SSE_CVT_PS_RR>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
- "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
+ (memopv8f32 addr:$src)))],
+ IIC_SSE_CVT_PS_RM>, VEX;
+
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_sse2_cvttps2dq VR128:$src))]>;
+ (int_x86_sse2_cvttps2dq VR128:$src))],
+ IIC_SSE_CVT_PS_RR>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
-
-def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (int_x86_sse2_cvttps2dq VR128:$src))]>,
- XS, VEX, Requires<[HasAVX]>;
-def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "vcvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttps2dq
- (memop addr:$src)))]>,
- XS, VEX, Requires<[HasAVX]>;
-
-let Predicates = [HasSSE2] in {
- def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
- (Int_CVTDQ2PSrr VR128:$src)>;
- def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
- (CVTTPS2DQrr VR128:$src)>;
-}
+ (int_x86_sse2_cvttps2dq (memop addr:$src)))],
+ IIC_SSE_CVT_PS_RM>;
let Predicates = [HasAVX] in {
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
(Int_VCVTDQ2PSrr VR128:$src)>;
+ def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
+ (Int_VCVTDQ2PSrm addr:$src)>;
+
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
(VCVTTPS2DQrr VR128:$src)>;
+ def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
+ (VCVTTPS2DQrm addr:$src)>;
+
def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
(VCVTDQ2PSYrr VR256:$src)>;
+ def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (memopv4i64 addr:$src)))),
+ (VCVTDQ2PSYrm addr:$src)>;
+
def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
(VCVTTPS2DQYrr VR256:$src)>;
+ def : Pat<(v8i32 (fp_to_sint (memopv8f32 addr:$src))),
+ (VCVTTPS2DQYrm addr:$src)>;
+}
+
+let Predicates = [HasSSE2] in {
+ def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
+ (Int_CVTDQ2PSrr VR128:$src)>;
+ def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
+ (Int_CVTDQ2PSrm addr:$src)>;
+
+ def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
+ (CVTTPS2DQrr VR128:$src)>;
+ def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
+ (CVTTPS2DQrm addr:$src)>;
}
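The effect of the new memory-operand patterns above, as a sketch (hypothetical IR and assembly): a converted load can now be selected as a single folded instruction:

//   %v = load <4 x float>* %p
//   %i = fptosi <4 x float> %v to <4 x i32>
// may now select to "cvttps2dq (%rdi), %xmm0" instead of a separate
// load followed by the register-form convert.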
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (int_x86_sse2_cvttpd2dq VR128:$src))]>, VEX;
+ (int_x86_sse2_cvttpd2dq VR128:$src))],
+ IIC_SSE_CVT_PD_RR>, VEX;
let isCodeGenOnly = 1 in
def VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
- (memop addr:$src)))]>, VEX;
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PD_RM>, VEX;
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
+ IIC_SSE_CVT_PD_RR>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
- (memop addr:$src)))]>;
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PD_RM>;
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvttpd2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, VEX;
// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvttpd2dqx\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvttpd2dqx\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>, VEX;
// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvttpd2dqy\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
- "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
+ "cvttpd2dqy\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
// Convert packed single to packed double
let Predicates = [HasAVX] in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, TB, VEX;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>, TB, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, TB, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
- "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>, TB, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
+ "cvtps2pd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
+ "cvtps2pd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>, TB;
def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
+ IIC_SSE_CVT_PD_RR>,
TB, VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd
- (load addr:$src)))]>,
+ (load addr:$src)))],
+ IIC_SSE_CVT_PD_RM>,
TB, VEX, Requires<[HasAVX]>;
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
+ [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
+ IIC_SSE_CVT_PD_RR>,
TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd
- (load addr:$src)))]>,
+ (load addr:$src)))],
+ IIC_SSE_CVT_PD_RM>,
TB, Requires<[HasSSE2]>;
// Convert packed double to packed single
@@ -1884,49 +2068,61 @@ def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtpd2ps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, VEX;
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtpd2ps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, VEX;
// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtpd2psx\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtpd2psx\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>, VEX;
// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
- "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
+ "cvtpd2psy\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
- "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
+ "cvtpd2psy\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>, VEX, VEX_L;
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
+ "cvtpd2ps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
+ "cvtpd2ps\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>;
def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
+ IIC_SSE_CVT_PD_RR>;
def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
(ins f128mem:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2ps
- (memop addr:$src)))]>;
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PD_RM>;
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
+ IIC_SSE_CVT_PD_RR>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2ps
- (memop addr:$src)))]>;
+ (memop addr:$src)))],
+ IIC_SSE_CVT_PD_RM>;
// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
(VCVTDQ2PSYrr VR256:$src)>;
-def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
+def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (memopv4i64 addr:$src))),
(VCVTDQ2PSYrm addr:$src)>;
def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
@@ -1949,11 +2145,6 @@ def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
(VCVTTPD2DQYrm addr:$src)>;
-def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
- (VCVTTPS2DQYrr VR256:$src)>;
-def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
- (VCVTTPS2DQYrm addr:$src)>;
-
// Match fround and fextend for 128/256-bit conversions
def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
(VCVTPD2PSYrr VR256:$src)>;
@@ -1971,70 +2162,85 @@ def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
- SDNode OpNode, ValueType VT, PatFrag ld_frag,
- string asm, string asm_alt> {
+ Operand CC, SDNode OpNode, ValueType VT,
+ PatFrag ld_frag, string asm, string asm_alt,
+ OpndItins itins> {
def rr : SIi8<0xC2, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, RC:$src2, SSECC:$cc), asm,
- [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))]>;
+ (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
+ [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
+ itins.rr>;
def rm : SIi8<0xC2, MRMSrcMem,
- (outs RC:$dst), (ins RC:$src1, x86memop:$src2, SSECC:$cc), asm,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
[(set RC:$dst, (OpNode (VT RC:$src1),
- (ld_frag addr:$src2), imm:$cc))]>;
+ (ld_frag addr:$src2), imm:$cc))],
+ itins.rm>;
// Accept explicit immediate argument form instead of comparison code.
let neverHasSideEffects = 1 in {
def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, []>;
+ (ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, [],
+ IIC_SSE_ALU_F32S_RR>;
let mayLoad = 1 in
def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2, i8imm:$cc), asm_alt, []>;
+ (ins RC:$src1, x86memop:$src2, i8imm:$cc), asm_alt, [],
+ IIC_SSE_ALU_F32S_RM>;
}
}
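A usage sketch of the two accepted assembly forms (illustrative, AT&T syntax): the condition can be spelled in the mnemonic, or passed as the raw immediate the _alt defs accept:

// cmpless %xmm1, %xmm0        // condition-in-mnemonic form, cc = 2 (LE)
// cmpss $2, %xmm1, %xmm0      // explicit-immediate form; same encoding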
-defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, X86cmpss, f32, loadf32,
+defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmpss, f32, loadf32,
"cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
+ "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
+ SSE_ALU_F32S>,
XS, VEX_4V, VEX_LIG;
-defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, X86cmpsd, f64, loadf64,
+defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmpsd, f64, loadf64,
"cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
+ "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
+ SSE_ALU_F32S>, // same latency as the 32-bit compare
XD, VEX_4V, VEX_LIG;
let Constraints = "$src1 = $dst" in {
- defm CMPSS : sse12_cmp_scalar<FR32, f32mem, X86cmpss, f32, loadf32,
+ defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmpss, f32, loadf32,
"cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
- "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}">,
+ "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S>,
XS;
- defm CMPSD : sse12_cmp_scalar<FR64, f64mem, X86cmpsd, f64, loadf64,
+ defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmpsd, f64, loadf64,
"cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
- "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}">,
+ "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
+ SSE_ALU_F32S>, // same latency as the 32-bit compare
XD;
}
-multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
- Intrinsic Int, string asm> {
+multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
+ Intrinsic Int, string asm, OpndItins itins> {
def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
+ (ins VR128:$src1, VR128:$src, CC:$cc), asm,
[(set VR128:$dst, (Int VR128:$src1,
- VR128:$src, imm:$cc))]>;
+ VR128:$src, imm:$cc))],
+ itins.rr>;
def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
+ (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
[(set VR128:$dst, (Int VR128:$src1,
- (load addr:$src), imm:$cc))]>;
+ (load addr:$src), imm:$cc))],
+ itins.rm>;
}
// Aliases to match intrinsics which expect XMM operand(s).
-defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
- "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
+defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
+ "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
+ SSE_ALU_F32S>,
XS, VEX_4V;
-defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
- "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
+defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
+ "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ SSE_ALU_F32S>, // same latency as f32
XD, VEX_4V;
let Constraints = "$src1 = $dst" in {
- defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
- "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
- defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
- "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
+ defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
+ "cmp${cc}ss\t{$src, $dst|$dst, $src}",
+ SSE_ALU_F32S>, XS;
+ defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
+ "cmp${cc}sd\t{$src, $dst|$dst, $src}",
+ SSE_ALU_F32S>, // same latency as f32
+ XD;
}
@@ -2044,11 +2250,13 @@ multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
PatFrag ld_frag, string OpcodeStr, Domain d> {
def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
- [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
+ [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
+ IIC_SSE_COMIS_RR, d>;
def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (OpNode (vt RC:$src1),
- (ld_frag addr:$src2)))], d>;
+ (ld_frag addr:$src2)))],
+ IIC_SSE_COMIS_RM, d>;
}
let Defs = [EFLAGS] in {
@@ -2098,89 +2306,91 @@ let Defs = [EFLAGS] in {
"comisd", SSEPackedDouble>, TB, OpSize;
} // Defs = [EFLAGS]
-// sse12_cmp_packed - sse 1 & 2 compared packed instructions
+// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
- Intrinsic Int, string asm, string asm_alt,
- Domain d> {
- let isAsmParserOnly = 1 in {
- def rri : PIi8<0xC2, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, RC:$src2, SSECC:$cc), asm,
- [(set RC:$dst, (Int RC:$src1, RC:$src2, imm:$cc))], d>;
- def rmi : PIi8<0xC2, MRMSrcMem,
- (outs RC:$dst), (ins RC:$src1, f128mem:$src2, SSECC:$cc), asm,
- [(set RC:$dst, (Int RC:$src1, (memop addr:$src2), imm:$cc))], d>;
- }
+ Operand CC, Intrinsic Int, string asm,
+ string asm_alt, Domain d> {
+ def rri : PIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
+ [(set RC:$dst, (Int RC:$src1, RC:$src2, imm:$cc))],
+ IIC_SSE_CMPP_RR, d>;
+ def rmi : PIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
+ [(set RC:$dst, (Int RC:$src1, (memop addr:$src2), imm:$cc))],
+ IIC_SSE_CMPP_RM, d>;
// Accept explicit immediate argument form instead of comparison code.
- def rri_alt : PIi8<0xC2, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
- asm_alt, [], d>;
- def rmi_alt : PIi8<0xC2, MRMSrcMem,
- (outs RC:$dst), (ins RC:$src1, f128mem:$src2, i8imm:$cc),
- asm_alt, [], d>;
+ let neverHasSideEffects = 1 in {
+ def rri_alt : PIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
+ asm_alt, [], IIC_SSE_CMPP_RR, d>;
+ def rmi_alt : PIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
+ asm_alt, [], IIC_SSE_CMPP_RM, d>;
+ }
}
-defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
+defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SSEPackedSingle>, TB, VEX_4V;
-defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
+defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SSEPackedDouble>, TB, OpSize, VEX_4V;
-defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
+defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SSEPackedSingle>, TB, VEX_4V;
-defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
+defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SSEPackedDouble>, TB, OpSize, VEX_4V;
let Constraints = "$src1 = $dst" in {
- defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
+ defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
"cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
SSEPackedSingle>, TB;
- defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
+ defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
"cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
SSEPackedDouble>, TB, OpSize;
}
-let Predicates = [HasSSE1] in {
-def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
- (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
- (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
-}
-
-let Predicates = [HasSSE2] in {
-def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
- (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
- (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
-}
-
let Predicates = [HasAVX] in {
-def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
(VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
(VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
+def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
(VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
(VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
-def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
+def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
(VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
-def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
(VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
-def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
+def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
(VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
-def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
+def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
(VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}
+let Predicates = [HasSSE1] in {
+def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
+ (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
+def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
+ (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
+}
+
+let Predicates = [HasSSE2] in {
+def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
+ (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
+def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
+ (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
+}
+
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Shuffle Instructions
//===----------------------------------------------------------------------===//
@@ -2190,14 +2400,14 @@ multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
ValueType vt, string asm, PatFrag mem_frag,
Domain d, bit IsConvertibleToThreeAddress = 0> {
def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
- [(set RC:$dst, (vt (shufp:$src3
- RC:$src1, (mem_frag addr:$src2))))], d>;
+ (ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
+ [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
+ (i8 imm:$src3))))], IIC_SSE_SHUFP, d>;
let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, i8imm:$src3), asm,
- [(set RC:$dst,
- (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
+ [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
+ (i8 imm:$src3))))], IIC_SSE_SHUFP, d>;
}
defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
@@ -2220,133 +2430,52 @@ let Constraints = "$src1 = $dst" in {
TB;
defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- memopv2f64, SSEPackedDouble>, TB, OpSize;
-}
-
-let Predicates = [HasSSE1] in {
- def : Pat<(v4f32 (X86Shufps VR128:$src1,
- (memopv4f32 addr:$src2), (i8 imm:$imm))),
- (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
- def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
- def : Pat<(v4i32 (X86Shufps VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
- (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
- def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
- // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
- // fall back to this for SSE1)
- def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
- (SHUFPSrri VR128:$src2, VR128:$src1,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- // Special unary SHUFPSrri case.
- def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
- (SHUFPSrri VR128:$src1, VR128:$src1,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
-}
-
-let Predicates = [HasSSE2] in {
- // Special binary v4i32 shuffle cases with SHUFPS.
- def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
- (SHUFPSrri VR128:$src1, VR128:$src2,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2)))),
- (SHUFPSrmi VR128:$src1, addr:$src2,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- // Special unary SHUFPDrri cases.
- def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
- (SHUFPDrri VR128:$src1, VR128:$src1,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
- (SHUFPDrri VR128:$src1, VR128:$src1,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- // Special binary v2i64 shuffle cases using SHUFPDrri.
- def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
- (SHUFPDrri VR128:$src1, VR128:$src2,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- // Generic SHUFPD patterns
- def : Pat<(v2f64 (X86Shufps VR128:$src1,
- (memopv2f64 addr:$src2), (i8 imm:$imm))),
- (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
- def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
- def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
+ memopv2f64, SSEPackedDouble, 1 /* cvt to pshufd */>,
+ TB, OpSize;
}
let Predicates = [HasAVX] in {
- def : Pat<(v4f32 (X86Shufps VR128:$src1,
- (memopv4f32 addr:$src2), (i8 imm:$imm))),
- (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
- def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
- def : Pat<(v4i32 (X86Shufps VR128:$src1,
+ def : Pat<(v4i32 (X86Shufp VR128:$src1,
(bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
(VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
- def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
(VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
- // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
- // fall back to this for SSE1)
- def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
- (VSHUFPSrri VR128:$src2, VR128:$src1,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- // Special unary SHUFPSrri case.
- def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
- (VSHUFPSrri VR128:$src1, VR128:$src1,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- // Special binary v4i32 shuffle cases with SHUFPS.
- def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
- (VSHUFPSrri VR128:$src1, VR128:$src2,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2)))),
- (VSHUFPSrmi VR128:$src1, addr:$src2,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- // Special unary SHUFPDrri cases.
- def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
- (VSHUFPDrri VR128:$src1, VR128:$src1,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
- (VSHUFPDrri VR128:$src1, VR128:$src1,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
- // Special binary v2i64 shuffle cases using SHUFPDrri.
- def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
- (VSHUFPDrri VR128:$src1, VR128:$src2,
- (SHUFFLE_get_shuf_imm VR128:$src3))>;
-
- def : Pat<(v2f64 (X86Shufps VR128:$src1,
- (memopv2f64 addr:$src2), (i8 imm:$imm))),
+
+ def : Pat<(v2i64 (X86Shufp VR128:$src1,
+ (memopv2i64 addr:$src2), (i8 imm:$imm))),
(VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
- def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
- def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
(VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
// 256-bit patterns
- def : Pat<(v8i32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
- def : Pat<(v8i32 (X86Shufps VR256:$src1,
+ def : Pat<(v8i32 (X86Shufp VR256:$src1,
(bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
(VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
- def : Pat<(v8f32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
- def : Pat<(v8f32 (X86Shufps VR256:$src1,
- (memopv8f32 addr:$src2), (i8 imm:$imm))),
- (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
-
- def : Pat<(v4i64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
- def : Pat<(v4i64 (X86Shufpd VR256:$src1,
+ def : Pat<(v4i64 (X86Shufp VR256:$src1,
(memopv4i64 addr:$src2), (i8 imm:$imm))),
(VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
+}
- def : Pat<(v4f64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
- def : Pat<(v4f64 (X86Shufpd VR256:$src1,
- (memopv4f64 addr:$src2), (i8 imm:$imm))),
- (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
+let Predicates = [HasSSE1] in {
+ def : Pat<(v4i32 (X86Shufp VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
+ (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
+ def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
+}
+
+let Predicates = [HasSSE2] in {
+ // Generic SHUFPD patterns
+ def : Pat<(v2i64 (X86Shufp VR128:$src1,
+ (memopv2i64 addr:$src2), (i8 imm:$imm))),
+ (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
+ def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
}
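
For reference, the (i8 imm:$imm) operand all of these X86Shufp patterns match is the same lane selector the C shuffle intrinsics take; a minimal C sketch, not part of this change, with illustrative values:

#include <stdio.h>
#include <xmmintrin.h>            /* SSE1: _mm_shuffle_ps, _MM_SHUFFLE */

int main(void) {
    __m128 a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);  /* lanes 0..3 = 0,1,2,3 */
    __m128 b = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f);  /* lanes 0..3 = 4,5,6,7 */

    /* The imm8 picks two lanes from each source: result lanes 0-1 come
       from a, lanes 2-3 from b.  _MM_SHUFFLE(3,2,1,0) == 0xE4, the same
       immediate SHUFPSrri consumes. */
    __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));

    float out[4];
    _mm_storeu_ps(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 0 1 6 7 */
    return 0;
}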
//===----------------------------------------------------------------------===//
@@ -2354,159 +2483,80 @@ let Predicates = [HasAVX] in {
//===----------------------------------------------------------------------===//
/// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
-multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
+multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
PatFrag mem_frag, RegisterClass RC,
X86MemOperand x86memop, string asm,
Domain d> {
def rr : PI<opc, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2),
asm, [(set RC:$dst,
- (vt (OpNode RC:$src1, RC:$src2)))], d>;
+ (vt (OpNode RC:$src1, RC:$src2)))],
+ IIC_SSE_UNPCK, d>;
def rm : PI<opc, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, x86memop:$src2),
asm, [(set RC:$dst,
(vt (OpNode RC:$src1,
- (mem_frag addr:$src2))))], d>;
-}
-
-let AddedComplexity = 10 in {
- defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
- VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V;
- defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
- VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
- defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
- VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V;
- defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
- VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
-
- defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
- VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V;
- defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
- VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
- defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
- VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedSingle>, TB, VEX_4V;
- defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
- VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SSEPackedDouble>, TB, OpSize, VEX_4V;
-
- let Constraints = "$src1 = $dst" in {
- defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
- VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, TB;
- defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
- VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
- SSEPackedDouble>, TB, OpSize;
- defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
- VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, TB;
- defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
- VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
- SSEPackedDouble>, TB, OpSize;
- } // Constraints = "$src1 = $dst"
-} // AddedComplexity
+ (mem_frag addr:$src2))))],
+ IIC_SSE_UNPCK, d>;
+}
+
+defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
+ VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, TB, VEX_4V;
+defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
+ VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, TB, OpSize, VEX_4V;
+defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
+ VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, TB, VEX_4V;
+defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
+ VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, TB, OpSize, VEX_4V;
+
+defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, memopv8f32,
+ VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, TB, VEX_4V;
+defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, memopv4f64,
+ VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, TB, OpSize, VEX_4V;
+defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, memopv8f32,
+ VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, TB, VEX_4V;
+defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, memopv4f64,
+ VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, TB, OpSize, VEX_4V;
-let Predicates = [HasSSE1] in {
- def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
- (UNPCKLPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
- (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
- (UNPCKHPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
- (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
-}
+let Constraints = "$src1 = $dst" in {
+ defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
+ VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
+ defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
+ VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
+ SSEPackedDouble>, TB, OpSize;
+ defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
+ VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
+ defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
+ VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
+ SSEPackedDouble>, TB, OpSize;
+} // Constraints = "$src1 = $dst"
-let Predicates = [HasSSE2] in {
- def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
- (UNPCKLPDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
- (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
- (UNPCKHPDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
- (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
-
- // FIXME: Instead of X86Movddup, there should be a X86Unpcklpd here, the
+let Predicates = [HasAVX], AddedComplexity = 1 in {
+  // FIXME: Instead of X86Movddup, there should be an X86Unpckl here; the
   // problem is during lowering, where it's not possible to recognize the load
   // fold because it has two uses through a bitcast. One use disappears at isel
   // time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Movddup VR128:$src)),
- (UNPCKLPDrr VR128:$src, VR128:$src)>;
-
- let AddedComplexity = 10 in
- def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
- (UNPCKLPDrr VR128:$src, VR128:$src)>;
+ (VUNPCKLPDrr VR128:$src, VR128:$src)>;
}
-let Predicates = [HasAVX] in {
- def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
- (VUNPCKLPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
- (VUNPCKLPSrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
- (VUNPCKHPSrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
- (VUNPCKHPSrr VR128:$src1, VR128:$src2)>;
-
- def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
- (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
- (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
- (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, (memopv8i32 addr:$src2))),
- (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
- (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
- (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, (memopv8i32 addr:$src2))),
- (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
- (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
-
- def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
- (VUNPCKLPDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
- (VUNPCKLPDrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
- (VUNPCKHPDrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
- (VUNPCKHPDrr VR128:$src1, VR128:$src2)>;
-
- def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
- (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
- (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, (memopv4i64 addr:$src2))),
- (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
- (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
- (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
- (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
- def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, (memopv4i64 addr:$src2))),
- (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
- def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
- (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
-
- // FIXME: Instead of X86Movddup, there should be a X86Unpcklpd here, the
+let Predicates = [HasSSE2] in {
+  // FIXME: Instead of X86Movddup, there should be an X86Unpckl here; the
   // problem is during lowering, where it's not possible to recognize the load
   // fold because it has two uses through a bitcast. One use disappears at isel
   // time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Movddup VR128:$src)),
- (VUNPCKLPDrr VR128:$src, VR128:$src)>;
- let AddedComplexity = 10 in
- def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
- (VUNPCKLPDrr VR128:$src, VR128:$src)>;
+ (UNPCKLPDrr VR128:$src, VR128:$src)>;
}
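
The interleave semantics these multiclasses encode (zip the low halves of the two sources for unpcklps, the high halves for unpckhps) look like this at the intrinsic level; a small C sketch with illustrative values:

#include <stdio.h>
#include <xmmintrin.h>   /* SSE1: _mm_unpacklo_ps, _mm_unpackhi_ps */

int main(void) {
    __m128 a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f); /* lanes = {0,1,2,3} */
    __m128 b = _mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f); /* lanes = {4,5,6,7} */

    /* unpcklps interleaves the low halves:  {a0,b0,a1,b1} */
    __m128 lo = _mm_unpacklo_ps(a, b);
    /* unpckhps interleaves the high halves: {a2,b2,a3,b3} */
    __m128 hi = _mm_unpackhi_ps(a, b);

    float l[4], h[4];
    _mm_storeu_ps(l, lo);
    _mm_storeu_ps(h, hi);
    printf("lo: %g %g %g %g\n", l[0], l[1], l[2], l[3]); /* 0 4 1 5 */
    printf("hi: %g %g %g %g\n", h[0], h[1], h[2], h[3]); /* 2 6 3 7 */
    return 0;
}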
//===----------------------------------------------------------------------===//
@@ -2518,29 +2568,12 @@ multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
Domain d> {
def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set GR32:$dst, (Int RC:$src))], d>;
+ [(set GR32:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>;
def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
- !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [],
+ IIC_SSE_MOVMSK, d>, REX_W;
}
-defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
- SSEPackedSingle>, TB;
-defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
- SSEPackedDouble>, TB, OpSize;
-
-def : Pat<(i32 (X86fgetsign FR32:$src)),
- (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
- sub_ss))>, Requires<[HasSSE1]>;
-def : Pat<(i64 (X86fgetsign FR32:$src)),
- (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
- sub_ss))>, Requires<[HasSSE1]>;
-def : Pat<(i32 (X86fgetsign FR64:$src)),
- (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
- sub_sd))>, Requires<[HasSSE2]>;
-def : Pat<(i64 (X86fgetsign FR64:$src)),
- (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
- sub_sd))>, Requires<[HasSSE2]>;
-
let Predicates = [HasAVX] in {
defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
"movmskps", SSEPackedSingle>, TB, VEX;
@@ -2568,17 +2601,105 @@ let Predicates = [HasAVX] in {
// Assembler Only
def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
+ "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
+ SSEPackedSingle>, TB, VEX;
def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB,
+ "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
+ SSEPackedDouble>, TB,
OpSize, VEX;
def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
- "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
+ "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
+ SSEPackedSingle>, TB, VEX;
def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
- "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB,
+ "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK,
+ SSEPackedDouble>, TB,
OpSize, VEX;
}
+defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
+ SSEPackedSingle>, TB;
+defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
+ SSEPackedDouble>, TB, OpSize;
+
+def : Pat<(i32 (X86fgetsign FR32:$src)),
+ (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
+ sub_ss))>, Requires<[HasSSE1]>;
+def : Pat<(i64 (X86fgetsign FR32:$src)),
+ (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
+ sub_ss))>, Requires<[HasSSE1]>;
+def : Pat<(i32 (X86fgetsign FR64:$src)),
+ (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
+ sub_sd))>, Requires<[HasSSE2]>;
+def : Pat<(i64 (X86fgetsign FR64:$src)),
+ (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
+ sub_sd))>, Requires<[HasSSE2]>;
+
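+The X86fgetsign patterns above reuse MOVMSK to pull a scalar's sign bit into a
+GPR; the packed form is _mm_movemask_ps at the C level. A sketch, with
+illustrative values:
+
+#include <stdio.h>
+#include <xmmintrin.h>   /* SSE1: _mm_movemask_ps */
+
+int main(void) {
+    /* movmskps gathers the sign bit of each packed float into the low
+       four bits of a GPR, one bit per lane. */
+    __m128 v = _mm_set_ps(-4.0f, 3.0f, -2.0f, 1.0f); /* lanes: 1,-2,3,-4 */
+    int mask = _mm_movemask_ps(v);
+    printf("mask = 0x%x\n", mask); /* 0xa: lanes 1 and 3 are negative */
+    return 0;
+}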
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Logical Instructions
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in { // SSE integer instructions
+
+/// PDI_binop_rm - Simple SSE2 binary operator.
+multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop,
+ OpndItins itins,
+ bit IsCommutable = 0,
+ bit Is2Addr = 1> {
+ let isCommutable = IsCommutable in
+ def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>;
+ def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpVT (OpNode RC:$src1,
+ (bitconvert (memop_frag addr:$src2)))))],
+ itins.rm>;
+}
+} // ExeDomain = SSEPackedInt
+
+// These are ordered here to satisfy pattern-ordering requirements with the FP versions.
+
+let Predicates = [HasAVX] in {
+defm VPAND : PDI_binop_rm<0xDB, "vpand", and, v2i64, VR128, memopv2i64,
+ i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+defm VPOR : PDI_binop_rm<0xEB, "vpor" , or, v2i64, VR128, memopv2i64,
+ i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+defm VPXOR : PDI_binop_rm<0xEF, "vpxor", xor, v2i64, VR128, memopv2i64,
+ i128mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+defm VPANDN : PDI_binop_rm<0xDF, "vpandn", X86andnp, v2i64, VR128, memopv2i64,
+ i128mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+defm PAND : PDI_binop_rm<0xDB, "pand", and, v2i64, VR128, memopv2i64,
+ i128mem, SSE_BIT_ITINS_P, 1>;
+defm POR : PDI_binop_rm<0xEB, "por" , or, v2i64, VR128, memopv2i64,
+ i128mem, SSE_BIT_ITINS_P, 1>;
+defm PXOR : PDI_binop_rm<0xEF, "pxor", xor, v2i64, VR128, memopv2i64,
+ i128mem, SSE_BIT_ITINS_P, 1>;
+defm PANDN : PDI_binop_rm<0xDF, "pandn", X86andnp, v2i64, VR128, memopv2i64,
+ i128mem, SSE_BIT_ITINS_P, 0>;
+} // Constraints = "$src1 = $dst"
+
+let Predicates = [HasAVX2] in {
+defm VPANDY : PDI_binop_rm<0xDB, "vpand", and, v4i64, VR256, memopv4i64,
+ i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+defm VPORY : PDI_binop_rm<0xEB, "vpor", or, v4i64, VR256, memopv4i64,
+ i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+defm VPXORY : PDI_binop_rm<0xEF, "vpxor", xor, v4i64, VR256, memopv4i64,
+ i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V;
+defm VPANDNY : PDI_binop_rm<0xDF, "vpandn", X86andnp, v4i64, VR256, memopv4i64,
+ i256mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V;
+}
+
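+One detail worth calling out: pandn complements its *first* operand, which is
+why these patterns use X86andnp rather than composing a plain and with a not.
+A C sketch of the four operations, with illustrative constants:
+
+#include <stdio.h>
+#include <emmintrin.h>   /* SSE2: _mm_and_si128, _mm_andnot_si128, ... */
+
+int main(void) {
+    __m128i a = _mm_set1_epi32(0x0ff0);
+    __m128i b = _mm_set1_epi32(0x00ff);
+    int r[4];
+
+    _mm_storeu_si128((__m128i *)r, _mm_and_si128(a, b));    /* pand  */
+    printf("pand  = 0x%04x\n", r[0]);                       /* 0x00f0 */
+    _mm_storeu_si128((__m128i *)r, _mm_or_si128(a, b));     /* por   */
+    printf("por   = 0x%04x\n", r[0]);                       /* 0x0fff */
+    _mm_storeu_si128((__m128i *)r, _mm_xor_si128(a, b));    /* pxor  */
+    printf("pxor  = 0x%04x\n", r[0]);                       /* 0x0f0f */
+    _mm_storeu_si128((__m128i *)r, _mm_andnot_si128(a, b)); /* pandn: (~a) & b */
+    printf("pandn = 0x%04x\n", r[0]);                       /* 0x000f */
+    return 0;
+}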
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Logical Instructions
//===----------------------------------------------------------------------===//
@@ -2586,31 +2707,39 @@ let Predicates = [HasAVX] in {
/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
///
multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
- SDNode OpNode> {
+ SDNode OpNode, OpndItins itins> {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
- FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, TB, VEX_4V;
+ FR32, f32, f128mem, memopfsf32, SSEPackedSingle, itins, 0>,
+ TB, VEX_4V;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
- FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, TB, OpSize, VEX_4V;
+ FR64, f64, f128mem, memopfsf64, SSEPackedDouble, itins, 0>,
+ TB, OpSize, VEX_4V;
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
- f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
+ f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
+ TB;
defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
- f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
+ f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
+ TB, OpSize;
}
}
// Alias bitwise logical operations using SSE logical ops on packed FP values.
let mayLoad = 0 in {
- defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
- defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
- defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
+ defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
+ SSE_BIT_ITINS_P>;
+ defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for,
+ SSE_BIT_ITINS_P>;
+ defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
+ SSE_BIT_ITINS_P>;
}
let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
- defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
+ defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef,
+ SSE_BIT_ITINS_P>;
/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
@@ -2623,7 +2752,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, [],
[(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
+ (memopv2i64 addr:$src2)))], 0, 1>, TB, VEX_4V;
defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem,
@@ -2697,118 +2826,145 @@ let isCommutable = 0 in
/// FIXME: once all 256-bit intrinsics are matched, clean up and refactor those
/// classes below
multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ SizeItins itins,
bit Is2Addr = 1> {
defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
- OpNode, FR32, f32mem, Is2Addr>, XS;
+ OpNode, FR32, f32mem,
+ itins.s, Is2Addr>, XS;
defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
- OpNode, FR64, f64mem, Is2Addr>, XD;
+ OpNode, FR64, f64mem,
+ itins.d, Is2Addr>, XD;
}
multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ SizeItins itins,
bit Is2Addr = 1> {
let mayLoad = 0 in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
- v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
+ v4f32, f128mem, memopv4f32, SSEPackedSingle, itins.s, Is2Addr>,
+ TB;
defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
- v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
+ v2f64, f128mem, memopv2f64, SSEPackedDouble, itins.d, Is2Addr>,
+ TB, OpSize;
}
}
multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
- SDNode OpNode> {
+ SDNode OpNode,
+ SizeItins itins> {
let mayLoad = 0 in {
defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
- v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
+ v8f32, f256mem, memopv8f32, SSEPackedSingle, itins.s, 0>,
+ TB;
defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
- v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
+ v4f64, f256mem, memopv4f64, SSEPackedDouble, itins.d, 0>,
+ TB, OpSize;
}
}
multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
+ SizeItins itins,
bit Is2Addr = 1> {
defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
- !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
+ !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
+ itins.s, Is2Addr>, XS;
defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
- !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
+ !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
+ itins.d, Is2Addr>, XD;
}
multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
+ SizeItins itins,
bit Is2Addr = 1> {
defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
!strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
- SSEPackedSingle, Is2Addr>, TB;
+ SSEPackedSingle, itins.s, Is2Addr>,
+ TB;
defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
!strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
- SSEPackedDouble, Is2Addr>, TB, OpSize;
+ SSEPackedDouble, itins.d, Is2Addr>,
+ TB, OpSize;
}
-multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
+multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr,
+ SizeItins itins> {
defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
!strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
- SSEPackedSingle, 0>, TB;
+ SSEPackedSingle, itins.s, 0>, TB;
defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
!strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
- SSEPackedDouble, 0>, TB, OpSize;
+ SSEPackedDouble, itins.d, 0>, TB, OpSize;
}
// Binary Arithmetic instructions
-defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
- basic_sse12_fp_binop_s_int<0x58, "add", 0>, VEX_4V, VEX_LIG;
-defm VADD : basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
- basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
-defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
- basic_sse12_fp_binop_s_int<0x59, "mul", 0>, VEX_4V, VEX_LIG;
-defm VMUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
- basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
+defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S, 0>,
+ basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S, 0>,
+ VEX_4V, VEX_LIG;
+defm VADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_y<0x58, "add", fadd, SSE_ALU_ITINS_P>,
+ VEX_4V;
+defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S, 0>,
+ basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S, 0>,
+ VEX_4V, VEX_LIG;
+defm VMUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_y<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
+ VEX_4V;
let isCommutable = 0 in {
- defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
- basic_sse12_fp_binop_s_int<0x5C, "sub", 0>, VEX_4V, VEX_LIG;
- defm VSUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
- basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
- defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
- basic_sse12_fp_binop_s_int<0x5E, "div", 0>, VEX_4V, VEX_LIG;
- defm VDIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
- basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
- defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
- basic_sse12_fp_binop_s_int<0x5F, "max", 0>, VEX_4V, VEX_LIG;
- defm VMAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
- basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
- basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
- basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
- defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
- basic_sse12_fp_binop_s_int<0x5D, "min", 0>, VEX_4V, VEX_LIG;
- defm VMIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
- basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
- basic_sse12_fp_binop_p_y_int<0x5D, "min">,
- basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
+ defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S, 0>,
+ basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S, 0>,
+ VEX_4V, VEX_LIG;
+ defm VSUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_y<0x5C, "sub", fsub, SSE_ALU_ITINS_P>, VEX_4V;
+ defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S, 0>,
+ basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S, 0>,
+ VEX_4V, VEX_LIG;
+ defm VDIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_y<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
+ VEX_4V;
+ defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S, 0>,
+ basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S, 0>,
+ VEX_4V, VEX_LIG;
+ defm VMAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_int<0x5F, "max", SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_p_y_int<0x5F, "max", SSE_ALU_ITINS_P>,
+ VEX_4V;
+ defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S, 0>,
+ basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S, 0>,
+ VEX_4V, VEX_LIG;
+ defm VMIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_int<0x5D, "min", SSE_ALU_ITINS_P, 0>,
+ basic_sse12_fp_binop_p_y_int<0x5D, "min", SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
+ VEX_4V;
}
let Constraints = "$src1 = $dst" in {
- defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
- basic_sse12_fp_binop_p<0x58, "add", fadd>,
- basic_sse12_fp_binop_s_int<0x58, "add">;
- defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
- basic_sse12_fp_binop_p<0x59, "mul", fmul>,
- basic_sse12_fp_binop_s_int<0x59, "mul">;
+ defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
+ defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
+ basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
+ basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
let isCommutable = 0 in {
- defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
- basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
- basic_sse12_fp_binop_s_int<0x5C, "sub">;
- defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
- basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
- basic_sse12_fp_binop_s_int<0x5E, "div">;
- defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
- basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
- basic_sse12_fp_binop_s_int<0x5F, "max">,
- basic_sse12_fp_binop_p_int<0x5F, "max">;
- defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
- basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
- basic_sse12_fp_binop_s_int<0x5D, "min">,
- basic_sse12_fp_binop_p_int<0x5D, "min">;
+ defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
+ defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
+ basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
+ basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
+ defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_p_int<0x5F, "max", SSE_ALU_ITINS_P>;
+ defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
+ basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>,
+ basic_sse12_fp_binop_p_int<0x5D, "min", SSE_ALU_ITINS_P>;
}
}
@@ -2820,9 +2976,25 @@ let Constraints = "$src1 = $dst" in {
///
/// And we have a special variant for the full-vector intrinsic form.
+def SSE_SQRTP : OpndItins<
+ IIC_SSE_SQRTP_RR, IIC_SSE_SQRTP_RM
+>;
+
+def SSE_SQRTS : OpndItins<
+ IIC_SSE_SQRTS_RR, IIC_SSE_SQRTS_RM
+>;
+
+def SSE_RCPP : OpndItins<
+ IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
+>;
+
+def SSE_RCPS : OpndItins<
+ IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
+>;
+
/// sse1_fp_unop_s - SSE1 unops in scalar form.
multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
- SDNode OpNode, Intrinsic F32Int> {
+ SDNode OpNode, Intrinsic F32Int, OpndItins itins> {
def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set FR32:$dst, (OpNode FR32:$src))]>;
@@ -2832,14 +3004,14 @@ multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
// partial register update condition.
def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
+ [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
Requires<[HasSSE1, OptForSize]>;
def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F32Int VR128:$src))]>;
+ [(set VR128:$dst, (F32Int VR128:$src))], itins.rr>;
def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
+ [(set VR128:$dst, (F32Int sse_load_f32:$src))], itins.rm>;
}
/// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
@@ -2852,80 +3024,91 @@ multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins ssmem:$src1, VR128:$src2),
+ (ins VR128:$src1, ssmem:$src2),
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
}
/// sse1_fp_unop_p - SSE1 unops in packed form.
-multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ OpndItins itins> {
def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
+ [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>;
def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
+ [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>;
}
/// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
-multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ OpndItins itins> {
def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
+ [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
+ itins.rr>;
def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
+ [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))],
+ itins.rm>;
}
/// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
- Intrinsic V4F32Int> {
+ Intrinsic V4F32Int, OpndItins itins> {
def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V4F32Int VR128:$src))]>;
+ [(set VR128:$dst, (V4F32Int VR128:$src))],
+ itins.rr>;
def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
+ [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
+ itins.rm>;
}
/// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
- Intrinsic V4F32Int> {
+ Intrinsic V4F32Int, OpndItins itins> {
def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (V4F32Int VR256:$src))]>;
+ [(set VR256:$dst, (V4F32Int VR256:$src))],
+ itins.rr>;
def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
+ [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))],
+ itins.rm>;
}
/// sse2_fp_unop_s - SSE2 unops in scalar form.
multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
- SDNode OpNode, Intrinsic F64Int> {
+ SDNode OpNode, Intrinsic F64Int, OpndItins itins> {
def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set FR64:$dst, (OpNode FR64:$src))]>;
+ [(set FR64:$dst, (OpNode FR64:$src))], itins.rr>;
// See the comments in sse1_fp_unop_s for why this is OptForSize.
def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
+ [(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
Requires<[HasSSE2, OptForSize]>;
def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F64Int VR128:$src))]>;
+ [(set VR128:$dst, (F64Int VR128:$src))], itins.rr>;
def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
+ [(set VR128:$dst, (F64Int sse_load_f64:$src))], itins.rm>;
}
/// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
+ let neverHasSideEffects = 1 in {
def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ let mayLoad = 1 in
def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ }
def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, sdmem:$src2),
!strconcat(OpcodeStr,
@@ -2934,45 +3117,52 @@ multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
- SDNode OpNode> {
+ SDNode OpNode, OpndItins itins> {
def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
+ [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>;
def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
+ [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>;
}
/// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
-multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ OpndItins itins> {
def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
+ [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
+ itins.rr>;
def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
+ [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))],
+ itins.rm>;
}
/// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
- Intrinsic V2F64Int> {
+ Intrinsic V2F64Int, OpndItins itins> {
def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V2F64Int VR128:$src))]>;
+ [(set VR128:$dst, (V2F64Int VR128:$src))],
+ itins.rr>;
def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
+ [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))],
+ itins.rm>;
}
/// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
- Intrinsic V2F64Int> {
+ Intrinsic V2F64Int, OpndItins itins> {
def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (V2F64Int VR256:$src))]>;
+ [(set VR256:$dst, (V2F64Int VR256:$src))],
+ itins.rr>;
def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
+ [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))],
+ itins.rm>;
}
let Predicates = [HasAVX] in {
@@ -2980,31 +3170,40 @@ let Predicates = [HasAVX] in {
defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V, VEX_LIG;
- defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
- sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
- sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
- sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
- sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
- sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
- sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
- sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
+ defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
+ sse2_fp_unop_p<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
+ sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
+ sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt, SSE_SQRTP>,
+ sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps,
+ SSE_SQRTP>,
+ sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd,
+ SSE_SQRTP>,
+ sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256,
+ SSE_SQRTP>,
+ sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256,
+ SSE_SQRTP>,
VEX;
// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V, VEX_LIG;
- defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
- sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
- sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
- sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
+ defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt, SSE_SQRTP>,
+ sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt, SSE_SQRTP>,
+ sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256,
+ SSE_SQRTP>,
+ sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps,
+ SSE_SQRTP>, VEX;
defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V, VEX_LIG;
- defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
- sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
- sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
- sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
+ defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp, SSE_RCPP>,
+ sse1_fp_unop_p_y<0x53, "vrcp", X86frcp, SSE_RCPP>,
+ sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256,
+ SSE_RCPP>,
+ sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps,
+ SSE_RCPP>, VEX;
}
+let AddedComplexity = 1 in {
def : Pat<(f32 (fsqrt FR32:$src)),
(VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(f32 (fsqrt (load addr:$src))),
@@ -3027,8 +3226,9 @@ def : Pat<(f32 (X86frcp FR32:$src)),
def : Pat<(f32 (X86frcp (load addr:$src))),
(VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
Requires<[HasAVX, OptForSize]>;
+}
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX], AddedComplexity = 1 in {
def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
(INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
(VSQRTSSr (f32 (IMPLICIT_DEF)),
@@ -3063,21 +3263,26 @@ let Predicates = [HasAVX] in {
}
// Square root.
-defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
- sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
- sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
- sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
- sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
- sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
+defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss,
+ SSE_SQRTS>,
+ sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
+ sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps, SSE_SQRTS>,
+ sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd,
+ SSE_SQRTS>,
+ sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>,
+ sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd, SSE_SQRTS>;
// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
-defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
- sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
- sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
-defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
- sse1_fp_unop_p<0x53, "rcp", X86frcp>,
- sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
+defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss,
+ SSE_SQRTS>,
+ sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTS>,
+ sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
+ SSE_SQRTS>;
+defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss,
+ SSE_RCPS>,
+ sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPS>,
+ sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps, SSE_RCPS>;
// There is no f64 version of the reciprocal approximation instructions.
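
The refinement the comments above allude to is usually a single Newton-Raphson step on the roughly 12-bit rcpps/rsqrtps estimate. A hedged C sketch of the standard reciprocal iteration (the helper name is illustrative, not part of this change):

#include <stdio.h>
#include <xmmintrin.h>   /* SSE1: _mm_rcp_ps */

/* rcpps gives about 12 bits of precision; one Newton-Raphson step,
   x1 = x0 * (2 - a*x0), roughly doubles that. */
static __m128 rcp_nr(__m128 a) {
    __m128 x0 = _mm_rcp_ps(a);
    __m128 two = _mm_set1_ps(2.0f);
    return _mm_mul_ps(x0, _mm_sub_ps(two, _mm_mul_ps(a, x0)));
}

int main(void) {
    __m128 a = _mm_set1_ps(3.0f);
    float raw[4], ref[4];
    _mm_storeu_ps(raw, _mm_rcp_ps(a));
    _mm_storeu_ps(ref, rcp_nr(a));
    printf("estimate %.9f  refined %.9f  exact %.9f\n",
           raw[0], ref[0], 1.0f / 3.0f);
    return 0;
}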
@@ -3090,24 +3295,22 @@ let AddedComplexity = 400 in { // Prefer non-temporal versions
(ins f128mem:$dst, VR128:$src),
"movntps\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4f32 VR128:$src),
- addr:$dst)]>, VEX;
+ addr:$dst)],
+ IIC_SSE_MOVNT>, VEX;
def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
(ins f128mem:$dst, VR128:$src),
"movntpd\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v2f64 VR128:$src),
- addr:$dst)]>, VEX;
- def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
- (ins f128mem:$dst, VR128:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v2f64 VR128:$src),
- addr:$dst)]>, VEX;
+ addr:$dst)],
+ IIC_SSE_MOVNT>, VEX;
let ExeDomain = SSEPackedInt in
def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
(ins f128mem:$dst, VR128:$src),
"movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v4f32 VR128:$src),
- addr:$dst)]>, VEX;
+ [(alignednontemporalstore (v2i64 VR128:$src),
+ addr:$dst)],
+ IIC_SSE_MOVNT>, VEX;
def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
(VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
@@ -3116,23 +3319,21 @@ let AddedComplexity = 400 in { // Prefer non-temporal versions
(ins f256mem:$dst, VR256:$src),
"movntps\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v8f32 VR256:$src),
- addr:$dst)]>, VEX;
+ addr:$dst)],
+ IIC_SSE_MOVNT>, VEX;
def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
(ins f256mem:$dst, VR256:$src),
"movntpd\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4f64 VR256:$src),
- addr:$dst)]>, VEX;
- def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
- (ins f256mem:$dst, VR256:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v4f64 VR256:$src),
- addr:$dst)]>, VEX;
+ addr:$dst)],
+ IIC_SSE_MOVNT>, VEX;
let ExeDomain = SSEPackedInt in
def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
(ins f256mem:$dst, VR256:$src),
"movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v8f32 VR256:$src),
- addr:$dst)]>, VEX;
+ [(alignednontemporalstore (v4i64 VR256:$src),
+ addr:$dst)],
+ IIC_SSE_MOVNT>, VEX;
}
def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
@@ -3145,19 +3346,18 @@ def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
let AddedComplexity = 400 in { // Prefer non-temporal versions
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movntps\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
+ [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVNT>;
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movntpd\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
-
-def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
+ [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVNT>;
let ExeDomain = SSEPackedInt in
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
+ [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
+ IIC_SSE_MOVNT>;
def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
(MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
@@ -3165,11 +3365,13 @@ def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
// There is no AVX form for instructions below this point
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movnti{l}\t{$src, $dst|$dst, $src}",
- [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
+ [(nontemporalstore (i32 GR32:$src), addr:$dst)],
+ IIC_SSE_MOVNT>,
TB, Requires<[HasSSE2]>;
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movnti{q}\t{$src, $dst|$dst, $src}",
- [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
+ [(nontemporalstore (i64 GR64:$src), addr:$dst)],
+ IIC_SSE_MOVNT>,
TB, Requires<[HasSSE2]>;
}
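
At the intrinsic level the stores above are the _mm_stream_* family; a non-temporal store bypasses the cache, requires a 16-byte-aligned destination, and should be followed by sfence before other agents read the data. A minimal C sketch (the buffer size is illustrative):

#include <emmintrin.h>   /* SSE2: _mm_stream_si128; pulls in _mm_sfence */
#include <stdlib.h>

int main(void) {
    /* movntdq requires a 16-byte-aligned destination. */
    __m128i *buf = aligned_alloc(16, 64 * sizeof(__m128i));
    if (!buf)
        return 1;
    __m128i v = _mm_set1_epi32(0x12345678);

    for (int i = 0; i < 64; i++)
        _mm_stream_si128(&buf[i], v);   /* movntdq: bypasses the cache */

    _mm_sfence();                       /* order the non-temporal stores */
    free(buf);
    return 0;
}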
@@ -3178,31 +3380,40 @@ def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
//===----------------------------------------------------------------------===//
// Prefetch intrinsic.
-def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
- "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
-def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
- "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
-def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
- "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
-def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
- "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
+let Predicates = [HasSSE1] in {
+def PREFETCHT0 : I<0x18, MRM1m, (outs), (ins i8mem:$src),
+ "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
+ IIC_SSE_PREFETCH>, TB;
+def PREFETCHT1 : I<0x18, MRM2m, (outs), (ins i8mem:$src),
+ "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
+ IIC_SSE_PREFETCH>, TB;
+def PREFETCHT2 : I<0x18, MRM3m, (outs), (ins i8mem:$src),
+ "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
+ IIC_SSE_PREFETCH>, TB;
+def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
+ "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
+ IIC_SSE_PREFETCH>, TB;
+}
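+The four variants map onto _mm_prefetch with the _MM_HINT_T0/T1/T2/NTA
+constants (note the locality argument in the patterns above runs the other
+way: prefetcht0 is locality 3, prefetchnta is 0). A C sketch; the lookahead
+distance is an illustrative tuning knob, not a rule:
+
+#include <xmmintrin.h>   /* SSE1: _mm_prefetch, _MM_HINT_* */
+
+/* Walk an array, prefetching a cache line ahead of the loads. */
+float sum_with_prefetch(const float *data, int n) {
+    float s = 0.0f;
+    for (int i = 0; i < n; i++) {
+        if (i + 16 < n)
+            _mm_prefetch((const char *)&data[i + 16], _MM_HINT_T0);
+        s += data[i];
+    }
+    return s;
+}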
// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
- "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
- TB, Requires<[HasSSE2]>;
+ "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
+ IIC_SSE_PREFETCH>, TB, Requires<[HasSSE2]>;
// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
-def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
+def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", [], IIC_SSE_PAUSE>, REP;
// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
- "sfence", [(int_x86_sse_sfence)]>, TB, Requires<[HasSSE1]>;
+ "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
+ TB, Requires<[HasSSE1]>;
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
- "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
+ "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
+ TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
- "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
+ "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
+ TB, Requires<[HasSSE2]>;
def : Pat<(X86SFence), (SFENCE)>;
def : Pat<(X86LFence), (LFENCE)>;
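
In C these are _mm_sfence/_mm_lfence/_mm_mfence; a short sketch of which traffic each one orders:

#include <emmintrin.h>   /* SSE2: _mm_lfence, _mm_mfence */
#include <xmmintrin.h>   /* SSE1: _mm_sfence */

void fences(void) {
    _mm_sfence();   /* sfence: orders stores (e.g. after NT stores)   */
    _mm_lfence();   /* lfence: orders loads                           */
    _mm_mfence();   /* mfence: full load/store barrier                */
}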
@@ -3213,14 +3424,18 @@ def : Pat<(X86MFence), (MFENCE)>;
//===----------------------------------------------------------------------===//
def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
- "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
+ "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
+ IIC_SSE_LDMXCSR>, VEX;
def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
- "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
+ "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
+ IIC_SSE_STMXCSR>, VEX;
def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
- "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
+ "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
+ IIC_SSE_LDMXCSR>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
- "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
+ "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
+ IIC_SSE_STMXCSR>;
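+ldmxcsr/stmxcsr give raw access to the MXCSR control/status register; the
+portable C spellings are _mm_getcsr/_mm_setcsr. A sketch that turns on
+flush-to-zero, using the header's _MM_FLUSH_ZERO_ON mask:
+
+#include <xmmintrin.h>   /* SSE1: _mm_getcsr, _mm_setcsr, _MM_FLUSH_ZERO_ON */
+
+void enable_ftz(void) {
+    /* stmxcsr: read MXCSR; set the flush-to-zero bit; ldmxcsr: write back */
+    unsigned int csr = _mm_getcsr();
+    _mm_setcsr(csr | _MM_FLUSH_ZERO_ON);
+}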
//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
@@ -3230,108 +3445,134 @@ let ExeDomain = SSEPackedInt in { // SSE integer instructions
let neverHasSideEffects = 1 in {
def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
+ VEX;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
+ VEX;
}
def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqu\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
+ VEX;
def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
- "movdqu\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
+ VEX;
// For Disassembler
let isCodeGenOnly = 1 in {
def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqa\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>,
+ VEX;
def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqa\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>,
+ VEX;
def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqu\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqu\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVU_P_RR>,
+ VEX;
def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
- "movdqu\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqu\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVU_P_RR>,
+ VEX;
}
let canFoldAsLoad = 1, mayLoad = 1 in {
def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
+ VEX;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
+ VEX;
let Predicates = [HasAVX] in {
def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
+ "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
+ XS, VEX;
def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
+ "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
+ XS, VEX;
}
}
let mayStore = 1 in {
def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
(ins i128mem:$dst, VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
+ VEX;
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
(ins i256mem:$dst, VR256:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
+ VEX;
let Predicates = [HasAVX] in {
def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
+ "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
+ XS, VEX;
def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
+ "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
+ XS, VEX;
}
}
let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>;
+ "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- []>, XS, Requires<[HasSSE2]>;
+ [], IIC_SSE_MOVU_P_RR>, XS, Requires<[HasSSE2]>;
// For Disassembler
let isCodeGenOnly = 1 in {
def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>;
+ "movdqa\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVA_P_RR>;
def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- []>, XS, Requires<[HasSSE2]>;
+ [], IIC_SSE_MOVU_P_RR>, XS, Requires<[HasSSE2]>;
}
let canFoldAsLoad = 1, mayLoad = 1 in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}",
- [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
+ [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
+ IIC_SSE_MOVA_P_RM>;
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
+ [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
+ IIC_SSE_MOVU_P_RM>,
XS, Requires<[HasSSE2]>;
}
let mayStore = 1 in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}",
- [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
+ [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
+ IIC_SSE_MOVA_P_MR>;
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
+ [/*(store (v2i64 VR128:$src), addr:$dst)*/],
+ IIC_SSE_MOVU_P_MR>,
XS, Requires<[HasSSE2]>;
}
// Intrinsic forms of MOVDQU load and store
def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
+ [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)],
+ IIC_SSE_MOVU_P_MR>,
XS, VEX, Requires<[HasAVX]>;
def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
+ [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)],
+ IIC_SSE_MOVU_P_MR>,
XS, Requires<[HasSSE2]>;
} // ExeDomain = SSEPackedInt
let Predicates = [HasAVX] in {
- def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
(VMOVDQUYmr addr:$dst, VR256:$src)>;
}
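
// The aligned (movdqa, IIC_SSE_MOVA_P_*) and unaligned (movdqu,
// IIC_SSE_MOVU_P_*) moves above now carry an instruction-itinerary class as
// a trailing template argument, one per operand form (RR, RM, MR), so
// subtarget scheduling models can cost each form separately. A minimal
// sketch of the format-class shape this relies on; the real PDI lives in
// X86InstrFormats.td, and the IIC_DEFAULT fallback here is an assumption:
class PDI_sketch<bits<8> o, Format F, dag outs, dag ins, string asm,
                 list<dag> pattern, InstrItinClass itin = IIC_DEFAULT>
      : I<o, F, outs, ins, asm, pattern, itin>, TB, OpSize;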
@@ -3340,178 +3581,326 @@ let Predicates = [HasAVX] in {
// SSE2 - Packed Integer Arithmetic Instructions
//===---------------------------------------------------------------------===//
+def SSE_PMADD : OpndItins<
+ IIC_SSE_PMADD, IIC_SSE_PMADD
+>;
+
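+// SSE_PMADD bundles the reg-reg and reg-mem itineraries for pmaddwd under
+// one name so a single OpndItins argument can be threaded through the binop
+// multiclasses below. Reconstructed from how itins.rr/.rm/.ri are consumed
+// in this hunk, the carrier classes are roughly (the exact definitions live
+// earlier in this file, outside this hunk):
+//
+//   class OpndItins_sketch<InstrItinClass arg_rr, InstrItinClass arg_rm> {
+//     InstrItinClass rr = arg_rr;   // itinerary for the reg-reg form
+//     InstrItinClass rm = arg_rm;   // itinerary for the reg-mem form
+//   }
+//   class ShiftOpndItins_sketch<InstrItinClass arg_rr, InstrItinClass arg_rm,
+//                               InstrItinClass arg_ri>
+//         : OpndItins_sketch<arg_rr, arg_rm> {
+//     InstrItinClass ri = arg_ri;   // itinerary for the reg-imm shift form
+//   }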
let ExeDomain = SSEPackedInt in { // SSE integer instructions
multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
- bit IsCommutable = 0, bit Is2Addr = 1> {
+ RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop,
+ OpndItins itins,
+ bit IsCommutable = 0,
+ bit Is2Addr = 1> {
let isCommutable = IsCommutable in
- def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
+ def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
- def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
+ [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>;
+ def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (IntId VR128:$src1,
- (bitconvert (memopv2i64 addr:$src2))))]>;
-}
-
-multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
- string OpcodeStr, Intrinsic IntId,
- Intrinsic IntId2, bit Is2Addr = 1> {
- def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
+ [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
+ itins.rm>;
+}
+
+multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
+ string OpcodeStr, SDNode OpNode,
+ SDNode OpNode2, RegisterClass RC,
+ ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
+ ShiftOpndItins itins,
+ bit Is2Addr = 1> {
+  // The shift amount (src2) is always a 128-bit XMM register, even when
+  // RC is VR256 (AVX2).
+ def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, VR128:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
- def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !if(Is2Addr,
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (IntId VR128:$src1,
- (bitconvert (memopv2i64 addr:$src2))))]>;
- def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- !if(Is2Addr,
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
-}
-
-/// PDI_binop_rm - Simple SSE2 binary operator.
-multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
- let isCommutable = IsCommutable in
- def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
+ [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
+ itins.rr>;
+ def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, i128mem:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
- def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
+ [(set RC:$dst, (DstVT (OpNode RC:$src1,
+ (bc_frag (memopv2i64 addr:$src2)))))], itins.rm>;
+ def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
+ (ins RC:$src1, i32i8imm:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
- (bitconvert (memopv2i64 addr:$src2)))))]>;
+ [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i32 imm:$src2))))], itins.ri>;
}
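// PDI_binop_rmi emits three records per shift: rr (amount in an XMM
// register), rm (amount loaded from memory), and ri (imm8 via the
// opc2/ImmForm encoding, e.g. 0x71 /6 for psllw). For example, the PSLLW
// instantiation further below yields PSLLWrr, PSLLWrm and PSLLWri, costed
// with itins.rr, itins.rm and itins.ri respectively.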
-/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
-///
-/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
-/// to collapse (bitconvert VT to VT) into its operand.
-///
-multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
- bit IsCommutable = 0, bit Is2Addr = 1> {
+/// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst types.
+multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType DstVT, ValueType SrcVT, RegisterClass RC,
+ PatFrag memop_frag, X86MemOperand x86memop,
+ OpndItins itins,
+ bit IsCommutable = 0, bit Is2Addr = 1> {
let isCommutable = IsCommutable in
- def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
+ def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
- def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
+ [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>;
+ def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
+ [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
+ (bitconvert (memop_frag addr:$src2)))))]>;
}
-
} // ExeDomain = SSEPackedInt
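// PDI_binop_rm2 covers operations whose source and destination element types
// differ; pmuludq is the motivating case. It multiplies the even (low of
// each pair) 32-bit lanes of its sources and widens the products,
// v4i32 x v4i32 -> v2i64 (v8i32 x v8i32 -> v4i64 for the AVX2 Y form):
//   pmuludq [a0,a1,a2,a3], [b0,b1,b2,b3]
//     -> [zext(a0)*zext(b0), zext(a2)*zext(b2)]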
// 128-bit (and, under AVX2, 256-bit) Integer Arithmetic
let Predicates = [HasAVX] in {
-defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
-defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
-defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
-defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
-defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
-defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
-defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
-defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
-defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
+defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 1, 0 /*3addr*/>,
+ VEX_4V;
+defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDQ : PDI_binop_rm<0xD4, "vpaddq", add, v2i64, VR128, memopv2i64,
+ i128mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V;
+defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, VR128, memopv2i64,
+ i128mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBQ : PDI_binop_rm<0xFB, "vpsubq", sub, v2i64, VR128, memopv2i64,
+ i128mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V;
+defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32, VR128,
+ memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1, 0>,
+ VEX_4V;
+
+// Intrinsic forms
+defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd,
+ VR128, memopv2i64, i128mem,
+ SSE_PMADD, 1, 0>, VEX_4V;
+defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+}
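+// Every AVX variant above passes Is2Addr = 0, which makes the !if() in the
+// multiclass pick the three-operand asm string, and appends VEX_4V, which
+// encodes the extra source register in the VEX.vvvv field. One multiclass
+// thus serves both encodings:
+//   SSE (Is2Addr = 1):  "paddb\t{$src2, $dst|$dst, $src2}"    ($src1 tied to $dst)
+//   AVX (Is2Addr = 0):  "vpaddb\t{$src2, $src1, $dst|$dst, $src1, $src2}"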
+
+let Predicates = [HasAVX2] in {
+defm VPADDBY : PDI_binop_rm<0xFC, "vpaddb", add, v32i8, VR256, memopv4i64,
+ i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDWY : PDI_binop_rm<0xFD, "vpaddw", add, v16i16, VR256, memopv4i64,
+ i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDDY : PDI_binop_rm<0xFE, "vpaddd", add, v8i32, VR256, memopv4i64,
+ i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDQY : PDI_binop_rm<0xD4, "vpaddq", add, v4i64, VR256, memopv4i64,
+ i256mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V;
+defm VPMULLWY : PDI_binop_rm<0xD5, "vpmullw", mul, v16i16, VR256, memopv4i64,
+ i256mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+defm VPSUBBY : PDI_binop_rm<0xF8, "vpsubb", sub, v32i8, VR256, memopv4i64,
+ i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBWY : PDI_binop_rm<0xF9, "vpsubw", sub, v16i16,VR256, memopv4i64,
+ i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBDY : PDI_binop_rm<0xFA, "vpsubd", sub, v8i32, VR256, memopv4i64,
+ i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBQY : PDI_binop_rm<0xFB, "vpsubq", sub, v4i64, VR256, memopv4i64,
+ i256mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V;
+defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
+ VR256, memopv4i64, i256mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
// Intrinsic forms
-defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
- VEX_4V;
-defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
- VEX_4V;
-defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
- VEX_4V;
-defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
- VEX_4V;
-defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
- VEX_4V;
-defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
- VEX_4V;
-defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
- VEX_4V;
-defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
- VEX_4V;
-defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
- VEX_4V;
-defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
- VEX_4V;
-defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
- VEX_4V;
-defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
- VEX_4V;
-defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
- VEX_4V;
-defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
- VEX_4V;
-defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
- VEX_4V;
-defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
- VEX_4V;
-defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
- VEX_4V;
-defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
- VEX_4V;
-defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
- VEX_4V;
+defm VPSUBSBY : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_avx2_psubs_b,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBSWY : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_avx2_psubs_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBUSBY : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_avx2_psubus_b,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPSUBUSWY : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_avx2_psubus_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPADDSBY : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_avx2_padds_b,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDSWY : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_avx2_padds_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDUSBY : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_avx2_paddus_b,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPADDUSWY : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_avx2_paddus_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMULHUWY : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_avx2_pmulhu_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+defm VPMULHWY : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_avx2_pmulh_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
+defm VPMADDWDY : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_avx2_pmadd_wd,
+ VR256, memopv4i64, i256mem,
+ SSE_PMADD, 1, 0>, VEX_4V;
+defm VPAVGBY : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_avx2_pavg_b,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPAVGWY : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_avx2_pavg_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMINUBY : PDI_binop_rm_int<0xDA, "vpminub", int_x86_avx2_pminu_b,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMINSWY : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_avx2_pmins_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMAXUBY : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_avx2_pmaxu_b,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPMAXSWY : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_avx2_pmaxs_w,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+defm VPSADBWY : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_avx2_psad_bw,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
-defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
-defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
-defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
-defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
-defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
-defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
-defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
-defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
-defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
+defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 1>;
+defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 1>;
+defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P, 1>;
+defm PADDQ : PDI_binop_rm<0xD4, "paddq", add, v2i64, VR128, memopv2i64,
+ i128mem, SSE_INTALUQ_ITINS_P, 1>;
+defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, VR128, memopv2i64,
+ i128mem, SSE_INTMUL_ITINS_P, 1>;
+defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P>;
+defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P>;
+defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32, VR128, memopv2i64,
+ i128mem, SSE_INTALU_ITINS_P>;
+defm PSUBQ : PDI_binop_rm<0xFB, "psubq", sub, v2i64, VR128, memopv2i64,
+ i128mem, SSE_INTALUQ_ITINS_P>;
+defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
+ memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
// Intrinsic forms
-defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
-defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
-defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
-defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
-defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
-defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
-defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
-defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
-defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
-defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
-defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
-defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
-defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
-defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
-defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
-defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
-defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
-defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
-defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
+defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
+defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
+defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
+defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
+defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTMUL_ITINS_P, 1>;
+defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTMUL_ITINS_P, 1>;
+defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
+ VR128, memopv2i64, i128mem,
+ SSE_PMADD, 1>;
+defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
} // Constraints = "$src1 = $dst"
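// The Constraints = "$src1 = $dst" wrapper above expresses the destructive
// two-operand SSE encoding: the register allocator must assign $src1 and
// $dst to the same register. These instantiations therefore leave Is2Addr at
// its default of 1, which selects the two-operand asm string in the
// multiclass, e.g. "paddb\t{$src2, $dst|$dst, $src2}" for PADDBrr.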
@@ -3520,145 +3909,176 @@ defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
//===---------------------------------------------------------------------===//
let Predicates = [HasAVX] in {
-defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
- int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
- VEX_4V;
-defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
- int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
- VEX_4V;
-defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
- int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
- VEX_4V;
-
-defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
- int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
- VEX_4V;
-defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
- int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
- VEX_4V;
-defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
- int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
- VEX_4V;
-
-defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
- int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
- VEX_4V;
-defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
- int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
- VEX_4V;
-
-defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
-defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
-defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
+defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
+ VR128, v8i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
+ VR128, v4i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
+ VR128, v2i64, v2i64, bc_v2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+
+defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
+ VR128, v8i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
+ VR128, v4i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
+ VR128, v2i64, v2i64, bc_v2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+
+defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
+ VR128, v8i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
+ VR128, v4i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
let ExeDomain = SSEPackedInt in {
- let neverHasSideEffects = 1 in {
- // 128-bit logical shifts.
- def VPSLLDQri : PDIi8<0x73, MRM7r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- VEX_4V;
- def VPSRLDQri : PDIi8<0x73, MRM3r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- VEX_4V;
- // PSRADQri doesn't exist in SSE[1-3].
- }
- def VPANDNrr : PDI<0xDF, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ // 128-bit logical shifts.
+ def VPSLLDQri : PDIi8<0x73, MRM7r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>,
+ VEX_4V;
+ def VPSRLDQri : PDIi8<0x73, MRM3r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v2i64 (X86andnp VR128:$src1, VR128:$src2)))]>,VEX_4V;
+ (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>,
+ VEX_4V;
+ // PSRADQri doesn't exist in SSE[1-3].
+}
+} // Predicates = [HasAVX]
+
+let Predicates = [HasAVX2] in {
+defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
+ VR256, v16i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
+ VR256, v8i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
+ VR256, v4i64, v2i64, bc_v2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+
+defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
+ VR256, v16i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
+ VR256, v8i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
+ VR256, v4i64, v2i64, bc_v2i64,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+
+defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
+ VR256, v16i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
+ VR256, v8i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
- def VPANDNrm : PDI<0xDF, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (X86andnp VR128:$src1,
- (memopv2i64 addr:$src2)))]>, VEX_4V;
-}
-}
+let ExeDomain = SSEPackedInt in {
+ // 256-bit logical shifts.
+ def VPSLLDQYri : PDIi8<0x73, MRM7r,
+ (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
+ "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR256:$dst,
+ (int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>,
+ VEX_4V;
+ def VPSRLDQYri : PDIi8<0x73, MRM3r,
+ (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
+ "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR256:$dst,
+ (int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>,
+ VEX_4V;
+ // PSRADQYri doesn't exist in SSE[1-3].
+}
+} // Predicates = [HasAVX2]
let Constraints = "$src1 = $dst" in {
-defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
- int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
-defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
- int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
-defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
- int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
-
-defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
- int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
-defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
- int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
-defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
- int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
-
-defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
- int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
-defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
- int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
-
-defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
-defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
-defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
+defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
+ VR128, v8i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P>;
+defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
+ VR128, v4i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P>;
+defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
+ VR128, v2i64, v2i64, bc_v2i64,
+ SSE_INTSHIFT_ITINS_P>;
+
+defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
+ VR128, v8i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P>;
+defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
+ VR128, v4i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P>;
+defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
+ VR128, v2i64, v2i64, bc_v2i64,
+ SSE_INTSHIFT_ITINS_P>;
+
+defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
+ VR128, v8i16, v8i16, bc_v8i16,
+ SSE_INTSHIFT_ITINS_P>;
+defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
+ VR128, v4i32, v4i32, bc_v4i32,
+ SSE_INTSHIFT_ITINS_P>;
let ExeDomain = SSEPackedInt in {
- let neverHasSideEffects = 1 in {
- // 128-bit logical shifts.
- def PSLLDQri : PDIi8<0x73, MRM7r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- "pslldq\t{$src2, $dst|$dst, $src2}", []>;
- def PSRLDQri : PDIi8<0x73, MRM3r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- "psrldq\t{$src2, $dst|$dst, $src2}", []>;
- // PSRADQri doesn't exist in SSE[1-3].
- }
- def PANDNrr : PDI<0xDF, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}", []>;
-
- def PANDNrm : PDI<0xDF, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}", []>;
+ // 128-bit logical shifts.
+ def PSLLDQri : PDIi8<0x73, MRM7r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "pslldq\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>;
+ def PSRLDQri : PDIi8<0x73, MRM3r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "psrldq\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>;
+ // PSRADQri doesn't exist in SSE[1-3].
}
} // Constraints = "$src1 = $dst"
let Predicates = [HasAVX] in {
def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
- (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
+ (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
- (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
- def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
- (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
- def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
- (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
+ (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
- (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
+ (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
// Shift up / down and insert zeros.
- def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
- (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
- def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
- (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
+ def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
+ (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
+ def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
+ (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
+}
+
+let Predicates = [HasAVX2] in {
+ def : Pat<(int_x86_avx2_psll_dq VR256:$src1, imm:$src2),
+ (VPSLLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
+ def : Pat<(int_x86_avx2_psrl_dq VR256:$src1, imm:$src2),
+ (VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
}
let Predicates = [HasSSE2] in {
def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
- (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
+ (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
- (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
- def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
- (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
- def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
- (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
+ (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
- (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
+ (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
// Shift up / down and insert zeros.
- def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
- (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
- def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
- (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
+ def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
+ (PSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
+ def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
+ (PSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
}
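
// The int_x86_sse2_ps{ll,rl}_dq intrinsics take their shift amount in bits,
// while pslldq/psrldq encode it in bytes, so the patterns above run the
// immediate through BYTE_imm. A sketch of that transform, assuming BYTE_imm
// is the SDNodeXForm declared earlier in this file:
def BYTE_imm_sketch : SDNodeXForm<imm, [{
  // Convert a bit count into the byte count the instruction encodes: imm >> 3.
  return getI32Imm(N->getZExtValue() >> 3);
}]>;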
//===---------------------------------------------------------------------===//
@@ -3666,100 +4086,106 @@ let Predicates = [HasSSE2] in {
//===---------------------------------------------------------------------===//
let Predicates = [HasAVX] in {
- defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
- 0>, VEX_4V;
- defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
- 0>, VEX_4V;
- defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
- 0>, VEX_4V;
- defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
- 0>, VEX_4V;
- defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
- 0>, VEX_4V;
- defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
- 0>, VEX_4V;
-
- def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
- (VPCMPEQBrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
- (VPCMPEQBrm VR128:$src1, addr:$src2)>;
- def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
- (VPCMPEQWrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
- (VPCMPEQWrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
- (VPCMPEQDrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
- (VPCMPEQDrm VR128:$src1, addr:$src2)>;
-
- def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
- (VPCMPGTBrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
- (VPCMPGTBrm VR128:$src1, addr:$src2)>;
- def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
- (VPCMPGTWrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
- (VPCMPGTWrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
- (VPCMPGTDrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
- (VPCMPGTDrm VR128:$src1, addr:$src2)>;
+ defm VPCMPEQB : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v16i8,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ defm VPCMPEQW : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v8i16,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ defm VPCMPEQD : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v4i32,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ defm VPCMPGTB : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v16i8,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ defm VPCMPGTW : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v8i16,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ defm VPCMPGTD : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v4i32,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+}
+
+let Predicates = [HasAVX2] in {
+ defm VPCMPEQBY : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v32i8,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ defm VPCMPEQWY : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v16i16,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ defm VPCMPEQDY : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v8i32,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 1, 0>, VEX_4V;
+ defm VPCMPGTBY : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v32i8,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ defm VPCMPGTWY : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v16i16,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+ defm VPCMPGTDY : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v8i32,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
- defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
- defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
- defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
- defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
- defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
- defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
+ defm PCMPEQB : PDI_binop_rm<0x74, "pcmpeqb", X86pcmpeq, v16i8,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+ defm PCMPEQW : PDI_binop_rm<0x75, "pcmpeqw", X86pcmpeq, v8i16,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+ defm PCMPEQD : PDI_binop_rm<0x76, "pcmpeqd", X86pcmpeq, v4i32,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 1>;
+ defm PCMPGTB : PDI_binop_rm<0x64, "pcmpgtb", X86pcmpgt, v16i8,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
+ defm PCMPGTW : PDI_binop_rm<0x65, "pcmpgtw", X86pcmpgt, v8i16,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
+ defm PCMPGTD : PDI_binop_rm<0x66, "pcmpgtd", X86pcmpgt, v4i32,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
} // Constraints = "$src1 = $dst"
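
// The per-width compare intrinsics (int_x86_sse2_pcmpeq_* / pcmpgt_*) are
// replaced by the generic X86pcmpeq/X86pcmpgt SDNodes, so PDI_binop_rm now
// generates both the instructions and their patterns, and the hand-written
// Pat<> block removed below becomes redundant. These compares produce
// lane-wise masks, all-ones where the predicate holds and all-zeros
// elsewhere, e.g. pcmpeqw [5,2,9,..], [5,7,9,..] -> [0xFFFF,0,0xFFFF,..].
// A sketch of the node declarations this assumes from
// X86InstrFragmentsSIMD.td:
def X86pcmpeq_sketch : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp,
                              [SDNPCommutative]>;
def X86pcmpgt_sketch : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;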
-let Predicates = [HasSSE2] in {
- def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
- (PCMPEQBrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
- (PCMPEQBrm VR128:$src1, addr:$src2)>;
- def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
- (PCMPEQWrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
- (PCMPEQWrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
- (PCMPEQDrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
- (PCMPEQDrm VR128:$src1, addr:$src2)>;
-
- def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
- (PCMPGTBrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
- (PCMPGTBrm VR128:$src1, addr:$src2)>;
- def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
- (PCMPGTWrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
- (PCMPGTWrm VR128:$src1, addr:$src2)>;
- def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
- (PCMPGTDrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
- (PCMPGTDrm VR128:$src1, addr:$src2)>;
-}
-
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Pack Instructions
//===---------------------------------------------------------------------===//
let Predicates = [HasAVX] in {
defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
- 0, 0>, VEX_4V;
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
- 0, 0>, VEX_4V;
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
- 0, 0>, VEX_4V;
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+}
+
+let Predicates = [HasAVX2] in {
+defm VPACKSSWBY : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_avx2_packsswb,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPACKSSDWY : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_avx2_packssdw,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
+defm VPACKUSWBY : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_avx2_packuswb,
+ VR256, memopv4i64, i256mem,
+ SSE_INTALU_ITINS_P, 0, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
-defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
-defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
-defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
+defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
+defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
+defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128,
+ VR128, memopv2i64, i128mem,
+ SSE_INTALU_ITINS_P>;
} // Constraints = "$src1 = $dst"
//===---------------------------------------------------------------------===//
@@ -3767,103 +4193,75 @@ defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
-multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
- PatFrag bc_frag> {
+multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, SDNode OpNode> {
def ri : Ii8<0x70, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
+ (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (vt (OpNode VR128:$src1, (i8 imm:$src2))))],
+ IIC_SSE_PSHUF>;
+def mi : Ii8<0x70, MRMSrcMem,
+ (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (vt (OpNode (bitconvert (memopv2i64 addr:$src1)),
+ (i8 imm:$src2))))],
+ IIC_SSE_PSHUF>;
+}
+
+multiclass sse2_pshuffle_y<string OpcodeStr, ValueType vt, SDNode OpNode> {
+def Yri : Ii8<0x70, MRMSrcReg,
+ (outs VR256:$dst), (ins VR256:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
- (undef))))]>;
-def mi : Ii8<0x70, MRMSrcMem,
- (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
+ [(set VR256:$dst, (vt (OpNode VR256:$src1, (i8 imm:$src2))))]>;
+def Ymi : Ii8<0x70, MRMSrcMem,
+ (outs VR256:$dst), (ins i256mem:$src1, i8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (vt (pshuf_frag:$src2
- (bc_frag (memopv2i64 addr:$src1)),
- (undef))))]>;
+ [(set VR256:$dst,
+ (vt (OpNode (bitconvert (memopv4i64 addr:$src1)),
+ (i8 imm:$src2))))]>;
}
} // ExeDomain = SSEPackedInt
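
// The pshuf* immediate packs four 2-bit source-lane selectors, with the low
// two bits choosing destination lane 0. Worked example for pshufd:
//   imm = 0x1B = 0b00011011, selectors for dst lanes 0..3 = 3,2,1,0
//   dst = [src[3], src[2], src[1], src[0]]  (i.e. $0x1B reverses the dwords)
// pshufhw/pshuflw apply the same scheme to only the high/low four words and
// copy the other half through unchanged; the AVX2 Y variants shuffle within
// each 128-bit lane independently.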
let Predicates = [HasAVX] in {
- let AddedComplexity = 5 in
- defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize,
- VEX;
-
- // SSE2 with ImmT == Imm8 and XS prefix.
- defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
- VEX;
-
- // SSE2 with ImmT == Imm8 and XD prefix.
- defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
- VEX;
-
- let AddedComplexity = 5 in
- def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
- (VPSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
- // Unary v4f32 shuffle with VPSHUF* in order to fold a load.
- def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
- (VPSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
-
- def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
- (i8 imm:$imm))),
- (VPSHUFDmi addr:$src1, imm:$imm)>;
- def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
- (i8 imm:$imm))),
- (VPSHUFDmi addr:$src1, imm:$imm)>;
- def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
- (VPSHUFDri VR128:$src1, imm:$imm)>;
- def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
- (VPSHUFDri VR128:$src1, imm:$imm)>;
- def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
- (VPSHUFHWri VR128:$src, imm:$imm)>;
- def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
- (i8 imm:$imm))),
- (VPSHUFHWmi addr:$src, imm:$imm)>;
- def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
- (VPSHUFLWri VR128:$src, imm:$imm)>;
- def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
- (i8 imm:$imm))),
- (VPSHUFLWmi addr:$src, imm:$imm)>;
+ let AddedComplexity = 5 in
+ defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, X86PShufd>, TB, OpSize, VEX;
+
+ // SSE2 with ImmT == Imm8 and XS prefix.
+ defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, X86PShufhw>, XS, VEX;
+
+ // SSE2 with ImmT == Imm8 and XD prefix.
+ defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, X86PShuflw>, XD, VEX;
+
+ def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
+ (VPSHUFDmi addr:$src1, imm:$imm)>;
+ def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
+ (VPSHUFDri VR128:$src1, imm:$imm)>;
+}
+
+let Predicates = [HasAVX2] in {
+ defm VPSHUFD : sse2_pshuffle_y<"vpshufd", v8i32, X86PShufd>, TB, OpSize, VEX;
+ defm VPSHUFHW : sse2_pshuffle_y<"vpshufhw", v16i16, X86PShufhw>, XS, VEX;
+ defm VPSHUFLW : sse2_pshuffle_y<"vpshuflw", v16i16, X86PShuflw>, XD, VEX;
}
let Predicates = [HasSSE2] in {
- let AddedComplexity = 5 in
- defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
-
- // SSE2 with ImmT == Imm8 and XS prefix.
- defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
-
- // SSE2 with ImmT == Imm8 and XD prefix.
- defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
-
- let AddedComplexity = 5 in
- def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
- (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
- // Unary v4f32 shuffle with PSHUF* in order to fold a load.
- def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
- (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
-
- def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
- (i8 imm:$imm))),
- (PSHUFDmi addr:$src1, imm:$imm)>;
- def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
- (i8 imm:$imm))),
- (PSHUFDmi addr:$src1, imm:$imm)>;
- def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
- (PSHUFDri VR128:$src1, imm:$imm)>;
- def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
- (PSHUFDri VR128:$src1, imm:$imm)>;
- def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
- (PSHUFHWri VR128:$src, imm:$imm)>;
- def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
- (i8 imm:$imm))),
- (PSHUFHWmi addr:$src, imm:$imm)>;
- def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
- (PSHUFLWri VR128:$src, imm:$imm)>;
- def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
- (i8 imm:$imm))),
- (PSHUFLWmi addr:$src, imm:$imm)>;
+ let AddedComplexity = 5 in
+ defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, X86PShufd>, TB, OpSize;
+
+ // SSE2 with ImmT == Imm8 and XS prefix.
+ defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, X86PShufhw>, XS;
+
+ // SSE2 with ImmT == Imm8 and XD prefix.
+ defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, X86PShuflw>, XD;
+
+ def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
+ (PSHUFDmi addr:$src1, imm:$imm)>;
+ def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
+ (PSHUFDri VR128:$src1, imm:$imm)>;
}
//===---------------------------------------------------------------------===//
@@ -3878,7 +4276,8 @@ multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
!if(Is2Addr,
!strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
+ [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
+ IIC_SSE_UNPCK>;
def rm : PDI<opc, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
!if(Is2Addr,
@@ -3886,96 +4285,104 @@ multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
!strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (OpNode VR128:$src1,
(bc_frag (memopv2i64
- addr:$src2))))]>;
+ addr:$src2))))],
+ IIC_SSE_UNPCK>;
+}
+
+multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
+ SDNode OpNode, PatFrag bc_frag> {
+ def Yrr : PDI<opc, MRMSrcReg,
+ (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>;
+ def Yrm : PDI<opc, MRMSrcMem,
+ (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (OpNode VR256:$src1,
+ (bc_frag (memopv4i64 addr:$src2))))]>;
}
let Predicates = [HasAVX] in {
- defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
+ defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
bc_v16i8, 0>, VEX_4V;
- defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
+ defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
bc_v8i16, 0>, VEX_4V;
- defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
+ defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
bc_v4i32, 0>, VEX_4V;
+ defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
+ bc_v2i64, 0>, VEX_4V;
- /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
- /// knew to collapse (bitconvert VT to VT) into its operand.
- def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
- VR128:$src2)))]>, VEX_4V;
- def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
- (memopv2i64 addr:$src2))))]>, VEX_4V;
-
- defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
+ defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
bc_v16i8, 0>, VEX_4V;
- defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
+ defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
bc_v8i16, 0>, VEX_4V;
- defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
+ defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
bc_v4i32, 0>, VEX_4V;
-
- /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
- /// knew to collapse (bitconvert VT to VT) into its operand.
- def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
- VR128:$src2)))]>, VEX_4V;
- def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
- (memopv2i64 addr:$src2))))]>, VEX_4V;
+ defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
+ bc_v2i64, 0>, VEX_4V;
+}
+
+let Predicates = [HasAVX2] in {
+ defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
+ bc_v32i8>, VEX_4V;
+ defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
+ bc_v16i16>, VEX_4V;
+ defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
+ bc_v8i32>, VEX_4V;
+ defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
+ bc_v4i64>, VEX_4V;
+
+ defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
+ bc_v32i8>, VEX_4V;
+ defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
+ bc_v16i16>, VEX_4V;
+ defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
+ bc_v8i32>, VEX_4V;
+ defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
+ bc_v4i64>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
- defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
- defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
- defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;
-
- /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
- /// knew to collapse (bitconvert VT to VT) into its operand.
- def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpcklqdq\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
- def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpcklqdq\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2i64 (X86Punpcklqdq VR128:$src1,
- (memopv2i64 addr:$src2))))]>;
-
- defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
- defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
- defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;
-
- /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
- /// knew to collapse (bitconvert VT to VT) into its operand.
- def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckhqdq\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
- def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckhqdq\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2i64 (X86Punpckhqdq VR128:$src1,
- (memopv2i64 addr:$src2))))]>;
+ defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
+ bc_v16i8>;
+ defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
+ bc_v8i16>;
+ defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
+ bc_v4i32>;
+ defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
+ bc_v2i64>;
+
+ defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
+ bc_v16i8>;
+ defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
+ bc_v8i16>;
+ defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
+ bc_v4i32>;
+ defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
+ bc_v2i64>;
}
} // ExeDomain = SSEPackedInt
-// Splat v2f64 / v2i64
-let AddedComplexity = 10 in {
- def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
- (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
- def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
- (VPUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasAVX]>;
+// Patterns for using AVX1 instructions with integer vectors
+// Here to give AVX2 priority
+let Predicates = [HasAVX] in {
+ def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
+ (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
+ (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
+ def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))),
+ (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
+ (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
+
+ def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))),
+ (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
+ (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
+ def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))),
+ (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
+ def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
+ (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
}
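Aside (illustration, not part of this commit): X86Unpckl/X86Unpckh interleave the
low or high halves of two vectors. A minimal C sketch of the 128-bit integer
form, assuming GCC or Clang on x86-64 with -msse2:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set_epi64x(0x1111111111111111LL, 0x2222222222222222LL);
  __m128i b = _mm_set_epi64x(0x3333333333333333LL, 0x4444444444444444LL);
  __m128i hi = _mm_unpackhi_epi64(a, b);    /* punpckhqdq: { a.hi, b.hi } */
  long long r[2];
  _mm_storeu_si128((__m128i *)r, hi);
  printf("%llx %llx\n", r[0], r[1]);        /* 1111... 3333... */
  return 0;
}

AVX1 has no 256-bit integer unpacks, which is why the patterns above select the
floating-point VUNPCKLPS/VUNPCKHPS and VUNPCKLPD/VUNPCKHPD forms for v8i32 and
v4i64 when only HasAVX is available.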
//===---------------------------------------------------------------------===//
@@ -3991,7 +4398,7 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
+ (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))], IIC_SSE_PINSRW>;
def rmi : Ii8<0xC4, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1,
i16mem:$src2, i32i8imm:$src3),
@@ -4000,7 +4407,7 @@ multiclass sse2_pinsrw<bit Is2Addr = 1> {
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
- imm:$src3))]>;
+ imm:$src3))], IIC_SSE_PINSRW>;
}
// Extract
@@ -4014,7 +4421,7 @@ def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
- imm:$src2))]>;
+ imm:$src2))], IIC_SSE_PEXTRW>;
// Insert
let Predicates = [HasAVX] in {
@@ -4038,12 +4445,23 @@ let ExeDomain = SSEPackedInt in {
def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
+ [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
+ IIC_SSE_MOVMSK>, VEX;
def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+ "pmovmskb\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK>, VEX;
+
+let Predicates = [HasAVX2] in {
+def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
+ "pmovmskb\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX;
+def VPMOVMSKBYr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
+}
+
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
+ [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))],
+ IIC_SSE_MOVMSK>;
} // ExeDomain = SSEPackedInt
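Aside (illustration, not part of this commit): PMOVMSKB packs the sign bit of
every byte into a scalar mask, one bit per byte. Assuming GCC or Clang with
-msse2:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i v = _mm_set_epi8(-1, 0, -1, 0, -1, 0, -1, 0,
                           -1, 0, -1, 0, -1, 0, -1, 0);
  printf("%04x\n", _mm_movemask_epi8(v));  /* prints aaaa */
  return 0;
}

The VPMOVMSKBY form added above does the same for 32 bytes; with AVX2 enabled
it is reachable as _mm256_movemask_epi8.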
@@ -4057,21 +4475,25 @@ let Uses = [EDI] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
+ IIC_SSE_MASKMOV>, VEX;
let Uses = [RDI] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
+ IIC_SSE_MASKMOV>, VEX;
let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
+ IIC_SSE_MASKMOV>;
let Uses = [RDI] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
+ IIC_SSE_MASKMOV>;
} // ExeDomain = SSEPackedInt
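Aside (illustration, not part of this commit): MASKMOVDQU is a byte-masked,
non-temporal store to the address in the implicit EDI/RDI operand, matching the
Uses = [EDI]/[RDI] constraints above. Only bytes whose mask byte has its most
significant bit set are written. Assuming GCC or Clang with -msse2:

#include <immintrin.h>
#include <string.h>
#include <stdio.h>

int main(void) {
  char buf[16];
  memset(buf, '.', sizeof(buf));
  __m128i data = _mm_set1_epi8('X');
  __m128i mask = _mm_set_epi8(0, -128, 0, -128, 0, -128, 0, -128,
                              0, -128, 0, -128, 0, -128, 0, -128);
  _mm_maskmoveu_si128(data, mask, buf);    /* stores only even-index bytes */
  printf("%.16s\n", buf);                  /* X.X.X.X.X.X.X.X. */
  return 0;
}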
@@ -4085,54 +4507,65 @@ def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
+ (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
+ VEX;
def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
+ IIC_SSE_MOVDQ>,
VEX;
def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
+ (v2i64 (scalar_to_vector GR64:$src)))],
+ IIC_SSE_MOVDQ>, VEX;
def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;
+ [(set FR64:$dst, (bitconvert GR64:$src))],
+ IIC_SSE_MOVDQ>, VEX;
def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (scalar_to_vector GR32:$src)))]>;
+ (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
+ IIC_SSE_MOVDQ>;
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2i64 (scalar_to_vector GR64:$src)))]>;
+ (v2i64 (scalar_to_vector GR64:$src)))],
+ IIC_SSE_MOVDQ>;
def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert GR64:$src))]>;
+ [(set FR64:$dst, (bitconvert GR64:$src))],
+ IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//
def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
+ [(set FR32:$dst, (bitconvert GR32:$src))],
+ IIC_SSE_MOVDQ>, VEX;
def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
+ [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
+ IIC_SSE_MOVDQ>,
VEX;
def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert GR32:$src))]>;
+ [(set FR32:$dst, (bitconvert GR32:$src))],
+ IIC_SSE_MOVDQ>;
def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
+ [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
+ IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Doubleword Int
@@ -4140,20 +4573,22 @@ def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
- (iPTR 0)))]>, VEX;
+ (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX;
def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (vector_extract (v4i32 VR128:$src),
- (iPTR 0))), addr:$dst)]>, VEX;
+ (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
+ VEX;
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
- (iPTR 0)))]>;
+ (iPTR 0)))], IIC_SSE_MOVD_ToGP>;
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (vector_extract (v4i32 VR128:$src),
- (iPTR 0))), addr:$dst)]>;
+ (iPTR 0))), addr:$dst)],
+ IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int first element to Doubleword Int
@@ -4161,13 +4596,15 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
def VMOVPQIto64rr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
- (iPTR 0)))]>,
+ (iPTR 0)))],
+ IIC_SSE_MOVD_ToGP>,
TB, OpSize, VEX, VEX_W, Requires<[HasAVX, In64BitMode]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
- (iPTR 0)))]>;
+ (iPTR 0)))],
+ IIC_SSE_MOVD_ToGP>;
//===---------------------------------------------------------------------===//
// Bitcast FR64 <-> GR64
@@ -4179,36 +4616,45 @@ def VMOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
VEX;
def VMOVSDto64rr : VRPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (bitconvert FR64:$src))]>;
+ [(set GR64:$dst, (bitconvert FR64:$src))],
+ IIC_SSE_MOVDQ>, VEX;
def VMOVSDto64mr : VRPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
+ [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>, VEX;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
+ [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
+ IIC_SSE_MOVDQ>;
def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (bitconvert FR64:$src))]>;
+ [(set GR64:$dst, (bitconvert FR64:$src))],
+ IIC_SSE_MOVD_ToGP>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
+ [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int
//
def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
+ [(set GR32:$dst, (bitconvert FR32:$src))],
+ IIC_SSE_MOVD_ToGP>, VEX;
def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
+ [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>, VEX;
def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (bitconvert FR32:$src))]>;
+ [(set GR32:$dst, (bitconvert FR32:$src))],
+ IIC_SSE_MOVD_ToGP>;
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
+ [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
+ IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Patterns and instructions to describe movd/movq to XMM register zero-extends
@@ -4217,23 +4663,26 @@ let AddedComplexity = 15 in {
def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector GR32:$src)))))]>,
- VEX;
+ (v4i32 (scalar_to_vector GR32:$src)))))],
+ IIC_SSE_MOVDQ>, VEX;
def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
[(set VR128:$dst, (v2i64 (X86vzmovl
- (v2i64 (scalar_to_vector GR64:$src)))))]>,
+ (v2i64 (scalar_to_vector GR64:$src)))))],
+ IIC_SSE_MOVDQ>,
VEX, VEX_W;
}
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector GR32:$src)))))]>;
+ (v4i32 (scalar_to_vector GR32:$src)))))],
+ IIC_SSE_MOVDQ>;
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
[(set VR128:$dst, (v2i64 (X86vzmovl
- (v2i64 (scalar_to_vector GR64:$src)))))]>;
+ (v2i64 (scalar_to_vector GR64:$src)))))],
+ IIC_SSE_MOVDQ>;
}
let AddedComplexity = 20 in {
@@ -4241,29 +4690,19 @@ def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86vzmovl (v4i32 (scalar_to_vector
- (loadi32 addr:$src))))))]>,
- VEX;
+ (loadi32 addr:$src))))))],
+ IIC_SSE_MOVDQ>, VEX;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86vzmovl (v4i32 (scalar_to_vector
- (loadi32 addr:$src))))))]>;
-}
-
-let Predicates = [HasSSE2], AddedComplexity = 20 in {
- def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
- (MOVZDI2PDIrm addr:$src)>;
- def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
- (MOVZDI2PDIrm addr:$src)>;
- def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
- (MOVZDI2PDIrm addr:$src)>;
+ (loadi32 addr:$src))))))],
+ IIC_SSE_MOVDQ>;
}
let Predicates = [HasAVX] in {
// AVX 128-bit movd/movq instructions write zeros in the high 128 bits.
let AddedComplexity = 20 in {
- def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
- (VMOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
(VMOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
@@ -4278,6 +4717,13 @@ let Predicates = [HasAVX] in {
(SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
}
+let Predicates = [HasSSE2], AddedComplexity = 20 in {
+ def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
+ (MOVZDI2PDIrm addr:$src)>;
+ def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
+ (MOVZDI2PDIrm addr:$src)>;
+}
+
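Aside (illustration, not part of this commit): the X86vzmovl patterns above
capture the fact that movd/movq into an XMM register zero the untouched upper
lanes. Assuming GCC or Clang with -msse2:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  /* movd GR32 -> XMM zeroes bits 127:32 of the destination */
  __m128i v = _mm_cvtsi32_si128(0x12345678);
  int r[4];
  _mm_storeu_si128((__m128i *)r, v);
  printf("%08x %08x %08x %08x\n", r[0], r[1], r[2], r[3]);
  /* 12345678 00000000 00000000 00000000 */
  return 0;
}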
// These are the correct encodings of the instructions so that we know how to
// read correct assembly, even though we continue to emit the wrong ones for
// compatibility with Darwin's buggy assembler.
@@ -4309,7 +4755,8 @@ def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
+ IIC_SSE_MOVDQ>, XS,
Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
//===---------------------------------------------------------------------===//
@@ -4318,11 +4765,13 @@ def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))), addr:$dst)]>, VEX;
+ (iPTR 0))), addr:$dst)],
+ IIC_SSE_MOVDQ>, VEX;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))), addr:$dst)]>;
+ (iPTR 0))), addr:$dst)],
+ IIC_SSE_MOVDQ>;
//===---------------------------------------------------------------------===//
// Store / copy lower 64-bits of a XMM register.
@@ -4332,14 +4781,16 @@ def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
[(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
+ [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)],
+ IIC_SSE_MOVDQ>;
let AddedComplexity = 20 in
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (X86vzmovl (v2i64 (scalar_to_vector
- (loadi64 addr:$src))))))]>,
+ (loadi64 addr:$src))))))],
+ IIC_SSE_MOVDQ>,
XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in
@@ -4347,9 +4798,19 @@ def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (X86vzmovl (v2i64 (scalar_to_vector
- (loadi64 addr:$src))))))]>,
+ (loadi64 addr:$src))))))],
+ IIC_SSE_MOVDQ>,
XS, Requires<[HasSSE2]>;
+let Predicates = [HasAVX], AddedComplexity = 20 in {
+ def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
+ (VMOVZQI2PQIrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
+ (VMOVZQI2PQIrm addr:$src)>;
+ def : Pat<(v2i64 (X86vzload addr:$src)),
+ (VMOVZQI2PQIrm addr:$src)>;
+}
+
let Predicates = [HasSSE2], AddedComplexity = 20 in {
def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
(MOVZQI2PQIrm addr:$src)>;
@@ -4358,13 +4819,11 @@ let Predicates = [HasSSE2], AddedComplexity = 20 in {
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}
-let Predicates = [HasAVX], AddedComplexity = 20 in {
- def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
- (VMOVZQI2PQIrm addr:$src)>;
- def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
- (VMOVZQI2PQIrm addr:$src)>;
- def : Pat<(v2i64 (X86vzload addr:$src)),
- (VMOVZQI2PQIrm addr:$src)>;
+let Predicates = [HasAVX] in {
+def : Pat<(v4i64 (alignedX86vzload addr:$src)),
+ (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
+def : Pat<(v4i64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
}
//===---------------------------------------------------------------------===//
@@ -4374,51 +4833,58 @@ let Predicates = [HasAVX], AddedComplexity = 20 in {
let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vmovq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
+ [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
+ IIC_SSE_MOVQ_RR>,
XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
+ [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
+ IIC_SSE_MOVQ_RR>,
XS, Requires<[HasSSE2]>;
let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl
- (loadv2i64 addr:$src))))]>,
+ (loadv2i64 addr:$src))))],
+ IIC_SSE_MOVDQ>,
XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl
- (loadv2i64 addr:$src))))]>,
+ (loadv2i64 addr:$src))))],
+ IIC_SSE_MOVDQ>,
XS, Requires<[HasSSE2]>;
}
let AddedComplexity = 20 in {
- let Predicates = [HasSSE2] in {
- def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
- (MOVZPQILo2PQIrm addr:$src)>;
- def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
- (MOVZPQILo2PQIrr VR128:$src)>;
- }
let Predicates = [HasAVX] in {
- def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
+ def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
(VMOVZPQILo2PQIrm addr:$src)>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
(VMOVZPQILo2PQIrr VR128:$src)>;
}
+ let Predicates = [HasSSE2] in {
+ def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
+ (MOVZPQILo2PQIrm addr:$src)>;
+ def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
+ (MOVZPQILo2PQIrr VR128:$src)>;
+ }
}
// Instructions to match in the assembler
def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
+ "movq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVDQ>, VEX, VEX_W;
def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
+ "movq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVDQ>, VEX, VEX_W;
// Recognize "movd" with GR64 destination, but encode as a "movq"
def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
+ "movd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_MOVDQ>, VEX, VEX_W;
// Instructions for the disassembler
// xr = XMM register
@@ -4428,7 +4894,7 @@ let Predicates = [HasAVX] in
def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", []>, XS;
+ "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, XS;
//===---------------------------------------------------------------------===//
// SSE3 - Conversion Instructions
@@ -4458,14 +4924,16 @@ def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
}
def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
+ "cvtpd2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>;
def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
+ "cvtpd2dq\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>;
def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
- (VCVTPD2DQYrr VR256:$src)>;
+ (VCVTTPD2DQYrr VR256:$src)>;
def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
- (VCVTPD2DQYrm addr:$src)>;
+ (VCVTTPD2DQYrm addr:$src)>;
// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX] in {
@@ -4480,14 +4948,16 @@ def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
}
def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RM>;
def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
+ IIC_SSE_CVT_PD_RR>;
// AVX 256-bit register conversion intrinsics
def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
(VCVTDQ2PDYrr VR128:$src)>;
-def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
+def : Pat<(int_x86_avx_cvtdq2_pd_256 (bitconvert (memopv2i64 addr:$src))),
(VCVTDQ2PDYrm addr:$src)>;
def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
@@ -4497,7 +4967,7 @@ def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
(VCVTDQ2PDYrr VR128:$src)>;
-def : Pat<(v4f64 (sint_to_fp (memopv4i32 addr:$src))),
+def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
(VCVTDQ2PDYrm addr:$src)>;
//===---------------------------------------------------------------------===//
@@ -4508,10 +4978,12 @@ multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
X86MemOperand x86memop> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (vt (OpNode RC:$src)))]>;
+ [(set RC:$dst, (vt (OpNode RC:$src)))],
+ IIC_SSE_MOV_LH>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
+ [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
+ IIC_SSE_MOV_LH>;
}
let Predicates = [HasAVX] in {
@@ -4529,17 +5001,6 @@ defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
memopv4f32, f128mem>;
-let Predicates = [HasSSE3] in {
- def : Pat<(v4i32 (X86Movshdup VR128:$src)),
- (MOVSHDUPrr VR128:$src)>;
- def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
- (MOVSHDUPrm addr:$src)>;
- def : Pat<(v4i32 (X86Movsldup VR128:$src)),
- (MOVSLDUPrr VR128:$src)>;
- def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
- (MOVSLDUPrm addr:$src)>;
-}
-
let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86Movshdup VR128:$src)),
(VMOVSHDUPrr VR128:$src)>;
@@ -4559,82 +5020,60 @@ let Predicates = [HasAVX] in {
(VMOVSLDUPYrm addr:$src)>;
}
+let Predicates = [HasSSE3] in {
+ def : Pat<(v4i32 (X86Movshdup VR128:$src)),
+ (MOVSHDUPrr VR128:$src)>;
+ def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
+ (MOVSHDUPrm addr:$src)>;
+ def : Pat<(v4i32 (X86Movsldup VR128:$src)),
+ (MOVSLDUPrr VR128:$src)>;
+ def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
+ (MOVSLDUPrm addr:$src)>;
+}
+
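Aside (illustration, not part of this commit): MOVSHDUP duplicates the
odd-index single-precision lanes and MOVSLDUP the even-index ones, which is
what the X86Movshdup/X86Movsldup patterns above select. Assuming GCC or Clang
with -msse3:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);  /* lanes 0..3 = 0,1,2,3 */
  __m128 hd = _mm_movehdup_ps(a);                 /* movshdup: {1,1,3,3} */
  __m128 ld = _mm_moveldup_ps(a);                 /* movsldup: {0,0,2,2} */
  float h[4], l[4];
  _mm_storeu_ps(h, hd); _mm_storeu_ps(l, ld);
  printf("%g %g %g %g | %g %g %g %g\n",
         h[0], h[1], h[2], h[3], l[0], l[1], l[2], l[3]);
  return 0;
}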
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Double FP - MOVDDUP
//===---------------------------------------------------------------------===//
multiclass sse3_replicate_dfp<string OpcodeStr> {
+let neverHasSideEffects = 1 in
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
+ [], IIC_SSE_MOV_LH>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
- (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
- (undef))))]>;
+ (v2f64 (X86Movddup
+ (scalar_to_vector (loadf64 addr:$src)))))],
+ IIC_SSE_MOV_LH>;
}
// FIXME: Merge with above class when there are patterns for the ymm version
multiclass sse3_replicate_dfp_y<string OpcodeStr> {
+def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>;
+def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst,
+ (v4f64 (X86Movddup
+ (scalar_to_vector (loadf64 addr:$src)))))]>;
+}
+
let Predicates = [HasAVX] in {
- def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- []>;
- def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- []>;
- }
+ defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
+ defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
}
defm MOVDDUP : sse3_replicate_dfp<"movddup">;
-defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
-defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
-
-let Predicates = [HasSSE3] in {
- def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
- (undef)),
- (MOVDDUPrm addr:$src)>;
- let AddedComplexity = 5 in {
- def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
- def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
- (MOVDDUPrm addr:$src)>;
- def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
- def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
- (MOVDDUPrm addr:$src)>;
- }
- def : Pat<(X86Movddup (memopv2f64 addr:$src)),
- (MOVDDUPrm addr:$src)>;
- def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
- (MOVDDUPrm addr:$src)>;
- def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
- (MOVDDUPrm addr:$src)>;
- def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
- (MOVDDUPrm addr:$src)>;
- def : Pat<(X86Movddup (bc_v2f64
- (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
- (MOVDDUPrm addr:$src)>;
-}
let Predicates = [HasAVX] in {
- def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
- (undef)),
- (VMOVDDUPrm addr:$src)>;
- let AddedComplexity = 5 in {
- def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
- def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
- (VMOVDDUPrm addr:$src)>;
- def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
- def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
- (VMOVDDUPrm addr:$src)>;
- }
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
- def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
- (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
(v2i64 (scalar_to_vector (loadi64 addr:$src))))),
(VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
@@ -4644,16 +5083,24 @@ let Predicates = [HasAVX] in {
(VMOVDDUPYrm addr:$src)>;
def : Pat<(X86Movddup (memopv4i64 addr:$src)),
(VMOVDDUPYrm addr:$src)>;
- def : Pat<(X86Movddup (v4f64 (scalar_to_vector (loadf64 addr:$src)))),
- (VMOVDDUPYrm addr:$src)>;
def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
(VMOVDDUPYrm addr:$src)>;
- def : Pat<(X86Movddup (v4f64 VR256:$src)),
- (VMOVDDUPYrr VR256:$src)>;
def : Pat<(X86Movddup (v4i64 VR256:$src)),
(VMOVDDUPYrr VR256:$src)>;
}
+let Predicates = [HasSSE3] in {
+ def : Pat<(X86Movddup (memopv2f64 addr:$src)),
+ (MOVDDUPrm addr:$src)>;
+ def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
+ (MOVDDUPrm addr:$src)>;
+ def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
+ (MOVDDUPrm addr:$src)>;
+ def : Pat<(X86Movddup (bc_v2f64
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
+ (MOVDDUPrm addr:$src)>;
+}
+
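Aside (illustration, not part of this commit): X86Movddup broadcasts the low
double-precision element, from memory or from a register. Assuming GCC or
Clang with -msse3:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  double d = 2.5;
  __m128d v = _mm_loaddup_pd(&d);   /* movddup from memory: {d, d} */
  __m128d w = _mm_movedup_pd(v);    /* register form: {v0, v0} */
  double r[2];
  _mm_storeu_pd(r, w);
  printf("%g %g\n", r[0], r[1]);    /* 2.5 2.5 */
  return 0;
}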
//===---------------------------------------------------------------------===//
// SSE3 - Move Unaligned Integer
//===---------------------------------------------------------------------===//
@@ -4668,45 +5115,51 @@ let Predicates = [HasAVX] in {
}
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"lddqu\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
+ [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
+ IIC_SSE_LDDQU>;
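Aside (illustration, not part of this commit): LDDQU is an unaligned 128-bit
load tuned for accesses that straddle cache lines. Assuming GCC or Clang with
-msse3:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  char buf[32];
  for (int i = 0; i < 32; ++i) buf[i] = (char)i;
  __m128i v = _mm_lddqu_si128((const __m128i *)(buf + 3)); /* misaligned */
  printf("%d\n", (char)_mm_cvtsi128_si32(v));              /* 3 */
  return 0;
}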
//===---------------------------------------------------------------------===//
// SSE3 - Arithmetic
//===---------------------------------------------------------------------===//
multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
- X86MemOperand x86memop, bit Is2Addr = 1> {
+ X86MemOperand x86memop, OpndItins itins,
+ bit Is2Addr = 1> {
def rr : I<0xD0, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
+ [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>;
def rm : I<0xD0, MRMSrcMem,
(outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
+ [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))], itins.rm>;
}
-let Predicates = [HasAVX],
- ExeDomain = SSEPackedDouble in {
- defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
- f128mem, 0>, TB, XD, VEX_4V;
- defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
- f128mem, 0>, TB, OpSize, VEX_4V;
- defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
- f256mem, 0>, TB, XD, VEX_4V;
- defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
- f256mem, 0>, TB, OpSize, VEX_4V;
-}
-let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
- ExeDomain = SSEPackedDouble in {
+let Predicates = [HasAVX] in {
+ let ExeDomain = SSEPackedSingle in {
+ defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
+ f128mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
+ defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
+ f256mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V;
+ }
+ let ExeDomain = SSEPackedDouble in {
+ defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
+ f128mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
+ defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
+ f256mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V;
+ }
+}
+let Constraints = "$src1 = $dst", Predicates = [HasSSE3] in {
+ let ExeDomain = SSEPackedSingle in
defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
- f128mem>, TB, XD;
+ f128mem, SSE_ALU_F32P>, TB, XD;
+ let ExeDomain = SSEPackedDouble in
defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
- f128mem>, TB, OpSize;
+ f128mem, SSE_ALU_F64P>, TB, OpSize;
}
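Aside (illustration, not part of this commit): ADDSUBPS/ADDSUBPD subtract in
the even lanes and add in the odd lanes. Assuming GCC or Clang with -msse3:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set_ps(8.0f, 6.0f, 4.0f, 2.0f);  /* {2,4,6,8} */
  __m128 b = _mm_set1_ps(1.0f);
  __m128 r = _mm_addsub_ps(a, b);                 /* {1, 5, 5, 9} */
  float f[4];
  _mm_storeu_ps(f, r);
  printf("%g %g %g %g\n", f[0], f[1], f[2], f[3]);
  return 0;
}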
//===---------------------------------------------------------------------===//
@@ -4720,13 +5173,14 @@ multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))]>;
+ [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>;
def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))]>;
+ [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
+ IIC_SSE_HADDSUB_RM>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
@@ -4734,39 +5188,48 @@ multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))]>;
+ [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>;
def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))]>;
+ [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
+ IIC_SSE_HADDSUB_RM>;
}
let Predicates = [HasAVX] in {
- defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
- X86fhadd, 0>, VEX_4V;
- defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
- X86fhadd, 0>, VEX_4V;
- defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
- X86fhsub, 0>, VEX_4V;
- defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
- X86fhsub, 0>, VEX_4V;
- defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
- X86fhadd, 0>, VEX_4V;
- defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
- X86fhadd, 0>, VEX_4V;
- defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
- X86fhsub, 0>, VEX_4V;
- defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
- X86fhsub, 0>, VEX_4V;
+ let ExeDomain = SSEPackedSingle in {
+ defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
+ X86fhadd, 0>, VEX_4V;
+ defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
+ X86fhsub, 0>, VEX_4V;
+ defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
+ X86fhadd, 0>, VEX_4V;
+ defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
+ X86fhsub, 0>, VEX_4V;
+ }
+ let ExeDomain = SSEPackedDouble in {
+ defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
+ X86fhadd, 0>, VEX_4V;
+ defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
+ X86fhsub, 0>, VEX_4V;
+ defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
+ X86fhadd, 0>, VEX_4V;
+ defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
+ X86fhsub, 0>, VEX_4V;
+ }
}
let Constraints = "$src1 = $dst" in {
- defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd>;
- defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd>;
- defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub>;
- defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub>;
+ let ExeDomain = SSEPackedSingle in {
+ defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd>;
+ defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub>;
+ }
+ let ExeDomain = SSEPackedDouble in {
+ defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd>;
+ defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub>;
+ }
}
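Aside (illustration, not part of this commit): the horizontal forms reduce
adjacent pairs within each source, so the first half of the result comes from
the first operand and the second half from the second. Assuming GCC or Clang
with -msse3:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);      /* {1,2,3,4} */
  __m128 b = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f);
  __m128 r = _mm_hadd_ps(a, b);  /* haddps: {a0+a1, a2+a3, b0+b1, b2+b3} */
  float f[4];
  _mm_storeu_ps(f, r);
  printf("%g %g %g %g\n", f[0], f[1], f[2], f[3]);    /* 3 7 30 70 */
  return 0;
}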
//===---------------------------------------------------------------------===//
@@ -4776,11 +5239,11 @@ let Constraints = "$src1 = $dst" in {
/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
- PatFrag mem_frag128, Intrinsic IntId128> {
+ Intrinsic IntId128> {
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))]>,
+ [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
OpSize;
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
@@ -4788,32 +5251,101 @@ multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(IntId128
- (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
+ (bitconvert (memopv2i64 addr:$src))))], IIC_SSE_PABS_RM>,
+ OpSize;
+}
+
+/// SS3I_unop_rm_int_y - 256-bit (AVX2) variant of SS3I_unop_rm_int.
+multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId256> {
+ def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (IntId256 VR256:$src))]>,
+ OpSize;
+
+ def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins i256mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst,
+ (IntId256
+ (bitconvert (memopv4i64 addr:$src))))]>, OpSize;
}
let Predicates = [HasAVX] in {
- defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
+ defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb",
int_x86_ssse3_pabs_b_128>, VEX;
- defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
+ defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw",
int_x86_ssse3_pabs_w_128>, VEX;
- defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
+ defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd",
int_x86_ssse3_pabs_d_128>, VEX;
}
-defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
+let Predicates = [HasAVX2] in {
+ defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
+ int_x86_avx2_pabs_b>, VEX;
+ defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
+ int_x86_avx2_pabs_w>, VEX;
+ defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
+ int_x86_avx2_pabs_d>, VEX;
+}
+
+defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb",
int_x86_ssse3_pabs_b_128>;
-defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
+defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw",
int_x86_ssse3_pabs_w_128>;
-defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
+defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd",
int_x86_ssse3_pabs_d_128>;
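Aside (illustration, not part of this commit): PABSB/W/D take the elementwise
absolute value without saturation, so the most negative value wraps to itself.
Assuming GCC or Clang with -mssse3:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i v = _mm_set_epi16(-7, 7, -3, 3, -1, 1, 0, -32768);
  __m128i r = _mm_abs_epi16(v);           /* pabsw: elementwise |x| */
  short s[8];
  _mm_storeu_si128((__m128i *)s, r);
  /* |-32768| wraps back to -32768; there is no saturation */
  printf("%d %d\n", s[0], s[7]);          /* -32768 7 */
  return 0;
}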
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Binary Operator Instructions
//===---------------------------------------------------------------------===//
+def SSE_PHADDSUBD : OpndItins<
+ IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
+>;
+def SSE_PHADDSUBSW : OpndItins<
+ IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
+>;
+def SSE_PHADDSUBW : OpndItins<
+ IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
+>;
+def SSE_PSHUFB : OpndItins<
+ IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
+>;
+def SSE_PSIGN : OpndItins<
+ IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
+>;
+def SSE_PMULHRSW : OpndItins<
+ IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
+>;
+
+/// SS3I_binop_rm - Simple SSSE3 bin op
+multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop, OpndItins itins,
+ bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
+ OpSize;
+ def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src1,
+ (bitconvert (memop_frag addr:$src2)))))], itins.rm>, OpSize;
+}
+
/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
- PatFrag mem_frag128, Intrinsic IntId128,
+ Intrinsic IntId128, OpndItins itins,
bit Is2Addr = 1> {
let isCommutable = 1 in
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
@@ -4830,94 +5362,134 @@ multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
(IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
+ (bitconvert (memopv2i64 addr:$src2))))], itins.rm>, OpSize;
+}
+
+multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId256> {
+ let isCommutable = 1 in
+ def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
+ OpSize;
+ def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (IntId256 VR256:$src1,
+ (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
}
let ImmT = NoImm, Predicates = [HasAVX] in {
let isCommutable = 0 in {
- defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
- int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
- defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
- int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
- defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
- int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
- defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
- int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
- defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
- int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
- defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
- int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
- defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
- int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
- defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
- int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
- defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
- int_x86_ssse3_psign_b_128, 0>, VEX_4V;
- defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
- int_x86_ssse3_psign_w_128, 0>, VEX_4V;
- defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
- int_x86_ssse3_psign_d_128, 0>, VEX_4V;
-}
-defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
- int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
+ defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
+ memopv2i64, i128mem,
+ SSE_PHADDSUBW, 0>, VEX_4V;
+ defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
+ memopv2i64, i128mem,
+ SSE_PHADDSUBD, 0>, VEX_4V;
+ defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
+ memopv2i64, i128mem,
+ SSE_PHADDSUBW, 0>, VEX_4V;
+ defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
+ memopv2i64, i128mem,
+ SSE_PHADDSUBD, 0>, VEX_4V;
+ defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
+ memopv2i64, i128mem,
+ SSE_PSIGN, 0>, VEX_4V;
+ defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
+ memopv2i64, i128mem,
+ SSE_PSIGN, 0>, VEX_4V;
+ defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
+ memopv2i64, i128mem,
+ SSE_PSIGN, 0>, VEX_4V;
+ defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
+ memopv2i64, i128mem,
+ SSE_PSHUFB, 0>, VEX_4V;
+ defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
+ int_x86_ssse3_phadd_sw_128,
+ SSE_PHADDSUBSW, 0>, VEX_4V;
+ defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
+ int_x86_ssse3_phsub_sw_128,
+ SSE_PHADDSUBSW, 0>, VEX_4V;
+ defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
+ int_x86_ssse3_pmadd_ub_sw_128,
+ SSE_PMADD, 0>, VEX_4V;
+}
+defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
+ int_x86_ssse3_pmul_hr_sw_128,
+ SSE_PMULHRSW, 0>, VEX_4V;
+}
+
+let ImmT = NoImm, Predicates = [HasAVX2] in {
+let isCommutable = 0 in {
+ defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
+ memopv4i64, i256mem,
+ SSE_PHADDSUBW, 0>, VEX_4V;
+ defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
+ memopv4i64, i256mem,
+ SSE_PHADDSUBD, 0>, VEX_4V;
+ defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
+ memopv4i64, i256mem,
+ SSE_PHADDSUBW, 0>, VEX_4V;
+ defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
+ memopv4i64, i256mem,
+ SSE_PHADDSUBD, 0>, VEX_4V;
+ defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
+ memopv4i64, i256mem,
+ SSE_PSIGN, 0>, VEX_4V;
+ defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
+ memopv4i64, i256mem,
+ SSE_PSIGN, 0>, VEX_4V;
+ defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
+ memopv4i64, i256mem,
+ SSE_PSIGN, 0>, VEX_4V;
+ defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
+ memopv4i64, i256mem,
+ SSE_PSHUFB, 0>, VEX_4V;
+ defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
+ int_x86_avx2_phadd_sw>, VEX_4V;
+ defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
+ int_x86_avx2_phsub_sw>, VEX_4V;
+ defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
+ int_x86_avx2_pmadd_ub_sw>, VEX_4V;
+}
+defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
+ int_x86_avx2_pmul_hr_sw>, VEX_4V;
}
// None of these have i8 immediate fields.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
- defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
- int_x86_ssse3_phadd_w_128>;
- defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
- int_x86_ssse3_phadd_d_128>;
- defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
- int_x86_ssse3_phadd_sw_128>;
- defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
- int_x86_ssse3_phsub_w_128>;
- defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
- int_x86_ssse3_phsub_d_128>;
- defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
- int_x86_ssse3_phsub_sw_128>;
- defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
- int_x86_ssse3_pmadd_ub_sw_128>;
- defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
- int_x86_ssse3_pshuf_b_128>;
- defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
- int_x86_ssse3_psign_b_128>;
- defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
- int_x86_ssse3_psign_w_128>;
- defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
- int_x86_ssse3_psign_d_128>;
-}
-defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
- int_x86_ssse3_pmul_hr_sw_128>;
-}
-
-let Predicates = [HasSSSE3] in {
- def : Pat<(X86pshufb VR128:$src, VR128:$mask),
- (PSHUFBrr128 VR128:$src, VR128:$mask)>;
- def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
- (PSHUFBrm128 VR128:$src, addr:$mask)>;
-
- def : Pat<(X86psignb VR128:$src1, VR128:$src2),
- (PSIGNBrr128 VR128:$src1, VR128:$src2)>;
- def : Pat<(X86psignw VR128:$src1, VR128:$src2),
- (PSIGNWrr128 VR128:$src1, VR128:$src2)>;
- def : Pat<(X86psignd VR128:$src1, VR128:$src2),
- (PSIGNDrr128 VR128:$src1, VR128:$src2)>;
-}
-
-let Predicates = [HasAVX] in {
- def : Pat<(X86pshufb VR128:$src, VR128:$mask),
- (VPSHUFBrr128 VR128:$src, VR128:$mask)>;
- def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
- (VPSHUFBrm128 VR128:$src, addr:$mask)>;
-
- def : Pat<(X86psignb VR128:$src1, VR128:$src2),
- (VPSIGNBrr128 VR128:$src1, VR128:$src2)>;
- def : Pat<(X86psignw VR128:$src1, VR128:$src2),
- (VPSIGNWrr128 VR128:$src1, VR128:$src2)>;
- def : Pat<(X86psignd VR128:$src1, VR128:$src2),
- (VPSIGNDrr128 VR128:$src1, VR128:$src2)>;
+ defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
+ memopv2i64, i128mem, SSE_PHADDSUBW>;
+ defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
+ memopv2i64, i128mem, SSE_PHADDSUBD>;
+ defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
+ memopv2i64, i128mem, SSE_PHADDSUBW>;
+ defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
+ memopv2i64, i128mem, SSE_PHADDSUBD>;
+ defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
+ memopv2i64, i128mem, SSE_PSIGN>;
+ defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
+ memopv2i64, i128mem, SSE_PSIGN>;
+ defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
+ memopv2i64, i128mem, SSE_PSIGN>;
+ defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
+ memopv2i64, i128mem, SSE_PSHUFB>;
+ defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
+ int_x86_ssse3_phadd_sw_128,
+ SSE_PHADDSUBSW>;
+ defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
+ int_x86_ssse3_phsub_sw_128,
+ SSE_PHADDSUBSW>;
+ defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
+ int_x86_ssse3_pmadd_ub_sw_128, SSE_PMADD>;
+}
+defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
+ int_x86_ssse3_pmul_hr_sw_128,
+ SSE_PMULHRSW>;
}
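Aside (illustration, not part of this commit): two of the SSSE3 binary ops
above, sketched in C assuming GCC or Clang with -mssse3. PSHUFB selects each
result byte by index from the first operand (or zeroes it when the mask byte
is negative); PSIGN negates, zeroes, or passes through per the sign of the
second operand.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i v = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                           7, 6, 5, 4, 3, 2, 1, 0);
  __m128i rev = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                             8, 9, 10, 11, 12, 13, 14, 15);
  __m128i r = _mm_shuffle_epi8(v, rev);            /* byte-reversed v */
  __m128i s = _mm_sign_epi16(_mm_set1_epi16(5),
                             _mm_set_epi16(1, -1, 0, 1, -1, 0, 1, -1));
  printf("%d %d\n", (char)_mm_cvtsi128_si32(r),    /* 15 */
         (short)_mm_cvtsi128_si32(s));             /* -5 */
  return 0;
}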
//===---------------------------------------------------------------------===//
@@ -4925,36 +5497,57 @@ let Predicates = [HasAVX] in {
//===---------------------------------------------------------------------===//
multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
+ let neverHasSideEffects = 1 in {
def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
- []>, OpSize;
+ [], IIC_SSE_PALIGNR>, OpSize;
+ let mayLoad = 1 in
def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
!if(Is2Addr,
!strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
!strconcat(asm,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [], IIC_SSE_PALIGNR>, OpSize;
+ }
+}
+
+multiclass ssse3_palign_y<string asm, bit Is2Addr = 1> {
+ let neverHasSideEffects = 1 in {
+ def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, i8imm:$src3),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+ let mayLoad = 1 in
+ def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2, i8imm:$src3),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[]>, OpSize;
+ }
}
let Predicates = [HasAVX] in
defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
+let Predicates = [HasAVX2] in
+ defm VPALIGN : ssse3_palign_y<"vpalignr", 0>, VEX_4V;
let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
defm PALIGN : ssse3_palign<"palignr">;
-let Predicates = [HasSSSE3] in {
-def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
-def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
-def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
-def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
- (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+let Predicates = [HasAVX2] in {
+def : Pat<(v8i32 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
+def : Pat<(v8f32 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
+def : Pat<(v16i16 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
+def : Pat<(v32i8 (X86PAlign VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
}
let Predicates = [HasAVX] in {
@@ -4968,23 +5561,36 @@ def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
(VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}
+let Predicates = [HasSSSE3] in {
+def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+}
+
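Aside (illustration, not part of this commit): PALIGNR concatenates its two
sources, first operand in the high half, and shifts the pair right by the
immediate byte count. That is why the patterns above swap $src1 and $src2 when
selecting PALIGNR128rr/VPALIGNR256rr. Assuming GCC or Clang with -mssse3:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i hi = _mm_set1_epi8('H');
  __m128i lo = _mm_set1_epi8('L');
  __m128i r = _mm_alignr_epi8(hi, lo, 12);  /* 4 'L' bytes then 12 'H' */
  char c[16];
  _mm_storeu_si128((__m128i *)c, r);
  printf("%.16s\n", c);                     /* LLLLHHHHHHHHHHHH */
  return 0;
}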
//===---------------------------------------------------------------------===//
// SSSE3 - Thread synchronization
//===---------------------------------------------------------------------===//
let usesCustomInserter = 1 in {
def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
- [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
+ [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
+ Requires<[HasSSE3]>;
def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
- [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
+ [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>,
+ Requires<[HasSSE3]>;
}
let Uses = [EAX, ECX, EDX] in
-def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
- Requires<[HasSSE3]>;
+def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
+ TB, Requires<[HasSSE3]>;
let Uses = [ECX, EAX] in
-def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
- Requires<[HasSSE3]>;
+def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", [], IIC_SSE_MWAIT>,
+ TB, Requires<[HasSSE3]>;
def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
@@ -5010,6 +5616,17 @@ multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
+multiclass SS41I_binop_rm_int16_y<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId> {
+ def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
+
+ def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (IntId (load addr:$src)))]>, OpSize;
+}
+
let Predicates = [HasAVX] in {
defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
VEX;
@@ -5025,6 +5642,21 @@ defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
VEX;
}
+let Predicates = [HasAVX2] in {
+defm VPMOVSXBW : SS41I_binop_rm_int16_y<0x20, "vpmovsxbw",
+ int_x86_avx2_pmovsxbw>, VEX;
+defm VPMOVSXWD : SS41I_binop_rm_int16_y<0x23, "vpmovsxwd",
+ int_x86_avx2_pmovsxwd>, VEX;
+defm VPMOVSXDQ : SS41I_binop_rm_int16_y<0x25, "vpmovsxdq",
+ int_x86_avx2_pmovsxdq>, VEX;
+defm VPMOVZXBW : SS41I_binop_rm_int16_y<0x30, "vpmovzxbw",
+ int_x86_avx2_pmovzxbw>, VEX;
+defm VPMOVZXWD : SS41I_binop_rm_int16_y<0x33, "vpmovzxwd",
+ int_x86_avx2_pmovzxwd>, VEX;
+defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq",
+ int_x86_avx2_pmovzxdq>, VEX;
+}
+
defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
@@ -5032,70 +5664,80 @@ defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
-let Predicates = [HasSSE41] in {
+let Predicates = [HasAVX] in {
// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
- (PMOVSXBWrm addr:$src)>;
+ (VPMOVSXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
- (PMOVSXBWrm addr:$src)>;
+ (VPMOVSXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
- (PMOVSXWDrm addr:$src)>;
+ (VPMOVSXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
- (PMOVSXWDrm addr:$src)>;
+ (VPMOVSXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
- (PMOVSXDQrm addr:$src)>;
+ (VPMOVSXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
- (PMOVSXDQrm addr:$src)>;
+ (VPMOVSXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
- (PMOVZXBWrm addr:$src)>;
+ (VPMOVZXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
- (PMOVZXBWrm addr:$src)>;
+ (VPMOVZXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
- (PMOVZXWDrm addr:$src)>;
+ (VPMOVZXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
- (PMOVZXWDrm addr:$src)>;
+ (VPMOVZXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
- (PMOVZXDQrm addr:$src)>;
+ (VPMOVZXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
- (PMOVZXDQrm addr:$src)>;
+ (VPMOVZXDQrm addr:$src)>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasSSE41] in {
// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
- (VPMOVSXBWrm addr:$src)>;
+ (PMOVSXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
- (VPMOVSXBWrm addr:$src)>;
+ (PMOVSXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
- (VPMOVSXWDrm addr:$src)>;
+ (PMOVSXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
- (VPMOVSXWDrm addr:$src)>;
+ (PMOVSXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
- (VPMOVSXDQrm addr:$src)>;
+ (PMOVSXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
- (VPMOVSXDQrm addr:$src)>;
+ (PMOVSXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
- (VPMOVZXBWrm addr:$src)>;
+ (PMOVZXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
- (VPMOVZXBWrm addr:$src)>;
+ (PMOVZXBWrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
- (VPMOVZXWDrm addr:$src)>;
+ (PMOVZXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
- (VPMOVZXWDrm addr:$src)>;
+ (PMOVZXWDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
- (VPMOVZXDQrm addr:$src)>;
+ (PMOVZXDQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
- (VPMOVZXDQrm addr:$src)>;
+ (PMOVZXDQrm addr:$src)>;
+}
+
+let Predicates = [HasAVX] in {
+def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQrr VR128:$src)>;
+def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>;
+}
+
+let Predicates = [HasSSE41] in {
+def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>;
+def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>;
}
@@ -5111,6 +5753,19 @@ multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
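+/// SS41I_binop_rm_int8_y - AVX2 sign/zero-extend ops whose memory form
+/// reads 8 bytes (an i64) and widens them into a 256-bit result.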
+multiclass SS41I_binop_rm_int8_y<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId> {
+ def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
+
+ def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i32mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst,
+ (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
+ OpSize;
+}
+
let Predicates = [HasAVX] in {
defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
VEX;
@@ -5122,35 +5777,46 @@ defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
VEX;
}
+let Predicates = [HasAVX2] in {
+defm VPMOVSXBD : SS41I_binop_rm_int8_y<0x21, "vpmovsxbd",
+ int_x86_avx2_pmovsxbd>, VEX;
+defm VPMOVSXWQ : SS41I_binop_rm_int8_y<0x24, "vpmovsxwq",
+ int_x86_avx2_pmovsxwq>, VEX;
+defm VPMOVZXBD : SS41I_binop_rm_int8_y<0x31, "vpmovzxbd",
+ int_x86_avx2_pmovzxbd>, VEX;
+defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq",
+ int_x86_avx2_pmovzxwq>, VEX;
+}
+
defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
-let Predicates = [HasSSE41] in {
+let Predicates = [HasAVX] in {
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
- (PMOVSXBDrm addr:$src)>;
+ (VPMOVSXBDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
- (PMOVSXWQrm addr:$src)>;
+ (VPMOVSXWQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
- (PMOVZXBDrm addr:$src)>;
+ (VPMOVZXBDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
- (PMOVZXWQrm addr:$src)>;
+ (VPMOVZXWQrm addr:$src)>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasSSE41] in {
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
- (VPMOVSXBDrm addr:$src)>;
+ (PMOVSXBDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
- (VPMOVSXWQrm addr:$src)>;
+ (PMOVSXWQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
- (VPMOVZXBDrm addr:$src)>;
+ (PMOVZXBDrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
- (VPMOVZXWQrm addr:$src)>;
+ (PMOVZXWQrm addr:$src)>;
}
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
@@ -5166,39 +5832,59 @@ multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
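+/// SS41I_binop_rm_int4_y - AVX2 sign/zero-extend ops whose memory form
+/// reads 4 bytes (an i32) and widens them into a 256-bit result.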
+multiclass SS41I_binop_rm_int4_y<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId> {
+ def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (IntId VR128:$src))]>, OpSize;
+
+ // Expecting an i16 load any-extended to an i32 value.
+ def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst), (ins i16mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (IntId (bitconvert
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
+ OpSize;
+}
+
let Predicates = [HasAVX] in {
defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
VEX;
defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
VEX;
}
+let Predicates = [HasAVX2] in {
+defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq",
+ int_x86_avx2_pmovsxbq>, VEX;
+defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq",
+ int_x86_avx2_pmovzxbq>, VEX;
+}
defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
-let Predicates = [HasSSE41] in {
+let Predicates = [HasAVX] in {
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
(bitconvert (v4i32 (X86vzmovl
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (PMOVSXBQrm addr:$src)>;
+ (VPMOVSXBQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbq
(bitconvert (v4i32 (X86vzmovl
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (PMOVZXBQrm addr:$src)>;
+ (VPMOVZXBQrm addr:$src)>;
}
-let Predicates = [HasAVX] in {
+let Predicates = [HasSSE41] in {
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
(bitconvert (v4i32 (X86vzmovl
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVSXBQrm addr:$src)>;
+ (PMOVSXBQrm addr:$src)>;
def : Pat<(int_x86_sse41_pmovzxbq
(bitconvert (v4i32 (X86vzmovl
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
- (VPMOVZXBQrm addr:$src)>;
+ (PMOVZXBQrm addr:$src)>;
}
//===----------------------------------------------------------------------===//
@@ -5213,6 +5899,7 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
OpSize;
+ let neverHasSideEffects = 1, mayStore = 1 in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
@@ -5235,6 +5922,7 @@ defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
+ let neverHasSideEffects = 1, mayStore = 1 in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
(ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
@@ -5311,26 +5999,28 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
addr:$dst)]>, OpSize;
}
-let Predicates = [HasAVX] in {
- defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
- def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, OpSize, VEX;
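+// ExeDomain tags these as packed-single so the execution domain fixup pass
+// can account for domain-crossing penalties.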
+let ExeDomain = SSEPackedSingle in {
+ let Predicates = [HasAVX] in {
+ defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
+ def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, OpSize, VEX;
+ }
+ defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
}
-defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
imm:$src2))),
addr:$dst),
- (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
- Requires<[HasSSE41]>;
+ (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
+ Requires<[HasAVX]>;
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
imm:$src2))),
addr:$dst),
- (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
- Requires<[HasAVX]>;
+ (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
+ Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
@@ -5439,17 +6129,12 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
imm:$src3))]>, OpSize;
}
-let Constraints = "$src1 = $dst" in
- defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
-let Predicates = [HasAVX] in
- defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
-
-def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
- (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
- Requires<[HasAVX]>;
-def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
- (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
- Requires<[HasSSE41]>;
+let ExeDomain = SSEPackedSingle in {
+ let Predicates = [HasAVX] in
+ defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
+ let Constraints = "$src1 = $dst" in
+ defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
+}
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
@@ -5459,6 +6144,7 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
X86MemOperand x86memop, RegisterClass RC,
PatFrag mem_frag32, PatFrag mem_frag64,
Intrinsic V4F32Int, Intrinsic V2F64Int> {
+let ExeDomain = SSEPackedSingle in {
// Intrinsic operation, reg.
// Vector intrinsic operation, reg
def PSr : SS4AIi8<opcps, MRMSrcReg,
@@ -5469,15 +6155,16 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
OpSize;
// Vector intrinsic operation, mem
- def PSm : Ii8<opcps, MRMSrcMem,
+ def PSm : SS4AIi8<opcps, MRMSrcMem,
(outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
!strconcat(OpcodeStr,
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst,
(V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
- TA, OpSize,
- Requires<[HasSSE41]>;
+ OpSize;
+} // ExeDomain = SSEPackedSingle
+let ExeDomain = SSEPackedDouble in {
// Vector intrinsic operation, reg
def PDr : SS4AIi8<opcpd, MRMSrcReg,
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
@@ -5494,46 +6181,26 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
[(set RC:$dst,
(V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
OpSize;
-}
-
-multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
- RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
- // Intrinsic operation, reg.
- // Vector intrinsic operation, reg
- def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, OpSize;
-
- // Vector intrinsic operation, mem
- def PSm_AVX : Ii8<opcps, MRMSrcMem,
- (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, TA, OpSize, Requires<[HasSSE41]>;
-
- // Vector intrinsic operation, reg
- def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
- (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, OpSize;
-
- // Vector intrinsic operation, mem
- def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
- (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []>, OpSize;
+} // ExeDomain = SSEPackedDouble
}
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
string OpcodeStr,
Intrinsic F32Int,
Intrinsic F64Int, bit Is2Addr = 1> {
- // Intrinsic operation, reg.
+let ExeDomain = GenericDomain in {
+ // Operation, reg.
def SSr : SS4AIi8<opcss, MRMSrcReg,
+ (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ []>, OpSize;
+
+ // Intrinsic operation, reg.
+ def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
@@ -5555,8 +6222,18 @@ multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
(F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
OpSize;
- // Intrinsic operation, reg.
+ // Operation, reg.
def SDr : SS4AIi8<opcsd, MRMSrcReg,
+ (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ []>, OpSize;
+
+ // Intrinsic operation, reg.
+ def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!if(Is2Addr,
!strconcat(OpcodeStr,
@@ -5577,37 +6254,7 @@ multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
[(set VR128:$dst,
(F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
OpSize;
-}
-
-multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
- string OpcodeStr> {
- // Intrinsic operation, reg.
- def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, OpSize;
-
- // Intrinsic operation, mem.
- def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, OpSize;
-
- // Intrinsic operation, reg.
- def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, OpSize;
-
- // Intrinsic operation, mem.
- def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, OpSize;
+} // ExeDomain = GenericDomain
}
// FP round - roundss, roundps, roundsd, roundpd
@@ -5625,12 +6272,26 @@ let Predicates = [HasAVX] in {
int_x86_sse41_round_ss,
int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
- // Instructions for the assembler
- defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
- VEX;
- defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
- VEX;
- defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V, VEX_LIG;
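+ // Rounding-control immediates: 0x1 rounds toward -inf (floor), 0x2 toward
+ // +inf (ceil), 0x3 toward zero (trunc), 0x4 uses the current MXCSR mode
+ // (rint), and 0xC additionally masks the inexact exception (nearbyint).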
+ def : Pat<(ffloor FR32:$src),
+ (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
+ def : Pat<(f64 (ffloor FR64:$src)),
+ (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
+ def : Pat<(f32 (fnearbyint FR32:$src)),
+ (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
+ def : Pat<(f64 (fnearbyint FR64:$src)),
+ (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
+ def : Pat<(f32 (fceil FR32:$src)),
+ (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
+ def : Pat<(f64 (fceil FR64:$src)),
+ (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
+ def : Pat<(f32 (frint FR32:$src)),
+ (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
+ def : Pat<(f64 (frint FR64:$src)),
+ (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
+ def : Pat<(f32 (ftrunc FR32:$src)),
+ (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
+ def : Pat<(f64 (ftrunc FR64:$src)),
+ (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
}
defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
@@ -5640,6 +6301,27 @@ let Constraints = "$src1 = $dst" in
defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
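+// Same rounding-control immediate encodings as the AVX patterns above.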
+def : Pat<(ffloor FR32:$src),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
+def : Pat<(f64 (ffloor FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
+def : Pat<(f32 (fnearbyint FR32:$src)),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
+def : Pat<(f64 (fnearbyint FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
+def : Pat<(f32 (fceil FR32:$src)),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
+def : Pat<(f64 (fceil FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
+def : Pat<(f32 (frint FR32:$src)),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
+def : Pat<(f64 (frint FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
+def : Pat<(f32 (ftrunc FR32:$src)),
+ (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
+def : Pat<(f64 (ftrunc FR64:$src)),
+ (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
+
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//
@@ -5649,11 +6331,11 @@ defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
+ [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
OpSize, VEX;
def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
+ [(set EFLAGS,(X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
OpSize, VEX;
def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
@@ -5668,12 +6350,12 @@ def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "ptest \t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
+ "ptest\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "ptest \t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
+ "ptest\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
OpSize;
}
@@ -5690,11 +6372,15 @@ multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
}
let Defs = [EFLAGS], Predicates = [HasAVX] in {
+let ExeDomain = SSEPackedSingle in {
defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
+}
+let ExeDomain = SSEPackedDouble in {
defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
}
+}
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
@@ -5743,7 +6429,7 @@ multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(IntId128
- (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
+ (bitconvert (memopv2i64 addr:$src))))]>, OpSize;
}
let Predicates = [HasAVX] in
@@ -5769,15 +6455,29 @@ multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
(IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
+ (bitconvert (memopv2i64 addr:$src2))))]>, OpSize;
+}
+
+/// SS41I_binop_rm_int_y - AVX2 256-bit form of the simple SSE 4.1 binary operator
+multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId256> {
+ let isCommutable = 1 in
+ def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>, OpSize;
+ def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (IntId256 VR256:$src1,
+ (bitconvert (memopv4i64 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in {
let isCommutable = 0 in
defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
0>, VEX_4V;
- defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
- 0>, VEX_4V;
defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
0>, VEX_4V;
defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
@@ -5796,17 +6496,35 @@ let Predicates = [HasAVX] in {
0>, VEX_4V;
defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
0>, VEX_4V;
+}
- def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
- (VPCMPEQQrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
- (VPCMPEQQrm VR128:$src1, addr:$src2)>;
+let Predicates = [HasAVX2] in {
+ let isCommutable = 0 in
+ defm VPACKUSDW : SS41I_binop_rm_int_y<0x2B, "vpackusdw",
+ int_x86_avx2_packusdw>, VEX_4V;
+ defm VPMINSB : SS41I_binop_rm_int_y<0x38, "vpminsb",
+ int_x86_avx2_pmins_b>, VEX_4V;
+ defm VPMINSD : SS41I_binop_rm_int_y<0x39, "vpminsd",
+ int_x86_avx2_pmins_d>, VEX_4V;
+ defm VPMINUD : SS41I_binop_rm_int_y<0x3B, "vpminud",
+ int_x86_avx2_pminu_d>, VEX_4V;
+ defm VPMINUW : SS41I_binop_rm_int_y<0x3A, "vpminuw",
+ int_x86_avx2_pminu_w>, VEX_4V;
+ defm VPMAXSB : SS41I_binop_rm_int_y<0x3C, "vpmaxsb",
+ int_x86_avx2_pmaxs_b>, VEX_4V;
+ defm VPMAXSD : SS41I_binop_rm_int_y<0x3D, "vpmaxsd",
+ int_x86_avx2_pmaxs_d>, VEX_4V;
+ defm VPMAXUD : SS41I_binop_rm_int_y<0x3F, "vpmaxud",
+ int_x86_avx2_pmaxu_d>, VEX_4V;
+ defm VPMAXUW : SS41I_binop_rm_int_y<0x3E, "vpmaxuw",
+ int_x86_avx2_pmaxu_w>, VEX_4V;
+ defm VPMULDQ : SS41I_binop_rm_int_y<0x28, "vpmuldq",
+ int_x86_avx2_pmul_dq>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
let isCommutable = 0 in
defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
- defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
@@ -5818,36 +6536,46 @@ let Constraints = "$src1 = $dst" in {
defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
}
-def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
- (PCMPEQQrr VR128:$src1, VR128:$src2)>;
-def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
- (PCMPEQQrm VR128:$src1, addr:$src2)>;
-
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- ValueType OpVT, bit Is2Addr = 1> {
+ ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop, bit Is2Addr = 1> {
let isCommutable = 1 in
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
+ def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
- OpSize;
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
+ [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>, OpSize;
+ def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (OpNode VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2))))]>,
- OpSize;
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src1,
+ (bitconvert (memop_frag addr:$src2)))))]>, OpSize;
}
-let Predicates = [HasAVX] in
- defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
-let Constraints = "$src1 = $dst" in
- defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
+let Predicates = [HasAVX] in {
+ defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
+ memopv2i64, i128mem, 0>, VEX_4V;
+ defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
+ memopv2i64, i128mem, 0>, VEX_4V;
+}
+let Predicates = [HasAVX2] in {
+ defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
+ memopv4i64, i256mem, 0>, VEX_4V;
+ defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
+ memopv4i64, i256mem, 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
+ memopv2i64, i128mem>;
+ defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
+ memopv2i64, i128mem>;
+}
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
@@ -5878,77 +6606,106 @@ multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX] in {
let isCommutable = 0 in {
- defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
- VR128, memopv16i8, i128mem, 0>, VEX_4V;
- defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
- VR128, memopv16i8, i128mem, 0>, VEX_4V;
- defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
- int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
- defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
- int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
+ let ExeDomain = SSEPackedSingle in {
+ defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
+ VR128, memopv4f32, i128mem, 0>, VEX_4V;
+ defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
+ int_x86_avx_blend_ps_256, VR256, memopv8f32, i256mem, 0>, VEX_4V;
+ }
+ let ExeDomain = SSEPackedDouble in {
+ defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
+ VR128, memopv2f64, i128mem, 0>, VEX_4V;
+ defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
+ int_x86_avx_blend_pd_256, VR256, memopv4f64, i256mem, 0>, VEX_4V;
+ }
defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
- VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ VR128, memopv2i64, i128mem, 0>, VEX_4V;
defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
- VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ VR128, memopv2i64, i128mem, 0>, VEX_4V;
}
+ let ExeDomain = SSEPackedSingle in
defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
- VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ VR128, memopv4f32, i128mem, 0>, VEX_4V;
+ let ExeDomain = SSEPackedDouble in
defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
- VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ VR128, memopv2f64, i128mem, 0>, VEX_4V;
+ let ExeDomain = SSEPackedSingle in
defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
- VR256, memopv32i8, i256mem, 0>, VEX_4V;
+ VR256, memopv8f32, i256mem, 0>, VEX_4V;
+}
+
+let Predicates = [HasAVX2] in {
+ let isCommutable = 0 in {
+ defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
+ VR256, memopv4i64, i256mem, 0>, VEX_4V;
+ defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
+ VR256, memopv4i64, i256mem, 0>, VEX_4V;
+ }
}
let Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
+ let ExeDomain = SSEPackedSingle in
defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
- VR128, memopv16i8, i128mem>;
+ VR128, memopv4f32, i128mem>;
+ let ExeDomain = SSEPackedDouble in
defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
- VR128, memopv16i8, i128mem>;
+ VR128, memopv2f64, i128mem>;
defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
- VR128, memopv16i8, i128mem>;
+ VR128, memopv2i64, i128mem>;
defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
- VR128, memopv16i8, i128mem>;
+ VR128, memopv2i64, i128mem>;
}
+ let ExeDomain = SSEPackedSingle in
defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
- VR128, memopv16i8, i128mem>;
+ VR128, memopv4f32, i128mem>;
+ let ExeDomain = SSEPackedDouble in
defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
- VR128, memopv16i8, i128mem>;
+ VR128, memopv2f64, i128mem>;
}
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
-let Predicates = [HasAVX] in {
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
RegisterClass RC, X86MemOperand x86memop,
PatFrag mem_frag, Intrinsic IntId> {
- def rr : I<opc, MRMSrcReg, (outs RC:$dst),
+ def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
- SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
+ IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
- def rm : I<opc, MRMSrcMem, (outs RC:$dst),
+ def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst,
(IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
RC:$src3))],
- SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
-}
+ IIC_DEFAULT, SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
}
+let Predicates = [HasAVX] in {
+let ExeDomain = SSEPackedDouble in {
defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
- memopv16i8, int_x86_sse41_blendvpd>;
-defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
- memopv16i8, int_x86_sse41_blendvps>;
-defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
- memopv16i8, int_x86_sse41_pblendvb>;
+ memopv2f64, int_x86_sse41_blendvpd>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
- memopv32i8, int_x86_avx_blendv_pd_256>;
+ memopv4f64, int_x86_avx_blendv_pd_256>;
+} // ExeDomain = SSEPackedDouble
+let ExeDomain = SSEPackedSingle in {
+defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
+ memopv4f32, int_x86_sse41_blendvps>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
- memopv32i8, int_x86_avx_blendv_ps_256>;
+ memopv8f32, int_x86_avx_blendv_ps_256>;
+} // ExeDomain = SSEPackedSingle
+defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
+ memopv2i64, int_x86_sse41_pblendvb>;
+}
+
+let Predicates = [HasAVX2] in {
+defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
+ memopv4i64, int_x86_avx2_pblendvb>;
+}
let Predicates = [HasAVX] in {
def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
@@ -5978,11 +6735,28 @@ let Predicates = [HasAVX] in {
def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
(v4f64 VR256:$src2))),
(VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
+
+ def : Pat<(v8f32 (X86Blendps (v8f32 VR256:$src1), (v8f32 VR256:$src2),
+ (imm:$mask))),
+ (VBLENDPSYrri VR256:$src2, VR256:$src1, imm:$mask)>;
+ def : Pat<(v4f64 (X86Blendpd (v4f64 VR256:$src1), (v4f64 VR256:$src2),
+ (imm:$mask))),
+ (VBLENDPDYrri VR256:$src2, VR256:$src1, imm:$mask)>;
+}
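+// In the vselect patterns above, the true operand lands in the blend's
+// second source slot: the hardware selects the second source where the
+// mask bit is set.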
+
+let Predicates = [HasAVX2] in {
+ def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
+ (v32i8 VR256:$src2))),
+ (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
+ def : Pat<(v16i16 (X86Blendpw (v16i16 VR256:$src1), (v16i16 VR256:$src2),
+ (imm:$mask))),
+ (VPBLENDWYrri VR256:$src2, VR256:$src1, imm:$mask)>;
}
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
- multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
+ multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
+ Intrinsic IntId> {
def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr,
@@ -5996,13 +6770,18 @@ let Uses = [XMM0], Constraints = "$src1 = $dst" in {
"\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst,
(IntId VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
+ (bitconvert (mem_frag addr:$src2)), XMM0))]>, OpSize;
}
}
-defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
-defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
-defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
+let ExeDomain = SSEPackedDouble in
+defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64,
+ int_x86_sse41_blendvpd>;
+let ExeDomain = SSEPackedSingle in
+defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32,
+ int_x86_sse41_blendvps>;
+defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64,
+ int_x86_sse41_pblendvb>;
let Predicates = [HasSSE41] in {
def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
@@ -6020,6 +6799,17 @@ let Predicates = [HasSSE41] in {
def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
(v2f64 VR128:$src2))),
(BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
+
+ def : Pat<(v8i16 (X86Blendpw (v8i16 VR128:$src1), (v8i16 VR128:$src2),
+ (imm:$mask))),
+ (PBLENDWrri VR128:$src2, VR128:$src1, imm:$mask)>;
+ def : Pat<(v4f32 (X86Blendps (v4f32 VR128:$src1), (v4f32 VR128:$src2),
+ (imm:$mask))),
+ (BLENDPSrri VR128:$src2, VR128:$src1, imm:$mask)>;
+ def : Pat<(v2f64 (X86Blendpd (v2f64 VR128:$src1), (v2f64 VR128:$src2),
+ (imm:$mask))),
+ (BLENDPDrri VR128:$src2, VR128:$src1, imm:$mask)>;
+
}
let Predicates = [HasAVX] in
@@ -6027,6 +6817,11 @@ def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
OpSize, VEX;
+let Predicates = [HasAVX2] in
+def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
+ "vmovntdqa\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
+ OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movntdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
@@ -6036,43 +6831,37 @@ def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//
-/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
-multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Is2Addr = 1> {
- def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
+/// SS42I_binop_rm - Simple SSE 4.2 binary operator
+multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop, bit Is2Addr = 1> {
+ def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
+ [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
OpSize;
- def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
+ def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
+ [(set RC:$dst,
+ (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>, OpSize;
}
-let Predicates = [HasAVX] in {
- defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
- 0>, VEX_4V;
+let Predicates = [HasAVX] in
+ defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
+ memopv2i64, i128mem, 0>, VEX_4V;
- def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
- (VPCMPGTQrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
- (VPCMPGTQrm VR128:$src1, addr:$src2)>;
-}
+let Predicates = [HasAVX2] in
+ defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
+ memopv4i64, i256mem, 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
- defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
-
-def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
- (PCMPGTQrr VR128:$src1, VR128:$src2)>;
-def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
- (PCMPGTQrm VR128:$src1, addr:$src2)>;
+ defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
+ memopv2i64, i128mem>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
@@ -6091,23 +6880,26 @@ multiclass pseudo_pcmpistrm<string asm> {
}
let Defs = [EFLAGS], usesCustomInserter = 1 in {
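+ // Prefer the AVX pseudo over the SSE4.2 one when both predicates match.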
+ let AddedComplexity = 1 in
+ defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
- defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
}
-let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
+let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1, Predicates = [HasAVX] in {
def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
+ let mayLoad = 1 in
def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}
-let Defs = [XMM0, EFLAGS] in {
+let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in {
def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
"pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
+ let mayLoad = 1 in
def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
"pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
@@ -6126,24 +6918,27 @@ multiclass pseudo_pcmpestrm<string asm> {
}
let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
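+ // As above, prefer the AVX pseudo when AVX is available.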
+ let AddedComplexity = 1 in
+ defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
- defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
}
let Predicates = [HasAVX],
- Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
+ Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
"vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
+ let mayLoad = 1 in
def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
"vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}
-let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
+let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in {
def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
"pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
+ let mayLoad = 1 in
def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
"pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
@@ -6318,8 +7113,7 @@ multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
+ (IntId128 VR128:$src1, (memopv2i64 addr:$src2)))]>, OpSize;
}
// Perform One Round of an AES Encryption/Decryption Flow
@@ -6345,44 +7139,6 @@ let Constraints = "$src1 = $dst" in {
int_x86_aesni_aesdeclast>;
}
-let Predicates = [HasAES] in {
- def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
- (AESENCrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
- (AESENCrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
- (AESENCLASTrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
- (AESENCLASTrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
- (AESDECrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
- (AESDECrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
- (AESDECLASTrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
- (AESDECLASTrm VR128:$src1, addr:$src2)>;
-}
-
-let Predicates = [HasAVX, HasAES], AddedComplexity = 20 in {
- def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
- (VAESENCrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
- (VAESENCrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
- (VAESENCLASTrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
- (VAESENCLASTrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
- (VAESDECrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
- (VAESDECrm VR128:$src1, addr:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
- (VAESDECLASTrr VR128:$src1, VR128:$src2)>;
- def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
- (VAESDECLASTrm VR128:$src1, addr:$src2)>;
-}
-
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
@@ -6394,8 +7150,7 @@ let Predicates = [HasAVX, HasAES] in {
def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1),
"vaesimc\t{$src1, $dst|$dst, $src1}",
- [(set VR128:$dst,
- (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
+ [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
@@ -6407,8 +7162,7 @@ def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1),
"aesimc\t{$src1, $dst|$dst, $src1}",
- [(set VR128:$dst,
- (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
+ [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
OpSize;
// AES Round Key Generation Assist
@@ -6423,8 +7177,7 @@ let Predicates = [HasAVX, HasAES] in {
(ins i128mem:$src1, i8imm:$src2),
"vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
- imm:$src2))]>,
+ (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
@@ -6437,8 +7190,7 @@ def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, i8imm:$src2),
"aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
- imm:$src2))]>,
+ (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
OpSize;
//===----------------------------------------------------------------------===//
@@ -6446,28 +7198,32 @@ def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
//===----------------------------------------------------------------------===//
// Carry-less Multiplication instructions
-let Constraints = "$src1 = $dst" in {
-def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
+let neverHasSideEffects = 1 in {
+// AVX carry-less Multiplication instructions
+def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>;
-def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
+let mayLoad = 1 in
+def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>;
-}
-// AVX carry-less Multiplication instructions
-def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
+let Constraints = "$src1 = $dst" in {
+def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[]>;
-def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
+let mayLoad = 1 in
+def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[]>;
+} // Constraints = "$src1 = $dst"
+} // neverHasSideEffects = 1
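+// These defs carry no ISel patterns, so the flags above supply the
+// side-effect and memory information the optimizer relies on.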
multiclass pclmul_alias<string asm, int immop> {
@@ -6506,51 +7262,60 @@ class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (Int addr:$src))]>, VEX;
-def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
- int_x86_avx_vbroadcastss>;
-def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
- int_x86_avx_vbroadcastss_256>;
-def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
- int_x86_avx_vbroadcast_sd_256>;
+// AVX2 adds register forms
+class avx2_broadcast_reg<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ Intrinsic Int> :
+ AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (Int VR128:$src))]>, VEX;
+
+let ExeDomain = SSEPackedSingle in {
+ def VBROADCASTSSrm : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
+ int_x86_avx_vbroadcast_ss>;
+ def VBROADCASTSSYrm : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
+ int_x86_avx_vbroadcast_ss_256>;
+}
+let ExeDomain = SSEPackedDouble in
+def VBROADCASTSDrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
+ int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
int_x86_avx_vbroadcastf128_pd_256>;
+let ExeDomain = SSEPackedSingle in {
+ def VBROADCASTSSrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
+ int_x86_avx2_vbroadcast_ss_ps>;
+ def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
+ int_x86_avx2_vbroadcast_ss_ps_256>;
+}
+let ExeDomain = SSEPackedDouble in
+def VBROADCASTSDrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
+ int_x86_avx2_vbroadcast_sd_pd_256>;
+
+let Predicates = [HasAVX2] in
+def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
+ int_x86_avx2_vbroadcasti128>;
+
+let Predicates = [HasAVX] in
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
(VBROADCASTF128 addr:$src)>;
-def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
- (VBROADCASTSSY addr:$src)>;
-def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
- (VBROADCASTSD addr:$src)>;
-def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
- (VBROADCASTSSY addr:$src)>;
-def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
- (VBROADCASTSD addr:$src)>;
-
-def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
- (VBROADCASTSS addr:$src)>;
-def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
- (VBROADCASTSS addr:$src)>;
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
+let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR128:$src2, i8imm:$src3),
"vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, VEX_4V;
+let mayLoad = 1 in
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f128mem:$src2, i8imm:$src3),
"vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[]>, VEX_4V;
+}
-def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
- (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
-def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
- (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
-def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
- (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
-
+let Predicates = [HasAVX] in {
def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
(i32 imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
@@ -6559,11 +7324,11 @@ def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
(i32 imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
+def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
(i32 imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
-def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
+def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
(i32 imm)),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
@@ -6576,18 +7341,54 @@ def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
(VINSERTF128rr VR256:$src1, VR128:$src2,
(INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
+ (i32 imm)),
+ (VINSERTF128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
+ (i32 imm)),
+ (VINSERTF128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
+ (i32 imm)),
+ (VINSERTF128rm VR256:$src1, addr:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+}
+
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
+let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in {
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
(ins VR256:$src1, i8imm:$src2),
"vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, VEX;
+let mayStore = 1 in
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
(ins f128mem:$dst, VR256:$src1, i8imm:$src2),
"vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, VEX;
+}
+// Extract and store.
+let Predicates = [HasAVX] in {
+ def : Pat<(alignedstore (int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+ def : Pat<(alignedstore (int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+ def : Pat<(alignedstore (int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2), addr:$dst),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+
+ def : Pat<(int_x86_sse_storeu_ps addr:$dst, (int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2)),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+ def : Pat<(int_x86_sse2_storeu_pd addr:$dst, (int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2)),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+ def : Pat<(int_x86_sse2_storeu_dq addr:$dst, (bc_v16i8 (int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2))),
+ (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+}
+
+// AVX1 patterns
+let Predicates = [HasAVX] in {
def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
(VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
@@ -6604,14 +7405,14 @@ def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
(v4f64 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
- (v4i32 (VEXTRACTF128rr
- (v8i32 VR256:$src1),
- (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
-def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
(v2i64 (VEXTRACTF128rr
(v4i64 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v4i32 (VEXTRACTF128rr
+ (v8i32 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
(v8i16 (VEXTRACTF128rr
(v16i16 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
@@ -6619,14 +7420,14 @@ def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
(v16i8 (VEXTRACTF128rr
(v32i8 VR256:$src1),
(EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+}
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
Intrinsic IntLd, Intrinsic IntLd256,
- Intrinsic IntSt, Intrinsic IntSt256,
- PatFrag pf128, PatFrag pf256> {
+ Intrinsic IntSt, Intrinsic IntSt256> {
def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -6647,26 +7448,26 @@ multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
[(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}
+let ExeDomain = SSEPackedSingle in
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
int_x86_avx_maskload_ps,
int_x86_avx_maskload_ps_256,
int_x86_avx_maskstore_ps,
- int_x86_avx_maskstore_ps_256,
- memopv4f32, memopv8f32>;
+ int_x86_avx_maskstore_ps_256>;
+let ExeDomain = SSEPackedDouble in
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
int_x86_avx_maskload_pd,
int_x86_avx_maskload_pd_256,
int_x86_avx_maskstore_pd,
- int_x86_avx_maskstore_pd_256,
- memopv2f64, memopv4f64>;
+ int_x86_avx_maskstore_pd_256>;
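[editor's note] The masked-move semantics these definitions encode are visible through the standard AVX intrinsics; a minimal sketch (-mavx assumed, mask choice illustrative):

    #include <immintrin.h>

    /* vmaskmovps load: elements whose mask sign bit is set are loaded,
       the rest come back as zero. */
    __m256 masked_load(const float *p) {
        __m256i mask = _mm256_setr_epi32(-1, -1, 0, 0, -1, 0, -1, 0);
        return _mm256_maskload_ps(p, mask);
    }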
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
RegisterClass RC, X86MemOperand x86memop_f,
- X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
- Intrinsic IntVar, Intrinsic IntImm> {
+ X86MemOperand x86memop_i, PatFrag i_frag,
+ Intrinsic IntVar, ValueType vt> {
def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, RC:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
@@ -6674,86 +7475,98 @@ multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
(ins RC:$src1, x86memop_i:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
+ [(set RC:$dst, (IntVar RC:$src1,
+ (bitconvert (i_frag addr:$src2))))]>, VEX_4V;
def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
(ins RC:$src1, i8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
+ [(set RC:$dst, (vt (X86VPermilp RC:$src1, (i8 imm:$src2))))]>, VEX;
def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
(ins x86memop_f:$src1, i8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
-}
-
-defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
- memopv4f32, memopv4i32,
- int_x86_avx_vpermilvar_ps,
- int_x86_avx_vpermil_ps>;
-defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
- memopv8f32, memopv8i32,
- int_x86_avx_vpermilvar_ps_256,
- int_x86_avx_vpermil_ps_256>;
-defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
- memopv2f64, memopv2i64,
- int_x86_avx_vpermilvar_pd,
- int_x86_avx_vpermil_pd>;
-defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
- memopv4f64, memopv4i64,
- int_x86_avx_vpermilvar_pd_256,
- int_x86_avx_vpermil_pd_256>;
-
-def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
- (VPERMILPSYri VR256:$src1, imm:$imm)>;
-def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
- (VPERMILPDYri VR256:$src1, imm:$imm)>;
-def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
+ [(set RC:$dst,
+ (vt (X86VPermilp (memop addr:$src1), (i8 imm:$src2))))]>, VEX;
+}
+
+let ExeDomain = SSEPackedSingle in {
+ defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
+ memopv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
+ defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
+ memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>;
+}
+let ExeDomain = SSEPackedDouble in {
+ defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
+ memopv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
+ defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
+ memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>;
+}
+
+let Predicates = [HasAVX] in {
+def : Pat<(v8i32 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
(VPERMILPSYri VR256:$src1, imm:$imm)>;
-def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
+def : Pat<(v4i64 (X86VPermilp VR256:$src1, (i8 imm:$imm))),
(VPERMILPDYri VR256:$src1, imm:$imm)>;
+def : Pat<(v8i32 (X86VPermilp (bc_v8i32 (memopv4i64 addr:$src1)),
+ (i8 imm:$imm))),
+ (VPERMILPSYmi addr:$src1, imm:$imm)>;
+def : Pat<(v4i64 (X86VPermilp (memopv4i64 addr:$src1), (i8 imm:$imm))),
+ (VPERMILPDYmi addr:$src1, imm:$imm)>;
+
+def : Pat<(v2i64 (X86VPermilp VR128:$src1, (i8 imm:$imm))),
+ (VPERMILPDri VR128:$src1, imm:$imm)>;
+def : Pat<(v2i64 (X86VPermilp (memopv2i64 addr:$src1), (i8 imm:$imm))),
+ (VPERMILPDmi addr:$src1, imm:$imm)>;
+}
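[editor's note] vpermilps/vpermilpd shuffle only within each 128-bit lane; the immediate form corresponds to the _mm_permute_ps intrinsic and the variable form to _mm256_permutevar_ps. A sketch of the immediate form (-mavx assumed, name illustrative):

    #include <immintrin.h>

    /* Reverse the four floats of one vector; 0x1B encodes indices
       3,2,1,0 in the two-bit fields the immediate patterns above match. */
    __m128 reverse_lane(__m128 v) {
        return _mm_permute_ps(v, 0x1B);
    }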
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
+let ExeDomain = SSEPackedSingle in {
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, i8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V;
+ [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
+ (i8 imm:$src3))))]>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2, i8imm:$src3),
"vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- []>, VEX_4V;
+ [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv8f32 addr:$src2),
+ (i8 imm:$src3)))]>, VEX_4V;
+}
-def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
- (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
-def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
- (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
-def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
- (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
-
-def : Pat<(int_x86_avx_vperm2f128_ps_256
- VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
- (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
-def : Pat<(int_x86_avx_vperm2f128_pd_256
- VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
- (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
-def : Pat<(int_x86_avx_vperm2f128_si_256
- VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
- (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
-
-def : Pat<(v8f32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
- (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
-def : Pat<(v8i32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+let Predicates = [HasAVX] in {
+def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
-def : Pat<(v4i64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
-def : Pat<(v4f64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
-def : Pat<(v32i8 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
-def : Pat<(v16i16 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
(VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
+def : Pat<(v8f32 (X86VPerm2x128 VR256:$src1,
+ (memopv8f32 addr:$src2), (i8 imm:$imm))),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
+def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
+ (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
+def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
+ (memopv4i64 addr:$src2), (i8 imm:$imm))),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
+def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
+ (memopv4f64 addr:$src2), (i8 imm:$imm))),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
+def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
+ (bc_v32i8 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
+def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
+ (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
+}
+
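[editor's note] vperm2f128 selects whole 128-bit halves from its two sources via a selector byte; a minimal sketch using the matching intrinsic (-mavx assumed, name illustrative):

    #include <immintrin.h>

    /* Selector 0x01 takes src1's upper half for the low lane and src1's
       lower half for the high lane, i.e. it swaps the two halves. */
    __m256 swap_halves(__m256 v) {
        return _mm256_permute2f128_ps(v, v, 0x01);
    }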
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
@@ -6770,30 +7583,362 @@ let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
//===----------------------------------------------------------------------===//
// Half precision conversion instructions
-//
+//===----------------------------------------------------------------------===//
+multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
let Predicates = [HasAVX, HasF16C] in {
- def VCVTPH2PSrm : I<0x13, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
- def VCVTPH2PSrr : I<0x13, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
- def VCVTPH2PSYrm : I<0x13, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
- "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
- def VCVTPH2PSYrr : I<0x13, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
- "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
- def VCVTPS2PHmr : Ii8<0x1D, MRMDestMem, (outs f64mem:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- TA, OpSize, VEX;
- def VCVTPS2PHrr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- TA, OpSize, VEX;
- def VCVTPS2PHYmr : Ii8<0x1D, MRMDestMem, (outs f128mem:$dst),
- (ins VR256:$src1, i32i8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- TA, OpSize, VEX;
- def VCVTPS2PHYrr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
- (ins VR256:$src1, i32i8imm:$src2),
- "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- TA, OpSize, VEX;
+ def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
+ "vcvtph2ps\t{$src, $dst|$dst, $src}",
+ [(set RC:$dst, (Int VR128:$src))]>,
+ T8, OpSize, VEX;
+ let neverHasSideEffects = 1, mayLoad = 1 in
+ def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8, OpSize, VEX;
+}
+}
+
+multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
+let Predicates = [HasAVX, HasF16C] in {
+ def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
+ (ins RC:$src1, i32i8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
+ TA, OpSize, VEX;
+  let neverHasSideEffects = 1, mayStore = 1 in
+ def mr : Ii8<0x1D, MRMDestMem, (outs x86memop:$dst),
+ (ins RC:$src1, i32i8imm:$src2),
+ "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ TA, OpSize, VEX;
+}
+}
+
+defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
+defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>;
+defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
+defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>;
+
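[editor's note] A sketch of the half-precision round trip these definitions implement, via the standard F16C intrinsics (-mf16c assumed; the rounding-mode choice is the conventional one, not mandated here):

    #include <immintrin.h>

    /* vcvtps2ph narrows f32 -> f16 under an immediate rounding mode;
       vcvtph2ps widens back.  The low 64 bits of the __m128i hold the
       four half-precision values. */
    __m128 half_round_trip(__m128 v) {
        __m128i h = _mm_cvtps_ph(v, _MM_FROUND_TO_NEAREST_INT |
                                    _MM_FROUND_NO_EXC);
        return _mm_cvtph_ps(h);
    }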
+//===----------------------------------------------------------------------===//
+// AVX2 Instructions
+//===----------------------------------------------------------------------===//
+
+/// AVX2_binop_rmi_int - AVX2 binary operator with 8-bit immediate
+multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop> {
+ let isCommutable = 1 in
+ def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, u32u8imm:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
+ VEX_4V;
+ def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set RC:$dst,
+ (IntId RC:$src1,
+ (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
+ VEX_4V;
}
+
+let isCommutable = 0 in {
+defm VPBLENDD : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
+ VR128, memopv2i64, i128mem>;
+defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
+ VR256, memopv4i64, i256mem>;
+}
+
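[editor's note] vpblendd picks each doubleword from one of the two sources according to one immediate bit per element; a minimal sketch (-mavx2 assumed, name illustrative):

    #include <immintrin.h>

    /* 0xAA = 0b10101010: even elements from a, odd elements from b. */
    __m256i interleave_dwords(__m256i a, __m256i b) {
        return _mm256_blend_epi32(a, b, 0xAA);
    }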
+//===----------------------------------------------------------------------===//
+// VPBROADCAST - Load from memory and broadcast to all elements of the
+// destination operand
+//
+multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
+ X86MemOperand x86memop, PatFrag ld_frag,
+ Intrinsic Int128, Intrinsic Int256> {
+ def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (Int128 VR128:$src))]>, VEX;
+ def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst,
+ (Int128 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
+ def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (Int256 VR128:$src))]>, VEX;
+ def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst,
+ (Int256 (scalar_to_vector (ld_frag addr:$src))))]>, VEX;
+}
+
+defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
+ int_x86_avx2_pbroadcastb_128,
+ int_x86_avx2_pbroadcastb_256>;
+defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
+ int_x86_avx2_pbroadcastw_128,
+ int_x86_avx2_pbroadcastw_256>;
+defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
+ int_x86_avx2_pbroadcastd_128,
+ int_x86_avx2_pbroadcastd_256>;
+defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
+ int_x86_avx2_pbroadcastq_128,
+ int_x86_avx2_pbroadcastq_256>;
+
+let Predicates = [HasAVX2] in {
+ def : Pat<(v16i8 (X86VBroadcast (loadi8 addr:$src))),
+ (VPBROADCASTBrm addr:$src)>;
+ def : Pat<(v32i8 (X86VBroadcast (loadi8 addr:$src))),
+ (VPBROADCASTBYrm addr:$src)>;
+ def : Pat<(v8i16 (X86VBroadcast (loadi16 addr:$src))),
+ (VPBROADCASTWrm addr:$src)>;
+ def : Pat<(v16i16 (X86VBroadcast (loadi16 addr:$src))),
+ (VPBROADCASTWYrm addr:$src)>;
+ def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
+ (VPBROADCASTDrm addr:$src)>;
+ def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
+ (VPBROADCASTDYrm addr:$src)>;
+ def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
+ (VPBROADCASTQrm addr:$src)>;
+ def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
+ (VPBROADCASTQYrm addr:$src)>;
+}
+
+// AVX1 broadcast patterns
+let Predicates = [HasAVX] in {
+def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
+ (VBROADCASTSSYrm addr:$src)>;
+def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
+ (VBROADCASTSDrm addr:$src)>;
+def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
+ (VBROADCASTSSYrm addr:$src)>;
+def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
+ (VBROADCASTSDrm addr:$src)>;
+
+def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
+ (VBROADCASTSSrm addr:$src)>;
+def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
+ (VBROADCASTSSrm addr:$src)>;
+}
+
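[editor's note] The broadcast patterns above let one scalar load feed every element; a C sketch that compilers targeting AVX2 can lower to a single vpbroadcastd from memory (name illustrative):

    #include <immintrin.h>

    /* Splat one 32-bit value across all eight lanes. */
    __m256i splat32(const int *p) {
        return _mm256_set1_epi32(*p);
    }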
+//===----------------------------------------------------------------------===//
+// VPERM - Permute instructions
+//
+
+multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
+ Intrinsic Int> {
+ def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (Int VR256:$src1, VR256:$src2))]>, VEX_4V;
+ def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (Int VR256:$src1,
+ (bitconvert (mem_frag addr:$src2))))]>,
+ VEX_4V;
+}
+
+defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, int_x86_avx2_permd>;
+let ExeDomain = SSEPackedSingle in
+defm VPERMPS : avx2_perm<0x16, "vpermps", memopv8f32, int_x86_avx2_permps>;
+
+multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
+ Intrinsic Int> {
+ def Yrr : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (Int VR256:$src1, imm:$src2))]>, VEX;
+ def Yrm : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins i256mem:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (Int (mem_frag addr:$src1), imm:$src2))]>,
+ VEX;
+}
+
+defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, int_x86_avx2_permq>,
+ VEX_W;
+let ExeDomain = SSEPackedDouble in
+defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, int_x86_avx2_permpd>,
+ VEX_W;
+
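[editor's note] Unlike vpermilps, vpermd/vpermps move elements across the full 256 bits with per-element indices; vpermq/vpermpd are the 64-bit-element versions with an immediate. A sketch of the variable form (-mavx2 assumed, name illustrative):

    #include <immintrin.h>

    /* Full-width reversal of eight dwords; not expressible with the
       in-lane permutes. */
    __m256i reverse8(__m256i v) {
        __m256i idx = _mm256_setr_epi32(7, 6, 5, 4, 3, 2, 1, 0);
        return _mm256_permutevar8x32_epi32(v, idx);  /* vpermd */
    }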
+//===----------------------------------------------------------------------===//
+// VPERM2I128 - Permute Integer Values in 128-bit chunks
+//
+let AddedComplexity = 1 in {
+def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, i8imm:$src3),
+ "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
+ (i8 imm:$src3))))]>, VEX_4V;
+def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
+ "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv4i64 addr:$src2),
+ (i8 imm:$src3)))]>, VEX_4V;
+}
+
+let Predicates = [HasAVX2], AddedComplexity = 1 in {
+def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
+def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
+def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
+ (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
+
+def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (memopv4i64 addr:$src2)),
+ (i8 imm:$imm))),
+ (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
+def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
+ (bc_v16i16 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
+ (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
+def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)),
+ (i8 imm:$imm))),
+ (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
+}
+
+
+//===----------------------------------------------------------------------===//
+// VINSERTI128 - Insert packed integer values
+//
+let neverHasSideEffects = 1 in {
+def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR128:$src2, i8imm:$src3),
+ "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>,
+ VEX_4V;
+def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i128mem:$src2, i8imm:$src3),
+ "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, VEX_4V;
+}
+
+let Predicates = [HasAVX2], AddedComplexity = 1 in {
+def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
+ (i32 imm)),
+ (VINSERTI128rr VR256:$src1, VR128:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
+ (i32 imm)),
+ (VINSERTI128rr VR256:$src1, VR128:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
+ (i32 imm)),
+ (VINSERTI128rr VR256:$src1, VR128:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
+ (i32 imm)),
+ (VINSERTI128rr VR256:$src1, VR128:$src2,
+ (INSERT_get_vinsertf128_imm VR256:$ins))>;
+}
+
+//===----------------------------------------------------------------------===//
+// VEXTRACTI128 - Extract packed integer values
+//
+def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
+ (ins VR256:$src1, i8imm:$src2),
+ "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
+ VEX;
+let neverHasSideEffects = 1, mayStore = 1 in
+def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR256:$src1, i8imm:$src2),
+ "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, VEX;
+
+let Predicates = [HasAVX2], AddedComplexity = 1 in {
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v2i64 (VEXTRACTI128rr
+ (v4i64 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v4i32 (VEXTRACTI128rr
+ (v8i32 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v8i16 (VEXTRACTI128rr
+ (v16i16 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
+ (v16i8 (VEXTRACTI128rr
+ (v32i8 VR256:$src1),
+ (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
+}
+
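[editor's note] The insert/extract pair above is the integer analogue of vinsertf128/vextractf128; a minimal sketch combining both (-mavx2 assumed, name illustrative):

    #include <immintrin.h>

    /* Copy b's low 128-bit lane into a's high lane. */
    __m256i low_into_high(__m256i a, __m256i b) {
        __m128i lo = _mm256_extracti128_si256(b, 0);  /* vextracti128 */
        return _mm256_inserti128_si256(a, lo, 1);     /* vinserti128  */
    }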
+//===----------------------------------------------------------------------===//
+// VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
+//
+multiclass avx2_pmovmask<string OpcodeStr,
+ Intrinsic IntLd128, Intrinsic IntLd256,
+ Intrinsic IntSt128, Intrinsic IntSt256> {
+ def rm : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
+ def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>, VEX_4V;
+ def mr : AVX28I<0x8e, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
+ def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
+ (ins i256mem:$dst, VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
+}
+
+defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
+ int_x86_avx2_maskload_d,
+ int_x86_avx2_maskload_d_256,
+ int_x86_avx2_maskstore_d,
+ int_x86_avx2_maskstore_d_256>;
+defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
+ int_x86_avx2_maskload_q,
+ int_x86_avx2_maskload_q_256,
+ int_x86_avx2_maskstore_q,
+ int_x86_avx2_maskstore_q_256>, VEX_W;
+
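[editor's note] These are the integer counterparts of vmaskmovps/pd; a masked store writes only the elements whose mask sign bit is set, which is useful for loop tails. A sketch (-mavx2 assumed, mask choice illustrative):

    #include <immintrin.h>

    /* Write only the first three dwords of v to p. */
    void store_first3(int *p, __m256i v) {
        __m256i mask = _mm256_setr_epi32(-1, -1, -1, 0, 0, 0, 0, 0);
        _mm256_maskstore_epi32(p, mask, v);  /* vpmaskmovd */
    }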
+
+//===----------------------------------------------------------------------===//
+// Variable Bit Shifts
+//
+multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType vt128, ValueType vt256> {
+ def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
+ VEX_4V;
+ def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (vt128 (OpNode VR128:$src1,
+ (vt128 (bitconvert (memopv2i64 addr:$src2))))))]>,
+ VEX_4V;
+ def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
+ VEX_4V;
+ def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, i256mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst,
+ (vt256 (OpNode VR256:$src1,
+ (vt256 (bitconvert (memopv4i64 addr:$src2))))))]>,
+ VEX_4V;
+}
+
+defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
+defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
+defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
+defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
+defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
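[editor's note] The variable shifts take a separate count per element, unlike the classic pslld/psrld forms where one count applies to every lane. A sketch (-mavx2 assumed, name illustrative):

    #include <immintrin.h>

    /* Shift element i left by i bits. */
    __m256i staircase(__m256i v) {
        __m256i counts = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
        return _mm256_sllv_epi32(v, counts);  /* vpsllvd */
    }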
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSVM.td b/contrib/llvm/lib/Target/X86/X86InstrSVM.td
new file mode 100644
index 0000000..757dcd0
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrSVM.td
@@ -0,0 +1,62 @@
+//===-- X86InstrSVM.td - SVM Instruction Set Extension -----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the instructions that make up the AMD SVM instruction
+// set.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// SVM instructions
+
+// 0F 01 D9
+def VMMCALL : I<0x01, MRM_D9, (outs), (ins), "vmmcall", []>, TB;
+
+// 0F 01 DC
+def STGI : I<0x01, MRM_DC, (outs), (ins), "stgi", []>, TB;
+
+// 0F 01 DD
+def CLGI : I<0x01, MRM_DD, (outs), (ins), "clgi", []>, TB;
+
+// 0F 01 DE
+let Uses = [EAX] in
+def SKINIT : I<0x01, MRM_DE, (outs), (ins), "skinit\t{%eax|EAX}", []>, TB;
+
+// 0F 01 D8
+let Uses = [EAX] in
+def VMRUN32 : I<0x01, MRM_D8, (outs), (ins),
+ "vmrun\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>;
+let Uses = [RAX] in
+def VMRUN64 : I<0x01, MRM_D8, (outs), (ins),
+ "vmrun\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>;
+
+// 0F 01 DA
+let Uses = [EAX] in
+def VMLOAD32 : I<0x01, MRM_DA, (outs), (ins),
+ "vmload\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>;
+let Uses = [RAX] in
+def VMLOAD64 : I<0x01, MRM_DA, (outs), (ins),
+ "vmload\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>;
+
+// 0F 01 DB
+let Uses = [EAX] in
+def VMSAVE32 : I<0x01, MRM_DB, (outs), (ins),
+ "vmsave\t{%eax|EAX}", []>, TB, Requires<[In32BitMode]>;
+let Uses = [RAX] in
+def VMSAVE64 : I<0x01, MRM_DB, (outs), (ins),
+ "vmsave\t{%rax|RAX}", []>, TB, Requires<[In64BitMode]>;
+
+// 0F 01 DF
+let Uses = [EAX, ECX] in
+def INVLPGA32 : I<0x01, MRM_DF, (outs), (ins),
+ "invlpga\t{%ecx, %eax|EAX, ECX}", []>, TB, Requires<[In32BitMode]>;
+let Uses = [RAX, ECX] in
+def INVLPGA64 : I<0x01, MRM_DF, (outs), (ins),
+ "invlpga\t{%ecx, %rax|RAX, ECX}", []>, TB, Requires<[In64BitMode]>;
+
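[editor's note] For context, vmmcall is how a guest calls into an SVM hypervisor. A heavily hedged sketch: the number-in-EAX convention below is purely illustrative, since each hypervisor defines its own ABI, and the instruction faults unless a hypervisor intercepts it:

    /* Illustrative only: the hypercall ABI and return convention here
       are assumptions, not part of this patch or of any real ABI. */
    static inline unsigned long vmmcall(unsigned long nr) {
        unsigned long ret;
        __asm__ __volatile__("vmmcall" : "=a"(ret) : "a"(nr) : "memory");
        return ret;
    }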
diff --git a/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td b/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
index 8278568..bdeb63f 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -1,10 +1,10 @@
-//===- X86InstrShiftRotate.td - Shift and Rotate Instrs ----*- tablegen -*-===//
-//
+//===-- X86InstrShiftRotate.td - Shift and Rotate Instrs ---*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the shift and rotate instructions.
@@ -19,44 +19,46 @@ let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
"shl{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (shl GR8:$src1, CL))]>;
+ [(set GR8:$dst, (shl GR8:$src1, CL))], IIC_SR>;
def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
"shl{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize;
+ [(set GR16:$dst, (shl GR16:$src1, CL))], IIC_SR>, OpSize;
def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
"shl{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (shl GR32:$src1, CL))]>;
+ [(set GR32:$dst, (shl GR32:$src1, CL))], IIC_SR>;
def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
- "shl{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (shl GR64:$src1, CL))]>;
+ "shl{q}\t{%cl, $dst|$dst, CL}",
+ [(set GR64:$dst, (shl GR64:$src1, CL))], IIC_SR>;
} // Uses = [CL]
def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
"shl{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
+ [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))], IIC_SR>;
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"shl{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
+ [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))], IIC_SR>,
+ OpSize;
def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"shl{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))]>;
+ [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))], IIC_SR>;
def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"shl{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>;
+ [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
// NOTE: We don't include patterns for shifts of a register by one, because
// 'add reg,reg' is cheaper (and we have a Pat pattern for shift-by-one).
def SHL8r1 : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
- "shl{b}\t$dst", []>;
+ "shl{b}\t$dst", [], IIC_SR>;
def SHL16r1 : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
- "shl{w}\t$dst", []>, OpSize;
+ "shl{w}\t$dst", [], IIC_SR>, OpSize;
def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
- "shl{l}\t$dst", []>;
+ "shl{l}\t$dst", [], IIC_SR>;
def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
- "shl{q}\t$dst", []>;
+ "shl{q}\t$dst", [], IIC_SR>;
} // isConvertibleToThreeAddress = 1
} // Constraints = "$src1 = $dst"
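[editor's note] These definitions split each shift into a CL-count form and an immediate (Ii8) form; a variable count must live in CL, while a constant selects the immediate encodings. A plain C sketch of what selects the CL form (name illustrative):

    /* A non-constant count forces the shl r/m32, CL encoding; masking by
       31 mirrors the hardware's count mod 32 and keeps the C shift
       well-defined. */
    unsigned shift_left(unsigned x, unsigned n) {
        return x << (n & 31);
    }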
@@ -66,223 +68,266 @@ def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
let Uses = [CL] in {
def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
"shl{b}\t{%cl, $dst|$dst, CL}",
- [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>;
+ [(store (shl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>;
def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
"shl{w}\t{%cl, $dst|$dst, CL}",
- [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
+ [(store (shl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>,
+ OpSize;
def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
"shl{l}\t{%cl, $dst|$dst, CL}",
- [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>;
+ [(store (shl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>;
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
- "shl{q}\t{%cl, $dst|$dst, %CL}",
- [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>;
+ "shl{q}\t{%cl, $dst|$dst, CL}",
+ [(store (shl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>;
}
def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src),
"shl{b}\t{$src, $dst|$dst, $src}",
- [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, i8imm:$src),
"shl{w}\t{$src, $dst|$dst, $src}",
- [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
+ [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>,
OpSize;
def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, i8imm:$src),
"shl{l}\t{$src, $dst|$dst, $src}",
- [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src),
"shl{q}\t{$src, $dst|$dst, $src}",
- [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
// Shift by 1
def SHL8m1 : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
"shl{b}\t$dst",
- [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def SHL16m1 : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
"shl{w}\t$dst",
- [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
+ [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>,
OpSize;
def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
"shl{l}\t$dst",
- [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
"shl{q}\t$dst",
- [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
"shr{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (srl GR8:$src1, CL))]>;
+ [(set GR8:$dst, (srl GR8:$src1, CL))], IIC_SR>;
def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
"shr{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize;
+ [(set GR16:$dst, (srl GR16:$src1, CL))], IIC_SR>, OpSize;
def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
"shr{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (srl GR32:$src1, CL))]>;
+ [(set GR32:$dst, (srl GR32:$src1, CL))], IIC_SR>;
def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
- "shr{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (srl GR64:$src1, CL))]>;
+ "shr{q}\t{%cl, $dst|$dst, CL}",
+ [(set GR64:$dst, (srl GR64:$src1, CL))], IIC_SR>;
}
def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
"shr{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))]>;
+ [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))], IIC_SR>;
def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"shr{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))]>, OpSize;
+ [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))],
+ IIC_SR>, OpSize;
def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"shr{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))]>;
+ [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
"shr{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
+ [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))], IIC_SR>;
// Shift right by 1
def SHR8r1 : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
"shr{b}\t$dst",
- [(set GR8:$dst, (srl GR8:$src1, (i8 1)))]>;
+ [(set GR8:$dst, (srl GR8:$src1, (i8 1)))], IIC_SR>;
def SHR16r1 : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
"shr{w}\t$dst",
- [(set GR16:$dst, (srl GR16:$src1, (i8 1)))]>, OpSize;
+ [(set GR16:$dst, (srl GR16:$src1, (i8 1)))], IIC_SR>, OpSize;
def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
"shr{l}\t$dst",
- [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>;
+ [(set GR32:$dst, (srl GR32:$src1, (i8 1)))], IIC_SR>;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t$dst",
- [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
+ [(set GR64:$dst, (srl GR64:$src1, (i8 1)))], IIC_SR>;
} // Constraints = "$src1 = $dst"
let Uses = [CL] in {
def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
"shr{b}\t{%cl, $dst|$dst, CL}",
- [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>;
+ [(store (srl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>;
def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
"shr{w}\t{%cl, $dst|$dst, CL}",
- [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>,
+ [(store (srl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>,
OpSize;
def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
"shr{l}\t{%cl, $dst|$dst, CL}",
- [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>;
+ [(store (srl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>;
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
- "shr{q}\t{%cl, $dst|$dst, %CL}",
- [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>;
+ "shr{q}\t{%cl, $dst|$dst, CL}",
+ [(store (srl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>;
}
def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src),
"shr{b}\t{$src, $dst|$dst, $src}",
- [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, i8imm:$src),
"shr{w}\t{$src, $dst|$dst, $src}",
- [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
+ [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>,
OpSize;
def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, i8imm:$src),
"shr{l}\t{$src, $dst|$dst, $src}",
- [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src),
"shr{q}\t{$src, $dst|$dst, $src}",
- [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
// Shift by 1
def SHR8m1 : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
"shr{b}\t$dst",
- [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def SHR16m1 : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
"shr{w}\t$dst",
- [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,OpSize;
+ [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>,OpSize;
def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
"shr{l}\t$dst",
- [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
"shr{q}\t$dst",
- [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
"sar{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (sra GR8:$src1, CL))]>;
+ [(set GR8:$dst, (sra GR8:$src1, CL))],
+ IIC_SR>;
def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
"sar{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (sra GR16:$src1, CL))]>, OpSize;
+ [(set GR16:$dst, (sra GR16:$src1, CL))],
+ IIC_SR>, OpSize;
def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
"sar{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (sra GR32:$src1, CL))]>;
+ [(set GR32:$dst, (sra GR32:$src1, CL))],
+ IIC_SR>;
def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
- "sar{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (sra GR64:$src1, CL))]>;
+ "sar{q}\t{%cl, $dst|$dst, CL}",
+ [(set GR64:$dst, (sra GR64:$src1, CL))],
+ IIC_SR>;
}
def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
"sar{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))]>;
+ [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"sar{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))]>,
+ [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))],
+ IIC_SR>,
OpSize;
def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"sar{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))]>;
+ [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"sar{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>;
+ [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
// Shift by 1
def SAR8r1 : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
"sar{b}\t$dst",
- [(set GR8:$dst, (sra GR8:$src1, (i8 1)))]>;
+ [(set GR8:$dst, (sra GR8:$src1, (i8 1)))],
+ IIC_SR>;
def SAR16r1 : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
"sar{w}\t$dst",
- [(set GR16:$dst, (sra GR16:$src1, (i8 1)))]>, OpSize;
+ [(set GR16:$dst, (sra GR16:$src1, (i8 1)))],
+ IIC_SR>, OpSize;
def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
"sar{l}\t$dst",
- [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>;
+ [(set GR32:$dst, (sra GR32:$src1, (i8 1)))],
+ IIC_SR>;
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t$dst",
- [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
+ [(set GR64:$dst, (sra GR64:$src1, (i8 1)))],
+ IIC_SR>;
} // Constraints = "$src1 = $dst"
let Uses = [CL] in {
def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
"sar{b}\t{%cl, $dst|$dst, CL}",
- [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>;
+ [(store (sra (loadi8 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
"sar{w}\t{%cl, $dst|$dst, CL}",
- [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
+ [(store (sra (loadi16 addr:$dst), CL), addr:$dst)],
+ IIC_SR>, OpSize;
def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
"sar{l}\t{%cl, $dst|$dst, CL}",
- [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>;
+ [(store (sra (loadi32 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
- "sar{q}\t{%cl, $dst|$dst, %CL}",
- [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>;
+ "sar{q}\t{%cl, $dst|$dst, CL}",
+ [(store (sra (loadi64 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
}
def SAR8mi : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, i8imm:$src),
"sar{b}\t{$src, $dst|$dst, $src}",
- [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, i8imm:$src),
"sar{w}\t{$src, $dst|$dst, $src}",
- [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
+ [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>,
OpSize;
def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, i8imm:$src),
"sar{l}\t{$src, $dst|$dst, $src}",
- [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src),
"sar{q}\t{$src, $dst|$dst, $src}",
- [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
// Shift by 1
def SAR8m1 : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
"sar{b}\t$dst",
- [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def SAR16m1 : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
"sar{w}\t$dst",
- [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
+ [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>,
OpSize;
def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
"sar{l}\t$dst",
- [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
"sar{q}\t$dst",
- [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
//===----------------------------------------------------------------------===//
// Rotate instructions
@@ -290,125 +335,125 @@ def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
let Constraints = "$src1 = $dst" in {
def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
- "rcl{b}\t$dst", []>;
+ "rcl{b}\t$dst", [], IIC_SR>;
def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
- "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcl{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
- "rcl{b}\t{%cl, $dst|$dst, CL}", []>;
+ "rcl{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
- "rcl{w}\t$dst", []>, OpSize;
+ "rcl{w}\t$dst", [], IIC_SR>, OpSize;
def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
- "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
+ "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
let Uses = [CL] in
def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
- "rcl{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
+ "rcl{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize;
def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
- "rcl{l}\t$dst", []>;
+ "rcl{l}\t$dst", [], IIC_SR>;
def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
- "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
- "rcl{l}\t{%cl, $dst|$dst, CL}", []>;
+ "rcl{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
- "rcl{q}\t$dst", []>;
+ "rcl{q}\t$dst", [], IIC_SR>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt),
- "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcl{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
- "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
+ "rcl{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
- "rcr{b}\t$dst", []>;
+ "rcr{b}\t$dst", [], IIC_SR>;
def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
- "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcr{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
- "rcr{b}\t{%cl, $dst|$dst, CL}", []>;
+ "rcr{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
- "rcr{w}\t$dst", []>, OpSize;
+ "rcr{w}\t$dst", [], IIC_SR>, OpSize;
def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
- "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
+ "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
let Uses = [CL] in
def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
- "rcr{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
+ "rcr{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize;
def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
- "rcr{l}\t$dst", []>;
+ "rcr{l}\t$dst", [], IIC_SR>;
def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
- "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
- "rcr{l}\t{%cl, $dst|$dst, CL}", []>;
+ "rcr{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
- "rcr{q}\t$dst", []>;
+ "rcr{q}\t$dst", [], IIC_SR>;
def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$cnt),
- "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcr{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
- "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
+ "rcr{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
} // Constraints = "$src1 = $dst"
def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
- "rcl{b}\t$dst", []>;
+ "rcl{b}\t$dst", [], IIC_SR>;
def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, i8imm:$cnt),
- "rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcl{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
- "rcl{w}\t$dst", []>, OpSize;
+ "rcl{w}\t$dst", [], IIC_SR>, OpSize;
def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, i8imm:$cnt),
- "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
+ "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
- "rcl{l}\t$dst", []>;
+ "rcl{l}\t$dst", [], IIC_SR>;
def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, i8imm:$cnt),
- "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
- "rcl{q}\t$dst", []>;
+ "rcl{q}\t$dst", [], IIC_SR>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
- "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcl{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
def RCR8m1 : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
- "rcr{b}\t$dst", []>;
+ "rcr{b}\t$dst", [], IIC_SR>;
def RCR8mi : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, i8imm:$cnt),
- "rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcr{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
- "rcr{w}\t$dst", []>, OpSize;
+ "rcr{w}\t$dst", [], IIC_SR>, OpSize;
def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, i8imm:$cnt),
- "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
+ "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize;
def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
- "rcr{l}\t$dst", []>;
+ "rcr{l}\t$dst", [], IIC_SR>;
def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, i8imm:$cnt),
- "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
- "rcr{q}\t$dst", []>;
+ "rcr{q}\t$dst", [], IIC_SR>;
def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, i8imm:$cnt),
- "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>;
+ "rcr{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
let Uses = [CL] in {
def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
- "rcl{b}\t{%cl, $dst|$dst, CL}", []>;
+ "rcl{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
- "rcl{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
+ "rcl{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize;
def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
- "rcl{l}\t{%cl, $dst|$dst, CL}", []>;
+ "rcl{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
- "rcl{q}\t{%cl, $dst|$dst, CL}", []>;
+ "rcl{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
- "rcr{b}\t{%cl, $dst|$dst, CL}", []>;
+ "rcr{b}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
- "rcr{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
+ "rcr{w}\t{%cl, $dst|$dst, CL}", [], IIC_SR>, OpSize;
def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
- "rcr{l}\t{%cl, $dst|$dst, CL}", []>;
+ "rcr{l}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
- "rcr{q}\t{%cl, $dst|$dst, CL}", []>;
+ "rcr{q}\t{%cl, $dst|$dst, CL}", [], IIC_SR>;
}
let Constraints = "$src1 = $dst" in {
@@ -416,179 +461,217 @@ let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
"rol{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
+ [(set GR8:$dst, (rotl GR8:$src1, CL))], IIC_SR>;
def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"rol{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize;
+ [(set GR16:$dst, (rotl GR16:$src1, CL))], IIC_SR>, OpSize;
def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"rol{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (rotl GR32:$src1, CL))]>;
+ [(set GR32:$dst, (rotl GR32:$src1, CL))], IIC_SR>;
def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
- "rol{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
+ "rol{q}\t{%cl, $dst|$dst, CL}",
+ [(set GR64:$dst, (rotl GR64:$src1, CL))], IIC_SR>;
}
def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
"rol{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>;
+ [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))], IIC_SR>;
def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"rol{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>,
+ [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))],
+ IIC_SR>,
OpSize;
def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"rol{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>;
+ [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"rol{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>;
+ [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
// Rotate by 1
def ROL8r1 : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
"rol{b}\t$dst",
- [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))]>;
+ [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))],
+ IIC_SR>;
def ROL16r1 : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"rol{w}\t$dst",
- [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))]>, OpSize;
+ [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))],
+ IIC_SR>, OpSize;
def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"rol{l}\t$dst",
- [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>;
+ [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))],
+ IIC_SR>;
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t$dst",
- [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
+ [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))],
+ IIC_SR>;
} // Constraints = "$src1 = $dst"
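[editor's note] The (rotl ...) patterns above are what the standard C rotate idiom lowers to; a sketch (name illustrative):

    /* Compilers generally recognize this shape and emit a single rol;
       the extra masking keeps the n == 0 case well-defined in C. */
    unsigned rotate_left(unsigned x, unsigned n) {
        n &= 31;
        return (x << n) | (x >> ((32 - n) & 31));
    }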
let Uses = [CL] in {
def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
"rol{b}\t{%cl, $dst|$dst, CL}",
- [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>;
+ [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
"rol{w}\t{%cl, $dst|$dst, CL}",
- [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
+ [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)],
+ IIC_SR>, OpSize;
def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
"rol{l}\t{%cl, $dst|$dst, CL}",
- [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>;
+ [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
- "rol{q}\t{%cl, $dst|$dst, %CL}",
- [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>;
+ "rol{q}\t{%cl, $dst|$dst, %cl}",
+ [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
}
def ROL8mi : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, i8imm:$src1),
"rol{b}\t{$src1, $dst|$dst, $src1}",
- [(store (rotl (loadi8 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
+ [(store (rotl (loadi8 addr:$dst), (i8 imm:$src1)), addr:$dst)],
+ IIC_SR>;
def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, i8imm:$src1),
"rol{w}\t{$src1, $dst|$dst, $src1}",
- [(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)]>,
+ [(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)],
+ IIC_SR>,
OpSize;
def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, i8imm:$src1),
"rol{l}\t{$src1, $dst|$dst, $src1}",
- [(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
+ [(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)],
+ IIC_SR>;
def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src1),
"rol{q}\t{$src1, $dst|$dst, $src1}",
- [(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)]>;
+ [(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)],
+ IIC_SR>;
// Rotate by 1
def ROL8m1 : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
"rol{b}\t$dst",
- [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def ROL16m1 : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
"rol{w}\t$dst",
- [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
+ [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>,
OpSize;
def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
"rol{l}\t$dst",
- [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
"rol{q}\t$dst",
- [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
"ror{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
+ [(set GR8:$dst, (rotr GR8:$src1, CL))], IIC_SR>;
def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"ror{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize;
+ [(set GR16:$dst, (rotr GR16:$src1, CL))], IIC_SR>, OpSize;
def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"ror{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (rotr GR32:$src1, CL))]>;
+ [(set GR32:$dst, (rotr GR32:$src1, CL))], IIC_SR>;
def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
- "ror{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
+ "ror{q}\t{%cl, $dst|$dst, CL}",
+ [(set GR64:$dst, (rotr GR64:$src1, CL))], IIC_SR>;
}
def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
"ror{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>;
+ [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))], IIC_SR>;
def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"ror{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>,
+ [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))],
+ IIC_SR>,
OpSize;
def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2),
"ror{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>;
+ [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"ror{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>;
+ [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))],
+ IIC_SR>;
// Rotate by 1
def ROR8r1 : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
"ror{b}\t$dst",
- [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))]>;
+ [(set GR8:$dst, (rotr GR8:$src1, (i8 1)))],
+ IIC_SR>;
def ROR16r1 : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"ror{w}\t$dst",
- [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))]>, OpSize;
+ [(set GR16:$dst, (rotr GR16:$src1, (i8 1)))],
+ IIC_SR>, OpSize;
def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"ror{l}\t$dst",
- [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>;
+ [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))],
+ IIC_SR>;
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t$dst",
- [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
+ [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))],
+ IIC_SR>;
} // Constraints = "$src = $dst"
let Uses = [CL] in {
def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
"ror{b}\t{%cl, $dst|$dst, CL}",
- [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>;
+ [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
"ror{w}\t{%cl, $dst|$dst, CL}",
- [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize;
+ [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)],
+ IIC_SR>, OpSize;
def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
"ror{l}\t{%cl, $dst|$dst, CL}",
- [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>;
+ [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
- "ror{q}\t{%cl, $dst|$dst, %CL}",
- [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>;
+ "ror{q}\t{%cl, $dst|$dst, CL}",
+ [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)],
+ IIC_SR>;
}
def ROR8mi : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
"ror{b}\t{$src, $dst|$dst, $src}",
- [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, i8imm:$src),
"ror{w}\t{$src, $dst|$dst, $src}",
- [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)]>,
+ [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>,
OpSize;
def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, i8imm:$src),
"ror{l}\t{$src, $dst|$dst, $src}",
- [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src),
"ror{q}\t{$src, $dst|$dst, $src}",
- [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>;
+ [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
+ IIC_SR>;
// Rotate by 1
def ROR8m1 : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
"ror{b}\t$dst",
- [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (rotr (loadi8 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
"ror{w}\t$dst",
- [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)]>,
+ [(store (rotr (loadi16 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>,
OpSize;
def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
"ror{l}\t$dst",
- [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
"ror{q}\t$dst",
- [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
+ [(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)],
+ IIC_SR>;
//===----------------------------------------------------------------------===//
@@ -601,30 +684,36 @@ let Uses = [CL] in {
def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>,
+ [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))],
+ IIC_SHD16_REG_CL>,
TB, OpSize;
def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>,
+ [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))],
+ IIC_SHD16_REG_CL>,
TB, OpSize;
def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, TB;
+ [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))],
+ IIC_SHD32_REG_CL>, TB;
def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
- [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, TB;
+ [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))],
+ IIC_SHD32_REG_CL>, TB;
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
- "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
- [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>,
+ "shld{q}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))],
+ IIC_SHD64_REG_CL>,
TB;
def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
- "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
- [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>,
+ "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, CL}",
+ [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))],
+ IIC_SHD64_REG_CL>,
TB;
}
@@ -634,42 +723,42 @@ def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
(ins GR16:$src1, GR16:$src2, i8imm:$src3),
"shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
- (i8 imm:$src3)))]>,
+ (i8 imm:$src3)))], IIC_SHD16_REG_IM>,
TB, OpSize;
def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
(outs GR16:$dst),
(ins GR16:$src1, GR16:$src2, i8imm:$src3),
"shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
- (i8 imm:$src3)))]>,
+ (i8 imm:$src3)))], IIC_SHD16_REG_IM>,
TB, OpSize;
def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
(outs GR32:$dst),
(ins GR32:$src1, GR32:$src2, i8imm:$src3),
"shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
- (i8 imm:$src3)))]>,
+ (i8 imm:$src3)))], IIC_SHD32_REG_IM>,
TB;
def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
(outs GR32:$dst),
(ins GR32:$src1, GR32:$src2, i8imm:$src3),
"shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
- (i8 imm:$src3)))]>,
+ (i8 imm:$src3)))], IIC_SHD32_REG_IM>,
TB;
def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
(outs GR64:$dst),
(ins GR64:$src1, GR64:$src2, i8imm:$src3),
"shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
- (i8 imm:$src3)))]>,
+ (i8 imm:$src3)))], IIC_SHD64_REG_IM>,
TB;
def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
(outs GR64:$dst),
(ins GR64:$src1, GR64:$src2, i8imm:$src3),
"shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
- (i8 imm:$src3)))]>,
+ (i8 imm:$src3)))], IIC_SHD64_REG_IM>,
TB;
}
} // Constraints = "$src = $dst"
@@ -678,69 +767,110 @@ let Uses = [CL] in {
def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
[(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
- addr:$dst)]>, TB, OpSize;
+ addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize;
def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}",
[(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
- addr:$dst)]>, TB, OpSize;
+ addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize;
def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
[(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
- addr:$dst)]>, TB;
+ addr:$dst)], IIC_SHD32_MEM_CL>, TB;
def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
[(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
- addr:$dst)]>, TB;
+ addr:$dst)], IIC_SHD32_MEM_CL>, TB;
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
+ "shld{q}\t{%cl, $src2, $dst|$dst, $src2, CL}",
[(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
- addr:$dst)]>, TB;
+ addr:$dst)], IIC_SHD64_MEM_CL>, TB;
def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
- "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}",
+ "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, CL}",
[(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
- addr:$dst)]>, TB;
+ addr:$dst)], IIC_SHD64_MEM_CL>, TB;
}
def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
(outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
"shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shld (loadi16 addr:$dst), GR16:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
+ (i8 imm:$src3)), addr:$dst)],
+ IIC_SHD16_MEM_IM>,
TB, OpSize;
def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
(outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3),
"shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
+ (i8 imm:$src3)), addr:$dst)],
+ IIC_SHD16_MEM_IM>,
TB, OpSize;
def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
(outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
"shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shld (loadi32 addr:$dst), GR32:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
+ (i8 imm:$src3)), addr:$dst)],
+ IIC_SHD32_MEM_IM>,
TB;
def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
(outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3),
"shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
+ (i8 imm:$src3)), addr:$dst)],
+ IIC_SHD32_MEM_IM>,
TB;
def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
(outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
"shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shld (loadi64 addr:$dst), GR64:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
+ (i8 imm:$src3)), addr:$dst)],
+ IIC_SHD64_MEM_IM>,
TB;
def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
(outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3),
"shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
- (i8 imm:$src3)), addr:$dst)]>,
+ (i8 imm:$src3)), addr:$dst)],
+ IIC_SHD64_MEM_IM>,
TB;
} // Defs = [EFLAGS]
+multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> {
+let neverHasSideEffects = 1 in {
+ def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, i8imm:$src2),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, TAXD, VEX;
+ let mayLoad = 1 in
+ def mi : Ii8<0xF0, MRMSrcMem, (outs RC:$dst),
+ (ins x86memop:$src1, i8imm:$src2),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, TAXD, VEX;
+}
+}
+
+multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> {
+let neverHasSideEffects = 1 in {
+ def rr : I<0xF7, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ VEX_4VOp3;
+ let mayLoad = 1 in
+ def rm : I<0xF7, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ VEX_4VOp3;
+}
+}
+
+let Predicates = [HasBMI2] in {
+ defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem>;
+ defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem>, VEX_W;
+ defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem>, T8XS;
+ defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8XS, VEX_W;
+ defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8XD;
+ defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, VEX_W;
+ defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8, OpSize;
+ defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8, OpSize, VEX_W;
+}
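
The bmi_rotate/bmi_shift multiclasses above only pin down encodings; no selection patterns are attached yet. What BMI2 buys is three-operand shift/rotate forms (RORX, SARX, SHRX, SHLX) that leave EFLAGS alone. A minimal sketch of the rotate idiom a BMI2-enabled compiler may turn into RORX; the helper name is illustrative, not an LLVM API:

    #include <cstdint>

    // Portable rotate-right; under -mbmi2 a compiler may select RORX,
    // which takes the count as an immediate and never writes EFLAGS.
    uint64_t rotr64(uint64_t v, unsigned n) {
      n &= 63;                                  // hardware masks the count
      return (v >> n) | (v << ((64 - n) & 63));
    }
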
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSystem.td b/contrib/llvm/lib/Target/X86/X86InstrSystem.td
index 05a5b36..bddba6c 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSystem.td
@@ -1,10 +1,10 @@
-//===- X86InstrSystem.td - System Instructions -------------*- tablegen -*-===//
-//
+//===-- X86InstrSystem.td - System Instructions ------------*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 instructions that are generally used in
@@ -45,18 +45,17 @@ def INT : Ii8<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap",
def SYSCALL : I<0x05, RawFrm, (outs), (ins), "syscall", []>, TB;
-def SYSRETL : I<0x07, RawFrm, (outs), (ins), "sysretl", []>, TB;
-def SYSRETQ :RI<0x07, RawFrm, (outs), (ins), "sysretq", []>, TB,
+def SYSRET : I<0x07, RawFrm, (outs), (ins), "sysret{l}", []>, TB;
+def SYSRET64 :RI<0x07, RawFrm, (outs), (ins), "sysret{q}", []>, TB,
Requires<[In64BitMode]>;
def SYSENTER : I<0x34, RawFrm, (outs), (ins), "sysenter", []>, TB;
-def SYSEXIT : I<0x35, RawFrm, (outs), (ins), "sysexit", []>, TB,
- Requires<[In32BitMode]>;
-def SYSEXIT64 :RI<0x35, RawFrm, (outs), (ins), "sysexit", []>, TB,
+def SYSEXIT : I<0x35, RawFrm, (outs), (ins), "sysexit{l}", []>, TB;
+def SYSEXIT64 :RI<0x35, RawFrm, (outs), (ins), "sysexit{q}", []>, TB,
Requires<[In64BitMode]>;
-def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iretw", []>, OpSize;
+def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", []>, OpSize;
def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l|d}", []>;
def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iretq", []>,
Requires<[In64BitMode]>;
@@ -215,18 +214,18 @@ def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
def INVLPG : I<0x01, MRM7m, (outs), (ins i8mem:$addr), "invlpg\t$addr", []>, TB;
def STR16r : I<0x00, MRM1r, (outs GR16:$dst), (ins),
- "str{w}\t{$dst}", []>, TB, OpSize;
+ "str{w}\t$dst", []>, TB, OpSize;
def STR32r : I<0x00, MRM1r, (outs GR32:$dst), (ins),
- "str{l}\t{$dst}", []>, TB;
+ "str{l}\t$dst", []>, TB;
def STR64r : RI<0x00, MRM1r, (outs GR64:$dst), (ins),
- "str{q}\t{$dst}", []>, TB;
+ "str{q}\t$dst", []>, TB;
def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins),
- "str{w}\t{$dst}", []>, TB;
+ "str{w}\t$dst", []>, TB;
def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src),
- "ltr{w}\t{$src}", []>, TB;
+ "ltr{w}\t$src", []>, TB;
def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src),
- "ltr{w}\t{$src}", []>, TB;
+ "ltr{w}\t$src", []>, TB;
def PUSHCS16 : I<0x0E, RawFrm, (outs), (ins),
"push{w}\t{%cs|CS}", []>, Requires<[In32BitMode]>, OpSize;
@@ -447,21 +446,38 @@ let Defs = [RAX, RDX, RSI], Uses = [RAX, RSI] in
//===----------------------------------------------------------------------===//
// FS/GS Base Instructions
-let Predicates = [In64BitMode] in {
+let Predicates = [HasFSGSBase, In64BitMode] in {
def RDFSBASE : I<0xAE, MRM0r, (outs GR32:$dst), (ins),
- "rdfsbase{l}\t$dst", []>, TB, XS;
+ "rdfsbase{l}\t$dst",
+ [(set GR32:$dst, (int_x86_rdfsbase_32))]>, TB, XS;
def RDFSBASE64 : RI<0xAE, MRM0r, (outs GR64:$dst), (ins),
- "rdfsbase{q}\t$dst", []>, TB, XS;
+ "rdfsbase{q}\t$dst",
+ [(set GR64:$dst, (int_x86_rdfsbase_64))]>, TB, XS;
def RDGSBASE : I<0xAE, MRM1r, (outs GR32:$dst), (ins),
- "rdgsbase{l}\t$dst", []>, TB, XS;
+ "rdgsbase{l}\t$dst",
+ [(set GR32:$dst, (int_x86_rdgsbase_32))]>, TB, XS;
def RDGSBASE64 : RI<0xAE, MRM1r, (outs GR64:$dst), (ins),
- "rdgsbase{q}\t$dst", []>, TB, XS;
- def WRFSBASE : I<0xAE, MRM2r, (outs), (ins GR32:$dst),
- "wrfsbase{l}\t$dst", []>, TB, XS;
- def WRFSBASE64 : RI<0xAE, MRM2r, (outs), (ins GR64:$dst),
- "wrfsbase{q}\t$dst", []>, TB, XS;
- def WRGSBASE : I<0xAE, MRM3r, (outs), (ins GR32:$dst),
- "wrgsbase{l}\t$dst", []>, TB, XS;
- def WRGSBASE64 : RI<0xAE, MRM3r, (outs), (ins GR64:$dst),
- "wrgsbase{q}\t$dst", []>, TB, XS;
+ "rdgsbase{q}\t$dst",
+ [(set GR64:$dst, (int_x86_rdgsbase_64))]>, TB, XS;
+ def WRFSBASE : I<0xAE, MRM2r, (outs), (ins GR32:$src),
+ "wrfsbase{l}\t$src",
+ [(int_x86_wrfsbase_32 GR32:$src)]>, TB, XS;
+ def WRFSBASE64 : RI<0xAE, MRM2r, (outs), (ins GR64:$src),
+ "wrfsbase{q}\t$src",
+ [(int_x86_wrfsbase_64 GR64:$src)]>, TB, XS;
+ def WRGSBASE : I<0xAE, MRM3r, (outs), (ins GR32:$src),
+ "wrgsbase{l}\t$src",
+ [(int_x86_wrgsbase_32 GR32:$src)]>, TB, XS;
+ def WRGSBASE64 : RI<0xAE, MRM3r, (outs), (ins GR64:$src),
+ "wrgsbase{q}\t$src",
+ [(int_x86_wrgsbase_64 GR64:$src)]>, TB, XS;
}
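
The RDFSBASE/WRFSBASE family now carries patterns on the int_x86_rd/wrfsbase intrinsics, so user code reaches them through the usual <immintrin.h> wrappers. A hedged usage sketch (helper name is ours; requires -mfsgsbase, 64-bit mode, and an OS that has set CR4.FSGSBASE):

    #include <immintrin.h>
    #include <cstdint>

    // Reads the current GS base and installs a new one; each intrinsic
    // call maps to a single RDGSBASE/WRGSBASE instruction.
    uint64_t swap_gs_base(uint64_t NewBase) {
      uint64_t Old = _readgsbase_u64();
      _writegsbase_u64(NewBase);
      return Old;
    }
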
+
+//===----------------------------------------------------------------------===//
+// INVPCID Instruction
+def INVPCID32 : I<0x82, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
+ "invpcid {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ Requires<[In32BitMode]>;
+def INVPCID64 : I<0x82, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
+ "invpcid {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ Requires<[In64BitMode]>;
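
INVPCID is privileged: the i128mem operand is a 16-byte descriptor and the register operand selects the invalidation type (0 = individual address, 1 = single context, 2 = all contexts including globals, 3 = all contexts excluding globals). A ring-0 sketch using inline assembly; the struct layout follows Intel's documentation, and the helper name is made up:

    #include <cstdint>

    struct alignas(16) InvpcidDesc {
      uint64_t PCID;     // only bits 0..11 are significant
      uint64_t Address;  // consulted by type 0 (individual address) only
    };

    static inline void invpcid(uint64_t Type, const InvpcidDesc &Desc) {
      // AT&T operand order: memory descriptor first, type register second.
      asm volatile("invpcid %1, %0" : : "r"(Type), "m"(Desc) : "memory");
    }
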
diff --git a/contrib/llvm/lib/Target/X86/X86InstrVMX.td b/contrib/llvm/lib/Target/X86/X86InstrVMX.td
index 09a7a7d0c..6a8f0c8 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrVMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrVMX.td
@@ -1,10 +1,10 @@
-//===- X86InstrVMX.td - VMX Instruction Set Extension ------*- tablegen -*-===//
-//
+//===-- X86InstrVMX.td - VMX Instruction Set Extension -----*- tablegen -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file describes the instructions that make up the Intel VMX instruction
@@ -17,18 +17,24 @@
// 66 0F 38 80
def INVEPT32 : I<0x80, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invept {$src2, $src1|$src1, $src2}", []>, OpSize, T8;
+ "invept {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ Requires<[In32BitMode]>;
def INVEPT64 : I<0x80, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invept {$src2, $src1|$src1, $src2}", []>, OpSize, T8;
+ "invept {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ Requires<[In64BitMode]>;
// 66 0F 38 81
def INVVPID32 : I<0x81, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invvpid {$src2, $src1|$src1, $src2}", []>, OpSize, T8;
+ "invvpid {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ Requires<[In32BitMode]>;
def INVVPID64 : I<0x81, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invvpid {$src2, $src1|$src1, $src2}", []>, OpSize, T8;
+ "invvpid {$src2, $src1|$src1, $src2}", []>, OpSize, T8,
+ Requires<[In64BitMode]>;
// 0F 01 C1
def VMCALL : I<0x01, MRM_C1, (outs), (ins), "vmcall", []>, TB;
def VMCLEARm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
"vmclear\t$vmcs", []>, OpSize, TB;
+// 0F 01 D4
+def VMFUNC : I<0x01, MRM_D4, (outs), (ins), "vmfunc", []>, TB;
// 0F 01 C2
def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
// 0F 01 C3
@@ -38,23 +44,23 @@ def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins),
"vmptrst\t$vmcs", []>, TB;
def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In32BitMode]>;
def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In32BitMode]>;
def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB;
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In32BitMode]>;
def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB;
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In32BitMode]>;
// 0F 01 C4
def VMXOFF : I<0x01, MRM_C4, (outs), (ins), "vmxoff", []>, TB;
def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon),
- "vmxon\t{$vmxon}", []>, XS;
+ "vmxon\t$vmxon", []>, XS;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrXOP.td b/contrib/llvm/lib/Target/X86/X86InstrXOP.td
new file mode 100644
index 0000000..65bbcb5
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86InstrXOP.td
@@ -0,0 +1,307 @@
+//===-- X86InstrXOP.td - XOP Instruction Set ---------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes XOP (eXtended OPerations).
+//
+//===----------------------------------------------------------------------===//
+
+multiclass xop2op<bits<8> opc, string OpcodeStr, Intrinsic Int, PatFrag memop> {
+ def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (Int VR128:$src))]>, VEX;
+ def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd, memopv2i64>;
+ defm VPHSUBDQ : xop2op<0xE3, "vphsubdq", int_x86_xop_vphsubdq, memopv2i64>;
+ defm VPHSUBBW : xop2op<0xE1, "vphsubbw", int_x86_xop_vphsubbw, memopv2i64>;
+ defm VPHADDWQ : xop2op<0xC7, "vphaddwq", int_x86_xop_vphaddwq, memopv2i64>;
+ defm VPHADDWD : xop2op<0xC6, "vphaddwd", int_x86_xop_vphaddwd, memopv2i64>;
+ defm VPHADDUWQ : xop2op<0xD7, "vphadduwq", int_x86_xop_vphadduwq, memopv2i64>;
+ defm VPHADDUWD : xop2op<0xD6, "vphadduwd", int_x86_xop_vphadduwd, memopv2i64>;
+ defm VPHADDUDQ : xop2op<0xDB, "vphaddudq", int_x86_xop_vphaddudq, memopv2i64>;
+ defm VPHADDUBW : xop2op<0xD1, "vphaddubw", int_x86_xop_vphaddubw, memopv2i64>;
+ defm VPHADDUBQ : xop2op<0xD3, "vphaddubq", int_x86_xop_vphaddubq, memopv2i64>;
+ defm VPHADDUBD : xop2op<0xD2, "vphaddubd", int_x86_xop_vphaddubd, memopv2i64>;
+ defm VPHADDDQ : xop2op<0xCB, "vphadddq", int_x86_xop_vphadddq, memopv2i64>;
+ defm VPHADDBW : xop2op<0xC1, "vphaddbw", int_x86_xop_vphaddbw, memopv2i64>;
+ defm VPHADDBQ : xop2op<0xC3, "vphaddbq", int_x86_xop_vphaddbq, memopv2i64>;
+ defm VPHADDBD : xop2op<0xC2, "vphaddbd", int_x86_xop_vphaddbd, memopv2i64>;
+ defm VFRCZPS : xop2op<0x80, "vfrczps", int_x86_xop_vfrcz_ps, memopv4f32>;
+ defm VFRCZPD : xop2op<0x81, "vfrczpd", int_x86_xop_vfrcz_pd, memopv2f64>;
+}
+
+// Scalar load, two-address operand instructions
+let Constraints = "$src1 = $dst" in {
+multiclass xop2opsld<bits<8> opc, string OpcodeStr, Intrinsic Int,
+ Operand memop, ComplexPattern mem_cpat> {
+ def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1,
+ VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ [(set VR128:$dst, (Int VR128:$src1, VR128:$src2))]>, VEX;
+ def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1,
+ memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ [(set VR128:$dst, (Int VR128:$src1,
+ (bitconvert mem_cpat:$src2)))]>, VEX;
+}
+
+} // Constraints = "$src1 = $dst"
+
+let isAsmParserOnly = 1 in {
+ defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss,
+ ssmem, sse_load_f32>;
+ defm VFRCZSD : xop2opsld<0x83, "vfrczsd", int_x86_xop_vfrcz_sd,
+ sdmem, sse_load_f64>;
+}
+
+
+multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
+ PatFrag memop> {
+ def rrY : IXOP<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (Int VR256:$src))]>, VEX, VEX_L;
+ def rmY : IXOP<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256,
+ memopv8f32>;
+ defm VFRCZPD : xop2op256<0x81, "vfrczpd", int_x86_xop_vfrcz_pd_256,
+ memopv4f64>;
+}
+
+multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
+ def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (Int VR128:$src1, VR128:$src2))]>, VEX_4VOp3;
+ def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))]>,
+ VEX_4V, VEX_W;
+ def mr : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (Int (bitconvert (memopv2i64 addr:$src1)), VR128:$src2))]>,
+ VEX_4VOp3;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VPSHLW : xop3op<0x95, "vpshlw", int_x86_xop_vpshlw>;
+ defm VPSHLQ : xop3op<0x97, "vpshlq", int_x86_xop_vpshlq>;
+ defm VPSHLD : xop3op<0x96, "vpshld", int_x86_xop_vpshld>;
+ defm VPSHLB : xop3op<0x94, "vpshlb", int_x86_xop_vpshlb>;
+ defm VPSHAW : xop3op<0x99, "vpshaw", int_x86_xop_vpshaw>;
+ defm VPSHAQ : xop3op<0x9B, "vpshaq", int_x86_xop_vpshaq>;
+ defm VPSHAD : xop3op<0x9A, "vpshad", int_x86_xop_vpshad>;
+ defm VPSHAB : xop3op<0x98, "vpshab", int_x86_xop_vpshab>;
+ defm VPROTW : xop3op<0x91, "vprotw", int_x86_xop_vprotw>;
+ defm VPROTQ : xop3op<0x93, "vprotq", int_x86_xop_vprotq>;
+ defm VPROTD : xop3op<0x92, "vprotd", int_x86_xop_vprotd>;
+ defm VPROTB : xop3op<0x90, "vprotb", int_x86_xop_vprotb>;
+}
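
The vpsha/vpshl/vprot groups take a per-element count from the second source, with negative counts reversing direction. A scalar model of one VPROTD lane, assuming AMD's documented XOP semantics; the function is a reference model, not compiler output:

    #include <cstdint>

    // One 32-bit lane of VPROTD: a positive count rotates left, a
    // negative count rotates right by the magnitude.
    uint32_t vprotd_lane(uint32_t v, int32_t count) {
      unsigned n = (count < 0 ? -(unsigned)count : (unsigned)count) & 31;
      if (count < 0)
        return (v >> n) | (v << ((32 - n) & 31));
      return (v << n) | (v >> ((32 - n) & 31));
    }
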
+
+multiclass xop3opimm<bits<8> opc, string OpcodeStr> {
+ let neverHasSideEffects = 1 in {
+ def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, VEX;
+ let mayLoad = 1 in
+ def mi : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, VEX;
+ }
+}
+
+let isAsmParserOnly = 1 in {
+ defm VPROTW : xop3opimm<0xC1, "vprotw">;
+ defm VPROTQ : xop3opimm<0xC3, "vprotq">;
+ defm VPROTD : xop3opimm<0xC2, "vprotd">;
+ defm VPROTB : xop3opimm<0xC0, "vprotb">;
+}
+
+// Instructions where the second source can be memory, but the third must
+// be a register
+multiclass xop4opm2<bits<8> opc, string OpcodeStr, Intrinsic Int> {
+ def rr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_4V, VEX_I8IMM;
+ def rm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
+ VR128:$src3))]>, VEX_4V, VEX_I8IMM;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VPMADCSWD : xop4opm2<0xB6, "vpmadcswd", int_x86_xop_vpmadcswd>;
+ defm VPMADCSSWD : xop4opm2<0xA6, "vpmadcsswd", int_x86_xop_vpmadcsswd>;
+ defm VPMACSWW : xop4opm2<0x95, "vpmacsww", int_x86_xop_vpmacsww>;
+ defm VPMACSWD : xop4opm2<0x96, "vpmacswd", int_x86_xop_vpmacswd>;
+ defm VPMACSSWW : xop4opm2<0x85, "vpmacssww", int_x86_xop_vpmacssww>;
+ defm VPMACSSWD : xop4opm2<0x86, "vpmacsswd", int_x86_xop_vpmacsswd>;
+ defm VPMACSSDQL : xop4opm2<0x87, "vpmacssdql", int_x86_xop_vpmacssdql>;
+ defm VPMACSSDQH : xop4opm2<0x8F, "vpmacssdqh", int_x86_xop_vpmacssdqh>;
+ defm VPMACSSDD : xop4opm2<0x8E, "vpmacssdd", int_x86_xop_vpmacssdd>;
+ defm VPMACSDQL : xop4opm2<0x97, "vpmacsdql", int_x86_xop_vpmacsdql>;
+ defm VPMACSDQH : xop4opm2<0x9F, "vpmacsdqh", int_x86_xop_vpmacsdqh>;
+ defm VPMACSDD : xop4opm2<0x9E, "vpmacsdd", int_x86_xop_vpmacsdd>;
+}
+
+// Instructions where the second source can be memory, but the third must
+// be an imm8
+multiclass xop4opimm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType VT> {
+ def ri : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (VT (OpNode VR128:$src1, VR128:$src2, imm:$src3)))]>, VEX_4V;
+ def mi : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2, i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (VT (OpNode VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
+ imm:$src3)))]>, VEX_4V;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VPCOMB : xop4opimm<0xCC, "vpcomb", X86vpcom, v16i8>;
+ defm VPCOMW : xop4opimm<0xCD, "vpcomw", X86vpcom, v8i16>;
+ defm VPCOMD : xop4opimm<0xCE, "vpcomd", X86vpcom, v4i32>;
+ defm VPCOMQ : xop4opimm<0xCF, "vpcomq", X86vpcom, v2i64>;
+ defm VPCOMUB : xop4opimm<0xEC, "vpcomub", X86vpcomu, v16i8>;
+ defm VPCOMUW : xop4opimm<0xED, "vpcomuw", X86vpcomu, v8i16>;
+ defm VPCOMUD : xop4opimm<0xEE, "vpcomud", X86vpcomu, v4i32>;
+ defm VPCOMUQ : xop4opimm<0xEF, "vpcomuq", X86vpcomu, v2i64>;
+}
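
The X86vpcom/X86vpcomu nodes carry the comparison predicate in the low three bits of imm8. A scalar model of one signed VPCOMD lane, with the predicate encoding taken from AMD's XOP reference; the helper name is illustrative:

    #include <cstdint>

    uint32_t vpcomd_lane(int32_t a, int32_t b, unsigned imm8) {
      bool r;
      switch (imm8 & 7) {
      case 0:  r = a <  b; break;  // lt
      case 1:  r = a <= b; break;  // le
      case 2:  r = a >  b; break;  // gt
      case 3:  r = a >= b; break;  // ge
      case 4:  r = a == b; break;  // eq
      case 5:  r = a != b; break;  // ne
      case 6:  r = false;  break;  // false
      default: r = true;   break;  // true
      }
      return r ? 0xFFFFFFFFu : 0u;  // true lanes are all-ones
    }
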
+
+// Instructions where either the second or the third source can be memory
+multiclass xop4op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
+ def rr : IXOPi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, VR128:$src3))]>,
+ VEX_4V, VEX_I8IMM;
+ def rm : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, f128mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, VR128:$src2,
+ (bitconvert (memopv2i64 addr:$src3))))]>,
+ VEX_4V, VEX_I8IMM, VEX_W, MemOp4;
+ def mr : IXOPi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)),
+ VR128:$src3))]>,
+ VEX_4V, VEX_I8IMM;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>;
+ defm VPCMOV : xop4op<0xA2, "vpcmov", int_x86_xop_vpcmov>;
+}
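
VPCMOV is a pure bit-level select, which is why one intrinsic per width covers it. The whole operation reduces to the following per-bit model, shown here on a 64-bit chunk:

    #include <cstdint>

    // Selector bits that are 1 take the bit from the first source,
    // 0 takes it from the second: dst = (a & sel) | (b & ~sel).
    uint64_t vpcmov_bits(uint64_t a, uint64_t b, uint64_t sel) {
      return (a & sel) | (b & ~sel);
    }
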
+
+multiclass xop4op256<bits<8> opc, string OpcodeStr, Intrinsic Int> {
+ def rrY : IXOPi8<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst, (Int VR256:$src1, VR256:$src2, VR256:$src3))]>,
+ VEX_4V, VEX_I8IMM;
+ def rmY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, f256mem:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst,
+ (Int VR256:$src1, VR256:$src2,
+ (bitconvert (memopv4i64 addr:$src3))))]>,
+ VEX_4V, VEX_I8IMM, VEX_W, MemOp4;
+ def mrY : IXOPi8<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2, VR256:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR256:$dst,
+ (Int VR256:$src1, (bitconvert (memopv4i64 addr:$src2)),
+ VR256:$src3))]>,
+ VEX_4V, VEX_I8IMM;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>;
+}
+
+multiclass xop5op<bits<8> opc, string OpcodeStr, Intrinsic Int128,
+ Intrinsic Int256, PatFrag ld_128, PatFrag ld_256> {
+ def rr : IXOP5<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3, i8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, VR128:$src2, VR128:$src3, imm:$src4))]>;
+ def rm : IXOP5<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, f128mem:$src3, i8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, VR128:$src2, (ld_128 addr:$src3), imm:$src4))]>,
+ VEX_W, MemOp4;
+ def mr : IXOP5<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2, VR128:$src3, i8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, (ld_128 addr:$src2), VR128:$src3, imm:$src4))]>;
+ def rrY : IXOP5<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, VR256:$src3, i8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, VR256:$src2, VR256:$src3, imm:$src4))]>;
+ def rmY : IXOP5<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, f256mem:$src3, i8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, VR256:$src2, (ld_256 addr:$src3), imm:$src4))]>,
+ VEX_W, MemOp4;
+ def mrY : IXOP5<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2, VR256:$src3, i8imm:$src4),
+ !strconcat(OpcodeStr,
+ "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, (ld_256 addr:$src2), VR256:$src3, imm:$src4))]>;
+}
+
+defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", int_x86_xop_vpermil2pd,
+ int_x86_xop_vpermil2pd_256, memopv2f64, memopv4f64>;
+defm VPERMIL2PS : xop5op<0x48, "vpermil2ps", int_x86_xop_vpermil2ps,
+ int_x86_xop_vpermil2ps_256, memopv4f32, memopv8f32>;
+
diff --git a/contrib/llvm/lib/Target/X86/X86JITInfo.cpp b/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
index 3f88fa6..0168d12 100644
--- a/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86JITInfo.cpp
@@ -300,7 +300,10 @@ extern "C" {
SIZE(X86CompilationCallback_SSE)
);
# else
- void X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr);
+ // The following function is called only from this translation unit,
+ // unless we are under 64-bit Windows with MSC, where there is
+ // no support for inline assembly.
+ static void X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr);
_declspec(naked) void X86CompilationCallback(void) {
__asm {
@@ -424,7 +427,9 @@ X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
TargetJITInfo::LazyResolverFn
X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
+ TsanIgnoreWritesBegin();
JITCompilerFunction = F;
+ TsanIgnoreWritesEnd();
#if defined (X86_32_JIT) && !defined (_MSC_VER)
if (Subtarget->hasSSE1())
@@ -569,6 +574,5 @@ char* X86JITInfo::allocateThreadLocalMemory(size_t size) {
return TLSOffset;
#else
llvm_unreachable("Cannot allocate thread local storage on this arch!");
- return 0;
#endif
}
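
This hunk, like the matching ones in X86RegisterInfo.cpp further down, deletes dead returns after llvm_unreachable. A minimal sketch of why that compiles cleanly: llvm_unreachable expands to a noreturn call (or __builtin_unreachable in release builds). The names below are stand-ins, not the real macro:

    #include <cstdio>
    #include <cstdlib>

    [[noreturn]] static void my_unreachable(const char *Msg) {
      std::fprintf(stderr, "UNREACHABLE: %s\n", Msg);
      std::abort();
    }

    static char *allocate_tls(bool Supported) {
      if (Supported)
        return nullptr;  // real code would hand back the TLS block
      my_unreachable("Cannot allocate thread local storage on this arch!");
      // no trailing "return 0;" needed: control cannot reach here
    }
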
diff --git a/contrib/llvm/lib/Target/X86/X86JITInfo.h b/contrib/llvm/lib/Target/X86/X86JITInfo.h
index 238420c..c76d3cc 100644
--- a/contrib/llvm/lib/Target/X86/X86JITInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86JITInfo.h
@@ -1,4 +1,4 @@
-//===- X86JITInfo.h - X86 implementation of the JIT interface --*- C++ -*-===//
+//===-- X86JITInfo.h - X86 implementation of the JIT interface --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
index 50bc14d..b578e8d 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -12,10 +12,11 @@
//
//===----------------------------------------------------------------------===//
-#include "InstPrinter/X86ATTInstPrinter.h"
#include "X86MCInstLower.h"
#include "X86AsmPrinter.h"
#include "X86COFFMachineModuleInfo.h"
+#include "InstPrinter/X86ATTInstPrinter.h"
+#include "llvm/Type.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -26,7 +27,6 @@
#include "llvm/Target/Mangler.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Type.h"
using namespace llvm;
X86MCInstLower::X86MCInstLower(Mangler *mang, const MachineFunction &mf,
@@ -154,6 +154,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
Ctx),
Ctx);
break;
+ case X86II::MO_SECREL: RefKind = MCSymbolRefExpr::VK_SECREL; break;
case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break;
case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
@@ -230,7 +231,8 @@ static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
/// a short fixed-register form.
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
unsigned ImmOp = Inst.getNumOperands() - 1;
- assert(Inst.getOperand(0).isReg() && Inst.getOperand(ImmOp).isImm() &&
+ assert(Inst.getOperand(0).isReg() &&
+ (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
Inst.getNumOperands() == 2) && "Unexpected instruction!");
@@ -335,6 +337,9 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
MCOp = LowerSymbolOperand(MO,
AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
break;
+ case MachineOperand::MO_RegisterMask:
+ // Ignore call clobbers.
+ continue;
}
OutMI.addOperand(MCOp);
@@ -368,14 +373,12 @@ ReSimplify:
case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
- case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
- case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
- case X86::VFsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break;
- case X86::VFsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break;
case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break;
case X86::AVX_SET0PSY: LowerUnaryToTwoAddr(OutMI, X86::VXORPSYrr); break;
case X86::AVX_SET0PDY: LowerUnaryToTwoAddr(OutMI, X86::VXORPDYrr); break;
case X86::AVX_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::VPCMPEQDrr); break;
+ case X86::AVX2_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::VPCMPEQDYrr);break;
+ case X86::AVX2_SET0: LowerUnaryToTwoAddr(OutMI, X86::VPXORYrr); break;
case X86::MOV16r0:
LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV16r0 -> MOV32r0
@@ -386,14 +389,12 @@ ReSimplify:
LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
break;
- // TAILJMPr64, [WIN]CALL64r, [WIN]CALL64pcrel32 - These instructions have
- // register inputs modeled as normal uses instead of implicit uses. As such,
- // truncate off all but the first operand (the callee). FIXME: Change isel.
+ // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
+ // inputs modeled as normal uses instead of implicit uses. As such, truncate
+ // off all but the first operand (the callee). FIXME: Change isel.
case X86::TAILJMPr64:
case X86::CALL64r:
- case X86::CALL64pcrel32:
- case X86::WINCALL64r:
- case X86::WINCALL64pcrel32: {
+ case X86::CALL64pcrel32: {
unsigned Opcode = OutMI.getOpcode();
MCOperand Saved = OutMI.getOperand(0);
OutMI = MCInst();
@@ -415,7 +416,7 @@ ReSimplify:
case X86::TAILJMPd64: {
unsigned Opcode;
switch (OutMI.getOpcode()) {
- default: assert(0 && "Invalid opcode");
+ default: llvm_unreachable("Invalid opcode");
case X86::TAILJMPr: Opcode = X86::JMP32r; break;
case X86::TAILJMPd:
case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
@@ -527,6 +528,22 @@ ReSimplify:
case X86::XOR16ri: SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;
+
+ case X86::MORESTACK_RET:
+ OutMI.setOpcode(X86::RET);
+ break;
+
+ case X86::MORESTACK_RET_RESTORE_R10: {
+ MCInst retInst;
+
+ OutMI.setOpcode(X86::MOV64rr);
+ OutMI.addOperand(MCOperand::CreateReg(X86::R10));
+ OutMI.addOperand(MCOperand::CreateReg(X86::RAX));
+
+ retInst.setOpcode(X86::RET);
+ AsmPrinter.OutStreamer.EmitInstruction(retInst);
+ break;
+ }
}
}
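
The V_SETALLONES/AVX2_SETALLONES lowerings above lean on a classic idiom: comparing a register with itself for equality sets every bit, so materializing all-ones needs no constant-pool load. An intrinsics sketch of the same trick (function name is ours):

    #include <immintrin.h>

    __m256i all_ones_avx2(void) {
      __m256i X = _mm256_undefined_si256();
      return _mm256_cmpeq_epi32(X, X);  // equal lanes compare to all-ones
    }
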
diff --git a/contrib/llvm/lib/Target/X86/X86MCInstLower.h b/contrib/llvm/lib/Target/X86/X86MCInstLower.h
index 0210072..40df3db 100644
--- a/contrib/llvm/lib/Target/X86/X86MCInstLower.h
+++ b/contrib/llvm/lib/Target/X86/X86MCInstLower.h
@@ -1,4 +1,4 @@
-//===-- X86MCInstLower.h - Lower MachineInstr to MCInst -------------------===//
+//===-- X86MCInstLower.h - Lower MachineInstr to MCInst ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.cpp b/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.cpp
new file mode 100644
index 0000000..568dc22
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.cpp
@@ -0,0 +1,14 @@
+//===-- X86MachineFunctionInfo.cpp - X86 machine function info ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86MachineFunctionInfo.h"
+
+using namespace llvm;
+
+void X86MachineFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h b/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h
index b0bb313..c747109 100644
--- a/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86MachineFunctionInfo.h
@@ -1,10 +1,10 @@
-//====- X86MachineFuctionInfo.h - X86 machine function info -----*- C++ -*-===//
-//
+//===-- X86MachineFunctionInfo.h - X86 machine function info ----*- C++ -*-===//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file declares X86-specific per-machine-function information.
@@ -21,6 +21,8 @@ namespace llvm {
/// X86MachineFunctionInfo - This class is derived from MachineFunction and
/// contains private X86 target-specific information for each MachineFunction.
class X86MachineFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
+
/// ForceFramePointer - True if the function is required to use of frame
/// pointer for reasons other than it containing dynamic allocation or
/// that FP eliminatation is turned off. For example, Cygwin main function
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
index c1ac9f3..b56025f 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
+//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#include "X86.h"
#include "X86RegisterInfo.h"
+#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
@@ -127,121 +127,13 @@ const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
const TargetRegisterClass *B,
unsigned SubIdx) const {
- switch (SubIdx) {
- default: return 0;
- case X86::sub_8bit:
- if (B == &X86::GR8RegClass) {
- if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
- return A;
- } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
- if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
- A == &X86::GR64_NOREXRegClass ||
- A == &X86::GR64_NOSPRegClass ||
- A == &X86::GR64_NOREX_NOSPRegClass)
- return &X86::GR64_ABCDRegClass;
- else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
- A == &X86::GR32_NOREXRegClass ||
- A == &X86::GR32_NOSPRegClass)
- return &X86::GR32_ABCDRegClass;
- else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
- A == &X86::GR16_NOREXRegClass)
- return &X86::GR16_ABCDRegClass;
- } else if (B == &X86::GR8_NOREXRegClass) {
- if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
- A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
- return &X86::GR64_NOREXRegClass;
- else if (A == &X86::GR64_ABCDRegClass)
- return &X86::GR64_ABCDRegClass;
- else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
- A == &X86::GR32_NOSPRegClass)
- return &X86::GR32_NOREXRegClass;
- else if (A == &X86::GR32_ABCDRegClass)
- return &X86::GR32_ABCDRegClass;
- else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
- return &X86::GR16_NOREXRegClass;
- else if (A == &X86::GR16_ABCDRegClass)
- return &X86::GR16_ABCDRegClass;
- }
- break;
- case X86::sub_8bit_hi:
- if (B->hasSubClassEq(&X86::GR8_ABCD_HRegClass))
- switch (A->getSize()) {
- case 2: return getCommonSubClass(A, &X86::GR16_ABCDRegClass);
- case 4: return getCommonSubClass(A, &X86::GR32_ABCDRegClass);
- case 8: return getCommonSubClass(A, &X86::GR64_ABCDRegClass);
- default: return 0;
- }
- break;
- case X86::sub_16bit:
- if (B == &X86::GR16RegClass) {
- if (A->getSize() == 4 || A->getSize() == 8)
- return A;
- } else if (B == &X86::GR16_ABCDRegClass) {
- if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
- A == &X86::GR64_NOREXRegClass ||
- A == &X86::GR64_NOSPRegClass ||
- A == &X86::GR64_NOREX_NOSPRegClass)
- return &X86::GR64_ABCDRegClass;
- else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
- A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
- return &X86::GR32_ABCDRegClass;
- } else if (B == &X86::GR16_NOREXRegClass) {
- if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
- A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
- return &X86::GR64_NOREXRegClass;
- else if (A == &X86::GR64_ABCDRegClass)
- return &X86::GR64_ABCDRegClass;
- else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
- A == &X86::GR32_NOSPRegClass)
- return &X86::GR32_NOREXRegClass;
- else if (A == &X86::GR32_ABCDRegClass)
- return &X86::GR64_ABCDRegClass;
- }
- break;
- case X86::sub_32bit:
- if (B == &X86::GR32RegClass) {
- if (A->getSize() == 8)
- return A;
- } else if (B == &X86::GR32_NOSPRegClass) {
- if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
- return &X86::GR64_NOSPRegClass;
- if (A->getSize() == 8)
- return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
- } else if (B == &X86::GR32_ABCDRegClass) {
- if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
- A == &X86::GR64_NOREXRegClass ||
- A == &X86::GR64_NOSPRegClass ||
- A == &X86::GR64_NOREX_NOSPRegClass)
- return &X86::GR64_ABCDRegClass;
- } else if (B == &X86::GR32_NOREXRegClass) {
- if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
- return &X86::GR64_NOREXRegClass;
- else if (A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
- return &X86::GR64_NOREX_NOSPRegClass;
- else if (A == &X86::GR64_ABCDRegClass)
- return &X86::GR64_ABCDRegClass;
- } else if (B == &X86::GR32_NOREX_NOSPRegClass) {
- if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
- A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
- return &X86::GR64_NOREX_NOSPRegClass;
- else if (A == &X86::GR64_ABCDRegClass)
- return &X86::GR64_ABCDRegClass;
- }
- break;
- case X86::sub_ss:
- if (B == &X86::FR32RegClass)
- return A;
- break;
- case X86::sub_sd:
- if (B == &X86::FR64RegClass)
- return A;
- break;
- case X86::sub_xmm:
- if (B == &X86::VR128RegClass)
- return A;
- break;
+ // The sub_8bit sub-register index is more constrained in 32-bit mode.
+ if (!Is64Bit && SubIdx == X86::sub_8bit) {
+ A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
+ if (!A)
+ return 0;
}
- return 0;
+ return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}
const TargetRegisterClass*
@@ -334,7 +226,7 @@ X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
}
}
-const unsigned *
+const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
bool callsEHReturn = false;
bool ghcCall = false;
@@ -345,45 +237,29 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
}
- static const unsigned GhcCalleeSavedRegs[] = {
- 0
- };
-
- static const unsigned CalleeSavedRegs32Bit[] = {
- X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
- };
-
- static const unsigned CalleeSavedRegs32EHRet[] = {
- X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
- };
-
- static const unsigned CalleeSavedRegs64Bit[] = {
- X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
- };
-
- static const unsigned CalleeSavedRegs64EHRet[] = {
- X86::RAX, X86::RDX, X86::RBX, X86::R12,
- X86::R13, X86::R14, X86::R15, X86::RBP, 0
- };
-
- static const unsigned CalleeSavedRegsWin64[] = {
- X86::RBX, X86::RBP, X86::RDI, X86::RSI,
- X86::R12, X86::R13, X86::R14, X86::R15,
- X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
- X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
- X86::XMM14, X86::XMM15, 0
- };
-
- if (ghcCall) {
- return GhcCalleeSavedRegs;
- } else if (Is64Bit) {
+ if (ghcCall)
+ return CSR_Ghc_SaveList;
+ if (Is64Bit) {
if (IsWin64)
- return CalleeSavedRegsWin64;
- else
- return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
- } else {
- return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
+ return CSR_Win64_SaveList;
+ if (callsEHReturn)
+ return CSR_64EHRet_SaveList;
+ return CSR_64_SaveList;
}
+ if (callsEHReturn)
+ return CSR_32EHRet_SaveList;
+ return CSR_32_SaveList;
+}
+
+const uint32_t*
+X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ if (CC == CallingConv::GHC)
+ return CSR_Ghc_RegMask;
+ if (!Is64Bit)
+ return CSR_32_RegMask;
+ if (IsWin64)
+ return CSR_Win64_RegMask;
+ return CSR_64_RegMask;
}
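
The uint32_t array returned by getCallPreservedMask is a bitmask with one bit per physical register; a set bit means the register is preserved across the call. A sketch of how a pass would query it; MachineOperand::clobbersPhysReg is the stock accessor, and the comment shows the equivalent bit test:

    #include "llvm/CodeGen/MachineOperand.h"

    using namespace llvm;

    static bool isClobberedByCall(const uint32_t *Mask, unsigned PhysReg) {
      // Equivalent to: !(Mask[PhysReg / 32] & (1u << (PhysReg % 32)))
      return MachineOperand::clobbersPhysReg(Mask, PhysReg);
    }
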
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
@@ -428,16 +304,16 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
for (unsigned n = 0; n != 8; ++n) {
// R8, R9, ...
- const unsigned GPR64[] = {
+ static const uint16_t GPR64[] = {
X86::R8, X86::R9, X86::R10, X86::R11,
X86::R12, X86::R13, X86::R14, X86::R15
};
- for (const unsigned *AI = getOverlaps(GPR64[n]); unsigned Reg = *AI; ++AI)
+ for (const uint16_t *AI = getOverlaps(GPR64[n]); unsigned Reg = *AI; ++AI)
Reserved.set(Reg);
// XMM8, XMM9, ...
assert(X86::XMM15 == X86::XMM8+7);
- for (const unsigned *AI = getOverlaps(X86::XMM8 + n); unsigned Reg = *AI;
+ for (const uint16_t *AI = getOverlaps(X86::XMM8 + n); unsigned Reg = *AI;
++AI)
Reserved.set(Reg);
}
@@ -452,7 +328,7 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- return (RealignStack &&
+ return (MF.getTarget().Options.RealignStack &&
!MFI->hasVarSizedObjects());
}
@@ -583,7 +459,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// sure we restore the stack pointer immediately after the call, there may
// be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
MachineBasicBlock::iterator B = MBB.begin();
- while (I != B && !llvm::prior(I)->getDesc().isCall())
+ while (I != B && !llvm::prior(I)->isCall())
--I;
MBB.insert(I, New);
}
@@ -650,12 +526,10 @@ unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
unsigned X86RegisterInfo::getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
- return 0;
}
unsigned X86RegisterInfo::getEHHandlerRegister() const {
llvm_unreachable("What is the exception handler register");
- return 0;
}
namespace llvm {
@@ -665,7 +539,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
case MVT::i8:
if (High) {
switch (Reg) {
- default: return 0;
+ default: return getX86SubSuperRegister(Reg, MVT::i64, High);
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::AH;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
@@ -785,6 +659,22 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
return X86::R15D;
}
case MVT::i64:
+ // In 64-bit mode, if a "high" register was requested for the Q or r
+ // constraints, return one of these 16-bit names; otherwise fall
+ // through to the plain 64-bit mapping below.
+ if (High) {
+ switch (Reg) {
+ case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
+ return X86::SI;
+ case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
+ return X86::DI;
+ case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
+ return X86::BP;
+ case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
+ return X86::SP;
+ // Fallthrough.
+ }
+ }
switch (Reg) {
default: return Reg;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
@@ -821,8 +711,6 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
return X86::R15;
}
}
-
- return Reg;
}
}
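
A usage sketch of the new i64 "High" fallthrough; the expected results in the comments follow the switch above, and the wrapper is illustration only:

    #include "X86RegisterInfo.h"
    #include "llvm/CodeGen/ValueTypes.h"

    using namespace llvm;

    static void example() {
      // Previously returned 0; now yields the 16-bit name, which the
      // Q/r inline-asm constraint handling wants.
      unsigned A = getX86SubSuperRegister(X86::RSI, MVT::i64, /*High=*/true); // X86::SI
      unsigned B = getX86SubSuperRegister(X86::RAX, MVT::i8,  /*High=*/true); // X86::AH
      (void)A; (void)B;
    }
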
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
index 7d39c68..bee0393 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -1,4 +1,4 @@
-//===- X86RegisterInfo.h - X86 Register Information Impl --------*- C++ -*-===//
+//===-- X86RegisterInfo.h - X86 Register Information Impl -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -95,7 +95,8 @@ public:
/// getCalleeSavedRegs - Return a null-terminated list of all of the
/// callee-save registers on this target.
- const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+ const uint32_t *getCallPreservedMask(CallingConv::ID) const;
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses and
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
index 9a7db36..5263a49 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -70,7 +70,7 @@ let Namespace = "X86" in {
def BH : Register<"bh">;
// 16-bit registers
- let SubRegIndices = [sub_8bit, sub_8bit_hi] in {
+ let SubRegIndices = [sub_8bit, sub_8bit_hi], CoveredBySubRegs = 1 in {
def AX : RegisterWithSubRegs<"ax", [AL,AH]>;
def DX : RegisterWithSubRegs<"dx", [DL,DH]>;
def CX : RegisterWithSubRegs<"cx", [CL,CH]>;
diff --git a/contrib/llvm/lib/Target/X86/X86Relocations.h b/contrib/llvm/lib/Target/X86/X86Relocations.h
index 990962d..857becf 100644
--- a/contrib/llvm/lib/Target/X86/X86Relocations.h
+++ b/contrib/llvm/lib/Target/X86/X86Relocations.h
@@ -1,4 +1,4 @@
-//===- X86Relocations.h - X86 Code Relocations ------------------*- C++ -*-===//
+//===-- X86Relocations.h - X86 Code Relocations -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/X86/X86Schedule.td b/contrib/llvm/lib/Target/X86/X86Schedule.td
new file mode 100644
index 0000000..17f4efd
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86Schedule.td
@@ -0,0 +1,273 @@
+//===-- X86Schedule.td - X86 Scheduling Definitions --------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction Itinerary classes used for X86
+def IIC_DEFAULT : InstrItinClass;
+def IIC_ALU_MEM : InstrItinClass;
+def IIC_ALU_NONMEM : InstrItinClass;
+def IIC_LEA : InstrItinClass;
+def IIC_LEA_16 : InstrItinClass;
+def IIC_MUL8 : InstrItinClass;
+def IIC_MUL16_MEM : InstrItinClass;
+def IIC_MUL16_REG : InstrItinClass;
+def IIC_MUL32_MEM : InstrItinClass;
+def IIC_MUL32_REG : InstrItinClass;
+def IIC_MUL64 : InstrItinClass;
+// imul by al, ax, eax, rax
+def IIC_IMUL8 : InstrItinClass;
+def IIC_IMUL16_MEM : InstrItinClass;
+def IIC_IMUL16_REG : InstrItinClass;
+def IIC_IMUL32_MEM : InstrItinClass;
+def IIC_IMUL32_REG : InstrItinClass;
+def IIC_IMUL64 : InstrItinClass;
+// imul reg by reg|mem
+def IIC_IMUL16_RM : InstrItinClass;
+def IIC_IMUL16_RR : InstrItinClass;
+def IIC_IMUL32_RM : InstrItinClass;
+def IIC_IMUL32_RR : InstrItinClass;
+def IIC_IMUL64_RM : InstrItinClass;
+def IIC_IMUL64_RR : InstrItinClass;
+// imul reg = reg/mem * imm
+def IIC_IMUL16_RMI : InstrItinClass;
+def IIC_IMUL16_RRI : InstrItinClass;
+def IIC_IMUL32_RMI : InstrItinClass;
+def IIC_IMUL32_RRI : InstrItinClass;
+def IIC_IMUL64_RMI : InstrItinClass;
+def IIC_IMUL64_RRI : InstrItinClass;
+// div
+def IIC_DIV8_MEM : InstrItinClass;
+def IIC_DIV8_REG : InstrItinClass;
+def IIC_DIV16 : InstrItinClass;
+def IIC_DIV32 : InstrItinClass;
+def IIC_DIV64 : InstrItinClass;
+// idiv
+def IIC_IDIV8 : InstrItinClass;
+def IIC_IDIV16 : InstrItinClass;
+def IIC_IDIV32 : InstrItinClass;
+def IIC_IDIV64 : InstrItinClass;
+// neg/not/inc/dec
+def IIC_UNARY_REG : InstrItinClass;
+def IIC_UNARY_MEM : InstrItinClass;
+// add/sub/and/or/xor/adc/sbc/cmp/test
+def IIC_BIN_MEM : InstrItinClass;
+def IIC_BIN_NONMEM : InstrItinClass;
+// shift/rotate
+def IIC_SR : InstrItinClass;
+// shift double
+def IIC_SHD16_REG_IM : InstrItinClass;
+def IIC_SHD16_REG_CL : InstrItinClass;
+def IIC_SHD16_MEM_IM : InstrItinClass;
+def IIC_SHD16_MEM_CL : InstrItinClass;
+def IIC_SHD32_REG_IM : InstrItinClass;
+def IIC_SHD32_REG_CL : InstrItinClass;
+def IIC_SHD32_MEM_IM : InstrItinClass;
+def IIC_SHD32_MEM_CL : InstrItinClass;
+def IIC_SHD64_REG_IM : InstrItinClass;
+def IIC_SHD64_REG_CL : InstrItinClass;
+def IIC_SHD64_MEM_IM : InstrItinClass;
+def IIC_SHD64_MEM_CL : InstrItinClass;
+// cmov
+def IIC_CMOV16_RM : InstrItinClass;
+def IIC_CMOV16_RR : InstrItinClass;
+def IIC_CMOV32_RM : InstrItinClass;
+def IIC_CMOV32_RR : InstrItinClass;
+def IIC_CMOV64_RM : InstrItinClass;
+def IIC_CMOV64_RR : InstrItinClass;
+// set
+def IIC_SET_R : InstrItinClass;
+def IIC_SET_M : InstrItinClass;
+// jmp/jcc/jcxz
+def IIC_Jcc : InstrItinClass;
+def IIC_JCXZ : InstrItinClass;
+def IIC_JMP_REL : InstrItinClass;
+def IIC_JMP_REG : InstrItinClass;
+def IIC_JMP_MEM : InstrItinClass;
+def IIC_JMP_FAR_MEM : InstrItinClass;
+def IIC_JMP_FAR_PTR : InstrItinClass;
+// loop
+def IIC_LOOP : InstrItinClass;
+def IIC_LOOPE : InstrItinClass;
+def IIC_LOOPNE : InstrItinClass;
+// call
+def IIC_CALL_RI : InstrItinClass;
+def IIC_CALL_MEM : InstrItinClass;
+def IIC_CALL_FAR_MEM : InstrItinClass;
+def IIC_CALL_FAR_PTR : InstrItinClass;
+// ret
+def IIC_RET : InstrItinClass;
+def IIC_RET_IMM : InstrItinClass;
+// sign extension movs
+def IIC_MOVSX : InstrItinClass;
+def IIC_MOVSX_R16_R8 : InstrItinClass;
+def IIC_MOVSX_R16_M8 : InstrItinClass;
+def IIC_MOVSX_R16_R16 : InstrItinClass;
+def IIC_MOVSX_R32_R32 : InstrItinClass;
+// zero extension movs
+def IIC_MOVZX : InstrItinClass;
+def IIC_MOVZX_R16_R8 : InstrItinClass;
+def IIC_MOVZX_R16_M8 : InstrItinClass;
+
+def IIC_REP_MOVS : InstrItinClass;
+def IIC_REP_STOS : InstrItinClass;
+
+// SSE scalar/parallel binary operations
+def IIC_SSE_ALU_F32S_RR : InstrItinClass;
+def IIC_SSE_ALU_F32S_RM : InstrItinClass;
+def IIC_SSE_ALU_F64S_RR : InstrItinClass;
+def IIC_SSE_ALU_F64S_RM : InstrItinClass;
+def IIC_SSE_MUL_F32S_RR : InstrItinClass;
+def IIC_SSE_MUL_F32S_RM : InstrItinClass;
+def IIC_SSE_MUL_F64S_RR : InstrItinClass;
+def IIC_SSE_MUL_F64S_RM : InstrItinClass;
+def IIC_SSE_DIV_F32S_RR : InstrItinClass;
+def IIC_SSE_DIV_F32S_RM : InstrItinClass;
+def IIC_SSE_DIV_F64S_RR : InstrItinClass;
+def IIC_SSE_DIV_F64S_RM : InstrItinClass;
+def IIC_SSE_ALU_F32P_RR : InstrItinClass;
+def IIC_SSE_ALU_F32P_RM : InstrItinClass;
+def IIC_SSE_ALU_F64P_RR : InstrItinClass;
+def IIC_SSE_ALU_F64P_RM : InstrItinClass;
+def IIC_SSE_MUL_F32P_RR : InstrItinClass;
+def IIC_SSE_MUL_F32P_RM : InstrItinClass;
+def IIC_SSE_MUL_F64P_RR : InstrItinClass;
+def IIC_SSE_MUL_F64P_RM : InstrItinClass;
+def IIC_SSE_DIV_F32P_RR : InstrItinClass;
+def IIC_SSE_DIV_F32P_RM : InstrItinClass;
+def IIC_SSE_DIV_F64P_RR : InstrItinClass;
+def IIC_SSE_DIV_F64P_RM : InstrItinClass;
+
+def IIC_SSE_COMIS_RR : InstrItinClass;
+def IIC_SSE_COMIS_RM : InstrItinClass;
+
+def IIC_SSE_HADDSUB_RR : InstrItinClass;
+def IIC_SSE_HADDSUB_RM : InstrItinClass;
+
+def IIC_SSE_BIT_P_RR : InstrItinClass;
+def IIC_SSE_BIT_P_RM : InstrItinClass;
+
+def IIC_SSE_INTALU_P_RR : InstrItinClass;
+def IIC_SSE_INTALU_P_RM : InstrItinClass;
+def IIC_SSE_INTALUQ_P_RR : InstrItinClass;
+def IIC_SSE_INTALUQ_P_RM : InstrItinClass;
+
+def IIC_SSE_INTMUL_P_RR : InstrItinClass;
+def IIC_SSE_INTMUL_P_RM : InstrItinClass;
+
+def IIC_SSE_INTSH_P_RR : InstrItinClass;
+def IIC_SSE_INTSH_P_RM : InstrItinClass;
+def IIC_SSE_INTSH_P_RI : InstrItinClass;
+
+def IIC_SSE_CMPP_RR : InstrItinClass;
+def IIC_SSE_CMPP_RM : InstrItinClass;
+
+def IIC_SSE_SHUFP : InstrItinClass;
+def IIC_SSE_PSHUF : InstrItinClass;
+
+def IIC_SSE_UNPCK : InstrItinClass;
+
+def IIC_SSE_MOVMSK : InstrItinClass;
+def IIC_SSE_MASKMOV : InstrItinClass;
+
+def IIC_SSE_PEXTRW : InstrItinClass;
+def IIC_SSE_PINSRW : InstrItinClass;
+
+def IIC_SSE_PABS_RR : InstrItinClass;
+def IIC_SSE_PABS_RM : InstrItinClass;
+
+def IIC_SSE_SQRTP_RR : InstrItinClass;
+def IIC_SSE_SQRTP_RM : InstrItinClass;
+def IIC_SSE_SQRTS_RR : InstrItinClass;
+def IIC_SSE_SQRTS_RM : InstrItinClass;
+
+def IIC_SSE_RCPP_RR : InstrItinClass;
+def IIC_SSE_RCPP_RM : InstrItinClass;
+def IIC_SSE_RCPS_RR : InstrItinClass;
+def IIC_SSE_RCPS_RM : InstrItinClass;
+
+def IIC_SSE_MOV_S_RR : InstrItinClass;
+def IIC_SSE_MOV_S_RM : InstrItinClass;
+def IIC_SSE_MOV_S_MR : InstrItinClass;
+
+def IIC_SSE_MOVA_P_RR : InstrItinClass;
+def IIC_SSE_MOVA_P_RM : InstrItinClass;
+def IIC_SSE_MOVA_P_MR : InstrItinClass;
+
+def IIC_SSE_MOVU_P_RR : InstrItinClass;
+def IIC_SSE_MOVU_P_RM : InstrItinClass;
+def IIC_SSE_MOVU_P_MR : InstrItinClass;
+
+def IIC_SSE_MOVDQ : InstrItinClass;
+def IIC_SSE_MOVD_ToGP : InstrItinClass;
+def IIC_SSE_MOVQ_RR : InstrItinClass;
+
+def IIC_SSE_MOV_LH : InstrItinClass;
+
+def IIC_SSE_LDDQU : InstrItinClass;
+
+def IIC_SSE_MOVNT : InstrItinClass;
+
+def IIC_SSE_PHADDSUBD_RR : InstrItinClass;
+def IIC_SSE_PHADDSUBD_RM : InstrItinClass;
+def IIC_SSE_PHADDSUBSW_RR : InstrItinClass;
+def IIC_SSE_PHADDSUBSW_RM : InstrItinClass;
+def IIC_SSE_PHADDSUBW_RR : InstrItinClass;
+def IIC_SSE_PHADDSUBW_RM : InstrItinClass;
+def IIC_SSE_PSHUFB_RR : InstrItinClass;
+def IIC_SSE_PSHUFB_RM : InstrItinClass;
+def IIC_SSE_PSIGN_RR : InstrItinClass;
+def IIC_SSE_PSIGN_RM : InstrItinClass;
+
+def IIC_SSE_PMADD : InstrItinClass;
+def IIC_SSE_PMULHRSW : InstrItinClass;
+def IIC_SSE_PALIGNR : InstrItinClass;
+def IIC_SSE_MWAIT : InstrItinClass;
+def IIC_SSE_MONITOR : InstrItinClass;
+
+def IIC_SSE_PREFETCH : InstrItinClass;
+def IIC_SSE_PAUSE : InstrItinClass;
+def IIC_SSE_LFENCE : InstrItinClass;
+def IIC_SSE_MFENCE : InstrItinClass;
+def IIC_SSE_SFENCE : InstrItinClass;
+def IIC_SSE_LDMXCSR : InstrItinClass;
+def IIC_SSE_STMXCSR : InstrItinClass;
+
+def IIC_SSE_CVT_PD_RR : InstrItinClass;
+def IIC_SSE_CVT_PD_RM : InstrItinClass;
+def IIC_SSE_CVT_PS_RR : InstrItinClass;
+def IIC_SSE_CVT_PS_RM : InstrItinClass;
+def IIC_SSE_CVT_PI2PS_RR : InstrItinClass;
+def IIC_SSE_CVT_PI2PS_RM : InstrItinClass;
+def IIC_SSE_CVT_Scalar_RR : InstrItinClass;
+def IIC_SSE_CVT_Scalar_RM : InstrItinClass;
+def IIC_SSE_CVT_SS2SI32_RM : InstrItinClass;
+def IIC_SSE_CVT_SS2SI32_RR : InstrItinClass;
+def IIC_SSE_CVT_SS2SI64_RM : InstrItinClass;
+def IIC_SSE_CVT_SS2SI64_RR : InstrItinClass;
+def IIC_SSE_CVT_SD2SI_RM : InstrItinClass;
+def IIC_SSE_CVT_SD2SI_RR : InstrItinClass;
+
+def IIC_CMPX_LOCK : InstrItinClass;
+def IIC_CMPX_LOCK_8 : InstrItinClass;
+def IIC_CMPX_LOCK_8B : InstrItinClass;
+def IIC_CMPX_LOCK_16B : InstrItinClass;
+
+def IIC_XADD_LOCK_MEM : InstrItinClass;
+def IIC_XADD_LOCK_MEM8 : InstrItinClass;
+
+
+//===----------------------------------------------------------------------===//
+// Processor instruction itineraries.
+
+def GenericItineraries : ProcessorItineraries<[], [], []>;
+
+include "X86ScheduleAtom.td"
+
diff --git a/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td b/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td
new file mode 100644
index 0000000..77d4e56
--- /dev/null
+++ b/contrib/llvm/lib/Target/X86/X86ScheduleAtom.td
@@ -0,0 +1,305 @@
+//===- X86ScheduleAtom.td - X86 Atom Scheduling Definitions -*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the itinerary class data for the Intel Atom (Bonnell)
+// processors.
+//
+//===----------------------------------------------------------------------===//
+
+//
+// Scheduling information derived from the "Intel 64 and IA-32 Architectures
+// Optimization Reference Manual", Chapter 13, Section 4.
+// Functional Units
+// Port 0
+def Port0 : FuncUnit; // ALU: ALU0, shift/rotate, load/store
+                 // SIMD/FP: SIMD ALU, Shuffle, SIMD/FP multiply, divide
+def Port1 : FuncUnit; // ALU: ALU1, bit processing, jump, and LEA
+ // SIMD/FP: SIMD ALU, FP Adder
+
+def AtomItineraries : ProcessorItineraries<
+ [ Port0, Port1 ],
+ [], [
+ // P0 only
+ // InstrItinData<class, [InstrStage<N, [P0]>] >,
+ // P0 or P1
+ // InstrItinData<class, [InstrStage<N, [P0, P1]>] >,
+ // P0 and P1
+ // InstrItinData<class, [InstrStage<N, [P0], 0>, InstrStage<N, [P1]>] >,
+ //
+ // Default is 1 cycle, port0 or port1
+ InstrItinData<IIC_DEFAULT, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_ALU_MEM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_ALU_NONMEM, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_LEA, [InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_LEA_16, [InstrStage<2, [Port0, Port1]>] >,
+ // mul
+ InstrItinData<IIC_MUL8, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_MUL16_MEM, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_MUL16_REG, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_MUL32_MEM, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_MUL32_REG, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_MUL64, [InstrStage<12, [Port0, Port1]>] >,
+ // imul by al, ax, eax, rax
+ InstrItinData<IIC_IMUL8, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL16_MEM, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL16_REG, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL32_MEM, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL32_REG, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL64, [InstrStage<12, [Port0, Port1]>] >,
+ // imul reg by reg|mem
+ InstrItinData<IIC_IMUL16_RM, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL16_RR, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL32_RM, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_IMUL32_RR, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_IMUL64_RM, [InstrStage<12, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL64_RR, [InstrStage<12, [Port0, Port1]>] >,
+ // imul reg = reg/mem * imm
+ InstrItinData<IIC_IMUL16_RRI, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL32_RRI, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_IMUL64_RRI, [InstrStage<14, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL16_RMI, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_IMUL32_RMI, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_IMUL64_RMI, [InstrStage<14, [Port0, Port1]>] >,
+ // idiv
+ InstrItinData<IIC_IDIV8, [InstrStage<62, [Port0, Port1]>] >,
+ InstrItinData<IIC_IDIV16, [InstrStage<62, [Port0, Port1]>] >,
+ InstrItinData<IIC_IDIV32, [InstrStage<62, [Port0, Port1]>] >,
+ InstrItinData<IIC_IDIV64, [InstrStage<130, [Port0, Port1]>] >,
+ // div
+ InstrItinData<IIC_DIV8_REG, [InstrStage<50, [Port0, Port1]>] >,
+ InstrItinData<IIC_DIV8_MEM, [InstrStage<68, [Port0, Port1]>] >,
+ InstrItinData<IIC_DIV16, [InstrStage<50, [Port0, Port1]>] >,
+ InstrItinData<IIC_DIV32, [InstrStage<50, [Port0, Port1]>] >,
+ InstrItinData<IIC_DIV64, [InstrStage<130, [Port0, Port1]>] >,
+ // neg/not/inc/dec
+ InstrItinData<IIC_UNARY_REG, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_UNARY_MEM, [InstrStage<1, [Port0]>] >,
+ // add/sub/and/or/xor/adc/sbc/cmp/test
+ InstrItinData<IIC_BIN_NONMEM, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_BIN_MEM, [InstrStage<1, [Port0]>] >,
+ // shift/rotate
+ InstrItinData<IIC_SR, [InstrStage<1, [Port0]>] >,
+ // shift double
+ InstrItinData<IIC_SHD16_REG_IM, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD16_REG_CL, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD16_MEM_IM, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD16_MEM_CL, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD32_REG_IM, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD32_REG_CL, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD32_MEM_IM, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD32_MEM_CL, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD64_REG_IM, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD64_REG_CL, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD64_MEM_IM, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_SHD64_MEM_CL, [InstrStage<9, [Port0, Port1]>] >,
+ // cmov
+ InstrItinData<IIC_CMOV16_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_CMOV16_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMOV32_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_CMOV32_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMOV64_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_CMOV64_RR, [InstrStage<1, [Port0, Port1]>] >,
+ // set
+ InstrItinData<IIC_SET_M, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_SET_R, [InstrStage<1, [Port0, Port1]>] >,
+ // jcc
+ InstrItinData<IIC_Jcc, [InstrStage<1, [Port1]>] >,
+ // jcxz/jecxz/jrcxz
+ InstrItinData<IIC_JCXZ, [InstrStage<4, [Port0, Port1]>] >,
+ // jmp rel
+ InstrItinData<IIC_JMP_REL, [InstrStage<1, [Port1]>] >,
+ // jmp indirect
+ InstrItinData<IIC_JMP_REG, [InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_JMP_MEM, [InstrStage<2, [Port0, Port1]>] >,
+ // jmp far
+ InstrItinData<IIC_JMP_FAR_MEM, [InstrStage<32, [Port0, Port1]>] >,
+ InstrItinData<IIC_JMP_FAR_PTR, [InstrStage<31, [Port0, Port1]>] >,
+ // loop/loope/loopne
+ InstrItinData<IIC_LOOP, [InstrStage<18, [Port0, Port1]>] >,
+ InstrItinData<IIC_LOOPE, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_LOOPNE, [InstrStage<17, [Port0, Port1]>] >,
+  // call - reg/imm issues on both ports; all other forms are multi-cycle
+ InstrItinData<IIC_CALL_RI, [InstrStage<1, [Port0], 0>,
+ InstrStage<1, [Port1]>] >,
+ InstrItinData<IIC_CALL_MEM, [InstrStage<15, [Port0, Port1]>] >,
+ InstrItinData<IIC_CALL_FAR_MEM, [InstrStage<40, [Port0, Port1]>] >,
+ InstrItinData<IIC_CALL_FAR_PTR, [InstrStage<39, [Port0, Port1]>] >,
+  // ret
+ InstrItinData<IIC_RET, [InstrStage<79, [Port0, Port1]>] >,
+ InstrItinData<IIC_RET_IMM, [InstrStage<1, [Port0], 0>, InstrStage<1, [Port1]>] >,
+  // sign extension movs
+  InstrItinData<IIC_MOVSX, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MOVSX_R16_R8, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOVSX_R16_M8, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOVSX_R16_R16, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOVSX_R32_R32, [InstrStage<1, [Port0, Port1]>] >,
+  // zero extension movs
+  InstrItinData<IIC_MOVZX, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_MOVZX_R16_R8, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_MOVZX_R16_M8, [InstrStage<3, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_REP_MOVS, [InstrStage<75, [Port0, Port1]>] >,
+ InstrItinData<IIC_REP_STOS, [InstrStage<74, [Port0, Port1]>] >,
+
+ // SSE binary operations
+ // arithmetic fp scalar
+ InstrItinData<IIC_SSE_ALU_F32S_RR, [InstrStage<5, [Port1]>] >,
+ InstrItinData<IIC_SSE_ALU_F32S_RM, [InstrStage<5, [Port0], 0>,
+ InstrStage<5, [Port1]>] >,
+ InstrItinData<IIC_SSE_ALU_F64S_RR, [InstrStage<5, [Port1]>] >,
+ InstrItinData<IIC_SSE_ALU_F64S_RM, [InstrStage<5, [Port0], 0>,
+ InstrStage<5, [Port1]>] >,
+ InstrItinData<IIC_SSE_MUL_F32S_RR, [InstrStage<4, [Port0]>] >,
+ InstrItinData<IIC_SSE_MUL_F32S_RM, [InstrStage<4, [Port0]>] >,
+ InstrItinData<IIC_SSE_MUL_F64S_RR, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_SSE_MUL_F64S_RM, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_SSE_DIV_F32S_RR, [InstrStage<34, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_DIV_F32S_RM, [InstrStage<34, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_DIV_F64S_RR, [InstrStage<62, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_DIV_F64S_RM, [InstrStage<62, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_COMIS_RR, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_COMIS_RM, [InstrStage<10, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_HADDSUB_RR, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_HADDSUB_RM, [InstrStage<9, [Port0, Port1]>] >,
+
+ // arithmetic fp parallel
+ InstrItinData<IIC_SSE_ALU_F32P_RR, [InstrStage<5, [Port1]>] >,
+ InstrItinData<IIC_SSE_ALU_F32P_RM, [InstrStage<5, [Port0], 0>,
+ InstrStage<5, [Port1]>] >,
+ InstrItinData<IIC_SSE_ALU_F64P_RR, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_ALU_F64P_RM, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_MUL_F32P_RR, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_SSE_MUL_F32P_RM, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_SSE_MUL_F64P_RR, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_MUL_F64P_RM, [InstrStage<10, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_DIV_F32P_RR, [InstrStage<70, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_DIV_F32P_RM, [InstrStage<70, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_DIV_F64P_RR, [InstrStage<125, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_DIV_F64P_RM, [InstrStage<125, [Port0, Port1]>] >,
+
+ // bitwise parallel
+ InstrItinData<IIC_SSE_BIT_P_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_BIT_P_RM, [InstrStage<1, [Port0]>] >,
+
+ // arithmetic int parallel
+ InstrItinData<IIC_SSE_INTALU_P_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_INTALU_P_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_INTALUQ_P_RR, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_INTALUQ_P_RM, [InstrStage<3, [Port0, Port1]>] >,
+
+ // multiply int parallel
+ InstrItinData<IIC_SSE_INTMUL_P_RR, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_SSE_INTMUL_P_RM, [InstrStage<5, [Port0]>] >,
+
+ // shift parallel
+ InstrItinData<IIC_SSE_INTSH_P_RR, [InstrStage<2, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_INTSH_P_RM, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_INTSH_P_RI, [InstrStage<1, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_CMPP_RR, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CMPP_RM, [InstrStage<7, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_SHUFP, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_PSHUF, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_UNPCK, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_SQRTP_RR, [InstrStage<13, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTP_RM, [InstrStage<14, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTS_RR, [InstrStage<11, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_SQRTS_RM, [InstrStage<12, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_RCPP_RR, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_RCPP_RM, [InstrStage<10, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_RCPS_RR, [InstrStage<4, [Port0]>] >,
+ InstrItinData<IIC_SSE_RCPS_RM, [InstrStage<4, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_MOVMSK, [InstrStage<3, [Port0]>] >,
+ InstrItinData<IIC_SSE_MASKMOV, [InstrStage<2, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_PEXTRW, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PINSRW, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_PABS_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PABS_RM, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_MOV_S_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_MOV_S_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_MOV_S_MR, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_MOVA_P_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_MOVA_P_RM, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_MOVA_P_MR, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_MOVU_P_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_MOVU_P_RM, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_MOVU_P_MR, [InstrStage<2, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_MOV_LH, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_LDDQU, [InstrStage<3, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_MOVDQ, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_MOVD_ToGP, [InstrStage<3, [Port0]>] >,
+ InstrItinData<IIC_SSE_MOVQ_RR, [InstrStage<1, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_MOVNT, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_PREFETCH, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_PAUSE, [InstrStage<17, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_LFENCE, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_MFENCE, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_SFENCE, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_LDMXCSR, [InstrStage<5, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_STMXCSR, [InstrStage<15, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_SSE_PHADDSUBD_RR, [InstrStage<3, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBD_RM, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBSW_RR, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBSW_RM, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBW_RR, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PHADDSUBW_RM, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PSHUFB_RR, [InstrStage<4, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PSHUFB_RM, [InstrStage<5, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PSIGN_RR, [InstrStage<1, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_PSIGN_RM, [InstrStage<1, [Port0]>] >,
+
+ InstrItinData<IIC_SSE_PMADD, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_SSE_PMULHRSW, [InstrStage<5, [Port0]>] >,
+ InstrItinData<IIC_SSE_PALIGNR, [InstrStage<1, [Port0]>] >,
+ InstrItinData<IIC_SSE_MWAIT, [InstrStage<46, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_MONITOR, [InstrStage<45, [Port0, Port1]>] >,
+
+ // conversions
+ // to/from PD ...
+ InstrItinData<IIC_SSE_CVT_PD_RR, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_PD_RM, [InstrStage<8, [Port0, Port1]>] >,
+ // to/from PS except to/from PD and PS2PI
+ InstrItinData<IIC_SSE_CVT_PS_RR, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_PS_RM, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_Scalar_RR, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_Scalar_RM, [InstrStage<7, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_SS2SI32_RR, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_SS2SI32_RM, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_SS2SI64_RR, [InstrStage<9, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_SS2SI64_RM, [InstrStage<10, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_SD2SI_RR, [InstrStage<8, [Port0, Port1]>] >,
+ InstrItinData<IIC_SSE_CVT_SD2SI_RM, [InstrStage<9, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_CMPX_LOCK, [InstrStage<14, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPX_LOCK_8, [InstrStage<6, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPX_LOCK_8B, [InstrStage<18, [Port0, Port1]>] >,
+ InstrItinData<IIC_CMPX_LOCK_16B, [InstrStage<22, [Port0, Port1]>] >,
+
+ InstrItinData<IIC_XADD_LOCK_MEM, [InstrStage<2, [Port0, Port1]>] >,
+  InstrItinData<IIC_XADD_LOCK_MEM8, [InstrStage<3, [Port0, Port1]>] >
+ ]>;
+
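
A note on the encoding used throughout AtomItineraries: the trailing 0 in InstrStage<N, [Port0], 0> sets the cycles-to-next-stage to zero, so the following stage begins in the same cycle; that is how the "P0 and P1" entries in the key comment reserve both ports at once. Consumers read these timings back through InstrItineraryData; a hedged sketch using the MC itinerary API as it exists in this revision, given a MachineInstr *MI and the selected subtarget ST (names assumed):

    // Sketch: how a scheduler reads the Atom timings back.
    const InstrItineraryData &Itins = ST.getInstrItineraryData();
    if (!Itins.isEmpty()) {
      unsigned Class = MI->getDesc().getSchedClass();  // e.g. IIC_MUL64
      unsigned Latency = Itins.getStageLatency(Class); // 12 for IIC_MUL64 above
      (void)Latency;
    }
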
diff --git a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index 6406bce..9a04e35 100644
--- a/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -65,7 +65,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
std::pair<SDValue,SDValue> CallResult =
TLI.LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
false, false, false, false,
- 0, CallingConv::C, false, /*isReturnValueUsed=*/false,
+ 0, CallingConv::C, /*isTailCall=*/false,
+ /*doesNotRet=*/false, /*isReturnValueUsed=*/false,
DAG.getExternalSymbol(bzeroEntry, IntPtr), Args,
DAG, dl);
return CallResult.second;
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
index 7064dd0..452dd7e 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -21,7 +21,6 @@
#include "llvm/Support/Host.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/SmallVector.h"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
@@ -177,16 +176,18 @@ unsigned X86Subtarget::getSpecialAddressLatency() const {
void X86Subtarget::AutoDetectSubtargetFeatures() {
unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
+ unsigned MaxLevel;
union {
unsigned u[3];
char c[12];
} text;
-
- if (X86_MC::GetCpuIDAndInfo(0, &EAX, text.u+0, text.u+2, text.u+1))
+
+ if (X86_MC::GetCpuIDAndInfo(0, &MaxLevel, text.u+0, text.u+2, text.u+1) ||
+ MaxLevel < 1)
return;
X86_MC::GetCpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX);
-
+
if ((EDX >> 15) & 1) { HasCMov = true; ToggleFeature(X86::FeatureCMOV); }
if ((EDX >> 23) & 1) { X86SSELevel = MMX; ToggleFeature(X86::FeatureMMX); }
if ((EDX >> 25) & 1) { X86SSELevel = SSE1; ToggleFeature(X86::FeatureSSE1); }
@@ -196,7 +197,7 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
if ((ECX >> 19) & 1) { X86SSELevel = SSE41; ToggleFeature(X86::FeatureSSE41);}
if ((ECX >> 20) & 1) { X86SSELevel = SSE42; ToggleFeature(X86::FeatureSSE42);}
// FIXME: AVX codegen support is not ready.
- //if ((ECX >> 28) & 1) { HasAVX = true; ToggleFeature(X86::FeatureAVX); }
+ //if ((ECX >> 28) & 1) { X86SSELevel = AVX; ToggleFeature(X86::FeatureAVX); }
bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0;
bool IsAMD = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0;
@@ -244,28 +245,69 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
IsBTMemSlow = true;
ToggleFeature(X86::FeatureSlowBTMem);
}
+
// If it's Nehalem, unaligned memory access is fast.
+ // FIXME: Nehalem is family 6. Also include Westmere and later processors?
if (Family == 15 && Model == 26) {
IsUAMemFast = true;
ToggleFeature(X86::FeatureFastUAMem);
}
- X86_MC::GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
- if ((EDX >> 29) & 0x1) {
- HasX86_64 = true;
- ToggleFeature(X86::Feature64Bit);
- }
- if ((ECX >> 5) & 0x1) {
- HasLZCNT = true;
- ToggleFeature(X86::FeatureLZCNT);
+ // Set processor type. Currently only Atom is detected.
+ if (Family == 6 && Model == 28) {
+ X86ProcFamily = IntelAtom;
+ ToggleFeature(X86::FeatureLeaForSP);
}
- if (IsAMD && ((ECX >> 6) & 0x1)) {
- HasSSE4A = true;
- ToggleFeature(X86::FeatureSSE4A);
+
+ unsigned MaxExtLevel;
+ X86_MC::GetCpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
+
+ if (MaxExtLevel >= 0x80000001) {
+ X86_MC::GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
+ if ((EDX >> 29) & 0x1) {
+ HasX86_64 = true;
+ ToggleFeature(X86::Feature64Bit);
+ }
+ if ((ECX >> 5) & 0x1) {
+ HasLZCNT = true;
+ ToggleFeature(X86::FeatureLZCNT);
+ }
+ if (IsAMD) {
+ if ((ECX >> 6) & 0x1) {
+ HasSSE4A = true;
+ ToggleFeature(X86::FeatureSSE4A);
+ }
+ if ((ECX >> 11) & 0x1) {
+ HasXOP = true;
+ ToggleFeature(X86::FeatureXOP);
+ }
+ if ((ECX >> 16) & 0x1) {
+ HasFMA4 = true;
+ ToggleFeature(X86::FeatureFMA4);
+ }
+ }
}
- if (IsAMD && ((ECX >> 16) & 0x1)) {
- HasFMA4 = true;
- ToggleFeature(X86::FeatureFMA4);
+ }
+
+ if (IsIntel && MaxLevel >= 7) {
+ if (!X86_MC::GetCpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX)) {
+ if (EBX & 0x1) {
+ HasFSGSBase = true;
+ ToggleFeature(X86::FeatureFSGSBase);
+ }
+ if ((EBX >> 3) & 0x1) {
+ HasBMI = true;
+ ToggleFeature(X86::FeatureBMI);
+ }
+ // FIXME: AVX2 codegen support is not ready.
+ //if ((EBX >> 5) & 0x1) {
+ // X86SSELevel = AVX2;
+ // ToggleFeature(X86::FeatureAVX2);
+ //}
+ if ((EBX >> 8) & 0x1) {
+ HasBMI2 = true;
+ ToggleFeature(X86::FeatureBMI2);
+ }
}
}
}
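
The restructured detection now gates the extended leaves behind MaxExtLevel and leaf 7 behind MaxLevel >= 7, which is the documented way to probe CPUID. For reference, a standalone host-side sketch of the same leaf-7 bit tests, using GCC/Clang's <cpuid.h> helpers rather than the in-tree X86_MC wrappers (illustrative only, not part of this diff):

    #include <cpuid.h>

    // Sketch: probe CPUID leaf 7, sub-leaf 0, and test the bits used above.
    static void ProbeLeaf7() {
      unsigned EAX, EBX, ECX, EDX;
      if (__get_cpuid_max(0, 0) < 7)
        return;                              // leaf 7 not implemented
      __cpuid_count(7, 0, EAX, EBX, ECX, EDX);
      bool HasFSGSBase = EBX & 0x1;          // EBX bit 0
      bool HasBMI      = (EBX >> 3) & 0x1;   // EBX bit 3
      bool HasAVX2     = (EBX >> 5) & 0x1;   // EBX bit 5
      bool HasBMI2     = (EBX >> 8) & 0x1;   // EBX bit 8
      (void)HasFSGSBase; (void)HasBMI; (void)HasAVX2; (void)HasBMI2;
    }
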
@@ -274,6 +316,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
const std::string &FS,
unsigned StackAlignOverride, bool is64Bit)
: X86GenSubtargetInfo(TT, CPU, FS)
+ , X86ProcFamily(Others)
, PICStyle(PICStyles::None)
, X86SSELevel(NoMMXSSE)
, X863DNowLevel(NoThreeDNow)
@@ -281,31 +324,35 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
, HasX86_64(false)
, HasPOPCNT(false)
, HasSSE4A(false)
- , HasAVX(false)
, HasAES(false)
, HasCLMUL(false)
, HasFMA3(false)
, HasFMA4(false)
+ , HasXOP(false)
, HasMOVBE(false)
, HasRDRAND(false)
, HasF16C(false)
+ , HasFSGSBase(false)
, HasLZCNT(false)
, HasBMI(false)
+ , HasBMI2(false)
, IsBTMemSlow(false)
, IsUAMemFast(false)
, HasVectorUAMem(false)
, HasCmpxchg16b(false)
- , stackAlignment(8)
+ , UseLeaForSP(false)
+ , PostRAScheduler(false)
+ , stackAlignment(4)
// FIXME: this is a known good value for Yonah. How about others?
, MaxInlineSizeThreshold(128)
, TargetTriple(TT)
- , In64BitMode(is64Bit)
- , InNaClMode(false) {
+ , In64BitMode(is64Bit) {
// Determine default and user specified characteristics
+ std::string CPUName = CPU;
if (!FS.empty() || !CPU.empty()) {
- std::string CPUName = CPU;
if (CPUName.empty()) {
-#if defined (__x86_64__) || defined(__i386__)
+#if defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)\
+ || defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
CPUName = sys::getHostCPUName();
#else
CPUName = "generic";
@@ -325,6 +372,13 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
// If feature string is not empty, parse features string.
ParseSubtargetFeatures(CPUName, FullFS);
} else {
+ if (CPUName.empty()) {
+#if defined (__x86_64__) || defined(__i386__)
+ CPUName = sys::getHostCPUName();
+#else
+ CPUName = "generic";
+#endif
+ }
// Otherwise, use CPUID to auto-detect feature set.
AutoDetectSubtargetFeatures();
@@ -333,7 +387,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
HasX86_64 = true; ToggleFeature(X86::Feature64Bit);
HasCMov = true; ToggleFeature(X86::FeatureCMOV);
- if (!HasAVX && X86SSELevel < SSE2) {
+ if (X86SSELevel < SSE2) {
X86SSELevel = SSE2;
ToggleFeature(X86::FeatureSSE1);
ToggleFeature(X86::FeatureSSE2);
@@ -341,28 +395,22 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
}
}
+ if (X86ProcFamily == IntelAtom) {
+ PostRAScheduler = true;
+ InstrItins = getInstrItineraryForCPU(CPUName);
+ }
+
// It's important to keep the MCSubtargetInfo feature bits in sync with
// target data structure which is shared with MC code emitter, etc.
if (In64BitMode)
ToggleFeature(X86::Mode64Bit);
- if (isTargetNaCl()) {
- InNaClMode = true;
- ToggleFeature(X86::ModeNaCl);
- }
-
- if (HasAVX)
- X86SSELevel = NoMMXSSE;
-
DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
<< ", 3DNowLevel " << X863DNowLevel
<< ", 64bit " << HasX86_64 << "\n");
assert((!In64BitMode || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- if(EnableSegmentedStacks && !isTargetELF())
- report_fatal_error("Segmented stacks are only implemented on ELF.");
-
// Stack alignment is 16 bytes on Darwin, FreeBSD, Linux and Solaris (both
// 32 and 64 bit) and for all 64-bit targets.
if (StackAlignOverride)
@@ -371,3 +419,12 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
isTargetSolaris() || In64BitMode)
stackAlignment = 16;
}
+
+bool X86Subtarget::enablePostRAScheduler(
+ CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode& Mode,
+ RegClassVector& CriticalPathRCs) const {
+ Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
+ CriticalPathRCs.clear();
+ return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
+}
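
This hook is what the generic post-RA scheduler pass queries; because it requires OptLevel >= CodeGenOpt::Default, the Atom scheduling only kicks in at -O1 and above. A caller-side sketch (the concrete SmallVector used for RegClassVector storage is an assumption about this revision's typedef):

    // Sketch of the query the post-RA scheduler performs (simplified).
    TargetSubtargetInfo::AntiDepBreakMode Mode;
    SmallVector<TargetRegisterClass*, 2> CriticalRCs; // binds to RegClassVector
    if (ST.enablePostRAScheduler(OptLevel, Mode, CriticalRCs)) {
      // On Atom: Mode == TargetSubtargetInfo::ANTIDEP_CRITICAL and the
      // critical-path register-class list is left empty.
    }
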
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.h b/contrib/llvm/lib/Target/X86/X86Subtarget.h
index 3258d3d..7fd832b 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.h
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.h
@@ -1,4 +1,4 @@
-//=====---- X86Subtarget.h - Define Subtarget for the X86 -----*- C++ -*--====//
+//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,9 +14,9 @@
#ifndef X86SUBTARGET_H
#define X86SUBTARGET_H
+#include "llvm/CallingConv.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Target/TargetSubtargetInfo.h"
-#include "llvm/CallingConv.h"
#include <string>
#define GET_SUBTARGETINFO_HEADER
@@ -42,13 +42,20 @@ enum Style {
class X86Subtarget : public X86GenSubtargetInfo {
protected:
enum X86SSEEnum {
- NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42
+ NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2
};
enum X863DNowEnum {
NoThreeDNow, ThreeDNow, ThreeDNowA
};
+ enum X86ProcFamilyEnum {
+ Others, IntelAtom
+ };
+
+ /// X86ProcFamily - X86 processor family: Intel Atom, and others
+ X86ProcFamilyEnum X86ProcFamily;
+
/// PICStyle - Which PIC style to use
///
PICStyles::Style PICStyle;
@@ -75,9 +82,6 @@ protected:
/// HasSSE4A - True if the processor supports SSE4A instructions.
bool HasSSE4A;
- /// HasAVX - Target has AVX instructions
- bool HasAVX;
-
/// HasAES - Target has AES instructions
bool HasAES;
@@ -90,6 +94,9 @@ protected:
/// HasFMA4 - Target has 4-operand fused multiply-add
bool HasFMA4;
+ /// HasXOP - Target has XOP instructions
+ bool HasXOP;
+
/// HasMOVBE - True if the processor has the MOVBE instruction.
bool HasMOVBE;
@@ -99,12 +106,18 @@ protected:
/// HasF16C - Processor has 16-bit floating point conversion instructions.
bool HasF16C;
+  /// HasFSGSBase - Processor has FS/GS base instructions.
+ bool HasFSGSBase;
+
/// HasLZCNT - Processor has LZCNT instruction.
bool HasLZCNT;
/// HasBMI - Processor has BMI1 instructions.
bool HasBMI;
+ /// HasBMI2 - Processor has BMI2 instructions.
+ bool HasBMI2;
+
/// IsBTMemSlow - True if BT (bit test) of memory instructions are slow.
bool IsBTMemSlow;
@@ -119,6 +132,13 @@ protected:
/// this is true for most x86-64 chips, but not the first AMD chips.
bool HasCmpxchg16b;
+ /// UseLeaForSP - True if the LEA instruction should be used for adjusting
+ /// the stack pointer. This is an optimization for Intel Atom processors.
+ bool UseLeaForSP;
+
+ /// PostRAScheduler - True if using post-register-allocation scheduler.
+ bool PostRAScheduler;
+
/// stackAlignment - The minimum alignment known to hold for the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -129,14 +149,14 @@ protected:
/// TargetTriple - What processor and OS we're targeting.
Triple TargetTriple;
+
+ /// Instruction itineraries for scheduling
+ InstrItineraryData InstrItins;
private:
/// In64BitMode - True if compiling for 64-bit, false for 32-bit.
bool In64BitMode;
- /// InNaClMode - True if compiling for Native Client target.
- bool InNaClMode;
-
public:
/// This constructor initializes the data members to match that
@@ -176,26 +196,31 @@ public:
bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
bool hasSSE41() const { return X86SSELevel >= SSE41; }
bool hasSSE42() const { return X86SSELevel >= SSE42; }
+ bool hasAVX() const { return X86SSELevel >= AVX; }
+ bool hasAVX2() const { return X86SSELevel >= AVX2; }
bool hasSSE4A() const { return HasSSE4A; }
bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
bool hasPOPCNT() const { return HasPOPCNT; }
- bool hasAVX() const { return HasAVX; }
- bool hasXMM() const { return hasSSE1() || hasAVX(); }
- bool hasXMMInt() const { return hasSSE2() || hasAVX(); }
bool hasAES() const { return HasAES; }
bool hasCLMUL() const { return HasCLMUL; }
bool hasFMA3() const { return HasFMA3; }
bool hasFMA4() const { return HasFMA4; }
+ bool hasXOP() const { return HasXOP; }
bool hasMOVBE() const { return HasMOVBE; }
bool hasRDRAND() const { return HasRDRAND; }
bool hasF16C() const { return HasF16C; }
+ bool hasFSGSBase() const { return HasFSGSBase; }
bool hasLZCNT() const { return HasLZCNT; }
bool hasBMI() const { return HasBMI; }
+ bool hasBMI2() const { return HasBMI2; }
bool isBTMemSlow() const { return IsBTMemSlow; }
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool hasVectorUAMem() const { return HasVectorUAMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
+ bool useLeaForSP() const { return UseLeaForSP; }
+
+ bool isAtom() const { return X86ProcFamily == IntelAtom; }
const Triple &getTargetTriple() const { return TargetTriple; }
@@ -209,38 +234,28 @@ public:
// ELF is a reasonably sane default and the only other X86 targets we
// support are Darwin and Windows. Just use "not those".
- bool isTargetELF() const {
- return !isTargetDarwin() && !isTargetWindows() && !isTargetCygMing();
- }
+ bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
bool isTargetNaCl() const {
return TargetTriple.getOS() == Triple::NativeClient;
}
bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
-
bool isTargetWindows() const { return TargetTriple.getOS() == Triple::Win32; }
bool isTargetMingw() const { return TargetTriple.getOS() == Triple::MinGW32; }
bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
- bool isTargetCygMing() const {
- return isTargetMingw() || isTargetCygwin();
- }
-
- /// isTargetCOFF - Return true if this is any COFF/Windows target variant.
- bool isTargetCOFF() const {
- return isTargetMingw() || isTargetCygwin() || isTargetWindows();
- }
+ bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
+ bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
+ bool isTargetEnvMacho() const { return TargetTriple.isEnvironmentMachO(); }
bool isTargetWin64() const {
// FIXME: x86_64-cygwin has not been released yet.
- return In64BitMode && (isTargetCygMing() || isTargetWindows());
- }
-
- bool isTargetEnvMacho() const {
- return isTargetDarwin() || (TargetTriple.getEnvironment() == Triple::MachO);
+ return In64BitMode && TargetTriple.isOSWindows();
}
bool isTargetWin32() const {
+ // FIXME: Cygwin is included for isTargetWin64 -- should it be included
+ // here too?
return !In64BitMode && (isTargetMingw() || isTargetWindows());
}
@@ -286,6 +301,15 @@ public:
/// indicating the number of scheduling cycles of backscheduling that
/// should be attempted.
unsigned getSpecialAddressLatency() const;
+
+  /// enablePostRAScheduler - Enable the post-RA scheduler; used for the
+  /// Atom optimization.
+ bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode& Mode,
+ RegClassVector& CriticalPathRCs) const;
+
+  /// getInstrItineraryData - Return the instruction itineraries based on
+  /// the subtarget selection.
+ const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
};
} // End llvm namespace
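
Folding AVX and AVX2 into the ordered X86SSEEnum is what lets hasAVX()/hasAVX2() become plain >= comparisons and makes the removed hasXMM()/hasXMMInt() shims unnecessary: every level now implies all lower ones. A short sketch of the invariant, assuming an X86Subtarget reference ST:

    // Sketch: the ordering invariant after this change.
    // NoMMXSSE < MMX < SSE1 < SSE2 < SSE3 < SSSE3 < SSE41 < SSE42 < AVX < AVX2
    if (ST.hasAVX())
      assert(ST.hasSSE42() && ST.hasSSE2()); // implied by the enum ordering
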
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
index 15c6c4e..f4b7a62 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -28,11 +28,14 @@ extern "C" void LLVMInitializeX86Target() {
RegisterTargetMachine<X86_64TargetMachine> Y(TheX86_64Target);
}
+void X86_32TargetMachine::anchor() { }
X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : X86TargetMachine(T, TT, CPU, FS, RM, CM, false),
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false),
DataLayout(getSubtargetImpl()->isTargetDarwin() ?
"e-p:32:32-f64:32:64-i64:32:64-f80:128:128-f128:128:128-"
"n8:16:32-S128" :
@@ -48,11 +51,14 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT,
JITInfo(*this) {
}
+void X86_64TargetMachine::anchor() { }
X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : X86TargetMachine(T, TT, CPU, FS, RM, CM, true),
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true),
DataLayout("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-"
"n8:16:32:64-S128"),
InstrInfo(*this),
@@ -65,12 +71,15 @@ X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT,
///
X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL,
bool is64Bit)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
- Subtarget(TT, CPU, FS, StackAlignmentOverride, is64Bit),
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, Options.StackAlignmentOverride, is64Bit),
FrameLowering(*this, Subtarget),
- ELFWriterInfo(is64Bit, true) {
+ ELFWriterInfo(is64Bit, true),
+ InstrItins(Subtarget.getInstrItineraryData()){
// Determine the PICStyle based on the target selected.
if (getRelocationModel() == Reloc::Static) {
// Unless we're in PIC or DynamicNoPIC mode, set the PIC style to None.
@@ -92,8 +101,8 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT,
}
// default to hard float ABI
- if (FloatABIType == FloatABI::Default)
- FloatABIType = FloatABI::Hard;
+ if (Options.FloatABIType == FloatABI::Default)
+ this->Options.FloatABIType = FloatABI::Hard;
}
//===----------------------------------------------------------------------===//
@@ -102,46 +111,67 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT,
static cl::opt<bool>
UseVZeroUpper("x86-use-vzeroupper",
cl::desc("Minimize AVX to SSE transition penalty"),
- cl::init(false));
+ cl::init(true));
//===----------------------------------------------------------------------===//
// Pass Pipeline Configuration
//===----------------------------------------------------------------------===//
-bool X86TargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+namespace {
+/// X86 Code Generator Pass Configuration Options.
+class X86PassConfig : public TargetPassConfig {
+public:
+ X86PassConfig(X86TargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ X86TargetMachine &getX86TargetMachine() const {
+ return getTM<X86TargetMachine>();
+ }
+
+ const X86Subtarget &getX86Subtarget() const {
+ return *getX86TargetMachine().getSubtargetImpl();
+ }
+
+ virtual bool addInstSelector();
+ virtual bool addPreRegAlloc();
+ virtual bool addPostRegAlloc();
+ virtual bool addPreEmitPass();
+};
+} // namespace
+
+TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new X86PassConfig(this, PM);
+}
+
+bool X86PassConfig::addInstSelector() {
// Install an instruction selector.
- PM.add(createX86ISelDag(*this, OptLevel));
+ PM.add(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
// For 32-bit, prepend instructions to set the "global base reg" for PIC.
- if (!Subtarget.is64Bit())
+ if (!getX86Subtarget().is64Bit())
PM.add(createGlobalBaseRegPass());
return false;
}
-bool X86TargetMachine::addPreRegAlloc(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+bool X86PassConfig::addPreRegAlloc() {
PM.add(createX86MaxStackAlignmentHeuristicPass());
return false; // -print-machineinstr shouldn't print after this.
}
-bool X86TargetMachine::addPostRegAlloc(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+bool X86PassConfig::addPostRegAlloc() {
PM.add(createX86FloatingPointStackifierPass());
return true; // -print-machineinstr should print after this.
}
-bool X86TargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
+bool X86PassConfig::addPreEmitPass() {
bool ShouldPrint = false;
- if (OptLevel != CodeGenOpt::None &&
- (Subtarget.hasSSE2() || Subtarget.hasAVX())) {
+ if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2()) {
PM.add(createExecutionDependencyFixPass(&X86::VR128RegClass));
ShouldPrint = true;
}
- if (Subtarget.hasAVX() && UseVZeroUpper) {
+ if (getX86Subtarget().hasAVX() && UseVZeroUpper) {
PM.add(createX86IssueVZeroUpperPass());
ShouldPrint = true;
}
@@ -150,7 +180,6 @@ bool X86TargetMachine::addPreEmitPass(PassManagerBase &PM,
}
bool X86TargetMachine::addCodeEmitter(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
JITCodeEmitter &JCE) {
PM.add(createX86JITCodeEmitterPass(*this, JCE));
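
The pass-pipeline hooks move off X86TargetMachine onto the new TargetPassConfig object, so OptLevel no longer threads through every hook; it is read from the config via getOptLevel() instead. Other backends follow the same recipe; a hedged sketch for a hypothetical target Foo (all Foo* names are illustrative, not from this diff):

    // Hypothetical out-of-tree target adopting the same pattern:
    namespace {
    class FooPassConfig : public TargetPassConfig {
    public:
      FooPassConfig(FooTargetMachine *TM, PassManagerBase &PM)
        : TargetPassConfig(TM, PM) {}
      virtual bool addInstSelector() {
        PM.add(createFooISelDag(getTM<FooTargetMachine>(), getOptLevel()));
        return false;
      }
    };
    } // namespace

    TargetPassConfig *FooTargetMachine::createPassConfig(PassManagerBase &PM) {
      return new FooPassConfig(this, PM);
    }
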
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.h b/contrib/llvm/lib/Target/X86/X86TargetMachine.h
index d1569aa..8e935af 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.h
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.h
@@ -27,19 +27,20 @@
#include "llvm/Target/TargetFrameLowering.h"
namespace llvm {
-
-class formatted_raw_ostream;
+
class StringRef;
class X86TargetMachine : public LLVMTargetMachine {
- X86Subtarget Subtarget;
- X86FrameLowering FrameLowering;
- X86ELFWriterInfo ELFWriterInfo;
+ X86Subtarget Subtarget;
+ X86FrameLowering FrameLowering;
+ X86ELFWriterInfo ELFWriterInfo;
+ InstrItineraryData InstrItins;
public:
- X86TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
+ X86TargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL,
bool is64Bit);
virtual const X86InstrInfo *getInstrInfo() const {
@@ -55,7 +56,7 @@ public:
virtual const X86TargetLowering *getTargetLowering() const {
llvm_unreachable("getTargetLowering not implemented");
}
- virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
+ virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
llvm_unreachable("getSelectionDAGInfo not implemented");
}
virtual const X86RegisterInfo *getRegisterInfo() const {
@@ -64,19 +65,21 @@ public:
virtual const X86ELFWriterInfo *getELFWriterInfo() const {
return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
}
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return &InstrItins;
+ }
// Set up the pass pipeline.
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPostRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addCodeEmitter(PassManagerBase &PM, CodeGenOpt::Level OptLevel,
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
+ virtual bool addCodeEmitter(PassManagerBase &PM,
JITCodeEmitter &JCE);
};
/// X86_32TargetMachine - X86 32-bit target machine.
///
class X86_32TargetMachine : public X86TargetMachine {
+ virtual void anchor();
const TargetData DataLayout; // Calculates type size & alignment
X86InstrInfo InstrInfo;
X86SelectionDAGInfo TSInfo;
@@ -84,13 +87,14 @@ class X86_32TargetMachine : public X86TargetMachine {
X86JITInfo JITInfo;
public:
X86_32TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
virtual const TargetData *getTargetData() const { return &DataLayout; }
virtual const X86TargetLowering *getTargetLowering() const {
return &TLInfo;
}
- virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
+ virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
return &TSInfo;
}
virtual const X86InstrInfo *getInstrInfo() const {
@@ -104,6 +108,7 @@ public:
/// X86_64TargetMachine - X86 64-bit target machine.
///
class X86_64TargetMachine : public X86TargetMachine {
+ virtual void anchor();
const TargetData DataLayout; // Calculates type size & alignment
X86InstrInfo InstrInfo;
X86SelectionDAGInfo TSInfo;
@@ -111,13 +116,14 @@ class X86_64TargetMachine : public X86TargetMachine {
X86JITInfo JITInfo;
public:
X86_64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
virtual const TargetData *getTargetData() const { return &DataLayout; }
virtual const X86TargetLowering *getTargetLowering() const {
return &TLInfo;
}
- virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
+ virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
return &TSInfo;
}
virtual const X86InstrInfo *getInstrInfo() const {
diff --git a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
index 991f322..718f35e 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.cpp
@@ -1,4 +1,4 @@
-//===-- llvm/Target/X86/X86TargetObjectFile.cpp - X86 Object Info ---------===//
+//===-- X86TargetObjectFile.cpp - X86 Object Info -------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,6 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Dwarf.h"
using namespace llvm;
using namespace dwarf;
diff --git a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
index d7adf27..a02a368 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
+++ b/contrib/llvm/lib/Target/X86/X86TargetObjectFile.h
@@ -1,4 +1,4 @@
-//===-- llvm/Target/X86/X86TargetObjectFile.h - X86 Object Info -*- C++ -*-===//
+//===-- X86TargetObjectFile.h - X86 Object Info -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,7 +15,6 @@
#include "llvm/Target/TargetLoweringObjectFile.h"
namespace llvm {
- class X86TargetMachine;
/// X8664_MachoTargetObjectFile - This TLOF implementation is used for Darwin
/// x86-64.
diff --git a/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp b/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 3958494..2fd78a7 100644
--- a/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/contrib/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -14,14 +14,16 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "x86-codegen"
+#define DEBUG_TYPE "x86-vzeroupper"
#include "X86.h"
#include "X86InstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/GlobalValue.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
@@ -41,6 +43,60 @@ namespace {
private:
const TargetInstrInfo *TII; // Machine instruction info.
MachineBasicBlock *MBB; // Current basic block
+
+ // Any YMM register live-in to this function?
+ bool FnHasLiveInYmm;
+
+ // BBState - Contains the state of each MBB: unknown, clean, dirty
+ SmallVector<uint8_t, 8> BBState;
+
+ // BBSolved - Keep track of all MBB which had been already analyzed
+ // and there is no further processing required.
+ BitVector BBSolved;
+
+  // Machine Basic Blocks are classified by this pass as follows:
+ //
+  // ST_UNKNOWN - The MBB state is unknown, meaning from the entry state
+  // until the MBB exit there isn't an instruction using YMM to change
+  // the state to dirty, or one of the incoming predecessors is unknown
+  // and there's no dirty predecessor among them.
+ //
+  // ST_CLEAN - No YMM usage at the end of the MBB. An MBB could have
+  // instructions using YMM and still be marked ST_CLEAN, as long as the
+  // state is cleaned by a vzeroupper before any call.
+ //
+ // ST_DIRTY - Any MBB ending with a YMM usage not cleaned up by a
+ // vzeroupper instruction.
+ //
+ // ST_INIT - Placeholder for an empty state set
+ //
+ enum {
+ ST_UNKNOWN = 0,
+ ST_CLEAN = 1,
+ ST_DIRTY = 2,
+ ST_INIT = 3
+ };
+
+ // computeState - Given two states, compute the resulting state, in
+ // the following way
+ //
+ // 1) One dirty state yields another dirty state
+ // 2) All states must be clean for the result to be clean
+ // 3) If none above and one unknown, the result state is also unknown
+ //
+ unsigned computeState(unsigned PrevState, unsigned CurState) {
+ if (PrevState == ST_INIT)
+ return CurState;
+
+ if (PrevState == ST_DIRTY || CurState == ST_DIRTY)
+ return ST_DIRTY;
+
+ if (PrevState == ST_CLEAN && CurState == ST_CLEAN)
+ return ST_CLEAN;
+
+ return ST_UNKNOWN;
+ }
+
};
char VZeroUpperInserter::ID = 0;
}
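
computeState is a join over the small lattice {ST_INIT, ST_CLEAN, ST_DIRTY, ST_UNKNOWN}, with ST_INIT acting as the identity element. A few worked merges, spelling out the three rules from the comment above (sketch only):

    unsigned S = computeState(ST_INIT, ST_CLEAN); // ST_CLEAN: INIT is identity
    S = computeState(S, ST_DIRTY);                // ST_DIRTY:   rule 1 wins
    S = computeState(ST_CLEAN, ST_CLEAN);         // ST_CLEAN:   rule 2
    S = computeState(ST_UNKNOWN, ST_CLEAN);       // ST_UNKNOWN: rule 3
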
@@ -49,37 +105,82 @@ FunctionPass *llvm::createX86IssueVZeroUpperPass() {
return new VZeroUpperInserter();
}
+static bool isYmmReg(unsigned Reg) {
+ if (Reg >= X86::YMM0 && Reg <= X86::YMM15)
+ return true;
+
+ return false;
+}
+
+static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) {
+ for (MachineRegisterInfo::livein_iterator I = MRI.livein_begin(),
+ E = MRI.livein_end(); I != E; ++I)
+ if (isYmmReg(I->first))
+ return true;
+
+ return false;
+}
+
+static bool hasYmmReg(MachineInstr *MI) {
+ for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ if (MO.isDebug())
+ continue;
+ if (isYmmReg(MO.getReg()))
+ return true;
+ }
+ return false;
+}
+
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// vzero upper instructions before function calls.
bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getTarget().getInstrInfo();
- bool Changed = false;
-
- // Process any unreachable blocks in arbitrary order now.
- for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
- Changed |= processBasicBlock(MF, *BB);
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ bool EverMadeChange = false;
- return Changed;
-}
+ // Fast check: if the function doesn't use any ymm registers, we don't need
+ // to insert any VZEROUPPER instructions. This is constant-time, so it is
+ // cheap in the common case of no ymm use.
+ bool YMMUsed = false;
+ const TargetRegisterClass *RC = X86::VR256RegisterClass;
+ for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
+ i != e; i++) {
+ if (MRI.isPhysRegUsed(*i)) {
+ YMMUsed = true;
+ break;
+ }
+ }
+ if (!YMMUsed)
+ return EverMadeChange;
-static bool isCallToModuleFn(const MachineInstr *MI) {
- assert(MI->getDesc().isCall() && "Isn't a call instruction");
+  // Pre-compute whether any YMM registers are live into this function.
+ FnHasLiveInYmm = checkFnHasLiveInYmm(MRI);
- for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ assert(BBState.empty());
+ BBState.resize(MF.getNumBlockIDs(), 0);
+ BBSolved.resize(MF.getNumBlockIDs(), 0);
- if (!MO.isGlobal())
- continue;
+  // Each BB state depends on all of its predecessors, so loop until
+  // everything converges. (Once we converge, we can implicitly mark
+  // everything that is still ST_UNKNOWN as ST_CLEAN.)
+ while (1) {
+ bool MadeChange = false;
- const GlobalValue *GV = MO.getGlobal();
- GlobalValue::LinkageTypes LT = GV->getLinkage();
- if (GV->isInternalLinkage(LT) || GV->isPrivateLinkage(LT) ||
- (GV->isExternalLinkage(LT) && !GV->isDeclaration()))
- return true;
+ // Process all basic blocks.
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
+ MadeChange |= processBasicBlock(MF, *I);
- return false;
+ // If this iteration over the code changed anything, keep iterating.
+ if (!MadeChange) break;
+ EverMadeChange = true;
}
- return false;
+
+ BBState.clear();
+ BBSolved.clear();
+ return EverMadeChange;
}
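The loop above is a standard round-robin dataflow fixed point: each block's state can only move within a finite four-value set, so iteration terminates once no block changes. A minimal standalone sketch of the idiom (hypothetical names, not from this patch):

#include <vector>

// Re-run a per-block transfer function until no state changes; with a
// finite state set per block, the loop must terminate.
template <typename TransferFn>
void solveToFixedPoint(std::vector<unsigned> &State, TransferFn Transfer) {
  for (bool Changed = true; Changed;) {
    Changed = false;
    for (unsigned BB = 0, E = State.size(); BB != E; ++BB) {
      unsigned New = Transfer(BB, State);
      if (New != State[BB]) { State[BB] = New; Changed = true; }
    }
  }
}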
/// processBasicBlock - Loop over all of the instructions in the basic block,
@@ -87,19 +188,98 @@ static bool isCallToModuleFn(const MachineInstr *MI) {
bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
MachineBasicBlock &BB) {
bool Changed = false;
+ unsigned BBNum = BB.getNumber();
MBB = &BB;
+ // Don't process already solved BBs
+ if (BBSolved[BBNum])
+ return false; // No changes
+
+ // Check the state of all predecessors
+ unsigned EntryState = ST_INIT;
+ for (MachineBasicBlock::const_pred_iterator PI = BB.pred_begin(),
+ PE = BB.pred_end(); PI != PE; ++PI) {
+ EntryState = computeState(EntryState, BBState[(*PI)->getNumber()]);
+ if (EntryState == ST_DIRTY)
+ break;
+ }
+
+  // The entry MBB for the function may set the initial state to dirty if
+  // the function receives any YMM incoming arguments.
+ if (MBB == MF.begin()) {
+ EntryState = ST_CLEAN;
+ if (FnHasLiveInYmm)
+ EntryState = ST_DIRTY;
+ }
+
+ // The current state is initialized according to the predecessors
+ unsigned CurState = EntryState;
+ bool BBHasCall = false;
+
for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
MachineInstr *MI = I;
DebugLoc dl = I->getDebugLoc();
+ bool isControlFlow = MI->isCall() || MI->isReturn();
+
+ // Shortcut: don't need to check regular instructions in dirty state.
+ if (!isControlFlow && CurState == ST_DIRTY)
+ continue;
+
+ if (hasYmmReg(MI)) {
+ // We found a ymm-using instruction; this could be an AVX instruction,
+ // or it could be control flow.
+ CurState = ST_DIRTY;
+ continue;
+ }
- // Insert a vzeroupper instruction before each control transfer
- // to functions outside this module
- if (MI->getDesc().isCall() && !isCallToModuleFn(MI)) {
- BuildMI(*MBB, I, dl, TII->get(X86::VZEROUPPER));
- ++NumVZU;
+ // Check for control-flow out of the current function (which might
+ // indirectly execute SSE instructions).
+ if (!isControlFlow)
+ continue;
+
+ BBHasCall = true;
+
+ // The VZEROUPPER instruction resets the upper 128 bits of all Intel AVX
+ // registers. This instruction has zero latency. In addition, the processor
+  // changes back to the Clean state, after which execution of Intel SSE
+ // instructions or Intel AVX instructions has no transition penalty. Add
+ // the VZEROUPPER instruction before any function call/return that might
+ // execute SSE code.
+ // FIXME: In some cases, we may want to move the VZEROUPPER into a
+ // predecessor block.
+ if (CurState == ST_DIRTY) {
+      // Only insert the VZEROUPPER if the entry state isn't unknown. When
+      // unknown, only compute the information within the block so it is
+      // available at the exit if possible, but don't change the block.
+ if (EntryState != ST_UNKNOWN) {
+ BuildMI(*MBB, I, dl, TII->get(X86::VZEROUPPER));
+ ++NumVZU;
+ }
+
+      // After the inserted VZEROUPPER the state becomes clean again, but
+      // other YMM uses may appear before subsequent calls or even before
+      // the end of the BB.
+ CurState = ST_CLEAN;
}
}
+ DEBUG(dbgs() << "MBB #" << BBNum
+ << ", current state: " << CurState << '\n');
+
+  // A BB can only be considered solved when we have both done all the
+  // necessary transformations and computed the exit state. This happens
+  // in two cases:
+  // 1) We know the entry state: this immediately implies the exit state
+  // and all the necessary transformations.
+  // 2) There are no calls, and the block's instructions determine a known
+  // exit state: no transformations are necessary.
+ if (EntryState != ST_UNKNOWN || (!BBHasCall && CurState != ST_UNKNOWN))
+ BBSolved[BBNum] = true;
+
+ if (CurState != BBState[BBNum])
+ Changed = true;
+
+ BBState[BBNum] = CurState;
return Changed;
}
diff --git a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
index 42ab1b3..1cfdbda 100644
--- a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
@@ -8,8 +8,11 @@
//===----------------------------------------------------------------------===//
#include "XCoreMCAsmInfo.h"
+#include "llvm/ADT/StringRef.h"
using namespace llvm;
+void XCoreMCAsmInfo::anchor() { }
+
XCoreMCAsmInfo::XCoreMCAsmInfo(const Target &T, StringRef TT) {
SupportsDebugInformation = true;
Data16bitsDirective = "\t.short\t";
diff --git a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
index 8403922..0767775 100644
--- a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
@@ -1,4 +1,4 @@
-//=====-- XCoreMCAsmInfo.h - XCore asm properties -------------*- C++ -*--====//
+//===-- XCoreMCAsmInfo.h - XCore asm properties ----------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,13 +14,14 @@
#ifndef XCORETARGETASMINFO_H
#define XCORETARGETASMINFO_H
-#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
+ class StringRef;
class Target;
class XCoreMCAsmInfo : public MCAsmInfo {
+ virtual void anchor();
public:
explicit XCoreMCAsmInfo(const Target &T, StringRef TT);
};
diff --git a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
index 276e841..bbfdd43 100644
--- a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
@@ -1,4 +1,4 @@
-//===-- XCoreMCTargetDesc.cpp - XCore Target Descriptions -------*- C++ -*-===//
+//===-- XCoreMCTargetDesc.cpp - XCore Target Descriptions -----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,6 +17,7 @@
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_MC_DESC
@@ -61,9 +62,10 @@ static MCAsmInfo *createXCoreMCAsmInfo(const Target &T, StringRef TT) {
}
static MCCodeGenInfo *createXCoreMCCodeGenInfo(StringRef TT, Reloc::Model RM,
- CodeModel::Model CM) {
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
MCCodeGenInfo *X = new MCCodeGenInfo();
- X->InitMCCodeGenInfo(RM, CM);
+ X->InitMCCodeGenInfo(RM, CM, OL);
return X;
}
diff --git a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.h b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.h
index 3cfc3764..a255adb 100644
--- a/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.h
@@ -15,9 +15,7 @@
#define XCOREMCTARGETDESC_H
namespace llvm {
-class MCSubtargetInfo;
class Target;
-class StringRef;
extern Target TheXCoreTarget;
diff --git a/contrib/llvm/lib/Target/XCore/XCore.h b/contrib/llvm/lib/Target/XCore/XCore.h
index b8fb0ca..08f091e 100644
--- a/contrib/llvm/lib/Target/XCore/XCore.h
+++ b/contrib/llvm/lib/Target/XCore/XCore.h
@@ -24,7 +24,8 @@ namespace llvm {
class XCoreTargetMachine;
class formatted_raw_ostream;
- FunctionPass *createXCoreISelDag(XCoreTargetMachine &TM);
+ FunctionPass *createXCoreISelDag(XCoreTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
} // end namespace llvm;
diff --git a/contrib/llvm/lib/Target/XCore/XCore.td b/contrib/llvm/lib/Target/XCore/XCore.td
index 3840189..04a1dd5 100644
--- a/contrib/llvm/lib/Target/XCore/XCore.td
+++ b/contrib/llvm/lib/Target/XCore/XCore.td
@@ -1,4 +1,4 @@
-//===- XCore.td - Describe the XCore Target Machine --------*- tablegen -*-===//
+//===-- XCore.td - Describe the XCore Target Machine -------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
+// This is the top level entry point for the XCore target.
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index 7f8b169..50fda58 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -1,4 +1,4 @@
-//===-- XCoreFrameLowering.cpp - Frame info for XCore Target -----*- C++ -*-==//
+//===-- XCoreFrameLowering.cpp - Frame info for XCore Target --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "XCore.h"
#include "XCoreFrameLowering.h"
+#include "XCore.h"
#include "XCoreInstrInfo.h"
#include "XCoreMachineFunctionInfo.h"
#include "llvm/Function.h"
@@ -84,7 +84,8 @@ XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
}
bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
- return DisableFramePointerElim(MF) || MF.getFrameInfo()->hasVarSizedObjects();
+ return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ MF.getFrameInfo()->hasVarSizedObjects();
}
void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
@@ -92,8 +93,6 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo *MMI = &MF.getMMI();
- const XCoreRegisterInfo *RegInfo =
- static_cast<const XCoreRegisterInfo*>(MF.getTarget().getRegisterInfo());
const XCoreInstrInfo &TII =
*static_cast<const XCoreInstrInfo*>(MF.getTarget().getInstrInfo());
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
@@ -118,7 +117,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
// FIXME: could emit multiple instructions.
report_fatal_error("emitPrologue Frame size too big: " + Twine(FrameSize));
}
- bool emitFrameMoves = RegInfo->needsFrameMoves(MF);
+ bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(MF);
// Do we need to allocate space on the stack?
if (FrameSize) {
diff --git a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
index c591e93..4c51aa5 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreFrameLowering.h
@@ -1,4 +1,4 @@
-//===-- XCoreFrameLowering.h - Frame info for XCore Target -------*- C++ -*-==//
+//===-- XCoreFrameLowering.h - Frame info for XCore Target ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index 4dac1ce..7564fba 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -41,8 +41,8 @@ namespace {
const XCoreSubtarget &Subtarget;
public:
- XCoreDAGToDAGISel(XCoreTargetMachine &TM)
- : SelectionDAGISel(TM),
+ XCoreDAGToDAGISel(XCoreTargetMachine &TM, CodeGenOpt::Level OptLevel)
+ : SelectionDAGISel(TM, OptLevel),
Lowering(*TM.getTargetLowering()),
Subtarget(*TM.getSubtargetImpl()) { }
@@ -83,8 +83,9 @@ namespace {
/// createXCoreISelDag - This pass converts a legalized DAG into a
/// XCore-specific DAG, ready for instruction scheduling.
///
-FunctionPass *llvm::createXCoreISelDag(XCoreTargetMachine &TM) {
- return new XCoreDAGToDAGISel(TM);
+FunctionPass *llvm::createXCoreISelDag(XCoreTargetMachine &TM,
+ CodeGenOpt::Level OptLevel) {
+ return new XCoreDAGToDAGISel(TM, OptLevel);
}
bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Addr, SDValue &Base,
@@ -120,7 +121,7 @@ bool XCoreDAGToDAGISel::SelectADDRdpii(SDValue Addr, SDValue &Base,
ConstantSDNode *CN = 0;
if ((Addr.getOperand(0).getOpcode() == XCoreISD::DPRelativeWrapper)
&& (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
- && (CN->getSExtValue() % 4 == 0)) {
+ && (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
// Constant word offset from an object in the data region
Base = Addr.getOperand(0).getOperand(0);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
@@ -141,7 +142,7 @@ bool XCoreDAGToDAGISel::SelectADDRcpii(SDValue Addr, SDValue &Base,
ConstantSDNode *CN = 0;
if ((Addr.getOperand(0).getOpcode() == XCoreISD::CPRelativeWrapper)
&& (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
- && (CN->getSExtValue() % 4 == 0)) {
+ && (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
// Constant word offset from an object in the data region
Base = Addr.getOperand(0).getOperand(0);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 2afe0e3..fdf2b78 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1,4 +1,4 @@
-//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===//
+//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -36,7 +36,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/VectorExtras.h"
using namespace llvm;
const char *XCoreTargetLowering::
@@ -109,6 +108,8 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
setOperationAction(ISD::TRAP, MVT::Other, Legal);
@@ -186,7 +187,6 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
default:
llvm_unreachable("unimplemented operand");
- return SDValue();
}
}
@@ -198,7 +198,6 @@ void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
switch (N->getOpcode()) {
default:
llvm_unreachable("Don't know how to custom expand this!");
- return;
case ISD::ADD:
case ISD::SUB:
Results.push_back(ExpandADDSUB(N, DAG));
@@ -274,9 +273,8 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
}
- if (! GVar) {
+ if (!GVar) {
llvm_unreachable("Thread local object not a GlobalVariable?");
- return SDValue();
}
Type *Ty = cast<PointerType>(GV->getType())->getElementType();
if (!Ty->isSized() || isZeroLengthArray(Ty)) {
@@ -386,6 +384,15 @@ IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
Offset = off;
return true;
}
+ // Check for an aligned global variable.
+ if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(*Root)) {
+ const GlobalValue *GV = GA->getGlobal();
+ if (GA->getOffset() == 0 && GV->getAlignment() >= 4) {
+ AlignedBase = Base;
+ Offset = off;
+ return true;
+ }
+ }
return false;
}
@@ -418,7 +425,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
//
return DAG.getLoad(getPointerTy(), DL, Chain, BasePtr,
MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
}
// Lower to
// ldw low, base[offset >> 2]
@@ -435,9 +442,11 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);
SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
- LowAddr, MachinePointerInfo(), false, false, 0);
+ LowAddr, MachinePointerInfo(),
+ false, false, false, 0);
SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
- HighAddr, MachinePointerInfo(), false, false, 0);
+ HighAddr, MachinePointerInfo(),
+ false, false, false, 0);
SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
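The surrounding lowering assembles a misaligned word from the two aligned words that contain it. A host-side emulation of the emitted ldw/shr/shl/or sequence (a sketch assuming a little-endian 32-bit target; names are hypothetical):

#include <cassert>
#include <cstdint>

// Mem is word-aligned storage; ByteAddr is a byte offset that is not a
// multiple of 4 (word-aligned loads take the direct path instead).
static uint32_t loadMisaligned(const uint32_t *Mem, uint32_t ByteAddr) {
  assert((ByteAddr & 0x3) != 0 && "aligned loads use a plain ldw");
  uint32_t Low   = Mem[ByteAddr >> 2];        // ldw low, base[offset >> 2]
  uint32_t High  = Mem[(ByteAddr >> 2) + 1];  // ldw high, base[(offset >> 2)+1]
  uint32_t Shift = (ByteAddr & 0x3) * 8;      // byte offset in bits
  return (Low >> Shift) | (High << (32 - Shift));
}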
@@ -478,8 +487,8 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, IntPtrTy, false, false,
- false, false, 0, CallingConv::C, false,
- /*isReturnValueUsed=*/true,
+ false, false, 0, CallingConv::C, /*isTailCall=*/false,
+ /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
Args, DAG, DL);
@@ -540,8 +549,8 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false,
- false, false, 0, CallingConv::C, false,
- /*isReturnValueUsed=*/true,
+ false, false, 0, CallingConv::C, /*isTailCall=*/false,
+ /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
Args, DAG, dl);
@@ -745,14 +754,14 @@ SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
llvm_unreachable("unimplemented");
-  // FIX Arguments passed by reference need a extra dereference.
+  // FIXME: Arguments passed by reference need an extra dereference.
SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();
const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
EVT VT = Node->getValueType(0);
SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
Node->getOperand(1), MachinePointerInfo(V),
- false, false, 0);
+ false, false, false, 0);
// Increment the pointer, VAList, to the next vararg
SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
DAG.getConstant(VT.getSizeInBits(),
@@ -762,7 +771,7 @@ LowerVAARG(SDValue Op, SelectionDAG &DAG) const
MachinePointerInfo(V), false, false, 0);
// Load the actual argument out of the pointer VAList
return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
- false, false, 0);
+ false, false, false, 0);
}
SDValue XCoreTargetLowering::
@@ -866,7 +875,7 @@ LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
SDValue
XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -1137,13 +1146,13 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
- false, false, 0));
+ false, false, false, 0));
}
}
if (isVarArg) {
/* Argument registers */
- static const unsigned ArgRegs[] = {
+ static const uint16_t ArgRegs[] = {
XCore::R0, XCore::R1, XCore::R2, XCore::R3
};
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
@@ -1354,8 +1363,8 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
- if (KnownZero == Mask) {
+ DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ if ((KnownZero & Mask) == Mask) {
SDValue Carry = DAG.getConstant(0, VT);
SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
SDValue Ops [] = { Carry, Result };
@@ -1377,8 +1386,8 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
- if (KnownZero == Mask) {
+ DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ if ((KnownZero & Mask) == Mask) {
SDValue Borrow = N2;
SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
DAG.getConstant(0, VT), N2);
@@ -1393,8 +1402,8 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
- if (KnownZero == Mask) {
+ DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ if ((KnownZero & Mask) == Mask) {
SDValue Borrow = DAG.getConstant(0, VT);
SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
SDValue Ops [] = { Borrow, Result };
@@ -1512,21 +1521,19 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
}
void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
- KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
+ KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
case XCoreISD::LADD:
case XCoreISD::LSUB:
if (Op.getResNo() == 0) {
// Top bits of carry / borrow are clear.
- KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
- Mask.getBitWidth() - 1);
- KnownZero &= Mask;
+ KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
+ KnownZero.getBitWidth() - 1);
}
break;
}
@@ -1590,8 +1597,6 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
// reg + reg<<2
return AM.Scale == 4 && AM.BaseOffs == 0;
}
-
- return false;
}
//===----------------------------------------------------------------------===//
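The ComputeMaskedBits edits above track an upstream API change: the demanded-bits Mask parameter was removed, so callers now mask the returned KnownZero themselves, replacing `KnownZero == Mask` with `(KnownZero & Mask) == Mask`. With plain integers the equivalent test is (a sketch; the helper name is hypothetical):

#include <cstdint>

// True iff every bit selected by Mask is proven zero in KnownZero; bits
// outside Mask no longer matter, since KnownZero is now computed in full.
static bool allMaskedBitsKnownZero(uint32_t KnownZero, uint32_t Mask) {
  return (KnownZero & Mask) == Mask;
}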
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
index d6c5b32..0b63ecd 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -15,9 +15,9 @@
#ifndef XCOREISELLOWERING_H
#define XCOREISELLOWERING_H
+#include "XCore.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
-#include "XCore.h"
namespace llvm {
@@ -160,7 +160,6 @@ namespace llvm {
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
@@ -175,9 +174,8 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
+ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
+ bool isVarArg, bool doesNotRet, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrFormats.td b/contrib/llvm/lib/Target/XCore/XCoreInstrFormats.td
index 8002c99..1963a70 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrFormats.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrFormats.td
@@ -1,4 +1,4 @@
-//===- XCoreInstrFormats.td - XCore Instruction Formats ----*- tablegen -*-===//
+//===-- XCoreInstrFormats.td - XCore Instruction Formats ---*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
index a0946a1..0a3008d 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -1,4 +1,4 @@
-//===- XCoreInstrInfo.cpp - XCore Instruction Information -------*- C++ -*-===//
+//===-- XCoreInstrInfo.cpp - XCore Instruction Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "XCoreMachineFunctionInfo.h"
#include "XCoreInstrInfo.h"
+#include "XCoreMachineFunctionInfo.h"
#include "XCore.h"
#include "llvm/MC/MCContext.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
index d354802..42eeed8 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
@@ -1,4 +1,4 @@
-//===- XCoreInstrInfo.h - XCore Instruction Information ---------*- C++ -*-===//
+//===-- XCoreInstrInfo.h - XCore Instruction Information --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,8 +14,8 @@
#ifndef XCOREINSTRUCTIONINFO_H
#define XCOREINSTRUCTIONINFO_H
-#include "llvm/Target/TargetInstrInfo.h"
#include "XCoreRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
#include "XCoreGenInstrInfo.inc"
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
index 4d2e93b..b25a08d 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
@@ -1,4 +1,4 @@
-//===- XCoreInstrInfo.td - Target Description for XCore ----*- tablegen -*-===//
+//===-- XCoreInstrInfo.td - Target Description for XCore ---*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
new file mode 100644
index 0000000..7ca0672
--- /dev/null
+++ b/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
@@ -0,0 +1,14 @@
+//===-- XCoreMachineFunctionInfo.cpp - XCore machine function info --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "XCoreMachineFunctionInfo.h"
+
+using namespace llvm;
+
+void XCoreFunctionInfo::anchor() { }
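Several classes in this patch gain an out-of-line virtual anchor(); the idiom pins a class's vtable (and type info) to one translation unit instead of letting every user emit weak copies. A minimal sketch with a hypothetical Widget class:

// Widget.h
class Widget {
  virtual void anchor(); // first out-of-line virtual: the vtable's home
public:
  virtual ~Widget() {}
};

// Widget.cpp - the only object file that emits Widget's vtable.
void Widget::anchor() {}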
diff --git a/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h b/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
index a575a0f..f869fcf 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreMachineFunctionInfo.h
@@ -1,4 +1,4 @@
-//====- XCoreMachineFuctionInfo.h - XCore machine function info -*- C++ -*-===//
+//===-- XCoreMachineFunctionInfo.h - XCore machine function info -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -26,7 +26,7 @@ class Function;
/// XCoreFunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private XCore target-specific information for each MachineFunction.
class XCoreFunctionInfo : public MachineFunctionInfo {
-private:
+ virtual void anchor();
bool UsesLR;
int LRSpillSlot;
int FPSpillSlot;
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index 1b78b37..f3b4b4c 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -1,4 +1,4 @@
-//===- XCoreRegisterInfo.cpp - XCore Register Information -------*- C++ -*-===//
+//===-- XCoreRegisterInfo.cpp - XCore Register Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,6 +14,8 @@
#include "XCoreRegisterInfo.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCore.h"
+#include "llvm/Type.h"
+#include "llvm/Function.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -24,8 +26,6 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Type.h"
-#include "llvm/Function.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
@@ -54,28 +54,14 @@ static inline bool isImmU16(unsigned val) {
return val < (1 << 16);
}
-static const unsigned XCore_ArgRegs[] = {
- XCore::R0, XCore::R1, XCore::R2, XCore::R3
-};
-
-const unsigned * XCoreRegisterInfo::getArgRegs(const MachineFunction *MF)
-{
- return XCore_ArgRegs;
-}
-
-unsigned XCoreRegisterInfo::getNumArgRegs(const MachineFunction *MF)
-{
- return array_lengthof(XCore_ArgRegs);
-}
-
bool XCoreRegisterInfo::needsFrameMoves(const MachineFunction &MF) {
return MF.getMMI().hasDebugInfo() ||
MF.getFunction()->needsUnwindTableEntry();
}
-const unsigned* XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
+const uint16_t* XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
const {
- static const unsigned CalleeSavedRegs[] = {
+ static const uint16_t CalleeSavedRegs[] = {
XCore::R4, XCore::R5, XCore::R6, XCore::R7,
XCore::R8, XCore::R9, XCore::R10, XCore::LR,
0
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
index 5c28f39..7391cfd 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
@@ -1,4 +1,4 @@
-//===- XCoreRegisterInfo.h - XCore Register Information Impl ----*- C++ -*-===//
+//===-- XCoreRegisterInfo.h - XCore Register Information Impl ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -44,7 +44,7 @@ public:
/// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
+ const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -62,15 +62,6 @@ public:
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const;
- //! Return the array of argument passing registers
- /*!
- \note The size of this array is returned by getArgRegsSize().
- */
- static const unsigned *getArgRegs(const MachineFunction *MF = 0);
-
- //! Return the size of the argument passing register array
- static unsigned getNumArgRegs(const MachineFunction *MF = 0);
-
//! Return whether to emit frame moves
static bool needsFrameMoves(const MachineFunction &MF);
};
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td
index c354230..9edfda1 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.td
@@ -1,4 +1,4 @@
-//===- XCoreRegisterInfo.td - XCore Register defs ----------*- tablegen -*-===//
+//===-- XCoreRegisterInfo.td - XCore Register defs ---------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp b/contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp
index b4e9927..8cfb770 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreSubtarget.cpp
@@ -1,4 +1,4 @@
-//===- XCoreSubtarget.cpp - XCore Subtarget Information -----------*- C++ -*-=//
+//===-- XCoreSubtarget.cpp - XCore Subtarget Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,6 +21,8 @@
using namespace llvm;
+void XCoreSubtarget::anchor() { }
+
XCoreSubtarget::XCoreSubtarget(const std::string &TT,
const std::string &CPU, const std::string &FS)
: XCoreGenSubtargetInfo(TT, CPU, FS)
diff --git a/contrib/llvm/lib/Target/XCore/XCoreSubtarget.h b/contrib/llvm/lib/Target/XCore/XCoreSubtarget.h
index 7b29fa2..8d0f254 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreSubtarget.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreSubtarget.h
@@ -1,4 +1,4 @@
-//=====-- XCoreSubtarget.h - Define Subtarget for the XCore -----*- C++ -*--==//
+//===-- XCoreSubtarget.h - Define Subtarget for the XCore -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,6 +25,7 @@ namespace llvm {
class StringRef;
class XCoreSubtarget : public XCoreGenSubtargetInfo {
+ virtual void anchor();
public:
/// This constructor initializes the data members to match that
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
index fdc5d35..f65297e 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -14,6 +14,7 @@
#include "XCore.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
@@ -21,8 +22,10 @@ using namespace llvm;
///
XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM)
- : LLVMTargetMachine(T, TT, CPU, FS, RM, CM),
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS),
DataLayout("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-"
"i16:16:32-i32:32:32-i64:32:32-n32"),
@@ -32,9 +35,27 @@ XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
TSInfo(*this) {
}
-bool XCoreTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createXCoreISelDag(*this));
+namespace {
+/// XCore Code Generator Pass Configuration Options.
+class XCorePassConfig : public TargetPassConfig {
+public:
+ XCorePassConfig(XCoreTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ XCoreTargetMachine &getXCoreTargetMachine() const {
+ return getTM<XCoreTargetMachine>();
+ }
+
+ virtual bool addInstSelector();
+};
+} // namespace
+
+TargetPassConfig *XCoreTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new XCorePassConfig(this, PM);
+}
+
+bool XCorePassConfig::addInstSelector() {
+ PM.add(createXCoreISelDag(getXCoreTargetMachine(), getOptLevel()));
return false;
}
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
index 83d09d6d..2546681 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetMachine.h
@@ -14,13 +14,13 @@
#ifndef XCORETARGETMACHINE_H
#define XCORETARGETMACHINE_H
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
#include "XCoreFrameLowering.h"
#include "XCoreSubtarget.h"
#include "XCoreInstrInfo.h"
#include "XCoreISelLowering.h"
#include "XCoreSelectionDAGInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
namespace llvm {
@@ -33,8 +33,9 @@ class XCoreTargetMachine : public LLVMTargetMachine {
XCoreSelectionDAGInfo TSInfo;
public:
XCoreTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- Reloc::Model RM, CodeModel::Model CM);
+ StringRef CPU, StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
virtual const XCoreInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const XCoreFrameLowering *getFrameLowering() const {
@@ -55,7 +56,7 @@ public:
virtual const TargetData *getTargetData() const { return &DataLayout; }
// Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h b/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h
index 7424c78..27875e7 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreTargetObjectFile.h
@@ -1,4 +1,4 @@
-//===-- llvm/Target/XCoreTargetObjectFile.h - XCore Object Info -*- C++ -*-===//
+//===-- XCoreTargetObjectFile.h - XCore Object Info -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index c3ecb7a..d8fae8a 100644
--- a/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -140,18 +140,24 @@ bool ConstantMerge::runOnModule(Module &M) {
UsedGlobals.count(GV))
continue;
+ // This transformation is legal for weak ODR globals in the sense it
+ // doesn't change semantics, but we really don't want to perform it
+ // anyway; it's likely to pessimize code generation, and some tools
+ // (like the Darwin linker in cases involving CFString) don't expect it.
+ if (GV->isWeakForLinker())
+ continue;
+
Constant *Init = GV->getInitializer();
// Check to see if the initializer is already known.
PointerIntPair<Constant*, 1, bool> Pair(Init, hasKnownAlignment(GV));
GlobalVariable *&Slot = CMap[Pair];
- // If this is the first constant we find or if the old on is local,
- // replace with the current one. It the current is externally visible
+ // If this is the first constant we find or if the old one is local,
+ // replace with the current one. If the current is externally visible
// it cannot be replaced, but it can be the canonical constant we merge with.
- if (Slot == 0 || IsBetterCannonical(*GV, *Slot)) {
+ if (Slot == 0 || IsBetterCannonical(*GV, *Slot))
Slot = GV;
- }
}
// Second: identify all globals that can be merged together, filling in
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 4bb6f7a..95aef27 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -74,7 +74,7 @@ namespace {
std::string getDescription() const {
return std::string((IsArg ? "Argument #" : "Return value #"))
- + utostr(Idx) + " of function " + F->getNameStr();
+ + utostr(Idx) + " of function " + F->getName().str();
}
};
diff --git a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 0edf342..f3f6228 100644
--- a/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -27,6 +27,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/UniqueVector.h"
@@ -225,31 +226,247 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
return MadeChange;
}
+namespace {
+ // For a given pointer Argument, this retains a list of Arguments of functions
+ // in the same SCC that the pointer data flows into. We use this to build an
+ // SCC of the arguments.
+ struct ArgumentGraphNode {
+ Argument *Definition;
+ SmallVector<ArgumentGraphNode*, 4> Uses;
+ };
+
+ class ArgumentGraph {
+    // We store pointers to ArgumentGraphNode objects, so it's important
+    // that they not move around upon insert.
+ typedef std::map<Argument*, ArgumentGraphNode> ArgumentMapTy;
+
+ ArgumentMapTy ArgumentMap;
+
+ // There is no root node for the argument graph, in fact:
+ // void f(int *x, int *y) { if (...) f(x, y); }
+ // is an example where the graph is disconnected. The SCCIterator requires a
+ // single entry point, so we maintain a fake ("synthetic") root node that
+ // uses every node. Because the graph is directed and nothing points into
+ // the root, it will not participate in any SCCs (except for its own).
+ ArgumentGraphNode SyntheticRoot;
+
+ public:
+ ArgumentGraph() { SyntheticRoot.Definition = 0; }
+
+ typedef SmallVectorImpl<ArgumentGraphNode*>::iterator iterator;
+
+ iterator begin() { return SyntheticRoot.Uses.begin(); }
+ iterator end() { return SyntheticRoot.Uses.end(); }
+ ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }
+
+ ArgumentGraphNode *operator[](Argument *A) {
+ ArgumentGraphNode &Node = ArgumentMap[A];
+ Node.Definition = A;
+ SyntheticRoot.Uses.push_back(&Node);
+ return &Node;
+ }
+ };
+
+ // This tracker checks whether callees are in the SCC, and if so it does not
+ // consider that a capture, instead adding it to the "Uses" list and
+ // continuing with the analysis.
+ struct ArgumentUsesTracker : public CaptureTracker {
+ ArgumentUsesTracker(const SmallPtrSet<Function*, 8> &SCCNodes)
+ : Captured(false), SCCNodes(SCCNodes) {}
+
+ void tooManyUses() { Captured = true; }
+
+ bool shouldExplore(Use *U) { return true; }
+
+ bool captured(Use *U) {
+ CallSite CS(U->getUser());
+ if (!CS.getInstruction()) { Captured = true; return true; }
+
+ Function *F = CS.getCalledFunction();
+ if (!F || !SCCNodes.count(F)) { Captured = true; return true; }
+
+ Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ for (CallSite::arg_iterator PI = CS.arg_begin(), PE = CS.arg_end();
+ PI != PE; ++PI, ++AI) {
+ if (AI == AE) {
+ assert(F->isVarArg() && "More params than args in non-varargs call");
+ Captured = true;
+ return true;
+ }
+ if (PI == U) {
+ Uses.push_back(AI);
+ break;
+ }
+ }
+ assert(!Uses.empty() && "Capturing call-site captured nothing?");
+ return false;
+ }
+
+ bool Captured; // True only if certainly captured (used outside our SCC).
+ SmallVector<Argument*, 4> Uses; // Uses within our SCC.
+
+ const SmallPtrSet<Function*, 8> &SCCNodes;
+ };
+}
+
+namespace llvm {
+ template<> struct GraphTraits<ArgumentGraphNode*> {
+ typedef ArgumentGraphNode NodeType;
+ typedef SmallVectorImpl<ArgumentGraphNode*>::iterator ChildIteratorType;
+
+ static inline NodeType *getEntryNode(NodeType *A) { return A; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->Uses.begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->Uses.end();
+ }
+ };
+ template<> struct GraphTraits<ArgumentGraph*>
+ : public GraphTraits<ArgumentGraphNode*> {
+ static NodeType *getEntryNode(ArgumentGraph *AG) {
+ return AG->getEntryNode();
+ }
+ static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
+ return AG->begin();
+ }
+ static ChildIteratorType nodes_end(ArgumentGraph *AG) {
+ return AG->end();
+ }
+ };
+}
+
/// AddNoCaptureAttrs - Deduce nocapture attributes for the SCC.
bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
bool Changed = false;
+ SmallPtrSet<Function*, 8> SCCNodes;
+
+ // Fill SCCNodes with the elements of the SCC. Used for quickly
+ // looking up whether a given CallGraphNode is in this SCC.
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
+ if (F && !F->isDeclaration() && !F->mayBeOverridden())
+ SCCNodes.insert(F);
+ }
+
+ ArgumentGraph AG;
+
// Check each function in turn, determining which pointer arguments are not
// captured.
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
if (F == 0)
- // External node - skip it;
+ // External node - only a problem for arguments that we pass to it.
continue;
// Definitions with weak linkage may be overridden at linktime with
- // something that writes memory, so treat them like declarations.
+ // something that captures pointers, so treat them like declarations.
if (F->isDeclaration() || F->mayBeOverridden())
continue;
+ // Functions that are readonly (or readnone) and nounwind and don't return
+ // a value can't capture arguments. Don't analyze them.
+ if (F->onlyReadsMemory() && F->doesNotThrow() &&
+ F->getReturnType()->isVoidTy()) {
+ for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
+ A->addAttr(Attribute::NoCapture);
+ ++NumNoCapture;
+ Changed = true;
+ }
+ }
+ continue;
+ }
+
for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A!=E; ++A)
- if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr() &&
- !PointerMayBeCaptured(A, true, /*StoreCaptures=*/false)) {
- A->addAttr(Attribute::NoCapture);
+ if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
+ ArgumentUsesTracker Tracker(SCCNodes);
+ PointerMayBeCaptured(A, &Tracker);
+ if (!Tracker.Captured) {
+ if (Tracker.Uses.empty()) {
+ // If it's trivially not captured, mark it nocapture now.
+ A->addAttr(Attribute::NoCapture);
+ ++NumNoCapture;
+ Changed = true;
+ } else {
+ // If it's not trivially captured and not trivially not captured,
+ // then it must be calling into another function in our SCC. Save
+ // its particulars for Argument-SCC analysis later.
+ ArgumentGraphNode *Node = AG[A];
+ for (SmallVectorImpl<Argument*>::iterator UI = Tracker.Uses.begin(),
+ UE = Tracker.Uses.end(); UI != UE; ++UI)
+ Node->Uses.push_back(AG[*UI]);
+ }
+ }
+ // Otherwise, it's captured. Don't bother doing SCC analysis on it.
+ }
+ }
+
+ // The graph we've collected is partial because we stopped scanning for
+ // argument uses once we solved the argument trivially. These partial nodes
+ // show up as ArgumentGraphNode objects with an empty Uses list, and for
+ // these nodes the final decision about whether they capture has already been
+ // made. If the definition doesn't have a 'nocapture' attribute by now, it
+ // captures.
+
+ for (scc_iterator<ArgumentGraph*> I = scc_begin(&AG), E = scc_end(&AG);
+ I != E; ++I) {
+ std::vector<ArgumentGraphNode*> &ArgumentSCC = *I;
+ if (ArgumentSCC.size() == 1) {
+ if (!ArgumentSCC[0]->Definition) continue; // synthetic root node
+
+ // eg. "void f(int* x) { if (...) f(x); }"
+ if (ArgumentSCC[0]->Uses.size() == 1 &&
+ ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
+ ArgumentSCC[0]->Definition->addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed = true;
}
+ continue;
+ }
+
+ bool SCCCaptured = false;
+ for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
+ E = ArgumentSCC.end(); I != E && !SCCCaptured; ++I) {
+ ArgumentGraphNode *Node = *I;
+ if (Node->Uses.empty()) {
+ if (!Node->Definition->hasNoCaptureAttr())
+ SCCCaptured = true;
+ }
+ }
+ if (SCCCaptured) continue;
+
+ SmallPtrSet<Argument*, 8> ArgumentSCCNodes;
+ // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
+ // quickly looking up whether a given Argument is in this ArgumentSCC.
+ for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
+ E = ArgumentSCC.end(); I != E; ++I) {
+ ArgumentSCCNodes.insert((*I)->Definition);
+ }
+
+ for (std::vector<ArgumentGraphNode*>::iterator I = ArgumentSCC.begin(),
+ E = ArgumentSCC.end(); I != E && !SCCCaptured; ++I) {
+ ArgumentGraphNode *N = *I;
+ for (SmallVectorImpl<ArgumentGraphNode*>::iterator UI = N->Uses.begin(),
+ UE = N->Uses.end(); UI != UE; ++UI) {
+ Argument *A = (*UI)->Definition;
+ if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
+ continue;
+ SCCCaptured = true;
+ break;
+ }
+ }
+ if (SCCCaptured) continue;
+
+ for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
+ Argument *A = ArgumentSCC[i]->Definition;
+ A->addAttr(Attribute::NoCapture);
+ ++NumNoCapture;
+ Changed = true;
+ }
}
return Changed;
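To see what the argument-SCC analysis buys over the old per-argument capture check, consider a hypothetical pair of mutually recursive functions (written as C++ for readability): each pointer flows into the other function but never escapes the pair, so the SCC pass can mark both parameters nocapture, while a purely local check would have to give up at the call.

void g(int *p);
void f(int *p) { if (*p) g(p); }      // p flows only into g
void g(int *p) { if (*p > 1) f(p); }  // p flows only back into f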
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 3552d03..1522aa4 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -26,6 +26,7 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -61,6 +62,7 @@ namespace {
struct GlobalStatus;
struct GlobalOpt : public ModulePass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetLibraryInfo>();
}
static char ID; // Pass identification, replacement for typeid
GlobalOpt() : ModulePass(ID) {
@@ -80,11 +82,17 @@ namespace {
const SmallPtrSet<const PHINode*, 16> &PHIUsers,
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
+
+ TargetData *TD;
+ TargetLibraryInfo *TLI;
};
}
char GlobalOpt::ID = 0;
-INITIALIZE_PASS(GlobalOpt, "globalopt",
+INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
+ "Global Variable Optimizer", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(GlobalOpt, "globalopt",
"Global Variable Optimizer", false, false)
ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
@@ -143,18 +151,31 @@ struct GlobalStatus {
/// HasPHIUser - Set to true if this global has a user that is a PHI node.
bool HasPHIUser;
+ /// AtomicOrdering - Set to the strongest atomic ordering requirement.
+ AtomicOrdering Ordering;
+
GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored),
StoredOnceValue(0), AccessingFunction(0),
- HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
- HasPHIUser(false) {}
+ HasMultipleAccessingFunctions(false),
+ HasNonInstructionUser(false), HasPHIUser(false),
+ Ordering(NotAtomic) {}
};
}
-// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
-// by constants itself. Note that constants cannot be cyclic, so this test is
-// pretty easy to implement recursively.
-//
+/// StrongerOrdering - Return the stronger of the two orderings. If the two
+/// orderings are acquire and release, then return AcquireRelease.
+///
+static AtomicOrdering StrongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
+ if (X == Acquire && Y == Release) return AcquireRelease;
+ if (Y == Acquire && X == Release) return AcquireRelease;
+ return (AtomicOrdering)std::max(X, Y);
+}
+
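One subtlety above: a plain max would merge Acquire with Release to Release (the enum orders Release above Acquire), silently dropping the acquire requirement, which is why that pair is special-cased to AcquireRelease. A standalone check (mirroring the enum numbering in LLVM headers of this vintage; illustration only):

#include <algorithm>
#include <cassert>

enum Ord { NotAtomic = 0, Unordered = 1, Monotonic = 2,
           Acquire = 4, Release = 5, AcquireRelease = 6 };

static Ord stronger(Ord X, Ord Y) {
  if (X == Acquire && Y == Release) return AcquireRelease;
  if (Y == Acquire && X == Release) return AcquireRelease;
  return (Ord)std::max((int)X, (int)Y); // enum order encodes strength
}

int main() {
  assert(stronger(Acquire, Release) == AcquireRelease); // max alone gives Release
  assert(stronger(Monotonic, Acquire) == Acquire);
  return 0;
}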
+/// SafeToDestroyConstant - It is safe to destroy a constant iff it is only
+/// used by other constants. Note that constants cannot be cyclic, so this
+/// test is pretty easy to implement recursively.
+///
static bool SafeToDestroyConstant(const Constant *C) {
if (isa<GlobalValue>(C)) return false;
@@ -195,14 +216,16 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
}
if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
GS.isLoaded = true;
- // Don't hack on volatile/atomic loads.
- if (!LI->isSimple()) return true;
+ // Don't hack on volatile loads.
+ if (LI->isVolatile()) return true;
+ GS.Ordering = StrongerOrdering(GS.Ordering, LI->getOrdering());
} else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
// Don't allow a store OF the address, only stores TO the address.
if (SI->getOperand(0) == V) return true;
- // Don't hack on volatile/atomic stores.
- if (!SI->isSimple()) return true;
+ // Don't hack on volatile stores.
+ if (SI->isVolatile()) return true;
+ GS.Ordering = StrongerOrdering(GS.Ordering, SI->getOrdering());
// If this is a direct store to the global (i.e., the global is a scalar
// value, not an aggregate), keep more specific information about
@@ -271,43 +294,12 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
return false;
}
-static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) {
- ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
- if (!CI) return 0;
- unsigned IdxV = CI->getZExtValue();
-
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) {
- if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV);
- } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) {
- if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV);
- } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
- if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
- } else if (isa<ConstantAggregateZero>(Agg)) {
- if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
- if (IdxV < STy->getNumElements())
- return Constant::getNullValue(STy->getElementType(IdxV));
- } else if (SequentialType *STy =
- dyn_cast<SequentialType>(Agg->getType())) {
- return Constant::getNullValue(STy->getElementType());
- }
- } else if (isa<UndefValue>(Agg)) {
- if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
- if (IdxV < STy->getNumElements())
- return UndefValue::get(STy->getElementType(IdxV));
- } else if (SequentialType *STy =
- dyn_cast<SequentialType>(Agg->getType())) {
- return UndefValue::get(STy->getElementType());
- }
- }
- return 0;
-}
-
-
/// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all
/// users of the global, cleaning up the obvious ones. This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
-static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
+static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
+ TargetData *TD, TargetLibraryInfo *TLI) {
bool Changed = false;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
User *U = *UI++;
@@ -328,11 +320,11 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
Constant *SubInit = 0;
if (Init)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
- Changed |= CleanupConstantGlobalUsers(CE, SubInit);
+ Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
} else if (CE->getOpcode() == Instruction::BitCast &&
CE->getType()->isPointerTy()) {
// Pointer cast, delete any stores and memsets to the global.
- Changed |= CleanupConstantGlobalUsers(CE, 0);
+ Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
}
if (CE->use_empty()) {
@@ -346,11 +338,17 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
Constant *SubInit = 0;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
ConstantExpr *CE =
- dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP));
+ dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
+
+ // If the initializer is an all-null value and we have an inbounds GEP,
+ // we already know what the result of any load from that GEP is.
+ // TODO: Handle splats.
+ if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
+ SubInit = Constant::getNullValue(GEP->getType()->getElementType());
}
- Changed |= CleanupConstantGlobalUsers(GEP, SubInit);
+ Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);
if (GEP->use_empty()) {
GEP->eraseFromParent();
@@ -368,7 +366,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
if (SafeToDestroyConstant(C)) {
C->destroyConstant();
// This could have invalidated UI, start over from scratch.
- CleanupConstantGlobalUsers(V, Init);
+ CleanupConstantGlobalUsers(V, Init, TD, TLI);
return true;
}
}
@@ -514,8 +512,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
NewGlobals.reserve(STy->getNumElements());
const StructLayout &Layout = *TD.getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Constant *In = getAggregateConstantElement(Init,
- ConstantInt::get(Type::getInt32Ty(STy->getContext()), i));
+ Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
GlobalVariable::InternalLinkage,
@@ -547,8 +544,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
for (unsigned i = 0, e = NumElements; i != e; ++i) {
- Constant *In = getAggregateConstantElement(Init,
- ConstantInt::get(Type::getInt32Ty(Init->getContext()), i));
+ Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
@@ -770,7 +766,9 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
/// value stored into it. If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value, and so we can optimize away the load.
-static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
+static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
+ TargetData *TD,
+ TargetLibraryInfo *TLI) {
bool Changed = false;
// Keep track of whether we are able to remove all the uses of the global
@@ -813,7 +811,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
// nor is the global.
if (AllNonStoreUsesGone) {
DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
- CleanupConstantGlobalUsers(GV, 0);
+ CleanupConstantGlobalUsers(GV, 0, TD, TLI);
if (GV->use_empty()) {
GV->eraseFromParent();
++NumDeleted;
@@ -825,10 +823,11 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V) {
+static void ConstantPropUsersOf(Value *V,
+ TargetData *TD, TargetLibraryInfo *TLI) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
- if (Constant *NewC = ConstantFoldInstruction(I)) {
+ if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
I->replaceAllUsesWith(NewC);
// Advance UI to the next non-I use to avoid invalidating it!
@@ -848,7 +847,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
CallInst *CI,
Type *AllocTy,
ConstantInt *NElements,
- TargetData* TD) {
+ TargetData *TD,
+ TargetLibraryInfo *TLI) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
Type *GlobalType;
@@ -906,7 +906,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
while (!GV->use_empty()) {
if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
// The global is initialized when the store to it occurs.
- new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, SI);
+ new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
+ SI->getOrdering(), SI->getSynchScope(), SI);
SI->eraseFromParent();
continue;
}
@@ -921,7 +922,10 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
// Replace the cmp X, 0 with a use of the bool value.
- Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", ICI);
+ // Sink the load to where the compare was, if atomic rules allow us to.
+ Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
+ LI->getOrdering(), LI->getSynchScope(),
+ LI->isUnordered() ? (Instruction*)ICI : LI);
InitBoolUsed = true;
switch (ICI->getPredicate()) {
default: llvm_unreachable("Unknown ICmp Predicate!");
@@ -962,9 +966,9 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// To further other optimizations, loop over all users of NewGV and try to
// constant prop them. This will promote GEP instructions with constant
// indices into GEP constant-exprs, which will allow global-opt to hack on it.
- ConstantPropUsersOf(NewGV);
+ ConstantPropUsersOf(NewGV, TD, TLI);
if (RepValue != NewGV)
- ConstantPropUsersOf(RepValue);
+ ConstantPropUsersOf(RepValue, TD, TLI);
return NewGV;
}
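To make the shape of OptimizeGlobalAddressOfMalloc concrete, here is a hand-written analogue of the before/after behavior (plain C++ with invented names; not the pass's actual output):

// Before: a global pointer filled by malloc in a ctor, tested against null.
//   static int *GV;   ctor: GV = (int*)malloc(4); *GV = 42;
//   use:  if (GV != 0) ... *GV ...
// After: the buffer becomes a global and the null tests become a bool flag.
static int NewGV[1];                 // replaces the malloc'd storage
static bool InitBool = false;        // "GV.init" in the pass's naming

static void ctor(void) {
  InitBool = true;                   // the store of malloc's result becomes this
  NewGV[0] = 42;
}
static int use(void) {
  return InitBool ? NewGV[0] : -1;   // "icmp eq GV, null" becomes a flag test
}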
@@ -1203,7 +1207,6 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
} else {
llvm_unreachable("Unknown usable value");
- Result = 0;
}
return FieldVals[FieldNo] = Result;
@@ -1293,9 +1296,9 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
- Value* NElems, TargetData *TD) {
+ Value *NElems, TargetData *TD) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
- Type* MAT = getMallocAllocatedType(CI);
+ Type *MAT = getMallocAllocatedType(CI);
StructType *STy = cast<StructType>(MAT);
// There is guaranteed to be at least one use of the malloc (storing
@@ -1482,8 +1485,10 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CallInst *CI,
Type *AllocTy,
+ AtomicOrdering Ordering,
Module::global_iterator &GVI,
- TargetData *TD) {
+ TargetData *TD,
+ TargetLibraryInfo *TLI) {
if (!TD)
return false;
@@ -1502,7 +1507,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// We can't optimize this if the malloc itself is used in a complex way,
// for example, being stored into multiple globals. This allows the
- // malloc to be stored into the specified global, loaded setcc'd, and
+ // malloc to be stored into the specified global, loaded icmp'd, and
// GEP'd. These are all things we could transform to using the global
// for.
SmallPtrSet<const PHINode*, 8> PHIs;
@@ -1523,7 +1528,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// (2048 bytes currently), as we don't want to introduce a 16M global or
// something.
if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
- GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD);
+ GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI);
return true;
}
@@ -1531,6 +1536,9 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// into multiple malloc'd arrays, one for each field. This is basically
// SRoA for malloc'd memory.
+ if (Ordering != NotAtomic)
+ return false;
+
// If this is an allocation of a fixed size array of structs, analyze as a
// variable size array. malloc [100 x struct],1 -> malloc struct, 100
if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
@@ -1563,7 +1571,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
}
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true),TD);
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD);
return true;
}
@@ -1573,8 +1581,9 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
+ AtomicOrdering Ordering,
Module::global_iterator &GVI,
- TargetData *TD) {
+ TargetData *TD, TargetLibraryInfo *TLI) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1589,12 +1598,13 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
// Optimize away any trapping uses of the loaded value.
- if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
+ if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
- Type* MallocType = getMallocAllocatedType(CI);
- if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
- GVI, TD))
+ Type *MallocType = getMallocAllocatedType(CI);
+ if (MallocType &&
+ TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
+ TD, TLI))
return true;
}
}
@@ -1670,7 +1680,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
assert(LI->getOperand(0) == GV && "Not a copy!");
// Insert a new load, to preserve the saved value.
- StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI);
+ StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
+ LI->getOrdering(), LI->getSynchScope(), LI);
} else {
assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
"This is not a form that we understand!");
@@ -1678,11 +1689,13 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
}
}
- new StoreInst(StoreVal, NewGV, SI);
+ new StoreInst(StoreVal, NewGV, false, 0,
+ SI->getOrdering(), SI->getSynchScope(), SI);
} else {
// Change the load into a load of bool then a select.
LoadInst *LI = cast<LoadInst>(UI);
- LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI);
+ LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
+ LI->getOrdering(), LI->getSynchScope(), LI);
Value *NSI;
if (IsOneZero)
NSI = new ZExtInst(NLI, LI->getType(), "", LI);
@@ -1699,8 +1712,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
}
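For reference, TryToShrinkGlobalToBoolean performs the transformation sketched below; the patch's change is only that the rewritten loads and stores now carry over the original instructions' atomic ordering and synch scope. A hand-written analogue with invented values:

// A global that only ever holds its initializer (0) or one other value (42)
// can be stored as a single boolean.
static bool GV_b = false;                        // "GV.b" in the pass's naming
static void store42(void) { GV_b = true; }       // "store 42, @GV" becomes this
static int loadGV(void) { return GV_b ? 42 : 0; } // load becomes a select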
-/// ProcessInternalGlobal - Analyze the specified global variable and optimize
-/// it if possible. If we make a change, return true.
+/// ProcessGlobal - Analyze the specified global variable and optimize it if
+/// possible. If we make a change, return true.
bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
Module::global_iterator &GVI) {
if (!GV->hasLocalLinkage())
@@ -1737,7 +1750,7 @@ bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
/// it if possible. If we make a change, return true.
bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
Module::global_iterator &GVI,
- const SmallPtrSet<const PHINode*, 16> &PHIUsers,
+ const SmallPtrSet<const PHINode*, 16> &PHIUsers,
const GlobalStatus &GS) {
// If this is a first class global and has only one accessing function
  // and this function is main (which we know is not recursive) we can make
@@ -1755,11 +1768,11 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GS.AccessingFunction->hasExternalLinkage() &&
GV->getType()->getAddressSpace() == 0) {
DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
- Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
+ Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
->getEntryBlock().begin());
- Type* ElemTy = GV->getType()->getElementType();
+ Type *ElemTy = GV->getType()->getElementType();
// FIXME: Pass Global's alignment when globals have alignment
- AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
+ AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
if (!isa<UndefValue>(GV->getInitializer()))
new StoreInst(GV->getInitializer(), Alloca, &FirstI);
@@ -1776,7 +1789,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Delete any stores we can find to the global. We may not be able to
// make it completely dead though.
- bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer());
+ bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(),
+ TD, TLI);
// If the global is dead now, delete it.
if (GV->use_empty()) {
@@ -1791,7 +1805,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setConstant(true);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer());
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
// If the global is dead now, just nuke it.
if (GV->use_empty()) {
@@ -1820,7 +1834,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setInitializer(SOVConstant);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer());
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
if (GV->use_empty()) {
DEBUG(dbgs() << " *** Substituting initializer allowed us to "
@@ -1836,8 +1850,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Try to optimize globals based on the knowledge that only one value
// (besides its initializer) is ever stored to the global.
- if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
- getAnalysisIfAvailable<TargetData>()))
+ if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
+ TD, TLI))
return true;
// Otherwise, if the global was not a boolean, we can shrink it to be a
@@ -1890,7 +1904,7 @@ bool GlobalOpt::OptimizeFunctions(Module &M) {
if (!F->hasName() && !F->isDeclaration())
F->setLinkage(GlobalValue::InternalLinkage);
F->removeDeadConstantUsers();
- if (F->use_empty() && (F->hasLocalLinkage() || F->hasLinkOnceLinkage())) {
+ if (F->isDefTriviallyDead()) {
F->eraseFromParent();
Changed = true;
++NumFnDeleted;
@@ -1930,8 +1944,7 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
// Simplify the initializer.
if (GV->hasInitializer())
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
- TargetData *TD = getAnalysisIfAvailable<TargetData>();
- Constant *New = ConstantFoldConstantExpression(CE, TD);
+ Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
if (New && New != CE)
GV->setInitializer(New);
}
@@ -2052,16 +2065,10 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
}
-static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues, Value *V) {
- if (Constant *CV = dyn_cast<Constant>(V)) return CV;
- Constant *R = ComputedValues[V];
- assert(R && "Reference to an uncomputed value!");
- return R;
-}
-
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants);
+ SmallPtrSet<Constant*, 8> &SimpleConstants,
+ const TargetData *TD);
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
@@ -2073,7 +2080,8 @@ isSimpleEnoughValueToCommit(Constant *C,
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants) {
+ SmallPtrSet<Constant*, 8> &SimpleConstants,
+ const TargetData *TD) {
// Simple integer, undef, constant aggregate zero, global addresses, etc are
// all supported.
if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
@@ -2085,7 +2093,7 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
isa<ConstantVector>(C)) {
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
Constant *Op = cast<Constant>(C->getOperand(i));
- if (!isSimpleEnoughValueToCommit(Op, SimpleConstants))
+ if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))
return false;
}
return true;
@@ -2097,34 +2105,42 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
ConstantExpr *CE = cast<ConstantExpr>(C);
switch (CE->getOpcode()) {
case Instruction::BitCast:
+ // Bitcast is fine if the casted value is fine.
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+
case Instruction::IntToPtr:
case Instruction::PtrToInt:
- // These casts are always fine if the casted value is.
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+ // int <=> ptr is fine if the int type is the same size as the
+ // pointer type.
+ if (!TD || TD->getTypeSizeInBits(CE->getType()) !=
+ TD->getTypeSizeInBits(CE->getOperand(0)->getType()))
+ return false;
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
// GEP is fine if it is simple + constant offset.
case Instruction::GetElementPtr:
for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
if (!isa<ConstantInt>(CE->getOperand(i)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
case Instruction::Add:
// We allow simple+cst.
if (!isa<ConstantInt>(CE->getOperand(1)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
}
return false;
}
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants) {
+ SmallPtrSet<Constant*, 8> &SimpleConstants,
+ const TargetData *TD) {
// If we already checked this constant, we win.
if (!SimpleConstants.insert(C)) return true;
// Check the constant.
- return isSimpleEnoughValueToCommitHelper(C, SimpleConstants);
+ return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);
}
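The new IntToPtr/PtrToInt restriction above reduces to a width comparison; a minimal sketch using the same API as this file (checkRoundTrip is a hypothetical name):

// Without TargetData we cannot prove the integer is as wide as the pointer,
// so the cast may truncate and must be rejected conservatively.
static bool checkRoundTrip(const TargetData *TD, Type *DstTy, Type *SrcTy) {
  return TD && TD->getTypeSizeInBits(DstTy) == TD->getTypeSizeInBits(SrcTy);
}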
@@ -2191,23 +2207,11 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
return Val;
}
- std::vector<Constant*> Elts;
+ SmallVector<Constant*, 32> Elts;
if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
-
// Break up the constant into its elements.
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
- for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i)
- Elts.push_back(cast<Constant>(*i));
- } else if (isa<ConstantAggregateZero>(Init)) {
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- Elts.push_back(Constant::getNullValue(STy->getElementType(i)));
- } else if (isa<UndefValue>(Init)) {
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- Elts.push_back(UndefValue::get(STy->getElementType(i)));
- } else {
- llvm_unreachable("This code is out of sync with "
- " ConstantFoldLoadThroughGEPConstantExpr");
- }
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ Elts.push_back(Init->getAggregateElement(i));
// Replace the element that we are supposed to.
ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
@@ -2226,22 +2230,11 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
NumElts = ATy->getNumElements();
else
- NumElts = cast<VectorType>(InitTy)->getNumElements();
+ NumElts = InitTy->getVectorNumElements();
// Break up the array into elements.
- if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
- for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
- Elts.push_back(cast<Constant>(*i));
- } else if (ConstantVector *CV = dyn_cast<ConstantVector>(Init)) {
- for (User::op_iterator i = CV->op_begin(), e = CV->op_end(); i != e; ++i)
- Elts.push_back(cast<Constant>(*i));
- } else if (isa<ConstantAggregateZero>(Init)) {
- Elts.assign(NumElts, Constant::getNullValue(InitTy->getElementType()));
- } else {
- assert(isa<UndefValue>(Init) && "This code is out of sync with "
- " ConstantFoldLoadThroughGEPConstantExpr");
- Elts.assign(NumElts, UndefValue::get(InitTy->getElementType()));
- }
+ for (uint64_t i = 0, e = NumElts; i != e; ++i)
+ Elts.push_back(Init->getAggregateElement(i));
assert(CI->getZExtValue() < NumElts);
Elts[CI->getZExtValue()] =
@@ -2266,15 +2259,109 @@ static void CommitValueTo(Constant *Val, Constant *Addr) {
GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
}
+namespace {
+
+/// Evaluator - This class evaluates LLVM IR, producing the Constant
+/// representing each SSA instruction. Changes to global variables are stored
+/// in a mapping that can be iterated over after the evaluation is complete.
+/// Once an evaluation call fails, the evaluation object should not be reused.
+class Evaluator {
+public:
+ Evaluator(const TargetData *TD, const TargetLibraryInfo *TLI)
+ : TD(TD), TLI(TLI) {
+ ValueStack.push_back(new DenseMap<Value*, Constant*>);
+ }
+
+ ~Evaluator() {
+ DeleteContainerPointers(ValueStack);
+ while (!AllocaTmps.empty()) {
+ GlobalVariable *Tmp = AllocaTmps.back();
+ AllocaTmps.pop_back();
+
+ // If there are still users of the alloca, the program is doing something
+ // silly, e.g. storing the address of the alloca somewhere and using it
+ // later. Since this is undefined, we'll just make it be null.
+ if (!Tmp->use_empty())
+ Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
+ delete Tmp;
+ }
+ }
+
+ /// EvaluateFunction - Evaluate a call to function F, returning true if
+ /// successful, false if we can't evaluate it. ActualArgs contains the formal
+ /// arguments for the function.
+ bool EvaluateFunction(Function *F, Constant *&RetVal,
+ const SmallVectorImpl<Constant*> &ActualArgs);
+
+ /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
+ /// successful, false if we can't evaluate it. NewBB returns the next BB that
+ /// control flows into, or null upon return.
+ bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
+
+ Constant *getVal(Value *V) {
+ if (Constant *CV = dyn_cast<Constant>(V)) return CV;
+ Constant *R = ValueStack.back()->lookup(V);
+ assert(R && "Reference to an uncomputed value!");
+ return R;
+ }
+
+ void setVal(Value *V, Constant *C) {
+ ValueStack.back()->operator[](V) = C;
+ }
+
+ const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
+ return MutatedMemory;
+ }
+
+ const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const {
+ return Invariants;
+ }
+
+private:
+ Constant *ComputeLoadResult(Constant *P);
+
+ /// ValueStack - As we compute SSA register values, we store their contents
+ /// here. The back of the vector contains the current function and the stack
+ /// contains the values in the calling frames.
+ SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack;
+
+ /// CallStack - This is used to detect recursion. In pathological situations
+ /// we could hit exponential behavior, but at least there is nothing
+ /// unbounded.
+ SmallVector<Function*, 4> CallStack;
+
+ /// MutatedMemory - For each store we execute, we update this map. Loads
+ /// check this to get the most up-to-date value. If evaluation is successful,
+ /// this state is committed to the process.
+ DenseMap<Constant*, Constant*> MutatedMemory;
+
+ /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
+ /// to represent its body. This vector is needed so we can delete the
+ /// temporary globals when we are done.
+ SmallVector<GlobalVariable*, 32> AllocaTmps;
+
+ /// Invariants - These global variables have been marked invariant by the
+ /// static constructor.
+ SmallPtrSet<GlobalVariable*, 8> Invariants;
+
+ /// SimpleConstants - These are constants we have checked and know to be
+ /// simple enough to live in a static initializer of a global.
+ SmallPtrSet<Constant*, 8> SimpleConstants;
+
+ const TargetData *TD;
+ const TargetLibraryInfo *TLI;
+};
+
+} // anonymous namespace
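The ValueStack discipline is the essential change from the old free-function evaluator: one map per active call frame instead of a single Values map shared by everything. A self-contained analogue, using integers as stand-ins for Value*/Constant*:

#include <cassert>
#include <map>
#include <vector>

struct MiniEvaluator {
  // Back of the vector is the current frame, as in Evaluator::ValueStack.
  std::vector<std::map<int, int> > Frames;
  MiniEvaluator() { Frames.push_back(std::map<int, int>()); }

  int getVal(int Id) const {
    std::map<int, int>::const_iterator I = Frames.back().find(Id);
    assert(I != Frames.back().end() && "Reference to an uncomputed value!");
    return I->second;
  }
  void setVal(int Id, int V) { Frames.back()[Id] = V; }
  void enterCall() { Frames.push_back(std::map<int, int>()); } // push a frame
  void leaveCall() { Frames.pop_back(); }                      // pop on return
};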
+
/// ComputeLoadResult - Return the value that would be computed by a load from
/// P after the stores reflected by 'memory' have been performed. If we can't
/// decide, return null.
-static Constant *ComputeLoadResult(Constant *P,
- const DenseMap<Constant*, Constant*> &Memory) {
+Constant *Evaluator::ComputeLoadResult(Constant *P) {
// If this memory location has been recently stored, use the stored value: it
// is the most up-to-date.
- DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
- if (I != Memory.end()) return I->second;
+ DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P);
+ if (I != MutatedMemory.end()) return I->second;
// Access it.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
@@ -2295,56 +2382,29 @@ static Constant *ComputeLoadResult(Constant *P,
return 0; // don't know how to evaluate.
}
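ComputeLoadResult's lookup order, shown with plain containers (an analogue, not LLVM code): pending stores shadow the original initializers until the evaluation commits.

#include <map>

// Returns true and sets Out if the load can be decided; false otherwise.
static bool loadFrom(const std::map<int, int> &MutatedMemory,
                     const std::map<int, int> &Initializers,
                     int Addr, int &Out) {
  std::map<int, int>::const_iterator I = MutatedMemory.find(Addr);
  if (I != MutatedMemory.end()) { Out = I->second; return true; } // freshest
  I = Initializers.find(Addr);
  if (I != Initializers.end()) { Out = I->second; return true; }  // initializer
  return false; // don't know how to evaluate
}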
-/// EvaluateFunction - Evaluate a call to function F, returning true if
-/// successful, false if we can't evaluate it. ActualArgs contains the formal
-/// arguments for the function.
-static bool EvaluateFunction(Function *F, Constant *&RetVal,
- const SmallVectorImpl<Constant*> &ActualArgs,
- std::vector<Function*> &CallStack,
- DenseMap<Constant*, Constant*> &MutatedMemory,
- std::vector<GlobalVariable*> &AllocaTmps,
- SmallPtrSet<Constant*, 8> &SimpleConstants,
- const TargetData *TD) {
- // Check to see if this function is already executing (recursion). If so,
- // bail out. TODO: we might want to accept limited recursion.
- if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
- return false;
-
- CallStack.push_back(F);
-
- /// Values - As we compute SSA register values, we store their contents here.
- DenseMap<Value*, Constant*> Values;
-
- // Initialize arguments to the incoming values specified.
- unsigned ArgNo = 0;
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
- ++AI, ++ArgNo)
- Values[AI] = ActualArgs[ArgNo];
-
- /// ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
- /// we can only evaluate any one basic block at most once. This set keeps
- /// track of what we have executed so we can detect recursive cases etc.
- SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
-
- // CurInst - The current instruction we're evaluating.
- BasicBlock::iterator CurInst = F->begin()->begin();
-
+/// EvaluateBlock - Evaluate all instructions in block BB, returning true if
+/// successful, false if we can't evaluate it. NewBB returns the next BB that
+/// control flows into, or null upon return.
+bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
+ BasicBlock *&NextBB) {
// This is the main evaluation loop.
while (1) {
Constant *InstResult = 0;
if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
if (!SI->isSimple()) return false; // no volatile/atomic accesses.
- Constant *Ptr = getVal(Values, SI->getOperand(1));
+ Constant *Ptr = getVal(SI->getOperand(1));
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
if (!isSimpleEnoughPointerToCommit(Ptr))
// If this is too complex for us to commit, reject it.
return false;
- Constant *Val = getVal(Values, SI->getOperand(0));
+ Constant *Val = getVal(SI->getOperand(0));
// If this might be too difficult for the backend to handle (e.g. the addr
// of one global variable divided by another) then we can't commit it.
- if (!isSimpleEnoughValueToCommit(Val, SimpleConstants))
+ if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD))
return false;
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
@@ -2354,7 +2414,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
// stored value.
Ptr = CE->getOperand(0);
- Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType();
+ Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();
// In order to push the bitcast onto the stored value, a bitcast
// from NewTy to Val's type must be legal. If it's not, we can try
@@ -2366,16 +2426,18 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (StructType *STy = dyn_cast<StructType>(NewTy)) {
NewTy = STy->getTypeAtIndex(0U);
- IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32);
+ IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
Constant * const IdxList[] = {IdxZero, IdxZero};
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
-
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
} else {
- return 0;
+ return false;
}
}
@@ -2387,33 +2449,35 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
MutatedMemory[Ptr] = Val;
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
InstResult = ConstantExpr::get(BO->getOpcode(),
- getVal(Values, BO->getOperand(0)),
- getVal(Values, BO->getOperand(1)));
+ getVal(BO->getOperand(0)),
+ getVal(BO->getOperand(1)));
} else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
InstResult = ConstantExpr::getCompare(CI->getPredicate(),
- getVal(Values, CI->getOperand(0)),
- getVal(Values, CI->getOperand(1)));
+ getVal(CI->getOperand(0)),
+ getVal(CI->getOperand(1)));
} else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
InstResult = ConstantExpr::getCast(CI->getOpcode(),
- getVal(Values, CI->getOperand(0)),
+ getVal(CI->getOperand(0)),
CI->getType());
} else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
- InstResult = ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
- getVal(Values, SI->getOperand(1)),
- getVal(Values, SI->getOperand(2)));
+ InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
+ getVal(SI->getOperand(1)),
+ getVal(SI->getOperand(2)));
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
- Constant *P = getVal(Values, GEP->getOperand(0));
+ Constant *P = getVal(GEP->getOperand(0));
SmallVector<Constant*, 8> GEPOps;
for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
i != e; ++i)
- GEPOps.push_back(getVal(Values, *i));
+ GEPOps.push_back(getVal(*i));
InstResult =
ConstantExpr::getGetElementPtr(P, GEPOps,
cast<GEPOperator>(GEP)->isInBounds());
} else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
if (!LI->isSimple()) return false; // no volatile/atomic accesses.
- InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
- MutatedMemory);
+ Constant *Ptr = getVal(LI->getOperand(0));
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ InstResult = ComputeLoadResult(Ptr);
if (InstResult == 0) return false; // Could not evaluate load.
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
if (AI->isArrayAllocation()) return false; // Cannot handle array allocs.
@@ -2423,25 +2487,53 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
UndefValue::get(Ty),
AI->getName()));
InstResult = AllocaTmps.back();
- } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) {
+ } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
+ CallSite CS(CurInst);
// Debug info can safely be ignored here.
- if (isa<DbgInfoIntrinsic>(CI)) {
+ if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
++CurInst;
continue;
}
// Cannot handle inline asm.
- if (isa<InlineAsm>(CI->getCalledValue())) return false;
-
- if (MemSetInst *MSI = dyn_cast<MemSetInst>(CI)) {
- if (MSI->isVolatile()) return false;
- Constant *Ptr = getVal(Values, MSI->getDest());
- Constant *Val = getVal(Values, MSI->getValue());
- Constant *DestVal = ComputeLoadResult(getVal(Values, Ptr),
- MutatedMemory);
- if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
- // This memset is a no-op.
+ if (isa<InlineAsm>(CS.getCalledValue())) return false;
+
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
+ if (MSI->isVolatile()) return false;
+ Constant *Ptr = getVal(MSI->getDest());
+ Constant *Val = getVal(MSI->getValue());
+ Constant *DestVal = ComputeLoadResult(getVal(Ptr));
+ if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
+ // This memset is a no-op.
+ ++CurInst;
+ continue;
+ }
+ }
+
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end) {
+ ++CurInst;
+ continue;
+ }
+
+ if (II->getIntrinsicID() == Intrinsic::invariant_start) {
+ // We don't insert an entry into Values, as it doesn't have a
+ // meaningful return value.
+ if (!II->use_empty())
+ return false;
+ ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
+ Value *PtrArg = getVal(II->getArgOperand(1));
+ Value *Ptr = PtrArg->stripPointerCasts();
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
+ Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
+ if (!Size->isAllOnesValue() &&
+ Size->getValue().getLimitedValue() >=
+ TD->getTypeStoreSize(ElemTy))
+ Invariants.insert(GV);
+ }
+ // Continue even if we do nothing.
++CurInst;
continue;
}
@@ -2449,19 +2541,17 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
}
// Resolve function pointers.
- Function *Callee = dyn_cast<Function>(getVal(Values,
- CI->getCalledValue()));
- if (!Callee) return false; // Cannot resolve.
+ Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue()));
+ if (!Callee || Callee->mayBeOverridden())
+ return false; // Cannot resolve.
SmallVector<Constant*, 8> Formals;
- CallSite CS(CI);
- for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end();
- i != e; ++i)
- Formals.push_back(getVal(Values, *i));
+ for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i)
+ Formals.push_back(getVal(*i));
if (Callee->isDeclaration()) {
// If this is a function we can constant fold, do it.
- if (Constant *C = ConstantFoldCall(Callee, Formals)) {
+ if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) {
InstResult = C;
} else {
return false;
@@ -2472,62 +2562,43 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
Constant *RetVal;
// Execute the call, if successful, use the return value.
- if (!EvaluateFunction(Callee, RetVal, Formals, CallStack,
- MutatedMemory, AllocaTmps, SimpleConstants, TD))
+ ValueStack.push_back(new DenseMap<Value*, Constant*>);
+ if (!EvaluateFunction(Callee, RetVal, Formals))
return false;
+ delete ValueStack.pop_back_val();
InstResult = RetVal;
}
} else if (isa<TerminatorInst>(CurInst)) {
- BasicBlock *NewBB = 0;
if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
if (BI->isUnconditional()) {
- NewBB = BI->getSuccessor(0);
+ NextBB = BI->getSuccessor(0);
} else {
ConstantInt *Cond =
- dyn_cast<ConstantInt>(getVal(Values, BI->getCondition()));
+ dyn_cast<ConstantInt>(getVal(BI->getCondition()));
if (!Cond) return false; // Cannot determine.
- NewBB = BI->getSuccessor(!Cond->getZExtValue());
+ NextBB = BI->getSuccessor(!Cond->getZExtValue());
}
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
ConstantInt *Val =
- dyn_cast<ConstantInt>(getVal(Values, SI->getCondition()));
+ dyn_cast<ConstantInt>(getVal(SI->getCondition()));
if (!Val) return false; // Cannot determine.
- NewBB = SI->getSuccessor(SI->findCaseValue(Val));
+ NextBB = SI->findCaseValue(Val).getCaseSuccessor();
} else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
- Value *Val = getVal(Values, IBI->getAddress())->stripPointerCasts();
+ Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
- NewBB = BA->getBasicBlock();
+ NextBB = BA->getBasicBlock();
else
return false; // Cannot determine.
- } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) {
- if (RI->getNumOperands())
- RetVal = getVal(Values, RI->getOperand(0));
-
- CallStack.pop_back(); // return from fn.
- return true; // We succeeded at evaluating this ctor!
+ } else if (isa<ReturnInst>(CurInst)) {
+ NextBB = 0;
} else {
// invoke, unwind, resume, unreachable.
return false; // Cannot handle this terminator.
}
- // Okay, we succeeded in evaluating this control flow. See if we have
- // executed the new block before. If so, we have a looping function,
- // which we cannot evaluate in reasonable time.
- if (!ExecutedBlocks.insert(NewBB))
- return false; // looped!
-
- // Okay, we have never been in this block before. Check to see if there
- // are any PHI nodes. If so, evaluate them with information about where
- // we came from.
- BasicBlock *OldBB = CurInst->getParent();
- CurInst = NewBB->begin();
- PHINode *PN;
- for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
- Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB));
-
- // Do NOT increment CurInst. We know that the terminator had no value.
- continue;
+ // We succeeded at evaluating this block!
+ return true;
} else {
// Did not know how to evaluate this!
return false;
@@ -2535,9 +2606,15 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (!CurInst->use_empty()) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
- InstResult = ConstantFoldConstantExpression(CE, TD);
+ InstResult = ConstantFoldConstantExpression(CE, TD, TLI);
- Values[CurInst] = InstResult;
+ setVal(CurInst, InstResult);
+ }
+
+ // If we just processed an invoke, we finished evaluating the block.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
+ NextBB = II->getNormalDest();
+ return true;
}
// Advance program counter.
@@ -2545,64 +2622,96 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
}
}
-/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
-/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const TargetData *TD) {
- /// MutatedMemory - For each store we execute, we update this map. Loads
- /// check this to get the most up-to-date value. If evaluation is successful,
- /// this state is committed to the process.
- DenseMap<Constant*, Constant*> MutatedMemory;
+/// EvaluateFunction - Evaluate a call to function F, returning true if
+/// successful, false if we can't evaluate it. ActualArgs contains the formal
+/// arguments for the function.
+bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
+ const SmallVectorImpl<Constant*> &ActualArgs) {
+ // Check to see if this function is already executing (recursion). If so,
+ // bail out. TODO: we might want to accept limited recursion.
+ if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
+ return false;
- /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
- /// to represent its body. This vector is needed so we can delete the
- /// temporary globals when we are done.
- std::vector<GlobalVariable*> AllocaTmps;
+ CallStack.push_back(F);
- /// CallStack - This is used to detect recursion. In pathological situations
- /// we could hit exponential behavior, but at least there is nothing
- /// unbounded.
- std::vector<Function*> CallStack;
+ // Initialize arguments to the incoming values specified.
+ unsigned ArgNo = 0;
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
+ ++AI, ++ArgNo)
+ setVal(AI, ActualArgs[ArgNo]);
- /// SimpleConstants - These are constants we have checked and know to be
- /// simple enough to live in a static initializer of a global.
- SmallPtrSet<Constant*, 8> SimpleConstants;
-
+ // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
+ // we can only evaluate any one basic block at most once. This set keeps
+ // track of what we have executed so we can detect recursive cases etc.
+ SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
+
+ // CurBB - The current basic block we're evaluating.
+ BasicBlock *CurBB = F->begin();
+
+ BasicBlock::iterator CurInst = CurBB->begin();
+
+ while (1) {
+ BasicBlock *NextBB = 0; // Initialized to avoid compiler warnings.
+ if (!EvaluateBlock(CurInst, NextBB))
+ return false;
+
+ if (NextBB == 0) {
+ // Successfully running until there's no next block means that we found
+ // the return. Fill in the return value and pop the call stack.
+ ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
+ if (RI->getNumOperands())
+ RetVal = getVal(RI->getOperand(0));
+ CallStack.pop_back();
+ return true;
+ }
+
+ // Okay, we succeeded in evaluating this control flow. See if we have
+ // executed the new block before. If so, we have a looping function,
+ // which we cannot evaluate in reasonable time.
+ if (!ExecutedBlocks.insert(NextBB))
+ return false; // looped!
+
+ // Okay, we have never been in this block before. Check to see if there
+ // are any PHI nodes. If so, evaluate them with information about where
+ // we came from.
+ PHINode *PN = 0;
+ for (CurInst = NextBB->begin();
+ (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
+ setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));
+
+ // Advance to the next block.
+ CurBB = NextBB;
+ }
+}
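The two guards that keep EvaluateFunction terminating, in miniature (a plain C++ analogue with integer stand-ins):

#include <algorithm>
#include <set>
#include <vector>

// A function may not already be on the call stack (no recursion) ...
static bool enterFunction(std::vector<int> &CallStack, int F) {
  if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
    return false;            // recursion: bail out
  CallStack.push_back(F);
  return true;
}

// ... and each block may execute at most once per call (no loops).
static bool enterBlock(std::set<int> &ExecutedBlocks, int BB) {
  return ExecutedBlocks.insert(BB).second; // false the second time: looped!
}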
+
+/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
+/// we can. Return true if we can, false otherwise.
+static bool EvaluateStaticConstructor(Function *F, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Call the function.
+ Evaluator Eval(TD, TLI);
Constant *RetValDummy;
- bool EvalSuccess = EvaluateFunction(F, RetValDummy,
- SmallVector<Constant*, 0>(), CallStack,
- MutatedMemory, AllocaTmps,
- SimpleConstants, TD);
+ bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
+ SmallVector<Constant*, 0>());
if (EvalSuccess) {
// We succeeded at evaluation: commit the result.
DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
- << F->getName() << "' to " << MutatedMemory.size()
+ << F->getName() << "' to " << Eval.getMutatedMemory().size()
<< " stores.\n");
- for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(),
- E = MutatedMemory.end(); I != E; ++I)
+ for (DenseMap<Constant*, Constant*>::const_iterator I =
+ Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end();
+ I != E; ++I)
CommitValueTo(I->second, I->first);
- }
-
- // At this point, we are done interpreting. If we created any 'alloca'
- // temporaries, release them now.
- while (!AllocaTmps.empty()) {
- GlobalVariable *Tmp = AllocaTmps.back();
- AllocaTmps.pop_back();
-
- // If there are still users of the alloca, the program is doing something
- // silly, e.g. storing the address of the alloca somewhere and using it
- // later. Since this is undefined, we'll just make it be null.
- if (!Tmp->use_empty())
- Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
- delete Tmp;
+ for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I =
+ Eval.getInvariants().begin(), E = Eval.getInvariants().end();
+ I != E; ++I)
+ (*I)->setConstant(true);
}
return EvalSuccess;
}
-
-
/// OptimizeGlobalCtorsList - Simplify and evaluation global ctors if possible.
/// Return true if anything changed.
bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
@@ -2610,7 +2719,6 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
bool MadeChange = false;
if (Ctors.empty()) return false;
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
// Loop over global ctors, optimizing them when we can.
for (unsigned i = 0; i != Ctors.size(); ++i) {
Function *F = Ctors[i];
@@ -2628,7 +2736,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
if (F->empty()) continue;
// If we can evaluate the ctor at compile time, do.
- if (EvaluateStaticConstructor(F, TD)) {
+ if (EvaluateStaticConstructor(F, TD, TLI)) {
Ctors.erase(Ctors.begin()+i);
MadeChange = true;
--i;
@@ -2700,12 +2808,15 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
return Changed;
}
-static Function *FindCXAAtExit(Module &M) {
- Function *Fn = M.getFunction("__cxa_atexit");
+static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
+ if (!TLI->has(LibFunc::cxa_atexit))
+ return 0;
+
+ Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));
if (!Fn)
return 0;
-
+
FunctionType *FTy = Fn->getFunctionType();
// Checking that the function has the right return type, the right number of
@@ -2724,7 +2835,8 @@ static Function *FindCXAAtExit(Module &M) {
/// destructor and can therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code so we only look for a function with a single basic block, where
-/// the only allowed instructions are 'ret' or 'call' to empty C++ dtor.
+/// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
+/// other side-effect free instructions.
static bool cxxDtorIsEmpty(const Function &Fn,
SmallPtrSet<const Function *, 8> &CalledFunctions) {
// FIXME: We could eliminate C++ destructors if they're readonly/readnone and
@@ -2757,9 +2869,9 @@ static bool cxxDtorIsEmpty(const Function &Fn,
if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
return false;
} else if (isa<ReturnInst>(*I))
- return true;
- else
- return false;
+ return true; // We're done.
+ else if (I->mayHaveSideEffects())
+ return false; // Destructor with side effects, bail.
}
return false;
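For reference, the kind of source this recognizes (illustrative C++, not taken from the patch):

// Both destructors reduce to a ret (plus, after this change, other
// side-effect-free instructions), so their __cxa_atexit registrations
// can be dropped.
struct Base { ~Base() {} };
struct Derived : Base { ~Derived() {} };  // only calls the empty ~Base()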
@@ -2815,10 +2927,13 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
+ TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
+
// Try to find the llvm.globalctors list.
GlobalVariable *GlobalCtors = FindGlobalCtors(M);
- Function *CXAAtExitFn = FindCXAAtExit(M);
+ Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
bool LocalChange = true;
while (LocalChange) {
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
index c0426da..664ddf6 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -32,34 +32,21 @@ namespace {
 // AlwaysInliner only inlines functions that are marked as "always inline".
class AlwaysInliner : public Inliner {
- // Functions that are never inlined
- SmallPtrSet<const Function*, 16> NeverInline;
- InlineCostAnalyzer CA;
public:
// Use extremely low threshold.
- AlwaysInliner() : Inliner(ID, -2000000000) {
+ AlwaysInliner() : Inliner(ID, -2000000000, /*InsertLifetime*/true) {
initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
}
- static char ID; // Pass identification, replacement for typeid
- InlineCost getInlineCost(CallSite CS) {
- return CA.getInlineCost(CS, NeverInline);
- }
- float getInlineFudgeFactor(CallSite CS) {
- return CA.getInlineFudgeFactor(CS);
- }
- void resetCachedCostInfo(Function *Caller) {
- CA.resetCachedCostInfo(Caller);
- }
- void growCachedCostInfo(Function* Caller, Function* Callee) {
- CA.growCachedCostInfo(Caller, Callee);
+ AlwaysInliner(bool InsertLifetime) : Inliner(ID, -2000000000,
+ InsertLifetime) {
+ initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
}
+ static char ID; // Pass identification, replacement for typeid
+ virtual InlineCost getInlineCost(CallSite CS);
virtual bool doFinalization(CallGraph &CG) {
- return removeDeadFunctions(CG, &NeverInline);
+ return removeDeadFunctions(CG, /*AlwaysInlineOnly=*/true);
}
virtual bool doInitialization(CallGraph &CG);
- void releaseMemory() {
- CA.clear();
- }
};
}
@@ -72,17 +59,74 @@ INITIALIZE_PASS_END(AlwaysInliner, "always-inline",
Pass *llvm::createAlwaysInlinerPass() { return new AlwaysInliner(); }
-// doInitialization - Initializes the vector of functions that have not
-// been annotated with the "always inline" attribute.
-bool AlwaysInliner::doInitialization(CallGraph &CG) {
- CA.setTargetData(getAnalysisIfAvailable<TargetData>());
+Pass *llvm::createAlwaysInlinerPass(bool InsertLifetime) {
+ return new AlwaysInliner(InsertLifetime);
+}
+
+/// \brief Minimal filter to detect invalid constructs for inlining.
+static bool isInlineViable(Function &F) {
+ bool ReturnsTwice = F.hasFnAttr(Attribute::ReturnsTwice);
+ for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
+ // Disallow inlining of functions which contain an indirect branch.
+ if (isa<IndirectBrInst>(BI->getTerminator()))
+ return false;
- Module &M = CG.getModule();
+ for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
+ ++II) {
+ CallSite CS(II);
+ if (!CS)
+ continue;
- for (Module::iterator I = M.begin(), E = M.end();
- I != E; ++I)
- if (!I->isDeclaration() && !I->hasFnAttr(Attribute::AlwaysInline))
- NeverInline.insert(I);
+ // Disallow recursive calls.
+ if (&F == CS.getCalledFunction())
+ return false;
+ // Disallow calls which expose returns-twice to a function not previously
+ // attributed as such.
+ if (!ReturnsTwice && CS.isCall() &&
+ cast<CallInst>(CS.getInstruction())->canReturnTwice())
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// \brief Get the inline cost for the always-inliner.
+///
+/// The always inliner *only* handles functions which are marked with the
+/// attribute to force inlining. As such, it is dramatically simpler and avoids
+/// using the powerful (but expensive) inline cost analysis. Instead it uses
+/// a very simple and boring direct walk of the instructions looking for
+/// impossible-to-inline constructs.
+///
+/// Note, it would be possible to go to some lengths to cache the information
+/// computed here, but as we only expect to do this for relatively few and
+/// small functions which have the explicit attribute to force inlining, it is
+/// likely not worth it in practice.
+InlineCost AlwaysInliner::getInlineCost(CallSite CS) {
+ Function *Callee = CS.getCalledFunction();
+ // We assume indirect calls aren't calling an always-inline function.
+ if (!Callee) return InlineCost::getNever();
+
+ // We can't inline calls to external functions.
+ // FIXME: We shouldn't even get here.
+ if (Callee->isDeclaration()) return InlineCost::getNever();
+
+ // Return never for anything not marked as always inline.
+ if (!Callee->hasFnAttr(Attribute::AlwaysInline))
+ return InlineCost::getNever();
+
+ // Do some minimal analysis to preclude non-viable functions.
+ if (!isInlineViable(*Callee))
+ return InlineCost::getNever();
+
+ // Otherwise, force inlining.
+ return InlineCost::getAlways();
+}
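What the rewritten always-inliner keys on, shown at the source level (illustrative code using the GCC/Clang attribute):

// Marked definitions bypass the cost model entirely; getInlineCost returns
// Always unless isInlineViable finds an indirect branch, recursion, or a
// returns-twice call in a caller not attributed as such.
__attribute__((always_inline)) static inline int square(int x) { return x * x; }

int caller(int y) {
  return square(y) + 1;  // forced inline, independent of the -2000000000 threshold
}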
+
+// doInitialization - Initializes the vector of functions that have not
+// been annotated with the "always inline" attribute.
+bool AlwaysInliner::doInitialization(CallGraph &CG) {
return false;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
index 84dd4fd..50038d8 100644
--- a/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/InlineSimple.cpp
@@ -23,40 +23,26 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
namespace {
class SimpleInliner : public Inliner {
- // Functions that are never inlined
- SmallPtrSet<const Function*, 16> NeverInline;
InlineCostAnalyzer CA;
public:
SimpleInliner() : Inliner(ID) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
- SimpleInliner(int Threshold) : Inliner(ID, Threshold) {
+ SimpleInliner(int Threshold) : Inliner(ID, Threshold,
+ /*InsertLifetime*/true) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
static char ID; // Pass identification, replacement for typeid
InlineCost getInlineCost(CallSite CS) {
- return CA.getInlineCost(CS, NeverInline);
- }
- float getInlineFudgeFactor(CallSite CS) {
- return CA.getInlineFudgeFactor(CS);
- }
- void resetCachedCostInfo(Function *Caller) {
- CA.resetCachedCostInfo(Caller);
- }
- void growCachedCostInfo(Function* Caller, Function* Callee) {
- CA.growCachedCostInfo(Caller, Callee);
+ return CA.getInlineCost(CS, getInlineThreshold(CS));
}
virtual bool doInitialization(CallGraph &CG);
- void releaseMemory() {
- CA.clear();
- }
};
}
@@ -77,44 +63,6 @@ Pass *llvm::createFunctionInliningPass(int Threshold) {
// annotated with the noinline attribute.
bool SimpleInliner::doInitialization(CallGraph &CG) {
CA.setTargetData(getAnalysisIfAvailable<TargetData>());
-
- Module &M = CG.getModule();
-
- for (Module::iterator I = M.begin(), E = M.end();
- I != E; ++I)
- if (!I->isDeclaration() && I->hasFnAttr(Attribute::NoInline))
- NeverInline.insert(I);
-
- // Get llvm.noinline
- GlobalVariable *GV = M.getNamedGlobal("llvm.noinline");
-
- if (GV == 0)
- return false;
-
- // Don't crash on invalid code
- if (!GV->hasDefinitiveInitializer())
- return false;
-
- const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
-
- if (InitList == 0)
- return false;
-
- // Iterate over each element and add to the NeverInline set
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
-
- // Get Source
- const Constant *Elt = InitList->getOperand(i);
-
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Elt))
- if (CE->getOpcode() == Instruction::BitCast)
- Elt = CE->getOperand(0);
-
- // Insert into set of functions to never inline
- if (const Function *F = dyn_cast<Function>(Elt))
- NeverInline.insert(F);
- }
-
return false;
}
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index f00935b..dc9cbfb 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -36,6 +36,11 @@ STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");
+// This weirdly named statistic tracks the number of times that, when attempting
+// to inline a function A into B, we analyze the callers of B to see whether
+// inlining into them would be more profitable but blocked by this inline step.
+STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");
+
static cl::opt<int>
InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
cl::desc("Control the amount of inlining to perform (default = 225)"));
@@ -48,11 +53,12 @@ HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
const int OptSizeThreshold = 75;
Inliner::Inliner(char &ID)
- : CallGraphSCCPass(ID), InlineThreshold(InlineLimit) {}
+ : CallGraphSCCPass(ID), InlineThreshold(InlineLimit), InsertLifetime(true) {}
-Inliner::Inliner(char &ID, int Threshold)
+Inliner::Inliner(char &ID, int Threshold, bool InsertLifetime)
: CallGraphSCCPass(ID), InlineThreshold(InlineLimit.getNumOccurrences() > 0 ?
- InlineLimit : Threshold) {}
+ InlineLimit : Threshold),
+ InsertLifetime(InsertLifetime) {}
/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph. If the derived class implements this method, it should
@@ -75,13 +81,13 @@ InlinedArrayAllocasTy;
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
InlinedArrayAllocasTy &InlinedArrayAllocas,
- int InlineHistory) {
+ int InlineHistory, bool InsertLifetime) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
// Try to inline the function. Get the list of static allocas that were
// inlined.
- if (!InlineFunction(CS, IFI))
+ if (!InlineFunction(CS, IFI, InsertLifetime))
return false;
// If the inlined function had a higher stack protection level than the
@@ -230,29 +236,37 @@ bool Inliner::shouldInline(CallSite CS) {
return false;
}
- int Cost = IC.getValue();
Function *Caller = CS.getCaller();
- int CurrentThreshold = getInlineThreshold(CS);
- float FudgeFactor = getInlineFudgeFactor(CS);
- int AdjThreshold = (int)(CurrentThreshold * FudgeFactor);
- if (Cost >= AdjThreshold) {
- DEBUG(dbgs() << " NOT Inlining: cost=" << Cost
- << ", thres=" << AdjThreshold
+ if (!IC) {
+ DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost()
+ << ", thres=" << (IC.getCostDelta() + IC.getCost())
<< ", Call: " << *CS.getInstruction() << "\n");
return false;
}
- // Try to detect the case where the current inlining candidate caller
- // (call it B) is a static function and is an inlining candidate elsewhere,
- // and the current candidate callee (call it C) is large enough that
- // inlining it into B would make B too big to inline later. In these
- // circumstances it may be best not to inline C into B, but to inline B
- // into its callers.
- if (Caller->hasLocalLinkage()) {
+ // Try to detect the case where the current inlining candidate caller (call
+ // it B) is a static or linkonce-ODR function and is an inlining candidate
+ // elsewhere, and the current candidate callee (call it C) is large enough
+ // that inlining it into B would make B too big to inline later. In these
+ // circumstances it may be best not to inline C into B, but to inline B into
+ // its callers.
+ //
+ // This only applies to static and linkonce-ODR functions because those are
+ // expected to be available for inlining in the translation units where they
+ // are used. Thus we will always have the opportunity to make local inlining
+ // decisions. Importantly the linkonce-ODR linkage covers inline functions
+ // and templates in C++.
+ //
+ // FIXME: All of this logic should be sunk into getInlineCost. It relies on
+ // the internal implementation of the inline cost metrics rather than
+ // treating them as truly abstract units etc.
+ if (Caller->hasLocalLinkage() ||
+ Caller->getLinkage() == GlobalValue::LinkOnceODRLinkage) {
int TotalSecondaryCost = 0;
- bool outerCallsFound = false;
+ // The candidate cost to be imposed upon the current function.
+ int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1);
// This bool tracks what happens if we do NOT inline C into B.
- bool callerWillBeRemoved = true;
+ bool callerWillBeRemoved = Caller->hasLocalLinkage();
// This bool tracks what happens if we DO inline C into B.
bool inliningPreventsSomeOuterInline = false;
for (Value::use_iterator I = Caller->use_begin(), E =Caller->use_end();
@@ -268,26 +282,20 @@ bool Inliner::shouldInline(CallSite CS) {
}
InlineCost IC2 = getInlineCost(CS2);
- if (IC2.isNever())
+ ++NumCallerCallersAnalyzed;
+ if (!IC2) {
callerWillBeRemoved = false;
- if (IC2.isAlways() || IC2.isNever())
+ continue;
+ }
+ if (IC2.isAlways())
continue;
- outerCallsFound = true;
- int Cost2 = IC2.getValue();
- int CurrentThreshold2 = getInlineThreshold(CS2);
- float FudgeFactor2 = getInlineFudgeFactor(CS2);
-
- if (Cost2 >= (int)(CurrentThreshold2 * FudgeFactor2))
- callerWillBeRemoved = false;
-
- // See if we have this case. We subtract off the penalty
- // for the call instruction, which we would be deleting.
- if (Cost2 < (int)(CurrentThreshold2 * FudgeFactor2) &&
- Cost2 + Cost - (InlineConstants::CallPenalty + 1) >=
- (int)(CurrentThreshold2 * FudgeFactor2)) {
+ // See if inlining of the original callsite would erase the cost delta of
+ // this callsite. We subtract off the penalty for the call instruction,
+ // which we would be deleting.
+ if (IC2.getCostDelta() <= CandidateCost) {
inliningPreventsSomeOuterInline = true;
- TotalSecondaryCost += Cost2;
+ TotalSecondaryCost += IC2.getCost();
}
}
// If all outer calls to Caller would get inlined, the cost for the last
@@ -297,17 +305,16 @@ bool Inliner::shouldInline(CallSite CS) {
if (callerWillBeRemoved && Caller->use_begin() != Caller->use_end())
TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;
- if (outerCallsFound && inliningPreventsSomeOuterInline &&
- TotalSecondaryCost < Cost) {
- DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() <<
- " Cost = " << Cost <<
+ if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
+ DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() <<
+ " Cost = " << IC.getCost() <<
", outer Cost = " << TotalSecondaryCost << '\n');
return false;
}
}
- DEBUG(dbgs() << " Inlining: cost=" << Cost
- << ", thres=" << AdjThreshold
+ DEBUG(dbgs() << " Inlining: cost=" << IC.getCost()
+ << ", thres=" << (IC.getCostDelta() + IC.getCost())
<< ", Call: " << *CS.getInstruction() << '\n');
return true;
}
@@ -326,7 +333,6 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
return false;
}
-
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallGraph &CG = getAnalysis<CallGraph>();
const TargetData *TD = getAnalysisIfAvailable<TargetData>();
@@ -415,8 +421,6 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CG[Caller]->removeCallEdgeFor(CS);
CS.getInstruction()->eraseFromParent();
++NumCallsDeleted;
- // Update the cached cost info with the missing call
- growCachedCostInfo(Caller, NULL);
} else {
// We can only inline direct calls to non-declarations.
if (Callee == 0 || Callee->isDeclaration()) continue;
@@ -439,7 +443,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// Attempt to inline the function.
if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
- InlineHistoryID))
+ InlineHistoryID, InsertLifetime))
continue;
++NumInlined;
@@ -457,9 +461,6 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
}
}
-
- // Update the cached cost info with the inlined call.
- growCachedCostInfo(Caller, Callee);
}
// If we inlined or deleted the last possible call site to the function,
@@ -479,8 +480,6 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// Remove any call graph edges from the callee to its callees.
CalleeNode->removeAllCalledFunctions();
- resetCachedCostInfo(Callee);
-
// Remove the node for the callee from the call graph and delete it.
delete CG.removeFunctionFromModule(CalleeNode);
++NumDeleted;
@@ -514,29 +513,28 @@ bool Inliner::doFinalization(CallGraph &CG) {
/// removeDeadFunctions - Remove dead functions that are not included in
/// the DNR (Do Not Remove) list.
-bool Inliner::removeDeadFunctions(CallGraph &CG,
- SmallPtrSet<const Function *, 16> *DNR) {
- SmallPtrSet<CallGraphNode*, 16> FunctionsToRemove;
+bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
+ SmallVector<CallGraphNode*, 16> FunctionsToRemove;
// Scan for all of the functions, looking for ones that should now be removed
// from the program. Insert the dead ones in the FunctionsToRemove set.
for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
CallGraphNode *CGN = I->second;
- if (CGN->getFunction() == 0)
- continue;
-
Function *F = CGN->getFunction();
-
+ if (!F || F->isDeclaration())
+ continue;
+
+ // Handle the case when this function is called and we only want to care
+ // about always-inline functions. This is a bit of a hack to share code
+ // between here and the InlineAlways pass.
+ if (AlwaysInlineOnly && !F->hasFnAttr(Attribute::AlwaysInline))
+ continue;
+
// If the only remaining users of the function are dead constants, remove
// them.
F->removeDeadConstantUsers();
- if (DNR && DNR->count(F))
- continue;
- if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() &&
- !F->hasAvailableExternallyLinkage())
- continue;
- if (!F->use_empty())
+ if (!F->isDefTriviallyDead())
continue;
// Remove any call graph edges from the function to its callees.
@@ -548,24 +546,27 @@ bool Inliner::removeDeadFunctions(CallGraph &CG,
CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);
// Remove the node for the callee from the call graph and delete it.
- FunctionsToRemove.insert(CGN);
+ FunctionsToRemove.push_back(CGN);
}
+ if (FunctionsToRemove.empty())
+ return false;
// Now that we know which functions to delete, do so. We didn't want to do
// this inline, because that would invalidate our CallGraph::iterator
// objects. :(
//
- // Note that it doesn't matter that we are iterating over a non-stable set
+ // Note that it doesn't matter that we are iterating over a non-stable order
// here to do this, it doesn't matter which order the functions are deleted
// in.
- bool Changed = false;
- for (SmallPtrSet<CallGraphNode*, 16>::iterator I = FunctionsToRemove.begin(),
- E = FunctionsToRemove.end(); I != E; ++I) {
- resetCachedCostInfo((*I)->getFunction());
+ array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
+ FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
+ FunctionsToRemove.end()),
+ FunctionsToRemove.end());
+ for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
+ E = FunctionsToRemove.end();
+ I != E; ++I) {
delete CG.removeFunctionFromModule(*I);
++NumDeleted;
- Changed = true;
}
-
- return Changed;
+ return true;
}
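// A minimal standalone sketch of the deferral arithmetic used above; the
// struct and function names here are illustrative, not the pass's actual API.
#include <vector>

struct CostInfo { int Cost; int CostDelta; }; // per-callsite numbers

// Defer inlining C into B when inlining B into its own callers is the better
// global deal: some outer inline of B would be blocked, yet the total outer
// cost stays below the cost of inlining C directly.
static bool shouldDefer(const CostInfo &CtoB,
                        const std::vector<CostInfo> &OuterCalls,
                        int CallPenalty) {
  int CandidateCost = CtoB.Cost - (CallPenalty + 1);
  int TotalSecondaryCost = 0;
  bool PreventsOuterInline = false;
  for (size_t i = 0, e = OuterCalls.size(); i != e; ++i)
    if (OuterCalls[i].CostDelta <= CandidateCost) { // outer inline erased
      PreventsOuterInline = true;
      TotalSecondaryCost += OuterCalls[i].Cost;
    }
  return PreventsOuterInline && TotalSecondaryCost < CtoB.Cost;
}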
diff --git a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
index 7cb1d18..cd29e7a 100644
--- a/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Internalize.cpp
@@ -122,6 +122,9 @@ bool InternalizePass::runOnModule(Module &M) {
bool Changed = false;
+ // Never internalize functions which code-gen might insert.
+ ExternalNames.insert("__stack_chk_fail");
+
// Mark all functions not in the api as internal.
// FIXME: maybe use private linkage?
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
@@ -148,9 +151,11 @@ bool InternalizePass::runOnModule(Module &M) {
// won't find them. (see MachineModuleInfo.)
ExternalNames.insert("llvm.global_ctors");
ExternalNames.insert("llvm.global_dtors");
- ExternalNames.insert("llvm.noinline");
ExternalNames.insert("llvm.global.annotations");
+ // Never internalize symbols code-gen inserts.
+ ExternalNames.insert("__stack_chk_guard");
+
// Mark all global variables with initializers that are not in the api as
// internal as well.
// FIXME: maybe use private linkage?
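// A hedged sketch of the decision the hunk above changes: any name on the
// ExternalNames list survives internalization, and the two stack-protector
// symbols now land on that list unconditionally because codegen references
// them behind the optimizer's back. (Helper name is hypothetical.)
#include <set>
#include <string>

static bool mayInternalize(const std::string &Name,
                           const std::set<std::string> &ExternalNames) {
  return ExternalNames.count(Name) == 0; // "__stack_chk_fail" etc. stay put
}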
diff --git a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
index 8fdfd72..a1b0a45 100644
--- a/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -22,14 +22,19 @@
#include "llvm/PassManager.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Vectorize.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ManagedStatic.h"
using namespace llvm;
+static cl::opt<bool>
+RunVectorization("vectorize", cl::desc("Run vectorization passes"));
+
PassManagerBuilder::PassManagerBuilder() {
OptLevel = 2;
SizeLevel = 0;
@@ -38,6 +43,7 @@ PassManagerBuilder::PassManagerBuilder() {
DisableSimplifyLibCalls = false;
DisableUnitAtATime = false;
DisableUnrollLoops = false;
+ Vectorize = RunVectorization;
}
PassManagerBuilder::~PassManagerBuilder() {
@@ -101,6 +107,7 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(Inliner);
Inliner = 0;
}
+ addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
return;
}
@@ -110,6 +117,8 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
addInitialAliasAnalysisPasses(MPM);
if (!DisableUnitAtATime) {
+ addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);
+
MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
MPM.add(createIPSCCPPass()); // IP SCCP
@@ -170,6 +179,13 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
addExtensionsToPM(EP_ScalarOptimizerLate, MPM);
+ if (Vectorize) {
+ MPM.add(createBBVectorizePass());
+ MPM.add(createInstructionCombiningPass());
+ if (OptLevel > 1)
+ MPM.add(createGVNPass()); // Remove redundancies
+ }
+
MPM.add(createAggressiveDCEPass()); // Delete dead instructions
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createInstructionCombiningPass()); // Clean up after everything.
@@ -186,11 +202,13 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
if (OptLevel > 1)
MPM.add(createConstantMergePass()); // Merge dup global constants
}
+ addExtensionsToPM(EP_OptimizerLast, MPM);
}
void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
bool Internalize,
- bool RunInliner) {
+ bool RunInliner,
+ bool DisableGVNLoadPRE) {
// Provide AliasAnalysis services for optimizations.
addInitialAliasAnalysisPasses(PM);
@@ -246,9 +264,9 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM,
PM.add(createFunctionAttrsPass()); // Add nocapture.
PM.add(createGlobalsModRefPass()); // IP alias analysis.
- PM.add(createLICMPass()); // Hoist loop invariants.
- PM.add(createGVNPass()); // Remove redundancies.
- PM.add(createMemCpyOptPass()); // Remove dead memcpys.
+ PM.add(createLICMPass()); // Hoist loop invariants.
+ PM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
+ PM.add(createMemCpyOptPass()); // Remove dead memcpys.
// Nuke dead stores.
PM.add(createDeadStoreEliminationPass());
@@ -340,4 +358,3 @@ void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
PassManagerBase *LPM = unwrap(PM);
Builder->populateLTOPassManager(*LPM, Internalize, RunInliner);
}
-
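// How a frontend might opt into the new vectorization knob; a sketch only,
// assuming the 3.1-era header locations used by this tree.
#include "llvm/PassManager.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"

static void buildPipeline(llvm::PassManager &MPM) {
  llvm::PassManagerBuilder Builder;
  Builder.OptLevel = 2;
  Builder.Vectorize = true; // equivalent to passing -vectorize
  Builder.populateModulePassManager(MPM);
}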
diff --git a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
index cbb80f0..c8cc8fd 100644
--- a/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -101,8 +101,7 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
// Check to see if this function performs an unwind or calls an
// unwinding function.
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
- if (CheckUnwind && (isa<UnwindInst>(BB->getTerminator()) ||
- isa<ResumeInst>(BB->getTerminator()))) {
+ if (CheckUnwind && isa<ResumeInst>(BB->getTerminator())) {
// Uses unwind / resume!
SCCMightUnwind = true;
} else if (CheckReturn && isa<ReturnInst>(BB->getTerminator())) {
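// With the unwind instruction gone from the IR, 'resume' is the only
// terminator that can propagate an exception out of a function body; a
// minimal sketch of the scan the hunk above simplifies to (the helper
// name is hypothetical).
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Support/Casting.h"

static bool functionMightUnwind(const llvm::Function &F) {
  for (llvm::Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (llvm::isa<llvm::ResumeInst>(BB->getTerminator()))
      return true;
  return false;
}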
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
index 3808278..199df51 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
@@ -22,6 +22,7 @@
namespace llvm {
class CallSite;
class TargetData;
+ class TargetLibraryInfo;
class DbgDeclareInst;
class MemIntrinsic;
class MemSetInst;
@@ -71,6 +72,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner
: public FunctionPass,
public InstVisitor<InstCombiner, Instruction*> {
TargetData *TD;
+ TargetLibraryInfo *TLI;
bool MadeIRChange;
public:
/// Worklist - All of the instructions that need to be simplified.
@@ -92,9 +94,11 @@ public:
bool DoOneIteration(Function &F, unsigned ItNum);
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
-
+
TargetData *getTargetData() const { return TD; }
+ TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
+
// Visitation implementation - Implement instruction combining for different
// instruction types. The semantics are as follows:
// Return Value:
@@ -287,9 +291,9 @@ public:
return 0; // Don't do anything with FI
}
- void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
+ void ComputeMaskedBits(Value *V, APInt &KnownZero,
APInt &KnownOne, unsigned Depth = 0) const {
- return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
+ return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
}
bool MaskedValueIsZero(Value *V, const APInt &Mask,
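// The Mask parameter is gone: callers now always receive the full known-bits
// sets. A sketch of the new call shape, assuming V has integer type of width
// BitWidth (the helper name is hypothetical):
static bool signBitKnownZero(InstCombiner &IC, Value *V, unsigned BitWidth) {
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  IC.ComputeMaskedBits(V, KnownZero, KnownOne); // Depth defaults to 0
  return KnownZero[BitWidth - 1];
}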
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index d10046c..05e702f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -136,6 +136,18 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
return BinaryOperator::CreateAShr(NewShl, ShAmt);
}
+
+ // If this is a xor that was canonicalized from a sub, turn it back into
+ // a sub and fuse this add with it.
+ if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
+ IntegerType *IT = cast<IntegerType>(I.getType());
+ APInt LHSKnownOne(IT->getBitWidth(), 0);
+ APInt LHSKnownZero(IT->getBitWidth(), 0);
+ ComputeMaskedBits(XorLHS, LHSKnownZero, LHSKnownOne);
+ if ((XorRHS->getValue() | LHSKnownZero).isAllOnesValue())
+ return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
+ XorLHS);
+ }
}
}
@@ -189,14 +201,13 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// A+B --> A|B iff A and B have no bits set in common.
if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
- APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
APInt LHSKnownOne(IT->getBitWidth(), 0);
APInt LHSKnownZero(IT->getBitWidth(), 0);
- ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
+ ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
if (LHSKnownZero != 0) {
APInt RHSKnownOne(IT->getBitWidth(), 0);
APInt RHSKnownZero(IT->getBitWidth(), 0);
- ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
+ ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
// No bits in common -> bitwise or.
if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
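// Why the A+B --> A|B fold is sound: when the known-zero sets prove no bit
// position is set in both operands, addition can never carry, so it agrees
// with bitwise or. A quick self-contained check:
#include <cassert>
#include <cstdint>

int main() {
  uint8_t A = 0xF0, B = 0x0F;            // disjoint bit patterns
  assert((A & B) == 0);                  // the precondition the fold proves
  assert((uint8_t)(A + B) == (A | B));   // 0xFF either way
  return 0;
}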
@@ -466,57 +477,57 @@ Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
// this.
bool Swapped = false;
- GetElementPtrInst *GEP = 0;
- ConstantExpr *CstGEP = 0;
-
- // TODO: Could also optimize &A[i] - &A[j] -> "i-j", and "&A.foo[i] - &A.foo".
+ GEPOperator *GEP1 = 0, *GEP2 = 0;
+
// For now we require one side to be the base pointer "A" or a constant
- // expression derived from it.
- if (GetElementPtrInst *LHSGEP = dyn_cast<GetElementPtrInst>(LHS)) {
+ // GEP derived from it.
+ if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
// (gep X, ...) - X
if (LHSGEP->getOperand(0) == RHS) {
- GEP = LHSGEP;
+ GEP1 = LHSGEP;
Swapped = false;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(RHS)) {
- // (gep X, ...) - (ce_gep X, ...)
- if (CE->getOpcode() == Instruction::GetElementPtr &&
- LHSGEP->getOperand(0) == CE->getOperand(0)) {
- CstGEP = CE;
- GEP = LHSGEP;
+ } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
+ // (gep X, ...) - (gep X, ...)
+ if (LHSGEP->getOperand(0)->stripPointerCasts() ==
+ RHSGEP->getOperand(0)->stripPointerCasts()) {
+ GEP2 = RHSGEP;
+ GEP1 = LHSGEP;
Swapped = false;
}
}
}
- if (GetElementPtrInst *RHSGEP = dyn_cast<GetElementPtrInst>(RHS)) {
+ if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
// X - (gep X, ...)
if (RHSGEP->getOperand(0) == LHS) {
- GEP = RHSGEP;
+ GEP1 = RHSGEP;
Swapped = true;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(LHS)) {
- // (ce_gep X, ...) - (gep X, ...)
- if (CE->getOpcode() == Instruction::GetElementPtr &&
- RHSGEP->getOperand(0) == CE->getOperand(0)) {
- CstGEP = CE;
- GEP = RHSGEP;
+ } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
+ // (gep X, ...) - (gep X, ...)
+ if (RHSGEP->getOperand(0)->stripPointerCasts() ==
+ LHSGEP->getOperand(0)->stripPointerCasts()) {
+ GEP2 = LHSGEP;
+ GEP1 = RHSGEP;
Swapped = true;
}
}
}
- if (GEP == 0)
+ // Avoid duplicating the arithmetic if GEP2 has non-constant indices and
+ // multiple users.
+ if (GEP1 == 0 ||
+ (GEP2 != 0 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
return 0;
// Emit the offset of the GEP and an intptr_t.
- Value *Result = EmitGEPOffset(GEP);
+ Value *Result = EmitGEPOffset(GEP1);
// If we had a constant expression GEP on the other side offsetting the
// pointer, subtract it from the offset we have.
- if (CstGEP) {
- Value *CstOffset = EmitGEPOffset(CstGEP);
- Result = Builder->CreateSub(Result, CstOffset);
+ if (GEP2) {
+ Value *Offset = EmitGEPOffset(GEP2);
+ Result = Builder->CreateSub(Result, Offset);
}
-
// If we have p - gep(p, ...) then we have to negate the result.
if (Swapped)
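// What the rewrite computes in source-level terms: with both pointers based
// on the same object, the difference of the two GEP offsets is the whole
// answer. A standalone check:
#include <cassert>
#include <cstddef>

int main() {
  int A[16];
  std::ptrdiff_t i = 11, j = 4;
  assert((&A[i] - &A[j]) == i - j); // offsets subtract; no pointer needed
  return 0;
}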
@@ -587,6 +598,9 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
ConstantInt *C2;
if (match(Op1, m_Add(m_Value(X), m_ConstantInt(C2))))
return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
+
+ if (SimplifyDemandedInstructionBits(I))
+ return &I;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 5e0bfe8..0dbe11d 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -14,6 +14,7 @@
#include "InstCombine.h"
#include "llvm/Intrinsics.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Transforms/Utils/CmpInstAnalysis.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
@@ -62,50 +63,6 @@ static inline Value *dyn_castNotVal(Value *V) {
return 0;
}
-
-/// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
-/// are carefully arranged to allow folding of expressions such as:
-///
-/// (A < B) | (A > B) --> (A != B)
-///
-/// Note that this is only valid if the first and second predicates have the
-/// same sign. Is illegal to do: (A u< B) | (A s> B)
-///
-/// Three bits are used to represent the condition, as follows:
-/// 0 A > B
-/// 1 A == B
-/// 2 A < B
-///
-/// <=> Value Definition
-/// 000 0 Always false
-/// 001 1 A > B
-/// 010 2 A == B
-/// 011 3 A >= B
-/// 100 4 A < B
-/// 101 5 A != B
-/// 110 6 A <= B
-/// 111 7 Always true
-///
-static unsigned getICmpCode(const ICmpInst *ICI) {
- switch (ICI->getPredicate()) {
- // False -> 0
- case ICmpInst::ICMP_UGT: return 1; // 001
- case ICmpInst::ICMP_SGT: return 1; // 001
- case ICmpInst::ICMP_EQ: return 2; // 010
- case ICmpInst::ICMP_UGE: return 3; // 011
- case ICmpInst::ICMP_SGE: return 3; // 011
- case ICmpInst::ICMP_ULT: return 4; // 100
- case ICmpInst::ICMP_SLT: return 4; // 100
- case ICmpInst::ICMP_NE: return 5; // 101
- case ICmpInst::ICMP_ULE: return 6; // 110
- case ICmpInst::ICMP_SLE: return 6; // 110
- // True -> 7
- default:
- llvm_unreachable("Invalid ICmp predicate!");
- return 0;
- }
-}
-
/// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes an
/// fcmp predicate into a three-bit mask. It also returns whether it is an
/// ordered predicate by reference.
@@ -130,31 +87,19 @@ static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
default:
// Not expecting FCMP_FALSE and FCMP_TRUE;
llvm_unreachable("Unexpected FCmp predicate!");
- return 0;
}
}
-/// getICmpValue - This is the complement of getICmpCode, which turns an
+/// getNewICmpValue - This is the complement of getICmpCode, which turns an
/// opcode and two operands into either a constant true or false, or a brand
/// new ICmp instruction. The sign is passed in to determine which kind
/// of predicate to use in the new icmp instruction.
-static Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
- InstCombiner::BuilderTy *Builder) {
- CmpInst::Predicate Pred;
- switch (Code) {
- default: assert(0 && "Illegal ICmp code!");
- case 0: // False.
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
- case 1: Pred = Sign ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
- case 2: Pred = ICmpInst::ICMP_EQ; break;
- case 3: Pred = Sign ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
- case 4: Pred = Sign ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
- case 5: Pred = ICmpInst::ICMP_NE; break;
- case 6: Pred = Sign ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
- case 7: // True.
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
- }
- return Builder->CreateICmp(Pred, LHS, RHS);
+static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
+ InstCombiner::BuilderTy *Builder) {
+ ICmpInst::Predicate NewPred;
+ if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
+ return NewConstant;
+ return Builder->CreateICmp(NewPred, LHS, RHS);
}
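// The three-bit predicate codes (now provided by CmpInstAnalysis) turn
// predicate combination into bit algebra; a self-contained illustration:
#include <cassert>

int main() {
  const unsigned GT = 1, EQ = 2, LT = 4;  // one bit per outcome
  assert((LT | GT) == 5);                 // (A < B) | (A > B)  --> A != B
  assert((GT | EQ) == 3);                 // (A > B) | (A == B) --> A >= B
  assert((LT & (LT | EQ)) == LT);         // (A < B) & (A <= B) --> A < B
  return 0;
}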
/// getFCmpValue - This is the complement of getFCmpCode, which turns an
@@ -165,7 +110,7 @@ static Value *getFCmpValue(bool isordered, unsigned code,
InstCombiner::BuilderTy *Builder) {
CmpInst::Predicate Pred;
switch (code) {
- default: assert(0 && "Illegal FCmp code!");
+ default: llvm_unreachable("Illegal FCmp code!");
case 0: Pred = isordered ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO; break;
case 1: Pred = isordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; break;
case 2: Pred = isordered ? FCmpInst::FCMP_OEQ : FCmpInst::FCMP_UEQ; break;
@@ -180,14 +125,6 @@ static Value *getFCmpValue(bool isordered, unsigned code,
return Builder->CreateFCmp(Pred, LHS, RHS);
}
-/// PredicatesFoldable - Return true if both predicates match sign or if at
-/// least one of them is an equality comparison (which is signless).
-static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
- return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
- (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
- (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
-}
-
// OptAndOp - This handles expressions of the form ((val OP C1) & C2). Where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
// guaranteed to be a binary operator.
@@ -558,6 +495,38 @@ static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
return result;
}
+/// decomposeBitTestICmp - Decompose an icmp into the form ((X & Y) pred Z)
+/// if possible. The returned predicate is either == or !=. Returns false if
+/// decomposition fails.
+static bool decomposeBitTestICmp(const ICmpInst *I, ICmpInst::Predicate &Pred,
+ Value *&X, Value *&Y, Value *&Z) {
+ // X < 0 is equivalent to (X & SignBit) != 0.
+ if (I->getPredicate() == ICmpInst::ICMP_SLT)
+ if (ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
+ if (C->isZero()) {
+ X = I->getOperand(0);
+ Y = ConstantInt::get(I->getContext(),
+ APInt::getSignBit(C->getBitWidth()));
+ Pred = ICmpInst::ICMP_NE;
+ Z = C;
+ return true;
+ }
+
+ // X > -1 is equivalent to (X & SignBit) == 0.
+ if (I->getPredicate() == ICmpInst::ICMP_SGT)
+ if (ConstantInt *C = dyn_cast<ConstantInt>(I->getOperand(1)))
+ if (C->isAllOnesValue()) {
+ X = I->getOperand(0);
+ Y = ConstantInt::get(I->getContext(),
+ APInt::getSignBit(C->getBitWidth()));
+ Pred = ICmpInst::ICMP_EQ;
+ Z = ConstantInt::getNullValue(C->getType());
+ return true;
+ }
+
+ return false;
+}
+
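// The two equivalences decomposeBitTestICmp relies on, verified exhaustively
// for i8:
#include <cassert>
#include <cstdint>

int main() {
  const uint8_t SignBit = 0x80;
  for (int v = -128; v <= 127; ++v) {
    int8_t X = (int8_t)v;
    assert((X < 0)  == (((uint8_t)X & SignBit) != 0)); // X s< 0
    assert((X > -1) == (((uint8_t)X & SignBit) == 0)); // X s> -1
  }
  return 0;
}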
/// foldLogOpOfMaskedICmpsHelper:
/// handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// return the set of pattern classes (from MaskedICmpType)
@@ -565,10 +534,9 @@ static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
Value*& B, Value*& C,
Value*& D, Value*& E,
- ICmpInst *LHS, ICmpInst *RHS) {
- ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
- if (LHSCC != ICmpInst::ICMP_EQ && LHSCC != ICmpInst::ICMP_NE) return 0;
- if (RHSCC != ICmpInst::ICMP_EQ && RHSCC != ICmpInst::ICMP_NE) return 0;
+ ICmpInst *LHS, ICmpInst *RHS,
+ ICmpInst::Predicate &LHSCC,
+ ICmpInst::Predicate &RHSCC) {
if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType()) return 0;
// vectors are not (yet?) supported
if (LHS->getOperand(0)->getType()->isVectorTy()) return 0;
@@ -582,40 +550,60 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
Value *L1 = LHS->getOperand(0);
Value *L2 = LHS->getOperand(1);
Value *L11,*L12,*L21,*L22;
- if (match(L1, m_And(m_Value(L11), m_Value(L12)))) {
- if (!match(L2, m_And(m_Value(L21), m_Value(L22))))
+ // Check whether the icmp can be decomposed into a bit test.
+ if (decomposeBitTestICmp(LHS, LHSCC, L11, L12, L2)) {
+ L21 = L22 = L1 = 0;
+ } else {
+ // Look for ANDs in the LHS icmp.
+ if (match(L1, m_And(m_Value(L11), m_Value(L12)))) {
+ if (!match(L2, m_And(m_Value(L21), m_Value(L22))))
+ L21 = L22 = 0;
+ } else {
+ if (!match(L2, m_And(m_Value(L11), m_Value(L12))))
+ return 0;
+ std::swap(L1, L2);
L21 = L22 = 0;
- }
- else {
- if (!match(L2, m_And(m_Value(L11), m_Value(L12))))
- return 0;
- std::swap(L1, L2);
- L21 = L22 = 0;
+ }
}
+ // Bail if LHS was an icmp that can't be decomposed into an equality.
+ if (!ICmpInst::isEquality(LHSCC))
+ return 0;
+
Value *R1 = RHS->getOperand(0);
Value *R2 = RHS->getOperand(1);
Value *R11,*R12;
bool ok = false;
- if (match(R1, m_And(m_Value(R11), m_Value(R12)))) {
- if (R11 != 0 && (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22)) {
- A = R11; D = R12; E = R2; ok = true;
+ if (decomposeBitTestICmp(RHS, RHSCC, R11, R12, R2)) {
+ if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
+ A = R11; D = R12;
+ } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
+ A = R12; D = R11;
+ } else {
+ return 0;
}
- else
- if (R12 != 0 && (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22)) {
+ E = R2; R1 = 0; ok = true;
+ } else if (match(R1, m_And(m_Value(R11), m_Value(R12)))) {
+ if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
+ A = R11; D = R12; E = R2; ok = true;
+ } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
A = R12; D = R11; E = R2; ok = true;
}
}
+
+ // Bail if RHS was an icmp that can't be decomposed into an equality.
+ if (!ICmpInst::isEquality(RHSCC))
+ return 0;
+
+ // Look for ANDs on the right side of the RHS icmp.
if (!ok && match(R2, m_And(m_Value(R11), m_Value(R12)))) {
- if (R11 != 0 && (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22)) {
- A = R11; D = R12; E = R1; ok = true;
- }
- else
- if (R12 != 0 && (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22)) {
+ if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
+ A = R11; D = R12; E = R1; ok = true;
+ } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
A = R12; D = R11; E = R1; ok = true;
- }
- else
+ } else {
return 0;
+ }
}
if (!ok)
return 0;
@@ -644,8 +632,12 @@ static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS,
ICmpInst::Predicate NEWCC,
llvm::InstCombiner::BuilderTy* Builder) {
Value *A = 0, *B = 0, *C = 0, *D = 0, *E = 0;
- unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS);
+ ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
+ unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS,
+ LHSCC, RHSCC);
if (mask == 0) return 0;
+ assert(ICmpInst::isEquality(LHSCC) && ICmpInst::isEquality(RHSCC) &&
+ "foldLogOpOfMaskedICmpsHelper must return an equality predicate.");
if (NEWCC == ICmpInst::ICMP_NE)
mask >>= 1; // treat "Not"-states as normal states
@@ -693,11 +685,11 @@ static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS,
ConstantInt *CCst = dyn_cast<ConstantInt>(C);
if (CCst == 0) return 0;
- if (LHS->getPredicate() != NEWCC)
+ if (LHSCC != NEWCC)
CCst = dyn_cast<ConstantInt>( ConstantExpr::getXor(BCst, CCst) );
ConstantInt *ECst = dyn_cast<ConstantInt>(E);
if (ECst == 0) return 0;
- if (RHS->getPredicate() != NEWCC)
+ if (RHSCC != NEWCC)
ECst = dyn_cast<ConstantInt>( ConstantExpr::getXor(DCst, ECst) );
ConstantInt* MCst = dyn_cast<ConstantInt>(
ConstantExpr::getAnd(ConstantExpr::getAnd(BCst, DCst),
@@ -728,7 +720,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
bool isSigned = LHS->isSigned() || RHS->isSigned();
- return getICmpValue(isSigned, Code, Op0, Op1, Builder);
+ return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
}
}
@@ -756,24 +748,12 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *NewOr = Builder->CreateOr(Val, Val2);
return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
}
-
- // (icmp slt A, 0) & (icmp slt B, 0) --> (icmp slt (A&B), 0)
- if (LHSCC == ICmpInst::ICMP_SLT && LHSCst->isZero()) {
- Value *NewAnd = Builder->CreateAnd(Val, Val2);
- return Builder->CreateICmp(LHSCC, NewAnd, LHSCst);
- }
-
- // (icmp sgt A, -1) & (icmp sgt B, -1) --> (icmp sgt (A|B), -1)
- if (LHSCC == ICmpInst::ICMP_SGT && LHSCst->isAllOnesValue()) {
- Value *NewOr = Builder->CreateOr(Val, Val2);
- return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
- }
}
// (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
// where CMAX is the all ones value for the truncated type,
// iff the lower bits of C2 and CA are zero.
- if (LHSCC == RHSCC && ICmpInst::isEquality(LHSCC) &&
+ if (LHSCC == ICmpInst::ICMP_EQ && LHSCC == RHSCC &&
LHS->hasOneUse() && RHS->hasOneUse()) {
Value *V;
ConstantInt *AndCst, *SmallCst = 0, *BigCst = 0;
@@ -805,7 +785,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
}
}
}
-
+
// From here on, we only handle:
// (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
if (Val != Val2) return 0;
@@ -1382,13 +1362,8 @@ static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
// part of the value (e.g. byte 3) then it must be shifted right. If from the
// low part, it must be shifted left.
unsigned DestByteNo = InputByteNo + OverallLeftShift;
- if (InputByteNo < ByteValues.size()/2) {
- if (ByteValues.size()-1-DestByteNo != InputByteNo)
- return true;
- } else {
- if (ByteValues.size()-1-DestByteNo != InputByteNo)
- return true;
- }
+ if (ByteValues.size()-1-DestByteNo != InputByteNo)
+ return true;
// If the destination byte value is already defined, the values are or'd
// together, which isn't a bswap (unless it's an or of the same bits).
@@ -1469,7 +1444,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
bool isSigned = LHS->isSigned() || RHS->isSigned();
- return getICmpValue(isSigned, Code, Op0, Op1, Builder);
+ return getNewICmpValue(isSigned, Code, Op0, Op1, Builder);
}
}
@@ -1490,18 +1465,6 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *NewOr = Builder->CreateOr(Val, Val2);
return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
}
-
- // (icmp slt A, 0) | (icmp slt B, 0) --> (icmp slt (A|B), 0)
- if (LHSCC == ICmpInst::ICMP_SLT && LHSCst->isZero()) {
- Value *NewOr = Builder->CreateOr(Val, Val2);
- return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
- }
-
- // (icmp sgt A, -1) | (icmp sgt B, -1) --> (icmp sgt (A&B), -1)
- if (LHSCC == ICmpInst::ICMP_SGT && LHSCst->isAllOnesValue()) {
- Value *NewAnd = Builder->CreateAnd(Val, Val2);
- return Builder->CreateICmp(LHSCC, NewAnd, LHSCst);
- }
}
// (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
@@ -1586,7 +1549,6 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
return ConstantInt::getTrue(LHS->getContext());
}
- break;
case ICmpInst::ICMP_ULT:
switch (RHSCC) {
default: llvm_unreachable("Unknown integer condition code!");
@@ -1962,8 +1924,11 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
// Canonicalize xor to the RHS.
- if (match(Op0, m_Xor(m_Value(), m_Value())))
+ bool SwappedForXor = false;
+ if (match(Op0, m_Xor(m_Value(), m_Value()))) {
std::swap(Op0, Op1);
+ SwappedForXor = true;
+ }
// A | ( A ^ B) -> A | B
// A | (~A ^ B) -> A | ~B
@@ -1994,6 +1959,9 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return BinaryOperator::CreateOr(Not, Op0);
}
+ if (SwappedForXor)
+ std::swap(Op0, Op1);
+
if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
if (Value *Res = FoldOrOfICmps(LHS, RHS))
@@ -2281,7 +2249,8 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
bool isSigned = LHS->isSigned() || RHS->isSigned();
return ReplaceInstUsesWith(I,
- getICmpValue(isSigned, Code, Op0, Op1, Builder));
+ getNewICmpValue(isSigned, Code, Op0, Op1,
+ Builder));
}
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index c7b3ff8..77e4727 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -37,26 +37,26 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
unsigned CopyAlign = MI->getAlignment();
if (CopyAlign < MinAlign) {
- MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
+ MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
MinAlign, false));
return MI;
}
-
+
// If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
// load/store.
ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
if (MemOpLength == 0) return 0;
-
+
// Source and destination pointer types are always "i8*" for intrinsic. See
// if the size is something we can handle with a single primitive load/store.
// A single load+store correctly handles overlapping memory in the memmove
// case.
unsigned Size = MemOpLength->getZExtValue();
if (Size == 0) return MI; // Delete this mem transfer.
-
+
if (Size > 8 || (Size&(Size-1)))
return 0; // If not 1/2/4/8 bytes, exit.
-
+
// Use an integer load+store unless we can find something better.
unsigned SrcAddrSp =
cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
@@ -66,7 +66,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
-
+
// Memcpy forces the use of i8* for the source and destination. That means
// that if you're using memcpy to move one double around, you'll get a cast
// from double* to i8*. We'd much rather use a double load+store rather than
@@ -94,20 +94,20 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
} else
break;
}
-
+
if (SrcETy->isSingleValueType()) {
NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
}
}
}
-
-
+
+
// If the memcpy/memmove provides better alignment info than we can
// infer, use it.
SrcAlign = std::max(SrcAlign, CopyAlign);
DstAlign = std::max(DstAlign, CopyAlign);
-
+
Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
@@ -127,7 +127,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
Alignment, false));
return MI;
}
-
+
// Extract the length and alignment and fill if they are constant.
ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
@@ -135,14 +135,14 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
return 0;
uint64_t Len = LenC->getZExtValue();
Alignment = MI->getAlignment();
-
+
// If the length is zero, this is a no-op
if (Len == 0) return MI; // memset(d,c,0,a) -> noop
-
+
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
-
+
Value *Dest = MI->getDest();
unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
@@ -150,13 +150,13 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
// Alignment 0 is identity for alignment 1 for memset, but not store.
if (Alignment == 0) Alignment = 1;
-
+
// Extract the fill value and store.
uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
MI->isVolatile());
S->setAlignment(Alignment);
-
+
// Set the size of the copy to 0, it will be deleted on the next iteration.
MI->setLength(Constant::getNullValue(LenC->getType()));
return MI;
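// The multiply above is a byte-splat: 0x0101010101010101 has one set byte
// per lane, so Fill * that constant replicates Fill into every byte of the
// word that the single store writes.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t Fill = 0xAB;
  assert(Fill * 0x0101010101010101ULL == 0xABABABABABABABABULL);
  return 0;
}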
@@ -165,7 +165,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
return 0;
}
-/// visitCallInst - CallInst simplification. This mostly only handles folding
+/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
@@ -182,7 +182,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
CI.setDoesNotThrow();
return &CI;
}
-
+
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
if (!II) return visitCallSite(&CI);
@@ -203,7 +203,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// alignment is sufficient.
}
}
-
+
// No other transformations apply to volatile transfers.
if (MI->isVolatile())
return 0;
@@ -242,13 +242,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (Changed) return II;
}
-
+
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::objectsize: {
// We need target data for just about everything so depend on it.
if (!TD) break;
-
+
Type *ReturnTy = CI.getType();
uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
@@ -265,6 +265,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Get the current byte offset into the thing. Use the original
// operand in case we're looking through a bitcast.
SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
+ if (!GEP->getPointerOperandType()->isPointerTy())
+ return 0;
Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
Op1 = GEP->getPointerOperand()->stripPointerCasts();
@@ -322,7 +324,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
if (Operand->getIntrinsicID() == Intrinsic::bswap)
return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));
-
+
// bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
@@ -334,7 +336,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return new TruncInst(V, TI->getType());
}
}
-
+
break;
case Intrinsic::powi:
if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
@@ -359,14 +361,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
- KnownZero, KnownOne);
+ ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
unsigned TrailingZeros = KnownOne.countTrailingZeros();
APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
if ((Mask & KnownZero) == Mask)
return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
APInt(BitWidth, TrailingZeros)));
-
+
}
break;
case Intrinsic::ctlz: {
@@ -378,31 +379,29 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
- KnownZero, KnownOne);
+ ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
unsigned LeadingZeros = KnownOne.countLeadingZeros();
APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
if ((Mask & KnownZero) == Mask)
return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
APInt(BitWidth, LeadingZeros)));
-
+
}
break;
case Intrinsic::uadd_with_overflow: {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
- APInt Mask = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
+ ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
if (LHSKnownNegative || LHSKnownPositive) {
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
+ ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
if (LHSKnownNegative && RHSKnownNegative) {
@@ -448,7 +447,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// X + undef -> undef
if (isa<UndefValue>(II->getArgOperand(1)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
-
+
if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// X + 0 -> {X, false}
if (RHS->isZero()) {
@@ -469,7 +468,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (isa<UndefValue>(II->getArgOperand(0)) ||
isa<UndefValue>(II->getArgOperand(1)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
-
+
if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// X - 0 -> {X, false}
if (RHS->isZero()) {
@@ -477,7 +476,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
UndefValue::get(II->getArgOperand(0)->getType()),
ConstantInt::getFalse(II->getContext())
};
- Constant *Struct =
+ Constant *Struct =
ConstantStruct::get(cast<StructType>(II->getType()), V);
return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
@@ -486,14 +485,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::umul_with_overflow: {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();
- APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
+ ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
+ ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
// Get the largest possible values for each operand.
APInt LHSMax = ~LHSKnownZero;
@@ -526,19 +524,19 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// X * undef -> undef
if (isa<UndefValue>(II->getArgOperand(1)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
-
+
if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// X*0 -> {0, false}
if (RHSI->isZero())
return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
-
+
// X * 1 -> {X, false}
if (RHSI->equalsInt(1)) {
Constant *V[] = {
UndefValue::get(II->getArgOperand(0)->getType()),
ConstantInt::getFalse(II->getContext())
};
- Constant *Struct =
+ Constant *Struct =
ConstantStruct::get(cast<StructType>(II->getType()), V);
return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
@@ -557,7 +555,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
- Type *OpPtrTy =
+ Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr);
@@ -568,7 +566,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
- Type *OpPtrTy =
+ Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(1)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
return new StoreInst(II->getArgOperand(1), Ptr);
@@ -621,19 +619,21 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
- if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
- assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
-
+ if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
+ assert(Mask->getType()->getVectorNumElements() == 16 &&
+ "Bad type for intrinsic!");
+
// Check that all of the elements are integer constants or undefs.
bool AllEltsOk = true;
for (unsigned i = 0; i != 16; ++i) {
- if (!isa<ConstantInt>(Mask->getOperand(i)) &&
- !isa<UndefValue>(Mask->getOperand(i))) {
+ Constant *Elt = Mask->getAggregateElement(i);
+ if (Elt == 0 ||
+ !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
AllEltsOk = false;
break;
}
}
-
+
if (AllEltsOk) {
// Cast the input vectors to byte vectors.
Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
@@ -641,23 +641,24 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
Mask->getType());
Value *Result = UndefValue::get(Op0->getType());
-
+
// Only extract each element once.
Value *ExtractedElts[32];
memset(ExtractedElts, 0, sizeof(ExtractedElts));
-
+
for (unsigned i = 0; i != 16; ++i) {
- if (isa<UndefValue>(Mask->getOperand(i)))
+ if (isa<UndefValue>(Mask->getAggregateElement(i)))
continue;
- unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
+ unsigned Idx =
+ cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
Idx &= 31; // Match the hardware behavior.
-
+
if (ExtractedElts[Idx] == 0) {
- ExtractedElts[Idx] =
+ ExtractedElts[Idx] =
Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
Builder->getInt32(Idx&15));
}
-
+
// Insert this value into the result vector.
Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
Builder->getInt32(i));
@@ -703,7 +704,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return EraseInstFromFunction(CI);
}
}
-
+
// Scan down this block to see if there is another stack restore in the
// same block without an intervening call/alloca.
BasicBlock::iterator BI = II;
@@ -728,12 +729,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
}
}
-
+
// If the stack restore is in a return or resume block and if there
// are no allocas or calls between the restore and the return, nuke the
// restore.
- if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI) ||
- isa<UnwindInst>(TI)))
+ if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
return EraseInstFromFunction(CI);
break;
}
@@ -748,7 +748,7 @@ Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
return visitCallSite(&II);
}
-/// isSafeToEliminateVarargsCast - If this cast does not affect the value
+/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
const CastInst * const CI,
@@ -760,10 +760,10 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
// The size of ByVal arguments is derived from the type, so we
// can't change to a type with a different size. If the size were
// passed explicitly we could avoid this check.
- if (!CS.paramHasAttr(ix, Attribute::ByVal))
+ if (!CS.isByValArgument(ix))
return true;
- Type* SrcTy =
+ Type* SrcTy =
cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
if (!SrcTy->isSized() || !DstTy->isSized())
@@ -807,7 +807,7 @@ public:
} // end anonymous namespace
// Try to fold some different type of calls here.
-// Currently we're only working with the checking functions, memcpy_chk,
+// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
@@ -916,7 +916,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
!CalleeF->isDeclaration()) {
Instruction *OldCall = CS.getInstruction();
new StoreInst(ConstantInt::getTrue(Callee->getContext()),
- UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
+ UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
OldCall);
// If OldCall does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust themselves.
@@ -924,7 +924,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
if (isa<CallInst>(OldCall))
return EraseInstFromFunction(*OldCall);
-
+
// We cannot remove an invoke, because it would change the CFG, just
// change the callee to a null pointer.
cast<InvokeInst>(OldCall)->setCalledFunction(
@@ -960,7 +960,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
PointerType *PTy = cast<PointerType>(Callee->getType());
FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
if (FTy->isVarArg()) {
- int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
+ int ix = FTy->getNumParams();
// See if we can optimize any arguments passed through the varargs area of
// the call.
for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
@@ -1061,17 +1061,17 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
if (!CastInst::isCastable(ActTy, ParamTy))
return false; // Cannot transform this parameter value.
- unsigned Attrs = CallerPAL.getParamAttributes(i + 1);
+ Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
if (Attrs & Attribute::typeIncompatible(ParamTy))
return false; // Attribute not compatible with transformed value.
-
+
// If the parameter is passed as a byval argument, then we have to have a
// sized type and the sized type has to have the same size as the old type.
if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
return false;
-
+
Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
if (TD->getTypeAllocSize(CurElTy) !=
TD->getTypeAllocSize(ParamPTy->getElementType()))
@@ -1099,8 +1099,17 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
return false;
+
+ // If both the callee and the cast type are varargs, we still have to make
+ // sure the number of fixed parameters are the same or we have the same
+ // ABI issues as if we introduce a varargs call.
+ if (FT->isVarArg() &&
+ cast<FunctionType>(APTy->getElementType())->isVarArg() &&
+ FT->getNumParams() !=
+ cast<FunctionType>(APTy->getElementType())->getNumParams())
+ return false;
}
-
+
if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
!CallerPAL.isEmpty())
// In this case we have more arguments than the new function type, but we
@@ -1114,7 +1123,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
return false;
}
-
+
// Okay, we decided that this is a safe thing to do: go ahead and start
// inserting cast instructions as necessary.
std::vector<Value*> Args;
@@ -1352,11 +1361,11 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// Replace the trampoline call with a direct call. Let the generic
// code sort out any function type mismatches.
- FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
+ FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
FTy->isVarArg());
Constant *NewCallee =
NestF->getType() == PointerType::getUnqual(NewFTy) ?
- NestF : ConstantExpr::getBitCast(NestF,
+ NestF : ConstantExpr::getBitCast(NestF,
PointerType::getUnqual(NewFTy));
const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
NewAttrs.end());
@@ -1385,9 +1394,8 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// parameter, there is no need to adjust the argument list. Let the generic
// code sort out any function type mismatches.
Constant *NewCallee =
- NestF->getType() == PTy ? NestF :
+ NestF->getType() == PTy ? NestF :
ConstantExpr::getBitCast(NestF, PTy);
CS.setCalledFunction(NewCallee);
return CS.getInstruction();
}
-
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index f10e48a..39279f4 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -14,6 +14,7 @@
#include "InstCombine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
@@ -147,8 +148,6 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
return ReplaceInstUsesWith(CI, New);
}
-
-
/// EvaluateInDifferentType - Given an expression that
/// CanEvaluateTruncated or CanEvaluateSExtd returns true for, actually
/// insert the code to evaluate the expression.
@@ -158,7 +157,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
// If we got a constantexpr back, try to simplify it with TD info.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ C = ConstantFoldConstantExpression(CE, TD, TLI);
return C;
}
@@ -216,7 +215,6 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
default:
// TODO: Can handle more cases here.
llvm_unreachable("Unreachable!");
- break;
}
Res->takeName(I);
@@ -528,9 +526,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
return ReplaceInstUsesWith(CI, In);
}
-
-
-
+
// zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
// zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
// zext (X == 1) to i32 --> X iff X has only the low bit set.
@@ -545,8 +541,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
// If Op1C some other power of two, convert:
uint32_t BitWidth = Op1C->getType()->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- APInt TypeMask(APInt::getAllOnesValue(BitWidth));
- ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);
+ ComputeMaskedBits(ICI->getOperand(0), KnownZero, KnownOne);
APInt KnownZeroMask(~KnownZero);
if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
@@ -594,9 +589,8 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
- APInt TypeMask(APInt::getAllOnesValue(BitWidth));
- ComputeMaskedBits(LHS, TypeMask, KnownZeroLHS, KnownOneLHS);
- ComputeMaskedBits(RHS, TypeMask, KnownZeroRHS, KnownOneRHS);
+ ComputeMaskedBits(LHS, KnownZeroLHS, KnownOneLHS);
+ ComputeMaskedBits(RHS, KnownZeroRHS, KnownOneRHS);
if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
APInt KnownBits = KnownZeroLHS | KnownOneLHS;
@@ -915,8 +909,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
unsigned BitWidth = Op1C->getType()->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- APInt TypeMask(APInt::getAllOnesValue(BitWidth));
- ComputeMaskedBits(Op0, TypeMask, KnownZero, KnownOne);
+ ComputeMaskedBits(Op0, KnownZero, KnownOne);
APInt KnownZeroMask(~KnownZero);
if (KnownZeroMask.isPowerOf2()) {
@@ -1163,6 +1156,9 @@ static Value *LookThroughFPExtensions(Value *V) {
if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
if (CFP->getType() == Type::getPPC_FP128Ty(V->getContext()))
return V; // No constant folding of this.
+ // See if the value can be truncated to half and then reextended.
+ if (Value *V = FitsInFPType(CFP, APFloat::IEEEhalf))
+ return V;
// See if the value can be truncated to float and then reextended.
if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle))
return V;
@@ -1213,10 +1209,9 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
}
// Fold (fptrunc (sqrt (fpext x))) -> (sqrtf x)
- // NOTE: This should be disabled by -fno-builtin-sqrt if we ever support it.
CallInst *Call = dyn_cast<CallInst>(CI.getOperand(0));
- if (Call && Call->getCalledFunction() &&
- Call->getCalledFunction()->getName() == "sqrt" &&
+ if (Call && Call->getCalledFunction() && TLI->has(LibFunc::sqrtf) &&
+ Call->getCalledFunction()->getName() == TLI->getName(LibFunc::sqrt) &&
Call->getNumArgOperands() == 1 &&
Call->hasOneUse()) {
CastInst *Arg = dyn_cast<CastInst>(Call->getArgOperand(0));
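A standalone illustration of the fold being generalized here: with an IEEE-754 correctly rounded sqrt, (float)sqrt((double)x) and sqrtf(x) agree for every float x, since double carries more than the 2*24+2 significand bits needed to make the double rounding harmless. Plain C++, not LLVM code:

#include <cassert>
#include <cmath>
#include <initializer_list>

int main() {
  for (float X : {0.0f, 2.0f, 16.0f, 123.456f}) {
    float Wide   = static_cast<float>(std::sqrt(static_cast<double>(X)));
    float Narrow = std::sqrt(X);  // float overload, i.e. sqrtf
    assert(Wide == Narrow);
  }
  return 0;
}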
@@ -1423,16 +1418,15 @@ static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
// Now that the element types match, get the shuffle mask and RHS of the
// shuffle to use, which depends on whether we're increasing or decreasing the
// size of the input.
- SmallVector<Constant*, 16> ShuffleMask;
+ SmallVector<uint32_t, 16> ShuffleMask;
Value *V2;
- IntegerType *Int32Ty = Type::getInt32Ty(SrcTy->getContext());
if (SrcTy->getNumElements() > DestTy->getNumElements()) {
// If we're shrinking the number of elements, just shuffle in the low
// elements from the input and use undef as the second shuffle input.
V2 = UndefValue::get(SrcTy);
for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
- ShuffleMask.push_back(ConstantInt::get(Int32Ty, i));
+ ShuffleMask.push_back(i);
} else {
// If we're increasing the number of elements, shuffle in all of the
@@ -1441,14 +1435,16 @@ static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
V2 = Constant::getNullValue(SrcTy);
unsigned SrcElts = SrcTy->getNumElements();
for (unsigned i = 0, e = SrcElts; i != e; ++i)
- ShuffleMask.push_back(ConstantInt::get(Int32Ty, i));
+ ShuffleMask.push_back(i);
// The excess elements reference the first element of the zero input.
- ShuffleMask.append(DestTy->getNumElements()-SrcElts,
- ConstantInt::get(Int32Ty, SrcElts));
+ for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
+ ShuffleMask.push_back(SrcElts);
}
- return new ShuffleVectorInst(InVal, V2, ConstantVector::get(ShuffleMask));
+ return new ShuffleVectorInst(InVal, V2,
+ ConstantDataVector::get(V2->getContext(),
+ ShuffleMask));
}
static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
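The mask construction from the hunk above in isolation: resizeMask is a hypothetical helper, not an LLVM API, mirroring the two branches. In shufflevector numbering, index SrcElts selects element 0 of the second operand (the zero vector), which is why the grow case pads with that value:

#include <cstdint>
#include <vector>

static std::vector<uint32_t> resizeMask(unsigned SrcElts, unsigned DstElts) {
  std::vector<uint32_t> Mask;
  if (SrcElts > DstElts) {
    for (unsigned i = 0; i != DstElts; ++i)
      Mask.push_back(i);                  // shrink: keep the low elements
  } else {
    for (unsigned i = 0; i != SrcElts; ++i)
      Mask.push_back(i);                  // grow: keep all source elements
    for (unsigned i = SrcElts; i != DstElts; ++i)
      Mask.push_back(SrcElts);            // pad from the zero vector
  }
  return Mask;
}

int main() {
  std::vector<uint32_t> Grow = resizeMask(2, 4);   // {0, 1, 2, 2}
  std::vector<uint32_t> Shrink = resizeMask(4, 2); // {0, 1}
  return (Grow.back() == 2 && Shrink.size() == 2) ? 0 : 1;
}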
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bb1cbfa..ab2987f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -203,8 +203,12 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// We need TD information to know the pointer size unless this is inbounds.
if (!GEP->isInBounds() && TD == 0) return 0;
- ConstantArray *Init = dyn_cast<ConstantArray>(GV->getInitializer());
- if (Init == 0 || Init->getNumOperands() > 1024) return 0;
+ Constant *Init = GV->getInitializer();
+ if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
+ return 0;
+
+ uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
+ if (ArrayElementCount > 1024) return 0; // Don't blow up on huge arrays.
// There are many forms of this optimization we can handle, for now, just do
// the simple index into a single-dimensional array.
@@ -221,7 +225,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// structs.
SmallVector<unsigned, 4> LaterIndices;
- Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
+ Type *EltTy = Init->getType()->getArrayElementType();
for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (Idx == 0) return 0; // Variable index.
@@ -272,8 +276,9 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// Scan the array and see if one of our patterns matches.
Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
- for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
- Constant *Elt = Init->getOperand(i);
+ for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
+ Constant *Elt = Init->getAggregateElement(i);
+ if (Elt == 0) return 0;
// If this is indexing an array of structures, get the structure element.
if (!LaterIndices.empty())
@@ -284,7 +289,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// Find out if the comparison would be true or false for the i'th element.
Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
- CompareRHS, TD);
+ CompareRHS, TD, TLI);
// If the result is undef for this element, ignore it.
if (isa<UndefValue>(C)) {
// Extend range state machines to cover this element in case there is an
@@ -440,10 +445,10 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// If a 32-bit or 64-bit magic bitvector captures the entire comparison state
// of this load, replace it with computation that does:
// ((magic_cst >> i) & 1) != 0
- if (Init->getNumOperands() <= 32 ||
- (TD && Init->getNumOperands() <= 64 && TD->isLegalInteger(64))) {
+ if (ArrayElementCount <= 32 ||
+ (TD && ArrayElementCount <= 64 && TD->isLegalInteger(64))) {
Type *Ty;
- if (Init->getNumOperands() <= 32)
+ if (ArrayElementCount <= 32)
Ty = Type::getInt32Ty(Init->getContext());
else
Ty = Type::getInt64Ty(Init->getContext());
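A sketch of the magic-bitvector rewrite in plain C++ (the table contents and the eq-7 predicate are made up for illustration): one bit per array element records whether the comparison holds, and the load+icmp pair becomes a shift-and-mask:

#include <cassert>
#include <cstdint>

int main() {
  const int Table[8] = {3, 7, 1, 9, 4, 7, 7, 2};
  uint32_t Magic = 0;
  for (unsigned i = 0; i != 8; ++i)
    if (Table[i] == 7)               // the original "load; icmp eq 7"
      Magic |= 1u << i;
  for (unsigned i = 0; i != 8; ++i)  // replacement: ((Magic >> i) & 1) != 0
    assert((((Magic >> i) & 1) != 0) == (Table[i] == 7));
  return 0;
}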
@@ -566,6 +571,14 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
ICmpInst::Predicate Cond,
Instruction &I) {
+ // Don't transform signed compares of GEPs into index compares. Even if the
+ // GEP is inbounds, the final add of the base pointer can have signed overflow
+ // and would change the result of the icmp.
+ // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
+ // the maximum signed value for the pointer type.
+ if (ICmpInst::isSigned(Cond))
+ return 0;
+
// Look through bitcasts.
if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
RHS = BCI->getOperand(0);
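A worked version of the counterexample in the comment, scaled down to 8-bit "pointers" so the wrap is easy to see (two's-complement wrap on the int8_t cast is assumed, as on mainstream targets):

#include <cassert>
#include <cstdint>

int main() {
  uint8_t Base = 0x7F;                        // foo at the max signed address
  int8_t P0 = static_cast<int8_t>(Base);      // &foo[0] == +127
  int8_t P1 = static_cast<int8_t>(Base + 1);  // &foo[1] wraps to -128
  assert(!(P0 < P1));  // "&foo[0] <s &foo[1]" is false, yet index 0 < 1
  return 0;
}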
@@ -602,6 +615,20 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
GEPLHS->getOperand(0), GEPRHS->getOperand(0));
+ // If we're comparing GEPs with two base pointers that only differ in type
+ // and both GEPs have only constant indices or just one use, then fold
+ // the compare with the adjusted indices.
+ if (TD && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
+ (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
+ (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
+ PtrBase->stripPointerCasts() ==
+ GEPRHS->getOperand(0)->stripPointerCasts()) {
+ Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
+ EmitGEPOffset(GEPLHS),
+ EmitGEPOffset(GEPRHS));
+ return ReplaceInstUsesWith(I, Cmp);
+ }
+
// Otherwise, the base pointers are different and the indices are
// different, bail out.
return 0;
@@ -1001,9 +1028,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// of the high bits truncated out of x are known.
unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
- APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
- ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);
+ ComputeMaskedBits(LHSI->getOperand(0), KnownZero, KnownOne);
// If all the high bits are known, we can do this xform.
if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
@@ -1657,6 +1683,14 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
return 0;
+ // This is only really a signed overflow check if the inputs have been
+ // sign-extended; check for that condition. For example, if CI2 is 2^31 and
+ // the operands of the add are 64 bits wide, we need at least 33 sign bits.
+ unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
+ if (IC.ComputeNumSignBits(A) < NeededSignBits ||
+ IC.ComputeNumSignBits(B) < NeededSignBits)
+ return 0;
+
// In order to replace the original add with a narrower
// llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
// and truncates that discard the high bits of the add. Verify that this is
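A sketch of the sign-bit requirement just added: for the comment's example (CI1 is 64 bits wide, NewWidth is 32) we need 64 - 32 + 1 = 33 sign bits. numSignBits here is a hypothetical stand-in for ComputeNumSignBits, evaluated on constants:

#include <cassert>
#include <cstdint>

static unsigned numSignBits(int64_t V) {
  uint64_t U = static_cast<uint64_t>(V);
  uint64_t Sign = (U >> 63) & 1;
  unsigned N = 1;                      // the sign bit itself
  while (N < 64 && ((U >> (63 - N)) & 1) == Sign)
    ++N;                               // count copies of the sign bit
  return N;
}

int main() {
  int64_t A = INT32_MIN;               // a sign-extended i32 value
  assert(numSignBits(A) >= 33);        // enough for the 32-bit overflow check
  int64_t B = INT64_C(0x0000000100000000);  // not sign-extended from i32
  assert(numSignBits(B) < 33);
  return 0;
}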
@@ -1787,6 +1821,24 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
return ReplaceInstUsesWith(I, V);
+ // comparing -val or val with non-zero is the same as just comparing val
+ // ie, abs(val) != 0 -> val != 0
+ if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero()))
+ {
+ Value *Cond, *SelectTrue, *SelectFalse;
+ if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
+ m_Value(SelectFalse)))) {
+ if (Value *V = dyn_castNegVal(SelectTrue)) {
+ if (V == SelectFalse)
+ return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
+ }
+ else if (Value *V = dyn_castNegVal(SelectFalse)) {
+ if (V == SelectTrue)
+ return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
+ }
+ }
+ }
+
Type *Ty = Op0->getType();
// icmp's with boolean values can always be turned into bitwise operations
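A quick check of the new abs/nabs fold: whichever arm the select picks, comparing against zero gives the same answer as comparing val directly (plain C++, values chosen to avoid the INT_MIN negation edge case):

#include <cassert>
#include <initializer_list>

int main() {
  for (int V : {-3, -1, 0, 1, 42})
    for (bool Cond : {false, true}) {
      int Sel = Cond ? -V : V;         // select cond, -v, v
      assert((Sel != 0) == (V != 0));  // abs(val) != 0 --> val != 0
    }
  return 0;
}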
@@ -2683,6 +2735,17 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
}
+ } else {
+ // See if the RHS value is < UnsignedMin.
+ APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
+ SMin.convertFromAPInt(APInt::getMinValue(IntWidth), true,
+ APFloat::rmNearestTiesToEven);
+ if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
+ if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
+ Pred == ICmpInst::ICMP_UGE)
+ return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
+ return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
+ }
}
// Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
@@ -2822,7 +2885,9 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
const fltSemantics *Sem;
// FIXME: This shouldn't be here.
- if (LHSExt->getSrcTy()->isFloatTy())
+ if (LHSExt->getSrcTy()->isHalfTy())
+ Sem = &APFloat::IEEEhalf;
+ else if (LHSExt->getSrcTy()->isFloatTy())
Sem = &APFloat::IEEEsingle;
else if (LHSExt->getSrcTy()->isDoubleTy())
Sem = &APFloat::IEEEdouble;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 7446a51..b2f2e24 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -22,6 +22,72 @@ using namespace llvm;
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
+// Try to kill dead allocas by walking through their uses until we see some use
+// that could escape. This is a conservative analysis which tries to handle
+// GEPs, bitcasts, stores, and no-op intrinsics. These tend to be the things
+// left after inlining and SROA finish chewing on an alloca.
+static Instruction *removeDeadAlloca(InstCombiner &IC, AllocaInst &AI) {
+ SmallVector<Instruction *, 4> Worklist, DeadStores;
+ Worklist.push_back(&AI);
+ do {
+ Instruction *PI = Worklist.pop_back_val();
+ for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end();
+ UI != UE; ++UI) {
+ Instruction *I = cast<Instruction>(*UI);
+ switch (I->getOpcode()) {
+ default:
+ // Give up the moment we see something we can't handle.
+ return 0;
+
+ case Instruction::GetElementPtr:
+ case Instruction::BitCast:
+ Worklist.push_back(I);
+ continue;
+
+ case Instruction::Call:
+ // We can handle a limited subset of calls to no-op intrinsics.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ continue;
+ default:
+ return 0;
+ }
+ }
+ // Reject everything else.
+ return 0;
+
+ case Instruction::Store: {
+ // Stores into the alloca are only live if the alloca is live.
+ StoreInst *SI = cast<StoreInst>(I);
+ // We can eliminate atomic stores, but not volatile ones.
+ if (SI->isVolatile())
+ return 0;
+ // The store is only trivially safe if the pointer is the destination
+ // as opposed to the value. We're conservative here and don't check for
+ // the case where we store the address of a dead alloca into a dead
+ // alloca.
+ if (SI->getPointerOperand() != PI)
+ return 0;
+ DeadStores.push_back(I);
+ continue;
+ }
+ }
+ }
+ } while (!Worklist.empty());
+
+ // The alloca is dead. Kill off all the stores to it, and then replace it
+ // with undef.
+ while (!DeadStores.empty())
+ IC.EraseInstFromFunction(*DeadStores.pop_back_val());
+ return IC.ReplaceInstUsesWith(AI, UndefValue::get(AI.getType()));
+}
+
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
@@ -81,7 +147,10 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
}
- return 0;
+ // Try to aggressively remove allocas which are only used for GEPs, lifetime
+ // markers, and stores. This happens when SROA iteratively promotes stores
+ // out of the alloca, and we need to cleanup after it.
+ return removeDeadAlloca(*this, AI);
}
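A toy model of the removeDeadAlloca walk above, with hypothetical types standing in for LLVM's use lists: pointer-producing users (GEP, bitcast) are followed recursively, stores into the alloca and no-op lifetime intrinsics are tolerated, and anything else conservatively counts as an escape:

#include <vector>

enum class Kind { GEP, BitCast, Store, LifetimeMarker, Other };

struct Use {
  Kind K;
  bool StoresPointerItself;  // a store of the alloca's address elsewhere
  std::vector<Use> Users;    // users of this derived pointer
};

static bool onlyDeadUses(const Use &U) {
  switch (U.K) {
  case Kind::GEP:
  case Kind::BitCast:
    for (const Use &Child : U.Users)   // follow derived pointers
      if (!onlyDeadUses(Child))
        return false;
    return true;
  case Kind::Store:
    return !U.StoresPointerItself;     // storing the address escapes it
  case Kind::LifetimeMarker:
    return true;                       // no-op intrinsics are harmless
  case Kind::Other:
    return false;                      // conservative: might escape
  }
  return false;
}

int main() {
  Use Store{Kind::Store, false, {}};
  Use Gep{Kind::GEP, false, {Store}};
  return onlyDeadUses(Gep) ? 0 : 1;    // dead: only a store into the alloca
}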
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 7f48125..5168e2a 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -256,22 +256,18 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- // Simplify mul instructions with a constant RHS...
+ // Simplify mul instructions with a constant RHS.
if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1C)) {
// "In IEEE floating point, x*1 is not equivalent to x for nans. However,
// ANSI says we can drop signals, so we can do this anyway." (from GCC)
if (Op1F->isExactlyValue(1.0))
return ReplaceInstUsesWith(I, Op0); // Eliminate 'fmul double %X, 1.0'
- } else if (Op1C->getType()->isVectorTy()) {
- if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
- // As above, vector X*splat(1.0) -> X in all defined cases.
- if (Constant *Splat = Op1V->getSplatValue()) {
- if (ConstantFP *F = dyn_cast<ConstantFP>(Splat))
- if (F->isExactlyValue(1.0))
- return ReplaceInstUsesWith(I, Op0);
- }
- }
+ } else if (ConstantDataVector *Op1V = dyn_cast<ConstantDataVector>(Op1C)) {
+ // As above, vector X*splat(1.0) -> X in all defined cases.
+ if (ConstantFP *F = dyn_cast_or_null<ConstantFP>(Op1V->getSplatValue()))
+ if (F->isExactlyValue(1.0))
+ return ReplaceInstUsesWith(I, Op0);
}
// Try to fold constant mul into select arguments.
@@ -441,19 +437,23 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
// Handle the integer div common cases
if (Instruction *Common = commonIDivTransforms(I))
return Common;
-
- if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
+
+ {
// X udiv 2^C -> X >> C
// Check to see if this is an unsigned division with an exact power of 2,
// if so, convert to a right shift.
- if (C->getValue().isPowerOf2()) { // 0 not included in isPowerOf2
+ const APInt *C;
+ if (match(Op1, m_Power2(C))) {
BinaryOperator *LShr =
- BinaryOperator::CreateLShr(Op0,
- ConstantInt::get(Op0->getType(), C->getValue().logBase2()));
+ BinaryOperator::CreateLShr(Op0,
+ ConstantInt::get(Op0->getType(),
+ C->logBase2()));
if (I.isExact()) LShr->setIsExact();
return LShr;
}
+ }
+ if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
// X udiv C, where C >= signbit
if (C->getValue().isNegative()) {
Value *IC = Builder->CreateICmpULT(Op0, C);
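The power-of-two fold in isolation: unsigned division by 2^C is exactly a logical shift right by C, with logBase2 recovering the shift amount (standalone C++, not the APInt helpers):

#include <cassert>
#include <cstdint>
#include <initializer_list>

static unsigned logBase2(uint32_t C) {
  unsigned L = 0;
  while ((1u << L) != C)  // C is assumed to be a power of two
    ++L;
  return L;
}

int main() {
  for (uint32_t X : {0u, 1u, 7u, 1000u, 0xFFFFFFFFu})
    for (uint32_t C : {1u, 2u, 16u, 1024u})
      assert(X / C == X >> logBase2(C));  // X udiv 2^C --> X lshr C
  return 0;
}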
@@ -684,28 +684,36 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
}
// If it's a constant vector, flip any negative values positive.
- if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
- unsigned VWidth = RHSV->getNumOperands();
+ if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
+ Constant *C = cast<Constant>(Op1);
+ unsigned VWidth = C->getType()->getVectorNumElements();
bool hasNegative = false;
- for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
+ bool hasMissing = false;
+ for (unsigned i = 0; i != VWidth; ++i) {
+ Constant *Elt = C->getAggregateElement(i);
+ if (Elt == 0) {
+ hasMissing = true;
+ break;
+ }
+
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
if (RHS->isNegative())
hasNegative = true;
+ }
- if (hasNegative) {
- std::vector<Constant *> Elts(VWidth);
+ if (hasNegative && !hasMissing) {
+ SmallVector<Constant *, 16> Elts(VWidth);
for (unsigned i = 0; i != VWidth; ++i) {
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
+ Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
if (RHS->isNegative())
Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
- else
- Elts[i] = RHS;
}
}
Constant *NewRHSV = ConstantVector::get(Elts);
- if (NewRHSV != RHSV) {
+ if (NewRHSV != C) { // Don't loop on -MININT
Worklist.AddValue(I.getOperand(1));
I.setOperand(1, NewRHSV);
return &I;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 91e60a4..e727b2c 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -184,7 +184,6 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
return BinaryOperator::Create(BO->getOpcode(), NewSI, MatchOp);
}
llvm_unreachable("Shouldn't get here");
- return 0;
}
static bool isSelect01(Constant *C1, Constant *C2) {
@@ -282,7 +281,8 @@ Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
/// SimplifyWithOpReplaced - See if V simplifies when its operand Op is
/// replaced with RepOp.
static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
// Trivial replacement.
if (V == Op)
return RepOp;
@@ -294,17 +294,19 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
// If this is a binary operator, try to simplify it with the replaced op.
if (BinaryOperator *B = dyn_cast<BinaryOperator>(I)) {
if (B->getOperand(0) == Op)
- return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), TD);
+ return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), TD, TLI);
if (B->getOperand(1) == Op)
- return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, TD);
+ return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, TD, TLI);
}
// Same for CmpInsts.
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
if (C->getOperand(0) == Op)
- return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), TD);
+ return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), TD,
+ TLI);
if (C->getOperand(1) == Op)
- return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, TD);
+ return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, TD,
+ TLI);
}
// TODO: We could hand off more cases to instsimplify here.
@@ -330,7 +332,7 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
return ConstantFoldLoadFromConstPtr(ConstOps[0], TD);
return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- ConstOps, TD);
+ ConstOps, TD, TLI);
}
}
@@ -479,18 +481,18 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
// arms of the select. See if substituting this value into the arm and
// simplifying the result yields the same value as the other arm.
if (Pred == ICmpInst::ICMP_EQ) {
- if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD) == TrueVal ||
- SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD) == TrueVal)
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)
return ReplaceInstUsesWith(SI, FalseVal);
- if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD) == FalseVal ||
- SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD) == FalseVal)
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)
return ReplaceInstUsesWith(SI, FalseVal);
} else if (Pred == ICmpInst::ICMP_NE) {
- if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD) == FalseVal ||
- SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD) == FalseVal)
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)
return ReplaceInstUsesWith(SI, TrueVal);
- if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD) == TrueVal ||
- SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD) == TrueVal)
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)
return ReplaceInstUsesWith(SI, TrueVal);
}
@@ -679,6 +681,13 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return BinaryOperator::CreateOr(CondVal, FalseVal);
else if (CondVal == FalseVal)
return BinaryOperator::CreateAnd(CondVal, TrueVal);
+
+ // select a, ~a, b -> (~a)&b
+ // select a, b, ~a -> (~a)|b
+ if (match(TrueVal, m_Not(m_Specific(CondVal))))
+ return BinaryOperator::CreateAnd(TrueVal, FalseVal);
+ else if (match(FalseVal, m_Not(m_Specific(CondVal))))
+ return BinaryOperator::CreateOr(TrueVal, FalseVal);
}
// Selecting between two integer constants?
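A truth-table check of the two select folds added above, over i1 values:

#include <cassert>
#include <initializer_list>

int main() {
  for (bool A : {false, true})
    for (bool B : {false, true}) {
      assert((A ? !A : B) == (!A && B));  // select a, ~a, b --> (~a)&b
      assert((A ? B : !A) == (!A || B));  // select a, b, ~a --> (~a)|b
    }
  return 0;
}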
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 6d85add..b31049e 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -190,7 +190,8 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
V = IC.Builder->CreateLShr(C, NumBits);
// If we got a constantexpr back, try to simplify it with TD info.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
- V = ConstantFoldConstantExpression(CE, IC.getTargetData());
+ V = ConstantFoldConstantExpression(CE, IC.getTargetData(),
+ IC.getTargetLibraryInfo());
return V;
}
@@ -198,7 +199,7 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
IC.Worklist.Add(I);
switch (I->getOpcode()) {
- default: assert(0 && "Inconsistency with CanEvaluateShifted");
+ default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
@@ -535,12 +536,11 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
Value *X = ShiftOp->getOperand(0);
- uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
-
IntegerType *Ty = cast<IntegerType>(I.getType());
// Check for (X << c1) << c2 and (X >> c1) >> c2
if (I.getOpcode() == ShiftOp->getOpcode()) {
+ uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
// If this is oversized composite shift, then unsigned shifts get 0, ashr
// saturates.
if (AmtSum >= TypeBits) {
@@ -576,7 +576,16 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
ShiftOp->getOpcode() != Instruction::Shl) {
assert(ShiftOp->getOpcode() == Instruction::LShr ||
ShiftOp->getOpcode() == Instruction::AShr);
- Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
+ ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+ if (ShiftOp->isExact()) {
+ // (X >>?,exact C1) << C2 --> X << (C2-C1)
+ BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
+ X, ShiftDiffCst);
+ NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
+ NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
+ return NewShl;
+ }
+ Value *Shift = Builder->CreateShl(X, ShiftDiffCst);
APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
return BinaryOperator::CreateAnd(Shift,
@@ -586,15 +595,34 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
if (I.getOpcode() == Instruction::LShr &&
ShiftOp->getOpcode() == Instruction::Shl) {
- assert(ShiftOp->getOpcode() == Instruction::Shl);
- Value *Shift = Builder->CreateLShr(X, ConstantInt::get(Ty, ShiftDiff));
+ ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+ // (X <<nuw C1) >>u C2 --> X >>u (C2-C1)
+ if (ShiftOp->hasNoUnsignedWrap()) {
+ BinaryOperator *NewLShr = BinaryOperator::Create(Instruction::LShr,
+ X, ShiftDiffCst);
+ NewLShr->setIsExact(I.isExact());
+ return NewLShr;
+ }
+ Value *Shift = Builder->CreateLShr(X, ShiftDiffCst);
APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
return BinaryOperator::CreateAnd(Shift,
ConstantInt::get(I.getContext(),Mask));
}
-
- // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
+
+ // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. However,
+ // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
+ if (I.getOpcode() == Instruction::AShr &&
+ ShiftOp->getOpcode() == Instruction::Shl) {
+ if (ShiftOp->hasNoSignedWrap()) {
+ // (X <<nsw C1) >>s C2 --> X >>s (C2-C1)
+ ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+ BinaryOperator *NewAShr = BinaryOperator::Create(Instruction::AShr,
+ X, ShiftDiffCst);
+ NewAShr->setIsExact(I.isExact());
+ return NewAShr;
+ }
+ }
} else {
assert(ShiftAmt2 < ShiftAmt1);
uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
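The exact-shift cases above rest on an arithmetic identity: when the low C1 bits of X are zero (which is what lshr/ashr "exact" asserts), no bits are lost, so the two shifts collapse into one. A standalone check for the C2 > C1 branch:

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  const unsigned C1 = 3, C2 = 5;
  for (uint32_t X : {0u, 8u, 64u, 0xFFFFFFF8u}) {
    assert((X & ((1u << C1) - 1)) == 0);           // the "exact" precondition
    assert(((X >> C1) << C2) == (X << (C2 - C1))); // (X >>exact C1) << C2
  }
  return 0;
}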
@@ -602,9 +630,16 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
if (I.getOpcode() == Instruction::Shl &&
ShiftOp->getOpcode() != Instruction::Shl) {
- Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(), X,
- ConstantInt::get(Ty, ShiftDiff));
-
+ ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+ if (ShiftOp->isExact()) {
+ // (X >>?exact C1) << C2 --> X >>?exact (C1-C2)
+ BinaryOperator *NewShr = BinaryOperator::Create(ShiftOp->getOpcode(),
+ X, ShiftDiffCst);
+ NewShr->setIsExact(true);
+ return NewShr;
+ }
+ Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(),
+ X, ShiftDiffCst);
APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
return BinaryOperator::CreateAnd(Shift,
ConstantInt::get(I.getContext(),Mask));
@@ -613,14 +648,34 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
if (I.getOpcode() == Instruction::LShr &&
ShiftOp->getOpcode() == Instruction::Shl) {
- Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
+ ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+ if (ShiftOp->hasNoUnsignedWrap()) {
+ // (X <<nuw C1) >>u C2 --> X <<nuw (C1-C2)
+ BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
+ X, ShiftDiffCst);
+ NewShl->setHasNoUnsignedWrap(true);
+ return NewShl;
+ }
+ Value *Shift = Builder->CreateShl(X, ShiftDiffCst);
APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
return BinaryOperator::CreateAnd(Shift,
ConstantInt::get(I.getContext(),Mask));
}
- // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
+ // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. However,
+ // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
+ if (I.getOpcode() == Instruction::AShr &&
+ ShiftOp->getOpcode() == Instruction::Shl) {
+ if (ShiftOp->hasNoSignedWrap()) {
+ // (X <<nsw C1) >>s C2 --> X <<nsw (C1-C2)
+ ConstantInt *ShiftDiffCst = ConstantInt::get(Ty, ShiftDiff);
+ BinaryOperator *NewShl = BinaryOperator::Create(Instruction::Shl,
+ X, ShiftDiffCst);
+ NewShl->setHasNoSignedWrap(true);
+ return NewShl;
+ }
+ }
}
}
return 0;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 5cd9a4b..125c74a 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -142,7 +142,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
- ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
+ ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
return 0; // Only analyze instructions.
}
@@ -156,10 +156,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// this instruction has a simpler value in that context.
if (I->getOpcode() == Instruction::And) {
// If either the LHS or the RHS are Zero, the result is zero.
- ComputeMaskedBits(I->getOperand(1), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1);
- ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
- LHSKnownZero, LHSKnownOne, Depth+1);
+ ComputeMaskedBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
+ ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
// If all of the demanded bits are known 1 on one side, return the other.
// These bits cannot contribute to the result of the 'and' in this
@@ -180,10 +178,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// only bits from X or Y are demanded.
// If either the LHS or the RHS are One, the result is One.
- ComputeMaskedBits(I->getOperand(1), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1);
- ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
- LHSKnownZero, LHSKnownOne, Depth+1);
+ ComputeMaskedBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1);
+ ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
// If all of the demanded bits are known zero on one side, return the
// other. These bits cannot contribute to the result of the 'or' in this
@@ -206,7 +202,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
// Compute the KnownZero/KnownOne bits to simplify things downstream.
- ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
+ ComputeMaskedBits(I, KnownZero, KnownOne, Depth);
return 0;
}
@@ -219,7 +215,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
switch (I->getOpcode()) {
default:
- ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
+ ComputeMaskedBits(I, KnownZero, KnownOne, Depth);
break;
case Instruction::And:
// If either the LHS or the RHS are Zero, the result is zero.
@@ -567,9 +563,20 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
LHSKnownZero, LHSKnownOne, Depth+1))
return I;
}
+
// Otherwise just hand the sub off to ComputeMaskedBits to fill in
// the known zeros and ones.
- ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
+ ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
+
+ // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
+ // zero.
+ if (ConstantInt *C0 = dyn_cast<ConstantInt>(I->getOperand(0))) {
+ APInt I0 = C0->getValue();
+ if ((I0 + 1).isPowerOf2() && (I0 | KnownZero).isAllOnesValue()) {
+ Instruction *Xor = BinaryOperator::CreateXor(I->getOperand(1), C0);
+ return InsertNewInstWith(Xor, *I);
+ }
+ }
break;
case Instruction::Shl:
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
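The sub-to-xor fold just added, as a plain identity: when C0 is 2^n - 1 and X has no bits outside the low n, the subtraction can never borrow, so C0 - X == C0 ^ X:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C0 = 0xFF;             // 2^8 - 1
  for (uint32_t X = 0; X <= 0xFF; ++X)  // remaining bits known zero
    assert(C0 - X == (C0 ^ X));
  return 0;
}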
@@ -671,8 +678,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] ||
(HighBits & ~DemandedMask) == HighBits) {
// Perform the logical shift right.
- Instruction *NewVal = BinaryOperator::CreateLShr(
- I->getOperand(0), SA, I->getName());
+ BinaryOperator *NewVal = BinaryOperator::CreateLShr(I->getOperand(0),
+ SA, I->getName());
+ NewVal->setIsExact(cast<BinaryOperator>(I)->isExact());
return InsertNewInstWith(NewVal, *I);
} else if ((KnownOne & SignBit) != 0) { // New bits are known one.
KnownOne |= HighBits;
@@ -717,10 +725,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// The sign bit is the LHS's sign bit, except when the result of the
// remainder is zero.
if (DemandedMask.isNegative() && KnownZero.isNonNegative()) {
- APInt Mask2 = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne,
- Depth+1);
+ ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1);
// If it's known zero, our sign bit is also zero.
if (LHSKnownZero.isNegative())
KnownZero |= LHSKnownZero;
@@ -783,7 +789,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return 0;
}
}
- ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
+ ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
break;
}
@@ -822,46 +828,39 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
}
UndefElts = 0;
- if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
+
+ // Handle ConstantAggregateZero, ConstantVector, ConstantDataSequential.
+ if (Constant *C = dyn_cast<Constant>(V)) {
+ // Check if this is identity. If so, return 0 since we are not simplifying
+ // anything.
+ if (DemandedElts.isAllOnesValue())
+ return 0;
+
Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Undef = UndefValue::get(EltTy);
-
- std::vector<Constant*> Elts;
- for (unsigned i = 0; i != VWidth; ++i)
+
+ SmallVector<Constant*, 16> Elts;
+ for (unsigned i = 0; i != VWidth; ++i) {
if (!DemandedElts[i]) { // If not demanded, set to undef.
Elts.push_back(Undef);
UndefElts.setBit(i);
- } else if (isa<UndefValue>(CV->getOperand(i))) { // Already undef.
+ continue;
+ }
+
+ Constant *Elt = C->getAggregateElement(i);
+ if (Elt == 0) return 0;
+
+ if (isa<UndefValue>(Elt)) { // Already undef.
Elts.push_back(Undef);
UndefElts.setBit(i);
} else { // Otherwise, defined.
- Elts.push_back(CV->getOperand(i));
+ Elts.push_back(Elt);
}
-
- // If we changed the constant, return it.
- Constant *NewCP = ConstantVector::get(Elts);
- return NewCP != CV ? NewCP : 0;
- }
-
- if (isa<ConstantAggregateZero>(V)) {
- // Simplify the CAZ to a ConstantVector where the non-demanded elements are
- // set to undef.
-
- // Check if this is identity. If so, return 0 since we are not simplifying
- // anything.
- if (DemandedElts.isAllOnesValue())
- return 0;
-
- Type *EltTy = cast<VectorType>(V->getType())->getElementType();
- Constant *Zero = Constant::getNullValue(EltTy);
- Constant *Undef = UndefValue::get(EltTy);
- std::vector<Constant*> Elts;
- for (unsigned i = 0; i != VWidth; ++i) {
- Constant *Elt = DemandedElts[i] ? Zero : Undef;
- Elts.push_back(Elt);
}
- UndefElts = DemandedElts ^ EltMask;
- return ConstantVector::get(Elts);
+
+ // If we changed the constant, return it.
+ Constant *NewCV = ConstantVector::get(Elts);
+ return NewCV != C ? NewCV : 0;
}
// Limit search depth.
@@ -977,7 +976,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
if (NewUndefElts) {
// Add additional discovered undefs.
- std::vector<Constant*> Elts;
+ SmallVector<Constant*, 16> Elts;
for (unsigned i = 0; i < VWidth; ++i) {
if (UndefElts[i])
Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 154267c..cf60f0f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -16,16 +16,16 @@
using namespace llvm;
/// CheapToScalarize - Return true if the value is cheaper to scalarize than it
-/// is to leave as a vector operation.
+/// is to leave as a vector operation. isConstant indicates whether we're
+/// extracting one known element. If false, we're extracting a variable index.
static bool CheapToScalarize(Value *V, bool isConstant) {
- if (isa<ConstantAggregateZero>(V))
- return true;
- if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
+ if (Constant *C = dyn_cast<Constant>(V)) {
if (isConstant) return true;
- // If all elts are the same, we can extract.
- Constant *Op0 = C->getOperand(0);
- for (unsigned i = 1; i < C->getNumOperands(); ++i)
- if (C->getOperand(i) != Op0)
+
+ // If all elts are the same, we can extract it and use any of the values.
+ Constant *Op0 = C->getAggregateElement(0U);
+ for (unsigned i = 1, e = V->getType()->getVectorNumElements(); i != e; ++i)
+ if (C->getAggregateElement(i) != Op0)
return false;
return true;
}
@@ -53,41 +53,18 @@ static bool CheapToScalarize(Value *V, bool isConstant) {
return false;
}
-/// getShuffleMask - Read and decode a shufflevector mask.
-/// Turn undef elements into negative values.
-static std::vector<int> getShuffleMask(const ShuffleVectorInst *SVI) {
- unsigned NElts = SVI->getType()->getNumElements();
- if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
- return std::vector<int>(NElts, 0);
- if (isa<UndefValue>(SVI->getOperand(2)))
- return std::vector<int>(NElts, -1);
-
- std::vector<int> Result;
- const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
- for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
- if (isa<UndefValue>(*i))
- Result.push_back(-1); // undef
- else
- Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
- return Result;
-}
-
/// FindScalarElement - Given a vector and an element number, see if the scalar
/// value is already around as a register, for example if it were inserted then
/// extracted from the vector.
static Value *FindScalarElement(Value *V, unsigned EltNo) {
assert(V->getType()->isVectorTy() && "Not looking at a vector?");
- VectorType *PTy = cast<VectorType>(V->getType());
- unsigned Width = PTy->getNumElements();
+ VectorType *VTy = cast<VectorType>(V->getType());
+ unsigned Width = VTy->getNumElements();
if (EltNo >= Width) // Out of range access.
- return UndefValue::get(PTy->getElementType());
+ return UndefValue::get(VTy->getElementType());
- if (isa<UndefValue>(V))
- return UndefValue::get(PTy->getElementType());
- if (isa<ConstantAggregateZero>(V))
- return Constant::getNullValue(PTy->getElementType());
- if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
- return CP->getOperand(EltNo);
+ if (Constant *C = dyn_cast<Constant>(V))
+ return C->getAggregateElement(EltNo);
if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
// If this is an insert to a variable element, we don't know what it is.
@@ -106,11 +83,10 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
}
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
- unsigned LHSWidth =
- cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
- int InEl = getShuffleMask(SVI)[EltNo];
+ unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
+ int InEl = SVI->getMaskValue(EltNo);
if (InEl < 0)
- return UndefValue::get(PTy->getElementType());
+ return UndefValue::get(VTy->getElementType());
if (InEl < (int)LHSWidth)
return FindScalarElement(SVI->getOperand(0), InEl);
return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth);
@@ -121,27 +97,11 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
}
Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
- // If vector val is undef, replace extract with scalar undef.
- if (isa<UndefValue>(EI.getOperand(0)))
- return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
-
- // If vector val is constant 0, replace extract with scalar 0.
- if (isa<ConstantAggregateZero>(EI.getOperand(0)))
- return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
-
- if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
- // If vector val is constant with all elements the same, replace EI with
- // that element. When the elements are not identical, we cannot replace yet
- // (we do that below, but only when the index is constant).
- Constant *op0 = C->getOperand(0);
- for (unsigned i = 1; i != C->getNumOperands(); ++i)
- if (C->getOperand(i) != op0) {
- op0 = 0;
- break;
- }
- if (op0)
- return ReplaceInstUsesWith(EI, op0);
- }
+ // If vector val is constant with all elements the same, replace EI with
+ // that element. We handle a known element # below.
+ if (Constant *C = dyn_cast<Constant>(EI.getOperand(0)))
+ if (CheapToScalarize(C, false))
+ return ReplaceInstUsesWith(EI, C->getAggregateElement(0U));
// If extracting a specified index from the vector, see if we can recursively
// find a previously computed scalar that was inserted into the vector.
@@ -175,8 +135,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// the same number of elements, see if we can find the source element from
// it. In this case, we will end up needing to bitcast the scalars.
if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
- if (VectorType *VT =
- dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
+ if (VectorType *VT = dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
if (VT->getNumElements() == VectorWidth)
if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
return new BitCastInst(Elt, EI.getType());
@@ -212,10 +171,10 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// If this is extracting an element from a shufflevector, figure out where
// it came from and extract from the appropriate input element instead.
if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
- int SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
+ int SrcIdx = SVI->getMaskValue(Elt->getZExtValue());
Value *Src;
unsigned LHSWidth =
- cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
+ SVI->getOperand(0)->getType()->getVectorNumElements();
if (SrcIdx < 0)
return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
@@ -248,7 +207,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
/// elements from either LHS or RHS, return the shuffle mask and true.
/// Otherwise, return false.
static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
- std::vector<Constant*> &Mask) {
+ SmallVectorImpl<Constant*> &Mask) {
assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
"Invalid CollectSingleShuffleElements");
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
@@ -325,7 +284,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
/// CollectShuffleElements - We are building a shuffle of V, using RHS as the
/// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
/// that computes V and the LHS value of the shuffle.
-static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
+static Value *CollectShuffleElements(Value *V, SmallVectorImpl<Constant*> &Mask,
Value *&RHS) {
assert(V->getType()->isVectorTy() &&
(RHS == 0 || V->getType() == RHS->getType()) &&
@@ -335,10 +294,14 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
if (isa<UndefValue>(V)) {
Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
return V;
- } else if (isa<ConstantAggregateZero>(V)) {
+ }
+
+ if (isa<ConstantAggregateZero>(V)) {
Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(V->getContext()),0));
return V;
- } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
+ }
+
+ if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
// If this is an insert of an extract from some other vector, include it.
Value *VecOp = IEI->getOperand(0);
Value *ScalarOp = IEI->getOperand(1);
@@ -421,7 +384,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
// If this insertelement isn't used by some other insertelement, turn it
// (and any insertelements it points to), into one big shuffle.
if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
- std::vector<Constant*> Mask;
+ SmallVector<Constant*, 16> Mask;
Value *RHS = 0;
Value *LHS = CollectShuffleElements(&IE, Mask, RHS);
if (RHS == 0) RHS = UndefValue::get(LHS->getType());
@@ -447,7 +410,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
Value *LHS = SVI.getOperand(0);
Value *RHS = SVI.getOperand(1);
- std::vector<int> Mask = getShuffleMask(&SVI);
+ SmallVector<int, 16> Mask = SVI.getShuffleMask();
bool MadeChange = false;
@@ -457,9 +420,6 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
- if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
- return 0;
-
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
@@ -470,29 +430,34 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
MadeChange = true;
}
+ unsigned LHSWidth = cast<VectorType>(LHS->getType())->getNumElements();
+
// Canonicalize shuffle(x ,x,mask) -> shuffle(x, undef,mask')
// Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
if (LHS == RHS || isa<UndefValue>(LHS)) {
if (isa<UndefValue>(LHS) && LHS == RHS) {
// shuffle(undef,undef,mask) -> undef.
- return ReplaceInstUsesWith(SVI, LHS);
+ Value* result = (VWidth == LHSWidth)
+ ? LHS : UndefValue::get(SVI.getType());
+ return ReplaceInstUsesWith(SVI, result);
}
// Remap any references to RHS to use LHS.
- std::vector<Constant*> Elts;
- for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- if (Mask[i] < 0)
+ SmallVector<Constant*, 16> Elts;
+ for (unsigned i = 0, e = LHSWidth; i != VWidth; ++i) {
+ if (Mask[i] < 0) {
Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
- else {
- if ((Mask[i] >= (int)e && isa<UndefValue>(RHS)) ||
- (Mask[i] < (int)e && isa<UndefValue>(LHS))) {
- Mask[i] = -1; // Turn into undef.
- Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
- } else {
- Mask[i] = Mask[i] % e; // Force to LHS.
- Elts.push_back(ConstantInt::get(Type::getInt32Ty(SVI.getContext()),
- Mask[i]));
- }
+ continue;
+ }
+
+ if ((Mask[i] >= (int)e && isa<UndefValue>(RHS)) ||
+ (Mask[i] < (int)e && isa<UndefValue>(LHS))) {
+ Mask[i] = -1; // Turn into undef.
+ Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
+ } else {
+ Mask[i] = Mask[i] % e; // Force to LHS.
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(SVI.getContext()),
+ Mask[i]));
}
}
SVI.setOperand(0, SVI.getOperand(1));
@@ -503,72 +468,204 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
MadeChange = true;
}
- // Analyze the shuffle, are the LHS or RHS and identity shuffles?
- bool isLHSID = true, isRHSID = true;
+ if (VWidth == LHSWidth) {
+ // Analyze the shuffle, are the LHS or RHS and identity shuffles?
+ bool isLHSID = true, isRHSID = true;
- for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- if (Mask[i] < 0) continue; // Ignore undef values.
- // Is this an identity shuffle of the LHS value?
- isLHSID &= (Mask[i] == (int)i);
+ for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
+ if (Mask[i] < 0) continue; // Ignore undef values.
+ // Is this an identity shuffle of the LHS value?
+ isLHSID &= (Mask[i] == (int)i);
- // Is this an identity shuffle of the RHS value?
- isRHSID &= (Mask[i]-e == i);
- }
+ // Is this an identity shuffle of the RHS value?
+ isRHSID &= (Mask[i]-e == i);
+ }
- // Eliminate identity shuffles.
- if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
- if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
+ // Eliminate identity shuffles.
+ if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
+ if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
+ }
// If the LHS is a shufflevector itself, see if we can combine it with this
- // one without producing an unusual shuffle. Here we are really conservative:
+ // one without producing an unusual shuffle.
+ // Cases that might be simplified:
+ // 1.
+ // x1=shuffle(v1,v2,mask1)
+ // x=shuffle(x1,undef,mask)
+ // ==>
+ // x=shuffle(v1,undef,newMask)
+ // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : -1
+ // 2.
+ // x1=shuffle(v1,undef,mask1)
+ // x=shuffle(x1,x2,mask)
+ // where v1.size() == mask1.size()
+ // ==>
+ // x=shuffle(v1,x2,newMask)
+ // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : mask[i]
+ // 3.
+ // x2=shuffle(v2,undef,mask2)
+ // x=shuffle(x1,x2,mask)
+ // where v2.size() == mask2.size()
+ // ==>
+ // x=shuffle(x1,v2,newMask)
+ // newMask[i] = (mask[i] < x1.size())
+ // ? mask[i] : mask2[mask[i]-x1.size()]+x1.size()
+ // 4.
+ // x1=shuffle(v1,undef,mask1)
+ // x2=shuffle(v2,undef,mask2)
+ // x=shuffle(x1,x2,mask)
+ // where v1.size() == v2.size()
+ // ==>
+ // x=shuffle(v1,v2,newMask)
+ // newMask[i] = (mask[i] < x1.size())
+ // ? mask1[mask[i]] : mask2[mask[i]-x1.size()]+v1.size()
+ //
+ // Here we are really conservative:
// we are absolutely afraid of producing a shuffle mask not in the input
// program, because the code gen may not be smart enough to turn a merged
// shuffle into two specific shuffles: it may produce worse code. As such,
// we only merge two shuffles if the result is either a splat or one of the
- // two input shuffle masks. In this case, merging the shuffles just removes
+ // input shuffle masks. In this case, merging the shuffles just removes
// one instruction, which we know is safe. This is good for things like
- // turning: (splat(splat)) -> splat.
- if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
+ // turning: (splat(splat)) -> splat, or
+ // merge(V[0..n], V[n+1..2n]) -> V[0..2n]
+ ShuffleVectorInst* LHSShuffle = dyn_cast<ShuffleVectorInst>(LHS);
+ ShuffleVectorInst* RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS);
+ if (LHSShuffle)
+ if (!isa<UndefValue>(LHSShuffle->getOperand(1)) && !isa<UndefValue>(RHS))
+ LHSShuffle = NULL;
+ if (RHSShuffle)
+ if (!isa<UndefValue>(RHSShuffle->getOperand(1)))
+ RHSShuffle = NULL;
+ if (!LHSShuffle && !RHSShuffle)
+ return MadeChange ? &SVI : 0;
+
+ Value* LHSOp0 = NULL;
+ Value* LHSOp1 = NULL;
+ Value* RHSOp0 = NULL;
+ unsigned LHSOp0Width = 0;
+ unsigned RHSOp0Width = 0;
+ if (LHSShuffle) {
+ LHSOp0 = LHSShuffle->getOperand(0);
+ LHSOp1 = LHSShuffle->getOperand(1);
+ LHSOp0Width = cast<VectorType>(LHSOp0->getType())->getNumElements();
+ }
+ if (RHSShuffle) {
+ RHSOp0 = RHSShuffle->getOperand(0);
+ RHSOp0Width = cast<VectorType>(RHSOp0->getType())->getNumElements();
+ }
+ Value* newLHS = LHS;
+ Value* newRHS = RHS;
+ if (LHSShuffle) {
+ // case 1
if (isa<UndefValue>(RHS)) {
- std::vector<int> LHSMask = getShuffleMask(LHSSVI);
-
- if (LHSMask.size() == Mask.size()) {
- std::vector<int> NewMask;
- bool isSplat = true;
- int SplatElt = -1; // undef
- for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- int MaskElt;
- if (Mask[i] < 0 || Mask[i] >= (int)e)
- MaskElt = -1; // undef
- else
- MaskElt = LHSMask[Mask[i]];
- // Check if this could still be a splat.
- if (MaskElt >= 0) {
- if (SplatElt >=0 && SplatElt != MaskElt)
- isSplat = false;
- SplatElt = MaskElt;
- }
- NewMask.push_back(MaskElt);
- }
+ newLHS = LHSOp0;
+ newRHS = LHSOp1;
+ }
+ // case 2 or 4
+ else if (LHSOp0Width == LHSWidth) {
+ newLHS = LHSOp0;
+ }
+ }
+ // case 3 or 4
+ if (RHSShuffle && RHSOp0Width == LHSWidth) {
+ newRHS = RHSOp0;
+ }
+ // case 4
+ if (LHSOp0 == RHSOp0) {
+ newLHS = LHSOp0;
+ newRHS = NULL;
+ }
- // If the result mask is equal to the src shuffle or this
- // shuffle mask, do the replacement.
- if (isSplat || NewMask == LHSMask || NewMask == Mask) {
- std::vector<Constant*> Elts;
- Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
- for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
- if (NewMask[i] < 0) {
- Elts.push_back(UndefValue::get(Int32Ty));
- } else {
- Elts.push_back(ConstantInt::get(Int32Ty, NewMask[i]));
- }
- }
- return new ShuffleVectorInst(LHSSVI->getOperand(0),
- LHSSVI->getOperand(1),
- ConstantVector::get(Elts));
+ if (newLHS == LHS && newRHS == RHS)
+ return MadeChange ? &SVI : 0;
+
+ SmallVector<int, 16> LHSMask;
+ SmallVector<int, 16> RHSMask;
+ if (newLHS != LHS)
+ LHSMask = LHSShuffle->getShuffleMask();
+ if (RHSShuffle && newRHS != RHS)
+ RHSMask = RHSShuffle->getShuffleMask();
+
+ unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth;
+ SmallVector<int, 16> newMask;
+ bool isSplat = true;
+ int SplatElt = -1;
+ // Create a new mask for the new ShuffleVectorInst so that the new
+ // ShuffleVectorInst is equivalent to the original one.
+ for (unsigned i = 0; i < VWidth; ++i) {
+ int eltMask;
+ if (Mask[i] == -1) {
+ // This element is an undef value.
+ eltMask = -1;
+ } else if (Mask[i] < (int)LHSWidth) {
+ // This element is from left hand side vector operand.
+ //
+ // If LHS is going to be replaced (case 1, 2, or 4), calculate the
+ // new mask value for the element.
+ if (newLHS != LHS) {
+ eltMask = LHSMask[Mask[i]];
+ // If the value selected is an undef value, explicitly specify it
+ // with a -1 mask value.
+ if (eltMask >= (int)LHSOp0Width && isa<UndefValue>(LHSOp1))
+ eltMask = -1;
+ }
+ else
+ eltMask = Mask[i];
+ } else {
+ // This element is from right hand side vector operand
+ //
+ // If the value selected is an undef value, explicitly specify it
+ // with a -1 mask value. (case 1)
+ if (isa<UndefValue>(RHS))
+ eltMask = -1;
+ // If RHS is going to be replaced (case 3 or 4), calculate the
+ // new mask value for the element.
+ else if (newRHS != RHS) {
+ eltMask = RHSMask[Mask[i]-LHSWidth];
+ // If the value selected is an undef value, explicitly specify it
+ // with a -1 mask value.
+ if (eltMask >= (int)RHSOp0Width) {
+ assert(isa<UndefValue>(RHSShuffle->getOperand(1))
+ && "should have been check above");
+ eltMask = -1;
}
}
+ else
+ eltMask = Mask[i]-LHSWidth;
+
+ // If LHS's width is changed, shift the mask value accordingly.
+ // If newRHS == NULL, i.e. LHSOp0 == RHSOp0, we want to remap any
+ // references to RHSOp0 to LHSOp0, so we don't need to shift the mask.
+ if (eltMask >= 0 && newRHS != NULL)
+ eltMask += newLHSWidth;
+ }
+
+ // Check if this could still be a splat.
+ if (eltMask >= 0) {
+ if (SplatElt >= 0 && SplatElt != eltMask)
+ isSplat = false;
+ SplatElt = eltMask;
+ }
+
+ newMask.push_back(eltMask);
+ }
+
+ // If the result mask is equal to one of the original shuffle masks,
+ // or is a splat, do the replacement.
+ if (isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask) {
+ SmallVector<Constant*, 16> Elts;
+ Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
+ for (unsigned i = 0, e = newMask.size(); i != e; ++i) {
+ if (newMask[i] < 0) {
+ Elts.push_back(UndefValue::get(Int32Ty));
+ } else {
+ Elts.push_back(ConstantInt::get(Int32Ty, newMask[i]));
+ }
}
+ if (newRHS == NULL)
+ newRHS = UndefValue::get(newLHS->getType());
+ return new ShuffleVectorInst(newLHS, newRHS, ConstantVector::get(Elts));
}
return MadeChange ? &SVI : 0;
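Case 1 of the merge table from the long comment earlier in this hunk, pulled out as a standalone helper (mergeCase1 is hypothetical, not an LLVM API; -1 denotes an undef mask element):

#include <vector>

static std::vector<int> mergeCase1(const std::vector<int> &Mask1,
                                   const std::vector<int> &Mask,
                                   unsigned X1Size) {
  // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : -1
  std::vector<int> NewMask;
  for (int M : Mask)
    NewMask.push_back((M >= 0 && M < static_cast<int>(X1Size)) ? Mask1[M] : -1);
  return NewMask;
}

int main() {
  // shuffle(shuffle(v1, v2, {0,0,0,0}), undef, {0,1,2,3}) stays a splat of
  // v1[0]; the merged mask is {0,0,0,0}, one of the accepted result forms.
  std::vector<int> New = mergeCase1({0, 0, 0, 0}, {0, 1, 2, 3}, 4);
  return (New == std::vector<int>{0, 0, 0, 0}) ? 0 : 1;
}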
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
index 32009c3..99a02fc 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
@@ -55,9 +55,9 @@ public:
Worklist.reserve(NumEntries+16);
WorklistMap.resize(NumEntries);
DEBUG(errs() << "IC: ADDING: " << NumEntries << " instrs to worklist\n");
- for (; NumEntries; --NumEntries) {
+ for (unsigned Idx = 0; NumEntries; --NumEntries) {
Instruction *I = List[NumEntries-1];
- WorklistMap.insert(std::make_pair(I, Worklist.size()));
+ WorklistMap.insert(std::make_pair(I, Idx++));
Worklist.push_back(I);
}
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index c15b805..066b2ec 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -41,6 +41,7 @@
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
@@ -74,11 +75,15 @@ void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
}
char InstCombiner::ID = 0;
-INITIALIZE_PASS(InstCombiner, "instcombine",
+INITIALIZE_PASS_BEGIN(InstCombiner, "instcombine",
+ "Combine redundant instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(InstCombiner, "instcombine",
"Combine redundant instructions", false, false)
void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<TargetLibraryInfo>();
}
@@ -490,7 +495,7 @@ Value *InstCombiner::dyn_castNegVal(Value *V) const {
if (ConstantInt *C = dyn_cast<ConstantInt>(V))
return ConstantExpr::getNeg(C);
- if (ConstantVector *C = dyn_cast<ConstantVector>(V))
+ if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
if (C->getType()->getElementType()->isIntegerTy())
return ConstantExpr::getNeg(C);
@@ -509,7 +514,7 @@ Value *InstCombiner::dyn_castFNegVal(Value *V) const {
if (ConstantFP *C = dyn_cast<ConstantFP>(V))
return ConstantExpr::getFNeg(C);
- if (ConstantVector *C = dyn_cast<ConstantVector>(V))
+ if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
if (C->getType()->getElementType()->isFloatingPointTy())
return ConstantExpr::getFNeg(C);
@@ -826,7 +831,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
MadeChange = true;
}
- if ((*I)->getType() != IntPtrTy) {
+ Type *IndexTy = (*I)->getType();
+ if (IndexTy != IntPtrTy && !IndexTy->isVectorTy()) {
// If we are using a wider index than needed for this platform, shrink
// it to what we need. If narrower, sign-extend it to what we need.
// This explicit cast can make subsequent optimizations more obvious.
@@ -909,7 +915,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
Value *StrippedPtr = PtrOp->stripPointerCasts();
- PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());
+ PointerType *StrippedPtrTy = dyn_cast<PointerType>(StrippedPtr->getType());
+
+ // We do not handle pointer-vector geps here.
+ if (!StrippedPtrTy)
+ return 0;
+
if (StrippedPtr != PtrOp &&
StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
@@ -1235,15 +1246,15 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
if (I->getOpcode() == Instruction::Add)
if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
// change 'switch (X+4) case 1:' into 'switch (X) case -3'
- unsigned NumCases = SI.getNumCases();
// Skip the first item since that's the default case.
- for (unsigned i = 1; i < NumCases; ++i) {
- ConstantInt* CaseVal = SI.getCaseValue(i);
+ for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
+ i != e; ++i) {
+ ConstantInt* CaseVal = i.getCaseValue();
Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal),
AddRHS);
assert(isa<ConstantInt>(NewCaseVal) &&
"Result of expression should be constant");
- SI.setSuccessorValue(i, cast<ConstantInt>(NewCaseVal));
+ i.setValue(cast<ConstantInt>(NewCaseVal));
}
SI.setCondition(I->getOperand(0));
Worklist.Add(I);
@@ -1260,24 +1271,16 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
return ReplaceInstUsesWith(EV, Agg);
if (Constant *C = dyn_cast<Constant>(Agg)) {
- if (isa<UndefValue>(C))
- return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
-
- if (isa<ConstantAggregateZero>(C))
- return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
-
- if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
- // Extract the element indexed by the first index out of the constant
- Value *V = C->getOperand(*EV.idx_begin());
- if (EV.getNumIndices() > 1)
- // Extract the remaining indices out of the constant indexed by the
- // first index
- return ExtractValueInst::Create(V, EV.getIndices().slice(1));
- else
- return ReplaceInstUsesWith(EV, V);
+ if (Constant *C2 = C->getAggregateElement(*EV.idx_begin())) {
+ if (EV.getNumIndices() == 0)
+ return ReplaceInstUsesWith(EV, C2);
+ // Extract the remaining indices out of the constant indexed by the
+ // first index
+ return ExtractValueInst::Create(C2, EV.getIndices().slice(1));
}
return 0; // Can't handle other constants
- }
+ }
+
if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
// We're extracting from an insertvalue instruction, compare the indices
const unsigned *exti, *exte, *insi, *inse;
@@ -1414,7 +1417,8 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
enum Personality_Type {
Unknown_Personality,
GNU_Ada_Personality,
- GNU_CXX_Personality
+ GNU_CXX_Personality,
+ GNU_ObjC_Personality
};
/// RecognizePersonality - See if the given exception handling personality
@@ -1426,7 +1430,8 @@ static Personality_Type RecognizePersonality(Value *Pers) {
return Unknown_Personality;
return StringSwitch<Personality_Type>(F->getName())
.Case("__gnat_eh_personality", GNU_Ada_Personality)
- .Case("__gxx_personality_v0", GNU_CXX_Personality)
+ .Case("__gxx_personality_v0", GNU_CXX_Personality)
+ .Case("__objc_personality_v0", GNU_ObjC_Personality)
.Default(Unknown_Personality);
}
@@ -1440,6 +1445,7 @@ static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) {
// match foreign exceptions (or didn't, before gcc-4.7).
return false;
case GNU_CXX_Personality:
+ case GNU_ObjC_Personality:
return TypeInfo->isNullValue();
}
llvm_unreachable("Unknown personality!");
@@ -1795,7 +1801,8 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
static bool AddReachableCodeToWorklist(BasicBlock *BB,
SmallPtrSet<BasicBlock*, 64> &Visited,
InstCombiner &IC,
- const TargetData *TD) {
+ const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
bool MadeIRChange = false;
SmallVector<BasicBlock*, 256> Worklist;
Worklist.push_back(BB);
@@ -1822,7 +1829,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
// ConstantProp instruction if trivially constant.
if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
- if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
+ if (Constant *C = ConstantFoldInstruction(Inst, TD, TLI)) {
DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
<< *Inst << '\n');
Inst->replaceAllUsesWith(C);
@@ -1840,7 +1847,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
Constant*& FoldRes = FoldedConstants[CE];
if (!FoldRes)
- FoldRes = ConstantFoldConstantExpression(CE, TD);
+ FoldRes = ConstantFoldConstantExpression(CE, TD, TLI);
if (!FoldRes)
FoldRes = CE;
@@ -1867,15 +1874,16 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
// See if this is an explicit destination.
- for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
- if (SI->getCaseValue(i) == Cond) {
- BasicBlock *ReachableBB = SI->getSuccessor(i);
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i)
+ if (i.getCaseValue() == Cond) {
+ BasicBlock *ReachableBB = i.getCaseSuccessor();
Worklist.push_back(ReachableBB);
continue;
}
// Otherwise it is the default destination.
- Worklist.push_back(SI->getSuccessor(0));
+ Worklist.push_back(SI->getDefaultDest());
continue;
}
}
@@ -1899,14 +1907,15 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
MadeIRChange = false;
DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
- << F.getNameStr() << "\n");
+ << F.getName() << "\n");
{
// Do a depth-first traversal of the function, populate the worklist with
// the reachable instructions. Ignore blocks that are not reachable. Keep
// track of which blocks we visit.
SmallPtrSet<BasicBlock*, 64> Visited;
- MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);
+ MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD,
+ TLI);
// Do a quick scan over the function. If we find any blocks that are
// unreachable, remove any instructions inside of them. This prevents
@@ -1951,7 +1960,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// Instruction isn't dead, see if we can constant propagate it.
if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
- if (Constant *C = ConstantFoldInstruction(I, TD)) {
+ if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
// Add operands to the worklist.
@@ -2059,7 +2068,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
bool InstCombiner::runOnFunction(Function &F) {
TD = getAnalysisIfAvailable<TargetData>();
-
+ TLI = &getAnalysis<TargetLibraryInfo>();
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
new file mode 100644
index 0000000..b43b9e5
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -0,0 +1,937 @@
+//===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+// Details of the algorithm:
+// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asan"
+
+#include "FunctionBlackList.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Function.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/IRBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include "llvm/Type.h"
+
+#include <string>
+#include <algorithm>
+
+using namespace llvm;
+
+static const uint64_t kDefaultShadowScale = 3;
+static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
+static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
+
+static const size_t kMaxStackMallocSize = 1 << 16; // 64K
+static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
+static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
+
+static const char *kAsanModuleCtorName = "asan.module_ctor";
+static const char *kAsanModuleDtorName = "asan.module_dtor";
+static const int kAsanCtorAndCtorPriority = 1;
+static const char *kAsanReportErrorTemplate = "__asan_report_";
+static const char *kAsanRegisterGlobalsName = "__asan_register_globals";
+static const char *kAsanUnregisterGlobalsName = "__asan_unregister_globals";
+static const char *kAsanInitName = "__asan_init";
+static const char *kAsanHandleNoReturnName = "__asan_handle_no_return";
+static const char *kAsanMappingOffsetName = "__asan_mapping_offset";
+static const char *kAsanMappingScaleName = "__asan_mapping_scale";
+static const char *kAsanStackMallocName = "__asan_stack_malloc";
+static const char *kAsanStackFreeName = "__asan_stack_free";
+
+static const int kAsanStackLeftRedzoneMagic = 0xf1;
+static const int kAsanStackMidRedzoneMagic = 0xf2;
+static const int kAsanStackRightRedzoneMagic = 0xf3;
+static const int kAsanStackPartialRedzoneMagic = 0xf4;
+
+// Command-line flags.
+
+// This flag may need to be replaced with -f[no-]asan-reads.
+static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
+ cl::desc("instrument read instructions"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
+ cl::desc("instrument write instructions"), cl::Hidden, cl::init(true));
+// This flag may need to be replaced with -f[no]asan-stack.
+static cl::opt<bool> ClStack("asan-stack",
+ cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
+// This flag may need to be replaced with -f[no]asan-use-after-return.
+static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
+ cl::desc("Check return-after-free"), cl::Hidden, cl::init(false));
+// This flag may need to be replaced with -f[no]asan-globals.
+static cl::opt<bool> ClGlobals("asan-globals",
+ cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClMemIntrin("asan-memintrin",
+ cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true));
+// This flag may need to be replaced with -fasan-blacklist.
+static cl::opt<std::string> ClBlackListFile("asan-blacklist",
+ cl::desc("File containing the list of functions to ignore "
+ "during instrumentation"), cl::Hidden);
+
+// These flags allow to change the shadow mapping.
+// The shadow mapping looks like
+// Shadow = (Mem >> scale) + (1 << offset_log)
+static cl::opt<int> ClMappingScale("asan-mapping-scale",
+ cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
+static cl::opt<int> ClMappingOffsetLog("asan-mapping-offset-log",
+ cl::desc("offset of asan shadow mapping"), cl::Hidden, cl::init(-1));
+
+// Optimization flags. Not user visible, used mostly for testing
+// and benchmarking the tool.
+static cl::opt<bool> ClOpt("asan-opt",
+ cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp",
+ cl::desc("Instrument the same temp just once"), cl::Hidden,
+ cl::init(true));
+static cl::opt<bool> ClOptGlobals("asan-opt-globals",
+ cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));
+
+// Debug flags.
+static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
+ cl::init(0));
+static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
+ cl::Hidden, cl::init(0));
+static cl::opt<std::string> ClDebugFunc("asan-debug-func",
+ cl::Hidden, cl::desc("Debug func"));
+static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
+ cl::Hidden, cl::init(-1));
+static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
+ cl::Hidden, cl::init(-1));
+
+namespace {
+
+/// AddressSanitizer: instrument the code in the module to find memory bugs.
+struct AddressSanitizer : public ModulePass {
+ AddressSanitizer();
+ virtual const char *getPassName() const;
+ void instrumentMop(Instruction *I);
+ void instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB,
+ Value *Addr, uint32_t TypeSize, bool IsWrite);
+ Instruction *generateCrashCode(IRBuilder<> &IRB, Value *Addr,
+ bool IsWrite, uint32_t TypeSize);
+ bool instrumentMemIntrinsic(MemIntrinsic *MI);
+ void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr,
+ Value *Size,
+ Instruction *InsertBefore, bool IsWrite);
+ Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
+ bool handleFunction(Module &M, Function &F);
+ bool maybeInsertAsanInitAtFunctionEntry(Function &F);
+ bool poisonStackInFunction(Module &M, Function &F);
+ virtual bool runOnModule(Module &M);
+ bool insertGlobalRedzones(Module &M);
+ BranchInst *splitBlockAndInsertIfThen(Instruction *SplitBefore, Value *Cmp);
+ static char ID; // Pass identification, replacement for typeid
+
+ private:
+
+ uint64_t getAllocaSizeInBytes(AllocaInst *AI) {
+ Type *Ty = AI->getAllocatedType();
+ uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
+ return SizeInBytes;
+ }
+ uint64_t getAlignedSize(uint64_t SizeInBytes) {
+ return ((SizeInBytes + RedzoneSize - 1)
+ / RedzoneSize) * RedzoneSize;
+ }
+ uint64_t getAlignedAllocaSize(AllocaInst *AI) {
+ uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
+ return getAlignedSize(SizeInBytes);
+ }
+
+ void PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> IRB,
+ Value *ShadowBase, bool DoPoison);
+ bool LooksLikeCodeInBug11395(Instruction *I);
+
+ Module *CurrentModule;
+ LLVMContext *C;
+ TargetData *TD;
+ uint64_t MappingOffset;
+ int MappingScale;
+ size_t RedzoneSize;
+ int LongSize;
+ Type *IntptrTy;
+ Type *IntptrPtrTy;
+ Function *AsanCtorFunction;
+ Function *AsanInitFunction;
+ Instruction *CtorInsertBefore;
+ OwningPtr<FunctionBlackList> BL;
+};
+} // namespace
+
+char AddressSanitizer::ID = 0;
+INITIALIZE_PASS(AddressSanitizer, "asan",
+ "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
+ false, false)
+AddressSanitizer::AddressSanitizer() : ModulePass(ID) { }
+ModulePass *llvm::createAddressSanitizerPass() {
+ return new AddressSanitizer();
+}
+
+const char *AddressSanitizer::getPassName() const {
+ return "AddressSanitizer";
+}
+
+// Create a constant for Str so that we can pass it to the run-time lib.
+static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) {
+ Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
+ return new GlobalVariable(M, StrConst->getType(), true,
+ GlobalValue::PrivateLinkage, StrConst, "");
+}
+
+// Split the basic block and insert an if-then code.
+// Before:
+// Head
+// SplitBefore
+// Tail
+// After:
+// Head
+// if (Cmp)
+// NewBasicBlock
+// SplitBefore
+// Tail
+//
+// Returns the NewBasicBlock's terminator.
+BranchInst *AddressSanitizer::splitBlockAndInsertIfThen(
+ Instruction *SplitBefore, Value *Cmp) {
+ BasicBlock *Head = SplitBefore->getParent();
+ BasicBlock *Tail = Head->splitBasicBlock(SplitBefore);
+ TerminatorInst *HeadOldTerm = Head->getTerminator();
+ BasicBlock *NewBasicBlock =
+ BasicBlock::Create(*C, "", Head->getParent());
+ BranchInst *HeadNewTerm = BranchInst::Create(/*ifTrue*/NewBasicBlock,
+ /*ifFalse*/Tail,
+ Cmp);
+ ReplaceInstWithInst(HeadOldTerm, HeadNewTerm);
+
+ BranchInst *CheckTerm = BranchInst::Create(Tail, NewBasicBlock);
+ return CheckTerm;
+}
+
+Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
+ // Shadow >> scale
+ Shadow = IRB.CreateLShr(Shadow, MappingScale);
+ if (MappingOffset == 0)
+ return Shadow;
+ // (Shadow >> scale) | offset
+ return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy,
+ MappingOffset));
+}
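+
+// Editor's sketch (names illustrative, not LLVM's): in C terms, with the
+// default 64-bit parameters above (scale 3, offset 1ULL << 44), the
+// address arithmetic this emits is roughly:
+//
+//   static inline uint8_t *MemToShadow(uintptr_t Addr) {
+//     return (uint8_t *)((Addr >> 3) | (1ULL << 44));
+//   }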
+
+void AddressSanitizer::instrumentMemIntrinsicParam(Instruction *OrigIns,
+ Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) {
+ // Check the first byte.
+ {
+ IRBuilder<> IRB(InsertBefore);
+ instrumentAddress(OrigIns, IRB, Addr, 8, IsWrite);
+ }
+ // Check the last byte.
+ {
+ IRBuilder<> IRB(InsertBefore);
+ Value *SizeMinusOne = IRB.CreateSub(
+ Size, ConstantInt::get(Size->getType(), 1));
+ SizeMinusOne = IRB.CreateIntCast(SizeMinusOne, IntptrTy, false);
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ Value *AddrPlusSizeMinusOne = IRB.CreateAdd(AddrLong, SizeMinusOne);
+ instrumentAddress(OrigIns, IRB, AddrPlusSizeMinusOne, 8, IsWrite);
+ }
+}
+
+// Instrument memset/memmove/memcpy
+bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
+ Value *Dst = MI->getDest();
+ MemTransferInst *MemTran = dyn_cast<MemTransferInst>(MI);
+ Value *Src = MemTran ? MemTran->getSource() : NULL;
+ Value *Length = MI->getLength();
+
+ Constant *ConstLength = dyn_cast<Constant>(Length);
+ Instruction *InsertBefore = MI;
+ if (ConstLength) {
+ if (ConstLength->isNullValue()) return false;
+ } else {
+ // The size is not a constant so it could be zero -- check at run-time.
+ IRBuilder<> IRB(InsertBefore);
+
+ Value *Cmp = IRB.CreateICmpNE(Length,
+ Constant::getNullValue(Length->getType()));
+ InsertBefore = splitBlockAndInsertIfThen(InsertBefore, Cmp);
+ }
+
+ instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true);
+ if (Src)
+ instrumentMemIntrinsicParam(MI, Src, Length, InsertBefore, false);
+ return true;
+}
+
+static Value *getLDSTOperand(Instruction *I) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ return LI->getPointerOperand();
+ }
+ return cast<StoreInst>(*I).getPointerOperand();
+}
+
+void AddressSanitizer::instrumentMop(Instruction *I) {
+ int IsWrite = isa<StoreInst>(*I);
+ Value *Addr = getLDSTOperand(I);
+ if (ClOpt && ClOptGlobals && isa<GlobalVariable>(Addr)) {
+ // We are accessing a global scalar variable. Nothing to catch here.
+ return;
+ }
+ Type *OrigPtrTy = Addr->getType();
+ Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
+
+ assert(OrigTy->isSized());
+ uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
+
+ if (TypeSize != 8 && TypeSize != 16 &&
+ TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
+ // Ignore all unusual sizes.
+ return;
+ }
+
+ IRBuilder<> IRB(I);
+ instrumentAddress(I, IRB, Addr, TypeSize, IsWrite);
+}
+
+Instruction *AddressSanitizer::generateCrashCode(
+ IRBuilder<> &IRB, Value *Addr, bool IsWrite, uint32_t TypeSize) {
+ // IsWrite and TypeSize are encoded in the function name.
+ std::string FunctionName = std::string(kAsanReportErrorTemplate) +
+ (IsWrite ? "store" : "load") + itostr(TypeSize / 8);
+ Value *ReportWarningFunc = CurrentModule->getOrInsertFunction(
+ FunctionName, IRB.getVoidTy(), IntptrTy, NULL);
+ CallInst *Call = IRB.CreateCall(ReportWarningFunc, Addr);
+ Call->setDoesNotReturn();
+ return Call;
+}
+
+void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
+ IRBuilder<> &IRB, Value *Addr,
+ uint32_t TypeSize, bool IsWrite) {
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+
+ Type *ShadowTy = IntegerType::get(
+ *C, std::max(8U, TypeSize >> MappingScale));
+ Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
+ Value *ShadowPtr = memToShadow(AddrLong, IRB);
+ Value *CmpVal = Constant::getNullValue(ShadowTy);
+ Value *ShadowValue = IRB.CreateLoad(
+ IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
+
+ Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
+
+ Instruction *CheckTerm = splitBlockAndInsertIfThen(
+ cast<Instruction>(Cmp)->getNextNode(), Cmp);
+ IRBuilder<> IRB2(CheckTerm);
+
+ size_t Granularity = 1 << MappingScale;
+ if (TypeSize < 8 * Granularity) {
+ // Addr & (Granularity - 1)
+ Value *Lower3Bits = IRB2.CreateAnd(
+ AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
+ // (Addr & (Granularity - 1)) + size - 1
+ Value *LastAccessedByte = IRB2.CreateAdd(
+ Lower3Bits, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
+ // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
+ LastAccessedByte = IRB2.CreateIntCast(
+ LastAccessedByte, IRB.getInt8Ty(), false);
+ // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
+ Value *Cmp2 = IRB2.CreateICmpSGE(LastAccessedByte, ShadowValue);
+
+ CheckTerm = splitBlockAndInsertIfThen(CheckTerm, Cmp2);
+ }
+
+ IRBuilder<> IRB1(CheckTerm);
+ Instruction *Crash = generateCrashCode(IRB1, AddrLong, IsWrite, TypeSize);
+ Crash->setDebugLoc(OrigIns->getDebugLoc());
+ ReplaceInstWithInst(CheckTerm, new UnreachableInst(*C));
+}
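+
+// Editor's sketch (assumption-laden, not part of the pass): spelled out in
+// C, the check emitted for a 4-byte store at the default 8-byte shadow
+// granularity. The report entry point is the name generateCrashCode builds
+// from kAsanReportErrorTemplate:
+//
+//   int8_t ShadowValue = *(int8_t *)MemToShadow(Addr);
+//   if (ShadowValue != 0) {
+//     // Slow path: the granule may be only partially addressable.
+//     if ((int8_t)((Addr & 7) + 4 - 1) >= ShadowValue)
+//       __asan_report_store4(Addr);  // never returns
+//   }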
+
+// This function replaces all global variables with new variables that have
+// trailing redzones. It also creates a function that poisons
+// redzones and inserts this function into llvm.global_ctors.
+bool AddressSanitizer::insertGlobalRedzones(Module &M) {
+ SmallVector<GlobalVariable *, 16> GlobalsToChange;
+
+ for (Module::GlobalListType::iterator G = M.getGlobalList().begin(),
+ E = M.getGlobalList().end(); G != E; ++G) {
+ Type *Ty = cast<PointerType>(G->getType())->getElementType();
+ DEBUG(dbgs() << "GLOBAL: " << *G);
+
+ if (!Ty->isSized()) continue;
+ if (!G->hasInitializer()) continue;
+ // Touch only those globals that will not be defined in other modules.
+ // Don't handle ODR type linkages since other modules may be built w/o asan.
+ if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
+ G->getLinkage() != GlobalVariable::PrivateLinkage &&
+ G->getLinkage() != GlobalVariable::InternalLinkage)
+ continue;
+ // Two problems with thread-locals:
+ // - The address of the main thread's copy can't be computed at link-time.
+ // - Need to poison all copies, not just the main thread's one.
+ if (G->isThreadLocal())
+ continue;
+ // For now, just ignore this global if the alignment is large.
+ if (G->getAlignment() > RedzoneSize) continue;
+
+ // Ignore all the globals with the names starting with "\01L_OBJC_".
+ // Many of those are put into the .cstring section. The linker compresses
+ // that section by removing the spare \0s after the string terminator, so
+ // our redzones get broken.
+ if ((G->getName().find("\01L_OBJC_") == 0) ||
+ (G->getName().find("\01l_OBJC_") == 0)) {
+ DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G);
+ continue;
+ }
+
+ if (G->hasSection()) {
+ StringRef Section(G->getSection());
+ // Ignore the globals from the __OBJC section. The ObjC runtime assumes
+ // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
+ // them.
+ if ((Section.find("__OBJC,") == 0) ||
+ (Section.find("__DATA, __objc_") == 0)) {
+ DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G);
+ continue;
+ }
+ // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
+ // Constant CFString instances are compiled in the following way:
+ // -- the string buffer is emitted into
+ // __TEXT,__cstring,cstring_literals
+ // -- the constant NSConstantString structure referencing that buffer
+ // is placed into __DATA,__cfstring
+ // Therefore there's no point in placing redzones into __DATA,__cfstring.
+ // Moreover, it causes the linker to crash on OS X 10.7
+ if (Section.find("__DATA,__cfstring") == 0) {
+ DEBUG(dbgs() << "Ignoring CFString: " << *G);
+ continue;
+ }
+ }
+
+ GlobalsToChange.push_back(G);
+ }
+
+ size_t n = GlobalsToChange.size();
+ if (n == 0) return false;
+
+ // A global is described by a structure
+ // size_t beg;
+ // size_t size;
+ // size_t size_with_redzone;
+ // const char *name;
+ // We initialize an array of such structures and pass it to a run-time call.
+ StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy,
+ IntptrTy, IntptrTy, NULL);
+ SmallVector<Constant *, 16> Initializers(n);
+
+ IRBuilder<> IRB(CtorInsertBefore);
+
+ for (size_t i = 0; i < n; i++) {
+ GlobalVariable *G = GlobalsToChange[i];
+ PointerType *PtrTy = cast<PointerType>(G->getType());
+ Type *Ty = PtrTy->getElementType();
+ uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
+ uint64_t RightRedzoneSize = RedzoneSize +
+ (RedzoneSize - (SizeInBytes % RedzoneSize));
+ Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
+
+ StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL);
+ Constant *NewInitializer = ConstantStruct::get(
+ NewTy, G->getInitializer(),
+ Constant::getNullValue(RightRedZoneTy), NULL);
+
+ SmallString<2048> DescriptionOfGlobal = G->getName();
+ DescriptionOfGlobal += " (";
+ DescriptionOfGlobal += M.getModuleIdentifier();
+ DescriptionOfGlobal += ")";
+ GlobalVariable *Name = createPrivateGlobalForString(M, DescriptionOfGlobal);
+
+ // Create a new global variable with enough space for a redzone.
+ GlobalVariable *NewGlobal = new GlobalVariable(
+ M, NewTy, G->isConstant(), G->getLinkage(),
+ NewInitializer, "", G, G->isThreadLocal());
+ NewGlobal->copyAttributesFrom(G);
+ NewGlobal->setAlignment(RedzoneSize);
+
+ Value *Indices2[2];
+ Indices2[0] = IRB.getInt32(0);
+ Indices2[1] = IRB.getInt32(0);
+
+ G->replaceAllUsesWith(
+ ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true));
+ NewGlobal->takeName(G);
+ G->eraseFromParent();
+
+ Initializers[i] = ConstantStruct::get(
+ GlobalStructTy,
+ ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
+ ConstantInt::get(IntptrTy, SizeInBytes),
+ ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
+ ConstantExpr::getPointerCast(Name, IntptrTy),
+ NULL);
+ DEBUG(dbgs() << "NEW GLOBAL:\n" << *NewGlobal);
+ }
+
+ ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
+ GlobalVariable *AllGlobals = new GlobalVariable(
+ M, ArrayOfGlobalStructTy, false, GlobalVariable::PrivateLinkage,
+ ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
+
+ Function *AsanRegisterGlobals = cast<Function>(M.getOrInsertFunction(
+ kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
+
+ IRB.CreateCall2(AsanRegisterGlobals,
+ IRB.CreatePointerCast(AllGlobals, IntptrTy),
+ ConstantInt::get(IntptrTy, n));
+
+ // We also need to unregister globals at the end, e.g. when a shared library
+ // gets closed.
+ Function *AsanDtorFunction = Function::Create(
+ FunctionType::get(Type::getVoidTy(*C), false),
+ GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
+ BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
+ IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
+ Function *AsanUnregisterGlobals = cast<Function>(M.getOrInsertFunction(
+ kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+ AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
+
+ IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
+ IRB.CreatePointerCast(AllGlobals, IntptrTy),
+ ConstantInt::get(IntptrTy, n));
+ appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndCtorPriority);
+
+ DEBUG(dbgs() << M);
+ return true;
+}
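+
+// Editor's sketch: in C terms, the run-time receives an array of
+// descriptors shaped like GlobalStructTy above (field names are ours):
+//
+//   struct asan_global_desc {
+//     uintptr_t beg;                // address of the instrumented global
+//     uintptr_t size;               // original size in bytes
+//     uintptr_t size_with_redzone;  // size plus the trailing redzone
+//     const char *name;             // "<global name> (<module id>)"
+//   };
+//   void __asan_register_globals(asan_global_desc *Globals, uintptr_t N);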
+
+// virtual
+bool AddressSanitizer::runOnModule(Module &M) {
+ // Initialize the private fields. No one has accessed them before.
+ TD = getAnalysisIfAvailable<TargetData>();
+ if (!TD)
+ return false;
+ BL.reset(new FunctionBlackList(ClBlackListFile));
+
+ CurrentModule = &M;
+ C = &(M.getContext());
+ LongSize = TD->getPointerSizeInBits();
+ IntptrTy = Type::getIntNTy(*C, LongSize);
+ IntptrPtrTy = PointerType::get(IntptrTy, 0);
+
+ AsanCtorFunction = Function::Create(
+ FunctionType::get(Type::getVoidTy(*C), false),
+ GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
+ BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
+ CtorInsertBefore = ReturnInst::Create(*C, AsanCtorBB);
+
+ // call __asan_init in the module ctor.
+ IRBuilder<> IRB(CtorInsertBefore);
+ AsanInitFunction = cast<Function>(
+ M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL));
+ AsanInitFunction->setLinkage(Function::ExternalLinkage);
+ IRB.CreateCall(AsanInitFunction);
+
+ MappingOffset = LongSize == 32
+ ? kDefaultShadowOffset32 : kDefaultShadowOffset64;
+ if (ClMappingOffsetLog >= 0) {
+ if (ClMappingOffsetLog == 0) {
+ // special case
+ MappingOffset = 0;
+ } else {
+ MappingOffset = 1ULL << ClMappingOffsetLog;
+ }
+ }
+ MappingScale = kDefaultShadowScale;
+ if (ClMappingScale) {
+ MappingScale = ClMappingScale;
+ }
+ // Redzone used for stack and globals is at least 32 bytes.
+ // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
+ RedzoneSize = std::max(32, (int)(1 << MappingScale));
+
+ bool Res = false;
+
+ if (ClGlobals)
+ Res |= insertGlobalRedzones(M);
+
+ if (ClMappingOffsetLog >= 0) {
+ // Tell the run-time the current values of mapping offset and scale.
+ GlobalValue *asan_mapping_offset =
+ new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
+ ConstantInt::get(IntptrTy, MappingOffset),
+ kAsanMappingOffsetName);
+ // Read the global, otherwise it may be optimized away.
+ IRB.CreateLoad(asan_mapping_offset, true);
+ }
+ if (ClMappingScale) {
+ GlobalValue *asan_mapping_scale =
+ new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
+ ConstantInt::get(IntptrTy, MappingScale),
+ kAsanMappingScaleName);
+ // Read the global, otherwise it may be optimized away.
+ IRB.CreateLoad(asan_mapping_scale, true);
+ }
+
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration()) continue;
+ Res |= handleFunction(M, *F);
+ }
+
+ appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority);
+
+ return Res;
+}
+
+bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
+ // For each NSObject descendant having a +load method, this method is invoked
+ // by the ObjC runtime before any of the static constructors is called.
+ // Therefore we need to instrument such methods with a call to __asan_init
+ // at the beginning in order to initialize our runtime before any access to
+ // the shadow memory.
+ // We cannot just ignore these methods, because they may call other
+ // instrumented functions.
+ if (F.getName().find(" load]") != std::string::npos) {
+ IRBuilder<> IRB(F.begin()->begin());
+ IRB.CreateCall(AsanInitFunction);
+ return true;
+ }
+ return false;
+}
+
+bool AddressSanitizer::handleFunction(Module &M, Function &F) {
+ if (BL->isIn(F)) return false;
+ if (&F == AsanCtorFunction) return false;
+
+ // If needed, insert __asan_init before checking for AddressSafety attr.
+ maybeInsertAsanInitAtFunctionEntry(F);
+
+ if (!F.hasFnAttr(Attribute::AddressSafety)) return false;
+
+ if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
+ return false;
+ // We want to instrument every address only once per basic block
+ // (unless there are calls between uses).
+ SmallSet<Value*, 16> TempsToInstrument;
+ SmallVector<Instruction*, 16> ToInstrument;
+ SmallVector<Instruction*, 8> NoReturnCalls;
+
+ // Fill the set of memory operations to instrument.
+ for (Function::iterator FI = F.begin(), FE = F.end();
+ FI != FE; ++FI) {
+ TempsToInstrument.clear();
+ for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
+ BI != BE; ++BI) {
+ if (LooksLikeCodeInBug11395(BI)) return false;
+ if ((isa<LoadInst>(BI) && ClInstrumentReads) ||
+ (isa<StoreInst>(BI) && ClInstrumentWrites)) {
+ Value *Addr = getLDSTOperand(BI);
+ if (ClOpt && ClOptSameTemp) {
+ if (!TempsToInstrument.insert(Addr))
+ continue; // We've seen this temp in the current BB.
+ }
+ } else if (isa<MemIntrinsic>(BI) && ClMemIntrin) {
+ // ok, take it.
+ } else {
+ if (CallInst *CI = dyn_cast<CallInst>(BI)) {
+ // A call inside BB.
+ TempsToInstrument.clear();
+ if (CI->doesNotReturn()) {
+ NoReturnCalls.push_back(CI);
+ }
+ }
+ continue;
+ }
+ ToInstrument.push_back(BI);
+ }
+ }
+
+ // Instrument.
+ int NumInstrumented = 0;
+ for (size_t i = 0, n = ToInstrument.size(); i != n; i++) {
+ Instruction *Inst = ToInstrument[i];
+ if (ClDebugMin < 0 || ClDebugMax < 0 ||
+ (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
+ if (isa<StoreInst>(Inst) || isa<LoadInst>(Inst))
+ instrumentMop(Inst);
+ else
+ instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
+ }
+ NumInstrumented++;
+ }
+
+ DEBUG(dbgs() << F);
+
+ bool ChangedStack = poisonStackInFunction(M, F);
+
+ // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
+ // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
+ for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) {
+ Instruction *CI = NoReturnCalls[i];
+ IRBuilder<> IRB(CI);
+ IRB.CreateCall(M.getOrInsertFunction(kAsanHandleNoReturnName,
+ IRB.getVoidTy(), NULL));
+ }
+
+ return NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
+}
+
+static uint64_t ValueForPoison(uint64_t PoisonByte, size_t ShadowRedzoneSize) {
+ if (ShadowRedzoneSize == 1) return PoisonByte;
+ if (ShadowRedzoneSize == 2) return (PoisonByte << 8) + PoisonByte;
+ if (ShadowRedzoneSize == 4)
+ return (PoisonByte << 24) + (PoisonByte << 16) +
+ (PoisonByte << 8) + (PoisonByte);
+ llvm_unreachable("ShadowRedzoneSize is either 1, 2 or 4");
+}
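+
+// Editor's worked example: replicating the left-redzone magic into a
+// 4-byte shadow redzone word:
+//
+//   uint64_t Poison = ValueForPoison(kAsanStackLeftRedzoneMagic, 4);
+//   // Poison == 0xf1f1f1f1 -- one magic byte per byte of the shadow word.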
+
+static void PoisonShadowPartialRightRedzone(uint8_t *Shadow,
+ size_t Size,
+ size_t RedzoneSize,
+ size_t ShadowGranularity,
+ uint8_t Magic) {
+ for (size_t i = 0; i < RedzoneSize;
+ i+= ShadowGranularity, Shadow++) {
+ if (i + ShadowGranularity <= Size) {
+ *Shadow = 0; // fully addressable
+ } else if (i >= Size) {
+ *Shadow = Magic; // unaddressable
+ } else {
+ *Shadow = Size - i; // first Size-i bytes are addressable
+ }
+ }
+}
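+
+// Editor's worked example (values hypothetical): a 13-byte object with a
+// 32-byte redzone at 8-byte shadow granularity:
+//
+//   uint8_t Shadow[4];
+//   PoisonShadowPartialRightRedzone(Shadow, /*Size=*/13, /*RedzoneSize=*/32,
+//                                   /*ShadowGranularity=*/8, /*Magic=*/0xf4);
+//   // Shadow == {0, 5, 0xf4, 0xf4}: granule 0 fully addressable, the first
+//   // 5 bytes of granule 1 addressable, granules 2 and 3 unaddressable.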
+
+void AddressSanitizer::PoisonStack(const ArrayRef<AllocaInst*> &AllocaVec,
+ IRBuilder<> IRB,
+ Value *ShadowBase, bool DoPoison) {
+ size_t ShadowRZSize = RedzoneSize >> MappingScale;
+ assert(ShadowRZSize >= 1 && ShadowRZSize <= 4);
+ Type *RZTy = Type::getIntNTy(*C, ShadowRZSize * 8);
+ Type *RZPtrTy = PointerType::get(RZTy, 0);
+
+ Value *PoisonLeft = ConstantInt::get(RZTy,
+ ValueForPoison(DoPoison ? kAsanStackLeftRedzoneMagic : 0LL, ShadowRZSize));
+ Value *PoisonMid = ConstantInt::get(RZTy,
+ ValueForPoison(DoPoison ? kAsanStackMidRedzoneMagic : 0LL, ShadowRZSize));
+ Value *PoisonRight = ConstantInt::get(RZTy,
+ ValueForPoison(DoPoison ? kAsanStackRightRedzoneMagic : 0LL, ShadowRZSize));
+
+ // poison the first red zone.
+ IRB.CreateStore(PoisonLeft, IRB.CreateIntToPtr(ShadowBase, RZPtrTy));
+
+ // poison all other red zones.
+ uint64_t Pos = RedzoneSize;
+ for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
+ AllocaInst *AI = AllocaVec[i];
+ uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
+ uint64_t AlignedSize = getAlignedAllocaSize(AI);
+ assert(AlignedSize - SizeInBytes < RedzoneSize);
+ Value *Ptr = NULL;
+
+ Pos += AlignedSize;
+
+ assert(ShadowBase->getType() == IntptrTy);
+ if (SizeInBytes < AlignedSize) {
+ // Poison the partial redzone at right
+ Ptr = IRB.CreateAdd(
+ ShadowBase, ConstantInt::get(IntptrTy,
+ (Pos >> MappingScale) - ShadowRZSize));
+ size_t AddressableBytes = RedzoneSize - (AlignedSize - SizeInBytes);
+ uint32_t Poison = 0;
+ if (DoPoison) {
+ PoisonShadowPartialRightRedzone((uint8_t*)&Poison, AddressableBytes,
+ RedzoneSize,
+ 1ULL << MappingScale,
+ kAsanStackPartialRedzoneMagic);
+ }
+ Value *PartialPoison = ConstantInt::get(RZTy, Poison);
+ IRB.CreateStore(PartialPoison, IRB.CreateIntToPtr(Ptr, RZPtrTy));
+ }
+
+ // Poison the full redzone at right.
+ Ptr = IRB.CreateAdd(ShadowBase,
+ ConstantInt::get(IntptrTy, Pos >> MappingScale));
+ Value *Poison = i == AllocaVec.size() - 1 ? PoisonRight : PoisonMid;
+ IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, RZPtrTy));
+
+ Pos += RedzoneSize;
+ }
+}
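+
+// Editor's worked example (hypothetical sizes): two allocas of 12 and 40
+// bytes with RedzoneSize == 32 are rounded up to 32 and 64 bytes, so the
+// frame is laid out and poisoned as:
+//
+//   [left rz 32][A 12 + pad 20][mid rz 32][B 40 + pad 24][right rz 32]
+//      0xf1                       0xf2                       0xf3
+//
+// Within each alloca's padding, fully unaddressable granules get the 0xf4
+// partial magic, and a granule that is only partly addressable gets the
+// count of its addressable bytes (see the helper above).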
+
+// Workaround for bug 11395: we don't want to instrument stack in functions
+// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
+// FIXME: remove once the bug 11395 is fixed.
+bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
+ if (LongSize != 32) return false;
+ CallInst *CI = dyn_cast<CallInst>(I);
+ if (!CI || !CI->isInlineAsm()) return false;
+ if (CI->getNumArgOperands() <= 5) return false;
+ // We have inline assembly with quite a few arguments.
+ return true;
+}
+
+// Find all static Alloca instructions and put
+// poisoned red zones around all of them.
+// Then unpoison everything back before the function returns.
+//
+// Stack poisoning does not play well with exception handling.
+// When an exception is thrown, we essentially bypass the code
+// that unpoisons the stack. This is why the run-time library has
+// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
+// stack in the interceptor. This however does not work inside the
+// actual function which catches the exception. Most likely because the
+// compiler hoists the load of the shadow value somewhere too high.
+// This causes asan to report a non-existing bug on 453.povray.
+// It sounds like an LLVM bug.
+bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) {
+ if (!ClStack) return false;
+ SmallVector<AllocaInst*, 16> AllocaVec;
+ SmallVector<Instruction*, 8> RetVec;
+ uint64_t TotalSize = 0;
+
+ // Filter out Alloca instructions we want (and can) handle.
+ // Collect Ret instructions.
+ for (Function::iterator FI = F.begin(), FE = F.end();
+ FI != FE; ++FI) {
+ BasicBlock &BB = *FI;
+ for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
+ BI != BE; ++BI) {
+ if (isa<ReturnInst>(BI)) {
+ RetVec.push_back(BI);
+ continue;
+ }
+
+ AllocaInst *AI = dyn_cast<AllocaInst>(BI);
+ if (!AI) continue;
+ if (AI->isArrayAllocation()) continue;
+ if (!AI->isStaticAlloca()) continue;
+ if (!AI->getAllocatedType()->isSized()) continue;
+ if (AI->getAlignment() > RedzoneSize) continue;
+ AllocaVec.push_back(AI);
+ uint64_t AlignedSize = getAlignedAllocaSize(AI);
+ TotalSize += AlignedSize;
+ }
+ }
+
+ if (AllocaVec.empty()) return false;
+
+ uint64_t LocalStackSize = TotalSize + (AllocaVec.size() + 1) * RedzoneSize;
+
+ bool DoStackMalloc = ClUseAfterReturn
+ && LocalStackSize <= kMaxStackMallocSize;
+
+ Instruction *InsBefore = AllocaVec[0];
+ IRBuilder<> IRB(InsBefore);
+
+ Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
+ AllocaInst *MyAlloca =
+ new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
+ MyAlloca->setAlignment(RedzoneSize);
+ assert(MyAlloca->isStaticAlloca());
+ Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy);
+ Value *LocalStackBase = OrigStackBase;
+
+ if (DoStackMalloc) {
+ Value *AsanStackMallocFunc = M.getOrInsertFunction(
+ kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL);
+ LocalStackBase = IRB.CreateCall2(AsanStackMallocFunc,
+ ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
+ }
+
+ // This string will be parsed by the run-time (DescribeStackAddress).
+ SmallString<2048> StackDescriptionStorage;
+ raw_svector_ostream StackDescription(StackDescriptionStorage);
+ StackDescription << F.getName() << " " << AllocaVec.size() << " ";
+
+ uint64_t Pos = RedzoneSize;
+ // Replace Alloca instructions with base+offset.
+ for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
+ AllocaInst *AI = AllocaVec[i];
+ uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
+ StringRef Name = AI->getName();
+ StackDescription << Pos << " " << SizeInBytes << " "
+ << Name.size() << " " << Name << " ";
+ uint64_t AlignedSize = getAlignedAllocaSize(AI);
+ assert((AlignedSize % RedzoneSize) == 0);
+ AI->replaceAllUsesWith(
+ IRB.CreateIntToPtr(
+ IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Pos)),
+ AI->getType()));
+ Pos += AlignedSize + RedzoneSize;
+ }
+ assert(Pos == LocalStackSize);
+
+ // Write the Magic value and the frame description constant to the redzone.
+ Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
+ IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
+ BasePlus0);
+ Value *BasePlus1 = IRB.CreateAdd(LocalStackBase,
+ ConstantInt::get(IntptrTy, LongSize/8));
+ BasePlus1 = IRB.CreateIntToPtr(BasePlus1, IntptrPtrTy);
+ Value *Description = IRB.CreatePointerCast(
+ createPrivateGlobalForString(M, StackDescription.str()),
+ IntptrTy);
+ IRB.CreateStore(Description, BasePlus1);
+
+ // Poison the stack redzones at the entry.
+ Value *ShadowBase = memToShadow(LocalStackBase, IRB);
+ PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRB, ShadowBase, true);
+
+ Value *AsanStackFreeFunc = NULL;
+ if (DoStackMalloc) {
+ AsanStackFreeFunc = M.getOrInsertFunction(
+ kAsanStackFreeName, IRB.getVoidTy(),
+ IntptrTy, IntptrTy, IntptrTy, NULL);
+ }
+
+ // Unpoison the stack before all ret instructions.
+ for (size_t i = 0, n = RetVec.size(); i < n; i++) {
+ Instruction *Ret = RetVec[i];
+ IRBuilder<> IRBRet(Ret);
+
+ // Mark the current frame as retired.
+ IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
+ BasePlus0);
+ // Unpoison the stack.
+ PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRBRet, ShadowBase, false);
+
+ if (DoStackMalloc) {
+ IRBRet.CreateCall3(AsanStackFreeFunc, LocalStackBase,
+ ConstantInt::get(IntptrTy, LocalStackSize),
+ OrigStackBase);
+ }
+ }
+
+ if (ClDebugStack) {
+ DEBUG(dbgs() << F);
+ }
+
+ return true;
+}
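+
+// Editor's sketch (not from this patch): scheduling the pass with the
+// legacy PassManager of this LLVM era. runOnModule above bails out unless
+// TargetData is available, so a driver would add it first:
+//
+//   #include "llvm/PassManager.h"
+//   #include "llvm/Target/TargetData.h"
+//   #include "llvm/Transforms/Instrumentation.h"
+//
+//   void runAsan(llvm::Module &M) {
+//     llvm::PassManager PM;
+//     PM.add(new llvm::TargetData(&M));         // required by runOnModule
+//     PM.add(llvm::createAddressSanitizerPass());
+//     PM.run(M);
+//   }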
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp b/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp
new file mode 100644
index 0000000..188ea4d
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.cpp
@@ -0,0 +1,79 @@
+//===-- FunctionBlackList.cpp - blacklist of functions --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class for instrumentation passes (like AddressSanitizer
+// or ThreadSanitizer) to avoid instrumenting some functions based on
+// user-supplied blacklist.
+//
+//===----------------------------------------------------------------------===//
+
+#include "FunctionBlackList.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Function.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+
+namespace llvm {
+
+FunctionBlackList::FunctionBlackList(const std::string &Path) {
+ Functions = NULL;
+ const char *kFunPrefix = "fun:";
+ if (!Path.size()) return;
+ std::string Fun;
+
+ OwningPtr<MemoryBuffer> File;
+ if (error_code EC = MemoryBuffer::getFile(Path.c_str(), File)) {
+ report_fatal_error("Can't open blacklist file " + Path + ": " +
+ EC.message());
+ }
+ MemoryBuffer *Buff = File.take();
+ const char *Data = Buff->getBufferStart();
+ size_t DataLen = Buff->getBufferSize();
+ SmallVector<StringRef, 16> Lines;
+ SplitString(StringRef(Data, DataLen), Lines, "\n\r");
+ for (size_t i = 0, numLines = Lines.size(); i < numLines; i++) {
+ if (Lines[i].startswith(kFunPrefix)) {
+ std::string ThisFunc = Lines[i].substr(strlen(kFunPrefix));
+ std::string ThisFuncRE;
+ // Add ThisFunc to the combined regex, replacing each '*' with '.*'.
+ for (size_t j = 0, n = ThisFunc.size(); j < n; j++) {
+ if (ThisFunc[j] == '*')
+ ThisFuncRE += '.';
+ ThisFuncRE += ThisFunc[j];
+ }
+ // Check that the regexp is valid.
+ Regex CheckRE(ThisFuncRE);
+ std::string Error;
+ if (!CheckRE.isValid(Error))
+ report_fatal_error("malformed blacklist regex: " + ThisFunc +
+ ": " + Error);
+ // Append to the final regexp.
+ if (Fun.size())
+ Fun += "|";
+ Fun += ThisFuncRE;
+ }
+ }
+ if (Fun.size()) {
+ Functions = new Regex(Fun);
+ }
+}
+
+bool FunctionBlackList::isIn(const Function &F) {
+ if (Functions) {
+ bool Res = Functions->match(F.getName());
+ return Res;
+ }
+ return false;
+}
+
+} // namespace llvm
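+
+// Editor's sketch (file name and contents hypothetical): given a blacklist
+// file containing
+//
+//   fun:main
+//   fun:*_init
+//
+// the constructor above builds the single regex "main|.*_init", and a pass
+// skips matching functions:
+//
+//   FunctionBlackList BL("asan_blacklist.txt");
+//   if (BL.isIn(F))   // F is an llvm::Function
+//     return false;   // as AddressSanitizer::handleFunction does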
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h b/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h
new file mode 100644
index 0000000..c1239b9
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/FunctionBlackList.h
@@ -0,0 +1,37 @@
+//===-- FunctionBlackList.h - blacklist of functions ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class for instrumentation passes (like AddressSanitizer
+// or ThreadSanitizer) to avoid instrumenting some functions based on
+// user-supplied blacklist.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#include <string>
+
+namespace llvm {
+class Function;
+class Regex;
+
+// Blacklisted functions are not instrumented.
+// The blacklist file contains one or more lines like this:
+// ---
+// fun:FunctionWildCard
+// ---
+// This is similar to the "ignore" feature of ThreadSanitizer.
+// http://code.google.com/p/data-race-test/wiki/ThreadSanitizerIgnores
+class FunctionBlackList {
+ public:
+ FunctionBlackList(const std::string &Path);
+ bool isIn(const Function &F);
+ private:
+ Regex *Functions;
+};
+
+} // namespace llvm
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index ccf7e11..96e5d5b 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -43,12 +43,14 @@ namespace {
public:
static char ID;
GCOVProfiler()
- : ModulePass(ID), EmitNotes(true), EmitData(true), Use402Format(false) {
+ : ModulePass(ID), EmitNotes(true), EmitData(true), Use402Format(false),
+ UseExtraChecksum(false) {
initializeGCOVProfilerPass(*PassRegistry::getPassRegistry());
}
- GCOVProfiler(bool EmitNotes, bool EmitData, bool use402Format = false)
+ GCOVProfiler(bool EmitNotes, bool EmitData, bool use402Format = false,
+ bool useExtraChecksum = false)
: ModulePass(ID), EmitNotes(EmitNotes), EmitData(EmitData),
- Use402Format(use402Format) {
+ Use402Format(use402Format), UseExtraChecksum(useExtraChecksum) {
assert((EmitNotes || EmitData) && "GCOVProfiler asked to do nothing?");
initializeGCOVProfilerPass(*PassRegistry::getPassRegistry());
}
@@ -94,6 +96,7 @@ namespace {
bool EmitNotes;
bool EmitData;
bool Use402Format;
+ bool UseExtraChecksum;
Module *M;
LLVMContext *Ctx;
@@ -105,8 +108,9 @@ INITIALIZE_PASS(GCOVProfiler, "insert-gcov-profiling",
"Insert instrumentation for GCOV profiling", false, false)
ModulePass *llvm::createGCOVProfilerPass(bool EmitNotes, bool EmitData,
- bool Use402Format) {
- return new GCOVProfiler(EmitNotes, EmitData, Use402Format);
+ bool Use402Format,
+ bool UseExtraChecksum) {
+ return new GCOVProfiler(EmitNotes, EmitData, Use402Format, UseExtraChecksum);
}
namespace {
@@ -167,7 +171,7 @@ namespace {
}
uint32_t length() {
- // Here 2 = 1 for string lenght + 1 for '0' id#.
+ // Here 2 = 1 for string length + 1 for '0' id#.
return lengthOfGCOVString(Filename) + 2 + Lines.size();
}
@@ -244,10 +248,12 @@ namespace {
// object users can construct, the blocks and lines will be rooted here.
class GCOVFunction : public GCOVRecord {
public:
- GCOVFunction(DISubprogram SP, raw_ostream *os, bool Use402Format) {
+ GCOVFunction(DISubprogram SP, raw_ostream *os,
+ bool Use402Format, bool UseExtraChecksum) {
this->os = os;
Function *F = SP.getFunction();
+ DEBUG(dbgs() << "Function: " << F->getName() << "\n");
uint32_t i = 0;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
Blocks[BB] = new GCOVBlock(i++, os);
@@ -257,14 +263,14 @@ namespace {
writeBytes(FunctionTag, 4);
uint32_t BlockLen = 1 + 1 + 1 + lengthOfGCOVString(SP.getName()) +
1 + lengthOfGCOVString(SP.getFilename()) + 1;
- if (!Use402Format)
- ++BlockLen; // For second checksum.
+ if (UseExtraChecksum)
+ ++BlockLen;
write(BlockLen);
uint32_t Ident = reinterpret_cast<intptr_t>((MDNode*)SP);
write(Ident);
- write(0); // checksum #1
- if (!Use402Format)
- write(0); // checksum #2
+ write(0); // lineno checksum
+ if (UseExtraChecksum)
+ write(0); // cfg checksum
writeGCOVString(SP.getName());
writeGCOVString(SP.getFilename());
write(SP.getLineNumber());
@@ -290,6 +296,7 @@ namespace {
for (int i = 0, e = Blocks.size() + 1; i != e; ++i) {
write(0); // No flags on our blocks.
}
+ DEBUG(dbgs() << Blocks.size() << " blocks.\n");
// Emit edges between blocks.
for (DenseMap<BasicBlock *, GCOVBlock *>::iterator I = Blocks.begin(),
@@ -301,6 +308,8 @@ namespace {
write(Block.OutEdges.size() * 2 + 1);
write(Block.Number);
for (int i = 0, e = Block.OutEdges.size(); i != e; ++i) {
+ DEBUG(dbgs() << Block.Number << " -> " << Block.OutEdges[i]->Number
+ << "\n");
write(Block.OutEdges[i]->Number);
write(0); // no flags
}
@@ -350,68 +359,60 @@ bool GCOVProfiler::runOnModule(Module &M) {
}
void GCOVProfiler::emitGCNO() {
- DenseMap<const MDNode *, raw_fd_ostream *> GcnoFiles;
NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu");
- if (CU_Nodes) {
- for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
- // Each compile unit gets its own .gcno file. This means that whether we run
- // this pass over the original .o's as they're produced, or run it after
- // LTO, we'll generate the same .gcno files.
-
- DICompileUnit CU(CU_Nodes->getOperand(i));
- raw_fd_ostream *&out = GcnoFiles[CU];
- std::string ErrorInfo;
- out = new raw_fd_ostream(mangleName(CU, "gcno").c_str(), ErrorInfo,
- raw_fd_ostream::F_Binary);
- if (!Use402Format)
- out->write("oncg*404MVLL", 12);
- else
- out->write("oncg*204MVLL", 12);
-
- DIArray SPs = CU.getSubprograms();
- for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
- DISubprogram SP(SPs.getElement(i));
- if (!SP.Verify()) continue;
- raw_fd_ostream *&os = GcnoFiles[CU];
-
- Function *F = SP.getFunction();
- if (!F) continue;
- GCOVFunction Func(SP, os, Use402Format);
-
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
- GCOVBlock &Block = Func.getBlock(BB);
- TerminatorInst *TI = BB->getTerminator();
- if (int successors = TI->getNumSuccessors()) {
- for (int i = 0; i != successors; ++i) {
- Block.addEdge(Func.getBlock(TI->getSuccessor(i)));
- }
- } else if (isa<ReturnInst>(TI)) {
- Block.addEdge(Func.getReturnBlock());
- }
-
- uint32_t Line = 0;
- for (BasicBlock::iterator I = BB->begin(), IE = BB->end(); I != IE; ++I) {
- const DebugLoc &Loc = I->getDebugLoc();
- if (Loc.isUnknown()) continue;
- if (Line == Loc.getLine()) continue;
- Line = Loc.getLine();
- if (SP != getDISubprogram(Loc.getScope(*Ctx))) continue;
-
- GCOVLines &Lines = Block.getFile(SP.getFilename());
- Lines.addLine(Loc.getLine());
+ if (!CU_Nodes) return;
+
+ for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) {
+ // Each compile unit gets its own .gcno file. This means that whether we run
+ // this pass over the original .o's as they're produced, or run it after
+ // LTO, we'll generate the same .gcno files.
+
+ DICompileUnit CU(CU_Nodes->getOperand(i));
+ std::string ErrorInfo;
+ raw_fd_ostream out(mangleName(CU, "gcno").c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary);
+ if (!Use402Format)
+ out.write("oncg*404MVLL", 12);
+ else
+ out.write("oncg*204MVLL", 12);
+
+ DIArray SPs = CU.getSubprograms();
+ for (unsigned i = 0, e = SPs.getNumElements(); i != e; ++i) {
+ DISubprogram SP(SPs.getElement(i));
+ if (!SP.Verify()) continue;
+
+ Function *F = SP.getFunction();
+ if (!F) continue;
+ GCOVFunction Func(SP, &out, Use402Format, UseExtraChecksum);
+
+ for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
+ GCOVBlock &Block = Func.getBlock(BB);
+ TerminatorInst *TI = BB->getTerminator();
+ if (int successors = TI->getNumSuccessors()) {
+ for (int i = 0; i != successors; ++i) {
+ Block.addEdge(Func.getBlock(TI->getSuccessor(i)));
}
+ } else if (isa<ReturnInst>(TI)) {
+ Block.addEdge(Func.getReturnBlock());
+ }
+
+ uint32_t Line = 0;
+ for (BasicBlock::iterator I = BB->begin(), IE = BB->end();
+ I != IE; ++I) {
+ const DebugLoc &Loc = I->getDebugLoc();
+ if (Loc.isUnknown()) continue;
+ if (Line == Loc.getLine()) continue;
+ Line = Loc.getLine();
+ if (SP != getDISubprogram(Loc.getScope(*Ctx))) continue;
+
+ GCOVLines &Lines = Block.getFile(SP.getFilename());
+ Lines.addLine(Loc.getLine());
}
- Func.writeOut();
}
+ Func.writeOut();
}
- }
-
- for (DenseMap<const MDNode *, raw_fd_ostream *>::iterator
- I = GcnoFiles.begin(), E = GcnoFiles.end(); I != E; ++I) {
- raw_fd_ostream *&out = I->second;
- out->write("\0\0\0\0\0\0\0\0", 8); // EOF
- out->close();
- delete out;
+ out.write("\0\0\0\0\0\0\0\0", 8); // EOF
+ out.close();
}
}
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
index 71adc1e..c7266e2 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -24,6 +24,8 @@ void llvm::initializeInstrumentation(PassRegistry &Registry) {
initializeOptimalEdgeProfilerPass(Registry);
initializePathProfilerPass(Registry);
initializeGCOVProfilerPass(Registry);
+ initializeAddressSanitizerPass(Registry);
+ initializeThreadSanitizerPass(Registry);
}
/// LLVMInitializeInstrumentation - C binding for
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
index 62c21b8..1fe1254 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
@@ -69,7 +69,7 @@ inline static void printEdgeCounter(ProfileInfo::Edge e,
BasicBlock* b,
unsigned i) {
DEBUG(dbgs() << "--Edge Counter for " << (e) << " in " \
- << ((b)?(b)->getNameStr():"0") << " (# " << (i) << ")\n");
+ << ((b)?(b)->getName():"0") << " (# " << (i) << ")\n");
}
bool OptimalEdgeProfiler::runOnModule(Module &M) {
@@ -127,7 +127,7 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
unsigned i = 0;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration()) continue;
- DEBUG(dbgs() << "Working on " << F->getNameStr() << "\n");
+ DEBUG(dbgs() << "Working on " << F->getName() << "\n");
// Calculate a Maximum Spanning Tree with the edge weights determined by
// ProfileEstimator. ProfileEstimator also assign weights to the virtual
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
index 23915d3..b214796 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/PathProfiling.cpp
@@ -665,7 +665,7 @@ void BLInstrumentationDag::unlinkPhony() {
// Generate a .dot graph to represent the DAG and pathNumbers
void BLInstrumentationDag::generateDotGraph() {
std::string errorInfo;
- std::string functionName = getFunction().getNameStr();
+ std::string functionName = getFunction().getName().str();
std::string filename = "pathdag." + functionName + ".dot";
DEBUG (dbgs() << "Writing '" << filename << "'...\n");
@@ -750,7 +750,8 @@ Value* BLInstrumentationNode::getStartingPathNumber(){
// Sets the Value of the pathNumber. Used by the instrumentation code.
void BLInstrumentationNode::setStartingPathNumber(Value* pathNumber) {
DEBUG(dbgs() << " SPN-" << getName() << " <-- " << (pathNumber ?
- pathNumber->getNameStr() : "unused") << "\n");
+ pathNumber->getName() :
+ "unused") << "\n");
_startingPathNumber = pathNumber;
}
@@ -760,7 +761,7 @@ Value* BLInstrumentationNode::getEndingPathNumber(){
void BLInstrumentationNode::setEndingPathNumber(Value* pathNumber) {
DEBUG(dbgs() << " EPN-" << getName() << " <-- "
- << (pathNumber ? pathNumber->getNameStr() : "unused") << "\n");
+ << (pathNumber ? pathNumber->getName() : "unused") << "\n");
_endingPathNumber = pathNumber;
}
@@ -1239,9 +1240,9 @@ void PathProfiler::insertInstrumentation(
insertPoint++;
DEBUG(dbgs() << "\nInstrumenting method call block '"
- << node->getBlock()->getNameStr() << "'\n");
+ << node->getBlock()->getName() << "'\n");
DEBUG(dbgs() << " Path number initialized: "
- << ((node->getStartingPathNumber()) ? "yes" : "no") << "\n");
+ << ((node->getStartingPathNumber()) ? "yes" : "no") << "\n");
Value* newpn;
if( node->getStartingPathNumber() ) {
@@ -1370,7 +1371,7 @@ bool PathProfiler::runOnModule(Module &M) {
if (F->isDeclaration())
continue;
- DEBUG(dbgs() << "Function: " << F->getNameStr() << "\n");
+ DEBUG(dbgs() << "Function: " << F->getName() << "\n");
functionNumber++;
// set function number
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
new file mode 100644
index 0000000..8bb337e
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -0,0 +1,311 @@
+//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer, a race detector.
+//
+// The tool is under development; for details about previous versions see
+// http://code.google.com/p/data-race-test
+//
+// The instrumentation phase is quite simple:
+// - Insert calls to run-time library before every memory access.
+//  - Optimizations may be applied to avoid instrumenting some of the accesses.
+// - Insert calls at function entry/exit.
+// The rest is handled by the run-time library.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "tsan"
+
+#include "FunctionBlackList.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Function.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/IRBuilder.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include "llvm/Type.h"
+
+using namespace llvm;
+
+static cl::opt<std::string> ClBlackListFile("tsan-blacklist",
+ cl::desc("Blacklist file"), cl::Hidden);
+
+static cl::opt<bool> ClPrintStats("tsan-print-stats",
+ cl::desc("Print ThreadSanitizer instrumentation stats"), cl::Hidden);
+
+namespace {
+
+// Stats counters for ThreadSanitizer instrumentation.
+struct ThreadSanitizerStats {
+ size_t NumInstrumentedReads;
+ size_t NumInstrumentedWrites;
+ size_t NumOmittedReadsBeforeWrite;
+ size_t NumAccessesWithBadSize;
+ size_t NumInstrumentedVtableWrites;
+ size_t NumOmittedReadsFromConstantGlobals;
+ size_t NumOmittedReadsFromVtable;
+};
+
+/// ThreadSanitizer: instrument the code in the module to find races.
+struct ThreadSanitizer : public FunctionPass {
+ ThreadSanitizer();
+ bool runOnFunction(Function &F);
+ bool doInitialization(Module &M);
+ bool doFinalization(Module &M);
+ bool instrumentLoadOrStore(Instruction *I);
+ static char ID; // Pass identification, replacement for typeid.
+
+ private:
+ void choseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
+ SmallVectorImpl<Instruction*> &All);
+ bool addrPointsToConstantData(Value *Addr);
+
+ TargetData *TD;
+ OwningPtr<FunctionBlackList> BL;
+  // Callbacks to the run-time library are computed in doInitialization.
+ Value *TsanFuncEntry;
+ Value *TsanFuncExit;
+  // Access sizes are powers of two: 1, 2, 4, 8, 16.
+ static const size_t kNumberOfAccessSizes = 5;
+ Value *TsanRead[kNumberOfAccessSizes];
+ Value *TsanWrite[kNumberOfAccessSizes];
+ Value *TsanVptrUpdate;
+
+ // Stats are modified w/o synchronization.
+ ThreadSanitizerStats stats;
+};
+} // namespace
+
+char ThreadSanitizer::ID = 0;
+INITIALIZE_PASS(ThreadSanitizer, "tsan",
+ "ThreadSanitizer: detects data races.",
+ false, false)
+
+ThreadSanitizer::ThreadSanitizer()
+ : FunctionPass(ID),
+ TD(NULL) {
+}
+
+FunctionPass *llvm::createThreadSanitizerPass() {
+ return new ThreadSanitizer();
+}
+
+bool ThreadSanitizer::doInitialization(Module &M) {
+ TD = getAnalysisIfAvailable<TargetData>();
+ if (!TD)
+ return false;
+ BL.reset(new FunctionBlackList(ClBlackListFile));
+ memset(&stats, 0, sizeof(stats));
+
+ // Always insert a call to __tsan_init into the module's CTORs.
+ IRBuilder<> IRB(M.getContext());
+ Value *TsanInit = M.getOrInsertFunction("__tsan_init",
+ IRB.getVoidTy(), NULL);
+ appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
+
+ // Initialize the callbacks.
+ TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", IRB.getVoidTy(),
+ IRB.getInt8PtrTy(), NULL);
+ TsanFuncExit = M.getOrInsertFunction("__tsan_func_exit", IRB.getVoidTy(),
+ NULL);
+ for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
+ SmallString<32> ReadName("__tsan_read");
+ ReadName += itostr(1 << i);
+ TsanRead[i] = M.getOrInsertFunction(ReadName, IRB.getVoidTy(),
+ IRB.getInt8PtrTy(), NULL);
+ SmallString<32> WriteName("__tsan_write");
+ WriteName += itostr(1 << i);
+ TsanWrite[i] = M.getOrInsertFunction(WriteName, IRB.getVoidTy(),
+ IRB.getInt8PtrTy(), NULL);
+ }
+ TsanVptrUpdate = M.getOrInsertFunction("__tsan_vptr_update", IRB.getVoidTy(),
+ IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+ NULL);
+ return true;
+}
+
+bool ThreadSanitizer::doFinalization(Module &M) {
+ if (ClPrintStats) {
+ errs() << "ThreadSanitizerStats " << M.getModuleIdentifier()
+ << ": wr " << stats.NumInstrumentedWrites
+ << "; rd " << stats.NumInstrumentedReads
+ << "; vt " << stats.NumInstrumentedVtableWrites
+ << "; bs " << stats.NumAccessesWithBadSize
+ << "; rbw " << stats.NumOmittedReadsBeforeWrite
+ << "; rcg " << stats.NumOmittedReadsFromConstantGlobals
+ << "; rvt " << stats.NumOmittedReadsFromVtable
+ << "\n";
+ }
+ return true;
+}
+
+static bool isVtableAccess(Instruction *I) {
+ if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa)) {
+ if (Tag->getNumOperands() < 1) return false;
+ if (MDString *Tag1 = dyn_cast<MDString>(Tag->getOperand(0))) {
+ if (Tag1->getString() == "vtable pointer") return true;
+ }
+ }
+ return false;
+}
+
+bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
+ // If this is a GEP, just analyze its pointer operand.
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
+ Addr = GEP->getPointerOperand();
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
+ if (GV->isConstant()) {
+      // Reads from constant globals cannot race with any writes.
+ stats.NumOmittedReadsFromConstantGlobals++;
+ return true;
+ }
+ } else if(LoadInst *L = dyn_cast<LoadInst>(Addr)) {
+ if (isVtableAccess(L)) {
+      // Reads from a vtable pointer cannot race with any writes.
+ stats.NumOmittedReadsFromVtable++;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Instrumenting some of the accesses may be proven redundant.
+// Currently handled:
+// - read-before-write (within same BB, no calls between)
+//
+// We do not handle some of the patterns that should not survive
+// the classic compiler optimizations.
+// E.g. two reads from the same temp should be eliminated by CSE,
+// two writes should be eliminated by DSE, etc.
+//
+// 'Local' is a vector of insns within the same BB (no calls between).
+// 'All' is a vector of insns that will be instrumented.
+void ThreadSanitizer::choseInstructionsToInstrument(
+ SmallVectorImpl<Instruction*> &Local,
+ SmallVectorImpl<Instruction*> &All) {
+ SmallSet<Value*, 8> WriteTargets;
+ // Iterate from the end.
+ for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
+ E = Local.rend(); It != E; ++It) {
+ Instruction *I = *It;
+ if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
+ WriteTargets.insert(Store->getPointerOperand());
+ } else {
+ LoadInst *Load = cast<LoadInst>(I);
+ Value *Addr = Load->getPointerOperand();
+ if (WriteTargets.count(Addr)) {
+ // We will write to this temp, so no reason to analyze the read.
+ stats.NumOmittedReadsBeforeWrite++;
+ continue;
+ }
+ if (addrPointsToConstantData(Addr)) {
+        // Addr points to some constant data -- it cannot race with any writes.
+ continue;
+ }
+ }
+ All.push_back(I);
+ }
+ Local.clear();
+}
+
+bool ThreadSanitizer::runOnFunction(Function &F) {
+ if (!TD) return false;
+ if (BL->isIn(F)) return false;
+ SmallVector<Instruction*, 8> RetVec;
+ SmallVector<Instruction*, 8> AllLoadsAndStores;
+ SmallVector<Instruction*, 8> LocalLoadsAndStores;
+ bool Res = false;
+ bool HasCalls = false;
+
+ // Traverse all instructions, collect loads/stores/returns, check for calls.
+ for (Function::iterator FI = F.begin(), FE = F.end();
+ FI != FE; ++FI) {
+ BasicBlock &BB = *FI;
+ for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
+ BI != BE; ++BI) {
+ if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
+ LocalLoadsAndStores.push_back(BI);
+ else if (isa<ReturnInst>(BI))
+ RetVec.push_back(BI);
+ else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
+ HasCalls = true;
+ choseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
+ }
+ }
+ choseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
+ }
+
+ // We have collected all loads and stores.
+ // FIXME: many of these accesses do not need to be checked for races
+ // (e.g. variables that do not escape, etc).
+
+ // Instrument memory accesses.
+ for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
+ Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
+ }
+
+  // Instrument function entry/exit points if there were instrumented
+  // accesses or calls.
+ if (Res || HasCalls) {
+ IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
+ Value *ReturnAddress = IRB.CreateCall(
+ Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
+ IRB.getInt32(0));
+ IRB.CreateCall(TsanFuncEntry, ReturnAddress);
+ for (size_t i = 0, n = RetVec.size(); i < n; ++i) {
+ IRBuilder<> IRBRet(RetVec[i]);
+ IRBRet.CreateCall(TsanFuncExit);
+ }
+ Res = true;
+ }
+ return Res;
+}
+
+bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
+ IRBuilder<> IRB(I);
+ bool IsWrite = isa<StoreInst>(*I);
+ Value *Addr = IsWrite
+ ? cast<StoreInst>(I)->getPointerOperand()
+ : cast<LoadInst>(I)->getPointerOperand();
+ Type *OrigPtrTy = Addr->getType();
+ Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
+ assert(OrigTy->isSized());
+ uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
+ if (TypeSize != 8 && TypeSize != 16 &&
+ TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
+ stats.NumAccessesWithBadSize++;
+ // Ignore all unusual sizes.
+ return false;
+ }
+ if (IsWrite && isVtableAccess(I)) {
+ Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
+ IRB.CreateCall2(TsanVptrUpdate,
+ IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
+ stats.NumInstrumentedVtableWrites++;
+ return true;
+ }
+ size_t Idx = CountTrailingZeros_32(TypeSize / 8);
+ assert(Idx < kNumberOfAccessSizes);
+ Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
+ IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
+ if (IsWrite) stats.NumInstrumentedWrites++;
+ else stats.NumInstrumentedReads++;
+ return true;
+}
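
For reference, the dispatch in instrumentLoadOrStore maps each power-of-two access size to a callback slot via the count of trailing zero bits in the byte size. A hedged standalone sketch of that mapping (illustrative helper, not part of the pass; __builtin_ctz assumes a GCC/Clang host):

#include <cstdint>
#include <string>

// For an access of 8..128 bits, return the matching run-time callback name,
// mirroring TsanRead[Idx]/TsanWrite[Idx] with Idx = CountTrailingZeros_32 of
// the byte size: 1, 2, 4, 8, 16 bytes map to indices 0..4.
std::string tsanCallbackName(uint32_t TypeSizeInBits, bool IsWrite) {
  uint32_t Bytes = TypeSizeInBits / 8;
  unsigned Idx = __builtin_ctz(Bytes); // e.g. 8 bytes -> Idx 3
  return std::string(IsWrite ? "__tsan_write" : "__tsan_read") +
         std::to_string(1u << Idx);
}
// Example: tsanCallbackName(64, false) == "__tsan_read8".
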
diff --git a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index f8f18b2..9a5423f 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -26,6 +26,7 @@
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -64,11 +65,17 @@ static cl::opt<bool> DisableBranchOpts(
"disable-cgp-branch-opts", cl::Hidden, cl::init(false),
cl::desc("Disable branch optimizations in CodeGenPrepare"));
+// FIXME: Remove this abomination once all of the tests pass without it!
+static cl::opt<bool> DisableDeleteDeadBlocks(
+ "disable-cgp-delete-dead-blocks", cl::Hidden, cl::init(false),
+ cl::desc("Disable deleting dead blocks in CodeGenPrepare"));
+
namespace {
class CodeGenPrepare : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// transformation profitability.
const TargetLowering *TLI;
+ const TargetLibraryInfo *TLInfo;
DominatorTree *DT;
ProfileInfo *PFI;
@@ -97,6 +104,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<DominatorTree>();
AU.addPreserved<ProfileInfo>();
+ AU.addRequired<TargetLibraryInfo>();
}
private:
@@ -116,7 +124,10 @@ namespace {
}
char CodeGenPrepare::ID = 0;
-INITIALIZE_PASS(CodeGenPrepare, "codegenprepare",
+INITIALIZE_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
+ "Optimize for code generation", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(CodeGenPrepare, "codegenprepare",
"Optimize for code generation", false, false)
FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
@@ -127,6 +138,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
bool EverMadeChange = false;
ModifiedDT = false;
+ TLInfo = &getAnalysis<TargetLibraryInfo>();
DT = getAnalysisIfAvailable<DominatorTree>();
PFI = getAnalysisIfAvailable<ProfileInfo>();
@@ -153,8 +165,22 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
if (!DisableBranchOpts) {
MadeChange = false;
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+ SmallPtrSet<BasicBlock*, 8> WorkList;
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
+ SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
MadeChange |= ConstantFoldTerminator(BB, true);
+ if (!MadeChange) continue;
+
+ for (SmallVectorImpl<BasicBlock*>::iterator
+ II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
+ if (pred_begin(*II) == pred_end(*II))
+ WorkList.insert(*II);
+ }
+
+ if (!DisableDeleteDeadBlocks)
+ for (SmallPtrSet<BasicBlock*, 8>::iterator
+ I = WorkList.begin(), E = WorkList.end(); I != E; ++I)
+ DeleteDeadBlock(*I);
if (MadeChange)
ModifiedDT = true;
@@ -541,8 +567,8 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// happens.
WeakVH IterHandle(CurInstIterator);
- ReplaceAndSimplifyAllUses(CI, RetVal, TLI ? TLI->getTargetData() : 0,
- ModifiedDT ? 0 : DT);
+ replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0,
+ TLInfo, ModifiedDT ? 0 : DT);
// If the iterator instruction was recursively deleted, start over at the
// start of the block.
@@ -553,6 +579,15 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
return true;
}
+ if (II && TLI) {
+ SmallVector<Value*, 2> PtrOps;
+ Type *AccessTy;
+ if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
+ while (!PtrOps.empty())
+ if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
+ return true;
+ }
+
// From here on out we're working with named functions.
if (CI->getCalledFunction() == 0) return false;
@@ -612,7 +647,7 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
// It's not safe to eliminate the sign / zero extension of the return value.
// See llvm::isInTailCallPosition().
const Function *F = BB->getParent();
- unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
+ Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
return false;
@@ -667,7 +702,7 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
// Conservatively require the attributes of the call to match those of the
// return. Ignore noalias because it doesn't affect the call sequence.
- unsigned CalleeRetAttr = CS.getAttributes().getRetAttributes();
+ Attributes CalleeRetAttr = CS.getAttributes().getRetAttributes();
if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
continue;
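
The WorkList added above exists because ConstantFoldTerminator can turn a conditional branch into an unconditional one, leaving the untaken successor with no predecessors; such blocks are then deleted outright unless -disable-cgp-delete-dead-blocks is given. A hedged source-level illustration of the shape being cleaned up (not taken from the source):

// After the branch condition folds to a constant, the terminator becomes an
// unconditional branch and the untaken block ends up predecessor-less; the
// new WorkList collects it and DeleteDeadBlock removes it.
int foldedBranch(int x) {
  if (0)      // terminator constant-folds to 'br label %cont'
    x += 1;   // this block loses its only predecessor and is deleted
  return x;
}
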
diff --git a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
index 664c3f6..5430f62 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ConstantProp.cpp
@@ -24,6 +24,8 @@
#include "llvm/Constant.h"
#include "llvm/Instruction.h"
#include "llvm/Pass.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/ADT/Statistic.h"
#include <set>
@@ -42,19 +44,22 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<TargetLibraryInfo>();
}
};
}
char ConstantPropagation::ID = 0;
-INITIALIZE_PASS(ConstantPropagation, "constprop",
+INITIALIZE_PASS_BEGIN(ConstantPropagation, "constprop",
+ "Simple constant propagation", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(ConstantPropagation, "constprop",
"Simple constant propagation", false, false)
FunctionPass *llvm::createConstantPropagationPass() {
return new ConstantPropagation();
}
-
bool ConstantPropagation::runOnFunction(Function &F) {
// Initialize the worklist to all of the instructions ready to process...
std::set<Instruction*> WorkList;
@@ -62,13 +67,15 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.insert(&*i);
}
bool Changed = false;
+ TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
while (!WorkList.empty()) {
Instruction *I = *WorkList.begin();
WorkList.erase(WorkList.begin()); // Get an element from the worklist...
if (!I->use_empty()) // Don't muck with dead instructions...
- if (Constant *C = ConstantFoldInstruction(I)) {
+ if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
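
Threading TargetData and TargetLibraryInfo into ConstantFoldInstruction lets the worklist fold recognized library calls with constant arguments, not just pure IR arithmetic. A hedged source-level illustration (whether a given call folds depends on what the active TargetLibraryInfo reports as available):

#include <cmath>

// With TLI available, a recognized libm call on constants can now be folded
// by the constant propagator, so this body may reduce to 'return 2.0;'.
double foldedLibCall() {
  return std::floor(2.5);
}
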
diff --git a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index e275268..9b0aadb 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -28,6 +28,7 @@ STATISTIC(NumPhis, "Number of phis propagated");
STATISTIC(NumSelects, "Number of selects propagated");
STATISTIC(NumMemAccess, "Number of memory access targets propagated");
STATISTIC(NumCmps, "Number of comparisons propagated");
+STATISTIC(NumDeadCases, "Number of switch cases removed");
namespace {
class CorrelatedValuePropagation : public FunctionPass {
@@ -37,6 +38,7 @@ namespace {
bool processPHI(PHINode *P);
bool processMemAccess(Instruction *I);
bool processCmp(CmpInst *C);
+ bool processSwitch(SwitchInst *SI);
public:
static char ID;
@@ -110,7 +112,8 @@ bool CorrelatedValuePropagation::processPHI(PHINode *P) {
Changed = true;
}
- ++NumPhis;
+ if (Changed)
+ ++NumPhis;
return Changed;
}
@@ -173,6 +176,86 @@ bool CorrelatedValuePropagation::processCmp(CmpInst *C) {
return true;
}
+/// processSwitch - Simplify a switch instruction by removing cases which can
+/// never fire. If the uselessness of a case could be determined locally then
+/// constant propagation would already have figured it out. Instead, walk the
+/// predecessors and statically evaluate cases based on information available
+/// on that edge. Cases that cannot fire regardless of the incoming edge can
+/// safely be removed. If a case fires on every incoming edge then the entire
+/// switch can be removed and replaced with a branch to the case destination.
+bool CorrelatedValuePropagation::processSwitch(SwitchInst *SI) {
+ Value *Cond = SI->getCondition();
+ BasicBlock *BB = SI->getParent();
+
+  // If the condition was defined in the same block as the switch then
+  // LazyValueInfo currently won't say anything useful about it, though in
+  // theory it could.
+ if (isa<Instruction>(Cond) && cast<Instruction>(Cond)->getParent() == BB)
+ return false;
+
+ // If the switch is unreachable then trying to improve it is a waste of time.
+ pred_iterator PB = pred_begin(BB), PE = pred_end(BB);
+ if (PB == PE) return false;
+
+ // Analyse each switch case in turn. This is done in reverse order so that
+ // removing a case doesn't cause trouble for the iteration.
+ bool Changed = false;
+  for (SwitchInst::CaseIt CI = SI->case_end(), CE = SI->case_begin();
+       CI-- != CE; ) {
+ ConstantInt *Case = CI.getCaseValue();
+
+ // Check to see if the switch condition is equal to/not equal to the case
+ // value on every incoming edge, equal/not equal being the same each time.
+ LazyValueInfo::Tristate State = LazyValueInfo::Unknown;
+ for (pred_iterator PI = PB; PI != PE; ++PI) {
+ // Is the switch condition equal to the case value?
+ LazyValueInfo::Tristate Value = LVI->getPredicateOnEdge(CmpInst::ICMP_EQ,
+ Cond, Case, *PI, BB);
+ // Give up on this case if nothing is known.
+ if (Value == LazyValueInfo::Unknown) {
+ State = LazyValueInfo::Unknown;
+ break;
+ }
+
+ // If this was the first edge to be visited, record that all other edges
+ // need to give the same result.
+ if (PI == PB) {
+ State = Value;
+ continue;
+ }
+
+ // If this case is known to fire for some edges and known not to fire for
+ // others then there is nothing we can do - give up.
+ if (Value != State) {
+ State = LazyValueInfo::Unknown;
+ break;
+ }
+ }
+
+ if (State == LazyValueInfo::False) {
+ // This case never fires - remove it.
+ CI.getCaseSuccessor()->removePredecessor(BB);
+ SI->removeCase(CI); // Does not invalidate the iterator.
+ ++NumDeadCases;
+ Changed = true;
+ } else if (State == LazyValueInfo::True) {
+ // This case always fires. Arrange for the switch to be turned into an
+ // unconditional branch by replacing the switch condition with the case
+ // value.
+ SI->setCondition(Case);
+ NumDeadCases += SI->getNumCases();
+ Changed = true;
+ break;
+ }
+ }
+
+ if (Changed)
+ // If the switch has been simplified to the point where it can be replaced
+ // by a branch then do so now.
+ ConstantFoldTerminator(BB);
+
+ return Changed;
+}
+
bool CorrelatedValuePropagation::runOnFunction(Function &F) {
LVI = &getAnalysis<LazyValueInfo>();
@@ -200,6 +283,13 @@ bool CorrelatedValuePropagation::runOnFunction(Function &F) {
}
}
+ Instruction *Term = FI->getTerminator();
+ switch (Term->getOpcode()) {
+ case Instruction::Switch:
+ BBChanged |= processSwitch(cast<SwitchInst>(Term));
+ break;
+ }
+
FnChanged |= BBChanged;
}
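
The effect of processSwitch is easiest to see when the switch condition is constrained by the edge over which the switch is reached. A hedged source-level example (not from the source):

// On the only edge into the switch, x is known to be non-zero, so
// LazyValueInfo::getPredicateOnEdge proves 'case 0' can never fire on any
// incoming edge; processSwitch removes it and bumps NumDeadCases.
int classify(int x) {
  if (x == 0)
    return -1;          // the switch below is only reached with x != 0
  switch (x) {
  case 0:  return 10;   // provably dead on every incoming edge
  case 1:  return 11;
  default: return 12;
  }
}
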
diff --git a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index a593d0f..c8c5360 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -24,6 +24,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
@@ -33,6 +34,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
using namespace llvm;
STATISTIC(NumFastStores, "Number of stores deleted");
@@ -42,25 +44,26 @@ namespace {
struct DSE : public FunctionPass {
AliasAnalysis *AA;
MemoryDependenceAnalysis *MD;
+ DominatorTree *DT;
static char ID; // Pass identification, replacement for typeid
- DSE() : FunctionPass(ID), AA(0), MD(0) {
+ DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
initializeDSEPass(*PassRegistry::getPassRegistry());
}
virtual bool runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
MD = &getAnalysis<MemoryDependenceAnalysis>();
- DominatorTree &DT = getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTree>();
bool Changed = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
// Only check non-dead blocks. Dead blocks may have strange pointer
// cycles that will confuse alias analysis.
- if (DT.isReachableFromEntry(I))
+ if (DT->isReachableFromEntry(I))
Changed |= runOnBasicBlock(*I);
- AA = 0; MD = 0;
+ AA = 0; MD = 0; DT = 0;
return Changed;
}
@@ -221,7 +224,7 @@ static bool isRemovable(Instruction *I) {
IntrinsicInst *II = cast<IntrinsicInst>(I);
switch (II->getIntrinsicID()) {
- default: assert(0 && "doesn't pass 'hasMemoryWrite' predicate");
+ default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
case Intrinsic::lifetime_end:
// Never remove dead lifetime_end's, e.g. because it is followed by a
// free.
@@ -238,6 +241,24 @@ static bool isRemovable(Instruction *I) {
}
}
+
+/// isShortenable - Returns true if this instruction can be safely shortened in
+/// length.
+static bool isShortenable(Instruction *I) {
+ // Don't shorten stores for now
+ if (isa<StoreInst>(I))
+ return false;
+
+ IntrinsicInst *II = cast<IntrinsicInst>(I);
+ switch (II->getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ // Do shorten memory intrinsics.
+ return true;
+ }
+}
+
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
@@ -247,46 +268,61 @@ static Value *getStoredPointerOperand(Instruction *I) {
IntrinsicInst *II = cast<IntrinsicInst>(I);
switch (II->getIntrinsicID()) {
- default: assert(false && "Unexpected intrinsic!");
+ default: llvm_unreachable("Unexpected intrinsic!");
case Intrinsic::init_trampoline:
return II->getArgOperand(0);
}
}
-static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
+static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
const TargetData *TD = AA.getTargetData();
+
+ if (const CallInst *CI = extractMallocCall(V)) {
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
+ return C->getZExtValue();
+ }
+
if (TD == 0)
return AliasAnalysis::UnknownSize;
- if (AllocaInst *A = dyn_cast<AllocaInst>(V)) {
+ if (const AllocaInst *A = dyn_cast<AllocaInst>(V)) {
// Get size information for the alloca
- if (ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
- return AliasAnalysis::UnknownSize;
}
- assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
- PointerType *PT = cast<PointerType>(V->getType());
- return TD->getTypeAllocSize(PT->getElementType());
+ if (const Argument *A = dyn_cast<Argument>(V)) {
+ if (A->hasByValAttr())
+ if (PointerType *PT = dyn_cast<PointerType>(A->getType()))
+ return TD->getTypeAllocSize(PT->getElementType());
+ }
+
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
+ if (!GV->mayBeOverridden())
+ return TD->getTypeAllocSize(GV->getType()->getElementType());
+ }
+
+ return AliasAnalysis::UnknownSize;
}
-/// isObjectPointerWithTrustworthySize - Return true if the specified Value* is
-/// pointing to an object with a pointer size we can trust.
-static bool isObjectPointerWithTrustworthySize(const Value *V) {
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
- return !AI->isArrayAllocation();
- if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
- return !GV->mayBeOverridden();
- if (const Argument *A = dyn_cast<Argument>(V))
- return A->hasByValAttr();
- return false;
+namespace {
+ enum OverwriteResult
+ {
+ OverwriteComplete,
+ OverwriteEnd,
+ OverwriteUnknown
+ };
}
-/// isCompleteOverwrite - Return true if a store to the 'Later' location
+/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location.
-static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
- const AliasAnalysis::Location &Earlier,
- AliasAnalysis &AA) {
+/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
+/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined.
+static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
+ const AliasAnalysis::Location &Earlier,
+ AliasAnalysis &AA,
+ int64_t &EarlierOff,
+ int64_t &LaterOff) {
const Value *P1 = Earlier.Ptr->stripPointerCasts();
const Value *P2 = Later.Ptr->stripPointerCasts();
@@ -300,23 +336,24 @@ static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
// If we have no TargetData information around, then the size of the store
// is inferrable from the pointee type. If they are the same type, then
// we know that the store is safe.
- if (AA.getTargetData() == 0)
- return Later.Ptr->getType() == Earlier.Ptr->getType();
- return false;
+ if (AA.getTargetData() == 0 &&
+ Later.Ptr->getType() == Earlier.Ptr->getType())
+ return OverwriteComplete;
+
+ return OverwriteUnknown;
}
// Make sure that the Later size is >= the Earlier size.
- if (Later.Size < Earlier.Size)
- return false;
- return true;
+ if (Later.Size >= Earlier.Size)
+ return OverwriteComplete;
}
// Otherwise, we have to have size information, and the later store has to be
// larger than the earlier one.
if (Later.Size == AliasAnalysis::UnknownSize ||
Earlier.Size == AliasAnalysis::UnknownSize ||
- Later.Size <= Earlier.Size || AA.getTargetData() == 0)
- return false;
+ AA.getTargetData() == 0)
+ return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval argument). If so, then it clearly overwrites any
@@ -329,26 +366,25 @@ static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
if (UO1 != UO2)
- return false;
+ return OverwriteUnknown;
// If the "Later" store is to a recognizable object, get its size.
- if (isObjectPointerWithTrustworthySize(UO2)) {
- uint64_t ObjectSize =
- TD.getTypeAllocSize(cast<PointerType>(UO2->getType())->getElementType());
- if (ObjectSize == Later.Size)
- return true;
- }
+ uint64_t ObjectSize = getPointerSize(UO2, AA);
+ if (ObjectSize != AliasAnalysis::UnknownSize)
+ if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
+ return OverwriteComplete;
// Okay, we have stores to two completely different pointers. Try to
// decompose the pointer into a "base + constant_offset" form. If the base
// pointers are equal, then we can reason about the two stores.
- int64_t EarlierOff = 0, LaterOff = 0;
+ EarlierOff = 0;
+ LaterOff = 0;
const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
// If the base pointers still differ, we have two completely different stores.
if (BP1 != BP2)
- return false;
+ return OverwriteUnknown;
// The later store completely overlaps the earlier store if:
//
@@ -366,11 +402,25 @@ static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
//
// We have to be careful here as *Off is signed while *.Size is unsigned.
if (EarlierOff >= LaterOff &&
+ Later.Size > Earlier.Size &&
uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
- return true;
+ return OverwriteComplete;
+
+ // The other interesting case is if the later store overwrites the end of
+ // the earlier store
+ //
+ // |--earlier--|
+ // |-- later --|
+ //
+  // In this case we may want to trim the size of the earlier store to avoid
+  // generating writes to addresses which will definitely be overwritten later.
+ if (LaterOff > EarlierOff &&
+ LaterOff < int64_t(EarlierOff + Earlier.Size) &&
+ int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
+ return OverwriteEnd;
// Otherwise, they don't completely overlap.
- return false;
+ return OverwriteUnknown;
}
/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
@@ -494,22 +544,52 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// If we find a write that is a) removable (i.e., non-volatile), b) is
// completely obliterated by the store to 'Loc', and c) which we know that
// 'Inst' doesn't load from, then we can remove it.
- if (isRemovable(DepWrite) && isCompleteOverwrite(Loc, DepLoc, *AA) &&
+ if (isRemovable(DepWrite) &&
!isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
- DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
- << *DepWrite << "\n KILLER: " << *Inst << '\n');
-
- // Delete the store and now-dead instructions that feed it.
- DeleteDeadInstruction(DepWrite, *MD);
- ++NumFastStores;
- MadeChange = true;
-
- // DeleteDeadInstruction can delete the current instruction in loop
- // cases, reset BBI.
- BBI = Inst;
- if (BBI != BB.begin())
- --BBI;
- break;
+ int64_t InstWriteOffset, DepWriteOffset;
+ OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
+ DepWriteOffset, InstWriteOffset);
+ if (OR == OverwriteComplete) {
+ DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
+ << *DepWrite << "\n KILLER: " << *Inst << '\n');
+
+ // Delete the store and now-dead instructions that feed it.
+ DeleteDeadInstruction(DepWrite, *MD);
+ ++NumFastStores;
+ MadeChange = true;
+
+ // DeleteDeadInstruction can delete the current instruction in loop
+ // cases, reset BBI.
+ BBI = Inst;
+ if (BBI != BB.begin())
+ --BBI;
+ break;
+ } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
+            // TODO: Base this on the target vector size so that if the
+            // earlier store was too small to get vector writes anyway then
+            // it's likely a good idea to shorten it.
+            // Power-of-2 vector writes are probably always a bad idea to
+            // shorten, as any store/memset/memcpy is likely using vector
+            // instructions, so shortening to a non-vector size is likely to
+            // be slower.
+ MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite);
+ unsigned DepWriteAlign = DepIntrinsic->getAlignment();
+ if (llvm::isPowerOf2_64(InstWriteOffset) ||
+ ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {
+
+ DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW END: "
+ << *DepWrite << "\n KILLER (offset "
+ << InstWriteOffset << ", "
+ << DepLoc.Size << ")"
+ << *Inst << '\n');
+
+ Value* DepWriteLength = DepIntrinsic->getLength();
+ Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
+ InstWriteOffset -
+ DepWriteOffset);
+ DepIntrinsic->setLength(TrimmedLength);
+ MadeChange = true;
+ }
+ }
}
// If this is a may-aliased store that is clobbering the store value, we
@@ -538,37 +618,67 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
return MadeChange;
}
+/// Find all blocks that will unconditionally lead to the block BB and append
+/// them to Blocks.
+static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
+ BasicBlock *BB, DominatorTree *DT) {
+ for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
+ BasicBlock *Pred = *I;
+ if (Pred == BB) continue;
+ TerminatorInst *PredTI = Pred->getTerminator();
+ if (PredTI->getNumSuccessors() != 1)
+ continue;
+
+ if (DT->isReachableFromEntry(Pred))
+ Blocks.push_back(Pred);
+ }
+}
+
/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
bool DSE::HandleFree(CallInst *F) {
bool MadeChange = false;
- MemDepResult Dep = MD->getDependency(F);
+ AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
+ SmallVector<BasicBlock *, 16> Blocks;
+ Blocks.push_back(F->getParent());
- while (Dep.isDef() || Dep.isClobber()) {
- Instruction *Dependency = Dep.getInst();
- if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
- return MadeChange;
+ while (!Blocks.empty()) {
+ BasicBlock *BB = Blocks.pop_back_val();
+ Instruction *InstPt = BB->getTerminator();
+ if (BB == F->getParent()) InstPt = F;
- Value *DepPointer =
- GetUnderlyingObject(getStoredPointerOperand(Dependency));
+ MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
+ while (Dep.isDef() || Dep.isClobber()) {
+ Instruction *Dependency = Dep.getInst();
+ if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
+ break;
- // Check for aliasing.
- if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
- return MadeChange;
+ Value *DepPointer =
+ GetUnderlyingObject(getStoredPointerOperand(Dependency));
- // DCE instructions only used to calculate that store
- DeleteDeadInstruction(Dependency, *MD);
- ++NumFastStores;
- MadeChange = true;
+ // Check for aliasing.
+ if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
+ break;
- // Inst's old Dependency is now deleted. Compute the next dependency,
- // which may also be dead, as in
- // s[0] = 0;
- // s[1] = 0; // This has just been deleted.
- // free(s);
- Dep = MD->getDependency(F);
- };
+ Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
+
+ // DCE instructions only used to calculate that store
+ DeleteDeadInstruction(Dependency, *MD);
+ ++NumFastStores;
+ MadeChange = true;
+
+ // Inst's old Dependency is now deleted. Compute the next dependency,
+ // which may also be dead, as in
+ // s[0] = 0;
+ // s[1] = 0; // This has just been deleted.
+ // free(s);
+ Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
+ }
+
+ if (Dep.isNonLocal())
+ FindUnconditionalPreds(Blocks, BB, DT);
+ }
return MadeChange;
}
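
The reworked HandleFree walks backwards from the free through FindUnconditionalPreds, so stores in straight-line predecessor blocks can now be killed as well, not just those in the free's own block. A hedged sketch of the pattern it targets:

#include <cstdlib>

// Both stores are dead: every path from them reaches free(s) through
// unconditional branches, so the rewritten HandleFree deletes them even
// when they sit in predecessor blocks of the block containing the free.
void deadStoresBeforeFree(int *s, bool c) {
  if (c)
    s[0] = 0;   // dead: its block branches unconditionally toward the free
  s[1] = 0;     // dead: same block as the free
  free(s);
}
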
@@ -588,10 +698,17 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Find all of the alloca'd pointers in the entry block.
BasicBlock *Entry = BB.getParent()->begin();
- for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
+ for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
DeadStackObjects.insert(AI);
+ // Okay, so these are dead heap objects, but if the pointer never escapes
+    // then it's leaked by this function anyway.
+ if (CallInst *CI = extractMallocCall(I))
+ if (!PointerMayBeCaptured(CI, true, true))
+ DeadStackObjects.insert(CI);
+ }
+
// Treat byval arguments the same, stores to them are dead at the end of the
// function.
for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
@@ -637,6 +754,11 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
continue;
}
+ if (CallInst *CI = extractMallocCall(BBI)) {
+ DeadStackObjects.erase(CI);
+ continue;
+ }
+
if (CallSite CS = cast<Value>(BBI)) {
// If this call does not access memory, it can't be loading any of our
// pointers.
@@ -732,4 +854,3 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
I != E; ++I)
DeadStackObjects.erase(*I);
}
-
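
The new OverwriteEnd result feeds the shortening branch shown earlier: rather than deleting the earlier write, DSE trims its length to InstWriteOffset - DepWriteOffset. A hedged illustration of the arithmetic:

#include <cstring>

// The memcpy overwrites the tail of the memset ([32,64) of [0,64)), so
// isOverwrite returns OverwriteEnd and the memset's length is trimmed to
// InstWriteOffset - DepWriteOffset = 32 - 0 = 32 instead of being deleted.
void trimmedMemset(char *dst, const char *src) {
  std::memset(dst, 0, 64);        // earlier write covers [0, 64)
  std::memcpy(dst + 32, src, 32); // later write covers  [32, 64)
  // after DSE the first call behaves like: memset(dst, 0, 32)
}
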
diff --git a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index c0223d2..f3c92d6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -19,11 +19,13 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
+#include <deque>
using namespace llvm;
STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
@@ -215,6 +217,7 @@ namespace {
class EarlyCSE : public FunctionPass {
public:
const TargetData *TD;
+ const TargetLibraryInfo *TLI;
DominatorTree *DT;
typedef RecyclingAllocator<BumpPtrAllocator,
ScopedHashTableVal<SimpleValue, Value*> > AllocatorTy;
@@ -257,12 +260,77 @@ public:
bool runOnFunction(Function &F);
private:
-
+
+ // NodeScope - almost a POD, but needs to call the constructors for the
+ // scoped hash tables so that a new scope gets pushed on. These are RAII so
+ // that the scope gets popped when the NodeScope is destroyed.
+ class NodeScope {
+ public:
+ NodeScope(ScopedHTType *availableValues,
+ LoadHTType *availableLoads,
+ CallHTType *availableCalls) :
+ Scope(*availableValues),
+ LoadScope(*availableLoads),
+ CallScope(*availableCalls) {}
+
+ private:
+ NodeScope(const NodeScope&); // DO NOT IMPLEMENT
+
+ ScopedHTType::ScopeTy Scope;
+ LoadHTType::ScopeTy LoadScope;
+ CallHTType::ScopeTy CallScope;
+ };
+
+ // StackNode - contains all the needed information to create a stack for
+  // doing a depth-first traversal of the tree. This includes scopes for
+  // values, loads, and calls as well as the generation. There is a child
+  // iterator so that the children do not need to be stored separately.
+ class StackNode {
+ public:
+ StackNode(ScopedHTType *availableValues,
+ LoadHTType *availableLoads,
+ CallHTType *availableCalls,
+ unsigned cg, DomTreeNode *n,
+ DomTreeNode::iterator child, DomTreeNode::iterator end) :
+ CurrentGeneration(cg), ChildGeneration(cg), Node(n),
+ ChildIter(child), EndIter(end),
+ Scopes(availableValues, availableLoads, availableCalls),
+ Processed(false) {}
+
+ // Accessors.
+ unsigned currentGeneration() { return CurrentGeneration; }
+ unsigned childGeneration() { return ChildGeneration; }
+ void childGeneration(unsigned generation) { ChildGeneration = generation; }
+ DomTreeNode *node() { return Node; }
+ DomTreeNode::iterator childIter() { return ChildIter; }
+ DomTreeNode *nextChild() {
+ DomTreeNode *child = *ChildIter;
+ ++ChildIter;
+ return child;
+ }
+ DomTreeNode::iterator end() { return EndIter; }
+ bool isProcessed() { return Processed; }
+ void process() { Processed = true; }
+
+ private:
+ StackNode(const StackNode&); // DO NOT IMPLEMENT
+
+ // Members.
+ unsigned CurrentGeneration;
+ unsigned ChildGeneration;
+ DomTreeNode *Node;
+ DomTreeNode::iterator ChildIter;
+ DomTreeNode::iterator EndIter;
+ NodeScope Scopes;
+ bool Processed;
+ };
+
bool processNode(DomTreeNode *Node);
// This transformation requires dominator postdominator info
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTree>();
+ AU.addRequired<TargetLibraryInfo>();
AU.setPreservesCFG();
}
};
@@ -277,22 +345,10 @@ FunctionPass *llvm::createEarlyCSEPass() {
INITIALIZE_PASS_BEGIN(EarlyCSE, "early-cse", "Early CSE", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(EarlyCSE, "early-cse", "Early CSE", false, false)
bool EarlyCSE::processNode(DomTreeNode *Node) {
- // Define a scope in the scoped hash table. When we are done processing this
- // domtree node and recurse back up to our parent domtree node, this will pop
- // off all the values we install.
- ScopedHTType::ScopeTy Scope(*AvailableValues);
-
- // Define a scope for the load values so that anything we add will get
- // popped when we recurse back up to our parent domtree node.
- LoadHTType::ScopeTy LoadScope(*AvailableLoads);
-
- // Define a scope for the call values so that anything we add will get
- // popped when we recurse back up to our parent domtree node.
- CallHTType::ScopeTy CallScope(*AvailableCalls);
-
BasicBlock *BB = Node->getBlock();
// If this block has a single predecessor, then the predecessor is the parent
@@ -328,7 +384,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// If the instruction can be simplified (e.g. X+0 = X) then replace it with
// its simpler value.
- if (Value *V = SimplifyInstruction(Inst, TD, DT)) {
+ if (Value *V = SimplifyInstruction(Inst, TD, TLI, DT)) {
DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
Inst->replaceAllUsesWith(V);
Inst->eraseFromParent();
@@ -442,19 +498,16 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
}
}
}
-
- unsigned LiveOutGeneration = CurrentGeneration;
- for (DomTreeNode::iterator I = Node->begin(), E = Node->end(); I != E; ++I) {
- Changed |= processNode(*I);
- // Pop any generation changes off the stack from the recursive walk.
- CurrentGeneration = LiveOutGeneration;
- }
+
return Changed;
}
bool EarlyCSE::runOnFunction(Function &F) {
+ std::deque<StackNode *> nodesToProcess;
+
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
// Tables that the pass uses when walking the domtree.
@@ -466,5 +519,52 @@ bool EarlyCSE::runOnFunction(Function &F) {
AvailableCalls = &CallTable;
CurrentGeneration = 0;
- return processNode(DT->getRootNode());
+ bool Changed = false;
+
+ // Process the root node.
+ nodesToProcess.push_front(
+ new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
+ CurrentGeneration, DT->getRootNode(),
+ DT->getRootNode()->begin(),
+ DT->getRootNode()->end()));
+
+ // Save the current generation.
+ unsigned LiveOutGeneration = CurrentGeneration;
+
+ // Process the stack.
+ while (!nodesToProcess.empty()) {
+    // Grab the first item off the stack and set the current generation;
+    // the node is then processed, expanded, and eventually popped below.
+ StackNode *NodeToProcess = nodesToProcess.front();
+
+ // Initialize class members.
+ CurrentGeneration = NodeToProcess->currentGeneration();
+
+ // Check if the node needs to be processed.
+ if (!NodeToProcess->isProcessed()) {
+ // Process the node.
+ Changed |= processNode(NodeToProcess->node());
+ NodeToProcess->childGeneration(CurrentGeneration);
+ NodeToProcess->process();
+ } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
+ // Push the next child onto the stack.
+ DomTreeNode *child = NodeToProcess->nextChild();
+ nodesToProcess.push_front(
+ new StackNode(AvailableValues,
+ AvailableLoads,
+ AvailableCalls,
+ NodeToProcess->childGeneration(), child,
+ child->begin(), child->end()));
+ } else {
+ // It has been processed, and there are no more children to process,
+ // so delete it and pop it off the stack.
+ delete NodeToProcess;
+ nodesToProcess.pop_front();
+ }
+ } // while (!nodes...)
+
+ // Reset the current generation.
+ CurrentGeneration = LiveOutGeneration;
+
+ return Changed;
}
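
Stripped of the scoped hash tables and generation bookkeeping, the runOnFunction rewrite above is the standard conversion of a recursive tree walk into an explicit-stack, pre-order traversal. A minimal sketch of that skeleton under those simplifying assumptions (toy Node type, not the pass's StackNode):

#include <cstddef>
#include <utility>
#include <vector>

struct Node {
  std::vector<Node*> Children;
};

// Visit every node in pre-order without recursion: each stack entry pairs a
// node with the index of its next unvisited child, mirroring StackNode's
// ChildIter/EndIter pair.
template <typename Visitor>
void preorder(Node *Root, Visitor visit) {
  std::vector<std::pair<Node*, size_t> > Stack;
  visit(Root);                                    // process on first touch
  Stack.push_back(std::make_pair(Root, (size_t)0));
  while (!Stack.empty()) {
    std::pair<Node*, size_t> &Top = Stack.back();
    if (Top.second < Top.first->Children.size()) {
      Node *Child = Top.first->Children[Top.second++];
      visit(Child);
      Stack.push_back(std::make_pair(Child, (size_t)0));
    } else {
      Stack.pop_back();                           // all children done
    }
  }
}
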
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
index cbfdbcd..fb733ad 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -31,10 +31,12 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Allocator.h"
@@ -83,6 +85,12 @@ namespace {
return false;
return true;
}
+
+ friend hash_code hash_value(const Expression &Value) {
+ return hash_combine(Value.opcode, Value.type,
+ hash_combine_range(Value.varargs.begin(),
+ Value.varargs.end()));
+ }
};
class ValueTable {
@@ -95,12 +103,17 @@ namespace {
uint32_t nextValueNumber;
Expression create_expression(Instruction* I);
+ Expression create_cmp_expression(unsigned Opcode,
+ CmpInst::Predicate Predicate,
+ Value *LHS, Value *RHS);
Expression create_extractvalue_expression(ExtractValueInst* EI);
uint32_t lookup_or_add_call(CallInst* C);
public:
ValueTable() : nextValueNumber(1) { }
uint32_t lookup_or_add(Value *V);
uint32_t lookup(Value *V) const;
+ uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred,
+ Value *LHS, Value *RHS);
void add(Value *V, uint32_t num);
void clear();
void erase(Value *v);
@@ -124,16 +137,8 @@ template <> struct DenseMapInfo<Expression> {
}
static unsigned getHashValue(const Expression e) {
- unsigned hash = e.opcode;
-
- hash = ((unsigned)((uintptr_t)e.type >> 4) ^
- (unsigned)((uintptr_t)e.type >> 9));
-
- for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
- E = e.varargs.end(); I != E; ++I)
- hash = *I + hash * 37;
-
- return hash;
+ using llvm::hash_value;
+ return static_cast<unsigned>(hash_value(e));
}
static bool isEqual(const Expression &LHS, const Expression &RHS) {
return LHS == RHS;
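
The new getHashValue simply forwards to the hash_value friend above, which composes the fields with the combinators from the newly imported llvm/ADT/Hashing.h. A hedged sketch of the same pattern on a toy struct (assuming only that hash_combine and hash_combine_range behave as in Hashing.h):

#include "llvm/ADT/Hashing.h"
#include <vector>

struct Key {
  unsigned Opcode;
  std::vector<unsigned> Args;

  // Combine all fields into one hash; the range combinator folds in every
  // element of Args, so equal keys hash equally.
  friend llvm::hash_code hash_value(const Key &K) {
    return llvm::hash_combine(K.Opcode,
                              llvm::hash_combine_range(K.Args.begin(),
                                                       K.Args.end()));
  }
};

// DenseMapInfo-style use: truncate the hash_code to unsigned.
unsigned getHashValue(const Key &K) {
  return static_cast<unsigned>(hash_value(K));
}
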
@@ -153,9 +158,24 @@ Expression ValueTable::create_expression(Instruction *I) {
for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
OI != OE; ++OI)
e.varargs.push_back(lookup_or_add(*OI));
+ if (I->isCommutative()) {
+ // Ensure that commutative instructions that only differ by a permutation
+ // of their operands get the same value number by sorting the operand value
+ // numbers. Since all commutative instructions have two operands it is more
+ // efficient to sort by hand rather than using, say, std::sort.
+ assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
+ if (e.varargs[0] > e.varargs[1])
+ std::swap(e.varargs[0], e.varargs[1]);
+ }
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
- e.opcode = (C->getOpcode() << 8) | C->getPredicate();
+ // Sort the operand value numbers so x<y and y>x get the same value number.
+ CmpInst::Predicate Predicate = C->getPredicate();
+ if (e.varargs[0] > e.varargs[1]) {
+ std::swap(e.varargs[0], e.varargs[1]);
+ Predicate = CmpInst::getSwappedPredicate(Predicate);
+ }
+ e.opcode = (C->getOpcode() << 8) | Predicate;
} else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
II != IE; ++II)
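
The sorting above guarantees that operand order never affects an expression's key, and for comparisons the predicate is swapped to compensate, so 'x < y' and 'y > x' value-number identically. A toy sketch of the rule with stand-in types (the real logic lives in ValueTable::create_expression and create_cmp_expression):

#include <algorithm>
#include <cstdint>
#include <utility>

// Stand-ins for CmpInst predicates; swapping the operands of 'a < b' yields
// 'b > a', so swapping the predicate keeps the meaning intact.
enum Pred { PredLT, PredGT };
static Pred swapPred(Pred P) { return P == PredLT ? PredGT : PredLT; }

struct CmpKey { uint32_t L, R; Pred P; };

// Canonicalize so the smaller value number is always on the left:
// canonicalCmpKey(7, 3, PredLT) and canonicalCmpKey(3, 7, PredGT) both
// produce {3, 7, PredGT} and therefore hash and compare as equal.
static CmpKey canonicalCmpKey(uint32_t LHS, uint32_t RHS, Pred P) {
  if (LHS > RHS) {
    std::swap(LHS, RHS);
    P = swapPred(P);
  }
  CmpKey K = { LHS, RHS, P };
  return K;
}
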
@@ -165,6 +185,25 @@ Expression ValueTable::create_expression(Instruction *I) {
return e;
}
+Expression ValueTable::create_cmp_expression(unsigned Opcode,
+ CmpInst::Predicate Predicate,
+ Value *LHS, Value *RHS) {
+ assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
+ "Not a comparison!");
+ Expression e;
+ e.type = CmpInst::makeCmpResultType(LHS->getType());
+ e.varargs.push_back(lookup_or_add(LHS));
+ e.varargs.push_back(lookup_or_add(RHS));
+
+ // Sort the operand value numbers so x<y and y>x get the same value number.
+ if (e.varargs[0] > e.varargs[1]) {
+ std::swap(e.varargs[0], e.varargs[1]);
+ Predicate = CmpInst::getSwappedPredicate(Predicate);
+ }
+ e.opcode = (Opcode << 8) | Predicate;
+ return e;
+}
+
Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
assert(EI != 0 && "Not an ExtractValueInst?");
Expression e;
@@ -414,6 +453,19 @@ uint32_t ValueTable::lookup(Value *V) const {
return VI->second;
}
+/// lookup_or_add_cmp - Returns the value number of the given comparison,
+/// assigning it a new number if it did not have one before. Useful when
+/// we deduced the result of a comparison, but don't immediately have an
+/// instruction realizing that comparison to hand.
+uint32_t ValueTable::lookup_or_add_cmp(unsigned Opcode,
+ CmpInst::Predicate Predicate,
+ Value *LHS, Value *RHS) {
+ Expression exp = create_cmp_expression(Opcode, Predicate, LHS, RHS);
+ uint32_t& e = expressionNumbering[exp];
+ if (!e) e = nextValueNumber++;
+ return e;
+}
+
/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
valueNumbering.clear();
@@ -446,7 +498,8 @@ namespace {
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
const TargetData *TD;
-
+ const TargetLibraryInfo *TLI;
+
ValueTable VN;
/// LeaderTable - A mapping from value numbers to lists of Value*'s that
@@ -530,6 +583,7 @@ namespace {
// This transformation requires dominator postdominator info
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTree>();
+ AU.addRequired<TargetLibraryInfo>();
if (!NoLoads)
AU.addRequired<MemoryDependenceAnalysis>();
AU.addRequired<AliasAnalysis>();
@@ -568,6 +622,7 @@ FunctionPass *llvm::createGVNPass(bool NoLoads) {
INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)
@@ -776,7 +831,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
Value *WritePtr,
uint64_t WriteSizeInBits,
const TargetData &TD) {
- // If the loaded or stored value is an first class array or struct, don't try
+ // If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy())
return -1;
@@ -973,7 +1028,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
-/// GetStoreValueForLoad - This function is called when we have a
+/// GetLoadValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering load. This means
/// that the load *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias. Check this case to see if there is
@@ -1274,14 +1329,14 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// If we had to process more than one hundred blocks to find the
// dependencies, this load isn't worth worrying about. Optimizing
// it will be too expensive.
- if (Deps.size() > 100)
+ unsigned NumDeps = Deps.size();
+ if (NumDeps > 100)
return false;
// If we had a phi translation failure, we'll have a single entry which is a
// clobber in the current block. Reject this early.
- if (Deps.size() == 1
- && !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber())
- {
+ if (NumDeps == 1 &&
+ !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
DEBUG(
dbgs() << "GVN: non-local load ";
WriteAsOperand(dbgs(), LI);
@@ -1294,10 +1349,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// where we have a value available in repl, also keep track of whether we see
// dependencies that produce an unknown value for the load (such as a call
// that could potentially clobber the load).
- SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
- SmallVector<BasicBlock*, 16> UnavailableBlocks;
+ SmallVector<AvailableValueInBlock, 64> ValuesPerBlock;
+ SmallVector<BasicBlock*, 64> UnavailableBlocks;
- for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
+ for (unsigned i = 0, e = NumDeps; i != e; ++i) {
BasicBlock *DepBB = Deps[i].getBB();
MemDepResult DepInfo = Deps[i].getResult();
@@ -1896,12 +1951,19 @@ unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
unsigned Count = 0;
for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
UI != UE; ) {
- Instruction *User = cast<Instruction>(*UI);
- unsigned OpNum = UI.getOperandNo();
- ++UI;
+ Use &U = (UI++).getUse();
+
+ // If From occurs as a phi node operand then the use implicitly lives in the
+ // corresponding incoming block. Otherwise it is the block containing the
+ // user that must be dominated by Root.
+ BasicBlock *UsingBlock;
+ if (PHINode *PN = dyn_cast<PHINode>(U.getUser()))
+ UsingBlock = PN->getIncomingBlock(U);
+ else
+ UsingBlock = cast<Instruction>(U.getUser())->getParent();
- if (DT->dominates(Root, User->getParent())) {
- User->setOperand(OpNum, To);
+ if (DT->dominates(Root, UsingBlock)) {
+ U.set(To);
++Count;
}
}
@@ -1912,69 +1974,119 @@ unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) {
- if (LHS == RHS) return false;
- assert(LHS->getType() == RHS->getType() && "Equal but types differ!");
+ SmallVector<std::pair<Value*, Value*>, 4> Worklist;
+ Worklist.push_back(std::make_pair(LHS, RHS));
+ bool Changed = false;
- // Don't try to propagate equalities between constants.
- if (isa<Constant>(LHS) && isa<Constant>(RHS))
- return false;
+ while (!Worklist.empty()) {
+ std::pair<Value*, Value*> Item = Worklist.pop_back_val();
+ LHS = Item.first; RHS = Item.second;
+
+ if (LHS == RHS) continue;
+ assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
+
+ // Don't try to propagate equalities between constants.
+ if (isa<Constant>(LHS) && isa<Constant>(RHS)) continue;
+
+ // Prefer a constant on the right-hand side, or an Argument if no constants.
+ if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
+ std::swap(LHS, RHS);
+ assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
+
+ // If there is no obvious reason to prefer the left-hand side over the right-
+ // hand side, ensure the longest lived term is on the right-hand side, so the
+ // shortest lived term will be replaced by the longest lived. This tends to
+ // expose more simplifications.
+ uint32_t LVN = VN.lookup_or_add(LHS);
+ if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
+ (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
+ // Move the 'oldest' value to the right-hand side, using the value number as
+ // a proxy for age.
+ uint32_t RVN = VN.lookup_or_add(RHS);
+ if (LVN < RVN) {
+ std::swap(LHS, RHS);
+ LVN = RVN;
+ }
+ }
+ assert((!isa<Instruction>(RHS) ||
+ DT->properlyDominates(cast<Instruction>(RHS)->getParent(), Root)) &&
+ "Instruction doesn't dominate scope!");
+
+ // If value numbering later deduces that an instruction in the scope is equal
+ // to 'LHS' then ensure it will be turned into 'RHS'.
+ addToLeaderTable(LVN, RHS, Root);
+
+ // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
+ // LHS always has at least one use that is not dominated by Root, this will
+ // never do anything if LHS has only one use.
+ if (!LHS->hasOneUse()) {
+ unsigned NumReplacements = replaceAllDominatedUsesWith(LHS, RHS, Root);
+ Changed |= NumReplacements > 0;
+ NumGVNEqProp += NumReplacements;
+ }
- // Make sure that any constants are on the right-hand side. In general the
- // best results are obtained by placing the longest lived value on the RHS.
- if (isa<Constant>(LHS))
- std::swap(LHS, RHS);
+ // Now try to deduce additional equalities from this one. For example, if the
+ // known equality was "(A != B)" == "false" then it follows that A and B are
+ // equal in the scope. Only boolean equalities with an explicit true or false
+ // RHS are currently supported.
+ if (!RHS->getType()->isIntegerTy(1))
+ // Not a boolean equality - bail out.
+ continue;
+ ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
+ if (!CI)
+ // RHS neither 'true' nor 'false' - bail out.
+ continue;
+ // Whether RHS equals 'true'. Otherwise it equals 'false'.
+ bool isKnownTrue = CI->isAllOnesValue();
+ bool isKnownFalse = !isKnownTrue;
+
+ // If "A && B" is known true then both A and B are known true. If "A || B"
+ // is known false then both A and B are known false.
+ Value *A, *B;
+ if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
+ (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
+ Worklist.push_back(std::make_pair(A, RHS));
+ Worklist.push_back(std::make_pair(B, RHS));
+ continue;
+ }
- // If neither term is constant then bail out. This is not for correctness,
- // it's just that the non-constant case is much less useful: it occurs just
- // as often as the constant case but handling it hardly ever results in an
- // improvement.
- if (!isa<Constant>(RHS))
- return false;
+ // If we are propagating an equality like "(A == B)" == "true" then also
+ // propagate the equality A == B. When propagating a comparison such as
+ // "(A >= B)" == "true", replace all instances of "A < B" with "false".
+ if (ICmpInst *Cmp = dyn_cast<ICmpInst>(LHS)) {
+ Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
- // If value numbering later deduces that an instruction in the scope is equal
- // to 'LHS' then ensure it will be turned into 'RHS'.
- addToLeaderTable(VN.lookup_or_add(LHS), RHS, Root);
-
- // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.
- unsigned NumReplacements = replaceAllDominatedUsesWith(LHS, RHS, Root);
- bool Changed = NumReplacements > 0;
- NumGVNEqProp += NumReplacements;
-
- // Now try to deduce additional equalities from this one. For example, if the
- // known equality was "(A != B)" == "false" then it follows that A and B are
- // equal in the scope. Only boolean equalities with an explicit true or false
- // RHS are currently supported.
- if (!RHS->getType()->isIntegerTy(1))
- // Not a boolean equality - bail out.
- return Changed;
- ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
- if (!CI)
- // RHS neither 'true' nor 'false' - bail out.
- return Changed;
- // Whether RHS equals 'true'. Otherwise it equals 'false'.
- bool isKnownTrue = CI->isAllOnesValue();
- bool isKnownFalse = !isKnownTrue;
-
- // If "A && B" is known true then both A and B are known true. If "A || B"
- // is known false then both A and B are known false.
- Value *A, *B;
- if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
- (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
- Changed |= propagateEquality(A, RHS, Root);
- Changed |= propagateEquality(B, RHS, Root);
- return Changed;
- }
+ // If "A == B" is known true, or "A != B" is known false, then replace
+ // A with B everywhere in the scope.
+ if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) ||
+ (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE))
+ Worklist.push_back(std::make_pair(Op0, Op1));
+
+ // If "A >= B" is known true, replace "A < B" with false everywhere.
+ CmpInst::Predicate NotPred = Cmp->getInversePredicate();
+ Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
+ // Since we don't have the instruction "A < B" immediately to hand, work out
+ // the value number that it would have and use that to find an appropriate
+ // instruction (if any).
+ uint32_t NextNum = VN.getNextUnusedValueNumber();
+ uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1);
+ // If the number we were assigned was brand new then there is no point in
+ // looking for an instruction realizing it: there cannot be one!
+ if (Num < NextNum) {
+ Value *NotCmp = findLeader(Root, Num);
+ if (NotCmp && isa<Instruction>(NotCmp)) {
+ unsigned NumReplacements =
+ replaceAllDominatedUsesWith(NotCmp, NotVal, Root);
+ Changed |= NumReplacements > 0;
+ NumGVNEqProp += NumReplacements;
+ }
+ }
+ // Ensure that any instruction in scope that gets the "A < B" value number
+ // is replaced with false.
+ addToLeaderTable(Num, NotVal, Root);
- // If we are propagating an equality like "(A == B)" == "true" then also
- // propagate the equality A == B.
- if (ICmpInst *Cmp = dyn_cast<ICmpInst>(LHS)) {
- // Only equality comparisons are supported.
- if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) ||
- (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE)) {
- Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
- Changed |= propagateEquality(Op0, Op1, Root);
+ continue;
}
- return Changed;
}
return Changed;
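
The rewrite above replaces recursive calls to propagateEquality with an explicit worklist: facts derived from one equality, such as both operands of a known-true 'and', are pushed and processed iteratively instead of by recursion. A minimal standalone sketch of that worklist shape over a toy expression graph (illustrative names only, no LLVM types):

    #include <cstdio>
    #include <vector>

    // A fact records "expression Id is known equal to boolean Val".
    struct Fact { int Id; bool Val; };

    enum Kind { Leaf, And, Or };
    struct Node { Kind K; int L, R; };

    int main() {
      // Node 0 = (node 1 && node 2); nodes 1 and 2 are leaves.
      std::vector<Node> G = {{And, 1, 2}, {Leaf, 0, 0}, {Leaf, 0, 0}};
      std::vector<Fact> Worklist = {{0, true}}; // "(1 && 2) == true"
      while (!Worklist.empty()) {
        Fact F = Worklist.back();
        Worklist.pop_back();
        std::printf("node %d == %s\n", F.Id, F.Val ? "true" : "false");
        const Node &N = G[F.Id];
        // A known-true 'and' implies both operands are true; dually, a
        // known-false 'or' implies both operands are false.
        if ((N.K == And && F.Val) || (N.K == Or && !F.Val)) {
          Worklist.push_back({N.L, F.Val});
          Worklist.push_back({N.R, F.Val});
        }
      }
      return 0;
    }
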
@@ -1985,35 +2097,15 @@ bool GVN::propagateEquality(Value *LHS, Value *RHS, BasicBlock *Root) {
/// particular 'Dst' must not be reachable via another edge from 'Src'.
static bool isOnlyReachableViaThisEdge(BasicBlock *Src, BasicBlock *Dst,
DominatorTree *DT) {
- // First off, there must not be more than one edge from Src to Dst, there
- // should be exactly one. So keep track of the number of times Src occurs
- // as a predecessor of Dst and fail if it's more than once. Secondly, any
- // other predecessors of Dst should be dominated by Dst (see logic below).
- bool SawEdgeFromSrc = false;
- for (pred_iterator PI = pred_begin(Dst), PE = pred_end(Dst); PI != PE; ++PI) {
- BasicBlock *Pred = *PI;
- if (Pred == Src) {
- // An edge from Src to Dst.
- if (SawEdgeFromSrc)
- // There are multiple edges from Src to Dst - fail.
- return false;
- SawEdgeFromSrc = true;
- continue;
- }
- // If the predecessor is not dominated by Dst, then it must be possible to
- // reach it either without passing through Src (and thus not via the edge)
- // or by passing through Src but taking a different edge out of Src. Either
- // way it is possible to reach Dst without passing via the edge, so fail.
- if (!DT->dominates(Dst, *PI))
- return false;
- }
- assert(SawEdgeFromSrc && "No edge between these basic blocks!");
-
- // Every path from the entry block to Dst must at some point pass to Dst from
- // a predecessor that is not dominated by Dst. This predecessor can only be
- // Src, since all others are dominated by Dst. As there is only one edge from
- // Src to Dst, the path passes by this edge.
- return true;
+ // While in theory it is interesting to consider the case in which Dst has
+ // more than one predecessor, because Dst might be part of a loop which is
+ // only reachable from Src, in practice it is pointless since at the time
+ // GVN runs all such loops have preheaders, which means that Dst will have
+ // been changed to have only one predecessor, namely Src.
+ BasicBlock *Pred = Dst->getSinglePredecessor();
+ assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
+ (void)Src;
+ return Pred != 0;
}
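
Given the assumption spelled out in the comment above (GVN runs after loops have been given preheaders), the "only reachable via this edge" test collapses to a single-predecessor check. A standalone sketch of that degenerate form on a toy CFG, purely illustrative:

    #include <vector>

    // preds[b] lists the predecessors of block b. Under the preheader
    // assumption, "Dst only reachable via Src->Dst" reduces to "Src is
    // Dst's single predecessor".
    static bool onlyReachableViaEdge(const std::vector<std::vector<int>> &preds,
                                     int src, int dst) {
      return preds[dst].size() == 1 && preds[dst][0] == src;
    }

    int main() {
      // Blocks: 0 -> 1, 0 -> 2, 1 -> 2   (block 2 has two predecessors)
      std::vector<std::vector<int>> preds = {{}, {0}, {0, 1}};
      bool a = onlyReachableViaEdge(preds, 0, 1); // true
      bool b = onlyReachableViaEdge(preds, 0, 2); // false
      return (a && !b) ? 0 : 1;
    }
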
/// processInstruction - When calculating availability, handle an instruction
@@ -2027,7 +2119,7 @@ bool GVN::processInstruction(Instruction *I) {
// to value numbering it. Value numbering often exposes redundancies, for
// example if it determines that %y is equal to %x then the instruction
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
- if (Value *V = SimplifyInstruction(I, TD, DT)) {
+ if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
I->replaceAllUsesWith(V);
if (MD && V->getType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
@@ -2076,16 +2168,17 @@ bool GVN::processInstruction(Instruction *I) {
Value *SwitchCond = SI->getCondition();
BasicBlock *Parent = SI->getParent();
bool Changed = false;
- for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i) {
- BasicBlock *Dst = SI->getSuccessor(i);
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
+ BasicBlock *Dst = i.getCaseSuccessor();
if (isOnlyReachableViaThisEdge(Parent, Dst, DT))
- Changed |= propagateEquality(SwitchCond, SI->getCaseValue(i), Dst);
+ Changed |= propagateEquality(SwitchCond, i.getCaseValue(), Dst);
}
return Changed;
}
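
Inside a case destination reachable only through its switch edge, the switch condition is known to equal the case value, which is what propagateEquality(SwitchCond, i.getCaseValue(), Dst) exploits. A trivial standalone analogue:

    #include <cstdio>

    int main() {
      int x = 3;
      switch (x) {
      case 3:
        // On this edge x == 3 is a known equality, so a use such as
        // "x + 1" may be folded to the constant 4.
        std::printf("%d\n", 3 + 1);
        break;
      default:
        std::printf("%d\n", x + 1);
      }
      return 0;
    }
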
// Instructions with void type don't return a value, so there's
- // no point in trying to find redudancies in them.
+ // no point in trying to find redundancies in them.
if (I->getType()->isVoidTy()) return false;
uint32_t NextNum = VN.getNextUnusedValueNumber();
@@ -2101,7 +2194,7 @@ bool GVN::processInstruction(Instruction *I) {
// If the number we were assigned was a brand new VN, then we don't
// need to do a lookup to see if the number already exists
// somewhere in the domtree: it can't!
- if (Num == NextNum) {
+ if (Num >= NextNum) {
addToLeaderTable(Num, I, I->getParent());
return false;
}
@@ -2129,6 +2222,7 @@ bool GVN::runOnFunction(Function& F) {
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
VN.setDomTree(DT);
@@ -2241,7 +2335,14 @@ bool GVN::performPRE(Function &F) {
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
isa<DbgInfoIntrinsic>(CurInst))
continue;
-
+
+ // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
+ // sinking the compare again, and it would force the code generator to
+ // move the i1 from processor flags or predicate registers into a general
+ // purpose register.
+ if (isa<CmpInst>(CurInst))
+ continue;
+
// We don't currently value number ANY inline asm calls.
if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
if (CallI->isInlineAsm())
diff --git a/contrib/llvm/lib/Target/ARM/ARMGlobalMerge.cpp b/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
index 5f863ea..c2bd6e6 100644
--- a/contrib/llvm/lib/Target/ARM/ARMGlobalMerge.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GlobalMerge.cpp
@@ -1,4 +1,4 @@
-//===-- ARMGlobalMerge.cpp - Internal globals merging --------------------===//
+//===-- GlobalMerge.cpp - Internal globals merging -----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -51,9 +51,8 @@
// note that we saved 2 registers here almost "for free".
// ===---------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm-global-merge"
-#include "ARM.h"
-#include "llvm/CodeGen/Passes.h"
+#define DEBUG_TYPE "global-merge"
+#include "llvm/Transforms/Scalar.h"
#include "llvm/Attributes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -66,10 +65,12 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/ADT/Statistic.h"
using namespace llvm;
+STATISTIC(NumMerged , "Number of globals merged");
namespace {
- class ARMGlobalMerge : public FunctionPass {
+ class GlobalMerge : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// target type sizes.
const TargetLowering *TLI;
@@ -79,8 +80,10 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid.
- explicit ARMGlobalMerge(const TargetLowering *tli)
- : FunctionPass(ID), TLI(tli) {}
+ explicit GlobalMerge(const TargetLowering *tli = 0)
+ : FunctionPass(ID), TLI(tli) {
+ initializeGlobalMergePass(*PassRegistry::getPassRegistry());
+ }
virtual bool doInitialization(Module &M);
virtual bool runOnFunction(Function &F);
@@ -109,9 +112,12 @@ namespace {
};
} // end anonymous namespace
-char ARMGlobalMerge::ID = 0;
+char GlobalMerge::ID = 0;
+INITIALIZE_PASS(GlobalMerge, "global-merge",
+ "Global Merge", false, false)
+
-bool ARMGlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
+bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst) const {
const TargetData *TD = TLI->getTargetData();
@@ -153,6 +159,7 @@ bool ARMGlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(MergedGV, Idx);
Globals[k]->replaceAllUsesWith(GEP);
Globals[k]->eraseFromParent();
+ NumMerged++;
}
i = j;
}
@@ -161,7 +168,7 @@ bool ARMGlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
}
-bool ARMGlobalMerge::doInitialization(Module &M) {
+bool GlobalMerge::doInitialization(Module &M) {
SmallVector<GlobalVariable*, 16> Globals, ConstGlobals, BSSGlobals;
const TargetData *TD = TLI->getTargetData();
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
@@ -175,7 +182,7 @@ bool ARMGlobalMerge::doInitialization(Module &M) {
continue;
// Ignore fancy-aligned globals for now.
- unsigned Alignment = I->getAlignment();
+ unsigned Alignment = TD->getPreferredAlignment(I);
Type *Ty = I->getType()->getElementType();
if (Alignment > TD->getABITypeAlignment(Ty))
continue;
@@ -186,8 +193,8 @@ bool ARMGlobalMerge::doInitialization(Module &M) {
continue;
if (TD->getTypeAllocSize(Ty) < MaxOffset) {
- const TargetLoweringObjectFile &TLOF = TLI->getObjFileLowering();
- if (TLOF.getKindForGlobal(I, TLI->getTargetMachine()).isBSSLocal())
+ if (TargetLoweringObjectFile::getKindForGlobal(I, TLI->getTargetMachine())
+ .isBSSLocal())
BSSGlobals.push_back(I);
else if (I->isConstant())
ConstGlobals.push_back(I);
@@ -210,10 +217,10 @@ bool ARMGlobalMerge::doInitialization(Module &M) {
return Changed;
}
-bool ARMGlobalMerge::runOnFunction(Function &F) {
+bool GlobalMerge::runOnFunction(Function &F) {
return false;
}
-FunctionPass *llvm::createARMGlobalMergePass(const TargetLowering *tli) {
- return new ARMGlobalMerge(tli);
+Pass *llvm::createGlobalMergePass(const TargetLowering *tli) {
+ return new GlobalMerge(tli);
}
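
The pass replaces several small internal globals with fields of a single merged global, so one base address plus constant offsets (the inbounds GEPs above) reaches all of them. A standalone C++ analogue of the layout change; the struct below only models the merged global and is not the pass's actual output:

    #include <cstdio>

    // Before merging: three separate globals, each access forming its own
    // address.
    static int X, Y, Z;

    // After merging: one aggregate; a single base address plus small
    // constant offsets reaches every field.
    static struct { int X, Y, Z; } Merged;

    int main() {
      X = 1; Y = 2; Z = 3;
      Merged.X = 1; Merged.Y = 2; Merged.Z = 3;
      std::printf("%d %d %d / %d %d %d\n", X, Y, Z,
                  Merged.X, Merged.Y, Merged.Z);
      return 0;
    }
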
diff --git a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 75fa011..a9ba657 100644
--- a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -33,7 +33,6 @@
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
@@ -50,30 +49,21 @@
#include "llvm/ADT/Statistic.h"
using namespace llvm;
-STATISTIC(NumRemoved , "Number of aux indvars removed");
STATISTIC(NumWidened , "Number of indvars widened");
-STATISTIC(NumInserted , "Number of canonical indvars added");
STATISTIC(NumReplaced , "Number of exit values replaced");
STATISTIC(NumLFTR , "Number of loop exit tests replaced");
STATISTIC(NumElimExt , "Number of IV sign/zero extends eliminated");
STATISTIC(NumElimIV , "Number of congruent IVs eliminated");
-namespace llvm {
- cl::opt<bool> EnableIVRewrite(
- "enable-iv-rewrite", cl::Hidden,
- cl::desc("Enable canonical induction variable rewriting"));
-
- // Trip count verification can be enabled by default under NDEBUG if we
- // implement a strong expression equivalence checker in SCEV. Until then, we
- // use the verify-indvars flag, which may assert in some cases.
- cl::opt<bool> VerifyIndvars(
- "verify-indvars", cl::Hidden,
- cl::desc("Verify the ScalarEvolution result after running indvars"));
-}
+// Trip count verification can be enabled by default under NDEBUG if we
+// implement a strong expression equivalence checker in SCEV. Until then, we
+// use the verify-indvars flag, which may assert in some cases.
+static cl::opt<bool> VerifyIndvars(
+ "verify-indvars", cl::Hidden,
+ cl::desc("Verify the ScalarEvolution result after running indvars"));
namespace {
class IndVarSimplify : public LoopPass {
- IVUsers *IU;
LoopInfo *LI;
ScalarEvolution *SE;
DominatorTree *DT;
@@ -84,7 +74,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- IndVarSimplify() : LoopPass(ID), IU(0), LI(0), SE(0), DT(0), TD(0),
+ IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), TD(0),
Changed(false) {
initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
}
@@ -97,13 +87,9 @@ namespace {
AU.addRequired<ScalarEvolution>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
- if (EnableIVRewrite)
- AU.addRequired<IVUsers>();
AU.addPreserved<ScalarEvolution>();
AU.addPreservedID(LoopSimplifyID);
AU.addPreservedID(LCSSAID);
- if (EnableIVRewrite)
- AU.addPreserved<IVUsers>();
AU.setPreservesCFG();
}
@@ -121,8 +107,6 @@ namespace {
void RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter);
- void RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter);
-
Value *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount,
PHINode *IndVar, SCEVExpander &Rewriter);
@@ -138,7 +122,6 @@ INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
-INITIALIZE_PASS_DEPENDENCY(IVUsers)
INITIALIZE_PASS_END(IndVarSimplify, "indvars",
"Induction Variable Simplification", false, false)
@@ -180,6 +163,11 @@ bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) {
// base of a recurrence. This handles the case in which SCEV expansion
// converts a pointer type recurrence into a nonrecurrent pointer base
// indexed by an integer recurrence.
+
+ // If the GEP base pointer is a vector of pointers, abort.
+ if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
+ return false;
+
const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
if (FromBase == ToBase)
@@ -445,11 +433,6 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
PN->replaceAllUsesWith(Conv);
RecursivelyDeleteTriviallyDeadInstructions(PN);
}
-
- // Add a new IVUsers entry for the newly-created integer PHI.
- if (IU)
- IU->AddUsersIfInteresting(NewPHI);
-
Changed = true;
}
@@ -595,124 +578,6 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
}
//===----------------------------------------------------------------------===//
-// Rewrite IV users based on a canonical IV.
-// Only for use with -enable-iv-rewrite.
-//===----------------------------------------------------------------------===//
-
-/// FIXME: It is an extremely bad idea to indvar substitute anything more
-/// complex than affine induction variables. Doing so will put expensive
-/// polynomial evaluations inside of the loop, and the str reduction pass
-/// currently can only reduce affine polynomials. For now just disable
-/// indvar subst on anything more complex than an affine addrec, unless
-/// it can be expanded to a trivial value.
-static bool isSafe(const SCEV *S, const Loop *L, ScalarEvolution *SE) {
- // Loop-invariant values are safe.
- if (SE->isLoopInvariant(S, L)) return true;
-
- // Affine addrecs are safe. Non-affine are not, because LSR doesn't know how
- // to transform them into efficient code.
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
- return AR->isAffine();
-
- // An add is safe it all its operands are safe.
- if (const SCEVCommutativeExpr *Commutative
- = dyn_cast<SCEVCommutativeExpr>(S)) {
- for (SCEVCommutativeExpr::op_iterator I = Commutative->op_begin(),
- E = Commutative->op_end(); I != E; ++I)
- if (!isSafe(*I, L, SE)) return false;
- return true;
- }
-
- // A cast is safe if its operand is.
- if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
- return isSafe(C->getOperand(), L, SE);
-
- // A udiv is safe if its operands are.
- if (const SCEVUDivExpr *UD = dyn_cast<SCEVUDivExpr>(S))
- return isSafe(UD->getLHS(), L, SE) &&
- isSafe(UD->getRHS(), L, SE);
-
- // SCEVUnknown is always safe.
- if (isa<SCEVUnknown>(S))
- return true;
-
- // Nothing else is safe.
- return false;
-}
-
-void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
- // Rewrite all induction variable expressions in terms of the canonical
- // induction variable.
- //
- // If there were induction variables of other sizes or offsets, manually
- // add the offsets to the primary induction variable and cast, avoiding
- // the need for the code evaluation methods to insert induction variables
- // of different sizes.
- for (IVUsers::iterator UI = IU->begin(), E = IU->end(); UI != E; ++UI) {
- Value *Op = UI->getOperandValToReplace();
- Type *UseTy = Op->getType();
- Instruction *User = UI->getUser();
-
- // Compute the final addrec to expand into code.
- const SCEV *AR = IU->getReplacementExpr(*UI);
-
- // Evaluate the expression out of the loop, if possible.
- if (!L->contains(UI->getUser())) {
- const SCEV *ExitVal = SE->getSCEVAtScope(AR, L->getParentLoop());
- if (SE->isLoopInvariant(ExitVal, L))
- AR = ExitVal;
- }
-
- // FIXME: It is an extremely bad idea to indvar substitute anything more
- // complex than affine induction variables. Doing so will put expensive
- // polynomial evaluations inside of the loop, and the str reduction pass
- // currently can only reduce affine polynomials. For now just disable
- // indvar subst on anything more complex than an affine addrec, unless
- // it can be expanded to a trivial value.
- if (!isSafe(AR, L, SE))
- continue;
-
- // Determine the insertion point for this user. By default, insert
- // immediately before the user. The SCEVExpander class will automatically
- // hoist loop invariants out of the loop. For PHI nodes, there may be
- // multiple uses, so compute the nearest common dominator for the
- // incoming blocks.
- Instruction *InsertPt = getInsertPointForUses(User, Op, DT);
-
- // Now expand it into actual Instructions and patch it into place.
- Value *NewVal = Rewriter.expandCodeFor(AR, UseTy, InsertPt);
-
- DEBUG(dbgs() << "INDVARS: Rewrote IV '" << *AR << "' " << *Op << '\n'
- << " into = " << *NewVal << "\n");
-
- if (!isValidRewrite(Op, NewVal)) {
- DeadInsts.push_back(NewVal);
- continue;
- }
- // Inform ScalarEvolution that this value is changing. The change doesn't
- // affect its value, but it does potentially affect which use lists the
- // value will be on after the replacement, which affects ScalarEvolution's
- // ability to walk use lists and drop dangling pointers when a value is
- // deleted.
- SE->forgetValue(User);
-
- // Patch the new value into place.
- if (Op->hasName())
- NewVal->takeName(Op);
- if (Instruction *NewValI = dyn_cast<Instruction>(NewVal))
- NewValI->setDebugLoc(User->getDebugLoc());
- User->replaceUsesOfWith(Op, NewVal);
- UI->setOperandValToReplace(NewVal);
-
- ++NumRemoved;
- Changed = true;
-
- // The old value may be dead now.
- DeadInsts.push_back(Op);
- }
-}
-
-//===----------------------------------------------------------------------===//
// IV Widening - Extend the width of an IV to cover its widest uses.
//===----------------------------------------------------------------------===//
@@ -843,7 +708,7 @@ protected:
const SCEVAddRecExpr* GetExtendedOperandRecurrence(NarrowIVDefUse DU);
- Instruction *WidenIVUse(NarrowIVDefUse DU);
+ Instruction *WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);
void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);
};
@@ -917,7 +782,6 @@ Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
}
return WideBO;
}
- llvm_unreachable(0);
}
/// No-wrap operations can transfer sign extension of their result to their
@@ -946,9 +810,13 @@ const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
else
return 0;
+ // When creating this AddExpr, don't apply the current operation's NSW or NUW
+ // flags. This instruction may be guarded by control flow that the no-wrap
+ // behavior depends on. Non-control-equivalent instructions can be mapped to
+ // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
+ // semantics to those operations.
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(
- SE->getAddExpr(SE->getSCEV(DU.WideDef), ExtendOperExpr,
- IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW));
+ SE->getAddExpr(SE->getSCEV(DU.WideDef), ExtendOperExpr));
if (!AddRec || AddRec->getLoop() != L)
return 0;
@@ -983,7 +851,7 @@ const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {
/// WidenIVUse - Determine whether an individual user of the narrow IV can be
/// widened. If so, return the wide clone of the user.
-Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU) {
+Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// Stop traversing the def-use chain at inner-loop phis or post-loop phis.
if (isa<PHINode>(DU.NarrowUse) &&
@@ -1051,7 +919,7 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU) {
// NarrowUse.
Instruction *WideUse = 0;
if (WideAddRec == WideIncExpr
- && SCEVExpander::hoistStep(WideInc, DU.NarrowUse, DT))
+ && Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
WideUse = WideInc;
else {
WideUse = CloneIVUser(DU);
@@ -1156,7 +1024,7 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
// Process a def-use edge. This may replace the use, so don't hold a
// use_iterator across it.
- Instruction *WideUse = WidenIVUse(DU);
+ Instruction *WideUse = WidenIVUse(DU, Rewriter);
// Follow all def-use edges from the previous narrow use.
if (WideUse)
@@ -1231,7 +1099,11 @@ void IndVarSimplify::SimplifyAndExtend(Loop *L,
/// BackedgeTakenInfo. If these expressions have not been reduced, then
/// expanding them may incur additional cost (albeit in the loop preheader).
static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
+ SmallPtrSet<const SCEV*, 8> &Processed,
ScalarEvolution *SE) {
+ if (!Processed.insert(S))
+ return false;
+
// If the backedge-taken count is a UDiv, it's very likely a UDiv that
// ScalarEvolution's HowFarToZero or HowManyLessThans produced to compute a
// precise expression, rather than a UDiv from the user's code. If we can't
@@ -1250,16 +1122,13 @@ static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
}
}
- if (EnableIVRewrite)
- return false;
-
// Recurse past add expressions, which commonly occur in the
// BackedgeTakenCount. They may already exist in program code, and if not,
// they are not too expensive rematerialize.
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
I != E; ++I) {
- if (isHighCostExpansion(*I, BI, SE))
+ if (isHighCostExpansion(*I, BI, Processed, SE))
return true;
}
return false;
@@ -1270,14 +1139,24 @@ static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
return true;
- // If we haven't recognized an expensive SCEV patter, assume its an expression
- // produced by program code.
+ // If we haven't recognized an expensive SCEV pattern, assume it's an
+ // expression produced by program code.
return false;
}
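
Threading the Processed set through the recursion bounds the cost check to one visit per node: SCEV expressions form DAGs with shared subexpressions, and without the guard each shared node would be re-walked once per path. A standalone sketch of the same memoized traversal with a toy Expr type in place of SCEV:

    #include <unordered_set>
    #include <vector>

    struct Expr {
      std::vector<const Expr*> Ops;
      bool Expensive;
    };

    // Each node is examined once; Seen plays the role of the Processed
    // SmallPtrSet above, whose insert() reports a repeat visit.
    static bool isHighCost(const Expr *E,
                           std::unordered_set<const Expr*> &Seen) {
      if (!Seen.insert(E).second)
        return false; // already examined via another path through the DAG
      if (E->Expensive)
        return true;
      for (const Expr *Op : E->Ops)
        if (isHighCost(Op, Seen))
          return true;
      return false;
    }

    int main() {
      Expr Leaf{{}, false};
      Expr A{{&Leaf, &Leaf}, false}; // shared operands: a DAG, not a tree
      Expr Root{{&A, &A}, false};
      std::unordered_set<const Expr*> Seen;
      return isHighCost(&Root, Seen) ? 1 : 0;
    }
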
/// canExpandBackedgeTakenCount - Return true if this loop's backedge taken
/// count expression can be safely and cheaply expanded into an instruction
/// sequence that can be used by LinearFunctionTestReplace.
+///
+/// TODO: This fails for pointer-type loop counters with greater than one byte
+/// strides, consequently preventing LFTR from running. For the purpose of LFTR
+/// we could skip this check in the case that the LFTR loop counter (chosen by
+/// FindLoopCounter) is also pointer type. Instead, we could directly convert
+/// the loop test to an inequality test by checking the target data's alignment
+/// of element types (given that the initial pointer value originates from or is
+/// used by an ABI-constrained operation, as opposed to inttoptr/ptrtoint).
+/// However, we don't yet have a strong motivation for converting loop tests
+/// into inequality tests.
static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
@@ -1292,42 +1171,13 @@ static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
if (!BI)
return false;
- if (isHighCostExpansion(BackedgeTakenCount, BI, SE))
+ SmallPtrSet<const SCEV*, 8> Processed;
+ if (isHighCostExpansion(BackedgeTakenCount, BI, Processed, SE))
return false;
return true;
}
-/// getBackedgeIVType - Get the widest type used by the loop test after peeking
-/// through Truncs.
-///
-/// TODO: Unnecessary when ForceLFTR is removed.
-static Type *getBackedgeIVType(Loop *L) {
- if (!L->getExitingBlock())
- return 0;
-
- // Can't rewrite non-branch yet.
- BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
- if (!BI)
- return 0;
-
- ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
- if (!Cond)
- return 0;
-
- Type *Ty = 0;
- for(User::op_iterator OI = Cond->op_begin(), OE = Cond->op_end();
- OI != OE; ++OI) {
- assert((!Ty || Ty == (*OI)->getType()) && "bad icmp operand types");
- TruncInst *Trunc = dyn_cast<TruncInst>(*OI);
- if (!Trunc)
- continue;
-
- return Trunc->getSrcTy();
- }
- return Ty;
-}
-
/// getLoopPhiForCounter - Return the loop header phi IFF IncV adds a loop
/// invariant value to the phi.
static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
@@ -1429,6 +1279,10 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
/// FindLoopCounter - Find an affine IV in canonical form.
///
+/// BECount may be an i8* pointer type. The pointer difference is already
+/// a valid count without scaling the address stride, so it remains a pointer
+/// expression as far as SCEV is concerned.
+///
/// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount
///
/// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride.
@@ -1437,11 +1291,6 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
static PHINode *
FindLoopCounter(Loop *L, const SCEV *BECount,
ScalarEvolution *SE, DominatorTree *DT, const TargetData *TD) {
- // I'm not sure how BECount could be a pointer type, but we definitely don't
- // want to LFTR that.
- if (BECount->getType()->isPointerTy())
- return 0;
-
uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
Value *Cond =
@@ -1458,6 +1307,10 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
if (!SE->isSCEVable(Phi->getType()))
continue;
+ // Avoid comparing an integer IV against a pointer Limit.
+ if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy())
+ continue;
+
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi));
if (!AR || AR->getLoop() != L || !AR->isAffine())
continue;
@@ -1503,6 +1356,82 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
return BestPhi;
}
+/// genLoopLimit - Help LinearFunctionTestReplace by generating a value that
+/// holds the RHS of the new loop test.
+static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
+ SCEVExpander &Rewriter, ScalarEvolution *SE) {
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
+ assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
+ const SCEV *IVInit = AR->getStart();
+
+ // IVInit may be a pointer while IVCount is an integer when FindLoopCounter
+ // finds a valid pointer IV. Sign extend BECount in order to materialize a
+ // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing
+ // the existing GEPs whenever possible.
+ if (IndVar->getType()->isPointerTy()
+ && !IVCount->getType()->isPointerTy()) {
+
+ Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType());
+ const SCEV *IVOffset = SE->getTruncateOrSignExtend(IVCount, OfsTy);
+
+ // Expand the code for the iteration count.
+ assert(SE->isLoopInvariant(IVOffset, L) &&
+ "Computed iteration count is not loop invariant!");
+ BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
+ Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI);
+
+ Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader());
+ assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter");
+ // We could handle pointer IVs other than i8*, but we need to compensate for
+ // gep index scaling. See canExpandBackedgeTakenCount comments.
+ assert(SE->getSizeOfExpr(
+ cast<PointerType>(GEPBase->getType())->getElementType())->isOne()
+ && "unit stride pointer IV must be i8*");
+
+ IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
+ return Builder.CreateGEP(GEPBase, GEPOffset, "lftr.limit");
+ }
+ else {
+ // In any other case, convert both IVInit and IVCount to integers before
+ // comparing. This may result in SCEV expansion of pointers, but in practice
+ // SCEV will fold the pointer arithmetic away as such:
+ // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc).
+ //
+ // Valid Cases: (1) both are integers, the most common; (2) both may be pointers
+ // for simple memset-style loops; (3) IVInit is an integer and IVCount is a
+ // pointer may occur when enable-iv-rewrite generates a canonical IV on top
+ // of case #2.
+
+ const SCEV *IVLimit = 0;
+ // For unit stride, IVCount = Start + BECount with 2's complement overflow.
+ // For non-zero Start, compute IVCount here.
+ if (AR->getStart()->isZero())
+ IVLimit = IVCount;
+ else {
+ assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
+ const SCEV *IVInit = AR->getStart();
+
+ // For integer IVs, truncate the IV before computing IVInit + BECount.
+ if (SE->getTypeSizeInBits(IVInit->getType())
+ > SE->getTypeSizeInBits(IVCount->getType()))
+ IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());
+
+ IVLimit = SE->getAddExpr(IVInit, IVCount);
+ }
+ // Expand the code for the iteration count.
+ BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
+ IRBuilder<> Builder(BI);
+ assert(SE->isLoopInvariant(IVLimit, L) &&
+ "Computed iteration count is not loop invariant!");
+ // Ensure that we generate the same type as IndVar, or a smaller integer
+ // type. In the presence of null pointer values, we have an integer type
+ // SCEV expression (IVInit) for a pointer type IV value (IndVar).
+ Type *LimitTy = IVCount->getType()->isPointerTy() ?
+ IndVar->getType() : IVCount->getType();
+ return Rewriter.expandCodeFor(IVLimit, LimitTy, BI);
+ }
+}
+
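
genLoopLimit materializes the exit-test RHS as IVInit + IVCount: a plain integer add for integer counters, or a unit-stride GEP (a byte offset from the preheader base) when the counter is an i8*-style pointer. A standalone sketch of both flavors, assuming unit stride as the code above does:

    #include <cstdio>

    int main() {
      // Integer flavor: limit = init + count; the != exit test then runs
      // the loop exactly 'count' times (wrap-around is tolerable for !=).
      unsigned init = 5, count = 10;
      unsigned limit = init + count;
      unsigned iv = init, trips = 0;
      while (iv != limit) { ++iv; ++trips; }
      std::printf("%u\n", trips); // 10

      // Pointer flavor: for a unit-stride (i8*-like) counter the limit is
      // base + count, a byte offset with no index scaling needed.
      char buf[10];
      char *p = buf, *pend = buf + count; // the "lftr.limit" value
      unsigned n = 0;
      for (; p != pend; ++p) ++n;
      std::printf("%u\n", n); // 10
      return 0;
    }
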
/// LinearFunctionTestReplace - This method rewrites the exit condition of the
/// loop to be a canonical != comparison against the incremented loop induction
/// variable. This pass is able to rewrite the exit tests of any loop where the
@@ -1514,37 +1443,35 @@ LinearFunctionTestReplace(Loop *L,
PHINode *IndVar,
SCEVExpander &Rewriter) {
assert(canExpandBackedgeTakenCount(L, SE) && "precondition");
- BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
// LFTR can ignore IV overflow and truncate to the width of
// BECount. This avoids materializing the add(zext(add)) expression.
- Type *CntTy = !EnableIVRewrite ?
- BackedgeTakenCount->getType() : IndVar->getType();
+ Type *CntTy = BackedgeTakenCount->getType();
- const SCEV *IVLimit = BackedgeTakenCount;
+ const SCEV *IVCount = BackedgeTakenCount;
- // If the exiting block is not the same as the backedge block, we must compare
- // against the preincremented value, otherwise we prefer to compare against
- // the post-incremented value.
+ // If the exiting block is the same as the backedge block, we prefer to
+ // compare against the post-incremented value, otherwise we must compare
+ // against the preincremented value.
Value *CmpIndVar;
if (L->getExitingBlock() == L->getLoopLatch()) {
// Add one to the "backedge-taken" count to get the trip count.
// If this addition may overflow, we have to be more pessimistic and
// cast the induction variable before doing the add.
const SCEV *N =
- SE->getAddExpr(IVLimit, SE->getConstant(IVLimit->getType(), 1));
- if (CntTy == IVLimit->getType())
- IVLimit = N;
+ SE->getAddExpr(IVCount, SE->getConstant(IVCount->getType(), 1));
+ if (CntTy == IVCount->getType())
+ IVCount = N;
else {
- const SCEV *Zero = SE->getConstant(IVLimit->getType(), 0);
+ const SCEV *Zero = SE->getConstant(IVCount->getType(), 0);
if ((isa<SCEVConstant>(N) && !N->isZero()) ||
SE->isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, N, Zero)) {
// No overflow. Cast the sum.
- IVLimit = SE->getTruncateOrZeroExtend(N, CntTy);
+ IVCount = SE->getTruncateOrZeroExtend(N, CntTy);
} else {
// Potential overflow. Cast before doing the add.
- IVLimit = SE->getTruncateOrZeroExtend(IVLimit, CntTy);
- IVLimit = SE->getAddExpr(IVLimit, SE->getConstant(CntTy, 1));
+ IVCount = SE->getTruncateOrZeroExtend(IVCount, CntTy);
+ IVCount = SE->getAddExpr(IVCount, SE->getConstant(CntTy, 1));
}
}
// The BackedgeTaken expression contains the number of times that the
@@ -1552,62 +1479,17 @@ LinearFunctionTestReplace(Loop *L,
// number of times the loop executes, so use the incremented indvar.
CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock());
} else {
- // We have to use the preincremented value...
- IVLimit = SE->getTruncateOrZeroExtend(IVLimit, CntTy);
+ // We must use the preincremented value...
+ IVCount = SE->getTruncateOrZeroExtend(IVCount, CntTy);
CmpIndVar = IndVar;
}
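
The "add one to the backedge-taken count" step above is careful about overflow: at the narrow count width, BECount + 1 can wrap to zero, so the code either proves the sum non-zero or casts to the wider type before adding. A standalone illustration of the wrap and the safe ordering:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Backedge-taken count of a loop whose body runs 256 times, held in
      // an 8-bit counter type.
      uint8_t becount = 255;

      // Adding one at the narrow width wraps to 0.
      uint8_t naive = static_cast<uint8_t>(becount + 1);

      // Safe ordering used above: widen first, then add one.
      unsigned wide = static_cast<unsigned>(becount) + 1;

      std::printf("naive=%u safe=%u\n", static_cast<unsigned>(naive), wide);
      // prints naive=0 safe=256
      return 0;
    }
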
- // For unit stride, IVLimit = Start + BECount with 2's complement overflow.
- // So for, non-zero start compute the IVLimit here.
- bool isPtrIV = false;
- Type *CmpTy = CntTy;
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
- assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
- if (!AR->getStart()->isZero()) {
- assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
- const SCEV *IVInit = AR->getStart();
-
- // For pointer types, sign extend BECount in order to materialize a GEP.
- // Note that for without EnableIVRewrite, we never run SCEVExpander on a
- // pointer type, because we must preserve the existing GEPs. Instead we
- // directly generate a GEP later.
- if (IVInit->getType()->isPointerTy()) {
- isPtrIV = true;
- CmpTy = SE->getEffectiveSCEVType(IVInit->getType());
- IVLimit = SE->getTruncateOrSignExtend(IVLimit, CmpTy);
- }
- // For integer types, truncate the IV before computing IVInit + BECount.
- else {
- if (SE->getTypeSizeInBits(IVInit->getType())
- > SE->getTypeSizeInBits(CmpTy))
- IVInit = SE->getTruncateExpr(IVInit, CmpTy);
-
- IVLimit = SE->getAddExpr(IVInit, IVLimit);
- }
- }
- // Expand the code for the iteration count.
- IRBuilder<> Builder(BI);
-
- assert(SE->isLoopInvariant(IVLimit, L) &&
- "Computed iteration count is not loop invariant!");
- Value *ExitCnt = Rewriter.expandCodeFor(IVLimit, CmpTy, BI);
-
- // Create a gep for IVInit + IVLimit from on an existing pointer base.
- assert(isPtrIV == IndVar->getType()->isPointerTy() &&
- "IndVar type must match IVInit type");
- if (isPtrIV) {
- Value *IVStart = IndVar->getIncomingValueForBlock(L->getLoopPreheader());
- assert(AR->getStart() == SE->getSCEV(IVStart) && "bad loop counter");
- assert(SE->getSizeOfExpr(
- cast<PointerType>(IVStart->getType())->getElementType())->isOne()
- && "unit stride pointer IV must be i8*");
-
- Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
- ExitCnt = Builder.CreateGEP(IVStart, ExitCnt, "lftr.limit");
- Builder.SetInsertPoint(BI);
- }
+ Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
+ assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy()
+ && "genLoopLimit missed a cast");
// Insert a new icmp_ne or icmp_eq instruction before the branch.
+ BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
ICmpInst::Predicate P;
if (L->contains(BI->getSuccessor(0)))
P = ICmpInst::ICMP_NE;
@@ -1619,11 +1501,13 @@ LinearFunctionTestReplace(Loop *L,
<< " op:\t"
<< (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n"
<< " RHS:\t" << *ExitCnt << "\n"
- << " Expr:\t" << *IVLimit << "\n");
+ << " IVCount:\t" << *IVCount << "\n");
+ IRBuilder<> Builder(BI);
if (SE->getTypeSizeInBits(CmpIndVar->getType())
- > SE->getTypeSizeInBits(CmpTy)) {
- CmpIndVar = Builder.CreateTrunc(CmpIndVar, CmpTy, "lftr.wideiv");
+ > SE->getTypeSizeInBits(ExitCnt->getType())) {
+ CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(),
+ "lftr.wideiv");
}
Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond");
@@ -1680,11 +1564,12 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
if (isa<LandingPadInst>(I))
continue;
- // Don't sink static AllocaInsts out of the entry block, which would
- // turn them into dynamic allocas!
- if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
- if (AI->isStaticAlloca())
- continue;
+ // Don't sink alloca: we never want to sink static allocas out of the
+ // entry block, and correctly sinking dynamic allocas requires
+ // checks for stacksave/stackrestore intrinsics.
+ // FIXME: Refactor this check somehow?
+ if (isa<AllocaInst>(I))
+ continue;
// Determine if there is a use in or before the loop (direct or
// otherwise).
@@ -1746,8 +1631,6 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
if (!L->isLoopSimplifyForm())
return false;
- if (EnableIVRewrite)
- IU = &getAnalysis<IVUsers>();
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTree>();
@@ -1774,10 +1657,8 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// attempt to avoid evaluating SCEVs for sign/zero extend operations until
// other expressions involving loop IVs have been evaluated. This helps SCEV
// set no-wrap flags before normalizing sign/zero extension.
- if (!EnableIVRewrite) {
- Rewriter.disableCanonicalMode();
- SimplifyAndExtend(L, Rewriter, LPM);
- }
+ Rewriter.disableCanonicalMode();
+ SimplifyAndExtend(L, Rewriter, LPM);
// Check to see if this loop has a computable loop-invariant execution count.
// If so, this means that we can compute the final value of any expressions
@@ -1788,106 +1669,28 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount))
RewriteLoopExitValues(L, Rewriter);
- // Eliminate redundant IV users.
- if (EnableIVRewrite)
- Changed |= simplifyIVUsers(IU, SE, &LPM, DeadInsts);
-
// Eliminate redundant IV cycles.
- if (!EnableIVRewrite)
- NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts);
-
- // Compute the type of the largest recurrence expression, and decide whether
- // a canonical induction variable should be inserted.
- Type *LargestType = 0;
- bool NeedCannIV = false;
- bool ExpandBECount = canExpandBackedgeTakenCount(L, SE);
- if (EnableIVRewrite && ExpandBECount) {
- // If we have a known trip count and a single exit block, we'll be
- // rewriting the loop exit test condition below, which requires a
- // canonical induction variable.
- NeedCannIV = true;
- Type *Ty = BackedgeTakenCount->getType();
- if (!EnableIVRewrite) {
- // In this mode, SimplifyIVUsers may have already widened the IV used by
- // the backedge test and inserted a Trunc on the compare's operand. Get
- // the wider type to avoid creating a redundant narrow IV only used by the
- // loop test.
- LargestType = getBackedgeIVType(L);
- }
- if (!LargestType ||
- SE->getTypeSizeInBits(Ty) >
- SE->getTypeSizeInBits(LargestType))
- LargestType = SE->getEffectiveSCEVType(Ty);
- }
- if (EnableIVRewrite) {
- for (IVUsers::const_iterator I = IU->begin(), E = IU->end(); I != E; ++I) {
- NeedCannIV = true;
- Type *Ty =
- SE->getEffectiveSCEVType(I->getOperandValToReplace()->getType());
- if (!LargestType ||
- SE->getTypeSizeInBits(Ty) >
- SE->getTypeSizeInBits(LargestType))
- LargestType = Ty;
- }
- }
-
- // Now that we know the largest of the induction variable expressions
- // in this loop, insert a canonical induction variable of the largest size.
- PHINode *IndVar = 0;
- if (NeedCannIV) {
- // Check to see if the loop already has any canonical-looking induction
- // variables. If any are present and wider than the planned canonical
- // induction variable, temporarily remove them, so that the Rewriter
- // doesn't attempt to reuse them.
- SmallVector<PHINode *, 2> OldCannIVs;
- while (PHINode *OldCannIV = L->getCanonicalInductionVariable()) {
- if (SE->getTypeSizeInBits(OldCannIV->getType()) >
- SE->getTypeSizeInBits(LargestType))
- OldCannIV->removeFromParent();
- else
- break;
- OldCannIVs.push_back(OldCannIV);
- }
+ NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts);
- IndVar = Rewriter.getOrInsertCanonicalInductionVariable(L, LargestType);
-
- ++NumInserted;
- Changed = true;
- DEBUG(dbgs() << "INDVARS: New CanIV: " << *IndVar << '\n');
-
- // Now that the official induction variable is established, reinsert
- // any old canonical-looking variables after it so that the IR remains
- // consistent. They will be deleted as part of the dead-PHI deletion at
- // the end of the pass.
- while (!OldCannIVs.empty()) {
- PHINode *OldCannIV = OldCannIVs.pop_back_val();
- OldCannIV->insertBefore(L->getHeader()->getFirstInsertionPt());
- }
- }
- else if (!EnableIVRewrite && ExpandBECount && needsLFTR(L, DT)) {
- IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);
- }
// If we have a trip count expression, rewrite the loop's exit condition
// using it. We can currently only handle loops with a single exit.
- Value *NewICmp = 0;
- if (ExpandBECount && IndVar) {
- // Check preconditions for proper SCEVExpander operation. SCEV does not
- // express SCEVExpander's dependencies, such as LoopSimplify. Instead any
- // pass that uses the SCEVExpander must do it. This does not work well for
- // loop passes because SCEVExpander makes assumptions about all loops, while
- // LoopPassManager only forces the current loop to be simplified.
- //
- // FIXME: SCEV expansion has no way to bail out, so the caller must
- // explicitly check any assumptions made by SCEV. Brittle.
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount);
- if (!AR || AR->getLoop()->getLoopPreheader())
- NewICmp =
- LinearFunctionTestReplace(L, BackedgeTakenCount, IndVar, Rewriter);
+ if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {
+ PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);
+ if (IndVar) {
+ // Check preconditions for proper SCEVExpander operation. SCEV does not
+ // express SCEVExpander's dependencies, such as LoopSimplify. Instead any
+ // pass that uses the SCEVExpander must do it. This does not work well for
+ // loop passes because SCEVExpander makes assumptions about all loops, while
+ // LoopPassManager only forces the current loop to be simplified.
+ //
+ // FIXME: SCEV expansion has no way to bail out, so the caller must
+ // explicitly check any assumptions made by SCEV. Brittle.
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount);
+ if (!AR || AR->getLoop()->getLoopPreheader())
+ (void)LinearFunctionTestReplace(L, BackedgeTakenCount, IndVar,
+ Rewriter);
+ }
}
- // Rewrite IV-derived expressions.
- if (EnableIVRewrite)
- RewriteIVExpressions(L, Rewriter);
-
// Clear the rewriter cache, because values that are in the rewriter's cache
// can be deleted in the loop below, causing the AssertingVH in the cache to
// trigger.
@@ -1906,13 +1709,6 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// loop may be sunk below the loop to reduce register pressure.
SinkUnusedInvariants(L);
- // For completeness, inform IVUsers of the IV use in the newly-created
- // loop exit test instruction.
- if (IU && NewICmp) {
- ICmpInst *NewICmpInst = dyn_cast<ICmpInst>(NewICmp);
- if (NewICmpInst)
- IU->AddUsersIfInteresting(cast<Instruction>(NewICmpInst->getOperand(0)));
- }
// Clean up dead instructions.
Changed |= DeleteDeadPHIs(L->getHeader());
// Check a post-condition.
@@ -1922,8 +1718,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Verify that LFTR, and any other change have not interfered with SCEV's
// ability to compute trip count.
#ifndef NDEBUG
- if (!EnableIVRewrite && VerifyIndvars &&
- !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
+ if (VerifyIndvars && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
SE->forgetLoop(L);
const SCEV *NewBECount = SE->getBackedgeTakenCount(L);
if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) <
diff --git a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index f410af3..429b61b 100644
--- a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -24,6 +24,7 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Statistic.h"
@@ -75,6 +76,7 @@ namespace {
///
class JumpThreading : public FunctionPass {
TargetData *TD;
+ TargetLibraryInfo *TLI;
LazyValueInfo *LVI;
#ifdef NDEBUG
SmallPtrSet<BasicBlock*, 16> LoopHeaders;
@@ -107,6 +109,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<LazyValueInfo>();
AU.addPreserved<LazyValueInfo>();
+ AU.addRequired<TargetLibraryInfo>();
}
void FindLoopHeaders(Function &F);
@@ -133,6 +136,7 @@ char JumpThreading::ID = 0;
INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
"Jump Threading", false, false)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfo)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(JumpThreading, "jump-threading",
"Jump Threading", false, false)
@@ -144,6 +148,7 @@ FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }
bool JumpThreading::runOnFunction(Function &F) {
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
LVI = &getAnalysis<LazyValueInfo>();
FindLoopHeaders(F);
@@ -674,7 +679,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
// Run constant folding to see if we can reduce the condition to a simple
// constant.
if (Instruction *I = dyn_cast<Instruction>(Condition)) {
- Value *SimpleVal = ConstantFoldInstruction(I, TD);
+ Value *SimpleVal = ConstantFoldInstruction(I, TD, TLI);
if (SimpleVal) {
I->replaceAllUsesWith(SimpleVal);
I->eraseFromParent();
@@ -852,6 +857,9 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
if (BBIt != LoadBB->begin())
return false;
+ // If all of the loads and stores that feed the value have the same TBAA tag,
+ // then we can propagate it onto any newly inserted loads.
+ MDNode *TBAATag = LI->getMetadata(LLVMContext::MD_tbaa);
SmallPtrSet<BasicBlock*, 8> PredsScanned;
typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
@@ -870,11 +878,16 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// Scan the predecessor to see if the value is available in the pred.
BBIt = PredBB->end();
- Value *PredAvailable = FindAvailableLoadedValue(LoadedPtr, PredBB, BBIt, 6);
+ MDNode *ThisTBAATag = 0;
+ Value *PredAvailable = FindAvailableLoadedValue(LoadedPtr, PredBB, BBIt, 6,
+ 0, &ThisTBAATag);
if (!PredAvailable) {
OneUnavailablePred = PredBB;
continue;
}
+
+ // If TBAA tags disagree or are not present, forget about them.
+ if (TBAATag != ThisTBAATag) TBAATag = 0;
// If so, this load is partially redundant. Remember this info so that we
// can create a PHI node.
@@ -921,8 +934,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// Split them out to their own block.
UnavailablePred =
- SplitBlockPredecessors(LoadBB, &PredsToSplit[0], PredsToSplit.size(),
- "thread-pre-split", this);
+ SplitBlockPredecessors(LoadBB, PredsToSplit, "thread-pre-split", this);
}
// If the value isn't available in all predecessors, then there will be
@@ -935,6 +947,9 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
LI->getAlignment(),
UnavailablePred->getTerminator());
NewVal->setDebugLoc(LI->getDebugLoc());
+ if (TBAATag)
+ NewVal->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal));
}
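
The TBAA tag survives onto the newly inserted load only if every predecessor's available value carried the same tag; any disagreement clears it. A standalone sketch of that merge rule, with strings standing in for metadata nodes (illustrative, not the LLVM metadata API):

    #include <cstdio>
    #include <string>

    // Merge rule used above: start from the original load's tag and drop
    // it as soon as any predecessor's value carries a different (or no)
    // tag. The empty string stands in for "no TBAA metadata".
    static std::string mergeTags(const std::string &cur,
                                 const std::string &pred) {
      return cur == pred ? cur : std::string();
    }

    int main() {
      std::string tag = "int";     // tag on the original load
      tag = mergeTags(tag, "int"); // agreeing predecessor: keep "int"
      std::printf("after agree: '%s'\n", tag.c_str());
      tag = mergeTags(tag, "");    // predecessor without a tag: drop it
      std::printf("after mismatch: '%s'\n", tag.c_str());
      return 0;
    }
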
@@ -1082,9 +1097,9 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
DestBB = 0;
else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
- else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
- DestBB = SI->getSuccessor(SI->findCaseValue(cast<ConstantInt>(Val)));
- else {
+ else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
+ DestBB = SI->findCaseValue(cast<ConstantInt>(Val)).getCaseSuccessor();
+ } else {
assert(isa<IndirectBrInst>(BB->getTerminator())
&& "Unexpected terminator");
DestBB = cast<BlockAddress>(Val)->getBasicBlock();
@@ -1334,8 +1349,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
else {
DEBUG(dbgs() << " Factoring out " << PredBBs.size()
<< " common predecessors.\n");
- PredBB = SplitBlockPredecessors(BB, &PredBBs[0], PredBBs.size(),
- ".thr_comm", this);
+ PredBB = SplitBlockPredecessors(BB, PredBBs, ".thr_comm", this);
}
// And finally, do it!
@@ -1479,8 +1493,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
else {
DEBUG(dbgs() << " Factoring out " << PredBBs.size()
<< " common predecessors.\n");
- PredBB = SplitBlockPredecessors(BB, &PredBBs[0], PredBBs.size(),
- ".thr_comm", this);
+ PredBB = SplitBlockPredecessors(BB, PredBBs, ".thr_comm", this);
}
// Okay, we decided to do this! Clone all the instructions in BB onto the end
diff --git a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
index b79bb13..8795cd8 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -43,8 +43,11 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
@@ -84,6 +87,7 @@ namespace {
AU.addPreserved<AliasAnalysis>();
AU.addPreserved("scalar-evolution");
AU.addPreservedID(LoopSimplifyID);
+ AU.addRequired<TargetLibraryInfo>();
}
bool doFinalization() {
@@ -96,6 +100,9 @@ namespace {
LoopInfo *LI; // Current LoopInfo
DominatorTree *DT; // Dominator Tree for the current Loop.
+ TargetData *TD; // TargetData for constant folding.
+ TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
+
// State that is updated as we process loops.
bool Changed; // Set to true when we change anything.
BasicBlock *Preheader; // The preheader block of the current loop...
@@ -177,6 +184,7 @@ INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false)
@@ -194,6 +202,9 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
+ TD = getAnalysisIfAvailable<TargetData>();
+ TLI = &getAnalysis<TargetLibraryInfo>();
+
CurAST = new AliasSetTracker(*AA);
// Collect Alias info from subloops.
for (Loop::iterator LoopItr = L->begin(), LoopItrE = L->end();
@@ -333,7 +344,7 @@ void LICM::HoistRegion(DomTreeNode *N) {
// Try constant folding this instruction. If all the operands are
// constants, it is technically hoistable, but it would be better to just
// fold it.
- if (Constant *C = ConstantFoldInstruction(&I)) {
+ if (Constant *C = ConstantFoldInstruction(&I, TD, TLI)) {
DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
CurAST->copyValue(&I, C);
CurAST->deleteValue(&I);
@@ -369,6 +380,8 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
// in the same alias set as something that ends up being modified.
if (AA->pointsToConstantMemory(LI->getOperand(0)))
return true;
+ if (LI->getMetadata("invariant.load"))
+ return true;
// Don't hoist loads which have may-aliased stores in loop.
uint64_t Size = 0;
@@ -579,7 +592,7 @@ void LICM::hoist(Instruction &I) {
///
bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
// If it is not a trapping instruction, it is always safe to hoist.
- if (Inst.isSafeToSpeculativelyExecute())
+ if (isSafeToSpeculativelyExecute(&Inst))
return true;
return isGuaranteedToExecute(Inst);
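
The "invariant.load" check added above lets LICM hoist such loads without an alias query. A hedged sketch of how a frontend or earlier pass might attach the tag (helper name hypothetical); the new test only checks for the tag's presence, so an empty MDNode suffices:

    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Metadata.h"
    #include "llvm/ADT/ArrayRef.h"
    using namespace llvm;

    // Tag a load whose memory is known never to change while reachable.
    static void markInvariant(LoadInst *LI) {
      LLVMContext &Ctx = LI->getContext();
      LI->setMetadata("invariant.load", MDNode::get(Ctx, ArrayRef<Value*>()));
    }
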
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
index af25c5c..f0f05e6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -19,6 +19,7 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
@@ -43,6 +44,7 @@ namespace {
AU.addPreservedID(LoopSimplifyID);
AU.addPreservedID(LCSSAID);
AU.addPreserved("scalar-evolution");
+ AU.addRequired<TargetLibraryInfo>();
}
};
}
@@ -50,6 +52,7 @@ namespace {
char LoopInstSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify",
"Simplify instructions in loops", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
@@ -64,6 +67,7 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
LoopInfo *LI = &getAnalysis<LoopInfo>();
const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallVector<BasicBlock*, 8> ExitBlocks;
L->getUniqueExitBlocks(ExitBlocks);
@@ -104,7 +108,7 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Don't bother simplifying unused instructions.
if (!I->use_empty()) {
- Value *V = SimplifyInstruction(I, TD, DT);
+ Value *V = SimplifyInstruction(I, TD, TLI, DT);
if (V && LI->replacementPreservesLCSSAForm(I, V)) {
// Mark all uses for resimplification next time round the loop.
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
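
LICM above and LoopInstSimplify here wire up TargetLibraryInfo the same way. A condensed sketch of the recurring pattern, with a hypothetical pass name (the real hunks also add the matching INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo) line):

    #include "llvm/Pass.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    namespace {
    struct ExamplePass : public FunctionPass {  // hypothetical pass
      static char ID;
      ExamplePass() : FunctionPass(ID) {}
      virtual void getAnalysisUsage(AnalysisUsage &AU) const {
        AU.addRequired<TargetLibraryInfo>();    // declare the dependency...
      }
      virtual bool runOnFunction(Function &F) {
        // ...then fetch it and hand it to SimplifyInstruction or
        // ConstantFoldInstruction, as the hunks above do.
        TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
        (void)TLI;
        return false;
      }
    };
    }
    char ExamplePass::ID = 0;
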
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 9fd0958..59aace9 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -19,6 +19,7 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -52,13 +53,14 @@ namespace {
}
bool runOnLoop(Loop *L, LPPassManager &LPM);
+ void simplifyLoopLatch(Loop *L);
bool rotateLoop(Loop *L);
-
+
private:
LoopInfo *LI;
};
}
-
+
char LoopRotate::ID = 0;
INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
@@ -73,6 +75,11 @@ Pass *llvm::createLoopRotatePass() { return new LoopRotate(); }
bool LoopRotate::runOnLoop(Loop *L, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
+ // Simplify the loop latch before attempting to rotate the header
+ // upward. Rotation may not be needed if the loop tail can be folded into the
+ // loop exit.
+ simplifyLoopLatch(L);
+
// One loop can be rotated multiple times.
bool MadeChange = false;
while (rotateLoop(L))
@@ -92,18 +99,18 @@ static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
BasicBlock::iterator I, E = OrigHeader->end();
for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));
-
+
// Now fix up users of the instructions in OrigHeader, inserting PHI nodes
// as necessary.
SSAUpdater SSA;
for (I = OrigHeader->begin(); I != E; ++I) {
Value *OrigHeaderVal = I;
-
+
// If there are no uses of the value (e.g. because it returns void), there
// is nothing to rewrite.
if (OrigHeaderVal->use_empty())
continue;
-
+
Value *OrigPreHeaderVal = ValueMap[OrigHeaderVal];
// The value now exits in two versions: the initial value in the preheader
@@ -111,27 +118,27 @@ static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);
-
+
// Visit each use of the OrigHeader instruction.
for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
UE = OrigHeaderVal->use_end(); UI != UE; ) {
// Grab the use before incrementing the iterator.
Use &U = UI.getUse();
-
+
// Increment the iterator before removing the use from the list.
++UI;
-
+
// SSAUpdater can't handle a non-PHI use in the same block as an
// earlier def. We can easily handle those cases manually.
Instruction *UserInst = cast<Instruction>(U.getUser());
if (!isa<PHINode>(UserInst)) {
BasicBlock *UserBB = UserInst->getParent();
-
+
// The original users in the OrigHeader are already using the
// original definitions.
if (UserBB == OrigHeader)
continue;
-
+
// Users in the OrigPreHeader need to use the value to which the
// original definitions are mapped.
if (UserBB == OrigPreheader) {
@@ -139,32 +146,128 @@ static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
continue;
}
}
-
+
// Anything else can be handled by SSAUpdater.
SSA.RewriteUse(U);
}
}
-}
+}
+
+/// Determine whether the instructions in this range may be safely and cheaply
+/// speculated. This is not an important enough situation to develop complex
+/// heuristics. We handle a single arithmetic instruction along with any type
+/// conversions.
+static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
+ BasicBlock::iterator End) {
+ bool seenIncrement = false;
+ for (BasicBlock::iterator I = Begin; I != End; ++I) {
+
+ if (!isSafeToSpeculativelyExecute(I))
+ return false;
+
+ if (isa<DbgInfoIntrinsic>(I))
+ continue;
+
+ switch (I->getOpcode()) {
+ default:
+ return false;
+ case Instruction::GetElementPtr:
+ // GEPs are cheap if all indices are constant.
+ if (!cast<GEPOperator>(I)->hasAllConstantIndices())
+ return false;
+ // fall-thru to increment case
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ if (seenIncrement)
+ return false;
+ seenIncrement = true;
+ break;
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ // ignore type conversions
+ break;
+ }
+ }
+ return true;
+}
+
+/// Fold the loop tail into the loop exit by speculating the loop tail
+/// instructions. Typically, this is a single post-increment. In the case of a
+/// simple 2-block loop, hoisting the increment can be much better than
+/// duplicating the entire loop header. In the case of loops with early exits,
+/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
+/// canonical form so downstream passes can handle it.
+///
+/// I don't believe this invalidates SCEV.
+void LoopRotate::simplifyLoopLatch(Loop *L) {
+ BasicBlock *Latch = L->getLoopLatch();
+ if (!Latch || Latch->hasAddressTaken())
+ return;
+
+ BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
+ if (!Jmp || !Jmp->isUnconditional())
+ return;
+
+ BasicBlock *LastExit = Latch->getSinglePredecessor();
+ if (!LastExit || !L->isLoopExiting(LastExit))
+ return;
+
+ BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
+ if (!BI)
+ return;
+
+ if (!shouldSpeculateInstrs(Latch->begin(), Jmp))
+ return;
+
+ DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
+ << LastExit->getName() << "\n");
+
+ // Hoist the instructions from Latch into LastExit.
+ LastExit->getInstList().splice(BI, Latch->getInstList(), Latch->begin(), Jmp);
+
+ unsigned FallThruPath = BI->getSuccessor(0) == Latch ? 0 : 1;
+ BasicBlock *Header = Jmp->getSuccessor(0);
+ assert(Header == L->getHeader() && "expected a backward branch");
+
+ // Remove Latch from the CFG so that LastExit becomes the new Latch.
+ BI->setSuccessor(FallThruPath, Header);
+ Latch->replaceSuccessorsPhiUsesWith(LastExit);
+ Jmp->eraseFromParent();
+
+ // Nuke the Latch block.
+ assert(Latch->empty() && "unable to evacuate Latch");
+ LI->removeBlock(Latch);
+ if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>())
+ DT->eraseNode(Latch);
+ Latch->eraseFromParent();
+}
/// Rotate loop LP. Return true if the loop is rotated.
bool LoopRotate::rotateLoop(Loop *L) {
// If the loop has only one block then there is not much to rotate.
if (L->getBlocks().size() == 1)
return false;
-
+
BasicBlock *OrigHeader = L->getHeader();
-
+
BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
if (BI == 0 || BI->isUnconditional())
return false;
-
+
// If the loop header is not one of the loop exiting blocks then
// either this loop is already rotated or it is not
// suitable for loop rotation transformations.
if (!L->isLoopExiting(OrigHeader))
return false;
- // Updating PHInodes in loops with multiple exits adds complexity.
+ // Updating PHI nodes in loops with multiple exits adds complexity.
// Keep it simple, and restrict loop rotation to loops with one exit only.
// In future, lift this restriction and support for multiple exits if
// required.
@@ -184,7 +287,7 @@ bool LoopRotate::rotateLoop(Loop *L) {
// Now, this loop is suitable for rotation.
BasicBlock *OrigPreheader = L->getLoopPreheader();
BasicBlock *OrigLatch = L->getLoopLatch();
-
+
// If the loop could not be converted to canonical form, it must have an
// indirectbr in it, just give up.
if (OrigPreheader == 0 || OrigLatch == 0)
@@ -203,9 +306,9 @@ bool LoopRotate::rotateLoop(Loop *L) {
if (L->contains(Exit))
std::swap(Exit, NewHeader);
assert(NewHeader && "Unable to determine new loop header");
- assert(L->contains(NewHeader) && !L->contains(Exit) &&
+ assert(L->contains(NewHeader) && !L->contains(Exit) &&
"Unable to determine loop header and exit blocks");
-
+
// This code assumes that the new header has exactly one predecessor.
// Remove any single-entry PHI nodes in it.
assert(NewHeader->getSinglePredecessor() &&
@@ -227,7 +330,7 @@ bool LoopRotate::rotateLoop(Loop *L) {
TerminatorInst *LoopEntryBranch = OrigPreheader->getTerminator();
while (I != E) {
Instruction *Inst = I++;
-
+
// If the instruction's operands are invariant and it doesn't read or write
// memory, then it is safe to hoist. Doing this doesn't change the order of
// execution in the preheader, but does prevent the instruction from
@@ -236,18 +339,19 @@ bool LoopRotate::rotateLoop(Loop *L) {
// memory (without proving that the loop doesn't write).
if (L->hasLoopInvariantOperands(Inst) &&
!Inst->mayReadFromMemory() && !Inst->mayWriteToMemory() &&
- !isa<TerminatorInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst)) {
+ !isa<TerminatorInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst) &&
+ !isa<AllocaInst>(Inst)) {
Inst->moveBefore(LoopEntryBranch);
continue;
}
-
+
// Otherwise, create a duplicate of the instruction.
Instruction *C = Inst->clone();
-
+
// Eagerly remap the operands of the instruction.
RemapInstruction(C, ValueMap,
RF_NoModuleLevelChanges|RF_IgnoreMissingEntries);
-
+
// With the operands remapped, see if the instruction constant folds or is
// otherwise simplifyable. This commonly occurs because the entry from PHI
// nodes allows icmps and other instructions to fold.
@@ -287,7 +391,7 @@ bool LoopRotate::rotateLoop(Loop *L) {
L->moveToHeader(NewHeader);
assert(L->getHeader() == NewHeader && "Latch block is our new header");
-
+
// At this point, we've finished our major CFG changes. As part of cloning
// the loop into the preheader we've simplified instructions and the
// duplicated conditional branch may now be branching on a constant. If it is
@@ -308,16 +412,16 @@ bool LoopRotate::rotateLoop(Loop *L) {
// the dominator of Exit.
DT->changeImmediateDominator(Exit, OrigPreheader);
DT->changeImmediateDominator(NewHeader, OrigPreheader);
-
+
// Update OrigHeader to be dominated by the new header block.
DT->changeImmediateDominator(OrigHeader, OrigLatch);
}
-
+
// Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
// thus is not a preheader anymore. Split the edge to form a real preheader.
BasicBlock *NewPH = SplitCriticalEdge(OrigPreheader, NewHeader, this);
NewPH->setName(NewHeader->getName() + ".lr.ph");
-
+
// Preserve canonical loop form, which means that 'Exit' should have only one
// predecessor.
BasicBlock *ExitSplit = SplitCriticalEdge(L->getLoopLatch(), Exit, this);
@@ -329,7 +433,7 @@ bool LoopRotate::rotateLoop(Loop *L) {
BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
NewBI->setDebugLoc(PHBI->getDebugLoc());
PHBI->eraseFromParent();
-
+
// With our CFG finalized, update DomTree if it is available.
if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>()) {
// Update OrigHeader to be dominated by the new header block.
@@ -337,7 +441,7 @@ bool LoopRotate::rotateLoop(Loop *L) {
DT->changeImmediateDominator(OrigHeader, OrigLatch);
}
}
-
+
assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");
@@ -346,7 +450,7 @@ bool LoopRotate::rotateLoop(Loop *L) {
// connected by an unconditional branch. This is just a cleanup so the
// emitted code isn't too gross in this common case.
MergeBlockIntoPredecessor(OrigHeader, this);
-
+
++NumRotated;
return true;
}
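
For intuition about the new latch simplification, the shape it targets can be written in source form (illustrative, not from the patch): the latch holds nothing but the post-increment, which shouldSpeculateInstrs accepts, so the add is speculated into the exiting block and the dedicated latch disappears.

    // The "++i" lives in the loop's latch block; it has no side effects and
    // costs one add, so it may be executed speculatively in the exiting block
    // even on the iteration that takes the early return.
    int firstNegative(const int *A, int N) {
      for (int i = 0; i != N; ++i)
        if (A[i] < 0)
          return i;   // early exit; duplicating the header would not help here
      return -1;
    }
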
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 3e122c2..d57ec22 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -77,19 +77,22 @@
#include <algorithm>
using namespace llvm;
-namespace llvm {
-cl::opt<bool> EnableNested(
- "enable-lsr-nested", cl::Hidden, cl::desc("Enable LSR on nested loops"));
-
-cl::opt<bool> EnableRetry(
- "enable-lsr-retry", cl::Hidden, cl::desc("Enable LSR retry"));
-
// Temporary flag to cleanup congruent phis after LSR phi expansion.
// It's currently disabled until we can determine whether it's truly useful or
// not. The flag should be removed after the v3.0 release.
-cl::opt<bool> EnablePhiElim(
- "enable-lsr-phielim", cl::Hidden, cl::desc("Enable LSR phi elimination"));
-}
+// This is now needed for ivchains.
+static cl::opt<bool> EnablePhiElim(
+ "enable-lsr-phielim", cl::Hidden, cl::init(true),
+ cl::desc("Enable LSR phi elimination"));
+
+#ifndef NDEBUG
+// Stress test IV chain generation.
+static cl::opt<bool> StressIVChain(
+ "stress-ivchain", cl::Hidden, cl::init(false),
+ cl::desc("Stress test LSR IV chains"));
+#else
+static bool StressIVChain = false;
+#endif
namespace {
@@ -636,6 +639,91 @@ static Type *getAccessType(const Instruction *Inst) {
return AccessTy;
}
+/// isExistingPhi - Return true if this AddRec is already a phi in its loop.
+static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
+ for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
+ PHINode *PN = dyn_cast<PHINode>(I); ++I) {
+ if (SE.isSCEVable(PN->getType()) &&
+ (SE.getEffectiveSCEVType(PN->getType()) ==
+ SE.getEffectiveSCEVType(AR->getType())) &&
+ SE.getSCEV(PN) == AR)
+ return true;
+ }
+ return false;
+}
+
+/// Check if expanding this expression is likely to incur significant cost. This
+/// is tricky because SCEV doesn't track which expressions are actually computed
+/// by the current IR.
+///
+/// We currently allow expansion of IV increments that involve adds,
+/// multiplication by constants, and AddRecs from existing phis.
+///
+/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
+/// obvious multiple of the UDivExpr.
+static bool isHighCostExpansion(const SCEV *S,
+ SmallPtrSet<const SCEV*, 8> &Processed,
+ ScalarEvolution &SE) {
+ // Zero/One operand expressions
+ switch (S->getSCEVType()) {
+ case scUnknown:
+ case scConstant:
+ return false;
+ case scTruncate:
+ return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
+ Processed, SE);
+ case scZeroExtend:
+ return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
+ Processed, SE);
+ case scSignExtend:
+ return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
+ Processed, SE);
+ }
+
+ if (!Processed.insert(S))
+ return false;
+
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+ I != E; ++I) {
+ if (isHighCostExpansion(*I, Processed, SE))
+ return true;
+ }
+ return false;
+ }
+
+ if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
+ if (Mul->getNumOperands() == 2) {
+ // Multiplication by a constant is ok
+ if (isa<SCEVConstant>(Mul->getOperand(0)))
+ return isHighCostExpansion(Mul->getOperand(1), Processed, SE);
+
+ // If we have the value of one operand, check if an existing
+ // multiplication already generates this expression.
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
+ Value *UVal = U->getValue();
+ for (Value::use_iterator UI = UVal->use_begin(), UE = UVal->use_end();
+ UI != UE; ++UI) {
+ // If U is a constant, it may be used by a ConstantExpr.
+ Instruction *User = dyn_cast<Instruction>(*UI);
+ if (User && User->getOpcode() == Instruction::Mul
+ && SE.isSCEVable(User->getType())) {
+ return SE.getSCEV(User) == Mul;
+ }
+ }
+ }
+ }
+ }
+
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ if (isExistingPhi(AR, SE))
+ return false;
+ }
+
+ // For now, consider any other type of expression (div/mul/min/max) high cost.
+ return true;
+}
+
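
A hedged usage sketch of the helper above, assuming it is called from the same file (as the chain code further below does): callers pass a fresh Processed set per top-level query, and the recursion reuses it to memoize shared SCEV sub-expressions.

    // Illustrative wrapper only. Anything that falls through to the default
    // (udiv, general mul, min/max) is treated as needing new instructions.
    static bool cheapToExpand(const SCEV *S, ScalarEvolution &SE) {
      SmallPtrSet<const SCEV*, 8> Processed;
      return !isHighCostExpansion(S, Processed, SE);
    }
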
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
@@ -705,7 +793,8 @@ public:
const DenseSet<const SCEV *> &VisitedRegs,
const Loop *L,
const SmallVectorImpl<int64_t> &Offsets,
- ScalarEvolution &SE, DominatorTree &DT);
+ ScalarEvolution &SE, DominatorTree &DT,
+ SmallPtrSet<const SCEV *, 16> *LoserRegs = 0);
void print(raw_ostream &OS) const;
void dump() const;
@@ -718,7 +807,8 @@ private:
void RatePrimaryRegister(const SCEV *Reg,
SmallPtrSet<const SCEV *, 16> &Regs,
const Loop *L,
- ScalarEvolution &SE, DominatorTree &DT);
+ ScalarEvolution &SE, DominatorTree &DT,
+ SmallPtrSet<const SCEV *, 16> *LoserRegs);
};
}
@@ -729,41 +819,20 @@ void Cost::RateRegister(const SCEV *Reg,
const Loop *L,
ScalarEvolution &SE, DominatorTree &DT) {
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
- if (AR->getLoop() == L)
- AddRecCost += 1; /// TODO: This should be a function of the stride.
-
// If this is an addrec for another loop, don't second-guess its addrec phi
// nodes. LSR isn't currently smart enough to reason about more than one
- // loop at a time. LSR has either already run on inner loops, will not run
- // on other loops, and cannot be expected to change sibling loops. If the
- // AddRec exists, consider it's register free and leave it alone. Otherwise,
- // do not consider this formula at all.
- // FIXME: why do we need to generate such fomulae?
- else if (!EnableNested || L->contains(AR->getLoop()) ||
- (!AR->getLoop()->contains(L) &&
- DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
- for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
- PHINode *PN = dyn_cast<PHINode>(I); ++I) {
- if (SE.isSCEVable(PN->getType()) &&
- (SE.getEffectiveSCEVType(PN->getType()) ==
- SE.getEffectiveSCEVType(AR->getType())) &&
- SE.getSCEV(PN) == AR)
- return;
- }
- if (!EnableNested) {
- Loose();
+ // loop at a time. LSR has already run on inner loops, will not run on outer
+ // loops, and cannot be expected to change sibling loops.
+ if (AR->getLoop() != L) {
+ // If the AddRec exists, consider its register free and leave it alone.
+ if (isExistingPhi(AR, SE))
return;
- }
- // If this isn't one of the addrecs that the loop already has, it
- // would require a costly new phi and add. TODO: This isn't
- // precisely modeled right now.
- ++NumBaseAdds;
- if (!Regs.count(AR->getStart())) {
- RateRegister(AR->getStart(), Regs, L, SE, DT);
- if (isLoser())
- return;
- }
+
+ // Otherwise, do not consider this formula at all.
+ Loose();
+ return;
}
+ AddRecCost += 1; /// TODO: This should be a function of the stride.
// Add the step value register, if it needs one.
// TODO: The non-affine case isn't precisely modeled here.
@@ -791,13 +860,22 @@ void Cost::RateRegister(const SCEV *Reg,
}
/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
-/// before, rate it.
+/// before, rate it. Optional LoserRegs provides a way to declare any formula
+/// that refers to one of those regs an instant loser.
void Cost::RatePrimaryRegister(const SCEV *Reg,
SmallPtrSet<const SCEV *, 16> &Regs,
const Loop *L,
- ScalarEvolution &SE, DominatorTree &DT) {
- if (Regs.insert(Reg))
+ ScalarEvolution &SE, DominatorTree &DT,
+ SmallPtrSet<const SCEV *, 16> *LoserRegs) {
+ if (LoserRegs && LoserRegs->count(Reg)) {
+ Loose();
+ return;
+ }
+ if (Regs.insert(Reg)) {
RateRegister(Reg, Regs, L, SE, DT);
+ if (LoserRegs && isLoser())
+ LoserRegs->insert(Reg);
+ }
}
void Cost::RateFormula(const Formula &F,
@@ -805,14 +883,15 @@ void Cost::RateFormula(const Formula &F,
const DenseSet<const SCEV *> &VisitedRegs,
const Loop *L,
const SmallVectorImpl<int64_t> &Offsets,
- ScalarEvolution &SE, DominatorTree &DT) {
+ ScalarEvolution &SE, DominatorTree &DT,
+ SmallPtrSet<const SCEV *, 16> *LoserRegs) {
// Tally up the registers.
if (const SCEV *ScaledReg = F.ScaledReg) {
if (VisitedRegs.count(ScaledReg)) {
Loose();
return;
}
- RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
+ RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs);
if (isLoser())
return;
}
@@ -823,7 +902,7 @@ void Cost::RateFormula(const Formula &F,
Loose();
return;
}
- RatePrimaryRegister(BaseReg, Regs, L, SE, DT);
+ RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs);
if (isLoser())
return;
}
@@ -1105,7 +1184,6 @@ bool LSRUse::InsertFormula(const Formula &F) {
Formulae.push_back(F);
// Record registers now being used by this use.
- if (F.ScaledReg) Regs.insert(F.ScaledReg);
Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
return true;
@@ -1116,7 +1194,6 @@ void LSRUse::DeleteFormula(Formula &F) {
if (&F != &Formulae.back())
std::swap(F, Formulae.back());
Formulae.pop_back();
- assert(!Formulae.empty() && "LSRUse has no formulae left!");
}
/// RecomputeRegs - Recompute the Regs field, and update RegUses.
@@ -1205,10 +1282,19 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM,
// If we have low-level target information, ask the target if it can fold an
// integer immediate on an icmp.
if (AM.BaseOffs != 0) {
- if (TLI) return TLI->isLegalICmpImmediate(-(uint64_t)AM.BaseOffs);
- return false;
+ if (!TLI)
+ return false;
+ // We have one of:
+ // ICmpZero BaseReg + Offset => ICmp BaseReg, -Offset
+ // ICmpZero -1*ScaleReg + Offset => ICmp ScaleReg, Offset
+ // Offs is the ICmp immediate.
+ int64_t Offs = AM.BaseOffs;
+ if (AM.Scale == 0)
+ Offs = -(uint64_t)Offs; // The cast does the right thing with INT64_MIN.
+ return TLI->isLegalICmpImmediate(Offs);
}
+ // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
return true;
case LSRUse::Basic:
@@ -1220,7 +1306,7 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM,
return AM.Scale == 0 || AM.Scale == -1;
}
- return false;
+ llvm_unreachable("Invalid LSRUse Kind!");
}
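
A worked example of the immediate handling above, with hypothetical values: for Scale == 0 and BaseOffs == -8, "(BaseReg + -8) == 0" becomes "icmp BaseReg, 8", so the target is asked about -BaseOffs; for Scale == -1 and BaseOffs == 8, "(8 - ScaleReg) == 0" becomes "icmp ScaleReg, 8" and BaseOffs is used as-is.

    #include <stdint.h>

    // Sketch of just the immediate computation. Negating through uint64_t is
    // well defined even for INT64_MIN, mirroring the comment in the hunk.
    static int64_t icmpZeroImmediate(int64_t BaseOffs, int64_t Scale) {
      return Scale == 0 ? (int64_t)(-(uint64_t)BaseOffs) : BaseOffs;
    }
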
static bool isLegalUse(TargetLowering::AddrMode AM,
@@ -1327,6 +1413,36 @@ struct UseMapDenseMapInfo {
}
};
+/// IVInc - An individual increment in a Chain of IV increments.
+/// Relate an IV user to an expression that computes the IV it uses from the IV
+/// used by the previous link in the Chain.
+///
+/// For the head of a chain, IncExpr holds the absolute SCEV expression for the
+/// original IVOperand. The head of the chain's IVOperand is only valid during
+/// chain collection, before LSR replaces IV users. During chain generation,
+/// IncExpr can be used to find the new IVOperand that computes the same
+/// expression.
+struct IVInc {
+ Instruction *UserInst;
+ Value* IVOperand;
+ const SCEV *IncExpr;
+
+ IVInc(Instruction *U, Value *O, const SCEV *E):
+ UserInst(U), IVOperand(O), IncExpr(E) {}
+};
+
+// IVChain - The list of IV increments in program order.
+// We typically add the head of a chain without finding subsequent links.
+typedef SmallVector<IVInc,1> IVChain;
+
+/// ChainUsers - Helper for CollectChains to track multiple IV increment uses.
+/// Distinguish between FarUsers that definitely cross IV increments and
+/// NearUsers that may be used between IV increments.
+struct ChainUsers {
+ SmallPtrSet<Instruction*, 4> FarUsers;
+ SmallPtrSet<Instruction*, 4> NearUsers;
+};
+
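
To make the IVInc/IVChain terminology concrete, a hypothetical two-link chain in source form: the access through p is the chain head, the store to p[1] becomes an IVInc whose IncExpr is the constant byte offset from the previous link, and the pointer increment feeding the header phi closes the chain.

    // p[0] heads the chain; the p[1] store is an IVInc with IncExpr = +4
    // bytes; "p += 2" is the backedge increment that a phi-terminated chain
    // can absorb as a postinc use.
    void scalePairs(float *p, float *e, float k) {
      while (p != e) {
        p[0] *= k;
        p[1] *= k;
        p += 2;
      }
    }
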
/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
@@ -1359,11 +1475,29 @@ class LSRInstance {
/// RegUses - Track which uses use which register candidates.
RegUseTracker RegUses;
+ // Limit the number of chains to avoid quadratic behavior. We don't expect to
+ // have more than a few IV increment chains in a loop. Missing a Chain falls
+ // back to normal LSR behavior for those uses.
+ static const unsigned MaxChains = 8;
+
+ /// IVChainVec - IV users can form a chain of IV increments.
+ SmallVector<IVChain, MaxChains> IVChainVec;
+
+ /// IVIncSet - IV users that belong to profitable IVChains.
+ SmallPtrSet<Use*, MaxChains> IVIncSet;
+
void OptimizeShadowIV();
bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
void OptimizeLoopTermCond();
+ void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
+ SmallVectorImpl<ChainUsers> &ChainUsersVec);
+ void FinalizeChain(IVChain &Chain);
+ void CollectChains();
+ void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakVH> &DeadInsts);
+
void CollectInterestingTypesAndFactors();
void CollectFixupsAndInitialFormulae();
@@ -1389,7 +1523,6 @@ class LSRInstance {
LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);
-public:
void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
void CountRegisters(const Formula &F, size_t LUIdx);
@@ -1428,9 +1561,11 @@ public:
BasicBlock::iterator
HoistInsertPosition(BasicBlock::iterator IP,
const SmallVectorImpl<Instruction *> &Inputs) const;
- BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP,
- const LSRFixup &LF,
- const LSRUse &LU) const;
+ BasicBlock::iterator
+ AdjustInsertPositionForExpand(BasicBlock::iterator IP,
+ const LSRFixup &LF,
+ const LSRUse &LU,
+ SCEVExpander &Rewriter) const;
Value *Expand(const LSRFixup &LF,
const Formula &F,
@@ -1450,6 +1585,7 @@ public:
void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
Pass *P);
+public:
LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);
bool getChanged() const { return Changed; }
@@ -2045,7 +2181,8 @@ void LSRInstance::CollectInterestingTypesAndFactors() {
do {
const SCEV *S = Worklist.pop_back_val();
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
- Strides.insert(AR->getStepRecurrence(SE));
+ if (AR->getLoop() == L)
+ Strides.insert(AR->getStepRecurrence(SE));
Worklist.push_back(AR->getStart());
} else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
Worklist.append(Add->op_begin(), Add->op_end());
@@ -2091,11 +2228,544 @@ void LSRInstance::CollectInterestingTypesAndFactors() {
DEBUG(print_factors_and_types(dbgs()));
}
+/// findIVOperand - Helper for CollectChains that finds an IV operand (computed
+/// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped
+/// Instructions to IVStrideUses, we could partially skip this.
+static User::op_iterator
+findIVOperand(User::op_iterator OI, User::op_iterator OE,
+ Loop *L, ScalarEvolution &SE) {
+ for(; OI != OE; ++OI) {
+ if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
+ if (!SE.isSCEVable(Oper->getType()))
+ continue;
+
+ if (const SCEVAddRecExpr *AR =
+ dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
+ if (AR->getLoop() == L)
+ break;
+ }
+ }
+ }
+ return OI;
+}
+
+/// getWideOperand - IVChain logic must consistently peek through TruncInst
+/// operands, so wrap it in a convenient helper.
+static Value *getWideOperand(Value *Oper) {
+ if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
+ return Trunc->getOperand(0);
+ return Oper;
+}
+
+/// isCompatibleIVType - Return true if we allow an IV chain to include both
+/// types.
+static bool isCompatibleIVType(Value *LVal, Value *RVal) {
+ Type *LType = LVal->getType();
+ Type *RType = RVal->getType();
+ return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy());
+}
+
+/// getExprBase - Return an approximation of this SCEV expression's "base", or
+/// NULL for any constant. Returning the expression itself is
+/// conservative. Returning a deeper subexpression is more precise and valid as
+/// long as it isn't less complex than another subexpression. For expressions
+/// involving multiple unscaled values, we need to return the pointer-type
+/// SCEVUnknown. This avoids forming chains across objects, such as:
+/// PrevOper==a[i], IVOper==b[i], IVInc==b-a.
+///
+/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
+/// SCEVUnknown, we simply return the rightmost SCEV operand.
+static const SCEV *getExprBase(const SCEV *S) {
+ switch (S->getSCEVType()) {
+ default: // including scUnknown.
+ return S;
+ case scConstant:
+ return 0;
+ case scTruncate:
+ return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
+ case scZeroExtend:
+ return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
+ case scSignExtend:
+ return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
+ case scAddExpr: {
+ // Skip over scaled operands (scMulExpr) to follow add operands as long as
+ // there's nothing more complex.
+ // FIXME: not sure if we want to recognize negation.
+ const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
+ for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
+ E(Add->op_begin()); I != E; ++I) {
+ const SCEV *SubExpr = *I;
+ if (SubExpr->getSCEVType() == scAddExpr)
+ return getExprBase(SubExpr);
+
+ if (SubExpr->getSCEVType() != scMulExpr)
+ return SubExpr;
+ }
+ return S; // all operands are scaled, be conservative.
+ }
+ case scAddRecExpr:
+ return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
+ }
+}
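
Illustration of the comment above, with informal SCEV spelling: for a[i] the expression is {%a,+,4}, whose base is the SCEVUnknown %a, while b[i] yields base %b. Because the bases differ, getProfitableChainIncrement below bails out before it would ever form the loop-variant increment b - a.
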
+
+/// Return the chain increment if it is profitable to expand into a loop
+/// invariant value, which may require its own register, or null otherwise. A
+/// profitable chain increment is an offset relative to the same base. We allow
+/// such an offset to be used as the chain increment as long as it is not
+/// obviously expensive to expand using real instructions.
+static const SCEV *
+getProfitableChainIncrement(Value *NextIV, Value *PrevIV,
+ const IVChain &Chain, Loop *L,
+ ScalarEvolution &SE, const TargetLowering *TLI) {
+ // Prune the solution space aggressively by checking that both IV operands
+ // are expressions that operate on the same unscaled SCEVUnknown. This
+ // "base" will be canceled by the subsequent getMinusSCEV call. Checking first
+ // avoids creating extra SCEV expressions.
+ const SCEV *OperExpr = SE.getSCEV(NextIV);
+ const SCEV *PrevExpr = SE.getSCEV(PrevIV);
+ if (getExprBase(OperExpr) != getExprBase(PrevExpr) && !StressIVChain)
+ return 0;
+
+ const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
+ if (!SE.isLoopInvariant(IncExpr, L))
+ return 0;
+
+ // We are not able to expand an increment unless it is loop invariant,
+ // however, the following checks are purely for profitability.
+ if (StressIVChain)
+ return IncExpr;
+
+ // Do not replace a constant offset from IV head with a nonconstant IV
+ // increment.
+ if (!isa<SCEVConstant>(IncExpr)) {
+ const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Chain[0].IVOperand));
+ if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
+ return 0;
+ }
+
+ SmallPtrSet<const SCEV*, 8> Processed;
+ if (isHighCostExpansion(IncExpr, Processed, SE))
+ return 0;
+
+ return IncExpr;
+}
+
+/// Return true if the number of registers needed for the chain is estimated to
+/// be less than the number required for the individual IV users. First prohibit
+/// any IV users that keep the IV live across increments (the Users set should
+/// be empty). Next count the number and type of increments in the chain.
+///
+/// Chaining IVs can lead to considerable code bloat if ISEL doesn't
+/// effectively use postinc addressing modes. Only consider it profitable if the
+/// increments can be computed in fewer registers when chained.
+///
+/// TODO: Consider an IVInc free if it's already used in other chains.
+static bool
+isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
+ ScalarEvolution &SE, const TargetLowering *TLI) {
+ if (StressIVChain)
+ return true;
+
+ if (Chain.size() <= 2)
+ return false;
+
+ if (!Users.empty()) {
+ DEBUG(dbgs() << "Chain: " << *Chain[0].UserInst << " users:\n";
+ for (SmallPtrSet<Instruction*, 4>::const_iterator I = Users.begin(),
+ E = Users.end(); I != E; ++I) {
+ dbgs() << " " << **I << "\n";
+ });
+ return false;
+ }
+ assert(!Chain.empty() && "empty IV chains are not allowed");
+
+ // The chain itself may require a register, so initialize cost to 1.
+ int cost = 1;
+
+ // A complete chain likely eliminates the need for keeping the original IV in
+ // a register. LSR does not currently know how to form a complete chain unless
+ // the header phi already exists.
+ if (isa<PHINode>(Chain.back().UserInst)
+ && SE.getSCEV(Chain.back().UserInst) == Chain[0].IncExpr) {
+ --cost;
+ }
+ const SCEV *LastIncExpr = 0;
+ unsigned NumConstIncrements = 0;
+ unsigned NumVarIncrements = 0;
+ unsigned NumReusedIncrements = 0;
+ for (IVChain::const_iterator I = llvm::next(Chain.begin()), E = Chain.end();
+ I != E; ++I) {
+
+ if (I->IncExpr->isZero())
+ continue;
+
+ // Incrementing by zero or some constant is neutral. We assume constants can
+ // be folded into an addressing mode or an add's immediate operand.
+ if (isa<SCEVConstant>(I->IncExpr)) {
+ ++NumConstIncrements;
+ continue;
+ }
+
+ if (I->IncExpr == LastIncExpr)
+ ++NumReusedIncrements;
+ else
+ ++NumVarIncrements;
+
+ LastIncExpr = I->IncExpr;
+ }
+ // An IV chain with a single increment is handled by LSR's postinc
+ // uses. However, a chain with multiple increments requires keeping the IV's
+ // value live longer than it needs to be if chained.
+ if (NumConstIncrements > 1)
+ --cost;
+
+ // Materializing increment expressions in the preheader that didn't exist in
+ // the original code may cost a register. For example, sign-extended array
+ // indices can produce ridiculous increments like this:
+ // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
+ cost += NumVarIncrements;
+
+ // Reusing variable increments likely saves a register to hold the multiple of
+ // the stride.
+ cost -= NumReusedIncrements;
+
+ DEBUG(dbgs() << "Chain: " << *Chain[0].UserInst << " Cost: " << cost << "\n");
+
+ return cost < 0;
+}
+
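
A worked pass over the cost accounting above, for a hypothetical chain: three links ending in the header phi, both non-head increments constant, and no far users. Cost starts at 1, drops by 1 for the complete phi-terminated chain (assuming the phi's SCEV matches the head expression, the case the code tests), drops by 1 more because NumConstIncrements > 1, and gains nothing from variable or reused increments, finishing at -1; the chain is kept.
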
+/// ChainInstruction - Add this IV user to an existing chain or make it the head
+/// of a new chain.
+void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
+ SmallVectorImpl<ChainUsers> &ChainUsersVec) {
+ // When IVs are used as types of varying widths, they are generally converted
+ // to a wider type with some uses remaining narrow under a (free) trunc.
+ Value *NextIV = getWideOperand(IVOper);
+
+ // Visit all existing chains. Check if its IVOper can be computed as a
+ // profitable loop invariant increment from the last link in the Chain.
+ unsigned ChainIdx = 0, NChains = IVChainVec.size();
+ const SCEV *LastIncExpr = 0;
+ for (; ChainIdx < NChains; ++ChainIdx) {
+ Value *PrevIV = getWideOperand(IVChainVec[ChainIdx].back().IVOperand);
+ if (!isCompatibleIVType(PrevIV, NextIV))
+ continue;
+
+ // A phi node terminates a chain.
+ if (isa<PHINode>(UserInst)
+ && isa<PHINode>(IVChainVec[ChainIdx].back().UserInst))
+ continue;
+
+ if (const SCEV *IncExpr =
+ getProfitableChainIncrement(NextIV, PrevIV, IVChainVec[ChainIdx],
+ L, SE, TLI)) {
+ LastIncExpr = IncExpr;
+ break;
+ }
+ }
+ // If we haven't found a chain, create a new one, unless we hit the max. Don't
+ // bother for phi nodes, because they must be last in the chain.
+ if (ChainIdx == NChains) {
+ if (isa<PHINode>(UserInst))
+ return;
+ if (NChains >= MaxChains && !StressIVChain) {
+ DEBUG(dbgs() << "IV Chain Limit\n");
+ return;
+ }
+ LastIncExpr = SE.getSCEV(NextIV);
+ // IVUsers may have skipped over sign/zero extensions. We don't currently
+ // attempt to form chains involving extensions unless they can be hoisted
+ // into this loop's AddRec.
+ if (!isa<SCEVAddRecExpr>(LastIncExpr))
+ return;
+ ++NChains;
+ IVChainVec.resize(NChains);
+ ChainUsersVec.resize(NChains);
+ DEBUG(dbgs() << "IV Head: (" << *UserInst << ") IV=" << *LastIncExpr
+ << "\n");
+ }
+ else
+ DEBUG(dbgs() << "IV Inc: (" << *UserInst << ") IV+" << *LastIncExpr
+ << "\n");
+
+ // Add this IV user to the end of the chain.
+ IVChainVec[ChainIdx].push_back(IVInc(UserInst, IVOper, LastIncExpr));
+
+ SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
+ // This chain's NearUsers become FarUsers.
+ if (!LastIncExpr->isZero()) {
+ ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
+ NearUsers.end());
+ NearUsers.clear();
+ }
+
+ // All other uses of IVOperand become near uses of the chain.
+ // We currently ignore intermediate values within SCEV expressions, assuming
+ // they will eventually be used by the current chain, or can be computed
+ // from one of the chain increments. To be more precise we could
+ // transitively follow its user and only add leaf IV users to the set.
+ for (Value::use_iterator UseIter = IVOper->use_begin(),
+ UseEnd = IVOper->use_end(); UseIter != UseEnd; ++UseIter) {
+ Instruction *OtherUse = dyn_cast<Instruction>(*UseIter);
+ if (!OtherUse || OtherUse == UserInst)
+ continue;
+ if (SE.isSCEVable(OtherUse->getType())
+ && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
+ && IU.isIVUserOrOperand(OtherUse)) {
+ continue;
+ }
+ NearUsers.insert(OtherUse);
+ }
+
+ // Since this user is part of the chain, it's no longer considered a use
+ // of the chain.
+ ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
+}
+
+/// CollectChains - Populate the vector of Chains.
+///
+/// This decreases ILP at the architecture level. Targets with ample registers,
+/// multiple memory ports, and no register renaming probably don't want
+/// this. However, such targets should probably disable LSR altogether.
+///
+/// The job of LSR is to make a reasonable choice of induction variables across
+/// the loop. Subsequent passes can easily "unchain" computation exposing more
+/// ILP *within the loop* if the target wants it.
+///
+/// Finding the best IV chain is potentially a scheduling problem. Since LSR
+/// will not reorder memory operations, it will recognize this as a chain, but
+/// will generate redundant IV increments. Ideally this would be corrected later
+/// by a smart scheduler:
+/// = A[i]
+/// = A[i+x]
+/// A[i] =
+/// A[i+x] =
+///
+/// TODO: Walk the entire domtree within this loop, not just the path to the
+/// loop latch. This will discover chains on side paths, but requires
+/// maintaining multiple copies of the Chains state.
+void LSRInstance::CollectChains() {
+ SmallVector<ChainUsers, 8> ChainUsersVec;
+
+ SmallVector<BasicBlock *,8> LatchPath;
+ BasicBlock *LoopHeader = L->getHeader();
+ for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
+ Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
+ LatchPath.push_back(Rung->getBlock());
+ }
+ LatchPath.push_back(LoopHeader);
+
+ // Walk the instruction stream from the loop header to the loop latch.
+ for (SmallVectorImpl<BasicBlock *>::reverse_iterator
+ BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend();
+ BBIter != BBEnd; ++BBIter) {
+ for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end();
+ I != E; ++I) {
+ // Skip instructions that weren't seen by IVUsers analysis.
+ if (isa<PHINode>(I) || !IU.isIVUserOrOperand(I))
+ continue;
+
+ // Ignore users that are part of a SCEV expression. This way we only
+ // consider leaf IV Users. This effectively rediscovers a portion of
+ // IVUsers analysis but in program order this time.
+ if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(I)))
+ continue;
+
+ // Remove this instruction from any NearUsers set it may be in.
+ for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
+ ChainIdx < NChains; ++ChainIdx) {
+ ChainUsersVec[ChainIdx].NearUsers.erase(I);
+ }
+ // Search for operands that can be chained.
+ SmallPtrSet<Instruction*, 4> UniqueOperands;
+ User::op_iterator IVOpEnd = I->op_end();
+ User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE);
+ while (IVOpIter != IVOpEnd) {
+ Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
+ if (UniqueOperands.insert(IVOpInst))
+ ChainInstruction(I, IVOpInst, ChainUsersVec);
+ IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
+ }
+ } // Continue walking down the instructions.
+ } // Continue walking down the domtree.
+ // Visit phi backedges to determine if the chain can generate the IV postinc.
+ for (BasicBlock::iterator I = L->getHeader()->begin();
+ PHINode *PN = dyn_cast<PHINode>(I); ++I) {
+ if (!SE.isSCEVable(PN->getType()))
+ continue;
+
+ Instruction *IncV =
+ dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
+ if (IncV)
+ ChainInstruction(PN, IncV, ChainUsersVec);
+ }
+ // Remove any unprofitable chains.
+ unsigned ChainIdx = 0;
+ for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
+ UsersIdx < NChains; ++UsersIdx) {
+ if (!isProfitableChain(IVChainVec[UsersIdx],
+ ChainUsersVec[UsersIdx].FarUsers, SE, TLI))
+ continue;
+ // Preserve the chain at UsersIdx.
+ if (ChainIdx != UsersIdx)
+ IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
+ FinalizeChain(IVChainVec[ChainIdx]);
+ ++ChainIdx;
+ }
+ IVChainVec.resize(ChainIdx);
+}
+
+void LSRInstance::FinalizeChain(IVChain &Chain) {
+ assert(!Chain.empty() && "empty IV chains are not allowed");
+ DEBUG(dbgs() << "Final Chain: " << *Chain[0].UserInst << "\n");
+
+ for (IVChain::const_iterator I = llvm::next(Chain.begin()), E = Chain.end();
+ I != E; ++I) {
+ DEBUG(dbgs() << " Inc: " << *I->UserInst << "\n");
+ User::op_iterator UseI =
+ std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand);
+ assert(UseI != I->UserInst->op_end() && "cannot find IV operand");
+ IVIncSet.insert(UseI);
+ }
+}
+
+/// Return true if the IVInc can be folded into an addressing mode.
+static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
+ Value *Operand, const TargetLowering *TLI) {
+ const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
+ if (!IncConst || !isAddressUse(UserInst, Operand))
+ return false;
+
+ if (IncConst->getValue()->getValue().getMinSignedBits() > 64)
+ return false;
+
+ int64_t IncOffset = IncConst->getValue()->getSExtValue();
+ if (!isAlwaysFoldable(IncOffset, /*BaseGV=*/0, /*HasBaseReg=*/false,
+ LSRUse::Address, getAccessType(UserInst), TLI))
+ return false;
+
+ return true;
+}
+
+/// GenerateIVChain - Generate an add or subtract for each IVInc in a chain to
+/// materialize the IV user's operand from the previous IV user's operand.
+void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakVH> &DeadInsts) {
+ // Find the new IVOperand for the head of the chain. It may have been replaced
+ // by LSR.
+ const IVInc &Head = Chain[0];
+ User::op_iterator IVOpEnd = Head.UserInst->op_end();
+ User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
+ IVOpEnd, L, SE);
+ Value *IVSrc = 0;
+ while (IVOpIter != IVOpEnd) {
+ IVSrc = getWideOperand(*IVOpIter);
+
+ // If this operand computes the expression that the chain needs, we may use
+ // it. (Check this after setting IVSrc which is used below.)
+ //
+ // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
+ // narrow for the chain, so we can no longer use it. We do allow using a
+ // wider phi, assuming the LSR checked for free truncation. In that case we
+ // should already have a truncate on this operand such that
+ // getSCEV(IVSrc) == IncExpr.
+ if (SE.getSCEV(*IVOpIter) == Head.IncExpr
+ || SE.getSCEV(IVSrc) == Head.IncExpr) {
+ break;
+ }
+ IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
+ }
+ if (IVOpIter == IVOpEnd) {
+ // Gracefully give up on this chain.
+ DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
+ return;
+ }
+
+ DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
+ Type *IVTy = IVSrc->getType();
+ Type *IntTy = SE.getEffectiveSCEVType(IVTy);
+ const SCEV *LeftOverExpr = 0;
+ for (IVChain::const_iterator IncI = llvm::next(Chain.begin()),
+ IncE = Chain.end(); IncI != IncE; ++IncI) {
+
+ Instruction *InsertPt = IncI->UserInst;
+ if (isa<PHINode>(InsertPt))
+ InsertPt = L->getLoopLatch()->getTerminator();
+
+ // IVOper will replace the current IV User's operand. IVSrc is the IV
+ // value currently held in a register.
+ Value *IVOper = IVSrc;
+ if (!IncI->IncExpr->isZero()) {
+ // IncExpr was the result of subtraction of two narrow values, so must
+ // be signed.
+ const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy);
+ LeftOverExpr = LeftOverExpr ?
+ SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
+ }
+ if (LeftOverExpr && !LeftOverExpr->isZero()) {
+ // Expand the IV increment.
+ Rewriter.clearPostInc();
+ Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
+ const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc),
+ SE.getUnknown(IncV));
+ IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);
+
+ // If an IV increment can't be folded, use it as the next IV value.
+ if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand,
+ TLI)) {
+ assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
+ IVSrc = IVOper;
+ LeftOverExpr = 0;
+ }
+ }
+ Type *OperTy = IncI->IVOperand->getType();
+ if (IVTy != OperTy) {
+ assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
+ "cannot extend a chained IV");
+ IRBuilder<> Builder(InsertPt);
+ IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
+ }
+ IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper);
+ DeadInsts.push_back(IncI->IVOperand);
+ }
+ // If LSR created a new, wider phi, we may also replace its postinc. We only
+ // do this if we also found a wide value for the head of the chain.
+ if (isa<PHINode>(Chain.back().UserInst)) {
+ for (BasicBlock::iterator I = L->getHeader()->begin();
+ PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
+ if (!isCompatibleIVType(Phi, IVSrc))
+ continue;
+ Instruction *PostIncV = dyn_cast<Instruction>(
+ Phi->getIncomingValueForBlock(L->getLoopLatch()));
+ if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc)))
+ continue;
+ Value *IVOper = IVSrc;
+ Type *PostIncTy = PostIncV->getType();
+ if (IVTy != PostIncTy) {
+ assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types");
+ IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
+ Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
+ IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
+ }
+ Phi->replaceUsesOfWith(PostIncV, IVOper);
+ DeadInsts.push_back(PostIncV);
+ }
+ }
+}
+
void LSRInstance::CollectFixupsAndInitialFormulae() {
for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
+ Instruction *UserInst = UI->getUser();
+ // Skip IV users that are part of profitable IV Chains.
+ User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(),
+ UI->getOperandValToReplace());
+ assert(UseI != UserInst->op_end() && "cannot find IV operand");
+ if (IVIncSet.count(UseI))
+ continue;
+
// Record the uses.
LSRFixup &LF = getNewFixup();
- LF.UserInst = UI->getUser();
+ LF.UserInst = UserInst;
LF.OperandValToReplace = UI->getOperandValToReplace();
LF.PostIncLoops = UI->getPostIncLoops();
@@ -2914,6 +3584,7 @@ LSRInstance::GenerateAllReuseFormulae() {
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
DenseSet<const SCEV *> VisitedRegs;
SmallPtrSet<const SCEV *, 16> Regs;
+ SmallPtrSet<const SCEV *, 16> LoserRegs;
#ifndef NDEBUG
bool ChangedFormulae = false;
#endif
@@ -2933,46 +3604,66 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
FIdx != NumForms; ++FIdx) {
Formula &F = LU.Formulae[FIdx];
- SmallVector<const SCEV *, 2> Key;
- for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
- JE = F.BaseRegs.end(); J != JE; ++J) {
- const SCEV *Reg = *J;
- if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
- Key.push_back(Reg);
+ // Some formulas are instant losers. For example, they may depend on
+ // nonexistent AddRecs from other loops. These need to be filtered
+ // immediately; otherwise heuristics could choose them over others, leading
+ // to an unsatisfactory solution. Passing LoserRegs into RateFormula here
+ // avoids the need to recompute this information across formulae using the
+ // same bad AddRec. Passing LoserRegs is also essential unless we remove
+ // the corresponding bad register from the Regs set.
+ Cost CostF;
+ Regs.clear();
+ CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT,
+ &LoserRegs);
+ if (CostF.isLoser()) {
+ // During initial formula generation, undesirable formulae are generated
+ // by uses within other loops that have some non-trivial address mode or
+ // use the postinc form of the IV. LSR needs to provide these formulae
+ // as the basis of rediscovering the desired formula that uses an AddRec
+ // corresponding to the existing phi. Once all formulae have been
+ // generated, these initial losers may be pruned.
+ DEBUG(dbgs() << " Filtering loser "; F.print(dbgs());
+ dbgs() << "\n");
}
- if (F.ScaledReg &&
- RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
- Key.push_back(F.ScaledReg);
- // Unstable sort by host order ok, because this is only used for
- // uniquifying.
- std::sort(Key.begin(), Key.end());
-
- std::pair<BestFormulaeTy::const_iterator, bool> P =
- BestFormulae.insert(std::make_pair(Key, FIdx));
- if (!P.second) {
+ else {
+ SmallVector<const SCEV *, 2> Key;
+ for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
+ JE = F.BaseRegs.end(); J != JE; ++J) {
+ const SCEV *Reg = *J;
+ if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
+ Key.push_back(Reg);
+ }
+ if (F.ScaledReg &&
+ RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
+ Key.push_back(F.ScaledReg);
+ // Unstable sort by host order ok, because this is only used for
+ // uniquifying.
+ std::sort(Key.begin(), Key.end());
+
+ std::pair<BestFormulaeTy::const_iterator, bool> P =
+ BestFormulae.insert(std::make_pair(Key, FIdx));
+ if (P.second)
+ continue;
+
Formula &Best = LU.Formulae[P.first->second];
- Cost CostF;
- CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
- Regs.clear();
Cost CostBest;
- CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
Regs.clear();
+ CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
if (CostF < CostBest)
std::swap(F, Best);
DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
dbgs() << "\n"
" in favor of formula "; Best.print(dbgs());
dbgs() << '\n');
+ }
#ifndef NDEBUG
- ChangedFormulae = true;
+ ChangedFormulae = true;
#endif
- LU.DeleteFormula(F);
- --FIdx;
- --NumForms;
- Any = true;
- continue;
- }
+ LU.DeleteFormula(F);
+ --FIdx;
+ --NumForms;
+ Any = true;
}
// Now that we've filtered out some formulae, recompute the Regs set.
@@ -3284,24 +3975,29 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
if (LU.Regs.count(*I))
ReqRegs.insert(*I);
- bool AnySatisfiedReqRegs = false;
SmallPtrSet<const SCEV *, 16> NewRegs;
Cost NewCost;
-retry:
for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
E = LU.Formulae.end(); I != E; ++I) {
const Formula &F = *I;
// Ignore formulae which do not use any of the required registers.
+ bool SatisfiedReqReg = true;
for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
JE = ReqRegs.end(); J != JE; ++J) {
const SCEV *Reg = *J;
if ((!F.ScaledReg || F.ScaledReg != Reg) &&
std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
- F.BaseRegs.end())
- goto skip;
+ F.BaseRegs.end()) {
+ SatisfiedReqReg = false;
+ break;
+ }
+ }
+ if (!SatisfiedReqReg) {
+ // If none of the formulae satisfied the required registers, then we could
+ // clear ReqRegs and try again. Currently, we simply give up in this case.
+ continue;
}
- AnySatisfiedReqRegs = true;
// Evaluate the cost of the current formula. If it's already worse than
// the current best, prune the search at that point.
@@ -3317,7 +4013,7 @@ retry:
VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
} else {
DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
- dbgs() << ". Regs:";
+ dbgs() << ".\n Regs:";
for (SmallPtrSet<const SCEV *, 16>::const_iterator
I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
dbgs() << ' ' << **I;
@@ -3328,18 +4024,6 @@ retry:
}
Workspace.pop_back();
}
- skip:;
- }
-
- if (!EnableRetry && !AnySatisfiedReqRegs)
- return;
-
- // If none of the formulae had all of the required registers, relax the
- // constraint so that we don't exclude all formulae.
- if (!AnySatisfiedReqRegs) {
- assert(!ReqRegs.empty() && "Solver failed even without required registers");
- ReqRegs.clear();
- goto retry;
}
}
@@ -3435,9 +4119,10 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
/// AdjustInsertPositionForExpand - Determine an input position which will be
/// dominated by the operands and which will dominate the result.
BasicBlock::iterator
-LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
+LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
const LSRFixup &LF,
- const LSRUse &LU) const {
+ const LSRUse &LU,
+ SCEVExpander &Rewriter) const {
// Collect some instructions which must be dominated by the
// expanding replacement. These must be dominated by any operands that
// will be required in the expansion.
@@ -3472,9 +4157,13 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
}
}
+ assert(!isa<PHINode>(LowestIP) && !isa<LandingPadInst>(LowestIP)
+ && !isa<DbgInfoIntrinsic>(LowestIP) &&
+ "Insertion point must be a normal instruction");
+
// Then, climb up the immediate dominator tree as far as we can go while
// still being dominated by the input positions.
- IP = HoistInsertPosition(IP, Inputs);
+ BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);
// Don't insert instructions before PHI nodes.
while (isa<PHINode>(IP)) ++IP;
@@ -3485,6 +4174,11 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
// Ignore debug intrinsics.
while (isa<DbgInfoIntrinsic>(IP)) ++IP;
+ // Set IP below instructions recently inserted by SCEVExpander. This keeps the
+ // IP consistent across expansions and allows the previously inserted
+ // instructions to be reused by subsequent expansion.
+ while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP;
+
return IP;
}
@@ -3499,7 +4193,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
// Determine an input position which will be dominated by the operands and
// which will dominate the result.
- IP = AdjustInsertPositionForExpand(IP, LF, LU);
+ IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter);
// Inform the Rewriter if we have a post-increment use, so that it can
// perform an advantageous expansion.
@@ -3775,10 +4469,20 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
SmallVector<WeakVH, 16> DeadInsts;
SCEVExpander Rewriter(SE, "lsr");
+#ifndef NDEBUG
+ Rewriter.setDebugType(DEBUG_TYPE);
+#endif
Rewriter.disableCanonicalMode();
Rewriter.enableLSRMode();
Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
+ // Mark phi nodes that terminate chains so the expander tries to reuse them.
+ for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
+ ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
+ if (PHINode *PN = dyn_cast<PHINode>(ChainI->back().UserInst))
+ Rewriter.setChainedPhi(PN);
+ }
+
// Expand the new value definitions and update the users.
for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
E = Fixups.end(); I != E; ++I) {
@@ -3789,6 +4493,11 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
Changed = true;
}
+ for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
+ ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
+ GenerateIVChain(*ChainI, Rewriter, DeadInsts);
+ Changed = true;
+ }
// Clean up after ourselves. This must be done before deleting any
// instructions.
Rewriter.clear();
@@ -3804,11 +4513,29 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {
// If LoopSimplify form is not available, stay out of trouble.
- if (!L->isLoopSimplifyForm()) return;
+ if (!L->isLoopSimplifyForm())
+ return;
// If there's no interesting work to be done, bail early.
if (IU.empty()) return;
+#ifndef NDEBUG
+ // All dominating loops must have preheaders, or SCEVExpander may not be able
+ // to materialize an AddRecExpr whose Start is an outer AddRecExpr.
+ //
+ // IVUsers analysis should only create users that are dominated by simple loop
+ // headers. Since this loop should dominate all of its users, its user list
+ // should be empty if this loop itself is not within a simple loop nest.
+ for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader());
+ Rung; Rung = Rung->getIDom()) {
+ BasicBlock *BB = Rung->getBlock();
+ const Loop *DomLoop = LI.getLoopFor(BB);
+ if (DomLoop && DomLoop->getHeader() == BB) {
+ assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest");
+ }
+ }
+#endif // NDEBUG
+
DEBUG(dbgs() << "\nLSR on loop ";
WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
dbgs() << ":\n");
@@ -3821,24 +4548,18 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
if (IU.empty()) return;
// Skip nested loops until we can model them better with formulae.
- if (!EnableNested && !L->empty()) {
-
- if (EnablePhiElim) {
- // Remove any extra phis created by processing inner loops.
- SmallVector<WeakVH, 16> DeadInsts;
- SCEVExpander Rewriter(SE, "lsr");
- Changed |= Rewriter.replaceCongruentIVs(L, &DT, DeadInsts);
- Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
- }
+ if (!L->empty()) {
DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
return;
}
// Start collecting data and preparing for the solver.
+ CollectChains();
CollectInterestingTypesAndFactors();
CollectFixupsAndInitialFormulae();
CollectLoopInvariantFixupsAndFormulae();
+ assert(!Uses.empty() && "IVUsers reported at least one use");
DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
print_uses(dbgs()));
@@ -3875,14 +4596,6 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
// Now that we've decided what we want, make it so.
ImplementSolution(Solution, P);
-
- if (EnablePhiElim) {
- // Remove any extra phis created by processing inner loops.
- SmallVector<WeakVH, 16> DeadInsts;
- SCEVExpander Rewriter(SE, "lsr");
- Changed |= Rewriter.replaceCongruentIVs(L, &DT, DeadInsts);
- Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
- }
}
void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
@@ -4008,9 +4721,21 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
// Run the main LSR transformation.
Changed |= LSRInstance(TLI, L, this).getChanged();
- // At this point, it is worth checking to see if any recurrence PHIs are also
- // dead, so that we can remove them as well.
+ // Remove any extra phis created by processing inner loops.
Changed |= DeleteDeadPHIs(L->getHeader());
-
+ if (EnablePhiElim) {
+ SmallVector<WeakVH, 16> DeadInsts;
+ SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), "lsr");
+#ifndef NDEBUG
+ Rewriter.setDebugType(DEBUG_TYPE);
+#endif
+ unsigned numFolded = Rewriter.
+ replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, TLI);
+ if (numFolded) {
+ Changed = true;
+ DeleteTriviallyDeadInstructions(DeadInsts);
+ DeleteDeadPHIs(L->getHeader());
+ }
+ }
return Changed;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 91395b2..09a186f 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -40,10 +40,9 @@ UnrollAllowPartial("unroll-allow-partial", cl::init(false), cl::Hidden,
cl::desc("Allows loops to be partially unrolled until "
"-unroll-threshold loop size is reached."));
-// Temporary flag to be removed in 3.0
static cl::opt<bool>
-NoSCEVUnroll("disable-unroll-scev", cl::init(false), cl::Hidden,
- cl::desc("Use ScalarEvolution to analyze loop trip counts for unrolling"));
+UnrollRuntime("unroll-runtime", cl::ZeroOrMore, cl::init(false), cl::Hidden,
+ cl::desc("Unroll loops with run-time trip counts"));
namespace {
class LoopUnroll : public LoopPass {
@@ -68,6 +67,10 @@ namespace {
// explicit -unroll-threshold).
static const unsigned OptSizeUnrollThreshold = 50;
+ // Default unroll count for loops with run-time trip count if
+ // -unroll-count is not set
+ static const unsigned UnrollRuntimeCount = 8;
+
unsigned CurrentCount;
unsigned CurrentThreshold;
bool CurrentAllowPartial;
@@ -101,6 +104,7 @@ INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial) {
@@ -147,23 +151,21 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
// Find trip count and trip multiple if count is not available
unsigned TripCount = 0;
unsigned TripMultiple = 1;
- if (!NoSCEVUnroll) {
- // Find "latch trip count". UnrollLoop assumes that control cannot exit
- // via the loop latch on any iteration prior to TripCount. The loop may exit
- // early via an earlier branch.
- BasicBlock *LatchBlock = L->getLoopLatch();
- if (LatchBlock) {
- TripCount = SE->getSmallConstantTripCount(L, LatchBlock);
- TripMultiple = SE->getSmallConstantTripMultiple(L, LatchBlock);
- }
- }
- else {
- TripCount = L->getSmallConstantTripCount();
- if (TripCount == 0)
- TripMultiple = L->getSmallConstantTripMultiple();
+ // Find "latch trip count". UnrollLoop assumes that control cannot exit
+ // via the loop latch on any iteration prior to TripCount. The loop may exit
+ // early via an earlier branch.
+ BasicBlock *LatchBlock = L->getLoopLatch();
+ if (LatchBlock) {
+ TripCount = SE->getSmallConstantTripCount(L, LatchBlock);
+ TripMultiple = SE->getSmallConstantTripMultiple(L, LatchBlock);
}
- // Automatically select an unroll count.
+ // Use a default unroll count if the user doesn't specify a value and the
+ // trip count is a run-time value; the default differs between run-time
+ // and compile-time trip-count loops.
unsigned Count = CurrentCount;
+ if (UnrollRuntime && CurrentCount == 0 && TripCount == 0)
+ Count = UnrollRuntimeCount;
+
if (Count == 0) {
// Conservative heuristic: if we know the trip count, see if we can
// completely unroll (subject to the threshold, checked below); otherwise
@@ -188,15 +190,23 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
if (TripCount != 1 && Size > Threshold) {
DEBUG(dbgs() << " Too large to fully unroll with count: " << Count
<< " because size: " << Size << ">" << Threshold << "\n");
- if (!CurrentAllowPartial) {
+ if (!CurrentAllowPartial && !(UnrollRuntime && TripCount == 0)) {
DEBUG(dbgs() << " will not try to unroll partially because "
<< "-unroll-allow-partial not given\n");
return false;
}
- // Reduce unroll count to be modulo of TripCount for partial unrolling
- Count = Threshold / LoopSize;
- while (Count != 0 && TripCount%Count != 0) {
- Count--;
+ if (TripCount) {
+ // Reduce unroll count to be modulo of TripCount for partial unrolling
+ Count = Threshold / LoopSize;
+ while (Count != 0 && TripCount%Count != 0)
+ Count--;
+ }
+ else if (UnrollRuntime) {
+ // Reduce unroll count to be a lower power-of-two value
+ while (Count != 0 && Size > Threshold) {
+ Count >>= 1;
+ Size = LoopSize*Count;
+ }
}
if (Count < 2) {
DEBUG(dbgs() << " could not unroll partially\n");
@@ -207,7 +217,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
}
// Unroll the loop.
- if (!UnrollLoop(L, Count, TripCount, TripMultiple, LI, &LPM))
+ if (!UnrollLoop(L, Count, TripCount, UnrollRuntime, TripMultiple, LI, &LPM))
return false;
return true;
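
A standalone sketch of the power-of-two reduction above, under assumed numbers (the default run-time count of 8, a 40-instruction body, and a threshold of 150):

#include <cstdio>

int main() {
  unsigned Count = 8, LoopSize = 40, Threshold = 150;
  unsigned Size = LoopSize * Count;          // 320, too large
  while (Count != 0 && Size > Threshold) {   // same loop as the patch
    Count >>= 1;                             // 8 -> 4 -> 2
    Size = LoopSize * Count;                 // 160 -> 80
  }
  std::printf("Count=%u Size=%u\n", Count, Size);  // Count=2 Size=80
  return 0;
}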
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 458949c..ee23268 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -32,7 +32,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
-#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
@@ -48,6 +48,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <map>
#include <set>
using namespace llvm;
@@ -56,14 +57,70 @@ STATISTIC(NumSwitches, "Number of switches unswitched");
STATISTIC(NumSelects , "Number of selects unswitched");
STATISTIC(NumTrivial , "Number of unswitches that are trivial");
STATISTIC(NumSimplify, "Number of simplifications of unswitched code");
+STATISTIC(TotalInsts, "Total number of instructions analyzed");
-// The specific value of 50 here was chosen based only on intuition and a
+// The specific value of 100 here was chosen based only on intuition and a
// few specific examples.
static cl::opt<unsigned>
Threshold("loop-unswitch-threshold", cl::desc("Max loop size to unswitch"),
- cl::init(50), cl::Hidden);
-
+ cl::init(100), cl::Hidden);
+
namespace {
+
+ class LUAnalysisCache {
+
+ typedef DenseMap<const SwitchInst*, SmallPtrSet<const Value *, 8> >
+ UnswitchedValsMap;
+
+ typedef UnswitchedValsMap::iterator UnswitchedValsIt;
+
+ struct LoopProperties {
+ unsigned CanBeUnswitchedCount;
+ unsigned SizeEstimation;
+ UnswitchedValsMap UnswitchedVals;
+ };
+
+ // Here we use std::map instead of DenseMap, since we cache a pointer to
+ // the current loop's LoopProperties for performance, and it must remain
+ // valid as new loops are inserted.
+ typedef std::map<const Loop*, LoopProperties> LoopPropsMap;
+ typedef LoopPropsMap::iterator LoopPropsMapIt;
+
+ LoopPropsMap LoopsProperties;
+ UnswitchedValsMap* CurLoopInstructions;
+ LoopProperties* CurrentLoopProperties;
+
+ // Maximum size of code we can produce on the remaining iterations.
+ unsigned MaxSize;
+
+ public:
+
+ LUAnalysisCache() :
+ CurLoopInstructions(NULL), CurrentLoopProperties(NULL),
+ MaxSize(Threshold)
+ {}
+
+ // Analyze the loop: check its size and determine whether it is possible
+ // to unswitch it. Returns true if we can unswitch this loop.
+ bool countLoop(const Loop* L);
+
+ // Clean all data related to the given loop.
+ void forgetLoop(const Loop* L);
+
+ // Mark case value as unswitched.
+ // Since a switch instruction can be partially unswitched, keep track of
+ // all unswitched values to avoid extra unswitching in cloned loops.
+ void setUnswitched(const SwitchInst* SI, const Value* V);
+
+ // Check whether this case value has been unswitched before.
+ bool isUnswitched(const SwitchInst* SI, const Value* V);
+
+ // Clone all loop-unswitch related loop properties.
+ // Redistribute unswitching quotas.
+ // Note that the new loop's data is stored inside the VMap.
+ void cloneData(const Loop* NewLoop, const Loop* OldLoop,
+ const ValueToValueMapTy& VMap);
+ };
+
class LoopUnswitch : public LoopPass {
LoopInfo *LI; // Loop information
LPPassManager *LPM;
@@ -71,8 +128,9 @@ namespace {
// LoopProcessWorklist - Used to check if second loop needs processing
// after RewriteLoopBodyWithConditionConstant rewrites first loop.
std::vector<Loop*> LoopProcessWorklist;
- SmallPtrSet<Value *,8> UnswitchedVals;
-
+
+ LUAnalysisCache BranchesInfo;
+
bool OptimizeForSize;
bool redoLoop;
@@ -80,9 +138,9 @@ namespace {
DominatorTree *DT;
BasicBlock *loopHeader;
BasicBlock *loopPreheader;
-
+
// LoopBlocks contains all of the basic blocks of the loop, including the
- // preheader of the loop, the body of the loop, and the exit blocks of the
+ // preheader of the loop, the body of the loop, and the exit blocks of the
// loop, in that order.
std::vector<BasicBlock*> LoopBlocks;
// NewBlocks contains cloned copies of the basic blocks in LoopBlocks.
@@ -90,8 +148,8 @@ namespace {
public:
static char ID; // Pass ID, replacement for typeid
- explicit LoopUnswitch(bool Os = false) :
- LoopPass(ID), OptimizeForSize(Os), redoLoop(false),
+ explicit LoopUnswitch(bool Os = false) :
+ LoopPass(ID), OptimizeForSize(Os), redoLoop(false),
currentLoop(NULL), DT(NULL), loopHeader(NULL),
loopPreheader(NULL) {
initializeLoopUnswitchPass(*PassRegistry::getPassRegistry());
@@ -117,7 +175,7 @@ namespace {
private:
virtual void releaseMemory() {
- UnswitchedVals.clear();
+ BranchesInfo.forgetLoop(currentLoop);
}
/// RemoveLoopFromWorklist - If the specified loop is on the loop worklist,
@@ -147,7 +205,7 @@ namespace {
Constant *Val, bool isEqual);
void EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
- BasicBlock *TrueDest,
+ BasicBlock *TrueDest,
BasicBlock *FalseDest,
Instruction *InsertPt);
@@ -160,6 +218,112 @@ namespace {
};
}
+
+// Analyze the loop: check its size and determine whether it is possible
+// to unswitch it. Returns true if we can unswitch this loop.
+bool LUAnalysisCache::countLoop(const Loop* L) {
+
+ std::pair<LoopPropsMapIt, bool> InsertRes =
+ LoopsProperties.insert(std::make_pair(L, LoopProperties()));
+
+ LoopProperties& Props = InsertRes.first->second;
+
+ if (InsertRes.second) {
+ // New loop.
+
+ // Limit the number of instructions to avoid causing significant code
+ // expansion, and the number of basic blocks, to avoid loops with
+ // large numbers of branches which cause loop unswitching to go crazy.
+ // This is a very ad-hoc heuristic.
+
+ // FIXME: This is overly conservative because it does not take into
+ // consideration code simplification opportunities and code that can
+ // be shared by the resultant unswitched loops.
+ CodeMetrics Metrics;
+ for (Loop::block_iterator I = L->block_begin(),
+ E = L->block_end();
+ I != E; ++I)
+ Metrics.analyzeBasicBlock(*I);
+
+ Props.SizeEstimation = std::min(Metrics.NumInsts, Metrics.NumBlocks * 5);
+ Props.CanBeUnswitchedCount = MaxSize / (Props.SizeEstimation);
+ MaxSize -= Props.SizeEstimation * Props.CanBeUnswitchedCount;
+ }
+
+ if (!Props.CanBeUnswitchedCount) {
+ DEBUG(dbgs() << "NOT unswitching loop %"
+ << L->getHeader()->getName() << ", cost too high: "
+ << L->getBlocks().size() << "\n");
+
+ return false;
+ }
+
+ // Be careful: these links are valid only until a new loop is added.
+ CurrentLoopProperties = &Props;
+ CurLoopInstructions = &Props.UnswitchedVals;
+
+ return true;
+}
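
A worked example of the quota bookkeeping above, under assumed numbers (threshold 100, a loop of 30 instructions across 4 basic blocks):

#include <algorithm>
#include <cstdio>

int main() {
  unsigned MaxSize = 100;                                   // == Threshold
  unsigned SizeEstimation = std::min(30u, 4u * 5u);         // 20
  unsigned CanBeUnswitchedCount = MaxSize / SizeEstimation; // 5 unswitches
  MaxSize -= SizeEstimation * CanBeUnswitchedCount;         // 0 budget left
  std::printf("%u unswitches allowed, budget left %u\n",
              CanBeUnswitchedCount, MaxSize);
  return 0;
}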
+
+// Clean all data related to the given loop.
+void LUAnalysisCache::forgetLoop(const Loop* L) {
+
+ LoopPropsMapIt LIt = LoopsProperties.find(L);
+
+ if (LIt != LoopsProperties.end()) {
+ LoopProperties& Props = LIt->second;
+ MaxSize += Props.CanBeUnswitchedCount * Props.SizeEstimation;
+ LoopsProperties.erase(LIt);
+ }
+
+ CurrentLoopProperties = NULL;
+ CurLoopInstructions = NULL;
+}
+
+// Mark case value as unswitched.
+// Since a switch instruction can be partially unswitched, keep track of
+// all unswitched values to avoid extra unswitching in cloned loops.
+void LUAnalysisCache::setUnswitched(const SwitchInst* SI, const Value* V) {
+ (*CurLoopInstructions)[SI].insert(V);
+}
+
+// Check whether this case value has been unswitched before.
+bool LUAnalysisCache::isUnswitched(const SwitchInst* SI, const Value* V) {
+ return (*CurLoopInstructions)[SI].count(V);
+}
+
+// Clone all loop-unswitch related loop properties.
+// Redistribute unswitching quotas.
+// Note that the new loop's data is stored inside the VMap.
+void LUAnalysisCache::cloneData(const Loop* NewLoop, const Loop* OldLoop,
+ const ValueToValueMapTy& VMap) {
+
+ LoopProperties& NewLoopProps = LoopsProperties[NewLoop];
+ LoopProperties& OldLoopProps = *CurrentLoopProperties;
+ UnswitchedValsMap& Insts = OldLoopProps.UnswitchedVals;
+
+ // Reallocate the "can-be-unswitched" quota.
+
+ --OldLoopProps.CanBeUnswitchedCount;
+ unsigned Quota = OldLoopProps.CanBeUnswitchedCount;
+ NewLoopProps.CanBeUnswitchedCount = Quota / 2;
+ OldLoopProps.CanBeUnswitchedCount = Quota - Quota / 2;
+
+ NewLoopProps.SizeEstimation = OldLoopProps.SizeEstimation;
+
+ // Clone unswitched values info:
+ // for the new loop's switches we clone info about values that were
+ // already unswitched and have redundant successors.
+ for (UnswitchedValsIt I = Insts.begin(); I != Insts.end(); ++I) {
+ const SwitchInst* OldInst = I->first;
+ Value* NewI = VMap.lookup(OldInst);
+ const SwitchInst* NewInst = cast_or_null<SwitchInst>(NewI);
+ assert(NewInst && "All instructions that are in SrcBB must be in VMap.");
+
+ NewLoopProps.UnswitchedVals[NewInst] = OldLoopProps.UnswitchedVals[OldInst];
+ }
+}
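
Continuing that worked example for the redistribution above, assuming a remaining quota of 5 before the unswitch just performed:

#include <cstdio>

int main() {
  unsigned OldCount = 5;
  --OldCount;                     // consume the unswitch just performed
  unsigned Quota = OldCount;      // 4 remaining
  unsigned NewCount = Quota / 2;          // clone receives 2
  OldCount = Quota - Quota / 2;           // original keeps 2
  std::printf("old=%u new=%u\n", OldCount, NewCount);
  return 0;
}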
+
char LoopUnswitch::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnswitch, "loop-unswitch", "Unswitch loops",
false, false)
@@ -169,14 +333,18 @@ INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopUnswitch, "loop-unswitch", "Unswitch loops",
false, false)
-Pass *llvm::createLoopUnswitchPass(bool Os) {
- return new LoopUnswitch(Os);
+Pass *llvm::createLoopUnswitchPass(bool Os) {
+ return new LoopUnswitch(Os);
}
/// FindLIVLoopCondition - Cond is a condition that occurs in L. If it is
/// invariant in the loop, or has an invariant piece, return the invariant.
/// Otherwise, return null.
static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {
+
+ // About to analyze a new instruction; increment the scanned-instruction counter.
+ ++TotalInsts;
+
// We can never unswitch on vector conditions.
if (Cond->getType()->isVectorTy())
return 0;
@@ -201,7 +369,7 @@ static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {
if (Value *RHS = FindLIVLoopCondition(BO->getOperand(1), L, Changed))
return RHS;
}
-
+
return 0;
}
@@ -226,16 +394,36 @@ bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
return Changed;
}
-/// processCurrentLoop - Do actual work and unswitch loop if possible
+/// processCurrentLoop - Do actual work and unswitch loop if possible
/// and profitable.
bool LoopUnswitch::processCurrentLoop() {
bool Changed = false;
- LLVMContext &Context = currentLoop->getHeader()->getContext();
+
+ initLoopData();
+
+ // If LoopSimplify was unable to form a preheader, don't do any unswitching.
+ if (!loopPreheader)
+ return false;
+
+ // Loops with indirectbr cannot be cloned.
+ if (!currentLoop->isSafeToClone())
+ return false;
+
+ // Without dedicated exits, splitting the exit edge may fail.
+ if (!currentLoop->hasDedicatedExits())
+ return false;
+
+ LLVMContext &Context = loopHeader->getContext();
+
+ // We may have already reached the unswitching quota for this loop; if so,
+ // stop unswitching.
+ if (!BranchesInfo.countLoop(currentLoop))
+ return false;
// Loop over all of the basic blocks in the loop. If we find an interior
// block that is branching on a loop-invariant condition, we can unswitch this
// loop.
- for (Loop::block_iterator I = currentLoop->block_begin(),
+ for (Loop::block_iterator I = currentLoop->block_begin(),
E = currentLoop->block_end(); I != E; ++I) {
TerminatorInst *TI = (*I)->getTerminator();
if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
@@ -244,24 +432,37 @@ bool LoopUnswitch::processCurrentLoop() {
if (BI->isConditional()) {
// See if this, or some part of it, is loop invariant. If so, we can
// unswitch on it if we desire.
- Value *LoopCond = FindLIVLoopCondition(BI->getCondition(),
+ Value *LoopCond = FindLIVLoopCondition(BI->getCondition(),
currentLoop, Changed);
- if (LoopCond && UnswitchIfProfitable(LoopCond,
+ if (LoopCond && UnswitchIfProfitable(LoopCond,
ConstantInt::getTrue(Context))) {
++NumBranches;
return true;
}
- }
+ }
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- Value *LoopCond = FindLIVLoopCondition(SI->getCondition(),
+ Value *LoopCond = FindLIVLoopCondition(SI->getCondition(),
currentLoop, Changed);
- if (LoopCond && SI->getNumCases() > 1) {
+ unsigned NumCases = SI->getNumCases();
+ if (LoopCond && NumCases) {
// Find a value to unswitch on:
// FIXME: this should choose the most expensive case!
// FIXME: scan for a case with a non-critical edge?
- Constant *UnswitchVal = SI->getCaseValue(1);
+ Constant *UnswitchVal = NULL;
+
// Do not process same value again and again.
- if (!UnswitchedVals.insert(UnswitchVal))
+ // At this point some cases are already unswitched and some are not yet;
+ // find the first case that has not been unswitched.
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
+ Constant* UnswitchValCandidate = i.getCaseValue();
+ if (!BranchesInfo.isUnswitched(SI, UnswitchValCandidate)) {
+ UnswitchVal = UnswitchValCandidate;
+ break;
+ }
+ }
+
+ if (!UnswitchVal)
continue;
if (UnswitchIfProfitable(LoopCond, UnswitchVal)) {
@@ -270,14 +471,14 @@ bool LoopUnswitch::processCurrentLoop() {
}
}
}
-
+
// Scan the instructions to check for unswitchable values.
- for (BasicBlock::iterator BBI = (*I)->begin(), E = (*I)->end();
+ for (BasicBlock::iterator BBI = (*I)->begin(), E = (*I)->end();
BBI != E; ++BBI)
if (SelectInst *SI = dyn_cast<SelectInst>(BBI)) {
- Value *LoopCond = FindLIVLoopCondition(SI->getCondition(),
+ Value *LoopCond = FindLIVLoopCondition(SI->getCondition(),
currentLoop, Changed);
- if (LoopCond && UnswitchIfProfitable(LoopCond,
+ if (LoopCond && UnswitchIfProfitable(LoopCond,
ConstantInt::getTrue(Context))) {
++NumSelects;
return true;
@@ -297,7 +498,8 @@ static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
BasicBlock *&ExitBB,
std::set<BasicBlock*> &Visited) {
if (!Visited.insert(BB).second) {
- // Already visited. Without more analysis, this could indicate an infinte loop.
+ // Already visited. Without more analysis, this could indicate an infinite
+ // loop.
return false;
} else if (!L->contains(BB)) {
// Otherwise, this is a loop exit, this is fine so long as this is the
@@ -306,7 +508,7 @@ static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
ExitBB = BB;
return true;
}
-
+
// Otherwise, this is an unvisited intra-loop node. Check all successors.
for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI) {
// Check to see if the successor is a trivial loop exit.
@@ -319,12 +521,12 @@ static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
if (I->mayHaveSideEffects())
return false;
-
+
return true;
}
/// isTrivialLoopExitBlock - Return true if the specified block unconditionally
-/// leads to an exit from the specified loop, and has no side-effects in the
+/// leads to an exit from the specified loop, and has no side-effects in the
/// process. If so, return the block that is exited to, otherwise return null.
static BasicBlock *isTrivialLoopExitBlock(Loop *L, BasicBlock *BB) {
std::set<BasicBlock*> Visited;
@@ -352,49 +554,61 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
BasicBlock *Header = currentLoop->getHeader();
TerminatorInst *HeaderTerm = Header->getTerminator();
LLVMContext &Context = Header->getContext();
-
+
BasicBlock *LoopExitBB = 0;
if (BranchInst *BI = dyn_cast<BranchInst>(HeaderTerm)) {
// If the header block doesn't end with a conditional branch on Cond, we
// can't handle it.
if (!BI->isConditional() || BI->getCondition() != Cond)
return false;
-
- // Check to see if a successor of the branch is guaranteed to
- // exit through a unique exit block without having any
+
+ // Check to see if a successor of the branch is guaranteed to
+ // exit through a unique exit block without having any
// side-effects. If so, determine the value of Cond that causes it to do
// this.
- if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
+ if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
BI->getSuccessor(0)))) {
if (Val) *Val = ConstantInt::getTrue(Context);
- } else if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
+ } else if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
BI->getSuccessor(1)))) {
if (Val) *Val = ConstantInt::getFalse(Context);
}
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(HeaderTerm)) {
// If this isn't a switch on Cond, we can't handle it.
if (SI->getCondition() != Cond) return false;
-
+
// Check to see if a successor of the switch is guaranteed to go to the
- // latch block or exit through a one exit block without having any
+ // latch block or exit through a one exit block without having any
// side-effects. If so, determine the value of Cond that causes it to do
- // this. Note that we can't trivially unswitch on the default case.
- for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
- if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
- SI->getSuccessor(i)))) {
+ // this.
+ // Note that we can't trivially unswitch on the default case or
+ // on already unswitched cases.
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
+ BasicBlock* LoopExitCandidate;
+ if ((LoopExitCandidate = isTrivialLoopExitBlock(currentLoop,
+ i.getCaseSuccessor()))) {
// Okay, we found a trivial case, remember the value that is trivial.
- if (Val) *Val = SI->getCaseValue(i);
+ ConstantInt* CaseVal = i.getCaseValue();
+
+ // Check that it was not unswitched before, since values that have
+ // already been unswitched still look trivial.
+ if (BranchesInfo.isUnswitched(SI, CaseVal))
+ continue;
+ LoopExitBB = LoopExitCandidate;
+ if (Val) *Val = CaseVal;
break;
}
+ }
}
// If we didn't find a single unique LoopExit block, or if the loop exit block
// contains phi nodes, this isn't trivial.
if (!LoopExitBB || isa<PHINode>(LoopExitBB->begin()))
return false; // Can't handle this.
-
+
if (LoopExit) *LoopExit = LoopExitBB;
-
+
// We already know that nothing uses any scalar values defined inside of this
// loop. As such, we just have to check to see if this loop will execute any
// side-effecting instructions (e.g. stores, calls, volatile loads) in the
@@ -411,12 +625,6 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
/// unswitch the loop, reprocess the pieces, then return true.
bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
- initLoopData();
-
- // If LoopSimplify was unable to form a preheader, don't do any unswitching.
- if (!loopPreheader)
- return false;
-
Function *F = loopHeader->getParent();
Constant *CondVal = 0;
@@ -434,28 +642,6 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
if (OptimizeForSize || F->hasFnAttr(Attribute::OptimizeForSize))
return false;
- // FIXME: This is overly conservative because it does not take into
- // consideration code simplification opportunities and code that can
- // be shared by the resultant unswitched loops.
- CodeMetrics Metrics;
- for (Loop::block_iterator I = currentLoop->block_begin(),
- E = currentLoop->block_end();
- I != E; ++I)
- Metrics.analyzeBasicBlock(*I);
-
- // Limit the number of instructions to avoid causing significant code
- // expansion, and the number of basic blocks, to avoid loops with
- // large numbers of branches which cause loop unswitching to go crazy.
- // This is a very ad-hoc heuristic.
- if (Metrics.NumInsts > Threshold ||
- Metrics.NumBlocks * 5 > Threshold ||
- Metrics.containsIndirectBr || Metrics.isRecursive) {
- DEBUG(dbgs() << "NOT unswitching loop %"
- << currentLoop->getHeader()->getName() << ", cost too high: "
- << currentLoop->getBlocks().size() << "\n");
- return false;
- }
-
UnswitchNontrivialCondition(LoopCond, Val, currentLoop);
return true;
}
@@ -508,17 +694,17 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
/// UnswitchTrivialCondition - Given a loop that has a trivial unswitchable
/// condition in it (a cond branch from its header block to its latch block,
-/// where the path through the loop that doesn't execute its body has no
+/// where the path through the loop that doesn't execute its body has no
/// side-effects), unswitch it. This doesn't involve any code duplication, just
/// moving the conditional branch outside of the loop and updating loop info.
-void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
- Constant *Val,
+void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
+ Constant *Val,
BasicBlock *ExitBlock) {
DEBUG(dbgs() << "loop-unswitch: Trivial-Unswitch loop %"
<< loopHeader->getName() << " [" << L->getBlocks().size()
<< " blocks] in Function " << L->getHeader()->getParent()->getName()
<< " on cond: " << *Val << " == " << *Cond << "\n");
-
+
// First step, split the preheader, so that we know that there is a safe place
// to insert the conditional branch. We will change loopPreheader to have a
// conditional branch on Cond.
@@ -527,24 +713,24 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
// Now that we have a place to insert the conditional branch, create a place
// to branch to: this is the exit block out of the loop that we should
// short-circuit to.
-
+
// Split this block now, so that the loop maintains its exit block, and so
// that the jump from the preheader can execute the contents of the exit block
// without actually branching to it (the exit block should be dominated by the
// loop header, not the preheader).
assert(!L->contains(ExitBlock) && "Exit block is in the loop?");
BasicBlock *NewExit = SplitBlock(ExitBlock, ExitBlock->begin(), this);
-
- // Okay, now we have a position to branch from and a position to branch to,
+
+ // Okay, now we have a position to branch from and a position to branch to,
// insert the new conditional branch.
- EmitPreheaderBranchOnCondition(Cond, Val, NewExit, NewPH,
+ EmitPreheaderBranchOnCondition(Cond, Val, NewExit, NewPH,
loopPreheader->getTerminator());
LPM->deleteSimpleAnalysisValue(loopPreheader->getTerminator(), L);
loopPreheader->getTerminator()->eraseFromParent();
// We need to reprocess this loop, it could be unswitched again.
redoLoop = true;
-
+
// Now that we know that the loop is never entered when this condition is a
// particular value, rewrite the loop with this info. We know that this will
// at least eliminate the old branch.
@@ -554,7 +740,7 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
/// SplitExitEdges - Split all of the edges from inside the loop to their exit
/// blocks. Update the appropriate Phi nodes as we do so.
-void LoopUnswitch::SplitExitEdges(Loop *L,
+void LoopUnswitch::SplitExitEdges(Loop *L,
const SmallVector<BasicBlock *, 8> &ExitBlocks){
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
@@ -565,8 +751,7 @@ void LoopUnswitch::SplitExitEdges(Loop *L,
// Although SplitBlockPredecessors doesn't preserve loop-simplify in
// general, if we call it on all predecessors of all exits then it does.
if (!ExitBlock->isLandingPad()) {
- SplitBlockPredecessors(ExitBlock, Preds.data(), Preds.size(),
- ".us-lcssa", this);
+ SplitBlockPredecessors(ExitBlock, Preds, ".us-lcssa", this);
} else {
SmallVector<BasicBlock*, 2> NewBBs;
SplitLandingPadPredecessors(ExitBlock, Preds, ".us-lcssa", ".us-lcssa",
@@ -575,10 +760,10 @@ void LoopUnswitch::SplitExitEdges(Loop *L,
}
}
-/// UnswitchNontrivialCondition - We determined that the loop is profitable
-/// to unswitch when LIC equal Val. Split it into loop versions and test the
+/// UnswitchNontrivialCondition - We determined that the loop is profitable
+/// to unswitch when LIC equal Val. Split it into loop versions and test the
/// condition outside of either loop. Return the loops created as Out1/Out2.
-void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
+void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
Loop *L) {
Function *F = loopHeader->getParent();
DEBUG(dbgs() << "loop-unswitch: Unswitching loop %"
@@ -621,6 +806,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
ValueToValueMapTy VMap;
for (unsigned i = 0, e = LoopBlocks.size(); i != e; ++i) {
BasicBlock *NewBB = CloneBasicBlock(LoopBlocks[i], VMap, ".us", F);
+
NewBlocks.push_back(NewBB);
VMap[LoopBlocks[i]] = NewBB; // Keep the BB mapping.
LPM->cloneBasicBlockSimpleAnalysis(LoopBlocks[i], NewBB, L);
@@ -633,6 +819,11 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// Now we create the new Loop object for the versioned loop.
Loop *NewLoop = CloneLoop(L, L->getParentLoop(), VMap, LI, LPM);
+
+ // Recalculate the unswitching quota and inherit the simplified-switch info
+ // for the new loop; clone any other loop-unswitch related properties.
+ BranchesInfo.cloneData(NewLoop, L, VMap);
+
Loop *ParentLoop = L->getParentLoop();
if (ParentLoop) {
// Make sure to add the cloned preheader and exit blocks to the parent loop
@@ -645,7 +836,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// The new exit block should be in the same loop as the old one.
if (Loop *ExitBBLoop = LI->getLoopFor(ExitBlocks[i]))
ExitBBLoop->addBasicBlockToLoop(NewExit, LI->getBase());
-
+
assert(NewExit->getTerminator()->getNumSuccessors() == 1 &&
"Exit block should have been split to have one successor!");
BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
@@ -680,7 +871,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
for (BasicBlock::iterator I = NewBlocks[i]->begin(),
E = NewBlocks[i]->end(); I != E; ++I)
RemapInstruction(I, VMap,RF_NoModuleLevelChanges|RF_IgnoreMissingEntries);
-
+
// Rewrite the original preheader to select between versions of the loop.
BranchInst *OldBR = cast<BranchInst>(loopPreheader->getTerminator());
assert(OldBR->isUnconditional() && OldBR->getSuccessor(0) == LoopBlocks[0] &&
@@ -699,7 +890,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// the condition that we're unswitching on), we don't rewrite the second
// iteration.
WeakVH LICHandle(LIC);
-
+
// Now we rewrite the original code to know that the condition is true and the
// new code to know that the condition is false.
RewriteLoopBodyWithConditionConstant(L, LIC, Val, false);
@@ -714,7 +905,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
/// RemoveFromWorklist - Remove all instances of I from the worklist vector
/// specified.
-static void RemoveFromWorklist(Instruction *I,
+static void RemoveFromWorklist(Instruction *I,
std::vector<Instruction*> &Worklist) {
std::vector<Instruction*>::iterator WI = std::find(Worklist.begin(),
Worklist.end(), I);
@@ -727,7 +918,7 @@ static void RemoveFromWorklist(Instruction *I,
/// ReplaceUsesOfWith - When we find that I really equals V, remove I from the
/// program, replacing all uses with V and update the worklist.
-static void ReplaceUsesOfWith(Instruction *I, Value *V,
+static void ReplaceUsesOfWith(Instruction *I, Value *V,
std::vector<Instruction*> &Worklist,
Loop *L, LPPassManager *LPM) {
DEBUG(dbgs() << "Replace with '" << *V << "': " << *I);
@@ -760,10 +951,10 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
if (BasicBlock *Pred = BB->getSinglePredecessor()) {
// If it has one pred, fold phi nodes in BB.
while (isa<PHINode>(BB->begin()))
- ReplaceUsesOfWith(BB->begin(),
- cast<PHINode>(BB->begin())->getIncomingValue(0),
+ ReplaceUsesOfWith(BB->begin(),
+ cast<PHINode>(BB->begin())->getIncomingValue(0),
Worklist, L, LPM);
-
+
// If this is the header of a loop and the only pred is the latch, we now
// have an unreachable loop.
if (Loop *L = LI->getLoopFor(BB))
@@ -774,15 +965,15 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
LPM->deleteSimpleAnalysisValue(Pred->getTerminator(), L);
Pred->getTerminator()->eraseFromParent();
new UnreachableInst(BB->getContext(), Pred);
-
+
// The loop is now broken, remove it from LI.
RemoveLoopFromHierarchy(L);
-
+
// Reprocess the header, which now IS dead.
RemoveBlockIfDead(BB, Worklist, L);
return;
}
-
+
// If pred ends in a uncond branch, add uncond branch to worklist so that
// the two blocks will get merged.
if (BranchInst *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
@@ -793,11 +984,11 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
}
DEBUG(dbgs() << "Nuking dead block: " << *BB);
-
+
// Remove the instructions in the basic block from the worklist.
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
RemoveFromWorklist(I, Worklist);
-
+
// Anything that uses the instructions in this basic block should have their
// uses replaced with undefs.
// If I is not void type then replaceAllUsesWith undef.
@@ -805,7 +996,7 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
if (!I->getType()->isVoidTy())
I->replaceAllUsesWith(UndefValue::get(I->getType()));
}
-
+
// If this is the edge to the header block for a loop, remove the loop and
// promote all subloops.
if (Loop *BBLoop = LI->getLoopFor(BB)) {
@@ -821,8 +1012,8 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
// Remove the block from the loop info, which removes it from any loops it
// was in.
LI->removeBlock(BB);
-
-
+
+
// Remove phi node entries in successors for this block.
TerminatorInst *TI = BB->getTerminator();
SmallVector<BasicBlock*, 4> Succs;
@@ -830,13 +1021,13 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
Succs.push_back(TI->getSuccessor(i));
TI->getSuccessor(i)->removePredecessor(BB);
}
-
+
// Unique the successors, remove anything with multiple uses.
array_pod_sort(Succs.begin(), Succs.end());
Succs.erase(std::unique(Succs.begin(), Succs.end()), Succs.end());
-
+
// Remove the basic block, including all of the instructions contained in it.
- LPM->deleteSimpleAnalysisValue(BB, L);
+ LPM->deleteSimpleAnalysisValue(BB, L);
BB->eraseFromParent();
// Remove successor blocks here that are not dead, so that we know we only
// have dead blocks in this list. Nondead blocks have a way of becoming dead,
@@ -854,7 +1045,7 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
--i;
}
}
-
+
for (unsigned i = 0, e = Succs.size(); i != e; ++i)
RemoveBlockIfDead(Succs[i], Worklist, L);
}
@@ -877,14 +1068,14 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
Constant *Val,
bool IsEqual) {
assert(!isa<Constant>(LIC) && "Why are we unswitching on a constant?");
-
+
// FIXME: Support correlated properties, like:
// for (...)
// if (li1 < li2)
// ...
// if (li1 > li2)
// ...
-
+
// FOLD boolean conditions (X|LIC), (X&LIC). Fold conditional branches,
// selects, switches.
std::vector<Instruction*> Worklist;
@@ -899,21 +1090,25 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
if (IsEqual)
Replacement = Val;
else
- Replacement = ConstantInt::get(Type::getInt1Ty(Val->getContext()),
+ Replacement = ConstantInt::get(Type::getInt1Ty(Val->getContext()),
!cast<ConstantInt>(Val)->getZExtValue());
-
+
for (Value::use_iterator UI = LIC->use_begin(), E = LIC->use_end();
UI != E; ++UI) {
Instruction *U = dyn_cast<Instruction>(*UI);
if (!U || !L->contains(U))
continue;
- U->replaceUsesOfWith(LIC, Replacement);
Worklist.push_back(U);
}
+
+ for (std::vector<Instruction*>::iterator UI = Worklist.begin();
+ UI != Worklist.end(); ++UI)
+ (*UI)->replaceUsesOfWith(LIC, Replacement);
+
SimplifyCode(Worklist, L);
return;
}
-
+
// Otherwise, we don't know the precise value of LIC, but we do know that it
// is certainly NOT "Val". As such, simplify any uses in the loop that we
// can. This case occurs when we unswitch switch statements.
@@ -925,23 +1120,27 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
Worklist.push_back(U);
- // TODO: We could do other simplifications, for example, turning
+ // TODO: We could do other simplifications, for example, turning
// 'icmp eq LIC, Val' -> false.
// If we know that LIC is not Val, use this info to simplify code.
SwitchInst *SI = dyn_cast<SwitchInst>(U);
if (SI == 0 || !isa<ConstantInt>(Val)) continue;
-
- unsigned DeadCase = SI->findCaseValue(cast<ConstantInt>(Val));
- if (DeadCase == 0) continue; // Default case is live for multiple values.
-
- // Found a dead case value. Don't remove PHI nodes in the
+
+ SwitchInst::CaseIt DeadCase = SI->findCaseValue(cast<ConstantInt>(Val));
+ // Default case is live for multiple values.
+ if (DeadCase == SI->case_default()) continue;
+
+ // Found a dead case value. Don't remove PHI nodes in the
// successor if they become single-entry, those PHI nodes may
// be in the Users list.
BasicBlock *Switch = SI->getParent();
- BasicBlock *SISucc = SI->getSuccessor(DeadCase);
+ BasicBlock *SISucc = DeadCase.getCaseSuccessor();
BasicBlock *Latch = L->getLoopLatch();
+
+ BranchesInfo.setUnswitched(SI, Val);
+
if (!SI->findCaseDest(SISucc)) continue; // Edge is critical.
// If the DeadCase successor dominates the loop latch, then the
// transformation isn't safe since it will delete the sole predecessor edge
@@ -957,7 +1156,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// Compute the successors instead of relying on the return value
// of SplitEdge, since it may have split the switch successor
// after PHI nodes.
- BasicBlock *NewSISucc = SI->getSuccessor(DeadCase);
+ BasicBlock *NewSISucc = DeadCase.getCaseSuccessor();
BasicBlock *OldSISucc = *succ_begin(NewSISucc);
// Create an "unreachable" destination.
BasicBlock *Abort = BasicBlock::Create(Context, "us-unreachable",
@@ -981,7 +1180,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
if (DT)
DT->addNewBlock(Abort, NewSISucc);
}
-
+
SimplifyCode(Worklist, L);
}
@@ -1002,7 +1201,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
// Simple DCE.
if (isInstructionTriviallyDead(I)) {
DEBUG(dbgs() << "Remove dead instruction '" << *I);
-
+
// Add uses to the worklist, which may be dead now.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Instruction *Use = dyn_cast<Instruction>(I->getOperand(i)))
@@ -1017,7 +1216,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
// See if instruction simplification can hack this up. This is common for
// things like "select false, X, Y" after unswitching made the condition be
// 'false'.
- if (Value *V = SimplifyInstruction(I, 0, DT))
+ if (Value *V = SimplifyInstruction(I, 0, 0, DT))
if (LI->replacementPreservesLCSSAForm(I, V)) {
ReplaceUsesOfWith(I, V, Worklist, L, LPM);
continue;
@@ -1034,24 +1233,24 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
if (!SinglePred) continue; // Nothing to do.
assert(SinglePred == Pred && "CFG broken");
- DEBUG(dbgs() << "Merging blocks: " << Pred->getName() << " <- "
+ DEBUG(dbgs() << "Merging blocks: " << Pred->getName() << " <- "
<< Succ->getName() << "\n");
-
+
// Resolve any single entry PHI nodes in Succ.
while (PHINode *PN = dyn_cast<PHINode>(Succ->begin()))
ReplaceUsesOfWith(PN, PN->getIncomingValue(0), Worklist, L, LPM);
-
+
// If Succ has any successors with PHI nodes, update them to have
// entries coming from Pred instead of Succ.
Succ->replaceAllUsesWith(Pred);
-
+
// Move all of the successor contents from Succ to Pred.
Pred->getInstList().splice(BI, Succ->getInstList(), Succ->begin(),
Succ->end());
LPM->deleteSimpleAnalysisValue(BI, L);
BI->eraseFromParent();
RemoveFromWorklist(BI, Worklist);
-
+
// Remove Succ from the loop tree.
LI->removeBlock(Succ);
LPM->deleteSimpleAnalysisValue(Succ, L);
@@ -1059,7 +1258,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
++NumSimplify;
continue;
}
-
+
if (ConstantInt *CB = dyn_cast<ConstantInt>(BI->getCondition())){
// Conditional branch. Turn it into an unconditional branch, then
// remove dead blocks.
diff --git a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index eeb8931..a87cce3 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -147,8 +147,8 @@ struct MemsetRange {
} // end anon namespace
bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
- // If we found more than 8 stores to merge or 64 bytes, use memset.
- if (TheStores.size() >= 8 || End-Start >= 64) return true;
+ // If we found more than 4 stores to merge or 16 bytes, use memset.
+ if (TheStores.size() >= 4 || End-Start >= 16) return true;
// If there is nothing to merge, don't do anything.
if (TheStores.size() < 2) return false;
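
A standalone sketch of the relaxed profitability test above (field meanings assumed; the density analysis that follows in the real function is elided):

#include <cstdint>

bool profitableToUseMemset(unsigned NumStores, int64_t Start, int64_t End) {
  // New thresholds from the patch: 4 merged stores or 16 covered bytes.
  if (NumStores >= 4 || End - Start >= 16) return true;
  if (NumStores < 2) return false;  // nothing to merge
  // ... the real function continues with a density heuristic ...
  return false;
}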
@@ -806,21 +806,25 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
// a) memcpy-memcpy xform which exposes redundance for DSE.
// b) call-memcpy xform for return slot optimization.
MemDepResult DepInfo = MD->getDependency(M);
- if (!DepInfo.isClobber())
- return false;
-
- if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
- return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
-
- if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
- if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
- CopySize->getZExtValue(), C)) {
- MD->removeInstruction(M);
- M->eraseFromParent();
- return true;
+ if (DepInfo.isClobber()) {
+ if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
+ if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
+ CopySize->getZExtValue(), C)) {
+ MD->removeInstruction(M);
+ M->eraseFromParent();
+ return true;
+ }
}
}
-
+
+ AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
+ MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
+ M, M->getParent());
+ if (SrcDepInfo.isClobber()) {
+ if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
+ return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
+ }
+
return false;
}
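
A minimal C-style sketch of the memcpy-memcpy case the new source-dependence query above enables (hypothetical buffers; the pass can rewrite the second copy to read from the original source, exposing the first copy to DSE):

#include <string.h>

void copyChain(char *a, char *b, char *c, size_t n) {
  memcpy(b, a, n);  // MDep: clobbers the source of the second copy
  memcpy(c, b, n);  // M: can become memcpy(c, a, n), making b's copy dead
}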
@@ -945,7 +949,7 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
RepeatInstruction = processMemMove(M);
else if (CallSite CS = (Value*)I) {
for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
- if (CS.paramHasAttr(i+1, Attribute::ByVal))
+ if (CS.isByValArgument(i))
MadeChange |= processByValArgument(CS, i);
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp b/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
index da74e9c..40b0b20 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ObjCARC.cpp
@@ -88,13 +88,14 @@ namespace {
}
#endif
- ValueT &operator[](KeyT Arg) {
+ ValueT &operator[](const KeyT &Arg) {
std::pair<typename MapTy::iterator, bool> Pair =
Map.insert(std::make_pair(Arg, size_t(0)));
if (Pair.second) {
- Pair.first->second = Vector.size();
+ size_t Num = Vector.size();
+ Pair.first->second = Num;
Vector.push_back(std::make_pair(Arg, ValueT()));
- return Vector.back().second;
+ return Vector[Num].second;
}
return Vector[Pair.first->second].second;
}
@@ -104,14 +105,15 @@ namespace {
std::pair<typename MapTy::iterator, bool> Pair =
Map.insert(std::make_pair(InsertPair.first, size_t(0)));
if (Pair.second) {
- Pair.first->second = Vector.size();
+ size_t Num = Vector.size();
+ Pair.first->second = Num;
Vector.push_back(InsertPair);
- return std::make_pair(llvm::prior(Vector.end()), true);
+ return std::make_pair(Vector.begin() + Num, true);
}
return std::make_pair(Vector.begin() + Pair.first->second, false);
}
- const_iterator find(KeyT Key) const {
+ const_iterator find(const KeyT &Key) const {
typename MapTy::const_iterator It = Map.find(Key);
if (It == Map.end()) return Vector.end();
return Vector.begin() + It->second;
@@ -121,7 +123,7 @@ namespace {
/// from the vector, it just zeros out the key in the vector. This leaves
/// iterators intact, but clients must be prepared for zeroed-out keys when
/// iterating.
- void blot(KeyT Key) {
+ void blot(const KeyT &Key) {
typename MapTy::iterator It = Map.find(Key);
if (It == Map.end()) return;
Vector[It->second].first = KeyT();
@@ -179,9 +181,13 @@ static bool IsPotentialUse(const Value *Op) {
Arg->hasNestAttr() ||
Arg->hasStructRetAttr())
return false;
- // Only consider values with pointer types, and not function pointers.
+ // Only consider values with pointer types.
+ // It seems intuitive to exclude function pointer types as well, since
+ // functions are never reference-counted; however, clang occasionally
+ // bitcasts reference-counted pointers to function-pointer types
+ // temporarily.
PointerType *Ty = dyn_cast<PointerType>(Op->getType());
- if (!Ty || isa<FunctionType>(Ty->getElementType()))
+ if (!Ty)
return false;
// Conservatively assume anything else is a potential use.
return true;
@@ -371,7 +377,7 @@ static InstructionClass GetBasicInstructionClass(const Value *V) {
}
// Otherwise, be conservative.
- return IC_User;
+ return isa<InvokeInst>(V) ? IC_CallOrUser : IC_User;
}
/// IsRetain - Test if the given class is objc_retain or
@@ -597,6 +603,46 @@ static bool ModuleHasARC(const Module &M) {
M.getNamedValue("objc_unretainedPointer");
}
+/// DoesObjCBlockEscape - Test whether the given pointer, which is an
+/// Objective-C block pointer, "escapes" (returns true if it does). This
+/// differs from regular escape analysis in that a use as an argument to a
+/// call is not considered an escape.
+static bool DoesObjCBlockEscape(const Value *BlockPtr) {
+ // Walk the def-use chains.
+ SmallVector<const Value *, 4> Worklist;
+ Worklist.push_back(BlockPtr);
+ do {
+ const Value *V = Worklist.pop_back_val();
+ for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
+ UI != UE; ++UI) {
+ const User *UUser = *UI;
+ // Special - Use by a call (callee or argument) is not considered
+ // to be an escape.
+ if (isa<CallInst>(UUser) || isa<InvokeInst>(UUser))
+ continue;
+ // Use by an instruction which copies the value is an escape if the
+ // result is an escape.
+ if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
+ isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {
+ Worklist.push_back(UUser);
+ continue;
+ }
+ // Use by a load is not an escape.
+ if (isa<LoadInst>(UUser))
+ continue;
+ // Use by a store is not an escape if the use is the address.
+ if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
+ if (V != SI->getValueOperand())
+ continue;
+ // Otherwise, conservatively assume an escape.
+ return true;
+ }
+ } while (!Worklist.empty());
+
+ // No escapes found.
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// ARC AliasAnalysis.
//===----------------------------------------------------------------------===//
@@ -850,6 +896,139 @@ bool ObjCARCExpand::runOnFunction(Function &F) {
}
//===----------------------------------------------------------------------===//
+// ARC autorelease pool elimination.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Constants.h"
+
+namespace {
+ /// ObjCARCAPElim - Autorelease pool elimination.
+ class ObjCARCAPElim : public ModulePass {
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual bool runOnModule(Module &M);
+
+ bool MayAutorelease(CallSite CS, unsigned Depth = 0);
+ bool OptimizeBB(BasicBlock *BB);
+
+ public:
+ static char ID;
+ ObjCARCAPElim() : ModulePass(ID) {
+ initializeObjCARCAPElimPass(*PassRegistry::getPassRegistry());
+ }
+ };
+}
+
+char ObjCARCAPElim::ID = 0;
+INITIALIZE_PASS(ObjCARCAPElim,
+ "objc-arc-apelim",
+ "ObjC ARC autorelease pool elimination",
+ false, false)
+
+Pass *llvm::createObjCARCAPElimPass() {
+ return new ObjCARCAPElim();
+}
+
+void ObjCARCAPElim::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+}
+
+/// MayAutorelease - Interprocedurally determine if calls made by the
+/// given call site can possibly produce autoreleases.
+bool ObjCARCAPElim::MayAutorelease(CallSite CS, unsigned Depth) {
+ if (Function *Callee = CS.getCalledFunction()) {
+ if (Callee->isDeclaration() || Callee->mayBeOverridden())
+ return true;
+ for (Function::iterator I = Callee->begin(), E = Callee->end();
+ I != E; ++I) {
+ BasicBlock *BB = I;
+ for (BasicBlock::iterator J = BB->begin(), F = BB->end(); J != F; ++J)
+ if (CallSite JCS = CallSite(J))
+ // This recursion depth limit is arbitrary. It's just large
+ // enough to cover the known interesting test cases.
+ if (Depth < 3 &&
+ !JCS.onlyReadsMemory() &&
+ MayAutorelease(JCS, Depth + 1))
+ return true;
+ }
+ return false;
+ }
+
+ return true;
+}
+
+bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
+ bool Changed = false;
+
+ Instruction *Push = 0;
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
+ Instruction *Inst = I++;
+ switch (GetBasicInstructionClass(Inst)) {
+ case IC_AutoreleasepoolPush:
+ Push = Inst;
+ break;
+ case IC_AutoreleasepoolPop:
+ // If this pop matches a push and nothing in between can autorelease,
+ // zap the pair.
+ if (Push && cast<CallInst>(Inst)->getArgOperand(0) == Push) {
+ Changed = true;
+ Inst->eraseFromParent();
+ Push->eraseFromParent();
+ }
+ Push = 0;
+ break;
+ case IC_CallOrUser:
+ if (MayAutorelease(CallSite(Inst)))
+ Push = 0;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return Changed;
+}
+
+bool ObjCARCAPElim::runOnModule(Module &M) {
+ if (!EnableARCOpts)
+ return false;
+
+ // If nothing in the Module uses ARC, don't do anything.
+ if (!ModuleHasARC(M))
+ return false;
+
+ // Find the llvm.global_ctors variable, as the first step in
+ // identifying the global constructors.
+ GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
+ if (!GV)
+ return false;
+
+ assert(GV->hasDefinitiveInitializer() &&
+ "llvm.global_ctors is uncooperative!");
+
+ bool Changed = false;
+
+ // Dig the constructor functions out of GV's initializer.
+ ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
+ for (User::op_iterator OI = Init->op_begin(), OE = Init->op_end();
+ OI != OE; ++OI) {
+ Value *Op = *OI;
+ // llvm.global_ctors is an array of pairs whose second members
+ // are the constructor functions.
+ Function *F = cast<Function>(cast<ConstantStruct>(Op)->getOperand(1));
+ // Only look at function definitions.
+ if (F->isDeclaration())
+ continue;
+ // Only look at functions with one basic block.
+ if (llvm::next(F->begin()) != F->end())
+ continue;
+ // Ok, a single-block constructor function definition. Try to optimize it.
+ Changed |= OptimizeBB(F->begin());
+ }
+
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
// ARC optimization.
//===----------------------------------------------------------------------===//
@@ -896,8 +1075,9 @@ bool ObjCARCExpand::runOnFunction(Function &F) {
#include "llvm/LLVMContext.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CFG.h"
-#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/DenseSet.h"
STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
@@ -1158,6 +1338,12 @@ namespace {
/// with the "tail" keyword.
bool IsTailCallRelease;
+ /// Partial - True if we've seen an opportunity for partial RR elimination,
+ /// such as pushing calls into a CFG triangle or into one side of a
+ /// CFG diamond.
+ /// TODO: Consider moving this to PtrState.
+ bool Partial;
+
/// ReleaseMetadata - If the Calls are objc_release calls and they all have
/// a clang.imprecise_release tag, this is the metadata tag.
MDNode *ReleaseMetadata;
@@ -1171,7 +1357,8 @@ namespace {
SmallPtrSet<Instruction *, 2> ReverseInsertPts;
RRInfo() :
- KnownSafe(false), IsRetainBlock(false), IsTailCallRelease(false),
+ KnownSafe(false), IsRetainBlock(false),
+ IsTailCallRelease(false), Partial(false),
ReleaseMetadata(0) {}
void clear();
@@ -1182,6 +1369,7 @@ void RRInfo::clear() {
KnownSafe = false;
IsRetainBlock = false;
IsTailCallRelease = false;
+ Partial = false;
ReleaseMetadata = 0;
Calls.clear();
ReverseInsertPts.clear();
@@ -1239,16 +1427,6 @@ namespace {
Seq = NewSeq;
}
- void SetSeqToRelease(MDNode *M) {
- if (Seq == S_None || Seq == S_Use) {
- Seq = M ? S_MovableRelease : S_Release;
- RRI.ReleaseMetadata = M;
- } else if (Seq != S_MovableRelease || RRI.ReleaseMetadata != M) {
- Seq = S_Release;
- RRI.ReleaseMetadata = 0;
- }
- }
-
Sequence GetSeq() const {
return Seq;
}
@@ -1272,8 +1450,16 @@ PtrState::Merge(const PtrState &Other, bool TopDown) {
if (RRI.IsRetainBlock != Other.RRI.IsRetainBlock)
Seq = S_None;
+ // If we're not in a sequence (anymore), drop all associated state.
if (Seq == S_None) {
RRI.clear();
+ } else if (RRI.Partial || Other.RRI.Partial) {
+ // If we're doing a merge on a path that's previously seen a partial
+ // merge, conservatively drop the sequence, to avoid doing partial
+ // RR elimination. If the branch predicates for the two merges differ,
+ // mixing them is unsafe.
+ Seq = S_None;
+ RRI.clear();
} else {
// Conservatively merge the ReleaseMetadata information.
if (RRI.ReleaseMetadata != Other.RRI.ReleaseMetadata)
@@ -1282,8 +1468,15 @@ PtrState::Merge(const PtrState &Other, bool TopDown) {
RRI.KnownSafe = RRI.KnownSafe && Other.RRI.KnownSafe;
RRI.IsTailCallRelease = RRI.IsTailCallRelease && Other.RRI.IsTailCallRelease;
RRI.Calls.insert(Other.RRI.Calls.begin(), Other.RRI.Calls.end());
- RRI.ReverseInsertPts.insert(Other.RRI.ReverseInsertPts.begin(),
- Other.RRI.ReverseInsertPts.end());
+
+ // Merge the insert point sets. If there are any differences,
+ // that makes this a partial merge.
+ RRI.Partial = RRI.ReverseInsertPts.size() !=
+ Other.RRI.ReverseInsertPts.size();
+ for (SmallPtrSet<Instruction *, 2>::const_iterator
+ I = Other.RRI.ReverseInsertPts.begin(),
+ E = Other.RRI.ReverseInsertPts.end(); I != E; ++I)
+ RRI.Partial |= RRI.ReverseInsertPts.insert(*I);
}
}
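+
+// A sketch of the CFG triangle the Partial flag guards against (block names
+// hypothetical): if %then contributes an insert point that the direct
+// %entry -> %cont edge lacks, the merge at %cont sees differing
+// ReverseInsertPts sets and sets RRI.Partial, so any later merge drops the
+// sequence instead of mixing mismatched branch predicates:
+//
+//   entry: br i1 %c, label %then, label %cont
+//   then:  br label %cont
+//   cont:  ; merge point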
@@ -1460,6 +1653,14 @@ namespace {
/// metadata.
unsigned ImpreciseReleaseMDKind;
+ /// CopyOnEscapeMDKind - The Metadata Kind for clang.arc.copy_on_escape
+ /// metadata.
+ unsigned CopyOnEscapeMDKind;
+
+ /// NoObjCARCExceptionsMDKind - The Metadata Kind for
+ /// clang.arc.no_objc_arc_exceptions metadata.
+ unsigned NoObjCARCExceptionsMDKind;
+
Constant *getRetainRVCallee(Module *M);
Constant *getAutoreleaseRVCallee(Module *M);
Constant *getReleaseCallee(Module *M);
@@ -1467,6 +1668,8 @@ namespace {
Constant *getRetainBlockCallee(Module *M);
Constant *getAutoreleaseCallee(Module *M);
+ bool IsRetainBlockOptimizable(const Instruction *Inst);
+
void OptimizeRetainCall(Function &F, Instruction *Retain);
bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV);
@@ -1475,9 +1678,16 @@ namespace {
void CheckForCFGHazards(const BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
BBState &MyStates) const;
+ bool VisitInstructionBottomUp(Instruction *Inst,
+ BasicBlock *BB,
+ MapVector<Value *, RRInfo> &Retains,
+ BBState &MyStates);
bool VisitBottomUp(BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
MapVector<Value *, RRInfo> &Retains);
+ bool VisitInstructionTopDown(Instruction *Inst,
+ DenseMap<Value *, RRInfo> &Releases,
+ BBState &MyStates);
bool VisitTopDown(BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
DenseMap<Value *, RRInfo> &Releases);
@@ -1534,6 +1744,22 @@ void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
}
+bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
+ // Without the magic metadata tag, we have to assume this might be an
+ // objc_retainBlock call inserted to convert a block pointer to an id,
+ // in which case it really is needed.
+ if (!Inst->getMetadata(CopyOnEscapeMDKind))
+ return false;
+
+ // If the pointer "escapes" (not including being used in a call),
+ // the copy may be needed.
+ if (DoesObjCBlockEscape(Inst))
+ return false;
+
+ // Otherwise, it's not needed.
+ return true;
+}
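+
+// A sketch of the call this predicate accepts (names hypothetical): an
+// objc_retainBlock tagged by the front end with clang.arc.copy_on_escape:
+//
+//   %b = call i8* @objc_retainBlock(i8* %block), !clang.arc.copy_on_escape !0
+//
+// Without the tag, or if the block pointer escapes, the call must be kept.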
+
Constant *ObjCARCOpt::getRetainRVCallee(Module *M) {
if (!RetainRVCallee) {
LLVMContext &C = M->getContext();
@@ -1737,6 +1963,7 @@ namespace {
/// use here.
enum DependenceKind {
NeedsPositiveRetainCount,
+ AutoreleasePoolBoundary,
CanChangeRetainCount,
RetainAutoreleaseDep, ///< Blocks objc_retainAutorelease.
RetainAutoreleaseRVDep, ///< Blocks objc_retainAutoreleaseReturnValue.
@@ -1766,6 +1993,19 @@ Depends(DependenceKind Flavor, Instruction *Inst, const Value *Arg,
}
}
+ case AutoreleasePoolBoundary: {
+ InstructionClass Class = GetInstructionClass(Inst);
+ switch (Class) {
+ case IC_AutoreleasepoolPop:
+ case IC_AutoreleasepoolPush:
+ // These mark the beginning and end of an autorelease pool scope.
+ return true;
+ default:
+ // Nothing else does this.
+ return false;
+ }
+ }
+
case CanChangeRetainCount: {
InstructionClass Class = GetInstructionClass(Inst);
switch (Class) {
@@ -1783,6 +2023,7 @@ Depends(DependenceKind Flavor, Instruction *Inst, const Value *Arg,
case RetainAutoreleaseDep:
switch (GetBasicInstructionClass(Inst)) {
case IC_AutoreleasepoolPop:
+ case IC_AutoreleasepoolPush:
// Don't merge an objc_autorelease with an objc_retain inside a different
// autoreleasepool scope.
return true;
@@ -1794,7 +2035,6 @@ Depends(DependenceKind Flavor, Instruction *Inst, const Value *Arg,
// Nothing else matters for objc_retainAutorelease formation.
return false;
}
- break;
case RetainAutoreleaseRVDep: {
InstructionClass Class = GetBasicInstructionClass(Inst);
@@ -1808,7 +2048,6 @@ Depends(DependenceKind Flavor, Instruction *Inst, const Value *Arg,
// retainAutoreleaseReturnValue formation.
return CanInterruptRV(Class);
}
- break;
}
case RetainRVDep:
@@ -1816,7 +2055,6 @@ Depends(DependenceKind Flavor, Instruction *Inst, const Value *Arg,
}
llvm_unreachable("Invalid dependence flavor");
- return true;
}
/// FindDependencies - Walk up the CFG from StartPos (which is in StartBB) and
@@ -1920,17 +2158,26 @@ ObjCARCOpt::OptimizeRetainCall(Function &F, Instruction *Retain) {
/// return true.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
- // Check for the argument being from an immediately preceding call.
+ // Check for the argument being from an immediately preceding call or invoke.
Value *Arg = GetObjCArg(RetainRV);
CallSite CS(Arg);
- if (Instruction *Call = CS.getInstruction())
+ if (Instruction *Call = CS.getInstruction()) {
if (Call->getParent() == RetainRV->getParent()) {
BasicBlock::iterator I = Call;
++I;
while (isNoopInstruction(I)) ++I;
if (&*I == RetainRV)
return false;
+ } else if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
+ BasicBlock *RetainRVParent = RetainRV->getParent();
+ if (II->getNormalDest() == RetainRVParent) {
+ BasicBlock::iterator I = RetainRVParent->begin();
+ while (isNoopInstruction(I)) ++I;
+ if (&*I == RetainRV)
+ return false;
+ }
}
+ }
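+ // The invoke case above matches IR such as (hypothetical values):
+ //
+ //   %x = invoke i8* @foo() to label %normal unwind label %lpad
+ // normal:
+ //   %y = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
+ //
+ // where the retainRV is the first non-noop instruction of the invoke's
+ // normal destination, so it is left as a retainRV.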
// Check for being preceded by an objc_autoreleaseReturnValue on the same
// pointer. In this case, we can delete the pair.
@@ -2144,9 +2391,34 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
// Check that there is nothing that cares about the reference
// count between the call and the phi.
- FindDependencies(NeedsPositiveRetainCount, Arg,
- Inst->getParent(), Inst,
- DependingInstructions, Visited, PA);
+ switch (Class) {
+ case IC_Retain:
+ case IC_RetainBlock:
+ // These can always be moved up.
+ break;
+ case IC_Release:
+ // These can't be moved across things that care about the retain count.
+ FindDependencies(NeedsPositiveRetainCount, Arg,
+ Inst->getParent(), Inst,
+ DependingInstructions, Visited, PA);
+ break;
+ case IC_Autorelease:
+ // These can't be moved across autorelease pool scope boundaries.
+ FindDependencies(AutoreleasePoolBoundary, Arg,
+ Inst->getParent(), Inst,
+ DependingInstructions, Visited, PA);
+ break;
+ case IC_RetainRV:
+ case IC_AutoreleaseRV:
+ // Don't move these; the RV optimization depends on the autoreleaseRV
+ // being tail called, and the retainRV being immediately after a call
+ // (which might still happen if we get lucky with codegen layout, but
+ // it's not worth taking the chance).
+ continue;
+ default:
+ llvm_unreachable("Invalid dependence flavor");
+ }
+
if (DependingInstructions.size() == 1 &&
*DependingInstructions.begin() == PN) {
Changed = true;
@@ -2186,7 +2458,7 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
BBState &MyStates) const {
// If any top-down local-use or possible-dec has a succ which is earlier in
// the sequence, forget it.
- for (BBState::ptr_const_iterator I = MyStates.top_down_ptr_begin(),
+ for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
E = MyStates.top_down_ptr_end(); I != E; ++I)
switch (I->second.GetSeq()) {
default: break;
@@ -2195,14 +2467,32 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
bool SomeSuccHasSame = false;
bool AllSuccsHaveSame = true;
- PtrState &S = MyStates.getPtrTopDownState(Arg);
- for (succ_const_iterator SI(TI), SE(TI, false); SI != SE; ++SI) {
- PtrState &SuccS = BBStates[*SI].getPtrBottomUpState(Arg);
- switch (SuccS.GetSeq()) {
+ PtrState &S = I->second;
+ succ_const_iterator SI(TI), SE(TI, false);
+
+ // If the terminator is an invoke marked with the
+ // clang.arc.no_objc_arc_exceptions metadata, the unwind edge can be
+ // ignored, for ARC purposes.
+ if (isa<InvokeInst>(TI) && TI->getMetadata(NoObjCARCExceptionsMDKind))
+ --SE;
+
+ for (; SI != SE; ++SI) {
+ Sequence SuccSSeq = S_None;
+ bool SuccSRRIKnownSafe = false;
+ // If VisitBottomUp has visited this successor, take what we know about it.
+ DenseMap<const BasicBlock *, BBState>::iterator BBI = BBStates.find(*SI);
+ if (BBI != BBStates.end()) {
+ const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
+ SuccSSeq = SuccS.GetSeq();
+ SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
+ }
+ switch (SuccSSeq) {
case S_None:
case S_CanRelease: {
- if (!S.RRI.KnownSafe && !SuccS.RRI.KnownSafe)
+ if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
S.ClearSequenceProgress();
+ break;
+ }
continue;
}
case S_Use:
@@ -2211,7 +2501,7 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
case S_Stop:
case S_Release:
case S_MovableRelease:
- if (!S.RRI.KnownSafe && !SuccS.RRI.KnownSafe)
+ if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
AllSuccsHaveSame = false;
break;
case S_Retain:
@@ -2223,19 +2513,38 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
// guards against loops in the middle of a sequence.
if (SomeSuccHasSame && !AllSuccsHaveSame)
S.ClearSequenceProgress();
+ break;
}
case S_CanRelease: {
const Value *Arg = I->first;
const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
bool SomeSuccHasSame = false;
bool AllSuccsHaveSame = true;
- PtrState &S = MyStates.getPtrTopDownState(Arg);
- for (succ_const_iterator SI(TI), SE(TI, false); SI != SE; ++SI) {
- PtrState &SuccS = BBStates[*SI].getPtrBottomUpState(Arg);
- switch (SuccS.GetSeq()) {
+ PtrState &S = I->second;
+ succ_const_iterator SI(TI), SE(TI, false);
+
+ // If the terminator is an invoke marked with the
+ // clang.arc.no_objc_arc_exceptions metadata, the unwind edge can be
+ // ignored, for ARC purposes.
+ if (isa<InvokeInst>(TI) && TI->getMetadata(NoObjCARCExceptionsMDKind))
+ --SE;
+
+ for (; SI != SE; ++SI) {
+ Sequence SuccSSeq = S_None;
+ bool SuccSRRIKnownSafe = false;
+ // If VisitBottomUp has visited this successor, take what we know about it.
+ DenseMap<const BasicBlock *, BBState>::iterator BBI = BBStates.find(*SI);
+ if (BBI != BBStates.end()) {
+ const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
+ SuccSSeq = SuccS.GetSeq();
+ SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
+ }
+ switch (SuccSSeq) {
case S_None: {
- if (!S.RRI.KnownSafe && !SuccS.RRI.KnownSafe)
+ if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
S.ClearSequenceProgress();
+ break;
+ }
continue;
}
case S_CanRelease:
@@ -2245,7 +2554,7 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
case S_Release:
case S_MovableRelease:
case S_Use:
- if (!S.RRI.KnownSafe && !SuccS.RRI.KnownSafe)
+ if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
AllSuccsHaveSame = false;
break;
case S_Retain:
@@ -2257,8 +2566,167 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
// guards against loops in the middle of a sequence.
if (SomeSuccHasSame && !AllSuccsHaveSame)
S.ClearSequenceProgress();
+ break;
+ }
+ }
+}
+
+bool
+ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
+ BasicBlock *BB,
+ MapVector<Value *, RRInfo> &Retains,
+ BBState &MyStates) {
+ bool NestingDetected = false;
+ InstructionClass Class = GetInstructionClass(Inst);
+ const Value *Arg = 0;
+
+ switch (Class) {
+ case IC_Release: {
+ Arg = GetObjCArg(Inst);
+
+ PtrState &S = MyStates.getPtrBottomUpState(Arg);
+
+ // Check for two releases in a row on the same pointer. If so, make
+ // a note, and we'll cycle back to revisit it after we've
+ // hopefully eliminated the second release, which may allow us to
+ // eliminate the first release too.
+ // Theoretically we could implement removal of nested retain+release
+ // pairs by making PtrState hold a stack of states, but this is
+ // simple and avoids adding overhead for the non-nested case.
+ if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease)
+ NestingDetected = true;
+
+ S.RRI.clear();
+
+ MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
+ S.SetSeq(ReleaseMetadata ? S_MovableRelease : S_Release);
+ S.RRI.ReleaseMetadata = ReleaseMetadata;
+ S.RRI.KnownSafe = S.IsKnownNested() || S.IsKnownIncremented();
+ S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
+ S.RRI.Calls.insert(Inst);
+
+ S.IncrementRefCount();
+ S.IncrementNestCount();
+ break;
+ }
+ case IC_RetainBlock:
+ // An objc_retainBlock call with just a use may need to be kept,
+ // because it may be copying a block from the stack to the heap.
+ if (!IsRetainBlockOptimizable(Inst))
+ break;
+ // FALLTHROUGH
+ case IC_Retain:
+ case IC_RetainRV: {
+ Arg = GetObjCArg(Inst);
+
+ PtrState &S = MyStates.getPtrBottomUpState(Arg);
+ S.DecrementRefCount();
+ S.SetAtLeastOneRefCount();
+ S.DecrementNestCount();
+
+ switch (S.GetSeq()) {
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ case S_Use:
+ S.RRI.ReverseInsertPts.clear();
+ // FALL THROUGH
+ case S_CanRelease:
+ // Don't do retain+release tracking for IC_RetainRV, because it's
+ // better to let it remain as the first instruction after a call.
+ if (Class != IC_RetainRV) {
+ S.RRI.IsRetainBlock = Class == IC_RetainBlock;
+ Retains[Inst] = S.RRI;
+ }
+ S.ClearSequenceProgress();
+ break;
+ case S_None:
+ break;
+ case S_Retain:
+ llvm_unreachable("bottom-up pointer in retain state!");
}
+ return NestingDetected;
+ }
+ case IC_AutoreleasepoolPop:
+ // Conservatively, clear MyStates for all known pointers.
+ MyStates.clearBottomUpPointers();
+ return NestingDetected;
+ case IC_AutoreleasepoolPush:
+ case IC_None:
+ // These are irrelevant.
+ return NestingDetected;
+ default:
+ break;
+ }
+
+ // Consider any other possible effects of this instruction on each
+ // pointer being tracked.
+ for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
+ ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
+ const Value *Ptr = MI->first;
+ if (Ptr == Arg)
+ continue; // Handled above.
+ PtrState &S = MI->second;
+ Sequence Seq = S.GetSeq();
+
+ // Check for possible releases.
+ if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
+ S.DecrementRefCount();
+ switch (Seq) {
+ case S_Use:
+ S.SetSeq(S_CanRelease);
+ continue;
+ case S_CanRelease:
+ case S_Release:
+ case S_MovableRelease:
+ case S_Stop:
+ case S_None:
+ break;
+ case S_Retain:
+ llvm_unreachable("bottom-up pointer in retain state!");
+ }
}
+
+ // Check for possible direct uses.
+ switch (Seq) {
+ case S_Release:
+ case S_MovableRelease:
+ if (CanUse(Inst, Ptr, PA, Class)) {
+ assert(S.RRI.ReverseInsertPts.empty());
+ // If this is an invoke instruction, we're scanning it as part of
+ // one of its successor blocks, since we can't insert code after it
+ // in its own block, and we don't want to split critical edges.
+ if (isa<InvokeInst>(Inst))
+ S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
+ else
+ S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
+ S.SetSeq(S_Use);
+ } else if (Seq == S_Release &&
+ (Class == IC_User || Class == IC_CallOrUser)) {
+ // Non-movable releases depend on any possible objc pointer use.
+ S.SetSeq(S_Stop);
+ assert(S.RRI.ReverseInsertPts.empty());
+ // As above; handle invoke specially.
+ if (isa<InvokeInst>(Inst))
+ S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
+ else
+ S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
+ }
+ break;
+ case S_Stop:
+ if (CanUse(Inst, Ptr, PA, Class))
+ S.SetSeq(S_Use);
+ break;
+ case S_CanRelease:
+ case S_Use:
+ case S_None:
+ break;
+ case S_Retain:
+ llvm_unreachable("bottom-up pointer in retain state!");
+ }
+ }
+
+ return NestingDetected;
}
bool
@@ -2274,7 +2742,13 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
succ_const_iterator SI(TI), SE(TI, false);
if (SI == SE)
MyStates.SetAsExit();
- else
+ else {
+ // If the terminator is an invoke marked with the
+ // clang.arc.no_objc_arc_exceptions metadata, the unwind edge can be
+ // ignored, for ARC purposes.
+ if (isa<InvokeInst>(TI) && TI->getMetadata(NoObjCARCExceptionsMDKind))
+ --SE;
+
do {
const BasicBlock *Succ = *SI++;
if (Succ == BB)
@@ -2295,145 +2769,169 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
}
break;
} while (SI != SE);
+ }
// Visit all the instructions, bottom-up.
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
Instruction *Inst = llvm::prior(I);
- InstructionClass Class = GetInstructionClass(Inst);
- const Value *Arg = 0;
- switch (Class) {
- case IC_Release: {
- Arg = GetObjCArg(Inst);
+ // Invoke instructions are visited as part of their successors (below).
+ if (isa<InvokeInst>(Inst))
+ continue;
+
+ NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
+ }
+
+ // If there's a predecessor with an invoke, visit the invoke as
+ // if it were part of this block, since we can't insert code after
+ // an invoke in its own block, and we don't want to split critical
+ // edges.
+ for (pred_iterator PI(BB), PE(BB, false); PI != PE; ++PI) {
+ BasicBlock *Pred = *PI;
+ TerminatorInst *PredTI = cast<TerminatorInst>(&Pred->back());
+ if (isa<InvokeInst>(PredTI))
+ NestingDetected |= VisitInstructionBottomUp(PredTI, BB, Retains, MyStates);
+ }
+
+ return NestingDetected;
+}
+
+bool
+ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
+ DenseMap<Value *, RRInfo> &Releases,
+ BBState &MyStates) {
+ bool NestingDetected = false;
+ InstructionClass Class = GetInstructionClass(Inst);
+ const Value *Arg = 0;
+
+ switch (Class) {
+ case IC_RetainBlock:
+ // An objc_retainBlock call with just a use may need to be kept,
+ // because it may be copying a block from the stack to the heap.
+ if (!IsRetainBlockOptimizable(Inst))
+ break;
+ // FALLTHROUGH
+ case IC_Retain:
+ case IC_RetainRV: {
+ Arg = GetObjCArg(Inst);
- PtrState &S = MyStates.getPtrBottomUpState(Arg);
+ PtrState &S = MyStates.getPtrTopDownState(Arg);
- // If we see two releases in a row on the same pointer. If so, make
+ // Don't do retain+release tracking for IC_RetainRV, because it's
+ // better to let it remain as the first instruction after a call.
+ if (Class != IC_RetainRV) {
+ // Check for two retains in a row on the same pointer. If so, make
// a note, and we'll cycle back to revisit it after we've
- // hopefully eliminated the second release, which may allow us to
- // eliminate the first release too.
+ // hopefully eliminated the second retain, which may allow us to
+ // eliminate the first retain too.
// Theoretically we could implement removal of nested retain+release
// pairs by making PtrState hold a stack of states, but this is
// simple and avoids adding overhead for the non-nested case.
- if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease)
+ if (S.GetSeq() == S_Retain)
NestingDetected = true;
- S.SetSeqToRelease(Inst->getMetadata(ImpreciseReleaseMDKind));
+ S.SetSeq(S_Retain);
S.RRI.clear();
- S.RRI.KnownSafe = S.IsKnownNested() || S.IsKnownIncremented();
- S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
+ S.RRI.IsRetainBlock = Class == IC_RetainBlock;
+ // Don't check S.IsKnownIncremented() here because it's not
+ // sufficient.
+ S.RRI.KnownSafe = S.IsKnownNested();
S.RRI.Calls.insert(Inst);
+ }
- S.IncrementRefCount();
- S.IncrementNestCount();
+ S.SetAtLeastOneRefCount();
+ S.IncrementRefCount();
+ S.IncrementNestCount();
+ return NestingDetected;
+ }
+ case IC_Release: {
+ Arg = GetObjCArg(Inst);
+
+ PtrState &S = MyStates.getPtrTopDownState(Arg);
+ S.DecrementRefCount();
+ S.DecrementNestCount();
+
+ switch (S.GetSeq()) {
+ case S_Retain:
+ case S_CanRelease:
+ S.RRI.ReverseInsertPts.clear();
+ // FALL THROUGH
+ case S_Use:
+ S.RRI.ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
+ S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
+ Releases[Inst] = S.RRI;
+ S.ClearSequenceProgress();
+ break;
+ case S_None:
break;
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ llvm_unreachable("top-down pointer in release state!");
}
- case IC_RetainBlock:
- case IC_Retain:
- case IC_RetainRV: {
- Arg = GetObjCArg(Inst);
+ break;
+ }
+ case IC_AutoreleasepoolPop:
+ // Conservatively, clear MyStates for all known pointers.
+ MyStates.clearTopDownPointers();
+ return NestingDetected;
+ case IC_AutoreleasepoolPush:
+ case IC_None:
+ // These are irrelevant.
+ return NestingDetected;
+ default:
+ break;
+ }
- PtrState &S = MyStates.getPtrBottomUpState(Arg);
+ // Consider any other possible effects of this instruction on each
+ // pointer being tracked.
+ for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
+ ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
+ const Value *Ptr = MI->first;
+ if (Ptr == Arg)
+ continue; // Handled above.
+ PtrState &S = MI->second;
+ Sequence Seq = S.GetSeq();
+
+ // Check for possible releases.
+ if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
S.DecrementRefCount();
- S.SetAtLeastOneRefCount();
- S.DecrementNestCount();
-
- // An objc_retainBlock call with just a use still needs to be kept,
- // because it may be copying a block from the stack to the heap.
- if (Class == IC_RetainBlock && S.GetSeq() == S_Use)
+ switch (Seq) {
+ case S_Retain:
S.SetSeq(S_CanRelease);
+ assert(S.RRI.ReverseInsertPts.empty());
+ S.RRI.ReverseInsertPts.insert(Inst);
- switch (S.GetSeq()) {
- case S_Stop:
- case S_Release:
- case S_MovableRelease:
+ // One call can't cause a transition both from S_Retain to S_CanRelease
+ // and from S_CanRelease to S_Use. If we've made the first transition,
+ // we're done.
+ continue;
case S_Use:
- S.RRI.ReverseInsertPts.clear();
- // FALL THROUGH
case S_CanRelease:
- // Don't do retain+release tracking for IC_RetainRV, because it's
- // better to let it remain as the first instruction after a call.
- if (Class != IC_RetainRV) {
- S.RRI.IsRetainBlock = Class == IC_RetainBlock;
- Retains[Inst] = S.RRI;
- }
- S.ClearSequenceProgress();
- break;
case S_None:
break;
- case S_Retain:
- llvm_unreachable("bottom-up pointer in retain state!");
- }
- continue;
- }
- case IC_AutoreleasepoolPop:
- // Conservatively, clear MyStates for all known pointers.
- MyStates.clearBottomUpPointers();
- continue;
- case IC_AutoreleasepoolPush:
- case IC_None:
- // These are irrelevant.
- continue;
- default:
- break;
- }
-
- // Consider any other possible effects of this instruction on each
- // pointer being tracked.
- for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
- ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
- const Value *Ptr = MI->first;
- if (Ptr == Arg)
- continue; // Handled above.
- PtrState &S = MI->second;
- Sequence Seq = S.GetSeq();
-
- // Check for possible releases.
- if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
- S.DecrementRefCount();
- switch (Seq) {
- case S_Use:
- S.SetSeq(S_CanRelease);
- continue;
- case S_CanRelease:
- case S_Release:
- case S_MovableRelease:
- case S_Stop:
- case S_None:
- break;
- case S_Retain:
- llvm_unreachable("bottom-up pointer in retain state!");
- }
- }
-
- // Check for possible direct uses.
- switch (Seq) {
+ case S_Stop:
case S_Release:
case S_MovableRelease:
- if (CanUse(Inst, Ptr, PA, Class)) {
- assert(S.RRI.ReverseInsertPts.empty());
- S.RRI.ReverseInsertPts.insert(Inst);
- S.SetSeq(S_Use);
- } else if (Seq == S_Release &&
- (Class == IC_User || Class == IC_CallOrUser)) {
- // Non-movable releases depend on any possible objc pointer use.
- S.SetSeq(S_Stop);
- assert(S.RRI.ReverseInsertPts.empty());
- S.RRI.ReverseInsertPts.insert(Inst);
- }
- break;
- case S_Stop:
- if (CanUse(Inst, Ptr, PA, Class))
- S.SetSeq(S_Use);
- break;
- case S_CanRelease:
- case S_Use:
- case S_None:
- break;
- case S_Retain:
- llvm_unreachable("bottom-up pointer in retain state!");
+ llvm_unreachable("top-down pointer in release state!");
}
}
+
+ // Check for possible direct uses.
+ switch (Seq) {
+ case S_CanRelease:
+ if (CanUse(Inst, Ptr, PA, Class))
+ S.SetSeq(S_Use);
+ break;
+ case S_Retain:
+ case S_Use:
+ case S_None:
+ break;
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ llvm_unreachable("top-down pointer in release state!");
+ }
}
return NestingDetected;
@@ -2453,22 +2951,31 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB,
MyStates.SetAsEntry();
else
do {
- const BasicBlock *Pred = *PI++;
+ unsigned OperandNo = PI.getOperandNo();
+ const Use &Us = PI.getUse();
+ ++PI;
+
+ // Skip invoke unwind edges on invoke instructions marked with
+ // clang.arc.no_objc_arc_exceptions.
+ if (const InvokeInst *II = dyn_cast<InvokeInst>(Us.getUser()))
+ if (OperandNo == II->getNumArgOperands() + 2 &&
+ II->getMetadata(NoObjCARCExceptionsMDKind))
+ continue;
+
+ const BasicBlock *Pred = cast<TerminatorInst>(Us.getUser())->getParent();
if (Pred == BB)
continue;
DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
- assert(I != BBStates.end());
// If we haven't seen this node yet, then we've found a CFG cycle.
// Be optimistic here; it's CheckForCFGHazards' job to detect trouble.
- if (!I->second.isVisitedTopDown())
+ if (I == BBStates.end() || !I->second.isVisitedTopDown())
continue;
MyStates.InitFromPred(I->second);
while (PI != PE) {
Pred = *PI++;
if (Pred != BB) {
I = BBStates.find(Pred);
- assert(I != BBStates.end());
- if (I->second.isVisitedTopDown())
+ if (I != BBStates.end() && I->second.isVisitedTopDown())
MyStates.MergePred(I->second);
}
}
@@ -2478,147 +2985,89 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB,
// Visit all the instructions, top-down.
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
Instruction *Inst = I;
- InstructionClass Class = GetInstructionClass(Inst);
- const Value *Arg = 0;
+ NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
+ }
- switch (Class) {
- case IC_RetainBlock:
- case IC_Retain:
- case IC_RetainRV: {
- Arg = GetObjCArg(Inst);
+ CheckForCFGHazards(BB, BBStates, MyStates);
+ return NestingDetected;
+}
- PtrState &S = MyStates.getPtrTopDownState(Arg);
+static void
+ComputePostOrders(Function &F,
+ SmallVectorImpl<BasicBlock *> &PostOrder,
+ SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder) {
+ /// Backedges - Backedges detected in the DFS. These edges will be
+ /// ignored in the reverse-CFG DFS, so that loops with multiple exits will be
+ /// traversed in the desired order.
+ DenseSet<std::pair<BasicBlock *, BasicBlock *> > Backedges;
+
+ /// Visited - The visited set, for doing DFS walks.
+ SmallPtrSet<BasicBlock *, 16> Visited;
- // Don't do retain+release tracking for IC_RetainRV, because it's
- // better to let it remain as the first instruction after a call.
- if (Class != IC_RetainRV) {
- // If we see two retains in a row on the same pointer. If so, make
- // a note, and we'll cicle back to revisit it after we've
- // hopefully eliminated the second retain, which may allow us to
- // eliminate the first retain too.
- // Theoretically we could implement removal of nested retain+release
- // pairs by making PtrState hold a stack of states, but this is
- // simple and avoids adding overhead for the non-nested case.
- if (S.GetSeq() == S_Retain)
- NestingDetected = true;
-
- S.SetSeq(S_Retain);
- S.RRI.clear();
- S.RRI.IsRetainBlock = Class == IC_RetainBlock;
- // Don't check S.IsKnownIncremented() here because it's not
- // sufficient.
- S.RRI.KnownSafe = S.IsKnownNested();
- S.RRI.Calls.insert(Inst);
+ // Do DFS, computing the PostOrder.
+ SmallPtrSet<BasicBlock *, 16> OnStack;
+ SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
+ BasicBlock *EntryBB = &F.getEntryBlock();
+ SuccStack.push_back(std::make_pair(EntryBB, succ_begin(EntryBB)));
+ Visited.insert(EntryBB);
+ OnStack.insert(EntryBB);
+ do {
+ dfs_next_succ:
+ TerminatorInst *TI = cast<TerminatorInst>(&SuccStack.back().first->back());
+ succ_iterator End = succ_iterator(TI, true);
+ while (SuccStack.back().second != End) {
+ BasicBlock *BB = *SuccStack.back().second++;
+ if (Visited.insert(BB)) {
+ SuccStack.push_back(std::make_pair(BB, succ_begin(BB)));
+ OnStack.insert(BB);
+ goto dfs_next_succ;
}
-
- S.SetAtLeastOneRefCount();
- S.IncrementRefCount();
- S.IncrementNestCount();
- continue;
+ if (OnStack.count(BB))
+ Backedges.insert(std::make_pair(SuccStack.back().first, BB));
}
- case IC_Release: {
- Arg = GetObjCArg(Inst);
+ OnStack.erase(SuccStack.back().first);
+ PostOrder.push_back(SuccStack.pop_back_val().first);
+ } while (!SuccStack.empty());
- PtrState &S = MyStates.getPtrTopDownState(Arg);
- S.DecrementRefCount();
- S.DecrementNestCount();
-
- switch (S.GetSeq()) {
- case S_Retain:
- case S_CanRelease:
- S.RRI.ReverseInsertPts.clear();
- // FALL THROUGH
- case S_Use:
- S.RRI.ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
- S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
- Releases[Inst] = S.RRI;
- S.ClearSequenceProgress();
- break;
- case S_None:
- break;
- case S_Stop:
- case S_Release:
- case S_MovableRelease:
- llvm_unreachable("top-down pointer in release state!");
- }
- break;
- }
- case IC_AutoreleasepoolPop:
- // Conservatively, clear MyStates for all known pointers.
- MyStates.clearTopDownPointers();
- continue;
- case IC_AutoreleasepoolPush:
- case IC_None:
- // These are irrelevant.
- continue;
- default:
- break;
- }
+ Visited.clear();
- // Consider any other possible effects of this instruction on each
- // pointer being tracked.
- for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
- ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
- const Value *Ptr = MI->first;
- if (Ptr == Arg)
- continue; // Handled above.
- PtrState &S = MI->second;
- Sequence Seq = S.GetSeq();
-
- // Check for possible releases.
- if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
- S.DecrementRefCount();
- switch (Seq) {
- case S_Retain:
- S.SetSeq(S_CanRelease);
- assert(S.RRI.ReverseInsertPts.empty());
- S.RRI.ReverseInsertPts.insert(Inst);
+ // Compute the exits, which are the starting points for reverse-CFG DFS.
+ // This includes blocks where all the successors are backedges that
+ // we're skipping.
+ SmallVector<BasicBlock *, 4> Exits;
+ for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
+ BasicBlock *BB = I;
+ TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
+ for (succ_iterator SI(TI), SE(TI, true); SI != SE; ++SI)
+ if (!Backedges.count(std::make_pair(BB, *SI)))
+ goto HasNonBackedgeSucc;
+ Exits.push_back(BB);
+ HasNonBackedgeSucc:;
+ }
- // One call can't cause a transition from S_Retain to S_CanRelease
- // and S_CanRelease to S_Use. If we've made the first transition,
- // we're done.
+ // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
+ SmallVector<std::pair<BasicBlock *, pred_iterator>, 16> PredStack;
+ for (SmallVectorImpl<BasicBlock *>::iterator I = Exits.begin(), E = Exits.end();
+ I != E; ++I) {
+ BasicBlock *ExitBB = *I;
+ PredStack.push_back(std::make_pair(ExitBB, pred_begin(ExitBB)));
+ Visited.insert(ExitBB);
+ while (!PredStack.empty()) {
+ reverse_dfs_next_succ:
+ pred_iterator End = pred_end(PredStack.back().first);
+ while (PredStack.back().second != End) {
+ BasicBlock *BB = *PredStack.back().second++;
+ // Skip backedges detected in the forward-CFG DFS.
+ if (Backedges.count(std::make_pair(BB, PredStack.back().first)))
continue;
- case S_Use:
- case S_CanRelease:
- case S_None:
- break;
- case S_Stop:
- case S_Release:
- case S_MovableRelease:
- llvm_unreachable("top-down pointer in release state!");
- }
- }
-
- // Check for possible direct uses.
- switch (Seq) {
- case S_CanRelease:
- if (CanUse(Inst, Ptr, PA, Class))
- S.SetSeq(S_Use);
- break;
- case S_Retain:
- // An objc_retainBlock call may be responsible for copying the block
- // data from the stack to the heap. Model this by moving it straight
- // from S_Retain to S_Use.
- if (S.RRI.IsRetainBlock &&
- CanUse(Inst, Ptr, PA, Class)) {
- assert(S.RRI.ReverseInsertPts.empty());
- S.RRI.ReverseInsertPts.insert(Inst);
- S.SetSeq(S_Use);
+ if (Visited.insert(BB)) {
+ PredStack.push_back(std::make_pair(BB, pred_begin(BB)));
+ goto reverse_dfs_next_succ;
}
- break;
- case S_Use:
- case S_None:
- break;
- case S_Stop:
- case S_Release:
- case S_MovableRelease:
- llvm_unreachable("top-down pointer in release state!");
}
+ ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
}
}
-
- CheckForCFGHazards(BB, BBStates, MyStates);
- return NestingDetected;
}
// Visit - Visit the function both top-down and bottom-up.
@@ -2627,43 +3076,29 @@ ObjCARCOpt::Visit(Function &F,
DenseMap<const BasicBlock *, BBState> &BBStates,
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases) {
- // Use reverse-postorder on the reverse CFG for bottom-up, because we
- // magically know that loops will be well behaved, i.e. they won't repeatedly
- // call retain on a single pointer without doing a release. We can't use
- // ReversePostOrderTraversal here because we want to walk up from each
- // function exit point.
- SmallPtrSet<BasicBlock *, 16> Visited;
- SmallVector<std::pair<BasicBlock *, pred_iterator>, 16> Stack;
- SmallVector<BasicBlock *, 16> Order;
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
- BasicBlock *BB = I;
- if (BB->getTerminator()->getNumSuccessors() == 0)
- Stack.push_back(std::make_pair(BB, pred_begin(BB)));
- }
- while (!Stack.empty()) {
- pred_iterator End = pred_end(Stack.back().first);
- while (Stack.back().second != End) {
- BasicBlock *BB = *Stack.back().second++;
- if (Visited.insert(BB))
- Stack.push_back(std::make_pair(BB, pred_begin(BB)));
- }
- Order.push_back(Stack.pop_back_val().first);
- }
+
+ // Use reverse-postorder traversals, because we magically know that loops
+ // will be well behaved, i.e. they won't repeatedly call retain on a single
+ // pointer without doing a release. We can't use the ReversePostOrderTraversal
+ // class here because we want the reverse-CFG postorder to consider each
+ // function exit point, and we want to ignore selected cycle edges.
+ SmallVector<BasicBlock *, 16> PostOrder;
+ SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
+ ComputePostOrders(F, PostOrder, ReverseCFGPostOrder);
+
+ // Use reverse-postorder on the reverse CFG for bottom-up.
bool BottomUpNestingDetected = false;
for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
- Order.rbegin(), E = Order.rend(); I != E; ++I) {
- BasicBlock *BB = *I;
- BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
- }
+ ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
+ I != E; ++I)
+ BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
- // Use regular reverse-postorder for top-down.
+ // Use reverse-postorder for top-down.
bool TopDownNestingDetected = false;
- typedef ReversePostOrderTraversal<Function *> RPOTType;
- RPOTType RPOT(&F);
- for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
- BasicBlock *BB = *I;
- TopDownNestingDetected |= VisitTopDown(BB, BBStates, Releases);
- }
+ for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
+ PostOrder.rbegin(), E = PostOrder.rend();
+ I != E; ++I)
+ TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
return TopDownNestingDetected && BottomUpNestingDetected;
}
@@ -2691,40 +3126,26 @@ void ObjCARCOpt::MoveCalls(Value *Arg,
getRetainBlockCallee(M) : getRetainCallee(M),
MyArg, "", InsertPt);
Call->setDoesNotThrow();
- if (!RetainsToMove.IsRetainBlock)
+ if (RetainsToMove.IsRetainBlock)
+ Call->setMetadata(CopyOnEscapeMDKind,
+ MDNode::get(M->getContext(), ArrayRef<Value *>()));
+ else
Call->setTailCall();
}
for (SmallPtrSet<Instruction *, 2>::const_iterator
PI = RetainsToMove.ReverseInsertPts.begin(),
PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
- Instruction *LastUse = *PI;
- Instruction *InsertPts[] = { 0, 0, 0 };
- if (InvokeInst *II = dyn_cast<InvokeInst>(LastUse)) {
- // We can't insert code immediately after an invoke instruction, so
- // insert code at the beginning of both successor blocks instead.
- // The invoke's return value isn't available in the unwind block,
- // but our releases will never depend on it, because they must be
- // paired with retains from before the invoke.
- InsertPts[0] = II->getNormalDest()->getFirstInsertionPt();
- InsertPts[1] = II->getUnwindDest()->getFirstInsertionPt();
- } else {
- // Insert code immediately after the last use.
- InsertPts[0] = llvm::next(BasicBlock::iterator(LastUse));
- }
-
- for (Instruction **I = InsertPts; *I; ++I) {
- Instruction *InsertPt = *I;
- Value *MyArg = ArgTy == ParamTy ? Arg :
- new BitCastInst(Arg, ParamTy, "", InsertPt);
- CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
- "", InsertPt);
- // Attach a clang.imprecise_release metadata tag, if appropriate.
- if (MDNode *M = ReleasesToMove.ReleaseMetadata)
- Call->setMetadata(ImpreciseReleaseMDKind, M);
- Call->setDoesNotThrow();
- if (ReleasesToMove.IsTailCallRelease)
- Call->setTailCall();
- }
+ Instruction *InsertPt = *PI;
+ Value *MyArg = ArgTy == ParamTy ? Arg :
+ new BitCastInst(Arg, ParamTy, "", InsertPt);
+ CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
+ "", InsertPt);
+ // Attach a clang.imprecise_release metadata tag, if appropriate.
+ if (MDNode *M = ReleasesToMove.ReleaseMetadata)
+ Call->setMetadata(ImpreciseReleaseMDKind, M);
+ Call->setDoesNotThrow();
+ if (ReleasesToMove.IsTailCallRelease)
+ Call->setTailCall();
}
// Delete the original retain and release calls.
@@ -2765,17 +3186,11 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
Instruction *Retain = cast<Instruction>(V);
Value *Arg = GetObjCArg(Retain);
- // If the object being released is in static storage, we know it's
+ // If the object being released is in static or stack storage, we know it's
// not being managed by ObjC reference counting, so we can delete pairs
// regardless of what possible decrements or uses lie between them.
- bool KnownSafe = isa<Constant>(Arg);
+ bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
- // Same for stack storage, unless this is an objc_retainBlock call,
- // which is responsible for copying the block data from the stack to
- // the heap.
- if (!I->second.IsRetainBlock && isa<AllocaInst>(Arg))
- KnownSafe = true;
-
// A constant pointer can't be pointing to an object on the heap. It may
// be reference-counted, but it won't be deleted.
if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
@@ -3091,7 +3506,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
UE = Alloca->use_end(); UI != UE; ) {
CallInst *UserInst = cast<CallInst>(*UI++);
if (!UserInst->use_empty())
- UserInst->replaceAllUsesWith(UserInst->getOperand(1));
+ UserInst->replaceAllUsesWith(UserInst->getArgOperand(0));
UserInst->eraseFromParent();
}
Alloca->eraseFromParent();
@@ -3243,6 +3658,10 @@ bool ObjCARCOpt::doInitialization(Module &M) {
// Identify the imprecise release metadata kind.
ImpreciseReleaseMDKind =
M.getContext().getMDKindID("clang.imprecise_release");
+ CopyOnEscapeMDKind =
+ M.getContext().getMDKindID("clang.arc.copy_on_escape");
+ NoObjCARCExceptionsMDKind =
+ M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
// Intuitively, objc_retain and others are nocapture, however in practice
// they are not, because they return their argument value. And objc_release
@@ -3344,6 +3763,11 @@ namespace {
/// RetainRV calls to make the optimization work on targets which need it.
const MDString *RetainRVMarker;
+ /// StoreStrongCalls - The set of inserted objc_storeStrong calls. If
+ /// at the end of walking the function we have found no alloca
+ /// instructions, these calls can be marked "tail".
+ DenseSet<CallInst *> StoreStrongCalls;
+
Constant *getStoreStrongCallee(Module *M);
Constant *getRetainAutoreleaseCallee(Module *M);
Constant *getRetainAutoreleaseRVCallee(Module *M);
@@ -3547,6 +3971,11 @@ void ObjCARCContract::ContractRelease(Instruction *Release,
StoreStrong->setDoesNotThrow();
StoreStrong->setDebugLoc(Store->getDebugLoc());
+ // We can't set the tail flag yet, because we haven't yet determined
+ // whether there are any escaping allocas. Remember this call, so that
+ // we can set the tail flag once we know it's safe.
+ StoreStrongCalls.insert(StoreStrong);
+
if (&*Iter == Store) ++Iter;
Store->eraseFromParent();
Release->eraseFromParent();
@@ -3593,6 +4022,13 @@ bool ObjCARCContract::runOnFunction(Function &F) {
PA.setAA(&getAnalysis<AliasAnalysis>());
+ // Track whether it's ok to mark objc_storeStrong calls with the "tail"
+ // keyword. Be conservative if the function has variadic arguments.
+ // It seems that functions which "return twice" are also unsafe for the
+ // "tail" argument, because they are setjmp, which could need to
+ // return to an earlier stack state.
+ bool TailOkForStoreStrongs = !F.isVarArg() && !F.callsFunctionThatReturnsTwice();
+
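+ // For example (illustrative C++), a function containing
+ //
+ //   jmp_buf Buf;
+ //   if (setjmp(Buf)) { /* resumed later by longjmp */ }
+ //
+ // can be re-entered with an earlier stack state, so marking an
+ // objc_storeStrong in it as "tail" would be unsafe;
+ // callsFunctionThatReturnsTwice() catches this.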
// For ObjC library calls which return their argument, replace uses of the
// argument with uses of the call return value, if it dominates the use. This
// reduces register pressure.
@@ -3649,6 +4085,13 @@ bool ObjCARCContract::runOnFunction(Function &F) {
case IC_Release:
ContractRelease(Inst, I);
continue;
+ case IC_User:
+ // Be conservative if the function has any alloca instructions.
+ // Technically we only care about escaping alloca instructions,
+ // but this is sufficient to handle some interesting cases.
+ if (isa<AllocaInst>(Inst))
+ TailOkForStoreStrongs = false;
+ continue;
default:
continue;
}
@@ -3666,36 +4109,37 @@ bool ObjCARCContract::runOnFunction(Function &F) {
Use &U = UI.getUse();
unsigned OperandNo = UI.getOperandNo();
++UI; // Increment UI now, because we may unlink its element.
- if (Instruction *UserInst = dyn_cast<Instruction>(U.getUser()))
- if (Inst != UserInst && DT->dominates(Inst, UserInst)) {
- Changed = true;
- Instruction *Replacement = Inst;
- Type *UseTy = U.get()->getType();
- if (PHINode *PHI = dyn_cast<PHINode>(UserInst)) {
- // For PHI nodes, insert the bitcast in the predecessor block.
- unsigned ValNo =
- PHINode::getIncomingValueNumForOperand(OperandNo);
- BasicBlock *BB =
- PHI->getIncomingBlock(ValNo);
- if (Replacement->getType() != UseTy)
- Replacement = new BitCastInst(Replacement, UseTy, "",
- &BB->back());
- for (unsigned i = 0, e = PHI->getNumIncomingValues();
- i != e; ++i)
- if (PHI->getIncomingBlock(i) == BB) {
- // Keep the UI iterator valid.
- if (&PHI->getOperandUse(
- PHINode::getOperandNumForIncomingValue(i)) ==
- &UI.getUse())
- ++UI;
- PHI->setIncomingValue(i, Replacement);
- }
- } else {
- if (Replacement->getType() != UseTy)
- Replacement = new BitCastInst(Replacement, UseTy, "", UserInst);
- U.set(Replacement);
- }
+ if (DT->isReachableFromEntry(U) &&
+ DT->dominates(Inst, U)) {
+ Changed = true;
+ Instruction *Replacement = Inst;
+ Type *UseTy = U.get()->getType();
+ if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
+ // For PHI nodes, insert the bitcast in the predecessor block.
+ unsigned ValNo =
+ PHINode::getIncomingValueNumForOperand(OperandNo);
+ BasicBlock *BB =
+ PHI->getIncomingBlock(ValNo);
+ if (Replacement->getType() != UseTy)
+ Replacement = new BitCastInst(Replacement, UseTy, "",
+ &BB->back());
+ for (unsigned i = 0, e = PHI->getNumIncomingValues();
+ i != e; ++i)
+ if (PHI->getIncomingBlock(i) == BB) {
+ // Keep the UI iterator valid.
+ if (&PHI->getOperandUse(
+ PHINode::getOperandNumForIncomingValue(i)) ==
+ &UI.getUse())
+ ++UI;
+ PHI->setIncomingValue(i, Replacement);
+ }
+ } else {
+ if (Replacement->getType() != UseTy)
+ Replacement = new BitCastInst(Replacement, UseTy, "",
+ cast<Instruction>(U.getUser()));
+ U.set(Replacement);
}
+ }
}
// If Arg is a no-op casted pointer, strip one level of casts and
@@ -3713,5 +4157,13 @@ bool ObjCARCContract::runOnFunction(Function &F) {
}
}
+ // If this function has no escaping allocas or suspicious vararg usage,
+ // objc_storeStrong calls can be marked with the "tail" keyword.
+ if (TailOkForStoreStrongs)
+ for (DenseSet<CallInst *>::iterator I = StoreStrongCalls.begin(),
+ E = StoreStrongCalls.end(); I != E; ++I)
+ (*I)->setTailCall();
+ StoreStrongCalls.clear();
+
return Changed;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 8f98a5b..cb408a1 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -74,7 +74,7 @@ static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) {
namespace {
class Reassociate : public FunctionPass {
DenseMap<BasicBlock*, unsigned> RankMap;
- DenseMap<AssertingVH<>, unsigned> ValueRankMap;
+ DenseMap<AssertingVH<Value>, unsigned> ValueRankMap;
SmallVector<WeakVH, 8> RedoInsts;
SmallVector<WeakVH, 8> DeadInsts;
bool MadeChange;
@@ -210,7 +210,7 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
/// LowerNegateToMultiply - Replace 0-X with X*-1.
///
static Instruction *LowerNegateToMultiply(Instruction *Neg,
- DenseMap<AssertingVH<>, unsigned> &ValueRankMap) {
+ DenseMap<AssertingVH<Value>, unsigned> &ValueRankMap) {
Constant *Cst = Constant::getAllOnesValue(Neg->getType());
Instruction *Res = BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg);
@@ -492,7 +492,7 @@ static bool ShouldBreakUpSubtract(Instruction *Sub) {
/// only used by an add, transform this into (X+(0-Y)) to promote better
/// reassociation.
static Instruction *BreakUpSubtract(Instruction *Sub,
- DenseMap<AssertingVH<>, unsigned> &ValueRankMap) {
+ DenseMap<AssertingVH<Value>, unsigned> &ValueRankMap) {
// Convert a subtract into an add and a neg instruction. This allows sub
// instructions to be commuted with other add instructions.
//
@@ -517,8 +517,8 @@ static Instruction *BreakUpSubtract(Instruction *Sub,
/// ConvertShiftToMul - If this is a shift of a reassociable multiply or is used
/// by one, change this into a multiply by a constant to assist with further
/// reassociation.
-static Instruction *ConvertShiftToMul(Instruction *Shl,
- DenseMap<AssertingVH<>, unsigned> &ValueRankMap) {
+static Instruction *ConvertShiftToMul(Instruction *Shl,
+ DenseMap<AssertingVH<Value>, unsigned> &ValueRankMap) {
// If an operand of this shift is a reassociable multiply, or if the shift
// is used by a reassociable multiply or add, turn into a multiply.
if (isReassociableOp(Shl->getOperand(0), Instruction::Mul) ||
diff --git a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
index 196a847..16b64a5 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -25,9 +25,9 @@
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -39,9 +39,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
#include <algorithm>
-#include <map>
using namespace llvm;
STATISTIC(NumInstRemoved, "Number of instructions removed");
@@ -59,7 +57,7 @@ class LatticeVal {
enum LatticeValueTy {
/// undefined - This LLVM Value has no known value yet.
undefined,
-
+
/// constant - This LLVM Value has a specific constant value.
constant,
@@ -68,7 +66,7 @@ class LatticeVal {
/// with another (different) constant, it goes to overdefined, instead of
/// asserting.
forcedconstant,
-
+
/// overdefined - This instruction is not known to be constant, and we know
/// it has a value.
overdefined
@@ -77,30 +75,30 @@ class LatticeVal {
/// Val: This stores the current lattice value along with the Constant* for
/// the constant if this is a 'constant' or 'forcedconstant' value.
PointerIntPair<Constant *, 2, LatticeValueTy> Val;
-
+
LatticeValueTy getLatticeValue() const {
return Val.getInt();
}
-
+
public:
LatticeVal() : Val(0, undefined) {}
-
+
bool isUndefined() const { return getLatticeValue() == undefined; }
bool isConstant() const {
return getLatticeValue() == constant || getLatticeValue() == forcedconstant;
}
bool isOverdefined() const { return getLatticeValue() == overdefined; }
-
+
Constant *getConstant() const {
assert(isConstant() && "Cannot get the constant of a non-constant!");
return Val.getPointer();
}
-
+
/// markOverdefined - Return true if this is a change in status.
bool markOverdefined() {
if (isOverdefined())
return false;
-
+
Val.setInt(overdefined);
return true;
}
@@ -111,17 +109,17 @@ public:
assert(getConstant() == V && "Marking constant with different value");
return false;
}
-
+
if (isUndefined()) {
Val.setInt(constant);
assert(V && "Marking constant with NULL");
Val.setPointer(V);
} else {
- assert(getLatticeValue() == forcedconstant &&
+ assert(getLatticeValue() == forcedconstant &&
"Cannot move from overdefined to constant!");
// Stay at forcedconstant if the constant is the same.
if (V == getConstant()) return false;
-
+
// Otherwise, we go to overdefined. Assumptions made based on the
// forced value are possibly wrong. Assuming this is another constant
// could expose a contradiction.
@@ -137,7 +135,7 @@ public:
return dyn_cast<ConstantInt>(getConstant());
return 0;
}
-
+
void markForcedConstant(Constant *V) {
assert(isUndefined() && "Can't force a defined value!");
Val.setInt(forcedconstant);
@@ -156,6 +154,7 @@ namespace {
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
const TargetData *TD;
+ const TargetLibraryInfo *TLI;
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -163,7 +162,7 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
/// StructType, for example for formal arguments, calls, insertelement, etc.
///
DenseMap<std::pair<Value*, unsigned>, LatticeVal> StructValueState;
-
+
/// GlobalValue - If we are tracking any values for the contents of a global
/// variable, we keep a mapping from the constant accessor to the element of
/// the global, to the currently known value. If the value becomes
@@ -178,7 +177,7 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
/// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
/// that return multiple values.
DenseMap<std::pair<Function*, unsigned>, LatticeVal> TrackedMultipleRetVals;
-
+
/// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
/// represented here for efficient lookup.
SmallPtrSet<Function*, 16> MRVFunctionsTracked;
@@ -187,7 +186,7 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
/// arguments we make optimistic assumptions about and try to prove as
/// constants.
SmallPtrSet<Function*, 16> TrackingIncomingArguments;
-
+
/// The reason for two worklists is that overdefined is the lowest state
/// on the lattice, and moving things to overdefined as fast as possible
/// makes SCCP converge much faster.
@@ -201,16 +200,13 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
SmallVector<BasicBlock*, 64> BBWorkList; // The BasicBlock work list
- /// UsersOfOverdefinedPHIs - Keep track of any users of PHI nodes that are not
- /// overdefined, despite the fact that the PHI node is overdefined.
- std::multimap<PHINode*, Instruction*> UsersOfOverdefinedPHIs;
-
/// KnownFeasibleEdges - Entries in this set are edges which have already had
/// PHI nodes retriggered.
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- SCCPSolver(const TargetData *td) : TD(td) {}
+ SCCPSolver(const TargetData *td, const TargetLibraryInfo *tli)
+ : TD(td), TLI(tli) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.
@@ -253,7 +249,7 @@ public:
void AddArgumentTrackedFunction(Function *F) {
TrackingIncomingArguments.insert(F);
}
-
+
/// Solve - Solve for constants and executable blocks.
///
void Solve();
@@ -274,9 +270,9 @@ public:
assert(I != ValueState.end() && "V is not in valuemap!");
return I->second;
}
-
+
/*LatticeVal getStructLatticeValueFor(Value *V, unsigned i) const {
- DenseMap<std::pair<Value*, unsigned>, LatticeVal>::const_iterator I =
+ DenseMap<std::pair<Value*, unsigned>, LatticeVal>::const_iterator I =
StructValueState.find(std::make_pair(V, i));
assert(I != StructValueState.end() && "V is not in valuemap!");
return I->second;
@@ -308,7 +304,7 @@ public:
else
markOverdefined(V);
}
-
+
private:
// markConstant - Make a value be marked as "constant". If the value
// is not already a constant, add it to the instruction work list so that
@@ -322,7 +318,7 @@ private:
else
InstWorkList.push_back(V);
}
-
+
void markConstant(Value *V, Constant *C) {
assert(!V->getType()->isStructTy() && "Should use other method");
markConstant(ValueState[V], V, C);
@@ -338,14 +334,14 @@ private:
else
InstWorkList.push_back(V);
}
-
-
+
+
// markOverdefined - Make a value be marked as "overdefined". If the
// value is not already overdefined, add it to the overdefined instruction
// work list so that the users of the instruction are updated later.
void markOverdefined(LatticeVal &IV, Value *V) {
if (!IV.markOverdefined()) return;
-
+
DEBUG(dbgs() << "markOverdefined: ";
if (Function *F = dyn_cast<Function>(V))
dbgs() << "Function '" << F->getName() << "'\n";
@@ -365,7 +361,7 @@ private:
else if (IV.getConstant() != MergeWithV.getConstant())
markOverdefined(IV, V);
}
-
+
void mergeInValue(Value *V, LatticeVal MergeWithV) {
assert(!V->getType()->isStructTy() && "Should use other method");
mergeInValue(ValueState[V], V, MergeWithV);
@@ -390,7 +386,7 @@ private:
if (!isa<UndefValue>(V))
LV.markConstant(C); // Constants are constant
}
-
+
// All others are undefined by default.
return LV;
}
@@ -412,21 +408,20 @@ private:
return LV; // Common case, already in the map.
if (Constant *C = dyn_cast<Constant>(V)) {
- if (isa<UndefValue>(C))
- ; // Undef values remain undefined.
- else if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C))
- LV.markConstant(CS->getOperand(i)); // Constants are constant.
- else if (isa<ConstantAggregateZero>(C)) {
- Type *FieldTy = cast<StructType>(V->getType())->getElementType(i);
- LV.markConstant(Constant::getNullValue(FieldTy));
- } else
+ Constant *Elt = C->getAggregateElement(i);
+
+ if (Elt == 0)
LV.markOverdefined(); // Unknown sort of constant.
+ else if (isa<UndefValue>(Elt))
+ ; // Undef values remain undefined.
+ else
+ LV.markConstant(Elt); // Constants are constant.
}
-
+
// All others are undefined by default.
return LV;
}
-
+
/// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
/// work list if it is not already executable.
@@ -466,33 +461,6 @@ private:
if (BBExecutable.count(I->getParent())) // Inst is executable?
visit(*I);
}
-
- /// RemoveFromOverdefinedPHIs - If I has any entries in the
- /// UsersOfOverdefinedPHIs map for PN, remove them now.
- void RemoveFromOverdefinedPHIs(Instruction *I, PHINode *PN) {
- if (UsersOfOverdefinedPHIs.empty()) return;
- typedef std::multimap<PHINode*, Instruction*>::iterator ItTy;
- std::pair<ItTy, ItTy> Range = UsersOfOverdefinedPHIs.equal_range(PN);
- for (ItTy It = Range.first, E = Range.second; It != E;) {
- if (It->second == I)
- UsersOfOverdefinedPHIs.erase(It++);
- else
- ++It;
- }
- }
-
- /// InsertInOverdefinedPHIs - Insert an entry in the UsersOfOverdefinedPHIS
- /// map for I and PN, but if one is there already, do not create another.
- /// (Duplicate entries do not break anything directly, but can lead to
- /// exponential growth of the table in rare cases.)
- void InsertInOverdefinedPHIs(Instruction *I, PHINode *PN) {
- typedef std::multimap<PHINode*, Instruction*>::iterator ItTy;
- std::pair<ItTy, ItTy> Range = UsersOfOverdefinedPHIs.equal_range(PN);
- for (ItTy J = Range.first, E = Range.second; J != E; ++J)
- if (J->second == I)
- return;
- UsersOfOverdefinedPHIs.insert(std::make_pair(PN, I));
- }
private:
friend class InstVisitor<SCCPSolver>;
@@ -559,7 +527,7 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
Succs[0] = true;
return;
}
-
+
LatticeVal BCValue = getValueState(BI->getCondition());
ConstantInt *CI = BCValue.getConstantInt();
if (CI == 0) {
@@ -569,44 +537,44 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
Succs[0] = Succs[1] = true;
return;
}
-
+
// Constant condition variables mean the branch can only go a single way.
Succs[CI->isZero()] = true;
return;
}
-
+
if (isa<InvokeInst>(TI)) {
// Invoke instruction successors are always executable.
Succs[0] = Succs[1] = true;
return;
}
-
+
if (SwitchInst *SI = dyn_cast<SwitchInst>(&TI)) {
- if (TI.getNumSuccessors() < 2) {
+ if (!SI->getNumCases()) {
Succs[0] = true;
return;
}
LatticeVal SCValue = getValueState(SI->getCondition());
ConstantInt *CI = SCValue.getConstantInt();
-
+
if (CI == 0) { // Overdefined or undefined condition?
// All destinations are executable!
if (!SCValue.isUndefined())
Succs.assign(TI.getNumSuccessors(), true);
return;
}
-
- Succs[SI->findCaseValue(CI)] = true;
+
+ Succs[SI->findCaseValue(CI).getSuccessorIndex()] = true;
return;
}
-
+
// TODO: This could be improved if the operand is a [cast of a] BlockAddress.
if (isa<IndirectBrInst>(&TI)) {
// Just mark all destinations executable!
Succs.assign(TI.getNumSuccessors(), true);
return;
}
-
+
#ifndef NDEBUG
dbgs() << "Unknown terminator instruction: " << TI << '\n';
#endif
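The indexing in `Succs[CI->isZero()]` above relies on BranchInst placing the true destination at successor 0. A hedged one-liner making that explicit:

    #include "llvm/Instructions.h"
    using namespace llvm;

    // For 'br i1 %c, label %T, label %F': getSuccessor(0) is %T and
    // getSuccessor(1) is %F, so a constant condition picks one edge.
    static BasicBlock *feasibleSuccessor(BranchInst *BI, ConstantInt *Cond) {
      return BI->getSuccessor(Cond->isZero()); // 0 = true edge, 1 = false
    }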
@@ -628,7 +596,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
if (BI->isUnconditional())
return true;
-
+
LatticeVal BCValue = getValueState(BI->getCondition());
// Overdefined condition variables mean the branch could go either way,
@@ -636,40 +604,33 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
ConstantInt *CI = BCValue.getConstantInt();
if (CI == 0)
return !BCValue.isUndefined();
-
+
// Constant condition variables mean the branch can only go a single way.
return BI->getSuccessor(CI->isZero()) == To;
}
-
+
// Invoke instruction successors are always executable.
if (isa<InvokeInst>(TI))
return true;
-
+
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- if (SI->getNumSuccessors() < 2)
+ if (SI->getNumCases() < 1)
return true;
LatticeVal SCValue = getValueState(SI->getCondition());
ConstantInt *CI = SCValue.getConstantInt();
-
+
if (CI == 0)
return !SCValue.isUndefined();
- // Make sure to skip the "default value" which isn't a value
- for (unsigned i = 1, E = SI->getNumSuccessors(); i != E; ++i)
- if (SI->getSuccessorValue(i) == CI) // Found the taken branch.
- return SI->getSuccessor(i) == To;
-
- // If the constant value is not equal to any of the branches, we must
- // execute default branch.
- return SI->getDefaultDest() == To;
+ return SI->findCaseValue(CI).getCaseSuccessor() == To;
}
-
+
// Just mark all destinations executable!
// TODO: This could be improved if the operand is a [cast of a] BlockAddress.
if (isa<IndirectBrInst>(TI))
return true;
-
+
#ifndef NDEBUG
dbgs() << "Unknown terminator instruction: " << *TI << '\n';
#endif
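Both switch hunks above move from successor-index arithmetic to the case-iterator API that arrived with this import; findCaseValue returns the matching case, or the default case when no case value equals CI. A sketch under that assumption:

    #include "llvm/Instructions.h"
    using namespace llvm;

    // Resolve the destination of a switch with a known constant condition.
    static BasicBlock *switchDest(SwitchInst *SI, ConstantInt *CI) {
      SwitchInst::CaseIt It = SI->findCaseValue(CI);
      return It.getCaseSuccessor(); // default successor if nothing matched
    }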
@@ -699,30 +660,15 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
// TODO: We could do a lot better than this if code actually uses this.
if (PN.getType()->isStructTy())
return markAnythingOverdefined(&PN);
-
- if (getValueState(&PN).isOverdefined()) {
- // There may be instructions using this PHI node that are not overdefined
- // themselves. If so, make sure that they know that the PHI node operand
- // changed.
- typedef std::multimap<PHINode*, Instruction*>::iterator ItTy;
- std::pair<ItTy, ItTy> Range = UsersOfOverdefinedPHIs.equal_range(&PN);
-
- if (Range.first == Range.second)
- return;
-
- SmallVector<Instruction*, 16> Users;
- for (ItTy I = Range.first, E = Range.second; I != E; ++I)
- Users.push_back(I->second);
- while (!Users.empty())
- visit(Users.pop_back_val());
+
+ if (getValueState(&PN).isOverdefined())
return; // Quick exit
- }
// Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
// and slow us down a lot. Just mark them overdefined.
if (PN.getNumIncomingValues() > 64)
return markOverdefined(&PN);
-
+
// Look at all of the executable operands of the PHI node. If any of them
// are overdefined, the PHI becomes overdefined as well. If they are all
// constant, and they agree with each other, the PHI becomes the identical
@@ -736,7 +682,7 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
continue;
-
+
if (IV.isOverdefined()) // PHI node becomes overdefined!
return markOverdefined(&PN);
@@ -744,11 +690,11 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
OperandVal = IV.getConstant();
continue;
}
-
+
// There is already a reachable operand. If we conflict with it,
// then the PHI node becomes overdefined. If we agree with it, we
// can continue on.
-
+
// Check to see if there are two different constants merging, if so, the PHI
// node is overdefined.
if (IV.getConstant() != OperandVal)
@@ -772,7 +718,7 @@ void SCCPSolver::visitReturnInst(ReturnInst &I) {
Function *F = I.getParent()->getParent();
Value *ResultOp = I.getOperand(0);
-
+
// If we are tracking the return value of this function, merge it in.
if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
DenseMap<Function*, LatticeVal>::iterator TFRVI =
@@ -782,7 +728,7 @@ void SCCPSolver::visitReturnInst(ReturnInst &I) {
return;
}
}
-
+
// Handle functions that return multiple values.
if (!TrackedMultipleRetVals.empty()) {
if (StructType *STy = dyn_cast<StructType>(ResultOp->getType()))
@@ -790,7 +736,7 @@ void SCCPSolver::visitReturnInst(ReturnInst &I) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
getStructValueState(ResultOp, i));
-
+
}
}
@@ -811,7 +757,7 @@ void SCCPSolver::visitCastInst(CastInst &I) {
if (OpSt.isOverdefined()) // Inherit overdefinedness of operand
markOverdefined(&I);
else if (OpSt.isConstant()) // Propagate constant value
- markConstant(&I, ConstantExpr::getCast(I.getOpcode(),
+ markConstant(&I, ConstantExpr::getCast(I.getOpcode(),
OpSt.getConstant(), I.getType()));
}
@@ -821,7 +767,7 @@ void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
// structs in structs.
if (EVI.getType()->isStructTy())
return markAnythingOverdefined(&EVI);
-
+
// If this is extracting from more than one level of struct, we don't know.
if (EVI.getNumIndices() != 1)
return markOverdefined(&EVI);
@@ -841,15 +787,15 @@ void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
StructType *STy = dyn_cast<StructType>(IVI.getType());
if (STy == 0)
return markOverdefined(&IVI);
-
+
// If this has more than one index, we can't handle it, drive all results to
// undef.
if (IVI.getNumIndices() != 1)
return markAnythingOverdefined(&IVI);
-
+
Value *Aggr = IVI.getAggregateOperand();
unsigned Idx = *IVI.idx_begin();
-
+
// Compute the result based on what we're inserting.
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
// This passes through all values that aren't the inserted element.
@@ -858,7 +804,7 @@ void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
continue;
}
-
+
Value *Val = IVI.getInsertedValueOperand();
if (Val->getType()->isStructTy())
// We don't track structs in structs.
@@ -875,25 +821,25 @@ void SCCPSolver::visitSelectInst(SelectInst &I) {
// TODO: We could do a lot better than this if code actually uses this.
if (I.getType()->isStructTy())
return markAnythingOverdefined(&I);
-
+
LatticeVal CondValue = getValueState(I.getCondition());
if (CondValue.isUndefined())
return;
-
+
if (ConstantInt *CondCB = CondValue.getConstantInt()) {
Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
mergeInValue(&I, getValueState(OpVal));
return;
}
-
+
// Otherwise, the condition is overdefined or a constant we can't evaluate.
// See if we can produce something better than overdefined based on the T/F
// value.
LatticeVal TVal = getValueState(I.getTrueValue());
LatticeVal FVal = getValueState(I.getFalseValue());
-
+
// select ?, C, C -> C.
- if (TVal.isConstant() && FVal.isConstant() &&
+ if (TVal.isConstant() && FVal.isConstant() &&
TVal.getConstant() == FVal.getConstant())
return markConstant(&I, FVal.getConstant());
@@ -908,7 +854,7 @@ void SCCPSolver::visitSelectInst(SelectInst &I) {
void SCCPSolver::visitBinaryOperator(Instruction &I) {
LatticeVal V1State = getValueState(I.getOperand(0));
LatticeVal V2State = getValueState(I.getOperand(1));
-
+
LatticeVal &IV = ValueState[&I];
if (IV.isOverdefined()) return;
@@ -916,14 +862,14 @@ void SCCPSolver::visitBinaryOperator(Instruction &I) {
return markConstant(IV, &I,
ConstantExpr::get(I.getOpcode(), V1State.getConstant(),
V2State.getConstant()));
-
+
// If something is undef, wait for it to resolve.
if (!V1State.isOverdefined() && !V2State.isOverdefined())
return;
-
+
// Otherwise, one of our operands is overdefined. Try to produce something
// better than overdefined with some tricks.
-
+
// If this is an AND or OR with 0 or -1, it doesn't matter that the other
// operand is overdefined.
if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Or) {
@@ -945,7 +891,7 @@ void SCCPSolver::visitBinaryOperator(Instruction &I) {
Constant::getAllOnesValue(I.getType()));
return;
}
-
+
if (I.getOpcode() == Instruction::And) {
// X and 0 = 0
if (NonOverdefVal->getConstant()->isNullValue())
@@ -959,64 +905,6 @@ void SCCPSolver::visitBinaryOperator(Instruction &I) {
}
- // If both operands are PHI nodes, it is possible that this instruction has
- // a constant value, despite the fact that the PHI node doesn't. Check for
- // this condition now.
- if (PHINode *PN1 = dyn_cast<PHINode>(I.getOperand(0)))
- if (PHINode *PN2 = dyn_cast<PHINode>(I.getOperand(1)))
- if (PN1->getParent() == PN2->getParent()) {
- // Since the two PHI nodes are in the same basic block, they must have
- // entries for the same predecessors. Walk the predecessor list, and
- // if all of the incoming values are constants, and the result of
- // evaluating this expression with all incoming value pairs is the
- // same, then this expression is a constant even though the PHI node
- // is not a constant!
- LatticeVal Result;
- for (unsigned i = 0, e = PN1->getNumIncomingValues(); i != e; ++i) {
- LatticeVal In1 = getValueState(PN1->getIncomingValue(i));
- BasicBlock *InBlock = PN1->getIncomingBlock(i);
- LatticeVal In2 =getValueState(PN2->getIncomingValueForBlock(InBlock));
-
- if (In1.isOverdefined() || In2.isOverdefined()) {
- Result.markOverdefined();
- break; // Cannot fold this operation over the PHI nodes!
- }
-
- if (In1.isConstant() && In2.isConstant()) {
- Constant *V = ConstantExpr::get(I.getOpcode(), In1.getConstant(),
- In2.getConstant());
- if (Result.isUndefined())
- Result.markConstant(V);
- else if (Result.isConstant() && Result.getConstant() != V) {
- Result.markOverdefined();
- break;
- }
- }
- }
-
- // If we found a constant value here, then we know the instruction is
- // constant despite the fact that the PHI nodes are overdefined.
- if (Result.isConstant()) {
- markConstant(IV, &I, Result.getConstant());
- // Remember that this instruction is virtually using the PHI node
- // operands.
- InsertInOverdefinedPHIs(&I, PN1);
- InsertInOverdefinedPHIs(&I, PN2);
- return;
- }
-
- if (Result.isUndefined())
- return;
-
- // Okay, this really is overdefined now. Since we might have
- // speculatively thought that this was not overdefined before, and
- // added ourselves to the UsersOfOverdefinedPHIs list for the PHIs,
- // make sure to clean out any entries that we put there, for
- // efficiency.
- RemoveFromOverdefinedPHIs(&I, PN1);
- RemoveFromOverdefinedPHIs(&I, PN2);
- }
-
markOverdefined(&I);
}
@@ -1029,75 +917,13 @@ void SCCPSolver::visitCmpInst(CmpInst &I) {
if (IV.isOverdefined()) return;
if (V1State.isConstant() && V2State.isConstant())
- return markConstant(IV, &I, ConstantExpr::getCompare(I.getPredicate(),
- V1State.getConstant(),
+ return markConstant(IV, &I, ConstantExpr::getCompare(I.getPredicate(),
+ V1State.getConstant(),
V2State.getConstant()));
-
+
// If operands are still undefined, wait for it to resolve.
if (!V1State.isOverdefined() && !V2State.isOverdefined())
return;
-
- // If something is overdefined, use some tricks to avoid ending up and over
- // defined if we can.
-
- // If both operands are PHI nodes, it is possible that this instruction has
- // a constant value, despite the fact that the PHI node doesn't. Check for
- // this condition now.
- if (PHINode *PN1 = dyn_cast<PHINode>(I.getOperand(0)))
- if (PHINode *PN2 = dyn_cast<PHINode>(I.getOperand(1)))
- if (PN1->getParent() == PN2->getParent()) {
- // Since the two PHI nodes are in the same basic block, they must have
- // entries for the same predecessors. Walk the predecessor list, and
- // if all of the incoming values are constants, and the result of
- // evaluating this expression with all incoming value pairs is the
- // same, then this expression is a constant even though the PHI node
- // is not a constant!
- LatticeVal Result;
- for (unsigned i = 0, e = PN1->getNumIncomingValues(); i != e; ++i) {
- LatticeVal In1 = getValueState(PN1->getIncomingValue(i));
- BasicBlock *InBlock = PN1->getIncomingBlock(i);
- LatticeVal In2 =getValueState(PN2->getIncomingValueForBlock(InBlock));
-
- if (In1.isOverdefined() || In2.isOverdefined()) {
- Result.markOverdefined();
- break; // Cannot fold this operation over the PHI nodes!
- }
-
- if (In1.isConstant() && In2.isConstant()) {
- Constant *V = ConstantExpr::getCompare(I.getPredicate(),
- In1.getConstant(),
- In2.getConstant());
- if (Result.isUndefined())
- Result.markConstant(V);
- else if (Result.isConstant() && Result.getConstant() != V) {
- Result.markOverdefined();
- break;
- }
- }
- }
-
- // If we found a constant value here, then we know the instruction is
- // constant despite the fact that the PHI nodes are overdefined.
- if (Result.isConstant()) {
- markConstant(&I, Result.getConstant());
- // Remember that this instruction is virtually using the PHI node
- // operands.
- InsertInOverdefinedPHIs(&I, PN1);
- InsertInOverdefinedPHIs(&I, PN2);
- return;
- }
-
- if (Result.isUndefined())
- return;
-
- // Okay, this really is overdefined now. Since we might have
- // speculatively thought that this was not overdefined before, and
- // added ourselves to the UsersOfOverdefinedPHIs list for the PHIs,
- // make sure to clean out any entries that we put there, for
- // efficiency.
- RemoveFromOverdefinedPHIs(&I, PN1);
- RemoveFromOverdefinedPHIs(&I, PN2);
- }
markOverdefined(&I);
}
@@ -1135,7 +961,7 @@ void SCCPSolver::visitInsertElementInst(InsertElementInst &I) {
EltState.getConstant(),
IdxState.getConstant()));
else if (ValState.isUndefined() && EltState.isConstant() &&
- IdxState.isConstant())
+ IdxState.isConstant())
markConstant(&I,ConstantExpr::getInsertElement(UndefValue::get(I.getType()),
EltState.getConstant(),
IdxState.getConstant()));
@@ -1153,17 +979,17 @@ void SCCPSolver::visitShuffleVectorInst(ShuffleVectorInst &I) {
if (MaskState.isUndefined() ||
(V1State.isUndefined() && V2State.isUndefined()))
return; // Undefined output if mask or both inputs undefined.
-
+
if (V1State.isOverdefined() || V2State.isOverdefined() ||
MaskState.isOverdefined()) {
markOverdefined(&I);
} else {
// A mix of constant/undef inputs.
- Constant *V1 = V1State.isConstant() ?
+ Constant *V1 = V1State.isConstant() ?
V1State.getConstant() : UndefValue::get(I.getType());
- Constant *V2 = V2State.isConstant() ?
+ Constant *V2 = V2State.isConstant() ?
V2State.getConstant() : UndefValue::get(I.getType());
- Constant *Mask = MaskState.isConstant() ?
+ Constant *Mask = MaskState.isConstant() ?
MaskState.getConstant() : UndefValue::get(I.getOperand(2)->getType());
markConstant(&I, ConstantExpr::getShuffleVector(V1, V2, Mask));
}
@@ -1183,7 +1009,7 @@ void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) {
LatticeVal State = getValueState(I.getOperand(i));
if (State.isUndefined())
return; // Operands are not resolved yet.
-
+
if (State.isOverdefined())
return markOverdefined(&I);
@@ -1200,10 +1026,10 @@ void SCCPSolver::visitStoreInst(StoreInst &SI) {
// If this store is of a struct, ignore it.
if (SI.getOperand(0)->getType()->isStructTy())
return;
-
+
if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
return;
-
+
GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
DenseMap<GlobalVariable*, LatticeVal>::iterator I = TrackedGlobals.find(GV);
if (I == TrackedGlobals.end() || I->second.isOverdefined()) return;
@@ -1221,22 +1047,22 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
// If this load is of a struct, just mark the result overdefined.
if (I.getType()->isStructTy())
return markAnythingOverdefined(&I);
-
+
LatticeVal PtrVal = getValueState(I.getOperand(0));
if (PtrVal.isUndefined()) return; // The pointer is not resolved yet!
-
+
LatticeVal &IV = ValueState[&I];
if (IV.isOverdefined()) return;
if (!PtrVal.isConstant() || I.isVolatile())
return markOverdefined(IV, &I);
-
+
Constant *Ptr = PtrVal.getConstant();
// load null -> null
if (isa<ConstantPointerNull>(Ptr) && I.getPointerAddressSpace() == 0)
return markConstant(IV, &I, Constant::getNullValue(I.getType()));
-
+
// Transform load (constant global) into the value loaded.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
if (!TrackedGlobals.empty()) {
@@ -1262,7 +1088,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
void SCCPSolver::visitCallSite(CallSite CS) {
Function *F = CS.getCalledFunction();
Instruction *I = CS.getInstruction();
-
+
// The common case is that we aren't tracking the callee, either because we
// are not doing interprocedural analysis or the callee is indirect, or is
// external. Handle these cases first.
@@ -1270,17 +1096,17 @@ void SCCPSolver::visitCallSite(CallSite CS) {
CallOverdefined:
// Void return and not tracking callee, just bail.
if (I->getType()->isVoidTy()) return;
-
+
// Otherwise, if we have a single return value case, and if the function is
// a declaration, maybe we can constant fold it.
if (F && F->isDeclaration() && !I->getType()->isStructTy() &&
canConstantFoldCallTo(F)) {
-
+
SmallVector<Constant*, 8> Operands;
for (CallSite::arg_iterator AI = CS.arg_begin(), E = CS.arg_end();
AI != E; ++AI) {
LatticeVal State = getValueState(*AI);
-
+
if (State.isUndefined())
return; // Operands are not resolved yet.
if (State.isOverdefined())
@@ -1288,10 +1114,10 @@ CallOverdefined:
assert(State.isConstant() && "Unknown state!");
Operands.push_back(State.getConstant());
}
-
+
// If we can constant fold this, mark the result of the call as a
// constant.
- if (Constant *C = ConstantFoldCall(F, Operands))
+ if (Constant *C = ConstantFoldCall(F, Operands, TLI))
return markConstant(I, C);
}
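ConstantFoldCall gains a TargetLibraryInfo parameter in this import, so the folder can decline library calls the target does not actually provide (e.g. under -fno-builtin). A call-shape sketch, assuming the 3.x signature:

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    // Fold a call whose operands are all constants; TLI may veto
    // functions the target lacks, in which case this returns null.
    static Constant *tryFoldCall(Function *F, ArrayRef<Constant*> Ops,
                                 const TargetLibraryInfo *TLI) {
      return ConstantFoldCall(F, Ops, TLI);
    }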
@@ -1304,7 +1130,7 @@ CallOverdefined:
// the formal arguments of the function.
if (!TrackingIncomingArguments.empty() && TrackingIncomingArguments.count(F)){
MarkBlockExecutable(F->begin());
-
+
// Propagate information from this call site into the callee.
CallSite::arg_iterator CAI = CS.arg_begin();
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
@@ -1315,7 +1141,7 @@ CallOverdefined:
markOverdefined(AI);
continue;
}
-
+
if (StructType *STy = dyn_cast<StructType>(AI->getType())) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
LatticeVal CallArg = getStructValueState(*CAI, i);
@@ -1326,22 +1152,22 @@ CallOverdefined:
}
}
}
-
+
// If this is a single/zero retval case, see if we're tracking the function.
if (StructType *STy = dyn_cast<StructType>(F->getReturnType())) {
if (!MRVFunctionsTracked.count(F))
goto CallOverdefined; // Not tracking this callee.
-
+
// If we are tracking this callee, propagate the result of the function
// into this call site.
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
- mergeInValue(getStructValueState(I, i), I,
+ mergeInValue(getStructValueState(I, i), I,
TrackedMultipleRetVals[std::make_pair(F, i)]);
} else {
DenseMap<Function*, LatticeVal>::iterator TFRVI = TrackedRetVals.find(F);
if (TFRVI == TrackedRetVals.end())
goto CallOverdefined; // Not tracking this callee.
-
+
// If so, propagate the return value of the callee into this call result.
mergeInValue(I, TFRVI->second);
}
@@ -1370,7 +1196,7 @@ void SCCPSolver::Solve() {
if (Instruction *I = dyn_cast<Instruction>(*UI))
OperandChangedState(I);
}
-
+
// Process the instruction work list.
while (!InstWorkList.empty()) {
Value *I = InstWorkList.pop_back_val();
@@ -1427,11 +1253,11 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (!BBExecutable.count(BB))
continue;
-
+
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
// Look for instructions which produce undef values.
if (I->getType()->isVoidTy()) continue;
-
+
if (StructType *STy = dyn_cast<StructType>(I->getType())) {
// Only a few things that can be structs matter for undef.
@@ -1442,7 +1268,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
continue;
// extractvalue and insertvalue don't need to be marked; they are
- // tracked as precisely as their operands.
+ // tracked as precisely as their operands.
if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
continue;
@@ -1549,12 +1375,12 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// X / undef -> undef. No change.
// X % undef -> undef. No change.
if (Op1LV.isUndefined()) break;
-
+
// undef / X -> 0. X could be maxint.
// undef % X -> 0. X could be 1.
markForcedConstant(I, Constant::getNullValue(ITy));
return true;
-
+
case Instruction::AShr:
// X >>a undef -> undef.
if (Op1LV.isUndefined()) break;
@@ -1587,7 +1413,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
} else {
// Leave Op1LV as Operand(1)'s LatticeValue.
}
-
+
if (Op1LV.isConstant())
markForcedConstant(I, Op1LV.getConstant());
else
@@ -1627,7 +1453,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
return true;
}
}
-
+
// Check to see if we have a branch or switch on an undefined value. If so
// we force the branch to go one way or the other to make the successor
// values live. It doesn't really matter which way we force it.
@@ -1636,7 +1462,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
if (!BI->isConditional()) continue;
if (!getValueState(BI->getCondition()).isUndefined())
continue;
-
+
// If the input to SCCP is actually branch on undef, fix the undef to
// false.
if (isa<UndefValue>(BI->getCondition())) {
@@ -1644,7 +1470,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
markEdgeExecutable(BB, TI->getSuccessor(1));
return true;
}
-
+
// Otherwise, it is a branch on a symbolic value which is currently
// considered to be undef. Handle this by forcing the input value to the
// branch to false.
@@ -1652,22 +1478,22 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
ConstantInt::getFalse(TI->getContext()));
return true;
}
-
+
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- if (SI->getNumSuccessors() < 2) // no cases
+ if (!SI->getNumCases())
continue;
if (!getValueState(SI->getCondition()).isUndefined())
continue;
-
+
// If the input to SCCP is actually switch on undef, fix the undef to
// the first constant.
if (isa<UndefValue>(SI->getCondition())) {
- SI->setCondition(SI->getCaseValue(1));
- markEdgeExecutable(BB, TI->getSuccessor(1));
+ SI->setCondition(SI->case_begin().getCaseValue());
+ markEdgeExecutable(BB, SI->case_begin().getCaseSuccessor());
return true;
}
-
- markForcedConstant(SI->getCondition(), SI->getCaseValue(1));
+
+ markForcedConstant(SI->getCondition(), SI->case_begin().getCaseValue());
return true;
}
}
@@ -1683,6 +1509,9 @@ namespace {
/// Sparse Conditional Constant Propagator.
///
struct SCCP : public FunctionPass {
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetLibraryInfo>();
+ }
static char ID; // Pass identification, replacement for typeid
SCCP() : FunctionPass(ID) {
initializeSCCPPass(*PassRegistry::getPassRegistry());
@@ -1735,7 +1564,9 @@ static void DeleteInstructionInBlock(BasicBlock *BB) {
//
bool SCCP::runOnFunction(Function &F) {
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
- SCCPSolver Solver(getAnalysisIfAvailable<TargetData>());
+ const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
+ SCCPSolver Solver(TD, TLI);
// Mark the first block of the function as being executable.
Solver.MarkBlockExecutable(F.begin());
@@ -1764,7 +1595,7 @@ bool SCCP::runOnFunction(Function &F) {
MadeChanges = true;
continue;
}
-
+
// Iterate over all of the instructions in a function, replacing them with
// constants if we have found them to be of constant values.
//
@@ -1772,25 +1603,25 @@ bool SCCP::runOnFunction(Function &F) {
Instruction *Inst = BI++;
if (Inst->getType()->isVoidTy() || isa<TerminatorInst>(Inst))
continue;
-
+
// TODO: Reconstruct structs from their elements.
if (Inst->getType()->isStructTy())
continue;
-
+
LatticeVal IV = Solver.getLatticeValueFor(Inst);
if (IV.isOverdefined())
continue;
-
+
Constant *Const = IV.isConstant()
? IV.getConstant() : UndefValue::get(Inst->getType());
DEBUG(dbgs() << " Constant: " << *Const << " = " << *Inst);
// Replaces all of the uses of a variable with uses of the constant.
Inst->replaceAllUsesWith(Const);
-
+
// Delete the instruction.
Inst->eraseFromParent();
-
+
// Hey, we just changed something!
MadeChanges = true;
++NumInstRemoved;
@@ -1807,6 +1638,9 @@ namespace {
/// Constant Propagation.
///
struct IPSCCP : public ModulePass {
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetLibraryInfo>();
+ }
static char ID;
IPSCCP() : ModulePass(ID) {
initializeIPSCCPPass(*PassRegistry::getPassRegistry());
@@ -1816,7 +1650,11 @@ namespace {
} // end anonymous namespace
char IPSCCP::ID = 0;
-INITIALIZE_PASS(IPSCCP, "ipsccp",
+INITIALIZE_PASS_BEGIN(IPSCCP, "ipsccp",
+ "Interprocedural Sparse Conditional Constant Propagation",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(IPSCCP, "ipsccp",
"Interprocedural Sparse Conditional Constant Propagation",
false, false)
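Splitting INITIALIZE_PASS into BEGIN/DEPENDENCY/END records the TargetLibraryInfo requirement with the PassRegistry, matching the new getAnalysisUsage override above. The analogous registration for the SCCP function pass is not shown in this excerpt; a hypothetical version, only to illustrate the three-macro shape:

    INITIALIZE_PASS_BEGIN(SCCP, "sccp",
                    "Sparse Conditional Constant Propagation", false, false)
    INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
    INITIALIZE_PASS_END(SCCP, "sccp",
                    "Sparse Conditional Constant Propagation", false, false)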
@@ -1855,7 +1693,9 @@ static bool AddressIsTaken(const GlobalValue *GV) {
}
bool IPSCCP::runOnModule(Module &M) {
- SCCPSolver Solver(getAnalysisIfAvailable<TargetData>());
+ const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
+ SCCPSolver Solver(TD, TLI);
// AddressTakenFunctions - This set keeps track of the address-taken functions
// that are in the input. As IPSCCP runs through and simplifies code,
@@ -1863,19 +1703,19 @@ bool IPSCCP::runOnModule(Module &M) {
// address-taken-ness. Because of this, we keep track of their addresses from
// the first pass so we can use them for the later simplification pass.
SmallPtrSet<Function*, 32> AddressTakenFunctions;
-
+
// Loop over all functions, marking arguments to those with their addresses
// taken or that are external as overdefined.
//
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration())
continue;
-
+
// If this is a strong or ODR definition of this function, then we can
// propagate information about its result into callsites of it.
if (!F->mayBeOverridden())
Solver.AddTrackedFunction(F);
-
+
// If this function only has direct calls that we can see, we can track its
// arguments and return value aggressively, and can assume it is not called
// unless we see evidence to the contrary.
@@ -1890,7 +1730,7 @@ bool IPSCCP::runOnModule(Module &M) {
// Assume the function is called.
Solver.MarkBlockExecutable(F->begin());
-
+
// Assume nothing about the incoming arguments.
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI)
@@ -1928,17 +1768,17 @@ bool IPSCCP::runOnModule(Module &M) {
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
if (AI->use_empty() || AI->getType()->isStructTy()) continue;
-
+
// TODO: Could use getStructLatticeValueFor to find out if the entire
// result is a constant and replace it entirely if so.
LatticeVal IV = Solver.getLatticeValueFor(AI);
if (IV.isOverdefined()) continue;
-
+
Constant *CST = IV.isConstant() ?
IV.getConstant() : UndefValue::get(AI->getType());
DEBUG(dbgs() << "*** Arg " << *AI << " = " << *CST <<"\n");
-
+
// Replaces all of the uses of a variable with uses of the
// constant.
AI->replaceAllUsesWith(CST);
@@ -1967,19 +1807,19 @@ bool IPSCCP::runOnModule(Module &M) {
new UnreachableInst(M.getContext(), BB);
continue;
}
-
+
for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
Instruction *Inst = BI++;
if (Inst->getType()->isVoidTy() || Inst->getType()->isStructTy())
continue;
-
+
// TODO: Could use getStructLatticeValueFor to find out if the entire
// result is a constant and replace it entirely if so.
-
+
LatticeVal IV = Solver.getLatticeValueFor(Inst);
if (IV.isOverdefined())
continue;
-
+
Constant *Const = IV.isConstant()
? IV.getConstant() : UndefValue::get(Inst->getType());
DEBUG(dbgs() << " Constant: " << *Const << " = " << *Inst);
@@ -1987,7 +1827,7 @@ bool IPSCCP::runOnModule(Module &M) {
// Replaces all of the uses of a variable with uses of the
// constant.
Inst->replaceAllUsesWith(Const);
-
+
// Delete the instruction.
if (!isa<CallInst>(Inst) && !isa<TerminatorInst>(Inst))
Inst->eraseFromParent();
@@ -2029,15 +1869,15 @@ bool IPSCCP::runOnModule(Module &M) {
llvm_unreachable("Didn't fold away reference to block!");
}
#endif
-
+
// Make this an uncond branch to the first successor.
TerminatorInst *TI = I->getParent()->getTerminator();
BranchInst::Create(TI->getSuccessor(0), TI);
-
+
// Remove entries in successor phi nodes to remove edges.
for (unsigned i = 1, e = TI->getNumSuccessors(); i != e; ++i)
TI->getSuccessor(i)->removePredecessor(TI->getParent());
-
+
// Remove the old terminator.
TI->eraseFromParent();
}
@@ -2060,7 +1900,7 @@ bool IPSCCP::runOnModule(Module &M) {
// last use of a function, the order of processing functions would affect
// whether other functions are optimizable.
SmallVector<ReturnInst*, 8> ReturnsToZap;
-
+
// TODO: Process multiple value ret instructions also.
const DenseMap<Function*, LatticeVal> &RV = Solver.getTrackedRetVals();
for (DenseMap<Function*, LatticeVal>::const_iterator I = RV.begin(),
@@ -2068,11 +1908,11 @@ bool IPSCCP::runOnModule(Module &M) {
Function *F = I->first;
if (I->second.isOverdefined() || F->getReturnType()->isVoidTy())
continue;
-
+
// We can only do this if we know that nothing else can call the function.
if (!F->hasLocalLinkage() || AddressTakenFunctions.count(F))
continue;
-
+
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()))
if (!isa<UndefValue>(RI->getOperand(0)))
@@ -2084,9 +1924,9 @@ bool IPSCCP::runOnModule(Module &M) {
Function *F = ReturnsToZap[i]->getParent()->getParent();
ReturnsToZap[i]->setOperand(0, UndefValue::get(F->getReturnType()));
}
-
- // If we inferred constant or undef values for globals variables, we can delete
- // the global and any stores that remain to it.
+
+ // If we inferred constant or undef values for global variables, we can
+ // delete the global and any stores that remain to it.
const DenseMap<GlobalVariable*, LatticeVal> &TG = Solver.getTrackedGlobals();
for (DenseMap<GlobalVariable*, LatticeVal>::const_iterator I = TG.begin(),
E = TG.end(); I != E; ++I) {
diff --git a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
index f6918de..7d65bcc 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -51,6 +51,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
initializeLowerExpectIntrinsicPass(Registry);
initializeMemCpyOptPass(Registry);
initializeObjCARCAliasAnalysisPass(Registry);
+ initializeObjCARCAPElimPass(Registry);
initializeObjCARCExpandPass(Registry);
initializeObjCARCContractPass(Registry);
initializeObjCARCOptPass(Registry);
diff --git a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index c6d9123..026fea1 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -13,7 +13,7 @@
// each member (if possible). Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
-// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
+// This combines a simple SRoA algorithm with the Mem2Reg algorithm because they
// often interact, especially for C++ programs. As such, iterating between
// SRoA, then Mem2Reg until we run out of things to promote works well.
//
@@ -453,6 +453,8 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
// Compute the offset that this GEP adds to the pointer.
SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
+ if (!GEP->getPointerOperandType()->isPointerTy())
+ return false;
uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
Indices);
// See if all uses can be converted.
@@ -572,8 +574,9 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// transform it into a store of the expanded constant value.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
assert(MSI->getRawDest() == Ptr && "Consistency error!");
- unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
- if (NumBytes != 0) {
+ int64_t SNumBytes = cast<ConstantInt>(MSI->getLength())->getSExtValue();
+ if (SNumBytes > 0 && (SNumBytes >> 32) == 0) {
+ unsigned NumBytes = static_cast<unsigned>(SNumBytes);
unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();
// Compute the value replicated the right number of times.
@@ -806,8 +809,10 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
return Builder.CreateBitCast(SV, AllocaType);
// Must be an element insertion.
- assert(SV->getType() == VTy->getElementType());
- uint64_t EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
+ Type *EltTy = VTy->getElementType();
+ if (SV->getType() != EltTy)
+ SV = Builder.CreateBitCast(SV, EltTy);
+ uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy);
unsigned Elt = Offset/EltSize;
return Builder.CreateInsertElement(Old, SV, Builder.getInt32(Elt));
}
@@ -934,13 +939,14 @@ public:
void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) {
// Remember which alloca we're promoting (for isInstInList).
this->AI = AI;
- if (MDNode *DebugNode = MDNode::getIfExists(AI->getContext(), AI))
+ if (MDNode *DebugNode = MDNode::getIfExists(AI->getContext(), AI)) {
for (Value::use_iterator UI = DebugNode->use_begin(),
E = DebugNode->use_end(); UI != E; ++UI)
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
DDIs.push_back(DDI);
else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
DVIs.push_back(DVI);
+ }
LoadAndStorePromoter::run(Insts);
AI->eraseFromParent();
@@ -975,30 +981,25 @@ public:
for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
+ Value *Arg = NULL;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- Instruction *DbgVal = NULL;
// If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in future.
- Argument *ExtendedArg = NULL;
if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
- ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
+ Arg = dyn_cast<Argument>(ZExt->getOperand(0));
if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
- ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
- if (ExtendedArg)
- DbgVal = DIB->insertDbgValueIntrinsic(ExtendedArg, 0,
- DIVariable(DVI->getVariable()),
- SI);
- else
- DbgVal = DIB->insertDbgValueIntrinsic(SI->getOperand(0), 0,
- DIVariable(DVI->getVariable()),
- SI);
- DbgVal->setDebugLoc(DVI->getDebugLoc());
+ Arg = dyn_cast<Argument>(SExt->getOperand(0));
+ if (!Arg)
+ Arg = SI->getOperand(0);
} else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
- Instruction *DbgVal =
- DIB->insertDbgValueIntrinsic(LI->getOperand(0), 0,
- DIVariable(DVI->getVariable()), LI);
- DbgVal->setDebugLoc(DVI->getDebugLoc());
+ Arg = LI->getOperand(0);
+ } else {
+ continue;
}
+ Instruction *DbgVal =
+ DIB->insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
+ Inst);
+ DbgVal->setDebugLoc(DVI->getDebugLoc());
}
}
};
@@ -1517,6 +1518,9 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
if (Length == 0)
return MarkUnsafe(Info, User);
+ if (Length->isNegative())
+ return MarkUnsafe(Info, User);
+
isSafeMemAccess(Offset, Length->getZExtValue(), 0,
UI.getOperandNo() == 0, Info, MI,
true /*AllowWholeAccess*/);
@@ -1873,8 +1877,14 @@ void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
return;
// The bitcast references the original alloca. Replace its uses with
- // references to the first new element alloca.
- Instruction *Val = NewElts[0];
+ // references to the alloca containing offset zero (which is normally at
+ // index zero, but might not be in cases involving structs with elements
+ // of size zero).
+ Type *T = AI->getAllocatedType();
+ uint64_t EltOffset = 0;
+ Type *IdxTy;
+ uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);
+ Instruction *Val = NewElts[Idx];
if (Val->getType() != BC->getDestTy()) {
Val = new BitCastInst(Val, BC->getDestTy(), "", BC);
Val->takeName(BC);
@@ -2146,8 +2156,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// If the requested value was a vector constant, create it.
if (EltTy->isVectorTy()) {
unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
- SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
- StoreVal = ConstantVector::get(Elts);
+ StoreVal = ConstantVector::getSplat(NumElts, StoreVal);
}
}
new StoreInst(StoreVal, EltPtr, MI);
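ConstantVector::getSplat replaces materializing a SmallVector of N identical elements by hand. A minimal sketch of the equivalence:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    using namespace llvm;

    // Both spellings produce <4 x i8> <i8 42, i8 42, i8 42, i8 42>;
    // getSplat is the one-call form the hunk above adopts.
    static Constant *splat4xi8(LLVMContext &Ctx) {
      Constant *Byte = ConstantInt::get(Type::getInt8Ty(Ctx), 42);
      return ConstantVector::getSplat(4, Byte);
    }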
@@ -2158,6 +2167,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
}
unsigned EltSize = TD->getTypeAllocSize(EltTy);
+ if (!EltSize)
+ continue;
IRBuilder<> Builder(MI);
@@ -2524,13 +2535,12 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
// ignore it if we know that the value isn't captured.
unsigned ArgNo = CS.getArgumentNo(UI);
if (CS.onlyReadsMemory() &&
- (CS.getInstruction()->use_empty() ||
- CS.paramHasAttr(ArgNo+1, Attribute::NoCapture)))
+ (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
continue;
// If this is being passed as a byval argument, the caller is making a
// copy, so it is only a read of the alloca.
- if (CS.paramHasAttr(ArgNo+1, Attribute::ByVal))
+ if (CS.isByValArgument(ArgNo))
continue;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index fbb9465..9c49ec1 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -256,19 +256,18 @@ struct StrChrOpt : public LibCallOptimization {
ConstantInt::get(TD->getIntPtrType(*Context), Len),
B, TD);
}
-
+
// Otherwise, the character is a constant, see if the first argument is
// a string literal. If so, we can constant fold.
- std::string Str;
- if (!GetConstantStringInfo(SrcStr, Str))
+ StringRef Str;
+ if (!getConstantStringInfo(SrcStr, Str))
return 0;
- // strchr can find the nul character.
- Str += '\0';
-
- // Compute the offset.
- size_t I = Str.find(CharC->getSExtValue());
- if (I == std::string::npos) // Didn't find the char. strchr returns null.
+ // Compute the offset; make sure to handle the case where we're searching for
+ // zero (a weird way to spell strlen).
+ size_t I = CharC->getSExtValue() == 0 ?
+ Str.size() : Str.find(CharC->getSExtValue());
+ if (I == StringRef::npos) // Didn't find the char. strchr returns null.
return Constant::getNullValue(CI->getType());
// strchr(s+n,c) -> gep(s+n+i,c)
@@ -296,20 +295,18 @@ struct StrRChrOpt : public LibCallOptimization {
if (!CharC)
return 0;
- std::string Str;
- if (!GetConstantStringInfo(SrcStr, Str)) {
+ StringRef Str;
+ if (!getConstantStringInfo(SrcStr, Str)) {
// strrchr(s, 0) -> strchr(s, 0)
if (TD && CharC->isZero())
return EmitStrChr(SrcStr, '\0', B, TD);
return 0;
}
- // strrchr can find the nul character.
- Str += '\0';
-
// Compute the offset.
- size_t I = Str.rfind(CharC->getSExtValue());
- if (I == std::string::npos) // Didn't find the char. Return null.
+ size_t I = CharC->getSExtValue() == 0 ?
+ Str.size() : Str.rfind(CharC->getSExtValue());
+ if (I == StringRef::npos) // Didn't find the char. Return null.
return Constant::getNullValue(CI->getType());
// strrchr(s+n,c) -> gep(s+n+i,c)
@@ -334,14 +331,13 @@ struct StrCmpOpt : public LibCallOptimization {
if (Str1P == Str2P) // strcmp(x,x) -> 0
return ConstantInt::get(CI->getType(), 0);
- std::string Str1, Str2;
- bool HasStr1 = GetConstantStringInfo(Str1P, Str1);
- bool HasStr2 = GetConstantStringInfo(Str2P, Str2);
+ StringRef Str1, Str2;
+ bool HasStr1 = getConstantStringInfo(Str1P, Str1);
+ bool HasStr2 = getConstantStringInfo(Str2P, Str2);
// strcmp(x, y) -> cnst (if both x and y are constant strings)
if (HasStr1 && HasStr2)
- return ConstantInt::get(CI->getType(),
- StringRef(Str1).compare(Str2));
+ return ConstantInt::get(CI->getType(), Str1.compare(Str2));
if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"),
@@ -397,14 +393,14 @@ struct StrNCmpOpt : public LibCallOptimization {
if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD);
- std::string Str1, Str2;
- bool HasStr1 = GetConstantStringInfo(Str1P, Str1);
- bool HasStr2 = GetConstantStringInfo(Str2P, Str2);
+ StringRef Str1, Str2;
+ bool HasStr1 = getConstantStringInfo(Str1P, Str1);
+ bool HasStr2 = getConstantStringInfo(Str2P, Str2);
// strncmp(x, y) -> cnst (if both x and y are constant strings)
if (HasStr1 && HasStr2) {
- StringRef SubStr1 = StringRef(Str1).substr(0, Length);
- StringRef SubStr2 = StringRef(Str2).substr(0, Length);
+ StringRef SubStr1 = Str1.substr(0, Length);
+ StringRef SubStr2 = Str2.substr(0, Length);
return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2));
}
@@ -549,9 +545,9 @@ struct StrPBrkOpt : public LibCallOptimization {
FT->getReturnType() != FT->getParamType(0))
return 0;
- std::string S1, S2;
- bool HasS1 = GetConstantStringInfo(CI->getArgOperand(0), S1);
- bool HasS2 = GetConstantStringInfo(CI->getArgOperand(1), S2);
+ StringRef S1, S2;
+ bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
// strpbrk(s, "") -> NULL
// strpbrk("", s) -> NULL
@@ -609,9 +605,9 @@ struct StrSpnOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy())
return 0;
- std::string S1, S2;
- bool HasS1 = GetConstantStringInfo(CI->getArgOperand(0), S1);
- bool HasS2 = GetConstantStringInfo(CI->getArgOperand(1), S2);
+ StringRef S1, S2;
+ bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
// strspn(s, "") -> 0
// strspn("", s) -> 0
@@ -619,8 +615,11 @@ struct StrSpnOpt : public LibCallOptimization {
return Constant::getNullValue(CI->getType());
// Constant folding.
- if (HasS1 && HasS2)
- return ConstantInt::get(CI->getType(), strspn(S1.c_str(), S2.c_str()));
+ if (HasS1 && HasS2) {
+ size_t Pos = S1.find_first_not_of(S2);
+ if (Pos == StringRef::npos) Pos = S1.size();
+ return ConstantInt::get(CI->getType(), Pos);
+ }
return 0;
}
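The StringRef-based fold above computes strspn directly over the constant data, which also behaves sensibly for strings containing embedded NULs. Its core, as a standalone sketch:

    #include "llvm/ADT/StringRef.h"

    // strspn(s1, s2): length of the longest prefix of s1 consisting
    // only of characters drawn from s2.
    static size_t foldStrSpn(llvm::StringRef S1, llvm::StringRef S2) {
      size_t Pos = S1.find_first_not_of(S2);
      return Pos == llvm::StringRef::npos ? S1.size() : Pos;
    }
    // e.g. foldStrSpn("abcde", "abc") == 3, matching strspn("abcde", "abc").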
@@ -638,17 +637,20 @@ struct StrCSpnOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy())
return 0;
- std::string S1, S2;
- bool HasS1 = GetConstantStringInfo(CI->getArgOperand(0), S1);
- bool HasS2 = GetConstantStringInfo(CI->getArgOperand(1), S2);
+ StringRef S1, S2;
+ bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
+ bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
// strcspn("", s) -> 0
if (HasS1 && S1.empty())
return Constant::getNullValue(CI->getType());
// Constant folding.
- if (HasS1 && HasS2)
- return ConstantInt::get(CI->getType(), strcspn(S1.c_str(), S2.c_str()));
+ if (HasS1 && HasS2) {
+ size_t Pos = S1.find_first_of(S2);
+ if (Pos == StringRef::npos) Pos = S1.size();
+ return ConstantInt::get(CI->getType(), Pos);
+ }
// strcspn(s, "") -> strlen(s)
if (TD && HasS2 && S2.empty())
@@ -692,9 +694,9 @@ struct StrStrOpt : public LibCallOptimization {
}
// See if either input string is a constant string.
- std::string SearchStr, ToFindStr;
- bool HasStr1 = GetConstantStringInfo(CI->getArgOperand(0), SearchStr);
- bool HasStr2 = GetConstantStringInfo(CI->getArgOperand(1), ToFindStr);
+ StringRef SearchStr, ToFindStr;
+ bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr);
+ bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr);
// fold strstr(x, "") -> x.
if (HasStr2 && ToFindStr.empty())
@@ -704,7 +706,7 @@ struct StrStrOpt : public LibCallOptimization {
if (HasStr1 && HasStr2) {
std::string::size_type Offset = SearchStr.find(ToFindStr);
- if (Offset == std::string::npos) // strstr("foo", "bar") -> null
+ if (Offset == StringRef::npos) // strstr("foo", "bar") -> null
return Constant::getNullValue(CI->getType());
// strstr("abcd", "bc") -> gep((char*)"abcd", 1)
@@ -756,11 +758,11 @@ struct MemCmpOpt : public LibCallOptimization {
}
// Constant folding: memcmp(x, y, l) -> cnst (all arguments are constant)
- std::string LHSStr, RHSStr;
- if (GetConstantStringInfo(LHS, LHSStr) &&
- GetConstantStringInfo(RHS, RHSStr)) {
+ StringRef LHSStr, RHSStr;
+ if (getConstantStringInfo(LHS, LHSStr) &&
+ getConstantStringInfo(RHS, RHSStr)) {
// Make sure we're not reading out-of-bounds memory.
- if (Len > LHSStr.length() || Len > RHSStr.length())
+ if (Len > LHSStr.size() || Len > RHSStr.size())
return 0;
uint64_t Ret = memcmp(LHSStr.data(), RHSStr.data(), Len);
return ConstantInt::get(CI->getType(), Ret);
@@ -841,6 +843,28 @@ struct MemSetOpt : public LibCallOptimization {
//===----------------------------------------------------------------------===//
//===---------------------------------------===//
+// 'cos*' Optimizations
+
+struct CosOpt : public LibCallOptimization {
+ virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
+ FunctionType *FT = Callee->getFunctionType();
+ // Just make sure this has 1 argument of FP type, which matches the
+ // result type.
+ if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isFloatingPointTy())
+ return 0;
+
+ // cos(-x) -> cos(x)
+ Value *Op1 = CI->getArgOperand(0);
+ if (BinaryOperator::isFNeg(Op1)) {
+ BinaryOperator *BinExpr = cast<BinaryOperator>(Op1);
+ return B.CreateCall(Callee, BinExpr->getOperand(1), "cos");
+ }
+ return 0;
+ }
+};
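The new CosOpt exploits cosine being an even function: cos(-x) == cos(x). BinaryOperator::isFNeg recognizes the 'fsub -0.0, x' negation idiom, whose operand 1 is x itself; a sketch of the argument stripping:

    #include "llvm/InstrTypes.h"
    using namespace llvm;

    // If Op is a floating-point negation, return the value being
    // negated; otherwise return Op unchanged.
    static Value *stripFNeg(Value *Op) {
      if (BinaryOperator::isFNeg(Op))
        return cast<BinaryOperator>(Op)->getOperand(1); // the x in (-x)
      return Op;
    }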
+
+//===---------------------------------------===//
// 'pow*' Optimizations
struct PowOpt : public LibCallOptimization {
@@ -870,7 +894,7 @@ struct PowOpt : public LibCallOptimization {
if (Op2C->isExactlyValue(0.5)) {
// Expand pow(x, 0.5) to (x == -infinity ? +infinity : fabs(sqrt(x))).
// This is faster than calling pow, and still handles negative zero
- // and negative infinite correctly.
+ // and negative infinity correctly.
// TODO: In fast-math mode, this could be just sqrt(x).
// TODO: In finite-only mode, this could be just fabs(sqrt(x)).
Value *Inf = ConstantFP::getInfinity(CI->getType());
@@ -963,8 +987,7 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {
// floor((double)floatval) -> (double)floorf(floatval)
Value *V = Cast->getOperand(0);
- V = EmitUnaryFloatFnCall(V, Callee->getName().data(), B,
- Callee->getAttributes());
+ V = EmitUnaryFloatFnCall(V, Callee->getName(), B, Callee->getAttributes());
return B.CreateFPExt(V, B.getDoubleTy());
}
};
@@ -1000,7 +1023,7 @@ struct FFSOpt : public LibCallOptimization {
Type *ArgType = Op->getType();
Value *F = Intrinsic::getDeclaration(Callee->getParent(),
Intrinsic::cttz, ArgType);
- Value *V = B.CreateCall(F, Op, "cttz");
+ Value *V = B.CreateCall2(F, Op, B.getFalse(), "cttz");
V = B.CreateAdd(V, ConstantInt::get(V->getType(), 1));
V = B.CreateIntCast(V, B.getInt32Ty(), false);
@@ -1095,8 +1118,8 @@ struct PrintFOpt : public LibCallOptimization {
Value *OptimizeFixedFormatString(Function *Callee, CallInst *CI,
IRBuilder<> &B) {
// Check for a fixed format string.
- std::string FormatStr;
- if (!GetConstantStringInfo(CI->getArgOperand(0), FormatStr))
+ StringRef FormatStr;
+ if (!getConstantStringInfo(CI->getArgOperand(0), FormatStr))
return 0;
// Empty format string -> noop.
@@ -1122,11 +1145,9 @@ struct PrintFOpt : public LibCallOptimization {
FormatStr.find('%') == std::string::npos) { // no format characters.
// Create a string literal with no \n on it. We expect the constant merge
// pass to be run after this pass, to merge duplicate strings.
- FormatStr.erase(FormatStr.end()-1);
- Constant *C = ConstantArray::get(*Context, FormatStr, true);
- C = new GlobalVariable(*Callee->getParent(), C->getType(), true,
- GlobalVariable::InternalLinkage, C, "str");
- EmitPutS(C, B, TD);
+ FormatStr = FormatStr.drop_back();
+ Value *GV = B.CreateGlobalString(FormatStr, "str");
+ EmitPutS(GV, B, TD);
return CI->use_empty() ? (Value*)CI :
ConstantInt::get(CI->getType(), FormatStr.size()+1);
}
@@ -1184,8 +1205,8 @@ struct SPrintFOpt : public LibCallOptimization {
Value *OptimizeFixedFormatString(Function *Callee, CallInst *CI,
IRBuilder<> &B) {
// Check for a fixed format string.
- std::string FormatStr;
- if (!GetConstantStringInfo(CI->getArgOperand(1), FormatStr))
+ StringRef FormatStr;
+ if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
return 0;
// If we just have a format string (nothing else crazy) transform it.
@@ -1296,7 +1317,8 @@ struct FWriteOpt : public LibCallOptimization {
return ConstantInt::get(CI->getType(), 0);
// If this is writing one byte, turn it into fputc.
- if (Bytes == 1) { // fwrite(S,1,1,F) -> fputc(S[0],F)
+ // This optimization is only valid if the return value is unused.
+ if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");
EmitFPutC(Char, CI->getArgOperand(3), B, TD);
return ConstantInt::get(CI->getType(), 1);
@@ -1326,7 +1348,7 @@ struct FPutsOpt : public LibCallOptimization {
if (!Len) return 0;
EmitFWrite(CI->getArgOperand(0),
ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
- CI->getArgOperand(1), B, TD);
+ CI->getArgOperand(1), B, TD, TLI);
return CI; // Known to have no uses (see above).
}
};
@@ -1338,8 +1360,8 @@ struct FPrintFOpt : public LibCallOptimization {
Value *OptimizeFixedFormatString(Function *Callee, CallInst *CI,
IRBuilder<> &B) {
// All the optimizations depend on the format string.
- std::string FormatStr;
- if (!GetConstantStringInfo(CI->getArgOperand(1), FormatStr))
+ StringRef FormatStr;
+ if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
return 0;
// fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
@@ -1354,7 +1376,7 @@ struct FPrintFOpt : public LibCallOptimization {
EmitFWrite(CI->getArgOperand(1),
ConstantInt::get(TD->getIntPtrType(*Context),
FormatStr.size()),
- CI->getArgOperand(0), B, TD);
+ CI->getArgOperand(0), B, TD, TLI);
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1376,7 +1398,7 @@ struct FPrintFOpt : public LibCallOptimization {
// fprintf(F, "%s", str) --> fputs(str, F)
if (!CI->getArgOperand(2)->getType()->isPointerTy() || !CI->use_empty())
return 0;
- EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD);
+ EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
return CI;
}
return 0;
@@ -1422,8 +1444,8 @@ struct PutsOpt : public LibCallOptimization {
return 0;
// Check for a constant string.
- std::string Str;
- if (!GetConstantStringInfo(CI->getArgOperand(0), Str))
+ StringRef Str;
+ if (!getConstantStringInfo(CI->getArgOperand(0), Str))
return 0;
if (Str.empty() && CI->use_empty()) {
@@ -1457,7 +1479,7 @@ namespace {
StrToOpt StrTo; StrSpnOpt StrSpn; StrCSpnOpt StrCSpn; StrStrOpt StrStr;
MemCmpOpt MemCmp; MemCpyOpt MemCpy; MemMoveOpt MemMove; MemSetOpt MemSet;
// Math Library Optimizations
- PowOpt Pow; Exp2Opt Exp2; UnaryDoubleFPOpt UnaryDoubleFP;
+ CosOpt Cos; PowOpt Pow; Exp2Opt Exp2; UnaryDoubleFPOpt UnaryDoubleFP;
// Integer Optimizations
FFSOpt FFS; AbsOpt Abs; IsDigitOpt IsDigit; IsAsciiOpt IsAscii;
ToAsciiOpt ToAscii;
@@ -1472,6 +1494,7 @@ namespace {
SimplifyLibCalls() : FunctionPass(ID), StrCpy(false), StrCpyChk(true) {
initializeSimplifyLibCallsPass(*PassRegistry::getPassRegistry());
}
+ void AddOpt(LibFunc::Func F, LibCallOptimization* Opt);
void InitOptimizations();
bool runOnFunction(Function &F);
@@ -1502,6 +1525,11 @@ FunctionPass *llvm::createSimplifyLibCallsPass() {
return new SimplifyLibCalls();
}
+void SimplifyLibCalls::AddOpt(LibFunc::Func F, LibCallOptimization* Opt) {
+ if (TLI->has(F))
+ Optimizations[TLI->getName(F)] = Opt;
+}
+
/// Optimizations - Populate the Optimizations map with all the optimizations
/// we know.
void SimplifyLibCalls::InitOptimizations() {
@@ -1527,14 +1555,17 @@ void SimplifyLibCalls::InitOptimizations() {
Optimizations["strcspn"] = &StrCSpn;
Optimizations["strstr"] = &StrStr;
Optimizations["memcmp"] = &MemCmp;
- if (TLI->has(LibFunc::memcpy)) Optimizations["memcpy"] = &MemCpy;
+ AddOpt(LibFunc::memcpy, &MemCpy);
Optimizations["memmove"] = &MemMove;
- if (TLI->has(LibFunc::memset)) Optimizations["memset"] = &MemSet;
+ AddOpt(LibFunc::memset, &MemSet);
// _chk variants of String and Memory LibCall Optimizations.
Optimizations["__strcpy_chk"] = &StrCpyChk;
// Math Library Optimizations
+ Optimizations["cosf"] = &Cos;
+ Optimizations["cos"] = &Cos;
+ Optimizations["cosl"] = &Cos;
Optimizations["powf"] = &Pow;
Optimizations["pow"] = &Pow;
Optimizations["powl"] = &Pow;
@@ -1582,8 +1613,8 @@ void SimplifyLibCalls::InitOptimizations() {
// Formatting and IO Optimizations
Optimizations["sprintf"] = &SPrintF;
Optimizations["printf"] = &PrintF;
- Optimizations["fwrite"] = &FWrite;
- Optimizations["fputs"] = &FPuts;
+ AddOpt(LibFunc::fwrite, &FWrite);
+ AddOpt(LibFunc::fputs, &FPuts);
Optimizations["fprintf"] = &FPrintF;
Optimizations["puts"] = &Puts;
}
@@ -2348,9 +2379,6 @@ bool SimplifyLibCalls::doInitialization(Module &M) {
// * cbrt(sqrt(x)) -> pow(x,1/6)
// * cbrt(sqrt(x)) -> pow(x,1/9)
//
-// cos, cosf, cosl:
-// * cos(-x) -> cos(x)
-//
// exp, expf, expl:
// * exp(log(x)) -> x
//
@@ -2387,6 +2415,8 @@ bool SimplifyLibCalls::doInitialization(Module &M) {
// * stpcpy(str, "literal") ->
// llvm.memcpy(str,"literal",strlen("literal")+1,1)
//
+// strchr:
+// * strchr(p, 0) -> strlen(p)
// tan, tanf, tanl:
// * tan(atan(x)) -> x
//
diff --git a/contrib/llvm/lib/Transforms/Scalar/Sink.cpp b/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
index c83f56c..ef65c0a 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -18,6 +18,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
@@ -240,7 +241,7 @@ bool Sinking::SinkInstruction(Instruction *Inst,
if (SuccToSinkTo->getUniquePredecessor() != ParentBlock) {
// We cannot sink a load across a critical edge - there may be stores in
// other code paths.
- if (!Inst->isSafeToSpeculativelyExecute()) {
+ if (!isSafeToSpeculativelyExecute(Inst)) {
DEBUG(dbgs() << " *** PUNTING: Won't sink load along critical edge.\n");
return false;
}
diff --git a/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp b/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
index 8e5a1eb..d831452 100644
--- a/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -473,14 +473,7 @@ bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
// Check to see if this value is already used in the memory instruction's
// block. If so, it's already live into the block at the very least, so we
// can reasonably fold it.
- BasicBlock *MemBB = MemoryInst->getParent();
- for (Value::use_iterator UI = Val->use_begin(), E = Val->use_end();
- UI != E; ++UI)
- // We know that uses of arguments and instructions have to be instructions.
- if (cast<Instruction>(*UI)->getParent() == MemBB)
- return true;
-
- return false;
+ return Val->isUsedInBasicBlock(MemoryInst->getParent());
}
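The hand-rolled use scan is replaced by Value::isUsedInBasicBlock. A hedged sketch of what that helper amounts to, assuming it mirrors the loop deleted above (usedInBlock is an illustrative free function, not the member itself):

    #include "llvm/BasicBlock.h"
    #include "llvm/Instructions.h"

    // A value is "used in" a block if any of its users is an instruction
    // belonging to that block.
    bool usedInBlock(const llvm::Value *V, const llvm::BasicBlock *BB) {
      for (llvm::Value::const_use_iterator UI = V->use_begin(),
           E = V->use_end(); UI != E; ++UI)
        if (llvm::cast<llvm::Instruction>(*UI)->getParent() == BB)
          return true;
      return false;
    }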
diff --git a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index a7f9efd..3859a1a 100644
--- a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -249,7 +249,6 @@ unsigned llvm::GetSuccessorNumber(BasicBlock *BB, BasicBlock *Succ) {
if (Term->getSuccessor(i) == Succ)
return i;
}
- return 0;
}
/// SplitEdge - Split the edge connecting specified block. Pass P must
@@ -453,9 +452,8 @@ static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB,
/// of the edges being split is an exit of a loop with other exits).
///
BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
- BasicBlock *const *Preds,
- unsigned NumPreds, const char *Suffix,
- Pass *P) {
+ ArrayRef<BasicBlock*> Preds,
+ const char *Suffix, Pass *P) {
// Create new basic block, insert right before the original block.
BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), BB->getName()+Suffix,
BB->getParent(), BB);
@@ -464,7 +462,7 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
BranchInst *BI = BranchInst::Create(BB, NewBB);
// Move the edges from Preds to point to NewBB instead of BB.
- for (unsigned i = 0; i != NumPreds; ++i) {
+ for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
// This is slightly more strict than necessary; the minimum requirement
// is that there be no more than one indirectbr branching to BB. And
// all BlockAddress uses would need to be updated.
@@ -477,7 +475,7 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
// node becomes an incoming value for BB's phi node. However, if the Preds
// list is empty, we need to insert dummy entries into the PHI nodes in BB to
// account for the newly created predecessor.
- if (NumPreds == 0) {
+ if (Preds.size() == 0) {
// Insert dummy values as the incoming value.
for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
cast<PHINode>(I)->addIncoming(UndefValue::get(I->getType()), NewBB);
@@ -486,12 +484,10 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
// Update DominatorTree, LoopInfo, and LCCSA analysis information.
bool HasLoopExit = false;
- UpdateAnalysisInformation(BB, NewBB, ArrayRef<BasicBlock*>(Preds, NumPreds),
- P, HasLoopExit);
+ UpdateAnalysisInformation(BB, NewBB, Preds, P, HasLoopExit);
// Update the PHI nodes in BB with the values coming from NewBB.
- UpdatePHINodes(BB, NewBB, ArrayRef<BasicBlock*>(Preds, NumPreds), BI,
- P, HasLoopExit);
+ UpdatePHINodes(BB, NewBB, Preds, BI, P, HasLoopExit);
return NewBB;
}
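SplitBlockPredecessors now takes an ArrayRef<BasicBlock*> instead of a raw pointer plus length, so callers holding a SmallVector can pass it directly; the BreakCriticalEdges hunk below is exactly this migration. A before/after sketch of a call site (splitPreds is an illustrative wrapper):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"

    llvm::BasicBlock *splitPreds(llvm::BasicBlock *BB,
                                 llvm::SmallVectorImpl<llvm::BasicBlock*> &Preds,
                                 llvm::Pass *P) {
      // Old: SplitBlockPredecessors(BB, Preds.data(), Preds.size(), ".split", P);
      // New: the SmallVector converts to ArrayRef<BasicBlock*> implicitly.
      return llvm::SplitBlockPredecessors(BB, Preds, ".split", P);
    }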
diff --git a/contrib/llvm/lib/Transforms/Utils/BasicInliner.cpp b/contrib/llvm/lib/Transforms/Utils/BasicInliner.cpp
deleted file mode 100644
index 23a30cc5..0000000
--- a/contrib/llvm/lib/Transforms/Utils/BasicInliner.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-//===- BasicInliner.cpp - Basic function level inliner --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a simple function based inliner that does not use
-// call graph information.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "basicinliner"
-#include "llvm/Module.h"
-#include "llvm/Function.h"
-#include "llvm/Transforms/Utils/BasicInliner.h"
-#include "llvm/Transforms/Utils/Cloning.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include <vector>
-
-using namespace llvm;
-
-static cl::opt<unsigned>
-BasicInlineThreshold("basic-inline-threshold", cl::Hidden, cl::init(200),
- cl::desc("Control the amount of basic inlining to perform (default = 200)"));
-
-namespace llvm {
-
- /// BasicInlinerImpl - BasicInliner implementation class. This hides
- /// container info, used by the basic inliner, from the public interface.
- struct BasicInlinerImpl {
-
- BasicInlinerImpl(const BasicInlinerImpl&); // DO NOT IMPLEMENT
- void operator=(const BasicInlinerImpl&); // DO NOT IMPLEMENT
- public:
- BasicInlinerImpl(TargetData *T) : TD(T) {}
-
- /// addFunction - Add function into the list of functions to process.
- /// All functions must be inserted using this interface before invoking
- /// inlineFunctions().
- void addFunction(Function *F) {
- Functions.push_back(F);
- }
-
- /// neverInlineFunction - Sometimes a function is never to be inlined
- /// because of one reason or another.
- void neverInlineFunction(Function *F) {
- NeverInline.insert(F);
- }
-
- /// inlineFunctions - Walk all call sites in all functions supplied by
- /// client. Inline as many call sites as possible. Delete completely
- /// inlined functions.
- void inlineFunctions();
-
- private:
- TargetData *TD;
- std::vector<Function *> Functions;
- SmallPtrSet<const Function *, 16> NeverInline;
- SmallPtrSet<Function *, 8> DeadFunctions;
- InlineCostAnalyzer CA;
- };
-
-/// inlineFunctions - Walk all call sites in all functions supplied by
-/// client. Inline as many call sites as possible. Delete completely
-/// inlined functions.
-void BasicInlinerImpl::inlineFunctions() {
-
- // Scan through and identify all call sites ahead of time so that we only
- // inline call sites in the original functions, not call sites that result
- // from inlining other functions.
- std::vector<CallSite> CallSites;
-
- for (std::vector<Function *>::iterator FI = Functions.begin(),
- FE = Functions.end(); FI != FE; ++FI) {
- Function *F = *FI;
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
- CallSite CS(cast<Value>(I));
- if (CS && CS.getCalledFunction()
- && !CS.getCalledFunction()->isDeclaration())
- CallSites.push_back(CS);
- }
- }
-
- DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");
-
- // Inline call sites.
- bool Changed = false;
- do {
- Changed = false;
- for (unsigned index = 0; index != CallSites.size() && !CallSites.empty();
- ++index) {
- CallSite CS = CallSites[index];
- if (Function *Callee = CS.getCalledFunction()) {
-
- // Eliminate calls that are never inlinable.
- if (Callee->isDeclaration() ||
- CS.getInstruction()->getParent()->getParent() == Callee) {
- CallSites.erase(CallSites.begin() + index);
- --index;
- continue;
- }
- InlineCost IC = CA.getInlineCost(CS, NeverInline);
- if (IC.isAlways()) {
- DEBUG(dbgs() << " Inlining: cost=always"
- <<", call: " << *CS.getInstruction());
- } else if (IC.isNever()) {
- DEBUG(dbgs() << " NOT Inlining: cost=never"
- <<", call: " << *CS.getInstruction());
- continue;
- } else {
- int Cost = IC.getValue();
-
- if (Cost >= (int) BasicInlineThreshold) {
- DEBUG(dbgs() << " NOT Inlining: cost = " << Cost
- << ", call: " << *CS.getInstruction());
- continue;
- } else {
- DEBUG(dbgs() << " Inlining: cost = " << Cost
- << ", call: " << *CS.getInstruction());
- }
- }
-
- // Inline
- InlineFunctionInfo IFI(0, TD);
- if (InlineFunction(CS, IFI)) {
- if (Callee->use_empty() && (Callee->hasLocalLinkage() ||
- Callee->hasAvailableExternallyLinkage()))
- DeadFunctions.insert(Callee);
- Changed = true;
- CallSites.erase(CallSites.begin() + index);
- --index;
- }
- }
- }
- } while (Changed);
-
- // Remove completely inlined functions from module.
- for(SmallPtrSet<Function *, 8>::iterator I = DeadFunctions.begin(),
- E = DeadFunctions.end(); I != E; ++I) {
- Function *D = *I;
- Module *M = D->getParent();
- M->getFunctionList().remove(D);
- }
-}
-
-BasicInliner::BasicInliner(TargetData *TD) {
- Impl = new BasicInlinerImpl(TD);
-}
-
-BasicInliner::~BasicInliner() {
- delete Impl;
-}
-
-/// addFunction - Add function into the list of functions to process.
-/// All functions must be inserted using this interface before invoking
-/// inlineFunctions().
-void BasicInliner::addFunction(Function *F) {
- Impl->addFunction(F);
-}
-
-/// neverInlineFunction - Sometimes a function is never to be inlined because
-/// of one reason or another.
-void BasicInliner::neverInlineFunction(Function *F) {
- Impl->neverInlineFunction(F);
-}
-
-/// inlineFunctions - Walk all call sites in all functions supplied by
-/// client. Inline as many call sites as possible. Delete completely
-/// inlined functions.
-void BasicInliner::inlineFunctions() {
- Impl->inlineFunctions();
-}
-
-}
diff --git a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index c052910..f752d79 100644
--- a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -372,8 +372,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// form, which we're in the process of restoring!
if (!Preds.empty() && HasPredOutsideOfLoop) {
BasicBlock *NewExitBB =
- SplitBlockPredecessors(Exit, Preds.data(), Preds.size(),
- "split", P);
+ SplitBlockPredecessors(Exit, Preds, "split", P);
if (P->mustPreserveAnalysisID(LCSSAID))
CreatePHIsForSplitLoopExit(Preds, NewExitBB, Exit);
}
diff --git a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index 4b5f45b..a808303 100644
--- a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -15,11 +15,15 @@
#include "llvm/Type.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/LLVMContext.h"
#include "llvm/Intrinsics.h"
+#include "llvm/ADT/SmallString.h"
using namespace llvm;
@@ -206,19 +210,16 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
/// 'floor'). This function is known to take a single argument of type matching
/// 'Op' and returns one value with the same type. If 'Op' is a long double,
/// 'l' is added as the suffix of the name; if 'Op' is a float, we add an 'f'
/// suffix.
-Value *llvm::EmitUnaryFloatFnCall(Value *Op, const char *Name,
- IRBuilder<> &B, const AttrListPtr &Attrs) {
- char NameBuffer[20];
+Value *llvm::EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
+ const AttrListPtr &Attrs) {
+ SmallString<20> NameBuffer;
if (!Op->getType()->isDoubleTy()) {
// If we need to add a suffix, copy into NameBuffer.
- unsigned NameLen = strlen(Name);
- assert(NameLen < sizeof(NameBuffer)-2);
- memcpy(NameBuffer, Name, NameLen);
+ NameBuffer += Name;
if (Op->getType()->isFloatTy())
- NameBuffer[NameLen] = 'f'; // floorf
+ NameBuffer += 'f'; // floorf
else
- NameBuffer[NameLen] = 'l'; // floorl
- NameBuffer[NameLen+1] = 0;
+ NameBuffer += 'l'; // floorl
Name = NameBuffer;
}
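SmallString replaces the fixed 20-byte buffer and the manual strlen/memcpy/NUL bookkeeping; appends grow the storage on demand, and the StringRef parameter can simply be re-pointed at it. The idiom in isolation (withFPSuffix is an illustrative helper):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"

    // Build "floor" -> "floorf" or "floorl" with no fixed-size buffer.
    llvm::StringRef withFPSuffix(llvm::StringRef Name, bool IsFloat,
                                 llvm::SmallString<20> &Storage) {
      Storage += Name;                  // copy base name into the SmallString
      Storage += IsFloat ? 'f' : 'l';   // precision suffix
      return Storage.str();             // view into the caller-owned storage
    }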
@@ -299,20 +300,21 @@ void llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
/// EmitFPutS - Emit a call to the puts function. Str is required to be a
/// pointer and File is a pointer to FILE.
void llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
- const TargetData *TD) {
+ const TargetData *TD, const TargetLibraryInfo *TLI) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture);
AWI[2] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ StringRef FPutsName = TLI->getName(LibFunc::fputs);
Constant *F;
if (File->getType()->isPointerTy())
- F = M->getOrInsertFunction("fputs", AttrListPtr::get(AWI, 3),
+ F = M->getOrInsertFunction(FPutsName, AttrListPtr::get(AWI, 3),
B.getInt32Ty(),
B.getInt8PtrTy(),
File->getType(), NULL);
else
- F = M->getOrInsertFunction("fputs", B.getInt32Ty(),
+ F = M->getOrInsertFunction(FPutsName, B.getInt32Ty(),
B.getInt8PtrTy(),
File->getType(), NULL);
CallInst *CI = B.CreateCall2(F, CastToCStr(Str, B), File, "fputs");
@@ -324,23 +326,25 @@ void llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
void llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
- IRBuilder<> &B, const TargetData *TD) {
+ IRBuilder<> &B, const TargetData *TD,
+ const TargetLibraryInfo *TLI) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[3];
AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(4, Attribute::NoCapture);
AWI[2] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
+ StringRef FWriteName = TLI->getName(LibFunc::fwrite);
Constant *F;
if (File->getType()->isPointerTy())
- F = M->getOrInsertFunction("fwrite", AttrListPtr::get(AWI, 3),
+ F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI, 3),
TD->getIntPtrType(Context),
B.getInt8PtrTy(),
TD->getIntPtrType(Context),
TD->getIntPtrType(Context),
File->getType(), NULL);
else
- F = M->getOrInsertFunction("fwrite", TD->getIntPtrType(Context),
+ F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(Context),
B.getInt8PtrTy(),
TD->getIntPtrType(Context),
TD->getIntPtrType(Context),
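Both EmitFPutS and EmitFWrite now consult TargetLibraryInfo for the callee's name instead of hard-coding "fputs"/"fwrite", so a target that renames these routines gets a declaration with the right symbol. The lookup in isolation, using only calls visible in the hunks above (the wrapper name is illustrative):

    #include "llvm/Target/TargetLibraryInfo.h"

    // Resolve the target-level symbol for a libcall before emitting a call;
    // mirrors the FPutsName/FWriteName lookups above.
    llvm::StringRef libcallName(const llvm::TargetLibraryInfo *TLI,
                                llvm::LibFunc::Func F) {
      return TLI->getName(F);
    }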
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
index cf21f1e..20052a4 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -23,8 +23,11 @@
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Support/CFG.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/ADT/SmallVector.h"
#include <map>
@@ -60,7 +63,6 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
if (CodeInfo) {
CodeInfo->ContainsCalls |= hasCalls;
- CodeInfo->ContainsUnwinds |= isa<UnwindInst>(BB->getTerminator());
CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
CodeInfo->ContainsDynamicAllocas |= hasStaticAllocas &&
BB != &BB->getParent()->getEntryBlock();
@@ -75,7 +77,8 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
- const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
+ const char *NameSuffix, ClonedCodeInfo *CodeInfo,
+ ValueMapTypeRemapper *TypeMapper) {
assert(NameSuffix && "NameSuffix cannot be null!");
#ifndef NDEBUG
@@ -113,8 +116,23 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
// Create a new basic block and copy instructions into it!
BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo);
- VMap[&BB] = CBB; // Add basic block mapping.
+ // Add basic block mapping.
+ VMap[&BB] = CBB;
+
+ // It is only legal to clone a function if a block address within that
+ // function is never referenced outside of the function. Given that, we
+ // want to map block addresses from the old function to block addresses in
+ // the clone. (This is different from the generic ValueMapper
+ // implementation, which generates an invalid blockaddress when
+ // cloning a function.)
+ if (BB.hasAddressTaken()) {
+ Constant *OldBBAddr = BlockAddress::get(const_cast<Function*>(OldFunc),
+ const_cast<BasicBlock*>(&BB));
+ VMap[OldBBAddr] = BlockAddress::get(NewFunc, CBB);
+ }
+
+ // Note return instructions for the caller.
if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
Returns.push_back(RI);
}
@@ -126,7 +144,8 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
// Loop over all instructions, fixing each one as we find it...
for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II)
RemapInstruction(II, VMap,
- ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
+ ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
+ TypeMapper);
}
/// CloneFunction - Return a copy of the specified function, but without
@@ -181,7 +200,6 @@ namespace {
const Function *OldFunc;
ValueToValueMapTy &VMap;
bool ModuleLevelChanges;
- SmallVectorImpl<ReturnInst*> &Returns;
const char *NameSuffix;
ClonedCodeInfo *CodeInfo;
const TargetData *TD;
@@ -189,24 +207,18 @@ namespace {
PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
ValueToValueMapTy &valueMap,
bool moduleLevelChanges,
- SmallVectorImpl<ReturnInst*> &returns,
const char *nameSuffix,
ClonedCodeInfo *codeInfo,
const TargetData *td)
: NewFunc(newFunc), OldFunc(oldFunc),
VMap(valueMap), ModuleLevelChanges(moduleLevelChanges),
- Returns(returns), NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
+ NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
}
/// CloneBlock - The specified block is found to be reachable, clone it and
/// anything that it can reach.
void CloneBlock(const BasicBlock *BB,
std::vector<const BasicBlock*> &ToClone);
-
- public:
- /// ConstantFoldMappedInstruction - Constant fold the specified instruction,
- /// mapping its operands through VMap if they are available.
- Constant *ConstantFoldMappedInstruction(const Instruction *I);
};
}
@@ -214,7 +226,7 @@ namespace {
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
std::vector<const BasicBlock*> &ToClone){
- TrackingVH<Value> &BBEntry = VMap[BB];
+ WeakVH &BBEntry = VMap[BB];
// Have we already cloned this block?
if (BBEntry) return;
@@ -224,25 +236,55 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
BBEntry = NewBB = BasicBlock::Create(BB->getContext());
if (BB->hasName()) NewBB->setName(BB->getName()+NameSuffix);
+ // It is only legal to clone a function if a block address within that
+ // function is never referenced outside of the function. Given that, we
+ // want to map block addresses from the old function to block addresses in
+ // the clone. (This is different from the generic ValueMapper
+ // implementation, which generates an invalid blockaddress when
+ // cloning a function.)
+ //
+ // Note that we don't need to fix the mapping for unreachable blocks;
+ // the default mapping there is safe.
+ if (BB->hasAddressTaken()) {
+ Constant *OldBBAddr = BlockAddress::get(const_cast<Function*>(OldFunc),
+ const_cast<BasicBlock*>(BB));
+ VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
+ }
+
+
bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
// Loop over all instructions, and copy them over, DCE'ing as we go. This
// loop doesn't include the terminator.
for (BasicBlock::const_iterator II = BB->begin(), IE = --BB->end();
II != IE; ++II) {
- // If this instruction constant folds, don't bother cloning the instruction,
- // instead, just add the constant to the value map.
- if (Constant *C = ConstantFoldMappedInstruction(II)) {
- VMap[II] = C;
- continue;
+ Instruction *NewInst = II->clone();
+
+ // Eagerly remap operands to the newly cloned instruction, except for PHI
+ // nodes for which we defer processing until we update the CFG.
+ if (!isa<PHINode>(NewInst)) {
+ RemapInstruction(NewInst, VMap,
+ ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
+
+ // If we can simplify this instruction to some other value, simply add
+ // a mapping to that value rather than inserting a new instruction into
+ // the basic block.
+ if (Value *V = SimplifyInstruction(NewInst, TD)) {
+ // On the off-chance that this simplifies to an instruction in the old
+ // function, map it back into the new function.
+ if (Value *MappedV = VMap.lookup(V))
+ V = MappedV;
+
+ VMap[II] = V;
+ delete NewInst;
+ continue;
+ }
}
- Instruction *NewInst = II->clone();
if (II->hasName())
NewInst->setName(II->getName()+NameSuffix);
- NewBB->getInstList().push_back(NewInst);
VMap[II] = NewInst; // Add instruction map to value.
-
+ NewBB->getInstList().push_back(NewInst);
hasCalls |= (isa<CallInst>(II) && !isa<DbgInfoIntrinsic>(II));
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
if (isa<ConstantInt>(AI->getArraySize()))
@@ -281,7 +323,8 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
Cond = dyn_cast_or_null<ConstantInt>(V);
}
if (Cond) { // Constant fold to uncond branch!
- BasicBlock *Dest = SI->getSuccessor(SI->findCaseValue(Cond));
+ SwitchInst::ConstCaseIt Case = SI->findCaseValue(Cond);
+ BasicBlock *Dest = const_cast<BasicBlock*>(Case.getCaseSuccessor());
VMap[OldTI] = BranchInst::Create(Dest, NewBB);
ToClone.push_back(Dest);
TerminatorDone = true;
@@ -303,38 +346,10 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
if (CodeInfo) {
CodeInfo->ContainsCalls |= hasCalls;
- CodeInfo->ContainsUnwinds |= isa<UnwindInst>(OldTI);
CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
CodeInfo->ContainsDynamicAllocas |= hasStaticAllocas &&
BB != &BB->getParent()->front();
}
-
- if (ReturnInst *RI = dyn_cast<ReturnInst>(NewBB->getTerminator()))
- Returns.push_back(RI);
-}
-
-/// ConstantFoldMappedInstruction - Constant fold the specified instruction,
-/// mapping its operands through VMap if they are available.
-Constant *PruningFunctionCloner::
-ConstantFoldMappedInstruction(const Instruction *I) {
- SmallVector<Constant*, 8> Ops;
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- if (Constant *Op = dyn_cast_or_null<Constant>(MapValue(I->getOperand(i),
- VMap,
- ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges)))
- Ops.push_back(Op);
- else
- return 0; // All operands not constant!
-
- if (const CmpInst *CI = dyn_cast<CmpInst>(I))
- return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
- TD);
-
- if (const LoadInst *LI = dyn_cast<LoadInst>(I))
- if (!LI->isVolatile())
- return ConstantFoldLoadFromConstPtr(Ops[0], TD);
-
- return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD);
}
/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
@@ -361,7 +376,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
#endif
PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
- Returns, NameSuffix, CodeInfo, TD);
+ NameSuffix, CodeInfo, TD);
// Clone the entry block, and anything recursively reachable from it.
std::vector<const BasicBlock*> CloneWorklist;
@@ -386,29 +401,19 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// Add the new block to the new function.
NewFunc->getBasicBlockList().push_back(NewBB);
-
- // Loop over all of the instructions in the block, fixing up operand
- // references as we go. This uses VMap to do all the hard work.
- //
- BasicBlock::iterator I = NewBB->begin();
-
- DebugLoc TheCallDL;
- if (TheCall)
- TheCallDL = TheCall->getDebugLoc();
-
+
// Handle PHI nodes specially, as we have to remove references to dead
// blocks.
- if (PHINode *PN = dyn_cast<PHINode>(I)) {
- // Skip over all PHI nodes, remembering them for later.
- BasicBlock::const_iterator OldI = BI->begin();
- for (; (PN = dyn_cast<PHINode>(I)); ++I, ++OldI)
- PHIToResolve.push_back(cast<PHINode>(OldI));
- }
-
- // Otherwise, remap the rest of the instructions normally.
- for (; I != NewBB->end(); ++I)
- RemapInstruction(I, VMap,
- ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
+ for (BasicBlock::const_iterator I = BI->begin(), E = BI->end(); I != E; ++I)
+ if (const PHINode *PN = dyn_cast<PHINode>(I))
+ PHIToResolve.push_back(PN);
+ else
+ break;
+
+ // Finally, remap the terminator instructions, as those can't be remapped
+ // until all BBs are mapped.
+ RemapInstruction(NewBB->getTerminator(), VMap,
+ ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
}
// Defer PHI resolution until rest of function is resolved, PHI resolution
@@ -490,31 +495,55 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
++OldI;
}
}
- // NOTE: We cannot eliminate single entry phi nodes here, because of
- // VMap. Single entry phi nodes can have multiple VMap entries
- // pointing at them. Thus, deleting one would require scanning the VMap
- // to update any entries in it that would require that. This would be
- // really slow.
}
-
+
+ // Make a second pass over the PHINodes now that all of them have been
+ // remapped into the new function, simplifying the PHINode and performing any
+ // recursive simplifications exposed. This will transparently update the
+ // WeakVH in the VMap. Notably, we rely on that so that if we coalesce
+ // two PHINodes, the iteration over the old PHIs remains valid, and the
+ // mapping will just map us to the new node (which may not even be a PHI
+ // node).
+ for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)
+ if (PHINode *PN = dyn_cast<PHINode>(VMap[PHIToResolve[Idx]]))
+ recursivelySimplifyInstruction(PN, TD);
+
// Now that the inlined function body has been fully constructed, go through
// and zap unconditional fall-through branches. This happens all the time when
// specializing code: code specialization turns conditional branches into
// uncond branches, and this code folds them.
- Function::iterator I = cast<BasicBlock>(VMap[&OldFunc->getEntryBlock()]);
+ Function::iterator Begin = cast<BasicBlock>(VMap[&OldFunc->getEntryBlock()]);
+ Function::iterator I = Begin;
while (I != NewFunc->end()) {
+ // Check if this block has become dead during inlining or other
+ // simplifications. Note that the first block will appear dead, as it has
+ // not yet been wired up properly.
+ if (I != Begin && (pred_begin(I) == pred_end(I) ||
+ I->getSinglePredecessor() == I)) {
+ BasicBlock *DeadBB = I++;
+ DeleteDeadBlock(DeadBB);
+ continue;
+ }
+
+ // We need to simplify conditional branches and switches with a constant
+ // operand. We try to prune these out when cloning, but if the
+ // simplification required looking through PHI nodes, those are only
+ // available after forming the full basic block. That may leave some here,
+ // and we still want to prune the dead code as early as possible.
+ ConstantFoldTerminator(I);
+
BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
if (!BI || BI->isConditional()) { ++I; continue; }
- // Note that we can't eliminate uncond branches if the destination has
- // single-entry PHI nodes. Eliminating the single-entry phi nodes would
- // require scanning the VMap to update any entries that point to the phi
- // node.
BasicBlock *Dest = BI->getSuccessor(0);
- if (!Dest->getSinglePredecessor() || isa<PHINode>(Dest->begin())) {
+ if (!Dest->getSinglePredecessor()) {
++I; continue;
}
-
+
+ // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
+ // above should have zapped all of them.
+ assert(!isa<PHINode>(Dest->begin()));
+
// We know all single-entry PHI nodes in the inlined function have been
// removed, so we just need to splice the blocks.
BI->eraseFromParent();
@@ -530,4 +559,13 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// Do not increment I, iteratively merge all things this block branches to.
}
+
+ // Make a final pass over the basic blocks from the old function to gather
+ // any return instructions which survived folding. We have to do this here
+ // because we can iteratively remove and merge returns above.
+ for (Function::iterator I = cast<BasicBlock>(VMap[&OldFunc->getEntryBlock()]),
+ E = NewFunc->end();
+ I != E; ++I)
+ if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator()))
+ Returns.push_back(RI);
}
diff --git a/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp b/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
new file mode 100644
index 0000000..9b09915
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/CmpInstAnalysis.cpp
@@ -0,0 +1,96 @@
+//===- CmpInstAnalysis.cpp - Utils to help fold compares ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file holds routines to help analyse compare instructions
+// and fold them into constants or other compare instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/CmpInstAnalysis.h"
+#include "llvm/Constants.h"
+#include "llvm/Instructions.h"
+
+using namespace llvm;
+
+/// getICmpCode - Encode an icmp predicate into a three-bit mask. These bits
+/// are carefully arranged to allow folding of expressions such as:
+///
+/// (A < B) | (A > B) --> (A != B)
+///
+/// Note that this is only valid if the first and second predicates have the
+/// same sign. It is illegal to do: (A u< B) | (A s> B)
+///
+/// Three bits are used to represent the condition, as follows:
+/// 0 A > B
+/// 1 A == B
+/// 2 A < B
+///
+/// <=> Value Definition
+/// 000 0 Always false
+/// 001 1 A > B
+/// 010 2 A == B
+/// 011 3 A >= B
+/// 100 4 A < B
+/// 101 5 A != B
+/// 110 6 A <= B
+/// 111 7 Always true
+///
+unsigned llvm::getICmpCode(const ICmpInst *ICI, bool InvertPred) {
+ ICmpInst::Predicate Pred = InvertPred ? ICI->getInversePredicate()
+ : ICI->getPredicate();
+ switch (Pred) {
+ // False -> 0
+ case ICmpInst::ICMP_UGT: return 1; // 001
+ case ICmpInst::ICMP_SGT: return 1; // 001
+ case ICmpInst::ICMP_EQ: return 2; // 010
+ case ICmpInst::ICMP_UGE: return 3; // 011
+ case ICmpInst::ICMP_SGE: return 3; // 011
+ case ICmpInst::ICMP_ULT: return 4; // 100
+ case ICmpInst::ICMP_SLT: return 4; // 100
+ case ICmpInst::ICMP_NE: return 5; // 101
+ case ICmpInst::ICMP_ULE: return 6; // 110
+ case ICmpInst::ICMP_SLE: return 6; // 110
+ // True -> 7
+ default:
+ llvm_unreachable("Invalid ICmp predicate!");
+ }
+}
+
+/// getICmpValue - This is the complement of getICmpCode, which turns an
+/// opcode and two operands into either a constant true or false, or the
+/// predicate for a new ICmp instruction. The sign is passed in to determine
+/// which kind of predicate to use in the new icmp instruction.
+/// A non-NULL return value will be a true or false constant.
+/// A NULL return means a new ICmp is needed; its predicate is returned
+/// in NewICmpPred.
+Value *llvm::getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
+ CmpInst::Predicate &NewICmpPred) {
+ switch (Code) {
+ default: llvm_unreachable("Illegal ICmp code!");
+ case 0: // False.
+ return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
+ case 1: NewICmpPred = Sign ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
+ case 2: NewICmpPred = ICmpInst::ICMP_EQ; break;
+ case 3: NewICmpPred = Sign ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
+ case 4: NewICmpPred = Sign ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
+ case 5: NewICmpPred = ICmpInst::ICMP_NE; break;
+ case 6: NewICmpPred = Sign ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
+ case 7: // True.
+ return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
+ }
+ return NULL;
+}
+
+/// PredicatesFoldable - Return true if both predicates match sign or if at
+/// least one of them is an equality comparison (which is signless).
+bool llvm::PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
+ return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
+ (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
+ (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
+}
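The intended use of these helpers is to combine two comparisons over the same operands by OR-ing (or AND-ing) their 3-bit codes and rebuilding a single compare. A hedged usage sketch (foldOrOfICmps is an illustrative name; callers must first check PredicatesFoldable, and an IRBuilder at the insertion point is assumed):

    #include "llvm/Instructions.h"
    #include "llvm/Support/IRBuilder.h"
    #include "llvm/Transforms/Utils/CmpInstAnalysis.h"

    // (A pred1 B) | (A pred2 B) ==> one compare. Example: codes for A < B
    // (100) and A > B (001) OR to 101, which maps back to A != B.
    llvm::Value *foldOrOfICmps(llvm::ICmpInst *LHS, llvm::ICmpInst *RHS,
                               llvm::IRBuilder<> &B) {
      llvm::Value *A = LHS->getOperand(0), *C = LHS->getOperand(1);
      unsigned Code = llvm::getICmpCode(LHS, false) |
                      llvm::getICmpCode(RHS, false);
      llvm::CmpInst::Predicate NewPred;
      if (llvm::Value *V = llvm::getICmpValue(LHS->isSigned(), Code,
                                              A, C, NewPred))
        return V;                           // folded to constant true/false
      return B.CreateICmp(NewPred, A, C);   // otherwise build the new icmp
    }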
diff --git a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index 5f47ebb..e8c0b80 100644
--- a/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -615,9 +615,10 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
default:
// Otherwise, make the default destination of the switch instruction be one
// of the other successors.
- TheSwitch->setOperand(0, call);
- TheSwitch->setSuccessor(0, TheSwitch->getSuccessor(NumExitBlocks));
- TheSwitch->removeCase(NumExitBlocks); // Remove redundant case
+ TheSwitch->setCondition(call);
+ TheSwitch->setDefaultDest(TheSwitch->getSuccessor(NumExitBlocks));
+ // Remove redundant case
+ TheSwitch->removeCase(SwitchInst::CaseIt(TheSwitch, NumExitBlocks-1));
break;
}
}
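SwitchInst case manipulation has moved from raw operand/successor indices to a dedicated API: the condition goes through setCondition, the default through setDefaultDest, and cases are removed via CaseIt, which indexes cases from 0 and no longer counts the default destination. In isolation, using the same CaseIt(SI, N) constructor as the hunk above (the wrapper name is illustrative):

    #include "llvm/Instructions.h"

    // Remove the case with 0-based index Idx from a switch.
    void removeCaseByIndex(llvm::SwitchInst *SI, unsigned Idx) {
      SI->removeCase(llvm::SwitchInst::CaseIt(SI, Idx));
    }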
diff --git a/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp b/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
index 8cc2649..99b5830 100644
--- a/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
@@ -6,21 +6,12 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file provide the function DemoteRegToStack(). This function takes a
-// virtual register computed by an Instruction and replaces it with a slot in
-// the stack frame, allocated via alloca. It returns the pointer to the
-// AllocaInst inserted. After this function is called on an instruction, we are
-// guaranteed that the only user of the instruction is a store that is
-// immediately after it.
-//
-//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Type.h"
-#include <map>
+#include "llvm/ADT/DenseMap.h"
using namespace llvm;
/// DemoteRegToStack - This function takes a virtual register computed by an
@@ -28,8 +19,7 @@ using namespace llvm;
/// alloca. This allows the CFG to be changed around without fear of
/// invalidating the SSA information for the value. It returns the pointer to
/// the alloca inserted to create a stack slot for I.
-///
-AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
+AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Instruction *AllocaPoint) {
if (I.use_empty()) {
I.eraseFromParent();
@@ -47,21 +37,20 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
F->getEntryBlock().begin());
}
- // Change all of the users of the instruction to read from the stack slot
- // instead.
+ // Change all of the users of the instruction to read from the stack slot.
while (!I.use_empty()) {
Instruction *U = cast<Instruction>(I.use_back());
if (PHINode *PN = dyn_cast<PHINode>(U)) {
// If this is a PHI node, we can't insert a load of the value before the
- // use. Instead, insert the load in the predecessor block corresponding
+ // use. Instead insert the load in the predecessor block corresponding
// to the incoming value.
//
// Note that if there are multiple edges from a basic block to this PHI
- // node that we cannot multiple loads. The problem is that the resultant
- // PHI node will have multiple values (from each load) coming in from the
- // same block, which is illegal SSA form. For this reason, we keep track
- // and reuse loads we insert.
- std::map<BasicBlock*, Value*> Loads;
+ // node that we cannot have multiple loads. The problem is that the
+ // resulting PHI node will have multiple values (from each load) coming in
+ // from the same block, which is illegal SSA form. For this reason, we
+ // keep track of and reuse loads we insert.
+ DenseMap<BasicBlock*, Value*> Loads;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) == &I) {
Value *&V = Loads[PN->getIncomingBlock(i)];
@@ -81,9 +70,9 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
}
- // Insert stores of the computed value into the stack slot. We have to be
- // careful is I is an invoke instruction though, because we can't insert the
- // store AFTER the terminator instruction.
+ // Insert stores of the computed value into the stack slot. We have to be
+ // careful if I is an invoke instruction, because we can't insert the store
+ // AFTER the terminator instruction.
BasicBlock::iterator InsertPt;
if (!isa<TerminatorInst>(I)) {
InsertPt = &I;
@@ -97,18 +86,17 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
InsertPt = II.getNormalDest()->begin();
}
- for (; isa<PHINode>(InsertPt); ++InsertPt)
- /* empty */; // Don't insert before any PHI nodes.
- new StoreInst(&I, Slot, InsertPt);
+ for (; isa<PHINode>(InsertPt) || isa<LandingPadInst>(InsertPt); ++InsertPt)
+ /* empty */; // Don't insert before PHI nodes or landingpad instrs.
+ new StoreInst(&I, Slot, InsertPt);
return Slot;
}
-
-/// DemotePHIToStack - This function takes a virtual register computed by a phi
-/// node and replaces it with a slot in the stack frame, allocated via alloca.
-/// The phi node is deleted and it returns the pointer to the alloca inserted.
-AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
+/// DemotePHIToStack - This function takes a virtual register computed by a PHI
+/// node and replaces it with a slot in the stack frame allocated via alloca.
+/// The PHI node is deleted. It returns the pointer to the alloca inserted.
+AllocaInst *llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
if (P->use_empty()) {
P->eraseFromParent();
return 0;
@@ -125,7 +113,7 @@ AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
F->getEntryBlock().begin());
}
- // Iterate over each operand, insert store in each predecessor.
+ // Iterate over each operand inserting a store in each predecessor.
for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
if (InvokeInst *II = dyn_cast<InvokeInst>(P->getIncomingValue(i))) {
assert(II->getParent() != P->getIncomingBlock(i) &&
@@ -135,12 +123,11 @@ AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
P->getIncomingBlock(i)->getTerminator());
}
- // Insert load in place of the phi and replace all uses.
+ // Insert a load in place of the PHI and replace all uses.
Value *V = new LoadInst(Slot, P->getName()+".reload", P);
P->replaceAllUsesWith(V);
- // Delete phi.
+ // Delete PHI.
P->eraseFromParent();
-
return Slot;
}
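A short usage sketch for the two entry points: demote a register (or PHI) to a stack slot before aggressive CFG surgery so SSA form cannot be invalidated. The wrappers are illustrative, and the default arguments (non-volatile loads, alloca placed in the entry block) are assumed to match the signatures above:

    #include "llvm/Instructions.h"
    #include "llvm/Transforms/Utils/Local.h"

    // After the call, every former use of I reads from a fresh load and the
    // computed value is stored right next to its definition.
    llvm::AllocaInst *spillValue(llvm::Instruction &I) {
      return llvm::DemoteRegToStack(I);
    }

    llvm::AllocaInst *spillPHI(llvm::PHINode *P) {
      return llvm::DemotePHIToStack(P); // deletes P, returns its stack slot
    }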
diff --git a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 5464dbc..d2b167a 100644
--- a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -10,13 +10,6 @@
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
-// The code in this file for handling inlines through invoke
-// instructions preserves semantics only under some assumptions about
-// the behavior of unwinders which correspond to gcc-style libUnwind
-// exception personality functions. Eventually the IR will be
-// improved to make this unnecessary, but until then, this code is
-// marked [LIBUNWIND].
-//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
@@ -38,271 +31,52 @@
#include "llvm/Support/IRBuilder.h"
using namespace llvm;
-bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
- return InlineFunction(CallSite(CI), IFI);
-}
-bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
- return InlineFunction(CallSite(II), IFI);
-}
-
-// FIXME: New EH - Remove the functions marked [LIBUNWIND] when new EH is
-// turned on.
-
-/// [LIBUNWIND] Look for an llvm.eh.exception call in the given block.
-static EHExceptionInst *findExceptionInBlock(BasicBlock *bb) {
- for (BasicBlock::iterator i = bb->begin(), e = bb->end(); i != e; i++) {
- EHExceptionInst *exn = dyn_cast<EHExceptionInst>(i);
- if (exn) return exn;
- }
-
- return 0;
-}
-
-/// [LIBUNWIND] Look for the 'best' llvm.eh.selector instruction for
-/// the given llvm.eh.exception call.
-static EHSelectorInst *findSelectorForException(EHExceptionInst *exn) {
- BasicBlock *exnBlock = exn->getParent();
-
- EHSelectorInst *outOfBlockSelector = 0;
- for (Instruction::use_iterator
- ui = exn->use_begin(), ue = exn->use_end(); ui != ue; ++ui) {
- EHSelectorInst *sel = dyn_cast<EHSelectorInst>(*ui);
- if (!sel) continue;
-
- // Immediately accept an eh.selector in the same block as the
- // excepton call.
- if (sel->getParent() == exnBlock) return sel;
-
- // Otherwise, use the first selector we see.
- if (!outOfBlockSelector) outOfBlockSelector = sel;
- }
-
- return outOfBlockSelector;
+bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
+ bool InsertLifetime) {
+ return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
-
-/// [LIBUNWIND] Find the (possibly absent) call to @llvm.eh.selector
-/// in the given landing pad. In principle, llvm.eh.exception is
-/// required to be in the landing pad; in practice, SplitCriticalEdge
-/// can break that invariant, and then inlining can break it further.
-/// There's a real need for a reliable solution here, but until that
-/// happens, we have some fragile workarounds here.
-static EHSelectorInst *findSelectorForLandingPad(BasicBlock *lpad) {
- // Look for an exception call in the actual landing pad.
- EHExceptionInst *exn = findExceptionInBlock(lpad);
- if (exn) return findSelectorForException(exn);
-
- // Okay, if that failed, look for one in an obvious successor. If
- // we find one, we'll fix the IR by moving things back to the
- // landing pad.
-
- bool dominates = true; // does the lpad dominate the exn call
- BasicBlock *nonDominated = 0; // if not, the first non-dominated block
- BasicBlock *lastDominated = 0; // and the block which branched to it
-
- BasicBlock *exnBlock = lpad;
-
- // We need to protect against lpads that lead into infinite loops.
- SmallPtrSet<BasicBlock*,4> visited;
- visited.insert(exnBlock);
-
- do {
- // We're not going to apply this hack to anything more complicated
- // than a series of unconditional branches, so if the block
- // doesn't terminate in an unconditional branch, just fail. More
- // complicated cases can arise when, say, sinking a call into a
- // split unwind edge and then inlining it; but that can do almost
- // *anything* to the CFG, including leaving the selector
- // completely unreachable. The only way to fix that properly is
- // to (1) prohibit transforms which move the exception or selector
- // values away from the landing pad, e.g. by producing them with
- // instructions that are pinned to an edge like a phi, or
- // producing them with not-really-instructions, and (2) making
- // transforms which split edges deal with that.
- BranchInst *branch = dyn_cast<BranchInst>(&exnBlock->back());
- if (!branch || branch->isConditional()) return 0;
-
- BasicBlock *successor = branch->getSuccessor(0);
-
- // Fail if we found an infinite loop.
- if (!visited.insert(successor)) return 0;
-
- // If the successor isn't dominated by exnBlock:
- if (!successor->getSinglePredecessor()) {
- // We don't want to have to deal with threading the exception
- // through multiple levels of phi, so give up if we've already
- // followed a non-dominating edge.
- if (!dominates) return 0;
-
- // Otherwise, remember this as a non-dominating edge.
- dominates = false;
- nonDominated = successor;
- lastDominated = exnBlock;
- }
-
- exnBlock = successor;
-
- // Can we stop here?
- exn = findExceptionInBlock(exnBlock);
- } while (!exn);
-
- // Look for a selector call for the exception we found.
- EHSelectorInst *selector = findSelectorForException(exn);
- if (!selector) return 0;
-
- // The easy case is when the landing pad still dominates the
- // exception call, in which case we can just move both calls back to
- // the landing pad.
- if (dominates) {
- selector->moveBefore(lpad->getFirstNonPHI());
- exn->moveBefore(selector);
- return selector;
- }
-
- // Otherwise, we have to split at the first non-dominating block.
- // The CFG looks basically like this:
- // lpad:
- // phis_0
- // insnsAndBranches_1
- // br label %nonDominated
- // nonDominated:
- // phis_2
- // insns_3
- // %exn = call i8* @llvm.eh.exception()
- // insnsAndBranches_4
- // %selector = call @llvm.eh.selector(i8* %exn, ...
- // We need to turn this into:
- // lpad:
- // phis_0
- // %exn0 = call i8* @llvm.eh.exception()
- // %selector0 = call @llvm.eh.selector(i8* %exn0, ...
- // insnsAndBranches_1
- // br label %split // from lastDominated
- // nonDominated:
- // phis_2 (without edge from lastDominated)
- // %exn1 = call i8* @llvm.eh.exception()
- // %selector1 = call i8* @llvm.eh.selector(i8* %exn1, ...
- // br label %split
- // split:
- // phis_2 (edge from lastDominated, edge from split)
- // %exn = phi ...
- // %selector = phi ...
- // insns_3
- // insnsAndBranches_4
-
- assert(nonDominated);
- assert(lastDominated);
-
- // First, make clones of the intrinsics to go in lpad.
- EHExceptionInst *lpadExn = cast<EHExceptionInst>(exn->clone());
- EHSelectorInst *lpadSelector = cast<EHSelectorInst>(selector->clone());
- lpadSelector->setArgOperand(0, lpadExn);
- lpadSelector->insertBefore(lpad->getFirstNonPHI());
- lpadExn->insertBefore(lpadSelector);
-
- // Split the non-dominated block.
- BasicBlock *split =
- nonDominated->splitBasicBlock(nonDominated->getFirstNonPHI(),
- nonDominated->getName() + ".lpad-fix");
-
- // Redirect the last dominated branch there.
- cast<BranchInst>(lastDominated->back()).setSuccessor(0, split);
-
- // Move the existing intrinsics to the end of the old block.
- selector->moveBefore(&nonDominated->back());
- exn->moveBefore(selector);
-
- Instruction *splitIP = &split->front();
-
- // For all the phis in nonDominated, make a new phi in split to join
- // that phi with the edge from lastDominated.
- for (BasicBlock::iterator
- i = nonDominated->begin(), e = nonDominated->end(); i != e; ++i) {
- PHINode *phi = dyn_cast<PHINode>(i);
- if (!phi) break;
-
- PHINode *splitPhi = PHINode::Create(phi->getType(), 2, phi->getName(),
- splitIP);
- phi->replaceAllUsesWith(splitPhi);
- splitPhi->addIncoming(phi, nonDominated);
- splitPhi->addIncoming(phi->removeIncomingValue(lastDominated),
- lastDominated);
- }
-
- // Make new phis for the exception and selector.
- PHINode *exnPhi = PHINode::Create(exn->getType(), 2, "", splitIP);
- exn->replaceAllUsesWith(exnPhi);
- selector->setArgOperand(0, exn); // except for this use
- exnPhi->addIncoming(exn, nonDominated);
- exnPhi->addIncoming(lpadExn, lastDominated);
-
- PHINode *selectorPhi = PHINode::Create(selector->getType(), 2, "", splitIP);
- selector->replaceAllUsesWith(selectorPhi);
- selectorPhi->addIncoming(selector, nonDominated);
- selectorPhi->addIncoming(lpadSelector, lastDominated);
-
- return lpadSelector;
+bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+ bool InsertLifetime) {
+ return InlineFunction(CallSite(II), IFI, InsertLifetime);
}
namespace {
/// A class for recording information about inlining through an invoke.
class InvokeInliningInfo {
- BasicBlock *OuterUnwindDest;
- EHSelectorInst *OuterSelector;
- BasicBlock *InnerUnwindDest;
- PHINode *InnerExceptionPHI;
- PHINode *InnerSelectorPHI;
- SmallVector<Value*, 8> UnwindDestPHIValues;
-
- // FIXME: New EH - These will replace the analogous ones above.
BasicBlock *OuterResumeDest; //< Destination of the invoke's unwind.
BasicBlock *InnerResumeDest; //< Destination for the callee's resume.
LandingPadInst *CallerLPad; //< LandingPadInst associated with the invoke.
PHINode *InnerEHValuesPHI; //< PHI for EH values from landingpad insts.
+ SmallVector<Value*, 8> UnwindDestPHIValues;
public:
InvokeInliningInfo(InvokeInst *II)
- : OuterUnwindDest(II->getUnwindDest()), OuterSelector(0),
- InnerUnwindDest(0), InnerExceptionPHI(0), InnerSelectorPHI(0),
- OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
+ : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
CallerLPad(0), InnerEHValuesPHI(0) {
// If there are PHI nodes in the unwind destination block, we need to keep
// track of which values came into them from the invoke before removing
// the edge from this block.
llvm::BasicBlock *InvokeBB = II->getParent();
- BasicBlock::iterator I = OuterUnwindDest->begin();
+ BasicBlock::iterator I = OuterResumeDest->begin();
for (; isa<PHINode>(I); ++I) {
// Save the value to use for this edge.
PHINode *PHI = cast<PHINode>(I);
UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
}
- // FIXME: With the new EH, this if/dyn_cast should be a 'cast'.
- if (LandingPadInst *LPI = dyn_cast<LandingPadInst>(I)) {
- CallerLPad = LPI;
- }
+ CallerLPad = cast<LandingPadInst>(I);
}
- /// The outer unwind destination is the target of unwind edges
- /// introduced for calls within the inlined function.
- BasicBlock *getOuterUnwindDest() const {
- return OuterUnwindDest;
+ /// getOuterResumeDest - The outer unwind destination is the target of
+ /// unwind edges introduced for calls within the inlined function.
+ BasicBlock *getOuterResumeDest() const {
+ return OuterResumeDest;
}
- EHSelectorInst *getOuterSelector() {
- if (!OuterSelector)
- OuterSelector = findSelectorForLandingPad(OuterUnwindDest);
- return OuterSelector;
- }
-
- BasicBlock *getInnerUnwindDest();
-
- // FIXME: New EH - Rename when new EH is turned on.
- BasicBlock *getInnerUnwindDestNewEH();
+ BasicBlock *getInnerResumeDest();
LandingPadInst *getLandingPadInst() const { return CallerLPad; }
- bool forwardEHResume(CallInst *call, BasicBlock *src);
-
/// forwardResume - Forward the 'resume' instruction to the caller's landing
/// pad block. When the landing pad block has only one predecessor, this is
/// a simple branch. When there is more than one predecessor, we need to
@@ -314,7 +88,7 @@ namespace {
/// destination block for the given basic block, using the values for the
/// original invoke's source block.
void addIncomingPHIValuesFor(BasicBlock *BB) const {
- addIncomingPHIValuesForInto(BB, OuterUnwindDest);
+ addIncomingPHIValuesForInto(BB, OuterResumeDest);
}
void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
@@ -327,113 +101,8 @@ namespace {
};
}
-/// [LIBUNWIND] Get or create a target for the branch out of rewritten calls to
-/// llvm.eh.resume.
-BasicBlock *InvokeInliningInfo::getInnerUnwindDest() {
- if (InnerUnwindDest) return InnerUnwindDest;
-
- // Find and hoist the llvm.eh.exception and llvm.eh.selector calls
- // in the outer landing pad to immediately following the phis.
- EHSelectorInst *selector = getOuterSelector();
- if (!selector) return 0;
-
- // The call to llvm.eh.exception *must* be in the landing pad.
- Instruction *exn = cast<Instruction>(selector->getArgOperand(0));
- assert(exn->getParent() == OuterUnwindDest);
-
- // TODO: recognize when we've already done this, so that we don't
- // get a linear number of these when inlining calls into lots of
- // invokes with the same landing pad.
-
- // Do the hoisting.
- Instruction *splitPoint = exn->getParent()->getFirstNonPHI();
- assert(splitPoint != selector && "selector-on-exception dominance broken!");
- if (splitPoint == exn) {
- selector->removeFromParent();
- selector->insertAfter(exn);
- splitPoint = selector->getNextNode();
- } else {
- exn->moveBefore(splitPoint);
- selector->moveBefore(splitPoint);
- }
-
- // Split the landing pad.
- InnerUnwindDest = OuterUnwindDest->splitBasicBlock(splitPoint,
- OuterUnwindDest->getName() + ".body");
-
- // The number of incoming edges we expect to the inner landing pad.
- const unsigned phiCapacity = 2;
-
- // Create corresponding new phis for all the phis in the outer landing pad.
- BasicBlock::iterator insertPoint = InnerUnwindDest->begin();
- BasicBlock::iterator I = OuterUnwindDest->begin();
- for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
- PHINode *outerPhi = cast<PHINode>(I);
- PHINode *innerPhi = PHINode::Create(outerPhi->getType(), phiCapacity,
- outerPhi->getName() + ".lpad-body",
- insertPoint);
- outerPhi->replaceAllUsesWith(innerPhi);
- innerPhi->addIncoming(outerPhi, OuterUnwindDest);
- }
-
- // Create a phi for the exception value...
- InnerExceptionPHI = PHINode::Create(exn->getType(), phiCapacity,
- "exn.lpad-body", insertPoint);
- exn->replaceAllUsesWith(InnerExceptionPHI);
- selector->setArgOperand(0, exn); // restore this use
- InnerExceptionPHI->addIncoming(exn, OuterUnwindDest);
-
- // ...and the selector.
- InnerSelectorPHI = PHINode::Create(selector->getType(), phiCapacity,
- "selector.lpad-body", insertPoint);
- selector->replaceAllUsesWith(InnerSelectorPHI);
- InnerSelectorPHI->addIncoming(selector, OuterUnwindDest);
-
- // All done.
- return InnerUnwindDest;
-}
-
-/// [LIBUNWIND] Try to forward the given call, which logically occurs
-/// at the end of the given block, as a branch to the inner unwind
-/// block. Returns true if the call was forwarded.
-bool InvokeInliningInfo::forwardEHResume(CallInst *call, BasicBlock *src) {
- // First, check whether this is a call to the intrinsic.
- Function *fn = dyn_cast<Function>(call->getCalledValue());
- if (!fn || fn->getName() != "llvm.eh.resume")
- return false;
-
- // At this point, we need to return true on all paths, because
- // otherwise we'll construct an invoke of the intrinsic, which is
- // not well-formed.
-
- // Try to find or make an inner unwind dest, which will fail if we
- // can't find a selector call for the outer unwind dest.
- BasicBlock *dest = getInnerUnwindDest();
- bool hasSelector = (dest != 0);
-
- // If we failed, just use the outer unwind dest, dropping the
- // exception and selector on the floor.
- if (!hasSelector)
- dest = OuterUnwindDest;
-
- // Make a branch.
- BranchInst::Create(dest, src);
-
- // Update the phis in the destination. They were inserted in an
- // order which makes this work.
- addIncomingPHIValuesForInto(src, dest);
-
- if (hasSelector) {
- InnerExceptionPHI->addIncoming(call->getArgOperand(0), src);
- InnerSelectorPHI->addIncoming(call->getArgOperand(1), src);
- }
-
- return true;
-}
-
-/// Get or create a target for the branch from ResumeInsts.
-BasicBlock *InvokeInliningInfo::getInnerUnwindDestNewEH() {
- // FIXME: New EH - rename this function when new EH is turned on.
+/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
+BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
if (InnerResumeDest) return InnerResumeDest;
// Split the landing pad.
@@ -472,7 +141,7 @@ BasicBlock *InvokeInliningInfo::getInnerUnwindDestNewEH() {
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI) {
- BasicBlock *Dest = getInnerUnwindDestNewEH();
+ BasicBlock *Dest = getInnerResumeDest();
BasicBlock *Src = RI->getParent();
BranchInst::Create(Dest, Src);
@@ -485,14 +154,6 @@ void InvokeInliningInfo::forwardResume(ResumeInst *RI) {
RI->eraseFromParent();
}
-/// [LIBUNWIND] Check whether this selector is "only cleanups":
-/// call i32 @llvm.eh.selector(blah, blah, i32 0)
-static bool isCleanupOnlySelector(EHSelectorInst *selector) {
- if (selector->getNumArgOperands() != 3) return false;
- ConstantInt *val = dyn_cast<ConstantInt>(selector->getArgOperand(2));
- return (val && val->isZero());
-}
-
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if so,
@@ -507,77 +168,34 @@ static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
Instruction *I = BBI++;
- if (LPI) // FIXME: New EH - This won't be NULL in the new EH.
- if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
- unsigned NumClauses = LPI->getNumClauses();
- L->reserveClauses(NumClauses);
- for (unsigned i = 0; i != NumClauses; ++i)
- L->addClause(LPI->getClause(i));
- }
+ if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
+ unsigned NumClauses = LPI->getNumClauses();
+ L->reserveClauses(NumClauses);
+ for (unsigned i = 0; i != NumClauses; ++i)
+ L->addClause(LPI->getClause(i));
+ }
// We only need to check for function calls: inlined invoke
// instructions require no special handling.
CallInst *CI = dyn_cast<CallInst>(I);
- if (CI == 0) continue;
-
- // LIBUNWIND: merge selector instructions.
- if (EHSelectorInst *Inner = dyn_cast<EHSelectorInst>(CI)) {
- EHSelectorInst *Outer = Invoke.getOuterSelector();
- if (!Outer) continue;
-
- bool innerIsOnlyCleanup = isCleanupOnlySelector(Inner);
- bool outerIsOnlyCleanup = isCleanupOnlySelector(Outer);
-
- // If both selectors contain only cleanups, we don't need to do
- // anything. TODO: this is really just a very specific instance
- // of a much more general optimization.
- if (innerIsOnlyCleanup && outerIsOnlyCleanup) continue;
-
- // Otherwise, we just append the outer selector to the inner selector.
- SmallVector<Value*, 16> NewSelector;
- for (unsigned i = 0, e = Inner->getNumArgOperands(); i != e; ++i)
- NewSelector.push_back(Inner->getArgOperand(i));
- for (unsigned i = 2, e = Outer->getNumArgOperands(); i != e; ++i)
- NewSelector.push_back(Outer->getArgOperand(i));
-
- CallInst *NewInner =
- IRBuilder<>(Inner).CreateCall(Inner->getCalledValue(), NewSelector);
- // No need to copy attributes, calling convention, etc.
- NewInner->takeName(Inner);
- Inner->replaceAllUsesWith(NewInner);
- Inner->eraseFromParent();
- continue;
- }
-
+
// If this call cannot unwind, don't convert it to an invoke.
- if (CI->doesNotThrow())
+ if (!CI || CI->doesNotThrow())
continue;
-
- // Convert this function call into an invoke instruction.
- // First, split the basic block.
+
+ // Convert this function call into an invoke instruction. First, split the
+ // basic block.
BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");
// Delete the unconditional branch inserted by splitBasicBlock
BB->getInstList().pop_back();
- // LIBUNWIND: If this is a call to @llvm.eh.resume, just branch
- // directly to the new landing pad.
- if (Invoke.forwardEHResume(CI, BB)) {
- // TODO: 'Split' is now unreachable; clean it up.
-
- // We want to leave the original call intact so that the call
- // graph and other structures won't get misled. We also have to
- // avoid processing the next block, or we'll iterate here forever.
- return true;
- }
-
- // Otherwise, create the new invoke instruction.
+ // Create the new invoke instruction.
ImmutableCallSite CS(CI);
SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
- InvokeInst *II =
- InvokeInst::Create(CI->getCalledValue(), Split,
- Invoke.getOuterUnwindDest(),
- InvokeArgs, CI->getName(), BB);
+ InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
+ Invoke.getOuterResumeDest(),
+ InvokeArgs, CI->getName(), BB);
II->setCallingConv(CI->getCallingConv());
II->setAttributes(CI->getAttributes());
@@ -585,21 +203,20 @@ static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
// updates the CallGraph if present, because it uses a WeakVH.
CI->replaceAllUsesWith(II);
- Split->getInstList().pop_front(); // Delete the original call
+ // Delete the original call
+ Split->getInstList().pop_front();
- // Update any PHI nodes in the exceptional block to indicate that
- // there is now a new entry in them.
+ // Update any PHI nodes in the exceptional block to indicate that there is
+ // now a new entry in them.
Invoke.addIncomingPHIValuesFor(BB);
return false;
}
return false;
}
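
// A condensed, hand-written sketch of the call-to-invoke rewrite performed
// above. This is an illustration only, assuming the 3.1-era CallSite and
// InvokeInst APIs this patch uses; the helper name is invented for the
// example.
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

// Turn a potentially-throwing call into an invoke whose unwind edge targets
// UnwindDest, splitting the block at the call site.
static void convertCallToInvoke(CallInst *CI, BasicBlock *UnwindDest) {
  BasicBlock *BB = CI->getParent();
  // Everything from the call onward moves to the ".noexc" continuation block.
  BasicBlock *Cont = BB->splitBasicBlock(CI, CI->getName() + ".noexc");
  BB->getInstList().pop_back(); // drop the branch added by splitBasicBlock
  ImmutableCallSite CS(CI);
  SmallVector<Value*, 8> Args(CS.arg_begin(), CS.arg_end());
  InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Cont, UnwindDest,
                                      Args, CI->getName(), BB);
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());
  CI->replaceAllUsesWith(II);
  Cont->getInstList().pop_front(); // erase the original call
}
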
-
/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
-/// in the body of the inlined function into invokes and turn unwind
-/// instructions into branches to the invoke unwind dest.
+/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
@@ -614,7 +231,7 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
// start of the inlined code to its end, checking for stuff we need to
// rewrite. If the code doesn't have calls or unwinds, we know there is
// nothing to rewrite.
- if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
+ if (!InlinedCodeInfo.ContainsCalls) {
// Now that everything is happy, we have one final detail. The PHI nodes in
// the exception destination block still have entries due to the original
// invoke instruction. Eliminate these entries (which might even delete the
@@ -628,30 +245,13 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
if (InlinedCodeInfo.ContainsCalls)
if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
- // Honor a request to skip the next block. We don't need to
- // consider UnwindInsts in this case either.
+ // Honor a request to skip the next block.
++BB;
continue;
}
- if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- // An UnwindInst requires special handling when it gets inlined into an
- // invoke site. Once this happens, we know that the unwind would cause
- // a control transfer to the invoke exception destination, so we can
- // transform it into a direct branch to the exception destination.
- BranchInst::Create(InvokeDest, UI);
-
- // Delete the unwind instruction!
- UI->eraseFromParent();
-
- // Update any PHI nodes in the exceptional block to indicate that
- // there is now a new entry in them.
- Invoke.addIncomingPHIValuesFor(BB);
- }
-
- if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) {
+ if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
Invoke.forwardResume(RI);
- }
}
// Now that everything is happy, we have one final detail. The PHI nodes in
@@ -836,8 +436,8 @@ static bool hasLifetimeMarkers(AllocaInst *AI) {
return false;
}
-/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to recursively
-/// update InlinedAtEntry of a DebugLoc.
+/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
+/// recursively update InlinedAtEntry of a DebugLoc.
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
const DebugLoc &InlinedAtDL,
LLVMContext &Ctx) {
@@ -847,16 +447,15 @@ static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
NewInlinedAtDL.getAsMDNode(Ctx));
}
-
+
return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
InlinedAtDL.getAsMDNode(Ctx));
}
-
/// fixupLineNumbers - Update inlined instructions' line numbers to encode
/// the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
- Instruction *TheCall) {
+ Instruction *TheCall) {
DebugLoc TheCallDL = TheCall->getDebugLoc();
if (TheCallDL.isUnknown())
return;
@@ -878,18 +477,18 @@ static void fixupLineNumbers(Function *Fn, Function::iterator FI,
}
}
-// InlineFunction - This function inlines the called function into the basic
-// block of the caller. This returns false if it is not possible to inline this
-// call. The program is still in a well defined state if this occurs though.
-//
-// Note that this only does one level of inlining. For example, if the
-// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
-// exists in the instruction stream. Similarly this will inline a recursive
-// function by one level.
-//
-bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
+/// InlineFunction - This function inlines the called function into the basic
+/// block of the caller. This returns false if it is not possible to inline
+/// this call. The program is still in a well defined state if this occurs
+/// though.
+///
+/// Note that this only does one level of inlining. For example, if the
+/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
+/// exists in the instruction stream. Similarly this will inline a recursive
+/// function by one level.
+bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+ bool InsertLifetime) {
Instruction *TheCall = CS.getInstruction();
- LLVMContext &Context = TheCall->getContext();
assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
"Instruction not in function!");
@@ -924,43 +523,40 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
return false;
}
- // Find the personality function used by the landing pads of the caller. If it
- // exists, then check to see that it matches the personality function used in
- // the callee.
- for (Function::const_iterator
- I = Caller->begin(), E = Caller->end(); I != E; ++I)
+ // Get the personality function from the callee if it contains a landing pad.
+ Value *CalleePersonality = 0;
+ for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
+ I != E; ++I)
if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
const BasicBlock *BB = II->getUnwindDest();
- // FIXME: This 'isa' here should become go away once the new EH system is
- // in place.
- if (!isa<LandingPadInst>(BB->getFirstNonPHI()))
- continue;
- const LandingPadInst *LP = cast<LandingPadInst>(BB->getFirstNonPHI());
- const Value *CallerPersFn = LP->getPersonalityFn();
-
- // If the personality functions match, then we can perform the
- // inlining. Otherwise, we can't inline.
- // TODO: This isn't 100% true. Some personality functions are proper
- // supersets of others and can be used in place of the other.
- for (Function::const_iterator
- I = CalledFunc->begin(), E = CalledFunc->end(); I != E; ++I)
- if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
- const BasicBlock *BB = II->getUnwindDest();
- // FIXME: This 'if/dyn_cast' here should become a normal 'cast' once
- // the new EH system is in place.
- if (const LandingPadInst *LP =
- dyn_cast<LandingPadInst>(BB->getFirstNonPHI()))
- if (CallerPersFn != LP->getPersonalityFn())
- return false;
- break;
- }
-
+ const LandingPadInst *LP = BB->getLandingPadInst();
+ CalleePersonality = LP->getPersonalityFn();
break;
}
+ // Find the personality function used by the landing pads of the caller. If it
+ // exists, then check to see that it matches the personality function used in
+ // the callee.
+ if (CalleePersonality) {
+ for (Function::const_iterator I = Caller->begin(), E = Caller->end();
+ I != E; ++I)
+ if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
+ const BasicBlock *BB = II->getUnwindDest();
+ const LandingPadInst *LP = BB->getLandingPadInst();
+
+ // If the personality functions match, then we can perform the
+ // inlining. Otherwise, we can't inline.
+ // TODO: This isn't 100% true. Some personality functions are proper
+ // supersets of others and can be used in place of the other.
+ if (LP->getPersonalityFn() != CalleePersonality)
+ return false;
+
+ break;
+ }
+ }
+
// Get an iterator to the last basic block in the function, which will have
// the new function inlined after it.
- //
Function::iterator LastBlock = &Caller->back();
// Make sure to capture all of the return instructions from the cloned
@@ -987,7 +583,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// by them explicit. However, we don't do this if the callee is readonly
// or readnone, because the copy would be unneeded: the callee doesn't
// modify the struct.
- if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal)) {
+ if (CS.isByValArgument(ArgNo)) {
ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
CalledFunc->getParamAlignment(ArgNo+1));
@@ -1023,7 +619,6 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// block for the callee, move them to the entry block of the caller. First
// calculate which instruction they should be inserted before. We insert the
// instructions at the end of the current alloca list.
- //
{
BasicBlock::iterator InsertPoint = Caller->begin()->begin();
for (BasicBlock::iterator I = FirstNewBlock->begin(),
@@ -1063,7 +658,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// Leave lifetime markers for the static alloca's, scoping them to the
// function we just inlined.
- if (!IFI.StaticAllocas.empty()) {
+ if (InsertLifetime && !IFI.StaticAllocas.empty()) {
IRBuilder<> builder(FirstNewBlock->begin());
for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
AllocaInst *AI = IFI.StaticAllocas[ai];
@@ -1098,20 +693,6 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
}
-
- // Count the number of StackRestore calls we insert.
- unsigned NumStackRestores = Returns.size();
-
- // If we are inlining an invoke instruction, insert restores before each
- // unwind. These unwinds will be rewritten into branches later.
- if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
- for (Function::iterator BB = FirstNewBlock, E = Caller->end();
- BB != E; ++BB)
- if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- IRBuilder<>(UI).CreateCall(StackRestore, SavedPtr);
- ++NumStackRestores;
- }
- }
}
// If we are inlining tail call instruction through a call site that isn't
@@ -1131,21 +712,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
}
}
- // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
- // instructions are unreachable.
- if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
- for (Function::iterator BB = FirstNewBlock, E = Caller->end();
- BB != E; ++BB) {
- TerminatorInst *Term = BB->getTerminator();
- if (isa<UnwindInst>(Term)) {
- new UnreachableInst(Context, Term);
- BB->getInstList().erase(Term);
- }
- }
-
// If we are inlining for an invoke instruction, we must make sure to rewrite
- // any inlined 'unwind' instructions into branches to the invoke exception
- // destination, and call instructions into invoke instructions.
+ // any call instructions into invoke instructions.
if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);
@@ -1308,11 +876,12 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// If we inserted a phi node, check to see if it has a single value (e.g. all
// the entries are the same or undef). If so, remove the PHI so it doesn't
// block other optimizations.
- if (PHI)
+ if (PHI) {
if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
PHI->replaceAllUsesWith(V);
PHI->eraseFromParent();
}
+ }
return true;
}
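
// A minimal caller-side sketch of the revised entry point (assumed driver
// code, not part of this patch): the new InsertLifetime flag controls whether
// lifetime markers are emitted for the callee's static allocas.
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Cloning.h"
using namespace llvm;

// Inline a call site without emitting llvm.lifetime markers.
static bool inlineWithoutLifetimeMarkers(CallSite CS) {
  InlineFunctionInfo IFI; // no CallGraph or TargetData supplied
  return InlineFunction(CS, IFI, /*InsertLifetime=*/false);
}
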
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index 7034feb..d1c4d59 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -28,6 +28,7 @@
#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
@@ -105,33 +106,32 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
// If we are switching on a constant, we can convert the switch into a
// single branch instruction!
ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
- BasicBlock *TheOnlyDest = SI->getSuccessor(0); // The default dest
+ BasicBlock *TheOnlyDest = SI->getDefaultDest();
BasicBlock *DefaultDest = TheOnlyDest;
- assert(TheOnlyDest == SI->getDefaultDest() &&
- "Default destination is not successor #0?");
// Figure out which case it goes to.
- for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) {
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
// Found case matching a constant operand?
- if (SI->getSuccessorValue(i) == CI) {
- TheOnlyDest = SI->getSuccessor(i);
+ if (i.getCaseValue() == CI) {
+ TheOnlyDest = i.getCaseSuccessor();
break;
}
// Check to see if this branch is going to the same place as the default
// dest. If so, eliminate it as an explicit compare.
- if (SI->getSuccessor(i) == DefaultDest) {
+ if (i.getCaseSuccessor() == DefaultDest) {
// Remove this entry.
DefaultDest->removePredecessor(SI->getParent());
SI->removeCase(i);
- --i; --e; // Don't skip an entry...
+ --i; --e;
continue;
}
// Otherwise, check to see if the switch only branches to one destination.
    // We do this by resetting "TheOnlyDest" to null when we find two non-equal
// destinations.
- if (SI->getSuccessor(i) != TheOnlyDest) TheOnlyDest = 0;
+ if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = 0;
}
if (CI && !TheOnlyDest) {
@@ -165,14 +165,16 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
return true;
}
- if (SI->getNumSuccessors() == 2) {
+ if (SI->getNumCases() == 1) {
// Otherwise, we can fold this switch into a conditional branch
// instruction if it has only one non-default destination.
+ SwitchInst::CaseIt FirstCase = SI->case_begin();
Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
- SI->getSuccessorValue(1), "cond");
+ FirstCase.getCaseValue(), "cond");
// Insert the new branch.
- Builder.CreateCondBr(Cond, SI->getSuccessor(1), SI->getSuccessor(0));
+ Builder.CreateCondBr(Cond, FirstCase.getCaseSuccessor(),
+ SI->getDefaultDest());
// Delete the old switch.
SI->eraseFromParent();
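
// A small sketch of the new case-iterator lookup pattern (assuming the
// 3.1-era SwitchInst API used above; SwitchInst::findCaseValue performs the
// same search in one call).
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
using namespace llvm;

// Resolve the successor a switch transfers to for a known constant condition.
static BasicBlock *successorForConstant(SwitchInst *SI, ConstantInt *CI) {
  for (SwitchInst::CaseIt I = SI->case_begin(), E = SI->case_end(); I != E; ++I)
    if (I.getCaseValue() == CI) // ConstantInts are uniqued; compare pointers
      return I.getCaseSuccessor();
  return SI->getDefaultDest(); // no explicit case matched
}
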
@@ -257,6 +259,13 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) {
II->getIntrinsicID() == Intrinsic::lifetime_end)
return isa<UndefValue>(II->getArgOperand(1));
}
+
+ if (extractMallocCall(I)) return true;
+
+ if (CallInst *CI = isFreeCall(I))
+ if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
+ return C->isNullValue() || isa<UndefValue>(C);
+
return false;
}
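
// The two new checks above, distilled into a standalone helper (a sketch
// under the assumption of the 3.1-era MemoryBuiltins API; the helper name is
// invented for the example).
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/MemoryBuiltins.h"
using namespace llvm;

// An unused malloc, or a free of a null/undef pointer, is trivially dead.
static bool isDeadMallocOrFree(Instruction *I) {
  if (extractMallocCall(I))
    return I->use_empty();
  if (CallInst *FC = isFreeCall(I))
    if (Constant *C = dyn_cast<Constant>(FC->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);
  return false;
}
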
@@ -346,22 +355,27 @@ bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
/// instructions in other blocks as well in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
bool MadeChange = false;
- for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
+
+#ifndef NDEBUG
+ // In debug builds, ensure that the terminator of the block is never replaced
+ // or deleted by these simplifications. The idea of simplification is that it
+ // cannot introduce new instructions, and there is no way to replace the
+ // terminator of a block without introducing a new instruction.
+ AssertingVH<Instruction> TerminatorVH(--BB->end());
+#endif
+
+ for (BasicBlock::iterator BI = BB->begin(), E = --BB->end(); BI != E; ) {
+ assert(!BI->isTerminator());
Instruction *Inst = BI++;
-
- if (Value *V = SimplifyInstruction(Inst, TD)) {
- WeakVH BIHandle(BI);
- ReplaceAndSimplifyAllUses(Inst, V, TD);
+
+ WeakVH BIHandle(BI);
+ if (recursivelySimplifyInstruction(Inst, TD)) {
MadeChange = true;
if (BIHandle != BI)
BI = BB->begin();
continue;
}
- if (Inst->isTerminator())
- break;
-
- WeakVH BIHandle(BI);
MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst);
if (BIHandle != BI)
BI = BB->begin();
@@ -399,17 +413,11 @@ void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
WeakVH PhiIt = &BB->front();
while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
+ Value *OldPhiIt = PhiIt;
- Value *PNV = SimplifyInstruction(PN, TD);
- if (PNV == 0) continue;
+ if (!recursivelySimplifyInstruction(PN, TD))
+ continue;
- // If we're able to simplify the phi to a single value, substitute the new
- // value into all of its uses.
- assert(PNV != PN && "SimplifyInstruction broken!");
-
- Value *OldPhiIt = PhiIt;
- ReplaceAndSimplifyAllUses(PN, PNV, TD);
-
// If recursive simplification ended up deleting the next PHI node we would
// iterate to, then our iterator is invalid, restart scanning from the top
// of the block.
@@ -486,22 +494,8 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
if (Succ->getSinglePredecessor()) return true;
// Make a list of the predecessors of BB
- typedef SmallPtrSet<BasicBlock*, 16> BlockSet;
- BlockSet BBPreds(pred_begin(BB), pred_end(BB));
-
- // Use that list to make another list of common predecessors of BB and Succ
- BlockSet CommonPreds;
- for (pred_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
- PI != PE; ++PI) {
- BasicBlock *P = *PI;
- if (BBPreds.count(P))
- CommonPreds.insert(P);
- }
+ SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
- // Shortcut, if there are no common predecessors, merging is always safe
- if (CommonPreds.empty())
- return true;
-
// Look at all the phi nodes in Succ, to see if they present a conflict when
// merging these blocks
for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
@@ -512,28 +506,28 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
// merge the phi nodes and then the blocks can still be merged
PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
if (BBPN && BBPN->getParent() == BB) {
- for (BlockSet::iterator PI = CommonPreds.begin(), PE = CommonPreds.end();
- PI != PE; PI++) {
- if (BBPN->getIncomingValueForBlock(*PI)
- != PN->getIncomingValueForBlock(*PI)) {
+ for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
+ BasicBlock *IBB = PN->getIncomingBlock(PI);
+ if (BBPreds.count(IBB) &&
+ BBPN->getIncomingValueForBlock(IBB) != PN->getIncomingValue(PI)) {
DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
<< Succ->getName() << " is conflicting with "
<< BBPN->getName() << " with regard to common predecessor "
- << (*PI)->getName() << "\n");
+ << IBB->getName() << "\n");
return false;
}
}
} else {
Value* Val = PN->getIncomingValueForBlock(BB);
- for (BlockSet::iterator PI = CommonPreds.begin(), PE = CommonPreds.end();
- PI != PE; PI++) {
+ for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
// See if the incoming value for the common predecessor is equal to the
// one for BB, in which case this phi node will not prevent the merging
// of the block.
- if (Val != PN->getIncomingValueForBlock(*PI)) {
+ BasicBlock *IBB = PN->getIncomingBlock(PI);
+ if (BBPreds.count(IBB) && Val != PN->getIncomingValue(PI)) {
DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
<< Succ->getName() << " is conflicting with regard to common "
- << "predecessor " << (*PI)->getName() << "\n");
+ << "predecessor " << IBB->getName() << "\n");
return false;
}
}
@@ -740,6 +734,10 @@ static unsigned enforceKnownAlignment(Value *V, unsigned Align,
// If there is a large requested alignment and we can, bump up the alignment
// of the global.
if (GV->isDeclaration()) return Align;
+ // If the memory we set aside for the global may not be the memory used by
+ // the final program then it is impossible for us to reliably enforce the
+ // preferred alignment.
+ if (GV->isWeakForLinker()) return Align;
if (GV->getAlignment() >= PrefAlign)
return GV->getAlignment();
@@ -764,9 +762,8 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
assert(V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!");
unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
- APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
+ ComputeMaskedBits(V, KnownZero, KnownOne, TD);
unsigned TrailZ = KnownZero.countTrailingOnes();
  // Avoid trouble with ridiculously large TrailZ values, such as
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index cbd54a8..0bc185d 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -99,7 +99,8 @@ namespace {
bool ProcessLoop(Loop *L, LPPassManager &LPM);
BasicBlock *RewriteLoopExitBlock(Loop *L, BasicBlock *Exit);
BasicBlock *InsertPreheaderForLoop(Loop *L);
- Loop *SeparateNestedLoop(Loop *L, LPPassManager &LPM);
+ Loop *SeparateNestedLoop(Loop *L, LPPassManager &LPM,
+ BasicBlock *Preheader);
BasicBlock *InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader);
void PlaceSplitBlockCarefully(BasicBlock *NewBB,
SmallVectorImpl<BasicBlock*> &SplitPreds,
@@ -240,7 +241,7 @@ ReprocessLoop:
// this for loops with a giant number of backedges, just factor them into a
// common backedge instead.
if (L->getNumBackEdges() < 8) {
- if (SeparateNestedLoop(L, LPM)) {
+ if (SeparateNestedLoop(L, LPM, Preheader)) {
++NumNested;
// This is a big restructuring change, reprocess the whole loop.
Changed = true;
@@ -265,7 +266,7 @@ ReprocessLoop:
PHINode *PN;
for (BasicBlock::iterator I = L->getHeader()->begin();
(PN = dyn_cast<PHINode>(I++)); )
- if (Value *V = SimplifyInstruction(PN, 0, DT)) {
+ if (Value *V = SimplifyInstruction(PN, 0, 0, DT)) {
if (AA) AA->deleteValue(PN);
if (SE) SE->forgetValue(PN);
PN->replaceAllUsesWith(V);
@@ -379,19 +380,27 @@ BasicBlock *LoopSimplify::InsertPreheaderForLoop(Loop *L) {
}
// Split out the loop pre-header.
- BasicBlock *NewBB =
- SplitBlockPredecessors(Header, &OutsideBlocks[0], OutsideBlocks.size(),
- ".preheader", this);
+ BasicBlock *PreheaderBB;
+ if (!Header->isLandingPad()) {
+ PreheaderBB = SplitBlockPredecessors(Header, OutsideBlocks, ".preheader",
+ this);
+ } else {
+ SmallVector<BasicBlock*, 2> NewBBs;
+ SplitLandingPadPredecessors(Header, OutsideBlocks, ".preheader",
+ ".split-lp", this, NewBBs);
+ PreheaderBB = NewBBs[0];
+ }
- NewBB->getTerminator()->setDebugLoc(Header->getFirstNonPHI()->getDebugLoc());
- DEBUG(dbgs() << "LoopSimplify: Creating pre-header " << NewBB->getName()
- << "\n");
+ PreheaderBB->getTerminator()->setDebugLoc(
+ Header->getFirstNonPHI()->getDebugLoc());
+ DEBUG(dbgs() << "LoopSimplify: Creating pre-header "
+ << PreheaderBB->getName() << "\n");
// Make sure that NewBB is put someplace intelligent, which doesn't mess up
// code layout too horribly.
- PlaceSplitBlockCarefully(NewBB, OutsideBlocks, L);
+ PlaceSplitBlockCarefully(PreheaderBB, OutsideBlocks, L);
- return NewBB;
+ return PreheaderBB;
}
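
// Usage sketch for the updated splitting helpers, assuming both now take an
// ArrayRef of predecessors instead of a pointer-plus-count pair, as this hunk
// suggests (the wrapper below is illustrative, not part of the patch).
#include "llvm/BasicBlock.h"
#include "llvm/Pass.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

// Split the given predecessors off BB, handling landing pads specially.
static BasicBlock *splitPredecessors(BasicBlock *BB,
                                     SmallVectorImpl<BasicBlock*> &Preds,
                                     Pass *P) {
  if (!BB->isLandingPad())
    return SplitBlockPredecessors(BB, Preds, ".split", P);
  SmallVector<BasicBlock*, 2> NewBBs;
  SplitLandingPadPredecessors(BB, Preds, ".split", ".split-lp", P, NewBBs);
  return NewBBs[0]; // the block the given predecessors now branch to
}
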
/// RewriteLoopExitBlock - Ensure that the loop preheader dominates all exit
@@ -420,9 +429,7 @@ BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
this, NewBBs);
NewExitBB = NewBBs[0];
} else {
- NewExitBB = SplitBlockPredecessors(Exit, &LoopBlocks[0],
- LoopBlocks.size(), ".loopexit",
- this);
+ NewExitBB = SplitBlockPredecessors(Exit, LoopBlocks, ".loopexit", this);
}
DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
@@ -456,7 +463,7 @@ static PHINode *FindPHIToPartitionLoops(Loop *L, DominatorTree *DT,
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ) {
PHINode *PN = cast<PHINode>(I);
++I;
- if (Value *V = SimplifyInstruction(PN, 0, DT)) {
+ if (Value *V = SimplifyInstruction(PN, 0, 0, DT)) {
// This is a degenerate PHI already, don't modify it!
PN->replaceAllUsesWith(V);
if (AA) AA->deleteValue(PN);
@@ -529,7 +536,16 @@ void LoopSimplify::PlaceSplitBlockCarefully(BasicBlock *NewBB,
/// If we are able to separate out a loop, return the new outer loop that was
/// created.
///
-Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
+Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM,
+ BasicBlock *Preheader) {
+ // Don't try to separate loops without a preheader.
+ if (!Preheader)
+ return 0;
+
+ // The header is not a landing pad; preheader insertion should ensure this.
+ assert(!L->getHeader()->isLandingPad() &&
+ "Can't insert backedge to landing pad");
+
PHINode *PN = FindPHIToPartitionLoops(L, DT, AA, LI);
if (PN == 0) return 0; // No known way to partition.
@@ -537,16 +553,15 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
// handles the case when a PHI node has multiple instances of itself as
// arguments.
SmallVector<BasicBlock*, 8> OuterLoopPreds;
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
if (PN->getIncomingValue(i) != PN ||
!L->contains(PN->getIncomingBlock(i))) {
// We can't split indirectbr edges.
if (isa<IndirectBrInst>(PN->getIncomingBlock(i)->getTerminator()))
return 0;
-
OuterLoopPreds.push_back(PN->getIncomingBlock(i));
}
-
+ }
DEBUG(dbgs() << "LoopSimplify: Splitting out a new outer loop\n");
// If ScalarEvolution is around and knows anything about values in
@@ -556,9 +571,8 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
SE->forgetLoop(L);
BasicBlock *Header = L->getHeader();
- BasicBlock *NewBB = SplitBlockPredecessors(Header, &OuterLoopPreds[0],
- OuterLoopPreds.size(),
- ".outer", this);
+ BasicBlock *NewBB =
+ SplitBlockPredecessors(Header, OuterLoopPreds, ".outer", this);
// Make sure that NewBB is put someplace intelligent, which doesn't mess up
// code layout too horribly.
@@ -640,6 +654,9 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
if (!Preheader)
return 0;
+ // The header is not a landing pad; preheader insertion should ensure this.
+ assert(!Header->isLandingPad() && "Can't insert backedge to landing pad");
+
// Figure out which basic blocks contain back-edges to the loop header.
std::vector<BasicBlock*> BackedgeBlocks;
for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I){
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index 62e4fa2..e15497a 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -135,7 +135,8 @@ static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI,
/// This utility preserves LoopInfo. If DominatorTree or ScalarEvolution are
/// available it must also preserve those analyses.
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
- unsigned TripMultiple, LoopInfo *LI, LPPassManager *LPM) {
+ bool AllowRuntime, unsigned TripMultiple,
+ LoopInfo *LI, LPPassManager *LPM) {
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
@@ -148,6 +149,12 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
return false;
}
+ // Loops with indirectbr cannot be cloned.
+ if (!L->isSafeToClone()) {
+ DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
+ return false;
+ }
+
BasicBlock *Header = L->getHeader();
BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());
@@ -165,12 +172,6 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
return false;
}
- // Notify ScalarEvolution that the loop will be substantially changed,
- // if not outright eliminated.
- ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
- if (SE)
- SE->forgetLoop(L);
-
if (TripCount != 0)
DEBUG(dbgs() << " Trip Count = " << TripCount << "\n");
if (TripMultiple != 1)
@@ -181,6 +182,11 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
if (TripCount != 0 && Count > TripCount)
Count = TripCount;
+ // Don't enter the unroll code if there is nothing to do. This way we don't
+ // need to support "partial unrolling by 1".
+ if (TripCount == 0 && Count < 2)
+ return false;
+
assert(Count > 0);
assert(TripMultiple > 0);
assert(TripCount == 0 || TripCount % TripMultiple == 0);
@@ -188,6 +194,20 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
// Are we eliminating the loop control altogether?
bool CompletelyUnroll = Count == TripCount;
+ // We assume a run-time trip count if the compiler cannot
+ // figure out the loop trip count and the unroll-runtime
+ // flag is specified.
+ bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);
+
+ if (RuntimeTripCount && !UnrollRuntimeLoopProlog(L, Count, LI, LPM))
+ return false;
+
+ // Notify ScalarEvolution that the loop will be substantially changed,
+ // if not outright eliminated.
+ ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
+ if (SE)
+ SE->forgetLoop(L);
+
// If we know the trip count, we know the multiple...
unsigned BreakoutTrip = 0;
if (TripCount != 0) {
@@ -209,6 +229,8 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
} else if (TripMultiple != 1) {
DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
+ } else if (RuntimeTripCount) {
+ DEBUG(dbgs() << " with run-time trip count");
}
DEBUG(dbgs() << "!\n");
}
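
// Caller-side sketch of the extended interface (hypothetical values, not part
// of the patch): ask for a factor-of-4 unroll and let the runtime prolog
// handle an unknown trip count.
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
using namespace llvm;

// Unroll L by 4; TripCount == 0 means "unknown at compile time".
static bool unrollByFourAtRuntime(Loop *L, LoopInfo *LI, LPPassManager *LPM) {
  return UnrollLoop(L, /*Count=*/4, /*TripCount=*/0,
                    /*AllowRuntime=*/true, /*TripMultiple=*/1, LI, LPM);
}
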
@@ -332,6 +354,10 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
BasicBlock *Dest = Headers[j];
bool NeedConditional = true;
+ if (RuntimeTripCount && j != 0) {
+ NeedConditional = false;
+ }
+
// For a complete unroll, make the last iteration end with a branch
// to the exit block.
if (CompletelyUnroll && j == 0) {
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
new file mode 100644
index 0000000..3aa6bef
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -0,0 +1,372 @@
+//===-- LoopUnrollRuntime.cpp - Runtime loop unrolling utilities ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements some loop unrolling utilities for loops with run-time
+// trip counts. See LoopUnroll.cpp for unrolling loops with compile-time
+// trip counts.
+//
+// The functions in this file are used to generate extra code when the
+// run-time trip count modulo the unroll factor is not 0. When this is the
+// case, we need to generate code to execute these 'left over' iterations.
+//
+// The current strategy generates an if-then-else sequence prior to the
+// unrolled loop to execute the 'left over' iterations. Other strategies
+// include generating a loop before or after the unrolled loop.
+//
+//===----------------------------------------------------------------------===//
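
// To make the strategy concrete, here is a hand-written C++ analogy of the
// code shape produced for an unroll factor of 4. This is an illustration
// only: the pass works on LLVM IR, and the prolog dispatch is shown as the
// switch that SimplifyCFG is expected to form from the if-then sequence.
void unrolledByFour(int *A, unsigned N) {
  unsigned I = 0;
  unsigned Extra = N % 4;   // 'xtraiter' in the code below
  switch (Extra) {          // prolog: run the left-over iterations
  case 3: A[I] += 1; ++I;   // fall through
  case 2: A[I] += 1; ++I;   // fall through
  case 1: A[I] += 1; ++I;
  }
  if (N < 4) return;        // branch around the unrolled loop
  for (; I < N; I += 4) {   // unrolled body, executed N / 4 times
    A[I] += 1; A[I+1] += 1; A[I+2] += 1; A[I+3] += 1;
  }
}
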
+
+#define DEBUG_TYPE "loop-unroll"
+#include "llvm/Transforms/Utils/UnrollLoop.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/LoopIterator.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <algorithm>
+
+using namespace llvm;
+
+STATISTIC(NumRuntimeUnrolled,
+ "Number of loops unrolled with run-time trip counts");
+
+/// Connect the unrolling prolog code to the original loop.
+/// The unrolling prolog code contains code to execute the
+/// 'extra' iterations if the run-time trip count modulo the
+/// unroll count is non-zero.
+///
+/// This function performs the following:
+/// - Create PHI nodes at prolog end block to combine values
+/// that exit the prolog code and jump around the prolog.
+/// - Add a PHI operand to a PHI node at the loop exit block
+/// for values that exit the prolog and go around the loop.
+/// - Branch around the original loop if the trip count is less
+/// than the unroll factor.
+///
+static void ConnectProlog(Loop *L, Value *TripCount, unsigned Count,
+ BasicBlock *LastPrologBB, BasicBlock *PrologEnd,
+ BasicBlock *OrigPH, BasicBlock *NewPH,
+ ValueToValueMapTy &LVMap, Pass *P) {
+ BasicBlock *Latch = L->getLoopLatch();
+ assert(Latch != 0 && "Loop must have a latch");
+
+ // Create a PHI node for each outgoing value from the original loop
+ // (which means it is an outgoing value from the prolog code too).
+ // The new PHI node is inserted in the prolog end basic block.
+ // The new PHI name is added as an operand of a PHI node in either
+ // the loop header or the loop exit block.
+ for (succ_iterator SBI = succ_begin(Latch), SBE = succ_end(Latch);
+ SBI != SBE; ++SBI) {
+ for (BasicBlock::iterator BBI = (*SBI)->begin();
+ PHINode *PN = dyn_cast<PHINode>(BBI); ++BBI) {
+
+ // Add a new PHI node to the prolog end block and add the
+ // appropriate incoming values.
+ PHINode *NewPN = PHINode::Create(PN->getType(), 2, PN->getName()+".unr",
+ PrologEnd->getTerminator());
+ // Adding a value to the new PHI node from the original loop preheader.
+ // This is the value that skips all the prolog code.
+ if (L->contains(PN)) {
+ NewPN->addIncoming(PN->getIncomingValueForBlock(NewPH), OrigPH);
+ } else {
+ NewPN->addIncoming(Constant::getNullValue(PN->getType()), OrigPH);
+ }
+
+ Value *V = PN->getIncomingValueForBlock(Latch);
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ if (L->contains(I)) {
+ V = LVMap[I];
+ }
+ }
+ // Adding a value to the new PHI node from the last prolog block
+ // that was created.
+ NewPN->addIncoming(V, LastPrologBB);
+
+ // Update the existing PHI node operand with the value from the
+ // new PHI node. How this is done depends on if the existing
+ // PHI node is in the original loop block, or the exit block.
+ if (L->contains(PN)) {
+ PN->setIncomingValue(PN->getBasicBlockIndex(NewPH), NewPN);
+ } else {
+ PN->addIncoming(NewPN, PrologEnd);
+ }
+ }
+ }
+
+  // Create a branch around the original loop, which is taken if the
+ // trip count is less than the unroll factor.
+ Instruction *InsertPt = PrologEnd->getTerminator();
+ Instruction *BrLoopExit =
+ new ICmpInst(InsertPt, ICmpInst::ICMP_ULT, TripCount,
+ ConstantInt::get(TripCount->getType(), Count));
+ BasicBlock *Exit = L->getUniqueExitBlock();
+ assert(Exit != 0 && "Loop must have a single exit block only");
+ // Split the exit to maintain loop canonicalization guarantees
+ SmallVector<BasicBlock*, 4> Preds(pred_begin(Exit), pred_end(Exit));
+ if (!Exit->isLandingPad()) {
+ SplitBlockPredecessors(Exit, Preds, ".unr-lcssa", P);
+ } else {
+ SmallVector<BasicBlock*, 2> NewBBs;
+ SplitLandingPadPredecessors(Exit, Preds, ".unr1-lcssa", ".unr2-lcssa",
+ P, NewBBs);
+ }
+ // Add the branch to the exit block (around the unrolled loop)
+ BranchInst::Create(Exit, NewPH, BrLoopExit, InsertPt);
+ InsertPt->eraseFromParent();
+}
+
+/// Create a clone of the blocks in a loop and connect them together.
+/// This function doesn't create a clone of the loop structure.
+///
+/// There are two value maps that are defined and used. VMap is
+/// for the values in the current loop instance. LVMap contains
+/// the values from the last loop instance. We need the LVMap values
+/// to update the initial values for the current loop instance.
+///
+static void CloneLoopBlocks(Loop *L,
+ bool FirstCopy,
+ BasicBlock *InsertTop,
+ BasicBlock *InsertBot,
+ std::vector<BasicBlock *> &NewBlocks,
+ LoopBlocksDFS &LoopBlocks,
+ ValueToValueMapTy &VMap,
+ ValueToValueMapTy &LVMap,
+ LoopInfo *LI) {
+
+ BasicBlock *Preheader = L->getLoopPreheader();
+ BasicBlock *Header = L->getHeader();
+ BasicBlock *Latch = L->getLoopLatch();
+ Function *F = Header->getParent();
+ LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
+ LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
+ // For each block in the original loop, create a new copy,
+ // and update the value map with the newly created values.
+ for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
+ BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".unr", F);
+ NewBlocks.push_back(NewBB);
+
+ if (Loop *ParentLoop = L->getParentLoop())
+ ParentLoop->addBasicBlockToLoop(NewBB, LI->getBase());
+
+ VMap[*BB] = NewBB;
+ if (Header == *BB) {
+ // For the first block, add a CFG connection to this newly
+ // created block
+ InsertTop->getTerminator()->setSuccessor(0, NewBB);
+
+ // Change the incoming values to the ones defined in the
+ // previously cloned loop.
+ for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
+ PHINode *NewPHI = cast<PHINode>(VMap[I]);
+ if (FirstCopy) {
+ // We replace the first phi node with the value from the preheader
+ VMap[I] = NewPHI->getIncomingValueForBlock(Preheader);
+ NewBB->getInstList().erase(NewPHI);
+ } else {
+ // Update VMap with values from the previous block
+ unsigned idx = NewPHI->getBasicBlockIndex(Latch);
+ Value *InVal = NewPHI->getIncomingValue(idx);
+ if (Instruction *I = dyn_cast<Instruction>(InVal))
+ if (L->contains(I))
+ InVal = LVMap[InVal];
+ NewPHI->setIncomingValue(idx, InVal);
+ NewPHI->setIncomingBlock(idx, InsertTop);
+ }
+ }
+ }
+
+ if (Latch == *BB) {
+ VMap.erase((*BB)->getTerminator());
+ NewBB->getTerminator()->eraseFromParent();
+ BranchInst::Create(InsertBot, NewBB);
+ }
+ }
+  // LVMap is updated with the values for the current loop, which are used
+  // the next time this function is called.
+ for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
+ VI != VE; ++VI) {
+ LVMap[VI->first] = VI->second;
+ }
+}
+
+/// Insert code into the prolog when unrolling a loop with a run-time
+/// trip count.
+///
+/// This method assumes that the loop unroll factor is the total number
+/// of loop bodies in the loop after unrolling. (Some folks refer
+/// to the unroll factor as the number of *extra* copies added).
+/// We also assume that the loop unroll factor is a power of two. So, after
+/// unrolling the loop, the number of loop bodies executed is 2,
+/// 4, 8, etc. Note - LLVM converts the if-then sequence to a switch
+/// instruction in SimplifyCFG.cpp. Then, the backend decides how code for
+/// the switch instruction is generated.
+///
+/// extraiters = tripcount % loopfactor
+/// if (extraiters == 0) jump Loop:
+/// if (extraiters == loopfactor) jump L1
+/// if (extraiters == loopfactor-1) jump L2
+/// ...
+/// L1: LoopBody;
+/// L2: LoopBody;
+/// ...
+/// if tripcount < loopfactor jump End
+/// Loop:
+/// ...
+/// End:
+///
+bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
+ LPPassManager *LPM) {
+  // For now, only unroll loops that contain a single exiting block.
+ if (!L->getExitingBlock())
+ return false;
+
+ // Make sure the loop is in canonical form, and there is a single
+ // exit block only.
+ if (!L->isLoopSimplifyForm() || L->getUniqueExitBlock() == 0)
+ return false;
+
+ // Use Scalar Evolution to compute the trip count. This allows more
+ // loops to be unrolled than relying on induction var simplification
+ ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
+ if (SE == 0)
+ return false;
+
+  // Only unroll loops with a computable trip count, and the trip count must
+  // be an integer value (allowing a pointer type is a TODO item).
+ const SCEV *BECount = SE->getBackedgeTakenCount(L);
+ if (isa<SCEVCouldNotCompute>(BECount) || !BECount->getType()->isIntegerTy())
+ return false;
+
+ // Add 1 since the backedge count doesn't include the first loop iteration
+ const SCEV *TripCountSC =
+ SE->getAddExpr(BECount, SE->getConstant(BECount->getType(), 1));
+ if (isa<SCEVCouldNotCompute>(TripCountSC))
+ return false;
+
+ // We only handle cases when the unroll factor is a power of 2.
+ // Count is the loop unroll factor, the number of extra copies added + 1.
+ if ((Count & (Count-1)) != 0)
+ return false;
+
+ // If this loop is nested, then the loop unroller changes the code in
+ // parent loop, so the Scalar Evolution pass needs to be run again
+ if (Loop *ParentLoop = L->getParentLoop())
+ SE->forgetLoop(ParentLoop);
+
+ BasicBlock *PH = L->getLoopPreheader();
+ BasicBlock *Header = L->getHeader();
+ BasicBlock *Latch = L->getLoopLatch();
+  // Split the original preheader twice: once for the end of the prolog code
+  // and once for the new loop preheader.
+ BasicBlock *PEnd = SplitEdge(PH, Header, LPM->getAsPass());
+ BasicBlock *NewPH = SplitBlock(PEnd, PEnd->getTerminator(), LPM->getAsPass());
+ BranchInst *PreHeaderBR = cast<BranchInst>(PH->getTerminator());
+
+ // Compute the number of extra iterations required, which is:
+  // extra iterations = run-time trip count % loop unroll factor
+ SCEVExpander Expander(*SE, "loop-unroll");
+ Value *TripCount = Expander.expandCodeFor(TripCountSC, TripCountSC->getType(),
+ PreHeaderBR);
+ Type *CountTy = TripCount->getType();
+ BinaryOperator *ModVal =
+ BinaryOperator::CreateURem(TripCount,
+ ConstantInt::get(CountTy, Count),
+ "xtraiter");
+ ModVal->insertBefore(PreHeaderBR);
+
+  // If there are no extra iterations, jump straight to the unrolled loop.
+ Value *BranchVal = new ICmpInst(PreHeaderBR,
+ ICmpInst::ICMP_NE, ModVal,
+ ConstantInt::get(CountTy, 0), "lcmp");
+ // Branch to either the extra iterations or the unrolled loop
+ // We will fix up the true branch label when adding loop body copies
+ BranchInst::Create(PEnd, PEnd, BranchVal, PreHeaderBR);
+ assert(PreHeaderBR->isUnconditional() &&
+ PreHeaderBR->getSuccessor(0) == PEnd &&
+ "CFG edges in Preheader are not correct");
+ PreHeaderBR->eraseFromParent();
+
+ ValueToValueMapTy LVMap;
+ Function *F = Header->getParent();
+ // These variables are used to update the CFG links in each iteration
+ BasicBlock *CompareBB = 0;
+ BasicBlock *LastLoopBB = PH;
+ // Get an ordered list of blocks in the loop to help with the ordering of the
+ // cloned blocks in the prolog code
+ LoopBlocksDFS LoopBlocks(L);
+ LoopBlocks.perform(LI);
+
+ //
+ // For each extra loop iteration, create a copy of the loop's basic blocks
+ // and generate a condition that branches to the copy depending on the
+ // number of 'left over' iterations.
+ //
+ for (unsigned leftOverIters = Count-1; leftOverIters > 0; --leftOverIters) {
+ std::vector<BasicBlock*> NewBlocks;
+ ValueToValueMapTy VMap;
+
+    // Clone all the basic blocks in the loop; the loop structure itself is
+    // not cloned. This call also adds the appropriate CFG connections.
+ CloneLoopBlocks(L, (leftOverIters == Count-1), LastLoopBB, PEnd, NewBlocks,
+ LoopBlocks, VMap, LVMap, LI);
+ LastLoopBB = cast<BasicBlock>(VMap[Latch]);
+
+ // Insert the cloned blocks into function just before the original loop
+ F->getBasicBlockList().splice(PEnd, F->getBasicBlockList(),
+ NewBlocks[0], F->end());
+
+ // Generate the code for the comparison which determines if the loop
+ // prolog code needs to be executed.
+ if (leftOverIters == Count-1) {
+      // There is no compare block for the fall-through case of the last
+      // left-over iteration.
+ CompareBB = NewBlocks[0];
+ } else {
+ // Create a new block for the comparison
+ BasicBlock *NewBB = BasicBlock::Create(CompareBB->getContext(), "unr.cmp",
+ F, CompareBB);
+ if (Loop *ParentLoop = L->getParentLoop()) {
+ // Add the new block to the parent loop, if needed
+ ParentLoop->addBasicBlockToLoop(NewBB, LI->getBase());
+ }
+
+      // Compare against the left-over iteration count and branch.
+ Value *BranchVal = new ICmpInst(*NewBB, ICmpInst::ICMP_EQ, ModVal,
+ ConstantInt::get(CountTy, leftOverIters),
+ "un.tmp");
+ // Branch to either the extra iterations or the unrolled loop
+ BranchInst::Create(NewBlocks[0], CompareBB,
+ BranchVal, NewBB);
+ CompareBB = NewBB;
+ PH->getTerminator()->setSuccessor(0, NewBB);
+ VMap[NewPH] = CompareBB;
+ }
+
+ // Rewrite the cloned instruction operands to use the values
+ // created when the clone is created.
+ for (unsigned i = 0, e = NewBlocks.size(); i != e; ++i) {
+ for (BasicBlock::iterator I = NewBlocks[i]->begin(),
+ E = NewBlocks[i]->end(); I != E; ++I) {
+ RemapInstruction(I, VMap,
+ RF_NoModuleLevelChanges|RF_IgnoreMissingEntries);
+ }
+ }
+ }
+
+ // Connect the prolog code to the original loop and update the
+ // PHI functions.
+ ConnectProlog(L, TripCount, Count, LastLoopBB, PEnd, PH, NewPH, LVMap,
+ LPM->getAsPass());
+ NumRuntimeUnrolled++;
+ return true;
+}
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp b/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
index 61ab3f6..c70ced1 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerExpectIntrinsic.cpp
@@ -1,3 +1,16 @@
+//===- LowerExpectIntrinsic.cpp - Lower expect intrinsic ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers the 'expect' intrinsic to LLVM metadata.
+//
+//===----------------------------------------------------------------------===//
+
#define DEBUG_TYPE "lower-expect-intrinsic"
#include "llvm/Constants.h"
#include "llvm/Function.h"
@@ -60,14 +73,17 @@ bool LowerExpectIntrinsic::HandleSwitchExpect(SwitchInst *SI) {
LLVMContext &Context = CI->getContext();
Type *Int32Ty = Type::getInt32Ty(Context);
- unsigned caseNo = SI->findCaseValue(ExpectedValue);
+ SwitchInst::CaseIt Case = SI->findCaseValue(ExpectedValue);
std::vector<Value *> Vec;
unsigned n = SI->getNumCases();
- Vec.resize(n + 1); // +1 for MDString
+ Vec.resize(n + 1 + 1); // +1 for MDString and +1 for default case
Vec[0] = MDString::get(Context, "branch_weights");
+ Vec[1] = ConstantInt::get(Int32Ty, Case == SI->case_default() ?
+ LikelyBranchWeight : UnlikelyBranchWeight);
for (unsigned i = 0; i < n; ++i) {
- Vec[i + 1] = ConstantInt::get(Int32Ty, i == caseNo ? LikelyBranchWeight : UnlikelyBranchWeight);
+ Vec[i + 1 + 1] = ConstantInt::get(Int32Ty, i == Case.getCaseIndex() ?
+ LikelyBranchWeight : UnlikelyBranchWeight);
}
MDNode *WeightsNode = llvm::MDNode::get(Context, Vec);
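
// A toy model of the resulting weight layout (a sketch with assumptions:
// plain integers stand in for the MDString/ConstantInt operands, and 64/4
// mirror what the pass's likely/unlikely weights are believed to default to).
#include <cstdint>
#include <vector>

// Slot 0, right after the "branch_weights" string, now belongs to the default
// destination; the explicit cases follow in order.
static std::vector<uint32_t> expectSwitchWeights(unsigned NumCases,
                                                 unsigned ExpectedIdx,
                                                 bool ExpectedIsDefault) {
  const uint32_t Likely = 64, Unlikely = 4; // assumed option defaults
  std::vector<uint32_t> W(NumCases + 1);
  W[0] = ExpectedIsDefault ? Likely : Unlikely;
  for (unsigned i = 0; i != NumCases; ++i)
    W[i + 1] = (!ExpectedIsDefault && i == ExpectedIdx) ? Likely : Unlikely;
  return W;
}
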
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp b/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
index c96c8fc..9305554 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
@@ -54,7 +54,6 @@
using namespace llvm;
STATISTIC(NumInvokes, "Number of invokes replaced");
-STATISTIC(NumUnwinds, "Number of unwinds replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");
static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
@@ -193,20 +192,6 @@ bool LowerInvoke::insertCheapEHSupport(Function &F) {
BB->getInstList().erase(II);
++NumInvokes; Changed = true;
- } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- // Insert a call to abort()
- CallInst::Create(AbortFn, "", UI)->setTailCall();
-
- // Insert a return instruction. This really should be a "barrier", as it
- // is unreachable.
- ReturnInst::Create(F.getContext(),
- F.getReturnType()->isVoidTy() ?
- 0 : Constant::getNullValue(F.getReturnType()), UI);
-
- // Remove the unwind instruction now.
- BB->getInstList().erase(UI);
-
- ++NumUnwinds; Changed = true;
}
return Changed;
}
@@ -404,7 +389,6 @@ splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
SmallVector<ReturnInst*,16> Returns;
- SmallVector<UnwindInst*,16> Unwinds;
SmallVector<InvokeInst*,16> Invokes;
UnreachableInst* UnreachablePlaceholder = 0;
@@ -415,14 +399,11 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
Returns.push_back(RI);
} else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
Invokes.push_back(II);
- } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- Unwinds.push_back(UI);
}
- if (Unwinds.empty() && Invokes.empty()) return false;
+ if (Invokes.empty()) return false;
NumInvokes += Invokes.size();
- NumUnwinds += Unwinds.size();
// TODO: This is not an optimal way to do this. In particular, this always
// inserts setjmp calls into the entries of functions with invoke instructions
@@ -572,13 +553,6 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
CallInst::Create(AbortFn, "",
TermBlock->getTerminator())->setTailCall();
-
- // Replace all unwinds with a branch to the unwind handler.
- for (unsigned i = 0, e = Unwinds.size(); i != e; ++i) {
- BranchInst::Create(UnwindHandler, Unwinds[i]);
- Unwinds[i]->eraseFromParent();
- }
-
// Replace the inserted unreachable with a branch to the unwind handler.
if (UnreachablePlaceholder) {
BranchInst::Create(UnwindHandler, UnreachablePlaceholder);
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index 686178c..a16130d 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -237,10 +237,10 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
unsigned numCmps = 0;
// Start with "simple" cases
- for (unsigned i = 1; i < SI->getNumSuccessors(); ++i)
- Cases.push_back(CaseRange(SI->getSuccessorValue(i),
- SI->getSuccessorValue(i),
- SI->getSuccessor(i)));
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i)
+ Cases.push_back(CaseRange(i.getCaseValue(), i.getCaseValue(),
+ i.getCaseSuccessor()));
+
std::sort(Cases.begin(), Cases.end(), CaseCmp());
// Merge case into clusters
@@ -281,7 +281,7 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI) {
BasicBlock* Default = SI->getDefaultDest();
// If there is only the default destination, don't bother with the code below.
- if (SI->getNumCases() == 1) {
+ if (!SI->getNumCases()) {
BranchInst::Create(SI->getDefaultDest(), CurBlock);
CurBlock->getInstList().erase(SI);
return;
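
// The semantic shift behind this hunk, as a one-line sketch (assuming the
// revised SwitchInst accounting in which getNumCases() excludes the default
// destination).
#include "llvm/Instructions.h"
using namespace llvm;

// A switch with no explicit cases can only reach its default destination.
static bool hasOnlyDefaultDest(const SwitchInst *SI) {
  return SI->getNumCases() == 0; // previously expressed as getNumCases() == 1
}
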
diff --git a/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp b/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp
new file mode 100644
index 0000000..8491c55
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Utils/ModuleUtils.cpp
@@ -0,0 +1,64 @@
+//===-- ModuleUtils.cpp - Functions to manipulate Modules -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions performs manipulations on Modules.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/Support/IRBuilder.h"
+
+using namespace llvm;
+
+static void appendToGlobalArray(const char *Array,
+ Module &M, Function *F, int Priority) {
+ IRBuilder<> IRB(M.getContext());
+ FunctionType *FnTy = FunctionType::get(IRB.getVoidTy(), false);
+ StructType *Ty = StructType::get(
+ IRB.getInt32Ty(), PointerType::getUnqual(FnTy), NULL);
+
+ Constant *RuntimeCtorInit = ConstantStruct::get(
+ Ty, IRB.getInt32(Priority), F, NULL);
+
+ // Get the current set of static global constructors and add the new ctor
+ // to the list.
+ SmallVector<Constant *, 16> CurrentCtors;
+ if (GlobalVariable * GVCtor = M.getNamedGlobal(Array)) {
+ if (Constant *Init = GVCtor->getInitializer()) {
+ unsigned n = Init->getNumOperands();
+ CurrentCtors.reserve(n + 1);
+ for (unsigned i = 0; i != n; ++i)
+ CurrentCtors.push_back(cast<Constant>(Init->getOperand(i)));
+ }
+ GVCtor->eraseFromParent();
+ }
+
+ CurrentCtors.push_back(RuntimeCtorInit);
+
+ // Create a new initializer.
+ ArrayType *AT = ArrayType::get(RuntimeCtorInit->getType(),
+ CurrentCtors.size());
+ Constant *NewInit = ConstantArray::get(AT, CurrentCtors);
+
+ // Create the new global variable and replace all uses of
+ // the old global variable with the new one.
+ (void)new GlobalVariable(M, NewInit->getType(), false,
+ GlobalValue::AppendingLinkage, NewInit, Array);
+}
+
+void llvm::appendToGlobalCtors(Module &M, Function *F, int Priority) {
+ appendToGlobalArray("llvm.global_ctors", M, F, Priority);
+}
+
+void llvm::appendToGlobalDtors(Module &M, Function *F, int Priority) {
+ appendToGlobalArray("llvm.global_dtors", M, F, Priority);
+}
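
// A minimal usage sketch for the new helpers (assumed driver code; the
// function name "my_module_init" is invented for the example).
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Module.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;

// Register a void() initializer among the module's static constructors.
static void addModuleInitializer(Module &M) {
  FunctionType *FnTy =
      FunctionType::get(Type::getVoidTy(M.getContext()), false);
  Function *Init = Function::Create(FnTy, GlobalValue::InternalLinkage,
                                    "my_module_init", &M);
  // A body must still be given to Init before the module verifies.
  appendToGlobalCtors(M, Init, /*Priority=*/65535);
}
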
diff --git a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index db3e942..2357d81 100644
--- a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -41,6 +41,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
@@ -66,7 +67,8 @@ struct DenseMapInfo<std::pair<BasicBlock*, unsigned> > {
return EltTy(reinterpret_cast<BasicBlock*>(-2), 0U);
}
static unsigned getHashValue(const std::pair<BasicBlock*, unsigned> &Val) {
- return DenseMapInfo<void*>::getHashValue(Val.first) + Val.second*2;
+ using llvm::hash_value;
+ return static_cast<unsigned>(hash_value(Val));
}
static bool isEqual(const EltTy &LHS, const EltTy &RHS) {
return LHS == RHS;
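
// The same combining logic outside the DenseMapInfo boilerplate, as a sketch
// (assumes llvm/ADT/Hashing.h provides hash_value overloads for pointers and
// std::pair, per the include added above).
#include <utility>
#include "llvm/ADT/Hashing.h"

// Combine a block pointer and an operand index into a single hash.
static unsigned hashBlockAndIndex(void *BB, unsigned Idx) {
  using llvm::hash_value;
  return static_cast<unsigned>(hash_value(std::make_pair(BB, Idx)));
}
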
@@ -423,7 +425,8 @@ void PromoteMem2Reg::run() {
// Finally, after the scan, check to see if the store is all that is left.
if (Info.UsingBlocks.empty()) {
- // Record debuginfo for the store and remove the declaration's debuginfo.
+ // Record debuginfo for the store and remove the declaration's
+ // debuginfo.
if (DbgDeclareInst *DDI = Info.DbgDeclare) {
if (!DIB)
DIB = new DIBuilder(*DDI->getParent()->getParent()->getParent());
@@ -590,7 +593,7 @@ void PromoteMem2Reg::run() {
PHINode *PN = I->second;
// If this PHI node merges one value and/or undefs, get the value.
- if (Value *V = SimplifyInstruction(PN, 0, &DT)) {
+ if (Value *V = SimplifyInstruction(PN, 0, 0, &DT)) {
if (AST && PN->getType()->isPointerTy())
AST->deleteValue(PN);
PN->replaceAllUsesWith(V);
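The extra 0 threads a new optional TargetLibraryInfo parameter through SimplifyInstruction; judging only from the call sites in this diff, the signature is now roughly (parameter names illustrative):

  // Assumed shape of the updated declaration:
  Value *SimplifyInstruction(Instruction *I, const TargetData *TD = 0,
                             const TargetLibraryInfo *TLI = 0,
                             const DominatorTree *DT = 0);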
diff --git a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
index fa8061c..e60a41b 100644
--- a/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SSAUpdater.cpp
@@ -518,3 +518,10 @@ run(const SmallVectorImpl<Instruction*> &Insts) const {
User->eraseFromParent();
}
}
+
+bool
+LoadAndStorePromoter::isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts)
+ const {
+ return std::find(Insts.begin(), Insts.end(), I) != Insts.end();
+}
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index b8c3ab4..66dd2c9 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -14,16 +14,20 @@
#define DEBUG_TYPE "simplifycfg"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
+#include "llvm/Operator.h"
#include "llvm/Type.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/GlobalVariable.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -63,9 +67,8 @@ class SimplifyCFGOpt {
bool FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
IRBuilder<> &Builder);
- bool SimplifyResume(ResumeInst *RI, IRBuilder<> &Builder);
bool SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder);
- bool SimplifyUnwind(UnwindInst *UI, IRBuilder<> &Builder);
+ bool SimplifyResume(ResumeInst *RI, IRBuilder<> &Builder);
bool SimplifyUnreachable(UnreachableInst *UI);
bool SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder);
bool SimplifyIndirectBr(IndirectBrInst *IBI);
@@ -205,6 +208,42 @@ static Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
return BI->getCondition();
}
+/// ComputeSpeculationCost - Compute an abstract "cost" of speculating the
+/// given instruction, which is assumed to be safe to speculate. 1 means
+/// cheap, 2 means less cheap, and UINT_MAX means prohibitively expensive.
+static unsigned ComputeSpeculationCost(const User *I) {
+ assert(isSafeToSpeculativelyExecute(I) &&
+ "Instruction is not safe to speculatively execute!");
+ switch (Operator::getOpcode(I)) {
+ default:
+ // In doubt, be conservative.
+ return UINT_MAX;
+ case Instruction::GetElementPtr:
+ // GEPs are cheap if all indices are constant.
+ if (!cast<GEPOperator>(I)->hasAllConstantIndices())
+ return UINT_MAX;
+ return 1;
+ case Instruction::Load:
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::ICmp:
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ return 1; // These are all cheap.
+
+ case Instruction::Call:
+ case Instruction::Select:
+ return 2;
+ }
+}
+
/// DominatesMergePoint - If we have a merge point of an "if condition" as
/// accepted above, return true if the specified value dominates the block. We
/// don't handle the true generality of domination here, just a special case
@@ -257,46 +296,10 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// Okay, it looks like the instruction IS in the "condition". Check to
// see if it's a cheap instruction to unconditionally compute, and if it
// only uses stuff defined outside of the condition. If so, hoist it out.
- if (!I->isSafeToSpeculativelyExecute())
+ if (!isSafeToSpeculativelyExecute(I))
return false;
- unsigned Cost = 0;
-
- switch (I->getOpcode()) {
- default: return false; // Cannot hoist this out safely.
- case Instruction::Load:
- // We have to check to make sure there are no instructions before the
- // load in its basic block, as we are going to hoist the load out to its
- // predecessor.
- if (PBB->getFirstNonPHIOrDbg() != I)
- return false;
- Cost = 1;
- break;
- case Instruction::GetElementPtr:
- // GEPs are cheap if all indices are constant.
- if (!cast<GetElementPtrInst>(I)->hasAllConstantIndices())
- return false;
- Cost = 1;
- break;
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- case Instruction::ICmp:
- case Instruction::Trunc:
- case Instruction::ZExt:
- case Instruction::SExt:
- Cost = 1;
- break; // These are all cheap and non-trapping instructions.
-
- case Instruction::Select:
- Cost = 2;
- break;
- }
+ unsigned Cost = ComputeSpeculationCost(I);
if (Cost > CostRemaining)
return false;
@@ -373,9 +376,7 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
Span = Span.inverse();
// If there are a ton of values, we don't want to make a ginormous switch.
- if (Span.getSetSize().ugt(8) || Span.isEmptySet() ||
- // We don't handle wrapped sets yet.
- Span.isWrappedSet())
+ if (Span.getSetSize().ugt(8) || Span.isEmptySet())
return 0;
for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp)
@@ -430,9 +431,9 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
return 0;
}
-
+
static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
- Instruction* Cond = 0;
+ Instruction *Cond = 0;
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
Cond = dyn_cast<Instruction>(SI->getCondition());
} else if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
@@ -479,8 +480,9 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
BasicBlock*> > &Cases) {
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
Cases.reserve(SI->getNumCases());
- for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i)
- Cases.push_back(std::make_pair(SI->getCaseValue(i), SI->getSuccessor(i)));
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i)
+ Cases.push_back(std::make_pair(i.getCaseValue(),
+ i.getCaseSuccessor()));
return SI->getDefaultDest();
}
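The loop above is representative of the new SwitchInst case-iterator API used throughout this diff: case_begin()/case_end() cover only the explicit (non-default) cases, and the default destination is queried separately. A minimal sketch:

  for (SwitchInst::CaseIt I = SI->case_begin(), E = SI->case_end(); I != E; ++I) {
    ConstantInt *CaseVal = I.getCaseValue();      // explicit case value
    BasicBlock *CaseDest = I.getCaseSuccessor();  // its destination block
    // ...
  }
  BasicBlock *Default = SI->getDefaultDest();     // default handled separately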
@@ -603,11 +605,13 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator()
<< "Through successor TI: " << *TI);
- for (unsigned i = SI->getNumCases()-1; i != 0; --i)
- if (DeadCases.count(SI->getCaseValue(i))) {
- SI->getSuccessor(i)->removePredecessor(TI->getParent());
+ for (SwitchInst::CaseIt i = SI->case_end(), e = SI->case_begin(); i != e;) {
+ --i;
+ if (DeadCases.count(i.getCaseValue())) {
+ i.getCaseSuccessor()->removePredecessor(TI->getParent());
SI->removeCase(i);
}
+ }
DEBUG(dbgs() << "Leaving: " << *TI << "\n");
return true;
@@ -951,6 +955,20 @@ HoistTerminator:
/// and a BB2 and the only successor of BB1 is BB2, hoist simple code
/// (for now, restricted to a single instruction that's side effect free) from
/// the BB1 into the branch block to speculatively execute it.
+///
+/// Turn
+/// BB:
+/// %t1 = icmp
+/// br i1 %t1, label %BB1, label %BB2
+/// BB1:
+/// %t3 = add %t2, c
+/// br label BB2
+/// BB2:
+/// =>
+/// BB:
+/// %t1 = icmp
+/// %t4 = add %t2, c
+/// %t3 = select i1 %t1, %t2, %t3
static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
// Only speculatively execute a single instruction (not counting the
// terminator) for now.
@@ -967,8 +985,29 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
return false;
HInst = I;
}
- if (!HInst)
- return false;
+
+ BasicBlock *BIParent = BI->getParent();
+
+ // Check the instruction to be hoisted, if there is one.
+ if (HInst) {
+ // Don't hoist the instruction if it's unsafe or expensive.
+ if (!isSafeToSpeculativelyExecute(HInst))
+ return false;
+ if (ComputeSpeculationCost(HInst) > PHINodeFoldingThreshold)
+ return false;
+
+ // Do not hoist the instruction if any of its operands are defined but not
+ // used in this BB. The transformation will prevent the operand from
+ // being sunk into the use block.
+ for (User::op_iterator i = HInst->op_begin(), e = HInst->op_end();
+ i != e; ++i) {
+ Instruction *OpI = dyn_cast<Instruction>(*i);
+ if (OpI && OpI->getParent() == BIParent &&
+ !OpI->mayHaveSideEffects() &&
+ !OpI->isUsedInBasicBlock(BIParent))
+ return false;
+ }
+ }
// Be conservative for now. FP select instruction can often be expensive.
Value *BrCond = BI->getCondition();
@@ -983,130 +1022,78 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
Invert = true;
}
- // Turn
- // BB:
- // %t1 = icmp
- // br i1 %t1, label %BB1, label %BB2
- // BB1:
- // %t3 = add %t2, c
- // br label BB2
- // BB2:
- // =>
- // BB:
- // %t1 = icmp
- // %t4 = add %t2, c
- // %t3 = select i1 %t1, %t2, %t3
- switch (HInst->getOpcode()) {
- default: return false; // Not safe / profitable to hoist.
- case Instruction::Add:
- case Instruction::Sub:
- // Not worth doing for vector ops.
- if (HInst->getType()->isVectorTy())
- return false;
- break;
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- // Don't mess with vector operations.
- if (HInst->getType()->isVectorTy())
- return false;
- break; // These are all cheap and non-trapping instructions.
- }
-
- // If the instruction is obviously dead, don't try to predicate it.
- if (HInst->use_empty()) {
- HInst->eraseFromParent();
- return true;
+ // Collect interesting PHIs, and scan for hazards.
+ SmallSetVector<std::pair<Value *, Value *>, 4> PHIs;
+ BasicBlock *BB2 = BB1->getTerminator()->getSuccessor(0);
+ for (BasicBlock::iterator I = BB2->begin();
+ PHINode *PN = dyn_cast<PHINode>(I); ++I) {
+ Value *BB1V = PN->getIncomingValueForBlock(BB1);
+ Value *BIParentV = PN->getIncomingValueForBlock(BIParent);
+
+ // Skip PHIs which are trivial.
+ if (BB1V == BIParentV)
+ continue;
+
+ // Check for safety.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(BB1V)) {
+ // An unfolded ConstantExpr could end up getting expanded into
+ // Instructions. Don't speculate this and another instruction at
+ // the same time.
+ if (HInst)
+ return false;
+ if (!isSafeToSpeculativelyExecute(CE))
+ return false;
+ if (ComputeSpeculationCost(CE) > PHINodeFoldingThreshold)
+ return false;
+ }
+
+ // Ok, we may insert a select for this PHI.
+ PHIs.insert(std::make_pair(BB1V, BIParentV));
}
- // Can we speculatively execute the instruction? And what is the value
- // if the condition is false? Consider the phi uses, if the incoming value
- // from the "if" block are all the same V, then V is the value of the
- // select if the condition is false.
- BasicBlock *BIParent = BI->getParent();
- SmallVector<PHINode*, 4> PHIUses;
- Value *FalseV = NULL;
+ // If there are no PHIs to process, bail early. This helps ensure idempotence
+ // as well.
+ if (PHIs.empty())
+ return false;
- BasicBlock *BB2 = BB1->getTerminator()->getSuccessor(0);
- for (Value::use_iterator UI = HInst->use_begin(), E = HInst->use_end();
- UI != E; ++UI) {
- // Ignore any user that is not a PHI node in BB2. These can only occur in
- // unreachable blocks, because they would not be dominated by the instr.
- PHINode *PN = dyn_cast<PHINode>(*UI);
- if (!PN || PN->getParent() != BB2)
- return false;
- PHIUses.push_back(PN);
-
- Value *PHIV = PN->getIncomingValueForBlock(BIParent);
- if (!FalseV)
- FalseV = PHIV;
- else if (FalseV != PHIV)
- return false; // Inconsistent value when condition is false.
- }
-
- assert(FalseV && "Must have at least one user, and it must be a PHI");
-
- // Do not hoist the instruction if any of its operands are defined but not
- // used in this BB. The transformation will prevent the operand from
- // being sunk into the use block.
- for (User::op_iterator i = HInst->op_begin(), e = HInst->op_end();
- i != e; ++i) {
- Instruction *OpI = dyn_cast<Instruction>(*i);
- if (OpI && OpI->getParent() == BIParent &&
- !OpI->isUsedInBasicBlock(BIParent))
- return false;
- }
+ // If we get here, we can hoist the instruction and if-convert.
+ DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *BB1 << "\n";);
- // If we get here, we can hoist the instruction. Try to place it
- // before the icmp instruction preceding the conditional branch.
- BasicBlock::iterator InsertPos = BI;
- if (InsertPos != BIParent->begin())
- --InsertPos;
- // Skip debug info between condition and branch.
- while (InsertPos != BIParent->begin() && isa<DbgInfoIntrinsic>(InsertPos))
- --InsertPos;
- if (InsertPos == BrCond && !isa<PHINode>(BrCond)) {
- SmallPtrSet<Instruction *, 4> BB1Insns;
- for(BasicBlock::iterator BB1I = BB1->begin(), BB1E = BB1->end();
- BB1I != BB1E; ++BB1I)
- BB1Insns.insert(BB1I);
- for(Value::use_iterator UI = BrCond->use_begin(), UE = BrCond->use_end();
- UI != UE; ++UI) {
- Instruction *Use = cast<Instruction>(*UI);
- if (!BB1Insns.count(Use)) continue;
-
- // If BrCond uses the instruction that place it just before
- // branch instruction.
- InsertPos = BI;
- break;
- }
- } else
- InsertPos = BI;
- BIParent->getInstList().splice(InsertPos, BB1->getInstList(), HInst);
+ // Hoist the instruction.
+ if (HInst)
+ BIParent->getInstList().splice(BI, BB1->getInstList(), HInst);
- // Create a select whose true value is the speculatively executed value and
- // false value is the previously determined FalseV.
+ // Insert selects and rewrite the PHI operands.
IRBuilder<true, NoFolder> Builder(BI);
- SelectInst *SI;
- if (Invert)
- SI = cast<SelectInst>
- (Builder.CreateSelect(BrCond, FalseV, HInst,
- FalseV->getName() + "." + HInst->getName()));
- else
- SI = cast<SelectInst>
- (Builder.CreateSelect(BrCond, HInst, FalseV,
- HInst->getName() + "." + FalseV->getName()));
-
- // Make the PHI node use the select for all incoming values for "then" and
- // "if" blocks.
- for (unsigned i = 0, e = PHIUses.size(); i != e; ++i) {
- PHINode *PN = PHIUses[i];
- for (unsigned j = 0, ee = PN->getNumIncomingValues(); j != ee; ++j)
- if (PN->getIncomingBlock(j) == BB1 || PN->getIncomingBlock(j) == BIParent)
- PN->setIncomingValue(j, SI);
+ for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
+ Value *TrueV = PHIs[i].first;
+ Value *FalseV = PHIs[i].second;
+
+ // Create a select whose true value is the speculatively executed value and
+ // false value is the previously determined FalseV.
+ SelectInst *SI;
+ if (Invert)
+ SI = cast<SelectInst>
+ (Builder.CreateSelect(BrCond, FalseV, TrueV,
+ FalseV->getName() + "." + TrueV->getName()));
+ else
+ SI = cast<SelectInst>
+ (Builder.CreateSelect(BrCond, TrueV, FalseV,
+ TrueV->getName() + "." + FalseV->getName()));
+
+ // Make the PHI node use the select for all incoming values for "then" and
+ // "if" blocks.
+ for (BasicBlock::iterator I = BB2->begin();
+ PHINode *PN = dyn_cast<PHINode>(I); ++I) {
+ unsigned BB1I = PN->getBasicBlockIndex(BB1);
+ unsigned BIParentI = PN->getBasicBlockIndex(BIParent);
+ Value *BB1V = PN->getIncomingValue(BB1I);
+ Value *BIParentV = PN->getIncomingValue(BIParentI);
+ if (TrueV == BB1V && FalseV == BIParentV) {
+ PN->setIncomingValue(BB1I, SI);
+ PN->setIncomingValue(BIParentI, SI);
+ }
+ }
}
++NumSpeculations;
@@ -1461,6 +1448,49 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI,
return true;
}
+/// ExtractBranchMetadata - Given a conditional BranchInstruction, retrieve the
+/// probabilities of the branch taking each edge. Fills in the two APInt
+/// parameters and returns true, or returns false if no or invalid metadata was
+/// found.
+static bool ExtractBranchMetadata(BranchInst *BI,
+ APInt &ProbTrue, APInt &ProbFalse) {
+ assert(BI->isConditional() &&
+ "Looking for probabilities on unconditional branch?");
+ MDNode *ProfileData = BI->getMetadata(LLVMContext::MD_prof);
+ if (!ProfileData || ProfileData->getNumOperands() != 3) return false;
+ ConstantInt *CITrue = dyn_cast<ConstantInt>(ProfileData->getOperand(1));
+ ConstantInt *CIFalse = dyn_cast<ConstantInt>(ProfileData->getOperand(2));
+ if (!CITrue || !CIFalse) return false;
+ ProbTrue = CITrue->getValue();
+ ProbFalse = CIFalse->getValue();
+ assert(ProbTrue.getBitWidth() == 32 && ProbFalse.getBitWidth() == 32 &&
+ "Branch probability metadata must be 32-bit integers");
+ return true;
+}
+
+/// MultiplyAndLosePrecision - Multiplies A and B, then returns the result. In
+/// the event of overflow, logically-shifts all four inputs right until the
+/// multiply fits.
+static APInt MultiplyAndLosePrecision(APInt &A, APInt &B, APInt &C, APInt &D,
+ unsigned &BitsLost) {
+ BitsLost = 0;
+ bool Overflow = false;
+ APInt Result = A.umul_ov(B, Overflow);
+ if (Overflow) {
+ APInt MaxB = APInt::getMaxValue(A.getBitWidth()).udiv(A);
+ do {
+ B = B.lshr(1);
+ ++BitsLost;
+ } while (B.ugt(MaxB));
+ A = A.lshr(BitsLost);
+ C = C.lshr(BitsLost);
+ D = D.lshr(BitsLost);
+ Result = A * B;
+ }
+ return Result;
+}
+
+
/// FoldBranchToCommonDest - If this basic block is simple enough, and if a
/// predecessor branches to us and one of our successors, fold the block into
/// the predecessor and use logical operations to pick the right destination.
@@ -1479,7 +1509,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Ignore dbg intrinsics.
while (isa<DbgInfoIntrinsic>(FrontIt)) ++FrontIt;
-
+
// Allow a single instruction to be hoisted in addition to the compare
// that feeds the branch. We later ensure that any values that _it_ uses
// were also live in the predecessor, so that we don't unnecessarily create
@@ -1487,7 +1517,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
Instruction *BonusInst = 0;
if (&*FrontIt != Cond &&
FrontIt->hasOneUse() && *FrontIt->use_begin() == Cond &&
- FrontIt->isSafeToSpeculativelyExecute()) {
+ isSafeToSpeculativelyExecute(FrontIt)) {
BonusInst = &*FrontIt;
++FrontIt;
@@ -1557,7 +1587,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
SmallPtrSet<Value*, 4> UsedValues;
for (Instruction::op_iterator OI = BonusInst->op_begin(),
OE = BonusInst->op_end(); OI != OE; ++OI) {
- Value* V = *OI;
+ Value *V = *OI;
if (!isa<Constant>(V))
UsedValues.insert(V);
}
@@ -1602,10 +1632,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
}
PBI->setCondition(NewCond);
- BasicBlock *OldTrue = PBI->getSuccessor(0);
- BasicBlock *OldFalse = PBI->getSuccessor(1);
- PBI->setSuccessor(0, OldFalse);
- PBI->setSuccessor(1, OldTrue);
+ PBI->swapSuccessors();
}
// If we have a bonus inst, clone it into the predecessor block.
@@ -1638,6 +1665,94 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
PBI->setSuccessor(1, FalseDest);
}
+ // TODO: If BB is reachable from all paths through PredBlock, then we
+ // could replace PBI's branch probabilities with BI's.
+
+ // Merge probability data into PredBlock's branch.
+ APInt A, B, C, D;
+ if (ExtractBranchMetadata(PBI, C, D) && ExtractBranchMetadata(BI, A, B)) {
+ // Given IR which does:
+ // bbA:
+ // br i1 %x, label %bbB, label %bbC
+ // bbB:
+ // br i1 %y, label %bbD, label %bbC
+ // Let's call the probability that we take the edge from %bbA to %bbB
+ // 'a', from %bbA to %bbC, 'b', from %bbB to %bbD 'c' and from %bbB to
+ // %bbC probability 'd'.
+ //
+ // We transform the IR into:
+ // bbA:
+ // br i1 %z, label %bbD, label %bbC
+ // where the probability of going to %bbD is (a*c) and going to %bbC is
+ // (b+a*d).
+ //
+ // Probabilities aren't stored as ratios directly. Using branch weights,
+ // we get:
+ // (a*c)% = A*C, (b+(a*d))% = A*D+B*C+B*D.
+
+ // In the event of overflow, we want to drop the LSB of the input
+ // probabilities.
+ unsigned BitsLost;
+
+ // Ignore overflow result on ProbTrue.
+ APInt ProbTrue = MultiplyAndLosePrecision(A, C, B, D, BitsLost);
+
+ APInt Tmp1 = MultiplyAndLosePrecision(B, D, A, C, BitsLost);
+ if (BitsLost) {
+ ProbTrue = ProbTrue.lshr(BitsLost*2);
+ }
+
+ APInt Tmp2 = MultiplyAndLosePrecision(A, D, C, B, BitsLost);
+ if (BitsLost) {
+ ProbTrue = ProbTrue.lshr(BitsLost*2);
+ Tmp1 = Tmp1.lshr(BitsLost*2);
+ }
+
+ APInt Tmp3 = MultiplyAndLosePrecision(B, C, A, D, BitsLost);
+ if (BitsLost) {
+ ProbTrue = ProbTrue.lshr(BitsLost*2);
+ Tmp1 = Tmp1.lshr(BitsLost*2);
+ Tmp2 = Tmp2.lshr(BitsLost*2);
+ }
+
+ bool Overflow1 = false, Overflow2 = false;
+ APInt Tmp4 = Tmp2.uadd_ov(Tmp3, Overflow1);
+ APInt ProbFalse = Tmp4.uadd_ov(Tmp1, Overflow2);
+
+ if (Overflow1 || Overflow2) {
+ ProbTrue = ProbTrue.lshr(1);
+ Tmp1 = Tmp1.lshr(1);
+ Tmp2 = Tmp2.lshr(1);
+ Tmp3 = Tmp3.lshr(1);
+ Tmp4 = Tmp2 + Tmp3;
+ ProbFalse = Tmp4 + Tmp1;
+ }
+
+ // The sum of branch weights must fit in 32-bits.
+ if (ProbTrue.isNegative() && ProbFalse.isNegative()) {
+ ProbTrue = ProbTrue.lshr(1);
+ ProbFalse = ProbFalse.lshr(1);
+ }
+
+ if (ProbTrue != ProbFalse) {
+ // Normalize the result.
+ APInt GCD = APIntOps::GreatestCommonDivisor(ProbTrue, ProbFalse);
+ ProbTrue = ProbTrue.udiv(GCD);
+ ProbFalse = ProbFalse.udiv(GCD);
+
+ LLVMContext &Context = BI->getContext();
+ Value *Ops[3];
+ Ops[0] = BI->getMetadata(LLVMContext::MD_prof)->getOperand(0);
+ Ops[1] = ConstantInt::get(Context, ProbTrue);
+ Ops[2] = ConstantInt::get(Context, ProbFalse);
+ PBI->setMetadata(LLVMContext::MD_prof, MDNode::get(Context, Ops));
+ } else {
+ PBI->setMetadata(LLVMContext::MD_prof, NULL);
+ }
+ } else {
+ PBI->setMetadata(LLVMContext::MD_prof, NULL);
+ }
+
// Copy any debug value intrinsics into the end of PredBlock.
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
if (isa<DbgInfoIntrinsic>(*I))
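A worked check of the weight algebra above (illustrative numbers, not from the diff): let PBI carry weights C:D = 1:1 and BI carry A:B = 3:1. The folded branch then gets ProbTrue = A*C = 3 and ProbFalse = B*D + A*D + B*C = 1 + 3 + 1 = 5, for a taken probability of 3/8 — exactly (1/2)*(3/4), the chance of first taking PBI's edge into BB and then BI's true edge. Since gcd(3,5) = 1, the normalized weights 3 and 5 are stored unchanged.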
@@ -1894,8 +2009,8 @@ static bool SimplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select) {
// Find the relevant condition and destinations.
Value *Condition = Select->getCondition();
- BasicBlock *TrueBB = SI->getSuccessor(SI->findCaseValue(TrueVal));
- BasicBlock *FalseBB = SI->getSuccessor(SI->findCaseValue(FalseVal));
+ BasicBlock *TrueBB = SI->findCaseValue(TrueVal).getCaseSuccessor();
+ BasicBlock *FalseBB = SI->findCaseValue(FalseVal).getCaseSuccessor();
// Perform the actual simplification.
return SimplifyTerminatorOnSelect(SI, Condition, TrueBB, FalseBB);
@@ -1979,7 +2094,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
// Ok, the block is reachable from the default dest. If the constant we're
// comparing exists in one of the other edges, then we can constant fold ICI
// and zap it.
- if (SI->findCaseValue(Cst) != 0) {
+ if (SI->findCaseValue(Cst) != SI->case_default()) {
Value *V;
if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
V = ConstantInt::getFalse(BB->getContext());
@@ -2235,52 +2350,6 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) {
return false;
}
-bool SimplifyCFGOpt::SimplifyUnwind(UnwindInst *UI, IRBuilder<> &Builder) {
- // Check to see if the first instruction in this block is just an unwind.
- // If so, replace any invoke instructions which use this as an exception
- // destination with call instructions.
- BasicBlock *BB = UI->getParent();
- if (!BB->getFirstNonPHIOrDbg()->isTerminator()) return false;
-
- bool Changed = false;
- SmallVector<BasicBlock*, 8> Preds(pred_begin(BB), pred_end(BB));
- while (!Preds.empty()) {
- BasicBlock *Pred = Preds.back();
- InvokeInst *II = dyn_cast<InvokeInst>(Pred->getTerminator());
- if (II && II->getUnwindDest() == BB) {
- // Insert a new branch instruction before the invoke, because this
- // is now a fall through.
- Builder.SetInsertPoint(II);
- BranchInst *BI = Builder.CreateBr(II->getNormalDest());
- Pred->getInstList().remove(II); // Take out of symbol table
-
- // Insert the call now.
- SmallVector<Value*,8> Args(II->op_begin(), II->op_end()-3);
- Builder.SetInsertPoint(BI);
- CallInst *CI = Builder.CreateCall(II->getCalledValue(),
- Args, II->getName());
- CI->setCallingConv(II->getCallingConv());
- CI->setAttributes(II->getAttributes());
- // If the invoke produced a value, the Call now does instead.
- II->replaceAllUsesWith(CI);
- delete II;
- Changed = true;
- }
-
- Preds.pop_back();
- }
-
- // If this block is now dead (and isn't the entry block), remove it.
- if (pred_begin(BB) == pred_end(BB) &&
- BB != &BB->getParent()->getEntryBlock()) {
- // We know there are no successors, so just nuke the block.
- BB->eraseFromParent();
- return true;
- }
-
- return Changed;
-}
-
bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
BasicBlock *BB = UI->getParent();
@@ -2352,8 +2421,9 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
}
}
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i)
- if (SI->getSuccessor(i) == BB) {
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i)
+ if (i.getCaseSuccessor() == BB) {
BB->removePredecessor(SI->getParent());
SI->removeCase(i);
--i; --e;
@@ -2361,14 +2431,15 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
}
// If the default value is unreachable, figure out the most popular
// destination and make it the default.
- if (SI->getSuccessor(0) == BB) {
+ if (SI->getDefaultDest() == BB) {
std::map<BasicBlock*, std::pair<unsigned, unsigned> > Popularity;
- for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i) {
- std::pair<unsigned, unsigned>& entry =
- Popularity[SI->getSuccessor(i)];
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i) {
+ std::pair<unsigned, unsigned> &entry =
+ Popularity[i.getCaseSuccessor()];
if (entry.first == 0) {
entry.first = 1;
- entry.second = i;
+ entry.second = i.getCaseIndex();
} else {
entry.first++;
}
@@ -2390,7 +2461,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
if (MaxBlock) {
// Make this the new default, allowing us to delete any explicit
// edges to it.
- SI->setSuccessor(0, MaxBlock);
+ SI->setDefaultDest(MaxBlock);
Changed = true;
// If MaxBlock has phinodes in it, remove MaxPop-1 entries from
@@ -2399,8 +2470,9 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
for (unsigned i = 0; i != MaxPop-1; ++i)
MaxBlock->removePredecessor(SI->getParent());
- for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i)
- if (SI->getSuccessor(i) == MaxBlock) {
+ for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
+ i != e; ++i)
+ if (i.getCaseSuccessor() == MaxBlock) {
SI->removeCase(i);
--i; --e;
}
@@ -2442,17 +2514,19 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
/// TurnSwitchRangeIntoICmp - Turns a switch that contains only an
/// integer range comparison into a sub, an icmp and a branch.
static bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) {
- assert(SI->getNumCases() > 2 && "Degenerate switch?");
+ assert(SI->getNumCases() > 1 && "Degenerate switch?");
// Make sure all cases point to the same destination and gather the values.
SmallVector<ConstantInt *, 16> Cases;
- Cases.push_back(SI->getCaseValue(1));
- for (unsigned I = 2, E = SI->getNumCases(); I != E; ++I) {
- if (SI->getSuccessor(I-1) != SI->getSuccessor(I))
+ SwitchInst::CaseIt I = SI->case_begin();
+ Cases.push_back(I.getCaseValue());
+ SwitchInst::CaseIt PrevI = I++;
+ for (SwitchInst::CaseIt E = SI->case_end(); I != E; PrevI = I++) {
+ if (PrevI.getCaseSuccessor() != I.getCaseSuccessor())
return false;
- Cases.push_back(SI->getCaseValue(I));
+ Cases.push_back(I.getCaseValue());
}
- assert(Cases.size() == SI->getNumCases()-1 && "Not all cases gathered");
+ assert(Cases.size() == SI->getNumCases() && "Not all cases gathered");
// Sort the case values, then check if they form a range we can transform.
array_pod_sort(Cases.begin(), Cases.end(), ConstantIntSortPredicate);
@@ -2462,18 +2536,19 @@ static bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) {
}
Constant *Offset = ConstantExpr::getNeg(Cases.back());
- Constant *NumCases = ConstantInt::get(Offset->getType(), SI->getNumCases()-1);
+ Constant *NumCases = ConstantInt::get(Offset->getType(), SI->getNumCases());
Value *Sub = SI->getCondition();
if (!Offset->isNullValue())
Sub = Builder.CreateAdd(Sub, Offset, Sub->getName()+".off");
Value *Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch");
- Builder.CreateCondBr(Cmp, SI->getSuccessor(1), SI->getDefaultDest());
+ Builder.CreateCondBr(
+ Cmp, SI->case_begin().getCaseSuccessor(), SI->getDefaultDest());
// Prune obsolete incoming values off the successor's PHI nodes.
- for (BasicBlock::iterator BBI = SI->getSuccessor(1)->begin();
+ for (BasicBlock::iterator BBI = SI->case_begin().getCaseSuccessor()->begin();
isa<PHINode>(BBI); ++BBI) {
- for (unsigned I = 0, E = SI->getNumCases()-2; I != E; ++I)
+ for (unsigned I = 0, E = SI->getNumCases()-1; I != E; ++I)
cast<PHINode>(BBI)->removeIncomingValue(SI->getParent());
}
SI->eraseFromParent();
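Concretely, the transform rewrites a contiguous-range switch into a subtract-and-compare; illustrative IR in the spirit of the comment examples earlier in this file (block and value names hypothetical):

  switch i32 %x, label %default [ i32 3, label %dest
                                  i32 4, label %dest
                                  i32 5, label %dest ]

becomes

  %x.off = add i32 %x, -3
  %switch = icmp ult i32 %x.off, 3
  br i1 %switch, label %dest, label %default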
@@ -2487,24 +2562,26 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI) {
Value *Cond = SI->getCondition();
unsigned Bits = cast<IntegerType>(Cond->getType())->getBitWidth();
APInt KnownZero(Bits, 0), KnownOne(Bits, 0);
- ComputeMaskedBits(Cond, APInt::getAllOnesValue(Bits), KnownZero, KnownOne);
+ ComputeMaskedBits(Cond, KnownZero, KnownOne);
// Gather dead cases.
SmallVector<ConstantInt*, 8> DeadCases;
- for (unsigned I = 1, E = SI->getNumCases(); I != E; ++I) {
- if ((SI->getCaseValue(I)->getValue() & KnownZero) != 0 ||
- (SI->getCaseValue(I)->getValue() & KnownOne) != KnownOne) {
- DeadCases.push_back(SI->getCaseValue(I));
+ for (SwitchInst::CaseIt I = SI->case_begin(), E = SI->case_end(); I != E; ++I) {
+ if ((I.getCaseValue()->getValue() & KnownZero) != 0 ||
+ (I.getCaseValue()->getValue() & KnownOne) != KnownOne) {
+ DeadCases.push_back(I.getCaseValue());
DEBUG(dbgs() << "SimplifyCFG: switch case '"
- << SI->getCaseValue(I)->getValue() << "' is dead.\n");
+ << I.getCaseValue() << "' is dead.\n");
}
}
// Remove dead cases from the switch.
for (unsigned I = 0, E = DeadCases.size(); I != E; ++I) {
- unsigned Case = SI->findCaseValue(DeadCases[I]);
+ SwitchInst::CaseIt Case = SI->findCaseValue(DeadCases[I]);
+ assert(Case != SI->case_default() &&
+ "Case was not found. Probably mistake in DeadCases forming.");
// Prune unused values from PHI nodes.
- SI->getSuccessor(Case)->removePredecessor(SI->getParent());
+ Case.getCaseSuccessor()->removePredecessor(SI->getParent());
SI->removeCase(Case);
}
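As an example of the known-bits test above: if ComputeMaskedBits proves the low bit of the condition is always zero (bit 0 set in KnownZero), then any odd case value V satisfies (V & KnownZero) != 0 and is pruned — case 5 dies, while case 4 survives.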
@@ -2553,9 +2630,9 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
typedef DenseMap<PHINode*, SmallVector<int,4> > ForwardingNodesMap;
ForwardingNodesMap ForwardingNodes;
- for (unsigned I = 1; I < SI->getNumCases(); ++I) { // 0 is the default case.
- ConstantInt *CaseValue = SI->getCaseValue(I);
- BasicBlock *CaseDest = SI->getSuccessor(I);
+ for (SwitchInst::CaseIt I = SI->case_begin(), E = SI->case_end(); I != E; ++I) {
+ ConstantInt *CaseValue = I.getCaseValue();
+ BasicBlock *CaseDest = I.getCaseSuccessor();
int PhiIndex;
PHINode *PHI = FindPHIForConditionForwarding(CaseValue, CaseDest,
@@ -2676,8 +2753,8 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
if (ICI->isEquality() && isa<ConstantInt>(ICI->getOperand(1))) {
for (++I; isa<DbgInfoIntrinsic>(I); ++I)
;
- if (I->isTerminator()
- && TryToSimplifyUncondBranchWithICmpInIt(ICI, TD, Builder))
+ if (I->isTerminator() &&
+ TryToSimplifyUncondBranchWithICmpInIt(ICI, TD, Builder))
return true;
}
@@ -2720,6 +2797,12 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (SimplifyBranchOnICmpChain(BI, TD, Builder))
return true;
+ // If this basic block is ONLY a compare and a branch, and if a predecessor
+ // branches to us and one of our successors, fold the comparison into the
+ // predecessor and use logical operations to pick the right destination.
+ if (FoldBranchToCommonDest(BI))
+ return SimplifyCFG(BB) | true;
+
// We have a conditional branch to two blocks that are only reachable
// from BI. We know that the condbr dominates the two blocks, so see if
// there is any identical code in the "then" and "else" blocks. If so, we
@@ -2754,12 +2837,6 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (FoldCondBranchOnPHI(BI, TD))
return SimplifyCFG(BB) | true;
- // If this basic block is ONLY a setcc and a branch, and if a predecessor
- // branches to us and one of our successors, fold the setcc into the
- // predecessor and use logical operations to pick the right destination.
- if (FoldBranchToCommonDest(BI))
- return SimplifyCFG(BB) | true;
-
// Scan predecessor blocks for conditional branches.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
@@ -2809,7 +2886,7 @@ static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I) {
}
/// If BB has an incoming value that will always trigger undefined behavior
-/// (eg. null pointer derefence), remove the branch leading here.
+/// (eg. null pointer dereference), remove the branch leading here.
static bool removeUndefIntroducingPredecessor(BasicBlock *BB) {
for (BasicBlock::iterator i = BB->begin();
PHINode *PHI = dyn_cast<PHINode>(i); ++i)
@@ -2883,17 +2960,15 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
} else {
if (SimplifyCondBranch(BI, Builder)) return true;
}
- } else if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) {
- if (SimplifyResume(RI, Builder)) return true;
} else if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
if (SimplifyReturn(RI, Builder)) return true;
+ } else if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) {
+ if (SimplifyResume(RI, Builder)) return true;
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
if (SimplifySwitch(SI, Builder)) return true;
} else if (UnreachableInst *UI =
dyn_cast<UnreachableInst>(BB->getTerminator())) {
if (SimplifyUnreachable(UI)) return true;
- } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- if (SimplifyUnwind(UI, Builder)) return true;
} else if (IndirectBrInst *IBI =
dyn_cast<IndirectBrInst>(BB->getTerminator())) {
if (SimplifyIndirectBr(IBI)) return true;
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index 76289c0..4030bef 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -46,7 +46,6 @@ namespace {
LoopInfo *LI;
DominatorTree *DT;
ScalarEvolution *SE;
- IVUsers *IU; // NULL for DisableIVRewrite
const TargetData *TD; // May be NULL
SmallVectorImpl<WeakVH> &DeadInsts;
@@ -59,7 +58,6 @@ namespace {
L(Loop),
LI(LPM->getAnalysisIfAvailable<LoopInfo>()),
SE(SE),
- IU(IVU),
TD(LPM->getAnalysisIfAvailable<TargetData>()),
DeadInsts(Dead),
Changed(false) {
@@ -107,8 +105,8 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand)
// Attempt to fold a binary operator with constant operand.
// e.g. ((I + 1) >> 2) => I >> 2
- if (IVOperand->getNumOperands() != 2 ||
- !isa<ConstantInt>(IVOperand->getOperand(1)))
+ if (!isa<BinaryOperator>(IVOperand)
+ || !isa<ConstantInt>(IVOperand->getOperand(1)))
return 0;
IVSrc = IVOperand->getOperand(0);
@@ -229,11 +227,6 @@ void SimplifyIndvar::eliminateIVRemainder(BinaryOperator *Rem,
Rem->replaceAllUsesWith(Sel);
}
- // Inform IVUsers about the new users.
- if (IU) {
- if (Instruction *I = dyn_cast<Instruction>(Rem->getOperand(0)))
- IU->AddUsersIfInteresting(I);
- }
DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
++NumElimRem;
Changed = true;
@@ -375,6 +368,8 @@ void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
namespace llvm {
+void IVVisitor::anchor() { }
+
/// simplifyUsersOfIV - Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.
bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, LPPassManager *LPM,
@@ -397,36 +392,4 @@ bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, LPPassManager *LPM,
return Changed;
}
-/// simplifyIVUsers - Perform simplification on instructions recorded by the
-/// IVUsers pass.
-///
-/// This is the old approach to IV simplification to be replaced by
-/// SimplifyLoopIVs.
-bool simplifyIVUsers(IVUsers *IU, ScalarEvolution *SE, LPPassManager *LPM,
- SmallVectorImpl<WeakVH> &Dead) {
- SimplifyIndvar SIV(IU->getLoop(), SE, LPM, Dead);
-
- // Each round of simplification involves a round of eliminating operations
- // followed by a round of widening IVs. A single IVUsers worklist is used
- // across all rounds. The inner loop advances the user. If widening exposes
- // more uses, then another pass through the outer loop is triggered.
- for (IVUsers::iterator I = IU->begin(); I != IU->end(); ++I) {
- Instruction *UseInst = I->getUser();
- Value *IVOperand = I->getOperandValToReplace();
-
- if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
- SIV.eliminateIVComparison(ICmp, IVOperand);
- continue;
- }
- if (BinaryOperator *Rem = dyn_cast<BinaryOperator>(UseInst)) {
- bool IsSigned = Rem->getOpcode() == Instruction::SRem;
- if (IsSigned || Rem->getOpcode() == Instruction::URem) {
- SIV.eliminateIVRemainder(Rem, IVOperand, IsSigned);
- continue;
- }
- }
- }
- return SIV.hasChanged();
-}
-
} // namespace llvm
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
index ac005f9..81eb9e0 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyInstructions.cpp
@@ -24,6 +24,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -39,12 +40,14 @@ namespace {
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<TargetLibraryInfo>();
}
/// runOnFunction - Remove instructions that simplify.
bool runOnFunction(Function &F) {
const DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;
bool Changed = false;
@@ -60,7 +63,7 @@ namespace {
continue;
// Don't waste time simplifying unused instructions.
if (!I->use_empty())
- if (Value *V = SimplifyInstruction(I, TD, DT)) {
+ if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
// Mark all uses for resimplification next time round the loop.
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI)
@@ -84,8 +87,11 @@ namespace {
}
char InstSimplifier::ID = 0;
-INITIALIZE_PASS(InstSimplifier, "instsimplify", "Remove redundant instructions",
- false, false)
+INITIALIZE_PASS_BEGIN(InstSimplifier, "instsimplify",
+ "Remove redundant instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_END(InstSimplifier, "instsimplify",
+ "Remove redundant instructions", false, false)
char &llvm::InstructionSimplifierID = InstSimplifier::ID;
// Public interface to the simplify instructions pass.
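The three-macro registration replaces the single INITIALIZE_PASS so that the TargetLibraryInfo dependency is initialized before the pass itself; together with the addRequired<> above, it makes getAnalysis<TargetLibraryInfo>() legal in runOnFunction. The general shape, for a hypothetical pass:

  INITIALIZE_PASS_BEGIN(MyPass, "my-pass", "My pass description", false, false)
  INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
  INITIALIZE_PASS_END(MyPass, "my-pass", "My pass description", false, false)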
diff --git a/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp b/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
index 46d4ada..b1cad06 100644
--- a/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
@@ -50,33 +50,13 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
// return.
//
std::vector<BasicBlock*> ReturningBlocks;
- std::vector<BasicBlock*> UnwindingBlocks;
std::vector<BasicBlock*> UnreachableBlocks;
for(Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
if (isa<ReturnInst>(I->getTerminator()))
ReturningBlocks.push_back(I);
- else if (isa<UnwindInst>(I->getTerminator()))
- UnwindingBlocks.push_back(I);
else if (isa<UnreachableInst>(I->getTerminator()))
UnreachableBlocks.push_back(I);
- // Handle unwinding blocks first.
- if (UnwindingBlocks.empty()) {
- UnwindBlock = 0;
- } else if (UnwindingBlocks.size() == 1) {
- UnwindBlock = UnwindingBlocks.front();
- } else {
- UnwindBlock = BasicBlock::Create(F.getContext(), "UnifiedUnwindBlock", &F);
- new UnwindInst(F.getContext(), UnwindBlock);
-
- for (std::vector<BasicBlock*>::iterator I = UnwindingBlocks.begin(),
- E = UnwindingBlocks.end(); I != E; ++I) {
- BasicBlock *BB = *I;
- BB->getInstList().pop_back(); // Remove the unwind insn
- BranchInst::Create(UnwindBlock, BB);
- }
- }
-
// Then unreachable blocks.
if (UnreachableBlocks.empty()) {
UnreachableBlock = 0;
diff --git a/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
new file mode 100644
index 0000000..286b54f
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -0,0 +1,1907 @@
+//===- BBVectorize.cpp - A Basic-Block Vectorizer -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a basic-block vectorization pass. The algorithm was
+// inspired by that used by the Vienna MAP Vectorizer by Franchetti and Kral,
+// et al. It works by looking for chains of pairable operations and then
+// pairing them.
+//
+//===----------------------------------------------------------------------===//
+
+#define BBV_NAME "bb-vectorize"
+#define DEBUG_TYPE BBV_NAME
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Pass.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Vectorize.h"
+#include <algorithm>
+#include <map>
+using namespace llvm;
+
+static cl::opt<unsigned>
+ReqChainDepth("bb-vectorize-req-chain-depth", cl::init(6), cl::Hidden,
+ cl::desc("The required chain depth for vectorization"));
+
+static cl::opt<unsigned>
+SearchLimit("bb-vectorize-search-limit", cl::init(400), cl::Hidden,
+ cl::desc("The maximum search distance for instruction pairs"));
+
+static cl::opt<bool>
+SplatBreaksChain("bb-vectorize-splat-breaks-chain", cl::init(false), cl::Hidden,
+ cl::desc("Replicating one element to a pair breaks the chain"));
+
+static cl::opt<unsigned>
+VectorBits("bb-vectorize-vector-bits", cl::init(128), cl::Hidden,
+ cl::desc("The size of the native vector registers"));
+
+static cl::opt<unsigned>
+MaxIter("bb-vectorize-max-iter", cl::init(0), cl::Hidden,
+ cl::desc("The maximum number of pairing iterations"));
+
+static cl::opt<unsigned>
+MaxInsts("bb-vectorize-max-instr-per-group", cl::init(500), cl::Hidden,
+ cl::desc("The maximum number of pairable instructions per group"));
+
+static cl::opt<unsigned>
+MaxCandPairsForCycleCheck("bb-vectorize-max-cycle-check-pairs", cl::init(200),
+ cl::Hidden, cl::desc("The maximum number of candidate pairs with which to use"
+ " a full cycle check"));
+
+static cl::opt<bool>
+NoInts("bb-vectorize-no-ints", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize integer values"));
+
+static cl::opt<bool>
+NoFloats("bb-vectorize-no-floats", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize floating-point values"));
+
+static cl::opt<bool>
+NoCasts("bb-vectorize-no-casts", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize casting (conversion) operations"));
+
+static cl::opt<bool>
+NoMath("bb-vectorize-no-math", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize floating-point math intrinsics"));
+
+static cl::opt<bool>
+NoFMA("bb-vectorize-no-fma", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize the fused-multiply-add intrinsic"));
+
+static cl::opt<bool>
+NoMemOps("bb-vectorize-no-mem-ops", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize loads and stores"));
+
+static cl::opt<bool>
+AlignedOnly("bb-vectorize-aligned-only", cl::init(false), cl::Hidden,
+ cl::desc("Only generate aligned loads and stores"));
+
+static cl::opt<bool>
+NoMemOpBoost("bb-vectorize-no-mem-op-boost",
+ cl::init(false), cl::Hidden,
+ cl::desc("Don't boost the chain-depth contribution of loads and stores"));
+
+static cl::opt<bool>
+FastDep("bb-vectorize-fast-dep", cl::init(false), cl::Hidden,
+ cl::desc("Use a fast instruction dependency analysis"));
+
+#ifndef NDEBUG
+static cl::opt<bool>
+DebugInstructionExamination("bb-vectorize-debug-instruction-examination",
+ cl::init(false), cl::Hidden,
+ cl::desc("When debugging is enabled, output information on the"
+ " instruction-examination process"));
+static cl::opt<bool>
+DebugCandidateSelection("bb-vectorize-debug-candidate-selection",
+ cl::init(false), cl::Hidden,
+ cl::desc("When debugging is enabled, output information on the"
+ " candidate-selection process"));
+static cl::opt<bool>
+DebugPairSelection("bb-vectorize-debug-pair-selection",
+ cl::init(false), cl::Hidden,
+ cl::desc("When debugging is enabled, output information on the"
+ " pair-selection process"));
+static cl::opt<bool>
+DebugCycleCheck("bb-vectorize-debug-cycle-check",
+ cl::init(false), cl::Hidden,
+ cl::desc("When debugging is enabled, output information on the"
+ " cycle-checking process"));
+#endif
+
+STATISTIC(NumFusedOps, "Number of operations fused by bb-vectorize");
+
+namespace {
+ struct BBVectorize : public BasicBlockPass {
+ static char ID; // Pass identification, replacement for typeid
+
+ const VectorizeConfig Config;
+
+ BBVectorize(const VectorizeConfig &C = VectorizeConfig())
+ : BasicBlockPass(ID), Config(C) {
+ initializeBBVectorizePass(*PassRegistry::getPassRegistry());
+ }
+
+ BBVectorize(Pass *P, const VectorizeConfig &C)
+ : BasicBlockPass(ID), Config(C) {
+ AA = &P->getAnalysis<AliasAnalysis>();
+ SE = &P->getAnalysis<ScalarEvolution>();
+ TD = P->getAnalysisIfAvailable<TargetData>();
+ }
+
+ typedef std::pair<Value *, Value *> ValuePair;
+ typedef std::pair<ValuePair, size_t> ValuePairWithDepth;
+ typedef std::pair<ValuePair, ValuePair> VPPair; // A ValuePair pair
+ typedef std::pair<std::multimap<Value *, Value *>::iterator,
+ std::multimap<Value *, Value *>::iterator> VPIteratorPair;
+ typedef std::pair<std::multimap<ValuePair, ValuePair>::iterator,
+ std::multimap<ValuePair, ValuePair>::iterator>
+ VPPIteratorPair;
+
+ AliasAnalysis *AA;
+ ScalarEvolution *SE;
+ TargetData *TD;
+
+ // FIXME: const correct?
+
+ bool vectorizePairs(BasicBlock &BB);
+
+ bool getCandidatePairs(BasicBlock &BB,
+ BasicBlock::iterator &Start,
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts);
+
+ void computeConnectedPairs(std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs);
+
+ void buildDepMap(BasicBlock &BB,
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ DenseSet<ValuePair> &PairableInstUsers);
+
+ void choosePairs(std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseSet<ValuePair> &PairableInstUsers,
+ DenseMap<Value *, Value *>& ChosenPairs);
+
+ void fuseChosenPairs(BasicBlock &BB,
+ std::vector<Value *> &PairableInsts,
+ DenseMap<Value *, Value *>& ChosenPairs);
+
+ bool isInstVectorizable(Instruction *I, bool &IsSimpleLoadStore);
+
+ bool areInstsCompatible(Instruction *I, Instruction *J,
+ bool IsSimpleLoadStore);
+
+ bool trackUsesOfI(DenseSet<Value *> &Users,
+ AliasSetTracker &WriteSet, Instruction *I,
+ Instruction *J, bool UpdateUsers = true,
+ std::multimap<Value *, Value *> *LoadMoveSet = 0);
+
+ void computePairsConnectedTo(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ ValuePair P);
+
+ bool pairsConflict(ValuePair P, ValuePair Q,
+ DenseSet<ValuePair> &PairableInstUsers,
+ std::multimap<ValuePair, ValuePair> *PairableInstUserMap = 0);
+
+ bool pairWillFormCycle(ValuePair P,
+ std::multimap<ValuePair, ValuePair> &PairableInstUsers,
+ DenseSet<ValuePair> &CurrentPairs);
+
+ void pruneTreeFor(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseSet<ValuePair> &PairableInstUsers,
+ std::multimap<ValuePair, ValuePair> &PairableInstUserMap,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseMap<ValuePair, size_t> &Tree,
+ DenseSet<ValuePair> &PrunedTree, ValuePair J,
+ bool UseCycleCheck);
+
+ void buildInitialTreeFor(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseSet<ValuePair> &PairableInstUsers,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseMap<ValuePair, size_t> &Tree, ValuePair J);
+
+ void findBestTreeFor(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseSet<ValuePair> &PairableInstUsers,
+ std::multimap<ValuePair, ValuePair> &PairableInstUserMap,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseSet<ValuePair> &BestTree, size_t &BestMaxDepth,
+ size_t &BestEffSize, VPIteratorPair ChoiceRange,
+ bool UseCycleCheck);
+
+ Value *getReplacementPointerInput(LLVMContext& Context, Instruction *I,
+ Instruction *J, unsigned o, bool &FlipMemInputs);
+
+ void fillNewShuffleMask(LLVMContext& Context, Instruction *J,
+ unsigned NumElem, unsigned MaskOffset, unsigned NumInElem,
+ unsigned IdxOffset, std::vector<Constant*> &Mask);
+
+ Value *getReplacementShuffleMask(LLVMContext& Context, Instruction *I,
+ Instruction *J);
+
+ Value *getReplacementInput(LLVMContext& Context, Instruction *I,
+ Instruction *J, unsigned o, bool FlipMemInputs);
+
+ void getReplacementInputsForPair(LLVMContext& Context, Instruction *I,
+ Instruction *J, SmallVector<Value *, 3> &ReplacedOperands,
+ bool &FlipMemInputs);
+
+ void replaceOutputsOfPair(LLVMContext& Context, Instruction *I,
+ Instruction *J, Instruction *K,
+ Instruction *&InsertionPt, Instruction *&K1,
+ Instruction *&K2, bool &FlipMemInputs);
+
+ void collectPairLoadMoveSet(BasicBlock &BB,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ std::multimap<Value *, Value *> &LoadMoveSet,
+ Instruction *I);
+
+ void collectLoadMoveSet(BasicBlock &BB,
+ std::vector<Value *> &PairableInsts,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ std::multimap<Value *, Value *> &LoadMoveSet);
+
+ bool canMoveUsesOfIAfterJ(BasicBlock &BB,
+ std::multimap<Value *, Value *> &LoadMoveSet,
+ Instruction *I, Instruction *J);
+
+ void moveUsesOfIAfterJ(BasicBlock &BB,
+ std::multimap<Value *, Value *> &LoadMoveSet,
+ Instruction *&InsertionPt,
+ Instruction *I, Instruction *J);
+
+ bool vectorizeBB(BasicBlock &BB) {
+ bool changed = false;
+ // Iterate a sufficient number of times to merge types of size 1 bit,
+ // then 2 bits, then 4, etc. up to half of the width of the target
+ // vector register.
+ for (unsigned v = 2, n = 1;
+ v <= Config.VectorBits && (!Config.MaxIter || n <= Config.MaxIter);
+ v *= 2, ++n) {
+ DEBUG(dbgs() << "BBV: fusing loop #" << n <<
+ " for " << BB.getName() << " in " <<
+ BB.getParent()->getName() << "...\n");
+ if (vectorizePairs(BB))
+ changed = true;
+ else
+ break;
+ }
+
+ DEBUG(dbgs() << "BBV: done!\n");
+ return changed;
+ }
+
+ virtual bool runOnBasicBlock(BasicBlock &BB) {
+ AA = &getAnalysis<AliasAnalysis>();
+ SE = &getAnalysis<ScalarEvolution>();
+ TD = getAnalysisIfAvailable<TargetData>();
+
+ return vectorizeBB(BB);
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ BasicBlockPass::getAnalysisUsage(AU);
+ AU.addRequired<AliasAnalysis>();
+ AU.addRequired<ScalarEvolution>();
+ AU.addPreserved<AliasAnalysis>();
+ AU.addPreserved<ScalarEvolution>();
+ AU.setPreservesCFG();
+ }
+
+ // This returns the vector type that holds a pair of the provided type.
+ // If the provided type is already a vector, then its length is doubled.
+ static inline VectorType *getVecTypeForPair(Type *ElemTy) {
+ if (VectorType *VTy = dyn_cast<VectorType>(ElemTy)) {
+ unsigned numElem = VTy->getNumElements();
+ return VectorType::get(ElemTy->getScalarType(), numElem*2);
+ }
+
+ return VectorType::get(ElemTy, 2);
+ }
+
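The mapping is easiest to see by example (a sketch, assuming an LLVMContext Ctx and access to the member function):

  Type *F = Type::getFloatTy(Ctx);
  VectorType *V2 = getVecTypeForPair(F);                      // <2 x float>
  VectorType *V4 = getVecTypeForPair(VectorType::get(F, 2));  // <4 x float>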
+ // Returns the weight associated with the provided value. A chain of
+ // candidate pairs has a length given by the sum of the weights of its
+ // members (one weight per pair; the weight of each member of the pair
+ // is assumed to be the same). This length is then compared to the
+ // chain-length threshold to determine if a given chain is significant
+ // enough to be vectorized. The length is also used in comparing
+ // candidate chains where longer chains are considered to be better.
+ // Note: when this function returns 0, the resulting instructions are
+ // not actually fused.
+ inline size_t getDepthFactor(Value *V) {
+ // InsertElement and ExtractElement have a depth factor of zero. This is
+ // for two reasons: First, they cannot be usefully fused. Second, because
+ // the pass generates a lot of these, they can confuse the simple metric
+ // used to compare the trees in the next iteration. Thus, giving them a
+ // weight of zero allows the pass to essentially ignore them in
+ // subsequent iterations when looking for vectorization opportunities
+ // while still tracking dependency chains that flow through those
+ // instructions.
+ if (isa<InsertElementInst>(V) || isa<ExtractElementInst>(V))
+ return 0;
+
+ // Give a load or store half of the required depth so that load/store
+ // pairs will vectorize.
+ if (!Config.NoMemOpBoost && (isa<LoadInst>(V) || isa<StoreInst>(V)))
+ return Config.ReqChainDepth/2;
+
+ return 1;
+ }
+
+ // This determines the relative offset of two loads or stores, returning
+ // true if the offset could be determined to be some constant value.
+ // For example, if OffsetInElmts == 1, then J accesses the memory directly
+ // after I; if OffsetInElmts == -1 then I accesses the memory
+ // directly after J. This function assumes that both instructions
+ // have the same type.
+ bool getPairPtrInfo(Instruction *I, Instruction *J,
+ Value *&IPtr, Value *&JPtr, unsigned &IAlignment, unsigned &JAlignment,
+ int64_t &OffsetInElmts) {
+ OffsetInElmts = 0;
+ if (isa<LoadInst>(I)) {
+ IPtr = cast<LoadInst>(I)->getPointerOperand();
+ JPtr = cast<LoadInst>(J)->getPointerOperand();
+ IAlignment = cast<LoadInst>(I)->getAlignment();
+ JAlignment = cast<LoadInst>(J)->getAlignment();
+ } else {
+ IPtr = cast<StoreInst>(I)->getPointerOperand();
+ JPtr = cast<StoreInst>(J)->getPointerOperand();
+ IAlignment = cast<StoreInst>(I)->getAlignment();
+ JAlignment = cast<StoreInst>(J)->getAlignment();
+ }
+
+ const SCEV *IPtrSCEV = SE->getSCEV(IPtr);
+ const SCEV *JPtrSCEV = SE->getSCEV(JPtr);
+
+ // If this is a trivial offset, then we'll get something like
+ // 1*sizeof(type). With target data, which we need anyway, this will get
+ // constant folded into a number.
+ const SCEV *OffsetSCEV = SE->getMinusSCEV(JPtrSCEV, IPtrSCEV);
+ if (const SCEVConstant *ConstOffSCEV =
+ dyn_cast<SCEVConstant>(OffsetSCEV)) {
+ ConstantInt *IntOff = ConstOffSCEV->getValue();
+ int64_t Offset = IntOff->getSExtValue();
+
+ Type *VTy = cast<PointerType>(IPtr->getType())->getElementType();
+ int64_t VTyTSS = (int64_t) TD->getTypeStoreSize(VTy);
+
+ assert(VTy == cast<PointerType>(JPtr->getType())->getElementType());
+
+ OffsetInElmts = Offset/VTyTSS;
+ return (abs64(Offset) % VTyTSS) == 0;
+ }
+
+ return false;
+ }
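+
+ // For example (hypothetical values): for two i32 loads of a[1] and a[2],
+ // the SCEV difference is 4 bytes and the type store size is 4, so
+ // OffsetInElmts is 1 and the function returns true; an offset that is
+ // not a whole multiple of the store size makes it return false.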
+
+ // Returns true if the provided CallInst represents an intrinsic that can
+ // be vectorized.
+ bool isVectorizableIntrinsic(CallInst* I) {
+ Function *F = I->getCalledFunction();
+ if (!F) return false;
+
+ unsigned IID = F->getIntrinsicID();
+ if (!IID) return false;
+
+ switch(IID) {
+ default:
+ return false;
+ case Intrinsic::sqrt:
+ case Intrinsic::powi:
+ case Intrinsic::sin:
+ case Intrinsic::cos:
+ case Intrinsic::log:
+ case Intrinsic::log2:
+ case Intrinsic::log10:
+ case Intrinsic::exp:
+ case Intrinsic::exp2:
+ case Intrinsic::pow:
+ return Config.VectorizeMath;
+ case Intrinsic::fma:
+ return Config.VectorizeFMA;
+ }
+ }
+
+ // Returns true if J is the second element of some pair in the range
+ // referenced by the provided multimap iterator pair.
+ template <typename V>
+ bool isSecondInIteratorPair(V J, std::pair<
+ typename std::multimap<V, V>::iterator,
+ typename std::multimap<V, V>::iterator> PairRange) {
+ for (typename std::multimap<V, V>::iterator K = PairRange.first;
+ K != PairRange.second; ++K)
+ if (K->second == J) return true;
+
+ return false;
+ }
+ };
+
+ // This function implements one vectorization iteration on the provided
+ // basic block. It returns true if the block is changed.
+ bool BBVectorize::vectorizePairs(BasicBlock &BB) {
+ bool ShouldContinue;
+ BasicBlock::iterator Start = BB.getFirstInsertionPt();
+
+ std::vector<Value *> AllPairableInsts;
+ DenseMap<Value *, Value *> AllChosenPairs;
+
+ do {
+ std::vector<Value *> PairableInsts;
+ std::multimap<Value *, Value *> CandidatePairs;
+ ShouldContinue = getCandidatePairs(BB, Start, CandidatePairs,
+ PairableInsts);
+ if (PairableInsts.empty()) continue;
+
+ // Now we have a map of all of the pairable instructions and we need to
+ // select the best possible pairing. A good pairing is one such that the
+ // users of the pair are also paired. This defines a (directed) forest
+ // over the pairs such that two pairs are connected iff the second pair
+ // uses the first.
+
+ // Note that it only matters that both members of the second pair use some
+ // element of the first pair (to allow for splatting).
+
+ std::multimap<ValuePair, ValuePair> ConnectedPairs;
+ computeConnectedPairs(CandidatePairs, PairableInsts, ConnectedPairs);
+ if (ConnectedPairs.empty()) continue;
+
+ // Build the pairable-instruction dependency map
+ DenseSet<ValuePair> PairableInstUsers;
+ buildDepMap(BB, CandidatePairs, PairableInsts, PairableInstUsers);
+
+ // There is now a graph of the connected pairs. For each variable, pick
+ // the pairing with the largest tree meeting the depth requirement on at
+ // least one branch. Then select all pairings that are part of that tree
+ // and remove them from the list of available pairings and pairable
+ // variables.
+
+ DenseMap<Value *, Value *> ChosenPairs;
+ choosePairs(CandidatePairs, PairableInsts, ConnectedPairs,
+ PairableInstUsers, ChosenPairs);
+
+ if (ChosenPairs.empty()) continue;
+ AllPairableInsts.insert(AllPairableInsts.end(), PairableInsts.begin(),
+ PairableInsts.end());
+ AllChosenPairs.insert(ChosenPairs.begin(), ChosenPairs.end());
+ } while (ShouldContinue);
+
+ if (AllChosenPairs.empty()) return false;
+ NumFusedOps += AllChosenPairs.size();
+
+ // A set of pairs has now been selected. It is now necessary to replace the
+ // paired instructions with vector instructions. For this procedure each
+ // operand must be replaced with a vector operand. This vector is formed
+ // by using build_vector on the old operands. Uses of the original scalar
+ // values are then replaced with a vector_extract on the result. Subsequent
+ // optimization passes should coalesce the build/extract combinations.
+
+ fuseChosenPairs(BB, AllPairableInsts, AllChosenPairs);
+ return true;
+ }
+
+ // This function returns true if the provided instruction is capable of being
+ // fused into a vector instruction. This determination is based only on the
+ // type and other attributes of the instruction.
+ bool BBVectorize::isInstVectorizable(Instruction *I,
+ bool &IsSimpleLoadStore) {
+ IsSimpleLoadStore = false;
+
+ if (CallInst *C = dyn_cast<CallInst>(I)) {
+ if (!isVectorizableIntrinsic(C))
+ return false;
+ } else if (LoadInst *L = dyn_cast<LoadInst>(I)) {
+ // Vectorize simple loads if possible:
+ IsSimpleLoadStore = L->isSimple();
+ if (!IsSimpleLoadStore || !Config.VectorizeMemOps)
+ return false;
+ } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
+ // Vectorize simple stores if possible:
+ IsSimpleLoadStore = S->isSimple();
+ if (!IsSimpleLoadStore || !Config.VectorizeMemOps)
+ return false;
+ } else if (CastInst *C = dyn_cast<CastInst>(I)) {
+ // We can vectorize casts, but not casts of pointer types, etc.
+ if (!Config.VectorizeCasts)
+ return false;
+
+ Type *SrcTy = C->getSrcTy();
+ if (!SrcTy->isSingleValueType() || SrcTy->isPointerTy())
+ return false;
+
+ Type *DestTy = C->getDestTy();
+ if (!DestTy->isSingleValueType() || DestTy->isPointerTy())
+ return false;
+ } else if (!(I->isBinaryOp() || isa<ShuffleVectorInst>(I) ||
+ isa<ExtractElementInst>(I) || isa<InsertElementInst>(I))) {
+ return false;
+ }
+
+ // We can't vectorize memory operations without target data
+ if (TD == 0 && IsSimpleLoadStore)
+ return false;
+
+ Type *T1, *T2;
+ if (isa<StoreInst>(I)) {
+ // For stores, it is the value type, not the pointer type that matters
+ // because the value is what will come from a vector register.
+
+ Value *IVal = cast<StoreInst>(I)->getValueOperand();
+ T1 = IVal->getType();
+ } else {
+ T1 = I->getType();
+ }
+
+ if (I->isCast())
+ T2 = cast<CastInst>(I)->getSrcTy();
+ else
+ T2 = T1;
+
+ // Not every type can be vectorized...
+ if (!(VectorType::isValidElementType(T1) || T1->isVectorTy()) ||
+ !(VectorType::isValidElementType(T2) || T2->isVectorTy()))
+ return false;
+
+ if (!Config.VectorizeInts
+ && (T1->isIntOrIntVectorTy() || T2->isIntOrIntVectorTy()))
+ return false;
+
+ if (!Config.VectorizeFloats
+ && (T1->isFPOrFPVectorTy() || T2->isFPOrFPVectorTy()))
+ return false;
+
+ if (T1->getPrimitiveSizeInBits() > Config.VectorBits/2 ||
+ T2->getPrimitiveSizeInBits() > Config.VectorBits/2)
+ return false;
+
+ return true;
+ }
+
+ // This function returns true if the two provided instructions are compatible
+ // (meaning that they can be fused into a vector instruction). This assumes
+ // that I has already been determined to be vectorizable and that J is not
+ // in the use tree of I.
+ bool BBVectorize::areInstsCompatible(Instruction *I, Instruction *J,
+ bool IsSimpleLoadStore) {
+ DEBUG(if (DebugInstructionExamination) dbgs() << "BBV: looking at " << *I <<
+ " <-> " << *J << "\n");
+
+ // Loads and stores can be merged if they have different alignments,
+ // but are otherwise the same.
+ LoadInst *LI, *LJ;
+ StoreInst *SI, *SJ;
+ if ((LI = dyn_cast<LoadInst>(I)) && (LJ = dyn_cast<LoadInst>(J))) {
+ if (I->getType() != J->getType())
+ return false;
+
+ if (LI->getPointerOperand()->getType() !=
+ LJ->getPointerOperand()->getType() ||
+ LI->isVolatile() != LJ->isVolatile() ||
+ LI->getOrdering() != LJ->getOrdering() ||
+ LI->getSynchScope() != LJ->getSynchScope())
+ return false;
+ } else if ((SI = dyn_cast<StoreInst>(I)) && (SJ = dyn_cast<StoreInst>(J))) {
+ if (SI->getValueOperand()->getType() !=
+ SJ->getValueOperand()->getType() ||
+ SI->getPointerOperand()->getType() !=
+ SJ->getPointerOperand()->getType() ||
+ SI->isVolatile() != SJ->isVolatile() ||
+ SI->getOrdering() != SJ->getOrdering() ||
+ SI->getSynchScope() != SJ->getSynchScope())
+ return false;
+ } else if (!J->isSameOperationAs(I)) {
+ return false;
+ }
+ // FIXME: handle addsub-type operations!
+
+ if (IsSimpleLoadStore) {
+ Value *IPtr, *JPtr;
+ unsigned IAlignment, JAlignment;
+ int64_t OffsetInElmts = 0;
+ if (getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
+ OffsetInElmts) && abs64(OffsetInElmts) == 1) {
+ if (Config.AlignedOnly) {
+ Type *aType = isa<StoreInst>(I) ?
+ cast<StoreInst>(I)->getValueOperand()->getType() : I->getType();
+ // An aligned load or store is possible only if the instruction
+ // with the lower offset has an alignment suitable for the
+ // vector type.
+
+ unsigned BottomAlignment = IAlignment;
+ if (OffsetInElmts < 0) BottomAlignment = JAlignment;
+
+ Type *VType = getVecTypeForPair(aType);
+ unsigned VecAlignment = TD->getPrefTypeAlignment(VType);
+ if (BottomAlignment < VecAlignment)
+ return false;
+ }
+ } else {
+ return false;
+ }
+ } else if (isa<ShuffleVectorInst>(I)) {
+ // Only merge two shuffles if they're both constant
+ return isa<Constant>(I->getOperand(2)) &&
+ isa<Constant>(J->getOperand(2));
+ // FIXME: We may want to vectorize non-constant shuffles also.
+ }
+
+ // The powi intrinsic is special because only the first argument is
+ // vectorized; the second arguments must be equal.
+ CallInst *CI = dyn_cast<CallInst>(I);
+ Function *FI;
+ if (CI && (FI = CI->getCalledFunction()) &&
+ FI->getIntrinsicID() == Intrinsic::powi) {
+
+ Value *A1I = CI->getArgOperand(1),
+ *A1J = cast<CallInst>(J)->getArgOperand(1);
+ const SCEV *A1ISCEV = SE->getSCEV(A1I),
+ *A1JSCEV = SE->getSCEV(A1J);
+ return (A1ISCEV == A1JSCEV);
+ }
+
+ return true;
+ }
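+
+ // For example (illustrative): calls powi(a, k) and powi(b, k) are
+ // compatible because their second arguments have identical SCEVs, while
+ // powi(a, 2) and powi(b, 3) are rejected.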
+
+ // Figure out whether or not J uses I and update the users and write-set
+ // structures associated with I. Specifically, Users represents the set of
+ // instructions that depend on I. WriteSet represents the set
+ // of memory locations that are dependent on I. If UpdateUsers is true,
+ // and J uses I, then Users is updated to contain J and WriteSet is updated
+ // to contain any memory locations to which J writes. The function returns
+ // true if J uses I. By default, alias analysis is used to determine
+ // whether J reads from memory that overlaps with a location in WriteSet.
+ // If LoadMoveSet is not null, then it is a previously-computed multimap
+ // where the key is the memory-based user instruction and the value is
+ // the instruction to be compared with I. So, if LoadMoveSet is provided,
+ // then the alias analysis is not used. This is necessary because this
+ // function is called during the process of moving instructions during
+ // vectorization and the results of the alias analysis are not stable during
+ // that process.
+ bool BBVectorize::trackUsesOfI(DenseSet<Value *> &Users,
+ AliasSetTracker &WriteSet, Instruction *I,
+ Instruction *J, bool UpdateUsers,
+ std::multimap<Value *, Value *> *LoadMoveSet) {
+ bool UsesI = false;
+
+ // This instruction may already be marked as a user due, for example, to
+ // being a member of a selected pair.
+ if (Users.count(J))
+ UsesI = true;
+
+ if (!UsesI)
+ for (User::op_iterator JU = J->op_begin(), JE = J->op_end();
+ JU != JE; ++JU) {
+ Value *V = *JU;
+ if (I == V || Users.count(V)) {
+ UsesI = true;
+ break;
+ }
+ }
+ if (!UsesI && J->mayReadFromMemory()) {
+ if (LoadMoveSet) {
+ VPIteratorPair JPairRange = LoadMoveSet->equal_range(J);
+ UsesI = isSecondInIteratorPair<Value*>(I, JPairRange);
+ } else {
+ for (AliasSetTracker::iterator W = WriteSet.begin(),
+ WE = WriteSet.end(); W != WE; ++W) {
+ if (W->aliasesUnknownInst(J, *AA)) {
+ UsesI = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (UsesI && UpdateUsers) {
+ if (J->mayWriteToMemory()) WriteSet.add(J);
+ Users.insert(J);
+ }
+
+ return UsesI;
+ }
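+
+ // For example (illustrative): if a store S is already a known user of I,
+ // S's write is in WriteSet; a later load J that may alias S's location
+ // is then also treated as a user of I, because its loaded value may
+ // depend on I through memory.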
+
+ // This function iterates over all instruction pairs in the provided
+ // basic block and collects all candidate pairs for vectorization.
+ bool BBVectorize::getCandidatePairs(BasicBlock &BB,
+ BasicBlock::iterator &Start,
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts) {
+ BasicBlock::iterator E = BB.end();
+ if (Start == E) return false;
+
+ bool ShouldContinue = false, IAfterStart = false;
+ for (BasicBlock::iterator I = Start++; I != E; ++I) {
+ if (I == Start) IAfterStart = true;
+
+ bool IsSimpleLoadStore;
+ if (!isInstVectorizable(I, IsSimpleLoadStore)) continue;
+
+ // Look for an instruction with which to pair instruction *I...
+ DenseSet<Value *> Users;
+ AliasSetTracker WriteSet(*AA);
+ bool JAfterStart = IAfterStart;
+ BasicBlock::iterator J = llvm::next(I);
+ for (unsigned ss = 0; J != E && ss <= Config.SearchLimit; ++J, ++ss) {
+ if (J == Start) JAfterStart = true;
+
+ // Determine if J uses I, if so, exit the loop.
+ bool UsesI = trackUsesOfI(Users, WriteSet, I, J, !Config.FastDep);
+ if (Config.FastDep) {
+ // Note: For this heuristic to be effective, independent operations
+ // must tend to be intermixed. This is likely to be true from some
+ // kinds of grouped loop unrolling (but not the generic LLVM pass),
+ // but otherwise may require some kind of reordering pass.
+
+ // When using fast dependency analysis,
+ // stop searching after first use:
+ if (UsesI) break;
+ } else {
+ if (UsesI) continue;
+ }
+
+ // J does not use I, and comes before the first use of I, so it can be
+ // merged with I if the instructions are compatible.
+ if (!areInstsCompatible(I, J, IsSimpleLoadStore)) continue;
+
+ // J is a candidate for merging with I.
+ if (!PairableInsts.size() ||
+ PairableInsts[PairableInsts.size()-1] != I) {
+ PairableInsts.push_back(I);
+ }
+
+ CandidatePairs.insert(ValuePair(I, J));
+
+ // The next call to this function must start after the last instruction
+ // selected during this invocation.
+ if (JAfterStart) {
+ Start = llvm::next(J);
+ IAfterStart = JAfterStart = false;
+ }
+
+ DEBUG(if (DebugCandidateSelection) dbgs() << "BBV: candidate pair "
+ << *I << " <-> " << *J << "\n");
+
+ // If we have already found too many pairs, break here and this function
+ // will be called again starting after the last instruction selected
+ // during this invocation.
+ if (PairableInsts.size() >= Config.MaxInsts) {
+ ShouldContinue = true;
+ break;
+ }
+ }
+
+ if (ShouldContinue)
+ break;
+ }
+
+ DEBUG(dbgs() << "BBV: found " << PairableInsts.size()
+ << " instructions with candidate pairs\n");
+
+ return ShouldContinue;
+ }
+
+ // Finds candidate pairs connected to the pair P = <PI, PJ>. This means that
+ // it looks for pairs such that both members have an input which is an
+ // output of PI or PJ.
+ void BBVectorize::computePairsConnectedTo(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ ValuePair P) {
+ // For each possible pairing for this variable, look at the uses of
+ // the first value...
+ for (Value::use_iterator I = P.first->use_begin(),
+ E = P.first->use_end(); I != E; ++I) {
+ VPIteratorPair IPairRange = CandidatePairs.equal_range(*I);
+
+ // For each use of the first variable, look for uses of the second
+ // variable...
+ for (Value::use_iterator J = P.second->use_begin(),
+ E2 = P.second->use_end(); J != E2; ++J) {
+ VPIteratorPair JPairRange = CandidatePairs.equal_range(*J);
+
+ // Look for <I, J>:
+ if (isSecondInIteratorPair<Value*>(*J, IPairRange))
+ ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J)));
+
+ // Look for <J, I>:
+ if (isSecondInIteratorPair<Value*>(*I, JPairRange))
+ ConnectedPairs.insert(VPPair(P, ValuePair(*J, *I)));
+ }
+
+ if (Config.SplatBreaksChain) continue;
+ // Look for cases where just the first value in the pair is used by
+ // both members of another pair (splatting).
+ for (Value::use_iterator J = P.first->use_begin(); J != E; ++J) {
+ if (isSecondInIteratorPair<Value*>(*J, IPairRange))
+ ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J)));
+ }
+ }
+
+ if (Config.SplatBreaksChain) return;
+ // Look for cases where just the second value in the pair is used by
+ // both members of another pair (splatting).
+ for (Value::use_iterator I = P.second->use_begin(),
+ E = P.second->use_end(); I != E; ++I) {
+ VPIteratorPair IPairRange = CandidatePairs.equal_range(*I);
+
+ for (Value::use_iterator J = P.second->use_begin(); J != E; ++J) {
+ if (isSecondInIteratorPair<Value*>(*J, IPairRange))
+ ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J)));
+ }
+ }
+ }
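+
+ // For example (illustrative names): if (A1,A2) is a candidate pair and
+ // B1 = fadd A1, x while B2 = fadd A2, x, then the candidate pair (B1,B2)
+ // is connected to (A1,A2) because each member of (B1,B2) uses an output
+ // of (A1,A2).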
+
+ // This function figures out which pairs are connected. Two pairs are
+ // connected if some output of the first pair forms an input to both members
+ // of the second pair.
+ void BBVectorize::computeConnectedPairs(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs) {
+
+ for (std::vector<Value *>::iterator PI = PairableInsts.begin(),
+ PE = PairableInsts.end(); PI != PE; ++PI) {
+ VPIteratorPair choiceRange = CandidatePairs.equal_range(*PI);
+
+ for (std::multimap<Value *, Value *>::iterator P = choiceRange.first;
+ P != choiceRange.second; ++P)
+ computePairsConnectedTo(CandidatePairs, PairableInsts,
+ ConnectedPairs, *P);
+ }
+
+ DEBUG(dbgs() << "BBV: found " << ConnectedPairs.size()
+ << " pair connections.\n");
+ }
+
+ // This function builds a set of use tuples such that <A, B> is in the set
+ // if B is in the use tree of A. If B is in the use tree of A, then B
+ // depends on the output of A.
+ void BBVectorize::buildDepMap(
+ BasicBlock &BB,
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ DenseSet<ValuePair> &PairableInstUsers) {
+ DenseSet<Value *> IsInPair;
+ for (std::multimap<Value *, Value *>::iterator C = CandidatePairs.begin(),
+ E = CandidatePairs.end(); C != E; ++C) {
+ IsInPair.insert(C->first);
+ IsInPair.insert(C->second);
+ }
+
+ // Iterate through the basic block, recording all Users of each
+ // pairable instruction.
+
+ BasicBlock::iterator E = BB.end();
+ for (BasicBlock::iterator I = BB.getFirstInsertionPt(); I != E; ++I) {
+ if (IsInPair.find(I) == IsInPair.end()) continue;
+
+ DenseSet<Value *> Users;
+ AliasSetTracker WriteSet(*AA);
+ for (BasicBlock::iterator J = llvm::next(I); J != E; ++J)
+ (void) trackUsesOfI(Users, WriteSet, I, J);
+
+ for (DenseSet<Value *>::iterator U = Users.begin(), E = Users.end();
+ U != E; ++U)
+ PairableInstUsers.insert(ValuePair(I, *U));
+ }
+ }
+
+ // Returns true if an input to pair P is an output of pair Q and also an
+ // input of pair Q is an output of pair P. If this is the case, then these
+ // two pairs cannot be simultaneously fused.
+ bool BBVectorize::pairsConflict(ValuePair P, ValuePair Q,
+ DenseSet<ValuePair> &PairableInstUsers,
+ std::multimap<ValuePair, ValuePair> *PairableInstUserMap) {
+ // Two pairs are in conflict if they are mutual Users of each other.
+ bool QUsesP = PairableInstUsers.count(ValuePair(P.first, Q.first)) ||
+ PairableInstUsers.count(ValuePair(P.first, Q.second)) ||
+ PairableInstUsers.count(ValuePair(P.second, Q.first)) ||
+ PairableInstUsers.count(ValuePair(P.second, Q.second));
+ bool PUsesQ = PairableInstUsers.count(ValuePair(Q.first, P.first)) ||
+ PairableInstUsers.count(ValuePair(Q.first, P.second)) ||
+ PairableInstUsers.count(ValuePair(Q.second, P.first)) ||
+ PairableInstUsers.count(ValuePair(Q.second, P.second));
+ if (PairableInstUserMap) {
+ // FIXME: The expensive part of the cycle check is not so much the cycle
+ // check itself but this edge insertion procedure. This needs some
+ // profiling and probably a different data structure (same is true of
+ // most uses of std::multimap).
+ if (PUsesQ) {
+ VPPIteratorPair QPairRange = PairableInstUserMap->equal_range(Q);
+ if (!isSecondInIteratorPair(P, QPairRange))
+ PairableInstUserMap->insert(VPPair(Q, P));
+ }
+ if (QUsesP) {
+ VPPIteratorPair PPairRange = PairableInstUserMap->equal_range(P);
+ if (!isSecondInIteratorPair(Q, PPairRange))
+ PairableInstUserMap->insert(VPPair(P, Q));
+ }
+ }
+
+ return (QUsesP && PUsesQ);
+ }
+
+ // This function walks the use graph of current pairs to see if, starting
+ // from P, the walk returns to P.
+ bool BBVectorize::pairWillFormCycle(ValuePair P,
+ std::multimap<ValuePair, ValuePair> &PairableInstUserMap,
+ DenseSet<ValuePair> &CurrentPairs) {
+ DEBUG(if (DebugCycleCheck)
+ dbgs() << "BBV: starting cycle check for : " << *P.first << " <-> "
+ << *P.second << "\n");
+ // A lookup table of visited pairs is kept because the PairableInstUserMap
+ // contains non-direct associations.
+ DenseSet<ValuePair> Visited;
+ SmallVector<ValuePair, 32> Q;
+ // General depth-first post-order traversal:
+ Q.push_back(P);
+ do {
+ ValuePair QTop = Q.pop_back_val();
+ Visited.insert(QTop);
+
+ DEBUG(if (DebugCycleCheck)
+ dbgs() << "BBV: cycle check visiting: " << *QTop.first << " <-> "
+ << *QTop.second << "\n");
+ VPPIteratorPair QPairRange = PairableInstUserMap.equal_range(QTop);
+ for (std::multimap<ValuePair, ValuePair>::iterator C = QPairRange.first;
+ C != QPairRange.second; ++C) {
+ if (C->second == P) {
+ DEBUG(dbgs()
+ << "BBV: rejected to prevent non-trivial cycle formation: "
+ << *C->first.first << " <-> " << *C->first.second << "\n");
+ return true;
+ }
+
+ if (CurrentPairs.count(C->second) && !Visited.count(C->second))
+ Q.push_back(C->second);
+ }
+ } while (!Q.empty());
+
+ return false;
+ }
+
+ // This function builds the initial tree of connected pairs with the
+ // pair J at the root.
+ void BBVectorize::buildInitialTreeFor(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseSet<ValuePair> &PairableInstUsers,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseMap<ValuePair, size_t> &Tree, ValuePair J) {
+ // Each of these pairs is viewed as the root node of a Tree. The Tree
+ // is then walked (depth-first). As this happens, we keep track of
+ // the pairs that compose the Tree and the maximum depth of the Tree.
+ SmallVector<ValuePairWithDepth, 32> Q;
+ // General depth-first post-order traversal:
+ Q.push_back(ValuePairWithDepth(J, getDepthFactor(J.first)));
+ do {
+ ValuePairWithDepth QTop = Q.back();
+
+ // Push each child onto the queue:
+ bool MoreChildren = false;
+ size_t MaxChildDepth = QTop.second;
+ VPPIteratorPair qtRange = ConnectedPairs.equal_range(QTop.first);
+ for (std::multimap<ValuePair, ValuePair>::iterator k = qtRange.first;
+ k != qtRange.second; ++k) {
+ // Make sure that this child pair is still a candidate:
+ bool IsStillCand = false;
+ VPIteratorPair checkRange =
+ CandidatePairs.equal_range(k->second.first);
+ for (std::multimap<Value *, Value *>::iterator m = checkRange.first;
+ m != checkRange.second; ++m) {
+ if (m->second == k->second.second) {
+ IsStillCand = true;
+ break;
+ }
+ }
+
+ if (IsStillCand) {
+ DenseMap<ValuePair, size_t>::iterator C = Tree.find(k->second);
+ if (C == Tree.end()) {
+ size_t d = getDepthFactor(k->second.first);
+ Q.push_back(ValuePairWithDepth(k->second, QTop.second+d));
+ MoreChildren = true;
+ } else {
+ MaxChildDepth = std::max(MaxChildDepth, C->second);
+ }
+ }
+ }
+
+ if (!MoreChildren) {
+ // Record the current pair as part of the Tree:
+ Tree.insert(ValuePairWithDepth(QTop.first, MaxChildDepth));
+ Q.pop_back();
+ }
+ } while (!Q.empty());
+ }
+
+ // Given some initial tree, prune it by removing conflicting pairs (pairs
+ // that cannot be simultaneously chosen for vectorization).
+ void BBVectorize::pruneTreeFor(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseSet<ValuePair> &PairableInstUsers,
+ std::multimap<ValuePair, ValuePair> &PairableInstUserMap,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseMap<ValuePair, size_t> &Tree,
+ DenseSet<ValuePair> &PrunedTree, ValuePair J,
+ bool UseCycleCheck) {
+ SmallVector<ValuePairWithDepth, 32> Q;
+ // General depth-first post-order traversal:
+ Q.push_back(ValuePairWithDepth(J, getDepthFactor(J.first)));
+ do {
+ ValuePairWithDepth QTop = Q.pop_back_val();
+ PrunedTree.insert(QTop.first);
+
+ // Visit each child, pruning as necessary...
+ DenseMap<ValuePair, size_t> BestChildren;
+ VPPIteratorPair QTopRange = ConnectedPairs.equal_range(QTop.first);
+ for (std::multimap<ValuePair, ValuePair>::iterator K = QTopRange.first;
+ K != QTopRange.second; ++K) {
+ DenseMap<ValuePair, size_t>::iterator C = Tree.find(K->second);
+ if (C == Tree.end()) continue;
+
+ // This child is in the Tree, now we need to make sure it is the
+ // best of any conflicting children. There could be multiple
+ // conflicting children, so first, determine if we're keeping
+ // this child, then delete conflicting children as necessary.
+
+ // It is also necessary to guard against pairing-induced
+ // dependencies. Consider instructions a .. x .. y .. b
+ // such that (a,b) are to be fused and (x,y) are to be fused
+ // but a is an input to x and b is an output from y. This
+ // means that y cannot be moved after b but x must be moved
+ // after b for (a,b) to be fused. In other words, after
+ // fusing (a,b) we have y .. a/b .. x where y is an input
+ // to a/b and x is an output of a/b: x and y can no longer
+ // be legally fused. To prevent this condition, we must
+ // make sure that a child pair added to the Tree is not
+ // both an input and output of an already-selected pair.
+
+ // Pairing-induced dependencies can also form from more complicated
+ // cycles. The pair vs. pair conflicts are easy to check, and so
+ // that is done explicitly for "fast rejection", and because for
+ // child vs. child conflicts, we may prefer to keep the current
+ // pair in preference to the already-selected child.
+ DenseSet<ValuePair> CurrentPairs;
+
+ bool CanAdd = true;
+ for (DenseMap<ValuePair, size_t>::iterator C2
+ = BestChildren.begin(), E2 = BestChildren.end();
+ C2 != E2; ++C2) {
+ if (C2->first.first == C->first.first ||
+ C2->first.first == C->first.second ||
+ C2->first.second == C->first.first ||
+ C2->first.second == C->first.second ||
+ pairsConflict(C2->first, C->first, PairableInstUsers,
+ UseCycleCheck ? &PairableInstUserMap : 0)) {
+ if (C2->second >= C->second) {
+ CanAdd = false;
+ break;
+ }
+
+ CurrentPairs.insert(C2->first);
+ }
+ }
+ if (!CanAdd) continue;
+
+ // Even worse, this child could conflict with another node already
+ // selected for the Tree. If that is the case, ignore this child.
+ for (DenseSet<ValuePair>::iterator T = PrunedTree.begin(),
+ E2 = PrunedTree.end(); T != E2; ++T) {
+ if (T->first == C->first.first ||
+ T->first == C->first.second ||
+ T->second == C->first.first ||
+ T->second == C->first.second ||
+ pairsConflict(*T, C->first, PairableInstUsers,
+ UseCycleCheck ? &PairableInstUserMap : 0)) {
+ CanAdd = false;
+ break;
+ }
+
+ CurrentPairs.insert(*T);
+ }
+ if (!CanAdd) continue;
+
+ // And check the queue too...
+ for (SmallVector<ValuePairWithDepth, 32>::iterator C2 = Q.begin(),
+ E2 = Q.end(); C2 != E2; ++C2) {
+ if (C2->first.first == C->first.first ||
+ C2->first.first == C->first.second ||
+ C2->first.second == C->first.first ||
+ C2->first.second == C->first.second ||
+ pairsConflict(C2->first, C->first, PairableInstUsers,
+ UseCycleCheck ? &PairableInstUserMap : 0)) {
+ CanAdd = false;
+ break;
+ }
+
+ CurrentPairs.insert(C2->first);
+ }
+ if (!CanAdd) continue;
+
+ // Last but not least, check for a conflict with any of the
+ // already-chosen pairs.
+ for (DenseMap<Value *, Value *>::iterator C2 =
+ ChosenPairs.begin(), E2 = ChosenPairs.end();
+ C2 != E2; ++C2) {
+ if (pairsConflict(*C2, C->first, PairableInstUsers,
+ UseCycleCheck ? &PairableInstUserMap : 0)) {
+ CanAdd = false;
+ break;
+ }
+
+ CurrentPairs.insert(*C2);
+ }
+ if (!CanAdd) continue;
+
+ // To check for non-trivial cycles formed by the addition of the
+ // current pair we've formed a list of all relevant pairs, now use a
+ // graph walk to check for a cycle. We start from the current pair and
+ // walk the use tree to see if we again reach the current pair. If we
+ // do, then the current pair is rejected.
+
+ // FIXME: It may be more efficient to use a topological-ordering
+ // algorithm to improve the cycle check. This should be investigated.
+ if (UseCycleCheck &&
+ pairWillFormCycle(C->first, PairableInstUserMap, CurrentPairs))
+ continue;
+
+ // This child can be added, but we may have chosen it in preference
+ // to an already-selected child. Check for this here, and if a
+ // conflict is found, then remove the previously-selected child
+ // before adding this one in its place.
+ for (DenseMap<ValuePair, size_t>::iterator C2
+ = BestChildren.begin(); C2 != BestChildren.end();) {
+ if (C2->first.first == C->first.first ||
+ C2->first.first == C->first.second ||
+ C2->first.second == C->first.first ||
+ C2->first.second == C->first.second ||
+ pairsConflict(C2->first, C->first, PairableInstUsers))
+ BestChildren.erase(C2++);
+ else
+ ++C2;
+ }
+
+ BestChildren.insert(ValuePairWithDepth(C->first, C->second));
+ }
+
+ for (DenseMap<ValuePair, size_t>::iterator C
+ = BestChildren.begin(), E2 = BestChildren.end();
+ C != E2; ++C) {
+ size_t DepthF = getDepthFactor(C->first.first);
+ Q.push_back(ValuePairWithDepth(C->first, QTop.second+DepthF));
+ }
+ } while (!Q.empty());
+ }
+
+ // This function finds the best tree of mutually-compatible connected
+ // pairs, given the choice of root pairs as an iterator range.
+ void BBVectorize::findBestTreeFor(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseSet<ValuePair> &PairableInstUsers,
+ std::multimap<ValuePair, ValuePair> &PairableInstUserMap,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseSet<ValuePair> &BestTree, size_t &BestMaxDepth,
+ size_t &BestEffSize, VPIteratorPair ChoiceRange,
+ bool UseCycleCheck) {
+ for (std::multimap<Value *, Value *>::iterator J = ChoiceRange.first;
+ J != ChoiceRange.second; ++J) {
+
+ // Before going any further, make sure that this pair does not
+ // conflict with any already-selected pairs (see comment below
+ // near the Tree pruning for more details).
+ DenseSet<ValuePair> ChosenPairSet;
+ bool DoesConflict = false;
+ for (DenseMap<Value *, Value *>::iterator C = ChosenPairs.begin(),
+ E = ChosenPairs.end(); C != E; ++C) {
+ if (pairsConflict(*C, *J, PairableInstUsers,
+ UseCycleCheck ? &PairableInstUserMap : 0)) {
+ DoesConflict = true;
+ break;
+ }
+
+ ChosenPairSet.insert(*C);
+ }
+ if (DoesConflict) continue;
+
+ if (UseCycleCheck &&
+ pairWillFormCycle(*J, PairableInstUserMap, ChosenPairSet))
+ continue;
+
+ DenseMap<ValuePair, size_t> Tree;
+ buildInitialTreeFor(CandidatePairs, PairableInsts, ConnectedPairs,
+ PairableInstUsers, ChosenPairs, Tree, *J);
+
+ // Because we'll keep the child with the largest depth, the largest
+ // depth is still the same in the unpruned Tree.
+ size_t MaxDepth = Tree.lookup(*J);
+
+ DEBUG(if (DebugPairSelection) dbgs() << "BBV: found Tree for pair {"
+ << *J->first << " <-> " << *J->second << "} of depth " <<
+ MaxDepth << " and size " << Tree.size() << "\n");
+
+ // At this point the Tree has been constructed but may contain
+ // contradictory children (meaning that different children of
+ // some tree node may be attempting to fuse the same instruction).
+ // So now we walk the tree again and, in the case of a conflict,
+ // keep only the child with the largest depth. To break a tie,
+ // favor the first child.
+
+ DenseSet<ValuePair> PrunedTree;
+ pruneTreeFor(CandidatePairs, PairableInsts, ConnectedPairs,
+ PairableInstUsers, PairableInstUserMap, ChosenPairs, Tree,
+ PrunedTree, *J, UseCycleCheck);
+
+ size_t EffSize = 0;
+ for (DenseSet<ValuePair>::iterator S = PrunedTree.begin(),
+ E = PrunedTree.end(); S != E; ++S)
+ EffSize += getDepthFactor(S->first);
+
+ DEBUG(if (DebugPairSelection)
+ dbgs() << "BBV: found pruned Tree for pair {"
+ << *J->first << " <-> " << *J->second << "} of depth " <<
+ MaxDepth << " and size " << PrunedTree.size() <<
+ " (effective size: " << EffSize << ")\n");
+ if (MaxDepth >= Config.ReqChainDepth && EffSize > BestEffSize) {
+ BestMaxDepth = MaxDepth;
+ BestEffSize = EffSize;
+ BestTree = PrunedTree;
+ }
+ }
+ }
+
+ // Given the list of candidate pairs, this function selects those
+ // that will be fused into vector instructions.
+ void BBVectorize::choosePairs(
+ std::multimap<Value *, Value *> &CandidatePairs,
+ std::vector<Value *> &PairableInsts,
+ std::multimap<ValuePair, ValuePair> &ConnectedPairs,
+ DenseSet<ValuePair> &PairableInstUsers,
+ DenseMap<Value *, Value *>& ChosenPairs) {
+ bool UseCycleCheck =
+ CandidatePairs.size() <= Config.MaxCandPairsForCycleCheck;
+ std::multimap<ValuePair, ValuePair> PairableInstUserMap;
+ for (std::vector<Value *>::iterator I = PairableInsts.begin(),
+ E = PairableInsts.end(); I != E; ++I) {
+ // The number of possible pairings for this variable:
+ size_t NumChoices = CandidatePairs.count(*I);
+ if (!NumChoices) continue;
+
+ VPIteratorPair ChoiceRange = CandidatePairs.equal_range(*I);
+
+ // The best pair to choose and its tree:
+ size_t BestMaxDepth = 0, BestEffSize = 0;
+ DenseSet<ValuePair> BestTree;
+ findBestTreeFor(CandidatePairs, PairableInsts, ConnectedPairs,
+ PairableInstUsers, PairableInstUserMap, ChosenPairs,
+ BestTree, BestMaxDepth, BestEffSize, ChoiceRange,
+ UseCycleCheck);
+
+ // A tree has been chosen (or not) at this point. If no tree was
+ // chosen, then this instruction, I, cannot be paired (and is no longer
+ // considered).
+
+ DEBUG(if (BestTree.size() > 0)
+ dbgs() << "BBV: selected pairs in the best tree for: "
+ << *cast<Instruction>(*I) << "\n");
+
+ for (DenseSet<ValuePair>::iterator S = BestTree.begin(),
+ SE2 = BestTree.end(); S != SE2; ++S) {
+ // Insert the members of this tree into the list of chosen pairs.
+ ChosenPairs.insert(ValuePair(S->first, S->second));
+ DEBUG(dbgs() << "BBV: selected pair: " << *S->first << " <-> " <<
+ *S->second << "\n");
+
+ // Remove all candidate pairs that have values in the chosen tree.
+ for (std::multimap<Value *, Value *>::iterator K =
+ CandidatePairs.begin(); K != CandidatePairs.end();) {
+ if (K->first == S->first || K->second == S->first ||
+ K->second == S->second || K->first == S->second) {
+ // Don't remove the actual pair chosen so that it can be used
+ // in subsequent tree selections.
+ if (!(K->first == S->first && K->second == S->second))
+ CandidatePairs.erase(K++);
+ else
+ ++K;
+ } else {
+ ++K;
+ }
+ }
+ }
+ }
+
+ DEBUG(dbgs() << "BBV: selected " << ChosenPairs.size() << " pairs.\n");
+ }
+
+ std::string getReplacementName(Instruction *I, bool IsInput, unsigned o,
+ unsigned n = 0) {
+ if (!I->hasName())
+ return "";
+
+ return (I->getName() + (IsInput ? ".v.i" : ".v.r") + utostr(o) +
+ (n > 0 ? "." + utostr(n) : "")).str();
+ }
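+
+ // For example, for an instruction named %x, the vector input built for
+ // operand 0 is named "x.v.i0" (with ".1"/".2" suffixes for its two
+ // insertelement steps), and the scalar outputs extracted from the fused
+ // result are named "x.v.r1" and "x.v.r2".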
+
+ // Returns the value that is to be used as the pointer input to the vector
+ // instruction that fuses I with J.
+ Value *BBVectorize::getReplacementPointerInput(LLVMContext& Context,
+ Instruction *I, Instruction *J, unsigned o,
+ bool &FlipMemInputs) {
+ Value *IPtr, *JPtr;
+ unsigned IAlignment, JAlignment;
+ int64_t OffsetInElmts;
+ (void) getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
+ OffsetInElmts);
+
+ // The pointer value is taken to be the one with the lowest offset.
+ Value *VPtr;
+ if (OffsetInElmts > 0) {
+ VPtr = IPtr;
+ } else {
+ FlipMemInputs = true;
+ VPtr = JPtr;
+ }
+
+ Type *ArgType = cast<PointerType>(IPtr->getType())->getElementType();
+ Type *VArgType = getVecTypeForPair(ArgType);
+ Type *VArgPtrType = PointerType::get(VArgType,
+ cast<PointerType>(IPtr->getType())->getAddressSpace());
+ return new BitCastInst(VPtr, VArgPtrType, getReplacementName(I, true, o),
+ /* insert before */ FlipMemInputs ? J : I);
+ }
+
+ void BBVectorize::fillNewShuffleMask(LLVMContext& Context, Instruction *J,
+ unsigned NumElem, unsigned MaskOffset, unsigned NumInElem,
+ unsigned IdxOffset, std::vector<Constant*> &Mask) {
+ for (unsigned v = 0; v < NumElem/2; ++v) {
+ int m = cast<ShuffleVectorInst>(J)->getMaskValue(v);
+ if (m < 0) {
+ Mask[v+MaskOffset] = UndefValue::get(Type::getInt32Ty(Context));
+ } else {
+ unsigned mm = m + (int) IdxOffset;
+ if (m >= (int) NumInElem)
+ mm += (int) NumInElem;
+
+ Mask[v+MaskOffset] =
+ ConstantInt::get(Type::getInt32Ty(Context), mm);
+ }
+ }
+ }
+
+ // Returns the value that is to be used as the vector-shuffle mask to the
+ // vector instruction that fuses I with J.
+ Value *BBVectorize::getReplacementShuffleMask(LLVMContext& Context,
+ Instruction *I, Instruction *J) {
+ // This is the shuffle mask. We need to append the second
+ // mask to the first, and the numbers need to be adjusted.
+
+ Type *ArgType = I->getType();
+ Type *VArgType = getVecTypeForPair(ArgType);
+
+ // Get the total number of elements in the fused vector type.
+ // By definition, this must equal the number of elements in
+ // the final mask.
+ unsigned NumElem = cast<VectorType>(VArgType)->getNumElements();
+ std::vector<Constant*> Mask(NumElem);
+
+ Type *OpType = I->getOperand(0)->getType();
+ unsigned NumInElem = cast<VectorType>(OpType)->getNumElements();
+
+ // For the mask from the first pair...
+ fillNewShuffleMask(Context, I, NumElem, 0, NumInElem, 0, Mask);
+
+ // For the mask from the second pair...
+ fillNewShuffleMask(Context, J, NumElem, NumElem/2, NumInElem, NumInElem,
+ Mask);
+
+ return ConstantVector::get(Mask);
+ }
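+
+ // For example (illustrative), if I and J each shuffle <2 x i32> operands
+ // with mask <0, 1>, the fused <4 x i32> shuffle uses mask <0, 1, 2, 3>:
+ // J's entries are shifted by the operand element count because J's
+ // operands occupy the upper halves of the fused inputs.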
+
+ // Returns the value to be used as the specified operand of the vector
+ // instruction that fuses I with J.
+ Value *BBVectorize::getReplacementInput(LLVMContext& Context, Instruction *I,
+ Instruction *J, unsigned o, bool FlipMemInputs) {
+ Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), 1);
+
+ // Compute the fused vector type for this operand
+ Type *ArgType = I->getOperand(o)->getType();
+ VectorType *VArgType = getVecTypeForPair(ArgType);
+
+ Instruction *L = I, *H = J;
+ if (FlipMemInputs) {
+ L = J;
+ H = I;
+ }
+
+ if (ArgType->isVectorTy()) {
+ unsigned numElem = cast<VectorType>(VArgType)->getNumElements();
+ std::vector<Constant*> Mask(numElem);
+ for (unsigned v = 0; v < numElem; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+
+ Instruction *BV = new ShuffleVectorInst(L->getOperand(o),
+ H->getOperand(o),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
+ BV->insertBefore(J);
+ return BV;
+ }
+
+ // If these two inputs are the output of another vector instruction,
+ // then we should use that output directly. It might be necessary to
+ // permute it first. (When pairings are fused recursively, you can
+ // end up with cases where a large vector is decomposed into scalars
+ // using extractelement instructions, then built into size-2
+ // vectors using insertelement and then into larger vectors using
+ // shuffles. InstCombine does not simplify all of these cases well,
+ // and so we make sure that shuffles are generated here when possible.)
+ ExtractElementInst *LEE
+ = dyn_cast<ExtractElementInst>(L->getOperand(o));
+ ExtractElementInst *HEE
+ = dyn_cast<ExtractElementInst>(H->getOperand(o));
+
+ if (LEE && HEE &&
+ LEE->getOperand(0)->getType() == HEE->getOperand(0)->getType()) {
+ VectorType *EEType = cast<VectorType>(LEE->getOperand(0)->getType());
+ unsigned LowIndx = cast<ConstantInt>(LEE->getOperand(1))->getZExtValue();
+ unsigned HighIndx = cast<ConstantInt>(HEE->getOperand(1))->getZExtValue();
+ if (LEE->getOperand(0) == HEE->getOperand(0)) {
+ if (LowIndx == 0 && HighIndx == 1)
+ return LEE->getOperand(0);
+
+ std::vector<Constant*> Mask(2);
+ Mask[0] = ConstantInt::get(Type::getInt32Ty(Context), LowIndx);
+ Mask[1] = ConstantInt::get(Type::getInt32Ty(Context), HighIndx);
+
+ Instruction *BV = new ShuffleVectorInst(LEE->getOperand(0),
+ UndefValue::get(EEType),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
+ BV->insertBefore(J);
+ return BV;
+ }
+
+ std::vector<Constant*> Mask(2);
+ HighIndx += EEType->getNumElements();
+ Mask[0] = ConstantInt::get(Type::getInt32Ty(Context), LowIndx);
+ Mask[1] = ConstantInt::get(Type::getInt32Ty(Context), HighIndx);
+
+ Instruction *BV = new ShuffleVectorInst(LEE->getOperand(0),
+ HEE->getOperand(0),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
+ BV->insertBefore(J);
+ return BV;
+ }
+
+ Instruction *BV1 = InsertElementInst::Create(
+ UndefValue::get(VArgType),
+ L->getOperand(o), CV0,
+ getReplacementName(I, true, o, 1));
+ BV1->insertBefore(I);
+ Instruction *BV2 = InsertElementInst::Create(BV1, H->getOperand(o),
+ CV1,
+ getReplacementName(I, true, o, 2));
+ BV2->insertBefore(J);
+ return BV2;
+ }
+
+ // This function creates an array of values that will be used as the inputs
+ // to the vector instruction that fuses I with J.
+ void BBVectorize::getReplacementInputsForPair(LLVMContext& Context,
+ Instruction *I, Instruction *J,
+ SmallVector<Value *, 3> &ReplacedOperands,
+ bool &FlipMemInputs) {
+ FlipMemInputs = false;
+ unsigned NumOperands = I->getNumOperands();
+
+ for (unsigned p = 0, o = NumOperands-1; p < NumOperands; ++p, --o) {
+ // Iterate backward so that we look at the store pointer
+ // first and know whether or not we need to flip the inputs.
+
+ if (isa<LoadInst>(I) || (o == 1 && isa<StoreInst>(I))) {
+ // This is the pointer for a load/store instruction.
+ ReplacedOperands[o] = getReplacementPointerInput(Context, I, J, o,
+ FlipMemInputs);
+ continue;
+ } else if (isa<CallInst>(I)) {
+ Function *F = cast<CallInst>(I)->getCalledFunction();
+ unsigned IID = F->getIntrinsicID();
+ if (o == NumOperands-1) {
+ BasicBlock &BB = *I->getParent();
+
+ Module *M = BB.getParent()->getParent();
+ Type *ArgType = I->getType();
+ Type *VArgType = getVecTypeForPair(ArgType);
+
+ // FIXME: is it safe to do this here?
+ ReplacedOperands[o] = Intrinsic::getDeclaration(M,
+ (Intrinsic::ID) IID, VArgType);
+ continue;
+ } else if (IID == Intrinsic::powi && o == 1) {
+ // The second argument of powi is a single integer and we've already
+ // checked that both arguments are equal. As a result, we just keep
+ // I's second argument.
+ ReplacedOperands[o] = I->getOperand(o);
+ continue;
+ }
+ } else if (isa<ShuffleVectorInst>(I) && o == NumOperands-1) {
+ ReplacedOperands[o] = getReplacementShuffleMask(Context, I, J);
+ continue;
+ }
+
+ ReplacedOperands[o] =
+ getReplacementInput(Context, I, J, o, FlipMemInputs);
+ }
+ }
+
+ // This function creates two values that represent the outputs of the
+ // original I and J instructions. These are generally vector shuffles
+ // or extracts. In many cases, these will end up being unused and, thus,
+ // eliminated by later passes.
+ void BBVectorize::replaceOutputsOfPair(LLVMContext& Context, Instruction *I,
+ Instruction *J, Instruction *K,
+ Instruction *&InsertionPt,
+ Instruction *&K1, Instruction *&K2,
+ bool &FlipMemInputs) {
+ Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), 1);
+
+ if (isa<StoreInst>(I)) {
+ AA->replaceWithNewValue(I, K);
+ AA->replaceWithNewValue(J, K);
+ } else {
+ Type *IType = I->getType();
+ Type *VType = getVecTypeForPair(IType);
+
+ if (IType->isVectorTy()) {
+ unsigned numElem = cast<VectorType>(IType)->getNumElements();
+ std::vector<Constant*> Mask1(numElem), Mask2(numElem);
+ for (unsigned v = 0; v < numElem; ++v) {
+ Mask1[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ Mask2[v] = ConstantInt::get(Type::getInt32Ty(Context), numElem+v);
+ }
+
+ K1 = new ShuffleVectorInst(K, UndefValue::get(VType),
+ ConstantVector::get(
+ FlipMemInputs ? Mask2 : Mask1),
+ getReplacementName(K, false, 1));
+ K2 = new ShuffleVectorInst(K, UndefValue::get(VType),
+ ConstantVector::get(
+ FlipMemInputs ? Mask1 : Mask2),
+ getReplacementName(K, false, 2));
+ } else {
+ K1 = ExtractElementInst::Create(K, FlipMemInputs ? CV1 : CV0,
+ getReplacementName(K, false, 1));
+ K2 = ExtractElementInst::Create(K, FlipMemInputs ? CV0 : CV1,
+ getReplacementName(K, false, 2));
+ }
+
+ K1->insertAfter(K);
+ K2->insertAfter(K1);
+ InsertionPt = K2;
+ }
+ }
+
+ // Determine whether all uses of the instruction I (including
+ // pairing-induced uses) can be moved after J without creating a
+ // dependency cycle.
+ bool BBVectorize::canMoveUsesOfIAfterJ(BasicBlock &BB,
+ std::multimap<Value *, Value *> &LoadMoveSet,
+ Instruction *I, Instruction *J) {
+ // Skip to the first instruction past I.
+ BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
+
+ DenseSet<Value *> Users;
+ AliasSetTracker WriteSet(*AA);
+ for (; cast<Instruction>(L) != J; ++L)
+ (void) trackUsesOfI(Users, WriteSet, I, L, true, &LoadMoveSet);
+
+ assert(cast<Instruction>(L) == J &&
+ "Tracking has not proceeded far enough to check for dependencies");
+ // If J is now in the use set of I, then trackUsesOfI will return true
+ // and we have a dependency cycle (and the fusing operation must abort).
+ return !trackUsesOfI(Users, WriteSet, I, J, true, &LoadMoveSet);
+ }
+
+ // Move all uses of the instruction I (including pairing-induced uses)
+ // after J.
+ void BBVectorize::moveUsesOfIAfterJ(BasicBlock &BB,
+ std::multimap<Value *, Value *> &LoadMoveSet,
+ Instruction *&InsertionPt,
+ Instruction *I, Instruction *J) {
+ // Skip to the first instruction past I.
+ BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
+
+ DenseSet<Value *> Users;
+ AliasSetTracker WriteSet(*AA);
+ for (; cast<Instruction>(L) != J;) {
+ if (trackUsesOfI(Users, WriteSet, I, L, true, &LoadMoveSet)) {
+ // Move this instruction
+ Instruction *InstToMove = L; ++L;
+
+ DEBUG(dbgs() << "BBV: moving: " << *InstToMove <<
+ " to after " << *InsertionPt << "\n");
+ InstToMove->removeFromParent();
+ InstToMove->insertAfter(InsertionPt);
+ InsertionPt = InstToMove;
+ } else {
+ ++L;
+ }
+ }
+ }
+
+ // Collect all load instructions that are in the move set of a given first
+ // pair member. These loads depend on the first instruction, I, and so need
+ // to be moved after J (the second instruction) when the pair is fused.
+ void BBVectorize::collectPairLoadMoveSet(BasicBlock &BB,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ std::multimap<Value *, Value *> &LoadMoveSet,
+ Instruction *I) {
+ // Skip to the first instruction past I.
+ BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
+
+ DenseSet<Value *> Users;
+ AliasSetTracker WriteSet(*AA);
+
+ // Note: We cannot end the loop when we reach J because J could be moved
+ // farther down the use chain by another instruction pairing. Also, J
+ // could be before I if this is an inverted input.
+ for (BasicBlock::iterator E = BB.end(); cast<Instruction>(L) != E; ++L) {
+ if (trackUsesOfI(Users, WriteSet, I, L)) {
+ if (L->mayReadFromMemory())
+ LoadMoveSet.insert(ValuePair(L, I));
+ }
+ }
+ }
+
+ // In cases where both load/stores and the computation of their pointers
+ // are chosen for vectorization, we can end up in a situation where the
+ // aliasing analysis starts returning different query results as the
+ // process of fusing instruction pairs continues. Because the algorithm
+ // relies on finding the same use trees here as were found earlier, we'll
+ // need to precompute the necessary aliasing information here and then
+ // manually update it during the fusion process.
+ void BBVectorize::collectLoadMoveSet(BasicBlock &BB,
+ std::vector<Value *> &PairableInsts,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ std::multimap<Value *, Value *> &LoadMoveSet) {
+ for (std::vector<Value *>::iterator PI = PairableInsts.begin(),
+ PIE = PairableInsts.end(); PI != PIE; ++PI) {
+ DenseMap<Value *, Value *>::iterator P = ChosenPairs.find(*PI);
+ if (P == ChosenPairs.end()) continue;
+
+ Instruction *I = cast<Instruction>(P->first);
+ collectPairLoadMoveSet(BB, ChosenPairs, LoadMoveSet, I);
+ }
+ }
+
+ // This function fuses the chosen instruction pairs into vector instructions,
+ // taking care to preserve any needed scalar outputs, and then reorders the
+ // remaining instructions as needed (users of the first member of the pair
+ // need to be moved to after the location of the second member of the pair
+ // because the vector instruction is inserted in the location of the pair's
+ // second member).
+ void BBVectorize::fuseChosenPairs(BasicBlock &BB,
+ std::vector<Value *> &PairableInsts,
+ DenseMap<Value *, Value *> &ChosenPairs) {
+ LLVMContext& Context = BB.getContext();
+
+ // During the vectorization process, the order of the pairs to be fused
+ // could be flipped. So we'll add each pair, flipped, into the ChosenPairs
+ // list. After a pair is fused, the flipped pair is removed from the list.
+ std::vector<ValuePair> FlippedPairs;
+ FlippedPairs.reserve(ChosenPairs.size());
+ for (DenseMap<Value *, Value *>::iterator P = ChosenPairs.begin(),
+ E = ChosenPairs.end(); P != E; ++P)
+ FlippedPairs.push_back(ValuePair(P->second, P->first));
+ for (std::vector<ValuePair>::iterator P = FlippedPairs.begin(),
+ E = FlippedPairs.end(); P != E; ++P)
+ ChosenPairs.insert(*P);
+
+ std::multimap<Value *, Value *> LoadMoveSet;
+ collectLoadMoveSet(BB, PairableInsts, ChosenPairs, LoadMoveSet);
+
+ DEBUG(dbgs() << "BBV: initial: \n" << BB << "\n");
+
+ for (BasicBlock::iterator PI = BB.getFirstInsertionPt(); PI != BB.end();) {
+ DenseMap<Value *, Value *>::iterator P = ChosenPairs.find(PI);
+ if (P == ChosenPairs.end()) {
+ ++PI;
+ continue;
+ }
+
+ if (getDepthFactor(P->first) == 0) {
+ // These instructions are not really fused, but are tracked as though
+ // they are. Any case in which it would be interesting to fuse them
+ // will be taken care of by InstCombine.
+ --NumFusedOps;
+ ++PI;
+ continue;
+ }
+
+ Instruction *I = cast<Instruction>(P->first),
+ *J = cast<Instruction>(P->second);
+
+ DEBUG(dbgs() << "BBV: fusing: " << *I <<
+ " <-> " << *J << "\n");
+
+ // Remove the pair and flipped pair from the list.
+ DenseMap<Value *, Value *>::iterator FP = ChosenPairs.find(P->second);
+ assert(FP != ChosenPairs.end() && "Flipped pair not found in list");
+ ChosenPairs.erase(FP);
+ ChosenPairs.erase(P);
+
+ if (!canMoveUsesOfIAfterJ(BB, LoadMoveSet, I, J)) {
+ DEBUG(dbgs() << "BBV: fusion of: " << *I <<
+ " <-> " << *J <<
+ " aborted because of non-trivial dependency cycle\n");
+ --NumFusedOps;
+ ++PI;
+ continue;
+ }
+
+ bool FlipMemInputs;
+ unsigned NumOperands = I->getNumOperands();
+ SmallVector<Value *, 3> ReplacedOperands(NumOperands);
+ getReplacementInputsForPair(Context, I, J, ReplacedOperands,
+ FlipMemInputs);
+
+ // Make a copy of the original operation, change its type to the vector
+ // type and replace its operands with the vector operands.
+ Instruction *K = I->clone();
+ if (I->hasName()) K->takeName(I);
+
+ if (!isa<StoreInst>(K))
+ K->mutateType(getVecTypeForPair(I->getType()));
+
+ for (unsigned o = 0; o < NumOperands; ++o)
+ K->setOperand(o, ReplacedOperands[o]);
+
+ // If we've flipped the memory inputs, make sure that we take the correct
+ // alignment.
+ if (FlipMemInputs) {
+ if (isa<StoreInst>(K))
+ cast<StoreInst>(K)->setAlignment(cast<StoreInst>(J)->getAlignment());
+ else
+ cast<LoadInst>(K)->setAlignment(cast<LoadInst>(J)->getAlignment());
+ }
+
+ K->insertAfter(J);
+
+ // Instruction insertion point:
+ Instruction *InsertionPt = K;
+ Instruction *K1 = 0, *K2 = 0;
+ replaceOutputsOfPair(Context, I, J, K, InsertionPt, K1, K2,
+ FlipMemInputs);
+
+ // The use tree of the first original instruction must be moved to after
+ // the location of the second instruction. The entire use tree of the
+ // first instruction is disjoint from the input tree of the second
+ // (by definition), and so commutes with it.
+
+ moveUsesOfIAfterJ(BB, LoadMoveSet, InsertionPt, I, J);
+
+ if (!isa<StoreInst>(I)) {
+ I->replaceAllUsesWith(K1);
+ J->replaceAllUsesWith(K2);
+ AA->replaceWithNewValue(I, K1);
+ AA->replaceWithNewValue(J, K2);
+ }
+
+ // Instructions that may read from memory may be in the load move set.
+ // Once an instruction is fused, we no longer need its move set, and so
+ // the values of the map never need to be updated. However, when a load
+ // is fused, we need to merge the entries from both instructions in the
+ // pair in case those instructions were in the move set of some other
+ // yet-to-be-fused pair. The loads in question are the keys of the map.
+ if (I->mayReadFromMemory()) {
+ std::vector<ValuePair> NewSetMembers;
+ VPIteratorPair IPairRange = LoadMoveSet.equal_range(I);
+ VPIteratorPair JPairRange = LoadMoveSet.equal_range(J);
+ for (std::multimap<Value *, Value *>::iterator N = IPairRange.first;
+ N != IPairRange.second; ++N)
+ NewSetMembers.push_back(ValuePair(K, N->second));
+ for (std::multimap<Value *, Value *>::iterator N = JPairRange.first;
+ N != JPairRange.second; ++N)
+ NewSetMembers.push_back(ValuePair(K, N->second));
+ for (std::vector<ValuePair>::iterator A = NewSetMembers.begin(),
+ AE = NewSetMembers.end(); A != AE; ++A)
+ LoadMoveSet.insert(*A);
+ }
+
+ // Before removing I, set the iterator to the next instruction.
+ PI = llvm::next(BasicBlock::iterator(I));
+ if (cast<Instruction>(PI) == J)
+ ++PI;
+
+ SE->forgetValue(I);
+ SE->forgetValue(J);
+ I->eraseFromParent();
+ J->eraseFromParent();
+ }
+
+ DEBUG(dbgs() << "BBV: final: \n" << BB << "\n");
+ }
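+
+ // End-to-end example (illustrative, LLVM 3.x IR): fusing the load pair
+ // %a = load double* %p
+ // %b = load double* %q ; with %q one element past %p
+ // produces a bitcast of %p to <2 x double>*, a single vector load, and
+ // extractelement instructions that replace the scalar uses of %a and %b.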
+}
+
+char BBVectorize::ID = 0;
+static const char bb_vectorize_name[] = "Basic-Block Vectorization";
+INITIALIZE_PASS_BEGIN(BBVectorize, BBV_NAME, bb_vectorize_name, false, false)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_END(BBVectorize, BBV_NAME, bb_vectorize_name, false, false)
+
+BasicBlockPass *llvm::createBBVectorizePass(const VectorizeConfig &C) {
+ return new BBVectorize(C);
+}
+
+bool
+llvm::vectorizeBasicBlock(Pass *P, BasicBlock &BB, const VectorizeConfig &C) {
+ BBVectorize BBVectorizer(P, C);
+ return BBVectorizer.vectorizeBB(BB);
+}
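+
+// A minimal usage sketch (illustrative; assumes the caller provides a Pass
+// with AliasAnalysis and ScalarEvolution available, as the pass requires):
+//
+//   VectorizeConfig C;
+//   C.ReqChainDepth = 4; // hypothetical override of the default
+//   bool Changed = vectorizeBasicBlock(P, BB, C);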
+
+//===----------------------------------------------------------------------===//
+VectorizeConfig::VectorizeConfig() {
+ VectorBits = ::VectorBits;
+ VectorizeInts = !::NoInts;
+ VectorizeFloats = !::NoFloats;
+ VectorizeCasts = !::NoCasts;
+ VectorizeMath = !::NoMath;
+ VectorizeFMA = !::NoFMA;
+ VectorizeMemOps = !::NoMemOps;
+ AlignedOnly = ::AlignedOnly;
+ ReqChainDepth= ::ReqChainDepth;
+ SearchLimit = ::SearchLimit;
+ MaxCandPairsForCycleCheck = ::MaxCandPairsForCycleCheck;
+ SplatBreaksChain = ::SplatBreaksChain;
+ MaxInsts = ::MaxInsts;
+ MaxIter = ::MaxIter;
+ NoMemOpBoost = ::NoMemOpBoost;
+ FastDep = ::FastDep;
+}
diff --git a/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp b/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp
new file mode 100644
index 0000000..1ef6002
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Vectorize/Vectorize.cpp
@@ -0,0 +1,39 @@
+//===-- Vectorize.cpp -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements common infrastructure for libLLVMVectorizeOpts.a, which
+// implements several vectorization transformations over the LLVM intermediate
+// representation, including the C bindings for that library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/Transforms/Vectorize.h"
+#include "llvm-c/Initialization.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Transforms/Vectorize.h"
+
+using namespace llvm;
+
+/// initializeVectorization - Initialize all passes linked into the
+/// Vectorization library.
+void llvm::initializeVectorization(PassRegistry &Registry) {
+ initializeBBVectorizePass(Registry);
+}
+
+void LLVMInitializeVectorization(LLVMPassRegistryRef R) {
+ initializeVectorization(*unwrap(R));
+}
+
+void LLVMAddBBVectorizePass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createBBVectorizePass());
+}
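+
+// Illustrative use from the C API: add the pass to a pass manager and run
+// it over a module, e.g.
+//   LLVMAddBBVectorizePass(PM);
+//   LLVMRunPassManager(PM, M); // PM and M are caller-provided refs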
+
diff --git a/contrib/llvm/lib/VMCore/AsmWriter.cpp b/contrib/llvm/lib/VMCore/AsmWriter.cpp
index 18308f2..7b39efb 100644
--- a/contrib/llvm/lib/VMCore/AsmWriter.cpp
+++ b/contrib/llvm/lib/VMCore/AsmWriter.cpp
@@ -89,7 +89,6 @@ enum PrefixType {
static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
assert(!Name.empty() && "Cannot get empty name!");
switch (Prefix) {
- default: llvm_unreachable("Bad prefix!");
case NoPrefix: break;
case GlobalPrefix: OS << '@'; break;
case LabelPrefix: break;
@@ -189,6 +188,7 @@ void TypePrinting::incorporateTypes(const Module &M) {
void TypePrinting::print(Type *Ty, raw_ostream &OS) {
switch (Ty->getTypeID()) {
case Type::VoidTyID: OS << "void"; break;
+ case Type::HalfTyID: OS << "half"; break;
case Type::FloatTyID: OS << "float"; break;
case Type::DoubleTyID: OS << "double"; break;
case Type::X86_FP80TyID: OS << "x86_fp80"; break;
@@ -231,7 +231,7 @@ void TypePrinting::print(Type *Ty, raw_ostream &OS) {
if (I != NumberedTypes.end())
OS << '%' << I->second;
else // Not enumerated, print the hex address.
- OS << "%\"type 0x" << STy << '\"';
+ OS << "%\"type " << STy << '\"';
return;
}
case Type::PointerTyID: {
@@ -708,31 +708,37 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
- if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEdouble ||
- &CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle) {
+ if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEhalf ||
+ &CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle ||
+ &CFP->getValueAPF().getSemantics() == &APFloat::IEEEdouble) {
// We would like to output the FP constant value in exponential notation,
// but we cannot do this if doing so will lose precision. Check here to
// make sure that we only output it in exponential format if we can parse
// the value back and get the same value.
//
bool ignored;
+ bool isHalf = &CFP->getValueAPF().getSemantics()==&APFloat::IEEEhalf;
bool isDouble = &CFP->getValueAPF().getSemantics()==&APFloat::IEEEdouble;
- double Val = isDouble ? CFP->getValueAPF().convertToDouble() :
- CFP->getValueAPF().convertToFloat();
- SmallString<128> StrVal;
- raw_svector_ostream(StrVal) << Val;
-
- // Check to make sure that the stringized number is not some string like
- // "Inf" or NaN, that atof will accept, but the lexer will not. Check
- // that the string matches the "[-+]?[0-9]" regex.
- //
- if ((StrVal[0] >= '0' && StrVal[0] <= '9') ||
- ((StrVal[0] == '-' || StrVal[0] == '+') &&
- (StrVal[1] >= '0' && StrVal[1] <= '9'))) {
- // Reparse stringized version!
- if (atof(StrVal.c_str()) == Val) {
- Out << StrVal.str();
- return;
+ bool isInf = CFP->getValueAPF().isInfinity();
+ bool isNaN = CFP->getValueAPF().isNaN();
+ if (!isHalf && !isInf && !isNaN) {
+ double Val = isDouble ? CFP->getValueAPF().convertToDouble() :
+ CFP->getValueAPF().convertToFloat();
+ SmallString<128> StrVal;
+ raw_svector_ostream(StrVal) << Val;
+
+ // Check to make sure that the stringized number is not some string like
+ // "Inf" or "NaN" that the reparse below would accept but the lexer will
+ // not. Check that the string matches the "[-+]?[0-9]" regex.
+ //
+ if ((StrVal[0] >= '0' && StrVal[0] <= '9') ||
+ ((StrVal[0] == '-' || StrVal[0] == '+') &&
+ (StrVal[1] >= '0' && StrVal[1] <= '9'))) {
+ // Reparse stringized version!
+ if (APFloat(APFloat::IEEEdouble, StrVal).convertToDouble() == Val) {
+ Out << StrVal.str();
+ return;
+ }
}
}
// Otherwise we could not reparse it to exactly the same value, so we must
@@ -743,7 +749,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
"assuming that double is 64 bits!");
char Buffer[40];
APFloat apf = CFP->getValueAPF();
- // Floats are represented in ASCII IR as double, convert.
+ // Halves and floats are represented in ASCII IR as double; convert.
if (!isDouble)
apf.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
&ignored);
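The rewrite replaces the old atof() round trip with an APFloat reparse, and skips the decimal path entirely for half, Inf and NaN, which must be written in hex. The round-trip test the new code performs reduces to this sketch (helper name is illustrative):

    #include "llvm/ADT/APFloat.h"
    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // True if Val survives a print/reparse round trip and can therefore be
    // emitted in decimal form without losing precision.
    static bool roundTripsExactly(double Val) {
      SmallString<128> StrVal;
      raw_svector_ostream(StrVal) << Val;  // temporary flushes at end of stmt
      return APFloat(APFloat::IEEEdouble, StrVal).convertToDouble() == Val;
    }
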
@@ -823,35 +829,53 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
}
if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
+ Type *ETy = CA->getType()->getElementType();
+ Out << '[';
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getOperand(0),
+ &TypePrinter, Machine,
+ Context);
+ for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) {
+ Out << ", ";
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getOperand(i), &TypePrinter, Machine,
+ Context);
+ }
+ Out << ']';
+ return;
+ }
+
+ if (const ConstantDataArray *CA = dyn_cast<ConstantDataArray>(CV)) {
// As a special case, print the array as a string if it is an array of
// i8 with ConstantInt values.
- //
- Type *ETy = CA->getType()->getElementType();
if (CA->isString()) {
Out << "c\"";
PrintEscapedString(CA->getAsString(), Out);
Out << '"';
- } else { // Cannot output in string format...
- Out << '[';
- if (CA->getNumOperands()) {
- TypePrinter.print(ETy, Out);
- Out << ' ';
- WriteAsOperandInternal(Out, CA->getOperand(0),
- &TypePrinter, Machine,
- Context);
- for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) {
- Out << ", ";
- TypePrinter.print(ETy, Out);
- Out << ' ';
- WriteAsOperandInternal(Out, CA->getOperand(i), &TypePrinter, Machine,
- Context);
- }
- }
- Out << ']';
+ return;
}
+
+ Type *ETy = CA->getType()->getElementType();
+ Out << '[';
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getElementAsConstant(0),
+ &TypePrinter, Machine,
+ Context);
+ for (unsigned i = 1, e = CA->getNumElements(); i != e; ++i) {
+ Out << ", ";
+ TypePrinter.print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getElementAsConstant(i), &TypePrinter,
+ Machine, Context);
+ }
+ Out << ']';
return;
}
+
if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
if (CS->getType()->isPacked())
Out << '<';
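After the split, ConstantDataArray owns the common printable case and everything else falls through to elementwise printing via getElementAsConstant(). A sketch of building a constant that takes the string path, assuming the 3.1 ConstantDataArray::getString() factory:

    #include "llvm/Constants.h"

    using namespace llvm;

    Constant *makeGreeting(LLVMContext &Ctx) {
      // A [6 x i8] constant; the writer above prints it as c"hello\00".
      return ConstantDataArray::getString(Ctx, "hello", /*AddNull=*/true);
    }
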
@@ -882,21 +906,19 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
return;
}
- if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
- Type *ETy = CP->getType()->getElementType();
- assert(CP->getNumOperands() > 0 &&
- "Number of operands for a PackedConst must be > 0");
+ if (isa<ConstantVector>(CV) || isa<ConstantDataVector>(CV)) {
+ Type *ETy = CV->getType()->getVectorElementType();
Out << '<';
TypePrinter.print(ETy, Out);
Out << ' ';
- WriteAsOperandInternal(Out, CP->getOperand(0), &TypePrinter, Machine,
- Context);
- for (unsigned i = 1, e = CP->getNumOperands(); i != e; ++i) {
+ WriteAsOperandInternal(Out, CV->getAggregateElement(0U), &TypePrinter,
+ Machine, Context);
+ for (unsigned i = 1, e = CV->getType()->getVectorNumElements(); i != e;++i){
Out << ", ";
TypePrinter.print(ETy, Out);
Out << ' ';
- WriteAsOperandInternal(Out, CP->getOperand(i), &TypePrinter, Machine,
- Context);
+ WriteAsOperandInternal(Out, CV->getAggregateElement(i), &TypePrinter,
+ Machine, Context);
}
Out << '>';
return;
@@ -1162,7 +1184,6 @@ void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
return;
switch (SynchScope) {
- default: Out << " <bad scope " << int(SynchScope) << ">"; break;
case SingleThread: Out << " singlethread"; break;
case CrossThread: break;
}
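Removing the default case lets the compiler's switch-coverage warning catch any future SynchronizationScope values, and the two existing scopes print as shown. For reference, the scope is attached at fence-creation time; a sketch:

    #include "llvm/Support/IRBuilder.h"  // llvm/IRBuilder.h in later releases

    using namespace llvm;

    void emitFences(IRBuilder<> &Builder) {
      // Prints as: fence singlethread acquire
      Builder.CreateFence(Acquire, SingleThread);
      // Prints as: fence seq_cst  (CrossThread is the default, not printed)
      Builder.CreateFence(SequentiallyConsistent);
    }
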
@@ -1710,13 +1731,12 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << ", ";
writeOperand(SI.getDefaultDest(), true);
Out << " [";
- // Skip the first item since that's the default case.
- unsigned NumCases = SI.getNumCases();
- for (unsigned i = 1; i < NumCases; ++i) {
+ for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
+ i != e; ++i) {
Out << "\n ";
- writeOperand(SI.getCaseValue(i), true);
+ writeOperand(i.getCaseValue(), true);
Out << ", ";
- writeOperand(SI.getSuccessor(i), true);
+ writeOperand(i.getCaseSuccessor(), true);
}
Out << "\n ]";
} else if (isa<IndirectBrInst>(I)) {
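The writer now uses the 3.1 case-iterator API instead of raw operand indexing, which excludes the default destination from the loop by construction. The same idiom works in any pass that walks switch cases; a sketch:

    #include "llvm/Constants.h"
    #include "llvm/Instructions.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    void dumpCases(SwitchInst &SI) {
      for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
           i != e; ++i) {
        outs() << "case " << i.getCaseValue()->getValue()   // never the default
               << " -> " << i.getCaseSuccessor()->getName() << '\n';
      }
    }
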
@@ -1988,7 +2008,7 @@ static void WriteMDNodeComment(const MDNode *Node,
if (!CI) return;
APInt Val = CI->getValue();
APInt Tag = Val & ~APInt(Val.getBitWidth(), LLVMDebugVersionMask);
- if (Val.ult(LLVMDebugVersion))
+ if (Val.ult(LLVMDebugVersion11))
return;
Out.PadToColumn(50);
@@ -2110,3 +2130,6 @@ void Type::dump() const { print(dbgs()); }
// Module::dump() - Allow printing of Modules from the debugger.
void Module::dump() const { print(dbgs(), 0); }
+
+// NamedMDNode::dump() - Allow printing of NamedMDNodes from the debugger.
+void NamedMDNode::dump() const { print(dbgs(), 0); }
diff --git a/contrib/llvm/lib/VMCore/Attributes.cpp b/contrib/llvm/lib/VMCore/Attributes.cpp
index 485be75..c05132b 100644
--- a/contrib/llvm/lib/VMCore/Attributes.cpp
+++ b/contrib/llvm/lib/VMCore/Attributes.cpp
@@ -76,6 +76,8 @@ std::string Attribute::getAsString(Attributes Attrs) {
Result += "naked ";
if (Attrs & Attribute::NonLazyBind)
Result += "nonlazybind ";
+ if (Attrs & Attribute::AddressSafety)
+ Result += "address_safety ";
if (Attrs & Attribute::StackAlignment) {
Result += "alignstack(";
Result += utostr(Attribute::getStackAlignmentFromAttrs(Attrs));
@@ -152,8 +154,10 @@ public:
}
static void Profile(FoldingSetNodeID &ID, const AttributeWithIndex *Attr,
unsigned NumAttrs) {
- for (unsigned i = 0; i != NumAttrs; ++i)
- ID.AddInteger(uint64_t(Attr[i].Attrs) << 32 | unsigned(Attr[i].Index));
+ for (unsigned i = 0; i != NumAttrs; ++i) {
+ ID.AddInteger(Attr[i].Attrs.Raw());
+ ID.AddInteger(Attr[i].Index);
+ }
}
};
}
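address_safety is the function attribute consumed by the AddressSanitizer instrumentation pass that landed in LLVM 3.1. Tagging a function from C++ looks roughly like this sketch:

    #include "llvm/Attributes.h"
    #include "llvm/Function.h"

    using namespace llvm;

    void markForAddressSafety(Function &F) {
      // Prints in the function's attribute list as: address_safety
      F.addFnAttr(Attribute::AddressSafety);
    }
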
diff --git a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
index b849d3e..ea3d4ba 100644
--- a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
+++ b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
@@ -38,105 +38,31 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
return false;
Name = Name.substr(5); // Strip off "llvm."
- FunctionType *FTy = F->getFunctionType();
- Module *M = F->getParent();
-
switch (Name[0]) {
default: break;
- case 'a':
- if (Name.startswith("atomic.cmp.swap") ||
- Name.startswith("atomic.swap") ||
- Name.startswith("atomic.load.add") ||
- Name.startswith("atomic.load.sub") ||
- Name.startswith("atomic.load.and") ||
- Name.startswith("atomic.load.nand") ||
- Name.startswith("atomic.load.or") ||
- Name.startswith("atomic.load.xor") ||
- Name.startswith("atomic.load.max") ||
- Name.startswith("atomic.load.min") ||
- Name.startswith("atomic.load.umax") ||
- Name.startswith("atomic.load.umin"))
- return true;
- case 'i':
- // This upgrades the old llvm.init.trampoline to the new
- // llvm.init.trampoline and llvm.adjust.trampoline pair.
- if (Name == "init.trampoline") {
- // The new llvm.init.trampoline returns nothing.
- if (FTy->getReturnType()->isVoidTy())
- break;
-
- assert(FTy->getNumParams() == 3 && "old init.trampoline takes 3 args!");
-
- // Change the name of the old intrinsic so that we can play with its type.
- std::string NameTmp = F->getName();
- F->setName("");
- NewFn = cast<Function>(M->getOrInsertFunction(
- NameTmp,
- Type::getVoidTy(M->getContext()),
- FTy->getParamType(0), FTy->getParamType(1),
- FTy->getParamType(2), (Type *)0));
+ case 'c': {
+ if (Name.startswith("ctlz.") && F->arg_size() == 1) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
+ F->arg_begin()->getType());
return true;
}
- case 'm':
- if (Name == "memory.barrier")
- return true;
- case 'p':
- // This upgrades the llvm.prefetch intrinsic to accept one more parameter,
- // which is a instruction / data cache identifier. The old version only
- // implicitly accepted the data version.
- if (Name == "prefetch") {
- // Don't do anything if it has the correct number of arguments already
- if (FTy->getNumParams() == 4)
- break;
-
- assert(FTy->getNumParams() == 3 && "old prefetch takes 3 args!");
- // We first need to change the name of the old (bad) intrinsic, because
- // its type is incorrect, but we cannot overload that name. We
- // arbitrarily unique it here allowing us to construct a correctly named
- // and typed function below.
- std::string NameTmp = F->getName();
- F->setName("");
- NewFn = cast<Function>(M->getOrInsertFunction(NameTmp,
- FTy->getReturnType(),
- FTy->getParamType(0),
- FTy->getParamType(1),
- FTy->getParamType(2),
- FTy->getParamType(2),
- (Type*)0));
+ if (Name.startswith("cttz.") && F->arg_size() == 1) {
+ F->setName(Name + ".old");
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
+ F->arg_begin()->getType());
return true;
}
-
break;
+ }
case 'x': {
- const char *NewFnName = NULL;
- // This fixes the poorly named crc32 intrinsics.
- if (Name == "x86.sse42.crc32.8")
- NewFnName = "llvm.x86.sse42.crc32.32.8";
- else if (Name == "x86.sse42.crc32.16")
- NewFnName = "llvm.x86.sse42.crc32.32.16";
- else if (Name == "x86.sse42.crc32.32")
- NewFnName = "llvm.x86.sse42.crc32.32.32";
- else if (Name == "x86.sse42.crc64.8")
- NewFnName = "llvm.x86.sse42.crc32.64.8";
- else if (Name == "x86.sse42.crc64.64")
- NewFnName = "llvm.x86.sse42.crc32.64.64";
-
- if (NewFnName) {
- F->setName(NewFnName);
- NewFn = F;
+ if (Name.startswith("x86.sse2.pcmpeq.") ||
+ Name.startswith("x86.sse2.pcmpgt.") ||
+ Name.startswith("x86.avx2.pcmpeq.") ||
+ Name.startswith("x86.avx2.pcmpgt.")) {
+ NewFn = 0;
return true;
}
-
- // Calls to these instructions are transformed into unaligned loads.
- if (Name == "x86.sse.loadu.ps" || Name == "x86.sse2.loadu.dq" ||
- Name == "x86.sse2.loadu.pd")
- return true;
-
- // Calls to these instructions are transformed into nontemporal stores.
- if (Name == "x86.sse.movnt.ps" || Name == "x86.sse2.movnt.dq" ||
- Name == "x86.sse2.movnt.pd" || Name == "x86.sse2.movnt.i")
- return true;
-
break;
}
}
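The 3.0 ctlz/cttz intrinsics took one argument; 3.1 adds an i1 is_zero_undef flag, so the matcher above renames the old declaration and lets UpgradeIntrinsicCall rewrite each call site below. A sketch of emitting the new two-argument form directly:

    #include "llvm/Intrinsics.h"
    #include "llvm/Module.h"
    #include "llvm/Support/IRBuilder.h"

    using namespace llvm;

    // Old:  %r = call i32 @llvm.ctlz.i32(i32 %x)
    // New:  %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
    Value *emitCtlz(Module &M, IRBuilder<> &Builder, Value *X) {
      Value *Ctlz = Intrinsic::getDeclaration(&M, Intrinsic::ctlz, X->getType());
      // i1 false: the result stays defined even when %x is zero.
      return Builder.CreateCall2(Ctlz, X, Builder.getFalse());
    }
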
@@ -171,188 +97,52 @@ bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Function *F = CI->getCalledFunction();
LLVMContext &C = CI->getContext();
- ImmutableCallSite CS(CI);
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
- assert(F && "CallInst has no function associated with it.");
+ assert(F && "Intrinsic call is not direct?");
if (!NewFn) {
- if (F->getName() == "llvm.x86.sse.loadu.ps" ||
- F->getName() == "llvm.x86.sse2.loadu.dq" ||
- F->getName() == "llvm.x86.sse2.loadu.pd") {
- // Convert to a native, unaligned load.
- Type *VecTy = CI->getType();
- Type *IntTy = IntegerType::get(C, 128);
- IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI->getParent(), CI);
-
- Value *BC = Builder.CreateBitCast(CI->getArgOperand(0),
- PointerType::getUnqual(IntTy),
- "cast");
- LoadInst *LI = Builder.CreateLoad(BC, CI->getName());
- LI->setAlignment(1); // Unaligned load.
- BC = Builder.CreateBitCast(LI, VecTy, "new.cast");
-
- // Fix up all the uses with our new load.
- if (!CI->use_empty())
- CI->replaceAllUsesWith(BC);
-
- // Remove intrinsic.
- CI->eraseFromParent();
- } else if (F->getName() == "llvm.x86.sse.movnt.ps" ||
- F->getName() == "llvm.x86.sse2.movnt.dq" ||
- F->getName() == "llvm.x86.sse2.movnt.pd" ||
- F->getName() == "llvm.x86.sse2.movnt.i") {
- IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI->getParent(), CI);
-
- Module *M = F->getParent();
- SmallVector<Value *, 1> Elts;
- Elts.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
- MDNode *Node = MDNode::get(C, Elts);
-
- Value *Arg0 = CI->getArgOperand(0);
- Value *Arg1 = CI->getArgOperand(1);
-
- // Convert the type of the pointer to a pointer to the stored type.
- Value *BC = Builder.CreateBitCast(Arg0,
- PointerType::getUnqual(Arg1->getType()),
- "cast");
- StoreInst *SI = Builder.CreateStore(Arg1, BC);
- SI->setMetadata(M->getMDKindID("nontemporal"), Node);
- SI->setAlignment(16);
-
- // Remove intrinsic.
- CI->eraseFromParent();
- } else if (F->getName().startswith("llvm.atomic.cmp.swap")) {
- IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI->getParent(), CI);
- Value *Val = Builder.CreateAtomicCmpXchg(CI->getArgOperand(0),
- CI->getArgOperand(1),
- CI->getArgOperand(2),
- Monotonic);
-
- // Replace intrinsic.
- Val->takeName(CI);
- if (!CI->use_empty())
- CI->replaceAllUsesWith(Val);
- CI->eraseFromParent();
- } else if (F->getName().startswith("llvm.atomic")) {
- IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI->getParent(), CI);
-
- AtomicRMWInst::BinOp Op;
- if (F->getName().startswith("llvm.atomic.swap"))
- Op = AtomicRMWInst::Xchg;
- else if (F->getName().startswith("llvm.atomic.load.add"))
- Op = AtomicRMWInst::Add;
- else if (F->getName().startswith("llvm.atomic.load.sub"))
- Op = AtomicRMWInst::Sub;
- else if (F->getName().startswith("llvm.atomic.load.and"))
- Op = AtomicRMWInst::And;
- else if (F->getName().startswith("llvm.atomic.load.nand"))
- Op = AtomicRMWInst::Nand;
- else if (F->getName().startswith("llvm.atomic.load.or"))
- Op = AtomicRMWInst::Or;
- else if (F->getName().startswith("llvm.atomic.load.xor"))
- Op = AtomicRMWInst::Xor;
- else if (F->getName().startswith("llvm.atomic.load.max"))
- Op = AtomicRMWInst::Max;
- else if (F->getName().startswith("llvm.atomic.load.min"))
- Op = AtomicRMWInst::Min;
- else if (F->getName().startswith("llvm.atomic.load.umax"))
- Op = AtomicRMWInst::UMax;
- else if (F->getName().startswith("llvm.atomic.load.umin"))
- Op = AtomicRMWInst::UMin;
- else
- llvm_unreachable("Unknown atomic");
-
- Value *Val = Builder.CreateAtomicRMW(Op, CI->getArgOperand(0),
- CI->getArgOperand(1),
- Monotonic);
-
- // Replace intrinsic.
- Val->takeName(CI);
- if (!CI->use_empty())
- CI->replaceAllUsesWith(Val);
- CI->eraseFromParent();
- } else if (F->getName() == "llvm.memory.barrier") {
- IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI->getParent(), CI);
-
- // Note that this conversion ignores the "device" bit; it was not really
- // well-defined, and got abused because nobody paid enough attention to
- // get it right. In practice, this probably doesn't matter; application
- // code generally doesn't need anything stronger than
- // SequentiallyConsistent (and realistically, SequentiallyConsistent
- // is lowered to a strong enough barrier for almost anything).
-
- if (cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue())
- Builder.CreateFence(SequentiallyConsistent);
- else if (!cast<ConstantInt>(CI->getArgOperand(0))->getZExtValue())
- Builder.CreateFence(Release);
- else if (!cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue())
- Builder.CreateFence(Acquire);
- else
- Builder.CreateFence(AcquireRelease);
-
- // Remove intrinsic.
- CI->eraseFromParent();
+ // Get the Function's name.
+ StringRef Name = F->getName();
+
+ Value *Rep;
+ // Upgrade packed integer vector compare intrinsics to compare instructions.
+ if (Name.startswith("llvm.x86.sse2.pcmpeq.") ||
+ Name.startswith("llvm.x86.avx2.pcmpeq.")) {
+ Rep = Builder.CreateICmpEQ(CI->getArgOperand(0), CI->getArgOperand(1),
+ "pcmpeq");
+ // Need to sign extend since icmp returns a vector of i1.
+ Rep = Builder.CreateSExt(Rep, CI->getType(), "");
+ } else if (Name.startswith("llvm.x86.sse2.pcmpgt.") ||
+ Name.startswith("llvm.x86.avx2.pcmpgt.")) {
+ Rep = Builder.CreateICmpSGT(CI->getArgOperand(0), CI->getArgOperand(1),
+ "pcmpgt");
+ // Need to sign extend since icmp returns a vector of i1.
+ Rep = Builder.CreateSExt(Rep, CI->getType(), "");
} else {
llvm_unreachable("Unknown function for CallInst upgrade.");
}
- return;
- }
- switch (NewFn->getIntrinsicID()) {
- case Intrinsic::prefetch: {
- IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI->getParent(), CI);
- llvm::Type *I32Ty = llvm::Type::getInt32Ty(CI->getContext());
-
- // Add the extra "data cache" argument
- Value *Operands[4] = { CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2),
- llvm::ConstantInt::get(I32Ty, 1) };
- CallInst *NewCI = CallInst::Create(NewFn, Operands,
- CI->getName(), CI);
- NewCI->setTailCall(CI->isTailCall());
- NewCI->setCallingConv(CI->getCallingConv());
- // Handle any uses of the old CallInst.
- if (!CI->use_empty())
- // Replace all uses of the old call with the new cast which has the
- // correct type.
- CI->replaceAllUsesWith(NewCI);
-
- // Clean up the old call now that it has been completely upgraded.
+ CI->replaceAllUsesWith(Rep);
CI->eraseFromParent();
- break;
+ return;
}
- case Intrinsic::init_trampoline: {
-
- // Transform
- // %tramp = call i8* llvm.init.trampoline (i8* x, i8* y, i8* z)
- // to
- // call void llvm.init.trampoline (i8* %x, i8* %y, i8* %z)
- // %tramp = call i8* llvm.adjust.trampoline (i8* %x)
-
- Function *AdjustTrampolineFn =
- cast<Function>(Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::adjust_trampoline));
-
- IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI);
- Builder.CreateCall3(NewFn, CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2));
-
- CallInst *AdjustCall = Builder.CreateCall(AdjustTrampolineFn,
- CI->getArgOperand(0),
- CI->getName());
- if (!CI->use_empty())
- CI->replaceAllUsesWith(AdjustCall);
+ switch (NewFn->getIntrinsicID()) {
+ default:
+ llvm_unreachable("Unknown function for CallInst upgrade.");
+
+ case Intrinsic::ctlz:
+ case Intrinsic::cttz:
+ assert(CI->getNumArgOperands() == 1 &&
+ "Mismatch between function args and call args");
+ StringRef Name = CI->getName();
+ CI->setName(Name + ".old");
+ CI->replaceAllUsesWith(Builder.CreateCall2(NewFn, CI->getArgOperand(0),
+ Builder.getFalse(), Name));
CI->eraseFromParent();
- break;
- }
+ return;
}
}
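The SSE2/AVX2 packed integer compares map exactly onto a generic icmp followed by a sign extension back to the source vector type, which is why no replacement declaration (NewFn) is needed. The open-coded equivalent of what the upgrade emits, as a sketch:

    #include "llvm/Support/IRBuilder.h"

    using namespace llvm;

    // What a call to @llvm.x86.sse2.pcmpeq.* becomes after the upgrade.
    Value *emitPCmpEq(IRBuilder<> &Builder, Value *A, Value *B) {
      Value *Cmp = Builder.CreateICmpEQ(A, B, "pcmpeq");  // <N x i1>
      // pcmpeq lanes are all-ones or all-zeros, so sign extend the i1 mask.
      return Builder.CreateSExt(Cmp, A->getType());
    }
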
@@ -378,291 +168,3 @@ void llvm::UpgradeCallsToIntrinsic(Function* F) {
}
}
-/// This function strips all debug info intrinsics, except for llvm.dbg.declare.
-/// If an llvm.dbg.declare intrinsic is invalid, then this function simply
-/// strips that use.
-void llvm::CheckDebugInfoIntrinsics(Module *M) {
- if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
- while (!FuncStart->use_empty())
- cast<CallInst>(FuncStart->use_back())->eraseFromParent();
- FuncStart->eraseFromParent();
- }
-
- if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
- while (!StopPoint->use_empty())
- cast<CallInst>(StopPoint->use_back())->eraseFromParent();
- StopPoint->eraseFromParent();
- }
-
- if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
- while (!RegionStart->use_empty())
- cast<CallInst>(RegionStart->use_back())->eraseFromParent();
- RegionStart->eraseFromParent();
- }
-
- if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
- while (!RegionEnd->use_empty())
- cast<CallInst>(RegionEnd->use_back())->eraseFromParent();
- RegionEnd->eraseFromParent();
- }
-
- if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
- if (!Declare->use_empty()) {
- DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
- if (!isa<MDNode>(DDI->getArgOperand(0)) ||
- !isa<MDNode>(DDI->getArgOperand(1))) {
- while (!Declare->use_empty()) {
- CallInst *CI = cast<CallInst>(Declare->use_back());
- CI->eraseFromParent();
- }
- Declare->eraseFromParent();
- }
- }
- }
-}
-
-/// FindExnAndSelIntrinsics - Find the eh_exception and eh_selector intrinsic
-/// calls reachable from the unwind basic block.
-static void FindExnAndSelIntrinsics(BasicBlock *BB, CallInst *&Exn,
- CallInst *&Sel,
- SmallPtrSet<BasicBlock*, 8> &Visited) {
- if (!Visited.insert(BB)) return;
-
- for (BasicBlock::iterator
- I = BB->begin(), E = BB->end(); I != E; ++I) {
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
- switch (CI->getCalledFunction()->getIntrinsicID()) {
- default: break;
- case Intrinsic::eh_exception:
- assert(!Exn && "Found more than one eh.exception call!");
- Exn = CI;
- break;
- case Intrinsic::eh_selector:
- assert(!Sel && "Found more than one eh.selector call!");
- Sel = CI;
- break;
- }
-
- if (Exn && Sel) return;
- }
- }
-
- if (Exn && Sel) return;
-
- for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
- FindExnAndSelIntrinsics(*I, Exn, Sel, Visited);
- if (Exn && Sel) return;
- }
-}
-
-/// TransferClausesToLandingPadInst - Transfer the exception handling clauses
-/// from the eh_selector call to the new landingpad instruction.
-static void TransferClausesToLandingPadInst(LandingPadInst *LPI,
- CallInst *EHSel) {
- LLVMContext &Context = LPI->getContext();
- unsigned N = EHSel->getNumArgOperands();
-
- for (unsigned i = N - 1; i > 1; --i) {
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(EHSel->getArgOperand(i))){
- unsigned FilterLength = CI->getZExtValue();
- unsigned FirstCatch = i + FilterLength + !FilterLength;
- assert(FirstCatch <= N && "Invalid filter length");
-
- if (FirstCatch < N)
- for (unsigned j = FirstCatch; j < N; ++j) {
- Value *Val = EHSel->getArgOperand(j);
- if (!Val->hasName() || Val->getName() != "llvm.eh.catch.all.value") {
- LPI->addClause(EHSel->getArgOperand(j));
- } else {
- GlobalVariable *GV = cast<GlobalVariable>(Val);
- LPI->addClause(GV->getInitializer());
- }
- }
-
- if (!FilterLength) {
- // Cleanup.
- LPI->setCleanup(true);
- } else {
- // Filter.
- SmallVector<Constant *, 4> TyInfo;
- TyInfo.reserve(FilterLength - 1);
- for (unsigned j = i + 1; j < FirstCatch; ++j)
- TyInfo.push_back(cast<Constant>(EHSel->getArgOperand(j)));
- ArrayType *AType =
- ArrayType::get(!TyInfo.empty() ? TyInfo[0]->getType() :
- PointerType::getUnqual(Type::getInt8Ty(Context)),
- TyInfo.size());
- LPI->addClause(ConstantArray::get(AType, TyInfo));
- }
-
- N = i;
- }
- }
-
- if (N > 2)
- for (unsigned j = 2; j < N; ++j) {
- Value *Val = EHSel->getArgOperand(j);
- if (!Val->hasName() || Val->getName() != "llvm.eh.catch.all.value") {
- LPI->addClause(EHSel->getArgOperand(j));
- } else {
- GlobalVariable *GV = cast<GlobalVariable>(Val);
- LPI->addClause(GV->getInitializer());
- }
- }
-}
-
-/// This function upgrades the old pre-3.0 exception handling system to the new
-/// one. N.B. This will be removed in 3.1.
-void llvm::UpgradeExceptionHandling(Module *M) {
- Function *EHException = M->getFunction("llvm.eh.exception");
- Function *EHSelector = M->getFunction("llvm.eh.selector");
- if (!EHException || !EHSelector)
- return;
-
- LLVMContext &Context = M->getContext();
- Type *ExnTy = PointerType::getUnqual(Type::getInt8Ty(Context));
- Type *SelTy = Type::getInt32Ty(Context);
- Type *LPadSlotTy = StructType::get(ExnTy, SelTy, NULL);
-
- // This map links the invoke instruction with the eh.exception and eh.selector
- // calls associated with it.
- DenseMap<InvokeInst*, std::pair<Value*, Value*> > InvokeToIntrinsicsMap;
- for (Module::iterator
- I = M->begin(), E = M->end(); I != E; ++I) {
- Function &F = *I;
-
- for (Function::iterator
- II = F.begin(), IE = F.end(); II != IE; ++II) {
- BasicBlock *BB = &*II;
- InvokeInst *Inst = dyn_cast<InvokeInst>(BB->getTerminator());
- if (!Inst) continue;
- BasicBlock *UnwindDest = Inst->getUnwindDest();
- if (UnwindDest->isLandingPad()) continue; // Already converted.
-
- SmallPtrSet<BasicBlock*, 8> Visited;
- CallInst *Exn = 0;
- CallInst *Sel = 0;
- FindExnAndSelIntrinsics(UnwindDest, Exn, Sel, Visited);
- assert(Exn && Sel && "Cannot find eh.exception and eh.selector calls!");
- InvokeToIntrinsicsMap[Inst] = std::make_pair(Exn, Sel);
- }
- }
-
- // This map stores the slots where the exception object and selector value are
- // stored within a function.
- DenseMap<Function*, std::pair<Value*, Value*> > FnToLPadSlotMap;
- SmallPtrSet<Instruction*, 32> DeadInsts;
- for (DenseMap<InvokeInst*, std::pair<Value*, Value*> >::iterator
- I = InvokeToIntrinsicsMap.begin(), E = InvokeToIntrinsicsMap.end();
- I != E; ++I) {
- InvokeInst *Invoke = I->first;
- BasicBlock *UnwindDest = Invoke->getUnwindDest();
- Function *F = UnwindDest->getParent();
- std::pair<Value*, Value*> EHIntrinsics = I->second;
- CallInst *Exn = cast<CallInst>(EHIntrinsics.first);
- CallInst *Sel = cast<CallInst>(EHIntrinsics.second);
-
- // Store the exception object and selector value in the entry block.
- Value *ExnSlot = 0;
- Value *SelSlot = 0;
- if (!FnToLPadSlotMap[F].first) {
- BasicBlock *Entry = &F->front();
- ExnSlot = new AllocaInst(ExnTy, "exn", Entry->getTerminator());
- SelSlot = new AllocaInst(SelTy, "sel", Entry->getTerminator());
- FnToLPadSlotMap[F] = std::make_pair(ExnSlot, SelSlot);
- } else {
- ExnSlot = FnToLPadSlotMap[F].first;
- SelSlot = FnToLPadSlotMap[F].second;
- }
-
- if (!UnwindDest->getSinglePredecessor()) {
- // The unwind destination doesn't have a single predecessor. Create an
- // unwind destination which has only one predecessor.
- BasicBlock *NewBB = BasicBlock::Create(Context, "new.lpad",
- UnwindDest->getParent());
- BranchInst::Create(UnwindDest, NewBB);
- Invoke->setUnwindDest(NewBB);
-
- // Fix up any PHIs in the original unwind destination block.
- for (BasicBlock::iterator
- II = UnwindDest->begin(); isa<PHINode>(II); ++II) {
- PHINode *PN = cast<PHINode>(II);
- int Idx = PN->getBasicBlockIndex(Invoke->getParent());
- if (Idx == -1) continue;
- PN->setIncomingBlock(Idx, NewBB);
- }
-
- UnwindDest = NewBB;
- }
-
- IRBuilder<> Builder(Context);
- Builder.SetInsertPoint(UnwindDest, UnwindDest->getFirstInsertionPt());
-
- Value *PersFn = Sel->getArgOperand(1);
- LandingPadInst *LPI = Builder.CreateLandingPad(LPadSlotTy, PersFn, 0);
- Value *LPExn = Builder.CreateExtractValue(LPI, 0);
- Value *LPSel = Builder.CreateExtractValue(LPI, 1);
- Builder.CreateStore(LPExn, ExnSlot);
- Builder.CreateStore(LPSel, SelSlot);
-
- TransferClausesToLandingPadInst(LPI, Sel);
-
- DeadInsts.insert(Exn);
- DeadInsts.insert(Sel);
- }
-
- // Replace the old intrinsic calls with the values from the landingpad
- // instruction(s). These values were stored in allocas for us to use here.
- for (DenseMap<InvokeInst*, std::pair<Value*, Value*> >::iterator
- I = InvokeToIntrinsicsMap.begin(), E = InvokeToIntrinsicsMap.end();
- I != E; ++I) {
- std::pair<Value*, Value*> EHIntrinsics = I->second;
- CallInst *Exn = cast<CallInst>(EHIntrinsics.first);
- CallInst *Sel = cast<CallInst>(EHIntrinsics.second);
- BasicBlock *Parent = Exn->getParent();
-
- std::pair<Value*,Value*> ExnSelSlots = FnToLPadSlotMap[Parent->getParent()];
-
- IRBuilder<> Builder(Context);
- Builder.SetInsertPoint(Parent, Exn);
- LoadInst *LPExn = Builder.CreateLoad(ExnSelSlots.first, "exn.load");
- LoadInst *LPSel = Builder.CreateLoad(ExnSelSlots.second, "sel.load");
-
- Exn->replaceAllUsesWith(LPExn);
- Sel->replaceAllUsesWith(LPSel);
- }
-
- // Remove the dead instructions.
- for (SmallPtrSet<Instruction*, 32>::iterator
- I = DeadInsts.begin(), E = DeadInsts.end(); I != E; ++I) {
- Instruction *Inst = *I;
- Inst->eraseFromParent();
- }
-
- // Replace calls to "llvm.eh.resume" with the 'resume' instruction. Load the
- // exception and selector values from the stored place.
- Function *EHResume = M->getFunction("llvm.eh.resume");
- if (!EHResume) return;
-
- while (!EHResume->use_empty()) {
- CallInst *Resume = cast<CallInst>(EHResume->use_back());
- BasicBlock *BB = Resume->getParent();
-
- IRBuilder<> Builder(Context);
- Builder.SetInsertPoint(BB, Resume);
-
- Value *LPadVal =
- Builder.CreateInsertValue(UndefValue::get(LPadSlotTy),
- Resume->getArgOperand(0), 0, "lpad.val");
- LPadVal = Builder.CreateInsertValue(LPadVal, Resume->getArgOperand(1),
- 1, "lpad.val");
- Builder.CreateResume(LPadVal);
-
- // Remove all instructions after the 'resume.'
- BasicBlock::iterator I = Resume;
- while (I != BB->end()) {
- Instruction *Inst = &*I++;
- Inst->eraseFromParent();
- }
- }
-}
diff --git a/contrib/llvm/lib/VMCore/BasicBlock.cpp b/contrib/llvm/lib/VMCore/BasicBlock.cpp
index d0aa275..d353b0a 100644
--- a/contrib/llvm/lib/VMCore/BasicBlock.cpp
+++ b/contrib/llvm/lib/VMCore/BasicBlock.cpp
@@ -366,3 +366,6 @@ bool BasicBlock::isLandingPad() const {
LandingPadInst *BasicBlock::getLandingPadInst() {
return dyn_cast<LandingPadInst>(getFirstNonPHI());
}
+const LandingPadInst *BasicBlock::getLandingPadInst() const {
+ return dyn_cast<LandingPadInst>(getFirstNonPHI());
+}
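The const overload mirrors the existing accessor so code holding a const BasicBlock can query the landing pad without a const_cast; a sketch:

    #include "llvm/BasicBlock.h"
    #include "llvm/Instructions.h"

    using namespace llvm;

    bool hasCleanupLandingPad(const BasicBlock &BB) {
      if (const LandingPadInst *LPI = BB.getLandingPadInst()) // const overload
        return LPI->isCleanup();
      return false;
    }
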
diff --git a/contrib/llvm/lib/VMCore/ConstantFold.cpp b/contrib/llvm/lib/VMCore/ConstantFold.cpp
index 30bae71..b743287 100644
--- a/contrib/llvm/lib/VMCore/ConstantFold.cpp
+++ b/contrib/llvm/lib/VMCore/ConstantFold.cpp
@@ -38,11 +38,10 @@ using namespace llvm;
// ConstantFold*Instruction Implementations
//===----------------------------------------------------------------------===//
-/// BitCastConstantVector - Convert the specified ConstantVector node to the
+/// BitCastConstantVector - Convert the specified vector Constant node to the
/// specified vector type. At this point, we know that the elements of the
/// input vector constant are all simple integer or FP values.
-static Constant *BitCastConstantVector(ConstantVector *CV,
- VectorType *DstTy) {
+static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) {
if (CV->isAllOnesValue()) return Constant::getAllOnesValue(DstTy);
if (CV->isNullValue()) return Constant::getNullValue(DstTy);
@@ -51,22 +50,21 @@ static Constant *BitCastConstantVector(ConstantVector *CV,
// doing so requires endianness information. This should be handled by
// Analysis/ConstantFolding.cpp
unsigned NumElts = DstTy->getNumElements();
- if (NumElts != CV->getNumOperands())
+ if (NumElts != CV->getType()->getVectorNumElements())
return 0;
+
+ Type *DstEltTy = DstTy->getElementType();
// Check to verify that all elements of the input are simple.
+ SmallVector<Constant*, 16> Result;
for (unsigned i = 0; i != NumElts; ++i) {
- if (!isa<ConstantInt>(CV->getOperand(i)) &&
- !isa<ConstantFP>(CV->getOperand(i)))
- return 0;
+ Constant *C = CV->getAggregateElement(i);
+ if (C == 0) return 0;
+ C = ConstantExpr::getBitCast(C, DstEltTy);
+ if (isa<ConstantExpr>(C)) return 0;
+ Result.push_back(C);
}
- // Bitcast each element now.
- std::vector<Constant*> Result;
- Type *DstEltTy = DstTy->getElementType();
- for (unsigned i = 0; i != NumElts; ++i)
- Result.push_back(ConstantExpr::getBitCast(CV->getOperand(i),
- DstEltTy));
return ConstantVector::get(Result);
}
@@ -104,7 +102,8 @@ static Constant *FoldBitCast(Constant *V, Type *DestTy) {
// the first element. If so, return the appropriate GEP instruction.
if (PointerType *PTy = dyn_cast<PointerType>(V->getType()))
if (PointerType *DPTy = dyn_cast<PointerType>(DestTy))
- if (PTy->getAddressSpace() == DPTy->getAddressSpace()) {
+ if (PTy->getAddressSpace() == DPTy->getAddressSpace()
+ && DPTy->getElementType()->isSized()) {
SmallVector<Value*, 8> IdxList;
Value *Zero =
Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
@@ -141,8 +140,8 @@ static Constant *FoldBitCast(Constant *V, Type *DestTy) {
if (isa<ConstantAggregateZero>(V))
return Constant::getNullValue(DestTy);
- if (ConstantVector *CV = dyn_cast<ConstantVector>(V))
- return BitCastConstantVector(CV, DestPTy);
+ // Handle ConstantVector and ConstantDataVector.
+ return BitCastConstantVector(V, DestPTy);
}
// Canonicalize scalar-to-vector bitcasts into vector-to-vector bitcasts
@@ -548,18 +547,17 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
// If the cast operand is a constant vector, perform the cast by
// operating on each element. In the cast of bitcasts, the element
// count may be mismatched; don't attempt to handle that here.
- if (ConstantVector *CV = dyn_cast<ConstantVector>(V))
- if (DestTy->isVectorTy() &&
- cast<VectorType>(DestTy)->getNumElements() ==
- CV->getType()->getNumElements()) {
- std::vector<Constant*> res;
- VectorType *DestVecTy = cast<VectorType>(DestTy);
- Type *DstEltTy = DestVecTy->getElementType();
- for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
- res.push_back(ConstantExpr::getCast(opc,
- CV->getOperand(i), DstEltTy));
- return ConstantVector::get(res);
- }
+ if ((isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) &&
+ DestTy->isVectorTy() &&
+ DestTy->getVectorNumElements() == V->getType()->getVectorNumElements()) {
+ SmallVector<Constant*, 16> res;
+ VectorType *DestVecTy = cast<VectorType>(DestTy);
+ Type *DstEltTy = DestVecTy->getElementType();
+ for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i)
+ res.push_back(ConstantExpr::getCast(opc,
+ V->getAggregateElement(i), DstEltTy));
+ return ConstantVector::get(res);
+ }
// We actually have to do a cast now. Perform the cast according to the
// opcode specified.
@@ -571,7 +569,8 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
bool ignored;
APFloat Val = FPC->getValueAPF();
- Val.convert(DestTy->isFloatTy() ? APFloat::IEEEsingle :
+ Val.convert(DestTy->isHalfTy() ? APFloat::IEEEhalf :
+ DestTy->isFloatTy() ? APFloat::IEEEsingle :
DestTy->isDoubleTy() ? APFloat::IEEEdouble :
DestTy->isX86_FP80Ty() ? APFloat::x87DoubleExtended :
DestTy->isFP128Ty() ? APFloat::IEEEquad :
@@ -690,45 +689,27 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
Constant *V1, Constant *V2) {
- if (ConstantInt *CB = dyn_cast<ConstantInt>(Cond))
- return CB->getZExtValue() ? V1 : V2;
-
- // Check for zero aggregate and ConstantVector of zeros
+ // Check for i1 and vector true/false conditions.
if (Cond->isNullValue()) return V2;
-
- if (ConstantVector* CondV = dyn_cast<ConstantVector>(Cond)) {
-
- if (CondV->isAllOnesValue()) return V1;
-
- VectorType *VTy = cast<VectorType>(V1->getType());
- ConstantVector *CP1 = dyn_cast<ConstantVector>(V1);
- ConstantVector *CP2 = dyn_cast<ConstantVector>(V2);
-
- if ((CP1 || isa<ConstantAggregateZero>(V1)) &&
- (CP2 || isa<ConstantAggregateZero>(V2))) {
-
- // Find the element type of the returned vector
- Type *EltTy = VTy->getElementType();
- unsigned NumElem = VTy->getNumElements();
- std::vector<Constant*> Res(NumElem);
-
- bool Valid = true;
- for (unsigned i = 0; i < NumElem; ++i) {
- ConstantInt* c = dyn_cast<ConstantInt>(CondV->getOperand(i));
- if (!c) {
- Valid = false;
- break;
- }
- Constant *C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- Constant *C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res[i] = c->getZExtValue() ? C1 : C2;
- }
- // If we were able to build the vector, return it
- if (Valid) return ConstantVector::get(Res);
+ if (Cond->isAllOnesValue()) return V1;
+
+ // If the condition is a vector constant, fold the result elementwise.
+ if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) {
+ SmallVector<Constant*, 16> Result;
+ for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){
+ ConstantInt *Cond = dyn_cast<ConstantInt>(CondV->getOperand(i));
+ if (Cond == 0) break;
+
+ Constant *Res = (Cond->getZExtValue() ? V1 : V2)->getAggregateElement(i);
+ if (Res == 0) break;
+ Result.push_back(Res);
}
+
+ // If we were able to build the vector, return it.
+ if (Result.size() == V1->getType()->getVectorNumElements())
+ return ConstantVector::get(Result);
}
-
if (isa<UndefValue>(Cond)) {
if (isa<UndefValue>(V1)) return V1;
return V2;
@@ -754,22 +735,19 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
Constant *Idx) {
if (isa<UndefValue>(Val)) // ee(undef, x) -> undef
- return UndefValue::get(cast<VectorType>(Val->getType())->getElementType());
+ return UndefValue::get(Val->getType()->getVectorElementType());
if (Val->isNullValue()) // ee(zero, x) -> zero
- return Constant::getNullValue(
- cast<VectorType>(Val->getType())->getElementType());
-
- if (ConstantVector *CVal = dyn_cast<ConstantVector>(Val)) {
- if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx)) {
- uint64_t Index = CIdx->getZExtValue();
- if (Index >= CVal->getNumOperands())
- // ee({w,x,y,z}, wrong_value) -> undef
- return UndefValue::get(cast<VectorType>(Val->getType())->getElementType());
- return CVal->getOperand(CIdx->getZExtValue());
- } else if (isa<UndefValue>(Idx)) {
- // ee({w,x,y,z}, undef) -> undef
- return UndefValue::get(cast<VectorType>(Val->getType())->getElementType());
- }
+ return Constant::getNullValue(Val->getType()->getVectorElementType());
+ // ee({w,x,y,z}, undef) -> undef
+ if (isa<UndefValue>(Idx))
+ return UndefValue::get(Val->getType()->getVectorElementType());
+
+ if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx)) {
+ uint64_t Index = CIdx->getZExtValue();
+ // ee({w,x,y,z}, wrong_value) -> undef
+ if (Index >= Val->getType()->getVectorNumElements())
+ return UndefValue::get(Val->getType()->getVectorElementType());
+ return Val->getAggregateElement(Index);
}
return 0;
}
@@ -779,103 +757,55 @@ Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
Constant *Idx) {
ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx);
if (!CIdx) return 0;
- APInt idxVal = CIdx->getValue();
- if (isa<UndefValue>(Val)) {
- // Insertion of scalar constant into vector undef
- // Optimize away insertion of undef
- if (isa<UndefValue>(Elt))
- return Val;
- // Otherwise break the aggregate undef into multiple undefs and do
- // the insertion
- unsigned numOps =
- cast<VectorType>(Val->getType())->getNumElements();
- std::vector<Constant*> Ops;
- Ops.reserve(numOps);
- for (unsigned i = 0; i < numOps; ++i) {
- Constant *Op =
- (idxVal == i) ? Elt : UndefValue::get(Elt->getType());
- Ops.push_back(Op);
- }
- return ConstantVector::get(Ops);
- }
- if (isa<ConstantAggregateZero>(Val)) {
- // Insertion of scalar constant into vector aggregate zero
- // Optimize away insertion of zero
- if (Elt->isNullValue())
- return Val;
- // Otherwise break the aggregate zero into multiple zeros and do
- // the insertion
- unsigned numOps =
- cast<VectorType>(Val->getType())->getNumElements();
- std::vector<Constant*> Ops;
- Ops.reserve(numOps);
- for (unsigned i = 0; i < numOps; ++i) {
- Constant *Op =
- (idxVal == i) ? Elt : Constant::getNullValue(Elt->getType());
- Ops.push_back(Op);
- }
- return ConstantVector::get(Ops);
- }
- if (ConstantVector *CVal = dyn_cast<ConstantVector>(Val)) {
- // Insertion of scalar constant into vector constant
- std::vector<Constant*> Ops;
- Ops.reserve(CVal->getNumOperands());
- for (unsigned i = 0; i < CVal->getNumOperands(); ++i) {
- Constant *Op =
- (idxVal == i) ? Elt : cast<Constant>(CVal->getOperand(i));
- Ops.push_back(Op);
+ const APInt &IdxVal = CIdx->getValue();
+
+ SmallVector<Constant*, 16> Result;
+ for (unsigned i = 0, e = Val->getType()->getVectorNumElements(); i != e; ++i){
+ if (i == IdxVal) {
+ Result.push_back(Elt);
+ continue;
}
- return ConstantVector::get(Ops);
+
+ if (Constant *C = Val->getAggregateElement(i))
+ Result.push_back(C);
+ else
+ return 0;
}
-
- return 0;
-}
-
-/// GetVectorElement - If C is a ConstantVector, ConstantAggregateZero or Undef
-/// return the specified element value. Otherwise return null.
-static Constant *GetVectorElement(Constant *C, unsigned EltNo) {
- if (ConstantVector *CV = dyn_cast<ConstantVector>(C))
- return CV->getOperand(EltNo);
-
- Type *EltTy = cast<VectorType>(C->getType())->getElementType();
- if (isa<ConstantAggregateZero>(C))
- return Constant::getNullValue(EltTy);
- if (isa<UndefValue>(C))
- return UndefValue::get(EltTy);
- return 0;
+
+ return ConstantVector::get(Result);
}
Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1,
Constant *V2,
Constant *Mask) {
+ unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
+ Type *EltTy = V1->getType()->getVectorElementType();
+
// Undefined shuffle mask -> undefined value.
- if (isa<UndefValue>(Mask)) return UndefValue::get(V1->getType());
+ if (isa<UndefValue>(Mask))
+ return UndefValue::get(VectorType::get(EltTy, MaskNumElts));
- unsigned MaskNumElts = cast<VectorType>(Mask->getType())->getNumElements();
- unsigned SrcNumElts = cast<VectorType>(V1->getType())->getNumElements();
- Type *EltTy = cast<VectorType>(V1->getType())->getElementType();
+ // Don't break the bitcode reader hack.
+ if (isa<ConstantExpr>(Mask)) return 0;
+
+ unsigned SrcNumElts = V1->getType()->getVectorNumElements();
// Loop over the shuffle mask, evaluating each element.
SmallVector<Constant*, 32> Result;
for (unsigned i = 0; i != MaskNumElts; ++i) {
- Constant *InElt = GetVectorElement(Mask, i);
- if (InElt == 0) return 0;
-
- if (isa<UndefValue>(InElt))
- InElt = UndefValue::get(EltTy);
- else if (ConstantInt *CI = dyn_cast<ConstantInt>(InElt)) {
- unsigned Elt = CI->getZExtValue();
- if (Elt >= SrcNumElts*2)
- InElt = UndefValue::get(EltTy);
- else if (Elt >= SrcNumElts)
- InElt = GetVectorElement(V2, Elt - SrcNumElts);
- else
- InElt = GetVectorElement(V1, Elt);
- if (InElt == 0) return 0;
- } else {
- // Unknown value.
- return 0;
+ int Elt = ShuffleVectorInst::getMaskValue(Mask, i);
+ if (Elt == -1) {
+ Result.push_back(UndefValue::get(EltTy));
+ continue;
}
+ Constant *InElt;
+ if (unsigned(Elt) >= SrcNumElts*2)
+ InElt = UndefValue::get(EltTy);
+ else if (unsigned(Elt) >= SrcNumElts)
+ InElt = V2->getAggregateElement(Elt - SrcNumElts);
+ else
+ InElt = V1->getAggregateElement(Elt);
+ if (InElt == 0) return 0;
Result.push_back(InElt);
}
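getMaskValue() returns -1 for undef lanes, so the rewritten loop treats undef, left-source and right-source indices uniformly, and getAggregateElement() covers ConstantVector, ConstantDataVector and ConstantAggregateZero inputs alike. The mask convention, as a standalone sketch (helper name is illustrative):

    #include "llvm/Constants.h"
    #include "llvm/Instructions.h"

    using namespace llvm;

    // Shuffle masks: -1 means undef, [0,N) selects V1, [N,2N) selects V2.
    Constant *pickLane(Constant *V1, Constant *V2, Constant *Mask,
                       unsigned i, unsigned SrcNumElts) {
      Type *EltTy = V1->getType()->getVectorElementType();
      int Elt = ShuffleVectorInst::getMaskValue(Mask, i);
      if (Elt < 0 || unsigned(Elt) >= SrcNumElts * 2)
        return UndefValue::get(EltTy);   // undef lane or out-of-range index
      return unsigned(Elt) < SrcNumElts  // may be null; folding then gives up
                 ? V1->getAggregateElement(unsigned(Elt))
                 : V2->getAggregateElement(unsigned(Elt) - SrcNumElts);
    }
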
@@ -888,26 +818,10 @@ Constant *llvm::ConstantFoldExtractValueInstruction(Constant *Agg,
if (Idxs.empty())
return Agg;
- if (isa<UndefValue>(Agg)) // ev(undef, x) -> undef
- return UndefValue::get(ExtractValueInst::getIndexedType(Agg->getType(),
- Idxs));
-
- if (isa<ConstantAggregateZero>(Agg)) // ev(0, x) -> 0
- return
- Constant::getNullValue(ExtractValueInst::getIndexedType(Agg->getType(),
- Idxs));
-
- // Otherwise recurse.
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg))
- return ConstantFoldExtractValueInstruction(CS->getOperand(Idxs[0]),
- Idxs.slice(1));
-
- if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg))
- return ConstantFoldExtractValueInstruction(CA->getOperand(Idxs[0]),
- Idxs.slice(1));
- ConstantVector *CV = cast<ConstantVector>(Agg);
- return ConstantFoldExtractValueInstruction(CV->getOperand(Idxs[0]),
- Idxs.slice(1));
+ if (Constant *C = Agg->getAggregateElement(Idxs[0]))
+ return ConstantFoldExtractValueInstruction(C, Idxs.slice(1));
+
+ return 0;
}
Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
@@ -917,84 +831,30 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
if (Idxs.empty())
return Val;
- if (isa<UndefValue>(Agg)) {
- // Insertion of constant into aggregate undef
- // Optimize away insertion of undef.
- if (isa<UndefValue>(Val))
- return Agg;
-
- // Otherwise break the aggregate undef into multiple undefs and do
- // the insertion.
- CompositeType *AggTy = cast<CompositeType>(Agg->getType());
- unsigned numOps;
- if (ArrayType *AR = dyn_cast<ArrayType>(AggTy))
- numOps = AR->getNumElements();
- else
- numOps = cast<StructType>(AggTy)->getNumElements();
-
- std::vector<Constant*> Ops(numOps);
- for (unsigned i = 0; i < numOps; ++i) {
- Type *MemberTy = AggTy->getTypeAtIndex(i);
- Constant *Op =
- (Idxs[0] == i) ?
- ConstantFoldInsertValueInstruction(UndefValue::get(MemberTy),
- Val, Idxs.slice(1)) :
- UndefValue::get(MemberTy);
- Ops[i] = Op;
- }
-
- if (StructType* ST = dyn_cast<StructType>(AggTy))
- return ConstantStruct::get(ST, Ops);
- return ConstantArray::get(cast<ArrayType>(AggTy), Ops);
- }
+ unsigned NumElts;
+ if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
+ NumElts = ST->getNumElements();
+ else if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType()))
+ NumElts = AT->getNumElements();
+ else
+ NumElts = Agg->getType()->getVectorNumElements();
- if (isa<ConstantAggregateZero>(Agg)) {
- // Insertion of constant into aggregate zero
- // Optimize away insertion of zero.
- if (Val->isNullValue())
- return Agg;
-
- // Otherwise break the aggregate zero into multiple zeros and do
- // the insertion.
- CompositeType *AggTy = cast<CompositeType>(Agg->getType());
- unsigned numOps;
- if (ArrayType *AR = dyn_cast<ArrayType>(AggTy))
- numOps = AR->getNumElements();
- else
- numOps = cast<StructType>(AggTy)->getNumElements();
+ SmallVector<Constant*, 32> Result;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *C = Agg->getAggregateElement(i);
+ if (C == 0) return 0;
- std::vector<Constant*> Ops(numOps);
- for (unsigned i = 0; i < numOps; ++i) {
- Type *MemberTy = AggTy->getTypeAtIndex(i);
- Constant *Op =
- (Idxs[0] == i) ?
- ConstantFoldInsertValueInstruction(Constant::getNullValue(MemberTy),
- Val, Idxs.slice(1)) :
- Constant::getNullValue(MemberTy);
- Ops[i] = Op;
- }
+ if (Idxs[0] == i)
+ C = ConstantFoldInsertValueInstruction(C, Val, Idxs.slice(1));
- if (StructType *ST = dyn_cast<StructType>(AggTy))
- return ConstantStruct::get(ST, Ops);
- return ConstantArray::get(cast<ArrayType>(AggTy), Ops);
+ Result.push_back(C);
}
- if (isa<ConstantStruct>(Agg) || isa<ConstantArray>(Agg)) {
- // Insertion of constant into aggregate constant.
- std::vector<Constant*> Ops(Agg->getNumOperands());
- for (unsigned i = 0; i < Agg->getNumOperands(); ++i) {
- Constant *Op = cast<Constant>(Agg->getOperand(i));
- if (Idxs[0] == i)
- Op = ConstantFoldInsertValueInstruction(Op, Val, Idxs.slice(1));
- Ops[i] = Op;
- }
-
- if (StructType* ST = dyn_cast<StructType>(Agg->getType()))
- return ConstantStruct::get(ST, Ops);
- return ConstantArray::get(cast<ArrayType>(Agg->getType()), Ops);
- }
-
- return 0;
+ if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
+ return ConstantStruct::get(ST, Result);
+ if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType()))
+ return ConstantArray::get(AT, Result);
+ return ConstantVector::get(Result);
}
@@ -1172,7 +1032,6 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
// At this point we know neither constant is an UndefValue.
if (ConstantInt *CI1 = dyn_cast<ConstantInt>(C1)) {
if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
- using namespace APIntOps;
const APInt &C1V = CI1->getValue();
const APInt &C2V = CI2->getValue();
switch (Opcode) {
@@ -1269,145 +1128,18 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
}
} else if (VectorType *VTy = dyn_cast<VectorType>(C1->getType())) {
- ConstantVector *CP1 = dyn_cast<ConstantVector>(C1);
- ConstantVector *CP2 = dyn_cast<ConstantVector>(C2);
- if ((CP1 != NULL || isa<ConstantAggregateZero>(C1)) &&
- (CP2 != NULL || isa<ConstantAggregateZero>(C2))) {
- std::vector<Constant*> Res;
- Type* EltTy = VTy->getElementType();
- Constant *C1 = 0;
- Constant *C2 = 0;
- switch (Opcode) {
- default:
- break;
- case Instruction::Add:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getAdd(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::FAdd:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getFAdd(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::Sub:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getSub(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::FSub:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getFSub(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::Mul:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getMul(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::FMul:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getFMul(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::UDiv:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getUDiv(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::SDiv:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getSDiv(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::FDiv:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getFDiv(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::URem:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getURem(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::SRem:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getSRem(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::FRem:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getFRem(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::And:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getAnd(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::Or:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getOr(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::Xor:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getXor(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::LShr:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getLShr(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::AShr:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getAShr(C1, C2));
- }
- return ConstantVector::get(Res);
- case Instruction::Shl:
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- C1 = CP1 ? CP1->getOperand(i) : Constant::getNullValue(EltTy);
- C2 = CP2 ? CP2->getOperand(i) : Constant::getNullValue(EltTy);
- Res.push_back(ConstantExpr::getShl(C1, C2));
- }
- return ConstantVector::get(Res);
- }
+ // Perform elementwise folding.
+ SmallVector<Constant*, 16> Result;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
+ Constant *LHS = C1->getAggregateElement(i);
+ Constant *RHS = C2->getAggregateElement(i);
+ if (LHS == 0 || RHS == 0) break;
+
+ Result.push_back(ConstantExpr::get(Opcode, LHS, RHS));
}
+
+ if (Result.size() == VTy->getNumElements())
+ return ConstantVector::get(Result);
}
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
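The fifteen per-opcode loops collapse into one because ConstantExpr::get() dispatches on the opcode and performs the element fold itself. For instance, the single loop now folds a vector add like this sketch (illustrative values):

    #include "llvm/Constants.h"
    #include "llvm/Instruction.h"

    using namespace llvm;

    Constant *foldVectorAdd(LLVMContext &Ctx) {
      Type *I32 = Type::getInt32Ty(Ctx);
      Constant *A[] = { ConstantInt::get(I32, 1), ConstantInt::get(I32, 2) };
      Constant *B[] = { ConstantInt::get(I32, 3), ConstantInt::get(I32, 4) };
      return ConstantExpr::get(Instruction::Add,          // <2 x i32> <4, 6>
                               ConstantVector::get(A), ConstantVector::get(B));
    }
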
@@ -1906,7 +1638,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
APInt V1 = cast<ConstantInt>(C1)->getValue();
APInt V2 = cast<ConstantInt>(C2)->getValue();
switch (pred) {
- default: llvm_unreachable("Invalid ICmp Predicate"); return 0;
+ default: llvm_unreachable("Invalid ICmp Predicate");
case ICmpInst::ICMP_EQ: return ConstantInt::get(ResultTy, V1 == V2);
case ICmpInst::ICMP_NE: return ConstantInt::get(ResultTy, V1 != V2);
case ICmpInst::ICMP_SLT: return ConstantInt::get(ResultTy, V1.slt(V2));
@@ -1923,7 +1655,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
APFloat C2V = cast<ConstantFP>(C2)->getValueAPF();
APFloat::cmpResult R = C1V.compare(C2V);
switch (pred) {
- default: llvm_unreachable("Invalid FCmp Predicate"); return 0;
+ default: llvm_unreachable("Invalid FCmp Predicate");
case FCmpInst::FCMP_FALSE: return Constant::getNullValue(ResultTy);
case FCmpInst::FCMP_TRUE: return Constant::getAllOnesValue(ResultTy);
case FCmpInst::FCMP_UNO:
@@ -1962,20 +1694,20 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
R==APFloat::cmpEqual);
}
} else if (C1->getType()->isVectorTy()) {
- SmallVector<Constant*, 16> C1Elts, C2Elts;
- C1->getVectorElements(C1Elts);
- C2->getVectorElements(C2Elts);
- if (C1Elts.empty() || C2Elts.empty())
- return 0;
-
// If we can constant fold the comparison of each element, constant fold
// the whole vector comparison.
SmallVector<Constant*, 4> ResElts;
// Compare the elements, producing an i1 result or constant expr.
- for (unsigned i = 0, e = C1Elts.size(); i != e; ++i)
- ResElts.push_back(ConstantExpr::getCompare(pred, C1Elts[i], C2Elts[i]));
-
- return ConstantVector::get(ResElts);
+ for (unsigned i = 0, e = C1->getType()->getVectorNumElements(); i != e;++i){
+ Constant *C1E = C1->getAggregateElement(i);
+ Constant *C2E = C2->getAggregateElement(i);
+ if (C1E == 0 || C2E == 0) break;
+
+ ResElts.push_back(ConstantExpr::getCompare(pred, C1E, C2E));
+ }
+
+ if (ResElts.size() == C1->getType()->getVectorNumElements())
+ return ConstantVector::get(ResElts);
}
if (C1->getType()->isFloatingPointTy()) {
@@ -2209,7 +1941,7 @@ static Constant *ConstantFoldGetElementPtrImpl(Constant *C,
I != E; ++I)
LastTy = *I;
- if ((LastTy && LastTy->isArrayTy()) || Idx0->isNullValue()) {
+ if ((LastTy && isa<SequentialType>(LastTy)) || Idx0->isNullValue()) {
SmallVector<Value*, 16> NewIndices;
NewIndices.reserve(Idxs.size() + CE->getNumOperands());
for (unsigned i = 1, e = CE->getNumOperands()-1; i != e; ++i)
diff --git a/contrib/llvm/lib/VMCore/Constants.cpp b/contrib/llvm/lib/VMCore/Constants.cpp
index a84a046..6dbc144 100644
--- a/contrib/llvm/lib/VMCore/Constants.cpp
+++ b/contrib/llvm/lib/VMCore/Constants.cpp
@@ -40,6 +40,8 @@ using namespace llvm;
// Constant Class
//===----------------------------------------------------------------------===//
+void Constant::anchor() { }
+
bool Constant::isNegativeZeroValue() const {
// Floating point values have an explicit -0.0 value.
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
@@ -71,17 +73,27 @@ bool Constant::isAllOnesValue() const {
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
return CFP->getValueAPF().bitcastToAPInt().isAllOnesValue();
- // Check for constant vectors
+ // Check for constant vectors which are splats of -1 values.
if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
- return CV->isAllOnesValue();
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isAllOnesValue();
+
+ // Check for constant data vectors which are splats of -1 values.
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ if (Constant *Splat = CV->getSplatValue())
+ return Splat->isAllOnesValue();
return false;
}
+
// Constructor to create a '0' constant of arbitrary type...
Constant *Constant::getNullValue(Type *Ty) {
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
return ConstantInt::get(Ty, 0);
+ case Type::HalfTyID:
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(APFloat::IEEEhalf));
case Type::FloatTyID:
return ConstantFP::get(Ty->getContext(),
APFloat::getZero(APFloat::IEEEsingle));
@@ -105,8 +117,7 @@ Constant *Constant::getNullValue(Type *Ty) {
return ConstantAggregateZero::get(Ty);
default:
// Function, Label, or Opaque type?
- assert(0 && "Cannot create a null constant of that type!");
- return 0;
+ llvm_unreachable("Cannot create a null constant of that type!");
}
}
@@ -122,7 +133,7 @@ Constant *Constant::getIntegerValue(Type *Ty, const APInt &V) {
// Broadcast a scalar to a vector, if necessary.
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
- C = ConstantVector::get(std::vector<Constant *>(VTy->getNumElements(), C));
+ C = ConstantVector::getSplat(VTy->getNumElements(), C);
return C;
}
@@ -138,13 +149,44 @@ Constant *Constant::getAllOnesValue(Type *Ty) {
return ConstantFP::get(Ty->getContext(), FL);
}
- SmallVector<Constant*, 16> Elts;
VectorType *VTy = cast<VectorType>(Ty);
- Elts.resize(VTy->getNumElements(), getAllOnesValue(VTy->getElementType()));
- assert(Elts[0] && "Invalid AllOnes value!");
- return cast<ConstantVector>(ConstantVector::get(Elts));
+ return ConstantVector::getSplat(VTy->getNumElements(),
+ getAllOnesValue(VTy->getElementType()));
}
+/// getAggregateElement - For aggregates (struct/array/vector) return the
+/// constant that corresponds to the specified element if possible, or null if
+/// not. This can return null if the element index is a ConstantExpr, or if
+/// 'this' is a constant expr.
+Constant *Constant::getAggregateElement(unsigned Elt) const {
+ if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(this))
+ return Elt < CS->getNumOperands() ? CS->getOperand(Elt) : 0;
+
+ if (const ConstantArray *CA = dyn_cast<ConstantArray>(this))
+ return Elt < CA->getNumOperands() ? CA->getOperand(Elt) : 0;
+
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ return Elt < CV->getNumOperands() ? CV->getOperand(Elt) : 0;
+
+ if (const ConstantAggregateZero *CAZ =dyn_cast<ConstantAggregateZero>(this))
+ return CAZ->getElementValue(Elt);
+
+ if (const UndefValue *UV = dyn_cast<UndefValue>(this))
+ return UV->getElementValue(Elt);
+
+ if (const ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(this))
+ return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt) : 0;
+ return 0;
+}
+
+Constant *Constant::getAggregateElement(Constant *Elt) const {
+ assert(isa<IntegerType>(Elt->getType()) && "Index must be an integer");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Elt))
+ return getAggregateElement(CI->getZExtValue());
+ return 0;
+}
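The point of the two accessors above is that callers no longer care which concrete aggregate they hold. A hypothetical caller, continuing the conventions of the earlier sketch (note the 0u literal, which avoids ambiguity with the Constant* overload):

    // Agg may be a ConstantStruct, ConstantArray, ConstantVector,
    // ConstantAggregateZero, UndefValue, or ConstantDataSequential.
    void inspectFirstElement(Constant *Agg) {
      if (Constant *Elt = Agg->getAggregateElement(0u)) {
        Elt->dump();  // uniform access regardless of representation
      } else {
        // Out-of-range index, or an aggregate we cannot decompose
        // (e.g. a ConstantExpr).
      }
    }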
+
+
void Constant::destroyConstantImpl() {
// When a Constant is destroyed, there may be lingering
// references to the constant by other constants in the constant pool. These
@@ -163,8 +205,7 @@ void Constant::destroyConstantImpl() {
}
#endif
assert(isa<Constant>(V) && "References remain to Constant being destroyed");
- Constant *CV = cast<Constant>(V);
- CV->destroyConstant();
+ cast<Constant>(V)->destroyConstant();
// The constant should remove itself from our use list...
assert((use_empty() || use_back() != V) && "Constant not removed!");
@@ -270,36 +311,6 @@ Constant::PossibleRelocationsTy Constant::getRelocationInfo() const {
return Result;
}
-
-/// getVectorElements - This method, which is only valid on constant of vector
-/// type, returns the elements of the vector in the specified smallvector.
-/// This handles breaking down a vector undef into undef elements, etc. For
-/// constant exprs and other cases we can't handle, we return an empty vector.
-void Constant::getVectorElements(SmallVectorImpl<Constant*> &Elts) const {
- assert(getType()->isVectorTy() && "Not a vector constant!");
-
- if (const ConstantVector *CV = dyn_cast<ConstantVector>(this)) {
- for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i)
- Elts.push_back(CV->getOperand(i));
- return;
- }
-
- VectorType *VT = cast<VectorType>(getType());
- if (isa<ConstantAggregateZero>(this)) {
- Elts.assign(VT->getNumElements(),
- Constant::getNullValue(VT->getElementType()));
- return;
- }
-
- if (isa<UndefValue>(this)) {
- Elts.assign(VT->getNumElements(), UndefValue::get(VT->getElementType()));
- return;
- }
-
- // Unknown type, must be constant expr etc.
-}
-
-
/// removeDeadUsersOfConstant - If the specified constantexpr is dead, remove
/// it. This involves recursively eliminating any dead users of the
/// constantexpr.
@@ -358,6 +369,8 @@ void Constant::removeDeadConstantUsers() const {
// ConstantInt
//===----------------------------------------------------------------------===//
+void ConstantInt::anchor() { }
+
ConstantInt::ConstantInt(IntegerType *Ty, const APInt& V)
: Constant(Ty, ConstantIntVal, 0, 0), Val(V) {
assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type");
@@ -385,9 +398,8 @@ Constant *ConstantInt::getTrue(Type *Ty) {
}
assert(VTy->getElementType()->isIntegerTy(1) &&
"True must be vector of i1 or i1.");
- SmallVector<Constant*, 16> Splat(VTy->getNumElements(),
- ConstantInt::getTrue(Ty->getContext()));
- return ConstantVector::get(Splat);
+ return ConstantVector::getSplat(VTy->getNumElements(),
+ ConstantInt::getTrue(Ty->getContext()));
}
Constant *ConstantInt::getFalse(Type *Ty) {
@@ -398,9 +410,8 @@ Constant *ConstantInt::getFalse(Type *Ty) {
}
assert(VTy->getElementType()->isIntegerTy(1) &&
"False must be vector of i1 or i1.");
- SmallVector<Constant*, 16> Splat(VTy->getNumElements(),
- ConstantInt::getFalse(Ty->getContext()));
- return ConstantVector::get(Splat);
+ return ConstantVector::getSplat(VTy->getNumElements(),
+ ConstantInt::getFalse(Ty->getContext()));
}
@@ -424,18 +435,17 @@ Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) {
// For vectors, broadcast the value.
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
- return ConstantVector::get(SmallVector<Constant*,
- 16>(VTy->getNumElements(), C));
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
return C;
}
-ConstantInt* ConstantInt::get(IntegerType* Ty, uint64_t V,
+ConstantInt *ConstantInt::get(IntegerType *Ty, uint64_t V,
bool isSigned) {
return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned));
}
-ConstantInt* ConstantInt::getSigned(IntegerType* Ty, int64_t V) {
+ConstantInt *ConstantInt::getSigned(IntegerType *Ty, int64_t V) {
return get(Ty, V, true);
}
@@ -443,20 +453,19 @@ Constant *ConstantInt::getSigned(Type *Ty, int64_t V) {
return get(Ty, V, true);
}
-Constant *ConstantInt::get(Type* Ty, const APInt& V) {
+Constant *ConstantInt::get(Type *Ty, const APInt& V) {
ConstantInt *C = get(Ty->getContext(), V);
assert(C->getType() == Ty->getScalarType() &&
"ConstantInt type doesn't match the type implied by its value!");
// For vectors, broadcast the value.
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
- return ConstantVector::get(
- SmallVector<Constant *, 16>(VTy->getNumElements(), C));
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
return C;
}
-ConstantInt* ConstantInt::get(IntegerType* Ty, StringRef Str,
+ConstantInt *ConstantInt::get(IntegerType* Ty, StringRef Str,
uint8_t radix) {
return get(Ty->getContext(), APInt(Ty->getBitWidth(), Str, radix));
}
@@ -466,6 +475,8 @@ ConstantInt* ConstantInt::get(IntegerType* Ty, StringRef Str,
//===----------------------------------------------------------------------===//
static const fltSemantics *TypeToFloatSemantics(Type *Ty) {
+ if (Ty->isHalfTy())
+ return &APFloat::IEEEhalf;
if (Ty->isFloatTy())
return &APFloat::IEEEsingle;
if (Ty->isDoubleTy())
@@ -479,10 +490,12 @@ static const fltSemantics *TypeToFloatSemantics(Type *Ty) {
return &APFloat::PPCDoubleDouble;
}
+void ConstantFP::anchor() { }
+
/// get() - This returns a constant fp for the specified value in the
/// specified type. This should only be used for simple constant values like
/// 2.0/1.0 etc, that are known-valid both as double and as the target format.
-Constant *ConstantFP::get(Type* Ty, double V) {
+Constant *ConstantFP::get(Type *Ty, double V) {
LLVMContext &Context = Ty->getContext();
APFloat FV(V);
@@ -493,14 +506,13 @@ Constant *ConstantFP::get(Type* Ty, double V) {
// For vectors, broadcast the value.
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
- return ConstantVector::get(
- SmallVector<Constant *, 16>(VTy->getNumElements(), C));
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
return C;
}
-Constant *ConstantFP::get(Type* Ty, StringRef Str) {
+Constant *ConstantFP::get(Type *Ty, StringRef Str) {
LLVMContext &Context = Ty->getContext();
APFloat FV(*TypeToFloatSemantics(Ty->getScalarType()), Str);
@@ -508,31 +520,28 @@ Constant *ConstantFP::get(Type* Ty, StringRef Str) {
// For vectors, broadcast the value.
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
- return ConstantVector::get(
- SmallVector<Constant *, 16>(VTy->getNumElements(), C));
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
return C;
}
-ConstantFP* ConstantFP::getNegativeZero(Type* Ty) {
+ConstantFP *ConstantFP::getNegativeZero(Type *Ty) {
LLVMContext &Context = Ty->getContext();
- APFloat apf = cast <ConstantFP>(Constant::getNullValue(Ty))->getValueAPF();
+ APFloat apf = cast<ConstantFP>(Constant::getNullValue(Ty))->getValueAPF();
apf.changeSign();
return get(Context, apf);
}
-Constant *ConstantFP::getZeroValueForNegation(Type* Ty) {
- if (VectorType *PTy = dyn_cast<VectorType>(Ty))
- if (PTy->getElementType()->isFloatingPointTy()) {
- SmallVector<Constant*, 16> zeros(PTy->getNumElements(),
- getNegativeZero(PTy->getElementType()));
- return ConstantVector::get(zeros);
- }
-
- if (Ty->isFloatingPointTy())
- return getNegativeZero(Ty);
+Constant *ConstantFP::getZeroValueForNegation(Type *Ty) {
+ Type *ScalarTy = Ty->getScalarType();
+ if (ScalarTy->isFloatingPointTy()) {
+ Constant *C = getNegativeZero(ScalarTy);
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+ return C;
+ }
return Constant::getNullValue(Ty);
}
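The rewrite factors the scalar/vector split through getScalarType(): floating-point negation needs -0.0 so that negating +0.0 folds correctly under IEEE rules, while integer negation just needs plain zero. A hedged illustration of the resulting behavior:

    LLVMContext Ctx;
    Type *FloatTy = Type::getFloatTy(Ctx);
    Constant *NegZero = ConstantFP::getZeroValueForNegation(FloatTy);  // -0.0
    Constant *VecNegZero =
        ConstantFP::getZeroValueForNegation(VectorType::get(FloatTy, 4));  // splat of -0.0
    Constant *IntZero =
        ConstantFP::getZeroValueForNegation(Type::getInt32Ty(Ctx));  // i32 0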
@@ -548,7 +557,9 @@ ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
if (!Slot) {
Type *Ty;
- if (&V.getSemantics() == &APFloat::IEEEsingle)
+ if (&V.getSemantics() == &APFloat::IEEEhalf)
+ Ty = Type::getHalfTy(Context);
+ else if (&V.getSemantics() == &APFloat::IEEEsingle)
Ty = Type::getFloatTy(Context);
else if (&V.getSemantics() == &APFloat::IEEEdouble)
Ty = Type::getDoubleTy(Context);
@@ -584,9 +595,83 @@ bool ConstantFP::isExactlyValue(const APFloat &V) const {
}
//===----------------------------------------------------------------------===//
+// ConstantAggregateZero Implementation
+//===----------------------------------------------------------------------===//
+
+/// getSequentialElement - If this CAZ has array or vector type, return a zero
+/// with the right element type.
+Constant *ConstantAggregateZero::getSequentialElement() const {
+ return Constant::getNullValue(getType()->getSequentialElementType());
+}
+
+/// getStructElement - If this CAZ has struct type, return a zero with the
+/// right element type for the specified element.
+Constant *ConstantAggregateZero::getStructElement(unsigned Elt) const {
+ return Constant::getNullValue(getType()->getStructElementType(Elt));
+}
+
+/// getElementValue - Return a zero of the right type for the specified GEP
+/// index if we can, otherwise return null (e.g. if C is a ConstantExpr).
+Constant *ConstantAggregateZero::getElementValue(Constant *C) const {
+ if (isa<SequentialType>(getType()))
+ return getSequentialElement();
+ return getStructElement(cast<ConstantInt>(C)->getZExtValue());
+}
+
+/// getElementValue - Return a zero of the right type for the specified GEP
+/// index.
+Constant *ConstantAggregateZero::getElementValue(unsigned Idx) const {
+ if (isa<SequentialType>(getType()))
+ return getSequentialElement();
+ return getStructElement(Idx);
+}
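These helpers let a ConstantAggregateZero behave like a real aggregate without ever allocating its elements; each element zero is synthesized from the type on demand. A hypothetical illustration, assuming the same 3.1-era API:

    Type *I32 = Type::getInt32Ty(Ctx);
    Type *Elems[] = { I32, ArrayType::get(Type::getFloatTy(Ctx), 4) };
    StructType *STy = StructType::get(Ctx, Elems);  // { i32, [4 x float] }

    ConstantAggregateZero *Zero = ConstantAggregateZero::get(STy);
    Constant *E0 = Zero->getElementValue(0u);  // i32 0
    Constant *E1 = Zero->getElementValue(1u);  // [4 x float] zeroinitializer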
+
+
+//===----------------------------------------------------------------------===//
+// UndefValue Implementation
+//===----------------------------------------------------------------------===//
+
+/// getSequentialElement - If this undef has array or vector type, return an
+/// undef with the right element type.
+UndefValue *UndefValue::getSequentialElement() const {
+ return UndefValue::get(getType()->getSequentialElementType());
+}
+
+/// getStructElement - If this undef has struct type, return an undef with the
+/// right element type for the specified element.
+UndefValue *UndefValue::getStructElement(unsigned Elt) const {
+ return UndefValue::get(getType()->getStructElementType(Elt));
+}
+
+/// getElementValue - Return an undef of the right type for the specified GEP
+/// index if we can, otherwise return null (e.g. if C is a ConstantExpr).
+UndefValue *UndefValue::getElementValue(Constant *C) const {
+ if (isa<SequentialType>(getType()))
+ return getSequentialElement();
+ return getStructElement(cast<ConstantInt>(C)->getZExtValue());
+}
+
+/// getElementValue - Return an undef of the right type for the specified GEP
+/// index.
+UndefValue *UndefValue::getElementValue(unsigned Idx) const {
+ if (isa<SequentialType>(getType()))
+ return getSequentialElement();
+ return getStructElement(Idx);
+}
+
+
+
+//===----------------------------------------------------------------------===//
// ConstantXXX Classes
//===----------------------------------------------------------------------===//
+template <typename ItTy, typename EltTy>
+static bool rangeOnlyContains(ItTy Start, ItTy End, EltTy Elt) {
+ for (; Start != End; ++Start)
+ if (*Start != Elt)
+ return false;
+ return true;
+}
ConstantArray::ConstantArray(ArrayType *T, ArrayRef<Constant *> V)
: Constant(T, ConstantArrayVal,
@@ -601,45 +686,97 @@ ConstantArray::ConstantArray(ArrayType *T, ArrayRef<Constant *> V)
}
Constant *ConstantArray::get(ArrayType *Ty, ArrayRef<Constant*> V) {
+ // Empty arrays are canonicalized to ConstantAggregateZero.
+ if (V.empty())
+ return ConstantAggregateZero::get(Ty);
+
for (unsigned i = 0, e = V.size(); i != e; ++i) {
assert(V[i]->getType() == Ty->getElementType() &&
"Wrong type in array element initializer");
}
LLVMContextImpl *pImpl = Ty->getContext().pImpl;
- // If this is an all-zero array, return a ConstantAggregateZero object
- if (!V.empty()) {
- Constant *C = V[0];
- if (!C->isNullValue())
- return pImpl->ArrayConstants.getOrCreate(Ty, V);
-
- for (unsigned i = 1, e = V.size(); i != e; ++i)
- if (V[i] != C)
- return pImpl->ArrayConstants.getOrCreate(Ty, V);
- }
- return ConstantAggregateZero::get(Ty);
-}
+ // If this is an all-zero array, return a ConstantAggregateZero object. If
+ // all undef, return an UndefValue, if "all simple", then return a
+ // ConstantDataArray.
+ Constant *C = V[0];
+ if (isa<UndefValue>(C) && rangeOnlyContains(V.begin(), V.end(), C))
+ return UndefValue::get(Ty);
-/// ConstantArray::get(const string&) - Return an array that is initialized to
-/// contain the specified string. If length is zero then a null terminator is
-/// added to the specified string so that it may be used in a natural way.
-/// Otherwise, the length parameter specifies how much of the string to use
-/// and it won't be null terminated.
-///
-Constant *ConstantArray::get(LLVMContext &Context, StringRef Str,
- bool AddNull) {
- std::vector<Constant*> ElementVals;
- ElementVals.reserve(Str.size() + size_t(AddNull));
- for (unsigned i = 0; i < Str.size(); ++i)
- ElementVals.push_back(ConstantInt::get(Type::getInt8Ty(Context), Str[i]));
-
- // Add a null terminator to the string...
- if (AddNull) {
- ElementVals.push_back(ConstantInt::get(Type::getInt8Ty(Context), 0));
+ if (C->isNullValue() && rangeOnlyContains(V.begin(), V.end(), C))
+ return ConstantAggregateZero::get(Ty);
+
+ // Check to see if all of the elements are ConstantFP or ConstantInt and if
+ // the element type is compatible with ConstantDataVector. If so, use it.
+ if (ConstantDataSequential::isElementTypeCompatible(C->getType())) {
+ // We speculatively build the elements here even if it turns out that there
+ // is a constantexpr or something else weird in the array, since it is so
+ // uncommon for that to happen.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+ if (CI->getType()->isIntegerTy(8)) {
+ SmallVector<uint8_t, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i]))
+ Elts.push_back(CI->getZExtValue());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataArray::get(C->getContext(), Elts);
+ } else if (CI->getType()->isIntegerTy(16)) {
+ SmallVector<uint16_t, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i]))
+ Elts.push_back(CI->getZExtValue());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataArray::get(C->getContext(), Elts);
+ } else if (CI->getType()->isIntegerTy(32)) {
+ SmallVector<uint32_t, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i]))
+ Elts.push_back(CI->getZExtValue());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataArray::get(C->getContext(), Elts);
+ } else if (CI->getType()->isIntegerTy(64)) {
+ SmallVector<uint64_t, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i]))
+ Elts.push_back(CI->getZExtValue());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataArray::get(C->getContext(), Elts);
+ }
+ }
+
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ if (CFP->getType()->isFloatTy()) {
+ SmallVector<float, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V[i]))
+ Elts.push_back(CFP->getValueAPF().convertToFloat());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataArray::get(C->getContext(), Elts);
+ } else if (CFP->getType()->isDoubleTy()) {
+ SmallVector<double, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V[i]))
+ Elts.push_back(CFP->getValueAPF().convertToDouble());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataArray::get(C->getContext(), Elts);
+ }
+ }
}
- ArrayType *ATy = ArrayType::get(Type::getInt8Ty(Context), ElementVals.size());
- return get(ATy, ElementVals);
+ // Otherwise, we really do want to create a ConstantArray.
+ return pImpl->ArrayConstants.getOrCreate(Ty, V);
}
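The net effect of the rewritten ConstantArray::get() is that it becomes a canonicalization funnel: callers keep passing Constant* lists and the factory quietly picks the densest representation. Hypothetical outcomes, hedged against the LLVM 3.1-era API:

    LLVMContext Ctx;
    Type *I8 = Type::getInt8Ty(Ctx);
    ArrayType *ATy = ArrayType::get(I8, 3);

    Constant *Z[] = { ConstantInt::get(I8, 0), ConstantInt::get(I8, 0),
                      ConstantInt::get(I8, 0) };
    Constant *U[] = { UndefValue::get(I8), UndefValue::get(I8),
                      UndefValue::get(I8) };
    Constant *B[] = { ConstantInt::get(I8, 1), ConstantInt::get(I8, 2),
                      ConstantInt::get(I8, 3) };

    Constant *A0 = ConstantArray::get(ATy, Z);  // ConstantAggregateZero
    Constant *A1 = ConstantArray::get(ATy, U);  // UndefValue
    Constant *A2 = ConstantArray::get(ATy, B);  // ConstantDataArray
    // A true ConstantArray node is only created for element lists the packed
    // form cannot hold, e.g. ones containing a ConstantExpr.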
/// getTypeForElements - Return an anonymous struct type to use for a constant
@@ -647,9 +784,10 @@ Constant *ConstantArray::get(LLVMContext &Context, StringRef Str,
StructType *ConstantStruct::getTypeForElements(LLVMContext &Context,
ArrayRef<Constant*> V,
bool Packed) {
- SmallVector<Type*, 16> EltTypes;
- for (unsigned i = 0, e = V.size(); i != e; ++i)
- EltTypes.push_back(V[i]->getType());
+ unsigned VecSize = V.size();
+ SmallVector<Type*, 16> EltTypes(VecSize);
+ for (unsigned i = 0; i != VecSize; ++i)
+ EltTypes[i] = V[i]->getType();
return StructType::get(Context, EltTypes, Packed);
}
@@ -677,14 +815,31 @@ ConstantStruct::ConstantStruct(StructType *T, ArrayRef<Constant *> V)
// ConstantStruct accessors.
Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) {
- // Create a ConstantAggregateZero value if all elements are zeros.
- for (unsigned i = 0, e = V.size(); i != e; ++i)
- if (!V[i]->isNullValue())
- return ST->getContext().pImpl->StructConstants.getOrCreate(ST, V);
-
assert((ST->isOpaque() || ST->getNumElements() == V.size()) &&
"Incorrect # elements specified to ConstantStruct::get");
- return ConstantAggregateZero::get(ST);
+
+ // Create a ConstantAggregateZero value if all elements are zeros.
+ bool isZero = true;
+ bool isUndef = false;
+
+ if (!V.empty()) {
+ isUndef = isa<UndefValue>(V[0]);
+ isZero = V[0]->isNullValue();
+ if (isUndef || isZero) {
+ for (unsigned i = 0, e = V.size(); i != e; ++i) {
+ if (!V[i]->isNullValue())
+ isZero = false;
+ if (!isa<UndefValue>(V[i]))
+ isUndef = false;
+ }
+ }
+ }
+ if (isZero)
+ return ConstantAggregateZero::get(ST);
+ if (isUndef)
+ return UndefValue::get(ST);
+
+ return ST->getContext().pImpl->StructConstants.getOrCreate(ST, V);
}
Constant *ConstantStruct::get(StructType *T, ...) {
@@ -731,10 +886,93 @@ Constant *ConstantVector::get(ArrayRef<Constant*> V) {
return ConstantAggregateZero::get(T);
if (isUndef)
return UndefValue::get(T);
+
+ // Check to see if all of the elements are ConstantFP or ConstantInt and if
+ // the element type is compatible with ConstantDataVector. If so, use it.
+ if (ConstantDataSequential::isElementTypeCompatible(C->getType())) {
+ // We speculatively build the elements here even if it turns out that there
+ // is a constantexpr or something else weird in the array, since it is so
+ // uncommon for that to happen.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+ if (CI->getType()->isIntegerTy(8)) {
+ SmallVector<uint8_t, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i]))
+ Elts.push_back(CI->getZExtValue());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataVector::get(C->getContext(), Elts);
+ } else if (CI->getType()->isIntegerTy(16)) {
+ SmallVector<uint16_t, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i]))
+ Elts.push_back(CI->getZExtValue());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataVector::get(C->getContext(), Elts);
+ } else if (CI->getType()->isIntegerTy(32)) {
+ SmallVector<uint32_t, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i]))
+ Elts.push_back(CI->getZExtValue());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataVector::get(C->getContext(), Elts);
+ } else if (CI->getType()->isIntegerTy(64)) {
+ SmallVector<uint64_t, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V[i]))
+ Elts.push_back(CI->getZExtValue());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataVector::get(C->getContext(), Elts);
+ }
+ }
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ if (CFP->getType()->isFloatTy()) {
+ SmallVector<float, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V[i]))
+ Elts.push_back(CFP->getValueAPF().convertToFloat());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataVector::get(C->getContext(), Elts);
+ } else if (CFP->getType()->isDoubleTy()) {
+ SmallVector<double, 16> Elts;
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V[i]))
+ Elts.push_back(CFP->getValueAPF().convertToDouble());
+ else
+ break;
+ if (Elts.size() == V.size())
+ return ConstantDataVector::get(C->getContext(), Elts);
+ }
+ }
+ }
+
+ // Otherwise, the element type isn't compatible with ConstantDataVector, or
+ // the operand list contains a ConstantExpr or something else strange.
return pImpl->VectorConstants.getOrCreate(T, V);
}
+Constant *ConstantVector::getSplat(unsigned NumElts, Constant *V) {
+ // If this splat is compatible with ConstantDataVector, use it instead of
+ // ConstantVector.
+ if ((isa<ConstantFP>(V) || isa<ConstantInt>(V)) &&
+ ConstantDataSequential::isElementTypeCompatible(V->getType()))
+ return ConstantDataVector::getSplat(NumElts, V);
+
+ SmallVector<Constant*, 32> Elts(NumElts, V);
+ return get(Elts);
+}
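getSplat() replaces the repeated SmallVector-of-N-copies idiom visible throughout the minus lines of this file, and it routes eligible splats into the new packed representation. A hypothetical use, continuing the earlier sketches:

    Constant *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
    Constant *Splat = ConstantVector::getSplat(4, One);
    // i32 is ConstantData-compatible, so Splat comes back as a
    // ConstantDataVector of 16 raw bytes, not a 4-operand ConstantVector.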
+
+
// Utility function for determining if a ConstantExpr is a CastOp or not. This
// can't be inline because we don't want to #include Instruction.h into
// Constant.h
@@ -793,66 +1031,16 @@ unsigned ConstantExpr::getPredicate() const {
/// one, but with the specified operand set to the specified value.
Constant *
ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const {
- assert(OpNo < getNumOperands() && "Operand num is out of range!");
assert(Op->getType() == getOperand(OpNo)->getType() &&
"Replacing operand with value of different type!");
if (getOperand(OpNo) == Op)
return const_cast<ConstantExpr*>(this);
+
+ SmallVector<Constant*, 8> NewOps;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ NewOps.push_back(i == OpNo ? Op : getOperand(i));
- Constant *Op0, *Op1, *Op2;
- switch (getOpcode()) {
- case Instruction::Trunc:
- case Instruction::ZExt:
- case Instruction::SExt:
- case Instruction::FPTrunc:
- case Instruction::FPExt:
- case Instruction::UIToFP:
- case Instruction::SIToFP:
- case Instruction::FPToUI:
- case Instruction::FPToSI:
- case Instruction::PtrToInt:
- case Instruction::IntToPtr:
- case Instruction::BitCast:
- return ConstantExpr::getCast(getOpcode(), Op, getType());
- case Instruction::Select:
- Op0 = (OpNo == 0) ? Op : getOperand(0);
- Op1 = (OpNo == 1) ? Op : getOperand(1);
- Op2 = (OpNo == 2) ? Op : getOperand(2);
- return ConstantExpr::getSelect(Op0, Op1, Op2);
- case Instruction::InsertElement:
- Op0 = (OpNo == 0) ? Op : getOperand(0);
- Op1 = (OpNo == 1) ? Op : getOperand(1);
- Op2 = (OpNo == 2) ? Op : getOperand(2);
- return ConstantExpr::getInsertElement(Op0, Op1, Op2);
- case Instruction::ExtractElement:
- Op0 = (OpNo == 0) ? Op : getOperand(0);
- Op1 = (OpNo == 1) ? Op : getOperand(1);
- return ConstantExpr::getExtractElement(Op0, Op1);
- case Instruction::ShuffleVector:
- Op0 = (OpNo == 0) ? Op : getOperand(0);
- Op1 = (OpNo == 1) ? Op : getOperand(1);
- Op2 = (OpNo == 2) ? Op : getOperand(2);
- return ConstantExpr::getShuffleVector(Op0, Op1, Op2);
- case Instruction::GetElementPtr: {
- SmallVector<Constant*, 8> Ops;
- Ops.resize(getNumOperands()-1);
- for (unsigned i = 1, e = getNumOperands(); i != e; ++i)
- Ops[i-1] = getOperand(i);
- if (OpNo == 0)
- return
- ConstantExpr::getGetElementPtr(Op, Ops,
- cast<GEPOperator>(this)->isInBounds());
- Ops[OpNo-1] = Op;
- return
- ConstantExpr::getGetElementPtr(getOperand(0), Ops,
- cast<GEPOperator>(this)->isInBounds());
- }
- default:
- assert(getNumOperands() == 2 && "Must be binary operator?");
- Op0 = (OpNo == 0) ? Op : getOperand(0);
- Op1 = (OpNo == 1) ? Op : getOperand(1);
- return ConstantExpr::get(getOpcode(), Op0, Op1, SubclassOptionalData);
- }
+ return getWithOperands(NewOps);
}
/// getWithOperands - This returns the current constant expression with the
@@ -888,12 +1076,15 @@ getWithOperands(ArrayRef<Constant*> Ops, Type *Ty) const {
return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
case Instruction::ExtractElement:
return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
+ case Instruction::InsertValue:
+ return ConstantExpr::getInsertValue(Ops[0], Ops[1], getIndices());
+ case Instruction::ExtractValue:
+ return ConstantExpr::getExtractValue(Ops[0], getIndices());
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
- return
- ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1),
- cast<GEPOperator>(this)->isInBounds());
+ return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1),
+ cast<GEPOperator>(this)->isInBounds());
case Instruction::ICmp:
case Instruction::FCmp:
return ConstantExpr::getCompare(getPredicate(), Ops[0], Ops[1]);
@@ -908,8 +1099,8 @@ getWithOperands(ArrayRef<Constant*> Ops, Type *Ty) const {
// isValueValidForType implementations
bool ConstantInt::isValueValidForType(Type *Ty, uint64_t Val) {
- unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth(); // assert okay
- if (Ty == Type::getInt1Ty(Ty->getContext()))
+ unsigned NumBits = Ty->getIntegerBitWidth(); // assert okay
+ if (Ty->isIntegerTy(1))
return Val == 0 || Val == 1;
if (NumBits >= 64)
return true; // always true, has to fit in largest type
@@ -918,8 +1109,8 @@ bool ConstantInt::isValueValidForType(Type *Ty, uint64_t Val) {
}
bool ConstantInt::isValueValidForType(Type *Ty, int64_t Val) {
- unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth(); // assert okay
- if (Ty == Type::getInt1Ty(Ty->getContext()))
+ unsigned NumBits = Ty->getIntegerBitWidth();
+ if (Ty->isIntegerTy(1))
return Val == 0 || Val == 1 || Val == -1;
if (NumBits >= 64)
return true; // always true, has to fit in largest type
@@ -937,6 +1128,12 @@ bool ConstantFP::isValueValidForType(Type *Ty, const APFloat& Val) {
return false; // These can't be represented as floating point!
// FIXME rounding mode needs to be more flexible
+ case Type::HalfTyID: {
+ if (&Val2.getSemantics() == &APFloat::IEEEhalf)
+ return true;
+ Val2.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &losesInfo);
+ return !losesInfo;
+ }
case Type::FloatTyID: {
if (&Val2.getSemantics() == &APFloat::IEEEsingle)
return true;
@@ -944,42 +1141,50 @@ bool ConstantFP::isValueValidForType(Type *Ty, const APFloat& Val) {
return !losesInfo;
}
case Type::DoubleTyID: {
- if (&Val2.getSemantics() == &APFloat::IEEEsingle ||
+ if (&Val2.getSemantics() == &APFloat::IEEEhalf ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle ||
&Val2.getSemantics() == &APFloat::IEEEdouble)
return true;
Val2.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &losesInfo);
return !losesInfo;
}
case Type::X86_FP80TyID:
- return &Val2.getSemantics() == &APFloat::IEEEsingle ||
+ return &Val2.getSemantics() == &APFloat::IEEEhalf ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle ||
&Val2.getSemantics() == &APFloat::IEEEdouble ||
&Val2.getSemantics() == &APFloat::x87DoubleExtended;
case Type::FP128TyID:
- return &Val2.getSemantics() == &APFloat::IEEEsingle ||
+ return &Val2.getSemantics() == &APFloat::IEEEhalf ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle ||
&Val2.getSemantics() == &APFloat::IEEEdouble ||
&Val2.getSemantics() == &APFloat::IEEEquad;
case Type::PPC_FP128TyID:
- return &Val2.getSemantics() == &APFloat::IEEEsingle ||
+ return &Val2.getSemantics() == &APFloat::IEEEhalf ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle ||
&Val2.getSemantics() == &APFloat::IEEEdouble ||
&Val2.getSemantics() == &APFloat::PPCDoubleDouble;
}
}
+
//===----------------------------------------------------------------------===//
// Factory Function Implementation
-ConstantAggregateZero* ConstantAggregateZero::get(Type* Ty) {
+ConstantAggregateZero *ConstantAggregateZero::get(Type *Ty) {
assert((Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()) &&
"Cannot create an aggregate zero of non-aggregate type!");
- LLVMContextImpl *pImpl = Ty->getContext().pImpl;
- return pImpl->AggZeroConstants.getOrCreate(Ty, 0);
+ ConstantAggregateZero *&Entry = Ty->getContext().pImpl->CAZConstants[Ty];
+ if (Entry == 0)
+ Entry = new ConstantAggregateZero(Ty);
+
+ return Entry;
}
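Uniquing for ConstantAggregateZero moves from the generic folding-set machinery to a plain per-type map (CAZConstants), since the type alone is the whole key. One observable consequence, reusing STy from the struct sketch above:

    ConstantAggregateZero *A = ConstantAggregateZero::get(STy);
    ConstantAggregateZero *B = ConstantAggregateZero::get(STy);
    assert(A == B && "one CAZ per type; the map returns the cached node");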
-/// destroyConstant - Remove the constant from the constant table...
+/// destroyConstant - Remove the constant from the constant table.
///
void ConstantAggregateZero::destroyConstant() {
- getType()->getContext().pImpl->AggZeroConstants.remove(this);
+ getContext().pImpl->CAZConstants.erase(getType());
destroyConstantImpl();
}
@@ -990,69 +1195,6 @@ void ConstantArray::destroyConstant() {
destroyConstantImpl();
}
-/// isString - This method returns true if the array is an array of i8, and
-/// if the elements of the array are all ConstantInt's.
-bool ConstantArray::isString() const {
- // Check the element type for i8...
- if (!getType()->getElementType()->isIntegerTy(8))
- return false;
- // Check the elements to make sure they are all integers, not constant
- // expressions.
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (!isa<ConstantInt>(getOperand(i)))
- return false;
- return true;
-}
-
-/// isCString - This method returns true if the array is a string (see
-/// isString) and it ends in a null byte \\0 and does not contains any other
-/// null bytes except its terminator.
-bool ConstantArray::isCString() const {
- // Check the element type for i8...
- if (!getType()->getElementType()->isIntegerTy(8))
- return false;
-
- // Last element must be a null.
- if (!getOperand(getNumOperands()-1)->isNullValue())
- return false;
- // Other elements must be non-null integers.
- for (unsigned i = 0, e = getNumOperands()-1; i != e; ++i) {
- if (!isa<ConstantInt>(getOperand(i)))
- return false;
- if (getOperand(i)->isNullValue())
- return false;
- }
- return true;
-}
-
-
-/// convertToString - Helper function for getAsString() and getAsCString().
-static std::string convertToString(const User *U, unsigned len) {
- std::string Result;
- Result.reserve(len);
- for (unsigned i = 0; i != len; ++i)
- Result.push_back((char)cast<ConstantInt>(U->getOperand(i))->getZExtValue());
- return Result;
-}
-
-/// getAsString - If this array is isString(), then this method converts the
-/// array to an std::string and returns it. Otherwise, it asserts out.
-///
-std::string ConstantArray::getAsString() const {
- assert(isString() && "Not a string!");
- return convertToString(this, getNumOperands());
-}
-
-
-/// getAsCString - If this array is isCString(), then this method converts the
-/// array (without the trailing null byte) to an std::string and returns it.
-/// Otherwise, it asserts out.
-///
-std::string ConstantArray::getAsCString() const {
- assert(isCString() && "Not a string!");
- return convertToString(this, getNumOperands() - 1);
-}
-
//---- ConstantStruct::get() implementation...
//
@@ -1071,26 +1213,6 @@ void ConstantVector::destroyConstant() {
destroyConstantImpl();
}
-/// This function will return true iff every element in this vector constant
-/// is set to all ones.
-/// @returns true iff this constant's elements are all set to all ones.
-/// @brief Determine if the value is all ones.
-bool ConstantVector::isAllOnesValue() const {
- // Check out first element.
- const Constant *Elt = getOperand(0);
- const ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
- const ConstantFP *CF = dyn_cast<ConstantFP>(Elt);
-
- // Then make sure all remaining elements point to the same value.
- for (unsigned I = 1, E = getNumOperands(); I < E; ++I)
- if (getOperand(I) != Elt)
- return false;
-
- // First value is all-ones.
- return (CI && CI->isAllOnesValue()) ||
- (CF && CF->isAllOnesValue());
-}
-
/// getSplatValue - If this is a splat constant, where all of the
/// elements have the same value, return that value. Otherwise return null.
Constant *ConstantVector::getSplatValue() const {
@@ -1107,13 +1229,18 @@ Constant *ConstantVector::getSplatValue() const {
//
ConstantPointerNull *ConstantPointerNull::get(PointerType *Ty) {
- return Ty->getContext().pImpl->NullPtrConstants.getOrCreate(Ty, 0);
+ ConstantPointerNull *&Entry = Ty->getContext().pImpl->CPNConstants[Ty];
+ if (Entry == 0)
+ Entry = new ConstantPointerNull(Ty);
+
+ return Entry;
}
// destroyConstant - Remove the constant from the constant table...
//
void ConstantPointerNull::destroyConstant() {
- getType()->getContext().pImpl->NullPtrConstants.remove(this);
+ getContext().pImpl->CPNConstants.erase(getType());
+ // Free the constant and any dangling references to it.
destroyConstantImpl();
}
@@ -1122,13 +1249,18 @@ void ConstantPointerNull::destroyConstant() {
//
UndefValue *UndefValue::get(Type *Ty) {
- return Ty->getContext().pImpl->UndefValueConstants.getOrCreate(Ty, 0);
+ UndefValue *&Entry = Ty->getContext().pImpl->UVConstants[Ty];
+ if (Entry == 0)
+ Entry = new UndefValue(Ty);
+
+ return Entry;
}
// destroyConstant - Remove the constant from the constant table.
//
void UndefValue::destroyConstant() {
- getType()->getContext().pImpl->UndefValueConstants.remove(this);
+ // Free the constant and any dangling references to it.
+ getContext().pImpl->UVConstants.erase(getType());
destroyConstantImpl();
}
@@ -1236,7 +1368,6 @@ Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty) {
switch (opc) {
default:
llvm_unreachable("Invalid cast opcode");
- break;
case Instruction::Trunc: return getTrunc(C, Ty);
case Instruction::ZExt: return getZExt(C, Ty);
case Instruction::SExt: return getSExt(C, Ty);
@@ -1250,7 +1381,6 @@ Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty) {
case Instruction::IntToPtr: return getIntToPtr(C, Ty);
case Instruction::BitCast: return getBitCast(C, Ty);
}
- return 0;
}
Constant *ConstantExpr::getZExtOrBitCast(Constant *C, Type *Ty) {
@@ -1416,14 +1546,26 @@ Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty) {
}
Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy) {
- assert(C->getType()->isPointerTy() && "PtrToInt source must be pointer");
- assert(DstTy->isIntegerTy() && "PtrToInt destination must be integral");
+ assert(C->getType()->getScalarType()->isPointerTy() &&
+ "PtrToInt source must be pointer or pointer vector");
+ assert(DstTy->getScalarType()->isIntegerTy() &&
+ "PtrToInt destination must be integer or integer vector");
+ assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
+ if (isa<VectorType>(C->getType()))
+ assert(C->getType()->getVectorNumElements()==DstTy->getVectorNumElements()&&
+ "Invalid cast between a different number of vector elements");
return getFoldedCast(Instruction::PtrToInt, C, DstTy);
}
Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy) {
- assert(C->getType()->isIntegerTy() && "IntToPtr source must be integral");
- assert(DstTy->isPointerTy() && "IntToPtr destination must be a pointer");
+ assert(C->getType()->getScalarType()->isIntegerTy() &&
+ "IntToPtr source must be integer or integer vector");
+ assert(DstTy->getScalarType()->isPointerTy() &&
+ "IntToPtr destination must be a pointer or pointer vector");
+ assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
+ if (isa<VectorType>(C->getType()))
+ assert(C->getType()->getVectorNumElements()==DstTy->getVectorNumElements()&&
+ "Invalid cast between a different number of vector elements");
return getFoldedCast(Instruction::IntToPtr, C, DstTy);
}
@@ -1603,7 +1745,7 @@ Constant *ConstantExpr::getGetElementPtr(Constant *C, ArrayRef<Value *> Idxs,
// Get the result type of the getelementptr!
Type *Ty = GetElementPtrInst::getIndexedType(C->getType(), Idxs);
assert(Ty && "GEP indices invalid!");
- unsigned AS = cast<PointerType>(C->getType())->getAddressSpace();
+ unsigned AS = C->getType()->getPointerAddressSpace();
Type *ReqTy = Ty->getPointerTo(AS);
assert(C->getType()->isPointerTy() &&
@@ -1683,7 +1825,7 @@ Constant *ConstantExpr::getExtractElement(Constant *Val, Constant *Idx) {
const ExprMapKeyType Key(Instruction::ExtractElement,ArgVec);
LLVMContextImpl *pImpl = Val->getContext().pImpl;
- Type *ReqTy = cast<VectorType>(Val->getType())->getElementType();
+ Type *ReqTy = Val->getType()->getVectorElementType();
return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
}
@@ -1691,8 +1833,8 @@ Constant *ConstantExpr::getInsertElement(Constant *Val, Constant *Elt,
Constant *Idx) {
assert(Val->getType()->isVectorTy() &&
"Tried to create insertelement operation on non-vector type!");
- assert(Elt->getType() == cast<VectorType>(Val->getType())->getElementType()
- && "Insertelement types must match!");
+ assert(Elt->getType() == Val->getType()->getVectorElementType() &&
+ "Insertelement types must match!");
assert(Idx->getType()->isIntegerTy(32) &&
"Insertelement index must be i32 type!");
@@ -1716,8 +1858,8 @@ Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2,
if (Constant *FC = ConstantFoldShuffleVectorInstruction(V1, V2, Mask))
return FC; // Fold a few common cases.
- unsigned NElts = cast<VectorType>(Mask->getType())->getNumElements();
- Type *EltTy = cast<VectorType>(V1->getType())->getElementType();
+ unsigned NElts = Mask->getType()->getVectorNumElements();
+ Type *EltTy = V1->getType()->getVectorElementType();
Type *ShufTy = VectorType::get(EltTy, NElts);
// Look up the constant in the table first to ensure uniqueness
@@ -1879,7 +2021,7 @@ const char *ConstantExpr::getOpcodeName() const {
GetElementPtrConstantExpr::
-GetElementPtrConstantExpr(Constant *C, const std::vector<Constant*> &IdxList,
+GetElementPtrConstantExpr(Constant *C, ArrayRef<Constant*> IdxList,
Type *DestTy)
: ConstantExpr(DestTy, Instruction::GetElementPtr,
OperandTraits<GetElementPtrConstantExpr>::op_end(this)
@@ -1889,6 +2031,341 @@ GetElementPtrConstantExpr(Constant *C, const std::vector<Constant*> &IdxList,
OperandList[i+1] = IdxList[i];
}
+//===----------------------------------------------------------------------===//
+// ConstantData* implementations
+
+void ConstantDataArray::anchor() {}
+void ConstantDataVector::anchor() {}
+
+/// getElementType - Return the element type of the array/vector.
+Type *ConstantDataSequential::getElementType() const {
+ return getType()->getElementType();
+}
+
+StringRef ConstantDataSequential::getRawDataValues() const {
+ return StringRef(DataElements, getNumElements()*getElementByteSize());
+}
+
+/// isElementTypeCompatible - Return true if a ConstantDataSequential can be
+/// formed with a vector or array of the specified element type.
+/// ConstantDataArray only works with normal float and int types that are
+/// stored densely in memory, not with things like i42 or x86_f80.
+bool ConstantDataSequential::isElementTypeCompatible(const Type *Ty) {
+ if (Ty->isFloatTy() || Ty->isDoubleTy()) return true;
+ if (const IntegerType *IT = dyn_cast<IntegerType>(Ty)) {
+ switch (IT->getBitWidth()) {
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ return true;
+ default: break;
+ }
+ }
+ return false;
+}
+
+/// getNumElements - Return the number of elements in the array or vector.
+unsigned ConstantDataSequential::getNumElements() const {
+ if (ArrayType *AT = dyn_cast<ArrayType>(getType()))
+ return AT->getNumElements();
+ return getType()->getVectorNumElements();
+}
+
+
+/// getElementByteSize - Return the size in bytes of the elements in the data.
+uint64_t ConstantDataSequential::getElementByteSize() const {
+ return getElementType()->getPrimitiveSizeInBits()/8;
+}
+
+/// getElementPointer - Return the start of the specified element.
+const char *ConstantDataSequential::getElementPointer(unsigned Elt) const {
+ assert(Elt < getNumElements() && "Invalid Elt");
+ return DataElements+Elt*getElementByteSize();
+}
+
+
+/// isAllZeros - return true if the array is empty or all zeros.
+static bool isAllZeros(StringRef Arr) {
+ for (StringRef::iterator I = Arr.begin(), E = Arr.end(); I != E; ++I)
+ if (*I != 0)
+ return false;
+ return true;
+}
+
+/// getImpl - This is the underlying implementation of all of the
+/// ConstantDataSequential::get methods. They all thunk down to here, providing
+/// the correct element type. We take the bytes in as a StringRef because
+/// we *want* an underlying "char*" to avoid TBAA type punning violations.
+Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) {
+ assert(isElementTypeCompatible(Ty->getSequentialElementType()));
+ // If the elements are all zero or there are no elements, return a CAZ, which
+ // is more dense and canonical.
+ if (isAllZeros(Elements))
+ return ConstantAggregateZero::get(Ty);
+
+ // Do a lookup to see if we have already formed one of these.
+ StringMap<ConstantDataSequential*>::MapEntryTy &Slot =
+ Ty->getContext().pImpl->CDSConstants.GetOrCreateValue(Elements);
+
+ // The bucket can point to a linked list of different CDS's that have the same
+ // body but different types. For example, 0,0,0,1 could be a 4 element array
+ // of i8, or a 1-element array of i32. They'll both end up in the same
+ // StringMap bucket, linked up by their Next pointers. Walk the list.
+ ConstantDataSequential **Entry = &Slot.getValue();
+ for (ConstantDataSequential *Node = *Entry; Node != 0;
+ Entry = &Node->Next, Node = *Entry)
+ if (Node->getType() == Ty)
+ return Node;
+
+ // Okay, we didn't get a hit. Create a node of the right class, link it in,
+ // and return it.
+ if (isa<ArrayType>(Ty))
+ return *Entry = new ConstantDataArray(Ty, Slot.getKeyData());
+
+ assert(isa<VectorType>(Ty));
+ return *Entry = new ConstantDataVector(Ty, Slot.getKeyData());
+}
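The uniquing table here is keyed by raw bytes, not by (type, elements), so two constants with identical byte images but different element types share one StringMap bucket and are told apart by walking the Next chain. A hypothetical illustration; the byte-image overlap assumes a little-endian host:

    uint8_t Bytes[] = { 1, 2, 3, 4 };
    uint16_t Halves[] = { 0x0201, 0x0403 };  // same 4-byte image on little-endian
    Constant *A8  = ConstantDataArray::get(Ctx, Bytes);   // [4 x i8]
    Constant *A16 = ConstantDataArray::get(Ctx, Halves);  // [2 x i16], same bucket
    // A8 != A16: getImpl() found the bucket but no node of the right type,
    // so it linked a second node onto the chain.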
+
+void ConstantDataSequential::destroyConstant() {
+ // Remove the constant from the StringMap.
+ StringMap<ConstantDataSequential*> &CDSConstants =
+ getType()->getContext().pImpl->CDSConstants;
+
+ StringMap<ConstantDataSequential*>::iterator Slot =
+ CDSConstants.find(getRawDataValues());
+
+ assert(Slot != CDSConstants.end() && "CDS not found in uniquing table");
+
+ ConstantDataSequential **Entry = &Slot->getValue();
+
+ // Remove the entry from the hash table.
+ if ((*Entry)->Next == 0) {
+ // If there is only one value in the bucket (common case) it must be this
+ // entry, and removing the entry should remove the bucket completely.
+ assert((*Entry) == this && "Hash mismatch in ConstantDataSequential");
+ getContext().pImpl->CDSConstants.erase(Slot);
+ } else {
+ // Otherwise, there are multiple entries linked off the bucket, unlink the
+ // node we care about but keep the bucket around.
+ for (ConstantDataSequential *Node = *Entry; ;
+ Entry = &Node->Next, Node = *Entry) {
+ assert(Node && "Didn't find entry in its uniquing hash table!");
+ // If we found our entry, unlink it from the list and we're done.
+ if (Node == this) {
+ *Entry = Node->Next;
+ break;
+ }
+ }
+ }
+
+ // If we were part of a list, make sure that we don't delete the list that is
+ // still owned by the uniquing map.
+ Next = 0;
+
+ // Finally, actually delete it.
+ destroyConstantImpl();
+}
+
+/// get() constructors - Return a constant of array type with an element
+/// count and element type matching the ArrayRef passed in. Note that this
+/// can return a ConstantAggregateZero object.
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint8_t> Elts) {
+ Type *Ty = ArrayType::get(Type::getInt8Ty(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*1), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
+ Type *Ty = ArrayType::get(Type::getInt16Ty(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*2), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
+ Type *Ty = ArrayType::get(Type::getInt32Ty(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*4), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
+ Type *Ty = ArrayType::get(Type::getInt64Ty(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*8), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<float> Elts) {
+ Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*4), Ty);
+}
+Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<double> Elts) {
+ Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*8), Ty);
+}
+
+/// getString - This method constructs a CDS and initializes it with a text
+/// string. The default behavior (AddNull==true) causes a null terminator to
+/// be placed at the end of the array (increasing the length of the string by
+/// one more than the StringRef would normally indicate). Pass AddNull=false
+/// to disable this behavior.
+Constant *ConstantDataArray::getString(LLVMContext &Context,
+ StringRef Str, bool AddNull) {
+ if (!AddNull)
+ return get(Context, ArrayRef<uint8_t>((uint8_t*)Str.data(), Str.size()));
+
+ SmallVector<uint8_t, 64> ElementVals;
+ ElementVals.append(Str.begin(), Str.end());
+ ElementVals.push_back(0);
+ return get(Context, ElementVals);
+}
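getString() is the CDS replacement for the ConstantArray::get(Context, StringRef, AddNull) constructor deleted earlier in this diff, with the same null-terminator convention. For instance:

    Constant *S  = ConstantDataArray::getString(Ctx, "hi");         // [3 x i8] "hi\0"
    Constant *S2 = ConstantDataArray::getString(Ctx, "hi", false);  // [2 x i8] "hi"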
+
+/// get() constructors - Return a constant of vector type with an element
+/// count and element type matching the ArrayRef passed in. Note that this
+/// can return a ConstantAggregateZero object.
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint8_t> Elts){
+ Type *Ty = VectorType::get(Type::getInt8Ty(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*1), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
+ Type *Ty = VectorType::get(Type::getInt16Ty(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*2), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
+ Type *Ty = VectorType::get(Type::getInt32Ty(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*4), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
+ Type *Ty = VectorType::get(Type::getInt64Ty(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*8), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<float> Elts) {
+ Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*4), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<double> Elts) {
+ Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
+ return getImpl(StringRef((char*)Elts.data(), Elts.size()*8), Ty);
+}
+
+Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) {
+ assert(isElementTypeCompatible(V->getType()) &&
+ "Element type not compatible with ConstantData");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ if (CI->getType()->isIntegerTy(8)) {
+ SmallVector<uint8_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ if (CI->getType()->isIntegerTy(16)) {
+ SmallVector<uint16_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ if (CI->getType()->isIntegerTy(32)) {
+ SmallVector<uint32_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ assert(CI->getType()->isIntegerTy(64) && "Unsupported ConstantData type");
+ SmallVector<uint64_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
+ if (CFP->getType()->isFloatTy()) {
+ SmallVector<float, 16> Elts(NumElts, CFP->getValueAPF().convertToFloat());
+ return get(V->getContext(), Elts);
+ }
+ if (CFP->getType()->isDoubleTy()) {
+ SmallVector<double, 16> Elts(NumElts,
+ CFP->getValueAPF().convertToDouble());
+ return get(V->getContext(), Elts);
+ }
+ }
+ return ConstantVector::getSplat(NumElts, V);
+}
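getSplat() asserts element-type compatibility on entry, so the trailing ConstantVector::getSplat call only fires when V has a compatible type but is not a literal ConstantInt/ConstantFP (e.g. it is a ConstantExpr); literal splats are packed. A hedged example:

    Constant *F = ConstantFP::get(Type::getFloatTy(Ctx), 1.0);
    Constant *V = ConstantDataVector::getSplat(8, F);
    // <8 x float> splat of 1.0, stored as 32 raw bytes rather than
    // eight uniqued ConstantFP operands.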
+
+
+/// getElementAsInteger - If this is a sequential container of integers (of
+/// any size), return the specified element in the low bits of a uint64_t.
+uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const {
+ assert(isa<IntegerType>(getElementType()) &&
+ "Accessor can only be used when element is an integer");
+ const char *EltPtr = getElementPointer(Elt);
+
+ // The data is stored in host byte order, make sure to cast back to the right
+ // type to load with the right endianness.
+ switch (getElementType()->getIntegerBitWidth()) {
+ default: llvm_unreachable("Invalid bitwidth for CDS");
+ case 8: return *(uint8_t*)EltPtr;
+ case 16: return *(uint16_t*)EltPtr;
+ case 32: return *(uint32_t*)EltPtr;
+ case 64: return *(uint64_t*)EltPtr;
+ }
+}
+
+/// getElementAsAPFloat - If this is a sequential container of floating point
+/// type, return the specified element as an APFloat.
+APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
+ const char *EltPtr = getElementPointer(Elt);
+
+ switch (getElementType()->getTypeID()) {
+ default:
+ llvm_unreachable("Accessor can only be used when element is float/double!");
+ case Type::FloatTyID: return APFloat(*(float*)EltPtr);
+ case Type::DoubleTyID: return APFloat(*(double*)EltPtr);
+ }
+}
+
+/// getElementAsFloat - If this is a sequential container of floats, return
+/// the specified element as a float.
+float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
+ assert(getElementType()->isFloatTy() &&
+ "Accessor can only be used when element is a 'float'");
+ return *(float*)getElementPointer(Elt);
+}
+
+/// getElementAsDouble - If this is a sequential container of doubles, return
+/// the specified element as a double.
+double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
+ assert(getElementType()->isDoubleTy() &&
+ "Accessor can only be used when element is a 'float'");
+ return *(double*)getElementPointer(Elt);
+}
+
+/// getElementAsConstant - Return a Constant for a specified index's element.
+/// Note that this has to compute a new constant to return, so it isn't as
+/// efficient as getElementAsInteger/Float/Double.
+Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
+ if (getElementType()->isFloatTy() || getElementType()->isDoubleTy())
+ return ConstantFP::get(getContext(), getElementAsAPFloat(Elt));
+
+ return ConstantInt::get(getElementType(), getElementAsInteger(Elt));
+}
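The accessor family above is tiered: the typed getters read straight out of the byte array, while getElementAsConstant() allocates (or re-finds) a uniqued Constant. A hypothetical read loop showing the intended split; sumFloats is a stand-in and assumes a float-element CDS (getElementAsFloat asserts otherwise):

    double sumFloats(const ConstantDataSequential *CDS) {
      double Sum = 0;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
        Sum += CDS->getElementAsFloat(i);  // cheap: no Constant materialized
      return Sum;
    }
    // Only drop to getElementAsConstant(i) when an actual Constant* is
    // required, e.g. to feed ConstantExpr::get().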
+
+/// isString - This method returns true if this is an array of i8.
+bool ConstantDataSequential::isString() const {
+ return isa<ArrayType>(getType()) && getElementType()->isIntegerTy(8);
+}
+
+/// isCString - This method returns true if the array "isString", ends with a
+/// nul byte, and does not contain any other nul bytes.
+bool ConstantDataSequential::isCString() const {
+ if (!isString())
+ return false;
+
+ StringRef Str = getAsString();
+
+ // The last value must be nul.
+ if (Str.back() != 0) return false;
+
+ // Other elements must be non-nul.
+ return Str.drop_back().find(0) == StringRef::npos;
+}
+
+/// getSplatValue - If this is a splat constant, meaning that all of the
+/// elements have the same value, return that value. Otherwise return NULL.
+Constant *ConstantDataVector::getSplatValue() const {
+ const char *Base = getRawDataValues().data();
+
+ // Compare elements 1+ to the 0'th element.
+ unsigned EltSize = getElementByteSize();
+ for (unsigned i = 1, e = getNumElements(); i != e; ++i)
+ if (memcmp(Base, Base+i*EltSize, EltSize))
+ return 0;
+
+ // If they're all the same, return the 0th one as a representative.
+ return getElementAsConstant(0);
+}
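Because the payload is a flat host-order byte array, splat detection reduces to memcmp over fixed-size element slots. The same test written against the public API would lean on uniquing instead, via pointer equality of the returned Constants, as in this hedged equivalent:

    static bool isSplatViaAPI(const ConstantDataVector *CDV) {
      Constant *First = CDV->getElementAsConstant(0);
      for (unsigned i = 1, e = CDV->getNumElements(); i != e; ++i)
        if (CDV->getElementAsConstant(i) != First)  // uniqued, so == is identity
          return false;
      return true;
    }
    // The in-tree memcmp version avoids materializing any Constants at all.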
//===----------------------------------------------------------------------===//
// replaceUsesOfWithOnConstant implementations
@@ -1911,56 +2388,46 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
LLVMContextImpl *pImpl = getType()->getContext().pImpl;
- std::pair<LLVMContextImpl::ArrayConstantsTy::MapKey, ConstantArray*> Lookup;
- Lookup.first.first = cast<ArrayType>(getType());
- Lookup.second = this;
-
- std::vector<Constant*> &Values = Lookup.first.second;
+ SmallVector<Constant*, 8> Values;
+ LLVMContextImpl::ArrayConstantsTy::LookupKey Lookup;
+ Lookup.first = cast<ArrayType>(getType());
Values.reserve(getNumOperands()); // Build replacement array.
// Fill values with the modified operands of the constant array. Also,
// compute whether this turns into an all-zeros array.
- bool isAllZeros = false;
unsigned NumUpdated = 0;
- if (!ToC->isNullValue()) {
- for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
- Constant *Val = cast<Constant>(O->get());
- if (Val == From) {
- Val = ToC;
- ++NumUpdated;
- }
- Values.push_back(Val);
- }
- } else {
- isAllZeros = true;
- for (Use *O = OperandList, *E = OperandList+getNumOperands();O != E; ++O) {
- Constant *Val = cast<Constant>(O->get());
- if (Val == From) {
- Val = ToC;
- ++NumUpdated;
- }
- Values.push_back(Val);
- if (isAllZeros) isAllZeros = Val->isNullValue();
+
+ // Keep track of whether all the values in the array are "ToC".
+ bool AllSame = true;
+ for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
+ Constant *Val = cast<Constant>(O->get());
+ if (Val == From) {
+ Val = ToC;
+ ++NumUpdated;
}
+ Values.push_back(Val);
+ AllSame &= Val == ToC;
}
Constant *Replacement = 0;
- if (isAllZeros) {
+ if (AllSame && ToC->isNullValue()) {
Replacement = ConstantAggregateZero::get(getType());
+ } else if (AllSame && isa<UndefValue>(ToC)) {
+ Replacement = UndefValue::get(getType());
} else {
// Check to see if we have this array type already.
- bool Exists;
+ Lookup.second = makeArrayRef(Values);
LLVMContextImpl::ArrayConstantsTy::MapTy::iterator I =
- pImpl->ArrayConstants.InsertOrGetItem(Lookup, Exists);
+ pImpl->ArrayConstants.find(Lookup);
- if (Exists) {
- Replacement = I->second;
+ if (I != pImpl->ArrayConstants.map_end()) {
+ Replacement = I->first;
} else {
// Okay, the new shape doesn't exist in the system yet. Instead of
// creating a new constant array, inserting it, replaceallusesof'ing the
// old with the new, then deleting the old... just update the current one
// in place!
- pImpl->ArrayConstants.MoveConstantToNewSlot(this, I);
+ pImpl->ArrayConstants.remove(this);
// Update to the new value. Optimize for the case when we have a single
// operand that we're changing, but handle bulk updates efficiently.
@@ -1974,6 +2441,7 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
if (getOperand(i) == From)
setOperand(i, ToC);
}
+ pImpl->ArrayConstants.insert(this);
return;
}
}
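[editor's note: the remove-then-reinsert sequence above (and the analogous one for structs below) exists because the uniquing table hashes a constant by its own operands; a condensed sketch of the invariant, with placeholder names:

    // The object must leave the map before its hash-relevant state changes.
    Map.remove(C);        // still findable under the old operand hash
    C->setOperand(i, To); // mutate in place; the hash value changes here
    Map.insert(C);        // re-insert under the new operand hash
]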
@@ -1996,26 +2464,32 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
unsigned OperandToUpdate = U-OperandList;
assert(getOperand(OperandToUpdate) == From && "ReplaceAllUsesWith broken!");
- std::pair<LLVMContextImpl::StructConstantsTy::MapKey, ConstantStruct*> Lookup;
- Lookup.first.first = cast<StructType>(getType());
- Lookup.second = this;
- std::vector<Constant*> &Values = Lookup.first.second;
+ SmallVector<Constant*, 8> Values;
+ LLVMContextImpl::StructConstantsTy::LookupKey Lookup;
+ Lookup.first = cast<StructType>(getType());
Values.reserve(getNumOperands()); // Build replacement struct.
-
// Fill values with the modified operands of the constant struct. Also,
// compute whether this turns into an all-zeros or all-undef struct.
bool isAllZeros = false;
- if (!ToC->isNullValue()) {
- for (Use *O = OperandList, *E = OperandList + getNumOperands(); O != E; ++O)
- Values.push_back(cast<Constant>(O->get()));
- } else {
+ bool isAllUndef = false;
+ if (ToC->isNullValue()) {
isAllZeros = true;
for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
Constant *Val = cast<Constant>(O->get());
Values.push_back(Val);
if (isAllZeros) isAllZeros = Val->isNullValue();
}
+ } else if (isa<UndefValue>(ToC)) {
+ isAllUndef = true;
+ for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
+ Constant *Val = cast<Constant>(O->get());
+ Values.push_back(Val);
+ if (isAllUndef) isAllUndef = isa<UndefValue>(Val);
+ }
+ } else {
+ for (Use *O = OperandList, *E = OperandList + getNumOperands(); O != E; ++O)
+ Values.push_back(cast<Constant>(O->get()));
}
Values[OperandToUpdate] = ToC;
@@ -2024,23 +2498,26 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
Constant *Replacement = 0;
if (isAllZeros) {
Replacement = ConstantAggregateZero::get(getType());
+ } else if (isAllUndef) {
+ Replacement = UndefValue::get(getType());
} else {
// Check to see if we have this struct type already.
- bool Exists;
+ Lookup.second = makeArrayRef(Values);
LLVMContextImpl::StructConstantsTy::MapTy::iterator I =
- pImpl->StructConstants.InsertOrGetItem(Lookup, Exists);
+ pImpl->StructConstants.find(Lookup);
- if (Exists) {
- Replacement = I->second;
+ if (I != pImpl->StructConstants.map_end()) {
+ Replacement = I->first;
} else {
// Okay, the new shape doesn't exist in the system yet. Instead of
// creating a new constant struct, inserting it, replaceallusesof'ing the
// old with the new, then deleting the old... just update the current one
// in place!
- pImpl->StructConstants.MoveConstantToNewSlot(this, I);
+ pImpl->StructConstants.remove(this);
// Update to the new value.
setOperand(OperandToUpdate, ToC);
+ pImpl->StructConstants.insert(this);
return;
}
}
@@ -2058,7 +2535,7 @@ void ConstantVector::replaceUsesOfWithOnConstant(Value *From, Value *To,
Use *U) {
assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
- std::vector<Constant*> Values;
+ SmallVector<Constant*, 8> Values;
Values.reserve(getNumOperands()); // Build replacement array...
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
Constant *Val = getOperand(i);
@@ -2081,89 +2558,13 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
assert(isa<Constant>(ToV) && "Cannot make Constant refer to non-constant!");
Constant *To = cast<Constant>(ToV);
- Constant *Replacement = 0;
- if (getOpcode() == Instruction::GetElementPtr) {
- SmallVector<Constant*, 8> Indices;
- Constant *Pointer = getOperand(0);
- Indices.reserve(getNumOperands()-1);
- if (Pointer == From) Pointer = To;
-
- for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
- Constant *Val = getOperand(i);
- if (Val == From) Val = To;
- Indices.push_back(Val);
- }
- Replacement = ConstantExpr::getGetElementPtr(Pointer, Indices,
- cast<GEPOperator>(this)->isInBounds());
- } else if (getOpcode() == Instruction::ExtractValue) {
- Constant *Agg = getOperand(0);
- if (Agg == From) Agg = To;
-
- ArrayRef<unsigned> Indices = getIndices();
- Replacement = ConstantExpr::getExtractValue(Agg, Indices);
- } else if (getOpcode() == Instruction::InsertValue) {
- Constant *Agg = getOperand(0);
- Constant *Val = getOperand(1);
- if (Agg == From) Agg = To;
- if (Val == From) Val = To;
-
- ArrayRef<unsigned> Indices = getIndices();
- Replacement = ConstantExpr::getInsertValue(Agg, Val, Indices);
- } else if (isCast()) {
- assert(getOperand(0) == From && "Cast only has one use!");
- Replacement = ConstantExpr::getCast(getOpcode(), To, getType());
- } else if (getOpcode() == Instruction::Select) {
- Constant *C1 = getOperand(0);
- Constant *C2 = getOperand(1);
- Constant *C3 = getOperand(2);
- if (C1 == From) C1 = To;
- if (C2 == From) C2 = To;
- if (C3 == From) C3 = To;
- Replacement = ConstantExpr::getSelect(C1, C2, C3);
- } else if (getOpcode() == Instruction::ExtractElement) {
- Constant *C1 = getOperand(0);
- Constant *C2 = getOperand(1);
- if (C1 == From) C1 = To;
- if (C2 == From) C2 = To;
- Replacement = ConstantExpr::getExtractElement(C1, C2);
- } else if (getOpcode() == Instruction::InsertElement) {
- Constant *C1 = getOperand(0);
- Constant *C2 = getOperand(1);
- Constant *C3 = getOperand(1);
- if (C1 == From) C1 = To;
- if (C2 == From) C2 = To;
- if (C3 == From) C3 = To;
- Replacement = ConstantExpr::getInsertElement(C1, C2, C3);
- } else if (getOpcode() == Instruction::ShuffleVector) {
- Constant *C1 = getOperand(0);
- Constant *C2 = getOperand(1);
- Constant *C3 = getOperand(2);
- if (C1 == From) C1 = To;
- if (C2 == From) C2 = To;
- if (C3 == From) C3 = To;
- Replacement = ConstantExpr::getShuffleVector(C1, C2, C3);
- } else if (isCompare()) {
- Constant *C1 = getOperand(0);
- Constant *C2 = getOperand(1);
- if (C1 == From) C1 = To;
- if (C2 == From) C2 = To;
- if (getOpcode() == Instruction::ICmp)
- Replacement = ConstantExpr::getICmp(getPredicate(), C1, C2);
- else {
- assert(getOpcode() == Instruction::FCmp);
- Replacement = ConstantExpr::getFCmp(getPredicate(), C1, C2);
- }
- } else if (getNumOperands() == 2) {
- Constant *C1 = getOperand(0);
- Constant *C2 = getOperand(1);
- if (C1 == From) C1 = To;
- if (C2 == From) C2 = To;
- Replacement = ConstantExpr::get(getOpcode(), C1, C2, SubclassOptionalData);
- } else {
- llvm_unreachable("Unknown ConstantExpr type!");
- return;
+ SmallVector<Constant*, 8> NewOps;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ Constant *Op = getOperand(i);
+ NewOps.push_back(Op == From ? To : Op);
}
+ Constant *Replacement = getWithOperands(NewOps);
assert(Replacement != this && "I didn't contain From!");
// Everyone using this now uses the replacement.
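[editor's note: the long per-opcode switch this hunk deletes is now subsumed by Constant::getWithOperands, which re-dispatches on the opcode internally. A hedged sketch of the equivalence for a hypothetical add expression CE with operands {From, X}:

    SmallVector<Constant*, 8> Ops;
    Ops.push_back(To); // operand 0 was From
    Ops.push_back(X);  // operand 1 unchanged
    // Equivalent to the old hand-written ConstantExpr::get(Add, ...) path.
    Constant *R = CE->getWithOperands(Ops);
]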
diff --git a/contrib/llvm/lib/VMCore/ConstantsContext.h b/contrib/llvm/lib/VMCore/ConstantsContext.h
index 1077004..8903a8f 100644
--- a/contrib/llvm/lib/VMCore/ConstantsContext.h
+++ b/contrib/llvm/lib/VMCore/ConstantsContext.h
@@ -15,6 +15,8 @@
#ifndef LLVM_CONSTANTSCONTEXT_H
#define LLVM_CONSTANTSCONTEXT_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Operator.h"
@@ -30,6 +32,7 @@ struct ConstantTraits;
/// UnaryConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement unary constant exprs.
class UnaryConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
public:
// allocate space for exactly one operand
@@ -46,6 +49,7 @@ public:
/// BinaryConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement binary constant exprs.
class BinaryConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
public:
// allocate space for exactly two operands
@@ -66,6 +70,7 @@ public:
/// SelectConstantExpr - This class is private to Constants.cpp, and is used
/// behind the scenes to implement select constant exprs.
class SelectConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
public:
// allocate space for exactly three operands
@@ -86,6 +91,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// extractelement constant exprs.
class ExtractElementConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
public:
// allocate space for exactly two operands
@@ -106,6 +112,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// insertelement constant exprs.
class InsertElementConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
public:
// allocate space for exactly three operands
@@ -127,6 +134,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// shufflevector constant exprs.
class ShuffleVectorConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
public:
// allocate space for exactly three operands
@@ -151,6 +159,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// extractvalue constant exprs.
class ExtractValueConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
public:
// allocate space for exactly one operand
@@ -176,6 +185,7 @@ public:
/// Constants.cpp, and is used behind the scenes to implement
/// insertvalue constant exprs.
class InsertValueConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
public:
// allocate space for exactly one operand
@@ -202,11 +212,12 @@ public:
/// GetElementPtrConstantExpr - This class is private to Constants.cpp, and is
/// used behind the scenes to implement getelementptr constant exprs.
class GetElementPtrConstantExpr : public ConstantExpr {
- GetElementPtrConstantExpr(Constant *C, const std::vector<Constant*> &IdxList,
+ virtual void anchor();
+ GetElementPtrConstantExpr(Constant *C, ArrayRef<Constant*> IdxList,
Type *DestTy);
public:
static GetElementPtrConstantExpr *Create(Constant *C,
- const std::vector<Constant*>&IdxList,
+ ArrayRef<Constant*> IdxList,
Type *DestTy,
unsigned Flags) {
GetElementPtrConstantExpr *Result =
@@ -221,8 +232,10 @@ public:
// CompareConstantExpr - This class is private to Constants.cpp, and is used
// behind the scenes to implement ICmp and FCmp constant expressions. This is
// needed in order to store the predicate value for these instructions.
-struct CompareConstantExpr : public ConstantExpr {
+class CompareConstantExpr : public ConstantExpr {
+ virtual void anchor();
void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+public:
// allocate space for exactly two operands
void *operator new(size_t s) {
return User::operator new(s, 2);
@@ -397,6 +410,13 @@ struct ConstantCreator {
}
};
+template<class ConstantClass, class TypeClass>
+struct ConstantArrayCreator {
+ static ConstantClass *create(TypeClass *Ty, ArrayRef<Constant*> V) {
+ return new(V.size()) ConstantClass(Ty, V);
+ }
+};
+
template<class ConstantClass>
struct ConstantKeyData {
typedef void ValType;
@@ -447,7 +467,6 @@ struct ConstantCreator<ConstantExpr, Type, ExprMapKeyType> {
return new CompareConstantExpr(Ty, Instruction::FCmp, V.subclassdata,
V.operands[0], V.operands[1]);
llvm_unreachable("Invalid ConstantExpr!");
- return 0;
}
};
@@ -467,90 +486,6 @@ struct ConstantKeyData<ConstantExpr> {
}
};
-// ConstantAggregateZero does not take extra "value" argument...
-template<class ValType>
-struct ConstantCreator<ConstantAggregateZero, Type, ValType> {
- static ConstantAggregateZero *create(Type *Ty, const ValType &V){
- return new ConstantAggregateZero(Ty);
- }
-};
-
-template<>
-struct ConstantKeyData<ConstantVector> {
- typedef std::vector<Constant*> ValType;
- static ValType getValType(ConstantVector *CP) {
- std::vector<Constant*> Elements;
- Elements.reserve(CP->getNumOperands());
- for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
- Elements.push_back(CP->getOperand(i));
- return Elements;
- }
-};
-
-template<>
-struct ConstantKeyData<ConstantAggregateZero> {
- typedef char ValType;
- static ValType getValType(ConstantAggregateZero *C) {
- return 0;
- }
-};
-
-template<>
-struct ConstantKeyData<ConstantArray> {
- typedef std::vector<Constant*> ValType;
- static ValType getValType(ConstantArray *CA) {
- std::vector<Constant*> Elements;
- Elements.reserve(CA->getNumOperands());
- for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i)
- Elements.push_back(cast<Constant>(CA->getOperand(i)));
- return Elements;
- }
-};
-
-template<>
-struct ConstantKeyData<ConstantStruct> {
- typedef std::vector<Constant*> ValType;
- static ValType getValType(ConstantStruct *CS) {
- std::vector<Constant*> Elements;
- Elements.reserve(CS->getNumOperands());
- for (unsigned i = 0, e = CS->getNumOperands(); i != e; ++i)
- Elements.push_back(cast<Constant>(CS->getOperand(i)));
- return Elements;
- }
-};
-
-// ConstantPointerNull does not take extra "value" argument...
-template<class ValType>
-struct ConstantCreator<ConstantPointerNull, PointerType, ValType> {
- static ConstantPointerNull *create(PointerType *Ty, const ValType &V){
- return new ConstantPointerNull(Ty);
- }
-};
-
-template<>
-struct ConstantKeyData<ConstantPointerNull> {
- typedef char ValType;
- static ValType getValType(ConstantPointerNull *C) {
- return 0;
- }
-};
-
-// UndefValue does not take extra "value" argument...
-template<class ValType>
-struct ConstantCreator<UndefValue, Type, ValType> {
- static UndefValue *create(Type *Ty, const ValType &V) {
- return new UndefValue(Ty);
- }
-};
-
-template<>
-struct ConstantKeyData<UndefValue> {
- typedef char ValType;
- static ValType getValType(UndefValue *C) {
- return 0;
- }
-};
-
template<>
struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType> {
static InlineAsm *create(PointerType *Ty, const InlineAsmKeyType &Key) {
@@ -704,6 +639,129 @@ public:
}
};
+// Unique map for aggregate constants
+template<class TypeClass, class ConstantClass>
+class ConstantAggrUniqueMap {
+public:
+ typedef ArrayRef<Constant*> Operands;
+ typedef std::pair<TypeClass*, Operands> LookupKey;
+private:
+ struct MapInfo {
+ typedef DenseMapInfo<ConstantClass*> ConstantClassInfo;
+ typedef DenseMapInfo<Constant*> ConstantInfo;
+ typedef DenseMapInfo<TypeClass*> TypeClassInfo;
+ static inline ConstantClass* getEmptyKey() {
+ return ConstantClassInfo::getEmptyKey();
+ }
+ static inline ConstantClass* getTombstoneKey() {
+ return ConstantClassInfo::getTombstoneKey();
+ }
+ static unsigned getHashValue(const ConstantClass *CP) {
+ SmallVector<Constant*, 8> CPOperands;
+ CPOperands.reserve(CP->getNumOperands());
+ for (unsigned I = 0, E = CP->getNumOperands(); I < E; ++I)
+ CPOperands.push_back(CP->getOperand(I));
+ return getHashValue(LookupKey(CP->getType(), CPOperands));
+ }
+ static bool isEqual(const ConstantClass *LHS, const ConstantClass *RHS) {
+ return LHS == RHS;
+ }
+ static unsigned getHashValue(const LookupKey &Val) {
+ return hash_combine(Val.first, hash_combine_range(Val.second.begin(),
+ Val.second.end()));
+ }
+ static bool isEqual(const LookupKey &LHS, const ConstantClass *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ if (LHS.first != RHS->getType()
+ || LHS.second.size() != RHS->getNumOperands())
+ return false;
+ for (unsigned I = 0, E = RHS->getNumOperands(); I < E; ++I) {
+ if (LHS.second[I] != RHS->getOperand(I))
+ return false;
+ }
+ return true;
+ }
+ };
+public:
+ typedef DenseMap<ConstantClass *, char, MapInfo> MapTy;
+
+private:
+ /// Map - This is the main map from the element descriptor to the Constants.
+ /// This is the primary way we avoid creating two of the same shape
+ /// constant.
+ MapTy Map;
+
+public:
+ typename MapTy::iterator map_begin() { return Map.begin(); }
+ typename MapTy::iterator map_end() { return Map.end(); }
+
+ void freeConstants() {
+ for (typename MapTy::iterator I=Map.begin(), E=Map.end();
+ I != E; ++I) {
+ // Asserts that use_empty().
+ delete I->first;
+ }
+ }
+
+private:
+ typename MapTy::iterator findExistingElement(ConstantClass *CP) {
+ return Map.find(CP);
+ }
+
+ ConstantClass *Create(TypeClass *Ty, Operands V, typename MapTy::iterator I) {
+ ConstantClass* Result =
+ ConstantArrayCreator<ConstantClass,TypeClass>::create(Ty, V);
+
+ assert(Result->getType() == Ty && "Type specified is not correct!");
+ Map[Result] = '\0';
+
+ return Result;
+ }
+public:
+
+ /// getOrCreate - Return the specified constant from the map, creating it if
+ /// necessary.
+ ConstantClass *getOrCreate(TypeClass *Ty, Operands V) {
+ LookupKey Lookup(Ty, V);
+ ConstantClass* Result = 0;
+
+ typename MapTy::iterator I = Map.find_as(Lookup);
+ // Is it in the map?
+ if (I != Map.end())
+ Result = I->first;
+
+ if (!Result) {
+ // If no preexisting value, create one now...
+ Result = Create(Ty, V, I);
+ }
+
+ return Result;
+ }
+
+ /// Find the constant by lookup key.
+ typename MapTy::iterator find(LookupKey Lookup) {
+ return Map.find_as(Lookup);
+ }
+
+ /// Insert the constant into its proper slot.
+ void insert(ConstantClass *CP) {
+ Map[CP] = '\0';
+ }
+
+ /// Remove this constant from the map.
+ void remove(ConstantClass *CP) {
+ typename MapTy::iterator I = findExistingElement(CP);
+ assert(I != Map.end() && "Constant not found in constant table!");
+ assert(I->first == CP && "Didn't find correct element?");
+ Map.erase(I);
+ }
+
+ void dump() const {
+ DEBUG(dbgs() << "ConstantsContext.h: ConstantAggrUniqueMap\n");
+ }
+};
+
}
#endif
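[editor's note: the map above stores the constants themselves as DenseMap keys and relies on DenseMap::find_as to probe with a cheap (type, operand list) LookupKey, so a lookup never has to allocate a trial constant. A usage sketch, with Ty, C0 and C1 standing in for real values:

    ConstantAggrUniqueMap<StructType, ConstantStruct> Structs;
    Constant *Vals[] = { C0, C1 };
    // First call creates and caches; the second returns the same pointer.
    ConstantStruct *A = Structs.getOrCreate(Ty, Vals);
    ConstantStruct *B = Structs.getOrCreate(Ty, Vals);
    assert(A == B && "identical aggregates are uniqued");
]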
diff --git a/contrib/llvm/lib/VMCore/Core.cpp b/contrib/llvm/lib/VMCore/Core.cpp
index a505e4b..a9cca22 100644
--- a/contrib/llvm/lib/VMCore/Core.cpp
+++ b/contrib/llvm/lib/VMCore/Core.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm-c/Core.h"
+#include "llvm/Attributes.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -132,10 +133,11 @@ LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M) {
LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) {
switch (unwrap(Ty)->getTypeID()) {
- default:
- assert(false && "Unhandled TypeID.");
+ default: llvm_unreachable("Unhandled TypeID.");
case Type::VoidTyID:
return LLVMVoidTypeKind;
+ case Type::HalfTyID:
+ return LLVMHalfTypeKind;
case Type::FloatTyID:
return LLVMFloatTypeKind;
case Type::DoubleTyID:
@@ -222,6 +224,9 @@ unsigned LLVMGetIntTypeWidth(LLVMTypeRef IntegerTy) {
/*--.. Operations on real types ............................................--*/
+LLVMTypeRef LLVMHalfTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getHalfTy(*unwrap(C));
+}
LLVMTypeRef LLVMFloatTypeInContext(LLVMContextRef C) {
return (LLVMTypeRef) Type::getFloatTy(*unwrap(C));
}
@@ -241,6 +246,9 @@ LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C) {
return (LLVMTypeRef) Type::getX86_MMXTy(*unwrap(C));
}
+LLVMTypeRef LLVMHalfType(void) {
+ return LLVMHalfTypeInContext(LLVMGetGlobalContext());
+}
LLVMTypeRef LLVMFloatType(void) {
return LLVMFloatTypeInContext(LLVMGetGlobalContext());
}
@@ -558,6 +566,17 @@ void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char* name, LLVMValueRe
Dest[i] = wrap(N->getOperand(i));
}
+void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char* name,
+ LLVMValueRef Val)
+{
+ NamedMDNode *N = unwrap(M)->getOrInsertNamedMetadata(name);
+ if (!N)
+ return;
+ MDNode *Op = Val ? unwrap<MDNode>(Val) : NULL;
+ if (Op)
+ N->addOperand(Op);
+}
+
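[editor's note: a hedged sketch of the new C API entry point in client code, where M is an assumed LLVMModuleRef and the metadata name is illustrative:

    /* Append an MDNode operand to the named metadata "my.md". */
    LLVMValueRef Elts[] = { LLVMMDString("hello", 5) };
    LLVMValueRef Node = LLVMMDNode(Elts, 1);
    LLVMAddNamedMetadataOperand(M, "my.md", Node);
]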
/*--.. Operations on scalar constants ......................................--*/
LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
@@ -614,8 +633,8 @@ LLVMValueRef LLVMConstStringInContext(LLVMContextRef C, const char *Str,
LLVMBool DontNullTerminate) {
/* Inverted the sense of AddNull because ', 0)' is a
better mnemonic for null termination than ', 1)'. */
- return wrap(ConstantArray::get(*unwrap(C), StringRef(Str, Length),
- DontNullTerminate == 0));
+ return wrap(ConstantDataArray::getString(*unwrap(C), StringRef(Str, Length),
+ DontNullTerminate == 0));
}
LLVMValueRef LLVMConstStructInContext(LLVMContextRef C,
LLVMValueRef *ConstantVals,
@@ -660,8 +679,7 @@ LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size) {
static LLVMOpcode map_to_llvmopcode(int opcode)
{
switch (opcode) {
- default:
- assert(0 && "Unhandled Opcode.");
+ default: llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas) case num: return LLVM##opc;
#include "llvm/Instruction.def"
#undef HANDLE_INST
@@ -671,12 +689,11 @@ static LLVMOpcode map_to_llvmopcode(int opcode)
static int map_from_llvmopcode(LLVMOpcode code)
{
switch (code) {
- default:
- assert(0 && "Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas) case LLVM##opc: return num;
#include "llvm/Instruction.def"
#undef HANDLE_INST
}
+ llvm_unreachable("Unhandled Opcode.");
}
/*--.. Constant expressions ................................................--*/
@@ -1040,8 +1057,6 @@ LLVMBool LLVMIsDeclaration(LLVMValueRef Global) {
LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
switch (unwrap<GlobalValue>(Global)->getLinkage()) {
- default:
- assert(false && "Unhandled Linkage Type.");
case GlobalValue::ExternalLinkage:
return LLVMExternalLinkage;
case GlobalValue::AvailableExternallyLinkage:
@@ -1076,16 +1091,13 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
return LLVMCommonLinkage;
}
- // Should never get here.
- return static_cast<LLVMLinkage>(0);
+ llvm_unreachable("Invalid GlobalValue linkage!");
}
void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
GlobalValue *GV = unwrap<GlobalValue>(Global);
switch (Linkage) {
- default:
- assert(false && "Unhandled Linkage Type.");
case LLVMExternalLinkage:
GV->setLinkage(GlobalValue::ExternalLinkage);
break;
@@ -1337,14 +1349,14 @@ void LLVMSetGC(LLVMValueRef Fn, const char *GC) {
void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) {
Function *Func = unwrap<Function>(Fn);
const AttrListPtr PAL = Func->getAttributes();
- const AttrListPtr PALnew = PAL.addAttr(~0U, PA);
+ const AttrListPtr PALnew = PAL.addAttr(~0U, Attributes(PA));
Func->setAttributes(PALnew);
}
void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) {
Function *Func = unwrap<Function>(Fn);
const AttrListPtr PAL = Func->getAttributes();
- const AttrListPtr PALnew = PAL.removeAttr(~0U, PA);
+ const AttrListPtr PALnew = PAL.removeAttr(~0U, Attributes(PA));
Func->setAttributes(PALnew);
}
@@ -1352,7 +1364,7 @@ LLVMAttribute LLVMGetFunctionAttr(LLVMValueRef Fn) {
Function *Func = unwrap<Function>(Fn);
const AttrListPtr PAL = Func->getAttributes();
Attributes attr = PAL.getFnAttributes();
- return (LLVMAttribute)attr;
+ return (LLVMAttribute)attr.Raw();
}
/*--.. Operations on parameters ............................................--*/
@@ -1414,18 +1426,18 @@ LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) {
}
void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA) {
- unwrap<Argument>(Arg)->addAttr(PA);
+ unwrap<Argument>(Arg)->addAttr(Attributes(PA));
}
void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA) {
- unwrap<Argument>(Arg)->removeAttr(PA);
+ unwrap<Argument>(Arg)->removeAttr(Attributes(PA));
}
LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) {
Argument *A = unwrap<Argument>(Arg);
Attributes attr = A->getParent()->getAttributes().getParamAttributes(
A->getArgNo()+1);
- return (LLVMAttribute)attr;
+ return (LLVMAttribute)attr.Raw();
}
@@ -1603,10 +1615,9 @@ unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr) {
Value *V = unwrap(Instr);
if (CallInst *CI = dyn_cast<CallInst>(V))
return CI->getCallingConv();
- else if (InvokeInst *II = dyn_cast<InvokeInst>(V))
+ if (InvokeInst *II = dyn_cast<InvokeInst>(V))
return II->getCallingConv();
llvm_unreachable("LLVMGetInstructionCallConv applies only to call and invoke!");
- return 0;
}
void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
@@ -1622,14 +1633,14 @@ void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index,
LLVMAttribute PA) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
Call.setAttributes(
- Call.getAttributes().addAttr(index, PA));
+ Call.getAttributes().addAttr(index, Attributes(PA)));
}
void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index,
LLVMAttribute PA) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
Call.setAttributes(
- Call.getAttributes().removeAttr(index, PA));
+ Call.getAttributes().removeAttr(index, Attributes(PA)));
}
void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
@@ -2055,6 +2066,20 @@ LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
return wrap(unwrap(B)->CreateGlobalStringPtr(Str, Name));
}
+LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->isVolatile();
+ return cast<StoreInst>(P)->isVolatile();
+}
+
+void LLVMSetVolatile(LLVMValueRef MemAccessInst, LLVMBool isVolatile) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->setVolatile(isVolatile);
+ return cast<StoreInst>(P)->setVolatile(isVolatile);
+}
+
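[editor's note: a short sketch of the new volatile accessors in use, assuming B is an LLVMBuilderRef and Ptr a pointer value (illustrative names):

    LLVMValueRef L = LLVMBuildLoad(B, Ptr, "val");
    LLVMSetVolatile(L, 1);
    assert(LLVMGetVolatile(L) && "load should now be volatile");
]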
/*--.. Casts ...............................................................--*/
LLVMValueRef LLVMBuildTrunc(LLVMBuilderRef B, LLVMValueRef Val,
diff --git a/contrib/llvm/lib/VMCore/DebugInfoProbe.cpp b/contrib/llvm/lib/VMCore/DebugInfoProbe.cpp
deleted file mode 100644
index d1275ff..0000000
--- a/contrib/llvm/lib/VMCore/DebugInfoProbe.cpp
+++ /dev/null
@@ -1,225 +0,0 @@
-//===-- DebugInfoProbe.cpp - DebugInfo Probe ------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements DebugInfoProbe. This probe can be used by a pass
-// manager to analyze how optimizer is treating debugging information.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "debuginfoprobe"
-#include "llvm/DebugInfoProbe.h"
-#include "llvm/Function.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Metadata.h"
-#include "llvm/PassManager.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/DebugLoc.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/StringRef.h"
-#include <set>
-#include <string>
-
-using namespace llvm;
-
-static cl::opt<bool>
-EnableDebugInfoProbe("enable-debug-info-probe", cl::Hidden,
- cl::desc("Enable debug info probe"));
-
-// CreateInfoOutputFile - Return a file stream to print our output on.
-namespace llvm { extern raw_ostream *CreateInfoOutputFile(); }
-
-//===----------------------------------------------------------------------===//
-// DebugInfoProbeImpl - This class implements a interface to monitor
-// how an optimization pass is preserving debugging information.
-
-namespace llvm {
-
- class DebugInfoProbeImpl {
- public:
- DebugInfoProbeImpl() : NumDbgLineLost(0),NumDbgValueLost(0) {}
- void initialize(StringRef PName, Function &F);
- void finalize(Function &F);
- void report();
- private:
- unsigned NumDbgLineLost, NumDbgValueLost;
- std::string PassName;
- Function *TheFn;
- std::set<MDNode *> DbgVariables;
- std::set<Instruction *> MissingDebugLoc;
- };
-}
-
-//===----------------------------------------------------------------------===//
-// DebugInfoProbeImpl
-
-/// initialize - Collect information before running an optimization pass.
-void DebugInfoProbeImpl::initialize(StringRef PName, Function &F) {
- if (!EnableDebugInfoProbe) return;
- PassName = PName;
-
- DbgVariables.clear();
- MissingDebugLoc.clear();
- TheFn = &F;
-
- for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
- for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
- BI != BE; ++BI) {
- if (!isa<PHINode>(BI) && BI->getDebugLoc().isUnknown())
- MissingDebugLoc.insert(BI);
- if (!isa<DbgInfoIntrinsic>(BI)) continue;
- Value *Addr = NULL;
- MDNode *Node = NULL;
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI)) {
- Addr = DDI->getAddress();
- Node = DDI->getVariable();
- } else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
- Addr = DVI->getValue();
- Node = DVI->getVariable();
- }
- if (Addr)
- DbgVariables.insert(Node);
- }
-}
-
-/// report - Report findings. This should be invoked after finalize.
-void DebugInfoProbeImpl::report() {
- if (!EnableDebugInfoProbe) return;
- if (NumDbgLineLost || NumDbgValueLost) {
- raw_ostream *OutStream = CreateInfoOutputFile();
- if (NumDbgLineLost)
- *OutStream << NumDbgLineLost
- << "\t times line number info lost by "
- << PassName << "\n";
- if (NumDbgValueLost)
- *OutStream << NumDbgValueLost
- << "\t times variable info lost by "
- << PassName << "\n";
- delete OutStream;
- }
- NumDbgLineLost = 0;
- NumDbgValueLost = 0;
-}
-
-/// finalize - Collect information after running an optimization pass. This
-/// must be used after initialization.
-void DebugInfoProbeImpl::finalize(Function &F) {
- if (!EnableDebugInfoProbe) return;
- assert (TheFn == &F && "Invalid function to measure!");
-
- std::set<MDNode *>DbgVariables2;
- for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
- for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
- BI != BE; ++BI) {
- if (!isa<PHINode>(BI) && BI->getDebugLoc().isUnknown() &&
- MissingDebugLoc.count(BI) == 0) {
- ++NumDbgLineLost;
- DEBUG(dbgs() << "DebugInfoProbe (" << PassName << "): --- ");
- DEBUG(BI->print(dbgs()));
- DEBUG(dbgs() << "\n");
- }
- if (!isa<DbgInfoIntrinsic>(BI)) continue;
- Value *Addr = NULL;
- MDNode *Node = NULL;
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI)) {
- Addr = DDI->getAddress();
- Node = DDI->getVariable();
- } else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
- Addr = DVI->getValue();
- Node = DVI->getVariable();
- }
- if (Addr)
- DbgVariables2.insert(Node);
- }
-
- for (std::set<MDNode *>::iterator I = DbgVariables.begin(),
- E = DbgVariables.end(); I != E; ++I) {
- if (DbgVariables2.count(*I) == 0 && (*I)->getNumOperands() >= 2) {
- DEBUG(dbgs()
- << "DebugInfoProbe("
- << PassName
- << "): Losing dbg info for variable: ";
- if (MDString *MDS = dyn_cast_or_null<MDString>(
- (*I)->getOperand(2)))
- dbgs() << MDS->getString();
- else
- dbgs() << "...";
- dbgs() << "\n");
- ++NumDbgValueLost;
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// DebugInfoProbe
-
-DebugInfoProbe::DebugInfoProbe() {
- pImpl = new DebugInfoProbeImpl();
-}
-
-DebugInfoProbe::~DebugInfoProbe() {
- delete pImpl;
-}
-
-/// initialize - Collect information before running an optimization pass.
-void DebugInfoProbe::initialize(StringRef PName, Function &F) {
- pImpl->initialize(PName, F);
-}
-
-/// finalize - Collect information after running an optimization pass. This
-/// must be used after initialization.
-void DebugInfoProbe::finalize(Function &F) {
- pImpl->finalize(F);
-}
-
-/// report - Report findings. This should be invoked after finalize.
-void DebugInfoProbe::report() {
- pImpl->report();
-}
-
-//===----------------------------------------------------------------------===//
-// DebugInfoProbeInfo
-
-/// ~DebugInfoProbeInfo - Report data collected by all probes before deleting
-/// them.
-DebugInfoProbeInfo::~DebugInfoProbeInfo() {
- if (!EnableDebugInfoProbe) return;
- for (StringMap<DebugInfoProbe*>::iterator I = Probes.begin(),
- E = Probes.end(); I != E; ++I) {
- I->second->report();
- delete I->second;
- }
- }
-
-/// initialize - Collect information before running an optimization pass.
-void DebugInfoProbeInfo::initialize(Pass *P, Function &F) {
- if (!EnableDebugInfoProbe) return;
- if (P->getAsPMDataManager())
- return;
-
- StringMapEntry<DebugInfoProbe *> &Entry =
- Probes.GetOrCreateValue(P->getPassName());
- DebugInfoProbe *&Probe = Entry.getValue();
- if (!Probe)
- Probe = new DebugInfoProbe();
- Probe->initialize(P->getPassName(), F);
-}
-
-/// finalize - Collect information after running an optimization pass. This
-/// must be used after initialization.
-void DebugInfoProbeInfo::finalize(Pass *P, Function &F) {
- if (!EnableDebugInfoProbe) return;
- if (P->getAsPMDataManager())
- return;
- StringMapEntry<DebugInfoProbe *> &Entry =
- Probes.GetOrCreateValue(P->getPassName());
- DebugInfoProbe *&Probe = Entry.getValue();
- assert (Probe && "DebugInfoProbe is not initialized!");
- Probe->finalize(F);
-}
diff --git a/contrib/llvm/lib/VMCore/DebugLoc.cpp b/contrib/llvm/lib/VMCore/DebugLoc.cpp
index 328244f..9013d28 100644
--- a/contrib/llvm/lib/VMCore/DebugLoc.cpp
+++ b/contrib/llvm/lib/VMCore/DebugLoc.cpp
@@ -173,10 +173,7 @@ DebugLoc DenseMapInfo<DebugLoc>::getTombstoneKey() {
}
unsigned DenseMapInfo<DebugLoc>::getHashValue(const DebugLoc &Key) {
- FoldingSetNodeID ID;
- ID.AddInteger(Key.LineCol);
- ID.AddInteger(Key.ScopeIdx);
- return ID.ComputeHash();
+ return static_cast<unsigned>(hash_combine(Key.LineCol, Key.ScopeIdx));
}
bool DenseMapInfo<DebugLoc>::isEqual(const DebugLoc &LHS, const DebugLoc &RHS) {
diff --git a/contrib/llvm/lib/VMCore/Dominators.cpp b/contrib/llvm/lib/VMCore/Dominators.cpp
index 08b845e..219e631 100644
--- a/contrib/llvm/lib/VMCore/Dominators.cpp
+++ b/contrib/llvm/lib/VMCore/Dominators.cpp
@@ -80,27 +80,187 @@ void DominatorTree::print(raw_ostream &OS, const Module *) const {
DT->print(OS);
}
-// dominates - Return true if A dominates a use in B. This performs the
-// special checks necessary if A and B are in the same basic block.
-bool DominatorTree::dominates(const Instruction *A, const Instruction *B) const{
- const BasicBlock *BBA = A->getParent(), *BBB = B->getParent();
-
- // If A is an invoke instruction, its value is only available in this normal
- // successor block.
- if (const InvokeInst *II = dyn_cast<InvokeInst>(A))
- BBA = II->getNormalDest();
-
- if (BBA != BBB) return dominates(BBA, BBB);
-
- // It is not possible to determine dominance between two PHI nodes
- // based on their ordering.
- if (isa<PHINode>(A) && isa<PHINode>(B))
+// dominates - Return true if Def dominates a use in User. This performs
+// the special checks necessary if Def and User are in the same basic block.
+// Note that Def doesn't dominate a use in Def itself!
+bool DominatorTree::dominates(const Instruction *Def,
+ const Instruction *User) const {
+ const BasicBlock *UseBB = User->getParent();
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Any unreachable use is dominated, even if Def == User.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ // An instruction doesn't dominate a use in itself.
+ if (Def == User)
return false;
-
- // Loop through the basic block until we find A or B.
- BasicBlock::const_iterator I = BBA->begin();
- for (; &*I != A && &*I != B; ++I)
+
+ // The value defined by an invoke dominates an instruction only if
+ // it dominates every instruction in UseBB.
+ // A PHI is dominated only if the instruction dominates every possible use
+ // in the UseBB.
+ if (isa<InvokeInst>(Def) || isa<PHINode>(User))
+ return dominates(Def, UseBB);
+
+ if (DefBB != UseBB)
+ return dominates(DefBB, UseBB);
+
+ // Loop through the basic block until we find Def or User.
+ BasicBlock::const_iterator I = DefBB->begin();
+ for (; &*I != Def && &*I != User; ++I)
/*empty*/;
-
- return &*I == A;
+
+ return &*I == Def;
+}
+
+// dominates - Return true if Def would dominate a use in any instruction in
+// UseBB. Note that dominates(Def, Def->getParent()) is false.
+bool DominatorTree::dominates(const Instruction *Def,
+ const BasicBlock *UseBB) const {
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Any unreachable use is dominated, even if DefBB == UseBB.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ if (DefBB == UseBB)
+ return false;
+
+ const InvokeInst *II = dyn_cast<InvokeInst>(Def);
+ if (!II)
+ return dominates(DefBB, UseBB);
+
+ // Invoke results are only usable in the normal destination, not in the
+ // exceptional destination.
+ BasicBlock *NormalDest = II->getNormalDest();
+ if (!dominates(NormalDest, UseBB))
+ return false;
+
+ // Simple case: if the normal destination has a single predecessor, the
+ // fact that it dominates the use block implies that we also do.
+ if (NormalDest->getSinglePredecessor())
+ return true;
+
+ // The normal edge from the invoke is critical. Conceptually, what we would
+ // like to do is split it and check if the new block dominates the use.
+ // With X being the new block, the graph would look like:
+ //
+  //        DefBB
+  //          /\      .  .
+  //         /  \     .  .
+  //        /    \    .  .
+  //       /      \   |  |
+  //      A        X  B  C
+  //      |         \ | /
+  //      .          \|/
+  //      .      NormalDest
+  //      .
+ //
+ // Given the definition of dominance, NormalDest is dominated by X iff X
+ // dominates all of NormalDest's predecessors (X, B, C in the example). X
+ // trivially dominates itself, so we only have to find if it dominates the
+ // other predecessors. Since the only way out of X is via NormalDest, X can
+ // only properly dominate a node if NormalDest dominates that node too.
+ for (pred_iterator PI = pred_begin(NormalDest),
+ E = pred_end(NormalDest); PI != E; ++PI) {
+ const BasicBlock *BB = *PI;
+ if (BB == DefBB)
+ continue;
+
+ if (!DT->isReachableFromEntry(BB))
+ continue;
+
+ if (!dominates(NormalDest, BB))
+ return false;
+ }
+ return true;
+}
+
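[editor's note: in effect, for an invoke this query asks whether every path into UseBB must pass through the normal edge. A sketch of a typical client check, with DT, Def and UseBB assumed in scope:

    // Safe-to-sink style query: every instruction in UseBB sees Def.
    // For invokes this accounts for the critical normal edge analyzed above.
    if (DT.dominates(Def, UseBB)) {
      // ... a use of Def may be placed anywhere in UseBB ...
    }
]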
+bool DominatorTree::dominates(const Instruction *Def,
+ const Use &U) const {
+ Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
+
+ // Instructions do not dominate non-instructions.
+ if (!UserInst)
+ return false;
+
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Determine the block in which the use happens. PHI nodes use
+ // their operands on edges; simulate this by thinking of the use
+ // happening at the end of the predecessor block.
+ const BasicBlock *UseBB;
+ if (PHINode *PN = dyn_cast<PHINode>(UserInst))
+ UseBB = PN->getIncomingBlock(U);
+ else
+ UseBB = UserInst->getParent();
+
+ // Any unreachable use is dominated, even if Def == User.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ // Invoke instructions define their return values on the edges
+ // to their normal successors, so we have to handle them specially.
+ // Among other things, this means they don't dominate anything in
+ // their own block, except possibly a phi, so we don't need to
+ // walk the block in any case.
+ if (const InvokeInst *II = dyn_cast<InvokeInst>(Def)) {
+ // A PHI in the normal successor using the invoke's return value is
+ // dominated by the invoke's return value.
+ if (isa<PHINode>(UserInst) &&
+ UserInst->getParent() == II->getNormalDest() &&
+ cast<PHINode>(UserInst)->getIncomingBlock(U) == DefBB)
+ return true;
+
+ // Otherwise use the instruction-dominates-block query, which
+ // handles the crazy case of an invoke with a critical edge
+ // properly.
+ return dominates(Def, UseBB);
+ }
+
+ // If the def and use are in different blocks, do a simple CFG dominator
+ // tree query.
+ if (DefBB != UseBB)
+ return dominates(DefBB, UseBB);
+
+ // Ok, def and use are in the same block. If the def is an invoke, it
+ // doesn't dominate anything in the block. If it's a PHI, it dominates
+ // everything in the block.
+ if (isa<PHINode>(UserInst))
+ return true;
+
+ // Otherwise, just loop through the basic block until we find Def or User.
+ BasicBlock::const_iterator I = DefBB->begin();
+ for (; &*I != Def && &*I != UserInst; ++I)
+ /*empty*/;
+
+ return &*I != UserInst;
+}
+
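[editor's note: because PHI operands are modeled as used on their incoming edge, the same definition can dominate one incoming use of a PHI but not another. A sketch of the edge-based view, where U is an assumed Use and Def its defining instruction:

    if (PHINode *PN = dyn_cast<PHINode>(U.getUser())) {
      // The use is tested at the end of its incoming block, not at the PHI.
      BasicBlock *EdgeBB = PN->getIncomingBlock(U);
      bool Dominated = DT.dominates(Def, U); // keyed off EdgeBB internally
      (void)Dominated;
    }
]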
+bool DominatorTree::isReachableFromEntry(const Use &U) const {
+ Instruction *I = dyn_cast<Instruction>(U.getUser());
+
+ // ConstantExprs aren't really reachable from the entry block, but they
+ // don't need to be treated like unreachable code either.
+ if (!I) return true;
+
+ // PHI nodes use their operands on their incoming edges.
+ if (PHINode *PN = dyn_cast<PHINode>(I))
+ return isReachableFromEntry(PN->getIncomingBlock(U));
+
+ // Everything else uses their operands in their own block.
+ return isReachableFromEntry(I->getParent());
}
diff --git a/contrib/llvm/lib/VMCore/Function.cpp b/contrib/llvm/lib/VMCore/Function.cpp
index 1215e6a..af6344e 100644
--- a/contrib/llvm/lib/VMCore/Function.cpp
+++ b/contrib/llvm/lib/VMCore/Function.cpp
@@ -39,6 +39,8 @@ template class llvm::SymbolTableListTraits<BasicBlock, Function>;
// Argument Implementation
//===----------------------------------------------------------------------===//
+void Argument::anchor() { }
+
Argument::Argument(Type *Ty, const Twine &Name, Function *Par)
: Value(Ty, Value::ArgumentVal) {
Parent = 0;
@@ -359,7 +361,7 @@ std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) {
FunctionType *Intrinsic::getType(LLVMContext &Context,
ID id, ArrayRef<Type*> Tys) {
Type *ResultTy = NULL;
- std::vector<Type*> ArgTys;
+ SmallVector<Type*, 8> ArgTys;
bool IsVarArg = false;
#define GET_INTRINSIC_GENERATOR
@@ -370,13 +372,9 @@ FunctionType *Intrinsic::getType(LLVMContext &Context,
}
bool Intrinsic::isOverloaded(ID id) {
- static const bool OTable[] = {
- false,
#define GET_INTRINSIC_OVERLOAD_TABLE
#include "llvm/Intrinsics.gen"
#undef GET_INTRINSIC_OVERLOAD_TABLE
- };
- return OTable[id];
}
/// This defines the "Intrinsic::getAttributes(ID id)" method.
@@ -402,6 +400,7 @@ Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) {
bool Function::hasAddressTaken(const User* *PutOffender) const {
for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) {
const User *U = *I;
+ // FIXME: Check for blockaddress, which does not take the address.
if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
return PutOffender ? (*PutOffender = U, true) : true;
ImmutableCallSite CS(cast<Instruction>(U));
@@ -411,41 +410,30 @@ bool Function::hasAddressTaken(const User* *PutOffender) const {
return false;
}
+bool Function::isDefTriviallyDead() const {
+ // Check the linkage
+ if (!hasLinkOnceLinkage() && !hasLocalLinkage() &&
+ !hasAvailableExternallyLinkage())
+ return false;
+
+ // Check if the function is used by anything other than a blockaddress.
+ for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I)
+ if (!isa<BlockAddress>(*I))
+ return false;
+
+ return true;
+}
+
/// callsFunctionThatReturnsTwice - Return true if the function has a call to
/// setjmp or other function that gcc recognizes as "returning twice".
-///
-/// FIXME: Remove after <rdar://problem/8031714> is fixed.
-/// FIXME: Is the above FIXME valid?
bool Function::callsFunctionThatReturnsTwice() const {
- static const char *const ReturnsTwiceFns[] = {
- "_setjmp",
- "setjmp",
- "sigsetjmp",
- "setjmp_syscall",
- "savectx",
- "qsetjmp",
- "vfork",
- "getcontext"
- };
-
- for (const_inst_iterator I = inst_begin(this), E = inst_end(this); I != E;
- ++I) {
+ for (const_inst_iterator
+ I = inst_begin(this), E = inst_end(this); I != E; ++I) {
const CallInst* callInst = dyn_cast<CallInst>(&*I);
if (!callInst)
continue;
if (callInst->canReturnTwice())
return true;
-
- // check for known function names.
- // FIXME: move this to clang.
- Function *F = callInst->getCalledFunction();
- if (!F)
- continue;
- StringRef Name = F->getName();
- for (unsigned J = 0, e = array_lengthof(ReturnsTwiceFns); J != e; ++J) {
- if (Name == ReturnsTwiceFns[J])
- return true;
- }
}
return false;
diff --git a/contrib/llvm/lib/VMCore/GCOV.cpp b/contrib/llvm/lib/VMCore/GCOV.cpp
index fc7f96f..595c452 100644
--- a/contrib/llvm/lib/VMCore/GCOV.cpp
+++ b/contrib/llvm/lib/VMCore/GCOV.cpp
@@ -107,7 +107,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) {
for (unsigned i = 0, e = Count; i != e; ++i) {
Blocks[i]->addCount(Buff.readInt64());
}
- return true;;
+ return true;
}
LineNumber = Buff.readInt();
diff --git a/contrib/llvm/lib/VMCore/IRBuilder.cpp b/contrib/llvm/lib/VMCore/IRBuilder.cpp
index 5114e2d..b459234 100644
--- a/contrib/llvm/lib/VMCore/IRBuilder.cpp
+++ b/contrib/llvm/lib/VMCore/IRBuilder.cpp
@@ -24,10 +24,10 @@ using namespace llvm;
/// specified. If Name is specified, it is the name of the global variable
/// created.
Value *IRBuilderBase::CreateGlobalString(StringRef Str, const Twine &Name) {
- Constant *StrConstant = ConstantArray::get(Context, Str, true);
+ Constant *StrConstant = ConstantDataArray::getString(Context, Str);
Module &M = *BB->getParent()->getParent();
GlobalVariable *GV = new GlobalVariable(M, StrConstant->getType(),
- true, GlobalValue::InternalLinkage,
+ true, GlobalValue::PrivateLinkage,
StrConstant, "", 0, false);
GV->setName(Name);
GV->setUnnamedAddr(true);
diff --git a/contrib/llvm/lib/VMCore/Instruction.cpp b/contrib/llvm/lib/VMCore/Instruction.cpp
index 73191c1..5449714 100644
--- a/contrib/llvm/lib/VMCore/Instruction.cpp
+++ b/contrib/llvm/lib/VMCore/Instruction.cpp
@@ -102,7 +102,6 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case IndirectBr: return "indirectbr";
case Invoke: return "invoke";
case Resume: return "resume";
- case Unwind: return "unwind";
case Unreachable: return "unreachable";
// Standard binary operators...
@@ -166,8 +165,6 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
default: return "<Invalid operator> ";
}
-
- return 0;
}
/// isIdenticalTo - Return true if the specified instruction is exactly
@@ -391,59 +388,6 @@ bool Instruction::isCommutative(unsigned op) {
}
}
-bool Instruction::isSafeToSpeculativelyExecute() const {
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (Constant *C = dyn_cast<Constant>(getOperand(i)))
- if (C->canTrap())
- return false;
-
- switch (getOpcode()) {
- default:
- return true;
- case UDiv:
- case URem: {
- // x / y is undefined if y == 0, but calcuations like x / 3 are safe.
- ConstantInt *Op = dyn_cast<ConstantInt>(getOperand(1));
- return Op && !Op->isNullValue();
- }
- case SDiv:
- case SRem: {
- // x / y is undefined if y == 0, and might be undefined if y == -1,
- // but calcuations like x / 3 are safe.
- ConstantInt *Op = dyn_cast<ConstantInt>(getOperand(1));
- return Op && !Op->isNullValue() && !Op->isAllOnesValue();
- }
- case Load: {
- const LoadInst *LI = cast<LoadInst>(this);
- if (!LI->isUnordered())
- return false;
- return LI->getPointerOperand()->isDereferenceablePointer();
- }
- case Call:
- return false; // The called function could have undefined behavior or
- // side-effects.
- // FIXME: We should special-case some intrinsics (bswap,
- // overflow-checking arithmetic, etc.)
- case VAArg:
- case Alloca:
- case Invoke:
- case PHI:
- case Store:
- case Ret:
- case Br:
- case IndirectBr:
- case Switch:
- case Unwind:
- case Unreachable:
- case Fence:
- case LandingPad:
- case AtomicRMW:
- case AtomicCmpXchg:
- case Resume:
- return false; // Misc instructions which have effects
- }
-}
-
Instruction *Instruction::clone() const {
Instruction *New = clone_impl();
New->SubclassOptionalData = SubclassOptionalData;
diff --git a/contrib/llvm/lib/VMCore/Instructions.cpp b/contrib/llvm/lib/VMCore/Instructions.cpp
index b3a7205..8db6ac9 100644
--- a/contrib/llvm/lib/VMCore/Instructions.cpp
+++ b/contrib/llvm/lib/VMCore/Instructions.cpp
@@ -625,40 +625,12 @@ void ReturnInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
BasicBlock *ReturnInst::getSuccessorV(unsigned idx) const {
llvm_unreachable("ReturnInst has no successors!");
- return 0;
}
ReturnInst::~ReturnInst() {
}
//===----------------------------------------------------------------------===//
-// UnwindInst Implementation
-//===----------------------------------------------------------------------===//
-
-UnwindInst::UnwindInst(LLVMContext &Context, Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(Context), Instruction::Unwind,
- 0, 0, InsertBefore) {
-}
-UnwindInst::UnwindInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(Context), Instruction::Unwind,
- 0, 0, InsertAtEnd) {
-}
-
-
-unsigned UnwindInst::getNumSuccessorsV() const {
- return getNumSuccessors();
-}
-
-void UnwindInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
- llvm_unreachable("UnwindInst has no successors!");
-}
-
-BasicBlock *UnwindInst::getSuccessorV(unsigned idx) const {
- llvm_unreachable("UnwindInst has no successors!");
- return 0;
-}
-
-//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//
@@ -690,7 +662,6 @@ void ResumeInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
llvm_unreachable("ResumeInst has no successors!");
- return 0;
}
//===----------------------------------------------------------------------===//
@@ -712,12 +683,11 @@ unsigned UnreachableInst::getNumSuccessorsV() const {
}
void UnreachableInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
- llvm_unreachable("UnwindInst has no successors!");
+ llvm_unreachable("UnreachableInst has no successors!");
}
BasicBlock *UnreachableInst::getSuccessorV(unsigned idx) const {
- llvm_unreachable("UnwindInst has no successors!");
- return 0;
+ llvm_unreachable("UnreachableInst has no successors!");
}
//===----------------------------------------------------------------------===//
@@ -1359,6 +1329,15 @@ GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
///
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) {
+ if (Ptr->isVectorTy()) {
+ assert(IdxList.size() == 1 &&
+ "GEP with vector pointers must have a single index");
+ PointerType *PTy = dyn_cast<PointerType>(
+ cast<VectorType>(Ptr)->getElementType());
+ assert(PTy && "GEP with invalid vector pointer found");
+ return PTy->getElementType();
+ }
+
PointerType *PTy = dyn_cast<PointerType>(Ptr);
if (!PTy) return 0; // Type isn't a pointer type!
Type *Agg = PTy->getElementType();
@@ -1366,7 +1345,7 @@ static Type *getIndexedTypeInternal(Type *Ptr, ArrayRef<IndexTy> IdxList) {
// Handle the special case of the empty set index set, which is always valid.
if (IdxList.empty())
return Agg;
-
+
// If there is at least one index, the top level type must be sized, otherwise
// it cannot be 'stepped over'.
if (!Agg->isSized())
@@ -1396,6 +1375,18 @@ Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList) {
return getIndexedTypeInternal(Ptr, IdxList);
}
+unsigned GetElementPtrInst::getAddressSpace(Value *Ptr) {
+ Type *Ty = Ptr->getType();
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ Ty = VTy->getElementType();
+
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+ return PTy->getAddressSpace();
+
+ llvm_unreachable("Invalid GEP pointer type");
+}
+
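[editor's note: these two hunks teach GEP about vector-of-pointer operands. A hedged sketch of what that enables, conceptually (the IR line is illustrative; PtrOperand is an assumed Value*):

    //   %p = getelementptr <4 x i8*> %bases, <4 x i64> %offs  (one index only)
    // Both helpers look through the vector wrapper to the scalar pointer:
    unsigned AS = GetElementPtrInst::getAddressSpace(PtrOperand);
]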
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
@@ -1558,46 +1549,84 @@ ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
const Value *Mask) {
+ // V1 and V2 must be vectors of the same type.
if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
return false;
+ // Mask must be vector of i32.
VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
if (MaskTy == 0 || !MaskTy->getElementType()->isIntegerTy(32))
return false;
// Check to see if Mask is valid.
+ if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
+ return true;
+
if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
- VectorType *VTy = cast<VectorType>(V1->getType());
+ unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
- if (ConstantInt* CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
- if (CI->uge(VTy->getNumElements()*2))
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
+ if (CI->uge(V1Size*2))
return false;
} else if (!isa<UndefValue>(MV->getOperand(i))) {
return false;
}
}
+ return true;
}
- else if (!isa<UndefValue>(Mask) && !isa<ConstantAggregateZero>(Mask))
- return false;
- return true;
+ if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(Mask)) {
+ unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
+ for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
+ if (CDS->getElementAsInteger(i) >= V1Size*2)
+ return false;
+ return true;
+ }
+
+ // The bitcode reader can create a placeholder for a forward reference
+ // used as the shuffle mask. When this occurs, the shuffle mask will
+ // fall into this case and fail. To avoid this error, do this bit of
+ // ugliness to allow such a mask to pass.
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask))
+ if (CE->getOpcode() == Instruction::UserOp1)
+ return true;
+
+ return false;
}
/// getMaskValue - Return the index from the shuffle mask for the specified
/// output result. This is either -1 if the element is undef or a number less
/// than 2*numelements.
-int ShuffleVectorInst::getMaskValue(unsigned i) const {
- const Constant *Mask = cast<Constant>(getOperand(2));
- if (isa<UndefValue>(Mask)) return -1;
- if (isa<ConstantAggregateZero>(Mask)) return 0;
- const ConstantVector *MaskCV = cast<ConstantVector>(Mask);
- assert(i < MaskCV->getNumOperands() && "Index out of range");
-
- if (isa<UndefValue>(MaskCV->getOperand(i)))
+int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) {
+ assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
+ if (ConstantDataSequential *CDS =dyn_cast<ConstantDataSequential>(Mask))
+ return CDS->getElementAsInteger(i);
+ Constant *C = Mask->getAggregateElement(i);
+ if (isa<UndefValue>(C))
return -1;
- return cast<ConstantInt>(MaskCV->getOperand(i))->getZExtValue();
+ return cast<ConstantInt>(C)->getZExtValue();
+}
+
+/// getShuffleMask - Return the full mask for this instruction, where each
+/// element is the element number and undefs are returned as -1.
+ SmallVectorImpl<int> &Result) {
+ unsigned NumElts = Mask->getType()->getVectorNumElements();
+
+ if (ConstantDataSequential *CDS=dyn_cast<ConstantDataSequential>(Mask)) {
+ for (unsigned i = 0; i != NumElts; ++i)
+ Result.push_back(CDS->getElementAsInteger(i));
+ return;
+ }
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *C = Mask->getAggregateElement(i);
+ Result.push_back(isa<UndefValue>(C) ? -1 :
+ cast<ConstantInt>(C)->getZExtValue());
+ }
}
+
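[editor's note: a sketch of decoding a mask with the new static helpers, where SVI is an assumed ShuffleVectorInst* (illustrative only):

    SmallVector<int, 16> Mask;
    ShuffleVectorInst::getShuffleMask(cast<Constant>(SVI->getOperand(2)), Mask);
    for (unsigned i = 0, e = Mask.size(); i != e; ++i)
      if (Mask[i] >= 0) // -1 marks an undef lane
        errs() << "lane " << i << " <- input element " << Mask[i] << "\n";
]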
//===----------------------------------------------------------------------===//
// InsertValueInst Class
//===----------------------------------------------------------------------===//
@@ -1848,46 +1877,27 @@ BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
Instruction *InsertBefore) {
Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
- return new BinaryOperator(Instruction::FSub,
- zero, Op,
+ return new BinaryOperator(Instruction::FSub, zero, Op,
Op->getType(), Name, InsertBefore);
}
BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd) {
Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
- return new BinaryOperator(Instruction::FSub,
- zero, Op,
+ return new BinaryOperator(Instruction::FSub, zero, Op,
Op->getType(), Name, InsertAtEnd);
}
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
Instruction *InsertBefore) {
- Constant *C;
- if (VectorType *PTy = dyn_cast<VectorType>(Op->getType())) {
- C = Constant::getAllOnesValue(PTy->getElementType());
- C = ConstantVector::get(
- std::vector<Constant*>(PTy->getNumElements(), C));
- } else {
- C = Constant::getAllOnesValue(Op->getType());
- }
-
+ Constant *C = Constant::getAllOnesValue(Op->getType());
return new BinaryOperator(Instruction::Xor, Op, C,
Op->getType(), Name, InsertBefore);
}
BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd) {
- Constant *AllOnes;
- if (VectorType *PTy = dyn_cast<VectorType>(Op->getType())) {
- // Create a vector of all ones values.
- Constant *Elt = Constant::getAllOnesValue(PTy->getElementType());
- AllOnes = ConstantVector::get(
- std::vector<Constant*>(PTy->getNumElements(), Elt));
- } else {
- AllOnes = Constant::getAllOnesValue(Op->getType());
- }
-
+ Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
return new BinaryOperator(Instruction::Xor, Op, AllOnes,
Op->getType(), Name, InsertAtEnd);
}
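Both CreateNot bodies collapse because Constant::getAllOnesValue builds the all-ones splat for vector types itself; a small sketch under that assumption:

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *V4i32 = VectorType::get(Type::getInt32Ty(Ctx), 4);
  // <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>; no per-element
  // expansion needed in the caller any more.
  Constant *AllOnes = Constant::getAllOnesValue(V4i32);
  AllOnes->dump();
  return 0;
}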
@@ -1895,10 +1905,8 @@ BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
// isConstantAllOnes - Helper function for several functions below
static inline bool isConstantAllOnes(const Value *V) {
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
- return CI->isAllOnesValue();
- if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
- return CV->isAllOnesValue();
+ if (const Constant *C = dyn_cast<Constant>(V))
+ return C->isAllOnesValue();
return false;
}
@@ -1998,6 +2006,8 @@ bool BinaryOperator::isExact() const {
// CastInst Class
//===----------------------------------------------------------------------===//
+void CastInst::anchor() {}
+
// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
switch (getOpcode()) {
@@ -2042,8 +2052,7 @@ bool CastInst::isNoopCast(Instruction::CastOps Opcode,
Type *DestTy,
Type *IntPtrTy) {
switch (Opcode) {
- default:
- assert(0 && "Invalid CastOp");
+ default: llvm_unreachable("Invalid CastOp");
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
@@ -2236,13 +2245,10 @@ unsigned CastInst::isEliminableCastPair(
case 99:
// cast combination can't happen (error in input). This is for all cases
// where the MidTy is not the same for the two cast instructions.
- assert(0 && "Invalid Cast Combination");
- return 0;
+ llvm_unreachable("Invalid Cast Combination");
default:
- assert(0 && "Error in CastResults table!!!");
- return 0;
+ llvm_unreachable("Error in CastResults table!!!");
}
- return 0;
}
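The same mechanical rewrite recurs through the rest of the file; in isolation it looks like this (function and values illustrative):

#include "llvm/Support/ErrorHandling.h"

unsigned classify(int Kind) {
  switch (Kind) {
  case 0: return 10;
  case 1: return 20;
  default:
    // llvm_unreachable is noreturn in every build mode, so the dead
    // 'return 0;' that used to follow the switch can be deleted.
    llvm_unreachable("unexpected kind");
  }
}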
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
@@ -2262,10 +2268,8 @@ CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
- default:
- assert(0 && "Invalid opcode provided");
+ default: llvm_unreachable("Invalid opcode provided");
}
- return 0;
}
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
@@ -2285,10 +2289,8 @@ CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
- default:
- assert(0 && "Invalid opcode provided");
+ default: llvm_unreachable("Invalid opcode provided");
}
- return 0;
}
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
@@ -2557,9 +2559,8 @@ CastInst::getCastOpcode(
assert(DestBits == SrcBits &&
"Casting vector to floating point of different width");
return BitCast; // same size, no-op cast
- } else {
- llvm_unreachable("Casting pointer or non-first class to float");
}
+ llvm_unreachable("Casting pointer or non-first class to float");
} else if (DestTy->isVectorTy()) {
assert(DestBits == SrcBits &&
"Illegal cast to vector (wrong type or size)");
@@ -2569,24 +2570,16 @@ CastInst::getCastOpcode(
return BitCast; // ptr -> ptr
} else if (SrcTy->isIntegerTy()) {
return IntToPtr; // int -> ptr
- } else {
- assert(0 && "Casting pointer to other than pointer or int");
}
+ llvm_unreachable("Casting pointer to other than pointer or int");
} else if (DestTy->isX86_MMXTy()) {
if (SrcTy->isVectorTy()) {
assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
return BitCast; // 64-bit vector to MMX
- } else {
- assert(0 && "Illegal cast to X86_MMX");
}
- } else {
- assert(0 && "Casting to type that is not first-class");
+ llvm_unreachable("Illegal cast to X86_MMX");
}
-
- // If we fall through to here we probably hit an assertion cast above
- // and assertions are not turned on. Anything we return is an error, so
- // BitCast is as good a choice as any.
- return BitCast;
+ llvm_unreachable("Casting to type that is not first-class");
}
//===----------------------------------------------------------------------===//
@@ -2645,9 +2638,21 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
SrcLength == DstLength;
case Instruction::PtrToInt:
- return SrcTy->isPointerTy() && DstTy->isIntegerTy();
+ if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
+ return false;
+ if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
+ if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
+ return false;
+ return SrcTy->getScalarType()->isPointerTy() &&
+ DstTy->getScalarType()->isIntegerTy();
case Instruction::IntToPtr:
- return SrcTy->isIntegerTy() && DstTy->isPointerTy();
+ if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
+ return false;
+ if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
+ if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
+ return false;
+ return SrcTy->getScalarType()->isIntegerTy() &&
+ DstTy->getScalarType()->isPointerTy();
case Instruction::BitCast:
// BitCast implies a no-op cast of type only. No bits change.
// However, you can't cast pointers to anything but pointers.
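A sketch of what the relaxed PtrToInt check now accepts and still rejects, probed with an undef operand (types illustrative):

#include "llvm/Instructions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

void demo(LLVMContext &Ctx) {
  Type *V4Ptr = VectorType::get(Type::getInt8PtrTy(Ctx), 4);
  Type *V4i64 = VectorType::get(Type::getInt64Ty(Ctx), 4);
  Type *V2i64 = VectorType::get(Type::getInt64Ty(Ctx), 2);
  Value *V = UndefValue::get(V4Ptr);
  bool A = CastInst::castIsValid(Instruction::PtrToInt, V, V4i64); // true
  bool B = CastInst::castIsValid(Instruction::PtrToInt, V, V2i64); // false: element counts differ
  bool C = CastInst::castIsValid(Instruction::PtrToInt, V,
                                 Type::getInt64Ty(Ctx));           // false: vector to scalar
  (void)A; (void)B; (void)C;
}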
@@ -2890,7 +2895,7 @@ bool CmpInst::isEquality() const {
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
switch (pred) {
- default: assert(0 && "Unknown cmp predicate!");
+ default: llvm_unreachable("Unknown cmp predicate!");
case ICMP_EQ: return ICMP_NE;
case ICMP_NE: return ICMP_EQ;
case ICMP_UGT: return ICMP_ULE;
@@ -2923,7 +2928,7 @@ CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
switch (pred) {
- default: assert(0 && "Unknown icmp predicate!");
+ default: llvm_unreachable("Unknown icmp predicate!");
case ICMP_EQ: case ICMP_NE:
case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
return pred;
@@ -2936,7 +2941,7 @@ ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
switch (pred) {
- default: assert(0 && "Unknown icmp predicate!");
+ default: llvm_unreachable("Unknown icmp predicate!");
case ICMP_EQ: case ICMP_NE:
case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
return pred;
@@ -3012,7 +3017,7 @@ ICmpInst::makeConstantRange(Predicate pred, const APInt &C) {
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
switch (pred) {
- default: assert(0 && "Unknown cmp predicate!");
+ default: llvm_unreachable("Unknown cmp predicate!");
case ICMP_EQ: case ICMP_NE:
return pred;
case ICMP_SGT: return ICMP_SLT;
@@ -3147,31 +3152,32 @@ SwitchInst::~SwitchInst() {
/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
+ unsigned NewCaseIdx = getNumCases();
unsigned OpNo = NumOperands;
if (OpNo+2 > ReservedSpace)
growOperands(); // Get more space!
// Initialize some new operands.
assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
NumOperands = OpNo+2;
- OperandList[OpNo] = OnVal;
- OperandList[OpNo+1] = Dest;
+ CaseIt Case(this, NewCaseIdx);
+ Case.setValue(OnVal);
+ Case.setSuccessor(Dest);
}
-/// removeCase - This method removes the specified successor from the switch
-/// instruction. Note that this cannot be used to remove the default
-/// destination (successor #0).
-///
-void SwitchInst::removeCase(unsigned idx) {
- assert(idx != 0 && "Cannot remove the default case!");
- assert(idx*2 < getNumOperands() && "Successor index out of range!!!");
+/// removeCase - This method removes the specified case and its successor
+/// from the switch instruction.
+void SwitchInst::removeCase(CaseIt i) {
+ unsigned idx = i.getCaseIndex();
+
+ assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
unsigned NumOps = getNumOperands();
Use *OL = OperandList;
// Overwrite this case with the end of the list.
- if ((idx + 1) * 2 != NumOps) {
- OL[idx * 2] = OL[NumOps - 2];
- OL[idx * 2 + 1] = OL[NumOps - 1];
+ if (2 + (idx + 1) * 2 != NumOps) {
+ OL[2 + idx * 2] = OL[NumOps - 2];
+ OL[2 + idx * 2 + 1] = OL[NumOps - 1];
}
// Nuke the last value.
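With CaseIt, the "+2 for the default" operand arithmetic stays inside SwitchInst; client iteration now reads roughly like this (a sketch against the API introduced here):

#include "llvm/Instructions.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void listCases(SwitchInst *SI) {
  for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
       i != e; ++i)
    errs() << "case " << i.getCaseValue()->getSExtValue() << " -> "
           << i.getCaseSuccessor()->getName() << "\n";
  // removeCase() now takes one of these iterators rather than a raw
  // successor index, so the default case can't be removed by accident.
}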
@@ -3438,15 +3444,11 @@ ExtractElementInst *ExtractElementInst::clone_impl() const {
}
InsertElementInst *InsertElementInst::clone_impl() const {
- return InsertElementInst::Create(getOperand(0),
- getOperand(1),
- getOperand(2));
+ return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}
ShuffleVectorInst *ShuffleVectorInst::clone_impl() const {
- return new ShuffleVectorInst(getOperand(0),
- getOperand(1),
- getOperand(2));
+ return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
}
PHINode *PHINode::clone_impl() const {
@@ -3482,11 +3484,6 @@ ResumeInst *ResumeInst::clone_impl() const {
return new(1) ResumeInst(*this);
}
-UnwindInst *UnwindInst::clone_impl() const {
- LLVMContext &Context = getContext();
- return new UnwindInst(Context);
-}
-
UnreachableInst *UnreachableInst::clone_impl() const {
LLVMContext &Context = getContext();
return new UnreachableInst(Context);
diff --git a/contrib/llvm/lib/VMCore/LLVMContext.cpp b/contrib/llvm/lib/VMCore/LLVMContext.cpp
index ebd1e0a..68c5621 100644
--- a/contrib/llvm/lib/VMCore/LLVMContext.cpp
+++ b/contrib/llvm/lib/VMCore/LLVMContext.cpp
@@ -43,6 +43,16 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
// Create the 'prof' metadata kind.
unsigned ProfID = getMDKindID("prof");
assert(ProfID == MD_prof && "prof kind id drifted"); (void)ProfID;
+
+ // Create the 'fpaccuracy' metadata kind.
+ unsigned FPAccuracyID = getMDKindID("fpaccuracy");
+ assert(FPAccuracyID == MD_fpaccuracy && "fpaccuracy kind id drifted");
+ (void)FPAccuracyID;
+
+ // Create the 'range' metadata kind.
+ unsigned RangeID = getMDKindID("range");
+ assert(RangeID == MD_range && "range kind id drifted");
+ (void)RangeID;
}
LLVMContext::~LLVMContext() { delete pImpl; }
@@ -78,11 +88,11 @@ void *LLVMContext::getInlineAsmDiagnosticContext() const {
return pImpl->InlineAsmDiagContext;
}
-void LLVMContext::emitError(StringRef ErrorStr) {
+void LLVMContext::emitError(const Twine &ErrorStr) {
emitError(0U, ErrorStr);
}
-void LLVMContext::emitError(const Instruction *I, StringRef ErrorStr) {
+void LLVMContext::emitError(const Instruction *I, const Twine &ErrorStr) {
unsigned LocCookie = 0;
if (const MDNode *SrcLoc = I->getMetadata("srcloc")) {
if (SrcLoc->getNumOperands() != 0)
@@ -92,7 +102,7 @@ void LLVMContext::emitError(const Instruction *I, StringRef ErrorStr) {
return emitError(LocCookie, ErrorStr);
}
-void LLVMContext::emitError(unsigned LocCookie, StringRef ErrorStr) {
+void LLVMContext::emitError(unsigned LocCookie, const Twine &ErrorStr) {
// If there is no error handler installed, just print the error and exit.
if (pImpl->InlineAsmDiagHandler == 0) {
errs() << "error: " << ErrorStr << "\n";
@@ -100,7 +110,7 @@ void LLVMContext::emitError(unsigned LocCookie, StringRef ErrorStr) {
}
// If we do have an error handler, we can report the error and keep going.
- SMDiagnostic Diag("", "error: " + ErrorStr.str());
+ SMDiagnostic Diag("", SourceMgr::DK_Error, ErrorStr.str());
pImpl->InlineAsmDiagHandler(Diag, pImpl->InlineAsmDiagContext, LocCookie);
}
diff --git a/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp b/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
index 504b372..6279bb8 100644
--- a/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
+++ b/contrib/llvm/lib/VMCore/LLVMContextImpl.cpp
@@ -21,6 +21,7 @@ LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
: TheTrueVal(0), TheFalseVal(0),
VoidTy(C, Type::VoidTyID),
LabelTy(C, Type::LabelTyID),
+ HalfTy(C, Type::HalfTyID),
FloatTy(C, Type::FloatTyID),
DoubleTy(C, Type::DoubleTyID),
MetadataTy(C, Type::MetadataTyID),
@@ -47,6 +48,16 @@ struct DropReferences {
P.second->dropAllReferences();
}
};
+
+// Temporary - drops pair.first instead of second.
+struct DropFirst {
+ // Takes the value_type of a ConstantUniqueMap's internal map, whose 'second'
+ // is a Constant*.
+ template<typename PairT>
+ void operator()(const PairT &P) {
+ P.first->dropAllReferences();
+ }
+};
}
LLVMContextImpl::~LLVMContextImpl() {
@@ -57,25 +68,32 @@ LLVMContextImpl::~LLVMContextImpl() {
std::vector<Module*> Modules(OwnedModules.begin(), OwnedModules.end());
DeleteContainerPointers(Modules);
+ // Free the constants. This is important to do here to ensure that they are
+ // freed before the LeakDetector is torn down.
std::for_each(ExprConstants.map_begin(), ExprConstants.map_end(),
DropReferences());
std::for_each(ArrayConstants.map_begin(), ArrayConstants.map_end(),
- DropReferences());
+ DropFirst());
std::for_each(StructConstants.map_begin(), StructConstants.map_end(),
- DropReferences());
+ DropFirst());
std::for_each(VectorConstants.map_begin(), VectorConstants.map_end(),
- DropReferences());
+ DropFirst());
ExprConstants.freeConstants();
ArrayConstants.freeConstants();
StructConstants.freeConstants();
VectorConstants.freeConstants();
- AggZeroConstants.freeConstants();
- NullPtrConstants.freeConstants();
- UndefValueConstants.freeConstants();
+ DeleteContainerSeconds(CAZConstants);
+ DeleteContainerSeconds(CPNConstants);
+ DeleteContainerSeconds(UVConstants);
InlineAsms.freeConstants();
DeleteContainerSeconds(IntConstants);
DeleteContainerSeconds(FPConstants);
+ for (StringMap<ConstantDataSequential*>::iterator I = CDSConstants.begin(),
+ E = CDSConstants.end(); I != E; ++I)
+ delete I->second;
+ CDSConstants.clear();
+
// Destroy MDNodes. ~MDNode can move and remove nodes between the MDNodeSet
// and the NonUniquedMDNodes sets, so copy the values out first.
SmallVector<MDNode*, 8> MDNodes;
@@ -92,3 +110,24 @@ LLVMContextImpl::~LLVMContextImpl() {
// Destroy MDStrings.
DeleteContainerSeconds(MDStringCache);
}
+
+// ConstantsContext anchors
+void UnaryConstantExpr::anchor() { }
+
+void BinaryConstantExpr::anchor() { }
+
+void SelectConstantExpr::anchor() { }
+
+void ExtractElementConstantExpr::anchor() { }
+
+void InsertElementConstantExpr::anchor() { }
+
+void ShuffleVectorConstantExpr::anchor() { }
+
+void ExtractValueConstantExpr::anchor() { }
+
+void InsertValueConstantExpr::anchor() { }
+
+void GetElementPtrConstantExpr::anchor() { }
+
+void CompareConstantExpr::anchor() { }
diff --git a/contrib/llvm/lib/VMCore/LLVMContextImpl.h b/contrib/llvm/lib/VMCore/LLVMContextImpl.h
index a3f68fe..2252028 100644
--- a/contrib/llvm/lib/VMCore/LLVMContextImpl.h
+++ b/contrib/llvm/lib/VMCore/LLVMContextImpl.h
@@ -29,6 +29,7 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Hashing.h"
#include <vector>
namespace llvm {
@@ -51,12 +52,14 @@ struct DenseMapAPIntKeyInfo {
bool operator!=(const KeyTy& that) const {
return !this->operator==(that);
}
+ friend hash_code hash_value(const KeyTy &Key) {
+ return hash_combine(Key.type, Key.val);
+ }
};
static inline KeyTy getEmptyKey() { return KeyTy(APInt(1,0), 0); }
static inline KeyTy getTombstoneKey() { return KeyTy(APInt(1,1), 0); }
static unsigned getHashValue(const KeyTy &Key) {
- return DenseMapInfo<void*>::getHashValue(Key.type) ^
- Key.val.getHashValue();
+ return static_cast<unsigned>(hash_value(Key));
}
static bool isEqual(const KeyTy &LHS, const KeyTy &RHS) {
return LHS == RHS;
@@ -74,6 +77,9 @@ struct DenseMapAPFloatKeyInfo {
bool operator!=(const KeyTy& that) const {
return !this->operator==(that);
}
+ friend hash_code hash_value(const KeyTy &Key) {
+ return hash_combine(Key.val);
+ }
};
static inline KeyTy getEmptyKey() {
return KeyTy(APFloat(APFloat::Bogus,1));
@@ -82,13 +88,132 @@ struct DenseMapAPFloatKeyInfo {
return KeyTy(APFloat(APFloat::Bogus,2));
}
static unsigned getHashValue(const KeyTy &Key) {
- return Key.val.getHashValue();
+ return static_cast<unsigned>(hash_value(Key));
}
static bool isEqual(const KeyTy &LHS, const KeyTy &RHS) {
return LHS == RHS;
}
};
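Both key infos now defer to the combinators from the newly included llvm/ADT/Hashing.h; the general shape of their use, sketched with an illustrative key type:

#include "llvm/ADT/Hashing.h"
#include <string>

struct Key { int Kind; std::string Name; bool Flag; };

unsigned hashKey(const Key &K) {
  // hash_combine mixes heterogeneous fields into one hash_code; the
  // DenseMap infos above then narrow it to unsigned exactly like this.
  llvm::hash_code H = llvm::hash_combine(K.Kind, K.Name, K.Flag);
  return static_cast<unsigned>(H);
}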
+struct AnonStructTypeKeyInfo {
+ struct KeyTy {
+ ArrayRef<Type*> ETypes;
+ bool isPacked;
+ KeyTy(const ArrayRef<Type*>& E, bool P) :
+ ETypes(E), isPacked(P) {}
+ KeyTy(const KeyTy& that) :
+ ETypes(that.ETypes), isPacked(that.isPacked) {}
+ KeyTy(const StructType* ST) :
+ ETypes(ArrayRef<Type*>(ST->element_begin(), ST->element_end())),
+ isPacked(ST->isPacked()) {}
+ bool operator==(const KeyTy& that) const {
+ if (isPacked != that.isPacked)
+ return false;
+ if (ETypes != that.ETypes)
+ return false;
+ return true;
+ }
+ bool operator!=(const KeyTy& that) const {
+ return !this->operator==(that);
+ }
+ };
+ static inline StructType* getEmptyKey() {
+ return DenseMapInfo<StructType*>::getEmptyKey();
+ }
+ static inline StructType* getTombstoneKey() {
+ return DenseMapInfo<StructType*>::getTombstoneKey();
+ }
+ static unsigned getHashValue(const KeyTy& Key) {
+ return hash_combine(hash_combine_range(Key.ETypes.begin(),
+ Key.ETypes.end()),
+ Key.isPacked);
+ }
+ static unsigned getHashValue(const StructType *ST) {
+ return getHashValue(KeyTy(ST));
+ }
+ static bool isEqual(const KeyTy& LHS, const StructType *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return LHS == KeyTy(RHS);
+ }
+ static bool isEqual(const StructType *LHS, const StructType *RHS) {
+ return LHS == RHS;
+ }
+};
+
+struct FunctionTypeKeyInfo {
+ struct KeyTy {
+ const Type *ReturnType;
+ ArrayRef<Type*> Params;
+ bool isVarArg;
+ KeyTy(const Type* R, const ArrayRef<Type*>& P, bool V) :
+ ReturnType(R), Params(P), isVarArg(V) {}
+ KeyTy(const KeyTy& that) :
+ ReturnType(that.ReturnType),
+ Params(that.Params),
+ isVarArg(that.isVarArg) {}
+ KeyTy(const FunctionType* FT) :
+ ReturnType(FT->getReturnType()),
+ Params(ArrayRef<Type*>(FT->param_begin(), FT->param_end())),
+ isVarArg(FT->isVarArg()) {}
+ bool operator==(const KeyTy& that) const {
+ if (ReturnType != that.ReturnType)
+ return false;
+ if (isVarArg != that.isVarArg)
+ return false;
+ if (Params != that.Params)
+ return false;
+ return true;
+ }
+ bool operator!=(const KeyTy& that) const {
+ return !this->operator==(that);
+ }
+ };
+ static inline FunctionType* getEmptyKey() {
+ return DenseMapInfo<FunctionType*>::getEmptyKey();
+ }
+ static inline FunctionType* getTombstoneKey() {
+ return DenseMapInfo<FunctionType*>::getTombstoneKey();
+ }
+ static unsigned getHashValue(const KeyTy& Key) {
+ return hash_combine(Key.ReturnType,
+ hash_combine_range(Key.Params.begin(),
+ Key.Params.end()),
+ Key.isVarArg);
+ }
+ static unsigned getHashValue(const FunctionType *FT) {
+ return getHashValue(KeyTy(FT));
+ }
+ static bool isEqual(const KeyTy& LHS, const FunctionType *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return LHS == KeyTy(RHS);
+ }
+ static bool isEqual(const FunctionType *LHS, const FunctionType *RHS) {
+ return LHS == RHS;
+ }
+};
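The point of these KeyInfo structs is the two-key protocol: probe the set with a cheap descriptor and only materialize an object on a miss. A self-contained schematic of the same find_as pattern (StrObj and its map are illustrative, not LLVM API):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include <string>
using namespace llvm;

struct StrObj { std::string Val; };              // stand-in for StructType etc.

struct StrObjKeyInfo {
  static inline StrObj *getEmptyKey() {
    return DenseMapInfo<StrObj*>::getEmptyKey();
  }
  static inline StrObj *getTombstoneKey() {
    return DenseMapInfo<StrObj*>::getTombstoneKey();
  }
  static unsigned getHashValue(StringRef Key) {  // hash the cheap descriptor
    return static_cast<unsigned>(hash_combine_range(Key.begin(), Key.end()));
  }
  static unsigned getHashValue(const StrObj *O) {
    return getHashValue(StringRef(O->Val));
  }
  static bool isEqual(StringRef L, const StrObj *R) {
    if (R == getEmptyKey() || R == getTombstoneKey()) return false;
    return L == StringRef(R->Val);
  }
  static bool isEqual(const StrObj *L, const StrObj *R) { return L == R; }
};

typedef DenseMap<StrObj*, bool, StrObjKeyInfo> StrObjMap;

StrObj *getOrCreate(StrObjMap &Map, StringRef S) {
  StrObjMap::iterator I = Map.find_as(S);        // no allocation on a hit
  if (I != Map.end()) return I->first;
  StrObj *O = new StrObj();                      // miss: build, then insert
  O->Val = S.str();
  Map[O] = true;
  return O;
}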
+
+// Provide a FoldingSetTrait::Equals specialization for MDNode that can use a
+// shortcut to avoid comparing all operands.
+template<> struct FoldingSetTrait<MDNode> : DefaultFoldingSetTrait<MDNode> {
+ static bool Equals(const MDNode &X, const FoldingSetNodeID &ID,
+ unsigned IDHash, FoldingSetNodeID &TempID) {
+ assert(!X.isNotUniqued() && "Non-uniqued MDNode in FoldingSet?");
+ // First, check if the cached hashes match. If they don't we can skip the
+ // expensive operand walk.
+ if (X.Hash != IDHash)
+ return false;
+
+ // If they match we have to compare the operands.
+ X.Profile(TempID);
+ return TempID == ID;
+ }
+ static unsigned ComputeHash(const MDNode &X, FoldingSetNodeID &) {
+ return X.Hash; // Return cached hash.
+ }
+};
+
/// DebugRecVH - This is a CallbackVH used to keep the Scope -> index maps
/// up to date as MDNodes mutate. This class is implemented in DebugLoc.cpp.
class DebugRecVH : public CallbackVH {
@@ -129,7 +254,7 @@ public:
DenseMapAPFloatKeyInfo> FPMapTy;
FPMapTy FPConstants;
- StringMap<MDString*> MDStringCache;
+ StringMap<Value*> MDStringCache;
FoldingSet<MDNode> MDNodeSet;
// MDNodes may be uniqued or not uniqued. When they're not uniqued, they
@@ -138,23 +263,23 @@ public:
// on Context destruction.
SmallPtrSet<MDNode*, 1> NonUniquedMDNodes;
- ConstantUniqueMap<char, char, Type, ConstantAggregateZero> AggZeroConstants;
+ DenseMap<Type*, ConstantAggregateZero*> CAZConstants;
- typedef ConstantUniqueMap<std::vector<Constant*>, ArrayRef<Constant*>,
- ArrayType, ConstantArray, true /*largekey*/> ArrayConstantsTy;
+ typedef ConstantAggrUniqueMap<ArrayType, ConstantArray> ArrayConstantsTy;
ArrayConstantsTy ArrayConstants;
- typedef ConstantUniqueMap<std::vector<Constant*>, ArrayRef<Constant*>,
- StructType, ConstantStruct, true /*largekey*/> StructConstantsTy;
+ typedef ConstantAggrUniqueMap<StructType, ConstantStruct> StructConstantsTy;
StructConstantsTy StructConstants;
- typedef ConstantUniqueMap<std::vector<Constant*>, ArrayRef<Constant*>,
- VectorType, ConstantVector> VectorConstantsTy;
+ typedef ConstantAggrUniqueMap<VectorType, ConstantVector> VectorConstantsTy;
VectorConstantsTy VectorConstants;
- ConstantUniqueMap<char, char, PointerType, ConstantPointerNull>
- NullPtrConstants;
- ConstantUniqueMap<char, char, Type, UndefValue> UndefValueConstants;
+ DenseMap<PointerType*, ConstantPointerNull*> CPNConstants;
+
+ DenseMap<Type*, UndefValue*> UVConstants;
+
+ StringMap<ConstantDataSequential*> CDSConstants;
+
DenseMap<std::pair<Function*, BasicBlock*> , BlockAddress*> BlockAddresses;
ConstantUniqueMap<ExprMapKeyType, const ExprMapKeyType&, Type, ConstantExpr>
@@ -169,7 +294,7 @@ public:
LeakDetectorImpl<Value> LLVMObjects;
// Basic type instances.
- Type VoidTy, LabelTy, FloatTy, DoubleTy, MetadataTy;
+ Type VoidTy, LabelTy, HalfTy, FloatTy, DoubleTy, MetadataTy;
Type X86_FP80Ty, FP128Ty, PPC_FP128Ty, X86_MMXTy;
IntegerType Int1Ty, Int8Ty, Int16Ty, Int32Ty, Int64Ty;
@@ -180,9 +305,10 @@ public:
DenseMap<unsigned, IntegerType*> IntegerTypes;
- // TODO: Optimize FunctionTypes/AnonStructTypes!
- std::map<std::vector<Type*>, FunctionType*> FunctionTypes;
- std::map<std::vector<Type*>, StructType*> AnonStructTypes;
+ typedef DenseMap<FunctionType*, bool, FunctionTypeKeyInfo> FunctionTypeMap;
+ FunctionTypeMap FunctionTypes;
+ typedef DenseMap<StructType*, bool, AnonStructTypeKeyInfo> StructTypeMap;
+ StructTypeMap AnonStructTypes;
StringMap<StructType*> NamedStructTypes;
unsigned NamedStructTypesUniqueID;
diff --git a/contrib/llvm/lib/VMCore/Metadata.cpp b/contrib/llvm/lib/VMCore/Metadata.cpp
index ace4dc2..090b09a 100644
--- a/contrib/llvm/lib/VMCore/Metadata.cpp
+++ b/contrib/llvm/lib/VMCore/Metadata.cpp
@@ -29,16 +29,19 @@ using namespace llvm;
// MDString implementation.
//
-MDString::MDString(LLVMContext &C, StringRef S)
- : Value(Type::getMetadataTy(C), Value::MDStringVal), Str(S) {}
+void MDString::anchor() { }
+
+MDString::MDString(LLVMContext &C)
+ : Value(Type::getMetadataTy(C), Value::MDStringVal) {}
MDString *MDString::get(LLVMContext &Context, StringRef Str) {
LLVMContextImpl *pImpl = Context.pImpl;
- StringMapEntry<MDString *> &Entry =
+ StringMapEntry<Value*> &Entry =
pImpl->MDStringCache.GetOrCreateValue(Str);
- MDString *&S = Entry.getValue();
- if (!S) S = new MDString(Context, Entry.getKey());
- return S;
+ Value *&S = Entry.getValue();
+ if (!S) S = new MDString(Context);
+ S->setValueName(&Entry);
+ return cast<MDString>(S);
}
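The MDString's characters now live in the context's string map entry and are reached through the ordinary Value name machinery; observable behaviour is unchanged (sketch):

#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/ADT/StringRef.h"
using namespace llvm;

void demo(LLVMContext &Ctx) {
  MDString *A = MDString::get(Ctx, "deopt");
  MDString *B = MDString::get(Ctx, "deopt");
  // Uniqued per context: same map entry, same object.
  bool Same = (A == B);                    // true
  StringRef Text = A->getString();         // "deopt", via the name entry
  (void)Same; (void)Text;
}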
//===----------------------------------------------------------------------===//
@@ -48,14 +51,26 @@ MDString *MDString::get(LLVMContext &Context, StringRef Str) {
// Use CallbackVH to hold MDNode operands.
namespace llvm {
class MDNodeOperand : public CallbackVH {
- MDNode *Parent;
+ MDNode *getParent() {
+ MDNodeOperand *Cur = this;
+
+ while (Cur->getValPtrInt() != 1)
+ --Cur;
+
+ assert(Cur->getValPtrInt() == 1 &&
+ "Couldn't find the beginning of the operand list!");
+ return reinterpret_cast<MDNode*>(Cur) - 1;
+ }
+
public:
- MDNodeOperand(Value *V, MDNode *P) : CallbackVH(V), Parent(P) {}
+ MDNodeOperand(Value *V) : CallbackVH(V) {}
~MDNodeOperand() {}
- void set(Value *V) {
- setValPtr(V);
- }
+ void set(Value *V) { this->setValPtr(V); }
+
+ /// setAsFirstOperand - Accessor method to mark the operand as the first in
+ /// the list.
+ void setAsFirstOperand(unsigned V) { this->setValPtrInt(V); }
virtual void deleted();
virtual void allUsesReplacedWith(Value *NV);
@@ -64,15 +79,13 @@ public:
void MDNodeOperand::deleted() {
- Parent->replaceOperand(this, 0);
+ getParent()->replaceOperand(this, 0);
}
void MDNodeOperand::allUsesReplacedWith(Value *NV) {
- Parent->replaceOperand(this, NV);
+ getParent()->replaceOperand(this, NV);
}
-
-
//===----------------------------------------------------------------------===//
// MDNode implementation.
//
@@ -85,6 +98,11 @@ static MDNodeOperand *getOperandPtr(MDNode *N, unsigned Op) {
return reinterpret_cast<MDNodeOperand*>(N+1)+Op;
}
+void MDNode::replaceOperandWith(unsigned i, Value *Val) {
+ MDNodeOperand *Op = getOperandPtr(this, i);
+ replaceOperand(Op, Val);
+}
+
MDNode::MDNode(LLVMContext &C, ArrayRef<Value*> Vals, bool isFunctionLocal)
: Value(Type::getMetadataTy(C), Value::MDNodeVal) {
NumOperands = Vals.size();
@@ -95,8 +113,13 @@ MDNode::MDNode(LLVMContext &C, ArrayRef<Value*> Vals, bool isFunctionLocal)
// Initialize the operand list, which is co-allocated on the end of the node.
unsigned i = 0;
for (MDNodeOperand *Op = getOperandPtr(this, 0), *E = Op+NumOperands;
- Op != E; ++Op, ++i)
- new (Op) MDNodeOperand(Vals[i], this);
+ Op != E; ++Op, ++i) {
+ new (Op) MDNodeOperand(Vals[i]);
+
+ // Mark the first MDNodeOperand as being the first in the list of operands.
+ if (i == 0)
+ Op->setAsFirstOperand(1);
+ }
}
@@ -161,12 +184,13 @@ static const Function *assertLocalFunction(const MDNode *N) {
const Function *MDNode::getFunction() const {
#ifndef NDEBUG
return assertLocalFunction(this);
-#endif
+#else
if (!isFunctionLocal()) return NULL;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
if (const Function *F = getFunctionForValue(getOperand(i)))
return F;
return NULL;
+#endif
}
// destroy - Delete this node. Only when there are no uses.
@@ -197,11 +221,11 @@ MDNode *MDNode::getMDNode(LLVMContext &Context, ArrayRef<Value*> Vals,
ID.AddPointer(Vals[i]);
void *InsertPoint;
- MDNode *N = NULL;
-
- if ((N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint)))
+ MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+ if (N || !Insert)
return N;
-
+
bool isFunctionLocal = false;
switch (FL) {
case FL_Unknown:
@@ -226,6 +250,9 @@ MDNode *MDNode::getMDNode(LLVMContext &Context, ArrayRef<Value*> Vals,
void *Ptr = malloc(sizeof(MDNode)+Vals.size()*sizeof(MDNodeOperand));
N = new (Ptr) MDNode(Context, Vals, isFunctionLocal);
+ // Cache the operand hash.
+ N->Hash = ID.ComputeHash();
+
// InsertPoint will have been set by the FindNodeOrInsertPos call.
pImpl->MDNodeSet.InsertNode(N, InsertPoint);
@@ -349,6 +376,8 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
return;
}
+ // Cache the operand hash.
+ Hash = ID.ComputeHash();
// InsertPoint will have been set by the FindNodeOrInsertPos call.
pImpl->MDNodeSet.InsertNode(this, InsertPoint);
@@ -425,12 +454,12 @@ StringRef NamedMDNode::getName() const {
// Instruction Metadata method implementations.
//
-void Instruction::setMetadata(const char *Kind, MDNode *Node) {
+void Instruction::setMetadata(StringRef Kind, MDNode *Node) {
if (Node == 0 && !hasMetadata()) return;
setMetadata(getContext().getMDKindID(Kind), Node);
}
-MDNode *Instruction::getMetadataImpl(const char *Kind) const {
+MDNode *Instruction::getMetadataImpl(StringRef Kind) const {
return getMetadataImpl(getContext().getMDKindID(Kind));
}
@@ -468,9 +497,11 @@ void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
}
// Otherwise, we're removing metadata from an instruction.
- assert(hasMetadataHashEntry() &&
- getContext().pImpl->MetadataStore.count(this) &&
+ assert((hasMetadataHashEntry() ==
+ getContext().pImpl->MetadataStore.count(this)) &&
"HasMetadata bit out of date!");
+ if (!hasMetadataHashEntry())
+ return; // Nothing to remove!
LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this];
// Common case is removing the only entry.
@@ -541,17 +572,15 @@ getAllMetadataOtherThanDebugLocImpl(SmallVectorImpl<std::pair<unsigned,
getContext().pImpl->MetadataStore.count(this) &&
"Shouldn't have called this");
const LLVMContextImpl::MDMapTy &Info =
- getContext().pImpl->MetadataStore.find(this)->second;
+ getContext().pImpl->MetadataStore.find(this)->second;
assert(!Info.empty() && "Shouldn't have called this");
-
Result.append(Info.begin(), Info.end());
-
+
// Sort the resulting array so it is stable.
if (Result.size() > 1)
array_pod_sort(Result.begin(), Result.end());
}
-
/// clearMetadataHashEntries - Clear all hashtable-based metadata from
/// this instruction.
void Instruction::clearMetadataHashEntries() {
diff --git a/contrib/llvm/lib/VMCore/Module.cpp b/contrib/llvm/lib/VMCore/Module.cpp
index c29029b..e8bc6db 100644
--- a/contrib/llvm/lib/VMCore/Module.cpp
+++ b/contrib/llvm/lib/VMCore/Module.cpp
@@ -321,11 +321,67 @@ NamedMDNode *Module::getOrInsertNamedMetadata(StringRef Name) {
return NMD;
}
+/// eraseNamedMetadata - Remove the given NamedMDNode from this module and
+/// delete it.
void Module::eraseNamedMetadata(NamedMDNode *NMD) {
static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab)->erase(NMD->getName());
NamedMDList.erase(NMD);
}
+/// getModuleFlagsMetadata - Returns the module flags in the provided vector.
+void Module::
+getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const {
+ const NamedMDNode *ModFlags = getModuleFlagsMetadata();
+ if (!ModFlags) return;
+
+ for (unsigned i = 0, e = ModFlags->getNumOperands(); i != e; ++i) {
+ MDNode *Flag = ModFlags->getOperand(i);
+ ConstantInt *Behavior = cast<ConstantInt>(Flag->getOperand(0));
+ MDString *Key = cast<MDString>(Flag->getOperand(1));
+ Value *Val = Flag->getOperand(2);
+ Flags.push_back(ModuleFlagEntry(ModFlagBehavior(Behavior->getZExtValue()),
+ Key, Val));
+ }
+}
+
+/// getModuleFlagsMetadata - Returns the NamedMDNode in the module that
+/// represents module-level flags. This method returns null if there are no
+/// module-level flags.
+NamedMDNode *Module::getModuleFlagsMetadata() const {
+ return getNamedMetadata("llvm.module.flags");
+}
+
+/// getOrInsertModuleFlagsMetadata - Returns the NamedMDNode in the module that
+/// represents module-level flags. If module-level flags aren't found, it
+/// creates the named metadata that contains them.
+NamedMDNode *Module::getOrInsertModuleFlagsMetadata() {
+ return getOrInsertNamedMetadata("llvm.module.flags");
+}
+
+/// addModuleFlag - Add a module-level flag to the module-level flags
+/// metadata. It will create the module-level flags named metadata if it doesn't
+/// already exist.
+void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ Value *Val) {
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ Value *Ops[3] = {
+ ConstantInt::get(Int32Ty, Behavior), MDString::get(Context, Key), Val
+ };
+ getOrInsertModuleFlagsMetadata()->addOperand(MDNode::get(Context, Ops));
+}
+void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ uint32_t Val) {
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ addModuleFlag(Behavior, Key, ConstantInt::get(Int32Ty, Val));
+}
+void Module::addModuleFlag(MDNode *Node) {
+ assert(Node->getNumOperands() == 3 &&
+ "Invalid number of operands for module flag!");
+ assert(isa<ConstantInt>(Node->getOperand(0)) &&
+ isa<MDString>(Node->getOperand(1)) &&
+ "Invalid operand types for module flag!");
+ getOrInsertModuleFlagsMetadata()->addOperand(Node);
+}
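A hedged sketch of the new module-flag API from the client side (the flag name is illustrative):

#include "llvm/Module.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

void demo(Module &M) {
  // First use creates the llvm.module.flags named metadata.
  M.addModuleFlag(Module::Error, "my-flag", 1);

  SmallVector<Module::ModuleFlagEntry, 4> Flags;
  M.getModuleFlagsMetadata(Flags);
  for (unsigned i = 0, e = Flags.size(); i != e; ++i)
    (void)Flags[i].Key;  // Behavior / Key (an MDString*) / Val per entry
}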
//===----------------------------------------------------------------------===//
// Methods to control the materialization of GlobalValues in the Module.
diff --git a/contrib/llvm/lib/VMCore/Pass.cpp b/contrib/llvm/lib/VMCore/Pass.cpp
index 9afc540..994a7ff 100644
--- a/contrib/llvm/lib/VMCore/Pass.cpp
+++ b/contrib/llvm/lib/VMCore/Pass.cpp
@@ -25,11 +25,9 @@ using namespace llvm;
// Pass Implementation
//
-Pass::Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { }
-
// Force out-of-line virtual method.
-Pass::~Pass() {
- delete Resolver;
+Pass::~Pass() {
+ delete Resolver;
}
// Force out-of-line virtual method.
@@ -48,7 +46,7 @@ bool Pass::mustPreserveAnalysisID(char &AID) const {
return Resolver->getAnalysisIfAvailable(&AID, true) != 0;
}
-// dumpPassStructure - Implement the -debug-passes=Structure option
+// dumpPassStructure - Implement the -debug-pass=Structure option
void Pass::dumpPassStructure(unsigned Offset) {
dbgs().indent(Offset*2) << getPassName() << "\n";
}
@@ -71,7 +69,7 @@ void Pass::preparePassManager(PMStack &) {
PassManagerType Pass::getPotentialPassManagerType() const {
// Default implementation.
- return PMT_Unknown;
+ return PMT_Unknown;
}
void Pass::getAnalysisUsage(AnalysisUsage &) const {
@@ -155,9 +153,8 @@ PassManagerType FunctionPass::getPotentialPassManagerType() const {
Pass *BasicBlockPass::createPrinterPass(raw_ostream &O,
const std::string &Banner) const {
-
+
llvm_unreachable("BasicBlockPass printing unsupported.");
- return 0;
}
bool BasicBlockPass::doInitialization(Module &) {
@@ -181,7 +178,7 @@ bool BasicBlockPass::doFinalization(Module &) {
}
PassManagerType BasicBlockPass::getPotentialPassManagerType() const {
- return PMT_BasicBlockPassManager;
+ return PMT_BasicBlockPassManager;
}
const PassInfo *Pass::lookupPassInfo(const void *TI) {
@@ -192,6 +189,13 @@ const PassInfo *Pass::lookupPassInfo(StringRef Arg) {
return PassRegistry::getPassRegistry()->getPassInfo(Arg);
}
+Pass *Pass::createPass(AnalysisID ID) {
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(ID);
+ if (!PI)
+ return NULL;
+ return PI->createPass();
+}
+
Pass *PassInfo::createPass() const {
assert((!isAnalysisGroup() || NormalCtor) &&
"No default implementation found for analysis group!");
@@ -246,7 +250,7 @@ namespace {
typedef AnalysisUsage::VectorType VectorType;
VectorType &CFGOnlyList;
GetCFGOnlyPasses(VectorType &L) : CFGOnlyList(L) {}
-
+
void passEnumerate(const PassInfo *P) {
if (P->isCFGOnlyPass())
CFGOnlyList.push_back(P->getTypeInfo());
diff --git a/contrib/llvm/lib/VMCore/PassManager.cpp b/contrib/llvm/lib/VMCore/PassManager.cpp
index ecedb1d..28fbaa6 100644
--- a/contrib/llvm/lib/VMCore/PassManager.cpp
+++ b/contrib/llvm/lib/VMCore/PassManager.cpp
@@ -14,7 +14,6 @@
#include "llvm/PassManagers.h"
#include "llvm/PassManager.h"
-#include "llvm/DebugInfoProbe.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CommandLine.h"
@@ -26,7 +25,6 @@
#include "llvm/Support/PassNameParser.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Mutex.h"
-#include "llvm/ADT/StringMap.h"
#include <algorithm>
#include <map>
using namespace llvm;
@@ -84,32 +82,28 @@ PrintAfterAll("print-after-all",
/// This is a helper to determine whether to print IR before or
/// after a pass.
-static bool ShouldPrintBeforeOrAfterPass(const void *PassID,
+static bool ShouldPrintBeforeOrAfterPass(const PassInfo *PI,
PassOptionList &PassesToPrint) {
- if (const llvm::PassInfo *PI =
- PassRegistry::getPassRegistry()->getPassInfo(PassID)) {
- for (unsigned i = 0, ie = PassesToPrint.size(); i < ie; ++i) {
- const llvm::PassInfo *PassInf = PassesToPrint[i];
- if (PassInf)
- if (PassInf->getPassArgument() == PI->getPassArgument()) {
- return true;
- }
- }
+ for (unsigned i = 0, ie = PassesToPrint.size(); i < ie; ++i) {
+ const llvm::PassInfo *PassInf = PassesToPrint[i];
+ if (PassInf)
+ if (PassInf->getPassArgument() == PI->getPassArgument()) {
+ return true;
+ }
}
return false;
}
-
/// This is a utility to check whether a pass should have IR dumped
/// before it.
-static bool ShouldPrintBeforePass(const void *PassID) {
- return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(PassID, PrintBefore);
+static bool ShouldPrintBeforePass(const PassInfo *PI) {
+ return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(PI, PrintBefore);
}
/// This is a utility to check whether a pass should have IR dumped
/// after it.
-static bool ShouldPrintAfterPass(const void *PassID) {
- return PrintAfterAll || ShouldPrintBeforeOrAfterPass(PassID, PrintAfter);
+static bool ShouldPrintAfterPass(const PassInfo *PI) {
+ return PrintAfterAll || ShouldPrintBeforeOrAfterPass(PI, PrintAfter);
}
} // End of llvm namespace
@@ -223,6 +217,7 @@ namespace llvm {
class FunctionPassManagerImpl : public Pass,
public PMDataManager,
public PMTopLevelManager {
+ virtual void anchor();
private:
bool wasRun;
public:
@@ -263,27 +258,15 @@ public:
virtual PMDataManager *getAsPMDataManager() { return this; }
virtual Pass *getAsPass() { return this; }
+ virtual PassManagerType getTopLevelPassManagerType() {
+ return PMT_FunctionPassManager;
+ }
/// Pass Manager itself does not invalidate any analysis info.
void getAnalysisUsage(AnalysisUsage &Info) const {
Info.setPreservesAll();
}
- void addTopLevelPass(Pass *P) {
- if (ImmutablePass *IP = P->getAsImmutablePass()) {
- // P is a immutable pass and it will be managed by this
- // top level manager. Set up analysis resolver to connect them.
- AnalysisResolver *AR = new AnalysisResolver(*this);
- P->setResolver(AR);
- initializeAnalysisImpl(P);
- addImmutablePass(IP);
- recordAvailableAnalysis(IP);
- } else {
- P->assignPassManager(activeStack, PMT_FunctionPassManager);
- }
-
- }
-
FPPassManager *getContainedManager(unsigned N) {
assert(N < PassManagers.size() && "Pass number out of range!");
FPPassManager *FP = static_cast<FPPassManager *>(PassManagers[N]);
@@ -291,6 +274,8 @@ public:
}
};
+void FunctionPassManagerImpl::anchor() {}
+
char FunctionPassManagerImpl::ID = 0;
//===----------------------------------------------------------------------===//
@@ -384,6 +369,7 @@ char MPPassManager::ID = 0;
class PassManagerImpl : public Pass,
public PMDataManager,
public PMTopLevelManager {
+ virtual void anchor();
public:
static char ID;
@@ -413,22 +399,11 @@ public:
Info.setPreservesAll();
}
- void addTopLevelPass(Pass *P) {
- if (ImmutablePass *IP = P->getAsImmutablePass()) {
- // P is a immutable pass and it will be managed by this
- // top level manager. Set up analysis resolver to connect them.
- AnalysisResolver *AR = new AnalysisResolver(*this);
- P->setResolver(AR);
- initializeAnalysisImpl(P);
- addImmutablePass(IP);
- recordAvailableAnalysis(IP);
- } else {
- P->assignPassManager(activeStack, PMT_ModulePassManager);
- }
- }
-
virtual PMDataManager *getAsPMDataManager() { return this; }
virtual Pass *getAsPass() { return this; }
+ virtual PassManagerType getTopLevelPassManagerType() {
+ return PMT_ModulePassManager;
+ }
MPPassManager *getContainedManager(unsigned N) {
assert(N < PassManagers.size() && "Pass number out of range!");
@@ -437,26 +412,14 @@ public:
}
};
+void PassManagerImpl::anchor() {}
+
char PassManagerImpl::ID = 0;
} // End of llvm namespace
namespace {
//===----------------------------------------------------------------------===//
-// DebugInfoProbe
-
-static DebugInfoProbeInfo *TheDebugProbe;
-static void createDebugInfoProbe() {
- if (TheDebugProbe) return;
-
- // Constructed the first time this is called. This guarantees that the
- // object will be constructed, if -enable-debug-info-probe is set,
- // before static globals, thus it will be destroyed before them.
- static ManagedStatic<DebugInfoProbeInfo> DIP;
- TheDebugProbe = &*DIP;
-}
-
-//===----------------------------------------------------------------------===//
/// TimingInfo Class - This class is used to calculate information about the
/// amount of time each pass takes to execute. This only happens when
/// -time-passes is enabled on the command line.
@@ -654,7 +617,32 @@ void PMTopLevelManager::schedulePass(Pass *P) {
}
// Now all required passes are available.
- addTopLevelPass(P);
+ if (ImmutablePass *IP = P->getAsImmutablePass()) {
+ // P is an immutable pass and it will be managed by this
+ // top level manager. Set up analysis resolver to connect them.
+ PMDataManager *DM = getAsPMDataManager();
+ AnalysisResolver *AR = new AnalysisResolver(*DM);
+ P->setResolver(AR);
+ DM->initializeAnalysisImpl(P);
+ addImmutablePass(IP);
+ DM->recordAvailableAnalysis(IP);
+ return;
+ }
+
+ if (PI && !PI->isAnalysis() && ShouldPrintBeforePass(PI)) {
+ Pass *PP = P->createPrinterPass(
+ dbgs(), std::string("*** IR Dump Before ") + P->getPassName() + " ***");
+ PP->assignPassManager(activeStack, getTopLevelPassManagerType());
+ }
+
+ // Add the requested pass to the best available pass manager.
+ P->assignPassManager(activeStack, getTopLevelPassManagerType());
+
+ if (PI && !PI->isAnalysis() && ShouldPrintAfterPass(PI)) {
+ Pass *PP = P->createPrinterPass(
+ dbgs(), std::string("*** IR Dump After ") + P->getPassName() + " ***");
+ PP->assignPassManager(activeStack, getTopLevelPassManagerType());
+ }
}
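For context: the printer passes spliced in here are the ones driven by the existing -print-before=<pass>, -print-after=<pass>, -print-before-all and -print-after-all options; hoisting that logic into schedulePass is what lets the per-PassManager addImpl wrappers further down be deleted.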
/// Find the pass that implements Analysis AID. Search immutable
@@ -1224,8 +1212,7 @@ void PMDataManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
}
Pass *PMDataManager::getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F) {
- assert(0 && "Unable to find on the fly pass");
- return NULL;
+ llvm_unreachable("Unable to find on the fly pass");
}
// Destructor
@@ -1351,31 +1338,13 @@ FunctionPassManager::~FunctionPassManager() {
delete FPM;
}
-/// addImpl - Add a pass to the queue of passes to run, without
-/// checking whether to add a printer pass.
-void FunctionPassManager::addImpl(Pass *P) {
- FPM->add(P);
-}
-
/// add - Add a pass to the queue of passes to run. This passes
/// ownership of the Pass to the PassManager. When the
/// PassManager_X is destroyed, the pass will be destroyed as well, so
/// there is no need to delete the pass. (TODO delete passes.)
/// This implies that all passes MUST be allocated with 'new'.
void FunctionPassManager::add(Pass *P) {
- // If this is a not a function pass, don't add a printer for it.
- const void *PassID = P->getPassID();
- if (P->getPassKind() == PT_Function)
- if (ShouldPrintBeforePass(PassID))
- addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump Before ")
- + P->getPassName() + " ***"));
-
- addImpl(P);
-
- if (P->getPassKind() == PT_Function)
- if (ShouldPrintAfterPass(PassID))
- addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump After ")
- + P->getPassName() + " ***"));
+ FPM->add(P);
}
/// run - Execute all of the passes scheduled for execution. Keep
@@ -1455,7 +1424,6 @@ void FunctionPassManagerImpl::releaseMemoryOnTheFly() {
bool FunctionPassManagerImpl::run(Function &F) {
bool Changed = false;
TimingInfo::createTheTimeInfo();
- createDebugInfoProbe();
initializeAllAnalysisInfo();
for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index)
@@ -1474,7 +1442,7 @@ bool FunctionPassManagerImpl::run(Function &F) {
char FPPassManager::ID = 0;
/// Print passes managed by this manager
void FPPassManager::dumpPassStructure(unsigned Offset) {
- llvm::dbgs() << std::string(Offset*2, ' ') << "FunctionPass Manager\n";
+ dbgs().indent(Offset*2) << "FunctionPass Manager\n";
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
FunctionPass *FP = getContainedPass(Index);
FP->dumpPassStructure(Offset + 1);
@@ -1503,16 +1471,13 @@ bool FPPassManager::runOnFunction(Function &F) {
dumpRequiredSet(FP);
initializeAnalysisImpl(FP);
- if (TheDebugProbe)
- TheDebugProbe->initialize(FP, F);
+
{
PassManagerPrettyStackEntry X(FP, F);
TimeRegion PassTimer(getPassTimer(FP));
LocalChanged |= FP->runOnFunction(F);
}
- if (TheDebugProbe)
- TheDebugProbe->finalize(FP, F);
Changed |= LocalChanged;
if (LocalChanged)
@@ -1662,7 +1627,6 @@ Pass* MPPassManager::getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F){
bool PassManagerImpl::run(Module &M) {
bool Changed = false;
TimingInfo::createTheTimeInfo();
- createDebugInfoProbe();
dumpArguments();
dumpPasses();
@@ -1687,27 +1651,12 @@ PassManager::~PassManager() {
delete PM;
}
-/// addImpl - Add a pass to the queue of passes to run, without
-/// checking whether to add a printer pass.
-void PassManager::addImpl(Pass *P) {
- PM->add(P);
-}
-
/// add - Add a pass to the queue of passes to run. This passes ownership of
/// the Pass to the PassManager. When the PassManager is destroyed, the pass
/// will be destroyed as well, so there is no need to delete the pass. This
/// implies that all passes MUST be allocated with 'new'.
void PassManager::add(Pass *P) {
- const void* PassID = P->getPassID();
- if (ShouldPrintBeforePass(PassID))
- addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump Before ")
- + P->getPassName() + " ***"));
-
- addImpl(P);
-
- if (ShouldPrintAfterPass(PassID))
- addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump After ")
- + P->getPassName() + " ***"));
+ PM->add(P);
}
/// run - Execute all of the passes scheduled for execution. Keep track of
@@ -1817,7 +1766,7 @@ void ModulePass::assignPassManager(PMStack &PMS,
void FunctionPass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
- // Find Module Pass Manager
+ // Find Function Pass Manager
while (!PMS.empty()) {
if (PMS.top()->getPassManagerType() > PMT_FunctionPassManager)
PMS.pop();
diff --git a/contrib/llvm/lib/VMCore/Type.cpp b/contrib/llvm/lib/VMCore/Type.cpp
index 10184bc..c6f3558 100644
--- a/contrib/llvm/lib/VMCore/Type.cpp
+++ b/contrib/llvm/lib/VMCore/Type.cpp
@@ -25,6 +25,7 @@ using namespace llvm;
Type *Type::getPrimitiveType(LLVMContext &C, TypeID IDNumber) {
switch (IDNumber) {
case VoidTyID : return getVoidTy(C);
+ case HalfTyID : return getHalfTy(C);
case FloatTyID : return getFloatTy(C);
case DoubleTyID : return getDoubleTy(C);
case X86_FP80TyID : return getX86_FP80Ty(C);
@@ -57,7 +58,7 @@ bool Type::isIntegerTy(unsigned Bitwidth) const {
bool Type::isIntOrIntVectorTy() const {
if (isIntegerTy())
return true;
- if (ID != Type::VectorTyID) return false;
+ if (getTypeID() != Type::VectorTyID) return false;
return cast<VectorType>(this)->getElementType()->isIntegerTy();
}
@@ -65,11 +66,12 @@ bool Type::isIntOrIntVectorTy() const {
/// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP types.
///
bool Type::isFPOrFPVectorTy() const {
- if (ID == Type::FloatTyID || ID == Type::DoubleTyID ||
- ID == Type::FP128TyID || ID == Type::X86_FP80TyID ||
- ID == Type::PPC_FP128TyID)
+ if (getTypeID() == Type::HalfTyID || getTypeID() == Type::FloatTyID ||
+ getTypeID() == Type::DoubleTyID ||
+ getTypeID() == Type::FP128TyID || getTypeID() == Type::X86_FP80TyID ||
+ getTypeID() == Type::PPC_FP128TyID)
return true;
- if (ID != Type::VectorTyID) return false;
+ if (getTypeID() != Type::VectorTyID) return false;
return cast<VectorType>(this)->getElementType()->isFloatingPointTy();
}
@@ -131,6 +133,7 @@ bool Type::isEmptyTy() const {
unsigned Type::getPrimitiveSizeInBits() const {
switch (getTypeID()) {
+ case Type::HalfTyID: return 16;
case Type::FloatTyID: return 32;
case Type::DoubleTyID: return 64;
case Type::X86_FP80TyID: return 80;
@@ -157,11 +160,12 @@ int Type::getFPMantissaWidth() const {
if (const VectorType *VTy = dyn_cast<VectorType>(this))
return VTy->getElementType()->getFPMantissaWidth();
assert(isFloatingPointTy() && "Not a floating point type!");
- if (ID == FloatTyID) return 24;
- if (ID == DoubleTyID) return 53;
- if (ID == X86_FP80TyID) return 64;
- if (ID == FP128TyID) return 113;
- assert(ID == PPC_FP128TyID && "unknown fp type");
+ if (getTypeID() == HalfTyID) return 11;
+ if (getTypeID() == FloatTyID) return 24;
+ if (getTypeID() == DoubleTyID) return 53;
+ if (getTypeID() == X86_FP80TyID) return 64;
+ if (getTypeID() == FP128TyID) return 113;
+ assert(getTypeID() == PPC_FP128TyID && "unknown fp type");
return -1;
}
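The new half type (IEEE-754 binary16) threads through these queries; a sketch:

#include "llvm/Type.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

void demo(LLVMContext &Ctx) {
  Type *HalfTy = Type::getHalfTy(Ctx);
  unsigned Bits = HalfTy->getPrimitiveSizeInBits(); // 16
  int Mantissa = HalfTy->getFPMantissaWidth();      // 11, hidden bit included
  (void)Bits; (void)Mantissa;
}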
@@ -181,24 +185,69 @@ bool Type::isSizedDerivedType() const {
if (!this->isStructTy())
return false;
- // Opaque structs have no size.
- if (cast<StructType>(this)->isOpaque())
- return false;
-
- // Okay, our struct is sized if all of the elements are.
- for (subtype_iterator I = subtype_begin(), E = subtype_end(); I != E; ++I)
- if (!(*I)->isSized())
- return false;
+ return cast<StructType>(this)->isSized();
+}
- return true;
+//===----------------------------------------------------------------------===//
+// Subclass Helper Methods
+//===----------------------------------------------------------------------===//
+
+unsigned Type::getIntegerBitWidth() const {
+ return cast<IntegerType>(this)->getBitWidth();
+}
+
+bool Type::isFunctionVarArg() const {
+ return cast<FunctionType>(this)->isVarArg();
+}
+
+Type *Type::getFunctionParamType(unsigned i) const {
+ return cast<FunctionType>(this)->getParamType(i);
+}
+
+unsigned Type::getFunctionNumParams() const {
+ return cast<FunctionType>(this)->getNumParams();
+}
+
+StringRef Type::getStructName() const {
+ return cast<StructType>(this)->getName();
+}
+
+unsigned Type::getStructNumElements() const {
+ return cast<StructType>(this)->getNumElements();
+}
+
+Type *Type::getStructElementType(unsigned N) const {
+ return cast<StructType>(this)->getElementType(N);
+}
+
+
+
+Type *Type::getSequentialElementType() const {
+ return cast<SequentialType>(this)->getElementType();
+}
+
+uint64_t Type::getArrayNumElements() const {
+ return cast<ArrayType>(this)->getNumElements();
+}
+
+unsigned Type::getVectorNumElements() const {
+ return cast<VectorType>(this)->getNumElements();
}
+unsigned Type::getPointerAddressSpace() const {
+ return cast<PointerType>(this)->getAddressSpace();
+}
+
+
+
+
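These forwarders exist so code that only sees a Type* can query the common derived-type properties without pulling in DerivedTypes.h or writing the cast itself; e.g. (sketch):

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

void demo(LLVMContext &Ctx) {
  Type *V8i16 = VectorType::get(Type::getInt16Ty(Ctx), 8);
  // Previously spelled cast<VectorType>(V8i16)->getNumElements().
  unsigned N = V8i16->getVectorNumElements(); // 8
  (void)N;
}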
//===----------------------------------------------------------------------===//
// Primitive 'Type' data
//===----------------------------------------------------------------------===//
Type *Type::getVoidTy(LLVMContext &C) { return &C.pImpl->VoidTy; }
Type *Type::getLabelTy(LLVMContext &C) { return &C.pImpl->LabelTy; }
+Type *Type::getHalfTy(LLVMContext &C) { return &C.pImpl->HalfTy; }
Type *Type::getFloatTy(LLVMContext &C) { return &C.pImpl->FloatTy; }
Type *Type::getDoubleTy(LLVMContext &C) { return &C.pImpl->DoubleTy; }
Type *Type::getMetadataTy(LLVMContext &C) { return &C.pImpl->MetadataTy; }
@@ -217,6 +266,10 @@ IntegerType *Type::getIntNTy(LLVMContext &C, unsigned N) {
return IntegerType::get(C, N);
}
+PointerType *Type::getHalfPtrTy(LLVMContext &C, unsigned AS) {
+ return getHalfTy(C)->getPointerTo(AS);
+}
+
PointerType *Type::getFloatPtrTy(LLVMContext &C, unsigned AS) {
return getFloatTy(C)->getPointerTo(AS);
}
@@ -328,23 +381,20 @@ FunctionType::FunctionType(Type *Result, ArrayRef<Type*> Params,
// FunctionType::get - The factory function for the FunctionType class.
FunctionType *FunctionType::get(Type *ReturnType,
ArrayRef<Type*> Params, bool isVarArg) {
- // TODO: This is brutally slow.
- std::vector<Type*> Key;
- Key.reserve(Params.size()+2);
- Key.push_back(const_cast<Type*>(ReturnType));
- for (unsigned i = 0, e = Params.size(); i != e; ++i)
- Key.push_back(const_cast<Type*>(Params[i]));
- if (isVarArg)
- Key.push_back(0);
-
LLVMContextImpl *pImpl = ReturnType->getContext().pImpl;
- FunctionType *&FT = pImpl->FunctionTypes[Key];
-
- if (FT == 0) {
+ FunctionTypeKeyInfo::KeyTy Key(ReturnType, Params, isVarArg);
+ LLVMContextImpl::FunctionTypeMap::iterator I =
+ pImpl->FunctionTypes.find_as(Key);
+ FunctionType *FT;
+
+ if (I == pImpl->FunctionTypes.end()) {
FT = (FunctionType*) pImpl->TypeAllocator.
- Allocate(sizeof(FunctionType) + sizeof(Type*)*(Params.size()+1),
+ Allocate(sizeof(FunctionType) + sizeof(Type*) * (Params.size() + 1),
AlignOf<FunctionType>::Alignment);
new (FT) FunctionType(ReturnType, Params, isVarArg);
+ pImpl->FunctionTypes[FT] = true;
+ } else {
+ FT = I->first;
}
return FT;
@@ -377,23 +427,22 @@ bool FunctionType::isValidArgumentType(Type *ArgTy) {
StructType *StructType::get(LLVMContext &Context, ArrayRef<Type*> ETypes,
bool isPacked) {
- // FIXME: std::vector is horribly inefficient for this probe.
- std::vector<Type*> Key;
- for (unsigned i = 0, e = ETypes.size(); i != e; ++i) {
- assert(isValidElementType(ETypes[i]) &&
- "Invalid type for structure element!");
- Key.push_back(ETypes[i]);
+ LLVMContextImpl *pImpl = Context.pImpl;
+ AnonStructTypeKeyInfo::KeyTy Key(ETypes, isPacked);
+ LLVMContextImpl::StructTypeMap::iterator I =
+ pImpl->AnonStructTypes.find_as(Key);
+ StructType *ST;
+
+ if (I == pImpl->AnonStructTypes.end()) {
+ // Value not found. Create a new type!
+ ST = new (Context.pImpl->TypeAllocator) StructType(Context);
+ ST->setSubclassData(SCDB_IsLiteral); // Literal struct.
+ ST->setBody(ETypes, isPacked);
+ Context.pImpl->AnonStructTypes[ST] = true;
+ } else {
+ ST = I->first;
}
- if (isPacked)
- Key.push_back(0);
-
- StructType *&ST = Context.pImpl->AnonStructTypes[Key];
- if (ST) return ST;
-
- // Value not found. Create a new type!
- ST = new (Context.pImpl->TypeAllocator) StructType(Context);
- ST->setSubclassData(SCDB_IsLiteral); // Literal struct.
- ST->setBody(ETypes, isPacked);
+
return ST;
}
@@ -403,13 +452,13 @@ void StructType::setBody(ArrayRef<Type*> Elements, bool isPacked) {
setSubclassData(getSubclassData() | SCDB_HasBody);
if (isPacked)
setSubclassData(getSubclassData() | SCDB_Packed);
-
- Type **Elts = getContext().pImpl->
- TypeAllocator.Allocate<Type*>(Elements.size());
- memcpy(Elts, Elements.data(), sizeof(Elements[0])*Elements.size());
+
+ unsigned NumElements = Elements.size();
+ Type **Elts = getContext().pImpl->TypeAllocator.Allocate<Type*>(NumElements);
+ memcpy(Elts, Elements.data(), sizeof(Elements[0]) * NumElements);
ContainedTys = Elts;
- NumContainedTys = Elements.size();
+ NumContainedTys = NumElements;
}
void StructType::setName(StringRef Name) {
@@ -434,9 +483,10 @@ void StructType::setName(StringRef Name) {
SmallString<64> TempStr(Name);
TempStr.push_back('.');
raw_svector_ostream TmpStream(TempStr);
+ unsigned NameSize = Name.size();
do {
- TempStr.resize(Name.size()+1);
+ TempStr.resize(NameSize + 1);
TmpStream.resync();
TmpStream << getContext().pImpl->NamedStructTypesUniqueID++;
@@ -520,6 +570,26 @@ StructType *StructType::create(StringRef Name, Type *type, ...) {
return llvm::StructType::create(Ctx, StructFields, Name);
}
+bool StructType::isSized() const {
+ if ((getSubclassData() & SCDB_IsSized) != 0)
+ return true;
+ if (isOpaque())
+ return false;
+
+ // Okay, our struct is sized if all of the elements are, but if one of the
+ // elements is opaque, the struct isn't sized *yet*, but may become sized in
+ // the future, so just bail out without caching.
+ for (element_iterator I = element_begin(), E = element_end(); I != E; ++I)
+ if (!(*I)->isSized())
+ return false;
+
+ // Here we cheat a bit and cast away const-ness. The goal is to memoize when
+ // we find a sized type, as types can only move from opaque to sized, not the
+ // other way.
+ const_cast<StructType*>(this)->setSubclassData(
+ getSubclassData() | SCDB_IsSized);
+ return true;
+}
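Sizedness is now computed lazily and memoized once it becomes true, since a struct can only move from opaque to sized; a sketch (struct name illustrative):

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

void demo(LLVMContext &Ctx) {
  StructType *ST = StructType::create(Ctx, "opaque.t");
  bool Before = ST->isSized();                 // false: no body yet
  Type *Fields[] = { Type::getInt32Ty(Ctx) };
  ST->setBody(Fields, /*isPacked=*/false);
  bool After = ST->isSized();                  // true; SCDB_IsSized now cached
  (void)Before; (void)After;
}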
StringRef StructType::getName() const {
assert(!isLiteral() && "Literal structs never have names");
@@ -664,6 +734,8 @@ VectorType *VectorType::get(Type *elementType, unsigned NumElements) {
}
bool VectorType::isValidElementType(Type *ElemTy) {
+ if (PointerType *PTy = dyn_cast<PointerType>(ElemTy))
+ ElemTy = PTy->getElementType();
return ElemTy->isIntegerTy() || ElemTy->isFloatingPointTy();
}
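Allowing pointer element types makes vectors of pointers first-class, which the vector ptrtoint/inttoptr changes above rely on; a sketch:

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

void demo(LLVMContext &Ctx) {
  Type *I8Ptr = Type::getInt8PtrTy(Ctx);
  bool OK = VectorType::isValidElementType(I8Ptr); // now true
  VectorType *V4p = VectorType::get(I8Ptr, 4);     // <4 x i8*>
  (void)OK; (void)V4p;
}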
@@ -689,7 +761,12 @@ PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace) {
PointerType::PointerType(Type *E, unsigned AddrSpace)
: SequentialType(PointerTyID, E) {
+#ifndef NDEBUG
+ const unsigned oldNCT = NumContainedTys;
+#endif
setSubclassData(AddrSpace);
+ // Check for miscompile. PR11652.
+ assert(oldNCT == NumContainedTys && "bitfield written out of bounds?");
}
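
The NDEBUG-guarded snapshot above is a defensive check for a suspected miscompile (PR11652) in which a bitfield store clobbered the adjacent field. A generic sketch of the same pattern, with illustrative names:

    #include <cassert>

    struct Packed {
      unsigned Bits : 8;  // mirrors the subclass-data bitfield
      unsigned Neighbor;  // mirrors NumContainedTys
      Packed() : Bits(0), Neighbor(1) {}
      void setBits(unsigned V) {
    #ifndef NDEBUG
        const unsigned OldNeighbor = Neighbor; // snapshot before the store
    #endif
        Bits = V & 0xFFu;
        assert(OldNeighbor == Neighbor && "bitfield written out of bounds?");
      }
    };
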
PointerType *Type::getPointerTo(unsigned addrs) {
diff --git a/contrib/llvm/lib/VMCore/Use.cpp b/contrib/llvm/lib/VMCore/Use.cpp
index 359a151..0128adc 100644
--- a/contrib/llvm/lib/VMCore/Use.cpp
+++ b/contrib/llvm/lib/VMCore/Use.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Value.h"
+#include <new>
namespace llvm {
diff --git a/contrib/llvm/lib/VMCore/User.cpp b/contrib/llvm/lib/VMCore/User.cpp
index f01fa34..5f35ce4 100644
--- a/contrib/llvm/lib/VMCore/User.cpp
+++ b/contrib/llvm/lib/VMCore/User.cpp
@@ -17,6 +17,8 @@ namespace llvm {
// User Class
//===----------------------------------------------------------------------===//
+void User::anchor() {}
+
// replaceUsesOfWith - Replaces all references to the "From" definition with
// references to the "To" definition.
//
diff --git a/contrib/llvm/lib/VMCore/Value.cpp b/contrib/llvm/lib/VMCore/Value.cpp
index 2fa5f08..4006b2c 100644
--- a/contrib/llvm/lib/VMCore/Value.cpp
+++ b/contrib/llvm/lib/VMCore/Value.cpp
@@ -66,7 +66,7 @@ Value::~Value() {
// a <badref>
//
if (!use_empty()) {
- dbgs() << "While deleting: " << *VTy << " %" << getNameStr() << "\n";
+ dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
for (use_iterator I = use_begin(), E = use_end(); I != E; ++I)
dbgs() << "Use still stuck around after Def is destroyed:"
<< **I << "\n";
@@ -76,7 +76,7 @@ Value::~Value() {
// If this value is named, destroy the name. This should not be in a symtab
// at this point.
- if (Name)
+ if (Name && SubclassID != MDStringVal)
Name->Destroy();
// There should be no uses of this object anymore, remove it.
@@ -108,6 +108,19 @@ bool Value::hasNUsesOrMore(unsigned N) const {
/// isUsedInBasicBlock - Return true if this value is used in the specified
/// basic block.
bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
+ // Start by scanning over the instructions looking for a use before we start
+ // the expensive use iteration.
+ unsigned MaxBlockSize = 3;
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
+ if (std::find(I->op_begin(), I->op_end(), this) != I->op_end())
+ return true;
+ if (--MaxBlockSize == 0) // If the block is larger, fall back to use_iterator
+ break;
+ }
+
+ if (MaxBlockSize != 0) // We scanned the entire block and found no use.
+ return false;
+
for (const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) {
const Instruction *User = dyn_cast<Instruction>(*I);
if (User && User->getParent() == BB)
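
The idea in this hunk is a two-phase query: probe the operand lists of the first few instructions in the block (cheap and cache-friendly), and only walk the value's possibly long use list when the block turns out to be big. A standalone sketch with toy instruction/block types, mirroring the budget logic:

    #include <algorithm>
    #include <vector>

    struct Block;
    struct Instr {
      const Block *Parent;           // owning block
      std::vector<const void *> Ops; // operand values
    };
    struct Block { std::vector<Instr> Insts; };

    bool isUsedInBlock(const Block &BB, const void *V,
                       const std::vector<const Instr *> &UseSites) {
      unsigned Budget = 3;
      for (const Instr &I : BB.Insts) {
        if (std::find(I.Ops.begin(), I.Ops.end(), V) != I.Ops.end())
          return true;
        if (--Budget == 0) // large block: bail out to the use-list walk
          break;
      }
      if (Budget != 0)     // loop ended naturally: whole block was scanned
        return false;
      for (const Instr *U : UseSites) // slow path: walk the use list instead
        if (U->Parent == &BB)
          return true;
      return false;
    }
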
@@ -156,11 +169,10 @@ StringRef Value::getName() const {
return Name->getKey();
}
-std::string Value::getNameStr() const {
- return getName().str();
-}
-
void Value::setName(const Twine &NewName) {
+ assert(SubclassID != MDStringVal &&
+ "Cannot set the name of MDString with this method!");
+
// Fast path for common IRBuilder case of setName("") when there is no name.
if (NewName.isTriviallyEmpty() && !hasName())
return;
@@ -219,6 +231,8 @@ void Value::setName(const Twine &NewName) {
/// takeName - transfer the name from V to this value, setting V's name to
/// empty. It is an error to call V->takeName(V).
void Value::takeName(Value *V) {
+ assert(SubclassID != MDStringVal && "Cannot take the name of an MDString!");
+
ValueSymbolTable *ST = 0;
// If this value has a name, drop it.
if (hasName()) {
@@ -308,20 +322,40 @@ void Value::replaceAllUsesWith(Value *New) {
BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
}
-Value *Value::stripPointerCasts() {
- if (!getType()->isPointerTy())
- return this;
+namespace {
+// Various metrics for how much to strip off of pointers.
+enum PointerStripKind {
+ PSK_ZeroIndices,
+ PSK_InBoundsConstantIndices,
+ PSK_InBounds
+};
+
+template <PointerStripKind StripKind>
+static Value *stripPointerCastsAndOffsets(Value *V) {
+ if (!V->getType()->isPointerTy())
+ return V;
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
SmallPtrSet<Value *, 4> Visited;
- Value *V = this;
Visited.insert(V);
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
- if (!GEP->hasAllZeroIndices())
- return V;
+ switch (StripKind) {
+ case PSK_ZeroIndices:
+ if (!GEP->hasAllZeroIndices())
+ return V;
+ break;
+ case PSK_InBoundsConstantIndices:
+ if (!GEP->hasAllConstantIndices())
+ return V;
+ // fallthrough
+ case PSK_InBounds:
+ if (!GEP->isInBounds())
+ return V;
+ break;
+ }
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
V = cast<Operator>(V)->getOperand(0);
@@ -337,10 +371,24 @@ Value *Value::stripPointerCasts() {
return V;
}
+} // namespace
+
+Value *Value::stripPointerCasts() {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
+}
+
+Value *Value::stripInBoundsConstantOffsets() {
+ return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
+}
+
+Value *Value::stripInBoundsOffsets() {
+ return stripPointerCastsAndOffsets<PSK_InBounds>(this);
+}
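
This refactoring replaces several near-identical pointer-stripping loops with one traversal, specialized at compile time by an enum template parameter; the public entry points just pick a policy. A compact sketch of the idiom over a toy chain type (not LLVM's actual API):

    enum StripKind { StripZero, StripConst, StripAny };

    struct Node {
      Node *Inner;      // next link in the cast/GEP chain (null at the end)
      bool ZeroOffset;  // this link adds no offset
      bool ConstOffset; // this link adds a compile-time-constant offset
    };

    template <StripKind Kind> Node *strip(Node *N) {
      while (N->Inner) {
        switch (Kind) {
        case StripZero:
          if (!N->ZeroOffset) return N;
          break;
        case StripConst:
          if (!N->ConstOffset) return N;
          break;
        case StripAny:
          break;
        }
        N = N->Inner;
      }
      return N;
    }

    Node *stripZeroOffsets(Node *N)     { return strip<StripZero>(N); }
    Node *stripConstantOffsets(Node *N) { return strip<StripConst>(N); }
    Node *stripAllOffsets(Node *N)      { return strip<StripAny>(N); }
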
/// isDereferenceablePointer - Test if this value is always a pointer to
/// allocated and suitably aligned memory for a simple load or store.
-bool Value::isDereferenceablePointer() const {
+static bool isDereferenceablePointer(const Value *V,
+ SmallPtrSet<const Value *, 32> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
// It's also not always safe to follow a bitcast, for example:
@@ -349,20 +397,22 @@ bool Value::isDereferenceablePointer() const {
// be handled using TargetData to check sizes and alignments though.
// These are obviously ok.
- if (isa<AllocaInst>(this)) return true;
+ if (isa<AllocaInst>(V)) return true;
// Global variables which can't collapse to null are ok.
- if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(this))
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
return !GV->hasExternalWeakLinkage();
// byval arguments are ok.
- if (const Argument *A = dyn_cast<Argument>(this))
+ if (const Argument *A = dyn_cast<Argument>(V))
return A->hasByValAttr();
-
+
// For GEPs, determine if the indexing lands within the allocated object.
- if (const GEPOperator *GEP = dyn_cast<GEPOperator>(this)) {
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Conservatively require that the base pointer be fully dereferenceable.
- if (!GEP->getOperand(0)->isDereferenceablePointer())
+ if (!Visited.insert(GEP->getOperand(0)))
+ return false;
+ if (!isDereferenceablePointer(GEP->getOperand(0), Visited))
return false;
// Check the indices.
gep_type_iterator GTI = gep_type_begin(GEP);
@@ -396,6 +446,13 @@ bool Value::isDereferenceablePointer() const {
return false;
}
+/// isDereferenceablePointer - Test if this value is always a pointer to
+/// allocated and suitably aligned memory for a simple load or store.
+bool Value::isDereferenceablePointer() const {
+ SmallPtrSet<const Value *, 32> Visited;
+ return ::isDereferenceablePointer(this, Visited);
+}
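
The recursion over GEP base pointers could loop forever on cyclic (unreachable) IR, so the rewrite threads a visited set through a static worker; an insert that reports "already present" doubles as the cycle check. A minimal sketch of the guard, with std::unordered_set standing in for SmallPtrSet:

    #include <unordered_set>
    #include <vector>

    struct N {
      std::vector<const N *> Bases;
      bool LeafOK;
    };

    static bool deref(const N *V, std::unordered_set<const N *> &Visited) {
      for (const N *B : V->Bases) {
        if (!Visited.insert(B).second) // already seen: a cycle, give up
          return false;
        if (!deref(B, Visited))
          return false;
      }
      return V->Bases.empty() ? V->LeafOK : true;
    }

    bool isDereferenceable(const N *V) {
      std::unordered_set<const N *> Visited; // owned by the entry point
      return deref(V, Visited);
    }
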
+
/// DoPHITranslation - If this value is a PHI node with CurBB as its parent,
/// return the value in the PHI node corresponding to PredBB. If not, return
/// ourself. This is useful if you want to know the value something has in a
@@ -425,7 +482,7 @@ void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
setPrevPtr(List);
if (Next) {
Next->setPrevPtr(&Next);
- assert(VP == Next->VP && "Added to wrong list?");
+ assert(VP.getPointer() == Next->VP.getPointer() && "Added to wrong list?");
}
}
@@ -441,14 +498,14 @@ void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
/// AddToUseList - Add this ValueHandle to the use list for VP.
void ValueHandleBase::AddToUseList() {
- assert(VP && "Null pointer doesn't have a use list!");
+ assert(VP.getPointer() && "Null pointer doesn't have a use list!");
- LLVMContextImpl *pImpl = VP->getContext().pImpl;
+ LLVMContextImpl *pImpl = VP.getPointer()->getContext().pImpl;
- if (VP->HasValueHandle) {
+ if (VP.getPointer()->HasValueHandle) {
// If this value already has a ValueHandle, then it must be in the
// ValueHandles map already.
- ValueHandleBase *&Entry = pImpl->ValueHandles[VP];
+ ValueHandleBase *&Entry = pImpl->ValueHandles[VP.getPointer()];
assert(Entry != 0 && "Value doesn't have any handles?");
AddToExistingUseList(&Entry);
return;
@@ -462,10 +519,10 @@ void ValueHandleBase::AddToUseList() {
DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();
- ValueHandleBase *&Entry = Handles[VP];
+ ValueHandleBase *&Entry = Handles[VP.getPointer()];
assert(Entry == 0 && "Value really did already have handles?");
AddToExistingUseList(&Entry);
- VP->HasValueHandle = true;
+ VP.getPointer()->HasValueHandle = true;
// If reallocation didn't happen or if this was the first insertion, don't
// walk the table.
@@ -477,14 +534,16 @@ void ValueHandleBase::AddToUseList() {
// Okay, reallocation did happen. Fix the Prev Pointers.
for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
E = Handles.end(); I != E; ++I) {
- assert(I->second && I->first == I->second->VP && "List invariant broken!");
+ assert(I->second && I->first == I->second->VP.getPointer() &&
+ "List invariant broken!");
I->second->setPrevPtr(&I->second);
}
}
/// RemoveFromUseList - Remove this ValueHandle from its current use list.
void ValueHandleBase::RemoveFromUseList() {
- assert(VP && VP->HasValueHandle && "Pointer doesn't have a use list!");
+ assert(VP.getPointer() && VP.getPointer()->HasValueHandle &&
+ "Pointer doesn't have a use list!");
// Unlink this from its use list.
ValueHandleBase **PrevPtr = getPrevPtr();
@@ -500,11 +559,11 @@ void ValueHandleBase::RemoveFromUseList() {
// If the Next pointer was null, then it is possible that this was the last
// ValueHandle watching VP. If so, delete its entry from the ValueHandles
// map.
- LLVMContextImpl *pImpl = VP->getContext().pImpl;
+ LLVMContextImpl *pImpl = VP.getPointer()->getContext().pImpl;
DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
- Handles.erase(VP);
- VP->HasValueHandle = false;
+ Handles.erase(VP.getPointer());
+ VP.getPointer()->HasValueHandle = false;
}
}
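
The mechanical VP -> VP.getPointer() rewrite through these hunks follows VP's type changing from a plain Value* to a PointerIntPair, which folds extra flag state into the pointer's spare low alignment bits. A tiny stand-in showing the trick, assuming at least 2-byte pointer alignment:

    #include <cassert>
    #include <cstdint>

    template <typename T> class PtrIntPair {
      uintptr_t Raw = 0;
    public:
      void setPointer(T *P) {
        assert((reinterpret_cast<uintptr_t>(P) & 1u) == 0 && "unaligned");
        Raw = reinterpret_cast<uintptr_t>(P) | (Raw & 1u);
      }
      void setInt(bool B) { Raw = (Raw & ~uintptr_t(1)) | (B ? 1u : 0u); }
      T *getPointer() const {
        return reinterpret_cast<T *>(Raw & ~uintptr_t(1));
      }
      bool getInt() const { return (Raw & 1u) != 0; }
    };
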
@@ -554,7 +613,7 @@ void ValueHandleBase::ValueIsDeleted(Value *V) {
// All callbacks, weak references, and assertingVHs should be dropped by now.
if (V->HasValueHandle) {
#ifndef NDEBUG // Only in +Asserts mode...
- dbgs() << "While deleting: " << *V->getType() << " %" << V->getNameStr()
+ dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
<< "\n";
if (pImpl->ValueHandles[V]->getKind() == Assert)
llvm_unreachable("An asserting value handle still pointed to this"
@@ -617,8 +676,8 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
case Tracking:
case Weak:
dbgs() << "After RAUW from " << *Old->getType() << " %"
- << Old->getNameStr() << " to " << *New->getType() << " %"
- << New->getNameStr() << "\n";
+ << Old->getName() << " to " << *New->getType() << " %"
+ << New->getName() << "\n";
llvm_unreachable("A tracking or weak value handle still pointed to the"
" old value!\n");
default:
diff --git a/contrib/llvm/lib/VMCore/ValueTypes.cpp b/contrib/llvm/lib/VMCore/ValueTypes.cpp
index e13bd7d..9a8e185 100644
--- a/contrib/llvm/lib/VMCore/ValueTypes.cpp
+++ b/contrib/llvm/lib/VMCore/ValueTypes.cpp
@@ -87,8 +87,7 @@ unsigned EVT::getExtendedSizeInBits() const {
return ITy->getBitWidth();
if (VectorType *VTy = dyn_cast<VectorType>(LLVMTy))
return VTy->getBitWidth();
- assert(false && "Unrecognized extended type!");
- return 0; // Suppress warnings.
+ llvm_unreachable("Unrecognized extended type!");
}
/// getEVTString - This function returns value type as a string, e.g. "i32".
@@ -101,13 +100,13 @@ std::string EVT::getEVTString() const {
if (isInteger())
return "i" + utostr(getSizeInBits());
llvm_unreachable("Invalid EVT!");
- return "?";
case MVT::i1: return "i1";
case MVT::i8: return "i8";
case MVT::i16: return "i16";
case MVT::i32: return "i32";
case MVT::i64: return "i64";
case MVT::i128: return "i128";
+ case MVT::f16: return "f16";
case MVT::f32: return "f32";
case MVT::f64: return "f64";
case MVT::f80: return "f80";
@@ -134,12 +133,13 @@ std::string EVT::getEVTString() const {
case MVT::v4i64: return "v4i64";
case MVT::v8i64: return "v8i64";
case MVT::v2f32: return "v2f32";
+ case MVT::v2f16: return "v2f16";
case MVT::v4f32: return "v4f32";
case MVT::v8f32: return "v8f32";
case MVT::v2f64: return "v2f64";
case MVT::v4f64: return "v4f64";
case MVT::Metadata:return "Metadata";
- case MVT::untyped: return "untyped";
+ case MVT::Untyped: return "Untyped";
}
}
@@ -158,6 +158,7 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
case MVT::i32: return Type::getInt32Ty(Context);
case MVT::i64: return Type::getInt64Ty(Context);
case MVT::i128: return IntegerType::get(Context, 128);
+ case MVT::f16: return Type::getHalfTy(Context);
case MVT::f32: return Type::getFloatTy(Context);
case MVT::f64: return Type::getDoubleTy(Context);
case MVT::f80: return Type::getX86_FP80Ty(Context);
@@ -180,6 +181,7 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const {
case MVT::v2i64: return VectorType::get(Type::getInt64Ty(Context), 2);
case MVT::v4i64: return VectorType::get(Type::getInt64Ty(Context), 4);
case MVT::v8i64: return VectorType::get(Type::getInt64Ty(Context), 8);
+ case MVT::v2f16: return VectorType::get(Type::getHalfTy(Context), 2);
case MVT::v2f32: return VectorType::get(Type::getFloatTy(Context), 2);
case MVT::v4f32: return VectorType::get(Type::getFloatTy(Context), 4);
case MVT::v8f32: return VectorType::get(Type::getFloatTy(Context), 8);
@@ -197,11 +199,11 @@ EVT EVT::getEVT(Type *Ty, bool HandleUnknown){
default:
if (HandleUnknown) return MVT(MVT::Other);
llvm_unreachable("Unknown type!");
- return MVT::isVoid;
case Type::VoidTyID:
return MVT::isVoid;
case Type::IntegerTyID:
return getIntegerVT(Ty->getContext(), cast<IntegerType>(Ty)->getBitWidth());
+ case Type::HalfTyID: return MVT(MVT::f16);
case Type::FloatTyID: return MVT(MVT::f32);
case Type::DoubleTyID: return MVT(MVT::f64);
case Type::X86_FP80TyID: return MVT(MVT::f80);
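
The f16/v2f16 additions have to land in both directions of the EVT/IR-type mapping (getTypeForEVT and getEVT) or round-tripping breaks. A toy sketch of that invariant, with made-up enums rather than the real MVT table:

    #include <cassert>

    enum SimpleVT { F16, F32, F64 };
    enum IRKind  { HalfTy, FloatTy, DoubleTy };

    IRKind typeForVT(SimpleVT VT) {
      switch (VT) {
      case F16: return HalfTy; // the newly wired-up half mapping
      case F32: return FloatTy;
      case F64: return DoubleTy;
      }
      return DoubleTy; // unreachable
    }

    SimpleVT vtForType(IRKind K) {
      switch (K) {
      case HalfTy:   return F16;
      case FloatTy:  return F32;
      case DoubleTy: return F64;
      }
      return F64; // unreachable
    }

    int main() {
      const SimpleVT All[] = {F16, F32, F64};
      for (SimpleVT VT : All)
        assert(vtForType(typeForVT(VT)) == VT); // both directions must agree
    }
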
diff --git a/contrib/llvm/lib/VMCore/Verifier.cpp b/contrib/llvm/lib/VMCore/Verifier.cpp
index 9564b7d..96492e4 100644
--- a/contrib/llvm/lib/VMCore/Verifier.cpp
+++ b/contrib/llvm/lib/VMCore/Verifier.cpp
@@ -1,4 +1,4 @@
-//===-- Verifier.cpp - Implement the Module Verifier -------------*- C++ -*-==//
+//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
//
// The LLVM Compiler Infrastructure
//
@@ -51,6 +51,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/InlineAsm.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
@@ -117,7 +118,6 @@ namespace {
struct Verifier : public FunctionPass, public InstVisitor<Verifier> {
static char ID; // Pass ID, replacement for typeid
bool Broken; // Is this module found to be broken?
- bool RealPass; // Are we not being run by a PassManager?
VerifierFailureAction action;
// What to do if verification fails.
Module *Mod; // Module we are verifying right now
@@ -143,13 +143,13 @@ namespace {
const Value *PersonalityFn;
Verifier()
- : FunctionPass(ID), Broken(false), RealPass(true),
+ : FunctionPass(ID), Broken(false),
action(AbortProcessAction), Mod(0), Context(0), DT(0),
MessagesStr(Messages), PersonalityFn(0) {
initializeVerifierPass(*PassRegistry::getPassRegistry());
}
explicit Verifier(VerifierFailureAction ctn)
- : FunctionPass(ID), Broken(false), RealPass(true), action(ctn), Mod(0),
+ : FunctionPass(ID), Broken(false), action(ctn), Mod(0),
Context(0), DT(0), MessagesStr(Messages), PersonalityFn(0) {
initializeVerifierPass(*PassRegistry::getPassRegistry());
}
@@ -158,17 +158,14 @@ namespace {
Mod = &M;
Context = &M.getContext();
- // If this is a real pass, in a pass manager, we must abort before
- // returning back to the pass manager, or else the pass manager may try to
- // run other passes on the broken module.
- if (RealPass)
- return abortIfBroken();
- return false;
+ // We must abort before returning back to the pass manager, or else the
+ // pass manager may try to run other passes on the broken module.
+ return abortIfBroken();
}
bool runOnFunction(Function &F) {
// Get dominator information if we are being run by PassManager
- if (RealPass) DT = &getAnalysis<DominatorTree>();
+ DT = &getAnalysis<DominatorTree>();
Mod = F.getParent();
if (!Context) Context = &F.getContext();
@@ -177,13 +174,9 @@ namespace {
InstsInThisBlock.clear();
PersonalityFn = 0;
- // If this is a real pass, in a pass manager, we must abort before
- // returning back to the pass manager, or else the pass manager may try to
- // run other passes on the broken module.
- if (RealPass)
- return abortIfBroken();
-
- return false;
+ // We must abort before returning back to the pass manager, or else the
+ // pass manager may try to run other passes on the broken module.
+ return abortIfBroken();
}
bool doFinalization(Module &M) {
@@ -214,8 +207,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequiredID(PreVerifyID);
- if (RealPass)
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTree>();
}
/// abortIfBroken - If the module is broken and we are supposed to abort on
@@ -225,7 +217,6 @@ namespace {
if (!Broken) return false;
MessagesStr << "Broken module found, ";
switch (action) {
- default: llvm_unreachable("Unknown action");
case AbortProcessAction:
MessagesStr << "compilation aborted!\n";
dbgs() << MessagesStr.str();
@@ -239,6 +230,7 @@ namespace {
MessagesStr << "compilation terminated.\n";
return true;
}
+ llvm_unreachable("Invalid action");
}
@@ -279,6 +271,7 @@ namespace {
void visitGetElementPtrInst(GetElementPtrInst &GEP);
void visitLoadInst(LoadInst &LI);
void visitStoreInst(StoreInst &SI);
+ void verifyDominatesUse(Instruction &I, unsigned i);
void visitInstruction(Instruction &I);
void visitTerminatorInst(TerminatorInst &I);
void visitBranchInst(BranchInst &BI);
@@ -547,7 +540,7 @@ void Verifier::VerifyParameterAttrs(Attributes Attrs, Type *Ty,
for (unsigned i = 0;
i < array_lengthof(Attribute::MutuallyIncompatible); ++i) {
Attributes MutI = Attrs & Attribute::MutuallyIncompatible[i];
- Assert1(!(MutI & (MutI - 1)), "Attributes " +
+ Assert1(MutI.isEmptyOrSingleton(), "Attributes " +
Attribute::getAsString(MutI) + " are incompatible!", V);
}
@@ -607,7 +600,7 @@ void Verifier::VerifyFunctionAttrs(FunctionType *FT,
for (unsigned i = 0;
i < array_lengthof(Attribute::MutuallyIncompatible); ++i) {
Attributes MutI = FAttrs & Attribute::MutuallyIncompatible[i];
- Assert1(!(MutI & (MutI - 1)), "Attributes " +
+ Assert1(MutI.isEmptyOrSingleton(), "Attributes " +
Attribute::getAsString(MutI) + " are incompatible!", V);
}
}
@@ -812,11 +805,11 @@ void Verifier::visitSwitchInst(SwitchInst &SI) {
// have the same type as the switched-on value.
Type *SwitchTy = SI.getCondition()->getType();
SmallPtrSet<ConstantInt*, 32> Constants;
- for (unsigned i = 1, e = SI.getNumCases(); i != e; ++i) {
- Assert1(SI.getCaseValue(i)->getType() == SwitchTy,
+ for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); i != e; ++i) {
+ Assert1(i.getCaseValue()->getType() == SwitchTy,
"Switch constants must all be same type as switch value!", &SI);
- Assert2(Constants.insert(SI.getCaseValue(i)),
- "Duplicate integer as switch case", &SI, SI.getCaseValue(i));
+ Assert2(Constants.insert(i.getCaseValue()),
+ "Duplicate integer as switch case", &SI, i.getCaseValue());
}
visitTerminatorInst(SI);
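
The old loop indexed cases from 1 because slot 0 held the default destination; the new CaseIt iterator hides that encoding so callers can no longer get it wrong. A toy sketch of the shape of such an iterator (the real CaseIt interface differs):

    #include <utility>
    #include <vector>

    struct Switch {
      std::vector<std::pair<int, int>> Cases; // (case value, destination id)

      struct CaseIt {
        const Switch *S;
        unsigned Idx;
        int getCaseValue() const { return S->Cases[Idx].first; }
        CaseIt &operator++() { ++Idx; return *this; }
        bool operator!=(const CaseIt &O) const { return Idx != O.Idx; }
      };
      CaseIt case_begin() const { return CaseIt{this, 0}; }
      CaseIt case_end() const { return CaseIt{this, unsigned(Cases.size())}; }
    };

    int sumCaseValues(const Switch &S) {
      int Sum = 0;
      for (Switch::CaseIt I = S.case_begin(), E = S.case_end(); I != E; ++I)
        Sum += I.getCaseValue(); // no off-by-one default slot to dodge
      return Sum;
    }
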
@@ -1035,8 +1028,19 @@ void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
Type *SrcTy = I.getOperand(0)->getType();
Type *DestTy = I.getType();
- Assert1(SrcTy->isPointerTy(), "PtrToInt source must be pointer", &I);
- Assert1(DestTy->isIntegerTy(), "PtrToInt result must be integral", &I);
+ Assert1(SrcTy->getScalarType()->isPointerTy(),
+ "PtrToInt source must be pointer", &I);
+ Assert1(DestTy->getScalarType()->isIntegerTy(),
+ "PtrToInt result must be integral", &I);
+ Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "PtrToInt type mismatch", &I);
+
+ if (SrcTy->isVectorTy()) {
+ VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
+ VectorType *VDest = dyn_cast<VectorType>(DestTy);
+ Assert1(VSrc->getNumElements() == VDest->getNumElements(),
+ "PtrToInt Vector width mismatch", &I);
+ }
visitInstruction(I);
}
@@ -1046,9 +1050,18 @@ void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
Type *SrcTy = I.getOperand(0)->getType();
Type *DestTy = I.getType();
- Assert1(SrcTy->isIntegerTy(), "IntToPtr source must be an integral", &I);
- Assert1(DestTy->isPointerTy(), "IntToPtr result must be a pointer",&I);
-
+ Assert1(SrcTy->getScalarType()->isIntegerTy(),
+ "IntToPtr source must be an integral", &I);
+ Assert1(DestTy->getScalarType()->isPointerTy(),
+ "IntToPtr result must be a pointer",&I);
+ Assert1(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "IntToPtr type mismatch", &I);
+ if (SrcTy->isVectorTy()) {
+ VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
+ VectorType *VDest = dyn_cast<VectorType>(DestTy);
+ Assert1(VSrc->getNumElements() == VDest->getNumElements(),
+ "IntToPtr Vector width mismatch", &I);
+ }
visitInstruction(I);
}
@@ -1245,7 +1258,7 @@ void Verifier::visitICmpInst(ICmpInst &IC) {
Assert1(Op0Ty == Op1Ty,
"Both operands to ICmp instruction are not of the same type!", &IC);
// Check that the operands are the right type
- Assert1(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPointerTy(),
+ Assert1(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(),
"Invalid operand types for ICmp instruction", &IC);
// Check that the predicate is valid.
Assert1(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
@@ -1295,17 +1308,41 @@ void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
}
void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
- Assert1(cast<PointerType>(GEP.getOperand(0)->getType())
- ->getElementType()->isSized(),
+ Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
+
+ Assert1(isa<PointerType>(TargetTy),
+ "GEP base pointer is not a vector or a vector of pointers", &GEP);
+ Assert1(cast<PointerType>(TargetTy)->getElementType()->isSized(),
"GEP into unsized type!", &GEP);
-
+
SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
Type *ElTy =
- GetElementPtrInst::getIndexedType(GEP.getOperand(0)->getType(), Idxs);
+ GetElementPtrInst::getIndexedType(GEP.getPointerOperandType(), Idxs);
Assert1(ElTy, "Invalid indices for GEP pointer type!", &GEP);
- Assert2(GEP.getType()->isPointerTy() &&
- cast<PointerType>(GEP.getType())->getElementType() == ElTy,
- "GEP is not of right type for indices!", &GEP, ElTy);
+
+ if (GEP.getPointerOperandType()->isPointerTy()) {
+ // Validate GEPs with scalar indices.
+ Assert2(GEP.getType()->isPointerTy() &&
+ cast<PointerType>(GEP.getType())->getElementType() == ElTy,
+ "GEP is not of right type for indices!", &GEP, ElTy);
+ } else {
+ // Validate GEPs with a vector index.
+ Assert1(Idxs.size() == 1, "Invalid number of indices!", &GEP);
+ Value *Index = Idxs[0];
+ Type *IndexTy = Index->getType();
+ Assert1(IndexTy->isVectorTy(),
+ "Vector GEP must have vector indices!", &GEP);
+ Assert1(GEP.getType()->isVectorTy(),
+ "Vector GEP must return a vector value", &GEP);
+ Type *ElemPtr = cast<VectorType>(GEP.getType())->getElementType();
+ Assert1(ElemPtr->isPointerTy(),
+ "Vector GEP pointer operand is not a pointer!", &GEP);
+ unsigned IndexWidth = cast<VectorType>(IndexTy)->getNumElements();
+ unsigned GepWidth = cast<VectorType>(GEP.getType())->getNumElements();
+ Assert1(IndexWidth == GepWidth, "Invalid GEP index vector width", &GEP);
+ Assert1(ElTy == cast<PointerType>(ElemPtr)->getElementType(),
+ "Vector GEP type does not match pointer type!", &GEP);
+ }
visitInstruction(GEP);
}
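
Condensed, the new vector-GEP rules are: exactly one index, the index and the result are both vectors of the same width, and each result lane is a pointer to the indexed type. A standalone predicate over a toy type description, just to make the shape explicit:

    struct VecTy {
      bool IsVector;
      unsigned Width;     // lanes, if IsVector
      bool ElemIsPointer; // for the result type's element
    };

    bool vectorGEPShapeOK(const VecTy &Index, const VecTy &Result,
                          unsigned NumIndices) {
      if (NumIndices != 1) return false;       // exactly one vector index
      if (!Index.IsVector || !Result.IsVector) return false;
      if (!Result.ElemIsPointer) return false; // result lanes are pointers
      return Index.Width == Result.Width;      // lane counts must agree
    }
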
@@ -1324,6 +1361,25 @@ void Verifier::visitLoadInst(LoadInst &LI) {
Assert1(LI.getSynchScope() == CrossThread,
"Non-atomic load cannot have SynchronizationScope specified", &LI);
}
+
+ if (MDNode *Range = LI.getMetadata(LLVMContext::MD_range)) {
+ unsigned NumOperands = Range->getNumOperands();
+ Assert1(NumOperands % 2 == 0, "Unfinished range!", Range);
+ unsigned NumRanges = NumOperands / 2;
+ Assert1(NumRanges >= 1, "It should have at least one range!", Range);
+ for (unsigned i = 0; i < NumRanges; ++i) {
+ ConstantInt *Low = dyn_cast<ConstantInt>(Range->getOperand(2*i));
+ Assert1(Low, "The lower limit must be an integer!", Low);
+ ConstantInt *High = dyn_cast<ConstantInt>(Range->getOperand(2*i + 1));
+ Assert1(High, "The upper limit must be an integer!", High);
+ Assert1(High->getType() == Low->getType() &&
+ High->getType() == ElTy, "Range types must match load type!",
+ &LI);
+ Assert1(High->getValue() != Low->getValue(), "Range must not be empty!",
+ Range);
+ }
+ }
+
visitInstruction(LI);
}
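
The !range checks above boil down to: a nonzero, even operand count (pairs of bounds), every bound a constant integer of the load's type, and no pair with equal endpoints (an empty range). A flattened sketch of the same rules over plain integers:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    bool rangeMetadataOK(const std::vector<int64_t> &Ops, unsigned LoadBits,
                         unsigned OpBits) {
      if (Ops.empty() || Ops.size() % 2 != 0)
        return false;              // missing or unfinished (low, high) pairs
      if (OpBits != LoadBits)
        return false;              // bounds must match the load's type
      for (std::size_t i = 0; i + 1 < Ops.size(); i += 2)
        if (Ops[i] == Ops[i + 1])
          return false;            // empty range
      return true;
    }
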
@@ -1468,6 +1524,58 @@ void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
visitInstruction(LPI);
}
+void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
+ Instruction *Op = cast<Instruction>(I.getOperand(i));
+ BasicBlock *BB = I.getParent();
+ BasicBlock *OpBlock = Op->getParent();
+ PHINode *PN = dyn_cast<PHINode>(&I);
+
+ // DT can handle non phi instructions for us.
+ if (!PN) {
+ // Definition must dominate use unless use is unreachable!
+ Assert2(InstsInThisBlock.count(Op) || !DT->isReachableFromEntry(BB) ||
+ DT->dominates(Op, &I),
+ "Instruction does not dominate all uses!", Op, &I);
+ return;
+ }
+
+ // Check that a definition dominates all of its uses.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
+ // Invoke results are only usable in the normal destination, not in the
+ // exceptional destination.
+ BasicBlock *NormalDest = II->getNormalDest();
+
+ // PHI nodes differ from other nodes because they actually "use" the
+ // value in the predecessor basic blocks they correspond to.
+ BasicBlock *UseBlock = BB;
+ unsigned j = PHINode::getIncomingValueNumForOperand(i);
+ UseBlock = PN->getIncomingBlock(j);
+ Assert2(UseBlock, "Invoke operand is PHI node with bad incoming-BB",
+ Op, &I);
+
+ if (UseBlock == OpBlock) {
+ // Special case of a phi node in the normal destination or the unwind
+ // destination.
+ Assert2(BB == NormalDest || !DT->isReachableFromEntry(UseBlock),
+ "Invoke result not available in the unwind destination!",
+ Op, &I);
+ } else {
+ Assert2(DT->dominates(II, UseBlock) ||
+ !DT->isReachableFromEntry(UseBlock),
+ "Invoke result does not dominate all uses!", Op, &I);
+ }
+ }
+
+ // PHI nodes are more difficult than other nodes because they actually
+ // "use" the value in the predecessor basic blocks they correspond to.
+ unsigned j = PHINode::getIncomingValueNumForOperand(i);
+ BasicBlock *PredBB = PN->getIncomingBlock(j);
+ Assert2(PredBB && (DT->dominates(OpBlock, PredBB) ||
+ !DT->isReachableFromEntry(PredBB)),
+ "Instruction does not dominate all uses!", Op, &I);
+}
+
/// verifyInstruction - Verify that an instruction is well formed.
///
void Verifier::visitInstruction(Instruction &I) {
@@ -1536,84 +1644,30 @@ void Verifier::visitInstruction(Instruction &I) {
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
Assert1(GV->getParent() == Mod, "Referencing global in another module!",
&I);
- } else if (Instruction *Op = dyn_cast<Instruction>(I.getOperand(i))) {
- BasicBlock *OpBlock = Op->getParent();
-
- // Check that a definition dominates all of its uses.
- if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
- // Invoke results are only usable in the normal destination, not in the
- // exceptional destination.
- BasicBlock *NormalDest = II->getNormalDest();
-
- Assert2(NormalDest != II->getUnwindDest(),
- "No uses of invoke possible due to dominance structure!",
- Op, &I);
-
- // PHI nodes differ from other nodes because they actually "use" the
- // value in the predecessor basic blocks they correspond to.
- BasicBlock *UseBlock = BB;
- if (PHINode *PN = dyn_cast<PHINode>(&I)) {
- unsigned j = PHINode::getIncomingValueNumForOperand(i);
- UseBlock = PN->getIncomingBlock(j);
- }
- Assert2(UseBlock, "Invoke operand is PHI node with bad incoming-BB",
- Op, &I);
-
- if (isa<PHINode>(I) && UseBlock == OpBlock) {
- // Special case of a phi node in the normal destination or the unwind
- // destination.
- Assert2(BB == NormalDest || !DT->isReachableFromEntry(UseBlock),
- "Invoke result not available in the unwind destination!",
- Op, &I);
- } else {
- Assert2(DT->dominates(NormalDest, UseBlock) ||
- !DT->isReachableFromEntry(UseBlock),
- "Invoke result does not dominate all uses!", Op, &I);
-
- // If the normal successor of an invoke instruction has multiple
- // predecessors, then the normal edge from the invoke is critical,
- // so the invoke value can only be live if the destination block
- // dominates all of it's predecessors (other than the invoke).
- if (!NormalDest->getSinglePredecessor() &&
- DT->isReachableFromEntry(UseBlock))
- // If it is used by something non-phi, then the other case is that
- // 'NormalDest' dominates all of its predecessors other than the
- // invoke. In this case, the invoke value can still be used.
- for (pred_iterator PI = pred_begin(NormalDest),
- E = pred_end(NormalDest); PI != E; ++PI)
- if (*PI != II->getParent() && !DT->dominates(NormalDest, *PI) &&
- DT->isReachableFromEntry(*PI)) {
- CheckFailed("Invoke result does not dominate all uses!", Op,&I);
- return;
- }
- }
- } else if (PHINode *PN = dyn_cast<PHINode>(&I)) {
- // PHI nodes are more difficult than other nodes because they actually
- // "use" the value in the predecessor basic blocks they correspond to.
- unsigned j = PHINode::getIncomingValueNumForOperand(i);
- BasicBlock *PredBB = PN->getIncomingBlock(j);
- Assert2(PredBB && (DT->dominates(OpBlock, PredBB) ||
- !DT->isReachableFromEntry(PredBB)),
- "Instruction does not dominate all uses!", Op, &I);
- } else {
- if (OpBlock == BB) {
- // If they are in the same basic block, make sure that the definition
- // comes before the use.
- Assert2(InstsInThisBlock.count(Op) || !DT->isReachableFromEntry(BB),
- "Instruction does not dominate all uses!", Op, &I);
- }
-
- // Definition must dominate use unless use is unreachable!
- Assert2(InstsInThisBlock.count(Op) || DT->dominates(Op, &I) ||
- !DT->isReachableFromEntry(BB),
- "Instruction does not dominate all uses!", Op, &I);
- }
+ } else if (isa<Instruction>(I.getOperand(i))) {
+ verifyDominatesUse(I, i);
} else if (isa<InlineAsm>(I.getOperand(i))) {
Assert1((i + 1 == e && isa<CallInst>(I)) ||
(i + 3 == e && isa<InvokeInst>(I)),
"Cannot take the address of an inline asm!", &I);
}
}
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpaccuracy)) {
+ Assert1(I.getType()->isFPOrFPVectorTy(),
+ "fpaccuracy requires a floating point result!", &I);
+ Assert1(MD->getNumOperands() == 1, "fpaccuracy takes one operand!", &I);
+ ConstantFP *Op = dyn_cast_or_null<ConstantFP>(MD->getOperand(0));
+ Assert1(Op, "fpaccuracy ULPs not a floating point number!", &I);
+ APFloat ULPs = Op->getValueAPF();
+ Assert1(ULPs.isNormal() || ULPs.isZero(),
+ "fpaccuracy ULPs not a normal number!", &I);
+ Assert1(!ULPs.isNegative(), "fpaccuracy ULPs is negative!", &I);
+ }
+
+ MDNode *MD = I.getMetadata(LLVMContext::MD_range);
+ Assert1(!MD || isa<LoadInst>(I), "Ranges are only for loads!", &I);
+
InstsInThisBlock.insert(&I);
}
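
The fpaccuracy check accepts only a finite, non-negative constant that is zero or a normal number (no NaNs, infinities, negatives, or denormals). The same predicate over a plain double, as an illustrative stand-in for the APFloat queries:

    #include <cmath>

    bool fpAccuracyULPsOK(double ULPs) {
      if (std::isnan(ULPs) || std::isinf(ULPs) || std::signbit(ULPs))
        return false;                  // must be non-negative and finite
      return ULPs == 0.0 || std::isnormal(ULPs); // zero or a normal number
    }
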
@@ -1642,6 +1696,12 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
switch (ID) {
default:
break;
+ case Intrinsic::ctlz: // llvm.ctlz
+ case Intrinsic::cttz: // llvm.cttz
+ Assert1(isa<ConstantInt>(CI.getArgOperand(1)),
+ "is_zero_undef argument of bit counting intrinsics must be a "
+ "constant int", &CI);
+ break;
case Intrinsic::dbg_declare: { // llvm.dbg.declare
Assert1(CI.getArgOperand(0) && isa<MDNode>(CI.getArgOperand(0)),
"invalid llvm.dbg.declare intrinsic call 1", &CI);
diff --git a/contrib/llvm/tools/bugpoint/BugDriver.cpp b/contrib/llvm/tools/bugpoint/BugDriver.cpp
index 677d178..6b219bf 100644
--- a/contrib/llvm/tools/bugpoint/BugDriver.cpp
+++ b/contrib/llvm/tools/bugpoint/BugDriver.cpp
@@ -87,7 +87,7 @@ Module *llvm::ParseInputFile(const std::string &Filename,
SMDiagnostic Err;
Module *Result = ParseIRFile(Filename, Err, Ctxt);
if (!Result)
- Err.Print("bugpoint", errs());
+ Err.print("bugpoint", errs());
// If we don't have an override triple, use the first one to configure
// bugpoint, or use the host triple if none provided.
@@ -96,7 +96,7 @@ Module *llvm::ParseInputFile(const std::string &Filename,
Triple TheTriple(Result->getTargetTriple());
if (TheTriple.getTriple().empty())
- TheTriple.setTriple(sys::getHostTriple());
+ TheTriple.setTriple(sys::getDefaultTargetTriple());
TargetTriple.setTriple(TheTriple.getTriple());
}
diff --git a/contrib/llvm/tools/bugpoint/CMakeLists.txt b/contrib/llvm/tools/bugpoint/CMakeLists.txt
deleted file mode 100644
index e06feb1..0000000
--- a/contrib/llvm/tools/bugpoint/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-set(LLVM_LINK_COMPONENTS asmparser instrumentation scalaropts ipo
- linker bitreader bitwriter)
-
-add_llvm_tool(bugpoint
- BugDriver.cpp
- CrashDebugger.cpp
- ExecutionDriver.cpp
- ExtractFunction.cpp
- FindBugs.cpp
- Miscompilation.cpp
- OptimizerDriver.cpp
- ToolRunner.cpp
- bugpoint.cpp
- )
diff --git a/contrib/llvm/tools/bugpoint/CrashDebugger.cpp b/contrib/llvm/tools/bugpoint/CrashDebugger.cpp
index f19ef62..aed16f4 100644
--- a/contrib/llvm/tools/bugpoint/CrashDebugger.cpp
+++ b/contrib/llvm/tools/bugpoint/CrashDebugger.cpp
@@ -169,7 +169,7 @@ ReduceCrashingGlobalVariables::TestGlobalVariables(
return false;
}
-namespace llvm {
+namespace {
/// ReduceCrashingFunctions reducer - This works by removing functions and
/// seeing if the program still crashes. If it does, then keep the newer,
/// smaller program.
@@ -401,7 +401,8 @@ bool ReduceCrashingInstructions::TestInsts(std::vector<const Instruction*>
for (Function::iterator FI = MI->begin(), FE = MI->end(); FI != FE; ++FI)
for (BasicBlock::iterator I = FI->begin(), E = FI->end(); I != E;) {
Instruction *Inst = I++;
- if (!Instructions.count(Inst) && !isa<TerminatorInst>(Inst)) {
+ if (!Instructions.count(Inst) && !isa<TerminatorInst>(Inst) &&
+ !isa<LandingPadInst>(Inst)) {
if (!Inst->getType()->isVoidTy())
Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
Inst->eraseFromParent();
@@ -568,12 +569,15 @@ static bool DebugACrash(BugDriver &BD,
for (Function::const_iterator BI = FI->begin(), E = FI->end(); BI != E;
++BI)
for (BasicBlock::const_iterator I = BI->begin(), E = --BI->end();
- I != E; ++I, ++CurInstructionNum)
+ I != E; ++I, ++CurInstructionNum) {
if (InstructionsToSkipBeforeDeleting) {
--InstructionsToSkipBeforeDeleting;
} else {
if (BugpointIsInterrupted) goto ExitLoops;
+ if (isa<LandingPadInst>(I))
+ continue;
+
outs() << "Checking instruction: " << *I;
Module *M = BD.deleteInstructionFromProgram(I, Simplification);
@@ -590,6 +594,7 @@ static bool DebugACrash(BugDriver &BD,
// one.
delete M;
}
+ }
if (InstructionsToSkipBeforeDeleting) {
InstructionsToSkipBeforeDeleting = 0;
diff --git a/contrib/llvm/tools/bugpoint/ExecutionDriver.cpp b/contrib/llvm/tools/bugpoint/ExecutionDriver.cpp
index 77c01ac..218a559 100644
--- a/contrib/llvm/tools/bugpoint/ExecutionDriver.cpp
+++ b/contrib/llvm/tools/bugpoint/ExecutionDriver.cpp
@@ -28,8 +28,7 @@ namespace {
// for miscompilation.
//
enum OutputType {
- AutoPick, RunLLI, RunJIT, RunLLC, RunLLCIA, RunCBE, CBE_bug, LLC_Safe,
- CompileCustom, Custom
+ AutoPick, RunLLI, RunJIT, RunLLC, RunLLCIA, LLC_Safe, CompileCustom, Custom
};
cl::opt<double>
@@ -48,8 +47,6 @@ namespace {
clEnumValN(RunLLC, "run-llc", "Compile with LLC"),
clEnumValN(RunLLCIA, "run-llc-ia",
"Compile with LLC with integrated assembler"),
- clEnumValN(RunCBE, "run-cbe", "Compile with CBE"),
- clEnumValN(CBE_bug,"cbe-bug", "Find CBE bugs"),
clEnumValN(LLC_Safe, "llc-safe", "Use LLC for all"),
clEnumValN(CompileCustom, "compile-custom",
"Use -compile-command to define a command to "
@@ -64,7 +61,6 @@ namespace {
SafeInterpreterSel(cl::desc("Specify \"safe\" i.e. known-good backend:"),
cl::values(clEnumValN(AutoPick, "safe-auto", "Use best guess"),
clEnumValN(RunLLC, "safe-run-llc", "Compile with LLC"),
- clEnumValN(RunCBE, "safe-run-cbe", "Compile with CBE"),
clEnumValN(Custom, "safe-run-custom",
"Use -exec-command to define a command to execute "
"the bitcode. Useful for cross-compilation."),
@@ -154,10 +150,6 @@ bool BugDriver::initializeExecutionEnvironment() {
switch (InterpreterSel) {
case AutoPick:
- InterpreterSel = RunCBE;
- Interpreter =
- AbstractInterpreter::createCBE(getToolName(), Message, GCCBinary,
- &ToolArgv, &GCCToolArgv);
if (!Interpreter) {
InterpreterSel = RunJIT;
Interpreter = AbstractInterpreter::createJIT(getToolName(), Message,
@@ -195,12 +187,6 @@ bool BugDriver::initializeExecutionEnvironment() {
Interpreter = AbstractInterpreter::createJIT(getToolName(), Message,
&ToolArgv);
break;
- case RunCBE:
- case CBE_bug:
- Interpreter = AbstractInterpreter::createCBE(getToolName(), Message,
- GCCBinary, &ToolArgv,
- &GCCToolArgv);
- break;
case CompileCustom:
Interpreter =
AbstractInterpreter::createCustomCompiler(Message, CustomCompileCommand);
@@ -209,9 +195,6 @@ bool BugDriver::initializeExecutionEnvironment() {
Interpreter =
AbstractInterpreter::createCustomExecutor(Message, CustomExecCommand);
break;
- default:
- Message = "Sorry, this back-end is not supported by bugpoint right now!\n";
- break;
}
if (!Interpreter)
errs() << Message;
@@ -224,17 +207,6 @@ bool BugDriver::initializeExecutionEnvironment() {
std::vector<std::string> SafeToolArgs = SafeToolArgv;
switch (SafeInterpreterSel) {
case AutoPick:
- // In "cbe-bug" mode, default to using LLC as the "safe" backend.
- if (!SafeInterpreter &&
- InterpreterSel == CBE_bug) {
- SafeInterpreterSel = RunLLC;
- SafeToolArgs.push_back("--relocation-model=pic");
- SafeInterpreter = AbstractInterpreter::createLLC(Path.c_str(), Message,
- GCCBinary,
- &SafeToolArgs,
- &GCCToolArgv);
- }
-
// In "llc-safe" mode, default to using LLC as the "safe" backend.
if (!SafeInterpreter &&
InterpreterSel == LLC_Safe) {
@@ -246,17 +218,6 @@ bool BugDriver::initializeExecutionEnvironment() {
&GCCToolArgv);
}
- // Pick a backend that's different from the test backend. The JIT and
- // LLC backends share a lot of code, so prefer to use the CBE as the
- // safe back-end when testing them.
- if (!SafeInterpreter &&
- InterpreterSel != RunCBE) {
- SafeInterpreterSel = RunCBE;
- SafeInterpreter = AbstractInterpreter::createCBE(Path.c_str(), Message,
- GCCBinary,
- &SafeToolArgs,
- &GCCToolArgv);
- }
if (!SafeInterpreter &&
InterpreterSel != RunLLC &&
InterpreterSel != RunJIT) {
@@ -280,11 +241,6 @@ bool BugDriver::initializeExecutionEnvironment() {
&GCCToolArgv,
SafeInterpreterSel == RunLLCIA);
break;
- case RunCBE:
- SafeInterpreter = AbstractInterpreter::createCBE(Path.c_str(), Message,
- GCCBinary, &SafeToolArgs,
- &GCCToolArgv);
- break;
case Custom:
SafeInterpreter =
AbstractInterpreter::createCustomExecutor(Message, CustomExecCommand);
@@ -462,8 +418,8 @@ bool BugDriver::createReferenceFile(Module *M, const std::string &Filename) {
errs() << Error;
if (Interpreter != SafeInterpreter) {
errs() << "*** There is a bug running the \"safe\" backend. Either"
- << " debug it (for example with the -run-cbe bugpoint option,"
- << " if CBE is being used as the \"safe\" backend), or fix the"
+ << " debug it (for example with the -run-jit bugpoint option,"
+ << " if JIT is being used as the \"safe\" backend), or fix the"
<< " error some other way.\n";
}
return false;
diff --git a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
index 73b65ca..ac8e159 100644
--- a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
+++ b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
@@ -47,7 +47,39 @@ namespace {
cl::opt<bool, true>
NoSCFG("disable-simplifycfg", cl::location(DisableSimplifyCFG),
cl::desc("Do not use the -simplifycfg pass to reduce testcases"));
-}
+
+ Function* globalInitUsesExternalBA(GlobalVariable* GV) {
+ if (!GV->hasInitializer())
+ return 0;
+
+ Constant *I = GV->getInitializer();
+
+ // walk the values used by the initializer
+ // (and recurse into things like ConstantExpr)
+ std::vector<Constant*> Todo;
+ std::set<Constant*> Done;
+ Todo.push_back(I);
+
+ while (!Todo.empty()) {
+ Constant* V = Todo.back();
+ Todo.pop_back();
+ Done.insert(V);
+
+ if (BlockAddress *BA = dyn_cast<BlockAddress>(V)) {
+ Function *F = BA->getFunction();
+ if (F->isDeclaration())
+ return F;
+ }
+
+ for (User::op_iterator i = V->op_begin(), e = V->op_end(); i != e; ++i) {
+ Constant *C = dyn_cast<Constant>(*i);
+ if (C && !isa<GlobalValue>(C) && !Done.count(C))
+ Todo.push_back(C);
+ }
+ }
+ return 0;
+ }
+} // end anonymous namespace
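
globalInitUsesExternalBA is a standard worklist walk over a constant DAG with a Done set so shared subtrees are visited once. Its generic shape, over toy nodes rather than llvm::Constant:

    #include <set>
    #include <vector>

    struct C {
      bool IsTarget; // stands in for "blockaddress of a declared function"
      std::vector<C *> Operands;
    };

    C *findTarget(C *Root) {
      std::vector<C *> Todo{Root};
      std::set<C *> Done;
      while (!Todo.empty()) {
        C *V = Todo.back();
        Todo.pop_back();
        if (!Done.insert(V).second)
          continue; // already processed via another path
        if (V->IsTarget)
          return V;
        for (C *Op : V->Operands)
          if (!Done.count(Op))
            Todo.push_back(Op);
      }
      return nullptr;
    }
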
/// deleteInstructionFromProgram - This method clones the current Program and
/// deletes the specified instruction from the cloned module. It then runs a
@@ -272,11 +304,6 @@ llvm::SplitFunctionsOutOfModule(Module *M,
ValueToValueMapTy NewVMap;
Module *New = CloneModule(M, NewVMap);
- // Make sure global initializers exist only in the safe module (CBE->.so)
- for (Module::global_iterator I = New->global_begin(), E = New->global_end();
- I != E; ++I)
- I->setInitializer(0); // Delete the initializer to make it external
-
// Remove the Test functions from the Safe module
std::set<Function *> TestFunctions;
for (unsigned i = 0, e = F.size(); i != e; ++i) {
@@ -295,6 +322,27 @@ llvm::SplitFunctionsOutOfModule(Module *M,
DeleteFunctionBody(I);
+ // Try to split the global initializers evenly
+ for (Module::global_iterator I = M->global_begin(), E = M->global_end();
+ I != E; ++I) {
+ GlobalVariable *GV = cast<GlobalVariable>(NewVMap[I]);
+ if (Function *TestFn = globalInitUsesExternalBA(I)) {
+ if (Function *SafeFn = globalInitUsesExternalBA(GV)) {
+ errs() << "*** Error: when reducing functions, encountered "
+ "the global '";
+ WriteAsOperand(errs(), GV, false);
+ errs() << "' with an initializer that references blockaddresses "
+ "from safe function '" << SafeFn->getName()
+ << "' and from test function '" << TestFn->getName() << "'.\n";
+ exit(1);
+ }
+ I->setInitializer(0); // Delete the initializer to make it external
+ } else {
+ // If we keep it in the safe module, then delete it in the test module
+ GV->setInitializer(0);
+ }
+ }
+
// Make sure that there is a global ctor/dtor array in both halves of the
// module if they both have static ctor/dtor functions.
SplitStaticCtorDtor("llvm.global_ctors", M, New, NewVMap);
@@ -340,7 +388,7 @@ Module *BugDriver::ExtractMappedBlocksFromModule(const
// If the BB doesn't have a name, give it one so we have something to key
// off of.
if (!BB->hasName()) BB->setName("tmpbb");
- BlocksToNotExtractFile.os() << BB->getParent()->getNameStr() << " "
+ BlocksToNotExtractFile.os() << BB->getParent()->getName() << " "
<< BB->getName() << "\n";
}
BlocksToNotExtractFile.os().close();
diff --git a/contrib/llvm/tools/bugpoint/Makefile b/contrib/llvm/tools/bugpoint/Makefile
deleted file mode 100644
index 5d287ef..0000000
--- a/contrib/llvm/tools/bugpoint/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- tools/bugpoint/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-
-TOOLNAME = bugpoint
-
-LINK_COMPONENTS := asmparser instrumentation scalaropts ipo \
- linker bitreader bitwriter
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/bugpoint/Miscompilation.cpp b/contrib/llvm/tools/bugpoint/Miscompilation.cpp
index 7ff16db..82a3a86 100644
--- a/contrib/llvm/tools/bugpoint/Miscompilation.cpp
+++ b/contrib/llvm/tools/bugpoint/Miscompilation.cpp
@@ -820,7 +820,8 @@ static void CleanupAndPrepareModules(BugDriver &BD, Module *&Test,
// Don't forward functions which are external in the test module too.
if (TestFn && !TestFn->isDeclaration()) {
// 1. Add a string constant with its name to the global file
- Constant *InitArray = ConstantArray::get(F->getContext(), F->getName());
+ Constant *InitArray =
+ ConstantDataArray::getString(F->getContext(), F->getName());
GlobalVariable *funcName =
new GlobalVariable(*Safe, InitArray->getType(), true /*isConstant*/,
GlobalValue::InternalLinkage, InitArray,
diff --git a/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp b/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp
index 336c83d..fb090ee 100644
--- a/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp
+++ b/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp
@@ -85,8 +85,11 @@ void BugDriver::EmitProgressBitcode(const Module *M,
if (NoFlyer || PassesToRun.empty()) return;
outs() << "\n*** You can reproduce the problem with: ";
if (UseValgrind) outs() << "valgrind ";
- outs() << "opt " << Filename << " ";
- outs() << getPassesString(PassesToRun) << "\n";
+ outs() << "opt " << Filename;
+ for (unsigned i = 0, e = PluginLoader::getNumPlugins(); i != e; ++i) {
+ outs() << " -load " << PluginLoader::getPlugin(i);
+ }
+ outs() << " " << getPassesString(PassesToRun) << "\n";
}
cl::opt<bool> SilencePasses("silence-passes",
@@ -145,10 +148,9 @@ bool BugDriver::runPasses(Module *Program,
return 1;
}
- sys::Path tool = PrependMainExecutablePath("opt", getToolName(),
- (void*)"opt");
+ sys::Path tool = sys::Program::FindProgramByName("opt");
if (tool.empty()) {
- errs() << "Cannot find `opt' in executable directory!\n";
+ errs() << "Cannot find `opt' in PATH!\n";
return 1;
}
diff --git a/contrib/llvm/tools/bugpoint/ToolRunner.cpp b/contrib/llvm/tools/bugpoint/ToolRunner.cpp
index 0d98262..25a2bae 100644
--- a/contrib/llvm/tools/bugpoint/ToolRunner.cpp
+++ b/contrib/llvm/tools/bugpoint/ToolRunner.cpp
@@ -234,6 +234,8 @@ int LLI::ExecuteProgram(const std::string &Bitcode,
Timeout, MemoryLimit, Error);
}
+void AbstractInterpreter::anchor() { }
+
// LLI create method - Try to find the LLI executable
AbstractInterpreter *AbstractInterpreter::createLLI(const char *Argv0,
std::string &Message,
@@ -621,94 +623,6 @@ AbstractInterpreter *AbstractInterpreter::createJIT(const char *Argv0,
return 0;
}
-GCC::FileType CBE::OutputCode(const std::string &Bitcode,
- sys::Path &OutputCFile, std::string &Error,
- unsigned Timeout, unsigned MemoryLimit) {
- sys::Path uniqueFile(Bitcode+".cbe.c");
- std::string ErrMsg;
- if (uniqueFile.makeUnique(true, &ErrMsg)) {
- errs() << "Error making unique filename: " << ErrMsg << "\n";
- exit(1);
- }
- OutputCFile = uniqueFile;
- std::vector<const char *> LLCArgs;
- LLCArgs.push_back(LLCPath.c_str());
-
- // Add any extra LLC args.
- for (unsigned i = 0, e = ToolArgs.size(); i != e; ++i)
- LLCArgs.push_back(ToolArgs[i].c_str());
-
- LLCArgs.push_back("-o");
- LLCArgs.push_back(OutputCFile.c_str()); // Output to the C file
- LLCArgs.push_back("-march=c"); // Output C language
- LLCArgs.push_back(Bitcode.c_str()); // This is the input bitcode
- LLCArgs.push_back(0);
-
- outs() << "<cbe>"; outs().flush();
- DEBUG(errs() << "\nAbout to run:\t";
- for (unsigned i = 0, e = LLCArgs.size()-1; i != e; ++i)
- errs() << " " << LLCArgs[i];
- errs() << "\n";
- );
- if (RunProgramWithTimeout(LLCPath, &LLCArgs[0], sys::Path(), sys::Path(),
- sys::Path(), Timeout, MemoryLimit))
- Error = ProcessFailure(LLCPath, &LLCArgs[0], Timeout, MemoryLimit);
- return GCC::CFile;
-}
-
-void CBE::compileProgram(const std::string &Bitcode, std::string *Error,
- unsigned Timeout, unsigned MemoryLimit) {
- sys::Path OutputCFile;
- OutputCode(Bitcode, OutputCFile, *Error, Timeout, MemoryLimit);
- OutputCFile.eraseFromDisk();
-}
-
-int CBE::ExecuteProgram(const std::string &Bitcode,
- const std::vector<std::string> &Args,
- const std::string &InputFile,
- const std::string &OutputFile,
- std::string *Error,
- const std::vector<std::string> &ArgsForGCC,
- const std::vector<std::string> &SharedLibs,
- unsigned Timeout,
- unsigned MemoryLimit) {
- sys::Path OutputCFile;
- OutputCode(Bitcode, OutputCFile, *Error, Timeout, MemoryLimit);
-
- FileRemover CFileRemove(OutputCFile.str(), !SaveTemps);
-
- std::vector<std::string> GCCArgs(ArgsForGCC);
- GCCArgs.insert(GCCArgs.end(), SharedLibs.begin(), SharedLibs.end());
-
- return gcc->ExecuteProgram(OutputCFile.str(), Args, GCC::CFile,
- InputFile, OutputFile, Error, GCCArgs,
- Timeout, MemoryLimit);
-}
-
-/// createCBE - Try to find the 'llc' executable
-///
-CBE *AbstractInterpreter::createCBE(const char *Argv0,
- std::string &Message,
- const std::string &GCCBinary,
- const std::vector<std::string> *Args,
- const std::vector<std::string> *GCCArgs) {
- sys::Path LLCPath =
- PrependMainExecutablePath("llc", Argv0, (void *)(intptr_t)&createCBE);
- if (LLCPath.isEmpty()) {
- Message =
- "Cannot find `llc' in executable directory!\n";
- return 0;
- }
-
- Message = "Found llc: " + LLCPath.str() + "\n";
- GCC *gcc = GCC::create(Message, GCCBinary, GCCArgs);
- if (!gcc) {
- errs() << Message << "\n";
- exit(1);
- }
- return new CBE(LLCPath, gcc, Args);
-}
-
//===---------------------------------------------------------------------===//
// GCC abstraction
//
@@ -920,8 +834,7 @@ int GCC::MakeSharedObject(const std::string &InputFile, FileType fileType,
} else
GCCArgs.push_back("-shared"); // `-shared' for Linux/X86, maybe others
- if ((TargetTriple.getArch() == Triple::alpha) ||
- (TargetTriple.getArch() == Triple::x86_64))
+ if (TargetTriple.getArch() == Triple::x86_64)
GCCArgs.push_back("-fPIC"); // Requires shared objs to contain PIC
if (TargetTriple.getArch() == Triple::sparc)
diff --git a/contrib/llvm/tools/bugpoint/ToolRunner.h b/contrib/llvm/tools/bugpoint/ToolRunner.h
index cfa8acf..7b93394 100644
--- a/contrib/llvm/tools/bugpoint/ToolRunner.h
+++ b/contrib/llvm/tools/bugpoint/ToolRunner.h
@@ -86,6 +86,7 @@ public:
/// complexity behind a simple interface.
///
class AbstractInterpreter {
+ virtual void anchor();
public:
static CBE *createCBE(const char *Argv0, std::string &Message,
const std::string &GCCBinary,
diff --git a/contrib/llvm/tools/bugpoint/bugpoint.cpp b/contrib/llvm/tools/bugpoint/bugpoint.cpp
index 6a87521..8f15b02 100644
--- a/contrib/llvm/tools/bugpoint/bugpoint.cpp
+++ b/contrib/llvm/tools/bugpoint/bugpoint.cpp
@@ -120,6 +120,7 @@ int main(int argc, char **argv) {
PassRegistry &Registry = *PassRegistry::getPassRegistry();
initializeCore(Registry);
initializeScalarOpts(Registry);
+ initializeVectorization(Registry);
initializeIPO(Registry);
initializeAnalysis(Registry);
initializeIPA(Registry);
diff --git a/contrib/llvm/tools/clang/LICENSE.TXT b/contrib/llvm/tools/clang/LICENSE.TXT
index 91895eb..6c224f8 100644
--- a/contrib/llvm/tools/clang/LICENSE.TXT
+++ b/contrib/llvm/tools/clang/LICENSE.TXT
@@ -4,7 +4,7 @@ LLVM Release License
University of Illinois/NCSA
Open Source License
-Copyright (c) 2007-2011 University of Illinois at Urbana-Champaign.
+Copyright (c) 2007-2012 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:
diff --git a/contrib/llvm/tools/clang/include/clang-c/Index.h b/contrib/llvm/tools/clang/include/clang-c/Index.h
index 1320145..13ba6ba 100644
--- a/contrib/llvm/tools/clang/include/clang-c/Index.h
+++ b/contrib/llvm/tools/clang/include/clang-c/Index.h
@@ -35,6 +35,16 @@ extern "C" {
#define CINDEX_LINKAGE
#endif
+#ifdef __GNUC__
+ #define CINDEX_DEPRECATED __attribute__((deprecated))
+#else
+ #ifdef _MSC_VER
+ #define CINDEX_DEPRECATED __declspec(deprecated)
+ #else
+ #define CINDEX_DEPRECATED
+ #endif
+#endif
+
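
The macro gives libclang a portable deprecation marker: GCC/Clang get __attribute__((deprecated)), MSVC gets __declspec(deprecated), and other compilers see a plain declaration. A hypothetical use (the entry-point name here is illustrative):

    /* Callers get a deprecation warning where the compiler supports one. */
    CINDEX_DEPRECATED CINDEX_LINKAGE
    CXString clang_someOldEntryPoint(unsigned Arg);
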
/** \defgroup CINDEX libclang: C Interface to Clang
*
* The C Interface to Clang provides a relatively small API that exposes
@@ -204,6 +214,61 @@ CINDEX_LINKAGE CXIndex clang_createIndex(int excludeDeclarationsFromPCH,
*/
CINDEX_LINKAGE void clang_disposeIndex(CXIndex index);
+typedef enum {
+ /**
+ * \brief Used to indicate that no special CXIndex options are needed.
+ */
+ CXGlobalOpt_None = 0x0,
+
+ /**
+ * \brief Used to indicate that threads that libclang creates for indexing
+ * purposes should use background priority.
+ * Affects \see clang_indexSourceFile, \see clang_indexTranslationUnit,
+ * \see clang_parseTranslationUnit, \see clang_saveTranslationUnit.
+ */
+ CXGlobalOpt_ThreadBackgroundPriorityForIndexing = 0x1,
+
+ /**
+ * \brief Used to indicate that threads that libclang creates for editing
+ * purposes should use background priority.
+ * Affects \see clang_reparseTranslationUnit, \see clang_codeCompleteAt,
+ * \see clang_annotateTokens
+ */
+ CXGlobalOpt_ThreadBackgroundPriorityForEditing = 0x2,
+
+ /**
+ * \brief Used to indicate that all threads that libclang creates should use
+ * background priority.
+ */
+ CXGlobalOpt_ThreadBackgroundPriorityForAll =
+ CXGlobalOpt_ThreadBackgroundPriorityForIndexing |
+ CXGlobalOpt_ThreadBackgroundPriorityForEditing
+
+} CXGlobalOptFlags;
+
+/**
+ * \brief Sets general options associated with a CXIndex.
+ *
+ * For example:
+ * \code
+ * CXIndex idx = ...;
+ * clang_CXIndex_setGlobalOptions(idx,
+ * clang_CXIndex_getGlobalOptions(idx) |
+ * CXGlobalOpt_ThreadBackgroundPriorityForIndexing);
+ * \endcode
+ *
+ * \param options A bitmask of options, a bitwise OR of CXGlobalOpt_XXX flags.
+ */
+CINDEX_LINKAGE void clang_CXIndex_setGlobalOptions(CXIndex, unsigned options);
+
+/**
+ * \brief Gets the general options associated with a CXIndex.
+ *
+ * \returns A bitmask of options, a bitwise OR of CXGlobalOpt_XXX flags that
+ * are associated with the given CXIndex object.
+ */
+CINDEX_LINKAGE unsigned clang_CXIndex_getGlobalOptions(CXIndex);
+
/**
* \defgroup CINDEX_FILES File manipulation routines
*
@@ -522,6 +587,86 @@ enum CXDiagnosticSeverity {
typedef void *CXDiagnostic;
/**
+ * \brief A group of CXDiagnostics.
+ */
+typedef void *CXDiagnosticSet;
+
+/**
+ * \brief Determine the number of diagnostics in a CXDiagnosticSet.
+ */
+CINDEX_LINKAGE unsigned clang_getNumDiagnosticsInSet(CXDiagnosticSet Diags);
+
+/**
+ * \brief Retrieve a diagnostic associated with the given CXDiagnosticSet.
+ *
+ * \param Unit the CXDiagnosticSet to query.
+ * \param Index the zero-based diagnostic number to retrieve.
+ *
+ * \returns the requested diagnostic. This diagnostic must be freed
+ * via a call to \c clang_disposeDiagnostic().
+ */
+CINDEX_LINKAGE CXDiagnostic clang_getDiagnosticInSet(CXDiagnosticSet Diags,
+ unsigned Index);
+
+
+/**
+ * \brief Describes the kind of error that occurred (if any) in a call to
+ * \c clang_loadDiagnostics.
+ */
+enum CXLoadDiag_Error {
+ /**
+ * \brief Indicates that no error occurred.
+ */
+ CXLoadDiag_None = 0,
+
+ /**
+ * \brief Indicates that an unknown error occurred while attempting to
+ * deserialize diagnostics.
+ */
+ CXLoadDiag_Unknown = 1,
+
+ /**
+ * \brief Indicates that the file containing the serialized diagnostics
+ * could not be opened.
+ */
+ CXLoadDiag_CannotLoad = 2,
+
+ /**
+ * \brief Indicates that the serialized diagnostics file is invalid or
+ * corrupt.
+ */
+ CXLoadDiag_InvalidFile = 3
+};
+
+/**
+ * \brief Deserialize a set of diagnostics from a Clang diagnostics bitcode
+ * file.
+ *
+ * \param file The name of the file to deserialize.
+ * \param error A pointer to an enum value recording if there was a problem
+ *        deserializing the diagnostics.
+ * \param errorString A pointer to a CXString for recording the error string
+ *        if the file was not successfully loaded.
+ *
+ * \returns A loaded CXDiagnosticSet if successful, and NULL otherwise. These
+ * diagnostics should be released using clang_disposeDiagnosticSet().
+ */
+CINDEX_LINKAGE CXDiagnosticSet clang_loadDiagnostics(const char *file,
+ enum CXLoadDiag_Error *error,
+ CXString *errorString);
+
+/**
+ * \brief Release a CXDiagnosticSet and all of its contained diagnostics.
+ */
+CINDEX_LINKAGE void clang_disposeDiagnosticSet(CXDiagnosticSet Diags);
+
+/**
+ * \brief Retrieve the child diagnostics of a CXDiagnostic. This
+ * CXDiagnosticSet does not need to be released by clang_disposeDiagnosticSet.
+ */
+CINDEX_LINKAGE CXDiagnosticSet clang_getChildDiagnostics(CXDiagnostic D);
+
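
A hedged usage sketch of the new deserialization entry points, tying the declarations above together (clang_getCString, clang_disposeString, and clang_disposeDiagnostic are pre-existing libclang helpers; error handling kept minimal):

    #include <stdio.h>
    #include "clang-c/Index.h"

    void dumpSerializedDiags(const char *path) {
      enum CXLoadDiag_Error err = CXLoadDiag_None;
      CXString errStr;
      CXDiagnosticSet set = clang_loadDiagnostics(path, &err, &errStr);
      if (!set) {
        fprintf(stderr, "load failed: %s\n", clang_getCString(errStr));
        clang_disposeString(errStr);
        return;
      }
      unsigned n = clang_getNumDiagnosticsInSet(set);
      for (unsigned i = 0; i != n; ++i) {
        CXDiagnostic d = clang_getDiagnosticInSet(set, i);
        /* ...format or inspect the diagnostic here... */
        clang_disposeDiagnostic(d);
      }
      clang_disposeDiagnosticSet(set);
    }
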
+/**
* \brief Determine the number of diagnostics produced for the given
* translation unit.
*/
@@ -540,6 +685,15 @@ CINDEX_LINKAGE CXDiagnostic clang_getDiagnostic(CXTranslationUnit Unit,
unsigned Index);
/**
+ * \brief Retrieve the complete set of diagnostics associated with a
+ * translation unit.
+ *
+ * \param Unit the translation unit to query.
+ */
+CINDEX_LINKAGE CXDiagnosticSet
+ clang_getDiagnosticSetFromTU(CXTranslationUnit Unit);
+
+/**
* \brief Destroy a diagnostic.
*/
CINDEX_LINKAGE void clang_disposeDiagnostic(CXDiagnostic Diagnostic);
@@ -686,14 +840,25 @@ CINDEX_LINKAGE CXString clang_getDiagnosticOption(CXDiagnostic Diag,
CINDEX_LINKAGE unsigned clang_getDiagnosticCategory(CXDiagnostic);
/**
- * \brief Retrieve the name of a particular diagnostic category.
+ * \brief Retrieve the name of a particular diagnostic category. This
+ * is now deprecated. Use clang_getDiagnosticCategoryText()
+ * instead.
*
* \param Category A diagnostic category number, as returned by
* \c clang_getDiagnosticCategory().
*
* \returns The name of the given diagnostic category.
*/
-CINDEX_LINKAGE CXString clang_getDiagnosticCategoryName(unsigned Category);
+CINDEX_DEPRECATED CINDEX_LINKAGE
+CXString clang_getDiagnosticCategoryName(unsigned Category);
+
+/**
+ * \brief Retrieve the diagnostic category text for a given diagnostic.
+ *
+ * \returns The text of the given diagnostic category.
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticCategoryText(CXDiagnostic);
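For illustration, a sketch of the suggested migration; printCategory is a hypothetical helper that replaces the deprecated two-step lookup through clang_getDiagnosticCategory()/clang_getDiagnosticCategoryName():

#include "clang-c/Index.h"
#include <stdio.h>

/* Print a diagnostic's category using the new per-diagnostic call. */
static void printCategory(CXDiagnostic d) {
  CXString s = clang_getDiagnosticCategoryText(d);
  printf("category: %s\n", clang_getCString(s));
  clang_disposeString(s);
}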
/**
* \brief Determine the number of source ranges associated with the given
@@ -905,27 +1070,15 @@ enum CXTranslationUnit_Flags {
* we are testing C++ precompiled preamble support. It is deprecated.
*/
CXTranslationUnit_CXXChainedPCH = 0x20,
-
- /**
- * \brief Used to indicate that the "detailed" preprocessing record,
- * if requested, should also contain nested macro expansions.
- *
- * Nested macro expansions (i.e., macro expansions that occur
- * inside another macro expansion) can, in some code bases, require
- * a large amount of storage to due preprocessor metaprogramming. Moreover,
- * its fairly rare that this information is useful for libclang clients.
- */
- CXTranslationUnit_NestedMacroExpansions = 0x40,
/**
- * \brief Legacy name to indicate that the "detailed" preprocessing record,
- * if requested, should contain nested macro expansions.
+ * \brief Used to indicate that function/method bodies should be skipped while
+ * parsing.
*
- * \see CXTranslationUnit_NestedMacroExpansions for the current name for this
- * value, and its semantics. This is just an alias.
+ * This option can be used to search for declarations/definitions while
+ * ignoring the usages.
*/
- CXTranslationUnit_NestedMacroInstantiations =
- CXTranslationUnit_NestedMacroExpansions
+ CXTranslationUnit_SkipFunctionBodies = 0x40
};
/**
@@ -1411,7 +1564,13 @@ enum CXCursorKind {
*/
CXCursor_OverloadedDeclRef = 49,
- CXCursor_LastRef = CXCursor_OverloadedDeclRef,
+ /**
+ * \brief A reference to a variable that occurs in some non-expression
+ * context, e.g., a C++ lambda capture list.
+ */
+ CXCursor_VariableRef = 50,
+
+ CXCursor_LastRef = CXCursor_VariableRef,
/* Error conditions */
CXCursor_FirstInvalid = 70,
@@ -1528,7 +1687,7 @@ enum CXCursorKind {
*/
CXCursor_StmtExpr = 121,
- /** \brief Represents a C1X generic selection.
+ /** \brief Represents a C11 generic selection.
*/
CXCursor_GenericSelectionExpr = 122,
@@ -1605,19 +1764,19 @@ enum CXCursorKind {
*/
CXCursor_UnaryExpr = 136,
- /** \brief ObjCStringLiteral, used for Objective-C string literals i.e. "foo".
+ /** \brief An Objective-C string literal i.e. @"foo".
*/
CXCursor_ObjCStringLiteral = 137,
- /** \brief ObjCEncodeExpr, used for in Objective-C.
+ /** \brief An Objective-C @encode expression.
*/
CXCursor_ObjCEncodeExpr = 138,
- /** \brief ObjCSelectorExpr used for in Objective-C.
+ /** \brief An Objective-C @selector expression.
*/
CXCursor_ObjCSelectorExpr = 139,
- /** \brief Objective-C's protocol expression.
+ /** \brief An Objective-C @protocol expression.
*/
CXCursor_ObjCProtocolExpr = 140,
@@ -1657,7 +1816,25 @@ enum CXCursorKind {
*/
CXCursor_SizeOfPackExpr = 143,
- CXCursor_LastExpr = CXCursor_SizeOfPackExpr,
+  /** \brief Represents a C++ lambda expression that produces a local function
+ * object.
+ *
+ * \code
+ * void abssort(float *x, unsigned N) {
+ * std::sort(x, x + N,
+ * [](float a, float b) {
+ * return std::abs(a) < std::abs(b);
+ * });
+ * }
+ * \endcode
+ */
+ CXCursor_LambdaExpr = 144,
+
+  /** \brief An Objective-C Boolean literal.
+ */
+ CXCursor_ObjCBoolLiteralExpr = 145,
+
+ CXCursor_LastExpr = CXCursor_ObjCBoolLiteralExpr,
/* Statements */
CXCursor_FirstStmt = 200,
@@ -1744,7 +1921,7 @@ enum CXCursorKind {
*/
CXCursor_AsmStmt = 215,
- /** \brief Objective-C's overall @try-@catc-@finall statement.
+ /** \brief Objective-C's overall @try-@catch-@finally statement.
*/
CXCursor_ObjCAtTryStmt = 216,
@@ -1831,7 +2008,8 @@ enum CXCursorKind {
CXCursor_CXXFinalAttr = 404,
CXCursor_CXXOverrideAttr = 405,
CXCursor_AnnotateAttr = 406,
- CXCursor_LastAttr = CXCursor_AnnotateAttr,
+ CXCursor_AsmLabelAttr = 407,
+ CXCursor_LastAttr = CXCursor_AsmLabelAttr,
/* Preprocessing */
CXCursor_PreprocessingDirective = 500,
@@ -1894,7 +2072,7 @@ CINDEX_LINKAGE unsigned clang_equalCursors(CXCursor, CXCursor);
/**
* \brief Returns non-zero if \arg cursor is null.
*/
-int clang_Cursor_isNull(CXCursor);
+CINDEX_LINKAGE int clang_Cursor_isNull(CXCursor);
/**
* \brief Compute a hash value for the given cursor.
@@ -2126,11 +2304,12 @@ CINDEX_LINKAGE CXCursor clang_getCursorLexicalParent(CXCursor cursor);
* In both Objective-C and C++, a method (aka virtual member function,
* in C++) can override a virtual method in a base class. For
* Objective-C, a method is said to override any method in the class's
- * interface (if we're coming from an implementation), its protocols,
- * or its categories, that has the same selector and is of the same
- * kind (class or instance). If no such method exists, the search
- * continues to the class's superclass, its protocols, and its
- * categories, and so on.
+ * base class, its protocols, or its categories' protocols, that has the same
+ * selector and is of the same kind (class or instance).
+ * If no such method exists, the search continues to the class's superclass,
+ * its protocols, and its categories, and so on. A method from an Objective-C
+ * implementation is considered to override the same methods as its
+ * corresponding method in the interface.
*
* For C++, a virtual member function overrides any virtual member
* function with the same signature that occurs in its base
@@ -2303,9 +2482,28 @@ enum CXTypeKind {
CXType_ObjCObjectPointer = 109,
CXType_FunctionNoProto = 110,
CXType_FunctionProto = 111,
- CXType_ConstantArray = 112
+ CXType_ConstantArray = 112,
+ CXType_Vector = 113
+};
+
+/**
+ * \brief Describes the calling convention of a function type.
+ */
+enum CXCallingConv {
+ CXCallingConv_Default = 0,
+ CXCallingConv_C = 1,
+ CXCallingConv_X86StdCall = 2,
+ CXCallingConv_X86FastCall = 3,
+ CXCallingConv_X86ThisCall = 4,
+ CXCallingConv_X86Pascal = 5,
+ CXCallingConv_AAPCS = 6,
+ CXCallingConv_AAPCS_VFP = 7,
+
+ CXCallingConv_Invalid = 100,
+ CXCallingConv_Unexposed = 200
};
+
/**
* \brief The type of an element in the abstract syntax tree.
*
@@ -2321,6 +2519,58 @@ typedef struct {
CINDEX_LINKAGE CXType clang_getCursorType(CXCursor C);
/**
+ * \brief Retrieve the underlying type of a typedef declaration.
+ *
+ * If the cursor does not reference a typedef declaration, an invalid type is
+ * returned.
+ */
+CINDEX_LINKAGE CXType clang_getTypedefDeclUnderlyingType(CXCursor C);
+
+/**
+ * \brief Retrieve the integer type of an enum declaration.
+ *
+ * If the cursor does not reference an enum declaration, an invalid type is
+ * returned.
+ */
+CINDEX_LINKAGE CXType clang_getEnumDeclIntegerType(CXCursor C);
+
+/**
+ * \brief Retrieve the integer value of an enum constant declaration as a signed
+ * long long.
+ *
+ * If the cursor does not reference an enum constant declaration, LLONG_MIN
+ * is returned.
+ * Since this is also potentially a valid constant value, the kind of the cursor
+ * must be verified before calling this function.
+ */
+CINDEX_LINKAGE long long clang_getEnumConstantDeclValue(CXCursor C);
+
+/**
+ * \brief Retrieve the integer value of an enum constant declaration as an unsigned
+ * long long.
+ *
+ * If the cursor does not reference an enum constant declaration, ULLONG_MAX
+ * is returned.
+ * Since this is also potentially a valid constant value, the kind of the cursor
+ * must be verified before calling this function.
+ */
+CINDEX_LINKAGE unsigned long long clang_getEnumConstantDeclUnsignedValue(CXCursor C);
+
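A sketch of the recommended kind check before trusting the returned value; visitEnums is a hypothetical visitor passed to clang_visitChildren:

#include "clang-c/Index.h"
#include <stdio.h>

/* Visitor printing each enum constant with its value. The kind check is
   essential: LLONG_MIN is also a potentially valid constant value. */
static enum CXChildVisitResult
visitEnums(CXCursor c, CXCursor parent, CXClientData data) {
  (void)parent; (void)data;
  if (clang_getCursorKind(c) == CXCursor_EnumConstantDecl) {
    CXString name = clang_getCursorSpelling(c);
    printf("%s = %lld\n", clang_getCString(name),
           clang_getEnumConstantDeclValue(c));
    clang_disposeString(name);
  }
  return CXChildVisit_Recurse;
}
/* Given a parsed translation unit TU:
     clang_visitChildren(clang_getTranslationUnitCursor(TU),
                         visitEnums, NULL);  */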
+/**
+ * \brief Retrieve the number of non-variadic arguments associated with a given
+ * cursor.
+ *
+ * If a cursor that is not a function or method is passed in, -1 is returned.
+ */
+CINDEX_LINKAGE int clang_Cursor_getNumArguments(CXCursor C);
+
+/**
+ * \brief Retrieve the argument cursor of a function or method.
+ *
+ * If a cursor that is not a function or method is passed in or the index
+ * exceeds the number of arguments, an invalid cursor is returned.
+ */
+CINDEX_LINKAGE CXCursor clang_Cursor_getArgument(CXCursor C, unsigned i);
+
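A sketch of iterating a callable cursor's arguments with these two routines; printParams is a hypothetical helper:

#include "clang-c/Index.h"
#include <stdio.h>

/* Print the parameters of a function or method cursor. For any other
   cursor kind, clang_Cursor_getNumArguments returns -1 and the loop
   body never runs. */
static void printParams(CXCursor fn) {
  int i, n = clang_Cursor_getNumArguments(fn);
  for (i = 0; i < n; ++i) {
    CXCursor arg = clang_Cursor_getArgument(fn, (unsigned)i);
    CXString name = clang_getCursorSpelling(arg);
    printf("  param %d: %s\n", i, clang_getCString(name));
    clang_disposeString(name);
  }
}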
+/**
+ * \brief Determine whether two CXTypes represent the same type.
*
* \returns non-zero if the CXTypes represent the same type and
@@ -2378,13 +2628,44 @@ CINDEX_LINKAGE CXString clang_getDeclObjCTypeEncoding(CXCursor C);
CINDEX_LINKAGE CXString clang_getTypeKindSpelling(enum CXTypeKind K);
/**
+ * \brief Retrieve the calling convention associated with a function type.
+ *
+ * If a non-function type is passed in, CXCallingConv_Invalid is returned.
+ */
+CINDEX_LINKAGE enum CXCallingConv clang_getFunctionTypeCallingConv(CXType T);
+
+/**
* \brief Retrieve the result type associated with a function type.
+ *
+ * If a non-function type is passed in, an invalid type is returned.
*/
CINDEX_LINKAGE CXType clang_getResultType(CXType T);
/**
- * \brief Retrieve the result type associated with a given cursor. This only
- * returns a valid type of the cursor refers to a function or method.
+ * \brief Retrieve the number of non-variadic arguments associated with a
+ * function type.
+ *
+ * If a non-function type is passed in, -1 is returned.
+ */
+CINDEX_LINKAGE int clang_getNumArgTypes(CXType T);
+
+/**
+ * \brief Retrieve the type of an argument of a function type.
+ *
+ * If a non-function type is passed in or the function does not have enough
+ * parameters, an invalid type is returned.
+ */
+CINDEX_LINKAGE CXType clang_getArgType(CXType T, unsigned i);
+
+/**
+ * \brief Return 1 if the CXType is a variadic function type, and 0 otherwise.
+ */
+CINDEX_LINKAGE unsigned clang_isFunctionTypeVariadic(CXType T);
+
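Taken together, these CXType routines let a client reconstruct a function signature. A sketch, assuming a function cursor obtained elsewhere:

#include "clang-c/Index.h"
#include <stdio.h>

/* Dump a function cursor's signature with the new CXType queries. */
static void dumpSignature(CXCursor fn) {
  CXType t = clang_getCursorType(fn);
  CXString ret = clang_getTypeKindSpelling(clang_getResultType(t).kind);
  int i, n = clang_getNumArgTypes(t);  /* -1 for non-function types */
  printf("returns %s; %d fixed argument(s)%s; calling convention %d\n",
         clang_getCString(ret), n,
         clang_isFunctionTypeVariadic(t) ? "; variadic" : "",
         (int)clang_getFunctionTypeCallingConv(t));
  clang_disposeString(ret);
  for (i = 0; i < n; ++i) {
    CXString s =
        clang_getTypeKindSpelling(clang_getArgType(t, (unsigned)i).kind);
    printf("  argument %d kind: %s\n", i, clang_getCString(s));
    clang_disposeString(s);
  }
}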
+/**
+ * \brief Retrieve the result type associated with a given cursor.
+ *
+ * This only returns a valid type if the cursor refers to a function or method.
*/
CINDEX_LINKAGE CXType clang_getCursorResultType(CXCursor C);
@@ -2395,6 +2676,22 @@ CINDEX_LINKAGE CXType clang_getCursorResultType(CXCursor C);
CINDEX_LINKAGE unsigned clang_isPODType(CXType T);
/**
+ * \brief Return the element type of an array, complex, or vector type.
+ *
+ * If a type is passed in that is not an array, complex, or vector type,
+ * an invalid type is returned.
+ */
+CINDEX_LINKAGE CXType clang_getElementType(CXType T);
+
+/**
+ * \brief Return the number of elements of an array or vector type.
+ *
+ * If a type is passed in that is not an array or vector type,
+ * -1 is returned.
+ */
+CINDEX_LINKAGE long long clang_getNumElements(CXType T);
+
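A sketch combining the two routines; describeElements is a hypothetical helper taking a CXType from, e.g., clang_getCursorType:

#include "clang-c/Index.h"
#include <stdio.h>

/* Report the element kind and count of an array or vector type;
   clang_getNumElements returns -1 for any other type. */
static void describeElements(CXType t) {
  long long n = clang_getNumElements(t);
  if (n >= 0) {
    CXString s = clang_getTypeKindSpelling(clang_getElementType(t).kind);
    printf("%lld element(s) of kind %s\n", n, clang_getCString(s));
    clang_disposeString(s);
  }
}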
+/**
* \brief Return the element type of an array type.
*
* If a non-array type is passed in, an invalid type is returned.
@@ -2653,6 +2950,21 @@ CINDEX_LINKAGE CXString clang_constructUSR_ObjCProperty(const char *property,
CINDEX_LINKAGE CXString clang_getCursorSpelling(CXCursor);
/**
+ * \brief Retrieve a range for a piece that forms the cursor's spelling name.
+ * Most of the time there is only one range for the complete spelling, but for
+ * Objective-C methods and Objective-C message expressions there are multiple
+ * pieces, one for each selector identifier.
+ *
+ * \param pieceIndex the index of the spelling name piece. If this is greater
+ * than the actual number of pieces, it will return a NULL (invalid) range.
+ *
+ * \param options Reserved.
+ */
+CINDEX_LINKAGE CXSourceRange clang_Cursor_getSpellingNameRange(CXCursor,
+ unsigned pieceIndex,
+ unsigned options);
+
+/**
* \brief Retrieve the display name for the entity referenced by this cursor.
*
* The display name contains extra information that helps identify the cursor,
@@ -2735,6 +3047,20 @@ CINDEX_LINKAGE unsigned clang_isCursorDefinition(CXCursor);
*/
CINDEX_LINKAGE CXCursor clang_getCanonicalCursor(CXCursor);
+
+/**
+ * \brief If the cursor points to a selector identifier in an Objective-C
+ * method or message expression, this returns the selector index.
+ *
+ * After getting a cursor with \see clang_getCursor, this can be called to
+ * determine if the location points to a selector identifier.
+ *
+ * \returns The selector index if the cursor is an Objective-C method or
+ * message expression and the cursor is pointing to a selector identifier,
+ * or -1 otherwise.
+ */
+CINDEX_LINKAGE int clang_Cursor_getObjCSelectorIndex(CXCursor);
+
/**
* @}
*/
@@ -3347,6 +3673,26 @@ clang_getCompletionAnnotation(CXCompletionString completion_string,
unsigned annotation_number);
/**
+ * \brief Retrieve the parent context of the given completion string.
+ *
+ * The parent context of a completion string is the semantic parent of
+ * the declaration (if any) that the code completion represents. For example,
+ * a code completion for an Objective-C method would have the method's class
+ * or protocol as its context.
+ *
+ * \param completion_string The code completion string whose parent is
+ * being queried.
+ *
+ * \param kind If non-NULL, will be set to the kind of the parent context,
+ * or CXCursor_NotImplemented if there is no context.
+ *
+ * \returns The name of the completion parent, e.g., "NSObject" if
+ * the completion string represents a method in the NSObject class.
+ */
+CINDEX_LINKAGE CXString
+clang_getCompletionParent(CXCompletionString completion_string,
+ enum CXCursorKind *kind);
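A sketch of querying completion parents, assuming a parsed translation unit and the pre-existing clang_codeCompleteAt/clang_defaultCodeCompleteOptions entry points:

#include "clang-c/Index.h"
#include <stdio.h>

/* Print each completion with its parent context, e.g. the class that
   declares a completed method. */
static void printCompletions(CXTranslationUnit TU, const char *file,
                             unsigned line, unsigned column) {
  unsigned i;
  CXCodeCompleteResults *res =
      clang_codeCompleteAt(TU, file, line, column, /*unsaved_files=*/0, 0,
                           clang_defaultCodeCompleteOptions());
  if (!res)
    return;
  for (i = 0; i < res->NumResults; ++i) {
    enum CXCursorKind parentKind;
    CXString parent = clang_getCompletionParent(
        res->Results[i].CompletionString, &parentKind);
    printf("completion %u: parent = %s\n", i, clang_getCString(parent));
    clang_disposeString(parent);
  }
  clang_disposeCodeCompleteResults(res);
}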
+
+/**
* \brief Retrieve a completion string for an arbitrary declaration or macro
* definition cursor.
*
@@ -3788,6 +4134,20 @@ typedef void *CXRemapping;
CINDEX_LINKAGE CXRemapping clang_getRemappings(const char *path);
/**
+ * \brief Retrieve a remapping.
+ *
+ * \param filePaths pointer to an array of file paths containing remapping info.
+ *
+ * \param numFiles number of file paths.
+ *
+ * \returns the requested remapping. This remapping must be freed
+ * via a call to \c clang_remap_dispose(). Can return NULL if an error occurred.
+ */
+CINDEX_LINKAGE
+CXRemapping clang_getRemappingsFromFileList(const char **filePaths,
+ unsigned numFiles);
+
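A minimal sketch pairing this with the pre-existing clang_remap_getNumFiles/clang_remap_getFilenames/clang_remap_dispose routines; "remap.txt" is a hypothetical input file:

#include "clang-c/Index.h"
#include <stdio.h>

int main(void) {
  const char *files[] = { "remap.txt" };  /* hypothetical input file */
  CXRemapping remap = clang_getRemappingsFromFileList(files, 1);
  unsigned i, n;
  if (!remap)
    return 1;
  n = clang_remap_getNumFiles(remap);
  for (i = 0; i < n; ++i) {
    CXString orig, xformed;
    clang_remap_getFilenames(remap, i, &orig, &xformed);
    printf("%s -> %s\n", clang_getCString(orig), clang_getCString(xformed));
    clang_disposeString(orig);
    clang_disposeString(xformed);
  }
  clang_remap_dispose(remap);
  return 0;
}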
+/**
* \brief Determine the number of remappings.
*/
CINDEX_LINKAGE unsigned clang_remap_getNumFiles(CXRemapping);
@@ -3856,6 +4216,524 @@ void clang_findReferencesInFileWithBlock(CXCursor, CXFile,
#endif
/**
+ * \brief The client's data object that is associated with a CXFile.
+ */
+typedef void *CXIdxClientFile;
+
+/**
+ * \brief The client's data object that is associated with a semantic entity.
+ */
+typedef void *CXIdxClientEntity;
+
+/**
+ * \brief The client's data object that is associated with a semantic container
+ * of entities.
+ */
+typedef void *CXIdxClientContainer;
+
+/**
+ * \brief The client's data object that is associated with an AST file (PCH
+ * or module).
+ */
+typedef void *CXIdxClientASTFile;
+
+/**
+ * \brief Source location passed to index callbacks.
+ */
+typedef struct {
+ void *ptr_data[2];
+ unsigned int_data;
+} CXIdxLoc;
+
+/**
+ * \brief Data for \see ppIncludedFile callback.
+ */
+typedef struct {
+ /**
+ * \brief Location of '#' in the #include/#import directive.
+ */
+ CXIdxLoc hashLoc;
+ /**
+ * \brief Filename as written in the #include/#import directive.
+ */
+ const char *filename;
+ /**
+ * \brief The actual file that the #include/#import directive resolved to.
+ */
+ CXFile file;
+ int isImport;
+ int isAngled;
+} CXIdxIncludedFileInfo;
+
+/**
+ * \brief Data for \see importedASTFile callback.
+ */
+typedef struct {
+ CXFile file;
+ /**
+ * \brief Location where the file is imported. It is useful mostly for
+ * modules.
+ */
+ CXIdxLoc loc;
+ /**
+   * \brief Non-zero if the AST file is a module, otherwise it is a PCH.
+ */
+ int isModule;
+} CXIdxImportedASTFileInfo;
+
+typedef enum {
+ CXIdxEntity_Unexposed = 0,
+ CXIdxEntity_Typedef = 1,
+ CXIdxEntity_Function = 2,
+ CXIdxEntity_Variable = 3,
+ CXIdxEntity_Field = 4,
+ CXIdxEntity_EnumConstant = 5,
+
+ CXIdxEntity_ObjCClass = 6,
+ CXIdxEntity_ObjCProtocol = 7,
+ CXIdxEntity_ObjCCategory = 8,
+
+ CXIdxEntity_ObjCInstanceMethod = 9,
+ CXIdxEntity_ObjCClassMethod = 10,
+ CXIdxEntity_ObjCProperty = 11,
+ CXIdxEntity_ObjCIvar = 12,
+
+ CXIdxEntity_Enum = 13,
+ CXIdxEntity_Struct = 14,
+ CXIdxEntity_Union = 15,
+
+ CXIdxEntity_CXXClass = 16,
+ CXIdxEntity_CXXNamespace = 17,
+ CXIdxEntity_CXXNamespaceAlias = 18,
+ CXIdxEntity_CXXStaticVariable = 19,
+ CXIdxEntity_CXXStaticMethod = 20,
+ CXIdxEntity_CXXInstanceMethod = 21,
+ CXIdxEntity_CXXConstructor = 22,
+ CXIdxEntity_CXXDestructor = 23,
+ CXIdxEntity_CXXConversionFunction = 24,
+ CXIdxEntity_CXXTypeAlias = 25
+
+} CXIdxEntityKind;
+
+typedef enum {
+ CXIdxEntityLang_None = 0,
+ CXIdxEntityLang_C = 1,
+ CXIdxEntityLang_ObjC = 2,
+ CXIdxEntityLang_CXX = 3
+} CXIdxEntityLanguage;
+
+/**
+ * \brief Extra C++ template information for an entity. This can apply to:
+ * CXIdxEntity_Function
+ * CXIdxEntity_CXXClass
+ * CXIdxEntity_CXXStaticMethod
+ * CXIdxEntity_CXXInstanceMethod
+ * CXIdxEntity_CXXConstructor
+ * CXIdxEntity_CXXConversionFunction
+ * CXIdxEntity_CXXTypeAlias
+ */
+typedef enum {
+ CXIdxEntity_NonTemplate = 0,
+ CXIdxEntity_Template = 1,
+ CXIdxEntity_TemplatePartialSpecialization = 2,
+ CXIdxEntity_TemplateSpecialization = 3
+} CXIdxEntityCXXTemplateKind;
+
+typedef enum {
+ CXIdxAttr_Unexposed = 0,
+ CXIdxAttr_IBAction = 1,
+ CXIdxAttr_IBOutlet = 2,
+ CXIdxAttr_IBOutletCollection = 3
+} CXIdxAttrKind;
+
+typedef struct {
+ CXIdxAttrKind kind;
+ CXCursor cursor;
+ CXIdxLoc loc;
+} CXIdxAttrInfo;
+
+typedef struct {
+ CXIdxEntityKind kind;
+ CXIdxEntityCXXTemplateKind templateKind;
+ CXIdxEntityLanguage lang;
+ const char *name;
+ const char *USR;
+ CXCursor cursor;
+ const CXIdxAttrInfo *const *attributes;
+ unsigned numAttributes;
+} CXIdxEntityInfo;
+
+typedef struct {
+ CXCursor cursor;
+} CXIdxContainerInfo;
+
+typedef struct {
+ const CXIdxAttrInfo *attrInfo;
+ const CXIdxEntityInfo *objcClass;
+ CXCursor classCursor;
+ CXIdxLoc classLoc;
+} CXIdxIBOutletCollectionAttrInfo;
+
+typedef struct {
+ const CXIdxEntityInfo *entityInfo;
+ CXCursor cursor;
+ CXIdxLoc loc;
+ const CXIdxContainerInfo *semanticContainer;
+ /**
+   * \brief Generally the same as \see semanticContainer but can be different
+   * in cases like out-of-line C++ member functions.
+ */
+ const CXIdxContainerInfo *lexicalContainer;
+ int isRedeclaration;
+ int isDefinition;
+ int isContainer;
+ const CXIdxContainerInfo *declAsContainer;
+ /**
+ * \brief Whether the declaration exists in code or was created implicitly
+   * by the compiler, e.g., implicit Objective-C methods for properties.
+ */
+ int isImplicit;
+ const CXIdxAttrInfo *const *attributes;
+ unsigned numAttributes;
+} CXIdxDeclInfo;
+
+typedef enum {
+ CXIdxObjCContainer_ForwardRef = 0,
+ CXIdxObjCContainer_Interface = 1,
+ CXIdxObjCContainer_Implementation = 2
+} CXIdxObjCContainerKind;
+
+typedef struct {
+ const CXIdxDeclInfo *declInfo;
+ CXIdxObjCContainerKind kind;
+} CXIdxObjCContainerDeclInfo;
+
+typedef struct {
+ const CXIdxEntityInfo *base;
+ CXCursor cursor;
+ CXIdxLoc loc;
+} CXIdxBaseClassInfo;
+
+typedef struct {
+ const CXIdxEntityInfo *protocol;
+ CXCursor cursor;
+ CXIdxLoc loc;
+} CXIdxObjCProtocolRefInfo;
+
+typedef struct {
+ const CXIdxObjCProtocolRefInfo *const *protocols;
+ unsigned numProtocols;
+} CXIdxObjCProtocolRefListInfo;
+
+typedef struct {
+ const CXIdxObjCContainerDeclInfo *containerInfo;
+ const CXIdxBaseClassInfo *superInfo;
+ const CXIdxObjCProtocolRefListInfo *protocols;
+} CXIdxObjCInterfaceDeclInfo;
+
+typedef struct {
+ const CXIdxObjCContainerDeclInfo *containerInfo;
+ const CXIdxEntityInfo *objcClass;
+ CXCursor classCursor;
+ CXIdxLoc classLoc;
+ const CXIdxObjCProtocolRefListInfo *protocols;
+} CXIdxObjCCategoryDeclInfo;
+
+typedef struct {
+ const CXIdxDeclInfo *declInfo;
+ const CXIdxEntityInfo *getter;
+ const CXIdxEntityInfo *setter;
+} CXIdxObjCPropertyDeclInfo;
+
+typedef struct {
+ const CXIdxDeclInfo *declInfo;
+ const CXIdxBaseClassInfo *const *bases;
+ unsigned numBases;
+} CXIdxCXXClassDeclInfo;
+
+/**
+ * \brief The kind of reference reported by the \see indexEntityReference
+ * callback.
+ */
+typedef enum {
+ /**
+   * \brief The entity is referenced directly in the user's code.
+ */
+ CXIdxEntityRef_Direct = 1,
+ /**
+ * \brief An implicit reference, e.g. a reference of an ObjC method via the
+ * dot syntax.
+ */
+ CXIdxEntityRef_Implicit = 2
+} CXIdxEntityRefKind;
+
+/**
+ * \brief Data for \see indexEntityReference callback.
+ */
+typedef struct {
+ CXIdxEntityRefKind kind;
+ /**
+ * \brief Reference cursor.
+ */
+ CXCursor cursor;
+ CXIdxLoc loc;
+ /**
+ * \brief The entity that gets referenced.
+ */
+ const CXIdxEntityInfo *referencedEntity;
+ /**
+ * \brief Immediate "parent" of the reference. For example:
+ *
+ * \code
+ * Foo *var;
+ * \endcode
+ *
+   * The parent of the reference of type 'Foo' is the variable 'var'.
+ * For references inside statement bodies of functions/methods,
+ * the parentEntity will be the function/method.
+ */
+ const CXIdxEntityInfo *parentEntity;
+ /**
+ * \brief Lexical container context of the reference.
+ */
+ const CXIdxContainerInfo *container;
+} CXIdxEntityRefInfo;
+
+typedef struct {
+ /**
+ * \brief Called periodically to check whether indexing should be aborted.
+ * Should return 0 to continue, and non-zero to abort.
+ */
+ int (*abortQuery)(CXClientData client_data, void *reserved);
+
+ /**
+ * \brief Called at the end of indexing; passes the complete diagnostic set.
+ */
+ void (*diagnostic)(CXClientData client_data,
+ CXDiagnosticSet, void *reserved);
+
+ CXIdxClientFile (*enteredMainFile)(CXClientData client_data,
+ CXFile mainFile, void *reserved);
+
+ /**
+ * \brief Called when a file gets #included/#imported.
+ */
+ CXIdxClientFile (*ppIncludedFile)(CXClientData client_data,
+ const CXIdxIncludedFileInfo *);
+
+ /**
+   * \brief Called when an AST file (PCH or module) gets imported.
+   *
+   * AST files will not get indexed (there will not be callbacks to index all
+   * the entities in an AST file). If the AST file is not already indexed, the
+   * recommended action is to block further indexing and initiate a new
+   * indexing job specific to the AST file.
+ */
+ CXIdxClientASTFile (*importedASTFile)(CXClientData client_data,
+ const CXIdxImportedASTFileInfo *);
+
+ /**
+ * \brief Called at the beginning of indexing a translation unit.
+ */
+ CXIdxClientContainer (*startedTranslationUnit)(CXClientData client_data,
+ void *reserved);
+
+ void (*indexDeclaration)(CXClientData client_data,
+ const CXIdxDeclInfo *);
+
+ /**
+ * \brief Called to index a reference of an entity.
+ */
+ void (*indexEntityReference)(CXClientData client_data,
+ const CXIdxEntityRefInfo *);
+
+} IndexerCallbacks;
+
+CINDEX_LINKAGE int clang_index_isEntityObjCContainerKind(CXIdxEntityKind);
+CINDEX_LINKAGE const CXIdxObjCContainerDeclInfo *
+clang_index_getObjCContainerDeclInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE const CXIdxObjCInterfaceDeclInfo *
+clang_index_getObjCInterfaceDeclInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE
+const CXIdxObjCCategoryDeclInfo *
+clang_index_getObjCCategoryDeclInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE const CXIdxObjCProtocolRefListInfo *
+clang_index_getObjCProtocolRefListInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE const CXIdxObjCPropertyDeclInfo *
+clang_index_getObjCPropertyDeclInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE const CXIdxIBOutletCollectionAttrInfo *
+clang_index_getIBOutletCollectionAttrInfo(const CXIdxAttrInfo *);
+
+CINDEX_LINKAGE const CXIdxCXXClassDeclInfo *
+clang_index_getCXXClassDeclInfo(const CXIdxDeclInfo *);
+
+/**
+ * \brief For retrieving a custom CXIdxClientContainer attached to a
+ * container.
+ */
+CINDEX_LINKAGE CXIdxClientContainer
+clang_index_getClientContainer(const CXIdxContainerInfo *);
+
+/**
+ * \brief For setting a custom CXIdxClientContainer attached to a
+ * container.
+ */
+CINDEX_LINKAGE void
+clang_index_setClientContainer(const CXIdxContainerInfo *,CXIdxClientContainer);
+
+/**
+ * \brief For retrieving a custom CXIdxClientEntity attached to an entity.
+ */
+CINDEX_LINKAGE CXIdxClientEntity
+clang_index_getClientEntity(const CXIdxEntityInfo *);
+
+/**
+ * \brief For setting a custom CXIdxClientEntity attached to an entity.
+ */
+CINDEX_LINKAGE void
+clang_index_setClientEntity(const CXIdxEntityInfo *, CXIdxClientEntity);
+
+/**
+ * \brief An indexing action, to be applied to one or multiple translation
+ * units, but not concurrently from multiple threads. Threads that index
+ * concurrently should use different CXIndexAction objects.
+ */
+typedef void *CXIndexAction;
+
+/**
+ * \brief Create an indexing action that can be applied to one or multiple
+ * translation units, but not used concurrently from multiple threads.
+ * Threads that index concurrently should use different CXIndexAction objects.
+ *
+ * \param CIdx The index object with which the index action will be associated.
+ */
+CINDEX_LINKAGE CXIndexAction clang_IndexAction_create(CXIndex CIdx);
+
+/**
+ * \brief Destroy the given index action.
+ *
+ * The index action must not be destroyed until all of the translation units
+ * created within that index action have been destroyed.
+ */
+CINDEX_LINKAGE void clang_IndexAction_dispose(CXIndexAction);
+
+typedef enum {
+ /**
+ * \brief Used to indicate that no special indexing options are needed.
+ */
+ CXIndexOpt_None = 0x0,
+
+ /**
+ * \brief Used to indicate that \see indexEntityReference should be invoked
+ * for only one reference of an entity per source file that does not also
+ * include a declaration/definition of the entity.
+ */
+ CXIndexOpt_SuppressRedundantRefs = 0x1,
+
+ /**
+   * \brief Function-local symbols should be indexed. If this is not set,
+   * function-local symbols will be ignored.
+ */
+ CXIndexOpt_IndexFunctionLocalSymbols = 0x2,
+
+ /**
+ * \brief Implicit function/class template instantiations should be indexed.
+ * If this is not set, implicit instantiations will be ignored.
+ */
+ CXIndexOpt_IndexImplicitTemplateInstantiations = 0x4,
+
+ /**
+ * \brief Suppress all compiler warnings when parsing for indexing.
+ */
+ CXIndexOpt_SuppressWarnings = 0x8
+} CXIndexOptFlags;
+
+/**
+ * \brief Index the given source file and the translation unit corresponding
+ * to that file via callbacks implemented through \see IndexerCallbacks.
+ *
+ * \param client_data pointer data supplied by the client, which will
+ * be passed to the invoked callbacks.
+ *
+ * \param index_callbacks Pointer to indexing callbacks that the client
+ * implements.
+ *
+ * \param index_callbacks_size Size of the \see IndexerCallbacks structure
+ * that is passed in via index_callbacks.
+ *
+ * \param index_options A bitmask of options that affects how indexing is
+ * performed. This should be a bitwise OR of the CXIndexOpt_XXX flags.
+ *
+ * \param out_TU [out] pointer to store a CXTranslationUnit that can be reused
+ * after indexing is finished. Set to NULL if you do not require it.
+ *
+ * \returns If there is a failure from which there is no recovery, returns
+ * non-zero; otherwise returns 0.
+ *
+ * The rest of the parameters are the same as \see clang_parseTranslationUnit.
+ */
+CINDEX_LINKAGE int clang_indexSourceFile(CXIndexAction,
+ CXClientData client_data,
+ IndexerCallbacks *index_callbacks,
+ unsigned index_callbacks_size,
+ unsigned index_options,
+ const char *source_filename,
+ const char * const *command_line_args,
+ int num_command_line_args,
+ struct CXUnsavedFile *unsaved_files,
+ unsigned num_unsaved_files,
+ CXTranslationUnit *out_TU,
+ unsigned TU_options);
+
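A minimal end-to-end sketch: zero-initialize IndexerCallbacks, install only indexDeclaration, and index one file. onDecl uses clang_indexLoc_getFileLocation (declared further below) to decode the CXIdxLoc; passing NULL for unwanted out-parameters is assumed to be accepted:

#include "clang-c/Index.h"
#include <stdio.h>
#include <string.h>

/* Report each indexed declaration at its resolved source location. */
static void onDecl(CXClientData client_data, const CXIdxDeclInfo *info) {
  CXFile file;
  unsigned line, col;
  (void)client_data;
  clang_indexLoc_getFileLocation(info->loc, /*indexFile=*/0, &file,
                                 &line, &col, /*offset=*/0);
  if (info->entityInfo && info->entityInfo->name) {
    CXString fname = clang_getFileName(file);
    printf("%s:%u:%u: %s%s\n", clang_getCString(fname), line, col,
           info->entityInfo->name, info->isDefinition ? " (def)" : "");
    clang_disposeString(fname);
  }
}

int main(int argc, const char **argv) {
  CXIndex idx;
  CXIndexAction action;
  IndexerCallbacks cb;
  int failed;
  if (argc < 2)
    return 1;
  idx = clang_createIndex(0, 1);
  action = clang_IndexAction_create(idx);
  memset(&cb, 0, sizeof(cb));  /* unused callbacks stay NULL */
  cb.indexDeclaration = onDecl;
  failed = clang_indexSourceFile(action, /*client_data=*/0, &cb, sizeof(cb),
                                 CXIndexOpt_None, argv[1],
                                 argv + 2, argc - 2,
                                 /*unsaved_files=*/0, 0,
                                 /*out_TU=*/0, /*TU_options=*/0);
  clang_IndexAction_dispose(action);
  clang_disposeIndex(idx);
  return failed;
}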
+/**
+ * \brief Index the given translation unit via callbacks implemented through
+ * \see IndexerCallbacks.
+ *
+ * The order of callback invocations is not guaranteed to be the same as
+ * when indexing a source file. The high-level order will be:
+ *
+ *   - Preprocessor callback invocations
+ *   - Declaration/reference callback invocations
+ *   - Diagnostic callback invocations
+ *
+ * The parameters are the same as \see clang_indexSourceFile.
+ *
+ * \returns If there is a failure from which there is no recovery, returns
+ * non-zero; otherwise returns 0.
+ */
+CINDEX_LINKAGE int clang_indexTranslationUnit(CXIndexAction,
+ CXClientData client_data,
+ IndexerCallbacks *index_callbacks,
+ unsigned index_callbacks_size,
+ unsigned index_options,
+ CXTranslationUnit);
+
+/**
+ * \brief Retrieve the CXIdxClientFile, file, line, column, and offset
+ * represented by the given CXIdxLoc.
+ *
+ * If the location refers into a macro expansion, retrieves the
+ * location of the macro expansion; if it refers into a macro argument,
+ * retrieves the location of the argument.
+ */
+CINDEX_LINKAGE void clang_indexLoc_getFileLocation(CXIdxLoc loc,
+ CXIdxClientFile *indexFile,
+ CXFile *file,
+ unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
+ * \brief Retrieve the CXSourceLocation represented by the given CXIdxLoc.
+ */
+CINDEX_LINKAGE
+CXSourceLocation clang_indexLoc_getCXSourceLocation(CXIdxLoc loc);
+
+/**
* @}
*/
diff --git a/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h
index d8dea0b..86a6cbb 100644
--- a/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h
+++ b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h
@@ -37,7 +37,7 @@ namespace arcmt {
///
/// \returns false if no error is produced, true otherwise.
bool checkForManualIssues(CompilerInvocation &CI,
- StringRef Filename, InputKind Kind,
+ const FrontendInputFile &Input,
DiagnosticConsumer *DiagClient,
bool emitPremigrationARCErrors = false,
StringRef plistOut = StringRef());
@@ -47,7 +47,7 @@ bool checkForManualIssues(CompilerInvocation &CI,
///
/// \returns false if no error is produced, true otherwise.
bool applyTransformations(CompilerInvocation &origCI,
- StringRef Filename, InputKind Kind,
+ const FrontendInputFile &Input,
DiagnosticConsumer *DiagClient);
/// \brief Applies automatic modifications and produces temporary files
@@ -62,7 +62,7 @@ bool applyTransformations(CompilerInvocation &origCI,
///
/// \returns false if no error is produced, true otherwise.
bool migrateWithTemporaryFiles(CompilerInvocation &origCI,
- StringRef Filename, InputKind Kind,
+ const FrontendInputFile &Input,
DiagnosticConsumer *DiagClient,
StringRef outputDir,
bool emitPremigrationARCErrors,
@@ -76,9 +76,19 @@ bool getFileRemappings(std::vector<std::pair<std::string,std::string> > &remap,
StringRef outputDir,
DiagnosticConsumer *DiagClient);
+/// \brief Get the set of file remappings from a list of files with remapping
+/// info.
+///
+/// \returns false if no error is produced, true otherwise.
+bool getFileRemappingsFromFileList(
+ std::vector<std::pair<std::string,std::string> > &remap,
+ ArrayRef<StringRef> remapFiles,
+ DiagnosticConsumer *DiagClient);
+
typedef void (*TransformFn)(MigrationPass &pass);
-std::vector<TransformFn> getAllTransformations();
+std::vector<TransformFn> getAllTransformations(LangOptions::GCMode OrigGCMode,
+ bool NoFinalizeRemoval);
class MigrationProcess {
CompilerInvocation OrigCI;
diff --git a/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h
index 4eac4fa..e075252 100644
--- a/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h
+++ b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h
@@ -11,6 +11,7 @@
#define LLVM_CLANG_ARCMIGRATE_ARCMT_ACTION_H
#include "clang/Frontend/FrontendAction.h"
+#include "clang/ARCMigrate/FileRemapper.h"
#include "llvm/ADT/OwningPtr.h"
namespace clang {
@@ -32,6 +33,14 @@ public:
ModifyAction(FrontendAction *WrappedAction);
};
+class MigrateSourceAction : public ASTFrontendAction {
+ FileRemapper Remapper;
+protected:
+ virtual bool BeginInvocation(CompilerInstance &CI);
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
class MigrateAction : public WrapperFrontendAction {
std::string MigrateDir;
std::string PlistOut;
@@ -45,6 +54,23 @@ public:
bool emitPremigrationARCErrors);
};
+/// \brief Migrates to modern ObjC syntax.
+class ObjCMigrateAction : public WrapperFrontendAction {
+ std::string MigrateDir;
+ bool MigrateLiterals;
+ bool MigrateSubscripting;
+ FileRemapper Remapper;
+ CompilerInstance *CompInst;
+public:
+ ObjCMigrateAction(FrontendAction *WrappedAction, StringRef migrateDir,
+ bool migrateLiterals,
+ bool migrateSubscripting);
+
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,StringRef InFile);
+ virtual bool BeginInvocation(CompilerInstance &CI);
+};
+
}
}
diff --git a/contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h b/contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h
index 9a0b690..fe7cfad 100644
--- a/contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h
+++ b/contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h
@@ -24,13 +24,13 @@ namespace clang {
class FileManager;
class FileEntry;
class DiagnosticsEngine;
- class CompilerInvocation;
+ class PreprocessorOptions;
namespace arcmt {
class FileRemapper {
// FIXME: Reuse the same FileManager for multiple ASTContexts.
- llvm::OwningPtr<FileManager> FileMgr;
+ OwningPtr<FileManager> FileMgr;
typedef llvm::PointerUnion<const FileEntry *, llvm::MemoryBuffer *> Target;
typedef llvm::DenseMap<const FileEntry *, Target> MappingsTy;
@@ -44,7 +44,10 @@ public:
bool initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
bool ignoreIfFilesChanged);
+ bool initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
+ bool ignoreIfFilesChanged);
bool flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag);
+ bool flushToFile(StringRef outputPath, DiagnosticsEngine &Diag);
bool overwriteOriginal(DiagnosticsEngine &Diag,
StringRef outputDir = StringRef());
@@ -52,9 +55,9 @@ public:
void remap(StringRef filePath, llvm::MemoryBuffer *memBuf);
void remap(StringRef filePath, StringRef newPath);
- void applyMappings(CompilerInvocation &CI) const;
+ void applyMappings(PreprocessorOptions &PPOpts) const;
- void transferMappingsAndClear(CompilerInvocation &CI);
+ void transferMappingsAndClear(PreprocessorOptions &PPOpts);
void clear(StringRef outputDir = StringRef());
diff --git a/contrib/llvm/tools/clang/include/clang/AST/APValue.h b/contrib/llvm/tools/clang/include/clang/AST/APValue.h
index 375af28..1b6e90c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/APValue.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/APValue.h
@@ -17,14 +17,24 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
namespace clang {
+ class AddrLabelExpr;
+ class ASTContext;
class CharUnits;
class DiagnosticBuilder;
class Expr;
+ class FieldDecl;
+ class Decl;
+ class ValueDecl;
+ class CXXRecordDecl;
+ class QualType;
/// APValue - This class implements a discriminated union of [uninitialized]
-/// [APSInt] [APFloat], [Complex APSInt] [Complex APFloat], [Expr + Offset].
+/// [APSInt] [APFloat], [Complex APSInt] [Complex APFloat], [Expr + Offset],
+/// [Vector: N * APValue], [Array: N * APValue]
class APValue {
typedef llvm::APSInt APSInt;
typedef llvm::APFloat APFloat;
@@ -36,8 +46,25 @@ public:
ComplexInt,
ComplexFloat,
LValue,
- Vector
+ Vector,
+ Array,
+ Struct,
+ Union,
+ MemberPointer,
+ AddrLabelDiff
};
+ typedef llvm::PointerUnion<const ValueDecl *, const Expr *> LValueBase;
+ typedef llvm::PointerIntPair<const Decl *, 1, bool> BaseOrMemberType;
+ union LValuePathEntry {
+ /// BaseOrMember - The FieldDecl or CXXRecordDecl indicating the next item
+ /// in the path. An opaque value of type BaseOrMemberType.
+ void *BaseOrMember;
+ /// ArrayIndex - The array index of the next item in the path.
+ uint64_t ArrayIndex;
+ };
+ struct NoLValuePath {};
+ struct UninitArray {};
+ struct UninitStruct {};
private:
ValueKind Kind;
@@ -49,13 +76,37 @@ private:
APFloat Real, Imag;
ComplexAPFloat() : Real(0.0), Imag(0.0) {}
};
-
+ struct LV;
struct Vec {
APValue *Elts;
unsigned NumElts;
Vec() : Elts(0), NumElts(0) {}
~Vec() { delete[] Elts; }
};
+ struct Arr {
+ APValue *Elts;
+ unsigned NumElts, ArrSize;
+ Arr(unsigned NumElts, unsigned ArrSize);
+ ~Arr();
+ };
+ struct StructData {
+ APValue *Elts;
+ unsigned NumBases;
+ unsigned NumFields;
+ StructData(unsigned NumBases, unsigned NumFields);
+ ~StructData();
+ };
+ struct UnionData {
+ const FieldDecl *Field;
+ APValue *Value;
+ UnionData();
+ ~UnionData();
+ };
+ struct AddrLabelDiffData {
+ const AddrLabelExpr* LHSExpr;
+ const AddrLabelExpr* RHSExpr;
+ };
+ struct MemberPointerData;
enum {
MaxSize = (sizeof(ComplexAPSInt) > sizeof(ComplexAPFloat) ?
@@ -84,18 +135,42 @@ public:
APValue(const APFloat &R, const APFloat &I) : Kind(Uninitialized) {
MakeComplexFloat(); setComplexFloat(R, I);
}
- APValue(const APValue &RHS) : Kind(Uninitialized) {
- *this = RHS;
+ APValue(const APValue &RHS);
+ APValue(LValueBase B, const CharUnits &O, NoLValuePath N, unsigned CallIndex)
+ : Kind(Uninitialized) {
+ MakeLValue(); setLValue(B, O, N, CallIndex);
+ }
+ APValue(LValueBase B, const CharUnits &O, ArrayRef<LValuePathEntry> Path,
+ bool OnePastTheEnd, unsigned CallIndex)
+ : Kind(Uninitialized) {
+ MakeLValue(); setLValue(B, O, Path, OnePastTheEnd, CallIndex);
}
- APValue(const Expr* B, const CharUnits &O) : Kind(Uninitialized) {
- MakeLValue(); setLValue(B, O);
+ APValue(UninitArray, unsigned InitElts, unsigned Size) : Kind(Uninitialized) {
+ MakeArray(InitElts, Size);
+ }
+ APValue(UninitStruct, unsigned B, unsigned M) : Kind(Uninitialized) {
+ MakeStruct(B, M);
+ }
+ explicit APValue(const FieldDecl *D, const APValue &V = APValue())
+ : Kind(Uninitialized) {
+ MakeUnion(); setUnion(D, V);
+ }
+ APValue(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl*> Path) : Kind(Uninitialized) {
+ MakeMemberPointer(Member, IsDerivedMember, Path);
+ }
+ APValue(const AddrLabelExpr* LHSExpr, const AddrLabelExpr* RHSExpr)
+ : Kind(Uninitialized) {
+ MakeAddrLabelDiff(); setAddrLabelDiff(LHSExpr, RHSExpr);
}
- APValue(const Expr* B);
~APValue() {
MakeUninit();
}
+ /// \brief Swaps the contents of this and the given APValue.
+ void swap(APValue &RHS);
+
ValueKind getKind() const { return Kind; }
bool isUninit() const { return Kind == Uninitialized; }
bool isInt() const { return Kind == Int; }
@@ -104,9 +179,17 @@ public:
bool isComplexFloat() const { return Kind == ComplexFloat; }
bool isLValue() const { return Kind == LValue; }
bool isVector() const { return Kind == Vector; }
+ bool isArray() const { return Kind == Array; }
+ bool isStruct() const { return Kind == Struct; }
+ bool isUnion() const { return Kind == Union; }
+ bool isMemberPointer() const { return Kind == MemberPointer; }
+ bool isAddrLabelDiff() const { return Kind == AddrLabelDiff; }
- void print(raw_ostream &OS) const;
void dump() const;
+ void dump(raw_ostream &OS) const;
+
+ void printPretty(raw_ostream &OS, ASTContext &Ctx, QualType Ty) const;
+ std::string getAsString(ASTContext &Ctx, QualType Ty) const;
APSInt &getInt() {
assert(isInt() && "Invalid accessor");
@@ -124,19 +207,6 @@ public:
return const_cast<APValue*>(this)->getFloat();
}
- APValue &getVectorElt(unsigned i) {
- assert(isVector() && "Invalid accessor");
- return ((Vec*)(char*)Data)->Elts[i];
- }
- const APValue &getVectorElt(unsigned i) const {
- assert(isVector() && "Invalid accessor");
- return ((const Vec*)(const char*)Data)->Elts[i];
- }
- unsigned getVectorLength() const {
- assert(isVector() && "Invalid accessor");
- return ((const Vec*)(const void *)Data)->NumElts;
- }
-
APSInt &getComplexIntReal() {
assert(isComplexInt() && "Invalid accessor");
return ((ComplexAPSInt*)(char*)Data)->Real;
@@ -169,8 +239,104 @@ public:
return const_cast<APValue*>(this)->getComplexFloatImag();
}
- const Expr* getLValueBase() const;
- CharUnits getLValueOffset() const;
+ const LValueBase getLValueBase() const;
+ CharUnits &getLValueOffset();
+ const CharUnits &getLValueOffset() const {
+ return const_cast<APValue*>(this)->getLValueOffset();
+ }
+ bool isLValueOnePastTheEnd() const;
+ bool hasLValuePath() const;
+ ArrayRef<LValuePathEntry> getLValuePath() const;
+ unsigned getLValueCallIndex() const;
+
+ APValue &getVectorElt(unsigned I) {
+ assert(isVector() && "Invalid accessor");
+ assert(I < getVectorLength() && "Index out of range");
+ return ((Vec*)(char*)Data)->Elts[I];
+ }
+ const APValue &getVectorElt(unsigned I) const {
+ return const_cast<APValue*>(this)->getVectorElt(I);
+ }
+ unsigned getVectorLength() const {
+ assert(isVector() && "Invalid accessor");
+ return ((const Vec*)(const void *)Data)->NumElts;
+ }
+
+ APValue &getArrayInitializedElt(unsigned I) {
+ assert(isArray() && "Invalid accessor");
+ assert(I < getArrayInitializedElts() && "Index out of range");
+ return ((Arr*)(char*)Data)->Elts[I];
+ }
+ const APValue &getArrayInitializedElt(unsigned I) const {
+ return const_cast<APValue*>(this)->getArrayInitializedElt(I);
+ }
+ bool hasArrayFiller() const {
+ return getArrayInitializedElts() != getArraySize();
+ }
+ APValue &getArrayFiller() {
+ assert(isArray() && "Invalid accessor");
+ assert(hasArrayFiller() && "No array filler");
+ return ((Arr*)(char*)Data)->Elts[getArrayInitializedElts()];
+ }
+ const APValue &getArrayFiller() const {
+ return const_cast<APValue*>(this)->getArrayFiller();
+ }
+ unsigned getArrayInitializedElts() const {
+ assert(isArray() && "Invalid accessor");
+ return ((const Arr*)(const void *)Data)->NumElts;
+ }
+ unsigned getArraySize() const {
+ assert(isArray() && "Invalid accessor");
+ return ((const Arr*)(const void *)Data)->ArrSize;
+ }
+
+ unsigned getStructNumBases() const {
+ assert(isStruct() && "Invalid accessor");
+ return ((const StructData*)(const char*)Data)->NumBases;
+ }
+ unsigned getStructNumFields() const {
+ assert(isStruct() && "Invalid accessor");
+ return ((const StructData*)(const char*)Data)->NumFields;
+ }
+ APValue &getStructBase(unsigned i) {
+ assert(isStruct() && "Invalid accessor");
+ return ((StructData*)(char*)Data)->Elts[i];
+ }
+ APValue &getStructField(unsigned i) {
+ assert(isStruct() && "Invalid accessor");
+ return ((StructData*)(char*)Data)->Elts[getStructNumBases() + i];
+ }
+ const APValue &getStructBase(unsigned i) const {
+ return const_cast<APValue*>(this)->getStructBase(i);
+ }
+ const APValue &getStructField(unsigned i) const {
+ return const_cast<APValue*>(this)->getStructField(i);
+ }
+
+ const FieldDecl *getUnionField() const {
+ assert(isUnion() && "Invalid accessor");
+ return ((const UnionData*)(const char*)Data)->Field;
+ }
+ APValue &getUnionValue() {
+ assert(isUnion() && "Invalid accessor");
+ return *((UnionData*)(char*)Data)->Value;
+ }
+ const APValue &getUnionValue() const {
+ return const_cast<APValue*>(this)->getUnionValue();
+ }
+
+ const ValueDecl *getMemberPointerDecl() const;
+ bool isMemberPointerToDerivedMember() const;
+ ArrayRef<const CXXRecordDecl*> getMemberPointerPath() const;
+
+ const AddrLabelExpr* getAddrLabelDiffLHS() const {
+ assert(isAddrLabelDiff() && "Invalid accessor");
+ return ((const AddrLabelDiffData*)(const char*)Data)->LHSExpr;
+ }
+ const AddrLabelExpr* getAddrLabelDiffRHS() const {
+ assert(isAddrLabelDiff() && "Invalid accessor");
+ return ((const AddrLabelDiffData*)(const char*)Data)->RHSExpr;
+ }
void setInt(const APSInt &I) {
assert(isInt() && "Invalid accessor");
@@ -201,12 +367,34 @@ public:
((ComplexAPFloat*)(char*)Data)->Real = R;
((ComplexAPFloat*)(char*)Data)->Imag = I;
}
- void setLValue(const Expr *B, const CharUnits &O);
+ void setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
+ unsigned CallIndex);
+ void setLValue(LValueBase B, const CharUnits &O,
+ ArrayRef<LValuePathEntry> Path, bool OnePastTheEnd,
+ unsigned CallIndex);
+ void setUnion(const FieldDecl *Field, const APValue &Value) {
+ assert(isUnion() && "Invalid accessor");
+ ((UnionData*)(char*)Data)->Field = Field;
+ *((UnionData*)(char*)Data)->Value = Value;
+ }
+ void setAddrLabelDiff(const AddrLabelExpr* LHSExpr,
+ const AddrLabelExpr* RHSExpr) {
+ ((AddrLabelDiffData*)(char*)Data)->LHSExpr = LHSExpr;
+ ((AddrLabelDiffData*)(char*)Data)->RHSExpr = RHSExpr;
+ }
- const APValue &operator=(const APValue &RHS);
+ /// Assign by swapping from a copy of the RHS.
+ APValue &operator=(APValue RHS) {
+ swap(RHS);
+ return *this;
+ }
private:
- void MakeUninit();
+ void DestroyDataAndMakeUninit();
+ void MakeUninit() {
+ if (Kind != Uninitialized)
+ DestroyDataAndMakeUninit();
+ }
void MakeInt() {
assert(isUninit() && "Bad state change");
new ((void*)Data) APSInt(1);
@@ -233,17 +421,26 @@ private:
Kind = ComplexFloat;
}
void MakeLValue();
+ void MakeArray(unsigned InitElts, unsigned Size);
+ void MakeStruct(unsigned B, unsigned M) {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) StructData(B, M);
+ Kind = Struct;
+ }
+ void MakeUnion() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) UnionData();
+ Kind = Union;
+ }
+ void MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl*> Path);
+ void MakeAddrLabelDiff() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) AddrLabelDiffData();
+ Kind = AddrLabelDiff;
+ }
};
-inline raw_ostream &operator<<(raw_ostream &OS, const APValue &V) {
- V.print(OS);
- return OS;
-}
-
-// Writes a concise representation of V to DB, in a single << operation.
-const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
- const APValue &V);
-
} // end namespace clang.
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h b/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
index fcc9176..69a3866 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
@@ -24,6 +24,7 @@ namespace clang {
class SemaConsumer; // layering violation required for safe SemaConsumer
class TagDecl;
class VarDecl;
+ class FunctionDecl;
/// ASTConsumer - This is an abstract interface that should be implemented by
/// clients that read ASTs. This abstraction layer allows the client to be
@@ -48,7 +49,9 @@ public:
/// called by the parser to process every top-level Decl*. Note that D can be
/// the head of a chain of Decls (e.g. for `int a, b` the chain will have two
/// elements). Use Decl::getNextDeclarator() to walk the chain.
- virtual void HandleTopLevelDecl(DeclGroupRef D);
+ ///
+ /// \returns true to continue parsing, or false to abort parsing.
+ virtual bool HandleTopLevelDecl(DeclGroupRef D);
/// HandleInterestingDecl - Handle the specified interesting declaration. This
/// is called by the AST reader when deserializing things that might interest
@@ -65,6 +68,17 @@ public:
/// can be defined in declspecs).
virtual void HandleTagDeclDefinition(TagDecl *D) {}
+ /// \brief Invoked when a function is implicitly instantiated.
+  /// Note that at this point it does not have a body; its body is
+ /// instantiated at the end of the translation unit and passed to
+ /// HandleTopLevelDecl.
+ virtual void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D) {}
+
+ /// \brief Handle the specified top-level declaration that occurred inside
+  /// an ObjC container.
+  /// The default implementation ignores them.
+ virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
+
/// CompleteTentativeDefinition - Callback invoked at the end of a translation
/// unit to notify the consumer that the given tentative definition should be
/// completed.
@@ -76,6 +90,10 @@ public:
/// modified by the introduction of an implicit zero initializer.
virtual void CompleteTentativeDefinition(VarDecl *D) {}
+ /// HandleCXXStaticMemberVarInstantiation - Tell the consumer that this
+  /// variable has been instantiated.
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *D) {}
+
/// \brief Callback involved at the end of a translation unit to
/// notify the consumer that a vtable for the given C++ class is
/// required.
@@ -94,7 +112,9 @@ public:
/// \brief If the consumer is interested in entities being deserialized from
/// AST files, it should return a pointer to a ASTDeserializationListener here
- virtual ASTDeserializationListener *GetASTDeserializationListener() { return 0; }
+ virtual ASTDeserializationListener *GetASTDeserializationListener() {
+ return 0;
+ }
/// PrintStats - If desired, print any statistics.
virtual void PrintStats() {}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
index 48d7e94..96e41c5 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
@@ -21,17 +21,18 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/VersionTuple.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/LambdaMangleContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/CanonicalType.h"
-#include "clang/AST/UsuallyTinyPtrVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/Allocator.h"
#include <vector>
@@ -55,6 +56,7 @@ namespace clang {
class CXXABI;
// Decls
class DeclContext;
+ class CXXConversionDecl;
class CXXMethodDecl;
class CXXRecordDecl;
class Decl;
@@ -80,7 +82,7 @@ namespace clang {
/// ASTContext - This class holds long-lived AST nodes (such as types and
/// decls) that can be referred to throughout the semantic analysis of a file.
-class ASTContext : public llvm::RefCountedBase<ASTContext> {
+class ASTContext : public RefCountedBase<ASTContext> {
ASTContext &this_() { return *this; }
mutable std::vector<Type*> Types;
@@ -145,6 +147,11 @@ class ASTContext : public llvm::RefCountedBase<ASTContext> {
mutable llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*>
ObjCLayouts;
+ /// TypeInfoMap - A cache from types to size and alignment information.
+ typedef llvm::DenseMap<const Type*,
+ std::pair<uint64_t, unsigned> > TypeInfoMap;
+ mutable TypeInfoMap MemoizedTypeInfo;
+
/// KeyFunctions - A cache mapping from CXXRecordDecls to key functions.
llvm::DenseMap<const CXXRecordDecl*, const CXXMethodDecl*> KeyFunctions;
@@ -202,12 +209,12 @@ class ASTContext : public llvm::RefCountedBase<ASTContext> {
/// \brief The typedef for the predefined 'SEL' type.
mutable TypedefDecl *ObjCSelDecl;
- QualType ObjCProtoType;
- const RecordType *ProtoStructType;
-
/// \brief The typedef for the predefined 'Class' type.
mutable TypedefDecl *ObjCClassDecl;
-
+
+ /// \brief The typedef for the predefined 'Protocol' class in Objective-C.
+ mutable ObjCInterfaceDecl *ObjCProtocolClassDecl;
+
// Typedefs which may be provided defining the structure of Objective-C
// pseudo-builtins
QualType ObjCIdRedefinitionType;
@@ -216,6 +223,8 @@ class ASTContext : public llvm::RefCountedBase<ASTContext> {
QualType ObjCConstantStringType;
mutable RecordDecl *CFConstantStringTypeDecl;
+
+ QualType ObjCNSStringType;
/// \brief The typedef declaration for the Objective-C "instancetype" type.
TypedefDecl *ObjCInstanceTypeDecl;
@@ -318,15 +327,22 @@ class ASTContext : public llvm::RefCountedBase<ASTContext> {
/// Since most C++ member functions aren't virtual and therefore
/// don't override anything, we store the overridden functions in
/// this map on the side rather than within the CXXMethodDecl structure.
- typedef UsuallyTinyPtrVector<const CXXMethodDecl> CXXMethodVector;
+ typedef llvm::TinyPtrVector<const CXXMethodDecl*> CXXMethodVector;
llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector> OverriddenMethods;
+ /// \brief Mapping from each declaration context to its corresponding lambda
+ /// mangling context.
+ llvm::DenseMap<const DeclContext *, LambdaMangleContext> LambdaMangleContexts;
+
/// \brief Mapping that stores parameterIndex values for ParmVarDecls
/// when that value exceeds the bitfield size of
/// ParmVarDeclBits.ParameterIndex.
typedef llvm::DenseMap<const VarDecl *, unsigned> ParameterIndexTable;
ParameterIndexTable ParamIndices;
+ ImportDecl *FirstLocalImport;
+ ImportDecl *LastLocalImport;
+
TranslationUnitDecl *TUDecl;
/// SourceMgr - The associated SourceManager object.
@@ -346,7 +362,7 @@ class ASTContext : public llvm::RefCountedBase<ASTContext> {
PartialDiagnostic::StorageAllocator DiagAllocator;
/// \brief The current C++ ABI.
- llvm::OwningPtr<CXXABI> ABI;
+ OwningPtr<CXXABI> ABI;
CXXABI *createCXXABI(const TargetInfo &T);
/// \brief The logical -> physical address space map.
@@ -355,7 +371,8 @@ class ASTContext : public llvm::RefCountedBase<ASTContext> {
friend class ASTDeclReader;
friend class ASTReader;
friend class ASTWriter;
-
+ friend class CXXRecordDecl;
+
const TargetInfo *Target;
clang::PrintingPolicy PrintingPolicy;
@@ -364,7 +381,7 @@ public:
SelectorTable &Selectors;
Builtin::Context &BuiltinInfo;
mutable DeclarationNameTable DeclarationNames;
- llvm::OwningPtr<ExternalASTSource> ExternalSource;
+ OwningPtr<ExternalASTSource> ExternalSource;
ASTMutationListener *Listener;
clang::PrintingPolicy getPrintingPolicy() const { return PrintingPolicy; }
@@ -394,7 +411,7 @@ public:
const TargetInfo &getTargetInfo() const { return *Target; }
- const LangOptions& getLangOptions() const { return LangOpts; }
+ const LangOptions& getLangOpts() const { return LangOpts; }
DiagnosticsEngine &getDiagnostics() const;
@@ -468,7 +485,7 @@ public:
const FieldDecl *LastFD) const;
// Access to the set of methods overridden by the given C++ method.
- typedef CXXMethodVector::iterator overridden_cxx_method_iterator;
+ typedef CXXMethodVector::const_iterator overridden_cxx_method_iterator;
overridden_cxx_method_iterator
overridden_methods_begin(const CXXMethodDecl *Method) const;
@@ -482,6 +499,56 @@ public:
void addOverriddenMethod(const CXXMethodDecl *Method,
const CXXMethodDecl *Overridden);
+ /// \brief Notify the AST context that a new import declaration has been
+ /// parsed or implicitly created within this translation unit.
+ void addedLocalImportDecl(ImportDecl *Import);
+
+ static ImportDecl *getNextLocalImport(ImportDecl *Import) {
+ return Import->NextLocalImport;
+ }
+
+ /// \brief Iterator that visits import declarations.
+ class import_iterator {
+ ImportDecl *Import;
+
+ public:
+ typedef ImportDecl *value_type;
+ typedef ImportDecl *reference;
+ typedef ImportDecl *pointer;
+ typedef int difference_type;
+ typedef std::forward_iterator_tag iterator_category;
+
+ import_iterator() : Import() { }
+ explicit import_iterator(ImportDecl *Import) : Import(Import) { }
+
+ reference operator*() const { return Import; }
+ pointer operator->() const { return Import; }
+
+ import_iterator &operator++() {
+ Import = ASTContext::getNextLocalImport(Import);
+ return *this;
+ }
+
+ import_iterator operator++(int) {
+ import_iterator Other(*this);
+ ++(*this);
+ return Other;
+ }
+
+ friend bool operator==(import_iterator X, import_iterator Y) {
+ return X.Import == Y.Import;
+ }
+
+ friend bool operator!=(import_iterator X, import_iterator Y) {
+ return X.Import != Y.Import;
+ }
+ };
+
+ import_iterator local_import_begin() const {
+ return import_iterator(FirstLocalImport);
+ }
+ import_iterator local_import_end() const { return import_iterator(); }
+
TranslationUnitDecl *getTranslationUnitDecl() const { return TUDecl; }
@@ -500,7 +567,9 @@ public:
CanQualType FloatComplexTy, DoubleComplexTy, LongDoubleComplexTy;
CanQualType VoidPtrTy, NullPtrTy;
CanQualType DependentTy, OverloadTy, BoundMemberTy, UnknownAnyTy;
+ CanQualType PseudoObjectTy, ARCUnbridgedCastTy;
CanQualType ObjCBuiltinIdTy, ObjCBuiltinClassTy, ObjCBuiltinSelTy;
+ CanQualType ObjCBuiltinBoolTy;
// Types for deductions in C++0x [stmt.ranged]'s desugaring. Built on demand.
mutable QualType AutoDeductTy; // Deduction against 'auto'.
@@ -519,7 +588,7 @@ public:
/// The external AST source provides the ability to load parts of
/// the abstract syntax tree as needed from some external storage,
/// e.g., a precompiled header.
- void setExternalSource(llvm::OwningPtr<ExternalASTSource> &Source);
+ void setExternalSource(OwningPtr<ExternalASTSource> &Source);
/// \brief Retrieve a pointer to the external AST source associated
/// with this AST context, if any.
@@ -800,7 +869,8 @@ public:
QualType getPackExpansionType(QualType Pattern,
llvm::Optional<unsigned> NumExpansions);
- QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl) const;
+ QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
+ ObjCInterfaceDecl *PrevDecl = 0) const;
QualType getObjCObjectType(QualType Base,
ObjCProtocolDecl * const *Protocols,
@@ -815,7 +885,7 @@ public:
QualType getTypeOfType(QualType t) const;
/// getDecltypeType - C++0x decltype.
- QualType getDecltypeType(Expr *e) const;
+ QualType getDecltypeType(Expr *e, QualType UnderlyingType) const;
/// getUnaryTransformType - unary type transforms
QualType getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
@@ -838,6 +908,14 @@ public:
/// in <stddef.h>. The sizeof operator requires this (C99 6.5.3.4p4).
CanQualType getSizeType() const;
+ /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5),
+ /// defined in <stdint.h>.
+ CanQualType getIntMaxType() const;
+
+ /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5),
+ /// defined in <stdint.h>.
+ CanQualType getUIntMaxType() const;
+
/// getWCharType - In C++, this returns the unique wchar_t type. In C99, this
/// returns a type compatible with the type defined in <stddef.h> as defined
/// by the target.
@@ -851,7 +929,7 @@ public:
/// Used when in C++, as a GCC extension.
QualType getUnsignedWCharType() const;
- /// getPointerDiffType - Return the unique type for "ptrdiff_t" (ref?)
+ /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType getPointerDiffType() const;
@@ -874,6 +952,14 @@ public:
return ObjCConstantStringType;
}
+ QualType getObjCNSStringType() const {
+ return ObjCNSStringType;
+ }
+
+ void setObjCNSStringType(QualType T) {
+ ObjCNSStringType = T;
+ }
+
/// \brief Retrieve the type that 'id' has been defined to, which may be
/// different from the built-in 'id' if 'id' has been typedef'd.
QualType getObjCIdRedefinitionType() const {
@@ -972,7 +1058,7 @@ public:
/// \brief The result type of logical operations, '<', '>', '!=', etc.
QualType getLogicalOperationType() const {
- return getLangOptions().CPlusPlus ? BoolTy : IntTy;
+ return getLangOpts().CPlusPlus ? BoolTy : IntTy;
}
/// getObjCEncodingForType - Emit the ObjC type encoding for the
@@ -999,7 +1085,8 @@ public:
///
/// \returns true if an error occurred (e.g., because one of the parameter
/// types is incomplete), false otherwise.
- bool getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, std::string &S)
+ bool getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, std::string &S,
+ bool Extended = false)
const;
/// getObjCEncodingForBlock - Return the encoded type for this block
@@ -1041,9 +1128,6 @@ public:
return getTypeDeclType(getObjCSelDecl());
}
- void setObjCProtoType(QualType QT);
- QualType getObjCProtoType() const { return ObjCProtoType; }
-
/// \brief Retrieve the typedef declaration corresponding to the predefined
/// Objective-C 'Class' type.
TypedefDecl *getObjCClassDecl() const;
@@ -1055,6 +1139,15 @@ public:
return getTypeDeclType(getObjCClassDecl());
}
+ /// \brief Retrieve the Objective-C class declaration corresponding to
+ /// the predefined 'Protocol' class.
+ ObjCInterfaceDecl *getObjCProtocolDecl() const;
+
+ /// \brief Retrieve the type of the Objective-C "Protocol" class.
+ QualType getObjCProtoType() const {
+ return getObjCInterfaceType(getObjCProtocolDecl());
+ }
+
void setBuiltinVaListType(QualType T);
QualType getBuiltinVaListType() const { return BuiltinVaListType; }
@@ -1064,6 +1157,11 @@ public:
return getQualifiedType(T, Qualifiers::fromCVRMask(CVR));
}
+ /// getQualifiedType - Un-split a SplitQualType.
+ QualType getQualifiedType(SplitQualType split) const {
+ return getQualifiedType(split.Ty, split.Quals);
+ }
+
/// getQualifiedType - Returns a type with additional qualifiers.
QualType getQualifiedType(QualType T, Qualifiers Qs) const {
if (!Qs.hasNonFastQualifiers())
@@ -1127,6 +1225,7 @@ public:
private:
CanQualType getFromTargetType(unsigned Type) const;
+ std::pair<uint64_t, unsigned> getTypeInfoImpl(const Type *T) const;
//===--------------------------------------------------------------------===//
// Type Predicates.
@@ -1230,7 +1329,8 @@ public:
const ASTRecordLayout &getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D)
const;
- void DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS) const;
+ void DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
+ bool Simple = false) const;
/// getASTObjCImplementationLayout - Get or compute information about
/// the layout of the specified Objective-C implementation. This may
@@ -1246,6 +1346,9 @@ public:
/// of class definition.
const CXXMethodDecl *getKeyFunction(const CXXRecordDecl *RD);
+ /// Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
+ uint64_t getFieldOffset(const ValueDecl *FD) const;
+
bool isNearlyEmpty(const CXXRecordDecl *RD) const;
MangleContext *createMangleContext();
@@ -1282,7 +1385,7 @@ public:
CanQualType getCanonicalParamType(QualType T) const;
/// \brief Determine whether the given types are equivalent.
- bool hasSameType(QualType T1, QualType T2) {
+ bool hasSameType(QualType T1, QualType T2) const {
return getCanonicalType(T1) == getCanonicalType(T2);
}
@@ -1302,7 +1405,7 @@ public:
/// \brief Determine whether the given types are equivalent after
/// cvr-qualifiers have been removed.
- bool hasSameUnqualifiedType(QualType T1, QualType T2) {
+ bool hasSameUnqualifiedType(QualType T1, QualType T2) const {
return getCanonicalType(T1).getTypePtr() ==
getCanonicalType(T2).getTypePtr();
}
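A hedged illustration of the two predicates ('Ctx' is an assumed ASTContext; 'TypedefOfInt' is a hypothetical QualType whose canonical type is 'int'):

    QualType ConstInt = QualType(Ctx.IntTy).withConst();
    bool A = Ctx.hasSameType(TypedefOfInt, Ctx.IntTy);        // true: sugar is stripped
    bool B = Ctx.hasSameType(ConstInt, Ctx.IntTy);            // false: qualifiers differ
    bool C = Ctx.hasSameUnqualifiedType(ConstInt, Ctx.IntTy); // true: cvr-quals ignored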
@@ -1590,6 +1693,8 @@ public:
return Res;
}
+ bool isSentinelNullExpr(const Expr *E);
+
/// \brief Get the implementation of ObjCInterfaceDecl,or NULL if none exists.
ObjCImplementationDecl *getObjCImplementation(ObjCInterfaceDecl *D);
/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
@@ -1622,6 +1727,11 @@ public:
const ObjCMethodDecl *Redecl) {
ObjCMethodRedecls[MD] = Redecl;
}
+
+ /// \brief Returns the Objective-C interface that \arg ND belongs to if it
+ /// is an Objective-C method/property/ivar etc. that is part of an
+ /// interface; otherwise returns null.
+ ObjCInterfaceDecl *getObjContainingInterface(NamedDecl *ND) const;
/// \brief Set the copy initialization expression of a block var decl.
void setBlockVarCopyInits(VarDecl*VD, Expr* Init);
@@ -1671,6 +1781,8 @@ public:
/// it is not used.
bool DeclMustBeEmitted(const Decl *D);
+ /// \brief Retrieve the lambda mangling number for a lambda expression.
+ unsigned getLambdaManglingNumber(CXXMethodDecl *CallOperator);
/// \brief Used by ParmVarDecl to store on the side the
/// index of the parameter when it exceeds the size of the normal bitfield.
@@ -1751,13 +1863,20 @@ private:
const FieldDecl *Field,
bool OutermostType = false,
bool EncodingProperty = false,
- bool StructField = false) const;
+ bool StructField = false,
+ bool EncodeBlockParameters = false,
+ bool EncodeClassNames = false) const;
// Adds the encoding of the structure's members.
void getObjCEncodingForStructureImpl(RecordDecl *RD, std::string &S,
const FieldDecl *Field,
bool includeVBases = true) const;
-
+
+ // Adds the encoding of a method parameter or return type.
+ void getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
+ QualType T, std::string& S,
+ bool Extended) const;
+
const ASTRecordLayout &
getObjCLayout(const ObjCInterfaceDecl *D,
const ObjCImplementationDecl *Impl) const;
@@ -1795,13 +1914,19 @@ static inline Selector GetUnarySelector(StringRef name, ASTContext& Ctx) {
} // end namespace clang
// operator new and delete aren't allowed inside namespaces.
-// The throw specifications are mandated by the standard.
+
/// @brief Placement new for using the ASTContext's allocator.
///
/// This placement form of operator new uses the ASTContext's allocator for
-/// obtaining memory. It is a non-throwing new, which means that it returns
-/// null on error. (If that is what the allocator does. The current does, so if
-/// this ever changes, this operator will have to be changed, too.)
+/// obtaining memory.
+///
+/// IMPORTANT: These are also declared in clang/AST/Attr.h! Any changes here
+/// need to also be made there.
+///
+/// We intentionally avoid using a nothrow specification here so that the calls
+/// to this operator will not perform a null check on the result -- the
+/// underlying allocator never returns null pointers.
+///
/// Usage looks like this (assuming there's an ASTContext 'Context' in scope):
/// @code
/// // Default alignment (8)
@@ -1819,7 +1944,7 @@ static inline Selector GetUnarySelector(StringRef name, ASTContext& Ctx) {
/// allocator supports it).
/// @return The allocated memory. Could be NULL.
inline void *operator new(size_t Bytes, const clang::ASTContext &C,
- size_t Alignment) throw () {
+ size_t Alignment) {
return C.Allocate(Bytes, Alignment);
}
/// @brief Placement delete companion to the new above.
@@ -1828,14 +1953,17 @@ inline void *operator new(size_t Bytes, const clang::ASTContext &C,
/// invoking it directly; see the new operator for more details. This operator
/// is called implicitly by the compiler if a placement new expression using
/// the ASTContext throws in the object constructor.
-inline void operator delete(void *Ptr, const clang::ASTContext &C, size_t)
- throw () {
+inline void operator delete(void *Ptr, const clang::ASTContext &C, size_t) {
C.Deallocate(Ptr);
}
/// This placement form of operator new[] uses the ASTContext's allocator for
-/// obtaining memory. It is a non-throwing new[], which means that it returns
-/// null on error.
+/// obtaining memory.
+///
+/// We intentionally avoid using a nothrow specification here so that the calls
+/// to this operator will not perform a null check on the result -- the
+/// underlying allocator never returns null pointers.
+///
/// Usage looks like this (assuming there's an ASTContext 'Context' in scope):
/// @code
/// // Default alignment (8)
@@ -1853,7 +1981,7 @@ inline void operator delete(void *Ptr, const clang::ASTContext &C, size_t)
/// allocator supports it).
/// @return The allocated memory. Could be NULL.
inline void *operator new[](size_t Bytes, const clang::ASTContext& C,
- size_t Alignment = 8) throw () {
+ size_t Alignment = 8) {
return C.Allocate(Bytes, Alignment);
}
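A hedged usage sketch mirroring the doc comment above (assuming an ASTContext 'Context' in scope); note that no null check on the result is needed or emitted:

    // Routed through the operator new[] overload above; the memory is owned
    // by the ASTContext and released when the context itself is destroyed.
    char *Data = new (Context, /*Alignment=*/4) char[10];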
@@ -1863,8 +1991,7 @@ inline void *operator new[](size_t Bytes, const clang::ASTContext& C,
/// invoking it directly; see the new[] operator for more details. This operator
/// is called implicitly by the compiler if a placement new[] expression using
/// the ASTContext throws in the object constructor.
-inline void operator delete[](void *Ptr, const clang::ASTContext &C, size_t)
- throw () {
+inline void operator delete[](void *Ptr, const clang::ASTContext &C, size_t) {
C.Deallocate(Ptr);
}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h b/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h
index b005711..64e955e 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h
@@ -16,7 +16,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
- SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY,BRIEF,FULL) ENUM,
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
#define ASTSTART
#include "clang/Basic/DiagnosticASTKinds.inc"
#undef DIAG
@@ -44,7 +44,7 @@ namespace clang {
unsigned NumPrevArgs,
SmallVectorImpl<char> &Output,
void *Cookie,
- SmallVectorImpl<intptr_t> &QualTypeVals);
+ ArrayRef<intptr_t> QualTypeVals);
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h b/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
index b583fbf..7157efe 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
@@ -255,6 +255,12 @@ namespace clang {
/// \brief Return the set of declarations that we know are not equivalent.
NonEquivalentDeclSet &getNonEquivalentDecls() { return NonEquivalentDecls; }
+
+ /// \brief Called for ObjCInterfaceDecl, ObjCProtocolDecl, and TagDecl.
+ /// Mark the Decl as complete, filling it in as much as possible.
+ ///
+ /// \param D A declaration in the "to" context.
+ virtual void CompleteDecl(Decl* D);
/// \brief Note that we have imported the "from" declaration by mapping it
/// to the (potentially-newly-created) "to" declaration.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h b/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
index 793d3ee..cb038a0 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
@@ -24,6 +24,8 @@ namespace clang {
class FunctionTemplateDecl;
class ObjCCategoryDecl;
class ObjCInterfaceDecl;
+ class ObjCContainerDecl;
+ class ObjCPropertyDecl;
/// \brief An abstract interface that should be implemented by listeners
/// that want to be notified when an AST entity gets modified after its
@@ -60,6 +62,21 @@ public:
/// \brief A new objc category class was added for an interface.
virtual void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
const ObjCInterfaceDecl *IFD) {}
+
+ /// \brief An Objective-C class extension redeclared or introduced a property.
+ ///
+ /// \param Prop the property in the class extension
+ ///
+ /// \param OrigProp the property declared in the original interface, or null
+ /// if the property was newly introduced.
+ ///
+ /// \param ClassExt the class extension.
+ virtual void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt) {}
+
+ // NOTE: If new methods are added they should also be added to
+ // MultiplexASTMutationListener.
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Attr.h b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
index cf2e3c5..ef1aa25 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Attr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstring>
#include <algorithm>
@@ -39,19 +40,17 @@ namespace clang {
// Defined in ASTContext.h
void *operator new(size_t Bytes, const clang::ASTContext &C,
- size_t Alignment = 16) throw ();
+ size_t Alignment = 16);
// FIXME: Being forced to not have a default argument here due to redeclaration
// rules on default arguments sucks
void *operator new[](size_t Bytes, const clang::ASTContext &C,
- size_t Alignment) throw ();
+ size_t Alignment);
// It is good practice to pair new/delete operators. Also, MSVC gives many
// warnings if a matching delete overload is not declared, even though the
// throw() spec guarantees it will not be implicitly called.
-void operator delete(void *Ptr, const clang::ASTContext &C, size_t)
- throw ();
-void operator delete[](void *Ptr, const clang::ASTContext &C, size_t)
- throw ();
+void operator delete(void *Ptr, const clang::ASTContext &C, size_t);
+void operator delete[](void *Ptr, const clang::ASTContext &C, size_t);
namespace clang {
@@ -103,11 +102,17 @@ public:
// Clone this attribute.
virtual Attr* clone(ASTContext &C) const = 0;
+ virtual bool isLateParsed() const { return false; }
+
+ // Pretty print this attribute.
+ virtual void printPretty(llvm::raw_ostream &OS, ASTContext &C) const = 0;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *) { return true; }
};
class InheritableAttr : public Attr {
+ virtual void anchor();
protected:
InheritableAttr(attr::Kind AK, SourceRange R)
: Attr(AK, R) {}
@@ -123,6 +128,7 @@ public:
};
class InheritableParamAttr : public InheritableAttr {
+ virtual void anchor();
protected:
InheritableParamAttr(attr::Kind AK, SourceRange R)
: InheritableAttr(AK, R) {}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def b/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def
new file mode 100644
index 0000000..34e6fc5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def
@@ -0,0 +1,224 @@
+//===-- BuiltinTypes.def - Metadata about BuiltinTypes ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the database about various builtin singleton types.
+//
+// BuiltinType::Id is the enumerator defining the type.
+//
+// Context.SingletonId is the global singleton of this type. Some global
+// singletons are shared by multiple types.
+//
+// BUILTIN_TYPE(Id, SingletonId) - A builtin type that has not been
+// covered by any other #define. Defining this macro covers all
+// the builtins.
+//
+// SIGNED_TYPE(Id, SingletonId) - A signed integral type.
+//
+// UNSIGNED_TYPE(Id, SingletonId) - An unsigned integral type.
+//
+// FLOATING_TYPE(Id, SingletonId) - A floating-point type.
+//
+// PLACEHOLDER_TYPE(Id, SingletonId) - A placeholder type. Placeholder
+// types are used to perform context-sensitive checking of specific
+// forms of expression.
+//
+// SHARED_SINGLETON_TYPE(Expansion) - The given expansion corresponds
+// to a builtin which uses a shared singleton type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SIGNED_TYPE
+#define SIGNED_TYPE(Id, SingletonId) BUILTIN_TYPE(Id, SingletonId)
+#endif
+
+#ifndef UNSIGNED_TYPE
+#define UNSIGNED_TYPE(Id, SingletonId) BUILTIN_TYPE(Id, SingletonId)
+#endif
+
+#ifndef FLOATING_TYPE
+#define FLOATING_TYPE(Id, SingletonId) BUILTIN_TYPE(Id, SingletonId)
+#endif
+
+#ifndef PLACEHOLDER_TYPE
+#define PLACEHOLDER_TYPE(Id, SingletonId) BUILTIN_TYPE(Id, SingletonId)
+#endif
+
+#ifndef SHARED_SINGLETON_TYPE
+#define SHARED_SINGLETON_TYPE(Expansion) Expansion
+#endif
+
+//===- Builtin Types ------------------------------------------------------===//
+
+// void
+BUILTIN_TYPE(Void, VoidTy)
+
+//===- Unsigned Types -----------------------------------------------------===//
+
+// 'bool' in C++, '_Bool' in C99
+UNSIGNED_TYPE(Bool, BoolTy)
+
+// 'char' for targets where it's unsigned
+SHARED_SINGLETON_TYPE(UNSIGNED_TYPE(Char_U, CharTy))
+
+// 'unsigned char', explicitly qualified
+UNSIGNED_TYPE(UChar, UnsignedCharTy)
+
+// 'wchar_t' for targets where it's unsigned
+SHARED_SINGLETON_TYPE(UNSIGNED_TYPE(WChar_U, WCharTy))
+
+// 'char16_t' in C++
+UNSIGNED_TYPE(Char16, Char16Ty)
+
+// 'char32_t' in C++
+UNSIGNED_TYPE(Char32, Char32Ty)
+
+// 'unsigned short'
+UNSIGNED_TYPE(UShort, UnsignedShortTy)
+
+// 'unsigned int'
+UNSIGNED_TYPE(UInt, UnsignedIntTy)
+
+// 'unsigned long'
+UNSIGNED_TYPE(ULong, UnsignedLongTy)
+
+// 'unsigned long long'
+UNSIGNED_TYPE(ULongLong, UnsignedLongLongTy)
+
+// '__uint128_t'
+UNSIGNED_TYPE(UInt128, UnsignedInt128Ty)
+
+//===- Signed Types -------------------------------------------------------===//
+
+// 'char' for targets where it's signed
+SHARED_SINGLETON_TYPE(SIGNED_TYPE(Char_S, CharTy))
+
+// 'signed char', explicitly qualified
+SIGNED_TYPE(SChar, SignedCharTy)
+
+// 'wchar_t' for targets where it's signed
+SHARED_SINGLETON_TYPE(SIGNED_TYPE(WChar_S, WCharTy))
+
+// 'short' or 'signed short'
+SIGNED_TYPE(Short, ShortTy)
+
+// 'int' or 'signed int'
+SIGNED_TYPE(Int, IntTy)
+
+// 'long' or 'signed long'
+SIGNED_TYPE(Long, LongTy)
+
+// 'long long' or 'signed long long'
+SIGNED_TYPE(LongLong, LongLongTy)
+
+// '__int128_t'
+SIGNED_TYPE(Int128, Int128Ty)
+
+//===- Floating point types -----------------------------------------------===//
+
+// 'half' in OpenCL, '__fp16' in ARM NEON.
+FLOATING_TYPE(Half, HalfTy)
+
+// 'float'
+FLOATING_TYPE(Float, FloatTy)
+
+// 'double'
+FLOATING_TYPE(Double, DoubleTy)
+
+// 'long double'
+FLOATING_TYPE(LongDouble, LongDoubleTy)
+
+//===- Language-specific types --------------------------------------------===//
+
+// This is the type of C++0x 'nullptr'.
+BUILTIN_TYPE(NullPtr, NullPtrTy)
+
+// The primitive Objective C 'id' type. The user-visible 'id'
+// type is a typedef of an ObjCObjectPointerType to an
+// ObjCObjectType with this as its base. In fact, this only ever
+// shows up in an AST as the base type of an ObjCObjectType.
+BUILTIN_TYPE(ObjCId, ObjCBuiltinIdTy)
+
+// The primitive Objective C 'Class' type. The user-visible
+// 'Class' type is a typedef of an ObjCObjectPointerType to an
+// ObjCObjectType with this as its base. In fact, this only ever
+// shows up in an AST as the base type of an ObjCObjectType.
+BUILTIN_TYPE(ObjCClass, ObjCBuiltinClassTy)
+
+// The primitive Objective C 'SEL' type. The user-visible 'SEL'
+// type is a typedef of a PointerType to this.
+BUILTIN_TYPE(ObjCSel, ObjCBuiltinSelTy)
+
+// This represents the type of an expression whose type is
+// totally unknown, e.g. 'T::foo'. It is permitted for this to
+// appear in situations where the structure of the type is
+// theoretically deducible.
+BUILTIN_TYPE(Dependent, DependentTy)
+
+// The type of an unresolved overload set. A placeholder type.
+// Expressions with this type have one of the following basic
+// forms, with parentheses generally permitted:
+// foo # possibly qualified, not if an implicit access
+// foo<int> # possibly qualified
+// &foo # possibly qualified, not if an implicit access
+// x->foo # only if might be a static member function
+// &x->foo # only if might be a static member function
+// &Class::foo # when a pointer-to-member; sub-expr also has this type
+// OverloadExpr::find can be used to analyze the expression.
+//
+// Overload should be the first placeholder type, or else change
+// BuiltinType::isNonOverloadPlaceholderType()
+PLACEHOLDER_TYPE(Overload, OverloadTy)
+
+// The type of a bound C++ non-static member function.
+// A placeholder type. Expressions with this type have one of the
+// following basic forms:
+// foo # if an implicit access
+// x->foo # if only contains non-static members
+PLACEHOLDER_TYPE(BoundMember, BoundMemberTy)
+
+// The type of an expression which refers to a pseudo-object,
+// such as those introduced by Objective C's @property or
+// VS.NET's __property declarations. A placeholder type. The
+// pseudo-object is actually accessed by emitting a call to
+// some sort of function or method; typically there is a pair
+// of a setter and a getter, with the setter used if the
+// pseudo-object reference is used syntactically as the
+// left-hand-side of an assignment operator.
+//
+// A pseudo-object reference naming an Objective-C @property is
+// always a dot access with a base of object-pointer type,
+// e.g. 'x.foo'.
+//
+// In VS.NET, a __property declaration creates an implicit
+// member with an associated name, which can then be named
+// in any of the normal ways an ordinary member could be.
+PLACEHOLDER_TYPE(PseudoObject, PseudoObjectTy)
+
+// __builtin_any_type. A placeholder type. Useful for clients
+// like debuggers that don't know what type to give something.
+// Only a small number of operations are valid on expressions of
+// unknown type, most notably explicit casts.
+PLACEHOLDER_TYPE(UnknownAny, UnknownAnyTy)
+
+// The type of a cast which, in ARC, would normally require a
+// __bridge, but which might be okay depending on the immediate
+// context.
+PLACEHOLDER_TYPE(ARCUnbridgedCast, ARCUnbridgedCastTy)
+
+#ifdef LAST_BUILTIN_TYPE
+LAST_BUILTIN_TYPE(ARCUnbridgedCast)
+#undef LAST_BUILTIN_TYPE
+#endif
+
+#undef SHARED_SINGLETON_TYPE
+#undef PLACEHOLDER_TYPE
+#undef FLOATING_TYPE
+#undef SIGNED_TYPE
+#undef UNSIGNED_TYPE
+#undef BUILTIN_TYPE
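As a hedged illustration of the intended X-macro usage (this client function is hypothetical, not part of the change), a consumer can override one macro and let the remaining categories default to BUILTIN_TYPE:

    // Map a BuiltinType kind to a printable name; placeholder types are tagged.
    static const char *getBuiltinKindName(clang::BuiltinType::Kind K) {
      switch (K) {
    #define PLACEHOLDER_TYPE(Id, SingletonId) \
      case clang::BuiltinType::Id: return "placeholder " #Id;
    #define BUILTIN_TYPE(Id, SingletonId) \
      case clang::BuiltinType::Id: return #Id;
    #include "clang/AST/BuiltinTypes.def"
      }
      return "unknown";
    }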
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
index 38e6b41..6cce888 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
@@ -108,6 +108,8 @@ public:
/// or a derived class thereof, a NULL canonical type.
template<typename U> CanProxy<U> getAs() const;
+ template<typename U> CanProxy<U> castAs() const;
+
/// \brief Overloaded arrow operator that produces a canonical type
/// proxy.
CanProxy<T> operator->() const;
@@ -753,6 +755,13 @@ CanProxy<U> CanQual<T>::getAs() const {
}
template<typename T>
+template<typename U>
+CanProxy<U> CanQual<T>::castAs() const {
+ assert(!Stored.isNull() && isa<U>(Stored.getTypePtr()));
+ return CanQual<U>::CreateUnsafe(Stored);
+}
+
+template<typename T>
CanProxy<T> CanQual<T>::operator->() const {
return CanProxy<T>(*this);
}
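A hedged sketch contrasting getAs with the new castAs ('CT' is a hypothetical CanQualType):

    CanProxy<BuiltinType> P = CT.getAs<BuiltinType>();  // null proxy on mismatch
    CanProxy<BuiltinType> Q = CT.castAs<BuiltinType>(); // asserts isa<BuiltinType>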
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Decl.h b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
index a02a2ce..11696db 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
@@ -22,6 +22,7 @@
#include "clang/Basic/Linkage.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
class CXXTemporary;
@@ -40,7 +41,8 @@ class DependentFunctionTemplateSpecializationInfo;
class TypeLoc;
class UnresolvedSetImpl;
class LabelStmt;
-
+class Module;
+
/// \brief A container of type source information.
///
/// A client can read the relevant info using TypeLoc wrappers, e.g:
@@ -66,6 +68,7 @@ public:
/// TranslationUnitDecl - The top declaration context.
class TranslationUnitDecl : public Decl, public DeclContext {
+ virtual void anchor();
ASTContext &Ctx;
/// The (most recently entered) anonymous namespace for this
@@ -98,11 +101,15 @@ public:
/// NamedDecl - This represents a decl with a name. Many decls have names such
/// as ObjCMethodDecl, but not @class, etc.
class NamedDecl : public Decl {
+ virtual void anchor();
/// Name - The name of this declaration, which is typically a normal
/// identifier but may also be a special kind of name (C++
/// constructor, Objective-C selector, etc.)
DeclarationName Name;
+private:
+ NamedDecl *getUnderlyingDeclImpl();
+
protected:
NamedDecl(Kind DK, DeclContext *DC, SourceLocation L, DeclarationName N)
: Decl(DK, DC, L), Name(N) { }
@@ -182,15 +189,11 @@ public:
/// \brief Determine whether this declaration has linkage.
bool hasLinkage() const;
- /// \brief Whether this declaration was marked as being private to the
- /// module in which it was defined.
- bool isModulePrivate() const { return ModulePrivate; }
+ using Decl::isModulePrivate;
+ using Decl::setModulePrivate;
- /// \brief Specify whether this declaration was marked as being private
- /// to the module in which it was defined.
- void setModulePrivate(bool MP = true) {
- ModulePrivate = MP;
- }
+ /// \brief Determine whether this declaration is hidden from name lookup.
+ bool isHidden() const { return Hidden; }
/// \brief Determine whether this declaration is a C++ class member.
bool isCXXClassMember() const {
@@ -206,8 +209,8 @@ public:
return DC->isRecord();
}
- /// \brief Given that this declaration is a C++ class member,
- /// determine whether it's an instance member of its class.
+ /// \brief Determine whether the given declaration is an instance member of
+ /// a C++ class.
bool isCXXInstanceMember() const;
class LinkageInfo {
@@ -249,26 +252,52 @@ public:
setLinkage(minLinkage(linkage(), L));
}
void mergeLinkage(LinkageInfo Other) {
- setLinkage(minLinkage(linkage(), Other.linkage()));
+ mergeLinkage(Other.linkage());
}
- void mergeVisibility(Visibility V) {
- setVisibility(minVisibility(visibility(), V));
- }
- void mergeVisibility(Visibility V, bool E) {
+ // Merge the visibility V giving preference to explicit ones.
+ // This is used, for example, when merging the visibility of a class
+ // down to one of its members. If the member has no explicit visibility,
+ // the class visibility wins.
+ void mergeVisibility(Visibility V, bool E = false) {
+ // If one has explicit visibility and the other doesn't, keep the
+ // explicit one.
+ if (visibilityExplicit() && !E)
+ return;
+ if (!visibilityExplicit() && E)
+ setVisibility(V, E);
+
+ // If both are explicit or both are implicit, keep the minimum.
setVisibility(minVisibility(visibility(), V), visibilityExplicit() || E);
}
+ // Merge the visibility V, keeping the most restrictive one.
+ // This is used for cases like merging the visibility of a template
+ // argument to an instantiation. If we already have a hidden class,
+ // no argument should give it default visibility.
+ void mergeVisibilityWithMin(Visibility V, bool E = false) {
+ // Never increase the visibility
+ if (visibility() < V)
+ return;
+
+ // If this visibility is explicit, keep it.
+ if (visibilityExplicit() && !E)
+ return;
+ setVisibility(V, E);
+ }
void mergeVisibility(LinkageInfo Other) {
mergeVisibility(Other.visibility(), Other.visibilityExplicit());
}
+ void mergeVisibilityWithMin(LinkageInfo Other) {
+ mergeVisibilityWithMin(Other.visibility(), Other.visibilityExplicit());
+ }
void merge(LinkageInfo Other) {
mergeLinkage(Other);
mergeVisibility(Other);
}
- void merge(std::pair<Linkage,Visibility> LV) {
- mergeLinkage(LV.first);
- mergeVisibility(LV.second);
+ void mergeWithMin(LinkageInfo Other) {
+ mergeLinkage(Other);
+ mergeVisibilityWithMin(Other);
}
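A hedged illustration of the two merge policies (the three-argument LinkageInfo constructor is assumed):

    // An implicitly hidden entity meets an explicitly default-visibility one.
    LinkageInfo LV(ExternalLinkage, HiddenVisibility, /*Explicit=*/false);
    LinkageInfo Arg(ExternalLinkage, DefaultVisibility, /*Explicit=*/true);
    LinkageInfo A = LV; A.mergeVisibility(Arg);        // explicit wins: Default
    LinkageInfo B = LV; B.mergeVisibilityWithMin(Arg); // never increases: Hidden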
friend LinkageInfo merge(LinkageInfo L, LinkageInfo R) {
@@ -281,7 +310,9 @@ public:
Linkage getLinkage() const;
/// \brief Determines the visibility of this entity.
- Visibility getVisibility() const { return getLinkageAndVisibility().visibility(); }
+ Visibility getVisibility() const {
+ return getLinkageAndVisibility().visibility();
+ }
/// \brief Determines the linkage and visibility of this entity.
LinkageInfo getLinkageAndVisibility() const;
@@ -291,12 +322,19 @@ public:
llvm::Optional<Visibility> getExplicitVisibility() const;
/// \brief Clear the linkage cache in response to a change
- /// to the declaration.
+ /// to the declaration.
void ClearLinkageCache();
/// \brief Looks through UsingDecls and ObjCCompatibleAliasDecls for
/// the underlying named decl.
- NamedDecl *getUnderlyingDecl();
+ NamedDecl *getUnderlyingDecl() {
+ // Fast-path the common case.
+ if (this->getKind() != UsingShadow &&
+ this->getKind() != ObjCCompatibleAlias)
+ return this;
+
+ return getUnderlyingDeclImpl();
+ }
const NamedDecl *getUnderlyingDecl() const {
return const_cast<NamedDecl*>(this)->getUnderlyingDecl();
}
@@ -317,6 +355,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const NamedDecl &ND) {
/// location of the statement. For GNU local labels (__label__), the decl
/// location is where the __label__ is.
class LabelDecl : public NamedDecl {
+ virtual void anchor();
LabelStmt *TheStmt;
/// LocStart - For normal labels, this is the same as the main declaration
/// label, i.e., the location of the identifier; for GNU local labels,
@@ -333,14 +372,15 @@ public:
static LabelDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentL, IdentifierInfo *II,
SourceLocation GnuLabelL);
-
+ static LabelDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
LabelStmt *getStmt() const { return TheStmt; }
void setStmt(LabelStmt *T) { TheStmt = T; }
bool isGnuLocal() const { return LocStart != getLocation(); }
void setLocStart(SourceLocation L) { LocStart = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(LocStart, getLocation());
}
@@ -349,10 +389,12 @@ public:
static bool classof(const LabelDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Label; }
};
-
+
/// NamespaceDecl - Represent a C++ namespace.
-class NamespaceDecl : public NamedDecl, public DeclContext {
- bool IsInline : 1;
+class NamespaceDecl : public NamedDecl, public DeclContext,
+ public Redeclarable<NamespaceDecl>
+{
+ virtual void anchor();
/// LocStart - The starting location of the source range, pointing
/// to either the namespace or the inline keyword.
@@ -360,41 +402,40 @@ class NamespaceDecl : public NamedDecl, public DeclContext {
/// RBraceLoc - The ending location of the source range.
SourceLocation RBraceLoc;
- // For extended namespace definitions:
- //
- // namespace A { int x; }
- // namespace A { int y; }
- //
- // there will be one NamespaceDecl for each declaration.
- // NextNamespace points to the next extended declaration.
- // OrigNamespace points to the original namespace declaration.
- // OrigNamespace of the first namespace decl points to its anonymous namespace
- LazyDeclPtr NextNamespace;
-
- /// \brief A pointer to either the original namespace definition for
- /// this namespace (if the boolean value is false) or the anonymous
- /// namespace that lives just inside this namespace (if the boolean
- /// value is true).
- ///
- /// We can combine these two notions because the anonymous namespace
- /// must only be stored in one of the namespace declarations (so all
- /// of the namespace declarations can find it). We therefore choose
- /// the original namespace declaration, since all of the namespace
- /// declarations have a link directly to it; the original namespace
- /// declaration itself only needs to know that it is the original
- /// namespace declaration (which the boolean indicates).
- llvm::PointerIntPair<NamespaceDecl *, 1, bool> OrigOrAnonNamespace;
-
- NamespaceDecl(DeclContext *DC, SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id)
- : NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace),
- IsInline(false), LocStart(StartLoc), RBraceLoc(),
- NextNamespace(), OrigOrAnonNamespace(0, true) { }
+ /// \brief A pointer to either the anonymous namespace that lives just inside
+ /// this namespace or to the first namespace in the chain (the latter case
+ /// only when this is not the first in the chain), along with a
+ /// boolean value indicating whether this is an inline namespace.
+ llvm::PointerIntPair<NamespaceDecl *, 1, bool> AnonOrFirstNamespaceAndInline;
+ NamespaceDecl(DeclContext *DC, bool Inline, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl);
+
+ typedef Redeclarable<NamespaceDecl> redeclarable_base;
+ virtual NamespaceDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
+ }
+ virtual NamespaceDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual NamespaceDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
public:
static NamespaceDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id);
+ bool Inline, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl);
+
+ static NamespaceDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
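With namespaces now redeclarable, visiting each re-opened definition becomes a short loop; a hedged sketch ('ND' is a hypothetical NamespaceDecl*):

    for (NamespaceDecl::redecl_iterator R = ND->redecls_begin(),
                                        REnd = ND->redecls_end();
         R != REnd; ++R) {
      NamespaceDecl *Piece = *R; // one 'namespace A { ... }' block in the chain
    }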
/// \brief Returns true if this is an anonymous namespace declaration.
///
@@ -411,67 +452,60 @@ public:
/// \brief Returns true if this is an inline namespace declaration.
bool isInline() const {
- return IsInline;
+ return AnonOrFirstNamespaceAndInline.getInt();
}
/// \brief Set whether this is an inline namespace declaration.
void setInline(bool Inline) {
- IsInline = Inline;
+ AnonOrFirstNamespaceAndInline.setInt(Inline);
}
- /// \brief Return the next extended namespace declaration or null if there
- /// is none.
- NamespaceDecl *getNextNamespace();
- const NamespaceDecl *getNextNamespace() const {
- return const_cast<NamespaceDecl *>(this)->getNextNamespace();
- }
+ /// \brief Get the original (first) namespace declaration.
+ NamespaceDecl *getOriginalNamespace() {
+ if (isFirstDeclaration())
+ return this;
- /// \brief Set the next extended namespace declaration.
- void setNextNamespace(NamespaceDecl *ND) { NextNamespace = ND; }
+ return AnonOrFirstNamespaceAndInline.getPointer();
+ }
/// \brief Get the original (first) namespace declaration.
- NamespaceDecl *getOriginalNamespace() const {
- if (OrigOrAnonNamespace.getInt())
- return const_cast<NamespaceDecl *>(this);
+ const NamespaceDecl *getOriginalNamespace() const {
+ if (isFirstDeclaration())
+ return this;
- return OrigOrAnonNamespace.getPointer();
+ return AnonOrFirstNamespaceAndInline.getPointer();
}
/// \brief Return true if this declaration is an original (first) declaration
/// of the namespace. This is false for non-original (subsequent) namespace
/// declarations and anonymous namespaces.
bool isOriginalNamespace() const {
- return getOriginalNamespace() == this;
- }
-
- /// \brief Set the original (first) namespace declaration.
- void setOriginalNamespace(NamespaceDecl *ND) {
- if (ND != this) {
- OrigOrAnonNamespace.setPointer(ND);
- OrigOrAnonNamespace.setInt(false);
- }
+ return isFirstDeclaration();
}
+ /// \brief Retrieve the anonymous namespace nested inside this namespace,
+ /// if any.
NamespaceDecl *getAnonymousNamespace() const {
- return getOriginalNamespace()->OrigOrAnonNamespace.getPointer();
+ return getOriginalNamespace()->AnonOrFirstNamespaceAndInline.getPointer();
}
void setAnonymousNamespace(NamespaceDecl *D) {
- assert(!D || D->isAnonymousNamespace());
- assert(!D || D->getParent()->getRedeclContext() == this);
- getOriginalNamespace()->OrigOrAnonNamespace.setPointer(D);
+ getOriginalNamespace()->AnonOrFirstNamespaceAndInline.setPointer(D);
}
- virtual NamespaceDecl *getCanonicalDecl() { return getOriginalNamespace(); }
- const NamespaceDecl *getCanonicalDecl() const {
- return getOriginalNamespace();
+ /// Retrieves the canonical declaration of this namespace.
+ NamespaceDecl *getCanonicalDecl() {
+ return getOriginalNamespace();
}
-
- virtual SourceRange getSourceRange() const {
+ const NamespaceDecl *getCanonicalDecl() const {
+ return getOriginalNamespace();
+ }
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(LocStart, RBraceLoc);
}
- SourceLocation getLocStart() const { return LocStart; }
+ SourceLocation getLocStart() const LLVM_READONLY { return LocStart; }
SourceLocation getRBraceLoc() const { return RBraceLoc; }
void setLocStart(SourceLocation L) { LocStart = L; }
void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
@@ -486,7 +520,7 @@ public:
static NamespaceDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<NamespaceDecl *>(const_cast<DeclContext*>(DC));
}
-
+
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
@@ -495,6 +529,7 @@ public:
/// an lvalue) a function (in which case it is a function designator) or
/// an enum constant.
class ValueDecl : public NamedDecl {
+ virtual void anchor();
QualType DeclType;
protected:
@@ -505,6 +540,12 @@ public:
QualType getType() const { return DeclType; }
void setType(QualType newType) { DeclType = newType; }
+ /// \brief Determine whether this symbol is weakly-imported,
+ /// or declared with the weak or weak-ref attr.
+ bool isWeak() const {
+ return hasAttr<WeakAttr>() || hasAttr<WeakRefAttr>() || isWeakImported();
+ }
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ValueDecl *D) { return true; }
@@ -537,7 +578,7 @@ struct QualifierInfo {
void setTemplateParameterListsInfo(ASTContext &Context,
unsigned NumTPLists,
TemplateParameterList **TPLists);
-
+
private:
// Copy constructor and copy assignment are disabled.
QualifierInfo(const QualifierInfo&);
@@ -592,7 +633,10 @@ public:
/// range taking into account any outer template declarations.
SourceLocation getOuterLocStart() const;
- virtual SourceRange getSourceRange() const;
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return getOuterLocStart();
+ }
/// \brief Retrieve the nested-name-specifier that qualifies the name of this
/// declaration, if it was present in the source.
@@ -600,15 +644,15 @@ public:
return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
: 0;
}
-
- /// \brief Retrieve the nested-name-specifier (with source-location
- /// information) that qualifies the name of this declaration, if it was
+
+ /// \brief Retrieve the nested-name-specifier (with source-location
+ /// information) that qualifies the name of this declaration, if it was
/// present in the source.
NestedNameSpecifierLoc getQualifierLoc() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc
: NestedNameSpecifierLoc();
}
-
+
void setQualifierInfo(NestedNameSpecifierLoc QualifierLoc);
unsigned getNumTemplateParameterLists() const {
@@ -655,8 +699,9 @@ struct EvaluatedStmt {
/// integral constant expression.
bool CheckingICE : 1;
- /// \brief Whether this statement is an integral constant
- /// expression. Only valid if CheckedICE is true.
+ /// \brief Whether this statement is an integral constant expression,
+ /// or in C++11, whether the statement is a constant expression. Only
+ /// valid if CheckedICE is true.
bool IsICE : 1;
Stmt *Value;
@@ -675,6 +720,13 @@ public:
/// It is illegal to call this function with SC == None.
static const char *getStorageClassSpecifierString(StorageClass SC);
+ /// \brief Initialization styles.
+ enum InitializationStyle {
+ CInit, ///< C-style initialization with assignment
+ CallInit, ///< Call-style initialization (C++98)
+ ListInit ///< Direct list-initialization (C++11)
+ };
+
protected:
/// \brief Placeholder type used in Init to denote an unparsed C++ default
/// argument.
@@ -700,14 +752,15 @@ private:
unsigned SClass : 3;
unsigned SClassAsWritten : 3;
unsigned ThreadSpecified : 1;
- unsigned HasCXXDirectInit : 1;
+ unsigned InitStyle : 2;
/// \brief Whether this variable is the exception variable in a C++ catch
/// or an Objective-C @catch statement.
unsigned ExceptionVar : 1;
-
+
/// \brief Whether this local variable could be allocated in the return
- /// slot of its function, enabling the named return value optimization (NRVO).
+ /// slot of its function, enabling the named return value optimization
+ /// (NRVO).
unsigned NRVOVariable : 1;
/// \brief Whether this variable is the for-range-declaration in a C++0x
@@ -721,14 +774,14 @@ private:
/// \brief Whether this variable is (C++0x) constexpr.
unsigned IsConstexpr : 1;
};
- enum { NumVarDeclBits = 13 };
+ enum { NumVarDeclBits = 14 };
friend class ASTDeclReader;
friend class StmtIteratorBase;
-
+
protected:
enum { NumParameterIndexBits = 8 };
-
+
class ParmVarDeclBitfields {
friend class ParmVarDecl;
friend class ASTDeclReader;
@@ -749,7 +802,7 @@ protected:
/// Otherwise, the number of function parameter scopes enclosing
/// the function parameter scope in which this parameter was
/// declared.
- unsigned ScopeDepthOrObjCQuals : 8;
+ unsigned ScopeDepthOrObjCQuals : 7;
/// The number of parameters preceding this parameter in the
/// function parameter scope in which it was declared.
@@ -777,22 +830,28 @@ protected:
typedef Redeclarable<VarDecl> redeclarable_base;
virtual VarDecl *getNextRedeclaration() { return RedeclLink.getNext(); }
+ virtual VarDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual VarDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
public:
typedef redeclarable_base::redecl_iterator redecl_iterator;
- redecl_iterator redecls_begin() const {
- return redeclarable_base::redecls_begin();
- }
- redecl_iterator redecls_end() const {
- return redeclarable_base::redecls_end();
- }
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
static VarDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
StorageClass S, StorageClass SCAsWritten);
- virtual SourceRange getSourceRange() const;
+ static VarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
StorageClass getStorageClass() const {
return (StorageClass) VarDeclBits.SClass;
@@ -823,12 +882,12 @@ public:
return getStorageClass() >= SC_Auto;
}
- /// isStaticLocal - Returns true if a variable with function scope is a
+ /// isStaticLocal - Returns true if a variable with function scope is a
/// static local variable.
bool isStaticLocal() const {
return getStorageClass() == SC_Static && !isFileVarDecl();
}
-
+
/// hasExternStorage - Returns true if a variable has extern or
/// __private_extern__ storage.
bool hasExternalStorage() const {
@@ -896,11 +955,17 @@ public:
/// \brief Check whether this declaration is a definition. If this could be
/// a tentative definition (in C), don't check whether there's an overriding
/// definition.
- DefinitionKind isThisDeclarationADefinition() const;
+ DefinitionKind isThisDeclarationADefinition(ASTContext &) const;
+ DefinitionKind isThisDeclarationADefinition() const {
+ return isThisDeclarationADefinition(getASTContext());
+ }
/// \brief Check whether this variable is defined in this
/// translation unit.
- DefinitionKind hasDefinition() const;
+ DefinitionKind hasDefinition(ASTContext &) const;
+ DefinitionKind hasDefinition() const {
+ return hasDefinition(getASTContext());
+ }
/// \brief Get the tentative definition that acts as the real definition in
/// a TU. Returns null if there is a proper definition available.
@@ -914,26 +979,32 @@ public:
bool isTentativeDefinitionNow() const;
/// \brief Get the real (not just tentative) definition for this declaration.
- VarDecl *getDefinition();
+ VarDecl *getDefinition(ASTContext &);
+ const VarDecl *getDefinition(ASTContext &C) const {
+ return const_cast<VarDecl*>(this)->getDefinition(C);
+ }
+ VarDecl *getDefinition() {
+ return getDefinition(getASTContext());
+ }
const VarDecl *getDefinition() const {
return const_cast<VarDecl*>(this)->getDefinition();
}
- /// \brief Determine whether this is or was instantiated from an out-of-line
+ /// \brief Determine whether this is or was instantiated from an out-of-line
/// definition of a static data member.
virtual bool isOutOfLine() const;
/// \brief If this is a static data member, find its out-of-line definition.
VarDecl *getOutOfLineDefinition();
-
+
/// isFileVarDecl - Returns true for file scoped variable declaration.
bool isFileVarDecl() const {
if (getKind() != Decl::Var)
return false;
-
+
if (getDeclContext()->getRedeclContext()->isFileContext())
return true;
-
+
if (isStaticDataMember())
return true;
@@ -996,7 +1067,7 @@ public:
void setInit(Expr *I);
/// \brief Determine whether this variable is a reference that
- /// extends the lifetime of its temporary initializer.
+ /// extends the lifetime of its temporary initializer.
///
/// A reference extends the lifetime of its temporary initializer if
/// its initializer is an rvalue that would normally go out of scope
@@ -1009,41 +1080,21 @@ public:
/// \endcode
bool extendsLifetimeOfTemporary() const;
- EvaluatedStmt *EnsureEvaluatedStmt() const {
- EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>();
- if (!Eval) {
- Stmt *S = Init.get<Stmt *>();
- Eval = new (getASTContext()) EvaluatedStmt;
- Eval->Value = S;
- Init = Eval;
- }
- return Eval;
- }
-
- /// \brief Check whether we are in the process of checking whether the
- /// initializer can be evaluated.
- bool isEvaluatingValue() const {
- if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
- return Eval->IsEvaluating;
-
- return false;
- }
+ /// \brief Determine whether this variable's value can be used in a
+ /// constant expression, according to the relevant language standard.
+ /// This only checks properties of the declaration, and does not check
+ /// whether the initializer is in fact a constant expression.
+ bool isUsableInConstantExpressions(ASTContext &C) const;
- /// \brief Note that we now are checking whether the initializer can be
- /// evaluated.
- void setEvaluatingValue() const {
- EvaluatedStmt *Eval = EnsureEvaluatedStmt();
- Eval->IsEvaluating = true;
- }
+ EvaluatedStmt *ensureEvaluatedStmt() const;
- /// \brief Note that constant evaluation has computed the given
- /// value for this variable's initializer.
- void setEvaluatedValue(const APValue &Value) const {
- EvaluatedStmt *Eval = EnsureEvaluatedStmt();
- Eval->IsEvaluating = false;
- Eval->WasEvaluated = true;
- Eval->Evaluated = Value;
- }
+ /// \brief Attempt to evaluate the value of the initializer attached to this
+ /// declaration, and produce notes explaining why it cannot be evaluated or is
+ /// not a constant expression. Returns a pointer to the value if evaluation
+ /// succeeded, 0 otherwise.
+ APValue *evaluateValue() const;
+ APValue *evaluateValue(
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
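A hedged usage sketch ('VD' is a hypothetical VarDecl* with an attached initializer):

    llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
    if (APValue *Value = VD->evaluateValue(Notes)) {
      // Success: 'Value' points at the cached evaluation result.
    } else {
      // 'Notes' carries diagnostics explaining why evaluation failed.
    }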
/// \brief Return the already-evaluated value of this variable's
/// initializer, or NULL if the value is not yet known. Returns pointer
@@ -1065,8 +1116,9 @@ public:
return false;
}
- /// \brief Determines whether the initializer is an integral
- /// constant expression.
+ /// \brief Determines whether the initializer is an integral constant
+ /// expression, or in C++11, whether the initializer is a constant
+ /// expression.
///
/// \pre isInitKnownICE()
bool isInitICE() const {
@@ -1075,41 +1127,31 @@ public:
return Init.get<EvaluatedStmt *>()->IsICE;
}
- /// \brief Check whether we are in the process of checking the initializer
- /// is an integral constant expression.
- bool isCheckingICE() const {
- if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
- return Eval->CheckingICE;
-
- return false;
- }
+ /// \brief Determine whether the value of the initializer attached to this
+ /// declaration is an integral constant expression.
+ bool checkInitIsICE() const;
- /// \brief Note that we now are checking whether the initializer is an
- /// integral constant expression.
- void setCheckingICE() const {
- EvaluatedStmt *Eval = EnsureEvaluatedStmt();
- Eval->CheckingICE = true;
+ void setInitStyle(InitializationStyle Style) {
+ VarDeclBits.InitStyle = Style;
}
- /// \brief Note that we now know whether the initializer is an
- /// integral constant expression.
- void setInitKnownICE(bool IsICE) const {
- EvaluatedStmt *Eval = EnsureEvaluatedStmt();
- Eval->CheckingICE = false;
- Eval->CheckedICE = true;
- Eval->IsICE = IsICE;
+ /// \brief The style of initialization for this declaration.
+ ///
+ /// C-style initialization is "int x = 1;". Call-style initialization is
+ /// a C++98 direct-initializer, e.g. "int x(1);". The Init expression will be
+ /// the expression inside the parens or a "ClassType(a,b,c)" class constructor
+ /// expression for class types. List-style initialization is C++11 syntax,
+ /// e.g. "int x{1};". Clients can distinguish between different forms of
+ /// initialization by checking this value. In particular, "int x = {1};" is
+ /// C-style, "int x({1})" is call-style, and "int x{1};" is list-style; the
+ /// Init expression in all three cases is an InitListExpr.
+ InitializationStyle getInitStyle() const {
+ return static_cast<InitializationStyle>(VarDeclBits.InitStyle);
}
- void setCXXDirectInitializer(bool T) { VarDeclBits.HasCXXDirectInit = T; }
-
- /// hasCXXDirectInitializer - If true, the initializer was a direct
- /// initializer, e.g: "int x(1);". The Init expression will be the expression
- /// inside the parens or a "ClassType(a,b,c)" class constructor expression for
- /// class types. Clients can distinguish between "int x(1);" and "int x=1;"
- /// by checking hasCXXDirectInitializer.
- ///
- bool hasCXXDirectInitializer() const {
- return VarDeclBits.HasCXXDirectInit;
+ /// \brief Whether the initializer is a direct-initializer (list or call).
+ bool isDirectInit() const {
+ return getInitStyle() != CInit;
}
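A hedged sketch of a client dispatching on the style ('VD' is a hypothetical VarDecl*):

    switch (VD->getInitStyle()) {
    case VarDecl::CInit:    break; // "int x = 1;"  (also "int x = {1};")
    case VarDecl::CallInit: break; // "int x(1);"   (also "int x({1});")
    case VarDecl::ListInit: break; // "int x{1};"   (C++11 only)
    }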
/// \brief Determine whether this variable is the exception variable in a
@@ -1118,7 +1160,7 @@ public:
return VarDeclBits.ExceptionVar;
}
void setExceptionVariable(bool EV) { VarDeclBits.ExceptionVar = EV; }
-
+
/// \brief Determine whether this local variable can be used with the named
/// return value optimization (NRVO).
///
@@ -1143,7 +1185,7 @@ public:
/// Generally such variables are also 'const' for safety.
bool isARCPseudoStrong() const { return VarDeclBits.ARCPseudoStrong; }
void setARCPseudoStrong(bool ps) { VarDeclBits.ARCPseudoStrong = ps; }
-
+
/// Whether this variable is (C++0x) constexpr.
bool isConstexpr() const { return VarDeclBits.IsConstexpr; }
void setConstexpr(bool IC) { VarDeclBits.IsConstexpr = IC; }
@@ -1153,15 +1195,15 @@ public:
/// from which it was instantiated.
VarDecl *getInstantiatedFromStaticDataMember() const;
- /// \brief If this variable is a static data member, determine what kind of
+ /// \brief If this variable is a static data member, determine what kind of
/// template specialization or instantiation this is.
TemplateSpecializationKind getTemplateSpecializationKind() const;
-
+
/// \brief If this variable is an instantiation of a static data member of a
/// class template specialization, retrieves the member specialization
/// information.
MemberSpecializationInfo *getMemberSpecializationInfo() const;
-
+
/// \brief For a static data member that was instantiated from a static
/// data member of a class template, set the template specialiation kind.
void setTemplateSpecializationKind(TemplateSpecializationKind TSK,
@@ -1174,11 +1216,14 @@ public:
};
class ImplicitParamDecl : public VarDecl {
+ virtual void anchor();
public:
static ImplicitParamDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T);
+ static ImplicitParamDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
ImplicitParamDecl(DeclContext *DC, SourceLocation IdLoc,
IdentifierInfo *Id, QualType Type)
: VarDecl(ImplicitParam, DC, IdLoc, IdLoc, Id, Type,
@@ -1218,8 +1263,10 @@ public:
StorageClass S, StorageClass SCAsWritten,
Expr *DefArg);
- virtual SourceRange getSourceRange() const;
+ static ParmVarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
void setObjCMethodScopeInfo(unsigned parameterIndex) {
ParmVarDeclBits.IsObjCMethodParam = true;
setParameterIndex(parameterIndex);
@@ -1229,7 +1276,8 @@ public:
assert(!ParmVarDeclBits.IsObjCMethodParam);
ParmVarDeclBits.ScopeDepthOrObjCQuals = scopeDepth;
- assert(ParmVarDeclBits.ScopeDepthOrObjCQuals == scopeDepth && "truncation!");
+ assert(ParmVarDeclBits.ScopeDepthOrObjCQuals == scopeDepth
+ && "truncation!");
setParameterIndex(parameterIndex);
}
@@ -1276,20 +1324,14 @@ public:
const Expr *getDefaultArg() const {
return const_cast<ParmVarDecl *>(this)->getDefaultArg();
}
-
+
void setDefaultArg(Expr *defarg) {
Init = reinterpret_cast<Stmt *>(defarg);
}
- unsigned getNumDefaultArgTemporaries() const;
- CXXTemporary *getDefaultArgTemporary(unsigned i);
- const CXXTemporary *getDefaultArgTemporary(unsigned i) const {
- return const_cast<ParmVarDecl *>(this)->getDefaultArgTemporary(i);
- }
-
/// \brief Retrieve the source range that covers the entire default
/// argument.
- SourceRange getDefaultArgRange() const;
+ SourceRange getDefaultArgRange() const;
void setUninstantiatedDefaultArg(Expr *arg) {
Init = reinterpret_cast<UninstantiatedDefaultArgument *>(arg);
}
@@ -1351,7 +1393,7 @@ public:
/// \brief Determine whether this parameter is actually a function
/// parameter pack.
bool isParameterPack() const;
-
+
/// setOwningFunction - Sets the function declaration that owns this
/// ParmVarDecl. Since ParmVarDecls are often created before the
/// FunctionDecls that own them, this routine is required to update
@@ -1362,7 +1404,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ParmVarDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ParmVar; }
-
+
private:
enum { ParameterIndexSentinel = (1 << NumParameterIndexBits) - 1 };
@@ -1371,7 +1413,7 @@ private:
setParameterIndexLarge(parameterIndex);
return;
}
-
+
ParmVarDeclBits.ParameterIndex = parameterIndex;
assert(ParmVarDeclBits.ParameterIndex == parameterIndex && "truncation!");
}
@@ -1379,7 +1421,7 @@ private:
unsigned d = ParmVarDeclBits.ParameterIndex;
return d == ParameterIndexSentinel ? getParameterIndexLarge() : d;
}
-
+
void setParameterIndexLarge(unsigned parameterIndex);
unsigned getParameterIndexLarge() const;
};
@@ -1394,7 +1436,7 @@ private:
/// FunctionDecl (e.g., the translation unit); this FunctionDecl
/// contains all of the information known about the function. Other,
/// previous declarations of the function are available via the
-/// getPreviousDeclaration() chain.
+/// getPreviousDecl() chain.
class FunctionDecl : public DeclaratorDecl, public DeclContext,
public Redeclarable<FunctionDecl> {
public:
@@ -1415,6 +1457,11 @@ private:
/// no formals.
ParmVarDecl **ParamInfo;
+ /// DeclsInPrototypeScope - Array of pointers to NamedDecls for
+ /// decls defined in the function prototype that are not parameters. E.g.
+ /// 'enum Y' in 'void f(enum Y {AA} x) {}'.
+ llvm::ArrayRef<NamedDecl*> DeclsInPrototypeScope;
+
LazyDeclStmtPtr Body;
// FIXME: This can be packed into the bitfields in Decl.
@@ -1456,7 +1503,7 @@ private:
/// FunctionTemplateSpecializationInfo, which contains information about
/// the template being specialized and the template arguments involved in
/// that specialization.
- llvm::PointerUnion4<FunctionTemplateDecl *,
+ llvm::PointerUnion4<FunctionTemplateDecl *,
MemberSpecializationInfo *,
FunctionTemplateSpecializationInfo *,
DependentFunctionTemplateSpecializationInfo *>
@@ -1486,7 +1533,7 @@ private:
/// \param TemplateArgsAsWritten location info of template arguments.
///
/// \param PointOfInstantiation point at which the function template
- /// specialization was first instantiated.
+ /// specialization was first instantiated.
void setFunctionTemplateSpecialization(ASTContext &C,
FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
@@ -1524,15 +1571,19 @@ protected:
typedef Redeclarable<FunctionDecl> redeclarable_base;
virtual FunctionDecl *getNextRedeclaration() { return RedeclLink.getNext(); }
+ virtual FunctionDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual FunctionDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
public:
typedef redeclarable_base::redecl_iterator redecl_iterator;
- redecl_iterator redecls_begin() const {
- return redeclarable_base::redecls_begin();
- }
- redecl_iterator redecls_end() const {
- return redeclarable_base::redecls_end();
- }
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
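
With getPreviousDecl() and getMostRecentDecl() re-exported through the
using-declarations above, walking a redeclaration chain becomes a simple
loop. A sketch, assuming FD is a valid FunctionDecl*:

  // Walk from the most recent redeclaration back to the first one.
  for (FunctionDecl *R = FD->getMostRecentDecl(); R; R = R->getPreviousDecl()) {
    if (R->isThisDeclarationADefinition()) {
      // R is the redeclaration that carries the body.
      break;
    }
  }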
static FunctionDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation NLoc,
@@ -1560,6 +1611,8 @@ public:
bool hasWrittenPrototype = true,
bool isConstexprSpecified = false);
+ static FunctionDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
DeclarationNameInfo getNameInfo() const {
return DeclarationNameInfo(getDeclName(), getLocation(), DNLoc);
}
@@ -1570,7 +1623,7 @@ public:
void setRangeEnd(SourceLocation E) { EndRangeLoc = E; }
- virtual SourceRange getSourceRange() const;
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
/// \brief Returns true if the function has a body (definition). The
/// function body might be in any of the (re-)declarations of this
@@ -1656,7 +1709,7 @@ public:
void setTrivial(bool IT) { IsTrivial = IT; }
/// Whether this function is defaulted per C++0x. Only valid for
- /// special member functions.
+ /// special member functions.
bool isDefaulted() const { return IsDefaulted; }
void setDefaulted(bool D = true) { IsDefaulted = D; }
@@ -1774,6 +1827,11 @@ public:
setParams(getASTContext(), NewParamInfo);
}
+ const llvm::ArrayRef<NamedDecl*> &getDeclsInPrototypeScope() const {
+ return DeclsInPrototypeScope;
+ }
+ void setDeclsInPrototypeScope(llvm::ArrayRef<NamedDecl *> NewDecls);
+
/// getMinRequiredArguments - Returns the minimum number of arguments
/// needed to call this function. This may be fewer than the number of
/// function parameters, if some of the parameters have default
@@ -1783,12 +1841,12 @@ public:
QualType getResultType() const {
return getType()->getAs<FunctionType>()->getResultType();
}
-
+
/// \brief Determine the type of an expression that calls this function.
QualType getCallResultType() const {
return getType()->getAs<FunctionType>()->getCallResultType(getASTContext());
}
-
+
StorageClass getStorageClass() const { return StorageClass(SClass); }
void setStorageClass(StorageClass SC);
@@ -1799,10 +1857,10 @@ public:
/// \brief Determine whether the "inline" keyword was specified for this
/// function.
bool isInlineSpecified() const { return IsInlineSpecified; }
-
+
/// Set whether the "inline" keyword was specified for this function.
- void setInlineSpecified(bool I) {
- IsInlineSpecified = I;
+ void setInlineSpecified(bool I) {
+ IsInlineSpecified = I;
IsInline = I;
}
@@ -1819,7 +1877,7 @@ public:
bool isInlineDefinitionExternallyVisible() const;
bool doesDeclarationForceExternallyVisibleDefinition() const;
-
+
/// isOverloadedOperator - Whether this function declaration
/// represents a C++ overloaded operator, e.g., "operator+".
bool isOverloadedOperator() const {
@@ -1852,7 +1910,7 @@ public:
/// X<int>::A is required, it will be instantiated from the
/// declaration returned by getInstantiatedFromMemberFunction().
FunctionDecl *getInstantiatedFromMemberFunction() const;
-
+
/// \brief What kind of templated function this is.
TemplatedKind getTemplatedKind() const;
@@ -1860,7 +1918,7 @@ public:
/// class template specialization, retrieves the member specialization
/// information.
MemberSpecializationInfo *getMemberSpecializationInfo() const;
-
+
/// \brief Specify that this record is an instantiation of the
/// member function FD.
void setInstantiationOfMemberFunction(FunctionDecl *FD,
@@ -1888,7 +1946,7 @@ public:
TemplateOrSpecialization = Template;
}
- /// \brief Determine whether this function is a function template
+ /// \brief Determine whether this function is a function template
/// specialization.
bool isFunctionTemplateSpecialization() const {
return getPrimaryTemplate() != 0;
@@ -1899,7 +1957,7 @@ public:
FunctionDecl *getClassScopeSpecializationPattern() const;
/// \brief If this function is actually a function template specialization,
- /// retrieve information about this function template specialization.
+ /// retrieve information about this function template specialization.
/// Otherwise, returns NULL.
FunctionTemplateSpecializationInfo *getTemplateSpecializationInfo() const {
return TemplateOrSpecialization.
@@ -1910,7 +1968,11 @@ public:
/// specialization or a member of a class template specialization that can
/// be implicitly instantiated.
bool isImplicitlyInstantiable() const;
-
+
+  /// \brief Determines whether this function was instantiated from a
+  /// function template.
+ bool isTemplateInstantiation() const;
+
/// \brief Retrieve the function declaration from which this function could
/// be instantiated, if it is an instantiation (rather than a non-template
/// or a specialization, for example).
@@ -1958,7 +2020,7 @@ public:
/// \param TemplateArgsAsWritten location info of template arguments.
///
/// \param PointOfInstantiation point at which the function template
- /// specialization was first instantiated.
+ /// specialization was first instantiated.
void setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
@@ -1994,15 +2056,21 @@ public:
/// \brief Retrieve the (first) point of instantiation of a function template
/// specialization or a member of a class template specialization.
///
- /// \returns the first point of instantiation, if this function was
- /// instantiated from a template; otherwise, returns an invalid source
+ /// \returns the first point of instantiation, if this function was
+ /// instantiated from a template; otherwise, returns an invalid source
/// location.
SourceLocation getPointOfInstantiation() const;
-
- /// \brief Determine whether this is or was instantiated from an out-of-line
+
+ /// \brief Determine whether this is or was instantiated from an out-of-line
/// definition of a member function.
virtual bool isOutOfLine() const;
-
+
+  /// \brief Identify a memory copying or setting function.
+  ///
+  /// If this function is a memory copy or setting function, returns the
+  /// corresponding Builtin ID; otherwise, returns 0.
+ unsigned getMemoryFunctionKind() const;
+
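
A usage sketch for getMemoryFunctionKind(): callers are expected to dispatch
on the returned Builtin ID (the Builtin::BI* constants come from
clang/Basic/Builtins.h; FD is an assumed FunctionDecl*):

  if (unsigned BID = FD->getMemoryFunctionKind()) {
    switch (BID) {
    case Builtin::BImemcpy:
    case Builtin::BImemmove:
      // Memory-copying builtin.
      break;
    case Builtin::BImemset:
      // Memory-setting builtin.
      break;
    default:
      break;
    }
  }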
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const FunctionDecl *D) { return true; }
@@ -2056,6 +2124,8 @@ public:
TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
bool HasInit);
+ static FieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
/// getFieldIndex - Returns the index of this field within its record,
/// as appropriate for passing to ASTRecordLayout::getFieldOffset.
unsigned getFieldIndex() const;
@@ -2108,7 +2178,8 @@ public:
Expr *getInClassInitializer() const {
return hasInClassInitializer() ? InitializerOrBitWidth.getPointer() : 0;
}
- /// setInClassInitializer - Set the C++0x in-class initializer for this member.
+ /// setInClassInitializer - Set the C++0x in-class initializer for this
+ /// member.
void setInClassInitializer(Expr *Init);
/// removeInClassInitializer - Remove the C++0x in-class initializer from this
/// member.
@@ -2128,7 +2199,7 @@ public:
return cast<RecordDecl>(getDeclContext());
}
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -2155,7 +2226,8 @@ public:
SourceLocation L, IdentifierInfo *Id,
QualType T, Expr *E,
const llvm::APSInt &V);
-
+ static EnumConstantDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
const Expr *getInitExpr() const { return (const Expr*) Init; }
Expr *getInitExpr() { return (Expr*) Init; }
const llvm::APSInt &getInitVal() const { return Val; }
@@ -2163,8 +2235,8 @@ public:
void setInitExpr(Expr *E) { Init = (Stmt*) E; }
void setInitVal(const llvm::APSInt &V) { Val = V; }
- SourceRange getSourceRange() const;
-
+ SourceRange getSourceRange() const LLVM_READONLY;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const EnumConstantDecl *D) { return true; }
@@ -2177,6 +2249,7 @@ public:
/// field injected from an anonymous union/struct into the parent scope.
/// IndirectFieldDecl are always implicit.
class IndirectFieldDecl : public ValueDecl {
+ virtual void anchor();
NamedDecl **Chaining;
unsigned ChainingSize;
@@ -2189,6 +2262,8 @@ public:
static IndirectFieldDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
QualType T, NamedDecl **CH, unsigned CHS);
+
+ static IndirectFieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
typedef NamedDecl * const *chain_iterator;
chain_iterator chain_begin() const { return Chaining; }
@@ -2216,6 +2291,7 @@ public:
/// TypeDecl - Represents a declaration of a type.
///
class TypeDecl : public NamedDecl {
+ virtual void anchor();
/// TypeForDecl - This indicates the Type object that represents
/// this TypeDecl. It is a cache maintained by
/// ASTContext::getTypedefType, ASTContext::getTagDeclType, and
@@ -2228,6 +2304,7 @@ class TypeDecl : public NamedDecl {
friend class TagDecl;
friend class TemplateTypeParmDecl;
friend class TagType;
+ friend class ASTReader;
protected:
TypeDecl(Kind DK, DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
@@ -2239,9 +2316,9 @@ public:
const Type *getTypeForDecl() const { return TypeForDecl; }
void setTypeForDecl(const Type *TD) { TypeForDecl = TD; }
- SourceLocation getLocStart() const { return LocStart; }
+ SourceLocation getLocStart() const LLVM_READONLY { return LocStart; }
void setLocStart(SourceLocation L) { LocStart = L; }
- virtual SourceRange getSourceRange() const {
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
if (LocStart.isValid())
return SourceRange(LocStart, getLocation());
else
@@ -2257,6 +2334,7 @@ public:
/// Base class for declarations which introduce a typedef-name.
class TypedefNameDecl : public TypeDecl, public Redeclarable<TypedefNameDecl> {
+ virtual void anchor();
/// UnderlyingType - This is the type the typedef is set to.
TypeSourceInfo *TInfo;
@@ -2270,15 +2348,19 @@ protected:
virtual TypedefNameDecl *getNextRedeclaration() {
return RedeclLink.getNext();
}
+ virtual TypedefNameDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual TypedefNameDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
public:
typedef redeclarable_base::redecl_iterator redecl_iterator;
- redecl_iterator redecls_begin() const {
- return redeclarable_base::redecls_begin();
- }
- redecl_iterator redecls_end() const {
- return redeclarable_base::redecls_end();
- }
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
TypeSourceInfo *getTypeSourceInfo() const {
return TInfo;
@@ -2318,8 +2400,9 @@ public:
static TypedefDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, TypeSourceInfo *TInfo);
-
- SourceRange getSourceRange() const;
+ static TypedefDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -2338,8 +2421,9 @@ public:
static TypeAliasDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, TypeSourceInfo *TInfo);
+ static TypeAliasDecl *CreateDeserialized(ASTContext &C, unsigned ID);
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -2364,15 +2448,17 @@ private:
/// a definition until the definition has been fully processed.
bool IsCompleteDefinition : 1;
+protected:
/// IsBeingDefined - True if this is currently being defined.
bool IsBeingDefined : 1;
+private:
/// IsEmbeddedInDeclarator - True if this tag declaration is
/// "embedded" (i.e., defined or declared for the very first time)
/// in the syntax of a declarator.
bool IsEmbeddedInDeclarator : 1;
- /// /brief True if this tag is free standing, e.g. "struct foo;".
+ /// \brief True if this tag is free standing, e.g. "struct foo;".
bool IsFreeStanding : 1;
protected:
@@ -2381,7 +2467,7 @@ protected:
unsigned NumNegativeBits : 8;
/// IsScoped - True if this tag declaration is a scoped enumeration. Only
- /// possible in C++0x mode.
+ /// possible in C++11 mode.
bool IsScoped : 1;
/// IsScopedUsingClassTag - If this tag declaration is a scoped enum,
/// then this is true if the scoped enum was declared using the class
@@ -2390,7 +2476,7 @@ protected:
bool IsScopedUsingClassTag : 1;
/// IsFixed - True if this is an enumeration with fixed underlying type. Only
- /// possible in C++0x mode.
+ /// possible in C++11 or Microsoft extensions mode.
bool IsFixed : 1;
private:
@@ -2431,20 +2517,24 @@ protected:
typedef Redeclarable<TagDecl> redeclarable_base;
virtual TagDecl *getNextRedeclaration() { return RedeclLink.getNext(); }
+ virtual TagDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual TagDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
/// @brief Completes the definition of this tag declaration.
///
/// This is a helper function for derived classes.
- void completeDefinition();
-
+ void completeDefinition();
+
public:
typedef redeclarable_base::redecl_iterator redecl_iterator;
- redecl_iterator redecls_begin() const {
- return redeclarable_base::redecls_begin();
- }
- redecl_iterator redecls_end() const {
- return redeclarable_base::redecls_end();
- }
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
SourceLocation getRBraceLoc() const { return RBraceLoc; }
void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
@@ -2456,7 +2546,7 @@ public:
/// getOuterLocStart - Return SourceLocation representing start of source
/// range taking into account any outer template declarations.
SourceLocation getOuterLocStart() const;
- virtual SourceRange getSourceRange() const;
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
virtual TagDecl* getCanonicalDecl();
const TagDecl* getCanonicalDecl() const {
@@ -2532,7 +2622,8 @@ public:
bool isEnum() const { return getTagKind() == TTK_Enum; }
TypedefNameDecl *getTypedefNameForAnonDecl() const {
- return hasExtInfo() ? 0 : TypedefNameDeclOrQualifier.get<TypedefNameDecl*>();
+ return hasExtInfo() ? 0 :
+ TypedefNameDeclOrQualifier.get<TypedefNameDecl*>();
}
void setTypedefNameForAnonDecl(TypedefNameDecl *TDD);
@@ -2543,15 +2634,15 @@ public:
return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
: 0;
}
-
- /// \brief Retrieve the nested-name-specifier (with source-location
- /// information) that qualifies the name of this declaration, if it was
+
+ /// \brief Retrieve the nested-name-specifier (with source-location
+ /// information) that qualifies the name of this declaration, if it was
/// present in the source.
NestedNameSpecifierLoc getQualifierLoc() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc
: NestedNameSpecifierLoc();
}
-
+
void setQualifierInfo(NestedNameSpecifierLoc QualifierLoc);
unsigned getNumTemplateParameterLists() const {
@@ -2580,9 +2671,11 @@ public:
friend class ASTDeclWriter;
};
-/// EnumDecl - Represents an enum. As an extension, we allow forward-declared
-/// enums.
+/// EnumDecl - Represents an enum. In C++11, enums can be forward-declared
+/// with a fixed underlying type, and in C we allow them to be forward-declared
+/// with no underlying type as an extension.
class EnumDecl : public TagDecl {
+ virtual void anchor();
/// IntegerType - This represent the integer type that the enum corresponds
/// to for code generation purposes. Note that the enumerator constants may
/// have a different type than this does.
@@ -2606,23 +2699,16 @@ class EnumDecl : public TagDecl {
/// in C++) are of the enum type instead.
QualType PromotionType;
- /// \brief If the enumeration was instantiated from an enumeration
- /// within a class or function template, this pointer refers to the
- /// enumeration declared within the template.
- EnumDecl *InstantiatedFrom;
-
- // The number of positive and negative bits required by the
- // enumerators are stored in the SubclassBits field.
- enum {
- NumBitsWidth = 8,
- NumBitsMask = (1 << NumBitsWidth) - 1
- };
+ /// \brief If this enumeration is an instantiation of a member enumeration
+ /// of a class template specialization, this is the member specialization
+ /// information.
+ MemberSpecializationInfo *SpecializationInfo;
EnumDecl(DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, EnumDecl *PrevDecl,
bool Scoped, bool ScopedUsingClassTag, bool Fixed)
: TagDecl(Enum, TTK_Enum, DC, IdLoc, Id, PrevDecl, StartLoc),
- InstantiatedFrom(0) {
+ SpecializationInfo(0) {
assert(Scoped || !ScopedUsingClassTag);
IntegerType = (const Type*)0;
NumNegativeBits = 0;
@@ -2631,6 +2717,9 @@ class EnumDecl : public TagDecl {
IsScopedUsingClassTag = ScopedUsingClassTag;
IsFixed = Fixed;
}
+
+ void setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
+ TemplateSpecializationKind TSK);
public:
EnumDecl *getCanonicalDecl() {
return cast<EnumDecl>(TagDecl::getCanonicalDecl());
@@ -2639,11 +2728,22 @@ public:
return cast<EnumDecl>(TagDecl::getCanonicalDecl());
}
- const EnumDecl *getPreviousDeclaration() const {
- return cast_or_null<EnumDecl>(TagDecl::getPreviousDeclaration());
+ const EnumDecl *getPreviousDecl() const {
+ return cast_or_null<EnumDecl>(TagDecl::getPreviousDecl());
}
- EnumDecl *getPreviousDeclaration() {
- return cast_or_null<EnumDecl>(TagDecl::getPreviousDeclaration());
+ EnumDecl *getPreviousDecl() {
+ return cast_or_null<EnumDecl>(TagDecl::getPreviousDecl());
+ }
+
+ const EnumDecl *getMostRecentDecl() const {
+ return cast<EnumDecl>(TagDecl::getMostRecentDecl());
+ }
+ EnumDecl *getMostRecentDecl() {
+ return cast<EnumDecl>(TagDecl::getMostRecentDecl());
+ }
+
+ EnumDecl *getDefinition() const {
+ return cast_or_null<EnumDecl>(TagDecl::getDefinition());
}
static EnumDecl *Create(ASTContext &C, DeclContext *DC,
@@ -2651,7 +2751,7 @@ public:
IdentifierInfo *Id, EnumDecl *PrevDecl,
bool IsScoped, bool IsScopedUsingClassTag,
bool IsFixed);
- static EnumDecl *Create(ASTContext &C, EmptyShell Empty);
+ static EnumDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// completeDefinition - When created, the EnumDecl corresponds to a
/// forward-declared enum. This method is used to mark the
@@ -2668,14 +2768,14 @@ public:
typedef specific_decl_iterator<EnumConstantDecl> enumerator_iterator;
enumerator_iterator enumerator_begin() const {
- const EnumDecl *E = cast_or_null<EnumDecl>(getDefinition());
+ const EnumDecl *E = getDefinition();
if (!E)
E = this;
return enumerator_iterator(E->decls_begin());
}
enumerator_iterator enumerator_end() const {
- const EnumDecl *E = cast_or_null<EnumDecl>(getDefinition());
+ const EnumDecl *E = getDefinition();
if (!E)
E = this;
return enumerator_iterator(E->decls_end());
@@ -2723,7 +2823,7 @@ public:
/// \brief Returns the width in bits required to store all the
/// negative enumerators of this enum. These widths include
/// the rightmost leading 1; that is:
- ///
+ ///
/// MOST NEGATIVE ENUMERATOR PATTERN NUM NEGATIVE BITS
/// ------------------------ ------- -----------------
/// -1 1111111 1
@@ -2760,11 +2860,31 @@ public:
/// \brief Returns the enumeration (declared within the template)
/// from which this enumeration type was instantiated, or NULL if
/// this enumeration was not instantiated from any template.
- EnumDecl *getInstantiatedFromMemberEnum() const {
- return InstantiatedFrom;
+ EnumDecl *getInstantiatedFromMemberEnum() const;
+
+ /// \brief If this enumeration is a member of a specialization of a
+ /// templated class, determine what kind of template specialization
+ /// or instantiation this is.
+ TemplateSpecializationKind getTemplateSpecializationKind() const;
+
+ /// \brief For an enumeration member that was instantiated from a member
+  /// enumeration of a templated class, set the template specialization kind.
+ void setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation = SourceLocation());
+
+ /// \brief If this enumeration is an instantiation of a member enumeration of
+ /// a class template specialization, retrieves the member specialization
+ /// information.
+ MemberSpecializationInfo *getMemberSpecializationInfo() const {
+ return SpecializationInfo;
}
- void setInstantiationOfMemberEnum(EnumDecl *IF) { InstantiatedFrom = IF; }
+ /// \brief Specify that this enumeration is an instantiation of the
+ /// member enumeration ED.
+ void setInstantiationOfMemberEnum(EnumDecl *ED,
+ TemplateSpecializationKind TSK) {
+ setInstantiationOfMemberEnum(getASTContext(), ED, TSK);
+ }
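
For context, the member-specialization accessors above cover cases like the
following, where instantiating the class template also instantiates the
enumeration declared inside it; getInstantiatedFromMemberEnum() on the
EnumDecl for X<int>::E then returns the templated enum:

  template<typename T> struct X {
    enum E { a, b };
  };
  X<int>::E v = X<int>::a;  // instantiates the member enumeration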
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const EnumDecl *D) { return true; }
@@ -2791,7 +2911,7 @@ class RecordDecl : public TagDecl {
bool AnonymousStructOrUnion : 1;
/// HasObjectMember - This is true if this struct has at least one member
- /// containing an object.
+ /// containing an Objective-C object pointer type.
bool HasObjectMember : 1;
/// \brief Whether the field declarations of this record have been loaded
@@ -2810,13 +2930,20 @@ public:
static RecordDecl *Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, RecordDecl* PrevDecl = 0);
- static RecordDecl *Create(const ASTContext &C, EmptyShell Empty);
+ static RecordDecl *CreateDeserialized(const ASTContext &C, unsigned ID);
- const RecordDecl *getPreviousDeclaration() const {
- return cast_or_null<RecordDecl>(TagDecl::getPreviousDeclaration());
+ const RecordDecl *getPreviousDecl() const {
+ return cast_or_null<RecordDecl>(TagDecl::getPreviousDecl());
}
- RecordDecl *getPreviousDeclaration() {
- return cast_or_null<RecordDecl>(TagDecl::getPreviousDeclaration());
+ RecordDecl *getPreviousDecl() {
+ return cast_or_null<RecordDecl>(TagDecl::getPreviousDecl());
+ }
+
+ const RecordDecl *getMostRecentDecl() const {
+ return cast<RecordDecl>(TagDecl::getMostRecentDecl());
+ }
+ RecordDecl *getMostRecentDecl() {
+ return cast<RecordDecl>(TagDecl::getMostRecentDecl());
}
bool hasFlexibleArrayMember() const { return HasFlexibleArrayMember; }
@@ -2902,6 +3029,7 @@ private:
};
class FileScopeAsmDecl : public Decl {
+ virtual void anchor();
StringLiteral *AsmString;
SourceLocation RParenLoc;
FileScopeAsmDecl(DeclContext *DC, StringLiteral *asmstring,
@@ -2912,10 +3040,12 @@ public:
StringLiteral *Str, SourceLocation AsmLoc,
SourceLocation RParenLoc);
+ static FileScopeAsmDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
SourceLocation getAsmLoc() const { return getLocation(); }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getAsmLoc(), getRParenLoc());
}
@@ -2976,6 +3106,8 @@ private:
// FIXME: This can be packed into the bitfields in Decl.
bool IsVariadic : 1;
bool CapturesCXXThis : 1;
+ bool BlockMissingReturnType : 1;
+ bool IsConversionFromLambda : 1;
/// ParamInfo - new[]'d array of pointers to ParmVarDecls for the formal
/// parameters of this function. This is null if a prototype or if there are
/// no formals.
@@ -2992,12 +3124,14 @@ protected:
BlockDecl(DeclContext *DC, SourceLocation CaretLoc)
: Decl(Block, DC, CaretLoc), DeclContext(Block),
IsVariadic(false), CapturesCXXThis(false),
+ BlockMissingReturnType(true), IsConversionFromLambda(false),
ParamInfo(0), NumParams(0), Body(0),
SignatureAsWritten(0), Captures(0), NumCaptures(0) {}
public:
- static BlockDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L);
-
+ static BlockDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L);
+ static BlockDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
SourceLocation getCaretLocation() const { return getLocation(); }
bool isVariadic() const { return IsVariadic; }
@@ -3049,6 +3183,11 @@ public:
capture_const_iterator capture_end() const { return Captures + NumCaptures; }
bool capturesCXXThis() const { return CapturesCXXThis; }
+ bool blockMissingReturnType() const { return BlockMissingReturnType; }
+ void setBlockMissingReturnType(bool val) { BlockMissingReturnType = val; }
+
+ bool isConversionFromLambda() const { return IsConversionFromLambda; }
+ void setIsConversionFromLambda(bool val) { IsConversionFromLambda = val; }
bool capturesVariable(const VarDecl *var) const;
@@ -3057,8 +3196,8 @@ public:
const Capture *end,
bool capturesCXXThis);
- virtual SourceRange getSourceRange() const;
-
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const BlockDecl *D) { return true; }
@@ -3071,6 +3210,75 @@ public:
}
};
+/// \brief Describes a module import declaration, which makes the contents
+/// of the named module visible in the current translation unit.
+///
+/// An import declaration imports the named module (or submodule). For example:
+/// \code
+/// @__experimental_modules_import std.vector;
+/// \endcode
+///
+/// Import declarations can also be implicitly generated from #include/#import
+/// directives.
+class ImportDecl : public Decl {
+ /// \brief The imported module, along with a bit that indicates whether
+ /// we have source-location information for each identifier in the module
+ /// name.
+ ///
+ /// When the bit is false, we only have a single source location for the
+ /// end of the import declaration.
+ llvm::PointerIntPair<Module *, 1, bool> ImportedAndComplete;
+
+ /// \brief The next import in the list of imports local to the translation
+ /// unit being parsed (not loaded from an AST file).
+ ImportDecl *NextLocalImport;
+
+ friend class ASTReader;
+ friend class ASTDeclReader;
+ friend class ASTContext;
+
+ ImportDecl(DeclContext *DC, SourceLocation StartLoc, Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs);
+
+ ImportDecl(DeclContext *DC, SourceLocation StartLoc, Module *Imported,
+ SourceLocation EndLoc);
+
+ ImportDecl(EmptyShell Empty) : Decl(Import, Empty), NextLocalImport() { }
+
+public:
+ /// \brief Create a new module import declaration.
+ static ImportDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs);
+
+ /// \brief Create a new module import declaration for an implicitly-generated
+ /// import.
+ static ImportDecl *CreateImplicit(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, Module *Imported,
+ SourceLocation EndLoc);
+
+ /// \brief Create a new, deserialized module import declaration.
+ static ImportDecl *CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumLocations);
+
+ /// \brief Retrieve the module that was imported by the import declaration.
+ Module *getImportedModule() const { return ImportedAndComplete.getPointer(); }
+
+ /// \brief Retrieves the locations of each of the identifiers that make up
+ /// the complete module name in the import declaration.
+ ///
+ /// This will return an empty array if the locations of the individual
+ /// identifiers aren't available.
+ ArrayRef<SourceLocation> getIdentifierLocs() const;
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ImportDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Import; }
+};
+
+
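
A sketch of how a frontend might materialize an ImportDecl for the example
in the class comment (Ctx, TU, M, ImportLoc, StdLoc and VectorLoc are
assumed to be a valid ASTContext, TranslationUnitDecl, Module and source
locations):

  SmallVector<SourceLocation, 2> Locs;
  Locs.push_back(StdLoc);     // location of "std"
  Locs.push_back(VectorLoc);  // location of "vector"
  ImportDecl *ID = ImportDecl::Create(Ctx, TU, ImportLoc, M, Locs);
  TU->addDecl(ID);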
/// Insertion operator for diagnostics. This allows sending NamedDecl's
/// into a diagnostic with <<.
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
@@ -3079,6 +3287,12 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
DiagnosticsEngine::ak_nameddecl);
return DB;
}
+inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ const NamedDecl* ND) {
+ PD.AddTaggedVal(reinterpret_cast<intptr_t>(ND),
+ DiagnosticsEngine::ak_nameddecl);
+ return PD;
+}
template<typename decl_type>
void Redeclarable<decl_type>::setPreviousDeclaration(decl_type *PrevDecl) {
@@ -3086,26 +3300,44 @@ void Redeclarable<decl_type>::setPreviousDeclaration(decl_type *PrevDecl) {
// and Redeclarable to be defined.
decl_type *First;
-
+
if (PrevDecl) {
// Point to previous. Make sure that this is actually the most recent
// redeclaration, or we can build invalid chains. If the most recent
// redeclaration is invalid, it won't be PrevDecl, but we want it anyway.
- RedeclLink = PreviousDeclLink(llvm::cast<decl_type>(
- PrevDecl->getMostRecentDeclaration()));
+ RedeclLink = PreviousDeclLink(
+ llvm::cast<decl_type>(PrevDecl->getMostRecentDecl()));
First = PrevDecl->getFirstDeclaration();
assert(First->RedeclLink.NextIsLatest() && "Expected first");
} else {
// Make this first.
First = static_cast<decl_type*>(this);
}
-
+
// First one will point to this one as latest.
First->RedeclLink = LatestDeclLink(static_cast<decl_type*>(this));
if (NamedDecl *ND = dyn_cast<NamedDecl>(static_cast<decl_type*>(this)))
ND->ClearLinkageCache();
}
+// Inline function definitions.
+
+/// \brief Check if the given decl is complete.
+///
+/// We use this function to break a cycle between the inline definitions in
+/// Type.h and Decl.h.
+inline bool IsEnumDeclComplete(EnumDecl *ED) {
+ return ED->isComplete();
+}
+
+/// \brief Check if the given decl is scoped.
+///
+/// We use this function to break a cycle between the inline definitions in
+/// Type.h and Decl.h.
+inline bool IsEnumDeclScoped(EnumDecl *ED) {
+ return ED->isScoped();
+}
+
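
The point of these helpers is that Type.h can call them through a forward
declaration of EnumDecl without including Decl.h. A hypothetical consumer on
the Type.h side (isCompleteEnumType is an illustrative name, not Clang API):

  // EnumDecl is only forward-declared in Type.h, so inline code there
  // defers to the helper whose definition lives in Decl.h.
  inline bool isCompleteEnumType(const EnumType *ET) {
    return IsEnumDeclComplete(ET->getDecl());
  }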
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
index 9f29411..4c675aed 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
@@ -17,8 +17,9 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/Specifiers.h"
-#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PrettyStackTrace.h"
namespace clang {
class DeclContext;
@@ -98,7 +99,7 @@ public:
/// identifiers. C++ describes lookup completely differently:
/// certain lookups merely "ignore" certain kinds of declarations,
/// usually based on whether the declaration is of a type, etc.
- ///
+ ///
/// These are meant as bitmasks, so that searches in
/// C++ can look into the "tag" namespace during ordinary lookup.
///
@@ -180,12 +181,28 @@ public:
OBJC_TQ_Oneway = 0x20
};
-private:
- /// NextDeclInContext - The next declaration within the same lexical
+protected:
+ // Enumeration values used in the bits stored in NextInContextAndBits.
+ enum {
+ /// \brief Whether this declaration is a top-level declaration (function,
+ /// global variable, etc.) that is lexically inside an objc container
+ /// definition.
+ TopLevelDeclInObjCContainerFlag = 0x01,
+
+ /// \brief Whether this declaration is private to the module in which it was
+ /// defined.
+ ModulePrivateFlag = 0x02
+ };
+
+ /// \brief The next declaration within the same lexical
/// DeclContext. These pointers form the linked list that is
/// traversed via DeclContext's decls_begin()/decls_end().
- Decl *NextDeclInContext;
+ ///
+ /// The extra two bits are used for the TopLevelDeclInObjCContainer and
+ /// ModulePrivate bits.
+ llvm::PointerIntPair<Decl *, 2, unsigned> NextInContextAndBits;
+private:
friend class DeclContext;
struct MultipleDC {
@@ -243,7 +260,10 @@ private:
/// evaluated context or not, e.g. functions used in uninstantiated templates
/// are regarded as "referenced" but not "used".
unsigned Referenced : 1;
-
+
+ /// \brief Whether statistic collection is enabled.
+ static bool StatisticsEnabled;
+
protected:
/// Access - Used by C++ decls for the access specifier.
// NOTE: VC++ treats enums as signed, avoid using the AccessSpecifier enum
@@ -252,14 +272,12 @@ protected:
/// \brief Whether this declaration was loaded from an AST file.
unsigned FromASTFile : 1;
-
- /// ChangedAfterLoad - if this declaration has changed since being loaded
- unsigned ChangedAfterLoad : 1;
-
- /// \brief Whether this declaration is private to the module in which it was
- /// defined.
- unsigned ModulePrivate : 1;
+ /// \brief Whether this declaration is hidden from normal name lookup, e.g.,
+  /// because it was loaded from an AST file and is either module-private or
+  /// because its submodule has not been made visible.
+ unsigned Hidden : 1;
+
/// IdentifierNamespace - This specifies what IDNS_* namespace this lives in.
unsigned IdentifierNamespace : 12;
@@ -267,14 +285,15 @@ protected:
///
/// This field is only valid for NamedDecls subclasses.
mutable unsigned HasCachedLinkage : 1;
-
+
/// \brief If \c HasCachedLinkage, the linkage of this declaration.
///
/// This field is only valid for NamedDecls subclasses.
mutable unsigned CachedLinkage : 2;
-
+
friend class ASTDeclWriter;
friend class ASTDeclReader;
+ friend class ASTReader;
private:
void CheckAccessDeclContext() const;
@@ -282,38 +301,52 @@ private:
protected:
Decl(Kind DK, DeclContext *DC, SourceLocation L)
- : NextDeclInContext(0), DeclCtx(DC),
+ : NextInContextAndBits(), DeclCtx(DC),
Loc(L), DeclKind(DK), InvalidDecl(0),
HasAttrs(false), Implicit(false), Used(false), Referenced(false),
- Access(AS_none), FromASTFile(0), ChangedAfterLoad(false),
- ModulePrivate(0),
+ Access(AS_none), FromASTFile(0), Hidden(0),
IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
- HasCachedLinkage(0)
+ HasCachedLinkage(0)
{
- if (Decl::CollectingStats()) add(DK);
+ if (StatisticsEnabled) add(DK);
}
Decl(Kind DK, EmptyShell Empty)
- : NextDeclInContext(0), DeclKind(DK), InvalidDecl(0),
+ : NextInContextAndBits(), DeclKind(DK), InvalidDecl(0),
HasAttrs(false), Implicit(false), Used(false), Referenced(false),
- Access(AS_none), FromASTFile(0), ChangedAfterLoad(false),
- ModulePrivate(0),
+ Access(AS_none), FromASTFile(0), Hidden(0),
IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
HasCachedLinkage(0)
{
- if (Decl::CollectingStats()) add(DK);
+ if (StatisticsEnabled) add(DK);
}
virtual ~Decl();
+ /// \brief Allocate memory for a deserialized declaration.
+ ///
+ /// This routine must be used to allocate memory for any declaration that is
+ /// deserialized from a module file.
+ ///
+ /// \param Context The context in which we will allocate memory.
+ /// \param ID The global ID of the deserialized declaration.
+ /// \param Size The size of the allocated object.
+ static void *AllocateDeserializedDecl(const ASTContext &Context,
+ unsigned ID,
+ unsigned Size);
+
public:
/// \brief Source range that this declaration covers.
- virtual SourceRange getSourceRange() const {
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getLocation(), getLocation());
}
- SourceLocation getLocStart() const { return getSourceRange().getBegin(); }
- SourceLocation getLocEnd() const { return getSourceRange().getEnd(); }
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return getSourceRange().getBegin();
+ }
+ SourceLocation getLocEnd() const LLVM_READONLY {
+ return getSourceRange().getEnd();
+ }
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
@@ -321,8 +354,8 @@ public:
Kind getKind() const { return static_cast<Kind>(DeclKind); }
const char *getDeclKindName() const;
- Decl *getNextDeclInContext() { return NextDeclInContext; }
- const Decl *getNextDeclInContext() const { return NextDeclInContext; }
+ Decl *getNextDeclInContext() { return NextInContextAndBits.getPointer(); }
+  const Decl *getNextDeclInContext() const { return NextInContextAndBits.getPointer(); }
DeclContext *getDeclContext() {
if (isInSemaDC())
@@ -347,7 +380,7 @@ public:
bool isInAnonymousNamespace() const;
- ASTContext &getASTContext() const;
+ ASTContext &getASTContext() const LLVM_READONLY;
void setAccess(AccessSpecifier AS) {
Access = AS;
@@ -364,7 +397,9 @@ public:
}
bool hasAttrs() const { return HasAttrs; }
- void setAttrs(const AttrVec& Attrs);
+ void setAttrs(const AttrVec& Attrs) {
+ return setAttrsImpl(Attrs, getASTContext());
+ }
AttrVec &getAttrs() {
return const_cast<AttrVec&>(const_cast<const Decl*>(this)->getAttrs());
}
@@ -389,11 +424,11 @@ public:
attr_iterator attr_end() const {
return hasAttrs() ? getAttrs().end() : 0;
}
-
+
template <typename T>
void dropAttr() {
if (!HasAttrs) return;
-
+
AttrVec &Attrs = getAttrs();
for (unsigned i = 0, e = Attrs.size(); i != e; /* in loop */) {
if (isa<T>(Attrs[i])) {
@@ -406,7 +441,7 @@ public:
if (Attrs.empty())
HasAttrs = false;
}
-
+
template <typename T>
specific_attr_iterator<T> specific_attr_begin() const {
return specific_attr_iterator<T>(attr_begin());
@@ -455,6 +490,48 @@ public:
void setReferenced(bool R = true) { Referenced = R; }
+ /// \brief Whether this declaration is a top-level declaration (function,
+ /// global variable, etc.) that is lexically inside an objc container
+ /// definition.
+ bool isTopLevelDeclInObjCContainer() const {
+ return NextInContextAndBits.getInt() & TopLevelDeclInObjCContainerFlag;
+ }
+
+ void setTopLevelDeclInObjCContainer(bool V = true) {
+ unsigned Bits = NextInContextAndBits.getInt();
+ if (V)
+ Bits |= TopLevelDeclInObjCContainerFlag;
+ else
+ Bits &= ~TopLevelDeclInObjCContainerFlag;
+ NextInContextAndBits.setInt(Bits);
+ }
+
+protected:
+ /// \brief Whether this declaration was marked as being private to the
+ /// module in which it was defined.
+ bool isModulePrivate() const {
+ return NextInContextAndBits.getInt() & ModulePrivateFlag;
+ }
+
+ /// \brief Specify whether this declaration was marked as being private
+ /// to the module in which it was defined.
+ void setModulePrivate(bool MP = true) {
+ unsigned Bits = NextInContextAndBits.getInt();
+ if (MP)
+ Bits |= ModulePrivateFlag;
+ else
+ Bits &= ~ModulePrivateFlag;
+ NextInContextAndBits.setInt(Bits);
+ }
+
+ /// \brief Set the owning module ID.
+ void setOwningModuleID(unsigned ID) {
+ assert(isFromASTFile() && "Only works on a deserialized declaration");
+ *((unsigned*)this - 2) = ID;
+ }
+
+public:
+
/// \brief Determine the availability of the given declaration.
///
/// This routine will determine the most restrictive availability of
@@ -504,20 +581,24 @@ public:
/// \brief Determine whether this declaration came from an AST file (such as
/// a precompiled header or module) rather than having been parsed.
bool isFromASTFile() const { return FromASTFile; }
-
- /// \brief Query whether this declaration was changed in a significant way
- /// since being loaded from an AST file.
- ///
- /// In an epic violation of layering, what is "significant" is entirely
- /// up to the serialization system, but implemented in AST and Sema.
- bool isChangedSinceDeserialization() const { return ChangedAfterLoad; }
- /// \brief Mark this declaration as having changed since deserialization, or
- /// reset the flag.
- void setChangedSinceDeserialization(bool Changed) {
- ChangedAfterLoad = Changed;
+ /// \brief Retrieve the global declaration ID associated with this
+  /// declaration, which specifies where in the AST file it was deserialized from.
+ unsigned getGlobalID() const {
+ if (isFromASTFile())
+ return *((const unsigned*)this - 1);
+ return 0;
}
-
+
+ /// \brief Retrieve the global ID of the module that owns this particular
+ /// declaration.
+ unsigned getOwningModuleID() const {
+ if (isFromASTFile())
+ return *((const unsigned*)this - 2);
+
+ return 0;
+ }
+
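
The negative indexing in getGlobalID() and getOwningModuleID() relies on
AllocateDeserializedDecl() placing two unsigneds immediately before the
declaration object. A sketch of the assumed layout (AllocateWithIDs is an
illustrative stand-in, not the real allocator, which goes through the
ASTContext):

  // [ owning module ID ][ global decl ID ][ Decl object ... ]
  //       this - 2           this - 1         this
  void *AllocateWithIDs(unsigned GlobalID, size_t Size) {
    unsigned *Mem = static_cast<unsigned *>(
        ::operator new(Size + 2 * sizeof(unsigned)));
    Mem[0] = 0;         // owning module ID, set via setOwningModuleID()
    Mem[1] = GlobalID;  // global declaration ID
    return Mem + 2;     // the Decl is constructed at this address
  }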
unsigned getIdentifierNamespace() const {
return IdentifierNamespace;
}
@@ -587,7 +668,7 @@ public:
/// \brief Whether this particular Decl is a canonical one.
bool isCanonicalDecl() const { return getCanonicalDecl() == this; }
-
+
protected:
/// \brief Returns the next redeclaration or itself if this is the only decl.
///
@@ -595,6 +676,14 @@ protected:
/// Decl::redecl_iterator can iterate over them.
virtual Decl *getNextRedeclaration() { return this; }
+ /// \brief Implementation of getPreviousDecl(), to be overridden by any
+ /// subclass that has a redeclaration chain.
+ virtual Decl *getPreviousDeclImpl() { return 0; }
+
+ /// \brief Implementation of getMostRecentDecl(), to be overridden by any
+ /// subclass that has a redeclaration chain.
+ virtual Decl *getMostRecentDeclImpl() { return this; }
+
public:
/// \brief Iterates through all the redeclarations of the same decl.
class redecl_iterator {
@@ -645,6 +734,26 @@ public:
}
redecl_iterator redecls_end() const { return redecl_iterator(); }
+ /// \brief Retrieve the previous declaration that declares the same entity
+ /// as this declaration, or NULL if there is no previous declaration.
+ Decl *getPreviousDecl() { return getPreviousDeclImpl(); }
+
+  /// \brief Retrieve the previous declaration that declares the same entity
+  /// as this declaration, or NULL if there is no previous declaration.
+ const Decl *getPreviousDecl() const {
+ return const_cast<Decl *>(this)->getPreviousDeclImpl();
+ }
+
+ /// \brief Retrieve the most recent declaration that declares the same entity
+ /// as this declaration (which may be this declaration).
+ Decl *getMostRecentDecl() { return getMostRecentDeclImpl(); }
+
+ /// \brief Retrieve the most recent declaration that declares the same entity
+ /// as this declaration (which may be this declaration).
+ const Decl *getMostRecentDecl() const {
+ return const_cast<Decl *>(this)->getMostRecentDeclImpl();
+ }
+
/// getBody - If this Decl represents a declaration for a body of code,
/// such as a function or method definition, this method returns the
/// top-level Stmt* of that body. Otherwise this method returns null.
@@ -660,7 +769,7 @@ public:
// global temp stats (until we have a per-module visitor)
static void add(Kind k);
- static bool CollectingStats(bool Enable = false);
+ static void EnableStatistics();
static void PrintStats();
/// isTemplateParameter - Determines whether this declaration is a
@@ -673,7 +782,7 @@ public:
/// \brief Whether this declaration is a parameter pack.
bool isParameterPack() const;
-
+
/// \brief returns true if this declaration is a template
bool isTemplateDecl() const;
@@ -722,7 +831,7 @@ public:
unsigned mask
= (IdentifierNamespace & (IDNS_TagFriend | IDNS_OrdinaryFriend));
if (!mask) return FOK_None;
- return (IdentifierNamespace & (IDNS_Tag | IDNS_Ordinary) ?
+ return (IdentifierNamespace & (IDNS_Tag | IDNS_Ordinary) ?
FOK_Declared : FOK_Undeclared);
}
@@ -747,17 +856,31 @@ public:
static void printGroup(Decl** Begin, unsigned NumDecls,
raw_ostream &Out, const PrintingPolicy &Policy,
unsigned Indentation = 0);
- void dump() const;
- void dumpXML() const;
+ LLVM_ATTRIBUTE_USED void dump() const;
+ LLVM_ATTRIBUTE_USED void dumpXML() const;
void dumpXML(raw_ostream &OS) const;
private:
const Attr *getAttrsImpl() const;
+ void setAttrsImpl(const AttrVec& Attrs, ASTContext &Ctx);
+ void setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
+ ASTContext &Ctx);
protected:
ASTMutationListener *getASTMutationListener() const;
};
+/// \brief Determine whether two declarations declare the same entity.
+inline bool declaresSameEntity(const Decl *D1, const Decl *D2) {
+ if (!D1 || !D2)
+ return false;
+
+ if (D1 == D2)
+ return true;
+
+ return D1->getCanonicalDecl() == D2->getCanonicalDecl();
+}
+
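
A usage sketch for the new helper, assuming D1 and D2 are Decl pointers
obtained from two different lookups:

  // D1 and D2 may be different redeclarations of one entity; comparing
  // canonical declarations makes the check redeclaration-insensitive.
  if (declaresSameEntity(D1, D2)) {
    // ... treat them as declaring the same entity ...
  }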
/// PrettyStackTraceDecl - If a crash occurs, indicate that it happened when
/// doing something to a specific decl.
class PrettyStackTraceDecl : public llvm::PrettyStackTraceEntry {
@@ -826,8 +949,11 @@ class DeclContext {
/// \brief Pointer to the data structure used to lookup declarations
/// within this context (or a DependentStoredDeclsMap if this is a
- /// dependent context).
- mutable StoredDeclsMap *LookupPtr;
+ /// dependent context), and a bool indicating whether we have lazily
+ /// omitted any declarations from the map. We maintain the invariant
+ /// that, if the map contains an entry for a DeclarationName, then it
+ /// contains all relevant entries for that name.
+ mutable llvm::PointerIntPair<StoredDeclsMap*, 1, bool> LookupPtr;
protected:
/// FirstDecl - The first declaration stored within this declaration
@@ -841,16 +967,17 @@ protected:
mutable Decl *LastDecl;
friend class ExternalASTSource;
+ friend class ASTWriter;
/// \brief Build up a chain of declarations.
///
/// \returns the first/last pair of declarations.
static std::pair<Decl *, Decl *>
- BuildDeclChain(const SmallVectorImpl<Decl*> &Decls, bool FieldsAlreadyLoaded);
+ BuildDeclChain(ArrayRef<Decl*> Decls, bool FieldsAlreadyLoaded);
DeclContext(Decl::Kind K)
: DeclKind(K), ExternalLexicalStorage(false),
- ExternalVisibleStorage(false), LookupPtr(0), FirstDecl(0),
+ ExternalVisibleStorage(false), LookupPtr(0, false), FirstDecl(0),
LastDecl(0) { }
public:
@@ -886,11 +1013,11 @@ public:
}
DeclContext *getLookupParent();
-
+
const DeclContext *getLookupParent() const {
return const_cast<DeclContext*>(this)->getLookupParent();
}
-
+
ASTContext &getParentASTContext() const {
return cast<Decl>(this)->getASTContext();
}
@@ -974,6 +1101,14 @@ public:
/// declaration context DC.
bool Encloses(const DeclContext *DC) const;
+ /// \brief Find the nearest non-closure ancestor of this context,
+ /// i.e. the innermost semantic parent of this context which is not
+ /// a closure. A context may be its own non-closure ancestor.
+ DeclContext *getNonClosureAncestor();
+ const DeclContext *getNonClosureAncestor() const {
+ return const_cast<DeclContext*>(this)->getNonClosureAncestor();
+ }
+
/// getPrimaryContext - There may be many different
/// declarations of the same entity (including forward declarations
/// of classes, multiple definitions of namespaces, etc.), each with
@@ -1007,24 +1142,30 @@ public:
/// inline, its enclosing namespace, recursively.
bool InEnclosingNamespaceSetOf(const DeclContext *NS) const;
- /// getNextContext - If this is a DeclContext that may have other
- /// DeclContexts that are semantically connected but syntactically
- /// different, such as C++ namespaces, this routine retrieves the
- /// next DeclContext in the link. Iteration through the chain of
- /// DeclContexts should begin at the primary DeclContext and
- /// continue until this function returns NULL. For example, given:
- /// @code
+  /// \brief Collects all of the declaration contexts that are semantically
+ /// connected to this declaration context.
+ ///
+ /// For declaration contexts that have multiple semantically connected but
+ /// syntactically distinct contexts, such as C++ namespaces, this routine
+ /// retrieves the complete set of such declaration contexts in source order.
+ /// For example, given:
+ ///
+ /// \code
/// namespace N {
/// int x;
/// }
/// namespace N {
/// int y;
/// }
- /// @endcode
- /// The first occurrence of namespace N will be the primary
- /// DeclContext. Its getNextContext will return the second
- /// occurrence of namespace N.
- DeclContext *getNextContext();
+ /// \endcode
+ ///
+ /// The \c Contexts parameter will contain both definitions of N.
+ ///
+ /// \param Contexts Will be cleared and set to the set of declaration
+  /// contexts that are semantically connected to this declaration context,
+ /// in source order, including this context (which may be the only result,
+ /// for non-namespace contexts).
+ void collectAllContexts(llvm::SmallVectorImpl<DeclContext *> &Contexts);
/// decl_iterator - Iterates through the declarations stored
/// within this context.
@@ -1133,13 +1274,13 @@ public:
return tmp;
}
- friend bool
- operator==(const specific_decl_iterator& x, const specific_decl_iterator& y) {
+ friend bool operator==(const specific_decl_iterator& x,
+ const specific_decl_iterator& y) {
return x.Current == y.Current;
}
- friend bool
- operator!=(const specific_decl_iterator& x, const specific_decl_iterator& y) {
+ friend bool operator!=(const specific_decl_iterator& x,
+ const specific_decl_iterator& y) {
return x.Current != y.Current;
}
};
@@ -1207,13 +1348,13 @@ public:
return tmp;
}
- friend bool
- operator==(const filtered_decl_iterator& x, const filtered_decl_iterator& y) {
+ friend bool operator==(const filtered_decl_iterator& x,
+ const filtered_decl_iterator& y) {
return x.Current == y.Current;
}
- friend bool
- operator!=(const filtered_decl_iterator& x, const filtered_decl_iterator& y) {
+ friend bool operator!=(const filtered_decl_iterator& x,
+ const filtered_decl_iterator& y) {
return x.Current != y.Current;
}
};
@@ -1232,6 +1373,16 @@ public:
/// semantic context via makeDeclVisibleInContext.
void addDecl(Decl *D);
+ /// @brief Add the declaration D into this context, but suppress
+ /// searches for external declarations with the same name.
+ ///
+ /// Although analogous in function to addDecl, this removes an
+ /// important check. This is only useful if the Decl is being
+ /// added in response to an external search; in all other cases,
+ /// addDecl() is the right function to use.
+ /// See the ASTImporter for use cases.
+ void addDeclInternal(Decl *D);
+
/// @brief Add the declaration D to this context without modifying
/// any lookup tables.
///
@@ -1265,12 +1416,12 @@ public:
/// \brief A simplistic name lookup mechanism that performs name lookup
/// into this declaration context without consulting the external source.
///
- /// This function should almost never be used, because it subverts the
+ /// This function should almost never be used, because it subverts the
/// usual relationship between a DeclContext and the external source.
/// See the ASTImporter for the (few, but important) use cases.
- void localUncachedLookup(DeclarationName Name,
+ void localUncachedLookup(DeclarationName Name,
llvm::SmallVectorImpl<NamedDecl *> &Results);
-
+
/// @brief Makes a declaration visible within this context.
///
/// This routine makes the declaration D visible to name lookup
@@ -1285,11 +1436,15 @@ public:
/// visible from this context, as determined by
/// NamedDecl::declarationReplaces, the previous declaration will be
/// replaced with D.
- ///
- /// @param Recoverable true if it's okay to not add this decl to
- /// the lookup tables because it can be easily recovered by walking
- /// the declaration chains.
- void makeDeclVisibleInContext(NamedDecl *D, bool Recoverable = true);
+ void makeDeclVisibleInContext(NamedDecl *D);
+
+ /// all_lookups_iterator - An iterator that provides a view over the results
+ /// of looking up every possible name.
+ class all_lookups_iterator;
+
+ all_lookups_iterator lookups_begin() const;
+
+ all_lookups_iterator lookups_end() const;
/// udir_iterator - Iterates through the using-directives stored
/// within this context.
@@ -1315,7 +1470,11 @@ public:
// Low-level accessors
/// \brief Retrieve the internal representation of the lookup structure.
- StoredDeclsMap* getLookupPtr() const { return LookupPtr; }
+ /// This may omit some names if we are lazily building the structure.
+ StoredDeclsMap *getLookupPtr() const { return LookupPtr.getPointer(); }
+
+ /// \brief Ensure the lookup structure is fully-built and return it.
+ StoredDeclsMap *buildLookup();
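
A sketch of the intended division of labor, assuming DC is a DeclContext*:

  // Use buildLookup() when the complete name-to-decls map is required;
  // getLookupPtr() may still be missing lazily-omitted names.
  if (StoredDeclsMap *Map = DC->buildLookup()) {
    // Every name visible in DC now has its entry in Map.
  }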
/// \brief Whether this DeclContext has external storage containing
/// additional declarations that are lexically in this context.
@@ -1340,9 +1499,10 @@ public:
/// \brief Determine whether the given declaration is stored in the list of
/// declarations lexically within this context.
bool isDeclInLexicalTraversal(const Decl *D) const {
- return D && (D->NextDeclInContext || D == FirstDecl || D == LastDecl);
+ return D && (D->NextInContextAndBits.getPointer() || D == FirstDecl ||
+ D == LastDecl);
}
-
+
static bool classof(const Decl *D);
static bool classof(const DeclContext *D) { return true; }
#define DECL(NAME, BASE)
@@ -1350,16 +1510,26 @@ public:
static bool classof(const NAME##Decl *D) { return true; }
#include "clang/AST/DeclNodes.inc"
- void dumpDeclContext() const;
+ LLVM_ATTRIBUTE_USED void dumpDeclContext() const;
private:
void LoadLexicalDeclsFromExternalStorage() const;
+ /// @brief Makes a declaration visible within this context, but
+ /// suppresses searches for external declarations with the same
+ /// name.
+ ///
+ /// Analogous to makeDeclVisibleInContext, but for the exclusive
+ /// use of addDeclInternal().
+ void makeDeclVisibleInContextInternal(NamedDecl *D);
+
friend class DependentDiagnostic;
StoredDeclsMap *CreateStoredDeclsMap(ASTContext &C) const;
- void buildLookup(DeclContext *DCtx);
- void makeDeclVisibleInContextImpl(NamedDecl *D);
+ void buildLookupImpl(DeclContext *DCtx);
+ void makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
+ bool Rediscoverable);
+ void makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal);
};
inline bool Decl::isTemplateParameter() const {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
index 7e60773..7f3ec4c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
@@ -16,10 +16,14 @@
#define LLVM_CLANG_AST_DECLCXX_H
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/Decl.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
@@ -36,7 +40,8 @@ class CXXMemberLookupCriteria;
class CXXFinalOverriderMap;
class CXXIndirectPrimaryBaseSet;
class FriendDecl;
-
+class LambdaExpr;
+
/// \brief Represents any kind of function declaration, whether it is a
/// concrete function or a function template.
class AnyFunctionDecl {
@@ -104,6 +109,7 @@ namespace clang {
/// Also note that this class has nothing to do with so-called
/// "access declarations" (C++98 11.3 [class.access.dcl]).
class AccessSpecDecl : public Decl {
+ virtual void anchor();
/// ColonLoc - The location of the ':'.
SourceLocation ColonLoc;
@@ -125,7 +131,7 @@ public:
/// setColonLoc - Sets the location of the colon.
void setColonLoc(SourceLocation CLoc) { ColonLoc = CLoc; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getAccessSpecifierLoc(), getColonLoc());
}
@@ -134,9 +140,7 @@ public:
SourceLocation ColonLoc) {
return new (C) AccessSpecDecl(AS, DC, ASLoc, ColonLoc);
}
- static AccessSpecDecl *Create(ASTContext &C, EmptyShell Empty) {
- return new (C) AccessSpecDecl(Empty);
- }
+ static AccessSpecDecl *CreateDeserialized(ASTContext &C, unsigned ID);
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -170,7 +174,7 @@ class CXXBaseSpecifier {
/// \brief The source location of the ellipsis, if this is a pack
/// expansion.
SourceLocation EllipsisLoc;
-
+
/// Virtual - Whether this is a virtual base class or not.
bool Virtual : 1;
@@ -200,12 +204,14 @@ public:
CXXBaseSpecifier(SourceRange R, bool V, bool BC, AccessSpecifier A,
TypeSourceInfo *TInfo, SourceLocation EllipsisLoc)
- : Range(R), EllipsisLoc(EllipsisLoc), Virtual(V), BaseOfClass(BC),
+ : Range(R), EllipsisLoc(EllipsisLoc), Virtual(V), BaseOfClass(BC),
Access(A), InheritConstructors(false), BaseTypeInfo(TInfo) { }
/// getSourceRange - Retrieves the source range that contains the
/// entire base specifier.
- SourceRange getSourceRange() const { return Range; }
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+ SourceLocation getLocStart() const LLVM_READONLY { return Range.getBegin(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return Range.getEnd(); }
/// isVirtual - Determines whether the base class is a virtual base
/// class (or not).
@@ -214,7 +220,7 @@ public:
/// \brief Determine whether this base class is a base of a class declared
/// with the 'class' keyword (vs. one declared with the 'struct' keyword).
bool isBaseOfClass() const { return BaseOfClass; }
-
+
/// \brief Determine whether this base specifier is a pack expansion.
bool isPackExpansion() const { return EllipsisLoc.isValid(); }
@@ -319,7 +325,8 @@ class CXXRecordDecl : public RecordDecl {
/// * has no non-static data members of type non-standard-layout class (or
/// array of such types) or reference,
/// * has no virtual functions (10.3) and no virtual base classes (10.1),
- /// * has the same access control (Clause 11) for all non-static data members
+ /// * has the same access control (Clause 11) for all non-static data
+ /// members
/// * has no non-standard-layout base classes,
/// * either has no non-static data members in the most derived class and at
/// most one base class with non-static data members, or has no base
@@ -346,7 +353,10 @@ class CXXRecordDecl : public RecordDecl {
/// \brief True if this class (or any subobject) has mutable fields.
bool HasMutableFields : 1;
-
+
+ /// \brief True if there are no non-field members declared by the user.
+ bool HasOnlyCMembers : 1;
+
/// HasTrivialDefaultConstructor - True when, if this class has a default
/// constructor, this default constructor is trivial.
///
@@ -364,10 +374,34 @@ class CXXRecordDecl : public RecordDecl {
bool HasTrivialDefaultConstructor : 1;
/// HasConstexprNonCopyMoveConstructor - True when this class has at least
- /// one constexpr constructor which is neither the copy nor move
- /// constructor.
+ /// one user-declared constexpr constructor which is neither the copy nor
+ /// move constructor.
bool HasConstexprNonCopyMoveConstructor : 1;
+ /// DefaultedDefaultConstructorIsConstexpr - True if a defaulted default
+ /// constructor for this class would be constexpr.
+ bool DefaultedDefaultConstructorIsConstexpr : 1;
+
+ /// DefaultedCopyConstructorIsConstexpr - True if a defaulted copy
+ /// constructor for this class would be constexpr.
+ bool DefaultedCopyConstructorIsConstexpr : 1;
+
+ /// DefaultedMoveConstructorIsConstexpr - True if a defaulted move
+ /// constructor for this class would be constexpr.
+ bool DefaultedMoveConstructorIsConstexpr : 1;
+
+ /// HasConstexprDefaultConstructor - True if this class has a constexpr
+ /// default constructor (either user-declared or implicitly declared).
+ bool HasConstexprDefaultConstructor : 1;
+
+ /// HasConstexprCopyConstructor - True if this class has a constexpr copy
+ /// constructor (either user-declared or implicitly declared).
+ bool HasConstexprCopyConstructor : 1;
+
+ /// HasConstexprMoveConstructor - True if this class has a constexpr move
+ /// constructor (either user-declared or implicitly declared).
+ bool HasConstexprMoveConstructor : 1;
+
/// HasTrivialCopyConstructor - True when this class has a trivial copy
/// constructor.
///
@@ -438,8 +472,13 @@ class CXXRecordDecl : public RecordDecl {
/// type (or array thereof), each such class has a trivial destructor.
bool HasTrivialDestructor : 1;
+ /// HasIrrelevantDestructor - True when this class has a destructor with no
+ /// semantic effect.
+ bool HasIrrelevantDestructor : 1;
+
/// HasNonLiteralTypeFieldsOrBases - True when this class contains at least
- /// one non-static data member or base class of non literal type.
+ /// one non-static data member or base class of non-literal or volatile
+ /// type.
bool HasNonLiteralTypeFieldsOrBases : 1;
/// ComputedVisibleConversions - True when visible conversion functions are
@@ -458,13 +497,13 @@ class CXXRecordDecl : public RecordDecl {
/// \brief Whether we have already declared the move constructor.
bool DeclaredMoveConstructor : 1;
-
+
/// \brief Whether we have already declared the copy-assignment operator.
bool DeclaredCopyAssignment : 1;
/// \brief Whether we have already declared the move-assignment operator.
bool DeclaredMoveAssignment : 1;
-
+
/// \brief Whether we have already declared a destructor within the class.
bool DeclaredDestructor : 1;
@@ -476,9 +515,12 @@ class CXXRecordDecl : public RecordDecl {
/// declared but would have been deleted.
bool FailedImplicitMoveAssignment : 1;
+ /// \brief Whether this class describes a C++ lambda.
+ bool IsLambda : 1;
+
/// NumBases - The number of base class specifiers in Bases.
unsigned NumBases;
-
+
/// NumVBases - The number of virtual base class specifiers in VBases.
unsigned NumVBases;
@@ -510,17 +552,59 @@ class CXXRecordDecl : public RecordDecl {
/// in reverse order.
FriendDecl *FirstFriend;
- /// \brief Retrieve the set of direct base classes.
+ /// \brief Retrieve the set of direct base classes.
CXXBaseSpecifier *getBases() const {
return Bases.get(Definition->getASTContext().getExternalSource());
}
- /// \brief Retrieve the set of virtual base classes.
+ /// \brief Retrieve the set of virtual base classes.
CXXBaseSpecifier *getVBases() const {
return VBases.get(Definition->getASTContext().getExternalSource());
}
} *DefinitionData;
+ /// \brief Describes a C++ closure type (generated by a lambda expression).
+ struct LambdaDefinitionData : public DefinitionData {
+ typedef LambdaExpr::Capture Capture;
+
+ LambdaDefinitionData(CXXRecordDecl *D, bool Dependent)
+ : DefinitionData(D), Dependent(Dependent), NumCaptures(0),
+ NumExplicitCaptures(0), ManglingNumber(0), ContextDecl(0), Captures(0)
+ {
+ IsLambda = true;
+ }
+
+ /// \brief Whether this lambda is known to be dependent, even if its
+ /// context isn't dependent.
+ ///
+ /// A lambda with a non-dependent context can be dependent if it occurs
+ /// within the default argument of a function template, because the
+ /// lambda will have been created with the enclosing context as its
+ /// declaration context, rather than the function. This is an unfortunate
+ /// artifact of having to parse the default arguments before the function
+ /// declaration itself exists.
+ unsigned Dependent : 1;
+
+ /// \brief The number of captures in this lambda.
+ unsigned NumCaptures : 16;
+
+ /// \brief The number of explicit captures in this lambda.
+ unsigned NumExplicitCaptures : 15;
+
+ /// \brief The number used to indicate this lambda expression for name
+ /// mangling in the Itanium C++ ABI.
+ unsigned ManglingNumber;
+
+ /// \brief The declaration that provides context for this lambda, if the
+ /// actual DeclContext does not suffice. This is used for lambdas that
+ /// occur within default arguments of function parameters within the class
+ /// or within a data member initializer.
+ Decl *ContextDecl;
+
+ /// \brief The list of captures, both explicit and implicit, for this
+ /// lambda.
+ Capture *Captures;
+ };
+
struct DefinitionData &data() {
assert(DefinitionData && "queried property of class with no definition");
return *DefinitionData;
@@ -530,6 +614,13 @@ class CXXRecordDecl : public RecordDecl {
assert(DefinitionData && "queried property of class with no definition");
return *DefinitionData;
}
+
+ struct LambdaDefinitionData &getLambdaData() const {
+ assert(DefinitionData && "queried property of lambda with no definition");
+ assert(DefinitionData->IsLambda &&
+ "queried lambda property of non-lambda class");
+ return static_cast<LambdaDefinitionData &>(*DefinitionData);
+ }
/// \brief The template or declaration that this declaration
/// describes or was instantiated from, respectively.
@@ -538,23 +629,26 @@ class CXXRecordDecl : public RecordDecl {
/// declarations that describe a class template, this will be a
/// pointer to a ClassTemplateDecl. For member
/// classes of class template specializations, this will be the
- /// MemberSpecializationInfo referring to the member class that was
+ /// MemberSpecializationInfo referring to the member class that was
/// instantiated or specialized.
llvm::PointerUnion<ClassTemplateDecl*, MemberSpecializationInfo*>
TemplateOrInstantiation;
friend class DeclContext;
-
+ friend class LambdaExpr;
+
/// \brief Notify the class that member has been added.
///
- /// This routine helps maintain information about the class based on which
+ /// This routine helps maintain information about the class based on which
/// members have been added. It will be invoked by DeclContext::addDecl()
/// whenever a member is added to this record.
void addedMember(Decl *D);
void markedVirtualFunctionPure();
friend void FunctionDecl::setPure(bool);
-
+
+ friend class ASTNodeImporter;
+
protected:
CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
@@ -585,12 +679,19 @@ public:
virtual const CXXRecordDecl *getCanonicalDecl() const {
return cast<CXXRecordDecl>(RecordDecl::getCanonicalDecl());
}
-
- const CXXRecordDecl *getPreviousDeclaration() const {
- return cast_or_null<CXXRecordDecl>(RecordDecl::getPreviousDeclaration());
+
+ const CXXRecordDecl *getPreviousDecl() const {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getPreviousDecl());
}
- CXXRecordDecl *getPreviousDeclaration() {
- return cast_or_null<CXXRecordDecl>(RecordDecl::getPreviousDeclaration());
+ CXXRecordDecl *getPreviousDecl() {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getPreviousDecl());
+ }
+
+ const CXXRecordDecl *getMostRecentDecl() const {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getMostRecentDecl());
+ }
+ CXXRecordDecl *getMostRecentDecl() {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getMostRecentDecl());
}
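A minimal sketch of walking the redeclaration chain with the renamed accessors; firstDecl is an illustrative helper, not Clang API:

    #include "clang/AST/DeclCXX.h"

    // Walk backwards through the redeclaration chain to its first element;
    // this matches what getCanonicalDecl() returns for record declarations.
    static const clang::CXXRecordDecl *firstDecl(const clang::CXXRecordDecl *RD) {
      while (const clang::CXXRecordDecl *Prev = RD->getPreviousDecl())
        RD = Prev;
      return RD;
    }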
CXXRecordDecl *getDefinition() const {
@@ -604,7 +705,9 @@ public:
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, CXXRecordDecl* PrevDecl=0,
bool DelayTypeCreation = false);
- static CXXRecordDecl *Create(const ASTContext &C, EmptyShell Empty);
+ static CXXRecordDecl *CreateLambda(const ASTContext &C, DeclContext *DC,
+ SourceLocation Loc, bool DependentLambda);
+ static CXXRecordDecl *CreateDeserialized(const ASTContext &C, unsigned ID);
bool isDynamicClass() const {
return data().Polymorphic || data().NumVBases != 0;
@@ -704,7 +807,7 @@ public:
///
/// This value is used for lazy creation of default constructors.
bool needsImplicitDefaultConstructor() const {
- return !data().UserDeclaredConstructor &&
+ return !data().UserDeclaredConstructor &&
!data().DeclaredDefaultConstructor;
}
@@ -722,11 +825,11 @@ public:
CXXConstructorDecl *getCopyConstructor(unsigned TypeQuals) const;
/// getMoveConstructor - Returns the move constructor for this class
- CXXConstructorDecl *getMoveConstructor() const;
+ CXXConstructorDecl *getMoveConstructor() const;
/// \brief Retrieve the copy-assignment operator for this class, if available.
///
- /// This routine attempts to find the copy-assignment operator for this
+ /// This routine attempts to find the copy-assignment operator for this
/// class, using a simplistic form of overload resolution.
///
/// \param ArgIsConst Whether the argument to the copy-assignment operator
@@ -739,7 +842,7 @@ public:
/// getMoveAssignmentOperator - Returns the move assignment operator for this
/// class
CXXMethodDecl *getMoveAssignmentOperator() const;
-
+
/// hasUserDeclaredConstructor - Whether this class has any
/// user-declared constructors. When true, a default constructor
/// will not be implicitly declared.
@@ -760,7 +863,7 @@ public:
return data().UserDeclaredCopyConstructor;
}
- /// \brief Determine whether this class has had its copy constructor
+ /// \brief Determine whether this class has had its copy constructor
/// declared, either via the user or via an implicit declaration.
///
/// This value is used for lazy creation of copy constructors.
@@ -822,7 +925,7 @@ public:
return data().UserDeclaredCopyAssignment;
}
- /// \brief Determine whether this class has had its copy assignment operator
+ /// \brief Determine whether this class has had its copy assignment operator
/// declared, either via the user or via an implicit declaration.
///
/// This value is used for lazy creation of copy assignment operators.
@@ -882,6 +985,29 @@ public:
/// This value is used for lazy creation of destructors.
bool hasDeclaredDestructor() const { return data().DeclaredDestructor; }
+ /// \brief Determine whether this class describes a lambda function object.
+ bool isLambda() const { return hasDefinition() && data().IsLambda; }
+
+ /// \brief For a closure type, retrieve the mapping from captured
+ /// variables and 'this' to the non-static data members that store the
+ /// values or references of the captures.
+ ///
+ /// \param Captures Will be populated with the mapping from captured
+ /// variables to the corresponding fields.
+ ///
+ /// \param ThisCapture Will be set to the field declaration for the
+ /// 'this' capture.
+ void getCaptureFields(llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
+ FieldDecl *&ThisCapture) const;
+
+ typedef const LambdaExpr::Capture* capture_const_iterator;
+ capture_const_iterator captures_begin() const {
+ return isLambda() ? getLambdaData().Captures : NULL;
+ }
+ capture_const_iterator captures_end() const {
+ return isLambda() ? captures_begin() + getLambdaData().NumCaptures : NULL;
+ }
+
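A hedged usage sketch for the lambda-capture API added above; dumpCaptures is an illustrative helper and assumes a valid CXXRecordDecl obtained elsewhere:

    #include "clang/AST/DeclCXX.h"
    #include "llvm/ADT/DenseMap.h"
    #include "llvm/Support/raw_ostream.h"

    // List the variables captured by a closure type, after fetching the
    // mapping from captures to the fields that store them.
    static void dumpCaptures(const clang::CXXRecordDecl *RD) {
      if (!RD->isLambda())
        return;
      llvm::DenseMap<const clang::VarDecl *, clang::FieldDecl *> CaptureFields;
      clang::FieldDecl *ThisCapture = 0;
      RD->getCaptureFields(CaptureFields, ThisCapture);
      for (clang::CXXRecordDecl::capture_const_iterator
             C = RD->captures_begin(), E = RD->captures_end(); C != E; ++C)
        if (C->capturesVariable())
          llvm::errs() << C->getCapturedVar()->getName() << "\n";
    }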
/// getConversions - Retrieve the overload set containing all of the
/// conversion functions in this class.
UnresolvedSetImpl *getConversionFunctions() {
@@ -920,6 +1046,10 @@ public:
/// user-defined destructor.
bool isPOD() const { return data().PlainOldData; }
+ /// \brief True if this class is C-like, without C++-specific features, e.g.
+ /// it contains only public fields, no bases, tag kind is not 'class', etc.
+ bool isCLike() const;
+
/// isEmpty - Whether this class is empty (C++0x [meta.unary.prop]), which
/// means it has a virtual function, virtual base, data member (other than
/// 0-width bit-field) or inherits from a non-empty class. Does NOT include
@@ -941,20 +1071,63 @@ public:
/// \brief Whether this class, or any of its class subobjects, contains a
/// mutable field.
bool hasMutableFields() const { return data().HasMutableFields; }
-
- // hasTrivialDefaultConstructor - Whether this class has a trivial default
- // constructor
- // (C++0x [class.ctor]p5)
+
+ /// hasTrivialDefaultConstructor - Whether this class has a trivial default
+ /// constructor (C++11 [class.ctor]p5).
bool hasTrivialDefaultConstructor() const {
return data().HasTrivialDefaultConstructor &&
(!data().UserDeclaredConstructor ||
data().DeclaredDefaultConstructor);
}
- // hasConstexprNonCopyMoveConstructor - Whether this class has at least one
- // constexpr constructor other than the copy or move constructors.
+ /// hasConstexprNonCopyMoveConstructor - Whether this class has at least one
+ /// constexpr constructor other than the copy or move constructors.
bool hasConstexprNonCopyMoveConstructor() const {
- return data().HasConstexprNonCopyMoveConstructor;
+ return data().HasConstexprNonCopyMoveConstructor ||
+ (!hasUserDeclaredConstructor() &&
+ defaultedDefaultConstructorIsConstexpr());
+ }
+
+ /// defaultedDefaultConstructorIsConstexpr - Whether a defaulted default
+ /// constructor for this class would be constexpr.
+ bool defaultedDefaultConstructorIsConstexpr() const {
+ return data().DefaultedDefaultConstructorIsConstexpr;
+ }
+
+ /// defaultedCopyConstructorIsConstexpr - Whether a defaulted copy
+ /// constructor for this class would be constexpr.
+ bool defaultedCopyConstructorIsConstexpr() const {
+ return data().DefaultedCopyConstructorIsConstexpr;
+ }
+
+ /// defaultedMoveConstructorIsConstexpr - Whether a defaulted move
+ /// constructor for this class would be constexpr.
+ bool defaultedMoveConstructorIsConstexpr() const {
+ return data().DefaultedMoveConstructorIsConstexpr;
+ }
+
+ /// hasConstexprDefaultConstructor - Whether this class has a constexpr
+ /// default constructor.
+ bool hasConstexprDefaultConstructor() const {
+ return data().HasConstexprDefaultConstructor ||
+ (!data().UserDeclaredConstructor &&
+ data().DefaultedDefaultConstructorIsConstexpr && isLiteral());
+ }
+
+ /// hasConstexprCopyConstructor - Whether this class has a constexpr copy
+ /// constructor.
+ bool hasConstexprCopyConstructor() const {
+ return data().HasConstexprCopyConstructor ||
+ (!data().DeclaredCopyConstructor &&
+ data().DefaultedCopyConstructorIsConstexpr && isLiteral());
+ }
+
+ /// hasConstexprMoveConstructor - Whether this class has a constexpr move
+ /// constructor.
+ bool hasConstexprMoveConstructor() const {
+ return data().HasConstexprMoveConstructor ||
+ (needsImplicitMoveConstructor() &&
+ data().DefaultedMoveConstructorIsConstexpr && isLiteral());
}
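These predicates track the C++11 constexpr rules; a small source-level illustration of the cases they would report:

    // Illustrative C++11 classes for the predicates above.
    struct HasConstexprCtor {
      int X;
      constexpr HasConstexprCtor(int V) : X(V) {}  // non-copy/move constexpr
    };
    struct Empty {};  // its defaulted default constructor would be constexpr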
// hasTrivialCopyConstructor - Whether this class has a trivial copy
@@ -985,8 +1158,15 @@ public:
// (C++ [class.dtor]p3)
bool hasTrivialDestructor() const { return data().HasTrivialDestructor; }
- // hasNonLiteralTypeFieldsOrBases - Whether this class has a non-literal type
- // non-static data member or base class.
+ // hasIrrelevantDestructor - Whether this class has a destructor which has no
+ // semantic effect. Any such destructor will be trivial, public, defaulted
+ // and not deleted, and will call only irrelevant destructors.
+ bool hasIrrelevantDestructor() const {
+ return data().HasIrrelevantDestructor;
+ }
+
+ // hasNonLiteralTypeFieldsOrBases - Whether this class has a non-literal or
+ // volatile type non-static data member or base class.
bool hasNonLiteralTypeFieldsOrBases() const {
return data().HasNonLiteralTypeFieldsOrBases;
}
@@ -1006,20 +1186,23 @@ public:
// isLiteral - Whether this class is a literal type.
//
- // C++0x [basic.types]p10
+ // C++11 [basic.types]p10
// A class type that has all the following properties:
- // -- a trivial destructor
+ // -- it has a trivial destructor
// -- every constructor call and full-expression in the
// brace-or-equal-initializers for non-static data members (if any) is
// a constant expression.
// -- it is an aggregate type or has at least one constexpr constructor or
// constructor template that is not a copy or move constructor, and
- // -- all non-static data members and base classes of literal types
+ // -- all of its non-static data members and base classes are of literal
+ // types
//
- // We resolve DR1361 by ignoring the second bullet.
+ // We resolve DR1361 by ignoring the second bullet. We resolve DR1452 by
+ // treating types with trivial default constructors as literal types.
bool isLiteral() const {
return hasTrivialDestructor() &&
- (isAggregate() || hasConstexprNonCopyMoveConstructor()) &&
+ (isAggregate() || hasConstexprNonCopyMoveConstructor() ||
+ hasTrivialDefaultConstructor()) &&
!hasNonLiteralTypeFieldsOrBases();
}
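A source-level sketch of a class these rules classify as a literal type:

    // Literal type per C++11 [basic.types]p10: trivial destructor, a
    // constexpr non-copy/move constructor, and only literal members.
    struct Point {
      int X, Y;
      constexpr Point(int XV, int YV) : X(XV), Y(YV) {}
    };
    constexpr Point Origin(0, 0);  // usable in constant expressions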
@@ -1043,12 +1226,12 @@ public:
/// X<int>::A is required, it will be instantiated from the
/// declaration returned by getInstantiatedFromMemberClass().
CXXRecordDecl *getInstantiatedFromMemberClass() const;
-
+
/// \brief If this class is an instantiation of a member class of a
/// class template specialization, retrieves the member specialization
/// information.
MemberSpecializationInfo *getMemberSpecializationInfo() const;
-
+
/// \brief Specify that this record is an instantiation of the
/// member class RD.
void setInstantiationOfMemberClass(CXXRecordDecl *RD,
@@ -1077,7 +1260,7 @@ public:
/// instantiation of a class template or member class of a class template,
/// and how it was instantiated or specialized.
TemplateSpecializationKind getTemplateSpecializationKind() const;
-
+
/// \brief Set the kind of specialization or template instantiation this is.
void setTemplateSpecializationKind(TemplateSpecializationKind TSK);
@@ -1104,7 +1287,7 @@ public:
///
/// \returns true if this class is derived from Base, false otherwise.
bool isDerivedFrom(const CXXRecordDecl *Base) const;
-
+
/// \brief Determine whether this class is derived from the type \p Base.
///
/// This routine only determines whether this class is derived from \p Base,
@@ -1119,8 +1302,8 @@ public:
///
/// \returns true if this class is derived from Base, false otherwise.
///
- /// \todo add a separate paramaeter to configure IsDerivedFrom, rather than
- /// tangling input and output in \p Paths
+ /// \todo add a separate parameter to configure IsDerivedFrom, rather than
+ /// tangling input and output in \p Paths
bool isDerivedFrom(const CXXRecordDecl *Base, CXXBasePaths &Paths) const;
/// \brief Determine whether this class is virtually derived from
@@ -1155,20 +1338,20 @@ public:
///
/// The class itself does not count as a base class. This routine
/// returns false if the class has non-computable base classes.
- ///
+ ///
/// \param AllowShortCircuit if false, forces the callback to be called
/// for every base class, even if a dependent or non-matching base was
/// found.
bool forallBases(ForallBasesCallback *BaseMatches, void *UserData,
bool AllowShortCircuit = true) const;
-
- /// \brief Function type used by lookupInBases() to determine whether a
+
+ /// \brief Function type used by lookupInBases() to determine whether a
/// specific base class subobject matches the lookup criteria.
///
- /// \param Specifier the base-class specifier that describes the inheritance
+ /// \param Specifier the base-class specifier that describes the inheritance
/// from the base class we are trying to match.
///
- /// \param Path the current path, from the most-derived class down to the
+ /// \param Path the current path, from the most-derived class down to the
/// base named by the \p Specifier.
///
/// \param UserData a single pointer to user-specified data, provided to
@@ -1178,13 +1361,13 @@ public:
typedef bool BaseMatchesCallback(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *UserData);
-
+
/// \brief Look for entities within the base classes of this C++ class,
/// transitively searching all base class subobjects.
///
- /// This routine uses the callback function \p BaseMatches to find base
+ /// This routine uses the callback function \p BaseMatches to find base
/// classes meeting some search criteria, walking all base class subobjects
- /// and populating the given \p Paths structure with the paths through the
+ /// and populating the given \p Paths structure with the paths through the
/// inheritance hierarchy that resulted in a match. On a successful search,
/// the \p Paths structure can be queried to retrieve the matching paths and
/// to determine if there were any ambiguities.
@@ -1201,7 +1384,7 @@ public:
/// subobject that matches the search criteria.
bool lookupInBases(BaseMatchesCallback *BaseMatches, void *UserData,
CXXBasePaths &Paths) const;
-
+
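A hedged sketch of driving lookupInBases with the FindBaseClass callback declared below; hasBaseSubobject is an illustrative helper, and passing the base's canonical declaration as the opaque user data is an assumption based on how these callbacks are documented:

    #include "clang/AST/CXXInheritance.h"
    #include "clang/AST/DeclCXX.h"

    // Does Derived have Base among its (possibly indirect) base subobjects?
    static bool hasBaseSubobject(const clang::CXXRecordDecl *Derived,
                                 const clang::CXXRecordDecl *Base) {
      clang::CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                                /*DetectVirtual=*/false);
      return Derived->lookupInBases(
          &clang::CXXRecordDecl::FindBaseClass,
          const_cast<clang::CXXRecordDecl *>(Base->getCanonicalDecl()),
          Paths);
    }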
/// \brief Base-class lookup callback that determines whether the given
/// base class specifier refers to a specific class declaration.
///
@@ -1223,7 +1406,7 @@ public:
/// are searching for.
static bool FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path, void *BaseRecord);
-
+
/// \brief Base-class lookup callback that determines whether there exists
/// a tag with the given name.
///
@@ -1241,7 +1424,7 @@ public:
/// is an opaque \c DeclarationName pointer.
static bool FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path, void *Name);
-
+
/// \brief Base-class lookup callback that determines whether there exists
/// a member with the given name that can be used in a nested-name-specifier.
///
@@ -1277,15 +1460,15 @@ public:
/// \brief Indicates that the definition of this class is now complete.
virtual void completeDefinition();
- /// \brief Indicates that the definition of this class is now complete,
+ /// \brief Indicates that the definition of this class is now complete,
/// and provides a final overrider map to help determine whether it is abstract.
- ///
+ ///
/// \param FinalOverriders The final overrider map for this class, which can
/// be provided as an optimization for abstract-class checking. If NULL,
/// final overriders will be computed if they are needed to complete the
/// definition.
void completeDefinition(CXXFinalOverriderMap *FinalOverriders);
-
+
/// \brief Determine whether this class may end up being abstract, even though
/// it is not yet known to be abstract.
///
@@ -1294,6 +1477,53 @@ public:
/// will need to compute final overriders to determine whether the class is
/// actually abstract.
bool mayBeAbstract() const;
+
+ /// \brief If this is the closure type of a lambda expression, retrieve the
+ /// number to be used for name mangling in the Itanium C++ ABI.
+ ///
+ /// Zero indicates that this closure type has internal linkage, so the
+ /// mangling number does not matter, while a non-zero value identifies
+ /// this particular lambda expression within its context.
+ unsigned getLambdaManglingNumber() const {
+ assert(isLambda() && "Not a lambda closure type!");
+ return getLambdaData().ManglingNumber;
+ }
+
+ /// \brief Retrieve the declaration that provides additional context for a
+ /// lambda, when the normal declaration context is not specific enough.
+ ///
+ /// Certain contexts (default arguments of in-class function parameters and
+ /// the initializers of data members) have separate name mangling rules for
+ /// lambdas within the Itanium C++ ABI. For these cases, this routine provides
+ /// the declaration in which the lambda occurs, e.g., the function parameter
+ /// or the non-static data member. Otherwise, it returns NULL to imply that
+ /// the declaration context suffices.
+ Decl *getLambdaContextDecl() const {
+ assert(isLambda() && "Not a lambda closure type!");
+ return getLambdaData().ContextDecl;
+ }
+
+ /// \brief Set the mangling number and context declaration for a lambda
+ /// class.
+ void setLambdaMangling(unsigned ManglingNumber, Decl *ContextDecl) {
+ getLambdaData().ManglingNumber = ManglingNumber;
+ getLambdaData().ContextDecl = ContextDecl;
+ }
+
+ /// \brief Determine whether this lambda expression was known to be dependent
+ /// at the time it was created, even if its context does not appear to be
+ /// dependent.
+ ///
+ /// This flag is a workaround for an issue with parsing, where default
+ /// arguments are parsed before their enclosing function declarations have
+ /// been created. This means that any lambda expressions within those
+ /// default arguments will have as their DeclContext the context enclosing
+ /// the function declaration, which may be non-dependent even when the
+ /// function declaration itself is dependent. This flag indicates when we
+ /// know that the lambda is dependent despite that.
+ bool isDependentLambda() const {
+ return isLambda() && getLambdaData().Dependent;
+ }
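A short hedged sketch of querying the mangling state above; reportLambda is an illustrative helper:

    #include "clang/AST/DeclCXX.h"
    #include "llvm/Support/raw_ostream.h"

    // Report the mangling number of a closure type; zero means the number
    // is irrelevant because the lambda has internal linkage.
    static void reportLambda(const clang::CXXRecordDecl *RD) {
      if (RD->isLambda() && RD->getLambdaManglingNumber() != 0)
        llvm::errs() << "lambda #" << RD->getLambdaManglingNumber() << "\n";
    }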
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
@@ -1313,6 +1543,7 @@ public:
/// CXXMethodDecl - Represents a static or instance method of a
/// struct/union/class.
class CXXMethodDecl : public FunctionDecl {
+ virtual void anchor();
protected:
CXXMethodDecl(Kind DK, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
@@ -1322,9 +1553,9 @@ protected:
: FunctionDecl(DK, RD, StartLoc, NameInfo, T, TInfo,
(isStatic ? SC_Static : SC_None),
SCAsWritten, isInline, isConstexpr) {
- if (EndLocation.isValid())
- setRangeEnd(EndLocation);
- }
+ if (EndLocation.isValid())
+ setRangeEnd(EndLocation);
+ }
public:
static CXXMethodDecl *Create(ASTContext &C, CXXRecordDecl *RD,
@@ -1337,16 +1568,18 @@ public:
bool isConstexpr,
SourceLocation EndLocation);
+ static CXXMethodDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
bool isStatic() const { return getStorageClass() == SC_Static; }
bool isInstance() const { return !isStatic(); }
bool isVirtual() const {
- CXXMethodDecl *CD =
+ CXXMethodDecl *CD =
cast<CXXMethodDecl>(const_cast<CXXMethodDecl*>(this)->getCanonicalDecl());
if (CD->isVirtualAsWritten())
return true;
-
+
return (CD->begin_overridden_methods() != CD->end_overridden_methods());
}
@@ -1354,14 +1587,14 @@ public:
/// (C++ [basic.stc.dynamic.deallocation]p2), which is an overloaded
/// delete or delete[] operator with a particular signature.
bool isUsualDeallocationFunction() const;
-
+
/// \brief Determine whether this is a copy-assignment operator, regardless
/// of whether it was declared implicitly or explicitly.
bool isCopyAssignmentOperator() const;
/// \brief Determine whether this is a move assignment operator.
bool isMoveAssignmentOperator() const;
-
+
const CXXMethodDecl *getCanonicalDecl() const {
return cast<CXXMethodDecl>(FunctionDecl::getCanonicalDecl());
}
@@ -1374,11 +1607,11 @@ public:
bool isUserProvided() const {
return !(isDeleted() || getCanonicalDecl()->isDefaulted());
}
-
+
///
void addOverriddenMethod(const CXXMethodDecl *MD);
- typedef const CXXMethodDecl ** method_iterator;
+ typedef const CXXMethodDecl *const* method_iterator;
method_iterator begin_overridden_methods() const;
method_iterator end_overridden_methods() const;
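A hedged sketch of the overridden-methods iteration this typedef supports; dumpOverridden is illustrative:

    #include "clang/AST/DeclCXX.h"
    #include "llvm/Support/raw_ostream.h"

    // Print every method that MD directly overrides.
    static void dumpOverridden(const clang::CXXMethodDecl *MD) {
      for (clang::CXXMethodDecl::method_iterator
             M = MD->begin_overridden_methods(),
             E = MD->end_overridden_methods(); M != E; ++M)
        llvm::errs() << (*M)->getQualifiedNameAsString() << "\n";
    }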
@@ -1419,9 +1652,18 @@ public:
RefQualifierKind getRefQualifier() const {
return getType()->getAs<FunctionProtoType>()->getRefQualifier();
}
-
+
bool hasInlineBody() const;
+ /// \brief Determine whether this is a lambda closure type's static member
+ /// function that is used for the result of the lambda's conversion to
+ /// function pointer (for a lambda with no captures).
+ ///
+ /// The function itself, if used, will have a placeholder body that will be
+ /// supplied by IR generation to either forward to the function call operator
+ /// or clone the function call operator.
+ bool isLambdaStaticInvoker() const;
+
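At the source level, the static invoker described above backs the capture-less lambda's conversion to a function pointer:

    // C++11: a capture-less lambda converts to a plain function pointer;
    // the call is routed through the closure type's hidden static invoker.
    int main() {
      auto Add1 = [](int X) { return X + 1; };
      int (*FP)(int) = Add1;
      return FP(41) == 42 ? 0 : 1;  // exits with 0
    }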
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXMethodDecl *D) { return true; }
@@ -1445,31 +1687,34 @@ public:
/// };
/// @endcode
class CXXCtorInitializer {
- /// \brief Either the base class name (stored as a TypeSourceInfo*), an normal
- /// field (FieldDecl), anonymous field (IndirectFieldDecl*), or target
- /// constructor (CXXConstructorDecl*) being initialized.
- llvm::PointerUnion4<TypeSourceInfo *, FieldDecl *, IndirectFieldDecl *,
- CXXConstructorDecl *>
+ /// \brief Either the base class name/delegating constructor type (stored as
+ /// a TypeSourceInfo*), a normal field (FieldDecl), or an anonymous field
+ /// (IndirectFieldDecl*) being initialized.
+ llvm::PointerUnion3<TypeSourceInfo *, FieldDecl *, IndirectFieldDecl *>
Initializee;
-
+
/// \brief The source location for the field name or, for a base initializer
/// pack expansion, the location of the ellipsis. In the case of a delegating
/// constructor, it will still include the type's source location as the
/// Initializee points to the CXXConstructorDecl (to allow loop detection).
SourceLocation MemberOrEllipsisLocation;
-
+
/// \brief The argument used to initialize the base or member, which may
/// end up constructing an object (when multiple arguments are involved).
- /// If 0, this is a field initializer, and the in-class member initializer
+ /// If 0, this is a field initializer, and the in-class member initializer
/// will be used.
Stmt *Init;
-
+
/// LParenLoc - Location of the left paren of the ctor-initializer.
SourceLocation LParenLoc;
/// RParenLoc - Location of the right paren of the ctor-initializer.
SourceLocation RParenLoc;
+ /// \brief If the initializee is a type, whether that type makes this
+ /// a delegating initialization.
+ bool IsDelegating : 1;
+
/// IsVirtual - If the initializer is a base initializer, this keeps track
/// of whether the base is virtual or not.
bool IsVirtual : 1;
@@ -1483,12 +1728,12 @@ class CXXCtorInitializer {
/// original sources, counting from 0; otherwise, if IsWritten is false,
/// it stores the number of array index variables stored after this
/// object in memory.
- unsigned SourceOrderOrNumArrayIndices : 14;
+ unsigned SourceOrderOrNumArrayIndices : 13;
CXXCtorInitializer(ASTContext &Context, FieldDecl *Member,
SourceLocation MemberLoc, SourceLocation L, Expr *Init,
SourceLocation R, VarDecl **Indices, unsigned NumIndices);
-
+
public:
/// CXXCtorInitializer - Creates a new base-class initializer.
explicit
@@ -1510,25 +1755,27 @@ public:
/// CXXCtorInitializer - Creates a new delegating Initializer.
explicit
- CXXCtorInitializer(ASTContext &Context, SourceLocation D, SourceLocation L,
- CXXConstructorDecl *Target, Expr *Init, SourceLocation R);
+ CXXCtorInitializer(ASTContext &Context, TypeSourceInfo *TInfo,
+ SourceLocation L, Expr *Init, SourceLocation R);
- /// \brief Creates a new member initializer that optionally contains
+ /// \brief Creates a new member initializer that optionally contains
/// array indices used to describe an elementwise initialization.
static CXXCtorInitializer *Create(ASTContext &Context, FieldDecl *Member,
SourceLocation MemberLoc, SourceLocation L,
Expr *Init, SourceLocation R,
VarDecl **Indices, unsigned NumIndices);
-
+
/// isBaseInitializer - Returns true when this initializer is
/// initializing a base class.
- bool isBaseInitializer() const { return Initializee.is<TypeSourceInfo*>(); }
+ bool isBaseInitializer() const {
+ return Initializee.is<TypeSourceInfo*>() && !IsDelegating;
+ }
/// isMemberInitializer - Returns true when this initializer is
/// initializing a non-static data member.
bool isMemberInitializer() const { return Initializee.is<FieldDecl*>(); }
- bool isAnyMemberInitializer() const {
+ bool isAnyMemberInitializer() const {
return isMemberInitializer() || isIndirectMemberInitializer();
}
@@ -1546,21 +1793,21 @@ public:
/// isDelegatingInitializer - Returns true when this initializer is creating
/// a delegating constructor.
bool isDelegatingInitializer() const {
- return Initializee.is<CXXConstructorDecl *>();
+ return Initializee.is<TypeSourceInfo*>() && IsDelegating;
}
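The source form this predicate recognizes:

    // C++11 delegating constructor: the mem-initializer names the class's
    // own type, so Initializee holds a TypeSourceInfo with IsDelegating set.
    struct S {
      S() : S(42) {}
      S(int) {}
    };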
/// \brief Determine whether this initializer is a pack expansion.
- bool isPackExpansion() const {
- return isBaseInitializer() && MemberOrEllipsisLocation.isValid();
+ bool isPackExpansion() const {
+ return isBaseInitializer() && MemberOrEllipsisLocation.isValid();
}
-
+
/// \brief For a pack expansion, returns the location of the ellipsis.
SourceLocation getEllipsisLoc() const {
assert(isPackExpansion() && "Initializer is not a pack expansion");
return MemberOrEllipsisLocation;
}
-
- /// If this is a base class initializer, returns the type of the
+
+ /// If this is a base class initializer, returns the type of the
/// base class with location information. Otherwise, returns a NULL
/// type location.
TypeLoc getBaseClassLoc() const;
@@ -1572,56 +1819,47 @@ public:
/// Returns whether the base is virtual or not.
bool isBaseVirtual() const {
assert(isBaseInitializer() && "Must call this on base initializer!");
-
+
return IsVirtual;
}
- /// \brief Returns the declarator information for a base class initializer.
- TypeSourceInfo *getBaseClassInfo() const {
+ /// \brief Returns the declarator information for a base class or delegating
+ /// initializer.
+ TypeSourceInfo *getTypeSourceInfo() const {
return Initializee.dyn_cast<TypeSourceInfo *>();
}
-
+
/// getMember - If this is a member initializer, returns the
/// declaration of the non-static data member being
/// initialized. Otherwise, returns NULL.
FieldDecl *getMember() const {
if (isMemberInitializer())
return Initializee.get<FieldDecl*>();
- else
- return 0;
+ return 0;
}
FieldDecl *getAnyMember() const {
if (isMemberInitializer())
return Initializee.get<FieldDecl*>();
- else if (isIndirectMemberInitializer())
+ if (isIndirectMemberInitializer())
return Initializee.get<IndirectFieldDecl*>()->getAnonField();
- else
- return 0;
+ return 0;
}
IndirectFieldDecl *getIndirectMember() const {
if (isIndirectMemberInitializer())
return Initializee.get<IndirectFieldDecl*>();
- else
- return 0;
+ return 0;
}
- CXXConstructorDecl *getTargetConstructor() const {
- if (isDelegatingInitializer())
- return Initializee.get<CXXConstructorDecl*>();
- else
- return 0;
- }
-
- SourceLocation getMemberLocation() const {
+ SourceLocation getMemberLocation() const {
return MemberOrEllipsisLocation;
}
-
+
/// \brief Determine the source location of the initializer.
SourceLocation getSourceLocation() const;
-
+
/// \brief Determine the source range covering the entire initializer.
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
/// isWritten - Returns true if this initializer is explicitly written
/// in the source code.
@@ -1656,7 +1894,7 @@ public:
return IsWritten ? 0 : SourceOrderOrNumArrayIndices;
}
- /// \brief Retrieve a particular array index variable used to
+ /// \brief Retrieve a particular array index variable used to
/// describe an array member initialization.
VarDecl *getArrayIndex(unsigned I) {
assert(I < getNumArrayIndices() && "Out of bounds member array index");
@@ -1670,7 +1908,12 @@ public:
assert(I < getNumArrayIndices() && "Out of bounds member array index");
reinterpret_cast<VarDecl **>(this + 1)[I] = Index;
}
-
+ ArrayRef<VarDecl *> getArrayIndexes() {
+ assert(getNumArrayIndices() != 0 && "Getting indexes for non-array init");
+ return ArrayRef<VarDecl *>(reinterpret_cast<VarDecl **>(this + 1),
+ getNumArrayIndices());
+ }
+
/// \brief Get the initializer. This is 0 if this is an in-class initializer
/// for a non-static data member which has not yet been parsed.
Expr *getInit() const {
@@ -1691,6 +1934,7 @@ public:
/// };
/// @endcode
class CXXConstructorDecl : public CXXMethodDecl {
+ virtual void anchor();
/// IsExplicitSpecified - Whether this constructor declaration has the
/// 'explicit' keyword specified.
bool IsExplicitSpecified : 1;
@@ -1712,7 +1956,7 @@ class CXXConstructorDecl : public CXXMethodDecl {
CXXConstructorDecl(CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
- bool isExplicitSpecified, bool isInline,
+ bool isExplicitSpecified, bool isInline,
bool isImplicitlyDeclared, bool isConstexpr)
: CXXMethodDecl(CXXConstructor, RD, StartLoc, NameInfo, T, TInfo, false,
SC_None, isInline, isConstexpr, SourceLocation()),
@@ -1722,7 +1966,7 @@ class CXXConstructorDecl : public CXXMethodDecl {
}
public:
- static CXXConstructorDecl *Create(ASTContext &C, EmptyShell Empty);
+ static CXXConstructorDecl *CreateDeserialized(ASTContext &C, unsigned ID);
static CXXConstructorDecl *Create(ASTContext &C, CXXRecordDecl *RD,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
@@ -1734,7 +1978,7 @@ public:
/// isExplicitSpecified - Whether this constructor declaration has the
/// 'explicit' keyword specified.
bool isExplicitSpecified() const { return IsExplicitSpecified; }
-
+
/// isExplicit - Whether this constructor was marked "explicit" or not.
bool isExplicit() const {
return cast<CXXConstructorDecl>(getFirstDeclaration())
@@ -1782,7 +2026,8 @@ public:
}
typedef std::reverse_iterator<init_iterator> init_reverse_iterator;
- typedef std::reverse_iterator<init_const_iterator> init_const_reverse_iterator;
+ typedef std::reverse_iterator<init_const_iterator>
+ init_const_reverse_iterator;
init_reverse_iterator init_rbegin() {
return init_reverse_iterator(init_end());
@@ -1821,11 +2066,7 @@ public:
/// getTargetConstructor - When this constructor delegates to
/// another, retrieve the target
- CXXConstructorDecl *getTargetConstructor() const {
- assert(isDelegatingConstructor() &&
- "A non-delegating constructor has no target");
- return CtorInitializers[0]->getTargetConstructor();
- }
+ CXXConstructorDecl *getTargetConstructor() const;
/// isDefaultConstructor - Whether this constructor is a default
/// constructor (C++ [class.ctor]p5), which can be used to
@@ -1902,12 +2143,12 @@ public:
CXXConstructorDecl *getCanonicalDecl() {
return cast<CXXConstructorDecl>(FunctionDecl::getCanonicalDecl());
}
-
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXConstructorDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXConstructor; }
-
+
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
@@ -1922,6 +2163,7 @@ public:
/// };
/// @endcode
class CXXDestructorDecl : public CXXMethodDecl {
+ virtual void anchor();
/// ImplicitlyDefined - Whether this destructor was implicitly
/// defined by the compiler. When false, the destructor was defined
/// by the user. In C++03, this flag will have the same value as
@@ -1931,7 +2173,7 @@ class CXXDestructorDecl : public CXXMethodDecl {
bool ImplicitlyDefined : 1;
FunctionDecl *OperatorDelete;
-
+
CXXDestructorDecl(CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
@@ -1943,13 +2185,13 @@ class CXXDestructorDecl : public CXXMethodDecl {
}
public:
- static CXXDestructorDecl *Create(ASTContext& C, EmptyShell Empty);
static CXXDestructorDecl *Create(ASTContext &C, CXXRecordDecl *RD,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo* TInfo,
bool isInline,
bool isImplicitlyDeclared);
+ static CXXDestructorDecl *CreateDeserialized(ASTContext & C, unsigned ID);
/// isImplicitlyDefined - Whether this destructor was implicitly
/// defined. If false, then this destructor was defined by the
@@ -1957,7 +2199,8 @@ public:
/// already been defined.
bool isImplicitlyDefined() const {
assert(isThisDeclarationADefinition() &&
- "Can only get the implicit-definition flag once the destructor has been defined");
+ "Can only get the implicit-definition flag once the destructor has "
+ "been defined");
return ImplicitlyDefined;
}
@@ -1965,7 +2208,8 @@ public:
/// implicitly defined or not.
void setImplicitlyDefined(bool ID) {
assert(isThisDeclarationADefinition() &&
- "Can only set the implicit-definition flag once the destructor has been defined");
+ "Can only set the implicit-definition flag once the destructor has "
+ "been defined");
ImplicitlyDefined = ID;
}
@@ -1976,7 +2220,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXDestructorDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXDestructor; }
-
+
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
@@ -1991,7 +2235,8 @@ public:
/// };
/// @endcode
class CXXConversionDecl : public CXXMethodDecl {
- /// IsExplicitSpecified - Whether this conversion function declaration is
+ virtual void anchor();
+ /// IsExplicitSpecified - Whether this conversion function declaration is
/// marked "explicit", meaning that it can only be applied when the user
/// explicitly wrote a cast. This is a C++0x feature.
bool IsExplicitSpecified : 1;
@@ -2006,7 +2251,6 @@ class CXXConversionDecl : public CXXMethodDecl {
IsExplicitSpecified(isExplicitSpecified) { }
public:
- static CXXConversionDecl *Create(ASTContext &C, EmptyShell Empty);
static CXXConversionDecl *Create(ASTContext &C, CXXRecordDecl *RD,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
@@ -2014,8 +2258,9 @@ public:
bool isInline, bool isExplicit,
bool isConstexpr,
SourceLocation EndLocation);
+ static CXXConversionDecl *CreateDeserialized(ASTContext &C, unsigned ID);
- /// IsExplicitSpecified - Whether this conversion function declaration is
+ /// IsExplicitSpecified - Whether this conversion function declaration is
/// marked "explicit", meaning that it can only be applied when the user
/// explicitly wrote a cast. This is a C++0x feature.
bool isExplicitSpecified() const { return IsExplicitSpecified; }
@@ -2034,11 +2279,15 @@ public:
return getType()->getAs<FunctionType>()->getResultType();
}
+ /// \brief Determine whether this conversion function is a conversion from
+ /// a lambda closure type to a block pointer.
+ bool isLambdaToBlockPointerConversion() const;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXConversionDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXConversion; }
-
+
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
@@ -2047,6 +2296,7 @@ public:
/// extern "C" void foo();
///
class LinkageSpecDecl : public Decl, public DeclContext {
+ virtual void anchor();
public:
/// LanguageIDs - Used to represent the language in a linkage
/// specification. The values are part of the serialization abi for
@@ -2076,7 +2326,8 @@ public:
SourceLocation ExternLoc,
SourceLocation LangLoc, LanguageIDs Lang,
SourceLocation RBraceLoc = SourceLocation());
-
+ static LinkageSpecDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
/// \brief Return the language specified by this linkage specification.
LanguageIDs getLanguage() const { return Language; }
/// \brief Set the language specified by this linkage specification.
@@ -2091,7 +2342,7 @@ public:
void setExternLoc(SourceLocation L) { ExternLoc = L; }
void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
- SourceLocation getLocEnd() const {
+ SourceLocation getLocEnd() const LLVM_READONLY {
if (hasBraces())
return getRBraceLoc();
// No braces: get the end location of the (only) declaration in context
@@ -2099,7 +2350,7 @@ public:
return decls_empty() ? getLocation() : decls_begin()->getLocEnd();
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(ExternLoc, getLocEnd());
}
@@ -2119,12 +2370,13 @@ public:
/// using namespace std;
///
// NB: UsingDirectiveDecl should be Decl not NamedDecl, but we provide
-// artificial name, for all using-directives in order to store
+// artificial names for all using-directives in order to store
// them in DeclContext effectively.
class UsingDirectiveDecl : public NamedDecl {
+ virtual void anchor();
/// \brief The location of the "using" keyword.
SourceLocation UsingLoc;
-
+
/// SourceLocation - Location of 'namespace' token.
SourceLocation NamespaceLoc;
@@ -2159,11 +2411,11 @@ public:
/// \brief Retrieve the nested-name-specifier that qualifies the
/// name of the namespace, with source-location information.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
-
+
/// \brief Retrieve the nested-name-specifier that qualifies the
/// name of the namespace.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
NamedDecl *getNominatedNamespaceAsWritten() { return NominatedNamespace; }
@@ -2185,7 +2437,7 @@ public:
/// \brief Return the location of the "using" keyword.
SourceLocation getUsingLoc() const { return UsingLoc; }
-
+
// FIXME: Could omit 'Key' in name.
/// getNamespaceKeyLocation - Returns location of namespace keyword.
SourceLocation getNamespaceKeyLocation() const { return NamespaceLoc; }
@@ -2200,18 +2452,19 @@ public:
SourceLocation IdentLoc,
NamedDecl *Nominated,
DeclContext *CommonAncestor);
-
- SourceRange getSourceRange() const {
+ static UsingDirectiveDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(UsingLoc, getLocation());
}
-
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const UsingDirectiveDecl *D) { return true; }
static bool classofKind(Kind K) { return K == UsingDirective; }
// Friend for getUsingDirectiveName.
friend class DeclContext;
-
+
friend class ASTDeclReader;
};
@@ -2221,15 +2474,17 @@ public:
/// namespace Foo = Bar;
/// @endcode
class NamespaceAliasDecl : public NamedDecl {
+ virtual void anchor();
+
/// \brief The location of the "namespace" keyword.
SourceLocation NamespaceLoc;
/// IdentLoc - Location of namespace identifier. Accessed by TargetNameLoc.
SourceLocation IdentLoc;
-
+
/// \brief The nested-name-specifier that precedes the namespace.
NestedNameSpecifierLoc QualifierLoc;
-
+
/// Namespace - The Decl that this alias points to. Can either be a
/// NamespaceDecl or a NamespaceAliasDecl.
NamedDecl *Namespace;
@@ -2238,23 +2493,23 @@ class NamespaceAliasDecl : public NamedDecl {
SourceLocation AliasLoc, IdentifierInfo *Alias,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation IdentLoc, NamedDecl *Namespace)
- : NamedDecl(NamespaceAlias, DC, AliasLoc, Alias),
+ : NamedDecl(NamespaceAlias, DC, AliasLoc, Alias),
NamespaceLoc(NamespaceLoc), IdentLoc(IdentLoc),
QualifierLoc(QualifierLoc), Namespace(Namespace) { }
friend class ASTDeclReader;
-
+
public:
/// \brief Retrieve the nested-name-specifier that qualifies the
/// name of the namespace, with source-location information.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
-
+
/// \brief Retrieve the nested-name-specifier that qualifies the
/// name of the namespace.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
-
+
/// \brief Retrieve the namespace declaration aliased by this directive.
NamespaceDecl *getNamespace() {
if (NamespaceAliasDecl *AD = dyn_cast<NamespaceAliasDecl>(Namespace))
@@ -2282,17 +2537,19 @@ public:
NamedDecl *getAliasedNamespace() const { return Namespace; }
static NamespaceAliasDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation NamespaceLoc,
+ SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation IdentLoc,
NamedDecl *Namespace);
- virtual SourceRange getSourceRange() const {
+ static NamespaceAliasDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(NamespaceLoc, IdentLoc);
}
-
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const NamespaceAliasDecl *D) { return true; }
static bool classofKind(Kind K) { return K == NamespaceAlias; }
@@ -2310,6 +2567,8 @@ public:
/// }
///
class UsingShadowDecl : public NamedDecl {
+ virtual void anchor();
+
/// The referenced declaration.
NamedDecl *Underlying;
@@ -2337,6 +2596,8 @@ public:
return new (C) UsingShadowDecl(DC, Loc, Using, Target);
}
+ static UsingShadowDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
/// \brief Gets the underlying declaration which has been brought into the
/// local scope.
NamedDecl *getTargetDecl() const { return Underlying; }
@@ -2369,6 +2630,8 @@ public:
/// UsingDecl - Represents a C++ using-declaration. For example:
/// using someNameSpace::someIdentifier;
class UsingDecl : public NamedDecl {
+ virtual void anchor();
+
/// \brief The source location of the "using" location itself.
SourceLocation UsingLocation;
@@ -2380,18 +2643,16 @@ class UsingDecl : public NamedDecl {
DeclarationNameLoc DNLoc;
/// \brief The first shadow declaration of the shadow decl chain associated
- /// with this using declaration.
- UsingShadowDecl *FirstUsingShadow;
+ /// with this using declaration. The bool member of the pair stores whether
+ /// this decl has the 'typename' keyword.
+ llvm::PointerIntPair<UsingShadowDecl *, 1, bool> FirstUsingShadow;
- // \brief Has 'typename' keyword.
- bool IsTypeName;
-
- UsingDecl(DeclContext *DC, SourceLocation UL,
+ UsingDecl(DeclContext *DC, SourceLocation UL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, bool IsTypeNameArg)
: NamedDecl(Using, DC, NameInfo.getLoc(), NameInfo.getName()),
UsingLocation(UL), QualifierLoc(QualifierLoc),
- DNLoc(NameInfo.getInfo()), FirstUsingShadow(0),IsTypeName(IsTypeNameArg) {
+ DNLoc(NameInfo.getInfo()), FirstUsingShadow(0, IsTypeNameArg) {
}
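The change above packs the 'typename' flag into the spare low bit of the shadow-decl pointer. A minimal standalone sketch of the llvm::PointerIntPair idiom:

    #include "llvm/ADT/PointerIntPair.h"

    // One word holds both an aligned pointer and a 1-bit flag; the flag
    // lives in a low bit that the pointer's alignment guarantees is free.
    static bool pointerIntPairDemo() {
      int X = 0;
      llvm::PointerIntPair<int *, 1, bool> P(&X, true);
      return P.getInt() && P.getPointer() == &X;  // true
    }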
public:
@@ -2406,8 +2667,8 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// \brief Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
DeclarationNameInfo getNameInfo() const {
@@ -2415,10 +2676,10 @@ public:
}
/// \brief Return true if the using declaration has 'typename'.
- bool isTypeName() const { return IsTypeName; }
+ bool isTypeName() const { return FirstUsingShadow.getInt(); }
/// \brief Sets whether the using declaration has 'typename'.
- void setTypeName(bool TN) { IsTypeName = TN; }
+ void setTypeName(bool TN) { FirstUsingShadow.setInt(TN); }
/// \brief Iterates through the using shadow declarations associated with
/// this using declaration.
@@ -2459,7 +2720,7 @@ public:
};
shadow_iterator shadow_begin() const {
- return shadow_iterator(FirstUsingShadow);
+ return shadow_iterator(FirstUsingShadow.getPointer());
}
shadow_iterator shadow_end() const { return shadow_iterator(); }
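A hedged sketch of iterating the shadow declarations through this interface; dumpShadows is illustrative:

    #include "clang/AST/DeclCXX.h"
    #include "llvm/Support/raw_ostream.h"

    // Print the target of each UsingShadowDecl this using-declaration
    // introduces into its scope.
    static void dumpShadows(const clang::UsingDecl *UD) {
      for (clang::UsingDecl::shadow_iterator S = UD->shadow_begin(),
             E = UD->shadow_end(); S != E; ++S)
        llvm::errs() << (*S)->getTargetDecl()->getName() << "\n";
    }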
@@ -2478,7 +2739,9 @@ public:
const DeclarationNameInfo &NameInfo,
bool IsTypeNameArg);
- SourceRange getSourceRange() const {
+ static UsingDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(UsingLocation, getNameInfo().getEndLoc());
}
@@ -2499,6 +2762,8 @@ public:
/// using Base<T>::foo;
/// };
class UnresolvedUsingValueDecl : public ValueDecl {
+ virtual void anchor();
+
/// \brief The source location of the 'using' keyword
SourceLocation UsingLocation;
@@ -2510,7 +2775,7 @@ class UnresolvedUsingValueDecl : public ValueDecl {
DeclarationNameLoc DNLoc;
UnresolvedUsingValueDecl(DeclContext *DC, QualType Ty,
- SourceLocation UsingLoc,
+ SourceLocation UsingLoc,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: ValueDecl(UnresolvedUsingValue, DC,
@@ -2531,20 +2796,23 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// \brief Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
-
+
DeclarationNameInfo getNameInfo() const {
return DeclarationNameInfo(getDeclName(), getLocation(), DNLoc);
}
static UnresolvedUsingValueDecl *
Create(ASTContext &C, DeclContext *DC, SourceLocation UsingLoc,
- NestedNameSpecifierLoc QualifierLoc,
+ NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo);
- SourceRange getSourceRange() const {
+ static UnresolvedUsingValueDecl *
+ CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(UsingLocation, getNameInfo().getEndLoc());
}
@@ -2566,6 +2834,8 @@ public:
/// The type associated with a unresolved using typename decl is
/// currently always a typename type.
class UnresolvedUsingTypenameDecl : public TypeDecl {
+ virtual void anchor();
+
/// \brief The source location of the 'using' keyword
SourceLocation UsingLocation;
@@ -2578,14 +2848,14 @@ class UnresolvedUsingTypenameDecl : public TypeDecl {
UnresolvedUsingTypenameDecl(DeclContext *DC, SourceLocation UsingLoc,
SourceLocation TypenameLoc,
NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TargetNameLoc,
+ SourceLocation TargetNameLoc,
IdentifierInfo *TargetName)
: TypeDecl(UnresolvedUsingTypename, DC, TargetNameLoc, TargetName,
UsingLoc),
TypenameLocation(TypenameLoc), QualifierLoc(QualifierLoc) { }
friend class ASTDeclReader;
-
+
public:
/// \brief Returns the source location of the 'using' keyword.
SourceLocation getUsingLoc() const { return getLocStart(); }
@@ -2598,8 +2868,8 @@ public:
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
/// \brief Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
static UnresolvedUsingTypenameDecl *
@@ -2607,6 +2877,9 @@ public:
SourceLocation TypenameLoc, NestedNameSpecifierLoc QualifierLoc,
SourceLocation TargetNameLoc, DeclarationName TargetName);
+ static UnresolvedUsingTypenameDecl *
+ CreateDeserialized(ASTContext &C, unsigned ID);
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const UnresolvedUsingTypenameDecl *D) { return true; }
static bool classofKind(Kind K) { return K == UnresolvedUsingTypename; }
@@ -2614,6 +2887,7 @@ public:
/// StaticAssertDecl - Represents a C++0x static_assert declaration.
class StaticAssertDecl : public Decl {
+ virtual void anchor();
Expr *AssertExpr;
StringLiteral *Message;
SourceLocation RParenLoc;
@@ -2629,7 +2903,8 @@ public:
SourceLocation StaticAssertLoc,
Expr *AssertExpr, StringLiteral *Message,
SourceLocation RParenLoc);
-
+ static StaticAssertDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
Expr *getAssertExpr() { return AssertExpr; }
const Expr *getAssertExpr() const { return AssertExpr; }
@@ -2639,7 +2914,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getLocation(), getRParenLoc());
}
@@ -2655,6 +2930,9 @@ public:
const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
AccessSpecifier AS);
+const PartialDiagnostic &operator<<(const PartialDiagnostic &DB,
+ AccessSpecifier AS);
+
} // end namespace clang
#endif
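
For reference, a minimal source-level sketch (editorial, not part of the patch) of the constructs the UnresolvedUsingValueDecl and UnresolvedUsingTypenameDecl nodes above model: inside a class template, member using-declarations that name a dependent base cannot be resolved until instantiation.

    template <typename T> struct Base {
      typedef T type;
      static const int value = 0;
    };
    template <typename T> struct Derived : Base<T> {
      using typename Base<T>::type; // modeled as an UnresolvedUsingTypenameDecl
      using Base<T>::value;         // modeled as an UnresolvedUsingValueDecl
    };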
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
index b84e5bb..ba1eb8d 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
@@ -16,6 +16,7 @@
#define LLVM_CLANG_AST_DECLFRIEND_H
#include "clang/AST/DeclCXX.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
@@ -35,6 +36,7 @@ namespace clang {
///
/// The semantic context of a friend decl is its declaring class.
class FriendDecl : public Decl {
+ virtual void anchor();
public:
typedef llvm::PointerUnion<NamedDecl*,TypeSourceInfo*> FriendUnion;
@@ -77,7 +79,7 @@ public:
static FriendDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, FriendUnion Friend_,
SourceLocation FriendL);
- static FriendDecl *Create(ASTContext &C, EmptyShell Empty);
+ static FriendDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// If this friend declaration names an (untemplated but possibly
/// dependent) type, return the type; otherwise return null. This
@@ -99,7 +101,7 @@ public:
}
/// Retrieves the source range for the friend declaration.
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
/* FIXME: consider the case of templates wrt start of range. */
if (NamedDecl *ND = getFriendDecl())
return SourceRange(getFriendLoc(), ND->getLocEnd());
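
Many getters in this patch gain LLVM_READONLY. As a simplified sketch of llvm/Support/Compiler.h (assuming the GCC-attribute path), the macro expands to the 'pure' attribute, which tells the optimizer the result depends only on the arguments and readable memory, so repeated calls can be merged:

    #if defined(__GNUC__)
    # define LLVM_READONLY __attribute__((__pure__))
    #else
    # define LLVM_READONLY
    #endif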
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h b/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h
new file mode 100644
index 0000000..66d190f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h
@@ -0,0 +1,84 @@
+//===-- DeclLookups.h - Low-level interface to all names in a DC-*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DeclContext::all_lookups_iterator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLLOOKUPS_H
+#define LLVM_CLANG_AST_DECLLOOKUPS_H
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclarationName.h"
+
+namespace clang {
+
+/// all_lookups_iterator - An iterator that provides a view over the results
+/// of looking up every possible name.
+class DeclContext::all_lookups_iterator {
+ StoredDeclsMap::iterator It, End;
+public:
+ typedef lookup_result value_type;
+ typedef lookup_result reference;
+ typedef lookup_result pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ all_lookups_iterator() {}
+ all_lookups_iterator(StoredDeclsMap::iterator It,
+ StoredDeclsMap::iterator End)
+ : It(It), End(End) {}
+
+ reference operator*() const { return It->second.getLookupResult(); }
+ pointer operator->() const { return It->second.getLookupResult(); }
+
+ all_lookups_iterator& operator++() {
+ // Filter out using directives. They don't belong as results from name
+ // lookup anyway, except as an implementation detail. Users of the API
+ // should not expect to get them (or worse, rely on it).
+ do {
+ ++It;
+ } while (It != End &&
+ It->first == DeclarationName::getUsingDirectiveName());
+
+ return *this;
+ }
+
+ all_lookups_iterator operator++(int) {
+ all_lookups_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(all_lookups_iterator x, all_lookups_iterator y) {
+ return x.It == y.It;
+ }
+ friend bool operator!=(all_lookups_iterator x, all_lookups_iterator y) {
+ return x.It != y.It;
+ }
+};
+
+DeclContext::all_lookups_iterator DeclContext::lookups_begin() const {
+ DeclContext *Primary = const_cast<DeclContext*>(this)->getPrimaryContext();
+ if (StoredDeclsMap *Map = Primary->buildLookup())
+ return all_lookups_iterator(Map->begin(), Map->end());
+ return all_lookups_iterator();
+}
+
+DeclContext::all_lookups_iterator DeclContext::lookups_end() const {
+ DeclContext *Primary = const_cast<DeclContext*>(this)->getPrimaryContext();
+ if (StoredDeclsMap *Map = Primary->buildLookup())
+ return all_lookups_iterator(Map->end(), Map->end());
+ return all_lookups_iterator();
+}
+
+} // end namespace clang
+
+#endif
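
A minimal usage sketch for the new header (hypothetical helper; assumes the usual DeclContext::lookup_result, a pair of NamedDecl** bounds):

    #include "clang/AST/DeclLookups.h"
    #include "llvm/Support/raw_ostream.h"

    // Print every name that lookup can find in a context.
    static void dumpLookups(clang::DeclContext *DC) {
      typedef clang::DeclContext::all_lookups_iterator Iter;
      for (Iter I = DC->lookups_begin(), E = DC->lookups_end(); I != E; ++I) {
        clang::DeclContext::lookup_result R = *I; // all decls sharing one name
        for (clang::DeclContext::lookup_iterator D = R.first; D != R.second; ++D)
          llvm::errs() << (*D)->getNameAsString() << "\n";
      }
    }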
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
index 425c89d..4ae073e 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
@@ -17,6 +17,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/SelectorLocationsKind.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
class Expr;
@@ -84,7 +85,7 @@ public:
loc_iterator loc_begin() const { return Locations; }
loc_iterator loc_end() const { return Locations + size(); }
- void set(ObjCProtocolDecl* const* InList, unsigned Elts,
+ void set(ObjCProtocolDecl* const* InList, unsigned Elts,
const SourceLocation *Locs, ASTContext &Ctx);
};
@@ -124,7 +125,7 @@ private:
// Synthesized declaration method for a property setter/getter
unsigned IsSynthesized : 1;
-
+
// Method has a definition.
unsigned IsDefined : 1;
@@ -144,14 +145,14 @@ private:
/// \brief Indicates whether this method has a related result type.
unsigned RelatedResultType : 1;
-
+
/// \brief Whether the locations of the selector identifiers are in a
/// "standard" position, a enum SelectorLocationsKind.
unsigned SelLocsKind : 2;
// Result type of this method.
QualType MethodDeclType;
-
+
// Type source information for the result type.
TypeSourceInfo *ResultTInfo;
@@ -246,7 +247,7 @@ public:
SourceLocation beginLoc,
SourceLocation endLoc,
Selector SelInfo,
- QualType T,
+ QualType T,
TypeSourceInfo *ResultTInfo,
DeclContext *contextDecl,
bool isInstance = true,
@@ -257,6 +258,8 @@ public:
ImplementationControl impControl = None,
bool HasRelatedResultType = false);
+ static ObjCMethodDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
virtual ObjCMethodDecl *getCanonicalDecl();
const ObjCMethodDecl *getCanonicalDecl() const {
return const_cast<ObjCMethodDecl*>(this)->getCanonicalDecl();
@@ -270,23 +273,27 @@ public:
/// \brief Determine whether this method has a result type that is related
/// to the message receiver's type.
bool hasRelatedResultType() const { return RelatedResultType; }
-
+
/// \brief Note whether this method has a related result type.
void SetRelatedResultType(bool RRT = true) { RelatedResultType = RRT; }
/// \brief True if this is a method redeclaration in the same interface.
bool isRedeclaration() const { return IsRedeclaration; }
void setAsRedeclaration(const ObjCMethodDecl *PrevMethod);
-
+
// Location information, modeled after the Stmt API.
- SourceLocation getLocStart() const { return getLocation(); }
- SourceLocation getLocEnd() const { return EndLoc; }
+ SourceLocation getLocStart() const LLVM_READONLY { return getLocation(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
void setEndLoc(SourceLocation Loc) { EndLoc = Loc; }
- virtual SourceRange getSourceRange() const {
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getLocation(), EndLoc);
}
- SourceLocation getSelectorStartLoc() const { return getSelectorLoc(0); }
+ SourceLocation getSelectorStartLoc() const {
+ if (isImplicit())
+ return getLocStart();
+ return getSelectorLoc(0);
+ }
SourceLocation getSelectorLoc(unsigned Index) const {
assert(Index < getNumSelectorLocs() && "Index out of range!");
if (hasStandardSelLocs())
@@ -319,12 +326,12 @@ public:
QualType getResultType() const { return MethodDeclType; }
void setResultType(QualType T) { MethodDeclType = T; }
- /// \brief Determine the type of an expression that sends a message to this
+ /// \brief Determine the type of an expression that sends a message to this
/// function.
QualType getSendResultType() const {
return getResultType().getNonLValueExprType(getASTContext());
}
-
+
TypeSourceInfo *getResultTypeSourceInfo() const { return ResultTInfo; }
void setResultTypeSourceInfo(TypeSourceInfo *TInfo) { ResultTInfo = TInfo; }
@@ -338,8 +345,8 @@ public:
param_iterator param_end() { return getParams() + NumParams; }
// This method returns an iterator past the parameters that are part of the
// selector name (one per selector argument).
- param_const_iterator sel_param_end() const {
- return param_begin() + getSelector().getNumArgs();
+ param_const_iterator sel_param_end() const {
+ return param_begin() + getSelector().getNumArgs();
}
/// \brief Sets the method's parameters and selector source locations.
@@ -385,7 +392,7 @@ public:
bool isSynthesized() const { return IsSynthesized; }
void setSynthesized(bool isSynth) { IsSynthesized = isSynth; }
-
+
bool isDefined() const { return IsDefined; }
void setDefined(bool isDefined) { IsDefined = isDefined; }
@@ -426,6 +433,8 @@ public:
/// ObjCProtocolDecl, and ObjCImplDecl.
///
class ObjCContainerDecl : public NamedDecl, public DeclContext {
+ virtual void anchor();
+
SourceLocation AtStart;
// These two locations in the range mark the end of the method container.
@@ -499,7 +508,7 @@ public:
AtEnd = atEnd;
}
- virtual SourceRange getSourceRange() const {
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtStart, getAtEndRange().getEnd());
}
@@ -540,64 +549,113 @@ public:
/// Unlike C++, ObjC has a single-rooted class model. In Cocoa, classes
/// typically inherit from NSObject (an exception is NSProxy).
///
-class ObjCInterfaceDecl : public ObjCContainerDecl {
+class ObjCInterfaceDecl : public ObjCContainerDecl
+ , public Redeclarable<ObjCInterfaceDecl> {
+ virtual void anchor();
+
/// TypeForDecl - This indicates the Type object that represents this
/// TypeDecl. It is a cache maintained by ASTContext::getObjCInterfaceType
mutable const Type *TypeForDecl;
friend class ASTContext;
+
+ struct DefinitionData {
+ /// \brief The definition of this class, for quick access from any
+ /// declaration.
+ ObjCInterfaceDecl *Definition;
+
+ /// Class's super class.
+ ObjCInterfaceDecl *SuperClass;
- /// Class's super class.
- ObjCInterfaceDecl *SuperClass;
+ /// Protocols referenced in the @interface declaration
+ ObjCProtocolList ReferencedProtocols;
- /// Protocols referenced in the @interface declaration
- ObjCProtocolList ReferencedProtocols;
-
- /// Protocols reference in both the @interface and class extensions.
- ObjCList<ObjCProtocolDecl> AllReferencedProtocols;
+ /// Protocols reference in both the @interface and class extensions.
+ ObjCList<ObjCProtocolDecl> AllReferencedProtocols;
- /// \brief List of categories and class extensions defined for this class.
- ///
- /// Categories are stored as a linked list in the AST, since the categories
- /// and class extensions come long after the initial interface declaration,
- /// and we avoid dynamically-resized arrays in the AST wherever possible.
- ObjCCategoryDecl *CategoryList;
-
- /// IvarList - List of all ivars defined by this class; including class
- /// extensions and implementation. This list is built lazily.
- ObjCIvarDecl *IvarList;
+ /// \brief List of categories and class extensions defined for this class.
+ ///
+ /// Categories are stored as a linked list in the AST, since the categories
+ /// and class extensions come long after the initial interface declaration,
+ /// and we avoid dynamically-resized arrays in the AST wherever possible.
+ ObjCCategoryDecl *CategoryList;
- bool ForwardDecl:1; // declared with @class.
- bool InternalInterface:1; // true - no @interface for @implementation
-
- /// \brief Indicates that the contents of this Objective-C class will be
- /// completed by the external AST source when required.
- mutable bool ExternallyCompleted : 1;
-
- SourceLocation SuperClassLoc; // location of the super class identifier.
- SourceLocation EndLoc; // marks the '>', '}', or identifier.
+ /// IvarList - List of all ivars defined by this class; including class
+ /// extensions and implementation. This list is built lazily.
+ ObjCIvarDecl *IvarList;
+
+ /// \brief Indicates that the contents of this Objective-C class will be
+ /// completed by the external AST source when required.
+ mutable bool ExternallyCompleted : 1;
+
+ /// \brief The location of the superclass, if any.
+ SourceLocation SuperClassLoc;
+
+ /// \brief The last source location in this declaration, before the
+ /// properties/methods. For example, this will be the '>', '}', or
+ /// identifier.
+ SourceLocation EndLoc;
+
+ DefinitionData() : Definition(), SuperClass(), CategoryList(), IvarList(),
+ ExternallyCompleted() { }
+ };
ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id,
- SourceLocation CLoc, bool FD, bool isInternal);
+ SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl,
+ bool isInternal);
void LoadExternalDefinition() const;
+
+ /// \brief Contains a pointer to the data associated with this class,
+ /// which will be NULL if this class has not yet been defined.
+ DefinitionData *Data;
+
+ DefinitionData &data() const {
+ assert(Data != 0 && "Declaration has no definition!");
+ return *Data;
+ }
+
+ /// \brief Allocate the definition data for this class.
+ void allocateDefinitionData();
+
+ typedef Redeclarable<ObjCInterfaceDecl> redeclarable_base;
+ virtual ObjCInterfaceDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
+ }
+ virtual ObjCInterfaceDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual ObjCInterfaceDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
public:
- static ObjCInterfaceDecl *Create(ASTContext &C, DeclContext *DC,
+ static ObjCInterfaceDecl *Create(const ASTContext &C, DeclContext *DC,
SourceLocation atLoc,
IdentifierInfo *Id,
+ ObjCInterfaceDecl *PrevDecl,
SourceLocation ClassLoc = SourceLocation(),
- bool ForwardDecl = false,
bool isInternal = false);
-
+
+ static ObjCInterfaceDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ if (isThisDeclarationADefinition())
+ return ObjCContainerDecl::getSourceRange();
+
+ return SourceRange(getAtStartLoc(), getLocation());
+ }
+
/// \brief Indicate that this Objective-C class is complete, but that
/// the external AST source will be responsible for filling in its contents
/// when a complete class is required.
void setExternallyCompleted();
-
+
const ObjCProtocolList &getReferencedProtocols() const {
- if (ExternallyCompleted)
+ assert(hasDefinition() && "Caller did not check for forward reference!");
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
-
- return ReferencedProtocols;
+
+ return data().ReferencedProtocols;
}
ObjCImplementationDecl *getImplementation() const;
@@ -614,108 +672,182 @@ public:
}
typedef ObjCProtocolList::iterator protocol_iterator;
-
+
protocol_iterator protocol_begin() const {
- if (ExternallyCompleted)
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return protocol_iterator();
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- return ReferencedProtocols.begin();
+ return data().ReferencedProtocols.begin();
}
protocol_iterator protocol_end() const {
- if (ExternallyCompleted)
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return protocol_iterator();
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- return ReferencedProtocols.end();
+ return data().ReferencedProtocols.end();
}
typedef ObjCProtocolList::loc_iterator protocol_loc_iterator;
- protocol_loc_iterator protocol_loc_begin() const {
- if (ExternallyCompleted)
+ protocol_loc_iterator protocol_loc_begin() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return protocol_loc_iterator();
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- return ReferencedProtocols.loc_begin();
+ return data().ReferencedProtocols.loc_begin();
}
- protocol_loc_iterator protocol_loc_end() const {
- if (ExternallyCompleted)
+ protocol_loc_iterator protocol_loc_end() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return protocol_loc_iterator();
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- return ReferencedProtocols.loc_end();
+ return data().ReferencedProtocols.loc_end();
}
-
+
typedef ObjCList<ObjCProtocolDecl>::iterator all_protocol_iterator;
-
+
all_protocol_iterator all_referenced_protocol_begin() const {
- if (ExternallyCompleted)
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return all_protocol_iterator();
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- return AllReferencedProtocols.empty() ? protocol_begin()
- : AllReferencedProtocols.begin();
+ return data().AllReferencedProtocols.empty()
+ ? protocol_begin()
+ : data().AllReferencedProtocols.begin();
}
all_protocol_iterator all_referenced_protocol_end() const {
- if (ExternallyCompleted)
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return all_protocol_iterator();
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- return AllReferencedProtocols.empty() ? protocol_end()
- : AllReferencedProtocols.end();
+ return data().AllReferencedProtocols.empty()
+ ? protocol_end()
+ : data().AllReferencedProtocols.end();
}
typedef specific_decl_iterator<ObjCIvarDecl> ivar_iterator;
- ivar_iterator ivar_begin() const { return ivar_iterator(decls_begin()); }
- ivar_iterator ivar_end() const { return ivar_iterator(decls_end()); }
+ ivar_iterator ivar_begin() const {
+ if (const ObjCInterfaceDecl *Def = getDefinition())
+ return ivar_iterator(Def->decls_begin());
+
+ // FIXME: Should make sure no callers ever do this.
+ return ivar_iterator();
+ }
+ ivar_iterator ivar_end() const {
+ if (const ObjCInterfaceDecl *Def = getDefinition())
+ return ivar_iterator(Def->decls_end());
+
+ // FIXME: Should make sure no callers ever do this.
+ return ivar_iterator();
+ }
unsigned ivar_size() const {
return std::distance(ivar_begin(), ivar_end());
}
-
+
bool ivar_empty() const { return ivar_begin() == ivar_end(); }
-
+
ObjCIvarDecl *all_declared_ivar_begin();
const ObjCIvarDecl *all_declared_ivar_begin() const {
// Even though this modifies IvarList, it's conceptually const:
// the ivar chain is essentially a cached property of ObjCInterfaceDecl.
return const_cast<ObjCInterfaceDecl *>(this)->all_declared_ivar_begin();
}
- void setIvarList(ObjCIvarDecl *ivar) { IvarList = ivar; }
-
+ void setIvarList(ObjCIvarDecl *ivar) { data().IvarList = ivar; }
+
/// setProtocolList - Set the list of protocols that this interface
/// implements.
void setProtocolList(ObjCProtocolDecl *const* List, unsigned Num,
const SourceLocation *Locs, ASTContext &C) {
- ReferencedProtocols.set(List, Num, Locs, C);
+ data().ReferencedProtocols.set(List, Num, Locs, C);
}
/// mergeClassExtensionProtocolList - Merge class extension's protocol list
/// into the protocol list for this class.
- void mergeClassExtensionProtocolList(ObjCProtocolDecl *const* List,
+ void mergeClassExtensionProtocolList(ObjCProtocolDecl *const* List,
unsigned Num,
ASTContext &C);
- bool isForwardDecl() const { return ForwardDecl; }
- void setForwardDecl(bool val) { ForwardDecl = val; }
+ /// \brief Determine whether this particular declaration of this class is
+ /// actually also a definition.
+ bool isThisDeclarationADefinition() const {
+ return Data && Data->Definition == this;
+ }
+
+ /// \brief Determine whether this class has been defined.
+ bool hasDefinition() const { return Data; }
+
+ /// \brief Retrieve the definition of this class, or NULL if this class
+ /// has been forward-declared (with @class) but not yet defined (with
+ /// @interface).
+ ObjCInterfaceDecl *getDefinition() {
+ return hasDefinition()? Data->Definition : 0;
+ }
+
+ /// \brief Retrieve the definition of this class, or NULL if this class
+ /// has been forward-declared (with @class) but not yet defined (with
+ /// @interface).
+ const ObjCInterfaceDecl *getDefinition() const {
+ return hasDefinition()? Data->Definition : 0;
+ }
- ObjCInterfaceDecl *getSuperClass() const {
- if (ExternallyCompleted)
+ /// \brief Starts the definition of this Objective-C class, taking it from
+ /// a forward declaration (@class) to a definition (@interface).
+ void startDefinition();
+
+ ObjCInterfaceDecl *getSuperClass() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- return SuperClass;
+ return data().SuperClass;
+ }
+
+ void setSuperClass(ObjCInterfaceDecl * superCls) {
+ data().SuperClass =
+ (superCls && superCls->hasDefinition()) ? superCls->getDefinition()
+ : superCls;
}
-
- void setSuperClass(ObjCInterfaceDecl * superCls) { SuperClass = superCls; }
- ObjCCategoryDecl* getCategoryList() const {
- if (ExternallyCompleted)
+ ObjCCategoryDecl* getCategoryList() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- return CategoryList;
+ return data().CategoryList;
}
-
+
void setCategoryList(ObjCCategoryDecl *category) {
- CategoryList = category;
+ data().CategoryList = category;
}
-
+
ObjCCategoryDecl* getFirstClassExtension() const;
ObjCPropertyDecl
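
The hunk above splits ObjCInterfaceDecl into declarations and a single definition, so clients now guard with hasDefinition() before touching definition data. A hypothetical helper, assuming clang/AST/DeclObjC.h:

    // Depth of the superclass chain; a forward @class has no such data.
    static unsigned superclassDepth(clang::ObjCInterfaceDecl *IFace) {
      unsigned Depth = 0;
      if (!IFace->hasDefinition())
        return Depth;
      for (clang::ObjCInterfaceDecl *C = IFace->getDefinition()->getSuperClass();
           C; C = C->getSuperClass())
        ++Depth;
      return Depth;
    }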
@@ -726,8 +858,9 @@ public:
bool isSuperClassOf(const ObjCInterfaceDecl *I) const {
// Returns true if this class is I itself or appears in I's superclass chain.
while (I != NULL) {
- if (this == I)
+ if (declaresSameEntity(this, I))
return true;
+
I = I->getSuperClass();
}
return false;
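
isSuperClassOf now compares through declaresSameEntity because a redeclarable class may be denoted by several ObjCInterfaceDecl pointers. The check is roughly canonical-declaration equality (a sketch; see DeclBase.h for the real helper):

    static bool sameEntity(const clang::Decl *A, const clang::Decl *B) {
      if (!A || !B)
        return A == B;
      return A->getCanonicalDecl() == B->getCanonicalDecl();
    }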
@@ -742,7 +875,20 @@ public:
return true;
Class = Class->getSuperClass();
}
- return false;
+ return false;
+ }
+
+ /// isObjCRequiresPropertyDefs - Checks whether this class or one of its
+ /// superclasses forbids auto-synthesis of properties (via the
+ /// ObjCRequiresPropertyDefs attribute). Returns that class declaration,
+ /// or 0 otherwise.
+ const ObjCInterfaceDecl *isObjCRequiresPropertyDefs() const {
+ const ObjCInterfaceDecl *Class = this;
+ while (Class) {
+ if (Class->hasAttr<ObjCRequiresPropertyDefsAttr>())
+ return Class;
+ Class = Class->getSuperClass();
+ }
+ return 0;
}
ObjCIvarDecl *lookupInstanceVariable(IdentifierInfo *IVarName,
@@ -754,31 +900,39 @@ public:
// Lookup a method. First, we search locally. If a method isn't
// found, we search referenced protocols and class categories.
- ObjCMethodDecl *lookupMethod(Selector Sel, bool isInstance) const;
- ObjCMethodDecl *lookupInstanceMethod(Selector Sel) const {
- return lookupMethod(Sel, true/*isInstance*/);
+ ObjCMethodDecl *lookupMethod(Selector Sel, bool isInstance,
+ bool shallowCategoryLookup= false) const;
+ ObjCMethodDecl *lookupInstanceMethod(Selector Sel,
+ bool shallowCategoryLookup = false) const {
+ return lookupMethod(Sel, true/*isInstance*/, shallowCategoryLookup);
}
- ObjCMethodDecl *lookupClassMethod(Selector Sel) const {
- return lookupMethod(Sel, false/*isInstance*/);
+ ObjCMethodDecl *lookupClassMethod(Selector Sel,
+ bool shallowCategoryLookup = false) const {
+ return lookupMethod(Sel, false/*isInstance*/, shallowCategoryLookup);
}
ObjCInterfaceDecl *lookupInheritedClass(const IdentifierInfo *ICName);
-
+
// Lookup a method in the classes implementation hierarchy.
ObjCMethodDecl *lookupPrivateMethod(const Selector &Sel, bool Instance=true);
- // Location information, modeled after the Stmt API.
- SourceLocation getLocStart() const { return getAtStartLoc(); } // '@'interface
- SourceLocation getLocEnd() const { return EndLoc; }
- void setLocEnd(SourceLocation LE) { EndLoc = LE; }
+ SourceLocation getEndOfDefinitionLoc() const {
+ if (!hasDefinition())
+ return getLocation();
+
+ return data().EndLoc;
+ }
+
+ void setEndOfDefinitionLoc(SourceLocation LE) { data().EndLoc = LE; }
- void setSuperClassLoc(SourceLocation Loc) { SuperClassLoc = Loc; }
- SourceLocation getSuperClassLoc() const { return SuperClassLoc; }
+ void setSuperClassLoc(SourceLocation Loc) { data().SuperClassLoc = Loc; }
+ SourceLocation getSuperClassLoc() const { return data().SuperClassLoc; }
/// isImplicitInterfaceDecl - check that this is an implicitly declared
/// ObjCInterfaceDecl node. This is for legacy objective-c @implementation
/// declaration without an @interface declaration.
- bool isImplicitInterfaceDecl() const { return InternalInterface; }
- void setImplicitInterfaceDecl(bool val) { InternalInterface = val; }
+ bool isImplicitInterfaceDecl() const {
+ return hasDefinition() ? Data->Definition->isImplicit() : isImplicit();
+ }
/// ClassImplementsProtocol - Checks that 'lProto' protocol
/// has been implemented in IDecl class, its super class or categories (if
@@ -787,6 +941,20 @@ public:
bool lookupCategory,
bool RHSIsQualifiedID = false);
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ /// Retrieves the canonical declaration of this Objective-C class.
+ ObjCInterfaceDecl *getCanonicalDecl() {
+ return getFirstDeclaration();
+ }
+ const ObjCInterfaceDecl *getCanonicalDecl() const {
+ return getFirstDeclaration();
+ }
+
// Low-level accessor
const Type *getTypeForDecl() const { return TypeForDecl; }
void setTypeForDecl(const Type *TD) const { TypeForDecl = TD; }
@@ -795,6 +963,7 @@ public:
static bool classof(const ObjCInterfaceDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCInterface; }
+ friend class ASTReader;
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
@@ -815,6 +984,8 @@ public:
/// }
///
class ObjCIvarDecl : public FieldDecl {
+ virtual void anchor();
+
public:
enum AccessControl {
None, Private, Protected, Public, Package
@@ -837,12 +1008,14 @@ public:
AccessControl ac, Expr *BW = NULL,
bool synthesized=false);
+ static ObjCIvarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
/// \brief Return the class interface that this ivar is logically contained
/// in; this is either the interface where the ivar was declared, or the
/// interface the ivar is conceptually a part of in the case of synthesized
/// ivars.
const ObjCInterfaceDecl *getContainingInterface() const;
-
+
ObjCIvarDecl *getNextIvar() { return NextIvar; }
const ObjCIvarDecl *getNextIvar() const { return NextIvar; }
void setNextIvar(ObjCIvarDecl *ivar) { NextIvar = ivar; }
@@ -857,16 +1030,16 @@ public:
void setSynthesize(bool synth) { Synthesized = synth; }
bool getSynthesize() const { return Synthesized; }
-
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCIvarDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCIvar; }
private:
- /// NextIvar - Next Ivar in the list of ivars declared in class; class's
+ /// NextIvar - Next Ivar in the list of ivars declared in class; class's
/// extensions and class's implementation
ObjCIvarDecl *NextIvar;
-
+
// NOTE: VC++ treats enums as signed, avoid using the AccessControl enum
unsigned DeclAccess : 3;
unsigned Synthesized : 1;
@@ -876,7 +1049,7 @@ private:
/// ObjCAtDefsFieldDecl - Represents a field declaration created by an
/// @defs(...).
class ObjCAtDefsFieldDecl : public FieldDecl {
-private:
+ virtual void anchor();
ObjCAtDefsFieldDecl(DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, Expr *BW)
@@ -890,6 +1063,8 @@ public:
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, Expr *BW);
+ static ObjCAtDefsFieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCAtDefsFieldDecl *D) { return true; }
@@ -920,46 +1095,94 @@ public:
///
/// id <NSDraggingInfo> anyObjectThatImplementsNSDraggingInfo;
///
-class ObjCProtocolDecl : public ObjCContainerDecl {
- /// Referenced protocols
- ObjCProtocolList ReferencedProtocols;
+class ObjCProtocolDecl : public ObjCContainerDecl,
+ public Redeclarable<ObjCProtocolDecl> {
+ virtual void anchor();
- bool isForwardProtoDecl; // declared with @protocol.
+ struct DefinitionData {
+ /// \brief The declaration that defines this protocol.
+ ObjCProtocolDecl *Definition;
- SourceLocation EndLoc; // marks the '>' or identifier.
+ /// \brief Referenced protocols
+ ObjCProtocolList ReferencedProtocols;
+ };
+
+ DefinitionData *Data;
- ObjCProtocolDecl(DeclContext *DC, IdentifierInfo *Id,
- SourceLocation nameLoc, SourceLocation atStartLoc)
- : ObjCContainerDecl(ObjCProtocol, DC, Id, nameLoc, atStartLoc),
- isForwardProtoDecl(true) {
+ DefinitionData &data() const {
+ assert(Data && "Objective-C protocol has no definition!");
+ return *Data;
}
+
+ ObjCProtocolDecl(DeclContext *DC, IdentifierInfo *Id,
+ SourceLocation nameLoc, SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl);
+ void allocateDefinitionData();
+
+ typedef Redeclarable<ObjCProtocolDecl> redeclarable_base;
+ virtual ObjCProtocolDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
+ }
+ virtual ObjCProtocolDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual ObjCProtocolDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
public:
static ObjCProtocolDecl *Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
SourceLocation nameLoc,
- SourceLocation atStartLoc);
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl);
+ static ObjCProtocolDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
const ObjCProtocolList &getReferencedProtocols() const {
- return ReferencedProtocols;
+ assert(hasDefinition() && "No definition available!");
+ return data().ReferencedProtocols;
}
typedef ObjCProtocolList::iterator protocol_iterator;
- protocol_iterator protocol_begin() const {return ReferencedProtocols.begin();}
- protocol_iterator protocol_end() const { return ReferencedProtocols.end(); }
+ protocol_iterator protocol_begin() const {
+ if (!hasDefinition())
+ return protocol_iterator();
+
+ return data().ReferencedProtocols.begin();
+ }
+ protocol_iterator protocol_end() const {
+ if (!hasDefinition())
+ return protocol_iterator();
+
+ return data().ReferencedProtocols.end();
+ }
typedef ObjCProtocolList::loc_iterator protocol_loc_iterator;
- protocol_loc_iterator protocol_loc_begin() const {
- return ReferencedProtocols.loc_begin();
+ protocol_loc_iterator protocol_loc_begin() const {
+ if (!hasDefinition())
+ return protocol_loc_iterator();
+
+ return data().ReferencedProtocols.loc_begin();
+ }
+ protocol_loc_iterator protocol_loc_end() const {
+ if (!hasDefinition())
+ return protocol_loc_iterator();
+
+ return data().ReferencedProtocols.loc_end();
}
- protocol_loc_iterator protocol_loc_end() const {
- return ReferencedProtocols.loc_end();
+ unsigned protocol_size() const {
+ if (!hasDefinition())
+ return 0;
+
+ return data().ReferencedProtocols.size();
}
- unsigned protocol_size() const { return ReferencedProtocols.size(); }
/// setProtocolList - Set the list of protocols that this interface
/// implements.
void setProtocolList(ObjCProtocolDecl *const*List, unsigned Num,
const SourceLocation *Locs, ASTContext &C) {
- ReferencedProtocols.set(List, Num, Locs, C);
+ assert(Data && "Protocol is not defined");
+ data().ReferencedProtocols.set(List, Num, Locs, C);
}
ObjCProtocolDecl *lookupProtocolNamed(IdentifierInfo *PName);
@@ -973,102 +1196,57 @@ public:
ObjCMethodDecl *lookupClassMethod(Selector Sel) const {
return lookupMethod(Sel, false/*isInstance*/);
}
-
- bool isForwardDecl() const { return isForwardProtoDecl; }
- void setForwardDecl(bool val) { isForwardProtoDecl = val; }
- // Location information, modeled after the Stmt API.
- SourceLocation getLocStart() const { return getAtStartLoc(); } // '@'protocol
- SourceLocation getLocEnd() const { return EndLoc; }
- void setLocEnd(SourceLocation LE) { EndLoc = LE; }
+ /// \brief Determine whether this protocol has a definition.
+ bool hasDefinition() const { return Data != 0; }
- static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCProtocolDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == ObjCProtocol; }
-};
+ /// \brief Retrieve the definition of this protocol, if any.
+ ObjCProtocolDecl *getDefinition() {
+ return Data? Data->Definition : 0;
+ }
-/// ObjCClassDecl - Specifies a list of forward class declarations. For example:
-///
-/// @class NSCursor, NSImage, NSPasteboard, NSWindow;
-///
-class ObjCClassDecl : public Decl {
-public:
- class ObjCClassRef {
- ObjCInterfaceDecl *ID;
- SourceLocation L;
- public:
- ObjCClassRef(ObjCInterfaceDecl *d, SourceLocation l) : ID(d), L(l) {}
- SourceLocation getLocation() const { return L; }
- ObjCInterfaceDecl *getInterface() const { return ID; }
- };
-private:
- ObjCClassRef *ForwardDecl;
+ /// \brief Retrieve the definition of this protocol, if any.
+ const ObjCProtocolDecl *getDefinition() const {
+ return Data? Data->Definition : 0;
+ }
- ObjCClassDecl(DeclContext *DC, SourceLocation L,
- ObjCInterfaceDecl *const Elt, const SourceLocation Loc,
- ASTContext &C);
-public:
- static ObjCClassDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L,
- ObjCInterfaceDecl *const Elt = 0,
- const SourceLocation Locs = SourceLocation());
-
- ObjCInterfaceDecl *getForwardInterfaceDecl() { return ForwardDecl->getInterface(); }
- ObjCClassRef *getForwardDecl() { return ForwardDecl; }
- void setClass(ASTContext &C, ObjCInterfaceDecl*const Cls,
- const SourceLocation Locs);
+ /// \brief Determine whether this particular declaration is also the
+ /// definition.
+ bool isThisDeclarationADefinition() const {
+ return getDefinition() == this;
+ }
- virtual SourceRange getSourceRange() const;
-
- static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCClassDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == ObjCClass; }
-};
-
-/// ObjCForwardProtocolDecl - Specifies a list of forward protocol declarations.
-/// For example:
-///
-/// @protocol NSTextInput, NSChangeSpelling, NSDraggingInfo;
-///
-class ObjCForwardProtocolDecl : public Decl {
- ObjCProtocolList ReferencedProtocols;
-
- ObjCForwardProtocolDecl(DeclContext *DC, SourceLocation L,
- ObjCProtocolDecl *const *Elts, unsigned nElts,
- const SourceLocation *Locs, ASTContext &C);
-
-public:
- static ObjCForwardProtocolDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation L,
- ObjCProtocolDecl *const *Elts,
- unsigned Num,
- const SourceLocation *Locs);
+ /// \brief Starts the definition of this Objective-C protocol.
+ void startDefinition();
- static ObjCForwardProtocolDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation L) {
- return Create(C, DC, L, 0, 0, 0);
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ if (isThisDeclarationADefinition())
+ return ObjCContainerDecl::getSourceRange();
+
+ return SourceRange(getAtStartLoc(), getLocation());
}
+
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
- typedef ObjCProtocolList::iterator protocol_iterator;
- protocol_iterator protocol_begin() const {return ReferencedProtocols.begin();}
- protocol_iterator protocol_end() const { return ReferencedProtocols.end(); }
- typedef ObjCProtocolList::loc_iterator protocol_loc_iterator;
- protocol_loc_iterator protocol_loc_begin() const {
- return ReferencedProtocols.loc_begin();
+ /// Retrieves the canonical declaration of this Objective-C protocol.
+ ObjCProtocolDecl *getCanonicalDecl() {
+ return getFirstDeclaration();
}
- protocol_loc_iterator protocol_loc_end() const {
- return ReferencedProtocols.loc_end();
+ const ObjCProtocolDecl *getCanonicalDecl() const {
+ return getFirstDeclaration();
}
- unsigned protocol_size() const { return ReferencedProtocols.size(); }
-
- /// setProtocolList - Set the list of forward protocols.
- void setProtocolList(ObjCProtocolDecl *const*List, unsigned Num,
- const SourceLocation *Locs, ASTContext &C) {
- ReferencedProtocols.set(List, Num, Locs, C);
- }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classof(const ObjCForwardProtocolDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == ObjCForwardProtocol; }
+ static bool classof(const ObjCProtocolDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCProtocol; }
+
+ friend class ASTReader;
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
};
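
With ObjCProtocolDecl now Redeclarable, every declaration of a protocol is reachable from any of them. A hypothetical helper:

    // Count all redeclarations of a protocol via the new redecl_iterator.
    static unsigned countRedecls(clang::ObjCProtocolDecl *P) {
      unsigned N = 0;
      typedef clang::ObjCProtocolDecl::redecl_iterator Iter;
      for (Iter R = P->redecls_begin(), E = P->redecls_end(); R != E; ++R)
        ++N;
      return N;
    }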
/// ObjCCategoryDecl - Represents a category declaration. A category allows
@@ -1089,6 +1267,8 @@ public:
/// don't support this level of dynamism, which is both powerful and dangerous.
///
class ObjCCategoryDecl : public ObjCContainerDecl {
+ virtual void anchor();
+
/// Interface belonging to this category
ObjCInterfaceDecl *ClassInterface;
@@ -1105,22 +1285,31 @@ class ObjCCategoryDecl : public ObjCContainerDecl {
/// \brief The location of the category name in this declaration.
SourceLocation CategoryNameLoc;
- ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc,
+ /// A class extension may have private ivars.
+ SourceLocation IvarLBraceLoc;
+ SourceLocation IvarRBraceLoc;
+
+ ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc,
SourceLocation ClassNameLoc, SourceLocation CategoryNameLoc,
- IdentifierInfo *Id, ObjCInterfaceDecl *IDecl)
+ IdentifierInfo *Id, ObjCInterfaceDecl *IDecl,
+ SourceLocation IvarLBraceLoc=SourceLocation(),
+ SourceLocation IvarRBraceLoc=SourceLocation())
: ObjCContainerDecl(ObjCCategory, DC, Id, ClassNameLoc, AtLoc),
ClassInterface(IDecl), NextClassCategory(0), HasSynthBitfield(false),
- CategoryNameLoc(CategoryNameLoc) {
+ CategoryNameLoc(CategoryNameLoc),
+ IvarLBraceLoc(IvarLBraceLoc), IvarRBraceLoc(IvarRBraceLoc) {
}
public:
static ObjCCategoryDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation AtLoc,
+ SourceLocation AtLoc,
SourceLocation ClassNameLoc,
SourceLocation CategoryNameLoc,
IdentifierInfo *Id,
- ObjCInterfaceDecl *IDecl);
- static ObjCCategoryDecl *Create(ASTContext &C, EmptyShell Empty);
+ ObjCInterfaceDecl *IDecl,
+ SourceLocation IvarLBraceLoc=SourceLocation(),
+ SourceLocation IvarRBraceLoc=SourceLocation());
+ static ObjCCategoryDecl *CreateDeserialized(ASTContext &C, unsigned ID);
ObjCInterfaceDecl *getClassInterface() { return ClassInterface; }
const ObjCInterfaceDecl *getClassInterface() const { return ClassInterface; }
@@ -1144,21 +1333,21 @@ public:
protocol_iterator protocol_end() const { return ReferencedProtocols.end(); }
unsigned protocol_size() const { return ReferencedProtocols.size(); }
typedef ObjCProtocolList::loc_iterator protocol_loc_iterator;
- protocol_loc_iterator protocol_loc_begin() const {
- return ReferencedProtocols.loc_begin();
+ protocol_loc_iterator protocol_loc_begin() const {
+ return ReferencedProtocols.loc_begin();
}
- protocol_loc_iterator protocol_loc_end() const {
- return ReferencedProtocols.loc_end();
+ protocol_loc_iterator protocol_loc_end() const {
+ return ReferencedProtocols.loc_end();
}
ObjCCategoryDecl *getNextClassCategory() const { return NextClassCategory; }
bool IsClassExtension() const { return getIdentifier() == 0; }
const ObjCCategoryDecl *getNextClassExtension() const;
-
+
bool hasSynthBitfield() const { return HasSynthBitfield; }
void setHasSynthBitfield (bool val) { HasSynthBitfield = val; }
-
+
typedef specific_decl_iterator<ObjCIvarDecl> ivar_iterator;
ivar_iterator ivar_begin() const {
return ivar_iterator(decls_begin());
@@ -1175,6 +1364,11 @@ public:
SourceLocation getCategoryNameLoc() const { return CategoryNameLoc; }
void setCategoryNameLoc(SourceLocation Loc) { CategoryNameLoc = Loc; }
+
+ void setIvarLBraceLoc(SourceLocation Loc) { IvarLBraceLoc = Loc; }
+ SourceLocation getIvarLBraceLoc() const { return IvarLBraceLoc; }
+ void setIvarRBraceLoc(SourceLocation Loc) { IvarRBraceLoc = Loc; }
+ SourceLocation getIvarRBraceLoc() const { return IvarRBraceLoc; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCCategoryDecl *D) { return true; }
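
A class extension ("@interface Foo () ... @end") is stored as an anonymous category, which is exactly what IsClassExtension() above tests. An equivalent sketch:

    static bool isClassExtension(const clang::ObjCCategoryDecl *Cat) {
      return Cat->getIdentifier() == 0; // extensions have no category name
    }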
@@ -1185,6 +1379,8 @@ public:
};
class ObjCImplDecl : public ObjCContainerDecl {
+ virtual void anchor();
+
/// Class interface for this class/category implementation
ObjCInterfaceDecl *ClassInterface;
@@ -1248,28 +1444,36 @@ public:
///
/// ObjCCategoryImplDecl
class ObjCCategoryImplDecl : public ObjCImplDecl {
+ virtual void anchor();
+
// Category name
IdentifierInfo *Id;
+ // Category name location
+ SourceLocation CategoryNameLoc;
+
ObjCCategoryImplDecl(DeclContext *DC, IdentifierInfo *Id,
ObjCInterfaceDecl *classInterface,
- SourceLocation nameLoc, SourceLocation atStartLoc)
+ SourceLocation nameLoc, SourceLocation atStartLoc,
+ SourceLocation CategoryNameLoc)
: ObjCImplDecl(ObjCCategoryImpl, DC, classInterface, nameLoc, atStartLoc),
- Id(Id) {}
+ Id(Id), CategoryNameLoc(CategoryNameLoc) {}
public:
static ObjCCategoryImplDecl *Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
ObjCInterfaceDecl *classInterface,
SourceLocation nameLoc,
- SourceLocation atStartLoc);
+ SourceLocation atStartLoc,
+ SourceLocation CategoryNameLoc);
+ static ObjCCategoryImplDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// getIdentifier - Get the identifier that names the category
/// interface associated with this implementation.
/// FIXME: This is a bad API, we are overriding the NamedDecl::getIdentifier()
/// to mean something different. For example:
- /// ((NamedDecl *)SomeCategoryImplDecl)->getIdentifier()
- /// returns the class interface name, whereas
- /// ((ObjCCategoryImplDecl *)SomeCategoryImplDecl)->getIdentifier()
+ /// ((NamedDecl *)SomeCategoryImplDecl)->getIdentifier()
+ /// returns the class interface name, whereas
+ /// ((ObjCCategoryImplDecl *)SomeCategoryImplDecl)->getIdentifier()
/// returns the category name.
IdentifierInfo *getIdentifier() const {
return Id;
@@ -1278,6 +1482,8 @@ public:
ObjCCategoryDecl *getCategoryDecl() const;
+ SourceLocation getCategoryNameLoc() const { return CategoryNameLoc; }
+
/// getName - Get the name of the identifier for the class interface associated
/// with this implementation as a StringRef.
//
@@ -1306,10 +1512,12 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCCategoryImplDecl *D) { return true; }
static bool classofKind(Kind K) { return K == ObjCCategoryImpl;}
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
};
-raw_ostream &operator<<(raw_ostream &OS,
- const ObjCCategoryImplDecl *CID);
+raw_ostream &operator<<(raw_ostream &OS, const ObjCCategoryImplDecl &CID);
/// ObjCImplementationDecl - Represents a class definition - this is where
/// method definitions are specified. For example:
@@ -1326,8 +1534,13 @@ raw_ostream &operator<<(raw_ostream &OS,
/// specified, they need to be *identical* to the interface.
///
class ObjCImplementationDecl : public ObjCImplDecl {
+ virtual void anchor();
/// Implementation Class's super class.
ObjCInterfaceDecl *SuperClass;
+ /// An @implementation may have private ivars.
+ SourceLocation IvarLBraceLoc;
+ SourceLocation IvarRBraceLoc;
+
/// Support for ivar initialization.
/// IvarInitializers - The arguments used to initialize the ivars
CXXCtorInitializer **IvarInitializers;
@@ -1335,35 +1548,43 @@ class ObjCImplementationDecl : public ObjCImplDecl {
/// true if class has a .cxx_[construct,destruct] method.
bool HasCXXStructors : 1;
-
+
/// true if the class extension has at least one bitfield ivar.
bool HasSynthBitfield : 1;
-
+
ObjCImplementationDecl(DeclContext *DC,
ObjCInterfaceDecl *classInterface,
ObjCInterfaceDecl *superDecl,
- SourceLocation nameLoc, SourceLocation atStartLoc)
+ SourceLocation nameLoc, SourceLocation atStartLoc,
+ SourceLocation IvarLBraceLoc=SourceLocation(),
+ SourceLocation IvarRBraceLoc=SourceLocation())
: ObjCImplDecl(ObjCImplementation, DC, classInterface, nameLoc, atStartLoc),
- SuperClass(superDecl), IvarInitializers(0), NumIvarInitializers(0),
- HasCXXStructors(false), HasSynthBitfield(false) {}
+ SuperClass(superDecl), IvarLBraceLoc(IvarLBraceLoc),
+ IvarRBraceLoc(IvarRBraceLoc),
+ IvarInitializers(0), NumIvarInitializers(0),
+ HasCXXStructors(false), HasSynthBitfield(false){}
public:
static ObjCImplementationDecl *Create(ASTContext &C, DeclContext *DC,
ObjCInterfaceDecl *classInterface,
ObjCInterfaceDecl *superDecl,
SourceLocation nameLoc,
- SourceLocation atStartLoc);
-
+ SourceLocation atStartLoc,
+ SourceLocation IvarLBraceLoc=SourceLocation(),
+ SourceLocation IvarRBraceLoc=SourceLocation());
+
+ static ObjCImplementationDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
/// init_iterator - Iterates through the ivar initializer list.
typedef CXXCtorInitializer **init_iterator;
-
+
/// init_const_iterator - Iterates through the ivar initializer list.
typedef CXXCtorInitializer * const * init_const_iterator;
-
+
/// init_begin() - Retrieve an iterator to the first initializer.
init_iterator init_begin() { return IvarInitializers; }
/// begin() - Retrieve an iterator to the first initializer.
init_const_iterator init_begin() const { return IvarInitializers; }
-
+
/// init_end() - Retrieve an iterator past the last initializer.
init_iterator init_end() {
return IvarInitializers + NumIvarInitializers;
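
The init_* iterators walk the CXXCtorInitializer list used to construct C++ ivars. A hypothetical consistency check (assuming the matching const overload of init_end()):

    static unsigned countIvarInits(const clang::ObjCImplementationDecl *Impl) {
      unsigned N = 0;
      for (clang::ObjCImplementationDecl::init_const_iterator
             I = Impl->init_begin(), E = Impl->init_end(); I != E; ++I)
        ++N;
      return N; // should equal Impl->getNumIvarInitializers()
    }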
@@ -1376,21 +1597,21 @@ public:
unsigned getNumIvarInitializers() const {
return NumIvarInitializers;
}
-
+
void setNumIvarInitializers(unsigned numIvarInitializers) {
NumIvarInitializers = numIvarInitializers;
}
-
+
void setIvarInitializers(ASTContext &C,
CXXCtorInitializer ** initializers,
unsigned numInitializers);
bool hasCXXStructors() const { return HasCXXStructors; }
void setHasCXXStructors(bool val) { HasCXXStructors = val; }
-
+
bool hasSynthBitfield() const { return HasSynthBitfield; }
void setHasSynthBitfield (bool val) { HasSynthBitfield = val; }
-
+
/// getIdentifier - Get the identifier that names the class
/// interface associated with this implementation.
IdentifierInfo *getIdentifier() const {
@@ -1428,6 +1649,11 @@ public:
void setSuperClass(ObjCInterfaceDecl * superCls) { SuperClass = superCls; }
+ void setIvarLBraceLoc(SourceLocation Loc) { IvarLBraceLoc = Loc; }
+ SourceLocation getIvarLBraceLoc() const { return IvarLBraceLoc; }
+ void setIvarRBraceLoc(SourceLocation Loc) { IvarRBraceLoc = Loc; }
+ SourceLocation getIvarRBraceLoc() const { return IvarRBraceLoc; }
+
typedef specific_decl_iterator<ObjCIvarDecl> ivar_iterator;
ivar_iterator ivar_begin() const {
return ivar_iterator(decls_begin());
@@ -1450,12 +1676,12 @@ public:
friend class ASTDeclWriter;
};
-raw_ostream &operator<<(raw_ostream &OS,
- const ObjCImplementationDecl *ID);
+raw_ostream &operator<<(raw_ostream &OS, const ObjCImplementationDecl &ID);
/// ObjCCompatibleAliasDecl - Represents alias of a class. This alias is
/// declared as @compatibility_alias alias class.
class ObjCCompatibleAliasDecl : public NamedDecl {
+ virtual void anchor();
/// Class that this is an alias of.
ObjCInterfaceDecl *AliasedClass;
@@ -1467,6 +1693,9 @@ public:
SourceLocation L, IdentifierInfo *Id,
ObjCInterfaceDecl* aliasedClass);
+ static ObjCCompatibleAliasDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID);
+
const ObjCInterfaceDecl *getClassInterface() const { return AliasedClass; }
ObjCInterfaceDecl *getClassInterface() { return AliasedClass; }
void setClassInterface(ObjCInterfaceDecl *D) { AliasedClass = D; }
@@ -1482,6 +1711,7 @@ public:
/// @property (assign, readwrite) int MyProperty;
///
class ObjCPropertyDecl : public NamedDecl {
+ virtual void anchor();
public:
enum PropertyAttributeKind {
OBJC_PR_noattr = 0x00,
@@ -1509,6 +1739,7 @@ public:
enum PropertyControl { None, Required, Optional };
private:
SourceLocation AtLoc; // location of @property
+ SourceLocation LParenLoc; // location of '(' starting the attribute list, if any.
TypeSourceInfo *DeclType;
unsigned PropertyAttributes : NumPropertyAttrsBits;
unsigned PropertyAttributesAsWritten : NumPropertyAttrsBits;
@@ -1523,9 +1754,11 @@ private:
ObjCIvarDecl *PropertyIvarDecl; // Synthesize ivar for this property
ObjCPropertyDecl(DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
- SourceLocation AtLocation, TypeSourceInfo *T)
- : NamedDecl(ObjCProperty, DC, L, Id), AtLoc(AtLocation), DeclType(T),
- PropertyAttributes(OBJC_PR_noattr),
+ SourceLocation AtLocation, SourceLocation LParenLocation,
+ TypeSourceInfo *T)
+ : NamedDecl(ObjCProperty, DC, L, Id), AtLoc(AtLocation),
+ LParenLoc(LParenLocation), DeclType(T),
+ PropertyAttributes(OBJC_PR_noattr),
PropertyAttributesAsWritten(OBJC_PR_noattr),
PropertyImplementation(None),
GetterName(Selector()),
@@ -1535,11 +1768,18 @@ public:
static ObjCPropertyDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
IdentifierInfo *Id, SourceLocation AtLocation,
+ SourceLocation LParenLocation,
TypeSourceInfo *T,
PropertyControl propControl = None);
+
+ static ObjCPropertyDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
SourceLocation getAtLoc() const { return AtLoc; }
void setAtLoc(SourceLocation L) { AtLoc = L; }
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ void setLParenLoc(SourceLocation L) { LParenLoc = L; }
+
TypeSourceInfo *getTypeSourceInfo() const { return DeclType; }
QualType getType() const { return DeclType->getType(); }
void setType(TypeSourceInfo *T) { DeclType = T; }
@@ -1560,11 +1800,11 @@ public:
OBJC_PR_unsafe_unretained | OBJC_PR_retain | OBJC_PR_strong |
OBJC_PR_weak);
}
-
+
void setPropertyAttributesAsWritten(PropertyAttributeKind PRVal) {
PropertyAttributesAsWritten = PRVal;
}
-
+
void makeitReadWriteAttribute(void) {
PropertyAttributes &= ~OBJC_PR_readonly;
PropertyAttributes |= OBJC_PR_readwrite;
@@ -1630,7 +1870,7 @@ public:
return PropertyIvarDecl;
}
- virtual SourceRange getSourceRange() const {
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtLoc, getLocation());
}
@@ -1655,7 +1895,7 @@ public:
};
private:
SourceLocation AtLoc; // location of @synthesize or @dynamic
-
+
/// \brief For @synthesize, the location of the ivar, if it was written in
/// the source code.
///
@@ -1663,16 +1903,16 @@ private:
/// @synthesize a = b
/// \endcode
SourceLocation IvarLoc;
-
+
/// Property declaration being implemented
ObjCPropertyDecl *PropertyDecl;
/// Null for @dynamic. Required for @synthesize.
ObjCIvarDecl *PropertyIvarDecl;
-
+
/// Null for @dynamic. Non-null if property must be copy-constructed in getter
Expr *GetterCXXConstructor;
-
+
/// Null for @dynamic. Non-null if property has assignment operator to call
/// in Setter synthesis.
Expr *SetterCXXAssignment;
@@ -1683,7 +1923,7 @@ private:
ObjCIvarDecl *ivarDecl,
SourceLocation ivarLoc)
: Decl(ObjCPropertyImpl, DC, L), AtLoc(atLoc),
- IvarLoc(ivarLoc), PropertyDecl(property), PropertyIvarDecl(ivarDecl),
+ IvarLoc(ivarLoc), PropertyDecl(property), PropertyIvarDecl(ivarDecl),
GetterCXXConstructor(0), SetterCXXAssignment(0) {
assert (PK == Dynamic || PropertyIvarDecl);
}
@@ -1696,9 +1936,11 @@ public:
ObjCIvarDecl *ivarDecl,
SourceLocation ivarLoc);
- virtual SourceRange getSourceRange() const;
+ static ObjCPropertyImplDecl *CreateDeserialized(ASTContext &C, unsigned ID);
- SourceLocation getLocStart() const { return AtLoc; }
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ SourceLocation getLocStart() const LLVM_READONLY { return AtLoc; }
void setAtLoc(SourceLocation Loc) { AtLoc = Loc; }
ObjCPropertyDecl *getPropertyDecl() const {
@@ -1714,13 +1956,13 @@ public:
return PropertyIvarDecl;
}
SourceLocation getPropertyIvarDeclLoc() const { return IvarLoc; }
-
+
void setPropertyIvarDecl(ObjCIvarDecl *Ivar,
- SourceLocation IvarLoc) {
- PropertyIvarDecl = Ivar;
+ SourceLocation IvarLoc) {
+ PropertyIvarDecl = Ivar;
this->IvarLoc = IvarLoc;
}
-
+
Expr *getGetterCXXConstructor() const {
return GetterCXXConstructor;
}
@@ -1734,11 +1976,11 @@ public:
void setSetterCXXAssignment(Expr *setterCXXAssignment) {
SetterCXXAssignment = setterCXXAssignment;
}
-
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCPropertyImplDecl *D) { return true; }
static bool classofKind(Decl::Kind K) { return K == ObjCPropertyImpl; }
-
+
friend class ASTDeclReader;
};
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
index 138e47d..36549ea 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
@@ -15,8 +15,10 @@
#define LLVM_CLANG_AST_DECLTEMPLATE_H
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Redeclarable.h"
#include "clang/AST/TemplateBase.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Support/Compiler.h"
#include <limits>
namespace clang {
@@ -99,12 +101,12 @@ public:
/// The first template parameter list in a declaration will have depth 0,
/// the second template parameter list will have depth 1, etc.
unsigned getDepth() const;
-
+
SourceLocation getTemplateLoc() const { return TemplateLoc; }
SourceLocation getLAngleLoc() const { return LAngleLoc; }
SourceLocation getRAngleLoc() const { return RAngleLoc; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(TemplateLoc, RAngleLoc);
}
};
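
A source-level sketch of the depth numbering described above:

    template <typename T>   // T lives at depth 0
    struct Outer {
      template <typename U> // U lives at depth 1
      void f(T, U);
    };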
@@ -116,7 +118,8 @@ class FixedSizeTemplateParameterList : public TemplateParameterList {
NamedDecl *Params[N];
public:
- FixedSizeTemplateParameterList(SourceLocation TemplateLoc, SourceLocation LAngleLoc,
+ FixedSizeTemplateParameterList(SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
NamedDecl **Params, SourceLocation RAngleLoc) :
TemplateParameterList(TemplateLoc, LAngleLoc, Params, N, RAngleLoc) {
}
@@ -142,7 +145,7 @@ class TemplateArgumentList {
: Arguments(Args, Owned), NumArguments(NumArgs) { }
public:
- /// \brief Type used to indicate that the template argument list itself is a
+ /// \brief Type used to indicate that the template argument list itself is a
/// stack object. It does not own its template arguments.
enum OnStackType { OnStack };
@@ -156,12 +159,12 @@ public:
///
/// The template argument list does not own the template arguments
/// provided.
- explicit TemplateArgumentList(OnStackType,
+ explicit TemplateArgumentList(OnStackType,
const TemplateArgument *Args, unsigned NumArgs)
: Arguments(Args, false), NumArguments(NumArgs) { }
-
- /// \brief Produces a shallow copy of the given template argument list.
- ///
+
+ /// \brief Produces a shallow copy of the given template argument list.
+ ///
/// This operation assumes that the input argument list outlives it.
/// This takes the list as a pointer to avoid looking like a copy
/// constructor, since this really really isn't safe to use that
@@ -197,6 +200,7 @@ public:
/// parameters and a reference to the templated scoped declaration: the
/// underlying AST node.
class TemplateDecl : public NamedDecl {
+ virtual void anchor();
protected:
// This is probably never used.
TemplateDecl(Kind DK, DeclContext *DC, SourceLocation L,
@@ -236,7 +240,7 @@ public:
return K >= firstTemplate && K <= lastTemplate;
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(TemplateParams->getTemplateLoc(),
TemplatedDecl->getSourceRange().getEnd());
}
@@ -244,7 +248,7 @@ public:
protected:
NamedDecl *TemplatedDecl;
TemplateParameterList* TemplateParams;
-
+
public:
/// \brief Initialize the underlying templated declaration and
/// template parameters.
@@ -298,9 +302,9 @@ public:
const ASTTemplateArgumentListInfo *TemplateArgumentsAsWritten;
/// \brief The point at which this function template specialization was
- /// first instantiated.
+ /// first instantiated.
SourceLocation PointOfInstantiation;
-
+
/// \brief Retrieve the template from which this function was specialized.
FunctionTemplateDecl *getTemplate() const { return Template.getPointer(); }
@@ -325,16 +329,16 @@ public:
///
/// The point of instantiation may be an invalid source location if this
/// function has yet to be instantiated.
- SourceLocation getPointOfInstantiation() const {
- return PointOfInstantiation;
+ SourceLocation getPointOfInstantiation() const {
+ return PointOfInstantiation;
}
-
+
/// \brief Set the (first) point of instantiation of this function template
/// specialization.
void setPointOfInstantiation(SourceLocation POI) {
PointOfInstantiation = POI;
}
-
+
void Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, TemplateArguments->data(),
TemplateArguments->size(),
@@ -350,49 +354,49 @@ public:
}
};
-/// \brief Provides information a specialization of a member of a class
-/// template, which may be a member function, static data member, or
-/// member class.
+/// \brief Provides information about a specialization of a member of a class
+/// template, which may be a member function, static data member,
+/// member class or member enumeration.
class MemberSpecializationInfo {
// The member declaration from which this member was instantiated, and the
// manner in which the instantiation occurred (in the lower two bits).
llvm::PointerIntPair<NamedDecl *, 2> MemberAndTSK;
-
+
// The point at which this member was first instantiated.
SourceLocation PointOfInstantiation;
-
+
public:
- explicit
+ explicit
MemberSpecializationInfo(NamedDecl *IF, TemplateSpecializationKind TSK,
SourceLocation POI = SourceLocation())
: MemberAndTSK(IF, TSK - 1), PointOfInstantiation(POI) {
- assert(TSK != TSK_Undeclared &&
+ assert(TSK != TSK_Undeclared &&
"Cannot encode undeclared template specializations for members");
}
-
+
/// \brief Retrieve the member declaration from which this member was
/// instantiated.
NamedDecl *getInstantiatedFrom() const { return MemberAndTSK.getPointer(); }
-
+
/// \brief Determine what kind of template specialization this is.
TemplateSpecializationKind getTemplateSpecializationKind() const {
return (TemplateSpecializationKind)(MemberAndTSK.getInt() + 1);
}
-
+
/// \brief Set the template specialization kind.
void setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
- assert(TSK != TSK_Undeclared &&
+ assert(TSK != TSK_Undeclared &&
"Cannot encode undeclared template specializations for members");
MemberAndTSK.setInt(TSK - 1);
}
-
- /// \brief Retrieve the first point of instantiation of this member.
+
+ /// \brief Retrieve the first point of instantiation of this member.
/// If the point of instantiation is an invalid location, then this member
/// has not yet been instantiated.
- SourceLocation getPointOfInstantiation() const {
- return PointOfInstantiation;
+ SourceLocation getPointOfInstantiation() const {
+ return PointOfInstantiation;
}
-
+
/// \brief Set the first point of instantiation.
void setPointOfInstantiation(SourceLocation POI) {
PointOfInstantiation = POI;
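Because TSK_Undeclared is asserted away, the remaining four kinds fit the 2-bit integer of MemberAndTSK after the "- 1" shift. A sketch of the encoding, assuming the TemplateSpecializationKind enumerator values from clang/Basic/Specifiers.h:

// 2-bit encoding stored in MemberAndTSK (read back with "+ 1"):
//   TSK_ImplicitInstantiation            = 1  -> stored as 0
//   TSK_ExplicitSpecialization           = 2  -> stored as 1
//   TSK_ExplicitInstantiationDeclaration = 3  -> stored as 2
//   TSK_ExplicitInstantiationDefinition  = 4  -> stored as 3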
@@ -414,14 +418,14 @@ class DependentFunctionTemplateSpecializationInfo {
union {
// Force sizeof to be a multiple of sizeof(void*) so that the
// trailing data is aligned.
- void *Aligner;
+ void *Aligner;
struct {
/// The number of potential template candidates.
unsigned NumTemplates;
/// The number of template arguments.
- unsigned NumArgs;
+ unsigned NumArgs;
} d;
};
@@ -452,7 +456,7 @@ public:
/// \brief Returns the explicit template arguments that were given.
const TemplateArgumentLoc *getTemplateArgs() const {
return reinterpret_cast<const TemplateArgumentLoc*>(
- &getTemplates()[getNumTemplates()]);
+ &getTemplates()[getNumTemplates()]);
}
/// \brief Returns the number of explicit template arguments that were given.
@@ -474,33 +478,28 @@ public:
return AngleLocs.getEnd();
}
};
-
-/// Declaration of a redeclarable template.
-class RedeclarableTemplateDecl : public TemplateDecl {
- RedeclarableTemplateDecl *getPreviousDeclarationImpl() {
- return CommonOrPrev.dyn_cast<RedeclarableTemplateDecl*>();
+/// Declaration of a redeclarable template.
+class RedeclarableTemplateDecl : public TemplateDecl,
+ public Redeclarable<RedeclarableTemplateDecl>
+{
+ typedef Redeclarable<RedeclarableTemplateDecl> redeclarable_base;
+ virtual RedeclarableTemplateDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
}
-
- RedeclarableTemplateDecl *getCanonicalDeclImpl();
-
- void setPreviousDeclarationImpl(RedeclarableTemplateDecl *Prev);
-
- RedeclarableTemplateDecl *getInstantiatedFromMemberTemplateImpl() {
- return getCommonPtr()->InstantiatedFromMember.getPointer();
+ virtual RedeclarableTemplateDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
}
-
- void setInstantiatedFromMemberTemplateImpl(RedeclarableTemplateDecl *TD) {
- assert(!getCommonPtr()->InstantiatedFromMember.getPointer());
- getCommonPtr()->InstantiatedFromMember.setPointer(TD);
+ virtual RedeclarableTemplateDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
}
protected:
template <typename EntryType> struct SpecEntryTraits {
typedef EntryType DeclType;
- static DeclType *getMostRecentDeclaration(EntryType *D) {
- return D->getMostRecentDeclaration();
+ static DeclType *getMostRecentDecl(EntryType *D) {
+ return D->getMostRecentDecl();
}
};
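This hunk trades the hand-rolled CommonOrPrev plumbing for the generic Redeclarable<> CRTP base. A minimal sketch of the shape being relied on, abridged from what clang/AST/Redeclarable.h is assumed to provide (not the actual definition):

// Abridged shape of the CRTP base adopted here:
template <typename decl_type>
class Redeclarable {
protected:
  DeclLink RedeclLink;            // previous decl, or latest-decl link
public:
  decl_type *getPreviousDecl();   // 0 when this is the first declaration
  decl_type *getMostRecentDecl(); // newest declaration in the chain
  redecl_iterator redecls_begin() const;
  redecl_iterator redecls_end() const;
};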
@@ -522,7 +521,7 @@ protected:
SpecIterator(SetIteratorType SetIter) : SetIter(SetIter) {}
DeclType *operator*() const {
- return SETraits::getMostRecentDeclaration(&*SetIter);
+ return SETraits::getMostRecentDecl(&*SetIter);
}
DeclType *operator->() const { return **this; }
@@ -562,15 +561,12 @@ protected:
/// was explicitly specialized.
llvm::PointerIntPair<RedeclarableTemplateDecl*, 1, bool>
InstantiatedFromMember;
-
- /// \brief The latest declaration of this template.
- RedeclarableTemplateDecl *Latest;
};
- /// \brief A pointer to the previous declaration (if this is a redeclaration)
- /// or to the data that is common to all declarations of this template.
- llvm::PointerUnion<CommonBase*, RedeclarableTemplateDecl*> CommonOrPrev;
-
+ /// \brief Pointer to the common data shared by all declarations of this
+ /// template.
+ CommonBase *Common;
+
/// \brief Retrieves the "common" pointer shared by all (re-)declarations of
/// the same template. Calling this routine may implicitly allocate memory
/// for the common pointer.
@@ -582,56 +578,18 @@ protected:
RedeclarableTemplateDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, TemplateParameterList *Params,
NamedDecl *Decl)
- : TemplateDecl(DK, DC, L, Name, Params, Decl),
- CommonOrPrev((CommonBase*)0) { }
+ : TemplateDecl(DK, DC, L, Name, Params, Decl), Common() { }
public:
template <class decl_type> friend class RedeclarableTemplate;
- RedeclarableTemplateDecl *getCanonicalDecl() {
- return getCanonicalDeclImpl();
- }
-
- /// \brief Retrieve the previous declaration of this template, or
- /// NULL if no such declaration exists.
- RedeclarableTemplateDecl *getPreviousDeclaration() {
- return getPreviousDeclarationImpl();
- }
-
- /// \brief Retrieve the previous declaration of this template, or
- /// NULL if no such declaration exists.
- const RedeclarableTemplateDecl *getPreviousDeclaration() const {
- return
- const_cast<RedeclarableTemplateDecl*>(this)->getPreviousDeclaration();
- }
-
- /// \brief Retrieve the first declaration of this template, or itself
- /// if this the first one.
- RedeclarableTemplateDecl *getFirstDeclaration() {
- return getCanonicalDecl();
- }
-
- /// \brief Retrieve the first declaration of this template, or itself
- /// if this the first one.
- const RedeclarableTemplateDecl *getFirstDeclaration() const {
- return
- const_cast<RedeclarableTemplateDecl*>(this)->getFirstDeclaration();
- }
-
- /// \brief Retrieve the most recent declaration of this template, or itself
- /// if this the most recent one.
- RedeclarableTemplateDecl *getMostRecentDeclaration() {
- return getCommonPtr()->Latest;
- }
-
- /// \brief Retrieve the most recent declaration of this template, or itself
- /// if this the most recent one.
- const RedeclarableTemplateDecl *getMostRecentDeclaration() const {
- return
- const_cast<RedeclarableTemplateDecl*>(this)->getMostRecentDeclaration();
+ /// Retrieves the canonical declaration of this template.
+ RedeclarableTemplateDecl *getCanonicalDecl() { return getFirstDeclaration(); }
+ const RedeclarableTemplateDecl *getCanonicalDecl() const {
+ return getFirstDeclaration();
}
- /// \brief Determines whether this template was a specialization of a
+ /// \brief Determines whether this template was a specialization of a
/// member template.
///
/// In the following example, the function template \c X<int>::f and the
@@ -652,21 +610,64 @@ public:
bool isMemberSpecialization() {
return getCommonPtr()->InstantiatedFromMember.getInt();
}
-
+
/// \brief Note that this member template is a specialization.
void setMemberSpecialization() {
assert(getCommonPtr()->InstantiatedFromMember.getPointer() &&
"Only member templates can be member template specializations");
getCommonPtr()->InstantiatedFromMember.setInt(true);
}
-
- /// \brief Retrieve the previous declaration of this template, or
- /// NULL if no such declaration exists.
+
+ /// \brief Retrieve the member template from which this template was
+ /// instantiated, or NULL if this template was not instantiated from a
+ /// member template.
+ ///
+ /// A template is instantiated from a member template when the member
+ /// template itself is part of a class template (or member thereof). For
+ /// example, given
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct X {
+ /// template<typename U> void f(T, U);
+ /// };
+ ///
+ /// void test(X<int> x) {
+ /// x.f(1, 'a');
+ /// };
+ /// \endcode
+ ///
+ /// \c X<int>::f is a FunctionTemplateDecl that describes the function
+ /// template
+ ///
+ /// \code
+ /// template<typename U> void X<int>::f(int, U);
+ /// \endcode
+ ///
+ /// which was itself created during the instantiation of \c X<int>. Calling
+ /// getInstantiatedFromMemberTemplate() on this FunctionTemplateDecl will
+ /// retrieve the FunctionTemplateDecl for the original template "f" within
+ /// the class template \c X<T>, i.e.,
+ ///
+ /// \code
+ /// template<typename T>
+ /// template<typename U>
+ /// void X<T>::f(T, U);
+ /// \endcode
RedeclarableTemplateDecl *getInstantiatedFromMemberTemplate() {
- return getInstantiatedFromMemberTemplateImpl();
+ return getCommonPtr()->InstantiatedFromMember.getPointer();
+ }
+
+ void setInstantiatedFromMemberTemplate(RedeclarableTemplateDecl *TD) {
+ assert(!getCommonPtr()->InstantiatedFromMember.getPointer());
+ getCommonPtr()->InstantiatedFromMember.setPointer(TD);
}
- virtual RedeclarableTemplateDecl *getNextRedeclaration();
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
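With the using-declarations above, clients can walk the whole redeclaration chain generically; a hypothetical usage sketch:

// Hypothetical client walking a template's redeclaration chain:
static void dumpRedecls(RedeclarableTemplateDecl *D) {
  for (RedeclarableTemplateDecl::redecl_iterator
         R = D->redecls_begin(), E = D->redecls_end(); R != E; ++R)
    R->dump();   // each element is one (re-)declaration
}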
@@ -678,107 +679,35 @@ public:
return K >= firstRedeclarableTemplate && K <= lastRedeclarableTemplate;
}
+ friend class ASTReader;
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
-template <class decl_type>
-class RedeclarableTemplate {
- RedeclarableTemplateDecl *thisDecl() {
- return static_cast<decl_type*>(this);
- }
-
-public:
- /// \brief Retrieve the previous declaration of this function template, or
- /// NULL if no such declaration exists.
- decl_type *getPreviousDeclaration() {
- return static_cast<decl_type*>(thisDecl()->getPreviousDeclarationImpl());
- }
-
- /// \brief Retrieve the previous declaration of this function template, or
- /// NULL if no such declaration exists.
- const decl_type *getPreviousDeclaration() const {
- return const_cast<RedeclarableTemplate*>(this)->getPreviousDeclaration();
- }
-
- /// \brief Set the previous declaration of this function template.
- void setPreviousDeclaration(decl_type *Prev) {
- thisDecl()->setPreviousDeclarationImpl(Prev);
- }
-
- decl_type *getCanonicalDecl() {
- return static_cast<decl_type*>(thisDecl()->getCanonicalDeclImpl());
- }
-
- const decl_type *getCanonicalDecl() const {
- return const_cast<RedeclarableTemplate*>(this)->getCanonicalDecl();
- }
-
- /// \brief Retrieve the member template that this template was instantiated
- /// from.
- ///
- /// This routine will return non-NULL for member templates of
- /// class templates. For example, given:
- ///
- /// \code
- /// template <typename T>
- /// struct X {
- /// template <typename U> void f();
- /// template <typename U> struct A {};
- /// };
- /// \endcode
- ///
- /// X<int>::f<float> is a CXXMethodDecl (whose parent is X<int>, a
- /// ClassTemplateSpecializationDecl) for which getPrimaryTemplate() will
- /// return X<int>::f, a FunctionTemplateDecl (whose parent is again
- /// X<int>) for which getInstantiatedFromMemberTemplate() will return
- /// X<T>::f, a FunctionTemplateDecl (whose parent is X<T>, a
- /// ClassTemplateDecl).
- ///
- /// X<int>::A<float> is a ClassTemplateSpecializationDecl (whose parent
- /// is X<int>, also a CTSD) for which getSpecializedTemplate() will
- /// return X<int>::A<U>, a ClassTemplateDecl (whose parent is again
- /// X<int>) for which getInstantiatedFromMemberTemplate() will return
- /// X<T>::A<U>, a ClassTemplateDecl (whose parent is X<T>, also a CTD).
- ///
- /// \returns NULL if this is not an instantiation of a member template.
- decl_type *getInstantiatedFromMemberTemplate() {
- return static_cast<decl_type*>(
- thisDecl()->getInstantiatedFromMemberTemplateImpl());
- }
-
- void setInstantiatedFromMemberTemplate(decl_type *TD) {
- thisDecl()->setInstantiatedFromMemberTemplateImpl(TD);
- }
-};
-
template <> struct RedeclarableTemplateDecl::
SpecEntryTraits<FunctionTemplateSpecializationInfo> {
typedef FunctionDecl DeclType;
static DeclType *
- getMostRecentDeclaration(FunctionTemplateSpecializationInfo *I) {
- return I->Function->getMostRecentDeclaration();
+ getMostRecentDecl(FunctionTemplateSpecializationInfo *I) {
+ return I->Function->getMostRecentDecl();
}
};
/// Declaration of a template function.
-class FunctionTemplateDecl : public RedeclarableTemplateDecl,
- public RedeclarableTemplate<FunctionTemplateDecl> {
+class FunctionTemplateDecl : public RedeclarableTemplateDecl {
static void DeallocateCommon(void *Ptr);
protected:
- typedef RedeclarableTemplate<FunctionTemplateDecl> redeclarable_base;
-
/// \brief Data that is common to all of the declarations of a given
/// function template.
struct Common : CommonBase {
Common() : InjectedArgs(0) { }
-
+
/// \brief The function template specializations for this function
/// template, including explicit specializations and instantiations.
llvm::FoldingSet<FunctionTemplateSpecializationInfo> Specializations;
-
+
/// \brief The set of "injected" template arguments used within this
/// function template.
///
@@ -813,7 +742,7 @@ protected:
/// retrieved by an earlier call to findSpecialization().
void addSpecialization(FunctionTemplateSpecializationInfo* Info,
void *InsertPos);
-
+
public:
/// Get the underlying function declaration of the template.
FunctionDecl *getTemplatedDecl() const {
@@ -832,26 +761,31 @@ public:
unsigned NumArgs, void *&InsertPos);
FunctionTemplateDecl *getCanonicalDecl() {
- return redeclarable_base::getCanonicalDecl();
+ return cast<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
}
const FunctionTemplateDecl *getCanonicalDecl() const {
- return redeclarable_base::getCanonicalDecl();
+ return cast<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
}
/// \brief Retrieve the previous declaration of this function template, or
/// NULL if no such declaration exists.
- FunctionTemplateDecl *getPreviousDeclaration() {
- return redeclarable_base::getPreviousDeclaration();
+ FunctionTemplateDecl *getPreviousDecl() {
+ return cast_or_null<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
}
/// \brief Retrieve the previous declaration of this function template, or
/// NULL if no such declaration exists.
- const FunctionTemplateDecl *getPreviousDeclaration() const {
- return redeclarable_base::getPreviousDeclaration();
+ const FunctionTemplateDecl *getPreviousDecl() const {
+ return cast_or_null<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
}
FunctionTemplateDecl *getInstantiatedFromMemberTemplate() {
- return redeclarable_base::getInstantiatedFromMemberTemplate();
+ return cast_or_null<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getInstantiatedFromMemberTemplate());
}
typedef SpecIterator<FunctionTemplateSpecializationInfo> spec_iterator;
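spec_iterator now resolves every stored entry to its most recent declaration via the renamed SpecEntryTraits::getMostRecentDecl. A hedged sketch of enumerating specializations through the spec_begin()/spec_end() accessors, which are assumed unchanged elsewhere in this class:

// Sketch: visiting the known specializations of a function template.
static void visitSpecializations(FunctionTemplateDecl *FT) {
  for (FunctionTemplateDecl::spec_iterator
         I = FT->spec_begin(), E = FT->spec_end(); I != E; ++I) {
    FunctionDecl *Spec = *I;   // already the most recent redeclaration
    (void)Spec;
  }
}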
@@ -866,13 +800,13 @@ public:
/// \brief Retrieve the "injected" template arguments that correspond to the
/// template parameters of this function template.
- ///
+ ///
/// Although the C++ standard has no notion of the "injected" template
/// arguments for a function template, the notion is convenient when
/// we need to perform substitutions inside the definition of a function
- /// template.
+ /// template.
std::pair<const TemplateArgument *, unsigned> getInjectedTemplateArgs();
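A one-line illustration of what the "injected" arguments are (an editor's sketch):

// For: template<typename T, int N> void g(T (&)[N]);
// getInjectedTemplateArgs() yields the list <T, N>: each template
// parameter rewritten as the argument that names itself.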
-
+
/// \brief Create a function template node.
static FunctionTemplateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
@@ -881,7 +815,7 @@ public:
NamedDecl *Decl);
/// \brief Create an empty function template node.
- static FunctionTemplateDecl *Create(ASTContext &C, EmptyShell);
+ static FunctionTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
// Implement isa/cast/dyncast support
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -966,7 +900,8 @@ public:
unsigned D, unsigned P,
IdentifierInfo *Id, bool Typename,
bool ParameterPack);
- static TemplateTypeParmDecl *Create(const ASTContext &C, EmptyShell Empty);
+ static TemplateTypeParmDecl *CreateDeserialized(const ASTContext &C,
+ unsigned ID);
/// \brief Whether this template type parameter was declared with
/// the 'typename' keyword. If not, it was declared with the 'class'
@@ -1003,21 +938,21 @@ public:
DefaultArgument = 0;
InheritedDefault = false;
}
-
+
/// \brief Set whether this template type parameter was declared with
/// the 'typename' or 'class' keyword.
void setDeclaredWithTypename(bool withTypename) { Typename = withTypename; }
/// \brief Retrieve the depth of the template parameter.
unsigned getDepth() const;
-
+
/// \brief Retrieve the index of the template parameter.
unsigned getIndex() const;
/// \brief Returns whether this is a parameter pack.
bool isParameterPack() const;
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -1038,18 +973,18 @@ class NonTypeTemplateParmDecl
// FIXME: Collapse this into TemplateParamPosition; or, just move depth/index
// down here to save memory.
-
+
/// \brief Whether this non-type template parameter is a parameter pack.
bool ParameterPack;
-
- /// \brief Whether this non-type template parameter is an "expanded"
+
+ /// \brief Whether this non-type template parameter is an "expanded"
/// parameter pack, meaning that its type is a pack expansion and we
/// already know the set of types that expansion expands to.
bool ExpandedParameterPack;
-
+
/// \brief The number of types in an expanded parameter pack.
unsigned NumExpandedTypes;
-
+
NonTypeTemplateParmDecl(DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, unsigned D, unsigned P,
IdentifierInfo *Id, QualType T,
@@ -1069,7 +1004,7 @@ class NonTypeTemplateParmDecl
TypeSourceInfo **ExpandedTInfos);
friend class ASTDeclReader;
-
+
public:
static NonTypeTemplateParmDecl *
Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
@@ -1083,13 +1018,19 @@ public:
const QualType *ExpandedTypes, unsigned NumExpandedTypes,
TypeSourceInfo **ExpandedTInfos);
+ static NonTypeTemplateParmDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID);
+ static NonTypeTemplateParmDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID,
+ unsigned NumExpandedTypes);
+
using TemplateParmPosition::getDepth;
using TemplateParmPosition::setDepth;
using TemplateParmPosition::getPosition;
using TemplateParmPosition::setPosition;
using TemplateParmPosition::getIndex;
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
/// \brief Determine whether this template parameter has a default
/// argument.
@@ -1135,7 +1076,7 @@ public:
/// template<typename T, unsigned ...Dims> struct multi_array;
/// \endcode
bool isParameterPack() const { return ParameterPack; }
-
+
/// \brief Whether this parameter is a non-type template parameter pack
/// that has different types at different positions.
///
@@ -1150,25 +1091,26 @@ public:
/// struct Y { /* ... */ };
/// };
/// \endcode
- ///
+ ///
/// The parameter pack \c Values has a \c PackExpansionType as its type,
/// which expands \c Types. When \c Types is supplied with template arguments
- /// by instantiating \c X, the instantiation of \c Values becomes an
- /// expanded parameter pack. For example, instantiating
+ /// by instantiating \c X, the instantiation of \c Values becomes an
+ /// expanded parameter pack. For example, instantiating
/// \c X<int, unsigned int> results in \c Values being an expanded parameter
/// pack with expansion types \c int and \c unsigned int.
///
- /// The \c getExpansionType() and \c getExpansionTypeSourceInfo() functions
+ /// The \c getExpansionType() and \c getExpansionTypeSourceInfo() functions
/// return the expansion types.
bool isExpandedParameterPack() const { return ExpandedParameterPack; }
-
- /// \brief Retrieves the number of expansion types in an expanded parameter pack.
+
+ /// \brief Retrieves the number of expansion types in an expanded parameter
+ /// pack.
unsigned getNumExpansionTypes() const {
assert(ExpandedParameterPack && "Not an expansion parameter pack");
return NumExpandedTypes;
}
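A hedged sketch of consuming an expanded pack through the accessors above:

// Sketch: listing the concrete types of an expanded parameter pack.
static void listExpansionTypes(const NonTypeTemplateParmDecl *P) {
  if (!P->isExpandedParameterPack())
    return;                              // nothing was expanded
  for (unsigned I = 0, N = P->getNumExpansionTypes(); I != N; ++I) {
    QualType T = P->getExpansionType(I); // one type per pack element
    (void)T;
  }
}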
- /// \brief Retrieve a particular expansion type within an expanded parameter
+ /// \brief Retrieve a particular expansion type within an expanded parameter
/// pack.
QualType getExpansionType(unsigned I) const {
assert(I < NumExpandedTypes && "Out-of-range expansion type index");
@@ -1176,7 +1118,7 @@ public:
return QualType::getFromOpaquePtr(TypesAndInfos[2*I]);
}
- /// \brief Retrieve a particular expansion type source info within an
+ /// \brief Retrieve a particular expansion type source info within an
/// expanded parameter pack.
TypeSourceInfo *getExpansionTypeSourceInfo(unsigned I) const {
assert(I < NumExpandedTypes && "Out-of-range expansion type index");
@@ -1197,8 +1139,10 @@ public:
/// @endcode
/// A template template parameter is a TemplateDecl because it defines the
/// name of a template and the template parameters allowable for substitution.
-class TemplateTemplateParmDecl
- : public TemplateDecl, protected TemplateParmPosition {
+class TemplateTemplateParmDecl : public TemplateDecl,
+ protected TemplateParmPosition
+{
+ virtual void anchor();
/// DefaultArgument - The default template argument, if any.
TemplateArgumentLoc DefaultArgument;
@@ -1207,7 +1151,7 @@ class TemplateTemplateParmDecl
/// \brief Whether this parameter is a parameter pack.
bool ParameterPack;
-
+
TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L,
unsigned D, unsigned P, bool ParameterPack,
IdentifierInfo *Id, TemplateParameterList *Params)
@@ -1223,6 +1167,9 @@ public:
IdentifierInfo *Id,
TemplateParameterList *Params);
+ static TemplateTemplateParmDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID);
+
using TemplateParmPosition::getDepth;
using TemplateParmPosition::getPosition;
using TemplateParmPosition::getIndex;
@@ -1269,7 +1216,7 @@ public:
DefaultArgumentWasInherited = false;
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
SourceLocation End = getLocation();
if (hasDefaultArgument() && !defaultArgumentWasInherited())
End = getDefaultArgument().getSourceRange().getEnd();
@@ -1365,19 +1312,19 @@ public:
unsigned NumArgs,
ClassTemplateSpecializationDecl *PrevDecl);
static ClassTemplateSpecializationDecl *
- Create(ASTContext &Context, EmptyShell Empty);
+ CreateDeserialized(ASTContext &C, unsigned ID);
virtual void getNameForDiagnostic(std::string &S,
const PrintingPolicy &Policy,
bool Qualified) const;
- ClassTemplateSpecializationDecl *getMostRecentDeclaration() {
+ ClassTemplateSpecializationDecl *getMostRecentDecl() {
CXXRecordDecl *Recent
- = cast<CXXRecordDecl>(CXXRecordDecl::getMostRecentDeclaration());
+ = cast<CXXRecordDecl>(CXXRecordDecl::getMostRecentDecl());
if (!isa<ClassTemplateSpecializationDecl>(Recent)) {
// FIXME: Does injected class name need to be in the redeclarations chain?
- assert(Recent->isInjectedClassName() && Recent->getPreviousDeclaration());
- Recent = Recent->getPreviousDeclaration();
+ assert(Recent->isInjectedClassName() && Recent->getPreviousDecl());
+ Recent = Recent->getPreviousDecl();
}
return cast<ClassTemplateSpecializationDecl>(Recent);
}
@@ -1525,7 +1472,7 @@ public:
return ExplicitInfo ? ExplicitInfo->TemplateKeywordLoc : SourceLocation();
}
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
void Profile(llvm::FoldingSetNodeID &ID) const {
Profile(ID, TemplateArgs->data(), TemplateArgs->size(), getASTContext());
@@ -1552,13 +1499,15 @@ public:
static bool classof(const ClassTemplatePartialSpecializationDecl *) {
return true;
}
-
+
friend class ASTDeclReader;
friend class ASTDeclWriter;
};
class ClassTemplatePartialSpecializationDecl
: public ClassTemplateSpecializationDecl {
+ virtual void anchor();
+
/// \brief The list of template parameters
TemplateParameterList* TemplateParams;
@@ -1571,15 +1520,15 @@ class ClassTemplatePartialSpecializationDecl
/// specialization was added to the set of partial specializations for
/// its owning class template.
unsigned SequenceNumber;
-
- /// \brief The class template partial specialization from which this
+
+ /// \brief The class template partial specialization from which this
/// class template partial specialization was instantiated.
///
/// The boolean value will be true to indicate that this class template
/// partial specialization was specialized at this level.
llvm::PointerIntPair<ClassTemplatePartialSpecializationDecl *, 1, bool>
InstantiatedFromMember;
-
+
ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
DeclContext *DC,
SourceLocation StartLoc,
@@ -1592,7 +1541,7 @@ class ClassTemplatePartialSpecializationDecl
unsigned NumArgInfos,
ClassTemplatePartialSpecializationDecl *PrevDecl,
unsigned SequenceNumber);
-
+
ClassTemplatePartialSpecializationDecl()
: ClassTemplateSpecializationDecl(ClassTemplatePartialSpecialization),
TemplateParams(0), ArgsAsWritten(0),
@@ -1601,7 +1550,7 @@ class ClassTemplatePartialSpecializationDecl
public:
static ClassTemplatePartialSpecializationDecl *
- Create(ASTContext &Context, TagKind TK,DeclContext *DC,
+ Create(ASTContext &Context, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate,
@@ -1613,11 +1562,11 @@ public:
unsigned SequenceNumber);
static ClassTemplatePartialSpecializationDecl *
- Create(ASTContext &Context, EmptyShell Empty);
+ CreateDeserialized(ASTContext &C, unsigned ID);
- ClassTemplatePartialSpecializationDecl *getMostRecentDeclaration() {
+ ClassTemplatePartialSpecializationDecl *getMostRecentDecl() {
return cast<ClassTemplatePartialSpecializationDecl>(
- ClassTemplateSpecializationDecl::getMostRecentDeclaration());
+ ClassTemplateSpecializationDecl::getMostRecentDecl());
}
/// Get the list of template parameters
@@ -1654,9 +1603,9 @@ public:
/// \endcode
///
/// In this example, the instantiation of \c Outer<float>::Inner<int*> will
- /// end up instantiating the partial specialization
- /// \c Outer<float>::Inner<U*>, which itself was instantiated from the class
- /// template partial specialization \c Outer<T>::Inner<U*>. Given
+ /// end up instantiating the partial specialization
+ /// \c Outer<float>::Inner<U*>, which itself was instantiated from the class
+ /// template partial specialization \c Outer<T>::Inner<U*>. Given
/// \c Outer<float>::Inner<U*>, this function would return
/// \c Outer<T>::Inner<U*>.
ClassTemplatePartialSpecializationDecl *getInstantiatedFromMember() {
@@ -1664,15 +1613,15 @@ public:
= cast<ClassTemplatePartialSpecializationDecl>(getFirstDeclaration());
return First->InstantiatedFromMember.getPointer();
}
-
+
void setInstantiatedFromMember(
ClassTemplatePartialSpecializationDecl *PartialSpec) {
ClassTemplatePartialSpecializationDecl *First
= cast<ClassTemplatePartialSpecializationDecl>(getFirstDeclaration());
First->InstantiatedFromMember.setPointer(PartialSpec);
}
-
- /// \brief Determines whether this class template partial specialization
+
+ /// \brief Determines whether this class template partial specialization
/// template was a specialization of a member partial specialization.
///
/// In the following example, the member template partial specialization
@@ -1693,7 +1642,7 @@ public:
= cast<ClassTemplatePartialSpecializationDecl>(getFirstDeclaration());
return First->InstantiatedFromMember.getInt();
}
-
+
/// \brief Note that this member template is a specialization.
void setMemberSpecialization() {
ClassTemplatePartialSpecializationDecl *First
@@ -1711,7 +1660,7 @@ public:
return cast<InjectedClassNameType>(getTypeForDecl())
->getInjectedSpecializationType();
}
-
+
// FIXME: Add Profile support!
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -1728,18 +1677,15 @@ public:
};
/// Declaration of a class template.
-class ClassTemplateDecl : public RedeclarableTemplateDecl,
- public RedeclarableTemplate<ClassTemplateDecl> {
+class ClassTemplateDecl : public RedeclarableTemplateDecl {
static void DeallocateCommon(void *Ptr);
-
-protected:
- typedef RedeclarableTemplate<ClassTemplateDecl> redeclarable_base;
+protected:
/// \brief Data that is common to all of the declarations of a given
/// class template.
struct Common : CommonBase {
Common() : LazySpecializations() { }
-
+
/// \brief The class template specializations for this class
/// template, including explicit specializations and instantiations.
llvm::FoldingSet<ClassTemplateSpecializationDecl> Specializations;
@@ -1751,7 +1697,7 @@ protected:
/// \brief The injected-class-name type for this class template.
QualType InjectedClassNameType;
-
+
/// \brief If non-null, points to an array of specializations (including
/// partial specializations) known only by their external declaration IDs.
///
@@ -1762,7 +1708,7 @@ protected:
/// \brief Load any lazily-loaded specializations from the external source.
void LoadLazySpecializations();
-
+
/// \brief Retrieve the set of specializations of this class template.
llvm::FoldingSet<ClassTemplateSpecializationDecl> &getSpecializations();
@@ -1806,7 +1752,7 @@ public:
ClassTemplateDecl *PrevDecl);
/// Create an empty class template node.
- static ClassTemplateDecl *Create(ASTContext &C, EmptyShell);
+ static ClassTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// \brief Return the specialization with the provided arguments if it exists,
/// otherwise return the insertion point.
@@ -1819,26 +1765,31 @@ public:
void AddSpecialization(ClassTemplateSpecializationDecl *D, void *InsertPos);
ClassTemplateDecl *getCanonicalDecl() {
- return redeclarable_base::getCanonicalDecl();
+ return cast<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
}
const ClassTemplateDecl *getCanonicalDecl() const {
- return redeclarable_base::getCanonicalDecl();
+ return cast<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
}
/// \brief Retrieve the previous declaration of this class template, or
/// NULL if no such declaration exists.
- ClassTemplateDecl *getPreviousDeclaration() {
- return redeclarable_base::getPreviousDeclaration();
+ ClassTemplateDecl *getPreviousDecl() {
+ return cast_or_null<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
}
/// \brief Retrieve the previous declaration of this class template, or
/// NULL if no such declaration exists.
- const ClassTemplateDecl *getPreviousDeclaration() const {
- return redeclarable_base::getPreviousDeclaration();
+ const ClassTemplateDecl *getPreviousDecl() const {
+ return cast_or_null<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
}
ClassTemplateDecl *getInstantiatedFromMemberTemplate() {
- return redeclarable_base::getInstantiatedFromMemberTemplate();
+ return cast_or_null<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getInstantiatedFromMemberTemplate());
}
/// \brief Return the partial specialization with the provided arguments if it
@@ -1860,7 +1811,7 @@ public:
/// \brief Retrieve the partial specializations as an ordered list.
void getPartialSpecializations(
SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS);
-
+
/// \brief Find a class template partial specialization with the given
/// type T.
///
@@ -1870,7 +1821,7 @@ public:
/// \returns the class template partial specialization that exactly matches
/// the type \p T, or NULL if no such partial specialization exists.
ClassTemplatePartialSpecializationDecl *findPartialSpecialization(QualType T);
-
+
/// \brief Find a class template partial specialization which was instantiated
/// from the given member partial specialization.
///
@@ -1939,6 +1890,7 @@ public:
/// NOTE: This class is not currently in use. All of the above
/// will yield a FriendDecl, not a FriendTemplateDecl.
class FriendTemplateDecl : public Decl {
+ virtual void anchor();
public:
typedef llvm::PointerUnion<NamedDecl*,TypeSourceInfo*> FriendUnion;
@@ -1957,7 +1909,7 @@ private:
FriendTemplateDecl(DeclContext *DC, SourceLocation Loc,
- unsigned NParams,
+ unsigned NParams,
TemplateParameterList **Params,
FriendUnion Friend,
SourceLocation FriendLoc)
@@ -1977,12 +1929,12 @@ private:
public:
static FriendTemplateDecl *Create(ASTContext &Context,
DeclContext *DC, SourceLocation Loc,
- unsigned NParams,
+ unsigned NParams,
TemplateParameterList **Params,
FriendUnion Friend,
SourceLocation FriendLoc);
- static FriendTemplateDecl *Create(ASTContext &Context, EmptyShell Empty);
+ static FriendTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// If this friend declaration names a templated type (or
/// a dependent member type of a templated type), return that
@@ -2023,13 +1975,10 @@ public:
/// Declaration of an alias template. For example:
///
/// template <typename T> using V = std::map<T*, int, MyCompare<T>>;
-class TypeAliasTemplateDecl : public RedeclarableTemplateDecl,
- public RedeclarableTemplate<TypeAliasTemplateDecl> {
+class TypeAliasTemplateDecl : public RedeclarableTemplateDecl {
static void DeallocateCommon(void *Ptr);
protected:
- typedef RedeclarableTemplate<TypeAliasTemplateDecl> redeclarable_base;
-
typedef CommonBase Common;
TypeAliasTemplateDecl(DeclContext *DC, SourceLocation L, DeclarationName Name,
@@ -2050,29 +1999,34 @@ public:
TypeAliasTemplateDecl *getCanonicalDecl() {
- return redeclarable_base::getCanonicalDecl();
+ return cast<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
}
const TypeAliasTemplateDecl *getCanonicalDecl() const {
- return redeclarable_base::getCanonicalDecl();
+ return cast<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
}
/// \brief Retrieve the previous declaration of this function template, or
/// NULL if no such declaration exists.
- TypeAliasTemplateDecl *getPreviousDeclaration() {
- return redeclarable_base::getPreviousDeclaration();
+ TypeAliasTemplateDecl *getPreviousDecl() {
+ return cast_or_null<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
}
/// \brief Retrieve the previous declaration of this function template, or
/// NULL if no such declaration exists.
- const TypeAliasTemplateDecl *getPreviousDeclaration() const {
- return redeclarable_base::getPreviousDeclaration();
+ const TypeAliasTemplateDecl *getPreviousDecl() const {
+ return cast_or_null<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
}
TypeAliasTemplateDecl *getInstantiatedFromMemberTemplate() {
- return redeclarable_base::getInstantiatedFromMemberTemplate();
+ return cast_or_null<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getInstantiatedFromMemberTemplate());
}
-
+
/// \brief Create a function template node.
static TypeAliasTemplateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
@@ -2081,7 +2035,7 @@ public:
NamedDecl *Decl);
/// \brief Create an empty alias template node.
- static TypeAliasTemplateDecl *Create(ASTContext &C, EmptyShell);
+ static TypeAliasTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
// Implement isa/cast/dyncast support
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -2105,7 +2059,8 @@ public:
/// CXXMethodDecl. Then during an instantiation of class A, it will be
/// transformed into an actual function specialization.
class ClassScopeFunctionSpecializationDecl : public Decl {
-private:
+ virtual void anchor();
+
ClassScopeFunctionSpecializationDecl(DeclContext *DC, SourceLocation Loc,
CXXMethodDecl *FD)
: Decl(Decl::ClassScopeFunctionSpecialization, DC, Loc),
@@ -2126,11 +2081,9 @@ public:
return new (C) ClassScopeFunctionSpecializationDecl(DC, Loc, FD);
}
- static ClassScopeFunctionSpecializationDecl *Create(ASTContext &Context,
- EmptyShell Empty) {
- return new (Context)ClassScopeFunctionSpecializationDecl(0,
- SourceLocation(), 0);
- }
+ static ClassScopeFunctionSpecializationDecl *
+ CreateDeserialized(ASTContext &Context, unsigned ID);
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h
index b5b6bd4..62654b8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h
@@ -30,12 +30,12 @@ class DeclVisitor {
public:
RetTy Visit(Decl *D) {
switch (D->getKind()) {
- default: llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
#define DECL(DERIVED, BASE) \
case Decl::DERIVED: DISPATCH(DERIVED##Decl, DERIVED##Decl);
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
}
+ llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
}
// If the implementation chooses not to implement a certain visit
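The generated switch now covers every concrete Decl kind, so the unreachable marker belongs after it. A minimal, hypothetical subclass showing how the visitor is meant to be used:

// Hypothetical DeclVisitor subclass: counts function declarations.
class FunctionCounter : public DeclVisitor<FunctionCounter> {
public:
  unsigned Count;
  FunctionCounter() : Count(0) {}
  void VisitFunctionDecl(FunctionDecl *) { ++Count; }
};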
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
index 2170f2b..6349d9c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
@@ -17,6 +17,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/CanonicalType.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
template <typename T> struct DenseMapInfo;
@@ -510,8 +511,17 @@ public:
/// getEndLoc - Retrieve the location of the last token.
SourceLocation getEndLoc() const;
/// getSourceRange - The range of the declaration name.
- SourceRange getSourceRange() const {
- return SourceRange(getBeginLoc(), getEndLoc());
+ SourceRange getSourceRange() const LLVM_READONLY {
+ SourceLocation BeginLoc = getBeginLoc();
+ SourceLocation EndLoc = getEndLoc();
+ return SourceRange(BeginLoc, EndLoc.isValid() ? EndLoc : BeginLoc);
+ }
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return getBeginLoc();
+ }
+ SourceLocation getLocEnd() const LLVM_READONLY {
+ SourceLocation EndLoc = getEndLoc();
+ return EndLoc.isValid() ? EndLoc : getLocStart();
}
};
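The rewritten accessors guard against names whose end token carries no location; a sketch of the resulting guarantee:

// Sketch: the range no longer contains an invalid end location.
static SourceRange rangeOf(const DeclarationNameInfo &NameInfo) {
  SourceRange R = NameInfo.getSourceRange();
  // If getEndLoc() was invalid, R.getEnd() == R.getBegin() now,
  // instead of a half-invalid range.
  return R;
}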
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h b/contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h
index 2bbe502..948dcb4 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h
@@ -177,7 +177,7 @@ inline DeclContext::ddiag_iterator DeclContext::ddiag_begin() const {
assert(isDependentContext()
&& "cannot iterate dependent diagnostics of non-dependent context");
const DependentStoredDeclsMap *Map
- = static_cast<DependentStoredDeclsMap*>(getPrimaryContext()->LookupPtr);
+ = static_cast<DependentStoredDeclsMap*>(getPrimaryContext()->getLookupPtr());
if (!Map) return ddiag_iterator();
return ddiag_iterator(Map->FirstDiagnostic);
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Expr.h b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
index 1242f4e..558bd00 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
@@ -21,12 +21,13 @@
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ASTVector.h"
#include "clang/AST/TemplateBase.h"
-#include "clang/AST/UsuallyTinyPtrVector.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include <cctype>
namespace clang {
@@ -56,8 +57,8 @@ class Expr : public Stmt {
protected:
Expr(StmtClass SC, QualType T, ExprValueKind VK, ExprObjectKind OK,
- bool TD, bool VD, bool ID, bool ContainsUnexpandedParameterPack)
- : Stmt(SC)
+ bool TD, bool VD, bool ID, bool ContainsUnexpandedParameterPack)
+ : Stmt(SC)
{
ExprBits.TypeDependent = TD;
ExprBits.ValueDependent = VD;
@@ -97,8 +98,8 @@ public:
bool isValueDependent() const { return ExprBits.ValueDependent; }
/// \brief Set whether this expression is value-dependent or not.
- void setValueDependent(bool VD) {
- ExprBits.ValueDependent = VD;
+ void setValueDependent(bool VD) {
+ ExprBits.ValueDependent = VD;
if (VD)
ExprBits.InstantiationDependent = true;
}
@@ -117,8 +118,8 @@ public:
bool isTypeDependent() const { return ExprBits.TypeDependent; }
/// \brief Set whether this expression is type-dependent or not.
- void setTypeDependent(bool TD) {
- ExprBits.TypeDependent = TD;
+ void setTypeDependent(bool TD) {
+ ExprBits.TypeDependent = TD;
if (TD)
ExprBits.InstantiationDependent = true;
}
@@ -140,12 +141,12 @@ public:
/// }
/// \endcode
///
- bool isInstantiationDependent() const {
- return ExprBits.InstantiationDependent;
+ bool isInstantiationDependent() const {
+ return ExprBits.InstantiationDependent;
}
-
+
/// \brief Set whether this expression is instantiation-dependent or not.
- void setInstantiationDependent(bool ID) {
+ void setInstantiationDependent(bool ID) {
ExprBits.InstantiationDependent = ID;
}
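The reflowed setters preserve an invariant worth spelling out: type- or value-dependence always implies instantiation-dependence, never the reverse. Restating the example sketched in the comment above:

// template<typename T> void f() {
//   sizeof(T);          // value-dependent, hence instantiation-dependent
//   sizeof(sizeof(T));  // NOT value-dependent (always sizeof(size_t)),
//                       // yet still instantiation-dependent
// }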
@@ -163,8 +164,8 @@ public:
///
/// The expressions \c args and \c static_cast<Types&&>(args) both
/// contain parameter packs.
- bool containsUnexpandedParameterPack() const {
- return ExprBits.ContainsUnexpandedParameterPack;
+ bool containsUnexpandedParameterPack() const {
+ return ExprBits.ContainsUnexpandedParameterPack;
}
/// \brief Set the bit that describes whether this expression
@@ -175,7 +176,7 @@ public:
/// getExprLoc - Return the preferred location for the arrow when diagnosing
/// a problem with a generic expression.
- SourceLocation getExprLoc() const;
+ SourceLocation getExprLoc() const LLVM_READONLY;
/// isUnusedResultAWarning - Return true if this immediate expression should
/// be warned about if the result is unused. If so, fill in Loc and Ranges
@@ -235,7 +236,6 @@ public:
MLV_IncompleteType,
MLV_ConstQualified,
MLV_ArrayType,
- MLV_NotBlockQualified,
MLV_ReadonlyProperty,
MLV_NoSetterProperty,
MLV_MemberFunction,
@@ -271,7 +271,6 @@ public:
CM_RValue, // Not modifiable because it's an rvalue
CM_Function, // Not modifiable because it's a function; C++ only
CM_LValueCast, // Same as CM_RValue, but indicates GCC cast-as-lvalue ext
- CM_NotBlockQualified, // Not captured in the closure
CM_NoSetterProperty,// Implicit assignment to ObjC property without setter
CM_ConstQualified,
CM_ArrayType,
@@ -302,12 +301,12 @@ public:
bool isPRValue() const { return Kind >= CL_Function; }
bool isRValue() const { return Kind >= CL_XValue; }
bool isModifiable() const { return getModifiable() == CM_Modifiable; }
-
+
/// \brief Create a simple, modifiable lvalue.
static Classification makeSimpleLValue() {
return Classification(CL_LValue, CM_Modifiable);
}
-
+
};
/// \brief Classify - Classify this expression according to the C++0x
/// expression taxonomy.
@@ -390,54 +389,79 @@ public:
/// \brief Returns whether this expression refers to a vector element.
bool refersToVectorElement() const;
-
+
+ /// \brief Returns whether this expression has a placeholder type.
+ bool hasPlaceholderType() const {
+ return getType()->isPlaceholderType();
+ }
+
+ /// \brief Returns whether this expression has a specific placeholder type.
+ bool hasPlaceholderType(BuiltinType::Kind K) const {
+ assert(BuiltinType::isPlaceholderTypeKind(K));
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(getType()))
+ return BT->getKind() == K;
+ return false;
+ }
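The kind-specific overload lets callers test for one particular placeholder; hypothetical usage, assuming the well-known BuiltinType::Overload kind:

// Hypothetical helper: is this an as-yet-unresolved overload set?
static bool isUnresolvedOverload(const Expr *E) {
  return E->hasPlaceholderType(BuiltinType::Overload);
}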
+
/// isKnownToHaveBooleanValue - Return true if this is an integer expression
/// that is known to return 0 or 1. This happens for _Bool/bool expressions
/// but also int expressions which are produced by things like comparisons in
/// C.
bool isKnownToHaveBooleanValue() const;
-
+
/// isIntegerConstantExpr - Return true if this expression is a valid integer
/// constant expression, and, if so, return its value in Result. If not a
/// valid i-c-e, return false and fill in Loc (if specified) with the location
/// of the invalid expression.
+ ///
+ /// Note: This does not perform the implicit conversions required by C++11
+ /// [expr.const]p5.
bool isIntegerConstantExpr(llvm::APSInt &Result, ASTContext &Ctx,
SourceLocation *Loc = 0,
bool isEvaluated = true) const;
- bool isIntegerConstantExpr(ASTContext &Ctx, SourceLocation *Loc = 0) const {
- llvm::APSInt X;
- return isIntegerConstantExpr(X, Ctx, Loc);
- }
- /// isConstantInitializer - Returns true if this expression is a constant
- /// initializer, which can be emitted at compile-time.
- bool isConstantInitializer(ASTContext &Ctx, bool ForRef) const;
+ bool isIntegerConstantExpr(ASTContext &Ctx, SourceLocation *Loc = 0) const;
- /// EvalResult is a struct with detailed info about an evaluated expression.
- struct EvalResult {
- /// Val - This is the value the expression can be folded to.
- APValue Val;
+ /// isCXX98IntegralConstantExpr - Return true if this expression is an
+ /// integral constant expression in C++98. Can only be used in C++.
+ bool isCXX98IntegralConstantExpr(ASTContext &Ctx) const;
+
+ /// isCXX11ConstantExpr - Return true if this expression is a constant
+ /// expression in C++11. Can only be used in C++.
+ ///
+ /// Note: This does not perform the implicit conversions required by C++11
+ /// [expr.const]p5.
+ bool isCXX11ConstantExpr(ASTContext &Ctx, APValue *Result = 0,
+ SourceLocation *Loc = 0) const;
+
+ /// isPotentialConstantExpr - Return true if this function's definition
+ /// might be usable in a constant expression in C++11, if it were marked
+ /// constexpr. Return false if the function can never produce a constant
+ /// expression, along with diagnostics describing why not.
+ static bool isPotentialConstantExpr(const FunctionDecl *FD,
+ llvm::SmallVectorImpl<
+ PartialDiagnosticAt> &Diags);
+
+ /// isConstantInitializer - Returns true if this expression can be emitted to
+ /// IR as a constant, and thus can be used as a constant initializer in C.
+ bool isConstantInitializer(ASTContext &Ctx, bool ForRef) const;
+ /// EvalStatus is a struct with detailed info about an evaluation in progress.
+ struct EvalStatus {
/// HasSideEffects - Whether the evaluated expression has side effects.
/// For example, (f() && 0) can be folded, but it still has side effects.
bool HasSideEffects;
- /// Diag - If the expression is unfoldable, then Diag contains a note
- /// diagnostic indicating why it's not foldable. DiagLoc indicates a caret
- /// position for the error, and DiagExpr is the expression that caused
- /// the error.
- /// If the expression is foldable, but not an integer constant expression,
- /// Diag contains a note diagnostic that describes why it isn't an integer
- /// constant expression. If the expression *is* an integer constant
- /// expression, then Diag will be zero.
- unsigned Diag;
- const Expr *DiagExpr;
- SourceLocation DiagLoc;
+ /// Diag - If this is non-null, it will be filled in with a stack of notes
+ /// indicating why evaluation failed (or why it failed to produce a constant
+ /// expression).
+ /// If the expression is unfoldable, the notes will indicate why it's not
+ /// foldable. If the expression is foldable, but not a constant expression,
+ /// the notes will describe why it isn't a constant expression. If the
+ /// expression *is* a constant expression, no notes will be produced.
+ llvm::SmallVectorImpl<PartialDiagnosticAt> *Diag;
- EvalResult() : HasSideEffects(false), Diag(0), DiagExpr(0) {}
+ EvalStatus() : HasSideEffects(false), Diag(0) {}
- // isGlobalLValue - Return true if the evaluated lvalue expression
- // is global.
- bool isGlobalLValue() const;
// hasSideEffects - Return true if the evaluated expression has
// side effects.
bool hasSideEffects() const {
@@ -445,11 +469,23 @@ public:
}
};
- /// Evaluate - Return true if this is a constant which we can fold using
- /// any crazy technique (that has nothing to do with language standards) that
- /// we want to. If this function returns true, it returns the folded constant
- /// in Result.
- bool Evaluate(EvalResult &Result, const ASTContext &Ctx) const;
+ /// EvalResult is a struct with detailed info about an evaluated expression.
+ struct EvalResult : EvalStatus {
+ /// Val - This is the value the expression can be folded to.
+ APValue Val;
+
+ // isGlobalLValue - Return true if the evaluated lvalue expression
+ // is global.
+ bool isGlobalLValue() const;
+ };
+
+ /// EvaluateAsRValue - Return true if this is a constant which we can fold to
+ /// an rvalue using any crazy technique (that has nothing to do with language
+ /// standards) that we want to, even if the expression has side-effects. If
+ /// this function returns true, it returns the folded constant in Result. If
+ /// the expression is a glvalue, an lvalue-to-rvalue conversion will be
+ /// applied.
+ bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const;
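With the EvalStatus/EvalResult split, callers opt in to failure notes by pointing Diag at a vector before evaluating. A hedged usage sketch:

// Sketch: fold an expression to an rvalue and capture failure notes.
static bool tryFold(const Expr *E, const ASTContext &Ctx, APValue &Out) {
  llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
  Expr::EvalResult Result;
  Result.Diag = &Notes;               // optional; may stay null
  if (!E->EvaluateAsRValue(Result, Ctx))
    return false;                     // Notes now explains the failure
  Out = Result.Val;
  return !Result.hasSideEffects();
}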
/// EvaluateAsBooleanCondition - Return true if this is a constant
/// which we can fold and convert to a boolean condition using
@@ -457,30 +493,43 @@ public:
/// side-effects.
bool EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx) const;
+ enum SideEffectsKind { SE_NoSideEffects, SE_AllowSideEffects };
+
/// EvaluateAsInt - Return true if this is a constant which we can fold and
- /// convert to an integer using any crazy technique that we want to.
- bool EvaluateAsInt(llvm::APSInt &Result, const ASTContext &Ctx) const;
+ /// convert to an integer, using any crazy technique that we want to.
+ bool EvaluateAsInt(llvm::APSInt &Result, const ASTContext &Ctx,
+ SideEffectsKind AllowSideEffects = SE_NoSideEffects) const;
- /// isEvaluatable - Call Evaluate to see if this expression can be constant
- /// folded, but discard the result.
+ /// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
+ /// constant folded without side-effects, but discard the result.
bool isEvaluatable(const ASTContext &Ctx) const;
/// HasSideEffects - This routine returns true for all those expressions
- /// which must be evaluated each time and must not be optimized away
+ /// which must be evaluated each time and must not be optimized away
/// or evaluated at compile time. Example is a function call, volatile
/// variable read.
bool HasSideEffects(const ASTContext &Ctx) const;
+
+ /// \brief Determine whether this expression involves a call to any function
+ /// that is not trivial.
+ bool hasNonTrivialCall(ASTContext &Ctx);
- /// EvaluateKnownConstInt - Call Evaluate and return the folded integer. This
- /// must be called on an expression that constant folds to an integer.
+ /// EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded
+ /// integer. This must be called on an expression that constant folds to an
+ /// integer.
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const;
- /// EvaluateAsLValue - Evaluate an expression to see if it's a lvalue
- /// with link time known address.
+ /// EvaluateAsLValue - Evaluate an expression to see if we can fold it to an
+ /// lvalue with link time known address, with no side-effects.
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const;
- /// EvaluateAsLValue - Evaluate an expression to see if it's a lvalue.
- bool EvaluateAsAnyLValue(EvalResult &Result, const ASTContext &Ctx) const;
+ /// EvaluateAsInitializer - Evaluate an expression as if it were the
+ /// initializer of the given declaration. Returns true if the initializer
+ /// can be folded to a constant, and produces any relevant notes. In C++11,
+ /// notes will be produced if the expression is not a constant expression.
+ bool EvaluateAsInitializer(APValue &Result, const ASTContext &Ctx,
+ const VarDecl *VD,
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
/// \brief Enumeration used to describe the kind of Null pointer constant
/// returned from \c isNullPointerConstant().
@@ -503,16 +552,16 @@ public:
enum NullPointerConstantValueDependence {
/// \brief Specifies that the expression should never be value-dependent.
NPC_NeverValueDependent = 0,
-
+
/// \brief Specifies that a value-dependent expression of integral or
/// dependent type should be considered a null pointer constant.
NPC_ValueDependentIsNull,
-
+
/// \brief Specifies that a value-dependent expression should be considered
/// to never be a null pointer constant.
NPC_ValueDependentIsNotNull
};
-
+
/// isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to
/// a Null pointer constant. The return value can further distinguish the
/// kind of NULL pointer constant that was detected.
@@ -544,60 +593,62 @@ public:
/// IgnoreImpCasts - Skip past any implicit casts which might
/// surround this expression. Only skips ImplicitCastExprs.
- Expr *IgnoreImpCasts();
+ Expr *IgnoreImpCasts() LLVM_READONLY;
/// IgnoreImplicit - Skip past any implicit AST nodes which might
/// surround this expression.
- Expr *IgnoreImplicit() { return cast<Expr>(Stmt::IgnoreImplicit()); }
+ Expr *IgnoreImplicit() LLVM_READONLY {
+ return cast<Expr>(Stmt::IgnoreImplicit());
+ }
/// IgnoreParens - Ignore parentheses. If this Expr is a ParenExpr, return
/// its subexpression. If that subexpression is also a ParenExpr,
/// then this method recursively returns its subexpression, and so forth.
/// Otherwise, the method returns the current Expr.
- Expr *IgnoreParens();
+ Expr *IgnoreParens() LLVM_READONLY;
/// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
/// or CastExprs, returning their operand.
- Expr *IgnoreParenCasts();
+ Expr *IgnoreParenCasts() LLVM_READONLY;
- /// IgnoreParenImpCasts - Ignore parentheses and implicit casts. Strip off any
- /// ParenExpr or ImplicitCastExprs, returning their operand.
- Expr *IgnoreParenImpCasts();
+ /// IgnoreParenImpCasts - Ignore parentheses and implicit casts. Strip off
+ /// any ParenExpr or ImplicitCastExprs, returning their operand.
+ Expr *IgnoreParenImpCasts() LLVM_READONLY;
/// IgnoreConversionOperator - Ignore conversion operator. If this Expr is a
/// call to a conversion operator, return the argument.
- Expr *IgnoreConversionOperator();
+ Expr *IgnoreConversionOperator() LLVM_READONLY;
- const Expr *IgnoreConversionOperator() const {
+ const Expr *IgnoreConversionOperator() const LLVM_READONLY {
return const_cast<Expr*>(this)->IgnoreConversionOperator();
}
- const Expr *IgnoreParenImpCasts() const {
+ const Expr *IgnoreParenImpCasts() const LLVM_READONLY {
return const_cast<Expr*>(this)->IgnoreParenImpCasts();
}
-
+
/// Ignore parentheses and lvalue casts. Strip off any ParenExpr and
/// CastExprs that represent lvalue casts, returning their operand.
- Expr *IgnoreParenLValueCasts();
-
- const Expr *IgnoreParenLValueCasts() const {
+ Expr *IgnoreParenLValueCasts() LLVM_READONLY;
+
+ const Expr *IgnoreParenLValueCasts() const LLVM_READONLY {
return const_cast<Expr*>(this)->IgnoreParenLValueCasts();
}
/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
/// value (including ptr->int casts of the same size). Strip off any
/// ParenExpr or CastExprs, returning their operand.
- Expr *IgnoreParenNoopCasts(ASTContext &Ctx);
+ Expr *IgnoreParenNoopCasts(ASTContext &Ctx) LLVM_READONLY;
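The Ignore* family strips syntactic wrappers before structural matching; the usual client pattern, sketched:

// Sketch: strip parens and implicit casts before matching the form.
static const DeclRefExpr *asDeclRef(const Expr *E) {
  return dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
}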
/// \brief Determine whether this expression is a default function argument.
///
/// Default arguments are implicitly generated in the abstract syntax tree
- /// by semantic analysis for function calls, object constructions, etc. in
+ /// by semantic analysis for function calls, object constructions, etc. in
/// C++. Default arguments are represented by \c CXXDefaultArgExpr nodes;
/// this routine also looks through any implicit casts to determine whether
/// the expression is a default argument.
bool isDefaultArgument() const;
-
+
/// \brief Determine whether the result of this expression is a
/// temporary object of the given class type.
bool isTemporaryObject(ASTContext &Ctx, const CXXRecordDecl *TempTy) const;
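The Ignore* strippers above are the usual way to pattern-match through syntactic noise. A hedged sketch (E is an illustrative Expr*, process() a placeholder): looking through parentheses and implicit conversions so that ("foo") still matches as a string literal:

    // Both the ParenExpr and the implicit array-to-pointer decay are skipped.
    if (const StringLiteral *SL =
            dyn_cast<StringLiteral>(E->IgnoreParenImpCasts()))
      process(SL->getString());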
@@ -605,21 +656,20 @@ public:
/// \brief Whether this expression is an implicit reference to 'this' in C++.
bool isImplicitCXXThis() const;
- const Expr *IgnoreImpCasts() const {
+ const Expr *IgnoreImpCasts() const LLVM_READONLY {
return const_cast<Expr*>(this)->IgnoreImpCasts();
}
- const Expr *IgnoreParens() const {
+ const Expr *IgnoreParens() const LLVM_READONLY {
return const_cast<Expr*>(this)->IgnoreParens();
}
- const Expr *IgnoreParenCasts() const {
+ const Expr *IgnoreParenCasts() const LLVM_READONLY {
return const_cast<Expr*>(this)->IgnoreParenCasts();
}
- const Expr *IgnoreParenNoopCasts(ASTContext &Ctx) const {
+ const Expr *IgnoreParenNoopCasts(ASTContext &Ctx) const LLVM_READONLY {
return const_cast<Expr*>(this)->IgnoreParenNoopCasts(Ctx);
}
- static bool hasAnyTypeDependentArguments(Expr** Exprs, unsigned NumExprs);
- static bool hasAnyValueDependentArguments(Expr** Exprs, unsigned NumExprs);
+ static bool hasAnyTypeDependentArguments(llvm::ArrayRef<Expr *> Exprs);
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstExprConstant &&
@@ -642,15 +692,18 @@ class OpaqueValueExpr : public Expr {
friend class ASTStmtReader;
Expr *SourceExpr;
SourceLocation Loc;
-
+
public:
- OpaqueValueExpr(SourceLocation Loc, QualType T, ExprValueKind VK,
- ExprObjectKind OK = OK_Ordinary)
+ OpaqueValueExpr(SourceLocation Loc, QualType T, ExprValueKind VK,
+ ExprObjectKind OK = OK_Ordinary,
+ Expr *SourceExpr = 0)
: Expr(OpaqueValueExprClass, T, VK, OK,
- T->isDependentType(), T->isDependentType(),
+ T->isDependentType(),
+ T->isDependentType() ||
+ (SourceExpr && SourceExpr->isValueDependent()),
T->isInstantiationDependentType(),
- false),
- SourceExpr(0), Loc(Loc) {
+ false),
+ SourceExpr(SourceExpr), Loc(Loc) {
}
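With the new optional SourceExpr parameter, an OpaqueValueExpr can be bound at construction time to the expression it stands in for, so value dependence (computed above) and source locations (see getSourceRange below) follow automatically. A sketch, with Ctx and E as illustrative names for an ASTContext and the underlying Expr*:

    OpaqueValueExpr *OV =
        new (Ctx) OpaqueValueExpr(E->getExprLoc(), E->getType(),
                                  E->getValueKind(), E->getObjectKind(), E);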
/// Given an expression which invokes a copy constructor --- i.e. a
@@ -663,12 +716,12 @@ public:
/// \brief Retrieve the location of this expression.
SourceLocation getLocation() const { return Loc; }
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
if (SourceExpr) return SourceExpr->getSourceRange();
return Loc;
}
- SourceLocation getExprLoc() const {
+ SourceLocation getExprLoc() const LLVM_READONLY {
if (SourceExpr) return SourceExpr->getExprLoc();
return Loc;
}
@@ -684,7 +737,6 @@ public:
/// expression which binds the opaque value expression in the first
/// place.
Expr *getSourceExpr() const { return SourceExpr; }
- void setSourceExpr(Expr *e) { SourceExpr = e; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OpaqueValueExprClass;
@@ -709,9 +761,12 @@ public:
/// Specifies when this declaration reference expression has a record of
/// a NamedDecl (different from the referenced ValueDecl) which was found
/// during name lookup and/or overload resolution.
-/// DeclRefExprBits.HasExplicitTemplateArgs:
+/// DeclRefExprBits.HasTemplateKWAndArgsInfo:
/// Specifies when this declaration reference expression has an explicit
-/// C++ template argument list.
+/// C++ template keyword and/or template argument list.
+/// DeclRefExprBits.RefersToEnclosingLocal:
+/// Specifies when this declaration reference expression (validly)
+/// refers to a local variable from a different function.
class DeclRefExpr : public Expr {
/// \brief The declaration that we are referencing.
ValueDecl *D;
@@ -753,8 +808,11 @@ class DeclRefExpr : public Expr {
return const_cast<DeclRefExpr *>(this)->getInternalFoundDecl();
}
- DeclRefExpr(NestedNameSpecifierLoc QualifierLoc,
- ValueDecl *D, const DeclarationNameInfo &NameInfo,
+ DeclRefExpr(ASTContext &Ctx,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D, bool refersToEnclosingLocal,
+ const DeclarationNameInfo &NameInfo,
NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs,
QualType T, ExprValueKind VK);
@@ -765,23 +823,27 @@ class DeclRefExpr : public Expr {
/// \brief Computes the type- and value-dependence flags for this
/// declaration reference expression.
- void computeDependence();
+ void computeDependence(ASTContext &C);
public:
- DeclRefExpr(ValueDecl *D, QualType T, ExprValueKind VK, SourceLocation L,
+ DeclRefExpr(ValueDecl *D, bool refersToEnclosingLocal, QualType T,
+ ExprValueKind VK, SourceLocation L,
const DeclarationNameLoc &LocInfo = DeclarationNameLoc())
: Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
D(D), Loc(L), DNLoc(LocInfo) {
DeclRefExprBits.HasQualifier = 0;
- DeclRefExprBits.HasExplicitTemplateArgs = 0;
+ DeclRefExprBits.HasTemplateKWAndArgsInfo = 0;
DeclRefExprBits.HasFoundDecl = 0;
DeclRefExprBits.HadMultipleCandidates = 0;
- computeDependence();
+ DeclRefExprBits.RefersToEnclosingLocal = refersToEnclosingLocal;
+ computeDependence(D->getASTContext());
}
static DeclRefExpr *Create(ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
ValueDecl *D,
+ bool isEnclosingLocal,
SourceLocation NameLoc,
QualType T, ExprValueKind VK,
NamedDecl *FoundD = 0,
@@ -789,7 +851,9 @@ public:
static DeclRefExpr *Create(ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
ValueDecl *D,
+ bool isEnclosingLocal,
const DeclarationNameInfo &NameInfo,
QualType T, ExprValueKind VK,
NamedDecl *FoundD = 0,
@@ -799,7 +863,7 @@ public:
static DeclRefExpr *CreateEmpty(ASTContext &Context,
bool HasQualifier,
bool HasFoundDecl,
- bool HasExplicitTemplateArgs,
+ bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs);
ValueDecl *getDecl() { return D; }
@@ -812,7 +876,9 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY;
+ SourceLocation getLocEnd() const LLVM_READONLY;
/// \brief Determine whether this declaration reference was preceded by a
/// C++ nested-name-specifier, e.g., \c N::foo.
@@ -851,25 +917,65 @@ public:
return hasFoundDecl() ? getInternalFoundDecl() : D;
}
- /// \brief Determines whether this declaration reference was followed by an
- /// explict template argument list.
- bool hasExplicitTemplateArgs() const {
- return DeclRefExprBits.HasExplicitTemplateArgs;
+ bool hasTemplateKWAndArgsInfo() const {
+ return DeclRefExprBits.HasTemplateKWAndArgsInfo;
}
- /// \brief Retrieve the explicit template argument list that followed the
- /// member template name.
- ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
- assert(hasExplicitTemplateArgs());
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() {
+ if (!hasTemplateKWAndArgsInfo())
+ return 0;
+
if (hasFoundDecl())
- return *reinterpret_cast<ASTTemplateArgumentListInfo *>(
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(
&getInternalFoundDecl() + 1);
if (hasQualifier())
- return *reinterpret_cast<ASTTemplateArgumentListInfo *>(
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(
&getInternalQualifierLoc() + 1);
- return *reinterpret_cast<ASTTemplateArgumentListInfo *>(this + 1);
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(this + 1);
+ }
+
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<DeclRefExpr*>(this)->getTemplateKWAndArgsInfo();
+ }
+
+ /// \brief Retrieve the location of the template keyword preceding
+ /// this name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!hasTemplateKWAndArgsInfo()) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!hasTemplateKWAndArgsInfo()) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!hasTemplateKWAndArgsInfo()) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// \brief Determines whether the name in this declaration reference
+ /// was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
+ /// \brief Determines whether this declaration reference was followed by an
+ /// explicit template argument list.
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
+
+ /// \brief Retrieve the explicit template argument list that followed the
+ /// member template name.
+ ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
+ assert(hasExplicitTemplateArgs());
+ return *getTemplateKWAndArgsInfo();
}
/// \brief Retrieve the explicit template argument list that followed the
@@ -881,7 +987,7 @@ public:
/// \brief Retrieves the optional explicit template arguments.
/// This points to the same data as getExplicitTemplateArgs(), but
/// returns null if there are no explicit template arguments.
- const ASTTemplateArgumentListInfo *getExplicitTemplateArgsOpt() const {
+ const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() const {
if (!hasExplicitTemplateArgs()) return 0;
return &getExplicitTemplateArgs();
}
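The getter above works because the Create functions allocate the optional pieces in one buffer, in a fixed order, immediately after the DeclRefExpr itself: qualifier loc, then found decl, then the ASTTemplateKWAndArgsInfo. A minimal standalone model of that trailing-allocation idiom (not the actual clang code):

    struct Payload { int Extra; };
    struct Node {
      bool HasPayload;
      Payload *payload() {               // mirrors the casts above
        return HasPayload ? reinterpret_cast<Payload *>(this + 1) : 0;
      }
    };
    // The creator allocates sizeof(Node) + sizeof(Payload) bytes in one
    // block and placement-news the Payload at (node + 1), minding alignment.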
@@ -893,15 +999,6 @@ public:
getExplicitTemplateArgs().copyInto(List);
}
- /// \brief Retrieve the location of the left angle bracket following the
- /// member name ('<'), if any.
- SourceLocation getLAngleLoc() const {
- if (!hasExplicitTemplateArgs())
- return SourceLocation();
-
- return getExplicitTemplateArgs().LAngleLoc;
- }
-
/// \brief Retrieve the template arguments provided as part of this
/// template-id.
const TemplateArgumentLoc *getTemplateArgs() const {
@@ -920,15 +1017,6 @@ public:
return getExplicitTemplateArgs().NumTemplateArgs;
}
- /// \brief Retrieve the location of the right angle bracket following the
- /// template arguments ('>').
- SourceLocation getRAngleLoc() const {
- if (!hasExplicitTemplateArgs())
- return SourceLocation();
-
- return getExplicitTemplateArgs().RAngleLoc;
- }
-
/// \brief Returns true if this expression refers to a function that
/// was resolved from an overloaded set having size greater than 1.
bool hadMultipleCandidates() const {
@@ -941,6 +1029,12 @@ public:
DeclRefExprBits.HadMultipleCandidates = V;
}
+ /// Does this DeclRefExpr refer to a local declaration from an
+ /// enclosing function scope?
+ bool refersToEnclosingLocal() const {
+ return DeclRefExprBits.RefersToEnclosingLocal;
+ }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclRefExprClass;
}
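At the source level this bit marks cases like a captured variable used inside a lambda or block, where the DeclRefExpr sits in one function body but names a local of another. Illustrative C++:

    void f() {
      int n = 0;
      auto g = [n] { return n; };  // the DeclRefExpr to 'n' in the lambda
    }                              // body refers to an enclosing local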
@@ -971,7 +1065,7 @@ private:
public:
PredefinedExpr(SourceLocation l, QualType type, IdentType IT)
: Expr(PredefinedExprClass, type, VK_LValue, OK_Ordinary,
- type->isDependentType(), type->isDependentType(),
+ type->isDependentType(), type->isDependentType(),
type->isInstantiationDependentType(),
/*ContainsUnexpandedParameterPack=*/false),
Loc(l), Type(IT) {}
@@ -988,7 +1082,7 @@ public:
static std::string ComputeName(IdentType IT, const Decl *CurrentDecl);
- SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == PredefinedExprClass;
@@ -1008,11 +1102,11 @@ public:
/// the APFloat/APInt values will never get freed. APNumericStorage uses
/// ASTContext's allocator for memory allocation.
class APNumericStorage {
- unsigned BitWidth;
union {
uint64_t VAL; ///< Used to store the <= 64 bits integer value.
uint64_t *pVal; ///< Used to store the >64 bits integer value.
};
+ unsigned BitWidth;
bool hasAllocation() const { return llvm::APInt::getNumWords(BitWidth) > 1; }
@@ -1020,7 +1114,7 @@ class APNumericStorage {
APNumericStorage& operator=(const APNumericStorage&); // do not implement
protected:
- APNumericStorage() : BitWidth(0), VAL(0) { }
+ APNumericStorage() : VAL(0), BitWidth(0) { }
llvm::APInt getIntValue() const {
unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
@@ -1032,22 +1126,23 @@ protected:
void setIntValue(ASTContext &C, const llvm::APInt &Val);
};
-class APIntStorage : public APNumericStorage {
-public:
- llvm::APInt getValue() const { return getIntValue(); }
+class APIntStorage : private APNumericStorage {
+public:
+ llvm::APInt getValue() const { return getIntValue(); }
void setValue(ASTContext &C, const llvm::APInt &Val) { setIntValue(C, Val); }
};
-class APFloatStorage : public APNumericStorage {
-public:
- llvm::APFloat getValue() const { return llvm::APFloat(getIntValue()); }
+class APFloatStorage : private APNumericStorage {
+public:
+ llvm::APFloat getValue(bool IsIEEE) const {
+ return llvm::APFloat(getIntValue(), IsIEEE);
+ }
void setValue(ASTContext &C, const llvm::APFloat &Val) {
setIntValue(C, Val.bitcastToAPInt());
}
};
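Because APFloatStorage keeps only the raw bits, rebuilding the llvm::APFloat must be told which 128-bit semantics those bits use (IEEE quad versus PPC double-double); that is what the IsIEEE flag threaded into getValue() selects. A round-trip sketch, assuming the APFloat(APInt, bool isIEEE) constructor of this LLVM vintage:

    llvm::APFloat V(llvm::APFloat::IEEEquad, "1.0"); // 128-bit value
    llvm::APInt Bits = V.bitcastToAPInt();           // raw storage form
    llvm::APFloat Back(Bits, /*isIEEE=*/true);       // IEEEquad, not
                                                     // PPCDoubleDouble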
-class IntegerLiteral : public Expr {
- APIntStorage Num;
+class IntegerLiteral : public Expr, public APIntStorage {
SourceLocation Loc;
/// \brief Construct an empty integer literal.
@@ -1077,13 +1172,11 @@ public:
/// \brief Returns a new empty integer literal.
static IntegerLiteral *Create(ASTContext &C, EmptyShell Empty);
- llvm::APInt getValue() const { return Num.getValue(); }
- SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
/// \brief Retrieve the location of the literal.
SourceLocation getLocation() const { return Loc; }
- void setValue(ASTContext &C, const llvm::APInt &Val) { Num.setValue(C, Val); }
void setLocation(SourceLocation Location) { Loc = Location; }
static bool classof(const Stmt *T) {
@@ -1107,28 +1200,30 @@ public:
private:
unsigned Value;
SourceLocation Loc;
- unsigned Kind : 2;
public:
// type should be IntTy
CharacterLiteral(unsigned value, CharacterKind kind, QualType type,
SourceLocation l)
: Expr(CharacterLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
false, false),
- Value(value), Loc(l), Kind(kind) {
+ Value(value), Loc(l) {
+ CharacterLiteralBits.Kind = kind;
}
/// \brief Construct an empty character literal.
CharacterLiteral(EmptyShell Empty) : Expr(CharacterLiteralClass, Empty) { }
SourceLocation getLocation() const { return Loc; }
- CharacterKind getKind() const { return static_cast<CharacterKind>(Kind); }
+ CharacterKind getKind() const {
+ return static_cast<CharacterKind>(CharacterLiteralBits.Kind);
+ }
- SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
unsigned getValue() const { return Value; }
void setLocation(SourceLocation Location) { Loc = Location; }
- void setKind(CharacterKind kind) { Kind = kind; }
+ void setKind(CharacterKind kind) { CharacterLiteralBits.Kind = kind; }
void setValue(unsigned Val) { Value = Val; }
static bool classof(const Stmt *T) {
@@ -1140,35 +1235,41 @@ public:
child_range children() { return child_range(); }
};
-class FloatingLiteral : public Expr {
- APFloatStorage Num;
- bool IsExact : 1;
+class FloatingLiteral : public Expr, private APFloatStorage {
SourceLocation Loc;
FloatingLiteral(ASTContext &C, const llvm::APFloat &V, bool isexact,
QualType Type, SourceLocation L)
: Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
- false, false),
- IsExact(isexact), Loc(L) {
+ false, false), Loc(L) {
+ FloatingLiteralBits.IsIEEE =
+ &C.getTargetInfo().getLongDoubleFormat() == &llvm::APFloat::IEEEquad;
+ FloatingLiteralBits.IsExact = isexact;
setValue(C, V);
}
/// \brief Construct an empty floating-point literal.
- explicit FloatingLiteral(EmptyShell Empty)
- : Expr(FloatingLiteralClass, Empty), IsExact(false) { }
+ explicit FloatingLiteral(ASTContext &C, EmptyShell Empty)
+ : Expr(FloatingLiteralClass, Empty) {
+ FloatingLiteralBits.IsIEEE =
+ &C.getTargetInfo().getLongDoubleFormat() == &llvm::APFloat::IEEEquad;
+ FloatingLiteralBits.IsExact = false;
+ }
public:
static FloatingLiteral *Create(ASTContext &C, const llvm::APFloat &V,
bool isexact, QualType Type, SourceLocation L);
static FloatingLiteral *Create(ASTContext &C, EmptyShell Empty);
- llvm::APFloat getValue() const { return Num.getValue(); }
+ llvm::APFloat getValue() const {
+ return APFloatStorage::getValue(FloatingLiteralBits.IsIEEE);
+ }
void setValue(ASTContext &C, const llvm::APFloat &Val) {
- Num.setValue(C, Val);
+ APFloatStorage::setValue(C, Val);
}
- bool isExact() const { return IsExact; }
- void setExact(bool E) { IsExact = E; }
+ bool isExact() const { return FloatingLiteralBits.IsExact; }
+ void setExact(bool E) { FloatingLiteralBits.IsExact = E; }
/// getValueAsApproximateDouble - This returns the value as an inaccurate
/// double. Note that this may cause loss of precision, but is useful for
@@ -1178,7 +1279,7 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == FloatingLiteralClass;
@@ -1210,7 +1311,7 @@ public:
Expr *getSubExpr() { return cast<Expr>(Val); }
void setSubExpr(Expr *E) { Val = E; }
- SourceRange getSourceRange() const { return Val->getSourceRange(); }
+ SourceRange getSourceRange() const LLVM_READONLY { return Val->getSourceRange(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ImaginaryLiteralClass;
}
@@ -1249,17 +1350,24 @@ public:
private:
friend class ASTStmtReader;
- const char *StrData;
- unsigned ByteLength;
- unsigned NumConcatenated;
+ union {
+ const char *asChar;
+ const uint16_t *asUInt16;
+ const uint32_t *asUInt32;
+ } StrData;
+ unsigned Length;
+ unsigned CharByteWidth : 4;
unsigned Kind : 3;
- bool IsPascal : 1;
+ unsigned IsPascal : 1;
+ unsigned NumConcatenated;
SourceLocation TokLocs[1];
StringLiteral(QualType Ty) :
Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary, false, false, false,
false) {}
+ static int mapCharByteWidth(TargetInfo const &target, StringKind k);
+
public:
/// This is the "fully general" constructor that allows representation of
/// strings formed from multiple concatenated tokens.
@@ -1278,15 +1386,46 @@ public:
static StringLiteral *CreateEmpty(ASTContext &C, unsigned NumStrs);
StringRef getString() const {
- return StringRef(StrData, ByteLength);
- }
-
- unsigned getByteLength() const { return ByteLength; }
+ assert(CharByteWidth == 1 &&
+ "This function is used in places that assume strings use char");
+ return StringRef(StrData.asChar, getByteLength());
+ }
+
+ /// Allow access to clients that need the byte representation, such as
+ /// ASTWriterStmt::VisitStringLiteral().
+ StringRef getBytes() const {
+ // FIXME: StringRef may not be the right type to use as a result for this.
+ if (CharByteWidth == 1)
+ return StringRef(StrData.asChar, getByteLength());
+ if (CharByteWidth == 4)
+ return StringRef(reinterpret_cast<const char*>(StrData.asUInt32),
+ getByteLength());
+ assert(CharByteWidth == 2 && "unsupported CharByteWidth");
+ return StringRef(reinterpret_cast<const char*>(StrData.asUInt16),
+ getByteLength());
+ }
+
+ uint32_t getCodeUnit(size_t i) const {
+ assert(i < Length && "out of bounds access");
+ if (CharByteWidth == 1)
+ return static_cast<unsigned char>(StrData.asChar[i]);
+ if (CharByteWidth == 4)
+ return StrData.asUInt32[i];
+ assert(CharByteWidth == 2 && "unsupported CharByteWidth");
+ return StrData.asUInt16[i];
+ }
+
+ unsigned getByteLength() const { return CharByteWidth * Length; }
+ unsigned getLength() const { return Length; }
+ unsigned getCharByteWidth() const { return CharByteWidth; }
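With the width stored per literal, code units are indexed uniformly regardless of encoding. Illustrative values for a node SL modeling the UTF-16 literal u"ab":

    // SL->getLength()        == 2       code units
    // SL->getCharByteWidth() == 2       bytes per code unit
    // SL->getByteLength()    == 4       total bytes
    // SL->getCodeUnit(1)     == 0x0062  UTF-16 unit for 'b'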
/// \brief Sets the string data to the given string data.
- void setString(ASTContext &C, StringRef Str);
+ void setString(ASTContext &C, StringRef Str,
+ StringKind Kind, bool IsPascal);
StringKind getKind() const { return static_cast<StringKind>(Kind); }
+
bool isAscii() const { return Kind == Ascii; }
bool isWide() const { return Kind == Wide; }
bool isUTF8() const { return Kind == UTF8; }
@@ -1301,6 +1440,7 @@ public:
return true;
return false;
}
+
/// getNumConcatenated - Get the number of string literal tokens that were
/// concatenated in translation phase #6 to form this string literal.
unsigned getNumConcatenated() const { return NumConcatenated; }
@@ -1313,7 +1453,7 @@ public:
assert(TokNum < NumConcatenated && "Invalid tok number");
TokLocs[TokNum] = L;
}
-
+
/// getLocationOfByte - Return a source location that points to the specified
/// byte of this string literal.
///
@@ -1329,7 +1469,7 @@ public:
tokloc_iterator tokloc_begin() const { return TokLocs; }
tokloc_iterator tokloc_end() const { return TokLocs+NumConcatenated; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(TokLocs[0], TokLocs[NumConcatenated-1]);
}
static bool classof(const Stmt *T) {
@@ -1363,7 +1503,7 @@ public:
Expr *getSubExpr() { return cast<Expr>(Val); }
void setSubExpr(Expr *E) { Val = E; }
- SourceRange getSourceRange() const { return SourceRange(L, R); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(L, R); }
/// \brief Get the location of the left parentheses '('.
SourceLocation getLParen() const { return L; }
@@ -1408,7 +1548,7 @@ public:
: Expr(UnaryOperatorClass, type, VK, OK,
input->isTypeDependent() || type->isDependentType(),
input->isValueDependent(),
- (input->isInstantiationDependent() ||
+ (input->isInstantiationDependent() ||
type->isInstantiationDependentType()),
input->containsUnexpandedParameterPack()),
Opc(opc), Loc(l), Val(input) {}
@@ -1439,12 +1579,26 @@ public:
bool isPrefix() const { return isPrefix(getOpcode()); }
bool isPostfix() const { return isPostfix(getOpcode()); }
+
+ static bool isIncrementOp(Opcode Op) {
+ return Op == UO_PreInc || Op == UO_PostInc;
+ }
bool isIncrementOp() const {
- return Opc == UO_PreInc || Opc == UO_PostInc;
+ return isIncrementOp(getOpcode());
+ }
+
+ static bool isDecrementOp(Opcode Op) {
+ return Op == UO_PreDec || Op == UO_PostDec;
+ }
+ bool isDecrementOp() const {
+ return isDecrementOp(getOpcode());
}
+
+ static bool isIncrementDecrementOp(Opcode Op) { return Op <= UO_PreDec; }
bool isIncrementDecrementOp() const {
- return Opc <= UO_PreDec;
+ return isIncrementDecrementOp(getOpcode());
}
+
static bool isArithmeticOp(Opcode Op) {
return Op >= UO_Plus && Op <= UO_LNot;
}
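These static forms classify a bare opcode with no UnaryOperator at hand, and they lean on the enumerator order (the four increment/decrement opcodes come first, so Op <= UO_PreDec covers exactly them). For example:

    assert(UnaryOperator::isIncrementDecrementOp(UO_PostInc));
    assert(!UnaryOperator::isArithmeticOp(UO_PreDec));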
@@ -1462,13 +1616,13 @@ public:
/// the given unary opcode.
static OverloadedOperatorKind getOverloadedOperator(Opcode Opc);
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
if (isPostfix())
return SourceRange(Val->getLocStart(), Loc);
else
return SourceRange(Loc, Val->getLocEnd());
}
- SourceLocation getExprLoc() const { return Loc; }
+ SourceLocation getExprLoc() const LLVM_READONLY { return Loc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == UnaryOperatorClass;
@@ -1484,14 +1638,14 @@ public:
/// @code
/// struct S {
/// float f;
-/// double d;
+/// double d;
/// };
/// struct T {
/// int i;
/// struct S s[10];
/// };
/// @endcode
-/// we can represent and evaluate the expression @c offsetof(struct T, s[2].d).
+/// we can represent and evaluate the expression @c offsetof(struct T, s[2].d).
class OffsetOfExpr : public Expr {
public:
@@ -1513,48 +1667,48 @@ public:
private:
enum { MaskBits = 2, Mask = 0x03 };
-
+
/// \brief The source range that covers this part of the designator.
SourceRange Range;
-
+
/// \brief The data describing the designator, which comes in three
/// different forms, depending on the lower two bits.
- /// - An unsigned index into the array of Expr*'s stored after this node
+ /// - An unsigned index into the array of Expr*'s stored after this node
/// in memory, for [constant-expression] designators.
/// - A FieldDecl*, for references to a known field.
/// - An IdentifierInfo*, for references to a field with a given name
/// when the class type is dependent.
- /// - A CXXBaseSpecifier*, for references that look at a field in a
+ /// - A CXXBaseSpecifier*, for references that look at a field in a
/// base class.
uintptr_t Data;
-
+
public:
/// \brief Create an offsetof node that refers to an array element.
- OffsetOfNode(SourceLocation LBracketLoc, unsigned Index,
+ OffsetOfNode(SourceLocation LBracketLoc, unsigned Index,
SourceLocation RBracketLoc)
: Range(LBracketLoc, RBracketLoc), Data((Index << 2) | Array) { }
-
+
/// \brief Create an offsetof node that refers to a field.
- OffsetOfNode(SourceLocation DotLoc, FieldDecl *Field,
+ OffsetOfNode(SourceLocation DotLoc, FieldDecl *Field,
SourceLocation NameLoc)
- : Range(DotLoc.isValid()? DotLoc : NameLoc, NameLoc),
+ : Range(DotLoc.isValid() ? DotLoc : NameLoc, NameLoc),
Data(reinterpret_cast<uintptr_t>(Field) | OffsetOfNode::Field) { }
-
+
/// \brief Create an offsetof node that refers to an identifier.
OffsetOfNode(SourceLocation DotLoc, IdentifierInfo *Name,
SourceLocation NameLoc)
- : Range(DotLoc.isValid()? DotLoc : NameLoc, NameLoc),
+ : Range(DotLoc.isValid() ? DotLoc : NameLoc, NameLoc),
Data(reinterpret_cast<uintptr_t>(Name) | Identifier) { }
/// \brief Create an offsetof node that refers into a C++ base class.
explicit OffsetOfNode(const CXXBaseSpecifier *Base)
: Range(), Data(reinterpret_cast<uintptr_t>(Base) | OffsetOfNode::Base) {}
-
+
/// \brief Determine what kind of offsetof node this is.
- Kind getKind() const {
+ Kind getKind() const {
return static_cast<Kind>(Data & Mask);
}
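A standalone sketch of the tag-in-low-bits encoding used by Data: the kind lives in the two low bits (Mask == 0x03) and the payload, an array index or a suitably aligned pointer, in the rest:

    unsigned Index = 7;                                        // [7] designator
    std::uintptr_t Data = (std::uintptr_t(Index) << 2) | 0x0;  // tag 0 == Array
    assert((Data & 0x03) == 0x0);                              // getKind()
    assert(unsigned(Data >> 2) == 7);                          // getArrayExprIndex()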
-
+
/// \brief For an array element node, returns the index into the array
/// of expressions.
unsigned getArrayExprIndex() const {
@@ -1567,28 +1721,28 @@ public:
assert(getKind() == Field);
return reinterpret_cast<FieldDecl *>(Data & ~(uintptr_t)Mask);
}
-
+
/// \brief For a field or identifier offsetof node, returns the name of
/// the field.
IdentifierInfo *getFieldName() const;
-
+
/// \brief For a base class node, returns the base specifier.
CXXBaseSpecifier *getBase() const {
assert(getKind() == Base);
- return reinterpret_cast<CXXBaseSpecifier *>(Data & ~(uintptr_t)Mask);
+ return reinterpret_cast<CXXBaseSpecifier *>(Data & ~(uintptr_t)Mask);
}
-
+
/// \brief Retrieve the source range that covers this offsetof node.
///
/// For an array element node, the source range contains the locations of
/// the square brackets. For a field or identifier node, the source range
- /// contains the location of the period (if there is one) and the
+ /// contains the location of the period (if there is one) and the
/// identifier.
- SourceRange getSourceRange() const { return Range; }
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
};
private:
-
+
SourceLocation OperatorLoc, RParenLoc;
// Base type.
TypeSourceInfo *TSInfo;
@@ -1596,26 +1750,26 @@ private:
unsigned NumComps;
// Number of sub-expressions (i.e. array subscript expressions).
unsigned NumExprs;
-
- OffsetOfExpr(ASTContext &C, QualType type,
+
+ OffsetOfExpr(ASTContext &C, QualType type,
SourceLocation OperatorLoc, TypeSourceInfo *tsi,
- OffsetOfNode* compsPtr, unsigned numComps,
+ OffsetOfNode* compsPtr, unsigned numComps,
Expr** exprsPtr, unsigned numExprs,
SourceLocation RParenLoc);
explicit OffsetOfExpr(unsigned numComps, unsigned numExprs)
: Expr(OffsetOfExprClass, EmptyShell()),
- TSInfo(0), NumComps(numComps), NumExprs(numExprs) {}
+ TSInfo(0), NumComps(numComps), NumExprs(numExprs) {}
public:
-
- static OffsetOfExpr *Create(ASTContext &C, QualType type,
- SourceLocation OperatorLoc, TypeSourceInfo *tsi,
- OffsetOfNode* compsPtr, unsigned numComps,
+
+ static OffsetOfExpr *Create(ASTContext &C, QualType type,
+ SourceLocation OperatorLoc, TypeSourceInfo *tsi,
+ OffsetOfNode* compsPtr, unsigned numComps,
Expr** exprsPtr, unsigned numExprs,
SourceLocation RParenLoc);
- static OffsetOfExpr *CreateEmpty(ASTContext &C,
+ static OffsetOfExpr *CreateEmpty(ASTContext &C,
unsigned NumComps, unsigned NumExprs);
/// getOperatorLoc - Return the location of the operator.
@@ -1625,14 +1779,14 @@ public:
/// \brief Return the location of the right parentheses.
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation R) { RParenLoc = R; }
-
+
TypeSourceInfo *getTypeSourceInfo() const {
return TSInfo;
}
void setTypeSourceInfo(TypeSourceInfo *tsi) {
TSInfo = tsi;
}
-
+
const OffsetOfNode &getComponent(unsigned Idx) const {
assert(Idx < NumComps && "Subscript out of range");
return reinterpret_cast<const OffsetOfNode *> (this + 1)[Idx];
@@ -1642,7 +1796,7 @@ public:
assert(Idx < NumComps && "Subscript out of range");
reinterpret_cast<OffsetOfNode *> (this + 1)[Idx] = ON;
}
-
+
unsigned getNumComponents() const {
return NumComps;
}
@@ -1661,12 +1815,12 @@ public:
reinterpret_cast<Expr **>(
reinterpret_cast<OffsetOfNode *>(this+1) + NumComps)[Idx] = E;
}
-
+
unsigned getNumExpressions() const {
return NumExprs;
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(OperatorLoc, RParenLoc);
}
@@ -1689,8 +1843,6 @@ public:
/// expression operand. Used for sizeof/alignof (C99 6.5.3.4) and
/// vec_step (OpenCL 1.1 6.11.12).
class UnaryExprOrTypeTraitExpr : public Expr {
- unsigned Kind : 2;
- bool isType : 1; // true if operand is a type, false if an expression
union {
TypeSourceInfo *Ty;
Stmt *Ex;
@@ -1707,7 +1859,9 @@ public:
TInfo->getType()->isDependentType(),
TInfo->getType()->isInstantiationDependentType(),
TInfo->getType()->containsUnexpandedParameterPack()),
- Kind(ExprKind), isType(true), OpLoc(op), RParenLoc(rp) {
+ OpLoc(op), RParenLoc(rp) {
+ UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
+ UnaryExprOrTypeTraitExprBits.IsType = true;
Argument.Ty = TInfo;
}
@@ -1720,7 +1874,9 @@ public:
E->isTypeDependent(),
E->isInstantiationDependent(),
E->containsUnexpandedParameterPack()),
- Kind(ExprKind), isType(false), OpLoc(op), RParenLoc(rp) {
+ OpLoc(op), RParenLoc(rp) {
+ UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
+ UnaryExprOrTypeTraitExprBits.IsType = false;
Argument.Ex = E;
}
@@ -1729,11 +1885,11 @@ public:
: Expr(UnaryExprOrTypeTraitExprClass, Empty) { }
UnaryExprOrTypeTrait getKind() const {
- return static_cast<UnaryExprOrTypeTrait>(Kind);
+ return static_cast<UnaryExprOrTypeTrait>(UnaryExprOrTypeTraitExprBits.Kind);
}
- void setKind(UnaryExprOrTypeTrait K) { Kind = K; }
+ void setKind(UnaryExprOrTypeTrait K) {
+ UnaryExprOrTypeTraitExprBits.Kind = K;
+ }
- bool isArgumentType() const { return isType; }
+ bool isArgumentType() const { return UnaryExprOrTypeTraitExprBits.IsType; }
QualType getArgumentType() const {
return getArgumentTypeInfo()->getType();
}
@@ -1749,10 +1905,13 @@ public:
return const_cast<UnaryExprOrTypeTraitExpr*>(this)->getArgumentExpr();
}
- void setArgument(Expr *E) { Argument.Ex = E; isType = false; }
+ void setArgument(Expr *E) {
+ Argument.Ex = E;
+ UnaryExprOrTypeTraitExprBits.IsType = false;
+ }
void setArgument(TypeSourceInfo *TInfo) {
Argument.Ty = TInfo;
- isType = true;
+ UnaryExprOrTypeTraitExprBits.IsType = true;
}
/// Gets the argument type, or the type of the argument expression, whichever
@@ -1767,7 +1926,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(OpLoc, RParenLoc);
}
@@ -1842,14 +2001,14 @@ public:
return cast<Expr>(getRHS()->getType()->isIntegerType() ? getRHS():getLHS());
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getLHS()->getLocStart(), RBracketLoc);
}
SourceLocation getRBracketLoc() const { return RBracketLoc; }
void setRBracketLoc(SourceLocation L) { RBracketLoc = L; }
- SourceLocation getExprLoc() const { return getBase()->getExprLoc(); }
+ SourceLocation getExprLoc() const LLVM_READONLY { return getBase()->getExprLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ArraySubscriptExprClass;
@@ -1927,7 +2086,10 @@ public:
Expr **getArgs() {
return reinterpret_cast<Expr **>(SubExprs+getNumPreArgs()+PREARGS_START);
}
-
+ const Expr *const *getArgs() const {
+ return const_cast<CallExpr*>(this)->getArgs();
+ }
+
/// getArg - Return the specified argument.
Expr *getArg(unsigned Arg) {
assert(Arg < NumArgs && "Arg access out of range!");
@@ -1969,7 +2131,7 @@ public:
/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
/// not, return 0.
- unsigned isBuiltinCall(const ASTContext &Context) const;
+ unsigned isBuiltinCall() const;
/// getCallReturnType - Get the return type of the call expr. This is not
/// always the type of the expr itself, if the return type is a reference
@@ -1979,7 +2141,9 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY;
+ SourceLocation getLocEnd() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstCallExprConstant &&
@@ -2016,13 +2180,13 @@ class MemberExpr : public Expr {
/// In X.F, this is the decl referenced by F.
ValueDecl *MemberDecl;
- /// MemberLoc - This is the location of the member name.
- SourceLocation MemberLoc;
-
/// MemberDNLoc - Provides source/type location info for the
/// declaration name embedded in MemberDecl.
DeclarationNameLoc MemberDNLoc;
+ /// MemberLoc - This is the location of the member name.
+ SourceLocation MemberLoc;
+
/// IsArrow - True if this is "X->F", false if this is "X.F".
bool IsArrow : 1;
@@ -2032,12 +2196,14 @@ class MemberExpr : public Expr {
/// structure is allocated immediately after the MemberExpr.
bool HasQualifierOrFoundDecl : 1;
- /// \brief True if this member expression specified a template argument list
- /// explicitly, e.g., x->f<int>. When true, an ExplicitTemplateArgumentList
- /// structure (and its TemplateArguments) are allocated immediately after
- /// the MemberExpr or, if the member expression also has a qualifier, after
- /// the MemberNameQualifier structure.
- bool HasExplicitTemplateArgumentList : 1;
+ /// \brief True if this member expression specified a template keyword
+ /// and/or a template argument list explicitly, e.g., x->f<int>,
+ /// x->template f, x->template f<int>.
+ /// When true, an ASTTemplateKWAndArgsInfo structure and its
+ /// TemplateArguments (if any) are allocated immediately after
+ /// the MemberExpr or, if the member expression also has a qualifier,
+ /// after the MemberNameQualifier structure.
+ bool HasTemplateKWAndArgsInfo : 1;
/// \brief True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
@@ -2059,13 +2225,13 @@ public:
const DeclarationNameInfo &NameInfo, QualType ty,
ExprValueKind VK, ExprObjectKind OK)
: Expr(MemberExprClass, ty, VK, OK,
- base->isTypeDependent(),
+ base->isTypeDependent(),
base->isValueDependent(),
base->isInstantiationDependent(),
base->containsUnexpandedParameterPack()),
- Base(base), MemberDecl(memberdecl), MemberLoc(NameInfo.getLoc()),
- MemberDNLoc(NameInfo.getInfo()), IsArrow(isarrow),
- HasQualifierOrFoundDecl(false), HasExplicitTemplateArgumentList(false),
+ Base(base), MemberDecl(memberdecl), MemberDNLoc(NameInfo.getInfo()),
+ MemberLoc(NameInfo.getLoc()), IsArrow(isarrow),
+ HasQualifierOrFoundDecl(false), HasTemplateKWAndArgsInfo(false),
HadMultipleCandidates(false) {
assert(memberdecl->getDeclName() == NameInfo.getName());
}
@@ -2073,7 +2239,7 @@ public:
// NOTE: this constructor should be used only when it is known that
// the member name can not provide additional syntactic info
// (i.e., source locations for C++ operator names or type source info
- // for constructors, destructors and conversion oeprators).
+ // for constructors, destructors and conversion operators).
MemberExpr(Expr *base, bool isarrow, ValueDecl *memberdecl,
SourceLocation l, QualType ty,
ExprValueKind VK, ExprObjectKind OK)
@@ -2081,13 +2247,14 @@ public:
base->isTypeDependent(), base->isValueDependent(),
base->isInstantiationDependent(),
base->containsUnexpandedParameterPack()),
- Base(base), MemberDecl(memberdecl), MemberLoc(l), MemberDNLoc(),
+ Base(base), MemberDecl(memberdecl), MemberDNLoc(), MemberLoc(l),
IsArrow(isarrow),
- HasQualifierOrFoundDecl(false), HasExplicitTemplateArgumentList(false),
+ HasQualifierOrFoundDecl(false), HasTemplateKWAndArgsInfo(false),
HadMultipleCandidates(false) {}
static MemberExpr *Create(ASTContext &C, Expr *base, bool isarrow,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
ValueDecl *memberdecl, DeclAccessPair founddecl,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *targs,
@@ -2126,22 +2293,61 @@ public:
return getMemberQualifier()->QualifierLoc.getNestedNameSpecifier();
}
- /// \brief If the member name was qualified, retrieves the
+ /// \brief If the member name was qualified, retrieves the
/// nested-name-specifier that precedes the member name, with source-location
/// information.
NestedNameSpecifierLoc getQualifierLoc() const {
if (!hasQualifier())
return NestedNameSpecifierLoc();
-
+
return getMemberQualifier()->QualifierLoc;
}
- /// \brief Determines whether this member expression actually had a C++
- /// template argument list explicitly specified, e.g., x.f<int>.
- bool hasExplicitTemplateArgs() const {
- return HasExplicitTemplateArgumentList;
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() {
+ if (!HasTemplateKWAndArgsInfo)
+ return 0;
+
+ if (!HasQualifierOrFoundDecl)
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(this + 1);
+
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(
+ getMemberQualifier() + 1);
+ }
+
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<MemberExpr*>(this)->getTemplateKWAndArgsInfo();
+ }
+
+ /// \brief Retrieve the location of the template keyword preceding
+ /// the member name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the member name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the member name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
}
+ /// \brief Determines whether the member name was preceded by the
+ /// template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
+ /// \brief Determines whether the member name was followed by an
+ /// explicit template argument list.
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
+
/// \brief Copies the template arguments (if present) into the given
/// structure.
void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
@@ -2153,12 +2359,8 @@ public:
/// follow the member template name. This must only be called on an
/// expression with explicit template arguments.
ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
- assert(HasExplicitTemplateArgumentList);
- if (!HasQualifierOrFoundDecl)
- return *reinterpret_cast<ASTTemplateArgumentListInfo *>(this + 1);
-
- return *reinterpret_cast<ASTTemplateArgumentListInfo *>(
- getMemberQualifier() + 1);
+ assert(hasExplicitTemplateArgs());
+ return *getTemplateKWAndArgsInfo();
}
/// \brief Retrieve the explicit template argument list that
@@ -2175,20 +2377,11 @@ public:
if (!hasExplicitTemplateArgs()) return 0;
return &getExplicitTemplateArgs();
}
-
- /// \brief Retrieve the location of the left angle bracket following the
- /// member name ('<'), if any.
- SourceLocation getLAngleLoc() const {
- if (!HasExplicitTemplateArgumentList)
- return SourceLocation();
-
- return getExplicitTemplateArgs().LAngleLoc;
- }
/// \brief Retrieve the template arguments provided as part of this
/// template-id.
const TemplateArgumentLoc *getTemplateArgs() const {
- if (!HasExplicitTemplateArgumentList)
+ if (!hasExplicitTemplateArgs())
return 0;
return getExplicitTemplateArgs().getTemplateArgs();
@@ -2197,21 +2390,12 @@ public:
/// \brief Retrieve the number of template arguments provided as part of this
/// template-id.
unsigned getNumTemplateArgs() const {
- if (!HasExplicitTemplateArgumentList)
+ if (!hasExplicitTemplateArgs())
return 0;
return getExplicitTemplateArgs().NumTemplateArgs;
}
- /// \brief Retrieve the location of the right angle bracket following the
- /// template arguments ('>').
- SourceLocation getRAngleLoc() const {
- if (!HasExplicitTemplateArgumentList)
- return SourceLocation();
-
- return getExplicitTemplateArgs().RAngleLoc;
- }
-
/// \brief Retrieve the member declaration name info.
DeclarationNameInfo getMemberNameInfo() const {
return DeclarationNameInfo(MemberDecl->getDeclName(),
@@ -2226,9 +2410,11 @@ public:
SourceLocation getMemberLoc() const { return MemberLoc; }
void setMemberLoc(SourceLocation L) { MemberLoc = L; }
- SourceRange getSourceRange() const;
-
- SourceLocation getExprLoc() const { return MemberLoc; }
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY;
+ SourceLocation getLocEnd() const LLVM_READONLY;
+
+ SourceLocation getExprLoc() const LLVM_READONLY { return MemberLoc; }
/// \brief Determine whether the base of this explicit is implicit.
bool isImplicitAccess() const {
@@ -2269,19 +2455,19 @@ class CompoundLiteralExpr : public Expr {
/// The type as written. This can be an incomplete array type, in
/// which case the actual expression type will be different.
- TypeSourceInfo *TInfo;
+ /// The int part of the pair stores whether this expr is file scope.
+ llvm::PointerIntPair<TypeSourceInfo *, 1, bool> TInfoAndScope;
Stmt *Init;
- bool FileScope;
public:
CompoundLiteralExpr(SourceLocation lparenloc, TypeSourceInfo *tinfo,
QualType T, ExprValueKind VK, Expr *init, bool fileScope)
: Expr(CompoundLiteralExprClass, T, VK, OK_Ordinary,
- tinfo->getType()->isDependentType(),
+ tinfo->getType()->isDependentType(),
init->isValueDependent(),
(init->isInstantiationDependent() ||
tinfo->getType()->isInstantiationDependentType()),
init->containsUnexpandedParameterPack()),
- LParenLoc(lparenloc), TInfo(tinfo), Init(init), FileScope(fileScope) {}
+ LParenLoc(lparenloc), TInfoAndScope(tinfo, fileScope), Init(init) {}
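llvm::PointerIntPair packs the flag into the low, alignment-guaranteed-zero bits of the TypeSourceInfo pointer, shrinking the node by a word. A usage sketch (tinfo is an illustrative TypeSourceInfo*):

    llvm::PointerIntPair<TypeSourceInfo *, 1, bool> P(tinfo, /*fileScope=*/true);
    TypeSourceInfo *T = P.getPointer(); // the original pointer back
    bool FileScope = P.getInt();        // the packed flag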
/// \brief Construct an empty compound literal.
explicit CompoundLiteralExpr(EmptyShell Empty)
@@ -2291,16 +2477,20 @@ public:
Expr *getInitializer() { return cast<Expr>(Init); }
void setInitializer(Expr *E) { Init = E; }
- bool isFileScope() const { return FileScope; }
- void setFileScope(bool FS) { FileScope = FS; }
+ bool isFileScope() const { return TInfoAndScope.getInt(); }
+ void setFileScope(bool FS) { TInfoAndScope.setInt(FS); }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
- TypeSourceInfo *getTypeSourceInfo() const { return TInfo; }
- void setTypeSourceInfo(TypeSourceInfo* tinfo) { TInfo = tinfo; }
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return TInfoAndScope.getPointer();
+ }
+ void setTypeSourceInfo(TypeSourceInfo *tinfo) {
+ TInfoAndScope.setPointer(tinfo);
+ }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
// FIXME: Init should never be null.
if (!Init)
return SourceRange();
@@ -2352,7 +2542,7 @@ protected:
// Cast expressions are value-dependent if the type is
// dependent or if the subexpression is value-dependent.
ty->isDependentType() || (op && op->isValueDependent()),
- (ty->isInstantiationDependentType() ||
+ (ty->isInstantiationDependentType() ||
(op && op->isInstantiationDependent())),
(ty->containsUnexpandedParameterPack() ||
op->containsUnexpandedParameterPack())),
@@ -2454,9 +2644,15 @@ public:
static ImplicitCastExpr *CreateEmpty(ASTContext &Context, unsigned PathSize);
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return getSubExpr()->getSourceRange();
}
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return getSubExpr()->getLocStart();
+ }
+ SourceLocation getLocEnd() const LLVM_READONLY {
+ return getSubExpr()->getLocEnd();
+ }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ImplicitCastExprClass;
@@ -2551,7 +2747,7 @@ public:
SourceLocation getRParenLoc() const { return RPLoc; }
void setRParenLoc(SourceLocation L) { RPLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(LPLoc, getSubExpr()->getSourceRange().getEnd());
}
static bool classof(const Stmt *T) {
@@ -2611,6 +2807,7 @@ public:
explicit BinaryOperator(EmptyShell Empty)
: Expr(BinaryOperatorClass, Empty), Opc(BO_Comma) { }
+ SourceLocation getExprLoc() const LLVM_READONLY { return OpLoc; }
SourceLocation getOperatorLoc() const { return OpLoc; }
void setOperatorLoc(SourceLocation L) { OpLoc = L; }
@@ -2622,7 +2819,7 @@ public:
Expr *getRHS() const { return cast<Expr>(SubExprs[RHS]); }
void setRHS(Expr *E) { SubExprs[RHS] = E; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getLHS()->getLocStart(), getRHS()->getLocEnd());
}
@@ -2674,6 +2871,13 @@ public:
bool isCompoundAssignmentOp() const {
return isCompoundAssignmentOp(getOpcode());
}
+ static Opcode getOpForCompoundAssignment(Opcode Opc) {
+ assert(isCompoundAssignmentOp(Opc));
+ if (Opc >= BO_AndAssign)
+ return Opcode(unsigned(Opc) - BO_AndAssign + BO_And);
+ else
+ return Opcode(unsigned(Opc) - BO_MulAssign + BO_Mul);
+ }
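This works because the *Assign opcodes mirror the plain ones in two contiguous runs, BO_MulAssign..BO_ShrAssign matching BO_Mul..BO_Shr and BO_AndAssign..BO_OrAssign matching BO_And..BO_Or, so one subtraction recovers the underlying operation:

    assert(BinaryOperator::getOpForCompoundAssignment(BO_AddAssign) == BO_Add);
    assert(BinaryOperator::getOpForCompoundAssignment(BO_XorAssign) == BO_Xor);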
static bool isShiftAssignOp(Opcode Opc) {
return Opc == BO_ShlAssign || Opc == BO_ShrAssign;
@@ -2842,11 +3046,11 @@ public:
// the expression if the condition evaluates to false. This is
// the same as getRHS.
Expr *getFalseExpr() const { return cast<Expr>(SubExprs[RHS]); }
-
+
Expr *getLHS() const { return cast<Expr>(SubExprs[LHS]); }
Expr *getRHS() const { return cast<Expr>(SubExprs[RHS]); }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getCond()->getLocStart(), getRHS()->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -2895,8 +3099,7 @@ public:
SubExprs[COND] = cond;
SubExprs[LHS] = lhs;
SubExprs[RHS] = rhs;
-
- OpaqueValue->setSourceExpr(common);
+ assert(OpaqueValue->getSourceExpr() == common && "Wrong opaque value");
}
/// \brief Build an empty conditional operator.
@@ -2928,8 +3131,8 @@ public:
Expr *getFalseExpr() const {
return cast<Expr>(SubExprs[RHS]);
}
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getCommon()->getLocStart(), getFalseExpr()->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -2981,7 +3184,7 @@ public:
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AmpAmpLoc, LabelLoc);
}
@@ -3023,7 +3226,7 @@ public:
const CompoundStmt *getSubStmt() const { return cast<CompoundStmt>(SubStmt); }
void setSubStmt(CompoundStmt *S) { SubStmt = S; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(LParenLoc, RParenLoc);
}
@@ -3073,7 +3276,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(BuiltinLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -3088,7 +3291,7 @@ public:
/// \brief Retrieve the array of expressions.
Expr **getSubExprs() { return reinterpret_cast<Expr **>(SubExprs); }
-
+
/// getExpr - Return the Expr at the specified index.
Expr *getExpr(unsigned Index) {
assert((Index < NumExprs) && "Arg access out of range!");
@@ -3168,7 +3371,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(BuiltinLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -3205,7 +3408,7 @@ public:
SourceLocation getTokenLocation() const { return TokenLoc; }
void setTokenLocation(SourceLocation L) { TokenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(TokenLoc);
}
static bool classof(const Stmt *T) {
@@ -3251,7 +3454,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(BuiltinLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -3319,10 +3522,6 @@ class InitListExpr : public Expr {
/// field within the union will be initialized.
llvm::PointerUnion<Expr *, FieldDecl *> ArrayFillerOrUnionFieldInit;
- /// Whether this initializer list originally had a GNU array-range
- /// designator in it. This is a temporary marker used by CodeGen.
- bool HadArrayRangeDesignator;
-
public:
InitListExpr(ASTContext &C, SourceLocation lbraceloc,
Expr **initexprs, unsigned numinits,
@@ -3336,7 +3535,7 @@ public:
/// \brief Retrieve the set of initializers.
Expr **getInits() { return reinterpret_cast<Expr **>(InitExprs.data()); }
-
+
const Expr *getInit(unsigned Init) const {
assert(Init < getNumInits() && "Initializer access out of range!");
return cast_or_null<Expr>(InitExprs[Init]);
@@ -3383,6 +3582,10 @@ public:
}
void setArrayFiller(Expr *filler);
+ /// \brief Return true if this is an array initializer and its array "filler"
+ /// has been set.
+ bool hasArrayFiller() const { return getArrayFiller(); }
+
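The array filler is the single expression reused for every trailing element without an explicit initializer. At the source level (illustrative):

    int a[5] = { 1, 2 };  // two explicit inits; the filler value-initializes
                          // a[2] through a[4]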
/// \brief If this initializes a union, specifies which field in the
/// union to initialize.
///
@@ -3417,12 +3620,21 @@ public:
InitListExpr *getSyntacticForm() const { return SyntacticForm; }
void setSyntacticForm(InitListExpr *Init) { SyntacticForm = Init; }
- bool hadArrayRangeDesignator() const { return HadArrayRangeDesignator; }
+ bool hadArrayRangeDesignator() const {
+ return InitListExprBits.HadArrayRangeDesignator != 0;
+ }
void sawArrayRangeDesignator(bool ARD = true) {
- HadArrayRangeDesignator = ARD;
+ InitListExprBits.HadArrayRangeDesignator = ARD;
+ }
+
+ bool initializesStdInitializerList() const {
+ return InitListExprBits.InitializesStdInitializerList != 0;
+ }
+ void setInitializesStdInitializerList(bool ISIL = true) {
+ InitListExprBits.InitializesStdInitializerList = ISIL;
}
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == InitListExprClass;
@@ -3490,15 +3702,15 @@ private:
/// The number of designators in this initializer expression.
unsigned NumDesignators : 15;
- /// \brief The designators in this designated initialization
- /// expression.
- Designator *Designators;
-
/// The number of subexpressions of this initializer expression,
/// which contains both the initializer and any additional
/// expressions used by array and array-range designators.
unsigned NumSubExprs : 16;
+ /// \brief The designators in this designated initialization
+ /// expression.
+ Designator *Designators;
+
DesignatedInitExpr(ASTContext &C, QualType Ty, unsigned NumDesignators,
const Designator *Designators,
@@ -3508,7 +3720,7 @@ private:
explicit DesignatedInitExpr(unsigned NumSubExprs)
: Expr(DesignatedInitExprClass, EmptyShell()),
- NumDesignators(0), Designators(0), NumSubExprs(NumSubExprs) { }
+ NumDesignators(0), NumSubExprs(NumSubExprs), Designators(0) { }
public:
/// A field designator, e.g., ".x".
@@ -3658,7 +3870,7 @@ public:
SourceLocation getEndLocation() const {
return Kind == FieldDesignator ? getFieldLoc() : getRBracketLoc();
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getStartLocation(), getEndLocation());
}
};
@@ -3707,7 +3919,7 @@ public:
Designator *getDesignator(unsigned Idx) { return &designators_begin()[Idx]; }
- void setDesignators(ASTContext &C, const Designator *Desigs,
+ void setDesignators(ASTContext &C, const Designator *Desigs,
unsigned NumDesigs);
Expr *getArrayIndex(const Designator& D);
@@ -3760,7 +3972,7 @@ public:
SourceRange getDesignatorsSourceRange() const;
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == DesignatedInitExprClass;
@@ -3797,7 +4009,7 @@ public:
}
static bool classof(const ImplicitValueInitExpr *) { return true; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange();
}
@@ -3813,7 +4025,7 @@ class ParenListExpr : public Expr {
public:
ParenListExpr(ASTContext& C, SourceLocation lparenloc, Expr **exprs,
- unsigned numexprs, SourceLocation rparenloc, QualType T);
+ unsigned numexprs, SourceLocation rparenloc);
/// \brief Build an empty paren list.
explicit ParenListExpr(EmptyShell Empty) : Expr(ParenListExprClass, Empty) { }
@@ -3835,7 +4047,7 @@ public:
SourceLocation getLParenLoc() const { return LParenLoc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(LParenLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -3853,9 +4065,9 @@ public:
};
-/// \brief Represents a C1X generic selection.
+/// \brief Represents a C11 generic selection.
///
-/// A generic selection (C1X 6.5.1.1) contains an unevaluated controlling
+/// A generic selection (C11 6.5.1.1) contains an unevaluated controlling
/// expression, followed by one or more generic associations. Each generic
/// association specifies a type name and an expression, or "default" and an
/// expression (in which case it is known as a default generic association).
@@ -3950,7 +4162,7 @@ public:
const Expr *getResultExpr() const { return getAssocExpr(getResultIndex()); }
Expr *getResultExpr() { return getAssocExpr(getResultIndex()); }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(GenericLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -4016,7 +4228,7 @@ public:
/// aggregate Constant of ConstantInt(s).
void getEncodedElementAccess(SmallVectorImpl<unsigned> &Elts) const;
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getBase()->getLocStart(), AccessorLoc);
}
@@ -4042,9 +4254,8 @@ protected:
public:
BlockExpr(BlockDecl *BD, QualType ty)
: Expr(BlockExprClass, ty, VK_RValue, OK_Ordinary,
- ty->isDependentType(), false,
- // FIXME: Check for instantiate-dependence in the statement?
- ty->isInstantiationDependentType(),
+ ty->isDependentType(), ty->isDependentType(),
+ ty->isInstantiationDependentType() || BD->isDependentContext(),
false),
TheBlock(BD) {}
@@ -4060,12 +4271,12 @@ public:
const Stmt *getBody() const;
Stmt *getBody();
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getCaretLocation(), getBody()->getLocEnd());
}
/// getFunctionType - Return the underlying function type for this block.
- const FunctionType *getFunctionType() const;
+ const FunctionProtoType *getFunctionType() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == BlockExprClass;
@@ -4076,46 +4287,6 @@ public:
child_range children() { return child_range(); }
};
-/// BlockDeclRefExpr - A reference to a local variable declared in an
-/// enclosing scope.
-class BlockDeclRefExpr : public Expr {
- VarDecl *D;
- SourceLocation Loc;
- bool IsByRef : 1;
- bool ConstQualAdded : 1;
-public:
- BlockDeclRefExpr(VarDecl *d, QualType t, ExprValueKind VK,
- SourceLocation l, bool ByRef, bool constAdded = false);
-
- // \brief Build an empty reference to a declared variable in a
- // block.
- explicit BlockDeclRefExpr(EmptyShell Empty)
- : Expr(BlockDeclRefExprClass, Empty) { }
-
- VarDecl *getDecl() { return D; }
- const VarDecl *getDecl() const { return D; }
- void setDecl(VarDecl *VD) { D = VD; }
-
- SourceLocation getLocation() const { return Loc; }
- void setLocation(SourceLocation L) { Loc = L; }
-
- SourceRange getSourceRange() const { return SourceRange(Loc); }
-
- bool isByRef() const { return IsByRef; }
- void setByRef(bool BR) { IsByRef = BR; }
-
- bool isConstQualAdded() const { return ConstQualAdded; }
- void setConstQualAdded(bool C) { ConstQualAdded = C; }
-
- static bool classof(const Stmt *T) {
- return T->getStmtClass() == BlockDeclRefExprClass;
- }
- static bool classof(const BlockDeclRefExpr *) { return true; }
-
- // Iterators
- child_range children() { return child_range(); }
-};
-
/// AsTypeExpr - Clang builtin function __builtin_astype [OpenCL 6.2.4.2]
/// This AST node provides support for reinterpreting a type to another
/// type of the same size.
@@ -4127,12 +4298,12 @@ private:
friend class ASTReader;
friend class ASTStmtReader;
explicit AsTypeExpr(EmptyShell Empty) : Expr(AsTypeExprClass, Empty) {}
-
+
public:
AsTypeExpr(Expr* SrcExpr, QualType DstType,
ExprValueKind VK, ExprObjectKind OK,
SourceLocation BuiltinLoc, SourceLocation RParenLoc)
- : Expr(AsTypeExprClass, DstType, VK, OK,
+ : Expr(AsTypeExprClass, DstType, VK, OK,
DstType->isDependentType(),
DstType->isDependentType() || SrcExpr->isValueDependent(),
(DstType->isInstantiationDependentType() ||
@@ -4140,7 +4311,7 @@ public:
(DstType->containsUnexpandedParameterPack() ||
SrcExpr->containsUnexpandedParameterPack())),
SrcExpr(SrcExpr), BuiltinLoc(BuiltinLoc), RParenLoc(RParenLoc) {}
-
+
/// getSrcExpr - Return the Expr to be converted.
Expr *getSrcExpr() const { return cast<Expr>(SrcExpr); }
@@ -4149,83 +4320,218 @@ public:
/// getRParenLoc - Return the location of final right parenthesis.
SourceLocation getRParenLoc() const { return RParenLoc; }
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(BuiltinLoc, RParenLoc);
}
-
+
static bool classof(const Stmt *T) {
- return T->getStmtClass() == AsTypeExprClass;
+ return T->getStmtClass() == AsTypeExprClass;
}
static bool classof(const AsTypeExpr *) { return true; }
-
+
// Iterators
child_range children() { return child_range(&SrcExpr, &SrcExpr+1); }
};
+/// PseudoObjectExpr - An expression which accesses a pseudo-object
+/// l-value. A pseudo-object is an abstract object, accesses to which
+/// are translated to calls. The pseudo-object expression has a
+/// syntactic form, which shows how the expression was actually
+/// written in the source code, and a semantic form, which is a series
+/// of expressions to be executed in order which detail how the
+/// operation is actually evaluated. Optionally, one of the semantic
+/// forms may also provide a result value for the expression.
+///
+/// If any of the semantic-form expressions is an OpaqueValueExpr,
+/// that OVE is required to have a source expression, and it is bound
+/// to the result of that source expression. Such OVEs may appear
+/// only in subsequent semantic-form expressions and as
+/// sub-expressions of the syntactic form.
+///
+/// PseudoObjectExpr should be used only when an operation can be
+/// usefully described in terms of fairly simple rewrite rules on
+/// objects and functions that are meant to be used by end-developers.
+/// For example, under the Itanium ABI, dynamic casts are implemented
+/// as a call to a runtime function called __dynamic_cast; using this
+/// class to describe that would be inappropriate because that call is
+/// not really part of the user-visible semantics, and instead the
+/// cast is properly reflected in the AST and IR-generation has been
+/// taught to generate the call as necessary. In contrast, an
+/// Objective-C property access is semantically defined to be
+/// equivalent to a particular message send, and this is very much
+/// part of the user model. The name of this class encourages this
+/// modelling design.
+class PseudoObjectExpr : public Expr {
+ // PseudoObjectExprBits.NumSubExprs - The number of sub-expressions.
+ // Always at least two, because the first sub-expression is the
+ // syntactic form.
+
+ // PseudoObjectExprBits.ResultIndex - The index of the
+ // sub-expression holding the result. 0 means the result is void,
+ // which is unambiguous because it's the index of the syntactic
+ // form. Note that this is therefore 1 higher than the value passed
+ // in to Create, which is an index within the semantic forms.
+ // Note also that ASTStmtWriter assumes this encoding.
+
+ Expr **getSubExprsBuffer() { return reinterpret_cast<Expr**>(this + 1); }
+ const Expr * const *getSubExprsBuffer() const {
+ return reinterpret_cast<const Expr * const *>(this + 1);
+ }
+
+ friend class ASTStmtReader;
+
+ PseudoObjectExpr(QualType type, ExprValueKind VK,
+ Expr *syntactic, ArrayRef<Expr*> semantic,
+ unsigned resultIndex);
+
+ PseudoObjectExpr(EmptyShell shell, unsigned numSemanticExprs);
+
+ unsigned getNumSubExprs() const {
+ return PseudoObjectExprBits.NumSubExprs;
+ }
+
+public:
+ /// NoResult - A value for the result index indicating that there is
+ /// no semantic result.
+ enum { NoResult = ~0U };
+
+ static PseudoObjectExpr *Create(ASTContext &Context, Expr *syntactic,
+ ArrayRef<Expr*> semantic,
+ unsigned resultIndex);
+
+ static PseudoObjectExpr *Create(ASTContext &Context, EmptyShell shell,
+ unsigned numSemanticExprs);
+
+ /// Return the syntactic form of this expression, i.e. the
+ /// expression it actually looks like. Likely to be expressed in
+ /// terms of OpaqueValueExprs bound in the semantic form.
+ Expr *getSyntacticForm() { return getSubExprsBuffer()[0]; }
+ const Expr *getSyntacticForm() const { return getSubExprsBuffer()[0]; }
+
+ /// Return the index of the result-bearing expression into the semantics
+ /// expressions, or PseudoObjectExpr::NoResult if there is none.
+ unsigned getResultExprIndex() const {
+ if (PseudoObjectExprBits.ResultIndex == 0) return NoResult;
+ return PseudoObjectExprBits.ResultIndex - 1;
+ }
+
+ /// Return the result-bearing expression, or null if there is none.
+ Expr *getResultExpr() {
+ if (PseudoObjectExprBits.ResultIndex == 0)
+ return 0;
+ return getSubExprsBuffer()[PseudoObjectExprBits.ResultIndex];
+ }
+ const Expr *getResultExpr() const {
+ return const_cast<PseudoObjectExpr*>(this)->getResultExpr();
+ }
+
+ unsigned getNumSemanticExprs() const { return getNumSubExprs() - 1; }
+
+ typedef Expr * const *semantics_iterator;
+ typedef const Expr * const *const_semantics_iterator;
+ semantics_iterator semantics_begin() {
+ return getSubExprsBuffer() + 1;
+ }
+ const_semantics_iterator semantics_begin() const {
+ return getSubExprsBuffer() + 1;
+ }
+ semantics_iterator semantics_end() {
+ return getSubExprsBuffer() + getNumSubExprs();
+ }
+ const_semantics_iterator semantics_end() const {
+ return getSubExprsBuffer() + getNumSubExprs();
+ }
+ Expr *getSemanticExpr(unsigned index) {
+ assert(index + 1 < getNumSubExprs());
+ return getSubExprsBuffer()[index + 1];
+ }
+ const Expr *getSemanticExpr(unsigned index) const {
+ return const_cast<PseudoObjectExpr*>(this)->getSemanticExpr(index);
+ }
+
+ SourceLocation getExprLoc() const LLVM_READONLY {
+ return getSyntacticForm()->getExprLoc();
+ }
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return getSyntacticForm()->getSourceRange();
+ }
+
+ child_range children() {
+ Stmt **cs = reinterpret_cast<Stmt**>(getSubExprsBuffer());
+ return child_range(cs, cs + getNumSubExprs());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == PseudoObjectExprClass;
+ }
+ static bool classof(const PseudoObjectExpr *) { return true; }
+};
+
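To make the syntactic/semantic split described above concrete, here is a minimal editorial sketch (not part of the diff) of a client walking a PseudoObjectExpr with the accessors declared in this class; the visitor name is hypothetical:

  // Sketch only: assumes the clang AST headers from this change.
  #include "clang/AST/Expr.h"

  static void visitPseudoObject(const clang::PseudoObjectExpr *E) {
    // The syntactic form records what the user wrote, e.g. "obj.prop = x".
    const clang::Expr *Written = E->getSyntacticForm();
    (void)Written;
    // The semantic forms, in execution order, spell out the evaluation;
    // for an ObjC property assignment this includes the setter message send.
    for (clang::PseudoObjectExpr::const_semantics_iterator
           I = E->semantics_begin(), End = E->semantics_end(); I != End; ++I) {
      const clang::Expr *Sem = *I;
      (void)Sem; // a real client would dispatch on Sem here
    }
    // getResultExpr() is null when ResultIndex == 0, i.e. a void result.
  }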
/// AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*,
/// __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the
-/// similarly-named C++0x instructions. All of these instructions take one
-/// primary pointer and at least one memory order.
+/// similarly-named C++11 instructions, and __c11 variants for <stdatomic.h>.
+/// All of these instructions take one primary pointer and at least one memory
+/// order.
class AtomicExpr : public Expr {
public:
- enum AtomicOp { Load, Store, CmpXchgStrong, CmpXchgWeak, Xchg,
- Add, Sub, And, Or, Xor };
+ enum AtomicOp {
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) AO ## ID,
+#include "clang/Basic/Builtins.def"
+ // Avoid trailing comma
+ BI_First = 0
+ };
+
private:
- enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, END_EXPR };
+ enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, WEAK, END_EXPR };
Stmt* SubExprs[END_EXPR];
unsigned NumSubExprs;
SourceLocation BuiltinLoc, RParenLoc;
AtomicOp Op;
+ friend class ASTStmtReader;
+
public:
AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr, QualType t,
AtomicOp op, SourceLocation RP);
+ /// \brief Determine the number of arguments the specified atomic builtin
+ /// should have.
+ static unsigned getNumSubExprs(AtomicOp Op);
+
/// \brief Build an empty AtomicExpr.
explicit AtomicExpr(EmptyShell Empty) : Expr(AtomicExprClass, Empty) { }
Expr *getPtr() const {
return cast<Expr>(SubExprs[PTR]);
}
- void setPtr(Expr *E) {
- SubExprs[PTR] = E;
- }
Expr *getOrder() const {
return cast<Expr>(SubExprs[ORDER]);
}
- void setOrder(Expr *E) {
- SubExprs[ORDER] = E;
- }
Expr *getVal1() const {
- assert(NumSubExprs >= 3);
+ if (Op == AO__c11_atomic_init)
+ return cast<Expr>(SubExprs[ORDER]);
+ assert(NumSubExprs > VAL1);
return cast<Expr>(SubExprs[VAL1]);
}
- void setVal1(Expr *E) {
- assert(NumSubExprs >= 3);
- SubExprs[VAL1] = E;
- }
Expr *getOrderFail() const {
- assert(NumSubExprs == 5);
+ assert(NumSubExprs > ORDER_FAIL);
return cast<Expr>(SubExprs[ORDER_FAIL]);
}
- void setOrderFail(Expr *E) {
- assert(NumSubExprs == 5);
- SubExprs[ORDER_FAIL] = E;
- }
Expr *getVal2() const {
- assert(NumSubExprs == 5);
+ if (Op == AO__atomic_exchange)
+ return cast<Expr>(SubExprs[ORDER_FAIL]);
+ assert(NumSubExprs > VAL2);
return cast<Expr>(SubExprs[VAL2]);
}
- void setVal2(Expr *E) {
- assert(NumSubExprs == 5);
- SubExprs[VAL2] = E;
+ Expr *getWeak() const {
+ assert(NumSubExprs > WEAK);
+ return cast<Expr>(SubExprs[WEAK]);
}
AtomicOp getOp() const { return Op; }
- void setOp(AtomicOp op) { Op = op; }
unsigned getNumSubExprs() { return NumSubExprs; }
- void setNumSubExprs(unsigned num) { NumSubExprs = num; }
Expr **getSubExprs() { return reinterpret_cast<Expr **>(SubExprs); }
@@ -4234,17 +4540,16 @@ public:
}
bool isCmpXChg() const {
- return getOp() == AtomicExpr::CmpXchgStrong ||
- getOp() == AtomicExpr::CmpXchgWeak;
+ return getOp() == AO__c11_atomic_compare_exchange_strong ||
+ getOp() == AO__c11_atomic_compare_exchange_weak ||
+ getOp() == AO__atomic_compare_exchange ||
+ getOp() == AO__atomic_compare_exchange_n;
}
SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
- void setBuiltinLoc(SourceLocation L) { BuiltinLoc = L; }
-
SourceLocation getRParenLoc() const { return RParenLoc; }
- void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(BuiltinLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
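For context on the builtin set this enum now draws from Builtins.def, a small editorial usage sketch (illustrative only; the argument-to-slot mapping is paraphrased from the accessors above):

  // Sketch: GNU/C11-style atomic builtins modelled by AtomicExpr.
  int counter = 0;

  void bump() {
    // primary pointer (PTR), value operand (VAL1), memory order (ORDER)
    __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
  }

  bool try_claim(int expected) {
    // A compare-exchange also carries a failure order (ORDER_FAIL) and a
    // weak flag (WEAK), matching the four cases in isCmpXChg() above.
    return __atomic_compare_exchange_n(&counter, &expected, expected + 1,
                                       /*weak=*/false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
  }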
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
index 3cc09cd..b69693d 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
@@ -14,11 +14,13 @@
#ifndef LLVM_CLANG_AST_EXPRCXX_H
#define LLVM_CLANG_AST_EXPRCXX_H
-#include "clang/Basic/TypeTraits.h"
-#include "clang/Basic/ExpressionTraits.h"
#include "clang/AST/Expr.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/TemplateBase.h"
+#include "clang/Basic/ExpressionTraits.h"
+#include "clang/Basic/Lambda.h"
+#include "clang/Basic/TypeTraits.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
@@ -72,7 +74,7 @@ public:
/// bracket.
SourceLocation getOperatorLoc() const { return getRParenLoc(); }
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXOperatorCallExprClass;
@@ -101,7 +103,7 @@ public:
/// argument for the member call. For example, in "x.f(5)", this
/// operation would return "x".
Expr *getImplicitObjectArgument() const;
-
+
/// Retrieves the declaration of the called method.
CXXMethodDecl *getMethodDecl() const;
@@ -158,7 +160,7 @@ class CXXNamedCastExpr : public ExplicitCastExpr {
private:
SourceLocation Loc; // the location of the casting op
SourceLocation RParenLoc; // the location of the right parenthesis
-
+
protected:
CXXNamedCastExpr(StmtClass SC, QualType ty, ExprValueKind VK,
CastKind kind, Expr *op, unsigned PathSize,
@@ -171,7 +173,7 @@ protected:
: ExplicitCastExpr(SC, Shell, PathSize) { }
friend class ASTStmtReader;
-
+
public:
const char *getCastName() const;
@@ -181,8 +183,8 @@ public:
/// \brief Retrieve the location of the closing parenthesis.
SourceLocation getRParenLoc() const { return RParenLoc; }
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(Loc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -199,7 +201,8 @@ public:
static bool classof(const CXXNamedCastExpr *) { return true; }
};
-/// CXXStaticCastExpr - A C++ @c static_cast expression (C++ [expr.static.cast]).
+/// CXXStaticCastExpr - A C++ @c static_cast expression
+/// (C++ [expr.static.cast]).
///
/// This expression node represents a C++ static cast, e.g.,
/// @c static_cast<int>(1.0).
@@ -217,7 +220,7 @@ public:
static CXXStaticCastExpr *Create(ASTContext &Context, QualType T,
ExprValueKind VK, CastKind K, Expr *Op,
const CXXCastPath *Path,
- TypeSourceInfo *Written, SourceLocation L,
+ TypeSourceInfo *Written, SourceLocation L,
SourceLocation RParenLoc);
static CXXStaticCastExpr *CreateEmpty(ASTContext &Context,
unsigned PathSize);
@@ -248,9 +251,9 @@ public:
static CXXDynamicCastExpr *Create(ASTContext &Context, QualType T,
ExprValueKind VK, CastKind Kind, Expr *Op,
const CXXCastPath *Path,
- TypeSourceInfo *Written, SourceLocation L,
+ TypeSourceInfo *Written, SourceLocation L,
SourceLocation RParenLoc);
-
+
static CXXDynamicCastExpr *CreateEmpty(ASTContext &Context,
unsigned pathSize);
@@ -271,7 +274,7 @@ public:
class CXXReinterpretCastExpr : public CXXNamedCastExpr {
CXXReinterpretCastExpr(QualType ty, ExprValueKind vk, CastKind kind,
Expr *op, unsigned pathSize,
- TypeSourceInfo *writtenTy, SourceLocation l,
+ TypeSourceInfo *writtenTy, SourceLocation l,
SourceLocation RParenLoc)
: CXXNamedCastExpr(CXXReinterpretCastExprClass, ty, vk, kind, op,
pathSize, writtenTy, l, RParenLoc) {}
@@ -283,7 +286,7 @@ public:
static CXXReinterpretCastExpr *Create(ASTContext &Context, QualType T,
ExprValueKind VK, CastKind Kind,
Expr *Op, const CXXCastPath *Path,
- TypeSourceInfo *WrittenTy, SourceLocation L,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
SourceLocation RParenLoc);
static CXXReinterpretCastExpr *CreateEmpty(ASTContext &Context,
unsigned pathSize);
@@ -301,9 +304,9 @@ public:
/// @c const_cast<char*>(PtrToConstChar).
class CXXConstCastExpr : public CXXNamedCastExpr {
CXXConstCastExpr(QualType ty, ExprValueKind VK, Expr *op,
- TypeSourceInfo *writtenTy, SourceLocation l,
+ TypeSourceInfo *writtenTy, SourceLocation l,
SourceLocation RParenLoc)
- : CXXNamedCastExpr(CXXConstCastExprClass, ty, VK, CK_NoOp, op,
+ : CXXNamedCastExpr(CXXConstCastExprClass, ty, VK, CK_NoOp, op,
0, writtenTy, l, RParenLoc) {}
explicit CXXConstCastExpr(EmptyShell Empty)
@@ -312,7 +315,7 @@ class CXXConstCastExpr : public CXXNamedCastExpr {
public:
static CXXConstCastExpr *Create(ASTContext &Context, QualType T,
ExprValueKind VK, Expr *Op,
- TypeSourceInfo *WrittenTy, SourceLocation L,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
SourceLocation RParenLoc);
static CXXConstCastExpr *CreateEmpty(ASTContext &Context);
@@ -322,6 +325,67 @@ public:
static bool classof(const CXXConstCastExpr *) { return true; }
};
+/// UserDefinedLiteral - A call to a literal operator (C++11 [over.literal])
+/// written as a user-defined literal (C++11 [lit.ext]).
+///
+/// Represents a user-defined literal, e.g. "foo"_bar or 1.23_xyz. While this
+/// is semantically equivalent to a normal call, this AST node provides better
+/// information about the syntactic representation of the literal.
+///
+/// Since literal operators are never found by ADL and can only be declared at
+/// namespace scope, a user-defined literal is never dependent.
+class UserDefinedLiteral : public CallExpr {
+ /// \brief The location of a ud-suffix within the literal.
+ SourceLocation UDSuffixLoc;
+
+public:
+ UserDefinedLiteral(ASTContext &C, Expr *Fn, Expr **Args, unsigned NumArgs,
+ QualType T, ExprValueKind VK, SourceLocation LitEndLoc,
+ SourceLocation SuffixLoc)
+ : CallExpr(C, UserDefinedLiteralClass, Fn, 0, Args, NumArgs, T, VK,
+ LitEndLoc), UDSuffixLoc(SuffixLoc) {}
+ explicit UserDefinedLiteral(ASTContext &C, EmptyShell Empty)
+ : CallExpr(C, UserDefinedLiteralClass, Empty) {}
+
+ /// The kind of literal operator which is invoked.
+ enum LiteralOperatorKind {
+ LOK_Raw, ///< Raw form: operator "" X (const char *)
+ LOK_Template, ///< Raw form: operator "" X<cs...> ()
+ LOK_Integer, ///< operator "" X (unsigned long long)
+ LOK_Floating, ///< operator "" X (long double)
+ LOK_String, ///< operator "" X (const CharT *, size_t)
+ LOK_Character ///< operator "" X (CharT)
+ };
+
+ /// getLiteralOperatorKind - Returns the kind of literal operator invocation
+ /// which this expression represents.
+ LiteralOperatorKind getLiteralOperatorKind() const;
+
+ /// getCookedLiteral - If this is not a raw user-defined literal, get the
+ /// underlying cooked literal (representing the literal with the suffix
+ /// removed).
+ Expr *getCookedLiteral();
+ const Expr *getCookedLiteral() const {
+ return const_cast<UserDefinedLiteral*>(this)->getCookedLiteral();
+ }
+
+ /// getUDSuffixLoc - Returns the location of a ud-suffix in the expression.
+ /// For a string literal, there may be multiple identical suffixes. This
+ /// returns the first.
+ SourceLocation getUDSuffixLoc() const { return UDSuffixLoc; }
+
+ /// getUDSuffix - Returns the ud-suffix specified for this literal.
+ const IdentifierInfo *getUDSuffix() const;
+
+ static bool classof(const Stmt *S) {
+ return S->getStmtClass() == UserDefinedLiteralClass;
+ }
+ static bool classof(const UserDefinedLiteral *) { return true; }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
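A brief editorial example of the literal forms this node models (the suffix "_w" is made up for illustration):

  // Sketch: a cooked floating-point literal operator -> LOK_Floating.
  constexpr long double operator"" _w(long double v) { return v * 2.0L; }

  // "1.23_w" parses as a UserDefinedLiteral; getCookedLiteral() yields the
  // underlying 1.23 literal, and getUDSuffix() yields "_w".
  constexpr long double x = 1.23_w;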
/// CXXBoolLiteralExpr - [C++ 2.13.5] C++ Boolean Literal.
///
class CXXBoolLiteralExpr : public Expr {
@@ -339,7 +403,7 @@ public:
bool getValue() const { return Value; }
void setValue(bool V) { Value = V; }
- SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
@@ -365,7 +429,7 @@ public:
explicit CXXNullPtrLiteralExpr(EmptyShell Empty)
: Expr(CXXNullPtrLiteralExprClass, Empty) { }
- SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
@@ -398,7 +462,7 @@ public:
Operand->getType()->isInstantiationDependentType(),
Operand->getType()->containsUnexpandedParameterPack()),
Operand(Operand), Range(R) { }
-
+
CXXTypeidExpr(QualType Ty, Expr *Operand, SourceRange R)
: Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary,
// typeid is never type-dependent (C++ [temp.dep.expr]p4)
@@ -416,9 +480,9 @@ public:
else
Operand = (TypeSourceInfo*)0;
}
-
+
bool isTypeOperand() const { return Operand.is<TypeSourceInfo *>(); }
-
+
/// \brief Retrieves the type operand of this typeid() expression after
/// various required adjustments (removing reference types, cv-qualifiers).
QualType getTypeOperand() const;
@@ -433,20 +497,20 @@ public:
assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
Operand = TSI;
}
-
+
Expr *getExprOperand() const {
assert(!isTypeOperand() && "Cannot call getExprOperand for typeid(type)");
return static_cast<Expr*>(Operand.get<Stmt *>());
}
-
+
void setExprOperand(Expr *E) {
assert(!isTypeOperand() && "Cannot call getExprOperand for typeid(type)");
Operand = E;
}
-
- SourceRange getSourceRange() const { return Range; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
void setSourceRange(SourceRange R) { Range = R; }
-
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXTypeidExprClass;
}
@@ -476,7 +540,7 @@ public:
Operand->getType()->isInstantiationDependentType(),
Operand->getType()->containsUnexpandedParameterPack()),
Operand(Operand), Range(R) { }
-
+
CXXUuidofExpr(QualType Ty, Expr *Operand, SourceRange R)
: Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary,
false, Operand->isTypeDependent(),
@@ -491,9 +555,9 @@ public:
else
Operand = (TypeSourceInfo*)0;
}
-
+
bool isTypeOperand() const { return Operand.is<TypeSourceInfo *>(); }
-
+
/// \brief Retrieves the type operand of this __uuidof() expression after
/// various required adjustments (removing reference types, cv-qualifiers).
QualType getTypeOperand() const;
@@ -508,20 +572,20 @@ public:
assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
Operand = TSI;
}
-
+
Expr *getExprOperand() const {
assert(!isTypeOperand() && "Cannot call getExprOperand for __uuidof(type)");
return static_cast<Expr*>(Operand.get<Stmt *>());
}
-
+
void setExprOperand(Expr *E) {
assert(!isTypeOperand() && "Cannot call getExprOperand for __uuidof(type)");
Operand = E;
}
- SourceRange getSourceRange() const { return Range; }
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
void setSourceRange(SourceRange R) { Range = R; }
-
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXUuidofExprClass;
}
@@ -549,7 +613,7 @@ public:
class CXXThisExpr : public Expr {
SourceLocation Loc;
bool Implicit : 1;
-
+
public:
CXXThisExpr(SourceLocation L, QualType Type, bool isImplicit)
: Expr(CXXThisExprClass, Type, VK_RValue, OK_Ordinary,
@@ -565,11 +629,11 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- SourceRange getSourceRange() const { return SourceRange(Loc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
bool isImplicit() const { return Implicit; }
void setImplicit(bool I) { Implicit = I; }
-
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXThisExprClass;
}
@@ -588,9 +652,9 @@ class CXXThrowExpr : public Expr {
SourceLocation ThrowLoc;
/// \brief Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
-
+
friend class ASTStmtReader;
-
+
public:
// Ty is the void type which is used as the result type of the
// expression. The l is the location of the throw keyword. expr
@@ -614,8 +678,8 @@ public:
/// This information is required to determine whether the NRVO can apply to
/// this variable.
bool isThrownVariableInScope() const { return IsThrownVariableInScope; }
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
if (getSubExpr() == 0)
return SourceRange(ThrowLoc, ThrowLoc);
return SourceRange(ThrowLoc, getSubExpr()->getSourceRange().getEnd());
@@ -639,16 +703,16 @@ public:
class CXXDefaultArgExpr : public Expr {
/// \brief The parameter whose default is being used.
///
- /// When the bit is set, the subexpression is stored after the
+ /// When the bit is set, the subexpression is stored after the
/// CXXDefaultArgExpr itself. When the bit is clear, the parameter's
/// actual default expression is the subexpression.
llvm::PointerIntPair<ParmVarDecl *, 1, bool> Param;
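  // Editorial illustration, not part of the change: when the bit in Param is
  // set, a single Expr* is allocated directly after this object, which is why
  // getExpr() below reads *reinterpret_cast<Expr **>(this + 1); when the bit
  // is clear, getExpr() falls back to getParam()->getDefaultArg().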
/// \brief The location where the default argument expression was used.
SourceLocation Loc;
-
+
CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *param)
- : Expr(SC,
+ : Expr(SC,
param->hasUnparsedDefaultArg()
? param->getType().getNonReferenceType()
: param->getDefaultArg()->getType(),
@@ -656,19 +720,19 @@ class CXXDefaultArgExpr : public Expr {
param->getDefaultArg()->getObjectKind(), false, false, false, false),
Param(param, false), Loc(Loc) { }
- CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *param,
+ CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *param,
Expr *SubExpr)
: Expr(SC, SubExpr->getType(),
SubExpr->getValueKind(), SubExpr->getObjectKind(),
- false, false, false, false),
+ false, false, false, false),
Param(param, true), Loc(Loc) {
*reinterpret_cast<Expr **>(this + 1) = SubExpr;
}
-
+
public:
CXXDefaultArgExpr(EmptyShell Empty) : Expr(CXXDefaultArgExprClass, Empty) {}
-
+
// Param is the parameter whose default argument is used by this
// expression.
static CXXDefaultArgExpr *Create(ASTContext &C, SourceLocation Loc,
@@ -678,32 +742,32 @@ public:
// Param is the parameter whose default argument is used by this
// expression, and SubExpr is the expression that will actually be used.
- static CXXDefaultArgExpr *Create(ASTContext &C,
+ static CXXDefaultArgExpr *Create(ASTContext &C,
SourceLocation Loc,
- ParmVarDecl *Param,
+ ParmVarDecl *Param,
Expr *SubExpr);
-
+
// Retrieve the parameter that the argument was created from.
const ParmVarDecl *getParam() const { return Param.getPointer(); }
ParmVarDecl *getParam() { return Param.getPointer(); }
-
+
// Retrieve the actual argument to the function call.
- const Expr *getExpr() const {
+ const Expr *getExpr() const {
if (Param.getInt())
return *reinterpret_cast<Expr const * const*> (this + 1);
- return getParam()->getDefaultArg();
+ return getParam()->getDefaultArg();
}
- Expr *getExpr() {
+ Expr *getExpr() {
if (Param.getInt())
return *reinterpret_cast<Expr **> (this + 1);
- return getParam()->getDefaultArg();
+ return getParam()->getDefaultArg();
}
- /// \brief Retrieve the location where this default argument was actually
+ /// \brief Retrieve the location where this default argument was actually
/// used.
SourceLocation getUsedLocation() const { return Loc; }
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
// Default argument expressions have no representation in the
// source, so they have an empty source range.
return SourceRange();
@@ -734,6 +798,9 @@ public:
const CXXDestructorDecl *Destructor);
const CXXDestructorDecl *getDestructor() const { return Destructor; }
+ void setDestructor(const CXXDestructorDecl *Dtor) {
+ Destructor = Dtor;
+ }
};
/// \brief Represents binding an expression to a temporary.
@@ -757,7 +824,7 @@ class CXXBindTemporaryExpr : public Expr {
CXXBindTemporaryExpr(CXXTemporary *temp, Expr* SubExpr)
: Expr(CXXBindTemporaryExprClass, SubExpr->getType(),
- VK_RValue, OK_Ordinary, SubExpr->isTypeDependent(),
+ VK_RValue, OK_Ordinary, SubExpr->isTypeDependent(),
SubExpr->isValueDependent(),
SubExpr->isInstantiationDependent(),
SubExpr->containsUnexpandedParameterPack()),
@@ -766,7 +833,7 @@ class CXXBindTemporaryExpr : public Expr {
public:
CXXBindTemporaryExpr(EmptyShell Empty)
: Expr(CXXBindTemporaryExprClass, Empty), Temp(0), SubExpr(0) {}
-
+
static CXXBindTemporaryExpr *Create(ASTContext &C, CXXTemporary *Temp,
Expr* SubExpr);
@@ -778,7 +845,7 @@ public:
Expr *getSubExpr() { return cast<Expr>(SubExpr); }
void setSubExpr(Expr *E) { SubExpr = E; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SubExpr->getSourceRange();
}
@@ -801,7 +868,7 @@ public:
CK_VirtualBase,
CK_Delegating
};
-
+
private:
CXXConstructorDecl *Constructor;
@@ -810,6 +877,7 @@ private:
unsigned NumArgs : 16;
bool Elidable : 1;
bool HadMultipleCandidates : 1;
+ bool ListInitialization : 1;
bool ZeroInitialization : 1;
unsigned ConstructKind : 2;
Stmt **Args;
@@ -820,39 +888,43 @@ protected:
CXXConstructorDecl *d, bool elidable,
Expr **args, unsigned numargs,
bool HadMultipleCandidates,
- bool ZeroInitialization = false,
- ConstructionKind ConstructKind = CK_Complete,
- SourceRange ParenRange = SourceRange());
+ bool ListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange);
/// \brief Construct an empty C++ construction expression.
CXXConstructExpr(StmtClass SC, EmptyShell Empty)
- : Expr(SC, Empty), Constructor(0), NumArgs(0), Elidable(0),
- HadMultipleCandidates(false), ZeroInitialization(0),
- ConstructKind(0), Args(0) { }
+ : Expr(SC, Empty), Constructor(0), NumArgs(0), Elidable(false),
+ HadMultipleCandidates(false), ListInitialization(false),
+ ZeroInitialization(false), ConstructKind(0), Args(0)
+ { }
public:
/// \brief Construct an empty C++ construction expression.
explicit CXXConstructExpr(EmptyShell Empty)
: Expr(CXXConstructExprClass, Empty), Constructor(0),
- NumArgs(0), Elidable(0), HadMultipleCandidates(false),
- ZeroInitialization(0), ConstructKind(0), Args(0) { }
+ NumArgs(0), Elidable(false), HadMultipleCandidates(false),
+ ListInitialization(false), ZeroInitialization(false),
+ ConstructKind(0), Args(0)
+ { }
static CXXConstructExpr *Create(ASTContext &C, QualType T,
SourceLocation Loc,
CXXConstructorDecl *D, bool Elidable,
Expr **Args, unsigned NumArgs,
bool HadMultipleCandidates,
- bool ZeroInitialization = false,
- ConstructionKind ConstructKind = CK_Complete,
- SourceRange ParenRange = SourceRange());
-
+ bool ListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange);
CXXConstructorDecl* getConstructor() const { return Constructor; }
void setConstructor(CXXConstructorDecl *C) { Constructor = C; }
-
+
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation Loc) { this->Loc = Loc; }
-
+
/// \brief Whether this construction is elidable.
bool isElidable() const { return Elidable; }
void setElidable(bool E) { Elidable = E; }
@@ -862,22 +934,26 @@ public:
bool hadMultipleCandidates() const { return HadMultipleCandidates; }
void setHadMultipleCandidates(bool V) { HadMultipleCandidates = V; }
+ /// \brief Whether this constructor call was written as list-initialization.
+ bool isListInitialization() const { return ListInitialization; }
+ void setListInitialization(bool V) { ListInitialization = V; }
+
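  // Editorial illustration: for a class type X,
  //   X a(1, 2);   // isListInitialization() == false (paren form)
  //   X b{1, 2};   // isListInitialization() == true  (C++11 brace form)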
/// \brief Whether this construction first requires
/// zero-initialization before the initializer is called.
bool requiresZeroInitialization() const { return ZeroInitialization; }
void setRequiresZeroInitialization(bool ZeroInit) {
ZeroInitialization = ZeroInit;
}
-
+
/// \brief Determines whether this constructor is actually constructing
/// a base class (rather than a complete object).
ConstructionKind getConstructionKind() const {
return (ConstructionKind)ConstructKind;
}
- void setConstructionKind(ConstructionKind CK) {
+ void setConstructionKind(ConstructionKind CK) {
ConstructKind = CK;
}
-
+
typedef ExprIterator arg_iterator;
typedef ConstExprIterator const_arg_iterator;
@@ -905,7 +981,7 @@ public:
Args[Arg] = ArgExpr;
}
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
SourceRange getParenRange() const { return ParenRange; }
static bool classof(const Stmt *T) {
@@ -933,7 +1009,7 @@ class CXXFunctionalCastExpr : public ExplicitCastExpr {
TypeSourceInfo *writtenTy,
SourceLocation tyBeginLoc, CastKind kind,
Expr *castExpr, unsigned pathSize,
- SourceLocation rParenLoc)
+ SourceLocation rParenLoc)
: ExplicitCastExpr(CXXFunctionalCastExprClass, ty, VK, kind,
castExpr, pathSize, writtenTy),
TyBeginLoc(tyBeginLoc), RParenLoc(rParenLoc) {}
@@ -957,7 +1033,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(TyBeginLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -971,7 +1047,7 @@ public:
///
/// This expression type represents a C++ "functional" cast
/// (C++[expr.type.conv]) with N != 1 arguments that invokes a
-/// constructor to build a temporary object. With N == 1 arguments the
+/// constructor to build a temporary object. With N == 1 arguments the
/// functional cast expression will be represented by CXXFunctionalCastExpr.
/// Example:
/// @code
@@ -996,8 +1072,8 @@ public:
TypeSourceInfo *getTypeSourceInfo() const { return Type; }
- SourceRange getSourceRange() const;
-
+ SourceRange getSourceRange() const LLVM_READONLY;
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXTemporaryObjectExprClass;
}
@@ -1006,6 +1082,303 @@ public:
friend class ASTStmtReader;
};
+/// \brief A C++ lambda expression, which produces a function object
+/// (of unspecified type) that can be invoked later.
+///
+/// Example:
+/// \code
+/// void low_pass_filter(std::vector<double> &values, double cutoff) {
+/// values.erase(std::remove_if(values.begin(), values.end(),
+/// [=](double value) { return value > cutoff; }),
+/// values.end());
+/// }
+/// \endcode
+///
+/// Lambda expressions can capture local variables, either by copying
+/// the values of those local variables at the time the function
+/// object is constructed (not when it is called!) or by holding a
+/// reference to the local variable. These captures can occur either
+/// implicitly or can be written explicitly between the square
+/// brackets ([...]) that start the lambda expression.
+class LambdaExpr : public Expr {
+ enum {
+ /// \brief Flag used by the Capture class to indicate that the given
+ /// capture was implicit.
+ Capture_Implicit = 0x01,
+
+ /// \brief Flag used by the Capture class to indicate that the
+ /// given capture was by-copy.
+ Capture_ByCopy = 0x02
+ };
+
+ /// \brief The source range that covers the lambda introducer ([...]).
+ SourceRange IntroducerRange;
+
+ /// \brief The number of captures.
+ unsigned NumCaptures : 16;
+
+ /// \brief The default capture kind, which is a value of type
+ /// LambdaCaptureDefault.
+ unsigned CaptureDefault : 2;
+
+ /// \brief Whether this lambda had an explicit parameter list vs. an
+ /// implicit (and empty) parameter list.
+ unsigned ExplicitParams : 1;
+
+ /// \brief Whether this lambda had the result type explicitly specified.
+ unsigned ExplicitResultType : 1;
+
+ /// \brief Whether there are any array index variables stored at the end of
+ /// this lambda expression.
+ unsigned HasArrayIndexVars : 1;
+
+ /// \brief The location of the closing brace ('}') that completes
+ /// the lambda.
+ ///
+ /// The location of the brace is also available by looking up the
+ /// function call operator in the lambda class. However, it is
+ /// stored here to improve the performance of getSourceRange(), and
+ /// to avoid having to deserialize the function call operator from a
+ /// module file just to determine the source range.
+ SourceLocation ClosingBrace;
+
+ // Note: The capture initializers are stored directly after the lambda
+ // expression, along with the index variables used to initialize by-copy
+ // array captures.
+
+public:
+ /// \brief Describes the capture of either a variable or 'this'.
+ class Capture {
+ llvm::PointerIntPair<VarDecl *, 2> VarAndBits;
+ SourceLocation Loc;
+ SourceLocation EllipsisLoc;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+ public:
+ /// \brief Create a new capture.
+ ///
+ /// \param Loc The source location associated with this capture.
+ ///
+ /// \param Kind The kind of capture (this, byref, bycopy).
+ ///
+ /// \param Implicit Whether the capture was implicit or explicit.
+ ///
+ /// \param Var The local variable being captured, or null if capturing this.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis (...) for a
+ /// capture that is a pack expansion, or an invalid source
+ /// location to indicate that this is not a pack expansion.
+ Capture(SourceLocation Loc, bool Implicit,
+ LambdaCaptureKind Kind, VarDecl *Var = 0,
+ SourceLocation EllipsisLoc = SourceLocation());
+
+ /// \brief Determine the kind of capture.
+ LambdaCaptureKind getCaptureKind() const;
+
+ /// \brief Determine whether this capture handles the C++ 'this'
+ /// pointer.
+ bool capturesThis() const { return VarAndBits.getPointer() == 0; }
+
+ /// \brief Determine whether this capture handles a variable.
+ bool capturesVariable() const { return VarAndBits.getPointer() != 0; }
+
+ /// \brief Retrieve the declaration of the local variable being
+ /// captured.
+ ///
+ /// This operation is only valid if this capture does not capture
+ /// 'this'.
+ VarDecl *getCapturedVar() const {
+ assert(!capturesThis() && "No variable available for 'this' capture");
+ return VarAndBits.getPointer();
+ }
+
+ /// \brief Determine whether this was an implicit capture (not
+ /// written between the square brackets introducing the lambda).
+ bool isImplicit() const { return VarAndBits.getInt() & Capture_Implicit; }
+
+ /// \brief Determine whether this was an explicit capture, written
+ /// between the square brackets introducing the lambda.
+ bool isExplicit() const { return !isImplicit(); }
+
+ /// \brief Retrieve the source location of the capture.
+ ///
+ /// For an explicit capture, this returns the location of the
+ /// explicit capture in the source. For an implicit capture, this
+ /// returns the location at which the variable or 'this' was first
+ /// used.
+ SourceLocation getLocation() const { return Loc; }
+
+ /// \brief Determine whether this capture is a pack expansion,
+ /// which captures a function parameter pack.
+ bool isPackExpansion() const { return EllipsisLoc.isValid(); }
+
+ /// \brief Retrieve the location of the ellipsis for a capture
+ /// that is a pack expansion.
+ SourceLocation getEllipsisLoc() const {
+ assert(isPackExpansion() && "No ellipsis location for a non-expansion");
+ return EllipsisLoc;
+ }
+ };
+
+private:
+ /// \brief Construct a lambda expression.
+ LambdaExpr(QualType T, SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace);
+
+ /// \brief Construct an empty lambda expression.
+ LambdaExpr(EmptyShell Empty, unsigned NumCaptures, bool HasArrayIndexVars)
+ : Expr(LambdaExprClass, Empty),
+ NumCaptures(NumCaptures), CaptureDefault(LCD_None), ExplicitParams(false),
+ ExplicitResultType(false), HasArrayIndexVars(HasArrayIndexVars) {
+ getStoredStmts()[NumCaptures] = 0;
+ }
+
+ Stmt **getStoredStmts() const {
+ return reinterpret_cast<Stmt **>(const_cast<LambdaExpr *>(this) + 1);
+ }
+
+ /// \brief Retrieve the mapping from captures to the first array index
+ /// variable.
+ unsigned *getArrayIndexStarts() const {
+ return reinterpret_cast<unsigned *>(getStoredStmts() + NumCaptures + 1);
+ }
+
+ /// \brief Retrieve the complete set of array-index variables.
+ VarDecl **getArrayIndexVars() const {
+ return reinterpret_cast<VarDecl **>(
+ getArrayIndexStarts() + NumCaptures + 1);
+ }
+
+public:
+ /// \brief Construct a new lambda expression.
+ static LambdaExpr *Create(ASTContext &C,
+ CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace);
+
+ /// \brief Construct a new lambda expression that will be deserialized from
+ /// an external source.
+ static LambdaExpr *CreateDeserialized(ASTContext &C, unsigned NumCaptures,
+ unsigned NumArrayIndexVars);
+
+ /// \brief Determine the default capture kind for this lambda.
+ LambdaCaptureDefault getCaptureDefault() const {
+ return static_cast<LambdaCaptureDefault>(CaptureDefault);
+ }
+
+ /// \brief An iterator that walks over the captures of the lambda,
+ /// both implicit and explicit.
+ typedef const Capture *capture_iterator;
+
+ /// \brief Retrieve an iterator pointing to the first lambda capture.
+ capture_iterator capture_begin() const;
+
+ /// \brief Retrieve an iterator pointing past the end of the
+ /// sequence of lambda captures.
+ capture_iterator capture_end() const;
+
+ /// \brief Determine the number of captures in this lambda.
+ unsigned capture_size() const { return NumCaptures; }
+
+ /// \brief Retrieve an iterator pointing to the first explicit
+ /// lambda capture.
+ capture_iterator explicit_capture_begin() const;
+
+ /// \brief Retrieve an iterator pointing past the end of the sequence of
+ /// explicit lambda captures.
+ capture_iterator explicit_capture_end() const;
+
+ /// \brief Retrieve an iterator pointing to the first implicit
+ /// lambda capture.
+ capture_iterator implicit_capture_begin() const;
+
+ /// \brief Retrieve an iterator pointing past the end of the sequence of
+ /// implicit lambda captures.
+ capture_iterator implicit_capture_end() const;
+
+ /// \brief Iterator that walks over the capture initialization
+ /// arguments.
+ typedef Expr **capture_init_iterator;
+
+ /// \brief Retrieve the first initialization argument for this
+ /// lambda expression (which initializes the first capture field).
+ capture_init_iterator capture_init_begin() const {
+ return reinterpret_cast<Expr **>(getStoredStmts());
+ }
+
+ /// \brief Retrieve the iterator pointing one past the last
+ /// initialization argument for this lambda expression.
+ capture_init_iterator capture_init_end() const {
+ return capture_init_begin() + NumCaptures;
+ }
+
+ /// \brief Retrieve the set of index variables used in the capture
+ /// initializer of an array captured by copy.
+ ///
+ /// \param Iter The iterator that points at the capture initializer for
+ /// which we are extracting the corresponding index variables.
+ ArrayRef<VarDecl *> getCaptureInitIndexVars(capture_init_iterator Iter) const;
+
+ /// \brief Retrieve the source range covering the lambda introducer,
+ /// which contains the explicit capture list surrounded by square
+ /// brackets ([...]).
+ SourceRange getIntroducerRange() const { return IntroducerRange; }
+
+ /// \brief Retrieve the class that corresponds to the lambda, which
+ /// stores the captures in its fields and provides the various
+ /// operations permitted on a lambda (copying, calling).
+ CXXRecordDecl *getLambdaClass() const;
+
+ /// \brief Retrieve the function call operator associated with this
+ /// lambda expression.
+ CXXMethodDecl *getCallOperator() const;
+
+ /// \brief Retrieve the body of the lambda.
+ CompoundStmt *getBody() const;
+
+ /// \brief Determine whether the lambda is mutable, meaning that any
+ /// captures values can be modified.
+ bool isMutable() const;
+
+ /// \brief Determine whether this lambda has an explicit parameter
+ /// list vs. an implicit (empty) parameter list.
+ bool hasExplicitParameters() const { return ExplicitParams; }
+
+ /// \brief Whether this lambda had its result type explicitly specified.
+ bool hasExplicitResultType() const { return ExplicitResultType; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == LambdaExprClass;
+ }
+ static bool classof(const LambdaExpr *) { return true; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(IntroducerRange.getBegin(), ClosingBrace);
+ }
+
+ child_range children() {
+ return child_range(getStoredStmts(), getStoredStmts() + NumCaptures + 1);
+ }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
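To tie the capture machinery above to source syntax, an editorial sketch (illustrative only):

  // Sketch: explicit vs. implicit captures, as modelled by LambdaExpr::Capture.
  void demo(int cutoff) {
    int hits = 0;
    auto count = [&hits, cutoff](int v) { // explicit captures: both appear in
      if (v > cutoff) ++hits;             // the introducer, isExplicit() == true
    };
    auto tally = [&](int v) {             // capture-default '&' (LCD_ByRef);
      if (v > cutoff) ++hits;             // hits and cutoff are captured
    };                                    // implicitly, isImplicit() == true
    count(5);
    tally(7);
  }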
/// CXXScalarValueInitExpr - [C++ 5.2.3p2]
/// Expression "T()" which creates a value-initialized rvalue of type
/// T, which is a non-class type.
@@ -1015,9 +1388,9 @@ class CXXScalarValueInitExpr : public Expr {
TypeSourceInfo *TypeInfo;
friend class ASTStmtReader;
-
+
public:
- /// \brief Create an explicitly-written scalar-value initialization
+ /// \brief Create an explicitly-written scalar-value initialization
/// expression.
CXXScalarValueInitExpr(QualType Type,
TypeSourceInfo *TypeInfo,
@@ -1032,10 +1405,10 @@ public:
TypeSourceInfo *getTypeSourceInfo() const {
return TypeInfo;
}
-
+
SourceLocation getRParenLoc() const { return RParenLoc; }
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXScalarValueInitExprClass;
@@ -1049,68 +1422,63 @@ public:
/// CXXNewExpr - A new expression for memory allocation and constructor calls,
/// e.g: "new CXXNewExpr(foo)".
class CXXNewExpr : public Expr {
- // Was the usage ::new, i.e. is the global new to be used?
- bool GlobalNew : 1;
- // Is there an initializer? If not, built-ins are uninitialized, else they're
- // value-initialized.
- bool Initializer : 1;
- // Do we allocate an array? If so, the first SubExpr is the size expression.
- bool Array : 1;
- // If this is an array allocation, does the usual deallocation
- // function for the allocated type want to know the allocated size?
- bool UsualArrayDeleteWantsSize : 1;
- // Whether the referred constructor (if any) was resolved from an
- // overload set having size greater than 1.
- bool HadMultipleCandidates : 1;
- // The number of placement new arguments.
- unsigned NumPlacementArgs : 13;
- // The number of constructor arguments. This may be 1 even for non-class
- // types; use the pseudo copy constructor.
- unsigned NumConstructorArgs : 14;
- // Contains an optional array size expression, any number of optional
- // placement arguments, and any number of optional constructor arguments,
- // in that order.
+ // Contains an optional array size expression, an optional initialization
+ // expression, and any number of optional placement arguments, in that order.
Stmt **SubExprs;
// Points to the allocation function used.
FunctionDecl *OperatorNew;
// Points to the deallocation function used in case of error. May be null.
FunctionDecl *OperatorDelete;
- // Points to the constructor used. Cannot be null if AllocType is a record;
- // it would still point at the default constructor (even an implicit one).
- // Must be null for all other types.
- CXXConstructorDecl *Constructor;
/// \brief The allocated type-source information, as written in the source.
TypeSourceInfo *AllocatedTypeInfo;
-
- /// \brief If the allocated type was expressed as a parenthesized type-id,
+
+ /// \brief If the allocated type was expressed as a parenthesized type-id,
/// the source range covering the parenthesized type-id.
SourceRange TypeIdParens;
-
+
+ /// \brief Location of the first token.
SourceLocation StartLoc;
- SourceLocation EndLoc;
- SourceLocation ConstructorLParen;
- SourceLocation ConstructorRParen;
+
+ /// \brief Source-range of a paren-delimited initializer.
+ SourceRange DirectInitRange;
+
+ // Was the usage ::new, i.e. is the global new to be used?
+ bool GlobalNew : 1;
+ // Do we allocate an array? If so, the first SubExpr is the size expression.
+ bool Array : 1;
+ // If this is an array allocation, does the usual deallocation
+ // function for the allocated type want to know the allocated size?
+ bool UsualArrayDeleteWantsSize : 1;
+ // The number of placement new arguments.
+ unsigned NumPlacementArgs : 13;
+ // What kind of initializer do we have? Could be none, parens, or braces.
+ // In storage, we distinguish between "none, and no initializer expr", and
+ // "none, but an implicit initializer expr".
+ unsigned StoredInitializationStyle : 2;
friend class ASTStmtReader;
+ friend class ASTStmtWriter;
public:
+ enum InitializationStyle {
+ NoInit, ///< New-expression has no initializer as written.
+ CallInit, ///< New-expression has a C++98 paren-delimited initializer.
+ ListInit ///< New-expression has a C++11 list-initializer.
+ };
+
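  // Editorial illustration of the three styles:
  //   new int;     -> NoInit   (no initializer written)
  //   new int(7);  -> CallInit (paren-delimited initializer)
  //   new int{7};  -> ListInit (C++11 brace-delimited initializer)
  // StoredInitializationStyle holds the style shifted up by one: 0 means no
  // initializer expression at all, 1 means none written but an implicit
  // initializer expression is present, hence hasInitializer() tests > 0.
  // For "new (p1, p2) T[n] /*init*/", SubExprs is laid out as
  //   { n, initializer, p1, p2 }.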
CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
- Expr **placementArgs, unsigned numPlaceArgs,
- SourceRange TypeIdParens,
- Expr *arraySize, CXXConstructorDecl *constructor, bool initializer,
- Expr **constructorArgs, unsigned numConsArgs,
- bool HadMultipleCandidates,
FunctionDecl *operatorDelete, bool usualArrayDeleteWantsSize,
+ Expr **placementArgs, unsigned numPlaceArgs,
+ SourceRange typeIdParens, Expr *arraySize,
+ InitializationStyle initializationStyle, Expr *initializer,
QualType ty, TypeSourceInfo *AllocatedTypeInfo,
- SourceLocation startLoc, SourceLocation endLoc,
- SourceLocation constructorLParen,
- SourceLocation constructorRParen);
+ SourceLocation startLoc, SourceRange directInitRange);
explicit CXXNewExpr(EmptyShell Shell)
: Expr(CXXNewExprClass, Shell), SubExprs(0) { }
void AllocateArgsArray(ASTContext &C, bool isArray, unsigned numPlaceArgs,
- unsigned numConsArgs);
-
+ bool hasInitializer);
+
QualType getAllocatedType() const {
assert(getType()->isPointerType());
return getType()->getAs<PointerType>()->getPointeeType();
@@ -1130,13 +1498,11 @@ public:
/// identical except that the definition of a non-throwing
/// exception specification is just "is it throw()?".
bool shouldNullCheckAllocation(ASTContext &Ctx) const;
-
+
FunctionDecl *getOperatorNew() const { return OperatorNew; }
void setOperatorNew(FunctionDecl *D) { OperatorNew = D; }
FunctionDecl *getOperatorDelete() const { return OperatorDelete; }
void setOperatorDelete(FunctionDecl *D) { OperatorDelete = D; }
- CXXConstructorDecl *getConstructor() const { return Constructor; }
- void setConstructor(CXXConstructorDecl *D) { Constructor = D; }
bool isArray() const { return Array; }
Expr *getArraySize() {
@@ -1147,97 +1513,87 @@ public:
}
unsigned getNumPlacementArgs() const { return NumPlacementArgs; }
- Expr **getPlacementArgs() {
- return reinterpret_cast<Expr **>(SubExprs + Array);
+ Expr **getPlacementArgs() {
+ return reinterpret_cast<Expr **>(SubExprs + Array + hasInitializer());
}
-
+
Expr *getPlacementArg(unsigned i) {
assert(i < NumPlacementArgs && "Index out of range");
- return cast<Expr>(SubExprs[Array + i]);
+ return getPlacementArgs()[i];
}
const Expr *getPlacementArg(unsigned i) const {
assert(i < NumPlacementArgs && "Index out of range");
- return cast<Expr>(SubExprs[Array + i]);
+ return const_cast<CXXNewExpr*>(this)->getPlacementArg(i);
}
bool isParenTypeId() const { return TypeIdParens.isValid(); }
SourceRange getTypeIdParens() const { return TypeIdParens; }
bool isGlobalNew() const { return GlobalNew; }
- bool hasInitializer() const { return Initializer; }
- /// Answers whether the usual array deallocation function for the
- /// allocated type expects the size of the allocation as a
- /// parameter.
- bool doesUsualArrayDeleteWantSize() const {
- return UsualArrayDeleteWantsSize;
+ /// \brief Whether this new-expression has any initializer at all.
+ bool hasInitializer() const { return StoredInitializationStyle > 0; }
+
+ /// \brief The kind of initializer this new-expression has.
+ InitializationStyle getInitializationStyle() const {
+ if (StoredInitializationStyle == 0)
+ return NoInit;
+ return static_cast<InitializationStyle>(StoredInitializationStyle-1);
}
- unsigned getNumConstructorArgs() const { return NumConstructorArgs; }
-
- Expr **getConstructorArgs() {
- return reinterpret_cast<Expr **>(SubExprs + Array + NumPlacementArgs);
+ /// \brief The initializer of this new-expression.
+ Expr *getInitializer() {
+ return hasInitializer() ? cast<Expr>(SubExprs[Array]) : 0;
}
-
- Expr *getConstructorArg(unsigned i) {
- assert(i < NumConstructorArgs && "Index out of range");
- return cast<Expr>(SubExprs[Array + NumPlacementArgs + i]);
+ const Expr *getInitializer() const {
+ return hasInitializer() ? cast<Expr>(SubExprs[Array]) : 0;
}
- const Expr *getConstructorArg(unsigned i) const {
- assert(i < NumConstructorArgs && "Index out of range");
- return cast<Expr>(SubExprs[Array + NumPlacementArgs + i]);
+
+ /// \brief Returns the CXXConstructExpr from this new-expression, or NULL.
+ const CXXConstructExpr* getConstructExpr() {
+ return dyn_cast_or_null<CXXConstructExpr>(getInitializer());
}
- /// \brief Whether the new expression refers a constructor that was
- /// resolved from an overloaded set having size greater than 1.
- bool hadMultipleCandidates() const { return HadMultipleCandidates; }
- void setHadMultipleCandidates(bool V) { HadMultipleCandidates = V; }
+ /// Answers whether the usual array deallocation function for the
+ /// allocated type expects the size of the allocation as a
+ /// parameter.
+ bool doesUsualArrayDeleteWantSize() const {
+ return UsualArrayDeleteWantsSize;
+ }
typedef ExprIterator arg_iterator;
typedef ConstExprIterator const_arg_iterator;
arg_iterator placement_arg_begin() {
- return SubExprs + Array;
+ return SubExprs + Array + hasInitializer();
}
arg_iterator placement_arg_end() {
- return SubExprs + Array + getNumPlacementArgs();
+ return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
}
const_arg_iterator placement_arg_begin() const {
- return SubExprs + Array;
+ return SubExprs + Array + hasInitializer();
}
const_arg_iterator placement_arg_end() const {
- return SubExprs + Array + getNumPlacementArgs();
+ return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
}
- arg_iterator constructor_arg_begin() {
- return SubExprs + Array + getNumPlacementArgs();
- }
- arg_iterator constructor_arg_end() {
- return SubExprs + Array + getNumPlacementArgs() + getNumConstructorArgs();
- }
- const_arg_iterator constructor_arg_begin() const {
- return SubExprs + Array + getNumPlacementArgs();
- }
- const_arg_iterator constructor_arg_end() const {
- return SubExprs + Array + getNumPlacementArgs() + getNumConstructorArgs();
- }
-
typedef Stmt **raw_arg_iterator;
raw_arg_iterator raw_arg_begin() { return SubExprs; }
raw_arg_iterator raw_arg_end() {
- return SubExprs + Array + getNumPlacementArgs() + getNumConstructorArgs();
+ return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
}
const_arg_iterator raw_arg_begin() const { return SubExprs; }
- const_arg_iterator raw_arg_end() const { return constructor_arg_end(); }
+ const_arg_iterator raw_arg_end() const {
+ return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
+ }
SourceLocation getStartLoc() const { return StartLoc; }
- SourceLocation getEndLoc() const { return EndLoc; }
+ SourceLocation getEndLoc() const;
- SourceLocation getConstructorLParen() const { return ConstructorLParen; }
- SourceLocation getConstructorRParen() const { return ConstructorRParen; }
+ SourceRange getDirectInitRange() const { return DirectInitRange; }
- SourceRange getSourceRange() const {
- return SourceRange(StartLoc, EndLoc);
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getStartLoc(), getEndLoc());
}
static bool classof(const Stmt *T) {
@@ -1247,15 +1603,19 @@ public:
// Iterators
child_range children() {
- return child_range(&SubExprs[0],
- &SubExprs[0] + Array + getNumPlacementArgs()
- + getNumConstructorArgs());
+ return child_range(raw_arg_begin(), raw_arg_end());
}
};
/// CXXDeleteExpr - A delete expression for memory deallocation and destructor
/// calls, e.g. "delete[] pArray".
class CXXDeleteExpr : public Expr {
+ // Points to the operator delete overload that is used. Could be a member.
+ FunctionDecl *OperatorDelete;
+ // The pointer expression to be deleted.
+ Stmt *Argument;
+ // Location of the expression.
+ SourceLocation Loc;
// Is this a forced global delete, i.e. "::delete"?
bool GlobalDelete : 1;
// Is this the array form of delete, i.e. "delete[]"?
@@ -1267,12 +1627,6 @@ class CXXDeleteExpr : public Expr {
// Does the usual deallocation function for the element type require
// a size_t argument?
bool UsualArrayDeleteWantsSize : 1;
- // Points to the operator delete overload that is used. Could be a member.
- FunctionDecl *OperatorDelete;
- // The pointer expression to be deleted.
- Stmt *Argument;
- // Location of the expression.
- SourceLocation Loc;
public:
CXXDeleteExpr(QualType ty, bool globalDelete, bool arrayForm,
bool arrayFormAsWritten, bool usualArrayDeleteWantsSize,
@@ -1280,10 +1634,10 @@ public:
: Expr(CXXDeleteExprClass, ty, VK_RValue, OK_Ordinary, false, false,
arg->isInstantiationDependent(),
arg->containsUnexpandedParameterPack()),
+ OperatorDelete(operatorDelete), Argument(arg), Loc(loc),
GlobalDelete(globalDelete),
ArrayForm(arrayForm), ArrayFormAsWritten(arrayFormAsWritten),
- UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize),
- OperatorDelete(operatorDelete), Argument(arg), Loc(loc) { }
+ UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) { }
explicit CXXDeleteExpr(EmptyShell Shell)
: Expr(CXXDeleteExprClass, Shell), OperatorDelete(0), Argument(0) { }
@@ -1308,8 +1662,8 @@ public:
/// destroyed is a dependent type which may or may not be a pointer,
/// return an invalid type.
QualType getDestroyedType() const;
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(Loc, Argument->getLocEnd());
}
@@ -1324,39 +1678,39 @@ public:
friend class ASTStmtReader;
};
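
The member reordering in the CXXDeleteExpr hunk above keeps the declaration order in sync with the constructor's mem-initializer list. A minimal illustration of why that matters (hypothetical type, not from the patch):

    // Members are initialized in declaration order, not in the order they
    // appear in the mem-initializer list, so mismatched orders draw
    // -Wreorder warnings and can silently read uninitialized members.
    struct Reordered {
      int *Ptr;        // declared before the bit-fields...
      bool Flag : 1;
      Reordered(int *p, bool f)
          : Ptr(p), Flag(f) {}  // ...and therefore listed first here, too
    };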
-/// \brief Structure used to store the type being destroyed by a
+/// \brief Structure used to store the type being destroyed by a
/// pseudo-destructor expression.
class PseudoDestructorTypeStorage {
- /// \brief Either the type source information or the name of the type, if
+ /// \brief Either the type source information or the name of the type, if
/// it couldn't be resolved due to type-dependence.
llvm::PointerUnion<TypeSourceInfo *, IdentifierInfo *> Type;
-
+
/// \brief The starting source location of the pseudo-destructor type.
SourceLocation Location;
-
+
public:
PseudoDestructorTypeStorage() { }
-
+
PseudoDestructorTypeStorage(IdentifierInfo *II, SourceLocation Loc)
: Type(II), Location(Loc) { }
-
+
PseudoDestructorTypeStorage(TypeSourceInfo *Info);
-
- TypeSourceInfo *getTypeSourceInfo() const {
- return Type.dyn_cast<TypeSourceInfo *>();
+
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return Type.dyn_cast<TypeSourceInfo *>();
}
-
+
IdentifierInfo *getIdentifier() const {
return Type.dyn_cast<IdentifierInfo *>();
}
-
+
SourceLocation getLocation() const { return Location; }
};
-
+
/// \brief Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
///
/// A pseudo-destructor is an expression that looks like a member access to a
-/// destructor of a scalar type, except that scalar types don't have
+/// destructor of a scalar type, except that scalar types don't have
/// destructors. For example:
///
/// \code
@@ -1367,7 +1721,7 @@ public:
/// \endcode
///
/// Pseudo-destructors typically occur when instantiating templates such as:
-///
+///
/// \code
/// template<typename T>
/// void destroy(T* ptr) {
@@ -1387,27 +1741,27 @@ class CXXPseudoDestructorExpr : public Expr {
/// \brief The location of the '.' or '->' operator.
SourceLocation OperatorLoc;
-
+
/// \brief The nested-name-specifier that follows the operator, if present.
NestedNameSpecifierLoc QualifierLoc;
/// \brief The type that precedes the '::' in a qualified pseudo-destructor
/// expression.
TypeSourceInfo *ScopeType;
-
- /// \brief The location of the '::' in a qualified pseudo-destructor
+
+ /// \brief The location of the '::' in a qualified pseudo-destructor
/// expression.
SourceLocation ColonColonLoc;
-
+
/// \brief The location of the '~'.
SourceLocation TildeLoc;
-
- /// \brief The type being destroyed, or its name if we were unable to
+
+ /// \brief The type being destroyed, or its name if we were unable to
/// resolve the name.
PseudoDestructorTypeStorage DestroyedType;
friend class ASTStmtReader;
-
+
public:
CXXPseudoDestructorExpr(ASTContext &Context,
Expr *Base, bool isArrow, SourceLocation OperatorLoc,
@@ -1431,12 +1785,12 @@ public:
/// \brief Retrieves the nested-name-specifier that qualifies the type name,
/// with source-location information.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
-
+
/// \brief If the member name was qualified, retrieves the
/// nested-name-specifier that precedes the member name. Otherwise, returns
/// NULL.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
/// \brief Determine whether this pseudo-destructor expression was written
@@ -1446,48 +1800,48 @@ public:
/// \brief Retrieve the location of the '.' or '->' operator.
SourceLocation getOperatorLoc() const { return OperatorLoc; }
- /// \brief Retrieve the scope type in a qualified pseudo-destructor
+ /// \brief Retrieve the scope type in a qualified pseudo-destructor
/// expression.
///
/// Pseudo-destructor expressions can have extra qualification within them
/// that is not part of the nested-name-specifier, e.g., \c p->T::~T().
/// Here, if the object type of the expression is (or may be) a scalar type,
- /// \p T may also be a scalar type and, therefore, cannot be part of a
+ /// \p T may also be a scalar type and, therefore, cannot be part of a
/// nested-name-specifier. It is stored as the "scope type" of the pseudo-
/// destructor expression.
TypeSourceInfo *getScopeTypeInfo() const { return ScopeType; }
-
+
/// \brief Retrieve the location of the '::' in a qualified pseudo-destructor
/// expression.
SourceLocation getColonColonLoc() const { return ColonColonLoc; }
-
+
/// \brief Retrieve the location of the '~'.
SourceLocation getTildeLoc() const { return TildeLoc; }
-
+
/// \brief Retrieve the source location information for the type
/// being destroyed.
///
- /// This type-source information is available for non-dependent
+ /// This type-source information is available for non-dependent
/// pseudo-destructor expressions and some dependent pseudo-destructor
/// expressions. Returns NULL if we only have the identifier for a
/// dependent pseudo-destructor expression.
- TypeSourceInfo *getDestroyedTypeInfo() const {
- return DestroyedType.getTypeSourceInfo();
+ TypeSourceInfo *getDestroyedTypeInfo() const {
+ return DestroyedType.getTypeSourceInfo();
}
-
+
/// \brief In a dependent pseudo-destructor expression for which we do not
/// have full type information on the destroyed type, provides the name
/// of the destroyed type.
IdentifierInfo *getDestroyedTypeIdentifier() const {
return DestroyedType.getIdentifier();
}
-
+
/// \brief Retrieve the type being destroyed.
QualType getDestroyedType() const;
-
+
/// \brief Retrieve the starting location of the type being destroyed.
- SourceLocation getDestroyedTypeLoc() const {
- return DestroyedType.getLocation();
+ SourceLocation getDestroyedTypeLoc() const {
+ return DestroyedType.getLocation();
}
/// \brief Set the name of destroyed type for a dependent pseudo-destructor
@@ -1501,7 +1855,7 @@ public:
DestroyedType = PseudoDestructorTypeStorage(Info);
}
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXPseudoDestructorExprClass;
@@ -1533,7 +1887,7 @@ class UnaryTypeTraitExpr : public Expr {
TypeSourceInfo *QueriedType;
public:
- UnaryTypeTraitExpr(SourceLocation loc, UnaryTypeTrait utt,
+ UnaryTypeTraitExpr(SourceLocation loc, UnaryTypeTrait utt,
TypeSourceInfo *queried, bool value,
SourceLocation rparen, QualType ty)
: Expr(UnaryTypeTraitExprClass, ty, VK_RValue, OK_Ordinary,
@@ -1546,14 +1900,14 @@ public:
: Expr(UnaryTypeTraitExprClass, Empty), UTT(0), Value(false),
QueriedType() { }
- SourceRange getSourceRange() const { return SourceRange(Loc, RParen);}
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc, RParen); }
UnaryTypeTrait getTrait() const { return static_cast<UnaryTypeTrait>(UTT); }
QualType getQueriedType() const { return QueriedType->getType(); }
TypeSourceInfo *getQueriedTypeSourceInfo() const { return QueriedType; }
-
+
bool getValue() const { return Value; }
static bool classof(const Stmt *T) {
@@ -1591,10 +1945,10 @@ class BinaryTypeTraitExpr : public Expr {
TypeSourceInfo *RhsType;
public:
- BinaryTypeTraitExpr(SourceLocation loc, BinaryTypeTrait btt,
- TypeSourceInfo *lhsType, TypeSourceInfo *rhsType,
+ BinaryTypeTraitExpr(SourceLocation loc, BinaryTypeTrait btt,
+ TypeSourceInfo *lhsType, TypeSourceInfo *rhsType,
bool value, SourceLocation rparen, QualType ty)
- : Expr(BinaryTypeTraitExprClass, ty, VK_RValue, OK_Ordinary, false,
+ : Expr(BinaryTypeTraitExprClass, ty, VK_RValue, OK_Ordinary, false,
lhsType->getType()->isDependentType() ||
rhsType->getType()->isDependentType(),
(lhsType->getType()->isInstantiationDependentType() ||
@@ -1609,7 +1963,7 @@ public:
: Expr(BinaryTypeTraitExprClass, Empty), BTT(0), Value(false),
LhsType(), RhsType() { }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(Loc, RParen);
}
@@ -1622,7 +1976,7 @@ public:
TypeSourceInfo *getLhsTypeSourceInfo() const { return LhsType; }
TypeSourceInfo *getRhsTypeSourceInfo() const { return RhsType; }
-
+
bool getValue() const { assert(!isTypeDependent()); return Value; }
static bool classof(const Stmt *T) {
@@ -1636,12 +1990,110 @@ public:
friend class ASTStmtReader;
};
+/// \brief A type trait used in the implementation of various C++11 and
+/// Library TR1 trait templates.
+///
+/// \code
+/// __is_trivially_constructible(vector<int>, int*, int*)
+/// \endcode
+class TypeTraitExpr : public Expr {
+ /// \brief The location of the type trait keyword.
+ SourceLocation Loc;
+
+ /// \brief The location of the closing parenthesis.
+ SourceLocation RParenLoc;
+
+ // Note: The TypeSourceInfos for the arguments are allocated after the
+ // TypeTraitExpr.
+
+ TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value);
+
+ TypeTraitExpr(EmptyShell Empty) : Expr(TypeTraitExprClass, Empty) { }
+
+ /// \brief Retrieve the argument types.
+ TypeSourceInfo **getTypeSourceInfos() {
+ return reinterpret_cast<TypeSourceInfo **>(this+1);
+ }
+
+ /// \brief Retrieve the argument types.
+ TypeSourceInfo * const *getTypeSourceInfos() const {
+ return reinterpret_cast<TypeSourceInfo * const*>(this+1);
+ }
+
+public:
+ /// \brief Create a new type trait expression.
+ static TypeTraitExpr *Create(ASTContext &C, QualType T, SourceLocation Loc,
+ TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value);
+
+ static TypeTraitExpr *CreateDeserialized(ASTContext &C, unsigned NumArgs);
+
+ /// \brief Determine which type trait this expression uses.
+ TypeTrait getTrait() const {
+ return static_cast<TypeTrait>(TypeTraitExprBits.Kind);
+ }
+
+ bool getValue() const {
+ assert(!isValueDependent());
+ return TypeTraitExprBits.Value;
+ }
+
+ /// \brief Determine the number of arguments to this type trait.
+ unsigned getNumArgs() const { return TypeTraitExprBits.NumArgs; }
+
+ /// \brief Retrieve the Ith argument.
+ TypeSourceInfo *getArg(unsigned I) const {
+ assert(I < getNumArgs() && "Argument out-of-range");
+ return getArgs()[I];
+ }
+
+ /// \brief Retrieve the argument types.
+ ArrayRef<TypeSourceInfo *> getArgs() const {
+ return ArrayRef<TypeSourceInfo *>(getTypeSourceInfos(), getNumArgs());
+ }
+
+ typedef TypeSourceInfo **arg_iterator;
+ arg_iterator arg_begin() {
+ return getTypeSourceInfos();
+ }
+ arg_iterator arg_end() {
+ return getTypeSourceInfos() + getNumArgs();
+ }
+
+ typedef TypeSourceInfo const * const *arg_const_iterator;
+ arg_const_iterator arg_begin() const { return getTypeSourceInfos(); }
+ arg_const_iterator arg_end() const {
+ return getTypeSourceInfos() + getNumArgs();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc, RParenLoc); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == TypeTraitExprClass;
+ }
+ static bool classof(const TypeTraitExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+};
+
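The reinterpret_cast<TypeSourceInfo **>(this + 1) accessors above rely on the arguments being allocated in the same block as the node itself. A self-contained sketch of that trailing-storage idiom (illustrative names, not clang's API; alignment of the trailing array is assumed to be satisfied by the header):

    #include <cstddef>
    #include <new>

    // Variable-length trailing storage: one allocation holds the node
    // header followed immediately by NumArgs pointers, reachable via
    // (this + 1).
    struct TrailingArgsSketch {
      unsigned NumArgs;

      void **args() { return reinterpret_cast<void **>(this + 1); }

      static TrailingArgsSketch *create(unsigned N) {
        void *Mem = ::operator new(sizeof(TrailingArgsSketch) +
                                   N * sizeof(void *));
        TrailingArgsSketch *Node = new (Mem) TrailingArgsSketch();
        Node->NumArgs = N;
        return Node;
      }
    };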
/// ArrayTypeTraitExpr - An Embarcadero array type trait, as used in the
/// implementation of __array_rank and __array_extent.
/// Example:
/// __array_rank(int[10][20]) == 2
/// __array_extent(int, 1) == 20
class ArrayTypeTraitExpr : public Expr {
+ virtual void anchor();
+
/// ATT - The trait. An ArrayTypeTrait enum stored as an unsigned for MSVC compatibility.
unsigned ATT : 2;
@@ -1679,7 +2131,9 @@ public:
virtual ~ArrayTypeTraitExpr() { }
- virtual SourceRange getSourceRange() const { return SourceRange(Loc, RParen); }
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(Loc, RParen);
+ }
ArrayTypeTrait getTrait() const { return static_cast<ArrayTypeTrait>(ATT); }
@@ -1720,7 +2174,7 @@ class ExpressionTraitExpr : public Expr {
Expr* QueriedExpression;
public:
- ExpressionTraitExpr(SourceLocation loc, ExpressionTrait et,
+ ExpressionTraitExpr(SourceLocation loc, ExpressionTrait et,
Expr *queried, bool value,
SourceLocation rparen, QualType resultType)
: Expr(ExpressionTraitExprClass, resultType, VK_RValue, OK_Ordinary,
@@ -1729,13 +2183,14 @@ public:
queried->isTypeDependent(),
queried->isInstantiationDependent(),
queried->containsUnexpandedParameterPack()),
- ET(et), Value(value), Loc(loc), RParen(rparen), QueriedExpression(queried) { }
+ ET(et), Value(value), Loc(loc), RParen(rparen),
+ QueriedExpression(queried) { }
explicit ExpressionTraitExpr(EmptyShell Empty)
: Expr(ExpressionTraitExprClass, Empty), ET(0), Value(false),
QueriedExpression() { }
- SourceRange getSourceRange() const { return SourceRange(Loc, RParen);}
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc, RParen); }
ExpressionTrait getTrait() const { return static_cast<ExpressionTrait>(ET); }
@@ -1758,6 +2213,12 @@ public:
/// \brief A reference to an overloaded function set, either an
/// \c UnresolvedLookupExpr or an \c UnresolvedMemberExpr.
class OverloadExpr : public Expr {
+ /// The common name of these declarations.
+ DeclarationNameInfo NameInfo;
+
+ /// \brief The nested-name-specifier that qualifies the name, if any.
+ NestedNameSpecifierLoc QualifierLoc;
+
/// The results. These are undesugared, which is to say, they may
/// include UsingShadowDecls. Access is relative to the naming
/// class.
@@ -1765,18 +2226,22 @@ class OverloadExpr : public Expr {
DeclAccessPair *Results;
unsigned NumResults;
- /// The common name of these declarations.
- DeclarationNameInfo NameInfo;
+protected:
+ /// \brief Whether the name includes info for explicit template
+ /// keyword and arguments.
+ bool HasTemplateKWAndArgsInfo;
- /// \brief The nested-name-specifier that qualifies the name, if any.
- NestedNameSpecifierLoc QualifierLoc;
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo(); // defined far below.
-protected:
- /// True if the name was a template-id.
- bool HasExplicitTemplateArgs;
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<OverloadExpr*>(this)->getTemplateKWAndArgsInfo();
+ }
OverloadExpr(StmtClass K, ASTContext &C,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End,
@@ -1785,8 +2250,8 @@ protected:
bool KnownContainsUnexpandedParameterPack);
OverloadExpr(StmtClass K, EmptyShell Empty)
- : Expr(K, Empty), Results(0), NumResults(0),
- QualifierLoc(), HasExplicitTemplateArgs(false) { }
+ : Expr(K, Empty), QualifierLoc(), Results(0), NumResults(0),
+ HasTemplateKWAndArgsInfo(false) { }
void initializeResults(ASTContext &C,
UnresolvedSetIterator Begin,
@@ -1832,10 +2297,10 @@ public:
typedef UnresolvedSetImpl::iterator decls_iterator;
decls_iterator decls_begin() const { return UnresolvedSetIterator(Results); }
- decls_iterator decls_end() const {
+ decls_iterator decls_end() const {
return UnresolvedSetIterator(Results + NumResults);
}
-
+
/// Gets the number of declarations in the unresolved set.
unsigned getNumDecls() const { return NumResults; }
@@ -1849,24 +2314,67 @@ public:
SourceLocation getNameLoc() const { return NameInfo.getLoc(); }
/// Fetches the nested-name qualifier, if one was given.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
- /// Fetches the nested-name qualifier with source-location information, if
+ /// Fetches the nested-name qualifier with source-location information, if
/// one was given.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
- /// \brief Determines whether this expression had an explicit
- /// template argument list, e.g. f<int>.
- bool hasExplicitTemplateArgs() const { return HasExplicitTemplateArgs; }
+ /// \brief Retrieve the location of the template keyword preceding
+ /// this name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// Determines whether the name was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
+ /// Determines whether this expression had explicit template arguments.
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
+
+ // Note that, inconsistently with the explicit-template-argument AST
+ // nodes, users are *forbidden* from calling these methods on objects
+ // without explicit template arguments.
- ASTTemplateArgumentListInfo &getExplicitTemplateArgs(); // defined far below
+ ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
+ assert(hasExplicitTemplateArgs());
+ return *getTemplateKWAndArgsInfo();
+ }
const ASTTemplateArgumentListInfo &getExplicitTemplateArgs() const {
return const_cast<OverloadExpr*>(this)->getExplicitTemplateArgs();
}
+ TemplateArgumentLoc const *getTemplateArgs() const {
+ return getExplicitTemplateArgs().getTemplateArgs();
+ }
+
+ unsigned getNumTemplateArgs() const {
+ return getExplicitTemplateArgs().NumTemplateArgs;
+ }
+
+ /// Copies the template arguments into the given structure.
+ void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
+ getExplicitTemplateArgs().copyInto(List);
+ }
+
/// \brief Retrieves the optional explicit template arguments.
/// This points to the same data as getExplicitTemplateArgs(), but
/// returns null if there are no explicit template arguments.
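
A hypothetical caller (sketch, not from the patch) showing the contract these accessors establish: when no ASTTemplateKWAndArgsInfo was allocated, the location getters return an invalid SourceLocation, so the has* predicates can be derived instead of stored:

    #include "llvm/Support/raw_ostream.h"

    static void describe(const OverloadExpr *E) {
      if (E->hasTemplateKeyword())       // getTemplateKeywordLoc().isValid()
        llvm::errs() << "name written with the 'template' keyword\n";
      if (E->hasExplicitTemplateArgs())  // getLAngleLoc().isValid()
        llvm::errs() << E->getNumTemplateArgs()
                     << " explicit template arguments\n";
    }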
@@ -1916,16 +2424,17 @@ class UnresolvedLookupExpr : public OverloadExpr {
/// against the qualified-lookup bits.
CXXRecordDecl *NamingClass;
- UnresolvedLookupExpr(ASTContext &C,
+ UnresolvedLookupExpr(ASTContext &C,
CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
- bool RequiresADL, bool Overloaded,
+ bool RequiresADL, bool Overloaded,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End,
bool StdIsAssociatedNamespace)
- : OverloadExpr(UnresolvedLookupExprClass, C, QualifierLoc, NameInfo,
- TemplateArgs, Begin, End, false, false, false),
+ : OverloadExpr(UnresolvedLookupExprClass, C, QualifierLoc, TemplateKWLoc,
+ NameInfo, TemplateArgs, Begin, End, false, false, false),
RequiresADL(RequiresADL),
StdIsAssociatedNamespace(StdIsAssociatedNamespace),
Overloaded(Overloaded), NamingClass(NamingClass)
@@ -1938,19 +2447,20 @@ class UnresolvedLookupExpr : public OverloadExpr {
{}
friend class ASTStmtReader;
-
+
public:
static UnresolvedLookupExpr *Create(ASTContext &C,
CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo,
bool ADL, bool Overloaded,
- UnresolvedSetIterator Begin,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator Begin,
UnresolvedSetIterator End,
bool StdIsAssociatedNamespace = false) {
assert((ADL || !StdIsAssociatedNamespace) &&
"std considered associated namespace when not performing ADL");
- return new(C) UnresolvedLookupExpr(C, NamingClass, QualifierLoc, NameInfo,
+ return new(C) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
+ SourceLocation(), NameInfo,
ADL, Overloaded, 0, Begin, End,
StdIsAssociatedNamespace);
}
@@ -1958,14 +2468,15 @@ public:
static UnresolvedLookupExpr *Create(ASTContext &C,
CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool ADL,
- const TemplateArgumentListInfo &Args,
- UnresolvedSetIterator Begin,
+ const TemplateArgumentListInfo *Args,
+ UnresolvedSetIterator Begin,
UnresolvedSetIterator End);
static UnresolvedLookupExpr *CreateEmpty(ASTContext &C,
- bool HasExplicitTemplateArgs,
+ bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs);
/// True if this declaration should be extended by
@@ -1984,56 +2495,11 @@ public:
/// that was looked in to find these results.
CXXRecordDecl *getNamingClass() const { return NamingClass; }
- // Note that, inconsistently with the explicit-template-argument AST
- // nodes, users are *forbidden* from calling these methods on objects
- // without explicit template arguments.
-
- ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
- assert(hasExplicitTemplateArgs());
- return *reinterpret_cast<ASTTemplateArgumentListInfo*>(this + 1);
- }
-
- /// Gets a reference to the explicit template argument list.
- const ASTTemplateArgumentListInfo &getExplicitTemplateArgs() const {
- assert(hasExplicitTemplateArgs());
- return *reinterpret_cast<const ASTTemplateArgumentListInfo*>(this + 1);
- }
-
- /// \brief Retrieves the optional explicit template arguments.
- /// This points to the same data as getExplicitTemplateArgs(), but
- /// returns null if there are no explicit template arguments.
- const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() {
- if (!hasExplicitTemplateArgs()) return 0;
- return &getExplicitTemplateArgs();
- }
-
- /// \brief Copies the template arguments (if present) into the given
- /// structure.
- void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
- getExplicitTemplateArgs().copyInto(List);
- }
-
- SourceLocation getLAngleLoc() const {
- return getExplicitTemplateArgs().LAngleLoc;
- }
-
- SourceLocation getRAngleLoc() const {
- return getExplicitTemplateArgs().RAngleLoc;
- }
-
- TemplateArgumentLoc const *getTemplateArgs() const {
- return getExplicitTemplateArgs().getTemplateArgs();
- }
-
- unsigned getNumTemplateArgs() const {
- return getExplicitTemplateArgs().NumTemplateArgs;
- }
-
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
SourceRange Range(getNameInfo().getSourceRange());
- if (getQualifierLoc())
+ if (getQualifierLoc())
Range.setBegin(getQualifierLoc().getBeginLoc());
- if (hasExplicitTemplateArgs())
+ if (hasExplicitTemplateArgs())
Range.setEnd(getRAngleLoc());
return Range;
}
@@ -2064,26 +2530,40 @@ class DependentScopeDeclRefExpr : public Expr {
/// \brief The nested-name-specifier that qualifies this unresolved
/// declaration name.
NestedNameSpecifierLoc QualifierLoc;
-
+
/// The name of the entity we will be referencing.
DeclarationNameInfo NameInfo;
- /// \brief Whether the name includes explicit template arguments.
- bool HasExplicitTemplateArgs;
+ /// \brief Whether the name includes info for explicit template
+ /// keyword and arguments.
+ bool HasTemplateKWAndArgsInfo;
+
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() {
+ if (!HasTemplateKWAndArgsInfo) return 0;
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo*>(this + 1);
+ }
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<DependentScopeDeclRefExpr*>(this)
+ ->getTemplateKWAndArgsInfo();
+ }
DependentScopeDeclRefExpr(QualType T,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *Args);
public:
static DependentScopeDeclRefExpr *Create(ASTContext &C,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs = 0);
+ const TemplateArgumentListInfo *TemplateArgs);
static DependentScopeDeclRefExpr *CreateEmpty(ASTContext &C,
- bool HasExplicitTemplateArgs,
+ bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs);
/// \brief Retrieve the name that this expression refers to.
@@ -2098,16 +2578,40 @@ public:
/// \brief Retrieve the nested-name-specifier that qualifies the
/// name, with source location information.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
-
-
+
+
/// \brief Retrieve the nested-name-specifier that qualifies this
/// declaration.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
+ /// \brief Retrieve the location of the template keyword preceding
+ /// this name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// Determines whether the name was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
/// Determines whether this lookup had explicit template arguments.
- bool hasExplicitTemplateArgs() const { return HasExplicitTemplateArgs; }
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
// Note that, inconsistently with the explicit-template-argument AST
// nodes, users are *forbidden* from calling these methods on objects
@@ -2137,14 +2641,6 @@ public:
void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
getExplicitTemplateArgs().copyInto(List);
}
-
- SourceLocation getLAngleLoc() const {
- return getExplicitTemplateArgs().LAngleLoc;
- }
-
- SourceLocation getRAngleLoc() const {
- return getExplicitTemplateArgs().RAngleLoc;
- }
TemplateArgumentLoc const *getTemplateArgs() const {
return getExplicitTemplateArgs().getTemplateArgs();
@@ -2154,7 +2650,7 @@ public:
return getExplicitTemplateArgs().NumTemplateArgs;
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
SourceRange Range(QualifierLoc.getBeginLoc(), getLocation());
if (hasExplicitTemplateArgs())
Range.setEnd(getRAngleLoc());
@@ -2175,46 +2671,61 @@ public:
/// Represents an expression --- generally a full-expression --- which
/// introduces cleanups to be run at the end of the sub-expression's
/// evaluation. The most common source of expression-introduced
-/// cleanups is temporary objects in C++, but several other C++
-/// expressions can create cleanups.
+/// cleanups is temporary objects in C++, but several other kinds of
+/// expressions can create cleanups, including basically every
+/// call in ARC that returns an Objective-C pointer.
+///
+/// This expression also tracks whether the sub-expression contains a
+/// potentially-evaluated block literal. The lifetime of a block
+/// literal is the extent of the enclosing scope.
class ExprWithCleanups : public Expr {
+public:
+ /// The type of objects that are kept in the cleanup.
+ /// It's useful to remember the set of blocks; we could also
+ /// remember the set of temporaries, but there's currently
+ /// no need.
+ typedef BlockDecl *CleanupObject;
+
+private:
Stmt *SubExpr;
- CXXTemporary **Temps;
- unsigned NumTemps;
+ ExprWithCleanups(EmptyShell, unsigned NumObjects);
+ ExprWithCleanups(Expr *SubExpr, ArrayRef<CleanupObject> Objects);
- ExprWithCleanups(ASTContext &C, Expr *SubExpr,
- CXXTemporary **Temps, unsigned NumTemps);
-
-public:
- ExprWithCleanups(EmptyShell Empty)
- : Expr(ExprWithCleanupsClass, Empty),
- SubExpr(0), Temps(0), NumTemps(0) {}
-
- static ExprWithCleanups *Create(ASTContext &C, Expr *SubExpr,
- CXXTemporary **Temps,
- unsigned NumTemps);
-
- unsigned getNumTemporaries() const { return NumTemps; }
- void setNumTemporaries(ASTContext &C, unsigned N);
-
- CXXTemporary *getTemporary(unsigned i) {
- assert(i < NumTemps && "Index out of range");
- return Temps[i];
+ CleanupObject *getObjectsBuffer() {
+ return reinterpret_cast<CleanupObject*>(this + 1);
+ }
+ const CleanupObject *getObjectsBuffer() const {
+ return reinterpret_cast<const CleanupObject*>(this + 1);
}
- const CXXTemporary *getTemporary(unsigned i) const {
- return const_cast<ExprWithCleanups*>(this)->getTemporary(i);
+ friend class ASTStmtReader;
+
+public:
+ static ExprWithCleanups *Create(ASTContext &C, EmptyShell empty,
+ unsigned numObjects);
+
+ static ExprWithCleanups *Create(ASTContext &C, Expr *subexpr,
+ ArrayRef<CleanupObject> objects);
+
+ ArrayRef<CleanupObject> getObjects() const {
+ return ArrayRef<CleanupObject>(getObjectsBuffer(), getNumObjects());
}
- void setTemporary(unsigned i, CXXTemporary *T) {
- assert(i < NumTemps && "Index out of range");
- Temps[i] = T;
+
+ unsigned getNumObjects() const { return ExprWithCleanupsBits.NumObjects; }
+
+ CleanupObject getObject(unsigned i) const {
+ assert(i < getNumObjects() && "Index out of range");
+ return getObjects()[i];
}
Expr *getSubExpr() { return cast<Expr>(SubExpr); }
const Expr *getSubExpr() const { return cast<Expr>(SubExpr); }
+
+ /// setSubExpr - As with any mutator of the AST, be very careful
+ /// when modifying an existing AST to preserve its invariants.
void setSubExpr(Expr *E) { SubExpr = E; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SubExpr->getSourceRange();
}
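
A hypothetical consumer (sketch) of the revised interface; getObjects() is an ArrayRef view over CleanupObject storage that, like several other nodes in this patch, trails the node's own allocation:

    // Counts the block literals whose cleanup this full-expression owns.
    static unsigned countBlockCleanups(const ExprWithCleanups *E) {
      unsigned N = 0;
      for (unsigned I = 0, C = E->getNumObjects(); I != C; ++I)
        if (E->getObject(I))  // each object is a BlockDecl* in this revision
          ++N;
      return N;
    }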
@@ -2252,7 +2763,7 @@ public:
class CXXUnresolvedConstructExpr : public Expr {
/// \brief The type being constructed.
TypeSourceInfo *Type;
-
+
/// \brief The location of the left parentheses ('(').
SourceLocation LParenLoc;
@@ -2272,7 +2783,7 @@ class CXXUnresolvedConstructExpr : public Expr {
: Expr(CXXUnresolvedConstructExprClass, Empty), Type(), NumArgs(NumArgs) { }
friend class ASTStmtReader;
-
+
public:
static CXXUnresolvedConstructExpr *Create(ASTContext &C,
TypeSourceInfo *Type,
@@ -2288,10 +2799,10 @@ public:
/// in the source code.
QualType getTypeAsWritten() const { return Type->getType(); }
- /// \brief Retrieve the type source information for the type being
+ /// \brief Retrieve the type source information for the type being
/// constructed.
TypeSourceInfo *getTypeSourceInfo() const { return Type; }
-
+
/// \brief Retrieve the location of the left parentheses ('(') that
/// precedes the argument list.
SourceLocation getLParenLoc() const { return LParenLoc; }
@@ -2332,8 +2843,8 @@ public:
*(arg_begin() + I) = E;
}
- SourceRange getSourceRange() const;
-
+ SourceRange getSourceRange() const LLVM_READONLY;
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXUnresolvedConstructExprClass;
}
@@ -2366,9 +2877,9 @@ class CXXDependentScopeMemberExpr : public Expr {
/// the '.' operator.
bool IsArrow : 1;
- /// \brief Whether this member expression has explicitly-specified template
- /// arguments.
- bool HasExplicitTemplateArgs : 1;
+ /// \brief Whether this member expression has info for explicit template
+ /// keyword and arguments.
+ bool HasTemplateKWAndArgsInfo : 1;
/// \brief The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
@@ -2390,10 +2901,22 @@ class CXXDependentScopeMemberExpr : public Expr {
/// FIXME: could also be a template-id
DeclarationNameInfo MemberNameInfo;
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() {
+ if (!HasTemplateKWAndArgsInfo) return 0;
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo*>(this + 1);
+ }
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<CXXDependentScopeMemberExpr*>(this)
+ ->getTemplateKWAndArgsInfo();
+ }
+
CXXDependentScopeMemberExpr(ASTContext &C,
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierFoundInScope,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs);
@@ -2412,12 +2935,13 @@ public:
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierFoundInScope,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs);
static CXXDependentScopeMemberExpr *
- CreateEmpty(ASTContext &C, bool HasExplicitTemplateArgs,
+ CreateEmpty(ASTContext &C, bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs);
/// \brief True if this is an implicit access, i.e. one in which the
@@ -2443,15 +2967,15 @@ public:
/// \brief Retrieve the nested-name-specifier that qualifies the member
/// name.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
}
/// \brief Retrieve the nested-name-specifier that qualifies the member
/// name, with source location information.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
-
-
+
+
/// \brief Retrieve the first part of the nested-name-specifier that was
/// found in the scope of the member access expression when the member access
/// was initially parsed.
@@ -2481,16 +3005,38 @@ public:
// expression refers to.
SourceLocation getMemberLoc() const { return MemberNameInfo.getLoc(); }
+ /// \brief Retrieve the location of the template keyword preceding the
+ /// member name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the member name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the member name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// Determines whether the member name was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
/// \brief Determines whether this member expression actually had a C++
/// template argument list explicitly specified, e.g., x.f<int>.
- bool hasExplicitTemplateArgs() const {
- return HasExplicitTemplateArgs;
- }
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
/// \brief Retrieve the explicit template argument list that followed the
/// member template name, if any.
ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
- assert(HasExplicitTemplateArgs);
+ assert(hasExplicitTemplateArgs());
return *reinterpret_cast<ASTTemplateArgumentListInfo *>(this + 1);
}
@@ -2520,12 +3066,6 @@ public:
getExplicitTemplateArgs().initializeFrom(List);
}
- /// \brief Retrieve the location of the left angle bracket following the
- /// member name ('<'), if any.
- SourceLocation getLAngleLoc() const {
- return getExplicitTemplateArgs().LAngleLoc;
- }
-
/// \brief Retrieve the template arguments provided as part of this
/// template-id.
const TemplateArgumentLoc *getTemplateArgs() const {
@@ -2538,13 +3078,7 @@ public:
return getExplicitTemplateArgs().NumTemplateArgs;
}
- /// \brief Retrieve the location of the right angle bracket following the
- /// template arguments ('>').
- SourceLocation getRAngleLoc() const {
- return getExplicitTemplateArgs().RAngleLoc;
- }
-
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
SourceRange Range;
if (!isImplicitAccess())
Range.setBegin(Base->getSourceRange().getBegin());
@@ -2612,28 +3146,30 @@ class UnresolvedMemberExpr : public OverloadExpr {
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End);
-
+
UnresolvedMemberExpr(EmptyShell Empty)
: OverloadExpr(UnresolvedMemberExprClass, Empty), IsArrow(false),
HasUnresolvedUsing(false), Base(0) { }
friend class ASTStmtReader;
-
+
public:
static UnresolvedMemberExpr *
Create(ASTContext &C, bool HasUnresolvedUsing,
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End);
static UnresolvedMemberExpr *
- CreateEmpty(ASTContext &C, bool HasExplicitTemplateArgs,
+ CreateEmpty(ASTContext &C, bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs);
/// \brief True if this is an implicit access, i.e. one in which the
@@ -2680,58 +3216,7 @@ public:
// expression refers to.
SourceLocation getMemberLoc() const { return getNameLoc(); }
- /// \brief Retrieve the explicit template argument list that followed the
- /// member template name.
- ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
- assert(hasExplicitTemplateArgs());
- return *reinterpret_cast<ASTTemplateArgumentListInfo *>(this + 1);
- }
-
- /// \brief Retrieve the explicit template argument list that followed the
- /// member template name, if any.
- const ASTTemplateArgumentListInfo &getExplicitTemplateArgs() const {
- assert(hasExplicitTemplateArgs());
- return *reinterpret_cast<const ASTTemplateArgumentListInfo *>(this + 1);
- }
-
- /// \brief Retrieves the optional explicit template arguments.
- /// This points to the same data as getExplicitTemplateArgs(), but
- /// returns null if there are no explicit template arguments.
- const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() {
- if (!hasExplicitTemplateArgs()) return 0;
- return &getExplicitTemplateArgs();
- }
-
- /// \brief Copies the template arguments into the given structure.
- void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
- getExplicitTemplateArgs().copyInto(List);
- }
-
- /// \brief Retrieve the location of the left angle bracket following
- /// the member name ('<').
- SourceLocation getLAngleLoc() const {
- return getExplicitTemplateArgs().LAngleLoc;
- }
-
- /// \brief Retrieve the template arguments provided as part of this
- /// template-id.
- const TemplateArgumentLoc *getTemplateArgs() const {
- return getExplicitTemplateArgs().getTemplateArgs();
- }
-
- /// \brief Retrieve the number of template arguments provided as
- /// part of this template-id.
- unsigned getNumTemplateArgs() const {
- return getExplicitTemplateArgs().NumTemplateArgs;
- }
-
- /// \brief Retrieve the location of the right angle bracket
- /// following the template arguments ('>').
- SourceLocation getRAngleLoc() const {
- return getExplicitTemplateArgs().RAngleLoc;
- }
-
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
SourceRange Range = getMemberNameInfo().getSourceRange();
if (!isImplicitAccess())
Range.setBegin(Base->getSourceRange().getBegin());
@@ -2783,7 +3268,7 @@ public:
Expr *getOperand() const { return static_cast<Expr*>(Operand); }
- SourceRange getSourceRange() const { return Range; }
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
bool getValue() const { return Value; }
@@ -2796,7 +3281,7 @@ public:
child_range children() { return child_range(&Operand, &Operand + 1); }
};
-/// \brief Represents a C++0x pack expansion that produces a sequence of
+/// \brief Represents a C++0x pack expansion that produces a sequence of
+/// \brief Represents a C++0x pack expansion that produces a sequence of
/// expressions.
///
/// A pack expansion expression contains a pattern (which itself is an
@@ -2810,29 +3295,29 @@ public:
/// \endcode
///
/// Here, the argument to the function object \c f is a pack expansion whose
-/// pattern is \c static_cast<Types&&>(args). When the \c forward function
+/// pattern is \c static_cast<Types&&>(args). When the \c forward function
/// template is instantiated, the pack expansion will instantiate to zero
/// or more function arguments to the function object \c f.
class PackExpansionExpr : public Expr {
SourceLocation EllipsisLoc;
-
+
/// \brief The number of expansions that will be produced by this pack
/// expansion expression, if known.
///
/// When zero, the number of expansions is not known. Otherwise, this value
/// is the number of expansions + 1.
unsigned NumExpansions;
-
+
Stmt *Pattern;
-
+
friend class ASTStmtReader;
friend class ASTStmtWriter;
-
+
public:
PackExpansionExpr(QualType T, Expr *Pattern, SourceLocation EllipsisLoc,
llvm::Optional<unsigned> NumExpansions)
- : Expr(PackExpansionExprClass, T, Pattern->getValueKind(),
- Pattern->getObjectKind(), /*TypeDependent=*/true,
+ : Expr(PackExpansionExprClass, T, Pattern->getValueKind(),
+ Pattern->getObjectKind(), /*TypeDependent=*/true,
/*ValueDependent=*/true, /*InstantiationDependent=*/true,
/*ContainsUnexpandedParameterPack=*/false),
EllipsisLoc(EllipsisLoc),
@@ -2840,7 +3325,7 @@ public:
Pattern(Pattern) { }
PackExpansionExpr(EmptyShell Empty) : Expr(PackExpansionExprClass, Empty) { }
-
+
/// \brief Retrieve the pattern of the pack expansion.
Expr *getPattern() { return reinterpret_cast<Expr *>(Pattern); }
@@ -2850,17 +3335,17 @@ public:
/// \brief Retrieve the location of the ellipsis that describes this pack
/// expansion.
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
-
- /// \brief Determine the number of expansions that will be produced when
+
+ /// \brief Determine the number of expansions that will be produced when
/// this pack expansion is instantiated, if already known.
llvm::Optional<unsigned> getNumExpansions() const {
if (NumExpansions)
return NumExpansions - 1;
-
+
return llvm::Optional<unsigned>();
}
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(Pattern->getLocStart(), EllipsisLoc);
}
@@ -2868,21 +3353,24 @@ public:
return T->getStmtClass() == PackExpansionExprClass;
}
static bool classof(const PackExpansionExpr *) { return true; }
-
+
// Iterators
child_range children() {
return child_range(&Pattern, &Pattern + 1);
}
};
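
The NumExpansions field above uses a shifted encoding; a standalone restatement of it (the helper name is illustrative):

    #include "llvm/ADT/Optional.h"

    // 0 encodes "number of expansions unknown"; any other stored value is
    // the known expansion count plus one, matching getNumExpansions().
    static llvm::Optional<unsigned> decodeNumExpansions(unsigned Stored) {
      if (Stored)
        return Stored - 1;
      return llvm::Optional<unsigned>();
    }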
-
-inline ASTTemplateArgumentListInfo &OverloadExpr::getExplicitTemplateArgs() {
+
+inline ASTTemplateKWAndArgsInfo *OverloadExpr::getTemplateKWAndArgsInfo() {
+ if (!HasTemplateKWAndArgsInfo) return 0;
if (isa<UnresolvedLookupExpr>(this))
- return cast<UnresolvedLookupExpr>(this)->getExplicitTemplateArgs();
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo*>
+ (cast<UnresolvedLookupExpr>(this) + 1);
else
- return cast<UnresolvedMemberExpr>(this)->getExplicitTemplateArgs();
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo*>
+ (cast<UnresolvedMemberExpr>(this) + 1);
}
-/// \brief Represents an expression that computes the length of a parameter
+/// \brief Represents an expression that computes the length of a parameter
+/// \brief Represents an expression that computes the length of a parameter
/// pack.
///
/// \code
@@ -2894,30 +3382,30 @@ inline ASTTemplateArgumentListInfo &OverloadExpr::getExplicitTemplateArgs() {
class SizeOfPackExpr : public Expr {
/// \brief The location of the 'sizeof' keyword.
SourceLocation OperatorLoc;
-
+
/// \brief The location of the name of the parameter pack.
SourceLocation PackLoc;
-
+
/// \brief The location of the closing parenthesis.
SourceLocation RParenLoc;
-
+
/// \brief The length of the parameter pack, if known.
///
/// When this expression is value-dependent, the length of the parameter pack
/// is unknown. When this expression is not value-dependent, the length is
/// known.
unsigned Length;
-
+
/// \brief The parameter pack itself.
NamedDecl *Pack;
-
+
friend class ASTStmtReader;
friend class ASTStmtWriter;
-
+
public:
/// \brief Creates a value-dependent expression that computes the length of
/// the given parameter pack.
- SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
+ SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
SourceLocation PackLoc, SourceLocation RParenLoc)
: Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary,
/*TypeDependent=*/false, /*ValueDependent=*/true,
@@ -2928,7 +3416,7 @@ public:
/// \brief Creates an expression that computes the length of
/// the given parameter pack, which is already known.
- SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
+ SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
SourceLocation PackLoc, SourceLocation RParenLoc,
unsigned Length)
: Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary,
@@ -2940,38 +3428,38 @@ public:
/// \brief Create an empty expression.
SizeOfPackExpr(EmptyShell Empty) : Expr(SizeOfPackExprClass, Empty) { }
-
+
/// \brief Determine the location of the 'sizeof' keyword.
SourceLocation getOperatorLoc() const { return OperatorLoc; }
/// \brief Determine the location of the parameter pack.
SourceLocation getPackLoc() const { return PackLoc; }
-
+
/// \brief Determine the location of the right parenthesis.
SourceLocation getRParenLoc() const { return RParenLoc; }
-
+
/// \brief Retrieve the parameter pack.
NamedDecl *getPack() const { return Pack; }
-
+
/// \brief Retrieve the length of the parameter pack.
///
- /// This routine may only be invoked when the expression is not
+ /// This routine may only be invoked when the expression is not
/// value-dependent.
unsigned getPackLength() const {
- assert(!isValueDependent() &&
+ assert(!isValueDependent() &&
"Cannot get the length of a value-dependent pack size expression");
return Length;
}
-
- SourceRange getSourceRange() const {
+
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(OperatorLoc, RParenLoc);
}
-
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == SizeOfPackExprClass;
}
static bool classof(const SizeOfPackExpr *) { return true; }
-
+
// Iterators
child_range children() { return child_range(); }
};
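
A source-level example of what SizeOfPackExpr models: value-dependent while the pack is unexpanded, a known length after instantiation:

    template<typename ...Types>
    struct Count {
      // sizeof...(Types) is represented by a SizeOfPackExpr; inside the
      // template it is value-dependent, so getPackLength() may not be
      // called on it yet.
      static const unsigned value = sizeof...(Types);
    };

    // After instantiation the length is known: here, 3.
    static const unsigned Three = Count<int, char, double>::value;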
@@ -2990,11 +3478,11 @@ class SubstNonTypeTemplateParmExpr : public Expr {
friend class ASTReader;
friend class ASTStmtReader;
- explicit SubstNonTypeTemplateParmExpr(EmptyShell Empty)
+ explicit SubstNonTypeTemplateParmExpr(EmptyShell Empty)
: Expr(SubstNonTypeTemplateParmExprClass, Empty) { }
public:
- SubstNonTypeTemplateParmExpr(QualType type,
+ SubstNonTypeTemplateParmExpr(QualType type,
ExprValueKind valueKind,
SourceLocation loc,
NonTypeTemplateParmDecl *param,
@@ -3006,19 +3494,19 @@ public:
Param(param), Replacement(replacement), NameLoc(loc) {}
SourceLocation getNameLoc() const { return NameLoc; }
- SourceRange getSourceRange() const { return NameLoc; }
+ SourceRange getSourceRange() const LLVM_READONLY { return NameLoc; }
Expr *getReplacement() const { return cast<Expr>(Replacement); }
-
+
NonTypeTemplateParmDecl *getParameter() const { return Param; }
static bool classof(const Stmt *s) {
return s->getStmtClass() == SubstNonTypeTemplateParmExprClass;
}
- static bool classof(const SubstNonTypeTemplateParmExpr *) {
- return true;
+ static bool classof(const SubstNonTypeTemplateParmExpr *) {
+ return true;
}
-
+
// Iterators
child_range children() { return child_range(&Replacement, &Replacement+1); }
};
@@ -3028,7 +3516,7 @@ public:
///
/// When a pack expansion in the source code contains multiple parameter packs
/// and those parameter packs correspond to different levels of template
-/// parameter lists, this node node is used to represent a non-type template
+/// parameter lists, this node is used to represent a non-type template
/// parameter pack from an outer level, which has already had its argument pack
/// substituted but that still lives within a pack expansion that itself
/// could not be instantiated. When actually performing a substitution into
@@ -3038,47 +3526,47 @@ public:
class SubstNonTypeTemplateParmPackExpr : public Expr {
/// \brief The non-type template parameter pack itself.
NonTypeTemplateParmDecl *Param;
-
+
/// \brief A pointer to the set of template arguments that this
/// parameter pack is instantiated with.
const TemplateArgument *Arguments;
-
+
/// \brief The number of template arguments in \c Arguments.
unsigned NumArguments;
-
+
/// \brief The location of the non-type template parameter pack reference.
SourceLocation NameLoc;
-
+
friend class ASTReader;
friend class ASTStmtReader;
- explicit SubstNonTypeTemplateParmPackExpr(EmptyShell Empty)
+ explicit SubstNonTypeTemplateParmPackExpr(EmptyShell Empty)
: Expr(SubstNonTypeTemplateParmPackExprClass, Empty) { }
-
+
public:
- SubstNonTypeTemplateParmPackExpr(QualType T,
+ SubstNonTypeTemplateParmPackExpr(QualType T,
NonTypeTemplateParmDecl *Param,
SourceLocation NameLoc,
const TemplateArgument &ArgPack);
-
+
/// \brief Retrieve the non-type template parameter pack being substituted.
NonTypeTemplateParmDecl *getParameterPack() const { return Param; }
/// \brief Retrieve the location of the parameter pack name.
SourceLocation getParameterPackLocation() const { return NameLoc; }
-
+
/// \brief Retrieve the template argument pack containing the substituted
/// template arguments.
TemplateArgument getArgumentPack() const;
- SourceRange getSourceRange() const { return NameLoc; }
-
+ SourceRange getSourceRange() const LLVM_READONLY { return NameLoc; }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == SubstNonTypeTemplateParmPackExprClass;
}
- static bool classof(const SubstNonTypeTemplateParmPackExpr *) {
- return true;
+ static bool classof(const SubstNonTypeTemplateParmPackExpr *) {
+ return true;
}
-
+
// Iterators
child_range children() { return child_range(); }
};
@@ -3102,13 +3590,13 @@ public:
class MaterializeTemporaryExpr : public Expr {
/// \brief The temporary-generating expression whose value will be
/// materialized.
- Stmt *Temporary;
-
+ Stmt *Temporary;
+
friend class ASTStmtReader;
friend class ASTStmtWriter;
-
+
public:
- MaterializeTemporaryExpr(QualType T, Expr *Temporary,
+ MaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference)
: Expr(MaterializeTemporaryExprClass, T,
BoundToLvalueReference? VK_LValue : VK_XValue, OK_Ordinary,
@@ -3116,33 +3604,35 @@ public:
Temporary->isInstantiationDependent(),
Temporary->containsUnexpandedParameterPack()),
Temporary(Temporary) { }
-
- MaterializeTemporaryExpr(EmptyShell Empty)
+
+ MaterializeTemporaryExpr(EmptyShell Empty)
: Expr(MaterializeTemporaryExprClass, Empty) { }
-
+
/// \brief Retrieve the temporary-generating subexpression whose value will
/// be materialized into a glvalue.
Expr *GetTemporaryExpr() const { return reinterpret_cast<Expr *>(Temporary); }
-
+
/// \brief Determine whether this materialized temporary is bound to an
/// lvalue reference; otherwise, it's bound to an rvalue reference.
- bool isBoundToLvalueReference() const {
+ bool isBoundToLvalueReference() const {
return getValueKind() == VK_LValue;
}
-
- SourceRange getSourceRange() const { return Temporary->getSourceRange(); }
-
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return Temporary->getSourceRange();
+ }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == MaterializeTemporaryExprClass;
}
- static bool classof(const MaterializeTemporaryExpr *) {
- return true;
+ static bool classof(const MaterializeTemporaryExpr *) {
+ return true;
}
-
+
// Iterators
child_range children() { return child_range(&Temporary, &Temporary + 1); }
};
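
A source-level illustration (hypothetical snippet) of the two value kinds this node distinguishes:

    struct S { int x; };
    S make();

    // Binding to an lvalue reference materializes the temporary as an
    // lvalue (isBoundToLvalueReference() == true, VK_LValue)...
    const S &LRef = make();

    // ...while binding to an rvalue reference materializes an xvalue.
    S &&RRef = make();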
-
+
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
index 55726eb..4bfd12c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
@@ -18,6 +18,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/SelectorLocationsKind.h"
#include "clang/Basic/IdentifierTable.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
class IdentifierInfo;
@@ -43,7 +44,7 @@ public:
SourceLocation getAtLoc() const { return AtLoc; }
void setAtLoc(SourceLocation L) { AtLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtLoc, String->getLocEnd());
}
@@ -56,6 +57,281 @@ public:
child_range children() { return child_range(&String, &String+1); }
};
+/// ObjCBoolLiteralExpr - Objective-C Boolean Literal.
+///
+class ObjCBoolLiteralExpr : public Expr {
+ bool Value;
+ SourceLocation Loc;
+public:
+ ObjCBoolLiteralExpr(bool val, QualType Ty, SourceLocation l) :
+ Expr(ObjCBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ false, false), Value(val), Loc(l) {}
+
+ explicit ObjCBoolLiteralExpr(EmptyShell Empty)
+ : Expr(ObjCBoolLiteralExprClass, Empty) { }
+
+ bool getValue() const { return Value; }
+ void setValue(bool V) { Value = V; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCBoolLiteralExprClass;
+ }
+ static bool classof(const ObjCBoolLiteralExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// ObjCNumericLiteral - used for objective-c numeric literals;
+/// as in: @42 or @true (c++/objc++) or @__yes (c/objc)
+class ObjCNumericLiteral : public Expr {
+ /// Number - expression AST node for the numeric literal
+ Stmt *Number;
+ ObjCMethodDecl *ObjCNumericLiteralMethod;
+ SourceLocation AtLoc;
+public:
+ ObjCNumericLiteral(Stmt *NL, QualType T, ObjCMethodDecl *method,
+ SourceLocation L)
+ : Expr(ObjCNumericLiteralClass, T, VK_RValue, OK_Ordinary,
+ false, false, false, false), Number(NL),
+ ObjCNumericLiteralMethod(method), AtLoc(L) {}
+ explicit ObjCNumericLiteral(EmptyShell Empty)
+ : Expr(ObjCNumericLiteralClass, Empty) {}
+
+ Expr *getNumber() { return cast<Expr>(Number); }
+ const Expr *getNumber() const { return cast<Expr>(Number); }
+
+ ObjCMethodDecl *getObjCNumericLiteralMethod() const {
+ return ObjCNumericLiteralMethod;
+ }
+
+ SourceLocation getAtLoc() const { return AtLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtLoc, Number->getSourceRange().getEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCNumericLiteralClass;
+ }
+ static bool classof(const ObjCNumericLiteral *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Number, &Number+1); }
+
+ friend class ASTStmtReader;
+};
+
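A hypothetical consumer (sketch) of the new node: it pairs the written literal with the NSNumber factory method Sema selected for the boxing, e.g. +numberWithInt: for @42:

    static void inspect(const ObjCNumericLiteral *E) {
      const Expr *Wrapped = E->getNumber();  // the "42" in @42
      ObjCMethodDecl *Factory = E->getObjCNumericLiteralMethod();
      (void)Wrapped;
      (void)Factory;
    }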
+/// ObjCArrayLiteral - used for objective-c array containers; as in:
+/// @[@"Hello", NSApp, [NSNumber numberWithInt:42]];
+class ObjCArrayLiteral : public Expr {
+ unsigned NumElements;
+ SourceRange Range;
+ ObjCMethodDecl *ArrayWithObjectsMethod;
+
+ ObjCArrayLiteral(llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl * Method,
+ SourceRange SR);
+
+ explicit ObjCArrayLiteral(EmptyShell Empty, unsigned NumElements)
+ : Expr(ObjCArrayLiteralClass, Empty), NumElements(NumElements) {}
+
+public:
+ static ObjCArrayLiteral *Create(ASTContext &C,
+ llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl * Method,
+ SourceRange SR);
+
+ static ObjCArrayLiteral *CreateEmpty(ASTContext &C, unsigned NumElements);
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCArrayLiteralClass;
+ }
+ static bool classof(const ObjCArrayLiteral *) { return true; }
+
+ /// \brief Retrieve elements of array of literals.
+ Expr **getElements() { return reinterpret_cast<Expr **>(this + 1); }
+
+ /// \brief Retrieve the elements of this array literal.
+ const Expr * const *getElements() const {
+ return reinterpret_cast<const Expr * const*>(this + 1);
+ }
+
+ /// getNumElements - Return the number of elements in this Objective-C
+ /// array literal.
+ unsigned getNumElements() const { return NumElements; }
+
+ /// getElement - Return the Expr at the specified index.
+ Expr *getElement(unsigned Index) {
+ assert((Index < NumElements) && "Arg access out of range!");
+ return cast<Expr>(getElements()[Index]);
+ }
+ const Expr *getElement(unsigned Index) const {
+ assert((Index < NumElements) && "Arg access out of range!");
+ return cast<Expr>(getElements()[Index]);
+ }
+
+ ObjCMethodDecl *getArrayWithObjectsMethod() const {
+ return ArrayWithObjectsMethod;
+ }
+
+ // Iterators
+ child_range children() {
+ return child_range((Stmt **)getElements(),
+ (Stmt **)getElements() + NumElements);
+ }
+
+ friend class ASTStmtReader;
+};
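+// The element expressions are allocated directly after the object itself
+// (see getElements()), so clients iterate with a plain indexed loop. Sketch,
+// assuming `AL` is an ObjCArrayLiteral* obtained from the AST:
+//
+//   for (unsigned I = 0, N = AL->getNumElements(); I != N; ++I) {
+//     const Expr *Elt = AL->getElement(I);
+//     // ... inspect Elt ...
+//   }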
+
+/// \brief An element in an Objective-C dictionary literal.
+///
+struct ObjCDictionaryElement {
+ /// \brief The key for the dictionary element.
+ Expr *Key;
+
+ /// \brief The value of the dictionary element.
+ Expr *Value;
+
+ /// \brief The location of the ellipsis, if this is a pack expansion.
+ SourceLocation EllipsisLoc;
+
+ /// \brief The number of elements this pack expansion will expand to, if
+ /// this is a pack expansion and is known.
+ llvm::Optional<unsigned> NumExpansions;
+
+ /// \brief Determines whether this dictionary element is a pack expansion.
+ bool isPackExpansion() const { return EllipsisLoc.isValid(); }
+};
+
+/// ObjCDictionaryLiteral - AST node to represent Objective-C dictionary
+/// literals; as in: @{@"name" : NSUserName(), @"date" : [NSDate date] };
+class ObjCDictionaryLiteral : public Expr {
+ /// \brief Key/value pair used to store the key and value of a given element.
+ ///
+ /// Objects of this type are stored directly after the expression.
+ struct KeyValuePair {
+ Expr *Key;
+ Expr *Value;
+ };
+
+ /// \brief Data that describes an element that is a pack expansion, used if any
+ /// of the elements in the dictionary literal are pack expansions.
+ struct ExpansionData {
+ /// \brief The location of the ellipsis, if this element is a pack
+ /// expansion.
+ SourceLocation EllipsisLoc;
+
+ /// \brief If non-zero, one more than the number of elements that this
+ /// pack expansion will expand to (hence the "plus one").
+ unsigned NumExpansionsPlusOne;
+ };
+
+ /// \brief The number of elements in this dictionary literal.
+ unsigned NumElements : 31;
+
+ /// \brief Determine whether this dictionary literal has any pack expansions.
+ ///
+ /// If the dictionary literal has pack expansions, then there will
+ /// be an array of pack expansion data following the array of
+ /// key/value pairs, which provide the locations of the ellipses (if
+ /// any) and number of elements in the expansion (if known). If
+ /// there are no pack expansions, we optimize away this storage.
+ unsigned HasPackExpansions : 1;
+
+ SourceRange Range;
+ ObjCMethodDecl *DictWithObjectsMethod;
+
+ ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR);
+
+ explicit ObjCDictionaryLiteral(EmptyShell Empty, unsigned NumElements,
+ bool HasPackExpansions)
+ : Expr(ObjCDictionaryLiteralClass, Empty), NumElements(NumElements),
+ HasPackExpansions(HasPackExpansions) {}
+
+ KeyValuePair *getKeyValues() {
+ return reinterpret_cast<KeyValuePair *>(this + 1);
+ }
+
+ const KeyValuePair *getKeyValues() const {
+ return reinterpret_cast<const KeyValuePair *>(this + 1);
+ }
+
+ ExpansionData *getExpansionData() {
+ if (!HasPackExpansions)
+ return 0;
+
+ return reinterpret_cast<ExpansionData *>(getKeyValues() + NumElements);
+ }
+
+ const ExpansionData *getExpansionData() const {
+ if (!HasPackExpansions)
+ return 0;
+
+ return reinterpret_cast<const ExpansionData *>(getKeyValues()+NumElements);
+ }
+
+public:
+ static ObjCDictionaryLiteral *Create(ASTContext &C,
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR);
+
+ static ObjCDictionaryLiteral *CreateEmpty(ASTContext &C,
+ unsigned NumElements,
+ bool HasPackExpansions);
+
+ /// getNumElements - Return the number of elements in this Objective-C
+ /// dictionary literal.
+ unsigned getNumElements() const { return NumElements; }
+
+ ObjCDictionaryElement getKeyValueElement(unsigned Index) const {
+ assert((Index < NumElements) && "Arg access out of range!");
+ const KeyValuePair &KV = getKeyValues()[Index];
+ ObjCDictionaryElement Result = { KV.Key, KV.Value, SourceLocation(),
+ llvm::Optional<unsigned>() };
+ if (HasPackExpansions) {
+ const ExpansionData &Expansion = getExpansionData()[Index];
+ Result.EllipsisLoc = Expansion.EllipsisLoc;
+ if (Expansion.NumExpansionsPlusOne > 0)
+ Result.NumExpansions = Expansion.NumExpansionsPlusOne - 1;
+ }
+ return Result;
+ }
+
+ ObjCMethodDecl *getDictWithObjectsMethod() const
+ { return DictWithObjectsMethod; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCDictionaryLiteralClass;
+ }
+ static bool classof(const ObjCDictionaryLiteral *) { return true; }
+
+ // Iterators
+ child_range children() {
+ // Note: we're taking advantage of the layout of the KeyValuePair struct
+ // here. If that struct changes, this code will need to change as well.
+ return child_range(reinterpret_cast<Stmt **>(this + 1),
+ reinterpret_cast<Stmt **>(this + 1) + NumElements * 2);
+ }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
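+// Decoding sketch, assuming `DL` is an ObjCDictionaryLiteral* from the AST.
+// getKeyValueElement() folds the NumExpansionsPlusOne encoding back into an
+// Optional, so clients never see the +1 bias:
+//
+//   for (unsigned I = 0, N = DL->getNumElements(); I != N; ++I) {
+//     ObjCDictionaryElement Elt = DL->getKeyValueElement(I);
+//     if (Elt.isPackExpansion()) {
+//       // Elt.NumExpansions is set only when the expansion count is known.
+//     }
+//   }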
+
+
/// ObjCEncodeExpr, used for @encode in Objective-C. @encode has the same type
/// and behavior as StringLiteral except that the string initializer is obtained
/// from ASTContext with the encoding type as an argument.
@@ -87,7 +363,7 @@ public:
EncodedType = EncType;
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtLoc, RParenLoc);
}
@@ -121,7 +397,7 @@ public:
void setAtLoc(SourceLocation L) { AtLoc = L; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtLoc, RParenLoc);
}
@@ -161,7 +437,7 @@ public:
void setAtLoc(SourceLocation L) { AtLoc = L; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtLoc, RParenLoc);
}
@@ -176,9 +452,9 @@ public:
/// ObjCIvarRefExpr - A reference to an ObjC instance variable.
class ObjCIvarRefExpr : public Expr {
- class ObjCIvarDecl *D;
- SourceLocation Loc;
+ ObjCIvarDecl *D;
Stmt *Base;
+ SourceLocation Loc;
bool IsArrow:1; // True if this is "X->F", false if this is "X.F".
bool IsFreeIvar:1; // True if ivar reference has no base (self assumed).
@@ -190,7 +466,7 @@ public:
/*TypeDependent=*/false, base->isValueDependent(),
base->isInstantiationDependent(),
base->containsUnexpandedParameterPack()),
- D(d), Loc(l), Base(base), IsArrow(arrow), IsFreeIvar(freeIvar) {}
+ D(d), Base(base), Loc(l), IsArrow(arrow), IsFreeIvar(freeIvar) {}
explicit ObjCIvarRefExpr(EmptyShell Empty)
: Expr(ObjCIvarRefExprClass, Empty) {}
@@ -211,7 +487,7 @@ public:
SourceLocation getLocation() const { return Loc; }
void setLocation(SourceLocation L) { Loc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return isFreeIvar() ? SourceRange(Loc)
: SourceRange(getBase()->getLocStart(), Loc);
}
@@ -227,7 +503,6 @@ public:
/// ObjCPropertyRefExpr - A dot-syntax expression to access an ObjC
/// property.
-///
class ObjCPropertyRefExpr : public Expr {
private:
/// If the bool is true, this is an implicit property reference; the
@@ -235,7 +510,23 @@ private:
/// if the bool is false, this is an explicit property reference;
/// the pointer is an ObjCPropertyDecl and Setter is always null.
llvm::PointerIntPair<NamedDecl*, 1, bool> PropertyOrGetter;
- ObjCMethodDecl *Setter;
+
+ /// \brief Indicates whether the property reference will result in a message
+ /// to the getter, the setter, or both.
+ /// This applies to both implicit and explicit property references.
+ enum MethodRefFlags {
+ MethodRef_None = 0,
+ MethodRef_Getter = 0x1,
+ MethodRef_Setter = 0x2
+ };
+
+ /// \brief Contains the Setter method pointer and MethodRefFlags bit flags.
+ llvm::PointerIntPair<ObjCMethodDecl *, 2, unsigned> SetterAndMethodRefFlags;
+
+ // FIXME: Maybe we should store the property identifier here,
+ // because it's not rederivable from the other data when there's an
+ // implicit property with no getter (because the 'foo' -> 'setFoo:'
+ // transformation is lossy on the first character).
SourceLocation IdLoc;
@@ -253,8 +544,9 @@ public:
/*TypeDependent=*/false, base->isValueDependent(),
base->isInstantiationDependent(),
base->containsUnexpandedParameterPack()),
- PropertyOrGetter(PD, false), Setter(0),
+ PropertyOrGetter(PD, false), SetterAndMethodRefFlags(),
IdLoc(l), ReceiverLoc(), Receiver(base) {
+ assert(t->isSpecificPlaceholderType(BuiltinType::PseudoObject));
}
ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t,
@@ -263,8 +555,9 @@ public:
: Expr(ObjCPropertyRefExprClass, t, VK, OK,
/*TypeDependent=*/false, false, st->isInstantiationDependentType(),
st->containsUnexpandedParameterPack()),
- PropertyOrGetter(PD, false), Setter(0),
+ PropertyOrGetter(PD, false), SetterAndMethodRefFlags(),
IdLoc(l), ReceiverLoc(sl), Receiver(st.getTypePtr()) {
+ assert(t->isSpecificPlaceholderType(BuiltinType::PseudoObject));
}
ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
@@ -273,8 +566,9 @@ public:
: Expr(ObjCPropertyRefExprClass, T, VK, OK, false,
Base->isValueDependent(), Base->isInstantiationDependent(),
Base->containsUnexpandedParameterPack()),
- PropertyOrGetter(Getter, true), Setter(Setter),
+ PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
IdLoc(IdLoc), ReceiverLoc(), Receiver(Base) {
+ assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
}
ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
@@ -282,8 +576,9 @@ public:
SourceLocation IdLoc,
SourceLocation SuperLoc, QualType SuperTy)
: Expr(ObjCPropertyRefExprClass, T, VK, OK, false, false, false, false),
- PropertyOrGetter(Getter, true), Setter(Setter),
+ PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
IdLoc(IdLoc), ReceiverLoc(SuperLoc), Receiver(SuperTy.getTypePtr()) {
+ assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
}
ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
@@ -291,8 +586,9 @@ public:
SourceLocation IdLoc,
SourceLocation ReceiverLoc, ObjCInterfaceDecl *Receiver)
: Expr(ObjCPropertyRefExprClass, T, VK, OK, false, false, false, false),
- PropertyOrGetter(Getter, true), Setter(Setter),
+ PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
IdLoc(IdLoc), ReceiverLoc(ReceiverLoc), Receiver(Receiver) {
+ assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
}
explicit ObjCPropertyRefExpr(EmptyShell Empty)
@@ -313,7 +609,7 @@ public:
ObjCMethodDecl *getImplicitPropertySetter() const {
assert(isImplicitProperty());
- return Setter;
+ return SetterAndMethodRefFlags.getPointer();
}
Selector getGetterSelector() const {
@@ -328,6 +624,28 @@ public:
return getExplicitProperty()->getSetterName();
}
+ /// \brief True if the property reference will result in a message to the
+ /// getter.
+ /// This applies to both implicit and explicit property references.
+ bool isMessagingGetter() const {
+ return SetterAndMethodRefFlags.getInt() & MethodRef_Getter;
+ }
+
+ /// \brief True if the property reference will result in a message to the
+ /// setter.
+ /// This applies to both implicit and explicit property references.
+ bool isMessagingSetter() const {
+ return SetterAndMethodRefFlags.getInt() & MethodRef_Setter;
+ }
+
+ void setIsMessagingGetter(bool val = true) {
+ setMethodRefFlag(MethodRef_Getter, val);
+ }
+
+ void setIsMessagingSetter(bool val = true) {
+ setMethodRefFlag(MethodRef_Setter, val);
+ }
+
const Expr *getBase() const {
return cast<Expr>(Receiver.get<Stmt*>());
}
@@ -348,14 +666,15 @@ public:
if (const ObjCMethodDecl *Getter = PDecl->getGetterMethodDecl())
ResultType = Getter->getResultType();
else
- ResultType = getType();
+ ResultType = PDecl->getType();
} else {
const ObjCMethodDecl *Getter = getImplicitPropertyGetter();
- ResultType = Getter->getResultType(); // with reference!
+ if (Getter)
+ ResultType = Getter->getResultType(); // with reference!
}
return ResultType;
}
-
+
QualType getSetterArgType() const {
QualType ArgType;
if (isImplicitProperty()) {
@@ -381,7 +700,7 @@ public:
bool isSuperReceiver() const { return Receiver.is<const Type*>(); }
bool isClassReceiver() const { return Receiver.is<ObjCInterfaceDecl*>(); }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange((isObjectReceiver() ? getBase()->getLocStart()
: getReceiverLocation()),
IdLoc);
@@ -403,15 +722,19 @@ public:
private:
friend class ASTStmtReader;
- void setExplicitProperty(ObjCPropertyDecl *D) {
+ friend class ASTStmtWriter;
+ void setExplicitProperty(ObjCPropertyDecl *D, unsigned methRefFlags) {
PropertyOrGetter.setPointer(D);
PropertyOrGetter.setInt(false);
- Setter = 0;
+ SetterAndMethodRefFlags.setPointer(0);
+ SetterAndMethodRefFlags.setInt(methRefFlags);
}
- void setImplicitProperty(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter) {
+ void setImplicitProperty(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
+ unsigned methRefFlags) {
PropertyOrGetter.setPointer(Getter);
PropertyOrGetter.setInt(true);
- this->Setter = Setter;
+ SetterAndMethodRefFlags.setPointer(Setter);
+ SetterAndMethodRefFlags.setInt(methRefFlags);
}
void setBase(Expr *Base) { Receiver = Base; }
void setSuperReceiver(QualType T) { Receiver = T.getTypePtr(); }
@@ -419,7 +742,98 @@ private:
void setLocation(SourceLocation L) { IdLoc = L; }
void setReceiverLocation(SourceLocation Loc) { ReceiverLoc = Loc; }
+
+ void setMethodRefFlag(MethodRefFlags flag, bool val) {
+ unsigned f = SetterAndMethodRefFlags.getInt();
+ if (val)
+ f |= flag;
+ else
+ f &= ~flag;
+ SetterAndMethodRefFlags.setInt(f);
+ }
};
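+// Flag-usage sketch, assuming `PRE` is an ObjCPropertyRefExpr* from the AST.
+// A compound access such as "obj.prop += 1" reads and writes the property,
+// so both bits can be set on a single reference:
+//
+//   if (PRE->isMessagingGetter()) { /* a getter message will be emitted */ }
+//   if (PRE->isMessagingSetter()) { /* a setter message will be emitted */ }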
+
+/// ObjCSubscriptRefExpr - used for array and dictionary subscripting; as in:
+/// array[4] = array[3]; dictionary[key] = dictionary[alt_key];
+///
+class ObjCSubscriptRefExpr : public Expr {
+ // Location of ']' in an indexing expression.
+ SourceLocation RBracket;
+ // The array/dictionary base expression and the key expression. For
+ // arrays the key is a numeric (index) expression; for dictionaries it
+ // is an Objective-C object pointer expression.
+ enum { BASE, KEY, END_EXPR };
+ Stmt* SubExprs[END_EXPR];
+
+ ObjCMethodDecl *GetAtIndexMethodDecl;
+
+ // Null for immutable objects, and also when this ObjCSubscriptRefExpr is
+ // only used to read an indexed object.
+ ObjCMethodDecl *SetAtIndexMethodDecl;
+
+public:
+
+ ObjCSubscriptRefExpr(Expr *base, Expr *key, QualType T,
+ ExprValueKind VK, ExprObjectKind OK,
+ ObjCMethodDecl *getMethod,
+ ObjCMethodDecl *setMethod, SourceLocation RB)
+ : Expr(ObjCSubscriptRefExprClass, T, VK, OK,
+ base->isTypeDependent() || key->isTypeDependent(),
+ base->isValueDependent() || key->isValueDependent(),
+ base->isInstantiationDependent() || key->isInstantiationDependent(),
+ (base->containsUnexpandedParameterPack() ||
+ key->containsUnexpandedParameterPack())),
+ RBracket(RB),
+ GetAtIndexMethodDecl(getMethod),
+ SetAtIndexMethodDecl(setMethod)
+ {SubExprs[BASE] = base; SubExprs[KEY] = key;}
+
+ explicit ObjCSubscriptRefExpr(EmptyShell Empty)
+ : Expr(ObjCSubscriptRefExprClass, Empty) {}
+
+ static ObjCSubscriptRefExpr *Create(ASTContext &C,
+ Expr *base,
+ Expr *key, QualType T,
+ ObjCMethodDecl *getMethod,
+ ObjCMethodDecl *setMethod,
+ SourceLocation RB);
+
+ SourceLocation getRBracket() const { return RBracket; }
+ void setRBracket(SourceLocation RB) { RBracket = RB; }
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(SubExprs[BASE]->getLocStart(), RBracket);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCSubscriptRefExprClass;
+ }
+ static bool classof(const ObjCSubscriptRefExpr *) { return true; }
+
+ Expr *getBaseExpr() const { return cast<Expr>(SubExprs[BASE]); }
+ void setBaseExpr(Stmt *S) { SubExprs[BASE] = S; }
+
+ Expr *getKeyExpr() const { return cast<Expr>(SubExprs[KEY]); }
+ void setKeyExpr(Stmt *S) { SubExprs[KEY] = S; }
+
+ ObjCMethodDecl *getAtIndexMethodDecl() const {
+ return GetAtIndexMethodDecl;
+ }
+
+ ObjCMethodDecl *setAtIndexMethodDecl() const {
+ return SetAtIndexMethodDecl;
+ }
+
+ bool isArraySubscriptRefExpr() const {
+ return getKeyExpr()->getType()->isIntegralOrEnumerationType();
+ }
+
+ child_range children() {
+ return child_range(SubExprs, SubExprs+END_EXPR);
+ }
+private:
+ friend class ASTStmtReader;
+};
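+// Dispatch sketch, assuming `SRE` is an ObjCSubscriptRefExpr* from the AST.
+// The static type of the key expression distinguishes the two flavors:
+//
+//   if (SRE->isArraySubscriptRefExpr()) {
+//     // integral key: indexed (array-style) subscripting
+//   } else {
+//     // object-pointer key: keyed (dictionary-style) subscripting
+//   }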
+
/// \brief An expression that sends a message to the given Objective-C
/// object or class.
@@ -477,7 +891,11 @@ class ObjCMessageExpr : public Expr {
/// \brief Whether this message send is a "delegate init call",
/// i.e. a call of an init method on self from within an init method.
unsigned IsDelegateInitCall : 1;
-
+
+ /// \brief Whether this message send was implicitly generated by
+ /// the implementation rather than explicitly written by the user.
+ unsigned IsImplicit : 1;
+
/// \brief Whether the locations of the selector identifiers are in a
/// "standard" position, a enum SelectorLocationsKind.
unsigned SelLocsKind : 2;
@@ -492,7 +910,7 @@ class ObjCMessageExpr : public Expr {
ObjCMessageExpr(EmptyShell Empty, unsigned NumArgs)
: Expr(ObjCMessageExprClass, Empty), SelectorOrMethod(0), Kind(0),
- HasMethod(0), IsDelegateInitCall(0) {
+ HasMethod(0), IsDelegateInitCall(0), IsImplicit(0), SelLocsKind(0) {
setNumArgs(NumArgs);
}
@@ -506,7 +924,8 @@ class ObjCMessageExpr : public Expr {
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc);
+ SourceLocation RBracLoc,
+ bool isImplicit);
ObjCMessageExpr(QualType T, ExprValueKind VK,
SourceLocation LBracLoc,
TypeSourceInfo *Receiver,
@@ -515,7 +934,8 @@ class ObjCMessageExpr : public Expr {
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc);
+ SourceLocation RBracLoc,
+ bool isImplicit);
ObjCMessageExpr(QualType T, ExprValueKind VK,
SourceLocation LBracLoc,
Expr *Receiver,
@@ -524,7 +944,8 @@ class ObjCMessageExpr : public Expr {
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc);
+ SourceLocation RBracLoc,
+ bool isImplicit);
void initArgsAndSelLocs(ArrayRef<Expr *> Args,
ArrayRef<SourceLocation> SelLocs,
@@ -625,7 +1046,8 @@ public:
ArrayRef<SourceLocation> SelLocs,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc);
+ SourceLocation RBracLoc,
+ bool isImplicit);
/// \brief Create a class message send.
///
@@ -660,7 +1082,8 @@ public:
ArrayRef<SourceLocation> SelLocs,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc);
+ SourceLocation RBracLoc,
+ bool isImplicit);
/// \brief Create an instance message send.
///
@@ -695,7 +1118,8 @@ public:
ArrayRef<SourceLocation> SeLocs,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc);
+ SourceLocation RBracLoc,
+ bool isImplicit);
/// \brief Create an empty Objective-C message expression, to be
/// filled in by subsequent calls.
@@ -708,6 +1132,11 @@ public:
unsigned NumArgs,
unsigned NumStoredSelLocs);
+ /// \brief Indicates whether the message send was implicitly
+ /// generated by the implementation. If false, it was written explicitly
+ /// in the source code.
+ bool isImplicit() const { return IsImplicit; }
+
/// \brief Determine the kind of receiver that this message is being
/// sent to.
ReceiverKind getReceiverKind() const { return (ReceiverKind)Kind; }
@@ -877,7 +1306,11 @@ public:
SourceLocation getLeftLoc() const { return LBracLoc; }
SourceLocation getRightLoc() const { return RBracLoc; }
- SourceLocation getSelectorStartLoc() const { return getSelectorLoc(0); }
+ SourceLocation getSelectorStartLoc() const {
+ if (isImplicit())
+ return getLocStart();
+ return getSelectorLoc(0);
+ }
SourceLocation getSelectorLoc(unsigned Index) const {
assert(Index < getNumSelectorLocs() && "Index out of range!");
if (hasStandardSelLocs())
@@ -892,6 +1325,8 @@ public:
void getSelectorLocs(SmallVectorImpl<SourceLocation> &SelLocs) const;
unsigned getNumSelectorLocs() const {
+ if (isImplicit())
+ return 0;
Selector Sel = getSelector();
if (Sel.isUnarySelector())
return 1;
@@ -902,7 +1337,7 @@ public:
LBracLoc = R.getBegin();
RBracLoc = R.getEnd();
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(LBracLoc, RBracLoc);
}
@@ -965,11 +1400,11 @@ public:
SourceLocation getIsaMemberLoc() const { return IsaMemberLoc; }
void setIsaMemberLoc(SourceLocation L) { IsaMemberLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getBase()->getLocStart(), IsaMemberLoc);
}
- SourceLocation getExprLoc() const { return IsaMemberLoc; }
+ SourceLocation getExprLoc() const LLVM_READONLY { return IsaMemberLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCIsaExprClass;
@@ -1038,8 +1473,12 @@ public:
child_range children() { return child_range(&Operand, &Operand+1); }
// Source locations are determined by the subexpression.
- SourceRange getSourceRange() const { return Operand->getSourceRange(); }
- SourceLocation getExprLoc() const { return getSubExpr()->getExprLoc(); }
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return Operand->getSourceRange();
+ }
+ SourceLocation getExprLoc() const LLVM_READONLY {
+ return getSubExpr()->getExprLoc();
+ }
static bool classof(const Stmt *s) {
return s->getStmtClass() == ObjCIndirectCopyRestoreExprClass;
@@ -1086,7 +1525,7 @@ public:
/// \brief The location of the bridge keyword.
SourceLocation getBridgeKeywordLoc() const { return BridgeKeywordLoc; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(LParenLoc, getSubExpr()->getLocEnd());
}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
index 96d14b2..18a1432 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
@@ -15,6 +15,8 @@
#define LLVM_CLANG_AST_EXTERNAL_AST_SOURCE_H
#include "clang/AST/DeclBase.h"
+#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/DenseMap.h"
namespace clang {
@@ -153,6 +155,12 @@ public:
return FindExternalLexicalDecls(DC, DeclTy::classofKind, Result);
}
+ /// \brief Get the decls that are contained in a file in the Offset/Length
+ /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// a range.
+ virtual void FindFileRegionDecls(FileID File, unsigned Offset,unsigned Length,
+ SmallVectorImpl<Decl *> &Decls) {}
+
/// \brief Gives the external AST source an opportunity to complete
/// an incomplete type.
virtual void CompleteType(TagDecl *Tag) {}
@@ -190,6 +198,44 @@ public:
/// The default implementation of this method is a no-op.
virtual void PrintStats();
+
+ /// \brief Perform layout on the given record.
+ ///
+ /// This routine allows the external AST source to provide a specific
+ /// layout for a record, overriding the layout that would normally be
+ /// constructed. It is intended for clients who receive specific layout
+ /// details rather than source code (such as LLDB). The client is expected
+ /// to fill in the field offsets, base offsets, virtual base offsets, and
+ /// complete object size.
+ ///
+ /// \param Record The record whose layout is being requested.
+ ///
+ /// \param Size The final size of the record, in bits.
+ ///
+ /// \param Alignment The final alignment of the record, in bits.
+ ///
+ /// \param FieldOffsets The offset of each of the fields within the record,
+ /// expressed in bits. All of the fields must be provided with offsets.
+ ///
+ /// \param BaseOffsets The offset of each of the direct, non-virtual base
+ /// classes. If any bases are not given offsets, the bases will be laid
+ /// out according to the ABI.
+ ///
+ /// \param VirtualBaseOffsets The offset of each of the virtual base classes
+ /// (whether direct or indirect). If any bases are not given offsets, the
+ /// bases will be laid out according to the ABI.
+ ///
+ /// \returns true if the record layout was provided, false otherwise.
+ virtual bool
+ layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets)
+ {
+ return false;
+ }
+
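+ // Override sketch (hypothetical client in the spirit of the LLDB use case
+ // mentioned above; the class name is invented for illustration):
+ //
+ //   class DebugInfoASTSource : public ExternalASTSource {
+ //     virtual bool
+ //     layoutRecordType(const RecordDecl *Record,
+ //                      uint64_t &Size, uint64_t &Alignment,
+ //                      llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ //                      llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ //                      llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets) {
+ //       // Fill Size, Alignment and the offset maps from debug info,
+ //       // then report that a layout was provided:
+ //       return true;
+ //     }
+ //   };
+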
//===--------------------------------------------------------------------===//
// Queries for performance analysis.
//===--------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h b/contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h
new file mode 100644
index 0000000..3e2fbad
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h
@@ -0,0 +1,36 @@
+//===--- LambdaMangleContext.h - Context for mangling lambdas ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LambdaMangleContext interface, which keeps track of
+// the Itanium C++ ABI mangling numbers for lambda expressions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LAMBDAMANGLECONTEXT_H
+#define LLVM_CLANG_LAMBDAMANGLECONTEXT_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+
+class CXXMethodDecl;
+class FunctionProtoType;
+
+/// \brief Keeps track of the Itanium C++ ABI mangling numbers of lambda
+/// expressions within a particular context.
+class LambdaMangleContext {
+ llvm::DenseMap<const FunctionProtoType *, unsigned> ManglingNumbers;
+
+public:
+ /// \brief Retrieve the mangling number of a new lambda expression with the
+ /// given call operator within this lambda context.
+ unsigned getManglingNumber(CXXMethodDecl *CallOperator);
+};
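+// Usage sketch (assumptions: `Ctx` is a LambdaMangleContext and `CallOp`
+// the lambda's call operator):
+//
+//   unsigned N = Ctx.getManglingNumber(CallOp);
+//   // Successive lambdas with the same call-operator signature in the
+//   // same context receive successive numbers.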
+
+} // end namespace clang
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Mangle.h b/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
index f58a83b..ca22ed6 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
@@ -58,12 +58,14 @@ public:
private:
StringRef String;
- llvm::SmallString<256> Buffer;
+ SmallString<256> Buffer;
};
/// MangleContext - Context for tracking state which persists across multiple
/// calls to the C++ name mangler.
class MangleContext {
+ virtual void anchor();
+
ASTContext &Context;
DiagnosticsEngine &Diags;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h b/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
new file mode 100644
index 0000000..40e9759
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
@@ -0,0 +1,152 @@
+//===--- NSAPI.h - NSFoundation APIs ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_NSAPI_H
+#define LLVM_CLANG_AST_NSAPI_H
+
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/Optional.h"
+
+namespace clang {
+ class ASTContext;
+ class QualType;
+
+/// \brief Provides info and caches identifiers/selectors for the
+/// NSFoundation API.
+class NSAPI {
+public:
+ explicit NSAPI(ASTContext &Ctx);
+
+ ASTContext &getASTContext() const { return Ctx; }
+
+ enum NSClassIdKindKind {
+ ClassId_NSObject,
+ ClassId_NSString,
+ ClassId_NSArray,
+ ClassId_NSMutableArray,
+ ClassId_NSDictionary,
+ ClassId_NSMutableDictionary,
+ ClassId_NSNumber
+ };
+ static const unsigned NumClassIds = 7;
+
+ enum NSStringMethodKind {
+ NSStr_stringWithString,
+ NSStr_initWithString
+ };
+ static const unsigned NumNSStringMethods = 2;
+
+ IdentifierInfo *getNSClassId(NSClassIdKindKind K) const;
+
+ /// \brief The Objective-C NSString selectors.
+ Selector getNSStringSelector(NSStringMethodKind MK) const;
+
+ /// \brief Enumerates the NSArray methods used to generate literals.
+ enum NSArrayMethodKind {
+ NSArr_array,
+ NSArr_arrayWithArray,
+ NSArr_arrayWithObject,
+ NSArr_arrayWithObjects,
+ NSArr_arrayWithObjectsCount,
+ NSArr_initWithArray,
+ NSArr_initWithObjects,
+ NSArr_objectAtIndex,
+ NSMutableArr_replaceObjectAtIndex
+ };
+ static const unsigned NumNSArrayMethods = 9;
+
+ /// \brief The Objective-C NSArray selectors.
+ Selector getNSArraySelector(NSArrayMethodKind MK) const;
+
+ /// \brief Return NSArrayMethodKind if \arg Sel is such a selector.
+ llvm::Optional<NSArrayMethodKind> getNSArrayMethodKind(Selector Sel);
+
+ /// \brief Enumerates the NSDictionary methods used to generate literals.
+ enum NSDictionaryMethodKind {
+ NSDict_dictionary,
+ NSDict_dictionaryWithDictionary,
+ NSDict_dictionaryWithObjectForKey,
+ NSDict_dictionaryWithObjectsForKeys,
+ NSDict_dictionaryWithObjectsForKeysCount,
+ NSDict_dictionaryWithObjectsAndKeys,
+ NSDict_initWithDictionary,
+ NSDict_initWithObjectsAndKeys,
+ NSDict_objectForKey,
+ NSMutableDict_setObjectForKey
+ };
+ static const unsigned NumNSDictionaryMethods = 10;
+
+ /// \brief The Objective-C NSDictionary selectors.
+ Selector getNSDictionarySelector(NSDictionaryMethodKind MK) const;
+
+ /// \brief Return NSDictionaryMethodKind if \arg Sel is such a selector.
+ llvm::Optional<NSDictionaryMethodKind>
+ getNSDictionaryMethodKind(Selector Sel);
+
+ /// \brief Enumerates the NSNumber methods used to generate literals.
+ enum NSNumberLiteralMethodKind {
+ NSNumberWithChar,
+ NSNumberWithUnsignedChar,
+ NSNumberWithShort,
+ NSNumberWithUnsignedShort,
+ NSNumberWithInt,
+ NSNumberWithUnsignedInt,
+ NSNumberWithLong,
+ NSNumberWithUnsignedLong,
+ NSNumberWithLongLong,
+ NSNumberWithUnsignedLongLong,
+ NSNumberWithFloat,
+ NSNumberWithDouble,
+ NSNumberWithBool,
+ NSNumberWithInteger,
+ NSNumberWithUnsignedInteger
+ };
+ static const unsigned NumNSNumberLiteralMethods = 15;
+
+ /// \brief The Objective-C NSNumber selectors used to create NSNumber
+ /// literals.
+ /// \param Instance If true, returns the selector for the init* instance
+ /// method; otherwise, returns the selector for the number* class method.
+ Selector getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
+ bool Instance) const;
+
+ bool isNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
+ Selector Sel) const {
+ return Sel == getNSNumberLiteralSelector(MK, false) ||
+ Sel == getNSNumberLiteralSelector(MK, true);
+ }
+
+ /// \brief Return NSNumberLiteralMethodKind if \arg Sel is such a selector.
+ llvm::Optional<NSNumberLiteralMethodKind>
+ getNSNumberLiteralMethodKind(Selector Sel) const;
+
+ /// \brief Determine the appropriate NSNumber factory method kind for a
+ /// literal of the given type.
+ static llvm::Optional<NSNumberLiteralMethodKind>
+ getNSNumberFactoryMethodKind(QualType T);
+
+private:
+ ASTContext &Ctx;
+
+ mutable IdentifierInfo *ClassIds[NumClassIds];
+
+ mutable Selector NSStringSelectors[NumNSStringMethods];
+
+ /// \brief The selectors for Objective-C NSArray methods.
+ mutable Selector NSArraySelectors[NumNSArrayMethods];
+
+ /// \brief The selectors for Objective-C NSDictionary methods.
+ mutable Selector NSDictionarySelectors[NumNSDictionaryMethods];
+
+ /// \brief The Objective-C NSNumber selectors used to create NSNumber literals.
+ mutable Selector NSNumberClassSelectors[NumNSNumberLiteralMethods];
+ mutable Selector NSNumberInstanceSelectors[NumNSNumberLiteralMethods];
+};
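+// Usage sketch, assuming `Ctx` is the ASTContext and `Sel` a selector under
+// inspection:
+//
+//   NSAPI NS(Ctx);
+//   if (llvm::Optional<NSAPI::NSNumberLiteralMethodKind> K =
+//           NS.getNSNumberLiteralMethodKind(Sel)) {
+//     // *K identifies which numberWith.../initWith... method Sel names.
+//   }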
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_NSAPI_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
index c81c06e..b5bd824 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
@@ -17,6 +17,7 @@
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
@@ -36,8 +37,9 @@ class LangOptions;
/// namespaces. For example, "foo::" in "foo::x" is a nested name
/// specifier. Nested name specifiers are made up of a sequence of
/// specifiers, each of which can be a namespace, type, identifier
-/// (for dependent names), or the global specifier ('::', must be the
-/// first specifier).
+/// (for dependent names), decltype specifier, or the global specifier ('::').
+/// The last two specifiers can only appear at the start of a
+/// nested-name-specifier.
class NestedNameSpecifier : public llvm::FoldingSetNode {
/// \brief Enumeration describing
@@ -95,7 +97,8 @@ private:
Specifier(Other.Specifier) {
}
- NestedNameSpecifier &operator=(const NestedNameSpecifier &); // do not implement
+ NestedNameSpecifier &operator=(const NestedNameSpecifier &); // do not
+ // implement
/// \brief Either find or insert the given nested name specifier
/// mockup in the given context.
@@ -221,12 +224,12 @@ class NestedNameSpecifierLoc {
public:
/// \brief Construct an empty nested-name-specifier.
NestedNameSpecifierLoc() : Qualifier(0), Data(0) { }
-
+
/// \brief Construct a nested-name-specifier with source location information
- /// from
+ /// from
NestedNameSpecifierLoc(NestedNameSpecifier *Qualifier, void *Data)
: Qualifier(Qualifier), Data(Data) { }
-
+
/// \brief Evaluates true when this nested-name-specifier location is
/// non-empty.
operator bool() const { return Qualifier; }
@@ -239,14 +242,14 @@ public:
/// \brief Retrieve the opaque pointer that refers to source-location data.
void *getOpaqueData() const { return Data; }
-
+
/// \brief Retrieve the source range covering the entirety of this
/// nested-name-specifier.
///
/// For example, if this instance refers to a nested-name-specifier
/// \c ::std::vector<int>::, the returned source range would cover
/// from the initial '::' to the last '::'.
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
/// \brief Retrieve the source range covering just the last part of
/// this nested-name-specifier, not including the prefix.
@@ -258,25 +261,25 @@ public:
/// \brief Retrieve the location of the beginning of this
/// nested-name-specifier.
- SourceLocation getBeginLoc() const {
+ SourceLocation getBeginLoc() const {
return getSourceRange().getBegin();
}
/// \brief Retrieve the location of the end of this
/// nested-name-specifier.
- SourceLocation getEndLoc() const {
+ SourceLocation getEndLoc() const {
return getSourceRange().getEnd();
}
/// \brief Retrieve the location of the beginning of this
/// component of the nested-name-specifier.
- SourceLocation getLocalBeginLoc() const {
+ SourceLocation getLocalBeginLoc() const {
return getLocalSourceRange().getBegin();
}
-
+
/// \brief Retrieve the location of the end of this component of the
/// nested-name-specifier.
- SourceLocation getLocalEndLoc() const {
+ SourceLocation getLocalEndLoc() const {
return getLocalSourceRange().getEnd();
}
@@ -300,13 +303,13 @@ public:
/// \brief Determines the data length for the entire
/// nested-name-specifier.
unsigned getDataLength() const { return getDataLength(Qualifier); }
-
- friend bool operator==(NestedNameSpecifierLoc X,
+
+ friend bool operator==(NestedNameSpecifierLoc X,
NestedNameSpecifierLoc Y) {
return X.Qualifier == Y.Qualifier && X.Data == Y.Data;
}
- friend bool operator!=(NestedNameSpecifierLoc X,
+ friend bool operator!=(NestedNameSpecifierLoc X,
NestedNameSpecifierLoc Y) {
return !(X == Y);
}
@@ -316,39 +319,43 @@ public:
/// with source-location information for all of the components of the
/// nested-name-specifier.
class NestedNameSpecifierLocBuilder {
- /// \brief The current representation of the nested-name-specifier we're
+ /// \brief The current representation of the nested-name-specifier we're
/// building.
NestedNameSpecifier *Representation;
-
+
/// \brief Buffer used to store source-location information for the
/// nested-name-specifier.
///
- /// Note that we explicitly manage the buffer (rather than using a
+ /// Note that we explicitly manage the buffer (rather than using a
/// SmallVector) because \c Declarator expects it to be possible to memcpy()
/// a \c CXXScopeSpec, and CXXScopeSpec uses a NestedNameSpecifierLocBuilder.
char *Buffer;
-
+
/// \brief The size of the buffer used to store source-location information
/// for the nested-name-specifier.
unsigned BufferSize;
-
- /// \brief The capacity of the buffer used to store source-location
+
+ /// \brief The capacity of the buffer used to store source-location
/// information for the nested-name-specifier.
unsigned BufferCapacity;
public:
- NestedNameSpecifierLocBuilder();
-
+ NestedNameSpecifierLocBuilder()
+ : Representation(0), Buffer(0), BufferSize(0), BufferCapacity(0) { }
+
NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other);
-
+
NestedNameSpecifierLocBuilder &
operator=(const NestedNameSpecifierLocBuilder &Other);
-
- ~NestedNameSpecifierLocBuilder();
-
+
+ ~NestedNameSpecifierLocBuilder() {
+ if (BufferCapacity)
+ free(Buffer);
+ }
+
/// \brief Retrieve the representation of the nested-name-specifier.
NestedNameSpecifier *getRepresentation() const { return Representation; }
-
+
/// \brief Extend the current nested-name-specifier by another
/// nested-name-specifier component of the form 'type::'.
///
@@ -362,8 +369,8 @@ public:
/// \param ColonColonLoc The location of the trailing '::'.
void Extend(ASTContext &Context, SourceLocation TemplateKWLoc, TypeLoc TL,
SourceLocation ColonColonLoc);
-
- /// \brief Extend the current nested-name-specifier by another
+
+ /// \brief Extend the current nested-name-specifier by another
/// nested-name-specifier component of the form 'identifier::'.
///
/// \param Context The AST context in which this nested-name-specifier
@@ -376,8 +383,8 @@ public:
/// \param ColonColonLoc The location of the trailing '::'.
void Extend(ASTContext &Context, IdentifierInfo *Identifier,
SourceLocation IdentifierLoc, SourceLocation ColonColonLoc);
-
- /// \brief Extend the current nested-name-specifier by another
+
+ /// \brief Extend the current nested-name-specifier by another
/// nested-name-specifier component of the form 'namespace::'.
///
/// \param Context The AST context in which this nested-name-specifier
@@ -390,8 +397,8 @@ public:
/// \param ColonColonLoc The location of the trailing '::'.
void Extend(ASTContext &Context, NamespaceDecl *Namespace,
SourceLocation NamespaceLoc, SourceLocation ColonColonLoc);
-
- /// \brief Extend the current nested-name-specifier by another
+
+ /// \brief Extend the current nested-name-specifier by another
/// nested-name-specifier component of the form 'namespace-alias::'.
///
/// \param Context The AST context in which this nested-name-specifier
@@ -399,35 +406,35 @@ public:
///
/// \param Alias The namespace alias.
///
- /// \param AliasLoc The location of the namespace alias
+ /// \param AliasLoc The location of the namespace alias
/// name.
///
/// \param ColonColonLoc The location of the trailing '::'.
void Extend(ASTContext &Context, NamespaceAliasDecl *Alias,
SourceLocation AliasLoc, SourceLocation ColonColonLoc);
-
+
/// \brief Turn this (empty) nested-name-specifier into the global
/// nested-name-specifier '::'.
void MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc);
-
+
/// \brief Make a new nested-name-specifier from incomplete source-location
/// information.
///
/// This routine should be used very, very rarely, in cases where we
/// need to synthesize a nested-name-specifier. Most code should instead use
/// \c Adopt() with a proper \c NestedNameSpecifierLoc.
- void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier,
+ void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier,
SourceRange R);
-
- /// \brief Adopt an existing nested-name-specifier (with source-range
+
+ /// \brief Adopt an existing nested-name-specifier (with source-range
/// information).
void Adopt(NestedNameSpecifierLoc Other);
-
+
/// \brief Retrieve the source range covered by this nested-name-specifier.
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return NestedNameSpecifierLoc(Representation, Buffer).getSourceRange();
}
-
+
/// \brief Retrieve a nested-name-specifier with location information,
/// copied into the given AST context.
///
@@ -449,7 +456,7 @@ public:
Representation = 0;
BufferSize = 0;
}
-
+
/// \brief Retrieve the underlying buffer.
///
/// \returns A pair containing a pointer to the buffer of source-location
@@ -459,9 +466,9 @@ public:
return std::make_pair(Buffer, BufferSize);
}
};
-
-/// Insertion operator for diagnostics. This allows sending NestedNameSpecifiers
-/// into a diagnostic with <<.
+
+/// Insertion operator for diagnostics. This allows sending
+/// NestedNameSpecifiers into a diagnostic with <<.
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
NestedNameSpecifier *NNS) {
DB.AddTaggedVal(reinterpret_cast<intptr_t>(NNS),
diff --git a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
index 469da99..258637d 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
@@ -52,13 +52,6 @@ enum CastKind {
/// conversion is always unqualified.
CK_LValueToRValue,
- /// CK_GetObjCProperty - A conversion which calls an Objective-C
- /// property getter. The operand is an OK_ObjCProperty l-value; the
- /// result will generally be an r-value, but could be an ordinary
- /// gl-value if the property reference is to an implicit property
- /// for a method that returns a reference type.
- CK_GetObjCProperty,
-
/// CK_NoOp - A conversion which does not affect the type other than
/// (possibly) adding qualifiers.
/// int -> int
@@ -124,6 +117,15 @@ enum CastKind {
/// against the null member pointer.
CK_MemberPointerToBoolean,
+ /// CK_ReinterpretMemberPointer - Reinterpret a member pointer as a
+ /// different kind of member pointer. C++ forbids this from
+ /// crossing between function and object types, but otherwise does
+ /// not restrict it. However, the only operation that is permitted
+ /// on a "punned" member pointer is casting it back to the original
+ /// type, which is required to be a lossless operation (although
+ /// many ABIs do not guarantee this on all possible intermediate types).
+ CK_ReinterpretMemberPointer,
+
/// CK_UserDefinedConversion - Conversion using a user defined type
/// conversion function.
/// struct A { operator int(); }; int i = int(A());
@@ -274,7 +276,19 @@ enum CastKind {
/// in ARC cause blocks to be copied; this is for cases where that
/// would not otherwise be guaranteed, such as when casting to a
/// non-block pointer type.
- CK_ARCExtendBlockObject
+ CK_ARCExtendBlockObject,
+
+ /// \brief Converts from _Atomic(T) to T.
+ CK_AtomicToNonAtomic,
+ /// \brief Converts from T to _Atomic(T).
+ CK_NonAtomicToAtomic,
+
+ /// \brief Causes a block literal to be copied to the heap and then
+ /// autoreleased.
+ ///
+ /// This particular cast kind is used for the conversion from a C++11
+ /// lambda expression to a block pointer.
+ CK_CopyAndAutoreleaseBlockObject
};
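+// Illustrative C snippets for the two atomic conversions added above
+// (assuming the usual C11 _Atomic semantics):
+//
+//   _Atomic(int) AI;
+//   int I = AI;   // load:  CK_AtomicToNonAtomic on the read value
+//   AI = I;       // store: CK_NonAtomicToAtomic on the right-hand side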
#define CK_Invalid ((CastKind) -1)
diff --git a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
index 2bdd8d3..2e34dc8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
@@ -36,7 +36,7 @@ struct PrintingPolicy {
PrintingPolicy(const LangOptions &LO)
: Indentation(2), LangOpts(LO), SuppressSpecifiers(false),
SuppressTagKeyword(false), SuppressTag(false), SuppressScope(false),
- SuppressInitializers(false),
+ SuppressUnwrittenScope(false), SuppressInitializers(false),
Dump(false), ConstantArraySizeAsWritten(false),
AnonymousTagLocations(true), SuppressStrongLifetime(false),
Bool(LO.Bool) { }
@@ -86,6 +86,10 @@ struct PrintingPolicy {
/// \brief Suppresses printing of scope specifiers.
bool SuppressScope : 1;
+ /// \brief Suppress printing parts of scope specifiers that don't need
+ /// to be written, e.g., for inline or anonymous namespaces.
+ bool SuppressUnwrittenScope : 1;
+
/// \brief Suppress printing of variable initializers.
///
/// This flag is used when printing the loop variable in a for-range
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
index b0186ce..ec07267 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
@@ -62,8 +62,11 @@ class ASTRecordLayout {
/// (either a base or a member). Will be zero if the class doesn't contain
/// any empty subobjects.
CharUnits SizeOfLargestEmptySubobject;
-
- /// VBPtrOffset - Virtual base table offset.
+
+ /// VFPtrOffset - Virtual function table offset (Microsoft-only).
+ CharUnits VFPtrOffset;
+
+ /// VBPtrOffset - Virtual base table offset (Microsoft-only).
CharUnits VBPtrOffset;
/// PrimaryBase - The primary base info for this record.
@@ -92,7 +95,8 @@ class ASTRecordLayout {
// Constructor for C++ records.
typedef CXXRecordLayoutInfo::BaseOffsetsMapTy BaseOffsetsMapTy;
ASTRecordLayout(const ASTContext &Ctx,
- CharUnits size, CharUnits alignment, CharUnits vbptroffset,
+ CharUnits size, CharUnits alignment,
+ CharUnits vfptroffset, CharUnits vbptroffset,
CharUnits datasize,
const uint64_t *fieldoffsets, unsigned fieldcount,
CharUnits nonvirtualsize, CharUnits nonvirtualalign,
@@ -204,7 +208,17 @@ public:
return CXXInfo->SizeOfLargestEmptySubobject;
}
+ /// getVFPtrOffset - Get the offset for virtual function table pointer.
+ /// This is only meaningful with the Microsoft ABI.
+ CharUnits getVFPtrOffset() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ return CXXInfo->VFPtrOffset;
+ }
+
+ /// getVBPtrOffset - Get the offset for virtual base table pointer.
+ /// This is only meaningful with the Microsoft ABI.
CharUnits getVBPtrOffset() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
return CXXInfo->VBPtrOffset;
}
};
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
index 0ec09c9..a4ad525 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -147,7 +147,13 @@ public:
/// \brief Return whether this visitor should recurse into the types of
/// TypeLocs.
bool shouldWalkTypesOfTypeLocs() const { return true; }
-
+
+ /// \brief Return whether \c S should be traversed using data recursion,
+ /// to avoid stack overflow in extreme cases.
+ bool shouldUseDataRecursionFor(Stmt *S) const {
+ return isa<BinaryOperator>(S) || isa<UnaryOperator>(S) || isa<CaseStmt>(S);
+ }
+
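+ // Override sketch: a derived visitor may widen or disable data recursion.
+ // The class name is invented for illustration:
+ //
+ //   class MyVisitor : public RecursiveASTVisitor<MyVisitor> {
+ //   public:
+ //     // Always use plain recursion instead of the iterative work list:
+ //     bool shouldUseDataRecursionFor(Stmt *) const { return false; }
+ //   };
+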
/// \brief Recursively visit a statement or expression, by
/// dispatching to Traverse*() based on the argument's dynamic type.
///
@@ -181,12 +187,17 @@ public:
/// \returns false if the visitation was terminated early, true otherwise.
bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
- /// \brief Recursively visit a C++ nested-name-specifier with location
+ /// \brief Recursively visit a C++ nested-name-specifier with location
/// information.
///
/// \returns false if the visitation was terminated early, true otherwise.
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
+ /// \brief Recursively visit a name with its location information.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseDeclarationNameInfo(DeclarationNameInfo NameInfo);
+
/// \brief Recursively visit a template name and dispatch to the
/// appropriate method.
///
@@ -223,6 +234,11 @@ public:
/// \returns false if the visitation was terminated early, true otherwise.
bool TraverseConstructorInitializer(CXXCtorInitializer *Init);
+ /// \brief Recursively visit a lambda capture.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseLambdaCapture(LambdaExpr::Capture C);
+
// ---- Methods on Stmts ----
// Declare Traverse*() for all concrete Stmt classes.
@@ -387,8 +403,102 @@ private:
bool TraverseDeclContextHelper(DeclContext *DC);
bool TraverseFunctionHelper(FunctionDecl *D);
bool TraverseVarHelper(VarDecl *D);
+
+ bool Walk(Stmt *S);
+
+ struct EnqueueJob {
+ Stmt *S;
+ Stmt::child_iterator StmtIt;
+
+ EnqueueJob(Stmt *S) : S(S), StmtIt() {
+ if (Expr *E = dyn_cast_or_null<Expr>(S))
+ this->S = E->IgnoreParens(); // update the member, not the ctor parameter
+ }
+ };
+ bool dataTraverse(Stmt *S);
};
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::dataTraverse(Stmt *S) {
+
+ SmallVector<EnqueueJob, 16> Queue;
+ Queue.push_back(S);
+
+ while (!Queue.empty()) {
+ EnqueueJob &job = Queue.back();
+ Stmt *CurrS = job.S;
+ if (!CurrS) {
+ Queue.pop_back();
+ continue;
+ }
+
+ if (getDerived().shouldUseDataRecursionFor(CurrS)) {
+ if (job.StmtIt == Stmt::child_iterator()) {
+ if (!Walk(CurrS)) return false;
+ job.StmtIt = CurrS->child_begin();
+ } else {
+ ++job.StmtIt;
+ }
+
+ if (job.StmtIt != CurrS->child_end())
+ Queue.push_back(*job.StmtIt);
+ else
+ Queue.pop_back();
+ continue;
+ }
+
+ Queue.pop_back();
+ TRY_TO(TraverseStmt(CurrS));
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::Walk(Stmt *S) {
+
+#define DISPATCH_WALK(NAME, CLASS, VAR) \
+ return getDerived().WalkUpFrom##NAME(static_cast<CLASS*>(VAR));
+
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
+ switch (BinOp->getOpcode()) {
+#define OPERATOR(NAME) \
+ case BO_##NAME: DISPATCH_WALK(Bin##NAME, BinaryOperator, S);
+
+ BINOP_LIST()
+#undef OPERATOR
+
+#define OPERATOR(NAME) \
+ case BO_##NAME##Assign: \
+ DISPATCH_WALK(Bin##NAME##Assign, CompoundAssignOperator, S);
+
+ CAO_LIST()
+#undef OPERATOR
+ }
+ } else if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(S)) {
+ switch (UnOp->getOpcode()) {
+#define OPERATOR(NAME) \
+ case UO_##NAME: DISPATCH_WALK(Unary##NAME, UnaryOperator, S);
+
+ UNARYOP_LIST()
+#undef OPERATOR
+ }
+ }
+
+ // Top switch stmt: dispatch to WalkUpFromFooStmt for each concrete FooStmt.
+ switch (S->getStmtClass()) {
+ case Stmt::NoStmtClass: break;
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ case Stmt::CLASS##Class: DISPATCH_WALK(CLASS, CLASS, S);
+#include "clang/AST/StmtNodes.inc"
+ }
+
+#undef DISPATCH_WALK
+
+ return true;
+}
+
#define DISPATCH(NAME, CLASS, VAR) \
return getDerived().Traverse##NAME(static_cast<CLASS*>(VAR))
@@ -397,6 +507,9 @@ bool RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S) {
if (!S)
return true;
+ if (getDerived().shouldUseDataRecursionFor(S))
+ return dataTraverse(S);
+
// If we have a binary expr, dispatch to the subcode of the binop. A smart
// optimizer (e.g. LLVM) will fold this comparison into the switch stmt
// below.
@@ -525,23 +638,48 @@ bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifierLoc(
NestedNameSpecifierLoc NNS) {
if (!NNS)
return true;
-
+
if (NestedNameSpecifierLoc Prefix = NNS.getPrefix())
TRY_TO(TraverseNestedNameSpecifierLoc(Prefix));
-
+
switch (NNS.getNestedNameSpecifier()->getKind()) {
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
return true;
-
+
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
TRY_TO(TraverseTypeLoc(NNS.getTypeLoc()));
break;
}
-
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseDeclarationNameInfo(
+ DeclarationNameInfo NameInfo) {
+ switch (NameInfo.getName().getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TSInfo = NameInfo.getNamedTypeInfo())
+ TRY_TO(TraverseTypeLoc(TSInfo->getTypeLoc()));
+
+ break;
+
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+
return true;
}
@@ -600,7 +738,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLoc(
// FIXME: how can TSI ever be NULL?
if (TypeSourceInfo *TSI = ArgLoc.getTypeSourceInfo())
return getDerived().TraverseTypeLoc(TSI->getTypeLoc());
- else
+ else
return getDerived().TraverseType(Arg.getAsType());
}
@@ -637,12 +775,18 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArguments(
template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseConstructorInitializer(
CXXCtorInitializer *Init) {
- // FIXME: recurse on TypeLoc of the base initializer if isBaseInitializer()?
+ if (TypeSourceInfo *TInfo = Init->getTypeSourceInfo())
+ TRY_TO(TraverseTypeLoc(TInfo->getTypeLoc()));
+
if (Init->isWritten())
TRY_TO(TraverseStmt(Init->getInit()));
return true;
}
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseLambdaCapture(LambdaExpr::Capture C){
+ return true;
+}
// ----------------- Type traversal -----------------
@@ -822,8 +966,8 @@ DEF_TRAVERSE_TYPE(AtomicType, {
// ----------------- TypeLoc traversal -----------------
// This macro makes available a variable TL, the passed-in TypeLoc.
-// If requested, it calls WalkUpFrom* for the Type in the given TypeLoc,
-// in addition to WalkUpFrom* for the TypeLoc itself, such that existing
+// If requested, it calls WalkUpFrom* for the Type in the given TypeLoc,
+// in addition to WalkUpFrom* for the TypeLoc itself, such that existing
// clients that override the WalkUpFrom*Type() and/or Visit*Type() methods
// continue to work.
#define DEF_TRAVERSE_TYPELOC(TYPE, CODE) \
@@ -1022,7 +1166,7 @@ DEF_TRAVERSE_TYPELOC(DependentTemplateSpecializationType, {
if (TL.getQualifierLoc()) {
TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
}
-
+
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
}
@@ -1099,6 +1243,8 @@ DEF_TRAVERSE_DECL(FileScopeAsmDecl, {
TRY_TO(TraverseStmt(D->getAsmString()));
})
+DEF_TRAVERSE_DECL(ImportDecl, { })
+
DEF_TRAVERSE_DECL(FriendDecl, {
// Friend is either decl or a type.
if (D->getFriendType())
@@ -1128,14 +1274,6 @@ DEF_TRAVERSE_DECL(ClassScopeFunctionSpecializationDecl, {
DEF_TRAVERSE_DECL(LinkageSpecDecl, { })
-DEF_TRAVERSE_DECL(ObjCClassDecl, {
- // FIXME: implement this
- })
-
-DEF_TRAVERSE_DECL(ObjCForwardProtocolDecl, {
- // FIXME: implement this
- })
-
DEF_TRAVERSE_DECL(ObjCPropertyImplDecl, {
// FIXME: implement this
})
@@ -1164,8 +1302,8 @@ DEF_TRAVERSE_DECL(NamespaceAliasDecl, {
DEF_TRAVERSE_DECL(LabelDecl, {
// There is no code in a LabelDecl.
})
-
-
+
+
DEF_TRAVERSE_DECL(NamespaceDecl, {
// Code in an unnamed namespace shows up automatically in
// decls_begin()/decls_end(). Thus we don't need to recurse on
@@ -1216,6 +1354,7 @@ DEF_TRAVERSE_DECL(ObjCPropertyDecl, {
DEF_TRAVERSE_DECL(UsingDecl, {
TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(D->getNameInfo()));
})
DEF_TRAVERSE_DECL(UsingDirectiveDecl, {
@@ -1312,7 +1451,8 @@ template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseFunctionInstantiations(
FunctionTemplateDecl* D) {
FunctionTemplateDecl::spec_iterator end = D->spec_end();
- for (FunctionTemplateDecl::spec_iterator it = D->spec_begin(); it != end; ++it) {
+ for (FunctionTemplateDecl::spec_iterator it = D->spec_begin(); it != end;
+ ++it) {
FunctionDecl* FD = *it;
switch (FD->getTemplateSpecializationKind()) {
case TSK_ImplicitInstantiation:
@@ -1329,8 +1469,6 @@ bool RecursiveASTVisitor<Derived>::TraverseFunctionInstantiations(
case TSK_Undeclared: // Declaration of the template definition.
case TSK_ExplicitSpecialization:
break;
- default:
- llvm_unreachable("Unknown specialization kind.");
}
}
@@ -1511,6 +1649,7 @@ DEF_TRAVERSE_DECL(UnresolvedUsingValueDecl, {
// Like UnresolvedUsingTypenameDecl, but without the 'typename':
// template <class T> Class A : public Base<T> { using Base<T>::foo; };
TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(D->getNameInfo()));
})
DEF_TRAVERSE_DECL(IndirectFieldDecl, {})
@@ -1529,6 +1668,8 @@ DEF_TRAVERSE_DECL(FieldDecl, {
TRY_TO(TraverseDeclaratorHelper(D));
if (D->isBitField())
TRY_TO(TraverseStmt(D->getBitWidth()));
+ else if (D->hasInClassInitializer())
+ TRY_TO(TraverseStmt(D->getInClassInitializer()));
})
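// Editorial example (not part of the diff): the new branch covers C++11
// in-class initializers, e.g. the '42' in
//
//   struct S { int x = 42; };
//
// where the FieldDecl has hasInClassInitializer() set and no bit-width.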
DEF_TRAVERSE_DECL(ObjCAtDefsFieldDecl, {
@@ -1548,6 +1689,7 @@ DEF_TRAVERSE_DECL(ObjCIvarDecl, {
template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(D->getNameInfo()));
// If we're an explicit template specialization, iterate over the
// template args that were explicitly specified. If we were doing
@@ -1624,7 +1766,9 @@ DEF_TRAVERSE_DECL(CXXDestructorDecl, {
template<typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseVarHelper(VarDecl *D) {
TRY_TO(TraverseDeclaratorHelper(D));
- TRY_TO(TraverseStmt(D->getInit()));
+ // Default params are taken care of when we traverse the ParmVarDecl.
+ if (!isa<ParmVarDecl>(D))
+ TRY_TO(TraverseStmt(D->getInit()));
return true;
}
@@ -1735,6 +1879,10 @@ DEF_TRAVERSE_STMT(ObjCAtTryStmt, { })
DEF_TRAVERSE_STMT(ObjCForCollectionStmt, { })
DEF_TRAVERSE_STMT(ObjCAutoreleasePoolStmt, { })
DEF_TRAVERSE_STMT(CXXForRangeStmt, { })
+DEF_TRAVERSE_STMT(MSDependentExistsStmt, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getNameInfo()));
+})
DEF_TRAVERSE_STMT(ReturnStmt, { })
DEF_TRAVERSE_STMT(SwitchStmt, { })
DEF_TRAVERSE_STMT(WhileStmt, { })
@@ -1742,6 +1890,7 @@ DEF_TRAVERSE_STMT(WhileStmt, { })
DEF_TRAVERSE_STMT(CXXDependentScopeMemberExpr, {
TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getMemberNameInfo()));
if (S->hasExplicitTemplateArgs()) {
TRY_TO(TraverseTemplateArgumentLocsHelper(
S->getTemplateArgs(), S->getNumTemplateArgs()));
@@ -1750,12 +1899,14 @@ DEF_TRAVERSE_STMT(CXXDependentScopeMemberExpr, {
DEF_TRAVERSE_STMT(DeclRefExpr, {
TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getNameInfo()));
TRY_TO(TraverseTemplateArgumentLocsHelper(
S->getTemplateArgs(), S->getNumTemplateArgs()));
})
DEF_TRAVERSE_STMT(DependentScopeDeclRefExpr, {
TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getNameInfo()));
if (S->hasExplicitTemplateArgs()) {
TRY_TO(TraverseTemplateArgumentLocsHelper(
S->getExplicitTemplateArgs().getTemplateArgs(),
@@ -1765,6 +1916,7 @@ DEF_TRAVERSE_STMT(DependentScopeDeclRefExpr, {
DEF_TRAVERSE_STMT(MemberExpr, {
TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getMemberNameInfo()));
TRY_TO(TraverseTemplateArgumentLocsHelper(
S->getTemplateArgs(), S->getNumTemplateArgs()));
})
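// Editorial sketch (hypothetical client, not part of the diff): with
// DeclarationNameInfo now traversed for DeclRefExpr, MemberExpr and the
// dependent-scope expressions above, one override observes every name use.
// "NameCollector" is a made-up class name.
#include "clang/AST/RecursiveASTVisitor.h"
#include "llvm/Support/raw_ostream.h"

class NameCollector : public clang::RecursiveASTVisitor<NameCollector> {
public:
  bool TraverseDeclarationNameInfo(clang::DeclarationNameInfo NameInfo) {
    llvm::errs() << NameInfo.getAsString() << "\n";  // record the spelling
    // Delegate to the base so TypeLocs inside constructor/conversion names
    // still get walked.
    return clang::RecursiveASTVisitor<NameCollector>
        ::TraverseDeclarationNameInfo(NameInfo);
  }
};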
@@ -1831,6 +1983,23 @@ TraverseGenericSelectionExpr(GenericSelectionExpr *S) {
return true;
}
+// PseudoObjectExpr is a special case because of the weirdness with
+// syntactic expressions and opaque values.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::
+TraversePseudoObjectExpr(PseudoObjectExpr *S) {
+ TRY_TO(WalkUpFromPseudoObjectExpr(S));
+ TRY_TO(TraverseStmt(S->getSyntacticForm()));
+ for (PseudoObjectExpr::semantics_iterator
+ i = S->semantics_begin(), e = S->semantics_end(); i != e; ++i) {
+ Expr *sub = *i;
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(sub))
+ sub = OVE->getSourceExpr();
+ TRY_TO(TraverseStmt(sub));
+ }
+ return true;
+}
+
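// Editorial note (illustration, not part of the diff): for an Objective-C
// property compound assignment such as
//
//   obj.count += 1;                  // the syntactic form the user wrote
//
// Sema builds a semantic form roughly like "setCount(getCount() + 1)" whose
// pieces are stitched together with OpaqueValueExprs. The loop above unwraps
// each OpaqueValueExpr to its getSourceExpr() so a visitor sees the real
// subexpression rather than the opaque placeholder.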
DEF_TRAVERSE_STMT(CXXScalarValueInitExpr, {
// This is called for code like 'return T()' where T is a built-in
// (i.e. non-class) type.
@@ -1880,6 +2049,11 @@ DEF_TRAVERSE_STMT(BinaryTypeTraitExpr, {
TRY_TO(TraverseTypeLoc(S->getRhsTypeSourceInfo()->getTypeLoc()));
})
+DEF_TRAVERSE_STMT(TypeTraitExpr, {
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ TRY_TO(TraverseTypeLoc(S->getArg(I)->getTypeLoc()));
+})
+
DEF_TRAVERSE_STMT(ArrayTypeTraitExpr, {
TRY_TO(TraverseTypeLoc(S->getQueriedTypeSourceInfo()->getTypeLoc()));
})
@@ -1898,6 +2072,37 @@ DEF_TRAVERSE_STMT(CXXTemporaryObjectExpr, {
TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
})
+// Walk only the visible parts of lambda expressions.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseLambdaExpr(LambdaExpr *S) {
+ for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(),
+ CEnd = S->explicit_capture_end();
+ C != CEnd; ++C) {
+ TRY_TO(TraverseLambdaCapture(*C));
+ }
+
+ if (S->hasExplicitParameters() || S->hasExplicitResultType()) {
+ TypeLoc TL = S->getCallOperator()->getTypeSourceInfo()->getTypeLoc();
+ if (S->hasExplicitParameters() && S->hasExplicitResultType()) {
+ // Visit the whole type.
+ TRY_TO(TraverseTypeLoc(TL));
+ } else if (isa<FunctionProtoTypeLoc>(TL)) {
+ FunctionProtoTypeLoc Proto = cast<FunctionProtoTypeLoc>(TL);
+ if (S->hasExplicitParameters()) {
+ // Visit parameters.
+ for (unsigned I = 0, N = Proto.getNumArgs(); I != N; ++I) {
+ TRY_TO(TraverseDecl(Proto.getArg(I)));
+ }
+ } else {
+ TRY_TO(TraverseTypeLoc(Proto.getResultLoc()));
+ }
+ }
+ }
+
+ TRY_TO(TraverseStmt(S->getBody()));
+ return true;
+}
+
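// Editorial sketch (hypothetical client, not part of the diff): since only
// the visible parts of a lambda are walked, a client interested in the
// explicit captures hooks the new TraverseLambdaCapture customization point
// (a no-op by default, see above). "CaptureAuditor" is a made-up name.
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "llvm/ADT/SmallVector.h"

class CaptureAuditor : public clang::RecursiveASTVisitor<CaptureAuditor> {
public:
  bool TraverseLambdaCapture(clang::LambdaExpr::Capture C) {
    if (C.capturesVariable())
      Vars.push_back(C.getCapturedVar());  // explicit captures only
    return true;
  }
  llvm::SmallVector<clang::VarDecl *, 8> Vars;
};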
DEF_TRAVERSE_STMT(CXXUnresolvedConstructExpr, {
// This is called for code like 'T()', where T is a template argument.
TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
@@ -1913,7 +2118,6 @@ DEF_TRAVERSE_STMT(CXXMemberCallExpr, { })
// over the children.
DEF_TRAVERSE_STMT(AddrLabelExpr, { })
DEF_TRAVERSE_STMT(ArraySubscriptExpr, { })
-DEF_TRAVERSE_STMT(BlockDeclRefExpr, { })
DEF_TRAVERSE_STMT(BlockExpr, {
TRY_TO(TraverseDecl(S->getBlockDecl()));
return true; // no child statements to loop through.
@@ -1926,7 +2130,7 @@ DEF_TRAVERSE_STMT(CXXDefaultArgExpr, { })
DEF_TRAVERSE_STMT(CXXDeleteExpr, { })
DEF_TRAVERSE_STMT(ExprWithCleanups, { })
DEF_TRAVERSE_STMT(CXXNullPtrLiteralExpr, { })
-DEF_TRAVERSE_STMT(CXXPseudoDestructorExpr, {
+DEF_TRAVERSE_STMT(CXXPseudoDestructorExpr, {
TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
if (TypeSourceInfo *ScopeInfo = S->getScopeTypeInfo())
TRY_TO(TraverseTypeLoc(ScopeInfo->getTypeLoc()));
@@ -1935,15 +2139,18 @@ DEF_TRAVERSE_STMT(CXXPseudoDestructorExpr, {
})
DEF_TRAVERSE_STMT(CXXThisExpr, { })
DEF_TRAVERSE_STMT(CXXThrowExpr, { })
+DEF_TRAVERSE_STMT(UserDefinedLiteral, { })
DEF_TRAVERSE_STMT(DesignatedInitExpr, { })
DEF_TRAVERSE_STMT(ExtVectorElementExpr, { })
DEF_TRAVERSE_STMT(GNUNullExpr, { })
DEF_TRAVERSE_STMT(ImplicitValueInitExpr, { })
+DEF_TRAVERSE_STMT(ObjCBoolLiteralExpr, { })
DEF_TRAVERSE_STMT(ObjCEncodeExpr, { })
DEF_TRAVERSE_STMT(ObjCIsaExpr, { })
DEF_TRAVERSE_STMT(ObjCIvarRefExpr, { })
DEF_TRAVERSE_STMT(ObjCMessageExpr, { })
DEF_TRAVERSE_STMT(ObjCPropertyRefExpr, { })
+DEF_TRAVERSE_STMT(ObjCSubscriptRefExpr, { })
DEF_TRAVERSE_STMT(ObjCProtocolExpr, { })
DEF_TRAVERSE_STMT(ObjCSelectorExpr, { })
DEF_TRAVERSE_STMT(ObjCIndirectCopyRestoreExpr, { })
@@ -1958,15 +2165,15 @@ DEF_TRAVERSE_STMT(StmtExpr, { })
DEF_TRAVERSE_STMT(UnresolvedLookupExpr, {
TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
if (S->hasExplicitTemplateArgs()) {
- TRY_TO(TraverseTemplateArgumentLocsHelper(S->getTemplateArgs(),
+ TRY_TO(TraverseTemplateArgumentLocsHelper(S->getTemplateArgs(),
S->getNumTemplateArgs()));
}
})
-
+
DEF_TRAVERSE_STMT(UnresolvedMemberExpr, {
TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
if (S->hasExplicitTemplateArgs()) {
- TRY_TO(TraverseTemplateArgumentLocsHelper(S->getTemplateArgs(),
+ TRY_TO(TraverseTemplateArgumentLocsHelper(S->getTemplateArgs(),
S->getNumTemplateArgs()));
}
})
@@ -2001,6 +2208,9 @@ DEF_TRAVERSE_STMT(FloatingLiteral, { })
DEF_TRAVERSE_STMT(ImaginaryLiteral, { })
DEF_TRAVERSE_STMT(StringLiteral, { })
DEF_TRAVERSE_STMT(ObjCStringLiteral, { })
+DEF_TRAVERSE_STMT(ObjCNumericLiteral, { })
+DEF_TRAVERSE_STMT(ObjCArrayLiteral, { })
+DEF_TRAVERSE_STMT(ObjCDictionaryLiteral, { })
// Traverse OpenCL: AsType, Convert.
DEF_TRAVERSE_STMT(AsTypeExpr, { })
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
index e87ca78..88abadb 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
@@ -64,22 +64,22 @@ public:
/// \brief Return the previous declaration of this declaration or NULL if this
/// is the first declaration.
- decl_type *getPreviousDeclaration() {
+ decl_type *getPreviousDecl() {
if (RedeclLink.NextIsPrevious())
return RedeclLink.getNext();
return 0;
}
- const decl_type *getPreviousDeclaration() const {
+ const decl_type *getPreviousDecl() const {
return const_cast<decl_type *>(
- static_cast<const decl_type*>(this))->getPreviousDeclaration();
+ static_cast<const decl_type*>(this))->getPreviousDecl();
}
/// \brief Return the first declaration of this declaration or itself if this
/// is the only declaration.
decl_type *getFirstDeclaration() {
decl_type *D = static_cast<decl_type*>(this);
- while (D->getPreviousDeclaration())
- D = D->getPreviousDeclaration();
+ while (D->getPreviousDecl())
+ D = D->getPreviousDecl();
return D;
}
@@ -87,8 +87,8 @@ public:
/// is the only declaration.
const decl_type *getFirstDeclaration() const {
const decl_type *D = static_cast<const decl_type*>(this);
- while (D->getPreviousDeclaration())
- D = D->getPreviousDeclaration();
+ while (D->getPreviousDecl())
+ D = D->getPreviousDecl();
return D;
}
@@ -98,12 +98,12 @@ public:
}
/// \brief Returns the most recent (re)declaration of this declaration.
- decl_type *getMostRecentDeclaration() {
+ decl_type *getMostRecentDecl() {
return getFirstDeclaration()->RedeclLink.getNext();
}
/// \brief Returns the most recent (re)declaration of this declaration.
- const decl_type *getMostRecentDeclaration() const {
+ const decl_type *getMostRecentDecl() const {
return getFirstDeclaration()->RedeclLink.getNext();
}
@@ -116,6 +116,7 @@ public:
/// Current - The current declaration.
decl_type *Current;
decl_type *Starter;
+ bool PassedFirst;
public:
typedef decl_type* value_type;
@@ -125,13 +126,24 @@ public:
typedef std::ptrdiff_t difference_type;
redecl_iterator() : Current(0) { }
- explicit redecl_iterator(decl_type *C) : Current(C), Starter(C) { }
+ explicit redecl_iterator(decl_type *C)
+ : Current(C), Starter(C), PassedFirst(false) { }
reference operator*() const { return Current; }
pointer operator->() const { return Current; }
redecl_iterator& operator++() {
assert(Current && "Advancing while iterator has reached end");
+ // Sanity check to avoid infinite loop on invalid redecl chain.
+ if (Current->isFirstDeclaration()) {
+ if (PassedFirst) {
+ assert(0 && "Passed first decl twice, invalid redecl chain!");
+ Current = 0;
+ return *this;
+ }
+ PassedFirst = true;
+ }
+
// Get either previous decl or latest decl.
decl_type *Next = Current->RedeclLink.getNext();
Current = (Next != Starter ? Next : 0);
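// Editorial sketch (usage example, not part of the diff): the shortened
// accessors walk the chain exactly as before, e.g. for any FunctionDecl *FD:
//
//   for (clang::FunctionDecl *D = FD; D; D = D->getPreviousDecl())
//     D->dump();                      // oldest declaration printed last
//   clang::FunctionDecl *Newest = FD->getMostRecentDecl();
//
// and the PassedFirst flag above merely asserts that a malformed chain
// cannot make redecl_iterator cycle forever.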
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
index 2a6fd6b..84bdfb8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
@@ -20,8 +20,9 @@
#include "clang/AST/StmtIterator.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/ASTContext.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
#include <string>
namespace llvm {
@@ -39,11 +40,11 @@ namespace clang {
class StringLiteral;
class SwitchStmt;
- //===----------------------------------------------------------------------===//
+ //===--------------------------------------------------------------------===//
// ExprIterator - Iterators for iterating over Stmt* arrays that contain
// only Expr*. This is needed because AST nodes use Stmt* arrays to store
// references to children (to be compatible with StmtIterator).
- //===----------------------------------------------------------------------===//
+ //===--------------------------------------------------------------------===//
class Stmt;
class Expr;
@@ -141,11 +142,14 @@ protected:
friend class CallExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class ObjCMessageExpr; // ctor
+ friend class ObjCArrayLiteral; // ctor
+ friend class ObjCDictionaryLiteral; // ctor
friend class ShuffleVectorExpr; // ctor
friend class ParenListExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class OverloadExpr; // ctor
+ friend class PseudoObjectExpr; // ctor
friend class AtomicExpr; // ctor
unsigned : NumStmtBits;
@@ -158,15 +162,39 @@ protected:
};
enum { NumExprBits = 16 };
+ class CharacterLiteralBitfields {
+ friend class CharacterLiteral;
+ unsigned : NumExprBits;
+
+ unsigned Kind : 2;
+ };
+
+ class FloatingLiteralBitfields {
+ friend class FloatingLiteral;
+ unsigned : NumExprBits;
+
+ unsigned IsIEEE : 1; // Distinguishes between PPC128 and IEEE128.
+ unsigned IsExact : 1;
+ };
+
+ class UnaryExprOrTypeTraitExprBitfields {
+ friend class UnaryExprOrTypeTraitExpr;
+ unsigned : NumExprBits;
+
+ unsigned Kind : 2;
+ unsigned IsType : 1; // true if operand is a type, false if an expression.
+ };
+
class DeclRefExprBitfields {
friend class DeclRefExpr;
friend class ASTStmtReader; // deserialization
unsigned : NumExprBits;
unsigned HasQualifier : 1;
- unsigned HasExplicitTemplateArgs : 1;
+ unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
+ unsigned RefersToEnclosingLocal : 1;
};
class CastExprBitfields {
@@ -184,6 +212,27 @@ protected:
unsigned NumPreArgs : 1;
};
+ class ExprWithCleanupsBitfields {
+ friend class ExprWithCleanups;
+ friend class ASTStmtReader; // deserialization
+
+ unsigned : NumExprBits;
+
+ unsigned NumObjects : 32 - NumExprBits;
+ };
+
+ class PseudoObjectExprBitfields {
+ friend class PseudoObjectExpr;
+ friend class ASTStmtReader; // deserialization
+
+ unsigned : NumExprBits;
+
+ // These don't need to be particularly wide, because they're
+ // strictly limited by the forms of expressions we permit.
+ unsigned NumSubExprs : 8;
+ unsigned ResultIndex : 32 - 8 - NumExprBits;
+ };
+
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
@@ -191,6 +240,38 @@ protected:
unsigned ShouldCopy : 1;
};
+ class InitListExprBitfields {
+ friend class InitListExpr;
+
+ unsigned : NumExprBits;
+
+ /// Whether this initializer list originally had a GNU array-range
+ /// designator in it. This is a temporary marker used by CodeGen.
+ unsigned HadArrayRangeDesignator : 1;
+
+ /// Whether this initializer list initializes a std::initializer_list
+ /// object.
+ unsigned InitializesStdInitializerList : 1;
+ };
+
+ class TypeTraitExprBitfields {
+ friend class TypeTraitExpr;
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+ unsigned : NumExprBits;
+
+ /// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
+ unsigned Kind : 8;
+
+ /// \brief If this expression is not value-dependent, this indicates whether
+ /// the trait evaluated true or false.
+ unsigned Value : 1;
+
+ /// \brief The number of arguments to this type trait.
+ unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
+ };
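// Editorial note (arithmetic from the declarations above): with
// NumExprBits == 16, TypeTraitExprBitfields packs into one 32-bit word:
//   Kind    : 8                      -> up to 256 type traits
//   Value   : 1
//   NumArgs : 32 - 8 - 1 - 16 = 7    -> at most 127 trait arguments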
+
union {
// FIXME: this is wasteful on 64-bit platforms.
void *Aligner;
@@ -198,13 +279,21 @@ protected:
StmtBitfields StmtBits;
CompoundStmtBitfields CompoundStmtBits;
ExprBitfields ExprBits;
+ CharacterLiteralBitfields CharacterLiteralBits;
+ FloatingLiteralBitfields FloatingLiteralBits;
+ UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
DeclRefExprBitfields DeclRefExprBits;
CastExprBitfields CastExprBits;
CallExprBitfields CallExprBits;
+ ExprWithCleanupsBitfields ExprWithCleanupsBits;
+ PseudoObjectExprBitfields PseudoObjectExprBits;
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
+ InitListExprBitfields InitListExprBits;
+ TypeTraitExprBitfields TypeTraitExprBits;
};
friend class ASTStmtReader;
+ friend class ASTStmtWriter;
public:
// Only allow allocation of Stmts using the allocator in ASTContext
@@ -234,20 +323,24 @@ public:
/// de-serialization).
struct EmptyShell { };
+private:
+ /// \brief Whether statistic collection is enabled.
+ static bool StatisticsEnabled;
+
protected:
/// \brief Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) {
StmtBits.sClass = SC;
- if (Stmt::CollectingStats()) Stmt::addStmtClass(SC);
+ if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
public:
Stmt(StmtClass SC) {
StmtBits.sClass = SC;
- if (Stmt::CollectingStats()) Stmt::addStmtClass(SC);
+ if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
- StmtClass getStmtClass() const {
+ StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
@@ -255,21 +348,20 @@ public:
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
- SourceRange getSourceRange() const;
-
- SourceLocation getLocStart() const { return getSourceRange().getBegin(); }
- SourceLocation getLocEnd() const { return getSourceRange().getEnd(); }
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY;
+ SourceLocation getLocEnd() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
- static bool CollectingStats(bool Enable = false);
+ static void EnableStatistics();
static void PrintStats();
/// dump - This does a local dump of the specified AST fragment. It dumps the
/// specified node and a few nodes underneath it, but not the whole subtree.
/// This is useful in a debugger.
- void dump() const;
- void dump(SourceManager &SM) const;
+ LLVM_ATTRIBUTE_USED void dump() const;
+ LLVM_ATTRIBUTE_USED void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
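// Editorial sketch (assumed usage, not part of the diff): the old boolean
// toggle Stmt::CollectingStats(bool Enable) becomes a one-way switch.
//
//   clang::Stmt::EnableStatistics();  // replaces CollectingStats(true)
//   // ... parse; every Stmt constructor calls addStmtClass() ...
//   clang::Stmt::PrintStats();        // dump the per-class counts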
@@ -384,7 +476,7 @@ public:
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(StartLoc, EndLoc);
}
@@ -433,7 +525,7 @@ public:
bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }
- SourceRange getSourceRange() const { return SourceRange(SemiLoc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(SemiLoc); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
@@ -483,7 +575,7 @@ public:
body_iterator body_begin() { return Body; }
body_iterator body_end() { return Body + size(); }
Stmt *body_back() { return !body_empty() ? Body[size()-1] : 0; }
-
+
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
Body[size()-1] = S;
@@ -513,7 +605,7 @@ public:
return const_reverse_body_iterator(body_begin());
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(LBracLoc, RBracLoc);
}
@@ -531,7 +623,7 @@ public:
child_range children() {
return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
}
-
+
const_child_range children() const {
return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
}
@@ -558,7 +650,7 @@ public:
return const_cast<SwitchCase*>(this)->getSubStmt();
}
- SourceRange getSourceRange() const { return SourceRange(); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
@@ -613,7 +705,7 @@ public:
void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
@@ -653,7 +745,7 @@ public:
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(DefaultLoc, SubStmt->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -665,7 +757,7 @@ public:
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};
-
+
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
///
@@ -690,7 +782,7 @@ public:
void setIdentLoc(SourceLocation L) { IdentLoc = L; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(IdentLoc, SubStmt->getLocEnd());
}
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
@@ -710,11 +802,11 @@ class IfStmt : public Stmt {
SourceLocation IfLoc;
SourceLocation ElseLoc;
-
+
public:
- IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+ IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = 0);
-
+
/// \brief Build an empty if/then/else statement
explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }
@@ -728,13 +820,13 @@ public:
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(ASTContext &C, VarDecl *V);
-
+
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
-
+
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
const Stmt *getThen() const { return SubExprs[THEN]; }
@@ -751,7 +843,7 @@ public:
SourceLocation getElseLoc() const { return ElseLoc; }
void setElseLoc(SourceLocation L) { ElseLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
if (SubExprs[ELSE])
return SourceRange(IfLoc, SubExprs[ELSE]->getLocEnd());
else
@@ -801,7 +893,7 @@ public:
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(ASTContext &C, VarDecl *V);
-
+
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
@@ -832,7 +924,8 @@ public:
SwitchLoc = SL;
}
void addSwitchCase(SwitchCase *SC) {
- assert(!SC->getNextSwitchCase() && "case/default already added to a switch");
+ assert(!SC->getNextSwitchCase()
+ && "case/default already added to a switch");
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}
@@ -849,7 +942,7 @@ public:
return (bool) AllEnumCasesCovered;
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(SwitchLoc, SubExprs[BODY]->getLocEnd());
}
// Iterators
@@ -871,7 +964,7 @@ class WhileStmt : public Stmt {
Stmt* SubExprs[END_EXPR];
SourceLocation WhileLoc;
public:
- WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+ WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL);
/// \brief Build an empty while statement.
@@ -904,7 +997,7 @@ public:
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(WhileLoc, SubExprs[BODY]->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -953,7 +1046,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(DoLoc, RParenLoc);
}
static bool classof(const Stmt *T) {
@@ -979,14 +1072,14 @@ class ForStmt : public Stmt {
SourceLocation LParenLoc, RParenLoc;
public:
- ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc,
+ ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc,
Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP);
/// \brief Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }
Stmt *getInit() { return SubExprs[INIT]; }
-
+
/// \brief Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
@@ -997,7 +1090,7 @@ public:
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(ASTContext &C, VarDecl *V);
-
+
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
@@ -1025,7 +1118,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(ForLoc, SubExprs[BODY]->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -1060,7 +1153,7 @@ public:
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(GotoLoc, LabelLoc);
}
static bool classof(const Stmt *T) {
@@ -1104,7 +1197,7 @@ public:
return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(GotoLoc, Target->getLocEnd());
}
@@ -1131,7 +1224,7 @@ public:
SourceLocation getContinueLoc() const { return ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueLoc = L; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(ContinueLoc);
}
@@ -1157,7 +1250,7 @@ public:
SourceLocation getBreakLoc() const { return BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakLoc = L; }
- SourceRange getSourceRange() const { return SourceRange(BreakLoc); }
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(BreakLoc); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
@@ -1182,7 +1275,7 @@ class ReturnStmt : public Stmt {
Stmt *RetExpr;
SourceLocation RetLoc;
const VarDecl *NRVOCandidate;
-
+
public:
ReturnStmt(SourceLocation RL)
: Stmt(ReturnStmtClass), RetExpr(0), RetLoc(RL), NRVOCandidate(0) { }
@@ -1208,8 +1301,8 @@ public:
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }
-
- SourceRange getSourceRange() const;
+
+ SourceRange getSourceRange() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
@@ -1242,16 +1335,16 @@ class AsmStmt : public Stmt {
StringLiteral **Constraints;
Stmt **Exprs;
StringLiteral **Clobbers;
-
+
public:
- AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile,
+ AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile,
bool msasm, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints,
Expr **exprs, StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc);
/// \brief Build an empty inline-assembly statement.
- explicit AsmStmt(EmptyShell Empty) : Stmt(AsmStmtClass, Empty),
+ explicit AsmStmt(EmptyShell Empty) : Stmt(AsmStmtClass, Empty),
Names(0), Constraints(0), Exprs(0), Clobbers(0) { }
SourceLocation getAsmLoc() const { return AsmLoc; }
@@ -1333,7 +1426,7 @@ public:
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
-
+
return StringRef();
}
@@ -1394,7 +1487,7 @@ public:
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
-
+
const Expr *getInputExpr(unsigned i) const {
return const_cast<AsmStmt*>(this)->getInputExpr(i);
}
@@ -1404,7 +1497,7 @@ public:
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
- unsigned NumInputs,
+ unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers);
@@ -1419,7 +1512,7 @@ public:
StringLiteral *getClobber(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobber(unsigned i) const { return Clobbers[i]; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AsmLoc, RParenLoc);
}
@@ -1490,15 +1583,20 @@ public:
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getExceptLoc(), getEndLoc());
}
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }
- Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); }
- CompoundStmt *getBlock() const { return llvm::cast<CompoundStmt>(Children[BLOCK]); }
+ Expr *getFilterExpr() const {
+ return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
+ }
+
+ CompoundStmt *getBlock() const {
+ return llvm::cast<CompoundStmt>(Children[BLOCK]);
+ }
child_range children() {
return child_range(Children,Children+2);
@@ -1528,7 +1626,7 @@ public:
SourceLocation FinallyLoc,
Stmt *Block);
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getFinallyLoc(), getEndLoc());
}
@@ -1572,7 +1670,7 @@ public:
Stmt *TryBlock,
Stmt *Handler);
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getTryLoc(), getEndLoc());
}
@@ -1580,7 +1678,11 @@ public:
SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }
bool getIsCXXTry() const { return IsCXXTry; }
- CompoundStmt* getTryBlock() const { return llvm::cast<CompoundStmt>(Children[TRY]); }
+
+ CompoundStmt* getTryBlock() const {
+ return llvm::cast<CompoundStmt>(Children[TRY]);
+ }
+
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
@@ -1596,7 +1698,6 @@ public:
}
static bool classof(SEHTryStmt *) { return true; }
-
};
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
index 42dcf2b..a948722 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_AST_STMTCXX_H
#include "clang/AST/Stmt.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
@@ -37,7 +38,7 @@ public:
CXXCatchStmt(EmptyShell Empty)
: Stmt(CXXCatchStmtClass), ExceptionDecl(0), HandlerBlock(0) {}
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(CatchLoc, HandlerBlock->getLocEnd());
}
@@ -83,7 +84,7 @@ public:
static CXXTryStmt *Create(ASTContext &C, EmptyShell Empty,
unsigned numHandlers);
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getTryLoc(), getEndLoc());
}
@@ -148,7 +149,9 @@ public:
DeclStmt *getRangeStmt() { return cast<DeclStmt>(SubExprs[RANGE]); }
- DeclStmt *getBeginEndStmt() { return cast_or_null<DeclStmt>(SubExprs[BEGINEND]); }
+ DeclStmt *getBeginEndStmt() {
+ return cast_or_null<DeclStmt>(SubExprs[BEGINEND]);
+ }
Expr *getCond() { return cast_or_null<Expr>(SubExprs[COND]); }
Expr *getInc() { return cast_or_null<Expr>(SubExprs[INC]); }
DeclStmt *getLoopVarStmt() { return cast<DeclStmt>(SubExprs[LOOPVAR]); }
@@ -187,7 +190,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(ForLoc, SubExprs[BODY]->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -201,6 +204,91 @@ public:
}
};
+/// \brief Representation of a Microsoft __if_exists or __if_not_exists
+/// statement with a dependent name.
+///
+/// The __if_exists statement can be used to include a sequence of statements
+/// in the program only when a particular dependent name exists. For
+/// example:
+///
+/// \code
+/// template<typename T>
+/// void call_foo(T &t) {
+/// __if_exists (T::foo) {
+/// t.foo(); // okay: only called when T::foo exists.
+/// }
+/// }
+/// \endcode
+///
+/// Similarly, the __if_not_exists statement can be used to include the
+/// statements when a particular name does not exist.
+///
+/// Note that this statement only captures __if_exists and __if_not_exists
+/// statements whose name is dependent. All non-dependent cases are handled
+/// directly in the parser, so that they don't introduce a new scope. Clang
+/// introduces scopes in the dependent case to keep names inside the compound
+/// statement from leaking out into the surrounding statements, which would
+/// compromise the template instantiation model. This behavior differs from
+/// Visual C++ (which never introduces a scope), but is a fairly reasonable
+/// approximation of the VC++ behavior.
+class MSDependentExistsStmt : public Stmt {
+ SourceLocation KeywordLoc;
+ bool IsIfExists;
+ NestedNameSpecifierLoc QualifierLoc;
+ DeclarationNameInfo NameInfo;
+ Stmt *SubStmt;
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+
+public:
+ MSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ CompoundStmt *SubStmt)
+ : Stmt(MSDependentExistsStmtClass),
+ KeywordLoc(KeywordLoc), IsIfExists(IsIfExists),
+ QualifierLoc(QualifierLoc), NameInfo(NameInfo),
+ SubStmt(reinterpret_cast<Stmt *>(SubStmt)) { }
+
+ /// \brief Retrieve the location of the __if_exists or __if_not_exists
+ /// keyword.
+ SourceLocation getKeywordLoc() const { return KeywordLoc; }
+
+ /// \brief Determine whether this is an __if_exists statement.
+ bool isIfExists() const { return IsIfExists; }
+
+ /// \brief Determine whether this is an __if_not_exists statement.
+ bool isIfNotExists() const { return !IsIfExists; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies this name, if
+ /// any.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief Retrieve the name of the entity we're testing for, along with
+ /// location information.
+ DeclarationNameInfo getNameInfo() const { return NameInfo; }
+
+ /// \brief Retrieve the compound statement that will be included in the
+ /// program only if the existence of the symbol matches the initial keyword.
+ CompoundStmt *getSubStmt() const {
+ return reinterpret_cast<CompoundStmt *>(SubStmt);
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(KeywordLoc, SubStmt->getLocEnd());
+ }
+
+ child_range children() {
+ return child_range(&SubStmt, &SubStmt+1);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == MSDependentExistsStmtClass;
+ }
+
+ static bool classof(MSDependentExistsStmt *) { return true; }
+};
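// Editorial example (illustration, not part of the diff): the same class
// models the dependent __if_not_exists form, distinguished by isIfExists():
//
//   template<typename T>
//   void call_fallback(T &t) {
//     __if_not_exists (T::foo) {
//       t.fallback();                // only compiled when T::foo is absent
//     }
//   }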
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h b/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
index 05b50db..b933ed0 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
@@ -90,14 +90,12 @@ public:
StmtIteratorImpl(const VariableArrayType *t) : StmtIteratorBase(t) {}
DERIVED& operator++() {
- if (inDecl() || inDeclGroup()) {
- if (getVAPtr()) NextVA();
- else NextDecl();
- }
- else if (inSizeOfTypeVA())
+ if (inStmt())
+ ++stmt;
+ else if (getVAPtr())
NextVA();
else
- ++stmt;
+ NextDecl();
return static_cast<DERIVED&>(*this);
}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
index d996fc5..a321041 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_AST_STMTOBJC_H
#include "clang/AST/Stmt.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
@@ -55,7 +56,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(ForLoc, SubExprs[BODY]->getLocEnd());
}
static bool classof(const Stmt *T) {
@@ -103,7 +104,7 @@ public:
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtCatchLoc, Body->getLocEnd());
}
@@ -133,7 +134,7 @@ public:
Stmt *getFinallyBody() { return AtFinallyStmt; }
void setFinallyBody(Stmt *S) { AtFinallyStmt = S; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtFinallyLoc, AtFinallyStmt->getLocEnd());
}
@@ -240,7 +241,7 @@ public:
getStmts()[1 + NumCatchStmts] = S;
}
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == ObjCAtTryStmtClass;
@@ -294,7 +295,7 @@ public:
}
void setSynchExpr(Stmt *S) { SubStmts[SYNC_EXPR] = S; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtSynchronizedLoc, getSynchBody()->getLocEnd());
}
@@ -327,7 +328,7 @@ public:
SourceLocation getThrowLoc() { return AtThrowLoc; }
void setThrowLoc(SourceLocation Loc) { AtThrowLoc = Loc; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
if (Throw)
return SourceRange(AtThrowLoc, Throw->getLocEnd());
else
@@ -360,7 +361,7 @@ public:
Stmt *getSubStmt() { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(AtLoc, SubStmt->getLocEnd());
}
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h
index 48a0123..38c4c02 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h
@@ -42,7 +42,6 @@ public:
// below.
if (PTR(BinaryOperator) BinOp = dyn_cast<BinaryOperator>(S)) {
switch (BinOp->getOpcode()) {
- default: llvm_unreachable("Unknown binary operator!");
case BO_PtrMemD: DISPATCH(BinPtrMemD, BinaryOperator);
case BO_PtrMemI: DISPATCH(BinPtrMemI, BinaryOperator);
case BO_Mul: DISPATCH(BinMul, BinaryOperator);
@@ -80,7 +79,6 @@ public:
}
} else if (PTR(UnaryOperator) UnOp = dyn_cast<UnaryOperator>(S)) {
switch (UnOp->getOpcode()) {
- default: llvm_unreachable("Unknown unary operator!");
case UO_PostInc: DISPATCH(UnaryPostInc, UnaryOperator);
case UO_PostDec: DISPATCH(UnaryPostDec, UnaryOperator);
case UO_PreInc: DISPATCH(UnaryPreInc, UnaryOperator);
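// Editorial note (assumed motivation, not stated in the diff): dropping the
// default: llvm_unreachable(...) arms from these fully-covered switches lets
// the compiler's covered-switch warnings flag any BinaryOperator or
// UnaryOperator opcode that is added later but never dispatched here.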
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
index 371c27a..65f5460 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
@@ -15,11 +15,12 @@
#ifndef LLVM_CLANG_AST_TEMPLATEBASE_H
#define LLVM_CLANG_AST_TEMPLATEBASE_H
+#include "clang/AST/Type.h"
+#include "clang/AST/TemplateName.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
-#include "clang/AST/Type.h"
-#include "clang/AST/TemplateName.h"
namespace llvm {
class FoldingSetNodeID;
@@ -100,7 +101,6 @@ public:
/// declaration, which is either an external declaration or a
/// template declaration.
TemplateArgument(Decl *D) : Kind(Declaration) {
- // FIXME: Need to be sure we have the "canonical" declaration!
TypeOrValue = reinterpret_cast<uintptr_t>(D);
}
@@ -457,7 +457,7 @@ public:
}
/// \brief - Fetches the full source range of the argument.
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
const TemplateArgument &getArgument() const {
return Argument;
@@ -589,7 +589,42 @@ struct ASTTemplateArgumentListInfo {
bool &ContainsUnexpandedParameterPack);
void copyInto(TemplateArgumentListInfo &List) const;
static std::size_t sizeFor(unsigned NumTemplateArgs);
- static std::size_t sizeFor(const TemplateArgumentListInfo &List);
+};
+
+/// \brief Extends ASTTemplateArgumentListInfo with the source location
+/// information for the template keyword; this is used as part of the
+/// representation of qualified identifiers, such as S<T>::template apply<T>.
+struct ASTTemplateKWAndArgsInfo : public ASTTemplateArgumentListInfo {
+ typedef ASTTemplateArgumentListInfo Base;
+
+ // NOTE: the source location of the (optional) template keyword is
+ // stored after all template arguments.
+
+ /// \brief Get the source location of the template keyword.
+ SourceLocation getTemplateKeywordLoc() const {
+ return *reinterpret_cast<const SourceLocation*>
+ (getTemplateArgs() + NumTemplateArgs);
+ }
+
+ /// \brief Sets the source location of the template keyword.
+ void setTemplateKeywordLoc(SourceLocation TemplateKWLoc) {
+ *reinterpret_cast<SourceLocation*>
+ (getTemplateArgs() + NumTemplateArgs) = TemplateKWLoc;
+ }
+
+ static const ASTTemplateKWAndArgsInfo*
+ Create(ASTContext &C, SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &List);
+
+ void initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &List);
+ void initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &List,
+ bool &Dependent, bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack);
+ void initializeFrom(SourceLocation TemplateKWLoc);
+
+ static std::size_t sizeFor(unsigned NumTemplateArgs);
};
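// Editorial note (layout implied by the accessors above; the exact sizeFor
// arithmetic is an assumption, not shown in this diff):
//
//   [ ASTTemplateKWAndArgsInfo fields            ]
//   [ TemplateArgumentLoc 0 .. NumTemplateArgs-1 ]  <- getTemplateArgs()
//   [ SourceLocation of the 'template' keyword   ]  <- get/setTemplateKeywordLoc()
//
// so sizeFor(N) must reserve one trailing SourceLocation beyond what the
// base ASTTemplateArgumentListInfo needs for N arguments.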
const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Type.h b/contrib/llvm/tools/clang/include/clang/AST/Type.h
index dd9aa56..7bd367c 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Type.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Type.h
@@ -95,7 +95,7 @@ namespace clang {
class ExtQualsTypeCommonBase;
struct PrintingPolicy;
- template <typename> class CanQual;
+ template <typename> class CanQual;
typedef CanQual<Type> CanQualType;
// Provide forward declarations for all of the *Type classes
@@ -236,7 +236,7 @@ public:
qs.removeObjCGCAttr();
return qs;
}
- Qualifiers withoutObjCGLifetime() const {
+ Qualifiers withoutObjCLifetime() const {
Qualifiers qs = *this;
qs.removeObjCLifetime();
return qs;
@@ -252,7 +252,8 @@ public:
void removeObjCLifetime() { setObjCLifetime(OCL_None); }
void addObjCLifetime(ObjCLifetime type) {
assert(type);
- setObjCLifetime(type);
+ assert(!hasObjCLifetime());
+ Mask |= (type << LifetimeShift);
}
/// True if the lifetime is neither None nor ExplicitNone.
@@ -266,7 +267,7 @@ public:
ObjCLifetime lifetime = getObjCLifetime();
return (lifetime == OCL_Strong || lifetime == OCL_Weak);
}
-
+
bool hasAddressSpace() const { return Mask & AddressSpaceMask; }
unsigned getAddressSpace() const { return Mask >> AddressSpaceShift; }
void setAddressSpace(unsigned space) {
@@ -346,7 +347,7 @@ public:
/// Generally this answers the question of whether an object with the other
/// qualifiers can be safely used as an object with these qualifiers.
bool compatiblyIncludes(Qualifiers other) const {
- return
+ return
// Address spaces must match exactly.
getAddressSpace() == other.getAddressSpace() &&
// ObjC GC qualifiers can match, be added, or be removed, but can't be
@@ -363,24 +364,24 @@ public:
/// qualifiers from the narrow perspective of Objective-C ARC lifetime.
///
/// One set of Objective-C lifetime qualifiers compatibly includes the other
- /// if the lifetime qualifiers match, or if both are non-__weak and the
+ /// if the lifetime qualifiers match, or if both are non-__weak and the
/// including set also contains the 'const' qualifier.
bool compatiblyIncludesObjCLifetime(Qualifiers other) const {
if (getObjCLifetime() == other.getObjCLifetime())
return true;
-
+
if (getObjCLifetime() == OCL_Weak || other.getObjCLifetime() == OCL_Weak)
return false;
-
+
return hasConst();
}
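// Editorial examples (derived from the rule above): lifetimes that match
// always compatibly include one another; otherwise neither side may be
// __weak and the including side must carry 'const'. For instance,
// 'const __strong T' compatibly includes '__unsafe_unretained T', while
// '__weak T' never compatibly includes any other lifetime.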
-
+
bool isSupersetOf(Qualifiers Other) const;
/// \brief Determine whether this set of qualifiers is a strict superset of
/// another set of qualifiers, not considering qualifier compatibility.
bool isStrictSupersetOf(Qualifiers Other) const;
-
+
bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; }
@@ -397,7 +398,7 @@ public:
L += R;
return L;
}
-
+
Qualifiers &operator-=(Qualifiers R) {
Mask = Mask & ~(R.Mask);
return *this;
@@ -408,7 +409,7 @@ public:
L -= R;
return L;
}
-
+
std::string getAsString() const;
std::string getAsString(const PrintingPolicy &Policy) const {
std::string Buffer;
@@ -447,7 +448,32 @@ enum CallingConv {
CC_AAPCS_VFP // __attribute__((pcs("aapcs-vfp")))
};
-typedef std::pair<const Type*, Qualifiers> SplitQualType;
+/// A std::pair-like structure for storing a qualified type split
+/// into its local qualifiers and its locally-unqualified type.
+struct SplitQualType {
+ /// The locally-unqualified type.
+ const Type *Ty;
+
+ /// The local qualifiers.
+ Qualifiers Quals;
+
+ SplitQualType() : Ty(0), Quals() {}
+ SplitQualType(const Type *ty, Qualifiers qs) : Ty(ty), Quals(qs) {}
+
+ SplitQualType getSingleStepDesugaredType() const; // defined at the end of this file
+
+ // Make llvm::tie work.
+ operator std::pair<const Type *,Qualifiers>() const {
+ return std::pair<const Type *,Qualifiers>(Ty, Quals);
+ }
+
+ friend bool operator==(SplitQualType a, SplitQualType b) {
+ return a.Ty == b.Ty && a.Quals == b.Quals;
+ }
+ friend bool operator!=(SplitQualType a, SplitQualType b) {
+ return a.Ty != b.Ty || a.Quals != b.Quals;
+ }
+};
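// Editorial sketch (usage, not part of the diff): callers that unpacked the
// old std::pair typedef keep working through the conversion operator, e.g.
//
//   const clang::Type *Ty;
//   clang::Qualifiers Quals;
//   llvm::tie(Ty, Quals) = QT.split();  // QT is an assumed QualType value
//
// while new code can use the named Ty/Quals members directly.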
/// QualType - For efficiency, we don't store CV-qualified types as nodes on
/// their own: instead each reference to a type stores the qualifiers. This
@@ -501,7 +527,7 @@ public:
/// This function requires that the type not be NULL. If the type might be
/// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
const Type *getTypePtr() const;
-
+
const Type *getTypePtrOrNull() const;
/// Retrieves a pointer to the name of the base type.
@@ -534,27 +560,27 @@ public:
return Value.getPointer().isNull();
}
- /// \brief Determine whether this particular QualType instance has the
+ /// \brief Determine whether this particular QualType instance has the
/// "const" qualifier set, without looking through typedefs that may have
/// added "const" at a different level.
bool isLocalConstQualified() const {
return (getLocalFastQualifiers() & Qualifiers::Const);
}
-
+
/// \brief Determine whether this type is const-qualified.
bool isConstQualified() const;
-
- /// \brief Determine whether this particular QualType instance has the
+
+ /// \brief Determine whether this particular QualType instance has the
/// "restrict" qualifier set, without looking through typedefs that may have
/// added "restrict" at a different level.
bool isLocalRestrictQualified() const {
return (getLocalFastQualifiers() & Qualifiers::Restrict);
}
-
+
/// \brief Determine whether this type is restrict-qualified.
bool isRestrictQualified() const;
-
- /// \brief Determine whether this particular QualType instance has the
+
+ /// \brief Determine whether this particular QualType instance has the
/// "volatile" qualifier set, without looking through typedefs that may have
/// added "volatile" at a different level.
bool isLocalVolatileQualified() const {
@@ -563,9 +589,9 @@ public:
/// \brief Determine whether this type is volatile-qualified.
bool isVolatileQualified() const;
-
+
/// \brief Determine whether this particular QualType instance has any
- /// qualifiers, without looking through any typedefs that might add
+ /// qualifiers, without looking through any typedefs that might add
/// qualifiers at a different level.
bool hasLocalQualifiers() const {
return getLocalFastQualifiers() || hasLocalNonFastQualifiers();
@@ -573,7 +599,7 @@ public:
/// \brief Determine whether this type has any qualifiers.
bool hasQualifiers() const;
-
+
/// \brief Determine whether this particular QualType instance has any
/// "non-fast" qualifiers, e.g., those that are stored in an ExtQualType
/// instance.
@@ -588,15 +614,15 @@ public:
/// \brief Retrieve the set of qualifiers applied to this type.
Qualifiers getQualifiers() const;
-
- /// \brief Retrieve the set of CVR (const-volatile-restrict) qualifiers
+
+ /// \brief Retrieve the set of CVR (const-volatile-restrict) qualifiers
/// local to this particular QualType instance, not including any qualifiers
/// acquired through typedefs or other sugar.
unsigned getLocalCVRQualifiers() const {
return getLocalFastQualifiers();
}
- /// \brief Retrieve the set of CVR (const-volatile-restrict) qualifiers
+ /// \brief Retrieve the set of CVR (const-volatile-restrict) qualifiers
/// applied to this type.
unsigned getCVRQualifiers() const;
@@ -612,10 +638,10 @@ public:
/// compilation's language.
/// (C++0x [basic.types]p9)
bool isCXX11PODType(ASTContext &Context) const;
-
+
/// isTrivialType - Return true if this is a trivial type
/// (C++0x [basic.types]p9)
- bool isTrivialType(ASTContext &Context) const;
+ bool isTrivialType(ASTContext &Context) const;
/// isTriviallyCopyableType - Return true if this is a trivially
/// copyable type (C++0x [basic.types]p9)
@@ -624,7 +650,7 @@ public:
// Don't promise in the API that anything besides 'const' can be
// easily added.
- /// addConst - add the specified type qualifier to this QualType.
+ /// addConst - add the specified type qualifier to this QualType.
void addConst() {
addFastQualifiers(Qualifiers::Const);
}
@@ -632,13 +658,21 @@ public:
return withFastQualifiers(Qualifiers::Const);
}
- /// addVolatile - add the specified type qualifier to this QualType.
+ /// addVolatile - add the specified type qualifier to this QualType.
void addVolatile() {
addFastQualifiers(Qualifiers::Volatile);
}
QualType withVolatile() const {
return withFastQualifiers(Qualifiers::Volatile);
}
+
+ /// Add the restrict qualifier to this QualType.
+ void addRestrict() {
+ addFastQualifiers(Qualifiers::Restrict);
+ }
+ QualType withRestrict() const {
+ return withFastQualifiers(Qualifiers::Restrict);
+ }
QualType withCVRQualifiers(unsigned CVR) const {
return withFastQualifiers(CVR);
@@ -719,7 +753,7 @@ public:
/// type. To strip qualifiers even from within an array type, use
/// ASTContext::getUnqualifiedArrayType.
inline SplitQualType getSplitUnqualifiedType() const;
-
+
/// \brief Determine whether this type is more qualified than the other
/// given type, requiring exact equality for non-CVR qualifiers.
bool isMoreQualifiedThan(QualType Other) const;
@@ -727,19 +761,19 @@ public:
/// \brief Determine whether this type is at least as qualified as the other
/// given type, requiring exact equality for non-CVR qualifiers.
bool isAtLeastAsQualifiedAs(QualType Other) const;
-
+
QualType getNonReferenceType() const;
/// \brief Determine the type of a (typically non-lvalue) expression with the
/// specified result type.
- ///
+ ///
/// This routine should be used for expressions for which the return type is
/// explicitly specified (e.g., in a cast or call) and isn't necessarily
- /// an lvalue. It removes a top-level reference (since there are no
+ /// an lvalue. It removes a top-level reference (since there are no
/// expressions of reference type) and deletes top-level cvr-qualifiers
/// from non-class types (in C++) or all types (in C).
QualType getNonLValueExprType(ASTContext &Context) const;
-
+
/// getDesugaredType - Return the specified type with any "sugar" removed from
/// the type. This takes off typedefs, typeof's etc. If the outer level of
/// the type is already concrete, it returns it unmodified. This is similar
@@ -757,12 +791,14 @@ public:
}
/// \brief Return the specified type with one level of "sugar" removed from
- /// the type.
+ /// the type.
///
/// This routine takes off the first typedef, typeof, etc. If the outer level
/// of the type is already concrete, it returns it unmodified.
- QualType getSingleStepDesugaredType(const ASTContext &Context) const;
-
+ QualType getSingleStepDesugaredType(const ASTContext &Context) const {
+ return getSingleStepDesugaredTypeImpl(*this, Context);
+ }
+
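// Editorial example (illustration, not part of the diff): one step peels a
// single layer of sugar, e.g. given
//
//   typedef int Int;  typedef Int Int2;
//
// getSingleStepDesugaredType on 'Int2' yields 'Int' (not 'int'), whereas
// getDesugaredType strips all the way down to 'int'.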
/// IgnoreParens - Returns the specified type after dropping any
/// outer-level parentheses.
QualType IgnoreParens() const {
@@ -783,7 +819,7 @@ public:
return getAsString(split());
}
static std::string getAsString(SplitQualType split) {
- return getAsString(split.first, split.second);
+ return getAsString(split.Ty, split.Quals);
}
static std::string getAsString(const Type *ty, Qualifiers qs);
@@ -798,7 +834,7 @@ public:
}
static void getAsStringInternal(SplitQualType split, std::string &out,
const PrintingPolicy &policy) {
- return getAsStringInternal(split.first, split.second, out, policy);
+ return getAsStringInternal(split.Ty, split.Quals, out, policy);
}
static void getAsStringInternal(const Type *ty, Qualifiers qs,
std::string &out,
@@ -855,7 +891,7 @@ public:
return isDestructedTypeImpl(*this);
}
- /// \brief Determine whether expressions of the given type are forbidden
+ /// \brief Determine whether expressions of the given type are forbidden
/// from being lvalues in C.
///
/// The expression types that are forbidden to be lvalues are:
@@ -870,7 +906,7 @@ public:
/// \brief Determine whether this type has trivial copy/move-assignment
/// semantics.
bool hasTrivialAssignment(ASTContext &Context, bool Copying) const;
-
+
private:
// These methods are implemented in a separate translation unit;
// "static"-ize them to avoid creating temporary QualTypes in the
@@ -879,6 +915,8 @@ private:
static QualType getDesugaredType(QualType T, const ASTContext &Context);
static SplitQualType getSplitDesugaredType(QualType T);
static SplitQualType getSplitUnqualifiedTypeImpl(QualType type);
+ static QualType getSingleStepDesugaredTypeImpl(QualType type,
+ const ASTContext &C);
static QualType IgnoreParens(QualType T);
static DestructionKind isDestructedTypeImpl(QualType type);
};
@@ -915,7 +953,7 @@ public:
namespace clang {
-/// \brief Base class that is common to both the \c ExtQuals and \c Type
+/// \brief Base class that is common to both the \c ExtQuals and \c Type
/// classes, which allows \c QualType to access the common fields between the
/// two.
///
@@ -926,7 +964,7 @@ class ExtQualsTypeCommonBase {
/// \brief The "base" type of an extended qualifiers type (\c ExtQuals) or
/// a self-referential pointer (for \c Type).
///
- /// This pointer allows an efficient mapping from a QualType to its
+ /// This pointer allows an efficient mapping from a QualType to its
/// underlying type pointer.
const Type *const BaseType;
@@ -937,14 +975,14 @@ class ExtQualsTypeCommonBase {
friend class Type;
friend class ExtQuals;
};
-
+
/// ExtQuals - We can encode up to four bits in the low bits of a
/// type pointer, but there are many more type qualifiers that we want
/// to be able to apply to an arbitrary type. Therefore we have this
/// struct, intended to be heap-allocated and used by QualType to
/// store qualifiers.
///
-/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
+/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
/// in three low bits on the QualType pointer; a fourth bit records whether
/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
/// Objective-C GC attributes) are much more rare.
@@ -969,7 +1007,7 @@ class ExtQuals : public ExtQualsTypeCommonBase, public llvm::FoldingSetNode {
ExtQuals *this_() { return this; }
public:
- ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
+ ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
: ExtQualsTypeCommonBase(baseType,
canon.isNull() ? QualType(this_(), 0) : canon),
Quals(quals)
@@ -1008,8 +1046,8 @@ public:
}
};
-/// \brief The kind of C++0x ref-qualifier associated with a function type,
-/// which determines whether a member function's "this" object can be an
+/// \brief The kind of C++0x ref-qualifier associated with a function type,
+/// which determines whether a member function's "this" object can be an
/// lvalue, rvalue, or neither.
enum RefQualifierKind {
/// \brief No ref-qualifier was provided.
@@ -1019,7 +1057,7 @@ enum RefQualifierKind {
/// \brief An rvalue ref-qualifier was provided (\c &&).
RQ_RValue
};
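For orientation, the three enumerators correspond to member declarations like these:

    struct S {
      void f();      // RQ_None: no ref-qualifier written
      void g() &;    // RQ_LValue: callable only when the object is an lvalue
      void h() &&;   // RQ_RValue: callable only when the object is an rvalue
    };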
-
+
/// Type - This is the base class of the type hierarchy. A central concept
/// with types is that each type always has a canonical type. A canonical type
/// is the type with any typedef names stripped out of it or the types it
@@ -1071,29 +1109,32 @@ private:
/// Note that this should stay at the end of the ivars for Type so that
/// subclasses can pack their bitfields into the same word.
unsigned Dependent : 1;
-
- /// \brief Whether this type somehow involves a template parameter, even
+
+ /// \brief Whether this type somehow involves a template parameter, even
/// if the resolution of the type does not depend on a template parameter.
unsigned InstantiationDependent : 1;
-
+
/// \brief Whether this type is a variably-modified type (C99 6.7.5).
unsigned VariablyModified : 1;
/// \brief Whether this type contains an unexpanded parameter pack
/// (for C++0x variadic templates).
unsigned ContainsUnexpandedParameterPack : 1;
-
+
/// \brief Nonzero if the cache (i.e. the bitfields here starting
/// with 'Cache') is valid. If so, then this is a
/// LangOptions::VisibilityMode+1.
mutable unsigned CacheValidAndVisibility : 2;
-
+
+ /// \brief True if the visibility was set explicitly in the source code.
+ mutable unsigned CachedExplicitVisibility : 1;
+
/// \brief Linkage of this type.
mutable unsigned CachedLinkage : 2;
- /// \brief Whether this type involves and local or unnamed types.
+ /// \brief Whether this type involves any local or unnamed types.
mutable unsigned CachedLocalOrUnnamed : 1;
-
+
/// \brief FromAST - Whether this type comes from an AST file.
mutable unsigned FromAST : 1;
@@ -1104,6 +1145,10 @@ private:
assert(isCacheValid() && "getting linkage from invalid cache");
return static_cast<Visibility>(CacheValidAndVisibility-1);
}
+ bool isVisibilityExplicit() const {
+ assert(isCacheValid() && "getting visibility from invalid cache");
+ return CachedExplicitVisibility;
+ }
Linkage getLinkage() const {
assert(isCacheValid() && "getting linkage from invalid cache");
return static_cast<Linkage>(CachedLinkage);
@@ -1113,7 +1158,7 @@ private:
return CachedLocalOrUnnamed;
}
};
- enum { NumTypeBits = 18 };
+ enum { NumTypeBits = 19 };
protected:
// These classes allow subclasses to somewhat cleanly pack bitfields
@@ -1152,9 +1197,6 @@ protected:
/// regparm and the calling convention.
unsigned ExtInfo : 8;
- /// Whether the function is variadic. Only used by FunctionProtoType.
- unsigned Variadic : 1;
-
/// TypeQuals - Used only by FunctionProtoType, put here to pack with the
/// other bitfields.
/// The qualifiers are part of FunctionProtoType because...
@@ -1162,7 +1204,7 @@ protected:
/// C++ 8.3.5p4: The return type, the parameter type list and the
/// cv-qualifier-seq, [...], are part of the function type.
unsigned TypeQuals : 3;
-
+
/// \brief The ref-qualifier associated with a \c FunctionProtoType.
///
/// This is a value of type \c RefQualifierKind.
@@ -1247,7 +1289,7 @@ protected:
private:
/// \brief Set whether this type comes from an AST file.
- void setFromAST(bool V = true) const {
+ void setFromAST(bool V = true) const {
TypeBits.FromAST = V;
}
@@ -1256,7 +1298,7 @@ private:
protected:
// silence VC++ warning C4355: 'this' : used in base member initializer list
Type *this_() { return this; }
- Type(TypeClass tc, QualType canon, bool Dependent,
+ Type(TypeClass tc, QualType canon, bool Dependent,
bool InstantiationDependent, bool VariablyModified,
bool ContainsUnexpandedParameterPack)
: ExtQualsTypeCommonBase(this,
@@ -1267,20 +1309,21 @@ protected:
TypeBits.VariablyModified = VariablyModified;
TypeBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
TypeBits.CacheValidAndVisibility = 0;
+ TypeBits.CachedExplicitVisibility = false;
TypeBits.CachedLocalOrUnnamed = false;
TypeBits.CachedLinkage = NoLinkage;
TypeBits.FromAST = false;
}
friend class ASTContext;
- void setDependent(bool D = true) {
- TypeBits.Dependent = D;
+ void setDependent(bool D = true) {
+ TypeBits.Dependent = D;
if (D)
TypeBits.InstantiationDependent = true;
}
- void setInstantiationDependent(bool D = true) {
+ void setInstantiationDependent(bool D = true) {
TypeBits.InstantiationDependent = D; }
- void setVariablyModified(bool VM = true) { TypeBits.VariablyModified = VM;
+ void setVariablyModified(bool VM = true) { TypeBits.VariablyModified = VM;
}
void setContainsUnexpandedParameterPack(bool PP = true) {
TypeBits.ContainsUnexpandedParameterPack = PP;
@@ -1306,8 +1349,8 @@ public:
/// };
/// \endcode
///
- /// Note that this routine does not specify which
- bool containsUnexpandedParameterPack() const {
+ /// Note that this routine does not specify which
+ bool containsUnexpandedParameterPack() const {
return TypeBits.ContainsUnexpandedParameterPack;
}
@@ -1317,6 +1360,11 @@ public:
return CanonicalType == QualType(this, 0);
}
+ /// Pull a single level of sugar off of this locally-unqualified type.
+ /// Users should generally prefer SplitQualType::getSingleStepDesugaredType()
+ /// or QualType::getSingleStepDesugaredType(const ASTContext&).
+ QualType getLocallyUnqualifiedSingleStepDesugaredType() const;
+
/// Types are partitioned into 3 broad categories (C99 6.2.5p1):
/// object types, function types, and incomplete types.
@@ -1324,18 +1372,22 @@ public:
/// A type that can describe objects, but which lacks information needed to
/// determine its size (e.g. void, or a fwd declared struct). Clients of this
/// routine will need to determine if the size is actually required.
- bool isIncompleteType() const;
+ ///
+ /// \param Def If non-NULL, and the type refers to some kind of declaration
+ /// that can be completed (such as a C struct, C++ class, or Objective-C
+ /// class), will be set to the declaration.
+ bool isIncompleteType(NamedDecl **Def = 0) const;
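A usage sketch for the new out-parameter (T is assumed to be a const Type*):

    NamedDecl *Def = 0;
    if (T->isIncompleteType(&Def) && Def) {
      // T is, e.g., a forward-declared struct; Def names the declaration
      // that would have to be completed to complete the type.
    }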
/// isIncompleteOrObjectType - Return true if this is an incomplete or object
/// type, in other words, not a function type.
bool isIncompleteOrObjectType() const {
return !isFunctionType();
}
-
+
/// \brief Determine whether this type is an object type.
bool isObjectType() const {
// C++ [basic.types]p8:
- // An object type is a (possibly cv-qualified) type that is not a
+ // An object type is a (possibly cv-qualified) type that is not a
// function type, not a reference type, and not a void type.
return !isReferenceType() && !isFunctionType() && !isVoidType();
}
@@ -1367,6 +1419,10 @@ public:
/// isSpecificPlaceholderType - Test for a specific placeholder type.
bool isSpecificPlaceholderType(unsigned K) const;
+ /// isNonOverloadPlaceholderType - Test for a placeholder type
+ /// other than Overload; see BuiltinType::isNonOverloadPlaceholderType.
+ bool isNonOverloadPlaceholderType() const;
+
/// isIntegerType() does *not* include complex integers (a GCC extension).
/// isComplexIntegerType() can be used to test for complex integers.
bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum)
@@ -1378,13 +1434,13 @@ public:
bool isChar32Type() const;
bool isAnyCharacterType() const;
bool isIntegralType(ASTContext &Ctx) const;
-
+
/// \brief Determine whether this type is an integral or enumeration type.
bool isIntegralOrEnumerationType() const;
/// \brief Determine whether this type is an integral or unscoped enumeration
/// type.
bool isIntegralOrUnscopedEnumerationType() const;
-
+
/// Floating point categories.
bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
/// isComplexType() does *not* include complex integers (a GCC extension).
@@ -1451,7 +1507,7 @@ public:
bool isCARCBridgableType() const;
bool isTemplateTypeParmType() const; // C++ template type parameter
bool isNullPtrType() const; // C++0x nullptr_t
- bool isAtomicType() const; // C1X _Atomic()
+ bool isAtomicType() const; // C11 _Atomic()
/// Determines if this type, which must satisfy
/// isObjCLifetimeType(), is implicitly __unsafe_unretained rather
@@ -1479,32 +1535,32 @@ public:
/// that its definition somehow depends on a template parameter
/// (C++ [temp.dep.type]).
bool isDependentType() const { return TypeBits.Dependent; }
-
+
/// \brief Determine whether this type is an instantiation-dependent type,
/// meaning that the type involves a template parameter (even if the
/// definition does not actually depend on the type substituted for that
/// template parameter).
- bool isInstantiationDependentType() const {
- return TypeBits.InstantiationDependent;
+ bool isInstantiationDependentType() const {
+ return TypeBits.InstantiationDependent;
}
-
+
/// \brief Whether this type is a variably-modified type (C99 6.7.5).
bool isVariablyModifiedType() const { return TypeBits.VariablyModified; }
/// \brief Whether this type involves a variable-length array type
/// with a definite size.
bool hasSizedVLAType() const;
-
+
/// \brief Whether this type is or contains a local or unnamed type.
bool hasUnnamedOrLocalType() const;
-
+
bool isOverloadableType() const;
/// \brief Determine whether this type is a C++ elaborated-type-specifier.
bool isElaboratedTypeSpecifier() const;
bool canDecayToPointerType() const;
-
+
/// hasPointerRepresentation - Whether this type is represented
/// natively as a pointer; this includes pointers, references, block
/// pointers, and Objective-C interface, qualified id, and qualified
@@ -1547,7 +1603,7 @@ public:
const CXXRecordDecl *getCXXRecordDeclForPointerType() const;
/// \brief Retrieves the CXXRecordDecl that this type refers to, either
- /// because the type is a RecordType or because it is the injected-class-name
+ /// because the type is a RecordType or because it is the injected-class-name
/// type of a class template or class template partial specialization.
CXXRecordDecl *getAsCXXRecordDecl() const;
@@ -1555,7 +1611,7 @@ public:
/// an initializer of this type. This looks through declarators like pointer
/// types, but not through decltype or typedefs.
AutoType *getContainedAutoType() const;
-
+
/// Member-template getAs<specific type>'. Look through sugar for
/// an instance of <specific type>. This scheme will eventually
/// replace the specific getAsXXXX methods above.
@@ -1608,15 +1664,15 @@ public:
bool isSignedIntegerType() const;
/// isUnsignedIntegerType - Return true if this is an integer type that is
- /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
+ /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
/// or an enum decl which has an unsigned representation.
bool isUnsignedIntegerType() const;
- /// Determines whether this is an integer type that is signed or an
+ /// Determines whether this is an integer type that is signed or an
/// enumeration type whose underlying type is a signed integer type.
bool isSignedIntegerOrEnumerationType() const;
-
- /// Determines whether this is an integer type that is unsigned or an
+
+ /// Determines whether this is an integer type that is unsigned or an
/// enumeration type whose underlying type is an unsigned integer type.
bool isUnsignedIntegerOrEnumerationType() const;
@@ -1635,19 +1691,22 @@ public:
/// \brief Determine the visibility of this type.
Visibility getVisibility() const;
+ /// \brief Return true if the visibility was explicitly set in the code.
+ bool isVisibilityExplicit() const;
+
/// \brief Determine the linkage and visibility of this type.
std::pair<Linkage,Visibility> getLinkageAndVisibility() const;
-
+
/// \brief Note that the linkage is no longer known.
void ClearLinkageCache();
-
+
const char *getTypeClassName() const;
QualType getCanonicalTypeInternal() const {
return CanonicalType;
}
CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
- void dump() const;
+ LLVM_ATTRIBUTE_USED void dump() const;
static bool classof(const Type *) { return true; }
@@ -1677,81 +1736,9 @@ template <> inline const Class##Type *Type::castAs() const { \
class BuiltinType : public Type {
public:
enum Kind {
- Void,
-
- Bool, // This is bool and/or _Bool.
- Char_U, // This is 'char' for targets where char is unsigned.
- UChar, // This is explicitly qualified unsigned char.
- WChar_U, // This is 'wchar_t' for C++, when unsigned.
- Char16, // This is 'char16_t' for C++.
- Char32, // This is 'char32_t' for C++.
- UShort,
- UInt,
- ULong,
- ULongLong,
- UInt128, // __uint128_t
-
- Char_S, // This is 'char' for targets where char is signed.
- SChar, // This is explicitly qualified signed char.
- WChar_S, // This is 'wchar_t' for C++, when signed.
- Short,
- Int,
- Long,
- LongLong,
- Int128, // __int128_t
-
- Half, // This is the 'half' type in OpenCL,
- // __fp16 in case of ARM NEON.
- Float, Double, LongDouble,
-
- NullPtr, // This is the type of C++0x 'nullptr'.
-
- /// The primitive Objective C 'id' type. The user-visible 'id'
- /// type is a typedef of an ObjCObjectPointerType to an
- /// ObjCObjectType with this as its base. In fact, this only ever
- /// shows up in an AST as the base type of an ObjCObjectType.
- ObjCId,
-
- /// The primitive Objective C 'Class' type. The user-visible
- /// 'Class' type is a typedef of an ObjCObjectPointerType to an
- /// ObjCObjectType with this as its base. In fact, this only ever
- /// shows up in an AST as the base type of an ObjCObjectType.
- ObjCClass,
-
- /// The primitive Objective C 'SEL' type. The user-visible 'SEL'
- /// type is a typedef of a PointerType to this.
- ObjCSel,
-
- /// This represents the type of an expression whose type is
- /// totally unknown, e.g. 'T::foo'. It is permitted for this to
- /// appear in situations where the structure of the type is
- /// theoretically deducible.
- Dependent,
-
- /// The type of an unresolved overload set. A placeholder type.
- /// Expressions with this type have one of the following basic
- /// forms, with parentheses generally permitted:
- /// foo # possibly qualified, not if an implicit access
- /// foo<int> # possibly qualified, not if an implicit access
- /// &foo # possibly qualified, not if an implicit access
- /// x->foo # only if might be a static member function
- /// &x->foo # only if might be a static member function
- /// &Class::foo # when a pointer-to-member; sub-expr also has this type
- /// OverloadExpr::find can be used to analyze the expression.
- Overload,
-
- /// The type of a bound C++ non-static member function.
- /// A placeholder type. Expressions with this type have one of the
- /// following basic forms:
- /// foo # if an implicit access
- /// x->foo # if only contains non-static members
- BoundMember,
-
- /// __builtin_any_type. A placeholder type. Useful for clients
- /// like debuggers that don't know what type to give something.
- /// Only a small number of operations are valid on expressions of
- /// unknown type, most notably explicit casts.
- UnknownAny
+#define BUILTIN_TYPE(Id, SingletonId) Id,
+#define LAST_BUILTIN_TYPE(Id) LastKind = Id
+#include "clang/AST/BuiltinTypes.def"
};
public:
@@ -1785,11 +1772,29 @@ public:
return getKind() >= Half && getKind() <= LongDouble;
}
+ /// Determines whether the given kind corresponds to a placeholder type.
+ static bool isPlaceholderTypeKind(Kind K) {
+ return K >= Overload;
+ }
+
/// Determines whether this type is a placeholder type, i.e. a type
/// which cannot appear in arbitrary positions in a fully-formed
/// expression.
bool isPlaceholderType() const {
- return getKind() >= Overload;
+ return isPlaceholderTypeKind(getKind());
+ }
+
+ /// Determines whether this type is a placeholder type other than
+ /// Overload. Most placeholder types require only syntactic
+ /// information about their context in order to be resolved (e.g.
+ /// whether it is a call expression), which means they can (and
+ /// should) be resolved in an earlier "phase" of analysis.
+ /// Overload expressions sometimes pick up further information
+ /// from their context, like whether the context expects a
+ /// specific function-pointer type, and so frequently need
+ /// special treatment.
+ bool isNonOverloadPlaceholderType() const {
+ return getKind() > Overload;
}
static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
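Since the placeholder kinds are laid out contiguously with Overload first, both predicates reduce to a range check on the kind. A hypothetical dispatch sketch (E is assumed to be an Expr*):

    if (const BuiltinType *PT = E->getType()->getAsPlaceholderType()) {
      if (PT->isNonOverloadPlaceholderType())
        ;  // syntactic context suffices; should be resolved early
      else
        ;  // Overload: may still need type information from the context
    }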
@@ -1868,7 +1873,7 @@ class PointerType : public Type, public llvm::FoldingSetNode {
Type(Pointer, CanonicalPtr, Pointee->isDependentType(),
Pointee->isInstantiationDependentType(),
Pointee->isVariablyModifiedType(),
- Pointee->containsUnexpandedParameterPack()),
+ Pointee->containsUnexpandedParameterPack()),
PointeeType(Pointee) {
}
friend class ASTContext; // ASTContext creates these.
@@ -1905,7 +1910,7 @@ class BlockPointerType : public Type, public llvm::FoldingSetNode {
PointeeType(Pointee) {
}
friend class ASTContext; // ASTContext creates these.
-
+
public:
// Get the pointee type. Pointee is required to always be a function type.
@@ -1938,17 +1943,17 @@ protected:
Type(tc, CanonicalRef, Referencee->isDependentType(),
Referencee->isInstantiationDependentType(),
Referencee->isVariablyModifiedType(),
- Referencee->containsUnexpandedParameterPack()),
- PointeeType(Referencee)
+ Referencee->containsUnexpandedParameterPack()),
+ PointeeType(Referencee)
{
ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
}
-
+
public:
bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; }
bool isInnerRef() const { return ReferenceTypeBits.InnerRef; }
-
+
QualType getPointeeTypeAsWritten() const { return PointeeType; }
QualType getPointeeType() const {
// FIXME: this might strip inner qualifiers; okay?
@@ -2021,15 +2026,15 @@ class MemberPointerType : public Type, public llvm::FoldingSetNode {
MemberPointerType(QualType Pointee, const Type *Cls, QualType CanonicalPtr) :
Type(MemberPointer, CanonicalPtr,
Cls->isDependentType() || Pointee->isDependentType(),
- (Cls->isInstantiationDependentType() ||
+ (Cls->isInstantiationDependentType() ||
Pointee->isInstantiationDependentType()),
Pointee->isVariablyModifiedType(),
- (Cls->containsUnexpandedParameterPack() ||
+ (Cls->containsUnexpandedParameterPack() ||
Pointee->containsUnexpandedParameterPack())),
PointeeType(Pointee), Class(Cls) {
}
friend class ASTContext; // ASTContext creates these.
-
+
public:
QualType getPointeeType() const { return PointeeType; }
@@ -2136,7 +2141,7 @@ class ConstantArrayType : public ArrayType {
protected:
ConstantArrayType(TypeClass tc, QualType et, QualType can,
const llvm::APInt &size, ArraySizeModifier sm, unsigned tq)
- : ArrayType(tc, et, can, sm, tq, et->containsUnexpandedParameterPack()),
+ : ArrayType(tc, et, can, sm, tq, et->containsUnexpandedParameterPack()),
Size(size) {}
friend class ASTContext; // ASTContext creates these.
public:
@@ -2144,17 +2149,17 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
-
+
/// \brief Determine the number of bits required to address a member of
/// an array with the given element type and number of elements.
static unsigned getNumAddressingBits(ASTContext &Context,
QualType ElementType,
const llvm::APInt &NumElements);
-
+
/// \brief Determine the maximum number of active bits that an array's size
/// can require, which limits the maximum size of the array.
static unsigned getMaxSizeBits(ASTContext &Context);
-
+
void Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, getElementType(), getSize(),
getSizeModifier(), getIndexTypeCVRQualifiers());
@@ -2180,7 +2185,7 @@ class IncompleteArrayType : public ArrayType {
IncompleteArrayType(QualType et, QualType can,
ArraySizeModifier sm, unsigned tq)
- : ArrayType(IncompleteArray, et, can, sm, tq,
+ : ArrayType(IncompleteArray, et, can, sm, tq,
et->containsUnexpandedParameterPack()) {}
friend class ASTContext; // ASTContext creates these.
public:
@@ -2232,7 +2237,7 @@ class VariableArrayType : public ArrayType {
VariableArrayType(QualType et, QualType can, Expr *e,
ArraySizeModifier sm, unsigned tq,
SourceRange brackets)
- : ArrayType(VariableArray, et, can, sm, tq,
+ : ArrayType(VariableArray, et, can, sm, tq,
et->containsUnexpandedParameterPack()),
SizeExpr((Stmt*) e), Brackets(brackets) {}
friend class ASTContext; // ASTContext creates these.
@@ -2388,12 +2393,12 @@ protected:
VectorType(QualType vecType, unsigned nElements, QualType canonType,
VectorKind vecKind);
-
+
VectorType(TypeClass tc, QualType vecType, unsigned nElements,
QualType canonType, VectorKind vecKind);
friend class ASTContext; // ASTContext creates these.
-
+
public:
QualType getElementType() const { return ElementType; }
@@ -2559,7 +2564,7 @@ class FunctionType : public Type {
bool getNoReturn() const { return Bits & NoReturnMask; }
bool getProducesResult() const { return Bits & ProducesResultMask; }
bool getHasRegParm() const { return (Bits >> RegParmOffset) != 0; }
- unsigned getRegParm() const {
+ unsigned getRegParm() const {
unsigned RegParm = Bits >> RegParmOffset;
if (RegParm > 0)
--RegParm;
@@ -2607,23 +2612,21 @@ class FunctionType : public Type {
};
protected:
- FunctionType(TypeClass tc, QualType res, bool variadic,
+ FunctionType(TypeClass tc, QualType res,
unsigned typeQuals, RefQualifierKind RefQualifier,
QualType Canonical, bool Dependent,
bool InstantiationDependent,
- bool VariablyModified, bool ContainsUnexpandedParameterPack,
+ bool VariablyModified, bool ContainsUnexpandedParameterPack,
ExtInfo Info)
- : Type(tc, Canonical, Dependent, InstantiationDependent, VariablyModified,
- ContainsUnexpandedParameterPack),
+ : Type(tc, Canonical, Dependent, InstantiationDependent, VariablyModified,
+ ContainsUnexpandedParameterPack),
ResultType(res) {
FunctionTypeBits.ExtInfo = Info.Bits;
- FunctionTypeBits.Variadic = variadic;
FunctionTypeBits.TypeQuals = typeQuals;
FunctionTypeBits.RefQualifier = static_cast<unsigned>(RefQualifier);
}
- bool isVariadic() const { return FunctionTypeBits.Variadic; }
unsigned getTypeQuals() const { return FunctionTypeBits.TypeQuals; }
-
+
RefQualifierKind getRefQualifier() const {
return static_cast<RefQualifierKind>(FunctionTypeBits.RefQualifier);
}
@@ -2640,7 +2643,7 @@ public:
/// \brief Determine the type of an expression that calls a function of
/// this type.
- QualType getCallResultType(ASTContext &Context) const {
+ QualType getCallResultType(ASTContext &Context) const {
return getResultType().getNonLValueExprType(Context);
}
@@ -2657,13 +2660,13 @@ public:
/// no information available about its arguments.
class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
- : FunctionType(FunctionNoProto, Result, false, 0, RQ_None, Canonical,
+ : FunctionType(FunctionNoProto, Result, 0, RQ_None, Canonical,
/*Dependent=*/false, /*InstantiationDependent=*/false,
- Result->isVariablyModifiedType(),
+ Result->isVariablyModifiedType(),
/*ContainsUnexpandedParameterPack=*/false, Info) {}
friend class ASTContext; // ASTContext creates these.
-
+
public:
// No additional state past what FunctionType provides.
@@ -2695,34 +2698,26 @@ public:
/// ExtProtoInfo - Extra information about a function prototype.
struct ExtProtoInfo {
ExtProtoInfo() :
- Variadic(false), ExceptionSpecType(EST_None), TypeQuals(0),
- RefQualifier(RQ_None), NumExceptions(0), Exceptions(0), NoexceptExpr(0),
- ConsumedArguments(0) {}
+ Variadic(false), HasTrailingReturn(false), TypeQuals(0),
+ ExceptionSpecType(EST_None), RefQualifier(RQ_None),
+ NumExceptions(0), Exceptions(0), NoexceptExpr(0), ConsumedArguments(0) {}
FunctionType::ExtInfo ExtInfo;
- bool Variadic;
- ExceptionSpecificationType ExceptionSpecType;
+ bool Variadic : 1;
+ bool HasTrailingReturn : 1;
unsigned char TypeQuals;
+ ExceptionSpecificationType ExceptionSpecType;
RefQualifierKind RefQualifier;
unsigned NumExceptions;
-
- /// Exceptions - A variable size array after that holds the exception types.
const QualType *Exceptions;
-
- /// NoexceptExpr - Instead of Exceptions, there may be a single Expr*
- /// pointing to the expression in the noexcept() specifier.
Expr *NoexceptExpr;
-
- /// ConsumedArgs - A variable size array, following Exceptions
- /// and of length NumArgs, holding flags indicating which arguments
- /// are consumed. This only appears if HasAnyConsumedArgs is true.
const bool *ConsumedArguments;
};
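A sketch of populating the reorganized struct, e.g. for a variadic prototype with an lvalue ref-qualifier (Context and ResultTy are assumed to be in scope, and the exact ASTContext::getFunctionType signature is an assumption of this sketch):

    FunctionProtoType::ExtProtoInfo EPI;
    EPI.Variadic = true;            // the '...' ellipsis
    EPI.RefQualifier = RQ_LValue;   // the '&' ref-qualifier
    QualType Args[] = { Context.IntTy };
    QualType FnTy = Context.getFunctionType(ResultTy, Args, 1, EPI);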
private:
/// \brief Determine whether there are any argument types that
/// contain an unexpanded parameter pack.
- static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
+ static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
unsigned numArgs) {
for (unsigned Idx = 0; Idx < numArgs; ++Idx)
if (ArgArray[Idx]->containsUnexpandedParameterPack())
@@ -2735,7 +2730,7 @@ private:
QualType canonical, const ExtProtoInfo &epi);
/// NumArgs - The number of arguments this function has, not counting '...'.
- unsigned NumArgs : 19;
+ unsigned NumArgs : 17;
/// NumExceptions - The number of types in the exception spec, if any.
unsigned NumExceptions : 9;
@@ -2746,6 +2741,25 @@ private:
/// HasAnyConsumedArgs - Whether this function has any consumed arguments.
unsigned HasAnyConsumedArgs : 1;
+ /// Variadic - Whether the function is variadic.
+ unsigned Variadic : 1;
+
+ /// HasTrailingReturn - Whether this function has a trailing return type.
+ unsigned HasTrailingReturn : 1;
+
+ // ArgInfo - There is a variable size array after the class in memory that
+ // holds the argument types.
+
+ // Exceptions - There is another variable size array after ArgInfo that
+ // holds the exception types.
+
+ // NoexceptExpr - Instead of Exceptions, there may be a single Expr* pointing
+ // to the expression in the noexcept() specifier.
+
+ // ConsumedArgs - A variable size array, following Exceptions
+ // and of length NumArgs, holding flags indicating which arguments
+ // are consumed. This only appears if HasAnyConsumedArgs is true.
+
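Taken together, the comments above describe this trailing-allocation layout (purely illustrative; the hypothetical argsBegin helper mirrors the arg_type_begin accessor defined further down):

    // [FunctionProtoType][QualType ArgInfo[NumArgs]]
    //   [QualType Exceptions[NumExceptions] or Expr *NoexceptExpr]
    //   [bool ConsumedArgs[NumArgs]]   // only if HasAnyConsumedArgs
    const QualType *argsBegin(const FunctionProtoType *FPT) {
      return reinterpret_cast<const QualType *>(FPT + 1);
    }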
friend class ASTContext; // ASTContext creates these.
const bool *getConsumedArgsBuffer() const {
@@ -2772,6 +2786,7 @@ public:
ExtProtoInfo EPI;
EPI.ExtInfo = getExtInfo();
EPI.Variadic = isVariadic();
+ EPI.HasTrailingReturn = hasTrailingReturn();
EPI.ExceptionSpecType = getExceptionSpecType();
EPI.TypeQuals = static_cast<unsigned char>(getTypeQuals());
EPI.RefQualifier = getRefQualifier();
@@ -2833,24 +2848,26 @@ public:
return getNoexceptSpec(Ctx) == NR_Nothrow;
}
- using FunctionType::isVariadic;
+ bool isVariadic() const { return Variadic; }
/// \brief Determines whether this function prototype contains a
/// parameter pack at the end.
///
/// A function template whose last parameter is a parameter pack can be
/// called with an arbitrary number of arguments, much like a variadic
- /// function. However,
+ /// function.
bool isTemplateVariadic() const;
-
+
+ bool hasTrailingReturn() const { return HasTrailingReturn; }
+
unsigned getTypeQuals() const { return FunctionType::getTypeQuals(); }
-
+
/// \brief Retrieve the ref-qualifier associated with this function type.
RefQualifierKind getRefQualifier() const {
return FunctionType::getRefQualifier();
}
-
+
typedef const QualType *arg_type_iterator;
arg_type_iterator arg_type_begin() const {
return reinterpret_cast<const QualType *>(this+1);
@@ -2881,6 +2898,9 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
+ void printExceptionSpecification(std::string &S,
+ PrintingPolicy Policy) const;
+
static bool classof(const Type *T) {
return T->getTypeClass() == FunctionProto;
}
@@ -2901,7 +2921,7 @@ class UnresolvedUsingType : public Type {
UnresolvedUsingTypenameDecl *Decl;
UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
- : Type(UnresolvedUsing, QualType(), true, true, false,
+ : Type(UnresolvedUsing, QualType(), true, true, false,
/*ContainsUnexpandedParameterPack=*/false),
Decl(const_cast<UnresolvedUsingTypenameDecl*>(D)) {}
friend class ASTContext; // ASTContext creates these.
@@ -2931,9 +2951,9 @@ class TypedefType : public Type {
TypedefNameDecl *Decl;
protected:
TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType can)
- : Type(tc, can, can->isDependentType(),
+ : Type(tc, can, can->isDependentType(),
can->isInstantiationDependentType(),
- can->isVariablyModifiedType(),
+ can->isVariablyModifiedType(),
/*ContainsUnexpandedParameterPack=*/false),
Decl(const_cast<TypedefNameDecl*>(D)) {
assert(!isa<TypedefType>(can) && "Invalid canonical type");
@@ -2996,10 +3016,10 @@ public:
class TypeOfType : public Type {
QualType TOType;
TypeOfType(QualType T, QualType can)
- : Type(TypeOf, can, T->isDependentType(),
+ : Type(TypeOf, can, T->isDependentType(),
T->isInstantiationDependentType(),
- T->isVariablyModifiedType(),
- T->containsUnexpandedParameterPack()),
+ T->isVariablyModifiedType(),
+ T->containsUnexpandedParameterPack()),
TOType(T) {
assert(!isa<TypedefType>(can) && "Invalid canonical type");
}
@@ -3020,10 +3040,6 @@ public:
/// DecltypeType (C++0x)
class DecltypeType : public Type {
Expr *E;
-
- // FIXME: We could get rid of UnderlyingType if we wanted to: We would have to
- // Move getDesugaredType to ASTContext so that it can call getDecltypeForExpr
- // from it.
QualType UnderlyingType;
protected:
@@ -3089,7 +3105,7 @@ public:
QualType getBaseType() const { return BaseType; }
UTTKind getUTTKind() const { return UKind; }
-
+
static bool classof(const Type *T) {
return T->getTypeClass() == UnaryTransform;
}
@@ -3101,6 +3117,8 @@ class TagType : public Type {
/// TagDecl that declares the entity.
TagDecl * decl;
+ friend class ASTReader;
+
protected:
TagType(TypeClass TC, const TagDecl *D, QualType can);
@@ -3115,8 +3133,6 @@ public:
return T->getTypeClass() >= TagFirst && T->getTypeClass() <= TagLast;
}
static bool classof(const TagType *) { return true; }
- static bool classof(const RecordType *) { return true; }
- static bool classof(const EnumType *) { return true; }
};
/// RecordType - This is a helper class that allows the use of isa/cast/dyncast
@@ -3142,10 +3158,7 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
- static bool classof(const TagType *T);
- static bool classof(const Type *T) {
- return isa<TagType>(T) && classof(cast<TagType>(T));
- }
+ static bool classof(const Type *T) { return T->getTypeClass() == Record; }
static bool classof(const RecordType *) { return true; }
};
@@ -3164,10 +3177,7 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
- static bool classof(const TagType *T);
- static bool classof(const Type *T) {
- return isa<TagType>(T) && classof(cast<TagType>(T));
- }
+ static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
static bool classof(const EnumType *) { return true; }
};
@@ -3284,7 +3294,7 @@ class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
/// Build the canonical type.
TemplateTypeParmType(unsigned D, unsigned I, bool PP)
- : Type(TemplateTypeParm, QualType(this, 0),
+ : Type(TemplateTypeParm, QualType(this, 0),
/*Dependent=*/true,
/*InstantiationDependent=*/true,
/*VariablyModified=*/false, PP) {
@@ -3389,7 +3399,7 @@ public:
///
/// When a pack expansion in the source code contains multiple parameter packs
/// and those parameter packs correspond to different levels of template
-/// parameter lists, this type node is used to represent a template type
+/// parameter lists, this type node is used to represent a template type
/// parameter pack from an outer level, which has already had its argument pack
/// substituted but that still lives within a pack expansion that itself
/// could not be instantiated. When actually performing a substitution into
@@ -3399,38 +3409,38 @@ public:
class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode {
/// \brief The original type parameter.
const TemplateTypeParmType *Replaced;
-
+
/// \brief A pointer to the set of template arguments that this
/// parameter pack is instantiated with.
const TemplateArgument *Arguments;
-
+
/// \brief The number of template arguments in \c Arguments.
unsigned NumArguments;
-
- SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
+
+ SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
QualType Canon,
const TemplateArgument &ArgPack);
-
+
friend class ASTContext;
-
+
public:
IdentifierInfo *getIdentifier() const { return Replaced->getIdentifier(); }
-
+
/// Gets the template parameter that was substituted for.
const TemplateTypeParmType *getReplacedParameter() const {
return Replaced;
}
-
+
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
-
+
TemplateArgument getArgumentPack() const;
-
+
void Profile(llvm::FoldingSetNodeID &ID);
static void Profile(llvm::FoldingSetNodeID &ID,
const TemplateTypeParmType *Replaced,
const TemplateArgument &ArgPack);
-
+
static bool classof(const Type *T) {
return T->getTypeClass() == SubstTemplateTypeParmPack;
}
@@ -3513,8 +3523,12 @@ class TemplateSpecializationType
/// \brief - The number of template arguments named in this class
/// template specialization.
- unsigned NumArgs;
+ unsigned NumArgs : 31;
+ /// \brief Whether this template specialization type is a substituted
+ /// type alias.
+ bool TypeAlias : 1;
+
TemplateSpecializationType(TemplateName T,
const TemplateArgument *Args,
unsigned NumArgs, QualType Canon,
@@ -3556,9 +3570,23 @@ public:
return isa<InjectedClassNameType>(getCanonicalTypeInternal());
}
- /// True if this template specialization type is for a type alias
- /// template.
- bool isTypeAlias() const;
+ /// \brief Determine if this template specialization type is for a type alias
+ /// template that has been substituted.
+ ///
+ /// Nearly every template specialization type whose template is an alias
+ /// template will be substituted. However, this is not the case when
+ /// the specialization contains a pack expansion but the template alias
+ /// does not have a corresponding parameter pack, e.g.,
+ ///
+ /// \code
+ /// template<typename T, typename U, typename V> struct S;
+ /// template<typename T, typename U> using A = S<T, int, U>;
+ /// template<typename... Ts> struct X {
+ /// typedef A<Ts...> type; // not a type alias
+ /// };
+ /// \endcode
+ bool isTypeAlias() const { return TypeAlias; }
+
/// Get the aliased type, if this is a specialization of a type alias
/// template.
QualType getAliasedType() const {
@@ -3646,7 +3674,7 @@ class InjectedClassNameType : public Type {
InjectedClassNameType(CXXRecordDecl *D, QualType TST)
: Type(InjectedClassName, QualType(), /*Dependent=*/true,
/*InstantiationDependent=*/true,
- /*VariablyModified=*/false,
+ /*VariablyModified=*/false,
/*ContainsUnexpandedParameterPack=*/false),
Decl(D), InjectedType(TST) {
assert(isa<TemplateSpecializationType>(TST));
@@ -3708,10 +3736,10 @@ enum ElaboratedTypeKeyword {
class TypeWithKeyword : public Type {
protected:
TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
- QualType Canonical, bool Dependent,
- bool InstantiationDependent, bool VariablyModified,
+ QualType Canonical, bool Dependent,
+ bool InstantiationDependent, bool VariablyModified,
bool ContainsUnexpandedParameterPack)
- : Type(tc, Canonical, Dependent, InstantiationDependent, VariablyModified,
+ : Type(tc, Canonical, Dependent, InstantiationDependent, VariablyModified,
ContainsUnexpandedParameterPack) {
TypeWithKeywordBits.Keyword = Keyword;
}
@@ -3815,12 +3843,12 @@ public:
};
/// \brief Represents a qualified type name for which the type name is
-/// dependent.
+/// dependent.
///
-/// DependentNameType represents a class of dependent types that involve a
-/// dependent nested-name-specifier (e.g., "T::") followed by a (dependent)
+/// DependentNameType represents a class of dependent types that involve a
+/// dependent nested-name-specifier (e.g., "T::") followed by a (dependent)
/// name of a type. The DependentNameType may start with a "typename" (for a
-/// typename-specifier), "class", "struct", "union", or "enum" (for a
+/// typename-specifier), "class", "struct", "union", or "enum" (for a
/// dependent elaborated-type-specifier), or nothing (in contexts where we
/// know that we must be referring to a type, e.g., in a base class specifier).
class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
@@ -3831,7 +3859,7 @@ class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
/// \brief The type that this typename specifier refers to.
const IdentifierInfo *Name;
- DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
+ DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
const IdentifierInfo *Name, QualType CanonType)
: TypeWithKeyword(Keyword, DependentName, CanonType, /*Dependent=*/true,
/*InstantiationDependent=*/true,
@@ -3948,7 +3976,7 @@ public:
}
static bool classof(const DependentTemplateSpecializationType *T) {
return true;
- }
+ }
};
/// \brief Represents a pack expansion of types.
@@ -3965,7 +3993,7 @@ public:
/// \code
/// template<typename ...Types> struct tuple;
///
-/// template<typename ...Types>
+/// template<typename ...Types>
/// struct tuple_of_references {
/// typedef tuple<Types&...> type;
/// };
@@ -3978,24 +4006,24 @@ class PackExpansionType : public Type, public llvm::FoldingSetNode {
QualType Pattern;
/// \brief The number of expansions that this pack expansion will
- /// generate when substituted (+1), or indicates that
+ /// generate when substituted (+1), or indicates that
///
- /// This field will only have a non-zero value when some of the parameter
- /// packs that occur within the pattern have been substituted but others have
+ /// This field will only have a non-zero value when some of the parameter
+ /// packs that occur within the pattern have been substituted but others have
/// not.
unsigned NumExpansions;
-
+
PackExpansionType(QualType Pattern, QualType Canon,
llvm::Optional<unsigned> NumExpansions)
: Type(PackExpansion, Canon, /*Dependent=*/true,
/*InstantiationDependent=*/true,
/*VariablyModified=*/Pattern->isVariablyModifiedType(),
/*ContainsUnexpandedParameterPack=*/false),
- Pattern(Pattern),
+ Pattern(Pattern),
NumExpansions(NumExpansions? *NumExpansions + 1: 0) { }
friend class ASTContext; // ASTContext creates these
-
+
public:
/// \brief Retrieve the pattern of this pack expansion, which is the
/// type that will be repeatedly instantiated when instantiating the
@@ -4007,10 +4035,10 @@ public:
llvm::Optional<unsigned> getNumExpansions() const {
if (NumExpansions)
return NumExpansions - 1;
-
+
return llvm::Optional<unsigned>();
}
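The field uses a +1 encoding so that zero can stand for "not yet known". A sketch of both directions of the encoding:

    unsigned encode(llvm::Optional<unsigned> N) {
      return N ? *N + 1 : 0;       // shift known counts up by one
    }
    llvm::Optional<unsigned> decode(unsigned Stored) {
      if (Stored) return Stored - 1;          // known expansion count
      return llvm::Optional<unsigned>();      // unknown
    }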
-
+
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -4031,7 +4059,7 @@ public:
}
static bool classof(const PackExpansionType *T) {
return true;
- }
+ }
};
/// ObjCObjectType - Represents a class type in Objective C.
@@ -4075,7 +4103,7 @@ class ObjCObjectType : public Type {
ObjCProtocolDecl **getProtocolStorage();
protected:
- ObjCObjectType(QualType Canonical, QualType Base,
+ ObjCObjectType(QualType Canonical, QualType Base,
ObjCProtocolDecl * const *Protocols, unsigned NumProtocols);
enum Nonce_ObjCInterface { Nonce_ObjCInterface };
@@ -4132,7 +4160,7 @@ public:
assert(I < getNumProtocols() && "Out-of-range protocol access");
return qual_begin()[I];
}
-
+
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -4153,7 +4181,7 @@ class ObjCObjectTypeImpl : public ObjCObjectType, public llvm::FoldingSetNode {
// If anyone adds fields here, ObjCObjectType::getProtocolStorage()
// will need to be modified.
- ObjCObjectTypeImpl(QualType Canonical, QualType Base,
+ ObjCObjectTypeImpl(QualType Canonical, QualType Base,
ObjCProtocolDecl * const *Protocols,
unsigned NumProtocols)
: ObjCObjectType(Canonical, Base, Protocols, NumProtocols) {}
@@ -4162,8 +4190,8 @@ public:
void Profile(llvm::FoldingSetNodeID &ID);
static void Profile(llvm::FoldingSetNodeID &ID,
QualType Base,
- ObjCProtocolDecl *const *protocols,
- unsigned NumProtocols);
+ ObjCProtocolDecl *const *protocols,
+ unsigned NumProtocols);
};
inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorage() {
@@ -4185,12 +4213,14 @@ inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorage() {
/// - It is its own base type. That is, if T is an ObjCInterfaceType*,
/// T->getBaseType() == QualType(T, 0).
class ObjCInterfaceType : public ObjCObjectType {
- ObjCInterfaceDecl *Decl;
+ mutable ObjCInterfaceDecl *Decl;
ObjCInterfaceType(const ObjCInterfaceDecl *D)
: ObjCObjectType(Nonce_ObjCInterface),
Decl(const_cast<ObjCInterfaceDecl*>(D)) {}
friend class ASTContext; // ASTContext creates these.
+ friend class ASTReader;
+ friend class ObjCInterfaceDecl;
public:
/// getDecl - Get the declaration of this interface.
@@ -4300,7 +4330,7 @@ public:
bool isObjCClassType() const {
return getObjectType()->isObjCUnqualifiedClass();
}
-
+
/// isObjCQualifiedIdType - True if this is equivalent to 'id<P>' for some
/// non-empty set of protocols.
bool isObjCQualifiedIdType() const {
@@ -4337,7 +4367,7 @@ public:
ObjCProtocolDecl *getProtocol(unsigned I) const {
return getObjectType()->getProtocol(I);
}
-
+
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -4396,7 +4426,7 @@ public:
addFastQualifiers(type.getLocalFastQualifiers());
if (!type.hasLocalNonFastQualifiers())
return type.getTypePtrUnsafe();
-
+
const ExtQuals *extQuals = type.getExtQualsUnsafe();
addConsistentQualifiers(extQuals->getQualifiers());
return extQuals->getBaseType();
@@ -4412,6 +4442,13 @@ public:
// Inline function definitions.
+inline SplitQualType SplitQualType::getSingleStepDesugaredType() const {
+ SplitQualType desugar =
+ Ty->getLocallyUnqualifiedSingleStepDesugaredType().split();
+ desugar.Quals.addConsistentQualifiers(Quals);
+ return desugar;
+}
+
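This preserves local qualifiers while one level of sugar is peeled. A worked example, assuming XTy is the QualType of x below:

    // typedef int Int;  const Int x;
    SplitQualType Split = XTy.split();  // { TypedefType 'Int', const }
    SplitQualType One = Split.getSingleStepDesugaredType();
    // One is { BuiltinType 'int', const }: the typedef was peeled and the
    // 'const' re-applied via addConsistentQualifiers.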
inline const Type *QualType::getTypePtr() const {
return getCommonPtr()->BaseType;
}
@@ -4463,7 +4500,7 @@ inline bool QualType::isCanonical() const {
inline bool QualType::isCanonicalAsParam() const {
if (!isCanonical()) return false;
if (hasLocalQualifiers()) return false;
-
+
const Type *T = getTypePtr();
if (T->isVariablyModifiedType() && T->hasSizedVLAType())
return false;
@@ -4472,21 +4509,21 @@ inline bool QualType::isCanonicalAsParam() const {
}
inline bool QualType::isConstQualified() const {
- return isLocalConstQualified() ||
+ return isLocalConstQualified() ||
getCommonPtr()->CanonicalType.isLocalConstQualified();
}
inline bool QualType::isRestrictQualified() const {
- return isLocalRestrictQualified() ||
+ return isLocalRestrictQualified() ||
getCommonPtr()->CanonicalType.isLocalRestrictQualified();
}
inline bool QualType::isVolatileQualified() const {
- return isLocalVolatileQualified() ||
+ return isLocalVolatileQualified() ||
getCommonPtr()->CanonicalType.isLocalVolatileQualified();
}
-
+
inline bool QualType::hasQualifiers() const {
return hasLocalQualifiers() ||
getCommonPtr()->CanonicalType.hasLocalQualifiers();
@@ -4496,7 +4533,7 @@ inline QualType QualType::getUnqualifiedType() const {
if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
return QualType(getTypePtr(), 0);
- return QualType(getSplitUnqualifiedTypeImpl(*this).first, 0);
+ return QualType(getSplitUnqualifiedTypeImpl(*this).Ty, 0);
}
inline SplitQualType QualType::getSplitUnqualifiedType() const {
@@ -4505,7 +4542,7 @@ inline SplitQualType QualType::getSplitUnqualifiedType() const {
return getSplitUnqualifiedTypeImpl(*this);
}
-
+
inline void QualType::removeLocalConst() {
removeLocalFastQualifiers(Qualifiers::Const);
}
@@ -4616,7 +4653,8 @@ inline bool Type::isCompoundType() const {
isReferenceType() ||
// -- classes containing a sequence of objects of various types, [...];
isRecordType() ||
- // -- unions, which ar classes capable of containing objects of different types at different times;
+ // -- unions, which are classes capable of containing objects of different
+ // types at different times;
isUnionType() ||
// -- enumerations, which comprise a set of named constant values. [...];
isEnumeralType() ||
@@ -4706,7 +4744,7 @@ inline bool Type::isObjCObjectType() const {
return isa<ObjCObjectType>(CanonicalType);
}
inline bool Type::isObjCObjectOrInterfaceType() const {
- return isa<ObjCInterfaceType>(CanonicalType) ||
+ return isa<ObjCInterfaceType>(CanonicalType) ||
isa<ObjCObjectType>(CanonicalType);
}
inline bool Type::isAtomicType() const {
@@ -4766,11 +4804,87 @@ inline const BuiltinType *Type::getAsPlaceholderType() const {
}
inline bool Type::isSpecificPlaceholderType(unsigned K) const {
+ assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K));
if (const BuiltinType *BT = dyn_cast<BuiltinType>(this))
return (BT->getKind() == (BuiltinType::Kind) K);
return false;
}
+inline bool Type::isNonOverloadPlaceholderType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(this))
+ return BT->isNonOverloadPlaceholderType();
+ return false;
+}
+
+inline bool Type::isVoidType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Void;
+ return false;
+}
+
+inline bool Type::isHalfType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Half;
+ // FIXME: Should we allow complex __fp16? Probably not.
+ return false;
+}
+
+inline bool Type::isNullPtrType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>())
+ return BT->getKind() == BuiltinType::NullPtr;
+ return false;
+}
+
+extern bool IsEnumDeclComplete(EnumDecl *);
+extern bool IsEnumDeclScoped(EnumDecl *);
+
+inline bool Type::isIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ return IsEnumDeclComplete(ET->getDecl()) &&
+ !IsEnumDeclScoped(ET->getDecl());
+ }
+ return false;
+}
+
+inline bool Type::isScalarType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() > BuiltinType::Void &&
+ BT->getKind() <= BuiltinType::NullPtr;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ // Enums are scalar types, but only if they are defined. Incomplete enums
+ // are not treated as scalar types.
+ return IsEnumDeclComplete(ET->getDecl());
+ return isa<PointerType>(CanonicalType) ||
+ isa<BlockPointerType>(CanonicalType) ||
+ isa<MemberPointerType>(CanonicalType) ||
+ isa<ComplexType>(CanonicalType) ||
+ isa<ObjCObjectPointerType>(CanonicalType);
+}
+
+inline bool Type::isIntegralOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ // Check for a complete enum type; incomplete enum types are not properly an
+ // enumeration type in the sense required here.
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return IsEnumDeclComplete(ET->getDecl());
+
+ return false;
+}
+
+inline bool Type::isBooleanType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Bool;
+ return false;
+}
+
/// \brief Determines whether this is a type for which one can define
/// an overloaded operator.
inline bool Type::isOverloadableType() const {
@@ -4822,15 +4936,15 @@ template<typename T,
bool isArrayType = (llvm::is_same<T, ArrayType>::value ||
llvm::is_base_of<ArrayType, T>::value)>
struct ArrayType_cannot_be_used_with_getAs { };
-
+
template<typename T>
struct ArrayType_cannot_be_used_with_getAs<T, true>;
-
+
/// Member-template getAs<specific type>'.
template <typename T> const T *Type::getAs() const {
ArrayType_cannot_be_used_with_getAs<T> at;
(void)at;
-
+
// If this is directly a T type, return it.
if (const T *Ty = dyn_cast<T>(this))
return Ty;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
index 20acada..aab87be 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
@@ -18,6 +18,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/TemplateBase.h"
#include "clang/Basic/Specifiers.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
class ASTContext;
@@ -93,9 +94,11 @@ public:
SourceLocation getEndLoc() const;
/// \brief Get the full source range.
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getBeginLoc(), getEndLoc());
}
+ SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
/// \brief Get the local source range.
SourceRange getLocalSourceRange() const {
@@ -159,7 +162,8 @@ public:
static bool classof(const TypeLoc *TL) { return true; }
private:
- static void initializeImpl(ASTContext &Context, TypeLoc TL, SourceLocation Loc);
+ static void initializeImpl(ASTContext &Context, TypeLoc TL,
+ SourceLocation Loc);
static TypeLoc getNextTypeLocImpl(TypeLoc TL);
static TypeLoc IgnoreParensImpl(TypeLoc TL);
static SourceRange getLocalSourceRangeImpl(TypeLoc TL);
@@ -226,7 +230,7 @@ public:
/// \brief Returns the size of the type source info data block.
unsigned getFullDataSize() const {
- return getLocalDataSize() +
+ return getLocalDataSize() +
getFullDataSizeForType(getType().getLocalUnqualifiedType());
}
@@ -326,7 +330,7 @@ protected:
void *getExtraLocalData() const {
return getLocalData() + 1;
}
-
+
void *getNonLocalData() const {
return static_cast<char*>(Base::Data) + asDerived()->getLocalDataSize();
}
@@ -392,7 +396,7 @@ struct TypeSpecLocInfo {
/// \brief A reasonable base class for TypeLocs that correspond to
/// types that are written as a type-specifier.
-class TypeSpecTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+class TypeSpecTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
TypeSpecTypeLoc,
Type,
TypeSpecLocInfo> {
@@ -566,10 +570,11 @@ class TagTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
public:
TagDecl *getDecl() const { return getTypePtr()->getDecl(); }
- /// \brief True if the tag was defined in this type specifier.
+ /// \brief True if the tag was defined in this type specifier.
bool isDefinition() const {
- return getDecl()->isCompleteDefinition() &&
- (getNameLoc().isInvalid() || getNameLoc() == getDecl()->getLocation());
+ TagDecl *D = getDecl();
+ return D->isCompleteDefinition() &&
+ (D->getIdentifier() == 0 || D->getLocation() == getNameLoc());
}
};
@@ -789,7 +794,7 @@ public:
assert(i < getNumProtocols() && "Index is out of bounds!");
return *(this->getTypePtr()->qual_begin() + i);
}
-
+
bool hasBaseTypeAsWritten() const {
return getLocalData()->HasBaseTypeAsWritten;
}
@@ -900,11 +905,11 @@ struct PointerLikeLocInfo {
SourceLocation StarLoc;
};
-/// A base class for
+/// A base class for
template <class Derived, class TypeClass, class LocalData = PointerLikeLocInfo>
class PointerLikeTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc, Derived,
TypeClass, LocalData> {
-public:
+public:
SourceLocation getSigilLoc() const {
return this->getLocalData()->StarLoc;
}
@@ -1077,6 +1082,10 @@ public:
getLocalData()->TrailingReturn = Trailing;
}
+ ArrayRef<ParmVarDecl *> getParams() const {
+ return ArrayRef<ParmVarDecl *>(getParmArray(), getNumArgs());
+ }
+
// ParmVarDecls* are stored after Info, one for each argument.
ParmVarDecl **getParmArray() const {
return (ParmVarDecl**) getExtraLocalData();
@@ -1213,6 +1222,7 @@ struct TemplateNameLocInfo {
};
struct TemplateSpecializationLocInfo : TemplateNameLocInfo {
+ SourceLocation TemplateKWLoc;
SourceLocation LAngleLoc;
SourceLocation RAngleLoc;
};
@@ -1223,6 +1233,13 @@ class TemplateSpecializationTypeLoc :
TemplateSpecializationType,
TemplateSpecializationLocInfo> {
public:
+ SourceLocation getTemplateKeywordLoc() const {
+ return getLocalData()->TemplateKWLoc;
+ }
+ void setTemplateKeywordLoc(SourceLocation Loc) {
+ getLocalData()->TemplateKWLoc = Loc;
+ }
+
SourceLocation getLAngleLoc() const {
return getLocalData()->LAngleLoc;
}
@@ -1271,13 +1288,17 @@ public:
}
SourceRange getLocalSourceRange() const {
- return SourceRange(getTemplateNameLoc(), getRAngleLoc());
+ if (getTemplateKeywordLoc().isValid())
+ return SourceRange(getTemplateKeywordLoc(), getRAngleLoc());
+ else
+ return SourceRange(getTemplateNameLoc(), getRAngleLoc());
}
void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setTemplateKeywordLoc(Loc);
+ setTemplateNameLoc(Loc);
setLAngleLoc(Loc);
setRAngleLoc(Loc);
- setTemplateNameLoc(Loc);
initializeArgLocs(Context, getNumArgs(), getTypePtr()->getArgs(),
getArgInfos(), Loc);
}
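With the new slot, a written 'template' keyword extends the local range to cover it. Hypothetical source for orientation:

    // T::template apply<U>   // keyword written: range spans 'template'..'>'
    // vector<int>            // no keyword: range spans 'vector'..'>'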
@@ -1475,10 +1496,8 @@ class AutoTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
};
struct ElaboratedLocInfo {
- SourceLocation KeywordLoc;
-
- /// \brief Opaque data pointer used to reconstruct a nested-name-specifier
- /// from
+ SourceLocation ElaboratedKWLoc;
+ /// \brief Data associated with the nested-name-specifier location.
void *QualifierData;
};
@@ -1487,31 +1506,32 @@ class ElaboratedTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
ElaboratedType,
ElaboratedLocInfo> {
public:
- SourceLocation getKeywordLoc() const {
- return this->getLocalData()->KeywordLoc;
+ SourceLocation getElaboratedKeywordLoc() const {
+ return this->getLocalData()->ElaboratedKWLoc;
}
- void setKeywordLoc(SourceLocation Loc) {
- this->getLocalData()->KeywordLoc = Loc;
+ void setElaboratedKeywordLoc(SourceLocation Loc) {
+ this->getLocalData()->ElaboratedKWLoc = Loc;
}
NestedNameSpecifierLoc getQualifierLoc() const {
- return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
+ return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
getLocalData()->QualifierData);
}
-
+
void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
- assert(QualifierLoc.getNestedNameSpecifier()
+ assert(QualifierLoc.getNestedNameSpecifier()
== getTypePtr()->getQualifier() &&
"Inconsistent nested-name-specifier pointer");
getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
}
SourceRange getLocalSourceRange() const {
- if (getKeywordLoc().isValid())
+ if (getElaboratedKeywordLoc().isValid())
if (getQualifierLoc())
- return SourceRange(getKeywordLoc(), getQualifierLoc().getEndLoc());
+ return SourceRange(getElaboratedKeywordLoc(),
+ getQualifierLoc().getEndLoc());
else
- return SourceRange(getKeywordLoc());
+ return SourceRange(getElaboratedKeywordLoc());
else
return getQualifierLoc().getSourceRange();
}
@@ -1537,9 +1557,6 @@ public:
// type is some sort of TypeDeclTypeLoc.
struct DependentNameLocInfo : ElaboratedLocInfo {
SourceLocation NameLoc;
-
- /// \brief Data associated with the nested-name-specifier location.
- void *QualifierData;
};
class DependentNameTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
@@ -1547,25 +1564,25 @@ class DependentNameTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
DependentNameType,
DependentNameLocInfo> {
public:
- SourceLocation getKeywordLoc() const {
- return this->getLocalData()->KeywordLoc;
+ SourceLocation getElaboratedKeywordLoc() const {
+ return this->getLocalData()->ElaboratedKWLoc;
}
- void setKeywordLoc(SourceLocation Loc) {
- this->getLocalData()->KeywordLoc = Loc;
+ void setElaboratedKeywordLoc(SourceLocation Loc) {
+ this->getLocalData()->ElaboratedKWLoc = Loc;
}
NestedNameSpecifierLoc getQualifierLoc() const {
- return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
+ return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
getLocalData()->QualifierData);
}
-
+
void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
- assert(QualifierLoc.getNestedNameSpecifier()
+ assert(QualifierLoc.getNestedNameSpecifier()
== getTypePtr()->getQualifier() &&
"Inconsistent nested-name-specifier pointer");
getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
}
-
+
SourceLocation getNameLoc() const {
return this->getLocalData()->NameLoc;
}
@@ -1574,8 +1591,8 @@ public:
}
SourceRange getLocalSourceRange() const {
- if (getKeywordLoc().isValid())
- return SourceRange(getKeywordLoc(), getNameLoc());
+ if (getElaboratedKeywordLoc().isValid())
+ return SourceRange(getElaboratedKeywordLoc(), getNameLoc());
else
return SourceRange(getQualifierLoc().getBeginLoc(), getNameLoc());
}
@@ -1590,7 +1607,7 @@ public:
};
struct DependentTemplateSpecializationLocInfo : DependentNameLocInfo {
- SourceLocation KeywordLoc;
+ SourceLocation TemplateKWLoc;
SourceLocation LAngleLoc;
SourceLocation RAngleLoc;
// followed by a TemplateArgumentLocInfo[]
@@ -1602,41 +1619,48 @@ class DependentTemplateSpecializationTypeLoc :
DependentTemplateSpecializationType,
DependentTemplateSpecializationLocInfo> {
public:
- SourceLocation getKeywordLoc() const {
- return this->getLocalData()->KeywordLoc;
+ SourceLocation getElaboratedKeywordLoc() const {
+ return this->getLocalData()->ElaboratedKWLoc;
}
- void setKeywordLoc(SourceLocation Loc) {
- this->getLocalData()->KeywordLoc = Loc;
+ void setElaboratedKeywordLoc(SourceLocation Loc) {
+ this->getLocalData()->ElaboratedKWLoc = Loc;
}
NestedNameSpecifierLoc getQualifierLoc() const {
if (!getLocalData()->QualifierData)
return NestedNameSpecifierLoc();
-
- return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
+
+ return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
getLocalData()->QualifierData);
}
-
+
void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
if (!QualifierLoc) {
- // Even if we have a nested-name-specifier in the dependent
+ // Even if we have a nested-name-specifier in the dependent
// template specialization type, we won't record the nested-name-specifier
// location information when this type-source location information is
// part of a nested-name-specifier.
getLocalData()->QualifierData = 0;
return;
}
-
- assert(QualifierLoc.getNestedNameSpecifier()
+
+ assert(QualifierLoc.getNestedNameSpecifier()
== getTypePtr()->getQualifier() &&
"Inconsistent nested-name-specifier pointer");
getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
}
- SourceLocation getNameLoc() const {
+ SourceLocation getTemplateKeywordLoc() const {
+ return getLocalData()->TemplateKWLoc;
+ }
+ void setTemplateKeywordLoc(SourceLocation Loc) {
+ getLocalData()->TemplateKWLoc = Loc;
+ }
+
+ SourceLocation getTemplateNameLoc() const {
return this->getLocalData()->NameLoc;
}
- void setNameLoc(SourceLocation Loc) {
+ void setTemplateNameLoc(SourceLocation Loc) {
this->getLocalData()->NameLoc = Loc;
}
@@ -1670,12 +1694,14 @@ public:
}
SourceRange getLocalSourceRange() const {
- if (getKeywordLoc().isValid())
- return SourceRange(getKeywordLoc(), getRAngleLoc());
+ if (getElaboratedKeywordLoc().isValid())
+ return SourceRange(getElaboratedKeywordLoc(), getRAngleLoc());
else if (getQualifierLoc())
return SourceRange(getQualifierLoc().getBeginLoc(), getRAngleLoc());
+ else if (getTemplateKeywordLoc().isValid())
+ return SourceRange(getTemplateKeywordLoc(), getRAngleLoc());
else
- return SourceRange(getNameLoc(), getRAngleLoc());
+ return SourceRange(getTemplateNameLoc(), getRAngleLoc());
}
void copy(DependentTemplateSpecializationTypeLoc Loc) {
@@ -1702,7 +1728,7 @@ struct PackExpansionTypeLocInfo {
};
class PackExpansionTypeLoc
- : public ConcreteTypeLoc<UnqualTypeLoc, PackExpansionTypeLoc,
+ : public ConcreteTypeLoc<UnqualTypeLoc, PackExpansionTypeLoc,
PackExpansionType, PackExpansionTypeLocInfo> {
public:
SourceLocation getEllipsisLoc() const {
@@ -1736,7 +1762,7 @@ struct AtomicTypeLocInfo {
class AtomicTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc, AtomicTypeLoc,
AtomicType, AtomicTypeLocInfo> {
-public:
+public:
TypeLoc getValueLoc() const {
return this->getInnerTypeLoc();
}
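
// A consumer-side sketch of the accessors renamed in the hunks above.
// `rangeOf` is a hypothetical helper; it mirrors the getLocalSourceRange
// logic shown here rather than defining any new behavior.
#include "clang/AST/TypeLoc.h"

clang::SourceRange rangeOf(clang::ElaboratedTypeLoc TL) {
  // Start at the elaboration keyword ('struct', 'enum', 'typename', ...)
  // when one was written; otherwise fall back to the qualifier.
  if (TL.getElaboratedKeywordLoc().isValid()) {
    if (TL.getQualifierLoc())
      return clang::SourceRange(TL.getElaboratedKeywordLoc(),
                                TL.getQualifierLoc().getEndLoc());
    return clang::SourceRange(TL.getElaboratedKeywordLoc());
  }
  return TL.getQualifierLoc().getSourceRange();
}
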
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h
index 9eebc4b..242aa58 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h
@@ -28,11 +28,11 @@ public:
RetTy Visit(const Type *T) {
// Top switch stmt: dispatch to VisitFooType for each FooType.
switch (T->getTypeClass()) {
- default: llvm_unreachable("Unknown type class!");
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) case Type::CLASS: DISPATCH(CLASS##Type);
#include "clang/AST/TypeNodes.def"
}
+ llvm_unreachable("Unknown type class!");
}
// If the implementation chooses not to implement a certain visit method, fall
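
// The hunk above moves llvm_unreachable() after a switch whose cases cover
// every enumerator, so compilers that warn about a default label in a fully
// covered switch stay quiet while impossible fall-through is still marked.
// A small visitor built on this dispatch; `CountPointers` is illustrative.
#include "clang/AST/Type.h"
#include "clang/AST/TypeVisitor.h"

class CountPointers : public clang::TypeVisitor<CountPointers, unsigned> {
public:
  unsigned VisitPointerType(const clang::PointerType *T) {
    // One level of indirection plus whatever the pointee contributes.
    return 1 + Visit(T->getPointeeType().getTypePtr());
  }
  unsigned VisitType(const clang::Type *) { return 0; } // default case
};
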
diff --git a/contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h b/contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h
deleted file mode 100644
index 534d4d4..0000000
--- a/contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h
+++ /dev/null
@@ -1,114 +0,0 @@
-//===-- UsuallyTinyPtrVector.h - Pointer vector class -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the UsuallyTinyPtrVector class, which is a vector that
-// optimizes the case where there is only one element.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_AST_USUALLY_TINY_PTR_VECTOR_H
-#define LLVM_CLANG_AST_USUALLY_TINY_PTR_VECTOR_H
-
-#include <vector>
-
-namespace clang {
-
-/// \brief A vector class template that is optimized for storing a single
-/// pointer element.
-template<typename T>
-class UsuallyTinyPtrVector {
- /// \brief Storage for the vector.
- ///
- /// When the low bit is zero, this is a T *. When the
- /// low bit is one, this is a std::vector<T *> *.
- mutable uintptr_t Storage;
-
- typedef std::vector<T*> vector_type;
-
-public:
- UsuallyTinyPtrVector() : Storage(0) { }
- explicit UsuallyTinyPtrVector(T *Element)
- : Storage(reinterpret_cast<uintptr_t>(Element)) { }
-
- bool empty() const { return !Storage; }
-
- typedef const T **iterator;
- iterator begin() const;
- iterator end() const;
- size_t size() const;
-
- void push_back(T *Method);
- void Destroy();
-};
-
-template<typename T>
-typename UsuallyTinyPtrVector<T>::iterator
-UsuallyTinyPtrVector<T>::begin() const {
- if ((Storage & 0x01) == 0)
- return reinterpret_cast<iterator>(&Storage);
-
- vector_type *Vec = reinterpret_cast<vector_type *>(Storage & ~0x01);
- return &Vec->front();
-}
-
-template<typename T>
-typename UsuallyTinyPtrVector<T>::iterator
-UsuallyTinyPtrVector<T>::end() const {
- if ((Storage & 0x01) == 0) {
- if (Storage == 0)
- return reinterpret_cast<iterator>(&Storage);
-
- return reinterpret_cast<iterator>(&Storage) + 1;
- }
-
- vector_type *Vec = reinterpret_cast<vector_type *>(Storage & ~0x01);
- return &Vec->front() + Vec->size();
-}
-
-template<typename T>
-size_t UsuallyTinyPtrVector<T>::size() const {
- if ((Storage & 0x01) == 0)
- return (Storage == 0) ? 0 : 1;
-
- vector_type *Vec = reinterpret_cast<vector_type *>(Storage & ~0x01);
- return Vec->size();
-}
-
-template<typename T>
-void UsuallyTinyPtrVector<T>::push_back(T *Element) {
- if (Storage == 0) {
- // 0 -> 1 element.
- Storage = reinterpret_cast<uintptr_t>(Element);
- return;
- }
-
- vector_type *Vec;
- if ((Storage & 0x01) == 0) {
- // 1 -> 2 elements. Allocate a new vector and push the element into that
- // vector.
- Vec = new vector_type;
- Vec->push_back(reinterpret_cast<T *>(Storage));
- Storage = reinterpret_cast<uintptr_t>(Vec) | 0x01;
- } else
- Vec = reinterpret_cast<vector_type *>(Storage & ~0x01);
-
- // Add the new element to the vector.
- Vec->push_back(Element);
-}
-
-template<typename T>
-void UsuallyTinyPtrVector<T>::Destroy() {
- if (Storage & 0x01)
- delete reinterpret_cast<vector_type *>(Storage & ~0x01);
-
- Storage = 0;
-}
-
-}
-#endif
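
// The deleted class relied on a classic tagged-pointer encoding: heap
// allocations are at least 2-byte aligned, so the low bit of a uintptr_t can
// record whether the word holds a lone element or an out-of-line
// std::vector. A self-contained sketch of that encoding (names here are
// illustrative; llvm::TinyPtrVector provides the same small-size trick):
#include <cstdint>
#include <vector>

struct TaggedStorage {
  std::uintptr_t Bits; // low bit clear: single int*; set: std::vector<int*>*

  TaggedStorage() : Bits(0) {}

  bool isVector() const { return Bits & 0x01; }

  void push(int *P) {
    if (Bits == 0) { // empty -> one inline element
      Bits = reinterpret_cast<std::uintptr_t>(P);
      return;
    }
    std::vector<int*> *Vec;
    if (!isVector()) { // one element -> heap vector, migrating the old one
      Vec = new std::vector<int*>(1, reinterpret_cast<int*>(Bits));
      Bits = reinterpret_cast<std::uintptr_t>(Vec) | 0x01;
    } else {
      Vec = reinterpret_cast<std::vector<int*>*>(Bits & ~std::uintptr_t(1));
    }
    Vec->push_back(P);
  }
};
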
diff --git a/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h b/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h
index 59bab03..2aa9a3d 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h
@@ -34,16 +34,16 @@ public:
CK_OffsetToTop,
CK_RTTI,
CK_FunctionPointer,
-
+
/// CK_CompleteDtorPointer - A pointer to the complete destructor.
CK_CompleteDtorPointer,
-
+
/// CK_DeletingDtorPointer - A pointer to the deleting destructor.
CK_DeletingDtorPointer,
-
+
/// CK_UnusedFunctionPointer - In some cases, a vtable function pointer
/// will end up never being called. Such vtable function pointers are
- /// represented as a CK_UnusedFunctionPointer.
+ /// represented as a CK_UnusedFunctionPointer.
CK_UnusedFunctionPointer
};
@@ -60,34 +60,34 @@ public:
static VTableComponent MakeOffsetToTop(CharUnits Offset) {
return VTableComponent(CK_OffsetToTop, Offset);
}
-
+
static VTableComponent MakeRTTI(const CXXRecordDecl *RD) {
return VTableComponent(CK_RTTI, reinterpret_cast<uintptr_t>(RD));
}
static VTableComponent MakeFunction(const CXXMethodDecl *MD) {
- assert(!isa<CXXDestructorDecl>(MD) &&
+ assert(!isa<CXXDestructorDecl>(MD) &&
"Don't use MakeFunction with destructors!");
- return VTableComponent(CK_FunctionPointer,
+ return VTableComponent(CK_FunctionPointer,
reinterpret_cast<uintptr_t>(MD));
}
-
+
static VTableComponent MakeCompleteDtor(const CXXDestructorDecl *DD) {
return VTableComponent(CK_CompleteDtorPointer,
reinterpret_cast<uintptr_t>(DD));
}
static VTableComponent MakeDeletingDtor(const CXXDestructorDecl *DD) {
- return VTableComponent(CK_DeletingDtorPointer,
+ return VTableComponent(CK_DeletingDtorPointer,
reinterpret_cast<uintptr_t>(DD));
}
static VTableComponent MakeUnusedFunction(const CXXMethodDecl *MD) {
- assert(!isa<CXXDestructorDecl>(MD) &&
+ assert(!isa<CXXDestructorDecl>(MD) &&
"Don't use MakeUnusedFunction with destructors!");
return VTableComponent(CK_UnusedFunctionPointer,
- reinterpret_cast<uintptr_t>(MD));
+ reinterpret_cast<uintptr_t>(MD));
}
static VTableComponent getFromOpaqueInteger(uint64_t I) {
@@ -101,88 +101,88 @@ public:
CharUnits getVCallOffset() const {
assert(getKind() == CK_VCallOffset && "Invalid component kind!");
-
+
return getOffset();
}
CharUnits getVBaseOffset() const {
assert(getKind() == CK_VBaseOffset && "Invalid component kind!");
-
+
return getOffset();
}
CharUnits getOffsetToTop() const {
assert(getKind() == CK_OffsetToTop && "Invalid component kind!");
-
+
return getOffset();
}
-
+
const CXXRecordDecl *getRTTIDecl() const {
assert(getKind() == CK_RTTI && "Invalid component kind!");
-
+
return reinterpret_cast<CXXRecordDecl *>(getPointer());
}
-
+
const CXXMethodDecl *getFunctionDecl() const {
assert(getKind() == CK_FunctionPointer);
-
+
return reinterpret_cast<CXXMethodDecl *>(getPointer());
}
const CXXDestructorDecl *getDestructorDecl() const {
assert((getKind() == CK_CompleteDtorPointer ||
getKind() == CK_DeletingDtorPointer) && "Invalid component kind!");
-
+
return reinterpret_cast<CXXDestructorDecl *>(getPointer());
}
const CXXMethodDecl *getUnusedFunctionDecl() const {
assert(getKind() == CK_UnusedFunctionPointer);
-
+
return reinterpret_cast<CXXMethodDecl *>(getPointer());
}
-
+
private:
VTableComponent(Kind ComponentKind, CharUnits Offset) {
- assert((ComponentKind == CK_VCallOffset ||
+ assert((ComponentKind == CK_VCallOffset ||
ComponentKind == CK_VBaseOffset ||
ComponentKind == CK_OffsetToTop) && "Invalid component kind!");
assert(Offset.getQuantity() <= ((1LL << 56) - 1) && "Offset is too big!");
-
+
Value = ((Offset.getQuantity() << 3) | ComponentKind);
}
VTableComponent(Kind ComponentKind, uintptr_t Ptr) {
- assert((ComponentKind == CK_RTTI ||
+ assert((ComponentKind == CK_RTTI ||
ComponentKind == CK_FunctionPointer ||
ComponentKind == CK_CompleteDtorPointer ||
ComponentKind == CK_DeletingDtorPointer ||
ComponentKind == CK_UnusedFunctionPointer) &&
"Invalid component kind!");
-
+
assert((Ptr & 7) == 0 && "Pointer not sufficiently aligned!");
-
+
Value = Ptr | ComponentKind;
}
-
+
CharUnits getOffset() const {
assert((getKind() == CK_VCallOffset || getKind() == CK_VBaseOffset ||
getKind() == CK_OffsetToTop) && "Invalid component kind!");
-
+
return CharUnits::fromQuantity(Value >> 3);
}
uintptr_t getPointer() const {
- assert((getKind() == CK_RTTI ||
+ assert((getKind() == CK_RTTI ||
getKind() == CK_FunctionPointer ||
getKind() == CK_CompleteDtorPointer ||
getKind() == CK_DeletingDtorPointer ||
getKind() == CK_UnusedFunctionPointer) &&
"Invalid component kind!");
-
+
return static_cast<uintptr_t>(Value & ~7ULL);
}
-
+
explicit VTableComponent(uint64_t Value)
: Value(Value) { }
@@ -210,7 +210,7 @@ private:
/// VTableThunks - Contains thunks needed by vtables.
uint64_t NumVTableThunks;
VTableThunkTy *VTableThunks;
-
+
/// Address points - Address points for all vtables.
AddressPointsMapTy AddressPoints;
@@ -265,10 +265,10 @@ class VTableContext {
ASTContext &Context;
public:
- typedef SmallVector<std::pair<uint64_t, ThunkInfo>, 1>
+ typedef SmallVector<std::pair<uint64_t, ThunkInfo>, 1>
VTableThunksTy;
typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
-
+
private:
/// MethodVTableIndices - Contains the index (relative to the vtable address
/// point) where the function pointer for a virtual function is stored.
@@ -279,22 +279,22 @@ private:
VTableLayoutMapTy;
VTableLayoutMapTy VTableLayouts;
- /// NumVirtualFunctionPointers - Contains the number of virtual function
+ /// NumVirtualFunctionPointers - Contains the number of virtual function
/// pointers in the vtable for a given record decl.
llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers;
typedef std::pair<const CXXRecordDecl *,
const CXXRecordDecl *> ClassPairTy;
- /// VirtualBaseClassOffsetOffsets - Contains the vtable offset (relative to
+ /// VirtualBaseClassOffsetOffsets - Contains the vtable offset (relative to
/// the address point) in chars where the offsets for virtual bases of a class
/// are stored.
- typedef llvm::DenseMap<ClassPairTy, CharUnits>
+ typedef llvm::DenseMap<ClassPairTy, CharUnits>
VirtualBaseClassOffsetOffsetsMapTy;
VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets;
typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
-
+
/// Thunks - Contains all thunks that a given method decl will need.
ThunksMapTy Thunks;
@@ -312,7 +312,7 @@ public:
const VTableLayout &getVTableLayout(const CXXRecordDecl *RD) {
ComputeVTableRelatedInformation(RD);
assert(VTableLayouts.count(RD) && "No layout for this record decl!");
-
+
return *VTableLayouts[RD];
}
@@ -337,14 +337,14 @@ public:
/// getNumVirtualFunctionPointers - Return the number of virtual function
/// pointers in the vtable for a given record decl.
uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
-
+
/// getMethodVTableIndex - Return the index (relative to the vtable address
/// point) where the function pointer for the given virtual function is
/// stored.
uint64_t getMethodVTableIndex(GlobalDecl GD);
/// getVirtualBaseOffsetOffset - Return the offset in chars (relative to the
- /// vtable address point) where the offset of the virtual base that contains
+ /// vtable address point) where the offset of the virtual base that contains
/// the given base is stored, otherwise, if no virtual base contains the given
 /// class, return 0. Base must be a virtual base class or an unambiguous
/// base.
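
// VTableComponent above packs each entry into one 64-bit word: a 3-bit kind
// in the low bits, offsets shifted left by three, and decl pointers relying
// on 8-byte alignment to keep those bits clear. A freestanding sketch of the
// encoding (Kind values illustrative; non-negative offsets only, for brevity):
#include <cassert>
#include <cstdint>

enum PackedKind { PK_Offset = 0, PK_Pointer = 1 };

inline std::uint64_t packOffset(std::int64_t Quantity, PackedKind K) {
  assert(Quantity >= 0 && Quantity <= ((1LL << 56) - 1) && "offset too big");
  return (static_cast<std::uint64_t>(Quantity) << 3) | K;
}

inline std::uint64_t packPointer(const void *P, PackedKind K) {
  std::uintptr_t Bits = reinterpret_cast<std::uintptr_t>(P);
  assert((Bits & 7) == 0 && "pointer not sufficiently aligned");
  return Bits | K;
}

inline std::int64_t unpackOffset(std::uint64_t V) {
  return static_cast<std::int64_t>(V >> 3);
}
inline std::uintptr_t unpackPointer(std::uint64_t V) {
  return static_cast<std::uintptr_t>(V & ~7ULL);
}
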
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h
new file mode 100644
index 0000000..e9a431a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h
@@ -0,0 +1,212 @@
+//==- Dominators.h - Implementation of dominators tree for Clang CFG C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the dominators tree functionality for Clang CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DOMINATORS_H
+#define LLVM_CLANG_DOMINATORS_H
+
+#include "clang/Analysis/AnalysisContext.h"
+
+#include "llvm/Module.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/DominatorInternals.h"
+
+namespace clang {
+
+class CFGBlock;
+typedef llvm::DomTreeNodeBase<CFGBlock> DomTreeNode;
+
+/// \brief Concrete subclass of DominatorTreeBase for Clang
+/// This class implements the dominators tree functionality given a Clang CFG.
+///
+class DominatorTree : public ManagedAnalysis {
+ virtual void anchor();
+public:
+ llvm::DominatorTreeBase<CFGBlock>* DT;
+
+ DominatorTree() {
+ DT = new llvm::DominatorTreeBase<CFGBlock>(false);
+ }
+
+ ~DominatorTree() {
+ delete DT;
+ }
+
+ llvm::DominatorTreeBase<CFGBlock>& getBase() { return *DT; }
+
+ /// \brief This method returns the root CFGBlock of the dominators tree.
+ ///
+ inline CFGBlock *getRoot() const {
+ return DT->getRoot();
+ }
+
+ /// \brief This method returns the root DomTreeNode, which is the wrapper
+ /// for CFGBlock.
+ inline DomTreeNode *getRootNode() const {
+ return DT->getRootNode();
+ }
+
+ /// \brief This method compares two dominator trees.
+ /// The method returns false if the other dominator tree matches this
+ /// dominator tree, otherwise returns true.
+ ///
+ inline bool compare(DominatorTree &Other) const {
+ DomTreeNode *R = getRootNode();
+ DomTreeNode *OtherR = Other.getRootNode();
+
+ if (!R || !OtherR || R->getBlock() != OtherR->getBlock())
+ return true;
+
+ if (DT->compare(Other.getBase()))
+ return true;
+
+ return false;
+ }
+
+ /// \brief This method builds the dominator tree for a given CFG.
+ /// The CFG information is passed via AnalysisDeclContext.
+ ///
+ void buildDominatorTree(AnalysisDeclContext &AC) {
+ cfg = AC.getCFG();
+ DT->recalculate(*cfg);
+ }
+
+ /// \brief This method dumps immediate dominators for each block,
+ /// mainly used for debug purposes.
+ ///
+ void dump() {
+ llvm::errs() << "Immediate dominance tree (Node#,IDom#):\n";
+ for (CFG::const_iterator I = cfg->begin(),
+ E = cfg->end(); I != E; ++I) {
+ if(DT->getNode(*I)->getIDom())
+ llvm::errs() << "(" << (*I)->getBlockID()
+ << ","
+ << DT->getNode(*I)->getIDom()->getBlock()->getBlockID()
+ << ")\n";
+ else llvm::errs() << "(" << (*I)->getBlockID()
+ << "," << (*I)->getBlockID() << ")\n";
+ }
+ }
+
+ /// \brief This method tests if one CFGBlock dominates the other.
+ /// The method returns true if A dominates B, false otherwise.
+ /// Note a block always dominates itself.
+ ///
+ inline bool dominates(const CFGBlock* A, const CFGBlock* B) const {
+ return DT->dominates(A, B);
+ }
+
+ /// \brief This method tests if one CFGBlock properly dominates the other.
+ /// The method returns true if A properly dominates B, false otherwise.
+ ///
+ bool properlyDominates(const CFGBlock*A, const CFGBlock*B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ /// \brief This method finds the nearest common dominator CFG block
+ /// for CFG block A and B. If there is no such block then return NULL.
+ ///
+ inline CFGBlock *findNearestCommonDominator(CFGBlock *A, CFGBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
+ inline const CFGBlock *findNearestCommonDominator(const CFGBlock *A,
+ const CFGBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
+ /// \brief This method is used to update the dominator
+ /// tree information when a node's immediate dominator changes.
+ ///
+ inline void changeImmediateDominator(CFGBlock *N, CFGBlock *NewIDom) {
+ DT->changeImmediateDominator(N, NewIDom);
+ }
+
+ /// \brief This method tests if the given CFGBlock is reachable from the root.
+ /// Returns true if reachable, false otherwise.
+ ///
+ bool isReachableFromEntry(const CFGBlock *A) {
+ return DT->isReachableFromEntry(A);
+ }
+
+ /// \brief This method releases the memory held by the dominator tree.
+ ///
+ virtual void releaseMemory() {
+ DT->releaseMemory();
+ }
+
+ /// \brief This method converts the dominator tree to human readable form.
+ ///
+ virtual void print(raw_ostream &OS, const llvm::Module* M= 0) const {
+ DT->print(OS);
+ }
+
+private:
+ CFG *cfg;
+};
+
+inline void WriteAsOperand(raw_ostream &OS, const CFGBlock *BB,
+ bool t) {
+ OS << "BB#" << BB->getBlockID();
+}
+
+} // end namespace clang
+
+//===-------------------------------------
+/// DominatorTree GraphTraits specialization so the DominatorTree can be
+/// iterable by generic graph iterators.
+///
+namespace llvm {
+template <> struct GraphTraits< ::clang::DomTreeNode* > {
+ typedef ::clang::DomTreeNode NodeType;
+ typedef NodeType::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) {
+ return N;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+
+ typedef df_iterator< ::clang::DomTreeNode* > nodes_iterator;
+
+ static nodes_iterator nodes_begin(::clang::DomTreeNode *N) {
+ return df_begin(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(::clang::DomTreeNode *N) {
+ return df_end(getEntryNode(N));
+ }
+};
+
+template <> struct GraphTraits< ::clang::DominatorTree* >
+ : public GraphTraits< ::clang::DomTreeNode* > {
+ static NodeType *getEntryNode(::clang::DominatorTree *DT) {
+ return DT->getRootNode();
+ }
+
+ static nodes_iterator nodes_begin(::clang::DominatorTree *N) {
+ return df_begin(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(::clang::DominatorTree *N) {
+ return df_end(getEntryNode(N));
+ }
+};
+} // end namespace llvm
+
+#endif
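
// A short usage sketch for the new analysis: build the tree over the CFG
// held by an AnalysisDeclContext, then answer dominance queries. Obtaining
// `AC` is elided; the calls follow the interface declared above.
#include "clang/Analysis/Analyses/Dominators.h"

bool blockDominates(clang::AnalysisDeclContext &AC,
                    const clang::CFGBlock *A, const clang::CFGBlock *B) {
  clang::DominatorTree DT;
  DT.buildDominatorTree(AC); // recalculates over AC.getCFG()
  return DT.dominates(A, B); // true when A dominates B; A dominates itself
}
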
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
index a3f9c53..d4d8dc0 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
@@ -66,11 +66,14 @@ public:
AsChar, // 'hh'
AsShort, // 'h'
AsLong, // 'l'
- AsLongLong, // 'll', 'q' (BSD, deprecated)
+ AsLongLong, // 'll'
+ AsQuad, // 'q' (BSD, deprecated, for 64-bit integer types)
AsIntMax, // 'j'
AsSizeT, // 'z'
AsPtrDiff, // 't'
AsLongDouble, // 'L'
+ AsAllocate, // for '%as', GNU extension to C90 scanf
+ AsMAllocate, // for '%ms', GNU extension to scanf
AsWideChar = AsLong // for '%ls', only makes sense for printf
};
@@ -104,7 +107,7 @@ private:
const char *Position;
Kind kind;
};
-
+
class ConversionSpecifier {
public:
enum Kind {
@@ -113,14 +116,14 @@ public:
cArg,
dArg,
iArg,
- IntArgBeg = cArg, IntArgEnd = iArg,
-
+ IntArgBeg = cArg, IntArgEnd = iArg,
+
oArg,
uArg,
xArg,
XArg,
UIntArgBeg = oArg, UIntArgEnd = XArg,
-
+
fArg,
FArg,
eArg,
@@ -130,49 +133,49 @@ public:
aArg,
AArg,
DoubleArgBeg = fArg, DoubleArgEnd = AArg,
-
+
sArg,
pArg,
nArg,
PercentArg,
CArg,
SArg,
-
+
// ** Printf-specific **
-
+
// Objective-C specific specifiers.
ObjCObjArg, // '@'
ObjCBeg = ObjCObjArg, ObjCEnd = ObjCObjArg,
-
+
// FreeBSD specific specifiers
bArg,
DArg,
rArg,
-
+
// GlibC specific specifiers.
PrintErrno, // 'm'
-
+
PrintfConvBeg = ObjCObjArg, PrintfConvEnd = PrintErrno,
-
- // ** Scanf-specific **
+
+ // ** Scanf-specific **
ScanListArg, // '['
ScanfConvBeg = ScanListArg, ScanfConvEnd = ScanListArg
};
-
+
ConversionSpecifier(bool isPrintf)
: IsPrintf(isPrintf), Position(0), EndScanList(0), kind(InvalidSpecifier) {}
-
+
ConversionSpecifier(bool isPrintf, const char *pos, Kind k)
: IsPrintf(isPrintf), Position(pos), EndScanList(0), kind(k) {}
-
+
const char *getStart() const {
return Position;
}
-
+
StringRef getCharacters() const {
return StringRef(getStart(), getLength());
}
-
+
bool consumesDataArgument() const {
switch (kind) {
case PrintErrno:
@@ -183,15 +186,16 @@ public:
return true;
}
}
-
+
Kind getKind() const { return kind; }
void setKind(Kind k) { kind = k; }
unsigned getLength() const {
return EndScanList ? EndScanList - Position : 1;
}
-
+
+ bool isUIntArg() const { return kind >= UIntArgBeg && kind <= UIntArgEnd; }
const char *toString() const;
-
+
bool isPrintfKind() const { return IsPrintf; }
protected:
@@ -204,15 +208,18 @@ protected:
class ArgTypeResult {
public:
enum Kind { UnknownTy, InvalidTy, SpecificTy, ObjCPointerTy, CPointerTy,
- CStrTy, WCStrTy, WIntTy };
+ AnyCharTy, CStrTy, WCStrTy, WIntTy };
private:
const Kind K;
QualType T;
- ArgTypeResult(bool) : K(InvalidTy) {}
+ const char *Name;
+ ArgTypeResult(bool) : K(InvalidTy), Name(0) {}
public:
- ArgTypeResult(Kind k = UnknownTy) : K(k) {}
- ArgTypeResult(QualType t) : K(SpecificTy), T(t) {}
- ArgTypeResult(CanQualType t) : K(SpecificTy), T(t) {}
+ ArgTypeResult(Kind k = UnknownTy) : K(k), Name(0) {}
+ ArgTypeResult(Kind k, const char *n) : K(k), Name(n) {}
+ ArgTypeResult(QualType t) : K(SpecificTy), T(t), Name(0) {}
+ ArgTypeResult(QualType t, const char *n) : K(SpecificTy), T(t), Name(n) {}
+ ArgTypeResult(CanQualType t) : K(SpecificTy), T(t), Name(0) {}
static ArgTypeResult Invalid() { return ArgTypeResult(true); }
@@ -227,6 +234,8 @@ public:
bool matchesAnyObjCObjectRef() const { return K == ObjCPointerTy; }
QualType getRepresentativeType(ASTContext &C) const;
+
+ std::string getRepresentativeTypeName(ASTContext &C) const;
};
class OptionalAmount {
@@ -302,9 +311,9 @@ protected:
LengthModifier LM;
OptionalAmount FieldWidth;
ConversionSpecifier CS;
- /// Positional arguments, an IEEE extension:
- /// IEEE Std 1003.1, 2004 Edition
- /// http://www.opengroup.org/onlinepubs/009695399/functions/printf.html
+ /// Positional arguments, an IEEE extension:
+ /// IEEE Std 1003.1, 2004 Edition
+ /// http://www.opengroup.org/onlinepubs/009695399/functions/printf.html
bool UsesPositionalArg;
unsigned argIndex;
public:
@@ -342,8 +351,14 @@ public:
}
bool usesPositionalArg() const { return UsesPositionalArg; }
-
+
bool hasValidLengthModifier() const;
+
+ bool hasStandardLengthModifier() const;
+
+ bool hasStandardConversionSpecifier(const LangOptions &LangOpt) const;
+
+ bool hasStandardLengthConversionCombination() const;
};
} // end analyze_format_string namespace
@@ -353,7 +368,7 @@ public:
namespace analyze_printf {
-class PrintfConversionSpecifier :
+class PrintfConversionSpecifier :
public analyze_format_string::ConversionSpecifier {
public:
PrintfConversionSpecifier()
@@ -364,9 +379,8 @@ public:
bool isObjCArg() const { return kind >= ObjCBeg && kind <= ObjCEnd; }
bool isIntArg() const { return kind >= IntArgBeg && kind <= IntArgEnd; }
- bool isUIntArg() const { return kind >= UIntArgBeg && kind <= UIntArgEnd; }
- bool isDoubleArg() const { return kind >= DoubleArgBeg &&
- kind <= DoubleArgBeg; }
+ bool isDoubleArg() const { return kind >= DoubleArgBeg &&
+ kind <= DoubleArgEnd; }
unsigned getLength() const {
    // Conversion specifiers are currently represented by single
    // characters, but we want to be flexible.
@@ -443,7 +457,7 @@ public:
const OptionalAmount &getPrecision() const {
return Precision;
}
-
+
bool consumesDataArgument() const {
return getConversionSpecifier().consumesDataArgument();
}
@@ -453,9 +467,9 @@ public:
/// will return null if the format specifier does not have
/// a matching data argument or the matching argument matches
/// more than one type.
- ArgTypeResult getArgType(ASTContext &Ctx) const;
+ ArgTypeResult getArgType(ASTContext &Ctx, bool IsObjCLiteral) const;
- const OptionalFlag &hasThousandsGrouping() const {
+ const OptionalFlag &hasThousandsGrouping() const {
return HasThousandsGrouping;
}
const OptionalFlag &isLeftJustified() const { return IsLeftJustified; }
@@ -465,14 +479,15 @@ public:
const OptionalFlag &hasSpacePrefix() const { return HasSpacePrefix; }
bool usesPositionalArg() const { return UsesPositionalArg; }
- /// Changes the specifier and length according to a QualType, retaining any
- /// flags or options. Returns true on success, or false when a conversion
- /// was not successful.
- bool fixType(QualType QT);
+ /// Changes the specifier and length according to a QualType, retaining any
+ /// flags or options. Returns true on success, or false when a conversion
+ /// was not successful.
+ bool fixType(QualType QT, const LangOptions &LangOpt, ASTContext &Ctx,
+ bool IsObjCLiteral);
void toString(raw_ostream &os) const;
- // Validation methods - to check if any element results in undefined behavior
+ // Validation methods - to check if any element results in undefined behavior
bool hasValidPlusPrefix() const;
bool hasValidAlternativeForm() const;
bool hasValidLeadingZeros() const;
@@ -500,16 +515,41 @@ public:
: ConversionSpecifier(false, pos, k) {}
void setEndScanList(const char *pos) { EndScanList = pos; }
-
+
static bool classof(const analyze_format_string::ConversionSpecifier *CS) {
return !CS->isPrintfKind();
- }
+ }
};
+using analyze_format_string::ArgTypeResult;
using analyze_format_string::LengthModifier;
using analyze_format_string::OptionalAmount;
using analyze_format_string::OptionalFlag;
+class ScanfArgTypeResult : public ArgTypeResult {
+public:
+ enum Kind { UnknownTy, InvalidTy, CStrTy, WCStrTy, PtrToArgTypeResultTy };
+private:
+ Kind K;
+ ArgTypeResult A;
+ const char *Name;
+ QualType getRepresentativeType(ASTContext &C) const;
+public:
+ ScanfArgTypeResult(Kind k = UnknownTy, const char* n = 0) : K(k), Name(n) {}
+ ScanfArgTypeResult(ArgTypeResult a, const char *n = 0)
+ : K(PtrToArgTypeResultTy), A(a), Name(n) {
+ assert(A.isValid());
+ }
+
+ static ScanfArgTypeResult Invalid() { return ScanfArgTypeResult(InvalidTy); }
+
+ bool isValid() const { return K != InvalidTy; }
+
+ bool matchesType(ASTContext& C, QualType argTy) const;
+
+ std::string getRepresentativeTypeName(ASTContext& C) const;
+};
+
class ScanfSpecifier : public analyze_format_string::FormatSpecifier {
OptionalFlag SuppressAssignment; // '*'
public:
@@ -533,11 +573,17 @@ public:
const ScanfConversionSpecifier &getConversionSpecifier() const {
return cast<ScanfConversionSpecifier>(CS);
}
-
+
bool consumesDataArgument() const {
return CS.consumesDataArgument() && !SuppressAssignment;
}
+ ScanfArgTypeResult getArgType(ASTContext &Ctx) const;
+
+ bool fixType(QualType QT, const LangOptions &LangOpt, ASTContext &Ctx);
+
+ void toString(raw_ostream &os) const;
+
static ScanfSpecifier Parse(const char *beg, const char *end);
};
@@ -557,6 +603,8 @@ public:
virtual void HandleNullChar(const char *nullCharacter) {}
+ virtual void HandlePosition(const char *startPos, unsigned posLen) {}
+
virtual void HandleInvalidPosition(const char *startPos, unsigned posLen,
PositionContext p) {}
@@ -599,11 +647,10 @@ public:
};
bool ParsePrintfString(FormatStringHandler &H,
- const char *beg, const char *end,
- bool FormatExtensions);
+ const char *beg, const char *end, const LangOptions &LO);
bool ParseScanfString(FormatStringHandler &H,
- const char *beg, const char *end);
+ const char *beg, const char *end, const LangOptions &LO);
} // end analyze_format_string namespace
} // end clang namespace
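
// ParsePrintfString now threads LangOptions through (per the hunk above), so
// dialect-specific conversions such as the FreeBSD ones can be gated. The
// counting handler below is purely illustrative.
#include "clang/Analysis/Analyses/FormatString.h"
#include "clang/Basic/LangOptions.h"

namespace {
class CountingHandler
    : public clang::analyze_format_string::FormatStringHandler {
  unsigned NumSpecifiers;
public:
  CountingHandler() : NumSpecifiers(0) {}
  unsigned count() const { return NumSpecifiers; }
  virtual bool HandlePrintfSpecifier(
      const clang::analyze_printf::PrintfSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen) {
    ++NumSpecifiers;
    return true; // keep scanning the rest of the format string
  }
};
} // end anonymous namespace

unsigned countSpecifiers(const char *Beg, const char *End,
                         const clang::LangOptions &LO) {
  CountingHandler H;
  clang::analyze_format_string::ParsePrintfString(H, Beg, End, LO);
  return H.count();
}
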
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h
index 302ae1c..c9f39b4 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h
@@ -52,7 +52,9 @@ public:
friend class LiveVariables;
};
- struct Observer {
+ class Observer {
+ virtual void anchor();
+ public:
virtual ~Observer() {}
/// A callback invoked right before invoking the
@@ -70,7 +72,7 @@ public:
virtual ~LiveVariables();
/// Compute the liveness information for a given CFG.
- static LiveVariables *computeLiveness(AnalysisContext &analysisContext,
+ static LiveVariables *computeLiveness(AnalysisDeclContext &analysisContext,
bool killAtAssign);
/// Return true if a variable is live at the end of a
@@ -93,7 +95,7 @@ public:
void runOnAllBlocks(Observer &obs);
- static LiveVariables *create(AnalysisContext &analysisContext) {
+ static LiveVariables *create(AnalysisDeclContext &analysisContext) {
return computeLiveness(analysisContext, true);
}
@@ -106,7 +108,7 @@ private:
class RelaxedLiveVariables : public LiveVariables {
public:
- static LiveVariables *create(AnalysisContext &analysisContext) {
+ static LiveVariables *create(AnalysisDeclContext &analysisContext) {
return computeLiveness(analysisContext, false);
}
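
// A sketch of the updated liveness entry point: computeLiveness hands back a
// heap-allocated object the caller owns. The isLive(block, decl) query is an
// assumption based on the accessor described above.
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "llvm/ADT/OwningPtr.h"

bool liveAtExit(clang::AnalysisDeclContext &AC, const clang::CFGBlock *B,
                const clang::VarDecl *VD) {
  llvm::OwningPtr<clang::LiveVariables> LV(
      clang::LiveVariables::computeLiveness(AC, /*killAtAssign=*/true));
  return LV.get() && LV->isLive(B, VD);
}
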
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
new file mode 100644
index 0000000..4e3244e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
@@ -0,0 +1,111 @@
+//===- PostOrderCFGView.h - Post order view of CFG blocks ---------*- C++ --*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements post order view of the blocks in a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_POSTORDER_CFGVIEW
+#define LLVM_CLANG_POSTORDER_CFGVIEW
+
+#include <vector>
+//#include <algorithm>
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/BitVector.h"
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+
+namespace clang {
+
+class PostOrderCFGView : public ManagedAnalysis {
+ virtual void anchor();
+public:
+ /// \brief Implements a set of CFGBlocks using a BitVector.
+ ///
+ /// This class contains a minimal interface, primarily dictated by the SetType
+ /// template parameter of the llvm::po_iterator template, as used with
+ /// external storage. We also use this set to keep track of which CFGBlocks we
+ /// visit during the analysis.
+ class CFGBlockSet {
+ llvm::BitVector VisitedBlockIDs;
+ public:
+ // po_iterator requires this iterator, but the only interface needed is the
+ // value_type typedef.
+ struct iterator { typedef const CFGBlock *value_type; };
+
+ CFGBlockSet() {}
+ CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}
+
+ /// \brief Set the bit associated with a particular CFGBlock.
+ /// This is the important method for the SetType template parameter.
+ bool insert(const CFGBlock *Block) {
+ // Note that insert() is called by po_iterator, which doesn't check to
+ // make sure that Block is non-null. Moreover, the CFGBlock iterator will
+ // occasionally hand out null pointers for pruned edges, so we catch those
+ // here.
+ if (Block == 0)
+ return false; // if an edge is trivially false.
+ if (VisitedBlockIDs.test(Block->getBlockID()))
+ return false;
+ VisitedBlockIDs.set(Block->getBlockID());
+ return true;
+ }
+
+ /// \brief Check if the bit for a CFGBlock has been already set.
+ /// This method is for tracking visited blocks in the main threadsafety
+ /// loop. Block must not be null.
+ bool alreadySet(const CFGBlock *Block) {
+ return VisitedBlockIDs.test(Block->getBlockID());
+ }
+ };
+
+private:
+ typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator;
+ std::vector<const CFGBlock*> Blocks;
+
+ typedef llvm::DenseMap<const CFGBlock *, unsigned> BlockOrderTy;
+ BlockOrderTy BlockOrder;
+
+public:
+ typedef std::vector<const CFGBlock*>::reverse_iterator iterator;
+
+ PostOrderCFGView(const CFG *cfg);
+
+ iterator begin() { return Blocks.rbegin(); }
+ iterator end() { return Blocks.rend(); }
+
+ bool empty() { return begin() == end(); }
+
+ struct BlockOrderCompare;
+ friend struct BlockOrderCompare;
+
+ struct BlockOrderCompare {
+ const PostOrderCFGView &POV;
+ public:
+ BlockOrderCompare(const PostOrderCFGView &pov) : POV(pov) {}
+ bool operator()(const CFGBlock *b1, const CFGBlock *b2) const;
+ };
+
+ BlockOrderCompare getComparator() const {
+ return BlockOrderCompare(*this);
+ }
+
+ // Used by AnalysisDeclContext to construct this object.
+ static const void *getTag();
+
+ static PostOrderCFGView *create(AnalysisDeclContext &analysisContext);
+};
+
+} // end clang namespace
+
+#endif
+
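
// Walking the view from begin() to end() yields blocks in reverse post-order
// (the iterators above run the stored post-order vector backwards), the
// natural visit order for forward dataflow. A sketch with the loop body left
// illustrative:
#include "clang/Analysis/Analyses/PostOrderCFGView.h"

void visitInDataflowOrder(clang::AnalysisDeclContext &AC) {
  clang::PostOrderCFGView *POV = AC.getAnalysis<clang::PostOrderCFGView>();
  if (!POV)
    return; // the analysis could not run, e.g. no CFG was built
  for (clang::PostOrderCFGView::iterator I = POV->begin(), E = POV->end();
       I != E; ++I) {
    const clang::CFGBlock *B = *I;
    (void)B; // a transfer function over B's elements would run here
  }
}
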
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h
index 6cf7fa4..30c5b2d 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h
@@ -25,7 +25,7 @@ namespace llvm {
}
namespace clang {
- class AnalysisContext;
+ class AnalysisDeclContext;
class CFGBlock;
}
@@ -37,6 +37,7 @@ namespace clang {
namespace reachable_code {
class Callback {
+ virtual void anchor();
public:
virtual ~Callback() {}
virtual void HandleUnreachable(SourceLocation L, SourceRange R1,
@@ -48,7 +49,7 @@ public:
unsigned ScanReachableFromBlock(const CFGBlock *Start,
llvm::BitVector &Reachable);
-void FindUnreachableCode(AnalysisContext &AC, Callback &CB);
+void FindUnreachableCode(AnalysisDeclContext &AC, Callback &CB);
}} // end namespace clang::reachable_code
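
// A sketch of a reachable_code::Callback consumer under the renamed context
// type; the three-parameter HandleUnreachable shape follows the declaration
// above, and the body is illustrative.
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "llvm/Support/raw_ostream.h"

namespace {
class ReportUnreachable : public clang::reachable_code::Callback {
public:
  virtual void HandleUnreachable(clang::SourceLocation L,
                                 clang::SourceRange R1,
                                 clang::SourceRange R2) {
    llvm::errs() << "unreachable code found\n"; // real clients emit a diag
  }
};
} // end anonymous namespace

void findDeadCode(clang::AnalysisDeclContext &AC) {
  ReportUnreachable CB;
  clang::reachable_code::FindUnreachableCode(AC, CB);
}
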
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
index a325056..26e258d 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
@@ -67,7 +67,7 @@ enum LockErrorKind {
class ThreadSafetyHandler {
public:
typedef llvm::StringRef Name;
- virtual ~ThreadSafetyHandler() = 0;
+ virtual ~ThreadSafetyHandler();
/// Warn about lock expressions which fail to resolve to lockable objects.
/// \param Loc -- the SourceLocation of the unresolved expression.
@@ -93,9 +93,14 @@ public:
/// 3. or when a mutex is locked but not unlocked inside a function.
/// \param LockName -- A StringRef name for the lock expression, to be printed
/// in the error message.
- /// \param Loc -- The location of the lock expression where the mutex is locked
+ /// \param LocLocked -- The location of the lock expression where the mutex is
+ /// locked
+ /// \param LocEndOfScope -- The location of the end of the scope where the
+ /// mutex is no longer held
/// \param LEK -- which of the three above cases we should warn for
- virtual void handleMutexHeldEndOfScope(Name LockName, SourceLocation Loc,
+ virtual void handleMutexHeldEndOfScope(Name LockName,
+ SourceLocation LocLocked,
+ SourceLocation LocEndOfScope,
LockErrorKind LEK){}
/// Warn when a mutex is held exclusively and shared at the same point. For
@@ -143,7 +148,8 @@ public:
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
-void runThreadSafetyAnalysis(AnalysisContext &AC, ThreadSafetyHandler &Handler);
+void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
+ ThreadSafetyHandler &Handler);
/// \brief Helper function that returns a LockKind required for the given level
/// of access.
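
// The hook above now carries both the acquisition site and the end of the
// scope where the lock is still held. A sketch of a handler overriding just
// that callback; the thread_safety namespace spelling is assumed from this
// header revision, and the diagnostic text is illustrative.
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "llvm/Support/raw_ostream.h"

namespace {
class PrintingHandler : public clang::thread_safety::ThreadSafetyHandler {
public:
  virtual void handleMutexHeldEndOfScope(
      Name LockName, clang::SourceLocation LocLocked,
      clang::SourceLocation LocEndOfScope,
      clang::thread_safety::LockErrorKind LEK) {
    // A real client turns LocLocked/LocEndOfScope into a paired diagnostic.
    llvm::errs() << "mutex '" << LockName << "' still held at end of scope\n";
  }
};
} // end anonymous namespace

void checkLocks(clang::AnalysisDeclContext &AC) {
  PrintingHandler H;
  clang::thread_safety::runThreadSafetyAnalysis(AC, H);
}
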
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h
index e2e4f35..4ee6698 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h
@@ -1,4 +1,4 @@
-//= UninitializedValues.h - Finding uses of uninitialized values --*- C++ -*-==//
+//= UninitializedValues.h - Finding uses of uninitialized values -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -17,12 +17,12 @@
namespace clang {
-class AnalysisContext;
-class CFG;
+class AnalysisDeclContext;
+class CFG;
class DeclContext;
class Expr;
class VarDecl;
-
+
class UninitVariablesHandler {
public:
UninitVariablesHandler() {}
@@ -32,7 +32,7 @@ public:
virtual void handleUseOfUninitVariable(const Expr *ex,
const VarDecl *vd,
bool isAlwaysUninit) {}
-
+
/// Called when the uninitialized variable analysis detects the
/// idiom 'int x = x'. All other uses of 'x' within the initializer
/// are handled by handleUseOfUninitVariable.
@@ -45,7 +45,7 @@ struct UninitVariablesAnalysisStats {
};
void runUninitializedVariablesAnalysis(const DeclContext &dc, const CFG &cfg,
- AnalysisContext &ac,
+ AnalysisDeclContext &ac,
UninitVariablesHandler &handler,
UninitVariablesAnalysisStats &stats);
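
// Wiring the analysis through the renamed AnalysisDeclContext; the handler
// override below is illustrative (real clients emit -Wuninitialized-style
// diagnostics).
#include "clang/Analysis/Analyses/UninitializedValues.h"

namespace {
class WarnUninit : public clang::UninitVariablesHandler {
public:
  virtual void handleUseOfUninitVariable(const clang::Expr *ex,
                                         const clang::VarDecl *vd,
                                         bool isAlwaysUninit) {
    // Record or report the use of 'vd' here.
  }
};
} // end anonymous namespace

void checkUninit(const clang::DeclContext &dc, const clang::CFG &cfg,
                 clang::AnalysisDeclContext &ac) {
  WarnUninit handler;
  clang::UninitVariablesAnalysisStats stats = {};
  clang::runUninitializedVariablesAnalysis(dc, cfg, ac, handler, stats);
}
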
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
index 3d0e88a..6b6f8ef 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines AnalysisContext, a class that manages the analysis context
-// data for path sensitive analysis.
+// This file defines AnalysisDeclContext, a class that manages the analysis
+// context data for path sensitive analysis.
//
//===----------------------------------------------------------------------===//
@@ -38,17 +38,19 @@ class PseudoConstantAnalysis;
class ImplicitParamDecl;
class LocationContextManager;
class StackFrameContext;
-
+class AnalysisDeclContextManager;
+class LocationContext;
+
namespace idx { class TranslationUnit; }
/// The base class of a hierarchy of objects representing analyses tied
-/// to AnalysisContext.
+/// to AnalysisDeclContext.
class ManagedAnalysis {
protected:
ManagedAnalysis() {}
public:
virtual ~ManagedAnalysis();
-
+
// Subclasses need to implement:
//
// static const void *getTag();
@@ -56,47 +58,55 @@ public:
// Which returns a fixed pointer address to distinguish classes of
// analysis objects. They also need to implement:
//
- // static [Derived*] create(AnalysisContext &Ctx);
+ // static [Derived*] create(AnalysisDeclContext &Ctx);
//
- // which creates the analysis object given an AnalysisContext.
+ // which creates the analysis object given an AnalysisDeclContext.
};
-
-/// AnalysisContext contains the context data for the function or method under
-/// analysis.
-class AnalysisContext {
+
+
+/// AnalysisDeclContext contains the context data for the function or method
+/// under analysis.
+class AnalysisDeclContext {
+ /// Backpoint to the AnalysisManager object that created this
+ /// AnalysisDeclContext. This may be null.
+ AnalysisDeclContextManager *Manager;
+
const Decl *D;
// TranslationUnit is NULL if we don't have multiple translation units.
idx::TranslationUnit *TU;
- llvm::OwningPtr<CFG> cfg, completeCFG;
- llvm::OwningPtr<CFGStmtMap> cfgStmtMap;
+ OwningPtr<CFG> cfg, completeCFG;
+ OwningPtr<CFGStmtMap> cfgStmtMap;
CFG::BuildOptions cfgBuildOptions;
CFG::BuildOptions::ForcedBlkExprs *forcedBlkExprs;
-
+
bool builtCFG, builtCompleteCFG;
- llvm::OwningPtr<LiveVariables> liveness;
- llvm::OwningPtr<LiveVariables> relaxedLiveness;
- llvm::OwningPtr<ParentMap> PM;
- llvm::OwningPtr<PseudoConstantAnalysis> PCA;
- llvm::OwningPtr<CFGReverseBlockReachabilityAnalysis> CFA;
+ OwningPtr<LiveVariables> liveness;
+ OwningPtr<LiveVariables> relaxedLiveness;
+ OwningPtr<ParentMap> PM;
+ OwningPtr<PseudoConstantAnalysis> PCA;
+ OwningPtr<CFGReverseBlockReachabilityAnalysis> CFA;
llvm::BumpPtrAllocator A;
- // FIXME: remove.
llvm::DenseMap<const BlockDecl*,void*> *ReferencedBlockVars;
void *ManagedAnalyses;
public:
- AnalysisContext(const Decl *d, idx::TranslationUnit *tu);
+ AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *D,
+ idx::TranslationUnit *TU);
- AnalysisContext(const Decl *d, idx::TranslationUnit *tu,
- const CFG::BuildOptions &buildOptions);
+ AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *D,
+ idx::TranslationUnit *TU,
+ const CFG::BuildOptions &BuildOptions);
- ~AnalysisContext();
+ ~AnalysisDeclContext();
ASTContext &getASTContext() { return D->getASTContext(); }
const Decl *getDecl() const { return D; }
@@ -111,12 +121,12 @@ public:
const CFG::BuildOptions &getCFGBuildOptions() const {
return cfgBuildOptions;
}
-
+
/// getAddEHEdges - Return true iff we are adding exceptional edges from
/// callExprs. If this is false, then try/catch statements and blocks
/// reachable from them can appear to be dead in the CFG, analysis passes must
/// cope with that.
- bool getAddEHEdges() const { return cfgBuildOptions.AddEHEdges; }
+ bool getAddEHEdges() const { return cfgBuildOptions.AddEHEdges; }
bool getUseUnoptimizedCFG() const {
return !cfgBuildOptions.PruneTriviallyFalseEdges;
}
@@ -125,18 +135,18 @@ public:
void registerForcedBlockExpression(const Stmt *stmt);
const CFGBlock *getBlockForRegisteredExpression(const Stmt *stmt);
-
+
Stmt *getBody() const;
CFG *getCFG();
-
+
CFGStmtMap *getCFGStmtMap();
CFGReverseBlockReachabilityAnalysis *getCFGReachablityAnalysis();
-
+
/// Return a version of the CFG without any edges pruned.
CFG *getUnoptimizedCFG();
- void dumpCFG();
+ void dumpCFG(bool ShowColors);
/// \brief Returns true if we have built a CFG for this analysis context.
/// Note that this doesn't correspond to whether or not a valid CFG exists, it
@@ -152,9 +162,14 @@ public:
getReferencedBlockVars(const BlockDecl *BD);
/// Return the ImplicitParamDecl* associated with 'self' if this
- /// AnalysisContext wraps an ObjCMethodDecl. Returns NULL otherwise.
+ /// AnalysisDeclContext wraps an ObjCMethodDecl. Returns NULL otherwise.
const ImplicitParamDecl *getSelfDecl() const;
-
+
+ const StackFrameContext *getStackFrame(LocationContext const *Parent,
+ const Stmt *S,
+ const CFGBlock *Blk,
+ unsigned Idx);
+
/// Return the specified analysis object, lazily running the analysis if
/// necessary. Return NULL if the analysis could not run.
template <typename T>
@@ -168,31 +183,8 @@ public:
}
private:
ManagedAnalysis *&getAnalysisImpl(const void* tag);
-};
-
-class AnalysisContextManager {
- typedef llvm::DenseMap<const Decl*, AnalysisContext*> ContextMap;
- ContextMap Contexts;
- CFG::BuildOptions cfgBuildOptions;
-public:
- AnalysisContextManager(bool useUnoptimizedCFG = false,
- bool addImplicitDtors = false,
- bool addInitializers = false);
-
- ~AnalysisContextManager();
- AnalysisContext *getContext(const Decl *D, idx::TranslationUnit *TU = 0);
-
- bool getUseUnoptimizedCFG() const {
- return !cfgBuildOptions.PruneTriviallyFalseEdges;
- }
-
- CFG::BuildOptions &getCFGBuildOptions() {
- return cfgBuildOptions;
- }
-
- /// Discard all previously created AnalysisContexts.
- void clear();
+ LocationContextManager &getLocationContextManager();
};
class LocationContext : public llvm::FoldingSetNode {
@@ -202,13 +194,14 @@ public:
private:
ContextKind Kind;
- // AnalysisContext can't be const since some methods may modify its member.
- AnalysisContext *Ctx;
+ // AnalysisDeclContext can't be const since some methods may modify its
+ // member.
+ AnalysisDeclContext *Ctx;
const LocationContext *Parent;
protected:
- LocationContext(ContextKind k, AnalysisContext *ctx,
+ LocationContext(ContextKind k, AnalysisDeclContext *ctx,
const LocationContext *parent)
: Kind(k), Ctx(ctx), Parent(parent) {}
@@ -217,27 +210,27 @@ public:
ContextKind getKind() const { return Kind; }
- AnalysisContext *getAnalysisContext() const { return Ctx; }
+ AnalysisDeclContext *getAnalysisDeclContext() const { return Ctx; }
- idx::TranslationUnit *getTranslationUnit() const {
- return Ctx->getTranslationUnit();
+ idx::TranslationUnit *getTranslationUnit() const {
+ return Ctx->getTranslationUnit();
}
const LocationContext *getParent() const { return Parent; }
bool isParentOf(const LocationContext *LC) const;
- const Decl *getDecl() const { return getAnalysisContext()->getDecl(); }
+ const Decl *getDecl() const { return getAnalysisDeclContext()->getDecl(); }
- CFG *getCFG() const { return getAnalysisContext()->getCFG(); }
+ CFG *getCFG() const { return getAnalysisDeclContext()->getCFG(); }
template <typename T>
T *getAnalysis() const {
- return getAnalysisContext()->getAnalysis<T>();
+ return getAnalysisDeclContext()->getAnalysis<T>();
}
ParentMap &getParentMap() const {
- return getAnalysisContext()->getParentMap();
+ return getAnalysisDeclContext()->getParentMap();
}
const ImplicitParamDecl *getSelfDecl() const {
@@ -255,7 +248,7 @@ public:
public:
static void ProfileCommon(llvm::FoldingSetNodeID &ID,
ContextKind ck,
- AnalysisContext *ctx,
+ AnalysisDeclContext *ctx,
const LocationContext *parent,
const void *data);
};
@@ -271,8 +264,8 @@ class StackFrameContext : public LocationContext {
unsigned Index;
friend class LocationContextManager;
- StackFrameContext(AnalysisContext *ctx, const LocationContext *parent,
- const Stmt *s, const CFGBlock *blk,
+ StackFrameContext(AnalysisDeclContext *ctx, const LocationContext *parent,
+ const Stmt *s, const CFGBlock *blk,
unsigned idx)
: LocationContext(StackFrame, ctx, parent), CallSite(s),
Block(blk), Index(idx) {}
@@ -288,7 +281,7 @@ public:
void Profile(llvm::FoldingSetNodeID &ID);
- static void Profile(llvm::FoldingSetNodeID &ID, AnalysisContext *ctx,
+ static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
const LocationContext *parent, const Stmt *s,
const CFGBlock *blk, unsigned idx) {
ProfileCommon(ID, StackFrame, ctx, parent, s);
@@ -305,7 +298,7 @@ class ScopeContext : public LocationContext {
const Stmt *Enter;
friend class LocationContextManager;
- ScopeContext(AnalysisContext *ctx, const LocationContext *parent,
+ ScopeContext(AnalysisDeclContext *ctx, const LocationContext *parent,
const Stmt *s)
: LocationContext(Scope, ctx, parent), Enter(s) {}
@@ -314,7 +307,7 @@ public:
void Profile(llvm::FoldingSetNodeID &ID);
- static void Profile(llvm::FoldingSetNodeID &ID, AnalysisContext *ctx,
+ static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
const LocationContext *parent, const Stmt *s) {
ProfileCommon(ID, Scope, ctx, parent, s);
}
@@ -331,7 +324,8 @@ class BlockInvocationContext : public LocationContext {
friend class LocationContextManager;
- BlockInvocationContext(AnalysisContext *ctx, const LocationContext *parent,
+ BlockInvocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
const BlockDecl *bd)
: LocationContext(Block, ctx, parent), BD(bd) {}
@@ -342,7 +336,7 @@ public:
void Profile(llvm::FoldingSetNodeID &ID);
- static void Profile(llvm::FoldingSetNodeID &ID, AnalysisContext *ctx,
+ static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
const LocationContext *parent, const BlockDecl *bd) {
ProfileCommon(ID, Block, ctx, parent, bd);
}
@@ -357,12 +351,12 @@ class LocationContextManager {
public:
~LocationContextManager();
- const StackFrameContext *getStackFrame(AnalysisContext *ctx,
+ const StackFrameContext *getStackFrame(AnalysisDeclContext *ctx,
const LocationContext *parent,
const Stmt *s,
const CFGBlock *blk, unsigned idx);
- const ScopeContext *getScope(AnalysisContext *ctx,
+ const ScopeContext *getScope(AnalysisDeclContext *ctx,
const LocationContext *parent,
const Stmt *s);
@@ -370,10 +364,69 @@ public:
void clear();
private:
template <typename LOC, typename DATA>
- const LOC *getLocationContext(AnalysisContext *ctx,
+ const LOC *getLocationContext(AnalysisDeclContext *ctx,
const LocationContext *parent,
const DATA *d);
};
+class AnalysisDeclContextManager {
+ typedef llvm::DenseMap<const Decl*, AnalysisDeclContext*> ContextMap;
+
+ ContextMap Contexts;
+ LocationContextManager LocContexts;
+ CFG::BuildOptions cfgBuildOptions;
+
+public:
+ AnalysisDeclContextManager(bool useUnoptimizedCFG = false,
+ bool addImplicitDtors = false,
+ bool addInitializers = false);
+
+ ~AnalysisDeclContextManager();
+
+ AnalysisDeclContext *getContext(const Decl *D, idx::TranslationUnit *TU = 0);
+
+ bool getUseUnoptimizedCFG() const {
+ return !cfgBuildOptions.PruneTriviallyFalseEdges;
+ }
+
+ CFG::BuildOptions &getCFGBuildOptions() {
+ return cfgBuildOptions;
+ }
+
+ const StackFrameContext *getStackFrame(AnalysisDeclContext *Ctx,
+ LocationContext const *Parent,
+ const Stmt *S,
+ const CFGBlock *Blk,
+ unsigned Idx) {
+ return LocContexts.getStackFrame(Ctx, Parent, S, Blk, Idx);
+ }
+
+ // Get the top level stack frame.
+ const StackFrameContext *getStackFrame(Decl const *D,
+ idx::TranslationUnit *TU) {
+ return LocContexts.getStackFrame(getContext(D, TU), 0, 0, 0, 0);
+ }
+
+ // Get a stack frame with parent.
+ StackFrameContext const *getStackFrame(const Decl *D,
+ LocationContext const *Parent,
+ const Stmt *S,
+ const CFGBlock *Blk,
+ unsigned Idx) {
+ return LocContexts.getStackFrame(getContext(D), Parent, S, Blk, Idx);
+ }
+
+
+ /// Discard all previously created AnalysisDeclContexts.
+ void clear();
+
+private:
+ friend class AnalysisDeclContext;
+
+ LocationContextManager &getLocationContextManager() {
+ return LocContexts;
+ }
+};
+
} // end clang namespace
#endif
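
// The manager is now the front door: it owns the per-Decl contexts and the
// LocationContext table, and hands out stack frames directly. A sketch,
// where `D` is some function or method Decl obtained elsewhere:
#include "clang/Analysis/AnalysisContext.h"

void analyze(const clang::Decl *D) {
  clang::AnalysisDeclContextManager Mgr(/*useUnoptimizedCFG=*/false,
                                        /*addImplicitDtors=*/true,
                                        /*addInitializers=*/true);
  clang::AnalysisDeclContext *ADC = Mgr.getContext(D);
  // Top-level stack frame for path-sensitive engines; TU may be null.
  const clang::StackFrameContext *Top = Mgr.getStackFrame(D, /*TU=*/0);
  (void)ADC;
  (void)Top;
}
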
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h
index 16d31b4..d4e1f5f 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h
@@ -16,7 +16,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
- SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY,BRIEF,FULL) ENUM,
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
#define ANALYSISSTART
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#undef DIAG
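
// The DIAG() arity change above matters because the generated .inc files are
// consumed via the X-macro pattern: whatever DIAG() is in scope at #include
// time expands each record, so its parameter list must match the generator
// exactly. The pattern in miniature (entries are illustrative):
#define DIAG(ENUM, DESC) ENUM,
enum ExampleDiagIDs {
  DIAG(warn_example_one,  "first example warning")
  DIAG(warn_example_two,  "second example warning")
  NUM_EXAMPLE_DIAGS
};
#undef DIAG
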
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
index f191c80..27b22b8 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
@@ -67,22 +67,22 @@ protected:
CFGElement(Kind kind, const void *Ptr1, const void *Ptr2 = 0)
: Data1(const_cast<void*>(Ptr1), ((unsigned) kind) & 0x3),
- Data2(const_cast<void*>(Ptr2), (((unsigned) kind) >> 2) & 0x3) {}
+ Data2(const_cast<void*>(Ptr2), (((unsigned) kind) >> 2) & 0x3) {}
public:
CFGElement() {}
- Kind getKind() const {
+ Kind getKind() const {
unsigned x = Data2.getInt();
x <<= 2;
x |= Data1.getInt();
return (Kind) x;
}
-
+
bool isValid() const { return getKind() != Invalid; }
operator bool() const { return isValid(); }
-
+
template<class ElemTy> const ElemTy *getAs() const {
if (llvm::isa<ElemTy>(this))
return static_cast<const ElemTy*>(this);
@@ -96,7 +96,7 @@ class CFGStmt : public CFGElement {
public:
CFGStmt(Stmt *S) : CFGElement(Statement, S) {}
- const Stmt *getStmt() const {
+ const Stmt *getStmt() const {
return static_cast<const Stmt *>(Data1.getPointer());
}
@@ -125,9 +125,9 @@ public:
/// by compiler on various occasions.
class CFGImplicitDtor : public CFGElement {
protected:
- CFGImplicitDtor(Kind kind, const void *data1, const void *data2 = 0)
+ CFGImplicitDtor(Kind kind, const void *data1, const void *data2 = 0)
: CFGElement(kind, data1, data2) {
- assert(kind >= DTOR_BEGIN && kind <= DTOR_END);
+ assert(kind >= DTOR_BEGIN && kind <= DTOR_END);
}
public:
@@ -272,12 +272,12 @@ class CFGBlock {
ImplTy Impl;
public:
ElementList(BumpVectorContext &C) : Impl(C, 4) {}
-
+
typedef std::reverse_iterator<ImplTy::iterator> iterator;
typedef std::reverse_iterator<ImplTy::const_iterator> const_iterator;
typedef ImplTy::iterator reverse_iterator;
- typedef ImplTy::const_iterator const_reverse_iterator;
-
+ typedef ImplTy::const_iterator const_reverse_iterator;
+
void push_back(CFGElement e, BumpVectorContext &C) { Impl.push_back(e, C); }
reverse_iterator insert(reverse_iterator I, size_t Cnt, CFGElement E,
BumpVectorContext &C) {
@@ -286,7 +286,7 @@ class CFGBlock {
CFGElement front() const { return Impl.back(); }
CFGElement back() const { return Impl.front(); }
-
+
iterator begin() { return Impl.rbegin(); }
iterator end() { return Impl.rend(); }
const_iterator begin() const { return Impl.rbegin(); }
@@ -300,7 +300,7 @@ class CFGBlock {
assert(i < Impl.size());
return Impl[Impl.size() - 1 - i];
}
-
+
size_t size() const { return Impl.size(); }
bool empty() const { return Impl.empty(); }
};
@@ -344,10 +344,14 @@ class CFGBlock {
/// storage if the memory usage of CFGBlock becomes an issue.
unsigned HasNoReturnElement : 1;
+ /// Parent - The parent CFG that owns this CFGBlock.
+ CFG *Parent;
+
public:
- explicit CFGBlock(unsigned blockid, BumpVectorContext &C)
- : Elements(C), Label(NULL), Terminator(NULL), LoopTarget(NULL),
- BlockID(blockid), Preds(C, 1), Succs(C, 1), HasNoReturnElement(false) {}
+ explicit CFGBlock(unsigned blockid, BumpVectorContext &C, CFG *parent)
+ : Elements(C), Label(NULL), Terminator(NULL), LoopTarget(NULL),
+ BlockID(blockid), Preds(C, 1), Succs(C, 1), HasNoReturnElement(false),
+ Parent(parent) {}
~CFGBlock() {}
// Statement iterators
@@ -489,16 +493,19 @@ public:
unsigned getBlockID() const { return BlockID; }
- void dump(const CFG *cfg, const LangOptions &LO) const;
- void print(raw_ostream &OS, const CFG* cfg, const LangOptions &LO) const;
+ CFG *getParent() const { return Parent; }
+
+ void dump(const CFG *cfg, const LangOptions &LO, bool ShowColors = false) const;
+ void print(raw_ostream &OS, const CFG* cfg, const LangOptions &LO,
+ bool ShowColors) const;
void printTerminator(raw_ostream &OS, const LangOptions &LO) const;
-
+
void addSuccessor(CFGBlock *Block, BumpVectorContext &C) {
if (Block)
Block->Preds.push_back(this, C);
Succs.push_back(Block, C);
}
-
+
void appendStmt(Stmt *statement, BumpVectorContext &C) {
Elements.push_back(CFGStmt(statement), C);
}
@@ -515,7 +522,7 @@ public:
void appendMemberDtor(FieldDecl *FD, BumpVectorContext &C) {
Elements.push_back(CFGMemberDtor(FD), C);
}
-
+
void appendTemporaryDtor(CXXBindTemporaryExpr *E, BumpVectorContext &C) {
Elements.push_back(CFGTemporaryDtor(E), C);
}
@@ -554,22 +561,22 @@ public:
llvm::BitVector alwaysAddMask;
public:
typedef llvm::DenseMap<const Stmt *, const CFGBlock*> ForcedBlkExprs;
- ForcedBlkExprs **forcedBlkExprs;
+ ForcedBlkExprs **forcedBlkExprs;
bool PruneTriviallyFalseEdges;
bool AddEHEdges;
bool AddInitializers;
bool AddImplicitDtors;
-
+
bool alwaysAdd(const Stmt *stmt) const {
return alwaysAddMask[stmt->getStmtClass()];
}
-
+
BuildOptions &setAlwaysAdd(Stmt::StmtClass stmtClass, bool val = true) {
alwaysAddMask[stmtClass] = val;
return *this;
}
-
+
BuildOptions &setAllAlwaysAdd() {
alwaysAddMask.set();
return *this;
@@ -583,6 +590,55 @@ public:
,AddImplicitDtors(false) {}
};
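
A hedged usage sketch for BuildOptions: the helper name is hypothetical, and it assumes buildCFG's final parameter is a const CFG::BuildOptions& (the hunk below truncates the parameter list):

    #include "clang/Analysis/CFG.h"
    #include "llvm/ADT/OwningPtr.h"

    void buildWithDtors(const clang::Decl *D, clang::Stmt *Body,
                        clang::ASTContext &Ctx) {
      clang::CFG::BuildOptions BO;
      BO.AddImplicitDtors = true;                     // model implicit C++ dtors
      BO.setAlwaysAdd(clang::Stmt::DeclRefExprClass); // force these into blocks
      llvm::OwningPtr<clang::CFG> G(clang::CFG::buildCFG(D, Body, &Ctx, BO));
    }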
+ /// \brief Provides a custom implementation of the iterator class to have the
+ /// same interface as Function::iterator - the iterator returns a CFGBlock
+ /// (not a pointer to a CFGBlock).
+ class graph_iterator {
+ public:
+ typedef const CFGBlock value_type;
+ typedef value_type& reference;
+ typedef value_type* pointer;
+ typedef BumpVector<CFGBlock*>::iterator ImplTy;
+
+ graph_iterator(const ImplTy &i) : I(i) {}
+
+ bool operator==(const graph_iterator &X) const { return I == X.I; }
+ bool operator!=(const graph_iterator &X) const { return I != X.I; }
+
+ reference operator*() const { return **I; }
+ pointer operator->() const { return *I; }
+ operator CFGBlock* () { return *I; }
+
+ graph_iterator &operator++() { ++I; return *this; }
+ graph_iterator &operator--() { --I; return *this; }
+
+ private:
+ ImplTy I;
+ };
+
+ class const_graph_iterator {
+ public:
+ typedef const CFGBlock value_type;
+ typedef value_type& reference;
+ typedef value_type* pointer;
+ typedef BumpVector<CFGBlock*>::const_iterator ImplTy;
+
+ const_graph_iterator(const ImplTy &i) : I(i) {}
+
+ bool operator==(const const_graph_iterator &X) const { return I == X.I; }
+ bool operator!=(const const_graph_iterator &X) const { return I != X.I; }
+
+ reference operator*() const { return **I; }
+ pointer operator->() const { return *I; }
+ operator CFGBlock* () const { return *I; }
+
+ const_graph_iterator &operator++() { ++I; return *this; }
+ const_graph_iterator &operator--() { --I; return *this; }
+
+ private:
+ ImplTy I;
+ };
+
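
The payoff of the adaptor above: code written against a Function::iterator-style interface, where dereferencing yields a node value rather than a pointer, also instantiates for clang::CFG. A minimal sketch (hypothetical helper name):

    void visitAllBlocks(clang::CFG &G) {
      for (clang::CFG::graph_iterator I = G.nodes_begin(), E = G.nodes_end();
           I != E; ++I) {
        const clang::CFGBlock &B = *I;  // a block value, not a CFGBlock*
        (void)B.getBlockID();
      }
    }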
/// buildCFG - Builds a CFG from an AST. The responsibility to free the
/// constructed CFG belongs to the caller.
static CFG* buildCFG(const Decl *D, Stmt *AST, ASTContext *C,
@@ -605,7 +661,7 @@ public:
// Block Iterators
//===--------------------------------------------------------------------===//
- typedef BumpVector<CFGBlock*> CFGBlockListTy;
+ typedef BumpVector<CFGBlock*> CFGBlockListTy;
typedef CFGBlockListTy::iterator iterator;
typedef CFGBlockListTy::const_iterator const_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
@@ -619,6 +675,15 @@ public:
const_iterator begin() const { return Blocks.begin(); }
const_iterator end() const { return Blocks.end(); }
+ graph_iterator nodes_begin() { return graph_iterator(Blocks.begin()); }
+ graph_iterator nodes_end() { return graph_iterator(Blocks.end()); }
+ const_graph_iterator nodes_begin() const {
+ return const_graph_iterator(Blocks.begin());
+ }
+ const_graph_iterator nodes_end() const {
+ return const_graph_iterator(Blocks.end());
+ }
+
reverse_iterator rbegin() { return Blocks.rbegin(); }
reverse_iterator rend() { return Blocks.rend(); }
const_reverse_iterator rbegin() const { return Blocks.rbegin(); }
@@ -631,7 +696,7 @@ public:
CFGBlock * getIndirectGotoBlock() { return IndirectGotoBlock; }
const CFGBlock * getIndirectGotoBlock() const { return IndirectGotoBlock; }
-
+
typedef std::vector<const CFGBlock*>::const_iterator try_block_iterator;
try_block_iterator try_blocks_begin() const {
return TryDispatchBlocks.begin();
@@ -639,7 +704,7 @@ public:
try_block_iterator try_blocks_end() const {
return TryDispatchBlocks.end();
}
-
+
void addTryDispatchBlock(const CFGBlock *block) {
TryDispatchBlocks.push_back(block);
}
@@ -681,13 +746,18 @@ public:
/// start at 0).
unsigned getNumBlockIDs() const { return NumBlockIDs; }
+ /// size - Return the total number of CFGBlocks within the CFG.
+ /// This is simply an alias for getNumBlockIDs(), provided because the
+ /// dominator implementation needs such an interface.
+ unsigned size() const { return NumBlockIDs; }
+
//===--------------------------------------------------------------------===//
// CFG Debugging: Pretty-Printing and Visualization.
//===--------------------------------------------------------------------===//
void viewCFG(const LangOptions &LO) const;
- void print(raw_ostream &OS, const LangOptions &LO) const;
- void dump(const LangOptions &LO) const;
+ void print(raw_ostream &OS, const LangOptions &LO, bool ShowColors) const;
+ void dump(const LangOptions &LO, bool ShowColors) const;
//===--------------------------------------------------------------------===//
// Internal: constructors and data.
@@ -701,7 +771,7 @@ public:
llvm::BumpPtrAllocator& getAllocator() {
return BlkBVC.getAllocator();
}
-
+
BumpVectorContext &getBumpVectorContext() {
return BlkBVC;
}
@@ -717,11 +787,11 @@ private:
// It represents a map from Expr* to integers to record the set of
// block-level expressions and their "statement number" in the CFG.
void * BlkExprMap;
-
+
BumpVectorContext BlkBVC;
-
+
CFGBlockListTy Blocks;
-
+
/// C++ 'try' statements are modeled with an indirect dispatch block.
/// This is the collection of such blocks present in the CFG.
std::vector<const CFGBlock *> TryDispatchBlocks;
@@ -781,6 +851,20 @@ template <> struct GraphTraits< const ::clang::CFGBlock *> {
{ return N->succ_end(); }
};
+template <> struct GraphTraits<Inverse< ::clang::CFGBlock*> > {
+ typedef ::clang::CFGBlock NodeType;
+ typedef ::clang::CFGBlock::const_pred_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(Inverse< ::clang::CFGBlock*> G)
+ { return G.Graph; }
+
+ static inline ChildIteratorType child_begin(NodeType* N)
+ { return N->pred_begin(); }
+
+ static inline ChildIteratorType child_end(NodeType* N)
+ { return N->pred_end(); }
+};
+
template <> struct GraphTraits<Inverse<const ::clang::CFGBlock*> > {
typedef const ::clang::CFGBlock NodeType;
typedef ::clang::CFGBlock::const_pred_iterator ChildIteratorType;
@@ -800,37 +884,55 @@ template <> struct GraphTraits<Inverse<const ::clang::CFGBlock*> > {
template <> struct GraphTraits< ::clang::CFG* >
: public GraphTraits< ::clang::CFGBlock *> {
- typedef ::clang::CFG::iterator nodes_iterator;
+ typedef ::clang::CFG::graph_iterator nodes_iterator;
- static NodeType *getEntryNode(::clang::CFG* F) { return &F->getEntry(); }
- static nodes_iterator nodes_begin(::clang::CFG* F) { return F->begin(); }
- static nodes_iterator nodes_end(::clang::CFG* F) { return F->end(); }
+ static NodeType *getEntryNode(::clang::CFG* F) { return &F->getEntry(); }
+ static nodes_iterator nodes_begin(::clang::CFG* F) { return F->nodes_begin();}
+ static nodes_iterator nodes_end(::clang::CFG* F) { return F->nodes_end(); }
+ static unsigned size(::clang::CFG* F) { return F->size(); }
};
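
With nodes_begin/nodes_end and size() wired into GraphTraits, LLVM's generic graph algorithms can consume a clang CFG without knowing its layout. An editorial sketch of that facade in use:

    #include <cassert>
    typedef llvm::GraphTraits<clang::CFG *> GT;

    unsigned countNodes(clang::CFG *G) {
      unsigned N = 0;
      for (GT::nodes_iterator I = GT::nodes_begin(G), E = GT::nodes_end(G);
           I != E; ++I)
        ++N;
      assert(N == GT::size(G));  // size() exists for exactly this contract
      return N;
    }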
template <> struct GraphTraits<const ::clang::CFG* >
: public GraphTraits<const ::clang::CFGBlock *> {
- typedef ::clang::CFG::const_iterator nodes_iterator;
+ typedef ::clang::CFG::const_graph_iterator nodes_iterator;
static NodeType *getEntryNode( const ::clang::CFG* F) {
return &F->getEntry();
}
static nodes_iterator nodes_begin( const ::clang::CFG* F) {
- return F->begin();
+ return F->nodes_begin();
}
static nodes_iterator nodes_end( const ::clang::CFG* F) {
- return F->end();
+ return F->nodes_end();
+ }
+ static unsigned size(const ::clang::CFG* F) {
+ return F->size();
}
};
+template <> struct GraphTraits<Inverse< ::clang::CFG*> >
+ : public GraphTraits<Inverse< ::clang::CFGBlock*> > {
+
+ typedef ::clang::CFG::graph_iterator nodes_iterator;
+
+ static NodeType *getEntryNode( ::clang::CFG* F) { return &F->getExit(); }
+ static nodes_iterator nodes_begin( ::clang::CFG* F) {return F->nodes_begin();}
+ static nodes_iterator nodes_end( ::clang::CFG* F) { return F->nodes_end(); }
+};
+
template <> struct GraphTraits<Inverse<const ::clang::CFG*> >
: public GraphTraits<Inverse<const ::clang::CFGBlock*> > {
- typedef ::clang::CFG::const_iterator nodes_iterator;
+ typedef ::clang::CFG::const_graph_iterator nodes_iterator;
static NodeType *getEntryNode(const ::clang::CFG* F) { return &F->getExit(); }
- static nodes_iterator nodes_begin(const ::clang::CFG* F) { return F->begin();}
- static nodes_iterator nodes_end(const ::clang::CFG* F) { return F->end(); }
+ static nodes_iterator nodes_begin(const ::clang::CFG* F) {
+ return F->nodes_begin();
+ }
+ static nodes_iterator nodes_end(const ::clang::CFG* F) {
+ return F->nodes_end();
+ }
};
} // end llvm namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h b/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h
new file mode 100644
index 0000000..9b68073
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h
@@ -0,0 +1,257 @@
+//== CallGraph.h - AST-based Call graph ------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the AST-based CallGraph.
+//
+// A call graph for functions whose definitions/bodies are available in the
+// current translation unit. The graph has a "virtual" root node that contains
+// edges to all externally available functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_CALLGRAPH
+#define LLVM_CLANG_ANALYSIS_CALLGRAPH
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SetVector.h"
+
+namespace clang {
+class CallGraphNode;
+
+/// \brief The AST-based call graph.
+///
+/// The call graph extends itself with the given declarations by implementing
+/// the recursive AST visitor, which constructs the graph by visiting each
+/// declaration it is handed.
+class CallGraph : public RecursiveASTVisitor<CallGraph> {
+ friend class CallGraphNode;
+
+ typedef llvm::DenseMap<const Decl *, CallGraphNode *> FunctionMapTy;
+
+ /// FunctionMap owns all CallGraphNodes.
+ FunctionMapTy FunctionMap;
+
+ /// This is a virtual root node that has edges to all the global functions -
+ /// 'main' or functions accessible from other translation units.
+ CallGraphNode *Root;
+
+ /// The list of nodes that have no parent. These are unreachable from Root.
+ /// Declarations can get to this list due to imprecision in the graph; for
+ /// example, we do not track functions whose addresses were taken.
+ llvm::SetVector<CallGraphNode *> ParentlessNodes;
+
+public:
+ CallGraph();
+ ~CallGraph();
+
+ /// \brief Populate the call graph with the functions in the given
+ /// declaration.
+ ///
+ /// Recursively walks the declaration to find all the dependent Decls as well.
+ void addToCallGraph(Decl *D) {
+ TraverseDecl(D);
+ }
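
A hedged usage sketch: since TranslationUnitDecl is itself a Decl, handing it to addToCallGraph populates the graph for the whole translation unit. The helper name is hypothetical:

    #include "clang/AST/ASTContext.h"
    #include "clang/Analysis/CallGraph.h"

    void buildTUCallGraph(clang::ASTContext &Ctx) {
      clang::CallGraph CG;
      CG.addToCallGraph(Ctx.getTranslationUnitDecl());  // walks the whole TU
      CG.dump();                                        // nodes and call edges
    }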
+
+ /// \brief Determine if a declaration should be included in the graph.
+ static bool includeInGraph(const Decl *D);
+
+ /// \brief Lookup the node for the given declaration.
+ CallGraphNode *getNode(const Decl *) const;
+
+ /// \brief Lookup the node for the given declaration. If none found, insert
+ /// one into the graph.
+ CallGraphNode *getOrInsertNode(Decl *);
+
+ /// Iterators over all the elements in the graph. Note that the order of
+ /// iteration is non-deterministic.
+ typedef FunctionMapTy::iterator iterator;
+ typedef FunctionMapTy::const_iterator const_iterator;
+ iterator begin() { return FunctionMap.begin(); }
+ iterator end() { return FunctionMap.end(); }
+ const_iterator begin() const { return FunctionMap.begin(); }
+ const_iterator end() const { return FunctionMap.end(); }
+
+ /// \brief Get the number of nodes in the graph.
+ unsigned size() const { return FunctionMap.size(); }
+
+ /// \brief Get the virtual root of the graph; all the functions available
+ /// externally are represented as callees of the node.
+ CallGraphNode *getRoot() const { return Root; }
+
+ /// Iterators over all the nodes of the graph that have no parent. These are
+ /// the unreachable nodes: they are either unused, or we failed to add a call
+ /// edge for them because of the analysis imprecision.
+ typedef llvm::SetVector<CallGraphNode *>::iterator nodes_iterator;
+ typedef llvm::SetVector<CallGraphNode *>::const_iterator const_nodes_iterator;
+ nodes_iterator parentless_begin() { return ParentlessNodes.begin(); }
+ nodes_iterator parentless_end() { return ParentlessNodes.end(); }
+ const_nodes_iterator
+ parentless_begin() const { return ParentlessNodes.begin(); }
+ const_nodes_iterator
+ parentless_end() const { return ParentlessNodes.end(); }
+
+ void print(raw_ostream &os) const;
+ void dump() const;
+ void viewGraph() const;
+
+ /// Part of recursive declaration visitation.
+ bool VisitFunctionDecl(FunctionDecl *FD) {
+ // We skip function template definitions, as their semantics are
+ // only determined when they are instantiated.
+ if (includeInGraph(FD))
+ // If this function has external linkage, anything could call it.
+ // Note that we are not precise here. For example, the function could have
+ // its address taken.
+ addNodeForDecl(FD, FD->isGlobal());
+ return true;
+ }
+
+ /// Part of recursive declaration visitation.
+ bool VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+ if (includeInGraph(MD))
+ addNodeForDecl(MD, true);
+ return true;
+ }
+
+private:
+ /// \brief Add the given declaration to the call graph.
+ void addNodeForDecl(Decl *D, bool IsGlobal);
+
+ /// \brief Allocate a new node in the graph.
+ CallGraphNode *allocateNewNode(Decl *);
+};
+
+class CallGraphNode {
+public:
+ typedef CallGraphNode* CallRecord;
+
+private:
+ /// \brief The function/method declaration.
+ Decl *FD;
+
+ /// \brief The list of functions called from this node.
+ // A small vector might be more efficient here, since we are only tracking
+ // functions whose definition is in the current TU.
+ llvm::SmallVector<CallRecord, 5> CalledFunctions;
+
+public:
+ CallGraphNode(Decl *D) : FD(D) {}
+
+ typedef llvm::SmallVector<CallRecord, 5>::iterator iterator;
+ typedef llvm::SmallVector<CallRecord, 5>::const_iterator const_iterator;
+
+ /// Iterators through all the callees/children of the node.
+ inline iterator begin() { return CalledFunctions.begin(); }
+ inline iterator end() { return CalledFunctions.end(); }
+ inline const_iterator begin() const { return CalledFunctions.begin(); }
+ inline const_iterator end() const { return CalledFunctions.end(); }
+
+ inline bool empty() const {return CalledFunctions.empty(); }
+ inline unsigned size() const {return CalledFunctions.size(); }
+
+ void addCallee(CallGraphNode *N, CallGraph *CG) {
+ CalledFunctions.push_back(N);
+ CG->ParentlessNodes.remove(N);
+ }
+
+ Decl *getDecl() const { return FD; }
+
+ StringRef getName() const;
+
+ void print(raw_ostream &os) const;
+ void dump() const;
+};
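
An editorial sketch of walking the call edges out of a node. The graph can contain cycles (recursive functions), so a visited set guards against looping; it assumes SmallPtrSet::insert returns bool, as it did in this era of LLVM:

    #include "llvm/ADT/SmallPtrSet.h"

    void walkCallees(const clang::CallGraphNode *N,
                     llvm::SmallPtrSet<const clang::CallGraphNode *, 16> &Seen) {
      if (!Seen.insert(N))   // already visited: stop here
        return;
      for (clang::CallGraphNode::const_iterator I = N->begin(), E = N->end();
           I != E; ++I)
        walkCallees(*I, Seen);
    }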
+
+} // end clang namespace
+
+// Graph traits for iteration, viewing.
+namespace llvm {
+template <> struct GraphTraits<clang::CallGraphNode*> {
+ typedef clang::CallGraphNode NodeType;
+ typedef clang::CallGraphNode::CallRecord CallRecordTy;
+ typedef std::pointer_to_unary_function<CallRecordTy,
+ clang::CallGraphNode*> CGNDerefFun;
+ static NodeType *getEntryNode(clang::CallGraphNode *CGN) { return CGN; }
+ typedef mapped_iterator<NodeType::iterator, CGNDerefFun> ChildIteratorType;
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return map_iterator(N->begin(), CGNDerefFun(CGNDeref));
+ }
+ static inline ChildIteratorType child_end (NodeType *N) {
+ return map_iterator(N->end(), CGNDerefFun(CGNDeref));
+ }
+ static clang::CallGraphNode *CGNDeref(CallRecordTy P) {
+ return P;
+ }
+};
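
The specialization above uses the mapped_iterator idiom: wrap an iterator and apply a function on each dereference. Here CGNDeref is the identity, since CallRecord is already a CallGraphNode*, but the shape leaves room for richer CallRecord types. A self-contained sketch of the idiom with illustrative names:

    #include "llvm/ADT/STLExtras.h"  // mapped_iterator, map_iterator
    #include <functional>
    #include <vector>

    static int twice(int x) { return x * 2; }

    void mappedIteratorDemo() {
      std::vector<int> v(3, 7);
      typedef std::pointer_to_unary_function<int, int> Fn;
      typedef llvm::mapped_iterator<std::vector<int>::iterator, Fn> It;
      for (It I = llvm::map_iterator(v.begin(), Fn(twice)),
              E = llvm::map_iterator(v.end(), Fn(twice));
           I != E; ++I)
        (void)*I;  // yields 14 for each element
    }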
+
+template <> struct GraphTraits<const clang::CallGraphNode*> {
+ typedef const clang::CallGraphNode NodeType;
+ typedef NodeType::const_iterator ChildIteratorType;
+ static NodeType *getEntryNode(const clang::CallGraphNode *CGN) { return CGN; }
+ static inline ChildIteratorType child_begin(NodeType *N) { return N->begin();}
+ static inline ChildIteratorType child_end (NodeType *N) { return N->end(); }
+};
+
+template <> struct GraphTraits<clang::CallGraph*>
+ : public GraphTraits<clang::CallGraphNode*> {
+
+ static NodeType *getEntryNode(clang::CallGraph *CGN) {
+ return CGN->getRoot(); // Start at the external node!
+ }
+ typedef std::pair<const clang::Decl*, clang::CallGraphNode*> PairTy;
+ typedef std::pointer_to_unary_function<PairTy, clang::CallGraphNode&> DerefFun;
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef mapped_iterator<clang::CallGraph::iterator, DerefFun> nodes_iterator;
+
+ static nodes_iterator nodes_begin(clang::CallGraph *CG) {
+ return map_iterator(CG->begin(), DerefFun(CGdereference));
+ }
+ static nodes_iterator nodes_end (clang::CallGraph *CG) {
+ return map_iterator(CG->end(), DerefFun(CGdereference));
+ }
+ static clang::CallGraphNode &CGdereference(PairTy P) {
+ return *(P.second);
+ }
+
+ static unsigned size(clang::CallGraph *CG) {
+ return CG->size();
+ }
+};
+
+template <> struct GraphTraits<const clang::CallGraph*> :
+ public GraphTraits<const clang::CallGraphNode*> {
+ static NodeType *getEntryNode(const clang::CallGraph *CGN) {
+ return CGN->getRoot();
+ }
+ typedef std::pair<const clang::Decl*, clang::CallGraphNode*> PairTy;
+ typedef std::pointer_to_unary_function<PairTy, clang::CallGraphNode&> DerefFun;
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef mapped_iterator<clang::CallGraph::const_iterator,
+ DerefFun> nodes_iterator;
+
+ static nodes_iterator nodes_begin(const clang::CallGraph *CG) {
+ return map_iterator(CG->begin(), DerefFun(CGdereference));
+ }
+ static nodes_iterator nodes_end(const clang::CallGraph *CG) {
+ return map_iterator(CG->end(), DerefFun(CGdereference));
+ }
+ static clang::CallGraphNode &CGdereference(PairTy P) {
+ return *(P.second);
+ }
+
+ static unsigned size(const clang::CallGraph *CG) {
+ return CG->size();
+ }
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h b/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h
index fa8afcc..e6a2f13 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h
@@ -14,25 +14,15 @@
#ifndef LLVM_CLANG_ANALYSIS_DS_COCOA
#define LLVM_CLANG_ANALYSIS_DS_COCOA
-#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
namespace clang {
class FunctionDecl;
-class ObjCMethodDecl;
class QualType;
namespace ento {
namespace cocoa {
-
- enum NamingConvention { NoConvention, CreateRule, InitRule };
-
- NamingConvention deriveNamingConvention(Selector S, const ObjCMethodDecl *MD);
-
- static inline bool followsFundamentalRule(Selector S,
- const ObjCMethodDecl *MD) {
- return deriveNamingConvention(S, MD) == CreateRule;
- }
bool isRefType(QualType RetTy, StringRef Prefix,
StringRef Name = StringRef());
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
index 7ec4ecd..b2200c6 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
@@ -19,6 +19,7 @@
#include "clang/Analysis/CFG.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Casting.h"
#include "llvm/ADT/StringRef.h"
@@ -28,7 +29,7 @@
namespace clang {
-class AnalysisContext;
+class AnalysisDeclContext;
class FunctionDecl;
class LocationContext;
class ProgramPointTag;
@@ -51,44 +52,72 @@ public:
CallEnterKind,
CallExitKind,
MinPostStmtKind = PostStmtKind,
- MaxPostStmtKind = CallExitKind };
+ MaxPostStmtKind = CallExitKind,
+ EpsilonKind};
private:
- std::pair<const void *, const void *> Data;
- Kind K;
+ llvm::PointerIntPair<const void *, 2, unsigned> Data1;
+ llvm::PointerIntPair<const void *, 2, unsigned> Data2;
// The LocationContext could be NULL to allow ProgramPoint to be used in
// context-insensitive analysis.
- const LocationContext *L;
+ llvm::PointerIntPair<const LocationContext *, 2, unsigned> L;
+
const ProgramPointTag *Tag;
ProgramPoint();
protected:
- ProgramPoint(const void *P, Kind k, const LocationContext *l,
+ ProgramPoint(const void *P,
+ Kind k,
+ const LocationContext *l,
const ProgramPointTag *tag = 0)
- : Data(P, static_cast<const void*>(NULL)), K(k), L(l), Tag(tag) {}
-
- ProgramPoint(const void *P1, const void *P2, Kind k, const LocationContext *l,
+ : Data1(P, ((unsigned) k) & 0x3),
+ Data2(0, (((unsigned) k) >> 2) & 0x3),
+ L(l, (((unsigned) k) >> 4) & 0x3),
+ Tag(tag) {
+ assert(getKind() == k);
+ assert(getLocationContext() == l);
+ assert(getData1() == P);
+ }
+
+ ProgramPoint(const void *P1,
+ const void *P2,
+ Kind k,
+ const LocationContext *l,
const ProgramPointTag *tag = 0)
- : Data(P1, P2), K(k), L(l), Tag(tag) {}
+ : Data1(P1, ((unsigned) k) & 0x3),
+ Data2(P2, (((unsigned) k) >> 2) & 0x3),
+ L(l, (((unsigned) k) >> 4) & 0x3),
+ Tag(tag) {}
protected:
- const void *getData1() const { return Data.first; }
- const void *getData2() const { return Data.second; }
+ const void *getData1() const { return Data1.getPointer(); }
+ const void *getData2() const { return Data2.getPointer(); }
+ void setData2(const void *d) { Data2.setPointer(d); }
public:
/// Create a new ProgramPoint object that is the same as the original
/// except for using the specified tag value.
ProgramPoint withTag(const ProgramPointTag *tag) const {
- return ProgramPoint(Data.first, Data.second, K, L, tag);
+ return ProgramPoint(getData1(), getData2(), getKind(),
+ getLocationContext(), tag);
}
- Kind getKind() const { return K; }
+ Kind getKind() const {
+ unsigned x = L.getInt();
+ x <<= 2;
+ x |= Data2.getInt();
+ x <<= 2;
+ x |= Data1.getInt();
+ return (Kind) x;
+ }
const ProgramPointTag *getTag() const { return Tag; }
- const LocationContext *getLocationContext() const { return L; }
+ const LocationContext *getLocationContext() const {
+ return L.getPointer();
+ }
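
ProgramPoint's Kind no longer fits in the spare bits of a single pointer, so it is spread two bits apiece across the low bits of three PointerIntPairs, six bits in all; getKind() above reverses the split. An editorial sketch of the round trip with an illustrative value:

    #include <cassert>
    int main() {
      unsigned k  = 0x2A;                 // any kind value that fits in 6 bits
      unsigned f1 = k & 0x3;              // low bits    -> Data1's int field
      unsigned f2 = (k >> 2) & 0x3;       // middle bits -> Data2's int field
      unsigned f3 = (k >> 4) & 0x3;       // high bits   -> L's int field
      unsigned rebuilt = (((f3 << 2) | f2) << 2) | f1;  // getKind()'s recipe
      assert(rebuilt == k);
      return 0;
    }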
// For use with DenseMap. This hash is probably slow.
unsigned getHashValue() const {
@@ -100,25 +129,30 @@ public:
static bool classof(const ProgramPoint*) { return true; }
bool operator==(const ProgramPoint & RHS) const {
- return K == RHS.K && Data == RHS.Data && L == RHS.L && Tag == RHS.Tag;
+ return Data1 == RHS.Data1 &&
+ Data2 == RHS.Data2 &&
+ L == RHS.L &&
+ Tag == RHS.Tag;
}
bool operator!=(const ProgramPoint &RHS) const {
- return K != RHS.K || Data != RHS.Data || L != RHS.L || Tag != RHS.Tag;
+ return Data1 != RHS.Data1 ||
+ Data2 != RHS.Data2 ||
+ L != RHS.L ||
+ Tag != RHS.Tag;
}
void Profile(llvm::FoldingSetNodeID& ID) const {
- ID.AddInteger((unsigned) K);
- ID.AddPointer(Data.first);
- ID.AddPointer(Data.second);
- ID.AddPointer(L);
+ ID.AddInteger((unsigned) getKind());
+ ID.AddPointer(getData1());
+ ID.AddPointer(getData2());
+ ID.AddPointer(getLocationContext());
ID.AddPointer(Tag);
}
static ProgramPoint getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
const LocationContext *LC,
const ProgramPointTag *tag);
-
};
class BlockEntrance : public ProgramPoint {
@@ -195,7 +229,7 @@ public:
class PostStmt : public StmtPoint {
protected:
PostStmt(const Stmt *S, const void *data, Kind k, const LocationContext *L,
- const ProgramPointTag *tag =0)
+ const ProgramPointTag *tag = 0)
: StmtPoint(S, data, k, L, tag) {}
public:
@@ -270,15 +304,29 @@ public:
}
};
+/// \brief Represents a program point after a store evaluation.
class PostStore : public PostStmt {
public:
- PostStore(const Stmt *S, const LocationContext *L,
+ /// Construct the post store point.
+ /// \param Loc can be used to store the information about the location
+ /// used, in the form it was written in the code.
+ PostStore(const Stmt *S, const LocationContext *L, const void *Loc,
const ProgramPointTag *tag = 0)
- : PostStmt(S, PostStoreKind, L, tag) {}
+ : PostStmt(S, PostStoreKind, L, tag) {
+ assert(getData2() == 0);
+ setData2(Loc);
+ }
static bool classof(const ProgramPoint* Location) {
return Location->getKind() == PostStoreKind;
}
+
+ /// \brief Returns the information about the location used in the store,
+ /// in the form it was written in the code.
+ const void *getLocationValue() const {
+ return getData2();
+ }
+
};
class PostLValue : public PostStmt {
@@ -365,6 +413,21 @@ public:
}
};
+/// This is a meta program point, which should be skipped by all the diagnostic
+/// reasoning and similar clients.
+class EpsilonPoint : public ProgramPoint {
+public:
+ EpsilonPoint(const LocationContext *L, const void *Data1,
+ const void *Data2 = 0, const ProgramPointTag *tag = 0)
+ : ProgramPoint(Data1, Data2, EpsilonKind, L, tag) {}
+
+ const void *getData() const { return getData1(); }
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == EpsilonKind;
+ }
+};
+
/// ProgramPoints can be "tagged" as representing points specific to a given
/// analysis entity. Tags are abstract annotations, with an associated
/// description and potentially other information.
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
index 5c5ec2d..97eb287 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
@@ -66,6 +66,7 @@ public:
DISPATCH_CASE(Record) // FIXME: Refine. VisitStructDecl?
DISPATCH_CASE(CXXRecord)
DISPATCH_CASE(Enum)
+ DISPATCH_CASE(Field)
DISPATCH_CASE(UsingDirective)
DISPATCH_CASE(Using)
default:
@@ -82,8 +83,8 @@ public:
DEFAULT_DISPATCH(Typedef)
DEFAULT_DISPATCH(Record)
DEFAULT_DISPATCH(Enum)
+ DEFAULT_DISPATCH(Field)
DEFAULT_DISPATCH(ObjCInterface)
- DEFAULT_DISPATCH(ObjCClass)
DEFAULT_DISPATCH(ObjCMethod)
DEFAULT_DISPATCH(ObjCProtocol)
DEFAULT_DISPATCH(ObjCCategory)
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h b/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h
new file mode 100644
index 0000000..7e77435
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h
@@ -0,0 +1,39 @@
+//===--- AllDiagnostics.h - Aggregate Diagnostic headers --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file includes all the separate Diagnostic headers & some related
+// helpers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ALL_DIAGNOSTICS_H
+#define LLVM_CLANG_ALL_DIAGNOSTICS_H
+
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/Analysis/AnalysisDiagnostic.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Serialization/SerializationDiagnostic.h"
+
+namespace clang {
+template <size_t SizeOfStr, typename FieldType>
+class StringSizerHelper {
+ char FIELD_TOO_SMALL[SizeOfStr <= FieldType(~0U) ? 1 : -1];
+public:
+ enum { Size = SizeOfStr };
+};
+} // end namespace clang
+
+#define STR_SIZE(str, fieldTy) clang::StringSizerHelper<sizeof(str)-1, \
+ fieldTy>::Size
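
StringSizerHelper above is a pre-C++11 static assertion: when the string does not fit in the field type, FIELD_TOO_SMALL gets size -1 and the program is ill-formed. A sketch of STR_SIZE in use, with an illustrative field:

    struct PackedRecord {
      unsigned Length : 8;
    };

    void store(PackedRecord &R) {
      // Compiles: 13 characters fit in unsigned char (max 255)...
      R.Length = STR_SIZE("short literal", unsigned char);
      // ...whereas a literal longer than 255 characters would give the
      // FIELD_TOO_SMALL array size -1, which is a compile error.
    }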
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
index 2a4ba5c..e8e0f35 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
@@ -93,6 +93,10 @@ class Attr {
list<string> Namespaces = [];
// Set to true for attributes with arguments which require delayed parsing.
bit LateParsed = 0;
+ // Set to true for attributes which must be instantiated within templates.
+ bit TemplateDependent = 0;
+ // Set to true for attributes which have a handler in Sema.
+ bit SemaHandler = 1;
// Any additional text that should be included verbatim in the class.
code AdditionalMembers = [{}];
}
@@ -122,6 +126,7 @@ def Aligned : InheritableAttr {
def AlignMac68k : InheritableAttr {
let Spellings = [];
+ let SemaHandler = 0;
}
def AlwaysInline : InheritableAttr {
@@ -140,13 +145,14 @@ def Annotate : InheritableParamAttr {
def AsmLabel : InheritableAttr {
let Spellings = [];
let Args = [StringArgument<"Label">];
+ let SemaHandler = 0;
}
def Availability : InheritableAttr {
let Spellings = ["availability"];
let Args = [IdentifierArgument<"platform">, VersionArgument<"introduced">,
VersionArgument<"deprecated">, VersionArgument<"obsoleted">,
- BoolArgument<"unavailable">];
+ BoolArgument<"unavailable">, StringArgument<"message">];
let AdditionalMembers =
[{static llvm::StringRef getPrettyPlatformName(llvm::StringRef Platform) {
return llvm::StringSwitch<llvm::StringRef>(Platform)
@@ -274,6 +280,7 @@ def FastCall : InheritableAttr {
def Final : InheritableAttr {
let Spellings = [];
+ let SemaHandler = 0;
}
def MsStruct : InheritableAttr {
@@ -315,6 +322,7 @@ def Malloc : InheritableAttr {
def MaxFieldAlignment : InheritableAttr {
let Spellings = [];
let Args = [UnsignedArgument<"Alignment">];
+ let SemaHandler = 0;
}
def MayAlias : InheritableAttr {
@@ -324,14 +332,17 @@ def MayAlias : InheritableAttr {
def MSP430Interrupt : InheritableAttr {
let Spellings = [];
let Args = [UnsignedArgument<"Number">];
+ let SemaHandler = 0;
}
def MBlazeInterruptHandler : InheritableAttr {
let Spellings = [];
+ let SemaHandler = 0;
}
def MBlazeSaveVolatiles : InheritableAttr {
let Spellings = [];
+ let SemaHandler = 0;
}
def Naked : InheritableAttr {
@@ -441,12 +452,18 @@ def ObjCReturnsInnerPointer : Attr {
let Subjects = [ObjCMethod];
}
+def ObjCRootClass : Attr {
+ let Spellings = ["objc_root_class"];
+ let Subjects = [ObjCInterface];
+}
+
def Overloadable : Attr {
let Spellings = ["overloadable"];
}
def Override : InheritableAttr {
let Spellings = [];
+ let SemaHandler = 0;
}
def Ownership : InheritableAttr {
@@ -522,6 +539,12 @@ def Unavailable : InheritableAttr {
def ArcWeakrefUnavailable : InheritableAttr {
let Spellings = ["objc_arc_weak_reference_unavailable"];
+ let Subjects = [ObjCInterface];
+}
+
+def ObjCRequiresPropertyDefs : InheritableAttr {
+ let Spellings = ["objc_requires_property_definitions"];
+ let Subjects = [ObjCInterface];
}
def Unused : InheritableAttr {
@@ -570,6 +593,10 @@ def X86ForceAlignArgPointer : InheritableAttr {
let Spellings = [];
}
+// AddressSafety attribute (e.g. for AddressSanitizer)
+def NoAddressSafetyAnalysis : InheritableAttr {
+ let Spellings = ["no_address_safety_analysis"];
+}
// C/C++ Thread safety attributes (e.g. for deadlock, data race checking)
@@ -597,36 +624,42 @@ def GuardedBy : InheritableAttr {
let Spellings = ["guarded_by"];
let Args = [ExprArgument<"Arg">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def PtGuardedBy : InheritableAttr {
let Spellings = ["pt_guarded_by"];
let Args = [ExprArgument<"Arg">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def AcquiredAfter : InheritableAttr {
let Spellings = ["acquired_after"];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def AcquiredBefore : InheritableAttr {
let Spellings = ["acquired_before"];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def ExclusiveLockFunction : InheritableAttr {
let Spellings = ["exclusive_lock_function"];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def SharedLockFunction : InheritableAttr {
let Spellings = ["shared_lock_function"];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
// The first argument is an integer or boolean value specifying the return value
@@ -635,6 +668,7 @@ def ExclusiveTrylockFunction : InheritableAttr {
let Spellings = ["exclusive_trylock_function"];
let Args = [ExprArgument<"SuccessValue">, VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
// The first argument is an integer or boolean value specifying the return value
@@ -643,34 +677,40 @@ def SharedTrylockFunction : InheritableAttr {
let Spellings = ["shared_trylock_function"];
let Args = [ExprArgument<"SuccessValue">, VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def UnlockFunction : InheritableAttr {
let Spellings = ["unlock_function"];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def LockReturned : InheritableAttr {
let Spellings = ["lock_returned"];
let Args = [ExprArgument<"Arg">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def LocksExcluded : InheritableAttr {
let Spellings = ["locks_excluded"];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def ExclusiveLocksRequired : InheritableAttr {
let Spellings = ["exclusive_locks_required"];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
def SharedLocksRequired : InheritableAttr {
let Spellings = ["shared_locks_required"];
let Args = [VariadicExprArgument<"Args">];
let LateParsed = 1;
+ let TemplateDependent = 1;
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
index da5aadf..d1af218 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
@@ -108,6 +108,8 @@ BUILTIN(__builtin_huge_vall, "Ld", "nc")
BUILTIN(__builtin_inf , "d" , "nc")
BUILTIN(__builtin_inff , "f" , "nc")
BUILTIN(__builtin_infl , "Ld" , "nc")
+BUILTIN(__builtin_labs , "LiLi" , "Fnc")
+BUILTIN(__builtin_llabs, "LLiLLi", "Fnc")
BUILTIN(__builtin_ldexp , "ddi" , "Fnc")
BUILTIN(__builtin_ldexpf, "ffi" , "Fnc")
BUILTIN(__builtin_ldexpl, "LdLdi", "Fnc")
@@ -363,10 +365,12 @@ BUILTIN(__builtin_signbitf, "if", "nc")
BUILTIN(__builtin_signbitl, "iLd", "nc")
// Builtins for arithmetic.
+BUILTIN(__builtin_clzs , "iUs" , "nc")
BUILTIN(__builtin_clz , "iUi" , "nc")
BUILTIN(__builtin_clzl , "iULi" , "nc")
BUILTIN(__builtin_clzll, "iULLi", "nc")
// TODO: int clzimax(uintmax_t)
+BUILTIN(__builtin_ctzs , "iUs" , "nc")
BUILTIN(__builtin_ctz , "iUi" , "nc")
BUILTIN(__builtin_ctzl , "iULi" , "nc")
BUILTIN(__builtin_ctzll, "iULLi", "nc")
@@ -448,12 +452,15 @@ BUILTIN(__builtin_extend_pointer, "ULLiv*", "n") // _Unwind_Word == uint64_t
// GCC Object size checking builtins
BUILTIN(__builtin_object_size, "zvC*i", "n")
BUILTIN(__builtin___memcpy_chk, "v*v*vC*zz", "nF")
+BUILTIN(__builtin___memccpy_chk, "v*v*vC*iz", "nF")
BUILTIN(__builtin___memmove_chk, "v*v*vC*zz", "nF")
BUILTIN(__builtin___mempcpy_chk, "v*v*vC*zz", "nF")
BUILTIN(__builtin___memset_chk, "v*v*izz", "nF")
BUILTIN(__builtin___stpcpy_chk, "c*c*cC*z", "nF")
BUILTIN(__builtin___strcat_chk, "c*c*cC*z", "nF")
BUILTIN(__builtin___strcpy_chk, "c*c*cC*z", "nF")
+BUILTIN(__builtin___strlcat_chk, "c*c*cC*zz", "nF")
+BUILTIN(__builtin___strlcpy_chk, "c*c*cC*zz", "nF")
BUILTIN(__builtin___strncat_chk, "c*c*cC*zz", "nF")
BUILTIN(__builtin___strncpy_chk, "c*c*cC*zz", "nF")
BUILTIN(__builtin___stpncpy_chk, "c*c*cC*zz", "nF")
@@ -587,18 +594,57 @@ BUILTIN(__sync_swap_4, "iiD*i.", "tn")
BUILTIN(__sync_swap_8, "LLiLLiD*LLi.", "tn")
BUILTIN(__sync_swap_16, "LLLiLLLiD*LLLi.", "tn")
-BUILTIN(__atomic_load, "v.", "t")
-BUILTIN(__atomic_store, "v.", "t")
-BUILTIN(__atomic_exchange, "v.", "t")
-BUILTIN(__atomic_compare_exchange_strong, "v.", "t")
-BUILTIN(__atomic_compare_exchange_weak, "v.", "t")
-BUILTIN(__atomic_fetch_add, "v.", "t")
-BUILTIN(__atomic_fetch_sub, "v.", "t")
-BUILTIN(__atomic_fetch_and, "v.", "t")
-BUILTIN(__atomic_fetch_or, "v.", "t")
-BUILTIN(__atomic_fetch_xor, "v.", "t")
+// Some of our atomic builtins are handled by AtomicExpr rather than
+// as normal builtin CallExprs. This macro is used for such builtins.
+#ifndef ATOMIC_BUILTIN
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS)
+#endif
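
This is the classic X-macro override hook: a consumer that must special-case the AtomicExpr-backed builtins defines ATOMIC_BUILTIN before including the .def, while everyone else inherits the BUILTIN fallback above. A hypothetical consumer, sketched:

    #define BUILTIN(ID, TYPE, ATTRS)        /* ordinary CallExpr path */
    #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) /* AtomicExpr path */
    #include "clang/Basic/Builtins.def"
    #undef BUILTIN  // ATOMIC_BUILTIN is #undef'd by Builtins.def itself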
+
+// C11 _Atomic operations for <stdatomic.h>.
+ATOMIC_BUILTIN(__c11_atomic_init, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_compare_exchange_strong, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_compare_exchange_weak, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+BUILTIN(__c11_atomic_thread_fence, "vi", "n")
+BUILTIN(__c11_atomic_signal_fence, "vi", "n")
+BUILTIN(__c11_atomic_is_lock_free, "iz", "n")
+
+// GNU atomic builtins.
+ATOMIC_BUILTIN(__atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__atomic_load_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__atomic_store_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__atomic_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_compare_exchange, "v.", "t")
+ATOMIC_BUILTIN(__atomic_compare_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_nand, "v.", "t")
+ATOMIC_BUILTIN(__atomic_add_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_sub_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_and_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_or_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_xor_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_nand_fetch, "v.", "t")
+BUILTIN(__atomic_test_and_set, "bvD*i", "n")
+BUILTIN(__atomic_clear, "vvD*i", "n")
BUILTIN(__atomic_thread_fence, "vi", "n")
BUILTIN(__atomic_signal_fence, "vi", "n")
+BUILTIN(__atomic_always_lock_free, "izvCD*", "n")
+BUILTIN(__atomic_is_lock_free, "izvCD*", "n")
+
+#undef ATOMIC_BUILTIN
// Non-overloaded atomic builtins.
BUILTIN(__sync_synchronize, "v.", "n")
@@ -629,9 +675,12 @@ LIBBUILTIN(malloc, "v*z", "f", "stdlib.h", ALL_LANGUAGES)
LIBBUILTIN(realloc, "v*v*z", "f", "stdlib.h", ALL_LANGUAGES)
// C99 string.h
LIBBUILTIN(memcpy, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memcmp, "ivC*vC*z", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(memmove, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(strcpy, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(strncpy, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strcmp, "icC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strncmp, "icC*cC*z", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(strcat, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(strncat, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(strxfrm, "zc*cC*z", "f", "string.h", ALL_LANGUAGES)
@@ -655,7 +704,12 @@ LIBBUILTIN(vprintf, "icC*a", "fP:0:", "stdio.h", ALL_LANGUAGES)
LIBBUILTIN(vfprintf, "i.", "fP:1:", "stdio.h", ALL_LANGUAGES)
LIBBUILTIN(vsnprintf, "ic*zcC*a", "fP:2:", "stdio.h", ALL_LANGUAGES)
LIBBUILTIN(vsprintf, "ic*cC*a", "fP:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(scanf, "icC*.", "fs:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(scanf, "icC*R.", "fs:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(fscanf, "iP*RcC*R.", "fs:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(sscanf, "icC*RcC*R.", "fs:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vscanf, "icC*Ra", "fS:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vfscanf, "iP*RcC*Ra", "fS:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vsscanf, "icC*RcC*Ra", "fS:1:", "stdio.h", ALL_LANGUAGES)
// C99
LIBBUILTIN(longjmp, "vJi", "fr", "setjmp.h", ALL_LANGUAGES)
@@ -671,6 +725,8 @@ LIBBUILTIN(strndup, "c*cC*z", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(index, "c*cC*i", "f", "strings.h", ALL_LANGUAGES)
LIBBUILTIN(rindex, "c*cC*i", "f", "strings.h", ALL_LANGUAGES)
LIBBUILTIN(bzero, "vv*z", "f", "strings.h", ALL_LANGUAGES)
+LIBBUILTIN(strcasecmp, "icC*cC*", "f", "strings.h", ALL_LANGUAGES)
+LIBBUILTIN(strncasecmp, "icC*cC*z", "f", "strings.h", ALL_LANGUAGES)
// POSIX unistd.h
LIBBUILTIN(_exit, "vi", "fr", "unistd.h", ALL_LANGUAGES)
LIBBUILTIN(vfork, "i", "fj", "unistd.h", ALL_LANGUAGES)
@@ -698,6 +754,8 @@ LIBBUILTIN(objc_msgSend, "GGH.", "f", "objc/message.h", OBJC_LANG)
// long double objc_msgSend_fpret(id self, SEL op, ...)
LIBBUILTIN(objc_msgSend_fpret, "LdGH.", "f", "objc/message.h", OBJC_LANG)
+// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
+LIBBUILTIN(objc_msgSend_fp2ret, "XLdGH.", "f", "objc/message.h", OBJC_LANG)
// id objc_msgSend_stret (id, SEL, ...)
LIBBUILTIN(objc_msgSend_stret, "GGH.", "f", "objc/message.h", OBJC_LANG)
// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
@@ -712,34 +770,39 @@ LIBBUILTIN(objc_getMetaClass, "GcC*", "f", "objc/runtime.h", OBJC_LANG)
LIBBUILTIN(objc_enumerationMutation, "vG", "f", "objc/runtime.h", OBJC_LANG)
// id objc_read_weak(id *location)
-LIBBUILTIN(objc_read_weak, "GG*", "f", "/objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_read_weak, "GG*", "f", "objc/objc-auto.h", OBJC_LANG)
// id objc_assign_weak(id value, id *location)
-LIBBUILTIN(objc_assign_weak, "GGG*", "f", "/objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_assign_weak, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
// id objc_assign_ivar(id value, id dest, ptrdiff_t offset)
-LIBBUILTIN(objc_assign_ivar, "GGGY", "f", "/objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_assign_ivar, "GGGY", "f", "objc/objc-auto.h", OBJC_LANG)
// id objc_assign_global(id val, id *dest)
-LIBBUILTIN(objc_assign_global, "GGG*", "f", "/objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_assign_global, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
// id objc_assign_strongCast(id val, id *dest)
-LIBBUILTIN(objc_assign_strongCast, "GGG*", "f", "/objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_assign_strongCast, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
// id objc_exception_extract(void *localExceptionData)
-LIBBUILTIN(objc_exception_extract, "Gv*", "f", "/objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_extract, "Gv*", "f", "objc/objc-exception.h", OBJC_LANG)
// void objc_exception_try_enter(void *localExceptionData)
-LIBBUILTIN(objc_exception_try_enter, "vv*", "f", "/objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_try_enter, "vv*", "f", "objc/objc-exception.h", OBJC_LANG)
// void objc_exception_try_exit(void *localExceptionData)
-LIBBUILTIN(objc_exception_try_exit, "vv*", "f", "/objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_try_exit, "vv*", "f", "objc/objc-exception.h", OBJC_LANG)
// int objc_exception_match(Class exceptionClass, id exception)
-LIBBUILTIN(objc_exception_match, "iGG", "f", "/objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_match, "iGG", "f", "objc/objc-exception.h", OBJC_LANG)
// void objc_exception_throw(id exception)
-LIBBUILTIN(objc_exception_throw, "vG", "f", "/objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_throw, "vG", "f", "objc/objc-exception.h", OBJC_LANG)
// int objc_sync_enter(id obj)
-LIBBUILTIN(objc_sync_enter, "iG", "f", "/objc/objc-sync.h", OBJC_LANG)
+LIBBUILTIN(objc_sync_enter, "iG", "f", "objc/objc-sync.h", OBJC_LANG)
// int objc_sync_exit(id obj)
-LIBBUILTIN(objc_sync_exit, "iG", "f", "/objc/objc-sync.h", OBJC_LANG)
+LIBBUILTIN(objc_sync_exit, "iG", "f", "objc/objc-sync.h", OBJC_LANG)
BUILTIN(__builtin_objc_memmove_collectable, "v*v*vC*z", "nF")
+// void NSLog(NSString *fmt, ...)
+LIBBUILTIN(NSLog, "vG.", "fp:0:", "Foundation/NSObjCRuntime.h", OBJC_LANG)
+// void NSLogv(NSString *fmt, va_list args)
+LIBBUILTIN(NSLogv, "vGa", "fP:0:", "Foundation/NSObjCRuntime.h", OBJC_LANG)
+
// Builtin math library functions
LIBBUILTIN(pow, "ddd", "fe", "math.h", ALL_LANGUAGES)
LIBBUILTIN(powl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def
new file mode 100644
index 0000000..334224f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def
@@ -0,0 +1,689 @@
+//==--- BuiltinsHexagon.def - Hexagon Builtin function database --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Hexagon-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+BUILTIN(__builtin_HEXAGON_C2_cmpeq, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgt, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtu, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpeqp, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtp, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtup, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_bitsset, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_bitsclr, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpeqi, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgti, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtui, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgei, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgeui, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmplt, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpltu, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_bitsclri, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_and, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_or, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_xor, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_andn, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_not, "bi", "")
+BUILTIN(__builtin_HEXAGON_C2_orn, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_pxfer_map, "bi", "")
+BUILTIN(__builtin_HEXAGON_C2_any8, "bi", "")
+BUILTIN(__builtin_HEXAGON_C2_all8, "bi", "")
+BUILTIN(__builtin_HEXAGON_C2_vitpack, "iii", "")
+BUILTIN(__builtin_HEXAGON_C2_mux, "iiii", "")
+BUILTIN(__builtin_HEXAGON_C2_muxii, "iiii", "")
+BUILTIN(__builtin_HEXAGON_C2_muxir, "iiii", "")
+BUILTIN(__builtin_HEXAGON_C2_muxri, "iiii", "")
+BUILTIN(__builtin_HEXAGON_C2_vmux, "LLiiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_mask, "LLii", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpbeq, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpbgtu, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpheq, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmphgt, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmphgtu, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpweq, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpwgt, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpwgtu, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_tfrpr, "ii", "")
+BUILTIN(__builtin_HEXAGON_C2_tfrrp, "bi", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s0, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s1, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s0, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s1, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s0, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s1, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s0, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s1, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s1, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s1, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s1, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s1, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpysmi, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_macsip, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_macsin, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_acc_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_nac_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_acc_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_nac_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_up, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_up, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_rnd_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyi, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyui, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_maci, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_acci, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_accii, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_nacci, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_naccii, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_subacc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2s_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2s_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0pack, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1pack, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrmac_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrmpy_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s0, "iLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s1, "iLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmacs_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmacs_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmpys_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmpys_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacs_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacs_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacsc_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacsc_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpys_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpys_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpysc_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpysc_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cnacs_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cnacs_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cnacsc_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cnacsc_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_acc_s1, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1rp, "iLLii", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_rs0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_rs1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_rs0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_rs1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_hmmpyl_rs1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_hmmpyh_rs1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0c, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0c, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_cmaci_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacr_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0c, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0c, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyi_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyr_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_i, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_r, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_i, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_r, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_i, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_r, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vcrotate, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_A2_add, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_sub, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addsat, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subsat, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addi, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_aslh, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_asrh, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_addp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_addpsat, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_addsp, "LLiiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_subp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_neg, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_negsat, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_abs, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_abssat, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_vconj, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_negp, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_absp, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_max, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_maxu, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_A2_min, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_minu, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_A2_maxp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_maxup, "ULLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_minp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_minup, "ULLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_tfr, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_tfrsi, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_tfrp, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_tfrpi, "LLii", "")
+BUILTIN(__builtin_HEXAGON_A2_zxtb, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_sxtb, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_zxth, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_sxth, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_combinew, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_A2_combineii, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_A2_combine_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_combine_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_combine_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_combine_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_tfril, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_tfrih, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_and, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_or, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_xor, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_not, "ii", "")
+BUILTIN(__builtin_HEXAGON_M2_xor_xacc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_A2_subri, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_andir, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_orir, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_andp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_orp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_xorp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_notp, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_sxtw, "LLii", "")
+BUILTIN(__builtin_HEXAGON_A2_sat, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_sath, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_satuh, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_satub, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_satb, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddubs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddhs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vadduhs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddws, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_svavgh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svavghs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svnavgh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svaddh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svaddhs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svadduhs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svsubh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svsubhs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svsubuhs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_vraddub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vraddub_acc, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vradduh, "iLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsububs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubhs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubuhs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubws, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vabsh, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vabshsat, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vabsw, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vabswsat, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vabsdiffw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vabsdiffh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vrsadub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vrsadub_acc, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavguh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavgh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavgw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgwr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavgwr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgwcr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavgwcr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavghcr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavghcr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavguw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavguwr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgubr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavguhr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavghr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavghr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminuh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxuh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminuw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxuw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_xacc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_xacc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_xacc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_xacc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_addasl_rrri, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_valignib, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_valignrb, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vspliceib, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsplicerb, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsplatrh, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsplatrb, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_insert, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_tableidxb_goodsyntax, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_tableidxh_goodsyntax, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_tableidxw_goodsyntax, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_tableidxd_goodsyntax, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_extractu, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_insertp, "LLiLLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_S2_extractup, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_S2_insert_rp, "iiiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_extractu_rp, "iiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_insertp_rp, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_extractup_rp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_tstbit_i, "bii", "")
+BUILTIN(__builtin_HEXAGON_S2_setbit_i, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_togglebit_i, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_clrbit_i, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_tstbit_r, "bii", "")
+BUILTIN(__builtin_HEXAGON_S2_setbit_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_togglebit_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_clrbit_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_svw_trun, "iLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_svw_trun, "iLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vrndpackwh, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vrndpackwhs, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsxtbh, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vzxtbh, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsathub, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_svsathub, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_svsathb, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsathb, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vtrunohb, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vtrunewh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vtrunowh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vtrunehb, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsxthw, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vzxthw, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsatwh, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsatwuh, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_packhl, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_A2_swiz, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsathub_nopack, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsathb_nopack, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsatwh_nopack, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsatwuh_nopack, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_shuffob, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_shuffeb, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_shuffoh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_shuffeh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_parityp, "iLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_lfsp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_clbnorm, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_clb, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_cl0, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_cl1, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_clbp, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_cl0p, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_cl1p, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_brev, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_ct0, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_ct1, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_interleave, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_deinterleave, "LLiLLi", "")
+
+BUILTIN(__builtin_SI_to_SXTHI_asrh, "ii", "")
+
+BUILTIN(__builtin_M2_vrcmpys_s1, "LLiLLii", "")
+BUILTIN(__builtin_M2_vrcmpys_acc_s1, "LLiLLiLLii", "")
+BUILTIN(__builtin_M2_vrcmpys_s1rp, "iLLii", "")
+
+BUILTIN(__builtin_M2_vradduh, "iLLiLLi", "")
+BUILTIN(__builtin_A2_addsp, "LLiiLLi", "")
+BUILTIN(__builtin_A2_addpsat, "LLiLLiLLi", "")
+
+BUILTIN(__builtin_A2_maxp, "LLiLLiLLi", "")
+BUILTIN(__builtin_A2_maxup, "LLiLLiLLi", "")
+
+BUILTIN(__builtin_HEXAGON_A4_orn, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_andn, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_ornp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A4_andnp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A4_combineir, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_A4_combineri, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmpneqi, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmpneq, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmpltei, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmplte, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmplteui, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmplteu, "bii", "")
+BUILTIN(__builtin_HEXAGON_A4_rcmpneq, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_rcmpneqi, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_rcmpeq, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_rcmpeqi, "iii", "")
+BUILTIN(__builtin_HEXAGON_C4_fastcorner9, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_fastcorner9_not, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_and_andn, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_and_and, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_and_orn, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_and_or, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_or_andn, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_or_and, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_or_orn, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_or_or, "biii", "")
+BUILTIN(__builtin_HEXAGON_S4_addaddi, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S4_subaddi, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_xor_xacc, "LLiLLiLLiLLi", "")
+
+BUILTIN(__builtin_HEXAGON_M4_and_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_and_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_and_xor, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_and_andn, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_xor_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_xor_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_xor_andn, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_or_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_or_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_or_xor, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_or_andn, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S4_or_andix, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S4_or_andi, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S4_or_ori, "iiii", "")
+
+BUILTIN(__builtin_HEXAGON_A4_modwrapu, "iii", "")
+
+BUILTIN(__builtin_HEXAGON_A4_cround_ri, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_cround_rr, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_round_ri, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_round_rr, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_round_ri_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_round_rr_sat, "iii", "")
+
+#undef BUILTIN
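For readers decoding the type strings above: clang's builtin .def files encode each prototype as the return type followed by the parameter types, using 'i' for int, 'LLi' for long long, a 'U' prefix for unsigned, and 'b' for bool, so "LLiLLiii" reads as long long (long long, int, int). A minimal sketch of how such a file is consumed via the X-macro convention (the BuiltinEntry name and table are illustrative, not clang's own):

    struct BuiltinEntry {
      const char *Name;  // e.g. "__builtin_HEXAGON_A2_add"
      const char *Type;  // encoded prototype, e.g. "iii"
      const char *Attrs; // attribute string, empty here
    };

    // The includer defines BUILTIN; the .def file #undefs it at the
    // end, as the last line of the hunk above shows.
    static const BuiltinEntry HexagonBuiltins[] = {
    #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS },
    #include "clang/Basic/BuiltinsHexagon.def"
    };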
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
index 6bd9014..f44aed6 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
@@ -42,8 +42,6 @@ BUILTIN(__builtin_ia32_pfrcpit1, "V2fV2fV2f", "nc")
BUILTIN(__builtin_ia32_pfrcpit2, "V2fV2fV2f", "nc")
BUILTIN(__builtin_ia32_pfrsqrt, "V2fV2f", "nc")
BUILTIN(__builtin_ia32_pfrsqit1, "V2fV2fV2f", "nc")
-// GCC has pfrsqrtit1, even though this is not the name of the instruction.
-BUILTIN(__builtin_ia32_pfrsqrtit1, "V2fV2fV2f", "nc")
BUILTIN(__builtin_ia32_pfsub, "V2fV2fV2f", "nc")
BUILTIN(__builtin_ia32_pfsubr, "V2fV2fV2f", "nc")
BUILTIN(__builtin_ia32_pi2fd, "V2fV2i", "nc")
@@ -187,14 +185,14 @@ BUILTIN(__builtin_ia32_ucomisdle, "iV2dV2d", "")
BUILTIN(__builtin_ia32_ucomisdgt, "iV2dV2d", "")
BUILTIN(__builtin_ia32_ucomisdge, "iV2dV2d", "")
BUILTIN(__builtin_ia32_ucomisdneq, "iV2dV2d", "")
-BUILTIN(__builtin_ia32_cmpps, "V4fV4fV4fc", "")
-BUILTIN(__builtin_ia32_cmpss, "V4fV4fV4fc", "")
+BUILTIN(__builtin_ia32_cmpps, "V4fV4fV4fIc", "")
+BUILTIN(__builtin_ia32_cmpss, "V4fV4fV4fIc", "")
BUILTIN(__builtin_ia32_minps, "V4fV4fV4f", "")
BUILTIN(__builtin_ia32_maxps, "V4fV4fV4f", "")
BUILTIN(__builtin_ia32_minss, "V4fV4fV4f", "")
BUILTIN(__builtin_ia32_maxss, "V4fV4fV4f", "")
-BUILTIN(__builtin_ia32_cmppd, "V2dV2dV2dc", "")
-BUILTIN(__builtin_ia32_cmpsd, "V2dV2dV2dc", "")
+BUILTIN(__builtin_ia32_cmppd, "V2dV2dV2dIc", "")
+BUILTIN(__builtin_ia32_cmpsd, "V2dV2dV2dIc", "")
BUILTIN(__builtin_ia32_minpd, "V2dV2dV2d", "")
BUILTIN(__builtin_ia32_maxpd, "V2dV2dV2d", "")
BUILTIN(__builtin_ia32_minsd, "V2dV2dV2d", "")
@@ -210,19 +208,13 @@ BUILTIN(__builtin_ia32_psubusw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_pmulhw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_pavgb128, "V16cV16cV16c", "")
BUILTIN(__builtin_ia32_pavgw128, "V8sV8sV8s", "")
-BUILTIN(__builtin_ia32_pcmpeqb128, "V16cV16cV16c", "")
-BUILTIN(__builtin_ia32_pcmpeqw128, "V8sV8sV8s", "")
-BUILTIN(__builtin_ia32_pcmpeqd128, "V4iV4iV4i", "")
-BUILTIN(__builtin_ia32_pcmpgtb128, "V16cV16cV16c", "")
-BUILTIN(__builtin_ia32_pcmpgtw128, "V8sV8sV8s", "")
-BUILTIN(__builtin_ia32_pcmpgtd128, "V4iV4iV4i", "")
BUILTIN(__builtin_ia32_pmaxub128, "V16cV16cV16c", "")
BUILTIN(__builtin_ia32_pmaxsw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_pminub128, "V16cV16cV16c", "")
BUILTIN(__builtin_ia32_pminsw128, "V8sV8sV8s", "")
-BUILTIN(__builtin_ia32_packsswb128, "V8sV8sV8s", "")
-BUILTIN(__builtin_ia32_packssdw128, "V4iV4iV4i", "")
-BUILTIN(__builtin_ia32_packuswb128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_packsswb128, "V16cV8sV8s", "")
+BUILTIN(__builtin_ia32_packssdw128, "V8sV4iV4i", "")
+BUILTIN(__builtin_ia32_packuswb128, "V16cV8sV8s", "")
BUILTIN(__builtin_ia32_pmulhuw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_addsubps, "V4fV4fV4f", "")
BUILTIN(__builtin_ia32_addsubpd, "V2dV2dV2d", "")
@@ -236,7 +228,7 @@ BUILTIN(__builtin_ia32_phaddsw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_phsubw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_phsubd128, "V4iV4iV4i", "")
BUILTIN(__builtin_ia32_phsubsw128, "V8sV8sV8s", "")
-BUILTIN(__builtin_ia32_pmaddubsw128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_pmaddubsw128, "V8sV16cV16c", "")
BUILTIN(__builtin_ia32_pmulhrsw128, "V8sV8sV8s", "")
BUILTIN(__builtin_ia32_pshufb128, "V16cV16cV16c", "")
BUILTIN(__builtin_ia32_psignb128, "V16cV16cV16c", "")
@@ -304,19 +296,19 @@ BUILTIN(__builtin_ia32_psrldi128, "V4iV4ii", "")
BUILTIN(__builtin_ia32_psrlqi128, "V2LLiV2LLii", "")
BUILTIN(__builtin_ia32_psrawi128, "V8sV8si", "")
BUILTIN(__builtin_ia32_psradi128, "V4iV4ii", "")
-BUILTIN(__builtin_ia32_pmaddwd128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pmaddwd128, "V4iV8sV8s", "")
BUILTIN(__builtin_ia32_monitor, "vv*UiUi", "")
BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
-BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cIc", "") // FIXME: Correct type?
+BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cIc", "")
BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
BUILTIN(__builtin_ia32_pblendvb128, "V16cV16cV16cV16c", "")
-BUILTIN(__builtin_ia32_pblendw128, "V8sV8sV8si", "")
-BUILTIN(__builtin_ia32_blendpd, "V2dV2dV2di", "")
-BUILTIN(__builtin_ia32_blendps, "V4fV4fV4fi", "")
+BUILTIN(__builtin_ia32_pblendw128, "V8sV8sV8sIi", "")
+BUILTIN(__builtin_ia32_blendpd, "V2dV2dV2dIi", "")
+BUILTIN(__builtin_ia32_blendps, "V4fV4fV4fIi", "")
BUILTIN(__builtin_ia32_blendvpd, "V2dV2dV2dV2d", "")
BUILTIN(__builtin_ia32_blendvps, "V4fV4fV4fV4f", "")
@@ -353,28 +345,26 @@ BUILTIN(__builtin_ia32_movntdqa, "V2LLiV2LLi*", "")
BUILTIN(__builtin_ia32_ptestz128, "iV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_ptestc128, "iV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_ptestnzc128, "iV2LLiV2LLi", "")
-BUILTIN(__builtin_ia32_pcmpeqq, "V2LLiV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_mpsadbw128, "V16cV16cV16ci", "")
+BUILTIN(__builtin_ia32_phminposuw128, "V8sV8s", "")
// SSE 4.2
-BUILTIN(__builtin_ia32_pcmpistrm128, "V16cV16cV16cc", "")
-BUILTIN(__builtin_ia32_pcmpistri128, "iV16cV16cc", "")
-BUILTIN(__builtin_ia32_pcmpestrm128, "V16cV16ciV16cic", "")
-BUILTIN(__builtin_ia32_pcmpestri128, "iV16ciV16cic","")
+BUILTIN(__builtin_ia32_pcmpistrm128, "V16cV16cV16cIc", "")
+BUILTIN(__builtin_ia32_pcmpistri128, "iV16cV16cIc", "")
+BUILTIN(__builtin_ia32_pcmpestrm128, "V16cV16ciV16ciIc", "")
+BUILTIN(__builtin_ia32_pcmpestri128, "iV16ciV16ciIc","")
-BUILTIN(__builtin_ia32_pcmpistria128, "iV16ciV16cic","")
-BUILTIN(__builtin_ia32_pcmpistric128, "iV16ciV16cic","")
-BUILTIN(__builtin_ia32_pcmpistrio128, "iV16ciV16cic","")
-BUILTIN(__builtin_ia32_pcmpistris128, "iV16ciV16cic","")
-BUILTIN(__builtin_ia32_pcmpistriz128, "iV16ciV16cic","")
-
-BUILTIN(__builtin_ia32_pcmpestria128, "iV16ciV16cic","")
-BUILTIN(__builtin_ia32_pcmpestric128, "iV16ciV16cic","")
-BUILTIN(__builtin_ia32_pcmpestrio128, "iV16ciV16cic","")
-BUILTIN(__builtin_ia32_pcmpestris128, "iV16ciV16cic","")
-BUILTIN(__builtin_ia32_pcmpestriz128, "iV16ciV16cic","")
-
-BUILTIN(__builtin_ia32_pcmpgtq, "V2LLiV2LLiV2LLi", "")
+// FIXME: These builtins are horribly broken; reenable when PR11305 is fixed.
+//BUILTIN(__builtin_ia32_pcmpistria128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpistric128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpistrio128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpistris128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpistriz128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpestria128, "iV16ciV16ciIc","")
+//BUILTIN(__builtin_ia32_pcmpestric128, "iV16ciV16ciIc","")
+//BUILTIN(__builtin_ia32_pcmpestrio128, "iV16ciV16ciIc","")
+//BUILTIN(__builtin_ia32_pcmpestris128, "iV16ciV16ciIc","")
+//BUILTIN(__builtin_ia32_pcmpestriz128, "iV16ciV16ciIc","")
BUILTIN(__builtin_ia32_crc32qi, "UiUiUc", "")
BUILTIN(__builtin_ia32_crc32hi, "UiUiUs", "")
@@ -387,7 +377,7 @@ BUILTIN(__builtin_ia32_aesenclast128, "V2LLiV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_aesdec128, "V2LLiV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_aesdeclast128, "V2LLiV2LLiV2LLi", "")
BUILTIN(__builtin_ia32_aesimc128, "V2LLiV2LLi", "")
-BUILTIN(__builtin_ia32_aeskeygenassist128, "V2LLiV2LLic", "")
+BUILTIN(__builtin_ia32_aeskeygenassist128, "V2LLiV2LLiIc", "")
// AVX
BUILTIN(__builtin_ia32_addsubpd256, "V4dV4dV4d", "")
@@ -404,16 +394,16 @@ BUILTIN(__builtin_ia32_vpermilvarpd, "V2dV2dV2LLi", "")
BUILTIN(__builtin_ia32_vpermilvarps, "V4fV4fV4i", "")
BUILTIN(__builtin_ia32_vpermilvarpd256, "V4dV4dV4LLi", "")
BUILTIN(__builtin_ia32_vpermilvarps256, "V8fV8fV8i", "")
-BUILTIN(__builtin_ia32_blendpd256, "V4dV4dV4di", "")
-BUILTIN(__builtin_ia32_blendps256, "V8fV8fV8fi", "")
+BUILTIN(__builtin_ia32_blendpd256, "V4dV4dV4dIi", "")
+BUILTIN(__builtin_ia32_blendps256, "V8fV8fV8fIi", "")
BUILTIN(__builtin_ia32_blendvpd256, "V4dV4dV4dV4d", "")
BUILTIN(__builtin_ia32_blendvps256, "V8fV8fV8fV8f", "")
-BUILTIN(__builtin_ia32_dpps256, "V8fV8fV8fi", "")
+BUILTIN(__builtin_ia32_dpps256, "V8fV8fV8fIi", "")
BUILTIN(__builtin_ia32_cmppd256, "V4dV4dV4dc", "")
BUILTIN(__builtin_ia32_cmpps256, "V8fV8fV8fc", "")
-BUILTIN(__builtin_ia32_vextractf128_pd256, "V2dV4dc", "")
-BUILTIN(__builtin_ia32_vextractf128_ps256, "V4fV8fc", "")
-BUILTIN(__builtin_ia32_vextractf128_si256, "V4iV8ic", "")
+BUILTIN(__builtin_ia32_vextractf128_pd256, "V2dV4dIc", "")
+BUILTIN(__builtin_ia32_vextractf128_ps256, "V4fV8fIc", "")
+BUILTIN(__builtin_ia32_vextractf128_si256, "V4iV8iIc", "")
BUILTIN(__builtin_ia32_cvtdq2pd256, "V4dV4i", "")
BUILTIN(__builtin_ia32_cvtdq2ps256, "V8fV8i", "")
BUILTIN(__builtin_ia32_cvtpd2ps256, "V4fV4d", "")
@@ -422,22 +412,15 @@ BUILTIN(__builtin_ia32_cvtps2pd256, "V4dV4f", "")
BUILTIN(__builtin_ia32_cvttpd2dq256, "V4iV4d", "")
BUILTIN(__builtin_ia32_cvtpd2dq256, "V4iV4d", "")
BUILTIN(__builtin_ia32_cvttps2dq256, "V8iV8f", "")
-BUILTIN(__builtin_ia32_vperm2f128_pd256, "V4dV4dV4dc", "")
-BUILTIN(__builtin_ia32_vperm2f128_ps256, "V8fV8fV8fc", "")
-BUILTIN(__builtin_ia32_vperm2f128_si256, "V8iV8iV8ic", "")
-BUILTIN(__builtin_ia32_vpermilpd, "V2dV2dc", "")
-BUILTIN(__builtin_ia32_vpermilps, "V4fV4fc", "")
-BUILTIN(__builtin_ia32_vpermilpd256, "V4dV4dc", "")
-BUILTIN(__builtin_ia32_vpermilps256, "V8fV8fc", "")
-BUILTIN(__builtin_ia32_vinsertf128_pd256, "V4dV4dV2dc", "")
-BUILTIN(__builtin_ia32_vinsertf128_ps256, "V8fV8fV4fc", "")
-BUILTIN(__builtin_ia32_vinsertf128_si256, "V8iV8iV4ic", "")
+BUILTIN(__builtin_ia32_vinsertf128_pd256, "V4dV4dV2dIc", "")
+BUILTIN(__builtin_ia32_vinsertf128_ps256, "V8fV8fV4fIc", "")
+BUILTIN(__builtin_ia32_vinsertf128_si256, "V8iV8iV4iIc", "")
BUILTIN(__builtin_ia32_sqrtpd256, "V4dV4d", "")
BUILTIN(__builtin_ia32_sqrtps256, "V8fV8f", "")
BUILTIN(__builtin_ia32_rsqrtps256, "V8fV8f", "")
BUILTIN(__builtin_ia32_rcpps256, "V8fV8f", "")
-BUILTIN(__builtin_ia32_roundpd256, "V4dV4di", "")
-BUILTIN(__builtin_ia32_roundps256, "V8fV8fi", "")
+BUILTIN(__builtin_ia32_roundpd256, "V4dV4dIi", "")
+BUILTIN(__builtin_ia32_roundps256, "V8fV8fIi", "")
BUILTIN(__builtin_ia32_vtestzpd, "iV2dV2d", "")
BUILTIN(__builtin_ia32_vtestcpd, "iV2dV2d", "")
BUILTIN(__builtin_ia32_vtestnzcpd, "iV2dV2d", "")
@@ -462,11 +445,8 @@ BUILTIN(__builtin_ia32_vbroadcastsd256, "V4ddC*", "")
BUILTIN(__builtin_ia32_vbroadcastss256, "V8ffC*", "")
BUILTIN(__builtin_ia32_vbroadcastf128_pd256, "V4dV2dC*", "")
BUILTIN(__builtin_ia32_vbroadcastf128_ps256, "V8fV4fC*", "")
-BUILTIN(__builtin_ia32_loadupd256, "V4ddC*", "")
-BUILTIN(__builtin_ia32_loadups256, "V8ffC*", "")
BUILTIN(__builtin_ia32_storeupd256, "vd*V4d", "")
BUILTIN(__builtin_ia32_storeups256, "vf*V8f", "")
-BUILTIN(__builtin_ia32_loaddqu256, "V32ccC*", "")
BUILTIN(__builtin_ia32_storedqu256, "vc*V32c", "")
BUILTIN(__builtin_ia32_lddqu256, "V32ccC*", "")
BUILTIN(__builtin_ia32_movntdq256, "vV4LLi*V4LLi", "")
@@ -481,4 +461,173 @@ BUILTIN(__builtin_ia32_maskstoreps, "vV4f*V4fV4f", "")
BUILTIN(__builtin_ia32_maskstorepd256, "vV4d*V4dV4d", "")
BUILTIN(__builtin_ia32_maskstoreps256, "vV8f*V8fV8f", "")
+// AVX2
+BUILTIN(__builtin_ia32_mpsadbw256, "V32cV32cV32ci", "")
+BUILTIN(__builtin_ia32_pabsb256, "V32cV32c", "")
+BUILTIN(__builtin_ia32_pabsw256, "V16sV16s", "")
+BUILTIN(__builtin_ia32_pabsd256, "V8iV8i", "")
+BUILTIN(__builtin_ia32_packsswb256, "V32cV16sV16s", "")
+BUILTIN(__builtin_ia32_packssdw256, "V16sV8iV8i", "")
+BUILTIN(__builtin_ia32_packuswb256, "V32cV16sV16s", "")
+BUILTIN(__builtin_ia32_packusdw256, "V16sV8iV8i", "")
+BUILTIN(__builtin_ia32_paddsb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_paddsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_psubsb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_psubsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_paddusb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_paddusw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_psubusb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_psubusw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_palignr256, "V32cV32cV32cIc", "")
+BUILTIN(__builtin_ia32_pavgb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pavgw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pblendvb256, "V32cV32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pblendw256, "V16sV16sV16sIi", "")
+BUILTIN(__builtin_ia32_phaddw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_phaddd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_phaddsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_phsubw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_phsubd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_phsubsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmaddubsw256, "V16sV32cV32c", "")
+BUILTIN(__builtin_ia32_pmaddwd256, "V8iV16sV16s", "")
+BUILTIN(__builtin_ia32_pmaxub256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pmaxuw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmaxud256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pmaxsb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pmaxsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmaxsd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pminub256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pminuw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pminud256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pminsb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pminsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pminsd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pmovmskb256, "iV32c", "")
+BUILTIN(__builtin_ia32_pmovsxbw256, "V16sV16c", "")
+BUILTIN(__builtin_ia32_pmovsxbd256, "V8iV16c", "")
+BUILTIN(__builtin_ia32_pmovsxbq256, "V4LLiV16c", "")
+BUILTIN(__builtin_ia32_pmovsxwd256, "V8iV8s", "")
+BUILTIN(__builtin_ia32_pmovsxwq256, "V4LLiV8s", "")
+BUILTIN(__builtin_ia32_pmovsxdq256, "V4LLiV4i", "")
+BUILTIN(__builtin_ia32_pmovzxbw256, "V16sV16c", "")
+BUILTIN(__builtin_ia32_pmovzxbd256, "V8iV16c", "")
+BUILTIN(__builtin_ia32_pmovzxbq256, "V4LLiV16c", "")
+BUILTIN(__builtin_ia32_pmovzxwd256, "V8iV8s", "")
+BUILTIN(__builtin_ia32_pmovzxwq256, "V4LLiV8s", "")
+BUILTIN(__builtin_ia32_pmovzxdq256, "V4LLiV4i", "")
+BUILTIN(__builtin_ia32_pmuldq256, "V4LLiV8iV8i", "")
+BUILTIN(__builtin_ia32_pmulhrsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmulhuw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmulhw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmuludq256, "V4LLiV8iV8i", "")
+BUILTIN(__builtin_ia32_psadbw256, "V4LLiV32cV32c", "")
+BUILTIN(__builtin_ia32_pshufb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_psignb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_psignw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_psignd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pslldqi256, "V4LLiV4LLiIi", "")
+BUILTIN(__builtin_ia32_psllwi256, "V16sV16si", "")
+BUILTIN(__builtin_ia32_psllw256, "V16sV16sV8s", "")
+BUILTIN(__builtin_ia32_pslldi256, "V8iV8ii", "")
+BUILTIN(__builtin_ia32_pslld256, "V8iV8iV4i", "")
+BUILTIN(__builtin_ia32_psllqi256, "V4LLiV4LLii", "")
+BUILTIN(__builtin_ia32_psllq256, "V4LLiV4LLiV2LLi", "")
+BUILTIN(__builtin_ia32_psrawi256, "V16sV16si", "")
+BUILTIN(__builtin_ia32_psraw256, "V16sV16sV8s", "")
+BUILTIN(__builtin_ia32_psradi256, "V8iV8ii", "")
+BUILTIN(__builtin_ia32_psrad256, "V8iV8iV4i", "")
+BUILTIN(__builtin_ia32_psrldqi256, "V4LLiV4LLiIi", "")
+BUILTIN(__builtin_ia32_psrlwi256, "V16sV16si", "")
+BUILTIN(__builtin_ia32_psrlw256, "V16sV16sV8s", "")
+BUILTIN(__builtin_ia32_psrldi256, "V8iV8ii", "")
+BUILTIN(__builtin_ia32_psrld256, "V8iV8iV4i", "")
+BUILTIN(__builtin_ia32_psrlqi256, "V4LLiV4LLii", "")
+BUILTIN(__builtin_ia32_psrlq256, "V4LLiV4LLiV2LLi", "")
+BUILTIN(__builtin_ia32_movntdqa256, "V4LLiV4LLi*", "")
+BUILTIN(__builtin_ia32_vbroadcastss_ps, "V4fV4f", "")
+BUILTIN(__builtin_ia32_vbroadcastss_ps256, "V8fV4f", "")
+BUILTIN(__builtin_ia32_vbroadcastsd_pd256, "V4dV2d", "")
+BUILTIN(__builtin_ia32_vbroadcastsi256, "V4LLiV2LLiC*", "")
+BUILTIN(__builtin_ia32_pblendd128, "V4iV4iV4iIi", "")
+BUILTIN(__builtin_ia32_pblendd256, "V8iV8iV8iIi", "")
+BUILTIN(__builtin_ia32_pbroadcastb256, "V32cV16c", "")
+BUILTIN(__builtin_ia32_pbroadcastw256, "V16sV8s", "")
+BUILTIN(__builtin_ia32_pbroadcastd256, "V8iV4i", "")
+BUILTIN(__builtin_ia32_pbroadcastq256, "V4LLiV2LLi", "")
+BUILTIN(__builtin_ia32_pbroadcastb128, "V16cV16c", "")
+BUILTIN(__builtin_ia32_pbroadcastw128, "V8sV8s", "")
+BUILTIN(__builtin_ia32_pbroadcastd128, "V4iV4i", "")
+BUILTIN(__builtin_ia32_pbroadcastq128, "V2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_permvarsi256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_permdf256, "V4dV4dIc", "")
+BUILTIN(__builtin_ia32_permvarsf256, "V8fV8fV8f", "")
+BUILTIN(__builtin_ia32_permdi256, "V4LLiV4LLiIc", "")
+BUILTIN(__builtin_ia32_extract128i256, "V2LLiV4LLiIc", "")
+BUILTIN(__builtin_ia32_insert128i256, "V4LLiV4LLiV2LLiIc", "")
+BUILTIN(__builtin_ia32_maskloadd256, "V8iV8iC*V8i", "")
+BUILTIN(__builtin_ia32_maskloadq256, "V4LLiV4LLiC*V4LLi", "")
+BUILTIN(__builtin_ia32_maskloadd, "V4iV4iC*V4i", "")
+BUILTIN(__builtin_ia32_maskloadq, "V2LLiV2LLiC*V2LLi", "")
+BUILTIN(__builtin_ia32_maskstored256, "vV8i*V8iV8i", "")
+BUILTIN(__builtin_ia32_maskstoreq256, "vV4LLi*V4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_maskstored, "vV4i*V4iV4i", "")
+BUILTIN(__builtin_ia32_maskstoreq, "vV2LLi*V2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_psllv8si, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_psllv4si, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_psllv4di, "V4LLiV4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_psllv2di, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_psrav8si, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_psrav4si, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_psrlv8si, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_psrlv4si, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_psrlv4di, "V4LLiV4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_psrlv2di, "V2LLiV2LLiV2LLi", "")
+
+// BMI
+BUILTIN(__builtin_ia32_bextr_u32, "UiUiUi", "")
+BUILTIN(__builtin_ia32_bextr_u64, "ULLiULLiULLi", "")
+
+// BMI2
+BUILTIN(__builtin_ia32_bzhi_si, "UiUiUi", "")
+BUILTIN(__builtin_ia32_bzhi_di, "ULLiULLiULLi", "")
+BUILTIN(__builtin_ia32_pdep_si, "UiUiUi", "")
+BUILTIN(__builtin_ia32_pdep_di, "ULLiULLiULLi", "")
+BUILTIN(__builtin_ia32_pext_si, "UiUiUi", "")
+BUILTIN(__builtin_ia32_pext_di, "ULLiULLiULLi", "")
+
+// FMA4
+BUILTIN(__builtin_ia32_vfmaddps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmaddpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmaddss, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmaddsd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmsubps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmsubpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmsubss, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmsubsd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfnmaddps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfnmaddpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfnmaddss, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfnmaddsd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfnmsubps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfnmsubpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfnmsubss, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfnmsubsd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmaddsubps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmaddsubpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmsubaddps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmsubaddpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmaddps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfmaddpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfmsubps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfmsubpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfnmaddps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfnmaddpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfnmsubps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfnmsubpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfmaddsubps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfmaddsubpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfmsubaddps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfmsubaddpd256, "V4dV4dV4dV4d", "")
+
#undef BUILTIN
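The recurring "c" -> "Ic" and "i" -> "Ii" changes in this file attach clang's 'I' modifier, which requires that argument to be an integer constant expression, so a non-immediate operand is rejected in Sema instead of reaching instruction selection. A hedged illustration of the effect (hypothetical user code, not from this tree):

    typedef float v4f __attribute__((vector_size(16)));

    v4f cmp_lt(v4f a, v4f b) {
      return __builtin_ia32_cmpps(a, b, 1);    // OK: constant immediate
    }

    v4f cmp_dyn(v4f a, v4f b, int pred) {
      return __builtin_ia32_cmpps(a, b, pred); // rejected once the type is
                                               // "Ic": 'pred' is not a
                                               // constant expression
    }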
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h b/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
index d928f9d..ec6b973 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
@@ -98,7 +98,7 @@
bit mask & shift operations.
------------------------------------------------------------------------ */
-typedef unsigned long UTF32; /* at least 32 bits */
+typedef unsigned int UTF32; /* at least 32 bits */
typedef unsigned short UTF16; /* at least 16 bits */
typedef unsigned char UTF8; /* typically 8 bits */
typedef unsigned char Boolean; /* 0 or 1 */
@@ -131,15 +131,15 @@ ConversionResult ConvertUTF8toUTF16 (
const UTF8** sourceStart, const UTF8* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+ConversionResult ConvertUTF8toUTF32 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
#ifdef CLANG_NEEDS_THESE_ONE_DAY
ConversionResult ConvertUTF16toUTF8 (
const UTF16** sourceStart, const UTF16* sourceEnd,
UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
-ConversionResult ConvertUTF8toUTF32 (
- const UTF8** sourceStart, const UTF8* sourceEnd,
- UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
-
ConversionResult ConvertUTF32toUTF8 (
const UTF32** sourceStart, const UTF32* sourceEnd,
UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
@@ -151,9 +151,11 @@ ConversionResult ConvertUTF16toUTF32 (
ConversionResult ConvertUTF32toUTF16 (
const UTF32** sourceStart, const UTF32* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
-#endif
Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
+#endif
+
+Boolean isLegalUTF8String(const UTF8 *source, const UTF8 *sourceEnd);
#ifdef __cplusplus
}
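With ConvertUTF8toUTF32 now declared unconditionally, callers outside the CLANG_NEEDS_THESE_ONE_DAY block can use it directly. A sketch against the signature shown above (buffer sizing and error handling are illustrative):

    #include <stddef.h>

    // Assumes ConvertUTF.h is visible; ConversionResult, conversionOK,
    // and strictConversion all come from that header.
    static int decodeUTF8(const UTF8 *in, size_t inLen,
                          UTF32 *out, size_t outLen, size_t *written) {
      const UTF8 *src = in;
      UTF32 *dst = out;
      ConversionResult r = ConvertUTF8toUTF32(&src, in + inLen,
                                              &dst, out + outLen,
                                              strictConversion);
      *written = (size_t)(dst - out);
      return r == conversionOK;
    }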
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
index a37dc10..6f2bb35 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
@@ -66,8 +66,6 @@ def Named : Decl<1>;
def ObjCCompatibleAlias : DDecl<Named>;
def LinkageSpec : Decl, DeclContext;
def ObjCPropertyImpl : Decl;
-def ObjCForwardProtocol : Decl;
-def ObjCClass : Decl;
def FileScopeAsm : Decl;
def AccessSpec : Decl;
def Friend : Decl;
@@ -75,3 +73,5 @@ def FriendTemplate : Decl;
def StaticAssert : Decl;
def Block : Decl, DeclContext;
def ClassScopeFunctionSpecialization : Decl;
+def Import : Decl;
+
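Adding def Import here makes TableGen emit a corresponding entry in the generated DeclNodes.inc, so X-macro consumers pick the new node up without being edited. A sketch of that pattern (assuming the usual DeclNodes.inc macro defaults):

    // Count concrete declaration kinds, Import included.
    enum {
      NumConcreteDeclKinds = 0
    #define DECL(DERIVED, BASE) +1
    #define ABSTRACT_DECL(DECL)      // skip abstract nodes such as Named
    #include "clang/AST/DeclNodes.inc"
    };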
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
index fefc44c..e157178 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
@@ -50,13 +50,19 @@ public:
/// insertion hint.
CharSourceRange RemoveRange;
+ /// \brief Code in the specific range that should be inserted in the insertion
+ /// location.
+ CharSourceRange InsertFromRange;
+
/// \brief The actual code to insert at the insertion location, as a
/// string.
std::string CodeToInsert;
+ bool BeforePreviousInsertions;
+
/// \brief Empty code modification hint, indicating that no code
/// modification is known.
- FixItHint() : RemoveRange() { }
+ FixItHint() : BeforePreviousInsertions(false) { }
bool isNull() const {
return !RemoveRange.isValid();
@@ -65,11 +71,26 @@ public:
/// \brief Create a code modification hint that inserts the given
/// code string at a specific location.
static FixItHint CreateInsertion(SourceLocation InsertionLoc,
- StringRef Code) {
+ StringRef Code,
+ bool BeforePreviousInsertions = false) {
FixItHint Hint;
Hint.RemoveRange =
CharSourceRange(SourceRange(InsertionLoc, InsertionLoc), false);
Hint.CodeToInsert = Code;
+ Hint.BeforePreviousInsertions = BeforePreviousInsertions;
+ return Hint;
+ }
+
+ /// \brief Create a code modification hint that inserts the given
+ /// code from \arg FromRange at a specific location.
+ static FixItHint CreateInsertionFromRange(SourceLocation InsertionLoc,
+ CharSourceRange FromRange,
+ bool BeforePreviousInsertions = false) {
+ FixItHint Hint;
+ Hint.RemoveRange =
+ CharSourceRange(SourceRange(InsertionLoc, InsertionLoc), false);
+ Hint.InsertFromRange = FromRange;
+ Hint.BeforePreviousInsertions = BeforePreviousInsertions;
return Hint;
}
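The new BeforePreviousInsertions flag and CreateInsertionFromRange slot into the existing streaming interface; a hypothetical caller (the diagnostic ID is illustrative):

    // Attach a fix-it that inserts "const " at Loc, ordered before any
    // insertion a previous hint already made at the same location.
    void warnMissingConst(DiagnosticsEngine &Diags, SourceLocation Loc) {
      Diags.Report(Loc, diag::warn_example_missing_const)  // hypothetical ID
        << FixItHint::CreateInsertion(Loc, "const ",
                                      /*BeforePreviousInsertions=*/true);
    }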
@@ -105,7 +126,7 @@ public:
/// "report warnings as errors" and passes them off to the DiagnosticConsumer
/// for reporting to the user. DiagnosticsEngine is tied to one translation unit
/// and one SourceManager.
-class DiagnosticsEngine : public llvm::RefCountedBase<DiagnosticsEngine> {
+class DiagnosticsEngine : public RefCountedBase<DiagnosticsEngine> {
public:
/// Level - The level of the diagnostic, after it has been through mapping.
enum Level {
@@ -158,8 +179,10 @@ private:
unsigned ErrorLimit; // Cap of # errors emitted, 0 -> no limit.
unsigned TemplateBacktraceLimit; // Cap on depth of template backtrace stack,
// 0 -> no limit.
+ unsigned ConstexprBacktraceLimit; // Cap on depth of constexpr evaluation
+ // backtrace stack, 0 -> no limit.
ExtensionHandling ExtBehavior; // Map extensions onto warnings or errors?
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> Diags;
+ IntrusiveRefCntPtr<DiagnosticIDs> Diags;
DiagnosticConsumer *Client;
bool OwnsDiagClient;
SourceManager *SourceMgr;
@@ -287,7 +310,7 @@ private:
unsigned NumPrevArgs,
SmallVectorImpl<char> &Output,
void *Cookie,
- SmallVectorImpl<intptr_t> &QualTypeVals);
+ ArrayRef<intptr_t> QualTypeVals);
void *ArgToStringCookie;
ArgToStringFnTy ArgToStringFn;
@@ -304,12 +327,12 @@ private:
public:
explicit DiagnosticsEngine(
- const llvm::IntrusiveRefCntPtr<DiagnosticIDs> &Diags,
+ const IntrusiveRefCntPtr<DiagnosticIDs> &Diags,
DiagnosticConsumer *client = 0,
bool ShouldOwnClient = true);
~DiagnosticsEngine();
- const llvm::IntrusiveRefCntPtr<DiagnosticIDs> &getDiagnosticIDs() const {
+ const IntrusiveRefCntPtr<DiagnosticIDs> &getDiagnosticIDs() const {
return Diags;
}
@@ -363,13 +386,25 @@ public:
void setTemplateBacktraceLimit(unsigned Limit) {
TemplateBacktraceLimit = Limit;
}
-
+
/// \brief Retrieve the maximum number of template instantiation
- /// nodes to emit along with a given diagnostic.
+ /// notes to emit along with a given diagnostic.
unsigned getTemplateBacktraceLimit() const {
return TemplateBacktraceLimit;
}
-
+
+ /// \brief Specify the maximum number of constexpr evaluation
+ /// notes to emit along with a given diagnostic.
+ void setConstexprBacktraceLimit(unsigned Limit) {
+ ConstexprBacktraceLimit = Limit;
+ }
+
+ /// \brief Retrieve the maximum number of constexpr evaluation
+ /// notes to emit along with a given diagnostic.
+ unsigned getConstexprBacktraceLimit() const {
+ return ConstexprBacktraceLimit;
+ }
+
/// setIgnoreAllWarnings - When set to true, any unmapped warnings are
/// ignored. If this and WarningsAsErrors are both set, then this one wins.
void setIgnoreAllWarnings(bool Val) { IgnoreAllWarnings = Val; }
@@ -450,18 +485,32 @@ public:
bool setDiagnosticGroupMapping(StringRef Group, diag::Mapping Map,
SourceLocation Loc = SourceLocation());
+ /// \brief Set the warning-as-error flag for the given diagnostic. This
+ /// function always only operates on the current diagnostic state.
+ void setDiagnosticWarningAsError(diag::kind Diag, bool Enabled);
+
/// \brief Set the warning-as-error flag for the given diagnostic group. This
/// function always only operates on the current diagnostic state.
///
/// \returns True if the given group is unknown, false otherwise.
bool setDiagnosticGroupWarningAsError(StringRef Group, bool Enabled);
+ /// \brief Set the error-as-fatal flag for the given diagnostic. This function
+ /// always only operates on the current diagnostic state.
+ void setDiagnosticErrorAsFatal(diag::kind Diag, bool Enabled);
+
/// \brief Set the error-as-fatal flag for the given diagnostic group. This
/// function always only operates on the current diagnostic state.
///
/// \returns True if the given group is unknown, false otherwise.
bool setDiagnosticGroupErrorAsFatal(StringRef Group, bool Enabled);
+ /// \brief Add the specified mapping to all diagnostics. Mainly to be used
+ /// by -Wno-everything to disable all warnings but allow subsequent -W options
+ /// to enable specific warnings.
+ void setMappingToAllDiagnostics(diag::Mapping Map,
+ SourceLocation Loc = SourceLocation());
+
bool hasErrorOccurred() const { return ErrorOccurred; }
bool hasFatalErrorOccurred() const { return FatalErrorOccurred; }
@@ -587,16 +636,22 @@ private:
/// MaxArguments - The maximum number of arguments we can hold. We currently
/// only support up to 10 arguments (%0-%9). A single diagnostic with more
/// than that almost certainly has to be simplified anyway.
- MaxArguments = 10
+ MaxArguments = 10,
+
+ /// MaxRanges - The maximum number of ranges we can hold.
+ MaxRanges = 10,
+
+  /// MaxFixItHints - The maximum number of fix-it hints we can hold.
+ MaxFixItHints = 10
};
/// NumDiagArgs - This contains the number of entries in Arguments.
signed char NumDiagArgs;
- /// NumRanges - This is the number of ranges in the DiagRanges array.
+ /// NumDiagRanges - This is the number of ranges in the DiagRanges array.
unsigned char NumDiagRanges;
- /// \brief The number of code modifications hints in the
- /// FixItHints array.
- unsigned char NumFixItHints;
+ /// NumDiagFixItHints - This is the number of hints in the DiagFixItHints
+ /// array.
+ unsigned char NumDiagFixItHints;
/// DiagArgumentsKind - This is an array of ArgumentKind::ArgumentKind enum
/// values, with one for each argument. This specifies whether the argument
@@ -614,15 +669,27 @@ private:
/// sort of argument kind it is.
intptr_t DiagArgumentsVal[MaxArguments];
- /// DiagRanges - The list of ranges added to this diagnostic. It currently
- /// only support 10 ranges, could easily be extended if needed.
- CharSourceRange DiagRanges[10];
+ /// DiagRanges - The list of ranges added to this diagnostic.
+ CharSourceRange DiagRanges[MaxRanges];
- enum { MaxFixItHints = 6 };
+ /// FixItHints - If valid, provides a hint with some code to insert, remove,
+ /// or modify at a particular position.
+ FixItHint DiagFixItHints[MaxFixItHints];
- /// FixItHints - If valid, provides a hint with some code
- /// to insert, remove, or modify at a particular position.
- FixItHint FixItHints[MaxFixItHints];
+ DiagnosticMappingInfo makeMappingInfo(diag::Mapping Map, SourceLocation L) {
+ bool isPragma = L.isValid();
+ DiagnosticMappingInfo MappingInfo = DiagnosticMappingInfo::Make(
+ Map, /*IsUser=*/true, isPragma);
+
+ // If this is a pragma mapping, then set the diagnostic mapping flags so
+ // that we override command line options.
+ if (isPragma) {
+ MappingInfo.setNoWarningAsError(true);
+ MappingInfo.setNoErrorAsFatal(true);
+ }
+
+ return MappingInfo;
+ }
/// ProcessDiag - This is the method used to report a diagnostic that is
/// finally fully formed.
@@ -633,6 +700,24 @@ private:
return Diags->ProcessDiag(*this);
}
+ /// @name Diagnostic Emission
+ /// @{
+protected:
+ // Sema requires access to the following functions because the current design
+ // of SFINAE requires it to use its own SemaDiagnosticBuilder, which needs to
+ // access us directly to ensure we minimize the emitted code for the common
+ // Sema::Diag() patterns.
+ friend class Sema;
+
+ /// \brief Emit the current diagnostic and clear the diagnostic state.
+ bool EmitCurrentDiagnostic();
+
+ unsigned getCurrentDiagID() const { return CurDiagID; }
+
+ SourceLocation getCurrentDiagLoc() const { return CurDiagLoc; }
+
+ /// @}
+
friend class ASTReader;
friend class ASTWriter;
};
@@ -685,37 +770,39 @@ public:
/// for example.
class DiagnosticBuilder {
mutable DiagnosticsEngine *DiagObj;
- mutable unsigned NumArgs, NumRanges, NumFixItHints;
+ mutable unsigned NumArgs, NumRanges, NumFixits;
+
+ /// \brief Status variable indicating if this diagnostic is still active.
+ ///
+ // NOTE: This field is redundant with DiagObj (IsActive iff (DiagObj != 0)),
+ // but LLVM is not currently smart enough to eliminate the null check that
+ // Emit() would end up with if we used that as our status variable.
+ mutable bool IsActive;
void operator=(const DiagnosticBuilder&); // DO NOT IMPLEMENT
friend class DiagnosticsEngine;
explicit DiagnosticBuilder(DiagnosticsEngine *diagObj)
- : DiagObj(diagObj), NumArgs(0), NumRanges(0), NumFixItHints(0) {}
+ : DiagObj(diagObj), NumArgs(0), NumRanges(0), NumFixits(0), IsActive(true) {
+ assert(diagObj && "DiagnosticBuilder requires a valid DiagnosticsEngine!");
+ }
friend class PartialDiagnostic;
protected:
- void FlushCounts();
-
-public:
- /// Copy constructor. When copied, this "takes" the diagnostic info from the
- /// input and neuters it.
- DiagnosticBuilder(const DiagnosticBuilder &D) {
- DiagObj = D.DiagObj;
- D.DiagObj = 0;
- NumArgs = D.NumArgs;
- NumRanges = D.NumRanges;
- NumFixItHints = D.NumFixItHints;
+ void FlushCounts() {
+ DiagObj->NumDiagArgs = NumArgs;
+ DiagObj->NumDiagRanges = NumRanges;
+ DiagObj->NumDiagFixItHints = NumFixits;
}
- /// \brief Simple enumeration value used to give a name to the
- /// suppress-diagnostic constructor.
- enum SuppressKind { Suppress };
+ /// \brief Clear out the current diagnostic.
+ void Clear() const {
+ DiagObj = 0;
+ IsActive = false;
+ }
- /// \brief Create an empty DiagnosticBuilder object that represents
- /// no actual diagnostic.
- explicit DiagnosticBuilder(SuppressKind)
- : DiagObj(0), NumArgs(0), NumRanges(0), NumFixItHints(0) { }
+ /// isActive - Determine whether this diagnostic is still active.
+ bool isActive() const { return IsActive; }
/// \brief Force the diagnostic builder to emit the diagnostic now.
///
@@ -724,25 +811,40 @@ public:
///
/// \returns true if a diagnostic was emitted, false if the
/// diagnostic was suppressed.
- bool Emit();
+ bool Emit() {
+ // If this diagnostic is inactive, then its soul was stolen by the copy ctor
+ // (or by a subclass, as in SemaDiagnosticBuilder).
+ if (!isActive()) return false;
- /// Destructor - The dtor emits the diagnostic if it hasn't already
- /// been emitted.
- ~DiagnosticBuilder() { Emit(); }
+ // When emitting diagnostics, we set the final argument count into
+ // the DiagnosticsEngine object.
+ FlushCounts();
- /// isActive - Determine whether this diagnostic is still active.
- bool isActive() const { return DiagObj != 0; }
+ // Process the diagnostic.
+ bool Result = DiagObj->EmitCurrentDiagnostic();
- /// \brief Retrieve the active diagnostic ID.
- ///
- /// \pre \c isActive()
- unsigned getDiagID() const {
- assert(isActive() && "DiagnosticsEngine is inactive");
- return DiagObj->CurDiagID;
+ // This diagnostic is dead.
+ Clear();
+
+ return Result;
}
- /// \brief Clear out the current diagnostic.
- void Clear() { DiagObj = 0; }
+public:
+ /// Copy constructor. When copied, this "takes" the diagnostic info from the
+ /// input and neuters it.
+ DiagnosticBuilder(const DiagnosticBuilder &D) {
+ DiagObj = D.DiagObj;
+ IsActive = D.IsActive;
+ D.Clear();
+ NumArgs = D.NumArgs;
+ NumRanges = D.NumRanges;
+ NumFixits = D.NumFixits;
+ }
+
+ /// Destructor - The dtor emits the diagnostic.
+ ~DiagnosticBuilder() {
+ Emit();
+ }
/// Operator bool: conversion of DiagnosticBuilder to bool always returns
/// true. This allows it to be used in boolean error contexts like:
@@ -750,38 +852,33 @@ public:
operator bool() const { return true; }
void AddString(StringRef S) const {
+ assert(isActive() && "Clients must not add to cleared diagnostic!");
assert(NumArgs < DiagnosticsEngine::MaxArguments &&
"Too many arguments to diagnostic!");
- if (DiagObj) {
- DiagObj->DiagArgumentsKind[NumArgs] = DiagnosticsEngine::ak_std_string;
- DiagObj->DiagArgumentsStr[NumArgs++] = S;
- }
+ DiagObj->DiagArgumentsKind[NumArgs] = DiagnosticsEngine::ak_std_string;
+ DiagObj->DiagArgumentsStr[NumArgs++] = S;
}
void AddTaggedVal(intptr_t V, DiagnosticsEngine::ArgumentKind Kind) const {
+ assert(isActive() && "Clients must not add to cleared diagnostic!");
assert(NumArgs < DiagnosticsEngine::MaxArguments &&
"Too many arguments to diagnostic!");
- if (DiagObj) {
- DiagObj->DiagArgumentsKind[NumArgs] = Kind;
- DiagObj->DiagArgumentsVal[NumArgs++] = V;
- }
+ DiagObj->DiagArgumentsKind[NumArgs] = Kind;
+ DiagObj->DiagArgumentsVal[NumArgs++] = V;
}
void AddSourceRange(const CharSourceRange &R) const {
- assert(NumRanges <
- sizeof(DiagObj->DiagRanges)/sizeof(DiagObj->DiagRanges[0]) &&
+ assert(isActive() && "Clients must not add to cleared diagnostic!");
+ assert(NumRanges < DiagnosticsEngine::MaxRanges &&
"Too many arguments to diagnostic!");
- if (DiagObj)
- DiagObj->DiagRanges[NumRanges++] = R;
+ DiagObj->DiagRanges[NumRanges++] = R;
}
void AddFixItHint(const FixItHint &Hint) const {
- assert(NumFixItHints < DiagnosticsEngine::MaxFixItHints &&
- "Too many fix-it hints!");
- if (NumFixItHints >= DiagnosticsEngine::MaxFixItHints)
- return; // Don't crash in release builds
- if (DiagObj)
- DiagObj->FixItHints[NumFixItHints++] = Hint;
+ assert(isActive() && "Clients must not add to cleared diagnostic!");
+ assert(NumFixits < DiagnosticsEngine::MaxFixItHints &&
+ "Too many arguments to diagnostic!");
+ DiagObj->DiagFixItHints[NumFixits++] = Hint;
}
};
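For reference, a short usage sketch (the diagnostic text and function below are illustrative): Report() hands back a temporary DiagnosticBuilder, operator<< forwards to the Add* methods above, and the destructor now performs the emission through Emit().

    #include "clang/Basic/Diagnostic.h"

    void reportReservedName(clang::DiagnosticsEngine &Diags,
                            clang::SourceLocation Loc, llvm::StringRef Name,
                            clang::CharSourceRange Range) {
      // Custom ID purely for illustration; most clients use generated IDs.
      unsigned ID = Diags.getCustomDiagID(clang::DiagnosticsEngine::Warning,
                                          "name '%0' is reserved");
      Diags.Report(Loc, ID) << Name << Range;  // emitted at end of statement
    }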
@@ -849,7 +946,8 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
const FixItHint &Hint) {
- DB.AddFixItHint(Hint);
+ if (!Hint.isNull())
+ DB.AddFixItHint(Hint);
return DB;
}
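With the null check in place, streaming a default-constructed hint is a harmless no-op, so callers can build hints conditionally and stream them unconditionally. A sketch, assuming Diags, ID, Loc and a HaveSemicolonFix condition exist:

    clang::FixItHint Hint;  // null: no suggestion available yet
    if (HaveSemicolonFix)
      Hint = clang::FixItHint::CreateInsertion(Loc, ";");
    Diags.Report(Loc, ID) << Hint;  // the hint is dropped when Hint.isNull()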
@@ -951,17 +1049,22 @@ public:
return DiagObj->DiagRanges[Idx];
}
+ /// \brief Return an array reference for this diagnostic's ranges.
+ ArrayRef<CharSourceRange> getRanges() const {
+ return llvm::makeArrayRef(DiagObj->DiagRanges, DiagObj->NumDiagRanges);
+ }
+
unsigned getNumFixItHints() const {
- return DiagObj->NumFixItHints;
+ return DiagObj->NumDiagFixItHints;
}
const FixItHint &getFixItHint(unsigned Idx) const {
- return DiagObj->FixItHints[Idx];
+ assert(Idx < getNumFixItHints() && "Invalid index!");
+ return DiagObj->DiagFixItHints[Idx];
}
const FixItHint *getFixItHints() const {
- return DiagObj->NumFixItHints?
- &DiagObj->FixItHints[0] : 0;
+ return getNumFixItHints()? DiagObj->DiagFixItHints : 0;
}
/// FormatDiagnostic - Format this diagnostic into a string, substituting the
@@ -1012,11 +1115,20 @@ public:
range_iterator range_begin() const { return Ranges.begin(); }
range_iterator range_end() const { return Ranges.end(); }
unsigned range_size() const { return Ranges.size(); }
+
+ ArrayRef<CharSourceRange> getRanges() const {
+ return llvm::makeArrayRef(Ranges);
+ }
+
typedef std::vector<FixItHint>::const_iterator fixit_iterator;
fixit_iterator fixit_begin() const { return FixIts.begin(); }
fixit_iterator fixit_end() const { return FixIts.end(); }
unsigned fixit_size() const { return FixIts.size(); }
+
+ ArrayRef<FixItHint> getFixIts() const {
+ return llvm::makeArrayRef(FixIts);
+ }
};
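These mirror the ArrayRef accessor added to Diagnostic above and spare clients the begin()/end()/size() triple. A consumption sketch (SD is an assumed StoredDiagnostic, applyFixIt a hypothetical helper):

    llvm::ArrayRef<clang::FixItHint> FixIts = SD.getFixIts();
    for (unsigned I = 0, N = FixIts.size(); I != N; ++I)
      applyFixIt(FixIts[I]);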
/// DiagnosticConsumer - This is an abstract interface implemented by clients of
@@ -1031,6 +1143,7 @@ public:
unsigned getNumErrors() const { return NumErrors; }
unsigned getNumWarnings() const { return NumWarnings; }
+ virtual void clear() { NumWarnings = NumErrors = 0; }
virtual ~DiagnosticConsumer();
@@ -1053,6 +1166,10 @@ public:
/// objects made available via \see BeginSourceFile() are inaccessible.
virtual void EndSourceFile() {}
+ /// \brief Callback to inform the diagnostic client that processing of all
+ /// source files has ended.
+ virtual void finish() {}
+
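clear() and finish() round out the consumer lifecycle. A sketch of the call order a driver is expected to follow (Consumer, LangOpts and PP assumed):

    Consumer.clear();                  // reset NumErrors/NumWarnings
    Consumer.BeginSourceFile(LangOpts, &PP);
    // ... diagnostics for this file are handled ...
    Consumer.EndSourceFile();
    Consumer.finish();                 // once, after the last source file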
/// IncludeInDiagnosticCounts - This method (whose default implementation
/// returns true) indicates whether the diagnostics handled by this
/// DiagnosticConsumer should be included in the number of diagnostics
@@ -1075,6 +1192,7 @@ public:
/// IgnoringDiagConsumer - This is a diagnostic client that just ignores all
/// diags.
class IgnoringDiagConsumer : public DiagnosticConsumer {
+ virtual void anchor();
void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info) {
// Just ignore it.
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
index 8ae69fe..109cd08 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
@@ -62,8 +62,6 @@ class Diagnostic<string text, DiagClass DC, DiagMapping defaultmapping> {
DiagMapping DefaultMapping = defaultmapping;
DiagGroup Group;
string CategoryName = "";
- string Brief = "";
- string Explanation = "";
}
class Error<string str> : Diagnostic<str, CLASS_ERROR, MAP_ERROR>;
@@ -87,12 +85,6 @@ class DefaultWarnShowInSystemHeader {
class NoSFINAE { bit SFINAE = 0; }
class AccessControl { bit AccessControl = 1; }
-class Brief<string str> { string Brief = str; }
-class FullExplanation<string brief, string full> {
- string Brief = brief;
- string Explanation = full;
-}
-
// Definitions for Diagnostics.
include "DiagnosticASTKinds.td"
include "DiagnosticAnalysisKinds.td"
@@ -102,4 +94,5 @@ include "DiagnosticFrontendKinds.td"
include "DiagnosticLexKinds.td"
include "DiagnosticParseKinds.td"
include "DiagnosticSemaKinds.td"
+include "DiagnosticSerializationKinds.td"
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
index 705c95b..9cfe5ef 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -9,9 +9,103 @@
let Component = "AST" in {
-//def note_comma_in_ice : Note<
-// "C does not permit evaluated commas in an integer constant expression">;
+// Constant expression diagnostics. These (and their users) belong in Sema.
def note_expr_divide_by_zero : Note<"division by zero">;
+def note_constexpr_invalid_cast : Note<
+ "%select{reinterpret_cast|dynamic_cast|cast which performs the conversions of"
+ " a reinterpret_cast|cast from %1}0 is not allowed in a constant expression">;
+def note_constexpr_invalid_downcast : Note<
+ "cannot cast object of dynamic type %0 to type %1">;
+def note_constexpr_overflow : Note<
+ "value %0 is outside the range of representable values of type %1">;
+def note_constexpr_negative_shift : Note<"negative shift count %0">;
+def note_constexpr_large_shift : Note<
+ "shift count %0 >= width of type %1 (%2 bit%s2)">;
+def note_constexpr_lshift_of_negative : Note<"left shift of negative value %0">;
+def note_constexpr_lshift_discards : Note<"signed left shift discards bits">;
+def note_constexpr_invalid_function : Note<
+ "%select{non-constexpr|undefined}0 %select{function|constructor}1 %2 cannot "
+ "be used in a constant expression">;
+def note_constexpr_virtual_call : Note<
+ "cannot evaluate virtual function call in a constant expression">;
+def note_constexpr_virtual_base : Note<
+ "cannot construct object of type %0 with virtual base class "
+ "in a constant expression">;
+def note_constexpr_nonliteral : Note<
+ "non-literal type %0 cannot be used in a constant expression">;
+def note_constexpr_non_global : Note<
+ "%select{pointer|reference}0 to %select{|subobject of }1"
+ "%select{temporary|%3}2 is not a constant expression">;
+def note_constexpr_array_index : Note<"cannot refer to element %0 of "
+ "%select{array of %2 elements|non-array object}1 in a constant expression">;
+def note_constexpr_float_arithmetic : Note<
+ "floating point arithmetic produces %select{an infinity|a NaN}0">;
+def note_constexpr_pointer_subtraction_not_same_array : Note<
+ "subtracted pointers are not elements of the same array">;
+def note_constexpr_pointer_comparison_base_classes : Note<
+ "comparison of addresses of subobjects of different base classes "
+ "has unspecified value">;
+def note_constexpr_pointer_comparison_base_field : Note<
+ "comparison of address of base class subobject %0 of class %1 to field %2 "
+ "has unspecified value">;
+def note_constexpr_pointer_comparison_differing_access : Note<
+ "comparison of address of fields %0 and %2 of %4 with differing access "
+ "specifiers (%1 vs %3) has unspecified value">;
+def note_constexpr_compare_virtual_mem_ptr : Note<
+ "comparison of pointer to virtual member function %0 has unspecified value">;
+def note_constexpr_past_end : Note<
+ "dereferenced pointer past the end of %select{|subobject of }0"
+ "%select{temporary|%2}1 is not a constant expression">;
+def note_constexpr_past_end_subobject : Note<
+ "cannot %select{access base class of|access derived class of|access field of|"
+ "access array element of|ERROR|call member function on|"
+ "access real component of|access imaginary component of}0 "
+ "pointer past the end of object">;
+def note_constexpr_null_subobject : Note<
+ "cannot %select{access base class of|access derived class of|access field of|"
+ "access array element of|perform pointer arithmetic on|"
+ "call member function on|access real component of|"
+ "access imaginary component of}0 null pointer">;
+def note_constexpr_var_init_non_constant : Note<
+ "initializer of %0 is not a constant expression">;
+def note_constexpr_typeid_polymorphic : Note<
+ "typeid applied to expression of polymorphic type %0 is "
+ "not allowed in a constant expression">;
+def note_constexpr_void_comparison : Note<
+ "comparison between unequal pointers to void has unspecified result">;
+def note_constexpr_temporary_here : Note<"temporary created here">;
+def note_constexpr_conditional_never_const : Note<
+ "both arms of conditional operator are unable to produce a "
+ "constant expression">;
+def note_constexpr_depth_limit_exceeded : Note<
+ "constexpr evaluation exceeded maximum depth of %0 calls">;
+def note_constexpr_call_limit_exceeded : Note<
+ "constexpr evaluation hit maximum call limit">;
+def note_constexpr_lifetime_ended : Note<
+ "read of %select{temporary|variable}0 whose lifetime has ended">;
+def note_constexpr_ltor_volatile_type : Note<
+ "read of volatile-qualified type %0 is not allowed in a constant expression">;
+def note_constexpr_ltor_volatile_obj : Note<
+ "read of volatile %select{temporary|object %1|member %1}0 is not allowed in "
+ "a constant expression">;
+def note_constexpr_ltor_mutable : Note<
+ "read of mutable member %0 is not allowed in a constant expression">;
+def note_constexpr_ltor_non_const_int : Note<
+ "read of non-const variable %0 is not allowed in a constant expression">;
+def note_constexpr_ltor_non_constexpr : Note<
+ "read of non-constexpr variable %0 is not allowed in a constant expression">;
+def note_constexpr_read_past_end : Note<
+ "read of dereferenced one-past-the-end pointer is not allowed in a "
+ "constant expression">;
+def note_constexpr_read_inactive_union_member : Note<
+ "read of member %0 of union with %select{active member %2|no active member}1 "
+ "is not allowed in a constant expression">;
+def note_constexpr_read_uninit : Note<
+ "read of uninitialized object is not allowed in a constant expression">;
+def note_constexpr_calls_suppressed : Note<
+ "(skipping %0 call%s0 in backtrace; use -fconstexpr-backtrace-limit=0 to "
+ "see all)">;
+def note_constexpr_call_here : Note<"in call to '%0'">;
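Two format modifiers carry most of these messages: %select{a|b|c}N substitutes one alternative keyed on the integer value of argument N, and %sN appends 's' when argument N is not 1. A rendering sketch with hypothetical argument values:

    // note_constexpr_large_shift: "shift count %0 >= width of type %1 (%2 bit%s2)"
    //   %0 = 40, %1 = 'int', %2 = 32  ->  "shift count 40 >= width of type 'int' (32 bits)"
    //   %2 = 1                        ->  "... (1 bit)"
    // note_constexpr_invalid_function: "%select{non-constexpr|undefined}0 ..."
    //   %0 = 1                        ->  "undefined ..."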
// inline asm related.
let CategoryName = "Inline Assembly Issue" in {
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td
index 46dc0e6..5461212 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td
@@ -9,7 +9,4 @@
let Component = "Analysis" in {
-// CHECK: use of uninitialized values
-def warn_uninit_val : Warning<"use of uninitialized variable">;
-
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
index f9a910a..103fc00 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -18,6 +18,7 @@ let Component = "Common" in {
def fatal_too_many_errors
: Error<"too many errors emitted, stopping now">, DefaultFatal;
+def note_declared_at : Note<"declared here">;
def note_previous_definition : Note<"previous definition is here">;
def note_previous_declaration : Note<"previous declaration is here">;
def note_previous_implicit_declaration : Note<
@@ -40,6 +41,12 @@ def err_expected_colon : Error<"expected ':'">;
def err_expected_colon_after_setter_name : Error<
"method name referenced in property setter attribute "
"must end with ':'">;
+def err_invalid_string_udl : Error<
+ "string literal with user-defined suffix cannot be used here">;
+def err_invalid_character_udl : Error<
+ "character literal with user-defined suffix cannot be used here">;
+def err_invalid_numeric_udl : Error<
+ "numeric literal with user-defined suffix cannot be used here">;
// Parse && Sema
def ext_no_declarators : ExtWarn<"declaration does not declare anything">,
@@ -58,8 +65,6 @@ def warn_cxx98_compat_variadic_templates :
InGroup<CXX98Compat>, DefaultIgnore;
def err_default_special_members : Error<
"only special member functions may be defaulted">;
-def err_friends_define_only_namespace_scope : Error<
- "cannot define a function with non-namespace scope in a friend declaration">;
def err_deleted_non_function : Error<
"only functions can have deleted definitions">;
def err_module_not_found : Error<"module '%0' not found">, DefaultFatal;
@@ -83,8 +88,8 @@ def warn_integer_too_large_for_signed : Warning<
"integer constant is so large that it is unsigned">;
// Sema && AST
-def note_invalid_subexpr_in_ice : Note<
- "subexpression not valid in an integer constant expression">;
+def note_invalid_subexpr_in_const_expr : Note<
+ "subexpression not valid in a constant expression">;
// Targets
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 3c0e4f5..b443159 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -18,6 +18,10 @@ def err_drv_unknown_stdin_type : Error<
def err_drv_unknown_language : Error<"language not recognized: '%0'">;
def err_drv_invalid_arch_name : Error<
"invalid arch name '%0'">;
+def err_drv_invalid_rtlib_name : Error<
+ "invalid runtime library name in argument '%0'">;
+def err_drv_unsupported_rtlib_for_platform : Error<
+ "unsupported runtime library '%0' for platform '%1'">;
def err_drv_invalid_stdlib_name : Error<
"invalid library name in argument '%0'">;
def err_drv_invalid_opt_with_multiple_archs : Error<
@@ -38,7 +42,7 @@ def err_drv_command_failure : Error<
def err_drv_invalid_darwin_version : Error<
"invalid Darwin version number: %0">;
def err_drv_missing_argument : Error<
- "argument to '%0' is missing (expected %1 %plural{1:value|:values}1)">;
+ "argument to '%0' is missing (expected %1 value%s1)">;
def err_drv_invalid_Xarch_argument_with_args : Error<
"invalid Xarch argument: '%0', options requiring arguments are unsupported">;
def err_drv_invalid_Xarch_argument_isdriver : Error<
@@ -62,11 +66,13 @@ def err_drv_clang_unsupported_opt_cxx_darwin_i386 : Error<
def err_drv_command_failed : Error<
"%0 command failed with exit code %1 (use -v to see invocation)">;
def err_drv_command_signalled : Error<
- "%0 command failed due to signal %1 (use -v to see invocation)">;
+ "%0 command failed due to signal (use -v to see invocation)">;
def err_drv_invalid_mfloat_abi : Error<
"invalid float ABI '%0'">;
def err_drv_invalid_libcxx_deployment : Error<
"invalid deployment target for -stdlib=libc++ (requires %0 or later)">;
+def err_drv_invalid_feature : Error<
+ "invalid feature '%0' for CPU '%1'">;
def err_drv_I_dash_not_supported : Error<
"'%0' not supported, please use -iquote instead">;
@@ -89,21 +95,23 @@ def err_drv_objc_gc_arr : Error<
"cannot specify both '-fobjc-arc' and '%0'">;
def err_arc_nonfragile_abi : Error<
"-fobjc-arc is not supported with fragile abi">;
+def err_arc_unsupported : Error<
+ "-fobjc-arc is not supported on current deployment target">;
def err_drv_mg_requires_m_or_mm : Error<
"option '-MG' requires '-M' or '-MM'">;
def warn_c_kext : Warning<
"ignoring -fapple-kext which is valid for c++ and objective-c++ only">;
-def warn_drv_unsupported_option_argument : Warning<
- "ignoring unsupported argument '%1' to option '%0'">;
def warn_drv_input_file_unused : Warning<
"%0: '%1' input unused when '%2' is present">;
def warn_drv_preprocessed_input_file_unused : Warning<
"%0: previously preprocessed input unused when '%1' is present">;
def warn_drv_unused_argument : Warning<
- "argument unused during compilation: '%0'">;
-def warn_drv_pipe_ignored_with_save_temps : Warning<
- "-pipe ignored because -save-temps specified">;
+ "argument unused during compilation: '%0'">,
+ InGroup<DiagGroup<"unused-command-line-argument">>;
+def warn_drv_empty_joined_argument : Warning<
+ "joined argument expects addition arg: '%0'">,
+ InGroup<DiagGroup<"unused-command-line-argument">>;
def warn_drv_not_using_clang_cpp : Warning<
"not using the clang preprocessor due to user override">;
def warn_drv_not_using_clang_cxx : Warning<
@@ -116,8 +124,6 @@ def warn_drv_assuming_mfloat_abi_is : Warning<
"unknown platform, assuming -mfloat-abi=%0">;
def warn_ignoring_ftabstop_value : Warning<
"ignoring invalid -ftabstop value '%0', using default value %1">;
-def warn_drv_conflicting_deployment_targets : Warning<
- "conflicting deployment targets, both MACOSX_DEPLOYMENT_TARGET '%0' and IPHONEOS_DEPLOYMENT_TARGET '%1' are present in environment">;
def warn_drv_treating_input_as_cxx : Warning<
"treating '%0' input as '%1' when in C++ mode, this behavior is deprecated">,
InGroup<Deprecated>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index fffa42f..5d6b887 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -13,12 +13,12 @@ def err_fe_error_opening : Error<"error opening '%0': %1">;
def err_fe_error_reading : Error<"error reading '%0'">;
def err_fe_error_reading_stdin : Error<"error reading stdin">;
def err_fe_error_backend : Error<"error in backend: %0">, DefaultFatal;
-def err_fe_invalid_ast_file : Error<"invalid AST file: '%0'">, DefaultFatal;
-def err_fe_invalid_ast_action : Error<"invalid action for AST input">,
- DefaultFatal;
+
// Error generated by the backend.
def err_fe_inline_asm : Error<"%0">, CatInlineAsm;
def note_fe_inline_asm_here : Note<"instantiated into assembly here">;
+def err_fe_cannot_link_module : Error<"cannot link module '%0': %1">,
+ DefaultFatal;
@@ -26,16 +26,8 @@ def err_fe_invalid_code_complete_file : Error<
"cannot locate code-completion file %0">, DefaultFatal;
def err_fe_stdout_binary : Error<"unable to change standard output to binary">,
DefaultFatal;
-def err_fe_stderr_binary : Error<"unable to change standard error to binary">,
- DefaultFatal;
def err_fe_dependency_file_requires_MT : Error<
"-dependency-file requires at least one -MT or -MQ option">;
-def err_fe_incompatible_options : Error<
- "'%0' cannot be used with '%1'">, DefaultFatal;
-def err_fe_no_fixit_and_codegen : Error<
- "FIX-ITs cannot be applied when generating code">;
-def err_fe_unable_to_find_fixit_file : Error<
- "FIX-IT could not find file '%0'">;
def err_fe_invalid_plugin_name : Error<
"unable to find plugin '%0'">;
def err_fe_expected_compiler_job : Error<
@@ -54,19 +46,6 @@ def err_fe_unable_to_create_target : Error<
"unable to create target: '%0'">;
def err_fe_unable_to_interface_with_target : Error<
"unable to interface with target machine">;
-def err_fe_unable_to_read_pch_file : Error<
- "unable to read PCH file: '%0'">;
-def err_fe_not_a_pch_file : Error<
- "input is not a PCH file: '%0'">;
-def err_fe_pch_malformed : Error<
- "malformed or corrupted PCH file: '%0'">, DefaultFatal;
-def err_fe_pch_malformed_block : Error<
- "malformed block record in PCH file: '%0'">, DefaultFatal;
-def err_fe_pch_error_at_end_block : Error<
- "error at end of module block in PCH file: '%0'">, DefaultFatal;
-def err_fe_pch_file_modified : Error<
- "file '%0' has been modified since the precompiled header was built">,
- DefaultFatal;
def err_fe_unable_to_open_output : Error<
"unable to open output file '%0': '%1'">;
def err_fe_unable_to_rename_temp : Error<
@@ -82,6 +61,10 @@ def warn_fe_cc_print_header_failure : Warning<
def warn_fe_cc_log_diagnostics_failure : Warning<
"unable to open CC_LOG_DIAGNOSTICS file: %0 (using stderr)">;
+def warn_fe_serialized_diag_failure : Warning<
+ "unable to open file %0 for serializing diagnostics (%1)">,
+ InGroup<DiagGroup<"serialized-diagnostics">>;
+
def err_verify_missing_start : Error<
"cannot find start ('{{') of expected %0">;
def err_verify_missing_end : Error<
@@ -98,54 +81,25 @@ def note_fixit_in_macro : Note<
def note_fixit_failed : Note<
"FIX-IT unable to apply suggested code changes">;
def note_fixit_unfixed_error : Note<"FIX-IT detected an error it cannot fix">;
-def note_fixit_main_file_unchanged : Note<
- "main file unchanged">;
def warn_fixit_no_changes : Note<
"FIX-IT detected errors it could not fix; no output will be generated">;
-def err_fe_invoking : Error<"error invoking%0: %1">, DefaultFatal;
-
// PCH reader
def err_relocatable_without_isysroot : Error<
"must specify system root with -isysroot when building a relocatable "
"PCH file">;
-def warn_pch_target_triple : Error<
- "PCH file was compiled for the target '%0' but the current translation "
- "unit is being compiled for target '%1'">;
-def err_pch_langopt_mismatch : Error<"%0 was %select{disabled|enabled}1 in "
- "PCH file but is currently %select{disabled|enabled}2">;
-def err_pch_langopt_value_mismatch : Error<
- "%0 differs in PCH file vs. current file">;
-
-def warn_pch_version_too_old : Error<
- "PCH file uses an older PCH format that is no longer supported">;
-def warn_pch_version_too_new : Error<
- "PCH file uses a newer PCH format that cannot be read">;
-def warn_pch_different_branch : Error<
- "PCH file built from a different branch (%0) than the compiler (%1)">;
-def warn_cmdline_conflicting_macro_def : Error<
- "definition of the macro '%0' conflicts with the definition used to "
- "build the precompiled header">;
-def note_pch_macro_defined_as : Note<
- "definition of macro '%0' in the precompiled header">;
-def warn_cmdline_missing_macro_defs : Warning<
- "macro definitions used to build the precompiled header are missing">;
-def note_using_macro_def_from_pch : Note<
- "using this macro definition from precompiled header">;
-def warn_macro_name_used_in_pch : Error<
- "definition of macro %0 conflicts with an identifier used in the "
- "precompiled header">;
-def warn_pch_compiler_options_mismatch : Error<
- "compiler options used when building the precompiled header differ from "
- "the options used when using the precompiled header">;
-def err_not_a_pch_file : Error<
- "'%0' does not appear to be a precompiled header file">, DefaultFatal;
def warn_unknown_warning_option : Warning<
"unknown warning option '%0'">,
InGroup<DiagGroup<"unknown-warning-option"> >;
def warn_unknown_negative_warning_option : Warning<
- "unknown warning option '%0'">,
+ "unknown warning option '%0'?">,
+ InGroup<DiagGroup<"unknown-warning-option"> >, DefaultIgnore;
+def warn_unknown_warning_option_suggest : Warning<
+ "unknown warning option '%0'; did you mean '%1'?">,
+ InGroup<DiagGroup<"unknown-warning-option"> >;
+def warn_unknown_negative_warning_option_suggest : Warning<
+ "unknown warning option '%0'; did you mean '%1'?">,
InGroup<DiagGroup<"unknown-warning-option"> >, DefaultIgnore;
def warn_unknown_warning_specifier : Warning<
"unknown %0 warning specifier: '%1'">,
@@ -158,4 +112,23 @@ def warn_incompatible_analyzer_plugin_api : Warning<
InGroup<DiagGroup<"analyzer-incompatible-plugin"> >;
def note_incompatible_analyzer_plugin_api : Note<
"current API version is '%0', but plugin was compiled with version '%1'">;
+
+def err_module_map_not_found : Error<"module map file '%0' not found">,
+ DefaultFatal;
+def err_missing_module_name : Error<
+ "no module name provided; specify one with -fmodule-name=">,
+ DefaultFatal;
+def err_missing_module : Error<
+ "no module named '%0' declared in module map file '%1'">, DefaultFatal;
+def err_missing_umbrella_header : Error<
+ "cannot open umbrella header '%0': %1">, DefaultFatal;
+def err_no_submodule : Error<"no submodule named %0 in module '%1'">;
+def err_no_submodule_suggest : Error<
+ "no submodule named %0 in module '%1'; did you mean '%2'?">;
+def warn_missing_submodule : Warning<"missing submodule '%0'">,
+ InGroup<IncompleteUmbrella>;
+def err_module_map_temp_file : Error<
+ "unable to write temporary module map file '%0'">, DefaultFatal;
+def err_module_unavailable : Error<"module '%0' requires feature '%1'">;
+
}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
index 00a6663..c839853 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
@@ -25,7 +25,14 @@ def AmbigMemberTemplate : DiagGroup<"ambiguous-member-template">;
def : DiagGroup<"attributes">;
def : DiagGroup<"bad-function-cast">;
def Availability : DiagGroup<"availability">;
-def BoolConversions : DiagGroup<"bool-conversions">;
+def AutoImport : DiagGroup<"auto-import">;
+def ConstantConversion : DiagGroup<"constant-conversion">;
+def LiteralConversion : DiagGroup<"literal-conversion">;
+def StringConversion : DiagGroup<"string-conversion">;
+def SignConversion : DiagGroup<"sign-conversion">;
+def BoolConversion : DiagGroup<"bool-conversion">;
+def IntConversion : DiagGroup<"int-conversion">;
+def NullConversion : DiagGroup<"null-conversion">;
def BuiltinRequiresHeader : DiagGroup<"builtin-requires-header">;
def CXXCompat: DiagGroup<"c++-compat">;
def CastAlign : DiagGroup<"cast-align">;
@@ -55,34 +62,63 @@ def ExtraTokens : DiagGroup<"extra-tokens">;
def FormatExtraArgs : DiagGroup<"format-extra-args">;
def FormatZeroLength : DiagGroup<"format-zero-length">;
-def CXX98Compat : DiagGroup<"c++98-compat">;
+def CXX98CompatBindToTemporaryCopy :
+ DiagGroup<"c++98-compat-bind-to-temporary-copy">;
+def CXX98CompatLocalTypeTemplateArgs :
+ DiagGroup<"c++98-compat-local-type-template-args">;
+def CXX98CompatUnnamedTypeTemplateArgs :
+ DiagGroup<"c++98-compat-unnamed-type-template-args">;
+
+def CXX98Compat : DiagGroup<"c++98-compat",
+ [CXX98CompatBindToTemporaryCopy,
+ CXX98CompatLocalTypeTemplateArgs,
+ CXX98CompatUnnamedTypeTemplateArgs]>;
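Splitting -Wc++98-compat into named subgroups means the parent flag toggles all members while each member stays individually addressable. Behavior sketch (assumed driver semantics):

    // -Wc++98-compat
    //     turns on all three c++98-compat-* subgroups above
    // -Wc++98-compat -Wno-c++98-compat-local-type-template-args
    //     leaves the other two on and silences only that one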
// Warnings for C++11 features which are Extensions in C++98 mode.
def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic", [CXX98Compat]>;
def CXX11Narrowing : DiagGroup<"c++11-narrowing">;
+
+// Original name of this warning in Clang
def : DiagGroup<"c++0x-narrowing", [CXX11Narrowing]>;
-def CXX11Compat : DiagGroup<"c++11-compat", [CXX11Narrowing]>;
+// Name of this warning in GCC
+def : DiagGroup<"narrowing", [CXX11Narrowing]>;
+
+def CXX11CompatReservedUserDefinedLiteral :
+ DiagGroup<"c++11-compat-reserved-user-defined-literal">;
+def ReservedUserDefinedLiteral :
+ DiagGroup<"reserved-user-defined-literal",
+ [CXX11CompatReservedUserDefinedLiteral]>;
+
+def CXX11Compat : DiagGroup<"c++11-compat",
+ [CXX11Narrowing,
+ CXX11CompatReservedUserDefinedLiteral]>;
def : DiagGroup<"c++0x-compat", [CXX11Compat]>;
def : DiagGroup<"effc++">;
def ExitTimeDestructors : DiagGroup<"exit-time-destructors">;
+def FlexibleArrayExtensions : DiagGroup<"flexible-array-extensions">;
def FourByteMultiChar : DiagGroup<"four-char-constants">;
def GlobalConstructors : DiagGroup<"global-constructors">;
def : DiagGroup<"idiomatic-parentheses">;
def BitwiseOpParentheses: DiagGroup<"bitwise-op-parentheses">;
def LogicalOpParentheses: DiagGroup<"logical-op-parentheses">;
+def DanglingElse: DiagGroup<"dangling-else">;
def IgnoredQualifiers : DiagGroup<"ignored-qualifiers">;
def : DiagGroup<"import">;
def IncompatiblePointerTypes : DiagGroup<"incompatible-pointer-types">;
+def IncompleteUmbrella : DiagGroup<"incomplete-umbrella">;
+def KNRPromotedParameter : DiagGroup<"knr-promoted-parameter">;
def : DiagGroup<"init-self">;
def : DiagGroup<"inline">;
def : DiagGroup<"int-to-pointer-cast">;
def : DiagGroup<"invalid-pch">;
def LiteralRange : DiagGroup<"literal-range">;
-def LocalTypeTemplateArgs : DiagGroup<"local-type-template-args">;
+def LocalTypeTemplateArgs : DiagGroup<"local-type-template-args",
+ [CXX98CompatLocalTypeTemplateArgs]>;
def MalformedWarningCheck : DiagGroup<"malformed-warning-check">;
def Main : DiagGroup<"main">;
+def MainReturnType : DiagGroup<"main-return-type">;
def MissingBraces : DiagGroup<"missing-braces">;
def MissingDeclarations: DiagGroup<"missing-declarations">;
def : DiagGroup<"missing-format-attribute">;
@@ -95,31 +131,37 @@ def LongLong : DiagGroup<"long-long">;
def MismatchedTags : DiagGroup<"mismatched-tags">;
def MissingFieldInitializers : DiagGroup<"missing-field-initializers">;
def ModuleBuild : DiagGroup<"module-build">;
+def NullCharacter : DiagGroup<"null-character">;
def NullDereference : DiagGroup<"null-dereference">;
def InitializerOverrides : DiagGroup<"initializer-overrides">;
def NonNull : DiagGroup<"nonnull">;
def : DiagGroup<"nonportable-cfstrings">;
def NonVirtualDtor : DiagGroup<"non-virtual-dtor">;
+def OveralignedType : DiagGroup<"over-aligned">;
def : DiagGroup<"old-style-cast">;
def : DiagGroup<"old-style-definition">;
def OutOfLineDeclaration : DiagGroup<"out-of-line-declaration">;
def : DiagGroup<"overflow">;
def OverlengthStrings : DiagGroup<"overlength-strings">;
def OverloadedVirtual : DiagGroup<"overloaded-virtual">;
+def ObjCPropertyImpl : DiagGroup<"objc-property-implementation">;
def ObjCMissingSuperCalls : DiagGroup<"objc-missing-super-calls">;
def ObjCRetainBlockProperty : DiagGroup<"objc-noncopy-retain-block-property">;
-def ObjCContinuationPropertyType :DiagGroup<"objc-continuation-property-type">;
+def ObjCReadonlyPropertyHasSetter : DiagGroup<"objc-readonly-with-setter-property">;
+def ObjCRootClass : DiagGroup<"objc-root-class">;
def Packed : DiagGroup<"packed">;
def Padded : DiagGroup<"padded">;
def PointerArith : DiagGroup<"pointer-arith">;
def PoundWarning : DiagGroup<"#warnings">,
DiagCategory<"#warning Directive">;
-def PoundPragmaMessage : DiagGroup<"#pragma messages">,
+def PoundPragmaMessage : DiagGroup<"#pragma-messages">,
DiagCategory<"#pragma message Directive">;
def : DiagGroup<"pointer-to-int-cast">;
def : DiagGroup<"redundant-decls">;
-def ReturnType : DiagGroup<"return-type">;
-def BindToTemporaryCopy : DiagGroup<"bind-to-temporary-copy">;
+def ReturnTypeCLinkage : DiagGroup<"return-type-c-linkage">;
+def ReturnType : DiagGroup<"return-type", [ReturnTypeCLinkage]>;
+def BindToTemporaryCopy : DiagGroup<"bind-to-temporary-copy",
+ [CXX98CompatBindToTemporaryCopy]>;
def SelfAssignment : DiagGroup<"self-assign">;
def SemiBeforeMethodBody : DiagGroup<"semicolon-before-method-body">;
def Sentinel : DiagGroup<"sentinel">;
@@ -133,6 +175,8 @@ def : DiagGroup<"stack-protector">;
def : DiagGroup<"switch-default">;
def : DiagGroup<"synth">;
def SizeofArrayArgument : DiagGroup<"sizeof-array-argument">;
+def StringPlusInt : DiagGroup<"string-plus-int">;
+def StrncatSize : DiagGroup<"strncat-size">;
def TautologicalCompare : DiagGroup<"tautological-compare">;
def HeaderHygiene : DiagGroup<"header-hygiene">;
@@ -155,18 +199,25 @@ def : DiagGroup<"strict-overflow=5">;
def : DiagGroup<"strict-overflow">;
def InvalidOffsetof : DiagGroup<"invalid-offsetof">;
+def LambdaExtensions : DiagGroup<"lambda-extensions">;
def : DiagGroup<"strict-prototypes">;
def StrictSelector : DiagGroup<"strict-selector-match">;
+def MethodDuplicate : DiagGroup<"duplicate-method-match">;
+def CoveredSwitchDefault : DiagGroup<"covered-switch-default">;
def SwitchEnum : DiagGroup<"switch-enum">;
-def Switch : DiagGroup<"switch", [SwitchEnum]>;
+def Switch : DiagGroup<"switch">;
def Trigraphs : DiagGroup<"trigraphs">;
def : DiagGroup<"type-limits">;
+def Unicode : DiagGroup<"unicode">;
def Uninitialized : DiagGroup<"uninitialized">;
def UninitializedMaybe : DiagGroup<"conditional-uninitialized">;
def UnknownPragmas : DiagGroup<"unknown-pragmas">;
+def NSobjectAttribute : DiagGroup<"NSObject-attribute">;
def UnknownAttributes : DiagGroup<"attributes">;
-def UnnamedTypeTemplateArgs : DiagGroup<"unnamed-type-template-args">;
+def IgnoredAttributes : DiagGroup<"ignored-attributes">;
+def UnnamedTypeTemplateArgs : DiagGroup<"unnamed-type-template-args",
+ [CXX98CompatUnnamedTypeTemplateArgs]>;
def UnusedArgument : DiagGroup<"unused-argument">;
def UnusedComparison : DiagGroup<"unused-comparison">;
def UnusedExceptionParameter : DiagGroup<"unused-exception-parameter">;
@@ -205,9 +256,11 @@ def SuperSubClassMismatch : DiagGroup<"super-class-method-mismatch">;
def OverridingMethodMismatch : DiagGroup<"overriding-method-mismatch">;
def : DiagGroup<"variadic-macros">;
def VariadicMacros : DiagGroup<"variadic-macros">;
-def VectorConversions : DiagGroup<"vector-conversions">; // clang specific
+def VectorConversion : DiagGroup<"vector-conversion">; // clang specific
+def VexingParse : DiagGroup<"vexing-parse">;
def VLA : DiagGroup<"vla">;
def VolatileRegisterVar : DiagGroup<"volatile-register-var">;
+def Visibility : DiagGroup<"visibility">;
// GCC calls -Wdeprecated-writable-strings -Wwrite-strings.
def GCCWriteStrings : DiagGroup<"write-strings" , [DeprecatedWritableStr]>;
@@ -226,7 +279,8 @@ def ParenthesesOnEquality : DiagGroup<"parentheses-equality">;
def Parentheses : DiagGroup<"parentheses",
[LogicalOpParentheses,
BitwiseOpParentheses,
- ParenthesesOnEquality]>;
+ ParenthesesOnEquality,
+ DanglingElse]>;
// -Wconversion has its own warnings, but we split a few out for
// legacy reasons:
@@ -234,13 +288,16 @@ def Parentheses : DiagGroup<"parentheses",
// - conversion warnings with constant sources are on by default
// - conversion warnings for literals are on by default
// - bool-to-pointer conversion warnings are on by default
+// - __null-to-integer conversion warnings are on by default
def Conversion : DiagGroup<"conversion",
[DiagGroup<"shorten-64-to-32">,
- DiagGroup<"constant-conversion">,
- DiagGroup<"literal-conversion">,
- DiagGroup<"string-conversion">,
- DiagGroup<"sign-conversion">,
- BoolConversions]>,
+ ConstantConversion,
+ LiteralConversion,
+ StringConversion,
+ SignConversion,
+ BoolConversion,
+ NullConversion,
+ IntConversion]>,
DiagCategory<"Value Conversion Issue">;
def Unused : DiagGroup<"unused",
@@ -253,6 +310,7 @@ def Unused : DiagGroup<"unused",
// Format settings.
def FormatInvalidSpecifier : DiagGroup<"format-invalid-specifier">;
def FormatSecurity : DiagGroup<"format-security">;
+def FormatNonStandard : DiagGroup<"format-non-iso">;
def FormatY2K : DiagGroup<"format-y2k">;
def Format : DiagGroup<"format",
[FormatExtraArgs, FormatZeroLength, NonNull,
@@ -284,8 +342,8 @@ def Most : DiagGroup<"most", [
Reorder,
ReturnType,
SelfAssignment,
- Switch,
SizeofArrayArgument,
+ StringPlusInt,
Trigraphs,
Uninitialized,
UnknownPragmas,
@@ -298,13 +356,23 @@ def Most : DiagGroup<"most", [
// Thread Safety warnings
def ThreadSafety : DiagGroup<"thread-safety">;
-// -Wall is -Wmost -Wparentheses -Wtop-level-comparison
-def : DiagGroup<"all", [Most, Parentheses]>;
+// Note that putting warnings in -Wall will not disable them by default. If a
+// warning should be active _only_ when -Wall is passed in, mark it as
+// DefaultIgnore in addition to putting it here.
+def : DiagGroup<"all", [Most, Parentheses, Switch]>;
// Aliases.
def : DiagGroup<"", [Extra]>; // -W = -Wextra
def : DiagGroup<"endif-labels", [ExtraTokens]>; // -Wendif-labels=-Wendif-tokens
def : DiagGroup<"comments", [Comment]>; // -Wcomments = -Wcomment
+def : DiagGroup<"conversion-null",
+ [NullConversion]>; // -Wconversion-null = -Wnull-conversion
+def : DiagGroup<"bool-conversions",
+ [BoolConversion]>; // -Wbool-conversions = -Wbool-conversion
+def : DiagGroup<"int-conversions",
+ [IntConversion]>; // -Wint-conversions = -Wint-conversion
+def : DiagGroup<"vector-conversions",
+ [VectorConversion]>; // -Wvector-conversions = -Wvector-conversion
// A warning group for warnings that we want to have on by default in clang,
// but which aren't on by default in GCC.
@@ -318,11 +386,16 @@ def : DiagGroup<"c++0x-extensions", [CXX11]>;
def DelegatingCtorCycles :
DiagGroup<"delegating-ctor-cycles">;
-// A warning group for warnings about using C1X features as extensions.
-def C1X : DiagGroup<"c1x-extensions">;
+// A warning group for warnings about using C11 features as extensions.
+def C11 : DiagGroup<"c11-extensions">;
+
+// A warning group for warnings about using C99 features as extensions.
+def C99 : DiagGroup<"c99-extensions">;
// A warning group for warnings about GCC extensions.
def GNU : DiagGroup<"gnu", [GNUDesignator, VLA]>;
+// A warning group for warnings about code that clang accepts but gcc doesn't.
+def GccCompat : DiagGroup<"gcc-compat">;
// A warning group for warnings about Microsoft extensions.
def Microsoft : DiagGroup<"microsoft">;
@@ -330,3 +403,13 @@ def Microsoft : DiagGroup<"microsoft">;
def ObjCNonUnifiedException : DiagGroup<"objc-nonunified-exceptions">;
def ObjCProtocolMethodImpl : DiagGroup<"objc-protocol-method-implementation">;
+
+// ObjC API warning groups.
+def ObjCRedundantLiteralUse : DiagGroup<"objc-redundant-literal-use">;
+def ObjCRedundantAPIUse : DiagGroup<"objc-redundant-api-use", [
+ ObjCRedundantLiteralUse
+ ]>;
+
+def ObjCCocoaAPI : DiagGroup<"objc-cocoa-api", [
+ ObjCRedundantAPIUse
+ ]>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
index 16d9b39..a6c22db 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
@@ -31,14 +31,15 @@ namespace clang {
namespace diag {
// Start position for diagnostics.
enum {
- DIAG_START_DRIVER = 300,
- DIAG_START_FRONTEND = DIAG_START_DRIVER + 100,
- DIAG_START_LEX = DIAG_START_FRONTEND + 120,
- DIAG_START_PARSE = DIAG_START_LEX + 300,
- DIAG_START_AST = DIAG_START_PARSE + 300,
- DIAG_START_SEMA = DIAG_START_AST + 100,
- DIAG_START_ANALYSIS = DIAG_START_SEMA + 3000,
- DIAG_UPPER_LIMIT = DIAG_START_ANALYSIS + 100
+ DIAG_START_DRIVER = 300,
+ DIAG_START_FRONTEND = DIAG_START_DRIVER + 100,
+ DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + 100,
+ DIAG_START_LEX = DIAG_START_SERIALIZATION + 120,
+ DIAG_START_PARSE = DIAG_START_LEX + 300,
+ DIAG_START_AST = DIAG_START_PARSE + 400,
+ DIAG_START_SEMA = DIAG_START_AST + 100,
+ DIAG_START_ANALYSIS = DIAG_START_SEMA + 3000,
+ DIAG_UPPER_LIMIT = DIAG_START_ANALYSIS + 100
};
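Folding in the SERIALIZATION block shifts every later range; the start values that follow from the enum are 300, 400, 500, 620, 920, 1320, 1420 and 4420, with DIAG_UPPER_LIMIT at 4520. A sketch of classifying an ID by range (illustrative helper, not part of the patch):

    bool isSerializationDiag(unsigned DiagID) {
      return DiagID >= clang::diag::DIAG_START_SERIALIZATION &&
             DiagID <  clang::diag::DIAG_START_LEX;  // i.e. [500, 620)
    }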
class CustomDiagInfo;
@@ -49,7 +50,7 @@ namespace clang {
// Get typedefs for common diagnostics.
enum {
#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
- SFINAE,ACCESS,CATEGORY,NOWERROR,SHOWINSYSHEADER,BRIEF,FULL) ENUM,
+ SFINAE,ACCESS,CATEGORY,NOWERROR,SHOWINSYSHEADER) ENUM,
#include "clang/Basic/DiagnosticCommonKinds.inc"
NUM_BUILTIN_COMMON_DIAGNOSTICS
#undef DIAG
@@ -108,7 +109,7 @@ public:
/// \brief Used for handling and querying diagnostic IDs. Can be used and shared
/// by multiple Diagnostics for multiple translation units.
-class DiagnosticIDs : public llvm::RefCountedBase<DiagnosticIDs> {
+class DiagnosticIDs : public RefCountedBase<DiagnosticIDs> {
public:
/// Level - The level of the diagnostic, after it has been through mapping.
enum Level {
@@ -182,6 +183,10 @@ public:
/// category.
static StringRef getCategoryNameFromID(unsigned CategoryID);
+ /// isARCDiagnostic - Return true if a given diagnostic falls into an
+ /// ARC diagnostic category;
+ static bool isARCDiagnostic(unsigned DiagID);
+
/// \brief Enumeration describing how the emission of a diagnostic should
/// be treated when it occurs during C++ template argument deduction.
enum SFINAEResponse {
@@ -218,39 +223,6 @@ public:
/// are not SFINAE errors.
static SFINAEResponse getDiagnosticSFINAEResponse(unsigned DiagID);
- /// getName - Given a diagnostic ID, return its name
- static StringRef getName(unsigned DiagID);
-
- /// getIdFromName - Given a diagnostic name, return its ID, or 0
- static unsigned getIdFromName(StringRef Name);
-
- /// getBriefExplanation - Given a diagnostic ID, return a brief explanation
- /// of the issue
- static StringRef getBriefExplanation(unsigned DiagID);
-
- /// getFullExplanation - Given a diagnostic ID, return a full explanation
- /// of the issue
- static StringRef getFullExplanation(unsigned DiagID);
-
- /// Iterator class used for traversing all statically declared
- /// diagnostics.
- class diag_iterator {
- const void *impl;
-
- friend class DiagnosticIDs;
- diag_iterator(const void *im) : impl(im) {}
- public:
- diag_iterator &operator++();
- bool operator==(const diag_iterator &x) const { return impl == x.impl; }
- bool operator!=(const diag_iterator &x) const { return impl != x.impl; }
-
- llvm::StringRef getDiagName() const;
- unsigned getDiagID() const;
- };
-
- static diag_iterator diags_begin();
- static diag_iterator diags_end();
-
/// \brief Get the set of all diagnostic IDs in the group with the given name.
///
/// \param Diags [out] - On return, the diagnostics in the group.
@@ -258,6 +230,13 @@ public:
bool getDiagnosticsInGroup(StringRef Group,
llvm::SmallVectorImpl<diag::kind> &Diags) const;
+ /// \brief Get the set of all diagnostic IDs.
+ void getAllDiagnostics(llvm::SmallVectorImpl<diag::kind> &Diags) const;
+
+ /// \brief Get the warning option with the closest edit distance to the given
+ /// group name.
+ static StringRef getNearestWarningOption(StringRef Group);
+
private:
/// \brief Get the set of all diagnostic IDs in the given group.
///
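getNearestWarningOption() is the hook behind the new warn_unknown_warning_option_suggest diagnostics added to DiagnosticFrontendKinds.td above. A usage sketch with a hypothetical misspelling:

    llvm::StringRef Nearest =
        clang::DiagnosticIDs::getNearestWarningOption("unitialized");
    // Presumably "uninitialized" (smallest edit distance), letting the
    // frontend emit: unknown warning option '-Wunitialized';
    //                did you mean '-Wuninitialized'?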
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
index 9b3a178..670283e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -13,16 +13,19 @@
let Component = "Lex", CategoryName = "Lexical or Preprocessor Issue" in {
-def null_in_string : Warning<"null character(s) preserved in string literal">;
-def null_in_char : Warning<"null character(s) preserved in character literal">;
-def null_in_file : Warning<"null character ignored">;
+def null_in_string : Warning<"null character(s) preserved in string literal">,
+ InGroup<NullCharacter>;
+def null_in_char : Warning<"null character(s) preserved in character literal">,
+ InGroup<NullCharacter>;
+def null_in_file : Warning<"null character ignored">, InGroup<NullCharacter>;
def warn_nested_block_comment : Warning<"'/*' within block comment">,
InGroup<Comment>;
def escaped_newline_block_comment_end : Warning<
"escaped newline between */ characters at block comment end">,
InGroup<Comment>;
def backslash_newline_space : Warning<
- "backslash and newline separated by space">;
+ "backslash and newline separated by space">,
+ InGroup<DiagGroup<"backslash-newline-escape">>;
// Digraphs.
def warn_cxx98_compat_less_colon_colon : Warning<
@@ -45,14 +48,21 @@ def ext_bcpl_comment : Extension<
InGroup<Comment>;
def ext_no_newline_eof : Extension<"no newline at end of file">,
InGroup<DiagGroup<"newline-eof">>;
-def ext_dollar_in_identifier : Extension<"'$' in identifier">;
-def charize_microsoft_ext : Extension<"@# is a microsoft extension">;
+
+def warn_cxx98_compat_no_newline_eof : Warning<
+ "C++98 requires newline at end of file">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+
+def ext_dollar_in_identifier : Extension<"'$' in identifier">,
+ InGroup<DiagGroup<"dollar-in-identifier-extension">>;
+def ext_charize_microsoft : Extension<"@# is a microsoft extension">,
+ InGroup<Microsoft>;
def ext_token_used : Extension<"extension used">,
InGroup<DiagGroup<"language-extension-token">>;
def warn_cxx11_keyword : Warning<"'%0' is a keyword in C++11">,
- InGroup<CXX11Compat>;
+ InGroup<CXX11Compat>, DefaultIgnore;
def warn_unterminated_string : ExtWarn<"missing terminating '\"' character">;
def warn_unterminated_char : ExtWarn<"missing terminating ' character">;
@@ -90,7 +100,16 @@ def err_hex_escape_no_digits : Error<"\\x used with no following hex digits">;
def err_ucn_escape_no_digits : Error<"\\u used with no following hex digits">;
def err_ucn_escape_invalid : Error<"invalid universal character">;
def err_ucn_escape_incomplete : Error<"incomplete universal character name">;
-def err_ucn_escape_too_big : Error<"universal character name is too long">;
+def err_ucn_escape_basic_scs : Error<
+ "character '%0' cannot be specified by a universal character name">;
+def err_ucn_control_character : Error<
+ "universal character name refers to a control character">;
+def warn_cxx98_compat_literal_ucn_escape_basic_scs : Warning<
+ "specifying character '%0' with a universal character name "
+ "is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_literal_ucn_control_character : Warning<
+ "universal character name referring to a control character "
+ "is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def err_invalid_decimal_digit : Error<"invalid digit '%0' in decimal constant">;
def err_invalid_binary_digit : Error<"invalid digit '%0' in binary constant">;
def err_invalid_octal_digit : Error<"invalid digit '%0' in octal constant">;
@@ -102,10 +121,14 @@ def warn_extraneous_char_constant : Warning<
"extraneous characters in character constant ignored">;
def warn_char_constant_too_large : Warning<
"character constant too long for its type">;
+def err_multichar_utf_character_literal : Error<
+ "Unicode character literals may not contain multiple characters">;
def err_exponent_has_no_digits : Error<"exponent has no digits">;
def ext_imaginary_constant : Extension<"imaginary constants are an extension">;
def err_hexconstant_requires_exponent : Error<
"hexadecimal floating constants require an exponent">;
+def err_hexconstant_requires_digits : Error<
+ "hexadecimal floating constants require a significand">;
def ext_hexconstant_invalid : Extension<
"hexadecimal floating constants are a C99 feature">;
def ext_binary_literal : Extension<
@@ -116,36 +139,69 @@ def warn_hex_escape_too_large : ExtWarn<"hex escape sequence out of range">;
def ext_string_too_long : Extension<"string literal of length %0 exceeds "
"maximum length %1 that %select{C90|ISO C99|C++}2 compilers are required to "
"support">, InGroup<OverlengthStrings>;
-def warn_ucn_escape_too_large : ExtWarn<
- "character unicode escape sequence too long for its type">;
+def err_character_too_large : Error<
+ "character too large for enclosing character literal type">;
def warn_ucn_not_valid_in_c89 : ExtWarn<
- "unicode escape sequences are only valid in C99 or C++">;
+ "unicode escape sequences are only valid in C99 or C++">, InGroup<Unicode>;
def warn_cxx98_compat_unicode_literal : Warning<
"unicode literals are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx11_compat_user_defined_literal : Warning<
+ "identifier after literal will be treated as a user-defined literal suffix "
+ "in C++11">, InGroup<CXX11Compat>, DefaultIgnore;
+def warn_cxx11_compat_reserved_user_defined_literal : Warning<
+ "identifier after literal will be treated as a reserved user-defined literal "
+ "suffix in C++11">,
+ InGroup<CXX11CompatReservedUserDefinedLiteral>, DefaultIgnore;
+def ext_reserved_user_defined_literal : ExtWarn<
+ "invalid suffix on literal; C++11 requires a space between literal and "
+ "identifier">, InGroup<ReservedUserDefinedLiteral>, DefaultError;
+def ext_ms_reserved_user_defined_literal : ExtWarn<
+ "invalid suffix on literal; C++11 requires a space between literal and "
+ "identifier">, InGroup<ReservedUserDefinedLiteral>;
def err_unsupported_string_concat : Error<
"unsupported non-standard concatenation of string literals">;
-
+def err_string_concat_mixed_suffix : Error<
+ "differing user-defined suffixes ('%0' and '%1') in string literal "
+ "concatenation">;
+def err_pp_invalid_udl : Error<
+ "%select{character|integer}0 literal with user-defined suffix "
+ "cannot be used in preprocessor constant expression">;
+def err_bad_string_encoding : Error<
+ "illegal character encoding in string literal">;
+def warn_bad_string_encoding : ExtWarn<
+ "illegal character encoding in string literal">,
+ InGroup<DiagGroup<"invalid-source-encoding">>;
+def err_bad_character_encoding : Error<
+ "illegal character encoding in character literal">;
+def warn_bad_character_encoding : ExtWarn<
+ "illegal character encoding in character literal">,
+ InGroup<DiagGroup<"invalid-source-encoding">>;
+
+
//===----------------------------------------------------------------------===//
// PTH Diagnostics
//===----------------------------------------------------------------------===//
-def err_pth_cannot_read : Error<
- "PTH file '%0' could not be read">;
def err_invalid_pth_file : Error<
"invalid or corrupt PTH file '%0'">;
//===----------------------------------------------------------------------===//
// Preprocessor Diagnostics
//===----------------------------------------------------------------------===//
-def pp_hash_warning : Warning<"#warning%0">,
+
+let CategoryName = "User Defined Issues" in {
+def pp_hash_warning : Warning<"%0">,
InGroup<PoundWarning>, DefaultWarnShowInSystemHeader;
+def err_pp_hash_error : Error<"%0">;
+}
+
def pp_include_next_in_primary : Warning<
"#include_next in primary source file">;
def pp_include_macros_out_of_predefines : Error<
"the #__include_macros directive is only for internal use by -imacros">;
def pp_include_next_absolute_path : Warning<"#include_next with absolute path">;
def ext_c99_whitespace_required_after_macro_name : ExtWarn<
- "ISO C99 requires whitespace after the macro name">;
+ "ISO C99 requires whitespace after the macro name">, InGroup<C99>;
def ext_missing_whitespace_after_macro_name : ExtWarn<
"whitespace required after macro name">;
def warn_missing_whitespace_after_macro_name : Warning<
@@ -160,6 +216,9 @@ def pp_out_of_date_dependency : Warning<
def pp_undef_builtin_macro : Warning<"undefining builtin macro">;
def pp_redef_builtin_macro : Warning<"redefining builtin macro">,
InGroup<DiagGroup<"builtin-macro-redefined">>;
+def pp_disabled_macro_expansion : Warning<
+ "disabled expansion of recursive macro">, DefaultIgnore,
+ InGroup<DiagGroup<"disabled-macro-expansion">>;
def pp_macro_not_used : Warning<"macro is not used">, DefaultIgnore,
InGroup<DiagGroup<"unused-macros">>;
def warn_pp_undef_identifier : Warning<
@@ -175,7 +234,11 @@ def warn_pp_convert_lhs_to_positive : Warning<
def warn_pp_convert_rhs_to_positive : Warning<
"right side of operator converted from negative value to unsigned: %0">;
-def ext_pp_import_directive : Extension<"#import is a language extension">;
+def ext_pp_import_directive : Extension<"#import is a language extension">,
+ InGroup<DiagGroup<"import-preprocessor-directive-pedantic">>;
+def err_pp_import_directive_ms : Error<
+ "#import of type library is an unsupported Microsoft feature">;
+
def ext_pp_ident_directive : Extension<"#ident is a language extension">;
def ext_pp_include_next_directive : Extension<
"#include_next is a language extension">;
@@ -195,8 +258,11 @@ def warn_cxx98_compat_variadic_macro : Warning<
InGroup<CXX98CompatPedantic>, DefaultIgnore;
def ext_named_variadic_macro : Extension<
"named variadic macros are a GNU extension">, InGroup<VariadicMacros>;
+def err_embedded_include : Error<
+ "embedding a #%0 directive within macro arguments is not supported">;
def ext_embedded_directive : Extension<
- "embedding a directive within macro arguments is not portable">;
+ "embedding a directive within macro arguments has undefined behavior">,
+ InGroup<DiagGroup<"embedded-directive">>;
def ext_missing_varargs_arg : Extension<
"varargs argument missing, but tolerated as an extension">;
def ext_empty_fnmacro_arg : Extension<
@@ -206,7 +272,6 @@ def warn_cxx98_compat_empty_fnmacro_arg : Warning<
InGroup<CXX98CompatPedantic>, DefaultIgnore;
def err_pp_invalid_directive : Error<"invalid preprocessing directive">;
-def err_pp_hash_error : Error<"#error%0">;
def err_pp_file_not_found : Error<"'%0' file not found">, DefaultFatal;
def err_pp_error_opening_file : Error<
"error opening file '%0': %1">, DefaultFatal;
@@ -236,7 +301,6 @@ def pp_err_else_without_if : Error<"#else without #if">;
def pp_err_elif_without_if : Error<"#elif without #if">;
def err_pp_endif_without_if : Error<"#endif without #if">;
def err_pp_expected_value_in_expr : Error<"expected value in expression">;
-def err_pp_missing_val_before_operator : Error<"missing value before operator">;
def err_pp_expected_rparen : Error<"expected ')' in preprocessor expression">;
def err_pp_expected_eol : Error<
"expected end of line in preprocessor expression">;
@@ -266,6 +330,19 @@ def warn_has_warning_invalid_option :
ExtWarn<"__has_warning expected option name (e.g. \"-Wundef\")">,
InGroup<MalformedWarningCheck>;
+def warn_pragma_include_alias_mismatch_angle :
+ ExtWarn<"angle-bracketed include <%0> cannot be aliased to double-quoted "
+ "include \"%1\"">, InGroup<UnknownPragmas>;
+def warn_pragma_include_alias_mismatch_quote :
+ ExtWarn<"double-quoted include \"%0\" cannot be aliased to angle-bracketed "
+ "include <%1>">, InGroup<UnknownPragmas>;
+def warn_pragma_include_alias_expected :
+ ExtWarn<"pragma include_alias expected '%0'">,
+ InGroup<UnknownPragmas>;
+def warn_pragma_include_alias_expected_filename :
+ ExtWarn<"pragma include_alias expected include filename">,
+ InGroup<UnknownPragmas>;
+
def err__Pragma_malformed : Error<
"_Pragma takes a parenthesized string literal">;
def err_pragma_comment_malformed : Error<
@@ -355,7 +432,7 @@ def warn_cxx98_compat_pp_line_too_big : Warning<
"#line number greater than 32767 is incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
-def err_pp_export_non_macro : Error<"no macro named %0 to export">;
+def err_pp_visibility_non_macro : Error<"no macro named %0">;
def err_pp_arc_cf_code_audited_syntax : Error<"expected 'begin' or 'end'">;
def err_pp_double_begin_of_arc_cf_code_audited : Error<
@@ -367,4 +444,60 @@ def err_pp_include_in_arc_cf_code_audited : Error<
def err_pp_eof_in_arc_cf_code_audited : Error<
"'#pragma clang arc_cf_code_audited' was not ended within this file">;
+// Module map parsing
+def err_mmap_unknown_token : Error<"skipping stray token">;
+def err_mmap_expected_module : Error<"expected module declaration">;
+def err_mmap_expected_module_name : Error<"expected module name">;
+def err_mmap_expected_lbrace : Error<"expected '{' to start module '%0'">;
+def err_mmap_expected_rbrace : Error<"expected '}'">;
+def note_mmap_lbrace_match : Note<"to match this '{'">;
+def err_mmap_expected_rsquare : Error<"expected ']' to close attribute">;
+def note_mmap_lsquare_match : Note<"to match this '['">;
+def err_mmap_expected_member : Error<
+ "expected umbrella, header, submodule, or module export">;
+def err_mmap_expected_header : Error<"expected a header name after '%0'">;
+def err_mmap_module_redefinition : Error<
+ "redefinition of module '%0'">;
+def note_mmap_prev_definition : Note<"previously defined here">;
+def err_mmap_header_conflict : Error<
+ "header '%0' is already part of module '%1'">;
+def err_mmap_header_not_found : Error<
+ "%select{|umbrella }0header '%1' not found">;
+def err_mmap_umbrella_dir_not_found : Error<
+ "umbrella directory '%0' not found">;
+def err_mmap_umbrella_clash : Error<
+ "umbrella for module '%0' already covers this directory">;
+def err_mmap_export_module_id : Error<
+ "expected an exported module name or '*'">;
+def err_mmap_missing_module_unqualified : Error<
+ "no module named '%0' visible from '%1'">;
+def err_mmap_missing_module_qualified : Error<
+ "no module named '%0' in '%1'">;
+def err_mmap_top_level_inferred_submodule : Error<
+ "only submodules may be inferred with wildcard syntax">;
+def err_mmap_inferred_no_umbrella : Error<
+ "inferred submodules require a module with an umbrella">;
+def err_mmap_inferred_redef : Error<
+ "redefinition of inferred submodule">;
+def err_mmap_expected_lbrace_wildcard : Error<
+ "expected '{' to start inferred submodule">;
+def err_mmap_expected_wildcard_member : Error<
+ "expected module export wildcard">;
+def err_mmap_expected_export_wildcard : Error<
+ "only '*' can be exported from an inferred submodule">;
+def err_mmap_explicit_top_level : Error<
+ "'explicit' is not permitted on top-level modules">;
+def err_mmap_nested_submodule_id : Error<
+ "qualified module name can only be used to define modules at the top level">;
+def err_mmap_expected_feature : Error<"expected a feature name">;
+def err_mmap_expected_attribute : Error<"expected an attribute name">;
+def warn_mmap_unknown_attribute : Warning<"unknown attribute '%0'">,
+ InGroup<IgnoredAttributes>;
+
+def warn_auto_module_import : Warning<
+ "treating #%select{include|import|include_next|__include_macros}0 as an "
+ "import of module '%1'">, InGroup<AutoImport>, DefaultIgnore;
+def warn_uncovered_module_header : Warning<
+ "umbrella header does not include header '%0'">, InGroup<IncompleteUmbrella>;
+
}
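
For context (illustrative, not part of the patch itself): each `def` above is
expanded by tblgen into an enumerator in clang's diag:: namespace, and the
%0/%1 placeholders are filled by streaming arguments into the returned
DiagnosticBuilder. A minimal sketch, assuming the err_mmap_module_redefinition
and note_mmap_prev_definition entries added in this hunk; the helper name is
hypothetical and header layout varies across clang versions:

#include "clang/Basic/Diagnostic.h"
#include "clang/Lex/LexDiagnostic.h" // brings in the generated diag:: IDs

using namespace clang;

// Hypothetical helper: report a duplicate module definition, then point
// at the earlier definition with the paired note.
static void diagnoseModuleRedefinition(DiagnosticsEngine &Diags,
                                       SourceLocation Loc, StringRef Name,
                                       SourceLocation PrevLoc) {
  Diags.Report(Loc, diag::err_mmap_module_redefinition) << Name; // fills %0
  Diags.Report(PrevLoc, diag::note_mmap_prev_definition);
}
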
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
index 73437b2..c183da7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -23,6 +23,9 @@ let CategoryName = "Parse Issue" in {
def ext_empty_source_file : Extension<"ISO C forbids an empty source file">;
def ext_top_level_semi : Extension<
"extra ';' outside of a function">;
+def warn_cxx98_compat_top_level_semi : Warning<
+ "extra ';' outside of a function is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
def ext_extra_struct_semi : Extension<
"extra ';' inside a %0">;
def ext_extra_ivar_semi : Extension<
@@ -35,9 +38,10 @@ def ext_integer_complex : Extension<
"complex integer types are an extension">;
def ext_thread_before : Extension<"'__thread' before 'static'">;
-def ext_empty_struct_union : Extension<"empty %select{struct|union}0 "
- "(accepted as an extension) has size 0 in C, size 1 in C++">,
- InGroup<CXXCompat>;
+def ext_empty_struct_union : Extension<
+ "empty %select{struct|union}0 is a GNU extension">, InGroup<GNU>;
+def warn_empty_struct_union_compat : Warning<"empty %select{struct|union}0 "
+ "has size 0 in C, size 1 in C++">, InGroup<CXXCompat>, DefaultIgnore;
def error_empty_enum : Error<"use of empty enum">;
def err_invalid_sign_spec : Error<"'%0' cannot be signed or unsigned">;
def err_invalid_short_spec : Error<"'short %0' is invalid">;
@@ -49,12 +53,17 @@ def err_friend_storage_spec : Error<"'%0' is invalid in friend declarations">;
def ext_ident_list_in_param : Extension<
"type-less parameter names in function declaration">;
def ext_c99_variable_decl_in_for_loop : Extension<
- "variable declaration in for loop is a C99-specific feature">;
+ "variable declaration in for loop is a C99-specific feature">, InGroup<C99>;
def ext_c99_compound_literal : Extension<
- "compound literals are a C99-specific feature">;
+ "compound literals are a C99-specific feature">, InGroup<C99>;
+def ext_c99_flexible_array_member : Extension<
+ "Flexible array members are a C99-specific feature">, InGroup<C99>;
def ext_enumerator_list_comma : Extension<
"commas at the end of enumerator lists are a %select{C99|C++11}0-specific "
"feature">;
+def warn_cxx98_compat_enumerator_list_comma : Warning<
+ "commas at the end of enumerator lists are incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
def err_enumerator_list_missing_comma : Error<
"missing ',' between enumerators">;
def err_enumerator_unnamed_no_def : Error<
@@ -62,16 +71,26 @@ def err_enumerator_unnamed_no_def : Error<
def ext_ms_enum_fixed_underlying_type : Extension<
"enumeration types with a fixed underlying type are a Microsoft extension">,
InGroup<Microsoft>;
+def warn_cxx98_compat_enum_fixed_underlying_type : Warning<
+ "enumeration types with a fixed underlying type are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_alignof : Warning<
+ "alignof expressions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
-def ext_c1x_generic_selection : Extension<
- "generic selections are a C1X-specific feature">, InGroup<C1X>;
+def warn_microsoft_dependent_exists : Warning<
+ "dependent %select{__if_not_exists|__if_exists}0 declarations are ignored">,
+ InGroup<DiagGroup<"microsoft-exists">>;
+
+def ext_c11_generic_selection : Extension<
+ "generic selections are a C11-specific feature">, InGroup<C11>;
def err_duplicate_default_assoc : Error<
"duplicate default generic association">;
def note_previous_default_assoc : Note<
"previous default generic association is here">;
-def ext_c1x_alignas : Extension<
- "_Alignas is a C1X-specific feature">, InGroup<C1X>;
+def ext_c11_alignas : Extension<
+ "_Alignas is a C11-specific feature">, InGroup<C11>;
def ext_gnu_indirect_goto : Extension<
"use of GNU indirect-goto extension">, InGroup<GNU>;
@@ -98,10 +117,10 @@ def ext_gnu_case_range : Extension<"use of GNU case range extension">,
InGroup<GNU>;
// Generic errors.
-def err_parse_error : Error<"parse error">;
def err_expected_expression : Error<"expected expression">;
def err_expected_type : Error<"expected a type">;
def err_expected_external_declaration : Error<"expected external declaration">;
+def err_extraneous_closing_brace : Error<"extraneous closing brace ('}')">;
def err_expected_ident : Error<"expected identifier">;
def err_expected_ident_lparen : Error<"expected identifier or '('">;
def err_expected_ident_lbrace : Error<"expected identifier or '{'">;
@@ -126,17 +145,24 @@ def err_function_declared_typedef : Error<
"function definition declared 'typedef'">;
def err_iboutletcollection_builtintype : Error<
"type argument of iboutletcollection attribute cannot be a builtin type">;
-
+def err_iboutletcollection_with_protocol : Error<
+ "invalid argument of iboutletcollection attribute">;
def err_at_defs_cxx : Error<"@defs is not supported in Objective-C++">;
def err_at_in_class : Error<"unexpected '@' in member specification">;
def err_expected_fn_body : Error<
"expected function body after function declarator">;
+def warn_attribute_on_function_definition : Warning<
+ "GCC does not allow %0 attribute in this position on a function definition">,
+ InGroup<GccCompat>;
+def warn_attribute_no_decl : Warning<
+ "attribute %0 ignored, because it is not attached to a declaration">,
+ InGroup<IgnoredAttributes>;
def err_expected_method_body : Error<"expected method body">;
def err_invalid_token_after_toplevel_declarator : Error<
"expected ';' after top level declarator">;
-def err_invalid_equalequal_after_declarator : Error<
- "invalid '==' at end of declaration; did you mean '='?">;
+def err_invalid_token_after_declarator_suggest_equal : Error<
+ "invalid '%0' at end of declaration; did you mean '='?">;
def err_expected_statement : Error<"expected statement">;
def err_expected_lparen_after : Error<"expected '(' after '%0'">;
def err_expected_lparen_after_id : Error<"expected '(' after %0">;
@@ -174,8 +200,8 @@ def err_label_end_of_compound_statement : Error<
def err_address_of_label_outside_fn : Error<
"use of address-of-label extension outside of a function body">;
def err_expected_string_literal : Error<"expected string literal">;
-def err_expected_asm_operand : Error<
- "expected string literal or '[' for asm operand">, CatInlineAsm;
+def err_asm_operand_wide_string_literal : Error<
+ "cannot use %select{unicode|wide}0 string literal in 'asm'">;
def err_expected_selector_for_method : Error<
"expected selector for Objective-C method">;
def err_expected_property_name : Error<"expected property name">;
@@ -188,25 +214,45 @@ def err_illegal_decl_reference_to_reference : Error<
"%0 declared as a reference to a reference">;
def ext_rvalue_reference : ExtWarn<
"rvalue references are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_rvalue_reference : Warning<
+ "rvalue references are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def ext_ref_qualifier : ExtWarn<
"reference qualifiers on functions are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_ref_qualifier : Warning<
+ "reference qualifiers on functions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def ext_inline_namespace : ExtWarn<
"inline namespaces are a C++11 feature">, InGroup<CXX11>;
-def err_generalized_initializer_lists : Error<
- "generalized initializer lists are a C++11 extension unsupported in Clang">;
+def warn_cxx98_compat_inline_namespace : Warning<
+ "inline namespaces are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def ext_generalized_initializer_lists : ExtWarn<
- "generalized initializer lists are a C++11 extension unsupported in Clang">,
+ "generalized initializer lists are a C++11 extension">,
InGroup<CXX11>;
+def warn_cxx98_compat_generalized_initializer_lists : Warning<
+ "generalized initializer lists are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_init_list_bin_op : Error<"initializer list cannot be used on the "
+ "%select{left|right}0 hand side of operator '%1'">;
+def warn_cxx98_compat_trailing_return_type : Warning<
+ "trailing return types are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def ext_auto_type_specifier : ExtWarn<
"'auto' type specifier is a C++11 extension">, InGroup<CXX11>;
def warn_auto_storage_class : Warning<
"'auto' storage class specifier is redundant and incompatible with C++11">,
- InGroup<CXX11Compat>;
+ InGroup<CXX11Compat>, DefaultIgnore;
def ext_auto_storage_class : ExtWarn<
"'auto' storage class specifier is not permitted in C++11, and will not "
- "be supported in future releases">;
+ "be supported in future releases">, InGroup<DiagGroup<"auto-storage-class">>;
def ext_for_range : ExtWarn<
"range-based for loop is a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_for_range : Warning<
+ "range-based for loop is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_for_range_expected_decl : Error<
+ "for range declaration must declare a variable">;
def err_argument_required_after_attribute : Error<
"argument required after attribute">;
def err_missing_param : Error<"expected parameter declarator">;
@@ -214,7 +260,14 @@ def err_missing_comma_before_ellipsis : Error<
"C requires a comma prior to the ellipsis in a variadic function type">;
def err_unexpected_typedef_ident : Error<
"unexpected type name %0: expected identifier">;
+def warn_cxx98_compat_decltype : Warning<
+ "'decltype' type specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_unexpected_scope_on_base_decltype : Error<
+ "unexpected namespace scope prior to decltype">;
def err_expected_class_name : Error<"expected class name">;
+def err_expected_class_name_not_template :
+ Error<"'typename' is redundant; base classes are implicitly types">;
def err_unspecified_vla_size_with_static : Error<
"'static' may not be used with an unspecified variable length array size">;
@@ -228,9 +281,11 @@ def err_typename_invalid_storageclass : Error<
"type name does not allow storage class to be specified">;
def err_typename_invalid_functionspec : Error<
"type name does not allow function specifier to be specified">;
+def err_typename_invalid_constexpr : Error<
+ "type name does not allow constexpr specifier to be specified">;
def err_typename_identifiers_only : Error<
"typename is allowed for identifiers only">;
-
+
def err_invalid_decl_spec_combination : Error<
"cannot combine with previous '%0' declaration specifier">;
def err_invalid_vector_decl_spec_combination : Error<
@@ -255,30 +310,40 @@ def err_templated_using_directive : Error<
"cannot template a using directive">;
def err_templated_using_declaration : Error<
"cannot template a using declaration">;
-def err_expected_ident_in_using : Error<
- "expected an identifier in using directive">;
def err_unexected_colon_in_nested_name_spec : Error<
"unexpected ':' in nested name specifier">;
def err_bool_redeclaration : Error<
"redeclaration of C++ built-in type 'bool'">;
-def ext_c1x_static_assert : Extension<
- "_Static_assert is a C1X-specific feature">, InGroup<C1X>;
+def ext_c11_static_assert : Extension<
+ "_Static_assert is a C11-specific feature">, InGroup<C11>;
+def warn_cxx98_compat_static_assert : Warning<
+ "static_assert declarations are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
/// Objective-C parser diagnostics
def err_expected_minus_or_plus : Error<
"method type specifier must start with '-' or '+'">;
def err_objc_no_attributes_on_category : Error<
"attributes may not be specified on a category">;
-def err_objc_missing_end : Error<"missing @end">;
+def err_objc_missing_end : Error<"missing '@end'">;
+def note_objc_container_start : Note<
+ "%select{class|protocol|category|class extension|implementation"
+ "|category implementation}0 started here">;
def warn_objc_protocol_qualifier_missing_id : Warning<
"protocol qualifiers without 'id' is archaic">;
def err_objc_unknown_at : Error<"expected an Objective-C directive after '@'">;
def err_illegal_super_cast : Error<
"cannot cast 'super' (it isn't an expression)">;
+def err_nsnumber_nonliteral_unary : Error<
+ "@%0 must be followed by a number to form an NSNumber object">;
-let CategoryName = "Automatic Reference Counting Issue" in {
+let CategoryName = "ARC Parse Issue" in {
def err_arc_bridge_retain : Error<
"unknown cast annotation __bridge_retain; did you mean __bridge_retained?">;
+// To be default mapped to an error later.
+def warn_arc_bridge_cast_nonarc : Warning<
+ "'%0' casts have no effect when not using ARC">,
+ InGroup<DiagGroup<"arc-bridge-casts-disallowed-in-nonarc">>;
}
def err_objc_illegal_visibility_spec : Error<
@@ -303,13 +368,8 @@ def err_objc_directive_only_in_protocol : Error<
def err_missing_catch_finally : Error<
"@try statement without a @catch and @finally clause">;
def err_objc_concat_string : Error<"unexpected token after Objective-C string">;
-def err_missing_sel_definition : Error<"cannot find definition of 'SEL'">;
-def err_missing_id_definition : Error<"cannot find definition of 'id'">;
-def err_missing_proto_definition : Error<
- "cannot find definition of 'Protocol'">;
-def err_missing_class_definition : Error<"cannot find definition of 'Class'">;
-def err_expected_implementation : Error<
- "@end must appear in an @implementation context">;
+def err_expected_objc_container : Error<
+ "'@end' must appear in an Objective-C context">;
def error_property_ivar_decl : Error<
"property synthesize requires specification of an ivar">;
def err_synthesized_property_name : Error<
@@ -332,10 +392,16 @@ def err_func_def_no_params : Error<
"function definition does not declare parameters">;
def err_expected_lparen_after_type : Error<
"expected '(' for function-style cast or type construction">;
-def err_expected_equal_after_declarator : Error<
- "expected '=' after declarator">;
+def err_expected_init_in_condition : Error<
+ "variable declaration in condition must have an initializer">;
+def err_expected_init_in_condition_lparen : Error<
+ "variable declaration in condition cannot have a parenthesized initializer">;
def warn_parens_disambiguated_as_function_decl : Warning<
- "parentheses were disambiguated as a function declarator">;
+ "parentheses were disambiguated as a function declarator">,
+ InGroup<VexingParse>;
+def warn_dangling_else : Warning<
+ "add explicit braces to avoid dangling else">,
+ InGroup<DanglingElse>;
def err_expected_member_or_base_name : Error<
"expected class member or base class name">;
def err_expected_lbrace_after_base_specifiers : Error<
@@ -344,8 +410,13 @@ def ext_ellipsis_exception_spec : Extension<
"exception specification of '...' is a Microsoft extension">;
def err_dynamic_and_noexcept_specification : Error<
"cannot have both throw() and noexcept() clause on the same function">;
+def warn_cxx98_compat_noexcept_decl : Warning<
+ "noexcept specifications are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_expected_catch : Error<"expected catch">;
def err_expected_lbrace_or_comma : Error<"expected '{' or ','">;
+def err_expected_rbrace_or_comma : Error<"expected '}' or ','">;
+def err_expected_rsquare_or_comma : Error<"expected ']' or ','">;
def err_using_namespace_in_class : Error<
"'using namespace' is not allowed in classes">;
def err_destructor_tilde_identifier : Error<
@@ -356,13 +427,24 @@ def err_default_arg_unparsed : Error<
"unexpected end of default argument expression">;
def err_parser_impl_limit_overflow : Error<
"parser recursion limit reached, program too complex">, DefaultFatal;
+def err_misplaced_ellipsis_in_declaration : Error<
+ "'...' must %select{immediately precede declared identifier|"
+ "be innermost component of anonymous pack declaration}0">;
// C++ derived classes
def err_dup_virtual : Error<"duplicate 'virtual' in base specifier">;
// C++ operator overloading
-def err_operator_string_not_empty : Error<
+def err_literal_operator_string_prefix : Error<
+ "string literal after 'operator' cannot have an encoding prefix">;
+def err_literal_operator_string_not_empty : Error<
"string literal after 'operator' must be '\"\"'">;
+def err_literal_operator_missing_space : Error<
+ "C++11 requires a space between the \"\" and the user-defined suffix in a "
+ "literal operator">;
+def warn_cxx98_compat_literal_operator : Warning<
+ "literal operators are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
// Classes.
def err_anon_type_definition : Error<
@@ -371,16 +453,27 @@ def err_default_delete_in_multiple_declaration : Error<
"'= %select{default|delete}0' is a function definition and must occur in a "
"standalone declaration">;
+def warn_cxx98_compat_noexcept_expr : Warning<
+ "noexcept expressions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_nullptr : Warning<
+ "'nullptr' is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+
def warn_cxx98_compat_alignas : Warning<"'alignas' is incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
def warn_cxx98_compat_attribute : Warning<
"attributes are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
-def err_cxx0x_attribute_forbids_arguments : Error<
- "C++11 attribute '%0' cannot have an argument list">;
-def err_cxx0x_attribute_requires_arguments : Error<
- "C++1 attribute '%0' must have an argument list">;
+def err_cxx11_attribute_forbids_arguments : Error<
+ "attribute '%0' cannot have an argument list">;
+def err_cxx11_attribute_forbids_ellipsis : Error<
+ "attribute '%0' cannot be used as an attribute pack">;
def err_attributes_not_allowed : Error<"an attribute list cannot appear here">;
+def err_l_square_l_square_not_attribute : Error<
+ "C++11 only allows consecutive left square brackets when "
+ "introducing an attribute">;
+def err_alignas_pack_exp_unsupported : Error<
+ "pack expansions in alignment specifiers are not supported yet">;
/// C++ Templates
def err_expected_template : Error<"expected template">;
@@ -388,23 +481,22 @@ def err_unknown_template_name : Error<
"unknown template name %0">;
def err_expected_comma_greater : Error<
"expected ',' or '>' in template-parameter-list">;
-def err_expected_type_id_after : Error<"expected type-id after '%0'">;
-def err_expected_class_before : Error<"expected 'class' before '%0'">;
+def err_class_on_template_template_param : Error<
+ "template template parameter requires 'class' after the parameter list">;
def err_template_spec_syntax_non_template : Error<
"identifier followed by '<' indicates a class template specialization but "
"%0 %select{does not refer to a template|refers to a function "
"template|<unused>|refers to a template template parameter}1">;
def err_id_after_template_in_nested_name_spec : Error<
"expected template name after 'template' keyword in nested name specifier">;
-def err_id_after_template_in_typename_spec : Error<
- "expected template name after 'template' keyword in typename specifier">;
-def err_less_after_template_name_in_nested_name_spec : Error<
- "expected '<' after 'template %0' in nested name specifier">;
def err_two_right_angle_brackets_need_space : Error<
"a space is required between consecutive right angle brackets (use '> >')">;
def warn_cxx0x_right_shift_in_template_arg : Warning<
"use of right-shift operator ('>>') in template argument will require "
"parentheses in C++11">;
+def warn_cxx98_compat_two_right_angle_brackets : Warning<
+ "consecutive right angle brackets are incompatible with C++98 (use '> >')">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_multiple_template_declarators : Error<
"%select{|a template declaration|an explicit template specialization|"
"an explicit template instantiation}0 can "
@@ -414,12 +506,20 @@ def err_explicit_instantiation_with_definition : Error<
"definition is meant to be an explicit specialization, add '<>' after the "
"'template' keyword">;
def err_enum_template : Error<"enumeration cannot be a template">;
+def err_explicit_instantiation_enum : Error<
+ "enumerations cannot be explicitly instantiated">;
+def err_expected_template_parameter : Error<"expected template parameter">;
def err_missing_dependent_template_keyword : Error<
"use 'template' keyword to treat '%0' as a dependent template name">;
def warn_missing_dependent_template_keyword : ExtWarn<
"use 'template' keyword to treat '%0' as a dependent template name">;
+def ext_extern_template : Extension<
+ "extern templates are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_extern_template : Warning<
+ "extern templates are incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
def warn_static_inline_explicit_inst_ignored : Warning<
"ignoring '%select{static|inline}0' keyword on explicit template "
"instantiation">;
@@ -457,23 +557,31 @@ def err_ctor_init_missing_comma : Error<
"missing ',' between base or member initializers">;
// C++ declarations
-def err_friend_decl_defines_class : Error<
+def err_friend_decl_defines_type : Error<
"cannot define a type in a friend declaration">;
def err_missing_whitespace_digraph : Error<
"found '<::' after a "
"%select{template name|const_cast|dynamic_cast|reinterpret_cast|static_cast}0"
" which forms the digraph '<:' (aka '[') and a ':', did you mean '< ::'?">;
-def warn_deleted_function_accepted_as_extension: ExtWarn<
- "deleted function definition accepted as a C++11 extension">, InGroup<CXX11>;
-def warn_defaulted_function_accepted_as_extension: ExtWarn<
- "defaulted function definition accepted as a C++11 extension">,
- InGroup<CXX11>;
+def ext_deleted_function : ExtWarn<
+ "deleted function definitions are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_deleted_function : Warning<
+ "deleted function definitions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_defaulted_function : ExtWarn<
+ "defaulted function definitions are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_defaulted_function : Warning<
+ "defaulted function definitions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
// C++11 in-class member initialization
-def warn_nonstatic_member_init_accepted_as_extension: ExtWarn<
- "in-class initialization of non-static data member accepted as a C++11 extension">,
+def ext_nonstatic_member_init : ExtWarn<
+ "in-class initialization of non-static data member is a C++11 extension">,
InGroup<CXX11>;
+def warn_cxx98_compat_nonstatic_member_init : Warning<
+ "in-class initialization of non-static data members is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_bitfield_member_init: Error<
"bitfield member cannot have an in-class initializer">;
def err_incomplete_array_member_init: Error<
@@ -481,23 +589,30 @@ def err_incomplete_array_member_init: Error<
// C++11 alias-declaration
def ext_alias_declaration : ExtWarn<
- "alias declarations accepted as a C++11 extension">, InGroup<CXX11>;
+ "alias declarations are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_alias_declaration : Warning<
+ "alias declarations are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_alias_declaration_not_identifier : Error<
"name defined in alias declaration must be an identifier">;
def err_alias_declaration_specialization : Error<
"%select{partial specialization|explicit specialization|explicit instantiation}0 of alias templates is not permitted">;
// C++11 override control
-def ext_override_control_keyword : Extension<
- "'%0' keyword accepted as a C++11 extension">, InGroup<CXX11>;
+def ext_override_control_keyword : ExtWarn<
+ "'%0' keyword is a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_override_control_keyword : Warning<
+ "'%0' keyword is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_duplicate_virt_specifier : Error<
"class member already marked '%0'">;
-def err_duplicate_class_virt_specifier : Error<
- "class already marked '%0'">;
def err_scoped_enum_missing_identifier : Error<
"scoped enumeration requires a name">;
+def warn_cxx98_compat_scoped_enum : Warning<
+ "scoped enumerations are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_expected_parameter_pack : Error<
"expected the name of a parameter pack">;
@@ -514,6 +629,11 @@ def err_this_captured_by_reference : Error<
def err_expected_capture : Error<
"expected variable name or 'this' in lambda capture list">;
def err_expected_lambda_body : Error<"expected body of lambda expression">;
+def warn_cxx98_compat_lambda : Warning<
+ "lambda expressions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_lambda_missing_parens : Error<
+ "lambda requires '()' before %select{'mutable'|return type}0">;
// Availability attribute
def err_expected_version : Error<
@@ -554,8 +674,6 @@ def warn_pragma_align_invalid_option : Warning<
// - #pragma pack
def warn_pragma_pack_invalid_action : Warning<
"unknown action for '#pragma pack' - ignored">;
-def warn_pragma_pack_invalid_constant : Warning<
- "invalid constant for '#pragma pack', expected %0 - ignored">;
def warn_pragma_pack_malformed : Warning<
"expected integer or identifier in '#pragma pack' - ignored">;
// - #pragma unused
@@ -592,7 +710,7 @@ def err_seh___finally_block : Error<
let CategoryName = "Modules Issue" in {
def err_module_expected_ident : Error<
- "expected a module name after '__import_module__'">;
+ "expected a module name after module import">;
def err_module_expected_semi : Error<
"expected a semicolon name after module name">;
}
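
Another aside (illustrative, not part of the patch): the %select{...}N syntax
used throughout these messages picks one alternative based on the integer
streamed as argument N. A hedged sketch against the err_pp_invalid_udl entry
added earlier; the function name and exact headers are assumptions:

#include "clang/Basic/Diagnostic.h"
#include "clang/Lex/LexDiagnostic.h"

using namespace clang;

// Passing 1 for %0 selects "integer", rendering "integer literal with
// user-defined suffix cannot be used in preprocessor constant expression";
// passing 0 selects "character" instead.
static void diagnoseUDLInPPExpr(DiagnosticsEngine &Diags, SourceLocation Loc,
                                bool IsInteger) {
  Diags.Report(Loc, diag::err_pp_invalid_udl) << (IsInteger ? 1 : 0);
}
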
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
index d60e69b..e553740 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -16,10 +16,38 @@ let CategoryName = "Semantic Issue" in {
// Constant expressions
def err_expr_not_ice : Error<
- "expression is not an integer constant expression">;
+ "expression is not an %select{integer|integral}0 constant expression">;
def ext_expr_not_ice : Extension<
- "expression is not integer constant expression "
- "(but is allowed as an extension)">;
+ "expression is not an %select{integer|integral}0 constant expression; "
+ "folding it to a constant is a GNU extension">, InGroup<GNU>;
+def err_typecheck_converted_constant_expression : Error<
+ "value of type %0 is not implicitly convertible to %1">;
+def err_typecheck_converted_constant_expression_disallowed : Error<
+ "conversion from %0 to %1 is not allowed in a converted constant expression">;
+def err_expr_not_cce : Error<
+ "%select{case value|enumerator value|non-type template argument}0 "
+ "is not a constant expression">;
+def err_cce_narrowing : ExtWarn<
+ "%select{case value|enumerator value|non-type template argument}0 "
+ "%select{cannot be narrowed from type %2 to %3|"
+ "evaluates to %2, which cannot be narrowed to type %3}1">,
+ InGroup<CXX11Narrowing>, DefaultError;
+def err_cce_narrowing_sfinae : Error<
+ "%select{case value|enumerator value|non-type template argument}0 "
+ "%select{cannot be narrowed from type %2 to %3|"
+ "evaluates to %2, which cannot be narrowed to type %3}1">;
+def err_ice_not_integral : Error<
+ "integral constant expression must have integral or unscoped enumeration "
+ "type, not %0">;
+def err_ice_incomplete_type : Error<
+ "integral constant expression has incomplete class type %0">;
+def err_ice_explicit_conversion : Error<
+ "integral constant expression requires explicit conversion from %0 to %1">;
+def note_ice_conversion_here : Note<
+ "conversion to %select{integral|enumeration}0 type %1 declared here">;
+def err_ice_ambiguous_conversion : Error<
+ "ambiguous conversion from type %0 to an integral or unscoped "
+ "enumeration type">;
// Semantic analysis of constant literals.
def ext_predef_outside_function : Warning<
@@ -35,8 +63,7 @@ def warn_double_const_requires_fp64 : Warning<
"double precision constant requires cl_khr_fp64, casting to single precision">;
// C99 variable-length arrays
-def ext_vla : Extension<
- "variable length arrays are a C99 feature, accepted as an extension">,
+def ext_vla : Extension<"variable length arrays are a C99 feature">,
InGroup<VLA>;
def err_vla_non_pod : Error<"variable length array of non-POD element type %0">;
def err_vla_in_sfinae : Error<
@@ -63,8 +90,6 @@ def err_variably_modified_new_type : Error<
// C99 Designated Initializers
def ext_designated_init : Extension<
"designated initializers are a C99 feature">;
-def ext_designated_init_cxx : Extension<
- "designated initializers are a C99 feature, accepted in C++ as an extension">;
def err_array_designator_negative : Error<
"array designator value '%0' is negative">;
def err_array_designator_empty_range : Error<
@@ -100,8 +125,6 @@ def ext_flexible_array_init : Extension<
"flexible array initialization is a GNU extension">, InGroup<GNU>;
// Declarations.
-def ext_anon_param_requires_type_specifier : Extension<
- "type specifier required for unnamed parameter, defaults to int">;
def err_bad_variable_name : Error<
"%0 cannot be the name of a variable or data member">;
def err_bad_parameter_name : Error<
@@ -114,7 +137,20 @@ def warn_unused_variable : Warning<"unused variable %0">,
def warn_unused_exception_param : Warning<"unused exception parameter %0">,
InGroup<UnusedExceptionParameter>, DefaultIgnore;
def warn_decl_in_param_list : Warning<
- "declaration of %0 will not be visible outside of this function">;
+ "declaration of %0 will not be visible outside of this function">,
+ InGroup<Visibility>;
+def warn_redefinition_in_param_list : Warning<
+ "redefinition of %0 will not be visible outside of this function">,
+ InGroup<Visibility>;
+def warn_empty_parens_are_function_decl : Warning<
+ "empty parentheses interpreted as a function declaration">,
+ InGroup<VexingParse>;
+def note_empty_parens_function_call : Note<
+ "change this ',' to a ';' to call %0">;
+def note_empty_parens_default_ctor : Note<
+ "remove parentheses to declare a variable">;
+def note_empty_parens_zero_initialize : Note<
+ "replace parentheses with an initializer to declare a variable">;
def warn_unused_function : Warning<"unused function %0">,
InGroup<UnusedFunction>, DefaultIgnore;
def warn_unused_member_function : Warning<"unused member function %0">,
@@ -134,13 +170,16 @@ def warn_parameter_size: Warning<
def warn_return_value_size: Warning<
"return value of %0 is a large (%1 bytes) pass-by-value object; "
"pass it by reference instead ?">, InGroup<LargeByValueCopy>;
-
+def warn_return_value_udt: Warning<
+ "%0 has C-linkage specified, but returns user-defined type %1 which is "
+ "incompatible with C">, InGroup<ReturnTypeCLinkage>;
def warn_implicit_function_decl : Warning<
"implicit declaration of function %0">,
InGroup<ImplicitFunctionDeclare>, DefaultIgnore;
def ext_implicit_function_decl : ExtWarn<
"implicit declaration of function %0 is invalid in C99">,
InGroup<ImplicitFunctionDeclare>;
+def note_function_suggestion : Note<"did you mean %0?">;
def err_ellipsis_first_arg : Error<
"ISO C requires a named argument before '...'">;
@@ -192,6 +231,9 @@ def err_using_decl_can_not_refer_to_namespace : Error<
"using declaration can not refer to namespace">;
def err_using_decl_constructor : Error<
"using declaration can not refer to a constructor">;
+def warn_cxx98_compat_using_decl_constructor : Warning<
+ "inherited constructors are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_using_decl_destructor : Error<
"using declaration can not refer to a destructor">;
def err_using_decl_template_id : Error<
@@ -247,7 +289,7 @@ def warn_unreachable : Warning<"will never be executed">,
/// Built-in functions.
def ext_implicit_lib_function_decl : ExtWarn<
- "implicitly declaring C library function '%0' with type %1">;
+ "implicitly declaring library function '%0' with type %1">;
def note_please_include_header : Note<
"please include the header <%0> or explicitly provide a "
"declaration for '%1'">;
@@ -269,7 +311,8 @@ def warn_redecl_library_builtin : Warning<
def err_builtin_definition : Error<"definition of builtin function %0">;
def err_types_compatible_p_in_cplusplus : Error<
"__builtin_types_compatible_p is not valid in C++">;
-def warn_builtin_unknown : Warning<"use of unknown builtin %0">, DefaultError;
+def warn_builtin_unknown : Warning<"use of unknown builtin %0">,
+ InGroup<ImplicitFunctionDeclare>, DefaultError;
def warn_dyn_class_memaccess : Warning<
"%select{destination for|source of|first operand of|second operand of}0 this "
"%1 call is a pointer to dynamic class %2; vtable pointer will be "
@@ -292,6 +335,15 @@ def warn_strlcpycat_wrong_size : Warning<
InGroup<DiagGroup<"strlcpy-strlcat-size">>;
def note_strlcpycat_wrong_size : Note<
"change size argument to be the size of the destination">;
+
+def warn_strncat_large_size : Warning<
+ "the value of the size argument in 'strncat' is too large, might lead to a "
+ "buffer overflow">, InGroup<StrncatSize>, DefaultIgnore;
+def warn_strncat_src_size : Warning<"size argument in 'strncat' call appears "
+ "to be size of the source">, InGroup<StrncatSize>, DefaultIgnore;
+def note_strncat_wrong_size : Note<
+ "change the argument to be the free space in the destination buffer minus "
+ "the terminating null byte">;
/// main()
// static/inline main() are not errors in C, just in C++.
@@ -299,8 +351,12 @@ def warn_static_main : Warning<"'main' should not be declared static">,
InGroup<Main>;
def err_static_main : Error<"'main' is not allowed to be declared static">;
def err_inline_main : Error<"'main' is not allowed to be declared inline">;
+def err_constexpr_main : Error<
+ "'main' is not allowed to be declared constexpr">;
def err_main_template_decl : Error<"'main' cannot be a template">;
def err_main_returns_nonint : Error<"'main' must return 'int'">;
+def ext_main_returns_nonint : ExtWarn<"return type of 'main' is not 'int'">,
+ InGroup<MainReturnType>;
def err_main_surplus_args : Error<"too many parameters (%0) for 'main': "
"must be 0, 2, or 3">;
def warn_main_one_arg : Warning<"only one parameter on 'main' declaration">,
@@ -339,8 +395,14 @@ def warn_pragma_unused_undeclared_var : Warning<
"undeclared variable %0 used as an argument for '#pragma unused'">;
def warn_pragma_unused_expected_var_arg : Warning<
"only variables can be arguments to '#pragma unused'">;
-def err_unsupported_pragma_weak : Error<
- "using '#pragma weak' to refer to an undeclared identifier is not yet supported">;
+def err_pragma_push_visibility_mismatch : Error<
+ "#pragma visibility push with no matching #pragma visibility pop">;
+def note_surrounding_namespace_ends_here : Note<
+ "surrounding namespace with visibility attribute ends here">;
+def err_pragma_pop_visibility_mismatch : Error<
+ "#pragma visibility pop with no matching #pragma visibility push">;
+def note_surrounding_namespace_starts_here : Note<
+ "surrounding namespace with visibility attribute starts here">;
/// Objective-C parser diagnostics
def err_duplicate_class_def : Error<
@@ -370,12 +432,25 @@ def warn_property_attribute : Warning<
def warn_property_types_are_incompatible : Warning<
"property type %0 is incompatible with type %1 inherited from %2">;
def err_undef_interface : Error<"cannot find interface declaration for %0">;
+def err_category_forward_interface : Error<
+ "cannot define %select{category|class extension}0 for undefined class %1">;
def err_class_extension_after_impl : Error<
"cannot declare class extension for %0 after class implementation">;
def note_implementation_declared : Note<
"class implementation is declared here">;
def note_class_declared : Note<
"class is declared here">;
+def note_receiver_is_id : Note<
+ "receiver is treated with 'id' type for purpose of method lookup">;
+def note_suppressed_class_declare : Note<
+ "class with specified objc_requires_property_definitions attribute is declared here">;
+def err_objc_root_class_subclass : Error<
+ "objc_root_class attribute may only be specified on a root class declaration">;
+def warn_objc_root_class_missing : Warning<
+ "class %0 defined without specifying a base class">,
+ InGroup<ObjCRootClass>, DefaultIgnore;
+def note_objc_needs_superclass : Note<
+ "add a super class to fix this problem">;
def warn_dup_category_def : Warning<
"duplicate definition of category %1 on interface %0">;
def err_conflicting_super_class : Error<"conflicting super class name %0">;
@@ -479,11 +554,13 @@ def warn_strict_multiple_method_decl : Warning<
"multiple methods named %0 found">, InGroup<StrictSelector>, DefaultIgnore;
def warn_accessor_property_type_mismatch : Warning<
"type of property %0 does not match type of accessor %1">;
-def note_declared_at : Note<"declared here">;
-def note_method_declared_at : Note<"method declared here">;
+def not_conv_function_declared_at : Note<"type conversion function declared here">;
+def note_method_declared_at : Note<"method %0 declared here">;
def err_setter_type_void : Error<"type of setter must be void">;
def err_duplicate_method_decl : Error<"duplicate declaration of method %0">;
-def err_missing_atend : Error<"'@end' is missing in implementation context">;
+def warn_duplicate_method_decl :
+ Warning<"multiple declarations of method %0 found and ignored">,
+ InGroup<MethodDuplicate>, DefaultIgnore;
def err_objc_var_decl_inclass :
Error<"cannot declare variable inside @interface or @protocol">;
def error_missing_method_context : Error<
@@ -495,6 +572,10 @@ def err_objc_property_requires_object : Error<
def warn_objc_property_no_assignment_attribute : Warning<
"no 'assign', 'retain', or 'copy' attribute is specified - "
"'assign' is assumed">;
+def warn_objc_isa_use : Warning<
+ "direct access to objective-c's isa is deprecated "
+ "in favor of object_setClass() and object_getClass()">,
+ InGroup<DiagGroup<"deprecated-objc-isa-usage">>;
def warn_objc_property_default_assign_on_object : Warning<
"default property attribute 'assign' not appropriate for non-gc object">;
def warn_property_attr_mismatch : Warning<
@@ -505,19 +586,26 @@ def warn_objc_property_copy_missing_on_block : Warning<
def warn_objc_property_retain_of_block : Warning<
"retain'ed block property does not copy the block "
"- use copy attribute instead">, InGroup<ObjCRetainBlockProperty>;
+def warn_objc_readonly_property_has_setter : Warning<
+ "setter cannot be specified for a readonly property">,
+ InGroup<ObjCReadonlyPropertyHasSetter>;
def warn_atomic_property_rule : Warning<
"writable atomic property %0 cannot pair a synthesized %select{getter|setter}1 "
"with a user defined %select{getter|setter}2">,
InGroup<DiagGroup<"atomic-property-with-user-defined-accessor">>;
def note_atomic_property_fixup_suggest : Note<"setter and getter must both be "
"synthesized, or both be user defined,or the property must be nonatomic">;
-def warn_atomic_property_nontrivial_assign_op : Warning<
- "atomic property of type %0 synthesizing setter using non-trivial assignment"
- " operator">, InGroup<DiagGroup<"objc-property-atomic-setter-synthesis">>;
+def err_atomic_property_nontrivial_assign_op : Error<
+ "atomic property of reference type %0 cannot have non-trivial assignment"
+ " operator">;
def warn_owning_getter_rule : Warning<
"property's synthesized getter follows Cocoa naming"
" convention for returning 'owned' objects">,
InGroup<DiagGroup<"objc-property-matches-cocoa-ownership-rule">>;
+def warn_auto_synthesizing_protocol_property : Warning<
+ "auto property synthesis will not synthesize a property"
+ " declared in a protocol">,
+ InGroup<DiagGroup<"objc-protocol-property-synthesis">>;
def warn_property_getter_owning_mismatch : Warning<
"property declared as returning non-retained objects"
"; getter returning retained objects">;
@@ -531,9 +619,9 @@ def warn_default_atomic_custom_getter_setter : Warning<
def err_use_continuation_class : Error<
"illegal redeclaration of property in continuation class %0"
" (attribute must be 'readwrite', while its primary must be 'readonly')">;
-def warn_type_mismatch_continuation_class : Warning<
+def err_type_mismatch_continuation_class : Error<
"type of property %0 in continuation class does not match "
- "property type in primary class">, InGroup<ObjCContinuationPropertyType>;
+ "property type in primary class">;
def err_use_continuation_class_redeclaration_readwrite : Error<
"illegal redeclaration of 'readwrite' property in continuation class %0"
" (perhaps you intended this to be a 'readwrite' redeclaration of a "
@@ -571,6 +659,9 @@ def warn_arc_perform_selector_leaks : Warning<
InGroup<DiagGroup<"arc-performSelector-leaks">>;
def err_gc_weak_property_strong_type : Error<
"weak attribute declared on a __strong type property in GC mode">;
+def warn_receiver_is_weak : Warning <
+ "weak receiver may be unpredictably null in ARC mode">,
+ InGroup<DiagGroup<"receiver-is-weak">>, DefaultIgnore;
def error_synthesized_ivar_yet_not_supported : Error<
"instance variable synthesis not yet supported"
@@ -608,12 +699,12 @@ def warn_auto_implicit_atomic_property : Warning<
def warn_unimplemented_selector: Warning<
"unimplemented selector %0">, InGroup<Selector>, DefaultIgnore;
def warn_unimplemented_protocol_method : Warning<
- "method in protocol not implemented">, InGroup<Protocol>;
+ "method %0 in protocol not implemented">, InGroup<Protocol>;
// C++ declarations
def err_static_assert_expression_is_not_constant : Error<
"static_assert expression is not an integral constant expression">;
-def err_static_assert_failed : Error<"static_assert failed \"%0\"">;
+def err_static_assert_failed : Error<"static_assert failed %0">;
def warn_inline_namespace_reopened_noninline : Warning<
"inline namespace cannot be re-opened as a non-inline namespace">;
@@ -625,13 +716,25 @@ def err_unexpected_friend : Error<
"friends can only be classes or functions">;
def ext_enum_friend : ExtWarn<
"enumeration type %0 cannot be a friend">;
+def warn_cxx98_compat_enum_friend : Warning<
+ "befriending enumeration type %0 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def ext_nonclass_type_friend : ExtWarn<
"non-class friend type %0 is a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_nonclass_type_friend : Warning<
+ "non-class friend type %0 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_friend_is_member : Error<
"friends cannot be members of the declaring class">;
+def warn_cxx98_compat_friend_is_member : Warning<
+ "friend declaration naming a member of the declaring class is incompatible "
+ "with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def ext_unelaborated_friend_type : ExtWarn<
- "specify '%select{struct|union|class|enum}0' to befriend %1; accepted "
- "as a C++11 extension">, InGroup<CXX11>;
+ "unelaborated friend declaration is a C++11 extension; specify "
+ "'%select{struct|union|class|enum}0' to befriend %1">, InGroup<CXX11>;
+def warn_cxx98_compat_unelaborated_friend_type : Warning<
+ "befriending %1 without '%select{struct|union|class|enum}0' keyword is "
+ "incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def err_qualified_friend_not_found : Error<
"no function named %0 with type %1 was found in the specified scope">;
def err_introducing_special_friend : Error<
@@ -681,6 +784,10 @@ def warn_weak_vtable : Warning<
"%0 has no out-of-line virtual method definitions; its vtable will be "
"emitted in every translation unit">,
InGroup<DiagGroup<"weak-vtables">>, DefaultIgnore;
+def warn_weak_template_vtable : Warning<
+ "explicit template instantiation %0 will emit a vtable in every "
+ "translation unit">,
+ InGroup<DiagGroup<"weak-template-vtables">>, DefaultIgnore;
def ext_using_undefined_std : ExtWarn<
"using directive refers to implicitly-defined namespace 'std'">;
@@ -720,9 +827,9 @@ def err_class_redeclared_with_different_access : Error<
"%0 redeclared with '%1' access">;
def err_access : Error<
"%1 is a %select{private|protected}0 member of %3">, AccessControl;
-def war_ms_using_declaration_inaccessible : ExtWarn<
- "using declaration refers to inaccessible member '%0', which refers "
- "to accessible member '%1', accepted for Microsoft compatibility">,
+def ext_ms_using_declaration_inaccessible : ExtWarn<
+ "using declaration referring to inaccessible member '%0' (which refers "
+ "to accessible member '%1') is a Microsoft compatibility extension">,
AccessControl, InGroup<Microsoft>;
def err_access_ctor : Error<
"calling a %select{private|protected}0 constructor of class %2">,
@@ -744,9 +851,6 @@ def err_access_field_ctor : Error<
"%select{default |copy |move |*ERROR* |*ERROR* |*ERROR* |}1constructor">,
AccessControl;
-def err_access_ctor_field :
- Error<"field of type %1 has %select{private|protected}2 constructor">,
- AccessControl;
def err_access_dtor : Error<
"calling a %select{private|protected}1 destructor of class %0">,
AccessControl;
@@ -769,36 +873,27 @@ def err_access_dtor_field :
def err_access_dtor_var :
Error<"variable of type %1 has %select{private|protected}2 destructor">,
AccessControl;
-def err_access_assign_field :
- Error<"field of type %1 has %select{private|protected}2 copy assignment"
- " operator">,
- AccessControl;
-def err_access_assign_base :
- Error<"base class %0 has %select{private|protected}1 copy assignment"
- " operator">,
- AccessControl;
-def err_access_copy_field :
- Error<"field of type %1 has %select{private|protected}2 copy constructor">,
- AccessControl;
-def err_access_copy_base :
- Error<"base class %0 has %select{private|protected}1 copy constructor">,
- AccessControl;
def err_access_dtor_ivar :
Error<"instance variable of type %0 has %select{private|protected}1 "
"destructor">,
AccessControl;
def note_previous_access_declaration : Note<
"previously declared '%1' here">;
-def err_access_outside_class : Error<
- "access to %select{private|protected}0 member outside any class context">,
- AccessControl;
def note_access_natural : Note<
"%select{|implicitly }1declared %select{private|protected}0 here">;
def note_access_constrained_by_path : Note<
"constrained by %select{|implicitly }1%select{private|protected}0"
" inheritance here">;
-def note_access_protected_restricted : Note<
- "object type %select{|%1 }0must derive from context type %2">;
+def note_access_protected_restricted_noobject : Note<
+ "must name member using the type of the current context %0">;
+def note_access_protected_restricted_ctordtor : Note<
+ "protected %select{constructor|destructor}0 can only be used to "
+ "%select{construct|destroy}0 a base class subobject">;
+def note_access_protected_restricted_object : Note<
+ "can only access this member on an object of type %0">;
+def warn_cxx98_compat_sfinae_access_control : Warning<
+ "substitution failure due to access control is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore, NoSFINAE;
// C++ name lookup
def err_incomplete_nested_name_spec : Error<
@@ -821,6 +916,9 @@ def err_incomplete_member_access : Error<
"member access into incomplete type %0">;
def err_incomplete_type : Error<
"incomplete type %0 where a complete type is required">;
+def warn_cxx98_compat_enum_nested_name_spec : Warning<
+ "enumeration type in nested name specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
// C++ class members
def err_storageclass_invalid_for_member : Error<
@@ -885,6 +983,11 @@ def err_illegal_union_or_anon_struct_member : Error<
"%select{anonymous struct|union}0 member %1 has a non-trivial "
"%select{constructor|copy constructor|move constructor|copy assignment "
"operator|move assignment operator|destructor}2">;
+def warn_cxx98_compat_nontrivial_union_or_anon_struct_member : Warning<
+ "%select{anonymous struct|union}0 member %1 with a non-trivial "
+ "%select{constructor|copy constructor|move constructor|copy assignment "
+ "operator|move assignment operator|destructor}2 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def note_nontrivial_has_virtual : Note<
"because type %0 has a virtual %select{member function|base class}1">;
def note_nontrivial_has_nontrivial : Note<
@@ -895,8 +998,13 @@ def note_nontrivial_user_defined : Note<
"because type %0 has a user-declared %select{constructor|copy constructor|"
"move constructor|copy assignment operator|move assignment operator|"
"destructor}1">;
-def err_static_data_member_not_allowed_in_union_or_anon_struct : Error<
- "static data member %0 not allowed in %select{anonymous struct|union}1">;
+def err_static_data_member_not_allowed_in_anon_struct : Error<
+ "static data member %0 not allowed in anonymous struct">;
+def ext_static_data_member_in_union : ExtWarn<
+ "static data member %0 in union is a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_static_data_member_in_union : Warning<
+ "static data member %0 in union is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_union_member_of_reference_type : Error<
"union member %0 has reference type %1">;
def ext_anonymous_struct_union_qualified : Extension<
@@ -983,17 +1091,22 @@ def err_init_conversion_failed : Error<
"cannot initialize %select{a variable|a parameter|return object|an "
"exception object|a member subobject|an array element|a new value|a value|a "
"base class|a constructor delegation|a vector element}0 of type %1 with an "
- "%select{rvalue|lvalue}2 of type %3">;
+ "%select{rvalue|lvalue}2 of type %3"
+ "%select{|: different classes (%5 vs %6)"
+ "|: different number of parameters (%5 vs %6)"
+ "|: type mismatch at %ordinal5 parameter (%6 vs %7)"
+ "|: different return type (%5 vs %6)"
+ "|: different qualifiers ("
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}5 vs "
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}6)}4">;
def err_lvalue_to_rvalue_ref : Error<"rvalue reference to type %0 cannot bind "
"to lvalue of type %1">;
-def err_invalid_initialization : Error<
-"invalid initialization of reference of type %0 from expression of type %1">;
-def err_lvalue_to_rvalue_ambig_ref : Error<"rvalue reference cannot bind to lvalue "
- "due to multiple conversion functions">;
-def err_not_reference_to_const_init : Error<
- "%select{non-const|volatile}0 lvalue reference to type %1 cannot be "
- "initialized with a %select{value|temporary}2 of type %3">;
+def err_lvalue_reference_bind_to_initlist : Error<
+ "%select{non-const|volatile}0 lvalue reference to type %1 cannot bind to an "
+ "initializer list temporary">;
def err_lvalue_reference_bind_to_temporary : Error<
"%select{non-const|volatile}0 lvalue reference to type %1 cannot bind to a "
"temporary of type %2">;
@@ -1015,17 +1128,12 @@ def err_member_function_call_bad_cvr : Error<"member function %0 not viable: "
"%select{const|restrict|const or restrict|volatile|const or volatile|"
"volatile or restrict|const, volatile, or restrict}2">;
-def err_reference_init_drops_quals : Error<
- "initialization of reference to type %0 with a %select{value|temporary}1 of type %2 drops "
- "qualifiers">;
def err_reference_bind_to_bitfield : Error<
"%select{non-const|volatile}0 reference cannot bind to bit-field %1">;
def err_reference_bind_to_vector_element : Error<
"%select{non-const|volatile}0 reference cannot bind to vector element">;
def err_reference_var_requires_init : Error<
"declaration of reference variable %0 requires an initializer">;
-def err_const_var_requires_init : Error<
- "declaration of const variable '%0' requires an initializer">;
def err_reference_without_init : Error<
"reference to type %0 requires an initializer">;
def err_reference_has_multiple_inits : Error<
@@ -1055,6 +1163,11 @@ def warn_uninit_var_captured_by_block : Warning<
def warn_maybe_uninit_var_captured_by_block : Warning<
"variable %0 may be uninitialized when captured by block">,
InGroup<UninitializedMaybe>, DefaultIgnore;
+def warn_uninit_byref_blockvar_captured_by_block : Warning<
+ "block pointer variable %0 is uninitialized when captured by block">,
+ InGroup<Uninitialized>, DefaultIgnore;
+def note_block_var_fixit_add_initialization : Note<
+ "maybe you meant to use __block %0">;
def note_var_fixit_add_initialization : Note<
"initialize the variable %0 to silence this warning">;
def err_init_incomplete_type : Error<"initialization of incomplete type %0">;
@@ -1063,32 +1176,47 @@ def err_temp_copy_no_viable : Error<
"no viable constructor %select{copying variable|copying parameter|"
"returning object|throwing object|copying member subobject|copying array "
"element|allocating object|copying temporary|initializing base subobject|"
- "initializing vector element}0 of type %1">;
+ "initializing vector element|capturing value}0 of type %1">;
def ext_rvalue_to_reference_temp_copy_no_viable : ExtWarn<
"no viable constructor %select{copying variable|copying parameter|"
"returning object|throwing object|copying member subobject|copying array "
"element|allocating object|copying temporary|initializing base subobject|"
- "initializing vector element}0 of type %1; C++98 requires a copy "
+ "initializing vector element|capturing value}0 of type %1; C++98 requires a copy "
"constructor when binding a reference to a temporary">,
InGroup<BindToTemporaryCopy>;
def err_temp_copy_ambiguous : Error<
"ambiguous constructor call when %select{copying variable|copying "
"parameter|returning object|throwing object|copying member subobject|copying "
"array element|allocating object|copying temporary|initializing base subobject|"
- "initializing vector element}0 of type %1">;
+ "initializing vector element|capturing value}0 of type %1">;
def err_temp_copy_deleted : Error<
"%select{copying variable|copying parameter|returning object|throwing "
"object|copying member subobject|copying array element|allocating object|"
- "copying temporary|initializing base subobject|initializing vector element}0 "
- "of type %1 invokes deleted constructor">;
+ "copying temporary|initializing base subobject|initializing vector element|"
+ "capturing value}0 of type %1 invokes deleted constructor">;
def err_temp_copy_incomplete : Error<
"copying a temporary object of incomplete type %0">;
+def warn_cxx98_compat_temp_copy : Warning<
+ "%select{copying variable|copying parameter|returning object|throwing "
+ "object|copying member subobject|copying array element|allocating object|"
+ "copying temporary|initializing base subobject|initializing vector element}1 "
+ "of type %2 when binding a reference to a temporary would %select{invoke "
+ "an inaccessible constructor|find no viable constructor|find ambiguous "
+ "constructors|invoke a deleted constructor}0 in C++98">,
+ InGroup<CXX98CompatBindToTemporaryCopy>, DefaultIgnore;
+def err_selected_explicit_constructor : Error<
+ "chosen constructor is explicit in copy-initialization">;
+def note_constructor_declared_here : Note<
+ "constructor declared here">;
// C++11 decltype
-def err_cannot_determine_declared_type_of_overloaded_function : Error<
- "cannot determine the type of an overloaded function">;
+def err_decltype_in_declarator : Error<
+ "'decltype' cannot be used to name a declaration">;
// C++11 auto
+def warn_cxx98_compat_auto_type_specifier : Warning<
+ "'auto' type specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_auto_variable_cannot_appear_in_own_initializer : Error<
"variable %0 declared with 'auto' type cannot appear in its own initializer">;
def err_illegal_decl_array_of_auto : Error<
@@ -1104,6 +1232,10 @@ def err_auto_var_requires_init : Error<
"declaration of variable %0 with type %1 requires an initializer">;
def err_auto_new_requires_ctor_arg : Error<
"new expression for type %0 requires a constructor argument">;
+def err_auto_new_requires_parens : Error<
+ "new expression for type %0 cannot use list-initialization">;
+def err_auto_var_init_no_expression : Error<
+ "initializer for variable %0 with type %1 is empty">;
def err_auto_var_init_multiple_expressions : Error<
"initializer for variable %0 with type %1 contains multiple expressions">;
def err_auto_new_ctor_multiple_expressions : Error<
@@ -1116,10 +1248,21 @@ def err_trailing_return_in_parens : Error<
"trailing return type may not be nested within parentheses">;
def err_auto_var_deduction_failure : Error<
"variable %0 with type %1 has incompatible initializer of type %2">;
+def err_auto_var_deduction_failure_from_init_list : Error<
+ "cannot deduce actual type for variable %0 with type %1 from initializer list">;
def err_auto_new_deduction_failure : Error<
"new expression for type %0 has incompatible constructor argument of type %1">;
def err_auto_different_deductions : Error<
"'auto' deduced as %0 in declaration of %1 and deduced as %2 in declaration of %3">;
+def err_implied_std_initializer_list_not_found : Error<
+ "cannot deduce type of initializer list because std::initializer_list was "
+ "not found; include <initializer_list>">;
+def err_malformed_std_initializer_list : Error<
+ "std::initializer_list must be a class template with a single type parameter">;
+def warn_dangling_std_initializer_list : Warning<
+ "array backing the initializer list will be destroyed at the end of "
+ "%select{the full-expression|the constructor}0">,
+ InGroup<DiagGroup<"dangling-initializer-list">>;
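The dangling-initializer-list warning fires when the backing array cannot outlive the list object, e.g. (illustrative only):

    #include <initializer_list>
    struct X {
      std::initializer_list<int> il;
      X() : il{1, 2, 3} {}  // array backing the list is destroyed at the
    };                      // end of the constructor; 'il' then dangles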
// C++11 override control
def override_keyword_only_allowed_on_virtual_member_functions : Error<
@@ -1132,11 +1275,9 @@ def err_class_marked_final_used_as_base : Error<
// C++11 attributes
def err_repeat_attribute : Error<"'%0' attribute cannot be repeated">;
-// C++11 [[final]]
+// C++11 final
def err_final_function_overridden : Error<
"declaration of %0 overrides a 'final' function">;
-def err_final_base : Error<
- "derivation from 'final' %0">;
// C++11 scoped enumerations
def err_enum_invalid_underlying : Error<
@@ -1154,14 +1295,18 @@ def err_enum_redeclare_fixed_mismatch : Error<
"enumeration previously declared with %select{non|}0fixed underlying type">;
def err_enum_redeclare_scoped_mismatch : Error<
"enumeration previously declared as %select{un|}0scoped">;
+def err_enum_class_reference : Error<
+ "reference to %select{|scoped }0enumeration must use 'enum' "
+ "not 'enum class'">;
def err_only_enums_have_underlying_types : Error<
"only enumeration types have underlying types">;
-def err_incomplete_type_no_underlying_type : Error<
- "an incomplete enumeration type has no underlying type yet">;
// C++11 delegating constructors
-def err_delegation_0x_only : Error<
+def err_delegating_ctor : Error<
"delegating constructors are permitted only in C++11">;
+def warn_cxx98_compat_delegating_ctor : Warning<
+ "delegating constructors are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
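Delegating-constructor trigger (illustrative only):

    struct S {
      S(int n);
      S() : S(0) {}  // C++98: err_delegating_ctor; C++11 with
    };               // -Wc++98-compat: warn_cxx98_compat_delegating_ctor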
def err_delegating_initializer_alone : Error<
"an initializer for a delegating constructor must appear alone">;
def warn_delegating_ctor_cycle : Warning<
@@ -1171,8 +1316,6 @@ def note_it_delegates_to : Note<
"it delegates to">, InGroup<DelegatingCtorCycles>;
def note_which_delegates_to : Note<
"which delegates to">, InGroup<DelegatingCtorCycles>;
-def err_delegating_codegen_not_implemented : Error<
- "code generation for delegating constructors not implemented">;
// C++11 range-based for loop
def err_for_range_decl_must_be_var : Error<
@@ -1197,6 +1340,9 @@ def note_for_range_begin_end : Note<
"selected '%select{begin|end}0' %select{function|template }1%2 with iterator type %3">;
// C++11 constexpr
+def warn_cxx98_compat_constexpr : Warning<
+ "'constexpr' specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_invalid_constexpr : Error<
"%select{function parameter|typedef|non-static data member}0 "
"cannot be constexpr">;
@@ -1207,37 +1353,27 @@ def err_constexpr_no_declarators : Error<
"constexpr can only be used in variable and function declarations">;
def err_invalid_constexpr_var_decl : Error<
"constexpr variable declaration must be a definition">;
-def err_constexpr_var_requires_init : Error<
- "declaration of constexpr variable %0 requires an initializer">;
+def err_constexpr_static_mem_var_requires_init : Error<
+ "declaration of constexpr static data member %0 requires an initializer">;
+def err_constexpr_var_non_literal : Error<
+ "constexpr variable cannot have non-literal type %0">;
def err_constexpr_var_requires_const_init : Error<
"constexpr variable %0 must be initialized by a constant expression">;
def err_constexpr_redecl_mismatch : Error<
"%select{non-constexpr declaration of %0 follows constexpr declaration"
"|constexpr declaration of %0 follows non-constexpr declaration}1">;
-def note_constexpr_redecl_mismatch : Note<
- "previous declaration was %select{not |}0marked constexpr">;
def err_constexpr_virtual : Error<"virtual function cannot be constexpr">;
-def note_constexpr_tmpl_virtual : Note<"function template instantiation is not "
- "constexpr because it is virtual">;
-def err_constexpr_virtual_base : Error<"constexpr constructor not allowed in "
- "%select{class|struct}0 with virtual base %plural{1:class|:classes}1">;
-def note_constexpr_tmpl_virtual_base : Note<"constructor template instantiation is "
- "not constexpr because %select{class|struct}0 has virtual base "
- "%plural{1:class|:classes}1">;
+def err_constexpr_virtual_base : Error<
+ "constexpr %select{member function|constructor}0 not allowed in "
+ "%select{class|struct}1 with virtual base %plural{1:class|:classes}2">;
def note_non_literal_virtual_base : Note<"%select{class|struct}0 with virtual "
"base %plural{1:class|:classes}1 is not a literal type">;
def note_constexpr_virtual_base_here : Note<"virtual base class declared here">;
def err_constexpr_non_literal_return : Error<
"constexpr function's return type %0 is not a literal type">;
-def note_constexpr_tmpl_non_literal_return : Note<
- "function template instantiation is not constexpr because return type %0 is "
- "not a literal type">;
def err_constexpr_non_literal_param : Error<
"constexpr %select{function|constructor}1's %ordinal0 parameter type %2 is "
"not a literal type">;
-def note_constexpr_tmpl_non_literal_param : Note<
- "%select{function|constructor}1 template instantiation is not constexpr "
- "because %ordinal0 parameter type %2 is not a literal type">;
def err_constexpr_body_invalid_stmt : Error<
"statement not allowed in constexpr %select{function|constructor}0">;
def err_constexpr_type_definition : Error<
@@ -1247,6 +1383,9 @@ def err_constexpr_vla : Error<
"%select{function|constructor}1">;
def err_constexpr_var_declaration : Error<
"variables cannot be declared in a constexpr %select{function|constructor}0">;
+def err_constexpr_function_never_constant_expr : ExtWarn<
+ "constexpr %select{function|constructor}0 never produces a "
+ "constant expression">, InGroup<DiagGroup<"invalid-constexpr">>, DefaultError;
def err_constexpr_body_no_return : Error<
"no return statement in constexpr function">;
def err_constexpr_body_multiple_return : Error<
@@ -1269,21 +1408,24 @@ def note_non_literal_no_constexpr_ctors : Note<
def note_non_literal_base_class : Note<
"%0 is not literal because it has base class %1 of non-literal type">;
def note_non_literal_field : Note<
- "%0 is not literal because it has data member %1 of non-literal type %2">;
+ "%0 is not literal because it has data member %1 of "
+ "%select{non-literal|volatile}3 type %2">;
def note_non_literal_user_provided_dtor : Note<
"%0 is not literal because it has a user-provided destructor">;
def note_non_literal_nontrivial_dtor : Note<
"%0 is not literal because it has a non-trivial destructor">;
-def note_non_literal_mutable_field : Note<
- "%0 is not literal because it has a mutable data member">;
+
+// C++11 char16_t/char32_t
+def warn_cxx98_compat_unicode_type : Warning<
+ "'%0' type specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
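Trigger for the new char16_t/char32_t compat warning (illustrative only):

    char16_t c = u'x';  // with -std=c++11 -Wc++98-compat:
                        // 'char16_t' type specifier is incompatible with C++98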
// Objective-C++
def err_objc_decls_may_only_appear_in_global_scope : Error<
"Objective-C declarations may only appear in global scope">;
+// Attributes
def err_nsobject_attribute : Error<
"__attribute ((NSObject)) is for pointer types only">;
-
-// Attributes
def err_attribute_can_be_applied_only_to_symbol_declaration : Error<
"%0 attribute can be applied only to symbol declaration">;
def err_attributes_are_not_compatible : Error<
@@ -1293,6 +1435,9 @@ def err_attribute_wrong_number_arguments : Error<
":requires exactly %0 arguments}0">;
def err_attribute_too_many_arguments : Error<
"attribute takes no more than %0 argument%s0">;
+def err_suppress_autosynthesis : Error<
+  "objc_requires_property_definitions attribute may only be specified "
+  "on a class declaration">;
def err_attribute_too_few_arguments : Error<
"attribute takes at least %0 argument%s0">;
def err_attribute_missing_parameter_name : Error<
@@ -1347,8 +1492,6 @@ def err_typecheck_vector_not_convertable : Error<
"can't convert between vector values of different size (%0 and %1)">;
def err_typecheck_ext_vector_not_typedef : Error<
"ext_vector_type only applies to types, not variables">;
-def err_unsupported_vector_size : Error<
- "unsupported type %0 for vector_size attribute, please use on typedef">;
def err_ext_vector_component_exceeds_length : Error<
"vector component access exceeds type %0">;
def err_ext_vector_component_name_illegal : Error<
@@ -1367,12 +1510,46 @@ def err_as_qualified_auto_decl : Error<
"automatic variable qualified with an address space">;
def err_arg_with_address_space : Error<
"parameter may not be qualified with an address space">;
-def err_attr_objc_ownership_bad_type : Error<
- "the type %0 cannot be retained">;
def err_attr_objc_ownership_redundant : Error<
- "the type %0 already has retainment attributes set on it">;
+ "the type %0 is already explicitly ownership-qualified">;
def err_attribute_not_string : Error<
"argument to %0 attribute was not a string literal">;
+def err_undeclared_nsnumber : Error<
+ "NSNumber must be available to use Objective-C literals">;
+def err_invalid_nsnumber_type : Error<
+ "%0 is not a valid literal type for NSNumber">;
+def err_undeclared_nsarray : Error<
+ "NSArray must be available to use Objective-C array literals">;
+def err_undeclared_nsdictionary : Error<
+ "NSDictionary must be available to use Objective-C dictionary "
+ "literals">;
+def err_undeclared_arraywithobjects : Error<
+ "declaration of %0 is missing in NSArray class">;
+def err_undeclared_dictwithobjects : Error<
+ "declaration of %0 is missing in NSDictionary class">;
+def err_undeclared_nsnumber_method : Error<
+ "declaration of %0 is missing in NSNumber class">;
+def err_objc_literal_method_sig : Error<
+ "literal construction method %0 has incompatible signature">;
+def note_objc_literal_method_param : Note<
+ "%select{first|second|third}0 parameter has unexpected type %1 "
+ "(should be %2)">;
+def note_objc_literal_method_return : Note<
+ "method returns unexpected type %0 (should be an object type)">;
+def err_invalid_collection_element : Error<
+ "collection element of type %0 is not an Objective-C object">;
+def err_box_literal_collection : Error<
+ "%select{string|character|boolean|numeric}0 literal must be prefixed by '@' "
+ "in a collection">;
+
+let CategoryName = "Cocoa API Issue" in {
+def warn_objc_redundant_literal_use : Warning<
+ "using %0 with a literal is redundant">, InGroup<ObjCRedundantLiteralUse>;
+}
+
+def warn_bool_for_boolean_literal : Warning<
+ "BOOL of type %0 is non-intergal and unsuitable for a "
+ "boolean literal - ignored">, InGroup<DiagGroup<"numeric-literals">>;
def err_only_annotate_after_access_spec : Error<
"access specifier can only have annotation attributes">;
def err_attribute_section_invalid_for_target : Error<
@@ -1386,6 +1563,8 @@ def warn_redeclaration_without_attribute_prev_attribute_ignored : Warning<
def warn_attribute_ignored : Warning<"%0 attribute ignored">;
def warn_unknown_attribute_ignored : Warning<
"unknown attribute %0 ignored">, InGroup<UnknownAttributes>;
+def warn_declspec_attribute_ignored : Warning<
+ "attribute %0 is ignored, place it after \"%select{class|struct|union|enum}1\" to apply attribute to type declaration">, InGroup<IgnoredAttributes>;
def warn_attribute_precede_definition : Warning<
"attribute declaration must precede definition">;
def warn_attribute_void_function_method : Warning<
@@ -1395,6 +1574,9 @@ def warn_attribute_weak_on_field : Warning<
"__weak attribute cannot be specified on a field declaration">;
def warn_gc_attribute_weak_on_local : Warning<
"Objective-C GC does not allow weak variables on the stack">;
+def warn_nsobject_attribute : Warning<
+ "__attribute ((NSObject)) may be put on a typedef only, "
+ "attribute is ignored">, InGroup<NSobjectAttribute>;
def warn_attribute_weak_on_local : Warning<
"__weak attribute cannot be specified on an automatic variable">;
def warn_weak_identifier_undeclared : Warning<
@@ -1414,16 +1596,15 @@ def err_alias_not_supported_on_darwin : Error <
def warn_attribute_wrong_decl_type : Warning<
"%0 attribute only applies to %select{functions|unions|"
"variables and functions|functions and methods|parameters|"
- "parameters and methods|functions, methods and blocks|"
- "classes and virtual methods|functions, methods, and parameters|"
- "classes|virtual methods|class members|variables|methods|"
- "variables, functions and labels|fields and global variables}1">;
+ "functions, methods and blocks|functions, methods, and parameters|"
+ "classes|variables|methods|variables, functions and labels|"
+ "fields and global variables|structs}1">;
def err_attribute_wrong_decl_type : Error<
"%0 attribute only applies to %select{functions|unions|"
"variables and functions|functions and methods|parameters|"
- "parameters and methods|functions, methods and blocks|"
- "classes and virtual methods|functions, methods, and parameters|"
- "classes|virtual methods|class members|variables|methods|structs}1">;
+ "functions, methods and blocks|functions, methods, and parameters|"
+ "classes|variables|methods|variables, functions and labels|"
+ "fields and global variables|structs}1">;
def warn_function_attribute_wrong_type : Warning<
"'%0' only applies to function types; type here is %1">;
def warn_pointer_attribute_wrong_type : Warning<
@@ -1455,8 +1636,6 @@ def err_objc_precise_lifetime_bad_type : Error<
def warn_objc_precise_lifetime_meaningless : Error<
"objc_precise_lifetime is not meaningful for "
"%select{__unsafe_unretained|__autoreleasing}0 objects">;
-def warn_label_attribute_not_unused : Warning<
- "The only valid attribute for labels is 'unused'">;
def err_invalid_pcs : Error<"Invalid PCS type">;
def err_attribute_can_be_applied_only_to_value_decl : Error<
"%0 attribute can only be applied to value declarations">;
@@ -1479,12 +1658,18 @@ def err_attribute_argument_out_of_range : Error<
"%plural{0:no parameters to index into|"
"1:can only be 1, since there is one parameter|"
":must be between 1 and %2}2">;
-def err_attribute_argument_not_lockable : Error<
+def warn_attribute_argument_not_lockable : Warning<
"%0 attribute requires arguments whose type is annotated "
- "with 'lockable' attribute">;
-def err_attribute_decl_not_lockable : Error<
+ "with 'lockable' attribute; type here is '%1'">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_attribute_decl_not_lockable : Warning<
"%0 attribute can only be applied in a context annotated "
- "with 'lockable' attribute">;
+ "with 'lockable' attribute">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_attribute_argument_not_class : Warning<
+ "%0 attribute requires arguments that are class type or point to"
+ " class type; type here is '%1'">,
+ InGroup<ThreadSafety>, DefaultIgnore;
def warn_unlock_but_no_lock : Warning<
"unlocking '%0' that was not locked">,
InGroup<ThreadSafety>, DefaultIgnore;
@@ -1495,12 +1680,13 @@ def warn_no_unlock : Warning<
"mutex '%0' is still locked at the end of function">,
InGroup<ThreadSafety>, DefaultIgnore;
// FIXME: improve the error message about locks not in scope
-def warn_lock_at_end_of_scope : Warning<
- "mutex '%0' is still locked at the end of its scope">,
+def warn_lock_some_predecessors : Warning<
+ "mutex '%0' is not locked on every path through here">,
InGroup<ThreadSafety>, DefaultIgnore;
def warn_expecting_lock_held_on_loop : Warning<
"expecting mutex '%0' to be locked at start of each loop">,
InGroup<ThreadSafety>, DefaultIgnore;
+def note_locked_here : Note<"mutex acquired here">;
def warn_lock_exclusive_and_shared : Warning<
"mutex '%0' is locked exclusively and shared in the same scope">,
InGroup<ThreadSafety>, DefaultIgnore;
@@ -1530,7 +1716,7 @@ def warn_fun_excludes_mutex : Warning<
"cannot call function '%0' while mutex '%1' is locked">,
InGroup<ThreadSafety>, DefaultIgnore;
def warn_cannot_resolve_lock : Warning<
- "cannot resolve lock expression to a specific lockable object">,
+ "cannot resolve lock expression">,
InGroup<ThreadSafety>, DefaultIgnore;
@@ -1548,10 +1734,10 @@ def warn_impcast_float_integer : Warning<
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_integer_sign : Warning<
"implicit conversion changes signedness: %0 to %1">,
- InGroup<DiagGroup<"sign-conversion">>, DefaultIgnore;
+ InGroup<SignConversion>, DefaultIgnore;
def warn_impcast_integer_sign_conditional : Warning<
"operand of ? changes signedness: %0 to %1">,
- InGroup<DiagGroup<"sign-conversion">>, DefaultIgnore;
+ InGroup<SignConversion>, DefaultIgnore;
def warn_impcast_integer_precision : Warning<
"implicit conversion loses integer precision: %0 to %1">,
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
@@ -1560,26 +1746,33 @@ def warn_impcast_integer_64_32 : Warning<
InGroup<DiagGroup<"shorten-64-to-32">>, DefaultIgnore;
def warn_impcast_integer_precision_constant : Warning<
"implicit conversion from %2 to %3 changes value from %0 to %1">,
- InGroup<DiagGroup<"constant-conversion">>;
+ InGroup<ConstantConversion>;
def warn_impcast_bitfield_precision_constant : Warning<
"implicit truncation from %2 to bitfield changes value from %0 to %1">,
- InGroup<DiagGroup<"constant-conversion">>;
+ InGroup<ConstantConversion>;
def warn_impcast_literal_float_to_integer : Warning<
"implicit conversion turns literal floating-point number into integer: "
"%0 to %1">,
- InGroup<DiagGroup<"literal-conversion">>, DefaultIgnore;
+ InGroup<LiteralConversion>;
def warn_impcast_string_literal_to_bool : Warning<
"implicit conversion turns string literal into bool: %0 to %1">,
- InGroup<DiagGroup<"string-conversion">>, DefaultIgnore;
+ InGroup<StringConversion>, DefaultIgnore;
def warn_impcast_different_enum_types : Warning<
"implicit conversion from enumeration type %0 to different enumeration type "
"%1">, InGroup<DiagGroup<"conversion">>;
def warn_impcast_bool_to_null_pointer : Warning<
"initialization of pointer of type %0 to null from a constant boolean "
- "expression">, InGroup<BoolConversions>;
+ "expression">, InGroup<BoolConversion>;
def warn_impcast_null_pointer_to_integer : Warning<
- "implicit conversion of NULL constant to integer">,
- InGroup<DiagGroup<"conversion">>, DefaultIgnore;
+ "implicit conversion of NULL constant to %0">,
+ InGroup<NullConversion>;
+def warn_impcast_function_to_bool : Warning<
+ "address of function %q0 will always evaluate to 'true'">,
+ InGroup<BoolConversion>;
+def note_function_to_bool_silence : Note<
+ "prefix with the address-of operator to silence this warning">;
+def note_function_to_bool_call : Note<
+ "suffix with parentheses to turn this into a function call">;
def warn_cast_align : Warning<
"cast from %0 to %1 increases required alignment from %2 to %3">,
@@ -1605,6 +1798,9 @@ def warn_transparent_union_attribute_zero_fields : Warning<
def warn_attribute_type_not_supported : Warning<
"'%0' attribute argument not supported: %1">;
def warn_attribute_unknown_visibility : Warning<"unknown visibility '%0'">;
+def warn_attribute_protected_visibility :
+ Warning<"target does not support 'protected' visibility; using 'default'">,
+ InGroup<DiagGroup<"unsupported-visibility">>;
def err_unknown_machine_mode : Error<"unknown machine mode %0">;
def err_unsupported_machine_mode : Error<"unsupported machine mode %0">;
def err_mode_not_primitive : Error<
@@ -1617,9 +1813,6 @@ def warn_attribute_nonnull_no_pointers : Warning<
"'nonnull' attribute applied to function with no pointer arguments">;
def warn_attribute_malloc_pointer_only : Warning<
"'malloc' attribute only applies to functions returning a pointer type">;
-def warn_transparent_union_nonpointer : Warning<
- "'transparent_union' attribute support incomplete; only supported for "
- "pointer unions">;
def warn_attribute_sentinel_named_arguments : Warning<
"'sentinel' attribute requires named arguments">;
def warn_attribute_sentinel_not_variadic : Warning<
@@ -1650,9 +1843,10 @@ def warn_attribute_ibaction: Warning<
"ibaction attribute can only be applied to Objective-C instance methods">;
def err_iboutletcollection_type : Error<
"invalid type %0 as argument of iboutletcollection attribute">;
-def err_iboutlet_object_type : Error<
+def warn_iboutlet_object_type : Warning<
"%select{ivar|property}2 with %0 attribute must "
- "be an object type (invalid %1)">;
+ "be an object type (invalid %1)">,
+ InGroup<DiagGroup<"invalid-iboutlet">>;
def err_attribute_overloadable_not_function : Error<
"'overloadable' attribute can only be applied to a function">;
def err_attribute_overloadable_missing : Error<
@@ -1713,8 +1907,6 @@ def err_uninitialized_member_for_assign : Error<
"cannot define the implicit default assignment operator for %0, because "
"non-static %select{reference|const}1 member %2 can't use default "
"assignment operator">;
-def note_first_required_here : Note<
- "synthesized method is first required here">;
def err_uninitialized_member_in_ctor : Error<
"%select{|implicit default }0constructor for %1 must explicitly initialize "
"the %select{reference|const}2 member %3">;
@@ -1733,7 +1925,8 @@ def note_default_argument_declared_here : Note<
def ext_param_promoted_not_compatible_with_prototype : ExtWarn<
"promoted type %0 of K&R function parameter is not compatible with the "
- "parameter type %1 declared in a previous prototype">;
+ "parameter type %1 declared in a previous prototype">,
+ InGroup<KNRPromotedParameter>;
// C++ Overloading Semantic Analysis.
@@ -1766,7 +1959,16 @@ def note_ovl_candidate : Note<"candidate "
"is the implicit move constructor|"
"is the implicit copy assignment operator|"
"is the implicit move assignment operator|"
- "is an inherited constructor}0%1">;
+ "is an inherited constructor}0%1"
+ "%select{| has different class (expected %3 but has %4)"
+ "| has different number of parameters (expected %3 but has %4)"
+ "| has type mismatch at %ordinal3 parameter (expected %4 but has %5)"
+ "| has different return type (%3 expected but has %4)"
+ "| has different qualifiers (expected "
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile"
+ "|volatile and restrict|const, volatile, and restrict}3 but found "
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile"
+ "|volatile and restrict|const, volatile, and restrict}4)}2">;
def note_ovl_candidate_inherited_constructor : Note<"inherited from here">;
def note_ovl_candidate_bad_deduction : Note<
@@ -1811,8 +2013,9 @@ def note_ovl_candidate_deleted : Note<
"constructor (the implicit move constructor)|"
"function (the implicit copy assignment operator)|"
"function (the implicit move assignment operator)|"
- "constructor (inherited)}0%1 "
- "has been explicitly %select{made unavailable|deleted}2">;
+ "constructor (inherited)}0%1 has been "
+ "%select{explicitly made unavailable|explicitly deleted|"
+ "implicitly deleted}2">;
// Giving the index of the bad argument really clutters this message, and
// it's relatively unimportant because 1) it's generally obvious which
@@ -1979,6 +2182,10 @@ def err_ref_init_ambiguous : Error<
"reference initialization of type %0 with initializer of type %1 is ambiguous">;
def err_ovl_deleted_init : Error<
"call to %select{unavailable|deleted}0 constructor of %1">;
+def err_ovl_deleted_special_init : Error<
+ "call to implicitly-deleted %select{default constructor|copy constructor|"
+ "move constructor|copy assignment operator|move assignment operator|"
+ "destructor|function}0 of %1">;
def err_ovl_ambiguous_oper_unary : Error<
"use of overloaded operator '%0' is ambiguous (operand type %1)">;
def err_ovl_ambiguous_oper_binary : Error<
@@ -1986,6 +2193,10 @@ def err_ovl_ambiguous_oper_binary : Error<
def err_ovl_no_viable_oper : Error<"no viable overloaded '%0'">;
def err_ovl_deleted_oper : Error<
"overload resolution selected %select{unavailable|deleted}0 operator '%1'%2">;
+def err_ovl_deleted_special_oper : Error<
+ "overload resolution selected implicitly-deleted %select{default constructor|"
+ "copy constructor|move constructor|copy assignment operator|move assignment "
+ "operator|destructor|'%1'}0%2">;
def err_ovl_no_viable_subscript :
Error<"no viable overloaded operator[] for type %0">;
def err_ovl_no_oper :
@@ -2017,7 +2228,13 @@ def err_addr_ovl_not_func_ptrref : Error<
"address of overloaded function %0 cannot be converted to type %1">;
def err_addr_ovl_no_qualifier : Error<
"can't form member pointer of type %0 without '&' and class name">;
-
+
+// C++11 Literal Operators
+def err_ovl_no_viable_literal_operator : Error<
+ "no matching literal operator for call to %0"
+ "%select{| with argument of type %2| with arguments of types %2 and %3}1"
+ "%select{| or 'const char *', and no matching literal operator template}4">;
+
// C++ Template Declarations
def err_template_param_shadow : Error<
"declaration of %0 shadows template parameter">;
@@ -2063,6 +2280,9 @@ def err_template_param_default_arg_missing : Error<
def ext_template_parameter_default_in_function_template : ExtWarn<
"default template arguments for a function template are a C++11 extension">,
InGroup<CXX11>;
+def warn_cxx98_compat_template_parameter_default_in_function_template : Warning<
+ "default template arguments for a function template are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_template_parameter_default_template_member : Error<
"cannot add a default template argument to the definition of a member of a "
"class template">;
@@ -2103,6 +2323,12 @@ def ext_template_arg_local_type : ExtWarn<
"template argument uses local type %0">, InGroup<LocalTypeTemplateArgs>;
def ext_template_arg_unnamed_type : ExtWarn<
"template argument uses unnamed type">, InGroup<UnnamedTypeTemplateArgs>;
+def warn_cxx98_compat_template_arg_local_type : Warning<
+ "local type %0 as template argument is incompatible with C++98">,
+ InGroup<CXX98CompatLocalTypeTemplateArgs>, DefaultIgnore;
+def warn_cxx98_compat_template_arg_unnamed_type : Warning<
+ "unnamed type as template argument is incompatible with C++98">,
+ InGroup<CXX98CompatUnnamedTypeTemplateArgs>, DefaultIgnore;
def note_template_unnamed_type_here : Note<
"unnamed type used in template argument was declared here">;
def err_template_arg_overload_type : Error<
@@ -2121,6 +2347,13 @@ def err_template_arg_not_integral_or_enumeral : Error<
def err_template_arg_not_ice : Error<
"non-type template argument of type %0 is not an integral constant "
"expression">;
+def err_template_arg_not_address_constant : Error<
+ "non-type template argument of type %0 is not a constant expression">;
+def err_template_arg_untyped_null_constant : Error<
+ "null non-type template argument must be cast to template parameter type %0">;
+def err_template_arg_wrongtype_null_constant : Error<
+ "null non-type template argument of type %0 does not match template parameter "
+ "of type %1">;
def err_deduced_non_type_template_arg_type_mismatch : Error<
"deduced non-type template argument does not have the same type as the "
"its corresponding template parameter (%0 vs %1)">;
@@ -2156,20 +2389,31 @@ def err_template_arg_field : Error<
"non-type template argument refers to non-static data member %0">;
def err_template_arg_method : Error<
"non-type template argument refers to non-static member function %0">;
-def err_template_arg_function_not_extern : Error<
- "non-template argument refers to function %0 with internal linkage">;
-def err_template_arg_object_not_extern : Error<
- "non-template argument refers to object %0 that does not have external "
- "linkage">;
+def err_template_arg_object_no_linkage : Error<
+ "non-type template argument refers to %select{function|object}0 %1 that "
+ "does not have linkage">;
+def warn_cxx98_compat_template_arg_object_internal : Warning<
+ "non-type template argument referring to %select{function|object}0 %1 with "
+ "internal linkage is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_template_arg_object_internal : ExtWarn<
+ "non-type template argument referring to %select{function|object}0 %1 with "
+ "internal linkage is a C++11 extension">, InGroup<CXX11>;
+def err_template_arg_thread_local : Error<
+ "non-type template argument refers to thread-local object">;
def note_template_arg_internal_object : Note<
- "non-template argument refers to %select{function|object}0 here">;
-def note_template_arg_refers_here : Note<"non-template argument refers here">;
+ "non-type template argument refers to %select{function|object}0 here">;
+def note_template_arg_refers_here : Note<
+ "non-type template argument refers here">;
def err_template_arg_not_object_or_func : Error<
"non-type template argument does not refer to an object or function">;
def err_template_arg_not_pointer_to_member_form : Error<
"non-type template argument is not a pointer to member constant">;
def ext_template_arg_extra_parens : ExtWarn<
"address non-type template argument cannot be surrounded by parentheses">;
+def warn_cxx98_compat_template_arg_extra_parens : Warning<
+ "redundant parentheses surrounding address non-type template argument are "
+ "incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def err_pointer_to_member_type : Error<
"invalid use of pointer to member type after %select{.*|->*}0">;
def err_pointer_to_member_call_drops_quals : Error<
@@ -2181,7 +2425,8 @@ def err_pointer_to_member_oper_value_classify: Error<
// C++ template specialization
def err_template_spec_unknown_kind : Error<
"can only provide an explicit specialization for a class template, function "
- "template, or a member function, static data member, or member class of a "
+ "template, or a member function, static data member, "
+ "%select{or member class|member class, or member enumeration}0 of a "
"class template">;
def note_specialized_entity : Note<
"explicitly specialized declaration is here">;
@@ -2193,30 +2438,30 @@ def err_template_spec_decl_friend : Error<
"cannot declare an explicit specialization in a friend">;
def err_template_spec_decl_out_of_scope_global : Error<
"%select{class template|class template partial|function template|member "
- "function|static data member|member class}0 specialization of %1 must "
- "originally be declared in the global scope">;
-def ext_template_spec_decl_out_of_scope_global : ExtWarn<
- "%select{class template|class template partial|function template|member "
- "function|static data member|member class}0 specialization of %1 must "
- "originally be declared in the global scope; accepted as a C++11 extension">,
- InGroup<CXX11>;
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 must originally be declared in the global scope">;
def err_template_spec_decl_out_of_scope : Error<
"%select{class template|class template partial|function template|member "
- "function|static data member|member class}0 specialization of %1 must "
- "originally be declared in namespace %2">;
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 must originally be declared in namespace %2">;
def ext_template_spec_decl_out_of_scope : ExtWarn<
+ "first declaration of %select{class template|class template partial|"
+ "function template|member function|static data member|member class|"
+ "member enumeration}0 specialization of %1 outside namespace %2 is a "
+ "C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_template_spec_decl_out_of_scope : Warning<
"%select{class template|class template partial|function template|member "
- "function|static data member|member class}0 specialization of %1 must "
- "originally be declared in namespace %2; accepted as a C++11 extension">,
- InGroup<CXX11>;
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 outside namespace %2 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_template_spec_redecl_out_of_scope : Error<
"%select{class template|class template partial|function template|member "
- "function|static data member|member class}0 specialization of %1 not in a "
- "namespace enclosing %2">;
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 not in a namespace enclosing %2">;
def err_template_spec_redecl_global_scope : Error<
"%select{class template|class template partial|function template|member "
- "function|static data member|member class}0 specialization of %1 must occur "
- "at global scope">;
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 must occur at global scope">;
def err_spec_member_not_instantiated : Error<
"specialization of member %q0 does not specialize an instantiated member">;
def note_specialized_decl : Note<"attempt to specialize declaration here">;
@@ -2303,7 +2548,7 @@ def err_function_template_spec_no_match : Error<
"no function template matches function template specialization %0">;
def err_function_template_spec_ambiguous : Error<
"function template specialization %0 ambiguously refers to more than one "
- "function template; explicitly specify%select{|additional }1 template "
+ "function template; explicitly specify%select{| additional}1 template "
"arguments to identify a particular function template">;
def note_function_template_spec_matched : Note<
"function template matches specialization %0">;
@@ -2315,7 +2560,7 @@ def err_template_recursion_depth_exceeded : Error<
"recursive template instantiation exceeded maximum depth of %0">,
DefaultFatal, NoSFINAE;
def note_template_recursion_depth : Note<
- "use -ftemplate-depth-N to increase recursive template instantiation depth">;
+ "use -ftemplate-depth=N to increase recursive template instantiation depth">;
def err_template_instantiate_within_definition : Error<
"%select{implicit|explicit}0 instantiation of template %1 within its"
@@ -2334,6 +2579,8 @@ def note_function_template_spec_here : Note<
"in instantiation of function template specialization %q0 requested here">;
def note_template_static_data_member_def_here : Note<
"in instantiation of static data member %q0 requested here">;
+def note_template_enum_def_here : Note<
+ "in instantiation of enumeration %q0 requested here">;
def note_template_type_alias_instantiation_here : Note<
"in instantiation of template type alias %0 requested here">;
@@ -2376,10 +2623,12 @@ def ext_explicit_instantiation_after_specialization : Extension<
"explicit instantiation of %0 that occurs after an explicit "
"specialization will be ignored (C++11 extension)">,
InGroup<CXX11>;
+def warn_cxx98_compat_explicit_instantiation_after_specialization : Warning<
+ "explicit instantiation of %0 that occurs after an explicit "
+ "specialization is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
def note_previous_template_specialization : Note<
"previous template specialization is here">;
-def err_explicit_instantiation_enum : Error<
- "explicit instantiation of enumeration type %0">;
def err_explicit_instantiation_nontemplate_type : Error<
"explicit instantiation of non-templated type %0">;
def note_nontemplate_decl_here : Note<
@@ -2392,10 +2641,10 @@ def err_explicit_instantiation_must_be_global : Error<
"explicit instantiation of %0 must occur at global scope">;
def warn_explicit_instantiation_out_of_scope_0x : Warning<
"explicit instantiation of %0 not in a namespace enclosing %1">,
- InGroup<CXX11Compat>;
+ InGroup<CXX11Compat>, DefaultIgnore;
def warn_explicit_instantiation_must_be_global_0x : Warning<
"explicit instantiation of %0 must occur at global scope">,
- InGroup<CXX11Compat>;
+ InGroup<CXX11Compat>, DefaultIgnore;
def err_explicit_instantiation_requires_name : Error<
"explicit instantiation declaration requires a name">;
@@ -2420,16 +2669,19 @@ def note_explicit_instantiation_candidate : Note<
"explicit instantiation candidate function template here %0">;
def err_explicit_instantiation_inline : Error<
"explicit instantiation cannot be 'inline'">;
+def warn_explicit_instantiation_inline_0x : Warning<
+ "explicit instantiation cannot be 'inline'">, InGroup<CXX11Compat>,
+ DefaultIgnore;
def err_explicit_instantiation_constexpr : Error<
"explicit instantiation cannot be 'constexpr'">;
def ext_explicit_instantiation_without_qualified_id : Extension<
"qualifier in explicit instantiation of %q0 requires a template-id "
"(a typedef is not permitted)">;
def err_explicit_instantiation_unqualified_wrong_namespace : Error<
- "explicit instantiation of %q0 must occur in %1">;
+ "explicit instantiation of %q0 must occur in namespace %1">;
def warn_explicit_instantiation_unqualified_wrong_namespace_0x : Warning<
- "explicit instantiation of %q0 must occur in %1">,
- InGroup<CXX11Compat>;
+ "explicit instantiation of %q0 must occur in namespace %1">,
+ InGroup<CXX11Compat>, DefaultIgnore;
def err_explicit_instantiation_undefined_member : Error<
"explicit instantiation of undefined %select{member class|member function|"
"static data member}0 %1 of class template %2">;
@@ -2454,6 +2706,9 @@ def warn_typename_missing : ExtWarn<
InGroup<DiagGroup<"typename-missing">>;
def ext_typename_outside_of_template : ExtWarn<
"'typename' occurs outside of a template">, InGroup<CXX11>;
+def warn_cxx98_compat_typename_outside_of_template : Warning<
+ "use of 'typename' outside of a template is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_typename_refers_to_using_value_decl : Error<
"typename specifier refers to a dependent using declaration for a value "
"%0 in %1">;
@@ -2462,8 +2717,6 @@ def note_using_value_decl_missing_typename : Note<
def err_template_kw_refers_to_non_template : Error<
"%0 following the 'template' keyword does not refer to a template">;
-def err_template_kw_refers_to_function_template : Error<
- "%0 following the 'template' keyword refers to a function template">;
def err_template_kw_refers_to_class_template : Error<
"'%0%1' instantiated to a class template, not a function template">;
def note_referenced_class_template : Note<
@@ -2472,6 +2725,9 @@ def err_template_kw_missing : Error<
"missing 'template' keyword prior to dependent template name '%0%1'">;
def ext_template_outside_of_template : ExtWarn<
"'template' keyword outside of a template">, InGroup<CXX11>;
+def warn_cxx98_compat_template_outside_of_template : Warning<
+ "use of 'template' keyword outside of a template is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_non_type_template_in_nested_name_specifier : Error<
"qualified name refers into a specialization of function template '%0'">;
@@ -2480,6 +2736,8 @@ def err_template_id_not_a_type : Error<
def note_template_declared_here : Note<
"%select{function template|class template|type alias template|template template parameter}0 "
"%1 declared here">;
+def note_parameter_type : Note<
+ "parameter of type %0 is declared here">;
// C++11 Variadic Templates
def err_template_param_pack_default_arg : Error<
@@ -2503,25 +2761,29 @@ def err_unexpanded_parameter_pack_0 : Error<
"%select{expression|base type|declaration type|data member type|bit-field "
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
- "non-type template parameter type|exception type|partial specialization}0 "
+ "non-type template parameter type|exception type|partial specialization|"
+ "__if_exists name|__if_not_exists name}0 "
"contains an unexpanded parameter pack">;
def err_unexpanded_parameter_pack_1 : Error<
"%select{expression|base type|declaration type|data member type|bit-field "
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
- "non-type template parameter type|exception type|partial specialization}0 "
+ "non-type template parameter type|exception type|partial specialization|"
+ "__if_exists name|__if_not_exists name}0 "
"contains unexpanded parameter pack %1">;
def err_unexpanded_parameter_pack_2 : Error<
"%select{expression|base type|declaration type|data member type|bit-field "
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
- "non-type template parameter type|exception type|partial specialization}0 "
+ "non-type template parameter type|exception type|partial specialization|"
+ "__if_exists name|__if_not_exists name}0 "
"contains unexpanded parameter packs %1 and %2">;
def err_unexpanded_parameter_pack_3_or_more : Error<
"%select{expression|base type|declaration type|data member type|bit-field "
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
- "non-type template parameter type|exception type|partial specialization}0 "
+ "non-type template parameter type|exception type|partial specialization|"
+ "__if_exists name|__if_not_exists name}0 "
"contains unexpanded parameter packs %1, %2, ...">;
def err_pack_expansion_without_parameter_packs : Error<
@@ -2576,6 +2838,8 @@ def warn_unavailable_fwdclass_message : Warning<
def note_unavailable_here : Note<
"%select{declaration|function}0 has been explicitly marked "
"%select{unavailable|deleted|deprecated}1 here">;
+def note_implicitly_deleted : Note<
+ "explicitly defaulted function was implicitly deleted here">;
def warn_not_enough_argument : Warning<
"not enough variable arguments in %0 declaration to fit a sentinel">,
InGroup<Sentinel>;
@@ -2599,6 +2863,37 @@ def err_definition_of_explicitly_defaulted_member : Error<
def err_redefinition_extern_inline : Error<
"redefinition of a 'extern inline' function %0 is not supported in "
"%select{C99 mode|C++}1">;
+def warn_cxx98_compat_friend_redefinition : Warning<
+ "friend function %0 would be implicitly redefined in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+def note_deleted_dtor_no_operator_delete : Note<
+ "virtual destructor requires an unambiguous, accessible 'operator delete'">;
+def note_deleted_special_member_class_subobject : Note<
+ "%select{default constructor|copy constructor|move constructor|"
+ "copy assignment operator|move assignment operator|destructor}0 of "
+ "%select{||||union }4%1 is implicitly deleted because "
+ "%select{base class %3|field %3}2 has "
+ "%select{no|a deleted|multiple|an inaccessible|a non-trivial}4 "
+ "%select{%select{default constructor|copy constructor|move constructor|copy "
+ "assignment operator|move assignment operator|destructor}0|destructor}5"
+ "%select{||s||}4">;
+def note_deleted_default_ctor_uninit_field : Note<
+ "default constructor of %0 is implicitly deleted because field %1 of "
+ "%select{reference|const-qualified}3 type %2 would not be initialized">;
+def note_deleted_default_ctor_all_const : Note<
+ "default constructor of %0 is implicitly deleted because all "
+ "%select{data members|data members of an anonymous union member}1"
+ " are const-qualified">;
+def note_deleted_copy_ctor_rvalue_reference : Note<
+ "copy constructor of %0 is implicitly deleted because field %1 is of "
+ "rvalue reference type %2">;
+def note_deleted_copy_user_declared_move : Note<
+ "copy %select{constructor|assignment operator}0 is implicitly deleted because"
+ " %1 has a user-declared move %select{constructor|assignment operator}2">;
+def note_deleted_assign_field : Note<
+  "%select{copy|move}0 assignment operator of %1 is implicitly deleted "
+  "because field %2 is of %select{reference|const-qualified}4 type %3">;
// This should eventually be an error.
def warn_undefined_internal : Warning<
@@ -2606,9 +2901,11 @@ def warn_undefined_internal : Warning<
DiagGroup<"undefined-internal">;
def note_used_here : Note<"used here">;
-def warn_redefinition_of_typedef : Warning<
- "redefinition of typedef %0 is invalid in C">,
- InGroup<DiagGroup<"typedef-redefinition"> >, DefaultError;
+def warn_redefinition_of_typedef : ExtWarn<
+ "redefinition of typedef %0 is a C11 feature">,
+ InGroup<DiagGroup<"typedef-redefinition"> >;
+def err_redefinition_variably_modified_typedef : Error<
+ "redefinition of %select{typedef|type alias}0 for variably-modified type %1">;
def err_inline_declaration_block_scope : Error<
"inline declaration of %0 not allowed in block scope">;
@@ -2632,6 +2929,9 @@ def err_redefinition_different_type : Error<
"redefinition of %0 with a different type">;
def err_redefinition_different_kind : Error<
"redefinition of %0 as different kind of symbol">;
+def warn_forward_class_redefinition : Warning<
+ "redefinition of forward class %0 of a typedef name of an object type is ignored">,
+ InGroup<DiagGroup<"objc-forward-class-redefinition">>;
def err_redefinition_different_typedef : Error<
"%select{typedef|type alias|type alias template}0 redefinition with different types (%1 vs %2)">;
def err_tag_reference_non_tag : Error<
@@ -2692,6 +2992,9 @@ def err_vm_func_decl : Error<
"function declaration cannot have variably modified type">;
def err_array_too_large : Error<
"array is too large (%0 elements)">;
+def warn_array_new_too_large : Warning<"array is too large (%0 elements)">,
+ // FIXME PR11644: ", will throw std::bad_array_new_length at runtime"
+ InGroup<DiagGroup<"bad-array-new-length">>;
// -Wpadded, -Wpacked
def warn_padded_struct_field : Warning<
@@ -2707,6 +3010,9 @@ def warn_unnecessary_packed : Warning<
"packed attribute is unnecessary for %0">, InGroup<Packed>, DefaultIgnore;
def err_typecheck_negative_array_size : Error<"array size is negative">;
+def warn_typecheck_negative_array_new_size : Warning<"array size is negative">,
+ // FIXME PR11644: ", will throw std::bad_array_new_length at runtime"
+ InGroup<DiagGroup<"bad-array-new-length">>;
def warn_typecheck_function_qualifiers : Warning<
"qualifier on function type %0 has unspecified behavior">;
def err_typecheck_invalid_restrict_not_pointer : Error<
@@ -2719,8 +3025,9 @@ def ext_typecheck_zero_array_size : Extension<
"zero size arrays are an extension">;
def err_typecheck_zero_array_size : Error<
"zero-length arrays are not permitted in C++">;
-def err_at_least_one_initializer_needed_to_size_array : Error<
- "at least one initializer value required to size array">;
+def warn_typecheck_zero_static_array_size : Warning<
+ "'static' has no effect on zero-length arrays">,
+ InGroup<DiagGroup<"array-bounds">>;
def err_array_size_non_int : Error<"size of array has non-integer type %0">;
def err_init_element_not_constant : Error<
"initializer element is not a compile-time constant">;
@@ -2731,8 +3038,6 @@ def err_block_extern_cant_init : Error<
def warn_extern_init : Warning<"'extern' variable has an initializer">;
def err_variable_object_no_init : Error<
"variable-sized object may not be initialized">;
-def err_array_init_list_required : Error<
- "initialization with '{...}' expected for array">;
def err_excess_initializers : Error<
"excess elements in %select{array|vector|scalar|union|struct}0 initializer">;
def warn_excess_initializers : ExtWarn<
@@ -2756,14 +3061,31 @@ def ext_complex_component_init : Extension<
"complex initialization specifying real and imaginary components "
"is an extension">, InGroup<DiagGroup<"complex-component-init">>;
def err_empty_scalar_initializer : Error<"scalar initializer cannot be empty">;
+def warn_cxx98_compat_empty_scalar_initializer : Warning<
+ "scalar initialized from empty initializer list is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_illegal_initializer : Error<
"illegal initializer (only variables can be initialized)">;
def err_illegal_initializer_type : Error<"illegal initializer type %0">;
-def err_init_list_variable_narrowing : Error<
+def err_init_list_type_narrowing_sfinae : Error<
+ "type %0 cannot be narrowed to %1 in initializer list">;
+def err_init_list_type_narrowing : ExtWarn<
+ "type %0 cannot be narrowed to %1 in initializer list">,
+ InGroup<CXX11Narrowing>, DefaultError;
+def err_init_list_variable_narrowing_sfinae : Error<
"non-constant-expression cannot be narrowed from type %0 to %1 in "
"initializer list">;
-def err_init_list_constant_narrowing : Error<
+def err_init_list_variable_narrowing : ExtWarn<
+ "non-constant-expression cannot be narrowed from type %0 to %1 in "
+ "initializer list">, InGroup<CXX11Narrowing>, DefaultError;
+def err_init_list_constant_narrowing_sfinae : Error<
"constant expression evaluates to %0 which cannot be narrowed to type %1">;
+def err_init_list_constant_narrowing : ExtWarn<
+ "constant expression evaluates to %0 which cannot be narrowed to type %1">,
+ InGroup<CXX11Narrowing>, DefaultError;
+def warn_init_list_type_narrowing : Warning<
+ "type %0 cannot be narrowed to %1 in initializer list in C++11">,
+ InGroup<CXX11Narrowing>, DefaultIgnore;
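Narrowing cases matching the three kinds above (illustrative only):

    int  a{1.5};   // type 'double' cannot be narrowed to 'int'
    int  i = 300;
    char b{i};     // non-constant-expression cannot be narrowed from 'int' to 'char'
    char c{300};   // constant expression evaluates to 300 which cannot be narrowed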
def warn_init_list_variable_narrowing : Warning<
"non-constant-expression cannot be narrowed from type %0 to %1 in "
"initializer list in C++11">,
@@ -2789,7 +3111,7 @@ def err_anon_bitfield_width_exceeds_type_size : Error<
"size of anonymous bit-field (%0 bits) exceeds size of its type (%1 bits)">;
def err_incorrect_number_of_vector_initializers : Error<
"number of elements must be either one or match the size of the vector">;
-
+
// Used by C++ which allows bit-fields that are wider than the type.
def warn_bitfield_width_exceeds_type_size: Warning<
"size of bit-field %0 (%1 bits) exceeds the size of its type; value will be "
@@ -2801,6 +3123,9 @@ def warn_anon_bitfield_width_exceeds_type_size : Warning<
def warn_missing_braces : Warning<
"suggest braces around initialization of subobject">,
InGroup<DiagGroup<"missing-braces">>, DefaultIgnore;
+def err_missing_braces : Error<
+ "cannot omit braces around initialization of subobject when using direct "
+ "list-initialization">;
def err_redefinition_of_label : Error<"redefinition of label %0">;
def err_undeclared_label_use : Error<"use of undeclared label %0">;
@@ -2810,17 +3135,28 @@ def warn_unused_label : Warning<"unused label %0">,
def err_goto_into_protected_scope : Error<"goto into protected scope">;
def warn_goto_into_protected_scope : ExtWarn<"goto into protected scope">,
InGroup<Microsoft>;
+def warn_cxx98_compat_goto_into_protected_scope : Warning<
+ "goto would jump into protected scope in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_switch_into_protected_scope : Error<
"switch case is in protected scope">;
+def warn_cxx98_compat_switch_into_protected_scope : Warning<
+ "switch case would be in a protected scope in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def err_indirect_goto_without_addrlabel : Error<
"indirect goto in function with no address-of-label expressions">;
def err_indirect_goto_in_protected_scope : Error<
"indirect goto might cross protected scopes">;
+def warn_cxx98_compat_indirect_goto_in_protected_scope : Warning<
+ "indirect goto might cross protected scopes in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
def note_indirect_goto_target : Note<"possible target of indirect goto">;
def note_protected_by_variable_init : Note<
"jump bypasses variable initialization">;
def note_protected_by_variable_nontriv_destructor : Note<
"jump bypasses variable with a non-trivial destructor">;
+def note_protected_by_variable_non_pod : Note<
+ "jump bypasses initialization of non-POD variable">;
def note_protected_by_cleanup : Note<
"jump bypasses initialization of variable with __attribute__((cleanup))">;
def note_protected_by_vla_typedef : Note<
@@ -2896,9 +3232,11 @@ def err_flexible_array_empty_struct : Error<
def err_flexible_array_has_nonpod_type : Error<
"flexible array member %0 of non-POD element type %1">;
def ext_flexible_array_in_struct : Extension<
- "%0 may not be nested in a struct due to flexible array member">;
+ "%0 may not be nested in a struct due to flexible array member">,
+ InGroup<FlexibleArrayExtensions>;
def ext_flexible_array_in_array : Extension<
- "%0 may not be used as an array element due to flexible array member">;
+ "%0 may not be used as an array element due to flexible array member">,
+ InGroup<FlexibleArrayExtensions>;
def err_flexible_array_init : Error<
"initialization of flexible array member is not allowed">;
def ext_flexible_array_empty_aggregate_ms : Extension<
@@ -2913,18 +3251,29 @@ def ext_flexible_array_empty_aggregate_gnu : Extension<
def ext_flexible_array_union_gnu : Extension<
"flexible array member %0 in a union is a GNU extension">, InGroup<GNU>;
-let CategoryName = "Automatic Reference Counting Issue" in {
+let CategoryName = "ARC Semantic Issue" in {
// ARC-mode diagnostics.
+
+let CategoryName = "ARC Weak References" in {
+
def err_arc_weak_no_runtime : Error<
"the current deployment target does not support automated __weak references">;
def err_arc_unsupported_weak_class : Error<
"class is incompatible with __weak references">;
def err_arc_weak_unavailable_assign : Error<
"assignment of a weak-unavailable object to a __weak object">;
+def err_arc_weak_unavailable_property : Error<
+ "synthesis of a weak-unavailable property is disallowed "
+ "because it requires synthesis of an ivar of the __weak object">;
def err_arc_convesion_of_weak_unavailable : Error<
"%select{implicit conversion|cast}0 of weak-unavailable object of type %1 to"
" a __weak object of type %2">;
+
+} // end "ARC Weak References" category
+
+let CategoryName = "ARC Restrictions" in {
+
def err_arc_illegal_explicit_message : Error<
"ARC forbids explicit message send of %0">;
def err_arc_unused_init_message : Error<
@@ -2935,8 +3284,10 @@ def err_arc_mismatched_cast : Error<
"%select{%2|a non-Objective-C pointer type %2|a block pointer|"
"an Objective-C pointer|an indirect pointer to an Objective-C pointer}1"
" to %3 is disallowed with ARC">;
+def err_arc_nolifetime_behavior : Error<
+ "explicit ownership qualifier on cast result has no effect">;
def err_arc_objc_object_in_struct : Error<
- "ARC forbids Objective-C objects in structs or unions">;
+ "ARC forbids %select{Objective-C objects|blocks}0 in structs or unions">;
def err_arc_objc_property_default_assign_on_object : Error<
"ARC forbids synthesizing a property of an Objective-C object "
"with unspecified ownership or storage attribute">;
@@ -2944,6 +3295,9 @@ def err_arc_illegal_selector : Error<
"ARC forbids use of %0 in a @selector">;
def err_arc_illegal_method_def : Error<
"ARC forbids implementation of %0">;
+
+} // end "ARC Restrictions" category
+
def err_arc_lost_method_convention : Error<
"method was declared as %select{an 'alloc'|a 'copy'|an 'init'|a 'new'}0 "
"method, but its implementation doesn't match because %select{"
@@ -2956,8 +3310,10 @@ def note_arc_gained_method_convention : Note<
"declaration in interface is not in the '%select{alloc|copy|init|new}0' "
"family because %select{its result type is not an object pointer|"
"its result type is unrelated to its receiver type}1">;
-def err_typecheck_arr_assign_self : Error<
+def err_typecheck_arc_assign_self : Error<
"cannot assign to 'self' outside of a method in the init family">;
+def err_typecheck_arc_assign_self_class_method : Error<
+ "cannot assign to 'self' in a class method">;
def err_typecheck_arr_assign_enumeration : Error<
"fast enumeration variables can't be modified in ARC by default; "
"declare the variable __strong to allow this">;
@@ -2988,6 +3344,9 @@ def warn_err_new_delete_object_array : Warning<
def err_arc_autoreleasing_var : Error<
"%select{__block variables|global variables|fields|ivars}0 cannot have "
"__autoreleasing ownership">;
+def err_arc_autoreleasing_capture : Error<
+ "cannot capture __autoreleasing variable in a "
+ "%select{block|lambda by copy}0">;
def err_arc_thread_ownership : Error<
"thread-local variable has non-trivial ownership: type is %0">;
def err_arc_indirect_no_ownership : Error<
@@ -3011,17 +3370,26 @@ def err_arc_may_not_respond : Error<
"no visible @interface for %0 declares the selector %1">;
def err_arc_receiver_forward_instance : Error<
"receiver type %0 for instance message is a forward declaration">;
+def warn_receiver_forward_instance : Warning<
+ "receiver type %0 for instance message is a forward declaration">,
+ InGroup<DiagGroup<"receiver-forward-class">>, DefaultIgnore;
def err_arc_collection_forward : Error<
"collection expression type %0 is a forward declaration">;
def err_arc_multiple_method_decl : Error<
"multiple methods named %0 found with mismatched result, "
"parameter type or attributes">;
+
+let CategoryName = "ARC Retain Cycle" in {
+
def warn_arc_retain_cycle : Warning<
"capturing %0 strongly in this block is likely to lead to a retain cycle">,
InGroup<ARCRetainCycles>;
def note_arc_retain_cycle_owner : Note<
"block will be retained by %select{the captured object|an object strongly "
"retained by the captured object}0">;
+
+} // end "ARC Retain Cycle" category
+
def note_nontrivial_objc_ownership : Note<
"because type %0 has %select{no|no|__strong|__weak|__autoreleasing}1 "
"ownership">;
@@ -3029,6 +3397,8 @@ def warn_arc_object_memaccess : Warning<
"%select{destination for|source of}0 this %1 call is a pointer to "
"ownership-qualified type %2">, InGroup<ARCNonPodMemAccess>;
+let CategoryName = "ARC and @properties" in {
+
def err_arc_strong_property_ownership : Error<
"existing ivar %1 for strong property %0 may not be "
"%select{|__unsafe_unretained||__weak}2">;
@@ -3038,10 +3408,15 @@ def err_arc_assign_property_ownership : Error<
def err_arc_inconsistent_property_ownership : Error<
"%select{|unsafe_unretained|strong|weak}1 property %0 may not also be "
"declared %select{|__unsafe_unretained|__strong|__weak|__autoreleasing}2">;
+
+} // end "ARC and @properties" category
+
def err_arc_atomic_ownership : Error<
"cannot perform atomic operation on a pointer to type %0: type has "
"non-trivial ownership">;
+let CategoryName = "ARC Casting Rules" in {
+
def err_arc_bridge_cast_incompatible : Error<
"incompatible types casting %0 to %1 with a %select{__bridge|"
"__bridge_transfer|__bridge_retained}2 cast">;
@@ -3050,14 +3425,19 @@ def err_arc_bridge_cast_wrong_kind : Error<
"%select{Objective-C|block|C}2 pointer type %3 cannot use %select{__bridge|"
"__bridge_transfer|__bridge_retained}4">;
def err_arc_cast_requires_bridge : Error<
- "cast of %select{Objective-C|block|C}0 pointer type %1 to "
- "%select{Objective-C|block|C}2 pointer type %3 requires a bridged cast">;
+ "%select{cast|implicit conversion}0 of %select{Objective-C|block|C}1 "
+ "pointer type %2 to %select{Objective-C|block|C}3 pointer type %4 "
+ "requires a bridged cast">;
def note_arc_bridge : Note<
"use __bridge to convert directly (no change in ownership)">;
def note_arc_bridge_transfer : Note<
- "use __bridge_transfer to transfer ownership of a +1 %0 into ARC">;
+ "use %select{__bridge_transfer|CFBridgingRelease call}1 to transfer "
+ "ownership of a +1 %0 into ARC">;
def note_arc_bridge_retained : Note<
- "use __bridge_retained to make an ARC object available as a +1 %0">;
+ "use %select{__bridge_retained|CFBridgingRetain call}1 to make an "
+ "ARC object available as a +1 %0">;
+
+} // ARC Casting category
} // ARC category name
@@ -3086,15 +3466,11 @@ def err_illegal_decl_mempointer_in_nonclass : Error<
def err_mempointer_in_nonclass_type : Error<
"member pointer refers into non-class type %0">;
def err_reference_to_void : Error<"cannot form a reference to 'void'">;
-def err_qualified_block_pointer_type : Error<
- "qualifier specification on block pointer type not allowed">;
def err_nonfunction_block_type : Error<
"block pointer to non-function type is invalid">;
def err_return_block_has_expr : Error<"void block should not return a value">;
def err_block_return_missing_expr : Error<
"non-void block should return a value">;
-def err_block_with_return_type_requires_args : Error<
- "block with explicit return type requires argument list">;
def err_func_def_incomplete_result : Error<
"incomplete result type %0 in function definition">;
def err_atomic_specifier_bad_type : Error<
@@ -3105,9 +3481,6 @@ def err_atomic_specifier_bad_type : Error<
// Expressions.
def ext_sizeof_function_type : Extension<
"invalid application of 'sizeof' to a function type">, InGroup<PointerArith>;
-def err_sizeof_alignof_overloaded_function_type : Error<
- "invalid application of '%select{sizeof|__alignof|vec_step}0' to an "
- "overloaded function">;
def ext_sizeof_void_type : Extension<
"invalid application of '%select{sizeof|__alignof|vec_step}0' to a void "
"type">, InGroup<PointerArith>;
@@ -3124,7 +3497,8 @@ def err_offsetof_record_type : Error<
"offsetof requires struct, union, or class type, %0 invalid">;
def err_offsetof_array_type : Error<"offsetof requires array type, %0 invalid">;
def ext_offsetof_extended_field_designator : Extension<
- "using extended field designator is an extension">;
+ "using extended field designator is an extension">,
+ InGroup<DiagGroup<"extended-offsetof">>;
def warn_offsetof_non_pod_type : ExtWarn<"offset of on non-POD type %0">,
InGroup<InvalidOffsetof>;
def err_offsetof_bitfield : Error<"cannot compute offset of bit-field %0">;
@@ -3185,6 +3559,12 @@ def warn_self_assignment : Warning<
"explicitly assigning a variable of type %0 to itself">,
InGroup<SelfAssignment>, DefaultIgnore;
+def warn_string_plus_int : Warning<
+ "adding %0 to a string does not append to the string">,
+ InGroup<StringPlusInt>;
+def note_string_plus_int_silence : Note<
+ "use array indexing to silence this warning">;
+
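+// Illustrative sketch (not part of the patch) of what the StringPlusInt group is
+// meant to catch, with the suggested array-indexing rewrite:
+//
+//     const char *p = "hello" + 2;   // warning: adding 'int' to a string does not
+//                                    //          append to the string
+//     const char *q = &"hello"[2];   // the suggested array-indexing form
+//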
def warn_sizeof_array_param : Warning<
"sizeof on array function parameter will return size of %0 instead of %1">,
InGroup<SizeofArrayArgument>;
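
Illustration of the sizeof-on-array-parameter warning (function and parameter names
invented):

    void f(int arr[10]) {
      (void)sizeof(arr);  // warning: sizeof on array function parameter will return
    }                     //          size of 'int *' instead of 'int [10]'
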
@@ -3232,7 +3612,7 @@ def err_typecheck_member_reference_unknown : Error<
"cannot refer to member %0 in %1 with '%select{.|->}2'">;
def err_member_reference_needs_call : Error<
"base of member reference is a function; perhaps you meant to call "
- "it%select{| with no arguments}?">;
+ "it%select{| with no arguments}0?">;
def warn_subscript_is_char : Warning<"array subscript is of type 'char'">,
InGroup<CharSubscript>, DefaultIgnore;
@@ -3244,6 +3624,11 @@ def err_member_not_yet_instantiated : Error<
def note_non_instantiated_member_here : Note<
"not-yet-instantiated member is declared here">;
+def err_enumerator_does_not_exist : Error<
+ "enumerator %0 does not exist in instantiation of %1">;
+def note_enum_specialized_here : Note<
+ "enum %0 was explicitly specialized here">;
+
def err_member_redeclared : Error<"class member cannot be redeclared">;
def err_member_name_of_class : Error<"member %0 has the same name as its class">;
def err_member_def_undefined_record : Error<
@@ -3252,13 +3637,11 @@ def err_member_def_does_not_match : Error<
"out-of-line definition of %0 does not match any declaration in %1">;
def err_member_def_does_not_match_suggest : Error<
"out-of-line definition of %0 does not match any declaration in %1; "
- "did you mean %2">;
+ "did you mean %2?">;
def err_member_def_does_not_match_ret_type : Error<
"out-of-line definition of %q0 differs from the declaration in the return type">;
def err_nonstatic_member_out_of_line : Error<
"non-static data member defined out-of-line">;
-def err_nonstatic_flexible_variable : Error<
- "non-static initialization of a variable with flexible array member">;
def err_qualified_typedef_declarator : Error<
"typedef declarator cannot be qualified">;
def err_qualified_param_declarator : Error<
@@ -3313,7 +3696,11 @@ def err_array_init_non_constant_array : Error<
"cannot initialize array of type %0 with non-constant array of type %1">;
def ext_array_init_copy : Extension<
"initialization of an array of type %0 from a compound literal of type %1 is "
- "a GNU extension">;
+ "a GNU extension">, InGroup<GNU>;
+// This is intentionally not disabled by -Wno-gnu.
+def ext_array_init_parens : ExtWarn<
+ "parenthesized initialization of a member array is a GNU extension">,
+ InGroup<DiagGroup<"gnu-array-member-paren-init">>, DefaultError;
def warn_deprecated_string_literal_conversion : Warning<
"conversion from string literal to %0 is deprecated">, InGroup<DeprecatedWritableStr>;
def err_realimag_invalid_type : Error<"invalid type %0 to %1 operator">;
@@ -3355,14 +3742,10 @@ def warn_pointer_indirection_from_incompatible_type : Warning<
"behavior.">,
InGroup<DiagGroup<"undefined-reinterpret-cast">>, DefaultIgnore;
-def err_assignment_requires_nonfragile_object : Error<
- "cannot assign to class object in non-fragile ABI (%0 invalid)">;
-def err_direct_interface_unsupported : Error<
- "indirection to an interface is not supported (%0 invalid)">;
+def err_objc_object_assignment : Error<
+ "cannot assign to class object (%0 invalid)">;
def err_typecheck_invalid_operands : Error<
"invalid operands to binary expression (%0 and %1)">;
-def err_typecheck_sub_ptr_object : Error<
- "subtraction of pointer %0 requires pointee to be a complete object type">;
def err_typecheck_sub_ptr_compatible : Error<
"%0 and %1 are not pointers to compatible types">;
def ext_typecheck_ordered_comparison_of_pointer_integer : ExtWarn<
@@ -3383,13 +3766,13 @@ def ext_typecheck_comparison_of_distinct_pointers : ExtWarn<
"comparison of distinct pointer types (%0 and %1)">;
def ext_typecheck_cond_incompatible_operands : ExtWarn<
"incompatible operand types (%0 and %1)">;
+def err_cond_voidptr_arc : Error <
+ "operands to conditional of types %0 and %1 are incompatible in ARC mode">;
def err_typecheck_comparison_of_distinct_pointers : Error<
"comparison of distinct pointer types (%0 and %1)">;
def ext_typecheck_comparison_of_distinct_pointers_nonstandard : ExtWarn<
"comparison of distinct pointer types (%0 and %1) uses non-standard "
"composite pointer type %2">;
-def err_typecheck_vector_comparison : Error<
- "comparison of vector types (%0 and %1) not supported yet">;
def err_typecheck_assign_const : Error<"read-only variable is not assignable">;
def err_stmtexpr_file_scope : Error<
"statement expression not allowed at file scope">;
@@ -3414,26 +3797,15 @@ def warn_null_in_comparison_operation : Warning<
InGroup<DiagGroup<"null-arithmetic">>;
def err_invalid_this_use : Error<
- "invalid use of 'this' outside of a nonstatic member function">;
+ "invalid use of 'this' outside of a non-static member function">;
def err_invalid_member_use_in_static_method : Error<
"invalid use of member %0 in static member function">;
def err_invalid_qualified_function_type : Error<
- "type qualifier is not allowed on this function">;
-def err_invalid_ref_qualifier_function_type : Error<
- "ref-qualifier '%select{&&|&}0' is only allowed on non-static member functions,"
- " member function pointers, and typedefs of function types">;
-def ext_qualified_function_type_template_arg : ExtWarn<
- "template argument of '%0' qualified function type is a GNU extension">,
- InGroup<GNU>;
-
-def err_invalid_qualified_function_pointer : Error<
- "type qualifier is not allowed on this function %select{pointer|reference}0">;
-def err_invalid_qualified_typedef_function_type_use : Error<
- "a qualified function type cannot be used to declare a "
- "%select{static member|nonmember}0 function">;
-def err_invalid_ref_qualifier_typedef_function_type_use : Error<
- "%select{static member|nonmember}0 function cannot have a ref-qualifier "
- "'%select{&&|&}1'">;
+ "%select{static |non-}0member function %select{of type %2 |}1"
+ "cannot have '%3' qualifier">;
+def err_compound_qualified_function_type : Error<
+ "%select{block pointer|pointer|reference}0 to function type %select{%2 |}1"
+ "cannot have '%3' qualifier">;
def err_ref_qualifier_overload : Error<
"cannot overload a member function %select{without a ref-qualifier|with "
@@ -3441,13 +3813,17 @@ def err_ref_qualifier_overload : Error<
"without a ref-qualifier|with ref-qualifier '&'|with ref-qualifier '&&'}1">;
def err_invalid_non_static_member_use : Error<
- "invalid use of nonstatic data member %0">;
+ "invalid use of non-static data member %0">;
+def err_nested_non_static_member_use : Error<
+ "%select{call to non-static member function|use of non-static data member}0 "
+ "%2 of %1 from nested type %3">;
+def warn_cxx98_compat_non_static_member_use : Warning<
+ "use of non-static data member %0 in an unevaluated context is "
+ "incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
def err_invalid_incomplete_type_use : Error<
"invalid use of incomplete type %0">;
def err_builtin_func_cast_more_than_one_arg : Error<
"function-style cast to a builtin type can only take one argument">;
-def err_builtin_direct_init_more_than_one_arg : Error<
- "initializer of a builtin type can only take one argument">;
def err_value_init_for_array_type : Error<
"array types cannot be value-initialized">;
def warn_format_nonliteral_noargs : Warning<
@@ -3470,13 +3846,43 @@ def err_invalid_property_name : Error<
"%0 is not a valid property name (accessing an object of type %1)">;
def err_getter_not_found : Error<
"expected getter method not found on object of type %0">;
+def err_objc_subscript_method_not_found : Error<
+ "expected method to %select{read|write}1 %select{dictionary|array}2 element not "
+ "found on object of type %0">;
+def err_objc_subscript_index_type : Error<
+ "method index parameter type %0 is not an integral type">;
+def err_objc_subscript_key_type : Error<
+ "method key parameter type %0 is not an object type">;
+def err_objc_subscript_dic_object_type : Error<
+ "method object parameter type %0 is not an object type">;
+def err_objc_subscript_object_type : Error<
+ "cannot assign to this %select{dictionary|array}1 because the assigning method's second parameter"
+ " of type %0 is not an Objective-C pointer type">;
+def err_objc_subscript_base_type : Error<
+ "%select{dictionary|array}1 subscript base type %0 is not an Objective-C object">;
+def err_objc_multiple_subscript_type_conversion : Error<
+ "indexing expression is invalid because subscript type %0 has "
+ "multiple type conversion functions">;
+def err_objc_subscript_type_conversion : Error<
+ "indexing expression is invalid because subscript type %0 is not an integral"
+ " or Objective-C pointer type">;
+def err_objc_subscript_pointer : Error<
+ "indexing expression is invalid because subscript type %0 is not an"
+ " Objective-C pointer">;
+def err_objc_indexing_method_result_type : Error<
+ "method for accessing %select{dictionary|array}1 element must have Objective-C"
+ " object return type instead of %0">;
+def err_objc_index_incomplete_class_type : Error<
+ "Objective-C index expression has incomplete class type %0">;
+def err_illegal_container_subscripting_op : Error<
+ "illegal operation on Objective-C container subscripting">;
def err_property_not_found_forward_class : Error<
"property %0 cannot be found in forward class object %1">;
def err_property_not_as_forward_class : Error<
"property %0 refers to an incomplete Objective-C class %1 "
"(with no @interface available)">;
def note_forward_class : Note<
- "forward class is declared here">;
+ "forward declaration of class here">;
def err_duplicate_property : Error<
"property has a previous declaration">;
def ext_gnu_void_ptr : Extension<
@@ -3486,16 +3892,22 @@ def ext_gnu_ptr_func_arith : Extension<
"arithmetic on%select{ a|}0 pointer%select{|s}0 to%select{ the|}2 function "
"type%select{|s}2 %1%select{| and %3}2 is a GNU extension">,
InGroup<PointerArith>;
-def error_readonly_property_assignment : Error<
- "assigning to property with 'readonly' attribute not allowed">;
def error_readonly_message_assignment : Error<
"assigning to 'readonly' return result of an objective-c message not allowed">;
def ext_integer_increment_complex : Extension<
"ISO C does not support '++'/'--' on complex integer type %0">;
def ext_integer_complement_complex : Extension<
"ISO C does not support '~' for complex conjugation of %0">;
-def error_nosetter_property_assignment : Error<
- "setter method is needed to assign to object using property" " assignment syntax">;
+def err_nosetter_property_assignment : Error<
+ "%select{assignment to readonly property|"
+ "no setter method %1 for assignment to property}0">;
+def err_nosetter_property_incdec : Error<
+ "%select{%select{increment|decrement}1 of readonly property|"
+ "no setter method %2 for %select{increment|decrement}1 of property}0">;
+def err_nogetter_property_compound_assignment : Error<
+ "a getter method is needed to perform a compound assignment on a property">;
+def err_nogetter_property_incdec : Error<
+ "no getter method %1 for %select{increment|decrement} of property">;
def error_no_subobject_property_setting : Error<
"expression is not assignable">;
def err_qualified_objc_access : Error<
@@ -3553,8 +3965,6 @@ def warn_register_objc_catch_parm : Warning<
"'register' storage specifier on @catch parameter will be ignored">;
def err_qualified_objc_catch_parm : Error<
"@catch parameter declarator cannot be qualified">;
-def err_objc_pointer_cxx_catch_gnu : Error<
- "can't catch Objective C exceptions in C++ in the GNU runtime">;
def warn_objc_pointer_cxx_catch_fragile : Warning<
"can not catch an exception thrown with @throw in C++ in the non-unified "
"exception model">, InGroup<ObjCNonUnifiedException>;
@@ -3566,12 +3976,12 @@ def err_incomplete_type_objc_at_encode : Error<
def warn_setter_getter_impl_required : Warning<
"property %0 requires method %1 to be defined - "
"use @synthesize, @dynamic or provide a method implementation "
- "in this class implementation">;
+ "in this class implementation">,
+ InGroup<ObjCPropertyImpl>;
def warn_setter_getter_impl_required_in_category : Warning<
"property %0 requires method %1 to be defined - "
- "use @dynamic or provide a method implementation in this category">;
-def note_property_impl_required : Note<
- "implementation is here">;
+ "use @dynamic or provide a method implementation in this category">,
+ InGroup<ObjCPropertyImpl>;
def note_parameter_named_here : Note<
"passing argument to parameter %0 here">;
def note_parameter_here : Note<
@@ -3604,6 +4014,9 @@ def err_bad_const_cast_dest : Error<
"which is not a reference, pointer-to-object, or pointer-to-data-member">;
def ext_cast_fn_obj : Extension<
"cast between pointer-to-function and pointer-to-object is an extension">;
+def warn_cxx98_compat_cast_fn_obj : Warning<
+ "cast between pointer-to-function and pointer-to-object is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
def err_bad_reinterpret_cast_small_int : Error<
"cast from pointer to smaller type %2 loses information">;
def err_bad_cxx_cast_vector_to_scalar_different_size : Error<
@@ -3625,7 +4038,6 @@ def err_bad_static_cast_member_pointer_nonmp : Error<
def err_bad_cxx_cast_member_pointer_size : Error<
"cannot %select{||reinterpret_cast||C-style cast|}0 from member pointer "
"type %1 to member pointer type %2 of different size">;
-def err_bad_static_cast_incomplete : Error<"%0 is an incomplete type">;
def err_bad_reinterpret_cast_reference : Error<
"reinterpret_cast of a %0 to %1 needs its address which is not allowed">;
def warn_undefined_reinterpret_cast : Warning<
@@ -3675,7 +4087,8 @@ def err_placement_new_non_placement_delete : Error<
"'new' expression with placement arguments refers to non-placement "
"'operator delete'">;
def err_array_size_not_integral : Error<
- "array size expression must have integral or enumerated type, not %0">;
+ "array size expression must have integral or %select{|unscoped }0"
+ "enumeration type, not %1">;
def err_array_size_incomplete_type : Error<
"array size expression has incomplete class type %0">;
def err_array_size_explicit_conversion : Error<
@@ -3689,6 +4102,10 @@ def ext_array_size_conversion : Extension<
"implicit conversion from array size expression of type %0 to "
"%select{integral|enumeration}1 type %2 is a C++11 extension">,
InGroup<CXX11>;
+def warn_cxx98_compat_array_size_conversion : Warning<
+ "implicit conversion from array size expression of type %0 to "
+ "%select{integral|enumeration}1 type %2 is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
def err_address_space_qualified_new : Error<
"'new' cannot allocate objects of type %0 in address space '%1'">;
def err_address_space_qualified_delete : Error<
@@ -3703,7 +4120,8 @@ def ext_delete_void_ptr_operand : ExtWarn<
def err_ambiguous_delete_operand : Error<"ambiguous conversion of delete "
"expression of type %0 to a pointer">;
def warn_delete_incomplete : Warning<
- "deleting pointer to incomplete type %0 may cause undefined behaviour">;
+ "deleting pointer to incomplete type %0 may cause undefined behaviour">,
+ InGroup<DiagGroup<"delete-incomplete">>;
def err_delete_incomplete_class_type : Error<
"deleting incomplete class type %0; no conversions to pointer type">;
def warn_delete_array_type : Warning<
@@ -3717,10 +4135,10 @@ def note_member_declared_here : Note<
def err_decrement_bool : Error<"cannot decrement expression of type bool">;
def warn_increment_bool : Warning<
"incrementing expression of type bool is deprecated">, InGroup<Deprecated>;
-def ext_catch_incomplete_ptr : ExtWarn<
- "ISO C++ forbids catching a pointer to incomplete type %0">;
-def ext_catch_incomplete_ref : ExtWarn<
- "ISO C++ forbids catching a reference to incomplete type %0">;
+def err_catch_incomplete_ptr : Error<
+ "cannot catch pointer to incomplete type %0">;
+def err_catch_incomplete_ref : Error<
+ "cannot catch reference to incomplete type %0">;
def err_catch_incomplete : Error<"cannot catch incomplete type %0">;
def err_catch_rvalue_ref : Error<"cannot catch exceptions by rvalue reference">;
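
Catching a pointer or reference to an incomplete type is promoted here from an
ExtWarn to a hard error; an illustrative sketch (type name invented):

    struct Incomplete;
    void f() {
      try {
      } catch (Incomplete *p) {  // error: cannot catch pointer to incomplete
      }                          //        type 'Incomplete'
    }
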
def err_qualified_catch_declarator : Error<
@@ -3728,8 +4146,6 @@ def err_qualified_catch_declarator : Error<
def err_early_catch_all : Error<"catch-all handler must come last">;
def err_bad_memptr_rhs : Error<
"right hand operand to %0 has non pointer-to-member type %1">;
-def err_memptr_rhs_to_incomplete : Error<
- "cannot dereference pointer into incomplete class type %0">;
def err_bad_memptr_lhs : Error<
"left hand operand to %0 must be a %select{|pointer to }1class "
"compatible with the right hand operand, but is %2">;
@@ -3757,6 +4173,10 @@ def note_hidden_overloaded_virtual_declared_here : Note<
def warn_using_directive_in_header : Warning<
"using namespace directive in global context in header">,
InGroup<HeaderHygiene>, DefaultIgnore;
+def warn_overaligned_type : Warning<
+ "type %0 requires %1 bytes of alignment and the default allocator only "
+ "guarantees %2 bytes">,
+ InGroup<OveralignedType>, DefaultIgnore;
def err_conditional_void_nonvoid : Error<
"%select{left|right}1 operand to ? is void, but %select{right|left}1 operand "
@@ -3775,11 +4195,79 @@ def err_throw_incomplete_ptr : Error<
def err_return_in_constructor_handler : Error<
"return in the catch of a function try block of a constructor is illegal">;
+let CategoryName = "Lambda Issue" in {
+ def err_capture_more_than_once : Error<
+ "%0 can appear only once in a capture list">;
+ def err_reference_capture_with_reference_default : Error<
+ "'&' cannot precede a capture when the capture default is '&'">;
+ def err_this_capture_with_copy_default : Error<
+ "'this' cannot be explicitly captured when the capture default is '='">;
+ def err_copy_capture_with_copy_default : Error<
+ "'&' must precede a capture when the capture default is '='">;
+ def err_capture_does_not_name_variable : Error<
+ "%0 in capture list does not name a variable">;
+ def err_capture_non_automatic_variable : Error<
+ "%0 cannot be captured because it does not have automatic storage "
+ "duration">;
+ def err_this_capture : Error<
+ "'this' cannot be %select{implicitly |}0captured in this context">;
+ def err_lambda_capture_block : Error<
+ "__block variable %0 cannot be captured in a lambda expression">;
+ def err_lambda_capture_anonymous_var : Error<
+ "unnamed variable cannot be implicitly captured in a lambda expression">;
+ def err_lambda_capture_vm_type : Error<
+ "variable %0 with variably modified type cannot be captured in "
+ "a lambda expression">;
+ def err_lambda_impcap : Error<
+ "variable %0 cannot be implicitly captured in a lambda with no "
+ "capture-default specified">;
+ def note_lambda_decl : Note<"lambda expression begins here">;
+ def err_lambda_unevaluated_operand : Error<
+ "lambda expression in an unevaluated operand">;
+ def ext_lambda_implies_void_return : ExtWarn<
+ "C++11 requires lambda with omitted result type to consist of a single "
+ "return statement">,
+ InGroup<LambdaExtensions>;
+ def err_lambda_return_init_list : Error<
+ "cannot deduce lambda return type from initializer list">;
+ def err_lambda_capture_default_arg : Error<
+ "lambda expression in default argument cannot capture any entity">;
+ def err_lambda_unexpanded_pack : Error<
+ "unexpanded function parameter pack capture is unsupported">;
+ def err_lambda_incomplete_result : Error<
+ "incomplete result type %0 in lambda expression">;
+ def err_lambda_objc_object_result : Error<
+ "non-pointer Objective-C class type %0 in lambda expression result">;
+ def ext_lambda_default_arguments : ExtWarn<
+ "C++11 forbids default arguments for lambda expressions">,
+ InGroup<LambdaExtensions>;
+ def err_noreturn_lambda_has_return_expr : Error<
+ "lambda declared 'noreturn' should not return">;
+ def warn_maybe_falloff_nonvoid_lambda : Warning<
+ "control may reach end of non-void lambda">,
+ InGroup<ReturnType>;
+ def warn_falloff_nonvoid_lambda : Warning<
+ "control reaches end of non-void lambda">,
+ InGroup<ReturnType>;
+ def err_access_lambda_capture : Error<
+ // The ERRORs represent other special members that aren't constructors, in
+ // hopes that someone will bother noticing and reporting if they appear
+ "capture of variable '%0' as type %1 calls %select{private|protected}3 "
+ "%select{default |copy |move |*ERROR* |*ERROR* |*ERROR* |}2constructor">,
+ AccessControl;
+ def note_lambda_to_block_conv : Note<
+ "implicit capture of lambda object due to conversion to block pointer "
+ "here">;
+}
+
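+// A few of the new lambda capture errors in one illustrative snippet (assuming
+// -std=c++11; names invented, not part of the patch):
+//
+//     void f() {
+//       int x = 0;
+//       auto a = [x, x] {};        // error: 'x' can appear only once in a capture list
+//       auto b = [&, &x] {};       // error: '&' cannot precede a capture when the
+//                                  //        capture default is '&'
+//       auto c = [] { return x; }; // error: variable 'x' cannot be implicitly captured
+//                                  //        in a lambda with no capture-default specified
+//     }
+//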
def err_operator_arrow_circular : Error<
"circular pointer delegation detected">;
def err_pseudo_dtor_base_not_scalar : Error<
"object expression of non-scalar type %0 cannot be used in a "
"pseudo-destructor expression">;
+def ext_pseudo_dtor_on_void : ExtWarn<
+ "pseudo-destructors on type void are a Microsoft extension">,
+ InGroup<Microsoft>;
def err_pseudo_dtor_type_mismatch : Error<
"the type of object expression (%0) does not match the type being destroyed "
"(%1) in pseudo-destructor expression">;
@@ -3791,9 +4279,6 @@ def err_dtor_expr_without_call : Error<
def err_pseudo_dtor_destructor_non_type : Error<
"%0 does not refer to a type name in pseudo-destructor expression; expected "
"the name of type %1">;
-def err_pseudo_dtor_template : Error<
- "specialization of template %0 does not refer to a scalar type in pseudo-"
- "destructor expression">;
def err_invalid_use_of_function_type : Error<
"a function type is not allowed here">;
def err_invalid_use_of_array_type : Error<"an array type is not allowed here">;
@@ -3809,10 +4294,10 @@ def err_typecheck_deleted_function : Error<
"conversion function from %0 to %1 invokes a deleted function">;
def err_expected_class_or_namespace : Error<"expected a class or namespace">;
-def err_missing_qualified_for_redecl : Error<
- "must qualify the name %0 to declare %q1 in this scope">;
-def err_invalid_declarator_scope : Error<
- "definition or redeclaration of %0 not in a namespace enclosing %1">;
+def err_expected_class : Error<"%0 is not a class%select{ or namespace|, "
+ "namespace, or scoped enumeration}1">;
+def err_invalid_declarator_scope : Error<"cannot define or redeclare %0 here "
+ "because namespace %1 does not enclose namespace %2">;
def err_invalid_declarator_global_scope : Error<
"definition or redeclaration of %0 cannot name the global scope">;
def err_invalid_declarator_in_function : Error<
@@ -3824,8 +4309,6 @@ def err_cannot_form_pointer_to_member_of_reference_type : Error<
"cannot form a pointer-to-member to member %0 of reference type %1">;
def err_incomplete_object_call : Error<
"incomplete type in call to object of type %0">;
-def err_incomplete_pointer_to_member_return : Error<
- "incomplete return type %0 of pointer-to-member constant">;
def warn_condition_is_assignment : Warning<"using the result of an "
"assignment as a condition without parentheses">,
@@ -3848,12 +4331,6 @@ def note_equality_comparison_to_assign : Note<
def note_equality_comparison_silence : Note<
"remove extraneous parentheses around the comparison to silence this warning">;
-def warn_synthesized_ivar_access : Warning<
- "direct access of synthesized ivar by using property access %0">,
- InGroup<NonfragileAbi2>, DefaultIgnore;
-
-def note_global_declared_at : Note<"global variable declared here">;
-
// assignment related diagnostics (also for argument passing, returning, etc).
// In most of these diagnostics the %2 is a value from the
// Sema::AssignmentAction enumeration
@@ -3863,11 +4340,24 @@ def err_typecheck_convert_incompatible : Error<
"%select{from incompatible type|to parameter of incompatible type|"
"from a function with incompatible result type|to incompatible type|"
"with an expression of incompatible type|to parameter of incompatible type|"
- "to incompatible type}2 %1; "
- "%select{|dereference with *|"
- "take the address with &|"
- "remove *|"
- "remove &}3">;
+ "to incompatible type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3"
+ "%select{|: different classes (%5 vs %6)"
+ "|: different number of parameters (%5 vs %6)"
+ "|: type mismatch at %ordinal5 parameter (%6 vs %7)"
+ "|: different return type (%5 vs %6)"
+ "|: different qualifiers ("
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}5 vs "
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}6)}4">;
+def err_typecheck_missing_return_type_incompatible : Error<
+ "return type %0 must match previous return type %1 when %select{block "
+ "literal|lambda expression}2 has unspecified explicit return type">;
+
def warn_incompatible_qualified_id : Warning<
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
" %0 "
@@ -3880,21 +4370,23 @@ def ext_typecheck_convert_pointer_int : ExtWarn<
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
" %0 "
"%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1; "
- "%select{|dereference with *|"
- "take the address with &|"
- "remove *|"
- "remove &}3">;
+ "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
+ InGroup<IntConversion>;
def ext_typecheck_convert_int_pointer : ExtWarn<
"incompatible integer to pointer conversion "
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
" %0 "
"%select{from|to parameter of type|from a function with result type|to type|"
- "with an expression of type|to parameter of type|to type}2 %1; "
- "%select{|dereference with *|"
- "take the address with &|"
- "remove *|"
- "remove &}3">;
+ "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
+ InGroup<IntConversion>;
def ext_typecheck_convert_pointer_void_func : Extension<
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
" %0 "
@@ -3914,10 +4406,10 @@ def ext_typecheck_convert_incompatible_pointer : ExtWarn<
" %0 "
"%select{from|to parameter of type|from a function with result type|to type|"
"with an expression of type|to parameter of type|to type}2 %1"
- "%select{|dereference with *|"
- "take the address with &|"
- "remove *|"
- "remove &}3">,
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
InGroup<IncompatiblePointerTypes>;
def ext_typecheck_convert_discards_qualifiers : ExtWarn<
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
@@ -3939,7 +4431,7 @@ def warn_incompatible_vectors : Warning<
" %0 "
"%select{from|to parameter of type|from a function with result type|to type|"
"with an expression of type|to parameter of type|to type}2 %1">,
- InGroup<VectorConversions>, DefaultIgnore;
+ InGroup<VectorConversion>, DefaultIgnore;
def err_int_to_block_pointer : Error<
"invalid block pointer conversion "
"%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
@@ -3970,8 +4462,6 @@ def err_typecheck_incompatible_ownership : Error<
"|sending %0 to parameter of type %1"
"|casting %0 to type %1}2"
" changes retain/release properties of pointer">;
-def err_typecheck_convert_ambiguous : Error<
- "ambiguity in initializing value of type %0 with initializer of type %1">;
def err_typecheck_comparison_of_distinct_blocks : Error<
"comparison of distinct block types (%0 and %1)">;
@@ -3990,6 +4480,8 @@ def err_typecheck_duplicate_vector_components_not_mlvalue : Error<
"vector is not assignable (contains duplicate components)">;
def err_block_decl_ref_not_modifiable_lvalue : Error<
"variable is not assignable (missing __block type specifier)">;
+def err_lambda_decl_ref_not_modifiable_lvalue : Error<
+ "cannot assign to a variable captured by copy in a non-mutable lambda">;
def err_typecheck_call_not_function : Error<
"called object type %0 is not a function or function pointer">;
def err_call_incomplete_return : Error<
@@ -4018,6 +4510,8 @@ def err_typecheck_call_too_many_args_at_most : Error<
"expected at most %1, have %2">;
def note_callee_decl : Note<
"%0 declared here">;
+def note_defined_here : Note<"%0 defined here">;
+
def warn_call_wrong_number_of_arguments : Warning<
"too %select{few|many}0 arguments in call to %1">;
def err_atomic_builtin_must_be_pointer : Error<
@@ -4031,12 +4525,15 @@ def err_atomic_builtin_pointer_size : Error<
def err_atomic_op_needs_atomic : Error<
"first argument to atomic operation must be a pointer to _Atomic "
"type (%0 invalid)">;
+def err_atomic_op_needs_trivial_copy : Error<
+ "first argument to atomic operation must be a pointer to a trivially-copyable"
+ " type (%0 invalid)">;
def err_atomic_op_needs_atomic_int_or_ptr : Error<
- "first argument to atomic operation must be a pointer to atomic "
- "integer or pointer (%0 invalid)">;
-def err_atomic_op_logical_needs_atomic_int : Error<
- "first argument to logical atomic operation must be a pointer to atomic "
- "integer (%0 invalid)">;
+ "first argument to atomic operation must be a pointer to %select{|atomic }0"
+ "integer or pointer (%1 invalid)">;
+def err_atomic_op_bitwise_needs_atomic_int : Error<
+ "first argument to bitwise atomic operation must be a pointer to "
+ "%select{|atomic }0integer (%1 invalid)">;
def err_deleted_function_use : Error<"attempt to use a deleted function">;
@@ -4061,6 +4558,10 @@ def warn_cannot_pass_non_pod_arg_to_vararg : Warning<
"cannot pass object of %select{non-POD|non-trivial}0 type %1 through variadic"
" %select{function|block|method|constructor}2; call will abort at runtime">,
InGroup<DiagGroup<"non-pod-varargs">>, DefaultError;
+def warn_cxx98_compat_pass_non_pod_arg_to_vararg : Warning<
+ "passing object of trivial but non-POD type %0 through variadic"
+ " %select{function|block|method|constructor}1 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
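
Sketch for the variadic-argument checks: the existing diagnostic is a default-error
warning for non-trivial types, and the new C++98-compat warning covers types that
are trivial in C++11 but were non-POD in C++98 terms:

    #include <cstdio>
    #include <string>
    void f(const std::string &s) {
      std::printf("%s\n", s);  // default-error: cannot pass object of non-trivial
    }                          //   type 'std::string' through variadic function
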
def err_typecheck_call_invalid_ordered_compare : Error<
"ordered compare requires two args of floating point type (%0 and %1)">;
@@ -4101,9 +4602,6 @@ def warn_typecheck_cond_pointer_integer_mismatch : ExtWarn<
InGroup<DiagGroup<"conditional-type-mismatch">>;
def err_typecheck_choose_expr_requires_constant : Error<
"'__builtin_choose_expr' requires a constant expression">;
-def ext_typecheck_expression_not_constant_but_accepted : Extension<
- "expression is not a constant, but is accepted as one by GNU extensions">,
- InGroup<GNU>;
def warn_unused_expr : Warning<"expression result unused">,
InGroup<UnusedValue>;
def warn_unused_voidptr : Warning<
@@ -4112,6 +4610,9 @@ def warn_unused_voidptr : Warning<
def warn_unused_property_expr : Warning<
"property access result unused - getters should not be used for side effects">,
InGroup<UnusedValue>;
+def warn_unused_container_subscript_expr : Warning<
+ "container access result unused - container access should not be used for side effects">,
+ InGroup<UnusedValue>;
def warn_unused_call : Warning<
"ignoring return value of function declared with %0 attribute">,
InGroup<UnusedValue>;
@@ -4126,6 +4627,10 @@ def note_inequality_comparison_to_or_assign : Note<
def err_incomplete_type_used_in_type_trait_expr : Error<
"incomplete type %0 used in type trait expression">;
+def err_type_trait_arity : Error<
+ "type trait requires %0%select{| or more}1 argument%select{|s}2; have "
+ "%3 argument%s3">;
+
def err_dimension_expr_not_constant_integer : Error<
"dimension expression does not evaluate to a constant unsigned int">;
def err_expected_ident_or_lparen : Error<"expected identifier or '('">;
@@ -4171,10 +4676,6 @@ def err_invalid_conversion_between_vector_and_integer : Error<
def err_invalid_conversion_between_vector_and_scalar : Error<
"invalid conversion between vector type %0 and scalar type %1">;
-def err_overload_expr_requires_non_zero_constant : Error<
- "overload requires a non-zero constant expression as first argument">;
-def err_overload_incorrect_fntype : Error<
- "argument is not a function, or has wrong number of parameters">;
// C++ member initializers.
def err_only_constructors_take_base_inits : Error<
@@ -4183,7 +4684,7 @@ def err_only_constructors_take_base_inits : Error<
def err_multiple_mem_initialization : Error <
"multiple initializations given for non-static member %0">;
def err_multiple_mem_union_initialization : Error <
- "initializing multiple members of anonymous union">;
+ "initializing multiple members of union">;
def err_multiple_base_initialization : Error <
"multiple initializations given for base %0">;
@@ -4219,16 +4720,19 @@ def err_in_class_initializer_literal_type : Error<
"in-class initializer for static data member of type %0 requires "
"'constexpr' specifier">;
def err_in_class_initializer_non_constant : Error<
- "in-class initializer is not a constant expression">;
+ "in-class initializer for static data member is not a constant expression">;
def ext_in_class_initializer_non_constant : Extension<
- "in-class initializer is not a constant expression, accepted as an extension">;
+ "in-class initializer for static data member is not a constant expression; "
+ "folding it to a constant is a GNU extension">;
// C++ anonymous unions and GNU anonymous structs/unions
def ext_anonymous_union : Extension<
- "anonymous unions are a GNU extension in C">, InGroup<GNU>;
-def ext_anonymous_struct : Extension<
+ "anonymous unions are a C11 extension">, InGroup<C11>;
+def ext_gnu_anonymous_struct : Extension<
"anonymous structs are a GNU extension">, InGroup<GNU>;
+def ext_c11_anonymous_struct : Extension<
+ "anonymous structs are a C11 extension">, InGroup<C11>;
def err_anonymous_union_not_static : Error<
"anonymous unions at namespace or global scope must be declared 'static'">;
def err_anonymous_union_with_storage_spec : Error<
@@ -4259,7 +4763,14 @@ def ext_ms_anonymous_struct : ExtWarn<
// C++ local classes
def err_reference_to_local_var_in_enclosing_function : Error<
- "reference to local variable %0 declared in enclosed function %1">;
+ "reference to local variable %0 declared in enclosing function %1">;
+def err_reference_to_local_var_in_enclosing_block : Error<
+ "reference to local variable %0 declared in enclosing block literal">;
+def err_reference_to_local_var_in_enclosing_lambda : Error<
+ "reference to local variable %0 declared in enclosing lambda expression">;
+def err_reference_to_local_var_in_enclosing_context : Error<
+ "reference to local variable %0 declared in enclosing context">;
+
def note_local_variable_declared_here : Note<
"%0 declared here">;
def err_static_data_member_not_allowed_in_local_class : Error<
@@ -4283,14 +4794,6 @@ def err_memptr_conv_via_virtual : Error<
"conversion from pointer to member of class %0 to pointer to member "
"of class %1 via virtual base %2 is not allowed">;
-// C++ access control
-def err_conv_to_inaccessible_base : Error<
- "conversion from %0 to inaccessible base class %1">, AccessControl;
-def note_inheritance_specifier_here : Note<
- "'%0' inheritance specifier here">;
-def note_inheritance_implicitly_private_here : Note<
- "inheritance is implicitly 'private'">;
-
// C++ member name lookup
def err_ambiguous_member_multiple_subobjects : Error<
"non-static member %0 found in multiple base-class subobjects of type %1:%2">;
@@ -4353,16 +4856,18 @@ def err_operator_delete_param_type : Error<
// C++ literal operators
def err_literal_operator_outside_namespace : Error<
"literal operator %0 must be in a namespace or global scope">;
+def err_literal_operator_default_argument : Error<
+ "literal operator cannot have a default argument">;
// FIXME: This diagnostic sucks
def err_literal_operator_params : Error<
"parameter declaration for literal operator %0 is not valid">;
-def warn_user_literal_hexfloat : Warning<
- "user-defined literal with suffix '%0' is preempted by C99 hexfloat "
- "extension">, InGroup<UserDefinedLiterals>;
+def err_literal_operator_extern_c : Error<
+ "literal operator must have C++ linkage">;
def warn_user_literal_reserved : Warning<
- "user-defined literals not starting with '_' are reserved by the "
- "implementation">, InGroup<UserDefinedLiterals>;
-
+ "user-defined literal suffixes not starting with '_' are reserved; "
+ "no literal will invoke this operator">,
+ InGroup<UserDefinedLiterals>;
+
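+// The revised reserved-suffix warning makes the consequence explicit: a suffix
+// without a leading underscore can never be selected for a literal. Sketch
+// (suffix names invented):
+//
+//     long double operator"" _km(long double v) { return v * 1000.0L; }  // OK
+//     long double operator"" km(long double v);  // warning: suffix is reserved;
+//                                                // no literal will invoke this operator
+//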
// C++ conversion functions
def err_conv_function_not_member : Error<
"conversion function must be a non-static member function">;
@@ -4391,8 +4896,11 @@ def warn_not_compound_assign : Warning<
"use of unary operator that may be intended as compound assignment (%0=)">;
// C++11 explicit conversion operators
-def warn_explicit_conversion_functions : Warning<
+def ext_explicit_conversion_functions : ExtWarn<
"explicit conversion functions are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_explicit_conversion_functions : Warning<
+ "explicit conversion functions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
// C++11 defaulted functions
def err_defaulted_default_ctor_params : Error<
@@ -4454,6 +4962,9 @@ def err_incorrect_defaulted_exception_spec : Error<
"copy constructor|move constructor|copy assignment operator|move assignment "
"operator|destructor}0 does not match the "
"calculated one">;
+def err_incorrect_defaulted_constexpr : Error<
+ "defaulted definition of %select{default constructor|copy constructor|"
+ "move constructor}0 is not constexpr">;
def err_out_of_line_default_deletes : Error<
"defaulting this %select{default constructor|copy constructor|move "
"constructor|copy assignment operator|move assignment operator|destructor}0 "
@@ -4467,10 +4978,10 @@ def warn_ptr_arith_exceeds_bounds : Warning<
"contains %1 element%s2)">,
InGroup<DiagGroup<"array-bounds-pointer-arithmetic">>, DefaultIgnore;
def warn_array_index_precedes_bounds : Warning<
- "array index of '%0' indexes before the beginning of the array">,
+ "array index %0 is before the beginning of the array">,
InGroup<DiagGroup<"array-bounds">>;
def warn_array_index_exceeds_bounds : Warning<
- "array index of '%0' indexes past the end of an array (that contains %1 "
+ "array index %0 is past the end of the array (which contains %1 "
"element%s2)">, InGroup<DiagGroup<"array-bounds">>;
def note_array_index_out_of_bounds : Note<
"array %0 declared here">;
@@ -4492,7 +5003,7 @@ def warn_scanf_nonzero_width : Warning<
"zero field width in scanf format string is unused">,
InGroup<Format>;
def warn_printf_conversion_argument_type_mismatch : Warning<
- "conversion specifies type %0 but the argument has type %1">,
+ "format specifies type %0 but the argument has type %1">,
InGroup<Format>;
def warn_printf_positional_arg_exceeds_data_args : Warning <
"data argument position '%0' exceeds the number of data arguments (%1)">,
@@ -4506,9 +5017,11 @@ def warn_format_invalid_positional_specifier : Warning<
def warn_format_mix_positional_nonpositional_args : Warning<
"cannot mix positional and non-positional arguments in format string">,
InGroup<Format>;
-def warn_null_arg : Warning<
- "null passed to a callee which requires a non-null argument">,
- InGroup<NonNull>;
+def warn_static_array_too_small : Warning<
+ "array argument is too small; contains %0 elements, callee requires at least %1">,
+ InGroup<DiagGroup<"array-bounds">>;
+def note_callee_static_array : Note<
+ "callee declares array parameter as static here">;
def warn_empty_format_string : Warning<
"format string is empty">, InGroup<FormatZeroLength>;
def warn_format_string_is_wide_literal : Warning<
@@ -4529,13 +5042,26 @@ def warn_printf_nonsensical_flag: Warning<
def warn_format_nonsensical_length: Warning<
"length modifier '%0' results in undefined behavior or no effect with '%1' conversion specifier">,
InGroup<Format>;
+def warn_format_non_standard_positional_arg: ExtWarn<
+ "positional arguments are not supported by ISO C">, InGroup<FormatNonStandard>, DefaultIgnore;
+def warn_format_non_standard: ExtWarn<
+ "'%0' %select{length modifier|conversion specifier}1 is not supported by ISO C">,
+ InGroup<FormatNonStandard>, DefaultIgnore;
+def warn_format_non_standard_conversion_spec: ExtWarn<
+ "using length modifier '%0' with conversion specifier '%1' is not supported by ISO C">,
+ InGroup<FormatNonStandard>, DefaultIgnore;
def warn_printf_ignored_flag: Warning<
"flag '%0' is ignored when flag '%1' is present">,
InGroup<Format>;
def warn_scanf_scanlist_incomplete : Warning<
"no closing ']' for '%%[' in scanf format string">,
InGroup<Format>;
-
+def note_format_string_defined : Note<"format string is defined here">;
+
+def warn_null_arg : Warning<
+ "null passed to a callee which requires a non-null argument">,
+ InGroup<NonNull>;
+
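+// Sketch for the reworded format-type mismatch and the new FormatNonStandard
+// group (off by default); illustrative only:
+//
+//     #include <cstdio>
+//     void f(long n) {
+//       std::printf("%d\n", n);      // warning: format specifies type 'int' but
+//                                    //          the argument has type 'long'
+//       std::printf("%1$s\n", "x");  // off by default: positional arguments are
+//     }                              //          not supported by ISO C
+//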
// CHECK: returning address/reference of stack memory
def warn_ret_stack_addr : Warning<
"address of stack memory associated with local variable %0 returned">,
@@ -4603,9 +5129,6 @@ def err_generic_sel_multi_match : Error<
// Blocks
def err_blocks_disable : Error<"blocks support disabled - compile with -fblocks"
" or pick a deployment target that supports them">;
-def err_expected_block_lbrace : Error<"expected '{' in block literal">;
-def err_return_in_block_expression : Error<
- "return not allowed in block expression literal">;
def err_block_returning_array_function : Error<
"block cannot return %select{array|function}0 type %1">;
@@ -4639,20 +5162,37 @@ def err_duplicate_case : Error<"duplicate case value '%0'">;
def warn_case_empty_range : Warning<"empty case range specified">;
def warn_missing_case_for_condition :
Warning<"no case matching constant switch condition '%0'">;
+
+def warn_def_missing_case1 : Warning<
+ "enumeration value %0 not explicitly handled in switch">,
+ InGroup<SwitchEnum>, DefaultIgnore;
+def warn_def_missing_case2 : Warning<
+ "enumeration values %0 and %1 not explicitly handled in switch">,
+ InGroup<SwitchEnum>, DefaultIgnore;
+def warn_def_missing_case3 : Warning<
+ "enumeration values %0, %1, and %2 not explicitly handled in switch">,
+ InGroup<SwitchEnum>, DefaultIgnore;
+def warn_def_missing_cases : Warning<
+ "%0 enumeration values not explicitly handled in switch: %1, %2, %3...">,
+ InGroup<SwitchEnum>, DefaultIgnore;
+
def warn_missing_case1 : Warning<"enumeration value %0 not handled in switch">,
- InGroup<DiagGroup<"switch-enum"> >;
+ InGroup<Switch>;
def warn_missing_case2 : Warning<
"enumeration values %0 and %1 not handled in switch">,
- InGroup<DiagGroup<"switch-enum"> >;
+ InGroup<Switch>;
def warn_missing_case3 : Warning<
"enumeration values %0, %1, and %2 not handled in switch">,
- InGroup<DiagGroup<"switch-enum"> >;
+ InGroup<Switch>;
def warn_missing_cases : Warning<
"%0 enumeration values not handled in switch: %1, %2, %3...">,
- InGroup<DiagGroup<"switch-enum"> >;
+ InGroup<Switch>;
+def warn_unreachable_default : Warning<
+ "default label in switch which covers all enumeration values">,
+ InGroup<CoveredSwitchDefault>, DefaultIgnore;
def warn_not_in_enum : Warning<"case value not in enumerated type %0">,
- InGroup<DiagGroup<"switch-enum"> >;
+ InGroup<Switch>;
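
The switch diagnostics are split here: the Switch-group forms stay on by default,
while the new SwitchEnum forms (whose "not explicitly handled" wording also counts
cases reached only through a default) and CoveredSwitchDefault are off by default.
Illustrative sketch:

    enum E { A, B, C };
    int f(E e) {
      switch (e) {        // warning: enumeration value 'C' not handled in switch
      case A: return 0;
      case B: return 1;
      }
      switch (e) {
      case A: case B: case C: return 2;
      default: return 3;  // off by default: default label in switch which covers
      }                   //                 all enumeration values
    }
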
def err_typecheck_statement_requires_scalar : Error<
"statement requires expression of scalar type (%0 invalid)">;
def err_typecheck_statement_requires_integer : Error<
@@ -4668,8 +5208,20 @@ def err_switch_explicit_conversion : Error<
"switch condition type %0 requires explicit conversion to %1">;
def err_switch_incomplete_class_type : Error<
"switch condition has incomplete class type %0">;
+
def warn_empty_if_body : Warning<
"if statement has empty body">, InGroup<EmptyBody>;
+def warn_empty_for_body : Warning<
+ "for loop has empty body">, InGroup<EmptyBody>;
+def warn_empty_range_based_for_body : Warning<
+ "range-based for loop has empty body">, InGroup<EmptyBody>;
+def warn_empty_while_body : Warning<
+ "while loop has empty body">, InGroup<EmptyBody>;
+def warn_empty_switch_body : Warning<
+ "switch statement has empty body">, InGroup<EmptyBody>;
+def note_empty_body_on_separate_line : Note<
+ "put the semicolon on a separate line to silence this warning">,
+ InGroup<EmptyBody>;
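
The empty-body warnings now cover the main loop forms as well; a sketch, with the
note's suggested fix being a semicolon on its own line (names invented):

    void f(int n, int *out) {
      if (n > 0);       // warning: if statement has empty body
        *out = n;       // (likely intended as the body)
      while (n-- > 0);  // warning: while loop has empty body
    }
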
def err_va_start_used_in_non_variadic_function : Error<
"'va_start' used in function with fixed args">;
@@ -4702,7 +5254,10 @@ def ext_return_has_expr : ExtWarn<
"should not return a value">,
DefaultError, InGroup<ReturnType>;
def ext_return_has_void_expr : Extension<
- "void %select{function|method}1 %0 should not return void expression">;
+ "void %select{function|method|block}1 %0 should not return void expression">;
+def err_return_init_list : Error<
+ "%select{void function|void method|constructor|destructor}1 %0 "
+ "must not return a value">;
def warn_noreturn_function_has_return_expr : Warning<
"function %0 declared 'noreturn' should not return">,
InGroup<DiagGroup<"invalid-noreturn">>;
@@ -4775,14 +5330,14 @@ def err_decimal_unsupported : Error<
"GNU decimal type extension not supported">;
def err_missing_type_specifier : Error<
"C++ requires a type specifier for all declarations">;
-def err_missing_param_declspec : Error<
- "parameter requires a declaration specifier">;
def err_objc_array_of_interfaces : Error<
"array of interface %0 is invalid (probably should be an array of pointers)">;
def ext_c99_array_usage : Extension<
- "use of C99-specific array features, accepted as an extension">;
+ "%select{qualifier in |static |}0array size %select{||'[*] '}0is a C99 "
+ "feature">, InGroup<C99>;
def err_c99_array_usage_cxx : Error<
- "C99-specific array features are not permitted in C++">;
+ "%select{qualifier in |static |}0array size %select{||'[*] '}0is a C99 "
+ "feature, not permitted in C++">;
def err_double_requires_fp64 : Error<
"use of type 'double' requires cl_khr_fp64 extension to be enabled">;
def err_nsconsumed_attribute_mismatch : Error<
@@ -4809,7 +5364,8 @@ def error_protected_ivar_access : Error<"instance variable %0 is protected">,
AccessControl;
def warn_maynot_respond : Warning<"%0 may not respond to %1">;
def warn_attribute_method_def : Warning<
- "method attribute can only be specified on method declarations">;
+ "attributes on method implementation and its declaration must match">,
+ InGroup<DiagGroup<"mismatched-method-attributes">>;
def ext_typecheck_base_super : Warning<
"method parameter type %0 does not match "
"super class method parameter type %1">, InGroup<SuperSubClassMismatch>, DefaultIgnore;
@@ -4916,8 +5472,6 @@ def note_related_result_type_inferred : Note<
}
let CategoryName = "Modules Issue" in {
-def err_module_private_follows_public : Error<
- "__module_private__ declaration of %0 follows public declaration">;
def err_module_private_specialization : Error<
"%select{template|partial|member}0 specialization cannot be "
"declared __module_private__">;
@@ -4927,6 +5481,8 @@ def err_module_private_local : Error<
def err_module_private_local_class : Error<
"local %select{struct|union|class|enum}0 cannot be declared "
"__module_private__">;
+def err_module_private_definition : Error<
+ "definition of %0 must be imported before it is required">;
}
} // end of sema component.
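The %select{a|b|...}N modifier used throughout these definitions picks one alternative based on an integer argument streamed in at the emission site. A minimal sketch of how a Sema caller would feed err_return_init_list its two arguments; the function name and surrounding context are assumed, only the Diag(...) << ... pattern is the point:

  // Sketch; assumes the usual clang/Sema/Sema.h context.
  // %0 is the declaration name; %1 is the %select index into
  // {void function|void method|constructor|destructor}.
  void diagnoseValueReturn(Sema &S, SourceLocation ReturnLoc,
                           const FunctionDecl *FD, unsigned SelectIdx) {
    S.Diag(ReturnLoc, diag::err_return_init_list) << FD << SelectIdx;
  }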
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
new file mode 100644
index 0000000..7f9fe26
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
@@ -0,0 +1,60 @@
+//==--- DiagnosticSerializationKinds.td - serialization diagnostics -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+let Component = "Serialization" in {
+
+def err_fe_unable_to_read_pch_file : Error<
+ "unable to read PCH file: '%0'">;
+def err_fe_not_a_pch_file : Error<
+ "input is not a PCH file: '%0'">;
+def err_fe_pch_malformed : Error<
+ "malformed or corrupted PCH file: '%0'">, DefaultFatal;
+def err_fe_pch_malformed_block : Error<
+ "malformed block record in PCH file: '%0'">, DefaultFatal;
+def err_fe_pch_error_at_end_block : Error<
+ "error at end of module block in PCH file: '%0'">, DefaultFatal;
+def err_fe_pch_file_modified : Error<
+ "file '%0' has been modified since the precompiled header was built">,
+ DefaultFatal;
+
+def warn_pch_target_triple : Error<
+ "PCH file was compiled for the target '%0' but the current translation "
+ "unit is being compiled for target '%1'">;
+def err_pch_langopt_mismatch : Error<"%0 was %select{disabled|enabled}1 in "
+ "PCH file but is currently %select{disabled|enabled}2">;
+def err_pch_langopt_value_mismatch : Error<
+ "%0 differs in PCH file vs. current file">;
+
+def warn_pch_version_too_old : Error<
+ "PCH file uses an older PCH format that is no longer supported">;
+def warn_pch_version_too_new : Error<
+ "PCH file uses a newer PCH format that cannot be read">;
+def warn_pch_different_branch : Error<
+ "PCH file built from a different branch (%0) than the compiler (%1)">;
+def err_pch_with_compiler_errors : Error<
+ "PCH file contains compiler errors">;
+def warn_cmdline_conflicting_macro_def : Error<
+ "definition of the macro '%0' conflicts with the definition used to "
+ "build the precompiled header">;
+def note_pch_macro_defined_as : Note<
+ "definition of macro '%0' in the precompiled header">;
+def warn_cmdline_missing_macro_defs : Warning<
+ "macro definitions used to build the precompiled header are missing">;
+def note_using_macro_def_from_pch : Note<
+ "using this macro definition from precompiled header">;
+def warn_macro_name_used_in_pch : Error<
+ "definition of macro %0 conflicts with an identifier used in the "
+ "precompiled header">;
+def warn_pch_compiler_options_mismatch : Error<
+ "compiler options used when building the precompiled header differ from "
+ "the options used when using the precompiled header">;
+
+def err_not_a_pch_file : Error<
+ "'%0' does not appear to be a precompiled header file">, DefaultFatal;
+}
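Several of the definitions above keep a historical warn_ prefix while being declared as Error<>; the name records what the diagnostic used to be, not its current severity. A minimal sketch of reporting one of these diagnostics through a DiagnosticsEngine; Diags and FileName are assumed to exist in the caller:

  void reportUnreadablePCH(clang::DiagnosticsEngine &Diags,
                           llvm::StringRef FileName) {
    // FileName is substituted for %0 in the message text.
    Diags.Report(clang::diag::err_fe_unable_to_read_pch_file) << FileName;
  }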
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h b/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h
index 403a59a..c4e6a1c 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h
@@ -1,4 +1,4 @@
-//===--- ExpressionTraits.h - C++ Expression Traits Support Enumerations ----*- C++ -*-===//
+//===- ExpressionTraits.h - C++ Expression Traits Support Enums -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
index ea8ed9f..5c7d9eb 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
@@ -39,7 +39,7 @@ namespace sys { class Path; }
namespace clang {
class FileManager;
class FileSystemStatCache;
-
+
/// DirectoryEntry - Cached information about one directory (either on
/// the disk or in the virtual file system).
///
@@ -64,12 +64,12 @@ class FileEntry {
dev_t Device; // ID for the device containing the file.
ino_t Inode; // Inode number for the file.
mode_t FileMode; // The file mode as returned by 'stat'.
-
+
/// FD - The file descriptor for the file entry if it is opened and owned
/// by the FileEntry. If not, this is set to -1.
mutable int FD;
friend class FileManager;
-
+
public:
FileEntry(dev_t device, ino_t inode, mode_t m)
: Name(0), Device(device), Inode(inode), FileMode(m), FD(-1) {}
@@ -80,7 +80,7 @@ public:
memcpy(this, &FE, sizeof(FE));
assert(FD == -1 && "Cannot copy a file-owning FileEntry");
}
-
+
void operator=(const FileEntry &FE) {
memcpy(this, &FE, sizeof(FE));
assert(FD == -1 && "Cannot assign a file-owning FileEntry");
@@ -110,13 +110,14 @@ public:
/// properties, such as uniquing files based on "inode", so that a file with two
/// names (e.g. symlinked) will be treated as a single file.
///
-class FileManager : public llvm::RefCountedBase<FileManager> {
+class FileManager : public RefCountedBase<FileManager> {
FileSystemOptions FileSystemOpts;
class UniqueDirContainer;
class UniqueFileContainer;
- /// UniqueRealDirs/UniqueRealFiles - Cache for existing real directories/files.
+ /// UniqueRealDirs/UniqueRealFiles - Cache for existing real
+ /// directories/files.
///
UniqueDirContainer &UniqueRealDirs;
UniqueFileContainer &UniqueRealFiles;
@@ -147,7 +148,7 @@ class FileManager : public llvm::RefCountedBase<FileManager> {
unsigned NumDirCacheMisses, NumFileCacheMisses;
// Caching.
- llvm::OwningPtr<FileSystemStatCache> StatCache;
+ OwningPtr<FileSystemStatCache> StatCache;
bool getStatValue(const char *Path, struct stat &StatBuf,
int *FileDescriptor);
@@ -224,7 +225,7 @@ public:
/// file to the corresponding FileEntry pointer.
void GetUniqueIDMapping(
SmallVectorImpl<const FileEntry *> &UIDToFiles) const;
-
+
void PrintStats() const;
};
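Dropping the llvm:: qualifier on RefCountedBase and OwningPtr here relies on the using-declarations added to clang/Basic/LLVM.h further down in this patch. Since FileManager is intrusively reference-counted, callers typically hold it through an IntrusiveRefCntPtr; a minimal sketch, with the file name purely illustrative:

  #include "clang/Basic/FileManager.h"

  void fileManagerExample(const clang::FileSystemOptions &FSOpts) {
    llvm::IntrusiveRefCntPtr<clang::FileManager> FileMgr(
        new clang::FileManager(FSOpts));
    if (const clang::FileEntry *FE = FileMgr->getFile("input.c"))
      (void)FE->getSize();   // served from the cached stat information
    FileMgr->PrintStats();
  }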
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
index 77828b3..96a2f90 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_FILESYSTEMSTATCACHE_H
#define LLVM_CLANG_FILESYSTEMSTATCACHE_H
+#include "clang/Basic/LLVM.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringMap.h"
#include <sys/types.h>
@@ -25,8 +26,9 @@ namespace clang {
/// system calls, which is used by precompiled and pretokenized headers to
/// improve performance.
class FileSystemStatCache {
+ virtual void anchor();
protected:
- llvm::OwningPtr<FileSystemStatCache> NextStatCache;
+ OwningPtr<FileSystemStatCache> NextStatCache;
public:
virtual ~FileSystemStatCache() {}
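The newly added anchor() is the usual LLVM idiom for pinning a class's vtable to a single translation unit: one out-of-line virtual method is declared in the header and defined once in a .cpp file, so other includers stop emitting weak copies of the vtable. The idiom in general form, with an illustrative class that is not part of the patch:

  class StatCacheLike {
    virtual void anchor();         // declared in the header
  public:
    virtual ~StatCacheLike() {}
  };
  void StatCacheLike::anchor() {}  // defined once, in exactly one .cpp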
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
index 5e48a86..cc0080b 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
@@ -20,11 +20,9 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
-#include <cctype>
#include <string>
namespace llvm {
@@ -49,8 +47,6 @@ namespace clang {
/// variable or function name). The preprocessor keeps this information in a
/// set, and all tok::identifier tokens have a pointer to one of these.
class IdentifierInfo {
- // Note: DON'T make TokenID a 'tok::TokenKind'; MSVC will treat it as a
- // signed char and TokenKinds > 255 won't be handled correctly.
unsigned TokenID : 9; // Front-end token ID or tok::identifier.
// Objective-C keyword ('protocol' in '@protocol') or builtin (__builtin_inf).
// First NUM_OBJC_KEYWORDS values are for Objective-C, the remaining values
@@ -62,11 +58,19 @@ class IdentifierInfo {
bool IsPoisoned : 1; // True if identifier is poisoned.
bool IsCPPOperatorKeyword : 1; // True if ident is a C++ operator keyword.
bool NeedsHandleIdentifier : 1; // See "RecomputeNeedsHandleIdentifier".
- bool IsFromAST : 1; // True if identfier first appeared in an AST
- // file and wasn't modified since.
+ bool IsFromAST : 1; // True if identifier was loaded (at least
+ // partially) from an AST file.
+ bool ChangedAfterLoad : 1; // True if identifier has changed from the
+ // definition loaded from an AST file.
bool RevertedTokenID : 1; // True if RevertTokenIDToIdentifier was
// called.
- // 5 bits left in 32-bit word.
+ bool OutOfDate : 1; // True if there may be additional
+ // information about this identifier
+ // stored externally.
+ bool IsModulesImport : 1; // True if this is the 'import' contextual
+ // keyword.
+ // 1 bit left in 32-bit word.
+
void *FETokenInfo; // Managed by the language front-end.
llvm::StringMapEntry<IdentifierInfo*> *Entry;
@@ -132,7 +136,6 @@ public:
NeedsHandleIdentifier = 1;
else
RecomputeNeedsHandleIdentifier();
- IsFromAST = false;
}
/// getTokenID - If this is a source-language token (e.g. 'for'), this API
@@ -221,7 +224,6 @@ public:
NeedsHandleIdentifier = 1;
else
RecomputeNeedsHandleIdentifier();
- IsFromAST = false;
}
/// isPoisoned - Return true if this token has been poisoned.
@@ -253,8 +255,48 @@ public:
/// from an AST file.
bool isFromAST() const { return IsFromAST; }
- void setIsFromAST(bool FromAST = true) { IsFromAST = FromAST; }
+ void setIsFromAST() { IsFromAST = true; }
+ /// \brief Determine whether this identifier has changed since it was loaded
+ /// from an AST file.
+ bool hasChangedSinceDeserialization() const {
+ return ChangedAfterLoad;
+ }
+
+ /// \brief Note that this identifier has changed since it was loaded from
+ /// an AST file.
+ void setChangedSinceDeserialization() {
+ ChangedAfterLoad = true;
+ }
+
+ /// \brief Determine whether the information for this identifier is out of
+ /// date with respect to the external source.
+ bool isOutOfDate() const { return OutOfDate; }
+
+ /// \brief Set whether the information for this identifier is out of
+ /// date with respect to the external source.
+ void setOutOfDate(bool OOD) {
+ OutOfDate = OOD;
+ if (OOD)
+ NeedsHandleIdentifier = true;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+ /// \brief Determine whether this is the contextual keyword
+ /// '__experimental_modules_import'.
+ bool isModulesImport() const { return IsModulesImport; }
+
+ /// \brief Set whether this identifier is the contextual keyword
+ /// '__experimental_modules_import'.
+ void setModulesImport(bool I) {
+ IsModulesImport = I;
+ if (I)
+ NeedsHandleIdentifier = true;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
private:
/// RecomputeNeedsHandleIdentifier - The Preprocessor::HandleIdentifier does
/// several special (but rare) things to identifiers of various sorts. For
@@ -266,8 +308,8 @@ private:
void RecomputeNeedsHandleIdentifier() {
NeedsHandleIdentifier =
(isPoisoned() | hasMacroDefinition() | isCPlusPlusOperatorKeyword() |
- isExtensionToken() | isCXX11CompatKeyword() ||
- (getTokenID() == tok::kw___import_module__));
+ isExtensionToken() | isCXX11CompatKeyword() || isOutOfDate() ||
+ isModulesImport());
}
};
@@ -449,6 +491,10 @@ public:
// Make sure getName() knows how to find the IdentifierInfo
// contents.
II->Entry = &Entry;
+
+ // If this is the 'import' contextual keyword, mark it as such.
+ if (Name.equals("import"))
+ II->setModulesImport(true);
}
return *II;
@@ -662,14 +708,7 @@ public:
/// has been capitalized.
static Selector constructSetterName(IdentifierTable &Idents,
SelectorTable &SelTable,
- const IdentifierInfo *Name) {
- llvm::SmallString<100> SelectorName;
- SelectorName = "set";
- SelectorName += Name->getName();
- SelectorName[3] = toupper(SelectorName[3]);
- IdentifierInfo *SetterName = &Idents.get(SelectorName);
- return SelTable.getUnarySelector(SetterName);
- }
+ const IdentifierInfo *Name);
};
/// DeclarationNameExtra - Common base of the MultiKeywordSelector,
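constructSetterName is moved out of line here; its old inline body needed SmallString and <cctype>, both of which this patch drops from the includes. Its contract is unchanged. A minimal usage sketch, with the wrapper function assumed:

  // Builds and interns the "setFoo:" selector for property "foo".
  clang::Selector getSetterFor(clang::IdentifierTable &Idents,
                               clang::SelectorTable &Sels,
                               const clang::IdentifierInfo *Prop) {
    return clang::SelectorTable::constructSetterName(Idents, Sels, Prop);
  }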
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h b/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h
index 27c459d..813b49e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h
@@ -24,9 +24,20 @@ namespace llvm {
class StringRef;
class Twine;
template<typename T> class ArrayRef;
+ template<class T> class OwningPtr;
+ template<unsigned InternalLen> class SmallString;
template<typename T, unsigned N> class SmallVector;
template<typename T> class SmallVectorImpl;
+ template<typename T>
+ struct SaveAndRestore;
+
+ // Reference counting.
+ template <typename T> class IntrusiveRefCntPtr;
+ template <typename T> struct IntrusiveRefCntPtrInfo;
+ template <class Derived> class RefCountedBase;
+ class RefCountedBaseVPTR;
+
class raw_ostream;
// TODO: DenseMap, ...
}
@@ -44,9 +55,18 @@ namespace clang {
using llvm::StringRef;
using llvm::Twine;
using llvm::ArrayRef;
+ using llvm::OwningPtr;
+ using llvm::SmallString;
using llvm::SmallVector;
using llvm::SmallVectorImpl;
-
+ using llvm::SaveAndRestore;
+
+ // Reference counting.
+ using llvm::IntrusiveRefCntPtr;
+ using llvm::IntrusiveRefCntPtrInfo;
+ using llvm::RefCountedBase;
+ using llvm::RefCountedBaseVPTR;
+
using llvm::raw_ostream;
} // end namespace clang.
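The effect of the new using-declarations, sketched below: code inside namespace clang can now name these LLVM types without the llvm:: qualifier, which is what lets the qualifier be dropped in FileManager.h, SourceManager.h, and the other headers in this patch. Illustrative only:

  #include "clang/Basic/LLVM.h"

  namespace clang {
    OwningPtr<int> P;       // resolves to llvm::OwningPtr
    SmallString<16> Buf;    // resolves to llvm::SmallString
  }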
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h b/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h
new file mode 100644
index 0000000..df50d94
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h
@@ -0,0 +1,38 @@
+//===--- Lambda.h - Types for C++ Lambdas -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several types used to describe C++ lambda
+// expressions that are shared between the parser and AST.
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CLANG_BASIC_LAMBDA_H
+#define LLVM_CLANG_BASIC_LAMBDA_H
+
+namespace clang {
+
+/// LambdaCaptureDefault - The default, if any, capture method for a
+/// lambda expression.
+enum LambdaCaptureDefault {
+ LCD_None,
+ LCD_ByCopy,
+ LCD_ByRef
+};
+
+/// LambdaCaptureKind - The different capture forms in a lambda
+/// introducer: 'this' or a copied or referenced variable.
+enum LambdaCaptureKind {
+ LCK_This,
+ LCK_ByCopy,
+ LCK_ByRef
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_LAMBDA_H
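An illustrative mapping, not part of the header itself, from C++11 lambda introducer syntax to the enumerators above:

  // [=]    -> LCD_ByCopy   capture-default: by copy
  // [&]    -> LCD_ByRef    capture-default: by reference
  // []     -> LCD_None     no capture-default
  //
  // [this] -> LCK_This     per-capture kinds
  // [x]    -> LCK_ByCopy
  // [&x]   -> LCK_ByRef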
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
index c8389ab..786ae12 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
@@ -42,7 +42,7 @@
#endif
LANGOPT(C99 , 1, 0, "C99")
-LANGOPT(C1X , 1, 0, "C1X")
+LANGOPT(C11 , 1, 0, "C11")
LANGOPT(MicrosoftExt , 1, 0, "Microsoft extensions")
LANGOPT(MicrosoftMode , 1, 0, "Microsoft compatibility mode")
LANGOPT(Borland , 1, 0, "Borland extensions")
@@ -91,16 +91,18 @@ LANGOPT(Blocks , 1, 0, "blocks extension to C")
BENIGN_LANGOPT(EmitAllDecls , 1, 0, "support for emitting all declarations")
LANGOPT(MathErrno , 1, 1, "errno support for math functions")
BENIGN_LANGOPT(HeinousExtensions , 1, 0, "Extensions that we really don't like and may be ripped out at any time")
-
+LANGOPT(Modules , 1, 0, "modules extension to C")
LANGOPT(Optimize , 1, 0, "__OPTIMIZE__ predefined macro")
LANGOPT(OptimizeSize , 1, 0, "__OPTIMIZE_SIZE__ predefined macro")
LANGOPT(Static , 1, 0, "__STATIC__ predefined macro (as opposed to __DYNAMIC__)")
VALUE_LANGOPT(PackStruct , 32, 0,
"default struct packing maximum alignment")
-VALUE_LANGOPT(PICLevel , 2, 0, "__PIC__ level")
+VALUE_LANGOPT(PICLevel , 2, 0, "__PIC__ level")
+VALUE_LANGOPT(PIELevel , 2, 0, "__PIE__ level")
LANGOPT(GNUInline , 1, 0, "GNU inline semantics")
-LANGOPT(NoInline , 1, 0, "__NO_INLINE__ predefined macro")
+LANGOPT(NoInlineDefine , 1, 0, "__NO_INLINE__ predefined macro")
LANGOPT(Deprecated , 1, 0, "__DEPRECATED predefined macro")
+LANGOPT(FastMath , 1, 0, "__FAST_MATH__ predefined macro")
BENIGN_LANGOPT(ObjCGCBitmapPrint , 1, 0, "printing of GC's bitmap layout for __weak/__strong ivars")
@@ -117,17 +119,23 @@ LANGOPT(AssumeSaneOperatorNew , 1, 1, "implicit __attribute__((malloc)) for C++'
BENIGN_LANGOPT(ElideConstructors , 1, 1, "C++ copy constructor elision")
BENIGN_LANGOPT(CatchUndefined , 1, 0, "catching undefined behavior at run time")
BENIGN_LANGOPT(DumpRecordLayouts , 1, 0, "dumping the layout of IRgen'd records")
+BENIGN_LANGOPT(DumpRecordLayoutsSimple , 1, 0, "dumping the layout of IRgen'd records in a simple form")
BENIGN_LANGOPT(DumpVTableLayouts , 1, 0, "dumping the layouts of emitted vtables")
LANGOPT(NoConstantCFStrings , 1, 0, "no constant CoreFoundation strings")
BENIGN_LANGOPT(InlineVisibilityHidden , 1, 0, "hidden default visibility for inline C++ methods")
BENIGN_LANGOPT(ParseUnknownAnytype, 1, 0, "__unknown_anytype")
BENIGN_LANGOPT(DebuggerSupport , 1, 0, "debugger support")
+BENIGN_LANGOPT(DebuggerCastResultToId, 1, 0, "for 'po' in the debugger, cast the result to id if it is of unknown type")
+BENIGN_LANGOPT(DebuggerObjCLiteral , 1, 0, "debugger Objective-C literals and subscripting support")
+BENIGN_LANGOPT(AddressSanitizer , 1, 0, "AddressSanitizer enabled")
+BENIGN_LANGOPT(ThreadSanitizer , 1, 0, "ThreadSanitizer enabled")
BENIGN_LANGOPT(SpellChecking , 1, 1, "spell-checking")
LANGOPT(SinglePrecisionConstants , 1, 0, "treating double-precision floating point constants as single precision constants")
LANGOPT(FastRelaxedMath , 1, 0, "OpenCL fast relaxed math")
LANGOPT(DefaultFPContract , 1, 0, "FP_CONTRACT")
LANGOPT(NoBitFieldTypeAlign , 1, 0, "bit-field type alignment")
+LANGOPT(HexagonQdsp6Compat , 1, 0, "hexagon-qdsp6 backward compatibility")
LANGOPT(ObjCAutoRefCount , 1, 0, "Objective-C automated reference counting")
LANGOPT(ObjCRuntimeHasWeak , 1, 0, "__weak support in the ARC runtime")
LANGOPT(FakeAddressSpaceMap , 1, 0, "OpenCL fake address space map")
@@ -146,11 +154,15 @@ ENUM_LANGOPT(SignedOverflowBehavior, SignedOverflowBehaviorTy, 2, SOB_Undefined,
BENIGN_LANGOPT(InstantiationDepth, 32, 1024,
"maximum template instantiation depth")
+BENIGN_LANGOPT(ConstexprCallDepth, 32, 512,
+ "maximum constexpr call depth")
BENIGN_LANGOPT(NumLargeByValueCopy, 32, 0,
"if non-zero, warn about parameter or return Warn if parameter/return value is larger in bytes than this setting. 0 is no check.")
VALUE_LANGOPT(MSCVersion, 32, 0,
"version of Microsoft Visual C/C++")
+LANGOPT(ApplePragmaPack, 1, 0, "Apple gcc-compatible #pragma pack handling")
+
#undef LANGOPT
#undef VALUE_LANGOPT
#undef BENIGN_LANGOPT
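This .def file is an X-macro list: an includer defines the LANGOPT macros, includes the file, and each entry above expands once (BENIGN_LANGOPT and VALUE_LANGOPT fall back to LANGOPT when the includer leaves them undefined, and the file #undefs the macros at its end). A minimal consumer sketch, assuming only that llvm/Support/raw_ostream.h is available:

  #include "llvm/Support/raw_ostream.h"

  void dumpLangOptDescriptions() {
  #define LANGOPT(Name, Bits, Default, Description) \
    llvm::errs() << #Name << ": " << Description << "\n";
  #define ENUM_LANGOPT(Name, Type, Bits, Default, Description)
  #include "clang/Basic/LangOptions.def"
  }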
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
index 688047f..ce4ff06 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
@@ -15,13 +15,33 @@
#define LLVM_CLANG_LANGOPTIONS_H
#include <string>
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/Visibility.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
namespace clang {
+/// Bitfields of LangOptions, split out from LangOptions in order to ensure that
+/// this large collection of bitfields is a trivial class type.
+class LangOptionsBase {
+public:
+ // Define simple language options (with no accessors).
+#define LANGOPT(Name, Bits, Default, Description) unsigned Name : Bits;
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description)
+#include "clang/Basic/LangOptions.def"
+
+protected:
+ // Define language options of enumeration type. These are private, and will
+ // have accessors (below).
+#define LANGOPT(Name, Bits, Default, Description)
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ unsigned Name : Bits;
+#include "clang/Basic/LangOptions.def"
+};
+
/// LangOptions - This class keeps track of the various options that can be
/// enabled, which controls the dialect of C that is accepted.
-class LangOptions {
+class LangOptions : public RefCountedBase<LangOptions>, public LangOptionsBase {
public:
typedef clang::Visibility Visibility;
@@ -34,19 +54,6 @@ public:
SOB_Trapping // -ftrapv
};
- // Define simple language options (with no accessors).
-#define LANGOPT(Name, Bits, Default, Description) unsigned Name : Bits;
-#define ENUM_LANGOPT(Name, Type, Bits, Default, Description)
-#include "clang/Basic/LangOptions.def"
-
-private:
- // Define language options of enumeration type. These are private, and will
- // have accessors (below).
-#define LANGOPT(Name, Bits, Default, Description)
-#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
- unsigned Name : Bits;
-#include "clang/Basic/LangOptions.def"
-
public:
std::string ObjCConstantStringClass;
@@ -54,6 +61,9 @@ public:
/// If none is specified, abort (GCC-compatible behaviour).
std::string OverflowHandler;
+ /// \brief The name of the current module.
+ std::string CurrentModule;
+
LangOptions();
// Define accessors/mutators for language options of enumeration type.
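With the bitfields hoisted into LangOptionsBase, the access pattern stays the same: simple LANGOPT entries are public bitfields, while ENUM_LANGOPT entries go through the accessors generated by the include just below. A minimal sketch; the option names are real, the wrapper functions are illustrative:

  bool usesModules(const clang::LangOptions &LO) {
    return LO.Modules;                    // plain LANGOPT bitfield
  }

  void defineSignedOverflow(clang::LangOptions &LO) {
    LO.setSignedOverflowBehavior(clang::LangOptions::SOB_Defined);
  }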
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h b/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h
index 01b6c79..09a5a0b 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h
@@ -29,7 +29,7 @@ enum Linkage {
InternalLinkage,
/// \brief External linkage within a unique namespace. From the
- /// langauge perspective, these entities have external
+ /// language perspective, these entities have external
/// linkage. However, since they reside in an anonymous namespace,
/// their names are unique to this translation unit, which is
/// equivalent to having internal linkage from the code-generation
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Module.h b/contrib/llvm/tools/clang/include/clang/Basic/Module.h
new file mode 100644
index 0000000..82dbd5b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Module.h
@@ -0,0 +1,284 @@
+//===--- Module.h - Describe a module ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Module class, which describes a module in the source
+// code.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_MODULE_H
+#define LLVM_CLANG_BASIC_MODULE_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+ class raw_ostream;
+}
+
+namespace clang {
+
+class DirectoryEntry;
+class FileEntry;
+class LangOptions;
+class TargetInfo;
+
+/// \brief Describes the name of a module.
+typedef llvm::SmallVector<std::pair<std::string, SourceLocation>, 2>
+ ModuleId;
+
+/// \brief Describes a module or submodule.
+class Module {
+public:
+ /// \brief The name of this module.
+ std::string Name;
+
+ /// \brief The location of the module definition.
+ SourceLocation DefinitionLoc;
+
+ /// \brief The parent of this module. This will be NULL for the top-level
+ /// module.
+ Module *Parent;
+
+ /// \brief The umbrella header or directory.
+ llvm::PointerUnion<const DirectoryEntry *, const FileEntry *> Umbrella;
+
+private:
+ /// \brief The submodules of this module, indexed by name.
+ std::vector<Module *> SubModules;
+
+ /// \brief A mapping from the submodule name to the index into the
+ /// \c SubModules vector at which that submodule resides.
+ llvm::StringMap<unsigned> SubModuleIndex;
+
+public:
+ /// \brief The headers that are part of this module.
+ llvm::SmallVector<const FileEntry *, 2> Headers;
+
+ /// \brief The set of language features required to use this module.
+ ///
+ /// If any of these features is not present, the \c IsAvailable bit
+ /// will be false to indicate that this (sub)module is not
+ /// available.
+ llvm::SmallVector<std::string, 2> Requires;
+
+ /// \brief Whether this module is available in the current
+ /// translation unit.
+ unsigned IsAvailable : 1;
+
+ /// \brief Whether this module was loaded from a module file.
+ unsigned IsFromModuleFile : 1;
+
+ /// \brief Whether this is a framework module.
+ unsigned IsFramework : 1;
+
+ /// \brief Whether this is an explicit submodule.
+ unsigned IsExplicit : 1;
+
+ /// \brief Whether this is a "system" module (which assumes that all
+ /// headers in it are system headers).
+ unsigned IsSystem : 1;
+
+ /// \brief Whether we should infer submodules for this module based on
+ /// the headers.
+ ///
+ /// Submodules can only be inferred for modules with an umbrella header.
+ unsigned InferSubmodules : 1;
+
+ /// \brief Whether, when inferring submodules, the inferred submodules
+ /// should be explicit.
+ unsigned InferExplicitSubmodules : 1;
+
+  /// \brief Whether, when inferring submodules, the inferred submodules should
+ /// export all modules they import (e.g., the equivalent of "export *").
+ unsigned InferExportWildcard : 1;
+
+ /// \brief Describes the visibility of the various names within a
+ /// particular module.
+ enum NameVisibilityKind {
+ /// \brief All of the names in this module are hidden.
+ ///
+ Hidden,
+ /// \brief Only the macro names in this module are visible.
+ MacrosVisible,
+ /// \brief All of the names in this module are visible.
+ AllVisible
+ };
+
+  /// \brief The visibility of names within this particular module.
+ NameVisibilityKind NameVisibility;
+
+ /// \brief The location of the inferred submodule.
+ SourceLocation InferredSubmoduleLoc;
+
+ /// \brief The set of modules imported by this module, and on which this
+ /// module depends.
+ llvm::SmallVector<Module *, 2> Imports;
+
+ /// \brief Describes an exported module.
+ ///
+ /// The pointer is the module being re-exported, while the bit will be true
+ /// to indicate that this is a wildcard export.
+ typedef llvm::PointerIntPair<Module *, 1, bool> ExportDecl;
+
+ /// \brief The set of export declarations.
+ llvm::SmallVector<ExportDecl, 2> Exports;
+
+ /// \brief Describes an exported module that has not yet been resolved
+  /// (perhaps because the module it refers to has not yet been loaded).
+ struct UnresolvedExportDecl {
+ /// \brief The location of the 'export' keyword in the module map file.
+ SourceLocation ExportLoc;
+
+ /// \brief The name of the module.
+ ModuleId Id;
+
+ /// \brief Whether this export declaration ends in a wildcard, indicating
+ /// that all of its submodules should be exported (rather than the named
+ /// module itself).
+ bool Wildcard;
+ };
+
+ /// \brief The set of export declarations that have yet to be resolved.
+ llvm::SmallVector<UnresolvedExportDecl, 2> UnresolvedExports;
+
+ /// \brief Construct a top-level module.
+ explicit Module(StringRef Name, SourceLocation DefinitionLoc,
+ bool IsFramework)
+ : Name(Name), DefinitionLoc(DefinitionLoc), Parent(0), Umbrella(),
+ IsAvailable(true), IsFromModuleFile(false), IsFramework(IsFramework),
+ IsExplicit(false), IsSystem(false),
+ InferSubmodules(false), InferExplicitSubmodules(false),
+ InferExportWildcard(false), NameVisibility(Hidden) { }
+
+ /// \brief Construct a new module or submodule.
+ Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
+ bool IsFramework, bool IsExplicit);
+
+ ~Module();
+
+ /// \brief Determine whether this module is available for use within the
+ /// current translation unit.
+ bool isAvailable() const { return IsAvailable; }
+
+ /// \brief Determine whether this module is available for use within the
+ /// current translation unit.
+ ///
+ /// \param LangOpts The language options used for the current
+ /// translation unit.
+ ///
+ /// \param Target The target options used for the current translation unit.
+ ///
+ /// \param Feature If this module is unavailable, this parameter
+ /// will be set to one of the features that is required for use of
+ /// this module (but is not available).
+ bool isAvailable(const LangOptions &LangOpts,
+ const TargetInfo &Target,
+ StringRef &Feature) const;
+
+ /// \brief Determine whether this module is a submodule.
+ bool isSubModule() const { return Parent != 0; }
+
+ /// \brief Determine whether this module is a submodule of the given other
+ /// module.
+ bool isSubModuleOf(Module *Other) const;
+
+ /// \brief Determine whether this module is a part of a framework,
+ /// either because it is a framework module or because it is a submodule
+ /// of a framework module.
+ bool isPartOfFramework() const {
+ for (const Module *Mod = this; Mod; Mod = Mod->Parent)
+ if (Mod->IsFramework)
+ return true;
+
+ return false;
+ }
+
+ /// \brief Retrieve the full name of this module, including the path from
+ /// its top-level module.
+ std::string getFullModuleName() const;
+
+ /// \brief Retrieve the top-level module for this (sub)module, which may
+ /// be this module.
+ Module *getTopLevelModule() {
+ return const_cast<Module *>(
+ const_cast<const Module *>(this)->getTopLevelModule());
+ }
+
+ /// \brief Retrieve the top-level module for this (sub)module, which may
+ /// be this module.
+ const Module *getTopLevelModule() const;
+
+ /// \brief Retrieve the name of the top-level module.
+ ///
+ StringRef getTopLevelModuleName() const {
+ return getTopLevelModule()->Name;
+ }
+
+ /// \brief Retrieve the directory for which this module serves as the
+ /// umbrella.
+ const DirectoryEntry *getUmbrellaDir() const;
+
+ /// \brief Retrieve the header that serves as the umbrella header for this
+ /// module.
+ const FileEntry *getUmbrellaHeader() const {
+ return Umbrella.dyn_cast<const FileEntry *>();
+ }
+
+ /// \brief Determine whether this module has an umbrella directory that is
+ /// not based on an umbrella header.
+ bool hasUmbrellaDir() const {
+ return Umbrella && Umbrella.is<const DirectoryEntry *>();
+ }
+
+  /// \brief Add the given feature requirement to the list of features
+ /// required by this module.
+ ///
+ /// \param Feature The feature that is required by this module (and
+ /// its submodules).
+ ///
+ /// \param LangOpts The set of language options that will be used to
+ /// evaluate the availability of this feature.
+ ///
+ /// \param Target The target options that will be used to evaluate the
+ /// availability of this feature.
+ void addRequirement(StringRef Feature, const LangOptions &LangOpts,
+ const TargetInfo &Target);
+
+ /// \brief Find the submodule with the given name.
+ ///
+ /// \returns The submodule if found, or NULL otherwise.
+ Module *findSubmodule(StringRef Name) const;
+
+ typedef std::vector<Module *>::iterator submodule_iterator;
+ typedef std::vector<Module *>::const_iterator submodule_const_iterator;
+
+ submodule_iterator submodule_begin() { return SubModules.begin(); }
+ submodule_const_iterator submodule_begin() const {return SubModules.begin();}
+ submodule_iterator submodule_end() { return SubModules.end(); }
+ submodule_const_iterator submodule_end() const { return SubModules.end(); }
+
+ /// \brief Print the module map for this module to the given stream.
+ ///
+ void print(llvm::raw_ostream &OS, unsigned Indent = 0) const;
+
+ /// \brief Dump the contents of this module to the given output stream.
+ void dump() const;
+};
+
+} // end namespace clang
+
+
+#endif // LLVM_CLANG_BASIC_MODULE_H
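A minimal sketch of the API this header introduces: build a framework module with one explicit submodule and query the name path. It assumes, matching the out-of-line constructor that registers a child with its Parent, that the parent takes ownership of heap-allocated submodules:

  #include "clang/Basic/Module.h"

  void moduleExample() {
    clang::Module Top("Foundation", clang::SourceLocation(),
                      /*IsFramework=*/true);
    clang::Module *Sub =
        new clang::Module("Private", clang::SourceLocation(), &Top,
                          /*IsFramework=*/false, /*IsExplicit=*/true);
    (void)Sub->getFullModuleName();   // "Foundation.Private"
    (void)Sub->isSubModuleOf(&Top);   // true
    // Top's destructor is assumed to delete Sub along with the rest of
    // its SubModules vector.
  }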
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
index 7328b1a..8028a73 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
@@ -356,7 +356,7 @@ public:
friend bool operator!=(const key_iterator& X, const key_iterator &Y) {
return X.NumEntriesLeft != Y.NumEntriesLeft;
}
-
+
key_iterator& operator++() { // Preincrement
if (!NumItemsInBucketLeft) {
// 'Items' starts with a 16-bit unsigned integer representing the
@@ -421,7 +421,7 @@ public:
bool operator!=(const item_iterator& X) const {
return X.NumEntriesLeft != NumEntriesLeft;
}
-
+
item_iterator& operator++() { // Preincrement
if (!NumItemsInBucketLeft) {
// 'Items' starts with a 16-bit unsigned integer representing the
@@ -449,7 +449,7 @@ public:
LocalPtr += 4; // Skip the hash.
// Determine the length of the key and the data.
- const std::pair<unsigned, unsigned>& L = Info::ReadKeyDataLength(LocalPtr);
+ const std::pair<unsigned, unsigned>& L =Info::ReadKeyDataLength(LocalPtr);
// Read the key.
const internal_key_type& Key =
@@ -458,14 +458,14 @@ public:
InfoObj->ReadData(Key, LocalPtr + L.first, L.second));
}
};
-
+
item_iterator item_begin() {
return item_iterator(Base + 4, getNumEntries(), &InfoObj);
}
item_iterator item_end() { return item_iterator(); }
Info &getInfoObj() { return InfoObj; }
-
+
static OnDiskChainedHashTable* Create(const unsigned char* buckets,
const unsigned char* const base,
const Info &InfoObj = Info()) {
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
index c6ca989..007e6a4 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
@@ -25,88 +25,90 @@ namespace clang {
class PartialDiagnostic {
public:
+ enum {
+ // The MaxArguments and MaxFixItHints member enum values from
+ // DiagnosticsEngine are private but DiagnosticsEngine declares
+ // PartialDiagnostic a friend. These enum values are redeclared
+ // here so that the nested Storage class below can access them.
+ MaxArguments = DiagnosticsEngine::MaxArguments
+ };
+
struct Storage {
- Storage() : NumDiagArgs(0), NumDiagRanges(0), NumFixItHints(0) { }
+ Storage() : NumDiagArgs(0), NumDiagRanges(0) { }
enum {
- /// MaxArguments - The maximum number of arguments we can hold. We
+ /// MaxArguments - The maximum number of arguments we can hold. We
/// currently only support up to 10 arguments (%0-%9).
/// A single diagnostic with more than that almost certainly has to
/// be simplified anyway.
- MaxArguments = DiagnosticsEngine::MaxArguments
+ MaxArguments = PartialDiagnostic::MaxArguments
};
-
+
/// NumDiagArgs - This contains the number of entries in Arguments.
unsigned char NumDiagArgs;
-
+
/// NumDiagRanges - This is the number of ranges in the DiagRanges array.
unsigned char NumDiagRanges;
- /// \brief The number of code modifications hints in the
- /// FixItHints array.
- unsigned char NumFixItHints;
-
/// DiagArgumentsKind - This is an array of ArgumentKind::ArgumentKind enum
/// values, with one for each argument. This specifies whether the argument
/// is in DiagArgumentsStr or in DiagArguments.
unsigned char DiagArgumentsKind[MaxArguments];
-
- /// DiagArgumentsVal - The values for the various substitution positions.
- /// This is used when the argument is not an std::string. The specific value
+
+ /// DiagArgumentsVal - The values for the various substitution positions.
+ /// This is used when the argument is not an std::string. The specific value
/// is mangled into an intptr_t and the interpretation depends on exactly
/// what sort of argument kind it is.
intptr_t DiagArgumentsVal[MaxArguments];
-
+
/// \brief The values for the various substitution positions that have
/// string arguments.
std::string DiagArgumentsStr[MaxArguments];
-
+
/// DiagRanges - The list of ranges added to this diagnostic. It currently
    /// only supports 10 ranges; this could easily be extended if needed.
CharSourceRange DiagRanges[10];
-
- enum { MaxFixItHints = DiagnosticsEngine::MaxFixItHints };
-
+
/// FixItHints - If valid, provides a hint with some code
/// to insert, remove, or modify at a particular position.
- FixItHint FixItHints[MaxFixItHints];
+ SmallVector<FixItHint, 6> FixItHints;
};
- /// \brief An allocator for Storage objects, which uses a small cache to
+/// \brief An allocator for Storage objects, which uses a small cache of
/// objects, used to reduce malloc()/free() traffic for partial diagnostics.
class StorageAllocator {
static const unsigned NumCached = 16;
Storage Cached[NumCached];
Storage *FreeList[NumCached];
unsigned NumFreeListEntries;
-
+
public:
StorageAllocator();
~StorageAllocator();
-
+
/// \brief Allocate new storage.
Storage *Allocate() {
if (NumFreeListEntries == 0)
return new Storage;
-
+
Storage *Result = FreeList[--NumFreeListEntries];
Result->NumDiagArgs = 0;
Result->NumDiagRanges = 0;
- Result->NumFixItHints = 0;
+ Result->FixItHints.clear();
return Result;
}
-
+
/// \brief Free the given storage object.
void Deallocate(Storage *S) {
if (S >= Cached && S <= Cached + NumCached) {
FreeList[NumFreeListEntries++] = S;
return;
}
-
+
delete S;
}
};
-
+
private:
// NOTE: Sema assumes that PartialDiagnostic is location-invariant
// in the sense that its bits can be safely memcpy'ed and destructed
@@ -114,18 +116,18 @@ private:
/// DiagID - The diagnostic ID.
mutable unsigned DiagID;
-
+
/// DiagStorage - Storage for args and ranges.
mutable Storage *DiagStorage;
/// \brief Allocator used to allocate storage for this diagnostic.
StorageAllocator *Allocator;
-
+
/// \brief Retrieve storage for this particular diagnostic.
Storage *getStorage() const {
if (DiagStorage)
return DiagStorage;
-
+
if (Allocator)
DiagStorage = Allocator->Allocate();
else {
@@ -134,48 +136,53 @@ private:
}
return DiagStorage;
}
-
- void freeStorage() {
+
+ void freeStorage() {
if (!DiagStorage)
return;
-
+
+ // The hot path for PartialDiagnostic is when we just used it to wrap an ID
+ // (typically so we have the flexibility of passing a more complex
+ // diagnostic into the callee, but that does not commonly occur).
+ //
+ // Split this out into a slow function for silly compilers (*cough*) which
+ // can't do decent partial inlining.
+ freeStorageSlow();
+ }
+
+ void freeStorageSlow() {
if (Allocator)
Allocator->Deallocate(DiagStorage);
else if (Allocator != reinterpret_cast<StorageAllocator *>(~uintptr_t(0)))
delete DiagStorage;
DiagStorage = 0;
}
-
+
void AddSourceRange(const CharSourceRange &R) const {
if (!DiagStorage)
DiagStorage = getStorage();
- assert(DiagStorage->NumDiagRanges <
+ assert(DiagStorage->NumDiagRanges <
llvm::array_lengthof(DiagStorage->DiagRanges) &&
"Too many arguments to diagnostic!");
DiagStorage->DiagRanges[DiagStorage->NumDiagRanges++] = R;
- }
+ }
void AddFixItHint(const FixItHint &Hint) const {
if (Hint.isNull())
return;
-
+
if (!DiagStorage)
DiagStorage = getStorage();
- assert(DiagStorage->NumFixItHints < Storage::MaxFixItHints &&
- "Too many code modification hints!");
- if (DiagStorage->NumFixItHints >= Storage::MaxFixItHints)
- return; // Don't crash in release builds
- DiagStorage->FixItHints[DiagStorage->NumFixItHints++]
- = Hint;
+ DiagStorage->FixItHints.push_back(Hint);
}
-
+
public:
PartialDiagnostic(unsigned DiagID, StorageAllocator &Allocator)
: DiagID(DiagID), DiagStorage(0), Allocator(&Allocator) { }
-
- PartialDiagnostic(const PartialDiagnostic &Other)
+
+ PartialDiagnostic(const PartialDiagnostic &Other)
: DiagID(Other.DiagID), DiagStorage(0), Allocator(Other.Allocator)
{
if (Other.DiagStorage) {
@@ -184,14 +191,14 @@ public:
}
}
- PartialDiagnostic(const PartialDiagnostic &Other, Storage *DiagStorage)
- : DiagID(Other.DiagID), DiagStorage(DiagStorage),
+ PartialDiagnostic(const PartialDiagnostic &Other, Storage *DiagStorage)
+ : DiagID(Other.DiagID), DiagStorage(DiagStorage),
Allocator(reinterpret_cast<StorageAllocator *>(~uintptr_t(0)))
{
if (Other.DiagStorage)
*this->DiagStorage = *Other.DiagStorage;
}
-
+
PartialDiagnostic(const Diagnostic &Other, StorageAllocator &Allocator)
: DiagID(Other.getID()), DiagStorage(0), Allocator(&Allocator)
{
@@ -202,22 +209,22 @@ public:
else
AddTaggedVal(Other.getRawArg(I), Other.getArgKind(I));
}
-
+
// Copy source ranges.
for (unsigned I = 0, N = Other.getNumRanges(); I != N; ++I)
AddSourceRange(Other.getRange(I));
-
+
// Copy fix-its.
for (unsigned I = 0, N = Other.getNumFixItHints(); I != N; ++I)
AddFixItHint(Other.getFixItHint(I));
}
-
+
PartialDiagnostic &operator=(const PartialDiagnostic &Other) {
DiagID = Other.DiagID;
if (Other.DiagStorage) {
if (!DiagStorage)
DiagStorage = getStorage();
-
+
*DiagStorage = *Other.DiagStorage;
} else {
freeStorage();
@@ -245,7 +252,7 @@ public:
void AddString(StringRef V) const {
if (!DiagStorage)
DiagStorage = getStorage();
-
+
assert(DiagStorage->NumDiagArgs < Storage::MaxArguments &&
"Too many arguments to diagnostic!");
DiagStorage->DiagArgumentsKind[DiagStorage->NumDiagArgs]
@@ -256,7 +263,7 @@ public:
void Emit(const DiagnosticBuilder &DB) const {
if (!DiagStorage)
return;
-
+
// Add all arguments.
for (unsigned i = 0, e = DiagStorage->NumDiagArgs; i != e; ++i) {
if ((DiagnosticsEngine::ArgumentKind)DiagStorage->DiagArgumentsKind[i]
@@ -266,25 +273,25 @@ public:
DB.AddTaggedVal(DiagStorage->DiagArgumentsVal[i],
(DiagnosticsEngine::ArgumentKind)DiagStorage->DiagArgumentsKind[i]);
}
-
+
// Add all ranges.
for (unsigned i = 0, e = DiagStorage->NumDiagRanges; i != e; ++i)
DB.AddSourceRange(DiagStorage->DiagRanges[i]);
-
+
// Add all fix-its.
- for (unsigned i = 0, e = DiagStorage->NumFixItHints; i != e; ++i)
+ for (unsigned i = 0, e = DiagStorage->FixItHints.size(); i != e; ++i)
DB.AddFixItHint(DiagStorage->FixItHints[i]);
}
-
+
/// \brief Clear out this partial diagnostic, giving it a new diagnostic ID
/// and removing all of its arguments, ranges, and fix-it hints.
void Reset(unsigned DiagID = 0) {
this->DiagID = DiagID;
freeStorage();
}
-
+
bool hasStorage() const { return DiagStorage != 0; }
-
+
friend const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
unsigned I) {
PD.AddTaggedVal(I, DiagnosticsEngine::ak_uint);
@@ -306,11 +313,11 @@ public:
friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
StringRef S) {
-
+
PD.AddString(S);
return PD;
}
-
+
friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
const SourceRange &R) {
PD.AddSourceRange(CharSourceRange::getTokenRange(R));
@@ -322,13 +329,13 @@ public:
PD.AddSourceRange(R);
return PD;
}
-
+
friend const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
const FixItHint &Hint) {
PD.AddFixItHint(Hint);
return PD;
}
-
+
};
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
@@ -336,7 +343,7 @@ inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
PD.Emit(DB);
return DB;
}
-
+
/// \brief A partial diagnostic along with the source location where this
/// diagnostic occurs.
typedef std::pair<SourceLocation, PartialDiagnostic> PartialDiagnosticAt;
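A minimal usage sketch tying this class together: a PartialDiagnostic accumulates arguments, ranges, and fix-its without a source location, and the operator<< defined just above replays them into a real DiagnosticBuilder via Emit(). The Sema::Diag overload taking a PartialDiagnostic is assumed here:

  void reportLater(clang::Sema &S, clang::SourceLocation Loc,
                   const clang::PartialDiagnostic &PD) {
    // Forwards to: DiagnosticBuilder << PD, which calls PD.Emit(DB).
    S.Diag(Loc, PD);
  }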
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
index 154148c..d5fa7e7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
@@ -16,6 +16,7 @@
#include "clang/Basic/LLVM.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/Compiler.h"
#include <utility>
#include <functional>
#include <cassert>
@@ -245,6 +246,7 @@ public:
/// the last token. Return false if the end of this range specifies the last
/// character in the range.
bool isTokenRange() const { return IsTokenRange; }
+ bool isCharRange() const { return !IsTokenRange; }
SourceLocation getBegin() const { return Range.getBegin(); }
SourceLocation getEnd() const { return Range.getEnd(); }
@@ -323,7 +325,7 @@ public:
/// Prints information about this FullSourceLoc to stderr. Useful for
/// debugging.
- void dump() const { SourceLocation::dump(*SrcMgr); }
+ LLVM_ATTRIBUTE_USED void dump() const;
friend inline bool
operator==(const FullSourceLoc &LHS, const FullSourceLoc &RHS) {
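Marking the out-of-lined dump() with LLVM_ATTRIBUTE_USED, which expands to __attribute__((used)) where supported, keeps its definition from being dead-stripped, so it stays callable from a debugger even when no code references it. The idiom in general form, with an illustrative struct:

  #include "llvm/Support/Compiler.h"

  struct Example {
    LLVM_ATTRIBUTE_USED void dump() const;  // "call E.dump()" in gdb/lldb
  };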
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
index 985ddd6..bcb2d56 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
@@ -102,7 +102,39 @@ namespace SrcMgr {
/// NumLines - The number of lines in this ContentCache. This is only valid
/// if SourceLineCache is non-null.
- unsigned NumLines;
+ unsigned NumLines : 31;
+
+ /// \brief Indicates whether the buffer itself was provided to override
+ /// the actual file contents.
+ ///
+ /// When true, the original entry may be a virtual file that does not
+ /// exist.
+ unsigned BufferOverridden : 1;
+
+ ContentCache(const FileEntry *Ent = 0)
+ : Buffer(0, false), OrigEntry(Ent), ContentsEntry(Ent),
+ SourceLineCache(0), NumLines(0), BufferOverridden(false) {}
+
+ ContentCache(const FileEntry *Ent, const FileEntry *contentEnt)
+ : Buffer(0, false), OrigEntry(Ent), ContentsEntry(contentEnt),
+ SourceLineCache(0), NumLines(0), BufferOverridden(false) {}
+
+ ~ContentCache();
+
+ /// The copy ctor does not allow copies where source object has either
+ /// a non-NULL Buffer or SourceLineCache. Ownership of allocated memory
+ /// is not transferred, so this is a logical error.
+ ContentCache(const ContentCache &RHS)
+ : Buffer(0, false), SourceLineCache(0), BufferOverridden(false)
+ {
+ OrigEntry = RHS.OrigEntry;
+ ContentsEntry = RHS.ContentsEntry;
+
+ assert (RHS.Buffer.getPointer() == 0 && RHS.SourceLineCache == 0 &&
+ "Passed ContentCache object cannot own a buffer.");
+
+ NumLines = RHS.NumLines;
+ }
/// getBuffer - Returns the memory buffer for the associated content.
///
@@ -159,31 +191,6 @@ namespace SrcMgr {
return (Buffer.getInt() & DoNotFreeFlag) == 0;
}
- ContentCache(const FileEntry *Ent = 0)
- : Buffer(0, false), OrigEntry(Ent), ContentsEntry(Ent),
- SourceLineCache(0), NumLines(0) {}
-
- ContentCache(const FileEntry *Ent, const FileEntry *contentEnt)
- : Buffer(0, false), OrigEntry(Ent), ContentsEntry(contentEnt),
- SourceLineCache(0), NumLines(0) {}
-
- ~ContentCache();
-
- /// The copy ctor does not allow copies where source object has either
- /// a non-NULL Buffer or SourceLineCache. Ownership of allocated memory
- /// is not transferred, so this is a logical error.
- ContentCache(const ContentCache &RHS)
- : Buffer(0, false), SourceLineCache(0)
- {
- OrigEntry = RHS.OrigEntry;
- ContentsEntry = RHS.ContentsEntry;
-
- assert (RHS.Buffer.getPointer() == 0 && RHS.SourceLineCache == 0 &&
- "Passed ContentCache object cannot own a buffer.");
-
- NumLines = RHS.NumLines;
- }
-
private:
// Disable assignments.
ContentCache &operator=(const ContentCache& RHS);
@@ -293,6 +300,11 @@ namespace SrcMgr {
SourceLocation::getFromRawEncoding(ExpansionLocEnd).isInvalid();
}
+ bool isFunctionMacroExpansion() const {
+ return getExpansionLocStart().isValid() &&
+ getExpansionLocStart() != getExpansionLocEnd();
+ }
+
/// create - Return a ExpansionInfo for an expansion. Start and End specify
/// the expansion range (where the macro is expanded), and SpellingLoc
/// specifies the spelling location (where the characters from the token
@@ -431,8 +443,7 @@ public:
// to determine which came first. This will also take care the case where
// one of the locations points at the inclusion/expansion point of the other
// in which case its FileID will come before the other.
- if (LOffset == ROffset &&
- (LQueryFID != CommonFID || RQueryFID != CommonFID))
+ if (LOffset == ROffset)
return IsLQFIDBeforeRQFID;
return LOffset < ROffset;
@@ -472,7 +483,7 @@ public:
/// the case of a macro expansion, for example, the spelling location indicates
/// where the expanded token came from and the expansion location specifies
/// where it was expanded.
-class SourceManager : public llvm::RefCountedBase<SourceManager> {
+class SourceManager : public RefCountedBase<SourceManager> {
/// \brief DiagnosticsEngine object.
DiagnosticsEngine &Diag;
@@ -508,7 +519,7 @@ class SourceManager : public llvm::RefCountedBase<SourceManager> {
///
/// Negative FileIDs are indexes into this table. To get from ID to an index,
/// use (-ID - 2).
- std::vector<SrcMgr::SLocEntry> LoadedSLocEntryTable;
+ mutable std::vector<SrcMgr::SLocEntry> LoadedSLocEntryTable;
/// \brief The starting offset of the next local SLocEntry.
///
@@ -565,11 +576,13 @@ class SourceManager : public llvm::RefCountedBase<SourceManager> {
// Cache for the "fake" buffer used for error-recovery purposes.
mutable llvm::MemoryBuffer *FakeBufferForRecovery;
+ mutable SrcMgr::ContentCache *FakeContentCacheForRecovery;
+
/// \brief Lazily computed map of macro argument chunks to their expanded
/// source location.
typedef std::map<unsigned, SourceLocation> MacroArgsMap;
- mutable llvm::DenseMap<FileID, MacroArgsMap *> MacroArgsCacheMap;
+ mutable llvm::DenseMap<FileID, MacroArgsMap *> MacroArgsCacheMap;
// SourceManager doesn't support copy construction.
explicit SourceManager(const SourceManager&);
@@ -607,12 +620,19 @@ public:
FileID getMainFileID() const { return MainFileID; }
/// createMainFileID - Create the FileID for the main source file.
- FileID createMainFileID(const FileEntry *SourceFile) {
+ FileID createMainFileID(const FileEntry *SourceFile,
+ SrcMgr::CharacteristicKind Kind = SrcMgr::C_User) {
assert(MainFileID.isInvalid() && "MainFileID already set!");
- MainFileID = createFileID(SourceFile, SourceLocation(), SrcMgr::C_User);
+ MainFileID = createFileID(SourceFile, SourceLocation(), Kind);
return MainFileID;
}
+ /// \brief Set the file ID for the main source file.
+ void setMainFileID(FileID FID) {
+ assert(MainFileID.isInvalid() && "MainFileID already set!");
+ MainFileID = FID;
+ }
+
/// \brief Set the file ID for the precompiled preamble.
void setPreambleFileID(FileID Preamble) {
assert(PreambleFileID.isInvalid() && "PreambleFileID already set!");
@@ -641,8 +661,9 @@ public:
/// specified memory buffer. This does no caching of the buffer and takes
/// ownership of the MemoryBuffer, so only pass a MemoryBuffer to this once.
FileID createFileIDForMemBuffer(const llvm::MemoryBuffer *Buffer,
- int LoadedID = 0, unsigned LoadedOffset = 0) {
- return createFileID(createMemBufferContentCache(Buffer), SourceLocation(),
+ int LoadedID = 0, unsigned LoadedOffset = 0,
+ SourceLocation IncludeLoc = SourceLocation()) {
+ return createFileID(createMemBufferContentCache(Buffer), IncludeLoc,
SrcMgr::C_User, LoadedID, LoadedOffset);
}
@@ -738,13 +759,19 @@ public:
if (MyInvalid || !Entry.isFile())
return 0;
- return Entry.getFile().getContentCache()->OrigEntry;
+ const SrcMgr::ContentCache *Content = Entry.getFile().getContentCache();
+ if (!Content)
+ return 0;
+ return Content->OrigEntry;
}
/// Returns the FileEntry record for the provided SLocEntry.
const FileEntry *getFileEntryForSLocEntry(const SrcMgr::SLocEntry &sloc) const
{
- return sloc.getFile().getContentCache()->OrigEntry;
+ const SrcMgr::ContentCache *Content = sloc.getFile().getContentCache();
+ if (!Content)
+ return 0;
+ return Content->OrigEntry;
}
/// getBufferData - Return a StringRef to the source buffer data for the
@@ -755,7 +782,7 @@ public:
StringRef getBufferData(FileID FID, bool *Invalid = 0) const;
/// \brief Get the number of FileIDs (files and macros) that were created
- /// during preprocessing of \arg FID, including it.
+ /// during preprocessing of \p FID, including it.
unsigned getNumCreatedFIDsForFileID(FileID FID) const {
bool Invalid = false;
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
@@ -766,7 +793,7 @@ public:
}
/// \brief Set the number of FileIDs (files and macros) that were created
- /// during preprocessing of \arg FID, including it.
+ /// during preprocessing of \p FID, including it.
void setNumCreatedFIDsForFileID(FileID FID, unsigned NumFIDs) const {
bool Invalid = false;
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
@@ -807,8 +834,20 @@ public:
unsigned FileOffset = Entry.getOffset();
return SourceLocation::getFileLoc(FileOffset);
}
+
+ /// \brief Return the source location corresponding to the last byte of the
+ /// specified file.
+ SourceLocation getLocForEndOfFile(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return SourceLocation();
+
+ unsigned FileOffset = Entry.getOffset();
+ return SourceLocation::getFileLoc(FileOffset + getFileIDSize(FID) - 1);
+ }
- /// \brief Returns the include location if \arg FID is a #include'd file
+ /// \brief Returns the include location if \p FID is a #include'd file
/// otherwise it returns an invalid location.
SourceLocation getIncludeLoc(FileID FID) const {
bool Invalid = false;
@@ -828,7 +867,7 @@ public:
return getExpansionLocSlowCase(Loc);
}
- /// \brief Given \arg Loc, if it is a macro location return the expansion
+ /// \brief Given \p Loc, if it is a macro location return the expansion
/// location or the spelling location, depending on if it comes from a
/// macro argument or not.
SourceLocation getFileLoc(SourceLocation Loc) const {
@@ -868,7 +907,11 @@ public:
/// offset from the start of the buffer of the location.
std::pair<FileID, unsigned> getDecomposedLoc(SourceLocation Loc) const {
FileID FID = getFileID(Loc);
- return std::make_pair(FID, Loc.getOffset()-getSLocEntry(FID).getOffset());
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &E = getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return std::make_pair(FileID(), 0);
+ return std::make_pair(FID, Loc.getOffset()-E.getOffset());
}
/// getDecomposedExpansionLoc - Decompose the specified location into a raw
@@ -877,7 +920,10 @@ public:
std::pair<FileID, unsigned>
getDecomposedExpansionLoc(SourceLocation Loc) const {
FileID FID = getFileID(Loc);
- const SrcMgr::SLocEntry *E = &getSLocEntry(FID);
+ bool Invalid = false;
+ const SrcMgr::SLocEntry *E = &getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return std::make_pair(FileID(), 0);
unsigned Offset = Loc.getOffset()-E->getOffset();
if (Loc.isFileID())
@@ -892,7 +938,10 @@ public:
std::pair<FileID, unsigned>
getDecomposedSpellingLoc(SourceLocation Loc) const {
FileID FID = getFileID(Loc);
- const SrcMgr::SLocEntry *E = &getSLocEntry(FID);
+ bool Invalid = false;
+ const SrcMgr::SLocEntry *E = &getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return std::make_pair(FileID(), 0);
unsigned Offset = Loc.getOffset()-E->getOffset();
if (Loc.isFileID())
@@ -914,10 +963,10 @@ public:
/// expanded.
bool isMacroArgExpansion(SourceLocation Loc) const;
- /// \brief Returns true if \arg Loc is inside the [\arg Start, +\arg Length)
+ /// \brief Returns true if \p Loc is inside the [\p Start, +\p Length)
/// chunk of the source location address space.
- /// If it's true and \arg RelativeOffset is non-null, it will be set to the
- /// relative offset of \arg Loc inside the chunk.
+ /// If it's true and \p RelativeOffset is non-null, it will be set to the
+ /// relative offset of \p Loc inside the chunk.
bool isInSLocAddrSpace(SourceLocation Loc,
SourceLocation Start, unsigned Length,
unsigned *RelativeOffset = 0) const {
@@ -938,10 +987,10 @@ public:
return false;
}
- /// \brief Return true if both \arg LHS and \arg RHS are in the local source
- /// location address space or the loaded one. If it's true and
- /// \arg RelativeOffset is non-null, it will be set to the offset of \arg RHS
- /// relative to \arg LHS.
+ /// \brief Return true if both \p LHS and \p RHS are in the local source
+ /// location address space or the loaded one. If it's true and \p
+ /// RelativeOffset is non-null, it will be set to the offset of \p RHS
+ /// relative to \p LHS.
bool isInSameSLocAddrSpace(SourceLocation LHS, SourceLocation RHS,
int *RelativeOffset) const {
unsigned LHSOffs = LHS.getOffset(), RHSOffs = RHS.getOffset();
@@ -1041,12 +1090,17 @@ public:
return getFileCharacteristic(Loc) == SrcMgr::C_ExternCSystem;
}
- /// \brief The size of the SLocEnty that \arg FID represents.
+ /// \brief Returns whether \p Loc is expanded from a macro in a system header.
+ bool isInSystemMacro(SourceLocation loc) {
+ return loc.isMacroID() && isInSystemHeader(getSpellingLoc(loc));
+ }
+
+ /// \brief The size of the SLocEntry that \p FID represents.
unsigned getFileIDSize(FileID FID) const;
- /// \brief Given a specific FileID, returns true if \arg Loc is inside that
- /// FileID chunk and sets relative offset (offset of \arg Loc from beginning
- /// of FileID) to \arg relativeOffset.
+ /// \brief Given a specific FileID, returns true if \p Loc is inside that
+ /// FileID chunk and sets relative offset (offset of \p Loc from beginning
+ /// of FileID) to \p relativeOffset.
bool isInFileID(SourceLocation Loc, FileID FID,
unsigned *RelativeOffset = 0) const {
unsigned Offs = Loc.getOffset();
@@ -1124,12 +1178,12 @@ public:
/// first inclusion.
FileID translateFile(const FileEntry *SourceFile) const;
- /// \brief Get the source location in \arg FID for the given line:col.
- /// Returns null location if \arg FID is not a file SLocEntry.
+ /// \brief Get the source location in \p FID for the given line:col.
+ /// Returns null location if \p FID is not a file SLocEntry.
SourceLocation translateLineCol(FileID FID,
unsigned Line, unsigned Col) const;
- /// \brief If \arg Loc points inside a function macro argument, the returned
+ /// \brief If \p Loc points inside a function macro argument, the returned
/// location will be the macro location in which the argument was expanded.
/// If a macro argument is used multiple times, the expanded location will
/// be at the first expansion of the argument.
@@ -1205,14 +1259,19 @@ public:
unsigned loaded_sloc_entry_size() const { return LoadedSLocEntryTable.size();}
/// \brief Get a loaded SLocEntry. This is exposed for indexing.
- const SrcMgr::SLocEntry &getLoadedSLocEntry(unsigned Index, bool *Invalid=0) const {
+ const SrcMgr::SLocEntry &getLoadedSLocEntry(unsigned Index,
+ bool *Invalid = 0) const {
assert(Index < LoadedSLocEntryTable.size() && "Invalid index");
- if (!SLocEntryLoaded[Index])
- ExternalSLocEntries->ReadSLocEntry(-(static_cast<int>(Index) + 2));
- return LoadedSLocEntryTable[Index];
+ if (SLocEntryLoaded[Index])
+ return LoadedSLocEntryTable[Index];
+ return loadSLocEntry(Index, Invalid);
}
const SrcMgr::SLocEntry &getSLocEntry(FileID FID, bool *Invalid = 0) const {
+ if (FID.ID == 0 || FID.ID == -1) {
+ if (Invalid) *Invalid = true;
+ return LocalSLocEntryTable[0];
+ }
return getSLocEntryByID(FID.ID);
}
@@ -1233,18 +1292,32 @@ public:
std::pair<int, unsigned>
AllocateLoadedSLocEntries(unsigned NumSLocEntries, unsigned TotalSize);
- /// \brief Returns true if \arg Loc came from a PCH/Module.
+ /// \brief Returns true if \p Loc came from a PCH/Module.
bool isLoadedSourceLocation(SourceLocation Loc) const {
return Loc.getOffset() >= CurrentLoadedOffset;
}
- /// \brief Returns true if \arg Loc did not come from a PCH/Module.
+ /// \brief Returns true if \p Loc did not come from a PCH/Module.
bool isLocalSourceLocation(SourceLocation Loc) const {
return Loc.getOffset() < NextLocalOffset;
}
+ /// \brief Returns true if \p FID came from a PCH/Module.
+ bool isLoadedFileID(FileID FID) const {
+ assert(FID.ID != -1 && "Using FileID sentinel value");
+ return FID.ID < 0;
+ }
+
+ /// \brief Returns true if \p FID did not come from a PCH/Module.
+ bool isLocalFileID(FileID FID) const {
+ return !isLoadedFileID(FID);
+ }
+
private:
const llvm::MemoryBuffer *getFakeBufferForRecovery() const;
+ const SrcMgr::ContentCache *getFakeContentCacheForRecovery() const;
+
+ const SrcMgr::SLocEntry &loadSLocEntry(unsigned Index, bool *Invalid) const;
/// \brief Get the entry with the given unwrapped FileID.
const SrcMgr::SLocEntry &getSLocEntryByID(int ID) const {
@@ -1254,8 +1327,9 @@ private:
return getLocalSLocEntry(static_cast<unsigned>(ID));
}
- const SrcMgr::SLocEntry &getLoadedSLocEntryByID(int ID) const {
- return getLoadedSLocEntry(static_cast<unsigned>(-ID - 2));
+ const SrcMgr::SLocEntry &getLoadedSLocEntryByID(int ID,
+ bool *Invalid = 0) const {
+ return getLoadedSLocEntry(static_cast<unsigned>(-ID - 2), Invalid);
}
/// createExpansionLoc - Implements the common elements of storing an
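Note on the SourceManager hunks above: getDecomposedLoc and its expansion/spelling variants now hand back a default-constructed FileID instead of asserting when an SLocEntry cannot be loaded. A minimal caller-side sketch under that assumption (SM and Loc are hypothetical; only the signatures shown in the hunks are relied on):

// Sketch only: assumes a clang::SourceManager SM and a SourceLocation Loc
// obtained elsewhere; checks the invalid-FileID sentinel introduced above.
#include "clang/Basic/SourceManager.h"

unsigned offsetOrZero(const clang::SourceManager &SM,
                      clang::SourceLocation Loc) {
  std::pair<clang::FileID, unsigned> D = SM.getDecomposedLoc(Loc);
  if (D.first.isInvalid())
    return 0;      // recovery path: the SLocEntry failed to load
  return D.second; // offset of Loc from the start of its buffer
}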
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
index 2a95d61..9e71827 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
@@ -40,6 +40,7 @@ namespace clang {
TST_char16, // C++0x char16_t
TST_char32, // C++0x char32_t
TST_int,
+ TST_int128,
TST_half, // OpenCL half, ARM NEON __fp16
TST_float,
TST_double,
@@ -58,7 +59,7 @@ namespace clang {
TST_underlyingType, // __underlying_type for C++0x
TST_auto, // C++0x auto
TST_unknown_anytype, // __unknown_anytype extension
- TST_atomic, // C1X _Atomic
+ TST_atomic, // C11 _Atomic
TST_error // erroneous type
};
@@ -113,7 +114,12 @@ namespace clang {
/// An Objective C property is a logical field of an Objective-C
/// object which is read and written via Objective C method calls.
- OK_ObjCProperty
+ OK_ObjCProperty,
+
+ /// An Objective-C array/dictionary subscripting expression, which reads an
+ /// object from or writes an object to the subscripted array/dictionary
+ /// element via Objective-C method calls.
+ OK_ObjCSubscript
};
// \brief Describes the kind of template specialization that a
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
index 7b3d776..67d71e4 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
@@ -77,6 +77,7 @@ def ImplicitValueInitExpr : DStmt<Expr>;
def ParenListExpr : DStmt<Expr>;
def VAArgExpr : DStmt<Expr>;
def GenericSelectionExpr : DStmt<Expr>;
+def PseudoObjectExpr : DStmt<Expr>;
// Atomic expressions
def AtomicExpr : DStmt<Expr>;
@@ -97,6 +98,7 @@ def CXXReinterpretCastExpr : DStmt<CXXNamedCastExpr>;
def CXXConstCastExpr : DStmt<CXXNamedCastExpr>;
def CXXFunctionalCastExpr : DStmt<ExplicitCastExpr>;
def CXXTypeidExpr : DStmt<Expr>;
+def UserDefinedLiteral : DStmt<CallExpr>;
def CXXBoolLiteralExpr : DStmt<Expr>;
def CXXNullPtrLiteralExpr : DStmt<Expr>;
def CXXThisExpr : DStmt<Expr>;
@@ -106,6 +108,7 @@ def CXXScalarValueInitExpr : DStmt<Expr>;
def CXXNewExpr : DStmt<Expr>;
def CXXDeleteExpr : DStmt<Expr>;
def CXXPseudoDestructorExpr : DStmt<Expr>;
+def TypeTraitExpr : DStmt<Expr>;
def UnaryTypeTraitExpr : DStmt<Expr>;
def BinaryTypeTraitExpr : DStmt<Expr>;
def ArrayTypeTraitExpr : DStmt<Expr>;
@@ -126,9 +129,13 @@ def SizeOfPackExpr : DStmt<Expr>;
def SubstNonTypeTemplateParmExpr : DStmt<Expr>;
def SubstNonTypeTemplateParmPackExpr : DStmt<Expr>;
def MaterializeTemporaryExpr : DStmt<Expr>;
+def LambdaExpr : DStmt<Expr>;
// Obj-C Expressions.
def ObjCStringLiteral : DStmt<Expr>;
+def ObjCNumericLiteral : DStmt<Expr>;
+def ObjCArrayLiteral : DStmt<Expr>;
+def ObjCDictionaryLiteral : DStmt<Expr>;
def ObjCEncodeExpr : DStmt<Expr>;
def ObjCMessageExpr : DStmt<Expr>;
def ObjCSelectorExpr : DStmt<Expr>;
@@ -137,6 +144,8 @@ def ObjCIvarRefExpr : DStmt<Expr>;
def ObjCPropertyRefExpr : DStmt<Expr>;
def ObjCIsaExpr : DStmt<Expr>;
def ObjCIndirectCopyRestoreExpr : DStmt<Expr>;
+def ObjCBoolLiteralExpr : DStmt<Expr>;
+def ObjCSubscriptRefExpr : DStmt<Expr>;
// Obj-C ARC Expressions.
def ObjCBridgedCastExpr : DStmt<ExplicitCastExpr>;
@@ -147,7 +156,6 @@ def CUDAKernelCallExpr : DStmt<CallExpr>;
// Clang Extensions.
def ShuffleVectorExpr : DStmt<Expr>;
def BlockExpr : DStmt<Expr>;
-def BlockDeclRefExpr : DStmt<Expr>;
def OpaqueValueExpr : DStmt<Expr>;
// Microsoft Extensions.
@@ -155,6 +163,7 @@ def CXXUuidofExpr : DStmt<Expr>;
def SEHTryStmt : Stmt;
def SEHExceptStmt : Stmt;
def SEHFinallyStmt : Stmt;
+def MSDependentExistsStmt : Stmt;
// OpenCL Extensions.
def AsTypeExpr : DStmt<Expr>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h
index 8bc60ff..7c04bf7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h
@@ -56,6 +56,55 @@ namespace clang {
};
}
+ /// NeonTypeFlags - Flags to identify the types for overloaded Neon
+ /// builtins. These must be kept in sync with the flags in
+ /// utils/TableGen/NeonEmitter.h.
+ class NeonTypeFlags {
+ enum {
+ EltTypeMask = 0xf,
+ UnsignedFlag = 0x10,
+ QuadFlag = 0x20
+ };
+ uint32_t Flags;
+
+ public:
+ enum EltType {
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ Poly8,
+ Poly16,
+ Float16,
+ Float32
+ };
+
+ NeonTypeFlags(unsigned F) : Flags(F) {}
+ NeonTypeFlags(EltType ET, bool IsUnsigned, bool IsQuad) : Flags(ET) {
+ if (IsUnsigned)
+ Flags |= UnsignedFlag;
+ if (IsQuad)
+ Flags |= QuadFlag;
+ }
+
+ EltType getEltType() const { return (EltType)(Flags & EltTypeMask); }
+ bool isPoly() const {
+ EltType ET = getEltType();
+ return ET == Poly8 || ET == Poly16;
+ }
+ bool isUnsigned() const { return (Flags & UnsignedFlag) != 0; }
+ bool isQuad() const { return (Flags & QuadFlag) != 0; }
+ };
+
+ /// Hexagon builtins
+ namespace Hexagon {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsHexagon.def"
+ LastTSBuiltin
+ };
+ }
} // end namespace clang.
#endif
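A quick round-trip sketch of the NeonTypeFlags encoding defined in the hunk above; the function name is illustrative, everything else is taken from the class as shown:

// Sketch: pack element type plus attribute bits, then query them back.
#include "clang/Basic/TargetBuiltins.h"
#include <cassert>

void neonTypeFlagsDemo() {
  clang::NeonTypeFlags F(clang::NeonTypeFlags::Float32,
                         /*IsUnsigned=*/false, /*IsQuad=*/true);
  assert(F.getEltType() == clang::NeonTypeFlags::Float32);
  assert(F.isQuad() && !F.isUnsigned() && !F.isPoly());
}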
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
index a87af2f..bbd376a 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
@@ -59,11 +59,12 @@ enum TargetCXXABI {
/// TargetInfo - This class exposes information about the current target.
///
-class TargetInfo : public llvm::RefCountedBase<TargetInfo> {
+class TargetInfo : public RefCountedBase<TargetInfo> {
llvm::Triple Triple;
protected:
// Target values set by the ctor of the actual target implementation. Default
// values are specified by the TargetInfo constructor.
+ bool BigEndian;
bool TLSSupported;
bool NoAsmVariants; // True if {|} are normal characters.
unsigned char PointerWidth, PointerAlign;
@@ -76,6 +77,7 @@ protected:
unsigned char LargeArrayMinWidth, LargeArrayAlign;
unsigned char LongWidth, LongAlign;
unsigned char LongLongWidth, LongLongAlign;
+ unsigned char SuitableAlign;
unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
const char *DescriptionString;
const char *UserLabelPrefix;
@@ -91,6 +93,7 @@ protected:
unsigned HasAlignMac68kSupport : 1;
unsigned RealTypeUsesObjCFPRet : 3;
+ unsigned ComplexLongDoubleUsesFP2Ret : 1;
// TargetInfo Constructor. Default initializes all fields.
TargetInfo(const std::string &T);
@@ -211,6 +214,10 @@ public:
unsigned getLongLongWidth() const { return LongLongWidth; }
unsigned getLongLongAlign() const { return LongLongAlign; }
+ /// getSuitableAlign - Return the alignment that is suitable for storing any
+ /// object with a fundamental alignment requirement.
+ unsigned getSuitableAlign() const { return SuitableAlign; }
+
/// getWCharWidth/Align - Return the size of 'wchar_t' for this target, in
/// bits.
unsigned getWCharWidth() const { return getTypeWidth(WCharType); }
@@ -249,6 +256,9 @@ public:
return *LongDoubleFormat;
}
+ /// getFloatEvalMethod - Return the value for the C99 FLT_EVAL_METHOD macro.
+ virtual unsigned getFloatEvalMethod() const { return 0; }
+
// getLargeArrayMinWidth/Align - Return the minimum array size that is
// 'large' and its alignment.
unsigned getLargeArrayMinWidth() const { return LargeArrayMinWidth; }
@@ -327,6 +337,12 @@ public:
return RealTypeUsesObjCFPRet & (1 << T);
}
+ /// \brief Check whether _Complex long double should use the "fp2ret" flavor
+ /// of Obj-C message passing on this target.
+ bool useObjCFP2RetForComplexLongDouble() const {
+ return ComplexLongDoubleUsesFP2Ret;
+ }
+
///===---- Other target property query methods --------------------------===//
/// getTargetDefines - Appends the target-specific #define values for this
@@ -341,6 +357,13 @@ public:
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const = 0;
+ /// isCLZForZeroUndef - The __builtin_clz* and __builtin_ctz* built-in
+ /// functions are specified to have undefined results for zero inputs, but
+ /// on targets that support these operations in a way that provides
+ /// well-defined results for zero without loss of performance, it is a good
+ /// idea to avoid optimizing based on that undef behavior.
+ virtual bool isCLZForZeroUndef() const { return true; }
+
/// getVAListDeclaration - Return the declaration to use for
/// __builtin_va_list, which is target-specific.
virtual const char *getVAListDeclaration() const = 0;
@@ -456,6 +479,19 @@ public:
const unsigned RegNum;
};
+ /// hasProtectedVisibility - Does this target support "protected"
+ /// visibility?
+ ///
+ /// Any target which supports dynamic libraries will naturally support
+ /// something like "default" (meaning that the symbol is visible
+ /// outside this shared object) and "hidden" (meaning that it isn't)
+ /// visibilities, but "protected" is really an ELF-specific concept
+ /// with weird semantics designed around the convenience of dynamic
+ /// linker implementations. Which is not to suggest that there's
+ /// consistent target-independent semantics for "default" visibility
+ /// either; the entire thing is pretty badly mangled.
+ virtual bool hasProtectedVisibility() const { return true; }
+
virtual bool useGlobalsForAutomaticVariables() const { return false; }
/// getCFStringSection - Return the section to use for CFString
@@ -551,7 +587,7 @@ public:
///
/// \return - False on error (invalid feature name).
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
- const std::string &Name,
+ StringRef Name,
bool Enabled) const {
return false;
}
@@ -565,6 +601,11 @@ public:
virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
}
+ /// \brief Determine whether the given target has the given feature.
+ virtual bool hasFeature(StringRef Feature) const {
+ return false;
+ }
+
// getRegParmMax - Returns maximal number of args passed in registers.
unsigned getRegParmMax() const {
assert(RegParmMax < 7 && "RegParmMax value is larger than AST can handle");
@@ -609,6 +650,8 @@ public:
/// which the program should be compiled.
VersionTuple getPlatformMinVersion() const { return PlatformMinVersion; }
+ bool isBigEndian() const { return BigEndian; }
+
protected:
virtual uint64_t getPointerWidthV(unsigned AddrSpace) const {
return PointerWidth;
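The new TargetInfo hooks added above (BigEndian, SuitableAlign, isCLZForZeroUndef, hasFeature) all default conservatively, so a target opts in by overriding. A hedged sketch of that pattern; MyTargetInfo is hypothetical, and the pre-existing pure-virtual members a real target must implement are elided:

// Hypothetical target showing the override pattern for the new hooks.
#include "clang/Basic/TargetInfo.h"

class MyTargetInfo : public clang::TargetInfo {
public:
  MyTargetInfo(const std::string &T) : clang::TargetInfo(T) {
    BigEndian = false;   // new protected member, set in each target's ctor
    SuitableAlign = 128; // bits; surfaces through getSuitableAlign()
  }
  // clz/ctz of zero is well defined on this (hypothetical) target, so the
  // frontend should not optimize on the undefined-result assumption.
  virtual bool isCLZForZeroUndef() const { return false; }
  virtual bool hasFeature(llvm::StringRef Feature) const {
    return Feature == "my-feature"; // illustrative feature name
  }
  // ... remaining pure-virtual members omitted in this sketch.
};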
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
index 35a881c..2e4d34d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
@@ -90,7 +90,8 @@ PPKEYWORD(assert)
PPKEYWORD(unassert)
// Clang extensions
-PPKEYWORD(__export_macro__)
+PPKEYWORD(__public_macro)
+PPKEYWORD(__private_macro)
//===----------------------------------------------------------------------===//
// Language keywords.
@@ -203,7 +204,7 @@ PUNCTUATOR(greatergreatergreater, ">>>")
// is a keyword in the implementation namespace that should
// always be treated as a keyword
// KEYC99 - This is a keyword introduced to C in C99
-// KEYC1X - This is a keyword introduced to C in C1X
+// KEYC11 - This is a keyword introduced to C in C11
// KEYCXX - This is a C++ keyword, or a C++-specific keyword in the
// implementation namespace
// KEYNOCXX - This is a keyword in every non-C++ dialect.
@@ -257,6 +258,9 @@ KEYWORD(_Generic , KEYALL)
KEYWORD(_Imaginary , KEYALL)
KEYWORD(_Static_assert , KEYALL)
KEYWORD(__func__ , KEYALL)
+KEYWORD(__objc_yes , KEYALL)
+KEYWORD(__objc_no , KEYALL)
+
// C++ 2.11p1: Keywords.
KEYWORD(asm , KEYCXX|KEYGNU)
@@ -328,6 +332,7 @@ KEYWORD(__builtin_types_compatible_p, KEYALL)
KEYWORD(__builtin_va_arg , KEYALL)
KEYWORD(__extension__ , KEYALL)
KEYWORD(__imag , KEYALL)
+KEYWORD(__int128 , KEYALL)
KEYWORD(__label__ , KEYALL)
KEYWORD(__real , KEYALL)
KEYWORD(__thread , KEYALL)
@@ -352,6 +357,7 @@ KEYWORD(__is_class , KEYCXX)
KEYWORD(__is_convertible_to , KEYCXX)
KEYWORD(__is_empty , KEYCXX)
KEYWORD(__is_enum , KEYCXX)
+KEYWORD(__is_final , KEYCXX)
// Tentative name - there's no implementation of std::is_literal_type yet.
KEYWORD(__is_literal , KEYCXX)
// Name for GCC 4.6 compatibility - people have already written libraries using
@@ -363,7 +369,9 @@ KEYWORD(__is_trivial , KEYCXX)
KEYWORD(__is_union , KEYCXX)
// Clang-only C++ Type Traits
+KEYWORD(__is_trivially_constructible, KEYCXX)
KEYWORD(__is_trivially_copyable , KEYCXX)
+KEYWORD(__is_trivially_assignable , KEYCXX)
KEYWORD(__underlying_type , KEYCXX)
// Embarcadero Expression Traits
@@ -403,7 +411,6 @@ KEYWORD(__array_extent , KEYCXX)
// Apple Extension.
KEYWORD(__private_extern__ , KEYALL)
-KEYWORD(__import_module__ , KEYALL)
KEYWORD(__module_private__ , KEYALL)
// Microsoft Extension.
@@ -484,7 +491,6 @@ KEYWORD(__ptr32 , KEYMS)
KEYWORD(__w64 , KEYMS)
KEYWORD(__uuidof , KEYMS | KEYBORLAND)
KEYWORD(__try , KEYMS | KEYBORLAND)
-KEYWORD(__except , KEYMS | KEYBORLAND)
KEYWORD(__finally , KEYMS | KEYBORLAND)
KEYWORD(__leave , KEYMS | KEYBORLAND)
KEYWORD(__int64 , KEYMS)
@@ -501,7 +507,7 @@ ALIAS("_thiscall" , __thiscall , KEYMS)
ALIAS("_uuidof" , __uuidof , KEYMS | KEYBORLAND)
ALIAS("_inline" , inline , KEYMS)
ALIAS("_declspec" , __declspec , KEYMS)
-ALIAS("__interface" , class , KEYMS)
+ALIAS("__interface" , struct , KEYMS)
// Borland Extensions which should be disabled in strict conformance mode.
ALIAS("_pascal" , __pascal , KEYBORLAND)
@@ -547,6 +553,7 @@ OBJC2_AT_KEYWORD(required)
OBJC2_AT_KEYWORD(optional)
OBJC2_AT_KEYWORD(synthesize)
OBJC2_AT_KEYWORD(dynamic)
+OBJC2_AT_KEYWORD(__experimental_modules_import)
// TODO: What to do about context-sensitive keywords like:
// bycopy/byref/in/inout/oneway/out?
@@ -559,12 +566,24 @@ ANNOTATION(template_id) // annotation for a C++ template-id that names a
// function template specialization (not a type),
// e.g., "std::swap<int>"
ANNOTATION(primary_expr) // annotation for a primary expression
+ANNOTATION(decltype) // annotation for a decltype expression,
+ // e.g., "decltype(foo.bar())"
// Annotation for #pragma unused(...)
// For each argument inside the parentheses the pragma handler will produce
// one 'pragma_unused' annotation token followed by the argument token.
ANNOTATION(pragma_unused)
+// Annotation for #pragma GCC visibility...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_vis)
+
+// Annotation for #pragma pack...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_pack)
+
#undef ANNOTATION
#undef TESTING_KEYWORD
#undef OBJC2_AT_KEYWORD
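TokenKinds.def, including the keywords and annotations added above, is an X-macro table: a consumer defines the macros it cares about and includes the file, and the .def supplies defaults so KEYWORD, ANNOTATION, etc. all funnel into TOK. A sketch of the usual consumption pattern (this mirrors how clang builds its TokenKind enum, though that code is not part of this diff):

// Sketch: expand the whole token table into enumerators.
namespace tok {
enum TokenKind {
#define TOK(X) X,
#include "clang/Basic/TokenKinds.def"
  NUM_TOKENS
};
} // end namespace tok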
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
index a7a45bd..721f44f 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
@@ -35,6 +35,7 @@ namespace clang {
UTT_IsConst,
UTT_IsEmpty,
UTT_IsEnum,
+ UTT_IsFinal,
UTT_IsFloatingPoint,
UTT_IsFunction,
UTT_IsFundamental,
@@ -67,7 +68,8 @@ namespace clang {
BTT_IsConvertible,
BTT_IsConvertibleTo,
BTT_IsSame,
- BTT_TypeCompatible
+ BTT_TypeCompatible,
+ BTT_IsTriviallyAssignable
};
/// ArrayTypeTrait - Names for the array type traits.
@@ -82,6 +84,12 @@ namespace clang {
UETT_AlignOf,
UETT_VecStep
};
+
+ /// \brief Names for type traits that operate specifically on types.
+ enum TypeTrait {
+ TT_IsTriviallyConstructible
+ };
+
}
#endif
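The new trait enumerators map to source-level builtins (__is_trivially_constructible and __is_trivially_assignable were added as keywords earlier in this diff). A sketch of what the variadic constructible trait expresses; it compiles only with a clang that carries this change:

// Sketch: "is T trivially constructible from the given argument types?"
struct Pod { int x; };
struct HasCtor { HasCtor(int); };

// true: value-initializing a POD is trivial
static const bool A = __is_trivially_constructible(Pod);
// false: constructible from int, but only via a user-provided constructor
static const bool B = __is_trivially_constructible(HasCtor, int);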
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Version.h b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
index 15cdf1f..f3f5b5a 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Version.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This header defines version macros and version-related utility functions
+// This header defines version macros and version-related utility functions
// for Clang.
//
//===----------------------------------------------------------------------===//
@@ -29,7 +29,8 @@
/// \brief A string that describes the Clang version number, e.g.,
/// "1.0".
#define CLANG_VERSION_STRING \
- CLANG_MAKE_VERSION_STRING(CLANG_VERSION_MAJOR,CLANG_VERSION_MINOR,CLANG_VERSION_PATCHLEVEL)
+ CLANG_MAKE_VERSION_STRING(CLANG_VERSION_MAJOR,CLANG_VERSION_MINOR, \
+ CLANG_VERSION_PATCHLEVEL)
#else
/// \brief Helper macro for CLANG_VERSION_STRING.
#define CLANG_MAKE_VERSION_STRING(X,Y) CLANG_MAKE_VERSION_STRING2(X.Y)
@@ -41,21 +42,30 @@
#endif
namespace clang {
- /// \brief Retrieves the repository path (e.g., Subversion path) that
+ /// \brief Retrieves the repository path (e.g., Subversion path) that
/// identifies the particular Clang branch, tag, or trunk from which this
/// Clang was built.
std::string getClangRepositoryPath();
-
+
+ /// \brief Retrieves the repository path from which LLVM was built. Supports
+ /// LLVM residing in a separate repository from clang.
+ std::string getLLVMRepositoryPath();
+
/// \brief Retrieves the repository revision number (or identifier) from which
/// this Clang was built.
std::string getClangRevision();
-
+
+ /// \brief Retrieves the repository revision number (or identifier) from which
+ /// LLVM was built. If Clang and LLVM are in the same repository, this returns
+ /// the same string as getClangRevision.
+ std::string getLLVMRevision();
+
/// \brief Retrieves the full repository version that is an amalgamation of
/// the information in getClangRepositoryPath() and getClangRevision().
std::string getClangFullRepositoryVersion();
-
+
/// \brief Retrieves a string representing the complete clang version,
- /// which includes the clang version number, the repository version,
+ /// which includes the clang version number, the repository version,
/// and the vendor tag.
std::string getClangFullVersion();
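The new LLVM-side accessors parallel the existing Clang ones; a trivial reporting sketch (assumes linking against the library that implements clang/Basic/Version.h):

// Sketch: per the comments above, getLLVMRevision() returns the same
// string as getClangRevision() when LLVM and Clang share a repository.
#include "clang/Basic/Version.h"
#include <cstdio>

void printRevisions() {
  std::printf("clang: %s @ %s\n", clang::getClangRepositoryPath().c_str(),
              clang::getClangRevision().c_str());
  std::printf("llvm:  %s @ %s\n", clang::getLLVMRepositoryPath().c_str(),
              clang::getLLVMRevision().c_str());
}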
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
index f1a2f6e..7fa589f 100644
--- a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
@@ -24,7 +24,8 @@ class BackendConsumer;
class CodeGenAction : public ASTFrontendAction {
private:
unsigned Act;
- llvm::OwningPtr<llvm::Module> TheModule;
+ OwningPtr<llvm::Module> TheModule;
+ llvm::Module *LinkModule;
llvm::LLVMContext *VMContext;
bool OwnsVMContext;
@@ -46,6 +47,11 @@ protected:
public:
~CodeGenAction();
+ /// setLinkModule - Set the link module to be used by this action. If a link
+ /// module is not provided, and CodeGenOptions::LinkBitcodeFile is non-empty,
+ /// the action will load it from the specified file.
+ void setLinkModule(llvm::Module *Mod) { LinkModule = Mod; }
+
/// takeModule - Take the generated LLVM module, for use after the action has
/// been run. The result may be null on failure.
llvm::Module *takeModule();
@@ -57,31 +63,37 @@ public:
};
class EmitAssemblyAction : public CodeGenAction {
+ virtual void anchor();
public:
EmitAssemblyAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitBCAction : public CodeGenAction {
+ virtual void anchor();
public:
EmitBCAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitLLVMAction : public CodeGenAction {
+ virtual void anchor();
public:
EmitLLVMAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitLLVMOnlyAction : public CodeGenAction {
+ virtual void anchor();
public:
EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitCodeGenOnlyAction : public CodeGenAction {
+ virtual void anchor();
public:
EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext = 0);
};
class EmitObjAction : public CodeGenAction {
+ virtual void anchor();
public:
EmitObjAction(llvm::LLVMContext *_VMContext = 0);
};
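setLinkModule pairs naturally with takeModule in embedding tools. A hedged sketch: the CompilerInstance setup is elided, and ExecuteAction is the pre-existing frontend entry point (not shown in this diff):

// Sketch: run an IR-only action, optionally pre-linking extra bitcode.
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Frontend/CompilerInstance.h"

llvm::Module *emitIR(clang::CompilerInstance &CI, llvm::Module *Extra) {
  clang::EmitLLVMOnlyAction Act;
  if (Extra)
    Act.setLinkModule(Extra); // new hook; takes precedence over
                              // CodeGenOptions::LinkBitcodeFile
  if (!CI.ExecuteAction(Act))
    return 0;
  return Act.takeModule();    // may be null on failure (per comment above)
}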
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h b/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h
index 38aba89..ba9d1f9 100644
--- a/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h
@@ -28,6 +28,7 @@ namespace clang {
class CodeGenOptions;
class CodeGenerator : public ASTConsumer {
+ virtual void anchor();
public:
virtual llvm::Module* GetModule() = 0;
virtual llvm::Module* ReleaseModule() = 0;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Action.h b/contrib/llvm/tools/clang/include/clang/Driver/Action.h
index a33c33b..6e317a0 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Action.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Action.h
@@ -39,6 +39,7 @@ public:
PreprocessJobClass,
PrecompileJobClass,
AnalyzeJobClass,
+ MigrateJobClass,
CompileJobClass,
AssembleJobClass,
LinkJobClass,
@@ -94,6 +95,7 @@ public:
};
class InputAction : public Action {
+ virtual void anchor();
const Arg &Input;
public:
InputAction(const Arg &_Input, types::ID _Type);
@@ -107,6 +109,7 @@ public:
};
class BindArchAction : public Action {
+ virtual void anchor();
/// The architecture to bind, or 0 if the default architecture
/// should be bound.
const char *ArchName;
@@ -123,6 +126,7 @@ public:
};
class JobAction : public Action {
+ virtual void anchor();
protected:
JobAction(ActionClass Kind, Action *Input, types::ID Type);
JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type);
@@ -136,6 +140,7 @@ public:
};
class PreprocessJobAction : public JobAction {
+ virtual void anchor();
public:
PreprocessJobAction(Action *Input, types::ID OutputType);
@@ -146,6 +151,7 @@ public:
};
class PrecompileJobAction : public JobAction {
+ virtual void anchor();
public:
PrecompileJobAction(Action *Input, types::ID OutputType);
@@ -156,6 +162,7 @@ public:
};
class AnalyzeJobAction : public JobAction {
+ virtual void anchor();
public:
AnalyzeJobAction(Action *Input, types::ID OutputType);
@@ -165,7 +172,19 @@ public:
static bool classof(const AnalyzeJobAction *) { return true; }
};
+class MigrateJobAction : public JobAction {
+ virtual void anchor();
+public:
+ MigrateJobAction(Action *Input, types::ID OutputType);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == MigrateJobClass;
+ }
+ static bool classof(const MigrateJobAction *) { return true; }
+};
+
class CompileJobAction : public JobAction {
+ virtual void anchor();
public:
CompileJobAction(Action *Input, types::ID OutputType);
@@ -176,6 +195,7 @@ public:
};
class AssembleJobAction : public JobAction {
+ virtual void anchor();
public:
AssembleJobAction(Action *Input, types::ID OutputType);
@@ -186,6 +206,7 @@ public:
};
class LinkJobAction : public JobAction {
+ virtual void anchor();
public:
LinkJobAction(ActionList &Inputs, types::ID Type);
@@ -196,6 +217,7 @@ public:
};
class LipoJobAction : public JobAction {
+ virtual void anchor();
public:
LipoJobAction(ActionList &Inputs, types::ID Type);
@@ -206,6 +228,7 @@ public:
};
class DsymutilJobAction : public JobAction {
+ virtual void anchor();
public:
DsymutilJobAction(ActionList &Inputs, types::ID Type);
@@ -216,6 +239,7 @@ public:
};
class VerifyJobAction : public JobAction {
+ virtual void anchor();
public:
VerifyJobAction(ActionList &Inputs, types::ID Type);
static bool classof(const Action *A) {
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
index 04faf64..3affb00 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
@@ -185,6 +185,8 @@ namespace driver {
Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const;
Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
OptSpecifier Id3) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4) const;
/// getArgString - Return the input argument string at \arg Index.
virtual const char *getArgString(unsigned Index) const = 0;
@@ -203,9 +205,17 @@ namespace driver {
StringRef Default = "") const;
/// getLastArgIntValue - Return the value of the last argument as an integer,
+ /// or a default. If Diags is non-null, emits an error if the argument
+ /// is given, but non-integral.
+ int getLastArgIntValue(OptSpecifier Id, int Default,
+ DiagnosticsEngine *Diags = 0) const;
+
+ /// getLastArgIntValue - Return the value of the last argument as an integer,
/// or a default. Emits an error if the argument is given, but non-integral.
int getLastArgIntValue(OptSpecifier Id, int Default,
- DiagnosticsEngine &Diags) const;
+ DiagnosticsEngine &Diags) const {
+ return getLastArgIntValue(Id, Default, &Diags);
+ }
/// getAllArgValues - Get the values of all instances of the given argument
/// as strings.
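A usage sketch for the pointer-taking overload added above; the wrapper name is illustrative:

// Sketch: with a null DiagnosticsEngine a malformed (non-integral) value
// is ignored and Default is returned; pass a real engine to diagnose it.
#include "clang/Driver/ArgList.h"

int intOptionOrDefault(const clang::driver::ArgList &Args,
                       clang::driver::OptSpecifier Id, int Default) {
  return Args.getLastArgIntValue(Id, Default, /*Diags=*/0);
}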
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
index b1067b7..37ba602 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
@@ -20,6 +20,10 @@ include "OptParser.td"
def triple : Separate<"-triple">,
HelpText<"Specify target triple (e.g. x86_64-pc-linux-gnu)">;
+def target_cpu : Separate<"-target-cpu">,
+ HelpText<"Target a specific cpu type">;
+def target_feature : Separate<"-target-feature">,
+ HelpText<"Target specific attributes">;
//===----------------------------------------------------------------------===//
// Language Options
@@ -80,3 +84,8 @@ def no_exec_stack : Flag<"--noexecstack">,
def fatal_warnings : Flag<"--fatal-warnings">,
HelpText<"Consider warnings as errors">;
+
+def g : Flag<"-g">, HelpText<"Generate source level debug information">;
+
+def dwarf_debug_flags : Separate<"-dwarf-debug-flags">,
+ HelpText<"The string to embed in the Dwarf debug flags record.">;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
index 1a12e98..88009ed 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
@@ -80,12 +80,34 @@ def analyzer_viz_egraph_graphviz : Flag<"-analyzer-viz-egraph-graphviz">,
HelpText<"Display exploded graph using GraphViz">;
def analyzer_viz_egraph_ubigraph : Flag<"-analyzer-viz-egraph-ubigraph">,
HelpText<"Display exploded graph using Ubigraph">;
-def analyzer_inline_call : Flag<"-analyzer-inline-call">,
- HelpText<"Experimental transfer function inlining callees when its definition is available.">;
+
+def analyzer_inline_max_stack_depth : Separate<"-analyzer-inline-max-stack-depth">,
+ HelpText<"Bound on stack depth while inlining (4 by default)">;
+def analyzer_inline_max_stack_depth_EQ : Joined<"-analyzer-inline-max-stack-depth=">,
+ Alias<analyzer_inline_max_stack_depth>;
+
+def analyzer_inline_max_function_size : Separate<"-analyzer-inline-max-function-size">,
+ HelpText<"Bound on the number of basic blocks in an inlined function (200 by default)">;
+def analyzer_inline_max_function_size_EQ : Joined<"-analyzer-inline-max-function-size=">,
+ Alias<analyzer_inline_max_function_size>;
+
+def analyzer_ipa : Separate<"-analyzer-ipa">,
+ HelpText<"Specify the inter-procedural analysis mode">;
+def analyzer_ipa_EQ : Joined<"-analyzer-ipa=">, Alias<analyzer_ipa>;
+
+def analyzer_inlining_mode : Separate<"-analyzer-inlining-mode">,
+ HelpText<"Specify the function selection heuristic used during inlining">;
+def analyzer_inlining_mode_EQ : Joined<"-analyzer-inlining-mode=">, Alias<analyzer_inlining_mode>;
+
+def analyzer_disable_retry_exhausted : Flag<"-analyzer-disable-retry-exhausted">,
+ HelpText<"Do not re-analyze paths leading to exhausted nodes with a different strategy (may decrease code coverage)">;
+
def analyzer_max_nodes : Separate<"-analyzer-max-nodes">,
HelpText<"The maximum number of nodes the analyzer can generate (150000 default, 0 = no limit)">;
def analyzer_max_loop : Separate<"-analyzer-max-loop">,
HelpText<"The maximum number of times the analyzer will go through a loop">;
+def analyzer_stats : Flag<"-analyzer-stats">,
+ HelpText<"Print internal analyzer statistics.">;
def analyzer_checker : Separate<"-analyzer-checker">,
HelpText<"Choose analyzer checkers to enable">;
@@ -101,6 +123,15 @@ def analyzer_checker_help : Flag<"-analyzer-checker-help">,
HelpText<"Display the list of analyzer checkers that are available">;
//===----------------------------------------------------------------------===//
+// Migrator Options
+//===----------------------------------------------------------------------===//
+def migrator_no_nsalloc_error : Flag<"-no-ns-alloc-error">,
+ HelpText<"Do not error on use of NSAllocateCollectable/NSReallocateCollectable">;
+
+def migrator_no_finalize_removal : Flag<"-no-finalize-removal">,
+ HelpText<"Do not remove finalize method in gc mode">;
+
+//===----------------------------------------------------------------------===//
// CodeGen Options
//===----------------------------------------------------------------------===//
@@ -110,13 +141,21 @@ def disable_llvm_verifier : Flag<"-disable-llvm-verifier">,
HelpText<"Don't run the LLVM IR verifier pass">;
def disable_red_zone : Flag<"-disable-red-zone">,
HelpText<"Do not emit code that uses the red zone.">;
+def fdebug_compilation_dir : Separate<"-fdebug-compilation-dir">,
+ HelpText<"The compilation directory to embed in the debug info.">;
def dwarf_debug_flags : Separate<"-dwarf-debug-flags">,
HelpText<"The string to embed in the Dwarf debug flags record.">;
+def faddress_sanitizer: Flag<"-faddress-sanitizer">,
+ HelpText<"Enable AddressSanitizer instrumentation (memory error detection)">;
+def fthread_sanitizer: Flag<"-fthread-sanitizer">,
+ HelpText<"Enable ThreadSanitizer instrumentation (race detection)">;
def fforbid_guard_variables : Flag<"-fforbid-guard-variables">,
HelpText<"Emit an error if a C++ static local initializer would need a guard variable">;
def g : Flag<"-g">, HelpText<"Generate source level debug information">;
def fno_dwarf2_cfi_asm : Flag<"-fno-dwarf2-cfi-asm">,
HelpText<"Don't use the cfi directives">;
+def fno_dwarf_directory_asm : Flag<"-fno-dwarf-directory-asm">,
+ HelpText<"Don't separate directory and filename in .file directives">;
def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">,
HelpText<"Generate runtime checks for undefined behavior.">;
def flimit_debug_info : Flag<"-flimit-debug-info">,
@@ -127,6 +166,8 @@ def no_implicit_float : Flag<"-no-implicit-float">,
HelpText<"Don't generate implicit floating point instructions (x86-only)">;
def finstrument_functions : Flag<"-finstrument-functions">,
HelpText<"Generate calls to instrument function entry and exit">;
+def fno_limit_debug_info : Flag<"-fno-limit-debug-info">,
+ HelpText<"Do not limit debug information produced to reduce size of debug binary">;
def fno_merge_all_constants : Flag<"-fno-merge-all-constants">,
HelpText<"Disallow merging of constants.">;
def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">,
@@ -137,6 +178,11 @@ def ffunction_sections : Flag<"-ffunction-sections">,
HelpText<"Place each function in its own section (ELF Only)">;
def fdata_sections : Flag<"-fdata-sections">,
HelpText<"Place each data in its own section (ELF Only)">;
+def fstrict_enums : Flag<"-fstrict-enums">,
+ HelpText<"Enable optimizations based on the strict definition of an enum's "
+ "value range.">;
+def ftrap_function_EQ : Joined<"-ftrap-function=">,
+ HelpText<"Issue call to specified function rather than a trap instruction">;
def funroll_loops : Flag<"-funroll-loops">,
HelpText<"Turn on loop unroller">;
def femit_coverage_notes : Flag<"-femit-coverage-notes">,
@@ -158,6 +204,15 @@ def mdebug_pass : Separate<"-mdebug-pass">,
HelpText<"Enable additional debug output">;
def mdisable_fp_elim : Flag<"-mdisable-fp-elim">,
HelpText<"Disable frame pointer elimination optimization">;
+def mdisable_tail_calls : Flag<"-mdisable-tail-calls">,
+ HelpText<"Disable tail call optimization, keeping the call stack accurate">;
+def menable_no_infinities : Flag<"-menable-no-infs">,
+ HelpText<"Allow optimization to assume there are no infinities.">;
+def menable_no_nans : Flag<"-menable-no-nans">,
+ HelpText<"Allow optimization to assume there are no NaNs.">;
+def menable_unsafe_fp_math : Flag<"-menable-unsafe-fp-math">,
+ HelpText<"Allow unsafe floating-point math optimizations which may decrease "
+ "precision">;
def mfloat_abi : Separate<"-mfloat-abi">,
HelpText<"The float ABI to use">;
def mno_global_merge : Flag<"-mno-global-merge">,
@@ -192,6 +247,10 @@ def mms_bitfields : Flag<"-mms-bitfields">,
HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard.">;
def mstackrealign : Flag<"-mstackrealign">,
HelpText<"Force realign the stack at entry to every function.">;
+def mstack_alignment : Joined<"-mstack-alignment=">,
+ HelpText<"Set the stack alignment">;
+def mlink_bitcode_file : Separate<"-mlink-bitcode-file">,
+ HelpText<"Link the given bitcode file before performing optimizations.">;
def O : Joined<"-O">, HelpText<"Optimization level">;
def Os : Flag<"-Os">, HelpText<"Optimize for size">;
def Oz : Flag<"-Oz">, HelpText<"Optimize for size, regardless of performance">;
@@ -203,6 +262,8 @@ def pg : Flag<"-pg">, HelpText<"Enable mcount instrumentation">;
def dependency_file : Separate<"-dependency-file">,
HelpText<"Filename (or -) to write dependency output to">;
+def dependency_dot : Separate<"-dependency-dot">,
+ HelpText<"Filename to write DOT-formatted header dependencies to">;
def sys_header_deps : Flag<"-sys-header-deps">,
HelpText<"Include system headers in dependency output">;
def header_include_file : Separate<"-header-include-file">,
@@ -224,6 +285,9 @@ def dump_build_information : Separate<"-dump-build-information">,
HelpText<"output a dump of some build information to a file">;
def diagnostic_log_file : Separate<"-diagnostic-log-file">,
HelpText<"Filename (or -) to log diagnostics to">;
+def diagnostic_serialized_file : Separate<"-serialize-diagnostic-file">,
+ MetaVarName<"<filename>">,
+ HelpText<"File for serializing diagnostics in a binary format">;
def fno_show_column : Flag<"-fno-show-column">,
HelpText<"Do not include column number on diagnostics">;
def fshow_column : Flag<"-fshow-column">,
@@ -253,8 +317,6 @@ def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-rang
HelpText<"Print source range spans in numeric form">;
def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">,
HelpText<"Print fix-its in machine parseable form">;
-def fdiagnostics_show_name : Flag<"-fdiagnostics-show-name">,
- HelpText<"Print diagnostic name">;
def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">,
HelpText<"Print option name with mappable diagnostics">;
def fdiagnostics_format : Separate<"-fdiagnostics-format">,
@@ -272,6 +334,8 @@ def fmacro_backtrace_limit : Separate<"-fmacro-backtrace-limit">, MetaVarName<"<
HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit).">;
def ftemplate_backtrace_limit : Separate<"-ftemplate-backtrace-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit).">;
+def fconstexpr_backtrace_limit : Separate<"-fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">;
def fmessage_length : Separate<"-fmessage-length">, MetaVarName<"<N>">,
HelpText<"Format message diagnostics so that they fit within N columns or fewer, when possible.">;
def fcolor_diagnostics : Flag<"-fcolor-diagnostics">,
@@ -364,8 +428,11 @@ def ast_view : Flag<"-ast-view">,
HelpText<"Build ASTs and view them with GraphViz">;
def print_decl_contexts : Flag<"-print-decl-contexts">,
HelpText<"Print DeclContexts and their Decls">;
+def pubnames_dump : Flag<"-pubnames-dump">,
+ HelpText<"Print all of the public (global) names in the source, e.g., the "
+ "names of all global declarations and macros">;
def emit_module : Flag<"-emit-module">,
- HelpText<"Generate pre-compiled module file">;
+ HelpText<"Generate pre-compiled module file from a module map">;
def emit_pth : Flag<"-emit-pth">,
HelpText<"Generate pre-tokenized header file">;
def emit_pch : Flag<"-emit-pch">,
@@ -388,21 +455,28 @@ def rewrite_objc : Flag<"-rewrite-objc">,
HelpText<"Rewrite ObjC into C (code rewriter example)">;
def rewrite_macros : Flag<"-rewrite-macros">,
HelpText<"Expand macros without full preprocessing">;
+def migrate : Flag<"-migrate">,
+ HelpText<"Migrate source code">;
}
+def mt_migrate_directory : Separate<"-mt-migrate-directory">,
+ HelpText<"Directory for temporary files produced during ARC or ObjC migration">;
def arcmt_check : Flag<"-arcmt-check">,
HelpText<"Check for ARC migration issues that need manual handling">;
def arcmt_modify : Flag<"-arcmt-modify">,
HelpText<"Apply modifications to files to conform to ARC">;
def arcmt_migrate : Flag<"-arcmt-migrate">,
HelpText<"Apply modifications and produces temporary files that conform to ARC">;
-def arcmt_migrate_directory : Separate<"-arcmt-migrate-directory">,
- HelpText<"Directory for temporary files produced during ARC migration">;
def arcmt_migrate_report_output : Separate<"-arcmt-migrate-report-output">,
HelpText<"Output path for the plist report">;
def arcmt_migrate_emit_arc_errors : Flag<"-arcmt-migrate-emit-errors">,
HelpText<"Emit ARC errors even if the migrator can fix them">;
+def objcmt_migrate_literals : Flag<"-objcmt-migrate-literals">,
+ HelpText<"Enable migration to modern ObjC literals">;
+def objcmt_migrate_subscripting : Flag<"-objcmt-migrate-subscripting">,
+ HelpText<"Enable migration to modern ObjC subscripting">;
+
def working_directory : JoinedOrSeparate<"-working-directory">,
HelpText<"Resolve file paths relative to the specified directory">;
def working_directory_EQ : Joined<"-working-directory=">,
@@ -416,14 +490,25 @@ def ftime_report : Flag<"-ftime-report">,
HelpText<"Print the amount of time each phase of compilation takes">;
def fdump_record_layouts : Flag<"-fdump-record-layouts">,
HelpText<"Dump record layout information">;
+def fdump_record_layouts_simple : Flag<"-fdump-record-layouts-simple">,
+ HelpText<"Dump record layout information in a simple form used for testing">;
def fix_what_you_can : Flag<"-fix-what-you-can">,
HelpText<"Apply fix-it advice even in the presence of unfixable errors">;
+def fix_only_warnings : Flag<"-fix-only-warnings">,
+ HelpText<"Apply fix-it advice only for warnings, not errors">;
+def fixit_recompile : Flag<"-fixit-recompile">,
+ HelpText<"Apply fix-it changes and recompile">;
+def fixit_to_temp : Flag<"-fixit-to-temporary">,
+ HelpText<"Apply fix-it changes to temporary files">;
// Generic forwarding to LLVM options. This should only be used for debugging
// and experimental features.
def mllvm : Separate<"-mllvm">,
HelpText<"Additional arguments to forward to LLVM's option processing">;
+def foverride_record_layout_EQ : Joined<"-foverride-record-layout=">,
+ HelpText<"Override record layouts with those in the given file">;
+
//===----------------------------------------------------------------------===//
// Language Options
//===----------------------------------------------------------------------===//
@@ -440,6 +525,10 @@ def fgnu_keywords : Flag<"-fgnu-keywords">,
HelpText<"Allow GNU-extension keywords regardless of language standard">;
def fgnu89_inline : Flag<"-fgnu89-inline">,
HelpText<"Use the gnu89 inline semantics">;
+def fno_inline : Flag<"-fno-inline">,
+ HelpText<"Disable use of the inline keyword">;
+def fno_inline_functions : Flag<"-fno-inline-functions">,
+ HelpText<"Disable automatic function inlining">;
def fno_gnu_keywords : Flag<"-fno-gnu-keywords">,
HelpText<"Disallow GNU-extension keywords regardless of language standard">;
def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">,
@@ -461,6 +550,10 @@ def fcxx_exceptions : Flag<"-fcxx-exceptions">,
HelpText<"Enable C++ exceptions">;
def fsjlj_exceptions : Flag<"-fsjlj-exceptions">,
HelpText<"Use SjLj style exceptions">;
+def ffast_math : Flag<"-ffast-math">,
+ HelpText<"Enable the *frontend*'s 'fast-math' mode. This has no effect on "
+ "optimizations, but provides a preprocessor macro __FAST_MATH__ the "
+ "same as GCC's -ffast-math flag.">;
def ffreestanding : Flag<"-ffreestanding">,
HelpText<"Assert that the compilation takes place in a freestanding environment">;
def fformat_extensions : Flag<"-fformat-extensions">,
@@ -541,6 +634,8 @@ def fwrapv : Flag<"-fwrapv">,
HelpText<"Treat signed integer overflow as two's complement">;
def pic_level : Separate<"-pic-level">,
HelpText<"Value for __PIC__">;
+def pie_level : Separate<"-pie-level">,
+ HelpText<"Value for __PIE__">;
def pthread : Flag<"-pthread">,
HelpText<"Support POSIX threads in generated code">;
def fpack_struct : Separate<"-fpack-struct">,
@@ -555,6 +650,8 @@ def dump_deserialized_pch_decls : Flag<"-dump-deserialized-decls">,
HelpText<"Dump declarations that are deserialized from PCH, for testing">;
def error_on_deserialized_pch_decl : Separate<"-error-on-deserialized-decl">,
HelpText<"Emit error if a specific declaration is deserialized from PCH, for testing">;
+def error_on_deserialized_pch_decl_EQ : Joined<"-error-on-deserialized-decl=">,
+ Alias<error_on_deserialized_pch_decl>;
def fshort_wchar : Flag<"-fshort-wchar">,
HelpText<"Force wchar_t to be a short unsigned int">;
def fshort_enums : Flag<"-fshort-enums">,
@@ -569,6 +666,8 @@ def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">,
HelpText<"Give inline C++ member functions default visibility by default">;
def ftemplate_depth : Separate<"-ftemplate-depth">,
HelpText<"Maximum depth of recursive template instantiation">;
+def fconstexpr_depth : Separate<"-fconstexpr-depth">,
+ HelpText<"Maximum depth of recursive constexpr function calls">;
def Wlarge_by_value_copy : Separate<"-Wlarge-by-value-copy">,
HelpText<"Warn if a function definition returns or accepts an object larger "
"in bytes that a given value">;
@@ -595,10 +694,16 @@ def funknown_anytype : Flag<"-funknown-anytype">,
HelpText<"Enable parser support for the __unknown_anytype type; for testing purposes only">;
def fdebugger_support : Flag<"-fdebugger-support">,
HelpText<"Enable special debugger support behavior">;
+def fdebugger_cast_result_to_id : Flag<"-fdebugger-cast-result-to-id">,
+ HelpText<"Enable casting unknown expression results to id">;
+def fdebugger_objc_literal : Flag<"-fdebugger-objc-literal">,
+ HelpText<"Enable special debugger support for objective-C subscripting and literals">;
def fdeprecated_macro : Flag<"-fdeprecated-macro">,
HelpText<"Defines the __DEPRECATED macro">;
def fno_deprecated_macro : Flag<"-fno-deprecated-macro">,
HelpText<"Undefines the __DEPRECATED macro">;
+def fapple_pragma_pack : Flag<"-fapple-pragma-pack">,
+ HelpText<"Enable Apple gcc-compatible #pragma pack handling">;
//===----------------------------------------------------------------------===//
// Header Search Options
@@ -613,11 +718,13 @@ def nobuiltininc : Flag<"-nobuiltininc">,
def fmodule_cache_path : Separate<"-fmodule-cache-path">,
MetaVarName<"<directory>">,
HelpText<"Specify the module cache path">;
+def fmodule_name : Joined<"-fmodule-name=">,
+ MetaVarName<"<name>">,
+ HelpText<"Specify the name of the module to build">;
def fdisable_module_hash : Flag<"-fdisable-module-hash">,
HelpText<"Disable the module hash">;
-def fauto_module_import : Flag<"-fauto-module-import">,
- HelpText<"Automatically translate #include/#import into module imports "
- "when possible">;
+def fmodules : Flag<"-fmodules">,
+ HelpText<"Enable the 'modules' language feature">;
def F : JoinedOrSeparate<"-F">, MetaVarName<"<directory>">,
HelpText<"Add directory to framework include search path">;
@@ -639,6 +746,8 @@ def objc_isystem : JoinedOrSeparate<"-objc-isystem">,
def objcxx_isystem : JoinedOrSeparate<"-objcxx-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the ObjC++ SYSTEM include search path">;
+def iframework : JoinedOrSeparate<"-iframework">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to SYSTEM framework search path">;
def isystem : JoinedOrSeparate<"-isystem">, MetaVarName<"<directory>">,
HelpText<"Add directory to SYSTEM include search path">;
def iwithsysroot : JoinedOrSeparate<"-iwithsysroot">,MetaVarName<"<directory>">,
@@ -693,6 +802,8 @@ def undef : Flag<"-undef">, MetaVarName<"<macro>">,
HelpText<"undef all system defines">;
def detailed_preprocessing_record : Flag<"-detailed-preprocessing-record">,
HelpText<"include a detailed record of preprocessing actions">;
+def mqdsp6_compat : Flag<"-mqdsp6-compat">,
+ HelpText<"Enable hexagon-qdsp6 backward compatibility">;
//===----------------------------------------------------------------------===//
// Preprocessed Output Options
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
index 8c99909..fd88c3a 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
@@ -56,6 +56,10 @@ class Compilation {
/// Result files which should be removed on failure.
ArgStringList ResultFiles;
+ /// Result files which are generated correctly on failure, and which should
+ /// only be removed if we crash.
+ ArgStringList FailureResultFiles;
+
/// Redirection for stdout, stderr, etc.
const llvm::sys::Path **Redirects;
@@ -84,6 +88,10 @@ public:
const ArgStringList &getResultFiles() const { return ResultFiles; }
+ const ArgStringList &getFailureResultFiles() const {
+ return FailureResultFiles;
+ }
+
/// getArgsForToolChain - Return the derived argument list for the
/// tool chain \arg TC (or the default tool chain, if TC is not
/// specified).
@@ -106,6 +114,13 @@ public:
return Name;
}
+ /// addFailureResultFile - Add a file to remove if we crash, and return its
+ /// argument.
+ const char *addFailureResultFile(const char *Name) {
+ FailureResultFiles.push_back(Name);
+ return Name;
+ }
+
/// CleanupFileList - Remove the files in the given list.
///
/// \param IssueErrors - Report failures as errors.
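The intent of the new list: ResultFiles are removed after any failed compilation, while FailureResultFiles survive failure and are removed only if the driver itself crashes. A hedged sketch (addResultFile is the pre-existing counterpart; the wrapper is illustrative):

// Sketch: a serialized-diagnostics file should outlive a failed compile,
// since its whole purpose is to report that failure, but not a crash.
#include "clang/Driver/Compilation.h"

void registerOutputs(clang::driver::Compilation &C,
                     const char *Object, const char *DiagFile) {
  C.addResultFile(Object);          // deleted on failure
  C.addFailureResultFile(DiagFile); // deleted only if we crash
}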
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
index 6fdf6fc..0538334 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
@@ -16,6 +16,7 @@
#include "clang/Driver/Types.h"
#include "clang/Driver/Util.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Path.h" // FIXME: Kill when CompilationInfo
@@ -35,7 +36,6 @@ namespace driver {
class Command;
class Compilation;
class DerivedArgList;
- class HostInfo;
class InputArgList;
class InputInfo;
class JobAction;
@@ -86,8 +86,8 @@ public:
/// If the standard library is used
bool UseStdLib;
- /// Default host triple.
- std::string DefaultHostTriple;
+ /// Default target triple.
+ std::string DefaultTargetTriple;
/// Default name for linked images (e.g., "a.out").
std::string DefaultImageName;
@@ -95,10 +95,6 @@ public:
/// Driver title to use with help.
std::string DriverTitle;
- /// Host information for the platform the driver is running as. This
- /// will generally be the actual host platform, but not always.
- const HostInfo *Host;
-
/// Information about the host which can be overridden by the user.
std::string HostBits, HostMachine, HostSystem, HostRelease;
@@ -117,7 +113,7 @@ public:
/// Whether the driver should follow g++ like behavior.
unsigned CCCIsCXX : 1;
- /// Whether the driver is just the preprocessor
+ /// Whether the driver is just the preprocessor.
unsigned CCCIsCPP : 1;
/// Echo commands while executing (in -v style).
@@ -175,6 +171,13 @@ private:
std::list<std::string> TempFiles;
std::list<std::string> ResultFiles;
+ /// \brief Cache of all the ToolChains in use by the driver.
+ ///
+ /// This maps from the string representation of a triple to a ToolChain
+ /// created targeting that triple. The driver owns all the ToolChain objects
+ /// stored in it, and will clean them up when torn down.
+ mutable llvm::StringMap<ToolChain *> ToolChains;
+
private:
/// TranslateInputArgs - Create a new derived argument list from the input
/// arguments, after applying the standard argument translations.
@@ -187,7 +190,7 @@ private:
public:
Driver(StringRef _ClangExecutable,
- StringRef _DefaultHostTriple,
+ StringRef _DefaultTargetTriple,
StringRef _DefaultImageName,
bool IsProduction,
DiagnosticsEngine &_Diags);
@@ -379,10 +382,6 @@ public:
/// GCC goes to extra lengths here to be a bit more robust.
std::string GetTemporaryPath(StringRef Prefix, const char *Suffix) const;
- /// GetHostInfo - Construct a new host info object for the given
- /// host triple.
- const HostInfo *GetHostInfo(const char *HostTriple) const;
-
/// ShouldUseClangCompiler - Should the clang compiler be used to
/// handle this action.
bool ShouldUseClangCompiler(const Compilation &C, const JobAction &JA,
@@ -390,8 +389,17 @@ public:
bool IsUsingLTO(const ArgList &Args) const;
+private:
+ /// \brief Retrieves a ToolChain for a particular target triple.
+ ///
+ /// Will cache ToolChains for the life of the driver object, and create them
+ /// on-demand.
+ const ToolChain &getToolChain(const ArgList &Args,
+ StringRef DarwinArchName = "") const;
+
/// @}
+public:
/// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and
/// return the grouped values as integers. Numbers which are not
/// provided are set to 0.
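The Driver now memoizes one ToolChain per triple string in an llvm::StringMap. A standalone sketch of that lazy, owner-cleans-up caching pattern (generic stand-in types; not the actual Driver code):

// Standalone illustration: construct one object per key on demand, own
// every object, and delete them all on teardown, as the Driver now does.
#include "llvm/ADT/StringMap.h"

struct ToolChain { /* stand-in for clang::driver::ToolChain */ };

class ToolChainCache {
  mutable llvm::StringMap<ToolChain *> ToolChains;
public:
  const ToolChain &get(llvm::StringRef TripleStr) const {
    ToolChain *&TC = ToolChains[TripleStr]; // null on first lookup
    if (!TC)
      TC = new ToolChain();                 // created on demand
    return *TC;
  }
  ~ToolChainCache() {
    for (llvm::StringMap<ToolChain *>::iterator I = ToolChains.begin(),
                                                E = ToolChains.end();
         I != E; ++I)
      delete I->second;
  }
};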
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h
index 844f918..ea7b52f 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h
@@ -16,7 +16,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
- SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY,BRIEF,FULL) ENUM,
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
#define DRIVERSTART
#include "clang/Basic/DiagnosticDriverKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h b/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h
deleted file mode 100644
index 7285a48..0000000
--- a/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h
+++ /dev/null
@@ -1,94 +0,0 @@
-//===--- HostInfo.h - Host specific information -----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_DRIVER_HOSTINFO_H_
-#define CLANG_DRIVER_HOSTINFO_H_
-
-#include "llvm/ADT/Triple.h"
-#include <string>
-
-namespace clang {
-namespace driver {
- class ArgList;
- class Driver;
- class ToolChain;
-
-/// HostInfo - Config information about a particular host which may interact
-/// with driver behavior.
-///
-/// The host information is used for controlling the parts of the driver which
-/// interact with the platform the driver is ostensibly being run from. For
-/// testing purposes, the HostInfo used by the driver may differ from the actual
-/// host.
-class HostInfo {
-protected:
- const Driver &TheDriver;
- const llvm::Triple Triple;
-
- HostInfo(const Driver &D, const llvm::Triple &_Triple);
-
-public:
- virtual ~HostInfo();
-
- const Driver &getDriver() const { return TheDriver; }
-
- const llvm::Triple& getTriple() const { return Triple; }
- std::string getArchName() const { return Triple.getArchName(); }
- std::string getPlatformName() const { return Triple.getVendorName(); }
- std::string getOSName() const { return Triple.getOSName(); }
-
- /// useDriverDriver - Whether the driver should act as a driver driver for
- /// this host and support -arch, -Xarch, etc.
- virtual bool useDriverDriver() const = 0;
-
- /// CreateToolChain - Construct the toolchain to use for this host (which the
- /// host retains ownership of).
- ///
- /// \param Args - The argument list, which may be used to alter the default
- /// toolchain, for example in the presence of -m32 or -m64.
- ///
- /// \param ArchName - The architecture to return a toolchain for, or 0 if
- /// unspecified. This will only ever be non-zero for hosts which support a
- /// driver driver.
-
- // FIXME: Pin down exactly what the HostInfo is allowed to use Args
- // for here. Currently this is for -m32 / -m64 defaulting.
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName=0) const = 0;
-};
-
-const HostInfo *createAuroraUXHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createDarwinHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createOpenBSDHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createFreeBSDHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createNetBSDHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createMinixHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createDragonFlyHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createLinuxHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createTCEHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-const HostInfo *createWindowsHostInfo(const Driver &D,
- const llvm::Triple &Triple);
-const HostInfo *createMinGWHostInfo(const Driver &D,
- const llvm::Triple &Triple);
-const HostInfo *createUnknownHostInfo(const Driver &D,
- const llvm::Triple& Triple);
-
-} // end namespace driver
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Job.h b/contrib/llvm/tools/clang/include/clang/Driver/Job.h
index 367955f..c94886d 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Job.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Job.h
@@ -46,6 +46,8 @@ public:
/// Command - An executable path/name and argument vector to
/// execute.
class Command : public Job {
+ virtual void anchor();
+
/// Source - The action which caused the creation of this job.
const Action &Source;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h b/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h
index 5516460..094873a 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h
@@ -30,6 +30,9 @@ public:
/// True if the runtime supports ARC zeroing __weak.
unsigned HasWeak : 1;
+ /// \brief True if the runtime supports subscripting methods.
+ unsigned HasSubscripting : 1;
+
/// True if the runtime provides the following entrypoint:
/// void objc_terminate(void);
/// If available, this will be called instead of abort() when an
@@ -37,7 +40,7 @@ public:
unsigned HasTerminate : 1;
ObjCRuntime() : RuntimeKind(NeXT), HasARC(false), HasWeak(false),
- HasTerminate(false) {}
+ HasSubscripting(false), HasTerminate(false) {}
};
}
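
HasSubscripting above is one more single-bit feature flag; because bit-fields are not zero-initialized implicitly, every new flag must also be threaded through the constructor's init list, which is exactly what the second hunk does. A simplified standalone illustration (not the real ObjCRuntime):

    #include <iostream>

    struct RuntimeFeatures {
      unsigned HasARC : 1;
      unsigned HasWeak : 1;
      unsigned HasSubscripting : 1;   // the newly added flag
      unsigned HasTerminate : 1;

      // Forgetting a field here would leave it holding garbage.
      RuntimeFeatures()
          : HasARC(false), HasWeak(false), HasSubscripting(false),
            HasTerminate(false) {}
    };

    int main() {
      RuntimeFeatures F;
      F.HasSubscripting = true;
      std::cout << sizeof(F) << "\n"; // all four flags pack into one word
    }
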
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
index abd224d..3af6f8f 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
@@ -49,8 +49,8 @@ namespace options {
const char *HelpText;
const char *MetaVar;
unsigned char Kind;
- unsigned short Flags;
unsigned char Param;
+ unsigned short Flags;
unsigned short GroupID;
unsigned short AliasID;
};
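
Swapping Param (an unsigned char) ahead of Flags (an unsigned short) groups the two one-byte members together, so each table entry needs less alignment padding. A quick standalone way to see the effect — layouts are implementation-defined, so treat the sizes as typical rather than guaranteed, and the two leading string pointers of the real Info struct are omitted:

    #include <iostream>

    struct Before {            // sizes alternate: char, short, char, short...
      unsigned char Kind;
      unsigned short Flags;
      unsigned char Param;
      unsigned short GroupID;
      unsigned short AliasID;
    };

    struct After {             // the two one-byte members sit together
      unsigned char Kind;
      unsigned char Param;
      unsigned short Flags;
      unsigned short GroupID;
      unsigned short AliasID;
    };

    int main() {
      // On common ABIs: Before is 10 bytes, After is 8.
      std::cout << sizeof(Before) << " " << sizeof(After) << "\n";
    }
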
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
index 5b91c19..0a29bb9 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
@@ -21,6 +21,7 @@ include "OptParser.td"
def CompileOnly_Group : OptionGroup<"<CompileOnly group>">;
def I_Group : OptionGroup<"<I group>">, Group<CompileOnly_Group>;
+def L_Group : OptionGroup<"<L group>">, Group<CompileOnly_Group>;
def M_Group : OptionGroup<"<M group>">, Group<CompileOnly_Group>;
def T_Group : OptionGroup<"<T group>">;
def O_Group : OptionGroup<"<O group>">, Group<CompileOnly_Group>;
@@ -101,8 +102,6 @@ def ccc_pch_is_pth : Flag<"-ccc-pch-is-pth">, CCCDriverOpt,
HelpText<"Use pretokenized headers for precompiled headers">;
class CCCDebugOpt : Group<ccc_debug_Group>, Flags<[DriverOption, HelpHidden]>;
-def ccc_host_triple : Separate<"-ccc-host-triple">, CCCDebugOpt,
- HelpText<"Simulate running on the given target">;
def ccc_install_dir : Separate<"-ccc-install-dir">, CCCDebugOpt,
HelpText<"Simulate installation in the given directory">;
def ccc_print_options : Flag<"-ccc-print-options">, CCCDebugOpt,
@@ -120,13 +119,21 @@ def ccc_arrmt_check : Flag<"-ccc-arrmt-check">, Alias<ccc_arcmt_check>;
def ccc_arrmt_modify : Flag<"-ccc-arrmt-modify">, Alias<ccc_arcmt_modify>;
def ccc_arcmt_migrate : Separate<"-ccc-arcmt-migrate">, CCCDriverOpt,
HelpText<"Apply modifications and produces temporary files that conform to ARC">;
-def ccc_arcmt_migrate_EQ : Joined<"-ccc-arcmt-migrate=">, CCCDriverOpt,
- Alias<ccc_arcmt_migrate>;
def arcmt_migrate_report_output : Separate<"-arcmt-migrate-report-output">,
HelpText<"Output path for the plist report">;
def arcmt_migrate_emit_arc_errors : Flag<"-arcmt-migrate-emit-errors">,
HelpText<"Emit ARC errors even if the migrator can fix them">;
+def _migrate : Flag<"--migrate">, Flags<[DriverOption]>,
+ HelpText<"Run the migrator">;
+def ccc_objcmt_migrate : Separate<"-ccc-objcmt-migrate">, CCCDriverOpt,
+ HelpText<"Apply modifications and produces temporary files to migrate to "
+ "modern ObjC syntax">;
+def objcmt_migrate_literals : Flag<"-objcmt-migrate-literals">,
+ HelpText<"Enable migration to modern ObjC literals">;
+def objcmt_migrate_subscripting : Flag<"-objcmt-migrate-subscripting">,
+ HelpText<"Enable migration to modern ObjC subscripting">;
+
// Make sure all other -ccc- options are rejected.
def ccc_ : Joined<"-ccc-">, Group<ccc_Group>, Flags<[Unsupported]>;
@@ -145,6 +152,7 @@ def D : JoinedOrSeparate<"-D">, Group<CompileOnly_Group>;
def E : Flag<"-E">, Flags<[DriverOption]>,
HelpText<"Only run the preprocessor">;
def F : JoinedOrSeparate<"-F">, Flags<[RenderJoined]>;
+def G : Separate<"-G">, Flags<[DriverOption]>;
def H : Flag<"-H">;
def I_ : Flag<"-I-">, Group<I_Group>;
def I : JoinedOrSeparate<"-I">, Group<I_Group>;
@@ -229,7 +237,7 @@ def client__name : JoinedOrSeparate<"-client_name">;
def combine : Flag<"-combine">, Flags<[DriverOption, Unsupported]>;
def compatibility__version : JoinedOrSeparate<"-compatibility_version">;
def coverage : Flag<"-coverage">;
-def cpp_precomp : Flag<"-cpp-precomp">;
+def cpp_precomp : Flag<"-cpp-precomp">, Group<clang_ignored_f_Group>;
def current__version : JoinedOrSeparate<"-current_version">;
def cxx_isystem : JoinedOrSeparate<"-cxx-isystem">, Group<clang_i_Group>;
def c : Flag<"-c">, Flags<[DriverOption]>,
@@ -256,11 +264,18 @@ def emit_llvm : Flag<"-emit-llvm">,
def exported__symbols__list : Separate<"-exported_symbols_list">;
def e : JoinedOrSeparate<"-e">;
def fPIC : Flag<"-fPIC">, Group<f_Group>;
-def fPIE : Flag<"-fPIE">, Group<f_Group>, Flags<[NoArgumentUnused]>;
-def fno_PIE : Flag<"-fno-PIE">, Group<f_Group>, Flags<[NoArgumentUnused]>;
+def fno_PIC : Flag<"-fno-PIC">, Group<f_Group>;
+def fPIE : Flag<"-fPIE">, Group<f_Group>;
+def fno_PIE : Flag<"-fno-PIE">, Group<f_Group>;
def faccess_control : Flag<"-faccess-control">, Group<f_Group>;
def fallow_unsupported : Flag<"-fallow-unsupported">, Group<f_Group>;
+def faltivec : Flag<"-faltivec">, Group<f_Group>;
def fapple_kext : Flag<"-fapple-kext">, Group<f_Group>;
+def fapple_pragma_pack : Flag<"-fapple-pragma-pack">, Group<f_Group>;
+def faddress_sanitizer : Flag<"-faddress-sanitizer">, Group<f_Group>;
+def fno_address_sanitizer : Flag<"-fno-address-sanitizer">, Group<f_Group>;
+def fthread_sanitizer : Flag<"-fthread-sanitizer">, Group<f_Group>;
+def fno_thread_sanitizer : Flag<"-fno-thread-sanitizer">, Group<f_Group>;
def fasm : Flag<"-fasm">, Group<f_Group>;
def fasm_blocks : Flag<"-fasm-blocks">, Group<f_Group>;
@@ -286,21 +301,27 @@ def fcommon : Flag<"-fcommon">, Group<f_Group>;
def fcompile_resource_EQ : Joined<"-fcompile-resource=">, Group<f_Group>;
def fconstant_cfstrings : Flag<"-fconstant-cfstrings">, Group<f_Group>;
def fconstant_string_class_EQ : Joined<"-fconstant-string-class=">, Group<f_Group>;
+def fconstexpr_depth_EQ : Joined<"-fconstexpr-depth=">, Group<f_Group>;
+def fconstexpr_backtrace_limit_EQ : Joined<"-fconstexpr-backtrace-limit=">,
+ Group<f_Group>;
+def fno_crash_diagnostics : Flag<"-fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>;
def fcreate_profile : Flag<"-fcreate-profile">, Group<f_Group>;
def fcxx_exceptions: Flag<"-fcxx-exceptions">, Group<f_Group>;
+def fcxx_modules : Flag <"-fcxx-modules">, Group<f_Group>, Flags<[NoForward]>;
def fdebug_pass_arguments : Flag<"-fdebug-pass-arguments">, Group<f_Group>;
def fdebug_pass_structure : Flag<"-fdebug-pass-structure">, Group<f_Group>;
def fdiagnostics_fixit_info : Flag<"-fdiagnostics-fixit-info">, Group<f_clang_Group>;
def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-range-info">, Group<f_clang_Group>;
def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">, Group<f_clang_Group>;
def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">, Group<f_Group>;
-def fdiagnostics_show_name : Flag<"-fdiagnostics-show-name">, Group<f_Group>;
def fdiagnostics_show_note_include_stack : Flag<"-fdiagnostics-show-note-include-stack">, Group<f_Group>;
def fdiagnostics_format_EQ : Joined<"-fdiagnostics-format=">, Group<f_clang_Group>;
def fdiagnostics_show_category_EQ : Joined<"-fdiagnostics-show-category=">, Group<f_clang_Group>;
def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">, Group<f_Group>;
def fdwarf2_cfi_asm : Flag<"-fdwarf2-cfi-asm">, Group<f_Group>;
def fno_dwarf2_cfi_asm : Flag<"-fno-dwarf2-cfi-asm">, Group<f_Group>;
+def fdwarf_directory_asm : Flag<"-fdwarf-directory-asm">, Group<f_Group>;
+def fno_dwarf_directory_asm : Flag<"-fno-dwarf-directory-asm">, Group<f_Group>;
def felide_constructors : Flag<"-felide-constructors">, Group<f_Group>;
def feliminate_unused_debug_symbols : Flag<"-feliminate-unused-debug-symbols">, Group<f_Group>;
def femit_all_decls : Flag<"-femit-all-decls">, Group<f_Group>;
@@ -309,8 +330,34 @@ def ferror_limit_EQ : Joined<"-ferror-limit=">, Group<f_Group>;
def fexceptions : Flag<"-fexceptions">, Group<f_Group>;
def fextdirs_EQ : Joined<"-fextdirs=">, Group<f_Group>;
def fhosted : Flag<"-fhosted">, Group<f_Group>;
-def ffast_math : Flag<"-ffast-math">, Group<clang_ignored_f_Group>;
-def ffinite_math_only : Flag<"-ffinite-math-only">, Group<clang_ignored_f_Group>;
+def ffast_math : Flag<"-ffast-math">, Group<f_Group>;
+def fmath_errno : Flag<"-fmath-errno">, Group<f_Group>;
+def fno_math_errno : Flag<"-fno-math-errno">, Group<f_Group>;
+def fsignaling_math : Flag<"-fsignaling-math">, Group<f_Group>;
+def fno_signaling_math : Flag<"-fno-signaling-math">, Group<f_Group>;
+def funsafe_math_optimizations : Flag<"-funsafe-math-optimizations">,
+ Group<f_Group>;
+def fno_unsafe_math_optimizations : Flag<"-fno-unsafe-math-optimizations">,
+ Group<f_Group>;
+def fassociative_math : Flag<"-fassociative-math">, Group<f_Group>;
+def fno_associative_math : Flag<"-fno-associative-math">, Group<f_Group>;
+def freciprocal_math : Flag<"-freciprocal-math">, Group<f_Group>;
+def fno_reciprocal_math : Flag<"-fno-reciprocal-math">, Group<f_Group>;
+def ffinite_math_only : Flag<"-ffinite-math-only">, Group<f_Group>;
+def fno_finite_math_only : Flag<"-fno-finite-math-only">, Group<f_Group>;
+def fsigned_zeros : Flag<"-fsigned-zeros">, Group<f_Group>;
+def fno_signed_zeros : Flag<"-fno-signed-zeros">, Group<f_Group>;
+def fhonor_nans : Flag<"-fhonor-nans">, Group<f_Group>;
+def fno_honor_nans : Flag<"-fno-honor-nans">, Group<f_Group>;
+def fhonor_infinities : Flag<"-fhonor-infinities">, Group<f_Group>;
+def fno_honor_infinities : Flag<"-fno-honor-infinities">, Group<f_Group>;
+// Sic. This option was misspelled originally.
+def fhonor_infinites : Flag<"-fhonor-infinites">, Group<f_Group>,
+ Alias<fhonor_infinities>;
+def fno_honor_infinites : Flag<"-fno-honor-infinites">, Group<f_Group>,
+ Alias<fno_honor_infinities>;
+def ftrapping_math : Flag<"-ftrapping-math">, Group<f_Group>;
+def fno_trapping_math : Flag<"-fno-trapping-math">, Group<f_Group>;
def ffor_scope : Flag<"-ffor-scope">, Group<f_Group>;
def fno_for_scope : Flag<"-fno-for-scope">, Group<f_Group>;
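
With this hunk, -ffast-math and its relatives stop being silently ignored and become a full GCC-style family of paired -ffoo/-fno-foo toggles. Drivers conventionally resolve such pairs positionally — the last spelling on the command line wins — and the sketch below illustrates that rule (an illustrative helper, not Clang's actual argument handling):

    #include <iostream>
    #include <string>
    #include <vector>

    // The last -ffoo / -fno-foo occurrence on the command line wins.
    static bool lastFlagWins(const std::vector<std::string> &Args,
                             const std::string &Pos, const std::string &Neg,
                             bool Default) {
      bool Value = Default;
      for (const std::string &A : Args) {
        if (A == Pos)      Value = true;
        else if (A == Neg) Value = false;
      }
      return Value;
    }

    int main() {
      std::vector<std::string> Args = {"-ffinite-math-only",
                                       "-fno-finite-math-only"};
      std::cout << lastFlagWins(Args, "-ffinite-math-only",
                                "-fno-finite-math-only", false)
                << "\n"; // 0: the later -fno- form wins
    }
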
@@ -337,7 +384,6 @@ def flto : Flag<"-flto">, Group<f_Group>;
def fno_lto : Flag<"-fno-lto">, Group<f_Group>;
def fmacro_backtrace_limit_EQ : Joined<"-fmacro-backtrace-limit=">,
Group<f_Group>;
-def fmath_errno : Flag<"-fmath-errno">, Group<f_Group>;
def fmerge_all_constants : Flag<"-fmerge-all-constants">, Group<f_Group>;
def fmessage_length_EQ : Joined<"-fmessage-length=">, Group<f_Group>;
def fms_extensions : Flag<"-fms-extensions">, Group<f_Group>;
@@ -346,14 +392,14 @@ def fmsc_version : Joined<"-fmsc-version=">, Group<f_Group>;
def fdelayed_template_parsing : Flag<"-fdelayed-template-parsing">, Group<f_Group>;
def fmodule_cache_path : Separate<"-fmodule-cache-path">, Group<i_Group>,
Flags<[NoForward]>;
-def fauto_module_import : Flag <"-fauto-module-import">, Group<f_Group>,
- Flags<[NoForward]>;
+def fmodules : Flag <"-fmodules">, Group<f_Group>, Flags<[NoForward]>;
def fmudflapth : Flag<"-fmudflapth">, Group<f_Group>;
def fmudflap : Flag<"-fmudflap">, Group<f_Group>;
def fnested_functions : Flag<"-fnested-functions">, Group<f_Group>;
def fnext_runtime : Flag<"-fnext-runtime">, Group<f_Group>;
def fno_access_control : Flag<"-fno-access-control">, Group<f_Group>;
+def fno_apple_pragma_pack : Flag<"-fno-apple-pragma-pack">, Group<f_Group>;
def fno_asm : Flag<"-fno-asm">, Group<f_Group>;
def fno_asynchronous_unwind_tables : Flag<"-fno-asynchronous-unwind-tables">, Group<f_Group>;
def fno_assume_sane_operator_new : Flag<"-fno-assume-sane-operator-new">, Group<f_Group>;
@@ -367,30 +413,30 @@ def fno_color_diagnostics : Flag<"-fno-color-diagnostics">, Group<f_Group>;
def fno_common : Flag<"-fno-common">, Group<f_Group>;
def fno_constant_cfstrings : Flag<"-fno-constant-cfstrings">, Group<f_Group>;
def fno_cxx_exceptions: Flag<"-fno-cxx-exceptions">, Group<f_Group>;
+def fno_cxx_modules : Flag <"-fno-cxx-modules">, Group<f_Group>, Flags<[NoForward]>;
def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">, Group<f_Group>;
-def fno_diagnostics_show_name : Flag<"-fno-diagnostics-show-name">, Group<f_Group>;
def fno_diagnostics_show_option : Flag<"-fno-diagnostics-show-option">, Group<f_Group>;
def fno_diagnostics_show_note_include_stack : Flag<"-fno-diagnostics-show-note-include-stack">, Group<f_Group>;
def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">, Group<f_Group>;
def fno_elide_constructors : Flag<"-fno-elide-constructors">, Group<f_Group>;
def fno_eliminate_unused_debug_symbols : Flag<"-fno-eliminate-unused-debug-symbols">, Group<f_Group>;
def fno_exceptions : Flag<"-fno-exceptions">, Group<f_Group>;
-def fno_finite_math_only : Flag<"-fno-finite-math-only">, Group<clang_ignored_f_Group>;
def fno_gnu_keywords : Flag<"-fno-gnu-keywords">, Group<f_Group>;
-def fno_inline_functions : Flag<"-fno-inline-functions">, Group<clang_ignored_f_Group>;
-def fno_inline : Flag<"-fno-inline">, Group<clang_ignored_f_Group>;
+def fno_inline_functions : Flag<"-fno-inline-functions">, Group<f_Group>;
+def fno_inline : Flag<"-fno-inline">, Group<f_Group>;
def fno_keep_inline_functions : Flag<"-fno-keep-inline-functions">, Group<clang_ignored_f_Group>;
def fno_lax_vector_conversions : Flag<"-fno-lax-vector-conversions">, Group<f_Group>;
-def fno_math_errno : Flag<"-fno-math-errno">, Group<f_Group>;
+def fno_limit_debug_info : Flag<"-fno-limit-debug-info">, Group<f_Group>,
+ HelpText<"Do not limit debug information produced to reduce size of debug binary">;
def fno_merge_all_constants : Flag<"-fno-merge-all-constants">, Group<f_Group>;
+def fno_modules : Flag <"-fno-modules">, Group<f_Group>, Flags<[NoForward]>;
def fno_ms_extensions : Flag<"-fno-ms-extensions">, Group<f_Group>;
def fno_ms_compatibility : Flag<"-fno-ms-compatibility">, Group<f_Group>;
def fno_delayed_template_parsing : Flag<"-fno-delayed-template-parsing">, Group<f_Group>;
-def fno_objc_default_synthesize_properties
- : Flag<"-fno-objc-default-synthesize-properties">, Group<f_Group>;
def fno_objc_exceptions: Flag<"-fno-objc-exceptions">, Group<f_Group>;
def fno_objc_legacy_dispatch : Flag<"-fno-objc-legacy-dispatch">, Group<f_Group>;
def fno_omit_frame_pointer : Flag<"-fno-omit-frame-pointer">, Group<f_Group>;
+def fno_operator_names : Flag<"-fno-operator-names">, Group<f_Group>;
def fno_pascal_strings : Flag<"-fno-pascal-strings">, Group<f_Group>;
def fno_rtti : Flag<"-fno-rtti">, Group<f_Group>;
def fno_short_enums : Flag<"-fno-short-enums">, Group<f_Group>;
@@ -399,6 +445,7 @@ def fno_show_source_location : Flag<"-fno-show-source-location">, Group<f_Group>
def fno_spell_checking : Flag<"-fno-spell-checking">, Group<f_Group>;
def fno_stack_protector : Flag<"-fno-stack-protector">, Group<f_Group>;
def fno_strict_aliasing : Flag<"-fno-strict-aliasing">, Group<f_Group>;
+def fno_strict_enums : Flag<"-fno-strict-enums">, Group<f_Group>;
def fno_strict_overflow : Flag<"-fno-strict-overflow">, Group<f_Group>;
def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">, Group<f_Group>;
def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">, Group<f_Group>;
@@ -414,8 +461,6 @@ def fobjc_arc_exceptions : Flag<"-fobjc-arc-exceptions">, Group<f_Group>;
def fno_objc_arc_exceptions : Flag<"-fno-objc-arc-exceptions">, Group<f_Group>;
def fobjc_atdefs : Flag<"-fobjc-atdefs">, Group<clang_ignored_f_Group>;
def fobjc_call_cxx_cdtors : Flag<"-fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
-def fobjc_default_synthesize_properties :
- Flag<"-fobjc-default-synthesize-properties">, Group<f_Group>;
def fobjc_exceptions: Flag<"-fobjc-exceptions">, Group<f_Group>;
def fobjc_gc_only : Flag<"-fobjc-gc-only">, Group<f_Group>;
@@ -426,6 +471,7 @@ def fobjc_infer_related_result_type : Flag<"-fobjc-infer-related-result-type">,
Group<f_Group>;
def fno_objc_infer_related_result_type : Flag<
"-fno-objc-infer-related-result-type">, Group<f_Group>;
+def fobjc_link_runtime: Flag<"-fobjc-link-runtime">, Group<f_Group>;
// Objective-C ABI options.
def fobjc_abi_version_EQ : Joined<"-fobjc-abi-version=">, Group<f_Group>;
@@ -437,6 +483,8 @@ def fobjc_sender_dependent_dispatch : Flag<"-fobjc-sender-dependent-dispatch">,
def fobjc : Flag<"-fobjc">, Group<f_Group>;
def fomit_frame_pointer : Flag<"-fomit-frame-pointer">, Group<f_Group>;
def fopenmp : Flag<"-fopenmp">, Group<f_Group>;
+def fno_optimize_sibling_calls : Flag<"-fno-optimize-sibling-calls">, Group<f_Group>;
+def foptimize_sibling_calls : Flag<"-foptimize-sibling-calls">, Group<f_Group>;
def force__cpusubtype__ALL : Flag<"-force_cpusubtype_ALL">;
def force__flat__namespace : Flag<"-force_flat_namespace">;
def force__load : Separate<"-force_load">;
@@ -447,8 +495,9 @@ def fpack_struct_EQ : Joined<"-fpack-struct=">, Group<f_Group>;
def fpascal_strings : Flag<"-fpascal-strings">, Group<f_Group>;
def fpch_preprocess : Flag<"-fpch-preprocess">, Group<f_Group>;
def fpic : Flag<"-fpic">, Group<f_Group>;
-def fpie : Flag<"-fpie">, Group<f_Group>, Flags<[NoArgumentUnused]>;
-def fno_pie : Flag<"-fno-pie">, Group<f_Group>, Flags<[NoArgumentUnused]>;
+def fno_pic : Flag<"-fno-pic">, Group<f_Group>;
+def fpie : Flag<"-fpie">, Group<f_Group>;
+def fno_pie : Flag<"-fno-pie">, Group<f_Group>;
def fprofile_arcs : Flag<"-fprofile-arcs">, Group<f_Group>;
def fprofile_generate : Flag<"-fprofile-generate">, Group<f_Group>;
def framework : Separate<"-framework">, Flags<[LinkerInput]>;
@@ -467,9 +516,11 @@ def fsigned_char : Flag<"-fsigned-char">, Group<f_Group>;
def fstack_protector_all : Flag<"-fstack-protector-all">, Group<f_Group>;
def fstack_protector : Flag<"-fstack-protector">, Group<f_Group>;
def fstrict_aliasing : Flag<"-fstrict-aliasing">, Group<f_Group>;
+def fstrict_enums : Flag<"-fstrict-enums">, Group<f_Group>;
def fstrict_overflow : Flag<"-fstrict-overflow">, Group<f_Group>;
def fsyntax_only : Flag<"-fsyntax-only">, Flags<[DriverOption]>;
def ftabstop_EQ : Joined<"-ftabstop=">, Group<f_Group>;
+def ftemplate_depth_EQ : Joined<"-ftemplate-depth=">, Group<f_Group>;
def ftemplate_depth_ : Joined<"-ftemplate-depth-">, Group<f_Group>;
def ftemplate_backtrace_limit_EQ : Joined<"-ftemplate-backtrace-limit=">,
Group<f_Group>;
@@ -506,17 +557,22 @@ def fzero_initialized_in_bss : Flag<"-fzero-initialized-in-bss">, Group<f_Group>
def ffunction_sections: Flag <"-ffunction-sections">, Group<f_Group>;
def fdata_sections : Flag <"-fdata-sections">, Group<f_Group>;
def f : Joined<"-f">, Group<f_Group>;
-def g0 : Joined<"-g0">, Group<g_Group>;
-def g3 : Joined<"-g3">, Group<g_Group>;
-def gfull : Joined<"-gfull">, Group<g_Group>;
-def gstabs : Joined<"-gstabs">, Group<g_Group>;
-def gused : Joined<"-gused">, Group<g_Group>;
+def g0 : Flag<"-g0">, Group<g_Group>;
+def g2 : Flag<"-g2">, Group<g_Group>;
+def g3 : Flag<"-g3">, Group<g_Group>;
+def gdwarf2 : Flag<"-gdwarf-2">, Group<g_Group>;
+def gfull : Flag<"-gfull">, Group<g_Group>;
+def ggdb : Flag<"-ggdb">, Group<g_Group>;
+def gstabs : Flag<"-gstabs">, Group<g_Group>;
+def gstabsplus : Flag<"-gstabs+">, Group<g_Group>;
+def gstabs1 : Flag<"-gstabs1">, Group<g_Group>;
+def gstabs2 : Flag<"-gstabs2">, Group<g_Group>;
+def gused : Flag<"-gused">, Group<g_Group>;
def g_Flag : Flag<"-g">, Group<g_Group>;
-def g_Joined : Joined<"-g">, Group<g_Group>;
def headerpad__max__install__names : Joined<"-headerpad_max_install_names">;
def index_header_map : Flag<"-index-header-map">;
def idirafter : JoinedOrSeparate<"-idirafter">, Group<clang_i_Group>;
-def iframework : JoinedOrSeparate<"-iframework">, Group<clang_i_Group>;
+def iframework : Joined<"-iframework">, Group<clang_i_Group>;
def imacros : JoinedOrSeparate<"-imacros">, Group<clang_i_Group>;
def image__base : Separate<"-image_base">;
def include_ : JoinedOrSeparate<"-include">, Group<clang_i_Group>, EnumName<"include">;
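
The debug-info options above change from Joined to Flag: -g0 is a fixed switch, so something like -g0foo should be rejected rather than parsed as -g0 with a glued value foo (note that g_Joined, the catch-all Joined form of -g, is removed outright). A simplified matcher showing how the two option kinds differ (not the real OptTable logic):

    #include <iostream>
    #include <optional>
    #include <string>

    // Flag: the argument must equal the spelling exactly; no value.
    std::optional<std::string> matchFlag(const std::string &Arg,
                                         const std::string &Spelling) {
      if (Arg == Spelling) return std::string();
      return std::nullopt;
    }

    // Joined: the spelling is a prefix; the remainder is the value.
    std::optional<std::string> matchJoined(const std::string &Arg,
                                           const std::string &Spelling) {
      if (Arg.compare(0, Spelling.size(), Spelling) == 0)
        return Arg.substr(Spelling.size());
      return std::nullopt;
    }

    int main() {
      std::cout << matchFlag("-g0", "-g0").has_value() << "\n";    // 1
      std::cout << matchFlag("-g0foo", "-g0").has_value() << "\n"; // 0
      std::cout << *matchJoined("-gsplit", "-g") << "\n";          // "split"
    }
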
@@ -534,7 +590,10 @@ def iwithsysroot : JoinedOrSeparate<"-iwithsysroot">, Group<clang_i_Group>;
def i : Joined<"-i">, Group<i_Group>;
def keep__private__externs : Flag<"-keep_private_externs">;
def l : JoinedOrSeparate<"-l">, Flags<[LinkerInput, RenderJoined]>;
+def lazy__framework : Separate<"-lazy_framework">, Flags<[LinkerInput]>;
+def lazy__library : Separate<"-lazy_library">, Flags<[LinkerInput]>;
def m32 : Flag<"-m32">, Group<m_Group>, Flags<[DriverOption]>;
+def mqdsp6_compat : Flag<"-mqdsp6-compat">, Group<m_Group>, Flags<[DriverOption]>;
def m3dnowa : Flag<"-m3dnowa">, Group<m_x86_Features_Group>;
def m3dnow : Flag<"-m3dnow">, Group<m_x86_Features_Group>;
def m64 : Flag<"-m64">, Group<m_Group>, Flags<[DriverOption]>;
@@ -543,9 +602,10 @@ def march_EQ : Joined<"-march=">, Group<m_Group>;
def mcmodel_EQ : Joined<"-mcmodel=">, Group<m_Group>;
def mconstant_cfstrings : Flag<"-mconstant-cfstrings">, Group<clang_ignored_m_Group>;
def mcpu_EQ : Joined<"-mcpu=">, Group<m_Group>;
-def mdynamic_no_pic : Joined<"-mdynamic-no-pic">, Group<m_Group>, Flags<[NoArgumentUnused]>;
+def mdynamic_no_pic : Joined<"-mdynamic-no-pic">, Group<m_Group>;
def mfix_and_continue : Flag<"-mfix-and-continue">, Group<clang_ignored_m_Group>;
def mfloat_abi_EQ : Joined<"-mfloat-abi=">, Group<m_Group>;
+def mfpmath_EQ : Joined<"-mfpmath=">, Group<m_Group>;
def mfpu_EQ : Joined<"-mfpu=">, Group<m_Group>;
def mglobal_merge : Flag<"-mglobal-merge">, Group<m_Group>;
def mhard_float : Flag<"-mhard-float">, Group<m_Group>;
@@ -558,6 +618,7 @@ def mllvm : Separate<"-mllvm">;
def mmacosx_version_min_EQ : Joined<"-mmacosx-version-min=">, Group<m_Group>;
def mms_bitfields : Flag<"-mms-bitfields">, Group<m_Group>;
def mstackrealign : Flag<"-mstackrealign">, Group<m_Group>;
+def mstack_alignment : Joined<"-mstack-alignment=">, Group<m_Group>;
def mmmx : Flag<"-mmmx">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<"-mno-3dnowa">, Group<m_x86_Features_Group>;
def mno_3dnow : Flag<"-mno-3dnow">, Group<m_x86_Features_Group>;
@@ -569,6 +630,7 @@ def mno_red_zone : Flag<"-mno-red-zone">, Group<m_Group>;
def mno_relax_all : Flag<"-mno-relax-all">, Group<m_Group>;
def mno_rtd: Flag<"-mno-rtd">, Group<m_Group>;
def mno_soft_float : Flag<"-mno-soft-float">, Group<m_Group>;
+def mno_stackrealign : Flag<"-mno-stackrealign">, Group<m_Group>;
def mno_sse2 : Flag<"-mno-sse2">, Group<m_x86_Features_Group>;
def mno_sse3 : Flag<"-mno-sse3">, Group<m_x86_Features_Group>;
def mno_sse4a : Flag<"-mno-sse4a">, Group<m_x86_Features_Group>;
@@ -579,6 +641,12 @@ def mno_sse : Flag<"-mno-sse">, Group<m_x86_Features_Group>;
def mno_ssse3 : Flag<"-mno-ssse3">, Group<m_x86_Features_Group>;
def mno_aes : Flag<"-mno-aes">, Group<m_x86_Features_Group>;
def mno_avx : Flag<"-mno-avx">, Group<m_x86_Features_Group>;
+def mno_avx2 : Flag<"-mno-avx2">, Group<m_x86_Features_Group>;
+def mno_lzcnt : Flag<"-mno-lzcnt">, Group<m_x86_Features_Group>;
+def mno_bmi : Flag<"-mno-bmi">, Group<m_x86_Features_Group>;
+def mno_bmi2 : Flag<"-mno-bmi2">, Group<m_x86_Features_Group>;
+def mno_popcnt : Flag<"-mno-popcnt">, Group<m_x86_Features_Group>;
+def mno_fma4 : Flag<"-mno-fma4">, Group<m_x86_Features_Group>;
def mno_thumb : Flag<"-mno-thumb">, Group<m_Group>;
def marm : Flag<"-marm">, Alias<mno_thumb>;
@@ -591,6 +659,7 @@ def mred_zone : Flag<"-mred-zone">, Group<m_Group>;
def mregparm_EQ : Joined<"-mregparm=">, Group<m_Group>;
def mrelax_all : Flag<"-mrelax-all">, Group<m_Group>;
def mrtd: Flag<"-mrtd">, Group<m_Group>;
+def msmall_data_threshold_EQ : Joined <"-msmall-data-threshold=">, Group<m_Group>;
def msoft_float : Flag<"-msoft-float">, Group<m_Group>;
def msse2 : Flag<"-msse2">, Group<m_x86_Features_Group>;
def msse3 : Flag<"-msse3">, Group<m_x86_Features_Group>;
@@ -602,6 +671,12 @@ def msse : Flag<"-msse">, Group<m_x86_Features_Group>;
def mssse3 : Flag<"-mssse3">, Group<m_x86_Features_Group>;
def maes : Flag<"-maes">, Group<m_x86_Features_Group>;
def mavx : Flag<"-mavx">, Group<m_x86_Features_Group>;
+def mavx2 : Flag<"-mavx2">, Group<m_x86_Features_Group>;
+def mlzcnt : Flag<"-mlzcnt">, Group<m_x86_Features_Group>;
+def mbmi : Flag<"-mbmi">, Group<m_x86_Features_Group>;
+def mbmi2 : Flag<"-mbmi2">, Group<m_x86_Features_Group>;
+def mpopcnt : Flag<"-mpopcnt">, Group<m_x86_Features_Group>;
+def mfma4 : Flag<"-mfma4">, Group<m_x86_Features_Group>;
def mthumb : Flag<"-mthumb">, Group<m_Group>;
def mtune_EQ : Joined<"-mtune=">, Group<m_Group>;
def multi__module : Flag<"-multi_module">;
@@ -612,7 +687,7 @@ def m_Separate : Separate<"-m">, Group<m_Group>;
def m_Joined : Joined<"-m">, Group<m_Group>;
def no_canonical_prefixes : Flag<"-no-canonical-prefixes">, Flags<[HelpHidden]>,
HelpText<"Use relative instead of canonical paths">;
-def no_cpp_precomp : Flag<"-no-cpp-precomp">;
+def no_cpp_precomp : Flag<"-no-cpp-precomp">, Group<clang_ignored_f_Group>;
def no_integrated_as : Flag<"-no-integrated-as">, Flags<[DriverOption]>;
def no_integrated_cpp : Flag<"-no-integrated-cpp">, Flags<[DriverOption]>;
def no__dead__strip__inits__and__terms : Flag<"-no_dead_strip_inits_and_terms">;
@@ -662,8 +737,11 @@ def read__only__relocs : Separate<"-read_only_relocs">;
def remap : Flag<"-remap">;
def rewrite_objc : Flag<"-rewrite-objc">, Flags<[DriverOption]>,
HelpText<"Rewrite Objective-C source to C++">;
+def rewrite_legacy_objc : Flag<"-rewrite-legacy-objc">, Flags<[DriverOption]>,
+ HelpText<"Rewrite Legacy Objective-C source to C++">;
def rdynamic : Flag<"-rdynamic">;
def rpath : Separate<"-rpath">, Flags<[LinkerInput]>;
+def rtlib_EQ : Joined<"-rtlib=">;
def r : Flag<"-r">;
def save_temps : Flag<"-save-temps">, Flags<[DriverOption]>,
HelpText<"Save intermediate compilation results">;
@@ -687,13 +765,20 @@ def single__module : Flag<"-single_module">;
def specs_EQ : Joined<"-specs=">;
def specs : Separate<"-specs">, Flags<[Unsupported]>;
def static_libgcc : Flag<"-static-libgcc">;
+def static_libstdcxx : Flag<"-static-libstdc++">;
def static : Flag<"-static">, Flags<[NoArgumentUnused]>;
def std_default_EQ : Joined<"-std-default=">;
-def std_EQ : Joined<"-std=">;
+def std_EQ : Joined<"-std=">, Group<L_Group>;
def stdlib_EQ : Joined<"-stdlib=">;
def sub__library : JoinedOrSeparate<"-sub_library">;
def sub__umbrella : JoinedOrSeparate<"-sub_umbrella">;
def s : Flag<"-s">;
+def target : Separate<"-target">, Flags<[DriverOption]>,
+ HelpText<"Generate code for the given target">;
+def gcc_toolchain : Separate<"-gcc-toolchain">, Flags<[DriverOption]>,
+ HelpText<"Use the gcc toolchain at the given directory">;
+// We should deprecate the use of -ccc-host-triple, and then remove it.
+def ccc_host_triple : Separate<"-ccc-host-triple">, Alias<target>;
def time : Flag<"-time">,
HelpText<"Time individual commands">;
def traditional_cpp : Flag<"-traditional-cpp">;
@@ -835,7 +920,11 @@ def _relocatable_pch : Flag<"--relocatable-pch">,
HelpText<"Build a relocatable precompiled header">;
def _resource_EQ : Joined<"--resource=">, Alias<fcompile_resource_EQ>;
def _resource : Separate<"--resource">, Alias<fcompile_resource_EQ>;
+def _rtlib_EQ : Joined<"--rtlib=">, Alias<rtlib_EQ>;
+def _rtlib : Separate<"--rtlib">, Alias<rtlib_EQ>;
def _save_temps : Flag<"--save-temps">, Alias<save_temps>;
+def _serialize_diags : Separate<"--serialize-diagnostics">, Flags<[DriverOption]>,
+ HelpText<"Serialize compiler diagnostics to a file">;
def _shared : Flag<"--shared">, Alias<shared>;
def _signed_char : Flag<"--signed-char">, Alias<fsigned_char>;
def _specs_EQ : Joined<"--specs=">, Alias<specs_EQ>;
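
At this point Options.td routes several deprecated or alternate spellings through Alias<>, e.g. -ccc-host-triple to -target and --rtlib to -rtlib=; the parser canonicalizes them so later phases only ever see the canonical option. A toy sketch of that one-step canonicalization (hypothetical table, not Clang's):

    #include <iostream>
    #include <map>
    #include <string>

    // Maps deprecated spellings to their canonical option.
    static const std::map<std::string, std::string> Aliases = {
        {"-ccc-host-triple", "-target"},
        {"--rtlib", "-rtlib="},
    };

    std::string canonicalize(const std::string &Opt) {
      auto It = Aliases.find(Opt);
      return It == Aliases.end() ? Opt : It->second;
    }

    int main() {
      std::cout << canonicalize("-ccc-host-triple") << "\n"; // -target
    }
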
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Tool.h b/contrib/llvm/tools/clang/include/clang/Driver/Tool.h
index 378b516..8822d7b 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Tool.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Tool.h
@@ -49,6 +49,7 @@ public:
virtual bool hasIntegratedAssembler() const { return false; }
virtual bool hasIntegratedCPP() const = 0;
+ virtual bool isLinkJob() const { return false; }
/// \brief Does this tool have "good" standardized diagnostics, or should the
/// driver add an additional "command failed" diagnostic on failures.
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
index 2e6218a..c35cf67 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
@@ -23,7 +23,6 @@ namespace driver {
class Compilation;
class DerivedArgList;
class Driver;
- class HostInfo;
class InputArgList;
class JobAction;
class ObjCRuntime;
@@ -39,8 +38,13 @@ public:
CST_Libstdcxx
};
+ enum RuntimeLibType {
+ RLT_CompilerRT,
+ RLT_Libgcc
+ };
+
private:
- const HostInfo &Host;
+ const Driver &D;
const llvm::Triple Triple;
/// The list of toolchain specific path prefixes to search for
@@ -52,7 +56,20 @@ private:
path_list ProgramPaths;
protected:
- ToolChain(const HostInfo &Host, const llvm::Triple &_Triple);
+ ToolChain(const Driver &D, const llvm::Triple &T);
+
+ /// \name Utilities for implementing subclasses.
+ ///@{
+ static void addSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path);
+ static void addExternCSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path);
+ static void addSystemIncludes(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ ArrayRef<StringRef> Paths);
+ ///@}
public:
virtual ~ToolChain();
@@ -131,7 +148,7 @@ public:
/// IsObjCLegacyDispatchDefault - Does this tool chain set
/// -fobjc-legacy-dispatch by default (this is only used with the non-fragile
/// ABI).
- virtual bool IsObjCLegacyDispatchDefault() const { return false; }
+ virtual bool IsObjCLegacyDispatchDefault() const { return true; }
/// UseObjCMixedDispatchDefault - When using non-legacy dispatch, should the
/// mixed dispatch method be used?
@@ -143,6 +160,11 @@ public:
return 0;
}
+ /// GetDefaultRuntimeLibType - Get the default runtime library variant to use.
+ virtual RuntimeLibType GetDefaultRuntimeLibType() const {
+ return ToolChain::RLT_Libgcc;
+ }
+
/// IsUnwindTablesDefault - Does this tool chain use -funwind-tables
/// by default.
virtual bool IsUnwindTablesDefault() const = 0;
@@ -162,6 +184,9 @@ public:
/// Does this tool chain support Objective-C garbage collection.
virtual bool SupportsObjCGC() const { return true; }
+ /// Does this tool chain support Objective-C ARC.
+ virtual bool SupportsObjCARC() const { return true; }
+
/// UseDwarfDebugFlags - Embed the compile options to clang into the Dwarf
/// compile unit information.
virtual bool UseDwarfDebugFlags() const { return false; }
@@ -202,6 +227,10 @@ public:
virtual void AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const;
+ // GetRuntimeLibType - Determine the runtime library type to use with the
+ // given compilation arguments.
+ virtual RuntimeLibType GetRuntimeLibType(const ArgList &Args) const;
+
// GetCXXStdlibType - Determine the C++ standard library type to use with the
// given compilation arguments.
virtual CXXStdlibType GetCXXStdlibType(const ArgList &Args) const;
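
The new RuntimeLibType hooks mirror the existing CXXStdlibType pair: GetRuntimeLibType presumably consults -rtlib= when present and otherwise falls back to GetDefaultRuntimeLibType, which returns RLT_Libgcc here. A hedged standalone sketch of that dispatch (the real method reads the ArgList and would diagnose unknown values):

    #include <iostream>
    #include <string>

    enum RuntimeLibType { RLT_CompilerRT, RLT_Libgcc };

    // RtlibArg is null when -rtlib= was not given.
    RuntimeLibType getRuntimeLibType(const std::string *RtlibArg) {
      if (RtlibArg) {
        if (*RtlibArg == "compiler-rt") return RLT_CompilerRT;
        if (*RtlibArg == "libgcc")      return RLT_Libgcc;
        // A real driver would emit a diagnostic for unknown values.
      }
      return RLT_Libgcc; // the GetDefaultRuntimeLibType fallback
    }

    int main() {
      std::string A = "compiler-rt";
      std::cout << (getRuntimeLibType(&A) == RLT_CompilerRT) << "\n";  // 1
      std::cout << (getRuntimeLibType(nullptr) == RLT_Libgcc) << "\n"; // 1
    }
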
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.def b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
index 8449d63..b107dfb 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Types.def
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
@@ -82,6 +82,8 @@ TYPE("lto-bc", LTO_BC, INVALID, "o", "")
TYPE("ast", AST, INVALID, "ast", "u")
TYPE("plist", Plist, INVALID, "plist", "")
TYPE("rewritten-objc", RewrittenObjC,INVALID, "cpp", "")
+TYPE("rewritten-legacy-objc", RewrittenLegacyObjC,INVALID, "cpp", "")
+TYPE("remap", Remap, INVALID, "remap", "")
TYPE("precompiled-header", PCH, INVALID, "gch", "A")
TYPE("object", Object, INVALID, "o", "")
TYPE("treelang", Treelang, INVALID, 0, "u")
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/Commit.h b/contrib/llvm/tools/clang/include/clang/Edit/Commit.h
new file mode 100644
index 0000000..aaf6b18
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/Commit.h
@@ -0,0 +1,140 @@
+//===----- Commit.h - A unit of edits ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_COMMIT_H
+#define LLVM_CLANG_EDIT_COMMIT_H
+
+#include "clang/Edit/FileOffset.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+ class LangOptions;
+ class PreprocessingRecord;
+
+namespace edit {
+ class EditedSource;
+
+class Commit {
+public:
+ enum EditKind {
+ Act_Insert,
+ Act_InsertFromRange,
+ Act_Remove
+ };
+
+ struct Edit {
+ EditKind Kind;
+ StringRef Text;
+ SourceLocation OrigLoc;
+ FileOffset Offset;
+ FileOffset InsertFromRangeOffs;
+ unsigned Length;
+ bool BeforePrev;
+
+ SourceLocation getFileLocation(SourceManager &SM) const;
+ CharSourceRange getFileRange(SourceManager &SM) const;
+ CharSourceRange getInsertFromRange(SourceManager &SM) const;
+ };
+
+private:
+ const SourceManager &SourceMgr;
+ const LangOptions &LangOpts;
+ const PreprocessingRecord *PPRec;
+ EditedSource *Editor;
+
+ bool IsCommitable;
+ SmallVector<Edit, 8> CachedEdits;
+
+public:
+ explicit Commit(EditedSource &Editor);
+ Commit(const SourceManager &SM, const LangOptions &LangOpts,
+ const PreprocessingRecord *PPRec = 0)
+ : SourceMgr(SM), LangOpts(LangOpts), PPRec(PPRec), Editor(0),
+ IsCommitable(true) { }
+
+ bool isCommitable() const { return IsCommitable; }
+
+ bool insert(SourceLocation loc, StringRef text, bool afterToken = false,
+ bool beforePreviousInsertions = false);
+ bool insertAfterToken(SourceLocation loc, StringRef text,
+ bool beforePreviousInsertions = false) {
+ return insert(loc, text, /*afterToken=*/true, beforePreviousInsertions);
+ }
+ bool insertBefore(SourceLocation loc, StringRef text) {
+ return insert(loc, text, /*afterToken=*/false,
+ /*beforePreviousInsertions=*/true);
+ }
+ bool insertFromRange(SourceLocation loc, CharSourceRange range,
+ bool afterToken = false,
+ bool beforePreviousInsertions = false);
+ bool insertWrap(StringRef before, CharSourceRange range, StringRef after);
+
+ bool remove(CharSourceRange range);
+
+ bool replace(CharSourceRange range, StringRef text);
+ bool replaceWithInner(CharSourceRange range, CharSourceRange innerRange);
+ bool replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText);
+
+ bool insertFromRange(SourceLocation loc, SourceRange TokenRange,
+ bool afterToken = false,
+ bool beforePreviousInsertions = false) {
+ return insertFromRange(loc, CharSourceRange::getTokenRange(TokenRange),
+ afterToken, beforePreviousInsertions);
+ }
+ bool insertWrap(StringRef before, SourceRange TokenRange, StringRef after) {
+ return insertWrap(before, CharSourceRange::getTokenRange(TokenRange), after);
+ }
+ bool remove(SourceRange TokenRange) {
+ return remove(CharSourceRange::getTokenRange(TokenRange));
+ }
+ bool replace(SourceRange TokenRange, StringRef text) {
+ return replace(CharSourceRange::getTokenRange(TokenRange), text);
+ }
+ bool replaceWithInner(SourceRange TokenRange, SourceRange TokenInnerRange) {
+ return replaceWithInner(CharSourceRange::getTokenRange(TokenRange),
+ CharSourceRange::getTokenRange(TokenInnerRange));
+ }
+
+ typedef SmallVector<Edit, 8>::const_iterator edit_iterator;
+ edit_iterator edit_begin() const { return CachedEdits.begin(); }
+ edit_iterator edit_end() const { return CachedEdits.end(); }
+
+private:
+ void addInsert(SourceLocation OrigLoc,
+ FileOffset Offs, StringRef text, bool beforePreviousInsertions);
+ void addInsertFromRange(SourceLocation OrigLoc, FileOffset Offs,
+ FileOffset RangeOffs, unsigned RangeLen,
+ bool beforePreviousInsertions);
+ void addRemove(SourceLocation OrigLoc, FileOffset Offs, unsigned Len);
+
+ bool canInsert(SourceLocation loc, FileOffset &Offset);
+ bool canInsertAfterToken(SourceLocation loc, FileOffset &Offset,
+ SourceLocation &AfterLoc);
+ bool canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs);
+ bool canRemoveRange(CharSourceRange range, FileOffset &Offs, unsigned &Len);
+ bool canReplaceText(SourceLocation loc, StringRef text,
+ FileOffset &Offs, unsigned &Len);
+
+ void commitInsert(FileOffset offset, StringRef text,
+ bool beforePreviousInsertions);
+ void commitRemove(FileOffset offset, unsigned length);
+
+ bool isAtStartOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroBegin = 0) const;
+ bool isAtEndOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroEnd = 0) const;
+};
+
+}
+
+} // end namespace clang
+
+#endif
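
Going only by the declarations above, a plausible use of Commit is: queue edits against a SourceManager, check isCommitable(), then hand the batch to an EditedSource. The fragment below is a sketch, not a compilable unit — it assumes a configured SourceManager, LangOptions, and valid locations from a real compiler invocation:

    // Sketch: assumes clang/Edit/Commit.h and friends are on the include path.
    #include "clang/Edit/Commit.h"

    using namespace clang;

    void sketchCommit(const SourceManager &SM, const LangOptions &LangOpts,
                      SourceLocation Loc, CharSourceRange Range) {
      edit::Commit C(SM, LangOpts);        // no PreprocessingRecord
      C.insertAfterToken(Loc, "@");        // queue an insertion
      C.replace(Range, "arr[idx]");        // queue a replacement
      if (!C.isCommitable())
        return;                            // some edit could not be applied
      // An EditedSource would consume the whole batch via
      // EditedSource::commit(C); the cached edits are also iterable:
      for (edit::Commit::edit_iterator I = C.edit_begin(), E = C.edit_end();
           I != E; ++I)
        (void)I->Kind;  // Act_Insert / Act_InsertFromRange / Act_Remove
    }
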
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h b/contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h
new file mode 100644
index 0000000..c685753
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h
@@ -0,0 +1,87 @@
+//===----- EditedSource.h - Collection of source edits ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_EDITEDSOURCE_H
+#define LLVM_CLANG_EDIT_EDITEDSOURCE_H
+
+#include "clang/Edit/FileOffset.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <map>
+
+namespace clang {
+ class LangOptions;
+ class PreprocessingRecord;
+
+namespace edit {
+ class Commit;
+ class EditsReceiver;
+
+class EditedSource {
+ const SourceManager &SourceMgr;
+ const LangOptions &LangOpts;
+ const PreprocessingRecord *PPRec;
+
+ struct FileEdit {
+ StringRef Text;
+ unsigned RemoveLen;
+
+ FileEdit() : RemoveLen(0) {}
+ };
+
+ typedef std::map<FileOffset, FileEdit> FileEditsTy;
+ FileEditsTy FileEdits;
+
+ llvm::DenseMap<unsigned, SourceLocation> ExpansionToArgMap;
+
+ llvm::BumpPtrAllocator StrAlloc;
+
+public:
+ EditedSource(const SourceManager &SM, const LangOptions &LangOpts,
+ const PreprocessingRecord *PPRec = 0)
+ : SourceMgr(SM), LangOpts(LangOpts), PPRec(PPRec),
+ StrAlloc(/*size=*/512) { }
+
+ const SourceManager &getSourceManager() const { return SourceMgr; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ const PreprocessingRecord *getPreprocessingRecord() const { return PPRec; }
+
+ bool canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs);
+
+ bool commit(const Commit &commit);
+
+ void applyRewrites(EditsReceiver &receiver);
+ void clearRewrites();
+
+ StringRef copyString(StringRef str) {
+ char *buf = StrAlloc.Allocate<char>(str.size());
+ std::memcpy(buf, str.data(), str.size());
+ return StringRef(buf, str.size());
+ }
+ StringRef copyString(const Twine &twine);
+
+private:
+ bool commitInsert(SourceLocation OrigLoc, FileOffset Offs, StringRef text,
+ bool beforePreviousInsertions);
+ bool commitInsertFromRange(SourceLocation OrigLoc, FileOffset Offs,
+ FileOffset InsertFromRangeOffs, unsigned Len,
+ bool beforePreviousInsertions);
+ void commitRemove(SourceLocation OrigLoc, FileOffset BeginOffs, unsigned Len);
+
+ StringRef getSourceText(FileOffset BeginOffs, FileOffset EndOffs,
+ bool &Invalid);
+ FileEditsTy::iterator getActionForOffset(FileOffset Offs);
+};
+
+}
+
+} // end namespace clang
+
+#endif
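
copyString exists because queued edits can outlive the StringRefs passed in: the text is copied into a BumpPtrAllocator whose storage is released all at once when the EditedSource goes away. The same idea in standalone form, with a vector of owned buffers standing in for the bump allocator:

    #include <cstring>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    // Minimal arena: copies live until the arena itself is destroyed.
    class Arena {
      std::vector<std::unique_ptr<char[]>> Storage;
    public:
      const char *copyString(const std::string &S) {
        auto Buf = std::make_unique<char[]>(S.size() + 1);
        std::memcpy(Buf.get(), S.c_str(), S.size() + 1);
        Storage.push_back(std::move(Buf));
        return Storage.back().get();
      }
    };

    int main() {
      Arena A;
      std::string Temp = "replacement text";
      const char *Stable = A.copyString(Temp);
      Temp.clear();                 // the original may go away...
      std::cout << Stable << "\n";  // ...the arena copy stays valid
    }
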
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h b/contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h
new file mode 100644
index 0000000..600ac28
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h
@@ -0,0 +1,35 @@
+//===----- EditsReceiver.h - Receiver of source edits -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_EDITSRECEIVER_H
+#define LLVM_CLANG_EDIT_EDITSRECEIVER_H
+
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+ class SourceLocation;
+ class CharSourceRange;
+
+namespace edit {
+
+class EditsReceiver {
+public:
+ virtual ~EditsReceiver() { }
+
+ virtual void insert(SourceLocation loc, StringRef text) = 0;
+ virtual void replace(CharSourceRange range, StringRef text) = 0;
+ /// \brief By default it calls replace with an empty string.
+ virtual void remove(CharSourceRange range);
+};
+
+}
+
+} // end namespace clang
+
+#endif
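
EditsReceiver is the small callback interface that EditedSource::applyRewrites drives. A hedged sketch of a concrete receiver (clang headers assumed on the include path; the bodies are left as comments since what a consumer records is up to it):

    #include "clang/Edit/EditsReceiver.h"

    using namespace clang;

    // Receives the final, merged edits that applyRewrites emits.
    class CollectingReceiver : public edit::EditsReceiver {
    public:
      virtual void insert(SourceLocation Loc, StringRef Text) {
        // A rewriter would splice Text into the buffer at Loc here.
      }
      virtual void replace(CharSourceRange Range, StringRef Text) {
        // ...and substitute Text for the characters covered by Range here.
      }
      // remove() is inherited; by default it forwards to replace(Range, "").
    };
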
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h b/contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h
new file mode 100644
index 0000000..675ad18
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h
@@ -0,0 +1,65 @@
+//===----- FileOffset.h - Offset in a file ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_FILEOFFSET_H
+#define LLVM_CLANG_EDIT_FILEOFFSET_H
+
+#include "clang/Basic/SourceLocation.h"
+
+namespace clang {
+
+namespace edit {
+
+class FileOffset {
+ FileID FID;
+ unsigned Offs;
+public:
+ FileOffset() : Offs(0) { }
+ FileOffset(FileID fid, unsigned offs) : FID(fid), Offs(offs) { }
+
+ bool isInvalid() const { return FID.isInvalid(); }
+
+ FileID getFID() const { return FID; }
+ unsigned getOffset() const { return Offs; }
+
+ FileOffset getWithOffset(unsigned offset) const {
+ FileOffset NewOffs = *this;
+ NewOffs.Offs += offset;
+ return NewOffs;
+ }
+
+ friend bool operator==(FileOffset LHS, FileOffset RHS) {
+ return LHS.FID == RHS.FID && LHS.Offs == RHS.Offs;
+ }
+ friend bool operator!=(FileOffset LHS, FileOffset RHS) {
+ return !(LHS == RHS);
+ }
+ friend bool operator<(FileOffset LHS, FileOffset RHS) {
+ if (LHS.FID != RHS.FID)
+ return LHS.FID < RHS.FID;
+ return LHS.Offs < RHS.Offs;
+ }
+ friend bool operator>(FileOffset LHS, FileOffset RHS) {
+ if (LHS.FID != RHS.FID)
+ return LHS.FID > RHS.FID;
+ return LHS.Offs > RHS.Offs;
+ }
+ friend bool operator>=(FileOffset LHS, FileOffset RHS) {
+ return LHS > RHS || LHS == RHS;
+ }
+ friend bool operator<=(FileOffset LHS, FileOffset RHS) {
+ return LHS < RHS || LHS == RHS;
+ }
+};
+
+}
+
+} // end namespace clang
+
+#endif
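
The strict ordering defined here — FileID first, then offset — is what lets EditedSource keep its FileEditsTy as a std::map<FileOffset, FileEdit> sorted by position. The shape of that, standalone, with a pair standing in for FileOffset since std::pair already orders lexicographically:

    #include <iostream>
    #include <map>
    #include <utility>

    // Simplified FileOffset: (file id, byte offset).
    typedef std::pair<int, unsigned> FileOffset;

    int main() {
      std::map<FileOffset, const char *> Edits;
      Edits[FileOffset(1, 40)] = "second edit in file 1";
      Edits[FileOffset(1, 10)] = "first edit in file 1";
      Edits[FileOffset(0, 99)] = "edit in file 0 sorts first";
      // Iteration visits edits in (file, offset) order: (0,99), (1,10), (1,40).
      for (std::map<FileOffset, const char *>::iterator I = Edits.begin();
           I != Edits.end(); ++I)
        std::cout << I->first.first << ":" << I->first.second << " "
                  << I->second << "\n";
    }
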
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h b/contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h
new file mode 100644
index 0000000..aa7a5b2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h
@@ -0,0 +1,33 @@
+//===--- Rewriters.h - Rewritings ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_REWRITERS_H
+#define LLVM_CLANG_EDIT_REWRITERS_H
+
+namespace clang {
+ class ObjCMessageExpr;
+ class NSAPI;
+
+namespace edit {
+ class Commit;
+
+bool rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+bool rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+bool rewriteToObjCSubscriptSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+}
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
index 471476a..5e4ecad 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
@@ -20,6 +20,7 @@
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang/Lex/ModuleLoader.h"
#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"
@@ -45,6 +46,7 @@ class ASTContext;
class ASTReader;
class CodeCompleteConsumer;
class CompilerInvocation;
+class CompilerInstance;
class Decl;
class DiagnosticsEngine;
class FileEntry;
@@ -57,39 +59,33 @@ class ASTFrontendAction;
using namespace idx;
-/// \brief Allocator for a cached set of global code completions.
-class GlobalCodeCompletionAllocator
- : public CodeCompletionAllocator,
- public llvm::RefCountedBase<GlobalCodeCompletionAllocator>
-{
-
-};
-
/// \brief Utility class for loading a ASTContext from an AST file.
///
class ASTUnit : public ModuleLoader {
private:
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
- llvm::IntrusiveRefCntPtr<FileManager> FileMgr;
- llvm::IntrusiveRefCntPtr<SourceManager> SourceMgr;
- llvm::OwningPtr<HeaderSearch> HeaderInfo;
- llvm::IntrusiveRefCntPtr<TargetInfo> Target;
- llvm::IntrusiveRefCntPtr<Preprocessor> PP;
- llvm::IntrusiveRefCntPtr<ASTContext> Ctx;
+ IntrusiveRefCntPtr<LangOptions> LangOpts;
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
+ IntrusiveRefCntPtr<FileManager> FileMgr;
+ IntrusiveRefCntPtr<SourceManager> SourceMgr;
+ OwningPtr<HeaderSearch> HeaderInfo;
+ IntrusiveRefCntPtr<TargetInfo> Target;
+ IntrusiveRefCntPtr<Preprocessor> PP;
+ IntrusiveRefCntPtr<ASTContext> Ctx;
+ ASTReader *Reader;
FileSystemOptions FileSystemOpts;
/// \brief The AST consumer that received information about the translation
/// unit as it was parsed or loaded.
- llvm::OwningPtr<ASTConsumer> Consumer;
+ OwningPtr<ASTConsumer> Consumer;
/// \brief The semantic analysis object used to type-check the translation
/// unit.
- llvm::OwningPtr<Sema> TheSema;
+ OwningPtr<Sema> TheSema;
/// Optional owned invocation, just used to make the invocation used in
/// LoadFromCommandLine available.
- llvm::IntrusiveRefCntPtr<CompilerInvocation> Invocation;
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation;
/// \brief The set of target features.
///
@@ -126,6 +122,14 @@ private:
// source. In the long term we should make the Index library use efficient and
// more scalable search mechanisms.
std::vector<Decl*> TopLevelDecls;
+
+ /// \brief Sorted (by file offset) vector of pairs of file offset/Decl.
+ typedef SmallVector<std::pair<unsigned, Decl *>, 64> LocDeclsTy;
+ typedef llvm::DenseMap<FileID, LocDeclsTy *> FileDeclsTy;
+
+ /// \brief Map from FileID to the file-level declarations that it contains.
+ /// The files and decls are only local (and non-preamble) ones.
+ FileDeclsTy FileDecls;
/// The name of the original source file used to generate this ASTUnit.
std::string OriginalSourceFile;
@@ -140,6 +144,10 @@ private:
/// translation unit.
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
+ /// \brief The set of diagnostics produced when failing to parse, e.g. due
+ /// to failure to load the PCH.
+ SmallVector<StoredDiagnostic, 4> FailedParseDiagnostics;
+
/// \brief The number of stored diagnostics that come from the driver
/// itself.
///
@@ -147,10 +155,6 @@ private:
/// the next.
unsigned NumStoredDiagnosticsFromDriver;
- /// \brief Temporary files that should be removed when the ASTUnit is
- /// destroyed.
- SmallVector<llvm::sys::Path, 4> TemporaryFiles;
-
/// \brief Counter that determines when we want to try building a
/// precompiled preamble.
///
@@ -161,10 +165,7 @@ private:
/// building the precompiled preamble fails, we won't try again for
/// some number of calls.
unsigned PreambleRebuildCounter;
-
- /// \brief The file in which the precompiled preamble is stored.
- std::string PreambleFile;
-
+
public:
class PreambleData {
const FileEntry *File;
@@ -254,15 +255,11 @@ private:
/// \brief Whether we should be caching code-completion results.
bool ShouldCacheCodeCompletionResults;
-
- /// \brief Whether we want to include nested macro expansions in the
- /// detailed preprocessing record.
- bool NestedMacroExpansions;
/// \brief The language options used when we load an AST file.
LangOptions ASTFileLangOpts;
- static void ConfigureDiags(llvm::IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
+ static void ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
const char **ArgBegin, const char **ArgEnd,
ASTUnit &AST, bool CaptureDiagnostics);
@@ -271,6 +268,8 @@ private:
const SmallVectorImpl<StoredDiagnostic> &Diags,
SmallVectorImpl<StoredDiagnostic> &Out);
+ void clearFileLevelDecls();
+
public:
/// \brief A cached code-completion result, which may be introduced in one of
/// many different contexts.
@@ -318,29 +317,24 @@ public:
}
/// \brief Retrieve the allocator used to cache global code completions.
- llvm::IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
getCachedCompletionAllocator() {
return CachedCompletionAllocator;
}
-
- /// \brief Retrieve the allocator used to cache global code completions.
- /// Creates the allocator if it doesn't already exist.
- llvm::IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
- getCursorCompletionAllocator() {
- if (!CursorCompletionAllocator.getPtr()) {
- CursorCompletionAllocator = new GlobalCodeCompletionAllocator;
- }
- return CursorCompletionAllocator;
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() {
+ if (!CCTUInfo)
+ CCTUInfo.reset(new CodeCompletionTUInfo(
+ new GlobalCodeCompletionAllocator));
+ return *CCTUInfo;
}
-
+
private:
/// \brief Allocator used to store cached code completions.
- llvm::IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
CachedCompletionAllocator;
- /// \brief Allocator used to store code completions for arbitrary cursors.
- llvm::IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
- CursorCompletionAllocator;
+ OwningPtr<CodeCompletionTUInfo> CCTUInfo;
/// \brief The set of cached code-completion results.
std::vector<CachedCodeCompletionResult> CachedCompletionResults;
@@ -395,7 +389,11 @@ private:
bool AllowRebuild = true,
unsigned MaxLines = 0);
void RealizeTopLevelDeclsFromPreamble();
-
+
+ /// \brief Transfers ownership of the objects (like SourceManager) from
+ /// \param CI to this ASTUnit.
+ void transferASTDataFromCompilerInstance(CompilerInstance &CI);
+
/// \brief Allows us to assert that ASTUnit is not being used concurrently,
/// which is not supported.
///
@@ -451,6 +449,7 @@ public:
ASTContext &getASTContext() { return *Ctx; }
void setASTContext(ASTContext *ctx) { Ctx = ctx; }
+ void setPreprocessor(Preprocessor *pp);
bool hasSema() const { return TheSema; }
Sema &getSema() const {
@@ -468,9 +467,7 @@ public:
/// \brief Add a temporary file that the ASTUnit depends on.
///
/// This file will be erased when the ASTUnit is destroyed.
- void addTemporaryFile(const llvm::sys::Path &TempFile) {
- TemporaryFiles.push_back(TempFile);
- }
+ void addTemporaryFile(const llvm::sys::Path &TempFile);
bool getOnlyLocalDecls() const { return OnlyLocalDecls; }
@@ -514,6 +511,15 @@ public:
TopLevelDecls.push_back(D);
}
+ /// \brief Add a new local file-level declaration.
+ void addFileLevelDecl(Decl *D);
+
+ /// \brief Get the decls that are contained in a file in the Offset/Length
+ /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// a range.
+ void findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls);
+
/// \brief Add a new top-level declaration, identified by its ID in
/// the precompiled preamble.
void addTopLevelDeclFromPreamble(serialization::DeclID D) {
@@ -546,6 +552,11 @@ public:
/// preamble, otherwise it returns \arg Loc.
SourceLocation mapLocationToPreamble(SourceLocation Loc);
+ bool isInPreambleFileID(SourceLocation Loc);
+ bool isInMainFileID(SourceLocation Loc);
+ SourceLocation getStartOfMainFileID();
+ SourceLocation getEndOfPreambleFileID();
+
/// \brief \see mapLocationFromPreamble.
SourceRange mapRangeFromPreamble(SourceRange R) {
return SourceRange(mapLocationFromPreamble(R.getBegin()),
@@ -559,17 +570,26 @@ public:
}
// Retrieve the diagnostics associated with this AST
- typedef const StoredDiagnostic *stored_diag_iterator;
- stored_diag_iterator stored_diag_begin() const {
+ typedef StoredDiagnostic *stored_diag_iterator;
+ typedef const StoredDiagnostic *stored_diag_const_iterator;
+ stored_diag_const_iterator stored_diag_begin() const {
+ return StoredDiagnostics.begin();
+ }
+ stored_diag_iterator stored_diag_begin() {
return StoredDiagnostics.begin();
}
- stored_diag_iterator stored_diag_end() const {
+ stored_diag_const_iterator stored_diag_end() const {
+ return StoredDiagnostics.end();
+ }
+ stored_diag_iterator stored_diag_end() {
return StoredDiagnostics.end();
}
unsigned stored_diag_size() const { return StoredDiagnostics.size(); }
-
- SmallVector<StoredDiagnostic, 4> &getStoredDiagnostics() {
- return StoredDiagnostics;
+
+ stored_diag_iterator stored_diag_afterDriver_begin() {
+ if (NumStoredDiagnosticsFromDriver > StoredDiagnostics.size())
+ NumStoredDiagnosticsFromDriver = 0;
+ return StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver;
}
typedef std::vector<CachedCodeCompletionResult>::iterator
@@ -601,7 +621,8 @@ public:
/// \brief Create a ASTUnit. Gets ownership of the passed CompilerInvocation.
static ASTUnit *create(CompilerInvocation *CI,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags);
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ bool CaptureDiagnostics = false);
/// \brief Create a ASTUnit from an AST file.
///
@@ -612,12 +633,13 @@ public:
///
/// \returns - The initialized ASTUnit or null if the AST failed to load.
static ASTUnit *LoadFromASTFile(const std::string &Filename,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts,
bool OnlyLocalDecls = false,
RemappedFile *RemappedFiles = 0,
unsigned NumRemappedFiles = 0,
- bool CaptureDiagnostics = false);
+ bool CaptureDiagnostics = false,
+ bool AllowPCHWithCompilerErrors = false);
private:
/// \brief Helper function for \c LoadFromCompilerInvocation() and
@@ -646,10 +668,28 @@ public:
///
/// \param Unit - optionally an already created ASTUnit. Its ownership is not
/// transferred.
+ ///
+ /// \param Persistent - if true, the returned ASTUnit will be complete;
+ /// false means the caller is only interested in getting info through the
+ /// provided \see Action.
+ ///
+ /// \param ErrAST - If non-null and parsing failed without any AST to return
+ /// (e.g. because the PCH could not be loaded), this accepts the ASTUnit
+ /// mainly to allow the caller to see the diagnostics.
+ /// This will only receive an ASTUnit if a new one was created. If an already
+ /// created ASTUnit was passed in \param Unit then the caller can check that.
+ ///
static ASTUnit *LoadFromCompilerInvocationAction(CompilerInvocation *CI,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
ASTFrontendAction *Action = 0,
- ASTUnit *Unit = 0);
+ ASTUnit *Unit = 0,
+ bool Persistent = true,
+ StringRef ResourceFilesPath = StringRef(),
+ bool OnlyLocalDecls = false,
+ bool CaptureDiagnostics = false,
+ bool PrecompilePreamble = false,
+ bool CacheCodeCompletionResults = false,
+ OwningPtr<ASTUnit> *ErrAST = 0);
/// LoadFromCompilerInvocation - Create an ASTUnit from a source file, via a
/// CompilerInvocation object.
@@ -663,13 +703,12 @@ public:
// FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we
// shouldn't need to specify them at construction time.
static ASTUnit *LoadFromCompilerInvocation(CompilerInvocation *CI,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
bool OnlyLocalDecls = false,
bool CaptureDiagnostics = false,
bool PrecompilePreamble = false,
TranslationUnitKind TUKind = TU_Complete,
- bool CacheCodeCompletionResults = false,
- bool NestedMacroExpansions = true);
+ bool CacheCodeCompletionResults = false);
/// LoadFromCommandLine - Create an ASTUnit from a vector of command line
/// arguments, which must specify exactly one source file.
@@ -682,12 +721,16 @@ public:
/// lifetime is expected to extend past that of the returned ASTUnit.
///
/// \param ResourceFilesPath - The path to the compiler resource files.
- //
+ ///
+ /// \param ErrAST - If non-null and parsing failed without any AST to return
+ /// (e.g. because the PCH could not be loaded), this accepts the ASTUnit
+ /// mainly to allow the caller to see the diagnostics.
+ ///
// FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we
// shouldn't need to specify them at construction time.
static ASTUnit *LoadFromCommandLine(const char **ArgBegin,
const char **ArgEnd,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
StringRef ResourceFilesPath,
bool OnlyLocalDecls = false,
bool CaptureDiagnostics = false,
@@ -697,7 +740,9 @@ public:
bool PrecompilePreamble = false,
TranslationUnitKind TUKind = TU_Complete,
bool CacheCodeCompletionResults = false,
- bool NestedMacroExpansions = true);
+ bool AllowPCHWithCompilerErrors = false,
+ bool SkipFunctionBodies = false,
+ OwningPtr<ASTUnit> *ErrAST = 0);
/// \brief Reparse the source files using the same command-line options that
/// were originally used to produce this translation unit.
@@ -743,9 +788,9 @@ public:
/// \returns True if an error occurred, false otherwise.
bool serialize(raw_ostream &OS);
- virtual ModuleKey loadModule(SourceLocation ImportLoc,
- IdentifierInfo &ModuleName,
- SourceLocation ModuleNameLoc) {
+ virtual Module *loadModule(SourceLocation ImportLoc, ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective) {
// ASTUnit doesn't know how to load modules (not that this matters).
return 0;
}
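The ASTUnit changes above split the stored-diagnostic iterators into const and non-const flavors, add stored_diag_afterDriver_begin(), and let an AST file built from code with errors load anyway. A minimal sketch of a caller using the revised LoadFromASTFile signature (the file name and diagnostics setup are illustrative, not part of this commit):

#include "clang/Frontend/ASTUnit.h"
#include "clang/Basic/FileSystemOptions.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;

void printStoredDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags) {
  FileSystemOptions FSOpts;
  // The new trailing parameter lets an AST file produced from erroneous
  // code load instead of being rejected outright.
  ASTUnit *AST = ASTUnit::LoadFromASTFile("t.ast", Diags, FSOpts,
                                          /*OnlyLocalDecls=*/false,
                                          /*RemappedFiles=*/0,
                                          /*NumRemappedFiles=*/0,
                                          /*CaptureDiagnostics=*/true,
                                          /*AllowPCHWithCompilerErrors=*/true);
  if (!AST)
    return;
  // stored_diag_afterDriver_begin() skips diagnostics recorded by the
  // driver; the non-const iterator permits in-place mutation.
  for (ASTUnit::stored_diag_iterator I = AST->stored_diag_afterDriver_begin(),
                                     E = AST->stored_diag_end();
       I != E; ++I)
    llvm::errs() << I->getMessage() << "\n";
  delete AST;
}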
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def b/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
index 010f889..b5b9394 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
@@ -30,6 +30,7 @@ ANALYSIS_CONSTRAINTS(RangeConstraints, "range", "Use constraint tracking of conc
ANALYSIS_DIAGNOSTICS(HTML, "html", "Output analysis results using HTML", createHTMLDiagnosticConsumer, false)
ANALYSIS_DIAGNOSTICS(PLIST, "plist", "Output analysis results using Plists", createPlistDiagnosticConsumer, true)
+ANALYSIS_DIAGNOSTICS(PLIST_MULTI_FILE, "plist-multi-file", "Output analysis results using Plists (allowing for multi-file bugs)", createPlistMultiFileDiagnosticConsumer, true)
ANALYSIS_DIAGNOSTICS(PLIST_HTML, "plist-html", "Output analysis results using HTML wrapped with Plists", createPlistHTMLDiagnosticConsumer, true)
ANALYSIS_DIAGNOSTICS(TEXT, "text", "Text output of analysis results", createTextPathDiagnosticConsumer, true)
@@ -41,8 +42,24 @@ ANALYSIS_PURGE(PurgeStmt, "statement", "Purge symbols, bindings, and constraint
ANALYSIS_PURGE(PurgeBlock, "block", "Purge symbols, bindings, and constraints before every basic block")
ANALYSIS_PURGE(PurgeNone, "none", "Do not purge symbols, bindings, or constraints")
+#ifndef ANALYSIS_IPA
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC)
+#endif
+
+ANALYSIS_IPA(None, "none", "Perform only intra-procedural analysis")
+ANALYSIS_IPA(Inlining, "inlining", "Experimental: Inline callees when their definitions are available")
+
+#ifndef ANALYSIS_INLINING_MODE
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC)
+#endif
+
+ANALYSIS_INLINING_MODE(All, "all", "Analyze all functions in the order defined in the TU")
+ANALYSIS_INLINING_MODE(NoRedundancy, "noredundancy", "Do not analyze a function that has previously been inlined; use the call graph to determine analysis order")
+
#undef ANALYSIS_STORE
#undef ANALYSIS_CONSTRAINTS
#undef ANALYSIS_DIAGNOSTICS
#undef ANALYSIS_PURGE
+#undef ANALYSIS_INLINING_MODE
+#undef ANALYSIS_IPA
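The new ANALYSIS_IPA and ANALYSIS_INLINING_MODE entries follow the usual .def X-macro pattern: a client defines the macro it cares about, includes the file, and gets one expansion per entry. A hedged sketch of mapping a command-line flag string back to the AnalysisIPAMode enum declared below in AnalyzerOptions.h (the helper name is illustrative):

#include "clang/Frontend/AnalyzerOptions.h"
#include "llvm/ADT/StringSwitch.h"

// Illustrative helper: translate an IPA flag value into the enum. Each
// .def entry expands to one .Case(CMDFLAG, NAME) clause.
static clang::AnalysisIPAMode parseIPAMode(llvm::StringRef Name) {
  return llvm::StringSwitch<clang::AnalysisIPAMode>(Name)
#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) .Case(CMDFLAG, clang::NAME)
#include "clang/Frontend/Analyses.def"
    .Default(clang::NumIPAModes); // unknown flag
}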
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
index 3565a51..847bfbd 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
@@ -60,6 +60,20 @@ enum AnalysisPurgeMode {
NumPurgeModes
};
+/// AnalysisIPAMode - Set of inter-procedural modes.
+enum AnalysisIPAMode {
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) NAME,
+#include "clang/Frontend/Analyses.def"
+NumIPAModes
+};
+
+/// AnalysisInliningMode - Set of function selection heuristics for inlining.
+enum AnalysisInliningMode {
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) NAME,
+#include "clang/Frontend/Analyses.def"
+NumInliningModes
+};
+
class AnalyzerOptions {
public:
/// \brief Pair of checker name and enable/disable.
@@ -68,6 +82,7 @@ public:
AnalysisConstraints AnalysisConstraintsOpt;
AnalysisDiagClients AnalysisDiagOpt;
AnalysisPurgeMode AnalysisPurgeOpt;
+ AnalysisIPAMode IPAMode;
std::string AnalyzeSpecificFunction;
unsigned MaxNodes;
unsigned MaxLoop;
@@ -79,11 +94,15 @@ public:
unsigned TrimGraph : 1;
unsigned VisualizeEGDot : 1;
unsigned VisualizeEGUbi : 1;
- unsigned InlineCall : 1;
unsigned UnoptimizedCFG : 1;
unsigned CFGAddImplicitDtors : 1;
unsigned CFGAddInitializers : 1;
unsigned EagerlyTrimEGraph : 1;
+ unsigned PrintStats : 1;
+ unsigned NoRetryExhausted : 1;
+ unsigned InlineMaxStackDepth;
+ unsigned InlineMaxFunctionSize;
+ AnalysisInliningMode InliningMode;
public:
AnalyzerOptions() {
@@ -91,6 +110,7 @@ public:
AnalysisConstraintsOpt = RangeConstraintsModel;
AnalysisDiagOpt = PD_HTML;
AnalysisPurgeOpt = PurgeStmt;
+ IPAMode = Inlining;
ShowCheckerHelp = 0;
AnalyzeAll = 0;
AnalyzerDisplayProgress = 0;
@@ -99,11 +119,16 @@ public:
TrimGraph = 0;
VisualizeEGDot = 0;
VisualizeEGUbi = 0;
- InlineCall = 0;
UnoptimizedCFG = 0;
CFGAddImplicitDtors = 0;
CFGAddInitializers = 0;
EagerlyTrimEGraph = 0;
+ PrintStats = 0;
+ NoRetryExhausted = 0;
+ // Cap the stack depth at 4 calls (5 stack frames, base + 4 calls).
+ InlineMaxStackDepth = 5;
+ InlineMaxFunctionSize = 200;
+ InliningMode = NoRedundancy;
}
};
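With these defaults, inlining-based IPA is on out of the box (capped at four inlined calls and 200-statement functions); a caller that wants the old intra-procedural behavior opts out explicitly. A small illustrative sketch:

#include "clang/Frontend/AnalyzerOptions.h"

void configureAnalyzer(clang::AnalyzerOptions &Opts) {
  // Defaults after this change: IPAMode == Inlining, InliningMode ==
  // NoRedundancy, InlineMaxStackDepth == 5, InlineMaxFunctionSize == 200.
  Opts.IPAMode = clang::None;  // revert to intra-procedural analysis
  Opts.NoRetryExhausted = 1;   // keep results when the engine gives up
  Opts.PrintStats = 1;         // emit analyzer statistics
}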
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h
index f20cf6f..ce2b242 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h
@@ -21,8 +21,9 @@ class LangOptions;
/// should be the "primary" client, and will be used for computing whether the
/// diagnostics should be included in counts.
class ChainedDiagnosticConsumer : public DiagnosticConsumer {
- llvm::OwningPtr<DiagnosticConsumer> Primary;
- llvm::OwningPtr<DiagnosticConsumer> Secondary;
+ virtual void anchor();
+ OwningPtr<DiagnosticConsumer> Primary;
+ OwningPtr<DiagnosticConsumer> Secondary;
public:
ChainedDiagnosticConsumer(DiagnosticConsumer *_Primary,
@@ -42,6 +43,11 @@ public:
Primary->EndSourceFile();
}
+ virtual void finish() {
+ Secondary->finish();
+ Primary->finish();
+ }
+
virtual bool IncludeInDiagnosticCounts() const {
return Primary->IncludeInDiagnosticCounts();
}
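With finish() now forwarded to both consumers, chaining a console printer with the bitcode serializer added later in this commit (SerializedDiagnosticPrinter.h) is straightforward. A sketch, assuming the caller supplies a raw_ostream for the serialized output:

#include "clang/Frontend/ChainedDiagnosticConsumer.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/SerializedDiagnosticPrinter.h"
#include "llvm/Support/raw_ostream.h"

void installConsumers(clang::DiagnosticsEngine &Diags,
                      const clang::DiagnosticOptions &DiagOpts,
                      llvm::raw_ostream *OS) {
  clang::DiagnosticConsumer *Primary =
      new clang::TextDiagnosticPrinter(llvm::errs(), DiagOpts);
  clang::DiagnosticConsumer *Secondary =
      clang::serialized_diags::create(OS, DiagOpts);
  // The chained consumer takes ownership of both; Primary alone decides
  // whether diagnostics are included in counts.
  Diags.setClient(new clang::ChainedDiagnosticConsumer(Primary, Secondary));
}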
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ChainedIncludesSource.h b/contrib/llvm/tools/clang/include/clang/Frontend/ChainedIncludesSource.h
index 620dbdf..d7119e9 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ChainedIncludesSource.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ChainedIncludesSource.h
@@ -30,7 +30,7 @@ private:
ExternalSemaSource &getFinalReader() const { return *FinalReader; }
std::vector<CompilerInstance *> CIs;
- llvm::OwningPtr<ExternalSemaSource> FinalReader;
+ OwningPtr<ExternalSemaSource> FinalReader;
protected:
@@ -66,7 +66,7 @@ protected:
virtual void InitializeSema(Sema &S);
virtual void ForgetSema();
- virtual std::pair<ObjCMethodList,ObjCMethodList> ReadMethodPool(Selector Sel);
+ virtual void ReadMethodPool(Selector Sel);
virtual bool LookupUnqualified(LookupResult &R, Scope *S);
};
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
index 4874c17..e844f88 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
@@ -50,6 +50,7 @@ public:
/// internal state before optimizations are
/// done.
unsigned DisableRedZone : 1; /// Set when -mno-red-zone is enabled.
+ unsigned DisableTailCalls : 1; /// Do not emit tail calls.
unsigned EmitDeclMetadata : 1; /// Emit special metadata indicating what
/// Decl* various IR entities came from. Only
/// useful when running CodeGen as a
@@ -71,10 +72,14 @@ public:
unsigned MergeAllConstants : 1; /// Merge identical constants.
unsigned NoCommon : 1; /// Set when -fno-common or C++ is enabled.
unsigned NoDwarf2CFIAsm : 1; /// Set when -fno-dwarf2-cfi-asm is enabled.
+ unsigned NoDwarfDirectoryAsm : 1; /// Set when -fno-dwarf-directory-asm is
+ /// enabled.
unsigned NoExecStack : 1; /// Set when -Wa,--noexecstack is enabled.
unsigned NoGlobalMerge : 1; /// Set when -mno-global-merge is enabled.
unsigned NoImplicitFloat : 1; /// Set when -mno-implicit-float is enabled.
unsigned NoInfsFPMath : 1; /// Assume FP arguments, results not +-Inf.
+ unsigned NoInline : 1; /// Set when -fno-inline is enabled. Disables
+ /// use of the inline keyword.
unsigned NoNaNsFPMath : 1; /// Assume FP arguments, results not NaN.
unsigned NoZeroInitializedInBSS : 1; /// -fno-zero-initialized-in-bss
unsigned ObjCDispatchMethod : 2; /// Method of Objective-C dispatch to use.
@@ -89,6 +94,7 @@ public:
unsigned SaveTempLabels : 1; /// Save temporary labels.
unsigned SimplifyLibCalls : 1; /// Set when -fbuiltin is enabled.
unsigned SoftFloat : 1; /// -soft-float.
+ unsigned StrictEnums : 1; /// Optimize based on strict enum definition.
unsigned TimePasses : 1; /// Set when -ftime-report is enabled.
unsigned UnitAtATime : 1; /// Unused. For mirroring GCC optimization
/// selection.
@@ -103,6 +109,11 @@ public:
unsigned VerifyModule : 1; /// Control whether the module should be run
/// through the LLVM Verifier.
+ unsigned StackRealignment : 1; /// Control whether to permit stack
+ /// realignment.
+ unsigned StackAlignment; /// Overrides default stack alignment,
+ /// if not 0.
+
/// The code model to use (-mcmodel).
std::string CodeModel;
@@ -113,6 +124,9 @@ public:
/// Enable additional debugging information.
std::string DebugPass;
+ /// The string to embed in debug information as the current working directory.
+ std::string DebugCompilationDir;
+
/// The string to embed in the debug information for the compile unit, if
/// non-empty.
std::string DwarfDebugFlags;
@@ -123,6 +137,9 @@ public:
/// The float precision limit to use, if non-empty.
std::string LimitFloatPrecision;
+ /// The name of the bitcode file to link before optimizations.
+ std::string LinkBitcodeFile;
+
/// The kind of inlining to perform.
InliningMethod Inlining;
@@ -134,6 +151,10 @@ public:
/// The name of the relocation model to use.
std::string RelocationModel;
+ /// If not an empty string, trap intrinsics are lowered to calls to this
+ /// function instead of to trap instructions.
+ std::string TrapFuncName;
+
/// A list of command-line options to forward to the LLVM backend.
std::vector<std::string> BackendOptions;
@@ -153,6 +174,7 @@ public:
DisableFPElim = 0;
DisableLLVMOpts = 0;
DisableRedZone = 0;
+ DisableTailCalls = 0;
EmitDeclMetadata = 0;
EmitGcovArcs = 0;
EmitGcovNotes = 0;
@@ -168,6 +190,7 @@ public:
NoDwarf2CFIAsm = 0;
NoImplicitFloat = 0;
NoInfsFPMath = 0;
+ NoInline = 0;
NoNaNsFPMath = 0;
NoZeroInitializedInBSS = 0;
NumRegisterParameters = 0;
@@ -183,6 +206,7 @@ public:
SaveTempLabels = 0;
SimplifyLibCalls = 1;
SoftFloat = 0;
+ StrictEnums = 0;
TimePasses = 0;
UnitAtATime = 1;
UnrollLoops = 0;
@@ -190,6 +214,8 @@ public:
UnwindTables = 0;
UseRegisterSizedBitfieldAccess = 0;
VerifyModule = 1;
+ StackRealignment = 0;
+ StackAlignment = 0;
Inlining = NoInlining;
RelocationModel = "pic";
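The new CodeGenOptions fields correspond to flags such as -mstack-alignment, -ftrap-function, and -fstrict-enums. An illustrative sketch of setting them directly (the values are examples, not defaults):

#include "clang/Frontend/CodeGenOptions.h"

void tuneCodeGen(clang::CodeGenOptions &CGOpts) {
  CGOpts.StrictEnums = 1;       // optimize using strict enum ranges
  CGOpts.DisableTailCalls = 1;  // do not emit tail calls
  CGOpts.StackRealignment = 1;  // permit stack realignment
  CGOpts.StackAlignment = 16;   // override the default stack alignment
  CGOpts.TrapFuncName = "__trap_handler";    // hypothetical trap function
  CGOpts.DebugCompilationDir = "/tmp/build"; // illustrative cwd for debug info
}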
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
index 8817740..1bb7695 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
@@ -11,13 +11,17 @@
#define LLVM_CLANG_FRONTEND_COMPILERINSTANCE_H_
#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Lex/ModuleLoader.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/OwningPtr.h"
#include <cassert>
#include <list>
#include <string>
+#include <utility>
namespace llvm {
class raw_fd_ostream;
@@ -32,8 +36,10 @@ class CodeCompleteConsumer;
class DiagnosticsEngine;
class DiagnosticConsumer;
class ExternalASTSource;
+class FileEntry;
class FileManager;
class FrontendAction;
+class Module;
class Preprocessor;
class Sema;
class SourceManager;
@@ -59,41 +65,53 @@ class TargetInfo;
/// and a long form that takes explicit instances of any required objects.
class CompilerInstance : public ModuleLoader {
/// The options used in this compiler instance.
- llvm::IntrusiveRefCntPtr<CompilerInvocation> Invocation;
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation;
/// The diagnostics engine instance.
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
/// The target being compiled for.
- llvm::IntrusiveRefCntPtr<TargetInfo> Target;
+ IntrusiveRefCntPtr<TargetInfo> Target;
/// The file manager.
- llvm::IntrusiveRefCntPtr<FileManager> FileMgr;
+ IntrusiveRefCntPtr<FileManager> FileMgr;
/// The source manager.
- llvm::IntrusiveRefCntPtr<SourceManager> SourceMgr;
+ IntrusiveRefCntPtr<SourceManager> SourceMgr;
/// The preprocessor.
- llvm::IntrusiveRefCntPtr<Preprocessor> PP;
+ IntrusiveRefCntPtr<Preprocessor> PP;
/// The AST context.
- llvm::IntrusiveRefCntPtr<ASTContext> Context;
+ IntrusiveRefCntPtr<ASTContext> Context;
/// The AST consumer.
- llvm::OwningPtr<ASTConsumer> Consumer;
+ OwningPtr<ASTConsumer> Consumer;
/// The code completion consumer.
- llvm::OwningPtr<CodeCompleteConsumer> CompletionConsumer;
+ OwningPtr<CodeCompleteConsumer> CompletionConsumer;
/// \brief The semantic analysis object.
- llvm::OwningPtr<Sema> TheSema;
+ OwningPtr<Sema> TheSema;
/// \brief The frontend timer
- llvm::OwningPtr<llvm::Timer> FrontendTimer;
+ OwningPtr<llvm::Timer> FrontendTimer;
/// \brief Non-owning reference to the ASTReader, if one exists.
ASTReader *ModuleManager;
+ /// \brief The set of top-level modules that have already been loaded,
+ /// along with the module map.
+ llvm::DenseMap<const IdentifierInfo *, Module *> KnownModules;
+
+ /// \brief The location of the module-import keyword for the last module
+ /// import.
+ SourceLocation LastModuleImportLoc;
+
+ /// \brief The result of the last module import.
+ ///
+ Module *LastModuleImportResult;
+
/// \brief Holds information about the output file.
///
/// If TempFilename is not empty we must rename it to Filename at the end.
@@ -218,10 +236,10 @@ public:
}
LangOptions &getLangOpts() {
- return Invocation->getLangOpts();
+ return *Invocation->getLangOpts();
}
const LangOptions &getLangOpts() const {
- return Invocation->getLangOpts();
+ return *Invocation->getLangOpts();
}
PreprocessorOptions &getPreprocessorOpts() {
@@ -491,7 +509,7 @@ public:
/// used by some diagnostics printers (for logging purposes only).
///
/// \return The new object on success, or null on failure.
- static llvm::IntrusiveRefCntPtr<DiagnosticsEngine>
+ static IntrusiveRefCntPtr<DiagnosticsEngine>
createDiagnostics(const DiagnosticOptions &Opts, int Argc,
const char* const *Argv,
DiagnosticConsumer *Client = 0,
@@ -517,6 +535,7 @@ public:
void createPCHExternalASTSource(StringRef Path,
bool DisablePCHValidation,
bool DisableStatCache,
+ bool AllowPCHWithCompilerErrors,
void *DeserializationListener);
/// Create an external AST source to read a PCH file.
@@ -526,6 +545,7 @@ public:
createPCHExternalASTSource(StringRef Path, const std::string &Sysroot,
bool DisablePCHValidation,
bool DisableStatCache,
+ bool AllowPCHWithCompilerErrors,
Preprocessor &PP, ASTContext &Context,
void *DeserializationListener, bool Preamble);
@@ -554,6 +574,10 @@ public:
/// Create the default output file (from the invocation's options) and add it
/// to the list of tracked output files.
///
+ /// Files created by this function are always written through a temporary
+ /// file (that is, the data is written to a temporary file which will
+ /// atomically replace the target output on success).
+ ///
/// \return - Null on error.
llvm::raw_fd_ostream *
createDefaultOutputFile(bool Binary = true, StringRef BaseInput = "",
@@ -568,7 +592,8 @@ public:
bool Binary = true, bool RemoveFileOnSignal = true,
StringRef BaseInput = "",
StringRef Extension = "",
- bool UseTemporary = false);
+ bool UseTemporary = false,
+ bool CreateMissingDirectories = false);
/// Create a new output file, optionally deriving the output path name.
///
@@ -588,7 +613,9 @@ public:
/// llvm::sys::RemoveFileOnSignal. Note that this is not safe for
/// multithreaded use, as the underlying signal mechanism is not reentrant
/// \param UseTemporary - Create a new temporary file that must be renamed to
- /// OutputPath in the end
+ /// OutputPath in the end.
+ /// \param CreateMissingDirectories - When \arg UseTemporary is true, create
+ /// missing directories in the output path.
/// \param ResultPathName [out] - If given, the result path name will be
/// stored here on success.
/// \param TempPathName [out] - If given, the temporary file path name
@@ -599,6 +626,7 @@ public:
StringRef BaseInput = "",
StringRef Extension = "",
bool UseTemporary = false,
+ bool CreateMissingDirectories = false,
std::string *ResultPathName = 0,
std::string *TempPathName = 0);
@@ -610,23 +638,25 @@ public:
/// as the main file.
///
/// \return True on success.
- bool InitializeSourceManager(StringRef InputFile);
+ bool InitializeSourceManager(StringRef InputFile,
+ SrcMgr::CharacteristicKind Kind = SrcMgr::C_User);
/// InitializeSourceManager - Initialize the source manager to set InputFile
/// as the main file.
///
/// \return True on success.
static bool InitializeSourceManager(StringRef InputFile,
- DiagnosticsEngine &Diags,
- FileManager &FileMgr,
- SourceManager &SourceMgr,
- const FrontendOptions &Opts);
+ SrcMgr::CharacteristicKind Kind,
+ DiagnosticsEngine &Diags,
+ FileManager &FileMgr,
+ SourceManager &SourceMgr,
+ const FrontendOptions &Opts);
/// }
- virtual ModuleKey loadModule(SourceLocation ImportLoc,
- IdentifierInfo &ModuleName,
- SourceLocation ModuleNameLoc);
+ virtual Module *loadModule(SourceLocation ImportLoc, ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective);
};
} // end namespace clang
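The loadModule interface change (from a ModuleKey plus a single identifier to a Module* plus a dotted ModuleIdPath) affects every ModuleLoader implementation. A minimal conforming stub, mirroring the ASTUnit override earlier in this commit:

#include "clang/Lex/ModuleLoader.h"

namespace {
// Hypothetical loader for tools that never resolve module imports.
class NullModuleLoader : public clang::ModuleLoader {
public:
  virtual clang::Module *loadModule(clang::SourceLocation ImportLoc,
                                    clang::ModuleIdPath Path,
                                    clang::Module::NameVisibilityKind Visibility,
                                    bool IsInclusionDirective) {
    return 0; // report the module as unavailable
  }
};
} // end anonymous namespace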
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
index 47c7031..0d2260a 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
@@ -14,6 +14,7 @@
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/FileSystemOptions.h"
#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/Frontend/MigratorOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/DiagnosticOptions.h"
@@ -30,18 +31,45 @@
namespace clang {
+class CompilerInvocation;
class DiagnosticsEngine;
+namespace driver {
+class ArgList;
+}
+
+/// ParseDiagnosticArgs - Fill out Opts based on the options given in Args.
+/// Args must have been created from the OptTable returned by
+/// createCC1OptTable(). When errors are encountered, return false and,
+/// if Diags is non-null, report the error(s).
+bool ParseDiagnosticArgs(DiagnosticOptions &Opts, driver::ArgList &Args,
+ DiagnosticsEngine *Diags = 0);
+
+class CompilerInvocationBase : public RefCountedBase<CompilerInvocation> {
+protected:
+ /// Options controlling the language variant.
+ IntrusiveRefCntPtr<LangOptions> LangOpts;
+public:
+ CompilerInvocationBase();
+
+ CompilerInvocationBase(const CompilerInvocationBase &X);
+
+ LangOptions *getLangOpts() { return LangOpts.getPtr(); }
+ const LangOptions *getLangOpts() const { return LangOpts.getPtr(); }
+};
+
/// CompilerInvocation - Helper class for holding the data necessary to invoke
/// the compiler.
///
/// This class is designed to represent an abstract "invocation" of the
/// compiler, including data such as the include paths, the code generation
/// options, the warning flags, and so on.
-class CompilerInvocation : public llvm::RefCountedBase<CompilerInvocation> {
+class CompilerInvocation : public CompilerInvocationBase {
/// Options controlling the static analyzer.
AnalyzerOptions AnalyzerOpts;
+ MigratorOptions MigratorOpts;
+
/// Options controlling IRgen and the backend.
CodeGenOptions CodeGenOpts;
@@ -60,9 +88,6 @@ class CompilerInvocation : public llvm::RefCountedBase<CompilerInvocation> {
/// Options controlling the #include directive.
HeaderSearchOptions HeaderSearchOpts;
- /// Options controlling the language variant.
- LangOptions LangOpts;
-
/// Options controlling the preprocessor (aside from #include handling).
PreprocessorOptions PreprocessorOpts;
@@ -79,13 +104,13 @@ public:
/// @{
/// CreateFromArgs - Create a compiler invocation from a list of input
- /// options.
+ /// options. Returns true on success.
///
/// \param Res [out] - The resulting invocation.
/// \param ArgBegin - The first element in the argument vector.
/// \param ArgEnd - The last element in the argument vector.
/// \param Diags - The diagnostic engine to use for errors.
- static void CreateFromArgs(CompilerInvocation &Res,
+ static bool CreateFromArgs(CompilerInvocation &Res,
const char* const *ArgBegin,
const char* const *ArgEnd,
DiagnosticsEngine &Diags);
@@ -111,7 +136,7 @@ public:
/// \param LangStd - The input language standard.
void setLangDefaults(InputKind IK,
LangStandard::Kind LangStd = LangStandard::lang_unspecified) {
- setLangDefaults(LangOpts, IK, LangStd);
+ setLangDefaults(*getLangOpts(), IK, LangStd);
}
/// setLangDefaults - Set language defaults for the given input language and
@@ -136,6 +161,11 @@ public:
return AnalyzerOpts;
}
+ MigratorOptions &getMigratorOpts() { return MigratorOpts; }
+ const MigratorOptions &getMigratorOpts() const {
+ return MigratorOpts;
+ }
+
CodeGenOptions &getCodeGenOpts() { return CodeGenOpts; }
const CodeGenOptions &getCodeGenOpts() const {
return CodeGenOpts;
@@ -166,9 +196,6 @@ public:
return FrontendOpts;
}
- LangOptions &getLangOpts() { return LangOpts; }
- const LangOptions &getLangOpts() const { return LangOpts; }
-
PreprocessorOptions &getPreprocessorOpts() { return PreprocessorOpts; }
const PreprocessorOptions &getPreprocessorOpts() const {
return PreprocessorOpts;
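Two behavioral changes above are easy to miss: CreateFromArgs now reports failure through its return value, and LangOptions lives behind a refcounted pointer on the new CompilerInvocationBase. A short sketch of a caller adapting to both (argument handling is illustrative):

#include "clang/Frontend/CompilerInvocation.h"

bool buildInvocation(clang::CompilerInvocation &Invocation,
                     const char *const *ArgBegin, const char *const *ArgEnd,
                     clang::DiagnosticsEngine &Diags) {
  // Previously returned void; errors were detectable only via Diags.
  if (!clang::CompilerInvocation::CreateFromArgs(Invocation, ArgBegin,
                                                 ArgEnd, Diags))
    return false;
  // getLangOpts() now returns a pointer to the shared LangOptions.
  clang::LangOptions &LangOpts = *Invocation.getLangOpts();
  LangOpts.CPlusPlus = 1; // illustrative tweak
  return true;
}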
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h
index 1e22c22..83976c3 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h
@@ -25,7 +25,7 @@ public:
/// dependency, which can avoid some 'make'
/// problems.
unsigned AddMissingHeaderDeps : 1; ///< Add missing headers to dependency list
-
+
/// The file to write dependency output to.
std::string OutputFile;
@@ -39,6 +39,9 @@ public:
/// must contain at least one entry.
std::vector<std::string> Targets;
+ /// \brief The file to write GraphViz-formatted header dependencies to.
+ std::string DOTOutputFile;
+
public:
DependencyOutputOptions() {
IncludeSystemHeaders = 0;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
index 319abeb..1c6ba6a 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
@@ -31,7 +31,6 @@ public:
unsigned ShowFixits : 1; /// Show fixit information.
unsigned ShowSourceRanges : 1; /// Show source ranges in numeric form.
unsigned ShowParseableFixits : 1; /// Show machine parseable fix-its.
- unsigned ShowNames : 1; /// Show the diagnostic name
unsigned ShowOptionNames : 1; /// Show the option name for mappable
/// diagnostics.
unsigned ShowNoteIncludeStack : 1; /// Show include stacks for notes.
@@ -51,12 +50,14 @@ public:
unsigned ErrorLimit; /// Limit # errors emitted.
unsigned MacroBacktraceLimit; /// Limit depth of macro expansion backtrace.
unsigned TemplateBacktraceLimit; /// Limit depth of instantiation backtrace.
+ unsigned ConstexprBacktraceLimit; /// Limit depth of constexpr backtrace.
/// The distance between tab stops.
unsigned TabStop;
enum { DefaultTabStop = 8, MaxTabStop = 100,
DefaultMacroBacktraceLimit = 6,
- DefaultTemplateBacktraceLimit = 10 };
+ DefaultTemplateBacktraceLimit = 10,
+ DefaultConstexprBacktraceLimit = 10 };
/// Column limit for formatting message diagnostics, or 0 if unused.
unsigned MessageLength;
@@ -67,6 +68,9 @@ public:
/// The file to log diagnostic output to.
std::string DiagnosticLogFile;
+
+ /// The file to serialize diagnostics to (non-appending).
+ std::string DiagnosticSerializationFile;
/// The list of -W... options used to alter the diagnostic mappings, with the
/// prefixes removed.
@@ -86,7 +90,6 @@ public:
ShowColumn = 1;
ShowFixits = 1;
ShowLocation = 1;
- ShowNames = 0;
ShowOptionNames = 0;
ShowCategories = 0;
Format = Clang;
@@ -96,6 +99,7 @@ public:
ErrorLimit = 0;
TemplateBacktraceLimit = DefaultTemplateBacktraceLimit;
MacroBacktraceLimit = DefaultMacroBacktraceLimit;
+ ConstexprBacktraceLimit = DefaultConstexprBacktraceLimit;
}
};
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
new file mode 100644
index 0000000..5ad88a8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
@@ -0,0 +1,149 @@
+//===--- DiagnosticRenderer.h - Diagnostic Pretty-Printing ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class that provides support for pretty-printing of
+// diagnostics. It is used to implement the different code paths which require
+// such functionality in a consistent way.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_DIAGNOSTIC_RENDERER_H_
+#define LLVM_CLANG_FRONTEND_DIAGNOSTIC_RENDERER_H_
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/PointerUnion.h"
+
+namespace clang {
+
+class DiagnosticOptions;
+class LangOptions;
+class SourceManager;
+
+typedef llvm::PointerUnion<const Diagnostic *,
+ const StoredDiagnostic *> DiagOrStoredDiag;
+
+/// \brief Class to encapsulate the logic for formatting a diagnostic message.
+/// Actual "printing" logic is implemented by subclasses.
+///
+/// This class provides an interface for building and emitting
+/// diagnostic, including all of the macro backtraces, caret diagnostics, FixIt
+/// Hints, and code snippets. In the presence of macros this involves
+/// a recursive process, synthesizing notes for each macro expansion.
+///
+/// A brief worklist:
+/// FIXME: Sink the recursive printing of template instantiations into this
+/// class.
+class DiagnosticRenderer {
+protected:
+ const SourceManager &SM;
+ const LangOptions &LangOpts;
+ const DiagnosticOptions &DiagOpts;
+
+ /// \brief The location of the previous diagnostic if known.
+ ///
+ /// This will be invalid in cases where there is no (known) previous
+ /// diagnostic location, or that location itself is invalid or comes from
+ /// a different source manager than SM.
+ SourceLocation LastLoc;
+
+ /// \brief The location of the last include whose stack was printed if known.
+ ///
+ /// Same restriction as \see LastLoc essentially, but tracking include stack
+ /// root locations rather than diagnostic locations.
+ SourceLocation LastIncludeLoc;
+
+ /// \brief The level of the last diagnostic emitted.
+ ///
+ /// Used to detect level changes which change the amount of information
+ /// displayed.
+ DiagnosticsEngine::Level LastLevel;
+
+ DiagnosticRenderer(const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts);
+
+ virtual ~DiagnosticRenderer();
+
+ virtual void emitDiagnosticMessage(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ DiagOrStoredDiag Info) = 0;
+
+ virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges) = 0;
+
+ virtual void emitBasicNote(StringRef Message) = 0;
+
+ virtual void emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints) = 0;
+
+ virtual void emitIncludeLocation(SourceLocation Loc, PresumedLoc PLoc) = 0;
+
+ virtual void beginDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {}
+ virtual void endDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {}
+
+
+private:
+ void emitIncludeStack(SourceLocation Loc, DiagnosticsEngine::Level Level);
+ void emitIncludeStackRecursively(SourceLocation Loc);
+ void emitMacroExpansionsAndCarets(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints,
+ unsigned &MacroDepth,
+ unsigned OnMacroInst = 0);
+public:
+ /// \brief Emit a diagnostic.
+ ///
+ /// This is the primary entry point for emitting diagnostic messages.
+ /// It handles formatting and rendering the message as well as any ancillary
+ /// information needed based on macros whose expansions impact the
+ /// diagnostic.
+ ///
+ /// \param Loc The location for this caret.
+ /// \param Level The level of the diagnostic to be emitted.
+ /// \param Message The diagnostic message to emit.
+ /// \param Ranges The underlined ranges for this code snippet.
+ /// \param FixItHints The FixIt hints active for this diagnostic.
+ void emitDiagnostic(SourceLocation Loc, DiagnosticsEngine::Level Level,
+ StringRef Message, ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> FixItHints,
+ DiagOrStoredDiag D = (Diagnostic *)0);
+
+ void emitStoredDiagnostic(StoredDiagnostic &Diag);
+};
+
+/// Subclass of DiagnosticRenderer that turns all subdiagnostics into explicit
+/// notes. It is up to subclasses to further define the behavior.
+class DiagnosticNoteRenderer : public DiagnosticRenderer {
+public:
+ DiagnosticNoteRenderer(const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts)
+ : DiagnosticRenderer(SM, LangOpts, DiagOpts) {}
+
+ virtual ~DiagnosticNoteRenderer();
+
+ virtual void emitBasicNote(StringRef Message);
+
+ virtual void emitIncludeLocation(SourceLocation Loc,
+ PresumedLoc PLoc);
+
+ virtual void emitNote(SourceLocation Loc, StringRef Message) = 0;
+};
+} // end clang namespace
+#endif
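Concrete renderers implement the protected hooks; the recursive work (include stacks, macro expansions) stays in the base class. A bare-bones sketch of a renderer that discards its output, just to show the required overrides (the class name is hypothetical):

#include "clang/Frontend/DiagnosticRenderer.h"

namespace {
class NullRenderer : public clang::DiagnosticRenderer {
public:
  NullRenderer(const clang::SourceManager &SM,
               const clang::LangOptions &LangOpts,
               const clang::DiagnosticOptions &DiagOpts)
      : DiagnosticRenderer(SM, LangOpts, DiagOpts) {}

protected:
  // Each hook receives fully formatted pieces; this renderer drops them.
  virtual void emitDiagnosticMessage(clang::SourceLocation Loc,
                                     clang::PresumedLoc PLoc,
                                     clang::DiagnosticsEngine::Level Level,
                                     clang::StringRef Message,
                                     clang::ArrayRef<clang::CharSourceRange> Ranges,
                                     clang::DiagOrStoredDiag Info) {}
  virtual void emitDiagnosticLoc(clang::SourceLocation Loc,
                                 clang::PresumedLoc PLoc,
                                 clang::DiagnosticsEngine::Level Level,
                                 clang::ArrayRef<clang::CharSourceRange> Ranges) {}
  virtual void emitBasicNote(clang::StringRef Message) {}
  virtual void emitCodeContext(clang::SourceLocation Loc,
                               clang::DiagnosticsEngine::Level Level,
                               clang::SmallVectorImpl<clang::CharSourceRange> &Ranges,
                               clang::ArrayRef<clang::FixItHint> Hints) {}
  virtual void emitIncludeLocation(clang::SourceLocation Loc,
                                   clang::PresumedLoc PLoc) {}
};
} // end anonymous namespace

Clients then call the public emitDiagnostic() entry point, and the base class drives these hooks, synthesizing notes for each macro expansion.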
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
index f85cc7e..6839028 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
@@ -12,6 +12,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/FrontendOptions.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/OwningPtr.h"
#include <string>
@@ -23,30 +24,11 @@ class ASTMergeAction;
class ASTUnit;
class CompilerInstance;
-enum InputKind {
- IK_None,
- IK_Asm,
- IK_C,
- IK_CXX,
- IK_ObjC,
- IK_ObjCXX,
- IK_PreprocessedC,
- IK_PreprocessedCXX,
- IK_PreprocessedObjC,
- IK_PreprocessedObjCXX,
- IK_OpenCL,
- IK_CUDA,
- IK_AST,
- IK_LLVM_IR
-};
-
-
/// FrontendAction - Abstract base class for actions which can be performed by
/// the frontend.
class FrontendAction {
- std::string CurrentFile;
- InputKind CurrentFileKind;
- llvm::OwningPtr<ASTUnit> CurrentASTUnit;
+ FrontendInputFile CurrentInput;
+ OwningPtr<ASTUnit> CurrentASTUnit;
CompilerInstance *Instance;
friend class ASTMergeAction;
friend class WrapperFrontendAction;
@@ -103,7 +85,7 @@ protected:
/// EndSourceFileAction - Callback at the end of processing a single input;
/// this is guaranteed to only be called following a successful call to
- /// BeginSourceFileAction (and BeingSourceFile).
+ /// BeginSourceFileAction (and BeginSourceFile).
virtual void EndSourceFileAction() {}
/// @}
@@ -127,18 +109,22 @@ public:
/// @{
bool isCurrentFileAST() const {
- assert(!CurrentFile.empty() && "No current file!");
+ assert(!CurrentInput.File.empty() && "No current file!");
return CurrentASTUnit != 0;
}
+ const FrontendInputFile &getCurrentInput() const {
+ return CurrentInput;
+ }
+
const std::string &getCurrentFile() const {
- assert(!CurrentFile.empty() && "No current file!");
- return CurrentFile;
+ assert(!CurrentInput.File.empty() && "No current file!");
+ return CurrentInput.File;
}
InputKind getCurrentFileKind() const {
- assert(!CurrentFile.empty() && "No current file!");
- return CurrentFileKind;
+ assert(!CurrentInput.File.empty() && "No current file!");
+ return CurrentInput.Kind;
}
ASTUnit &getCurrentASTUnit() const {
@@ -150,7 +136,7 @@ public:
return CurrentASTUnit.take();
}
- void setCurrentFile(StringRef Value, InputKind Kind, ASTUnit *AST = 0);
+ void setCurrentInput(const FrontendInputFile &CurrentInput, ASTUnit *AST = 0);
/// @}
/// @name Supported Modes
@@ -189,10 +175,7 @@ public:
/// action may store and use this object up until the matching EndSourceFile
/// action.
///
- /// \param Filename - The input filename, which will be made available to
- /// clients via \see getCurrentFile().
- ///
- /// \param InputKind - The type of input. Some input kinds are handled
+ /// \param Input - The input filename and kind. Some input kinds are handled
/// specially, for example AST inputs, since the AST file itself contains
/// several objects which would normally be owned by the
/// CompilerInstance. When processing AST input files, these objects should
@@ -200,10 +183,9 @@ public:
/// automatically be shared with the AST file in between \see
/// BeginSourceFile() and \see EndSourceFile().
///
- /// \return True on success; the compilation of this file should be aborted
- /// and neither Execute nor EndSourceFile should be called.
- bool BeginSourceFile(CompilerInstance &CI, StringRef Filename,
- InputKind Kind);
+ /// \return True on success; on failure the compilation of this file should
+ /// be aborted and neither Execute nor EndSourceFile should be called.
+ bool BeginSourceFile(CompilerInstance &CI, const FrontendInputFile &Input);
/// Execute - Set the source managers main input file, and run the action.
void Execute();
@@ -231,6 +213,7 @@ public:
};
class PluginASTAction : public ASTFrontendAction {
+ virtual void anchor();
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) = 0;
@@ -265,7 +248,7 @@ public:
/// implements every virtual method in the FrontendAction interface by
/// forwarding to the wrapped action.
class WrapperFrontendAction : public FrontendAction {
- llvm::OwningPtr<FrontendAction> WrappedAction;
+ OwningPtr<FrontendAction> WrappedAction;
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
index 72a3d90..8817c5a 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
@@ -16,6 +16,8 @@
namespace clang {
+class Module;
+
//===----------------------------------------------------------------------===//
// Custom Consumer Actions
//===----------------------------------------------------------------------===//
@@ -67,22 +69,17 @@ protected:
};
class GeneratePCHAction : public ASTFrontendAction {
- bool MakeModule;
-
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
StringRef InFile);
- virtual TranslationUnitKind getTranslationUnitKind() {
- return MakeModule? TU_Module : TU_Prefix;
+ virtual TranslationUnitKind getTranslationUnitKind() {
+ return TU_Prefix;
}
virtual bool hasASTFileSupport() const { return false; }
public:
- /// \brief Create a new action
- explicit GeneratePCHAction(bool MakeModule) : MakeModule(MakeModule) { }
-
/// \brief Compute the AST consumer arguments that will be used to
/// create the PCHGenerator instance returned by CreateASTConsumer.
///
@@ -94,6 +91,33 @@ public:
raw_ostream *&OS);
};
+class GenerateModuleAction : public ASTFrontendAction {
+ clang::Module *Module;
+
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+ virtual TranslationUnitKind getTranslationUnitKind() {
+ return TU_Module;
+ }
+
+ virtual bool hasASTFileSupport() const { return false; }
+
+public:
+ virtual bool BeginSourceFileAction(CompilerInstance &CI, StringRef Filename);
+
+ /// \brief Compute the AST consumer arguments that will be used to
+ /// create the PCHGenerator instance returned by CreateASTConsumer.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ static bool ComputeASTConsumerArguments(CompilerInstance &CI,
+ StringRef InFile,
+ std::string &Sysroot,
+ std::string &OutputFile,
+ raw_ostream *&OS);
+};
+
class SyntaxOnlyAction : public ASTFrontendAction {
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
@@ -129,8 +153,7 @@ protected:
virtual void EndSourceFileAction();
public:
- ASTMergeAction(FrontendAction *AdaptedAction,
- std::string *ASTFiles, unsigned NumASTFiles);
+ ASTMergeAction(FrontendAction *AdaptedAction, ArrayRef<std::string> ASTFiles);
virtual ~ASTMergeAction();
virtual bool usesPreprocessorOnly() const;
@@ -150,6 +173,15 @@ protected:
virtual bool usesPreprocessorOnly() const { return true; }
};
+class PubnamesDumpAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+public:
+ virtual bool hasCodeCompletionSupport() const { return false; }
+};
+
//===----------------------------------------------------------------------===//
// Preprocessor Actions
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h
index 21cd2c6..0b05b74 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h
@@ -16,7 +16,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
- SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY,BRIEF,FULL) ENUM,
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
#define FRONTENDSTART
#include "clang/Basic/DiagnosticFrontendKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
index fa6d044..a051d7f 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
@@ -11,7 +11,6 @@
#define LLVM_CLANG_FRONTEND_FRONTENDOPTIONS_H
#include "clang/Frontend/CommandLineSourceLoc.h"
-#include "clang/Frontend/FrontendAction.h"
#include "llvm/ADT/StringRef.h"
#include <string>
#include <vector>
@@ -43,14 +42,50 @@ namespace frontend {
PrintDeclContext, ///< Print DeclContext and their Decls.
PrintPreamble, ///< Print the "preamble" of the input file
PrintPreprocessedInput, ///< -E mode.
+ PubnamesDump, ///< Print all of the "public" names in the source.
RewriteMacros, ///< Expand macros but not #includes.
RewriteObjC, ///< ObjC->C Rewriter.
RewriteTest, ///< Rewriter playground
RunAnalysis, ///< Run one or more source code analyses.
+ MigrateSource, ///< Run migrator.
RunPreprocessorOnly ///< Just lex, no output.
};
}
+enum InputKind {
+ IK_None,
+ IK_Asm,
+ IK_C,
+ IK_CXX,
+ IK_ObjC,
+ IK_ObjCXX,
+ IK_PreprocessedC,
+ IK_PreprocessedCXX,
+ IK_PreprocessedObjC,
+ IK_PreprocessedObjCXX,
+ IK_OpenCL,
+ IK_CUDA,
+ IK_AST,
+ IK_LLVM_IR
+};
+
+
+/// \brief An input file for the front end.
+struct FrontendInputFile {
+ /// \brief The file name, or "-" to read from standard input.
+ std::string File;
+
+ /// \brief The kind of input, e.g., C source, AST file, LLVM IR.
+ InputKind Kind;
+
+ /// \brief Whether we're dealing with a 'system' input (vs. a 'user' input).
+ bool IsSystem;
+
+ FrontendInputFile() : Kind(IK_None) { }
+ FrontendInputFile(StringRef File, InputKind Kind, bool IsSystem = false)
+ : File(File.str()), Kind(Kind), IsSystem(IsSystem) { }
+};
+
/// FrontendOptions - Options for controlling the behavior of the frontend.
class FrontendOptions {
public:
@@ -72,8 +107,15 @@ public:
unsigned ShowVersion : 1; ///< Show the -version text.
unsigned FixWhatYouCan : 1; ///< Apply fixes even if there are
/// unfixable errors.
+ unsigned FixOnlyWarnings : 1; ///< Apply fixes only for warnings.
+ unsigned FixAndRecompile : 1; ///< Apply fixes and recompile.
+ unsigned FixToTemporaries : 1; ///< Apply fixes to temporary files.
unsigned ARCMTMigrateEmitARCErrors : 1; /// Emit ARC errors even if the
/// migrator can fix them
+ unsigned SkipFunctionBodies : 1; ///< Skip over function bodies to
+ /// speed up parsing when the bodies
+ /// are not needed (e.g., with code
+ /// completion).
enum {
ARCMT_None,
@@ -82,11 +124,20 @@ public:
ARCMT_Migrate
} ARCMTAction;
- std::string ARCMTMigrateDir;
+ enum {
+ ObjCMT_None = 0,
+ /// \brief Enable migration to modern ObjC literals.
+ ObjCMT_Literals = 0x1,
+ /// \brief Enable migration to modern ObjC subscripting.
+ ObjCMT_Subscripting = 0x2
+ };
+ unsigned ObjCMTAction;
+
+ std::string MTMigrateDir;
std::string ARCMTMigrateReportOut;
/// The input files and their types.
- std::vector<std::pair<InputKind, std::string> > Inputs;
+ std::vector<FrontendInputFile> Inputs;
/// The output file, if any.
std::string OutputFile;
@@ -122,6 +173,10 @@ public:
/// should only be used for debugging and experimental features.
std::vector<std::string> LLVMArgs;
+ /// \brief File name of the file that will provide record layouts
+ /// (in the format produced by -fdump-record-layouts).
+ std::string OverrideRecordLayoutsFile;
+
public:
FrontendOptions() {
DisableFree = 0;
@@ -137,6 +192,8 @@ public:
ShowVersion = 0;
ARCMTAction = ARCMT_None;
ARCMTMigrateEmitARCErrors = 0;
+ SkipFunctionBodies = 0;
+ ObjCMTAction = ObjCMT_None;
}
/// getInputKindForExtension - Return the appropriate input kind for a file
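Inputs are now carried as FrontendInputFile records rather than (InputKind, path) pairs, which lets 'system' inputs be flagged at the source. Note that the InputKind enum itself moved here from FrontendAction.h. A small illustrative sketch:

#include "clang/Frontend/FrontendOptions.h"

void addInputs(clang::FrontendOptions &FeOpts) {
  // A user-visible C source file, plus a 'system' input whose diagnostics
  // should be treated like system-header diagnostics.
  FeOpts.Inputs.push_back(clang::FrontendInputFile("main.c", clang::IK_C));
  FeOpts.Inputs.push_back(
      clang::FrontendInputFile("vendor_shim.c", clang::IK_C,
                               /*IsSystem=*/true));
  FeOpts.SkipFunctionBodies = 1; // new: cheaper parse when bodies are unused
}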
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h
index de2800c..e6f4403 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h
@@ -21,7 +21,7 @@ enum LangFeatures {
BCPLComment = (1 << 0),
C89 = (1 << 1),
C99 = (1 << 2),
- C1X = (1 << 3),
+ C11 = (1 << 3),
CPlusPlus = (1 << 4),
CPlusPlus0x = (1 << 5),
Digraphs = (1 << 6),
@@ -62,8 +62,8 @@ public:
/// isC99 - Language is a superset of C99.
bool isC99() const { return Flags & frontend::C99; }
- /// isC1X - Language is a superset of C1X.
- bool isC1X() const { return Flags & frontend::C1X; }
+ /// isC11 - Language is a superset of C11.
+ bool isC11() const { return Flags & frontend::C11; }
/// isCPlusPlus - Language is a C++ variant.
bool isCPlusPlus() const { return Flags & frontend::CPlusPlus; }
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
index c82290b..4bcff4a 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
@@ -62,17 +62,26 @@ LANGSTANDARD(gnu9x, "gnu9x",
"ISO C 1999 with GNU extensions",
BCPLComment | C99 | Digraphs | GNUMode | HexFloat)
-// C1X modes
+// C11 modes
+LANGSTANDARD(c11, "c11",
+ "ISO C 2011",
+ BCPLComment | C99 | C11 | Digraphs | HexFloat)
LANGSTANDARD(c1x, "c1x",
- "ISO C 201X",
- BCPLComment | C99 | C1X | Digraphs | HexFloat)
+ "ISO C 2011",
+ BCPLComment | C99 | C11 | Digraphs | HexFloat)
+LANGSTANDARD(iso9899_2011,
+ "iso9899:2011", "ISO C 2011",
+ BCPLComment | C99 | C11 | Digraphs | HexFloat)
LANGSTANDARD(iso9899_201x,
- "iso9899:201x", "ISO C 201X",
- BCPLComment | C99 | C1X | Digraphs | HexFloat)
+ "iso9899:2011", "ISO C 2011",
+ BCPLComment | C99 | C11 | Digraphs | HexFloat)
+LANGSTANDARD(gnu11, "gnu11",
+ "ISO C 2011 with GNU extensions",
+ BCPLComment | C99 | C11 | Digraphs | GNUMode | HexFloat)
LANGSTANDARD(gnu1x, "gnu1x",
- "ISO C 201X with GNU extensions",
- BCPLComment | C99 | C1X | Digraphs | GNUMode | HexFloat)
+ "ISO C 2011 with GNU extensions",
+ BCPLComment | C99 | C11 | Digraphs | GNUMode | HexFloat)
// C++ modes
LANGSTANDARD(cxx98, "c++98",
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h b/contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h
new file mode 100644
index 0000000..225efe6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h
@@ -0,0 +1,61 @@
+//===--- LayoutOverrideSource.h - Override Record Layouts ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_LAYOUTOVERRIDESOURCE_H
+#define LLVM_CLANG_FRONTEND_LAYOUTOVERRIDESOURCE_H
+
+#include "clang/AST/ExternalASTSource.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+ /// \brief An external AST source that overrides the layout of
+ /// a specified set of record types.
+ ///
+ /// This class is used only for testing the ability of external AST sources
+ /// to override the layout of record types. Its input is the output format
+ /// of the command-line argument -fdump-record-layouts.
+ class LayoutOverrideSource : public ExternalASTSource {
+ /// \brief The layout of a given record.
+ struct Layout {
+ /// \brief The size of the record.
+ uint64_t Size;
+
+ /// \brief The alignment of the record.
+ uint64_t Align;
+
+ /// \brief The offsets of the fields, in source order.
+ llvm::SmallVector<uint64_t, 8> FieldOffsets;
+ };
+
+ /// \brief The set of layouts that will be overridden.
+ llvm::StringMap<Layout> Layouts;
+
+ public:
+ /// \brief Create a new AST source that overrides the layout of some
+ /// set of record types.
+ ///
+ /// The file contains the output produced by -fdump-record-layouts.
+ explicit LayoutOverrideSource(llvm::StringRef Filename);
+
+ /// \brief If this particular record type has an overridden layout,
+ /// return that layout.
+ virtual bool
+ layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets);
+
+ /// \brief Dump the overridden layouts.
+ void dump();
+ };
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h
new file mode 100644
index 0000000..f9554e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h
@@ -0,0 +1,31 @@
+//===--- MigratorOptions.h - Migrator Options -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains the structures necessary for a front-end to specify
+// various migration analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_MIGRATOROPTIONS
+#define LLVM_CLANG_FRONTEND_MIGRATOROPTIONS
+
+namespace clang {
+
+class MigratorOptions {
+public:
+ unsigned NoNSAllocReallocError : 1;
+ unsigned NoFinalizeRemoval : 1;
+ MigratorOptions() {
+ NoNSAllocReallocError = 0;
+ NoFinalizeRemoval = 0;
+ }
+};
+
+}
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
index 4242f01..ffa7b4a 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
@@ -15,7 +15,9 @@
#ifndef CLANG_FRONTEND_MULTIPLEXCONSUMER_H
#define CLANG_FRONTEND_MULTIPLEXCONSUMER_H
+#include "clang/Basic/LLVM.h"
#include "clang/Sema/SemaConsumer.h"
+#include "clang/Basic/LLVM.h"
#include "llvm/ADT/OwningPtr.h"
#include <vector>
@@ -28,15 +30,18 @@ class MultiplexASTDeserializationListener;
class MultiplexConsumer : public SemaConsumer {
public:
// Takes ownership of the pointers in C.
- MultiplexConsumer(const std::vector<ASTConsumer*>& C);
+ MultiplexConsumer(ArrayRef<ASTConsumer*> C);
~MultiplexConsumer();
// ASTConsumer
virtual void Initialize(ASTContext &Context);
- virtual void HandleTopLevelDecl(DeclGroupRef D);
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
+ virtual bool HandleTopLevelDecl(DeclGroupRef D);
virtual void HandleInterestingDecl(DeclGroupRef D);
virtual void HandleTranslationUnit(ASTContext &Ctx);
virtual void HandleTagDeclDefinition(TagDecl *D);
+ virtual void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D);
+ virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
virtual void CompleteTentativeDefinition(VarDecl *D);
virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired);
virtual ASTMutationListener *GetASTMutationListener();
@@ -50,8 +55,8 @@ public:
static bool classof(const MultiplexConsumer *) { return true; }
private:
std::vector<ASTConsumer*> Consumers; // Owns these.
- llvm::OwningPtr<MultiplexASTMutationListener> MutationListener;
- llvm::OwningPtr<MultiplexASTDeserializationListener> DeserializationListener;
+ OwningPtr<MultiplexASTMutationListener> MutationListener;
+ OwningPtr<MultiplexASTDeserializationListener> DeserializationListener;
};
} // end namespace clang
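MultiplexConsumer now takes ArrayRef<ASTConsumer*>, so any contiguous container works, and it still assumes ownership of the pointers. A sketch with hypothetical pre-built consumers:

#include "clang/Frontend/MultiplexConsumer.h"
#include <vector>

clang::ASTConsumer *buildConsumer(clang::ASTConsumer *A,
                                  clang::ASTConsumer *B) {
  // A and B are hypothetical consumers; the multiplexer owns them.
  std::vector<clang::ASTConsumer *> Consumers;
  Consumers.push_back(A);
  Consumers.push_back(B);
  // std::vector converts implicitly to ArrayRef.
  return new clang::MultiplexConsumer(Consumers);
}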
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
index 0ee8cb3..d86a923 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
@@ -10,6 +10,7 @@
#ifndef LLVM_CLANG_FRONTEND_PREPROCESSOROPTIONS_H_
#define LLVM_CLANG_FRONTEND_PREPROCESSOROPTIONS_H_
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <string>
@@ -49,14 +50,10 @@ public:
unsigned DetailedRecord : 1; /// Whether we should maintain a detailed
/// record of all macro definitions and
/// expansions.
-
- /// \brief Whether we should automatically translate #include or #import
- /// operations into module imports when possible.
- unsigned AutoModuleImport : 1;
-
- /// \brief Whether the detailed preprocessing record includes nested macro
- /// expansions.
- unsigned DetailedRecordIncludesNestedMacroExpansions : 1;
+ unsigned DetailedRecordConditionalDirectives : 1; /// Whether in the
+ /// preprocessing record we should also keep
+ /// track of locations of conditional directives
+ /// in non-system files.
/// The implicit PCH included at the start of the translation unit, or empty.
std::string ImplicitPCHInclude;
@@ -72,6 +69,9 @@ public:
/// precompiled header or AST file.
bool DisableStatCache;
+ /// \brief When true, a PCH with compiler errors will not be rejected.
+ bool AllowPCHWithCompilerErrors;
+
/// \brief Dump declarations that are deserialized from PCH, for testing.
bool DumpDeserializedPCHDecls;
@@ -166,9 +166,9 @@ public:
public:
PreprocessorOptions() : UsePredefines(true), DetailedRecord(false),
- AutoModuleImport(false),
- DetailedRecordIncludesNestedMacroExpansions(true),
+ DetailedRecordConditionalDirectives(false),
DisablePCHValidation(false), DisableStatCache(false),
+ AllowPCHWithCompilerErrors(false),
DumpDeserializedPCHDecls(false),
PrecompiledPreambleBytes(0, true),
RemappedFilesKeepOriginalName(true),
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
new file mode 100644
index 0000000..aa0695f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
@@ -0,0 +1,62 @@
+//===--- SerializedDiagnosticPrinter.h - Serializer for diagnostics -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_SERIALIZE_DIAGNOSTIC_PRINTER_H_
+#define LLVM_CLANG_FRONTEND_SERIALIZE_DIAGNOSTIC_PRINTER_H_
+
+#include "llvm/Bitcode/BitstreamWriter.h"
+
+namespace llvm {
+class raw_ostream;
+}
+
+namespace clang {
+class DiagnosticConsumer;
+class DiagnosticsEngine;
+class DiagnosticOptions;
+
+namespace serialized_diags {
+
+enum BlockIDs {
+ /// \brief A top-level block which represents any metadata associated
+ /// with the diagnostics, including versioning of the format.
+ BLOCK_META = llvm::bitc::FIRST_APPLICATION_BLOCKID,
+
+ /// \brief This block acts as a container for all the information
+ /// for a specific diagnostic.
+ BLOCK_DIAG
+};
+
+enum RecordIDs {
+ RECORD_VERSION = 1,
+ RECORD_DIAG,
+ RECORD_SOURCE_RANGE,
+ RECORD_DIAG_FLAG,
+ RECORD_CATEGORY,
+ RECORD_FILENAME,
+ RECORD_FIXIT,
+ RECORD_FIRST = RECORD_VERSION,
+ RECORD_LAST = RECORD_FIXIT
+};
+
+/// \brief Returns a DiagnosticConsumer that serializes diagnostics to
+/// a bitcode file.
+///
+/// The created DiagnosticConsumer is designed for quick and lightweight
+/// transfer of diagnostics to the enclosing build system (e.g., an IDE).
+/// This allows wrapper tools for Clang to get diagnostics from Clang
+/// (via libclang) without needing to parse Clang's command line output.
+///
+DiagnosticConsumer *create(llvm::raw_ostream *OS,
+ const DiagnosticOptions &diags);
+
+} // end serialized_diags namespace
+} // end clang namespace
+
+#endif
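Hooking the serializer up requires only an output stream; the .dia extension below matches what serialized-diagnostics files conventionally use, though any path works. A hedged sketch (the helper name is illustrative):

#include "clang/Frontend/SerializedDiagnosticPrinter.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

clang::DiagnosticConsumer *
makeSerializedConsumer(const clang::DiagnosticOptions &DiagOpts) {
  std::string ErrInfo;
  // Binary mode matters: the output is a bitcode stream, not text.
  llvm::raw_fd_ostream *OS =
      new llvm::raw_fd_ostream("diagnostics.dia", ErrInfo,
                               llvm::raw_fd_ostream::F_Binary);
  if (!ErrInfo.empty())
    return 0; // could not open the output file
  return clang::serialized_diags::create(OS, DiagOpts);
}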
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
new file mode 100644
index 0000000..519d3b6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
@@ -0,0 +1,120 @@
+//===--- TextDiagnostic.h - Text Diagnostic Pretty-Printing -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class that provides support for textual pretty-printing of
+// diagnostics. It is used to implement the different code paths which require
+// such functionality in a consistent way.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_H_
+#define LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_H_
+
+#include "clang/Frontend/DiagnosticRenderer.h"
+
+namespace clang {
+
+/// \brief Class to encapsulate the logic for formatting and printing a textual
+/// diagnostic message.
+///
+/// This class provides an interface for building and emitting a textual
+/// diagnostic, including all of the macro backtraces, caret diagnostics, FixIt
+/// Hints, and code snippets. In the presence of macros this involves
+/// a recursive process, synthesizing notes for each macro expansion.
+///
+/// The purpose of this class is to isolate the implementation of printing
+/// beautiful text diagnostics from any particular interfaces. The Clang
+/// DiagnosticClient is implemented through this class as is diagnostic
+/// printing coming out of libclang.
+class TextDiagnostic : public DiagnosticRenderer {
+ raw_ostream &OS;
+
+public:
+ TextDiagnostic(raw_ostream &OS,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts);
+
+ virtual ~TextDiagnostic();
+
+ /// \brief Print the diagnostic level to a raw_ostream.
+ ///
+ /// This is a static helper that handles colorizing the level and formatting
+ /// it into an arbitrary output stream. This is used internally by the
+ /// TextDiagnostic emission code, but it can also be used directly by
+ /// consumers that don't have a source manager or other state that the full
+ /// TextDiagnostic logic requires.
+ static void printDiagnosticLevel(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ bool ShowColors);
+
+ /// \brief Pretty-print a diagnostic message to a raw_ostream.
+ ///
+ /// This is a static helper to handle the line wrapping, colorizing, and
+ /// rendering of a diagnostic message to a particular ostream. It is
+ /// publicly visible so that clients which do not have sufficient state to
+ /// build a complete TextDiagnostic object can still get consistent
+ /// formatting of their diagnostic messages.
+ ///
+ /// \param OS Where the message is printed
+ /// \param Level Used to colorize the message
+ /// \param Message The text actually printed
+ /// \param CurrentColumn The starting column of the first line, accounting
+ /// for any prefix.
+ /// \param Columns The number of columns to use in line-wrapping, 0 disables
+ /// all line-wrapping.
+ /// \param ShowColors Enable colorizing of the message.
+ static void printDiagnosticMessage(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ unsigned CurrentColumn, unsigned Columns,
+ bool ShowColors);
+
+protected:
+ virtual void emitDiagnosticMessage(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ DiagOrStoredDiag D);
+
+ virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges);
+
+ virtual void emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints) {
+ emitSnippetAndCaret(Loc, Level, Ranges, Hints);
+ }
+
+ virtual void emitBasicNote(StringRef Message);
+
+ virtual void emitIncludeLocation(SourceLocation Loc, PresumedLoc PLoc);
+
+private:
+ void emitSnippetAndCaret(SourceLocation Loc, DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints);
+
+ void highlightRange(const CharSourceRange &R,
+ unsigned LineNo, FileID FID,
+ const std::string &SourceLine,
+ std::string &CaretLine);
+ std::string buildFixItInsertionLine(unsigned LineNo,
+ const char *LineStart,
+ const char *LineEnd,
+ ArrayRef<FixItHint> Hints);
+ void expandTabs(std::string &SourceLine, std::string &CaretLine);
+ void emitParseableFixits(ArrayRef<FixItHint> Hints);
+};
+
+} // end namespace clang
+
+#endif
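A minimal sketch of the two static helpers, which let a client with no SourceManager produce consistently formatted output (values here are illustrative):

#include "clang/Frontend/TextDiagnostic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"

void printBare(llvm::raw_ostream &OS, llvm::StringRef Message) {
  clang::TextDiagnostic::printDiagnosticLevel(
      OS, clang::DiagnosticsEngine::Warning, /*ShowColors=*/false);
  clang::TextDiagnostic::printDiagnosticMessage(
      OS, clang::DiagnosticsEngine::Warning, Message,
      /*CurrentColumn=*/0, /*Columns=*/80, /*ShowColors=*/false);
}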
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
index 22fa18b..9b6ac24 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
@@ -16,25 +16,28 @@
#define LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_PRINTER_H_
#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/OwningPtr.h"
namespace clang {
class DiagnosticOptions;
class LangOptions;
+class TextDiagnostic;
class TextDiagnosticPrinter : public DiagnosticConsumer {
raw_ostream &OS;
const LangOptions *LangOpts;
const DiagnosticOptions *DiagOpts;
+ const SourceManager *SM;
- SourceLocation LastWarningLoc;
- FullSourceLoc LastLoc;
- unsigned LastCaretDiagnosticWasNote : 1;
- unsigned OwnsOutputStream : 1;
+ /// \brief Handle to the currently active text diagnostic emitter.
+ OwningPtr<TextDiagnostic> TextDiag;
/// A string to prefix to error messages.
std::string Prefix;
+ unsigned OwnsOutputStream : 1;
+
public:
TextDiagnosticPrinter(raw_ostream &os, const DiagnosticOptions &diags,
bool OwnsOutputStream = false);
@@ -45,27 +48,10 @@ public:
/// used.
void setPrefix(std::string Value) { Prefix = Value; }
- void BeginSourceFile(const LangOptions &LO, const Preprocessor *PP) {
- LangOpts = &LO;
- }
-
- void EndSourceFile() {
- LangOpts = 0;
- }
-
- void PrintIncludeStack(DiagnosticsEngine::Level Level, SourceLocation Loc,
- const SourceManager &SM);
-
- virtual void HandleDiagnostic(DiagnosticsEngine::Level Level,
- const Diagnostic &Info);
-
+ void BeginSourceFile(const LangOptions &LO, const Preprocessor *PP);
+ void EndSourceFile();
+ void HandleDiagnostic(DiagnosticsEngine::Level Level, const Diagnostic &Info);
DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const;
-
-private:
- void EmitDiagnosticLoc(DiagnosticsEngine::Level Level,
- const Diagnostic &Info,
- const SourceManager &SM,
- PresumedLoc PLoc);
};
} // end namespace clang
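Construction is unchanged by this patch, but rendering now flows through the internal TextDiagnostic. A sketch, assuming a caller-owned stream and options:

#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "llvm/Support/raw_ostream.h"

clang::TextDiagnosticPrinter *
makePrinter(llvm::raw_ostream &OS, const clang::DiagnosticOptions &Opts) {
  clang::TextDiagnosticPrinter *Printer =
      new clang::TextDiagnosticPrinter(OS, Opts, /*OwnsOutputStream=*/false);
  Printer->setPrefix("mytool"); // prepended to error messages
  return Printer;
}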
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h b/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
index 929beb0..6b1fc63 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
@@ -15,13 +15,11 @@
#define LLVM_CLANG_FRONTEND_UTILS_H
#include "clang/Basic/Diagnostic.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/Support/raw_ostream.h"
-#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/StringRef.h"
namespace llvm {
+class raw_fd_ostream;
class Triple;
}
@@ -46,11 +44,6 @@ class Stmt;
class TargetInfo;
class FrontendOptions;
-/// Normalize \arg File for use in a user defined #include directive (in the
-/// predefines buffer).
-std::string NormalizeDashIncludePath(StringRef File,
- FileManager &FileMgr);
-
/// Apply the header search options to the given HeaderSearch object.
void ApplyHeaderSearchOptions(HeaderSearch &HS,
const HeaderSearchOptions &HSOpts,
@@ -66,7 +59,8 @@ void InitializePreprocessor(Preprocessor &PP,
/// ProcessWarningOptions - Initialize the diagnostic client and process the
/// warning options specified on the command line.
-void ProcessWarningOptions(DiagnosticsEngine &Diags, const DiagnosticOptions &Opts);
+void ProcessWarningOptions(DiagnosticsEngine &Diags,
+ const DiagnosticOptions &Opts);
/// DoPrintPreprocessedInput - Implement -E mode.
void DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream* OS,
@@ -77,6 +71,11 @@ void DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream* OS,
void AttachDependencyFileGen(Preprocessor &PP,
const DependencyOutputOptions &Opts);
+/// AttachDependencyGraphGen - Create a dependency graph generator, and attach
+/// it to the given preprocessor.
+ void AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
+ StringRef SysRoot);
+
/// AttachHeaderIncludeGen - Create a header include list generator, and attach
/// it to the given preprocessor.
///
@@ -101,8 +100,8 @@ void CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS);
/// argument vector.
CompilerInvocation *
createInvocationFromCommandLine(ArrayRef<const char *> Args,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine>());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
+ IntrusiveRefCntPtr<DiagnosticsEngine>());
} // end namespace clang
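A usage sketch for the retargeted declaration above; the argument vector is hypothetical:

#include "clang/Frontend/Utils.h"
#include "llvm/ADT/ArrayRef.h"

clang::CompilerInvocation *invocationFor(llvm::ArrayRef<const char *> Args) {
  // Omitting the DiagnosticsEngine uses the default-constructed
  // IntrusiveRefCntPtr, exactly as the default argument spells it.
  return clang::createInvocationFromCommandLine(Args);
}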
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
index 28dc9de..2fc6ccc 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
@@ -67,7 +67,7 @@ public:
DiagnosticsEngine &Diags;
DiagnosticConsumer *PrimaryClient;
bool OwnsPrimaryClient;
- llvm::OwningPtr<TextDiagnosticBuffer> Buffer;
+ OwningPtr<TextDiagnosticBuffer> Buffer;
Preprocessor *CurrentPreprocessor;
private:
diff --git a/contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h b/contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h
index 7b66e7e..45097cc 100644
--- a/contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h
@@ -16,6 +16,7 @@
#include "clang/AST/TypeLoc.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
class Decl;
@@ -42,7 +43,7 @@ public:
struct NamedRef {
NamedDecl *ND;
SourceLocation Loc;
-
+
NamedRef() : ND(0) { }
NamedRef(NamedDecl *nd, SourceLocation loc) : ND(nd), Loc(loc) { }
};
@@ -95,14 +96,14 @@ public:
bool isValid() const { return ParentDecl.getPointer() != 0; }
bool isInvalid() const { return !isValid(); }
-
+
NodeKind getKind() const {
assert(isValid());
return (NodeKind)ParentDecl.getInt();
}
-
+
Decl *getParentDecl() const { return ParentDecl.getPointer(); }
-
+
Decl *AsDecl() const {
assert(getKind() == N_Decl);
return D;
@@ -121,14 +122,16 @@ public:
}
Decl *dyn_AsDecl() const { return isValid() && getKind() == N_Decl ? D : 0; }
- Stmt *dyn_AsStmt() const { return isValid() && getKind() == N_Stmt ? Stm : 0; }
+ Stmt *dyn_AsStmt() const {
+ return isValid() && getKind() == N_Stmt ? Stm : 0;
+ }
NamedRef dyn_AsNamedRef() const {
return getKind() == N_Type ? AsNamedRef() : NamedRef();
}
TypeLoc dyn_AsTypeLoc() const {
return getKind() == N_Type ? AsTypeLoc() : TypeLoc();
}
-
+
bool isDecl() const { return isValid() && getKind() == N_Decl; }
bool isStmt() const { return isValid() && getKind() == N_Stmt; }
bool isNamedRef() const { return isValid() && getKind() == N_NamedRef; }
@@ -144,7 +147,7 @@ public:
return const_cast<ASTLocation*>(this)->getReferencedDecl();
}
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
void print(raw_ostream &OS) const;
};
diff --git a/contrib/llvm/tools/clang/include/clang/Index/CallGraph.h b/contrib/llvm/tools/clang/include/clang/Index/GlobalCallGraph.h
index 38baf0f..7ba1cce 100644
--- a/contrib/llvm/tools/clang/include/clang/Index/CallGraph.h
+++ b/contrib/llvm/tools/clang/include/clang/Index/GlobalCallGraph.h
@@ -1,4 +1,4 @@
-//== CallGraph.cpp - Call graph building ------------------------*- C++ -*--==//
+//== GlobalCallGraph.h - Call graph building --------------------*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_CALLGRAPH
-#define LLVM_CLANG_ANALYSIS_CALLGRAPH
+#ifndef LLVM_CLANG_INDEX_CALLGRAPH
+#define LLVM_CLANG_INDEX_CALLGRAPH
#include "clang/Index/ASTLocation.h"
#include "clang/Index/Entity.h"
@@ -23,15 +23,18 @@
#include <vector>
#include <map>
+using namespace clang;
+
namespace clang {
+namespace idx {
class CallGraphNode {
- idx::Entity F;
- typedef std::pair<idx::ASTLocation, CallGraphNode*> CallRecord;
+ Entity F;
+ typedef std::pair<ASTLocation, CallGraphNode*> CallRecord;
std::vector<CallRecord> CalledFunctions;
public:
- CallGraphNode(idx::Entity f) : F(f) {}
+ CallGraphNode(Entity f) : F(f) {}
typedef std::vector<CallRecord>::iterator iterator;
typedef std::vector<CallRecord>::const_iterator const_iterator;
@@ -41,7 +44,7 @@ public:
const_iterator begin() const { return CalledFunctions.begin(); }
const_iterator end() const { return CalledFunctions.end(); }
- void addCallee(idx::ASTLocation L, CallGraphNode *Node) {
+ void addCallee(ASTLocation L, CallGraphNode *Node) {
CalledFunctions.push_back(std::make_pair(L, Node));
}
@@ -54,9 +57,9 @@ public:
class CallGraph {
/// Program manages all Entities.
- idx::Program &Prog;
+ Program &Prog;
- typedef std::map<idx::Entity, CallGraphNode *> FunctionMapTy;
+ typedef std::map<Entity, CallGraphNode *> FunctionMapTy;
/// FunctionMap owns all CallGraphNodes.
FunctionMapTy FunctionMap;
@@ -71,7 +74,7 @@ class CallGraph {
CallGraphNode *ExternalCallingNode;
public:
- CallGraph(idx::Program &P);
+ CallGraph(Program &P);
~CallGraph();
typedef FunctionMapTy::iterator iterator;
@@ -88,7 +91,7 @@ public:
void addTU(ASTContext &AST);
- idx::Program &getProgram() { return Prog; }
+ Program &getProgram() { return Prog; }
CallGraphNode *getOrInsertFunction(idx::Entity F);
@@ -100,13 +103,13 @@ public:
void ViewCallGraph() const;
};
-} // end clang namespace
+}} // end clang idx namespace
namespace llvm {
-template <> struct GraphTraits<clang::CallGraph> {
- typedef clang::CallGraph GraphType;
- typedef clang::CallGraphNode NodeType;
+template <> struct GraphTraits<clang::idx::CallGraph> {
+ typedef clang::idx::CallGraph GraphType;
+ typedef clang::idx::CallGraphNode NodeType;
typedef std::pair<clang::idx::ASTLocation, NodeType*> CGNPairTy;
typedef std::pointer_to_unary_function<CGNPairTy, NodeType*> CGNDerefFun;
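After the move into clang::idx, call records are reached the same way as before; a sketch assuming an already-populated graph node:

#include "clang/Index/GlobalCallGraph.h"
#include <vector>

void collectCallees(clang::idx::CallGraphNode *N,
                    std::vector<clang::idx::CallGraphNode *> &Out) {
  for (clang::idx::CallGraphNode::iterator I = N->begin(), E = N->end();
       I != E; ++I)
    Out.push_back(I->second); // each record pairs an ASTLocation with a callee
}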
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h b/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h
index f7da61b..95f0d27 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h
@@ -22,7 +22,8 @@ class HeaderMap;
class DirectoryEntry;
class FileEntry;
class HeaderSearch;
-
+class Module;
+
/// DirectoryLookup - This class represents one entry in the search list that
/// specifies the search order for directories in #include directives. It
/// represents either a directory, a framework, or a headermap.
@@ -141,24 +142,26 @@ public:
/// SearchPath at which the file was found. This only differs from the
/// Filename for framework includes.
///
- /// \param BuildingModule The name of the module we're currently building.
- ///
/// \param SuggestedModule If non-null, and the file found is semantically
- /// part of a known module, this will be set to the name of the module that
- /// could be imported instead of preprocessing/parsing the file found.
+ /// part of a known module, this will be set to the module that should
+ /// be imported instead of preprocessing/parsing the file found.
+ ///
+ /// \param InUserSpecifiedSystemHeader [out] If the file is found, set to true
+ /// if the file is located in a framework that has been user-specified to be
+ /// treated as a system framework.
const FileEntry *LookupFile(StringRef Filename, HeaderSearch &HS,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- StringRef BuildingModule,
- StringRef *SuggestedModule) const;
+ Module **SuggestedModule,
+ bool &InUserSpecifiedSystemHeader) const;
private:
const FileEntry *DoFrameworkLookup(
StringRef Filename, HeaderSearch &HS,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- StringRef BuildingModule,
- StringRef *SuggestedModule) const;
+ Module **SuggestedModule,
+ bool &InUserSpecifiedSystemHeader) const;
};
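A sketch of the new out-parameters on LookupFile (the HeaderSearch is assumed to be owned by the caller):

#include "clang/Lex/DirectoryLookup.h"
#include "llvm/ADT/StringRef.h"

const clang::FileEntry *lookupIn(const clang::DirectoryLookup &DL,
                                 llvm::StringRef Filename,
                                 clang::HeaderSearch &HS) {
  clang::Module *SuggestedModule = 0;
  bool InUserSpecifiedSystemHeader = false;
  return DL.LookupFile(Filename, HS, /*SearchPath=*/0, /*RelativePath=*/0,
                       &SuggestedModule, InUserSpecifiedSystemHeader);
}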
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h b/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
index dbf7389..f172b5c 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
@@ -30,6 +30,9 @@ public:
/// \brief Read the definition for the given macro.
virtual void LoadMacroDefinition(IdentifierInfo *II) = 0;
+
+ /// \brief Update an out-of-date identifier.
+ virtual void updateOutOfDateIdentifier(IdentifierInfo &II) = 0;
};
}
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
index 84d59f7..5128ce6 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
@@ -15,13 +15,16 @@
#define LLVM_CLANG_LEX_HEADERSEARCH_H
#include "clang/Lex/DirectoryLookup.h"
+#include "clang/Lex/ModuleMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/OwningPtr.h"
#include <vector>
namespace clang {
-
+
+class DiagnosticsEngine;
class ExternalIdentifierLookup;
class FileEntry;
class FileManager;
@@ -117,7 +120,19 @@ public:
/// HeaderSearch - This class encapsulates the information needed to find the
/// file referenced by a #include or #include_next, (sub-)framework lookup, etc.
class HeaderSearch {
+ /// This structure is used to record entries in our framework cache.
+ struct FrameworkCacheEntry {
+ /// The directory entry which should be used for the cached framework.
+ const DirectoryEntry *Directory;
+
+ /// Whether this framework has been "user-specified" to be treated as if it
+ /// were a system framework (even if it was found outside a system framework
+ /// directory).
+ bool IsUserSpecifiedSystemFramework;
+ };
+
FileManager &FileMgr;
+ DiagnosticsEngine &Diags;
/// #include search path information. Requests for #include "x" search the
/// directory of the #including file first, then each directory in SearchDirs
/// consecutively. Requests for <x> search the current dir first, then each
@@ -132,9 +147,6 @@ class HeaderSearch {
/// \brief The path to the module cache.
std::string ModuleCachePath;
- /// \brief The name of the module we're building.
- std::string BuildingModule;
-
/// FileInfo - This contains all of the preprocessor-specific data about files
/// that are included. The vector is indexed by the FileEntry's UID.
///
@@ -149,16 +161,27 @@ class HeaderSearch {
llvm::StringMap<std::pair<unsigned, unsigned>, llvm::BumpPtrAllocator>
LookupFileCache;
-
/// FrameworkMap - This is a collection mapping a framework or subframework
/// name like "Carbon" to the Carbon.framework directory.
- llvm::StringMap<const DirectoryEntry *, llvm::BumpPtrAllocator>
- FrameworkMap;
+ llvm::StringMap<FrameworkCacheEntry, llvm::BumpPtrAllocator> FrameworkMap;
+
+ /// IncludeAliases - maps include file names (including the quotes or
+ /// angle brackets) to other include file names. This is used to support the
+ /// include_alias pragma for Microsoft compatibility.
+ typedef llvm::StringMap<std::string, llvm::BumpPtrAllocator>
+ IncludeAliasMap;
+ OwningPtr<IncludeAliasMap> IncludeAliases;
/// HeaderMaps - This is a mapping from FileEntry -> HeaderMap, uniquing
/// headermaps. This vector owns the headermap.
std::vector<std::pair<const FileEntry*, const HeaderMap*> > HeaderMaps;
+ /// \brief The mapping between modules and headers.
+ ModuleMap ModMap;
+
+ /// \brief Describes whether a given directory has a module map in it.
+ llvm::DenseMap<const DirectoryEntry *, bool> DirectoryHasModuleMap;
+
/// \brief Uniqued set of framework names, which is used to track which
/// headers were included as framework headers.
llvm::StringSet<llvm::BumpPtrAllocator> FrameworkNames;
@@ -179,8 +202,12 @@ class HeaderSearch {
explicit HeaderSearch();
explicit HeaderSearch(const HeaderSearch&);
void operator=(const HeaderSearch&);
+
+ friend class DirectoryLookup;
+
public:
- HeaderSearch(FileManager &FM);
+ HeaderSearch(FileManager &FM, DiagnosticsEngine &Diags,
+ const LangOptions &LangOpts, const TargetInfo *Target);
~HeaderSearch();
FileManager &getFileMgr() const { return FileMgr; }
@@ -199,13 +226,51 @@ public:
//LookupFileCache.clear();
}
- /// \brief Set the path to the module cache and the name of the module
- /// we're building
- void configureModules(StringRef CachePath, StringRef BuildingModule) {
+ /// AddSearchPath - Add an additional search path.
+ void AddSearchPath(const DirectoryLookup &dir, bool isAngled) {
+ unsigned idx = isAngled ? SystemDirIdx : AngledDirIdx;
+ SearchDirs.insert(SearchDirs.begin() + idx, dir);
+ if (!isAngled)
+ AngledDirIdx++;
+ SystemDirIdx++;
+ }
+
+ /// HasIncludeAliasMap - Checks whether the map exists or not
+ bool HasIncludeAliasMap() const {
+ return IncludeAliases;
+ }
+
+ /// AddIncludeAlias - Map the source include name to the dest include name.
+ /// The Source should include the angle brackets or quotes, the dest
+ /// should not. This allows for distinction between <> and "" headers.
+ void AddIncludeAlias(StringRef Source, StringRef Dest) {
+ if (!IncludeAliases)
+ IncludeAliases.reset(new IncludeAliasMap);
+ (*IncludeAliases)[Source] = Dest;
+ }
+
+ /// MapHeaderToIncludeAlias - Maps one header file name to a different header
+ /// file name, for use with the include_alias pragma. Note that the source
+ /// file name should include the angle brackets or quotes. Returns a null
+ /// StringRef if the header cannot be mapped.
+ StringRef MapHeaderToIncludeAlias(StringRef Source) {
+ assert(IncludeAliases && "Trying to map headers when there's no map");
+
+ // Do any filename replacements before anything else
+ IncludeAliasMap::const_iterator Iter = IncludeAliases->find(Source);
+ if (Iter != IncludeAliases->end())
+ return Iter->second;
+ return StringRef();
+ }
+
+ /// \brief Set the path to the module cache.
+ void setModuleCachePath(StringRef CachePath) {
ModuleCachePath = CachePath;
- this->BuildingModule = BuildingModule;
}
+ /// \brief Retrieve the path to the module cache.
+ StringRef getModuleCachePath() const { return ModuleCachePath; }
+
/// ClearFileInfo - Forget everything we know about headers so far.
void ClearFileInfo() {
FileInfo.clear();
@@ -224,6 +289,10 @@ public:
ExternalSource = ES;
}
+ /// \brief Set the target information for the header search, if not
+ /// already known.
+ void setTarget(const TargetInfo &Target);
+
/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
/// return null on failure.
///
@@ -247,15 +316,16 @@ public:
/// Filename for framework includes.
///
/// \param SuggestedModule If non-null, and the file found is semantically
- /// part of a known module, this will be set to the name of the module that
- /// could be imported instead of preprocessing/parsing the file found.
+ /// part of a known module, this will be set to the module that should
+ /// be imported instead of preprocessing/parsing the file found.
const FileEntry *LookupFile(StringRef Filename, bool isAngled,
const DirectoryLookup *FromDir,
const DirectoryLookup *&CurDir,
const FileEntry *CurFileEnt,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- StringRef *SuggestedModule);
+ Module **SuggestedModule,
+ bool SkipCache = false);
/// LookupSubframeworkHeader - Look up a subframework for the specified
/// #include file. For example, if #include'ing <HIToolbox/HIToolbox.h> from
@@ -271,7 +341,7 @@ public:
/// LookupFrameworkCache - Look up the specified framework name in our
/// framework cache, returning the DirectoryEntry it is in if we know,
/// otherwise, return null.
- const DirectoryEntry *&LookupFrameworkCache(StringRef FWName) {
+ FrameworkCacheEntry &LookupFrameworkCache(StringRef FWName) {
return FrameworkMap.GetOrCreateValue(FWName).getValue();
}
@@ -326,33 +396,96 @@ public:
/// FileEntry, uniquing them through the 'HeaderMaps' data structure.
const HeaderMap *CreateHeaderMap(const FileEntry *FE);
- /// \brief Search in the module cache path for a module with the given
- /// name.
+ /// \brief Retrieve the name of the module file that should be used to
+ /// load the given module.
///
- /// \param If non-NULL, will be set to the module file name we expected to
- /// find (regardless of whether it was actually found or not).
+ /// \param Module The module whose module file name will be returned.
///
- /// \param UmbrellaHeader If non-NULL, and no module was found in the module
- /// cache, this routine will search in the framework paths to determine
- /// whether a module can be built from an umbrella header. If so, the pointee
- /// will be set to the path of the umbrella header.
+ /// \returns The name of the module file that corresponds to this module,
+ /// or an empty string if this module does not correspond to any module file.
+ std::string getModuleFileName(Module *Module);
+
+ /// \brief Retrieve the name of the module file that should be used to
+ /// load a module with the given name.
+ ///
+ /// \param Module The module whose module file name will be returned.
+ ///
+ /// \returns The name of the module file that corresponds to this module,
+ /// or an empty string if this module does not correspond to any module file.
+ std::string getModuleFileName(StringRef ModuleName);
+
+ /// \brief Look up a module with the given name.
///
- /// \returns A file describing the named module, if available, or NULL to
- /// indicate that the module could not be found.
- const FileEntry *lookupModule(StringRef ModuleName,
- std::string *ModuleFileName = 0,
- std::string *UmbrellaHeader = 0);
+ /// \param ModuleName The name of the module we're looking for.
+ ///
+ /// \param AllowSearch Whether we are allowed to search in the various
+ /// search directories to produce a module definition. If not, this lookup
+ /// will only return an already-known module.
+ ///
+ /// \returns The module with the given name.
+ Module *lookupModule(StringRef ModuleName, bool AllowSearch = true);
void IncrementFrameworkLookupCount() { ++NumFrameworkLookups; }
- typedef std::vector<HeaderFileInfo>::const_iterator header_file_iterator;
- header_file_iterator header_file_begin() const { return FileInfo.begin(); }
- header_file_iterator header_file_end() const { return FileInfo.end(); }
+ /// \brief Determine whether there is a module map that may map the header
+ /// with the given file name to a (sub)module.
+ ///
+ /// \param Filename The name of the file.
+ ///
+ /// \param Root The "root" directory, at which we should stop looking for
+ /// module maps.
+ bool hasModuleMap(StringRef Filename, const DirectoryEntry *Root);
+
+ /// \brief Retrieve the module that corresponds to the given file, if any.
+ ///
+ /// \param File The header that we wish to map to a module.
+ Module *findModuleForHeader(const FileEntry *File);
+
+ /// \brief Read the contents of the given module map file.
+ ///
+ /// \param File The module map file.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool loadModuleMapFile(const FileEntry *File);
+
+ /// \brief Collect the set of all known, top-level modules.
+ ///
+ /// \param Modules Will be filled with the set of known, top-level modules.
+ void collectAllModules(llvm::SmallVectorImpl<Module *> &Modules);
+
+private:
+ /// \brief Retrieve a module with the given name, which may be part of the
+ /// given framework.
+ ///
+ /// \param Name The name of the module to retrieve.
+ ///
+ /// \param Dir The framework directory (e.g., ModuleName.framework).
+ ///
+ /// \param IsSystem Whether the framework directory is part of the system
+ /// frameworks.
+ ///
+ /// \returns The module, if found; otherwise, null.
+ Module *loadFrameworkModule(StringRef Name,
+ const DirectoryEntry *Dir,
+ bool IsSystem);
+
+public:
+ /// \brief Retrieve the module map.
+ ModuleMap &getModuleMap() { return ModMap; }
+
unsigned header_file_size() const { return FileInfo.size(); }
// Used by ASTReader.
void setHeaderFileInfoForUID(HeaderFileInfo HFI, unsigned UID);
+ /// getFileInfo - Return the HeaderFileInfo structure for the specified
+ /// FileEntry.
+ const HeaderFileInfo &getFileInfo(const FileEntry *FE) const {
+ return const_cast<HeaderSearch*>(this)->getFileInfo(FE);
+ }
+
// Used by external tools
typedef std::vector<DirectoryLookup>::const_iterator search_dir_iterator;
search_dir_iterator search_dir_begin() const { return SearchDirs.begin(); }
@@ -385,7 +518,39 @@ public:
size_t getTotalMemory() const;
+ static std::string NormalizeDashIncludePath(StringRef File,
+ FileManager &FileMgr);
+
private:
+ /// \brief Describes what happened when we tried to load a module map file.
+ enum LoadModuleMapResult {
+ /// \brief The module map file had already been loaded.
+ LMM_AlreadyLoaded,
+ /// \brief The module map file was loaded by this invocation.
+ LMM_NewlyLoaded,
+ /// \brief There was no directory with the given name.
+ LMM_NoDirectory,
+ /// \brief There was either no module map file or the module map file was
+ /// invalid.
+ LMM_InvalidModuleMap
+ };
+
+ /// \brief Try to load the module map file in the given directory.
+ ///
+ /// \param DirName The name of the directory where we will look for a module
+ /// map file.
+ ///
+ /// \returns The result of attempting to load the module map file from the
+ /// named directory.
+ LoadModuleMapResult loadModuleMapFile(StringRef DirName);
+
+ /// \brief Try to load the module map file in the given directory.
+ ///
+ /// \param Dir The directory where we will look for a module map file.
+ ///
+ /// \returns The result of attempting to load the module map file from the
+ /// named directory.
+ LoadModuleMapResult loadModuleMapFile(const DirectoryEntry *Dir);
/// getFileInfo - Return the HeaderFileInfo structure for the specified
/// FileEntry.
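A usage sketch for the include_alias machinery added above; the header names are hypothetical:

#include "clang/Lex/HeaderSearch.h"
#include "llvm/ADT/StringRef.h"

llvm::StringRef remapHeader(clang::HeaderSearch &HS) {
  // The bracketed/quoted form is part of the key, so <> and "" inclusions
  // of the same name can alias to different targets.
  HS.AddIncludeAlias("<foo.h>", "bar.h");
  if (HS.HasIncludeAliasMap())
    return HS.MapHeaderToIncludeAlias("<foo.h>"); // yields "bar.h"
  return llvm::StringRef(); // null StringRef: no mapping
}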
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h
index f454e23..41b9396 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h
@@ -16,7 +16,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
- SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY,BRIEF,FULL) ENUM,
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
#define LEXSTART
#include "clang/Basic/DiagnosticLexKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
index e01427f..04bcead 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
@@ -44,12 +44,14 @@ enum ConflictMarkerKind {
/// or buffering/seeking of tokens, only forward lexing is supported. It relies
/// on the specified Preprocessor object to handle preprocessor directives, etc.
class Lexer : public PreprocessorLexer {
+ virtual void anchor();
+
//===--------------------------------------------------------------------===//
// Constant configuration values for this lexer.
const char *BufferStart; // Start of the buffer.
const char *BufferEnd; // End of the buffer.
SourceLocation FileLoc; // Location for start of file.
- LangOptions Features; // Features enabled by this language (cache).
+ LangOptions LangOpts; // LangOpts enabled by this language (cache).
bool Is_PragmaLexer; // True if lexer for _Pragma handling.
//===--------------------------------------------------------------------===//
@@ -97,14 +99,14 @@ public:
/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
- Lexer(SourceLocation FileLoc, const LangOptions &Features,
+ Lexer(SourceLocation FileLoc, const LangOptions &LangOpts,
const char *BufStart, const char *BufPtr, const char *BufEnd);
/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer,
- const SourceManager &SM, const LangOptions &Features);
+ const SourceManager &SM, const LangOptions &LangOpts);
/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion. This has a variety of magic semantics that this method
@@ -115,9 +117,9 @@ public:
unsigned TokLen, Preprocessor &PP);
- /// getFeatures - Return the language features currently enabled. NOTE: this
- /// lexer modifies features as a file is parsed!
- const LangOptions &getFeatures() const { return Features; }
+ /// getLangOpts - Return the language features currently enabled.
+ /// NOTE: this lexer modifies features as a file is parsed!
+ const LangOptions &getLangOpts() const { return LangOpts; }
/// getFileLoc - Return the File Location for the file we are lexing out of.
/// The physical location encodes the location where the characters come from,
@@ -238,7 +240,7 @@ public:
/// if an internal buffer is returned.
static unsigned getSpelling(const Token &Tok, const char *&Buffer,
const SourceManager &SourceMgr,
- const LangOptions &Features,
+ const LangOptions &LangOpts,
bool *Invalid = 0);
/// getSpelling() - Return the 'spelling' of the Tok token. The spelling of a
@@ -248,7 +250,7 @@ public:
/// UCNs, etc.
static std::string getSpelling(const Token &Tok,
const SourceManager &SourceMgr,
- const LangOptions &Features,
+ const LangOptions &LangOpts,
bool *Invalid = 0);
/// getSpelling - This method is used to get the spelling of the
@@ -262,7 +264,7 @@ public:
static StringRef getSpelling(SourceLocation loc,
SmallVectorImpl<char> &buffer,
const SourceManager &SourceMgr,
- const LangOptions &Features,
+ const LangOptions &LangOpts,
bool *invalid = 0);
/// MeasureTokenLength - Relex the token at the specified location and return
@@ -288,7 +290,7 @@ public:
static SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart,
unsigned Character,
const SourceManager &SM,
- const LangOptions &Features);
+ const LangOptions &LangOpts);
/// \brief Computes the source location just past the end of the
/// token at this source location.
@@ -307,19 +309,52 @@ public:
/// a source location pointing to the last character in the token, etc.
static SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
const SourceManager &SM,
- const LangOptions &Features);
+ const LangOptions &LangOpts);
/// \brief Returns true if the given MacroID location points at the first
/// token of the macro expansion.
+ ///
+ /// \param MacroBegin If non-null and the function returns true, it is set
+ /// to the begin location of the macro.
static bool isAtStartOfMacroExpansion(SourceLocation loc,
- const SourceManager &SM,
- const LangOptions &LangOpts);
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroBegin = 0);
/// \brief Returns true if the given MacroID location points at the last
/// token of the macro expansion.
+ ///
+ /// \param MacroEnd If non-null and the function returns true, it is set
+ /// to the end location of the macro.
static bool isAtEndOfMacroExpansion(SourceLocation loc,
- const SourceManager &SM,
- const LangOptions &LangOpts);
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroEnd = 0);
+
+ /// \brief Accepts a range and returns a character range with file locations.
+ ///
+ /// Returns a null range if a part of the range resides inside a macro
+ /// expansion or the range does not reside on the same FileID.
+ static CharSourceRange makeFileCharRange(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
+
+ /// \brief Returns a string for the source that the range encompasses.
+ static StringRef getSourceText(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool *Invalid = 0);
+
+ /// \brief Retrieve the name of the immediate macro expansion.
+ ///
+ /// This routine starts from a source location, and finds the name of the macro
+ /// responsible for its immediate expansion. It looks through any intervening
+ /// macro argument expansions to compute this. It returns a StringRef which
+ /// refers to the SourceManager-owned buffer of the source where that macro
+ /// name is spelled. Thus, the result shouldn't outlive that SourceManager.
+ static StringRef getImmediateMacroName(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
/// \brief Compute the preamble of the given file.
///
@@ -337,7 +372,7 @@ public:
/// of the file begins along with a boolean value indicating whether
/// the preamble ends at the beginning of a new line.
static std::pair<unsigned, bool>
- ComputePreamble(const llvm::MemoryBuffer *Buffer, const LangOptions &Features,
+ ComputePreamble(const llvm::MemoryBuffer *Buffer, const LangOptions &LangOpts,
unsigned MaxLines = 0);
//===--------------------------------------------------------------------===//
@@ -451,7 +486,7 @@ public:
/// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
/// emit a warning.
static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
- const LangOptions &Features) {
+ const LangOptions &LangOpts) {
// If this is not a trigraph and not a UCN or escaped newline, return
// quickly.
if (isObviouslySimpleCharacter(Ptr[0])) {
@@ -460,7 +495,7 @@ public:
}
Size = 0;
- return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
+ return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
}
/// getEscapedNewLineSize - Return the size of the specified escaped newline,
@@ -489,12 +524,14 @@ private:
/// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
/// diagnostic.
static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
- const LangOptions &Features);
+ const LangOptions &LangOpts);
//===--------------------------------------------------------------------===//
// Other lexer functions.
void SkipBytes(unsigned Bytes, bool StartOfLine);
+
+ const char *LexUDSuffix(Token &Result, const char *CurPtr);
// Helper functions to lex the remainder of a token of the specific type.
void LexIdentifier (Token &Result, const char *CurPtr);
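A sketch combining the new static helpers to recover the spelled text of a range:

#include "clang/Lex/Lexer.h"
#include "llvm/ADT/StringRef.h"

llvm::StringRef spelledText(clang::CharSourceRange Range,
                            const clang::SourceManager &SM,
                            const clang::LangOptions &LangOpts) {
  clang::CharSourceRange FileRange =
      clang::Lexer::makeFileCharRange(Range, SM, LangOpts);
  if (FileRange.isInvalid())
    return llvm::StringRef(); // part of the range was inside a macro
  return clang::Lexer::getSourceText(FileRange, SM, LangOpts);
}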
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
index b33092c..7e7f82f 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
@@ -45,7 +45,7 @@ class NumericLiteralParser {
unsigned radix;
- bool saw_exponent, saw_period;
+ bool saw_exponent, saw_period, saw_ud_suffix;
public:
NumericLiteralParser(const char *begin, const char *end,
@@ -64,8 +64,17 @@ public:
bool isFloatingLiteral() const {
return saw_period || saw_exponent;
}
- bool hasSuffix() const {
- return SuffixBegin != ThisTokEnd;
+
+ bool hasUDSuffix() const {
+ return saw_ud_suffix;
+ }
+ StringRef getUDSuffix() const {
+ assert(saw_ud_suffix);
+ return StringRef(SuffixBegin, ThisTokEnd - SuffixBegin);
+ }
+ unsigned getUDSuffixOffset() const {
+ assert(saw_ud_suffix);
+ return SuffixBegin - ThisTokBegin;
}
unsigned getRadix() const { return radix; }
@@ -128,6 +137,8 @@ class CharLiteralParser {
tok::TokenKind Kind;
bool IsMultiChar;
bool HadError;
+ SmallString<32> UDSuffixBuf;
+ unsigned UDSuffixOffset;
public:
CharLiteralParser(const char *begin, const char *end,
SourceLocation Loc, Preprocessor &PP,
@@ -140,6 +151,11 @@ public:
bool isUTF32() const { return Kind == tok::utf32_char_constant; }
bool isMultiChar() const { return IsMultiChar; }
uint64_t getValue() const { return Value; }
+ StringRef getUDSuffix() const { return UDSuffixBuf; }
+ unsigned getUDSuffixOffset() const {
+ assert(!UDSuffixBuf.empty() && "no ud-suffix");
+ return UDSuffixOffset;
+ }
};
/// StringLiteralParser - This decodes string escape characters and performs
@@ -155,8 +171,11 @@ class StringLiteralParser {
unsigned SizeBound;
unsigned CharByteWidth;
tok::TokenKind Kind;
- llvm::SmallString<512> ResultBuf;
+ SmallString<512> ResultBuf;
char *ResultPtr; // cursor
+ SmallString<32> UDSuffixBuf;
+ unsigned UDSuffixToken;
+ unsigned UDSuffixOffset;
public:
StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
Preprocessor &PP, bool Complain = true);
@@ -189,15 +208,30 @@ public:
/// checking of the string literal and emit errors and warnings.
unsigned getOffsetOfStringByte(const Token &TheTok, unsigned ByteNo) const;
- bool isAscii() { return Kind == tok::string_literal; }
- bool isWide() { return Kind == tok::wide_string_literal; }
- bool isUTF8() { return Kind == tok::utf8_string_literal; }
- bool isUTF16() { return Kind == tok::utf16_string_literal; }
- bool isUTF32() { return Kind == tok::utf32_string_literal; }
+ bool isAscii() const { return Kind == tok::string_literal; }
+ bool isWide() const { return Kind == tok::wide_string_literal; }
+ bool isUTF8() const { return Kind == tok::utf8_string_literal; }
+ bool isUTF16() const { return Kind == tok::utf16_string_literal; }
+ bool isUTF32() const { return Kind == tok::utf32_string_literal; }
+ bool isPascal() const { return Pascal; }
+
+ StringRef getUDSuffix() const { return UDSuffixBuf; }
+
+ /// Get the index of a token containing a ud-suffix.
+ unsigned getUDSuffixToken() const {
+ assert(!UDSuffixBuf.empty() && "no ud-suffix");
+ return UDSuffixToken;
+ }
+ /// Get the spelling offset of the first byte of the ud-suffix.
+ unsigned getUDSuffixOffset() const {
+ assert(!UDSuffixBuf.empty() && "no ud-suffix");
+ return UDSuffixOffset;
+ }
private:
void init(const Token *StringToks, unsigned NumStringToks);
- void CopyStringFragment(StringRef Fragment);
+ bool CopyStringFragment(StringRef Fragment);
+ bool DiagnoseBadString(const Token& Tok);
};
} // end namespace clang
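A sketch of the new ud-suffix accessors, assuming the parser was constructed over a numeric token's spelling:

#include "clang/Lex/LiteralSupport.h"
#include "llvm/ADT/StringRef.h"

llvm::StringRef udSuffixOf(const clang::NumericLiteralParser &Literal) {
  // An empty result means the literal carried no user-defined suffix.
  return Literal.hasUDSuffix() ? Literal.getUDSuffix() : llvm::StringRef();
}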
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
index b381e0f..8775d39 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
@@ -39,10 +39,11 @@ class MacroInfo {
IdentifierInfo **ArgumentList;
unsigned NumArguments;
- /// \brief The location at which this macro was exported from its module.
+ /// \brief The location at which this macro was either explicitly exported
+ /// from its module or marked as private.
///
- /// If invalid, this macro has not been explicitly exported.
- SourceLocation ExportLocation;
+ /// If invalid, this macro has not been explicitly given any visibility.
+ SourceLocation VisibilityLocation;
/// ReplacementTokens - This is the list of tokens that the macro is defined
/// to.
@@ -97,6 +98,9 @@ private:
/// \brief Must warn if the macro is unused at the end of translation unit.
bool IsWarnIfUnused : 1;
+ /// \brief Whether the macro is public (when described in a module).
+ bool IsPublic : 1;
+
~MacroInfo() {
assert(ArgumentList == 0 && "Didn't call destroy before dtor!");
}
@@ -279,17 +283,18 @@ public:
}
/// \brief Set the visibility (public or private) of this macro.
- void setExportLocation(SourceLocation ExportLoc) {
- ExportLocation = ExportLoc;
+ void setVisibility(bool Public, SourceLocation Loc) {
+ VisibilityLocation = Loc;
+ IsPublic = Public;
}
- /// \brief Determine whether this macro was explicitly exported from its
+ /// \brief Determine whether this macro is part of the public API of its
/// module.
- bool isExported() const { return ExportLocation.isValid(); }
+ bool isPublic() const { return IsPublic; }
- /// \brief Determine the location where this macro was explicitly exported
- /// from its module.
- SourceLocation getExportLocation() { return ExportLocation; }
+ /// \brief Determine the location where this macro was explicitly made
+ /// public or private within its module.
+ SourceLocation getVisibilityLocation() { return VisibilityLocation; }
private:
unsigned getDefinitionLengthSlow(SourceManager &SM) const;
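A sketch of the visibility API that replaces setExportLocation; "MI" and "Loc" are assumed to come from #pragma handling:

#include "clang/Lex/MacroInfo.h"
#include <cassert>

void markPrivate(clang::MacroInfo &MI, clang::SourceLocation Loc) {
  MI.setVisibility(/*Public=*/false, Loc); // was: MI.setExportLocation(Loc)
  assert(!MI.isPublic() && MI.getVisibilityLocation() == Loc);
}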
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h b/contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h
index 72ec0e3..36d03c0 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h
@@ -14,15 +14,18 @@
#ifndef LLVM_CLANG_LEX_MODULE_LOADER_H
#define LLVM_CLANG_LEX_MODULE_LOADER_H
+#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/ArrayRef.h"
namespace clang {
class IdentifierInfo;
-/// \brief An opaque key that is used to describe the module and can be
-/// interpreted by the module loader itself.
-typedef void *ModuleKey;
+/// \brief A sequence of identifier/location pairs used to describe a particular
+/// module or submodule, e.g., std.vector.
+typedef llvm::ArrayRef<std::pair<IdentifierInfo*, SourceLocation> >
+ ModuleIdPath;
/// \brief Abstract interface for a module loader.
///
@@ -39,15 +42,22 @@ public:
/// parameters.
///
/// \param ImportLoc The location of the 'import' keyword.
- /// \param ModuleName The name of the module to be loaded.
- /// \param ModuleNameLoc The location of the module name.
///
- /// \returns If successful, a non-NULL module key describing this module.
- /// Otherwise, returns NULL to indicate that the module could not be
- /// loaded.
- virtual ModuleKey loadModule(SourceLocation ImportLoc,
- IdentifierInfo &ModuleName,
- SourceLocation ModuleNameLoc) = 0;
+ /// \param Path The identifiers (and their locations) of the module
+ /// "path", e.g., "std.vector" would be split into "std" and "vector".
+ ///
+ /// \param Visibility The visibility provided for the names in the loaded
+ /// module.
+ ///
+ /// \param IsInclusionDirective Indicates that this module is being loaded
+ /// implicitly, due to the presence of an inclusion directive. Otherwise,
+ /// it is being loaded due to an import declaration.
+ ///
+ /// \returns If successful, returns the loaded module. Otherwise, returns
+ /// NULL to indicate that the module could not be loaded.
+ virtual Module *loadModule(SourceLocation ImportLoc, ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective) = 0;
};
}
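A sketch of a conforming implementation of the new interface; this stub simply refuses every import:

#include "clang/Lex/ModuleLoader.h"

class NullModuleLoader : public clang::ModuleLoader {
public:
  virtual clang::Module *loadModule(clang::SourceLocation ImportLoc,
                                    clang::ModuleIdPath Path,
                                    clang::Module::NameVisibilityKind Visibility,
                                    bool IsInclusionDirective) {
    return 0; // NULL: the module could not be loaded
  }
};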
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
new file mode 100644
index 0000000..4ebb1d4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
@@ -0,0 +1,237 @@
+//===--- ModuleMap.h - Describe the layout of modules -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleMap interface, which describes the layout of a
+// module as it relates to headers.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CLANG_LEX_MODULEMAP_H
+#define LLVM_CLANG_LEX_MODULEMAP_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringMap.h"
+#include <string>
+
+namespace clang {
+
+class DirectoryEntry;
+class FileEntry;
+class FileManager;
+class DiagnosticConsumer;
+class DiagnosticsEngine;
+class ModuleMapParser;
+
+class ModuleMap {
+ SourceManager *SourceMgr;
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags;
+ const LangOptions &LangOpts;
+ const TargetInfo *Target;
+
+ /// \brief The directory used for Clang-supplied, builtin include headers,
+ /// such as "stdint.h".
+ const DirectoryEntry *BuiltinIncludeDir;
+
+ /// \brief Language options used to parse the module map itself.
+ ///
+ /// These are always simple C language options.
+ LangOptions MMapLangOpts;
+
+ /// \brief The top-level modules that are known.
+ llvm::StringMap<Module *> Modules;
+
+ /// \brief Mapping from each header to the module that owns the contents of
+ /// that header.
+ llvm::DenseMap<const FileEntry *, Module *> Headers;
+
+ /// \brief Mapping from directories with umbrella headers to the module
+ /// that is generated from the umbrella header.
+ ///
+ /// This mapping is used to map headers that haven't explicitly been named
+ /// in the module map over to the module that includes them via its umbrella
+ /// header.
+ llvm::DenseMap<const DirectoryEntry *, Module *> UmbrellaDirs;
+
+ friend class ModuleMapParser;
+
+ /// \brief Resolve the given export declaration into an actual export
+ /// declaration.
+ ///
+ /// \param Mod The module in which we're resolving the export declaration.
+ ///
+ /// \param Unresolved The export declaration to resolve.
+ ///
+ /// \param Complain Whether this routine should complain about unresolvable
+ /// exports.
+ ///
+ /// \returns The resolved export declaration, which will have a NULL pointer
+ /// if the export could not be resolved.
+ Module::ExportDecl
+ resolveExport(Module *Mod, const Module::UnresolvedExportDecl &Unresolved,
+ bool Complain);
+
+public:
+ /// \brief Construct a new module map.
+ ///
+ /// \param FileMgr The file manager used to find module files and headers.
+ /// This file manager should be shared with the header-search mechanism, since
+ /// they will refer to the same headers.
+ ///
+ /// \param DC A diagnostic consumer that will be cloned for use in generating
+ /// diagnostics.
+ ///
+ /// \param LangOpts Language options for this translation unit.
+ ///
+ /// \param Target The target for this translation unit.
+ ModuleMap(FileManager &FileMgr, const DiagnosticConsumer &DC,
+ const LangOptions &LangOpts, const TargetInfo *Target);
+
+ /// \brief Destroy the module map.
+ ///
+ ~ModuleMap();
+
+ /// \brief Set the target information.
+ void setTarget(const TargetInfo &Target);
+
+ /// \brief Set the directory that contains Clang-supplied include
+ /// files, such as our stdarg.h or tgmath.h.
+ void setBuiltinIncludeDir(const DirectoryEntry *Dir) {
+ BuiltinIncludeDir = Dir;
+ }
+
+ /// \brief Retrieve the module that owns the given header file, if any.
+ ///
+ /// \param File The header file that is likely to be included.
+ ///
+ /// \returns The module that owns the given header file, or null to indicate
+ /// that no module owns this header file.
+ Module *findModuleForHeader(const FileEntry *File);
+
+ /// \brief Determine whether the given header is part of a module
+ /// marked 'unavailable'.
+ bool isHeaderInUnavailableModule(const FileEntry *Header);
+
+ /// \brief Retrieve a module with the given name.
+ ///
+ /// \param Name The name of the module to look up.
+ ///
+ /// \returns The named module, if known; otherwise, returns null.
+ Module *findModule(StringRef Name);
+
+ /// \brief Retrieve a module with the given name using lexical name lookup,
+ /// starting at the given context.
+ ///
+ /// \param Name The name of the module to look up.
+ ///
+ /// \param Context The module context, from which we will perform lexical
+ /// name lookup.
+ ///
+ /// \returns The named module, if known; otherwise, returns null.
+ Module *lookupModuleUnqualified(StringRef Name, Module *Context);
+
+ /// \brief Retrieve a module with the given name within the given context,
+ /// using direct (qualified) name lookup.
+ ///
+ /// \param Name The name of the module to look up.
+ ///
+ /// \param Context The module for which we will look for a submodule. If
+ /// null, we will look for a top-level module.
+ ///
+ /// \returns The named submodule, if known; otherwise, returns null.
+ Module *lookupModuleQualified(StringRef Name, Module *Context);
+
+ /// \brief Find a new module or submodule, or create it if it does not already
+ /// exist.
+ ///
+ /// \param Name The name of the module to find or create.
+ ///
+ /// \param Parent The module that will act as the parent of this submodule,
+ /// or NULL to indicate that this is a top-level module.
+ ///
+ /// \param IsFramework Whether this is a framework module.
+ ///
+ /// \param IsExplicit Whether this is an explicit submodule.
+ ///
+ /// \returns The found or newly-created module, along with a boolean value
+ /// that will be true if the module is newly-created.
+ std::pair<Module *, bool> findOrCreateModule(StringRef Name, Module *Parent,
+ bool IsFramework,
+ bool IsExplicit);
+
+ /// \brief Infer the contents of a framework module map from the given
+ /// framework directory.
+ Module *inferFrameworkModule(StringRef ModuleName,
+ const DirectoryEntry *FrameworkDir,
+ bool IsSystem, Module *Parent);
+
+ /// \brief Retrieve the module map file containing the definition of the given
+ /// module.
+ ///
+ /// \param Module The module whose module map file will be returned, if known.
+ ///
+ /// \returns The file entry for the module map file containing the given
+ /// module, or NULL if the module definition was inferred.
+ const FileEntry *getContainingModuleMapFile(Module *Module);
+
+ /// \brief Resolve all of the unresolved exports in the given module.
+ ///
+ /// \param Mod The module whose exports should be resolved.
+ ///
+ /// \param Complain Whether to emit diagnostics for failures.
+ ///
+ /// \returns true if any errors were encountered while resolving exports,
+ /// false otherwise.
+ bool resolveExports(Module *Mod, bool Complain);
+
+ /// \brief Infers the (sub)module based on the given source location and
+ /// source manager.
+ ///
+ /// \param Loc The location within the source that we are querying, along
+ /// with its source manager.
+ ///
+ /// \returns The module that owns this source location, or null if no
+ /// module owns this source location.
+ Module *inferModuleFromLocation(FullSourceLoc Loc);
+
+ /// \brief Sets the umbrella header of the given module to the given
+ /// header.
+ void setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader);
+
+ /// \brief Sets the umbrella directory of the given module to the given
+ /// directory.
+ void setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir);
+
+ /// \brief Adds this header to the given module.
+ void addHeader(Module *Mod, const FileEntry *Header);
+
+ /// \brief Parse the given module map file, and record any modules we
+ /// encounter.
+ ///
+ /// \param File The file to be parsed.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool parseModuleMapFile(const FileEntry *File);
+
+ /// \brief Dump the contents of the module map, for debugging purposes.
+ void dump();
+
+ typedef llvm::StringMap<Module *>::const_iterator module_iterator;
+ module_iterator module_begin() const { return Modules.begin(); }
+ module_iterator module_end() const { return Modules.end(); }
+};
+
+}
+#endif
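A usage sketch for findOrCreateModule; "Inner" and the parent module are hypothetical:

#include "clang/Lex/ModuleMap.h"
#include <utility>

clang::Module *addSubmodule(clang::ModuleMap &MMap, clang::Module *Parent) {
  // The bool half of the result is true only if this call created "Inner".
  std::pair<clang::Module *, bool> Result =
      MMap.findOrCreateModule("Inner", Parent,
                              /*IsFramework=*/false, /*IsExplicit=*/true);
  return Result.first;
}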
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
index 1fc1a05..33558c8 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
@@ -58,6 +58,23 @@ public:
SrcMgr::CharacteristicKind FileType) {
}
+ /// FileNotFound - This callback is invoked whenever an inclusion directive
+ /// results in a file-not-found error.
+ ///
+ /// \param FileName The name of the file being included, as written in the
+ /// source code.
+ ///
+ /// \param RecoveryPath If this client indicates that it can recover from
+ /// this missing file, the client should set this as an additional header
+ /// search path.
+ ///
+ /// \returns true to indicate that the preprocessor should attempt to recover
+ /// by adding \p RecoveryPath as a header search path.
+ virtual bool FileNotFound(StringRef FileName,
+ SmallVectorImpl<char> &RecoveryPath) {
+ return false;
+ }
+
/// \brief This callback is invoked whenever an inclusion directive of
/// any kind (\c #include, \c #import, etc.) has been processed, regardless
/// of whether the inclusion will actually result in an inclusion.
@@ -173,40 +190,49 @@ public:
}
/// If -- This hook is called whenever an #if is seen.
- /// \param Range The SourceRange of the expression being tested.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
// FIXME: better to pass in a list (or tree!) of Tokens.
- virtual void If(SourceRange Range) {
+ virtual void If(SourceLocation Loc, SourceRange ConditionRange) {
}
/// Elif -- This hook is called whenever an #elif is seen.
- /// \param Range The SourceRange of the expression being tested.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
// FIXME: better to pass in a list (or tree!) of Tokens.
- virtual void Elif(SourceRange Range) {
+ virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) {
}
/// Ifdef -- This hook is called whenever an #ifdef is seen.
- /// \param Loc The location of the token being tested.
+ /// \param Loc the source location of the directive.
/// \param MacroNameTok Information on the token being tested.
- virtual void Ifdef(const Token &MacroNameTok) {
+ virtual void Ifdef(SourceLocation Loc, const Token &MacroNameTok) {
}
/// Ifndef -- This hook is called whenever an #ifndef is seen.
- /// \param Loc The location of the token being tested.
+ /// \param Loc the source location of the directive.
/// \param MacroNameTok Information on the token being tested.
- virtual void Ifndef(const Token &MacroNameTok) {
+ virtual void Ifndef(SourceLocation Loc, const Token &MacroNameTok) {
}
/// Else -- This hook is called whenever an #else is seen.
- virtual void Else() {
+ /// \param Loc the source location of the directive.
+ /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
+ virtual void Else(SourceLocation Loc, SourceLocation IfLoc) {
}
/// Endif -- This hook is called whenever an #endif is seen.
- virtual void Endif() {
+ /// \param Loc the source location of the directive.
+ /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
+ virtual void Endif(SourceLocation Loc, SourceLocation IfLoc) {
}
};
/// PPChainedCallbacks - Simple wrapper class for chaining callbacks.
class PPChainedCallbacks : public PPCallbacks {
+ virtual void anchor();
PPCallbacks *First, *Second;
public:
@@ -231,6 +257,12 @@ public:
Second->FileSkipped(ParentFile, FilenameTok, FileType);
}
+ virtual bool FileNotFound(StringRef FileName,
+ SmallVectorImpl<char> &RecoveryPath) {
+ return First->FileNotFound(FileName, RecoveryPath) ||
+ Second->FileNotFound(FileName, RecoveryPath);
+ }
+
virtual void InclusionDirective(SourceLocation HashLoc,
const Token &IncludeTok,
StringRef FileName,
@@ -311,39 +343,40 @@ public:
}
/// If -- This hook is called whenever an #if is seen.
- virtual void If(SourceRange Range) {
- First->If(Range);
- Second->If(Range);
+ virtual void If(SourceLocation Loc, SourceRange ConditionRange) {
+ First->If(Loc, ConditionRange);
+ Second->If(Loc, ConditionRange);
}
/// Elif -- This hook is called whenever an #elif is seen.
- virtual void Elif(SourceRange Range) {
- First->Elif(Range);
- Second->Elif(Range);
+ virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) {
+ First->Elif(Loc, ConditionRange, IfLoc);
+ Second->Elif(Loc, ConditionRange, IfLoc);
}
/// Ifdef -- This hook is called whenever an #ifdef is seen.
- virtual void Ifdef(const Token &MacroNameTok) {
- First->Ifdef(MacroNameTok);
- Second->Ifdef(MacroNameTok);
+ virtual void Ifdef(SourceLocation Loc, const Token &MacroNameTok) {
+ First->Ifdef(Loc, MacroNameTok);
+ Second->Ifdef(Loc, MacroNameTok);
}
/// Ifndef -- This hook is called whenever an #ifndef is seen.
- virtual void Ifndef(const Token &MacroNameTok) {
- First->Ifndef(MacroNameTok);
- Second->Ifndef(MacroNameTok);
+ virtual void Ifndef(SourceLocation Loc, const Token &MacroNameTok) {
+ First->Ifndef(Loc, MacroNameTok);
+ Second->Ifndef(Loc, MacroNameTok);
}
/// Else -- This hook is called whenever an #else is seen.
- virtual void Else() {
- First->Else();
- Second->Else();
+ virtual void Else(SourceLocation Loc, SourceLocation IfLoc) {
+ First->Else(Loc, IfLoc);
+ Second->Else(Loc, IfLoc);
}
/// Endif -- This hook is called whenever an #endif is seen.
- virtual void Endif() {
- First->Endif();
- Second->Endif();
+ virtual void Endif(SourceLocation Loc, SourceLocation IfLoc) {
+ First->Endif(Loc, IfLoc);
+ Second->Endif(Loc, IfLoc);
}
};
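
To show the shape of the new hooks from a client's perspective, here is a minimal sketch of a PPCallbacks subclass; the fallback include directory is a made-up example, and only signatures introduced or changed above are used.

    #include "clang/Lex/PPCallbacks.h"
    using namespace clang;

    class DirectiveLogger : public PPCallbacks {
    public:
      // Recover from a missing include by proposing an extra search path;
      // returning true tells the preprocessor to retry with RecoveryPath.
      virtual bool FileNotFound(StringRef FileName,
                                SmallVectorImpl<char> &RecoveryPath) {
        StringRef Fallback = "/usr/local/include"; // hypothetical fallback
        RecoveryPath.append(Fallback.begin(), Fallback.end());
        return true;
      }

      // The conditional hooks now receive the directive's own location, and
      // #elif/#else/#endif also receive the location of the opening #if.
      virtual void If(SourceLocation Loc, SourceRange ConditionRange) {}
      virtual void Endif(SourceLocation Loc, SourceLocation IfLoc) {}
    };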
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
index 53da19e..45e3a5d 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
@@ -17,8 +17,11 @@
#include "clang/Lex/PPCallbacks.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
#include <vector>
namespace clang {
@@ -85,7 +88,7 @@ namespace clang {
/// \brief Retrieve the source range that covers this entire preprocessed
/// entity.
- SourceRange getSourceRange() const { return Range; }
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
/// \brief Returns true if there was a problem loading the preprocessed
/// entity.
@@ -269,6 +272,13 @@ namespace clang {
/// preprocessed entities that \arg Range encompasses.
virtual std::pair<unsigned, unsigned>
findPreprocessedEntitiesInRange(SourceRange Range) = 0;
+
+ /// \brief Returns whether the preallocated preprocessed entity with index
+ /// \arg Index came from file \arg FID, or an empty Optional if unknown.
+ virtual llvm::Optional<bool> isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID) {
+ return llvm::Optional<bool>();
+ }
};
/// \brief A record of the steps taken while preprocessing a source file,
@@ -276,10 +286,6 @@ namespace clang {
/// expanded, etc.
class PreprocessingRecord : public PPCallbacks {
SourceManager &SourceMgr;
-
- /// \brief Whether we should include nested macro expansions in
- /// the preprocessing record.
- bool IncludeNestedMacroExpansions;
/// \brief Allocator used to store preprocessing objects.
llvm::BumpPtrAllocator BumpAlloc;
@@ -295,6 +301,44 @@ namespace clang {
/// and are referenced by the iterator using negative indices.
std::vector<PreprocessedEntity *> LoadedPreprocessedEntities;
+ bool RecordCondDirectives;
+ unsigned CondDirectiveNextIdx;
+ SmallVector<unsigned, 6> CondDirectiveStack;
+
+ class CondDirectiveLoc {
+ SourceLocation Loc;
+ unsigned Idx;
+
+ public:
+ CondDirectiveLoc(SourceLocation Loc, unsigned Idx) : Loc(Loc), Idx(Idx) {}
+
+ SourceLocation getLoc() const { return Loc; }
+ unsigned getIdx() const { return Idx; }
+
+ class Comp {
+ SourceManager &SM;
+ public:
+ explicit Comp(SourceManager &SM) : SM(SM) {}
+ bool operator()(const CondDirectiveLoc &LHS,
+ const CondDirectiveLoc &RHS) {
+ return SM.isBeforeInTranslationUnit(LHS.getLoc(), RHS.getLoc());
+ }
+ bool operator()(const CondDirectiveLoc &LHS, SourceLocation RHS) {
+ return SM.isBeforeInTranslationUnit(LHS.getLoc(), RHS);
+ }
+ bool operator()(SourceLocation LHS, const CondDirectiveLoc &RHS) {
+ return SM.isBeforeInTranslationUnit(LHS, RHS.getLoc());
+ }
+ };
+ };
+
+ typedef std::vector<CondDirectiveLoc> CondDirectiveLocsTy;
+ /// \brief The locations of conditional directives in source order.
+ CondDirectiveLocsTy CondDirectiveLocs;
+
+ void addCondDirectiveLoc(CondDirectiveLoc DirLoc);
+ unsigned findCondDirectiveIdx(SourceLocation Loc) const;
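
The three operator() overloads of Comp let std::lower_bound compare a bare SourceLocation against the stored CondDirectiveLoc entries without building temporaries. The committed definition of findCondDirectiveIdx lives in the corresponding .cpp file, not in this diff; the following is only a plausible sketch of that pattern:

    #include <algorithm>
    // Hypothetical sketch, not the committed implementation.
    unsigned PreprocessingRecord::findCondDirectiveIdx(SourceLocation Loc) const {
      CondDirectiveLocsTy::const_iterator I =
          std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
                           Loc, CondDirectiveLoc::Comp(SourceMgr));
      // The entry preceding the insertion point is the last directive that
      // occurs before Loc; its index names the region containing Loc.
      if (I == CondDirectiveLocs.begin())
        return 0;
      return (--I)->getIdx();
    }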
+
/// \brief Global (loaded or local) ID for a preprocessed entity.
/// Negative values are used to indicate preprocessed entities
/// loaded from the external source while non-negative values are used to
@@ -345,7 +389,7 @@ namespace clang {
public:
/// \brief Construct a new preprocessing record.
- PreprocessingRecord(SourceManager &SM, bool IncludeNestedMacroExpansions);
+ PreprocessingRecord(SourceManager &SM, bool RecordConditionalDirectives);
/// \brief Allocate memory in the preprocessing record.
void *Allocate(unsigned Size, unsigned Align = 8) {
@@ -386,7 +430,7 @@ namespace clang {
iterator() : Self(0), Position(0) { }
- iterator(PreprocessingRecord *Self, int Position)
+ iterator(PreprocessingRecord *Self, PPEntityID Position)
: Self(Self), Position(Position) { }
value_type operator*() const {
@@ -471,6 +515,7 @@ namespace clang {
X.Position -= D;
return X;
}
+ friend class PreprocessingRecord;
};
friend class iterator;
@@ -496,11 +541,41 @@ namespace clang {
/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
/// that source range \arg R encompasses.
+ ///
+ /// \param R the range to look for preprocessed entities.
+ ///
std::pair<iterator, iterator> getPreprocessedEntitiesInRange(SourceRange R);
+ /// \brief Returns true if the preprocessed entity that the \arg PPEI iterator
+ /// points to comes from the file \arg FID.
+ ///
+ /// Can be used to avoid implicit deserialization of preallocated
+ /// preprocessed entities if we only care about entities of a specific file
+ /// and not those from files #included in the range given at
+ /// \see getPreprocessedEntitiesInRange.
+ bool isEntityInFileID(iterator PPEI, FileID FID);
+
/// \brief Add a new preprocessed entity to this record.
- void addPreprocessedEntity(PreprocessedEntity *Entity);
-
+ PPEntityID addPreprocessedEntity(PreprocessedEntity *Entity);
+
+ /// \brief Returns true if this PreprocessingRecord is keeping track of
+ /// conditional directive locations.
+ bool isRecordingConditionalDirectives() const {
+ return RecordCondDirectives;
+ }
+
+ /// \brief Returns true if the given range intersects with a conditional
+ /// directive. If a #if/#endif block is fully contained within the range,
+ /// this function will return false.
+ bool rangeIntersectsConditionalDirective(SourceRange Range) const;
+
+ /// \brief Returns true if the given locations are in different regions,
+ /// separated by conditional directive blocks.
+ bool areInDifferentConditionalDirectiveRegion(SourceLocation LHS,
+ SourceLocation RHS) const {
+ return findCondDirectiveIdx(LHS) != findCondDirectiveIdx(RHS);
+ }
+
/// \brief Set the external source for preprocessed entities.
void SetExternalSource(ExternalPreprocessingRecordSource &Source);
@@ -513,6 +588,7 @@ namespace clang {
/// \c MacroInfo.
MacroDefinition *findMacroDefinition(const MacroInfo *MI);
+ private:
virtual void MacroExpands(const Token &Id, const MacroInfo* MI,
SourceRange Range);
virtual void MacroDefined(const Token &Id, const MacroInfo *MI);
@@ -525,6 +601,23 @@ namespace clang {
SourceLocation EndLoc,
StringRef SearchPath,
StringRef RelativePath);
+ virtual void If(SourceLocation Loc, SourceRange ConditionRange);
+ virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc);
+ virtual void Ifdef(SourceLocation Loc, const Token &MacroNameTok);
+ virtual void Ifndef(SourceLocation Loc, const Token &MacroNameTok);
+ virtual void Else(SourceLocation Loc, SourceLocation IfLoc);
+ virtual void Endif(SourceLocation Loc, SourceLocation IfLoc);
+
+ /// \brief Cached result of the last \see getPreprocessedEntitiesInRange
+ /// query.
+ struct {
+ SourceRange Range;
+ std::pair<PPEntityID, PPEntityID> Result;
+ } CachedRangeQuery;
+
+ std::pair<PPEntityID, PPEntityID>
+ getPreprocessedEntitiesInRangeSlow(SourceRange R);
friend class ASTReader;
friend class ASTWriter;
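
Putting the new pieces together, a consumer might filter the range query to a single file like this sketch (a hypothetical function; it relies only on getPreprocessedEntitiesInRange() and isEntityInFileID() declared above):

    void visitEntitiesInFile(clang::PreprocessingRecord &PPRec,
                             clang::SourceRange R, clang::FileID FID) {
      using namespace clang;
      typedef PreprocessingRecord::iterator iterator;
      std::pair<iterator, iterator> Ents =
          PPRec.getPreprocessedEntitiesInRange(R);
      for (iterator I = Ents.first; I != Ents.second; ++I) {
        // Skip entities spelled in #included files without deserializing them.
        if (!PPRec.isEntityInFileID(I, FID))
          continue;
        PreprocessedEntity *PE = *I; // deserialization may happen here
        (void)PE; // ... examine the entity ...
      }
    }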
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
index 8b77433..055008f 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
@@ -33,6 +33,10 @@
#include "llvm/Support/Allocator.h"
#include <vector>
+namespace llvm {
+ template<unsigned InternalLen> class SmallString;
+}
+
namespace clang {
class SourceManager;
@@ -50,15 +54,15 @@ class CodeCompletionHandler;
class DirectoryLookup;
class PreprocessingRecord;
class ModuleLoader;
-
+
/// Preprocessor - This object engages in a tight little dance with the lexer to
/// efficiently preprocess tokens. Lexers know only about tokens within a
/// single source file, and don't know anything about preprocessor-level issues
/// like the #include stack, token expansion, etc.
///
-class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
+class Preprocessor : public RefCountedBase<Preprocessor> {
DiagnosticsEngine *Diags;
- LangOptions &Features;
+ LangOptions &LangOpts;
const TargetInfo *Target;
FileManager &FileMgr;
SourceManager &SourceMgr;
@@ -69,10 +73,10 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
/// \brief External source of macros.
ExternalPreprocessorSource *ExternalSource;
-
+
/// PTH - An optional PTHManager object used for getting tokens from
/// a token cache rather than lexing the original source file.
- llvm::OwningPtr<PTHManager> PTH;
+ OwningPtr<PTHManager> PTH;
/// BP - A BumpPtrAllocator object used to quickly allocate and release
/// objects internal to the Preprocessor.
@@ -107,8 +111,7 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
bool KeepComments : 1;
bool KeepMacroComments : 1;
bool SuppressIncludeNotFoundError : 1;
- bool AutoModuleImport : 1;
-
+
// State that changes while the preprocessor runs:
bool InMacroArgs : 1; // True if parsing fn macro invocation args.
@@ -121,6 +124,9 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
/// \brief Whether we have already loaded macros from the external source.
mutable bool ReadMacrosFromExternalSource : 1;
+ /// \brief True if we are pre-expanding macro arguments.
+ bool InMacroArgPreExpansion;
+
/// Identifiers - This is mapping/lookup information for all identifiers in
/// the program, including program keywords.
mutable IdentifierTable Identifiers;
@@ -145,9 +151,13 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
/// with this preprocessor.
std::vector<CommentHandler *> CommentHandlers;
+ /// \brief True if we want to ignore the EOF token and continue later on
+ /// (thus avoiding tearing down the Lexer, etc.).
+ bool IncrementalProcessing;
+
/// \brief The code-completion handler.
CodeCompletionHandler *CodeComplete;
-
+
/// \brief The file that we're performing code-completion for, if any.
const FileEntry *CodeCompletionFile;
@@ -163,14 +173,22 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
/// for preprocessing.
SourceLocation CodeCompletionFileLoc;
- /// \brief The source location of the __import_module__ keyword we just
+ /// \brief The source location of the 'import' contextual keyword we just
/// lexed, if any.
SourceLocation ModuleImportLoc;
+ /// \brief The module import path that we're currently processing.
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2>
+ ModuleImportPath;
+
+ /// \brief Whether the module import expects an identifier next. Otherwise,
+ /// it expects a '.' or ';'.
+ bool ModuleImportExpectsIdentifier;
+
/// \brief The source location of the currently-active
/// #pragma clang arc_cf_code_audited begin.
SourceLocation PragmaARCCFCodeAuditedLoc;
-
+
/// \brief True if we hit the code-completion point.
bool CodeCompletionReached;
@@ -179,22 +197,22 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
/// with a flag that indicates whether skipping this number of bytes will
/// place the lexer at the start of a line.
std::pair<unsigned, bool> SkipMainFilePreamble;
-
+
/// CurLexer - This is the current top of the stack that we're lexing from if
/// not expanding a macro and we are lexing directly from source code.
/// Only one of CurLexer, CurPTHLexer, or CurTokenLexer will be non-null.
- llvm::OwningPtr<Lexer> CurLexer;
+ OwningPtr<Lexer> CurLexer;
/// CurPTHLexer - This is the current top of stack that we're lexing from if
/// not expanding from a macro and we are lexing from a PTH cache.
/// Only one of CurLexer, CurPTHLexer, or CurTokenLexer will be non-null.
- llvm::OwningPtr<PTHLexer> CurPTHLexer;
+ OwningPtr<PTHLexer> CurPTHLexer;
/// CurPPLexer - This is the current top of the stack what we're lexing from
/// if not expanding a macro. This is an alias for either CurLexer or
/// CurPTHLexer.
PreprocessorLexer *CurPPLexer;
-
+
/// CurLookup - The DirectoryLookup structure used to find the current
/// FileEntry, if CurLexer is non-null and if applicable. This allows us to
/// implement #include_next and find directory-specific properties.
@@ -202,13 +220,13 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
/// CurTokenLexer - This is the current macro we are expanding, if we are
/// expanding a macro. One of CurLexer and CurTokenLexer must be null.
- llvm::OwningPtr<TokenLexer> CurTokenLexer;
+ OwningPtr<TokenLexer> CurTokenLexer;
/// \brief The kind of lexer we're currently working with.
- enum CurLexerKind {
- CLK_Lexer,
- CLK_PTHLexer,
- CLK_TokenLexer,
+ enum CurLexerKind {
+ CLK_Lexer,
+ CLK_PTHLexer,
+ CLK_TokenLexer,
CLK_CachingLexer,
CLK_LexAfterModuleImport
} CurLexerKind;
@@ -224,10 +242,10 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
TokenLexer *TheTokenLexer;
const DirectoryLookup *TheDirLookup;
- IncludeStackInfo(enum CurLexerKind K, Lexer *L, PTHLexer* P,
+ IncludeStackInfo(enum CurLexerKind K, Lexer *L, PTHLexer* P,
PreprocessorLexer* PPL,
TokenLexer* TL, const DirectoryLookup *D)
- : CurLexerKind(K), TheLexer(L), ThePTHLexer(P), ThePPLexer(PPL),
+ : CurLexerKind(K), TheLexer(L), ThePTHLexer(P), ThePPLexer(PPL),
TheTokenLexer(TL), TheDirLookup(D) {}
};
std::vector<IncludeStackInfo> IncludeMacroStack;
@@ -254,9 +272,9 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
/// reused for quick allocation.
MacroArgs *MacroArgCache;
friend class MacroArgs;
-
- /// PragmaPushMacroInfo - For each IdentifierInfo used in a #pragma
- /// push_macro directive, we keep a MacroInfo stack used to restore
+
+ /// PragmaPushMacroInfo - For each IdentifierInfo used in a #pragma
+ /// push_macro directive, we keep a MacroInfo stack used to restore
/// previous macro value.
llvm::DenseMap<IdentifierInfo*, std::vector<MacroInfo*> > PragmaPushMacroInfo;
@@ -286,12 +304,12 @@ class Preprocessor : public llvm::RefCountedBase<Preprocessor> {
std::vector<std::pair<TokenLexer *, size_t> > MacroExpandingLexersStack;
/// \brief A record of the macro definitions and expansions that
- /// occurred during preprocessing.
+ /// occurred during preprocessing.
///
/// This is an optional side structure that can be enabled with
/// \c createPreprocessingRecord() prior to preprocessing.
PreprocessingRecord *Record;
-
+
private: // Cached tokens state.
typedef SmallVector<Token, 1> CachedTokensTy;
@@ -325,7 +343,7 @@ private: // Cached tokens state.
MacroInfoChain *MICache;
MacroInfo *getInfoForMacro(IdentifierInfo *II) const;
-
+
public:
Preprocessor(DiagnosticsEngine &diags, LangOptions &opts,
const TargetInfo *target,
@@ -333,7 +351,8 @@ public:
ModuleLoader &TheModuleLoader,
IdentifierInfoLookup *IILookup = 0,
bool OwnsHeaderSearch = false,
- bool DelayInitialization = false);
+ bool DelayInitialization = false,
+ bool IncrProcessing = false);
~Preprocessor();
@@ -342,11 +361,11 @@ public:
///
/// \param Target Information about the target.
void Initialize(const TargetInfo &Target);
-
+
DiagnosticsEngine &getDiagnostics() const { return *Diags; }
void setDiagnostics(DiagnosticsEngine &D) { Diags = &D; }
- const LangOptions &getLangOptions() const { return Features; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
const TargetInfo &getTargetInfo() const { return *Target; }
FileManager &getFileManager() const { return FileMgr; }
SourceManager &getSourceManager() const { return SourceMgr; }
@@ -371,7 +390,7 @@ public:
/// \brief Retrieve the module loader associated with this preprocessor.
ModuleLoader &getModuleLoader() const { return TheModuleLoader; }
-
+
/// SetCommentRetentionState - Control whether or not the preprocessor retains
/// comments in output.
void SetCommentRetentionState(bool KeepComments, bool KeepMacroComments) {
@@ -389,11 +408,6 @@ public:
return SuppressIncludeNotFoundError;
}
- /// \brief Specify whether automatic module imports are enabled.
- void setAutoModuleImport(bool AutoModuleImport = true) {
- this->AutoModuleImport = AutoModuleImport;
- }
-
/// isCurrentLexer - Return true if we are lexing directly from the specified
/// lexer.
bool isCurrentLexer(const PreprocessorLexer *L) const {
@@ -425,13 +439,14 @@ public:
MacroInfo *getMacroInfo(IdentifierInfo *II) const {
if (!II->hasMacroDefinition())
return 0;
-
+
return getInfoForMacro(II);
}
/// setMacroInfo - Specify a macro for this identifier.
///
- void setMacroInfo(IdentifierInfo *II, MacroInfo *MI);
+ void setMacroInfo(IdentifierInfo *II, MacroInfo *MI,
+ bool LoadedFromAST = false);
/// macro_iterator/macro_begin/macro_end - This allows you to walk the current
/// state of the macro table. This visits every currently-defined macro.
@@ -484,29 +499,29 @@ public:
void setCodeCompletionHandler(CodeCompletionHandler &Handler) {
CodeComplete = &Handler;
}
-
+
/// \brief Retrieve the current code-completion handler.
CodeCompletionHandler *getCodeCompletionHandler() const {
return CodeComplete;
}
-
+
/// \brief Clear out the code completion handler.
void clearCodeCompletionHandler() {
CodeComplete = 0;
}
-
+
/// \brief Hook used by the lexer to invoke the "natural language" code
/// completion point.
void CodeCompleteNaturalLanguage();
-
+
/// \brief Retrieve the preprocessing record, or NULL if there is no
/// preprocessing record.
PreprocessingRecord *getPreprocessingRecord() const { return Record; }
-
- /// \brief Create a new preprocessing record, which will keep track of
+
+ /// \brief Create a new preprocessing record, which will keep track of
/// all macro expansions, macro definitions, etc.
- void createPreprocessingRecord(bool IncludeNestedMacroExpansions);
-
+ void createPreprocessingRecord(bool RecordConditionalDirectives);
+
/// EnterMainSourceFile - Enter the specified FileID as the main source file,
/// which implicitly adds the builtin defines etc.
void EnterMainSourceFile();
@@ -588,7 +603,7 @@ public:
}
void LexAfterModuleImport(Token &Result);
-
+
/// LexNonComment - Lex a token. If it's a comment, keep lexing until we get
/// something not a comment. This is useful in -E -C mode where comments
/// would foul up preprocessor directive handling.
@@ -681,6 +696,18 @@ public:
CachedTokens[CachedLexPos-1] = Tok;
}
+ /// \brief Recompute the current lexer kind based on the CurLexer/CurPTHLexer/
+ /// CurTokenLexer pointers.
+ void recomputeCurLexerKind();
+
+ /// \brief Returns true if incremental processing is enabled.
+ bool isIncrementalProcessingEnabled() const { return IncrementalProcessing; }
+
+ /// \brief Enables or disables incremental processing.
+ void enableIncrementalProcessing(bool value = true) {
+ IncrementalProcessing = value;
+ }
+
/// \brief Specify the point at which code-completion will be performed.
///
/// \param File the file in which code completion should occur. If
@@ -745,11 +772,11 @@ public:
///
/// \param StartOfLine Whether skipping these bytes puts the lexer at the
/// start of a line.
- void setSkipMainFilePreamble(unsigned Bytes, bool StartOfLine) {
+ void setSkipMainFilePreamble(unsigned Bytes, bool StartOfLine) {
SkipMainFilePreamble.first = Bytes;
SkipMainFilePreamble.second = StartOfLine;
}
-
+
/// Diag - Forwarding function for diagnostics. This emits a diagnostic at
/// the specified Token's location, translating the token's start
/// position in the current buffer into a SourcePosition object for rendering.
@@ -771,7 +798,7 @@ public:
StringRef getSpelling(SourceLocation loc,
SmallVectorImpl<char> &buffer,
bool *invalid = 0) const {
- return Lexer::getSpelling(loc, buffer, SourceMgr, Features, invalid);
+ return Lexer::getSpelling(loc, buffer, SourceMgr, LangOpts, invalid);
}
/// getSpelling() - Return the 'spelling' of the Tok token. The spelling of a
@@ -782,7 +809,7 @@ public:
///
/// \param Invalid If non-null, will be set \c true if an error occurs.
std::string getSpelling(const Token &Tok, bool *Invalid = 0) const {
- return Lexer::getSpelling(Tok, SourceMgr, Features, Invalid);
+ return Lexer::getSpelling(Tok, SourceMgr, LangOpts, Invalid);
}
/// getSpelling - This method is used to get the spelling of a token into a
@@ -795,21 +822,21 @@ public:
/// to point to a constant buffer with the data already in it (avoiding a
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
- unsigned getSpelling(const Token &Tok, const char *&Buffer,
+ unsigned getSpelling(const Token &Tok, const char *&Buffer,
bool *Invalid = 0) const {
- return Lexer::getSpelling(Tok, Buffer, SourceMgr, Features, Invalid);
+ return Lexer::getSpelling(Tok, Buffer, SourceMgr, LangOpts, Invalid);
}
/// getSpelling - This method is used to get the spelling of a token into a
/// SmallVector. Note that the returned StringRef may not point to the
/// supplied buffer if a copy can be avoided.
StringRef getSpelling(const Token &Tok,
- SmallVectorImpl<char> &Buffer,
- bool *Invalid = 0) const;
+ SmallVectorImpl<char> &Buffer,
+ bool *Invalid = 0) const;
/// getSpellingOfSingleCharacterNumericConstant - Tok is a numeric constant
/// with length 1, return the character.
- char getSpellingOfSingleCharacterNumericConstant(const Token &Tok,
+ char getSpellingOfSingleCharacterNumericConstant(const Token &Tok,
bool *Invalid = 0) const {
assert(Tok.is(tok::numeric_constant) &&
Tok.getLength() == 1 && "Called on unsupported token");
@@ -824,6 +851,17 @@ public:
return *SourceMgr.getCharacterData(Tok.getLocation(), Invalid);
}
+ /// \brief Retrieve the name of the immediate macro expansion.
+ ///
+ /// This routine starts from a source location, and finds the name of the macro
+ /// responsible for its immediate expansion. It looks through any intervening
+ /// macro argument expansions to compute this. It returns a StringRef which
+ /// refers to the SourceManager-owned buffer of the source where that macro
+ /// name is spelled. Thus, the result shouldn't outlive the SourceManager.
+ StringRef getImmediateMacroName(SourceLocation Loc) {
+ return Lexer::getImmediateMacroName(Loc, SourceMgr, getLangOpts());
+ }
+
/// CreateString - Plop the specified string into a scratch buffer and set the
/// specified token's location and length to it. If specified, the source
/// location provides a location of the expansion point of the token.
@@ -847,19 +885,28 @@ public:
/// location pointing just past the end of the token; an offset of 1 produces
/// a source location pointing to the last character in the token, etc.
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0) {
- return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, Features);
+ return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
}
/// \brief Returns true if the given MacroID location points at the first
/// token of the macro expansion.
- bool isAtStartOfMacroExpansion(SourceLocation loc) const {
- return Lexer::isAtStartOfMacroExpansion(loc, SourceMgr, Features);
+ ///
+ /// \param MacroBegin If non-null and the function returns true, it is set to
+ /// the begin location of the macro.
+ bool isAtStartOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroBegin = 0) const {
+ return Lexer::isAtStartOfMacroExpansion(loc, SourceMgr, LangOpts,
+ MacroBegin);
}
/// \brief Returns true if the given MacroID location points at the last
/// token of the macro expansion.
- bool isAtEndOfMacroExpansion(SourceLocation loc) const {
- return Lexer::isAtEndOfMacroExpansion(loc, SourceMgr, Features);
+ ///
+ /// \param MacroEnd If non-null and the function returns true, it is set to
+ /// the end location of the macro.
+ bool isAtEndOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroEnd = 0) const {
+ return Lexer::isAtEndOfMacroExpansion(loc, SourceMgr, LangOpts, MacroEnd);
}
/// DumpToken - Print the token to stderr, used for debugging.
@@ -872,7 +919,7 @@ public:
/// token, return a new location that specifies a character within the token.
SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart,
unsigned Char) const {
- return Lexer::AdvanceToTokenCharacter(TokStart, Char, SourceMgr, Features);
+ return Lexer::AdvanceToTokenCharacter(TokStart, Char, SourceMgr, LangOpts);
}
/// IncrementPasteCounter - Increment the counters for the number of token
@@ -930,9 +977,18 @@ public:
private:
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
- IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except block
- IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __except filter expression
- IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; // __finally
+ // __except block
+ IdentifierInfo *Ident__exception_code,
+ *Ident___exception_code,
+ *Ident_GetExceptionCode;
+ // __except filter expression
+ IdentifierInfo *Ident__exception_info,
+ *Ident___exception_info,
+ *Ident_GetExceptionInfo;
+ // __finally
+ IdentifierInfo *Ident__abnormal_termination,
+ *Ident___abnormal_termination,
+ *Ident_AbnormalTermination;
public:
void PoisonSEHIdentifiers(bool Poison = true); // Borland
@@ -976,6 +1032,9 @@ public:
unsigned getCounterValue() const { return CounterValue; }
void setCounterValue(unsigned V) { CounterValue = V; }
+ /// \brief Retrieves the module that we're currently building, if any.
+ Module *getCurrentModule();
+
/// AllocateMacroInfo - Allocate a new MacroInfo object with the provide
/// SourceLocation.
MacroInfo *AllocateMacroInfo(SourceLocation L);
@@ -999,7 +1058,8 @@ public:
const DirectoryLookup *&CurDir,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- StringRef *SuggestedModule);
+ Module **SuggestedModule,
+ bool SkipCache = false);
/// GetCurLookup - The DirectoryLookup structure used to find the current
/// FileEntry, if CurLexer is non-null and if applicable. This allows us to
@@ -1020,7 +1080,7 @@ public:
/// This code concatenates and consumes tokens up to the '>' token. It
/// returns false if the > was found, otherwise it returns true if it finds
/// and consumes the EOD marker.
- bool ConcatenateIncludeName(llvm::SmallString<128> &FilenameBuffer,
+ bool ConcatenateIncludeName(SmallString<128> &FilenameBuffer,
SourceLocation &End);
/// LexOnOffSwitch - Lex an on-off-switch (C99 6.10.6p2) and verify that it is
@@ -1063,9 +1123,10 @@ private:
/// ReadMacroDefinitionArgList - The ( starting an argument list of a macro
/// definition has just been read. Lex the rest of the arguments and the
- /// closing ), updating MI with what we learn. Return true if an error occurs
- /// parsing the arg list.
- bool ReadMacroDefinitionArgList(MacroInfo *MI);
+ /// closing ), updating MI with what we learn and saving in LastTok the
+ /// last token read.
+ /// Return true if an error occurs parsing the arg list.
+ bool ReadMacroDefinitionArgList(MacroInfo *MI, Token& LastTok);
/// SkipExcludedConditionalBlock - We just read a #if or related directive and
/// decided that the subsequent tokens are in the #if'd out portion of the
@@ -1163,7 +1224,7 @@ private:
bool InCachingLexMode() const {
// If the Lexer pointers are 0 and IncludeMacroStack is empty, it means
// that we are past EOF, not that we are in CachingLex mode.
- return CurPPLexer == 0 && CurTokenLexer == 0 && CurPTHLexer == 0 &&
+ return CurPPLexer == 0 && CurTokenLexer == 0 && CurPTHLexer == 0 &&
!IncludeMacroStack.empty();
}
void EnterCachingLexMode();
@@ -1182,8 +1243,9 @@ private:
void HandleDigitDirective(Token &Tok);
void HandleUserDiagnosticDirective(Token &Tok, bool isWarning);
void HandleIdentSCCSDirective(Token &Tok);
- void HandleMacroExportDirective(Token &Tok);
-
+ void HandleMacroPublicDirective(Token &Tok);
+ void HandleMacroPrivateDirective(Token &Tok);
+
// File inclusion.
void HandleIncludeDirective(SourceLocation HashLoc,
Token &Tok,
@@ -1192,6 +1254,7 @@ private:
void HandleIncludeNextDirective(SourceLocation HashLoc, Token &Tok);
void HandleIncludeMacrosDirective(SourceLocation HashLoc, Token &Tok);
void HandleImportDirective(SourceLocation HashLoc, Token &Tok);
+ void HandleMicrosoftImportDirective(Token &Tok);
// Macro handling.
void HandleDefineDirective(Token &Tok);
@@ -1217,6 +1280,7 @@ public:
void HandlePragmaMessage(Token &MessageTok);
void HandlePragmaPushMacro(Token &Tok);
void HandlePragmaPopMacro(Token &Tok);
+ void HandlePragmaIncludeAlias(Token &Tok);
IdentifierInfo *ParsePragmaPushOrPopMacro(Token &Tok);
// Return true and store the first token only if any CommentHandler
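
For the new incremental-processing flag, a REPL-style driver is the intended kind of client; a minimal sketch, assuming an already-initialized Preprocessor (everything else here is hypothetical):

    void lexIncrementally(clang::Preprocessor &PP) {
      using namespace clang;
      PP.enableIncrementalProcessing(); // keep lexers alive across EOF
      Token Tok;
      do {
        PP.Lex(Tok);
        // ... feed Tok to a parser or REPL loop ...
      } while (Tok.isNot(tok::eof));
      // Because incremental processing is enabled, the Preprocessor state
      // survives EOF and more input can be pushed later.
    }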
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
index e2e30bf..b551cd4 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
@@ -24,6 +24,7 @@ class FileEntry;
class Preprocessor;
class PreprocessorLexer {
+ virtual void anchor();
protected:
Preprocessor *PP; // Preprocessor object controlling lexing.
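
The added anchor() is LLVM's usual "key function" idiom: declaring one out-of-line virtual member pins the class's vtable to a single translation unit instead of letting the compiler emit weak copies wherever the header is included. Schematically (the real empty definition lives in a .cpp file not shown in this diff):

    // Header: a deliberately out-of-line virtual method.
    class PreprocessorLexer {
      virtual void anchor();
      // ...
    };

    // Exactly one .cpp file: the empty body that anchors the vtable.
    void PreprocessorLexer::anchor() {}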
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Token.h b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
index e6dd160..a88f607 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Token.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
@@ -75,7 +75,8 @@ public:
LeadingSpace = 0x02, // Whitespace exists before this token.
DisableExpand = 0x04, // This identifier may never be macro expanded.
NeedsCleaning = 0x08, // Contained an escaped newline or trigraph.
- LeadingEmptyMacro = 0x10 // Empty macro exists before this token.
+ LeadingEmptyMacro = 0x10, // Empty macro exists before this token.
+ HasUDSuffix = 0x20 // This string or character literal has a ud-suffix.
};
tok::TokenKind getKind() const { return (tok::TokenKind)Kind; }
@@ -263,6 +264,9 @@ public:
return (Flags & LeadingEmptyMacro) ? true : false;
}
+ /// \brief Return true if this token is a string or character literal which
+ /// has a ud-suffix.
+ bool hasUDSuffix() const { return (Flags & HasUDSuffix) ? true : false; }
};
/// PPConditionalInfo - Information about the conditional stack (#if directives)
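
Token flags form a bitmask, so HasUDSuffix simply claims the next free bit (0x20). A client testing it might look like this sketch; C++11 user-defined literals such as "abc"_suf set the flag, and the function name here is hypothetical:

    void classifyLiteral(const clang::Token &Tok) {
      if (Tok.hasUDSuffix()) {
        // "abc"_suffix or 'x'_suffix: route to user-defined literal
        // handling rather than the plain string/character path.
      }
    }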
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h b/contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h
index 7253870..2405a0c 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h
@@ -36,11 +36,13 @@ namespace clang {
void ParseAST(Preprocessor &pp, ASTConsumer *C,
ASTContext &Ctx, bool PrintStats = false,
TranslationUnitKind TUKind = TU_Complete,
- CodeCompleteConsumer *CompletionConsumer = 0);
+ CodeCompleteConsumer *CompletionConsumer = 0,
+ bool SkipFunctionBodies = false);
/// \brief Parse the main file known to the preprocessor, producing an
/// abstract syntax tree.
- void ParseAST(Sema &S, bool PrintStats = false);
+ void ParseAST(Sema &S, bool PrintStats = false,
+ bool SkipFunctionBodies = false);
} // end namespace clang
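
A sketch of a caller opting into the new parameter: skipping function bodies suits indexing-style clients that only need declarations. The setup of the preprocessor, consumer, and context is assumed to have happened elsewhere.

    void indexOnly(clang::Preprocessor &PP, clang::ASTConsumer *Consumer,
                   clang::ASTContext &Ctx) {
      clang::ParseAST(PP, Consumer, Ctx,
                      /*PrintStats=*/false, clang::TU_Complete,
                      /*CompletionConsumer=*/0,
                      /*SkipFunctionBodies=*/true);
    }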
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h
index 0e76c61..0d47292 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h
@@ -16,7 +16,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
- SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY,BRIEF,FULL) ENUM,
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
#define PARSESTART
#include "clang/Basic/DiagnosticParseKinds.inc"
#undef DIAG
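
This is the standard X-macro pattern: the TableGen-generated .inc file is a flat list of DIAG(...) rows, and each includer redefines DIAG to project out the column it wants, here just the enumerator name. Dropping the BRIEF and FULL columns therefore forces every expansion site to shrink its parameter list in lockstep. Schematically (the rows below are made up):

    #define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
                 SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
    enum {
      // Each DIAG(...) row contributes one enumerator:
      DIAG(err_example_one, 0, 0, "example one", 0, 0, 0, 0, 0, 0)
      DIAG(err_example_two, 0, 0, "example two", 0, 0, 0, 0, 0, 0)
      NUM_EXAMPLE_DIAGNOSTICS
    };
    #undef DIAG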
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
index 0046f88..dff8510 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
@@ -20,9 +20,10 @@
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/DeclSpec.h"
-#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PrettyStackTrace.h"
#include <stack>
namespace clang {
@@ -36,7 +37,7 @@ namespace clang {
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class VersionTuple;
-
+
/// PrettyStackTraceParserEntry - If a crash happens while the parser is active,
/// an entry is printed for it.
class PrettyStackTraceParserEntry : public llvm::PrettyStackTraceEntry {
@@ -96,7 +97,7 @@ class Parser : public CodeCompletionHandler {
unsigned short ParenCount, BracketCount, BraceCount;
/// Actions - These are the callbacks we invoke as we parse various constructs
- /// in the file.
+ /// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
@@ -108,9 +109,21 @@ class Parser : public CodeCompletionHandler {
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
- IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except block
- IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __except filter expression
- IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; // __finally
+ // __except block
+ IdentifierInfo *Ident__exception_code,
+ *Ident___exception_code,
+ *Ident_GetExceptionCode;
+ // __except filter expression
+ IdentifierInfo *Ident__exception_info,
+ *Ident___exception_info,
+ *Ident_GetExceptionInfo;
+ // __finally
+ IdentifierInfo *Ident__abnormal_termination,
+ *Ident___abnormal_termination,
+ *Ident_AbnormalTermination;
+
+ /// Contextual keywords for Microsoft extensions.
+ IdentifierInfo *Ident__except;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
@@ -123,7 +136,7 @@ class Parser : public CodeCompletionHandler {
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
-
+
/// \brief Identifier for "introduced".
IdentifierInfo *Ident_introduced;
@@ -135,43 +148,47 @@ class Parser : public CodeCompletionHandler {
/// \brief Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
+
+ /// \brief Identifier for "message".
+ IdentifierInfo *Ident_message;
- /// C++0x contextual keywords.
+ /// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_override;
- llvm::OwningPtr<PragmaHandler> AlignHandler;
- llvm::OwningPtr<PragmaHandler> GCCVisibilityHandler;
- llvm::OwningPtr<PragmaHandler> OptionsHandler;
- llvm::OwningPtr<PragmaHandler> PackHandler;
- llvm::OwningPtr<PragmaHandler> MSStructHandler;
- llvm::OwningPtr<PragmaHandler> UnusedHandler;
- llvm::OwningPtr<PragmaHandler> WeakHandler;
- llvm::OwningPtr<PragmaHandler> FPContractHandler;
- llvm::OwningPtr<PragmaHandler> OpenCLExtensionHandler;
+ OwningPtr<PragmaHandler> AlignHandler;
+ OwningPtr<PragmaHandler> GCCVisibilityHandler;
+ OwningPtr<PragmaHandler> OptionsHandler;
+ OwningPtr<PragmaHandler> PackHandler;
+ OwningPtr<PragmaHandler> MSStructHandler;
+ OwningPtr<PragmaHandler> UnusedHandler;
+ OwningPtr<PragmaHandler> WeakHandler;
+ OwningPtr<PragmaHandler> RedefineExtnameHandler;
+ OwningPtr<PragmaHandler> FPContractHandler;
+ OwningPtr<PragmaHandler> OpenCLExtensionHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
-
+
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
- /// \brief When true, we are directly inside an Objective-C messsage
+ /// \brief When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
-
+
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
-
+
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;
@@ -179,20 +196,24 @@ class Parser : public CodeCompletionHandler {
/// declaration is finished.
DelayedCleanupPool TopLevelDeclCleanupPool;
+ IdentifierInfo *getSEHExceptKeyword();
+
+ bool SkipFunctionBodies;
+
public:
- Parser(Preprocessor &PP, Sema &Actions);
+ Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser();
- const LangOptions &getLang() const { return PP.getLangOptions(); }
+ const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
-
+
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
-
+
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
@@ -241,8 +262,6 @@ public:
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
- DeclGroupPtrTy FinishPendingObjCActions();
-
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
@@ -271,10 +290,9 @@ private:
Tok.getKind() == tok::utf32_string_literal;
}
- /// \brief Returns true if the current token is a '=' or '==' and
- /// false otherwise. If it's '==', we assume that it's a typo and we emit
- /// DiagID and a fixit hint to turn '==' -> '='.
- bool isTokenEqualOrMistypedEqualEqual(unsigned DiagID);
+ /// \brief Returns true if the current token is '=' or a token that means '='.
+ /// For typos such as '==', a fixit hint to '=' is emitted.
+ bool isTokenEqualOrEqualTypo();
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with all kinds of tokens: strings and specific other
@@ -370,9 +388,9 @@ private:
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
- return PrevTokLocation;
+ return PrevTokLocation;
}
-
+
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
@@ -391,6 +409,14 @@ private:
/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
+ /// \brief Handle the annotation token produced for
+ /// #pragma GCC visibility...
+ void HandlePragmaVisibility();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma pack...
+ void HandlePragmaPack();
+
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
@@ -409,83 +435,77 @@ private:
return PP.LookAhead(0);
}
- /// \brief Tracks information about the current nesting depth of
- /// opening delimiters of each kind.
- class DelimiterTracker {
- private:
- friend class Parser;
-
- unsigned Paren, Brace, Square, Less, LLLess;
- unsigned& get(tok::TokenKind t) {
- switch (t) {
- default: llvm_unreachable("Unexpected balanced token");
- case tok::l_brace: return Brace;
- case tok::l_paren: return Paren;
- case tok::l_square: return Square;
- case tok::less: return Less;
- case tok::lesslessless: return LLLess;
- }
- }
-
- void push(tok::TokenKind t) {
- get(t)++;
- }
-
- void pop(tok::TokenKind t) {
- get(t)--;
- }
-
- unsigned getDepth(tok::TokenKind t) {
- return get(t);
- }
-
- public:
- DelimiterTracker() : Paren(0), Brace(0), Square(0), Less(0), LLLess(0) { }
- };
-
/// \brief RAII class that helps handle the parsing of an open/close delimiter
/// pair, such as braces { ... } or parentheses ( ... ).
class BalancedDelimiterTracker {
- tok::TokenKind Kind, Close;
Parser& P;
- bool Cleanup;
- const unsigned MaxDepth;
+ tok::TokenKind Kind, Close;
+ SourceLocation (Parser::*Consumer)();
SourceLocation LOpen, LClose;
-
- void assignClosingDelimiter() {
+
+ unsigned short &getDepth() {
switch (Kind) {
- default: llvm_unreachable("Unexpected balanced token");
- case tok::l_brace: Close = tok::r_brace; break;
- case tok::l_paren: Close = tok::r_paren; break;
- case tok::l_square: Close = tok::r_square; break;
- case tok::less: Close = tok::greater; break;
- case tok::lesslessless: Close = tok::greatergreatergreater; break;
+ case tok::l_brace: return P.BraceCount;
+ case tok::l_square: return P.BracketCount;
+ case tok::l_paren: return P.ParenCount;
+ default: llvm_unreachable("Wrong token kind");
}
}
-
+
+ enum { MaxDepth = 512 };
+
+ bool diagnoseOverflow();
+ bool diagnoseMissingClose();
+
public:
- BalancedDelimiterTracker(Parser& p, tok::TokenKind k)
- : Kind(k), P(p), Cleanup(false), MaxDepth(256) {
- assignClosingDelimiter();
- }
-
- ~BalancedDelimiterTracker() {
- if (Cleanup)
- P.QuantityTracker.pop(Kind);
+ BalancedDelimiterTracker(Parser& p, tok::TokenKind k) : P(p), Kind(k) {
+ switch (Kind) {
+ default: llvm_unreachable("Unexpected balanced token");
+ case tok::l_brace:
+ Close = tok::r_brace;
+ Consumer = &Parser::ConsumeBrace;
+ break;
+ case tok::l_paren:
+ Close = tok::r_paren;
+ Consumer = &Parser::ConsumeParen;
+ break;
+
+ case tok::l_square:
+ Close = tok::r_square;
+ Consumer = &Parser::ConsumeBracket;
+ break;
+ }
}
SourceLocation getOpenLocation() const { return LOpen; }
SourceLocation getCloseLocation() const { return LClose; }
SourceRange getRange() const { return SourceRange(LOpen, LClose); }
- bool consumeOpen();
- bool expectAndConsume(unsigned DiagID,
- const char *Msg = "",
+ bool consumeOpen() {
+ if (!P.Tok.is(Kind))
+ return true;
+
+ if (getDepth() < MaxDepth) {
+ LOpen = (P.*Consumer)();
+ return false;
+ }
+
+ return diagnoseOverflow();
+ }
+
+ bool expectAndConsume(unsigned DiagID,
+ const char *Msg = "",
tok::TokenKind SkipToTok = tok::unknown);
- bool consumeClose();
- };
+ bool consumeClose() {
+ if (P.Tok.is(Close)) {
+ LClose = (P.*Consumer)();
+ return false;
+ }
- DelimiterTracker QuantityTracker;
+ return diagnoseMissingClose();
+ }
+ void skipToEnd();
+ };
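
The intended call pattern inside a parse method, as a hypothetical sketch following the consumeOpen()/consumeClose() contract above (ParseParenthesizedThing is not a real Parser member):

    // As it would appear in a Parser .cpp, inside namespace clang:
    ExprResult Parser::ParseParenthesizedThing() {
      BalancedDelimiterTracker T(*this, tok::l_paren);
      if (T.consumeOpen())       // returns false on success; checks MaxDepth
        return ExprError();
      ExprResult Res = ParseExpression();
      T.consumeClose();          // diagnoses and recovers from a missing ')'
      return Res;
    }

Afterwards T.getRange() yields the LOpen..LClose span, which is handy for diagnostics that need the full delimited extent.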
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(Token &Tok) {
@@ -495,16 +515,16 @@ private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
-
+
/// \brief Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(Token &Tok) {
if (Tok.getAnnotationValue())
return ExprResult((Expr *)Tok.getAnnotationValue());
-
+
return ExprResult(true);
}
-
+
/// \brief Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
@@ -526,11 +546,11 @@ private:
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
- if (!getLang().AltiVec ||
+ if (!getLangOpts().AltiVec ||
(Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_pixel))
return false;
-
+
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
@@ -538,11 +558,11 @@ private:
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
- if (!getLang().AltiVec ||
+ if (!getLangOpts().AltiVec ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
-
+
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
@@ -598,14 +618,14 @@ private:
Parser &P;
Decl *DC;
public:
- explicit ObjCDeclContextSwitch(Parser &p) : P(p),
+ explicit ObjCDeclContextSwitch(Parser &p) : P(p),
DC(p.getObjCDeclContext()) {
if (DC)
- P.Actions.ActOnObjCTemporaryExitContainerContext();
+ P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
- P.Actions.ActOnObjCReenterContainerContext();
+ P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
@@ -625,7 +645,7 @@ private:
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
-
+
//===--------------------------------------------------------------------===//
// Scope manipulation
@@ -695,6 +715,7 @@ public:
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
+ void CheckNestedObjCContexts(SourceLocation AtLoc);
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless DontConsume is true). Because we cannot guarantee that the
@@ -706,16 +727,26 @@ private:
/// returns false.
bool SkipUntil(tok::TokenKind T, bool StopAtSemi = true,
bool DontConsume = false, bool StopAtCodeCompletion = false) {
- return SkipUntil(&T, 1, StopAtSemi, DontConsume, StopAtCodeCompletion);
+ return SkipUntil(llvm::makeArrayRef(T), StopAtSemi, DontConsume,
+ StopAtCodeCompletion);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, bool StopAtSemi = true,
bool DontConsume = false, bool StopAtCodeCompletion = false) {
tok::TokenKind TokArray[] = {T1, T2};
- return SkipUntil(TokArray, 2, StopAtSemi, DontConsume,StopAtCodeCompletion);
+ return SkipUntil(TokArray, StopAtSemi, DontConsume,StopAtCodeCompletion);
}
- bool SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
+ bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
bool StopAtSemi = true, bool DontConsume = false,
- bool StopAtCodeCompletion = false);
+ bool StopAtCodeCompletion = false) {
+ tok::TokenKind TokArray[] = {T1, T2, T3};
+ return SkipUntil(TokArray, StopAtSemi, DontConsume,StopAtCodeCompletion);
+ }
+ bool SkipUntil(ArrayRef<tok::TokenKind> Toks, bool StopAtSemi = true,
+ bool DontConsume = false, bool StopAtCodeCompletion = false);
+
+ /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
+ /// point for skipping past a simple-declaration.
+ void SkipMalformedDecl();
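
With the ArrayRef overload as the single underlying implementation, a call site with several stop tokens now reads like this sketch (a hypothetical recovery helper, written as a Parser member inside namespace clang; ArrayRef converts implicitly from the local array):

    bool Parser::recoverToCloser() { // hypothetical member
      // Skip to the first ')' or '}', stopping early at ';', and leave the
      // stop token for the caller to consume.
      tok::TokenKind StopToks[] = { tok::r_paren, tok::r_brace };
      return SkipUntil(StopToks, /*StopAtSemi=*/true, /*DontConsume=*/true);
    }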
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
@@ -757,9 +788,9 @@ private:
ParsingClass *Class;
};
- /// Contains the lexed tokens of an attribute with arguments that
- /// may reference member variables and so need to be parsed at the
- /// end of the class declaration after parsing all other member
+ /// Contains the lexed tokens of an attribute with arguments that
+ /// may reference member variables and so need to be parsed at the
+ /// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
@@ -768,15 +799,15 @@ private:
CachedTokens Toks;
IdentifierInfo &AttrName;
SourceLocation AttrNameLoc;
- Decl *D;
+ SmallVector<Decl*, 2> Decls;
- explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
+ explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
- : Self(P), AttrName(Name), AttrNameLoc(Loc), D(0) {}
+ : Self(P), AttrName(Name), AttrNameLoc(Loc) {}
virtual void ParseLexedAttributes();
- void setDecl(Decl *Dec) { D = Dec; }
+ void addDecl(Decl *D) { Decls.push_back(D); }
};
/// A list of late parsed attributes. Used by ParseGNUAttributes.
@@ -872,8 +903,9 @@ private:
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
- /// will be stored here with the tokens that will be parsed to create those entities.
- typedef SmallVector<LateParsedDeclaration*, 2> LateParsedDeclarationsContainer;
+ /// will be stored here with the tokens that will be parsed to create those
+ /// entities.
+ typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
@@ -965,7 +997,7 @@ private:
Popped = Other.Popped;
Other.Popped = true;
}
-
+
void push() {
State = Actions.PushParsingDeclaration();
Popped = false;
@@ -1060,7 +1092,7 @@ private:
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
- TemplateParams(TemplateParams),
+ TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
@@ -1092,23 +1124,23 @@ private:
/// \brief The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
-
+
/// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
- SourceRange getSourceRange() const;
+ SourceRange getSourceRange() const LLVM_READONLY;
};
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit.
struct LateParsedTemplatedFunction {
- explicit LateParsedTemplatedFunction(Parser* P, Decl *MD)
+ explicit LateParsedTemplatedFunction(Decl *MD)
: D(MD) {}
CachedTokens Toks;
-
+
/// \brief The template function declaration to be late parsed.
- Decl *D;
+ Decl *D;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
@@ -1128,10 +1160,15 @@ private:
Decl *ParseCXXInlineMethodDef(AccessSpecifier AS, AttributeList *AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
- const VirtSpecifiers& VS, ExprResult& Init);
+ const VirtSpecifiers& VS,
+ FunctionDefinitionKind DefinitionKind,
+ ExprResult& Init);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
- void ParseLexedAttribute(LateParsedAttribute &LA);
+ void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
+ bool EnterScope, bool OnDefinition);
+ void ParseLexedAttribute(LateParsedAttribute &LA,
+ bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
@@ -1168,9 +1205,10 @@ private:
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
AccessSpecifier AS = AS_none);
-
+
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
- const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
+ const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
+ LateParsedAttrList *LateParsedAttrs = 0);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
@@ -1178,9 +1216,9 @@ private:
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
- Parser::DeclGroupPtrTy ParseObjCAtDirectives();
- Parser::DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
- Decl *ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
+ DeclGroupPtrTy ParseObjCAtDirectives();
+ DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
+ Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
@@ -1193,15 +1231,31 @@ private:
bool ParseObjCProtocolQualifiers(DeclSpec &DS);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
- Decl *ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
- ParsedAttributes &prefixAttrs);
+ DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
+ ParsedAttributes &prefixAttrs);
+
+ struct ObjCImplParsingDataRAII {
+ Parser &P;
+ Decl *Dcl;
+ typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
+ LateParsedObjCMethodContainer LateParsedObjCMethods;
+
+ ObjCImplParsingDataRAII(Parser &parser, Decl *D)
+ : P(parser), Dcl(D) {
+ P.CurParsedObjCImpl = this;
+ Finished = false;
+ }
+ ~ObjCImplParsingDataRAII();
- Decl *ObjCImpDecl;
- SmallVector<Decl *, 4> PendingObjCImpDecl;
- typedef SmallVector<LexedMethod*, 2> LateParsedObjCMethodContainer;
- LateParsedObjCMethodContainer LateParsedObjCMethods;
+ void finish(SourceRange AtEnd);
+ bool isFinished() const { return Finished; }
+
+ private:
+ bool Finished;
+ };
+ ObjCImplParsingDataRAII *CurParsedObjCImpl;
- Decl *ParseObjCAtImplementationDeclaration(SourceLocation atLoc);
+ DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
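
The new CurParsedObjCImpl pointer replaces the old ObjCImpDecl/PendingObjCImpDecl pair with an RAII object that registers itself with the parser on construction. A minimal sketch of that registration idiom, with hypothetical names standing in for the clang types:

    struct ParserLike;  // hypothetical stand-in for clang's Parser

    struct ImplStateRAII {
      ParserLike &P;
      explicit ImplStateRAII(ParserLike &parser);
      ~ImplStateRAII();
    };

    struct ParserLike {
      ImplStateRAII *CurImpl;  // plays the role of CurParsedObjCImpl
      ParserLike() : CurImpl(0) {}
    };

    ImplStateRAII::ImplStateRAII(ParserLike &parser) : P(parser) {
      P.CurImpl = this;   // register on entry to the @implementation
    }
    ImplStateRAII::~ImplStateRAII() {
      P.CurImpl = 0;      // deregister when the parse scope unwinds
    }

The real destructor is out of line and tracks a Finished flag, so it presumably does more cleanup; the registration shape above is the point.
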
@@ -1232,11 +1286,18 @@ private:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
-
- ExprResult ParseExpression();
- ExprResult ParseConstantExpression();
+
+ /// TypeCastState - State whether an expression is or may be a type cast.
+ enum TypeCastState {
+ NotTypeCast = 0,
+ MaybeTypeCast,
+ IsTypeCast
+ };
+
+ ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
+ ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
// Expr that doesn't include commas.
- ExprResult ParseAssignmentExpression();
+ ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
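
Replacing the old boolean isTypeCast with the TypeCastState tri-state lets callers say not just whether an expression is a cast, but whether it merely might be one. The classic inputs that force the "maybe" state (illustrative C++ only, not clang internals):

    // After '(' the parser cannot yet tell a cast from a parenthesized
    // expression, so the nested expression parse runs as MaybeTypeCast.
    typedef int T;
    int a = 1, b = 2, x = 3;
    int r1 = (T)x;     // '(T)' turns out to be a type: a cast expression
    int r2 = (a) + b;  // '(a)' turns out to be a parenthesized expression
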
@@ -1247,10 +1308,10 @@ private:
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
- bool isTypeCast);
+ TypeCastState isTypeCast);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
- bool isTypeCast = false);
+ TypeCastState isTypeCast = NotTypeCast);
/// Returns true if the next token would start a postfix-expression
/// suffix.
@@ -1278,8 +1339,7 @@ private:
SmallVectorImpl<SourceLocation> &CommaLocs,
void (Sema::*Completer)(Scope *S,
Expr *Data,
- Expr **Args,
- unsigned NumArgs) = 0,
+ llvm::ArrayRef<Expr *> Args) = 0,
Expr *Data = 0);
/// ParenParseOption - Control what ParseParenExpression will parse.
@@ -1302,9 +1362,11 @@ private:
SourceLocation LParenLoc,
SourceLocation RParenLoc);
- ExprResult ParseStringLiteralExpression();
+ ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
+
+ ExprResult ParseObjCBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ Expressions
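
The AllowUserDefinedLiteral flag added to ParseStringLiteralExpression above presumably gates C++11 user-defined literal suffixes on string literals, which are valid in ordinary expressions but not everywhere a string literal may appear (a linkage-specification, for instance). A self-contained illustration of the construct itself:

    #include <cstddef>
    // C++11 literal operator; the ud-suffix must start with an underscore.
    constexpr std::size_t operator"" _len(const char *, std::size_t n) {
      return n;  // length of the literal, computed at compile time
    }
    static_assert("hello"_len == 5, "user-defined literal on a string");
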
@@ -1420,6 +1482,7 @@ private:
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
+ bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
@@ -1432,29 +1495,35 @@ private:
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
+ ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
- SourceLocation SuperLoc,
- ParsedType ReceiverType,
- ExprArg ReceiverExpr);
+ SourceLocation SuperLoc,
+ ParsedType ReceiverType,
+ ExprArg ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, ExprArg ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
-
+
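
The five new ParseObjC*Literal entry points line up with the Objective-C literal syntax clang gained in this era. Illustrative Objective-C++ inputs, assuming the Foundation types are available:

    NSNumber     *n = @42;                     // ParseObjCNumericLiteral
    NSNumber     *c = @'x';                    // ParseObjCCharacterLiteral
    NSNumber     *t = @YES;                    // ParseObjCBooleanLiteral
    NSArray      *a = @[ @1, @2, @3 ];         // ParseObjCArrayLiteral
    NSDictionary *d = @{ @"key" : @"value" };  // ParseObjCDictionaryLiteral
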
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
- StmtResult ParseStatement() {
+ StmtResult ParseStatement(SourceLocation *TrailingElseLoc = NULL) {
StmtVector Stmts(Actions);
- return ParseStatementOrDeclaration(Stmts, true);
+ return ParseStatementOrDeclaration(Stmts, true, TrailingElseLoc);
}
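
Threading the optional TrailingElseLoc out-parameter through ParseStatement and the individual statement parsers looks like plumbing for a dangling-else diagnostic: the inner parse reports where its trailing 'else' landed so the outer construct can warn about it. The classic ambiguous shape (illustrative):

    void do_x(void);
    void do_y(void);
    void f(int a, int b) {
      if (a)
        if (b)
          do_x();
      else        // binds to the inner 'if', despite the indentation
        do_y();
    }
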
StmtResult ParseStatementOrDeclaration(StmtVector& Stmts,
- bool OnlyStatement = false);
+ bool OnlyStatement,
+ SourceLocation *TrailingElseLoc = NULL);
StmtResult ParseExprStatement(ParsedAttributes &Attrs);
StmtResult ParseLabeledStatement(ParsedAttributes &Attr);
StmtResult ParseCaseStatement(ParsedAttributes &Attr,
@@ -1471,22 +1540,61 @@ private:
Decl *&DeclResult,
SourceLocation Loc,
bool ConvertToBoolean);
- StmtResult ParseIfStatement(ParsedAttributes &Attr);
- StmtResult ParseSwitchStatement(ParsedAttributes &Attr);
- StmtResult ParseWhileStatement(ParsedAttributes &Attr);
+ StmtResult ParseIfStatement(ParsedAttributes &Attr,
+ SourceLocation *TrailingElseLoc);
+ StmtResult ParseSwitchStatement(ParsedAttributes &Attr,
+ SourceLocation *TrailingElseLoc);
+ StmtResult ParseWhileStatement(ParsedAttributes &Attr,
+ SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement(ParsedAttributes &Attr);
- StmtResult ParseForStatement(ParsedAttributes &Attr);
+ StmtResult ParseForStatement(ParsedAttributes &Attr,
+ SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement(ParsedAttributes &Attr);
StmtResult ParseContinueStatement(ParsedAttributes &Attr);
StmtResult ParseBreakStatement(ParsedAttributes &Attr);
StmtResult ParseReturnStatement(ParsedAttributes &Attr);
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
- bool ParseMicrosoftIfExistsCondition(bool& Result);
+
+ /// \brief Describes the behavior that should be taken for an __if_exists
+ /// block.
+ enum IfExistsBehavior {
+ /// \brief Parse the block; this code is always used.
+ IEB_Parse,
+ /// \brief Skip the block entirely; this code is never used.
+ IEB_Skip,
+ /// \brief Parse the block as a dependent block, which may be used in
+ /// some template instantiations but not others.
+ IEB_Dependent
+ };
+
+ /// \brief Describes the condition of a Microsoft __if_exists or
+ /// __if_not_exists block.
+ struct IfExistsCondition {
+ /// \brief The location of the initial keyword.
+ SourceLocation KeywordLoc;
+ /// \brief Whether this is an __if_exists block (rather than an
+ /// __if_not_exists block).
+ bool IsIfExists;
+
+ /// \brief Nested-name-specifier preceding the name.
+ CXXScopeSpec SS;
+
+ /// \brief The name we're looking for.
+ UnqualifiedId Name;
+
+ /// \brief The behavior this __if_exists or __if_not_exists block
+ /// should follow.
+ IfExistsBehavior Behavior;
+ };
+
+ bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS);
+ bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
+ bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
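
The IfExistsBehavior/IfExistsCondition machinery generalizes the old boolean result so that inside a template the decision can be deferred (IEB_Dependent) until instantiation. The Microsoft syntax being parsed, under -fms-extensions (illustrative):

    struct S { void known(); };

    template <typename T>
    void probe(T &t) {
      __if_exists(T::known) {      // dependent name: IEB_Dependent,
        t.known();                 // resolved per template instantiation
      }
      __if_not_exists(T::missing) {
        // fallback; a non-matching name in non-template code is IEB_Skip
      }
    }
    // probe<S> would compile the first block and drop the second.
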
@@ -1525,6 +1633,8 @@ private:
enum DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
+ DSC_type_specifier, // C++ type-specifier-seq
+ DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_top_level // top-level/namespace declaration context
};
@@ -1546,13 +1656,14 @@ private:
ParsedAttributes &attrs,
bool RequireSemi,
ForRangeInit *FRI = 0);
+ bool MightBeDeclarator(unsigned Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
bool AllowFunctionDefinitions,
SourceLocation *DeclEnd = 0,
ForRangeInit *FRI = 0);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
- bool ParseAttributesAfterDeclarator(Declarator &D);
+ bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
@@ -1562,30 +1673,27 @@ private:
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
- bool trySkippingFunctionBodyForCodeCompletion();
+ bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
- AccessSpecifier AS);
+ AccessSpecifier AS, DeclSpecContext DSC);
DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
void ParseDeclarationSpecifiers(DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
- DeclSpecContext DSC = DSC_normal);
- bool ParseOptionalTypeSpecifier(DeclSpec &DS, bool &isInvalid,
- const char *&PrevSpec,
- unsigned &DiagID,
- const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
- bool SuppressDeclarations = false);
+ DeclSpecContext DSC = DSC_normal,
+ LateParsedAttrList *LateAttrs = 0);
- void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none);
+ void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
+ DeclSpecContext DSC = DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
Declarator::TheContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
- const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
- AccessSpecifier AS = AS_none);
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
@@ -1604,7 +1712,7 @@ private:
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
bool isTypeQualifier() const;
-
+
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
@@ -1614,25 +1722,25 @@ private:
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
- if (getLang().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
- /// isSimpleDeclaration - Disambiguates between a declaration or an
- /// expression, mainly used for the C 'clause-1' or the C++
+ /// isForInitDeclaration - Disambiguates between a declaration and an
+ /// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
- bool isSimpleDeclaration() {
- if (getLang().CPlusPlus)
- return isCXXSimpleDeclaration();
+ bool isForInitDeclaration() {
+ if (getLangOpts().CPlusPlus)
+ return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
-
+
/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
@@ -1650,7 +1758,7 @@ private:
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
- if (getLang().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
@@ -1670,7 +1778,7 @@ private:
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
- bool isCXXSimpleDeclaration();
+ bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
@@ -1732,10 +1840,12 @@ private:
/// declaration specifier, TPResult::False() if it is not,
/// TPResult::Ambiguous() if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error() if a parsing error was
- /// encountered.
+ /// encountered. If it could be a braced C++11 function-style cast, returns
+ /// BracedCastResult.
/// Doesn't consume tokens.
- TPResult isCXXDeclarationSpecifier();
-
+ TPResult
+ isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False());
+
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error().
// Returning TPResult::True()/False() indicates that the ambiguity was
@@ -1744,7 +1854,7 @@ private:
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseDeclarationSpecifier();
- TPResult TryParseSimpleDeclaration();
+ TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParseInitDeclaratorList();
@@ -1760,6 +1870,16 @@ private:
Decl **OwnedType = 0);
void ParseBlockId();
+ // Check for the start of a C++11 attribute-specifier-seq in a context where
+ // an attribute is not allowed.
+ bool CheckProhibitedCXX11Attribute() {
+ assert(Tok.is(tok::l_square));
+ if (!getLangOpts().CPlusPlus0x || NextToken().isNot(tok::l_square))
+ return false;
+ return DiagnoseProhibitedCXX11Attribute();
+ }
+ bool DiagnoseProhibitedCXX11Attribute();
+
void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
if (!attrs.Range.isValid()) return;
DiagnoseProhibitedAttributes(attrs);
@@ -1790,35 +1910,38 @@ private:
SourceLocation *EndLoc);
void MaybeParseCXX0XAttributes(Declarator &D) {
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
+ if (getLangOpts().CPlusPlus0x && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
- ParseCXX0XAttributes(attrs, &endLoc);
+ ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX0XAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = 0) {
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
+ if (getLangOpts().CPlusPlus0x && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
- ParseCXX0XAttributes(attrsWithRange, endLoc);
+ ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX0XAttributes(ParsedAttributesWithRange &attrs,
- SourceLocation *endLoc = 0) {
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
- ParseCXX0XAttributes(attrs, endLoc);
+ SourceLocation *endLoc = 0,
+ bool OuterMightBeMessageSend = false) {
+ if (getLangOpts().CPlusPlus0x &&
+ isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
+ ParseCXX11Attributes(attrs, endLoc);
}
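
The OuterMightBeMessageSend flag exists because in Objective-C++ a leading '[[' is genuinely ambiguous: it can open a C++11 attribute-specifier or two nested message sends. Illustrative Objective-C++:

    @interface Foo
    + (id)alloc;
    - (id)init;
    @end

    void g() {
      // '[[' below begins two nested message sends, not an attribute.
      id obj = [[Foo alloc] init];
    }
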
- void ParseCXX0XAttributeSpecifier(ParsedAttributes &attrs,
+ void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = 0);
- void ParseCXX0XAttributes(ParsedAttributesWithRange &attrs,
+ void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = 0);
+ IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = 0) {
- if (getLang().MicrosoftExt && Tok.is(tok::l_square))
+ if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
@@ -1843,15 +1966,22 @@ private:
void ParseTypeofSpecifier(DeclSpec &DS);
- void ParseDecltypeSpecifier(DeclSpec &DS);
+ SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
+ void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
- ExprResult ParseAlignArgument(SourceLocation Start);
+ ExprResult ParseAlignArgument(SourceLocation Start,
+ SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = 0);
- VirtSpecifiers::Specifier isCXX0XVirtSpecifier() const;
+ VirtSpecifiers::Specifier isCXX0XVirtSpecifier(const Token &Tok) const;
+ VirtSpecifiers::Specifier isCXX0XVirtSpecifier() const {
+ return isCXX0XVirtSpecifier(Tok);
+ }
void ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS);
bool isCXX0XFinalKeyword() const;
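
isCXX0XVirtSpecifier now classifies an arbitrary token rather than only the current one, which helps with lookahead. The contextual keywords it recognizes (illustrative):

    struct Base {
      virtual void f();
      virtual void g();
    };
    struct Derived final : Base {  // 'final' on the class head
      void f() override;           // a single virt-specifier
      void g() override final;     // a virt-specifier-seq
    };
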
@@ -1918,9 +2048,20 @@ private:
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
- bool isCXX0XAttributeSpecifier(bool FullLookahead = false,
- tok::TokenKind *After = 0);
-
+ /// The kind of attribute specifier we have found.
+ enum CXX11AttributeKind {
+ /// This is not an attribute specifier.
+ CAK_NotAttributeSpecifier,
+ /// This should be treated as an attribute-specifier.
+ CAK_AttributeSpecifier,
+ /// The next tokens are '[[', but this is not an attribute-specifier. This
+ /// is ill-formed by C++11 [dcl.attr.grammar]p6.
+ CAK_InvalidAttributeSpecifier
+ };
+ CXX11AttributeKind
+ isCXX11AttributeSpecifier(bool Disambiguate = false,
+ bool OuterMightBeMessageSend = false);
+
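
The CAK_InvalidAttributeSpecifier case implements the rule the comment above cites, C++11 [dcl.attr.grammar]p6: two consecutive left brackets may appear only where they introduce an attribute. Illustrative:

    int arr[10];
    int i = 0;
    // int x = arr[[i] { return i; }()];  // ill-formed: '[[' must start
                                          // an attribute-specifier
    int y = arr[ [i] { return i; }() ];   // OK: the brackets are separated
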
Decl *ParseNamespace(unsigned Context, SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
@@ -1952,15 +2093,13 @@ private:
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
- TypeResult ParseClassName(SourceLocation &EndLocation, CXXScopeSpec &SS);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
- DeclSpec &DS,
- const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
- AccessSpecifier AS = AS_none,
- bool SuppressDeclarations = false);
+ DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, bool EnteringContext,
+ DeclSpecContext DSC);
void ParseCXXMemberSpecification(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
- ExprResult ParseCXXMemberInitializer(bool IsFunction,
+ ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
void ParseCXXClassMemberDeclaration(AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
@@ -1972,18 +2111,20 @@ private:
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
+ TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
+ SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
- bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
+ bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
- bool AssumeTemplateId,
- SourceLocation TemplateKWLoc);
+ bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
@@ -1991,8 +2132,9 @@ private:
bool AllowDestructorName,
bool AllowConstructorName,
ParsedType ObjectType,
+ SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
-
+
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
@@ -2036,27 +2178,30 @@ private:
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
- SourceLocation TemplateKWLoc = SourceLocation(),
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType();
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
- Decl *ParseExplicitInstantiation(SourceLocation ExternLoc,
- SourceLocation TemplateLoc,
- SourceLocation &DeclEnd);
+ Decl *ParseExplicitInstantiation(unsigned Context,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
- DeclGroupPtrTy ParseModuleImport();
-
+ DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc);
+
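
ParseModuleImport gaining a SourceLocation for a leading '@' matches the Objective-C spelling of the modules import declaration. Illustrative, under -fmodules and assuming a Foundation module map exists:

    @import Foundation;           // import a whole module
    @import Foundation.NSString;  // import a single submodule
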
//===--------------------------------------------------------------------===//
// GNU G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseUnaryTypeTrait();
ExprResult ParseBinaryTypeTrait();
-
+ ExprResult ParseTypeTrait();
+
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
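
ParseUnaryTypeTrait, ParseBinaryTypeTrait, and the new ParseTypeTrait handle the compiler-primitive traits spelled with double underscores. Two common ones (illustrative):

    struct B {};
    struct D : B {};
    static_assert(__is_base_of(B, D), "a binary type trait");
    static_assert(__is_pod(int),      "a unary type trait");
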
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
index 7a636e5..c9c92e3 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
@@ -31,6 +31,11 @@ ASTConsumer *CreateObjCRewriter(const std::string &InFile,
DiagnosticsEngine &Diags,
const LangOptions &LOpts,
bool SilenceRewriteMacroWarning);
+ASTConsumer *CreateModernObjCRewriter(const std::string &InFile,
+ raw_ostream *OS,
+ DiagnosticsEngine &Diags,
+ const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning);
/// CreateHTMLPrinter - Create an AST consumer which rewrites source code to
/// HTML with syntax highlighting suitable for viewing in a web-browser.
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
index bf7e791..44f0611 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
@@ -18,6 +18,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Rewrite/Rewriter.h"
+#include "clang/Edit/EditedSource.h"
namespace clang {
@@ -26,20 +27,38 @@ class FileEntry;
class FixItOptions {
public:
+ FixItOptions() : FixWhatYouCan(false),
+ FixOnlyWarnings(false), Silent(false) { }
+
virtual ~FixItOptions();
/// \brief This file is about to be rewritten. Return the name of the file
/// that is okay to write to.
- virtual std::string RewriteFilename(const std::string &Filename) = 0;
+ ///
+ /// \param fd out parameter for file descriptor. After the call it may be set
+ /// to an open file descriptor for the returned filename, or it will be -1
+ /// otherwise.
+ ///
+ virtual std::string RewriteFilename(const std::string &Filename, int &fd) = 0;
/// \brief Whether to abort fixing a file when not all errors could be fixed.
bool FixWhatYouCan;
+
+ /// \brief Whether to only fix warnings and not errors.
+ bool FixOnlyWarnings;
+
+ /// \brief If true, only pass the diagnostic to the actual diagnostic consumer
+ /// if it is an error or a fixit was applied as part of the diagnostic.
+ /// It basically silences warnings without accompanying fixits.
+ bool Silent;
};
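
With the new out-parameter, RewriteFilename can hand back an already-open descriptor or decline with -1, per the \param note above. A minimal subclass against the new signature (a sketch only; the in-place policy is an example, not clang's default):

    #include "clang/Rewrite/FixItRewriter.h"
    #include <string>

    class InPlaceFixItOptions : public clang::FixItOptions {
    public:
      InPlaceFixItOptions() { FixWhatYouCan = true; }
      virtual std::string RewriteFilename(const std::string &Filename,
                                          int &fd) {
        fd = -1;          // no pre-opened descriptor; write by name
        return Filename;  // rewrite each file in place
      }
    };
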
class FixItRewriter : public DiagnosticConsumer {
/// \brief The diagnostics machinery.
DiagnosticsEngine &Diags;
+ edit::EditedSource Editor;
+
/// \brief The rewriter used to perform the various code
/// modifications.
Rewriter Rewrite;
@@ -47,6 +66,7 @@ class FixItRewriter : public DiagnosticConsumer {
/// \brief The diagnostic client that performs the actual formatting
/// of error messages.
DiagnosticConsumer *Client;
+ bool OwnsClient;
/// \brief Turn an input path into an output path. NULL implies overwriting
/// the original.
@@ -55,6 +75,9 @@ class FixItRewriter : public DiagnosticConsumer {
/// \brief The number of rewriter failures.
unsigned NumFailures;
+ /// \brief Whether the previous diagnostic was not passed to the consumer.
+ bool PrevDiagSilenced;
+
public:
typedef Rewriter::buffer_iterator iterator;
@@ -82,7 +105,8 @@ public:
/// \brief Write the modified source files.
///
/// \returns true if there was an error, false otherwise.
- bool WriteFixedFiles();
+ bool WriteFixedFiles(
+ std::vector<std::pair<std::string, std::string> > *RewrittenFiles = 0);
/// IncludeInDiagnosticCounts - This method (whose default implementation
/// returns true) indicates whether the diagnostics handled by this
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
index f7aeefa..6e9ecac 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
@@ -28,8 +28,8 @@ protected:
class FixItAction : public ASTFrontendAction {
protected:
- llvm::OwningPtr<FixItRewriter> Rewriter;
- llvm::OwningPtr<FixItOptions> FixItOpts;
+ OwningPtr<FixItRewriter> Rewriter;
+ OwningPtr<FixItOptions> FixItOpts;
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
StringRef InFile);
@@ -46,6 +46,17 @@ public:
~FixItAction();
};
+/// \brief Emits changes to temporary files and uses them for the original
+/// frontend action.
+class FixItRecompile : public WrapperFrontendAction {
+public:
+ FixItRecompile(FrontendAction *WrappedAction)
+ : WrapperFrontendAction(WrappedAction) {}
+
+protected:
+ virtual bool BeginInvocation(CompilerInstance &CI);
+};
+
class RewriteObjCAction : public ASTFrontendAction {
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
index 62ea12a..9ebd33a 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
@@ -41,7 +41,7 @@ namespace clang {
/// ScratchBuf - This is the buffer that we create scratch tokens from.
///
- llvm::OwningPtr<ScratchBuffer> ScratchBuf;
+ OwningPtr<ScratchBuffer> ScratchBuf;
TokenRewriter(const TokenRewriter&); // DO NOT IMPLEMENT
void operator=(const TokenRewriter&); // DO NOT IMPLEMENT.
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
index bcacf7a..142f144 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
@@ -85,6 +85,8 @@ private:
/// \brief The location of the 'unavailable' keyword in an
/// availability attribute.
SourceLocation UnavailableLoc;
+
+ const Expr *MessageExpr;
/// The next attribute in the current position.
AttributeList *NextInPosition;
@@ -138,13 +140,15 @@ private:
const AvailabilityChange &introduced,
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted,
- SourceLocation unavailable,
+ SourceLocation unavailable,
+ const Expr *messageExpr,
bool declspec, bool cxx0x)
: AttrName(attrName), ScopeName(scopeName), ParmName(parmName),
AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(parmLoc),
NumArgs(0), DeclspecAttribute(declspec), CXX0XAttribute(cxx0x),
Invalid(false), UsedAsTypeAttr(false), IsAvailability(true),
- UnavailableLoc(unavailable), NextInPosition(0), NextInPool(0) {
+ UnavailableLoc(unavailable), MessageExpr(messageExpr),
+ NextInPosition(0), NextInPool(0) {
new (&getAvailabilitySlot(IntroducedSlot)) AvailabilityChange(introduced);
new (&getAvailabilitySlot(DeprecatedSlot)) AvailabilityChange(deprecated);
new (&getAvailabilitySlot(ObsoletedSlot)) AvailabilityChange(obsoleted);
@@ -155,122 +159,21 @@ private:
friend class AttributeFactory;
public:
- enum Kind { // Please keep this list alphabetized.
- AT_acquired_after,
- AT_acquired_before,
- AT_address_space,
- AT_alias,
- AT_aligned,
- AT_always_inline,
- AT_analyzer_noreturn,
- AT_annotate,
- AT_arc_weakref_unavailable,
- AT_availability, // Clang-specific
- AT_base_check,
- AT_blocks,
- AT_carries_dependency,
- AT_cdecl,
- AT_cf_audited_transfer, // Clang-specific.
- AT_cf_consumed, // Clang-specific.
- AT_cf_returns_autoreleased, // Clang-specific.
- AT_cf_returns_not_retained, // Clang-specific.
- AT_cf_returns_retained, // Clang-specific.
- AT_cf_unknown_transfer, // Clang-specific.
- AT_cleanup,
- AT_common,
- AT_const,
- AT_constant,
- AT_constructor,
- AT_deprecated,
- AT_destructor,
- AT_device,
- AT_dllexport,
- AT_dllimport,
- AT_exclusive_lock_function,
- AT_exclusive_locks_required,
- AT_exclusive_trylock_function,
- AT_ext_vector_type,
- AT_fastcall,
- AT_format,
- AT_format_arg,
- AT_global,
- AT_gnu_inline,
- AT_guarded_by,
- AT_guarded_var,
- AT_host,
- AT_IBAction, // Clang-specific.
- AT_IBOutlet, // Clang-specific.
- AT_IBOutletCollection, // Clang-specific.
- AT_init_priority,
- AT_launch_bounds,
- AT_lock_returned,
- AT_lockable,
- AT_locks_excluded,
- AT_malloc,
- AT_may_alias,
- AT_mode,
- AT_MsStruct,
- AT_naked,
- AT_neon_polyvector_type, // Clang-specific.
- AT_neon_vector_type, // Clang-specific.
- AT_no_instrument_function,
- AT_no_thread_safety_analysis,
- AT_nocommon,
- AT_nodebug,
- AT_noinline,
- AT_nonnull,
- AT_noreturn,
- AT_nothrow,
- AT_ns_bridged, // Clang-specific.
- AT_ns_consumed, // Clang-specific.
- AT_ns_consumes_self, // Clang-specific.
- AT_ns_returns_autoreleased, // Clang-specific.
- AT_ns_returns_not_retained, // Clang-specific.
- AT_ns_returns_retained, // Clang-specific.
- AT_nsobject,
- AT_objc_exception,
- AT_objc_gc,
- AT_objc_method_family,
- AT_objc_ownership, // Clang-specific.
- AT_objc_precise_lifetime, // Clang-specific.
- AT_objc_returns_inner_pointer, // Clang-specific.
- AT_opencl_image_access, // OpenCL-specific.
- AT_opencl_kernel_function, // OpenCL-specific.
- AT_overloadable, // Clang-specific.
- AT_ownership_holds, // Clang-specific.
- AT_ownership_returns, // Clang-specific.
- AT_ownership_takes, // Clang-specific.
- AT_packed,
- AT_pascal,
- AT_pcs, // ARM specific
- AT_pt_guarded_by,
- AT_pt_guarded_var,
- AT_pure,
- AT_regparm,
- AT_reqd_wg_size,
- AT_scoped_lockable,
- AT_section,
- AT_sentinel,
- AT_shared,
- AT_shared_lock_function,
- AT_shared_locks_required,
- AT_shared_trylock_function,
- AT_stdcall,
- AT_thiscall,
- AT_transparent_union,
- AT_unavailable,
- AT_unlock_function,
- AT_unused,
- AT_used,
- AT_uuid,
- AT_vecreturn, // PS3 PPU-specific.
- AT_vector_size,
- AT_visibility,
- AT_warn_unused_result,
- AT_weak,
- AT_weak_import,
- AT_weakref,
- AT_returns_twice,
+ enum Kind {
+ #define PARSED_ATTR(NAME) AT_##NAME,
+ #include "clang/Sema/AttrParsedAttrList.inc"
+ PARSED_ATTR(address_space)
+ PARSED_ATTR(base_check)
+ PARSED_ATTR(cf_returns_autoreleased)
+ PARSED_ATTR(ext_vector_type)
+ PARSED_ATTR(mode)
+ PARSED_ATTR(neon_polyvector_type)
+ PARSED_ATTR(neon_vector_type)
+ PARSED_ATTR(objc_gc)
+ PARSED_ATTR(objc_ownership)
+ PARSED_ATTR(opencl_image_access)
+ PARSED_ATTR(vector_size)
+ #undef PARSED_ATTR
IgnoredAttribute,
UnknownAttribute
};
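
The hand-alphabetized enumerator list is gone; the attribute kinds are now generated by expanding PARSED_ATTR over a TableGen-produced .inc file, with a handful of type-attribute stragglers appended by hand. The X-macro pattern in miniature (self-contained; Colors stands in for the generated list):

    #define COLOR_LIST \
      COLOR(Red)       \
      COLOR(Green)     \
      COLOR(Blue)

    enum Color {
    #define COLOR(NAME) C_##NAME,  // each entry becomes an enumerator
      COLOR_LIST
    #undef COLOR
      C_Sentinel                   // trailing entries, like UnknownAttribute
    };
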
@@ -371,6 +274,11 @@ public:
assert(getKind() == AT_availability && "Not an availability attribute");
return UnavailableLoc;
}
+
+ const Expr * getMessageExpr() const {
+ assert(getKind() == AT_availability && "Not an availability attribute");
+ return MessageExpr;
+ }
};
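
MessageExpr carries the optional message= clause of the availability attribute, surfaced here through getMessageExpr. The source-level construct that populates it (illustrative):

    void oldAPI(void)
      __attribute__((availability(macosx, introduced=10.4, deprecated=10.6,
                                  obsoleted=10.7,
                                  message="use newAPI() instead")));
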
/// A factory, from which one makes pools, from which one creates
@@ -492,13 +400,14 @@ public:
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted,
SourceLocation unavailable,
+ const Expr *MessageExpr,
bool declspec = false, bool cxx0x = false) {
void *memory = allocate(AttributeFactory::AvailabilityAllocSize);
return add(new (memory) AttributeList(attrName, attrRange,
scopeName, scopeLoc,
parmName, parmLoc,
introduced, deprecated, obsoleted,
- unavailable,
+ unavailable, MessageExpr,
declspec, cxx0x));
}
@@ -616,10 +525,12 @@ public:
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted,
SourceLocation unavailable,
+ const Expr *MessageExpr,
bool declspec = false, bool cxx0x = false) {
AttributeList *attr =
pool.create(attrName, attrRange, scopeName, scopeLoc, parmName, parmLoc,
introduced, deprecated, obsoleted, unavailable,
+ MessageExpr,
declspec, cxx0x);
add(attr);
return attr;
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
index 9e2d60d..fe9bed5 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -24,14 +24,14 @@
namespace clang {
class Decl;
-
+
/// \brief Default priority values for code-completion results based
/// on their kind.
enum {
/// \brief Priority for the next initialization in a constructor initializer
/// list.
CCP_NextInitializer = 7,
- /// \brief Priority for an enumeration constant inside a switch whose
+ /// \brief Priority for an enumeration constant inside a switch whose
/// condition is of the enumeration type.
CCP_EnumInCase = 7,
/// \brief Priority for a send-to-super completion.
@@ -59,7 +59,7 @@ enum {
/// \brief Priority for a result that isn't likely to be what the user wants,
/// but is included for completeness.
CCP_Unlikely = 80,
-
+
/// \brief Priority for the Objective-C "_cmd" implicit parameter.
CCP_ObjC_cmd = CCP_Unlikely
};
@@ -76,15 +76,15 @@ enum {
/// of the current method, which might imply that some kind of delegation
/// is occurring.
CCD_SelectorMatch = -3,
-
+
/// \brief Adjustment to the "bool" type in Objective-C, where the typedef
/// "BOOL" is preferred.
CCD_bool_in_ObjC = 1,
-
+
/// \brief Adjustment for KVC code pattern priorities when it doesn't look
/// like the expression refers to an Objective-C collection.
CCD_ProbablyNotObjCCollection = 15,
-
+
/// \brief An Objective-C method being used as a property.
CCD_MethodAsProperty = 2
};
@@ -114,14 +114,14 @@ enum SimplifiedTypeClass {
STC_Record,
STC_Void
};
-
+
/// \brief Determine the simplified type class of the given canonical type.
SimplifiedTypeClass getSimplifiedTypeClass(CanQualType T);
-
+
/// \brief Determine the type that this declaration will have if it is used
/// as a type or in an expression.
QualType getDeclUsageType(ASTContext &C, NamedDecl *ND);
-
+
/// \brief Determine the priority to be given to a macro code completion result
/// with the given name.
///
@@ -131,14 +131,14 @@ QualType getDeclUsageType(ASTContext &C, NamedDecl *ND);
///
/// \param PreferredTypeIsPointer Whether the preferred type for the context
/// of this macro is a pointer type.
-unsigned getMacroUsagePriority(StringRef MacroName,
+unsigned getMacroUsagePriority(StringRef MacroName,
const LangOptions &LangOpts,
bool PreferredTypeIsPointer = false);
/// \brief Determine the libclang cursor kind associated with the given
/// declaration.
CXCursorKind getCursorKindForDecl(Decl *D);
-
+
class FunctionDecl;
class FunctionType;
class FunctionTemplateDecl;
@@ -182,22 +182,22 @@ public:
/// \brief Code completion occurred on the right-hand side of a member
/// access expression using the dot operator.
///
- /// The results of this completion are the members of the type being
- /// accessed. The type itself is available via
+ /// The results of this completion are the members of the type being
+ /// accessed. The type itself is available via
/// \c CodeCompletionContext::getType().
CCC_DotMemberAccess,
/// \brief Code completion occurred on the right-hand side of a member
/// access expression using the arrow operator.
///
- /// The results of this completion are the members of the type being
- /// accessed. The type itself is available via
+ /// The results of this completion are the members of the type being
+ /// accessed. The type itself is available via
/// \c CodeCompletionContext::getType().
CCC_ArrowMemberAccess,
/// \brief Code completion occurred on the right-hand side of an Objective-C
/// property access expression.
///
- /// The results of this completion are the members of the type being
- /// accessed. The type itself is available via
+ /// The results of this completion are the members of the type being
+ /// accessed. The type itself is available via
/// \c CodeCompletionContext::getType().
CCC_ObjCPropertyAccess,
/// \brief Code completion occurred after the "enum" keyword, to indicate
@@ -228,7 +228,7 @@ public:
CCC_MacroNameUse,
/// \brief Code completion occurred within a preprocessor expression.
CCC_PreprocessorExpression,
- /// \brief Code completion occurred where a preprocessor directive is
+ /// \brief Code completion occurred where a preprocessor directive is
/// expected.
CCC_PreprocessorDirective,
/// \brief Code completion occurred in a context where natural language is
@@ -246,14 +246,14 @@ public:
CCC_ParenthesizedExpression,
/// \brief Code completion where an Objective-C instance message is expected.
CCC_ObjCInstanceMessage,
- /// \brief Code completion where an Objective-C class message is expected.
+ /// \brief Code completion where an Objective-C class message is expected.
CCC_ObjCClassMessage,
/// \brief Code completion where the name of an Objective-C class is
/// expected.
CCC_ObjCInterfaceName,
/// \brief Code completion where an Objective-C category name is expected.
CCC_ObjCCategoryName,
- /// \brief An unknown context, in which we are recovering from a parsing
+ /// \brief An unknown context, in which we are recovering from a parsing
/// error and don't know which completions we should give.
CCC_Recovery
};
@@ -264,27 +264,27 @@ private:
/// \brief The type that would prefer to see at this point (e.g., the type
/// of an initializer or function parameter).
QualType PreferredType;
-
+
/// \brief The type of the base object in a member access expression.
QualType BaseType;
-
+
/// \brief The identifiers for Objective-C selector parts.
IdentifierInfo **SelIdents;
-
+
/// \brief The number of Objective-C selector parts.
unsigned NumSelIdents;
-
+
public:
/// \brief Construct a new code-completion context of the given kind.
- CodeCompletionContext(enum Kind Kind) : Kind(Kind), SelIdents(NULL),
+ CodeCompletionContext(enum Kind Kind) : Kind(Kind), SelIdents(NULL),
NumSelIdents(0) { }
-
+
/// \brief Construct a new code-completion context of the given kind.
CodeCompletionContext(enum Kind Kind, QualType T,
IdentifierInfo **SelIdents = NULL,
unsigned NumSelIdents = 0) : Kind(Kind),
SelIdents(SelIdents),
- NumSelIdents(NumSelIdents) {
+ NumSelIdents(NumSelIdents) {
if (Kind == CCC_DotMemberAccess || Kind == CCC_ArrowMemberAccess ||
Kind == CCC_ObjCPropertyAccess || Kind == CCC_ObjCClassMessage ||
Kind == CCC_ObjCInstanceMessage)
@@ -292,22 +292,22 @@ public:
else
PreferredType = T;
}
-
+
/// \brief Retrieve the kind of code-completion context.
enum Kind getKind() const { return Kind; }
-
+
/// \brief Retrieve the type that this expression would prefer to have, e.g.,
/// if the expression is a variable initializer or a function argument, the
/// type of the corresponding variable or function parameter.
QualType getPreferredType() const { return PreferredType; }
-
- /// \brief Retrieve the type of the base object in a member-access
+
+ /// \brief Retrieve the type of the base object in a member-access
/// expression.
QualType getBaseType() const { return BaseType; }
-
+
/// \brief Retrieve the Objective-C selector identifiers.
IdentifierInfo **getSelIdents() const { return SelIdents; }
-
+
/// \brief Retrieve the number of Objective-C selector identifiers.
unsigned getNumSelIdents() const { return NumSelIdents; }
@@ -320,10 +320,10 @@ public:
/// \brief A "string" used to describe how code completion can
/// be performed for an entity.
///
-/// A code completion string typically shows how a particular entity can be
+/// A code completion string typically shows how a particular entity can be
/// used. For example, the code completion string for a function would show
-/// the syntax to call it, including the parentheses, placeholders for the
-/// arguments, etc.
+/// the syntax to call it, including the parentheses, placeholders for the
+/// arguments, etc.
class CodeCompletionString {
public:
/// \brief The different kinds of "chunks" that can occur within a code
@@ -340,7 +340,7 @@ public:
/// an optional code completion string that describes the default arguments
/// in a function call.
CK_Optional,
- /// \brief A string that acts as a placeholder for, e.g., a function
+ /// \brief A string that acts as a placeholder for, e.g., a function
/// call argument.
CK_Placeholder,
/// \brief A piece of text that describes something about the result but
@@ -383,30 +383,30 @@ public:
/// platform).
CK_VerticalSpace
};
-
+
/// \brief One piece of the code completion string.
struct Chunk {
- /// \brief The kind of data stored in this piece of the code completion
+ /// \brief The kind of data stored in this piece of the code completion
/// string.
ChunkKind Kind;
-
+
union {
/// \brief The text string associated with a CK_Text, CK_Placeholder,
/// CK_Informative, or CK_Comma chunk.
- /// The string is owned by the chunk and will be deallocated
+ /// The string is owned by the chunk and will be deallocated
/// (with delete[]) when the chunk is destroyed.
const char *Text;
-
+
/// \brief The code completion string associated with a CK_Optional chunk.
/// The optional code completion string is owned by the chunk, and will
/// be deallocated (with delete) when the chunk is destroyed.
CodeCompletionString *Optional;
};
-
+
Chunk() : Kind(CK_Text), Text(0) { }
-
- Chunk(ChunkKind Kind, const char *Text = "");
-
+
+ explicit Chunk(ChunkKind Kind, const char *Text = "");
+
/// \brief Create a new text chunk.
static Chunk CreateText(const char *Text);
@@ -425,49 +425,56 @@ public:
/// \brief Create a new current-parameter chunk.
static Chunk CreateCurrentParameter(const char *CurrentParameter);
};
-
+
private:
/// \brief The number of chunks stored in this string.
unsigned NumChunks : 16;
-
+
/// \brief The number of annotations for this code-completion result.
unsigned NumAnnotations : 16;
/// \brief The priority of this code-completion string.
- unsigned Priority : 30;
-
+ unsigned Priority : 16;
+
/// \brief The availability of this code-completion result.
unsigned Availability : 2;
+ /// \brief The kind of the parent context.
+ unsigned ParentKind : 14;
+
+ /// \brief The name of the parent context.
+ StringRef ParentName;
+
CodeCompletionString(const CodeCompletionString &); // DO NOT IMPLEMENT
CodeCompletionString &operator=(const CodeCompletionString &); // DITTO
-
+
CodeCompletionString(const Chunk *Chunks, unsigned NumChunks,
unsigned Priority, CXAvailabilityKind Availability,
- const char **Annotations, unsigned NumAnnotations);
+ const char **Annotations, unsigned NumAnnotations,
+ CXCursorKind ParentKind, StringRef ParentName);
~CodeCompletionString() { }
-
+
friend class CodeCompletionBuilder;
friend class CodeCompletionResult;
-
+
public:
typedef const Chunk *iterator;
iterator begin() const { return reinterpret_cast<const Chunk *>(this + 1); }
iterator end() const { return begin() + NumChunks; }
bool empty() const { return NumChunks == 0; }
unsigned size() const { return NumChunks; }
-
+
const Chunk &operator[](unsigned I) const {
assert(I < size() && "Chunk index out-of-range");
return begin()[I];
}
-
+
/// \brief Returns the text in the TypedText chunk.
const char *getTypedText() const;
/// \brief Retrieve the priority of this code completion result.
unsigned getPriority() const { return Priority; }
-
+
/// \brief Retrieve the availability of this code completion result.
unsigned getAvailability() const { return Availability; }
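
The repacking above shrinks Priority from 30 bits to 16 to make room for the 14-bit ParentKind, and the bit-fields now total exactly two 32-bit words ahead of the ParentName member. A quick arithmetic check (not clang code):

    // NumChunks(16) + NumAnnotations(16)              = 32 bits
    // Priority(16) + Availability(2) + ParentKind(14) = 32 bits
    static_assert(16 + 16 + 16 + 2 + 14 == 64, "bit-fields fill two words");
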
@@ -476,106 +483,156 @@ public:
/// \brief Retrieve the annotation string specified by \c AnnotationNr.
const char *getAnnotation(unsigned AnnotationNr) const;
+
+ /// \brief Retrieve parent context's cursor kind.
+ CXCursorKind getParentContextKind() const {
+ return (CXCursorKind)ParentKind;
+ }
+
+ /// \brief Retrieve the name of the parent context.
+ StringRef getParentContextName() const {
+ return ParentName;
+ }
/// \brief Retrieve a string representation of the code completion string,
/// which is mainly useful for debugging.
- std::string getAsString() const;
+ std::string getAsString() const;
};
/// \brief An allocator used specifically for the purpose of code completion.
-class CodeCompletionAllocator : public llvm::BumpPtrAllocator {
+class CodeCompletionAllocator : public llvm::BumpPtrAllocator {
public:
/// \brief Copy the given string into this allocator.
const char *CopyString(StringRef String);
/// \brief Copy the given string into this allocator.
const char *CopyString(Twine String);
-
+
/// \brief Copy the given string into this allocator.
const char *CopyString(const char *String) {
return CopyString(StringRef(String));
}
-
+
/// \brief Copy the given string into this allocator.
const char *CopyString(const std::string &String) {
return CopyString(StringRef(String));
}
};
-
+
+/// \brief Allocator for a cached set of global code completions.
+class GlobalCodeCompletionAllocator
+ : public CodeCompletionAllocator,
+ public RefCountedBase<GlobalCodeCompletionAllocator>
+{
+
+};
+
+class CodeCompletionTUInfo {
+ llvm::DenseMap<DeclContext *, StringRef> ParentNames;
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator> AllocatorRef;
+
+public:
+ explicit CodeCompletionTUInfo(
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator> Allocator)
+ : AllocatorRef(Allocator) { }
+
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator> getAllocatorRef() const {
+ return AllocatorRef;
+ }
+ CodeCompletionAllocator &getAllocator() const {
+ assert(AllocatorRef);
+ return *AllocatorRef;
+ }
+
+ StringRef getParentName(DeclContext *DC);
+};
+
+} // end namespace clang
+
+namespace llvm {
+ template <> struct isPodLike<clang::CodeCompletionString::Chunk> {
+ static const bool value = true;
+ };
+}
+
+namespace clang {
+
/// \brief A builder class used to construct new code-completion strings.
class CodeCompletionBuilder {
-public:
+public:
typedef CodeCompletionString::Chunk Chunk;
-
+
private:
CodeCompletionAllocator &Allocator;
+ CodeCompletionTUInfo &CCTUInfo;
unsigned Priority;
CXAvailabilityKind Availability;
+ CXCursorKind ParentKind;
+ StringRef ParentName;
/// \brief The chunks stored in this string.
SmallVector<Chunk, 4> Chunks;
SmallVector<const char *, 2> Annotations;
-
+
public:
- CodeCompletionBuilder(CodeCompletionAllocator &Allocator)
- : Allocator(Allocator), Priority(0), Availability(CXAvailability_Available){
- }
-
CodeCompletionBuilder(CodeCompletionAllocator &Allocator,
- unsigned Priority, CXAvailabilityKind Availability)
- : Allocator(Allocator), Priority(Priority), Availability(Availability) { }
+ CodeCompletionTUInfo &CCTUInfo)
+ : Allocator(Allocator), CCTUInfo(CCTUInfo),
+ Priority(0), Availability(CXAvailability_Available),
+ ParentKind(CXCursor_NotImplemented) { }
+
+ CodeCompletionBuilder(CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ unsigned Priority, CXAvailabilityKind Availability)
+ : Allocator(Allocator), CCTUInfo(CCTUInfo),
+ Priority(Priority), Availability(Availability),
+ ParentKind(CXCursor_NotImplemented) { }
/// \brief Retrieve the allocator into which the code completion
/// strings should be allocated.
CodeCompletionAllocator &getAllocator() const { return Allocator; }
-
- /// \brief Take the resulting completion string.
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
+
+ /// \brief Take the resulting completion string.
///
/// This operation can only be performed once.
CodeCompletionString *TakeString();
-
+
/// \brief Add a new typed-text chunk.
- void AddTypedTextChunk(const char *Text) {
- Chunks.push_back(Chunk(CodeCompletionString::CK_TypedText, Text));
- }
-
+ void AddTypedTextChunk(const char *Text);
+
/// \brief Add a new text chunk.
- void AddTextChunk(const char *Text) {
- Chunks.push_back(Chunk::CreateText(Text));
- }
+ void AddTextChunk(const char *Text);
/// \brief Add a new optional chunk.
- void AddOptionalChunk(CodeCompletionString *Optional) {
- Chunks.push_back(Chunk::CreateOptional(Optional));
- }
-
+ void AddOptionalChunk(CodeCompletionString *Optional);
+
/// \brief Add a new placeholder chunk.
- void AddPlaceholderChunk(const char *Placeholder) {
- Chunks.push_back(Chunk::CreatePlaceholder(Placeholder));
- }
-
+ void AddPlaceholderChunk(const char *Placeholder);
+
/// \brief Add a new informative chunk.
- void AddInformativeChunk(const char *Text) {
- Chunks.push_back(Chunk::CreateInformative(Text));
- }
-
+ void AddInformativeChunk(const char *Text);
+
/// \brief Add a new result-type chunk.
- void AddResultTypeChunk(const char *ResultType) {
- Chunks.push_back(Chunk::CreateResultType(ResultType));
- }
-
+ void AddResultTypeChunk(const char *ResultType);
+
/// \brief Add a new current-parameter chunk.
- void AddCurrentParameterChunk(const char *CurrentParameter) {
- Chunks.push_back(Chunk::CreateCurrentParameter(CurrentParameter));
- }
-
+ void AddCurrentParameterChunk(const char *CurrentParameter);
+
/// \brief Add a new chunk.
- void AddChunk(Chunk C) { Chunks.push_back(C); }
+ void AddChunk(CodeCompletionString::ChunkKind CK, const char *Text = "");
void AddAnnotation(const char *A) { Annotations.push_back(A); }
-};
+
+ /// \brief Add the parent context information to this code completion.
+ void addParentContext(DeclContext *DC);
+ CXCursorKind getParentKind() const { return ParentKind; }
+ StringRef getParentName() const { return ParentName; }
+};
+
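
CodeCompletionBuilder now requires a CodeCompletionTUInfo alongside the allocator, and the Add*Chunk helpers have moved out of line. A usage sketch in the shape of the Sema callers; Allocator and CCTUInfo are assumed to be in scope, and CK_LeftParen/CK_RightParen are the paren kinds from the full ChunkKind list:

    CodeCompletionBuilder Builder(Allocator, CCTUInfo);
    Builder.AddResultTypeChunk("int");
    Builder.AddTypedTextChunk(Allocator.CopyString("myFunction"));
    Builder.AddChunk(CodeCompletionString::CK_LeftParen);
    Builder.AddPlaceholderChunk("int x");
    Builder.AddChunk(CodeCompletionString::CK_RightParen);
    CodeCompletionString *CCS = Builder.TakeString();  // one-shot, per docs
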
/// \brief Captures a result of code completion.
class CodeCompletionResult {
public:
@@ -586,23 +643,23 @@ public:
RK_Macro, ///< Refers to a macro
RK_Pattern ///< Refers to a precomputed pattern.
};
-
+
/// \brief The kind of result stored here.
ResultKind Kind;
-
+
+ /// \brief When Kind == RK_Declaration or RK_Pattern, the declaration we are
+ /// referring to. In the latter case, the declaration might be NULL.
+ NamedDecl *Declaration;
+
union {
- /// \brief When Kind == RK_Declaration, the declaration we are referring
- /// to.
- NamedDecl *Declaration;
-
- /// \brief When Kind == RK_Keyword, the string representing the keyword
+ /// \brief When Kind == RK_Keyword, the string representing the keyword
/// or symbol's spelling.
const char *Keyword;
-
+
/// \brief When Kind == RK_Pattern, the code-completion string that
/// describes the completion text to insert.
CodeCompletionString *Pattern;
-
+
/// \brief When Kind == RK_Macro, the identifier that refers to a macro.
IdentifierInfo *Macro;
};
@@ -612,21 +669,21 @@ public:
/// \brief The cursor kind that describes this result.
CXCursorKind CursorKind;
-
+
/// \brief The availability of this result.
CXAvailabilityKind Availability;
-
+
/// \brief Specifies which parameter (of a function, Objective-C method,
/// macro, etc.) we should start with when formatting the result.
unsigned StartParameter;
-
+
/// \brief Whether this result is hidden by another name.
bool Hidden : 1;
-
+
/// \brief Whether this result was found via lookup into a base class.
bool QualifierIsInformative : 1;
-
- /// \brief Whether this declaration is the beginning of a
+
+ /// \brief Whether this declaration is the beginning of a
/// nested-name-specifier and, therefore, should be followed by '::'.
bool StartsNestedNameSpecifier : 1;
@@ -637,43 +694,43 @@ public:
/// \brief Whether we're completing a declaration of the given entity,
/// rather than a use of that entity.
bool DeclaringEntity : 1;
-
+
/// \brief If the result should have a nested-name-specifier, this is it.
- /// When \c QualifierIsInformative, the nested-name-specifier is
+ /// When \c QualifierIsInformative, the nested-name-specifier is
/// informative rather than required.
NestedNameSpecifier *Qualifier;
-
+
/// \brief Build a result that refers to a declaration.
- CodeCompletionResult(NamedDecl *Declaration,
+ CodeCompletionResult(NamedDecl *Declaration,
NestedNameSpecifier *Qualifier = 0,
bool QualifierIsInformative = false,
bool Accessible = true)
- : Kind(RK_Declaration), Declaration(Declaration),
- Priority(getPriorityFromDecl(Declaration)),
- Availability(CXAvailability_Available), StartParameter(0),
+ : Kind(RK_Declaration), Declaration(Declaration),
+ Priority(getPriorityFromDecl(Declaration)),
+ Availability(CXAvailability_Available), StartParameter(0),
Hidden(false), QualifierIsInformative(QualifierIsInformative),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false), Qualifier(Qualifier) {
+ DeclaringEntity(false), Qualifier(Qualifier) {
computeCursorKindAndAvailability(Accessible);
}
-
+
/// \brief Build a result that refers to a keyword or symbol.
CodeCompletionResult(const char *Keyword, unsigned Priority = CCP_Keyword)
- : Kind(RK_Keyword), Keyword(Keyword), Priority(Priority),
- Availability(CXAvailability_Available),
- StartParameter(0), Hidden(false), QualifierIsInformative(0),
+ : Kind(RK_Keyword), Declaration(0), Keyword(Keyword), Priority(Priority),
+ Availability(CXAvailability_Available),
+ StartParameter(0), Hidden(false), QualifierIsInformative(0),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
DeclaringEntity(false), Qualifier(0) {
computeCursorKindAndAvailability();
}
-
+
/// \brief Build a result that refers to a macro.
CodeCompletionResult(IdentifierInfo *Macro, unsigned Priority = CCP_Macro)
- : Kind(RK_Macro), Macro(Macro), Priority(Priority),
- Availability(CXAvailability_Available), StartParameter(0),
- Hidden(false), QualifierIsInformative(0),
+ : Kind(RK_Macro), Declaration(0), Macro(Macro), Priority(Priority),
+ Availability(CXAvailability_Available), StartParameter(0),
+ Hidden(false), QualifierIsInformative(0),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false), Qualifier(0) {
+ DeclaringEntity(false), Qualifier(0) {
computeCursorKindAndAvailability();
}
@@ -681,27 +738,40 @@ public:
CodeCompletionResult(CodeCompletionString *Pattern,
unsigned Priority = CCP_CodePattern,
CXCursorKind CursorKind = CXCursor_NotImplemented,
- CXAvailabilityKind Availability = CXAvailability_Available)
- : Kind(RK_Pattern), Pattern(Pattern), Priority(Priority),
- CursorKind(CursorKind), Availability(Availability), StartParameter(0),
- Hidden(false), QualifierIsInformative(0),
- StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false), Qualifier(0)
- {
+ CXAvailabilityKind Availability = CXAvailability_Available,
+ NamedDecl *D = 0)
+ : Kind(RK_Pattern), Declaration(D), Pattern(Pattern), Priority(Priority),
+ CursorKind(CursorKind), Availability(Availability), StartParameter(0),
+ Hidden(false), QualifierIsInformative(0),
+ StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
+ DeclaringEntity(false), Qualifier(0)
+ {
}
-
+
+ /// \brief Build a result that refers to a pattern with an associated
+ /// declaration.
+ CodeCompletionResult(CodeCompletionString *Pattern, NamedDecl *D,
+ unsigned Priority)
+ : Kind(RK_Pattern), Declaration(D), Pattern(Pattern), Priority(Priority),
+ Availability(CXAvailability_Available), StartParameter(0),
+ Hidden(false), QualifierIsInformative(false),
+ StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
+ DeclaringEntity(false), Qualifier(0) {
+ computeCursorKindAndAvailability();
+ }
+
/// \brief Retrieve the declaration stored in this result.
NamedDecl *getDeclaration() const {
assert(Kind == RK_Declaration && "Not a declaration result");
return Declaration;
}
-
+
/// \brief Retrieve the keyword stored in this result.
const char *getKeyword() const {
assert(Kind == RK_Keyword && "Not a keyword result");
return Keyword;
}
-
+
/// \brief Create a new code-completion string that describes how to insert
/// this result into a program.
///
@@ -710,37 +780,42 @@ public:
/// \param Allocator The allocator that will be used to allocate the
/// string itself.
CodeCompletionString *CreateCodeCompletionString(Sema &S,
- CodeCompletionAllocator &Allocator);
-
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo);
+ CodeCompletionString *CreateCodeCompletionString(ASTContext &Ctx,
+ Preprocessor &PP,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo);
+
/// \brief Determine a base priority for the given declaration.
static unsigned getPriorityFromDecl(NamedDecl *ND);
-
+
private:
void computeCursorKindAndAvailability(bool Accessible = true);
};
-
+
bool operator<(const CodeCompletionResult &X, const CodeCompletionResult &Y);
-
-inline bool operator>(const CodeCompletionResult &X,
+
+inline bool operator>(const CodeCompletionResult &X,
const CodeCompletionResult &Y) {
return Y < X;
}
-
-inline bool operator<=(const CodeCompletionResult &X,
+
+inline bool operator<=(const CodeCompletionResult &X,
const CodeCompletionResult &Y) {
return !(Y < X);
}
-inline bool operator>=(const CodeCompletionResult &X,
+inline bool operator>=(const CodeCompletionResult &X,
const CodeCompletionResult &Y) {
return !(X < Y);
}
-
-raw_ostream &operator<<(raw_ostream &OS,
+
+raw_ostream &operator<<(raw_ostream &OS,
const CodeCompletionString &CCS);
-/// \brief Abstract interface for a consumer of code-completion
+/// \brief Abstract interface for a consumer of code-completion
/// information.
class CodeCompleteConsumer {
protected:
@@ -750,15 +825,15 @@ protected:
/// \brief Whether to include code patterns (such as for loops) within
/// the completion results.
bool IncludeCodePatterns;
-
+
/// \brief Whether to include global (top-level) declarations and names in
/// the completion results.
bool IncludeGlobals;
-
+
/// \brief Whether the output format for the code-completion consumer is
/// binary.
bool OutputIsBinary;
-
+
public:
class OverloadCandidate {
public:
@@ -772,25 +847,25 @@ public:
/// for which we only have a function prototype.
CK_FunctionType
};
-
+
private:
/// \brief The kind of overload candidate.
CandidateKind Kind;
-
+
union {
- /// \brief The function overload candidate, available when
+ /// \brief The function overload candidate, available when
/// Kind == CK_Function.
FunctionDecl *Function;
-
+
/// \brief The function template overload candidate, available when
/// Kind == CK_FunctionTemplate.
FunctionTemplateDecl *FunctionTemplate;
-
+
/// \brief The function type that describes the entity being called,
/// when Kind == CK_FunctionType.
const FunctionType *Type;
};
-
+
public:
OverloadCandidate(FunctionDecl *Function)
: Kind(CK_Function), Function(Function) { }
@@ -803,55 +878,56 @@ public:
/// \brief Determine the kind of overload candidate.
CandidateKind getKind() const { return Kind; }
-
- /// \brief Retrieve the function overload candidate or the templated
+
+ /// \brief Retrieve the function overload candidate or the templated
/// function declaration for a function template.
FunctionDecl *getFunction() const;
-
+
/// \brief Retrieve the function template overload candidate.
FunctionTemplateDecl *getFunctionTemplate() const {
assert(getKind() == CK_FunctionTemplate && "Not a function template");
return FunctionTemplate;
}
-
+
/// \brief Retrieve the function type of the entity, regardless of how the
/// function is stored.
const FunctionType *getFunctionType() const;
-
+
/// \brief Create a new code-completion string that describes the function
/// signature of this overload candidate.
- CodeCompletionString *CreateSignatureString(unsigned CurrentArg,
+ CodeCompletionString *CreateSignatureString(unsigned CurrentArg,
Sema &S,
- CodeCompletionAllocator &Allocator) const;
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) const;
};
-
+
CodeCompleteConsumer() : IncludeMacros(false), IncludeCodePatterns(false),
IncludeGlobals(true), OutputIsBinary(false) { }
-
+
CodeCompleteConsumer(bool IncludeMacros, bool IncludeCodePatterns,
bool IncludeGlobals, bool OutputIsBinary)
: IncludeMacros(IncludeMacros), IncludeCodePatterns(IncludeCodePatterns),
IncludeGlobals(IncludeGlobals), OutputIsBinary(OutputIsBinary) { }
-
+
/// \brief Whether the code-completion consumer wants to see macros.
bool includeMacros() const { return IncludeMacros; }
/// \brief Whether the code-completion consumer wants to see code patterns.
bool includeCodePatterns() const { return IncludeCodePatterns; }
-
+
/// \brief Whether to include global (top-level) declaration results.
bool includeGlobals() const { return IncludeGlobals; }
-
+
/// \brief Determine whether the output of this consumer is binary.
bool isOutputBinary() const { return OutputIsBinary; }
-
+
/// \brief Deregisters and destroys this code-completion consumer.
virtual ~CodeCompleteConsumer();
/// \name Code-completion callbacks
//@{
/// \brief Process the finalized code-completion results.
- virtual void ProcessCodeCompleteResults(Sema &S,
+ virtual void ProcessCodeCompleteResults(Sema &S,
CodeCompletionContext Context,
CodeCompletionResult *Results,
unsigned NumResults) { }
@@ -868,20 +944,22 @@ public:
OverloadCandidate *Candidates,
unsigned NumCandidates) { }
//@}
-
+
/// \brief Retrieve the allocator that will be used to allocate
/// code completion strings.
virtual CodeCompletionAllocator &getAllocator() = 0;
+
+ virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() = 0;
};
-/// \brief A simple code-completion consumer that prints the results it
+/// \brief A simple code-completion consumer that prints the results it
/// receives in a simple format.
class PrintingCodeCompleteConsumer : public CodeCompleteConsumer {
/// \brief The raw output stream.
raw_ostream &OS;
-
- CodeCompletionAllocator Allocator;
-
+
+ CodeCompletionTUInfo CCTUInfo;
+
public:
/// \brief Create a new printing code-completion consumer that prints its
/// results to the given raw output stream.
@@ -889,21 +967,26 @@ public:
bool IncludeGlobals,
raw_ostream &OS)
: CodeCompleteConsumer(IncludeMacros, IncludeCodePatterns, IncludeGlobals,
- false), OS(OS) {}
-
+ false), OS(OS),
+ CCTUInfo(new GlobalCodeCompletionAllocator) {}
+
/// \brief Prints the finalized code-completion results.
- virtual void ProcessCodeCompleteResults(Sema &S,
+ virtual void ProcessCodeCompleteResults(Sema &S,
CodeCompletionContext Context,
CodeCompletionResult *Results,
unsigned NumResults);
-
+
virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
- unsigned NumCandidates);
-
- virtual CodeCompletionAllocator &getAllocator() { return Allocator; }
+ unsigned NumCandidates);
+
+ virtual CodeCompletionAllocator &getAllocator() {
+ return CCTUInfo.getAllocator();
+ }
+
+ virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() { return CCTUInfo; }
};
-
+
} // end namespace clang
#endif // LLVM_CLANG_SEMA_CODECOMPLETECONSUMER_H
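
Since getCodeCompletionTUInfo() is pure virtual, every consumer must now own (or forward to) a CodeCompletionTUInfo; PrintingCodeCompleteConsumer above shows the intended shape. A hedged sketch of a minimal do-nothing consumer following the same pattern:

    // Sketch of a minimal consumer; mirrors PrintingCodeCompleteConsumer's
    // ownership of the TU-info and its allocator.
    class NullCodeCompleteConsumer : public CodeCompleteConsumer {
      CodeCompletionTUInfo CCTUInfo;
    public:
      NullCodeCompleteConsumer()
        : CodeCompleteConsumer(false, false, true, false),
          CCTUInfo(new GlobalCodeCompletionAllocator) {}
      virtual CodeCompletionAllocator &getAllocator() {
        return CCTUInfo.getAllocator();
      }
      virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() {
        return CCTUInfo;
      }
    };
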
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
index 3260a70..67fd393 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
@@ -25,9 +25,11 @@
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/Lex/Token.h"
#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/Lambda.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
namespace clang {
@@ -243,6 +245,7 @@ public:
static const TST TST_char16 = clang::TST_char16;
static const TST TST_char32 = clang::TST_char32;
static const TST TST_int = clang::TST_int;
+ static const TST TST_int128 = clang::TST_int128;
static const TST TST_half = clang::TST_half;
static const TST TST_float = clang::TST_float;
static const TST TST_double = clang::TST_double;
@@ -445,7 +448,10 @@ public:
CXXScopeSpec &getTypeSpecScope() { return TypeScope; }
const CXXScopeSpec &getTypeSpecScope() const { return TypeScope; }
- const SourceRange &getSourceRange() const { return Range; }
+ const SourceRange &getSourceRange() const LLVM_READONLY { return Range; }
+ SourceLocation getLocStart() const LLVM_READONLY { return Range.getBegin(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return Range.getEnd(); }
+
SourceLocation getTypeSpecWidthLoc() const { return TSWLoc; }
SourceLocation getTypeSpecComplexLoc() const { return TSCLoc; }
SourceLocation getTypeSpecSignLoc() const { return TSSLoc; }
@@ -609,6 +615,11 @@ public:
bool isConstexprSpecified() const { return Constexpr_specified; }
SourceLocation getConstexprSpecLoc() const { return ConstexprLoc; }
+ void ClearConstexprSpec() {
+ Constexpr_specified = false;
+ ConstexprLoc = SourceLocation();
+ }
+
AttributePool &getAttributePool() const {
return Attrs.getPool();
}
@@ -962,9 +973,11 @@ public:
void setTemplateId(TemplateIdAnnotation *TemplateId);
/// \brief Return the source range that covers this unqualified-id.
- SourceRange getSourceRange() const {
+ SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(StartLocation, EndLocation);
}
+ SourceLocation getLocStart() const LLVM_READONLY { return StartLocation; }
+ SourceLocation getLocEnd() const LLVM_READONLY { return EndLocation; }
};
/// CachedTokens - A set of tokens that has been cached for later
@@ -1105,6 +1118,16 @@ struct DeclaratorChunk {
/// If this is an invalid location, there is no ref-qualifier.
unsigned RefQualifierLoc;
+ /// \brief The location of the const-qualifier, if any.
+ ///
+ /// If this is an invalid location, there is no const-qualifier.
+ unsigned ConstQualifierLoc;
+
+ /// \brief The location of the volatile-qualifier, if any.
+ ///
+ /// If this is an invalid location, there is no volatile-qualifier.
+ unsigned VolatileQualifierLoc;
+
/// \brief The location of the 'mutable' qualifier in a lambda-declarator, if
/// any.
unsigned MutableLoc;
@@ -1170,6 +1193,16 @@ struct DeclaratorChunk {
return SourceLocation::getFromRawEncoding(RefQualifierLoc);
}
+ /// \brief Retrieve the location of the const-qualifier, if any.
+ SourceLocation getConstQualifierLoc() const {
+ return SourceLocation::getFromRawEncoding(ConstQualifierLoc);
+ }
+
+ /// \brief Retrieve the location of the volatile-qualifier, if any.
+ SourceLocation getVolatileQualifierLoc() const {
+ return SourceLocation::getFromRawEncoding(VolatileQualifierLoc);
+ }
+
/// \brief Retrieve the location of the 'mutable' qualifier, if any.
SourceLocation getMutableLoc() const {
return SourceLocation::getFromRawEncoding(MutableLoc);
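
The new ConstQualifierLoc/VolatileQualifierLoc fields follow the same convention as RefQualifierLoc and MutableLoc: locations are stored as raw unsigned values so the chunk stays POD inside DeclaratorChunk's union, and are decoded on access. A standalone sketch of that round-trip (the holder type is illustrative, not from the patch):

    struct LocHolder {
      unsigned RawLoc; // SourceLocation kept as a plain integer
      void set(SourceLocation L) { RawLoc = L.getRawEncoding(); }
      SourceLocation get() const {
        return SourceLocation::getFromRawEncoding(RawLoc);
      }
    };
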
@@ -1230,7 +1263,6 @@ struct DeclaratorChunk {
void destroy() {
switch (Kind) {
- default: llvm_unreachable("Unknown decl type!");
case DeclaratorChunk::Function: return Fun.destroy();
case DeclaratorChunk::Pointer: return Ptr.destroy();
case DeclaratorChunk::BlockPointer: return Cls.destroy();
@@ -1306,6 +1338,8 @@ struct DeclaratorChunk {
unsigned TypeQuals,
bool RefQualifierIsLvalueRef,
SourceLocation RefQualifierLoc,
+ SourceLocation ConstQualifierLoc,
+ SourceLocation VolatileQualifierLoc,
SourceLocation MutableLoc,
ExceptionSpecificationType ESpecType,
SourceLocation ESpecLoc,
@@ -1357,6 +1391,15 @@ struct DeclaratorChunk {
};
+/// \brief Describes the kind of function definition (if any) provided for
+/// a function.
+enum FunctionDefinitionKind {
+ FDK_Declaration,
+ FDK_Definition,
+ FDK_Defaulted,
+ FDK_Deleted
+};
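
Declarator stores this enum in a two-bit field (FunctionDefinition : 2 below) and casts back on read, replacing the old single bool. A self-contained sketch of the packing idiom:

    struct DeclBits {
      unsigned FunctionDefinition : 2; // wide enough for the four FDK values
      FunctionDefinitionKind getKind() const {
        return (FunctionDefinitionKind)FunctionDefinition;
      }
      void setKind(FunctionDefinitionKind K) { FunctionDefinition = K; }
    };
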
+
/// Declarator - Information about one declarator, including the parsed type
/// information and the identifier. When the declarator is fully formed, this
/// is turned into the appropriate Decl object.
@@ -1385,9 +1428,11 @@ public:
CXXCatchContext, // C++ catch exception-declaration
ObjCCatchContext, // Objective-C catch exception-declaration
BlockLiteralContext, // Block literal declarator.
+ LambdaExprContext, // Lambda-expression declarator.
+ TrailingReturnContext, // C++11 trailing-type-specifier.
TemplateTypeArgContext, // Template type argument.
- AliasDeclContext, // C++0x alias-declaration.
- AliasTemplateContext // C++0x alias-declaration template.
+ AliasDeclContext, // C++11 alias-declaration.
+ AliasTemplateContext // C++11 alias-declaration template.
};
private:
@@ -1412,8 +1457,11 @@ private:
/// GroupingParens - Set by Parser::ParseParenDeclarator().
bool GroupingParens : 1;
- /// FunctionDefinition - Is this Declarator for a function or member defintion
- bool FunctionDefinition : 1;
+ /// FunctionDefinition - Is this Declarator for a function or member
+ /// definition and, if so, what kind?
+ ///
+ /// Actually a FunctionDefinitionKind.
+ unsigned FunctionDefinition : 2;
// Redeclaration - Is this Declarator a redeclaration.
bool Redeclaration : 1;
@@ -1433,6 +1481,10 @@ private:
/// Extension - true if the declaration is preceded by __extension__.
bool Extension : 1;
+ /// \brief If this is the second or subsequent declarator in this declaration,
+ /// the location of the comma before this declarator.
+ SourceLocation CommaLoc;
+
/// \brief If provided, the source location of the ellipsis used to describe
/// this declarator as a parameter pack.
SourceLocation EllipsisLoc;
@@ -1443,7 +1495,8 @@ public:
Declarator(const DeclSpec &ds, TheContext C)
: DS(ds), Range(ds.getSourceRange()), Context(C),
InvalidType(DS.getTypeSpecType() == DeclSpec::TST_error),
- GroupingParens(false), FunctionDefinition(false), Redeclaration(false),
+ GroupingParens(false), FunctionDefinition(FDK_Declaration),
+ Redeclaration(false),
Attrs(ds.getAttributePool().getFactory()), AsmLabel(0),
InlineParamsUsed(false), Extension(false) {
}
@@ -1484,7 +1537,9 @@ public:
}
/// getSourceRange - Get the source range that spans this declarator.
- const SourceRange &getSourceRange() const { return Range; }
+ const SourceRange &getSourceRange() const LLVM_READONLY { return Range; }
+ SourceLocation getLocStart() const LLVM_READONLY { return Range.getBegin(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return Range.getEnd(); }
void SetSourceRange(SourceRange R) { Range = R; }
/// SetRangeBegin - Set the start of the source range to Loc, unless it's
@@ -1521,6 +1576,8 @@ public:
Attrs.clear();
AsmLabel = 0;
InlineParamsUsed = false;
+ CommaLoc = SourceLocation();
+ EllipsisLoc = SourceLocation();
}
/// mayOmitIdentifier - Return true if the identifier is either optional or
@@ -1547,7 +1604,9 @@ public:
case CXXCatchContext:
case ObjCCatchContext:
case BlockLiteralContext:
+ case LambdaExprContext:
case TemplateTypeArgContext:
+ case TrailingReturnContext:
return true;
}
llvm_unreachable("unknown context kind!");
@@ -1577,7 +1636,9 @@ public:
case ObjCParameterContext:
case ObjCResultContext:
case BlockLiteralContext:
+ case LambdaExprContext:
case TemplateTypeArgContext:
+ case TrailingReturnContext:
return false;
}
llvm_unreachable("unknown context kind!");
@@ -1588,15 +1649,31 @@ public:
bool mayBeFollowedByCXXDirectInit() const {
if (hasGroupingParens()) return false;
+ if (getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ return false;
+
+ if (getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern &&
+ Context != FileContext)
+ return false;
+
+ // Special names can't have direct initializers.
+ if (Name.getKind() != UnqualifiedId::IK_Identifier)
+ return false;
+
switch (Context) {
case FileContext:
case BlockContext:
case ForContext:
return true;
+ case ConditionContext:
+ // This may not be followed by a direct initializer, but it can't be a
+ // function declaration either, and we'd prefer to perform a tentative
+ // parse in order to produce the right diagnostic.
+ return true;
+
case KNRTypeListContext:
case MemberContext:
- case ConditionContext:
case PrototypeContext:
case ObjCParameterContext:
case ObjCResultContext:
@@ -1608,7 +1685,9 @@ public:
case AliasDeclContext:
case AliasTemplateContext:
case BlockLiteralContext:
+ case LambdaExprContext:
case TemplateTypeArgContext:
+ case TrailingReturnContext:
return false;
}
llvm_unreachable("unknown context kind!");
@@ -1696,7 +1775,6 @@ public:
return !DeclTypeInfo[i].Arr.NumElts;
}
llvm_unreachable("Invalid type chunk");
- return false;
}
return false;
}
@@ -1721,7 +1799,6 @@ public:
return false;
}
llvm_unreachable("Invalid type chunk");
- return false;
}
return false;
}
@@ -1800,13 +1877,26 @@ public:
void setGroupingParens(bool flag) { GroupingParens = flag; }
bool hasGroupingParens() const { return GroupingParens; }
-
+
+ bool isFirstDeclarator() const { return !CommaLoc.isValid(); }
+ SourceLocation getCommaLoc() const { return CommaLoc; }
+ void setCommaLoc(SourceLocation CL) { CommaLoc = CL; }
+
bool hasEllipsis() const { return EllipsisLoc.isValid(); }
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
void setEllipsisLoc(SourceLocation EL) { EllipsisLoc = EL; }
- void setFunctionDefinition(bool Val) { FunctionDefinition = Val; }
- bool isFunctionDefinition() const { return FunctionDefinition; }
+ void setFunctionDefinitionKind(FunctionDefinitionKind Val) {
+ FunctionDefinition = Val;
+ }
+
+ bool isFunctionDefinition() const {
+ return getFunctionDefinitionKind() != FDK_Declaration;
+ }
+
+ FunctionDefinitionKind getFunctionDefinitionKind() const {
+ return (FunctionDefinitionKind)FunctionDefinition;
+ }
void setRedeclaration(bool Val) { Redeclaration = Val; }
bool isRedeclaration() const { return Redeclaration; }
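
CommaLoc lets later consumers tell the first declarator in a declaration group from the rest. A hypothetical caller-side sketch (the grouping container and comma list are assumptions, not part of the patch):

    // Mark each declarator after the first with the comma that introduced it;
    // isFirstDeclarator()/getCommaLoc() can then drive fix-it placement.
    void markDeclaratorGroup(SmallVectorImpl<Declarator *> &Group,
                             llvm::ArrayRef<SourceLocation> Commas) {
      for (unsigned i = 1, e = Group.size(); i != e; ++i)
        Group[i]->setCommaLoc(Commas[i - 1]);
    }
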
@@ -1855,38 +1945,24 @@ private:
SourceLocation LastLocation;
};
-/// LambdaCaptureDefault - The default, if any, capture method for a
-/// lambda expression.
-enum LambdaCaptureDefault {
- LCD_None,
- LCD_ByCopy,
- LCD_ByRef
-};
-
-/// LambdaCaptureKind - The different capture forms in a lambda
-/// introducer: 'this' or a copied or referenced variable.
-enum LambdaCaptureKind {
- LCK_This,
- LCK_ByCopy,
- LCK_ByRef
-};
-
/// LambdaCapture - An individual capture in a lambda introducer.
struct LambdaCapture {
LambdaCaptureKind Kind;
SourceLocation Loc;
IdentifierInfo* Id;
-
- LambdaCapture(LambdaCaptureKind Kind,
- SourceLocation Loc,
- IdentifierInfo* Id = 0)
- : Kind(Kind), Loc(Loc), Id(Id)
+ SourceLocation EllipsisLoc;
+
+ LambdaCapture(LambdaCaptureKind Kind, SourceLocation Loc,
+ IdentifierInfo* Id = 0,
+ SourceLocation EllipsisLoc = SourceLocation())
+ : Kind(Kind), Loc(Loc), Id(Id), EllipsisLoc(EllipsisLoc)
{}
};
/// LambdaIntroducer - Represents a complete lambda introducer.
struct LambdaIntroducer {
SourceRange Range;
+ SourceLocation DefaultLoc;
LambdaCaptureDefault Default;
llvm::SmallVector<LambdaCapture, 4> Captures;
@@ -1896,8 +1972,9 @@ struct LambdaIntroducer {
/// addCapture - Append a capture in a lambda introducer.
void addCapture(LambdaCaptureKind Kind,
SourceLocation Loc,
- IdentifierInfo* Id = 0) {
- Captures.push_back(LambdaCapture(Kind, Loc, Id));
+ IdentifierInfo* Id = 0,
+ SourceLocation EllipsisLoc = SourceLocation()) {
+ Captures.push_back(LambdaCapture(Kind, Loc, Id, EllipsisLoc));
}
};
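
With the extra EllipsisLoc parameter, pack-expansion captures such as '&values...' can now be recorded in a C++11 lambda introducer. A hedged sketch of filling one in (the SourceLocations and the IdentifierInfo are assumed to come from the parser):

    LambdaIntroducer Intro;
    Intro.Default = LCD_None;
    // By-reference pack-expansion capture '&values...':
    Intro.addCapture(LCK_ByRef, ValuesLoc, ValuesII, EllipsisLoc);
    // A 'this' capture carries neither an identifier nor an ellipsis:
    Intro.addCapture(LCK_This, ThisLoc);
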
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
index dd2603d..3320cd8 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
@@ -122,8 +122,9 @@ public:
void Destroy();
static DelayedDiagnostic makeDeprecation(SourceLocation Loc,
- const NamedDecl *D,
- StringRef Msg);
+ const NamedDecl *D,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ StringRef Msg);
static DelayedDiagnostic makeAccess(SourceLocation Loc,
const AccessedEntity &Entity) {
@@ -187,12 +188,17 @@ public:
assert(Kind == ForbiddenType && "not a forbidden-type diagnostic");
return QualType::getFromOpaquePtr(ForbiddenTypeData.OperandType);
}
+
+ const ObjCInterfaceDecl *getUnknownObjCClass() const {
+ return DeprecationData.UnknownObjCClass;
+ }
private:
union {
/// Deprecation.
struct {
const NamedDecl *Decl;
+ const ObjCInterfaceDecl *UnknownObjCClass;
const char *Message;
size_t MessageLen;
} DeprecationData;
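
Callers of makeDeprecation now thread through the (possibly null) ObjCInterfaceDecl so a delayed diagnostic can later recover it via getUnknownObjCClass(). A hedged sketch of the widened call (Loc and D assumed in scope):

    DelayedDiagnostic DD = DelayedDiagnostic::makeDeprecation(
        Loc, D, /*UnknownObjCClass=*/0, "use the replacement API instead");
    // ...when the diagnostic is finally emitted:
    const ObjCInterfaceDecl *Class = DD.getUnknownObjCClass();
    (void)Class; // may legitimately be null
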
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
index 7b83625..785bf6a 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
@@ -59,10 +59,7 @@ public:
/// \brief Load the contents of the global method pool for a given
/// selector.
- ///
- /// \returns a pair of Objective-C methods lists containing the
- /// instance and factory methods, respectively, with this selector.
- virtual std::pair<ObjCMethodList,ObjCMethodList> ReadMethodPool(Selector Sel);
+ virtual void ReadMethodPool(Selector Sel);
/// \brief Load the set of namespaces that are known to the external source,
/// which will be used during typo correction.
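
External sources now report method-pool contents as a side effect instead of returning the instance/factory lists by value (presumably recording them through the Sema they are attached to). A minimal conforming override, as a sketch:

    class MyExternalSemaSource : public ExternalSemaSource {
    public:
      // New contract: no return value; doing nothing means "selector unknown".
      virtual void ReadMethodPool(Selector Sel) { }
    };
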
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h b/contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h
index 85d8ad2..dff0134 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h
@@ -16,6 +16,7 @@
#define LLVM_CLANG_AST_SEMA_IDENTIFIERRESOLVER_H
#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/SmallVector.h"
namespace clang {
@@ -23,9 +24,11 @@ class ASTContext;
class Decl;
class DeclContext;
class DeclarationName;
+class ExternalPreprocessorSource;
class NamedDecl;
+class Preprocessor;
class Scope;
-
+
/// IdentifierResolver - Keeps track of shadowed decls on enclosing
/// scopes. It manages the shadowing chains of declaration names and
/// implements efficient decl lookup based on a declaration name.
@@ -141,10 +144,10 @@ public:
};
/// begin - Returns an iterator for decls with the name 'Name'.
- static iterator begin(DeclarationName Name);
+ iterator begin(DeclarationName Name);
/// end - Returns an iterator that has 'finished'.
- static iterator end() {
+ iterator end() {
return iterator();
}
@@ -175,23 +178,29 @@ public:
/// position.
void InsertDeclAfter(iterator Pos, NamedDecl *D);
- /// \brief Link the declaration into the chain of declarations for
- /// the given identifier.
+ /// \brief Try to add the given declaration to the top level scope, if it
+ /// (or a redeclaration of it) hasn't already been added.
///
- /// This is a lower-level routine used by the AST reader to link a
- /// declaration into a specific IdentifierInfo before the
- /// declaration actually has a name.
- void AddDeclToIdentifierChain(IdentifierInfo *II, NamedDecl *D);
-
- explicit IdentifierResolver(const LangOptions &LangOpt);
+ /// \param D The externally-produced declaration to add.
+ ///
+ /// \param Name The name of the externally-produced declaration.
+ ///
+ /// \returns true if the declaration was added, false otherwise.
+ bool tryAddTopLevelDecl(NamedDecl *D, DeclarationName Name);
+
+ explicit IdentifierResolver(Preprocessor &PP);
~IdentifierResolver();
private:
const LangOptions &LangOpt;
-
+ Preprocessor &PP;
+
class IdDeclInfoMap;
IdDeclInfoMap *IdDeclInfos;
+ void updatingIdentifier(IdentifierInfo &II);
+ void readingIdentifier(IdentifierInfo &II);
+
/// FETokenInfo contains a Decl pointer if lower bit == 0.
static inline bool isDeclPtr(void *Ptr) {
return (reinterpret_cast<uintptr_t>(Ptr) & 0x1) == 0;
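
begin() and end() losing their static-ness matches the new Preprocessor reference: readingIdentifier()/updatingIdentifier() suggest the resolver may now have to consult an external source when a name is first touched. Iteration itself is unchanged; a sketch (IdResolver assumed to be Sema's instance):

    for (IdentifierResolver::iterator I = IdResolver.begin(Name),
                                      E = IdResolver.end();
         I != E; ++I) {
      NamedDecl *ND = *I;
      // ...filter by scope / identifier namespace as needed.
      (void)ND;
    }
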
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
index e69bebd..4433843 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
@@ -1,4 +1,4 @@
-//===--- SemaInit.h - Semantic Analysis for Initializers --------*- C++ -*-===//
+//===--- Initialization.h - Semantic Analysis for Initializers --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -70,7 +70,10 @@ public:
EK_BlockElement,
/// \brief The entity being initialized is the real or imaginary part of a
/// complex number.
- EK_ComplexElement
+ EK_ComplexElement,
+ /// \brief The entity being initialized is the field that captures a
+ /// variable in a lambda.
+ EK_LambdaCapture
};
private:
@@ -85,7 +88,7 @@ private:
QualType Type;
union {
- /// \brief When Kind == EK_Variable or EK_Member, the VarDecl or
+ /// \brief When Kind == EK_Variable, or EK_Member, the VarDecl or
/// FieldDecl, respectively.
DeclaratorDecl *VariableOrMember;
@@ -98,7 +101,7 @@ private:
TypeSourceInfo *TypeInfo;
struct {
- /// \brief When Kind == EK_Result, EK_Exception, or EK_New, the
+ /// \brief When Kind == EK_Result, EK_Exception, EK_New, the
/// location of the 'return', 'throw', or 'new' keyword,
/// respectively. When Kind == EK_Temporary, the location where
/// the temporary is being created.
@@ -118,6 +121,14 @@ private:
/// EK_ComplexElement, the index of the array or vector element being
/// initialized.
unsigned Index;
+
+ struct {
+ /// \brief The variable being captured by an EK_LambdaCapture.
+ VarDecl *Var;
+
+ /// \brief The source location at which the capture occurs.
+ unsigned Location;
+ } Capture;
};
InitializedEntity() { }
@@ -147,6 +158,14 @@ private:
InitializedEntity(ASTContext &Context, unsigned Index,
const InitializedEntity &Parent);
+ /// \brief Create the initialization entity for a lambda capture.
+ InitializedEntity(VarDecl *Var, FieldDecl *Field, SourceLocation Loc)
+ : Kind(EK_LambdaCapture), Parent(0), Type(Field->getType())
+ {
+ Capture.Var = Var;
+ Capture.Location = Loc.getRawEncoding();
+ }
+
public:
/// \brief Create the initialization entity for a variable.
static InitializedEntity InitializeVariable(VarDecl *Var) {
@@ -156,7 +175,7 @@ public:
/// \brief Create the initialization entity for a parameter.
static InitializedEntity InitializeParameter(ASTContext &Context,
ParmVarDecl *Parm) {
- bool Consumed = (Context.getLangOptions().ObjCAutoRefCount &&
+ bool Consumed = (Context.getLangOpts().ObjCAutoRefCount &&
Parm->hasAttr<NSConsumedAttr>());
InitializedEntity Entity;
@@ -246,6 +265,13 @@ public:
return InitializedEntity(Context, Index, Parent);
}
+ /// \brief Create the initialization entity for a lambda capture.
+ static InitializedEntity InitializeLambdaCapture(VarDecl *Var,
+ FieldDecl *Field,
+ SourceLocation Loc) {
+ return InitializedEntity(Var, Field, Loc);
+ }
+
/// \brief Determine the kind of initialization.
EntityKind getKind() const { return Kind; }
@@ -314,9 +340,22 @@ public:
/// element, sets the element index.
void setElementIndex(unsigned Index) {
assert(getKind() == EK_ArrayElement || getKind() == EK_VectorElement ||
- EK_ComplexElement);
+ getKind() == EK_ComplexElement);
this->Index = Index;
}
+
+ /// \brief Retrieve the variable for a captured variable in a lambda.
+ VarDecl *getCapturedVar() const {
+ assert(getKind() == EK_LambdaCapture && "Not a lambda capture!");
+ return Capture.Var;
+ }
+
+ /// \brief Determine the location of the capture when initializing the
+ /// field from a captured variable in a lambda.
+ SourceLocation getCaptureLoc() const {
+ assert(getKind() == EK_LambdaCapture && "Not a lambda capture!");
+ return SourceLocation::getFromRawEncoding(Capture.Location);
+ }
};
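
A hedged sketch of how the new entity kind might be used when building a lambda's closure type (Var, Field, and CaptureLoc stand in for the real call site):

    InitializedEntity Entity =
        InitializedEntity::InitializeLambdaCapture(Var, Field, CaptureLoc);
    assert(Entity.getKind() == InitializedEntity::EK_LambdaCapture);
    VarDecl *Captured = Entity.getCapturedVar();
    SourceLocation Loc = Entity.getCaptureLoc(); // decodes the raw encoding
    (void)Captured; (void)Loc;
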
/// \brief Describes the kind of initialization being performed, along with
@@ -326,36 +365,36 @@ class InitializationKind {
public:
/// \brief The kind of initialization being performed.
enum InitKind {
- IK_Direct, ///< Direct initialization
- IK_Copy, ///< Copy initialization
- IK_Default, ///< Default initialization
- IK_Value ///< Value initialization
+ IK_Direct, ///< Direct initialization
+ IK_DirectList, ///< Direct list-initialization
+ IK_Copy, ///< Copy initialization
+ IK_Default, ///< Default initialization
+ IK_Value ///< Value initialization
};
private:
- /// \brief The kind of initialization that we're storing.
- enum StoredInitKind {
- SIK_Direct = IK_Direct, ///< Direct initialization
- SIK_Copy = IK_Copy, ///< Copy initialization
- SIK_Default = IK_Default, ///< Default initialization
- SIK_Value = IK_Value, ///< Value initialization
- SIK_ImplicitValue, ///< Implicit value initialization
- SIK_DirectCast, ///< Direct initialization due to a cast
- /// \brief Direct initialization due to a C-style cast.
- SIK_DirectCStyleCast,
- /// \brief Direct initialization due to a functional-style cast.
- SIK_DirectFunctionalCast
+ /// \brief The context of the initialization.
+ enum InitContext {
+ IC_Normal, ///< Normal context
+ IC_ExplicitConvs, ///< Normal context, but allows explicit conversion funcs
+ IC_Implicit, ///< Implicit context (value initialization)
+ IC_StaticCast, ///< Static cast context
+ IC_CStyleCast, ///< C-style cast context
+ IC_FunctionalCast ///< Functional cast context
};
/// \brief The kind of initialization being performed.
- StoredInitKind Kind;
+ InitKind Kind : 8;
+
+ /// \brief The context of the initialization.
+ InitContext Context : 8;
/// \brief The source locations involved in the initialization.
SourceLocation Locations[3];
- InitializationKind(StoredInitKind Kind, SourceLocation Loc1,
+ InitializationKind(InitKind Kind, InitContext Context, SourceLocation Loc1,
SourceLocation Loc2, SourceLocation Loc3)
- : Kind(Kind)
+ : Kind(Kind), Context(Context)
{
Locations[0] = Loc1;
Locations[1] = Loc2;
@@ -367,41 +406,53 @@ public:
static InitializationKind CreateDirect(SourceLocation InitLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc) {
- return InitializationKind(SIK_Direct, InitLoc, LParenLoc, RParenLoc);
+ return InitializationKind(IK_Direct, IC_Normal,
+ InitLoc, LParenLoc, RParenLoc);
+ }
+
+ static InitializationKind CreateDirectList(SourceLocation InitLoc) {
+ return InitializationKind(IK_DirectList, IC_Normal,
+ InitLoc, InitLoc, InitLoc);
}
/// \brief Create a direct initialization due to a cast that isn't a C-style
/// or functional cast.
static InitializationKind CreateCast(SourceRange TypeRange) {
- return InitializationKind(SIK_DirectCast,
- TypeRange.getBegin(), TypeRange.getBegin(),
- TypeRange.getEnd());
+ return InitializationKind(IK_Direct, IC_StaticCast, TypeRange.getBegin(),
+ TypeRange.getBegin(), TypeRange.getEnd());
}
/// \brief Create a direct initialization for a C-style cast.
static InitializationKind CreateCStyleCast(SourceLocation StartLoc,
- SourceRange TypeRange) {
- return InitializationKind(SIK_DirectCStyleCast,
- StartLoc, TypeRange.getBegin(),
+ SourceRange TypeRange,
+ bool InitList) {
+ // C++ cast syntax doesn't permit init lists, but C compound literals are
+ // exactly that.
+ return InitializationKind(InitList ? IK_DirectList : IK_Direct,
+ IC_CStyleCast, StartLoc, TypeRange.getBegin(),
TypeRange.getEnd());
}
/// \brief Create a direct initialization for a functional cast.
- static InitializationKind CreateFunctionalCast(SourceRange TypeRange) {
- return InitializationKind(SIK_DirectFunctionalCast,
- TypeRange.getBegin(), TypeRange.getBegin(),
- TypeRange.getEnd());
+ static InitializationKind CreateFunctionalCast(SourceRange TypeRange,
+ bool InitList) {
+ return InitializationKind(InitList ? IK_DirectList : IK_Direct,
+ IC_FunctionalCast, TypeRange.getBegin(),
+ TypeRange.getBegin(), TypeRange.getEnd());
}
/// \brief Create a copy initialization.
static InitializationKind CreateCopy(SourceLocation InitLoc,
- SourceLocation EqualLoc) {
- return InitializationKind(SIK_Copy, InitLoc, EqualLoc, EqualLoc);
+ SourceLocation EqualLoc,
+ bool AllowExplicitConvs = false) {
+ return InitializationKind(IK_Copy,
+ AllowExplicitConvs? IC_ExplicitConvs : IC_Normal,
+ InitLoc, EqualLoc, EqualLoc);
}
/// \brief Create a default initialization.
static InitializationKind CreateDefault(SourceLocation InitLoc) {
- return InitializationKind(SIK_Default, InitLoc, InitLoc, InitLoc);
+ return InitializationKind(IK_Default, IC_Normal, InitLoc, InitLoc, InitLoc);
}
/// \brief Create a value initialization.
@@ -409,46 +460,39 @@ public:
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool isImplicit = false) {
- return InitializationKind(isImplicit? SIK_ImplicitValue : SIK_Value,
+ return InitializationKind(IK_Value, isImplicit ? IC_Implicit : IC_Normal,
InitLoc, LParenLoc, RParenLoc);
}
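
Factoring the old StoredInitKind into an orthogonal (kind, context) pair lets the predicates below become simple comparisons instead of enumerations of special cases. A sketch of the resulting behavior (TypeRange assumed in scope):

    // T{...} written as a functional cast is direct list-initialization in a
    // functional-cast context:
    InitializationKind K =
        InitializationKind::CreateFunctionalCast(TypeRange, /*InitList=*/true);
    assert(K.getKind() == InitializationKind::IK_DirectList);
    assert(K.isExplicitCast() && K.isFunctionalCast());
    assert(K.isCStyleOrFunctionalCast());
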
/// \brief Determine the initialization kind.
InitKind getKind() const {
- if (Kind > SIK_ImplicitValue)
- return IK_Direct;
- if (Kind == SIK_ImplicitValue)
- return IK_Value;
-
- return (InitKind)Kind;
+ return Kind;
}
/// \brief Determine whether this initialization is an explicit cast.
bool isExplicitCast() const {
- return Kind == SIK_DirectCast ||
- Kind == SIK_DirectCStyleCast ||
- Kind == SIK_DirectFunctionalCast;
+ return Context >= IC_StaticCast;
}
/// \brief Determine whether this initialization is a C-style cast.
bool isCStyleOrFunctionalCast() const {
- return Kind == SIK_DirectCStyleCast || Kind == SIK_DirectFunctionalCast;
+ return Context >= IC_CStyleCast;
}
- /// brief Determine whether this is a C-style cast.
+ /// \brief Determine whether this is a C-style cast.
bool isCStyleCast() const {
- return Kind == SIK_DirectCStyleCast;
+ return Context == IC_CStyleCast;
}
- /// brief Determine whether this is a functional-style cast.
+ /// \brief Determine whether this is a functional-style cast.
bool isFunctionalCast() const {
- return Kind == SIK_DirectFunctionalCast;
+ return Context == IC_FunctionalCast;
}
/// \brief Determine whether this initialization is an implicit
/// value-initialization, e.g., as occurs during aggregate
/// initialization.
- bool isImplicitValueInit() const { return Kind == SIK_ImplicitValue; }
+ bool isImplicitValueInit() const { return Context == IC_Implicit; }
/// \brief Retrieve the location at which initialization is occurring.
SourceLocation getLocation() const { return Locations[0]; }
@@ -461,16 +505,26 @@ public:
/// \brief Retrieve the location of the equal sign for copy initialization
/// (if present).
SourceLocation getEqualLoc() const {
- assert(Kind == SIK_Copy && "Only copy initialization has an '='");
+ assert(Kind == IK_Copy && "Only copy initialization has an '='");
return Locations[1];
}
-
- bool isCopyInit() const { return Kind == SIK_Copy; }
+
+ bool isCopyInit() const { return Kind == IK_Copy; }
+
+ /// \brief Retrieve whether this initialization allows the use of explicit
+ /// constructors.
+ bool AllowExplicit() const { return !isCopyInit(); }
+
+ /// \brief Retrieve whether this initialization allows the use of explicit
+ /// conversion functions.
+ bool allowExplicitConversionFunctions() const {
+ return !isCopyInit() || Context == IC_ExplicitConvs;
+ }
/// \brief Retrieve the source range containing the locations of the open
/// and closing parentheses for value and direct initializations.
SourceRange getParenRange() const {
- assert((getKind() == IK_Direct || Kind == SIK_Value) &&
+ assert((Kind == IK_Direct || Kind == IK_Value) &&
"Only direct- and value-initialization have parentheses");
return SourceRange(Locations[1], Locations[2]);
}
@@ -530,6 +584,10 @@ public:
SK_ListInitialization,
/// \brief Perform list-initialization with a constructor.
SK_ListConstructorCall,
+ /// \brief Unwrap the single-element initializer list for a reference.
+ SK_UnwrapInitList,
+ /// \brief Rewrap the single-element initializer list for a reference.
+ SK_RewrapInitList,
/// \brief Perform initialization via a constructor.
SK_ConstructorInitialization,
/// \brief Zero-initialize the object
@@ -544,12 +602,17 @@ public:
/// \brief Array initialization (from an array rvalue).
/// This is a GNU C extension.
SK_ArrayInit,
+ /// \brief Array initialization from a parenthesized initializer list.
+ /// This is a GNU C++ extension.
+ SK_ParenthesizedArrayInit,
/// \brief Pass an object by indirect copy-and-restore.
SK_PassByIndirectCopyRestore,
/// \brief Pass an object by indirect restore.
SK_PassByIndirectRestore,
/// \brief Produce an Objective-C object pointer.
- SK_ProduceObjCObject
+ SK_ProduceObjCObject,
+ /// \brief Construct a std::initializer_list from an initializer list.
+ SK_StdInitializerList
};
/// \brief A single step in the initialization sequence.
@@ -579,8 +642,12 @@ public:
} Function;
/// \brief When Kind = SK_ConversionSequence, the implicit conversion
- /// sequence
+ /// sequence.
ImplicitConversionSequence *ICS;
+
+ /// \brief When Kind = SK_RewrapInitList, the syntactic form of the
+ /// wrapping list.
+ InitListExpr *WrappingSyntacticList;
};
void Destroy();
@@ -635,14 +702,26 @@ public:
FK_InitListBadDestinationType,
/// \brief Overloading for a user-defined conversion failed.
FK_UserConversionOverloadFailed,
- /// \brief Overloaded for initialization by constructor failed.
+ /// \brief Overloading for initialization by constructor failed.
FK_ConstructorOverloadFailed,
+ /// \brief Overloading for list-initialization by constructor failed.
+ FK_ListConstructorOverloadFailed,
/// \brief Default-initialization of a 'const' object.
FK_DefaultInitOfConst,
/// \brief Initialization of an incomplete type.
FK_Incomplete,
+ /// \brief Variable-length array must not have an initializer.
+ FK_VariableLengthArrayHasInitializer,
/// \brief List initialization failed at some point.
- FK_ListInitializationFailed
+ FK_ListInitializationFailed,
+ /// \brief Initializer has a placeholder type which cannot be
+ /// resolved by initialization.
+ FK_PlaceholderType,
+ /// \brief Failed to initialize a std::initializer_list because copy
+ /// construction of some element failed.
+ FK_InitListElementCopyFailure,
+ /// \brief List-copy-initialization chose an explicit constructor.
+ FK_ExplicitConstructor
};
private:
@@ -655,6 +734,9 @@ private:
/// \brief The candidate set created when initialization failed.
OverloadCandidateSet FailedCandidateSet;
+ /// \brief The incomplete type that caused a failure.
+ QualType FailedIncompleteType;
+
/// \brief Prints a follow-up note that highlights the location of
/// the initialized entity, if it's remote.
void PrintInitLocationNote(Sema &S, const InitializedEntity &Entity);
@@ -763,8 +845,9 @@ public:
/// \param Function the function to which the overloaded function reference
/// resolves.
void AddAddressOverloadResolutionStep(FunctionDecl *Function,
- DeclAccessPair Found);
-
+ DeclAccessPair Found,
+ bool HadMultipleCandidates);
+
/// \brief Add a new step in the initialization that performs a derived-to-
/// base cast.
///
@@ -801,8 +884,9 @@ public:
/// a constructor or a conversion function.
void AddUserConversionStep(FunctionDecl *Function,
DeclAccessPair FoundDecl,
- QualType T);
-
+ QualType T,
+ bool HadMultipleCandidates);
+
/// \brief Add a new step that performs a qualification conversion to the
/// given type.
void AddQualificationConversionStep(QualType Ty,
@@ -812,17 +896,23 @@ public:
void AddConversionSequenceStep(const ImplicitConversionSequence &ICS,
QualType T);
- /// \brief Add a list-initialiation step.
+ /// \brief Add a list-initialization step.
void AddListInitializationStep(QualType T);
/// \brief Add a constructor-initialization step.
+ ///
+ /// \arg FromInitList The constructor call is syntactically an initializer
+ /// list.
+ /// \arg AsInitList The constructor is called as an init list constructor.
void AddConstructorInitializationStep(CXXConstructorDecl *Constructor,
AccessSpecifier Access,
- QualType T);
+ QualType T,
+ bool HadMultipleCandidates,
+ bool FromInitList, bool AsInitList);
/// \brief Add a zero-initialization step.
void AddZeroInitializationStep(QualType T);
-
+
/// \brief Add a C assignment step.
//
// FIXME: It isn't clear whether this should ever be needed;
@@ -840,6 +930,9 @@ public:
/// \brief Add an array initialization step.
void AddArrayInitStep(QualType T);
+ /// \brief Add a parenthesized array initialization step.
+ void AddParenthesizedArrayInitStep(QualType T);
+
/// \brief Add a step to pass an object by indirect copy-restore.
void AddPassByIndirectCopyRestoreStep(QualType T, bool shouldCopy);
@@ -847,10 +940,20 @@ public:
/// retaining it).
void AddProduceObjCObjectStep(QualType T);
+ /// \brief Add a step to construct a std::initializer_list object from an
+ /// initializer list.
+ void AddStdInitializerListConstructionStep(QualType T);
+
+ /// \brief Add steps to unwrap an initializer list for a reference around a
+ /// single element and rewrap it at the end.
+ void RewrapReferenceInitList(QualType T, InitListExpr *Syntactic);
+
/// \brief Note that this initialization sequence failed.
void SetFailed(FailureKind Failure) {
SequenceKind = FailedSequence;
this->Failure = Failure;
+ assert((Failure != FK_Incomplete || !FailedIncompleteType.isNull()) &&
+ "Incomplete type failure requires a type!");
}
/// \brief Note that this initialization sequence failed due to failed
@@ -863,12 +966,19 @@ public:
return FailedCandidateSet;
}
- /// brief Get the overloading result, for when the initialization
+ /// \brief Get the overloading result, for when the initialization
/// sequence failed due to a bad overload.
OverloadingResult getFailedOverloadResult() const {
return FailedOverloadResult;
}
+ /// \brief Note that this initialization sequence failed due to an
+ /// incomplete type.
+ void setIncompleteTypeFailure(QualType IncompleteType) {
+ FailedIncompleteType = IncompleteType;
+ SetFailed(FK_Incomplete);
+ }
+
/// \brief Determine why initialization failed.
FailureKind getFailureKind() const {
assert(Failed() && "Not an initialization failure!");
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h b/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h
index e1d3ae9..93cb8cb 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h
@@ -36,16 +36,16 @@ class LocInfoType : public Type {
TypeSourceInfo *DeclInfo;
LocInfoType(QualType ty, TypeSourceInfo *TInfo)
- : Type((TypeClass)LocInfo, ty, ty->isDependentType(),
+ : Type((TypeClass)LocInfo, ty, ty->isDependentType(),
ty->isInstantiationDependentType(),
ty->isVariablyModifiedType(),
- ty->containsUnexpandedParameterPack()),
- DeclInfo(TInfo) {
- assert(getTypeClass() == (TypeClass)LocInfo && "LocInfo didn't fit in TC?");
- }
+ ty->containsUnexpandedParameterPack()),
+ DeclInfo(TInfo) {
+ assert(getTypeClass() == (TypeClass)LocInfo && "LocInfo didn't fit in TC?");
+ }
friend class Sema;
- public:
+public:
QualType getType() const { return getCanonicalTypeInternal(); }
TypeSourceInfo *getTypeSourceInfo() const { return DeclInfo; }
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h b/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h
index 6630bb2..fe5d262 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h
@@ -214,6 +214,12 @@ public:
return Redecl;
}
+ /// \brief Determine whether this lookup is permitted to see hidden
+ /// declarations, such as those in modules that have not yet been imported.
+ bool isHiddenDeclarationVisible() const {
+ return Redecl || LookupKind == Sema::LookupTagName;
+ }
+
/// Sets whether tag declarations should be hidden by non-tag
/// declarations during resolution. The default is true.
void setHideTags(bool Hide) {
@@ -266,23 +272,35 @@ public:
return Paths;
}
- /// \brief Tests whether the given declaration is acceptable.
- bool isAcceptableDecl(NamedDecl *D) const {
- if (!D->isInIdentifierNamespace(IDNS))
- return false;
-
- // So long as this declaration is not module-private or was parsed as
- // part of this translation unit (i.e., in the module), we're allowed to
- // find it.
- if (!D->isModulePrivate() || !D->isFromASTFile())
+ /// \brief Determine whether the given declaration is visible to the
+ /// program.
+ static bool isVisible(NamedDecl *D) {
+ // If this declaration is not hidden, it's visible.
+ if (!D->isHidden())
return true;
-
+
// FIXME: We should be allowed to refer to a module-private name from
// within the same module, e.g., during template instantiation.
// This requires us know which module a particular declaration came from.
return false;
}
-
+
+ /// \brief Retrieve the accepted (re)declaration of the given declaration,
+ /// if there is one.
+ NamedDecl *getAcceptableDecl(NamedDecl *D) const {
+ if (!D->isInIdentifierNamespace(IDNS))
+ return 0;
+
+ if (isHiddenDeclarationVisible() || isVisible(D))
+ return D;
+
+ return getAcceptableDeclSlow(D);
+ }
+
+private:
+ NamedDecl *getAcceptableDeclSlow(NamedDecl *D) const;
+public:
+
/// \brief Returns the identifier namespace mask for this lookup.
unsigned getIdentifierNamespace() const {
return IDNS;
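
Filtering now happens through getAcceptableDecl, which can return a visible redeclaration in place of a hidden declaration (the slow path) or 0 when nothing may be seen. A hedged sketch of a lookup loop using it (R is a LookupResult, ND a candidate declaration):

    if (NamedDecl *Visible = R.getAcceptableDecl(ND))
      R.addDecl(Visible);
    // Hidden (not-yet-imported module) decls come back as 0 unless this
    // lookup may see hidden names, per isHiddenDeclarationVisible().
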
@@ -532,6 +550,11 @@ public:
return *I++;
}
+ /// Restart the iteration.
+ void restart() {
+ I = Results.begin();
+ }
+
/// Erase the last element returned from this iterator.
void erase() {
Results.Decls.erase(--I);
@@ -569,7 +592,7 @@ private:
void diagnose() {
if (isAmbiguous())
SemaRef.DiagnoseAmbiguousLookup(*this);
- else if (isClassLookup() && SemaRef.getLangOptions().AccessControl)
+ else if (isClassLookup() && SemaRef.getLangOpts().AccessControl)
SemaRef.CheckLookupAccess(*this);
}
@@ -582,7 +605,13 @@ private:
void configure();
// Sanity checks.
- void sanity() const;
+ void sanityImpl() const;
+
+ void sanity() const {
+#ifndef NDEBUG
+ sanityImpl();
+#endif
+ }
bool sanityCheckUnresolved() const {
for (iterator I = begin(), E = end(); I != E; ++I)
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/MultiInitializer.h b/contrib/llvm/tools/clang/include/clang/Sema/MultiInitializer.h
deleted file mode 100644
index c44e393..0000000
--- a/contrib/llvm/tools/clang/include/clang/Sema/MultiInitializer.h
+++ /dev/null
@@ -1,72 +0,0 @@
-//===--- MultiInitializer.h - Initializer expression group ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the MultiInitializer class, which can represent a list
-// initializer or a parentheses-wrapped group of expressions in a C++ member
-// initializer.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_SEMA_MULTIINITIALIZER_H
-#define LLVM_CLANG_SEMA_MULTIINITIALIZER_H
-
-#include "clang/Sema/Ownership.h"
-#include "clang/Basic/SourceLocation.h"
-#include "llvm/ADT/PointerUnion.h"
-
-namespace clang {
- class ASTContext;
- class Expr;
- class InitializationKind;
- class InitializedEntity;
- class InitListExpr;
- class Sema;
-
-class MultiInitializer {
- llvm::PointerUnion<Expr*, Expr**> InitListOrExpressions;
- unsigned NumInitializers;
- SourceLocation LParenLoc;
- SourceLocation RParenLoc;
-
- InitListExpr *getInitList() const;
- Expr **getExpressions() const { return InitListOrExpressions.get<Expr**>(); }
-
-public:
- MultiInitializer(Expr* InitList)
- : InitListOrExpressions(InitList)
- {}
-
- MultiInitializer(SourceLocation LParenLoc, Expr **Exprs, unsigned NumInits,
- SourceLocation RParenLoc)
- : InitListOrExpressions(Exprs), NumInitializers(NumInits),
- LParenLoc(LParenLoc), RParenLoc(RParenLoc)
- {}
-
- bool isInitializerList() const { return InitListOrExpressions.is<Expr*>(); }
-
- SourceLocation getStartLoc() const;
- SourceLocation getEndLoc() const;
-
- typedef Expr **iterator;
- iterator begin() const;
- iterator end() const;
-
- bool isTypeDependent() const;
-
- bool DiagnoseUnexpandedParameterPack(Sema &SemaRef) const;
-
- // Return the InitListExpr or create a ParenListExpr.
- Expr *CreateInitExpr(ASTContext &Ctx, QualType T) const;
-
- ExprResult PerformInit(Sema &SemaRef, InitializedEntity Entity,
- InitializationKind Kind) const;
-};
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
index dbc0926..d334447 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
@@ -24,6 +24,7 @@
#include "clang/Sema/SemaFixItUtils.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
namespace clang {
class ASTContext;
@@ -111,6 +112,23 @@ namespace clang {
ImplicitConversionRank GetConversionRank(ImplicitConversionKind Kind);
+ /// NarrowingKind - The kind of narrowing conversion being performed by a
+ /// standard conversion sequence according to C++11 [dcl.init.list]p7.
+ enum NarrowingKind {
+ /// Not a narrowing conversion.
+ NK_Not_Narrowing,
+
+ /// A narrowing conversion by virtue of the source and destination types.
+ NK_Type_Narrowing,
+
+ /// A narrowing conversion, because a constant expression got narrowed.
+ NK_Constant_Narrowing,
+
+ /// A narrowing conversion, because a non-constant-expression variable might
+ /// have been narrowed.
+ NK_Variable_Narrowing
+ };
+
/// StandardConversionSequence - represents a standard conversion
/// sequence (C++ 13.3.3.1.1). A standard conversion sequence
/// contains between zero and three conversions. If a particular
@@ -217,6 +235,9 @@ namespace clang {
}
ImplicitConversionRank getRank() const;
+ NarrowingKind getNarrowingKind(ASTContext &Context, const Expr *Converted,
+ APValue &ConstantValue,
+ QualType &ConstantType) const;
bool isPointerConversionToBool() const;
bool isPointerConversionToVoidPointer(ASTContext& Context) const;
void DebugPrint() const;
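
getNarrowingKind is the hook for C++11's list-initialization narrowing check; the two out-parameters describe the offending constant when one exists. A hedged usage sketch (SCS, Ctx, and Init assumed in scope):

    APValue ConstantValue;
    QualType ConstantType;
    switch (SCS.getNarrowingKind(Ctx, Init, ConstantValue, ConstantType)) {
    case NK_Not_Narrowing:
      break;                    // fine inside a braced initializer
    case NK_Constant_Narrowing: // a constant lost value or precision;
      break;                    // ConstantValue/ConstantType feed the note
    case NK_Type_Narrowing:
    case NK_Variable_Narrowing:
      break;                    // ill-formed per C++11 [dcl.init.list]p7
    }
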
@@ -225,9 +246,10 @@ namespace clang {
/// UserDefinedConversionSequence - Represents a user-defined
/// conversion sequence (C++ 13.3.3.1.2).
struct UserDefinedConversionSequence {
- /// Before - Represents the standard conversion that occurs before
- /// the actual user-defined conversion. (C++ 13.3.3.1.2p1):
+ /// \brief Represents the standard conversion that occurs before
+ /// the actual user-defined conversion.
///
+ /// C++11 13.3.3.1.2p1:
/// If the user-defined conversion is specified by a constructor
/// (12.3.1), the initial standard conversion sequence converts
/// the source type to the type required by the argument of the
@@ -255,14 +277,15 @@ namespace clang {
StandardConversionSequence After;
/// ConversionFunction - The function that will perform the
- /// user-defined conversion.
+ /// user-defined conversion. Null if the conversion is an
+ /// aggregate initialization from an initializer list.
FunctionDecl* ConversionFunction;
/// \brief The declaration that we found via name lookup, which might be
/// the same as \c ConversionFunction or it might be a using declaration
/// that refers to \c ConversionFunction.
DeclAccessPair FoundConversionFunction;
-
+
void DebugPrint() const;
};
@@ -379,7 +402,14 @@ namespace clang {
};
/// ConversionKind - The kind of implicit conversion sequence.
- unsigned ConversionKind;
+ unsigned ConversionKind : 30;
+
+ /// \brief Whether the argument is an initializer list.
+ bool ListInitializationSequence : 1;
+
+ /// \brief Whether the target is really a std::initializer_list, and the
+ /// sequence only represents the worst element conversion.
+ bool StdInitializerListElement : 1;
void setKind(Kind K) {
destruct();
@@ -409,12 +439,17 @@ namespace clang {
BadConversionSequence Bad;
};
- ImplicitConversionSequence() : ConversionKind(Uninitialized) {}
+ ImplicitConversionSequence()
+ : ConversionKind(Uninitialized), ListInitializationSequence(false),
+ StdInitializerListElement(false)
+ {}
~ImplicitConversionSequence() {
destruct();
}
ImplicitConversionSequence(const ImplicitConversionSequence &Other)
- : ConversionKind(Other.ConversionKind)
+ : ConversionKind(Other.ConversionKind),
+ ListInitializationSequence(Other.ListInitializationSequence),
+ StdInitializerListElement(Other.StdInitializerListElement)
{
switch (ConversionKind) {
case Uninitialized: break;
@@ -461,7 +496,7 @@ namespace clang {
return 3;
}
- return 3;
+ llvm_unreachable("Invalid ImplicitConversionSequence::Kind!");
}
bool isBad() const { return getKind() == BadConversion; }
@@ -499,6 +534,26 @@ namespace clang {
Ambiguous.construct();
}
+ /// \brief Whether this sequence was created by the rules of
+ /// list-initialization sequences.
+ bool isListInitializationSequence() const {
+ return ListInitializationSequence;
+ }
+
+ void setListInitializationSequence() {
+ ListInitializationSequence = true;
+ }
+
+ /// \brief Whether the target is really a std::initializer_list, and the
+ /// sequence only represents the worst element conversion.
+ bool isStdInitializerListElement() const {
+ return StdInitializerListElement;
+ }
+
+ void setStdInitializerListElement(bool V = true) {
+ StdInitializerListElement = V;
+ }
+
// The result of a comparison between implicit conversion
// sequences. Use Sema::CompareImplicitConversionSequences to
// actually perform the comparison.
@@ -565,12 +620,17 @@ namespace clang {
CXXConversionDecl *Surrogate;
/// Conversions - The conversion sequences used to convert the
- /// function arguments to the function parameters.
- SmallVector<ImplicitConversionSequence, 4> Conversions;
+ /// function arguments to the function parameters, the pointer points to a
+ /// fixed size array with NumConversions elements. The memory is owned by
+ /// the OverloadCandidateSet.
+ ImplicitConversionSequence *Conversions;
/// The FixIt hints which can be used to fix the Bad candidate.
ConversionFixItGenerator Fix;
+ /// NumConversions - The number of elements in the Conversions array.
+ unsigned NumConversions;
+
/// Viable - True to indicate that this overload candidate is viable.
bool Viable;
@@ -639,10 +699,9 @@ namespace clang {
/// hasAmbiguousConversion - Returns whether this overload
/// candidate requires an ambiguous conversion or not.
bool hasAmbiguousConversion() const {
- for (SmallVectorImpl<ImplicitConversionSequence>::const_iterator
- I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
- if (!I->isInitialized()) return false;
- if (I->isAmbiguous()) return true;
+ for (unsigned i = 0, e = NumConversions; i != e; ++i) {
+ if (!Conversions[i].isInitialized()) return false;
+ if (Conversions[i].isAmbiguous()) return true;
}
return false;
}
@@ -663,17 +722,29 @@ namespace clang {
/// OverloadCandidateSet - A set of overload candidates, used in C++
/// overload resolution (C++ 13.3).
- class OverloadCandidateSet : public SmallVector<OverloadCandidate, 16> {
- typedef SmallVector<OverloadCandidate, 16> inherited;
+ class OverloadCandidateSet {
+ SmallVector<OverloadCandidate, 16> Candidates;
llvm::SmallPtrSet<Decl *, 16> Functions;
- SourceLocation Loc;
-
+ // Allocator for OverloadCandidate::Conversions. We store the first few
+ // elements inline to avoid allocation for small sets.
+ llvm::BumpPtrAllocator ConversionSequenceAllocator;
+
+ SourceLocation Loc;
+
+ unsigned NumInlineSequences;
+ char InlineSpace[16 * sizeof(ImplicitConversionSequence)];
+
OverloadCandidateSet(const OverloadCandidateSet &);
OverloadCandidateSet &operator=(const OverloadCandidateSet &);
public:
- OverloadCandidateSet(SourceLocation Loc) : Loc(Loc) {}
+ OverloadCandidateSet(SourceLocation Loc) : Loc(Loc), NumInlineSequences(0){}
+ ~OverloadCandidateSet() {
+ for (iterator i = begin(), e = end(); i != e; ++i)
+ for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
+ i->Conversions[ii].~ImplicitConversionSequence();
+ }
SourceLocation getLocation() const { return Loc; }
@@ -686,6 +757,40 @@ namespace clang {
/// \brief Clear out all of the candidates.
void clear();
+ typedef SmallVector<OverloadCandidate, 16>::iterator iterator;
+ iterator begin() { return Candidates.begin(); }
+ iterator end() { return Candidates.end(); }
+
+ size_t size() const { return Candidates.size(); }
+ bool empty() const { return Candidates.empty(); }
+
+ /// \brief Add a new candidate with NumConversions conversion sequence slots
+ /// to the overload set.
+ OverloadCandidate &addCandidate(unsigned NumConversions = 0) {
+ Candidates.push_back(OverloadCandidate());
+ OverloadCandidate &C = Candidates.back();
+
+ // Assign space from the inline array if there are enough free slots
+ // available.
+ if (NumConversions + NumInlineSequences <= 16) {
+ ImplicitConversionSequence *I =
+ (ImplicitConversionSequence*)InlineSpace;
+ C.Conversions = &I[NumInlineSequences];
+ NumInlineSequences += NumConversions;
+ } else {
+ // Otherwise get memory from the allocator.
+ C.Conversions = ConversionSequenceAllocator
+ .Allocate<ImplicitConversionSequence>(NumConversions);
+ }
+
+ // Construct the new objects.
+ for (unsigned i = 0; i != NumConversions; ++i)
+ new (&C.Conversions[i]) ImplicitConversionSequence();
+
+ C.NumConversions = NumConversions;
+ return C;
+ }
+
/// Find the best viable function on this overload set, if it exists.
OverloadingResult BestViableFunction(Sema &S, SourceLocation Loc,
OverloadCandidateSet::iterator& Best,
@@ -693,7 +798,7 @@ namespace clang {
void NoteCandidates(Sema &S,
OverloadCandidateDisplayKind OCD,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
const char *Opc = 0,
SourceLocation Loc = SourceLocation());
};
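
The conversion sequences for a candidate are now carved out of the set itself: up to 16 land in InlineSpace, the rest in the BumpPtrAllocator, and the destructor above runs each sequence's destructor by hand. Callers switch from push_back to addCandidate; a sketch (argument list assumed):

    // One conversion slot per argument; storage placement is the set's
    // problem, construction already happened inside addCandidate().
    OverloadCandidate &Cand = CandidateSet.addCandidate(Args.size());
    Cand.Viable = true;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      // ...initialize Cand.Conversions[i] with the usual helpers (elided).
    }
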
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
index 735a26b..3ff0459 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
@@ -141,7 +141,11 @@ namespace clang {
struct TemplateIdAnnotation {
/// \brief The nested-name-specifier that precedes the template name.
CXXScopeSpec SS;
-
+
+ /// TemplateKWLoc - The location of the template keyword within the
+ /// source.
+ SourceLocation TemplateKWLoc;
+
/// TemplateNameLoc - The location of the template name within the
/// source.
SourceLocation TemplateNameLoc;
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h b/contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h
index a31312c..aa55705 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h
@@ -33,9 +33,10 @@ class PrettyDeclStackTraceEntry : public llvm::PrettyStackTraceEntry {
Decl *TheDecl;
SourceLocation Loc;
const char *Message;
-
+
public:
- PrettyDeclStackTraceEntry(Sema &S, Decl *D, SourceLocation Loc, const char *Msg)
+ PrettyDeclStackTraceEntry(Sema &S, Decl *D, SourceLocation Loc,
+ const char *Msg)
: S(S), TheDecl(D), Loc(Loc), Message(Msg) {}
virtual void print(raw_ostream &OS) const;
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
index cff8b33..e9aa173 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
@@ -57,7 +57,7 @@ public:
/// BlockScope - This is a scope that corresponds to a block/closure object.
/// Blocks serve as top-level scopes for some objects like labels, they
/// also prevent things like break and continue. BlockScopes always have
- /// the FnScope, BreakScope, ContinueScope, and DeclScope flags set as well.
+ /// the FnScope and DeclScope flags set as well.
BlockScope = 0x40,
/// TemplateParamScope - This is a scope that corresponds to the
@@ -114,16 +114,12 @@ private:
/// pointer is non-null and points to it. This is used for label processing.
Scope *FnParent;
- /// BreakParent/ContinueParent - This is a direct link to the immediately
- /// preceding BreakParent/ContinueParent if this scope is not one, or null if
- /// there is no containing break/continue scope.
+ /// BreakParent/ContinueParent - This is a direct link to the innermost
+ /// BreakScope/ContinueScope which contains the contents of this scope
+ /// for control flow purposes (and might be this scope itself), or null
+ /// if there is no such scope.
Scope *BreakParent, *ContinueParent;
- /// ControlParent - This is a direct link to the immediately
- /// preceding ControlParent if this scope is not one, or null if
- /// there is no containing control scope.
- Scope *ControlParent;
-
/// BlockParent - This is a direct link to the immediately containing
/// BlockScope if this scope is not one, or null if there is none.
Scope *BlockParent;
@@ -180,12 +176,9 @@ public:
Scope *getFnParent() { return FnParent; }
/// getContinueParent - Return the closest scope that a continue statement
- /// would be affected by. If the closest scope is a closure scope, we know
- /// that there is no loop *inside* the closure.
+ /// would be affected by.
Scope *getContinueParent() {
- if (ContinueParent && !ContinueParent->isBlockScope())
- return ContinueParent;
- return 0;
+ return ContinueParent;
}
const Scope *getContinueParent() const {
@@ -193,20 +186,14 @@ public:
}
/// getBreakParent - Return the closest scope that a break statement
- /// would be affected by. If the closest scope is a block scope, we know
- /// that there is no loop *inside* the block.
+ /// would be affected by.
Scope *getBreakParent() {
- if (BreakParent && !BreakParent->isBlockScope())
- return BreakParent;
- return 0;
+ return BreakParent;
}
const Scope *getBreakParent() const {
return const_cast<Scope*>(this)->getBreakParent();
}
- Scope *getControlParent() { return ControlParent; }
- const Scope *getControlParent() const { return ControlParent; }
-
Scope *getBlockParent() { return BlockParent; }
const Scope *getBlockParent() const { return BlockParent; }
@@ -310,6 +297,10 @@ public:
/// \brief Determine whether this scope is a C++ 'try' block.
bool isTryScope() const { return getFlags() & Scope::TryScope; }
+ /// containedInPrototypeScope - Return true if this or a parent scope
+ /// is a FunctionPrototypeScope.
+ bool containedInPrototypeScope() const;
+
typedef UsingDirectivesTy::iterator udir_iterator;
typedef UsingDirectivesTy::const_iterator const_udir_iterator;
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
index 9ef6d3c..ceaf586 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
@@ -18,19 +18,36 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/SetVector.h"
namespace clang {
class BlockDecl;
+class CXXMethodDecl;
class IdentifierInfo;
class LabelDecl;
class ReturnStmt;
class Scope;
class SwitchStmt;
+class VarDecl;
namespace sema {
+/// \brief Contains information about the compound statement currently being
+/// parsed.
+class CompoundScopeInfo {
+public:
+ CompoundScopeInfo()
+ : HasEmptyLoopBodies(false) { }
+
+ /// \brief Whether this compound statement contains `for' or `while' loops
+ /// with empty bodies.
+ bool HasEmptyLoopBodies;
+
+ void setHasEmptyLoopBodies() {
+ HasEmptyLoopBodies = true;
+ }
+};
+
class PossiblyUnreachableDiag {
public:
PartialDiagnostic PD;
@@ -45,11 +62,17 @@ public:
/// \brief Retains information about a function, method, or block that is
/// currently being parsed.
class FunctionScopeInfo {
+protected:
+ enum ScopeKind {
+ SK_Function,
+ SK_Block,
+ SK_Lambda
+ };
+
public:
-
- /// \brief Whether this scope information structure defined information for
- /// a block.
- bool IsBlockInfo;
+ /// \brief What kind of scope we are describing.
+ ///
+ ScopeKind Kind;
/// \brief Whether this function contains a VLA, @try, try, C++
/// initializer, or anything else that can't be jumped past.
@@ -72,7 +95,11 @@ public:
/// block, if there is any chance of applying the named return value
/// optimization.
SmallVector<ReturnStmt*, 4> Returns;
-
+
+ /// \brief The stack of currently active compound statement scopes in the
+ /// function.
+ SmallVector<CompoundScopeInfo, 4> CompoundScopes;
+
/// \brief A list of PartialDiagnostics created but delayed within the
/// current function scope. These diagnostics are vetted for reachability
/// prior to being emitted.
@@ -96,7 +123,7 @@ public:
}
FunctionScopeInfo(DiagnosticsEngine &Diag)
- : IsBlockInfo(false),
+ : Kind(SK_Function),
HasBranchProtectedScope(false),
HasBranchIntoScope(false),
HasIndirectGoto(false),
@@ -111,8 +138,162 @@ public:
static bool classof(const FunctionScopeInfo *FSI) { return true; }
};
+class CapturingScopeInfo : public FunctionScopeInfo {
+public:
+ enum ImplicitCaptureStyle {
+ ImpCap_None, ImpCap_LambdaByval, ImpCap_LambdaByref, ImpCap_Block
+ };
+
+ ImplicitCaptureStyle ImpCaptureStyle;
+
+ class Capture {
+ // There are two categories of capture: capturing 'this', and capturing
+ // local variables. There are three ways to capture a local variable:
+ // capture by copy in the C++11 sense, capture by reference
+ // in the C++11 sense, and __block capture. Lambdas explicitly specify
+ // capture by copy or capture by reference. For blocks, __block capture
+ // applies to variables with that annotation, variables of reference type
+ // are captured by reference, and other variables are captured by copy.
+ enum CaptureKind {
+ Cap_This, Cap_ByCopy, Cap_ByRef, Cap_Block
+ };
+
+ // The variable being captured (if we are not capturing 'this'),
+ // and misc bits describing the capture.
+ llvm::PointerIntPair<VarDecl*, 2, CaptureKind> VarAndKind;
+
+ // Expression to initialize a field of the given type, and whether this
+ // is a nested capture; the expression is only required if we are
+ // capturing ByVal and the variable's type has a non-trivial
+ // copy constructor.
+ llvm::PointerIntPair<Expr*, 1, bool> CopyExprAndNested;
+
+ /// \brief The source location at which the first capture occurred.
+ SourceLocation Loc;
+
+ /// \brief The location of the ellipsis that expands a parameter pack.
+ SourceLocation EllipsisLoc;
+
+ /// \brief The type as it was captured, which is in effect the type of the
+ /// non-static data member that would hold the capture.
+ QualType CaptureType;
+
+ public:
+ Capture(VarDecl *Var, bool block, bool byRef, bool isNested,
+ SourceLocation Loc, SourceLocation EllipsisLoc,
+ QualType CaptureType, Expr *Cpy)
+ : VarAndKind(Var, block ? Cap_Block : byRef ? Cap_ByRef : Cap_ByCopy),
+ CopyExprAndNested(Cpy, isNested), Loc(Loc), EllipsisLoc(EllipsisLoc),
+ CaptureType(CaptureType){}
+
+ enum IsThisCapture { ThisCapture };
+ Capture(IsThisCapture, bool isNested, SourceLocation Loc,
+ QualType CaptureType, Expr *Cpy)
+ : VarAndKind(0, Cap_This), CopyExprAndNested(Cpy, isNested), Loc(Loc),
+ EllipsisLoc(), CaptureType(CaptureType) { }
+
+ bool isThisCapture() const { return VarAndKind.getInt() == Cap_This; }
+ bool isVariableCapture() const { return !isThisCapture(); }
+ bool isCopyCapture() const { return VarAndKind.getInt() == Cap_ByCopy; }
+ bool isReferenceCapture() const { return VarAndKind.getInt() == Cap_ByRef; }
+ bool isBlockCapture() const { return VarAndKind.getInt() == Cap_Block; }
+ bool isNested() { return CopyExprAndNested.getInt(); }
+
+ VarDecl *getVariable() const {
+ return VarAndKind.getPointer();
+ }
+
+ /// \brief Retrieve the location at which this variable was captured.
+ SourceLocation getLocation() const { return Loc; }
+
+ /// \brief Retrieve the source location of the ellipsis, whose presence
+ /// indicates that the capture is a pack expansion.
+ SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
+
+ /// \brief Retrieve the capture type for this capture, which is effectively
+ /// the type of the non-static data member in the lambda/block structure
+ /// that would store this capture.
+ QualType getCaptureType() const { return CaptureType; }
+
+ Expr *getCopyExpr() const {
+ return CopyExprAndNested.getPointer();
+ }
+ };
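
Capture above stores its discriminator in the spare low bits of the VarDecl pointer via llvm::PointerIntPair. Here is a minimal sketch of that bit-packing trick, assuming at least 4-byte pointer alignment; PtrAndKind is a hypothetical stand-in for the LLVM template.

#include <cassert>
#include <stdint.h>

enum Kind { K_This, K_ByCopy, K_ByRef, K_Block }; // four values fit in 2 bits

// Hypothetical stand-in for llvm::PointerIntPair<T*, 2, Kind>.
template <typename T>
class PtrAndKind {
  uintptr_t Bits;                                 // pointer and tag share a word
public:
  PtrAndKind(T *P, Kind K) {
    uintptr_t Raw = reinterpret_cast<uintptr_t>(P);
    assert((Raw & 3) == 0 && "needs at least 4-byte alignment");
    Bits = Raw | static_cast<uintptr_t>(K);
  }
  T *getPointer() const { return reinterpret_cast<T*>(Bits & ~uintptr_t(3)); }
  Kind getInt() const { return static_cast<Kind>(Bits & 3); }
};

Two tag bits ride along in storage the aligned pointer never uses, which is why Capture needs no separate kind field.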
+
+ CapturingScopeInfo(DiagnosticsEngine &Diag, ImplicitCaptureStyle Style)
+ : FunctionScopeInfo(Diag), ImpCaptureStyle(Style), CXXThisCaptureIndex(0),
+ HasImplicitReturnType(false)
+ {}
+
+ /// CaptureMap - A map of captured variables to (index+1) into Captures.
+ llvm::DenseMap<VarDecl*, unsigned> CaptureMap;
+
+ /// CXXThisCaptureIndex - The (index+1) of the capture of 'this';
+ /// zero if 'this' is not captured.
+ unsigned CXXThisCaptureIndex;
+
+ /// Captures - The captures.
+ SmallVector<Capture, 4> Captures;
+
+ /// \brief - Whether the target type of return statements in this context
+ /// is deduced (e.g. a lambda or block with omitted return type).
+ bool HasImplicitReturnType;
+
+ /// ReturnType - The target type of return statements in this context,
+ /// or null if unknown.
+ QualType ReturnType;
+
+ void addCapture(VarDecl *Var, bool isBlock, bool isByref, bool isNested,
+ SourceLocation Loc, SourceLocation EllipsisLoc,
+ QualType CaptureType, Expr *Cpy) {
+ Captures.push_back(Capture(Var, isBlock, isByref, isNested, Loc,
+ EllipsisLoc, CaptureType, Cpy));
+ CaptureMap[Var] = Captures.size();
+ }
+
+ void addThisCapture(bool isNested, SourceLocation Loc, QualType CaptureType,
+ Expr *Cpy) {
+ Captures.push_back(Capture(Capture::ThisCapture, isNested, Loc, CaptureType,
+ Cpy));
+ CXXThisCaptureIndex = Captures.size();
+ }
+
+ /// \brief Determine whether the C++ 'this' is captured.
+ bool isCXXThisCaptured() const { return CXXThisCaptureIndex != 0; }
+
+ /// \brief Retrieve the capture of C++ 'this', if it has been captured.
+ Capture &getCXXThisCapture() {
+ assert(isCXXThisCaptured() && "this has not been captured");
+ return Captures[CXXThisCaptureIndex - 1];
+ }
+
+ /// \brief Determine whether the given variable has been captured.
+ bool isCaptured(VarDecl *Var) const {
+ return CaptureMap.count(Var);
+ }
+
+ /// \brief Retrieve the capture of the given variable, if it has been
+ /// captured already.
+ Capture &getCapture(VarDecl *Var) {
+ assert(isCaptured(Var) && "Variable has not been captured");
+ return Captures[CaptureMap[Var] - 1];
+ }
+
+ const Capture &getCapture(VarDecl *Var) const {
+ llvm::DenseMap<VarDecl*, unsigned>::const_iterator Known
+ = CaptureMap.find(Var);
+ assert(Known != CaptureMap.end() && "Variable has not been captured");
+ return Captures[Known->second - 1];
+ }
+
+ static bool classof(const FunctionScopeInfo *FSI) {
+ return FSI->Kind == SK_Block || FSI->Kind == SK_Lambda;
+ }
+ static bool classof(const CapturingScopeInfo *BSI) { return true; }
+};
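
CaptureMap and CXXThisCaptureIndex both store index+1 so that zero, the value a DenseMap hands back for an absent key, can double as "not captured". A small sketch of the convention follows, with std::map and plain strings standing in for llvm::DenseMap and VarDecl.

#include <cassert>
#include <map>
#include <string>
#include <vector>

// Keys are pointers, as with the VarDecl* keys above.
std::vector<std::string> Captures;
std::map<const char*, unsigned> CaptureMap; // stores index+1; 0 means absent

void addCapture(const char *Var) {
  Captures.push_back(Var);
  CaptureMap[Var] = Captures.size();        // new index, offset by one
}

bool isCaptured(const char *Var) {
  return CaptureMap.count(Var) != 0;        // no entry == never captured
}

std::string &getCapture(const char *Var) {
  unsigned IdxPlusOne = CaptureMap[Var];
  assert(IdxPlusOne && "variable has not been captured");
  return Captures[IdxPlusOne - 1];          // undo the +1 offset
}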
+
/// \brief Retains information about a block that is currently being parsed.
-class BlockScopeInfo : public FunctionScopeInfo {
+class BlockScopeInfo : public CapturingScopeInfo {
public:
BlockDecl *TheDecl;
@@ -120,36 +301,79 @@ public:
/// arguments etc.
Scope *TheScope;
- /// ReturnType - The return type of the block, or null if the block
- /// signature didn't provide an explicit return type.
- QualType ReturnType;
-
/// BlockType - The function type of the block, if one was given.
/// Its return type may be BuiltinType::Dependent.
QualType FunctionType;
- /// CaptureMap - A map of captured variables to (index+1) into Captures.
- llvm::DenseMap<VarDecl*, unsigned> CaptureMap;
-
- /// Captures - The captured variables.
- SmallVector<BlockDecl::Capture, 4> Captures;
-
- /// CapturesCXXThis - Whether this block captures 'this'.
- bool CapturesCXXThis;
-
BlockScopeInfo(DiagnosticsEngine &Diag, Scope *BlockScope, BlockDecl *Block)
- : FunctionScopeInfo(Diag), TheDecl(Block), TheScope(BlockScope),
- CapturesCXXThis(false)
+ : CapturingScopeInfo(Diag, ImpCap_Block), TheDecl(Block),
+ TheScope(BlockScope)
{
- IsBlockInfo = true;
+ Kind = SK_Block;
}
virtual ~BlockScopeInfo();
- static bool classof(const FunctionScopeInfo *FSI) { return FSI->IsBlockInfo; }
+ static bool classof(const FunctionScopeInfo *FSI) {
+ return FSI->Kind == SK_Block;
+ }
static bool classof(const BlockScopeInfo *BSI) { return true; }
};
+class LambdaScopeInfo : public CapturingScopeInfo {
+public:
+ /// \brief The class that describes the lambda.
+ CXXRecordDecl *Lambda;
+
+ /// \brief The call operator representing the lambda.
+ CXXMethodDecl *CallOperator;
+
+ /// \brief Source range covering the lambda introducer [...].
+ SourceRange IntroducerRange;
+
+ /// \brief The number of captures in the \c Captures list that are
+ /// explicit captures.
+ unsigned NumExplicitCaptures;
+
+ /// \brief Whether this is a mutable lambda.
+ bool Mutable;
+
+ /// \brief Whether the (empty) parameter list is explicit.
+ bool ExplicitParams;
+
+ /// \brief Whether any of the capture expressions requires cleanups.
+ bool ExprNeedsCleanups;
+
+ /// \brief Variables used to index into by-copy array captures.
+ llvm::SmallVector<VarDecl *, 4> ArrayIndexVars;
+
+ /// \brief Offsets into the ArrayIndexVars array at which each capture starts
+ /// its list of array index variables.
+ llvm::SmallVector<unsigned, 4> ArrayIndexStarts;
+
+ LambdaScopeInfo(DiagnosticsEngine &Diag, CXXRecordDecl *Lambda,
+ CXXMethodDecl *CallOperator)
+ : CapturingScopeInfo(Diag, ImpCap_None), Lambda(Lambda),
+ CallOperator(CallOperator), NumExplicitCaptures(0), Mutable(false),
+ ExprNeedsCleanups(false)
+ {
+ Kind = SK_Lambda;
+ }
+
+ virtual ~LambdaScopeInfo();
+
+ /// \brief Note that all explicit captures have been added.
+ void finishedExplicitCaptures() {
+ NumExplicitCaptures = Captures.size();
+ }
+
+ static bool classof(const FunctionScopeInfo *FSI) {
+ return FSI->Kind == SK_Lambda;
+ }
+ static bool classof(const LambdaScopeInfo *BSI) { return true; }
+
+};
+
}
}
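
Replacing the old IsBlockInfo flag with a ScopeKind discriminator lets LLVM-style RTTI (isa<>/dyn_cast<>, driven by classof) distinguish blocks, lambdas, and their shared CapturingScopeInfo base. A reduced model of how the classof predicates compose across such a hierarchy; the names here are illustrative, not LLVM's.

// Reduced model of the scheme; names are illustrative only.
struct Base {
  enum Kind { K_Function, K_Block, K_Lambda } TheKind;
  Base() : TheKind(K_Function) {}
};

struct Capturing : Base {
  // Matches two leaf kinds, mirroring CapturingScopeInfo::classof above.
  static bool classof(const Base *B) {
    return B->TheKind == K_Block || B->TheKind == K_Lambda;
  }
};

struct Block : Capturing {
  Block() { TheKind = K_Block; }
  static bool classof(const Base *B) { return B->TheKind == K_Block; }
};

// llvm::isa<T>(V) ultimately just calls T::classof(V); a hand-rolled
// equivalent makes the dispatch visible:
template <typename To, typename From>
bool isa_like(const From *V) { return To::classof(V); }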
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
index 22d5db2..31c410a 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
@@ -22,13 +22,15 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/LocInfoType.h"
-#include "clang/Sema/MultiInitializer.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/AST/NSAPI.h"
+#include "clang/Lex/ModuleLoader.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
@@ -44,6 +46,7 @@ namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
+ class SmallBitVector;
}
namespace clang {
@@ -58,6 +61,7 @@ namespace clang {
class BlockDecl;
class CXXBasePath;
class CXXBasePaths;
+ class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
@@ -74,6 +78,7 @@ namespace clang {
class ClassTemplateSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
+ class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
@@ -100,6 +105,7 @@ namespace clang {
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
+ class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
@@ -126,6 +132,7 @@ namespace clang {
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
+ class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
@@ -156,12 +163,15 @@ namespace clang {
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
-
+
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
+ class CompoundScopeInfo;
class DelayedDiagnostic;
class FunctionScopeInfo;
+ class LambdaScopeInfo;
+ class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
@@ -213,7 +223,7 @@ public:
/// PackContext - Manages the stack for #pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
-
+
bool MSStructPragmaOn; // True when #pragma ms_struct on
/// VisContext - Manages the stack for #pragma GCC visibility.
@@ -223,6 +233,13 @@ public:
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
+ /// ExprCleanupObjects - This is the stack of objects requiring
+ /// cleanup that are created by the current full expression. The
+ /// element type here is ExprWithCleanups::Object.
+ SmallVector<BlockDecl*, 8> ExprCleanupObjects;
+
+ llvm::SmallPtrSet<Expr*, 8> MaybeODRUseExprs;
+
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
@@ -231,11 +248,7 @@ public:
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
- /// ExprTemporaries - This is the stack of temporaries that are created by
- /// the current full expression.
- SmallVector<CXXTemporary*, 8> ExprTemporaries;
-
- typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
+ typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
@@ -244,15 +257,21 @@ public:
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
+ /// \brief The set of types for which we have already complained about the
+ /// definitions being hidden.
+ ///
+ /// This set is used to suppress redundant diagnostics.
+ llvm::SmallPtrSet<NamedDecl *, 4> HiddenDefinitions;
+
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
- llvm::OwningPtr<CXXFieldCollector> FieldCollector;
+ OwningPtr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
- llvm::OwningPtr<RecordDeclSetTy> PureVirtualClassDiagSet;
+ OwningPtr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
@@ -284,27 +303,27 @@ public:
/// we find this declaration of "foo" and complain that it is
/// not visible.
llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternalDecls;
-
+
/// \brief Look for a locally scoped external declaration by the given name.
llvm::DenseMap<DeclarationName, NamedDecl *>::iterator
findLocallyScopedExternalDecl(DeclarationName Name);
-
- typedef LazyVector<VarDecl *, ExternalSemaSource,
+
+ typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
- typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
+ typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
-
+
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
- typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
+ typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
@@ -322,11 +341,11 @@ public:
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, const FunctionDecl *FD);
LateTemplateParserCB *LateTemplateParser;
- void *OpaqueParser;
+ void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP, void *P) {
LateTemplateParser = LTP;
- OpaqueParser = P;
+ OpaqueParser = P;
}
class DelayedDiagnostics;
@@ -418,7 +437,7 @@ public:
assert(ParsingDepth == 0);
ActiveStackBase = state.SavedActiveStackBase;
ParsingDepth = state.SavedParsingDepth;
- }
+ }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
@@ -427,11 +446,11 @@ public:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
-
+
public:
ContextRAII(Sema &S, DeclContext *ContextToPush)
- : S(S), SavedContext(S.CurContext),
- SavedContextState(S.DelayedDiagnostics.pushContext())
+ : S(S), SavedContext(S.CurContext),
+ SavedContextState(S.DelayedDiagnostics.pushContext())
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
@@ -454,9 +473,16 @@ public:
/// identifier, declared or undeclared
llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers;
+ /// ExtnameUndeclaredIdentifiers - Identifiers contained in
+ /// #pragma redefine_extname before declared. Used in Solaris system headers
+ /// to define functions that occur in multiple standards to call the version
+ /// in the currently selected standard.
+ llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
+
+
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
-
+
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// #pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
@@ -478,17 +504,44 @@ public:
/// standard library.
LazyDeclPtr StdBadAlloc;
+ /// \brief The C++ "std::initializer_list" template, which is defined in
+ /// <initializer_list>.
+ ClassTemplateDecl *StdInitializerList;
+
/// \brief The C++ "type_info" declaration, which is defined in <typeinfo>.
RecordDecl *CXXTypeInfoDecl;
-
+
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
+ /// \brief Caches identifiers/selectors for NSFoundation APIs.
+ llvm::OwningPtr<NSAPI> NSAPIObj;
+
+ /// \brief The declaration of the Objective-C NSNumber class.
+ ObjCInterfaceDecl *NSNumberDecl;
+
+ /// \brief The Objective-C NSNumber methods used to create NSNumber literals.
+ ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
+
+ /// \brief The declaration of the Objective-C NSArray class.
+ ObjCInterfaceDecl *NSArrayDecl;
+
+ /// \brief The declaration of the arrayWithObjects:count: method.
+ ObjCMethodDecl *ArrayWithObjectsMethod;
+
+ /// \brief The declaration of the Objective-C NSDictionary class.
+ ObjCInterfaceDecl *NSDictionaryDecl;
+
+ /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
+ ObjCMethodDecl *DictionaryWithObjectsMethod;
+
+ /// \brief id<NSCopying> type.
+ QualType QIDNSCopying;
+
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
-
/// A flag that is set when parsing a -dealloc method and no [super dealloc]
/// call was found yet.
bool ObjCShouldCallSuperDealloc;
@@ -496,37 +549,26 @@ public:
/// call was found yet.
bool ObjCShouldCallSuperFinalize;
- /// \brief The set of declarations that have been referenced within
- /// a potentially evaluated expression.
- typedef SmallVector<std::pair<SourceLocation, Decl *>, 10>
- PotentiallyReferencedDecls;
-
- /// \brief A set of diagnostics that may be emitted.
- typedef SmallVector<std::pair<SourceLocation, PartialDiagnostic>, 10>
- PotentiallyEmittedDiagnostics;
-
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
- /// unevaluated operand (C++0x [expr]p8), such as a constant expression
- /// or the subexpression of \c sizeof, where the type or the value of the
- /// expression may be significant but no code will be generated to evaluate
- /// the value of the expression at run time.
+ /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
+ /// \c sizeof, where the type of the expression may be significant but
+ /// no code will be generated to evaluate the value of the expression at
+ /// run time.
Unevaluated,
+ /// \brief The current context is "potentially evaluated" in C++11 terms,
+ /// but the expression is evaluated at compile-time (like the values of
+ /// cases in a switch statement).
+ ConstantEvaluated,
+
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
- /// \brief The current expression may be potentially evaluated or it may
- /// be unevaluated, but it is impossible to tell from the lexical context.
- /// This evaluation context is used primary for the operand of the C++
- /// \c typeid expression, whose argument is potentially evaluated only when
- /// it is an lvalue of polymorphic class type (C++ [basic.def.odr]p2).
- PotentiallyPotentiallyEvaluated,
-
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
@@ -547,47 +589,58 @@ public:
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
- /// \brief The number of temporaries that were active when we
- /// entered this expression evaluation context.
- unsigned NumTemporaries;
+ /// \brief Whether we are in a decltype expression.
+ bool IsDecltype;
+
+ /// \brief The number of active cleanup objects when we entered
+ /// this expression evaluation context.
+ unsigned NumCleanupObjects;
- /// \brief The set of declarations referenced within a
- /// potentially potentially-evaluated context.
+ llvm::SmallPtrSet<Expr*, 8> SavedMaybeODRUseExprs;
+
+ /// \brief The lambdas that are present within this context, if it
+ /// is indeed an unevaluated context.
+ llvm::SmallVector<LambdaExpr *, 2> Lambdas;
+
+ /// \brief The declaration that provides context for the lambda expression
+ /// if the normal declaration context does not suffice, e.g., in a
+ /// default function argument.
+ Decl *LambdaContextDecl;
+
+ /// \brief The context information used to mangle lambda expressions
+ /// within this context.
///
- /// When leaving a potentially potentially-evaluated context, each
- /// of these elements will be as referenced if the corresponding
- /// potentially potentially evaluated expression is potentially
- /// evaluated.
- PotentiallyReferencedDecls *PotentiallyReferenced;
+ /// This mangling information is allocated lazily, since most contexts
+ /// do not have lambda expressions.
+ LambdaMangleContext *LambdaMangle;
- /// \brief The set of diagnostics to emit should this potentially
- /// potentially-evaluated context become evaluated.
- PotentiallyEmittedDiagnostics *PotentiallyDiagnosed;
+ /// \brief If we are processing a decltype type, a set of call expressions
+ /// for which we have deferred checking the completeness of the return type.
+ llvm::SmallVector<CallExpr*, 8> DelayedDecltypeCalls;
+ /// \brief If we are processing a decltype type, a set of temporary binding
+ /// expressions for which we have deferred checking the destructor.
+ llvm::SmallVector<CXXBindTemporaryExpr*, 8> DelayedDecltypeBinds;
+
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
- unsigned NumTemporaries,
- bool ParentNeedsCleanups)
+ unsigned NumCleanupObjects,
+ bool ParentNeedsCleanups,
+ Decl *LambdaContextDecl,
+ bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
- NumTemporaries(NumTemporaries),
- PotentiallyReferenced(0), PotentiallyDiagnosed(0) { }
-
- void addReferencedDecl(SourceLocation Loc, Decl *Decl) {
- if (!PotentiallyReferenced)
- PotentiallyReferenced = new PotentiallyReferencedDecls;
- PotentiallyReferenced->push_back(std::make_pair(Loc, Decl));
- }
-
- void addDiagnostic(SourceLocation Loc, const PartialDiagnostic &PD) {
- if (!PotentiallyDiagnosed)
- PotentiallyDiagnosed = new PotentiallyEmittedDiagnostics;
- PotentiallyDiagnosed->push_back(std::make_pair(Loc, PD));
+ IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
+ LambdaContextDecl(LambdaContextDecl), LambdaMangle() { }
+
+ ~ExpressionEvaluationContextRecord() {
+ delete LambdaMangle;
}
-
- void Destroy() {
- delete PotentiallyReferenced;
- delete PotentiallyDiagnosed;
- PotentiallyReferenced = 0;
- PotentiallyDiagnosed = 0;
+
+ /// \brief Retrieve the mangling context for lambdas.
+ LambdaMangleContext &getLambdaMangleContext() {
+ assert(LambdaContextDecl && "Need to have a lambda context declaration");
+ if (!LambdaMangle)
+ LambdaMangle = new LambdaMangleContext;
+ return *LambdaMangle;
}
};
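
LambdaMangle is deliberately lazy: most evaluation contexts never contain a lambda, so the record starts with a null pointer, getLambdaMangleContext() allocates on first use, and the destructor cleans up. A compact sketch of that lazy-member pattern under hypothetical names:

struct MangleCtx { unsigned NextId; MangleCtx() : NextId(0) {} };

class Ctx {
  MangleCtx *Mangle;            // null until the first lambda appears
  Ctx(const Ctx&);              // not copyable: a copy would double-delete
  Ctx &operator=(const Ctx&);
public:
  Ctx() : Mangle(0) {}
  ~Ctx() { delete Mangle; }     // mirrors ~ExpressionEvaluationContextRecord
  MangleCtx &getMangle() {
    if (!Mangle)
      Mangle = new MangleCtx(); // pay the allocation only on demand
    return *Mangle;
  }
};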
@@ -608,7 +661,17 @@ public:
/// This is used for determining parameter types of other objects and is
/// utterly meaningless on other types of special members.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
+ public:
+ enum Kind {
+ NoMemberOrDeleted,
+ Ambiguous,
+ SuccessNonConst,
+ SuccessConst
+ };
+
+ private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
+
public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
@@ -617,15 +680,11 @@ public:
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
- bool hasSuccess() const { return Pair.getInt() & 0x1; }
- void setSuccess(bool B) {
- Pair.setInt(unsigned(B) | hasConstParamMatch() << 1);
- }
+ Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
+ void setKind(Kind K) { Pair.setInt(K); }
- bool hasConstParamMatch() const { return Pair.getInt() & 0x2; }
- void setConstParamMatch(bool B) {
- Pair.setInt(B << 1 | unsigned(hasSuccess()));
- }
+ bool hasSuccess() const { return getKind() >= SuccessNonConst; }
+ bool hasConstParamMatch() const { return getKind() == SuccessConst; }
};
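
The replacement of two independent bits with a single Kind works because the enumerators are ordered: both success values sort after NoMemberOrDeleted and Ambiguous, so hasSuccess() reduces to a comparison. A self-contained demonstration of that ordering trick (not code from the patch):

#include <cassert>

enum Kind { NoMemberOrDeleted, Ambiguous, SuccessNonConst, SuccessConst };

bool hasSuccess(Kind K)         { return K >= SuccessNonConst; }
bool hasConstParamMatch(Kind K) { return K == SuccessConst; }

int main() {
  assert(!hasSuccess(NoMemberOrDeleted) && !hasSuccess(Ambiguous));
  assert(hasSuccess(SuccessNonConst) && !hasConstParamMatch(SuccessNonConst));
  assert(hasSuccess(SuccessConst)    &&  hasConstParamMatch(SuccessConst));
  return 0;
}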
/// \brief A cache of special member function overload resolution results
@@ -648,16 +707,16 @@ public:
typedef llvm::DenseMap<ParmVarDecl *, SmallVector<ParmVarDecl *, 1> >
UnparsedDefaultArgInstantiationsMap;
-
+
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
- /// default arguments of its methods have been parsed.
+ /// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
-
+
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *,SourceLocation> UnparsedDefaultArgLocs;
@@ -675,25 +734,31 @@ public:
/// of selectors are "overloaded").
GlobalMethodPool MethodPool;
- /// Method selectors used in a @selector expression. Used for implementation
+ /// Method selectors used in a @selector expression. Used for implementation
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;
- GlobalMethodPool::iterator ReadMethodPool(Selector Sel);
+ void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
+
+ /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
+ /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
+ /// should not be used elsewhere.
+ void EmitCurrentDiagnostic(unsigned DiagID);
+
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = 0);
~Sema();
-
+
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
-
- const LangOptions &getLangOptions() const { return LangOpts; }
+
+ const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
@@ -724,14 +789,32 @@ public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
- explicit SemaDiagnosticBuilder(Sema &SemaRef)
- : DiagnosticBuilder(DiagnosticBuilder::Suppress), SemaRef(SemaRef) { }
-
- ~SemaDiagnosticBuilder();
+ ~SemaDiagnosticBuilder() {
+ // If we aren't active, there is nothing to do.
+ if (!isActive()) return;
+
+ // Otherwise, we need to emit the diagnostic. First flush the underlying
+ // DiagnosticBuilder data, and clear the diagnostic builder itself so it
+ // won't emit the diagnostic in its own destructor.
+ //
+ // This seems wasteful, in that as written the DiagnosticBuilder dtor will
+ // do its own needless checks to see if the diagnostic needs to be
+ // emitted. However, because we take care to ensure that the builder
+ // objects never escape, a sufficiently smart compiler will be able to
+ // eliminate that code.
+ FlushCounts();
+ Clear();
+
+ // Dispatch to Sema to emit the diagnostic.
+ SemaRef.EmitCurrentDiagnostic(DiagID);
+ }
};
/// \brief Emit a diagnostic.
- SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
+ SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
+ DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
+ return SemaDiagnosticBuilder(DB, *this, DiagID);
+ }
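
Diag() now returns a builder whose destructor performs the emission: the temporary accumulates arguments via operator<< over the full expression, then flushes to Sema::EmitCurrentDiagnostic as it dies. A minimal sketch of that emit-on-destruction RAII shape, with Record standing in for the real DiagnosticBuilder/SemaDiagnosticBuilder pair:

#include <iostream>
#include <sstream>
#include <string>

// "Record" is a hypothetical stand-in; emission happens in the destructor.
class Record {
  std::ostringstream Text;
  bool Active;
public:
  explicit Record(const std::string &Prefix) : Active(true) {
    Text << Prefix << ": ";
  }
  Record &operator<<(const std::string &S) { Text << S; return *this; }
  void clear() { Active = false; }   // counterpart of Clear() above
  ~Record() {
    if (!Active) return;             // counterpart of the isActive() check
    std::cerr << Text.str() << '\n'; // deferred emission point
  }
};

int main() {
  // The temporary lives to the end of the full expression, gathering
  // arguments, then emits as it is destroyed -- the same shape as
  // Diag(Loc, ID) << ... in Sema.
  Record("warning") << "unused variable 'x'";
  return 0;
}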
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
@@ -741,6 +824,9 @@ public:
bool findMacroSpelling(SourceLocation &loc, StringRef name);
+ /// \brief Get a string to suggest for zero-initialization of a type.
+ const char *getFixItZeroInitializerForType(QualType T) const;
+
ExprResult Owned(Expr* E) { return E; }
ExprResult Owned(ExprResult R) { return R; }
StmtResult Owned(Stmt* S) { return S; }
@@ -753,18 +839,27 @@ public:
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
- void PopFunctionOrBlockScope(const sema::AnalysisBasedWarnings::Policy *WP =0,
- const Decl *D = 0, const BlockExpr *blkExpr = 0);
+ void PushLambdaScope(CXXRecordDecl *Lambda, CXXMethodDecl *CallOperator);
+ void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP =0,
+ const Decl *D = 0, const BlockExpr *blkExpr = 0);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
+ void PushCompoundScope();
+ void PopCompoundScope();
+
+ sema::CompoundScopeInfo &getCurCompoundScope() const;
+
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
+ /// \brief Retrieve the current lambda expression, if any.
+ sema::LambdaScopeInfo *getCurLambda();
+
/// WeakTopLevelDeclDecls - access to #pragma weak-generated Decls
SmallVector<Decl*,2> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
@@ -787,8 +882,8 @@ public:
SourceLocation AttrLoc);
QualType BuildFunctionType(QualType T,
QualType *ParamTypes, unsigned NumParamTypes,
- bool Variadic, unsigned Quals,
- RefQualifierKind RefQualifier,
+ bool Variadic, bool HasTrailingReturn,
+ unsigned Quals, RefQualifierKind RefQualifier,
SourceLocation Loc, DeclarationName Entity,
FunctionType::ExtInfo Info);
QualType BuildMemberPointerType(QualType T, QualType Class,
@@ -831,11 +926,11 @@ public:
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
-
+
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
-
+
bool RequireCompleteType(SourceLocation Loc, QualType T,
const PartialDiagnostic &PD,
std::pair<SourceLocation, PartialDiagnostic> Note);
@@ -848,8 +943,7 @@ public:
PartialDiagnostic> Note);
bool RequireLiteralType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD,
- bool AllowIncompleteType = false);
+ const PartialDiagnostic &PD);
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
@@ -864,6 +958,17 @@ public:
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
+ /// List of decls defined in a function prototype. This contains EnumConstants
+ /// that incorrectly end up in translation unit scope because there is no
+ /// function to pin them on. ActOnFunctionDeclarator reads this list and patches
+ /// them into the FunctionDecl.
+ std::vector<NamedDecl*> DeclsInPrototypeScope;
+ /// Nonzero if we are currently parsing a function declarator. This is a counter
+ /// as opposed to a boolean so we can deal with nested function declarators
+ /// such as:
+ /// void f(void (*g)(), ...)
+ unsigned InFunctionDeclarator;
+
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = 0);
void DiagnoseUseOfUnimplementedSelectors();
@@ -873,6 +978,7 @@ public:
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
+ bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = 0);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
@@ -895,36 +1001,36 @@ public:
NC_TypeTemplate,
NC_FunctionTemplate
};
-
+
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
-
+
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
-
+
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
-
+
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
-
- NameClassification(const IdentifierInfo *Keyword)
+
+ NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
-
- static NameClassification Error() {
- return NameClassification(NC_Error);
+
+ static NameClassification Error() {
+ return NameClassification(NC_Error);
}
-
- static NameClassification Unknown() {
- return NameClassification(NC_Unknown);
+
+ static NameClassification Unknown() {
+ return NameClassification(NC_Unknown);
}
-
+
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
-
+
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
@@ -936,19 +1042,19 @@ public:
Result.Template = Name;
return Result;
}
-
+
NameClassificationKind getKind() const { return Kind; }
-
+
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
-
+
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
-
+
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
return Template;
@@ -959,7 +1065,7 @@ public:
return Kind == NC_TypeTemplate? TNK_Type_template : TNK_Function_template;
}
};
-
+
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
@@ -976,14 +1082,14 @@ public:
///
/// \param NameLoc The location of the identifier.
///
- /// \param NextToken The token following the identifier. Used to help
+ /// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
NameClassification ClassifyName(Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc,
const Token &NextToken);
-
+
Decl *ActOnDeclarator(Scope *S, Declarator &D);
Decl *HandleDeclarator(Scope *S, Declarator &D,
@@ -992,6 +1098,9 @@ public:
const LookupResult &Previous,
Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
+ bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
+ DeclarationName Name,
+ SourceLocation Loc);
void DiagnoseFunctionSpecifiers(Declarator& D);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
@@ -1009,6 +1118,8 @@ public:
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckCompleteVariableDeclaration(VarDecl *var);
+ void ActOnStartFunctionDeclarator();
+ void ActOnEndFunctionDeclarator();
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
@@ -1016,20 +1127,7 @@ public:
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
- /// \brief The kind of constexpr declaration checking we are performing.
- ///
- /// The kind affects which diagnostics (if any) are emitted if the function
- /// does not satisfy the requirements of a constexpr function declaration.
- enum CheckConstexprKind {
- /// \brief Check a constexpr function declaration, and produce errors if it
- /// does not satisfy the requirements.
- CCK_Declaration,
- /// \brief Check a constexpr function template instantiation.
- CCK_Instantiation,
- /// \brief Produce notes explaining why an instantiation was not constexpr.
- CCK_NoteNonConstexprInstantiation
- };
- bool CheckConstexprFunctionDecl(const FunctionDecl *FD, CheckConstexprKind CCK);
+ bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
@@ -1105,23 +1203,22 @@ public:
/// \brief The parser has processed a module import declaration.
///
- /// \param ImportLoc The location of the '__import_module__' keyword.
+ /// \param AtLoc The location of the '@' symbol, if any.
///
- /// \param ModuleName The name of the module.
+ /// \param ImportLoc The location of the 'import' keyword.
///
- /// \param ModuleNameLoc The location of the module name.
- DeclResult ActOnModuleImport(SourceLocation ImportLoc,
- IdentifierInfo &ModuleName,
- SourceLocation ModuleNameLoc);
-
- /// \brief Diagnose that \p New is a module-private redeclaration of
- /// \p Old.
- void diagnoseModulePrivateRedeclaration(NamedDecl *New, NamedDecl *Old,
- SourceLocation ModulePrivateKeyword
- = SourceLocation());
-
+ /// \param Path The module access path.
+ DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
+ ModuleIdPath Path);
+
/// \brief Retrieve a suitable printing policy.
- PrintingPolicy getPrintingPolicy() const;
+ PrintingPolicy getPrintingPolicy() const {
+ return getPrintingPolicy(Context, PP);
+ }
+
+ /// \brief Retrieve a suitable printing policy.
+ static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
+ const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
@@ -1132,14 +1229,12 @@ public:
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
-
- StmtResult ActOnVlaStmt(const DeclSpec &DS);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record);
- Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
+ Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
@@ -1160,7 +1255,8 @@ public:
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
- bool &OwnedDecl, bool &IsDependent, bool ScopedEnum,
+ bool &OwnedDecl, bool &IsDependent,
+ SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
@@ -1208,7 +1304,7 @@ public:
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const RecordType* Record, CXXSpecialMember mem);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
- void ActOnLastBitfield(SourceLocation DeclStart,
+ void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
@@ -1245,8 +1341,8 @@ public:
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
- void ActOnObjCTemporaryExitContainerContext();
- void ActOnObjCReenterContainerContext();
+ void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
+ void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
@@ -1257,6 +1353,9 @@ public:
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
+ bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
+ bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
+ QualType EnumUnderlyingTy, const EnumDecl *Prev);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
@@ -1280,7 +1379,7 @@ public:
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
- void ActOnExitFunctionContext() { PopDeclContext(); }
+ void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
@@ -1302,6 +1401,14 @@ public:
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
+ /// \brief Make the given externally-produced declaration visible at the
+ /// top level scope.
+ ///
+ /// \param D The externally-produced declaration to push.
+ ///
+ /// \param Name The name of the externally-produced declaration.
+ void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
+
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
@@ -1319,14 +1426,17 @@ public:
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
+ bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
+ void mergeDeclAttributes(Decl *New, Decl *Old, bool MergeDeprecation = true);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
- bool MergeFunctionDecl(FunctionDecl *New, Decl *Old);
- bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old);
- void mergeObjCMethodDecls(ObjCMethodDecl *New, const ObjCMethodDecl *Old);
+ bool MergeFunctionDecl(FunctionDecl *New, Decl *Old, Scope *S);
+ bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
+ Scope *S);
+ void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &OldDecls);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
- bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old);
+ bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
@@ -1355,7 +1465,7 @@ public:
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
- FunctionDecl *New,
+ FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
@@ -1388,9 +1498,12 @@ public:
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
- bool FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
- const FunctionProtoType *NewType);
-
+ bool FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
+ const FunctionProtoType *NewType,
+ unsigned *ArgPos = 0);
+ void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
+ QualType FromType, QualType ToType);
+
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
@@ -1421,7 +1534,8 @@ public:
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
- bool TopLevelOfInitList = false);
+ bool TopLevelOfInitList = false,
+ bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
@@ -1430,7 +1544,16 @@ public:
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
- ExprResult
+ /// Contexts in which a converted constant expression is required.
+ enum CCEKind {
+ CCEK_CaseValue, ///< Expression in a case label.
+ CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
+ CCEK_TemplateArg ///< Value of a non-type template parameter.
+ };
+ ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
+ llvm::APSInt &Value, CCEKind CCE);
+
+ ExprResult
ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *FromE,
const PartialDiagnostic &NotIntDiag,
const PartialDiagnostic &IncompleteDiag,
@@ -1438,8 +1561,15 @@ public:
const PartialDiagnostic &ExplicitConvNote,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &AmbigNote,
- const PartialDiagnostic &ConvDiag);
-
+ const PartialDiagnostic &ConvDiag,
+ bool AllowScopedEnumerations);
+ enum ObjCSubscriptKind {
+ OS_Array,
+ OS_Dictionary,
+ OS_Error
+ };
+ ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
+
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
@@ -1450,21 +1580,18 @@ public:
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
- void AddOverloadCandidate(NamedDecl *Function,
- DeclAccessPair FoundDecl,
- Expr **Args, unsigned NumArgs,
- OverloadCandidateSet &CandidateSet);
-
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
- bool PartialOverloading = false);
+ bool PartialOverloading = false,
+ bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
- bool SuppressUserConversions = false);
+ bool SuppressUserConversions = false,
+ TemplateArgumentListInfo *ExplicitTemplateArgs = 0);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
@@ -1475,7 +1602,7 @@ public:
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
@@ -1484,13 +1611,13 @@ public:
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
@@ -1507,7 +1634,7 @@ public:
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
- Expr *Object, Expr **Args, unsigned NumArgs,
+ Expr *Object, llvm::ArrayRef<Expr*> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc,
@@ -1524,20 +1651,20 @@ public:
Expr **Args, unsigned NumArgs,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
- bool Operator,
- Expr **Args, unsigned NumArgs,
+ bool Operator, SourceLocation Loc,
+ llvm::ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false,
bool StdNamespaceIsAssociated = false);
// Emit as a 'note' the specific overload candidate
- void NoteOverloadCandidate(FunctionDecl *Fn);
-
+ void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
+
// Emit as a series of 'note's all template and non-templates
// identified by the expression Expr
- void NoteAllOverloadCandidates(Expr* E);
-
+ void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
+
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
@@ -1546,9 +1673,12 @@ public:
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
- FunctionDecl *ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
- bool Complain,
- DeclAccessPair &Found);
+ FunctionDecl *
+ ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
+ QualType TargetType,
+ bool Complain,
+ DeclAccessPair &Found,
+ bool *pHadMultipleCandidates = 0);
FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
@@ -1557,9 +1687,9 @@ public:
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
- bool Complain = false,
- const SourceRange& OpRangeForComplaining = SourceRange(),
- QualType DestTypeForComplaining = QualType(),
+ bool Complain = false,
+ const SourceRange& OpRangeForComplaining = SourceRange(),
+ QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
@@ -1571,7 +1701,7 @@ public:
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
@@ -1580,7 +1710,8 @@ public:
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
- Expr *ExecConfig);
+ Expr *ExecConfig,
+ bool AllowTypoCorrection=true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
@@ -1702,8 +1833,21 @@ public:
ForRedeclaration
};
-private:
- bool CppLookupName(LookupResult &R, Scope *S);
+ /// \brief The possible outcomes of name lookup for a literal operator.
+ enum LiteralOperatorLookupResult {
+ /// \brief The lookup resulted in an error.
+ LOLR_Error,
+ /// \brief The lookup found a single 'cooked' literal operator, which
+ /// expects a normal literal to be built and passed to it.
+ LOLR_Cooked,
+ /// \brief The lookup found a single 'raw' literal operator, which expects
+ /// a string literal containing the spelling of the literal token.
+ LOLR_Raw,
+ /// \brief The lookup found an overload set of literal operator templates,
+ /// which expect the characters of the spelling of the literal token to be
+ /// passed as a non-type template argument pack.
+ LOLR_Template
+ };
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
@@ -1713,6 +1857,9 @@ private:
bool ConstThis,
bool VolatileThis);
+private:
+ bool CppLookupName(LookupResult &R, Scope *S);
+
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
@@ -1722,7 +1869,7 @@ private:
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
-
+
public:
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
@@ -1741,7 +1888,9 @@ public:
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
- ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc);
+ ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
+ RedeclarationKind Redecl
+ = NotForRedeclaration);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
@@ -1763,8 +1912,13 @@ public:
unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
+ LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
+ ArrayRef<QualType> ArgTys,
+ bool AllowRawAndTemplate);
+
void ArgumentDependentLookup(DeclarationName Name, bool Operator,
- Expr **Args, unsigned NumArgs,
+ SourceLocation Loc,
+ llvm::ArrayRef<Expr *> Args,
ADLResult &Functions,
bool StdNamespaceIsAssociated = false);
@@ -1774,43 +1928,16 @@ public:
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
-
- /// \brief The context in which typo-correction occurs.
- ///
- /// The typo-correction context affects which keywords (if any) are
- /// considered when trying to correct for typos.
- enum CorrectTypoContext {
- /// \brief An unknown context, where any keyword might be valid.
- CTC_Unknown,
- /// \brief A context where no keywords are used (e.g. we expect an actual
- /// name).
- CTC_NoKeywords,
- /// \brief A context where we're correcting a type name.
- CTC_Type,
- /// \brief An expression context.
- CTC_Expression,
- /// \brief A type cast, or anything else that can be followed by a '<'.
- CTC_CXXCasts,
- /// \brief A member lookup context.
- CTC_MemberLookup,
- /// \brief An Objective-C ivar lookup context (e.g., self->ivar).
- CTC_ObjCIvarLookup,
- /// \brief An Objective-C property lookup context (e.g., self.prop).
- CTC_ObjCPropertyLookup,
- /// \brief The receiver of an Objective-C message send within an
- /// Objective-C method where 'super' is a valid keyword.
- CTC_ObjCMessageReceiver
- };
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
- DeclContext *MemberContext = NULL,
+ CorrectionCandidateCallback &CCC,
+ DeclContext *MemberContext = 0,
bool EnteringContext = false,
- CorrectTypoContext CTC = CTC_Unknown,
- const ObjCObjectPointerType *OPT = NULL);
+ const ObjCObjectPointerType *OPT = 0);
- void FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
+ void FindAssociatedClassesAndNamespaces(llvm::ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
@@ -1852,7 +1979,7 @@ public:
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
-
+
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
@@ -1868,7 +1995,7 @@ public:
typedef llvm::DenseSet<Selector, llvm::DenseMapInfo<Selector> > SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
-
+
/// CheckProtocolMethodDefs - This routine checks unimplemented
/// methods declared in protocol, and those referenced by it.
/// \param IDecl - Used for checking for methods which may have been
@@ -1898,18 +2025,18 @@ public:
ObjCContainerDecl *CDecl,
const SelectorSet &InsMap);
- /// DefaultSynthesizeProperties - This routine default synthesizes all
+ /// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in class's @implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
-
+
/// CollectImmediateProperties - This routine collects all properties in
/// the class and its conforming protocols, but not those in its super class.
void CollectImmediateProperties(ObjCContainerDecl *CDecl,
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap);
-
+
/// LookupPropertyDecl - Looks up a property in the current class and all
/// its protocols.
@@ -1920,12 +2047,14 @@ public:
//// class extensions.
Decl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
+ SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
+ const unsigned AttributesAsWritten,
bool *isOverridingProperty,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind);
@@ -1935,12 +2064,14 @@ public:
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
+ SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
+ const unsigned AttributesAsWritten,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = 0);
@@ -1976,13 +2107,16 @@ public:
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
- bool WarnExactMatch=false);
+ bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class and
- /// warns each time an exact match is found.
+ /// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
+ /// \brief Add the given method to the list of globally-known methods.
+ void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
+
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
@@ -2017,7 +2151,7 @@ public:
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false,
bool warn=true) {
- return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
+ return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
warn, /*instance*/true);
}
@@ -2045,7 +2179,7 @@ public:
class FullExprArg {
public:
FullExprArg(Sema &actions) : E(0) { }
-
+
// FIXME: The const_cast here is ugly. RValue references would make this
// much nicer (or we could duplicate a bunch of the move semantics
// emulation code from Ownership.h).
@@ -2079,9 +2213,28 @@ public:
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
+
+ void ActOnStartOfCompoundStmt();
+ void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
MultiStmtArg Elts,
bool isStmtExpr);
+
+ /// \brief A RAII object to enter scope of a compound statement.
+ class CompoundScopeRAII {
+ public:
+ CompoundScopeRAII(Sema &S): S(S) {
+ S.ActOnStartOfCompoundStmt();
+ }
+
+ ~CompoundScopeRAII() {
+ S.ActOnFinishOfCompoundStmt();
+ }
+
+ private:
+ Sema &S;
+ };
+
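// Sketch of the intended usage of the RAII class above (an assumption based
// on its definition, not code from this change):
void parseCompoundBody(Sema &SemaRef) {
  Sema::CompoundScopeRAII CompoundScope(SemaRef); // ActOnStartOfCompoundStmt()
  // ... act on the statements of the compound body ...
}                                                 // ActOnFinishOfCompoundStmt()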
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
@@ -2097,7 +2250,7 @@ public:
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
-
+
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
@@ -2151,9 +2304,9 @@ public:
const VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
-
+
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
- StmtResult ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
+ StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnAsmStmt(SourceLocation AsmLoc,
bool IsSimple, bool IsVolatile,
@@ -2220,7 +2373,7 @@ public:
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
-
+
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
@@ -2229,7 +2382,22 @@ public:
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedDecl(const NamedDecl *ND);
-
+
+ /// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
+ /// null statement as its \p Body, and the two are located on the same line.
+ ///
+ /// This helps prevent bugs due to typos, such as:
+ /// if (condition);
+ /// do_stuff();
+ void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
+ const Stmt *Body,
+ unsigned DiagID);
+
+ /// Warn if a for/while loop statement \p S, which is followed by
+ /// \p PossibleBody, has a suspicious null statement as a body.
+ void DiagnoseEmptyLoopBody(const Stmt *S,
+ const Stmt *PossibleBody);
+
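// The typo patterns these two diagnostics target (illustrative):
int emptyBodyExample(int n) {
  int sum = 0, i = 0;
  for (; i < n; ++i);  // DiagnoseEmptyLoopBody: suspicious null loop body
    sum += i;          // 'PossibleBody': runs once, after the loop finishes
  if (sum == 0);       // DiagnoseEmptyStmtBody: null body on the same line
    sum = 1;           // executes unconditionally
  return sum;
}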
ParsingDeclState PushParsingDeclaration() {
return DelayedDiagnostics.pushParsingDecl();
}
@@ -2246,7 +2414,7 @@ public:
}
void EmitDeprecationWarning(NamedDecl *D, StringRef Message,
- SourceLocation Loc,
+ SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=0);
void HandleDelayedDeprecationCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
@@ -2260,6 +2428,7 @@ public:
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=0);
+ void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
@@ -2267,15 +2436,82 @@ public:
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
Expr **Args, unsigned NumArgs);
- void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext);
+ void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl = 0,
+ bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
- void MarkDeclarationReferenced(SourceLocation Loc, Decl *D);
+ ExprResult TranformToPotentiallyEvaluated(Expr *E);
+ ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
+
+ ExprResult ActOnConstantExpression(ExprResult Res);
+
+ // Functions for marking a declaration referenced. These functions also
+ // contain the relevant logic for marking if a reference to a function or
+ // variable is an odr-use (in the C++11 sense). There are separate variants
+ // for expressions referring to a decl; these exist because odr-use marking
+ // needs to be delayed for some constant variables when we build one of the
+ // named expressions.
+ void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D);
+ void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func);
+ void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
+ void MarkDeclRefReferenced(DeclRefExpr *E);
+ void MarkMemberReferenced(MemberExpr *E);
+
+ void UpdateMarkingForLValueToRValue(Expr *E);
+ void CleanupVarDeclMarking();
+
+ enum TryCaptureKind {
+ TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
+ };
+
+ /// \brief Try to capture the given variable.
+ ///
+ /// \param Var The variable to capture.
+ ///
+ /// \param Loc The location at which the capture occurs.
+ ///
+ /// \param Kind The kind of capture, which may be implicit (for either a
+ /// block or a lambda), or explicit by-value or by-reference (for a lambda).
+ ///
+ /// \param EllipsisLoc The location of the ellipsis, if one is provided in
+ /// an explicit lambda capture.
+ ///
+ /// \param BuildAndDiagnose Whether we are actually supposed to add the
+ /// captures or diagnose errors. If false, this routine merely checks whether
+ /// the capture can occur without performing the capture itself or complaining
+ /// if the variable cannot be captured.
+ ///
+ /// \param CaptureType Will be set to the type of the field used to capture
+ /// this variable in the innermost block or lambda. Only valid when the
+ /// variable can be captured.
+ ///
+ /// \param DeclRefType Will be set to the type of a reference to the capture
+ /// from within the current scope. Only valid when the variable can be
+ /// captured.
+ ///
+ /// \returns true if an error occurred (i.e., the variable cannot be
+ /// captured) and false if the capture succeeded.
+ bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
+ SourceLocation EllipsisLoc, bool BuildAndDiagnose,
+ QualType &CaptureType,
+ QualType &DeclRefType);
+
+ /// \brief Try to capture the given variable.
+ bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+ TryCaptureKind Kind = TryCapture_Implicit,
+ SourceLocation EllipsisLoc = SourceLocation());
+
+ /// \brief Given a variable, determine the type that a reference to that
+ /// variable will have in the given scope.
+ QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
+
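// Illustrative C++11 capture forms corresponding to TryCaptureKind (a
// sketch; each lambda below exercises tryCaptureVariable):
void captureKindsExample() {
  int x = 0;
  auto a = [=]  { return x; }; // TryCapture_Implicit (by-value default)
  auto b = [x]  { return x; }; // TryCapture_ExplicitByVal
  auto c = [&x] { return x; }; // TryCapture_ExplicitByRef
  a(); b(); c();
}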
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
- void MarkDeclarationsReferencedInExpr(Expr *E);
+ void MarkDeclarationsReferencedInExpr(Expr *E,
+ bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
@@ -2300,9 +2536,12 @@ public:
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
-
- ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS, UnqualifiedId &Id,
- bool HasTrailingLParen, bool IsAddressOfOperand);
+
+ ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Id,
+ bool HasTrailingLParen, bool IsAddressOfOperand,
+ CorrectionCandidateCallback *CCC = 0);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
@@ -2310,15 +2549,16 @@ public:
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
- CorrectTypoContext CTC = CTC_Unknown,
+ CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = 0,
- Expr **Args = 0, unsigned NumArgs = 0);
+ llvm::ArrayRef<Expr *> Args = llvm::ArrayRef<Expr *>());
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
@@ -2338,9 +2578,11 @@ public:
Expr *baseObjectExpr = 0,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
@@ -2351,6 +2593,7 @@ public:
ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
@@ -2361,18 +2604,25 @@ public:
const DeclarationNameInfo &NameInfo,
NamedDecl *D);
+ ExprResult BuildLiteralOperatorCall(LookupResult &R,
+ DeclarationNameInfo &SuffixInfo,
+ ArrayRef<Expr*> Args,
+ SourceLocation LitEndLoc,
+ TemplateArgumentListInfo *ExplicitTemplateArgs = 0);
+
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
- ExprResult ActOnNumericConstant(const Token &Tok);
- ExprResult ActOnCharacterConstant(const Token &Tok);
+ ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
+ ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = 0);
+ ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = 0);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
- ExprResult ActOnParenOrParenListExpr(SourceLocation L,
- SourceLocation R,
- MultiExprArg Val);
+ ExprResult ActOnParenListExpr(SourceLocation L,
+ SourceLocation R,
+ MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
- ExprResult ActOnStringLiteral(const Token *StringToks,
- unsigned NumStringToks);
+ ExprResult ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks,
+ Scope *UDLScope = 0);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
@@ -2431,6 +2681,7 @@ public:
ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
@@ -2438,11 +2689,13 @@ public:
ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
SourceLocation OpLoc, bool IsArrow,
const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false);
+ ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
ExprResult LookupMemberExpr(LookupResult &R, ExprResult &Base,
bool &IsArrow, SourceLocation OpLoc,
CXXScopeSpec &SS,
@@ -2456,6 +2709,7 @@ public:
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
@@ -2464,6 +2718,7 @@ public:
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl,
bool HasTrailingLParen);
@@ -2475,6 +2730,9 @@ public:
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
bool ExecConfig = false);
+ void CheckStaticArrayArgument(SourceLocation CallLoc,
+ ParmVarDecl *Param,
+ const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
@@ -2544,9 +2802,11 @@ public:
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
-
+
+ void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
+ void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
@@ -2588,7 +2848,40 @@ public:
bool CheckCaseExpression(Expr *E);
- bool CheckMicrosoftIfExistsSymbol(CXXScopeSpec &SS, UnqualifiedId &Name);
+ /// \brief Describes the result of an "if-exists" condition check.
+ enum IfExistsResult {
+ /// \brief The symbol exists.
+ IER_Exists,
+
+ /// \brief The symbol does not exist.
+ IER_DoesNotExist,
+
+ /// \brief The name is a dependent name, so the results will differ
+ /// from one instantiation to the next.
+ IER_Dependent,
+
+ /// \brief An error occurred.
+ IER_Error
+ };
+
+ IfExistsResult
+ CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
+ const DeclarationNameInfo &TargetNameInfo);
+
+ IfExistsResult
+ CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
+ bool IsIfExists, CXXScopeSpec &SS,
+ UnqualifiedId &Name);
+
+ StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ Stmt *Nested);
+ StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ CXXScopeSpec &SS, UnqualifiedId &Name,
+ Stmt *Nested);
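// Hedged illustration of the Microsoft extension modeled above (requires
// clang's -fms-extensions; the probe names are hypothetical):
struct IfExistsProbe { static void present() {} };
void ifExistsExample() {
  __if_exists(IfExistsProbe::present) { IfExistsProbe::present(); } // IER_Exists
  __if_not_exists(IfExistsProbe::absent) { /* kept: name not found */ }
}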
//===------------------------- "Block" Extension ------------------------===//
@@ -2610,12 +2903,12 @@ public:
Scope *CurScope);
//===---------------------------- OpenCL Features -----------------------===//
-
+
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
- SourceLocation BuiltinLoc,
+ SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
-
+
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
@@ -2632,6 +2925,20 @@ public:
CXXRecordDecl *getStdBadAlloc() const;
+ /// \brief Tests whether Ty is an instance of std::initializer_list and, if
+ /// it is and Element is not NULL, assigns the element type to Element.
+ bool isStdInitializerList(QualType Ty, QualType *Element);
+
+ /// \brief Looks for the std::initializer_list template and instantiates it
+ /// with Element, or emits an error if it's not found.
+ ///
+ /// \returns The instantiated template, or null on error.
+ QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
+
+ /// \brief Determine whether Ctor is an initializer-list constructor, as
+ /// defined in [dcl.init.list]p2.
+ bool isInitListConstructor(const CXXConstructorDecl *Ctor);
+
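// What [dcl.init.list]p2 treats as an initializer-list constructor
// (illustrative, not code from this change):
#include <initializer_list>
struct InitListExample {
  InitListExample(std::initializer_list<int> il);            // yes
  InitListExample(std::initializer_list<int> il, int n = 0); // yes: rest defaulted
  InitListExample(int n);                                    // no
};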
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
@@ -2674,7 +2981,7 @@ public:
bool IsTypeName,
SourceLocation TypenameLoc);
- bool CheckInheritedConstructorUsingDecl(UsingDecl *UD);
+ bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
@@ -2692,15 +2999,6 @@ public:
UnqualifiedId &Name,
TypeResult Type);
- /// AddCXXDirectInitializerToDecl - This action is called immediately after
- /// ActOnDeclarator, when a C++ direct initializer is present.
- /// e.g: "int x(1);"
- void AddCXXDirectInitializerToDecl(Decl *Dcl,
- SourceLocation LParenLoc,
- MultiExprArg Exprs,
- SourceLocation RParenLoc,
- bool TypeMayContainAuto);
-
/// InitializeVarWithConstructor - Creates a CXXConstructExpr
/// and sets it as the initializer for the passed-in VarDecl.
bool InitializeVarWithConstructor(VarDecl *VD,
@@ -2737,7 +3035,7 @@ public:
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
- /// \brief Helper class that collects exception specifications for
+ /// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
@@ -2765,9 +3063,9 @@ public:
}
public:
- explicit ImplicitExceptionSpecification(ASTContext &Context)
+ explicit ImplicitExceptionSpecification(ASTContext &Context)
: Context(&Context), ComputedEST(EST_BasicNoexcept) {
- if (!Context.getLangOptions().CPlusPlus0x)
+ if (!Context.getLangOpts().CPlusPlus0x)
ComputedEST = EST_DynamicNone;
}
@@ -2839,28 +3137,18 @@ public:
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
- bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM);
-
- /// \brief Determine if a defaulted copy assignment operator ought to be
- /// deleted.
- bool ShouldDeleteCopyAssignmentOperator(CXXMethodDecl *MD);
-
- /// \brief Determine if a defaulted move assignment operator ought to be
- /// deleted.
- bool ShouldDeleteMoveAssignmentOperator(CXXMethodDecl *MD);
-
- /// \brief Determine if a defaulted destructor ought to be deleted.
- bool ShouldDeleteDestructor(CXXDestructorDecl *DD);
+ bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
+ bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
- /// \param ClassDecl The class declaration into which the implicit
+ /// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
-
+
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
@@ -2868,12 +3156,12 @@ public:
/// \brief Declare the implicit destructor for the given class.
///
- /// \param ClassDecl The class declaration into which the implicit
+ /// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
-
+
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
@@ -2894,12 +3182,12 @@ public:
/// \brief Declare the implicit copy constructor for the given class.
///
- /// \param ClassDecl The class declaration into which the implicit
+ /// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
-
+
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
@@ -2913,7 +3201,7 @@ public:
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
-
+
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
@@ -2921,12 +3209,12 @@ public:
/// \brief Declare the implicit copy assignment operator for the given class.
///
- /// \param ClassDecl The class declaration into which the implicit
+ /// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
-
+
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
@@ -2939,7 +3227,7 @@ public:
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
-
+
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
@@ -2947,6 +3235,10 @@ public:
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
+
+ /// \brief Determine whether the given function is an implicitly-deleted
+ /// special member function.
+ bool isImplicitlyDeleted(FunctionDecl *FD);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
@@ -2956,7 +3248,8 @@ public:
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
- ASTOwningVector<Expr*> &ConvertedArgs);
+ ASTOwningVector<Expr*> &ConvertedArgs,
+ bool AllowExplicit = false);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
@@ -2964,6 +3257,8 @@ public:
ParsedType ObjectType,
bool EnteringContext);
+ ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
+
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
@@ -3019,21 +3314,37 @@ public:
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
- /// getAndCaptureCurrentThisType - Try to capture a 'this' pointer. Returns
- /// the type of the 'this' pointer, or a null type if this is not possible.
- QualType getAndCaptureCurrentThisType();
+ /// \brief Try to retrieve the type of the 'this' pointer.
+ ///
+ /// \param Capture If true, capture 'this' in this context.
+ ///
+ /// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
+ QualType getCurrentThisType();
+
+ /// \brief Make sure the value of 'this' is actually available in the current
+ /// context, if it is a potentially evaluated context.
+ ///
+ /// \param Loc The location at which the capture of 'this' occurs.
+ ///
+ /// \param Explicit Whether 'this' is explicitly captured in a lambda
+ /// capture list.
+ void CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false);
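// Illustrative C++11 contexts for getCurrentThisType/CheckCXXThisCapture
// (a sketch, not code from this change):
struct ThisCaptureExample {
  int n;
  void f() {
    auto a = [this] { return n; }; // explicit capture: Explicit == true
    auto b = [=]    { return n; }; // implicit capture of 'this'
    a(); b();
  }
};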
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
+
+
+ /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
+ ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
- ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
+ ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
- ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
+ ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
bool IsThrownVarInScope);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
@@ -3056,9 +3367,7 @@ public:
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
- SourceLocation ConstructorLParen,
- MultiExprArg ConstructorArgs,
- SourceLocation ConstructorRParen);
+ Expr *Initializer);
ExprResult BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
@@ -3067,9 +3376,8 @@ public:
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
- SourceLocation ConstructorLParen,
- MultiExprArg ConstructorArgs,
- SourceLocation ConstructorRParen,
+ SourceRange DirectInitRange,
+ Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
@@ -3134,6 +3442,14 @@ public:
TypeSourceInfo *RhsT,
SourceLocation RParen);
+ /// \brief Parsed one of the type trait support pseudo-functions.
+ ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<ParsedType> Args,
+ SourceLocation RParenLoc);
+ ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc);
+
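// Hedged illustration: one of the variadic type-trait pseudo-functions that
// ActOnTypeTrait/BuildTypeTrait handle (the spelling below is Clang's
// extension; treat it as an assumption of this sketch):
struct TrivialPod { int a; };
static_assert(__is_trivially_constructible(TrivialPod), "default-construct");
static_assert(__is_trivially_constructible(TrivialPod, const TrivialPod &),
              "copy-construct");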
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
@@ -3189,6 +3505,13 @@ public:
UnqualifiedId &SecondTypeName,
bool HasTrailingLParen);
+ ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ SourceLocation TildeLoc,
+ const DeclSpec& DS,
+ bool HasTrailingLParen);
+
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
@@ -3221,7 +3544,7 @@ public:
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
CXXScopeSpec &SS);
-
+
bool isAcceptableNestedNameSpecifier(NamedDecl *SD);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
@@ -3250,7 +3573,7 @@ public:
///
/// \param CCLoc The location of the '::'.
///
- /// \param ObjectType The type of the object, if we're parsing
+ /// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
@@ -3270,6 +3593,12 @@ public:
bool EnteringContext,
CXXScopeSpec &SS);
+ ExprResult ActOnDecltypeExpression(Expr *E);
+
+ bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
+ const DeclSpec &DS,
+ SourceLocation ColonColonLoc);
+
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
@@ -3277,34 +3606,32 @@ public:
ParsedType ObjectType,
bool EnteringContext);
- /// \brief The parser has parsed a nested-name-specifier
+ /// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
- /// \param TemplateLoc The location of the 'template' keyword, if any.
- ///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
- ///
- /// \param TemplateLoc the location of the 'template' keyword, if any.
+ ///
+ /// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
-
- /// \param EnteringContext Whether we're entering the context of the
+ ///
+ /// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
- SourceLocation TemplateLoc,
- CXXScopeSpec &SS,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
@@ -3314,29 +3641,29 @@ public:
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
- /// that the parser can use later to reconstruct the given
+ /// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
- /// \returns A pointer containing all of the information in the
+ /// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
-
- /// \brief Given an annotation pointer for a nested-name-specifier, restore
+
+ /// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
- /// \param Annotation The annotation pointer, produced by
+ /// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
- void RestoreNestedNameSpecifierAnnotation(void *Annotation,
+ void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
-
+
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
@@ -3365,16 +3692,107 @@ public:
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
+ /// \brief Create a new lambda closure type.
+ CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
+ bool KnownDependent = false);
+
+ /// \brief Start the definition of a lambda expression.
+ CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ TypeSourceInfo *MethodType,
+ SourceLocation EndLoc,
+ llvm::ArrayRef<ParmVarDecl *> Params,
+ llvm::Optional<unsigned> ManglingNumber
+ = llvm::Optional<unsigned>(),
+ Decl *ContextDecl = 0);
+
+ /// \brief Introduce the scope for a lambda expression.
+ sema::LambdaScopeInfo *enterLambdaScope(CXXMethodDecl *CallOperator,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ bool Mutable);
+
+ /// \brief Note that we have finished the explicit captures for the
+ /// given lambda.
+ void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
+
+ /// \brief Introduce the lambda parameters into scope.
+ void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
+
+ /// ActOnStartOfLambdaDefinition - This is called just before we start
+ /// parsing the body of a lambda; it analyzes the explicit captures and
+ /// arguments, and sets up various data-structures for the body of the
+ /// lambda.
+ void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
+ Declarator &ParamInfo, Scope *CurScope);
+
+ /// ActOnLambdaError - If there is an error parsing a lambda, this callback
+ /// is invoked to pop the information about the lambda.
+ void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
+ bool IsInstantiation = false);
+
+ /// ActOnLambdaExpr - This is called when the body of a lambda expression
+ /// was successfully completed.
+ ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
+ Scope *CurScope,
+ bool IsInstantiation = false);
+
+ /// \brief Define the "body" of the conversion from a lambda object to a
+ /// function pointer.
+ ///
+ /// This routine doesn't actually define a sensible body; rather, it fills
+ /// in the initialization expression needed to copy the lambda object into
+ /// the block, and IR generation actually generates the real body of the
+ /// function pointer conversion.
+ void DefineImplicitLambdaToFunctionPointerConversion(
+ SourceLocation CurrentLoc, CXXConversionDecl *Conv);
+
+ /// \brief Define the "body" of the conversion from a lambda object to a
+ /// block pointer.
+ ///
+ /// This routine doesn't actually define a sensible body; rather, it fills
+ /// in the initialization expression needed to copy the lambda object into
+ /// the block, and IR generation actually generates the real body of the
+ /// block pointer conversion.
+ void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
+ CXXConversionDecl *Conv);
+
+ ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
+ SourceLocation ConvLocation,
+ CXXConversionDecl *Conv,
+ Expr *Src);
+
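// The conversions whose "bodies" the routines above synthesize
// (illustrative; the block case assumes Objective-C++ with Blocks enabled):
void lambdaConversionExample() {
  auto l = [](int n) { return n + 1; };
  int (*fp)(int) = l; // lambda-to-function-pointer (captureless lambdas only)
#ifdef __BLOCKS__
  int (^bp)(int) = l; // lambda-to-block-pointer conversion
  bp(1);
#endif
  fp(1);
}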
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
-
+
+ ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
+
+ /// BuildObjCNumericLiteral - builds an ObjCNumericLiteral AST node for the
+ /// numeric literal expression. The type of the expression will be
+ /// "NSNumber *", or "id" if NSNumber is unavailable.
+ ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
+ ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
+ bool Value);
+ ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
+
+ ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
+ Expr *IndexExpr,
+ ObjCMethodDecl *getterMethod,
+ ObjCMethodDecl *setterMethod);
+
+ ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
+ ObjCDictionaryElement *Elements,
+ unsigned NumElements);
+
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
- CXXMethodDecl *Method,
+ CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
@@ -3434,6 +3852,7 @@ public:
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
@@ -3445,6 +3864,7 @@ public:
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
@@ -3454,23 +3874,23 @@ public:
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
SourceLocation IdLoc,
- const MultiInitializer &Init,
+ Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
- const MultiInitializer &Args,
+ Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
- const MultiInitializer &Args,
+ Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
- const MultiInitializer &Args,
- SourceLocation BaseLoc,
+ Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
@@ -3479,9 +3899,9 @@ public:
bool SetCtorInitializers(CXXConstructorDecl *Constructor,
CXXCtorInitializer **Initializers,
unsigned NumInitializers, bool AnyErrors);
-
+
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
-
+
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
@@ -3506,8 +3926,8 @@ public:
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
-
- typedef LazyVector<CXXRecordDecl *, ExternalSemaSource,
+
+ typedef LazyVector<CXXRecordDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDynamicClasses, 2, 2>
DynamicClassesType;
@@ -3563,7 +3983,8 @@ public:
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
- FriendDecl *CheckFriendTypeDecl(SourceLocation FriendLoc,
+ FriendDecl *CheckFriendTypeDecl(SourceLocation Loc,
+ SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
@@ -3602,7 +4023,7 @@ public:
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
- ParsedType basetype,
+ ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
@@ -3652,7 +4073,7 @@ public:
/// C++0x [class.virtual]p3.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
-
+
//===--------------------------------------------------------------------===//
// C++ Access Control
@@ -3685,11 +4106,13 @@ public:
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
+ const InitializedEntity &Entity,
AccessSpecifier Access,
- PartialDiagnostic PD);
+ const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
- const PartialDiagnostic &PDiag);
+ const PartialDiagnostic &PDiag,
+ QualType objectType = QualType());
AccessResult CheckDirectMemberAccess(SourceLocation Loc,
NamedDecl *D,
const PartialDiagnostic &PDiag);
@@ -3706,7 +4129,10 @@ public:
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
- bool IsSimplyAccessible(NamedDecl *decl, CXXRecordDecl *Class);
+ bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
+ bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
+ AccessSpecifier access,
+ QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
@@ -3721,7 +4147,27 @@ public:
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
-
+
+ /// \brief RAII object used to temporarily suppress access checking.
+ class SuppressAccessChecksRAII {
+ Sema &S;
+ bool SuppressingAccess;
+
+ public:
+ SuppressAccessChecksRAII(Sema &S, bool Suppress)
+ : S(S), SuppressingAccess(Suppress) {
+ if (Suppress) S.ActOnStartSuppressingAccessChecks();
+ }
+ ~SuppressAccessChecksRAII() {
+ done();
+ }
+ void done() {
+ if (!SuppressingAccess) return;
+ S.ActOnStopSuppressingAccessChecks();
+ SuppressingAccess = false;
+ }
+ };
+
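// Sketch of the intended usage of the RAII helper above (an assumption based
// on its definition):
void lookupWithoutAccessChecks(Sema &SemaRef) {
  Sema::SuppressAccessChecksRAII NoAccess(SemaRef, /*Suppress=*/true);
  // ... perform lookups that must not emit C++ access diagnostics ...
  NoAccess.done(); // re-enable checking early; the destructor also suffices
}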
void ActOnStartSuppressingAccessChecks();
void ActOnStopSuppressingAccessChecks();
@@ -3752,9 +4198,11 @@ public:
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
- void FilterAcceptableTemplateNames(LookupResult &R);
- bool hasAnyAcceptableTemplateNames(LookupResult &R);
-
+ void FilterAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates = true);
+ bool hasAnyAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates = true);
+
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
@@ -3775,7 +4223,7 @@ public:
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
- bool DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
+ void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
@@ -3841,7 +4289,7 @@ public:
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
- AccessSpecifier AS,
+ AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists);
@@ -3850,44 +4298,49 @@ public:
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
-
+
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
- ActOnTemplateIdType(CXXScopeSpec &SS,
+ ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
- SourceLocation RAngleLoc);
+ SourceLocation RAngleLoc,
+ bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
///
- /// \param TUK
+ /// \param TUK
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
- TemplateTy TemplateD,
+ SourceLocation TemplateKWLoc,
+ TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
-
+
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
- const TemplateArgumentListInfo &TemplateArgs);
+ const TemplateArgumentListInfo *TemplateArgs);
+
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo &TemplateArgs);
+ const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
- SourceLocation TemplateKWLoc,
CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
@@ -4013,12 +4466,18 @@ public:
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
+ /// \param ExpansionIntoFixedList If non-NULL, will be set true to indicate
+ /// when the template arguments contain a pack expansion that is being
+ /// expanded into a fixed parameter list.
+ ///
/// \returns True if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
- SmallVectorImpl<TemplateArgument> &Converted);
+ SmallVectorImpl<TemplateArgument> &Converted,
+ bool *ExpansionIntoFixedList = 0);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
const TemplateArgumentLoc &Arg,
@@ -4026,12 +4485,10 @@ public:
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
- bool CheckTemplateArgumentPointerToMember(Expr *Arg,
- TemplateArgument &Converted);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
- CheckTemplateArgumentKind CTAK = CTAK_Specified);
+ CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
const TemplateArgumentLoc &Arg);
@@ -4095,8 +4552,8 @@ public:
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
- ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
- const CXXScopeSpec &SS, const IdentifierInfo &II,
+ ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
@@ -4113,9 +4570,9 @@ public:
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
- ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
- const CXXScopeSpec &SS,
- SourceLocation TemplateLoc,
+ ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateLoc,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
@@ -4136,7 +4593,7 @@ public:
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
-
+
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
@@ -4191,19 +4648,37 @@ public:
/// \brief An initializer.
UPPC_Initializer,
-
+
/// \brief A default argument.
UPPC_DefaultArgument,
-
+
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
-
+
/// \brief Partial specialization.
- UPPC_PartialSpecialization
- };
+ UPPC_PartialSpecialization,
+
+ /// \brief Microsoft __if_exists.
+ UPPC_IfExists,
+
+ /// \brief Microsoft __if_not_exists.
+ UPPC_IfNotExists
+};
+
+ /// \brief Diagnose unexpanded parameter packs.
+ ///
+ /// \param Loc The location at which we should emit the diagnostic.
+ ///
+ /// \param UPPC The context in which we are diagnosing unexpanded
+ /// parameter packs.
+ ///
+ /// \param Unexpanded the set of unexpanded parameter packs.
+ void DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
+ UnexpandedParameterPackContext UPPC,
+ ArrayRef<UnexpandedParameterPack> Unexpanded);
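// What gets reported through DiagnoseUnexpandedParameterPacks (illustrative):
template <typename ...Ts> void packSink(Ts...);
template <typename ...Ts> void packExample(Ts ...args) {
  packSink(args...); // ok: the pack is expanded
  // packSink(args); // would be diagnosed: unexpanded parameter pack 'args'
}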
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
@@ -4252,7 +4727,7 @@ public:
///
/// \param Loc The location of the template name.
///
- /// \param Template The template name that is being checked for unexpanded
+ /// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
@@ -4260,18 +4735,18 @@ public:
TemplateName Template,
UnexpandedParameterPackContext UPPC);
- /// \brief If the given template argument contains an unexpanded parameter
+ /// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
- /// \param Arg The template argument that is being checked for unexpanded
+ /// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
-
+
/// \brief Collect the set of unexpanded parameter packs within the given
- /// template argument.
+ /// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
@@ -4279,7 +4754,7 @@ public:
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
- /// template argument.
+ /// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
@@ -4287,7 +4762,7 @@ public:
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
- /// type.
+ /// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
@@ -4295,13 +4770,29 @@ public:
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
- /// type.
+ /// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// nested-name-specifier.
+ ///
+ /// \param SS The nested-name-specifier that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// name.
+ ///
+ /// \param NameInfo The name that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
@@ -4363,7 +4854,7 @@ public:
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
- /// \param Unexpanded The set of unexpanded parameter packs within the
+ /// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param NumUnexpanded The number of unexpanded parameter packs in
@@ -4386,9 +4877,9 @@ public:
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
- /// \returns true if an error occurred (e.g., because the parameter packs
- /// are to be instantiated with arguments of different lengths), false
- /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
+ /// \returns true if an error occurred (e.g., because the parameter packs
+ /// are to be instantiated with arguments of different lengths), false
+ /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
@@ -4404,9 +4895,9 @@ public:
/// This routine already assumes that the pack expansion type can be
/// expanded and that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
- unsigned getNumArgumentsInExpansion(QualType T,
+ unsigned getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
-
+
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
@@ -4423,11 +4914,11 @@ public:
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
-
+
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
-
+
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
@@ -4489,20 +4980,20 @@ public:
QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
- /// brief A function argument from which we performed template argument
+ /// \brief A function argument from which we performed template argument
/// deduction for a call.
// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
- : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
+ : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
-
+
QualType OriginalParamType;
unsigned ArgIdx;
QualType OriginalArgType;
};
-
+
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
@@ -4514,7 +5005,7 @@ public:
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
@@ -4537,8 +5028,16 @@ public:
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
- bool DeduceAutoType(TypeSourceInfo *AutoType, Expr *Initializer,
- TypeSourceInfo *&Result);
+ /// \brief Result type of DeduceAutoType.
+ enum DeduceAutoResult {
+ DAR_Succeeded,
+ DAR_Failed,
+ DAR_FailedAlreadyDiagnosed
+ };
+
+ DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
+ TypeSourceInfo *&Result);
+ void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
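A sketch of how a caller might consume the new tri-state result in place of the old bool return (locals `VD`, `Init`, `AutoTSI`, and `DeducedTSI` are hypothetical):

    TypeSourceInfo *DeducedTSI = 0;
    switch (S.DeduceAutoType(AutoTSI, Init, DeducedTSI)) {
    case Sema::DAR_Succeeded:
      break;                                    // DeducedTSI holds the result
    case Sema::DAR_Failed:
      S.DiagnoseAutoDeductionFailure(VD, Init); // emit the diagnostic now
      break;
    case Sema::DAR_FailedAlreadyDiagnosed:
      break;                                    // error already reported
    }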
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
@@ -4553,7 +5052,8 @@ public:
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
- bool Complain = true);
+ bool Complain = true,
+ QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
@@ -4564,9 +5064,14 @@ public:
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
- SmallVectorImpl<bool> &Used);
+ llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(FunctionTemplateDecl *FunctionTemplate,
- SmallVectorImpl<bool> &Deduced);
+ llvm::SmallBitVector &Deduced) {
+ return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
+ }
+ static void MarkDeducedTemplateParameters(ASTContext &Ctx,
+ FunctionTemplateDecl *FunctionTemplate,
+ llvm::SmallBitVector &Deduced);
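The switch from SmallVectorImpl<bool> to SmallBitVector stores the per-parameter flags as a compact bit set; a sketch of the intended use (`TemplateParams` and the index `I` are hypothetical):

    llvm::SmallBitVector Deduced(TemplateParams->size());
    Sema::MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
    if (Deduced[I]) {
      // template parameter I is deducible from the function signature
    }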
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
@@ -4621,8 +5126,8 @@ public:
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
- /// \brief The template (or partial specialization) in which we are
- /// performing the instantiation, for substitutions of prior template
+ /// \brief The template (or partial specialization) in which we are
+ /// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
@@ -4636,7 +5141,7 @@ public:
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
- /// \brief The template deduction info object associated with the
+ /// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
@@ -4680,7 +5185,7 @@ public:
}
- return true;
+ llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
@@ -4704,7 +5209,7 @@ public:
/// This is used when setting up a SFINAE trap (\see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
-
+
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
@@ -4722,7 +5227,7 @@ public:
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
- /// The pack expansion index will be -1 to indicate that parameter packs
+ /// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
@@ -4734,26 +5239,26 @@ public:
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
-
+
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
-
+
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
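The RAII helper above brackets substitution of a single pack element; a sketch (the index value is illustrative):

    {
      Sema::ArgumentPackSubstitutionIndexRAII
        SubstIndex(SemaRef, /*NewSubstitutionIndex=*/0);
      // substitute into the pattern using element 0 of each argument pack
    } // the previous substitution index is restored here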
-
+
friend class ArgumentPackSubstitutionRAII;
-
+
/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
-
+
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
@@ -4761,7 +5266,7 @@ public:
/// FIXME: Serialize this structure to the AST file.
llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnostics;
-
+
/// \brief A stack object to be created when performing template
/// instantiation.
///
@@ -4863,15 +5368,15 @@ public:
};
void PrintInstantiationStack();
-
+
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c llvm::Optional if we're not in a SFINAE context.
- /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
- /// template-deduction context object, which can be used to capture
- /// diagnostics that will be suppressed.
+ /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
+ /// template-deduction context object, which can be used to capture
+ /// diagnostics that will be suppressed.
llvm::Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief RAII class used to determine whether SFINAE has
@@ -4882,22 +5387,22 @@ public:
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
-
+
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
- {
+ {
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
- ~SFINAETrap() {
- SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
- SemaRef.InNonInstantiationSFINAEContext
+ ~SFINAETrap() {
+ SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
+ SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
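A sketch of the intended pattern (caller-side names hypothetical): create the trap, attempt the conversion, and let any SFINAE errors vanish when the trap unwinds.

    {
      Sema::SFINAETrap Trap(SemaRef);
      ExprResult R = SemaRef.PerformImplicitConversion(From, ToType,
                                                       Sema::AA_Converting);
      // errors counted while Trap is alive are discarded on destruction
    }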
@@ -4917,7 +5422,7 @@ public:
typedef llvm::DenseMap<IdentifierInfo *, TypoCorrection>
UnqualifiedTyposCorrectedMap;
-
+
/// \brief A cache containing the results of typo correction for unqualified
/// name lookup.
///
@@ -4925,7 +5430,7 @@ public:
/// there was no correction), while the boolean will be true when the
/// string represents a keyword.
UnqualifiedTyposCorrectedMap UnqualifiedTyposCorrected;
-
+
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
@@ -4973,15 +5478,16 @@ public:
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
- llvm::Optional<unsigned> NumExpansions);
- bool SubstParmTypes(SourceLocation Loc,
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
+ bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = 0);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
-
+
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
@@ -5007,6 +5513,10 @@ public:
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
+ ExprResult SubstInitializer(Expr *E,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool CXXDirectInit);
+
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
@@ -5019,8 +5529,27 @@ public:
TemplateSpecializationKind TSK,
bool Complain = true);
+ bool InstantiateEnum(SourceLocation PointOfInstantiation,
+ EnumDecl *Instantiation, EnumDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK);
+
+ struct LateInstantiatedAttribute {
+ const Attr *TmplAttr;
+ LocalInstantiationScope *Scope;
+ Decl *NewDecl;
+
+ LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
+ Decl *D)
+ : TmplAttr(A), Scope(S), NewDecl(D)
+ { }
+ };
+ typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
+
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
- const Decl *Pattern, Decl *Inst);
+ const Decl *Pattern, Decl *Inst,
+ LateInstantiatedAttrVec *LateAttrs = 0,
+ LocalInstantiationScope *OuterMostScope = 0);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
@@ -5046,7 +5575,7 @@ public:
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
- SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
+ SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
@@ -5066,11 +5595,6 @@ public:
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
- bool InstantiateInitializer(Expr *Init,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- SourceLocation &LParenLoc,
- ASTOwningVector<Expr*> &NewArgs,
- SourceLocation &RParenLoc);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
@@ -5078,6 +5602,17 @@ public:
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
+ enum ObjCContainerKind {
+ OCK_None = -1,
+ OCK_Interface = 0,
+ OCK_Protocol,
+ OCK_Category,
+ OCK_ClassExtension,
+ OCK_Implementation,
+ OCK_CategoryImplementation
+ };
+ ObjCContainerKind getObjCContainerKind() const;
+
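A sketch of the new query (context hypothetical): gate a check on the kind of Objective-C container currently being processed.

    if (S.getObjCContainerKind() == Sema::OCK_Implementation) {
      // we are inside an @implementation ... @end block
    }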
Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
@@ -5129,12 +5664,15 @@ public:
IdentifierInfo *CatName,
SourceLocation CatLoc);
+ DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
+ ArrayRef<Decl *> Decls);
+
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
unsigned NumElts);
- Decl *ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
+ DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
@@ -5155,7 +5693,7 @@ public:
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param DC The semantic container for the property
- /// \param redeclaredProperty Declaration for property if redeclared
+ /// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
@@ -5180,12 +5718,13 @@ public:
void MatchOneProtocolPropertiesInClass(Decl *CDecl,
ObjCProtocolDecl *PDecl);
- void ActOnAtEnd(Scope *S, SourceRange AtEnd,
- Decl **allMethods = 0, unsigned allNum = 0,
- Decl **allProperties = 0, unsigned pNum = 0,
- DeclGroupPtrTy *allTUVars = 0, unsigned tuvNum = 0);
+ Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
+ Decl **allMethods = 0, unsigned allNum = 0,
+ Decl **allProperties = 0, unsigned pNum = 0,
+ DeclGroupPtrTy *allTUVars = 0, unsigned tuvNum = 0);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
+ SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
@@ -5246,9 +5785,11 @@ public:
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
+ ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
+ bool IsInstance);
bool inferObjCARCLifetime(ValueDecl *decl);
-
+
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
@@ -5264,7 +5805,7 @@ public:
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
- ObjCMethodDecl *tryCaptureObjCSelf();
+ ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
@@ -5277,7 +5818,7 @@ public:
/// name.
ObjCClassMessage
};
-
+
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
@@ -5300,7 +5841,15 @@ public:
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
- MultiExprArg Args);
+ MultiExprArg Args,
+ bool isImplicit = false);
+
+ ExprResult BuildClassMessageImplicit(QualType ReceiverType,
+ bool isSuperReceiver,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
@@ -5318,7 +5867,15 @@ public:
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
- MultiExprArg Args);
+ MultiExprArg Args,
+ bool isImplicit = false);
+
+ ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
+ QualType ReceiverType,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
@@ -5333,7 +5890,7 @@ public:
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
-
+
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
@@ -5341,19 +5898,19 @@ public:
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
-
+
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
-
+
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
- void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
+ void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden,
bool IsImplementation);
/// \brief Check whether the given method overrides any methods in its class,
/// calling \c CheckObjCMethodOverride for each overridden method.
bool CheckObjCMethodOverrides(ObjCMethodDecl *NewMethod, DeclContext *DC);
-
+
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
@@ -5387,7 +5944,7 @@ public:
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
-
+
/// ActOnPragmaMSStruct - Called on well formed #pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
@@ -5397,7 +5954,7 @@ public:
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed #pragma GCC visibility... .
- void ActOnPragmaVisibility(bool IsPush, const IdentifierInfo* VisType,
+ void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
@@ -5409,6 +5966,14 @@ public:
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
+ /// ActOnPragmaRedefineExtname - Called on well formed
+ /// #pragma redefine_extname oldname newname.
+ void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation WeakNameLoc,
+ SourceLocation AliasNameLoc);
+
/// ActOnPragmaWeakAlias - Called on well formed #pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
@@ -5432,7 +5997,8 @@ public:
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
- void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr);
+ void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
+ SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
@@ -5440,7 +6006,7 @@ public:
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '#pragma GCC visibility' and visibility attributes on namespaces.
- void PopPragmaVisibility();
+ void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
@@ -5472,7 +6038,7 @@ public:
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = 0,
- CheckedConversionKind CCK
+ CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
@@ -5525,7 +6091,8 @@ public:
unsigned FirstProtoArg,
Expr **Args, unsigned NumArgs,
SmallVector<Expr *, 8> &AllArgs,
- VariadicCallType CallType = VariadicDoesNotApply);
+ VariadicCallType CallType = VariadicDoesNotApply,
+ bool AllowExplicit = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will warn if the resulting type is not a POD type.
@@ -5566,9 +6133,9 @@ public:
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types which
- /// point to integers which have a different sign, but are otherwise identical.
- /// This is a subset of the above, but broken out because it's by far the most
- /// common case of incompatible pointers.
+ /// point to integers which have a different sign, but are otherwise
+ /// identical. This is a subset of the above, but broken out because it's by
+ /// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
@@ -5652,13 +6219,11 @@ public:
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
- bool AllowExplicit = false,
- bool Diagnose = true);
+ bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
- ImplicitConversionSequence& ICS,
- bool Diagnose = true);
+ ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
@@ -5685,7 +6250,7 @@ public:
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
- ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = 0);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
@@ -5706,11 +6271,15 @@ public:
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
-
- void ConvertPropertyForLValue(ExprResult &LHS, ExprResult &RHS,
- QualType& LHSTy);
- ExprResult ConvertPropertyForRValue(Expr *E);
-
+
+ ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
+ UnaryOperatorKind Opcode, Expr *Op);
+ ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
+ BinaryOperatorKind Opcode,
+ Expr *LHS, Expr *RHS);
+ ExprResult checkPseudoObjectRValue(Expr *E);
+ Expr *recreateSyntacticForm(PseudoObjectExpr *E);
+
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
@@ -5719,10 +6288,12 @@ public:
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = 0);
- QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2,
+ QualType FindCompositePointerType(SourceLocation Loc,
+ ExprResult &E1, ExprResult &E2,
bool *NonStandardCompositeType = 0) {
Expr *E1Tmp = E1.take(), *E2Tmp = E2.take();
- QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType);
+ QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
+ NonStandardCompositeType);
E1 = Owned(E1Tmp);
E2 = Owned(E2Tmp);
return Composite;
@@ -5737,8 +6308,11 @@ public:
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
+ QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
+ QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
@@ -5776,6 +6350,10 @@ public:
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
+ /// \brief Force an expression with unknown-type to an expression of the
+ /// given type.
+ ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
+
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
@@ -5796,11 +6374,17 @@ public:
Expr *CastExpr,
SourceLocation RParenLoc);
+ enum ARCConversionResult { ACR_okay, ACR_unbridged };
+
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
- void CheckObjCARCConversion(SourceRange castRange, QualType castType,
- Expr *&op, CheckedConversionKind CCK);
-
+ ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
+ QualType castType, Expr *&op,
+ CheckedConversionKind CCK);
+
+ Expr *stripARCUnbridgedCast(Expr *e);
+ void diagnoseARCUnbridgedCast(Expr *e);
+
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
@@ -5838,7 +6422,7 @@ public:
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
-
+
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
@@ -5851,7 +6435,7 @@ public:
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
-
+
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
@@ -5875,18 +6459,28 @@ public:
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
- /// VerifyIntegerConstantExpression - verifies that an expression is an ICE,
+ /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
- bool VerifyIntegerConstantExpression(const Expr *E, llvm::APSInt *Result = 0);
+ ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
+ PartialDiagnostic Diag,
+ bool AllowFold,
+ PartialDiagnostic FoldDiag);
+ ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
+ PartialDiagnostic Diag,
+ bool AllowFold = true) {
+ return VerifyIntegerConstantExpression(E, Result, Diag, AllowFold,
+ PDiag(0));
+ }
+ ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = 0);
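With the bool return replaced by ExprResult, success is now "not invalid" and the folded value comes back through Result; a sketch (`SizeExpr` is hypothetical):

    llvm::APSInt Value;
    ExprResult Converted = S.VerifyIntegerConstantExpression(SizeExpr, &Value);
    if (!Converted.isInvalid()) {
      // Value holds the constant; keep Converted.get() in the AST
    }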
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
- bool VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
- QualType FieldTy, const Expr *BitWidth,
- bool *ZeroWidth = 0);
+ ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
+ QualType FieldTy, Expr *BitWidth,
+ bool *ZeroWidth = 0);
enum CUDAFunctionTarget {
CFT_Device,
@@ -5939,7 +6533,7 @@ public:
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
- /// \brief Code completion occurs within the body of a function on a
+ /// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
@@ -5948,19 +6542,20 @@ public:
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
- /// \brief Code completion occurs within a sequence of declaration
+ /// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
+ void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
-
+
struct CodeCompleteExpressionData;
- void CodeCompleteExpression(Scope *S,
+ void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
@@ -5969,12 +6564,12 @@ public:
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
- void CodeCompleteCall(Scope *S, Expr *Fn, Expr **Args, unsigned NumArgs);
+ void CodeCompleteCall(Scope *S, Expr *Fn, llvm::ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
-
+
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
@@ -5985,7 +6580,9 @@ public:
void CodeCompleteConstructorInitializer(Decl *Constructor,
CXXCtorInitializer** Initializers,
unsigned NumInitializers);
-
+ void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
+ bool AfterAmpersand);
+
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
@@ -5993,7 +6590,7 @@ public:
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
- void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
+ void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
@@ -6010,7 +6607,7 @@ public:
unsigned NumSelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = 0);
- void CodeCompleteObjCForCollection(Scope *S,
+ void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
IdentifierInfo **SelIdents,
@@ -6035,7 +6632,7 @@ public:
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
- void CodeCompleteObjCMethodDeclSelector(Scope *S,
+ void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
@@ -6051,6 +6648,7 @@ public:
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
@@ -6061,14 +6659,16 @@ public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
-private:
+private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
- bool isSubscript=false, bool AllowOnePastEnd=true);
+ const ArraySubscriptExpr *ASE=0,
+ bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall);
+ bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
+ Expr **Args, unsigned NumArgs);
bool CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall);
- bool CheckablePrintfAttr(const FormatAttr *Format, CallExpr *TheCall);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
@@ -6092,43 +6692,49 @@ private:
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
- bool SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
+ enum FormatStringType {
+ FST_Scanf,
+ FST_Printf,
+ FST_NSString,
+ FST_Strftime,
+ FST_Strfmon,
+ FST_Kprintf,
+ FST_Unknown
+ };
+ static FormatStringType GetFormatStringType(const FormatAttr *Format);
+ bool SemaCheckStringLiteral(const Expr *E, Expr **Args, unsigned NumArgs,
bool HasVAListArg, unsigned format_idx,
- unsigned firstDataArg, bool isPrintf);
+ unsigned firstDataArg, FormatStringType Type,
+ bool inFunctionCall = true);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
- const CallExpr *TheCall, bool HasVAListArg,
+ Expr **Args, unsigned NumArgs, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
- bool isPrintf);
+ FormatStringType Type, bool inFunctionCall);
+
+ void CheckFormatArguments(const FormatAttr *Format, CallExpr *TheCall);
+ void CheckFormatArguments(const FormatAttr *Format, Expr **Args,
+ unsigned NumArgs, bool IsCXXMember,
+ SourceLocation Loc, SourceRange Range);
+ void CheckFormatArguments(Expr **Args, unsigned NumArgs,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ SourceLocation Loc, SourceRange range);
void CheckNonNullArguments(const NonNullAttr *NonNull,
const Expr * const *ExprArgs,
SourceLocation CallSiteLoc);
- void CheckPrintfScanfArguments(const CallExpr *TheCall, bool HasVAListArg,
- unsigned format_idx, unsigned firstDataArg,
- bool isPrintf);
-
- /// \brief Enumeration used to describe which of the memory setting or copying
- /// functions is being checked by \c CheckMemaccessArguments().
- enum CheckedMemoryFunction {
- CMF_Memset,
- CMF_Memcpy,
- CMF_Memmove,
- CMF_Memcmp,
- CMF_Strncpy,
- CMF_Strncmp,
- CMF_Strncasecmp,
- CMF_Strncat,
- CMF_Strndup
- };
-
- void CheckMemaccessArguments(const CallExpr *Call, CheckedMemoryFunction CMF,
+ void CheckMemaccessArguments(const CallExpr *Call,
+ unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
+ void CheckStrncatArguments(const CallExpr *Call,
+ IdentifierInfo *FnName);
+
void CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
@@ -6141,13 +6747,13 @@ private:
///
/// The parser maintains this state here.
Scope *CurScope;
-
+
protected:
friend class Parser;
- friend class InitializationSequence;
+ friend class InitializationSequence;
friend class ASTReader;
friend class ASTWriter;
-
+
public:
/// \brief Retrieve the parser's current scope.
///
@@ -6158,7 +6764,7 @@ public:
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
-
+
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
@@ -6174,9 +6780,12 @@ class EnterExpressionEvaluationContext {
public:
EnterExpressionEvaluationContext(Sema &Actions,
- Sema::ExpressionEvaluationContext NewContext)
+ Sema::ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl = 0,
+ bool IsDecltype = false)
: Actions(Actions) {
- Actions.PushExpressionEvaluationContext(NewContext);
+ Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
+ IsDecltype);
}
~EnterExpressionEvaluationContext() {
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h
index 3689a6e..139cce8 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h
@@ -24,6 +24,7 @@ namespace clang {
/// clients that read ASTs and then require further semantic
/// analysis of the entities in those ASTs.
class SemaConsumer : public ASTConsumer {
+ virtual void anchor();
public:
SemaConsumer() {
ASTConsumer::SemaConsumer = true;
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h
index 2c4bf4b..9605bf8 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h
@@ -16,7 +16,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
- SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY,BRIEF,FULL) ENUM,
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
#define SEMASTART
#include "clang/Basic/DiagnosticSemaKinds.inc"
#undef DIAG
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h
index 0c1bba5..fffca67 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h
@@ -39,7 +39,7 @@ struct ConversionFixItGenerator {
ExprValueKind FromVK);
/// The list of Hints generated so far.
- SmallVector<FixItHint, 1> Hints;
+ std::vector<FixItHint> Hints;
/// The number of Conversions fixed. This can be different from the size
/// of the Hints vector since we allow multiple FixIts per conversion.
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Template.h b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
index 78f50fa..c16823a 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/Template.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
@@ -268,6 +268,50 @@ namespace clang {
Exited = true;
}
+ /// \brief Clone this scope, and all outer scopes, down to the given
+ /// outermost scope.
+ LocalInstantiationScope *cloneScopes(LocalInstantiationScope *Outermost) {
+ if (this == Outermost) return this;
+ LocalInstantiationScope *newScope =
+ new LocalInstantiationScope(SemaRef, CombineWithOuterScope);
+
+ newScope->Outer = 0;
+ if (Outer)
+ newScope->Outer = Outer->cloneScopes(Outermost);
+
+ newScope->PartiallySubstitutedPack = PartiallySubstitutedPack;
+ newScope->ArgsInPartiallySubstitutedPack = ArgsInPartiallySubstitutedPack;
+ newScope->NumArgsInPartiallySubstitutedPack =
+ NumArgsInPartiallySubstitutedPack;
+
+ for (LocalDeclsMap::iterator I = LocalDecls.begin(), E = LocalDecls.end();
+ I != E; ++I) {
+ const Decl *D = I->first;
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored =
+ newScope->LocalDecls[D];
+ if (I->second.is<Decl *>()) {
+ Stored = I->second.get<Decl *>();
+ } else {
+ DeclArgumentPack *OldPack = I->second.get<DeclArgumentPack *>();
+ DeclArgumentPack *NewPack = new DeclArgumentPack(*OldPack);
+ Stored = NewPack;
+ newScope->ArgumentPacks.push_back(NewPack);
+ }
+ }
+ return newScope;
+ }
+
+ /// \brief Deletes the given scope, and all outer scopes, down to the
+ /// given outermost scope.
+ static void deleteScopes(LocalInstantiationScope *Scope,
+ LocalInstantiationScope *Outermost) {
+ while (Scope && Scope != Outermost) {
+ LocalInstantiationScope *Out = Scope->Outer;
+ delete Scope;
+ Scope = Out;
+ }
+ }
+
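cloneScopes and deleteScopes are intended to be paired; a sketch (variable names hypothetical):

    LocalInstantiationScope *Saved = Current->cloneScopes(Outermost);
    // ... use Saved later, e.g. for a late-instantiated attribute ...
    LocalInstantiationScope::deleteScopes(Saved, Outermost);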
/// \brief Find the instantiation of the declaration D within the current
/// instantiation scope.
///
@@ -314,6 +358,8 @@ namespace clang {
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex;
DeclContext *Owner;
const MultiLevelTemplateArgumentList &TemplateArgs;
+ Sema::LateInstantiatedAttrVec* LateAttrs;
+ LocalInstantiationScope *StartingScope;
/// \brief A list of out-of-line class template partial
/// specializations that will need to be instantiated after the
@@ -326,7 +372,7 @@ namespace clang {
TemplateDeclInstantiator(Sema &SemaRef, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs)
: SemaRef(SemaRef), SubstIndex(SemaRef, -1), Owner(Owner),
- TemplateArgs(TemplateArgs) { }
+ TemplateArgs(TemplateArgs), LateAttrs(0), StartingScope(0) { }
// FIXME: Once we get closer to completion, replace these manually-written
// declarations with automatically-generated ones from
@@ -382,6 +428,21 @@ namespace clang {
return 0;
}
+ // Enable late instantiation of attributes. Late instantiated attributes
+ // will be stored in LA.
+ void enableLateAttributeInstantiation(Sema::LateInstantiatedAttrVec *LA) {
+ LateAttrs = LA;
+ StartingScope = SemaRef.CurrentInstantiationScope;
+ }
+
+ // Disable late instantiation of attributes.
+ void disableLateAttributeInstantiation() {
+ LateAttrs = 0;
+ StartingScope = 0;
+ }
+
+ LocalInstantiationScope *getStartingScope() const { return StartingScope; }
+
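A sketch of the enable/disable pairing (caller-side names hypothetical):

    Sema::LateInstantiatedAttrVec LateAttrs;
    Instantiator.enableLateAttributeInstantiation(&LateAttrs);
    // ... instantiate the declaration; qualifying attributes queue up ...
    Instantiator.disableLateAttributeInstantiation();
    // LateAttrs now holds (attr, scope, decl) triples to process afterwards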
typedef
SmallVectorImpl<std::pair<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *> >
@@ -423,6 +484,7 @@ namespace clang {
InstantiateClassTemplatePartialSpecialization(
ClassTemplateDecl *ClassTemplate,
ClassTemplatePartialSpecializationDecl *PartialSpec);
+ void InstantiateEnumDefinition(EnumDecl *Enum, EnumDecl *Pattern);
};
}
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
index 690129a..100d56e 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
@@ -23,7 +23,7 @@ class ASTContext;
class TemplateArgumentList;
namespace sema {
-
+
/// \brief Provides information about an attempted template argument
/// deduction, whose success or failure was described by a
/// TemplateDeductionResult value.
@@ -39,10 +39,10 @@ class TemplateDeductionInfo {
/// deduction is occurring.
SourceLocation Loc;
- /// \brief Warnings (and follow-on notes) that were suppressed due to
+ /// \brief Warnings (and follow-on notes) that were suppressed due to
/// SFINAE while performing template argument deduction.
SmallVector<PartialDiagnosticAt, 4> SuppressedDiagnostics;
-
+
// do not implement these
TemplateDeductionInfo(const TemplateDeductionInfo&);
TemplateDeductionInfo &operator=(const TemplateDeductionInfo&);
@@ -75,23 +75,24 @@ public:
Deduced = NewDeduced;
}
- /// \brief Add a new diagnostic to the set of diagnostics
- void addSuppressedDiagnostic(SourceLocation Loc, const PartialDiagnostic &PD) {
+ /// \brief Add a new diagnostic to the set of diagnostics
+ void addSuppressedDiagnostic(SourceLocation Loc,
+ const PartialDiagnostic &PD) {
SuppressedDiagnostics.push_back(std::make_pair(Loc, PD));
}
-
+
/// \brief Iterator over the set of suppressed diagnostics.
- typedef SmallVectorImpl<PartialDiagnosticAt>::const_iterator
+ typedef SmallVectorImpl<PartialDiagnosticAt>::const_iterator
diag_iterator;
-
+
/// \brief Returns an iterator at the beginning of the sequence of suppressed
/// diagnostics.
diag_iterator diag_begin() const { return SuppressedDiagnostics.begin(); }
-
+
/// \brief Returns an iterator at the end of the sequence of suppressed
/// diagnostics.
diag_iterator diag_end() const { return SuppressedDiagnostics.end(); }
-
+
/// \brief The template parameter to which a template argument
/// deduction failure refers.
///
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h b/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h
index 9537c30..a8f6e11 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h
@@ -23,32 +23,46 @@ namespace clang {
/// @brief Simple class containing the result of Sema::CorrectTypo
class TypoCorrection {
public:
+ // "Distance" for unusable corrections
+ static const unsigned InvalidDistance = ~0U;
+ // The largest distance still considered valid (larger edit distances are
+ // mapped to InvalidDistance by getEditDistance).
+ static const unsigned MaximumDistance = 10000U;
+
+ // Relative weightings of the "edit distance" components. The higher the
+ // weight, the more of a penalty to fitness the component will give (higher
+ // weights mean greater contribution to the total edit distance, with the
+ // best correction candidates having the lowest edit distance).
+ static const unsigned CharDistanceWeight = 100U;
+ static const unsigned QualifierDistanceWeight = 110U;
+ static const unsigned CallbackDistanceWeight = 150U;
+
TypoCorrection(const DeclarationName &Name, NamedDecl *NameDecl,
- NestedNameSpecifier *NNS=0, unsigned distance=0)
- : CorrectionName(Name),
- CorrectionNameSpec(NNS),
- EditDistance(distance) {
+ NestedNameSpecifier *NNS=0, unsigned CharDistance=0,
+ unsigned QualifierDistance=0)
+ : CorrectionName(Name), CorrectionNameSpec(NNS),
+ CharDistance(CharDistance), QualifierDistance(QualifierDistance),
+ CallbackDistance(0) {
if (NameDecl)
CorrectionDecls.push_back(NameDecl);
}
TypoCorrection(NamedDecl *Name, NestedNameSpecifier *NNS=0,
- unsigned distance=0)
- : CorrectionName(Name->getDeclName()),
- CorrectionNameSpec(NNS),
- EditDistance(distance) {
+ unsigned CharDistance=0)
+ : CorrectionName(Name->getDeclName()), CorrectionNameSpec(NNS),
+ CharDistance(CharDistance), QualifierDistance(0), CallbackDistance(0) {
if (Name)
CorrectionDecls.push_back(Name);
}
TypoCorrection(DeclarationName Name, NestedNameSpecifier *NNS=0,
- unsigned distance=0)
- : CorrectionName(Name),
- CorrectionNameSpec(NNS),
- EditDistance(distance) {}
+ unsigned CharDistance=0)
+ : CorrectionName(Name), CorrectionNameSpec(NNS),
+ CharDistance(CharDistance), QualifierDistance(0), CallbackDistance(0) {}
TypoCorrection()
- : CorrectionNameSpec(0), EditDistance(0) {}
+ : CorrectionNameSpec(0), CharDistance(0), QualifierDistance(0),
+ CallbackDistance(0) {}
/// \brief Gets the DeclarationName of the typo correction
DeclarationName getCorrection() const { return CorrectionName; }
@@ -64,8 +78,41 @@ public:
CorrectionNameSpec = NNS;
}
- /// \brief Gets the "edit distance" of the typo correction from the typo
- unsigned getEditDistance() const { return EditDistance; }
+ void setQualifierDistance(unsigned ED) {
+ QualifierDistance = ED;
+ }
+
+ void setCallbackDistance(unsigned ED) {
+ CallbackDistance = ED;
+ }
+
+ // Convert the given weighted edit distance to a roughly equivalent number of
+ // single-character edits (typically for comparison to the length of the
+ // string being edited).
+ static unsigned NormalizeEditDistance(unsigned ED) {
+ if (ED > MaximumDistance)
+ return InvalidDistance;
+ return (ED + CharDistanceWeight / 2) / CharDistanceWeight;
+ }
+
+ /// \brief Gets the "edit distance" of the typo correction from the typo.
+ /// If Normalized is true, scale the distance down by the CharDistanceWeight
+ /// to return the edit distance in terms of single-character edits.
+ unsigned getEditDistance(bool Normalized = true) const {
+ if (CharDistance > MaximumDistance || QualifierDistance > MaximumDistance ||
+ CallbackDistance > MaximumDistance)
+ return InvalidDistance;
+ unsigned ED =
+ CharDistance * CharDistanceWeight +
+ QualifierDistance * QualifierDistanceWeight +
+ CallbackDistance * CallbackDistanceWeight;
+ if (ED > MaximumDistance)
+ return InvalidDistance;
+ // Half the CharDistanceWeight is added to ED to simulate rounding since
+ // integer division truncates the value (i.e. round-to-nearest-int instead
+ // of round-to-zero).
+ return Normalized ? NormalizeEditDistance(ED) : ED;
+ }
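A worked example of the weighting (illustrative values): a candidate one character away that also needed one qualifier change scores ED = 1*100 + 1*110 = 210, which normalizes to (210 + 50) / 100 = 2, i.e. roughly two single-character edits.

    TypoCorrection TC(Name, ND, NNS, /*CharDistance=*/1,
                      /*QualifierDistance=*/1);
    unsigned Raw  = TC.getEditDistance(/*Normalized=*/false); // 210
    unsigned Norm = TC.getEditDistance();                     // 2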
/// \brief Gets the pointer to the declaration of the typo correction
NamedDecl* getCorrectionDecl() const {
@@ -110,6 +157,12 @@ public:
CorrectionDecls.front() == 0;
}
+ // Check if this TypoCorrection is the given keyword.
+ template<std::size_t StrLen>
+ bool isKeyword(const char (&Str)[StrLen]) const {
+ return isKeyword() && getCorrectionAsIdentifierInfo()->isStr(Str);
+ }
+
// Returns true if the correction either is a keyword or has a known decl.
bool isResolved() const { return !CorrectionDecls.empty(); }
@@ -122,6 +175,11 @@ public:
return isKeyword() ? CorrectionDecls.end() : CorrectionDecls.begin();
}
decl_iterator end() { return CorrectionDecls.end(); }
+ typedef llvm::SmallVector<NamedDecl*, 1>::const_iterator const_decl_iterator;
+ const_decl_iterator begin() const {
+ return isKeyword() ? CorrectionDecls.end() : CorrectionDecls.begin();
+ }
+ const_decl_iterator end() const { return CorrectionDecls.end(); }
private:
bool hasCorrectionDecl() const {
@@ -132,7 +190,65 @@ private:
DeclarationName CorrectionName;
NestedNameSpecifier *CorrectionNameSpec;
llvm::SmallVector<NamedDecl*, 1> CorrectionDecls;
- unsigned EditDistance;
+ unsigned CharDistance;
+ unsigned QualifierDistance;
+ unsigned CallbackDistance;
+};
+
+/// @brief Base class for callback objects used by Sema::CorrectTypo to check
+/// the validity of a potential typo correction.
+class CorrectionCandidateCallback {
+ public:
+ static const unsigned InvalidDistance = TypoCorrection::InvalidDistance;
+
+ CorrectionCandidateCallback()
+ : WantTypeSpecifiers(true), WantExpressionKeywords(true),
+ WantCXXNamedCasts(true), WantRemainingKeywords(true),
+ WantObjCSuper(false),
+ IsObjCIvarLookup(false) {}
+
+ virtual ~CorrectionCandidateCallback() {}
+
+ /// \brief Simple predicate used by the default RankCandidate to
+ /// determine whether to return an edit distance of 0 or InvalidDistance.
+ /// This can be overridden by validators that only need to determine if a
+ /// candidate is viable, without ranking potentially viable candidates.
+ /// Only ValidateCandidate or RankCandidate need to be overridden by a
+ /// callback wishing to check the viability of correction candidates.
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return true;
+ }
+
+ /// \brief Method used by Sema::CorrectTypo to assign an "edit distance" rank
+ /// to a candidate (where a lower value represents a better candidate), or
+ /// returning InvalidDistance if the candidate is not at all viable. For
+ /// validation callbacks that only need to determine if a candidate is viable,
+ /// the default RankCandidate returns either 0 or InvalidDistance depending
+ /// on whether ValidateCandidate returns true or false.
+ virtual unsigned RankCandidate(const TypoCorrection &candidate) {
+ return ValidateCandidate(candidate) ? 0 : InvalidDistance;
+ }
+
+ // Flags for context-dependent keywords.
+ // TODO: Expand these to apply to non-keywords or possibly remove them.
+ bool WantTypeSpecifiers;
+ bool WantExpressionKeywords;
+ bool WantCXXNamedCasts;
+ bool WantRemainingKeywords;
+ bool WantObjCSuper;
+ // Temporary hack for the one case where a CorrectTypoContext enum is used
+ // when looking up results.
+ bool IsObjCIvarLookup;
+};
+
+/// @brief Simple template class for restricting typo correction candidates
+/// to ones having a single Decl* of the given type.
+template <class C>
+class DeclFilterCCC : public CorrectionCandidateCallback {
+ public:
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return candidate.getCorrectionDeclAs<C>();
+ }
};
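A sketch of using the filter (the `Candidate` object is hypothetical): only corrections whose declaration is a VarDecl survive.

    DeclFilterCCC<VarDecl> VarFilter;
    bool Viable = VarFilter.ValidateCandidate(Candidate);
    // true only when Candidate.getCorrectionDeclAs<VarDecl>() is non-null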
}
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
index 494835c..4591630 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
@@ -127,7 +127,7 @@ namespace clang {
/// \brief The number of predefined identifier IDs.
const unsigned int NUM_PREDEF_IDENT_IDS = 1;
- /// \brief An ID number that refers to an ObjC selctor in an AST file.
+ /// \brief An ID number that refers to an ObjC selector in an AST file.
typedef uint32_t SelectorID;
/// \brief The number of predefined selector IDs.
@@ -141,6 +141,12 @@ namespace clang {
/// preprocessing record.
typedef uint32_t PreprocessedEntityID;
+ /// \brief An ID number that refers to a submodule in a module file.
+ typedef uint32_t SubmoduleID;
+
+ /// \brief The number of predefined submodule IDs.
+ const unsigned int NUM_PREDEF_SUBMODULE_IDS = 1;
+
/// \brief Source range/offset of a preprocessed entity.
struct PPEntityOffset {
/// \brief Raw source location of beginning of range.
@@ -156,6 +162,22 @@ namespace clang {
BitOffset(BitOffset) { }
};
+ /// \brief Source location and bit offset of a declaration.
+ struct DeclOffset {
+ /// \brief Raw source location.
+ unsigned Loc;
+ /// \brief Offset in the AST file.
+ uint32_t BitOffset;
+
+ DeclOffset() : Loc(0), BitOffset(0) { }
+ DeclOffset(SourceLocation Loc, uint32_t BitOffset)
+ : Loc(Loc.getRawEncoding()),
+ BitOffset(BitOffset) { }
+ void setLocation(SourceLocation L) {
+ Loc = L.getRawEncoding();
+ }
+ };
+
/// \brief The number of predefined preprocessed entity IDs.
const unsigned int NUM_PREDEF_PP_ENTITY_IDS = 1;
@@ -182,7 +204,10 @@ namespace clang {
DECL_UPDATES_BLOCK_ID,
/// \brief The block containing the detailed preprocessing record.
- PREPROCESSOR_DETAIL_BLOCK_ID
+ PREPROCESSOR_DETAIL_BLOCK_ID,
+
+ /// \brief The block containing the submodule structure.
+ SUBMODULE_BLOCK_ID
};
/// \brief Record types that occur within the AST block itself.
@@ -341,10 +366,11 @@ namespace clang {
/// \brief Record code for an update to the TU's lexically contained
/// declarations.
TU_UPDATE_LEXICAL = 28,
-
- /// \brief Record code for an update to first decls pointing to the
- /// latest redeclarations.
- REDECLS_UPDATE_LATEST = 29,
+
+ /// \brief Record code for the array describing the locations (in the
+ /// LOCAL_REDECLARATIONS record) of the redeclaration chains, indexed by
+ /// the first known ID.
+ LOCAL_REDECLARATIONS_MAP = 29,
/// \brief Record code for declarations that Sema keeps references of.
SEMA_DECL_REFS = 30,
@@ -418,9 +444,32 @@ namespace clang {
/// which stores information about #line directives.
SOURCE_MANAGER_LINE_TABLE = 48,
- /// \brief Record code for ObjC categories in a module that are chained to
- /// an interface.
- OBJC_CHAINED_CATEGORIES
+ /// \brief Record code for map of Objective-C class definition IDs to the
+ /// ObjC categories in a module that are attached to that class.
+ OBJC_CATEGORIES_MAP = 49,
+
+ /// \brief Record code for a file-sorted array of DeclIDs in a module.
+ FILE_SORTED_DECLS = 50,
+
+ /// \brief Record code for an array of all of the (sub)modules that were
+ /// imported by the AST file.
+ IMPORTED_MODULES = 51,
+
+ /// \brief Record code for the set of merged declarations in an AST file.
+ MERGED_DECLARATIONS = 52,
+
+ /// \brief Record code for the array of redeclaration chains.
+ ///
+ /// This array can only be interpreted properly using the local
+ /// redeclarations map.
+ LOCAL_REDECLARATIONS = 53,
+
+ /// \brief Record code for the array of Objective-C categories (including
+ /// extensions).
+ ///
+ /// This array can only be interpreted properly using the Objective-C
+ /// categories map.
+ OBJC_CATEGORIES
};
/// \brief Record types used within a source manager block.
@@ -433,7 +482,8 @@ namespace clang {
SM_SLOC_BUFFER_ENTRY = 2,
/// \brief Describes a blob that contains the data for a buffer
/// entry. This kind of record always directly follows a
- /// SM_SLOC_BUFFER_ENTRY record.
+ /// SM_SLOC_BUFFER_ENTRY record or a SM_SLOC_FILE_ENTRY with an
+ /// overridden buffer.
SM_SLOC_BUFFER_BLOB = 3,
/// \brief Describes a source location entry (SLocEntry) for a
/// macro expansion.
@@ -472,6 +522,30 @@ namespace clang {
PPD_INCLUSION_DIRECTIVE = 2
};
+ /// \brief Record types used within a submodule description block.
+ enum SubmoduleRecordTypes {
+ /// \brief Metadata for submodules as a whole.
+ SUBMODULE_METADATA = 0,
+ /// \brief Defines the major attributes of a submodule, including its
+ /// name and parent.
+ SUBMODULE_DEFINITION = 1,
+ /// \brief Specifies the umbrella header used to create this module,
+ /// if any.
+ SUBMODULE_UMBRELLA_HEADER = 2,
+ /// \brief Specifies a header that falls into this (sub)module.
+ SUBMODULE_HEADER = 3,
+ /// \brief Specifies an umbrella directory.
+ SUBMODULE_UMBRELLA_DIR = 4,
+ /// \brief Specifies the submodules that are imported by this
+ /// submodule.
+ SUBMODULE_IMPORTS = 5,
+ /// \brief Specifies the submodules that are re-exported from this
+ /// submodule.
+ SUBMODULE_EXPORTS = 6,
+ /// \brief Specifies a required feature.
+ SUBMODULE_REQUIRES = 7
+ };
+
/// \defgroup ASTAST AST file AST constants
///
/// The constants in this group describe various components of the
@@ -554,7 +628,11 @@ namespace clang {
/// \brief The "auto &&" deduction type.
PREDEF_TYPE_AUTO_RREF_DEDUCT = 32,
/// \brief The OpenCL 'half' / ARM NEON __fp16 type.
- PREDEF_TYPE_HALF_ID = 33
+ PREDEF_TYPE_HALF_ID = 33,
+ /// \brief ARC's unbridged-cast placeholder type.
+ PREDEF_TYPE_ARC_UNBRIDGED_CAST = 34,
+ /// \brief The pseudo-object placeholder type.
+ PREDEF_TYPE_PSEUDO_OBJECT = 35
};
/// \brief The number of predefined type IDs that are reserved for
@@ -662,28 +740,26 @@ namespace clang {
enum SpecialTypeIDs {
/// \brief __builtin_va_list
SPECIAL_TYPE_BUILTIN_VA_LIST = 0,
- /// \brief Objective-C Protocol type
- SPECIAL_TYPE_OBJC_PROTOCOL = 1,
/// \brief CFConstantString type
- SPECIAL_TYPE_CF_CONSTANT_STRING = 2,
+ SPECIAL_TYPE_CF_CONSTANT_STRING = 1,
/// \brief C FILE typedef type
- SPECIAL_TYPE_FILE = 3,
+ SPECIAL_TYPE_FILE = 2,
/// \brief C jmp_buf typedef type
- SPECIAL_TYPE_jmp_buf = 4,
+ SPECIAL_TYPE_JMP_BUF = 3,
/// \brief C sigjmp_buf typedef type
- SPECIAL_TYPE_sigjmp_buf = 5,
+ SPECIAL_TYPE_SIGJMP_BUF = 4,
/// \brief Objective-C "id" redefinition type
- SPECIAL_TYPE_OBJC_ID_REDEFINITION = 6,
+ SPECIAL_TYPE_OBJC_ID_REDEFINITION = 5,
/// \brief Objective-C "Class" redefinition type
- SPECIAL_TYPE_OBJC_CLASS_REDEFINITION = 7,
+ SPECIAL_TYPE_OBJC_CLASS_REDEFINITION = 6,
/// \brief Objective-C "SEL" redefinition type
- SPECIAL_TYPE_OBJC_SEL_REDEFINITION = 8,
+ SPECIAL_TYPE_OBJC_SEL_REDEFINITION = 7,
/// \brief C ucontext_t typedef type
- SPECIAL_TYPE_UCONTEXT_T = 9
+ SPECIAL_TYPE_UCONTEXT_T = 8
};
/// \brief The number of special type IDs.
- const unsigned NumSpecialTypeIDs = 0;
+ const unsigned NumSpecialTypeIDs = 9;
/// \brief Predefined declaration IDs.
///
@@ -706,22 +782,25 @@ namespace clang {
/// \brief The Objective-C 'Class' type.
PREDEF_DECL_OBJC_CLASS_ID = 4,
+
+ /// \brief The Objective-C 'Protocol' type.
+ PREDEF_DECL_OBJC_PROTOCOL_ID = 5,
/// \brief The signed 128-bit integer type.
- PREDEF_DECL_INT_128_ID = 5,
+ PREDEF_DECL_INT_128_ID = 6,
/// \brief The unsigned 128-bit integer type.
- PREDEF_DECL_UNSIGNED_INT_128_ID = 6,
+ PREDEF_DECL_UNSIGNED_INT_128_ID = 7,
/// \brief The internal 'instancetype' typedef.
- PREDEF_DECL_OBJC_INSTANCETYPE_ID = 7
+ PREDEF_DECL_OBJC_INSTANCETYPE_ID = 8
};
/// \brief The number of declaration IDs that are predefined.
///
/// For more information about predefined declarations, see the
/// \c PredefinedDeclIDs type and the PREDEF_DECL_*_ID constants.
- const unsigned int NUM_PREDEF_DECL_IDS = 8;
+ const unsigned int NUM_PREDEF_DECL_IDS = 9;
/// \brief Record codes for each kind of declaration.
///
@@ -752,10 +831,6 @@ namespace clang {
DECL_OBJC_IVAR,
/// \brief A ObjCAtDefsFieldDecl record.
DECL_OBJC_AT_DEFS_FIELD,
- /// \brief A ObjCClassDecl record.
- DECL_OBJC_CLASS,
- /// \brief A ObjCForwardProtocolDecl record.
- DECL_OBJC_FORWARD_PROTOCOL,
/// \brief A ObjCCategoryDecl record.
DECL_OBJC_CATEGORY,
/// \brief A ObjCCategoryImplDecl record.
@@ -859,7 +934,9 @@ namespace clang {
DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK,
/// \brief A ClassScopeFunctionSpecializationDecl record a class scope
/// function specialization. (Microsoft extension).
- DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION
+ DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION,
+ /// \brief An ImportDecl recording a module import.
+ DECL_IMPORT
};
/// \brief Record codes for each kind of statement or expression.
@@ -875,6 +952,8 @@ namespace clang {
STMT_STOP = 100,
/// \brief A NULL expression.
STMT_NULL_PTR,
+ /// \brief A reference to a previously [de]serialized Stmt record.
+ STMT_REF_PTR,
/// \brief A NullStmt record.
STMT_NULL,
/// \brief A CompoundStmt record.
@@ -973,10 +1052,10 @@ namespace clang {
EXPR_SHUFFLE_VECTOR,
/// \brief BlockExpr
EXPR_BLOCK,
- /// \brief A BlockDeclRef record.
- EXPR_BLOCK_DECL_REF,
/// \brief A GenericSelectionExpr record.
EXPR_GENERIC_SELECTION,
+ /// \brief A PseudoObjectExpr record.
+ EXPR_PSEUDO_OBJECT,
/// \brief An AtomicExpr record.
EXPR_ATOMIC,
@@ -984,6 +1063,12 @@ namespace clang {
/// \brief An ObjCStringLiteral record.
EXPR_OBJC_STRING_LITERAL,
+
+ /// \brief An ObjCNumericLiteral record.
+ EXPR_OBJC_NUMERIC_LITERAL,
+ /// \brief An ObjCArrayLiteral record.
+ EXPR_OBJC_ARRAY_LITERAL,
+ /// \brief An ObjCDictionaryLiteral record.
+ EXPR_OBJC_DICTIONARY_LITERAL,
+
/// \brief An ObjCEncodeExpr record.
EXPR_OBJC_ENCODE,
/// \brief An ObjCSelectorExpr record.
@@ -994,6 +1079,8 @@ namespace clang {
EXPR_OBJC_IVAR_REF_EXPR,
/// \brief An ObjCPropertyRefExpr record.
EXPR_OBJC_PROPERTY_REF_EXPR,
+ /// \brief An ObjCSubscriptRefExpr record.
+ EXPR_OBJC_SUBSCRIPT_REF_EXPR,
/// \brief UNUSED
EXPR_OBJC_KVC_REF_EXPR,
/// \brief An ObjCMessageExpr record.
@@ -1017,6 +1104,8 @@ namespace clang {
STMT_OBJC_AT_THROW,
/// \brief An ObjCAutoreleasePoolStmt record.
STMT_OBJC_AUTORELEASE_POOL,
+ /// \brief An ObjCBoolLiteralExpr record.
+ EXPR_OBJC_BOOL_LITERAL,
// C++
@@ -1045,6 +1134,8 @@ namespace clang {
EXPR_CXX_CONST_CAST,
/// \brief A CXXFunctionalCastExpr record.
EXPR_CXX_FUNCTIONAL_CAST,
+ /// \brief A UserDefinedLiteral record.
+ EXPR_USER_DEFINED_LITERAL,
/// \brief A CXXBoolLiteralExpr record.
EXPR_CXX_BOOL_LITERAL,
EXPR_CXX_NULL_PTR_LITERAL, // CXXNullPtrLiteralExpr
@@ -1075,6 +1166,7 @@ namespace clang {
EXPR_OPAQUE_VALUE, // OpaqueValueExpr
EXPR_BINARY_CONDITIONAL_OPERATOR, // BinaryConditionalOperator
EXPR_BINARY_TYPE_TRAIT, // BinaryTypeTraitExpr
+ EXPR_TYPE_TRAIT, // TypeTraitExpr
EXPR_ARRAY_TYPE_TRAIT, // ArrayTypeTraitIntExpr
EXPR_PACK_EXPANSION, // PackExpansionExpr
@@ -1097,7 +1189,10 @@ namespace clang {
STMT_SEH_TRY, // SEHTryStmt
// ARC
- EXPR_OBJC_BRIDGED_CAST // ObjCBridgedCastExpr
+ EXPR_OBJC_BRIDGED_CAST, // ObjCBridgedCastExpr
+
+ STMT_MS_DEPENDENT_EXISTS, // MSDependentExistsStmt
+ EXPR_LAMBDA // LambdaExpr
};
/// \brief The kinds of designators that can occur in a
@@ -1123,6 +1218,58 @@ namespace clang {
CTOR_INITIALIZER_INDIRECT_MEMBER
};
+ /// \brief Describes the redeclarations of a declaration.
+ struct LocalRedeclarationsInfo {
+ DeclID FirstID; // The ID of the first declaration
+ unsigned Offset; // Offset into the array of redeclaration chains.
+
+ friend bool operator<(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID < Y.FirstID;
+ }
+
+ friend bool operator>(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID > Y.FirstID;
+ }
+
+ friend bool operator<=(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID <= Y.FirstID;
+ }
+
+ friend bool operator>=(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID >= Y.FirstID;
+ }
+ };
+
+ /// \brief Describes the categories of an Objective-C class.
+ struct ObjCCategoriesInfo {
+ DeclID DefinitionID; // The ID of the definition
+ unsigned Offset; // Offset into the array of category lists.
+
+ friend bool operator<(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID < Y.DefinitionID;
+ }
+
+ friend bool operator>(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID > Y.DefinitionID;
+ }
+
+ friend bool operator<=(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID <= Y.DefinitionID;
+ }
+
+ friend bool operator>=(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID >= Y.DefinitionID;
+ }
+ };
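+
Both structs exist so that the on-disk arrays, sorted by FirstID and DefinitionID respectively, can be searched with the standard binary-search algorithms; that is why the full set of ordering operators is supplied. A sketch of the intended lookup, with assumed array/count names and <algorithm> available for std::lower_bound:

    // Sketch: find the redeclaration chain whose first declaration is FirstID.
    const LocalRedeclarationsInfo *
    findRedeclChain(const LocalRedeclarationsInfo *Infos, unsigned NumInfos,
                    DeclID FirstID) {
      LocalRedeclarationsInfo Key = { FirstID, 0 };
      const LocalRedeclarationsInfo *I =
          std::lower_bound(Infos, Infos + NumInfos, Key);  // uses operator<
      if (I == Infos + NumInfos || I->FirstID != FirstID)
        return 0;      // no chain recorded for this declaration
      return I;        // I->Offset indexes the redeclaration-chains array
    }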
+
/// @}
}
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
index 588fe0e..ab0d313 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
@@ -23,6 +23,7 @@ class Decl;
class ASTReader;
class QualType;
class MacroDefinition;
+class Module;
class ASTDeserializationListener {
protected:
@@ -47,6 +48,11 @@ public:
/// \brief A macro definition was read from the AST file.
virtual void MacroDefinitionRead(serialization::PreprocessedEntityID,
MacroDefinition *MD) { }
+ /// \brief A macro definition that had previously been deserialized
+ /// (and removed via IdentifierRead) has now been made visible.
+ virtual void MacroVisible(IdentifierInfo *II) { }
+ /// \brief A module definition was read from the AST file.
+ virtual void ModuleRead(serialization::SubmoduleID ID, Module *Mod) { }
};
}
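The two new callbacks default to no-ops, so existing listeners keep compiling unchanged. A minimal sketch of a client observing them (includes elided; this listener is illustrative, not one that exists in the tree):

    class LoggingListener : public clang::ASTDeserializationListener {
    public:
      virtual void MacroVisible(clang::IdentifierInfo *II) {
        llvm::errs() << "macro now visible: " << II->getName() << "\n";
      }
      virtual void ModuleRead(clang::serialization::SubmoduleID ID,
                              clang::Module *Mod) {
        llvm::errs() << "module read, submodule ID " << ID << "\n";
      }
    };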
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
index 996a134..9baaf4b 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
@@ -34,6 +34,8 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/DenseSet.h"
@@ -162,16 +164,16 @@ private:
void Error(const char *Msg);
};
-namespace serialization {
+namespace serialization {
class ReadMethodPoolVisitor;
-
+
namespace reader {
class ASTIdentifierLookupTrait;
}
-
+
} // end namespace serialization
-
+
/// \brief Reads an AST file chain containing the contents of a translation
/// unit.
///
@@ -191,7 +193,7 @@ class ASTReader
public ExternalSemaSource,
public IdentifierInfoLookup,
public ExternalIdentifierLookup,
- public ExternalSLocEntrySource
+ public ExternalSLocEntrySource
{
public:
enum ASTReadResult { Success, Failure, IgnorePCH };
@@ -205,18 +207,18 @@ public:
friend class ASTWriter;
friend class ASTUnit; // ASTUnit needs to remap source locations.
friend class serialization::ReadMethodPoolVisitor;
-
- typedef serialization::Module Module;
+
+ typedef serialization::ModuleFile ModuleFile;
typedef serialization::ModuleKind ModuleKind;
typedef serialization::ModuleManager ModuleManager;
-
+
typedef ModuleManager::ModuleIterator ModuleIterator;
typedef ModuleManager::ModuleConstIterator ModuleConstIterator;
typedef ModuleManager::ModuleReverseIterator ModuleReverseIterator;
private:
/// \brief The receiver of some callbacks invoked by ASTReader.
- llvm::OwningPtr<ASTReaderListener> Listener;
+ OwningPtr<ASTReaderListener> Listener;
/// \brief The receiver of deserialization events.
ASTDeserializationListener *DeserializationListener;
@@ -224,7 +226,7 @@ private:
SourceManager &SourceMgr;
FileManager &FileMgr;
DiagnosticsEngine &Diags;
-
+
/// \brief The semantic analysis object that will be processing the
/// AST files and the translation unit that uses it.
Sema *SemaObj;
@@ -234,7 +236,7 @@ private:
/// \brief The AST context into which we'll read the AST files.
ASTContext &Context;
-
+
/// \brief The AST consumer.
ASTConsumer *Consumer;
@@ -243,24 +245,24 @@ private:
/// \brief A map of global bit offsets to the module that stores entities
/// at those bit offsets.
- ContinuousRangeMap<uint64_t, Module*, 4> GlobalBitOffsetsMap;
+ ContinuousRangeMap<uint64_t, ModuleFile*, 4> GlobalBitOffsetsMap;
/// \brief A map of negated SLocEntryIDs to the modules containing them.
- ContinuousRangeMap<unsigned, Module*, 64> GlobalSLocEntryMap;
+ ContinuousRangeMap<unsigned, ModuleFile*, 64> GlobalSLocEntryMap;
+
+ typedef ContinuousRangeMap<unsigned, ModuleFile*, 64> GlobalSLocOffsetMapType;
- typedef ContinuousRangeMap<unsigned, Module*, 64> GlobalSLocOffsetMapType;
-
/// \brief A map of reversed (SourceManager::MaxLoadedOffset - SLocOffset)
/// SourceLocation offsets to the modules containing them.
GlobalSLocOffsetMapType GlobalSLocOffsetMap;
-
+
/// \brief Types that have already been loaded from the chain.
///
/// When the pointer at index I is non-NULL, the type with
/// ID = (I + 1) << FastQual::Width has already been loaded
std::vector<QualType> TypesLoaded;
- typedef ContinuousRangeMap<serialization::TypeID, Module *, 4>
+ typedef ContinuousRangeMap<serialization::TypeID, ModuleFile *, 4>
GlobalTypeMapType;
/// \brief Mapping from global type IDs to the module in which the
@@ -274,60 +276,67 @@ private:
/// = I + 1 has already been loaded.
std::vector<Decl *> DeclsLoaded;
- typedef ContinuousRangeMap<serialization::DeclID, Module *, 4>
+ typedef ContinuousRangeMap<serialization::DeclID, ModuleFile *, 4>
GlobalDeclMapType;
-
+
/// \brief Mapping from global declaration IDs to the module in which the
/// declaration resides.
GlobalDeclMapType GlobalDeclMap;
-
- typedef std::pair<Module *, uint64_t> FileOffset;
+
+ typedef std::pair<ModuleFile *, uint64_t> FileOffset;
typedef SmallVector<FileOffset, 2> FileOffsetsTy;
typedef llvm::DenseMap<serialization::DeclID, FileOffsetsTy>
DeclUpdateOffsetsMap;
-
+
/// \brief Declarations that have modifications residing in a later file
/// in the chain.
DeclUpdateOffsetsMap DeclUpdateOffsets;
- typedef llvm::DenseMap<serialization::DeclID,
- std::pair<Module *, uint64_t> >
+ struct ReplacedDeclInfo {
+ ModuleFile *Mod;
+ uint64_t Offset;
+ unsigned RawLoc;
+
+ ReplacedDeclInfo() : Mod(0), Offset(0), RawLoc(0) {}
+ ReplacedDeclInfo(ModuleFile *Mod, uint64_t Offset, unsigned RawLoc)
+ : Mod(Mod), Offset(Offset), RawLoc(RawLoc) {}
+ };
+
+ typedef llvm::DenseMap<serialization::DeclID, ReplacedDeclInfo>
DeclReplacementMap;
/// \brief Declarations that have been replaced in a later file in the chain.
DeclReplacementMap ReplacedDecls;
+ struct FileDeclsInfo {
+ ModuleFile *Mod;
+ ArrayRef<serialization::LocalDeclID> Decls;
+
+ FileDeclsInfo() : Mod(0) {}
+ FileDeclsInfo(ModuleFile *Mod, ArrayRef<serialization::LocalDeclID> Decls)
+ : Mod(Mod), Decls(Decls) {}
+ };
+
+ /// \brief Map from a FileID to the file-level declarations that it contains.
+ llvm::DenseMap<FileID, FileDeclsInfo> FileDeclIDs;
+
// Updates for visible decls can occur for other contexts than just the
// TU, and when we read those update records, the actual context will not
// be available yet (unless it's the TU), so have this pending map using the
// ID as a key. It will be realized when the context is actually loaded.
- typedef SmallVector<std::pair<void *, Module*>, 1> DeclContextVisibleUpdates;
+ typedef SmallVector<std::pair<void *, ModuleFile*>, 1> DeclContextVisibleUpdates;
typedef llvm::DenseMap<serialization::DeclID, DeclContextVisibleUpdates>
DeclContextVisibleUpdatesPending;
/// \brief Updates to the visible declarations of declaration contexts that
/// haven't been loaded yet.
DeclContextVisibleUpdatesPending PendingVisibleUpdates;
-
- typedef SmallVector<CXXRecordDecl *, 4> ForwardRefs;
- typedef llvm::DenseMap<const CXXRecordDecl *, ForwardRefs>
- PendingForwardRefsMap;
- /// \brief Forward references that have a definition but the definition decl
- /// is still initializing. When the definition gets read it will update
- /// the DefinitionData pointer of all pending references.
- PendingForwardRefsMap PendingForwardRefs;
-
- typedef llvm::DenseMap<serialization::DeclID, serialization::DeclID>
- FirstLatestDeclIDMap;
- /// \brief Map of first declarations from a chained PCH that point to the
- /// most recent declarations in another AST file.
- FirstLatestDeclIDMap FirstLatestDeclIDs;
-
- /// \brief Set of ObjC interfaces that have categories chained to them in
- /// other modules.
- llvm::DenseSet<serialization::GlobalDeclID> ObjCChainedCategoriesInterfaces;
-
+
+ /// \brief The set of C++ or Objective-C classes that have forward
+ /// declarations that have not yet been linked to their definitions.
+ llvm::SmallPtrSet<Decl *, 4> PendingDefinitions;
+
/// \brief Read the records that describe the contents of declcontexts.
- bool ReadDeclContextStorage(Module &M,
+ bool ReadDeclContextStorage(ModuleFile &M,
llvm::BitstreamCursor &Cursor,
const std::pair<uint64_t, uint64_t> &Offsets,
serialization::DeclContextInfo &Info);
@@ -340,14 +349,62 @@ private:
/// been loaded.
std::vector<IdentifierInfo *> IdentifiersLoaded;
- typedef ContinuousRangeMap<serialization::IdentID, Module *, 4>
+ typedef ContinuousRangeMap<serialization::IdentID, ModuleFile *, 4>
GlobalIdentifierMapType;
-
+
/// \brief Mapping from global identifier IDs to the module in which the
/// identifier resides along with the offset that should be added to the
/// global identifier ID to produce a local ID.
GlobalIdentifierMapType GlobalIdentifierMap;
+ /// \brief A vector containing submodules that have already been loaded.
+ ///
+ /// This vector is indexed by the Submodule ID (-1). NULL submodule entries
+ /// indicate that the particular submodule ID has not yet been loaded.
+ SmallVector<Module *, 2> SubmodulesLoaded;
+
+ typedef ContinuousRangeMap<serialization::SubmoduleID, ModuleFile *, 4>
+ GlobalSubmoduleMapType;
+
+ /// \brief Mapping from global submodule IDs to the module file in which the
+ /// submodule resides along with the offset that should be added to the
+ /// global submodule ID to produce a local ID.
+ GlobalSubmoduleMapType GlobalSubmoduleMap;
+
+ /// \brief A set of hidden declarations and macro names.
+ typedef llvm::SmallVector<llvm::PointerUnion<Decl *, IdentifierInfo *>, 2>
+ HiddenNames;
+
+ typedef llvm::DenseMap<Module *, HiddenNames> HiddenNamesMapType;
+
+ /// \brief A mapping from each of the hidden submodules to the deserialized
+ /// declarations in that submodule that could be made visible.
+ HiddenNamesMapType HiddenNamesMap;
+
+
+ /// \brief A module import or export that hasn't yet been resolved.
+ struct UnresolvedModuleImportExport {
+ /// \brief The file in which this module resides.
+ ModuleFile *File;
+
+ /// \brief The module that is importing or exporting.
+ Module *Mod;
+
+ /// \brief The local ID of the module that is being exported.
+ unsigned ID;
+
+ /// \brief Whether this is an import (vs. an export).
+ unsigned IsImport : 1;
+
+ /// \brief Whether this is a wildcard export.
+ unsigned IsWildcard : 1;
+ };
+
+ /// \brief The set of module imports and exports that still need to be
+ /// resolved.
+ llvm::SmallVector<UnresolvedModuleImportExport, 2>
+ UnresolvedModuleImportExports;
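+
Imports and exports read from a submodule block may name modules whose files have not been deserialized yet, so they are queued here and resolved once loading settles. A sketch of the deferred pass, using getGlobalSubmoduleID/getSubmodule declared later in this header and assuming Module exposes Imports/Exports containers (the loop body is assumed, not the literal implementation):

    for (unsigned I = 0, N = UnresolvedModuleImportExports.size(); I != N; ++I) {
      UnresolvedModuleImportExport &U = UnresolvedModuleImportExports[I];
      Module *Target = getSubmodule(getGlobalSubmoduleID(*U.File, U.ID));
      if (!Target)
        continue;                     // a missing module is diagnosed here
      if (U.IsImport)
        U.Mod->Imports.push_back(Target);                  // assumed member
      else
        U.Mod->Exports.push_back(
            Module::ExportDecl(Target, U.IsWildcard));     // assumed member
    }
    UnresolvedModuleImportExports.clear();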
+
/// \brief A vector containing selectors that have already been loaded.
///
/// This vector is indexed by the Selector ID (-1). NULL selector
@@ -355,27 +412,31 @@ private:
/// been loaded.
SmallVector<Selector, 16> SelectorsLoaded;
- typedef ContinuousRangeMap<serialization::SelectorID, Module *, 4>
+ typedef ContinuousRangeMap<serialization::SelectorID, ModuleFile *, 4>
GlobalSelectorMapType;
-
+
/// \brief Mapping from global selector IDs to the module in which the
/// selector resides along with the offset that should be added to the
/// global selector ID to produce a local ID.
GlobalSelectorMapType GlobalSelectorMap;
+ /// \brief The generation number of the last time we loaded data from the
+ /// global method pool for this selector.
+ llvm::DenseMap<Selector, unsigned> SelectorGeneration;
+
/// \brief Mapping from identifiers that represent macros whose definitions
/// have not yet been deserialized to the global offset where the macro
/// record resides.
llvm::DenseMap<IdentifierInfo *, uint64_t> UnreadMacroRecordOffsets;
- typedef ContinuousRangeMap<unsigned, Module *, 4>
+ typedef ContinuousRangeMap<unsigned, ModuleFile *, 4>
GlobalPreprocessedEntityMapType;
-
+
/// \brief Mapping from global preprocessing entity IDs to the module in
/// which the preprocessed entity resides along with the offset that should be
/// added to the global preprocessing entity ID to produce a local ID.
GlobalPreprocessedEntityMapType GlobalPreprocessedEntityMap;
-
+
/// \name CodeGen-relevant special data
/// \brief Fields containing data that is relevant to CodeGen.
//@{
@@ -423,7 +484,7 @@ private:
/// \brief A list of all the delegating constructors we've seen, to diagnose
/// cycles.
SmallVector<uint64_t, 4> DelegatingCtorDecls;
-
+
/// \brief Method selectors used in a @selector expression. Used for
/// implementation of -Wselector.
SmallVector<uint64_t, 64> ReferencedSelectorsData;
@@ -480,6 +541,9 @@ private:
/// \brief A list of the namespaces we've seen.
SmallVector<uint64_t, 4> KnownNamespaces;
+ /// \brief A list of modules that were imported by precompiled headers or
+ /// any other non-module AST file.
+ SmallVector<serialization::SubmoduleID, 2> ImportedModules;
//@}
/// \brief The original file name that was used to build the primary AST file,
@@ -493,7 +557,7 @@ private:
/// \brief The file ID for the original file that was used to build the
/// primary AST file.
FileID OriginalFileID;
-
+
/// \brief The directory that the PCH was originally created in. Used to
/// allow resolving headers even after headers+PCH was moved to a new path.
std::string OriginalDir;
@@ -511,19 +575,23 @@ private:
/// \brief Whether to disable the normal validation performed on precompiled
/// headers when they are loaded.
bool DisableValidation;
-
+
/// \brief Whether to disable the use of stat caches in AST files.
bool DisableStatCache;
+ /// \brief Whether to accept an AST file with compiler errors.
+ bool AllowASTWithCompilerErrors;
+
+ /// \brief The current "generation" of the module file import stack, which
+ /// indicates how many separate module file load operations have occurred.
+ unsigned CurrentGeneration;
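+
CurrentGeneration ticks once per top-level load, and per-entity generation maps (SelectorGeneration above, IdentifierGeneration below) record the generation at which an entity was last refreshed; a lookup only re-walks module files when the two differ. A hypothetical helper sketching that check (assumed shape, not the literal implementation):

    void updateOutOfDateSelector(Selector Sel) {
      unsigned &Generation = SelectorGeneration[Sel];  // 0 if never loaded
      if (Generation == CurrentGeneration)
        return;                     // nothing new since the last query
      Generation = CurrentGeneration;
      // ... visit only the module files loaded after the recorded
      //     generation and merge their method-pool entries for Sel ...
    }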
+
/// \brief Mapping from switch-case IDs in the chain to switch-case statements
///
/// Statements usually don't have IDs, but switch cases need them, so that the
/// switch statement can refer to them.
std::map<unsigned, SwitchCase *> SwitchCaseStmts;
- /// \brief Mapping from opaque value IDs to OpaqueValueExprs.
- std::map<unsigned, OpaqueValueExpr*> OpaqueValueExprs;
-
/// \brief The number of stat() calls that hit/missed the stat
/// cache.
unsigned NumStatHits, NumStatMisses;
@@ -567,13 +635,19 @@ private:
/// Number of visible decl contexts read/total.
unsigned NumVisibleDeclContextsRead, TotalVisibleDeclContexts;
-
+
/// Total size of modules, in bits, currently loaded
uint64_t TotalModulesSizeInBits;
/// \brief Number of Decl/types that are currently deserializing.
unsigned NumCurrentElementsDeserializing;
+ /// \brief Set true while we are in the process of passing deserialized
+ /// "interesting" decls to consumer inside FinishedDeserializing().
+ /// This is used as a guard to avoid recursively repeating the process of
+ /// passing decls to consumer.
+ bool PassingDeclsToConsumer;
+
/// Number of CXX base specifiers currently loaded
unsigned NumCXXBaseSpecifiersLoaded;
@@ -591,6 +665,10 @@ private:
/// loaded once the recursive loading has completed.
std::deque<PendingIdentifierInfo> PendingIdentifierInfos;
+ /// \brief The generation number of each identifier, which keeps track of
+ /// the last time we loaded information about this identifier.
+ llvm::DenseMap<IdentifierInfo *, unsigned> IdentifierGeneration;
+
/// \brief Contains declarations and definitions that will be
/// "interesting" to the ASTConsumer, when we get that AST consumer.
///
@@ -599,10 +677,58 @@ private:
/// Objective-C protocols.
std::deque<Decl *> InterestingDecls;
- /// \brief We delay loading of the previous declaration chain to avoid
- /// deeply nested calls when there are many redeclarations.
- std::deque<std::pair<Decl *, serialization::DeclID> > PendingPreviousDecls;
-
+ /// \brief The set of redeclarable declarations that have been deserialized
+ /// since the last time the declaration chains were linked.
+ llvm::SmallPtrSet<Decl *, 16> RedeclsDeserialized;
+
+ /// \brief The list of redeclaration chains that still need to be
+ /// reconstructed.
+ ///
+ /// Each element is the global declaration ID of the first declaration in
+ /// the chain. Elements in this vector should be unique; use
+ /// PendingDeclChainsKnown to ensure uniqueness.
+ llvm::SmallVector<serialization::DeclID, 16> PendingDeclChains;
+
+ /// \brief Keeps track of the elements added to PendingDeclChains.
+ llvm::SmallSet<serialization::DeclID, 16> PendingDeclChainsKnown;
+
+ /// \brief The set of Objective-C categories that have been deserialized
+ /// since the last time the declaration chains were linked.
+ llvm::SmallPtrSet<ObjCCategoryDecl *, 16> CategoriesDeserialized;
+
+ /// \brief The set of Objective-C class definitions that have already been
+ /// loaded, for which we will need to check for categories whenever a new
+ /// module is loaded.
+ llvm::SmallVector<ObjCInterfaceDecl *, 16> ObjCClassesLoaded;
+
+ typedef llvm::DenseMap<Decl *, llvm::SmallVector<serialization::DeclID, 2> >
+ MergedDeclsMap;
+
+ /// \brief A mapping from canonical declarations to the set of additional
+ /// (global, previously-canonical) declaration IDs that have been merged with
+ /// that canonical declaration.
+ MergedDeclsMap MergedDecls;
+
+ typedef llvm::DenseMap<serialization::GlobalDeclID,
+ llvm::SmallVector<serialization::DeclID, 2> >
+ StoredMergedDeclsMap;
+
+ /// \brief A mapping from canonical declaration IDs to the set of additional
+ /// declaration IDs that have been merged with that canonical declaration.
+ ///
+ /// This is the deserialized representation of the entries in MergedDecls.
+ /// When we query entries in MergedDecls, they will be augmented with entries
+ /// from StoredMergedDecls.
+ StoredMergedDeclsMap StoredMergedDecls;
+
+ /// \brief Combine the stored merged declarations for the given canonical
+ /// declaration into the set of merged declarations.
+ ///
+ /// \returns An iterator into MergedDecls that corresponds to the position of
+ /// the given canonical declaration.
+ MergedDeclsMap::iterator
+ combineStoredMergedDecls(Decl *Canon, serialization::GlobalDeclID CanonID);
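+
Merge information therefore arrives in two shapes: live pointers for declarations already materialized (MergedDecls) and raw IDs straight off disk (StoredMergedDecls); combineStoredMergedDecls folds the second into the first the first time a canonical declaration is queried. Sketch of the intended call pattern (assumed usage):

    MergedDeclsMap::iterator It = combineStoredMergedDecls(Canon, CanonID);
    if (It != MergedDecls.end())
      for (unsigned I = 0, N = It->second.size(); I != N; ++I) {
        // Each entry is the global ID of a previously-canonical
        // declaration that has been merged into Canon.
        serialization::DeclID Merged = It->second[I];
        (void)Merged;
      }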
+
/// \brief Ready to load the previous declaration of the given Decl.
void loadAndAttachPreviousDecl(Decl *D, serialization::DeclID ID);
@@ -614,7 +740,7 @@ private:
Read_Decl, Read_Type, Read_Stmt
};
- /// \brief What kind of records we are reading.
+ /// \brief What kind of records we are reading.
ReadingKind ReadingKind;
/// \brief RAII object to change the reading kind.
@@ -649,7 +775,7 @@ private:
std::string SuggestedPredefines;
/// \brief Reads a statement from the specified cursor.
- Stmt *ReadStmtFromStream(Module &F);
+ Stmt *ReadStmtFromStream(ModuleFile &F);
/// \brief Get a FileEntry out of stored-in-PCH filename, making sure we take
/// into account all the necessary relocations.
@@ -658,20 +784,21 @@ private:
void MaybeAddSystemRootToFilename(std::string &Filename);
ASTReadResult ReadASTCore(StringRef FileName, ModuleKind Type,
- Module *ImportedBy);
- ASTReadResult ReadASTBlock(Module &F);
+ ModuleFile *ImportedBy);
+ ASTReadResult ReadASTBlock(ModuleFile &F);
bool CheckPredefinesBuffers();
- bool ParseLineTable(Module &F, SmallVectorImpl<uint64_t> &Record);
- ASTReadResult ReadSourceManagerBlock(Module &F);
+ bool ParseLineTable(ModuleFile &F, SmallVectorImpl<uint64_t> &Record);
+ ASTReadResult ReadSourceManagerBlock(ModuleFile &F);
ASTReadResult ReadSLocEntryRecord(int ID);
llvm::BitstreamCursor &SLocCursorForID(int ID);
- SourceLocation getImportLocation(Module *F);
+ SourceLocation getImportLocation(ModuleFile *F);
+ ASTReadResult ReadSubmoduleBlock(ModuleFile &F);
bool ParseLanguageOptions(const SmallVectorImpl<uint64_t> &Record);
-
+
struct RecordLocation {
- RecordLocation(Module *M, uint64_t O)
+ RecordLocation(ModuleFile *M, uint64_t O)
: F(M), Offset(O) {}
- Module *F;
+ ModuleFile *F;
uint64_t Offset;
};
@@ -679,19 +806,22 @@ private:
RecordLocation TypeCursorForIndex(unsigned Index);
void LoadedDecl(unsigned Index, Decl *D);
Decl *ReadDeclRecord(serialization::DeclID ID);
- RecordLocation DeclCursorForID(serialization::DeclID ID);
+ RecordLocation DeclCursorForID(serialization::DeclID ID,
+ unsigned &RawLocation);
void loadDeclUpdateRecords(serialization::DeclID ID, Decl *D);
- void loadObjCChainedCategories(serialization::GlobalDeclID ID,
- ObjCInterfaceDecl *D);
-
+ void loadPendingDeclChain(serialization::GlobalDeclID ID);
+ void loadObjCCategories(serialization::GlobalDeclID ID, ObjCInterfaceDecl *D,
+ unsigned PreviousGeneration = 0);
+
RecordLocation getLocalBitOffset(uint64_t GlobalOffset);
- uint64_t getGlobalBitOffset(Module &M, uint32_t LocalOffset);
+ uint64_t getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset);
/// \brief Returns the first preprocessed entity ID that ends after \arg BLoc.
serialization::PreprocessedEntityID
findBeginPreprocessedEntity(SourceLocation BLoc) const;
- /// \brief Returns the first preprocessed entity ID that begins after \arg ELoc.
+ /// \brief Returns the first preprocessed entity ID that begins after \arg
+ /// ELoc.
serialization::PreprocessedEntityID
findEndPreprocessedEntity(SourceLocation ELoc) const;
@@ -703,7 +833,15 @@ private:
findNextPreprocessedEntity(
GlobalSLocOffsetMapType::const_iterator SLocMapI) const;
+ /// \brief Returns (ModuleFile, Local index) pair for \arg GlobalIndex of a
+ /// preprocessed entity.
+ std::pair<ModuleFile *, unsigned>
+ getModulePreprocessedEntity(unsigned GlobalIndex);
+
void PassInterestingDeclsToConsumer();
+ void PassInterestingDeclToConsumer(Decl *D);
+
+ void finishPendingActions();
/// \brief Produce an error diagnostic and return true.
///
@@ -740,20 +878,38 @@ public:
/// help when an AST file is being used in cases where the
/// underlying files in the file system may have changed, but
/// parsing should still continue.
+ ///
+ /// \param AllowASTWithCompilerErrors If true, the AST reader will accept an
+ /// AST file that was created out of an AST with compiler errors,
+ /// otherwise it will reject it.
ASTReader(Preprocessor &PP, ASTContext &Context, StringRef isysroot = "",
- bool DisableValidation = false, bool DisableStatCache = false);
+ bool DisableValidation = false, bool DisableStatCache = false,
+ bool AllowASTWithCompilerErrors = false);
~ASTReader();
SourceManager &getSourceManager() const { return SourceMgr; }
-
+
/// \brief Load the AST file designated by the given file name.
ASTReadResult ReadAST(const std::string &FileName, ModuleKind Type);
/// \brief Checks that no file that is stored in PCH is out-of-sync with
/// the actual file in the file system.
- ASTReadResult validateFileEntries(Module &M);
+ ASTReadResult validateFileEntries(ModuleFile &M);
+ /// \brief Make the entities in the given module and any of its (non-explicit)
+ /// submodules visible to name lookup.
+ ///
+ /// \param Mod The module whose names should be made visible.
+ ///
+ /// \param Visibility The level of visibility to give the names in the module.
+ /// Visibility can only be increased over time.
+ void makeModuleVisible(Module *Mod,
+ Module::NameVisibilityKind NameVisibility);
+
+ /// \brief Make the names within this set of hidden names visible.
+ void makeNamesVisible(const HiddenNames &Names);
+
/// \brief Set the AST callbacks listener.
void setListener(ASTReaderListener *listener) {
Listener.reset(listener);
@@ -770,12 +926,19 @@ public:
ModuleMgr.addInMemoryBuffer(FileName, Buffer);
}
+ /// \brief Finalizes the AST reader's state before writing an AST file to
+ /// disk.
+ ///
+ /// This operation may undo temporary state in the AST that should not be
+ /// emitted.
+ void finalizeForWriting();
+
/// \brief Retrieve the module manager.
ModuleManager &getModuleManager() { return ModuleMgr; }
/// \brief Retrieve the preprocessor.
Preprocessor &getPreprocessor() const { return PP; }
-
+
/// \brief Retrieve the name of the original source file name
const std::string &getOriginalSourceFile() { return OriginalFileName; }
@@ -801,6 +964,11 @@ public:
virtual std::pair<unsigned, unsigned>
findPreprocessedEntitiesInRange(SourceRange Range);
+ /// \brief Returns, if known, whether the preallocated preprocessed entity
+ /// with index \arg Index came from file \arg FID.
+ virtual llvm::Optional<bool> isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID);
+
/// \brief Read the header file information for the given file entry.
virtual HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE);
@@ -826,6 +994,11 @@ public:
return static_cast<unsigned>(DeclsLoaded.size());
}
+ /// \brief Returns the number of submodules known.
+ unsigned getTotalNumSubmodules() const {
+ return static_cast<unsigned>(SubmodulesLoaded.size());
+ }
+
/// \brief Returns the number of selectors found in the chain.
unsigned getTotalNumSelectors() const {
return static_cast<unsigned>(SelectorsLoaded.size());
@@ -839,28 +1012,28 @@ public:
E = ModuleMgr.end(); I != E; ++I) {
Result += (*I)->NumPreprocessedEntities;
}
-
+
return Result;
}
-
+
/// \brief Returns the number of C++ base specifiers found in the chain.
unsigned getTotalNumCXXBaseSpecifiers() const {
return NumCXXBaseSpecifiersLoaded;
}
-
+
/// \brief Reads a TemplateArgumentLocInfo appropriate for the
/// given TemplateArgument kind.
TemplateArgumentLocInfo
- GetTemplateArgumentLocInfo(Module &F, TemplateArgument::ArgKind Kind,
+ GetTemplateArgumentLocInfo(ModuleFile &F, TemplateArgument::ArgKind Kind,
const RecordData &Record, unsigned &Idx);
/// \brief Reads a TemplateArgumentLoc.
TemplateArgumentLoc
- ReadTemplateArgumentLoc(Module &F,
+ ReadTemplateArgumentLoc(ModuleFile &F,
const RecordData &Record, unsigned &Idx);
/// \brief Reads a declarator info from the given record.
- TypeSourceInfo *GetTypeSourceInfo(Module &F,
+ TypeSourceInfo *GetTypeSourceInfo(ModuleFile &F,
const RecordData &Record, unsigned &Idx);
/// \brief Resolve a type ID into a type, potentially building a new
@@ -868,35 +1041,42 @@ public:
QualType GetType(serialization::TypeID ID);
/// \brief Resolve a local type ID within a given AST file into a type.
- QualType getLocalType(Module &F, unsigned LocalID);
-
+ QualType getLocalType(ModuleFile &F, unsigned LocalID);
+
/// \brief Map a local type ID within a given AST file into a global type ID.
- serialization::TypeID getGlobalTypeID(Module &F, unsigned LocalID) const;
-
- /// \brief Read a type from the current position in the given record, which
+ serialization::TypeID getGlobalTypeID(ModuleFile &F, unsigned LocalID) const;
+
+ /// \brief Read a type from the current position in the given record, which
/// was read from the given AST file.
- QualType readType(Module &F, const RecordData &Record, unsigned &Idx) {
+ QualType readType(ModuleFile &F, const RecordData &Record, unsigned &Idx) {
if (Idx >= Record.size())
return QualType();
-
+
return getLocalType(F, Record[Idx++]);
}
-
- /// \brief Map from a local declaration ID within a given module to a
+
+ /// \brief Map from a local declaration ID within a given module to a
/// global declaration ID.
- serialization::DeclID getGlobalDeclID(Module &F, unsigned LocalID) const;
+ serialization::DeclID getGlobalDeclID(ModuleFile &F, unsigned LocalID) const;
/// \brief Returns true if global DeclID \arg ID originated from module
/// \arg M.
- bool isDeclIDFromModule(serialization::GlobalDeclID ID, Module &M) const;
+ bool isDeclIDFromModule(serialization::GlobalDeclID ID, ModuleFile &M) const;
+
+ /// \brief Retrieve the module file that owns the given declaration, or NULL
+ /// if the declaration is not from a module file.
+ ModuleFile *getOwningModuleFile(Decl *D);
+ /// \brief Returns the source location for the decl \arg ID.
+ SourceLocation getSourceLocationForDeclID(serialization::GlobalDeclID ID);
+
/// \brief Resolve a declaration ID into a declaration, potentially
/// building a new declaration.
Decl *GetDecl(serialization::DeclID ID);
virtual Decl *GetExternalDecl(uint32_t ID);
/// \brief Reads a declaration with the given local ID in the given module.
- Decl *GetLocalDecl(Module &F, uint32_t LocalID) {
+ Decl *GetLocalDecl(ModuleFile &F, uint32_t LocalID) {
return GetDecl(getGlobalDeclID(F, LocalID));
}
@@ -904,40 +1084,49 @@ public:
///
/// \returns The requested declaration, casted to the given return type.
template<typename T>
- T *GetLocalDeclAs(Module &F, uint32_t LocalID) {
+ T *GetLocalDeclAs(ModuleFile &F, uint32_t LocalID) {
return cast_or_null<T>(GetLocalDecl(F, LocalID));
}
- /// \brief Reads a declaration ID from the given position in a record in the
+ /// \brief Map a global declaration ID into the declaration ID used to
+ /// refer to this declaration within the given module file.
+ ///
+ /// \returns the global ID of the given declaration as known in the given
+ /// module file.
+ serialization::DeclID
+ mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
+ serialization::DeclID GlobalID);
+
+ /// \brief Reads a declaration ID from the given position in a record in the
/// given module.
///
/// \returns The declaration ID read from the record, adjusted to a global ID.
- serialization::DeclID ReadDeclID(Module &F, const RecordData &Record,
+ serialization::DeclID ReadDeclID(ModuleFile &F, const RecordData &Record,
unsigned &Idx);
-
+
/// \brief Reads a declaration from the given position in a record in the
/// given module.
- Decl *ReadDecl(Module &F, const RecordData &R, unsigned &I) {
+ Decl *ReadDecl(ModuleFile &F, const RecordData &R, unsigned &I) {
return GetDecl(ReadDeclID(F, R, I));
}
-
+
/// \brief Reads a declaration from the given position in a record in the
/// given module.
///
/// \returns The declaration read from this location, casted to the given
/// result type.
template<typename T>
- T *ReadDeclAs(Module &F, const RecordData &R, unsigned &I) {
+ T *ReadDeclAs(ModuleFile &F, const RecordData &R, unsigned &I) {
return cast_or_null<T>(GetDecl(ReadDeclID(F, R, I)));
}
/// \brief Read a CXXBaseSpecifiers ID from the given record and
/// return its global bit offset.
- uint64_t readCXXBaseSpecifiers(Module &M, const RecordData &Record,
+ uint64_t readCXXBaseSpecifiers(ModuleFile &M, const RecordData &Record,
unsigned &Idx);
-
+
virtual CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset);
-
+
/// \brief Resolve the offset of a statement into a statement.
///
/// This operation will read a new statement from the external
@@ -974,6 +1163,12 @@ public:
bool (*isKindWeWant)(Decl::Kind),
SmallVectorImpl<Decl*> &Decls);
+ /// \brief Get the decls that are contained in a file in the Offset/Length
+ /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// a range.
+ virtual void FindFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls);
+
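This is the reader-side consumer of the new file-sorted declaration tables (see FileDeclIDs above and the writer-side map in ASTWriter below); a point query passes Length 0. Illustrative call, with File, Offset and Length assumed to come from the SourceManager:

    SmallVector<Decl *, 8> Decls;
    Reader.FindFileRegionDecls(File, Offset, Length, Decls);
    for (unsigned I = 0, N = Decls.size(); I != N; ++I)
      Decls[I]->print(llvm::errs());   // any per-declaration processing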
/// \brief Notify ASTReader that we started deserialization of
/// a decl or type so until FinishedDeserializing is called there may be
/// decls that are initializing. Must be paired with FinishedDeserializing.
@@ -995,7 +1190,7 @@ public:
/// \brief Dump information about the AST reader to standard error.
void dump();
-
+
/// Return the amount of memory used by memory buffers, breaking down
/// by heap-backed versus mmap'ed memory.
virtual void getMemoryBufferSizes(MemoryBufferSizes &sizes) const;
@@ -1025,11 +1220,7 @@ public:
/// \brief Load the contents of the global method pool for a given
/// selector.
- ///
- /// \returns a pair of Objective-C methods lists containing the
- /// instance and factory methods, respectively, with this selector.
- virtual std::pair<ObjCMethodList, ObjCMethodList>
- ReadMethodPool(Selector Sel);
+ virtual void ReadMethodPool(Selector Sel);
/// \brief Load the set of namespaces that are known to the external source,
/// which will be used during typo correction.
@@ -1051,7 +1242,7 @@ public:
virtual void ReadLocallyScopedExternalDecls(
SmallVectorImpl<NamedDecl *> &Decls);
-
+
virtual void ReadReferencedSelectors(
SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels);
@@ -1061,7 +1252,7 @@ public:
virtual void ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables);
virtual void ReadPendingInstantiations(
- SmallVectorImpl<std::pair<ValueDecl *,
+ SmallVectorImpl<std::pair<ValueDecl *,
SourceLocation> > &Pending);
/// \brief Load a selector from disk, registering its ID if it exists.
@@ -1080,7 +1271,7 @@ public:
IdentifierInfo *DecodeIdentifierInfo(serialization::IdentifierID ID);
- IdentifierInfo *GetIdentifierInfo(Module &M, const RecordData &Record,
+ IdentifierInfo *GetIdentifierInfo(ModuleFile &M, const RecordData &Record,
unsigned &Idx) {
return DecodeIdentifierInfo(getGlobalIdentifierID(M, Record[Idx++]));
}
@@ -1089,101 +1280,110 @@ public:
return DecodeIdentifierInfo(ID);
}
- IdentifierInfo *getLocalIdentifier(Module &M, unsigned LocalID);
-
- serialization::IdentifierID getGlobalIdentifierID(Module &M,
+ IdentifierInfo *getLocalIdentifier(ModuleFile &M, unsigned LocalID);
+
+ serialization::IdentifierID getGlobalIdentifierID(ModuleFile &M,
unsigned LocalID);
-
+
/// \brief Read the source location entry with index ID.
virtual bool ReadSLocEntry(int ID);
+ /// \brief Retrieve the global submodule ID given a module and its local ID
+ /// number.
+ serialization::SubmoduleID
+ getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID);
+
+ /// \brief Retrieve the submodule that corresponds to a global submodule ID.
+ ///
+ Module *getSubmodule(serialization::SubmoduleID GlobalID);
+
/// \brief Retrieve a selector from the given module with its local ID
/// number.
- Selector getLocalSelector(Module &M, unsigned LocalID);
+ Selector getLocalSelector(ModuleFile &M, unsigned LocalID);
Selector DecodeSelector(serialization::SelectorID Idx);
virtual Selector GetExternalSelector(serialization::SelectorID ID);
uint32_t GetNumExternalSelectors();
- Selector ReadSelector(Module &M, const RecordData &Record, unsigned &Idx) {
+ Selector ReadSelector(ModuleFile &M, const RecordData &Record, unsigned &Idx) {
return getLocalSelector(M, Record[Idx++]);
}
-
+
/// \brief Retrieve the global selector ID that corresponds to the
/// local selector ID in a given module.
- serialization::SelectorID getGlobalSelectorID(Module &F,
+ serialization::SelectorID getGlobalSelectorID(ModuleFile &F,
unsigned LocalID) const;
/// \brief Read a declaration name.
- DeclarationName ReadDeclarationName(Module &F,
+ DeclarationName ReadDeclarationName(ModuleFile &F,
const RecordData &Record, unsigned &Idx);
- void ReadDeclarationNameLoc(Module &F,
+ void ReadDeclarationNameLoc(ModuleFile &F,
DeclarationNameLoc &DNLoc, DeclarationName Name,
const RecordData &Record, unsigned &Idx);
- void ReadDeclarationNameInfo(Module &F, DeclarationNameInfo &NameInfo,
+ void ReadDeclarationNameInfo(ModuleFile &F, DeclarationNameInfo &NameInfo,
const RecordData &Record, unsigned &Idx);
- void ReadQualifierInfo(Module &F, QualifierInfo &Info,
+ void ReadQualifierInfo(ModuleFile &F, QualifierInfo &Info,
const RecordData &Record, unsigned &Idx);
- NestedNameSpecifier *ReadNestedNameSpecifier(Module &F,
+ NestedNameSpecifier *ReadNestedNameSpecifier(ModuleFile &F,
const RecordData &Record,
unsigned &Idx);
- NestedNameSpecifierLoc ReadNestedNameSpecifierLoc(Module &F,
+ NestedNameSpecifierLoc ReadNestedNameSpecifierLoc(ModuleFile &F,
const RecordData &Record,
unsigned &Idx);
/// \brief Read a template name.
- TemplateName ReadTemplateName(Module &F, const RecordData &Record,
+ TemplateName ReadTemplateName(ModuleFile &F, const RecordData &Record,
unsigned &Idx);
/// \brief Read a template argument.
- TemplateArgument ReadTemplateArgument(Module &F,
+ TemplateArgument ReadTemplateArgument(ModuleFile &F,
const RecordData &Record,unsigned &Idx);
-
+
/// \brief Read a template parameter list.
- TemplateParameterList *ReadTemplateParameterList(Module &F,
+ TemplateParameterList *ReadTemplateParameterList(ModuleFile &F,
const RecordData &Record,
unsigned &Idx);
-
+
/// \brief Read a template argument array.
void
ReadTemplateArgumentList(SmallVector<TemplateArgument, 8> &TemplArgs,
- Module &F, const RecordData &Record,
+ ModuleFile &F, const RecordData &Record,
unsigned &Idx);
/// \brief Read a UnresolvedSet structure.
- void ReadUnresolvedSet(Module &F, UnresolvedSetImpl &Set,
+ void ReadUnresolvedSet(ModuleFile &F, UnresolvedSetImpl &Set,
const RecordData &Record, unsigned &Idx);
/// \brief Read a C++ base specifier.
- CXXBaseSpecifier ReadCXXBaseSpecifier(Module &F,
+ CXXBaseSpecifier ReadCXXBaseSpecifier(ModuleFile &F,
const RecordData &Record,unsigned &Idx);
/// \brief Read a CXXCtorInitializer array.
std::pair<CXXCtorInitializer **, unsigned>
- ReadCXXCtorInitializers(Module &F, const RecordData &Record,
+ ReadCXXCtorInitializers(ModuleFile &F, const RecordData &Record,
unsigned &Idx);
/// \brief Read a source location from raw form.
- SourceLocation ReadSourceLocation(Module &Module, unsigned Raw) const {
+ SourceLocation ReadSourceLocation(ModuleFile &ModuleFile, unsigned Raw) const {
SourceLocation Loc = SourceLocation::getFromRawEncoding(Raw);
- assert(Module.SLocRemap.find(Loc.getOffset()) != Module.SLocRemap.end() &&
+ assert(ModuleFile.SLocRemap.find(Loc.getOffset()) != ModuleFile.SLocRemap.end() &&
"Cannot find offset to remap.");
- int Remap = Module.SLocRemap.find(Loc.getOffset())->second;
+ int Remap = ModuleFile.SLocRemap.find(Loc.getOffset())->second;
return Loc.getLocWithOffset(Remap);
}
/// \brief Read a source location.
- SourceLocation ReadSourceLocation(Module &Module,
+ SourceLocation ReadSourceLocation(ModuleFile &ModuleFile,
const RecordData &Record, unsigned& Idx) {
- return ReadSourceLocation(Module, Record[Idx++]);
+ return ReadSourceLocation(ModuleFile, Record[Idx++]);
}
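Each module file encodes source locations with offsets local to itself; SLocRemap maps a range of local offsets to the signed delta into the global source-location space, so decoding is one range lookup plus an addition. A worked example with illustrative numbers:

    // If this module file's entries were placed at global offset 1000 and
    // the record stores local raw offset 42, the remap table yields
    // Remap == 1000, and the decoded location is 42 + 1000 == 1042.
    SourceLocation Local = SourceLocation::getFromRawEncoding(42);
    int Remap = 1000;                       // from ModuleFile.SLocRemap
    SourceLocation Global = Local.getLocWithOffset(Remap);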
/// \brief Read a source range.
- SourceRange ReadSourceRange(Module &F,
+ SourceRange ReadSourceRange(ModuleFile &F,
const RecordData &Record, unsigned& Idx);
/// \brief Read an integral value
@@ -1201,18 +1401,18 @@ public:
/// \brief Read a version tuple.
VersionTuple ReadVersionTuple(const RecordData &Record, unsigned &Idx);
- CXXTemporary *ReadCXXTemporary(Module &F, const RecordData &Record,
+ CXXTemporary *ReadCXXTemporary(ModuleFile &F, const RecordData &Record,
unsigned &Idx);
-
+
/// \brief Reads attributes from the current stream position.
- void ReadAttributes(Module &F, AttrVec &Attrs,
+ void ReadAttributes(ModuleFile &F, AttrVec &Attrs,
const RecordData &Record, unsigned &Idx);
/// \brief Reads a statement.
- Stmt *ReadStmt(Module &F);
+ Stmt *ReadStmt(ModuleFile &F);
/// \brief Reads an expression.
- Expr *ReadExpr(Module &F);
+ Expr *ReadExpr(ModuleFile &F);
/// \brief Reads a sub-statement operand during statement reading.
Stmt *ReadSubStmt() {
@@ -1228,29 +1428,47 @@ public:
Expr *ReadSubExpr();
/// \brief Reads the macro record located at the given offset.
- void ReadMacroRecord(Module &F, uint64_t Offset);
-
+ void ReadMacroRecord(ModuleFile &F, uint64_t Offset);
+
/// \brief Determine the global preprocessed entity ID that corresponds to
/// the given local ID within the given module.
- serialization::PreprocessedEntityID
- getGlobalPreprocessedEntityID(Module &M, unsigned LocalID) const;
-
+ serialization::PreprocessedEntityID
+ getGlobalPreprocessedEntityID(ModuleFile &M, unsigned LocalID) const;
+
/// \brief Note that the identifier is a macro whose record will be loaded
/// from the given AST file at the given (file-local) offset.
- void SetIdentifierIsMacro(IdentifierInfo *II, Module &F,
- uint64_t Offset);
-
+ ///
+ /// \param II The name of the macro.
+ ///
+ /// \param F The module file from which the macro definition was deserialized.
+ ///
+ /// \param Offset The offset into the module file at which the macro
+ /// definition is located.
+ ///
+ /// \param Visible Whether the macro should be made visible.
+ void setIdentifierIsMacro(IdentifierInfo *II, ModuleFile &F,
+ uint64_t Offset, bool Visible);
+
/// \brief Read the set of macros defined by this external macro source.
virtual void ReadDefinedMacros();
/// \brief Read the macro definition for this identifier.
virtual void LoadMacroDefinition(IdentifierInfo *II);
+ /// \brief Update an out-of-date identifier.
+ virtual void updateOutOfDateIdentifier(IdentifierInfo &II);
+
+ /// \brief Note that this identifier is up-to-date.
+ void markIdentifierUpToDate(IdentifierInfo *II);
+
/// \brief Read the macro definition corresponding to this iterator
/// into the unread macro record offsets table.
void LoadMacroDefinition(
llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos);
-
+
+ /// \brief Load all external visible decls in the given DeclContext.
+ void completeVisibleDeclsMap(DeclContext *DC);
+
/// \brief Retrieve the AST context that this AST reader supplements.
ASTContext &getContext() { return Context; }
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
index 7a49e48..4c62385 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
@@ -24,6 +24,8 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include <map>
#include <queue>
@@ -43,11 +45,13 @@ class CXXBaseSpecifier;
class CXXCtorInitializer;
class FPOptions;
class HeaderSearch;
+class IdentifierResolver;
class MacroDefinition;
class MemorizeStatCalls;
class OpaqueValueExpr;
class OpenCLOptions;
class ASTReader;
+class Module;
class PreprocessedEntity;
class PreprocessingRecord;
class Preprocessor;
@@ -57,6 +61,8 @@ class SwitchCase;
class TargetInfo;
class VersionTuple;
+namespace SrcMgr { class SLocEntry; }
+
/// \brief Writes an AST file containing the contents of a translation unit.
///
/// The ASTWriter class produces a bitstream containing the serialized
@@ -80,7 +86,7 @@ private:
/// allow for the const/volatile qualifiers.
///
/// Keys in the map never have const/volatile qualifiers.
- typedef llvm::DenseMap<QualType, serialization::TypeIdx,
+ typedef llvm::DenseMap<QualType, serialization::TypeIdx,
serialization::UnsafeQualTypeDenseMapInfo>
TypeIdxMap;
@@ -89,33 +95,42 @@ private:
/// \brief The ASTContext we're writing.
ASTContext *Context;
-
+
+ /// \brief The preprocessor we're writing.
+ Preprocessor *PP;
+
/// \brief The reader of existing AST files, if we're chaining.
ASTReader *Chain;
-
- /// \brief Indicates when the AST writing is actively performing
+
+ /// \brief The module we're currently writing, if any.
+ Module *WritingModule;
+
+ /// \brief Indicates when the AST writing is actively performing
/// serialization, rather than just queueing updates.
bool WritingAST;
-
+
+ /// \brief Indicates that the AST contained compiler errors.
+ bool ASTHasCompilerErrors;
+
/// \brief Stores a declaration or a type to be written to the AST file.
class DeclOrType {
public:
DeclOrType(Decl *D) : Stored(D), IsType(false) { }
DeclOrType(QualType T) : Stored(T.getAsOpaquePtr()), IsType(true) { }
-
+
bool isType() const { return IsType; }
bool isDecl() const { return !IsType; }
-
+
QualType getType() const {
assert(isType() && "Not a type!");
return QualType::getFromOpaquePtr(Stored);
}
-
+
Decl *getDecl() const {
assert(isDecl() && "Not a decl!");
return static_cast<Decl *>(Stored);
}
-
+
private:
void *Stored;
bool IsType;
@@ -140,7 +155,25 @@ private:
/// \brief Offset of each declaration in the bitstream, indexed by
/// the declaration's ID.
- std::vector<uint32_t> DeclOffsets;
+ std::vector<serialization::DeclOffset> DeclOffsets;
+
+ /// \brief Sorted (by file offset) vector of pairs of file offset/DeclID.
+ typedef SmallVector<std::pair<unsigned, serialization::DeclID>, 64>
+ LocDeclIDsTy;
+ struct DeclIDInFileInfo {
+ LocDeclIDsTy DeclIDs;
+ /// \brief Set when the DeclIDs vectors from all files are joined; it
+ /// indicates the index that this particular vector has in the global one.
+ unsigned FirstDeclIndex;
+ };
+ typedef llvm::DenseMap<const SrcMgr::SLocEntry *,
+ DeclIDInFileInfo *> FileDeclIDsTy;
+
+ /// \brief Map from file SLocEntries to info about the file-level declarations
+ /// that it contains.
+ FileDeclIDsTy FileDeclIDs;
+
+ void associateDeclWithFile(const Decl *D, serialization::DeclID);
/// \brief The first ID number we can use for our own types.
serialization::TypeID FirstTypeID;
@@ -177,14 +210,32 @@ private:
/// IdentifierInfo.
llvm::DenseMap<const IdentifierInfo *, serialization::IdentID> IdentifierIDs;
+ /// @name FlushStmt Caches
+ /// @{
+
+ /// \brief Set of parent Stmts for the currently serializing sub stmt.
+ llvm::DenseSet<Stmt *> ParentStmts;
+
+ /// \brief Offsets of sub stmts already serialized. The offset points
+ /// just after the stmt record.
+ llvm::DenseMap<Stmt *, uint64_t> SubStmtEntries;
+
+ /// @}
+
/// \brief Offsets of each of the identifier IDs into the identifier
/// table.
std::vector<uint32_t> IdentifierOffsets;
+ /// \brief The first ID number we can use for our own submodules.
+ serialization::SubmoduleID FirstSubmoduleID;
+
+ /// \brief The submodule ID that will be assigned to the next new submodule.
+ serialization::SubmoduleID NextSubmoduleID;
+
/// \brief The first ID number we can use for our own selectors.
serialization::SelectorID FirstSelectorID;
- /// \brief The selector ID that will be assigned to the next new identifier.
+ /// \brief The selector ID that will be assigned to the next new selector.
serialization::SelectorID NextSelectorID;
/// \brief Map that provides the ID numbers of each Selector.
@@ -193,7 +244,7 @@ private:
/// \brief Offset of each selector within the method pool/selector
/// table, indexed by the Selector ID (-1).
std::vector<uint32_t> SelectorOffsets;
-
+
/// \brief Offsets of each of the macro identifiers into the
/// bitstream.
///
@@ -204,7 +255,7 @@ private:
/// \brief The set of identifiers that had macro definitions at some point.
std::vector<const IdentifierInfo *> DeserializedMacroNames;
-
+
/// \brief Mapping from macro definitions (as they occur in the preprocessing
/// record) to the macro IDs.
llvm::DenseMap<const MacroDefinition *, serialization::PreprocessedEntityID>
@@ -220,7 +271,7 @@ private:
/// \brief Map of first declarations from a chained PCH that point to the
/// most recent declarations in another PCH.
FirstLatestDeclMap FirstLatestDecls;
-
+
/// \brief Declarations encountered that might be external
/// definitions.
///
@@ -238,31 +289,30 @@ private:
/// \brief DeclContexts that have received extensions since their serialized
/// form.
///
- /// For namespaces, when we're chaining and encountering a namespace, we check if
- /// its primary namespace comes from the chain. If it does, we add the primary
- /// to this set, so that we can write out lexical content updates for it.
+ /// For namespaces, when we're chaining and encountering a namespace, we check
+ /// if its primary namespace comes from the chain. If it does, we add the
+ /// primary to this set, so that we can write out lexical content updates for
+ /// it.
llvm::SmallPtrSet<const DeclContext *, 16> UpdatedDeclContexts;
typedef llvm::SmallPtrSet<const Decl *, 16> DeclsToRewriteTy;
/// \brief Decls that will be replaced in the current dependent AST file.
DeclsToRewriteTy DeclsToRewrite;
- struct ChainedObjCCategoriesData {
- /// \brief The interface in the imported module.
- const ObjCInterfaceDecl *Interface;
- /// \brief The local tail category ID that got chained to the imported
- /// interface.
- const ObjCCategoryDecl *TailCategory;
-
- /// \brief ID corresponding to \c Interface.
- serialization::DeclID InterfaceID;
-
- /// \brief ID corresponding to TailCategoryID.
- serialization::DeclID TailCategoryID;
+ /// \brief The set of Objective-C class that have categories we
+ /// should serialize.
+ llvm::SetVector<ObjCInterfaceDecl *> ObjCClassesWithCategories;
+
+ struct ReplacedDeclInfo {
+ serialization::DeclID ID;
+ uint64_t Offset;
+ unsigned Loc;
+
+ ReplacedDeclInfo() : ID(0), Offset(0), Loc(0) {}
+ ReplacedDeclInfo(serialization::DeclID ID, uint64_t Offset,
+ SourceLocation Loc)
+ : ID(ID), Offset(Offset), Loc(Loc.getRawEncoding()) {}
};
- /// \brief ObjC categories that got chained to an interface imported from
- /// another module.
- SmallVector<ChainedObjCCategoriesData, 16> LocalChainedObjCCategories;
/// \brief Decls that have been replaced in the current dependent AST file.
///
@@ -270,23 +320,24 @@ private:
/// happen, but the ObjC AST nodes are designed this way), it will be
/// serialized again. In this case, it is registered here, so that the reader
/// knows to read the updated version.
- SmallVector<std::pair<serialization::DeclID, uint64_t>, 16>
- ReplacedDecls;
-
+ SmallVector<ReplacedDeclInfo, 16> ReplacedDecls;
+
+ /// \brief The set of declarations that may have redeclaration chains that
+ /// need to be serialized.
+ llvm::SetVector<Decl *, llvm::SmallVector<Decl *, 4>,
+ llvm::SmallPtrSet<Decl *, 4> > Redeclarations;
+
/// \brief Statements that we've encountered while serializing a
/// declaration or type.
SmallVector<Stmt *, 16> StmtsToEmit;
/// \brief Statements collection to use for ASTWriter::AddStmt().
- /// It will point to StmtsToEmit unless it is overriden.
+ /// It will point to StmtsToEmit unless it is overridden.
SmallVector<Stmt *, 16> *CollectedStmts;
/// \brief Mapping from SwitchCase statements to IDs.
std::map<SwitchCase *, unsigned> SwitchCaseIDs;
- /// \brief Mapping from OpaqueValueExpr expressions to IDs.
- llvm::DenseMap<OpaqueValueExpr *, unsigned> OpaqueValues;
-
/// \brief The number of statements written to the AST file.
unsigned NumStatements;
@@ -303,35 +354,44 @@ private:
/// \brief The offset of each CXXBaseSpecifier set within the AST.
SmallVector<uint32_t, 4> CXXBaseSpecifiersOffsets;
-
+
/// \brief The first ID number we can use for our own base specifiers.
serialization::CXXBaseSpecifiersID FirstCXXBaseSpecifiersID;
-
- /// \brief The base specifiers ID that will be assigned to the next new
+
+ /// \brief The base specifiers ID that will be assigned to the next new
/// set of C++ base specifiers.
serialization::CXXBaseSpecifiersID NextCXXBaseSpecifiersID;
- /// \brief A set of C++ base specifiers that is queued to be written into the
- /// AST file.
+ /// \brief A set of C++ base specifiers that is queued to be written into the
+ /// AST file.
struct QueuedCXXBaseSpecifiers {
QueuedCXXBaseSpecifiers() : ID(), Bases(), BasesEnd() { }
-
+
QueuedCXXBaseSpecifiers(serialization::CXXBaseSpecifiersID ID,
CXXBaseSpecifier const *Bases,
CXXBaseSpecifier const *BasesEnd)
: ID(ID), Bases(Bases), BasesEnd(BasesEnd) { }
-
+
serialization::CXXBaseSpecifiersID ID;
CXXBaseSpecifier const * Bases;
CXXBaseSpecifier const * BasesEnd;
};
-
+
/// \brief Queue of C++ base specifiers to be written to the AST file,
/// in the order they should be written.
SmallVector<QueuedCXXBaseSpecifiers, 2> CXXBaseSpecifiersToWrite;
+
+ /// \brief A mapping from each known submodule to its ID number, which will
+ /// be a positive integer.
+ llvm::DenseMap<Module *, unsigned> SubmoduleIDs;
+
+ /// \brief Retrieve or create a submodule ID for this module.
+ unsigned getSubmoduleID(Module *Mod);
/// \brief Write the given subexpression to the bitstream.
- void WriteSubStmt(Stmt *S);
+ void WriteSubStmt(Stmt *S,
+ llvm::DenseMap<Stmt *, uint64_t> &SubStmtEntries,
+ llvm::DenseSet<Stmt *> &ParentStmts);
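+
After merging, a statement can be reachable from more than one parent, so SubStmtEntries caches the bit offset just past each sub-statement already written; a repeat occurrence is emitted as the STMT_REF_PTR record added to the statement codes earlier in this patch, while ParentStmts guards against emitting a reference to a statement still being serialized. A sketch of that fast path (member names from this header; the body is assumed, not the literal implementation):

    void WriteSubStmtSketch(Stmt *S) {
      llvm::DenseMap<Stmt *, uint64_t>::iterator I = SubStmtEntries.find(S);
      if (I != SubStmtEntries.end() && !ParentStmts.count(S)) {
        RecordData Record;              // local scratch record
        Record.push_back(I->second);    // offset of the earlier record
        Stream.EmitRecord(serialization::STMT_REF_PTR, Record);
        return;
      }
      ParentStmts.insert(S);
      // ... serialize S and its children normally ...
      ParentStmts.erase(S);
      SubStmtEntries[S] = Stream.GetCurrentBitNo();
    }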
void WriteBlockInfoBlock();
void WriteMetadata(ASTContext &Context, StringRef isysroot,
@@ -342,27 +402,32 @@ private:
const Preprocessor &PP,
StringRef isysroot);
void WritePreprocessor(const Preprocessor &PP, bool IsModule);
- void WriteHeaderSearch(HeaderSearch &HS, StringRef isysroot);
+ void WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot);
void WritePreprocessorDetail(PreprocessingRecord &PPRec);
+ void WriteSubmodules(Module *WritingModule);
+
void WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag);
void WriteCXXBaseSpecifiersOffsets();
void WriteType(QualType T);
uint64_t WriteDeclContextLexicalBlock(ASTContext &Context, DeclContext *DC);
uint64_t WriteDeclContextVisibleBlock(ASTContext &Context, DeclContext *DC);
void WriteTypeDeclOffsets();
+ void WriteFileDeclIDsMap();
void WriteSelectors(Sema &SemaRef);
void WriteReferencedSelectorsPool(Sema &SemaRef);
- void WriteIdentifierTable(Preprocessor &PP, bool IsModule);
+ void WriteIdentifierTable(Preprocessor &PP, IdentifierResolver &IdResolver,
+ bool IsModule);
void WriteAttributes(const AttrVec &Attrs, RecordDataImpl &Record);
void ResolveDeclUpdatesBlocks();
void WriteDeclUpdatesBlocks();
void WriteDeclReplacementsBlock();
- void ResolveChainedObjCCategories();
- void WriteChainedObjCCategories();
void WriteDeclContextVisibleUpdate(const DeclContext *DC);
void WriteFPPragmaOptions(const FPOptions &Opts);
void WriteOpenCLExtensions(Sema &SemaRef);
-
+ void WriteObjCCategories();
+ void WriteRedeclarations();
+ void WriteMergedDecls();
+
unsigned DeclParmVarAbbrev;
unsigned DeclContextLexicalAbbrev;
unsigned DeclContextVisibleLookupAbbrev;
@@ -382,13 +447,14 @@ private:
void WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
StringRef isysroot, const std::string &OutputFile,
- bool IsModule);
-
+ Module *WritingModule);
+
public:
/// \brief Create a new precompiled header writer that outputs to
/// the given bitstream.
ASTWriter(llvm::BitstreamWriter &Stream);
-
+ ~ASTWriter();
+
/// \brief Write a precompiled header for the given semantic analysis.
///
/// \param SemaRef a reference to the semantic analysis object that processed
@@ -397,21 +463,22 @@ public:
/// \param StatCalls the object that cached all of the stat() calls made while
/// searching for source files and headers.
///
- /// \param IsModule Whether we're writing a module (otherwise, we're writing a
- /// precompiled header).
+ /// \param WritingModule The module that we are writing. If null, we are
+ /// writing a precompiled header.
///
/// \param isysroot if non-empty, write a relocatable file whose headers
/// are relative to the given system root.
void WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
const std::string &OutputFile,
- bool IsModule, StringRef isysroot);
+ Module *WritingModule, StringRef isysroot,
+ bool hasErrors = false);
/// \brief Emit a source location.
void AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record);
/// \brief Emit a source range.
void AddSourceRange(SourceRange Range, RecordDataImpl &Record);
-
+
/// \brief Emit an integral value.
void AddAPInt(const llvm::APInt &Value, RecordDataImpl &Record);
@@ -434,10 +501,10 @@ public:
void AddCXXBaseSpecifiersRef(CXXBaseSpecifier const *Bases,
CXXBaseSpecifier const *BasesEnd,
RecordDataImpl &Record);
-
+
/// \brief Get the unique number used to refer to the given selector.
serialization::SelectorID getSelectorRef(Selector Sel);
-
+
/// \brief Get the unique number used to refer to the given identifier.
serialization::IdentID getIdentifierRef(const IdentifierInfo *II);
@@ -450,7 +517,7 @@ public:
"Identifier does not name a macro");
return MacroOffsets[II];
}
-
+
/// \brief Emit a reference to a type.
void AddTypeRef(QualType T, RecordDataImpl &Record);
@@ -484,7 +551,7 @@ public:
/// \brief Emit a reference to a declaration.
void AddDeclRef(const Decl *D, RecordDataImpl &Record);
-
+
/// \brief Force a declaration to be emitted and get its ID.
serialization::DeclID GetDeclRef(const Decl *D);
@@ -505,9 +572,9 @@ public:
void AddNestedNameSpecifier(NestedNameSpecifier *NNS, RecordDataImpl &Record);
/// \brief Emit a nested name specifier with source-location information.
- void AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
+ void AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
RecordDataImpl &Record);
-
+
/// \brief Emit a template name.
void AddTemplateName(TemplateName Name, RecordDataImpl &Record);
@@ -526,7 +593,8 @@ public:
void AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordDataImpl &Record);
/// \brief Emit a C++ base specifier.
- void AddCXXBaseSpecifier(const CXXBaseSpecifier &Base, RecordDataImpl &Record);
+ void AddCXXBaseSpecifier(const CXXBaseSpecifier &Base,
+ RecordDataImpl &Record);
/// \brief Emit a CXXCtorInitializer array.
void AddCXXCtorInitializers(
@@ -549,10 +617,16 @@ public:
void RewriteDecl(const Decl *D) {
DeclsToRewrite.insert(D);
- // Reset the flag, so that we don't add this decl multiple times.
- const_cast<Decl *>(D)->setChangedSinceDeserialization(false);
}
+ bool isRewritten(const Decl *D) const {
+ return DeclsToRewrite.count(D);
+ }
+
+ /// \brief Infer the submodule ID that contains an entity at the given
+ /// source location.
+ serialization::SubmoduleID inferSubmoduleIDFromLocation(SourceLocation Loc);
+
/// \brief Note that the identifier II occurs at the given offset
/// within the identifier table.
void SetIdentifierOffset(const IdentifierInfo *II, uint32_t Offset);
@@ -577,10 +651,10 @@ public:
/// been added to the queue via AddStmt().
void FlushStmts();
- /// \brief Flush all of the C++ base specifier sets that have been added
+ /// \brief Flush all of the C++ base specifier sets that have been added
/// via \c AddCXXBaseSpecifiersRef().
void FlushCXXBaseSpecifiers();
-
+
/// \brief Record an ID for the given switch-case statement.
unsigned RecordSwitchCaseID(SwitchCase *S);
@@ -589,9 +663,6 @@ public:
void ClearSwitchCaseIDs();
- /// \brief Retrieve the ID for the given opaque value expression.
- unsigned getOpaqueValueID(OpaqueValueExpr *e);
-
unsigned getDeclParmVarAbbrev() const { return DeclParmVarAbbrev; }
unsigned getDeclRefExprAbbrev() const { return DeclRefExprAbbrev; }
unsigned getCharacterLiteralAbbrev() const { return CharacterLiteralAbbrev; }
@@ -609,11 +680,12 @@ public:
void ReaderInitialized(ASTReader *Reader);
void IdentifierRead(serialization::IdentID ID, IdentifierInfo *II);
void TypeRead(serialization::TypeIdx Idx, QualType T);
- void DeclRead(serialization::DeclID ID, const Decl *D);
void SelectorRead(serialization::SelectorID ID, Selector Sel);
void MacroDefinitionRead(serialization::PreprocessedEntityID ID,
MacroDefinition *MD);
-
+ void MacroVisible(IdentifierInfo *II);
+ void ModuleRead(serialization::SubmoduleID ID, Module *Mod);
+
// ASTMutationListener implementation.
virtual void CompletedTagDefinition(const TagDecl *D);
virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D);
@@ -626,6 +698,9 @@ public:
virtual void StaticDataMemberInstantiated(const VarDecl *D);
virtual void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
const ObjCInterfaceDecl *IFD);
+ virtual void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt);
};
/// \brief AST and semantic-analysis consumer that generates a
@@ -633,12 +708,12 @@ public:
class PCHGenerator : public SemaConsumer {
const Preprocessor &PP;
std::string OutputFile;
- bool IsModule;
+ clang::Module *Module;
std::string isysroot;
raw_ostream *Out;
Sema *SemaPtr;
MemorizeStatCalls *StatCalls; // owned by the FileManager
- std::vector<unsigned char> Buffer;
+ llvm::SmallVector<char, 128> Buffer;
llvm::BitstreamWriter Stream;
ASTWriter Writer;
@@ -647,8 +722,8 @@ protected:
const ASTWriter &getWriter() const { return Writer; }
public:
- PCHGenerator(const Preprocessor &PP, StringRef OutputFile,
- bool IsModule,
+ PCHGenerator(const Preprocessor &PP, StringRef OutputFile,
+ clang::Module *Module,
StringRef isysroot, raw_ostream *Out);
~PCHGenerator();
virtual void InitializeSema(Sema &S) { SemaPtr = &S; }
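A minimal caller sketch (hypothetical, not part of the patch) of the revised WriteAST entry point above: the old `bool IsModule` flag becomes a `clang::Module` pointer, with null meaning "emit a precompiled header".

```cpp
#include "clang/Serialization/ASTWriter.h"

// Hypothetical caller; only the WriteAST signature comes from the patch.
void emitAST(clang::ASTWriter &Writer, clang::Sema &S,
             clang::MemorizeStatCalls *StatCalls,
             const std::string &OutputFile, clang::Module *WritingModule) {
  // WritingModule == 0 writes a precompiled header; a non-null pointer
  // writes the given module (this replaces the old 'bool IsModule' flag).
  Writer.WriteAST(S, StatCalls, OutputFile, WritingModule,
                  /*isysroot=*/"");
}
```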
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h b/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h
index 7f78320..f368a80 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h
@@ -68,6 +68,16 @@ public:
"Must insert keys in order.");
Rep.push_back(Val);
}
+
+ void insertOrReplace(const value_type &Val) {
+ iterator I = std::lower_bound(Rep.begin(), Rep.end(), Val, Compare());
+ if (I != Rep.end() && I->first == Val.first) {
+ I->second = Val.second;
+ return;
+ }
+
+ Rep.insert(I, Val);
+ }
typedef typename Representation::iterator iterator;
typedef typename Representation::const_iterator const_iterator;
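For clarity, a standalone sketch of the insertOrReplace idiom added above, restated over a plain sorted std::vector (the names here are illustrative, not from the patch):

```cpp
#include <algorithm>
#include <utility>
#include <vector>

// Keep a vector sorted by key; replace the value if the key already
// exists, otherwise insert at the position that preserves the ordering.
typedef std::pair<unsigned, int> Entry;

static bool keyLess(const Entry &A, const Entry &B) {
  return A.first < B.first;
}

void insertOrReplace(std::vector<Entry> &Rep, const Entry &Val) {
  std::vector<Entry>::iterator I =
      std::lower_bound(Rep.begin(), Rep.end(), Val, keyLess);
  if (I != Rep.end() && I->first == Val.first) {
    I->second = Val.second;   // key already present: overwrite the value
    return;
  }
  Rep.insert(I, Val);         // key absent: insert, keeping Rep sorted
}
```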
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/Module.h b/contrib/llvm/tools/clang/include/clang/Serialization/Module.h
index 42b5a58..4c93c33 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/Module.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/Module.h
@@ -23,12 +23,13 @@
#include "llvm/Bitcode/BitstreamReader.h"
#include <string>
-namespace clang {
+namespace clang {
class DeclContext;
+class Module;
namespace serialization {
-
+
/// \brief Specifies the kind of module that has been loaded.
enum ModuleKind {
MK_Module, ///< File is a module proper.
@@ -39,9 +40,9 @@ enum ModuleKind {
/// \brief Information about the contents of a DeclContext.
struct DeclContextInfo {
- DeclContextInfo()
+ DeclContextInfo()
: NameLookupTableData(), LexicalDecls(), NumLexicalDecls() {}
-
+
void *NameLookupTableData; // an ASTDeclContextNameLookupTable.
const KindDeclIDPair *LexicalDecls;
unsigned NumLexicalDecls;
@@ -49,265 +50,303 @@ struct DeclContextInfo {
/// \brief Information about a module that has been loaded by the ASTReader.
///
-/// Each instance of the Module class corresponds to a single AST file, which
-/// may be a precompiled header, precompiled preamble, a module, or an AST file
-/// of some sort loaded as the main file, all of which are specific formulations of
-/// the general notion of a "module". A module may depend on any number of
+/// Each instance of the Module class corresponds to a single AST file, which
+/// may be a precompiled header, precompiled preamble, a module, or an AST file
+/// of some sort loaded as the main file, all of which are specific formulations
+/// of the general notion of a "module". A module may depend on any number of
/// other modules.
-class Module {
-public:
- Module(ModuleKind Kind);
- ~Module();
-
+class ModuleFile {
+public:
+ ModuleFile(ModuleKind Kind, unsigned Generation);
+ ~ModuleFile();
+
// === General information ===
-
+
/// \brief The type of this module.
ModuleKind Kind;
-
+
/// \brief The file name of the module file.
std::string FileName;
-
+
/// \brief Whether this module has been directly imported by the
/// user.
bool DirectlyImported;
+
+ /// \brief The generation of which this module file is a part.
+ unsigned Generation;
/// \brief The memory buffer that stores the data associated with
/// this AST file.
- llvm::OwningPtr<llvm::MemoryBuffer> Buffer;
-
+ OwningPtr<llvm::MemoryBuffer> Buffer;
+
/// \brief The size of this file, in bits.
uint64_t SizeInBits;
-
+
/// \brief The global bit offset (or base) of this module
uint64_t GlobalBitOffset;
-
+
/// \brief The bitstream reader from which we'll read the AST file.
llvm::BitstreamReader StreamFile;
-
+
/// \brief The main bitstream cursor for the main block.
llvm::BitstreamCursor Stream;
-
+
/// \brief The source location where this module was first imported.
SourceLocation ImportLoc;
-
+
/// \brief The first source location in this module.
SourceLocation FirstLoc;
-
+
// === Source Locations ===
-
+
/// \brief Cursor used to read source location entries.
llvm::BitstreamCursor SLocEntryCursor;
-
+
/// \brief The number of source location entries in this AST file.
unsigned LocalNumSLocEntries;
-
+
/// \brief The base ID in the source manager's view of this module.
int SLocEntryBaseID;
-
+
/// \brief The base offset in the source manager's view of this module.
unsigned SLocEntryBaseOffset;
-
+
/// \brief Offsets for all of the source location entries in the
/// AST file.
const uint32_t *SLocEntryOffsets;
-
+
/// \brief SLocEntries that we're going to preload.
SmallVector<uint64_t, 4> PreloadSLocEntries;
/// \brief The number of source location file entries in this AST file.
unsigned LocalNumSLocFileEntries;
-
+
/// \brief Offsets for all of the source location file entries in the
/// AST file.
const uint32_t *SLocFileOffsets;
-
+
/// \brief Remapping table for source locations in this module.
ContinuousRangeMap<uint32_t, int, 2> SLocRemap;
-
+
// === Identifiers ===
-
+
/// \brief The number of identifiers in this AST file.
unsigned LocalNumIdentifiers;
-
+
/// \brief Offsets into the identifier table data.
///
/// This array is indexed by the identifier ID (-1), and provides
/// the offset into IdentifierTableData where the string data is
/// stored.
const uint32_t *IdentifierOffsets;
-
+
/// \brief Base identifier ID for identifiers local to this module.
serialization::IdentID BaseIdentifierID;
-
+
/// \brief Remapping table for identifier IDs in this module.
ContinuousRangeMap<uint32_t, int, 2> IdentifierRemap;
-
+
/// \brief Actual data for the on-disk hash table of identifiers.
///
/// This pointer points into a memory buffer, where the on-disk hash
/// table for identifiers actually lives.
const char *IdentifierTableData;
-
+
/// \brief A pointer to an on-disk hash table of opaque type
/// IdentifierHashTable.
void *IdentifierLookupTable;
-
+
// === Macros ===
-
+
/// \brief The cursor to the start of the preprocessor block, which stores
/// all of the macro definitions.
llvm::BitstreamCursor MacroCursor;
-
+
/// \brief The offset of the start of the set of defined macros.
uint64_t MacroStartOffset;
-
+
// === Detailed PreprocessingRecord ===
-
- /// \brief The cursor to the start of the (optional) detailed preprocessing
+
+ /// \brief The cursor to the start of the (optional) detailed preprocessing
/// record block.
llvm::BitstreamCursor PreprocessorDetailCursor;
-
+
/// \brief The offset of the start of the preprocessor detail cursor.
uint64_t PreprocessorDetailStartOffset;
-
- /// \brief Base preprocessed entity ID for preprocessed entities local to
+
+ /// \brief Base preprocessed entity ID for preprocessed entities local to
/// this module.
serialization::PreprocessedEntityID BasePreprocessedEntityID;
-
+
/// \brief Remapping table for preprocessed entity IDs in this module.
ContinuousRangeMap<uint32_t, int, 2> PreprocessedEntityRemap;
-
+
const PPEntityOffset *PreprocessedEntityOffsets;
unsigned NumPreprocessedEntities;
-
+
// === Header search information ===
-
+
/// \brief The number of local HeaderFileInfo structures.
unsigned LocalNumHeaderFileInfos;
-
- /// \brief Actual data for the on-disk hash table of header file
+
+ /// \brief Actual data for the on-disk hash table of header file
/// information.
///
/// This pointer points into a memory buffer, where the on-disk hash
/// table for header file information actually lives.
const char *HeaderFileInfoTableData;
-
+
/// \brief The on-disk hash table that contains information about each of
/// the header files.
void *HeaderFileInfoTable;
-
+
/// \brief Actual data for the list of framework names used in the header
/// search information.
const char *HeaderFileFrameworkStrings;
+
+ // === Submodule information ===
+ /// \brief The number of submodules in this module.
+ unsigned LocalNumSubmodules;
- // === Selectors ===
+ /// \brief Base submodule ID for submodules local to this module.
+ serialization::SubmoduleID BaseSubmoduleID;
+ /// \brief Remapping table for submodule IDs in this module.
+ ContinuousRangeMap<uint32_t, int, 2> SubmoduleRemap;
+
+ // === Selectors ===
+
/// \brief The number of selectors new to this file.
///
/// This is the number of entries in SelectorOffsets.
unsigned LocalNumSelectors;
-
+
/// \brief Offsets into the selector lookup table's data array
/// where each selector resides.
const uint32_t *SelectorOffsets;
-
+
/// \brief Base selector ID for selectors local to this module.
serialization::SelectorID BaseSelectorID;
-
+
/// \brief Remapping table for selector IDs in this module.
ContinuousRangeMap<uint32_t, int, 2> SelectorRemap;
-
+
/// \brief A pointer to the character data that comprises the selector table
///
/// The SelectorOffsets table refers into this memory.
const unsigned char *SelectorLookupTableData;
-
+
/// \brief A pointer to an on-disk hash table of opaque type
/// ASTSelectorLookupTable.
///
/// This hash table provides the IDs of all selectors, and the associated
/// instance and factory methods.
void *SelectorLookupTable;
-
+
// === Declarations ===
-
+
/// DeclsCursor - This is a cursor to the start of the DECLS_BLOCK block. It
/// has read all the abbreviations at the start of the block and is ready to
/// jump around with these in context.
llvm::BitstreamCursor DeclsCursor;
-
+
/// \brief The number of declarations in this AST file.
unsigned LocalNumDecls;
-
+
/// \brief Offset of each declaration within the bitstream, indexed
/// by the declaration ID (-1).
- const uint32_t *DeclOffsets;
-
+ const DeclOffset *DeclOffsets;
+
/// \brief Base declaration ID for declarations local to this module.
serialization::DeclID BaseDeclID;
-
+
/// \brief Remapping table for declaration IDs in this module.
ContinuousRangeMap<uint32_t, int, 2> DeclRemap;
-
+
+ /// \brief Mapping from the module files that this module file depends on
+ /// to the base declaration ID for that module as it is understood within this
+ /// module.
+ ///
+ /// This is effectively a reverse global-to-local mapping for declaration
+ /// IDs, so that we can interpret a true global ID (for this translation unit)
+ /// as a local ID (for this module file).
+ llvm::DenseMap<ModuleFile *, serialization::DeclID> GlobalToLocalDeclIDs;
+
/// \brief The number of C++ base specifier sets in this AST file.
unsigned LocalNumCXXBaseSpecifiers;
-
+
/// \brief Offset of each C++ base specifier set within the bitstream,
/// indexed by the C++ base specifier set ID (-1).
const uint32_t *CXXBaseSpecifiersOffsets;
-
+
typedef llvm::DenseMap<const DeclContext *, DeclContextInfo>
DeclContextInfosMap;
-
+
/// \brief Information about the lexical and visible declarations
/// for each DeclContext.
DeclContextInfosMap DeclContextInfos;
- typedef llvm::DenseMap<serialization::GlobalDeclID,
- std::pair<serialization::LocalDeclID, serialization::LocalDeclID> >
- ChainedObjCCategoriesMap;
- /// \brief ObjC categories that got chained to an interface from another
- /// module.
- /// Key is the ID of the interface.
- /// Value is a pair of linked category DeclIDs (head category, tail category).
- ChainedObjCCategoriesMap ChainedObjCCategories;
+ /// \brief Array of file-level DeclIDs sorted by file.
+ const serialization::DeclID *FileSortedDecls;
+
+ /// \brief Array of redeclaration chain location information within this
+ /// module file, sorted by the first declaration ID.
+ const serialization::LocalRedeclarationsInfo *RedeclarationsMap;
+
+ /// \brief The number of redeclaration info entries in RedeclarationsMap.
+ unsigned LocalNumRedeclarationsInMap;
- // === Types ===
+ /// \brief The redeclaration chains for declarations local to this
+ /// module file.
+ SmallVector<uint64_t, 1> RedeclarationChains;
+
+ /// \brief Array of category list location information within this
+ /// module file, sorted by the definition ID.
+ const serialization::ObjCCategoriesInfo *ObjCCategoriesMap;
+ /// \brief The number of redeclaration info entries in ObjCCategoriesMap.
+ unsigned LocalNumObjCCategoriesInMap;
+
+ /// \brief The Objective-C category lists for categories known to this
+ /// module.
+ SmallVector<uint64_t, 1> ObjCCategories;
+
+ // === Types ===
+
/// \brief The number of types in this AST file.
unsigned LocalNumTypes;
-
+
/// \brief Offset of each type within the bitstream, indexed by the
/// type ID, or the representation of a Type*.
const uint32_t *TypeOffsets;
-
- /// \brief Base type ID for types local to this module as represented in
+
+ /// \brief Base type ID for types local to this module as represented in
/// the global type ID space.
serialization::TypeID BaseTypeIndex;
-
+
/// \brief Remapping table for type IDs in this module.
ContinuousRangeMap<uint32_t, int, 2> TypeRemap;
-
+
// === Miscellaneous ===
-
+
/// \brief Diagnostic IDs and their mappings that the user changed.
SmallVector<uint64_t, 8> PragmaDiagMappings;
-
+
/// \brief The AST stat cache installed for this file, if any.
///
/// The dynamic type of this stat cache is always ASTStatCache
void *StatCache;
-
+
/// \brief List of modules which depend on this module
- llvm::SetVector<Module *> ImportedBy;
-
+ llvm::SetVector<ModuleFile *> ImportedBy;
+
/// \brief List of modules which this module depends on
- llvm::SetVector<Module *> Imports;
-
+ llvm::SetVector<ModuleFile *> Imports;
+
/// \brief Determine whether this module was directly imported at
/// any point during translation.
bool isDirectlyImported() const { return DirectlyImported; }
-
+
/// \brief Dump debugging output for this module.
void dump();
};
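A minimal illustrative helper (not in the patch) showing how the renamed ModuleFile's Imports set can be walked depth-first; the SmallPtrSet guard against revisiting shared dependencies is an assumption of this sketch:

```cpp
#include "clang/Serialization/Module.h"
#include "llvm/ADT/SmallPtrSet.h"

using clang::serialization::ModuleFile;

// Depth-first walk over ModuleFile::Imports, visiting each file once.
void visitImports(ModuleFile &MF, llvm::SmallPtrSet<ModuleFile *, 16> &Seen) {
  for (unsigned I = 0, N = MF.Imports.size(); I != N; ++I) {
    ModuleFile *Dep = MF.Imports[I];
    if (Seen.count(Dep))
      continue;               // already reached through another path
    Seen.insert(Dep);
    visitImports(*Dep, Seen); // recurse into this dependency's own imports
  }
}
```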
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h b/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h
index f86915a..6ff0640 100644
--- a/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h
@@ -27,10 +27,10 @@ namespace serialization {
class ModuleManager {
/// \brief The chain of AST files. The first entry is the one named by the
/// user, the last one is the one that doesn't depend on anything further.
- llvm::SmallVector<Module*, 2> Chain;
+ llvm::SmallVector<ModuleFile*, 2> Chain;
/// \brief All loaded modules, indexed by name.
- llvm::DenseMap<const FileEntry *, Module *> Modules;
+ llvm::DenseMap<const FileEntry *, ModuleFile *> Modules;
/// \brief FileManager that handles translating between filenames and
/// FileEntry *.
@@ -40,9 +40,9 @@ class ModuleManager {
llvm::DenseMap<const FileEntry *, llvm::MemoryBuffer *> InMemoryBuffers;
public:
- typedef SmallVector<Module*, 2>::iterator ModuleIterator;
- typedef SmallVector<Module*, 2>::const_iterator ModuleConstIterator;
- typedef SmallVector<Module*, 2>::reverse_iterator ModuleReverseIterator;
+ typedef SmallVector<ModuleFile*, 2>::iterator ModuleIterator;
+ typedef SmallVector<ModuleFile*, 2>::const_iterator ModuleConstIterator;
+ typedef SmallVector<ModuleFile*, 2>::reverse_iterator ModuleReverseIterator;
typedef std::pair<uint32_t, StringRef> ModuleOffset;
ModuleManager(const FileSystemOptions &FSO);
@@ -68,17 +68,17 @@ public:
/// \brief Returns the primary module associated with the manager, that is,
/// the first module loaded
- Module &getPrimaryModule() { return *Chain[0]; }
+ ModuleFile &getPrimaryModule() { return *Chain[0]; }
/// \brief Returns the primary module associated with the manager, that is,
/// the first module loaded.
- Module &getPrimaryModule() const { return *Chain[0]; }
+ ModuleFile &getPrimaryModule() const { return *Chain[0]; }
/// \brief Returns the module associated with the given index
- Module &operator[](unsigned Index) const { return *Chain[Index]; }
+ ModuleFile &operator[](unsigned Index) const { return *Chain[Index]; }
/// \brief Returns the module associated with the given name
- Module *lookup(StringRef Name);
+ ModuleFile *lookup(StringRef Name);
/// \brief Returns the in-memory (virtual file) buffer with the given name
llvm::MemoryBuffer *lookupBuffer(StringRef Name);
@@ -95,14 +95,16 @@ public:
/// \param ImportedBy The module that is importing this module, or NULL if
/// this module is imported directly by the user.
///
+ /// \param Generation The generation in which this module was loaded.
+ ///
/// \param ErrorStr Will be set to a non-empty string if any errors occurred
/// while trying to load the module.
///
/// \return A pointer to the module that corresponds to this file name,
/// and a boolean indicating whether the module was newly added.
- std::pair<Module *, bool>
- addModule(StringRef FileName, ModuleKind Type, Module *ImportedBy,
- std::string &ErrorStr);
+ std::pair<ModuleFile *, bool>
+ addModule(StringRef FileName, ModuleKind Type, ModuleFile *ImportedBy,
+ unsigned Generation, std::string &ErrorStr);
  /// \brief Add an in-memory buffer to the list of known buffers
void addInMemoryBuffer(StringRef FileName, llvm::MemoryBuffer *Buffer);
@@ -125,7 +127,7 @@ public:
///
/// \param UserData User data associated with the visitor object, which
/// will be passed along to the visitor.
- void visit(bool (*Visitor)(Module &M, void *UserData), void *UserData);
+ void visit(bool (*Visitor)(ModuleFile &M, void *UserData), void *UserData);
/// \brief Visit each of the modules with a depth-first traversal.
///
@@ -143,7 +145,7 @@ public:
///
  /// \param UserData User data associated with the visitor object,
  /// which will be passed along to the visitor.
- void visitDepthFirst(bool (*Visitor)(Module &M, bool Preorder,
+ void visitDepthFirst(bool (*Visitor)(ModuleFile &M, bool Preorder,
void *UserData),
void *UserData);
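A hypothetical use of the retyped visit() callback above, threading state through UserData. Only the signature comes from the patch; the return-value convention noted in the comment is an assumption of this sketch:

```cpp
#include "clang/Serialization/ModuleManager.h"

using clang::serialization::ModuleFile;
using clang::serialization::ModuleManager;

// Hypothetical visitor: count the loaded module files via UserData.
static bool countModuleFile(ModuleFile &M, void *UserData) {
  ++*static_cast<unsigned *>(UserData);
  return false; // assumed convention: false keeps the traversal going
}

unsigned countModuleFiles(ModuleManager &Mgr) {
  unsigned Count = 0;
  Mgr.visit(countModuleFile, &Count);
  return Count;
}
```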
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h
new file mode 100644
index 0000000..e63f814
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h
@@ -0,0 +1,28 @@
+//===--- SerializationDiagnostic.h - Serialization Diagnostics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SERIALIZATIONDIAGNOSTIC_H
+#define LLVM_CLANG_SERIALIZATIONDIAGNOSTIC_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define SERIALIZATIONSTART
+#include "clang/Basic/DiagnosticSerializationKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_SERIALIZATION_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
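The new header relies on the usual X-macro trick: the .inc file is a flat list of DIAG(...) invocations, and each includer defines DIAG to pull out just the field it needs. A self-contained sketch with invented entries:

```cpp
// Invented two-entry list; the real .inc files are generated by TableGen.
#define MY_DIAGS \
  DIAG(err_module_corrupt, "module file is corrupt") \
  DIAG(warn_module_stale, "module file is out of date")

// First expansion: keep only the enumerator name.
enum MyDiagID {
#define DIAG(ENUM, DESC) ENUM,
  MY_DIAGS
#undef DIAG
  NUM_MY_DIAGS
};

// Second expansion of the same list: keep only the message text.
static const char *const MyDiagText[] = {
#define DIAG(ENUM, DESC) DESC,
  MY_DIAGS
#undef DIAG
};
```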
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h
new file mode 100644
index 0000000..9d4251b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h
@@ -0,0 +1,24 @@
+//=--- CommonBugCategories.h - Provides common issue categories -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATIC_ANALYZER_CHECKER_CATEGORIES_H
+#define LLVM_CLANG_STATIC_ANALYZER_CHECKER_CATEGORIES_H
+
+// Common strings used for the "category" of many static analyzer issues.
+namespace clang {
+ namespace ento {
+ namespace categories {
+ extern const char *CoreFoundationObjectiveC;
+ extern const char *MemoryCoreFoundationObjectiveC;
+ extern const char *UnixAPI;
+ }
+ }
+}
+#endif
+
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index bfb7ef8..2b699a8 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -20,10 +20,10 @@
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/ImmutableSet.h"
-#include "llvm/ADT/SmallSet.h"
-#include <list>
+#include "llvm/ADT/DenseSet.h"
namespace clang {
@@ -49,9 +49,10 @@ class BugType;
/// This class provides an interface through which checkers can create
/// individual bug reports.
-class BugReport {
-public:
+class BugReport : public llvm::ilist_node<BugReport> {
+public:
class NodeResolver {
+ virtual void anchor();
public:
virtual ~NodeResolver() {}
virtual const ExplodedNode*
@@ -59,7 +60,8 @@ public:
};
typedef const SourceRange *ranges_iterator;
- typedef llvm::ImmutableList<BugReporterVisitor*>::iterator visitor_iterator;
+ typedef SmallVector<BugReporterVisitor *, 8> VisitorList;
+ typedef VisitorList::iterator visitor_iterator;
typedef SmallVector<StringRef, 2> ExtraTextList;
protected:
@@ -67,32 +69,65 @@ protected:
friend class BugReportEquivClass;
BugType& BT;
+ const Decl *DeclWithIssue;
std::string ShortDescription;
std::string Description;
PathDiagnosticLocation Location;
+ PathDiagnosticLocation UniqueingLocation;
const ExplodedNode *ErrorNode;
SmallVector<SourceRange, 4> Ranges;
ExtraTextList ExtraText;
+
+ typedef llvm::DenseSet<SymbolRef> Symbols;
+ typedef llvm::DenseSet<const MemRegion *> Regions;
+
+ /// A set of symbols that are registered with this report as being
+ /// "interesting", and thus used to help decide which diagnostics
+ /// to include when constructing the final path diagnostic.
+ Symbols interestingSymbols;
+
+ /// A set of regions that are registered with this report as being
+ /// "interesting", and thus used to help decide which diagnostics
+ /// to include when constructing the final path diagnostic.
+ Regions interestingRegions;
- // Not the most efficient data structure, but we use an ImmutableList for the
- // Callbacks because it is safe to make additions to list during iteration.
- llvm::ImmutableList<BugReporterVisitor*>::Factory F;
- llvm::ImmutableList<BugReporterVisitor*> Callbacks;
+ /// A set of custom visitors which generate "event" diagnostics at
+ /// interesting points in the path.
+ VisitorList Callbacks;
+
+ /// Used for ensuring the visitors are only added once.
llvm::FoldingSet<BugReporterVisitor> CallbacksSet;
+ /// Used for clients to tell if the report's configuration has changed
+ /// since the last time they checked.
+ unsigned ConfigurationChangeToken;
+
public:
BugReport(BugType& bt, StringRef desc, const ExplodedNode *errornode)
- : BT(bt), Description(desc), ErrorNode(errornode),
- Callbacks(F.getEmptyList()) {}
+ : BT(bt), DeclWithIssue(0), Description(desc), ErrorNode(errornode),
+ ConfigurationChangeToken(0) {}
BugReport(BugType& bt, StringRef shortDesc, StringRef desc,
const ExplodedNode *errornode)
- : BT(bt), ShortDescription(shortDesc), Description(desc),
- ErrorNode(errornode), Callbacks(F.getEmptyList()) {}
+ : BT(bt), DeclWithIssue(0), ShortDescription(shortDesc), Description(desc),
+ ErrorNode(errornode), ConfigurationChangeToken(0) {}
BugReport(BugType& bt, StringRef desc, PathDiagnosticLocation l)
- : BT(bt), Description(desc), Location(l), ErrorNode(0),
- Callbacks(F.getEmptyList()) {}
+ : BT(bt), DeclWithIssue(0), Description(desc), Location(l), ErrorNode(0),
+ ConfigurationChangeToken(0) {}
+
+ /// \brief Create a BugReport with a custom uniqueing location.
+ ///
+  /// Reports that have the same report location, description, bug type, and
+  /// ranges are uniqued - only one of the equivalent reports will be presented
+  /// to the user. This constructor allows the location used for uniquing to be
+  /// overridden; for example, a memory leak checker could set it to the
+  /// allocation site rather than the location where the bug is reported.
+ BugReport(BugType& bt, StringRef desc, const ExplodedNode *errornode,
+ PathDiagnosticLocation LocationToUnique)
+ : BT(bt), DeclWithIssue(0), Description(desc),
+ UniqueingLocation(LocationToUnique),
+ ErrorNode(errornode), ConfigurationChangeToken(0) {}
virtual ~BugReport();
@@ -107,6 +142,28 @@ public:
return ShortDescription.empty() ? Description : ShortDescription;
}
+ void markInteresting(SymbolRef sym);
+ void markInteresting(const MemRegion *R);
+ void markInteresting(SVal V);
+
+ bool isInteresting(SymbolRef sym) const;
+ bool isInteresting(const MemRegion *R) const;
+ bool isInteresting(SVal V) const;
+
+ unsigned getConfigurationChangeToken() const {
+ return ConfigurationChangeToken;
+ }
+
+ /// Return the canonical declaration, be it a method or class, where
+ /// this issue semantically occurred.
+ const Decl *getDeclWithIssue() const;
+
+  /// Specifically set the Decl where an issue occurred. This isn't necessary
+  /// for BugReports that cover a path, as it will be automatically inferred.
+ void setDeclWithIssue(const Decl *declWithIssue) {
+ DeclWithIssue = declWithIssue;
+ }
+
/// \brief This allows for addition of meta data to the diagnostic.
///
/// Currently, only the HTMLDiagnosticClient knows how to display it.
@@ -162,13 +219,38 @@ public:
virtual void Profile(llvm::FoldingSetNodeID& hash) const;
};
+} // end ento namespace
+} // end clang namespace
+
+namespace llvm {
+ template<> struct ilist_traits<clang::ento::BugReport>
+ : public ilist_default_traits<clang::ento::BugReport> {
+ clang::ento::BugReport *createSentinel() const {
+ return static_cast<clang::ento::BugReport *>(&Sentinel);
+ }
+ void destroySentinel(clang::ento::BugReport *) const {}
+
+ clang::ento::BugReport *provideInitialHead() const {
+ return createSentinel();
+ }
+ clang::ento::BugReport *ensureHead(clang::ento::BugReport *) const {
+ return createSentinel();
+ }
+ private:
+ mutable ilist_half_node<clang::ento::BugReport> Sentinel;
+ };
+}
+
+namespace clang {
+namespace ento {
+
//===----------------------------------------------------------------------===//
// BugTypes (collections of related reports).
//===----------------------------------------------------------------------===//
class BugReportEquivClass : public llvm::FoldingSetNode {
/// List of *owned* BugReport objects.
- std::list<BugReport*> Reports;
+ llvm::ilist<BugReport> Reports;
friend class BugReporter;
void AddReport(BugReport* R) { Reports.push_back(R); }
@@ -178,36 +260,17 @@ public:
void Profile(llvm::FoldingSetNodeID& ID) const {
assert(!Reports.empty());
- (*Reports.begin())->Profile(ID);
+ Reports.front().Profile(ID);
}
- class iterator {
- std::list<BugReport*>::iterator impl;
- public:
- iterator(std::list<BugReport*>::iterator i) : impl(i) {}
- iterator &operator++() { ++impl; return *this; }
- bool operator==(const iterator &I) const { return I.impl == impl; }
- bool operator!=(const iterator &I) const { return I.impl != impl; }
- BugReport* operator*() const { return *impl; }
- BugReport* operator->() const { return *impl; }
- };
+ typedef llvm::ilist<BugReport>::iterator iterator;
+ typedef llvm::ilist<BugReport>::const_iterator const_iterator;
- class const_iterator {
- std::list<BugReport*>::const_iterator impl;
- public:
- const_iterator(std::list<BugReport*>::const_iterator i) : impl(i) {}
- const_iterator &operator++() { ++impl; return *this; }
- bool operator==(const const_iterator &I) const { return I.impl == impl; }
- bool operator!=(const const_iterator &I) const { return I.impl != impl; }
- const BugReport* operator*() const { return *impl; }
- const BugReport* operator->() const { return *impl; }
- };
-
- iterator begin() { return iterator(Reports.begin()); }
- iterator end() { return iterator(Reports.end()); }
+ iterator begin() { return Reports.begin(); }
+ iterator end() { return Reports.end(); }
- const_iterator begin() const { return const_iterator(Reports.begin()); }
- const_iterator end() const { return const_iterator(Reports.end()); }
+ const_iterator begin() const { return Reports.begin(); }
+ const_iterator end() const { return Reports.end(); }
};
//===----------------------------------------------------------------------===//
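A short sketch of what the container switch above means for callers: llvm::ilist iterators yield BugReport& rather than BugReport*, so the hand-rolled pointer-unwrapping iterators can go away. (getDescription() is assumed from BugReport's usual accessors; it does not appear in the hunks shown.)

```cpp
#include "llvm/Support/raw_ostream.h"

// Iteration now yields BugReport& directly; '->' on the iterator reaches
// the report's members.
void dumpReports(clang::ento::BugReportEquivClass &EQ) {
  typedef clang::ento::BugReportEquivClass::iterator iterator;
  for (iterator I = EQ.begin(), E = EQ.end(); I != E; ++I)
    llvm::errs() << I->getDescription() << "\n";
}
```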
@@ -294,34 +357,22 @@ public:
/// reports.
void EmitReport(BugReport *R);
- void EmitBasicReport(StringRef BugName, StringRef BugStr,
- PathDiagnosticLocation Loc,
- SourceRange* RangeBeg, unsigned NumRanges);
-
- void EmitBasicReport(StringRef BugName, StringRef BugCategory,
+ void EmitBasicReport(const Decl *DeclWithIssue,
+ StringRef BugName, StringRef BugCategory,
StringRef BugStr, PathDiagnosticLocation Loc,
SourceRange* RangeBeg, unsigned NumRanges);
-
- void EmitBasicReport(StringRef BugName, StringRef BugStr,
- PathDiagnosticLocation Loc) {
- EmitBasicReport(BugName, BugStr, Loc, 0, 0);
- }
-
- void EmitBasicReport(StringRef BugName, StringRef BugCategory,
+ void EmitBasicReport(const Decl *DeclWithIssue,
+ StringRef BugName, StringRef BugCategory,
StringRef BugStr, PathDiagnosticLocation Loc) {
- EmitBasicReport(BugName, BugCategory, BugStr, Loc, 0, 0);
- }
-
- void EmitBasicReport(StringRef BugName, StringRef BugStr,
- PathDiagnosticLocation Loc, SourceRange R) {
- EmitBasicReport(BugName, BugStr, Loc, &R, 1);
+ EmitBasicReport(DeclWithIssue, BugName, BugCategory, BugStr, Loc, 0, 0);
}
- void EmitBasicReport(StringRef BugName, StringRef Category,
+ void EmitBasicReport(const Decl *DeclWithIssue,
+ StringRef BugName, StringRef Category,
StringRef BugStr, PathDiagnosticLocation Loc,
SourceRange R) {
- EmitBasicReport(BugName, Category, BugStr, Loc, &R, 1);
+ EmitBasicReport(DeclWithIssue, BugName, Category, BugStr, Loc, &R, 1);
}
static bool classof(const BugReporter* R) { return true; }
@@ -337,7 +388,6 @@ private:
// FIXME: Get rid of GRBugReporter. It's the wrong abstraction.
class GRBugReporter : public BugReporter {
ExprEngine& Eng;
- llvm::SmallSet<SymbolRef, 10> NotableSymbols;
public:
GRBugReporter(BugReporterData& d, ExprEngine& eng)
: BugReporter(d, GRBugReporterKind), Eng(eng) {}
@@ -359,14 +409,6 @@ public:
virtual void GeneratePathDiagnostic(PathDiagnostic &pathDiagnostic,
SmallVectorImpl<BugReport*> &bugReports);
- void addNotableSymbol(SymbolRef Sym) {
- NotableSymbols.insert(Sym);
- }
-
- bool isNotable(SymbolRef Sym) const {
- return (bool) NotableSymbols.count(Sym);
- }
-
/// classof - Used by isa<>, cast<>, and dyn_cast<>.
static bool classof(const BugReporter* R) {
return R->getKind() == GRBugReporterKind;
@@ -374,6 +416,7 @@ public:
};
class BugReporterContext {
+ virtual void anchor();
GRBugReporter &BR;
public:
BugReporterContext(GRBugReporter& br) : BR(br) {}
@@ -384,16 +427,6 @@ public:
ExplodedGraph &getGraph() { return BR.getGraph(); }
- void addNotableSymbol(SymbolRef Sym) {
- // FIXME: For now forward to GRBugReporter.
- BR.addNotableSymbol(Sym);
- }
-
- bool isNotable(SymbolRef Sym) const {
- // FIXME: For now forward to GRBugReporter.
- return BR.isNotable(Sym);
- }
-
ProgramStateManager& getStateManager() {
return BR.getStateManager();
}
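A hypothetical checker snippet exercising the new "interesting" API added above; markInteresting lets visitors consult isInteresting() when deciding which diagnostic pieces to keep:

```cpp
// Hypothetical checker code; BugReport, markInteresting, and EmitReport
// are all declared in the hunks above.
void reportLeak(clang::ento::BugReporter &BR, clang::ento::BugType &BT,
                const clang::ento::ExplodedNode *N,
                clang::ento::SymbolRef Sym) {
  clang::ento::BugReport *R =
      new clang::ento::BugReport(BT, "Potential memory leak", N);
  R->markInteresting(Sym); // visitors consult isInteresting() when pruning
  BR.EmitReport(R);        // ownership passes to the reporter
}
```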
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
index 41c0a3a..7e665ce 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
@@ -28,10 +28,28 @@ class ExplodedNode;
class MemRegion;
class PathDiagnosticPiece;
+/// \brief BugReporterVisitors are used to add custom diagnostics along a path.
+///
+/// Custom visitors should subclass the BugReporterVisitorImpl class for a
+/// default implementation of the clone() method.
+/// (Warning: if you have a deep subclass of BugReporterVisitorImpl, the
+/// default implementation of clone() will NOT do the right thing, and you
+/// will have to provide your own implementation.)
class BugReporterVisitor : public llvm::FoldingSetNode {
public:
virtual ~BugReporterVisitor();
+  /// \brief Returns a copy of this BugReporterVisitor.
+ ///
+ /// Custom BugReporterVisitors should not override this method directly.
+ /// Instead, they should inherit from BugReporterVisitorImpl and provide
+ /// a protected or public copy constructor.
+ ///
+ /// (Warning: if you have a deep subclass of BugReporterVisitorImpl, the
+ /// default implementation of clone() will NOT do the right thing, and you
+ /// will have to provide your own implementation.)
+ virtual BugReporterVisitor *clone() const = 0;
+
/// \brief Return a diagnostic piece which should be associated with the
/// given node.
///
@@ -61,7 +79,24 @@ public:
};
-class FindLastStoreBRVisitor : public BugReporterVisitor {
+/// This class provides a convenience implementation for clone() using the
+/// Curiously-Recurring Template Pattern. If you are implementing a custom
+/// BugReporterVisitor, subclass BugReporterVisitorImpl and provide a public
+/// or protected copy constructor.
+///
+/// (Warning: if you have a deep subclass of BugReporterVisitorImpl, the
+/// default implementation of clone() will NOT do the right thing, and you
+/// will have to provide your own implementation.)
+template <class DERIVED>
+class BugReporterVisitorImpl : public BugReporterVisitor {
+ virtual BugReporterVisitor *clone() const {
+ return new DERIVED(*static_cast<const DERIVED *>(this));
+ }
+};
+
+class FindLastStoreBRVisitor
+ : public BugReporterVisitorImpl<FindLastStoreBRVisitor>
+{
const MemRegion *R;
SVal V;
bool satisfied;
@@ -94,7 +129,9 @@ public:
BugReport &BR);
};
-class TrackConstraintBRVisitor : public BugReporterVisitor {
+class TrackConstraintBRVisitor
+ : public BugReporterVisitorImpl<TrackConstraintBRVisitor>
+{
DefinedSVal Constraint;
const bool Assumption;
bool isSatisfied;
@@ -111,7 +148,9 @@ public:
BugReport &BR);
};
-class NilReceiverBRVisitor : public BugReporterVisitor {
+class NilReceiverBRVisitor
+ : public BugReporterVisitorImpl<NilReceiverBRVisitor>
+{
public:
void Profile(llvm::FoldingSetNodeID &ID) const {
static int x = 0;
@@ -125,50 +164,71 @@ public:
};
/// Visitor that tries to report interesting diagnostics from conditions.
-class ConditionBRVisitor : public BugReporterVisitor {
+class ConditionBRVisitor : public BugReporterVisitorImpl<ConditionBRVisitor> {
public:
void Profile(llvm::FoldingSetNodeID &ID) const {
static int x = 0;
ID.AddPointer(&x);
}
+
virtual PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
const ExplodedNode *Prev,
BugReporterContext &BRC,
BugReport &BR);
+ PathDiagnosticPiece *VisitNodeImpl(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR);
+
PathDiagnosticPiece *VisitTerminator(const Stmt *Term,
const ExplodedNode *N,
const CFGBlock *srcBlk,
const CFGBlock *dstBlk,
+ BugReport &R,
BugReporterContext &BRC);
PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
bool tookTrue,
BugReporterContext &BRC,
- const LocationContext *LC);
+ BugReport &R,
+ const ExplodedNode *N);
PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
const DeclRefExpr *DR,
const bool tookTrue,
BugReporterContext &BRC,
- const LocationContext *LC);
+ BugReport &R,
+ const ExplodedNode *N);
PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
const BinaryOperator *BExpr,
const bool tookTrue,
BugReporterContext &BRC,
- const LocationContext *LC);
+ BugReport &R,
+ const ExplodedNode *N);
+
+ PathDiagnosticPiece *VisitConditionVariable(StringRef LhsString,
+ const Expr *CondVarExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
bool patternMatch(const Expr *Ex,
llvm::raw_ostream &Out,
- BugReporterContext &BRC);
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N,
+ llvm::Optional<bool> &prunable);
};
-
+
namespace bugreporter {
BugReporterVisitor *getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
- const Stmt *S);
+ const Stmt *S,
+ BugReport *R);
const Stmt *GetDerefExpr(const ExplodedNode *N);
const Stmt *GetDenomExpr(const ExplodedNode *N);
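A hypothetical custom visitor showing the CRTP clone() support introduced above: deriving from BugReporterVisitorImpl and remaining copyable is all that is needed; the clone() override is inherited.

```cpp
// Hypothetical visitor; only BugReporterVisitorImpl comes from the patch.
class MyVisitor : public clang::ento::BugReporterVisitorImpl<MyVisitor> {
public:
  void Profile(llvm::FoldingSetNodeID &ID) const {
    static int Tag = 0;
    ID.AddPointer(&Tag); // identity-based profiling, as the patch's visitors do
  }
  virtual clang::ento::PathDiagnosticPiece *
  VisitNode(const clang::ento::ExplodedNode *N,
            const clang::ento::ExplodedNode *Prev,
            clang::ento::BugReporterContext &BRC,
            clang::ento::BugReport &BR) {
    return 0; // this sketch contributes no extra diagnostic pieces
  }
};
// clone() is inherited: via CRTP it returns 'new MyVisitor(*this)'.
```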
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
index 78067cd..cb49122 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
@@ -49,6 +49,7 @@ public:
};
class BuiltinBug : public BugType {
+ virtual void anchor();
const std::string desc;
public:
BuiltinBug(const char *name, const char *description)
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
index 406be3c..5a8a1c7 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
@@ -14,9 +14,12 @@
#ifndef LLVM_CLANG_PATH_DIAGNOSTIC_H
#define LLVM_CLANG_PATH_DIAGNOSTIC_H
-#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Analysis/ProgramPoint.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/Optional.h"
#include <deque>
#include <iterator>
#include <string>
@@ -24,7 +27,7 @@
namespace clang {
-class AnalysisContext;
+class AnalysisDeclContext;
class BinaryOperator;
class CompoundStmt;
class Decl;
@@ -38,6 +41,8 @@ class Stmt;
namespace ento {
class ExplodedNode;
+class SymExpr;
+typedef const SymExpr* SymbolRef;
//===----------------------------------------------------------------------===//
// High-level interface for handlers of path-sensitive diagnostics.
@@ -46,33 +51,34 @@ class ExplodedNode;
class PathDiagnostic;
class PathDiagnosticConsumer {
+ virtual void anchor();
public:
- PathDiagnosticConsumer() {}
+ PathDiagnosticConsumer() : flushed(false) {}
+ virtual ~PathDiagnosticConsumer();
+
+ void FlushDiagnostics(SmallVectorImpl<std::string> *FilesMade);
+
+ virtual void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade)
+ = 0;
- virtual ~PathDiagnosticConsumer() {}
-
- virtual void
- FlushDiagnostics(SmallVectorImpl<std::string> *FilesMade = 0) = 0;
-
- void FlushDiagnostics(SmallVectorImpl<std::string> &FilesMade) {
- FlushDiagnostics(&FilesMade);
- }
-
virtual StringRef getName() const = 0;
- void HandlePathDiagnostic(const PathDiagnostic* D);
+ void HandlePathDiagnostic(PathDiagnostic *D);
enum PathGenerationScheme { Minimal, Extensive };
virtual PathGenerationScheme getGenerationScheme() const { return Minimal; }
virtual bool supportsLogicalOpControlFlow() const { return false; }
virtual bool supportsAllBlockEdges() const { return false; }
virtual bool useVerboseDescription() const { return true; }
+
+ /// Return true if the PathDiagnosticConsumer supports individual
+ /// PathDiagnostics that span multiple files.
+ virtual bool supportsCrossFileDiagnostics() const { return false; }
protected:
- /// The actual logic for handling path diagnostics, as implemented
- /// by subclasses of PathDiagnosticConsumer.
- virtual void HandlePathDiagnosticImpl(const PathDiagnostic* D) = 0;
-
+ bool flushed;
+ llvm::FoldingSet<PathDiagnostic> Diags;
};
//===----------------------------------------------------------------------===//
@@ -89,8 +95,8 @@ public:
PathDiagnosticRange() : isPoint(false) {}
};
-typedef llvm::PointerUnion<const LocationContext*, AnalysisContext*>
- LocationOrAnalysisContext;
+typedef llvm::PointerUnion<const LocationContext*, AnalysisDeclContext*>
+ LocationOrAnalysisDeclContext;
class PathDiagnosticLocation {
private:
@@ -111,10 +117,10 @@ private:
FullSourceLoc
genLocation(SourceLocation L = SourceLocation(),
- LocationOrAnalysisContext LAC = (AnalysisContext*)0) const;
+ LocationOrAnalysisDeclContext LAC = (AnalysisDeclContext*)0) const;
PathDiagnosticRange
- genRange(LocationOrAnalysisContext LAC = (AnalysisContext*)0) const;
+ genRange(LocationOrAnalysisDeclContext LAC = (AnalysisDeclContext*)0) const;
public:
/// Create an invalid location.
@@ -124,10 +130,11 @@ public:
/// Create a location corresponding to the given statement.
PathDiagnosticLocation(const Stmt *s,
const SourceManager &sm,
- LocationOrAnalysisContext lac)
+ LocationOrAnalysisDeclContext lac)
: K(StmtK), S(s), D(0), SM(&sm),
Loc(genLocation(SourceLocation(), lac)),
Range(genRange(lac)) {
+ assert(S);
assert(Loc.isValid());
assert(Range.isValid());
}
@@ -136,6 +143,7 @@ public:
PathDiagnosticLocation(const Decl *d, const SourceManager &sm)
: K(DeclK), S(0), D(d), SM(&sm),
Loc(genLocation()), Range(genRange()) {
+ assert(D);
assert(Loc.isValid());
assert(Range.isValid());
}
@@ -153,7 +161,7 @@ public:
/// Create a location for the beginning of the statement.
static PathDiagnosticLocation createBegin(const Stmt *S,
const SourceManager &SM,
- const LocationOrAnalysisContext LAC);
+ const LocationOrAnalysisDeclContext LAC);
/// Create the location for the operator of the binary expression.
/// Assumes the statement has a valid location.
@@ -260,14 +268,13 @@ public:
// Path "pieces" for path-sensitive diagnostics.
//===----------------------------------------------------------------------===//
-class PathDiagnosticPiece {
+class PathDiagnosticPiece : public RefCountedBaseVPTR {
public:
- enum Kind { ControlFlow, Event, Macro };
+ enum Kind { ControlFlow, Event, Macro, Call };
enum DisplayHint { Above, Below };
private:
const std::string str;
- std::vector<FixItHint> FixItHints;
const Kind kind;
const DisplayHint Hint;
std::vector<SourceRange> ranges;
@@ -308,10 +315,6 @@ public:
ranges.push_back(SourceRange(B,E));
}
- void addFixItHint(const FixItHint& Hint) {
- FixItHints.push_back(Hint);
- }
-
typedef const SourceRange* range_iterator;
range_iterator ranges_begin() const {
@@ -322,23 +325,19 @@ public:
return ranges_begin() + ranges.size();
}
- typedef const FixItHint *fixit_iterator;
-
- fixit_iterator fixit_begin() const {
- return FixItHints.empty()? 0 : &FixItHints[0];
- }
-
- fixit_iterator fixit_end() const {
- return FixItHints.empty()? 0
- : &FixItHints[0] + FixItHints.size();
- }
-
static inline bool classof(const PathDiagnosticPiece *P) {
return true;
}
virtual void Profile(llvm::FoldingSetNodeID &ID) const;
};
+
+
+class PathPieces :
+ public std::deque<IntrusiveRefCntPtr<PathDiagnosticPiece> > {
+public:
+ ~PathPieces();
+};
class PathDiagnosticSpotPiece : public PathDiagnosticPiece {
private:
@@ -360,20 +359,170 @@ public:
virtual void Profile(llvm::FoldingSetNodeID &ID) const;
};
+/// \brief Interface for classes constructing Stack hints.
+///
+/// If a PathDiagnosticEvent occurs in a different frame than the final
+/// diagnostic, the hints can be used to summarize the effect of the call.
+class StackHintGenerator {
+public:
+ virtual ~StackHintGenerator() = 0;
+
+ /// \brief Construct the Diagnostic message for the given ExplodedNode.
+ virtual std::string getMessage(const ExplodedNode *N) = 0;
+};
+
+/// \brief Constructs a Stack hint for the given symbol.
+///
+/// The class knows how to construct the stack hint message based on
+/// traversing the CallExpr associated with the call and checking if the given
+/// symbol is returned or is one of the arguments.
+/// The hint can be customized by redefining 'getMessageForX()' methods.
+class StackHintGeneratorForSymbol : public StackHintGenerator {
+private:
+ SymbolRef Sym;
+ std::string Msg;
+
+public:
+ StackHintGeneratorForSymbol(SymbolRef S, StringRef M) : Sym(S), Msg(M) {}
+ virtual ~StackHintGeneratorForSymbol() {}
+
+ /// \brief Search the call expression for the symbol Sym and dispatch the
+ /// 'getMessageForX()' methods to construct a specific message.
+ virtual std::string getMessage(const ExplodedNode *N);
+
+  /// Print the ordinal form of the given integer; only valid for ValNo > 0.
+ void printOrdinal(unsigned ValNo, llvm::raw_svector_ostream &Out);
+
+ /// Produces the message of the following form:
+ /// 'Msg via Nth parameter'
+ virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex);
+ virtual std::string getMessageForReturn(const CallExpr *CallExpr) {
+ return Msg;
+ }
+ virtual std::string getMessageForSymbolNotFound() {
+ return Msg;
+ }
+};
+
class PathDiagnosticEventPiece : public PathDiagnosticSpotPiece {
+ llvm::Optional<bool> IsPrunable;
+
+ /// If the event occurs in a different frame than the final diagnostic,
+ /// supply a message that will be used to construct an extra hint on the
+ /// returns from all the calls on the stack from this event to the final
+ /// diagnostic.
+ llvm::OwningPtr<StackHintGenerator> CallStackHint;
public:
PathDiagnosticEventPiece(const PathDiagnosticLocation &pos,
- StringRef s, bool addPosRange = true)
- : PathDiagnosticSpotPiece(pos, s, Event, addPosRange) {}
+ StringRef s, bool addPosRange = true,
+ StackHintGenerator *stackHint = 0)
+ : PathDiagnosticSpotPiece(pos, s, Event, addPosRange),
+ CallStackHint(stackHint) {}
~PathDiagnosticEventPiece();
+ /// Mark the diagnostic piece as being potentially prunable. This
+ /// flag may have been previously set, at which point it will not
+ /// be reset unless one specifies to do so.
+ void setPrunable(bool isPrunable, bool override = false) {
+ if (IsPrunable.hasValue() && !override)
+ return;
+ IsPrunable = isPrunable;
+ }
+
+ /// Return true if the diagnostic piece is prunable.
+ bool isPrunable() const {
+ return IsPrunable.hasValue() ? IsPrunable.getValue() : false;
+ }
+
+ bool hasCallStackHint() {
+ return (CallStackHint != 0);
+ }
+
+ /// Produce the hint for the given node. The node contains
+ /// information about the call for which the diagnostic can be generated.
+ std::string getCallStackMessage(const ExplodedNode *N) {
+ if (CallStackHint)
+ return CallStackHint->getMessage(N);
+ return "";
+ }
+
static inline bool classof(const PathDiagnosticPiece *P) {
return P->getKind() == Event;
}
};
+class PathDiagnosticCallPiece : public PathDiagnosticPiece {
+ PathDiagnosticCallPiece(const Decl *callerD,
+ const PathDiagnosticLocation &callReturnPos)
+ : PathDiagnosticPiece(Call), Caller(callerD), Callee(0),
+ NoExit(false), callReturn(callReturnPos) {}
+
+ PathDiagnosticCallPiece(PathPieces &oldPath, const Decl *caller)
+ : PathDiagnosticPiece(Call), Caller(caller), Callee(0),
+ NoExit(true), path(oldPath) {}
+
+ const Decl *Caller;
+ const Decl *Callee;
+
+ // Flag signifying that this diagnostic has only call enter and no matching
+ // call exit.
+ bool NoExit;
+
+  // The custom string, which should appear after the call return diagnostic.
+ // TODO: Should we allow multiple diagnostics?
+ std::string CallStackMessage;
+
+public:
+ PathDiagnosticLocation callEnter;
+ PathDiagnosticLocation callEnterWithin;
+ PathDiagnosticLocation callReturn;
+ PathPieces path;
+
+ virtual ~PathDiagnosticCallPiece();
+
+ const Decl *getCaller() const { return Caller; }
+
+ const Decl *getCallee() const { return Callee; }
+ void setCallee(const CallEnter &CE, const SourceManager &SM);
+
+ bool hasCallStackMessage() { return !CallStackMessage.empty(); }
+ void setCallStackMessage(StringRef st) {
+ CallStackMessage = st;
+ }
+
+ virtual PathDiagnosticLocation getLocation() const {
+ return callEnter;
+ }
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> getCallEnterEvent() const;
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+ getCallEnterWithinCallerEvent() const;
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> getCallExitEvent() const;
+
+ virtual void flattenLocations() {
+ callEnter.flatten();
+ callReturn.flatten();
+ for (PathPieces::iterator I = path.begin(),
+ E = path.end(); I != E; ++I) (*I)->flattenLocations();
+ }
+
+ static PathDiagnosticCallPiece *construct(const ExplodedNode *N,
+ const CallExit &CE,
+ const SourceManager &SM);
+
+ static PathDiagnosticCallPiece *construct(PathPieces &pieces,
+ const Decl *caller);
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ static inline bool classof(const PathDiagnosticPiece *P) {
+ return P->getKind() == Call;
+ }
+};
+
class PathDiagnosticControlFlowPiece : public PathDiagnosticPiece {
std::vector<PathDiagnosticLocationPair> LPairs;
public:
@@ -431,30 +580,22 @@ public:
};
class PathDiagnosticMacroPiece : public PathDiagnosticSpotPiece {
- std::vector<PathDiagnosticPiece*> SubPieces;
public:
PathDiagnosticMacroPiece(const PathDiagnosticLocation &pos)
: PathDiagnosticSpotPiece(pos, "", Macro) {}
~PathDiagnosticMacroPiece();
+ PathPieces subPieces;
+
bool containsEvent() const;
- void push_back(PathDiagnosticPiece *P) { SubPieces.push_back(P); }
-
- typedef std::vector<PathDiagnosticPiece*>::iterator iterator;
- iterator begin() { return SubPieces.begin(); }
- iterator end() { return SubPieces.end(); }
-
virtual void flattenLocations() {
PathDiagnosticSpotPiece::flattenLocations();
- for (iterator I=begin(), E=end(); I!=E; ++I) (*I)->flattenLocations();
+ for (PathPieces::iterator I = subPieces.begin(),
+ E = subPieces.end(); I != E; ++I) (*I)->flattenLocations();
}
- typedef std::vector<PathDiagnosticPiece*>::const_iterator const_iterator;
- const_iterator begin() const { return SubPieces.begin(); }
- const_iterator end() const { return SubPieces.end(); }
-
static inline bool classof(const PathDiagnosticPiece *P) {
return P->getKind() == Macro;
}
@@ -466,17 +607,41 @@ public:
/// diagnostic. It represents an ordered-collection of PathDiagnosticPieces,
/// each which represent the pieces of the path.
class PathDiagnostic : public llvm::FoldingSetNode {
- std::deque<PathDiagnosticPiece*> path;
- unsigned Size;
+ const Decl *DeclWithIssue;
std::string BugType;
std::string Desc;
std::string Category;
std::deque<std::string> OtherDesc;
-
+ PathPieces pathImpl;
+ llvm::SmallVector<PathPieces *, 3> pathStack;
+
+ PathDiagnostic(); // Do not implement.
public:
- PathDiagnostic();
+ const PathPieces &path;
+
+ /// Return the path currently used by builders for constructing the
+ /// PathDiagnostic.
+ PathPieces &getActivePath() {
+ if (pathStack.empty())
+ return pathImpl;
+ return *pathStack.back();
+ }
+
+ /// Return a mutable version of 'path'.
+ PathPieces &getMutablePieces() {
+ return pathImpl;
+ }
+
+ /// Return the unrolled size of the path.
+ unsigned full_size();
- PathDiagnostic(StringRef bugtype, StringRef desc,
+ void pushActivePath(PathPieces *p) { pathStack.push_back(p); }
+ void popActivePath() { if (!pathStack.empty()) pathStack.pop_back(); }
+
+ // PathDiagnostic();
+ PathDiagnostic(const Decl *DeclWithIssue,
+ StringRef bugtype,
+ StringRef desc,
StringRef category);
~PathDiagnostic();
@@ -485,115 +650,26 @@ public:
StringRef getBugType() const { return BugType; }
StringRef getCategory() const { return Category; }
+ /// Return the semantic context where an issue occurred. If the
+ /// issue occurs along a path, this represents the "central" area
+ /// where the bug manifests.
+ const Decl *getDeclWithIssue() const { return DeclWithIssue; }
+
typedef std::deque<std::string>::const_iterator meta_iterator;
meta_iterator meta_begin() const { return OtherDesc.begin(); }
meta_iterator meta_end() const { return OtherDesc.end(); }
void addMeta(StringRef s) { OtherDesc.push_back(s); }
- PathDiagnosticLocation getLocation() const {
- assert(Size > 0 && "getLocation() requires a non-empty PathDiagnostic.");
- return rbegin()->getLocation();
- }
-
- void push_front(PathDiagnosticPiece *piece) {
- assert(piece);
- path.push_front(piece);
- ++Size;
- }
-
- void push_back(PathDiagnosticPiece *piece) {
- assert(piece);
- path.push_back(piece);
- ++Size;
- }
-
- PathDiagnosticPiece *back() {
- return path.back();
- }
-
- const PathDiagnosticPiece *back() const {
- return path.back();
- }
-
- unsigned size() const { return Size; }
- bool empty() const { return Size == 0; }
-
- void resetPath(bool deletePieces = true);
-
- class iterator {
- public:
- typedef std::deque<PathDiagnosticPiece*>::iterator ImplTy;
-
- typedef PathDiagnosticPiece value_type;
- typedef value_type& reference;
- typedef value_type* pointer;
- typedef ptrdiff_t difference_type;
- typedef std::bidirectional_iterator_tag iterator_category;
-
- private:
- ImplTy I;
-
- public:
- iterator(const ImplTy& i) : I(i) {}
-
- bool operator==(const iterator &X) const { return I == X.I; }
- bool operator!=(const iterator &X) const { return I != X.I; }
-
- PathDiagnosticPiece& operator*() const { return **I; }
- PathDiagnosticPiece *operator->() const { return *I; }
-
- iterator &operator++() { ++I; return *this; }
- iterator &operator--() { --I; return *this; }
- };
-
- class const_iterator {
- public:
- typedef std::deque<PathDiagnosticPiece*>::const_iterator ImplTy;
-
- typedef const PathDiagnosticPiece value_type;
- typedef value_type& reference;
- typedef value_type* pointer;
- typedef ptrdiff_t difference_type;
- typedef std::bidirectional_iterator_tag iterator_category;
-
- private:
- ImplTy I;
-
- public:
- const_iterator(const ImplTy& i) : I(i) {}
-
- bool operator==(const const_iterator &X) const { return I == X.I; }
- bool operator!=(const const_iterator &X) const { return I != X.I; }
-
- reference operator*() const { return **I; }
- pointer operator->() const { return *I; }
-
- const_iterator &operator++() { ++I; return *this; }
- const_iterator &operator--() { --I; return *this; }
- };
-
- typedef std::reverse_iterator<iterator> reverse_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
-
- // forward iterator creation methods.
-
- iterator begin() { return path.begin(); }
- iterator end() { return path.end(); }
-
- const_iterator begin() const { return path.begin(); }
- const_iterator end() const { return path.end(); }
-
- // reverse iterator creation methods.
- reverse_iterator rbegin() { return reverse_iterator(end()); }
- const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
- reverse_iterator rend() { return reverse_iterator(begin()); }
- const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+ PathDiagnosticLocation getLocation() const;
void flattenLocations() {
- for (iterator I = begin(), E = end(); I != E; ++I) I->flattenLocations();
+ for (PathPieces::iterator I = pathImpl.begin(), E = pathImpl.end();
+ I != E; ++I) (*I)->flattenLocations();
}
void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ void FullProfile(llvm::FoldingSetNodeID &ID) const;
};
} // end GR namespace
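
To make the new pathStack mechanics concrete, here is a minimal standalone sketch of the active-path pattern, assuming nothing beyond what the hunk above shows; Diag and Pieces are illustrative stand-ins, not the analyzer's types:

#include <cassert>
#include <deque>
#include <string>
#include <vector>

typedef std::deque<std::string> Pieces; // stand-in for PathPieces

class Diag {
  Pieces pathImpl;                 // the full, top-level path
  std::vector<Pieces *> pathStack; // innermost open scope on top
public:
  // New pieces land in the innermost pushed container, if any.
  Pieces &getActivePath() {
    return pathStack.empty() ? pathImpl : *pathStack.back();
  }
  void pushActivePath(Pieces *p) { pathStack.push_back(p); }
  void popActivePath() { if (!pathStack.empty()) pathStack.pop_back(); }
};

int main() {
  Diag d;
  Pieces callSubpath;
  d.getActivePath().push_back("event in caller");
  d.pushActivePath(&callSubpath);           // enter a call
  d.getActivePath().push_back("event in callee");
  d.popActivePath();                        // back to the caller's path
  assert(callSubpath.size() == 1);
  return 0;
}
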
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
index 1e4edeb..76d8c15 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -199,9 +199,9 @@ public:
class EndPath {
template <typename CHECKER>
- static void _checkEndPath(void *checker, EndOfFunctionNodeBuilder &B,
- ExprEngine &Eng) {
- ((const CHECKER *)checker)->checkEndPath(B, Eng);
+ static void _checkEndPath(void *checker,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkEndPath(C);
}
public:
@@ -214,9 +214,9 @@ public:
class BranchCondition {
template <typename CHECKER>
- static void _checkBranchCondition(void *checker, const Stmt *condition,
- BranchNodeBuilder &B, ExprEngine &Eng) {
- ((const CHECKER *)checker)->checkBranchCondition(condition, B, Eng);
+ static void _checkBranchCondition(void *checker, const Stmt *Condition,
+ CheckerContext & C) {
+ ((const CHECKER *)checker)->checkBranchCondition(Condition, C);
}
public:
@@ -230,7 +230,7 @@ public:
class LiveSymbols {
template <typename CHECKER>
- static void _checkLiveSymbols(void *checker, const ProgramState *state,
+ static void _checkLiveSymbols(void *checker, ProgramStateRef state,
SymbolReaper &SR) {
((const CHECKER *)checker)->checkLiveSymbols(state, SR);
}
@@ -260,18 +260,19 @@ public:
class RegionChanges {
template <typename CHECKER>
- static const ProgramState *
+ static ProgramStateRef
_checkRegionChanges(void *checker,
- const ProgramState *state,
+ ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> Explicits,
- ArrayRef<const MemRegion *> Regions) {
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) {
return ((const CHECKER *)checker)->checkRegionChanges(state, invalidated,
- Explicits, Regions);
+ Explicits, Regions, Call);
}
template <typename CHECKER>
static bool _wantsRegionChangeUpdate(void *checker,
- const ProgramState *state) {
+ ProgramStateRef state) {
return ((const CHECKER *)checker)->wantsRegionChangeUpdate(state);
}
@@ -306,8 +307,8 @@ namespace eval {
class Assume {
template <typename CHECKER>
- static const ProgramState *_evalAssume(void *checker,
- const ProgramState *state,
+ static ProgramStateRef _evalAssume(void *checker,
+ ProgramStateRef state,
const SVal &cond,
bool assumption) {
return ((const CHECKER *)checker)->evalAssume(state, cond, assumption);
@@ -359,7 +360,7 @@ public:
StringRef getTagDescription() const;
/// See CheckerManager::runCheckersForPrintState.
- virtual void printState(raw_ostream &Out, const ProgramState *State,
+ virtual void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const { }
};
@@ -370,7 +371,8 @@ template <typename CHECK1, typename CHECK2=check::_VoidCheck,
typename CHECK9=check::_VoidCheck, typename CHECK10=check::_VoidCheck,
typename CHECK11=check::_VoidCheck,typename CHECK12=check::_VoidCheck,
typename CHECK13=check::_VoidCheck,typename CHECK14=check::_VoidCheck,
- typename CHECK15=check::_VoidCheck,typename CHECK16=check::_VoidCheck>
+ typename CHECK15=check::_VoidCheck,typename CHECK16=check::_VoidCheck,
+ typename CHECK17=check::_VoidCheck,typename CHECK18=check::_VoidCheck>
class Checker;
template <>
@@ -379,9 +381,10 @@ class Checker<check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
- check::_VoidCheck>
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck>
: public CheckerBase
{
+ virtual void anchor();
public:
static void _register(void *checker, CheckerManager &mgr) { }
};
@@ -389,19 +392,20 @@ public:
template <typename CHECK1, typename CHECK2, typename CHECK3, typename CHECK4,
typename CHECK5, typename CHECK6, typename CHECK7, typename CHECK8,
typename CHECK9, typename CHECK10,typename CHECK11,typename CHECK12,
- typename CHECK13,typename CHECK14,typename CHECK15,typename CHECK16>
+ typename CHECK13,typename CHECK14,typename CHECK15,typename CHECK16,
+ typename CHECK17,typename CHECK18>
class Checker
: public CHECK1,
public Checker<CHECK2, CHECK3, CHECK4, CHECK5, CHECK6, CHECK7, CHECK8,
CHECK9, CHECK10,CHECK11,CHECK12,CHECK13,CHECK14,CHECK15,
- CHECK16> {
+ CHECK16,CHECK17,CHECK18> {
public:
template <typename CHECKER>
static void _register(CHECKER *checker, CheckerManager &mgr) {
CHECK1::_register(checker, mgr);
Checker<CHECK2, CHECK3, CHECK4, CHECK5, CHECK6, CHECK7, CHECK8,
CHECK9, CHECK10,CHECK11,CHECK12,CHECK13,CHECK14,CHECK15,
- CHECK16>::_register(checker, mgr);
+ CHECK16,CHECK17,CHECK18>::_register(checker, mgr);
}
};
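
The registration chain above is easier to follow in reduced form. This is a sketch of the same recursive-inheritance pattern with three parameters instead of eighteen; Chk, Mgr, PreStmt, and PostCall are made-up names for the example:

#include <iostream>

struct Mgr {};

struct VoidCheck {
  static void _register(void *, Mgr &) {} // terminator: does nothing
};

template <typename C1 = VoidCheck, typename C2 = VoidCheck,
          typename C3 = VoidCheck>
struct Chk : C1, Chk<C2, C3> {
  template <typename CHECKER>
  static void _register(CHECKER *checker, Mgr &mgr) {
    C1::_register(checker, mgr);          // register this callback...
    Chk<C2, C3>::_register(checker, mgr); // ...then recurse on the rest
  }
};

template <> // base case: all parameters defaulted away
struct Chk<VoidCheck, VoidCheck, VoidCheck> {
  static void _register(void *, Mgr &) {}
};

struct PreStmt {
  static void _register(void *, Mgr &) { std::cout << "PreStmt\n"; }
};
struct PostCall {
  static void _register(void *, Mgr &) { std::cout << "PostCall\n"; }
};

struct MyChecker : Chk<PreStmt, PostCall> {};

int main() {
  Mgr mgr;
  MyChecker c;
  MyChecker::_register(&c, mgr); // prints PreStmt, then PostCall
  return 0;
}
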
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
index e3e4c49..d215f99 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/Analysis/ProgramPoint.h"
#include <vector>
namespace clang {
@@ -38,8 +39,8 @@ namespace ento {
class ExplodedNodeSet;
class ExplodedGraph;
class ProgramState;
- class EndOfFunctionNodeBuilder;
- class BranchNodeBuilder;
+ class NodeBuilder;
+ struct NodeBuilderContext;
class MemRegion;
class SymbolReaper;
@@ -51,6 +52,19 @@ public:
template <typename T> class CheckerFn;
+template <typename RET, typename P1, typename P2, typename P3, typename P4,
+ typename P5>
+class CheckerFn<RET(P1, P2, P3, P4, P5)> {
+ typedef RET (*Func)(void *, P1, P2, P3, P4, P5);
+ Func Fn;
+public:
+ CheckerBase *Checker;
+ CheckerFn(CheckerBase *checker, Func fn) : Fn(fn), Checker(checker) { }
+ RET operator()(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) const {
+ return Fn(Checker, p1, p2, p3, p4, p5);
+ }
+};
+
template <typename RET, typename P1, typename P2, typename P3, typename P4>
class CheckerFn<RET(P1, P2, P3, P4)> {
typedef RET (*Func)(void *, P1, P2, P3, P4);
@@ -114,7 +128,7 @@ public:
void finishedCheckerRegistration();
- const LangOptions &getLangOptions() const { return LangOpts; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
typedef CheckerBase *CheckerRef;
typedef const void *CheckerTag;
@@ -179,14 +193,16 @@ public:
void runCheckersForPostStmt(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
const Stmt *S,
- ExprEngine &Eng) {
- runCheckersForStmt(/*isPreVisit=*/false, Dst, Src, S, Eng);
+ ExprEngine &Eng,
+ bool wasInlined = false) {
+ runCheckersForStmt(/*isPreVisit=*/false, Dst, Src, S, Eng, wasInlined);
}
/// \brief Run checkers for visiting Stmts.
void runCheckersForStmt(bool isPreVisit,
ExplodedNodeSet &Dst, const ExplodedNodeSet &Src,
- const Stmt *S, ExprEngine &Eng);
+ const Stmt *S, ExprEngine &Eng,
+ bool wasInlined = false);
/// \brief Run checkers for pre-visiting obj-c messages.
void runCheckersForPreObjCMessage(ExplodedNodeSet &Dst,
@@ -213,33 +229,39 @@ public:
/// \brief Run checkers for load/store of a location.
void runCheckersForLocation(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
- SVal location, bool isLoad,
- const Stmt *S,
+ SVal location,
+ bool isLoad,
+ const Stmt *NodeEx,
+ const Stmt *BoundEx,
ExprEngine &Eng);
/// \brief Run checkers for binding of a value to a location.
void runCheckersForBind(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
SVal location, SVal val,
- const Stmt *S, ExprEngine &Eng);
+ const Stmt *S, ExprEngine &Eng,
+ ProgramPoint::Kind PointKind);
/// \brief Run checkers for end of analysis.
void runCheckersForEndAnalysis(ExplodedGraph &G, BugReporter &BR,
ExprEngine &Eng);
/// \brief Run checkers for end of path.
- void runCheckersForEndPath(EndOfFunctionNodeBuilder &B, ExprEngine &Eng);
+ void runCheckersForEndPath(NodeBuilderContext &BC,
+ ExplodedNodeSet &Dst,
+ ExprEngine &Eng);
/// \brief Run checkers for branch condition.
void runCheckersForBranchCondition(const Stmt *condition,
- BranchNodeBuilder &B, ExprEngine &Eng);
+ ExplodedNodeSet &Dst, ExplodedNode *Pred,
+ ExprEngine &Eng);
/// \brief Run checkers for live symbols.
///
/// Allows modifying SymbolReaper object. For example, checkers can explicitly
/// register symbols of interest as live. These symbols will not be marked
/// dead and removed.
- void runCheckersForLiveSymbols(const ProgramState *state,
+ void runCheckersForLiveSymbols(ProgramStateRef state,
SymbolReaper &SymReaper);
/// \brief Run checkers for dead symbols.
@@ -253,7 +275,7 @@ public:
ExprEngine &Eng);
/// \brief True if at least one checker wants to check region changes.
- bool wantsRegionChangeUpdate(const ProgramState *state);
+ bool wantsRegionChangeUpdate(ProgramStateRef state);
/// \brief Run checkers for region changes.
///
@@ -264,15 +286,18 @@ public:
/// For example, in the case of a function call, these would be arguments.
/// \param Regions The transitive closure of accessible regions,
/// i.e. all regions that may have been touched by this change.
- const ProgramState *
- runCheckersForRegionChanges(const ProgramState *state,
+ /// \param Call The call expression wrapper if the regions are invalidated by a
+ /// call.
+ ProgramStateRef
+ runCheckersForRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions);
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call);
/// \brief Run checkers for handling assumptions on symbolic values.
- const ProgramState *runCheckersForEvalAssume(const ProgramState *state,
- SVal Cond, bool Assumption);
+ ProgramStateRef runCheckersForEvalAssume(ProgramStateRef state,
+ SVal Cond, bool Assumption);
/// \brief Run checkers for evaluating a call.
void runCheckersForEvalCall(ExplodedNodeSet &Dst,
@@ -293,7 +318,7 @@ public:
/// \param State The state being printed
/// \param NL The preferred representation of a newline.
/// \param Sep The preferred separator between different kinds of data.
- void runCheckersForPrintState(raw_ostream &Out, const ProgramState *State,
+ void runCheckersForPrintState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep);
//===----------------------------------------------------------------------===//
@@ -320,7 +345,8 @@ public:
typedef CheckerFn<void (const ObjCMessage &, CheckerContext &)>
CheckObjCMessageFunc;
- typedef CheckerFn<void (const SVal &location, bool isLoad, const Stmt *S,
+ typedef CheckerFn<void (const SVal &location, bool isLoad,
+ const Stmt *S,
CheckerContext &)>
CheckLocationFunc;
@@ -331,26 +357,27 @@ public:
typedef CheckerFn<void (ExplodedGraph &, BugReporter &, ExprEngine &)>
CheckEndAnalysisFunc;
- typedef CheckerFn<void (EndOfFunctionNodeBuilder &, ExprEngine &)>
+ typedef CheckerFn<void (CheckerContext &)>
CheckEndPathFunc;
- typedef CheckerFn<void (const Stmt *, BranchNodeBuilder &, ExprEngine &)>
+ typedef CheckerFn<void (const Stmt *, CheckerContext &)>
CheckBranchConditionFunc;
typedef CheckerFn<void (SymbolReaper &, CheckerContext &)>
CheckDeadSymbolsFunc;
- typedef CheckerFn<void (const ProgramState *,SymbolReaper &)> CheckLiveSymbolsFunc;
+ typedef CheckerFn<void (ProgramStateRef,SymbolReaper &)> CheckLiveSymbolsFunc;
- typedef CheckerFn<const ProgramState * (const ProgramState *,
+ typedef CheckerFn<ProgramStateRef (ProgramStateRef,
const StoreManager::InvalidatedSymbols *symbols,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions)>
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call)>
CheckRegionChangesFunc;
- typedef CheckerFn<bool (const ProgramState *)> WantsRegionChangeUpdateFunc;
+ typedef CheckerFn<bool (ProgramStateRef)> WantsRegionChangeUpdateFunc;
- typedef CheckerFn<const ProgramState * (const ProgramState *,
+ typedef CheckerFn<ProgramStateRef (ProgramStateRef,
const SVal &cond, bool assumption)>
EvalAssumeFunc;
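
Every CheckerFn partial specialization above follows one type-erasure recipe: store the checker as a void* next to a free-function thunk that casts it back and forwards the call. A compact sketch for a single arity; Fn, State, and NullDerefChecker are invented for the example:

#include <iostream>

struct State {};

template <typename T> class Fn;

template <typename RET, typename P1>
class Fn<RET(P1)> {
  typedef RET (*Func)(void *, P1); // thunk taking the erased checker
  Func F;
  void *Checker;
public:
  Fn(void *checker, Func f) : F(f), Checker(checker) {}
  RET operator()(P1 p1) const { return F(Checker, p1); }
};

struct NullDerefChecker {
  void checkLocation(State &) const { std::cout << "checked\n"; }
  // The thunk the manager would store; it recovers the concrete type.
  static void _check(void *checker, State &s) {
    static_cast<const NullDerefChecker *>(checker)->checkLocation(s);
  }
};

int main() {
  NullDerefChecker c;
  Fn<void (State &)> fn(&c, &NullDerefChecker::_check);
  State s;
  fn(s); // dispatches through the stored thunk
  return 0;
}
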
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h
index b59c14d..1452d45 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h
@@ -107,7 +107,9 @@ public:
/// checker does not require any custom initialization.
template <class T>
void addChecker(StringRef fullName, StringRef desc) {
- addChecker(&initializeManager<T>, fullName, desc);
+ // Avoid MSVC's Compiler Error C2276:
+ // http://msdn.microsoft.com/en-us/library/850cstw1(v=VS.80).aspx
+ addChecker(&CheckerRegistry::initializeManager<T>, fullName, desc);
}
/// Initializes a CheckerManager by calling the initialization functions for
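
The qualified address-of in addChecker sidesteps MSVC's C2276, which (per the MSDN link above) rejects taking the address of a member function template without the class-name qualifier. A rough illustration; Registry here is a stand-in, not the real CheckerRegistry:

struct Registry {
  template <class T> static void init(int &) {}

  template <class T> void add() {
    void (*fn)(int &) = &Registry::init<T>; // qualified: portable
    // void (*bad)(int &) = &init<T>;       // unqualified: C2276 on old MSVC
    (void)fn;
  }
};

int main() {
  Registry r;
  r.add<int>();
  return 0;
}
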
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
index d1f5a7d..65be3a4 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
@@ -32,6 +32,10 @@ createPlistDiagnosticConsumer(const std::string& prefix, const Preprocessor &PP,
PathDiagnosticConsumer *SubPD = 0);
PathDiagnosticConsumer*
+createPlistMultiFileDiagnosticConsumer(const std::string& prefix,
+ const Preprocessor &PP);
+
+PathDiagnosticConsumer*
createTextPathDiagnosticConsumer(const std::string& prefix,
const Preprocessor &PP);
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
index 6c93f59..3cbecf7 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
@@ -31,14 +31,14 @@ namespace ento {
class CheckerManager;
class AnalysisManager : public BugReporterData {
- AnalysisContextManager AnaCtxMgr;
- LocationContextManager LocCtxMgr;
+ virtual void anchor();
+ AnalysisDeclContextManager AnaCtxMgr;
ASTContext &Ctx;
DiagnosticsEngine &Diags;
- const LangOptions &LangInfo;
+ const LangOptions &LangOpts;
- llvm::OwningPtr<PathDiagnosticConsumer> PD;
+ OwningPtr<PathDiagnosticConsumer> PD;
// Configurable components creators.
StoreManagerCreator CreateStoreMgr;
@@ -53,29 +53,47 @@ class AnalysisManager : public BugReporterData {
enum AnalysisScope { ScopeTU, ScopeDecl } AScope;
- // The maximum number of exploded nodes the analyzer will generate.
+ /// \brief The maximum number of exploded nodes the analyzer will generate.
unsigned MaxNodes;
- // The maximum number of times the analyzer visit a block.
+ /// \brief The maximum number of times the analyzer visits a block.
unsigned MaxVisit;
bool VisualizeEGDot;
bool VisualizeEGUbi;
AnalysisPurgeMode PurgeDead;
- /// EargerlyAssume - A flag indicating how the engine should handle
- // expressions such as: 'x = (y != 0)'. When this flag is true then
- // the subexpression 'y != 0' will be eagerly assumed to be true or false,
- // thus evaluating it to the integers 0 or 1 respectively. The upside
- // is that this can increase analysis precision until we have a better way
- // to lazily evaluate such logic. The downside is that it eagerly
- // bifurcates paths.
+ /// \brief This flag indicates whether we should eagerly assume evaluations
+ /// of conditionals, thus bifurcating the path.
+ ///
+ /// EagerlyAssume - A flag indicating how the engine should handle
+ /// expressions such as: 'x = (y != 0)'. When this flag is true then
+ /// the subexpression 'y != 0' will be eagerly assumed to be true or false,
+ /// thus evaluating it to the integers 0 or 1 respectively. The upside
+ /// is that this can increase analysis precision until we have a better way
+ /// to lazily evaluate such logic. The downside is that it eagerly
+ /// bifurcates paths.
bool EagerlyAssume;
bool TrimGraph;
- bool InlineCall;
bool EagerlyTrimEGraph;
public:
+ /// \brief The inter-procedural analysis mode.
+ AnalysisIPAMode IPAMode;
+
+ // Settings for inlining tuning.
+ /// \brief The inlining stack depth limit.
+ unsigned InlineMaxStackDepth;
+ /// \brief The max number of basic blocks in a function being inlined.
+ unsigned InlineMaxFunctionSize;
+ /// \brief The mode of function selection used during inlining.
+ AnalysisInliningMode InliningMode;
+
+ /// \brief If true, do not re-analyze paths leading to exhausted nodes with
+ /// a different strategy. We get better code coverage when retry is enabled.
+ bool NoRetryExhausted;
+
+public:
AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
const LangOptions &lang, PathDiagnosticConsumer *pd,
StoreManagerCreator storemgr,
@@ -85,9 +103,14 @@ public:
unsigned maxnodes, unsigned maxvisit,
bool vizdot, bool vizubi, AnalysisPurgeMode purge,
bool eager, bool trim,
- bool inlinecall, bool useUnoptimizedCFG,
+ bool useUnoptimizedCFG,
bool addImplicitDtors, bool addInitializers,
- bool eagerlyTrimEGraph);
+ bool eagerlyTrimEGraph,
+ AnalysisIPAMode ipa,
+ unsigned inlineMaxStack,
+ unsigned inlineMaxFunctionSize,
+ AnalysisInliningMode inliningMode,
+ bool NoRetry);
/// Construct a clone of the given AnalysisManager with the given ASTContext
/// and DiagnosticsEngine.
@@ -97,11 +120,10 @@ public:
~AnalysisManager() { FlushDiagnostics(); }
void ClearContexts() {
- LocCtxMgr.clear();
AnaCtxMgr.clear();
}
- AnalysisContextManager& getAnalysisContextManager() {
+ AnalysisDeclContextManager& getAnalysisDeclContextManager() {
return AnaCtxMgr;
}
@@ -129,8 +151,8 @@ public:
return Diags;
}
- const LangOptions &getLangOptions() const {
- return LangInfo;
+ const LangOptions &getLangOpts() const {
+ return LangOpts;
}
virtual PathDiagnosticConsumer *getPathDiagnosticConsumer() {
@@ -139,7 +161,7 @@ public:
void FlushDiagnostics() {
if (PD.get())
- PD->FlushDiagnostics();
+ PD->FlushDiagnostics(0);
}
unsigned getMaxNodes() const { return MaxNodes; }
@@ -162,11 +184,11 @@ public:
bool shouldEagerlyAssume() const { return EagerlyAssume; }
- bool shouldInlineCall() const { return InlineCall; }
+ bool shouldInlineCall() const { return (IPAMode == Inlining); }
bool hasIndexer() const { return Idxer != 0; }
- AnalysisContext *getAnalysisContextInAnotherTU(const Decl *D);
+ AnalysisDeclContext *getAnalysisDeclContextInAnotherTU(const Decl *D);
CFG *getCFG(Decl const *D) {
return AnaCtxMgr.getContext(D)->getCFG();
@@ -181,38 +203,17 @@ public:
return AnaCtxMgr.getContext(D)->getParentMap();
}
- AnalysisContext *getAnalysisContext(const Decl *D) {
+ AnalysisDeclContext *getAnalysisDeclContext(const Decl *D) {
return AnaCtxMgr.getContext(D);
}
- AnalysisContext *getAnalysisContext(const Decl *D, idx::TranslationUnit *TU) {
+ AnalysisDeclContext *getAnalysisDeclContext(const Decl *D, idx::TranslationUnit *TU) {
return AnaCtxMgr.getContext(D, TU);
}
- const StackFrameContext *getStackFrame(AnalysisContext *Ctx,
- LocationContext const *Parent,
- const Stmt *S,
- const CFGBlock *Blk, unsigned Idx) {
- return LocCtxMgr.getStackFrame(Ctx, Parent, S, Blk, Idx);
- }
-
- // Get the top level stack frame.
- const StackFrameContext *getStackFrame(Decl const *D,
- idx::TranslationUnit *TU) {
- return LocCtxMgr.getStackFrame(AnaCtxMgr.getContext(D, TU), 0, 0, 0, 0);
- }
-
- // Get a stack frame with parent.
- StackFrameContext const *getStackFrame(const Decl *D,
- LocationContext const *Parent,
- const Stmt *S,
- const CFGBlock *Blk, unsigned Idx) {
- return LocCtxMgr.getStackFrame(AnaCtxMgr.getContext(D), Parent, S,
- Blk,Idx);
- }
};
-} // end GR namespace
+} // end ento namespace
} // end clang namespace
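
A toy model of the EagerlyAssume behavior documented above: when the engine sees 'x = (y != 0)' and y's value is unknown, it forks the path and evaluates the subexpression to a concrete 0 or 1 on each branch. PathState is an illustrative stand-in for ProgramState:

#include <iostream>
#include <utility>

struct PathState {
  bool yKnownNonZero; // constraint recorded on this path
  int x;              // concrete value assigned on this path
};

// Bifurcate instead of tracking 'x' lazily as a symbolic value.
std::pair<PathState, PathState> eagerlyAssume(/* y unknown */) {
  PathState onTrue = { true, 1 };   // assume y != 0, so x == 1
  PathState onFalse = { false, 0 }; // assume y == 0, so x == 0
  return std::make_pair(onTrue, onFalse);
}

int main() {
  std::pair<PathState, PathState> paths = eagerlyAssume();
  std::cout << "true branch:  x = " << paths.first.x << "\n"
            << "false branch: x = " << paths.second.x << "\n";
  return 0;
}
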
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
index 42a1537..9a699f9 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -18,17 +18,10 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
-#include "clang/AST/ASTContext.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/ImmutableList.h"
namespace clang {
-
namespace ento {
-class ProgramState;
-
class CompoundValData : public llvm::FoldingSetNode {
QualType T;
llvm::ImmutableList<SVal> L;
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index 1f14787..b051d33 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -15,48 +15,41 @@
#ifndef LLVM_CLANG_SA_CORE_PATHSENSITIVE_CHECKERCONTEXT
#define LLVM_CLANG_SA_CORE_PATHSENSITIVE_CHECKERCONTEXT
-#include "clang/Analysis/Support/SaveAndRestore.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
namespace clang {
-
namespace ento {
class CheckerContext {
- ExplodedNodeSet &Dst;
- StmtNodeBuilder &B;
ExprEngine &Eng;
+ /// The current exploded (symbolic execution) graph node.
ExplodedNode *Pred;
- SaveAndRestore<bool> OldSink;
- SaveOr OldHasGen;
+ /// The flag is true if the state of the execution has been modified by the
+ /// checker using this context. For example, a new transition has been added
+ /// or a bug report has been issued.
+ bool Changed;
+ /// The tagged location, which is used to generate all new nodes.
const ProgramPoint Location;
- const ProgramState *ST;
- const unsigned size;
-public:
- bool *respondsToCallback;
+ NodeBuilder &NB;
+
public:
- CheckerContext(ExplodedNodeSet &dst,
- StmtNodeBuilder &builder,
+ /// If we are post-visiting a call, this flag will be set if the
+ /// call was inlined. In all other cases it will be false.
+ const bool wasInlined;
+
+ CheckerContext(NodeBuilder &builder,
ExprEngine &eng,
ExplodedNode *pred,
const ProgramPoint &loc,
- bool *respondsToCB = 0,
- const ProgramState *st = 0)
- : Dst(dst),
- B(builder),
- Eng(eng),
+ bool wasInlined = false)
+ : Eng(eng),
Pred(pred),
- OldSink(B.BuildSinks),
- OldHasGen(B.hasGeneratedNode),
+ Changed(false),
Location(loc),
- ST(st),
- size(Dst.size()),
- respondsToCallback(respondsToCB) {}
-
- ~CheckerContext();
-
- ExprEngine &getEngine() {
- return Eng;
+ NB(builder),
+ wasInlined(wasInlined) {
+ assert(Pred->getState() &&
+ "We should not call the checkers on an empty state.");
}
AnalysisManager &getAnalysisManager() {
@@ -71,18 +64,34 @@ public:
return Eng.getStoreManager();
}
- ExplodedNodeSet &getNodeSet() { return Dst; }
- ExplodedNode *&getPredecessor() { return Pred; }
- const ProgramState *getState() { return ST ? ST : Pred->getState(); }
+ /// \brief Returns the previous node in the exploded graph, which includes
+ /// the state of the program before the checker ran. Note, checkers should
+ /// not retain the node in their state since the nodes might get invalidated.
+ ExplodedNode *getPredecessor() { return Pred; }
+ ProgramStateRef getState() const { return Pred->getState(); }
+
+ /// \brief Check if the checker changed the state of the execution; for
+ /// example, added a new transition or emitted a bug report.
+ bool isDifferent() { return Changed; }
/// \brief Returns the number of times the current block has been visited
/// along the analyzed path.
- unsigned getCurrentBlockCount() {return B.getCurrentBlockCount();}
+ unsigned getCurrentBlockCount() const {
+ return NB.getContext().getCurrentBlockCount();
+ }
ASTContext &getASTContext() {
return Eng.getContext();
}
-
+
+ const LangOptions &getLangOpts() const {
+ return Eng.getContext().getLangOpts();
+ }
+
+ const LocationContext *getLocationContext() const {
+ return Pred->getLocationContext();
+ }
+
BugReporter &getBugReporter() {
return Eng.getBugReporter();
}
@@ -99,83 +108,130 @@ public:
return getSValBuilder().getSymbolManager();
}
- bool isObjCGCEnabled() {
+ bool isObjCGCEnabled() const {
return Eng.isObjCGCEnabled();
}
- /// \brief Generate a default checker node (containing checker tag but no
+ ProgramStateManager &getStateManager() {
+ return Eng.getStateManager();
+ }
+
+ AnalysisDeclContext *getCurrentAnalysisDeclContext() const {
+ return Pred->getLocationContext()->getAnalysisDeclContext();
+ }
+
+ /// \brief If the given node corresponds to a PostStore program point, retrieve
+ /// the location region as it was uttered in the code.
+ ///
+ /// This utility can be useful for generating extensive diagnostics, for
+ /// example, for finding variables that the given symbol was assigned to.
+ static const MemRegion *getLocationRegionIfPostStore(const ExplodedNode *N) {
+ ProgramPoint L = N->getLocation();
+ if (const PostStore *PSL = dyn_cast<PostStore>(&L))
+ return reinterpret_cast<const MemRegion*>(PSL->getLocationValue());
+ return 0;
+ }
+
+ /// \brief Generates a new transition in the program state graph
+ /// (ExplodedGraph). Uses the default CheckerContext predecessor node.
+ ///
+ /// @param State The state of the generated node.
+ /// @param Tag The tag is used to uniquely identify the creation site. If no
+ /// tag is specified, a default tag, unique to the given checker,
+ /// will be used. Tags are used to prevent states generated at
+ /// different sites from caching out.
+ ExplodedNode *addTransition(ProgramStateRef State,
+ const ProgramPointTag *Tag = 0) {
+ return addTransitionImpl(State, false, 0, Tag);
+ }
+
+ /// \brief Generates a default transition (containing checker tag but no
/// checker state changes).
- ExplodedNode *generateNode(bool autoTransition = true) {
- return generateNode(getState(), autoTransition);
+ ExplodedNode *addTransition() {
+ return addTransition(getState());
}
-
- /// \brief Generate a new checker node with the given predecessor.
+
+ /// \brief Generates a new transition with the given predecessor.
/// Allows checkers to generate a chain of nodes.
- ExplodedNode *generateNode(const ProgramState *state,
- ExplodedNode *pred,
- const ProgramPointTag *tag = 0,
- bool autoTransition = true) {
- ExplodedNode *N = generateNodeImpl(state, false, pred, tag);
- if (N && autoTransition)
- addTransition(N);
- return N;
- }
-
- /// \brief Generate a new checker node.
- ExplodedNode *generateNode(const ProgramState *state,
- bool autoTransition = true,
- const ProgramPointTag *tag = 0) {
- ExplodedNode *N = generateNodeImpl(state, false, 0, tag);
- if (N && autoTransition)
- addTransition(N);
- return N;
+ ///
+ /// @param State The state of the generated node.
+ /// @param Pred The transition will be generated from the specified Pred node
+ /// to the newly generated node.
+ /// @param Tag The tag to uniquely identify the creation site.
+ /// @param IsSink Mark the new node as sink, which will stop exploration of
+ /// the given path.
+ ExplodedNode *addTransition(ProgramStateRef State,
+ ExplodedNode *Pred,
+ const ProgramPointTag *Tag = 0,
+ bool IsSink = false) {
+ return addTransitionImpl(State, IsSink, Pred, Tag);
}
/// \brief Generate a sink node. Generating sink stops exploration of the
/// given path.
- ExplodedNode *generateSink(const ProgramState *state = 0) {
- return generateNodeImpl(state ? state : getState(), true);
- }
-
- void addTransition(ExplodedNode *node) {
- Dst.Add(node);
- }
-
- void addTransition(const ProgramState *state,
- const ProgramPointTag *tag = 0) {
- assert(state);
- // If the 'state' is not new, we need to check if the cached state 'ST'
- // is new.
- if (state != getState() || (ST && ST != Pred->getState()))
- // state is new or equals to ST.
- generateNode(state, true, tag);
- else
- Dst.Add(Pred);
+ ExplodedNode *generateSink(ProgramStateRef state = 0) {
+ return addTransitionImpl(state ? state : getState(), true);
}
+ /// \brief Emit the diagnostics report.
void EmitReport(BugReport *R) {
+ Changed = true;
Eng.getBugReporter().EmitReport(R);
}
- AnalysisContext *getCurrentAnalysisContext() const {
- return Pred->getLocationContext()->getAnalysisContext();
+ /// \brief Get the declaration of the called function (path-sensitive).
+ const FunctionDecl *getCalleeDecl(const CallExpr *CE) const;
+
+ /// \brief Get the name of the called function (path-sensitive).
+ StringRef getCalleeName(const FunctionDecl *FunDecl) const;
+
+ /// \brief Get the name of the called function (path-sensitive).
+ StringRef getCalleeName(const CallExpr *CE) const {
+ const FunctionDecl *FunDecl = getCalleeDecl(CE);
+ return getCalleeName(FunDecl);
}
+ /// Given a function declaration and a name, checks whether this is a C
+ /// library function with the given name.
+ bool isCLibraryFunction(const FunctionDecl *FD, StringRef Name);
+ static bool isCLibraryFunction(const FunctionDecl *FD, StringRef Name,
+ ASTContext &Context);
+
+ /// \brief Depending on whether the location corresponds to a macro, return
+ /// either the macro name or the token spelling.
+ ///
+ /// This could be useful when checkers' logic depends on whether a function
+ /// is called with a given macro argument. For example:
+ /// s = socket(AF_INET,..)
+ /// If AF_INET is a macro, the result should be treated as a source of taint.
+ ///
+ /// \sa clang::Lexer::getSpelling(), clang::Lexer::getImmediateMacroName().
+ StringRef getMacroNameOrSpelling(SourceLocation &Loc);
+
private:
- ExplodedNode *generateNodeImpl(const ProgramState *state,
- bool markAsSink,
- ExplodedNode *pred = 0,
- const ProgramPointTag *tag = 0) {
-
- ExplodedNode *node = B.generateNode(tag ? Location.withTag(tag) : Location,
- state,
- pred ? pred : Pred);
- if (markAsSink && node)
- node->markAsSink();
+ ExplodedNode *addTransitionImpl(ProgramStateRef State,
+ bool MarkAsSink,
+ ExplodedNode *P = 0,
+ const ProgramPointTag *Tag = 0) {
+ if (!State || (State == Pred->getState() && !Tag && !MarkAsSink))
+ return Pred;
+
+ Changed = true;
+ ExplodedNode *node = NB.generateNode(Tag ? Location.withTag(Tag) : Location,
+ State,
+ P ? P : Pred, MarkAsSink);
return node;
}
};
+/// \brief A helper class which wraps a boolean value set to false by default.
+struct DefaultBool {
+ bool Val;
+ DefaultBool() : Val(false) {}
+ operator bool() const { return Val; }
+ DefaultBool &operator=(bool b) { Val = b; return *this; }
+};
+
} // end GR namespace
} // end clang namespace
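
The new addTransitionImpl above boils down to one rule: handing back the predecessor's state with no tag and no sink request creates no node, and the predecessor simply stays on the path. A self-contained sketch of that rule; Node and Ctx are illustrative, and the real code allocates nodes in the ExplodedGraph rather than with new:

#include <cassert>

struct Node { int state; };

struct Ctx {
  Node *Pred;
  bool Changed;
  Node *addTransition(int state, bool markAsSink = false) {
    // Unchanged state and nothing to record: reuse the predecessor.
    if (state == Pred->state && !markAsSink)
      return Pred;
    Changed = true;
    Node *n = new Node;
    n->state = state;
    return n;
  }
};

int main() {
  Node pred = { 42 };
  Ctx c = { &pred, false };
  assert(c.addTransition(42) == &pred && !c.Changed); // cached out
  Node *n = c.addTransition(43);                      // real transition
  assert(n != &pred && c.Changed);
  delete n;
  return 0;
}
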
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
index 3f6ddde..631858d 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
@@ -22,44 +22,45 @@ class APSInt;
}
namespace clang {
-
namespace ento {
-class ProgramState;
-class ProgramStateManager;
class SubEngine;
class ConstraintManager {
public:
virtual ~ConstraintManager();
- virtual const ProgramState *assume(const ProgramState *state,
+ virtual ProgramStateRef assume(ProgramStateRef state,
DefinedSVal Cond,
bool Assumption) = 0;
- std::pair<const ProgramState*, const ProgramState*>
- assumeDual(const ProgramState *state, DefinedSVal Cond)
+ std::pair<ProgramStateRef, ProgramStateRef >
+ assumeDual(ProgramStateRef state, DefinedSVal Cond)
{
- return std::make_pair(assume(state, Cond, true),
- assume(state, Cond, false));
+ std::pair<ProgramStateRef, ProgramStateRef > res =
+ std::make_pair(assume(state, Cond, true), assume(state, Cond, false));
+
+ assert(!(!res.first && !res.second) && "System is over constrained.");
+ return res;
}
- virtual const llvm::APSInt* getSymVal(const ProgramState *state,
+ virtual const llvm::APSInt* getSymVal(ProgramStateRef state,
SymbolRef sym) const = 0;
- virtual bool isEqual(const ProgramState *state,
+ virtual bool isEqual(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V) const = 0;
- virtual const ProgramState *removeDeadBindings(const ProgramState *state,
+ virtual ProgramStateRef removeDeadBindings(ProgramStateRef state,
SymbolReaper& SymReaper) = 0;
- virtual void print(const ProgramState *state,
+ virtual void print(ProgramStateRef state,
raw_ostream &Out,
const char* nl,
const char *sep) = 0;
- virtual void EndPath(const ProgramState *state) {}
+ virtual void EndPath(ProgramStateRef state) {}
+protected:
/// canReasonAbout - Not all ConstraintManagers can accurately reason about
/// all SVal values. This method returns true if the ConstraintManager can
/// reasonably handle a given SVal value. This is typically queried by
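
assumeDual and its new over-constraint assertion are easy to demonstrate against a trivial range-based stand-in for a constraint manager; St and assumePositive are invented for the example:

#include <cassert>
#include <utility>

struct St { int lo, hi; };    // "x lies in [lo, hi]"
static bool feasible(const St &s) { return s.lo <= s.hi; }

// assume(state, x > 0, assumption), specialized to the condition x > 0.
static St assumePositive(St s, bool assumption) {
  if (assumption) { if (s.lo < 1) s.lo = 1; } // constrain to x >= 1
  else            { if (s.hi > 0) s.hi = 0; } // constrain to x <= 0
  return s;
}

static std::pair<St, St> assumeDual(St s) {
  std::pair<St, St> res =
      std::make_pair(assumePositive(s, true), assumePositive(s, false));
  assert((feasible(res.first) || feasible(res.second)) &&
         "System is over constrained.");
  return res;
}

int main() {
  St s = { -5, 5 };
  std::pair<St, St> r = assumeDual(s);
  assert(feasible(r.first) && feasible(r.second)); // both branches possible
  return 0;
}
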
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index 131d39e..59136fc 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -16,7 +16,9 @@
#define LLVM_CLANG_GR_COREENGINE
#include "clang/AST/Expr.h"
+#include "clang/Analysis/AnalysisContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
#include "llvm/ADT/OwningPtr.h"
@@ -27,6 +29,8 @@ class ProgramPointTag;
namespace ento {
+class NodeBuilder;
+
//===----------------------------------------------------------------------===//
/// CoreEngine - Implements the core logic of the graph-reachability
/// analysis. It traverses the CFG and generates the ExplodedGraph.
@@ -37,15 +41,13 @@ namespace ento {
/// at the statement and block-level. The analyses themselves must implement
/// any transfer function logic and the sub-expression level (if any).
class CoreEngine {
- friend class StmtNodeBuilder;
- friend class GenericNodeBuilderImpl;
- friend class BranchNodeBuilder;
+ friend struct NodeBuilderContext;
+ friend class NodeBuilder;
+ friend class ExprEngine;
+ friend class CommonNodeBuilder;
friend class IndirectGotoNodeBuilder;
friend class SwitchNodeBuilder;
friend class EndOfFunctionNodeBuilder;
- friend class CallEnterNodeBuilder;
- friend class CallExitNodeBuilder;
-
public:
typedef std::vector<std::pair<BlockEdge, const ExplodedNode*> >
BlocksExhausted;
@@ -58,7 +60,7 @@ private:
SubEngine& SubEng;
/// G - The simulation graph. Each node is a (location,state) pair.
- llvm::OwningPtr<ExplodedGraph> G;
+ OwningPtr<ExplodedGraph> G;
/// WList - A set of queued nodes that need to be processed by the
/// worklist algorithm. It is up to the implementation of WList to decide
@@ -78,8 +80,16 @@ private:
/// usually because it could not reason about something.
BlocksAborted blocksAborted;
+ /// The functions which have been analyzed through inlining. This is owned by
+ /// AnalysisConsumer. It can be null.
+ SetOfConstDecls *AnalyzedCallees;
+
+ /// The information about functions shared by the whole translation unit.
+ /// (This data is owned by AnalysisConsumer.)
+ FunctionSummariesTy *FunctionSummaries;
+
void generateNode(const ProgramPoint &Loc,
- const ProgramState *State,
+ ProgramStateRef State,
ExplodedNode *Pred);
void HandleBlockEdge(const BlockEdge &E, ExplodedNode *Pred);
@@ -89,28 +99,23 @@ private:
void HandleBranch(const Stmt *Cond, const Stmt *Term, const CFGBlock *B,
ExplodedNode *Pred);
- void HandleCallEnter(const CallEnter &L, const CFGBlock *Block,
- unsigned Index, ExplodedNode *Pred);
- void HandleCallExit(const CallExit &L, ExplodedNode *Pred);
private:
CoreEngine(const CoreEngine&); // Do not implement.
CoreEngine& operator=(const CoreEngine&);
+ ExplodedNode *generateCallExitNode(ExplodedNode *N);
+
public:
/// Construct a CoreEngine object to analyze the provided CFG using
/// a DFS exploration of the exploded graph.
- CoreEngine(SubEngine& subengine)
+ CoreEngine(SubEngine& subengine, SetOfConstDecls *VisitedCallees,
+ FunctionSummariesTy *FS)
: SubEng(subengine), G(new ExplodedGraph()),
WList(WorkList::makeBFS()),
- BCounterFactory(G->getAllocator()) {}
-
- /// Construct a CoreEngine object to analyze the provided CFG and to
- /// use the provided worklist object to execute the worklist algorithm.
- /// The CoreEngine object assumes ownership of 'wlist'.
- CoreEngine(WorkList* wlist, SubEngine& subengine)
- : SubEng(subengine), G(new ExplodedGraph()), WList(wlist),
- BCounterFactory(G->getAllocator()) {}
+ BCounterFactory(G->getAllocator()),
+ AnalyzedCallees(VisitedCallees),
+ FunctionSummaries(FS){}
~CoreEngine() {
delete WList;
@@ -126,12 +131,18 @@ public:
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of
/// steps. Returns true if there is still simulation state on the worklist.
bool ExecuteWorkList(const LocationContext *L, unsigned Steps,
- const ProgramState *InitState);
- void ExecuteWorkListWithInitialState(const LocationContext *L,
+ ProgramStateRef InitState);
+ /// Returns true if there is still simulation state on the worklist.
+ bool ExecuteWorkListWithInitialState(const LocationContext *L,
unsigned Steps,
- const ProgramState *InitState,
+ ProgramStateRef InitState,
ExplodedNodeSet &Dst);
+ /// Dispatch the work list item based on the given location information.
+ /// Use the Pred parameter as the predecessor node.
+ void dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
+ const WorkListUnit& WU);
+
// Functions for external checking of whether we have unfinished work
bool wasBlockAborted() const { return !blocksAborted.empty(); }
bool wasBlocksExhausted() const { return !blocksExhausted.empty(); }
@@ -159,160 +170,254 @@ public:
BlocksAborted::const_iterator blocks_aborted_end() const {
return blocksAborted.end();
}
+
+ /// \brief Enqueue the given set of nodes onto the work list.
+ void enqueue(ExplodedNodeSet &Set);
+
+ /// \brief Enqueue nodes that were created as a result of processing
+ /// a statement onto the work list.
+ void enqueue(ExplodedNodeSet &Set, const CFGBlock *Block, unsigned Idx);
+
+ /// \brief Enqueue the nodes corresponding to the end of a function onto
+ /// the end-of-path work list.
+ void enqueueEndOfFunction(ExplodedNodeSet &Set);
+
+ /// \brief Enqueue a single node created as a result of statement processing.
+ void enqueueStmtNode(ExplodedNode *N, const CFGBlock *Block, unsigned Idx);
};
-class StmtNodeBuilder {
- CoreEngine& Eng;
- const CFGBlock &B;
- const unsigned Idx;
+// TODO: Turn into a class.
+struct NodeBuilderContext {
+ CoreEngine &Eng;
+ const CFGBlock *Block;
ExplodedNode *Pred;
+ NodeBuilderContext(CoreEngine &E, const CFGBlock *B, ExplodedNode *N)
+ : Eng(E), Block(B), Pred(N) { assert(B); assert(!N->isSink()); }
+ ExplodedNode *getPred() const { return Pred; }
-public:
- bool PurgingDeadSymbols;
- bool BuildSinks;
- bool hasGeneratedNode;
- ProgramPoint::Kind PointKind;
- const ProgramPointTag *Tag;
+ /// \brief Return the CFGBlock associated with this builder.
+ const CFGBlock *getBlock() const { return Block; }
- typedef llvm::SmallPtrSet<ExplodedNode*,5> DeferredTy;
- DeferredTy Deferred;
+ /// \brief Returns the number of times the current basic block has been
+ /// visited on the exploded graph path.
+ unsigned getCurrentBlockCount() const {
+ return Eng.WList->getBlockCounter().getNumVisited(
+ Pred->getLocationContext()->getCurrentStackFrame(),
+ Block->getBlockID());
+ }
+};
- void GenerateAutoTransition(ExplodedNode *N);
+/// \class NodeBuilder
+/// \brief This is the simplest builder which generates nodes in the
+/// ExplodedGraph.
+///
+/// The main benefit of the builder is that it automatically tracks the
+/// frontier nodes (or destination set). This is the set of nodes which should
+/// be propagated to the next step / builder. They are the nodes which have been
+/// added to the builder (either as the input node set or as the newly
+/// constructed nodes) but did not have any outgoing transitions added.
+class NodeBuilder {
+ virtual void anchor();
+protected:
+ const NodeBuilderContext &C;
+
+ /// Specifies if the builder results have been finalized. For example, if it
+ /// is set to false, autotransitions are yet to be generated.
+ bool Finalized;
+ bool HasGeneratedNodes;
+ /// \brief The frontier set - a set of nodes which need to be propagated after
+ /// the builder dies.
+ ExplodedNodeSet &Frontier;
+
+ /// Checks if the results are ready.
+ virtual bool checkResults() {
+ if (!Finalized)
+ return false;
+ return true;
+ }
-public:
- StmtNodeBuilder(const CFGBlock *b,
- unsigned idx,
- ExplodedNode *N,
- CoreEngine* e);
+ bool hasNoSinksInFrontier() {
+ for (iterator I = Frontier.begin(), E = Frontier.end(); I != E; ++I) {
+ if ((*I)->isSink())
+ return false;
+ }
+ return true;
+ }
- ~StmtNodeBuilder();
+ /// Allow subclasses to finalize results before begin() is executed.
+ virtual void finalizeResults() {}
+
+ ExplodedNode *generateNodeImpl(const ProgramPoint &PP,
+ ProgramStateRef State,
+ ExplodedNode *Pred,
+ bool MarkAsSink = false);
- ExplodedNode *getPredecessor() const { return Pred; }
+public:
+ NodeBuilder(ExplodedNode *SrcNode, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, bool F = true)
+ : C(Ctx), Finalized(F), HasGeneratedNodes(false), Frontier(DstSet) {
+ Frontier.Add(SrcNode);
+ }
- // FIXME: This should not be exposed.
- WorkList *getWorkList() { return Eng.WList; }
+ NodeBuilder(const ExplodedNodeSet &SrcSet, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, bool F = true)
+ : C(Ctx), Finalized(F), HasGeneratedNodes(false), Frontier(DstSet) {
+ Frontier.insert(SrcSet);
+ assert(hasNoSinksInFrontier());
+ }
- BlockCounter getBlockCounter() const { return Eng.WList->getBlockCounter();}
+ virtual ~NodeBuilder() {}
- unsigned getCurrentBlockCount() const {
- return getBlockCounter().getNumVisited(
- Pred->getLocationContext()->getCurrentStackFrame(),
- B.getBlockID());
+ /// \brief Generates a node in the ExplodedGraph.
+ ///
+ /// When a node is marked as sink, the exploration from the node is stopped -
+ /// the node becomes the last node on the path.
+ ExplodedNode *generateNode(const ProgramPoint &PP,
+ ProgramStateRef State,
+ ExplodedNode *Pred,
+ bool MarkAsSink = false) {
+ return generateNodeImpl(PP, State, Pred, MarkAsSink);
}
- ExplodedNode *generateNode(const Stmt *S,
- const ProgramState *St,
- ExplodedNode *Pred,
- ProgramPoint::Kind K,
- const ProgramPointTag *tag = 0) {
- hasGeneratedNode = true;
+ const ExplodedNodeSet &getResults() {
+ finalizeResults();
+ assert(checkResults());
+ return Frontier;
+ }
+
+ typedef ExplodedNodeSet::iterator iterator;
+ /// \brief Iterators over the results frontier.
+ inline iterator begin() {
+ finalizeResults();
+ assert(checkResults());
+ return Frontier.begin();
+ }
+ inline iterator end() {
+ finalizeResults();
+ return Frontier.end();
+ }
- if (PurgingDeadSymbols)
- K = ProgramPoint::PostPurgeDeadSymbolsKind;
+ const NodeBuilderContext &getContext() { return C; }
+ bool hasGeneratedNodes() { return HasGeneratedNodes; }
- return generateNodeInternal(S, St, Pred, K, tag ? tag : Tag);
+ void takeNodes(const ExplodedNodeSet &S) {
+ for (ExplodedNodeSet::iterator I = S.begin(), E = S.end(); I != E; ++I )
+ Frontier.erase(*I);
}
+ void takeNodes(ExplodedNode *N) { Frontier.erase(N); }
+ void addNodes(const ExplodedNodeSet &S) { Frontier.insert(S); }
+ void addNodes(ExplodedNode *N) { Frontier.Add(N); }
+};
- ExplodedNode *generateNode(const Stmt *S,
- const ProgramState *St,
+/// \class NodeBuilderWithSinks
+/// \brief This node builder keeps track of the generated sink nodes.
+class NodeBuilderWithSinks: public NodeBuilder {
+ virtual void anchor();
+protected:
+ SmallVector<ExplodedNode*, 2> sinksGenerated;
+ ProgramPoint &Location;
+
+public:
+ NodeBuilderWithSinks(ExplodedNode *Pred, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, ProgramPoint &L)
+ : NodeBuilder(Pred, DstSet, Ctx), Location(L) {}
+ ExplodedNode *generateNode(ProgramStateRef State,
ExplodedNode *Pred,
- const ProgramPointTag *tag = 0) {
- return generateNode(S, St, Pred, PointKind, tag);
- }
+ const ProgramPointTag *Tag = 0,
+ bool MarkAsSink = false) {
+ ProgramPoint LocalLoc = (Tag ? Location.withTag(Tag): Location);
- ExplodedNode *generateNode(const ProgramPoint &PP,
- const ProgramState *State,
- ExplodedNode *Pred) {
- hasGeneratedNode = true;
- return generateNodeInternal(PP, State, Pred);
+ ExplodedNode *N = generateNodeImpl(LocalLoc, State, Pred, MarkAsSink);
+ if (N && N->isSink())
+ sinksGenerated.push_back(N);
+ return N;
}
- ExplodedNode*
- generateNodeInternal(const ProgramPoint &PP,
- const ProgramState *State,
- ExplodedNode *Pred);
-
- ExplodedNode*
- generateNodeInternal(const Stmt *S,
- const ProgramState *State,
- ExplodedNode *Pred,
- ProgramPoint::Kind K,
- const ProgramPointTag *tag = 0);
-
- /// getStmt - Return the current block-level expression associated with
- /// this builder.
- const Stmt *getStmt() const {
- const CFGStmt *CS = B[Idx].getAs<CFGStmt>();
- return CS ? CS->getStmt() : 0;
+ const SmallVectorImpl<ExplodedNode*> &getSinks() const {
+ return sinksGenerated;
}
+};
- /// getBlock - Return the CFGBlock associated with the block-level expression
- /// of this builder.
- const CFGBlock *getBlock() const { return &B; }
+/// \class StmtNodeBuilder
+/// \brief This builder class is useful for generating nodes that resulted from
+/// visiting a statement. The main difference from its parent NodeBuilder is
+/// that it creates a statement specific ProgramPoint.
+class StmtNodeBuilder: public NodeBuilder {
+ NodeBuilder *EnclosingBldr;
+public:
- unsigned getIndex() const { return Idx; }
+ /// \brief Constructs a StmtNodeBuilder. If the builder is going to process
+ /// nodes currently owned by another builder (with larger scope), use the
+ /// enclosing builder to transfer ownership.
+ StmtNodeBuilder(ExplodedNode *SrcNode, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, NodeBuilder *Enclosing = 0)
+ : NodeBuilder(SrcNode, DstSet, Ctx), EnclosingBldr(Enclosing) {
+ if (EnclosingBldr)
+ EnclosingBldr->takeNodes(SrcNode);
+ }
- ExplodedNode *MakeNode(ExplodedNodeSet &Dst,
- const Stmt *S,
- ExplodedNode *Pred,
- const ProgramState *St) {
- return MakeNode(Dst, S, Pred, St, PointKind);
+ StmtNodeBuilder(ExplodedNodeSet &SrcSet, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, NodeBuilder *Enclosing = 0)
+ : NodeBuilder(SrcSet, DstSet, Ctx), EnclosingBldr(Enclosing) {
+ if (EnclosingBldr)
+ for (ExplodedNodeSet::iterator I = SrcSet.begin(),
+ E = SrcSet.end(); I != E; ++I )
+ EnclosingBldr->takeNodes(*I);
}
- ExplodedNode *MakeNode(ExplodedNodeSet &Dst,
- const Stmt *S,
- ExplodedNode *Pred,
- const ProgramState *St,
- ProgramPoint::Kind K);
+ virtual ~StmtNodeBuilder();
- ExplodedNode *MakeSinkNode(ExplodedNodeSet &Dst,
- const Stmt *S,
+ ExplodedNode *generateNode(const Stmt *S,
ExplodedNode *Pred,
- const ProgramState *St) {
- bool Tmp = BuildSinks;
- BuildSinks = true;
- ExplodedNode *N = MakeNode(Dst, S, Pred, St);
- BuildSinks = Tmp;
- return N;
+ ProgramStateRef St,
+ bool MarkAsSink = false,
+ const ProgramPointTag *tag = 0,
+ ProgramPoint::Kind K = ProgramPoint::PostStmtKind){
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+ Pred->getLocationContext(), tag);
+ return generateNodeImpl(L, St, Pred, MarkAsSink);
+ }
+
+ ExplodedNode *generateNode(const ProgramPoint &PP,
+ ExplodedNode *Pred,
+ ProgramStateRef State,
+ bool MarkAsSink = false) {
+ return generateNodeImpl(PP, State, Pred, MarkAsSink);
}
};
-class BranchNodeBuilder {
- CoreEngine& Eng;
- const CFGBlock *Src;
+/// \brief BranchNodeBuilder is responsible for constructing the nodes
+/// corresponding to the two branches of an if statement - true and false.
+class BranchNodeBuilder: public NodeBuilder {
+ virtual void anchor();
const CFGBlock *DstT;
const CFGBlock *DstF;
- ExplodedNode *Pred;
- typedef SmallVector<ExplodedNode*,3> DeferredTy;
- DeferredTy Deferred;
-
- bool GeneratedTrue;
- bool GeneratedFalse;
bool InFeasibleTrue;
bool InFeasibleFalse;
public:
- BranchNodeBuilder(const CFGBlock *src, const CFGBlock *dstT,
- const CFGBlock *dstF, ExplodedNode *pred, CoreEngine* e)
- : Eng(*e), Src(src), DstT(dstT), DstF(dstF), Pred(pred),
- GeneratedTrue(false), GeneratedFalse(false),
- InFeasibleTrue(!DstT), InFeasibleFalse(!DstF) {}
-
- ~BranchNodeBuilder();
-
- ExplodedNode *getPredecessor() const { return Pred; }
-
- const ExplodedGraph& getGraph() const { return *Eng.G; }
-
- BlockCounter getBlockCounter() const { return Eng.WList->getBlockCounter();}
+ BranchNodeBuilder(ExplodedNode *SrcNode, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &C,
+ const CFGBlock *dstT, const CFGBlock *dstF)
+ : NodeBuilder(SrcNode, DstSet, C), DstT(dstT), DstF(dstF),
+ InFeasibleTrue(!DstT), InFeasibleFalse(!DstF) {
+ // The branch node builder does not generate autotransitions.
+ // If there are no successors it means that both branches are infeasible.
+ takeNodes(SrcNode);
+ }
- /// This function generates a new ExplodedNode but not a new
- /// branch(block edge).
- ExplodedNode *generateNode(const Stmt *Condition, const ProgramState *State);
+ BranchNodeBuilder(const ExplodedNodeSet &SrcSet, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &C,
+ const CFGBlock *dstT, const CFGBlock *dstF)
+ : NodeBuilder(SrcSet, DstSet, C), DstT(dstT), DstF(dstF),
+ InFeasibleTrue(!DstT), InFeasibleFalse(!DstF) {
+ takeNodes(SrcSet);
+ }
- ExplodedNode *generateNode(const ProgramState *State, bool branch);
+ ExplodedNode *generateNode(ProgramStateRef State, bool branch,
+ ExplodedNode *Pred);
const CFGBlock *getTargetBlock(bool branch) const {
return branch ? DstT : DstF;
@@ -320,18 +425,14 @@ public:
void markInfeasible(bool branch) {
if (branch)
- InFeasibleTrue = GeneratedTrue = true;
+ InFeasibleTrue = true;
else
- InFeasibleFalse = GeneratedFalse = true;
+ InFeasibleFalse = true;
}
bool isFeasible(bool branch) {
return branch ? !InFeasibleTrue : !InFeasibleFalse;
}
-
- const ProgramState *getState() const {
- return getPredecessor()->getState();
- }
};
class IndirectGotoNodeBuilder {
@@ -369,12 +470,16 @@ public:
iterator end() { return iterator(DispatchBlock.succ_end()); }
ExplodedNode *generateNode(const iterator &I,
- const ProgramState *State,
+ ProgramStateRef State,
bool isSink = false);
const Expr *getTarget() const { return E; }
- const ProgramState *getState() const { return Pred->State; }
+ ProgramStateRef getState() const { return Pred->State; }
+
+ const LocationContext *getLocationContext() const {
+ return Pred->getLocationContext();
+ }
};
class SwitchNodeBuilder {
@@ -416,167 +521,21 @@ public:
}
ExplodedNode *generateCaseStmtNode(const iterator &I,
- const ProgramState *State);
+ ProgramStateRef State);
- ExplodedNode *generateDefaultCaseNode(const ProgramState *State,
+ ExplodedNode *generateDefaultCaseNode(ProgramStateRef State,
bool isSink = false);
const Expr *getCondition() const { return Condition; }
- const ProgramState *getState() const { return Pred->State; }
-};
-
-class GenericNodeBuilderImpl {
-protected:
- CoreEngine &engine;
- ExplodedNode *pred;
- ProgramPoint pp;
- SmallVector<ExplodedNode*, 2> sinksGenerated;
-
- ExplodedNode *generateNodeImpl(const ProgramState *state,
- ExplodedNode *pred,
- ProgramPoint programPoint,
- bool asSink);
-
- GenericNodeBuilderImpl(CoreEngine &eng, ExplodedNode *pr, ProgramPoint p)
- : engine(eng), pred(pr), pp(p), hasGeneratedNode(false) {}
-
-public:
- bool hasGeneratedNode;
-
- WorkList &getWorkList() { return *engine.WList; }
-
- ExplodedNode *getPredecessor() const { return pred; }
+ ProgramStateRef getState() const { return Pred->State; }
- BlockCounter getBlockCounter() const {
- return engine.WList->getBlockCounter();
- }
-
- const SmallVectorImpl<ExplodedNode*> &sinks() const {
- return sinksGenerated;
- }
-};
-
-template <typename PP_T>
-class GenericNodeBuilder : public GenericNodeBuilderImpl {
-public:
- GenericNodeBuilder(CoreEngine &eng, ExplodedNode *pr, const PP_T &p)
- : GenericNodeBuilderImpl(eng, pr, p) {}
-
- ExplodedNode *generateNode(const ProgramState *state, ExplodedNode *pred,
- const ProgramPointTag *tag, bool asSink) {
- return generateNodeImpl(state, pred, cast<PP_T>(pp).withTag(tag),
- asSink);
- }
-
- const PP_T &getProgramPoint() const { return cast<PP_T>(pp); }
-};
-
-class EndOfFunctionNodeBuilder {
- CoreEngine &Eng;
- const CFGBlock &B;
- ExplodedNode *Pred;
- const ProgramPointTag *Tag;
-
-public:
- bool hasGeneratedNode;
-
-public:
- EndOfFunctionNodeBuilder(const CFGBlock *b, ExplodedNode *N, CoreEngine* e,
- const ProgramPointTag *tag = 0)
- : Eng(*e), B(*b), Pred(N), Tag(tag), hasGeneratedNode(false) {}
-
- ~EndOfFunctionNodeBuilder();
-
- EndOfFunctionNodeBuilder withCheckerTag(const ProgramPointTag *tag) {
- return EndOfFunctionNodeBuilder(&B, Pred, &Eng, tag);
- }
-
- WorkList &getWorkList() { return *Eng.WList; }
-
- ExplodedNode *getPredecessor() const { return Pred; }
-
- BlockCounter getBlockCounter() const {
- return Eng.WList->getBlockCounter();
- }
-
- unsigned getCurrentBlockCount() const {
- return getBlockCounter().getNumVisited(
- Pred->getLocationContext()->getCurrentStackFrame(),
- B.getBlockID());
- }
-
- ExplodedNode *generateNode(const ProgramState *State,
- ExplodedNode *P = 0,
- const ProgramPointTag *tag = 0);
-
- void GenerateCallExitNode(const ProgramState *state);
-
- const CFGBlock *getBlock() const { return &B; }
-
- const ProgramState *getState() const {
- return getPredecessor()->getState();
- }
-};
-
-class CallEnterNodeBuilder {
- CoreEngine &Eng;
-
- const ExplodedNode *Pred;
-
- // The call site. For implicit automatic object dtor, this is the trigger
- // statement.
- const Stmt *CE;
-
- // The context of the callee.
- const StackFrameContext *CalleeCtx;
-
- // The parent block of the CallExpr.
- const CFGBlock *Block;
-
- // The CFGBlock index of the CallExpr.
- unsigned Index;
-
-public:
- CallEnterNodeBuilder(CoreEngine &eng, const ExplodedNode *pred,
- const Stmt *s, const StackFrameContext *callee,
- const CFGBlock *blk, unsigned idx)
- : Eng(eng), Pred(pred), CE(s), CalleeCtx(callee), Block(blk), Index(idx) {}
-
- const ProgramState *getState() const { return Pred->getState(); }
-
- const LocationContext *getLocationContext() const {
+ const LocationContext *getLocationContext() const {
return Pred->getLocationContext();
}
-
- const Stmt *getCallExpr() const { return CE; }
-
- const StackFrameContext *getCalleeContext() const { return CalleeCtx; }
-
- const CFGBlock *getBlock() const { return Block; }
-
- unsigned getIndex() const { return Index; }
-
- void generateNode(const ProgramState *state);
};
-class CallExitNodeBuilder {
- CoreEngine &Eng;
- const ExplodedNode *Pred;
-
-public:
- CallExitNodeBuilder(CoreEngine &eng, const ExplodedNode *pred)
- : Eng(eng), Pred(pred) {}
-
- const ExplodedNode *getPredecessor() const { return Pred; }
-
- const ProgramState *getState() const { return Pred->getState(); }
-
- void generateNode(const ProgramState *state);
-};
-
-} // end GR namespace
-
+} // end ento namespace
} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
index 2463e23..b80213e 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_GR_ENVIRONMENT_H
#define LLVM_CLANG_GR_ENVIRONMENT_H
+#include "clang/Analysis/AnalysisContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/ADT/ImmutableMap.h"
@@ -26,15 +27,39 @@ namespace ento {
class EnvironmentManager;
class SValBuilder;
-/// Environment - An immutable map from Stmts to their current
-/// symbolic values (SVals).
-///
+/// An entry in the environment consists of a Stmt and a LocationContext.
+/// This allows the environment to manage context-sensitive bindings,
+/// which is essential for modeling recursive function analysis, among
+/// other things.
+class EnvironmentEntry : public std::pair<const Stmt*,
+ const StackFrameContext *> {
+public:
+ EnvironmentEntry(const Stmt *s, const LocationContext *L)
+ : std::pair<const Stmt*,
+ const StackFrameContext*>(s, L ? L->getCurrentStackFrame():0) {}
+
+ const Stmt *getStmt() const { return first; }
+ const LocationContext *getLocationContext() const { return second; }
+
+ /// Profile an EnvironmentEntry for inclusion in a FoldingSet.
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const EnvironmentEntry &E) {
+ ID.AddPointer(E.getStmt());
+ ID.AddPointer(E.getLocationContext());
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, *this);
+ }
+};
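
A toy illustration of why keying bindings on (Stmt, StackFrameContext) buys
context sensitivity; Stmt and StackFrameContext are reduced to empty stand-in
structs, and an int stands in for SVal:

#include <cassert>
#include <map>
#include <utility>

struct Stmt {};
struct StackFrameContext {};

// The binding key is (statement, stack frame), so the same expression
// evaluated in two activations of a recursive function gets two
// independent bindings.
typedef std::pair<const Stmt *, const StackFrameContext *> Entry;

int main() {
  Stmt E;                          // one expression in the source
  StackFrameContext Outer, Inner;  // two activations of the same function
  std::map<Entry, int> Bindings;
  Bindings[Entry(&E, &Outer)] = 1;
  Bindings[Entry(&E, &Inner)] = 2;
  assert(Bindings.size() == 2);    // distinct keys per calling context
}
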
+
+/// An immutable map from EnvironmentEntries to SVals.
class Environment {
private:
friend class EnvironmentManager;
// Type definitions.
- typedef llvm::ImmutableMap<const Stmt*,SVal> BindingsTy;
+ typedef llvm::ImmutableMap<EnvironmentEntry, SVal> BindingsTy;
// Data.
BindingsTy ExprBindings;
@@ -42,18 +67,18 @@ private:
Environment(BindingsTy eb)
: ExprBindings(eb) {}
- SVal lookupExpr(const Stmt *E) const;
+ SVal lookupExpr(const EnvironmentEntry &E) const;
public:
typedef BindingsTy::iterator iterator;
iterator begin() const { return ExprBindings.begin(); }
iterator end() const { return ExprBindings.end(); }
-
- /// getSVal - Fetches the current binding of the expression in the
- /// Environment.
- SVal getSVal(const Stmt *Ex, SValBuilder& svalBuilder,
- bool useOnlyDirectBindings = false) const;
+ /// Fetches the current binding of the expression in the
+ /// Environment.
+ SVal getSVal(const EnvironmentEntry &E,
+ SValBuilder &svalBuilder,
+ bool useOnlyDirectBindings = false) const;
/// Profile - Profile the contents of an Environment object for use
/// in a FoldingSet.
@@ -70,6 +95,12 @@ public:
bool operator==(const Environment& RHS) const {
return ExprBindings == RHS.ExprBindings;
}
+
+ void print(raw_ostream &Out, const char *NL, const char *Sep) const;
+
+private:
+ void printAux(raw_ostream &Out, bool printLocations,
+ const char *NL, const char *Sep) const;
};
class EnvironmentManager {
@@ -85,17 +116,20 @@ public:
return Environment(F.getEmptyMap());
}
- /// Bind the value 'V' to the statement 'S'.
- Environment bindExpr(Environment Env, const Stmt *S, SVal V,
+ /// Bind a symbolic value to the given environment entry.
+ Environment bindExpr(Environment Env, const EnvironmentEntry &E, SVal V,
bool Invalidate);
- /// Bind the location 'location' and value 'V' to the statement 'S'. This
- /// is used when simulating loads/stores.
- Environment bindExprAndLocation(Environment Env, const Stmt *S, SVal location,
+ /// Bind the location 'location' and value 'V' to the specified
+ /// environment entry.
+ Environment bindExprAndLocation(Environment Env,
+ const EnvironmentEntry &E,
+ SVal location,
SVal V);
Environment removeDeadBindings(Environment Env,
- SymbolReaper &SymReaper, const ProgramState *ST);
+ SymbolReaper &SymReaper,
+ ProgramStateRef state);
};
} // end GR namespace
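
EnvironmentManager::bindExpr returns a fresh Environment instead of mutating
the receiver. A toy model of that functional-update style, with std::map
standing in for llvm::ImmutableMap and invented key/value types:

#include <cassert>
#include <map>
#include <string>

typedef std::map<std::string, int> Env;  // key and SVal stand-ins

// E is taken by value, so the caller's environment is untouched.
Env bindExpr(Env E, const std::string &Key, int Val) {
  E[Key] = Val;
  return E;
}

int main() {
  Env Old;
  Env New = bindExpr(Old, "x + y", 7);
  assert(Old.empty());         // prior state stays valid for other nodes
  assert(New["x + y"] == 7);   // the successor state sees the binding
}
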
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
index fdfed3d..46fbb88 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
@@ -32,6 +32,7 @@
#include "llvm/Support/Casting.h"
#include "clang/Analysis/Support/BumpVector.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include <vector>
namespace clang {
@@ -53,7 +54,7 @@ class ExplodedGraph;
class ExplodedNode : public llvm::FoldingSetNode {
friend class ExplodedGraph;
friend class CoreEngine;
- friend class StmtNodeBuilder;
+ friend class NodeBuilder;
friend class BranchNodeBuilder;
friend class IndirectGotoNodeBuilder;
friend class SwitchNodeBuilder;
@@ -106,7 +107,7 @@ class ExplodedNode : public llvm::FoldingSetNode {
const ProgramPoint Location;
/// State - The state associated with this node.
- const ProgramState *State;
+ ProgramStateRef State;
/// Preds - The predecessors of this node.
NodeGroup Preds;
@@ -116,14 +117,14 @@ class ExplodedNode : public llvm::FoldingSetNode {
public:
- explicit ExplodedNode(const ProgramPoint &loc, const ProgramState *state)
+ explicit ExplodedNode(const ProgramPoint &loc, ProgramStateRef state,
+ bool IsSink)
: Location(loc), State(state) {
- const_cast<ProgramState*>(State)->incrementReferenceCount();
+ if (IsSink)
+ Succs.setFlag();
}
- ~ExplodedNode() {
- const_cast<ProgramState*>(State)->decrementReferenceCount();
- }
+ ~ExplodedNode() {}
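
The explicit increment/decrementReferenceCount calls disappear because
ProgramStateRef is a reference-counting smart pointer that does the
bookkeeping itself. A minimal sketch of that shape (not the real
IntrusiveRefCntPtr, just the idea):

#include <cassert>

struct State {
  unsigned RefCnt;
  State() : RefCnt(0) {}
};

// Bumps the count on construction and copy, drops it on destruction, so
// the owning node never touches the count directly.
class StateRef {
  State *P;
public:
  explicit StateRef(State *p) : P(p) { if (P) ++P->RefCnt; }
  StateRef(const StateRef &O) : P(O.P) { if (P) ++P->RefCnt; }
  ~StateRef() { if (P) --P->RefCnt; }
  State *getPtr() const { return P; }  // what Profile() hashes below
private:
  StateRef &operator=(const StateRef &);  // omitted in this sketch
};

int main() {
  State S;
  {
    StateRef A(&S);
    StateRef B(A);          // a second node sharing the same state
    assert(S.RefCnt == 2);
  }
  assert(S.RefCnt == 0);    // both owners gone; the state can be freed
}
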
/// getLocation - Returns the edge associated with the given node.
ProgramPoint getLocation() const { return Location; }
@@ -143,19 +144,22 @@ public:
return *getLocationContext()->getAnalysis<T>();
}
- const ProgramState *getState() const { return State; }
+ ProgramStateRef getState() const { return State; }
template <typename T>
const T* getLocationAs() const { return llvm::dyn_cast<T>(&Location); }
static void Profile(llvm::FoldingSetNodeID &ID,
- const ProgramPoint &Loc, const ProgramState *state) {
+ const ProgramPoint &Loc,
+ ProgramStateRef state,
+ bool IsSink) {
ID.Add(Loc);
- ID.AddPointer(state);
+ ID.AddPointer(state.getPtr());
+ ID.AddBoolean(IsSink);
}
void Profile(llvm::FoldingSetNodeID& ID) const {
- Profile(ID, getLocation(), getState());
+ Profile(ID, getLocation(), getState(), isSink());
}
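
Because Profile() now hashes the sink flag as well, a sink node and a
non-sink node can coexist for the same (location, state) pair. A tiny model
of that identity change, with an int for the program point and a raw pointer
for the state:

#include <cassert>
#include <set>
#include <utility>

int main() {
  typedef std::pair<std::pair<int, const void *>, bool> NodeKey;
  std::set<NodeKey> Nodes;
  int Loc = 42, State = 0;
  Nodes.insert(NodeKey(std::make_pair(Loc, &State), false));
  Nodes.insert(NodeKey(std::make_pair(Loc, &State), true));
  assert(Nodes.size() == 2);  // the IsSink bit makes the keys distinct
}
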
  /// addPredecessor - Adds a predecessor to the current node, and
@@ -168,7 +172,10 @@ public:
bool pred_empty() const { return Preds.empty(); }
bool isSink() const { return Succs.getFlag(); }
- void markAsSink() { Succs.setFlag(); }
+
+ bool hasSinglePred() const {
+ return (pred_size() == 1);
+ }
ExplodedNode *getFirstPred() {
return pred_empty() ? NULL : *(pred_begin());
@@ -223,6 +230,7 @@ private:
// FIXME: Is this class necessary?
class InterExplodedGraphMap {
+ virtual void anchor();
llvm::DenseMap<const ExplodedNode*, ExplodedNode*> M;
friend class ExplodedGraph;
@@ -238,18 +246,17 @@ protected:
friend class CoreEngine;
// Type definitions.
- typedef SmallVector<ExplodedNode*,2> RootsTy;
- typedef SmallVector<ExplodedNode*,10> EndNodesTy;
+ typedef std::vector<ExplodedNode *> NodeVector;
- /// Roots - The roots of the simulation graph. Usually there will be only
+ /// The roots of the simulation graph. Usually there will be only
/// one, but clients are free to establish multiple subgraphs within a single
/// SimulGraph. Moreover, these subgraphs can often merge when paths from
/// different roots reach the same state at the same program location.
- RootsTy Roots;
+ NodeVector Roots;
- /// EndNodes - The nodes in the simulation graph which have been
- /// specially marked as the endpoint of an abstract simulation path.
- EndNodesTy EndNodes;
+ /// The nodes in the simulation graph which have been
+ /// specially marked as the endpoint of an abstract simulation path.
+ NodeVector EndNodes;
/// Nodes - The nodes in the graph.
llvm::FoldingSet<ExplodedNode> Nodes;
@@ -262,21 +269,25 @@ protected:
unsigned NumNodes;
/// A list of recently allocated nodes that can potentially be recycled.
- void *recentlyAllocatedNodes;
+ NodeVector ChangedNodes;
/// A list of nodes that can be reused.
- void *freeNodes;
+ NodeVector FreeNodes;
/// A flag that indicates whether nodes should be recycled.
bool reclaimNodes;
+
+ /// Counter to determine when to reclaim nodes.
+ unsigned reclaimCounter;
public:
- /// getNode - Retrieve the node associated with a (Location,State) pair,
+
+ /// \brief Retrieve the node associated with a (Location,State) pair,
/// where the 'Location' is a ProgramPoint in the CFG. If no node for
- /// this pair exists, it is created. IsNew is set to true if
+ /// this pair exists, it is created. IsNew is set to true if
/// the node was freshly created.
-
- ExplodedNode *getNode(const ProgramPoint &L, const ProgramState *State,
+ ExplodedNode *getNode(const ProgramPoint &L, ProgramStateRef State,
+ bool IsSink = false,
bool* IsNew = 0);
ExplodedGraph* MakeEmptyGraph() const {
@@ -295,9 +306,7 @@ public:
return V;
}
- ExplodedGraph()
- : NumNodes(0), recentlyAllocatedNodes(0),
- freeNodes(0), reclaimNodes(false) {}
+ ExplodedGraph();
~ExplodedGraph();
@@ -310,10 +319,10 @@ public:
// Iterators.
typedef ExplodedNode NodeTy;
typedef llvm::FoldingSet<ExplodedNode> AllNodesTy;
- typedef NodeTy** roots_iterator;
- typedef NodeTy* const * const_roots_iterator;
- typedef NodeTy** eop_iterator;
- typedef NodeTy* const * const_eop_iterator;
+ typedef NodeVector::iterator roots_iterator;
+ typedef NodeVector::const_iterator const_roots_iterator;
+ typedef NodeVector::iterator eop_iterator;
+ typedef NodeVector::const_iterator const_eop_iterator;
typedef AllNodesTy::iterator node_iterator;
typedef AllNodesTy::const_iterator const_node_iterator;
@@ -362,6 +371,10 @@ public:
/// Reclaim "uninteresting" nodes created since the last time this method
/// was called.
void reclaimRecentlyAllocatedNodes();
+
+private:
+ bool shouldCollect(const ExplodedNode *node);
+ void collectNode(ExplodedNode *node);
};
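
The reclamation members above replace the old untyped void* lists: nodes
created since the last sweep accumulate in ChangedNodes, and every few calls
the uninteresting ones move to FreeNodes for reuse. A standalone sketch under
simplifying assumptions (the real shouldCollect() criteria are reduced to a
stored flag, and the sweep period is invented):

#include <cassert>
#include <vector>

struct Node { bool Interesting; };

struct Graph {
  std::vector<Node *> ChangedNodes, FreeNodes;
  enum { PeriodSz = 2 };  // hypothetical sweep period
  unsigned ReclaimCounter;
  Graph() : ReclaimCounter(PeriodSz) {}

  void reclaimRecentlyAllocatedNodes() {
    if (--ReclaimCounter != 0)
      return;  // not time for a sweep yet
    ReclaimCounter = PeriodSz;
    for (unsigned i = 0, e = ChangedNodes.size(); i != e; ++i)
      if (!ChangedNodes[i]->Interesting)
        FreeNodes.push_back(ChangedNodes[i]);  // recycle this allocation
    ChangedNodes.clear();
  }
};

int main() {
  Graph G;
  Node A = {true}, B = {false};
  G.ChangedNodes.push_back(&A);
  G.ChangedNodes.push_back(&B);
  G.reclaimRecentlyAllocatedNodes();  // counter ticks down, no sweep
  assert(G.FreeNodes.empty());
  G.reclaimRecentlyAllocatedNodes();  // sweep: only B is collectible
  assert(G.FreeNodes.size() == 1 && G.FreeNodes[0] == &B);
  assert(G.ChangedNodes.empty());
}
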
class ExplodedNodeSet {
@@ -380,19 +393,16 @@ public:
if (N && !static_cast<ExplodedNode*>(N)->isSink()) Impl.insert(N);
}
- ExplodedNodeSet &operator=(const ExplodedNodeSet &X) {
- Impl = X.Impl;
- return *this;
- }
-
typedef ImplTy::iterator iterator;
typedef ImplTy::const_iterator const_iterator;
unsigned size() const { return Impl.size(); }
bool empty() const { return Impl.empty(); }
+ bool erase(ExplodedNode *N) { return Impl.erase(N); }
void clear() { Impl.clear(); }
void insert(const ExplodedNodeSet &S) {
+ assert(&S != this);
if (empty())
Impl = S.Impl;
else
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 9bc470f..2a21a03 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -20,16 +20,24 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
-#include "clang/AST/ExprObjC.h"
-#include "clang/AST/ExprCXX.h"
-#include "clang/AST/StmtObjC.h"
namespace clang {
+class AnalysisDeclContextManager;
+class CXXCatchStmt;
+class CXXConstructExpr;
+class CXXDeleteExpr;
+class CXXNewExpr;
+class CXXTemporaryObjectExpr;
+class CXXThisExpr;
+class MaterializeTemporaryExpr;
+class ObjCAtSynchronizedStmt;
class ObjCForCollectionStmt;
-
+
namespace ento {
class AnalysisManager;
@@ -38,16 +46,14 @@ class ObjCMessage;
class ExprEngine : public SubEngine {
AnalysisManager &AMgr;
+
+ AnalysisDeclContextManager &AnalysisDeclContexts;
CoreEngine Engine;
/// G - the simulation graph.
ExplodedGraph& G;
- /// Builder - The current StmtNodeBuilder which is used when building the
- /// nodes for a given statement.
- StmtNodeBuilder* Builder;
-
/// StateMgr - Object that manages the data for all created states.
ProgramStateManager StateMgr;
@@ -62,10 +68,12 @@ class ExprEngine : public SubEngine {
/// CleanedState - The state for EntryNode "cleaned" of all dead
/// variables and symbols (as determined by a liveness analysis).
- const ProgramState *CleanedState;
+ ProgramStateRef CleanedState;
/// currentStmt - The current block-level statement.
const Stmt *currentStmt;
+ unsigned int currentStmtIdx;
+ const NodeBuilderContext *currentBuilderContext;
/// Obj-C Class Identifiers.
IdentifierInfo* NSExceptionII;
@@ -83,21 +91,25 @@ class ExprEngine : public SubEngine {
GRBugReporter BR;
public:
- ExprEngine(AnalysisManager &mgr, bool gcEnabled);
+ ExprEngine(AnalysisManager &mgr, bool gcEnabled,
+ SetOfConstDecls *VisitedCallees,
+ FunctionSummariesTy *FS);
~ExprEngine();
- void ExecuteWorkList(const LocationContext *L, unsigned Steps = 150000) {
- Engine.ExecuteWorkList(L, Steps, 0);
+ /// Returns true if there is still simulation state on the worklist.
+ bool ExecuteWorkList(const LocationContext *L, unsigned Steps = 150000) {
+ return Engine.ExecuteWorkList(L, Steps, 0);
}
   /// Execute the work list with an initial state. Nodes that reach the exit
   /// of the function are added into the Dst set, which represents the exit
- /// state of the function call.
- void ExecuteWorkListWithInitialState(const LocationContext *L, unsigned Steps,
- const ProgramState *InitState,
+ /// state of the function call. Returns true if there is still simulation
+ /// state on the worklist.
+ bool ExecuteWorkListWithInitialState(const LocationContext *L, unsigned Steps,
+ ProgramStateRef InitState,
ExplodedNodeSet &Dst) {
- Engine.ExecuteWorkListWithInitialState(L, Steps, InitState, Dst);
+ return Engine.ExecuteWorkListWithInitialState(L, Steps, InitState, Dst);
}
/// getContext - Return the ASTContext associated with this analysis.
@@ -113,10 +125,19 @@ public:
BugReporter& getBugReporter() { return BR; }
- StmtNodeBuilder &getBuilder() { assert(Builder); return *Builder; }
+ const NodeBuilderContext &getBuilderContext() {
+ assert(currentBuilderContext);
+ return *currentBuilderContext;
+ }
bool isObjCGCEnabled() { return ObjCGCEnabled; }
+ const Stmt *getStmt() const;
+
+ void GenerateAutoTransition(ExplodedNode *N);
+ void enqueueEndOfPath(ExplodedNodeSet &S);
+ void GenerateCallExitNode(ExplodedNode *N);
+
/// ViewGraph - Visualize the ExplodedGraph created by executing the
/// simulation.
void ViewGraph(bool trim = false);
@@ -125,36 +146,43 @@ public:
/// getInitialState - Return the initial state used for the root vertex
/// in the ExplodedGraph.
- const ProgramState *getInitialState(const LocationContext *InitLoc);
+ ProgramStateRef getInitialState(const LocationContext *InitLoc);
ExplodedGraph& getGraph() { return G; }
const ExplodedGraph& getGraph() const { return G; }
/// processCFGElement - Called by CoreEngine. Used to generate new successor
/// nodes by processing the 'effects' of a CFG element.
- void processCFGElement(const CFGElement E, StmtNodeBuilder& builder);
+ void processCFGElement(const CFGElement E, ExplodedNode *Pred,
+ unsigned StmtIdx, NodeBuilderContext *Ctx);
- void ProcessStmt(const CFGStmt S, StmtNodeBuilder &builder);
+ void ProcessStmt(const CFGStmt S, ExplodedNode *Pred);
- void ProcessInitializer(const CFGInitializer I, StmtNodeBuilder &builder);
+ void ProcessInitializer(const CFGInitializer I, ExplodedNode *Pred);
- void ProcessImplicitDtor(const CFGImplicitDtor D, StmtNodeBuilder &builder);
+ void ProcessImplicitDtor(const CFGImplicitDtor D, ExplodedNode *Pred);
void ProcessAutomaticObjDtor(const CFGAutomaticObjDtor D,
- StmtNodeBuilder &builder);
- void ProcessBaseDtor(const CFGBaseDtor D, StmtNodeBuilder &builder);
- void ProcessMemberDtor(const CFGMemberDtor D, StmtNodeBuilder &builder);
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ void ProcessBaseDtor(const CFGBaseDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ void ProcessMemberDtor(const CFGMemberDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
void ProcessTemporaryDtor(const CFGTemporaryDtor D,
- StmtNodeBuilder &builder);
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
/// Called by CoreEngine when processing the entrance of a CFGBlock.
- virtual void processCFGBlockEntrance(ExplodedNodeSet &dstNodes,
- GenericNodeBuilder<BlockEntrance> &nodeBuilder);
+ virtual void processCFGBlockEntrance(const BlockEdge &L,
+ NodeBuilderWithSinks &nodeBuilder);
/// ProcessBranch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a branch condition.
void processBranch(const Stmt *Condition, const Stmt *Term,
- BranchNodeBuilder& builder);
+ NodeBuilderContext& BuilderCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF);
/// processIndirectGoto - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
@@ -166,35 +194,36 @@ public:
/// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
- void processEndOfFunction(EndOfFunctionNodeBuilder& builder);
+ void processEndOfFunction(NodeBuilderContext& BC);
/// Generate the entry node of the callee.
- void processCallEnter(CallEnterNodeBuilder &builder);
+ void processCallEnter(CallEnter CE, ExplodedNode *Pred);
/// Generate the first post callsite node.
- void processCallExit(CallExitNodeBuilder &builder);
+ void processCallExit(ExplodedNode *Pred);
/// Called by CoreEngine when the analysis worklist has terminated.
void processEndWorklist(bool hasWorkRemaining);
/// evalAssume - Callback function invoked by the ConstraintManager when
/// making assumptions about state values.
- const ProgramState *processAssume(const ProgramState *state, SVal cond,bool assumption);
+  ProgramStateRef processAssume(ProgramStateRef state, SVal cond, bool assumption);
/// wantsRegionChangeUpdate - Called by ProgramStateManager to determine if a
/// region change should trigger a processRegionChanges update.
- bool wantsRegionChangeUpdate(const ProgramState *state);
+ bool wantsRegionChangeUpdate(ProgramStateRef state);
/// processRegionChanges - Called by ProgramStateManager whenever a change is made
/// to the store. Used to update checkers that track region values.
- const ProgramState *
- processRegionChanges(const ProgramState *state,
+ ProgramStateRef
+ processRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions);
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call);
/// printState - Called by ProgramStateManager to print checker-specific data.
- void printState(raw_ostream &Out, const ProgramState *State,
+ void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep);
virtual ProgramStateManager& getStateManager() { return StateMgr; }
@@ -225,11 +254,6 @@ public:
const CoreEngine &getCoreEngine() const { return Engine; }
public:
- ExplodedNode *MakeNode(ExplodedNodeSet &Dst, const Stmt *S,
- ExplodedNode *Pred, const ProgramState *St,
- ProgramPoint::Kind K = ProgramPoint::PostStmtKind,
- const ProgramPointTag *tag = 0);
-
/// Visit - Transfer function logic for all statements. Dispatches to
/// other functions that handle specific kinds of statements.
void Visit(const Stmt *S, ExplodedNode *Pred, ExplodedNodeSet &Dst);
@@ -241,16 +265,6 @@ public:
/// VisitAsmStmt - Transfer function logic for inline asm.
void VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred, ExplodedNodeSet &Dst);
-
- void VisitAsmStmtHelperOutputs(const AsmStmt *A,
- AsmStmt::const_outputs_iterator I,
- AsmStmt::const_outputs_iterator E,
- ExplodedNode *Pred, ExplodedNodeSet &Dst);
-
- void VisitAsmStmtHelperInputs(const AsmStmt *A,
- AsmStmt::const_inputs_iterator I,
- AsmStmt::const_inputs_iterator E,
- ExplodedNode *Pred, ExplodedNodeSet &Dst);
/// VisitBlockExpr - Transfer function logic for BlockExprs.
void VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
@@ -328,13 +342,19 @@ public:
void VisitUnaryOperator(const UnaryOperator* B, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
+ /// Handle ++ and -- (both pre- and post-increment).
+ void VisitIncrementDecrementOperator(const UnaryOperator* U,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ void VisitCXXCatchStmt(const CXXCatchStmt *CS, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
void VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
ExplodedNodeSet & Dst);
void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *expr,
- ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- VisitCXXConstructExpr(expr, 0, Pred, Dst);
- }
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
void VisitCXXConstructExpr(const CXXConstructExpr *E, const MemRegion *Dest,
ExplodedNode *Pred, ExplodedNodeSet &Dst);
@@ -349,9 +369,6 @@ public:
void VisitCXXDeleteExpr(const CXXDeleteExpr *CDE, ExplodedNode *Pred,
ExplodedNodeSet &Dst);
- void VisitAggExpr(const Expr *E, const MemRegion *Dest, ExplodedNode *Pred,
- ExplodedNodeSet &Dst);
-
/// Create a C++ temporary object for an rvalue.
void CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
ExplodedNode *Pred,
@@ -363,17 +380,7 @@ public:
const CXXThisRegion *getCXXThisRegion(const CXXMethodDecl *decl,
const StackFrameContext *frameCtx);
-
- /// Evaluate arguments with a work list algorithm.
- void evalArguments(ConstExprIterator AI, ConstExprIterator AE,
- const FunctionProtoType *FnType,
- ExplodedNode *Pred, ExplodedNodeSet &Dst,
- bool FstArgAsLValue = false);
- /// Evaluate callee expression (for a function call).
- void evalCallee(const CallExpr *callExpr, const ExplodedNodeSet &src,
- ExplodedNodeSet &dest);
-
/// evalEagerlyAssume - Given the nodes in 'Src', eagerly assume symbolic
/// expressions of the form 'x != 0' and generate new nodes (stored in Dst)
/// with those assumptions.
@@ -393,31 +400,34 @@ public:
public:
- SVal evalBinOp(const ProgramState *state, BinaryOperator::Opcode op,
+ SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
NonLoc L, NonLoc R, QualType T) {
return svalBuilder.evalBinOpNN(state, op, L, R, T);
}
- SVal evalBinOp(const ProgramState *state, BinaryOperator::Opcode op,
+ SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
NonLoc L, SVal R, QualType T) {
return R.isValid() ? svalBuilder.evalBinOpNN(state,op,L, cast<NonLoc>(R), T) : R;
}
- SVal evalBinOp(const ProgramState *ST, BinaryOperator::Opcode Op,
+ SVal evalBinOp(ProgramStateRef ST, BinaryOperator::Opcode Op,
SVal LHS, SVal RHS, QualType T) {
return svalBuilder.evalBinOp(ST, Op, LHS, RHS, T);
}
protected:
- void evalObjCMessage(ExplodedNodeSet &Dst, const ObjCMessage &msg,
- ExplodedNode *Pred, const ProgramState *state);
+ void evalObjCMessage(StmtNodeBuilder &Bldr, const ObjCMessage &msg,
+ ExplodedNode *Pred, ProgramStateRef state,
+ bool GenSink);
- const ProgramState *invalidateArguments(const ProgramState *State,
+ ProgramStateRef invalidateArguments(ProgramStateRef State,
const CallOrObjCMessage &Call,
const LocationContext *LC);
- const ProgramState *MarkBranch(const ProgramState *St, const Stmt *Terminator,
- bool branchTaken);
+ ProgramStateRef MarkBranch(ProgramStateRef state,
+ const Stmt *Terminator,
+ const LocationContext *LCtx,
+ bool branchTaken);
/// evalBind - Handle the semantics of binding a value to a specific location.
/// This method is used by evalStore, VisitDeclStmt, and others.
@@ -431,34 +441,52 @@ public:
// be the same as Pred->state, and when 'location' may not be the
// same as state->getLValue(Ex).
/// Simulate a read of the result of Ex.
- void evalLoad(ExplodedNodeSet &Dst, const Expr *Ex, ExplodedNode *Pred,
- const ProgramState *St, SVal location, const ProgramPointTag *tag = 0,
+ void evalLoad(ExplodedNodeSet &Dst,
+ const Expr *NodeEx, /* Eventually will be a CFGStmt */
+ const Expr *BoundExpr,
+ ExplodedNode *Pred,
+ ProgramStateRef St,
+ SVal location,
+ const ProgramPointTag *tag = 0,
QualType LoadTy = QualType());
// FIXME: 'tag' should be removed, and a LocationContext should be used
// instead.
void evalStore(ExplodedNodeSet &Dst, const Expr *AssignE, const Expr *StoreE,
- ExplodedNode *Pred, const ProgramState *St, SVal TargetLV, SVal Val,
+ ExplodedNode *Pred, ProgramStateRef St, SVal TargetLV, SVal Val,
const ProgramPointTag *tag = 0);
private:
- void evalLoadCommon(ExplodedNodeSet &Dst, const Expr *Ex, ExplodedNode *Pred,
- const ProgramState *St, SVal location, const ProgramPointTag *tag,
+ void evalLoadCommon(ExplodedNodeSet &Dst,
+ const Expr *NodeEx, /* Eventually will be a CFGStmt */
+ const Expr *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef St,
+ SVal location,
+ const ProgramPointTag *tag,
QualType LoadTy);
// FIXME: 'tag' should be removed, and a LocationContext should be used
// instead.
- void evalLocation(ExplodedNodeSet &Dst, const Stmt *S, ExplodedNode *Pred,
- const ProgramState *St, SVal location,
+ void evalLocation(ExplodedNodeSet &Dst,
+ const Stmt *NodeEx, /* This will eventually be a CFGStmt */
+ const Stmt *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef St, SVal location,
const ProgramPointTag *tag, bool isLoad);
+ bool shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred);
bool InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE, ExplodedNode *Pred);
-
-
-public:
- /// Returns true if calling the specific function or method would possibly
- /// cause global variables to be invalidated.
- bool doesInvalidateGlobals(const CallOrObjCMessage &callOrMessage) const;
-
+
+ bool replayWithoutInlining(ExplodedNode *P, const LocationContext *CalleeLC);
+};
+
+/// Traits for storing the call processing policy inside GDM.
+/// The GDM stores the corresponding CallExpr pointer.
+struct ReplayWithoutInlining{};
+template <>
+struct ProgramStateTrait<ReplayWithoutInlining> :
+ public ProgramStatePartialTrait<void*> {
+ static void *GDMIndex() { static int index = 0; return &index; }
};
} // end ento namespace
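
The GDMIndex() idiom above derives a unique map key from the address of a
function-local static, so many traits can share one generic data map without
colliding. A self-contained model of the mechanism:

#include <cassert>
#include <map>

typedef std::map<const void *, void *> GDM;  // generic data map stand-in

struct ReplayWithoutInlining {};  // tag type only, carries no data

// One distinct static (and thus one distinct address) per instantiation.
template <typename Tag>
const void *GDMIndex() {
  static int index = 0;
  return &index;
}

int main() {
  GDM Data;
  int CallSite = 0;  // stands in for the stored CallExpr pointer
  Data[GDMIndex<ReplayWithoutInlining>()] = &CallSite;
  assert(Data.count(GDMIndex<ReplayWithoutInlining>()) == 1);
}
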
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h
deleted file mode 100644
index 89b47dc..0000000
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h
+++ /dev/null
@@ -1,80 +0,0 @@
-//===-- ExprEngineBuilders.h - "Builder" classes for ExprEngine ---*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines smart builder "references" which are used to marshal
-// builders between ExprEngine objects and their related components.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_GR_EXPRENGINE_BUILDERS
-#define LLVM_CLANG_GR_EXPRENGINE_BUILDERS
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/Analysis/Support/SaveAndRestore.h"
-
-namespace clang {
-
-namespace ento {
-
-class StmtNodeBuilderRef {
- ExplodedNodeSet &Dst;
- StmtNodeBuilder &B;
- ExprEngine& Eng;
- ExplodedNode *Pred;
- const ProgramState *state;
- const Stmt *stmt;
- const unsigned OldSize;
- const bool AutoCreateNode;
- SaveAndRestore<bool> OldSink;
- SaveOr OldHasGen;
-
-private:
- friend class ExprEngine;
-
- StmtNodeBuilderRef(); // do not implement
- void operator=(const StmtNodeBuilderRef&); // do not implement
-
- StmtNodeBuilderRef(ExplodedNodeSet &dst,
- StmtNodeBuilder &builder,
- ExprEngine& eng,
- ExplodedNode *pred,
- const ProgramState *st,
- const Stmt *s, bool auto_create_node)
- : Dst(dst), B(builder), Eng(eng), Pred(pred),
- state(st), stmt(s), OldSize(Dst.size()), AutoCreateNode(auto_create_node),
- OldSink(B.BuildSinks), OldHasGen(B.hasGeneratedNode) {}
-
-public:
-
- ~StmtNodeBuilderRef() {
- // Handle the case where no nodes where generated. Auto-generate that
- // contains the updated state if we aren't generating sinks.
- if (!B.BuildSinks && Dst.size() == OldSize && !B.hasGeneratedNode) {
- if (AutoCreateNode)
- B.MakeNode(Dst, const_cast<Stmt*>(stmt), Pred, state);
- else
- Dst.Add(Pred);
- }
- }
-
- const ProgramState *getState() { return state; }
-
- ProgramStateManager& getStateManager() {
- return Eng.getStateManager();
- }
-
- ExplodedNode *MakeNode(const ProgramState *state) {
- return B.MakeNode(Dst, const_cast<Stmt*>(stmt), Pred, state);
- }
-};
-
-} // end GR namespace
-
-} // end clang namespace
-
-#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
new file mode 100644
index 0000000..42adff3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
@@ -0,0 +1,107 @@
+//== FunctionSummary.h - Stores summaries of functions. ------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a summary of a function gathered/used by static analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_FUNCTIONSUMMARY_H
+#define LLVM_CLANG_GR_FUNCTIONSUMMARY_H
+
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/BitVector.h"
+
+namespace clang {
+namespace ento {
+typedef llvm::SmallPtrSet<Decl*, 24> SetOfDecls;
+typedef llvm::SmallPtrSet<const Decl*, 24> SetOfConstDecls;
+
+class FunctionSummariesTy {
+ struct FunctionSummary {
+ /// True if this function has reached a max block count while inlined from
+ /// at least one call site.
+ bool MayReachMaxBlockCount;
+
+ /// Total number of blocks in the function.
+ unsigned TotalBasicBlocks;
+
+    /// Marks the IDs of the basic blocks visited during the analysis.
+ llvm::BitVector VisitedBasicBlocks;
+
+ FunctionSummary() :
+ MayReachMaxBlockCount(false),
+ TotalBasicBlocks(0),
+ VisitedBasicBlocks(0) {}
+ };
+
+ typedef llvm::DenseMap<const Decl*, FunctionSummary*> MapTy;
+ MapTy Map;
+
+public:
+ ~FunctionSummariesTy();
+
+ MapTy::iterator findOrInsertSummary(const Decl *D) {
+ MapTy::iterator I = Map.find(D);
+ if (I != Map.end())
+ return I;
+ FunctionSummary *DS = new FunctionSummary();
+ I = Map.insert(std::pair<const Decl*, FunctionSummary*>(D, DS)).first;
+ assert(I != Map.end());
+ return I;
+ }
+
+ void markReachedMaxBlockCount(const Decl* D) {
+ MapTy::iterator I = findOrInsertSummary(D);
+ I->second->MayReachMaxBlockCount = true;
+ }
+
+ bool hasReachedMaxBlockCount(const Decl* D) {
+ MapTy::const_iterator I = Map.find(D);
+ if (I != Map.end())
+ return I->second->MayReachMaxBlockCount;
+ return false;
+ }
+
+ void markVisitedBasicBlock(unsigned ID, const Decl* D, unsigned TotalIDs) {
+ MapTy::iterator I = findOrInsertSummary(D);
+ llvm::BitVector &Blocks = I->second->VisitedBasicBlocks;
+ assert(ID < TotalIDs);
+ if (TotalIDs > Blocks.size()) {
+ Blocks.resize(TotalIDs);
+ I->second->TotalBasicBlocks = TotalIDs;
+ }
+ Blocks[ID] = true;
+ }
+
+ unsigned getNumVisitedBasicBlocks(const Decl* D) {
+ MapTy::const_iterator I = Map.find(D);
+ if (I != Map.end())
+ return I->second->VisitedBasicBlocks.count();
+ return 0;
+ }
+
+ /// Get the percentage of the reachable blocks.
+ unsigned getPercentBlocksReachable(const Decl *D) {
+ MapTy::const_iterator I = Map.find(D);
+ if (I != Map.end())
+ return ((I->second->VisitedBasicBlocks.count() * 100) /
+ I->second->TotalBasicBlocks);
+ return 0;
+ }
+
+ unsigned getTotalNumBasicBlocks();
+ unsigned getTotalNumVisitedBasicBlocks();
+
+};
+
+}} // end clang ento namespaces
+
+#endif
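
A standalone model of the coverage arithmetic inside FunctionSummary, with
std::vector<bool> standing in for llvm::BitVector:

#include <cassert>
#include <vector>

struct Summary {
  unsigned TotalBasicBlocks;
  std::vector<bool> Visited;
  Summary() : TotalBasicBlocks(0) {}

  // Mirrors markVisitedBasicBlock(): grow lazily, then set the bit.
  void markVisited(unsigned ID, unsigned TotalIDs) {
    assert(ID < TotalIDs);
    if (TotalIDs > Visited.size()) {
      Visited.resize(TotalIDs);
      TotalBasicBlocks = TotalIDs;
    }
    Visited[ID] = true;
  }

  // Mirrors getPercentBlocksReachable() (with a zero guard added here).
  unsigned percentReachable() const {
    unsigned Count = 0;
    for (unsigned i = 0, e = Visited.size(); i != e; ++i)
      Count += Visited[i];
    return TotalBasicBlocks ? Count * 100 / TotalBasicBlocks : 0;
  }
};

int main() {
  Summary S;
  S.markVisited(0, 4);
  S.markVisited(2, 4);
  assert(S.percentReachable() == 50);  // 2 of 4 blocks visited
}
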
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
index c9941fe..87bc0df 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -18,7 +18,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/Support/ErrorHandling.h"
@@ -73,12 +73,16 @@ public:
StackArgumentsSpaceRegionKind,
HeapSpaceRegionKind,
UnknownSpaceRegionKind,
- NonStaticGlobalSpaceRegionKind,
StaticGlobalSpaceRegionKind,
- BEG_GLOBAL_MEMSPACES = NonStaticGlobalSpaceRegionKind,
- END_GLOBAL_MEMSPACES = StaticGlobalSpaceRegionKind,
+ GlobalInternalSpaceRegionKind,
+ GlobalSystemSpaceRegionKind,
+ GlobalImmutableSpaceRegionKind,
+ BEG_NON_STATIC_GLOBAL_MEMSPACES = GlobalInternalSpaceRegionKind,
+ END_NON_STATIC_GLOBAL_MEMSPACES = GlobalImmutableSpaceRegionKind,
+ BEG_GLOBAL_MEMSPACES = StaticGlobalSpaceRegionKind,
+ END_GLOBAL_MEMSPACES = GlobalImmutableSpaceRegionKind,
BEG_MEMSPACES = GenericMemSpaceRegionKind,
- END_MEMSPACES = StaticGlobalSpaceRegionKind,
+ END_MEMSPACES = GlobalImmutableSpaceRegionKind,
// Untyped regions.
SymbolicRegionKind,
AllocaRegionKind,
@@ -91,6 +95,7 @@ public:
CompoundLiteralRegionKind = BEG_TYPED_VALUE_REGIONS,
CXXThisRegionKind,
StringRegionKind,
+ ObjCStringRegionKind,
ElementRegionKind,
// Decl Regions.
BEG_DECL_REGIONS,
@@ -118,8 +123,6 @@ public:
virtual MemRegionManager* getMemRegionManager() const = 0;
- std::string getString() const;
-
const MemSpaceRegion *getMemorySpace() const;
const MemRegion *getBaseRegion() const;
@@ -137,10 +140,16 @@ public:
/// Compute the offset within the top level memory object.
RegionOffset getAsOffset() const;
+ /// \brief Get a string representation of a region for debug use.
+ std::string getString() const;
+
virtual void dumpToStream(raw_ostream &os) const;
void dump() const;
+ /// \brief Print the region for use in diagnostics.
+ virtual void dumpPretty(raw_ostream &os) const;
+
Kind getKind() const { return kind; }
template<typename RegionTy> const RegionTy* getAs() const;
@@ -150,7 +159,7 @@ public:
static bool classof(const MemRegion*) { return true; }
};
-/// MemSpaceRegion - A memory region that represents and "memory space";
+/// MemSpaceRegion - A memory region that represents a "memory space";
/// for example, the set of global variables, the stack frame, etc.
class MemSpaceRegion : public MemRegion {
protected:
@@ -177,6 +186,7 @@ public:
};
class GlobalsSpaceRegion : public MemSpaceRegion {
+ virtual void anchor();
protected:
GlobalsSpaceRegion(MemRegionManager *mgr, Kind k)
: MemSpaceRegion(mgr, k) {}
@@ -186,7 +196,11 @@ public:
return k >= BEG_GLOBAL_MEMSPACES && k <= END_GLOBAL_MEMSPACES;
}
};
-
+
+/// \class The region of static variables within the current CodeTextRegion
+/// scope.
+/// Currently, only the static locals are placed there, so we know that these
+/// variables do not get invalidated by calls to other functions.
class StaticGlobalSpaceRegion : public GlobalsSpaceRegion {
friend class MemRegionManager;
@@ -206,23 +220,88 @@ public:
return R->getKind() == StaticGlobalSpaceRegionKind;
}
};
-
+
+/// \class The region for all the non-static global variables.
+///
+/// This class is further split into subclasses for efficient implementation of
+/// invalidating a set of related global values as is done in
+/// RegionStoreManager::invalidateRegions (instead of finding all the dependent
+/// globals, we invalidate the whole parent region).
class NonStaticGlobalSpaceRegion : public GlobalsSpaceRegion {
friend class MemRegionManager;
- NonStaticGlobalSpaceRegion(MemRegionManager *mgr)
- : GlobalsSpaceRegion(mgr, NonStaticGlobalSpaceRegionKind) {}
+protected:
+ NonStaticGlobalSpaceRegion(MemRegionManager *mgr, Kind k)
+ : GlobalsSpaceRegion(mgr, k) {}
public:
void dumpToStream(raw_ostream &os) const;
static bool classof(const MemRegion *R) {
- return R->getKind() == NonStaticGlobalSpaceRegionKind;
+ Kind k = R->getKind();
+ return k >= BEG_NON_STATIC_GLOBAL_MEMSPACES &&
+ k <= END_NON_STATIC_GLOBAL_MEMSPACES;
}
};
-
+
+/// \class The region containing globals which are defined in system/external
+/// headers and are considered modifiable by system calls (e.g., errno).
+class GlobalSystemSpaceRegion : public NonStaticGlobalSpaceRegion {
+ friend class MemRegionManager;
+
+ GlobalSystemSpaceRegion(MemRegionManager *mgr)
+ : NonStaticGlobalSpaceRegion(mgr, GlobalSystemSpaceRegionKind) {}
+
+public:
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == GlobalSystemSpaceRegionKind;
+ }
+};
+
+/// \class The region containing globals which are considered not to be modified,
+/// and not to point to data which could be modified, as a result of a function
+/// call (system or internal). For example, const global scalars would be modeled
+/// as part of this region. This region also includes most system globals, since
+/// they have a low chance of being modified.
+class GlobalImmutableSpaceRegion : public NonStaticGlobalSpaceRegion {
+ friend class MemRegionManager;
+
+ GlobalImmutableSpaceRegion(MemRegionManager *mgr)
+ : NonStaticGlobalSpaceRegion(mgr, GlobalImmutableSpaceRegionKind) {}
+
+public:
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == GlobalImmutableSpaceRegionKind;
+ }
+};
+
+/// \class The region containing globals which can be modified by calls to
+/// "internally" defined functions - (for now just) functions other then system
+/// calls.
+class GlobalInternalSpaceRegion : public NonStaticGlobalSpaceRegion {
+ friend class MemRegionManager;
+
+ GlobalInternalSpaceRegion(MemRegionManager *mgr)
+ : NonStaticGlobalSpaceRegion(mgr, GlobalInternalSpaceRegionKind) {}
+
+public:
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == GlobalInternalSpaceRegionKind;
+ }
+};
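
These classof() implementations lean on the new kind ordering: the three
non-static global spaces occupy a contiguous range, so a single pair of
comparisons classifies all of them. A miniature of the scheme:

#include <cassert>

enum Kind {
  StaticGlobalSpace,
  GlobalInternalSpace,
  GlobalSystemSpace,
  GlobalImmutableSpace,
  BEG_NON_STATIC = GlobalInternalSpace,
  END_NON_STATIC = GlobalImmutableSpace
};

// The range check NonStaticGlobalSpaceRegion::classof performs.
static bool isNonStaticGlobalSpace(Kind k) {
  return k >= BEG_NON_STATIC && k <= END_NON_STATIC;
}

int main() {
  assert(isNonStaticGlobalSpace(GlobalSystemSpace));   // e.g. errno's space
  assert(!isNonStaticGlobalSpace(StaticGlobalSpace));  // static locals
}
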
+
class HeapSpaceRegion : public MemSpaceRegion {
+ virtual void anchor();
friend class MemRegionManager;
HeapSpaceRegion(MemRegionManager *mgr)
@@ -234,6 +313,7 @@ public:
};
class UnknownSpaceRegion : public MemSpaceRegion {
+ virtual void anchor();
friend class MemRegionManager;
UnknownSpaceRegion(MemRegionManager *mgr)
: MemSpaceRegion(mgr, UnknownSpaceRegionKind) {}
@@ -266,7 +346,7 @@ public:
};
class StackLocalsSpaceRegion : public StackSpaceRegion {
-private:
+ virtual void anchor();
friend class MemRegionManager;
StackLocalsSpaceRegion(MemRegionManager *mgr, const StackFrameContext *sfc)
: StackSpaceRegion(mgr, StackLocalsSpaceRegionKind, sfc) {}
@@ -278,6 +358,7 @@ public:
class StackArgumentsSpaceRegion : public StackSpaceRegion {
private:
+ virtual void anchor();
friend class MemRegionManager;
StackArgumentsSpaceRegion(MemRegionManager *mgr, const StackFrameContext *sfc)
: StackSpaceRegion(mgr, StackArgumentsSpaceRegionKind, sfc) {}
@@ -291,6 +372,8 @@ public:
/// SubRegion - A region that subsets another larger region. Most regions
/// are subclasses of SubRegion.
class SubRegion : public MemRegion {
+private:
+ virtual void anchor();
protected:
const MemRegion* superRegion;
SubRegion(const MemRegion* sReg, Kind k) : MemRegion(k), superRegion(sReg) {}
@@ -351,6 +434,8 @@ public:
/// TypedRegion - An abstract class representing regions that are typed.
class TypedRegion : public SubRegion {
+public:
+ virtual void anchor();
protected:
TypedRegion(const MemRegion* sReg, Kind k) : SubRegion(sReg, k) {}
@@ -371,6 +456,8 @@ public:
/// TypedValueRegion - An abstract class representing regions having a typed value.
class TypedValueRegion : public TypedRegion {
+public:
+ virtual void anchor();
protected:
TypedValueRegion(const MemRegion* sReg, Kind k) : TypedRegion(sReg, k) {}
@@ -399,6 +486,8 @@ public:
class CodeTextRegion : public TypedRegion {
+public:
+ virtual void anchor();
protected:
CodeTextRegion(const MemRegion *sreg, Kind k) : TypedRegion(sreg, k) {}
public:
@@ -448,11 +537,11 @@ class BlockTextRegion : public CodeTextRegion {
friend class MemRegionManager;
const BlockDecl *BD;
- AnalysisContext *AC;
+ AnalysisDeclContext *AC;
CanQualType locTy;
BlockTextRegion(const BlockDecl *bd, CanQualType lTy,
- AnalysisContext *ac, const MemRegion* sreg)
+ AnalysisDeclContext *ac, const MemRegion* sreg)
: CodeTextRegion(sreg, BlockTextRegionKind), BD(bd), AC(ac), locTy(lTy) {}
public:
@@ -464,14 +553,14 @@ public:
return BD;
}
- AnalysisContext *getAnalysisContext() const { return AC; }
+ AnalysisDeclContext *getAnalysisDeclContext() const { return AC; }
virtual void dumpToStream(raw_ostream &os) const;
void Profile(llvm::FoldingSetNodeID& ID) const;
static void ProfileRegion(llvm::FoldingSetNodeID& ID, const BlockDecl *BD,
- CanQualType, const AnalysisContext*,
+ CanQualType, const AnalysisDeclContext*,
const MemRegion*);
static bool classof(const MemRegion* R) {
@@ -611,6 +700,40 @@ public:
return R->getKind() == StringRegionKind;
}
};
+
+/// The region associated with an ObjCStringLiteral.
+class ObjCStringRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+ const ObjCStringLiteral* Str;
+protected:
+
+ ObjCStringRegion(const ObjCStringLiteral* str, const MemRegion* sreg)
+ : TypedValueRegion(sreg, ObjCStringRegionKind), Str(str) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const ObjCStringLiteral* Str,
+ const MemRegion* superRegion);
+
+public:
+
+ const ObjCStringLiteral* getObjCStringLiteral() const { return Str; }
+
+ QualType getValueType() const {
+ return Str->getType();
+ }
+
+ bool isBoundable() const { return false; }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ProfileRegion(ID, Str, superRegion);
+ }
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == ObjCStringRegionKind;
+ }
+};
/// CompoundLiteralRegion - A memory region representing a compound literal.
/// Compound literals are essentially temporaries that are stack allocated
@@ -695,6 +818,8 @@ public:
static bool classof(const MemRegion* R) {
return R->getKind() == VarRegionKind;
}
+
+ void dumpPretty(raw_ostream &os) const;
};
/// CXXThisRegion - Represents the region for the implicit 'this' parameter
@@ -734,9 +859,6 @@ class FieldRegion : public DeclRegion {
: DeclRegion(fd, sReg, FieldRegionKind) {}
public:
-
- void dumpToStream(raw_ostream &os) const;
-
const FieldDecl *getDecl() const { return cast<FieldDecl>(D); }
QualType getValueType() const {
@@ -754,23 +876,23 @@ public:
static bool classof(const MemRegion* R) {
return R->getKind() == FieldRegionKind;
}
+
+ void dumpToStream(raw_ostream &os) const;
+ void dumpPretty(raw_ostream &os) const;
};
class ObjCIvarRegion : public DeclRegion {
friend class MemRegionManager;
- ObjCIvarRegion(const ObjCIvarDecl *ivd, const MemRegion* sReg)
- : DeclRegion(ivd, sReg, ObjCIvarRegionKind) {}
+ ObjCIvarRegion(const ObjCIvarDecl *ivd, const MemRegion* sReg);
static void ProfileRegion(llvm::FoldingSetNodeID& ID, const ObjCIvarDecl *ivd,
- const MemRegion* superRegion) {
- DeclRegion::ProfileRegion(ID, ivd, superRegion, ObjCIvarRegionKind);
- }
+ const MemRegion* superRegion);
public:
- const ObjCIvarDecl *getDecl() const { return cast<ObjCIvarDecl>(D); }
- QualType getValueType() const { return getDecl()->getType(); }
+ const ObjCIvarDecl *getDecl() const;
+ QualType getValueType() const;
void dumpToStream(raw_ostream &os) const;
@@ -803,6 +925,7 @@ public:
void dump() const;
};
+/// \brief ElementRegion is used to represent both array elements and casts.
class ElementRegion : public TypedValueRegion {
friend class MemRegionManager;
@@ -915,7 +1038,10 @@ class MemRegionManager {
llvm::BumpPtrAllocator& A;
llvm::FoldingSet<MemRegion> Regions;
- NonStaticGlobalSpaceRegion *globals;
+ GlobalInternalSpaceRegion *InternalGlobals;
+ GlobalSystemSpaceRegion *SystemGlobals;
+ GlobalImmutableSpaceRegion *ImmutableGlobals;
+
llvm::DenseMap<const StackFrameContext *, StackLocalsSpaceRegion *>
StackLocalsSpaceRegions;
@@ -930,7 +1056,8 @@ class MemRegionManager {
public:
MemRegionManager(ASTContext &c, llvm::BumpPtrAllocator& a)
- : C(c), A(a), globals(0), heap(0), unknown(0), code(0) {}
+ : C(c), A(a), InternalGlobals(0), SystemGlobals(0), ImmutableGlobals(0),
+ heap(0), unknown(0), code(0) {}
~MemRegionManager();
@@ -950,7 +1077,9 @@ public:
/// getGlobalsRegion - Retrieve the memory region associated with
/// global variables.
- const GlobalsSpaceRegion *getGlobalsRegion(const CodeTextRegion *R = 0);
+ const GlobalsSpaceRegion *getGlobalsRegion(
+ MemRegion::Kind K = MemRegion::GlobalInternalSpaceRegionKind,
+ const CodeTextRegion *R = 0);
/// getHeapRegion - Retrieve the memory region associated with the
/// generic "heap".
@@ -980,7 +1109,9 @@ public:
/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
const SymbolicRegion* getSymbolicRegion(SymbolRef sym);
- const StringRegion* getStringRegion(const StringLiteral* Str);
+ const StringRegion *getStringRegion(const StringLiteral* Str);
+
+ const ObjCStringRegion *getObjCStringRegion(const ObjCStringLiteral *Str);
/// getVarRegion - Retrieve or create the memory region associated with
/// a specified VarDecl and LocationContext.
@@ -1038,7 +1169,7 @@ public:
const FunctionTextRegion *getFunctionTextRegion(const FunctionDecl *FD);
const BlockTextRegion *getBlockTextRegion(const BlockDecl *BD,
CanQualType locTy,
- AnalysisContext *AC);
+ AnalysisDeclContext *AC);
/// getBlockDataRegion - Get the memory region associated with an instance
/// of a block. Unlike many other MemRegions, the LocationContext*
@@ -1047,11 +1178,6 @@ public:
const BlockDataRegion *getBlockDataRegion(const BlockTextRegion *bc,
const LocationContext *lc = NULL);
- bool isGlobalsRegion(const MemRegion* R) {
- assert(R);
- return R == globals;
- }
-
private:
template <typename RegionTy, typename A1>
RegionTy* getRegion(const A1 a1);
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
index add3479..d8aec09 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
@@ -19,82 +19,72 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Compiler.h"
namespace clang {
namespace ento {
+using llvm::StrInStrNoCase;
/// \brief Represents both explicit ObjC message expressions and implicit
/// messages that are sent for handling properties in dot syntax.
class ObjCMessage {
- const Expr *MsgOrPropE;
- const Expr *OriginE;
- bool IsPropSetter;
- SVal SetterArgV;
-
-protected:
- ObjCMessage(const Expr *E, const Expr *origE, bool isSetter, SVal setArgV)
- : MsgOrPropE(E), OriginE(origE),
- IsPropSetter(isSetter), SetterArgV(setArgV) { }
-
+ const ObjCMessageExpr *Msg;
+ const ObjCPropertyRefExpr *PE;
+ const bool IsPropSetter;
public:
- ObjCMessage() : MsgOrPropE(0), OriginE(0) { }
+ ObjCMessage() : Msg(0), PE(0), IsPropSetter(false) {}
- ObjCMessage(const ObjCMessageExpr *E)
- : MsgOrPropE(E), OriginE(E) {
+ ObjCMessage(const ObjCMessageExpr *E, const ObjCPropertyRefExpr *pe = 0,
+ bool isSetter = false)
+ : Msg(E), PE(pe), IsPropSetter(isSetter) {
assert(E && "should not be initialized with null expression");
}
- bool isValid() const { return MsgOrPropE != 0; }
- bool isInvalid() const { return !isValid(); }
+ bool isValid() const { return Msg; }
+
+ bool isPureMessageExpr() const { return !PE; }
- bool isMessageExpr() const {
- return isValid() && isa<ObjCMessageExpr>(MsgOrPropE);
- }
+ bool isPropertyGetter() const { return PE && !IsPropSetter; }
- bool isPropertyGetter() const {
- return isValid() &&
- isa<ObjCPropertyRefExpr>(MsgOrPropE) && !IsPropSetter;
+ bool isPropertySetter() const {
+ return IsPropSetter;
}
- bool isPropertySetter() const {
- return isValid() &&
- isa<ObjCPropertyRefExpr>(MsgOrPropE) && IsPropSetter;
+ const Expr *getMessageExpr() const {
+ return Msg;
}
-
- const Expr *getOriginExpr() const { return OriginE; }
- QualType getType(ASTContext &ctx) const;
+ QualType getType(ASTContext &ctx) const {
+ return Msg->getType();
+ }
QualType getResultType(ASTContext &ctx) const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- if (const ObjCMethodDecl *MD = msgE->getMethodDecl())
- return MD->getResultType();
+ if (const ObjCMethodDecl *MD = Msg->getMethodDecl())
+ return MD->getResultType();
return getType(ctx);
}
- ObjCMethodFamily getMethodFamily() const;
+ ObjCMethodFamily getMethodFamily() const {
+ return Msg->getMethodFamily();
+ }
- Selector getSelector() const;
+ Selector getSelector() const {
+ return Msg->getSelector();
+ }
const Expr *getInstanceReceiver() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getInstanceReceiver();
- const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
- if (propE->isObjectReceiver())
- return propE->getBase();
- return 0;
+ return Msg->getInstanceReceiver();
}
- SVal getInstanceReceiverSVal(const ProgramState *State,
+ SVal getInstanceReceiverSVal(ProgramStateRef State,
const LocationContext *LC) const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
if (!isInstanceMessage())
return UndefinedVal();
if (const Expr *Ex = getInstanceReceiver())
- return State->getSValAsScalarOrLoc(Ex);
+ return State->getSValAsScalarOrLoc(Ex, LC);
// An instance message with no expression means we are sending to super.
// In this case the object reference is the same as 'self'.
@@ -104,99 +94,66 @@ public:
}
bool isInstanceMessage() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->isInstanceMessage();
- const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
- // FIXME: 'super' may be super class.
- return propE->isObjectReceiver() || propE->isSuperReceiver();
+ return Msg->isInstanceMessage();
}
- const ObjCMethodDecl *getMethodDecl() const;
-
- const ObjCInterfaceDecl *getReceiverInterface() const;
-
- SourceLocation getSuperLoc() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getSuperLoc();
- return cast<ObjCPropertyRefExpr>(MsgOrPropE)->getReceiverLocation();
+ const ObjCMethodDecl *getMethodDecl() const {
+ return Msg->getMethodDecl();
}
- const Expr *getMsgOrPropExpr() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- return MsgOrPropE;
+ const ObjCInterfaceDecl *getReceiverInterface() const {
+ return Msg->getReceiverInterface();
}
- SourceRange getSourceRange() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- return MsgOrPropE->getSourceRange();
+ SourceLocation getSuperLoc() const {
+ if (PE)
+ return PE->getReceiverLocation();
+ return Msg->getSuperLoc();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ if (PE)
+ return PE->getSourceRange();
+ return Msg->getSourceRange();
}
unsigned getNumArgs() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getNumArgs();
- return isPropertySetter() ? 1 : 0;
+ return Msg->getNumArgs();
}
- SVal getArgSVal(unsigned i, const ProgramState *state) const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
+ SVal getArgSVal(unsigned i,
+ const LocationContext *LCtx,
+ ProgramStateRef state) const {
assert(i < getNumArgs() && "Invalid index for argument");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return state->getSVal(msgE->getArg(i));
- assert(isPropertySetter());
- return SetterArgV;
+ return state->getSVal(Msg->getArg(i), LCtx);
}
QualType getArgType(unsigned i) const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
assert(i < getNumArgs() && "Invalid index for argument");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getArg(i)->getType();
- assert(isPropertySetter());
- return cast<ObjCPropertyRefExpr>(MsgOrPropE)->getType();
+ return Msg->getArg(i)->getType();
}
- const Expr *getArgExpr(unsigned i) const;
+ const Expr *getArgExpr(unsigned i) const {
+ assert(i < getNumArgs() && "Invalid index for argument");
+ return Msg->getArg(i);
+ }
SourceRange getArgSourceRange(unsigned i) const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- assert(i < getNumArgs() && "Invalid index for argument");
- if (const Expr *argE = getArgExpr(i))
- return argE->getSourceRange();
- return OriginE->getSourceRange();
+ const Expr *argE = getArgExpr(i);
+ return argE->getSourceRange();
}
SourceRange getReceiverSourceRange() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getReceiverRange();
-
- const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
- if (propE->isObjectReceiver())
- return propE->getBase()->getSourceRange();
+ if (PE) {
+ if (PE->isObjectReceiver())
+ return PE->getBase()->getSourceRange();
+ }
+ else {
+ return Msg->getReceiverRange();
+ }
// FIXME: This isn't a range.
- return propE->getReceiverLocation();
- }
-};
-
-class ObjCPropertyGetter : public ObjCMessage {
-public:
- ObjCPropertyGetter(const ObjCPropertyRefExpr *propE, const Expr *originE)
- : ObjCMessage(propE, originE, false, SVal()) {
- assert(propE && originE &&
- "should not be initialized with null expressions");
- }
-};
-
-class ObjCPropertySetter : public ObjCMessage {
-public:
- ObjCPropertySetter(const ObjCPropertyRefExpr *propE, const Expr *storeE,
- SVal argV)
- : ObjCMessage(propE, storeE, true, argV) {
- assert(propE && storeE &&"should not be initialized with null expressions");
+ return PE->getReceiverLocation();
}
};
@@ -205,14 +162,18 @@ public:
class CallOrObjCMessage {
llvm::PointerUnion<const CallExpr *, const CXXConstructExpr *> CallE;
ObjCMessage Msg;
- const ProgramState *State;
+ ProgramStateRef State;
+ const LocationContext *LCtx;
public:
- CallOrObjCMessage(const CallExpr *callE, const ProgramState *state)
- : CallE(callE), State(state) {}
- CallOrObjCMessage(const CXXConstructExpr *consE, const ProgramState *state)
- : CallE(consE), State(state) {}
- CallOrObjCMessage(const ObjCMessage &msg, const ProgramState *state)
- : CallE((CallExpr *)0), Msg(msg), State(state) {}
+ CallOrObjCMessage(const CallExpr *callE, ProgramStateRef state,
+ const LocationContext *lctx)
+ : CallE(callE), State(state), LCtx(lctx) {}
+ CallOrObjCMessage(const CXXConstructExpr *consE, ProgramStateRef state,
+ const LocationContext *lctx)
+ : CallE(consE), State(state), LCtx(lctx) {}
+ CallOrObjCMessage(const ObjCMessage &msg, ProgramStateRef state,
+ const LocationContext *lctx)
+ : CallE((CallExpr *)0), Msg(msg), State(state), LCtx(lctx) {}
QualType getResultType(ASTContext &ctx) const;
@@ -233,9 +194,19 @@ public:
return ActualCallE && isa<CXXMemberCallExpr>(ActualCallE);
}
+ /// Check if the callee is declared in the system header.
+  /// Check if the callee is declared in a system header.
+ if (const Decl *FD = getDecl()) {
+ const SourceManager &SM =
+ State->getStateManager().getContext().getSourceManager();
+ return SM.isInSystemHeader(FD->getLocation());
+ }
+ return false;
+ }
+
const Expr *getOriginExpr() const {
if (!CallE)
- return Msg.getOriginExpr();
+ return Msg.getMessageExpr();
if (const CXXConstructExpr *Ctor =
CallE.dyn_cast<const CXXConstructExpr *>())
return Ctor;
@@ -246,6 +217,9 @@ public:
SVal getCXXCallee() const;
SVal getInstanceMessageReceiver(const LocationContext *LC) const;
+ /// Get the declaration of the function or method.
+ const Decl *getDecl() const;
+
unsigned getNumArgs() const {
if (!CallE)
return Msg.getNumArgs();
@@ -258,8 +232,8 @@ public:
SVal getArgSVal(unsigned i) const {
assert(i < getNumArgs());
if (!CallE)
- return Msg.getArgSVal(i, State);
- return State->getSVal(getArg(i));
+ return Msg.getArgSVal(i, LCtx, State);
+ return State->getSVal(getArg(i), LCtx);
}
const Expr *getArg(unsigned i) const {
@@ -283,6 +257,34 @@ public:
assert(isObjCMessage());
return Msg.getReceiverSourceRange();
}
+
+ /// \brief Check if the name corresponds to a CoreFoundation or CoreGraphics
+ /// function that allows objects to escape.
+ ///
+ /// Many methods allow a tracked object to escape. For example:
+ ///
+ /// CFMutableDictionaryRef x = CFDictionaryCreateMutable(..., customDeallocator);
+ /// CFDictionaryAddValue(y, key, x);
+ ///
+  /// We handle this and similar cases with the following heuristic. If the
+  /// function name contains "InsertValue", "SetValue", "AddValue", "WithData",
+  /// "AppendValue", or "SetAttribute", then we assume that arguments may
+  /// escape.
+ //
+ // TODO: To reduce false negatives here, we should track the container
+ // allocation site and check if a proper deallocator was set there.
+ static bool isCFCGAllowingEscape(StringRef FName) {
+ if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G'))
+      if (StrInStrNoCase(FName, "InsertValue") != StringRef::npos ||
+          StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+          StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+          StrInStrNoCase(FName, "WithData") != StringRef::npos ||
+          StrInStrNoCase(FName, "AppendValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetAttribute") != StringRef::npos) {
+ return true;
+ }
+ return false;
+ }
};
}
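
The heuristic above is easy to study in isolation. Below is a minimal, stand-alone C++ sketch of the same check; the containsNoCase helper is invented for the illustration (the analyzer uses its own StrInStrNoCase), and unlike the original, the sketch guards against names shorter than two characters before indexing into them.

    // Toy model of the CF/CG escape heuristic; illustrative only.
    #include <cctype>
    #include <cstdio>
    #include <string>

    // Case-insensitive substring search, standing in for StrInStrNoCase.
    static bool containsNoCase(const std::string &Haystack,
                               const std::string &Needle) {
      if (Needle.empty() || Haystack.size() < Needle.size())
        return false;
      for (size_t i = 0; i + Needle.size() <= Haystack.size(); ++i) {
        size_t j = 0;
        while (j < Needle.size() &&
               std::tolower((unsigned char)Haystack[i + j]) ==
                   std::tolower((unsigned char)Needle[j]))
          ++j;
        if (j == Needle.size())
          return true;
      }
      return false;
    }

    static bool isCFCGAllowingEscape(const std::string &FName) {
      // Guard the prefix test; the real code indexes FName[0..1] directly.
      if (FName.size() < 2 || FName[0] != 'C' ||
          (FName[1] != 'F' && FName[1] != 'G'))
        return false;
      const char *Suffixes[] = {"InsertValue", "AddValue",    "SetValue",
                                "WithData",    "AppendValue", "SetAttribute"};
      for (const char *S : Suffixes)
        if (containsNoCase(FName, S))
          return true;
      return false;
    }

    int main() {
      std::printf("%d\n", isCFCGAllowingEscape("CFDictionaryAddValue")); // 1
      std::printf("%d\n", isCFCGAllowingEscape("CFRetain"));             // 0
    }
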
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index edae06e..360d648 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -19,6 +19,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/Environment.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
@@ -33,7 +35,7 @@ class ASTContext;
namespace ento {
-class ProgramStateManager;
+class CallOrObjCMessage;
typedef ConstraintManager* (*ConstraintManagerCreator)(ProgramStateManager&,
SubEngine&);
@@ -54,8 +56,6 @@ template <typename T> struct ProgramStateTrait {
}
};
-class ProgramStateManager;
-
/// \class ProgramState
/// ProgramState - This class encapsulates:
///
@@ -88,12 +88,11 @@ private:
/// makeWithStore - Return a ProgramState with the same values as the current
/// state with the exception of using the specified Store.
- const ProgramState *makeWithStore(const StoreRef &store) const;
+ ProgramStateRef makeWithStore(const StoreRef &store) const;
void setStore(const StoreRef &storeRef);
public:
-
/// This ctor is used when creating the first ProgramState object.
ProgramState(ProgramStateManager *mgr, const Environment& env,
StoreRef st, GenericDataMap gdm);
@@ -107,9 +106,6 @@ public:
/// Return the ProgramStateManager associated with this state.
ProgramStateManager &getStateManager() const { return *stateMgr; }
- /// Return true if this state is referenced by a persistent ExplodedNode.
- bool referencedByExplodedNode() const { return refCount > 0; }
-
/// getEnvironment - Return the environment associated with this state.
/// The environment is the mapping from expressions to values.
const Environment& getEnvironment() const { return Env; }
@@ -168,17 +164,18 @@ public:
// If no new state is feasible, NULL is returned.
//
- const ProgramState *assume(DefinedOrUnknownSVal cond, bool assumption) const;
+ ProgramStateRef assume(DefinedOrUnknownSVal cond, bool assumption) const;
/// This method assumes both "true" and "false" for 'cond', and
/// returns both corresponding states. It's shorthand for doing
/// 'assume' twice.
- std::pair<const ProgramState*, const ProgramState*>
+  std::pair<ProgramStateRef, ProgramStateRef>
assume(DefinedOrUnknownSVal cond) const;
- const ProgramState *assumeInBound(DefinedOrUnknownSVal idx,
+ ProgramStateRef assumeInBound(DefinedOrUnknownSVal idx,
DefinedOrUnknownSVal upperBound,
- bool assumption) const;
+ bool assumption,
+ QualType IndexType = QualType()) const;
/// Utility method for getting regions.
const VarRegion* getRegion(const VarDecl *D, const LocationContext *LC) const;
@@ -189,49 +186,50 @@ public:
/// BindCompoundLiteral - Return the state that has the bindings currently
/// in this state plus the bindings for the CompoundLiteral.
- const ProgramState *bindCompoundLiteral(const CompoundLiteralExpr *CL,
+ ProgramStateRef bindCompoundLiteral(const CompoundLiteralExpr *CL,
const LocationContext *LC,
SVal V) const;
/// Create a new state by binding the value 'V' to the statement 'S' in the
/// state's environment.
- const ProgramState *BindExpr(const Stmt *S, SVal V, bool Invalidate = true) const;
+ ProgramStateRef BindExpr(const Stmt *S, const LocationContext *LCtx,
+ SVal V, bool Invalidate = true) const;
   /// Create a new state by binding the value 'V' and location 'location' to the
/// statement 'S' in the state's environment.
- const ProgramState *bindExprAndLocation(const Stmt *S, SVal location, SVal V)
- const;
+ ProgramStateRef bindExprAndLocation(const Stmt *S,
+ const LocationContext *LCtx,
+ SVal location, SVal V) const;
- const ProgramState *bindDecl(const VarRegion *VR, SVal V) const;
+ ProgramStateRef bindDecl(const VarRegion *VR, SVal V) const;
- const ProgramState *bindDeclWithNoInit(const VarRegion *VR) const;
+ ProgramStateRef bindDeclWithNoInit(const VarRegion *VR) const;
- const ProgramState *bindLoc(Loc location, SVal V) const;
+ ProgramStateRef bindLoc(Loc location, SVal V) const;
- const ProgramState *bindLoc(SVal location, SVal V) const;
+ ProgramStateRef bindLoc(SVal location, SVal V) const;
- const ProgramState *bindDefault(SVal loc, SVal V) const;
+ ProgramStateRef bindDefault(SVal loc, SVal V) const;
- const ProgramState *unbindLoc(Loc LV) const;
+ ProgramStateRef unbindLoc(Loc LV) const;
/// invalidateRegions - Returns the state with bindings for the given regions
   /// cleared from the store. The regions are provided as a contiguous array
/// from Begin to End. Optionally invalidates global regions as well.
- const ProgramState *invalidateRegions(ArrayRef<const MemRegion *> Regions,
- const Expr *E, unsigned BlockCount,
- StoreManager::InvalidatedSymbols *IS = 0,
- bool invalidateGlobals = false) const;
+ ProgramStateRef invalidateRegions(ArrayRef<const MemRegion *> Regions,
+ const Expr *E, unsigned BlockCount,
+ const LocationContext *LCtx,
+ StoreManager::InvalidatedSymbols *IS = 0,
+ const CallOrObjCMessage *Call = 0) const;
/// enterStackFrame - Returns the state for entry to the given stack frame,
/// preserving the current state.
- const ProgramState *enterStackFrame(const StackFrameContext *frame) const;
+ ProgramStateRef enterStackFrame(const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx) const;
/// Get the lvalue for a variable reference.
Loc getLValue(const VarDecl *D, const LocationContext *LC) const;
- /// Get the lvalue for a StringLiteral.
- Loc getLValue(const StringLiteral *literal) const;
-
Loc getLValue(const CompoundLiteralExpr *literal,
const LocationContext *LC) const;
@@ -247,15 +245,20 @@ public:
const llvm::APSInt *getSymVal(SymbolRef sym) const;
/// Returns the SVal bound to the statement 'S' in the state's environment.
- SVal getSVal(const Stmt *S, bool useOnlyDirectBindings = false) const;
+ SVal getSVal(const Stmt *S, const LocationContext *LCtx,
+ bool useOnlyDirectBindings = false) const;
- SVal getSValAsScalarOrLoc(const Stmt *Ex) const;
+ SVal getSValAsScalarOrLoc(const Stmt *Ex, const LocationContext *LCtx) const;
+ /// \brief Return the value bound to the specified location.
+ /// Returns UnknownVal() if none found.
SVal getSVal(Loc LV, QualType T = QualType()) const;
   /// Returns the "raw" SVal bound to LV before any value simplification.
SVal getRawSVal(Loc LV, QualType T= QualType()) const;
+ /// \brief Return the value bound to the specified location.
+ /// Returns UnknownVal() if none found.
SVal getSVal(const MemRegion* R) const;
SVal getSValAsScalarOrLoc(const MemRegion *R) const;
@@ -288,6 +291,25 @@ public:
scanReachableSymbols(const MemRegion * const *beg,
const MemRegion * const *end) const;
+ /// Create a new state in which the statement is marked as tainted.
+ ProgramStateRef addTaint(const Stmt *S, const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric) const;
+
+ /// Create a new state in which the symbol is marked as tainted.
+ ProgramStateRef addTaint(SymbolRef S,
+ TaintTagType Kind = TaintTagGeneric) const;
+
+ /// Create a new state in which the region symbol is marked as tainted.
+ ProgramStateRef addTaint(const MemRegion *R,
+ TaintTagType Kind = TaintTagGeneric) const;
+
+ /// Check if the statement is tainted in the current state.
+ bool isTainted(const Stmt *S, const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric) const;
+ bool isTainted(SVal V, TaintTagType Kind = TaintTagGeneric) const;
+ bool isTainted(SymbolRef Sym, TaintTagType Kind = TaintTagGeneric) const;
+ bool isTainted(const MemRegion *Reg, TaintTagType Kind=TaintTagGeneric) const;
+
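
For readers new to the taint interface being added here, the toy model below shows the functional style these methods follow: "adding" taint builds a new state and leaves the original untouched, and isTainted is a pure query. The TaintState type and SymbolID typedef are invented for the sketch; this is not the analyzer's API.

    #include <cstdio>
    #include <map>
    #include <memory>

    typedef unsigned SymbolID;
    enum TaintTagType { TaintTagGeneric = 0 };

    // Immutable-by-convention toy state: addTaint copies rather than
    // mutates, mirroring how ProgramState methods return a new state.
    struct TaintState {
      std::map<SymbolID, TaintTagType> Taint;

      std::shared_ptr<const TaintState>
      addTaint(SymbolID S, TaintTagType Kind = TaintTagGeneric) const {
        std::shared_ptr<TaintState> NewState =
            std::make_shared<TaintState>(*this);
        NewState->Taint[S] = Kind;
        return NewState;
      }

      bool isTainted(SymbolID S, TaintTagType Kind = TaintTagGeneric) const {
        std::map<SymbolID, TaintTagType>::const_iterator I = Taint.find(S);
        return I != Taint.end() && I->second == Kind;
      }
    };

    int main() {
      std::shared_ptr<const TaintState> S0 = std::make_shared<TaintState>();
      std::shared_ptr<const TaintState> S1 = S0->addTaint(42);
      std::printf("%d %d\n", S0->isTainted(42), S1->isTainted(42)); // 0 1
    }
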
//==---------------------------------------------------------------------==//
// Accessing the Generic Data Map (GDM).
//==---------------------------------------------------------------------==//
@@ -295,7 +317,7 @@ public:
void *const* FindGDM(void *K) const;
template<typename T>
- const ProgramState *add(typename ProgramStateTrait<T>::key_type K) const;
+ ProgramStateRef add(typename ProgramStateTrait<T>::key_type K) const;
template <typename T>
typename ProgramStateTrait<T>::data_type
@@ -315,23 +337,23 @@ public:
template<typename T>
- const ProgramState *remove(typename ProgramStateTrait<T>::key_type K) const;
+ ProgramStateRef remove(typename ProgramStateTrait<T>::key_type K) const;
template<typename T>
- const ProgramState *remove(typename ProgramStateTrait<T>::key_type K,
+ ProgramStateRef remove(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::context_type C) const;
template <typename T>
- const ProgramState *remove() const;
+ ProgramStateRef remove() const;
template<typename T>
- const ProgramState *set(typename ProgramStateTrait<T>::data_type D) const;
+ ProgramStateRef set(typename ProgramStateTrait<T>::data_type D) const;
template<typename T>
- const ProgramState *set(typename ProgramStateTrait<T>::key_type K,
+ ProgramStateRef set(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::value_type E) const;
template<typename T>
- const ProgramState *set(typename ProgramStateTrait<T>::key_type K,
+ ProgramStateRef set(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::value_type E,
typename ProgramStateTrait<T>::context_type C) const;
@@ -342,61 +364,25 @@ public:
}
// Pretty-printing.
- void print(raw_ostream &Out, CFG &C, const char *nl = "\n",
+ void print(raw_ostream &Out, const char *nl = "\n",
const char *sep = "") const;
+ void printDOT(raw_ostream &Out) const;
+ void printTaint(raw_ostream &Out, const char *nl = "\n",
+ const char *sep = "") const;
- void printStdErr(CFG &C) const;
-
- void printDOT(raw_ostream &Out, CFG &C) const;
+ void dump() const;
+ void dumpTaint() const;
private:
- /// Increments the number of times this state is referenced by ExplodeNodes.
- void incrementReferenceCount() { ++refCount; }
-
- /// Decrement the number of times this state is referenced by ExplodeNodes.
- void decrementReferenceCount() {
- assert(refCount > 0);
- --refCount;
- }
+ friend void ProgramStateRetain(const ProgramState *state);
+ friend void ProgramStateRelease(const ProgramState *state);
- const ProgramState *
+ ProgramStateRef
invalidateRegionsImpl(ArrayRef<const MemRegion *> Regions,
const Expr *E, unsigned BlockCount,
+ const LocationContext *LCtx,
StoreManager::InvalidatedSymbols &IS,
- bool invalidateGlobals) const;
-};
-
-class ProgramStateSet {
- typedef llvm::SmallPtrSet<const ProgramState*,5> ImplTy;
- ImplTy Impl;
-public:
- ProgramStateSet() {}
-
- inline void Add(const ProgramState *St) {
- Impl.insert(St);
- }
-
- typedef ImplTy::const_iterator iterator;
-
- inline unsigned size() const { return Impl.size(); }
- inline bool empty() const { return Impl.empty(); }
-
- inline iterator begin() const { return Impl.begin(); }
- inline iterator end() const { return Impl.end(); }
-
- class AutoPopulate {
- ProgramStateSet &S;
- unsigned StartSize;
- const ProgramState *St;
- public:
- AutoPopulate(ProgramStateSet &s, const ProgramState *st)
- : S(s), StartSize(S.size()), St(st) {}
-
- ~AutoPopulate() {
- if (StartSize == S.size())
- S.Add(St);
- }
- };
+ const CallOrObjCMessage *Call) const;
};
//===----------------------------------------------------------------------===//
@@ -405,13 +391,14 @@ public:
class ProgramStateManager {
friend class ProgramState;
+ friend void ProgramStateRelease(const ProgramState *state);
private:
/// Eng - The SubEngine that owns this state manager.
SubEngine *Eng; /* Can be null. */
EnvironmentManager EnvMgr;
- llvm::OwningPtr<StoreManager> StoreMgr;
- llvm::OwningPtr<ConstraintManager> ConstraintMgr;
+ OwningPtr<StoreManager> StoreMgr;
+ OwningPtr<ConstraintManager> ConstraintMgr;
ProgramState::GenericDataMap::Factory GDMFactory;
@@ -423,14 +410,10 @@ private:
llvm::FoldingSet<ProgramState> StateSet;
/// Object that manages the data for all created SVals.
- llvm::OwningPtr<SValBuilder> svalBuilder;
+ OwningPtr<SValBuilder> svalBuilder;
/// A BumpPtrAllocator to allocate states.
llvm::BumpPtrAllocator &Alloc;
-
- /// A vector of recently allocated ProgramStates that can potentially be
- /// reused.
- std::vector<ProgramState *> recentlyAllocatedStates;
/// A vector of ProgramStates that we can reuse.
std::vector<ProgramState *> freeStates;
@@ -465,7 +448,7 @@ public:
~ProgramStateManager();
- const ProgramState *getInitialState(const LocationContext *InitLoc);
+ ProgramStateRef getInitialState(const LocationContext *InitLoc);
ASTContext &getContext() { return svalBuilder->getContext(); }
const ASTContext &getContext() const { return svalBuilder->getContext(); }
@@ -501,13 +484,13 @@ public:
ConstraintManager& getConstraintManager() { return *ConstraintMgr; }
SubEngine* getOwningEngine() { return Eng; }
- const ProgramState *removeDeadBindings(const ProgramState *St,
+ ProgramStateRef removeDeadBindings(ProgramStateRef St,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper);
/// Marshal a new state for the callee in another translation unit.
/// 'state' is owned by the caller's engine.
- const ProgramState *MarshalState(const ProgramState *state, const StackFrameContext *L);
+ ProgramStateRef MarshalState(ProgramStateRef state, const StackFrameContext *L);
public:
@@ -516,31 +499,27 @@ public:
}
// Methods that manipulate the GDM.
- const ProgramState *addGDM(const ProgramState *St, void *Key, void *Data);
- const ProgramState *removeGDM(const ProgramState *state, void *Key);
+ ProgramStateRef addGDM(ProgramStateRef St, void *Key, void *Data);
+ ProgramStateRef removeGDM(ProgramStateRef state, void *Key);
// Methods that query & manipulate the Store.
- void iterBindings(const ProgramState *state, StoreManager::BindingsHandler& F) {
+ void iterBindings(ProgramStateRef state, StoreManager::BindingsHandler& F) {
StoreMgr->iterBindings(state->getStore(), F);
}
- const ProgramState *getPersistentState(ProgramState &Impl);
- const ProgramState *getPersistentStateWithGDM(const ProgramState *FromState,
- const ProgramState *GDMState);
+ ProgramStateRef getPersistentState(ProgramState &Impl);
+ ProgramStateRef getPersistentStateWithGDM(ProgramStateRef FromState,
+ ProgramStateRef GDMState);
- bool haveEqualEnvironments(const ProgramState * S1, const ProgramState * S2) {
+ bool haveEqualEnvironments(ProgramStateRef S1, ProgramStateRef S2) {
return S1->Env == S2->Env;
}
- bool haveEqualStores(const ProgramState * S1, const ProgramState * S2) {
+ bool haveEqualStores(ProgramStateRef S1, ProgramStateRef S2) {
return S1->store == S2->store;
}
- /// Periodically called by ExprEngine to recycle ProgramStates that were
- /// created but never used for creating an ExplodedNode.
- void recycleUnusedStates();
-
//==---------------------------------------------------------------------==//
// Generic Data Map methods.
//==---------------------------------------------------------------------==//
@@ -561,13 +540,13 @@ public:
// Trait based GDM dispatch.
template <typename T>
- const ProgramState *set(const ProgramState *st, typename ProgramStateTrait<T>::data_type D) {
+ ProgramStateRef set(ProgramStateRef st, typename ProgramStateTrait<T>::data_type D) {
return addGDM(st, ProgramStateTrait<T>::GDMIndex(),
ProgramStateTrait<T>::MakeVoidPtr(D));
}
template<typename T>
- const ProgramState *set(const ProgramState *st,
+ ProgramStateRef set(ProgramStateRef st,
typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::value_type V,
typename ProgramStateTrait<T>::context_type C) {
@@ -577,7 +556,7 @@ public:
}
template <typename T>
- const ProgramState *add(const ProgramState *st,
+ ProgramStateRef add(ProgramStateRef st,
typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::context_type C) {
return addGDM(st, ProgramStateTrait<T>::GDMIndex(),
@@ -585,7 +564,7 @@ public:
}
template <typename T>
- const ProgramState *remove(const ProgramState *st,
+ ProgramStateRef remove(ProgramStateRef st,
typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::context_type C) {
@@ -594,7 +573,7 @@ public:
}
template <typename T>
- const ProgramState *remove(const ProgramState *st) {
+ ProgramStateRef remove(ProgramStateRef st) {
return removeGDM(st, ProgramStateTrait<T>::GDMIndex());
}
@@ -611,11 +590,11 @@ public:
return ProgramStateTrait<T>::MakeContext(p);
}
- const llvm::APSInt* getSymVal(const ProgramState *St, SymbolRef sym) {
+ const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) {
return ConstraintMgr->getSymVal(St, sym);
}
- void EndPath(const ProgramState *St) {
+ void EndPath(ProgramStateRef St) {
ConstraintMgr->EndPath(St);
}
};
@@ -626,11 +605,12 @@ public:
//===----------------------------------------------------------------------===//
inline const VarRegion* ProgramState::getRegion(const VarDecl *D,
- const LocationContext *LC) const {
+ const LocationContext *LC) const
+{
return getStateManager().getRegionManager().getVarRegion(D, LC);
}
-inline const ProgramState *ProgramState::assume(DefinedOrUnknownSVal Cond,
+inline ProgramStateRef ProgramState::assume(DefinedOrUnknownSVal Cond,
bool Assumption) const {
if (Cond.isUnknown())
return this;
@@ -639,7 +619,7 @@ inline const ProgramState *ProgramState::assume(DefinedOrUnknownSVal Cond,
Assumption);
}
-inline std::pair<const ProgramState*, const ProgramState*>
+inline std::pair<ProgramStateRef, ProgramStateRef>
ProgramState::assume(DefinedOrUnknownSVal Cond) const {
if (Cond.isUnknown())
return std::make_pair(this, this);
@@ -648,7 +628,7 @@ ProgramState::assume(DefinedOrUnknownSVal Cond) const {
cast<DefinedSVal>(Cond));
}
-inline const ProgramState *ProgramState::bindLoc(SVal LV, SVal V) const {
+inline ProgramStateRef ProgramState::bindLoc(SVal LV, SVal V) const {
return !isa<Loc>(LV) ? this : bindLoc(cast<Loc>(LV), V);
}
@@ -657,10 +637,6 @@ inline Loc ProgramState::getLValue(const VarDecl *VD,
return getStateManager().StoreMgr->getLValueVar(VD, LC);
}
-inline Loc ProgramState::getLValue(const StringLiteral *literal) const {
- return getStateManager().StoreMgr->getLValueString(literal);
-}
-
inline Loc ProgramState::getLValue(const CompoundLiteralExpr *literal,
const LocationContext *LC) const {
return getStateManager().StoreMgr->getLValueCompoundLiteral(literal, LC);
@@ -684,27 +660,32 @@ inline const llvm::APSInt *ProgramState::getSymVal(SymbolRef sym) const {
return getStateManager().getSymVal(this, sym);
}
-inline SVal ProgramState::getSVal(const Stmt *Ex, bool useOnlyDirectBindings) const{
- return Env.getSVal(Ex, *getStateManager().svalBuilder,
+inline SVal ProgramState::getSVal(const Stmt *Ex, const LocationContext *LCtx,
+                                  bool useOnlyDirectBindings) const {
+ return Env.getSVal(EnvironmentEntry(Ex, LCtx),
+ *getStateManager().svalBuilder,
useOnlyDirectBindings);
}
-inline SVal ProgramState::getSValAsScalarOrLoc(const Stmt *S) const {
+inline SVal
+ProgramState::getSValAsScalarOrLoc(const Stmt *S,
+ const LocationContext *LCtx) const {
if (const Expr *Ex = dyn_cast<Expr>(S)) {
QualType T = Ex->getType();
if (Ex->isLValue() || Loc::isLocType(T) || T->isIntegerType())
- return getSVal(S);
+ return getSVal(S, LCtx);
}
return UnknownVal();
}
inline SVal ProgramState::getRawSVal(Loc LV, QualType T) const {
- return getStateManager().StoreMgr->Retrieve(getStore(), LV, T);
+ return getStateManager().StoreMgr->getBinding(getStore(), LV, T);
}
inline SVal ProgramState::getSVal(const MemRegion* R) const {
- return getStateManager().StoreMgr->Retrieve(getStore(), loc::MemRegionVal(R));
+ return getStateManager().StoreMgr->getBinding(getStore(),
+ loc::MemRegionVal(R));
}
inline BasicValueFactory &ProgramState::getBasicVals() const {
@@ -716,7 +697,7 @@ inline SymbolManager &ProgramState::getSymbolManager() const {
}
template<typename T>
-const ProgramState *ProgramState::add(typename ProgramStateTrait<T>::key_type K) const {
+ProgramStateRef ProgramState::add(typename ProgramStateTrait<T>::key_type K) const {
return getStateManager().add<T>(this, K, get_context<T>());
}
@@ -726,34 +707,34 @@ typename ProgramStateTrait<T>::context_type ProgramState::get_context() const {
}
template<typename T>
-const ProgramState *ProgramState::remove(typename ProgramStateTrait<T>::key_type K) const {
+ProgramStateRef ProgramState::remove(typename ProgramStateTrait<T>::key_type K) const {
return getStateManager().remove<T>(this, K, get_context<T>());
}
template<typename T>
-const ProgramState *ProgramState::remove(typename ProgramStateTrait<T>::key_type K,
+ProgramStateRef ProgramState::remove(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::context_type C) const {
return getStateManager().remove<T>(this, K, C);
}
template <typename T>
-const ProgramState *ProgramState::remove() const {
+ProgramStateRef ProgramState::remove() const {
return getStateManager().remove<T>(this);
}
template<typename T>
-const ProgramState *ProgramState::set(typename ProgramStateTrait<T>::data_type D) const {
+ProgramStateRef ProgramState::set(typename ProgramStateTrait<T>::data_type D) const {
return getStateManager().set<T>(this, D);
}
template<typename T>
-const ProgramState *ProgramState::set(typename ProgramStateTrait<T>::key_type K,
+ProgramStateRef ProgramState::set(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::value_type E) const {
return getStateManager().set<T>(this, K, E, get_context<T>());
}
template<typename T>
-const ProgramState *ProgramState::set(typename ProgramStateTrait<T>::key_type K,
+ProgramStateRef ProgramState::set(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::value_type E,
typename ProgramStateTrait<T>::context_type C) const {
return getStateManager().set<T>(this, K, E, C);
@@ -785,15 +766,16 @@ CB ProgramState::scanReachableSymbols(const MemRegion * const *beg,
 /// A utility class that allows visiting the reachable symbols using a custom
 /// SymbolVisitor.
class ScanReachableSymbols : public SubRegionMap::Visitor {
+ virtual void anchor();
typedef llvm::DenseMap<const void*, unsigned> VisitedItems;
VisitedItems visited;
- const ProgramState *state;
+ ProgramStateRef state;
SymbolVisitor &visitor;
- llvm::OwningPtr<SubRegionMap> SRM;
+ OwningPtr<SubRegionMap> SRM;
public:
- ScanReachableSymbols(const ProgramState *st, SymbolVisitor& v)
+ ScanReachableSymbols(ProgramStateRef st, SymbolVisitor& v)
: state(st), visitor(v) {}
bool scan(nonloc::CompoundVal val);
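
A convention running through the ProgramState interface above is worth making concrete: assume() returns null when the assumption is infeasible, and the pair-returning overload forks one state into a true branch and a false branch. The sketch below reproduces that contract for a made-up constraint domain (one symbol confined to an integer interval); it is an illustration, not the analyzer's ConstraintManager.

    #include <algorithm>
    #include <cstdio>
    #include <memory>

    // Toy "program state": a single symbol constrained to [Lo, Hi].
    struct State { int Lo, Hi; };
    typedef std::shared_ptr<const State> StateRef;

    // Returns the narrowed state, or null if the assumption is
    // infeasible -- the same convention as ProgramState::assume.
    static StateRef assumeGE(StateRef St, int Bound, bool Assumption) {
      if (Assumption) {                        // assume sym >= Bound
        if (St->Hi < Bound) return StateRef();
        return std::make_shared<State>(State{std::max(St->Lo, Bound), St->Hi});
      }
      if (St->Lo >= Bound) return StateRef();  // assume sym < Bound
      return std::make_shared<State>(State{St->Lo, std::min(St->Hi, Bound - 1)});
    }

    int main() {
      StateRef S = std::make_shared<State>(State{0, 10});
      // Fork on (sym >= 5), as the pair-returning assume would.
      StateRef STrue = assumeGE(S, 5, true);    // [5, 10]
      StateRef SFalse = assumeGE(S, 5, false);  // [0, 4]
      if (STrue)  std::printf("true:  [%d, %d]\n", STrue->Lo, STrue->Hi);
      if (SFalse) std::printf("false: [%d, %d]\n", SFalse->Lo, SFalse->Hi);
    }
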
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
index b80d494..1c7bedb 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
@@ -9,8 +9,8 @@
//
// This file defines partial implementations of template specializations of
// the class ProgramStateTrait<>. ProgramStateTrait<> is used by ProgramState
-// to implement set/get methods for mapulating a ProgramState's
-// generic data map.
+// to implement set/get methods for manipulating a ProgramState's
+// generic data map.
//
//===----------------------------------------------------------------------===//
@@ -176,7 +176,20 @@ namespace ento {
return (void*) (uintptr_t) d;
}
};
-
+
+ // Partial specialization for void*.
+ template <> struct ProgramStatePartialTrait<void*> {
+ typedef void *data_type;
+
+ static inline data_type MakeData(void *const* p) {
+ return p ? *p
+ : data_type();
+ }
+ static inline void *MakeVoidPtr(data_type d) {
+ return d;
+ }
+ };
+
} // end GR namespace
} // end clang namespace
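
The ProgramStatePartialTrait specializations above (including the new void* one) all follow one pattern: pack a typed value into an untyped GDM slot and unpack it on the way out. Here is a stand-alone sketch of that round trip, with an invented PartialTrait template and a plain std::map standing in for the immutable generic data map:

    #include <cassert>
    #include <cstdint>
    #include <map>

    template <typename T> struct PartialTrait; // primary left undefined

    template <> struct PartialTrait<unsigned> {
      typedef unsigned data_type;
      static data_type MakeData(void *const *P) {
        return P ? (data_type)(uintptr_t)*P : data_type();
      }
      static void *MakeVoidPtr(data_type D) { return (void *)(uintptr_t)D; }
    };

    // Same shape as the void* partial specialization added above.
    template <> struct PartialTrait<void *> {
      typedef void *data_type;
      static data_type MakeData(void *const *P) { return P ? *P : data_type(); }
      static void *MakeVoidPtr(data_type D) { return D; }
    };

    int main() {
      std::map<const void *, void *> GDM;
      static int CounterTag;          // unique address plays GDMIndex()
      GDM[&CounterTag] = PartialTrait<unsigned>::MakeVoidPtr(7u);
      void *Slot = GDM[&CounterTag];
      assert(PartialTrait<unsigned>::MakeData(&Slot) == 7u);
      return 0;
    }
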
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h
new file mode 100644
index 0000000..371f3c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h
@@ -0,0 +1,43 @@
+//== ProgramState_Fwd.h - Incomplete declarations of ProgramState -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PROGRAMSTATE_FWD_H
+#define LLVM_CLANG_PROGRAMSTATE_FWD_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+
+namespace clang {
+namespace ento {
+ class ProgramState;
+ class ProgramStateManager;
+ void ProgramStateRetain(const ProgramState *state);
+ void ProgramStateRelease(const ProgramState *state);
+}
+}
+
+namespace llvm {
+ template <> struct IntrusiveRefCntPtrInfo<const clang::ento::ProgramState> {
+ static void retain(const clang::ento::ProgramState *state) {
+ clang::ento::ProgramStateRetain(state);
+ }
+ static void release(const clang::ento::ProgramState *state) {
+ clang::ento::ProgramStateRelease(state);
+ }
+ };
+}
+
+namespace clang {
+namespace ento {
+ typedef IntrusiveRefCntPtr<const ProgramState> ProgramStateRef;
+}
+}
+
+#endif
+
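
The point of this forwarding header is that IntrusiveRefCntPtr's retain and release are routed through free functions, so the state manager can intercept the final release (for instance to recycle the state's storage). A toy reconstruction of that indirection, with invented Node/NodeRef names in place of ProgramState/ProgramStateRef:

    #include <cassert>
    #include <cstdio>

    // Object managed through free retain/release functions, like
    // ProgramStateRetain/ProgramStateRelease above.
    struct Node { mutable unsigned RefCount; Node() : RefCount(0) {} };

    static void NodeRetain(const Node *N) { ++N->RefCount; }
    static void NodeRelease(const Node *N) {
      assert(N->RefCount > 0);
      if (--N->RefCount == 0)
        delete N; // a real manager could recycle instead of deleting
    }

    // Minimal smart pointer delegating to the free functions, standing
    // in for IntrusiveRefCntPtr plus its IntrusiveRefCntPtrInfo trait.
    class NodeRef {
      const Node *Ptr;
      NodeRef &operator=(const NodeRef &); // not implemented, for brevity
    public:
      explicit NodeRef(const Node *P) : Ptr(P) { if (Ptr) NodeRetain(Ptr); }
      NodeRef(const NodeRef &O) : Ptr(O.Ptr) { if (Ptr) NodeRetain(Ptr); }
      ~NodeRef() { if (Ptr) NodeRelease(Ptr); }
      const Node *get() const { return Ptr; }
    };

    int main() {
      NodeRef A(new Node());
      {
        NodeRef B(A);                         // refcount becomes 2
        std::printf("%u\n", A.get()->RefCount);
      }                                       // B dies, back to 1
      std::printf("%u\n", A.get()->RefCount);
    }
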
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index 17233e1..4ad36f9 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -17,17 +17,19 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
namespace clang {
-namespace ento {
+class CXXBoolLiteralExpr;
-class ProgramState;
+namespace ento {
class SValBuilder {
+ virtual void anchor();
protected:
ASTContext &Context;
@@ -48,11 +50,13 @@ protected:
/// The width of the scalar type used for array indices.
const unsigned ArrayIndexWidth;
+ virtual SVal evalCastFromNonLoc(NonLoc val, QualType castTy) = 0;
+ virtual SVal evalCastFromLoc(Loc val, QualType castTy) = 0;
+
public:
// FIXME: Make these protected again once RegionStoreManager correctly
// handles loads from different bound value types.
- virtual SVal evalCastFromNonLoc(NonLoc val, QualType castTy) = 0;
- virtual SVal evalCastFromLoc(Loc val, QualType castTy) = 0;
+ virtual SVal dispatchCast(SVal val, QualType castTy) = 0;
public:
SValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
@@ -66,29 +70,54 @@ public:
virtual ~SValBuilder() {}
+ bool haveSameType(const SymExpr *Sym1, const SymExpr *Sym2) {
+ return haveSameType(Sym1->getType(Context), Sym2->getType(Context));
+ }
+
+ bool haveSameType(QualType Ty1, QualType Ty2) {
+ // FIXME: Remove the second disjunct when we support symbolic
+ // truncation/extension.
+ return (Context.getCanonicalType(Ty1) == Context.getCanonicalType(Ty2) ||
+            (Ty1->isIntegerType() && Ty2->isIntegerType()));
+ }
+
SVal evalCast(SVal val, QualType castTy, QualType originalType);
virtual SVal evalMinus(NonLoc val) = 0;
virtual SVal evalComplement(NonLoc val) = 0;
- virtual SVal evalBinOpNN(const ProgramState *state, BinaryOperator::Opcode op,
+  /// Create a new value which represents a binary expression with two
+  /// non-location operands.
+ virtual SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs, QualType resultTy) = 0;
- virtual SVal evalBinOpLL(const ProgramState *state, BinaryOperator::Opcode op,
+ /// Create a new value which represents a binary expression with two memory
+ /// location operands.
+ virtual SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
Loc lhs, Loc rhs, QualType resultTy) = 0;
- virtual SVal evalBinOpLN(const ProgramState *state, BinaryOperator::Opcode op,
+ /// Create a new value which represents a binary expression with a memory
+  /// location and non-location operands. For example, this would be used to
+ /// evaluate a pointer arithmetic operation.
+ virtual SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy) = 0;
- /// getKnownValue - evaluates a given SVal. If the SVal has only one possible
- /// (integer) value, that value is returned. Otherwise, returns NULL.
- virtual const llvm::APSInt *getKnownValue(const ProgramState *state, SVal val) = 0;
+ /// Evaluates a given SVal. If the SVal has only one possible (integer) value,
+ /// that value is returned. Otherwise, returns NULL.
+ virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal val) = 0;
- SVal evalBinOp(const ProgramState *state, BinaryOperator::Opcode op,
+ /// Handles generation of the value in case the builder is not smart enough to
+ /// handle the given binary expression. Depending on the state, decides to
+ /// either keep the expression or forget the history and generate an
+ /// UnknownVal.
+ SVal makeGenericVal(ProgramStateRef state, BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy);
+
+ SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
SVal lhs, SVal rhs, QualType type);
- DefinedOrUnknownSVal evalEQ(const ProgramState *state, DefinedOrUnknownSVal lhs,
+ DefinedOrUnknownSVal evalEQ(ProgramStateRef state, DefinedOrUnknownSVal lhs,
DefinedOrUnknownSVal rhs);
ASTContext &getContext() { return Context; }
@@ -115,28 +144,47 @@ public:
// Forwarding methods to SymbolManager.
- const SymbolConjured* getConjuredSymbol(const Stmt *stmt, QualType type,
+ const SymbolConjured* getConjuredSymbol(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
unsigned visitCount,
const void *symbolTag = 0) {
- return SymMgr.getConjuredSymbol(stmt, type, visitCount, symbolTag);
+ return SymMgr.getConjuredSymbol(stmt, LCtx, type, visitCount, symbolTag);
}
- const SymbolConjured* getConjuredSymbol(const Expr *expr, unsigned visitCount,
+ const SymbolConjured* getConjuredSymbol(const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned visitCount,
const void *symbolTag = 0) {
- return SymMgr.getConjuredSymbol(expr, visitCount, symbolTag);
+ return SymMgr.getConjuredSymbol(expr, LCtx, visitCount, symbolTag);
}
- /// makeZeroVal - Construct an SVal representing '0' for the specified type.
+ /// Construct an SVal representing '0' for the specified type.
DefinedOrUnknownSVal makeZeroVal(QualType type);
- /// getRegionValueSymbolVal - make a unique symbol for value of region.
+  /// Make a unique symbol for the value of the region.
DefinedOrUnknownSVal getRegionValueSymbolVal(const TypedValueRegion *region);
+ /// \brief Create a new symbol with a unique 'name'.
+ ///
+ /// We resort to conjured symbols when we cannot construct a derived symbol.
+ /// The advantage of symbols derived/built from other symbols is that we
+  /// preserve the relation between related (or even equivalent) expressions, so
+ /// conjured symbols should be used sparingly.
DefinedOrUnknownSVal getConjuredSymbolVal(const void *symbolTag,
- const Expr *expr, unsigned count);
+ const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned count);
DefinedOrUnknownSVal getConjuredSymbolVal(const void *symbolTag,
- const Expr *expr, QualType type,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ QualType type,
unsigned count);
+
+ DefinedOrUnknownSVal getConjuredSymbolVal(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount);
DefinedOrUnknownSVal getDerivedRegionValueSymbolVal(
SymbolRef parentSymbol, const TypedValueRegion *region);
@@ -175,11 +223,13 @@ public:
BasicVals.getValue(integer->getValue(),
integer->getType()->isUnsignedIntegerOrEnumerationType()));
}
-
- nonloc::ConcreteInt makeBoolVal(const CXXBoolLiteralExpr *boolean) {
- return makeTruthVal(boolean->getValue());
+
+ nonloc::ConcreteInt makeBoolVal(const ObjCBoolLiteralExpr *boolean) {
+ return makeTruthVal(boolean->getValue(), boolean->getType());
}
+ nonloc::ConcreteInt makeBoolVal(const CXXBoolLiteralExpr *boolean);
+
nonloc::ConcreteInt makeIntVal(const llvm::APSInt& integer) {
return nonloc::ConcreteInt(BasicVals.getValue(integer));
}
@@ -220,9 +270,15 @@ public:
NonLoc makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
const llvm::APSInt& rhs, QualType type);
+ NonLoc makeNonLoc(const llvm::APSInt& rhs, BinaryOperator::Opcode op,
+ const SymExpr *lhs, QualType type);
+
NonLoc makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
const SymExpr *rhs, QualType type);
+  /// \brief Create a NonLoc value for a cast.
+ NonLoc makeNonLoc(const SymExpr *operand, QualType fromTy, QualType toTy);
+
nonloc::ConcreteInt makeTruthVal(bool b, QualType type) {
return nonloc::ConcreteInt(BasicVals.getTruthValue(b, type));
}
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index 5827b00..ed01db2 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -17,6 +17,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "llvm/ADT/ImmutableList.h"
//==------------------------------------------------------------------------==//
@@ -121,50 +122,39 @@ public:
/// Otherwise return 0.
const FunctionDecl *getAsFunctionDecl() const;
- /// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
- /// wraps a symbol, return that SymbolRef. Otherwise return NULL.
+ /// If this SVal is a location (subclasses Loc) and
+ /// wraps a symbol, return that SymbolRef. Otherwise return 0.
SymbolRef getAsLocSymbol() const;
/// Get the symbol in the SVal or its base region.
SymbolRef getLocSymbolInBase() const;
- /// getAsSymbol - If this Sval wraps a symbol return that SymbolRef.
- /// Otherwise return a SymbolRef where 'isValid()' returns false.
+  /// If this SVal wraps a symbol, return that SymbolRef.
+ /// Otherwise, return 0.
SymbolRef getAsSymbol() const;
   /// getAsSymbolicExpression - If this SVal wraps a symbolic expression then
/// return that expression. Otherwise return NULL.
const SymExpr *getAsSymbolicExpression() const;
+ const SymExpr* getAsSymExpr() const;
+
const MemRegion *getAsRegion() const;
void dumpToStream(raw_ostream &OS) const;
void dump() const;
- // Iterators.
- class symbol_iterator {
- SmallVector<const SymExpr*, 5> itr;
- void expand();
- public:
- symbol_iterator() {}
- symbol_iterator(const SymExpr *SE);
-
- symbol_iterator &operator++();
- SymbolRef operator*();
-
- bool operator==(const symbol_iterator &X) const;
- bool operator!=(const symbol_iterator &X) const;
- };
-
- symbol_iterator symbol_begin() const {
+ SymExpr::symbol_iterator symbol_begin() const {
const SymExpr *SE = getAsSymbolicExpression();
if (SE)
- return symbol_iterator(SE);
+ return SE->symbol_begin();
else
- return symbol_iterator();
+ return SymExpr::symbol_iterator();
}
- symbol_iterator symbol_end() const { return symbol_iterator(); }
+ SymExpr::symbol_iterator symbol_end() const {
+ return SymExpr::symbol_end();
+ }
// Implement isa<T> support.
static inline bool classof(const SVal*) { return true; }
@@ -274,43 +264,30 @@ namespace nonloc {
enum Kind { ConcreteIntKind, SymbolValKind, SymExprValKind,
LocAsIntegerKind, CompoundValKind, LazyCompoundValKind };
+/// \brief Represents a symbolic expression.
class SymbolVal : public NonLoc {
public:
SymbolVal(SymbolRef sym) : NonLoc(SymbolValKind, sym) {}
SymbolRef getSymbol() const {
- return (const SymbolData*) Data;
- }
-
- static inline bool classof(const SVal* V) {
- return V->getBaseKind() == NonLocKind &&
- V->getSubKind() == SymbolValKind;
- }
-
- static inline bool classof(const NonLoc* V) {
- return V->getSubKind() == SymbolValKind;
+ return (const SymExpr*) Data;
}
-};
-
-class SymExprVal : public NonLoc {
-public:
- explicit SymExprVal(const SymExpr *SE)
- : NonLoc(SymExprValKind, reinterpret_cast<const void*>(SE)) {}
- const SymExpr *getSymbolicExpression() const {
- return reinterpret_cast<const SymExpr*>(Data);
+ bool isExpression() {
+ return !isa<SymbolData>(getSymbol());
}
static inline bool classof(const SVal* V) {
return V->getBaseKind() == NonLocKind &&
- V->getSubKind() == SymExprValKind;
+ V->getSubKind() == SymbolValKind;
}
static inline bool classof(const NonLoc* V) {
- return V->getSubKind() == SymExprValKind;
+ return V->getSubKind() == SymbolValKind;
}
};
+/// \brief Value representing an integer constant.
class ConcreteInt : public NonLoc {
public:
explicit ConcreteInt(const llvm::APSInt& V) : NonLoc(ConcreteIntKind, &V) {}
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index a688d7f..5315f4b 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -29,6 +29,7 @@ class StackFrameContext;
namespace ento {
+class CallOrObjCMessage;
class ProgramState;
class ProgramStateManager;
class SubRegionMap;
@@ -54,7 +55,7 @@ public:
/// expected type of the returned value. This is used if the value is
/// lazily computed.
/// \return The value bound to the location \c loc.
- virtual SVal Retrieve(Store store, Loc loc, QualType T = QualType()) = 0;
+ virtual SVal getBinding(Store store, Loc loc, QualType T = QualType()) = 0;
/// Return a state with the specified value bound to the given location.
/// \param[in] state The analysis state.
@@ -93,18 +94,12 @@ public:
return svalBuilder.makeLoc(MRMgr.getVarRegion(VD, LC));
}
- virtual Loc getLValueString(const StringLiteral* S) {
- return svalBuilder.makeLoc(MRMgr.getStringRegion(S));
- }
-
Loc getLValueCompoundLiteral(const CompoundLiteralExpr *CL,
const LocationContext *LC) {
return loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC));
}
- virtual SVal getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
- return getLValueFieldOrIvar(decl, base);
- }
+ virtual SVal getLValueIvar(const ObjCIvarDecl *decl, SVal base);
virtual SVal getLValueField(const FieldDecl *D, SVal Base) {
return getLValueFieldOrIvar(D, Base);
@@ -114,7 +109,7 @@ public:
// FIXME: This should soon be eliminated altogether; clients should deal with
// region extents directly.
- virtual DefinedOrUnknownSVal getSizeInElements(const ProgramState *state,
+ virtual DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
const MemRegion *region,
QualType EleTy) {
return UnknownVal();
@@ -125,17 +120,26 @@ public:
virtual SVal ArrayToPointer(Loc Array) = 0;
/// Evaluates DerivedToBase casts.
- virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType) {
- return UnknownVal();
- }
+ virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType) = 0;
+
+  /// \brief Evaluates a C++ dynamic_cast.
+  /// The callback may result in the following 3 scenarios:
+  ///  - Successful cast (ex: derived is a subclass of base).
+  ///  - Failed cast (ex: derived is definitely not a subclass of base).
+  ///  - We don't know (base is a symbolic region and we don't have
+  ///    enough info to determine if the cast will succeed at run time).
+  /// The function returns an SVal representing the derived class; it is
+  /// valid only if the Failed flag is set to false.
+ virtual SVal evalDynamicCast(SVal base, QualType derivedPtrType,
+ bool &Failed) = 0;
class CastResult {
- const ProgramState *state;
+ ProgramStateRef state;
const MemRegion *region;
public:
- const ProgramState *getState() const { return state; }
+ ProgramStateRef getState() const { return state; }
const MemRegion* getRegion() const { return region; }
- CastResult(const ProgramState *s, const MemRegion* r = 0) : state(s), region(r){}
+ CastResult(ProgramStateRef s, const MemRegion* r = 0) : state(s), region(r){}
};
const ElementRegion *GetElementZeroRegion(const MemRegion *R, QualType T);
@@ -180,8 +184,8 @@ public:
/// symbols to mark the values of invalidated regions.
/// \param[in,out] IS A set to fill with any symbols that are no longer
/// accessible. Pass \c NULL if this information will not be used.
- /// \param[in] invalidateGlobals If \c true, any non-static global regions
- /// are invalidated as well.
+ /// \param[in] Call The call expression which will be used to determine which
+ /// globals should get invalidated.
/// \param[in,out] Regions A vector to fill with any regions being
/// invalidated. This should include any regions explicitly invalidated
/// even if they do not currently have bindings. Pass \c NULL if this
@@ -189,14 +193,16 @@ public:
virtual StoreRef invalidateRegions(Store store,
ArrayRef<const MemRegion *> Regions,
const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
InvalidatedSymbols &IS,
- bool invalidateGlobals,
+ const CallOrObjCMessage *Call,
InvalidatedRegions *Invalidated) = 0;
   /// enterStackFrame - Let the StoreManager do something when the execution
   /// engine is about to execute into a callee.
- virtual StoreRef enterStackFrame(const ProgramState *state,
- const StackFrameContext *frame);
+ virtual StoreRef enterStackFrame(ProgramStateRef state,
+ const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx);
virtual void print(Store store, raw_ostream &Out,
const char* nl, const char *sep) = 0;
@@ -208,6 +214,21 @@ public:
const MemRegion *region, SVal val) = 0;
};
+ class FindUniqueBinding :
+ public BindingsHandler {
+ SymbolRef Sym;
+ const MemRegion* Binding;
+ bool First;
+
+ public:
+ FindUniqueBinding(SymbolRef sym) : Sym(sym), Binding(0), First(true) {}
+
+ bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
+ SVal val);
+ operator bool() { return First && Binding; }
+ const MemRegion *getRegion() { return Binding; }
+ };
+
/// iterBindings - Iterate over the bindings in the Store.
virtual void iterBindings(Store store, BindingsHandler& f) = 0;
@@ -258,10 +279,12 @@ inline StoreRef &StoreRef::operator=(StoreRef const &newStore) {
/// SubRegionMap - An abstract interface that represents a queryable map
/// between MemRegion objects and their subregions.
class SubRegionMap {
+ virtual void anchor();
public:
virtual ~SubRegionMap() {}
class Visitor {
+ virtual void anchor();
public:
virtual ~Visitor() {}
virtual bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) = 0;
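
FindUniqueBinding, declared above, scans every binding in a store and succeeds only when exactly one region is bound to the symbol of interest. The stand-alone sketch below replays that logic over a toy store (a plain map from region name to symbol id; the empty name doubles as the "no binding yet" sentinel). Names are invented; no StoreManager is involved.

    #include <cstdio>
    #include <map>
    #include <string>

    struct FindUniqueBinding {
      unsigned Sym;
      std::string Binding; // empty means "not found yet"
      bool Unique;

      explicit FindUniqueBinding(unsigned S) : Sym(S), Unique(true) {}

      void handleBinding(const std::string &Region, unsigned Val) {
        if (Val != Sym)
          return;
        if (!Binding.empty())
          Unique = false;   // second hit: the binding is not unique
        else
          Binding = Region;
      }

      bool found() const { return Unique && !Binding.empty(); }
    };

    int main() {
      std::map<std::string, unsigned> Store;
      Store["x"] = 1; Store["y"] = 2; Store["z"] = 2;
      FindUniqueBinding F1(1), F2(2);
      for (std::map<std::string, unsigned>::iterator I = Store.begin();
           I != Store.end(); ++I) {
        F1.handleBinding(I->first, I->second);
        F2.handleBinding(I->first, I->second);
      }
      std::printf("%d %d\n", F1.found(), F2.found()); // 1 0
    }
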
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
index ae212bc..baf57d4 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
@@ -26,27 +26,26 @@ class Stmt;
namespace ento {
-template <typename PP> class GenericNodeBuilder;
+struct NodeBuilderContext;
class AnalysisManager;
class ExplodedNodeSet;
class ExplodedNode;
class ProgramState;
class ProgramStateManager;
class BlockCounter;
-class StmtNodeBuilder;
class BranchNodeBuilder;
class IndirectGotoNodeBuilder;
class SwitchNodeBuilder;
class EndOfFunctionNodeBuilder;
-class CallEnterNodeBuilder;
-class CallExitNodeBuilder;
+class NodeBuilderWithSinks;
class MemRegion;
class SubEngine {
+ virtual void anchor();
public:
virtual ~SubEngine() {}
- virtual const ProgramState *getInitialState(const LocationContext *InitLoc) = 0;
+ virtual ProgramStateRef getInitialState(const LocationContext *InitLoc) = 0;
virtual AnalysisManager &getAnalysisManager() = 0;
@@ -54,18 +53,23 @@ public:
/// Called by CoreEngine. Used to generate new successor
/// nodes by processing the 'effects' of a block-level statement.
- virtual void processCFGElement(const CFGElement E, StmtNodeBuilder& builder)=0;
+ virtual void processCFGElement(const CFGElement E, ExplodedNode* Pred,
+ unsigned StmtIdx, NodeBuilderContext *Ctx)=0;
/// Called by CoreEngine when it starts processing a CFGBlock. The
/// SubEngine is expected to populate dstNodes with new nodes representing
/// updated analysis state, or generate no nodes at all if it doesn't.
- virtual void processCFGBlockEntrance(ExplodedNodeSet &dstNodes,
- GenericNodeBuilder<BlockEntrance> &nodeBuilder) = 0;
+ virtual void processCFGBlockEntrance(const BlockEdge &L,
+ NodeBuilderWithSinks &nodeBuilder) = 0;
/// Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a branch condition.
virtual void processBranch(const Stmt *Condition, const Stmt *Term,
- BranchNodeBuilder& builder) = 0;
+ NodeBuilderContext& BuilderCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) = 0;
/// Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
@@ -77,40 +81,41 @@ public:
/// Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
- virtual void processEndOfFunction(EndOfFunctionNodeBuilder& builder) = 0;
+ virtual void processEndOfFunction(NodeBuilderContext& BC) = 0;
// Generate the entry node of the callee.
- virtual void processCallEnter(CallEnterNodeBuilder &builder) = 0;
+ virtual void processCallEnter(CallEnter CE, ExplodedNode *Pred) = 0;
// Generate the first post callsite node.
- virtual void processCallExit(CallExitNodeBuilder &builder) = 0;
+ virtual void processCallExit(ExplodedNode *Pred) = 0;
/// Called by ConstraintManager. Used to call checker-specific
/// logic for handling assumptions on symbolic values.
- virtual const ProgramState *processAssume(const ProgramState *state,
+ virtual ProgramStateRef processAssume(ProgramStateRef state,
SVal cond, bool assumption) = 0;
/// wantsRegionChangeUpdate - Called by ProgramStateManager to determine if a
/// region change should trigger a processRegionChanges update.
- virtual bool wantsRegionChangeUpdate(const ProgramState *state) = 0;
+ virtual bool wantsRegionChangeUpdate(ProgramStateRef state) = 0;
- /// processRegionChanges - Called by ProgramStateManager whenever a change is made
- /// to the store. Used to update checkers that track region values.
- virtual const ProgramState *
- processRegionChanges(const ProgramState *state,
+ /// processRegionChanges - Called by ProgramStateManager whenever a change is
+ /// made to the store. Used to update checkers that track region values.
+ virtual ProgramStateRef
+ processRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions) = 0;
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) = 0;
- inline const ProgramState *
- processRegionChange(const ProgramState *state,
+ inline ProgramStateRef
+ processRegionChange(ProgramStateRef state,
const MemRegion* MR) {
- return processRegionChanges(state, 0, MR, MR);
+ return processRegionChanges(state, 0, MR, MR, 0);
}
/// printState - Called by ProgramStateManager to print checker-specific data.
- virtual void printState(raw_ostream &Out, const ProgramState *State,
+ virtual void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) = 0;
/// Called by CoreEngine when the analysis worklist is either empty or the
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
index 0d6e18e..c7de7ef 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -40,13 +40,16 @@ namespace ento {
class TypedValueRegion;
class VarRegion;
+/// \brief Symbolic value. These values are used to capture symbolic execution
+/// of the program.
class SymExpr : public llvm::FoldingSetNode {
+ virtual void anchor();
public:
enum Kind { RegionValueKind, ConjuredKind, DerivedKind, ExtentKind,
MetadataKind,
BEGIN_SYMBOLS = RegionValueKind,
END_SYMBOLS = MetadataKind,
- SymIntKind, SymSymKind };
+ SymIntKind, IntSymKind, SymSymKind, CastSymbolKind };
private:
Kind K;
@@ -58,21 +61,49 @@ public:
Kind getKind() const { return K; }
- void dump() const;
+ virtual void dump() const;
- virtual void dumpToStream(raw_ostream &os) const = 0;
+ virtual void dumpToStream(raw_ostream &os) const {}
virtual QualType getType(ASTContext&) const = 0;
virtual void Profile(llvm::FoldingSetNodeID& profile) = 0;
// Implement isa<T> support.
static inline bool classof(const SymExpr*) { return true; }
+
+ /// \brief Iterator over symbols that the current symbol depends on.
+ ///
+ /// For SymbolData, it's the symbol itself; for expressions, it's the
+  /// expression symbol and all the operands in it. Note that SymbolDerived is
+  /// treated as SymbolData - the iterator will NOT visit the parent region.
+ class symbol_iterator {
+ SmallVector<const SymExpr*, 5> itr;
+ void expand();
+ public:
+ symbol_iterator() {}
+ symbol_iterator(const SymExpr *SE);
+
+ symbol_iterator &operator++();
+ const SymExpr* operator*();
+
+ bool operator==(const symbol_iterator &X) const;
+ bool operator!=(const symbol_iterator &X) const;
+ };
+
+ symbol_iterator symbol_begin() const {
+ return symbol_iterator(this);
+ }
+ static symbol_iterator symbol_end() { return symbol_iterator(); }
};
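
The symbol_iterator added above visits a symbol together with every symbol it depends on, using a small worklist (the SmallVector behind expand()). The sketch below reproduces that traversal over a toy expression tree; the Expr struct and visitSymbols function are invented for the illustration.

    #include <cstdio>
    #include <vector>

    // Toy symbolic-expression tree: a leaf symbol or a binary node.
    struct Expr {
      const char *Name;
      const Expr *LHS; // null for leaves
      const Expr *RHS;
    };

    // Worklist-style iteration over an expression and everything it
    // depends on, in the spirit of symbol_iterator::expand().
    static void visitSymbols(const Expr *Root) {
      std::vector<const Expr *> Worklist;
      Worklist.push_back(Root);
      while (!Worklist.empty()) {
        const Expr *E = Worklist.back();
        Worklist.pop_back();
        std::printf("%s\n", E->Name);
        if (E->LHS) Worklist.push_back(E->LHS);
        if (E->RHS) Worklist.push_back(E->RHS);
      }
    }

    int main() {
      Expr X = {"x", 0, 0}, Y = {"y", 0, 0};
      Expr Sum = {"(x + y)", &X, &Y};
      visitSymbols(&Sum); // prints the expression, then each operand
    }
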
-typedef unsigned SymbolID;
+typedef const SymExpr* SymbolRef;
+typedef llvm::SmallVector<SymbolRef, 2> SymbolRefSmallVectorTy;
+typedef unsigned SymbolID;
+/// \brief A symbol representing data which can be stored in a memory location
+/// (region).
class SymbolData : public SymExpr {
-private:
+ virtual void anchor();
const SymbolID Sym;
protected:
@@ -90,10 +121,7 @@ public:
}
};
-typedef const SymbolData* SymbolRef;
-typedef llvm::SmallVector<SymbolRef, 2> SymbolRefSmallVectorTy;
-
-/// A symbol representing the value of a MemRegion.
+/// \brief A symbol representing the value stored at a MemRegion.
class SymbolRegionValue : public SymbolData {
const TypedValueRegion *R;
@@ -112,7 +140,7 @@ public:
Profile(profile, R);
}
- void dumpToStream(raw_ostream &os) const;
+ virtual void dumpToStream(raw_ostream &os) const;
QualType getType(ASTContext&) const;
@@ -122,17 +150,21 @@ public:
}
};
-/// A symbol representing the result of an expression.
+/// A symbol representing the result of an expression in the case when we do
+/// not know anything about the expression's value.
class SymbolConjured : public SymbolData {
const Stmt *S;
QualType T;
unsigned Count;
+ const LocationContext *LCtx;
const void *SymbolTag;
public:
- SymbolConjured(SymbolID sym, const Stmt *s, QualType t, unsigned count,
+ SymbolConjured(SymbolID sym, const Stmt *s, const LocationContext *lctx,
+ QualType t, unsigned count,
const void *symbolTag)
: SymbolData(ConjuredKind, sym), S(s), T(t), Count(count),
+ LCtx(lctx),
SymbolTag(symbolTag) {}
const Stmt *getStmt() const { return S; }
@@ -141,19 +173,21 @@ public:
QualType getType(ASTContext&) const;
- void dumpToStream(raw_ostream &os) const;
+ virtual void dumpToStream(raw_ostream &os) const;
static void Profile(llvm::FoldingSetNodeID& profile, const Stmt *S,
- QualType T, unsigned Count, const void *SymbolTag) {
+ QualType T, unsigned Count, const LocationContext *LCtx,
+ const void *SymbolTag) {
profile.AddInteger((unsigned) ConjuredKind);
profile.AddPointer(S);
+ profile.AddPointer(LCtx);
profile.Add(T);
profile.AddInteger(Count);
profile.AddPointer(SymbolTag);
}
virtual void Profile(llvm::FoldingSetNodeID& profile) {
- Profile(profile, S, T, Count, SymbolTag);
+ Profile(profile, S, T, Count, LCtx, SymbolTag);
}
// Implement isa<T> support.
@@ -177,7 +211,7 @@ public:
QualType getType(ASTContext&) const;
- void dumpToStream(raw_ostream &os) const;
+ virtual void dumpToStream(raw_ostream &os) const;
static void Profile(llvm::FoldingSetNodeID& profile, SymbolRef parent,
const TypedValueRegion *r) {
@@ -210,7 +244,7 @@ public:
QualType getType(ASTContext&) const;
- void dumpToStream(raw_ostream &os) const;
+ virtual void dumpToStream(raw_ostream &os) const;
static void Profile(llvm::FoldingSetNodeID& profile, const SubRegion *R) {
profile.AddInteger((unsigned) ExtentKind);
@@ -249,7 +283,7 @@ public:
QualType getType(ASTContext&) const;
- void dumpToStream(raw_ostream &os) const;
+ virtual void dumpToStream(raw_ostream &os) const;
static void Profile(llvm::FoldingSetNodeID& profile, const MemRegion *R,
const Stmt *S, QualType T, unsigned Count,
@@ -272,6 +306,42 @@ public:
}
};
+/// \brief Represents a cast expression.
+class SymbolCast : public SymExpr {
+ const SymExpr *Operand;
+ /// Type of the operand.
+ QualType FromTy;
+ /// The type of the result.
+ QualType ToTy;
+
+public:
+ SymbolCast(const SymExpr *In, QualType From, QualType To) :
+ SymExpr(CastSymbolKind), Operand(In), FromTy(From), ToTy(To) { }
+
+ QualType getType(ASTContext &C) const { return ToTy; }
+
+ const SymExpr *getOperand() const { return Operand; }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ static void Profile(llvm::FoldingSetNodeID& ID,
+ const SymExpr *In, QualType From, QualType To) {
+ ID.AddInteger((unsigned) CastSymbolKind);
+ ID.AddPointer(In);
+ ID.Add(From);
+ ID.Add(To);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) {
+ Profile(ID, Operand, FromTy, ToTy);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == CastSymbolKind;
+ }
+};
+
/// SymIntExpr - Represents symbolic expression like 'x' + 3.
class SymIntExpr : public SymExpr {
const SymExpr *LHS;
@@ -290,7 +360,7 @@ public:
BinaryOperator::Opcode getOpcode() const { return Op; }
- void dumpToStream(raw_ostream &os) const;
+ virtual void dumpToStream(raw_ostream &os) const;
const SymExpr *getLHS() const { return LHS; }
const llvm::APSInt &getRHS() const { return RHS; }
@@ -315,6 +385,47 @@ public:
}
};
+/// IntSymExpr - Represents symbolic expression like 3 - 'x'.
+class IntSymExpr : public SymExpr {
+ const llvm::APSInt& LHS;
+ BinaryOperator::Opcode Op;
+ const SymExpr *RHS;
+ QualType T;
+
+public:
+ IntSymExpr(const llvm::APSInt& lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType t)
+ : SymExpr(IntSymKind), LHS(lhs), Op(op), RHS(rhs), T(t) {}
+
+ QualType getType(ASTContext &C) const { return T; }
+
+ BinaryOperator::Opcode getOpcode() const { return Op; }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ const SymExpr *getRHS() const { return RHS; }
+ const llvm::APSInt &getLHS() const { return LHS; }
+
+ static void Profile(llvm::FoldingSetNodeID& ID, const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op, const SymExpr *rhs,
+ QualType t) {
+ ID.AddInteger((unsigned) IntSymKind);
+ ID.AddPointer(&lhs);
+ ID.AddInteger(op);
+ ID.AddPointer(rhs);
+ ID.Add(t);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) {
+ Profile(ID, LHS, Op, RHS, T);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == IntSymKind;
+ }
+};
+
/// SymSymExpr - Represents symbolic expression like 'x' + 'y'.
class SymSymExpr : public SymExpr {
const SymExpr *LHS;
@@ -335,7 +446,7 @@ public:
// generation of virtual functions.
QualType getType(ASTContext &C) const { return T; }
- void dumpToStream(raw_ostream &os) const;
+ virtual void dumpToStream(raw_ostream &os) const;
static void Profile(llvm::FoldingSetNodeID& ID, const SymExpr *lhs,
BinaryOperator::Opcode op, const SymExpr *rhs, QualType t) {
@@ -382,13 +493,18 @@ public:
/// \brief Make a unique symbol for MemRegion R according to its kind.
const SymbolRegionValue* getRegionValueSymbol(const TypedValueRegion* R);
- const SymbolConjured* getConjuredSymbol(const Stmt *E, QualType T,
+ const SymbolConjured* getConjuredSymbol(const Stmt *E,
+ const LocationContext *LCtx,
+ QualType T,
unsigned VisitCount,
const void *SymbolTag = 0);
- const SymbolConjured* getConjuredSymbol(const Expr *E, unsigned VisitCount,
+ const SymbolConjured* getConjuredSymbol(const Expr *E,
+ const LocationContext *LCtx,
+ unsigned VisitCount,
const void *SymbolTag = 0) {
- return getConjuredSymbol(E, E->getType(), VisitCount, SymbolTag);
+ return getConjuredSymbol(E, LCtx, E->getType(),
+ VisitCount, SymbolTag);
}
const SymbolDerived *getDerivedSymbol(SymbolRef parentSymbol,
@@ -404,6 +520,9 @@ public:
QualType T, unsigned VisitCount,
const void *SymbolTag = 0);
+ const SymbolCast* getCastSymbol(const SymExpr *Operand,
+ QualType From, QualType To);
+
const SymIntExpr *getSymIntExpr(const SymExpr *lhs, BinaryOperator::Opcode op,
const llvm::APSInt& rhs, QualType t);
@@ -412,6 +531,10 @@ public:
return getSymIntExpr(&lhs, op, rhs, t);
}
+ const IntSymExpr *getIntSymExpr(const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType t);
+
const SymSymExpr *getSymSymExpr(const SymExpr *lhs, BinaryOperator::Opcode op,
const SymExpr *rhs, QualType t);
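A hedged sketch of composing symbolic expressions through the factory methods above; every name passed in (the manager, operands, and types) is an assumed input, not something this patch defines.

// Sketch only -- SM, X, Three, IntTy, and LongTy are assumed to be
// supplied by the caller; BO_Add/BO_Sub are clang's BinaryOperator opcodes.
static void buildSymbolicExprs(clang::ento::SymbolManager &SM,
                               clang::ento::SymbolRef X,
                               const llvm::APSInt &Three,
                               clang::QualType IntTy,
                               clang::QualType LongTy) {
  using namespace clang;
  using namespace clang::ento;
  const SymIntExpr *Sum = SM.getSymIntExpr(X, BO_Add, Three, IntTy);  // 'x' + 3
  const SymbolCast *Cast = SM.getCastSymbol(Sum, IntTy, LongTy);      // (long)('x' + 3)
  const IntSymExpr *Diff = SM.getIntSymExpr(Three, BO_Sub, X, IntTy); // 3 - 'x'
  (void)Cast; (void)Diff;
}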
@@ -464,7 +587,7 @@ public:
bool isLive(SymbolRef sym);
bool isLiveRegion(const MemRegion *region);
- bool isLive(const Stmt *ExprVal) const;
+ bool isLive(const Stmt *ExprVal, const LocationContext *LCtx) const;
bool isLive(const VarRegion *VR, bool includeStoreBindings = false) const;
/// \brief Unconditionally marks a symbol as live.
@@ -537,7 +660,7 @@ public:
namespace llvm {
static inline raw_ostream &operator<<(raw_ostream &os,
- const clang::ento::SymExpr *SE) {
+ const clang::ento::SymExpr *SE) {
SE->dumpToStream(os);
return os;
}
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h
new file mode 100644
index 0000000..53205d3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h
@@ -0,0 +1,40 @@
+//== TaintManager.h - Managing taint --------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides APIs for adding, removing, and querying symbol taint.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TAINTMANAGER_H
+#define LLVM_CLANG_TAINTMANAGER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h"
+
+namespace clang {
+namespace ento {
+
+/// The GDM component containing the tainted root symbols. The taint of
+/// dependent symbols is inferred lazily. Currently, this is a map from a
+/// symbol to its taint tag kind. TODO: Should support multiple tag kinds.
+struct TaintMap {};
+typedef llvm::ImmutableMap<SymbolRef, TaintTagType> TaintMapImpl;
+template<> struct ProgramStateTrait<TaintMap>
+ : public ProgramStatePartialTrait<TaintMapImpl> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+};
+
+class TaintManager {
+
+ TaintManager() {}
+};
+
+}
+}
+
+#endif
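For context, a hedged sketch (not part of the patch) of how a checker could reach the TaintMap trait through the generic data map; the ProgramState set/get API is assumed from ProgramState.h, and real clients would likely go through dedicated helpers.

// Sketch only: mark a symbol's taint and query it back via the trait
// defined above. ProgramStateRef, SymbolRef, and the set/get templates
// are assumed to be visible at this point.
clang::ento::ProgramStateRef addTaint(clang::ento::ProgramStateRef State,
                                      clang::ento::SymbolRef Sym) {
  return State->set<clang::ento::TaintMap>(Sym, clang::ento::TaintTagGeneric);
}
bool hasTaint(clang::ento::ProgramStateRef State, clang::ento::SymbolRef Sym) {
  const clang::ento::TaintTagType *Tag =
      State->get<clang::ento::TaintMap>(Sym);
  return Tag != 0;
}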
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h
new file mode 100644
index 0000000..8ddc8b9d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h
@@ -0,0 +1,27 @@
+//== TaintTag.h - Taint tags for tracking tainted values -------*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a set of taint tags. Several tags are used to differentiate kinds
+// of taint.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_TAINTTAG_H
+#define LLVM_CLANG_TAINTTAG_H
+
+namespace clang {
+namespace ento {
+
+/// The type of a taint tag, used to differentiate between different kinds
+/// of taint.
+typedef unsigned TaintTagType;
+static const TaintTagType TaintTagGeneric = 0;
+
+}}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h
index fa34075..51aa753 100644
--- a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h
@@ -73,6 +73,7 @@ public:
}
void enqueue(ExplodedNode *N) {
+ assert(N->getLocation().getKind() != ProgramPoint::PostStmtKind);
enqueue(WorkListUnit(N, CurrentCounter));
}
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
new file mode 100644
index 0000000..3430320
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
@@ -0,0 +1,164 @@
+//===--- CompilationDatabase.h - --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an interface and multiple implementations for
+// CompilationDatabases.
+//
+// While C++ refactoring and analysis tools are not compilers, and thus
+// don't run as part of the build system, they need the exact information
+// of a build in order to be able to correctly understand the C++ code of
+// the project. This information is provided via the CompilationDatabase
+// interface.
+//
+// To create a CompilationDatabase from a build directory one can call
+// CompilationDatabase::loadFromDirectory(), which deduces the correct
+// compilation database from the root of the build tree.
+//
+// See the concrete subclasses of CompilationDatabase for currently supported
+// formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_COMPILATION_DATABASE_H
+#define LLVM_CLANG_TOOLING_COMPILATION_DATABASE_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class MemoryBuffer;
+} // end namespace llvm
+
+namespace clang {
+namespace tooling {
+
+/// \brief Specifies the working directory and command of a compilation.
+struct CompileCommand {
+ CompileCommand() {}
+ CompileCommand(StringRef Directory, ArrayRef<std::string> CommandLine)
+ : Directory(Directory), CommandLine(CommandLine) {}
+
+ /// \brief The working directory the command was executed from.
+ std::string Directory;
+
+ /// \brief The command line that was executed.
+ std::vector<std::string> CommandLine;
+};
+
+/// \brief Interface for compilation databases.
+///
+/// A compilation database allows the user to retrieve all compile command
+/// lines with which a specified file is compiled in a project.
+/// The retrieved compile command lines can be used to run clang tools over
+/// a subset of the files in a project.
+class CompilationDatabase {
+public:
+ virtual ~CompilationDatabase();
+
+ /// \brief Loads a compilation database from a build directory.
+ ///
+  /// Looks at the specified 'BuildDirectory' and creates a compilation
+  /// database that allows querying compile commands for source files in the
+  /// corresponding source tree.
+ ///
+ /// Returns NULL and sets ErrorMessage if we were not able to build up a
+ /// compilation database for the build directory.
+ ///
+ /// FIXME: Currently only supports JSON compilation databases, which
+ /// are named 'compile_commands.json' in the given directory. Extend this
+ /// for other build types (like ninja build files).
+ static CompilationDatabase *loadFromDirectory(StringRef BuildDirectory,
+ std::string &ErrorMessage);
+
+ /// \brief Returns all compile commands in which the specified file was
+ /// compiled.
+ ///
+  /// This includes compile commands that span multiple source files.
+ /// For example, consider a project with the following compilations:
+ /// $ clang++ -o test a.cc b.cc t.cc
+ /// $ clang++ -o production a.cc b.cc -DPRODUCTION
+ /// A compilation database representing the project would return both command
+ /// lines for a.cc and b.cc and only the first command line for t.cc.
+ virtual std::vector<CompileCommand> getCompileCommands(
+ StringRef FilePath) const = 0;
+};
+
+/// \brief A JSON based compilation database.
+///
+/// JSON compilation database files must contain a list of JSON objects which
+/// provide the command lines in the attributes 'directory', 'command' and
+/// 'file':
+/// [
+/// { "directory": "<working directory of the compile>",
+/// "command": "<compile command line>",
+/// "file": "<path to source file>"
+/// },
+/// ...
+/// ]
+/// Each object entry defines one compile action. The specified file is
+/// considered to be the main source file for the translation unit.
+///
+/// JSON compilation databases can, for example, be generated in CMake
+/// projects by setting the flag -DCMAKE_EXPORT_COMPILE_COMMANDS.
+class JSONCompilationDatabase : public CompilationDatabase {
+public:
+
+ /// \brief Loads a JSON compilation database from the specified file.
+ ///
+ /// Returns NULL and sets ErrorMessage if the database could not be
+ /// loaded from the given file.
+ static JSONCompilationDatabase *loadFromFile(StringRef FilePath,
+ std::string &ErrorMessage);
+
+ /// \brief Loads a JSON compilation database from a data buffer.
+ ///
+ /// Returns NULL and sets ErrorMessage if the database could not be loaded.
+ static JSONCompilationDatabase *loadFromBuffer(StringRef DatabaseString,
+ std::string &ErrorMessage);
+
+  /// \brief Returns all compile commands in which the specified file was
+ /// compiled.
+ ///
+ /// FIXME: Currently FilePath must be an absolute path inside the
+ /// source directory which does not have symlinks resolved.
+ virtual std::vector<CompileCommand> getCompileCommands(
+ StringRef FilePath) const;
+
+private:
+  /// \brief Constructs a JSON compilation database from a memory buffer.
+ JSONCompilationDatabase(llvm::MemoryBuffer *Database)
+ : Database(Database) {}
+
+ /// \brief Parses the database file and creates the index.
+ ///
+ /// Returns whether parsing succeeded. Sets ErrorMessage if parsing
+ /// failed.
+ bool parse(std::string &ErrorMessage);
+
+  // Tuple (directory, commandline), where 'commandline' is a JSON-escaped,
+  // bash-escaped command line.
+ typedef std::pair<StringRef, StringRef> CompileCommandRef;
+
+ // Maps file paths to the compile command lines for that file.
+ llvm::StringMap< std::vector<CompileCommandRef> > IndexByFile;
+
+ llvm::OwningPtr<llvm::MemoryBuffer> Database;
+};
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_COMPILATION_DATABASE_H
+
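A hedged usage sketch for the new interface (not part of the patch); the build and source paths below are hypothetical.

// Sketch only: load a CMake-generated compile_commands.json from a
// hypothetical build directory and print the commands found for one file.
#include "clang/Tooling/CompilationDatabase.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  std::string Error;
  llvm::OwningPtr<clang::tooling::CompilationDatabase> DB(
      clang::tooling::CompilationDatabase::loadFromDirectory("/path/to/build",
                                                             Error));
  if (!DB) {
    llvm::errs() << Error << "\n";
    return 1;
  }
  std::vector<clang::tooling::CompileCommand> Cmds =
      DB->getCompileCommands("/path/to/src/a.cc");
  for (unsigned I = 0, E = Cmds.size(); I != E; ++I)
    llvm::outs() << Cmds[I].Directory << ": "
                 << Cmds[I].CommandLine.size() << " args\n";
  return 0;
}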
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h b/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
new file mode 100644
index 0000000..868eae3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
@@ -0,0 +1,213 @@
+//===--- Tooling.h - Framework for standalone Clang tools -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements functions to run clang tools standalone instead
+// of running them as a plugin.
+//
+// A ClangTool is initialized with a CompilationDatabase and a set of files
+// to run over. The tool will then run a user-specified FrontendAction over
+// all TUs in which the given files are compiled.
+//
+// It is also possible to run a FrontendAction over a snippet of code by
+// calling runToolOnCode, which is useful for unit testing.
+//
+// Applications that need more fine-grained control over how to run
+// multiple FrontendActions over code can use ToolInvocation.
+//
+// Example tools:
+// - running clang -fsyntax-only over source code from an editor to get
+// fast syntax checks
+// - running match/replace tools over C++ code
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_TOOLING_H
+#define LLVM_CLANG_TOOLING_TOOLING_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Twine.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Driver/Util.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+namespace driver {
+class Compilation;
+} // end namespace driver
+
+class CompilerInvocation;
+class SourceManager;
+class FrontendAction;
+
+namespace tooling {
+
+class CompilationDatabase;
+
+/// \brief Interface to generate clang::FrontendActions.
+class FrontendActionFactory {
+public:
+ virtual ~FrontendActionFactory();
+
+ /// \brief Returns a new clang::FrontendAction.
+ ///
+ /// The caller takes ownership of the returned action.
+ virtual clang::FrontendAction *create() = 0;
+};
+
+/// \brief Returns a new FrontendActionFactory for a given type.
+///
+/// T must extend clang::FrontendAction.
+///
+/// Example:
+/// FrontendActionFactory *Factory =
+/// newFrontendActionFactory<clang::SyntaxOnlyAction>();
+template <typename T>
+FrontendActionFactory *newFrontendActionFactory();
+
+/// \brief Returns a new FrontendActionFactory for any type that provides an
+/// implementation of newFrontendAction().
+///
+/// FactoryT must implement: FrontendAction *newFrontendAction().
+///
+/// Example:
+/// struct ProvidesFrontendActions {
+/// FrontendAction *newFrontendAction();
+/// } Factory;
+/// FrontendActionFactory *FactoryAdapter =
+/// newFrontendActionFactory(&Factory);
+template <typename FactoryT>
+FrontendActionFactory *newFrontendActionFactory(FactoryT *ActionFactory);
+
+/// \brief Runs (and deletes) the tool on 'Code' with the -fsyntax-only flag.
+///
+/// \param ToolAction The action to run over the code.
+/// \param Code C++ code.
+/// \param FileName The file name which 'Code' will be mapped as.
+///
+/// \return - True if 'ToolAction' was successfully executed.
+bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
+ const Twine &FileName = "input.cc");
+
+/// \brief Utility to run a FrontendAction in a single clang invocation.
+class ToolInvocation {
+ public:
+ /// \brief Create a tool invocation.
+ ///
+ /// \param CommandLine The command line arguments to clang.
+ /// \param ToolAction The action to be executed. Class takes ownership.
+ /// \param Files The FileManager used for the execution. Class does not take
+ /// ownership.
+ ToolInvocation(ArrayRef<std::string> CommandLine, FrontendAction *ToolAction,
+ FileManager *Files);
+
+ /// \brief Map a virtual file to be used while running the tool.
+ ///
+ /// \param FilePath The path at which the content will be mapped.
+ /// \param Content A null terminated buffer of the file's content.
+ void mapVirtualFile(StringRef FilePath, StringRef Content);
+
+ /// \brief Run the clang invocation.
+ ///
+ /// \returns True if there were no errors during execution.
+ bool run();
+
+ private:
+ void addFileMappingsTo(SourceManager &SourceManager);
+
+ bool runInvocation(const char *BinaryName,
+ clang::driver::Compilation *Compilation,
+ clang::CompilerInvocation *Invocation,
+ const clang::driver::ArgStringList &CC1Args,
+ clang::FrontendAction *ToolAction);
+
+ std::vector<std::string> CommandLine;
+ llvm::OwningPtr<FrontendAction> ToolAction;
+ FileManager *Files;
+ // Maps <file name> -> <file content>.
+ llvm::StringMap<StringRef> MappedFileContents;
+};
+
+/// \brief Utility to run a FrontendAction over a set of files.
+///
+/// This class is written to be usable for command line utilities.
+class ClangTool {
+ public:
+ /// \brief Constructs a clang tool to run over a list of files.
+ ///
+ /// \param Compilations The CompilationDatabase which contains the compile
+ /// command lines for the given source paths.
+  /// \param SourcePaths The source files to run over. If a source file is
+  /// not found in Compilations, it is skipped.
+ ClangTool(const CompilationDatabase &Compilations,
+ ArrayRef<std::string> SourcePaths);
+
+ /// \brief Map a virtual file to be used while running the tool.
+ ///
+ /// \param FilePath The path at which the content will be mapped.
+  /// \param Content A null-terminated buffer of the file's content.
+ void mapVirtualFile(StringRef FilePath, StringRef Content);
+
+ /// Runs a frontend action over all files specified in the command line.
+ ///
+ /// \param ActionFactory Factory generating the frontend actions. The function
+ /// takes ownership of this parameter. A new action is generated for every
+ /// processed translation unit.
+ int run(FrontendActionFactory *ActionFactory);
+
+ /// \brief Returns the file manager used in the tool.
+ ///
+ /// The file manager is shared between all translation units.
+ FileManager &getFiles() { return Files; }
+
+ private:
+  // We store command lines as pairs of (file name, command line).
+ typedef std::pair< std::string, std::vector<std::string> > CommandLine;
+ std::vector<CommandLine> CommandLines;
+
+ FileManager Files;
+ // Contains a list of pairs (<file name>, <file content>).
+ std::vector< std::pair<StringRef, StringRef> > MappedFileContents;
+};
+
+template <typename T>
+FrontendActionFactory *newFrontendActionFactory() {
+ class SimpleFrontendActionFactory : public FrontendActionFactory {
+ public:
+ virtual clang::FrontendAction *create() { return new T; }
+ };
+
+ return new SimpleFrontendActionFactory;
+}
+
+template <typename FactoryT>
+FrontendActionFactory *newFrontendActionFactory(FactoryT *ActionFactory) {
+ class FrontendActionFactoryAdapter : public FrontendActionFactory {
+ public:
+ explicit FrontendActionFactoryAdapter(FactoryT *ActionFactory)
+ : ActionFactory(ActionFactory) {}
+
+ virtual clang::FrontendAction *create() {
+ return ActionFactory->newFrontendAction();
+ }
+
+ private:
+ FactoryT *ActionFactory;
+ };
+
+ return new FrontendActionFactoryAdapter(ActionFactory);
+}
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_TOOLING_H
+
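A hedged end-to-end sketch of the ClangTool workflow this header describes (not part of the patch); the paths are hypothetical, and the action is clang's stock SyntaxOnlyAction, mirroring the factory example in the comments above.

// Sketch only: run -fsyntax-only over one file using a compilation
// database and the new ClangTool driver class.
#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/Tooling.h"

int main() {
  std::string Error;
  llvm::OwningPtr<clang::tooling::CompilationDatabase> DB(
      clang::tooling::CompilationDatabase::loadFromDirectory("/path/to/build",
                                                             Error));
  if (!DB)
    return 1;
  std::vector<std::string> Sources;
  Sources.push_back("/path/to/src/a.cc");  // hypothetical source path
  clang::tooling::ClangTool Tool(*DB, Sources);
  // run() takes ownership of the factory and returns a driver-style status.
  return Tool.run(
      clang::tooling::newFrontendActionFactory<clang::SyntaxOnlyAction>());
}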
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
index 6e1b0e5..9354dc3 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
@@ -10,6 +10,7 @@
#include "Internals.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/Utils.h"
#include "clang/AST/ASTConsumer.h"
@@ -98,7 +99,7 @@ public:
virtual void HandleDiagnostic(DiagnosticsEngine::Level level,
const Diagnostic &Info) {
- if (arcmt::isARCDiagnostic(Info.getID(), Diags) ||
+ if (DiagnosticIDs::isARCDiagnostic(Info.getID()) ||
level >= DiagnosticsEngine::Error || level == DiagnosticsEngine::Note) {
CapturedDiags.push_back(StoredDiagnostic(level, Info));
return;
@@ -184,18 +185,19 @@ static bool HasARCRuntime(CompilerInvocation &origCI) {
static CompilerInvocation *
createInvocationForMigration(CompilerInvocation &origCI) {
- llvm::OwningPtr<CompilerInvocation> CInvok;
+ OwningPtr<CompilerInvocation> CInvok;
CInvok.reset(new CompilerInvocation(origCI));
CInvok->getPreprocessorOpts().ImplicitPCHInclude = std::string();
CInvok->getPreprocessorOpts().ImplicitPTHInclude = std::string();
std::string define = getARCMTMacroName();
define += '=';
CInvok->getPreprocessorOpts().addMacroDef(define);
- CInvok->getLangOpts().ObjCAutoRefCount = true;
+ CInvok->getLangOpts()->ObjCAutoRefCount = true;
+ CInvok->getLangOpts()->setGC(LangOptions::NonGC);
CInvok->getDiagnosticOpts().ErrorLimit = 0;
CInvok->getDiagnosticOpts().Warnings.push_back(
"error=arc-unsafe-retained-assign");
- CInvok->getLangOpts().ObjCRuntimeHasWeak = HasARCRuntime(origCI);
+ CInvok->getLangOpts()->ObjCRuntimeHasWeak = HasARCRuntime(origCI);
return CInvok.take();
}
@@ -204,12 +206,12 @@ static void emitPremigrationErrors(const CapturedDiagList &arcDiags,
const DiagnosticOptions &diagOpts,
Preprocessor &PP) {
TextDiagnosticPrinter printer(llvm::errs(), diagOpts);
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, &printer, /*ShouldOwnClient=*/false));
Diags->setSourceManager(&PP.getSourceManager());
- printer.BeginSourceFile(PP.getLangOptions(), &PP);
+ printer.BeginSourceFile(PP.getLangOpts(), &PP);
arcDiags.reportDiagnostics(*Diags);
printer.EndSourceFile();
}
@@ -219,33 +221,38 @@ static void emitPremigrationErrors(const CapturedDiagList &arcDiags,
//===----------------------------------------------------------------------===//
bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
- StringRef Filename, InputKind Kind,
+ const FrontendInputFile &Input,
DiagnosticConsumer *DiagClient,
bool emitPremigrationARCErrors,
StringRef plistOut) {
- if (!origCI.getLangOpts().ObjC1)
+ if (!origCI.getLangOpts()->ObjC1)
return false;
- std::vector<TransformFn> transforms = arcmt::getAllTransformations();
+ LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
+ bool NoNSAllocReallocError = origCI.getMigratorOpts().NoNSAllocReallocError;
+ bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval;
+
+ std::vector<TransformFn> transforms = arcmt::getAllTransformations(OrigGCMode,
+ NoFinalizeRemoval);
assert(!transforms.empty());
- llvm::OwningPtr<CompilerInvocation> CInvok;
+ OwningPtr<CompilerInvocation> CInvok;
CInvok.reset(createInvocationForMigration(origCI));
CInvok->getFrontendOpts().Inputs.clear();
- CInvok->getFrontendOpts().Inputs.push_back(std::make_pair(Kind, Filename));
+ CInvok->getFrontendOpts().Inputs.push_back(Input);
CapturedDiagList capturedDiags;
assert(DiagClient);
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
// Filter of all diagnostics.
CaptureDiagnosticConsumer errRec(*Diags, capturedDiags);
Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
- llvm::OwningPtr<ASTUnit> Unit(
+ OwningPtr<ASTUnit> Unit(
ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags));
if (!Unit)
return true;
@@ -257,7 +264,7 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
if (Diags->hasFatalErrorOccurred()) {
Diags->Reset();
- DiagClient->BeginSourceFile(Ctx.getLangOptions(), &Unit->getPreprocessor());
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
capturedDiags.reportDiagnostics(*Diags);
DiagClient->EndSourceFile();
return true;
@@ -272,7 +279,7 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
I = capturedDiags.begin(), E = capturedDiags.end(); I != E; ++I)
arcDiags.push_back(*I);
writeARCDiagsToPlist(plistOut, arcDiags,
- Ctx.getSourceManager(), Ctx.getLangOptions());
+ Ctx.getSourceManager(), Ctx.getLangOpts());
}
// After parsing of source files ended, we want to reuse the
@@ -280,14 +287,16 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
// We call BeginSourceFile because DiagnosticConsumer requires that
// diagnostics with source range information are emitted only in between
// BeginSourceFile() and EndSourceFile().
- DiagClient->BeginSourceFile(Ctx.getLangOptions(), &Unit->getPreprocessor());
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
// No macros will be added since we are just checking and we won't modify
// source code.
std::vector<SourceLocation> ARCMTMacroLocs;
TransformActions testAct(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
- MigrationPass pass(Ctx, Unit->getSema(), testAct, ARCMTMacroLocs);
+ MigrationPass pass(Ctx, OrigGCMode, Unit->getSema(), testAct, ARCMTMacroLocs);
+ pass.setNSAllocReallocError(NoNSAllocReallocError);
+ pass.setNoFinalizeRemoval(NoFinalizeRemoval);
for (unsigned i=0, e = transforms.size(); i != e; ++i)
transforms[i](pass);
@@ -298,7 +307,7 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
// If we are migrating code that gets the '-fobjc-arc' flag, make sure
// to remove it so that we don't get errors from normal compilation.
- origCI.getLangOpts().ObjCAutoRefCount = false;
+ origCI.getLangOpts()->ObjCAutoRefCount = false;
return capturedDiags.hasErrors() || testAct.hasReportedErrors();
}
@@ -308,27 +317,31 @@ bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
//===----------------------------------------------------------------------===//
static bool applyTransforms(CompilerInvocation &origCI,
- StringRef Filename, InputKind Kind,
+ const FrontendInputFile &Input,
DiagnosticConsumer *DiagClient,
StringRef outputDir,
bool emitPremigrationARCErrors,
StringRef plistOut) {
- if (!origCI.getLangOpts().ObjC1)
+ if (!origCI.getLangOpts()->ObjC1)
return false;
+ LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
+
// Make sure checking is successful first.
CompilerInvocation CInvokForCheck(origCI);
- if (arcmt::checkForManualIssues(CInvokForCheck, Filename, Kind, DiagClient,
+ if (arcmt::checkForManualIssues(CInvokForCheck, Input, DiagClient,
emitPremigrationARCErrors, plistOut))
return true;
CompilerInvocation CInvok(origCI);
CInvok.getFrontendOpts().Inputs.clear();
- CInvok.getFrontendOpts().Inputs.push_back(std::make_pair(Kind, Filename));
+ CInvok.getFrontendOpts().Inputs.push_back(Input);
MigrationProcess migration(CInvok, DiagClient, outputDir);
+ bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval;
- std::vector<TransformFn> transforms = arcmt::getAllTransformations();
+ std::vector<TransformFn> transforms = arcmt::getAllTransformations(OrigGCMode,
+ NoFinalizeRemoval);
assert(!transforms.empty());
for (unsigned i=0, e = transforms.size(); i != e; ++i) {
@@ -336,36 +349,36 @@ static bool applyTransforms(CompilerInvocation &origCI,
if (err) return true;
}
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
if (outputDir.empty()) {
- origCI.getLangOpts().ObjCAutoRefCount = true;
+ origCI.getLangOpts()->ObjCAutoRefCount = true;
return migration.getRemapper().overwriteOriginal(*Diags);
} else {
// If we are migrating code that gets the '-fobjc-arc' flag, make sure
// to remove it so that we don't get errors from normal compilation.
- origCI.getLangOpts().ObjCAutoRefCount = false;
+ origCI.getLangOpts()->ObjCAutoRefCount = false;
return migration.getRemapper().flushToDisk(outputDir, *Diags);
}
}
bool arcmt::applyTransformations(CompilerInvocation &origCI,
- StringRef Filename, InputKind Kind,
+ const FrontendInputFile &Input,
DiagnosticConsumer *DiagClient) {
- return applyTransforms(origCI, Filename, Kind, DiagClient,
+ return applyTransforms(origCI, Input, DiagClient,
StringRef(), false, StringRef());
}
bool arcmt::migrateWithTemporaryFiles(CompilerInvocation &origCI,
- StringRef Filename, InputKind Kind,
+ const FrontendInputFile &Input,
DiagnosticConsumer *DiagClient,
StringRef outputDir,
bool emitPremigrationARCErrors,
StringRef plistOut) {
assert(!outputDir.empty() && "Expected output directory path");
- return applyTransforms(origCI, Filename, Kind, DiagClient,
+ return applyTransforms(origCI, Input, DiagClient,
outputDir, emitPremigrationARCErrors, plistOut);
}
@@ -375,8 +388,8 @@ bool arcmt::getFileRemappings(std::vector<std::pair<std::string,std::string> > &
DiagnosticConsumer *DiagClient) {
assert(!outputDir.empty());
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
FileRemapper remapper;
@@ -385,13 +398,51 @@ bool arcmt::getFileRemappings(std::vector<std::pair<std::string,std::string> > &
if (err)
return true;
- CompilerInvocation CI;
- remapper.applyMappings(CI);
- remap = CI.getPreprocessorOpts().RemappedFiles;
+ PreprocessorOptions PPOpts;
+ remapper.applyMappings(PPOpts);
+ remap = PPOpts.RemappedFiles;
return false;
}
+bool arcmt::getFileRemappingsFromFileList(
+ std::vector<std::pair<std::string,std::string> > &remap,
+ ArrayRef<StringRef> remapFiles,
+ DiagnosticConsumer *DiagClient) {
+ bool hasErrorOccurred = false;
+ llvm::StringMap<bool> Uniquer;
+
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+ for (ArrayRef<StringRef>::iterator
+ I = remapFiles.begin(), E = remapFiles.end(); I != E; ++I) {
+ StringRef file = *I;
+
+ FileRemapper remapper;
+ bool err = remapper.initFromFile(file, *Diags,
+ /*ignoreIfFilesChanged=*/true);
+ hasErrorOccurred = hasErrorOccurred || err;
+ if (err)
+ continue;
+
+ PreprocessorOptions PPOpts;
+ remapper.applyMappings(PPOpts);
+ for (PreprocessorOptions::remapped_file_iterator
+ RI = PPOpts.remapped_file_begin(), RE = PPOpts.remapped_file_end();
+ RI != RE; ++RI) {
+ bool &inserted = Uniquer[RI->first];
+ if (inserted)
+ continue;
+ inserted = true;
+ remap.push_back(*RI);
+ }
+ }
+
+ return hasErrorOccurred;
+}
+
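For context, a hedged sketch of calling the new getFileRemappingsFromFileList entry point defined above; the remap-file paths and the diagnostic consumer are assumed inputs.

// Sketch only: collect unique (original, transformed) pairs from two
// hypothetical remap files; 'DiagClient' is an assumed DiagnosticConsumer.
static bool collectRemappings(clang::DiagnosticConsumer *DiagClient) {
  std::vector<std::pair<std::string, std::string> > Remap;
  llvm::SmallVector<llvm::StringRef, 2> Files;
  Files.push_back("migrate-out-1/remap");  // hypothetical paths
  Files.push_back("migrate-out-2/remap");
  // Returns true if any file failed to load; 'Remap' still holds the
  // mappings gathered from the files that loaded successfully.
  return clang::arcmt::getFileRemappingsFromFileList(Remap, Files, DiagClient);
}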
//===----------------------------------------------------------------------===//
// CollectTransformActions.
//===----------------------------------------------------------------------===//
@@ -478,8 +529,8 @@ MigrationProcess::MigrationProcess(const CompilerInvocation &CI,
StringRef outputDir)
: OrigCI(CI), DiagClient(diagClient) {
if (!outputDir.empty()) {
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
Remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanges=*/true);
}
@@ -487,28 +538,28 @@ MigrationProcess::MigrationProcess(const CompilerInvocation &CI,
bool MigrationProcess::applyTransform(TransformFn trans,
RewriteListener *listener) {
- llvm::OwningPtr<CompilerInvocation> CInvok;
+ OwningPtr<CompilerInvocation> CInvok;
CInvok.reset(createInvocationForMigration(OrigCI));
CInvok->getDiagnosticOpts().IgnoreWarnings = true;
- Remapper.applyMappings(*CInvok);
+ Remapper.applyMappings(CInvok->getPreprocessorOpts());
CapturedDiagList capturedDiags;
std::vector<SourceLocation> ARCMTMacroLocs;
assert(DiagClient);
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
// Filter of all diagnostics.
CaptureDiagnosticConsumer errRec(*Diags, capturedDiags);
Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
- llvm::OwningPtr<ARCMTMacroTrackerAction> ASTAction;
+ OwningPtr<ARCMTMacroTrackerAction> ASTAction;
ASTAction.reset(new ARCMTMacroTrackerAction(ARCMTMacroLocs));
- llvm::OwningPtr<ASTUnit> Unit(
+ OwningPtr<ASTUnit> Unit(
ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags,
ASTAction.get()));
if (!Unit)
@@ -522,7 +573,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
if (Diags->hasFatalErrorOccurred()) {
Diags->Reset();
- DiagClient->BeginSourceFile(Ctx.getLangOptions(), &Unit->getPreprocessor());
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
capturedDiags.reportDiagnostics(*Diags);
DiagClient->EndSourceFile();
return true;
@@ -533,11 +584,12 @@ bool MigrationProcess::applyTransform(TransformFn trans,
// We call BeginSourceFile because DiagnosticConsumer requires that
// diagnostics with source range information are emitted only in between
// BeginSourceFile() and EndSourceFile().
- DiagClient->BeginSourceFile(Ctx.getLangOptions(), &Unit->getPreprocessor());
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
- Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOptions());
+ Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts());
TransformActions TA(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
- MigrationPass pass(Ctx, Unit->getSema(), TA, ARCMTMacroLocs);
+ MigrationPass pass(Ctx, OrigCI.getLangOpts()->getGC(),
+ Unit->getSema(), TA, ARCMTMacroLocs);
trans(pass);
@@ -559,25 +611,16 @@ bool MigrationProcess::applyTransform(TransformFn trans,
assert(file);
std::string newFname = file->getName();
newFname += "-trans";
- llvm::SmallString<512> newText;
+ SmallString<512> newText;
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
vecOS.flush();
llvm::MemoryBuffer *memBuf = llvm::MemoryBuffer::getMemBufferCopy(
StringRef(newText.data(), newText.size()), newFname);
- llvm::SmallString<64> filePath(file->getName());
+ SmallString<64> filePath(file->getName());
Unit->getFileManager().FixupRelativePath(filePath);
Remapper.remap(filePath.str(), memBuf);
}
return false;
}
-
-//===----------------------------------------------------------------------===//
-// isARCDiagnostic.
-//===----------------------------------------------------------------------===//
-
-bool arcmt::isARCDiagnostic(unsigned diagID, DiagnosticsEngine &Diag) {
- return Diag.getDiagnosticIDs()->getCategoryNumberForDiag(diagID) ==
- diag::DiagCat_Automatic_Reference_Counting_Issue;
-}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp
index dea867a..0ed36dd 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp
@@ -15,8 +15,7 @@ using namespace clang;
using namespace arcmt;
bool CheckAction::BeginInvocation(CompilerInstance &CI) {
- if (arcmt::checkForManualIssues(CI.getInvocation(), getCurrentFile(),
- getCurrentFileKind(),
+ if (arcmt::checkForManualIssues(CI.getInvocation(), getCurrentInput(),
CI.getDiagnostics().getClient()))
return false; // errors, stop the action.
@@ -29,8 +28,7 @@ CheckAction::CheckAction(FrontendAction *WrappedAction)
: WrapperFrontendAction(WrappedAction) {}
bool ModifyAction::BeginInvocation(CompilerInstance &CI) {
- return !arcmt::applyTransformations(CI.getInvocation(),
- getCurrentFile(), getCurrentFileKind(),
+ return !arcmt::applyTransformations(CI.getInvocation(), getCurrentInput(),
CI.getDiagnostics().getClient());
}
@@ -39,12 +37,11 @@ ModifyAction::ModifyAction(FrontendAction *WrappedAction)
bool MigrateAction::BeginInvocation(CompilerInstance &CI) {
if (arcmt::migrateWithTemporaryFiles(CI.getInvocation(),
- getCurrentFile(),
- getCurrentFileKind(),
- CI.getDiagnostics().getClient(),
- MigrateDir,
- EmitPremigrationARCErros,
- PlistOut))
+ getCurrentInput(),
+ CI.getDiagnostics().getClient(),
+ MigrateDir,
+ EmitPremigrationARCErros,
+ PlistOut))
return false; // errors, stop the action.
// We only want to see diagnostics emitted by migrateWithTemporaryFiles.
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
index c6e6ce4..474ce7d 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -8,8 +8,9 @@
//===----------------------------------------------------------------------===//
#include "clang/ARCMigrate/FileRemapper.h"
-#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/PreprocessorOptions.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Diagnostic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/FileSystem.h"
@@ -50,9 +51,15 @@ std::string FileRemapper::getRemapInfoFile(StringRef outputDir) {
bool FileRemapper::initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
bool ignoreIfFilesChanged) {
+ std::string infoFile = getRemapInfoFile(outputDir);
+ return initFromFile(infoFile, Diag, ignoreIfFilesChanged);
+}
+
+bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
+ bool ignoreIfFilesChanged) {
assert(FromToMappings.empty() &&
"initFromDisk should be called before any remap calls");
- std::string infoFile = getRemapInfoFile(outputDir);
+ std::string infoFile = filePath;
bool fileExists = false;
llvm::sys::fs::exists(infoFile, fileExists);
if (!fileExists)
@@ -60,9 +67,8 @@ bool FileRemapper::initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
- llvm::OwningPtr<llvm::MemoryBuffer> fileBuf;
- if (llvm::error_code ec = llvm::MemoryBuffer::getFile(infoFile.c_str(),
- fileBuf))
+ OwningPtr<llvm::MemoryBuffer> fileBuf;
+ if (llvm::MemoryBuffer::getFile(infoFile.c_str(), fileBuf))
return report("Error opening file: " + infoFile, Diag);
SmallVector<StringRef, 64> lines;
@@ -109,8 +115,15 @@ bool FileRemapper::flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag) {
if (fs::create_directory(outputDir, existed) != llvm::errc::success)
return report("Could not create directory: " + outputDir, Diag);
- std::string errMsg;
std::string infoFile = getRemapInfoFile(outputDir);
+ return flushToFile(infoFile, Diag);
+}
+
+bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
+ using namespace llvm::sys;
+
+ std::string errMsg;
+ std::string infoFile = outputPath;
llvm::raw_fd_ostream infoOut(infoFile.c_str(), errMsg,
llvm::raw_fd_ostream::F_Binary);
if (!errMsg.empty())
@@ -120,18 +133,18 @@ bool FileRemapper::flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag) {
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
const FileEntry *origFE = I->first;
- llvm::SmallString<200> origPath = StringRef(origFE->getName());
+ SmallString<200> origPath = StringRef(origFE->getName());
fs::make_absolute(origPath);
infoOut << origPath << '\n';
infoOut << (uint64_t)origFE->getModificationTime() << '\n';
if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
- llvm::SmallString<200> newPath = StringRef(FE->getName());
+ SmallString<200> newPath = StringRef(FE->getName());
fs::make_absolute(newPath);
infoOut << newPath << '\n';
} else {
- llvm::SmallString<64> tempPath;
+ SmallString<64> tempPath;
tempPath = path::filename(origFE->getName());
tempPath += "-%%%%%%%%";
tempPath += path::extension(origFE->getName());
@@ -190,8 +203,7 @@ bool FileRemapper::overwriteOriginal(DiagnosticsEngine &Diag,
return false;
}
-void FileRemapper::applyMappings(CompilerInvocation &CI) const {
- PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+void FileRemapper::applyMappings(PreprocessorOptions &PPOpts) const {
for (MappingsTy::const_iterator
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
@@ -205,8 +217,7 @@ void FileRemapper::applyMappings(CompilerInvocation &CI) const {
PPOpts.RetainRemappedFileBuffers = true;
}
-void FileRemapper::transferMappingsAndClear(CompilerInvocation &CI) {
- PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+void FileRemapper::transferMappingsAndClear(PreprocessorOptions &PPOpts) {
for (MappingsTy::iterator
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
@@ -269,15 +280,12 @@ void FileRemapper::resetTarget(Target &targ) {
delete oldmem;
} else {
const FileEntry *toFE = targ.get<const FileEntry *>();
- llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
- I = ToFromMappings.find(toFE);
- if (I != ToFromMappings.end())
- ToFromMappings.erase(I);
+ ToFromMappings.erase(toFE);
}
}
bool FileRemapper::report(const Twine &err, DiagnosticsEngine &Diag) {
- llvm::SmallString<128> buf;
+ SmallString<128> buf;
unsigned ID = Diag.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Error,
err.toStringRef(buf));
Diag.Report(ID);
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
index 46f3bb6..59177c4 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
@@ -94,6 +94,8 @@ public:
void reportError(StringRef error, SourceLocation loc,
SourceRange range = SourceRange());
+ void reportWarning(StringRef warning, SourceLocation loc,
+ SourceRange range = SourceRange());
void reportNote(StringRef note, SourceLocation loc,
SourceRange range = SourceRange());
@@ -137,17 +139,26 @@ public:
class MigrationPass {
public:
ASTContext &Ctx;
+ LangOptions::GCMode OrigGCMode;
+ MigratorOptions MigOptions;
Sema &SemaRef;
TransformActions &TA;
std::vector<SourceLocation> &ARCMTMacroLocs;
- MigrationPass(ASTContext &Ctx, Sema &sema, TransformActions &TA,
+ MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode,
+ Sema &sema, TransformActions &TA,
std::vector<SourceLocation> &ARCMTMacroLocs)
- : Ctx(Ctx), SemaRef(sema), TA(TA), ARCMTMacroLocs(ARCMTMacroLocs) { }
+ : Ctx(Ctx), OrigGCMode(OrigGCMode), MigOptions(),
+ SemaRef(sema), TA(TA),
+ ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ bool isGCMigration() const { return OrigGCMode != LangOptions::NonGC; }
+ bool noNSAllocReallocError() const { return MigOptions.NoNSAllocReallocError; }
+ void setNSAllocReallocError(bool val) { MigOptions.NoNSAllocReallocError = val; }
+ bool noFinalizeRemoval() const { return MigOptions.NoFinalizeRemoval; }
+ void setNoFinalizeRemoval(bool val) {MigOptions.NoFinalizeRemoval = val; }
};
-bool isARCDiagnostic(unsigned diagID, DiagnosticsEngine &Diag);
-
static inline StringRef getARCMTMacroName() {
return "__IMPL_ARCMT_REMOVED_EXPR__";
}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
new file mode 100644
index 0000000..e635274
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -0,0 +1,226 @@
+//===--- ObjCMT.cpp - ObjC Migrate Tool -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ARCMigrate/ARCMTActions.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/NSAPI.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Edit/Rewriters.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace arcmt;
+
+namespace {
+
+class ObjCMigrateASTConsumer : public ASTConsumer {
+ void migrateDecl(Decl *D);
+
+public:
+ std::string MigrateDir;
+ bool MigrateLiterals;
+ bool MigrateSubscripting;
+ llvm::OwningPtr<NSAPI> NSAPIObj;
+ llvm::OwningPtr<edit::EditedSource> Editor;
+ FileRemapper &Remapper;
+ FileManager &FileMgr;
+ const PreprocessingRecord *PPRec;
+ bool IsOutputFile;
+
+ ObjCMigrateASTConsumer(StringRef migrateDir,
+ bool migrateLiterals,
+ bool migrateSubscripting,
+ FileRemapper &remapper,
+ FileManager &fileMgr,
+ const PreprocessingRecord *PPRec,
+ bool isOutputFile = false)
+ : MigrateDir(migrateDir),
+ MigrateLiterals(migrateLiterals),
+ MigrateSubscripting(migrateSubscripting),
+ Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec),
+ IsOutputFile(isOutputFile) { }
+
+protected:
+ virtual void Initialize(ASTContext &Context) {
+ NSAPIObj.reset(new NSAPI(Context));
+ Editor.reset(new edit::EditedSource(Context.getSourceManager(),
+ Context.getLangOpts(),
+ PPRec));
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ migrateDecl(*I);
+ return true;
+ }
+ virtual void HandleInterestingDecl(DeclGroupRef DG) {
+ // Ignore decls from the PCH.
+ }
+ virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
+ ObjCMigrateASTConsumer::HandleTopLevelDecl(DG);
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx);
+};
+
+}
+
+ObjCMigrateAction::ObjCMigrateAction(FrontendAction *WrappedAction,
+ StringRef migrateDir,
+ bool migrateLiterals,
+ bool migrateSubscripting)
+ : WrapperFrontendAction(WrappedAction), MigrateDir(migrateDir),
+ MigrateLiterals(migrateLiterals), MigrateSubscripting(migrateSubscripting),
+ CompInst(0) {
+ if (MigrateDir.empty())
+    MigrateDir = "."; // use the current directory if none is given.
+}
+
+ASTConsumer *ObjCMigrateAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ ASTConsumer *
+ WrappedConsumer = WrapperFrontendAction::CreateASTConsumer(CI, InFile);
+ ASTConsumer *MTConsumer = new ObjCMigrateASTConsumer(MigrateDir,
+ MigrateLiterals,
+ MigrateSubscripting,
+ Remapper,
+ CompInst->getFileManager(),
+ CompInst->getPreprocessor().getPreprocessingRecord());
+ ASTConsumer *Consumers[] = { MTConsumer, WrappedConsumer };
+ return new MultiplexConsumer(Consumers);
+}
+
+bool ObjCMigrateAction::BeginInvocation(CompilerInstance &CI) {
+ Remapper.initFromDisk(MigrateDir, CI.getDiagnostics(),
+ /*ignoreIfFilesChanges=*/true);
+ CompInst = &CI;
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ CI.getPreprocessorOpts().DetailedRecord = true;
+ CI.getPreprocessorOpts().DetailedRecordConditionalDirectives = true;
+ return true;
+}
+
+namespace {
+class ObjCMigrator : public RecursiveASTVisitor<ObjCMigrator> {
+ ObjCMigrateASTConsumer &Consumer;
+
+public:
+ ObjCMigrator(ObjCMigrateASTConsumer &consumer) : Consumer(consumer) { }
+
+ bool shouldVisitTemplateInstantiations() const { return false; }
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (Consumer.MigrateLiterals) {
+ edit::Commit commit(*Consumer.Editor);
+ edit::rewriteToObjCLiteralSyntax(E, *Consumer.NSAPIObj, commit);
+ Consumer.Editor->commit(commit);
+ }
+
+ if (Consumer.MigrateSubscripting) {
+ edit::Commit commit(*Consumer.Editor);
+ edit::rewriteToObjCSubscriptSyntax(E, *Consumer.NSAPIObj, commit);
+ Consumer.Editor->commit(commit);
+ }
+
+ return true;
+ }
+
+ bool TraverseObjCMessageExpr(ObjCMessageExpr *E) {
+  // Traverse depth-first; we want to rewrite the subexpressions first so
+  // that if we have to move expressions, we move them already rewritten.
+ for (Stmt::child_range range = E->children(); range; ++range)
+ if (!TraverseStmt(*range))
+ return false;
+
+ return WalkUpFromObjCMessageExpr(E);
+ }
+};
+}
+
+void ObjCMigrateASTConsumer::migrateDecl(Decl *D) {
+ if (!D)
+ return;
+ if (isa<ObjCMethodDecl>(D))
+ return; // Wait for the ObjC container declaration.
+
+ ObjCMigrator(*this).TraverseDecl(D);
+}
+
+namespace {
+
+class RewritesReceiver : public edit::EditsReceiver {
+ Rewriter &Rewrite;
+
+public:
+ RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
+
+ virtual void insert(SourceLocation loc, StringRef text) {
+ Rewrite.InsertText(loc, text);
+ }
+ virtual void replace(CharSourceRange range, StringRef text) {
+ Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
+ }
+};
+
+}
+
+void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
+ Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts());
+ RewritesReceiver Rec(rewriter);
+ Editor->applyRewrites(Rec);
+
+ for (Rewriter::buffer_iterator
+ I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
+ FileID FID = I->first;
+ RewriteBuffer &buf = I->second;
+ const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
+ assert(file);
+ llvm::SmallString<512> newText;
+ llvm::raw_svector_ostream vecOS(newText);
+ buf.write(vecOS);
+ vecOS.flush();
+ llvm::MemoryBuffer *memBuf = llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(newText.data(), newText.size()), file->getName());
+ llvm::SmallString<64> filePath(file->getName());
+ FileMgr.FixupRelativePath(filePath);
+ Remapper.remap(filePath.str(), memBuf);
+ }
+
+ if (IsOutputFile) {
+ Remapper.flushToFile(MigrateDir, Ctx.getDiagnostics());
+ } else {
+ Remapper.flushToDisk(MigrateDir, Ctx.getDiagnostics());
+ }
+}
+
+bool MigrateSourceAction::BeginInvocation(CompilerInstance &CI) {
+ CI.getPreprocessorOpts().DetailedRecord = true;
+ CI.getPreprocessorOpts().DetailedRecordConditionalDirectives = true;
+ return true;
+}
+
+ASTConsumer *MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return new ObjCMigrateASTConsumer(CI.getFrontendOpts().OutputFile,
+ /*MigrateLiterals=*/true,
+ /*MigrateSubscripting=*/true,
+ Remapper,
+ CI.getFileManager(),
+ CI.getPreprocessor().getPreprocessingRecord(),
+ /*isOutputFile=*/true);
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
index 1f10196..cfa6da1 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
@@ -39,6 +39,9 @@ public:
ARCAssignChecker(MigrationPass &pass) : Pass(pass) { }
bool VisitBinaryOperator(BinaryOperator *Exp) {
+ if (Exp->getType()->isDependentType())
+ return true;
+
Expr *E = Exp->getLHS();
SourceLocation OrigLoc = E->getExprLoc();
SourceLocation Loc = OrigLoc;
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
index 08561f9..8787724 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -263,10 +263,6 @@ private:
return checkRef(E->getLocation(), E->getDecl()->getLocation());
}
- bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
- return checkRef(E->getLocation(), E->getDecl()->getLocation());
- }
-
bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
return checkRef(TL.getBeginLoc(), TL.getTypedefNameDecl()->getLocation());
}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
index 48c0ca9..3be8132 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
@@ -38,7 +38,7 @@ namespace {
class RootBlockObjCVarRewriter :
public RecursiveASTVisitor<RootBlockObjCVarRewriter> {
MigrationPass &Pass;
- llvm::DenseSet<VarDecl *> CheckedVars;
+ llvm::DenseSet<VarDecl *> &VarsToChange;
class BlockVarChecker : public RecursiveASTVisitor<BlockVarChecker> {
VarDecl *Var;
@@ -48,13 +48,13 @@ class RootBlockObjCVarRewriter :
BlockVarChecker(VarDecl *var) : Var(var) { }
bool TraverseImplicitCastExpr(ImplicitCastExpr *castE) {
- if (BlockDeclRefExpr *
- ref = dyn_cast<BlockDeclRefExpr>(castE->getSubExpr())) {
+ if (DeclRefExpr *
+ ref = dyn_cast<DeclRefExpr>(castE->getSubExpr())) {
if (ref->getDecl() == Var) {
if (castE->getCastKind() == CK_LValueToRValue)
return true; // Using the value of the variable.
if (castE->getCastKind() == CK_NoOp && castE->isLValue() &&
- Var->getASTContext().getLangOptions().CPlusPlus)
+ Var->getASTContext().getLangOpts().CPlusPlus)
return true; // Binding to const C++ reference.
}
}
@@ -62,7 +62,7 @@ class RootBlockObjCVarRewriter :
return base::TraverseImplicitCastExpr(castE);
}
- bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
if (E->getDecl() == Var)
return false; // The reference of the variable, and not just its value,
// is needed.
@@ -71,7 +71,9 @@ class RootBlockObjCVarRewriter :
};
public:
- RootBlockObjCVarRewriter(MigrationPass &pass) : Pass(pass) { }
+ RootBlockObjCVarRewriter(MigrationPass &pass,
+ llvm::DenseSet<VarDecl *> &VarsToChange)
+ : Pass(pass), VarsToChange(VarsToChange) { }
bool VisitBlockDecl(BlockDecl *block) {
SmallVector<VarDecl *, 4> BlockVars;
@@ -80,7 +82,6 @@ public:
I = block->capture_begin(), E = block->capture_end(); I != E; ++I) {
VarDecl *var = I->getVariable();
if (I->isByRef() &&
- !isAlreadyChecked(var) &&
var->getType()->isObjCObjectPointerType() &&
isImplicitStrong(var->getType())) {
BlockVars.push_back(var);
@@ -89,32 +90,19 @@ public:
for (unsigned i = 0, e = BlockVars.size(); i != e; ++i) {
VarDecl *var = BlockVars[i];
- CheckedVars.insert(var);
BlockVarChecker checker(var);
bool onlyValueOfVarIsNeeded = checker.TraverseStmt(block->getBody());
- if (onlyValueOfVarIsNeeded) {
- BlocksAttr *attr = var->getAttr<BlocksAttr>();
- if(!attr)
- continue;
- bool useWeak = canApplyWeak(Pass.Ctx, var->getType());
- SourceManager &SM = Pass.Ctx.getSourceManager();
- Transaction Trans(Pass.TA);
- Pass.TA.replaceText(SM.getExpansionLoc(attr->getLocation()),
- "__block",
- useWeak ? "__weak" : "__unsafe_unretained");
- }
-
+ if (onlyValueOfVarIsNeeded)
+ VarsToChange.insert(var);
+ else
+ VarsToChange.erase(var);
}
return true;
}
private:
- bool isAlreadyChecked(VarDecl *VD) {
- return CheckedVars.count(VD);
- }
-
bool isImplicitStrong(QualType ty) {
if (isa<AttributedType>(ty.getTypePtr()))
return false;
@@ -124,19 +112,39 @@ private:
class BlockObjCVarRewriter : public RecursiveASTVisitor<BlockObjCVarRewriter> {
MigrationPass &Pass;
+ llvm::DenseSet<VarDecl *> &VarsToChange;
public:
- BlockObjCVarRewriter(MigrationPass &pass) : Pass(pass) { }
+ BlockObjCVarRewriter(MigrationPass &pass,
+ llvm::DenseSet<VarDecl *> &VarsToChange)
+ : Pass(pass), VarsToChange(VarsToChange) { }
bool TraverseBlockDecl(BlockDecl *block) {
- RootBlockObjCVarRewriter(Pass).TraverseDecl(block);
+ RootBlockObjCVarRewriter(Pass, VarsToChange).TraverseDecl(block);
return true;
}
};
} // anonymous namespace
-void trans::rewriteBlockObjCVariable(MigrationPass &pass) {
- BlockObjCVarRewriter trans(pass);
- trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+void BlockObjCVariableTraverser::traverseBody(BodyContext &BodyCtx) {
+ MigrationPass &Pass = BodyCtx.getMigrationContext().Pass;
+ llvm::DenseSet<VarDecl *> VarsToChange;
+
+ BlockObjCVarRewriter trans(Pass, VarsToChange);
+ trans.TraverseStmt(BodyCtx.getTopStmt());
+
+ for (llvm::DenseSet<VarDecl *>::iterator
+ I = VarsToChange.begin(), E = VarsToChange.end(); I != E; ++I) {
+ VarDecl *var = *I;
+ BlocksAttr *attr = var->getAttr<BlocksAttr>();
+ if(!attr)
+ continue;
+ bool useWeak = canApplyWeak(Pass.Ctx, var->getType());
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ Transaction Trans(Pass.TA);
+ Pass.TA.replaceText(SM.getExpansionLoc(attr->getLocation()),
+ "__block",
+ useWeak ? "__weak" : "__unsafe_unretained");
+ }
}
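The rewrite itself is unchanged, but candidates are now gathered across
every block in a body, and a variable is converted only when all blocks
use just its value. An illustrative before/after, assuming canApplyWeak
succeeds for the type (names hypothetical):

    // Before: __block needed only under manual retain/release.
    __block MyView *v = [self makeView];
    dispatch_async(queue, ^{ [v removeFromSuperview]; });

    // After migration:
    __weak MyView *v = [self makeView];
    dispatch_async(queue, ^{ [v removeFromSuperview]; });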
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
index 3ad05e6..0fb7141 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -196,35 +196,60 @@ static bool isBodyEmpty(CompoundStmt *body, ASTContext &Ctx,
return true;
}
-static void removeDeallocMethod(MigrationPass &pass) {
+static void cleanupDeallocOrFinalize(MigrationPass &pass) {
ASTContext &Ctx = pass.Ctx;
TransformActions &TA = pass.TA;
DeclContext *DC = Ctx.getTranslationUnitDecl();
+ Selector FinalizeSel =
+ Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize"));
typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl>
impl_iterator;
for (impl_iterator I = impl_iterator(DC->decls_begin()),
E = impl_iterator(DC->decls_end()); I != E; ++I) {
+ ObjCMethodDecl *DeallocM = 0;
+ ObjCMethodDecl *FinalizeM = 0;
for (ObjCImplementationDecl::instmeth_iterator
MI = (*I)->instmeth_begin(),
ME = (*I)->instmeth_end(); MI != ME; ++MI) {
ObjCMethodDecl *MD = *MI;
+ if (!MD->hasBody())
+ continue;
+
if (MD->getMethodFamily() == OMF_dealloc) {
- if (MD->hasBody() &&
- isBodyEmpty(MD->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
- Transaction Trans(TA);
- TA.remove(MD->getSourceRange());
- }
- break;
+ DeallocM = MD;
+ } else if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) {
+ FinalizeM = MD;
+ }
+ }
+
+ if (DeallocM) {
+ if (isBodyEmpty(DeallocM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
+ Transaction Trans(TA);
+ TA.remove(DeallocM->getSourceRange());
+ }
+
+ if (FinalizeM) {
+ Transaction Trans(TA);
+ TA.remove(FinalizeM->getSourceRange());
+ }
+
+ } else if (FinalizeM) {
+ if (isBodyEmpty(FinalizeM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
+ Transaction Trans(TA);
+ TA.remove(FinalizeM->getSourceRange());
+ } else {
+ Transaction Trans(TA);
+ TA.replaceText(FinalizeM->getSelectorStartLoc(), "finalize", "dealloc");
}
}
}
}
-void trans::removeEmptyStatementsAndDealloc(MigrationPass &pass) {
+void trans::removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass) {
EmptyStatementsRemover(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl());
- removeDeallocMethod(pass);
+ cleanupDeallocOrFinalize(pass);
for (unsigned i = 0, e = pass.ARCMTMacroLocs.size(); i != e; ++i) {
Transaction Trans(pass.TA);
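A sketch of the new -finalize handling, alongside the existing removal of
empty -dealloc methods (illustrative code, not from the patch):

    // Empty -dealloc or -finalize bodies are removed outright.
    // A non-empty -finalize with no -dealloc has its selector renamed:
    - (void)finalize { [self teardown]; }
    // becomes
    - (void)dealloc { [self teardown]; }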
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
new file mode 100644
index 0000000..9f6066e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -0,0 +1,358 @@
+//===--- TransGCAttrs.cpp - Transformations to ARC mode --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/TinyPtrVector.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+/// \brief Collects all the places where GC attributes __strong/__weak occur.
+class GCAttrsCollector : public RecursiveASTVisitor<GCAttrsCollector> {
+ MigrationContext &MigrateCtx;
+ bool FullyMigratable;
+ std::vector<ObjCPropertyDecl *> &AllProps;
+
+ typedef RecursiveASTVisitor<GCAttrsCollector> base;
+public:
+ GCAttrsCollector(MigrationContext &ctx,
+ std::vector<ObjCPropertyDecl *> &AllProps)
+ : MigrateCtx(ctx), FullyMigratable(false),
+ AllProps(AllProps) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ handleAttr(TL);
+ return true;
+ }
+
+ bool TraverseDecl(Decl *D) {
+ if (!D || D->isImplicit())
+ return true;
+
+ SaveAndRestore<bool> Save(FullyMigratable, isMigratable(D));
+
+ if (ObjCPropertyDecl *PropD = dyn_cast<ObjCPropertyDecl>(D)) {
+ lookForAttribute(PropD, PropD->getTypeSourceInfo());
+ AllProps.push_back(PropD);
+ } else if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ lookForAttribute(DD, DD->getTypeSourceInfo());
+ }
+ return base::TraverseDecl(D);
+ }
+
+ void lookForAttribute(Decl *D, TypeSourceInfo *TInfo) {
+ if (!TInfo)
+ return;
+ TypeLoc TL = TInfo->getTypeLoc();
+ while (TL) {
+ if (const QualifiedTypeLoc *QL = dyn_cast<QualifiedTypeLoc>(&TL)) {
+ TL = QL->getUnqualifiedLoc();
+ } else if (const AttributedTypeLoc *
+ Attr = dyn_cast<AttributedTypeLoc>(&TL)) {
+ if (handleAttr(*Attr, D))
+ break;
+ TL = Attr->getModifiedLoc();
+ } else if (const ArrayTypeLoc *Arr = dyn_cast<ArrayTypeLoc>(&TL)) {
+ TL = Arr->getElementLoc();
+ } else if (const PointerTypeLoc *PT = dyn_cast<PointerTypeLoc>(&TL)) {
+ TL = PT->getPointeeLoc();
+ } else if (const ReferenceTypeLoc *RT = dyn_cast<ReferenceTypeLoc>(&TL))
+ TL = RT->getPointeeLoc();
+ else
+ break;
+ }
+ }
+
+ bool handleAttr(AttributedTypeLoc TL, Decl *D = 0) {
+ if (TL.getAttrKind() != AttributedType::attr_objc_ownership)
+ return false;
+
+ SourceLocation Loc = TL.getAttrNameLoc();
+ unsigned RawLoc = Loc.getRawEncoding();
+ if (MigrateCtx.AttrSet.count(RawLoc))
+ return true;
+
+ ASTContext &Ctx = MigrateCtx.Pass.Ctx;
+ SourceManager &SM = Ctx.getSourceManager();
+ if (Loc.isMacroID())
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+ SmallString<32> Buf;
+ bool Invalid = false;
+ StringRef Spell = Lexer::getSpelling(
+ SM.getSpellingLoc(TL.getAttrEnumOperandLoc()),
+ Buf, SM, Ctx.getLangOpts(), &Invalid);
+ if (Invalid)
+ return false;
+ MigrationContext::GCAttrOccurrence::AttrKind Kind;
+ if (Spell == "strong")
+ Kind = MigrationContext::GCAttrOccurrence::Strong;
+ else if (Spell == "weak")
+ Kind = MigrationContext::GCAttrOccurrence::Weak;
+ else
+ return false;
+
+ MigrateCtx.AttrSet.insert(RawLoc);
+ MigrateCtx.GCAttrs.push_back(MigrationContext::GCAttrOccurrence());
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs.back();
+
+ Attr.Kind = Kind;
+ Attr.Loc = Loc;
+ Attr.ModifiedType = TL.getModifiedLoc().getType();
+ Attr.Dcl = D;
+ Attr.FullyMigratable = FullyMigratable;
+ return true;
+ }
+
+ bool isMigratable(Decl *D) {
+ if (isa<TranslationUnitDecl>(D))
+ return false;
+
+ if (isInMainFile(D))
+ return true;
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->hasBody();
+
+ if (ObjCContainerDecl *ContD = dyn_cast<ObjCContainerDecl>(D))
+ return hasObjCImpl(ContD);
+
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ for (CXXRecordDecl::method_iterator
+ MI = RD->method_begin(), ME = RD->method_end(); MI != ME; ++MI) {
+ if ((*MI)->isOutOfLine())
+ return true;
+ }
+ return false;
+ }
+
+ return isMigratable(cast<Decl>(D->getDeclContext()));
+ }
+
+ static bool hasObjCImpl(Decl *D) {
+ if (!D)
+ return false;
+ if (ObjCContainerDecl *ContD = dyn_cast<ObjCContainerDecl>(D)) {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ContD))
+ return ID->getImplementation() != 0;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContD))
+ return CD->getImplementation() != 0;
+ if (isa<ObjCImplDecl>(ContD))
+ return true;
+ return false;
+ }
+ return false;
+ }
+
+ bool isInMainFile(Decl *D) {
+ if (!D)
+ return false;
+
+ for (Decl::redecl_iterator
+ I = D->redecls_begin(), E = D->redecls_end(); I != E; ++I)
+ if (!isInMainFile((*I)->getLocation()))
+ return false;
+
+ return true;
+ }
+
+ bool isInMainFile(SourceLocation Loc) {
+ if (Loc.isInvalid())
+ return false;
+
+ SourceManager &SM = MigrateCtx.Pass.Ctx.getSourceManager();
+ return SM.isInFileID(SM.getExpansionLoc(Loc), SM.getMainFileID());
+ }
+};
+
+} // anonymous namespace
+
+static void errorForGCAttrsOnNonObjC(MigrationContext &MigrateCtx) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ for (unsigned i = 0, e = MigrateCtx.GCAttrs.size(); i != e; ++i) {
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs[i];
+ if (Attr.FullyMigratable && Attr.Dcl) {
+ if (Attr.ModifiedType.isNull())
+ continue;
+ if (!Attr.ModifiedType->isObjCRetainableType()) {
+ TA.reportError("GC managed memory will become unmanaged in ARC",
+ Attr.Loc);
+ }
+ }
+ }
+}
+
+static void checkWeakGCAttrs(MigrationContext &MigrateCtx) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ for (unsigned i = 0, e = MigrateCtx.GCAttrs.size(); i != e; ++i) {
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs[i];
+ if (Attr.Kind == MigrationContext::GCAttrOccurrence::Weak) {
+ if (Attr.ModifiedType.isNull() ||
+ !Attr.ModifiedType->isObjCRetainableType())
+ continue;
+ if (!canApplyWeak(MigrateCtx.Pass.Ctx, Attr.ModifiedType,
+ /*AllowOnUnknownClass=*/true)) {
+ Transaction Trans(TA);
+ if (!MigrateCtx.RemovedAttrSet.count(Attr.Loc.getRawEncoding()))
+ TA.replaceText(Attr.Loc, "__weak", "__unsafe_unretained");
+ TA.clearDiagnostic(diag::err_arc_weak_no_runtime,
+ diag::err_arc_unsupported_weak_class,
+ Attr.Loc);
+ }
+ }
+ }
+}
+
+typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy;
+
+static void checkAllAtProps(MigrationContext &MigrateCtx,
+ SourceLocation AtLoc,
+ IndivPropsTy &IndProps) {
+ if (IndProps.empty())
+ return;
+
+ for (IndivPropsTy::iterator
+ PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
+ QualType T = (*PI)->getType();
+ if (T.isNull() || !T->isObjCRetainableType())
+ return;
+ }
+
+ SmallVector<std::pair<AttributedTypeLoc, ObjCPropertyDecl *>, 4> ATLs;
+ bool hasWeak = false, hasStrong = false;
+ ObjCPropertyDecl::PropertyAttributeKind
+ Attrs = ObjCPropertyDecl::OBJC_PR_noattr;
+ for (IndivPropsTy::iterator
+ PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
+ ObjCPropertyDecl *PD = *PI;
+ Attrs = PD->getPropertyAttributesAsWritten();
+ TypeSourceInfo *TInfo = PD->getTypeSourceInfo();
+ if (!TInfo)
+ return;
+ TypeLoc TL = TInfo->getTypeLoc();
+ if (AttributedTypeLoc *ATL = dyn_cast<AttributedTypeLoc>(&TL)) {
+ ATLs.push_back(std::make_pair(*ATL, PD));
+ if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ hasWeak = true;
+ } else if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Strong)
+ hasStrong = true;
+ else
+ return;
+ }
+ }
+ if (ATLs.empty())
+ return;
+ if (hasWeak && hasStrong)
+ return;
+
+ TransformActions &TA = MigrateCtx.Pass.TA;
+ Transaction Trans(TA);
+
+ if (GCAttrsCollector::hasObjCImpl(
+ cast<Decl>(IndProps.front()->getDeclContext()))) {
+ if (hasWeak)
+ MigrateCtx.AtPropsWeak.insert(AtLoc.getRawEncoding());
+
+ } else {
+ StringRef toAttr = "strong";
+ if (hasWeak) {
+ if (canApplyWeak(MigrateCtx.Pass.Ctx, IndProps.front()->getType(),
+ /*AllowOnUnknownClass=*/true))
+ toAttr = "weak";
+ else
+ toAttr = "unsafe_unretained";
+ }
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ MigrateCtx.rewritePropertyAttribute("assign", toAttr, AtLoc);
+ else
+ MigrateCtx.addPropertyAttribute(toAttr, AtLoc);
+ }
+
+ for (unsigned i = 0, e = ATLs.size(); i != e; ++i) {
+ SourceLocation Loc = ATLs[i].first.getAttrNameLoc();
+ if (Loc.isMacroID())
+ Loc = MigrateCtx.Pass.Ctx.getSourceManager()
+ .getImmediateExpansionRange(Loc).first;
+ TA.remove(Loc);
+ TA.clearDiagnostic(diag::err_objc_property_attr_mutually_exclusive, AtLoc);
+ TA.clearDiagnostic(diag::err_arc_inconsistent_property_ownership,
+ ATLs[i].second->getLocation());
+ MigrateCtx.RemovedAttrSet.insert(Loc.getRawEncoding());
+ }
+}
+
+static void checkAllProps(MigrationContext &MigrateCtx,
+ std::vector<ObjCPropertyDecl *> &AllProps) {
+ typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy;
+ llvm::DenseMap<unsigned, IndivPropsTy> AtProps;
+
+ for (unsigned i = 0, e = AllProps.size(); i != e; ++i) {
+ ObjCPropertyDecl *PD = AllProps[i];
+ if (PD->getPropertyAttributesAsWritten() &
+ (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_readonly)) {
+ SourceLocation AtLoc = PD->getAtLoc();
+ if (AtLoc.isInvalid())
+ continue;
+ unsigned RawAt = AtLoc.getRawEncoding();
+ AtProps[RawAt].push_back(PD);
+ }
+ }
+
+ for (llvm::DenseMap<unsigned, IndivPropsTy>::iterator
+ I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
+ SourceLocation AtLoc = SourceLocation::getFromRawEncoding(I->first);
+ IndivPropsTy &IndProps = I->second;
+ checkAllAtProps(MigrateCtx, AtLoc, IndProps);
+ }
+}
+
+void GCAttrsTraverser::traverseTU(MigrationContext &MigrateCtx) {
+ std::vector<ObjCPropertyDecl *> AllProps;
+ GCAttrsCollector(MigrateCtx, AllProps).TraverseDecl(
+ MigrateCtx.Pass.Ctx.getTranslationUnitDecl());
+
+ errorForGCAttrsOnNonObjC(MigrateCtx);
+ checkAllProps(MigrateCtx, AllProps);
+ checkWeakGCAttrs(MigrateCtx);
+}
+
+void MigrationContext::dumpGCAttrs() {
+ llvm::errs() << "\n################\n";
+ for (unsigned i = 0, e = GCAttrs.size(); i != e; ++i) {
+ GCAttrOccurrence &Attr = GCAttrs[i];
+ llvm::errs() << "KIND: "
+ << (Attr.Kind == GCAttrOccurrence::Strong ? "strong" : "weak");
+ llvm::errs() << "\nLOC: ";
+ Attr.Loc.dump(Pass.Ctx.getSourceManager());
+ llvm::errs() << "\nTYPE: ";
+ Attr.ModifiedType.dump();
+ if (Attr.Dcl) {
+ llvm::errs() << "DECL:\n";
+ Attr.Dcl->dump();
+ } else {
+ llvm::errs() << "DECL: NONE";
+ }
+ llvm::errs() << "\nMIGRATABLE: " << Attr.FullyMigratable;
+ llvm::errs() << "\n----------------\n";
+ }
+ llvm::errs() << "\n################\n";
+}
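The effect of this new pass, sketched on hypothetical GC-style code:

    // Under GC:
    __weak MyDelegate *observer;   // GC-style ownership attribute
    __strong char *buffer;         // flagged: not an ObjC retainable type

    // After migration, a __weak that cannot be verified safe for ARC
    // (per canApplyWeak) is downgraded:
    __unsafe_unretained MyDelegate *observer;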
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
new file mode 100644
index 0000000..1be9020
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
@@ -0,0 +1,84 @@
+//===--- TransGCCalls.cpp - Transformations to ARC mode -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class GCCollectableCallsChecker :
+ public RecursiveASTVisitor<GCCollectableCallsChecker> {
+ MigrationContext &MigrateCtx;
+ ParentMap &PMap;
+ IdentifierInfo *NSMakeCollectableII;
+ IdentifierInfo *CFMakeCollectableII;
+
+public:
+ GCCollectableCallsChecker(MigrationContext &ctx, ParentMap &map)
+ : MigrateCtx(ctx), PMap(map) {
+ IdentifierTable &Ids = MigrateCtx.Pass.Ctx.Idents;
+ NSMakeCollectableII = &Ids.get("NSMakeCollectable");
+ CFMakeCollectableII = &Ids.get("CFMakeCollectable");
+ }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitCallExpr(CallExpr *E) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ if (MigrateCtx.isGCOwnedNonObjC(E->getType())) {
+ if (MigrateCtx.Pass.noNSAllocReallocError())
+ TA.reportWarning("call returns pointer to GC managed memory; "
+ "it will become unmanaged in ARC",
+ E->getLocStart(), E->getSourceRange());
+ else
+ TA.reportError("call returns pointer to GC managed memory; "
+ "it will become unmanaged in ARC",
+ E->getLocStart(), E->getSourceRange());
+ return true;
+ }
+
+ Expr *CEE = E->getCallee()->IgnoreParenImpCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
+ if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(DRE->getDecl())) {
+ if (!FD->getDeclContext()->getRedeclContext()->isFileContext())
+ return true;
+
+ if (FD->getIdentifier() == NSMakeCollectableII) {
+ Transaction Trans(TA);
+ TA.clearDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ diag::err_ovl_deleted_call, // ObjC++
+ DRE->getSourceRange());
+ TA.replace(DRE->getSourceRange(), "CFBridgingRelease");
+
+ } else if (FD->getIdentifier() == CFMakeCollectableII) {
+ TA.reportError("CFMakeCollectable will leak the object that it "
+ "receives in ARC", DRE->getLocation(),
+ DRE->getSourceRange());
+ }
+ }
+ }
+
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void GCCollectableCallsTraverser::traverseBody(BodyContext &BodyCtx) {
+ GCCollectableCallsChecker(BodyCtx.getMigrationContext(),
+ BodyCtx.getParentMap())
+ .TraverseStmt(BodyCtx.getTopStmt());
+}
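An illustrative rewrite performed by this checker (the CF function name
is hypothetical):

    // Before: NSMakeCollectable is unavailable under ARC.
    id obj = NSMakeCollectable(MyCreateCFObject());
    // After:
    id obj = CFBridgingRelease(MyCreateCFObject());

CFMakeCollectable is deliberately not rewritten; it is reported as an
object leak under ARC so the user can intervene.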
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
index ca845b6..cc85fe2 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
@@ -44,8 +44,17 @@ using namespace trans;
namespace {
class PropertiesRewriter {
+ MigrationContext &MigrateCtx;
MigrationPass &Pass;
ObjCImplementationDecl *CurImplD;
+
+ enum PropActionKind {
+ PropAction_None,
+ PropAction_RetainReplacedWithStrong,
+ PropAction_AssignRemoved,
+ PropAction_AssignRewritten,
+ PropAction_MaybeAddWeakOrUnsafe
+ };
struct PropData {
ObjCPropertyDecl *PropD;
@@ -58,9 +67,27 @@ class PropertiesRewriter {
typedef SmallVector<PropData, 2> PropsTy;
typedef std::map<unsigned, PropsTy> AtPropDeclsTy;
AtPropDeclsTy AtProps;
+ llvm::DenseMap<IdentifierInfo *, PropActionKind> ActionOnProp;
public:
- PropertiesRewriter(MigrationPass &pass) : Pass(pass) { }
+ explicit PropertiesRewriter(MigrationContext &MigrateCtx)
+ : MigrateCtx(MigrateCtx), Pass(MigrateCtx.Pass) { }
+
+ static void collectProperties(ObjCContainerDecl *D, AtPropDeclsTy &AtProps,
+ AtPropDeclsTy *PrevAtProps = 0) {
+ for (ObjCInterfaceDecl::prop_iterator
+ propI = D->prop_begin(),
+ propE = D->prop_end(); propI != propE; ++propI) {
+ if (propI->getAtLoc().isInvalid())
+ continue;
+ unsigned RawLoc = propI->getAtLoc().getRawEncoding();
+ if (PrevAtProps)
+ if (PrevAtProps->find(RawLoc) != PrevAtProps->end())
+ continue;
+ PropsTy &props = AtProps[RawLoc];
+ props.push_back(*propI);
+ }
+ }
void doTransform(ObjCImplementationDecl *D) {
CurImplD = D;
@@ -68,14 +95,7 @@ public:
if (!iface)
return;
- for (ObjCInterfaceDecl::prop_iterator
- propI = iface->prop_begin(),
- propE = iface->prop_end(); propI != propE; ++propI) {
- if (propI->getAtLoc().isInvalid())
- continue;
- PropsTy &props = AtProps[propI->getAtLoc().getRawEncoding()];
- props.push_back(*propI);
- }
+ collectProperties(iface, AtProps);
typedef DeclContext::specific_decl_iterator<ObjCPropertyImplDecl>
prop_impl_iterator;
@@ -110,19 +130,66 @@ public:
I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
PropsTy &props = I->second;
- QualType ty = getPropertyType(props);
- if (!ty->isObjCRetainableType())
+ if (!getPropertyType(props)->isObjCRetainableType())
continue;
- if (hasIvarWithExplicitOwnership(props))
+ if (hasIvarWithExplicitARCOwnership(props))
continue;
Transaction Trans(Pass.TA);
rewriteProperty(props, atLoc);
}
+
+ AtPropDeclsTy AtExtProps;
+ // Look through extensions.
+ for (ObjCCategoryDecl *Cat = iface->getCategoryList();
+ Cat; Cat = Cat->getNextClassCategory())
+ if (Cat->IsClassExtension())
+ collectProperties(Cat, AtExtProps, &AtProps);
+
+ for (AtPropDeclsTy::iterator
+ I = AtExtProps.begin(), E = AtExtProps.end(); I != E; ++I) {
+ SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
+ PropsTy &props = I->second;
+ Transaction Trans(Pass.TA);
+ doActionForExtensionProp(props, atLoc);
+ }
}
private:
- void rewriteProperty(PropsTy &props, SourceLocation atLoc) const {
+ void doPropAction(PropActionKind kind,
+ PropsTy &props, SourceLocation atLoc,
+ bool markAction = true) {
+ if (markAction)
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ ActionOnProp[I->PropD->getIdentifier()] = kind;
+
+ switch (kind) {
+ case PropAction_None:
+ return;
+ case PropAction_RetainReplacedWithStrong: {
+ StringRef toAttr = "strong";
+ MigrateCtx.rewritePropertyAttribute("retain", toAttr, atLoc);
+ return;
+ }
+ case PropAction_AssignRemoved:
+ return removeAssignForDefaultStrong(props, atLoc);
+ case PropAction_AssignRewritten:
+ return rewriteAssign(props, atLoc);
+ case PropAction_MaybeAddWeakOrUnsafe:
+ return maybeAddWeakOrUnsafeUnretainedAttr(props, atLoc);
+ }
+ }
+
+ void doActionForExtensionProp(PropsTy &props, SourceLocation atLoc) {
+ llvm::DenseMap<IdentifierInfo *, PropActionKind>::iterator I;
+ I = ActionOnProp.find(props[0].PropD->getIdentifier());
+ if (I == ActionOnProp.end())
+ return;
+
+ doPropAction(I->second, props, atLoc, false);
+ }
+
+ void rewriteProperty(PropsTy &props, SourceLocation atLoc) {
ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
if (propAttrs & (ObjCPropertyDecl::OBJC_PR_copy |
@@ -132,79 +199,82 @@ private:
return;
if (propAttrs & ObjCPropertyDecl::OBJC_PR_retain) {
- rewriteAttribute("retain", "strong", atLoc);
- return;
+ // strong is the default.
+ return doPropAction(PropAction_RetainReplacedWithStrong, props, atLoc);
}
+ bool HasIvarAssignedAPlusOneObject = hasIvarAssignedAPlusOneObject(props);
+
if (propAttrs & ObjCPropertyDecl::OBJC_PR_assign) {
- if (hasIvarAssignedAPlusOneObject(props)) {
- rewriteAttribute("assign", "strong", atLoc);
- return;
- }
- return rewriteAssign(props, atLoc);
+ if (HasIvarAssignedAPlusOneObject)
+ return doPropAction(PropAction_AssignRemoved, props, atLoc);
+ return doPropAction(PropAction_AssignRewritten, props, atLoc);
}
- if (hasIvarAssignedAPlusOneObject(props))
- return maybeAddStrongAttr(props, atLoc);
+ if (HasIvarAssignedAPlusOneObject ||
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)))
+ return; // 'strong' by default.
- return maybeAddWeakOrUnsafeUnretainedAttr(props, atLoc);
+ return doPropAction(PropAction_MaybeAddWeakOrUnsafe, props, atLoc);
}
- void rewriteAssign(PropsTy &props, SourceLocation atLoc) const {
- bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props));
-
- bool rewroteAttr = rewriteAttribute("assign",
- canUseWeak ? "weak" : "unsafe_unretained",
- atLoc);
- if (!rewroteAttr)
- canUseWeak = false;
+ void removeAssignForDefaultStrong(PropsTy &props,
+ SourceLocation atLoc) const {
+ removeAttribute("retain", atLoc);
+ if (!removeAttribute("assign", atLoc))
+ return;
for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
- if (isUserDeclared(I->IvarD))
- Pass.TA.insert(I->IvarD->getLocation(),
- canUseWeak ? "__weak " : "__unsafe_unretained ");
if (I->ImplD)
Pass.TA.clearDiagnostic(diag::err_arc_assign_property_ownership,
I->ImplD->getLocation());
}
}
- void maybeAddWeakOrUnsafeUnretainedAttr(PropsTy &props,
- SourceLocation atLoc) const {
- ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
+ void rewriteAssign(PropsTy &props, SourceLocation atLoc) const {
+ bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props),
+ /*AllowOnUnknownClass=*/Pass.isGCMigration());
+ const char *toWhich =
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)) ? "strong" :
+ (canUseWeak ? "weak" : "unsafe_unretained");
- bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props));
- if (!(propAttrs & ObjCPropertyDecl::OBJC_PR_readonly) ||
- !hasAllIvarsBacked(props)) {
- bool addedAttr = addAttribute(canUseWeak ? "weak" : "unsafe_unretained",
- atLoc);
- if (!addedAttr)
- canUseWeak = false;
- }
+ bool rewroteAttr = rewriteAttribute("assign", toWhich, atLoc);
+ if (!rewroteAttr)
+ canUseWeak = false;
for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
- if (isUserDeclared(I->IvarD))
- Pass.TA.insert(I->IvarD->getLocation(),
- canUseWeak ? "__weak " : "__unsafe_unretained ");
- if (I->ImplD) {
+ if (isUserDeclared(I->IvarD)) {
+ if (I->IvarD &&
+ I->IvarD->getType().getObjCLifetime() != Qualifiers::OCL_Weak) {
+ const char *toWhich =
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)) ? "__strong " :
+ (canUseWeak ? "__weak " : "__unsafe_unretained ");
+ Pass.TA.insert(I->IvarD->getLocation(), toWhich);
+ }
+ }
+ if (I->ImplD)
Pass.TA.clearDiagnostic(diag::err_arc_assign_property_ownership,
I->ImplD->getLocation());
- Pass.TA.clearDiagnostic(
- diag::err_arc_objc_property_default_assign_on_object,
- I->ImplD->getLocation());
- }
}
}
- void maybeAddStrongAttr(PropsTy &props, SourceLocation atLoc) const {
- ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
+ void maybeAddWeakOrUnsafeUnretainedAttr(PropsTy &props,
+ SourceLocation atLoc) const {
+ bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props),
+ /*AllowOnUnknownClass=*/Pass.isGCMigration());
- if (!(propAttrs & ObjCPropertyDecl::OBJC_PR_readonly) ||
- !hasAllIvarsBacked(props)) {
- addAttribute("strong", atLoc);
- }
+ bool addedAttr = addAttribute(canUseWeak ? "weak" : "unsafe_unretained",
+ atLoc);
+ if (!addedAttr)
+ canUseWeak = false;
for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD)) {
+ if (I->IvarD &&
+ I->IvarD->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
+ Pass.TA.insert(I->IvarD->getLocation(),
+ canUseWeak ? "__weak " : "__unsafe_unretained ");
+ }
if (I->ImplD) {
Pass.TA.clearDiagnostic(diag::err_arc_assign_property_ownership,
I->ImplD->getLocation());
@@ -215,108 +285,17 @@ private:
}
}
+ bool removeAttribute(StringRef fromAttr, SourceLocation atLoc) const {
+ return MigrateCtx.removePropertyAttribute(fromAttr, atLoc);
+ }
+
bool rewriteAttribute(StringRef fromAttr, StringRef toAttr,
SourceLocation atLoc) const {
- if (atLoc.isMacroID())
- return false;
-
- SourceManager &SM = Pass.Ctx.getSourceManager();
-
- // Break down the source location.
- std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
-
- // Try to load the file buffer.
- bool invalidTemp = false;
- StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
- if (invalidTemp)
- return false;
-
- const char *tokenBegin = file.data() + locInfo.second;
-
- // Lex from the start of the given location.
- Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
- Pass.Ctx.getLangOptions(),
- file.begin(), tokenBegin, file.end());
- Token tok;
- lexer.LexFromRawLexer(tok);
- if (tok.isNot(tok::at)) return false;
- lexer.LexFromRawLexer(tok);
- if (tok.isNot(tok::raw_identifier)) return false;
- if (StringRef(tok.getRawIdentifierData(), tok.getLength())
- != "property")
- return false;
- lexer.LexFromRawLexer(tok);
- if (tok.isNot(tok::l_paren)) return false;
-
- lexer.LexFromRawLexer(tok);
- if (tok.is(tok::r_paren))
- return false;
-
- while (1) {
- if (tok.isNot(tok::raw_identifier)) return false;
- StringRef ident(tok.getRawIdentifierData(), tok.getLength());
- if (ident == fromAttr) {
- Pass.TA.replaceText(tok.getLocation(), fromAttr, toAttr);
- return true;
- }
-
- do {
- lexer.LexFromRawLexer(tok);
- } while (tok.isNot(tok::comma) && tok.isNot(tok::r_paren));
- if (tok.is(tok::r_paren))
- break;
- lexer.LexFromRawLexer(tok);
- }
-
- return false;
+ return MigrateCtx.rewritePropertyAttribute(fromAttr, toAttr, atLoc);
}
bool addAttribute(StringRef attr, SourceLocation atLoc) const {
- if (atLoc.isMacroID())
- return false;
-
- SourceManager &SM = Pass.Ctx.getSourceManager();
-
- // Break down the source location.
- std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
-
- // Try to load the file buffer.
- bool invalidTemp = false;
- StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
- if (invalidTemp)
- return false;
-
- const char *tokenBegin = file.data() + locInfo.second;
-
- // Lex from the start of the given location.
- Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
- Pass.Ctx.getLangOptions(),
- file.begin(), tokenBegin, file.end());
- Token tok;
- lexer.LexFromRawLexer(tok);
- if (tok.isNot(tok::at)) return false;
- lexer.LexFromRawLexer(tok);
- if (tok.isNot(tok::raw_identifier)) return false;
- if (StringRef(tok.getRawIdentifierData(), tok.getLength())
- != "property")
- return false;
- lexer.LexFromRawLexer(tok);
-
- if (tok.isNot(tok::l_paren)) {
- Pass.TA.insert(tok.getLocation(), std::string("(") + attr.str() + ") ");
- return true;
- }
-
- lexer.LexFromRawLexer(tok);
- if (tok.is(tok::r_paren)) {
- Pass.TA.insert(tok.getLocation(), attr);
- return true;
- }
-
- if (tok.isNot(tok::raw_identifier)) return false;
-
- Pass.TA.insert(tok.getLocation(), std::string(attr) + ", ");
- return true;
+ return MigrateCtx.addPropertyAttribute(attr, atLoc);
}
class PlusOneAssign : public RecursiveASTVisitor<PlusOneAssign> {
@@ -358,7 +337,10 @@ private:
return false;
}
- bool hasIvarWithExplicitOwnership(PropsTy &props) const {
+ bool hasIvarWithExplicitARCOwnership(PropsTy &props) const {
+ if (Pass.isGCMigration())
+ return false;
+
for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
if (isUserDeclared(I->IvarD)) {
if (isa<AttributedType>(I->IvarD->getType()))
@@ -380,17 +362,26 @@ private:
return true;
}
+ /// \brief Returns true if all declarations in the @property have GC __weak.
+ bool hasGCWeak(PropsTy &props, SourceLocation atLoc) const {
+ if (!Pass.isGCMigration())
+ return false;
+ if (props.empty())
+ return false;
+ return MigrateCtx.AtPropsWeak.count(atLoc.getRawEncoding());
+ }
+
bool isUserDeclared(ObjCIvarDecl *ivarD) const {
return ivarD && !ivarD->getSynthesize();
}
QualType getPropertyType(PropsTy &props) const {
assert(!props.empty());
- QualType ty = props[0].PropD->getType();
+ QualType ty = props[0].PropD->getType().getUnqualifiedType();
#ifndef NDEBUG
for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
- assert(ty == I->PropD->getType());
+ assert(ty == I->PropD->getType().getUnqualifiedType());
#endif
return ty;
@@ -411,21 +402,10 @@ private:
}
};
-class ImplementationChecker :
- public RecursiveASTVisitor<ImplementationChecker> {
- MigrationPass &Pass;
-
-public:
- ImplementationChecker(MigrationPass &pass) : Pass(pass) { }
-
- bool TraverseObjCImplementationDecl(ObjCImplementationDecl *D) {
- PropertiesRewriter(Pass).doTransform(D);
- return true;
- }
-};
-
} // anonymous namespace
-void trans::rewriteProperties(MigrationPass &pass) {
- ImplementationChecker(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+void PropertyRewriteTraverser::traverseObjCImplementation(
+ ObjCImplementationContext &ImplCtx) {
+ PropertiesRewriter(ImplCtx.getMigrationContext())
+ .doTransform(ImplCtx.getImplementationDecl());
}
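Typical property rewrites are now routed through doPropAction, so a class
extension redeclaring a property repeats whatever action was chosen for
the primary interface. Illustrative sketches:

    @property (retain) NSString *title;
    // becomes
    @property (strong) NSString *title;

    @property (assign) id delegate;
    // becomes (or unsafe_unretained when weak cannot be applied)
    @property (weak) id delegate;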
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index 394f848..11a6553 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -21,6 +21,8 @@
#include "Internals.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/AST/ParentMap.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
using namespace clang;
using namespace arcmt;
@@ -34,15 +36,17 @@ class RetainReleaseDeallocRemover :
MigrationPass &Pass;
ExprSet Removables;
- llvm::OwningPtr<ParentMap> StmtMap;
+ OwningPtr<ParentMap> StmtMap;
- Selector DelegateSel;
+ Selector DelegateSel, FinalizeSel;
public:
RetainReleaseDeallocRemover(MigrationPass &pass)
: Body(0), Pass(pass) {
DelegateSel =
Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("delegate"));
+ FinalizeSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize"));
}
void transformBody(Stmt *body) {
@@ -55,6 +59,8 @@ public:
bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
switch (E->getMethodFamily()) {
default:
+ if (E->isInstanceMessage() && E->getSelector() == FinalizeSel)
+ break;
return true;
case OMF_autorelease:
if (isRemovable(E)) {
@@ -124,27 +130,105 @@ public:
Transaction Trans(Pass.TA);
clearDiagnostics(rec->getExprLoc());
- if (E->getMethodFamily() == OMF_release &&
- isRemovable(E) && isInAtFinally(E)) {
+ ObjCMessageExpr *Msg = E;
+ Expr *RecContainer = Msg;
+ SourceRange RecRange = rec->getSourceRange();
+ checkForGCDOrXPC(Msg, RecContainer, rec, RecRange);
+
+ if (Msg->getMethodFamily() == OMF_release &&
+ isRemovable(RecContainer) && isInAtFinally(RecContainer)) {
// Change the -release to "receiver = nil" in a finally to avoid a leak
// when an exception is thrown.
- Pass.TA.replace(E->getSourceRange(), rec->getSourceRange());
+ Pass.TA.replace(RecContainer->getSourceRange(), RecRange);
std::string str = " = ";
str += getNilString(Pass.Ctx);
- Pass.TA.insertAfterToken(rec->getLocEnd(), str);
+ Pass.TA.insertAfterToken(RecRange.getEnd(), str);
return true;
}
- if (!hasSideEffects(E, Pass.Ctx)) {
- if (tryRemoving(E))
+ if (!hasSideEffects(rec, Pass.Ctx)) {
+ if (tryRemoving(RecContainer))
return true;
}
- Pass.TA.replace(E->getSourceRange(), rec->getSourceRange());
+ Pass.TA.replace(RecContainer->getSourceRange(), RecRange);
return true;
}
private:
+ /// \brief Check if the retain/release is due to one of the GCD/XPC macros
+ /// that are defined as:
+ ///
+ /// #define dispatch_retain(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); (void)[_o retain]; })
+ /// #define dispatch_release(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); [_o release]; })
+ /// #define xpc_retain(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o retain]; })
+ /// #define xpc_release(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o release]; })
+ ///
+ /// and, if so, return the top container (the StmtExpr) and the macro
+ /// argument expression.
+ void checkForGCDOrXPC(ObjCMessageExpr *Msg, Expr *&RecContainer,
+ Expr *&Rec, SourceRange &RecRange) {
+ SourceLocation Loc = Msg->getExprLoc();
+ if (!Loc.isMacroID())
+ return;
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ StringRef MacroName = Lexer::getImmediateMacroName(Loc, SM,
+ Pass.Ctx.getLangOpts());
+ bool isGCDOrXPC = llvm::StringSwitch<bool>(MacroName)
+ .Case("dispatch_retain", true)
+ .Case("dispatch_release", true)
+ .Case("xpc_retain", true)
+ .Case("xpc_release", true)
+ .Default(false);
+ if (!isGCDOrXPC)
+ return;
+
+ StmtExpr *StmtE = 0;
+ Stmt *S = Msg;
+ while (S) {
+ if (StmtExpr *SE = dyn_cast<StmtExpr>(S)) {
+ StmtE = SE;
+ break;
+ }
+ S = StmtMap->getParent(S);
+ }
+
+ if (!StmtE)
+ return;
+
+ Stmt::child_range StmtExprChild = StmtE->children();
+ if (!StmtExprChild)
+ return;
+ CompoundStmt *CompS = dyn_cast_or_null<CompoundStmt>(*StmtExprChild);
+ if (!CompS)
+ return;
+
+ Stmt::child_range CompStmtChild = CompS->children();
+ if (!CompStmtChild)
+ return;
+ DeclStmt *DeclS = dyn_cast_or_null<DeclStmt>(*CompStmtChild);
+ if (!DeclS)
+ return;
+ if (!DeclS->isSingleDecl())
+ return;
+ VarDecl *VD = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl());
+ if (!VD)
+ return;
+ Expr *Init = VD->getInit();
+ if (!Init)
+ return;
+
+ RecContainer = StmtE;
+ Rec = Init->IgnoreParenImpCasts();
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Rec))
+ Rec = EWC->getSubExpr()->IgnoreParenImpCasts();
+ RecRange = Rec->getSourceRange();
+ if (SM.isMacroArgExpansion(RecRange.getBegin()))
+ RecRange.setBegin(SM.getImmediateSpellingLoc(RecRange.getBegin()));
+ if (SM.isMacroArgExpansion(RecRange.getEnd()))
+ RecRange.setEnd(SM.getImmediateSpellingLoc(RecRange.getEnd()));
+ }
+
void clearDiagnostics(SourceLocation loc) const {
Pass.TA.clearDiagnostic(diag::err_arc_illegal_explicit_message,
diag::err_unavailable,
@@ -156,12 +240,14 @@ private:
if (!E) return false;
E = E->IgnoreParenCasts();
+
+ // Also look through property-getter sugar.
+ if (PseudoObjectExpr *pseudoOp = dyn_cast<PseudoObjectExpr>(E))
+ E = pseudoOp->getResultExpr()->IgnoreImplicit();
+
if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E))
return (ME->isInstanceMessage() && ME->getSelector() == DelegateSel);
- if (ObjCPropertyRefExpr *propE = dyn_cast<ObjCPropertyRefExpr>(E))
- return propE->getGetterSelector() == DelegateSel;
-
return false;
}
@@ -211,7 +297,7 @@ private:
} // anonymous namespace
-void trans::removeRetainReleaseDealloc(MigrationPass &pass) {
+void trans::removeRetainReleaseDeallocFinalize(MigrationPass &pass) {
BodyTransform<RetainReleaseDeallocRemover> trans(pass);
trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
}
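With the StmtExpr walk above, the whole macro invocation is treated as
the receiver container. An illustrative result:

    // Before:
    dispatch_release(queue);
    // After migration the entire call is removed; inside @finally it is
    // instead replaced by an assignment such as:
    queue = nil;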
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
index 69fb2e8..48437c7 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -38,6 +38,7 @@
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace arcmt;
@@ -48,7 +49,7 @@ namespace {
class UnbridgedCastRewriter : public RecursiveASTVisitor<UnbridgedCastRewriter>{
MigrationPass &Pass;
IdentifierInfo *SelfII;
- llvm::OwningPtr<ParentMap> StmtMap;
+ OwningPtr<ParentMap> StmtMap;
public:
UnbridgedCastRewriter(MigrationPass &pass) : Pass(pass) {
@@ -128,6 +129,21 @@ private:
if (fname.endswith("Retain") ||
fname.find("Create") != StringRef::npos ||
fname.find("Copy") != StringRef::npos) {
+ // Do not migrate a pair of bridge transfer casts that would cancel
+ // each other out. Leave the code unchanged so the error gets the
+ // user's attention instead.
+ if (FD->getName() == "CFRetain" &&
+ FD->getNumParams() == 1 &&
+ FD->getParent()->isTranslationUnit() &&
+ FD->getLinkage() == ExternalLinkage) {
+ Expr *Arg = callE->getArg(0);
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ const Expr *sub = ICE->getSubExpr();
+ QualType T = sub->getType();
+ if (T->isObjCObjectPointerType())
+ return;
+ }
+ }
castToObjCObject(E, /*retained=*/true);
return;
}
@@ -179,7 +195,7 @@ private:
TA.insertAfterToken(CCE->getLParenLoc(), bridge);
} else {
SourceLocation insertLoc = E->getSubExpr()->getLocStart();
- llvm::SmallString<128> newCast;
+ SmallString<128> newCast;
newCast += '(';
newCast += bridge;
newCast += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
@@ -236,7 +252,15 @@ private:
}
}
- if (ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(E->getSubExpr())){
+ Expr *subExpr = E->getSubExpr();
+
+ // Look through pseudo-object expressions.
+ if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(subExpr)) {
+ subExpr = pseudo->getResultExpr();
+ assert(subExpr && "no result for pseudo-object of non-void type?");
+ }
+
+ if (ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(subExpr)) {
if (implCE->getCastKind() == CK_ARCConsumeObject)
return rewriteToBridgedCast(E, OBC_BridgeRetained);
if (implCE->getCastKind() == CK_ARCReclaimReturnedObject)
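An illustrative cast rewrite, including the new CFRetain special case
(the CF calls shown are hypothetical):

    // A +1 CF result cast to an ObjC pointer gets a transfer cast:
    NSString *s = (NSString *)CFStringCreateCopy(NULL, cfStr);
    // becomes
    NSString *s = (__bridge_transfer NSString *)CFStringCreateCopy(NULL, cfStr);

    // But CFRetain applied to an ObjC object pointer is left alone, so
    // the resulting ARC error keeps the user's attention.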
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
index e2aa6ff..60ed32a 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
@@ -54,7 +54,11 @@ public:
Transaction Trans(Pass.TA);
Pass.TA.clearDiagnostic(diag::err_arc_unused_init_message,
ME->getExprLoc());
- Pass.TA.insert(ME->getExprLoc(), "self = ");
+ SourceRange ExprRange = ME->getSourceRange();
+ Pass.TA.insert(ExprRange.getBegin(), "if (!(self = ");
+ std::string retStr = ")) return ";
+ retStr += getNilString(Pass.Ctx);
+ Pass.TA.insertAfterToken(ExprRange.getEnd(), retStr);
}
return true;
}
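The rewritten form now preserves the early-return init idiom, e.g.:

    // Before:
    [self initWithFrame:frame];
    // After:
    if (!(self = [self initWithFrame:frame])) return nil;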
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
index 1dbe811..d1f08aa 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -31,9 +31,13 @@ class ZeroOutInDeallocRemover :
llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*> SynthesizedProperties;
ImplicitParamDecl *SelfD;
ExprSet Removables;
+ Selector FinalizeSel;
public:
- ZeroOutInDeallocRemover(MigrationPass &pass) : Pass(pass), SelfD(0) { }
+ ZeroOutInDeallocRemover(MigrationPass &pass) : Pass(pass), SelfD(0) {
+ FinalizeSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize"));
+ }
bool VisitObjCMessageExpr(ObjCMessageExpr *ME) {
ASTContext &Ctx = Pass.Ctx;
@@ -74,6 +78,15 @@ public:
return true;
}
+ bool VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
+ if (isZeroingPropIvar(POE) && isRemovable(POE)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(POE);
+ }
+
+ return true;
+ }
+
bool VisitBinaryOperator(BinaryOperator *BOE) {
if (isZeroingPropIvar(BOE) && isRemovable(BOE)) {
Transaction Trans(Pass.TA);
@@ -84,7 +97,8 @@ public:
}
bool TraverseObjCMethodDecl(ObjCMethodDecl *D) {
- if (D->getMethodFamily() != OMF_dealloc)
+ if (D->getMethodFamily() != OMF_dealloc &&
+ !(D->isInstanceMethod() && D->getSelector() == FinalizeSel))
return true;
if (!D->hasBody())
return true;
@@ -137,17 +151,21 @@ private:
}
bool isZeroingPropIvar(Expr *E) {
- BinaryOperator *BOE = dyn_cast_or_null<BinaryOperator>(E);
- if (!BOE) return false;
+ E = E->IgnoreParens();
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E))
+ return isZeroingPropIvar(BO);
+ if (PseudoObjectExpr *PO = dyn_cast<PseudoObjectExpr>(E))
+ return isZeroingPropIvar(PO);
+ return false;
+ }
+ bool isZeroingPropIvar(BinaryOperator *BOE) {
if (BOE->getOpcode() == BO_Comma)
return isZeroingPropIvar(BOE->getLHS()) &&
isZeroingPropIvar(BOE->getRHS());
if (BOE->getOpcode() != BO_Assign)
- return false;
-
- ASTContext &Ctx = Pass.Ctx;
+ return false;
Expr *LHS = BOE->getLHS();
if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(LHS)) {
@@ -167,31 +185,44 @@ private:
if (!IvarBacksPropertySynthesis)
return false;
}
- else if (ObjCPropertyRefExpr *PropRefExp = dyn_cast<ObjCPropertyRefExpr>(LHS)) {
- // TODO: Using implicit property decl.
- if (PropRefExp->isImplicitProperty())
- return false;
- if (ObjCPropertyDecl *PDecl = PropRefExp->getExplicitProperty()) {
- if (!SynthesizedProperties.count(PDecl))
- return false;
- }
- }
else
return false;
- Expr *RHS = BOE->getRHS();
- bool RHSIsNull = RHS->isNullPointerConstant(Ctx,
- Expr::NPC_ValueDependentIsNull);
- if (RHSIsNull)
+ return isZero(BOE->getRHS());
+ }
+
+ bool isZeroingPropIvar(PseudoObjectExpr *PO) {
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(PO->getSyntacticForm());
+ if (!BO) return false;
+ if (BO->getOpcode() != BO_Assign) return false;
+
+ ObjCPropertyRefExpr *PropRefExp =
+ dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParens());
+ if (!PropRefExp) return false;
+
+ // TODO: Using implicit property decl.
+ if (PropRefExp->isImplicitProperty())
+ return false;
+
+ if (ObjCPropertyDecl *PDecl = PropRefExp->getExplicitProperty()) {
+ if (!SynthesizedProperties.count(PDecl))
+ return false;
+ }
+
+ return isZero(cast<OpaqueValueExpr>(BO->getRHS())->getSourceExpr());
+ }
+
+ bool isZero(Expr *E) {
+ if (E->isNullPointerConstant(Pass.Ctx, Expr::NPC_ValueDependentIsNull))
return true;
- return isZeroingPropIvar(RHS);
+ return isZeroingPropIvar(E);
}
};
} // anonymous namespace
-void trans::removeZeroOutPropsInDealloc(MigrationPass &pass) {
+void trans::removeZeroOutPropsInDeallocFinalize(MigrationPass &pass) {
ZeroOutInDeallocRemover trans(pass);
trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
}
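Property assignments now reach the AST as PseudoObjectExprs, hence the
new visitor. The net effect, sketched on hypothetical code:

    - (void)dealloc {
      self.delegate = nil;   // removed: zeroing a synthesized property
      _ivar = 0;             // removed: zeroing a property-backing ivar
      [super dealloc];       // handled by the retain/release pass
    }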
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
index ec676e9..0ecfeb5 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
@@ -122,6 +122,8 @@ public:
ASTContext &ctx, Preprocessor &PP)
: CapturedDiags(capturedDiags), Ctx(ctx), PP(PP), IsInTransaction(false) { }
+ ASTContext &getASTContext() { return Ctx; }
+
void startTransaction();
bool commitTransaction();
void abortTransaction();
@@ -674,6 +676,12 @@ void TransformActions::reportError(StringRef error, SourceLocation loc,
SourceRange range) {
assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
"Errors should be emitted out of a transaction");
+
+ SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
+ getASTContext().getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return;
+
// FIXME: Use a custom category name to distinguish rewriter errors.
std::string rewriteErr = "[rewriter] ";
rewriteErr += error;
@@ -684,10 +692,35 @@ void TransformActions::reportError(StringRef error, SourceLocation loc,
ReportedErrors = true;
}
+void TransformActions::reportWarning(StringRef warning, SourceLocation loc,
+ SourceRange range) {
+ assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
+ "Warning should be emitted out of a transaction");
+
+ SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
+ getASTContext().getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return;
+
+ // FIXME: Use a custom category name to distinguish rewriter errors.
+ std::string rewriterWarn = "[rewriter] ";
+ rewriterWarn += warning;
+ unsigned diagID
+ = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Warning,
+ rewriterWarn);
+ Diags.Report(loc, diagID) << range;
+}
+
void TransformActions::reportNote(StringRef note, SourceLocation loc,
SourceRange range) {
assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
"Errors should be emitted out of a transaction");
+
+ SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
+ getASTContext().getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return;
+
// FIXME: Use a custom category name to distinguish rewriter errors.
std::string rewriteNote = "[rewriter] ";
rewriteNote += note;
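All three report entry points now share the same filter: a diagnostic
whose expansion location falls inside a system header is dropped, keeping
migrator output focused on user code. A sketch (header and function are
hypothetical):

    // Declared in <SomeKit/SomeKit.h>, a system header:
    NSString *SKCopyName(void);
    // A migration problem diagnosed at that declaration's location is
    // no longer reported; the same problem in user code still is.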
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
index 4244faf..d342d1a 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
@@ -12,8 +12,6 @@
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/AST/ParentMap.h"
-#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/Lex/Lexer.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringSwitch.h"
@@ -24,62 +22,35 @@ using namespace clang;
using namespace arcmt;
using namespace trans;
+ASTTraverser::~ASTTraverser() { }
+
//===----------------------------------------------------------------------===//
// Helpers.
//===----------------------------------------------------------------------===//
-/// \brief True if the class is one that does not support weak.
-static bool isClassInWeakBlacklist(ObjCInterfaceDecl *cls) {
- if (!cls)
+bool trans::canApplyWeak(ASTContext &Ctx, QualType type,
+ bool AllowOnUnknownClass) {
+ if (!Ctx.getLangOpts().ObjCRuntimeHasWeak)
return false;
- bool inList = llvm::StringSwitch<bool>(cls->getName())
- .Case("NSColorSpace", true)
- .Case("NSFont", true)
- .Case("NSFontPanel", true)
- .Case("NSImage", true)
- .Case("NSLazyBrowserCell", true)
- .Case("NSWindow", true)
- .Case("NSWindowController", true)
- .Case("NSMenuView", true)
- .Case("NSPersistentUIWindowInfo", true)
- .Case("NSTableCellView", true)
- .Case("NSATSTypeSetter", true)
- .Case("NSATSGlyphStorage", true)
- .Case("NSLineFragmentRenderingContext", true)
- .Case("NSAttributeDictionary", true)
- .Case("NSParagraphStyle", true)
- .Case("NSTextTab", true)
- .Case("NSSimpleHorizontalTypesetter", true)
- .Case("_NSCachedAttributedString", true)
- .Case("NSStringDrawingTextStorage", true)
- .Case("NSTextView", true)
- .Case("NSSubTextStorage", true)
- .Default(false);
-
- if (inList)
- return true;
-
- return isClassInWeakBlacklist(cls->getSuperClass());
-}
-
-bool trans::canApplyWeak(ASTContext &Ctx, QualType type) {
- if (!Ctx.getLangOptions().ObjCRuntimeHasWeak)
+ QualType T = type;
+ if (T.isNull())
return false;
- QualType T = type;
+ // On iOS it is always safe to use 'weak'.
+ if (Ctx.getTargetInfo().getTriple().getOS() == llvm::Triple::IOS)
+ AllowOnUnknownClass = true;
+
while (const PointerType *ptr = T->getAs<PointerType>())
T = ptr->getPointeeType();
if (const ObjCObjectPointerType *ObjT = T->getAs<ObjCObjectPointerType>()) {
ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl();
- if (!Class || Class->getName() == "NSObject")
+ if (!AllowOnUnknownClass && (!Class || Class->getName() == "NSObject"))
return false; // id/NSObject is not safe for weak.
- if (Class->isForwardDecl())
+ if (!AllowOnUnknownClass && !Class->hasDefinition())
return false; // forward classes are not verifiable, therefore not safe.
if (Class->isArcWeakrefUnavailable())
return false;
- if (isClassInWeakBlacklist(Class))
- return false;
}
return true;
@@ -105,11 +76,10 @@ SourceLocation trans::findSemiAfterLocation(SourceLocation loc,
ASTContext &Ctx) {
SourceManager &SM = Ctx.getSourceManager();
if (loc.isMacroID()) {
- if (!Lexer::isAtEndOfMacroExpansion(loc, SM, Ctx.getLangOptions()))
+ if (!Lexer::isAtEndOfMacroExpansion(loc, SM, Ctx.getLangOpts(), &loc))
return SourceLocation();
- loc = SM.getExpansionRange(loc).second;
}
- loc = Lexer::getLocForEndOfToken(loc, /*Offset=*/0, SM, Ctx.getLangOptions());
+ loc = Lexer::getLocForEndOfToken(loc, /*Offset=*/0, SM, Ctx.getLangOpts());
// Break down the source location.
std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
@@ -124,7 +94,7 @@ SourceLocation trans::findSemiAfterLocation(SourceLocation loc,
// Lex from the start of the given location.
Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
- Ctx.getLangOptions(),
+ Ctx.getLangOpts(),
file.begin(), tokenBegin, file.end());
Token tok;
lexer.LexFromRawLexer(tok);
@@ -189,7 +159,6 @@ class ReferenceClear : public RecursiveASTVisitor<ReferenceClear> {
public:
ReferenceClear(ExprSet &refs) : Refs(refs) { }
bool VisitDeclRefExpr(DeclRefExpr *E) { Refs.erase(E); return true; }
- bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) { Refs.erase(E); return true; }
};
class ReferenceCollector : public RecursiveASTVisitor<ReferenceCollector> {
@@ -205,12 +174,6 @@ public:
Refs.insert(E);
return true;
}
-
- bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
- if (E->getDecl() == Dcl)
- Refs.insert(E);
- return true;
- }
};
class RemovablesCollector : public RecursiveASTVisitor<RemovablesCollector> {
@@ -290,27 +253,290 @@ void trans::collectRemovables(Stmt *S, ExprSet &exprs) {
}
//===----------------------------------------------------------------------===//
+// MigrationContext
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ASTTransform : public RecursiveASTVisitor<ASTTransform> {
+ MigrationContext &MigrateCtx;
+ typedef RecursiveASTVisitor<ASTTransform> base;
+
+public:
+ ASTTransform(MigrationContext &MigrateCtx) : MigrateCtx(MigrateCtx) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseObjCImplementationDecl(ObjCImplementationDecl *D) {
+ ObjCImplementationContext ImplCtx(MigrateCtx, D);
+ for (MigrationContext::traverser_iterator
+ I = MigrateCtx.traversers_begin(),
+ E = MigrateCtx.traversers_end(); I != E; ++I)
+ (*I)->traverseObjCImplementation(ImplCtx);
+
+ return base::TraverseObjCImplementationDecl(D);
+ }
+
+ bool TraverseStmt(Stmt *rootS) {
+ if (!rootS)
+ return true;
+
+ BodyContext BodyCtx(MigrateCtx, rootS);
+ for (MigrationContext::traverser_iterator
+ I = MigrateCtx.traversers_begin(),
+ E = MigrateCtx.traversers_end(); I != E; ++I)
+ (*I)->traverseBody(BodyCtx);
+
+ return true;
+ }
+};
+
+}
+
+MigrationContext::~MigrationContext() {
+ for (traverser_iterator
+ I = traversers_begin(), E = traversers_end(); I != E; ++I)
+ delete *I;
+}
+
+bool MigrationContext::isGCOwnedNonObjC(QualType T) {
+ while (!T.isNull()) {
+ if (const AttributedType *AttrT = T->getAs<AttributedType>()) {
+ if (AttrT->getAttrKind() == AttributedType::attr_objc_ownership)
+ return !AttrT->getModifiedType()->isObjCRetainableType();
+ }
+
+ if (T->isArrayType())
+ T = Pass.Ctx.getBaseElementType(T);
+ else if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+ else
+ break;
+ }
+
+ return false;
+}
+
+bool MigrationContext::rewritePropertyAttribute(StringRef fromAttr,
+ StringRef toAttr,
+ SourceLocation atLoc) {
+ if (atLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Pass.Ctx.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::at)) return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (StringRef(tok.getRawIdentifierData(), tok.getLength())
+ != "property")
+ return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::l_paren)) return false;
+
+ Token BeforeTok = tok;
+ Token AfterTok;
+ AfterTok.startToken();
+ SourceLocation AttrLoc;
+
+ lexer.LexFromRawLexer(tok);
+ if (tok.is(tok::r_paren))
+ return false;
+
+ while (1) {
+ if (tok.isNot(tok::raw_identifier)) return false;
+ StringRef ident(tok.getRawIdentifierData(), tok.getLength());
+ if (ident == fromAttr) {
+ if (!toAttr.empty()) {
+ Pass.TA.replaceText(tok.getLocation(), fromAttr, toAttr);
+ return true;
+ }
+ // We want to remove the attribute.
+ AttrLoc = tok.getLocation();
+ }
+
+ do {
+ lexer.LexFromRawLexer(tok);
+ if (AttrLoc.isValid() && AfterTok.is(tok::unknown))
+ AfterTok = tok;
+ } while (tok.isNot(tok::comma) && tok.isNot(tok::r_paren));
+ if (tok.is(tok::r_paren))
+ break;
+ if (AttrLoc.isInvalid())
+ BeforeTok = tok;
+ lexer.LexFromRawLexer(tok);
+ }
+
+ if (toAttr.empty() && AttrLoc.isValid() && AfterTok.isNot(tok::unknown)) {
+ // We want to remove the attribute.
+ if (BeforeTok.is(tok::l_paren) && AfterTok.is(tok::r_paren)) {
+ Pass.TA.remove(SourceRange(BeforeTok.getLocation(),
+ AfterTok.getLocation()));
+ } else if (BeforeTok.is(tok::l_paren) && AfterTok.is(tok::comma)) {
+ Pass.TA.remove(SourceRange(AttrLoc, AfterTok.getLocation()));
+ } else {
+ Pass.TA.remove(SourceRange(BeforeTok.getLocation(), AttrLoc));
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+bool MigrationContext::addPropertyAttribute(StringRef attr,
+ SourceLocation atLoc) {
+ if (atLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Pass.Ctx.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::at)) return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (StringRef(tok.getRawIdentifierData(), tok.getLength())
+ != "property")
+ return false;
+ lexer.LexFromRawLexer(tok);
+
+ if (tok.isNot(tok::l_paren)) {
+ Pass.TA.insert(tok.getLocation(), std::string("(") + attr.str() + ") ");
+ return true;
+ }
+
+ lexer.LexFromRawLexer(tok);
+ if (tok.is(tok::r_paren)) {
+ Pass.TA.insert(tok.getLocation(), attr);
+ return true;
+ }
+
+ if (tok.isNot(tok::raw_identifier)) return false;
+
+ Pass.TA.insert(tok.getLocation(), std::string(attr) + ", ");
+ return true;
+}
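
Both property-attribute helpers above re-lex the `@property` declaration from raw file text rather than walking the AST: find '@', 'property', '(', then scan identifiers. As a minimal standalone sketch of that idea (plain std::string scanning instead of Clang's Lexer, with hypothetical names throughout), rewriting one attribute inside the parenthesized list can look like this:

#include <cctype>
#include <iostream>
#include <string>

// Replace fromAttr with toAttr inside "@property (...)", whole-word only.
// Returns true if a rewrite happened; a sketch, not Clang's algorithm.
static bool rewriteAttr(std::string &line, const std::string &fromAttr,
                        const std::string &toAttr) {
  std::string::size_type open = line.find('(');
  std::string::size_type close = line.find(')');
  if (open == std::string::npos || close == std::string::npos || close < open)
    return false;                       // no attribute list present
  std::string::size_type pos = line.find(fromAttr, open + 1);
  if (pos == std::string::npos || pos >= close)
    return false;                       // attribute not in the list
  std::string::size_type end = pos + fromAttr.size();
  // Require whole-identifier matches, like the raw_identifier checks above.
  if (std::isalnum((unsigned char)line[pos - 1]) || line[pos - 1] == '_')
    return false;
  if (std::isalnum((unsigned char)line[end]) || line[end] == '_')
    return false;
  line.replace(pos, fromAttr.size(), toAttr);
  return true;
}

int main() {
  std::string line = "@property (retain, nonatomic) NSString *name;";
  if (rewriteAttr(line, "retain", "strong"))
    std::cout << line << "\n"; // @property (strong, nonatomic) NSString *name;
}

The real code additionally handles removal (tracking the tokens before and after the attribute so the surrounding comma or the whole parenthesized list can be deleted), which the sketch omits.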
+
+void MigrationContext::traverse(TranslationUnitDecl *TU) {
+ for (traverser_iterator
+ I = traversers_begin(), E = traversers_end(); I != E; ++I)
+ (*I)->traverseTU(*this);
+
+ ASTTransform(*this).TraverseDecl(TU);
+}
+
+static void GCRewriteFinalize(MigrationPass &pass) {
+ ASTContext &Ctx = pass.Ctx;
+ TransformActions &TA = pass.TA;
+ DeclContext *DC = Ctx.getTranslationUnitDecl();
+ Selector FinalizeSel =
+ Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize"));
+
+ typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl>
+ impl_iterator;
+ for (impl_iterator I = impl_iterator(DC->decls_begin()),
+ E = impl_iterator(DC->decls_end()); I != E; ++I) {
+ for (ObjCImplementationDecl::instmeth_iterator
+ MI = (*I)->instmeth_begin(),
+ ME = (*I)->instmeth_end(); MI != ME; ++MI) {
+ ObjCMethodDecl *MD = *MI;
+ if (!MD->hasBody())
+ continue;
+
+ if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) {
+ ObjCMethodDecl *FinalizeM = MD;
+ Transaction Trans(TA);
+ TA.insert(FinalizeM->getSourceRange().getBegin(),
+ "#if !__has_feature(objc_arc)\n");
+ CharSourceRange::getTokenRange(FinalizeM->getSourceRange());
+ const SourceManager &SM = pass.Ctx.getSourceManager();
+ const LangOptions &LangOpts = pass.Ctx.getLangOpts();
+ bool Invalid;
+ std::string str = "\n#endif\n";
+ str += Lexer::getSourceText(
+ CharSourceRange::getTokenRange(FinalizeM->getSourceRange()),
+ SM, LangOpts, &Invalid);
+ TA.insertAfterToken(FinalizeM->getSourceRange().getEnd(), str);
+
+ break;
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
// getAllTransformations.
//===----------------------------------------------------------------------===//
+static void traverseAST(MigrationPass &pass) {
+ MigrationContext MigrateCtx(pass);
+
+ if (pass.isGCMigration()) {
+ MigrateCtx.addTraverser(new GCCollectableCallsTraverser);
+ MigrateCtx.addTraverser(new GCAttrsTraverser());
+ }
+ MigrateCtx.addTraverser(new PropertyRewriteTraverser());
+ MigrateCtx.addTraverser(new BlockObjCVariableTraverser());
+
+ MigrateCtx.traverse(pass.Ctx.getTranslationUnitDecl());
+}
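
traverseAST wires everything together: traversers are registered on a MigrationContext, which fans each traversal phase out to all of them and deletes them in its destructor, as shown earlier. A standalone sketch of that registration pattern, with hypothetical names and unique_ptr ownership standing in for the manual deletes:

#include <iostream>
#include <memory>
#include <vector>

struct Traverser {
  virtual ~Traverser() {}
  virtual void traverseTU() {}    // whole-translation-unit phase
  virtual void traverseBody() {}  // per-function-body phase
};

class Context {
  std::vector<std::unique_ptr<Traverser>> Traversers;
public:
  void add(Traverser *T) { Traversers.emplace_back(T); }
  void run() {
    for (auto &T : Traversers) T->traverseTU();
    for (auto &T : Traversers) T->traverseBody();
  }
};

struct PropertyRewriter : Traverser {
  void traverseTU() override { std::cout << "rewrite properties\n"; }
};

int main() {
  Context Ctx;
  Ctx.add(new PropertyRewriter);   // Context takes ownership
  Ctx.run();
}

As in traverseAST, each transformation only overrides the phases it cares about, and conditional registration (here, the GC-only traversers) just skips the add() call.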
+
static void independentTransforms(MigrationPass &pass) {
rewriteAutoreleasePool(pass);
- rewriteProperties(pass);
- removeRetainReleaseDealloc(pass);
+ removeRetainReleaseDeallocFinalize(pass);
rewriteUnusedInitDelegate(pass);
- removeZeroOutPropsInDealloc(pass);
+ removeZeroOutPropsInDeallocFinalize(pass);
makeAssignARCSafe(pass);
rewriteUnbridgedCasts(pass);
- rewriteBlockObjCVariable(pass);
checkAPIUses(pass);
+ traverseAST(pass);
}
-std::vector<TransformFn> arcmt::getAllTransformations() {
+std::vector<TransformFn> arcmt::getAllTransformations(
+ LangOptions::GCMode OrigGCMode,
+ bool NoFinalizeRemoval) {
std::vector<TransformFn> transforms;
+ if (OrigGCMode == LangOptions::GCOnly && NoFinalizeRemoval)
+ transforms.push_back(GCRewriteFinalize);
transforms.push_back(independentTransforms);
// This depends on previous transformations removing various expressions.
- transforms.push_back(removeEmptyStatementsAndDealloc);
+ transforms.push_back(removeEmptyStatementsAndDeallocFinalize);
return transforms;
}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
index 5e4db56..445c3e5 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
@@ -11,6 +11,7 @@
#define LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/ParentMap.h"
#include "llvm/ADT/DenseSet.h"
namespace clang {
@@ -25,6 +26,8 @@ namespace arcmt {
namespace trans {
+ class MigrationContext;
+
//===----------------------------------------------------------------------===//
// Transformations.
//===----------------------------------------------------------------------===//
@@ -32,21 +35,124 @@ namespace trans {
void rewriteAutoreleasePool(MigrationPass &pass);
void rewriteUnbridgedCasts(MigrationPass &pass);
void makeAssignARCSafe(MigrationPass &pass);
-void removeRetainReleaseDealloc(MigrationPass &pass);
-void removeZeroOutPropsInDealloc(MigrationPass &pass);
-void rewriteProperties(MigrationPass &pass);
-void rewriteBlockObjCVariable(MigrationPass &pass);
+void removeRetainReleaseDeallocFinalize(MigrationPass &pass);
+void removeZeroOutPropsInDeallocFinalize(MigrationPass &pass);
void rewriteUnusedInitDelegate(MigrationPass &pass);
void checkAPIUses(MigrationPass &pass);
-void removeEmptyStatementsAndDealloc(MigrationPass &pass);
+void removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass);
+
+class BodyContext {
+ MigrationContext &MigrateCtx;
+ ParentMap PMap;
+ Stmt *TopStmt;
+
+public:
+ BodyContext(MigrationContext &MigrateCtx, Stmt *S)
+ : MigrateCtx(MigrateCtx), PMap(S), TopStmt(S) {}
+
+ MigrationContext &getMigrationContext() { return MigrateCtx; }
+ ParentMap &getParentMap() { return PMap; }
+ Stmt *getTopStmt() { return TopStmt; }
+};
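
BodyContext eagerly builds a ParentMap over the statement it wraps, so traversers can walk upward from any node inside the body. A standalone sketch of the underlying idea, using a hypothetical Node type rather than clang::ParentMap:

#include <iostream>
#include <map>
#include <vector>

struct Node {
  const char *Name;
  std::vector<Node*> Children;
};

static void buildParentMap(Node *N, std::map<Node*, Node*> &Parents) {
  for (Node *C : N->Children) {
    Parents[C] = N;            // record the parent before descending
    buildParentMap(C, Parents);
  }
}

int main() {
  Node Ret{"return", {}};
  Node If{"if", {&Ret}};
  Node Body{"body", {&If}};
  std::map<Node*, Node*> Parents;
  buildParentMap(&Body, Parents);
  // Walk upward from the return statement, as getParentMap() enables.
  for (Node *N = &Ret; N; N = Parents.count(N) ? Parents[N] : nullptr)
    std::cout << N->Name << "\n";   // return, if, body
}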
+
+class ObjCImplementationContext {
+ MigrationContext &MigrateCtx;
+ ObjCImplementationDecl *ImpD;
+
+public:
+ ObjCImplementationContext(MigrationContext &MigrateCtx,
+ ObjCImplementationDecl *D)
+ : MigrateCtx(MigrateCtx), ImpD(D) {}
+
+ MigrationContext &getMigrationContext() { return MigrateCtx; }
+ ObjCImplementationDecl *getImplementationDecl() { return ImpD; }
+};
+
+class ASTTraverser {
+public:
+ virtual ~ASTTraverser();
+ virtual void traverseTU(MigrationContext &MigrateCtx) { }
+ virtual void traverseBody(BodyContext &BodyCtx) { }
+ virtual void traverseObjCImplementation(ObjCImplementationContext &ImplCtx) {}
+};
+
+class MigrationContext {
+ std::vector<ASTTraverser *> Traversers;
+
+public:
+ MigrationPass &Pass;
+
+ struct GCAttrOccurrence {
+ enum AttrKind { Weak, Strong } Kind;
+ SourceLocation Loc;
+ QualType ModifiedType;
+ Decl *Dcl;
+ /// \brief true if the attribute is owned, e.g. it is in a body and not just
+ /// in an interface.
+ bool FullyMigratable;
+ };
+ std::vector<GCAttrOccurrence> GCAttrs;
+ llvm::DenseSet<unsigned> AttrSet;
+ llvm::DenseSet<unsigned> RemovedAttrSet;
+
+ /// \brief Set of raw '@' locations for groups of 'assign' properties that
+ /// contain GC __weak.
+ llvm::DenseSet<unsigned> AtPropsWeak;
+
+ explicit MigrationContext(MigrationPass &pass) : Pass(pass) {}
+ ~MigrationContext();
+
+ typedef std::vector<ASTTraverser *>::iterator traverser_iterator;
+ traverser_iterator traversers_begin() { return Traversers.begin(); }
+ traverser_iterator traversers_end() { return Traversers.end(); }
+
+ void addTraverser(ASTTraverser *traverser) {
+ Traversers.push_back(traverser);
+ }
+
+ bool isGCOwnedNonObjC(QualType T);
+ bool removePropertyAttribute(StringRef fromAttr, SourceLocation atLoc) {
+ return rewritePropertyAttribute(fromAttr, StringRef(), atLoc);
+ }
+ bool rewritePropertyAttribute(StringRef fromAttr, StringRef toAttr,
+ SourceLocation atLoc);
+ bool addPropertyAttribute(StringRef attr, SourceLocation atLoc);
+
+ void traverse(TranslationUnitDecl *TU);
+
+ void dumpGCAttrs();
+};
+
+class PropertyRewriteTraverser : public ASTTraverser {
+public:
+ virtual void traverseObjCImplementation(ObjCImplementationContext &ImplCtx);
+};
+
+class BlockObjCVariableTraverser : public ASTTraverser {
+public:
+ virtual void traverseBody(BodyContext &BodyCtx);
+};
+
+// GC transformations
+
+class GCAttrsTraverser : public ASTTraverser {
+public:
+ virtual void traverseTU(MigrationContext &MigrateCtx);
+};
+
+class GCCollectableCallsTraverser : public ASTTraverser {
+public:
+ virtual void traverseBody(BodyContext &BodyCtx);
+};
//===----------------------------------------------------------------------===//
// Helpers.
//===----------------------------------------------------------------------===//
/// \brief Determine whether we can add weak to the given type.
-bool canApplyWeak(ASTContext &Ctx, QualType type);
+bool canApplyWeak(ASTContext &Ctx, QualType type,
+ bool AllowOnUnknownClass = false);
/// \brief 'Loc' is the end of a statement range. This returns the location
/// immediately after the semicolon following the statement.
diff --git a/contrib/llvm/tools/clang/lib/AST/APValue.cpp b/contrib/llvm/tools/clang/lib/AST/APValue.cpp
index 6f63a32..a31b3c5 100644
--- a/contrib/llvm/tools/clang/lib/AST/APValue.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/APValue.cpp
@@ -12,7 +12,11 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -20,49 +24,169 @@
using namespace clang;
namespace {
- struct LV {
- const Expr* Base;
+ struct LVBase {
+ llvm::PointerIntPair<APValue::LValueBase, 1, bool> BaseAndIsOnePastTheEnd;
CharUnits Offset;
+ unsigned PathLength;
+ unsigned CallIndex;
};
}
-APValue::APValue(const Expr* B) : Kind(Uninitialized) {
- MakeLValue(); setLValue(B, CharUnits::Zero());
-}
-
-const APValue &APValue::operator=(const APValue &RHS) {
- if (Kind != RHS.Kind) {
- MakeUninit();
- if (RHS.isInt())
- MakeInt();
- else if (RHS.isFloat())
- MakeFloat();
- else if (RHS.isVector())
- MakeVector();
- else if (RHS.isComplexInt())
- MakeComplexInt();
- else if (RHS.isComplexFloat())
- MakeComplexFloat();
- else if (RHS.isLValue())
- MakeLValue();
+struct APValue::LV : LVBase {
+ static const unsigned InlinePathSpace =
+ (MaxSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
+
+ /// Path - The sequence of base classes, fields and array indices to follow to
+ /// walk from Base to the subobject. When performing GCC-style folding, there
+ /// may not be such a path.
+ union {
+ LValuePathEntry Path[InlinePathSpace];
+ LValuePathEntry *PathPtr;
+ };
+
+ LV() { PathLength = (unsigned)-1; }
+ ~LV() { resizePath(0); }
+
+ void resizePath(unsigned Length) {
+ if (Length == PathLength)
+ return;
+ if (hasPathPtr())
+ delete [] PathPtr;
+ PathLength = Length;
+ if (hasPathPtr())
+ PathPtr = new LValuePathEntry[Length];
+ }
+
+ bool hasPath() const { return PathLength != (unsigned)-1; }
+ bool hasPathPtr() const { return hasPath() && PathLength > InlinePathSpace; }
+
+ LValuePathEntry *getPath() { return hasPathPtr() ? PathPtr : Path; }
+ const LValuePathEntry *getPath() const {
+ return hasPathPtr() ? PathPtr : Path;
+ }
+};
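
The LV layout above is a small-buffer optimization: paths of at most InlinePathSpace entries are stored in the object itself, and only longer ones allocate. A self-contained sketch of the same union trick, with hypothetical names and int entries in place of LValuePathEntry:

#include <cstring>
#include <iostream>

struct PathStorage {
  enum { InlineSpace = 4 };
  unsigned Length;
  union {
    int Inline[InlineSpace]; // Length <= InlineSpace: stored in the object
    int *Heap;               // Length >  InlineSpace: heap allocation
  };

  PathStorage() : Length(0) {}
  ~PathStorage() { resize(0); }

  bool onHeap() const { return Length > InlineSpace; }
  int *data() { return onHeap() ? Heap : Inline; }

  void resize(unsigned NewLen) {
    if (NewLen == Length) return;
    if (onHeap()) delete[] Heap;   // drop any previous spill
    Length = NewLen;
    if (onHeap()) Heap = new int[NewLen];
  }
};

int main() {
  PathStorage P;
  P.resize(3);                          // fits inline: no allocation
  std::memset(P.data(), 0, 3 * sizeof(int));
  std::cout << P.onHeap() << "\n";      // 0
  P.resize(8);                          // spills to the heap
  std::cout << P.onHeap() << "\n";      // 1
}

LV additionally reserves PathLength == (unsigned)-1 to mean "no path at all", which is why hasPath() checks for that sentinel before hasPathPtr() compares against the inline capacity.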
+
+namespace {
+ struct MemberPointerBase {
+ llvm::PointerIntPair<const ValueDecl*, 1, bool> MemberAndIsDerivedMember;
+ unsigned PathLength;
+ };
+}
+
+struct APValue::MemberPointerData : MemberPointerBase {
+ static const unsigned InlinePathSpace =
+ (MaxSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
+ typedef const CXXRecordDecl *PathElem;
+ union {
+ PathElem Path[InlinePathSpace];
+ PathElem *PathPtr;
+ };
+
+ MemberPointerData() { PathLength = 0; }
+ ~MemberPointerData() { resizePath(0); }
+
+ void resizePath(unsigned Length) {
+ if (Length == PathLength)
+ return;
+ if (hasPathPtr())
+ delete [] PathPtr;
+ PathLength = Length;
+ if (hasPathPtr())
+ PathPtr = new PathElem[Length];
}
- if (isInt())
+
+ bool hasPathPtr() const { return PathLength > InlinePathSpace; }
+
+ PathElem *getPath() { return hasPathPtr() ? PathPtr : Path; }
+ const PathElem *getPath() const {
+ return hasPathPtr() ? PathPtr : Path;
+ }
+};
+
+// FIXME: Reduce the malloc traffic here.
+
+APValue::Arr::Arr(unsigned NumElts, unsigned Size) :
+ Elts(new APValue[NumElts + (NumElts != Size ? 1 : 0)]),
+ NumElts(NumElts), ArrSize(Size) {}
+APValue::Arr::~Arr() { delete [] Elts; }
+
+APValue::StructData::StructData(unsigned NumBases, unsigned NumFields) :
+ Elts(new APValue[NumBases+NumFields]),
+ NumBases(NumBases), NumFields(NumFields) {}
+APValue::StructData::~StructData() {
+ delete [] Elts;
+}
+
+APValue::UnionData::UnionData() : Field(0), Value(new APValue) {}
+APValue::UnionData::~UnionData () {
+ delete Value;
+}
+
+APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
+ switch (RHS.getKind()) {
+ case Uninitialized:
+ break;
+ case Int:
+ MakeInt();
setInt(RHS.getInt());
- else if (isFloat())
+ break;
+ case Float:
+ MakeFloat();
setFloat(RHS.getFloat());
- else if (isVector())
+ break;
+ case Vector:
+ MakeVector();
setVector(((const Vec *)(const char *)RHS.Data)->Elts,
RHS.getVectorLength());
- else if (isComplexInt())
+ break;
+ case ComplexInt:
+ MakeComplexInt();
setComplexInt(RHS.getComplexIntReal(), RHS.getComplexIntImag());
- else if (isComplexFloat())
+ break;
+ case ComplexFloat:
+ MakeComplexFloat();
setComplexFloat(RHS.getComplexFloatReal(), RHS.getComplexFloatImag());
- else if (isLValue())
- setLValue(RHS.getLValueBase(), RHS.getLValueOffset());
- return *this;
+ break;
+ case LValue:
+ MakeLValue();
+ if (RHS.hasLValuePath())
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), RHS.getLValuePath(),
+ RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex());
+ else
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), NoLValuePath(),
+ RHS.getLValueCallIndex());
+ break;
+ case Array:
+ MakeArray(RHS.getArrayInitializedElts(), RHS.getArraySize());
+ for (unsigned I = 0, N = RHS.getArrayInitializedElts(); I != N; ++I)
+ getArrayInitializedElt(I) = RHS.getArrayInitializedElt(I);
+ if (RHS.hasArrayFiller())
+ getArrayFiller() = RHS.getArrayFiller();
+ break;
+ case Struct:
+ MakeStruct(RHS.getStructNumBases(), RHS.getStructNumFields());
+ for (unsigned I = 0, N = RHS.getStructNumBases(); I != N; ++I)
+ getStructBase(I) = RHS.getStructBase(I);
+ for (unsigned I = 0, N = RHS.getStructNumFields(); I != N; ++I)
+ getStructField(I) = RHS.getStructField(I);
+ break;
+ case Union:
+ MakeUnion();
+ setUnion(RHS.getUnionField(), RHS.getUnionValue());
+ break;
+ case MemberPointer:
+ MakeMemberPointer(RHS.getMemberPointerDecl(),
+ RHS.isMemberPointerToDerivedMember(),
+ RHS.getMemberPointerPath());
+ break;
+ case AddrLabelDiff:
+ MakeAddrLabelDiff();
+ setAddrLabelDiff(RHS.getAddrLabelDiffLHS(), RHS.getAddrLabelDiffRHS());
+ break;
+ }
}
-void APValue::MakeUninit() {
+void APValue::DestroyDataAndMakeUninit() {
if (Kind == Int)
((APSInt*)(char*)Data)->~APSInt();
else if (Kind == Float)
@@ -73,14 +197,31 @@ void APValue::MakeUninit() {
((ComplexAPSInt*)(char*)Data)->~ComplexAPSInt();
else if (Kind == ComplexFloat)
((ComplexAPFloat*)(char*)Data)->~ComplexAPFloat();
- else if (Kind == LValue) {
+ else if (Kind == LValue)
((LV*)(char*)Data)->~LV();
- }
+ else if (Kind == Array)
+ ((Arr*)(char*)Data)->~Arr();
+ else if (Kind == Struct)
+ ((StructData*)(char*)Data)->~StructData();
+ else if (Kind == Union)
+ ((UnionData*)(char*)Data)->~UnionData();
+ else if (Kind == MemberPointer)
+ ((MemberPointerData*)(char*)Data)->~MemberPointerData();
+ else if (Kind == AddrLabelDiff)
+ ((AddrLabelDiffData*)(char*)Data)->~AddrLabelDiffData();
Kind = Uninitialized;
}
+void APValue::swap(APValue &RHS) {
+ std::swap(Kind, RHS.Kind);
+ char TmpData[MaxSize];
+ memcpy(TmpData, Data, MaxSize);
+ memcpy(Data, RHS.Data, MaxSize);
+ memcpy(RHS.Data, TmpData, MaxSize);
+}
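
This swap works byte-wise because every APValue variant lives in the fixed-size, suitably aligned Data buffer, so exchanging raw bytes exchanges the values without running any constructors or destructors. The same move in isolation, as a sketch:

#include <cstring>
#include <iostream>

enum { MaxSize = 16 };

static void swapBuffers(char (&A)[MaxSize], char (&B)[MaxSize]) {
  char Tmp[MaxSize];
  std::memcpy(Tmp, A, MaxSize);  // three copies through a temporary
  std::memcpy(A, B, MaxSize);
  std::memcpy(B, Tmp, MaxSize);
}

int main() {
  char A[MaxSize] = "first", B[MaxSize] = "second";
  swapBuffers(A, B);
  std::cout << A << ' ' << B << "\n"; // second first
}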
+
void APValue::dump() const {
- print(llvm::errs());
+ dump(llvm::errs());
llvm::errs() << '\n';
}
@@ -92,9 +233,8 @@ static double GetApproxValue(const llvm::APFloat &F) {
return V.convertToDouble();
}
-void APValue::print(raw_ostream &OS) const {
+void APValue::dump(raw_ostream &OS) const {
switch (getKind()) {
- default: llvm_unreachable("Unknown APValue kind!");
case Uninitialized:
OS << "Uninitialized";
return;
@@ -105,9 +245,12 @@ void APValue::print(raw_ostream &OS) const {
OS << "Float: " << GetApproxValue(getFloat());
return;
case Vector:
- OS << "Vector: " << getVectorElt(0);
- for (unsigned i = 1; i != getVectorLength(); ++i)
- OS << ", " << getVectorElt(i);
+ OS << "Vector: ";
+ getVectorElt(0).dump(OS);
+ for (unsigned i = 1; i != getVectorLength(); ++i) {
+ OS << ", ";
+ getVectorElt(i).dump(OS);
+ }
return;
case ComplexInt:
OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag();
@@ -115,74 +258,350 @@ void APValue::print(raw_ostream &OS) const {
case ComplexFloat:
OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal())
<< ", " << GetApproxValue(getComplexFloatImag());
+ return;
case LValue:
OS << "LValue: <todo>";
return;
+ case Array:
+ OS << "Array: ";
+ for (unsigned I = 0, N = getArrayInitializedElts(); I != N; ++I) {
+ getArrayInitializedElt(I).dump(OS);
+ if (I != getArraySize() - 1) OS << ", ";
+ }
+ if (hasArrayFiller()) {
+ OS << getArraySize() - getArrayInitializedElts() << " x ";
+ getArrayFiller().dump(OS);
+ }
+ return;
+ case Struct:
+ OS << "Struct ";
+ if (unsigned N = getStructNumBases()) {
+ OS << " bases: ";
+ getStructBase(0).dump(OS);
+ for (unsigned I = 1; I != N; ++I) {
+ OS << ", ";
+ getStructBase(I).dump(OS);
+ }
+ }
+ if (unsigned N = getStructNumFields()) {
+ OS << " fields: ";
+ getStructField(0).dump(OS);
+ for (unsigned I = 1; I != N; ++I) {
+ OS << ", ";
+ getStructField(I).dump(OS);
+ }
+ }
+ return;
+ case Union:
+ OS << "Union: ";
+ getUnionValue().dump(OS);
+ return;
+ case MemberPointer:
+ OS << "MemberPointer: <todo>";
+ return;
+ case AddrLabelDiff:
+ OS << "AddrLabelDiff: <todo>";
+ return;
}
+ llvm_unreachable("Unknown APValue kind!");
}
-static void WriteShortAPValueToStream(raw_ostream& Out,
- const APValue& V) {
- switch (V.getKind()) {
- default: llvm_unreachable("Unknown APValue kind!");
+void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
+ switch (getKind()) {
case APValue::Uninitialized:
- Out << "Uninitialized";
- break;
+ Out << "<uninitialized>";
+ return;
case APValue::Int:
- Out << V.getInt();
- break;
+ if (Ty->isBooleanType())
+ Out << (getInt().getBoolValue() ? "true" : "false");
+ else
+ Out << getInt();
+ return;
case APValue::Float:
- Out << GetApproxValue(V.getFloat());
- break;
- case APValue::Vector:
- Out << '[';
- WriteShortAPValueToStream(Out, V.getVectorElt(0));
- for (unsigned i = 1; i != V.getVectorLength(); ++i) {
+ Out << GetApproxValue(getFloat());
+ return;
+ case APValue::Vector: {
+ Out << '{';
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ getVectorElt(0).printPretty(Out, Ctx, ElemTy);
+ for (unsigned i = 1; i != getVectorLength(); ++i) {
Out << ", ";
- WriteShortAPValueToStream(Out, V.getVectorElt(i));
+ getVectorElt(i).printPretty(Out, Ctx, ElemTy);
}
- Out << ']';
- break;
+ Out << '}';
+ return;
+ }
case APValue::ComplexInt:
- Out << V.getComplexIntReal() << "+" << V.getComplexIntImag() << "i";
- break;
+ Out << getComplexIntReal() << "+" << getComplexIntImag() << "i";
+ return;
case APValue::ComplexFloat:
- Out << GetApproxValue(V.getComplexFloatReal()) << "+"
- << GetApproxValue(V.getComplexFloatImag()) << "i";
- break;
- case APValue::LValue:
- Out << "LValue: <todo>";
- break;
+ Out << GetApproxValue(getComplexFloatReal()) << "+"
+ << GetApproxValue(getComplexFloatImag()) << "i";
+ return;
+ case APValue::LValue: {
+ LValueBase Base = getLValueBase();
+ if (!Base) {
+ Out << "0";
+ return;
+ }
+
+ bool IsReference = Ty->isReferenceType();
+ QualType InnerTy
+ = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();
+
+ if (!hasLValuePath()) {
+ // No lvalue path: just print the offset.
+ CharUnits O = getLValueOffset();
+ CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
+ if (!O.isZero()) {
+ if (IsReference)
+ Out << "*(";
+ if (O % S) {
+ Out << "(char*)";
+ S = CharUnits::One();
+ }
+ Out << '&';
+ } else if (!IsReference)
+ Out << '&';
+
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
+ Out << *VD;
+ else
+ Base.get<const Expr*>()->printPretty(Out, Ctx, 0,
+ Ctx.getPrintingPolicy());
+ if (!O.isZero()) {
+ Out << " + " << (O / S);
+ if (IsReference)
+ Out << ')';
+ }
+ return;
+ }
+
+ // We have an lvalue path. Print it out nicely.
+ if (!IsReference)
+ Out << '&';
+ else if (isLValueOnePastTheEnd())
+ Out << "*(&";
+
+ QualType ElemTy;
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
+ Out << *VD;
+ ElemTy = VD->getType();
+ } else {
+ const Expr *E = Base.get<const Expr*>();
+ E->printPretty(Out, Ctx, 0,Ctx.getPrintingPolicy());
+ ElemTy = E->getType();
+ }
+
+ ArrayRef<LValuePathEntry> Path = getLValuePath();
+ const CXXRecordDecl *CastToBase = 0;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (ElemTy->getAs<RecordType>()) {
+ // The lvalue refers to a class type, so the next path entry is a base
+ // or member.
+ const Decl *BaseOrMember =
+ BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
+ CastToBase = RD;
+ ElemTy = Ctx.getRecordType(RD);
+ } else {
+ const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
+ Out << ".";
+ if (CastToBase)
+ Out << *CastToBase << "::";
+ Out << *VD;
+ ElemTy = VD->getType();
+ }
+ } else {
+ // The lvalue must refer to an array.
+ Out << '[' << Path[I].ArrayIndex << ']';
+ ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
+ }
+ }
+
+ // Handle formatting of one-past-the-end lvalues.
+ if (isLValueOnePastTheEnd()) {
+ // FIXME: If CastToBase is non-0, we should prefix the output with
+ // "(CastToBase*)".
+ Out << " + 1";
+ if (IsReference)
+ Out << ')';
+ }
+ return;
}
+ case APValue::Array: {
+ const ArrayType *AT = Ctx.getAsArrayType(Ty);
+ QualType ElemTy = AT->getElementType();
+ Out << '{';
+ if (unsigned N = getArrayInitializedElts()) {
+ getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy);
+ for (unsigned I = 1; I != N; ++I) {
+ Out << ", ";
+ if (I == 10) {
+ // Avoid printing out the entire contents of large arrays.
+ Out << "...";
+ break;
+ }
+ getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy);
+ }
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::Struct: {
+ Out << '{';
+ const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ bool First = true;
+ if (unsigned N = getStructNumBases()) {
+ const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
+ CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
+ for (unsigned I = 0; I != N; ++I, ++BI) {
+ assert(BI != CD->bases_end());
+ if (!First)
+ Out << ", ";
+ getStructBase(I).printPretty(Out, Ctx, BI->getType());
+ First = false;
+ }
+ }
+ for (RecordDecl::field_iterator FI = RD->field_begin();
+ FI != RD->field_end(); ++FI) {
+ if (!First)
+ Out << ", ";
+ if ((*FI)->isUnnamedBitfield()) continue;
+ getStructField((*FI)->getFieldIndex()).
+ printPretty(Out, Ctx, (*FI)->getType());
+ First = false;
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::Union:
+ Out << '{';
+ if (const FieldDecl *FD = getUnionField()) {
+ Out << "." << *FD << " = ";
+ getUnionValue().printPretty(Out, Ctx, FD->getType());
+ }
+ Out << '}';
+ return;
+ case APValue::MemberPointer:
+ // FIXME: This is not enough to unambiguously identify the member in a
+ // multiple-inheritance scenario.
+ if (const ValueDecl *VD = getMemberPointerDecl()) {
+ Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD;
+ return;
+ }
+ Out << "0";
+ return;
+ case APValue::AddrLabelDiff:
+ Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName();
+ Out << " - ";
+ Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName();
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
+std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const {
+ std::string Result;
+ llvm::raw_string_ostream Out(Result);
+ printPretty(Out, Ctx, Ty);
+ Out.flush();
+ return Result;
+}
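
getAsString is the usual string-backed-stream idiom: print into a raw_string_ostream over a local std::string, flush, and return the backing string. The plain standard-library equivalent, for illustration only (std::ostringstream needs no explicit flush, unlike raw_string_ostream):

#include <iostream>
#include <sstream>
#include <string>

static std::string asString(int Value) {
  std::ostringstream Out;
  Out << "Int: " << Value;  // stands in for printPretty(Out, Ctx, Ty)
  return Out.str();
}

int main() { std::cout << asString(42) << "\n"; }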
+
+const APValue::LValueBase APValue::getLValueBase() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getPointer();
+}
+
+bool APValue::isLValueOnePastTheEnd() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getInt();
}
-const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
- const APValue &V) {
- llvm::SmallString<64> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
- WriteShortAPValueToStream(Out, V);
- return DB << Out.str();
+CharUnits &APValue::getLValueOffset() {
+ assert(isLValue() && "Invalid accessor");
+ return ((LV*)(void*)Data)->Offset;
}
-const Expr* APValue::getLValueBase() const {
+bool APValue::hasLValuePath() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const void*)Data)->Base;
+ return ((const LV*)(const char*)Data)->hasPath();
}
-CharUnits APValue::getLValueOffset() const {
- assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const void*)Data)->Offset;
+ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
+ assert(isLValue() && hasLValuePath() && "Invalid accessor");
+ const LV &LVal = *((const LV*)(const char*)Data);
+ return ArrayRef<LValuePathEntry>(LVal.getPath(), LVal.PathLength);
}
-void APValue::setLValue(const Expr *B, const CharUnits &O) {
+unsigned APValue::getLValueCallIndex() const {
assert(isLValue() && "Invalid accessor");
- ((LV*)(char*)Data)->Base = B;
- ((LV*)(char*)Data)->Offset = O;
+ return ((const LV*)(const char*)Data)->CallIndex;
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
+ unsigned CallIndex) {
+ assert(isLValue() && "Invalid accessor");
+ LV &LVal = *((LV*)(char*)Data);
+ LVal.BaseAndIsOnePastTheEnd.setPointer(B);
+ LVal.BaseAndIsOnePastTheEnd.setInt(false);
+ LVal.Offset = O;
+ LVal.CallIndex = CallIndex;
+ LVal.resizePath((unsigned)-1);
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O,
+ ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
+ unsigned CallIndex) {
+ assert(isLValue() && "Invalid accessor");
+ LV &LVal = *((LV*)(char*)Data);
+ LVal.BaseAndIsOnePastTheEnd.setPointer(B);
+ LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd);
+ LVal.Offset = O;
+ LVal.CallIndex = CallIndex;
+ LVal.resizePath(Path.size());
+ memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry));
+}
+
+const ValueDecl *APValue::getMemberPointerDecl() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return MPD.MemberAndIsDerivedMember.getPointer();
+}
+
+bool APValue::isMemberPointerToDerivedMember() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return MPD.MemberAndIsDerivedMember.getInt();
+}
+
+ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return ArrayRef<const CXXRecordDecl*>(MPD.getPath(), MPD.PathLength);
}
void APValue::MakeLValue() {
assert(isUninit() && "Bad state change");
+ assert(sizeof(LV) <= MaxSize && "LV too big");
new ((void*)(char*)Data) LV();
Kind = LValue;
}
+void APValue::MakeArray(unsigned InitElts, unsigned Size) {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) Arr(InitElts, Size);
+ Kind = Array;
+}
+
+void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl*> Path) {
+ assert(isUninit() && "Bad state change");
+ MemberPointerData *MPD = new ((void*)(char*)Data) MemberPointerData;
+ Kind = MemberPointer;
+ MPD->MemberAndIsDerivedMember.setPointer(Member);
+ MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember);
+ MPD->resizePath(Path.size());
+ memcpy(MPD->getPath(), Path.data(), Path.size()*sizeof(const CXXRecordDecl*));
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
index 04a084a..1672bc8 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
@@ -15,8 +15,12 @@
#include "clang/AST/DeclGroup.h"
using namespace clang;
-void ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) {}
+bool ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ return true;
+}
void ASTConsumer::HandleInterestingDecl(DeclGroupRef D) {
HandleTopLevelDecl(D);
}
+
+void ASTConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
index 0833286..acf5e0b 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
@@ -74,12 +74,14 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
ID.AddInteger(1);
ID.AddBoolean(NTTP->isParameterPack());
- ID.AddPointer(NTTP->getType().getAsOpaquePtr());
+ ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
if (NTTP->isExpandedParameterPack()) {
ID.AddBoolean(true);
ID.AddInteger(NTTP->getNumExpansionTypes());
- for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I)
- ID.AddPointer(NTTP->getExpansionType(I).getAsOpaquePtr());
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ QualType T = NTTP->getExpansionType(I);
+ ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
+ }
} else
ID.AddBoolean(false);
continue;
@@ -193,7 +195,7 @@ CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
case CXXABI_Microsoft:
return CreateMicrosoftCXXABI(*this);
}
- return 0;
+ llvm_unreachable("Invalid CXXABI type!");
}
static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
@@ -224,13 +226,14 @@ ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
SubstTemplateTemplateParmPacks(this_()),
GlobalNestedNameSpecifier(0),
Int128Decl(0), UInt128Decl(0),
- ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0),
+ ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0), ObjCProtocolClassDecl(0),
CFConstantStringTypeDecl(0), ObjCInstanceTypeDecl(0),
FILEDecl(0),
jmp_bufDecl(0), sigjmp_bufDecl(0), ucontext_tDecl(0),
BlockDescriptorType(0), BlockDescriptorExtendedType(0),
cudaConfigureCallDecl(0),
- NullTypeSourceInfo(QualType()),
+ NullTypeSourceInfo(QualType()),
+ FirstLocalImport(), LastLocalImport(),
SourceMgr(SM), LangOpts(LOpts),
AddrSpaceMap(0), Target(t), PrintingPolicy(LOpts),
Idents(idents), Selectors(sels),
@@ -258,12 +261,6 @@ ASTContext::~ASTContext() {
for (unsigned I = 0, N = Deallocations.size(); I != N; ++I)
Deallocations[I].first(Deallocations[I].second);
- // Release all of the memory associated with overridden C++ methods.
- for (llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::iterator
- OM = OverriddenMethods.begin(), OMEnd = OverriddenMethods.end();
- OM != OMEnd; ++OM)
- OM->second.Destroy();
-
// ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
// because they can contain DenseMaps.
for (llvm::DenseMap<const ObjCContainerDecl*,
@@ -291,7 +288,7 @@ void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
}
void
-ASTContext::setExternalSource(llvm::OwningPtr<ExternalASTSource> &Source) {
+ASTContext::setExternalSource(OwningPtr<ExternalASTSource> &Source) {
ExternalSource.reset(Source.take());
}
@@ -331,14 +328,14 @@ void ASTContext::PrintStats() const {
llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
<< NumImplicitCopyConstructors
<< " implicit copy constructors created\n";
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
<< NumImplicitMoveConstructors
<< " implicit move constructors created\n";
llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
<< NumImplicitCopyAssignmentOperators
<< " implicit copy assignment operators created\n";
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
<< NumImplicitMoveAssignmentOperators
<< " implicit move assignment operators created\n";
@@ -462,9 +459,15 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
// Placeholder type for bound members.
InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
+ // Placeholder type for pseudo-objects.
+ InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
+
// "any" type; useful for debugger-like clients.
InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
+ // Placeholder type for unbridged ARC casts.
+ InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
+
// C99 6.2.5p11.
FloatComplexTy = getComplexType(FloatTy);
DoubleComplexTy = getComplexType(DoubleTy);
@@ -476,7 +479,10 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
-
+
+ // Builtin type for __objc_yes and __objc_no
+ ObjCBuiltinBoolTy = SignedCharTy;
+
ObjCConstantStringType = QualType();
// void * type
@@ -676,6 +682,19 @@ void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
OverriddenMethods[Method].push_back(Overridden);
}
+void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
+ assert(!Import->NextLocalImport && "Import declaration already in the chain");
+ assert(!Import->isFromASTFile() && "Non-local import declaration");
+ if (!FirstLocalImport) {
+ FirstLocalImport = Import;
+ LastLocalImport = Import;
+ return;
+ }
+
+ LastLocalImport->NextLocalImport = Import;
+ LastLocalImport = Import;
+}
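
addedLocalImportDecl threads each local ImportDecl onto an intrusive singly linked list through the First/Last pointer pair, giving O(1) appends in declaration order. A standalone sketch of that bookkeeping with a hypothetical node type:

#include <iostream>

struct ImportNode {
  const char *Name;
  ImportNode *Next;
};

struct Chain {
  ImportNode *First, *Last;
  Chain() : First(nullptr), Last(nullptr) {}
  void append(ImportNode *N) {
    N->Next = nullptr;
    if (!First) { First = Last = N; return; }   // first element
    Last->Next = N;                             // link after current tail
    Last = N;
  }
};

int main() {
  Chain C;
  ImportNode A = {"A", nullptr}, B = {"B", nullptr};
  C.append(&A);
  C.append(&B);
  for (ImportNode *N = C.First; N; N = N->Next)
    std::cout << N->Name << "\n";               // prints A then B
}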
+
//===----------------------------------------------------------------------===//
// Type Sizing and Analysis
//===----------------------------------------------------------------------===//
@@ -796,14 +815,24 @@ ASTContext::getTypeInfoInChars(QualType T) const {
return getTypeInfoInChars(T.getTypePtr());
}
-/// getTypeSize - Return the size of the specified type, in bits. This method
-/// does not work on incomplete types.
+std::pair<uint64_t, unsigned> ASTContext::getTypeInfo(const Type *T) const {
+ TypeInfoMap::iterator it = MemoizedTypeInfo.find(T);
+ if (it != MemoizedTypeInfo.end())
+ return it->second;
+
+ std::pair<uint64_t, unsigned> Info = getTypeInfoImpl(T);
+ MemoizedTypeInfo.insert(std::make_pair(T, Info));
+ return Info;
+}
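
The new getTypeInfo wrapper memoizes getTypeInfoImpl: consult the cache first, compute and insert on a miss. Because the impl recurses back through the wrapper for element types, nested queries populate the cache too. The pattern in miniature, with hypothetical key and value types:

#include <iostream>
#include <map>

static std::map<int, long> Cache;

static long computeExpensive(int Key) { return (long)Key * Key; }

static long getInfo(int Key) {
  std::map<int, long>::iterator It = Cache.find(Key);
  if (It != Cache.end())
    return It->second;             // cache hit: O(log n) lookup, no recompute
  long Result = computeExpensive(Key);
  Cache.insert(std::make_pair(Key, Result));
  return Result;
}

int main() {
  std::cout << getInfo(7) << ' ' << getInfo(7) << "\n"; // second call cached
}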
+
+/// getTypeInfoImpl - Return the size of the specified type, in bits. This
+/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
std::pair<uint64_t, unsigned>
-ASTContext::getTypeInfo(const Type *T) const {
+ASTContext::getTypeInfoImpl(const Type *T) const {
uint64_t Width=0;
unsigned Align=8;
switch (T->getTypeClass()) {
@@ -813,7 +842,6 @@ ASTContext::getTypeInfo(const Type *T) const {
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
llvm_unreachable("Should not see dependent types");
- break;
case Type::FunctionNoProto:
case Type::FunctionProto:
@@ -832,7 +860,10 @@ ASTContext::getTypeInfo(const Type *T) const {
const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(CAT->getElementType());
- Width = EltInfo.first*CAT->getSize().getZExtValue();
+ uint64_t Size = CAT->getSize().getZExtValue();
+ assert((Size == 0 || EltInfo.first <= (uint64_t)(-1)/Size) &&
+ "Overflow in array type bit size evaluation");
+ Width = EltInfo.first*Size;
Align = EltInfo.second;
Width = llvm::RoundUpToAlignment(Width, Align);
break;
@@ -1134,7 +1165,8 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
if (const ComplexType* CT = T->getAs<ComplexType>())
T = CT->getElementType().getTypePtr();
if (T->isSpecificBuiltinType(BuiltinType::Double) ||
- T->isSpecificBuiltinType(BuiltinType::LongLong))
+ T->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ T->isSpecificBuiltinType(BuiltinType::ULongLong))
return std::max(ABIAlign, (unsigned)getTypeSize(T));
return ABIAlign;
@@ -1173,10 +1205,10 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
for (ObjCInterfaceDecl::all_protocol_iterator P = OI->all_referenced_protocol_begin(),
PE = OI->all_referenced_protocol_end(); P != PE; ++P) {
ObjCProtocolDecl *Proto = (*P);
- Protocols.insert(Proto);
+ Protocols.insert(Proto->getCanonicalDecl());
for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
PE = Proto->protocol_end(); P != PE; ++P) {
- Protocols.insert(*P);
+ Protocols.insert((*P)->getCanonicalDecl());
CollectInheritedProtocols(*P, Protocols);
}
}
@@ -1194,7 +1226,7 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
for (ObjCCategoryDecl::protocol_iterator P = OC->protocol_begin(),
PE = OC->protocol_end(); P != PE; ++P) {
ObjCProtocolDecl *Proto = (*P);
- Protocols.insert(Proto);
+ Protocols.insert(Proto->getCanonicalDecl());
for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
PE = Proto->protocol_end(); P != PE; ++P)
CollectInheritedProtocols(*P, Protocols);
@@ -1203,7 +1235,7 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
for (ObjCProtocolDecl::protocol_iterator P = OP->protocol_begin(),
PE = OP->protocol_end(); P != PE; ++P) {
ObjCProtocolDecl *Proto = (*P);
- Protocols.insert(Proto);
+ Protocols.insert(Proto->getCanonicalDecl());
for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
PE = Proto->protocol_end(); P != PE; ++P)
CollectInheritedProtocols(*P, Protocols);
@@ -1226,6 +1258,24 @@ unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
return count;
}
+bool ASTContext::isSentinelNullExpr(const Expr *E) {
+ if (!E)
+ return false;
+
+ // nullptr_t is always treated as null.
+ if (E->getType()->isNullPtrType()) return true;
+
+ if (E->getType()->isAnyPointerType() &&
+ E->IgnoreParenCasts()->isNullPointerConstant(*this,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ // Unfortunately, __null has type 'int'.
+ if (isa<GNUNullExpr>(E)) return true;
+
+ return false;
+}
+
/// \brief Get the implementation of ObjCInterfaceDecl,or NULL if none exists.
ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
@@ -1256,6 +1306,17 @@ void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
ObjCImpls[CatD] = ImplD;
}
+ObjCInterfaceDecl *ASTContext::getObjContainingInterface(NamedDecl *ND) const {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
+ return ID;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
+ return CD->getClassInterface();
+ if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
+ return IMD->getClassInterface();
+
+ return 0;
+}
+
/// \brief Get the copy initialization expression of VarDecl,or NULL if
/// none exists.
Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
@@ -1337,8 +1398,8 @@ ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
QualType canon;
if (!baseType->isCanonicalUnqualified()) {
SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
- canonSplit.second.addConsistentQualifiers(quals);
- canon = getExtQualType(canonSplit.first, canonSplit.second);
+ canonSplit.Quals.addConsistentQualifiers(quals);
+ canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
// Re-find the insert position.
(void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
@@ -1640,9 +1701,9 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
QualType Canon;
if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
SplitQualType canonSplit = getCanonicalType(EltTy).split();
- Canon = getConstantArrayType(QualType(canonSplit.first, 0), ArySize,
+ Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
ASM, IndexTypeQuals);
- Canon = getQualifiedType(Canon, canonSplit.second);
+ Canon = getQualifiedType(Canon, canonSplit.Quals);
// Get the new insert position for the node we care about.
ConstantArrayType *NewIP =
@@ -1667,7 +1728,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
QualType result;
SplitQualType split = type.getSplitDesugaredType();
- const Type *ty = split.first;
+ const Type *ty = split.Ty;
switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
@@ -1786,7 +1847,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
}
// Apply the top-level qualifiers from the original.
- return getQualifiedType(result, split.second);
+ return getQualifiedType(result, split.Quals);
}
/// getVariableArrayType - Returns a non-unique reference to the type for a
@@ -1803,9 +1864,9 @@ QualType ASTContext::getVariableArrayType(QualType EltTy,
// Be sure to pull qualifiers off the element type.
if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
SplitQualType canonSplit = getCanonicalType(EltTy).split();
- Canon = getVariableArrayType(QualType(canonSplit.first, 0), NumElts, ASM,
+ Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
IndexTypeQuals, Brackets);
- Canon = getQualifiedType(Canon, canonSplit.second);
+ Canon = getQualifiedType(Canon, canonSplit.Quals);
}
VariableArrayType *New = new(*this, TypeAlignment)
@@ -1850,7 +1911,7 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
void *insertPos = 0;
llvm::FoldingSetNodeID ID;
DependentSizedArrayType::Profile(ID, *this,
- QualType(canonElementType.first, 0),
+ QualType(canonElementType.Ty, 0),
ASM, elementTypeQuals, numElements);
// Look for an existing type with these properties.
@@ -1860,7 +1921,7 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
// If we don't have one, build one.
if (!canonTy) {
canonTy = new (*this, TypeAlignment)
- DependentSizedArrayType(*this, QualType(canonElementType.first, 0),
+ DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
QualType(), numElements, ASM, elementTypeQuals,
brackets);
DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
@@ -1869,11 +1930,11 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
// Apply qualifiers from the element type to the array.
QualType canon = getQualifiedType(QualType(canonTy,0),
- canonElementType.second);
+ canonElementType.Quals);
// If we didn't need extra canonicalization for the element type,
// then just use that as our result.
- if (QualType(canonElementType.first, 0) == elementType)
+ if (QualType(canonElementType.Ty, 0) == elementType)
return canon;
// Otherwise, we need to build a type which follows the spelling
@@ -1904,9 +1965,9 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType,
if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
SplitQualType canonSplit = getCanonicalType(elementType).split();
- canon = getIncompleteArrayType(QualType(canonSplit.first, 0),
+ canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
ASM, elementTypeQuals);
- canon = getQualifiedType(canon, canonSplit.second);
+ canon = getQualifiedType(canon, canonSplit.Quals);
// Get the new insert position for the node we care about.
IncompleteArrayType *existing =
@@ -2082,7 +2143,9 @@ ASTContext::getFunctionType(QualType ResultTy,
return QualType(FTP, 0);
// Determine whether the type being created is already canonical or not.
- bool isCanonical= EPI.ExceptionSpecType == EST_None && ResultTy.isCanonical();
+ bool isCanonical =
+ EPI.ExceptionSpecType == EST_None && ResultTy.isCanonical() &&
+ !EPI.HasTrailingReturn;
for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
if (!ArgArray[i].isCanonicalAsParam())
isCanonical = false;
@@ -2101,6 +2164,7 @@ ASTContext::getFunctionType(QualType ResultTy,
CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
+ CanonicalEPI.HasTrailingReturn = false;
CanonicalEPI.ExceptionSpecType = EST_None;
CanonicalEPI.NumExceptions = 0;
CanonicalEPI.ExtInfo
@@ -2162,7 +2226,7 @@ QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
assert(NeedsInjectedClassNameType(Decl));
if (Decl->TypeForDecl) {
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
- } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDeclaration()) {
+ } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
assert(PrevDecl->TypeForDecl && "previous declaration has no type");
Decl->TypeForDecl = PrevDecl->TypeForDecl;
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
@@ -2188,12 +2252,12 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
"Template type parameter types are always available.");
if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
- assert(!Record->getPreviousDeclaration() &&
+ assert(!Record->getPreviousDecl() &&
"struct/union has previous declaration");
assert(!NeedsInjectedClassNameType(Record));
return getRecordType(Record);
} else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
- assert(!Enum->getPreviousDeclaration() &&
+ assert(!Enum->getPreviousDecl() &&
"enum has previous declaration");
return getEnumType(Enum);
} else if (const UnresolvedUsingTypenameDecl *Using =
@@ -2226,7 +2290,7 @@ ASTContext::getTypedefType(const TypedefNameDecl *Decl,
QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
- if (const RecordDecl *PrevDecl = Decl->getPreviousDeclaration())
+ if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
if (PrevDecl->TypeForDecl)
return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
@@ -2239,7 +2303,7 @@ QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
- if (const EnumDecl *PrevDecl = Decl->getPreviousDeclaration())
+ if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
if (PrevDecl->TypeForDecl)
return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
@@ -2374,6 +2438,7 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
TemplateSpecializationTypeLoc TL
= cast<TemplateSpecializationTypeLoc>(DI->getTypeLoc());
+ TL.setTemplateKeywordLoc(SourceLocation());
TL.setTemplateNameLoc(NameLoc);
TL.setLAngleLoc(Args.getLAngleLoc());
TL.setRAngleLoc(Args.getRAngleLoc());
@@ -2400,6 +2465,17 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
Underlying);
}
+#ifndef NDEBUG
+static bool hasAnyPackExpansions(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ for (unsigned I = 0; I != NumArgs; ++I)
+ if (Args[I].isPackExpansion())
+ return true;
+
+ return false;
+}
+#endif
+
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
const TemplateArgument *Args,
@@ -2411,16 +2487,18 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
Template = TemplateName(QTN->getTemplateDecl());
- bool isTypeAlias =
+ bool IsTypeAlias =
Template.getAsTemplateDecl() &&
isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
-
QualType CanonType;
if (!Underlying.isNull())
CanonType = getCanonicalType(Underlying);
else {
- assert(!isTypeAlias &&
- "Underlying type for template alias must be computed by caller");
+ // We can get here with an alias template when the specialization contains
+ // a pack expansion that does not match up with a parameter pack.
+ assert((!IsTypeAlias || hasAnyPackExpansions(Args, NumArgs)) &&
+ "Caller must compute aliased type");
+ IsTypeAlias = false;
CanonType = getCanonicalTemplateSpecializationType(Template, Args,
NumArgs);
}
@@ -2430,13 +2508,11 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
// we don't unique and don't want to lose.
void *Mem = Allocate(sizeof(TemplateSpecializationType) +
sizeof(TemplateArgument) * NumArgs +
- (isTypeAlias ? sizeof(QualType) : 0),
+ (IsTypeAlias? sizeof(QualType) : 0),
TypeAlignment);
TemplateSpecializationType *Spec
- = new (Mem) TemplateSpecializationType(Template,
- Args, NumArgs,
- CanonType,
- isTypeAlias ? Underlying : QualType());
+ = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, CanonType,
+ IsTypeAlias ? Underlying : QualType());
Types.push_back(Spec);
return QualType(Spec, 0);
@@ -2448,9 +2524,6 @@ ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
unsigned NumArgs) const {
assert(!Template.getAsDependentTemplateName() &&
"No dependent template names here!");
- assert((!Template.getAsTemplateDecl() ||
- !isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) &&
- "Underlying type for template alias must be computed by caller");
// Look through qualified template names.
if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
@@ -2677,8 +2750,12 @@ static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
unsigned NumProtocols) {
if (NumProtocols == 0) return true;
+ if (Protocols[0]->getCanonicalDecl() != Protocols[0])
+ return false;
+
for (unsigned i = 1; i != NumProtocols; ++i)
- if (!CmpProtocolNames(Protocols[i-1], Protocols[i]))
+ if (!CmpProtocolNames(Protocols[i-1], Protocols[i]) ||
+ Protocols[i]->getCanonicalDecl() != Protocols[i])
return false;
return true;
}
@@ -2690,6 +2767,10 @@ static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
// Sort protocols, keyed by name.
std::sort(Protocols, Protocols+NumProtocols, CmpProtocolNames);
+ // Canonicalize.
+ for (unsigned I = 0, N = NumProtocols; I != N; ++I)
+ Protocols[I] = Protocols[I]->getCanonicalDecl();
+
// Remove duplicates.
ProtocolsEnd = std::unique(Protocols, ProtocolsEnd);
NumProtocols = ProtocolsEnd-Protocols;
@@ -2775,11 +2856,21 @@ QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
-QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl) const {
+QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
+ ObjCInterfaceDecl *PrevDecl) const {
if (Decl->TypeForDecl)
return QualType(Decl->TypeForDecl, 0);
- // FIXME: redeclarations?
+ if (PrevDecl) {
+ assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ return QualType(PrevDecl->TypeForDecl, 0);
+ }
+
+ // Prefer the definition, if there is one.
+ if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
+ Decl = Def;
+
void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
Decl->TypeForDecl = T;
@@ -2833,44 +2924,13 @@ QualType ASTContext::getTypeOfType(QualType tofType) const {
return QualType(tot, 0);
}
-/// getDecltypeForExpr - Given an expr, will return the decltype for that
-/// expression, according to the rules in C++0x [dcl.type.simple]p4
-static QualType getDecltypeForExpr(const Expr *e, const ASTContext &Context) {
- if (e->isTypeDependent())
- return Context.DependentTy;
-
- // If e is an id expression or a class member access, decltype(e) is defined
- // as the type of the entity named by e.
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(e)) {
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
- return VD->getType();
- }
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(e)) {
- if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
- return FD->getType();
- }
- // If e is a function call or an invocation of an overloaded operator,
- // (parentheses around e are ignored), decltype(e) is defined as the
- // return type of that function.
- if (const CallExpr *CE = dyn_cast<CallExpr>(e->IgnoreParens()))
- return CE->getCallReturnType();
-
- QualType T = e->getType();
-
- // Otherwise, where T is the type of e, if e is an lvalue, decltype(e) is
- // defined as T&, otherwise decltype(e) is defined as T.
- if (e->isLValue())
- T = Context.getLValueReferenceType(T);
-
- return T;
-}
/// getDecltypeType - Unlike many "get<Type>" functions, we don't unique
/// DecltypeType AST's. The only motivation to unique these nodes would be
/// memory savings. Since decltype(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
-/// on canonical type's (which are always unique).
-QualType ASTContext::getDecltypeType(Expr *e) const {
+/// on canonical types (which are always unique).
+QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
DecltypeType *dt;
// C++0x [temp.type]p2:
@@ -2896,8 +2956,8 @@ QualType ASTContext::getDecltypeType(Expr *e) const {
dt = Canon;
}
} else {
- QualType T = getDecltypeForExpr(e, *this);
- dt = new (*this, TypeAlignment) DecltypeType(e, T, getCanonicalType(T));
+ dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType,
+ getCanonicalType(UnderlyingType));
}
Types.push_back(dt);
return QualType(dt, 0);
@@ -2913,7 +2973,7 @@ QualType ASTContext::getUnaryTransformType(QualType BaseType,
new (*this, TypeAlignment) UnaryTransformType (BaseType, UnderlyingType,
Kind,
UnderlyingType->isDependentType() ?
- QualType() : UnderlyingType);
+ QualType() : getCanonicalType(UnderlyingType));
Types.push_back(Ty);
return QualType(Ty, 0);
}
@@ -2996,6 +3056,16 @@ CanQualType ASTContext::getSizeType() const {
return getFromTargetType(Target->getSizeType());
}
+/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
+CanQualType ASTContext::getIntMaxType() const {
+ return getFromTargetType(Target->getIntMaxType());
+}
+
+/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
+CanQualType ASTContext::getUIntMaxType() const {
+ return getFromTargetType(Target->getUIntMaxType());
+}
+
/// getSignedWCharType - Return the type of "signed wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getSignedWCharType() const {
@@ -3010,7 +3080,7 @@ QualType ASTContext::getUnsignedWCharType() const {
return UnsignedIntTy;
}
-/// getPointerDiffType - Return the unique type for "ptrdiff_t" (ref?)
+/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType ASTContext::getPointerDiffType() const {
return getFromTargetType(Target->getPtrDiffType(0));
@@ -3047,12 +3117,12 @@ QualType ASTContext::getUnqualifiedArrayType(QualType type,
// We then have to strip that sugar back off with
// getUnqualifiedDesugaredType(), which is silly.
const ArrayType *AT =
- dyn_cast<ArrayType>(splitType.first->getUnqualifiedDesugaredType());
+ dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
// If we don't have an array, just use the results in splitType.
if (!AT) {
- quals = splitType.second;
- return QualType(splitType.first, 0);
+ quals = splitType.Quals;
+ return QualType(splitType.Ty, 0);
}
// Otherwise, recurse on the array's element type.
@@ -3063,13 +3133,13 @@ QualType ASTContext::getUnqualifiedArrayType(QualType type,
// can just use the results in splitType.
if (elementType == unqualElementType) {
assert(quals.empty()); // from the recursive call
- quals = splitType.second;
- return QualType(splitType.first, 0);
+ quals = splitType.Quals;
+ return QualType(splitType.Ty, 0);
}
// Otherwise, add in the qualifiers from the outermost type, then
// build the type back up.
- quals.addConsistentQualifiers(splitType.second);
+ quals.addConsistentQualifiers(splitType.Quals);
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
return getConstantArrayType(unqualElementType, CAT->getSize(),
@@ -3121,7 +3191,7 @@ bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
return true;
}
- if (getLangOptions().ObjC1) {
+ if (getLangOpts().ObjC1) {
const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
*T2OPType = T2->getAs<ObjCObjectPointerType>();
if (T1OPType && T2OPType) {
@@ -3243,8 +3313,11 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
case TemplateArgument::Expression:
return Arg;
- case TemplateArgument::Declaration:
- return TemplateArgument(Arg.getAsDecl()->getCanonicalDecl());
+ case TemplateArgument::Declaration: {
+ if (Decl *D = Arg.getAsDecl())
+ return TemplateArgument(D->getCanonicalDecl());
+ return TemplateArgument((Decl*)0);
+ }
case TemplateArgument::Template:
return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
@@ -3317,26 +3390,13 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
// types, e.g.,
// typedef typename T::type T1;
// typedef typename T1::type T2;
- if (const DependentNameType *DNT = T->getAs<DependentNameType>()) {
- NestedNameSpecifier *Prefix
- = getCanonicalNestedNameSpecifier(DNT->getQualifier());
- return NestedNameSpecifier::Create(*this, Prefix,
+ if (const DependentNameType *DNT = T->getAs<DependentNameType>())
+ return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
const_cast<IdentifierInfo *>(DNT->getIdentifier()));
- }
- // Do the same thing as above, but with dependent-named specializations.
- if (const DependentTemplateSpecializationType *DTST
- = T->getAs<DependentTemplateSpecializationType>()) {
- NestedNameSpecifier *Prefix
- = getCanonicalNestedNameSpecifier(DTST->getQualifier());
-
- T = getDependentTemplateSpecializationType(DTST->getKeyword(),
- Prefix, DTST->getIdentifier(),
- DTST->getNumArgs(),
- DTST->getArgs());
- T = getCanonicalType(T);
- }
-
+ // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
+ // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
+ // first place?
return NestedNameSpecifier::Create(*this, 0, false,
const_cast<Type*>(T.getTypePtr()));
}
@@ -3346,8 +3406,7 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
return NNS;
}
- // Required to silence a GCC warning
- return 0;
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
@@ -3372,10 +3431,10 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
// we must propagate them down into the element type.
SplitQualType split = T.getSplitDesugaredType();
- Qualifiers qs = split.second;
+ Qualifiers qs = split.Quals;
// If we have a simple case, just return now.
- const ArrayType *ATy = dyn_cast<ArrayType>(split.first);
+ const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty);
if (ATy == 0 || qs.empty())
return ATy;
@@ -3462,11 +3521,11 @@ QualType ASTContext::getBaseElementType(QualType type) const {
Qualifiers qs;
while (true) {
SplitQualType split = type.getSplitDesugaredType();
- const ArrayType *array = split.first->getAsArrayTypeUnsafe();
+ const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
if (!array) break;
type = array->getElementType();
- qs.addConsistentQualifiers(split.second);
+ qs.addConsistentQualifiers(split.Quals);
}
return getQualifiedType(type, qs);
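
The .first/.second to .Ty/.Quals renames in the hunks above track SplitQualType's change from a std::pair to a struct with named members; a hedged sketch of the resulting access pattern, using only names that appear in this diff:

    SplitQualType split = type.getSplitDesugaredType();
    const Type *bare = split.Ty;     // the unqualified type node
    Qualifiers quals = split.Quals;  // the qualifiers stripped from it
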
@@ -3508,7 +3567,7 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
FloatingRank EltRank = getFloatingRank(Size);
if (Domain->isComplexType()) {
switch (EltRank) {
- default: llvm_unreachable("getFloatingRank(): illegal value for rank");
+ case HalfRank: llvm_unreachable("Complex half is not supported");
case FloatRank: return FloatComplexTy;
case DoubleRank: return DoubleComplexTy;
case LongDoubleRank: return LongDoubleComplexTy;
@@ -3517,11 +3576,12 @@ QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
assert(Domain->isRealFloatingType() && "Unknown domain!");
switch (EltRank) {
- default: llvm_unreachable("getFloatingRank(): illegal value for rank");
+ case HalfRank: llvm_unreachable("Half ranks are not valid here");
case FloatRank: return FloatTy;
case DoubleRank: return DoubleTy;
case LongDoubleRank: return LongDoubleTy;
}
+ llvm_unreachable("getFloatingRank(): illegal value for rank");
}
/// getFloatingTypeOrder - Compare the rank of the two specified floating
@@ -3544,18 +3604,6 @@ int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
assert(T->isCanonicalUnqualified() && "T should be canonicalized");
- if (const EnumType* ET = dyn_cast<EnumType>(T))
- T = ET->getDecl()->getPromotionType().getTypePtr();
-
- if (T->isSpecificBuiltinType(BuiltinType::WChar_S) ||
- T->isSpecificBuiltinType(BuiltinType::WChar_U))
- T = getFromTargetType(Target->getWCharType()).getTypePtr();
-
- if (T->isSpecificBuiltinType(BuiltinType::Char16))
- T = getFromTargetType(Target->getChar16Type()).getTypePtr();
-
- if (T->isSpecificBuiltinType(BuiltinType::Char32))
- T = getFromTargetType(Target->getChar32Type()).getTypePtr();
switch (cast<BuiltinType>(T)->getKind()) {
default: llvm_unreachable("getIntegerRank(): not a built-in integer");
@@ -3625,6 +3673,34 @@ QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
assert(Promotable->isPromotableIntegerType());
if (const EnumType *ET = Promotable->getAs<EnumType>())
return ET->getDecl()->getPromotionType();
+
+ if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) {
+ // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
+ // (3.9.1) can be converted to a prvalue of the first of the following
+ // types that can represent all the values of its underlying type:
+ // int, unsigned int, long int, unsigned long int, long long int, or
+ // unsigned long long int [...]
+ // FIXME: Is there some better way to compute this?
+ if (BT->getKind() == BuiltinType::WChar_S ||
+ BT->getKind() == BuiltinType::WChar_U ||
+ BT->getKind() == BuiltinType::Char16 ||
+ BT->getKind() == BuiltinType::Char32) {
+ bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
+ uint64_t FromSize = getTypeSize(BT);
+ QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
+ LongLongTy, UnsignedLongLongTy };
+ for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
+ uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
+ if (FromSize < ToSize ||
+ (FromSize == ToSize &&
+ FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
+ return PromoteTypes[Idx];
+ }
+ llvm_unreachable("char type should fit into long long");
+ }
+ }
+
+ // At this point, we should have a signed or unsigned integer type.
if (Promotable->isSignedIntegerType())
return IntTy;
uint64_t PromotableSize = getTypeSize(Promotable);
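
The new [conv.prom] loop can be cross-checked against any conforming compiler; a compilable sketch assuming a typical LP64 target where wchar_t is a signed 32-bit type and int is 32 bits:

    #include <type_traits>
    // Unary + applies the integral promotion, so decltype(+e) observes it.
    static_assert(std::is_same<decltype(+wchar_t(0)), int>::value,
                  "32-bit signed wchar_t promotes to int");
    static_assert(std::is_same<decltype(+char16_t(0)), int>::value,
                  "16-bit char16_t fits in int (FromSize < ToSize)");
    static_assert(std::is_same<decltype(+char32_t(0)), unsigned int>::value,
                  "32-bit unsigned char32_t: equal size, matching signedness");
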
@@ -3697,7 +3773,7 @@ static RecordDecl *
CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
DeclContext *DC, IdentifierInfo *Id) {
SourceLocation Loc;
- if (Ctx.getLangOptions().CPlusPlus)
+ if (Ctx.getLangOpts().CPlusPlus)
return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
else
return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
@@ -3832,7 +3908,7 @@ QualType ASTContext::getBlockDescriptorExtendedType() const {
bool ASTContext::BlockRequiresCopying(QualType Ty) const {
if (Ty->isObjCRetainableType())
return true;
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
return RD->hasConstCopyConstructor();
@@ -3857,7 +3933,7 @@ ASTContext::BuildByRefType(StringRef DeclName, QualType Ty) const {
bool HasCopyAndDispose = BlockRequiresCopying(Ty);
// FIXME: Move up
- llvm::SmallString<36> Name;
+ SmallString<36> Name;
llvm::raw_svector_ostream(Name) << "__Block_byref_" <<
++UniqueBlockByRefTypeID << '_' << DeclName;
RecordDecl *T;
@@ -4037,15 +4113,32 @@ bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
return false;
}
+/// getObjCEncodingForMethodParameter - Return the encoded type for a single
+/// method parameter or return type. If Extended, include class names and
+/// block object types.
+void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
+ QualType T, std::string& S,
+ bool Extended) const {
+  // Encode type qualifier, 'in', 'inout', etc. for the parameter.
+ getObjCEncodingForTypeQualifier(QT, S);
+ // Encode parameter type.
+ getObjCEncodingForTypeImpl(T, S, true, true, 0,
+ true /*OutermostType*/,
+ false /*EncodingProperty*/,
+ false /*StructField*/,
+ Extended /*EncodeBlockParameters*/,
+ Extended /*EncodeClassNames*/);
+}
+
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
- std::string& S) const {
+ std::string& S,
+ bool Extended) const {
// FIXME: This is not very efficient.
- // Encode type qualifer, 'in', 'inout', etc. for the return type.
- getObjCEncodingForTypeQualifier(Decl->getObjCDeclQualifier(), S);
- // Encode result type.
- getObjCEncodingForType(Decl->getResultType(), S);
+ // Encode return type.
+ getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
+ Decl->getResultType(), S, Extended);
// Compute size of all parameters.
// Start with computing size of a pointer in number of bytes.
// FIXME: There might(should) be a better way of doing this computation!
@@ -4083,10 +4176,8 @@ bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
PType = PVDecl->getType();
} else if (PType->isFunctionType())
PType = PVDecl->getType();
- // Process argument qualifiers for user supplied arguments; such as,
- // 'in', 'inout', etc.
- getObjCEncodingForTypeQualifier(PVDecl->getObjCDeclQualifier(), S);
- getObjCEncodingForType(PType, S);
+ getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
+ PType, S, Extended);
S += charUnitsToString(ParmOffset);
ParmOffset += getObjCEncodingTypeSize(PType);
}
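
For orientation, a hedged sketch of what the Extended flag changes in practice (the offsets below are illustrative, not computed, and the exact strings depend on target pointer size):

    // - (void)setName:(NSString *)name;
    //   plain encoding:    v24@0:8@16
    //   extended encoding: v24@0:8@"NSString"16   (EncodeClassNames adds the class name)
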
@@ -4113,7 +4204,7 @@ bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
/// kPropertyGetter = 'G', // followed by getter selector name
/// kPropertySetter = 'S', // followed by setter selector name
/// kPropertyInstanceVariable = 'V' // followed by instance variable name
-/// kPropertyType = 't' // followed by old-style type encoding.
+/// kPropertyType = 'T' // followed by old-style type encoding.
/// kPropertyWeak = 'W' // 'weak' property
/// kPropertyStrong = 'P' // property GC'able
/// kPropertyNonAtomic = 'N' // property non-atomic
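
A hedged example of a full property attribute string built from these codes (the property and ivar names are illustrative):

    // @property (nonatomic, copy) NSString *name;
    //   => T@"NSString",C,N,V_name
    //      'T' type encoding, 'C' copy, 'N' nonatomic, 'V' backing ivar
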
@@ -4293,7 +4384,7 @@ static void EncodeBitField(const ASTContext *Ctx, std::string& S,
// information is not especially sensible, but we're stuck with it for
// compatibility with GCC, although providing it breaks anything that
// actually uses runtime introspection and wants to work on both runtimes...
- if (!Ctx->getLangOptions().NeXTRuntime) {
+ if (!Ctx->getLangOpts().NeXTRuntime) {
const RecordDecl *RD = FD->getParent();
const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex()));
@@ -4312,7 +4403,9 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
const FieldDecl *FD,
bool OutermostType,
bool EncodingProperty,
- bool StructField) const {
+ bool StructField,
+ bool EncodeBlockParameters,
+ bool EncodeClassNames) const {
if (T->getAs<BuiltinType>()) {
if (FD && FD->isBitField())
return EncodeBitField(this, S, T, FD);
@@ -4491,8 +4584,40 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
return;
}
- if (T->isBlockPointerType()) {
+ if (const BlockPointerType *BT = T->getAs<BlockPointerType>()) {
S += "@?"; // Unlike a pointer-to-function, which is "^?".
+ if (EncodeBlockParameters) {
+ const FunctionType *FT = BT->getPointeeType()->getAs<FunctionType>();
+
+ S += '<';
+ // Block return type
+ getObjCEncodingForTypeImpl(FT->getResultType(), S,
+ ExpandPointedToStructures, ExpandStructures,
+ FD,
+ false /* OutermostType */,
+ EncodingProperty,
+ false /* StructField */,
+ EncodeBlockParameters,
+ EncodeClassNames);
+ // Block self
+ S += "@?";
+ // Block parameters
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(),
+ E = FPT->arg_type_end(); I && (I != E); ++I) {
+ getObjCEncodingForTypeImpl(*I, S,
+ ExpandPointedToStructures,
+ ExpandStructures,
+ FD,
+ false /* OutermostType */,
+ EncodingProperty,
+ false /* StructField */,
+ EncodeBlockParameters,
+ EncodeClassNames);
+ }
+ }
+ S += '>';
+ }
return;
}
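
To make the new bracketed form concrete, a hedged sketch of what the EncodeBlockParameters path above would produce for a simple block type:

    // int (^blk)(float);
    //   old encoding: @?
    //   new extended: @?<i@?f>   ('i' = int return, '@?' = the block itself, 'f' = float param)
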
@@ -4538,7 +4663,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
getObjCEncodingForTypeImpl(getObjCIdType(), S,
ExpandPointedToStructures,
ExpandStructures, FD);
- if (FD || EncodingProperty) {
+ if (FD || EncodingProperty || EncodeClassNames) {
      // Note that we do extended encoding of protocol qualifier list
      // only when doing ivar or property encoding.
S += '"';
@@ -4567,7 +4692,8 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
}
S += '@';
- if (OPT->getInterfaceDecl() && (FD || EncodingProperty)) {
+ if (OPT->getInterfaceDecl() &&
+ (FD || EncodingProperty || EncodeClassNames)) {
S += '"';
S += OPT->getInterfaceDecl()->getIdentifier()->getName();
for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
@@ -4780,10 +4906,6 @@ TypedefDecl *ASTContext::getObjCSelDecl() const {
return ObjCSelDecl;
}
-void ASTContext::setObjCProtoType(QualType QT) {
- ObjCProtoType = QT;
-}
-
TypedefDecl *ASTContext::getObjCClassDecl() const {
if (!ObjCClassDecl) {
QualType T = getObjCObjectType(ObjCBuiltinClassTy, 0, 0);
@@ -4798,6 +4920,19 @@ TypedefDecl *ASTContext::getObjCClassDecl() const {
return ObjCClassDecl;
}
+ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
+ if (!ObjCProtocolClassDecl) {
+ ObjCProtocolClassDecl
+ = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ &Idents.get("Protocol"),
+ /*PrevDecl=*/0,
+ SourceLocation(), true);
+ }
+
+ return ObjCProtocolClassDecl;
+}
+
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
assert(ObjCConstantStringType.isNull() &&
"'NSConstantString' type already set!");
@@ -4987,10 +5122,10 @@ CanQualType ASTContext::getFromTargetType(unsigned Type) const {
/// garbage collection attribute.
///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
- if (getLangOptions().getGC() == LangOptions::NonGC)
+ if (getLangOpts().getGC() == LangOptions::NonGC)
return Qualifiers::GCNone;
- assert(getLangOptions().ObjC1);
+ assert(getLangOpts().ObjC1);
Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
  // Default behaviour under Objective-C's GC is for ObjC pointers
@@ -5059,7 +5194,7 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
bool
ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
ObjCProtocolDecl *rProto) const {
- if (lProto == rProto)
+ if (declaresSameEntity(lProto, rProto))
return true;
for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(),
E = rProto->protocol_end(); PI != E; ++PI)
@@ -5360,7 +5495,7 @@ QualType ASTContext::areCommonBaseCompatible(
const ObjCObjectType *RHS = Rptr->getObjectType();
const ObjCInterfaceDecl* LDecl = LHS->getInterface();
const ObjCInterfaceDecl* RDecl = RHS->getInterface();
- if (!LDecl || !RDecl || (LDecl == RDecl))
+ if (!LDecl || !RDecl || (declaresSameEntity(LDecl, RDecl)))
return QualType();
do {
@@ -5484,7 +5619,7 @@ bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
/// same. See 6.7.[2,3,5] for additional rules.
bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
bool CompareUnqualified) {
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
return hasSameType(LHS, RHS);
return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
@@ -5791,14 +5926,23 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
// Compatibility is based on the underlying type, not the promotion
// type.
if (const EnumType* ETy = LHS->getAs<EnumType>()) {
- if (ETy->getDecl()->getIntegerType() == RHSCan.getUnqualifiedType())
+ QualType TINT = ETy->getDecl()->getIntegerType();
+ if (!TINT.isNull() && hasSameType(TINT, RHSCan.getUnqualifiedType()))
return RHS;
}
if (const EnumType* ETy = RHS->getAs<EnumType>()) {
- if (ETy->getDecl()->getIntegerType() == LHSCan.getUnqualifiedType())
+ QualType TINT = ETy->getDecl()->getIntegerType();
+ if (!TINT.isNull() && hasSameType(TINT, LHSCan.getUnqualifiedType()))
return LHS;
}
-
+  // Allow a block pointer type to match an 'id' type.
+ if (OfBlockPointer && !BlockReturnType) {
+ if (LHS->isObjCIdType() && RHS->isBlockPointerType())
+ return LHS;
+ if (RHS->isObjCIdType() && LHS->isBlockPointerType())
+ return RHS;
+ }
+
return QualType();
}
@@ -5959,7 +6103,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
LHS->getAs<ObjCObjectPointerType>(),
RHS->getAs<ObjCObjectPointerType>(),
BlockReturnType))
- return LHS;
+ return LHS;
return QualType();
}
if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(),
@@ -5967,10 +6111,10 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
return LHS;
return QualType();
- }
+ }
}
- return QualType();
+ llvm_unreachable("Invalid Type::Class!");
}
bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs(
@@ -6333,6 +6477,9 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
case 'D':
Type = Context.getVolatileType(Type);
break;
+ case 'R':
+ Type = Type.withRestrict();
+ break;
}
}
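
DecodeTypeFromStr consumes the Builtins.def-style signature strings, where single-character modifiers follow the component they qualify; a hedged illustration of the new 'R' case:

    // "cC*R"  ->  const char * restrict
    //             ('c' char, 'C' const, '*' pointer, 'R' restrict)
    // 'D' (volatile) works the same way, per the case directly above.
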
@@ -6425,7 +6572,7 @@ GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) {
if (!FD->isInlined())
return External;
- if (!getLangOptions().CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
+ if (!getLangOpts().CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
// GNU or C99 inline semantics. Determine whether this symbol should be
// externally visible.
if (FD->isInlineDefinitionExternallyVisible())
@@ -6457,7 +6604,7 @@ GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
TSK = VD->getTemplateSpecializationKind();
Linkage L = VD->getLinkage();
- if (L == ExternalLinkage && getLangOptions().CPlusPlus &&
+ if (L == ExternalLinkage && getLangOpts().CPlusPlus &&
VD->getType()->getLinkage() == UniqueExternalLinkage)
L = UniqueExternalLinkage;
@@ -6485,7 +6632,7 @@ GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
}
}
- return GVA_StrongExternal;
+ llvm_unreachable("Invalid Linkage!");
}
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
@@ -6602,6 +6749,13 @@ size_t ASTContext::getSideTableAllocatedMemory() const {
+ llvm::capacity_in_bytes(ClassScopeSpecializationPattern);
}
+unsigned ASTContext::getLambdaManglingNumber(CXXMethodDecl *CallOperator) {
+ CXXRecordDecl *Lambda = CallOperator->getParent();
+ return LambdaMangleContexts[Lambda->getDeclContext()]
+ .getManglingNumber(CallOperator);
+}
+
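
Context for the new per-DeclContext mangling-number map: two closure types in the same declaration context are otherwise structurally indistinguishable, so the ABI numbers them in order of appearance. A small C++ sketch (the numbers are illustrative):

    inline int twice() {
      auto a = []{ return 1; };  // closure type gets lambda mangling number 1
      auto b = []{ return 1; };  // identical body, distinct type: number 2
      return a() + b();
    }
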
void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
ParamIndices[D] = index;
}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
index 07820dc..ca4fe26 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
@@ -154,26 +154,34 @@ static std::string
ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
const DiagnosticsEngine::ArgumentValue *PrevArgs,
unsigned NumPrevArgs,
- SmallVectorImpl<intptr_t> &QualTypeVals) {
+ ArrayRef<intptr_t> QualTypeVals) {
// FIXME: Playing with std::string is really slow.
bool ForceAKA = false;
QualType CanTy = Ty.getCanonicalType();
std::string S = Ty.getAsString(Context.getPrintingPolicy());
std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());
- for (SmallVectorImpl<intptr_t>::iterator I = QualTypeVals.begin(),
- E = QualTypeVals.end(); I != E; ++I) {
+ for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
QualType CompareTy =
- QualType::getFromOpaquePtr(reinterpret_cast<void*>(*I));
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
+ if (CompareTy.isNull())
+ continue;
if (CompareTy == Ty)
continue; // Same types
QualType CompareCanTy = CompareTy.getCanonicalType();
if (CompareCanTy == CanTy)
continue; // Same canonical types
std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
- if (CompareS != S)
- continue; // Original strings are different
- std::string CompareCanS = CompareCanTy.getAsString(Context.getPrintingPolicy());
+ bool aka;
+ QualType CompareDesugar = Desugar(Context, CompareTy, aka);
+ std::string CompareDesugarStr =
+ CompareDesugar.getAsString(Context.getPrintingPolicy());
+ if (CompareS != S && CompareDesugarStr != S)
+ continue; // The type string is different than the comparison string
+ // and the desugared comparison string.
+ std::string CompareCanS =
+ CompareCanTy.getAsString(Context.getPrintingPolicy());
+
if (CompareCanS == CanS)
continue; // No new info from canonical type
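
A hedged sketch of the diagnostic behavior this enables: when a sugared type and another argument in the same diagnostic print identically, the extra desugared comparison now triggers the "(aka ...)" form instead of two indistinguishable names:

    // typedef int T;
    // before: cannot convert 'int' to 'int'
    // after:  cannot convert 'T' (aka 'int') to 'int'
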
@@ -228,7 +236,7 @@ void clang::FormatASTNodeDiagnosticArgument(
unsigned NumPrevArgs,
SmallVectorImpl<char> &Output,
void *Cookie,
- SmallVectorImpl<intptr_t> &QualTypeVals) {
+ ArrayRef<intptr_t> QualTypeVals) {
ASTContext &Context = *static_cast<ASTContext*>(Cookie);
std::string S;
@@ -286,7 +294,7 @@ void clang::FormatASTNodeDiagnosticArgument(
if (DC->isTranslationUnit()) {
// FIXME: Get these strings from some localized place
- if (Context.getLangOptions().CPlusPlus)
+ if (Context.getLangOpts().CPlusPlus)
S = "the global namespace";
else
S = "the global scope";
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
index af66b04..3879907 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
@@ -25,9 +25,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include <deque>
-using namespace clang;
-
-namespace {
+namespace clang {
class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>,
public DeclVisitor<ASTNodeImporter, Decl *>,
public StmtVisitor<ASTNodeImporter, Stmt *> {
@@ -80,7 +78,7 @@ namespace {
QualType VisitObjCObjectType(const ObjCObjectType *T);
QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
- // Importing declarations
+ // Importing declarations
bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
DeclContext *&LexicalDC, DeclarationName &Name,
SourceLocation &Loc);
@@ -88,10 +86,33 @@ namespace {
void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
DeclarationNameInfo& To);
void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+
+ /// \brief What we should import from the definition.
+ enum ImportDefinitionKind {
+ /// \brief Import the default subset of the definition, which might be
+ /// nothing (if minimal import is set) or might be everything (if minimal
+ /// import is not set).
+ IDK_Default,
+ /// \brief Import everything.
+ IDK_Everything,
+ /// \brief Import only the bare bones needed to establish a valid
+ /// DeclContext.
+ IDK_Basic
+ };
+
+ bool shouldForceImportDeclContext(ImportDefinitionKind IDK) {
+ return IDK == IDK_Everything ||
+ (IDK == IDK_Default && !Importer.isMinimalImport());
+ }
+
bool ImportDefinition(RecordDecl *From, RecordDecl *To,
- bool ForceImport = false);
+ ImportDefinitionKind Kind = IDK_Default);
bool ImportDefinition(EnumDecl *From, EnumDecl *To,
- bool ForceImport = false);
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
TemplateParameterList *ImportTemplateParameterList(
TemplateParameterList *Params);
TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
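
A short sketch of how the ImportDefinitionKind knob added above reads at call sites (all names from this diff):

    // IDK_Default: honor minimal-import mode via shouldForceImportDeclContext().
    ImportDefinition(FromRecord, ToRecord);
    // IDK_Everything: always pull in the members, even for minimal importers.
    ImportDefinition(FromRecord, ToRecord, IDK_Everything);
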
@@ -102,6 +123,7 @@ namespace {
bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord);
bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
Decl *VisitDecl(Decl *D);
+ Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
Decl *VisitNamespaceDecl(NamespaceDecl *D);
Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
Decl *VisitTypedefDecl(TypedefDecl *D);
@@ -128,8 +150,6 @@ namespace {
Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
- Decl *VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
- Decl *VisitObjCClassDecl(ObjCClassDecl *D);
Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
@@ -154,6 +174,7 @@ namespace {
Expr *VisitCStyleCastExpr(CStyleCastExpr *E);
};
}
+using namespace clang;
//----------------------------------------------------------------------------
// Structural Equivalence
@@ -304,6 +325,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return IsSameValue(*Arg1.getAsIntegral(), *Arg2.getAsIntegral());
case TemplateArgument::Declaration:
+ if (!Arg1.getAsDecl() || !Arg2.getAsDecl())
+ return !Arg1.getAsDecl() && !Arg2.getAsDecl();
return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl());
case TemplateArgument::Template:
@@ -334,7 +357,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
llvm_unreachable("Invalid template argument kind");
- return true;
}
/// \brief Determine structural equivalence for the common part of array
@@ -945,10 +967,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Base1->getType(), Base2->getType())) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
- Context.Diag2(Base2->getSourceRange().getBegin(), diag::note_odr_base)
+ Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
<< Base2->getType()
<< Base2->getSourceRange();
- Context.Diag1(Base1->getSourceRange().getBegin(), diag::note_odr_base)
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
<< Base1->getType()
<< Base1->getSourceRange();
return false;
@@ -958,10 +980,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Base1->isVirtual() != Base2->isVirtual()) {
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
- Context.Diag2(Base2->getSourceRange().getBegin(),
+ Context.Diag2(Base2->getLocStart(),
diag::note_odr_virtual_base)
<< Base2->isVirtual() << Base2->getSourceRange();
- Context.Diag1(Base1->getSourceRange().getBegin(), diag::note_odr_base)
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
<< Base1->isVirtual()
<< Base1->getSourceRange();
return false;
@@ -971,7 +993,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.C2.getTypeDeclType(D2);
const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
- Context.Diag1(Base1->getSourceRange().getBegin(), diag::note_odr_base)
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
<< Base1->getType()
<< Base1->getSourceRange();
Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
@@ -1324,82 +1346,43 @@ QualType ASTNodeImporter::VisitType(const Type *T) {
QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
switch (T->getKind()) {
- case BuiltinType::Void: return Importer.getToContext().VoidTy;
- case BuiltinType::Bool: return Importer.getToContext().BoolTy;
-
+#define SHARED_SINGLETON_TYPE(Expansion)
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id: return Importer.getToContext().SingletonId;
+#include "clang/AST/BuiltinTypes.def"
+
+ // FIXME: for Char16, Char32, and NullPtr, make sure that the "to"
+ // context supports C++.
+
+ // FIXME: for ObjCId, ObjCClass, and ObjCSel, make sure that the "to"
+ // context supports ObjC.
+
case BuiltinType::Char_U:
// The context we're importing from has an unsigned 'char'. If we're
// importing into a context with a signed 'char', translate to
// 'unsigned char' instead.
- if (Importer.getToContext().getLangOptions().CharIsSigned)
+ if (Importer.getToContext().getLangOpts().CharIsSigned)
return Importer.getToContext().UnsignedCharTy;
return Importer.getToContext().CharTy;
- case BuiltinType::UChar: return Importer.getToContext().UnsignedCharTy;
-
- case BuiltinType::Char16:
- // FIXME: Make sure that the "to" context supports C++!
- return Importer.getToContext().Char16Ty;
-
- case BuiltinType::Char32:
- // FIXME: Make sure that the "to" context supports C++!
- return Importer.getToContext().Char32Ty;
-
- case BuiltinType::UShort: return Importer.getToContext().UnsignedShortTy;
- case BuiltinType::UInt: return Importer.getToContext().UnsignedIntTy;
- case BuiltinType::ULong: return Importer.getToContext().UnsignedLongTy;
- case BuiltinType::ULongLong:
- return Importer.getToContext().UnsignedLongLongTy;
- case BuiltinType::UInt128: return Importer.getToContext().UnsignedInt128Ty;
-
case BuiltinType::Char_S:
    // The context we're importing from has a signed 'char'. If we're
    // importing into a context with an unsigned 'char', translate to
    // 'signed char' instead.
- if (!Importer.getToContext().getLangOptions().CharIsSigned)
+ if (!Importer.getToContext().getLangOpts().CharIsSigned)
return Importer.getToContext().SignedCharTy;
return Importer.getToContext().CharTy;
- case BuiltinType::SChar: return Importer.getToContext().SignedCharTy;
case BuiltinType::WChar_S:
case BuiltinType::WChar_U:
// FIXME: If not in C++, shall we translate to the C equivalent of
// wchar_t?
return Importer.getToContext().WCharTy;
-
- case BuiltinType::Short : return Importer.getToContext().ShortTy;
- case BuiltinType::Int : return Importer.getToContext().IntTy;
- case BuiltinType::Long : return Importer.getToContext().LongTy;
- case BuiltinType::LongLong : return Importer.getToContext().LongLongTy;
- case BuiltinType::Int128 : return Importer.getToContext().Int128Ty;
- case BuiltinType::Half: return Importer.getToContext().HalfTy;
- case BuiltinType::Float: return Importer.getToContext().FloatTy;
- case BuiltinType::Double: return Importer.getToContext().DoubleTy;
- case BuiltinType::LongDouble: return Importer.getToContext().LongDoubleTy;
-
- case BuiltinType::NullPtr:
- // FIXME: Make sure that the "to" context supports C++0x!
- return Importer.getToContext().NullPtrTy;
-
- case BuiltinType::Overload: return Importer.getToContext().OverloadTy;
- case BuiltinType::Dependent: return Importer.getToContext().DependentTy;
- case BuiltinType::UnknownAny: return Importer.getToContext().UnknownAnyTy;
- case BuiltinType::BoundMember: return Importer.getToContext().BoundMemberTy;
-
- case BuiltinType::ObjCId:
- // FIXME: Make sure that the "to" context supports Objective-C!
- return Importer.getToContext().ObjCBuiltinIdTy;
-
- case BuiltinType::ObjCClass:
- return Importer.getToContext().ObjCBuiltinClassTy;
-
- case BuiltinType::ObjCSel:
- return Importer.getToContext().ObjCBuiltinSelTy;
}
-
- return QualType();
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
}
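
The hand-written per-kind cases above are now generated from clang/AST/BuiltinTypes.def via the X-macro idiom; a minimal standalone C++ sketch of the same technique (the .def contents are inlined here and hypothetical):

    #define MY_BUILTIN_TYPES(X) X(Void) X(Bool) X(Int)
    enum Kind {
    #define HANDLE(Name) Name,
      MY_BUILTIN_TYPES(HANDLE)
    #undef HANDLE
    };
    const char *kindName(Kind K) {
      switch (K) {
    #define HANDLE(Name) case Name: return #Name;
      MY_BUILTIN_TYPES(HANDLE)
    #undef HANDLE
      }
      return "unknown";
    }

Each expansion site defines the per-entry macro, includes the list, and undefines it again, so the enum and the switch can never drift out of sync with the list.
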
QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
@@ -1600,7 +1583,11 @@ QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
if (!ToExpr)
return QualType();
- return Importer.getToContext().getDecltypeType(ToExpr);
+ QualType UnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (UnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType);
}
QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
@@ -1813,11 +1800,11 @@ ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
To.setNamedTypeInfo(Importer.Import(FromTInfo));
return;
}
- llvm_unreachable("Unknown name kind.");
}
+ llvm_unreachable("Unknown name kind.");
}
-void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
+void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
if (Importer.isMinimalImport() && !ForceImport) {
Importer.ImportContext(FromDC);
return;
@@ -1831,16 +1818,75 @@ void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
}
bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
- bool ForceImport) {
- if (To->getDefinition() || To->isBeingDefined())
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition() || To->isBeingDefined()) {
+ if (Kind == IDK_Everything)
+ ImportDeclContext(From, /*ForceImport=*/true);
+
return false;
+ }
To->startDefinition();
// Add base classes.
if (CXXRecordDecl *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
CXXRecordDecl *FromCXX = cast<CXXRecordDecl>(From);
-
+
+ struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data();
+ struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data();
+ ToData.UserDeclaredConstructor = FromData.UserDeclaredConstructor;
+ ToData.UserDeclaredCopyConstructor = FromData.UserDeclaredCopyConstructor;
+ ToData.UserDeclaredMoveConstructor = FromData.UserDeclaredMoveConstructor;
+ ToData.UserDeclaredCopyAssignment = FromData.UserDeclaredCopyAssignment;
+ ToData.UserDeclaredMoveAssignment = FromData.UserDeclaredMoveAssignment;
+ ToData.UserDeclaredDestructor = FromData.UserDeclaredDestructor;
+ ToData.Aggregate = FromData.Aggregate;
+ ToData.PlainOldData = FromData.PlainOldData;
+ ToData.Empty = FromData.Empty;
+ ToData.Polymorphic = FromData.Polymorphic;
+ ToData.Abstract = FromData.Abstract;
+ ToData.IsStandardLayout = FromData.IsStandardLayout;
+ ToData.HasNoNonEmptyBases = FromData.HasNoNonEmptyBases;
+ ToData.HasPrivateFields = FromData.HasPrivateFields;
+ ToData.HasProtectedFields = FromData.HasProtectedFields;
+ ToData.HasPublicFields = FromData.HasPublicFields;
+ ToData.HasMutableFields = FromData.HasMutableFields;
+ ToData.HasOnlyCMembers = FromData.HasOnlyCMembers;
+ ToData.HasTrivialDefaultConstructor = FromData.HasTrivialDefaultConstructor;
+ ToData.HasConstexprNonCopyMoveConstructor
+ = FromData.HasConstexprNonCopyMoveConstructor;
+ ToData.DefaultedDefaultConstructorIsConstexpr
+ = FromData.DefaultedDefaultConstructorIsConstexpr;
+ ToData.DefaultedCopyConstructorIsConstexpr
+ = FromData.DefaultedCopyConstructorIsConstexpr;
+ ToData.DefaultedMoveConstructorIsConstexpr
+ = FromData.DefaultedMoveConstructorIsConstexpr;
+ ToData.HasConstexprDefaultConstructor
+ = FromData.HasConstexprDefaultConstructor;
+ ToData.HasConstexprCopyConstructor = FromData.HasConstexprCopyConstructor;
+ ToData.HasConstexprMoveConstructor = FromData.HasConstexprMoveConstructor;
+ ToData.HasTrivialCopyConstructor = FromData.HasTrivialCopyConstructor;
+ ToData.HasTrivialMoveConstructor = FromData.HasTrivialMoveConstructor;
+ ToData.HasTrivialCopyAssignment = FromData.HasTrivialCopyAssignment;
+ ToData.HasTrivialMoveAssignment = FromData.HasTrivialMoveAssignment;
+ ToData.HasTrivialDestructor = FromData.HasTrivialDestructor;
+ ToData.HasIrrelevantDestructor = FromData.HasIrrelevantDestructor;
+ ToData.HasNonLiteralTypeFieldsOrBases
+ = FromData.HasNonLiteralTypeFieldsOrBases;
+ // ComputedVisibleConversions not imported.
+ ToData.UserProvidedDefaultConstructor
+ = FromData.UserProvidedDefaultConstructor;
+ ToData.DeclaredDefaultConstructor = FromData.DeclaredDefaultConstructor;
+ ToData.DeclaredCopyConstructor = FromData.DeclaredCopyConstructor;
+ ToData.DeclaredMoveConstructor = FromData.DeclaredMoveConstructor;
+ ToData.DeclaredCopyAssignment = FromData.DeclaredCopyAssignment;
+ ToData.DeclaredMoveAssignment = FromData.DeclaredMoveAssignment;
+ ToData.DeclaredDestructor = FromData.DeclaredDestructor;
+ ToData.FailedImplicitMoveConstructor
+ = FromData.FailedImplicitMoveConstructor;
+ ToData.FailedImplicitMoveAssignment = FromData.FailedImplicitMoveAssignment;
+ ToData.IsLambda = FromData.IsLambda;
+
SmallVector<CXXBaseSpecifier *, 4> Bases;
for (CXXRecordDecl::base_class_iterator
Base1 = FromCXX->bases_begin(),
@@ -1871,15 +1917,20 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
ToCXX->setBases(Bases.data(), Bases.size());
}
- ImportDeclContext(From, ForceImport);
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From, /*ForceImport=*/true);
+
To->completeDefinition();
return false;
}
bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
- bool ForceImport) {
- if (To->getDefinition() || To->isBeingDefined())
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition() || To->isBeingDefined()) {
+ if (Kind == IDK_Everything)
+ ImportDeclContext(From, /*ForceImport=*/true);
return false;
+ }
To->startDefinition();
@@ -1890,8 +1941,9 @@ bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
QualType ToPromotionType = Importer.Import(From->getPromotionType());
if (ToPromotionType.isNull())
return true;
-
- ImportDeclContext(From, ForceImport);
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From, /*ForceImport=*/true);
// FIXME: we might need to merge the number of positive or negative bits
// if the enumerator lists don't match.
@@ -1983,7 +2035,6 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
}
llvm_unreachable("Invalid template argument kind");
- return TemplateArgument();
}
bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
@@ -2029,6 +2080,15 @@ Decl *ASTNodeImporter::VisitDecl(Decl *D) {
return 0;
}
+Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ TranslationUnitDecl *ToD =
+ Importer.getToContext().getTranslationUnitDecl();
+
+ Importer.Imported(D, ToD);
+
+ return ToD;
+}
+
Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
// Import the major distinguishing characteristics of this namespace.
DeclContext *DC, *LexicalDC;
@@ -2074,10 +2134,12 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
NamespaceDecl *ToNamespace = MergeWithNamespace;
if (!ToNamespace) {
ToNamespace = NamespaceDecl::Create(Importer.getToContext(), DC,
+ D->isInline(),
Importer.Import(D->getLocStart()),
- Loc, Name.getAsIdentifierInfo());
+ Loc, Name.getAsIdentifierInfo(),
+ /*PrevDecl=*/0);
ToNamespace->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(ToNamespace);
+ LexicalDC->addDeclInternal(ToNamespace);
// If this is an anonymous namespace, register it as the anonymous
// namespace within its context.
@@ -2156,7 +2218,7 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
ToTypedef->setAccess(D->getAccess());
ToTypedef->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToTypedef);
- LexicalDC->addDecl(ToTypedef);
+ LexicalDC->addDeclInternal(ToTypedef);
return ToTypedef;
}
@@ -2183,7 +2245,7 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
if (!SearchName && D->getTypedefNameForAnonDecl()) {
SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
IDNS = Decl::IDNS_Ordinary;
- } else if (Importer.getToContext().getLangOptions().CPlusPlus)
+ } else if (Importer.getToContext().getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Ordinary;
// We may already have an enum of the same name; try to find and match it.
@@ -2227,7 +2289,7 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, D2);
- LexicalDC->addDecl(D2);
+ LexicalDC->addDeclInternal(D2);
// Import the integer type.
QualType ToIntegerType = Importer.Import(D->getIntegerType());
@@ -2268,7 +2330,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (!SearchName && D->getTypedefNameForAnonDecl()) {
SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
IDNS = Decl::IDNS_Ordinary;
- } else if (Importer.getToContext().getLangOptions().CPlusPlus)
+ } else if (Importer.getToContext().getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Ordinary;
// We may already have a record of the same name; try to find and match it.
@@ -2332,12 +2394,12 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(D2);
+ LexicalDC->addDeclInternal(D2);
}
Importer.Imported(D, D2);
- if (D->isCompleteDefinition() && ImportDefinition(D, D2))
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
return 0;
return D2;
@@ -2389,7 +2451,7 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
ToEnumerator->setAccess(D->getAccess());
ToEnumerator->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToEnumerator);
- LexicalDC->addDecl(ToEnumerator);
+ LexicalDC->addDeclInternal(ToEnumerator);
return ToEnumerator;
}
@@ -2425,7 +2487,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Sema::IsOverload out to the AST library.
// Function overloading is okay in C++.
- if (Importer.getToContext().getLangOptions().CPlusPlus)
+ if (Importer.getToContext().getLangOpts().CPlusPlus)
continue;
// Complain about inconsistent function types.
@@ -2530,14 +2592,14 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Set the parameters.
for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
Parameters[I]->setOwningFunction(ToFunction);
- ToFunction->addDecl(Parameters[I]);
+ ToFunction->addDeclInternal(Parameters[I]);
}
ToFunction->setParams(Parameters);
// FIXME: Other bits to merge?
// Add this function to the lexical context.
- LexicalDC->addDecl(ToFunction);
+ LexicalDC->addDeclInternal(ToFunction);
return ToFunction;
}
@@ -2605,7 +2667,7 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
if (ToField->hasInClassInitializer())
ToField->setInClassInitializer(D->getInClassInitializer());
Importer.Imported(D, ToField);
- LexicalDC->addDecl(ToField);
+ LexicalDC->addDeclInternal(ToField);
return ToField;
}
@@ -2661,7 +2723,7 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
ToIndirectField->setAccess(D->getAccess());
ToIndirectField->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToIndirectField);
- LexicalDC->addDecl(ToIndirectField);
+ LexicalDC->addDeclInternal(ToIndirectField);
return ToIndirectField;
}
@@ -2710,7 +2772,7 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
BitWidth, D->getSynthesize());
ToIvar->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToIvar);
- LexicalDC->addDecl(ToIvar);
+ LexicalDC->addDeclInternal(ToIvar);
return ToIvar;
}
@@ -2791,6 +2853,11 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
} else {
Expr *Init = Importer.Import(DDef->getInit());
MergeWithVar->setInit(Init);
+ if (DDef->isInitKnownICE()) {
+ EvaluatedStmt *Eval = MergeWithVar->ensureEvaluatedStmt();
+ Eval->CheckedICE = true;
+ Eval->IsICE = DDef->isInitICE();
+ }
}
}
@@ -2823,7 +2890,7 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
ToVar->setAccess(D->getAccess());
ToVar->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToVar);
- LexicalDC->addDecl(ToVar);
+ LexicalDC->addDeclInternal(ToVar);
// Merge the initializer.
// FIXME: Can we really import any initializer? Alternatively, we could force
@@ -3002,7 +3069,7 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Set the parameters.
for (unsigned I = 0, N = ToParams.size(); I != N; ++I) {
ToParams[I]->setOwningFunction(ToMethod);
- ToMethod->addDecl(ToParams[I]);
+ ToMethod->addDeclInternal(ToParams[I]);
}
SmallVector<SourceLocation, 12> SelLocs;
D->getSelectorLocs(SelLocs);
@@ -3010,7 +3077,7 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
ToMethod->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToMethod);
- LexicalDC->addDecl(ToMethod);
+ LexicalDC->addDeclInternal(ToMethod);
return ToMethod;
}
@@ -3037,9 +3104,11 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
Loc,
Importer.Import(D->getCategoryNameLoc()),
Name.getAsIdentifierInfo(),
- ToInterface);
+ ToInterface,
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc()));
ToCategory->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(ToCategory);
+ LexicalDC->addDeclInternal(ToCategory);
Importer.Imported(D, ToCategory);
// Import protocols
@@ -3084,7 +3153,59 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
return ToCategory;
}
+bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
+ ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition()) {
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From);
+ return false;
+ }
+
+ // Start the protocol definition
+ To->startDefinition();
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCProtocolDecl::protocol_loc_iterator
+ FromProtoLoc = From->protocol_loc_begin();
+ for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(),
+ FromProtoEnd = From->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return true;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ To->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ if (shouldForceImportDeclContext(Kind)) {
+ // Import all of the members of this protocol.
+ ImportDeclContext(From, /*ForceImport=*/true);
+ }
+ return false;
+}
+
Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ // If this protocol has a definition in the translation unit we're coming
+ // from, but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ObjCProtocolDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
// Import the major distinguishing characteristics of a protocol.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -3104,48 +3225,133 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
}
ObjCProtocolDecl *ToProto = MergeWithProtocol;
- if (!ToProto || ToProto->isForwardDecl()) {
- if (!ToProto) {
- ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC,
- Name.getAsIdentifierInfo(), Loc,
- Importer.Import(D->getAtStartLoc()));
- ToProto->setForwardDecl(D->isForwardDecl());
- ToProto->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(ToProto);
- }
- Importer.Imported(D, ToProto);
+ if (!ToProto) {
+ ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC,
+ Name.getAsIdentifierInfo(), Loc,
+ Importer.Import(D->getAtStartLoc()),
+ /*PrevDecl=*/0);
+ ToProto->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToProto);
+ }
+
+ Importer.Imported(D, ToProto);
- // Import protocols
- SmallVector<ObjCProtocolDecl *, 4> Protocols;
- SmallVector<SourceLocation, 4> ProtocolLocs;
- ObjCProtocolDecl::protocol_loc_iterator
- FromProtoLoc = D->protocol_loc_begin();
- for (ObjCProtocolDecl::protocol_iterator FromProto = D->protocol_begin(),
- FromProtoEnd = D->protocol_end();
- FromProto != FromProtoEnd;
- ++FromProto, ++FromProtoLoc) {
- ObjCProtocolDecl *ToProto
- = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- return 0;
- Protocols.push_back(ToProto);
- ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto))
+ return 0;
+
+ return ToProto;
+}
+
+bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
+ ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition()) {
+ // Check consistency of superclass.
+ ObjCInterfaceDecl *FromSuper = From->getSuperClass();
+ if (FromSuper) {
+ FromSuper = cast_or_null<ObjCInterfaceDecl>(Importer.Import(FromSuper));
+ if (!FromSuper)
+ return true;
}
- // FIXME: If we're merging, make sure that the protocol list is the same.
- ToProto->setProtocolList(Protocols.data(), Protocols.size(),
- ProtocolLocs.data(), Importer.getToContext());
- } else {
- Importer.Imported(D, ToProto);
+ ObjCInterfaceDecl *ToSuper = To->getSuperClass();
+ if ((bool)FromSuper != (bool)ToSuper ||
+ (FromSuper && !declaresSameEntity(FromSuper, ToSuper))) {
+ Importer.ToDiag(To->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << To->getDeclName();
+ if (ToSuper)
+ Importer.ToDiag(To->getSuperClassLoc(), diag::note_odr_objc_superclass)
+ << To->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(To->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (From->getSuperClass())
+ Importer.FromDiag(From->getSuperClassLoc(),
+ diag::note_odr_objc_superclass)
+ << From->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(From->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ }
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From);
+ return false;
}
+
+ // Start the definition.
+ To->startDefinition();
+
+ // If this class has a superclass, import it.
+ if (From->getSuperClass()) {
+ ObjCInterfaceDecl *Super = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(From->getSuperClass()));
+ if (!Super)
+ return true;
+
+ To->setSuperClass(Super);
+ To->setSuperClassLoc(Importer.Import(From->getSuperClassLoc()));
+ }
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCInterfaceDecl::protocol_loc_iterator
+ FromProtoLoc = From->protocol_loc_begin();
+
+ for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(),
+ FromProtoEnd = From->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return true;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ To->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ // Import categories. When the categories themselves are imported, they'll
+ // hook themselves into this interface.
+ for (ObjCCategoryDecl *FromCat = From->getCategoryList(); FromCat;
+ FromCat = FromCat->getNextClassCategory())
+ Importer.Import(FromCat);
- // Import all of the members of this protocol.
- ImportDeclContext(D);
+ // If we have an @implementation, import it as well.
+ if (From->getImplementation()) {
+ ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
+ Importer.Import(From->getImplementation()));
+ if (!Impl)
+ return true;
+
+ To->setImplementation(Impl);
+ }
- return ToProto;
+ if (shouldForceImportDeclContext(Kind)) {
+ // Import all of the members of this class.
+ ImportDeclContext(From, /*ForceImport=*/true);
+ }
+ return false;
}
Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ // If this class has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ObjCInterfaceDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
// Import the major distinguishing characteristics of an @interface.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -3153,6 +3359,7 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
return 0;
+ // Look for an existing interface with the same name.
ObjCInterfaceDecl *MergeWithIface = 0;
llvm::SmallVector<NamedDecl *, 2> FoundDecls;
DC->localUncachedLookup(Name, FoundDecls);
@@ -3164,105 +3371,22 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
break;
}
+ // Create an interface declaration, if one does not already exist.
ObjCInterfaceDecl *ToIface = MergeWithIface;
- if (!ToIface || ToIface->isForwardDecl()) {
- if (!ToIface) {
- ToIface = ObjCInterfaceDecl::Create(Importer.getToContext(), DC,
- Importer.Import(D->getAtStartLoc()),
- Name.getAsIdentifierInfo(), Loc,
- D->isForwardDecl(),
- D->isImplicitInterfaceDecl());
- ToIface->setForwardDecl(D->isForwardDecl());
- ToIface->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(ToIface);
- }
- Importer.Imported(D, ToIface);
-
- if (D->getSuperClass()) {
- ObjCInterfaceDecl *Super
- = cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getSuperClass()));
- if (!Super)
- return 0;
-
- ToIface->setSuperClass(Super);
- ToIface->setSuperClassLoc(Importer.Import(D->getSuperClassLoc()));
- }
-
- // Import protocols
- SmallVector<ObjCProtocolDecl *, 4> Protocols;
- SmallVector<SourceLocation, 4> ProtocolLocs;
- ObjCInterfaceDecl::protocol_loc_iterator
- FromProtoLoc = D->protocol_loc_begin();
-
- // FIXME: Should we be usng all_referenced_protocol_begin() here?
- for (ObjCInterfaceDecl::protocol_iterator FromProto = D->protocol_begin(),
- FromProtoEnd = D->protocol_end();
- FromProto != FromProtoEnd;
- ++FromProto, ++FromProtoLoc) {
- ObjCProtocolDecl *ToProto
- = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- return 0;
- Protocols.push_back(ToProto);
- ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
- }
-
- // FIXME: If we're merging, make sure that the protocol list is the same.
- ToIface->setProtocolList(Protocols.data(), Protocols.size(),
- ProtocolLocs.data(), Importer.getToContext());
-
- // Import @end range
- ToIface->setAtEndRange(Importer.Import(D->getAtEndRange()));
- } else {
- Importer.Imported(D, ToIface);
-
- // Check for consistency of superclasses.
- DeclarationName FromSuperName, ToSuperName;
- if (D->getSuperClass())
- FromSuperName = Importer.Import(D->getSuperClass()->getDeclName());
- if (ToIface->getSuperClass())
- ToSuperName = ToIface->getSuperClass()->getDeclName();
- if (FromSuperName != ToSuperName) {
- Importer.ToDiag(ToIface->getLocation(),
- diag::err_odr_objc_superclass_inconsistent)
- << ToIface->getDeclName();
- if (ToIface->getSuperClass())
- Importer.ToDiag(ToIface->getSuperClassLoc(),
- diag::note_odr_objc_superclass)
- << ToIface->getSuperClass()->getDeclName();
- else
- Importer.ToDiag(ToIface->getLocation(),
- diag::note_odr_objc_missing_superclass);
- if (D->getSuperClass())
- Importer.FromDiag(D->getSuperClassLoc(),
- diag::note_odr_objc_superclass)
- << D->getSuperClass()->getDeclName();
- else
- Importer.FromDiag(D->getLocation(),
- diag::note_odr_objc_missing_superclass);
- return 0;
- }
+ if (!ToIface) {
+ ToIface = ObjCInterfaceDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()),
+ Name.getAsIdentifierInfo(),
+                                        /*PrevDecl=*/0, Loc,
+ D->isImplicitInterfaceDecl());
+ ToIface->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToIface);
}
+ Importer.Imported(D, ToIface);
- // Import categories. When the categories themselves are imported, they'll
- // hook themselves into this interface.
- for (ObjCCategoryDecl *FromCat = D->getCategoryList(); FromCat;
- FromCat = FromCat->getNextClassCategory())
- Importer.Import(FromCat);
-
- // Import all of the members of this class.
- ImportDeclContext(D);
-
- // If we have an @implementation, import it as well.
- if (D->getImplementation()) {
- ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
- Importer.Import(D->getImplementation()));
- if (!Impl)
- return 0;
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToIface))
+ return 0;
- ToIface->setImplementation(Impl);
- }
-
return ToIface;
}
@@ -3278,11 +3402,13 @@ Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
if (!DC)
return 0;
+ SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc());
ToImpl = ObjCCategoryImplDecl::Create(Importer.getToContext(), DC,
Importer.Import(D->getIdentifier()),
Category->getClassInterface(),
Importer.Import(D->getLocation()),
- Importer.Import(D->getAtStartLoc()));
+ Importer.Import(D->getAtStartLoc()),
+ CategoryNameLoc);
DeclContext *LexicalDC = DC;
if (D->getDeclContext() != D->getLexicalDeclContext()) {
@@ -3293,7 +3419,7 @@ Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
ToImpl->setLexicalDeclContext(LexicalDC);
}
- LexicalDC->addDecl(ToImpl);
+ LexicalDC->addDeclInternal(ToImpl);
Category->setImplementation(ToImpl);
}
@@ -3326,7 +3452,9 @@ Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
Importer.ImportContext(D->getDeclContext()),
Iface, Super,
Importer.Import(D->getLocation()),
- Importer.Import(D->getAtStartLoc()));
+ Importer.Import(D->getAtStartLoc()),
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc()));
if (D->getDeclContext() != D->getLexicalDeclContext()) {
DeclContext *LexicalDC
@@ -3346,7 +3474,7 @@ Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
if ((Super && !Impl->getSuperClass()) ||
(!Super && Impl->getSuperClass()) ||
(Super && Impl->getSuperClass() &&
- Super->getCanonicalDecl() != Impl->getSuperClass())) {
+ !declaresSameEntity(Super->getCanonicalDecl(), Impl->getSuperClass()))) {
Importer.ToDiag(Impl->getLocation(),
diag::err_odr_objc_superclass_inconsistent)
<< Iface->getDeclName();
@@ -3418,11 +3546,12 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
= ObjCPropertyDecl::Create(Importer.getToContext(), DC, Loc,
Name.getAsIdentifierInfo(),
Importer.Import(D->getAtLoc()),
+ Importer.Import(D->getLParenLoc()),
T,
D->getPropertyImplementation());
Importer.Imported(D, ToProperty);
ToProperty->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(ToProperty);
+ LexicalDC->addDeclInternal(ToProperty);
ToProperty->setPropertyAttributes(D->getPropertyAttributes());
ToProperty->setPropertyAttributesAsWritten(
@@ -3481,7 +3610,7 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
Importer.Import(D->getPropertyIvarDeclLoc()));
ToImpl->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToImpl);
- LexicalDC->addDecl(ToImpl);
+ LexicalDC->addDeclInternal(ToImpl);
} else {
// Check that we have the same kind of property implementation (@synthesize
// vs. @dynamic).
@@ -3519,79 +3648,6 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
return ToImpl;
}
-Decl *
-ASTNodeImporter::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D) {
- // Import the context of this declaration.
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return 0;
-
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return 0;
- }
-
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- SmallVector<ObjCProtocolDecl *, 4> Protocols;
- SmallVector<SourceLocation, 4> Locations;
- ObjCForwardProtocolDecl::protocol_loc_iterator FromProtoLoc
- = D->protocol_loc_begin();
- for (ObjCForwardProtocolDecl::protocol_iterator FromProto
- = D->protocol_begin(), FromProtoEnd = D->protocol_end();
- FromProto != FromProtoEnd;
- ++FromProto, ++FromProtoLoc) {
- ObjCProtocolDecl *ToProto
- = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- continue;
-
- Protocols.push_back(ToProto);
- Locations.push_back(Importer.Import(*FromProtoLoc));
- }
-
- ObjCForwardProtocolDecl *ToForward
- = ObjCForwardProtocolDecl::Create(Importer.getToContext(), DC, Loc,
- Protocols.data(), Protocols.size(),
- Locations.data());
- ToForward->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(ToForward);
- Importer.Imported(D, ToForward);
- return ToForward;
-}
-
-Decl *ASTNodeImporter::VisitObjCClassDecl(ObjCClassDecl *D) {
- // Import the context of this declaration.
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return 0;
-
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return 0;
- }
-
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
- ObjCClassDecl::ObjCClassRef *From = D->getForwardDecl();
- ObjCInterfaceDecl *ToIface
- = cast_or_null<ObjCInterfaceDecl>(Importer.Import(From->getInterface()));
- ObjCClassDecl *ToClass = ObjCClassDecl::Create(Importer.getToContext(), DC,
- Loc,
- ToIface,
- Importer.Import(From->getLocation()));
-
- ToClass->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(ToClass);
- Importer.Imported(D, ToClass);
- return ToClass;
-}
-
Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
// For template arguments, we adopt the translation unit as our declaration
// context. This context will be fixed when the actual template declaration
@@ -3749,7 +3805,7 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(D2);
+ LexicalDC->addDeclInternal(D2);
// Note the relationship between the class templates.
Importer.Imported(D, D2);
@@ -3844,7 +3900,7 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// Add the specialization to this context.
D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDecl(D2);
+ LexicalDC->addDeclInternal(D2);
}
Importer.Imported(D, D2);
@@ -3891,7 +3947,9 @@ Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(),
Importer.Import(E->getQualifierLoc()),
+ Importer.Import(E->getTemplateKeywordLoc()),
ToD,
+ E->refersToEnclosingLocal(),
Importer.Import(E->getLocation()),
T, E->getValueKind(),
FoundD,
@@ -4116,7 +4174,7 @@ TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
return 0;
return ToContext.getTrivialTypeSourceInfo(T,
- FromTSI->getTypeLoc().getSourceRange().getBegin());
+ FromTSI->getTypeLoc().getLocStart());
}
Decl *ASTImporter::Import(Decl *FromD) {
@@ -4170,7 +4228,55 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (!FromDC)
return FromDC;
- return cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
+ DeclContext *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
+ if (!ToDC)
+ return 0;
+
+ // When we're using a record/enum/Objective-C class/protocol as a context, we
+ // need it to have a definition.
+ if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
+ RecordDecl *FromRecord = cast<RecordDecl>(FromDC);
+ if (ToRecord->isCompleteDefinition()) {
+ // Do nothing.
+ } else if (FromRecord->isCompleteDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromRecord, ToRecord,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToRecord);
+ }
+ } else if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
+ EnumDecl *FromEnum = cast<EnumDecl>(FromDC);
+ if (ToEnum->isCompleteDefinition()) {
+ // Do nothing.
+ } else if (FromEnum->isCompleteDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromEnum, ToEnum,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToEnum);
+ }
+ } else if (ObjCInterfaceDecl *ToClass = dyn_cast<ObjCInterfaceDecl>(ToDC)) {
+ ObjCInterfaceDecl *FromClass = cast<ObjCInterfaceDecl>(FromDC);
+ if (ToClass->getDefinition()) {
+ // Do nothing.
+ } else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromDef, ToClass,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToClass);
+ }
+ } else if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(ToDC)) {
+ ObjCProtocolDecl *FromProto = cast<ObjCProtocolDecl>(FromDC);
+ if (ToProto->getDefinition()) {
+ // Do nothing.
+ } else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromDef, ToProto,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToProto);
+ }
+ }
+
+ return ToDC;
}
Expr *ASTImporter::Import(Expr *FromE) {
@@ -4244,7 +4350,6 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
}
llvm_unreachable("Invalid nested name specifier kind");
- return 0;
}
NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
@@ -4339,7 +4444,6 @@ TemplateName ASTImporter::Import(TemplateName From) {
}
llvm_unreachable("Invalid template name kind");
- return TemplateName();
}
SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
@@ -4413,7 +4517,7 @@ void ASTImporter::ImportDefinition(Decl *From) {
if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(To)) {
if (!ToRecord->getDefinition()) {
Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord,
- /*ForceImport=*/true);
+ ASTNodeImporter::IDK_Everything);
return;
}
}
@@ -4421,11 +4525,27 @@ void ASTImporter::ImportDefinition(Decl *From) {
if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(To)) {
if (!ToEnum->getDefinition()) {
Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum,
- /*ForceImport=*/true);
+ ASTNodeImporter::IDK_Everything);
return;
}
}
+
+ if (ObjCInterfaceDecl *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
+ if (!ToIFace->getDefinition()) {
+ Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+ if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
+ if (!ToProto->getDefinition()) {
+ Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
Importer.ImportDeclContext(FromDC, true);
}
}
@@ -4483,8 +4603,7 @@ DeclarationName ASTImporter::Import(DeclarationName FromName) {
return DeclarationName::getUsingDirectiveName();
}
- // Silence bogus GCC warning
- return DeclarationName();
+ llvm_unreachable("Invalid DeclarationName Kind!");
}
IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
@@ -4521,6 +4640,26 @@ DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) {
return FromContext.getDiagnostics().Report(Loc, DiagID);
}
+void ASTImporter::CompleteDecl(Decl *D) {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (!ID->getDefinition())
+ ID->startDefinition();
+ }
+ else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ if (!PD->getDefinition())
+ PD->startDefinition();
+ }
+ else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (!TD->getDefinition() && !TD->isBeingDefined()) {
+ TD->startDefinition();
+ TD->setCompleteDefinition(true);
+ }
+ }
+ else {
+ assert(0 && "CompleteDecl called on a Decl that can't be completed");
+ }
+}
+
Decl *ASTImporter::Imported(Decl *From, Decl *To) {
ImportedDecls[From] = To;
return To;
diff --git a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
index 3ca7d4d..cffcc65 100644
--- a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
@@ -19,4 +19,8 @@ using namespace clang;
Attr::~Attr() { }
+void InheritableAttr::anchor() { }
+
+void InheritableParamAttr::anchor() { }
+
#include "clang/AST/AttrImpl.inc"
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
index f29bfd1..2186730 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
@@ -23,12 +23,15 @@ using namespace clang;
void CXXBasePaths::ComputeDeclsFound() {
assert(NumDeclsFound == 0 && !DeclsFound &&
"Already computed the set of declarations");
-
- std::set<NamedDecl *> Decls;
- for (CXXBasePaths::paths_iterator Path = begin(), PathEnd = end();
- Path != PathEnd; ++Path)
- Decls.insert(*Path->Decls.first);
-
+
+ SmallVector<NamedDecl *, 8> Decls;
+ for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
+ Decls.push_back(*Path->Decls.first);
+
+ // Eliminate duplicated decls.
+ llvm::array_pod_sort(Decls.begin(), Decls.end());
+ Decls.erase(std::unique(Decls.begin(), Decls.end()), Decls.end());
+
NumDeclsFound = Decls.size();
DeclsFound = new NamedDecl * [NumDeclsFound];
std::copy(Decls.begin(), Decls.end(), DeclsFound);
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
index 95d52cb..399f2e4 100644
--- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/ASTMutationListener.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
@@ -47,8 +48,6 @@ static llvm::Optional<Visibility> getVisibilityOf(const Decl *D) {
case VisibilityAttr::Protected:
return ProtectedVisibility;
}
-
- return DefaultVisibility;
}
// If we're on Mac OS X, an 'availability' for Mac OS X attribute
@@ -66,17 +65,6 @@ static llvm::Optional<Visibility> getVisibilityOf(const Decl *D) {
}
typedef NamedDecl::LinkageInfo LinkageInfo;
-typedef std::pair<Linkage,Visibility> LVPair;
-
-static LVPair merge(LVPair L, LVPair R) {
- return LVPair(minLinkage(L.first, R.first),
- minVisibility(L.second, R.second));
-}
-
-static LVPair merge(LVPair L, LinkageInfo R) {
- return LVPair(minLinkage(L.first, R.linkage()),
- minVisibility(L.second, R.visibility()));
-}
namespace {
/// Flags controlling the computation of linkage and visibility.
@@ -112,11 +100,16 @@ struct LVFlags {
};
} // end anonymous namespace
+static LinkageInfo getLVForType(QualType T) {
+ std::pair<Linkage,Visibility> P = T->getLinkageAndVisibility();
+ return LinkageInfo(P.first, P.second, T->isVisibilityExplicit());
+}
+
/// \brief Get the most restrictive linkage for the types in the given
/// template parameter list.
-static LVPair
+static LinkageInfo
getLVForTemplateParameterList(const TemplateParameterList *Params) {
- LVPair LV(ExternalLinkage, DefaultVisibility);
+ LinkageInfo LV(ExternalLinkage, DefaultVisibility, false);
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
@@ -125,20 +118,20 @@ getLVForTemplateParameterList(const TemplateParameterList *Params) {
for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
QualType T = NTTP->getExpansionType(I);
if (!T->isDependentType())
- LV = merge(LV, T->getLinkageAndVisibility());
+ LV.merge(getLVForType(T));
}
continue;
}
-
+
if (!NTTP->getType()->isDependentType()) {
- LV = merge(LV, NTTP->getType()->getLinkageAndVisibility());
+ LV.merge(getLVForType(NTTP->getType()));
continue;
}
}
if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(*P)) {
- LV = merge(LV, getLVForTemplateParameterList(TTP->getTemplateParameters()));
+ LV.merge(getLVForTemplateParameterList(TTP->getTemplateParameters()));
}
}
@@ -150,10 +143,10 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags F);
/// \brief Get the most restrictive linkage for the types and
/// declarations in the given template argument list.
-static LVPair getLVForTemplateArgumentList(const TemplateArgument *Args,
- unsigned NumArgs,
- LVFlags &F) {
- LVPair LV(ExternalLinkage, DefaultVisibility);
+static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
+ unsigned NumArgs,
+ LVFlags &F) {
+ LinkageInfo LV(ExternalLinkage, DefaultVisibility, false);
for (unsigned I = 0; I != NumArgs; ++I) {
switch (Args[I].getKind()) {
@@ -161,9 +154,9 @@ static LVPair getLVForTemplateArgumentList(const TemplateArgument *Args,
case TemplateArgument::Integral:
case TemplateArgument::Expression:
break;
-
+
case TemplateArgument::Type:
- LV = merge(LV, Args[I].getAsType()->getLinkageAndVisibility());
+ LV.merge(getLVForType(Args[I].getAsType()));
break;
case TemplateArgument::Declaration:
@@ -177,15 +170,15 @@ static LVPair getLVForTemplateArgumentList(const TemplateArgument *Args,
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
- if (TemplateDecl *Template
+ if (TemplateDecl *Template
= Args[I].getAsTemplateOrTemplatePattern().getAsTemplateDecl())
- LV = merge(LV, getLVForDecl(Template, F));
+ LV.merge(getLVForDecl(Template, F));
break;
case TemplateArgument::Pack:
- LV = merge(LV, getLVForTemplateArgumentList(Args[I].pack_begin(),
- Args[I].pack_size(),
- F));
+ LV.mergeWithMin(getLVForTemplateArgumentList(Args[I].pack_begin(),
+ Args[I].pack_size(),
+ F));
break;
}
}
@@ -193,7 +186,7 @@ static LVPair getLVForTemplateArgumentList(const TemplateArgument *Args,
return LV;
}
-static LVPair
+static LinkageInfo
getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
LVFlags &F) {
return getLVForTemplateArgumentList(TArgs.data(), TArgs.size(), F);
@@ -229,14 +222,14 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// and neither explicitly declared extern nor previously
// declared to have external linkage; or
// (there is no equivalent in C99)
- if (Context.getLangOptions().CPlusPlus &&
+ if (Context.getLangOpts().CPlusPlus &&
Var->getType().isConstant(Context) &&
Var->getStorageClass() != SC_Extern &&
Var->getStorageClass() != SC_PrivateExtern) {
bool FoundExtern = false;
- for (const VarDecl *PrevVar = Var->getPreviousDeclaration();
+ for (const VarDecl *PrevVar = Var->getPreviousDecl();
PrevVar && !FoundExtern;
- PrevVar = PrevVar->getPreviousDeclaration())
+ PrevVar = PrevVar->getPreviousDecl())
if (isExternalLinkage(PrevVar->getLinkage()))
FoundExtern = true;
@@ -244,8 +237,8 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
return LinkageInfo::internal();
}
if (Var->getStorageClass() == SC_None) {
- const VarDecl *PrevVar = Var->getPreviousDeclaration();
- for (; PrevVar; PrevVar = PrevVar->getPreviousDeclaration())
+ const VarDecl *PrevVar = Var->getPreviousDecl();
+ for (; PrevVar; PrevVar = PrevVar->getPreviousDecl())
if (PrevVar->getStorageClass() == SC_PrivateExtern)
break;
if (PrevVar)
@@ -274,7 +267,8 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
if (D->isInAnonymousNamespace()) {
const VarDecl *Var = dyn_cast<VarDecl>(D);
const FunctionDecl *Func = dyn_cast<FunctionDecl>(D);
- if ((!Var || !Var->isExternC()) && (!Func || !Func->isExternC()))
+ if ((!Var || !Var->getDeclContext()->isExternCContext()) &&
+ (!Func || !Func->getDeclContext()->isExternCContext()))
return LinkageInfo::uniqueExternal();
}
@@ -285,6 +279,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// scope and no storage-class specifier, its linkage is
// external.
LinkageInfo LV;
+ LV.mergeVisibility(Context.getLangOpts().getVisibilityMode());
if (F.ConsiderVisibilityAttributes) {
if (llvm::Optional<Visibility> Vis = D->getExplicitVisibility()) {
@@ -296,10 +291,10 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
for (const DeclContext *DC = D->getDeclContext();
!isa<TranslationUnitDecl>(DC);
DC = DC->getParent()) {
- if (!isa<NamespaceDecl>(DC)) continue;
- if (llvm::Optional<Visibility> Vis
- = cast<NamespaceDecl>(DC)->getExplicitVisibility()) {
- LV.setVisibility(*Vis, false);
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND) continue;
+ if (llvm::Optional<Visibility> Vis = ND->getExplicitVisibility()) {
+ LV.setVisibility(*Vis, true);
F.ConsiderGlobalVisibility = false;
break;
}
@@ -335,18 +330,19 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
//
// Note that we don't want to make the variable non-external
// because of this, but unique-external linkage suits us.
- if (Context.getLangOptions().CPlusPlus && !Var->isExternC()) {
- LVPair TypeLV = Var->getType()->getLinkageAndVisibility();
- if (TypeLV.first != ExternalLinkage)
+ if (Context.getLangOpts().CPlusPlus &&
+ !Var->getDeclContext()->isExternCContext()) {
+ LinkageInfo TypeLV = getLVForType(Var->getType());
+ if (TypeLV.linkage() != ExternalLinkage)
return LinkageInfo::uniqueExternal();
- if (!LV.visibilityExplicit())
- LV.mergeVisibility(TypeLV.second);
+ LV.mergeVisibilityWithMin(TypeLV.visibility(),
+ TypeLV.visibilityExplicit());
}
if (Var->getStorageClass() == SC_PrivateExtern)
LV.setVisibility(HiddenVisibility, true);
- if (!Context.getLangOptions().CPlusPlus &&
+ if (!Context.getLangOpts().CPlusPlus &&
(Var->getStorageClass() == SC_Extern ||
Var->getStorageClass() == SC_PrivateExtern)) {
@@ -359,7 +355,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// specified at the prior declaration. If no prior declaration
// is visible, or if the prior declaration specifies no
// linkage, then the identifier has external linkage.
- if (const VarDecl *PrevVar = Var->getPreviousDeclaration()) {
+ if (const VarDecl *PrevVar = Var->getPreviousDecl()) {
LinkageInfo PrevLV = getLVForDecl(PrevVar, F);
if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
LV.mergeVisibility(PrevLV);
@@ -381,7 +377,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// storage-class specifier, its linkage is determined exactly
// as if it were declared with the storage-class specifier
// extern.
- if (!Context.getLangOptions().CPlusPlus &&
+ if (!Context.getLangOpts().CPlusPlus &&
(Function->getStorageClass() == SC_Extern ||
Function->getStorageClass() == SC_PrivateExtern ||
Function->getStorageClass() == SC_None)) {
@@ -394,7 +390,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// specified at the prior declaration. If no prior declaration
// is visible, or if the prior declaration specifies no
// linkage, then the identifier has external linkage.
- if (const FunctionDecl *PrevFunc = Function->getPreviousDeclaration()) {
+ if (const FunctionDecl *PrevFunc = Function->getPreviousDecl()) {
LinkageInfo PrevLV = getLVForDecl(PrevFunc, F);
if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
LV.mergeVisibility(PrevLV);
@@ -405,7 +401,8 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// unique-external linkage, it's not legally usable from outside
// this translation unit. However, we should use the C linkage
// rules instead for extern "C" declarations.
- if (Context.getLangOptions().CPlusPlus && !Function->isExternC() &&
+ if (Context.getLangOpts().CPlusPlus &&
+ !Function->getDeclContext()->isExternCContext() &&
Function->getType()->getLinkage() == UniqueExternalLinkage)
return LinkageInfo::uniqueExternal();
@@ -417,7 +414,7 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
LV.merge(getLVForDecl(specInfo->getTemplate(),
F.onlyTemplateVisibility()));
const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
- LV.merge(getLVForTemplateArgumentList(templateArgs, F));
+ LV.mergeWithMin(getLVForTemplateArgumentList(templateArgs, F));
}
}
@@ -443,14 +440,14 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// The arguments at which the template was instantiated.
const TemplateArgumentList &TemplateArgs = spec->getTemplateArgs();
- LV.merge(getLVForTemplateArgumentList(TemplateArgs, F));
+ LV.mergeWithMin(getLVForTemplateArgumentList(TemplateArgs, F));
}
}
// Consider -fvisibility unless the type has C linkage.
if (F.ConsiderGlobalVisibility)
F.ConsiderGlobalVisibility =
- (Context.getLangOptions().CPlusPlus &&
+ (Context.getLangOpts().CPlusPlus &&
!Tag->getDeclContext()->isExternCContext());
// - an enumerator belonging to an enumeration with external linkage;
@@ -486,11 +483,6 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
if (LV.linkage() != ExternalLinkage)
return LinkageInfo(LV.linkage(), DefaultVisibility, false);
- // If we didn't end up with hidden visibility, consider attributes
- // and -fvisibility.
- if (F.ConsiderGlobalVisibility)
- LV.mergeVisibility(Context.getLangOptions().getVisibilityMode());
-
return LV;
}
@@ -507,6 +499,7 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
return LinkageInfo::none();
LinkageInfo LV;
+ LV.mergeVisibility(D->getASTContext().getLangOpts().getVisibilityMode());
// The flags we're going to use to compute the class's visibility.
LVFlags ClassF = F;
@@ -548,7 +541,8 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
if (FunctionTemplateSpecializationInfo *spec
= MD->getTemplateSpecializationInfo()) {
if (shouldConsiderTemplateLV(MD, spec)) {
- LV.merge(getLVForTemplateArgumentList(*spec->TemplateArguments, F));
+ LV.mergeWithMin(getLVForTemplateArgumentList(*spec->TemplateArguments,
+ F));
if (F.ConsiderTemplateParameterTypes)
LV.merge(getLVForTemplateParameterList(
spec->getTemplate()->getTemplateParameters()));
@@ -567,8 +561,9 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
// about whether containing classes have visibility attributes,
// and that's intentional.
if (TSK != TSK_ExplicitInstantiationDeclaration &&
+ TSK != TSK_ExplicitInstantiationDefinition &&
F.ConsiderGlobalVisibility &&
- MD->getASTContext().getLangOptions().InlineVisibilityHidden) {
+ MD->getASTContext().getLangOpts().InlineVisibilityHidden) {
// InlineVisibilityHidden only applies to definitions, and
// isInlined() only gives meaningful answers on definitions
// anyway.
@@ -586,7 +581,8 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
if (shouldConsiderTemplateLV(spec)) {
// Merge template argument/parameter information for member
// class template specializations.
- LV.merge(getLVForTemplateArgumentList(spec->getTemplateArgs(), F));
+ LV.mergeWithMin(getLVForTemplateArgumentList(spec->getTemplateArgs(),
+ F));
if (F.ConsiderTemplateParameterTypes)
LV.merge(getLVForTemplateParameterList(
spec->getSpecializedTemplate()->getTemplateParameters()));
@@ -597,18 +593,11 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
} else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// Modify the variable's linkage by its type, but ignore the
// type's visibility unless it's a definition.
- LVPair TypeLV = VD->getType()->getLinkageAndVisibility();
- if (TypeLV.first != ExternalLinkage)
+ LinkageInfo TypeLV = getLVForType(VD->getType());
+ if (TypeLV.linkage() != ExternalLinkage)
LV.mergeLinkage(UniqueExternalLinkage);
if (!LV.visibilityExplicit())
- LV.mergeVisibility(TypeLV.second);
- }
-
- F.ConsiderGlobalVisibility &= !LV.visibilityExplicit();
-
- // Apply -fvisibility if desired.
- if (F.ConsiderGlobalVisibility && LV.visibility() != HiddenVisibility) {
- LV.mergeVisibility(D->getASTContext().getLangOptions().getVisibilityMode());
+ LV.mergeVisibility(TypeLV.visibility(), TypeLV.visibilityExplicit());
}
return LV;
@@ -623,6 +612,8 @@ static void clearLinkageForClass(const CXXRecordDecl *record) {
}
}
+void NamedDecl::anchor() { }
+
void NamedDecl::ClearLinkageCache() {
// Note that we can't skip clearing the linkage of children just
// because the parent doesn't have cached linkage: we don't cache
@@ -683,13 +674,13 @@ LinkageInfo NamedDecl::getLinkageAndVisibility() const {
llvm::Optional<Visibility> NamedDecl::getExplicitVisibility() const {
// Use the most recent declaration of a variable.
if (const VarDecl *var = dyn_cast<VarDecl>(this))
- return getVisibilityOf(var->getMostRecentDeclaration());
+ return getVisibilityOf(var->getMostRecentDecl());
// Use the most recent declaration of a function, and also handle
// function template specializations.
if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(this)) {
if (llvm::Optional<Visibility> V
- = getVisibilityOf(fn->getMostRecentDeclaration()))
+ = getVisibilityOf(fn->getMostRecentDecl()))
return V;
// If the function is a specialization of a template with an
@@ -698,6 +689,12 @@ llvm::Optional<Visibility> NamedDecl::getExplicitVisibility() const {
= fn->getTemplateSpecializationInfo())
return getVisibilityOf(templateInfo->getTemplate()->getTemplatedDecl());
+ // If the function is a member of a specialization of a class template
+ // and the corresponding decl has explicit visibility, use that.
+ FunctionDecl *InstantiatedFrom = fn->getInstantiatedFromMemberFunction();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom);
+
return llvm::Optional<Visibility>();
}
@@ -712,6 +709,14 @@ llvm::Optional<Visibility> NamedDecl::getExplicitVisibility() const {
= dyn_cast<ClassTemplateSpecializationDecl>(this))
return getVisibilityOf(spec->getSpecializedTemplate()->getTemplatedDecl());
+ // If this is a member class of a specialization of a class template
+ // and the corresponding decl has explicit visibility, use that.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(this)) {
+ CXXRecordDecl *InstantiatedFrom = RD->getInstantiatedFromMemberClass();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom);
+ }
+
return llvm::Optional<Visibility>();
}
@@ -721,19 +726,46 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
switch (D->getKind()) {
default:
break;
+ case Decl::ParmVar:
+ return LinkageInfo::none();
case Decl::TemplateTemplateParm: // count these as external
case Decl::NonTypeTemplateParm:
case Decl::ObjCAtDefsField:
case Decl::ObjCCategory:
case Decl::ObjCCategoryImpl:
case Decl::ObjCCompatibleAlias:
- case Decl::ObjCForwardProtocol:
case Decl::ObjCImplementation:
case Decl::ObjCMethod:
case Decl::ObjCProperty:
case Decl::ObjCPropertyImpl:
case Decl::ObjCProtocol:
return LinkageInfo::external();
+
+ case Decl::CXXRecord: {
+ const CXXRecordDecl *Record = cast<CXXRecordDecl>(D);
+ if (Record->isLambda()) {
+ if (!Record->getLambdaManglingNumber()) {
+ // This lambda has no mangling number, so it's internal.
+ return LinkageInfo::internal();
+ }
+
+ // This lambda has its linkage/visibility determined by its owner.
+ const DeclContext *DC = D->getDeclContext()->getRedeclContext();
+ if (Decl *ContextDecl = Record->getLambdaContextDecl()) {
+ if (isa<ParmVarDecl>(ContextDecl))
+ DC = ContextDecl->getDeclContext()->getRedeclContext();
+ else
+ return getLVForDecl(cast<NamedDecl>(ContextDecl), Flags);
+ }
+
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC))
+ return getLVForDecl(ND, Flags);
+
+ return LinkageInfo::external();
+ }
+
+ break;
+ }
}
// Handle linkage for namespace-scope names.
@@ -763,7 +795,8 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
// external linkage.
if (D->getLexicalDeclContext()->isFunctionOrMethod()) {
if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
- if (Function->isInAnonymousNamespace() && !Function->isExternC())
+ if (Function->isInAnonymousNamespace() &&
+ !Function->getDeclContext()->isExternCContext())
return LinkageInfo::uniqueExternal();
LinkageInfo LV;
@@ -772,7 +805,7 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
LV.setVisibility(*Vis);
}
- if (const FunctionDecl *Prev = Function->getPreviousDeclaration()) {
+ if (const FunctionDecl *Prev = Function->getPreviousDecl()) {
LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
LV.mergeVisibility(PrevLV);
@@ -784,7 +817,8 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
if (const VarDecl *Var = dyn_cast<VarDecl>(D))
if (Var->getStorageClass() == SC_Extern ||
Var->getStorageClass() == SC_PrivateExtern) {
- if (Var->isInAnonymousNamespace() && !Var->isExternC())
+ if (Var->isInAnonymousNamespace() &&
+ !Var->getDeclContext()->isExternCContext())
return LinkageInfo::uniqueExternal();
LinkageInfo LV;
@@ -795,7 +829,7 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
LV.setVisibility(*Vis);
}
- if (const VarDecl *Prev = Var->getPreviousDeclaration()) {
+ if (const VarDecl *Prev = Var->getPreviousDecl()) {
LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
LV.mergeVisibility(PrevLV);
@@ -811,7 +845,7 @@ static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
}
std::string NamedDecl::getQualifiedNameAsString() const {
- return getQualifiedNameAsString(getASTContext().getLangOptions());
+ return getQualifiedNameAsString(getASTContext().getPrintingPolicy());
}
std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const {
@@ -904,7 +938,7 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD) const {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
// For function declarations, we keep track of redeclarations.
- return FD->getPreviousDeclaration() == OldD;
+ return FD->getPreviousDecl() == OldD;
// For function templates, the underlying function declarations are linked.
if (const FunctionTemplateDecl *FunctionTemplate
@@ -933,6 +967,12 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD) const {
cast<UsingDecl>(OldD)->getQualifier());
}
+ // A typedef of an Objective-C class type can replace an Objective-C class
+ // declaration or definition, and vice versa.
+ if ((isa<TypedefNameDecl>(this) && isa<ObjCInterfaceDecl>(OldD)) ||
+ (isa<ObjCInterfaceDecl>(this) && isa<TypedefNameDecl>(OldD)))
+ return true;
+
// For non-function declarations, if the declarations are of the
// same kind then this must be a redeclaration, or semantic analysis
// would not have given us the new declaration.
@@ -943,23 +983,21 @@ bool NamedDecl::hasLinkage() const {
return getLinkage() != NoLinkage;
}
-NamedDecl *NamedDecl::getUnderlyingDecl() {
+NamedDecl *NamedDecl::getUnderlyingDeclImpl() {
NamedDecl *ND = this;
- while (true) {
- if (UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(ND))
- ND = UD->getTargetDecl();
- else if (ObjCCompatibleAliasDecl *AD
- = dyn_cast<ObjCCompatibleAliasDecl>(ND))
- return AD->getClassInterface();
- else
- return ND;
- }
+ while (UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(ND))
+ ND = UD->getTargetDecl();
+
+ if (ObjCCompatibleAliasDecl *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
+ return AD->getClassInterface();
+
+ return ND;
}
bool NamedDecl::isCXXInstanceMember() const {
- assert(isCXXClassMember() &&
- "checking whether non-member is instance member");
-
+ if (!isCXXClassMember())
+ return false;
+
const NamedDecl *D = this;
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
@@ -1131,7 +1169,6 @@ const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
}
llvm_unreachable("Invalid storage class");
- return 0;
}
VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
@@ -1141,6 +1178,12 @@ VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
return new (C) VarDecl(Var, DC, StartL, IdL, Id, T, TInfo, S, SCAsWritten);
}
+VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarDecl));
+ return new (Mem) VarDecl(Var, 0, SourceLocation(), SourceLocation(), 0,
+ QualType(), 0, SC_None, SC_None);
+}
+
void VarDecl::setStorageClass(StorageClass SC) {
assert(isLegalForVariable(SC));
if (getStorageClass() != SC)
@@ -1156,34 +1199,26 @@ SourceRange VarDecl::getSourceRange() const {
}
bool VarDecl::isExternC() const {
- ASTContext &Context = getASTContext();
- if (!Context.getLangOptions().CPlusPlus)
- return (getDeclContext()->isTranslationUnit() &&
- getStorageClass() != SC_Static) ||
- (getDeclContext()->isFunctionOrMethod() && hasExternalStorage());
+ if (getLinkage() != ExternalLinkage)
+ return false;
const DeclContext *DC = getDeclContext();
- if (DC->isFunctionOrMethod())
+ if (DC->isRecord())
return false;
- for (; !DC->isTranslationUnit(); DC = DC->getParent()) {
- if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC)) {
- if (Linkage->getLanguage() == LinkageSpecDecl::lang_c)
- return getStorageClass() != SC_Static;
-
- break;
- }
-
- }
-
- return false;
+ ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
+ return DC->isExternCContext();
}
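The rewritten check derives extern-C-ness from linkage plus the enclosing context instead of hand-walking LinkageSpecDecls. Roughly, for C++ sources (hypothetical declarations, assuming the new semantics):

    extern "C" int a;        // in a C language linkage spec: isExternC() true
    extern "C" { int b; }    // braced linkage spec: likewise true
    namespace N { int c; }   // ordinary C++ context: false
    static int d;            // internal linkage: false before the context
                             // is even consulted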
VarDecl *VarDecl::getCanonicalDecl() {
return getFirstDeclaration();
}
-VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition() const {
+VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition(
+ ASTContext &C) const
+{
// C++ [basic.def]p2:
// A declaration is a definition unless [...] it contains the 'extern'
// specifier or a linkage-specification and neither an initializer [...],
@@ -1214,8 +1249,8 @@ VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition() const {
if (getStorageClassAsWritten() == SC_Extern ||
getStorageClassAsWritten() == SC_PrivateExtern) {
- for (const VarDecl *PrevVar = getPreviousDeclaration();
- PrevVar; PrevVar = PrevVar->getPreviousDeclaration()) {
+ for (const VarDecl *PrevVar = getPreviousDecl();
+ PrevVar; PrevVar = PrevVar->getPreviousDecl()) {
if (PrevVar->getLinkage() == InternalLinkage && PrevVar->hasInit())
return DeclarationOnly;
}
@@ -1225,7 +1260,7 @@ VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition() const {
// and without a storage class specifier or the scs 'static', constitutes
// a tentative definition.
// No such thing in C++.
- if (!getASTContext().getLangOptions().CPlusPlus && isFileVarDecl())
+ if (!C.getLangOpts().CPlusPlus && isFileVarDecl())
return TentativeDefinition;
// What's left is (in C, block-scope) declarations without initializers or
@@ -1263,23 +1298,26 @@ bool VarDecl::isTentativeDefinitionNow() const {
return true;
}
-VarDecl *VarDecl::getDefinition() {
+VarDecl *VarDecl::getDefinition(ASTContext &C) {
VarDecl *First = getFirstDeclaration();
for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
I != E; ++I) {
- if ((*I)->isThisDeclarationADefinition() == Definition)
+ if ((*I)->isThisDeclarationADefinition(C) == Definition)
return *I;
}
return 0;
}
-VarDecl::DefinitionKind VarDecl::hasDefinition() const {
+VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
DefinitionKind Kind = DeclarationOnly;
const VarDecl *First = getFirstDeclaration();
for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
- I != E; ++I)
- Kind = std::max(Kind, (*I)->isThisDeclarationADefinition());
+ I != E; ++I) {
+ Kind = std::max(Kind, (*I)->isThisDeclarationADefinition(C));
+ if (Kind == Definition)
+ break;
+ }
return Kind;
}
@@ -1334,6 +1372,130 @@ void VarDecl::setInit(Expr *I) {
Init = I;
}
+bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
+ const LangOptions &Lang = C.getLangOpts();
+
+ if (!Lang.CPlusPlus)
+ return false;
+
+ // In C++11, any variable of reference type can be used in a constant
+ // expression if it is initialized by a constant expression.
+ if (Lang.CPlusPlus0x && getType()->isReferenceType())
+ return true;
+
+ // Only const objects can be used in constant expressions in C++. C++98 does
+ // not require the variable to be non-volatile, but we consider this to be a
+ // defect.
+ if (!getType().isConstQualified() || getType().isVolatileQualified())
+ return false;
+
+ // In C++, const, non-volatile variables of integral or enumeration types
+ // can be used in constant expressions.
+ if (getType()->isIntegralOrEnumerationType())
+ return true;
+
+ // Additionally, in C++11, non-volatile constexpr variables can be used in
+ // constant expressions.
+ return Lang.CPlusPlus0x && isConstexpr();
+}
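As a rough illustration of the predicate just defined (hypothetical variables; CPlusPlus0x is the C++11 mode flag):

    const int n = 10;           // const, non-volatile, integral: usable
    volatile const int v = 10;  // volatile-qualified: never usable
    const double d = 1.0;       // const but non-integral: not usable unless
                                // also constexpr in C++11
    constexpr double e = 1.0;   // constexpr: usable in C++11
    const int &r = n;           // any reference type is accepted in C++11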
+
+/// Convert the initializer for this declaration to the elaborated EvaluatedStmt
+/// form, which contains extra information on the evaluated value of the
+/// initializer.
+EvaluatedStmt *VarDecl::ensureEvaluatedStmt() const {
+ EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>();
+ if (!Eval) {
+ Stmt *S = Init.get<Stmt *>();
+ Eval = new (getASTContext()) EvaluatedStmt;
+ Eval->Value = S;
+ Init = Eval;
+ }
+ return Eval;
+}
+
+APValue *VarDecl::evaluateValue() const {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ return evaluateValue(Notes);
+}
+
+APValue *VarDecl::evaluateValue(
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+
+ // We only produce notes indicating why an initializer is non-constant the
+ // first time it is evaluated. FIXME: The notes won't always be emitted the
+ // first time we try evaluation, so might not be produced at all.
+ if (Eval->WasEvaluated)
+ return Eval->Evaluated.isUninit() ? 0 : &Eval->Evaluated;
+
+ const Expr *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
+ if (Eval->IsEvaluating) {
+ // FIXME: Produce a diagnostic for self-initialization.
+ Eval->CheckedICE = true;
+ Eval->IsICE = false;
+ return 0;
+ }
+
+ Eval->IsEvaluating = true;
+
+ bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
+ this, Notes);
+
+ // Ensure the result is an uninitialized APValue if evaluation fails.
+ if (!Result)
+ Eval->Evaluated = APValue();
+
+ Eval->IsEvaluating = false;
+ Eval->WasEvaluated = true;
+
+ // In C++11, we have determined whether the initializer was a constant
+ // expression as a side-effect.
+ if (getASTContext().getLangOpts().CPlusPlus0x && !Eval->CheckedICE) {
+ Eval->CheckedICE = true;
+ Eval->IsICE = Result && Notes.empty();
+ }
+
+ return Result ? &Eval->Evaluated : 0;
+}
+
+bool VarDecl::checkInitIsICE() const {
+ // Initializers of weak variables are never ICEs.
+ if (isWeak())
+ return false;
+
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+ if (Eval->CheckedICE)
+ // We have already checked whether this subexpression is an
+ // integral constant expression.
+ return Eval->IsICE;
+
+ const Expr *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
+ // In C++11, evaluate the initializer to check whether it's a constant
+ // expression.
+ if (getASTContext().getLangOpts().CPlusPlus0x) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ evaluateValue(Notes);
+ return Eval->IsICE;
+ }
+
+ // It's an ICE whether or not the definition we found is
+ // out-of-line. See DR 721 and the discussion in Clang PR
+ // 6206 for details.
+
+ if (Eval->CheckingICE)
+ return false;
+ Eval->CheckingICE = true;
+
+ Eval->IsICE = Init->isIntegerConstantExpr(getASTContext());
+ Eval->CheckingICE = false;
+ Eval->CheckedICE = true;
+ return Eval->IsICE;
+}
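For example (hypothetical C++ declarations, assuming <cstdlib> for std::rand), the first initializer below is an integer constant expression and the second is not, so checkInitIsICE() caches true and false respectively; weak variables short-circuit to false regardless of their initializer:

    #include <cstdlib>

    const int a = 3 + 4;        // ICE: usable as an array bound, etc.
    const int b = std::rand();  // runtime call: not an ICE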
+
bool VarDecl::extendsLifetimeOfTemporary() const {
assert(getType()->isReferenceType() &&"Non-references never extend lifetime");
@@ -1390,6 +1552,12 @@ ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
S, SCAsWritten, DefArg);
}
+ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ParmVarDecl));
+ return new (Mem) ParmVarDecl(ParmVar, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, SC_None, SC_None, 0);
+}
+
SourceRange ParmVarDecl::getSourceRange() const {
if (!hasInheritedDefaultArg()) {
SourceRange ArgRange = getDefaultArgRange();
@@ -1412,21 +1580,6 @@ Expr *ParmVarDecl::getDefaultArg() {
return Arg;
}
-unsigned ParmVarDecl::getNumDefaultArgTemporaries() const {
- if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(getInit()))
- return E->getNumTemporaries();
-
- return 0;
-}
-
-CXXTemporary *ParmVarDecl::getDefaultArgTemporary(unsigned i) {
- assert(getNumDefaultArgTemporaries() &&
- "Default arguments does not have any temporaries!");
-
- ExprWithCleanups *E = cast<ExprWithCleanups>(getInit());
- return E->getTemporary(i);
-}
-
SourceRange ParmVarDecl::getDefaultArgRange() const {
if (const Expr *E = getInit())
return E->getSourceRange();
@@ -1540,7 +1693,7 @@ bool FunctionDecl::isMain() const {
const TranslationUnitDecl *tunit =
dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
return tunit &&
- !tunit->getASTContext().getLangOptions().Freestanding &&
+ !tunit->getASTContext().getLangOpts().Freestanding &&
getIdentifier() &&
getIdentifier()->isStr("main");
}
@@ -1568,27 +1721,21 @@ bool FunctionDecl::isReservedGlobalPlacementOperator() const {
}
bool FunctionDecl::isExternC() const {
- ASTContext &Context = getASTContext();
- // In C, any non-static, non-overloadable function has external
- // linkage.
- if (!Context.getLangOptions().CPlusPlus)
- return getStorageClass() != SC_Static && !getAttr<OverloadableAttr>();
+ if (getLinkage() != ExternalLinkage)
+ return false;
+
+ if (getAttr<OverloadableAttr>())
+ return false;
const DeclContext *DC = getDeclContext();
if (DC->isRecord())
return false;
- for (; !DC->isTranslationUnit(); DC = DC->getParent()) {
- if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC)) {
- if (Linkage->getLanguage() == LinkageSpecDecl::lang_c)
- return getStorageClass() != SC_Static &&
- !getAttr<OverloadableAttr>();
-
- break;
- }
- }
+ ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
- return isMain();
+ return isMain() || DC->isExternCContext();
}
bool FunctionDecl::isGlobal() const {
@@ -1622,7 +1769,7 @@ FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
FunTmpl->setPreviousDeclaration(PrevFunTmpl);
}
- if (PrevDecl->IsInline)
+ if (PrevDecl && PrevDecl->IsInline)
IsInline = true;
}
@@ -1652,11 +1799,14 @@ void FunctionDecl::setStorageClass(StorageClass SC) {
/// value of type \c Builtin::ID if in the target-independent range
/// \c [1,Builtin::First), or a target-specific builtin value.
unsigned FunctionDecl::getBuiltinID() const {
- ASTContext &Context = getASTContext();
- if (!getIdentifier() || !getIdentifier()->getBuiltinID())
+ if (!getIdentifier())
return 0;
unsigned BuiltinID = getIdentifier()->getBuiltinID();
+ if (!BuiltinID)
+ return 0;
+
+ ASTContext &Context = getASTContext();
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return BuiltinID;
@@ -1670,7 +1820,7 @@ unsigned FunctionDecl::getBuiltinID() const {
// If this function is at translation-unit scope and we're not in
// C++, it refers to the C library function.
- if (!Context.getLangOptions().CPlusPlus &&
+ if (!Context.getLangOpts().CPlusPlus &&
getDeclContext()->isTranslationUnit())
return BuiltinID;
@@ -1710,12 +1860,22 @@ void FunctionDecl::setParams(ASTContext &C,
}
}
+void FunctionDecl::setDeclsInPrototypeScope(llvm::ArrayRef<NamedDecl *> NewDecls) {
+ assert(DeclsInPrototypeScope.empty() && "Already has prototype decls!");
+
+ if (!NewDecls.empty()) {
+ NamedDecl **A = new (getASTContext()) NamedDecl*[NewDecls.size()];
+ std::copy(NewDecls.begin(), NewDecls.end(), A);
+ DeclsInPrototypeScope = llvm::ArrayRef<NamedDecl*>(A, NewDecls.size());
+ }
+}
+
/// getMinRequiredArguments - Returns the minimum number of arguments
/// needed to call this function. This may be fewer than the number of
/// function parameters, if some of the parameters have default
/// arguments (in C++) or the last parameter is a parameter pack.
unsigned FunctionDecl::getMinRequiredArguments() const {
- if (!getASTContext().getLangOptions().CPlusPlus)
+ if (!getASTContext().getLangOpts().CPlusPlus)
return getNumParams();
unsigned NumRequiredArgs = getNumParams();
@@ -1777,31 +1937,79 @@ bool FunctionDecl::isInlined() const {
return false;
}
+static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
+ // Only consider file-scope declarations in this test.
+ if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
+ return false;
+
+ // Only consider explicit declarations; the presence of a builtin for a
+ // libcall shouldn't affect whether a definition is externally visible.
+ if (Redecl->isImplicit())
+ return false;
+
+ if (!Redecl->isInlineSpecified() || Redecl->getStorageClass() == SC_Extern)
+ return true; // Not an inline definition
+
+ return false;
+}
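RedeclForcesDefC99 encodes the C99 6.7.4p6 rule that a translation unit's definition is an inline definition only while every file-scope declaration uses 'inline' without 'extern'. A C99 sketch (hypothetical function f):

    inline void f(void) {}       // alone, an inline definition: no symbol
    void f(void);                // plain redeclaration: RedeclForcesDefC99
                                 // returns true, so the definition becomes
                                 // externally visible
    extern inline void f(void);  // 'extern inline' forces it as well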
+
/// \brief For a function declaration in C or C++, determine whether this
/// declaration causes the definition to be externally visible.
///
-/// Determines whether this is the first non-inline redeclaration of an inline
-/// function in a language where "inline" does not normally require an
-/// externally visible definition.
+/// Specifically, this determines if adding the current declaration to the set
+/// of redeclarations of the given function causes
+/// isInlineDefinitionExternallyVisible to change from false to true.
bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
assert(!doesThisDeclarationHaveABody() &&
"Must have a declaration without a body.");
ASTContext &Context = getASTContext();
- // In C99 mode, a function may have an inline definition (causing it to
- // be deferred) then redeclared later. As a special case, "extern inline"
- // is not required to produce an external symbol.
- if (Context.getLangOptions().GNUInline || !Context.getLangOptions().C99 ||
- Context.getLangOptions().CPlusPlus)
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+ // With GNU inlining, a declaration with 'inline' but not 'extern' forces
+ // an externally visible definition.
+ //
+ // FIXME: What happens if gnu_inline gets added on after the first
+ // declaration?
+ if (!isInlineSpecified() || getStorageClassAsWritten() == SC_Extern)
+ return false;
+
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body;
+
+ if (Prev->Body) {
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then it is always externally visible.
+ if (!Prev->isInlineSpecified() ||
+ Prev->getStorageClassAsWritten() != SC_Extern)
+ return false;
+ } else if (Prev->isInlineSpecified() &&
+ Prev->getStorageClassAsWritten() != SC_Extern) {
+ return false;
+ }
+ }
+ return FoundBody;
+ }
+
+ if (Context.getLangOpts().CPlusPlus)
return false;
- if (getLinkage() != ExternalLinkage || isInlineSpecified())
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ if (isInlineSpecified() && getStorageClass() != SC_Extern)
return false;
- const FunctionDecl *Definition = 0;
- if (hasBody(Definition))
- return Definition->isInlined() &&
- Definition->isInlineDefinitionExternallyVisible();
- return false;
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body;
+ if (RedeclForcesDefC99(Prev))
+ return false;
+ }
+ return FoundBody;
}
/// \brief For an inline function definition in C or C++, determine whether the
@@ -1826,7 +2034,10 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
assert(isInlined() && "Function must be inline");
ASTContext &Context = getASTContext();
- if (Context.getLangOptions().GNUInline || hasAttr<GNUInlineAttr>()) {
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+ // Note: If you change the logic here, please change
+ // doesDeclarationForceExternallyVisibleDefinition as well.
+ //
// If it's not the case that both 'inline' and 'extern' are
// specified on the definition, then this inline definition is
// externally visible.
@@ -1845,7 +2056,7 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
return false;
}
-
+
// C99 6.7.4p6:
// [...] If all of the file scope declarations for a function in a
// translation unit include the inline function specifier without extern,
@@ -1853,17 +2064,8 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
Redecl != RedeclEnd;
++Redecl) {
- // Only consider file-scope declarations in this test.
- if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
- continue;
-
- // Only consider explicit declarations; the presence of a builtin for a
- // libcall shouldn't affect whether a definition is externally visible.
- if (Redecl->isImplicit())
- continue;
-
- if (!Redecl->isInlineSpecified() || Redecl->getStorageClass() == SC_Extern)
- return true; // Not an inline definition
+ if (RedeclForcesDefC99(*Redecl))
+ return true;
}
// C99 6.7.4p6:
@@ -1966,7 +2168,20 @@ bool FunctionDecl::isImplicitlyInstantiable() const {
return true;
return PatternDecl->isInlined();
-}
+}
+
+bool FunctionDecl::isTemplateInstantiation() const {
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return false;
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ return true;
+ }
+ llvm_unreachable("All TSK values handled.");
+}
FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
// Handle class scope explicit specialization special case.
@@ -2040,22 +2255,7 @@ FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
TemplateArgsAsWritten,
PointOfInstantiation);
TemplateOrSpecialization = Info;
-
- // Insert this function template specialization into the set of known
- // function template specializations.
- if (InsertPos)
- Template->addSpecialization(Info, InsertPos);
- else {
- // Try to insert the new node. If there is an existing node, leave it, the
- // set will contain the canonical decls while
- // FunctionTemplateDecl::findSpecialization will return
- // the most recent redeclarations.
- FunctionTemplateSpecializationInfo *Existing
- = Template->getSpecializations().GetOrInsertNode(Info);
- (void)Existing;
- assert((!Existing || Existing->Function->isCanonicalDecl()) &&
- "Set is supposed to only contain canonical decls");
- }
+ Template->addSpecialization(Info, InsertPos);
}
void
@@ -2169,6 +2369,92 @@ SourceRange FunctionDecl::getSourceRange() const {
return SourceRange(getOuterLocStart(), EndRangeLoc);
}
+unsigned FunctionDecl::getMemoryFunctionKind() const {
+ IdentifierInfo *FnInfo = getIdentifier();
+
+ if (!FnInfo)
+ return 0;
+
+ // Builtin handling.
+ switch (getBuiltinID()) {
+ case Builtin::BI__builtin_memset:
+ case Builtin::BI__builtin___memset_chk:
+ case Builtin::BImemset:
+ return Builtin::BImemset;
+
+ case Builtin::BI__builtin_memcpy:
+ case Builtin::BI__builtin___memcpy_chk:
+ case Builtin::BImemcpy:
+ return Builtin::BImemcpy;
+
+ case Builtin::BI__builtin_memmove:
+ case Builtin::BI__builtin___memmove_chk:
+ case Builtin::BImemmove:
+ return Builtin::BImemmove;
+
+ case Builtin::BIstrlcpy:
+ return Builtin::BIstrlcpy;
+ case Builtin::BIstrlcat:
+ return Builtin::BIstrlcat;
+
+ case Builtin::BI__builtin_memcmp:
+ case Builtin::BImemcmp:
+ return Builtin::BImemcmp;
+
+ case Builtin::BI__builtin_strncpy:
+ case Builtin::BI__builtin___strncpy_chk:
+ case Builtin::BIstrncpy:
+ return Builtin::BIstrncpy;
+
+ case Builtin::BI__builtin_strncmp:
+ case Builtin::BIstrncmp:
+ return Builtin::BIstrncmp;
+
+ case Builtin::BI__builtin_strncasecmp:
+ case Builtin::BIstrncasecmp:
+ return Builtin::BIstrncasecmp;
+
+ case Builtin::BI__builtin_strncat:
+ case Builtin::BI__builtin___strncat_chk:
+ case Builtin::BIstrncat:
+ return Builtin::BIstrncat;
+
+ case Builtin::BI__builtin_strndup:
+ case Builtin::BIstrndup:
+ return Builtin::BIstrndup;
+
+ case Builtin::BI__builtin_strlen:
+ case Builtin::BIstrlen:
+ return Builtin::BIstrlen;
+
+ default:
+ if (isExternC()) {
+ if (FnInfo->isStr("memset"))
+ return Builtin::BImemset;
+ else if (FnInfo->isStr("memcpy"))
+ return Builtin::BImemcpy;
+ else if (FnInfo->isStr("memmove"))
+ return Builtin::BImemmove;
+ else if (FnInfo->isStr("memcmp"))
+ return Builtin::BImemcmp;
+ else if (FnInfo->isStr("strncpy"))
+ return Builtin::BIstrncpy;
+ else if (FnInfo->isStr("strncmp"))
+ return Builtin::BIstrncmp;
+ else if (FnInfo->isStr("strncasecmp"))
+ return Builtin::BIstrncasecmp;
+ else if (FnInfo->isStr("strncat"))
+ return Builtin::BIstrncat;
+ else if (FnInfo->isStr("strndup"))
+ return Builtin::BIstrndup;
+ else if (FnInfo->isStr("strlen"))
+ return Builtin::BIstrlen;
+ }
+ break;
+ }
+ return 0;
+}
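The function folds the __builtin_*, *_chk, and plain extern "C" spellings of each memory routine into a single Builtin ID, so callers can switch on one value. A caller-side sketch (isMemcpyLike is hypothetical and assumes the usual Clang AST headers; the Builtin IDs are the real values returned above):

    static bool isMemcpyLike(const FunctionDecl *FD) {
      switch (FD->getMemoryFunctionKind()) {
      case Builtin::BImemcpy:
      case Builtin::BImemmove:
        return true;   // memcpy/memmove in any of their spellings
      default:
        return false;  // 0 means: not a recognized memory function
      }
    }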
+
//===----------------------------------------------------------------------===//
// FieldDecl Implementation
//===----------------------------------------------------------------------===//
@@ -2182,6 +2468,12 @@ FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC,
BW, Mutable, HasInit);
}
+FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FieldDecl));
+ return new (Mem) FieldDecl(Field, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, 0, false, false);
+}
+
bool FieldDecl::isAnonymousStructOrUnion() const {
if (!isImplicit() || getDeclName())
return false;
@@ -2201,31 +2493,27 @@ unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
unsigned FieldDecl::getFieldIndex() const {
if (CachedFieldIndex) return CachedFieldIndex - 1;
- unsigned index = 0;
+ unsigned Index = 0;
const RecordDecl *RD = getParent();
const FieldDecl *LastFD = 0;
bool IsMsStruct = RD->hasAttr<MsStructAttr>();
-
- RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- while (true) {
- assert(i != e && "failed to find field in parent!");
- if (*i == this)
- break;
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++Index) {
+ (*I)->CachedFieldIndex = Index + 1;
if (IsMsStruct) {
// Zero-length bitfields following non-bitfield members are ignored.
- if (getASTContext().ZeroBitfieldFollowsNonBitfield((*i), LastFD)) {
- ++i;
+ if (getASTContext().ZeroBitfieldFollowsNonBitfield((*I), LastFD)) {
+ --Index;
continue;
}
- LastFD = (*i);
+ LastFD = (*I);
}
- ++i;
- ++index;
}
- CachedFieldIndex = index + 1;
- return index;
+ assert(CachedFieldIndex && "failed to find field in parent");
+ return CachedFieldIndex - 1;
}
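[Note: the rewritten loop caches an index for every field of the parent record in a single pass, so calling getFieldIndex() on each field of an N-field record is O(N) overall rather than O(N^2) as before. Under ms_struct semantics a zero-length bitfield that follows a non-bitfield occupies no slot, which is why Index is decremented for it. An illustrative sketch, with hypothetical input not taken from this patch:

    struct __attribute__((ms_struct)) S {
      int a;      // getFieldIndex() == 0
      int : 0;    // zero-length bitfield after a non-bitfield: skipped
      int b;      // getFieldIndex() == 1
    };
]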
SourceRange FieldDecl::getSourceRange() const {
@@ -2339,6 +2627,8 @@ void TagDecl::setTemplateParameterListsInfo(ASTContext &Context,
// EnumDecl Implementation
//===----------------------------------------------------------------------===//
+void EnumDecl::anchor() { }
+
EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id,
@@ -2350,9 +2640,10 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
return Enum;
}
-EnumDecl *EnumDecl::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) EnumDecl(0, SourceLocation(), SourceLocation(), 0, 0,
- false, false, false);
+EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumDecl));
+ return new (Mem) EnumDecl(0, SourceLocation(), SourceLocation(), 0, 0,
+ false, false, false);
}
void EnumDecl::completeDefinition(QualType NewType,
@@ -2368,6 +2659,37 @@ void EnumDecl::completeDefinition(QualType NewType,
TagDecl::completeDefinition();
}
+TemplateSpecializationKind EnumDecl::getTemplateSpecializationKind() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void EnumDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ MemberSpecializationInfo *MSI = getMemberSpecializationInfo();
+ assert(MSI && "Not an instantiated member enumeration?");
+ MSI->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSI->getPointOfInstantiation().isInvalid())
+ MSI->setPointOfInstantiation(PointOfInstantiation);
+}
+
+EnumDecl *EnumDecl::getInstantiatedFromMemberEnum() const {
+ if (SpecializationInfo)
+ return cast<EnumDecl>(SpecializationInfo->getInstantiatedFrom());
+
+ return 0;
+}
+
+void EnumDecl::setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
+ TemplateSpecializationKind TSK) {
+ assert(!SpecializationInfo && "Member enum is already a specialization");
+ SpecializationInfo = new (C) MemberSpecializationInfo(ED, TSK);
+}
+
//===----------------------------------------------------------------------===//
// RecordDecl Implementation
//===----------------------------------------------------------------------===//
@@ -2392,9 +2714,10 @@ RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
return R;
}
-RecordDecl *RecordDecl::Create(const ASTContext &C, EmptyShell Empty) {
- return new (C) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
- SourceLocation(), 0, 0);
+RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(RecordDecl));
+ return new (Mem) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
+ SourceLocation(), 0, 0);
}
bool RecordDecl::isInjectedClassName() const {
@@ -2502,10 +2825,14 @@ SourceRange BlockDecl::getSourceRange() const {
// Other Decl Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//
+void TranslationUnitDecl::anchor() { }
+
TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
return new (C) TranslationUnitDecl(C);
}
+void LabelDecl::anchor() { }
+
LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentL, IdentifierInfo *II) {
return new (C) LabelDecl(DC, IdentL, II, 0, IdentL);
@@ -2518,17 +2845,14 @@ LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
return new (C) LabelDecl(DC, IdentL, II, 0, GnuLabelL);
}
-
-NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id) {
- return new (C) NamespaceDecl(DC, StartLoc, IdLoc, Id);
+LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LabelDecl));
+ return new (Mem) LabelDecl(0, SourceLocation(), 0, 0, SourceLocation());
}
-NamespaceDecl *NamespaceDecl::getNextNamespace() {
- return dyn_cast_or_null<NamespaceDecl>(
- NextNamespace.get(getASTContext().getExternalSource()));
-}
+void ValueDecl::anchor() { }
+
+void ImplicitParamDecl::anchor() { }
ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdLoc,
@@ -2537,6 +2861,12 @@ ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
return new (C) ImplicitParamDecl(DC, IdLoc, Id, Type);
}
+ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ImplicitParamDecl));
+ return new (Mem) ImplicitParamDecl(0, SourceLocation(), 0, QualType());
+}
+
FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
@@ -2553,10 +2883,22 @@ FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
return New;
}
+FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionDecl));
+ return new (Mem) FunctionDecl(Function, 0, SourceLocation(),
+ DeclarationNameInfo(), QualType(), 0,
+ SC_None, SC_None, false, false);
+}
+
BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
return new (C) BlockDecl(DC, L);
}
+BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(BlockDecl));
+ return new (Mem) BlockDecl(0, SourceLocation());
+}
+
EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
SourceLocation L,
IdentifierInfo *Id, QualType T,
@@ -2564,6 +2906,15 @@ EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
return new (C) EnumConstantDecl(CD, L, Id, T, E, V);
}
+EnumConstantDecl *
+EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumConstantDecl));
+ return new (Mem) EnumConstantDecl(0, SourceLocation(), 0, QualType(), 0,
+ llvm::APSInt());
+}
+
+void IndirectFieldDecl::anchor() { }
+
IndirectFieldDecl *
IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
IdentifierInfo *Id, QualType T, NamedDecl **CH,
@@ -2571,6 +2922,13 @@ IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
return new (C) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
}
+IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(IndirectFieldDecl));
+ return new (Mem) IndirectFieldDecl(0, SourceLocation(), DeclarationName(),
+ QualType(), 0, 0);
+}
+
SourceRange EnumConstantDecl::getSourceRange() const {
SourceLocation End = getLocation();
if (Init)
@@ -2578,12 +2936,21 @@ SourceRange EnumConstantDecl::getSourceRange() const {
return SourceRange(getLocation(), End);
}
+void TypeDecl::anchor() { }
+
TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, TypeSourceInfo *TInfo) {
return new (C) TypedefDecl(DC, StartLoc, IdLoc, Id, TInfo);
}
+void TypedefNameDecl::anchor() { }
+
+TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypedefDecl));
+ return new (Mem) TypedefDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+}
+
TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
@@ -2591,6 +2958,11 @@ TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
return new (C) TypeAliasDecl(DC, StartLoc, IdLoc, Id, TInfo);
}
+TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasDecl));
+ return new (Mem) TypeAliasDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+}
+
SourceRange TypedefDecl::getSourceRange() const {
SourceLocation RangeEnd = getLocation();
if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
@@ -2607,9 +2979,96 @@ SourceRange TypeAliasDecl::getSourceRange() const {
return SourceRange(getLocStart(), RangeEnd);
}
+void FileScopeAsmDecl::anchor() { }
+
FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC,
StringLiteral *Str,
SourceLocation AsmLoc,
SourceLocation RParenLoc) {
return new (C) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
}
+
+FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FileScopeAsmDecl));
+ return new (Mem) FileScopeAsmDecl(0, 0, SourceLocation(), SourceLocation());
+}
+
+//===----------------------------------------------------------------------===//
+// ImportDecl Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Retrieve the number of module identifiers needed to name the given
+/// module.
+static unsigned getNumModuleIdentifiers(Module *Mod) {
+ unsigned Result = 1;
+ while (Mod->Parent) {
+ Mod = Mod->Parent;
+ ++Result;
+ }
+ return Result;
+}
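[Note: a worked example of the count, with hypothetical module names:

    // For a submodule "std.vector.detail":
    //   detail->Parent == vector, vector->Parent == std, std->Parent == 0
    //   => getNumModuleIdentifiers(detail) == 3
]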
+
+ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
+ Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs)
+ : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, true),
+ NextLocalImport()
+{
+ assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size());
+ SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(this + 1);
+ memcpy(StoredLocs, IdentifierLocs.data(),
+ IdentifierLocs.size() * sizeof(SourceLocation));
+}
+
+ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
+ Module *Imported, SourceLocation EndLoc)
+ : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false),
+ NextLocalImport()
+{
+ *reinterpret_cast<SourceLocation *>(this + 1) = EndLoc;
+}
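[Note: both constructors write their source locations into the memory immediately past the object (this + 1), which relies on Create() and CreateImplicit() below over-allocating. Layout sketch:

    //   [ ImportDecl ][ SourceLocation 0 ] ... [ SourceLocation N-1 ]
    // Create() reserves sizeof(ImportDecl) + N * sizeof(SourceLocation);
    // getIdentifierLocs() recovers the array from 'this + 1'.
]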
+
+ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs) {
+ void *Mem = C.Allocate(sizeof(ImportDecl) +
+ IdentifierLocs.size() * sizeof(SourceLocation));
+ return new (Mem) ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
+}
+
+ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ Module *Imported,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(ImportDecl) + sizeof(SourceLocation));
+ ImportDecl *Import = new (Mem) ImportDecl(DC, StartLoc, Imported, EndLoc);
+ Import->setImplicit();
+ return Import;
+}
+
+ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumLocations) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ (sizeof(ImportDecl) +
+ NumLocations * sizeof(SourceLocation)));
+ return new (Mem) ImportDecl(EmptyShell());
+}
+
+ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
+ if (!ImportedAndComplete.getInt())
+ return ArrayRef<SourceLocation>();
+
+ const SourceLocation *StoredLocs
+ = reinterpret_cast<const SourceLocation *>(this + 1);
+ return ArrayRef<SourceLocation>(StoredLocs,
+ getNumModuleIdentifiers(getImportedModule()));
+}
+
+SourceRange ImportDecl::getSourceRange() const {
+ if (!ImportedAndComplete.getInt())
+ return SourceRange(getLocation(),
+ *reinterpret_cast<const SourceLocation *>(this + 1));
+
+ return SourceRange(getLocation(), getIdentifierLocs().back());
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
index 321e40b..47a0d25 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
@@ -39,7 +39,24 @@ using namespace clang;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
-static bool StatSwitch = false;
+void *Decl::AllocateDeserializedDecl(const ASTContext &Context,
+ unsigned ID,
+ unsigned Size) {
+ // Allocate an extra 8 bytes worth of storage, which ensures that the
+ // resulting pointer will still be 8-byte aligned.
+ void *Start = Context.Allocate(Size + 8);
+ void *Result = (char*)Start + 8;
+
+ unsigned *PrefixPtr = (unsigned *)Result - 2;
+
+ // Zero out the first 4 bytes; this is used to store the owning module ID.
+ PrefixPtr[0] = 0;
+
+ // Store the global declaration ID in the second 4 bytes.
+ PrefixPtr[1] = ID;
+
+ return Result;
+}
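[Note: the resulting layout places two 32-bit words directly in front of the declaration. A sketch of how a reader could recover them, assuming the Decl* was allocated by this helper:

    //   [ owning module ID : 4 bytes ][ global decl ID : 4 bytes ][ Decl ... ]
    unsigned GlobalID = ((const unsigned *)D)[-1]; // PrefixPtr[1] above
    unsigned ModuleID = ((const unsigned *)D)[-2]; // PrefixPtr[0], zeroed here
]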
const char *Decl::getDeclKindName() const {
switch (DeclKind) {
@@ -52,7 +69,7 @@ const char *Decl::getDeclKindName() const {
void Decl::setInvalidDecl(bool Invalid) {
InvalidDecl = Invalid;
- if (Invalid) {
+ if (Invalid && !isa<ParmVarDecl>(this)) {
// Defensive maneuver for ill-formed code: we're likely not to make it to
// a point where we set the access specifier, so default it to "public"
// to avoid triggering asserts elsewhere in the front end.
@@ -69,9 +86,9 @@ const char *DeclContext::getDeclKindName() const {
}
}
-bool Decl::CollectingStats(bool Enable) {
- if (Enable) StatSwitch = true;
- return StatSwitch;
+bool Decl::StatisticsEnabled = false;
+void Decl::EnableStatistics() {
+ StatisticsEnabled = true;
}
void Decl::PrintStats() {
@@ -100,7 +117,6 @@ void Decl::PrintStats() {
void Decl::add(Kind k) {
switch (k) {
- default: llvm_unreachable("Declaration not in DeclNodes.inc!");
#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
@@ -185,15 +201,24 @@ void Decl::setLexicalDeclContext(DeclContext *DC) {
return;
if (isInSemaDC()) {
- MultipleDC *MDC = new (getASTContext()) MultipleDC();
- MDC->SemanticDC = getDeclContext();
- MDC->LexicalDC = DC;
- DeclCtx = MDC;
+ setDeclContextsImpl(getDeclContext(), DC, getASTContext());
} else {
getMultipleDC()->LexicalDC = DC;
}
}
+void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
+ ASTContext &Ctx) {
+ if (SemaDC == LexicalDC) {
+ DeclCtx = SemaDC;
+ } else {
+ Decl::MultipleDC *MDC = new (Ctx) Decl::MultipleDC();
+ MDC->SemanticDC = SemaDC;
+ MDC->LexicalDC = LexicalDC;
+ DeclCtx = MDC;
+ }
+}
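[Note: the helper only allocates a MultipleDC when the two contexts actually differ, which happens for declarations written outside their semantic context. A hypothetical example:

    namespace N { void f(); }
    void N::f() { } // semantic context: N; lexical context: translation unit
]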
+
bool Decl::isInAnonymousNamespace() const {
const DeclContext *DC = getDeclContext();
do {
@@ -282,13 +307,20 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
// Match the platform name.
if (A->getPlatform()->getName() != TargetPlatform)
return AR_Available;
-
+
+ std::string HintMessage;
+ if (!A->getMessage().empty()) {
+ HintMessage = " - ";
+ HintMessage += A->getMessage();
+ }
+
// Make sure that this declaration has not been marked 'unavailable'.
if (A->getUnavailable()) {
if (Message) {
Message->clear();
llvm::raw_string_ostream Out(*Message);
- Out << "not available on " << PrettyPlatformName;
+ Out << "not available on " << PrettyPlatformName
+ << HintMessage;
}
return AR_Unavailable;
@@ -301,7 +333,7 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
Message->clear();
llvm::raw_string_ostream Out(*Message);
Out << "introduced in " << PrettyPlatformName << ' '
- << A->getIntroduced();
+ << A->getIntroduced() << HintMessage;
}
return AR_NotYetIntroduced;
@@ -313,7 +345,7 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
Message->clear();
llvm::raw_string_ostream Out(*Message);
Out << "obsoleted in " << PrettyPlatformName << ' '
- << A->getObsoleted();
+ << A->getObsoleted() << HintMessage;
}
return AR_Unavailable;
@@ -325,7 +357,7 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
Message->clear();
llvm::raw_string_ostream Out(*Message);
Out << "first deprecated in " << PrettyPlatformName << ' '
- << A->getDeprecated();
+ << A->getDeprecated() << HintMessage;
}
return AR_Deprecated;
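[Note: the net effect of the HintMessage changes is that an availability attribute's message= string is now appended to all of these diagnostics. For instance, with a hypothetical declaration (exact platform spelling may differ):

    void oldAPI(void)
      __attribute__((availability(macosx, introduced=10.4, deprecated=10.6,
                                  obsoleted=10.7, message="use newAPI instead")));
    // yields e.g. "first deprecated in Mac OS X 10.6 - use newAPI instead"
]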
@@ -391,7 +423,7 @@ bool Decl::canBeWeakImported(bool &IsDefinition) const {
}
} else if (isa<ObjCPropertyDecl>(this) || isa<ObjCMethodDecl>(this))
return false;
- else if (!(getASTContext().getLangOptions().ObjCNonFragileABI &&
+ else if (!(getASTContext().getLangOpts().ObjCNonFragileABI &&
isa<ObjCInterfaceDecl>(this)))
return false;
@@ -488,9 +520,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case LinkageSpec:
case FileScopeAsm:
case StaticAssert:
- case ObjCClass:
case ObjCPropertyImpl:
- case ObjCForwardProtocol:
case Block:
case TranslationUnit:
@@ -501,17 +531,18 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ObjCImplementation:
case ObjCCategory:
case ObjCCategoryImpl:
+ case Import:
// Never looked up by name.
return 0;
}
- return 0;
+ llvm_unreachable("Invalid DeclKind!");
}
-void Decl::setAttrs(const AttrVec &attrs) {
+void Decl::setAttrsImpl(const AttrVec &attrs, ASTContext &Ctx) {
assert(!HasAttrs && "Decl already contains attrs.");
- AttrVec &AttrBlank = getASTContext().getDeclAttrs(this);
+ AttrVec &AttrBlank = Ctx.getDeclAttrs(this);
assert(AttrBlank.empty() && "HasAttrs was wrong?");
AttrBlank = attrs;
@@ -640,7 +671,11 @@ void Decl::CheckAccessDeclContext() const {
}
DeclContext *Decl::getNonClosureContext() {
- DeclContext *DC = getDeclContext();
+ return getDeclContext()->getNonClosureAncestor();
+}
+
+DeclContext *DeclContext::getNonClosureAncestor() {
+ DeclContext *DC = this;
// This is basically "while (DC->isClosure()) DC = DC->getParent();"
// except that it's significantly more efficient to cast to a known
@@ -704,10 +739,14 @@ bool DeclContext::isDependentContext() const {
if (isa<ClassTemplatePartialSpecializationDecl>(this))
return true;
- if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this)) {
if (Record->getDescribedClassTemplate())
return true;
-
+
+ if (Record->isDependentLambda())
+ return true;
+ }
+
if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
if (Function->getDescribedFunctionTemplate())
return true;
@@ -767,9 +806,18 @@ DeclContext *DeclContext::getPrimaryContext() {
return this;
case Decl::ObjCInterface:
+ if (ObjCInterfaceDecl *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
+ return Def;
+
+ return this;
+
case Decl::ObjCProtocol:
+ if (ObjCProtocolDecl *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
+ return Def;
+
+ return this;
+
case Decl::ObjCCategory:
- // FIXME: Can Objective-C interfaces be forward-declared?
return this;
case Decl::ObjCImplementation:
@@ -804,21 +852,27 @@ DeclContext *DeclContext::getPrimaryContext() {
}
}
-DeclContext *DeclContext::getNextContext() {
- switch (DeclKind) {
- case Decl::Namespace:
- // Return the next namespace
- return static_cast<NamespaceDecl*>(this)->getNextNamespace();
-
- default:
- return 0;
+void
+DeclContext::collectAllContexts(llvm::SmallVectorImpl<DeclContext *> &Contexts){
+ Contexts.clear();
+
+ if (DeclKind != Decl::Namespace) {
+ Contexts.push_back(this);
+ return;
}
+
+ NamespaceDecl *Self = static_cast<NamespaceDecl *>(this);
+ for (NamespaceDecl *N = Self->getMostRecentDecl(); N;
+ N = N->getPreviousDecl())
+ Contexts.push_back(N);
+
+ std::reverse(Contexts.begin(), Contexts.end());
}
std::pair<Decl *, Decl *>
-DeclContext::BuildDeclChain(const SmallVectorImpl<Decl*> &Decls,
+DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
bool FieldsAlreadyLoaded) {
- // Build up a chain of declarations via the Decl::NextDeclInContext field.
+ // Build up a chain of declarations via the Decl::NextInContextAndBits field.
Decl *FirstNewDecl = 0;
Decl *PrevDecl = 0;
for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
@@ -827,7 +881,7 @@ DeclContext::BuildDeclChain(const SmallVectorImpl<Decl*> &Decls,
Decl *D = Decls[I];
if (PrevDecl)
- PrevDecl->NextDeclInContext = D;
+ PrevDecl->NextInContextAndBits.setPointer(D);
else
FirstNewDecl = D;
@@ -873,7 +927,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
Decl *ExternalFirst, *ExternalLast;
llvm::tie(ExternalFirst, ExternalLast) = BuildDeclChain(Decls,
FieldsAlreadyLoaded);
- ExternalLast->NextDeclInContext = FirstDecl;
+ ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
FirstDecl = ExternalFirst;
if (!LastDecl)
LastDecl = ExternalLast;
@@ -884,7 +938,7 @@ ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
DeclarationName Name) {
ASTContext &Context = DC->getParentASTContext();
StoredDeclsMap *Map;
- if (!(Map = DC->LookupPtr))
+ if (!(Map = DC->LookupPtr.getPointer()))
Map = DC->CreateStoredDeclsMap(Context);
StoredDeclsList &List = (*Map)[Name];
@@ -901,7 +955,7 @@ ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
ASTContext &Context = DC->getParentASTContext();
StoredDeclsMap *Map;
- if (!(Map = DC->LookupPtr))
+ if (!(Map = DC->LookupPtr.getPointer()))
Map = DC->CreateStoredDeclsMap(Context);
StoredDeclsList &List = (*Map)[Name];
@@ -948,7 +1002,7 @@ bool DeclContext::decls_empty() const {
void DeclContext::removeDecl(Decl *D) {
assert(D->getLexicalDeclContext() == this &&
"decl being removed from non-lexical context");
- assert((D->NextDeclInContext || D == LastDecl) &&
+ assert((D->NextInContextAndBits.getPointer() || D == LastDecl) &&
"decl is not in decls list");
// Remove D from the decl chain. This is O(n) but hopefully rare.
@@ -956,12 +1010,12 @@ void DeclContext::removeDecl(Decl *D) {
if (D == LastDecl)
FirstDecl = LastDecl = 0;
else
- FirstDecl = D->NextDeclInContext;
+ FirstDecl = D->NextInContextAndBits.getPointer();
} else {
- for (Decl *I = FirstDecl; true; I = I->NextDeclInContext) {
+ for (Decl *I = FirstDecl; true; I = I->NextInContextAndBits.getPointer()) {
assert(I && "decl not found in linked list");
- if (I->NextDeclInContext == D) {
- I->NextDeclInContext = D->NextDeclInContext;
+ if (I->NextInContextAndBits.getPointer() == D) {
+ I->NextInContextAndBits.setPointer(D->NextInContextAndBits.getPointer());
if (D == LastDecl) LastDecl = I;
break;
}
@@ -969,7 +1023,7 @@ void DeclContext::removeDecl(Decl *D) {
}
// Mark that D is no longer in the decl chain.
- D->NextDeclInContext = 0;
+ D->NextInContextAndBits.setPointer(0);
// Remove D from the lookup table if necessary.
if (isa<NamedDecl>(D)) {
@@ -978,12 +1032,13 @@ void DeclContext::removeDecl(Decl *D) {
// Remove only decls that have a name
if (!ND->getDeclName()) return;
- StoredDeclsMap *Map = getPrimaryContext()->LookupPtr;
+ StoredDeclsMap *Map = getPrimaryContext()->LookupPtr.getPointer();
if (!Map) return;
StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
assert(Pos != Map->end() && "no lookup entry for decl");
- Pos->second.remove(ND);
+ if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
+ Pos->second.remove(ND);
}
}
@@ -994,7 +1049,7 @@ void DeclContext::addHiddenDecl(Decl *D) {
"Decl already inserted into a DeclContext");
if (FirstDecl) {
- LastDecl->NextDeclInContext = D;
+ LastDecl->NextInContextAndBits.setPointer(D);
LastDecl = D;
} else {
FirstDecl = LastDecl = D;
@@ -1004,55 +1059,118 @@ void DeclContext::addHiddenDecl(Decl *D) {
// update its class-specific state.
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
Record->addedMember(D);
+
+ // If this is a newly-created (not de-serialized) import declaration, wire
+ // it in to the list of local import declarations.
+ if (!D->isFromASTFile()) {
+ if (ImportDecl *Import = dyn_cast<ImportDecl>(D))
+ D->getASTContext().addedLocalImportDecl(Import);
+ }
}
void DeclContext::addDecl(Decl *D) {
addHiddenDecl(D);
if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
- ND->getDeclContext()->makeDeclVisibleInContext(ND);
+ ND->getDeclContext()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(ND, false, true);
+}
+
+void DeclContext::addDeclInternal(Decl *D) {
+ addHiddenDecl(D);
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ ND->getDeclContext()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(ND, true, true);
+}
+
+/// shouldBeHidden - Determine whether a declaration which was declared
+/// within its semantic context should be invisible to qualified name lookup.
+static bool shouldBeHidden(NamedDecl *D) {
+ // Skip unnamed declarations.
+ if (!D->getDeclName())
+ return true;
+
+ // Skip entities that can't be found by name lookup into a particular
+ // context.
+ if ((D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D)) ||
+ D->isTemplateParameter())
+ return true;
+
+ // Skip template specializations.
+ // FIXME: This feels like a hack. Should DeclarationName support
+ // template-ids, or is there a better way to keep specializations
+ // from being visible?
+ if (isa<ClassTemplateSpecializationDecl>(D))
+ return true;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isFunctionTemplateSpecialization())
+ return true;
+
+ return false;
}
/// buildLookup - Build the lookup data structure with all of the
-/// declarations in DCtx (and any other contexts linked to it or
-/// transparent contexts nested within it).
-void DeclContext::buildLookup(DeclContext *DCtx) {
- for (; DCtx; DCtx = DCtx->getNextContext()) {
- for (decl_iterator D = DCtx->decls_begin(),
- DEnd = DCtx->decls_end();
- D != DEnd; ++D) {
- // Insert this declaration into the lookup structure, but only
- // if it's semantically in its decl context. During non-lazy
- // lookup building, this is implicitly enforced by addDecl.
- if (NamedDecl *ND = dyn_cast<NamedDecl>(*D))
- if (D->getDeclContext() == DCtx)
- makeDeclVisibleInContextImpl(ND);
-
- // Insert any forward-declared Objective-C interface into the lookup
- // data structure.
- if (ObjCClassDecl *Class = dyn_cast<ObjCClassDecl>(*D))
- makeDeclVisibleInContextImpl(Class->getForwardInterfaceDecl());
-
- // If this declaration is itself a transparent declaration context or
- // inline namespace, add its members (recursively).
- if (DeclContext *InnerCtx = dyn_cast<DeclContext>(*D))
- if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
- buildLookup(InnerCtx->getPrimaryContext());
- }
+/// declarations in this DeclContext (and any other contexts linked
+/// to it or transparent contexts nested within it) and return it.
+StoredDeclsMap *DeclContext::buildLookup() {
+ assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
+
+ if (!LookupPtr.getInt())
+ return LookupPtr.getPointer();
+
+ llvm::SmallVector<DeclContext *, 2> Contexts;
+ collectAllContexts(Contexts);
+ for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
+ buildLookupImpl(Contexts[I]);
+
+ // We no longer have any lazy decls.
+ LookupPtr.setInt(false);
+ return LookupPtr.getPointer();
+}
+
+/// buildLookupImpl - Build part of the lookup data structure for the
+/// declarations contained within DCtx, which will either be this
+/// DeclContext, a DeclContext linked to it, or a transparent context
+/// nested within it.
+void DeclContext::buildLookupImpl(DeclContext *DCtx) {
+ for (decl_iterator I = DCtx->decls_begin(), E = DCtx->decls_end();
+ I != E; ++I) {
+ Decl *D = *I;
+
+ // Insert this declaration into the lookup structure, but only if
+ // it's semantically within its decl context. Any other decls which
+ // should be found in this context are added eagerly.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->getDeclContext() == DCtx && !shouldBeHidden(ND))
+ makeDeclVisibleInContextImpl(ND, false);
+
+ // If this declaration is itself a transparent declaration context
+ // or inline namespace, add the members of this declaration of that
+ // context (recursively).
+ if (DeclContext *InnerCtx = dyn_cast<DeclContext>(D))
+ if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
+ buildLookupImpl(InnerCtx);
}
}
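[Note: LookupPtr has changed from a bare StoredDeclsMap* into a pointer/bit pair. A sketch of the encoding as used in buildLookup() above:

    //   LookupPtr.getPointer() -> StoredDeclsMap* (the table, possibly null)
    //   LookupPtr.getInt()     -> true when decls were lazily omitted, i.e.
    //                             buildLookup() must run before the table
    //                             can be trusted
]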
DeclContext::lookup_result
DeclContext::lookup(DeclarationName Name) {
+ assert(DeclKind != Decl::LinkageSpec &&
+ "Should not perform lookups into linkage specs!");
+
DeclContext *PrimaryContext = getPrimaryContext();
if (PrimaryContext != this)
return PrimaryContext->lookup(Name);
if (hasExternalVisibleStorage()) {
- // Check to see if we've already cached the lookup results.
- if (LookupPtr) {
- StoredDeclsMap::iterator I = LookupPtr->find(Name);
- if (I != LookupPtr->end())
+ // If a PCH has a result for this name, and we have a local declaration, we
+ // will have imported the PCH result when adding the local declaration.
+ // FIXME: For modules, we could have had more declarations added by module
+ // imports since we saw the declaration of the local name.
+ if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+ StoredDeclsMap::iterator I = Map->find(Name);
+ if (I != Map->end())
return I->second.getLookupResult();
}
@@ -1060,20 +1178,18 @@ DeclContext::lookup(DeclarationName Name) {
return Source->FindExternalVisibleDeclsByName(this, Name);
}
- /// If there is no lookup data structure, build one now by walking
- /// all of the linked DeclContexts (in declaration order!) and
- /// inserting their values.
- if (!LookupPtr) {
- buildLookup(this);
+ StoredDeclsMap *Map = LookupPtr.getPointer();
+ if (LookupPtr.getInt())
+ Map = buildLookup();
- if (!LookupPtr)
- return lookup_result(lookup_iterator(0), lookup_iterator(0));
- }
+ if (!Map)
+ return lookup_result(lookup_iterator(0), lookup_iterator(0));
- StoredDeclsMap::iterator Pos = LookupPtr->find(Name);
- if (Pos == LookupPtr->end())
+ StoredDeclsMap::iterator I = Map->find(Name);
+ if (I == Map->end())
return lookup_result(lookup_iterator(0), lookup_iterator(0));
- return Pos->second.getLookupResult();
+
+ return I->second.getLookupResult();
}
DeclContext::lookup_const_result
@@ -1094,10 +1210,10 @@ void DeclContext::localUncachedLookup(DeclarationName Name,
}
// If we have a lookup table, check there first. Maybe we'll get lucky.
- if (LookupPtr) {
- StoredDeclsMap::iterator Pos = LookupPtr->find(Name);
- if (Pos != LookupPtr->end()) {
- Results.insert(Results.end(),
+ if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+ StoredDeclsMap::iterator Pos = Map->find(Name);
+ if (Pos != Map->end()) {
+ Results.insert(Results.end(),
Pos->second.getLookupResult().first,
Pos->second.getLookupResult().second);
return;
@@ -1147,79 +1263,97 @@ bool DeclContext::InEnclosingNamespaceSetOf(const DeclContext *O) const {
return false;
}
-void DeclContext::makeDeclVisibleInContext(NamedDecl *D, bool Recoverable) {
- // FIXME: This feels like a hack. Should DeclarationName support
- // template-ids, or is there a better way to keep specializations
- // from being visible?
- if (isa<ClassTemplateSpecializationDecl>(D) || D->isTemplateParameter())
+void DeclContext::makeDeclVisibleInContext(NamedDecl *D) {
+ DeclContext *PrimaryDC = this->getPrimaryContext();
+ DeclContext *DeclDC = D->getDeclContext()->getPrimaryContext();
+ // If the decl is being added outside of its semantic decl context, we
+ // need to ensure that we eagerly build the lookup information for it.
+ PrimaryDC->makeDeclVisibleInContextWithFlags(D, false, PrimaryDC == DeclDC);
+}
+
+void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
+ bool Recoverable) {
+ assert(this == getPrimaryContext() && "expected a primary DC");
+
+ // Skip declarations within functions.
+ // FIXME: We shouldn't need to build lookup tables for function declarations
+ // ever, and we can't do so correctly because we can't model the nesting of
+ // scopes which occurs within functions. We use "qualified" lookup into
+ // function declarations when handling friend declarations inside nested
+ // classes, and consequently accept the following invalid code:
+ //
+ // void f() { void g(); { int g; struct S { friend void g(); }; } }
+ if (isFunctionOrMethod() && !isa<FunctionDecl>(D))
return;
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isFunctionTemplateSpecialization())
- return;
- DeclContext *PrimaryContext = getPrimaryContext();
- if (PrimaryContext != this) {
- PrimaryContext->makeDeclVisibleInContext(D, Recoverable);
+ // Skip declarations which should be invisible to name lookup.
+ if (shouldBeHidden(D))
return;
- }
- // If we already have a lookup data structure, perform the insertion
- // into it. If we haven't deserialized externally stored decls, deserialize
- // them so we can add the decl. Otherwise, be lazy and don't build that
- // structure until someone asks for it.
- if (LookupPtr || !Recoverable || hasExternalVisibleStorage())
- makeDeclVisibleInContextImpl(D);
+ // If we already have a lookup data structure, perform the insertion into
+ // it. If we might have externally-stored decls with this name, look them
+ // up and perform the insertion. If this decl was declared outside its
+ // semantic context, buildLookup won't add it, so add it now.
+ //
+ // FIXME: As a performance hack, don't add such decls into the translation
+ // unit unless we're in C++, since qualified lookup into the TU is never
+ // performed.
+ if (LookupPtr.getPointer() || hasExternalVisibleStorage() ||
+ ((!Recoverable || D->getDeclContext() != D->getLexicalDeclContext()) &&
+ (getParentASTContext().getLangOpts().CPlusPlus ||
+ !isTranslationUnit()))) {
+ // If we have lazily omitted any decls, they might have the same name as
+ // the decl which we are adding, so build a full lookup table before adding
+ // this decl.
+ buildLookup();
+ makeDeclVisibleInContextImpl(D, Internal);
+ } else {
+ LookupPtr.setInt(true);
+ }
// If we are a transparent context or inline namespace, insert into our
// parent context, too. This operation is recursive.
if (isTransparentContext() || isInlineNamespace())
- getParent()->makeDeclVisibleInContext(D, Recoverable);
+ getParent()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(D, Internal, Recoverable);
Decl *DCAsDecl = cast<Decl>(this);
- // Notify that a decl was made visible unless it's a Tag being defined.
+ // Notify that a decl was made visible unless we are a Tag being defined.
if (!(isa<TagDecl>(DCAsDecl) && cast<TagDecl>(DCAsDecl)->isBeingDefined()))
if (ASTMutationListener *L = DCAsDecl->getASTMutationListener())
L->AddedVisibleDecl(this, D);
}
-void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D) {
- // Skip unnamed declarations.
- if (!D->getDeclName())
- return;
-
- // Skip entities that can't be found by name lookup into a particular
- // context.
- if ((D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D)) ||
- D->isTemplateParameter())
- return;
-
- ASTContext *C = 0;
- if (!LookupPtr) {
- C = &getParentASTContext();
- CreateStoredDeclsMap(*C);
+void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
+ // Find or create the stored declaration map.
+ StoredDeclsMap *Map = LookupPtr.getPointer();
+ if (!Map) {
+ ASTContext *C = &getParentASTContext();
+ Map = CreateStoredDeclsMap(*C);
}
// If there is an external AST source, load any declarations it knows about
// with this declaration's name.
// If the lookup table contains an entry about this name it means that we
// have already checked the external source.
- if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
- if (hasExternalVisibleStorage() &&
- LookupPtr->find(D->getDeclName()) == LookupPtr->end())
- Source->FindExternalVisibleDeclsByName(this, D->getDeclName());
+ if (!Internal)
+ if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
+ if (hasExternalVisibleStorage() &&
+ Map->find(D->getDeclName()) == Map->end())
+ Source->FindExternalVisibleDeclsByName(this, D->getDeclName());
// Insert this declaration into the map.
- StoredDeclsList &DeclNameEntries = (*LookupPtr)[D->getDeclName()];
+ StoredDeclsList &DeclNameEntries = (*Map)[D->getDeclName()];
if (DeclNameEntries.isNull()) {
DeclNameEntries.setOnlyValue(D);
return;
}
- // If it is possible that this is a redeclaration, check to see if there is
- // already a decl for which declarationReplaces returns true. If there is
- // one, just replace it and return.
- if (DeclNameEntries.HandleRedeclaration(D))
+ if (DeclNameEntries.HandleRedeclaration(D)) {
+ // This declaration has replaced an existing one for which
+ // declarationReplaces returns true.
return;
+ }
// Put this declaration into the appropriate slot.
DeclNameEntries.AddSubsequentDecl(D);
@@ -1229,6 +1363,8 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D) {
/// this context.
DeclContext::udir_iterator_range
DeclContext::getUsingDirectives() const {
+ // FIXME: Use something more efficient than normal lookup for using
+ // directives. In C++, using directives are looked up more than anything else.
lookup_const_result Result = lookup(UsingDirectiveDecl::getName());
return udir_iterator_range(reinterpret_cast<udir_iterator>(Result.first),
reinterpret_cast<udir_iterator>(Result.second));
@@ -1239,7 +1375,7 @@ DeclContext::getUsingDirectives() const {
//===----------------------------------------------------------------------===//
StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
- assert(!LookupPtr && "context already has a decls map");
+ assert(!LookupPtr.getPointer() && "context already has a decls map");
assert(getPrimaryContext() == this &&
"creating decls map on non-primary context");
@@ -1251,7 +1387,7 @@ StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
M = new StoredDeclsMap();
M->Previous = C.LastSDM;
C.LastSDM = llvm::PointerIntPair<StoredDeclsMap*,1>(M, Dependent);
- LookupPtr = M;
+ LookupPtr.setPointer(M);
return M;
}
@@ -1283,11 +1419,11 @@ DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
assert(Parent->isDependentContext()
&& "cannot iterate dependent diagnostics of non-dependent context");
Parent = Parent->getPrimaryContext();
- if (!Parent->LookupPtr)
+ if (!Parent->LookupPtr.getPointer())
Parent->CreateStoredDeclsMap(C);
DependentStoredDeclsMap *Map
- = static_cast<DependentStoredDeclsMap*>(Parent->LookupPtr);
+ = static_cast<DependentStoredDeclsMap*>(Parent->LookupPtr.getPointer());
// Allocate the copy of the PartialDiagnostic via the ASTContext's
// BumpPtrAllocator, rather than the ASTContext itself.
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
index f3da67c..114322b 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/STLExtras.h"
@@ -27,6 +28,13 @@ using namespace clang;
// Decl Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//
+void AccessSpecDecl::anchor() { }
+
+AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(AccessSpecDecl));
+ return new (Mem) AccessSpecDecl(EmptyShell());
+}
+
CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
: UserDeclaredConstructor(false), UserDeclaredCopyConstructor(false),
UserDeclaredMoveConstructor(false), UserDeclaredCopyAssignment(false),
@@ -34,17 +42,24 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
- HasMutableFields(false), HasTrivialDefaultConstructor(true),
- HasConstexprNonCopyMoveConstructor(false), HasTrivialCopyConstructor(true),
+ HasMutableFields(false), HasOnlyCMembers(true),
+ HasTrivialDefaultConstructor(true),
+ HasConstexprNonCopyMoveConstructor(false),
+ DefaultedDefaultConstructorIsConstexpr(true),
+ DefaultedCopyConstructorIsConstexpr(true),
+ DefaultedMoveConstructorIsConstexpr(true),
+ HasConstexprDefaultConstructor(false), HasConstexprCopyConstructor(false),
+ HasConstexprMoveConstructor(false), HasTrivialCopyConstructor(true),
HasTrivialMoveConstructor(true), HasTrivialCopyAssignment(true),
HasTrivialMoveAssignment(true), HasTrivialDestructor(true),
+ HasIrrelevantDestructor(true),
HasNonLiteralTypeFieldsOrBases(false), ComputedVisibleConversions(false),
UserProvidedDefaultConstructor(false), DeclaredDefaultConstructor(false),
DeclaredCopyConstructor(false), DeclaredMoveConstructor(false),
DeclaredCopyAssignment(false), DeclaredMoveAssignment(false),
DeclaredDestructor(false), FailedImplicitMoveConstructor(false),
- FailedImplicitMoveAssignment(false), NumBases(0), NumVBases(0), Bases(),
- VBases(), Definition(D), FirstFriend(0) {
+ FailedImplicitMoveAssignment(false), IsLambda(false), NumBases(0),
+ NumVBases(0), Bases(), VBases(), Definition(D), FirstFriend(0) {
}
CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
@@ -68,24 +83,41 @@ CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
return R;
}
-CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, EmptyShell Empty) {
- return new (C) CXXRecordDecl(CXXRecord, TTK_Struct, 0, SourceLocation(),
- SourceLocation(), 0, 0);
+CXXRecordDecl *CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
+ SourceLocation Loc, bool Dependent) {
+ CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc,
+ 0, 0);
+ R->IsBeingDefined = true;
+ R->DefinitionData = new (C) struct LambdaDefinitionData(R, Dependent);
+ C.getTypeDeclType(R, /*PrevDecl=*/0);
+ return R;
+}
+
+CXXRecordDecl *
+CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXRecordDecl));
+ return new (Mem) CXXRecordDecl(CXXRecord, TTK_Struct, 0, SourceLocation(),
+ SourceLocation(), 0, 0);
}
void
CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
unsigned NumBases) {
ASTContext &C = getASTContext();
-
- // C++ [dcl.init.aggr]p1:
- // An aggregate is an array or a class (clause 9) with [...]
- // no base classes [...].
- data().Aggregate = false;
if (!data().Bases.isOffset() && data().NumBases > 0)
C.Deallocate(data().getBases());
+ if (NumBases) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is [...] a class with [...] no base classes [...].
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+ }
+
// The set of seen virtual base types.
llvm::SmallPtrSet<CanQualType, 8> SeenVBaseTypes;
@@ -105,14 +137,6 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
- // C++ [dcl.init.aggr]p1:
- // An aggregate is [...] a class with [...] no base classes [...].
- data().Aggregate = false;
-
- // C++ [class]p4:
- // A POD-struct is an aggregate class...
- data().PlainOldData = false;
-
// A class with a non-empty base class is not empty.
// FIXME: Standard ref?
if (!BaseClassDecl->isEmpty()) {
@@ -190,6 +214,13 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// A standard-layout class is a class that: [...]
// -- has [...] no virtual base classes
data().IsStandardLayout = false;
+
+ // C++11 [dcl.constexpr]p4:
+ // In the definition of a constexpr constructor [...]
+ // -- the class shall not have any virtual base classes
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
} else {
// C++ [class.ctor]p5:
// A default constructor is trivial [...] if:
@@ -221,6 +252,32 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
data().HasTrivialCopyAssignment = false;
if (!BaseClassDecl->hasTrivialMoveAssignment())
data().HasTrivialMoveAssignment = false;
+
+ // C++11 [class.ctor]p6:
+ // If that user-written default constructor would satisfy the
+ // requirements of a constexpr constructor, the implicitly-defined
+ // default constructor is constexpr.
+ if (!BaseClassDecl->hasConstexprDefaultConstructor())
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ // C++11 [class.copy]p13:
+ // If the implicitly-defined constructor would satisfy the requirements
+ // of a constexpr constructor, the implicitly-defined constructor is
+ // constexpr.
+ // C++11 [dcl.constexpr]p4:
+ // -- every constructor involved in initializing [...] base class
+ // sub-objects shall be a constexpr constructor
+ if (!BaseClassDecl->hasConstexprCopyConstructor())
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ if (BaseClassDecl->hasDeclaredMoveConstructor() ||
+ BaseClassDecl->needsImplicitMoveConstructor())
+ // FIXME: If the implicit move constructor generated for the base class
+ // would be ill-formed, the implicit move constructor generated for the
+ // derived class calls the base class' copy constructor.
+ data().DefaultedMoveConstructorIsConstexpr &=
+ BaseClassDecl->hasConstexprMoveConstructor();
+ else if (!BaseClassDecl->hasConstexprCopyConstructor())
+ data().DefaultedMoveConstructorIsConstexpr = false;
}
// C++ [class.ctor]p3:
@@ -228,7 +285,10 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// have trivial destructors.
if (!BaseClassDecl->hasTrivialDestructor())
data().HasTrivialDestructor = false;
-
+
+ if (!BaseClassDecl->hasIrrelevantDestructor())
+ data().HasIrrelevantDestructor = false;
+
// A class has an Objective-C object member if... or any of its bases
// has an Objective-C object member.
if (BaseClassDecl->hasObjectMember())
@@ -410,6 +470,12 @@ void CXXRecordDecl::markedVirtualFunctionPure() {
}
void CXXRecordDecl::addedMember(Decl *D) {
+ if (!D->isImplicit() &&
+ !isa<FieldDecl>(D) &&
+ !isa<IndirectFieldDecl>(D) &&
+ (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class))
+ data().HasOnlyCMembers = false;
+
// Ignore friends and invalid declarations.
if (D->getFriendObjectKind() || D->isInvalidDecl())
return;
@@ -453,8 +519,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// -- class X has no virtual functions [...]
data().HasTrivialCopyAssignment = false;
data().HasTrivialMoveAssignment = false;
- // FIXME: Destructor?
-
+
// C++0x [class]p7:
// A standard-layout class is a class that: [...]
// -- has no virtual functions
@@ -472,13 +537,21 @@ void CXXRecordDecl::addedMember(Decl *D) {
// If this is a special member function, note that it was added and then
// return early.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
- if (Constructor->isDefaultConstructor())
+ if (Constructor->isDefaultConstructor()) {
data().DeclaredDefaultConstructor = true;
- else if (Constructor->isCopyConstructor())
+ if (Constructor->isConstexpr()) {
+ data().HasConstexprDefaultConstructor = true;
+ data().HasConstexprNonCopyMoveConstructor = true;
+ }
+ } else if (Constructor->isCopyConstructor()) {
data().DeclaredCopyConstructor = true;
- else if (Constructor->isMoveConstructor())
+ if (Constructor->isConstexpr())
+ data().HasConstexprCopyConstructor = true;
+ } else if (Constructor->isMoveConstructor()) {
data().DeclaredMoveConstructor = true;
- else
+ if (Constructor->isConstexpr())
+ data().HasConstexprMoveConstructor = true;
+ } else
goto NotASpecialMember;
return;
} else if (isa<CXXDestructorDecl>(D)) {
@@ -508,14 +581,18 @@ NotASpecialMember:;
// to all functions.
bool UserProvided = Constructor->isUserProvided();
- // C++0x [class.ctor]p5:
- // A default constructor is trivial if it is not user-provided [...]
if (Constructor->isDefaultConstructor()) {
data().DeclaredDefaultConstructor = true;
if (UserProvided) {
+ // C++0x [class.ctor]p5:
+ // A default constructor is trivial if it is not user-provided [...]
data().HasTrivialDefaultConstructor = false;
data().UserProvidedDefaultConstructor = true;
}
+ if (Constructor->isConstexpr()) {
+ data().HasConstexprDefaultConstructor = true;
+ data().HasConstexprNonCopyMoveConstructor = true;
+ }
}
// Note when we have a user-declared copy or move constructor, which will
@@ -530,6 +607,9 @@ NotASpecialMember:;
// user-provided [...]
if (UserProvided)
data().HasTrivialCopyConstructor = false;
+
+ if (Constructor->isConstexpr())
+ data().HasConstexprCopyConstructor = true;
} else if (Constructor->isMoveConstructor()) {
data().UserDeclaredMoveConstructor = true;
data().DeclaredMoveConstructor = true;
@@ -539,6 +619,9 @@ NotASpecialMember:;
// user-provided [...]
if (UserProvided)
data().HasTrivialMoveConstructor = false;
+
+ if (Constructor->isConstexpr())
+ data().HasConstexprMoveConstructor = true;
}
}
if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor()) {
@@ -553,7 +636,7 @@ NotASpecialMember:;
// C++0x [dcl.init.aggr]p1:
// An aggregate is an array or a class with no user-provided
// constructors [...].
- if (!getASTContext().getLangOptions().CPlusPlus0x || UserProvided)
+ if (!getASTContext().getLangOpts().CPlusPlus0x || UserProvided)
data().Aggregate = false;
// C++ [class]p4:
@@ -569,17 +652,29 @@ NotASpecialMember:;
if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) {
data().DeclaredDestructor = true;
data().UserDeclaredDestructor = true;
-
+ data().HasIrrelevantDestructor = false;
+
// C++ [class]p4:
// A POD-struct is an aggregate class that has [...] no user-defined
// destructor.
// This bit is the C++03 POD bit, not the 0x one.
data().PlainOldData = false;
- // C++0x [class.dtor]p5:
- // A destructor is trivial if it is not user-provided and [...]
- if (DD->isUserProvided())
+ // C++11 [class.dtor]p5:
+ // A destructor is trivial if it is not user-provided and if
+ // -- the destructor is not virtual.
+ if (DD->isUserProvided() || DD->isVirtual()) {
data().HasTrivialDestructor = false;
+ // C++11 [dcl.constexpr]p1:
+ // The constexpr specifier shall be applied only to [...] the
+ // declaration of a static data member of a literal type.
+ // C++11 [basic.types]p10:
+ // A type is a literal type if it is [...] a class type that [...] has
+ // a trivial destructor.
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ }
return;
}
@@ -634,14 +729,14 @@ NotASpecialMember:;
// hasn't been set yet. That's really just a misdesign in Sema.
if (FunTmpl) {
- if (FunTmpl->getPreviousDeclaration())
- data().Conversions.replace(FunTmpl->getPreviousDeclaration(),
+ if (FunTmpl->getPreviousDecl())
+ data().Conversions.replace(FunTmpl->getPreviousDecl(),
FunTmpl);
else
data().Conversions.addDecl(FunTmpl);
} else {
- if (Conversion->getPreviousDeclaration())
- data().Conversions.replace(Conversion->getPreviousDeclaration(),
+ if (Conversion->getPreviousDecl())
+ data().Conversions.replace(Conversion->getPreviousDecl(),
Conversion);
else
data().Conversions.addDecl(Conversion);
@@ -703,7 +798,7 @@ NotASpecialMember:;
ASTContext &Context = getASTContext();
QualType T = Context.getBaseElementType(Field->getType());
if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
- if (!Context.getLangOptions().ObjCAutoRefCount ||
+ if (!Context.getLangOpts().ObjCAutoRefCount ||
T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone)
setHasObjectMember(true);
} else if (!T.isPODType(Context))
@@ -718,12 +813,8 @@ NotASpecialMember:;
data().IsStandardLayout = false;
}
- // Record if this field is the first non-literal field or base.
- // As a slight variation on the standard, we regard mutable members as being
- // non-literal, since mutating a constexpr variable would break C++11
- // constant expression semantics.
- if ((!hasNonLiteralTypeFieldsOrBases() && !T->isLiteralType()) ||
- Field->isMutable())
+ // Record if this field is the first non-literal or volatile field or base.
+ if (!T->isLiteralType() || T.isVolatileQualified())
data().HasNonLiteralTypeFieldsOrBases = true;
if (Field->hasInClassInitializer()) {
@@ -746,7 +837,7 @@ NotASpecialMember:;
CXXRecordDecl* FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
if (FieldRec->getDefinition()) {
// C++0x [class.ctor]p5:
- // A defulat constructor is trivial [...] if:
+ // A default constructor is trivial [...] if:
// -- for all the non-static data members of its class that are of
// class type (or array thereof), each such class has a trivial
// default constructor.
@@ -779,6 +870,8 @@ NotASpecialMember:;
if (!FieldRec->hasTrivialDestructor())
data().HasTrivialDestructor = false;
+ if (!FieldRec->hasIrrelevantDestructor())
+ data().HasIrrelevantDestructor = false;
if (FieldRec->hasObjectMember())
setHasObjectMember(true);
@@ -818,7 +911,41 @@ NotASpecialMember:;
// Keep track of the presence of mutable fields.
if (FieldRec->hasMutableFields())
data().HasMutableFields = true;
+
+ // C++11 [class.copy]p13:
+ // If the implicitly-defined constructor would satisfy the
+ // requirements of a constexpr constructor, the implicitly-defined
+ // constructor is constexpr.
+ // C++11 [dcl.constexpr]p4:
+ // -- every constructor involved in initializing non-static data
+ // members [...] shall be a constexpr constructor
+ if (!Field->hasInClassInitializer() &&
+ !FieldRec->hasConstexprDefaultConstructor())
+ // The standard requires any in-class initializer to be a constant
+ // expression. We consider this to be a defect.
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ if (!FieldRec->hasConstexprCopyConstructor())
+ data().DefaultedCopyConstructorIsConstexpr = false;
+
+ if (FieldRec->hasDeclaredMoveConstructor() ||
+ FieldRec->needsImplicitMoveConstructor())
+ // FIXME: If the implicit move constructor generated for the member's
+ // class would be ill-formed, the implicit move constructor generated
+ // for this class calls the member's copy constructor.
+ data().DefaultedMoveConstructorIsConstexpr &=
+ FieldRec->hasConstexprMoveConstructor();
+ else if (!FieldRec->hasConstexprCopyConstructor())
+ data().DefaultedMoveConstructorIsConstexpr = false;
}
+ } else {
+ // Base element type of field is a non-class type.
+ if (!T->isLiteralType()) {
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ } else if (!Field->hasInClassInitializer())
+ data().DefaultedDefaultConstructorIsConstexpr = false;
}
// C++0x [class]p7:
@@ -849,6 +976,35 @@ NotASpecialMember:;
data().Conversions.addDecl(Shadow, Shadow->getAccess());
}
+bool CXXRecordDecl::isCLike() const {
+ if (getTagKind() == TTK_Class || !TemplateOrInstantiation.isNull())
+ return false;
+ if (!hasDefinition())
+ return true;
+
+ return isPOD() && data().HasOnlyCMembers;
+}
+
+void CXXRecordDecl::getCaptureFields(
+ llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
+ FieldDecl *&ThisCapture) const {
+ Captures.clear();
+ ThisCapture = 0;
+
+ LambdaDefinitionData &Lambda = getLambdaData();
+ RecordDecl::field_iterator Field = field_begin();
+ for (LambdaExpr::Capture *C = Lambda.Captures, *CEnd = C + Lambda.NumCaptures;
+ C != CEnd; ++C, ++Field) {
+ if (C->capturesThis()) {
+ ThisCapture = *Field;
+ continue;
+ }
+
+ Captures[C->getCapturedVar()] = *Field;
+ }
+}
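[Note: a sketch of how a consumer might use the new capture map; the names Lambda and SomeVar are hypothetical:

    llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
    FieldDecl *ThisCapture = 0;
    Lambda->getCaptureFields(Captures, ThisCapture);
    if (FieldDecl *F = Captures.lookup(SomeVar)) {
      // F is the closure-class member that stores SomeVar's capture.
    }
]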
+
+
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
QualType T;
if (isa<UsingShadowDecl>(Conv))
@@ -1087,7 +1243,7 @@ void CXXRecordDecl::completeDefinition() {
void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
RecordDecl::completeDefinition();
- if (hasObjectMember() && getASTContext().getLangOptions().ObjCAutoRefCount) {
+ if (hasObjectMember() && getASTContext().getLangOpts().ObjCAutoRefCount) {
// Objective-C Automatic Reference Counting:
// If a class has a non-static data member of Objective-C pointer
// type (or array thereof), it is a non-POD type and its
@@ -1099,6 +1255,7 @@ void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
Data.HasTrivialCopyConstructor = false;
Data.HasTrivialCopyAssignment = false;
Data.HasTrivialDestructor = false;
+ Data.HasIrrelevantDestructor = false;
}
// If the class may be abstract (but hasn't been marked as such), check for
@@ -1157,6 +1314,8 @@ bool CXXRecordDecl::mayBeAbstract() const {
return false;
}
+void CXXMethodDecl::anchor() { }
+
CXXMethodDecl *
CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
SourceLocation StartLoc,
@@ -1169,6 +1328,14 @@ CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
EndLocation);
}
+CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXMethodDecl));
+ return new (Mem) CXXMethodDecl(CXXMethod, 0, SourceLocation(),
+ DeclarationNameInfo(), QualType(),
+ 0, false, SC_None, false, false,
+ SourceLocation());
+}
+
bool CXXMethodDecl::isUsualDeallocationFunction() const {
if (getOverloadedOperator() != OO_Delete &&
getOverloadedOperator() != OO_Array_Delete)
@@ -1254,19 +1421,23 @@ void CXXMethodDecl::addOverriddenMethod(const CXXMethodDecl *MD) {
assert(MD->isCanonicalDecl() && "Method is not canonical!");
assert(!MD->getParent()->isDependentContext() &&
"Can't add an overridden method to a class template!");
+ assert(MD->isVirtual() && "Method is not virtual!");
getASTContext().addOverriddenMethod(this, MD);
}
CXXMethodDecl::method_iterator CXXMethodDecl::begin_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
return getASTContext().overridden_methods_begin(this);
}
CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
return getASTContext().overridden_methods_end(this);
}
unsigned CXXMethodDecl::size_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
return getASTContext().overridden_methods_size(this);
}
@@ -1296,14 +1467,20 @@ bool CXXMethodDecl::hasInlineBody() const {
return CheckFn->hasBody(fn) && !fn->isOutOfLine();
}
+bool CXXMethodDecl::isLambdaStaticInvoker() const {
+ return getParent()->isLambda() &&
+ getIdentifier() && getIdentifier()->getName() == "__invoke";
+}
+
CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
TypeSourceInfo *TInfo, bool IsVirtual,
SourceLocation L, Expr *Init,
SourceLocation R,
SourceLocation EllipsisLoc)
: Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init),
- LParenLoc(L), RParenLoc(R), IsVirtual(IsVirtual), IsWritten(false),
- SourceOrderOrNumArrayIndices(0)
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(IsVirtual),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
@@ -1313,7 +1490,7 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
SourceLocation L, Expr *Init,
SourceLocation R)
: Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
- LParenLoc(L), RParenLoc(R), IsVirtual(false),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
@@ -1324,17 +1501,17 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
SourceLocation L, Expr *Init,
SourceLocation R)
: Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
- LParenLoc(L), RParenLoc(R), IsVirtual(false),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
- SourceLocation D, SourceLocation L,
- CXXConstructorDecl *Target, Expr *Init,
+ TypeSourceInfo *TInfo,
+ SourceLocation L, Expr *Init,
SourceLocation R)
- : Initializee(Target), MemberOrEllipsisLocation(D), Init(Init),
- LParenLoc(L), RParenLoc(R), IsVirtual(false),
+ : Initializee(TInfo), MemberOrEllipsisLocation(), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(true), IsVirtual(false),
IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
@@ -1383,13 +1560,16 @@ const Type *CXXCtorInitializer::getBaseClass() const {
}
SourceLocation CXXCtorInitializer::getSourceLocation() const {
- if (isAnyMemberInitializer() || isDelegatingInitializer())
+ if (isAnyMemberInitializer())
return getMemberLocation();
if (isInClassMemberInitializer())
return getAnyMember()->getLocation();
- return getBaseClassLoc().getLocalSourceRange().getBegin();
+ if (TypeSourceInfo *TSInfo = Initializee.get<TypeSourceInfo*>())
+ return TSInfo->getTypeLoc().getLocalSourceRange().getBegin();
+
+ return SourceLocation();
}
SourceRange CXXCtorInitializer::getSourceRange() const {
@@ -1403,10 +1583,13 @@ SourceRange CXXCtorInitializer::getSourceRange() const {
return SourceRange(getSourceLocation(), getRParenLoc());
}
+void CXXConstructorDecl::anchor() { }
+
CXXConstructorDecl *
-CXXConstructorDecl::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) CXXConstructorDecl(0, SourceLocation(), DeclarationNameInfo(),
- QualType(), 0, false, false, false, false);
+CXXConstructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConstructorDecl));
+ return new (Mem) CXXConstructorDecl(0, SourceLocation(),DeclarationNameInfo(),
+ QualType(), 0, false, false, false,false);
}
CXXConstructorDecl *
@@ -1424,6 +1607,15 @@ CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
isConstexpr);
}
+CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
+ assert(isDelegatingConstructor() && "Not a delegating constructor!");
+ Expr *E = (*init_begin())->getInit()->IgnoreImplicit();
+ if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(E))
+ return Construct->getConstructor();
+
+ return 0;
+}
+
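A source-level sketch of what getTargetConstructor() resolves; S is a hypothetical type:

    struct S {
      S(int a, int b);
      S(int a) : S(a, 0) {}  // C++11 delegating constructor; its lone
                             // mem-initializer names the target S(int, int)
    };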
bool CXXConstructorDecl::isDefaultConstructor() const {
// C++ [class.ctor]p5:
// A default constructor for a class X is a constructor of class
@@ -1524,8 +1716,8 @@ bool CXXConstructorDecl::isSpecializationCopyingObject() const {
const CXXConstructorDecl *CXXConstructorDecl::getInheritedConstructor() const {
// Hack: we store the inherited constructor in the overridden method table
- method_iterator It = begin_overridden_methods();
- if (It == end_overridden_methods())
+ method_iterator It = getASTContext().overridden_methods_begin(this);
+ if (It == getASTContext().overridden_methods_end(this))
return 0;
return cast<CXXConstructorDecl>(*It);
@@ -1534,13 +1726,17 @@ const CXXConstructorDecl *CXXConstructorDecl::getInheritedConstructor() const {
void
CXXConstructorDecl::setInheritedConstructor(const CXXConstructorDecl *BaseCtor){
// Hack: we store the inherited constructor in the overridden method table
- assert(size_overridden_methods() == 0 && "Base ctor already set.");
- addOverriddenMethod(BaseCtor);
+ assert(getASTContext().overridden_methods_size(this) == 0 &&
+ "Base ctor already set.");
+ getASTContext().addOverriddenMethod(this, BaseCtor);
}
+void CXXDestructorDecl::anchor() { }
+
CXXDestructorDecl *
-CXXDestructorDecl::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) CXXDestructorDecl(0, SourceLocation(), DeclarationNameInfo(),
+CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXDestructorDecl));
+ return new (Mem) CXXDestructorDecl(0, SourceLocation(), DeclarationNameInfo(),
QualType(), 0, false, false);
}
@@ -1557,11 +1753,14 @@ CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
isImplicitlyDeclared);
}
+void CXXConversionDecl::anchor() { }
+
CXXConversionDecl *
-CXXConversionDecl::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) CXXConversionDecl(0, SourceLocation(), DeclarationNameInfo(),
- QualType(), 0, false, false, false,
- SourceLocation());
+CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConversionDecl));
+ return new (Mem) CXXConversionDecl(0, SourceLocation(), DeclarationNameInfo(),
+ QualType(), 0, false, false, false,
+ SourceLocation());
}
CXXConversionDecl *
@@ -1579,6 +1778,13 @@ CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD,
EndLocation);
}
+bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
+ return isImplicit() && getParent()->isLambda() &&
+ getConversionType()->isBlockPointerType();
+}
+
+void LinkageSpecDecl::anchor() { }
+
LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation ExternLoc,
@@ -1588,6 +1794,14 @@ LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
return new (C) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, RBraceLoc);
}
+LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LinkageSpecDecl));
+ return new (Mem) LinkageSpecDecl(0, SourceLocation(), SourceLocation(),
+ lang_c, SourceLocation());
+}
+
+void UsingDirectiveDecl::anchor() { }
+
UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
SourceLocation NamespaceLoc,
@@ -1601,6 +1815,14 @@ UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
IdentLoc, Used, CommonAncestor);
}
+UsingDirectiveDecl *
+UsingDirectiveDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDirectiveDecl));
+ return new (Mem) UsingDirectiveDecl(0, SourceLocation(), SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0, 0);
+}
+
NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
if (NamespaceAliasDecl *NA =
dyn_cast_or_null<NamespaceAliasDecl>(NominatedNamespace))
@@ -1608,6 +1830,36 @@ NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
return cast_or_null<NamespaceDecl>(NominatedNamespace);
}
+void NamespaceDecl::anchor() { }
+
+NamespaceDecl::NamespaceDecl(DeclContext *DC, bool Inline,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl)
+ : NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace),
+ LocStart(StartLoc), RBraceLoc(), AnonOrFirstNamespaceAndInline(0, Inline)
+{
+ setPreviousDeclaration(PrevDecl);
+
+ if (PrevDecl)
+ AnonOrFirstNamespaceAndInline.setPointer(PrevDecl->getOriginalNamespace());
+}
+
+NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
+ bool Inline, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl) {
+ return new (C) NamespaceDecl(DC, Inline, StartLoc, IdLoc, Id, PrevDecl);
+}
+
+NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceDecl));
+ return new (Mem) NamespaceDecl(0, false, SourceLocation(), SourceLocation(),
+ 0, 0);
+}
+
+void NamespaceAliasDecl::anchor() { }
+
NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation UsingLoc,
SourceLocation AliasLoc,
@@ -1621,6 +1873,22 @@ NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
QualifierLoc, IdentLoc, Namespace);
}
+NamespaceAliasDecl *
+NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceAliasDecl));
+ return new (Mem) NamespaceAliasDecl(0, SourceLocation(), SourceLocation(), 0,
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0);
+}
+
+void UsingShadowDecl::anchor() { }
+
+UsingShadowDecl *
+UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingShadowDecl));
+ return new (Mem) UsingShadowDecl(0, SourceLocation(), 0, 0);
+}
+
UsingDecl *UsingShadowDecl::getUsingDecl() const {
const UsingShadowDecl *Shadow = this;
while (const UsingShadowDecl *NextShadow =
@@ -1629,14 +1897,16 @@ UsingDecl *UsingShadowDecl::getUsingDecl() const {
return cast<UsingDecl>(Shadow->UsingOrNextShadow);
}
+void UsingDecl::anchor() { }
+
void UsingDecl::addShadowDecl(UsingShadowDecl *S) {
assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() &&
"declaration already in set");
assert(S->getUsingDecl() == this);
- if (FirstUsingShadow)
- S->UsingOrNextShadow = FirstUsingShadow;
- FirstUsingShadow = S;
+ if (FirstUsingShadow.getPointer())
+ S->UsingOrNextShadow = FirstUsingShadow.getPointer();
+ FirstUsingShadow.setPointer(S);
}
void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
@@ -1646,13 +1916,14 @@ void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
// Remove S from the shadow decl chain. This is O(n) but hopefully rare.
- if (FirstUsingShadow == S) {
- FirstUsingShadow = dyn_cast<UsingShadowDecl>(S->UsingOrNextShadow);
+ if (FirstUsingShadow.getPointer() == S) {
+ FirstUsingShadow.setPointer(
+ dyn_cast<UsingShadowDecl>(S->UsingOrNextShadow));
S->UsingOrNextShadow = this;
return;
}
- UsingShadowDecl *Prev = FirstUsingShadow;
+ UsingShadowDecl *Prev = FirstUsingShadow.getPointer();
while (Prev->UsingOrNextShadow != S)
Prev = cast<UsingShadowDecl>(Prev->UsingOrNextShadow);
Prev->UsingOrNextShadow = S->UsingOrNextShadow;
@@ -1666,6 +1937,14 @@ UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL,
return new (C) UsingDecl(DC, UL, QualifierLoc, NameInfo, IsTypeNameArg);
}
+UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDecl));
+ return new (Mem) UsingDecl(0, SourceLocation(), NestedNameSpecifierLoc(),
+ DeclarationNameInfo(), false);
+}
+
+void UnresolvedUsingValueDecl::anchor() { }
+
UnresolvedUsingValueDecl *
UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation UsingLoc,
@@ -1675,6 +1954,16 @@ UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC,
QualifierLoc, NameInfo);
}
+UnresolvedUsingValueDecl *
+UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UnresolvedUsingValueDecl));
+ return new (Mem) UnresolvedUsingValueDecl(0, QualType(), SourceLocation(),
+ NestedNameSpecifierLoc(),
+ DeclarationNameInfo());
+}
+
+void UnresolvedUsingTypenameDecl::anchor() { }
+
UnresolvedUsingTypenameDecl *
UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation UsingLoc,
@@ -1687,6 +1976,19 @@ UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC,
TargetName.getAsIdentifierInfo());
}
+UnresolvedUsingTypenameDecl *
+UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(UnresolvedUsingTypenameDecl));
+ return new (Mem) UnresolvedUsingTypenameDecl(0, SourceLocation(),
+ SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(),
+ 0);
+}
+
+void StaticAssertDecl::anchor() { }
+
StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StaticAssertLoc,
Expr *AssertExpr,
@@ -1696,9 +1998,14 @@ StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
RParenLoc);
}
+StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(StaticAssertDecl));
+ return new (Mem) StaticAssertDecl(0, SourceLocation(), 0, 0,SourceLocation());
+}
+
static const char *getAccessName(AccessSpecifier AS) {
switch (AS) {
- default:
case AS_none:
llvm_unreachable("Invalid access specifier!");
case AS_public:
@@ -1708,9 +2015,15 @@ static const char *getAccessName(AccessSpecifier AS) {
case AS_protected:
return "protected";
}
+ llvm_unreachable("Invalid access specifier!");
}
const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
AccessSpecifier AS) {
return DB << getAccessName(AS);
}
+
+const PartialDiagnostic &clang::operator<<(const PartialDiagnostic &DB,
+ AccessSpecifier AS) {
+ return DB << getAccessName(AS);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
index 99bfe40..6e3bd8d 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
@@ -16,6 +16,8 @@
#include "clang/AST/DeclTemplate.h"
using namespace clang;
+void FriendDecl::anchor() { }
+
FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
FriendUnion Friend,
@@ -40,6 +42,7 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
return FD;
}
-FriendDecl *FriendDecl::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) FriendDecl(Empty);
+FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendDecl));
+ return new (Mem) FriendDecl(EmptyShell());
}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
index a589b7f..2370d3c 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
@@ -46,6 +46,8 @@ void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts,
// ObjCInterfaceDecl
//===----------------------------------------------------------------------===//
+void ObjCContainerDecl::anchor() { }
+
/// getIvarDecl - This method looks up an ivar in this ContextDecl.
///
ObjCIvarDecl *
@@ -147,6 +149,8 @@ ObjCContainerDecl::FindPropertyDeclaration(IdentifierInfo *PropertyId) const {
return 0;
}
+void ObjCInterfaceDecl::anchor() { }
+
/// FindPropertyVisibleInPrimaryClass - Finds declaration of the property
/// with name 'PropertyId' in the primary class; including those in protocols
/// (direct or indirect) used by the primary class.
@@ -154,7 +158,11 @@ ObjCContainerDecl::FindPropertyDeclaration(IdentifierInfo *PropertyId) const {
ObjCPropertyDecl *
ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
IdentifierInfo *PropertyId) const {
- if (ExternallyCompleted)
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
if (ObjCPropertyDecl *PD =
@@ -175,11 +183,12 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
ObjCProtocolDecl *const* ExtList, unsigned ExtNum,
ASTContext &C)
{
- if (ExternallyCompleted)
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
- if (AllReferencedProtocols.empty() && ReferencedProtocols.empty()) {
- AllReferencedProtocols.set(ExtList, ExtNum, C);
+ if (data().AllReferencedProtocols.empty() &&
+ data().ReferencedProtocols.empty()) {
+ data().AllReferencedProtocols.set(ExtList, ExtNum, C);
return;
}
@@ -214,7 +223,28 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
ProtocolRefs.push_back(*p);
}
- AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(), C);
+ data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C);
+}
+
+void ObjCInterfaceDecl::allocateDefinitionData() {
+ assert(!hasDefinition() && "ObjC class already has a definition");
+ Data = new (getASTContext()) DefinitionData();
+ Data->Definition = this;
+
+ // Make the type point at the definition, now that we have one.
+ if (TypeForDecl)
+ cast<ObjCInterfaceType>(TypeForDecl)->Decl = this;
+}
+
+void ObjCInterfaceDecl::startDefinition() {
+ allocateDefinitionData();
+
+ // Update all of the declarations with a pointer to the definition.
+ for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
+ RD != RDEnd; ++RD) {
+ if (*RD != this)
+ RD->Data = Data;
+ }
}
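A fragment sketch of the effect, assuming Fwd is an earlier declaration of the class and Def the declaration being defined; startDefinition() shares one DefinitionData across the whole redeclaration chain, matching how the constructor above copies Data from PrevDecl:

    assert(!Fwd->hasDefinition());
    Def->startDefinition();
    // Every redeclaration now answers through the shared Data pointer.
    assert(Fwd->hasDefinition() && Fwd->getDefinition() == Def);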
/// getFirstClassExtension - Find first class extension of the given class.
@@ -237,6 +267,13 @@ const ObjCCategoryDecl* ObjCCategoryDecl::getNextClassExtension() const {
ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
ObjCInterfaceDecl *&clsDeclared) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
ObjCInterfaceDecl* ClassDecl = this;
while (ClassDecl != NULL) {
if (ObjCIvarDecl *I = ClassDecl->getIvarDecl(ID)) {
@@ -261,6 +298,13 @@ ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
/// then it returns NULL.
ObjCInterfaceDecl *ObjCInterfaceDecl::lookupInheritedClass(
const IdentifierInfo*ICName) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
ObjCInterfaceDecl* ClassDecl = this;
while (ClassDecl != NULL) {
if (ClassDecl->getIdentifier() == ICName)
@@ -272,12 +316,17 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::lookupInheritedClass(
/// lookupMethod - This method returns an instance/class method by looking in
/// the class, its categories, and its super classes (using a linear search).
-ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
- bool isInstance) const {
+ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
+ bool isInstance,
+ bool shallowCategoryLookup) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
const ObjCInterfaceDecl* ClassDecl = this;
ObjCMethodDecl *MethodDecl = 0;
- if (ExternallyCompleted)
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
while (ClassDecl != NULL) {
@@ -285,28 +334,30 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
return MethodDecl;
// Didn't find one yet - look through protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols =
- ClassDecl->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end(); I != E; ++I)
+ for (ObjCInterfaceDecl::protocol_iterator I = ClassDecl->protocol_begin(),
+ E = ClassDecl->protocol_end();
+ I != E; ++I)
if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
return MethodDecl;
-
+
// Didn't find one yet - now look through categories.
ObjCCategoryDecl *CatDecl = ClassDecl->getCategoryList();
while (CatDecl) {
if ((MethodDecl = CatDecl->getMethod(Sel, isInstance)))
return MethodDecl;
- // Didn't find one yet - look through protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols =
- CatDecl->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end(); I != E; ++I)
- if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
- return MethodDecl;
+ if (!shallowCategoryLookup) {
+ // Didn't find one yet - look through protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+ }
CatDecl = CatDecl->getNextClassCategory();
}
+
ClassDecl = ClassDecl->getSuperClass();
}
return NULL;
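With the new flag, the per-class search order is: the class itself, its protocols, its categories, and, unless shallowCategoryLookup is set, each category's protocols, before moving on to the superclass. A hypothetical call, assuming Ctx is the ASTContext and IFace a defined interface:

    Selector DescSel = Ctx.Selectors.getNullarySelector(
        &Ctx.Idents.get("description"));
    ObjCMethodDecl *M =
        IFace->lookupMethod(DescSel, /*isInstance=*/true,
                            /*shallowCategoryLookup=*/true);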
@@ -315,6 +366,13 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
const Selector &Sel,
bool Instance) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
ObjCMethodDecl *Method = 0;
if (ObjCImplementationDecl *ImpDecl = getImplementation())
Method = Instance ? ImpDecl->getInstanceMethod(Sel)
@@ -351,6 +409,12 @@ ObjCMethodDecl *ObjCMethodDecl::Create(ASTContext &C,
HasRelatedResultType);
}
+ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCMethodDecl));
+ return new (Mem) ObjCMethodDecl(SourceLocation(), SourceLocation(),
+ Selector(), QualType(), 0, 0);
+}
+
void ObjCMethodDecl::setAsRedeclaration(const ObjCMethodDecl *PrevMethod) {
assert(PrevMethod);
getASTContext().setObjCMethodRedeclaration(PrevMethod, this);
@@ -452,6 +516,10 @@ ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
return MD;
}
+ if (isRedeclaration())
+ return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
+ isInstanceMethod());
+
return this;
}
@@ -560,17 +628,26 @@ void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
bool selfIsPseudoStrong = false;
bool selfIsConsumed = false;
- if (isInstanceMethod() && Context.getLangOptions().ObjCAutoRefCount) {
- selfIsConsumed = hasAttr<NSConsumesSelfAttr>();
-
- // 'self' is always __strong. It's actually pseudo-strong except
- // in init methods, though.
- Qualifiers qs;
- qs.setObjCLifetime(Qualifiers::OCL_Strong);
- selfTy = Context.getQualifiedType(selfTy, qs);
-
- // In addition, 'self' is const unless this is an init method.
- if (getMethodFamily() != OMF_init) {
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ if (isInstanceMethod()) {
+ selfIsConsumed = hasAttr<NSConsumesSelfAttr>();
+
+ // 'self' is always __strong. It's actually pseudo-strong except
+ // in init methods (or methods labeled ns_consumes_self), though.
+ Qualifiers qs;
+ qs.setObjCLifetime(Qualifiers::OCL_Strong);
+ selfTy = Context.getQualifiedType(selfTy, qs);
+
+ // In addition, 'self' is const unless this is an init method.
+ if (getMethodFamily() != OMF_init && !selfIsConsumed) {
+ selfTy = selfTy.withConst();
+ selfIsPseudoStrong = true;
+ }
+ }
+ else {
+ assert(isClassMethod());
+ // 'self' is always const in class methods.
selfTy = selfTy.withConst();
selfIsPseudoStrong = true;
}
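A fragment sketch of the consequence, assuming Method is an ARC instance method in the init family; outside init (and ns_consumes_self) methods, 'self' would instead come back const and pseudo-strong:

    Method->createImplicitParams(Ctx, Method->getClassInterface());
    ImplicitParamDecl *Self = Method->getSelfDecl();
    // Init methods keep 'self' assignable, so 'self = [super init]'
    // stays well-formed; class methods always get a const 'self'.
    assert(!Self->getType().isConstQualified());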
@@ -608,28 +685,45 @@ ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
// ObjCInterfaceDecl
//===----------------------------------------------------------------------===//
-ObjCInterfaceDecl *ObjCInterfaceDecl::Create(ASTContext &C,
+ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
DeclContext *DC,
SourceLocation atLoc,
IdentifierInfo *Id,
+ ObjCInterfaceDecl *PrevDecl,
SourceLocation ClassLoc,
- bool ForwardDecl, bool isInternal){
- return new (C) ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc, ForwardDecl,
- isInternal);
+ bool isInternal){
+ ObjCInterfaceDecl *Result = new (C) ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc,
+ PrevDecl, isInternal);
+ C.getObjCInterfaceType(Result, PrevDecl);
+ return Result;
+}
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCInterfaceDecl));
+ return new (Mem) ObjCInterfaceDecl(0, SourceLocation(), 0, SourceLocation(),
+ 0, false);
}
ObjCInterfaceDecl::
ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id,
- SourceLocation CLoc, bool FD, bool isInternal)
+ SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl,
+ bool isInternal)
: ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, atLoc),
- TypeForDecl(0), SuperClass(0),
- CategoryList(0), IvarList(0),
- ForwardDecl(FD), InternalInterface(isInternal), ExternallyCompleted(false) {
+ TypeForDecl(0), Data()
+{
+ setPreviousDeclaration(PrevDecl);
+
+ // Copy the 'data' pointer over.
+ if (PrevDecl)
+ Data = PrevDecl->Data;
+
+ setImplicit(isInternal);
}
void ObjCInterfaceDecl::LoadExternalDefinition() const {
- assert(ExternallyCompleted && "Class is not externally completed");
- ExternallyCompleted = false;
+ assert(data().ExternallyCompleted && "Class is not externally completed");
+ data().ExternallyCompleted = false;
getASTContext().getExternalSource()->CompleteType(
const_cast<ObjCInterfaceDecl *>(this));
}
@@ -637,35 +731,44 @@ void ObjCInterfaceDecl::LoadExternalDefinition() const {
void ObjCInterfaceDecl::setExternallyCompleted() {
assert(getASTContext().getExternalSource() &&
"Class can't be externally completed without an external source");
- assert(!ForwardDecl &&
+ assert(hasDefinition() &&
"Forward declarations can't be externally completed");
- ExternallyCompleted = true;
+ data().ExternallyCompleted = true;
}
ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const {
- if (ExternallyCompleted)
- LoadExternalDefinition();
-
- return getASTContext().getObjCImplementation(
- const_cast<ObjCInterfaceDecl*>(this));
+ if (const ObjCInterfaceDecl *Def = getDefinition()) {
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return getASTContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl*>(Def));
+ }
+
+ // FIXME: Should make sure no callers ever do this.
+ return 0;
}
void ObjCInterfaceDecl::setImplementation(ObjCImplementationDecl *ImplD) {
- getASTContext().setObjCImplementation(this, ImplD);
+ getASTContext().setObjCImplementation(getDefinition(), ImplD);
}
/// all_declared_ivar_begin - return first ivar declared in this class,
/// its extensions and its implementation. Lazily build the list on first
/// access.
ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
- if (IvarList)
- return IvarList;
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().IvarList)
+ return data().IvarList;
ObjCIvarDecl *curIvar = 0;
if (!ivar_empty()) {
ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end();
- IvarList = (*I); ++I;
- for (curIvar = IvarList; I != E; curIvar = *I, ++I)
+ data().IvarList = (*I); ++I;
+ for (curIvar = data().IvarList; I != E; curIvar = *I, ++I)
curIvar->setNextIvar(*I);
}
@@ -674,9 +777,9 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
if (!CDecl->ivar_empty()) {
ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
E = CDecl->ivar_end();
- if (!IvarList) {
- IvarList = (*I); ++I;
- curIvar = IvarList;
+ if (!data().IvarList) {
+ data().IvarList = (*I); ++I;
+ curIvar = data().IvarList;
}
for ( ;I != E; curIvar = *I, ++I)
curIvar->setNextIvar(*I);
@@ -687,15 +790,15 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
if (!ImplDecl->ivar_empty()) {
ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(),
E = ImplDecl->ivar_end();
- if (!IvarList) {
- IvarList = (*I); ++I;
- curIvar = IvarList;
+ if (!data().IvarList) {
+ data().IvarList = (*I); ++I;
+ curIvar = data().IvarList;
}
for ( ;I != E; curIvar = *I, ++I)
curIvar->setNextIvar(*I);
}
}
- return IvarList;
+ return data().IvarList;
}
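The lazily built list is threaded through setNextIvar(), so a caller can walk every ivar of the class, its extensions, and its implementation in one pass; a fragment sketch, assuming IFace has a definition:

    for (ObjCIvarDecl *IV = IFace->all_declared_ivar_begin(); IV;
         IV = IV->getNextIvar()) {
      // Visits ivars in declaration order across the class, its
      // extensions, and its @implementation.
    }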
/// FindCategoryDeclaration - Finds category declaration in the list of
@@ -704,7 +807,11 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
///
ObjCCategoryDecl *
ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const {
- if (ExternallyCompleted)
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
LoadExternalDefinition();
for (ObjCCategoryDecl *Category = getCategoryList();
@@ -739,13 +846,13 @@ ObjCMethodDecl *ObjCInterfaceDecl::getCategoryClassMethod(Selector Sel) const {
bool ObjCInterfaceDecl::ClassImplementsProtocol(ObjCProtocolDecl *lProto,
bool lookupCategory,
bool RHSIsQualifiedID) {
+ if (!hasDefinition())
+ return false;
+
ObjCInterfaceDecl *IDecl = this;
// 1st, look up the class.
- const ObjCList<ObjCProtocolDecl> &Protocols =
- IDecl->getReferencedProtocols();
-
- for (ObjCList<ObjCProtocolDecl>::iterator PI = Protocols.begin(),
- E = Protocols.end(); PI != E; ++PI) {
+ for (ObjCInterfaceDecl::protocol_iterator
+ PI = IDecl->protocol_begin(), E = IDecl->protocol_end(); PI != E; ++PI){
if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
return true;
// This is dubious and is added to be compatible with gcc. In gcc, it is
@@ -782,6 +889,8 @@ bool ObjCInterfaceDecl::ClassImplementsProtocol(ObjCProtocolDecl *lProto,
// ObjCIvarDecl
//===----------------------------------------------------------------------===//
+void ObjCIvarDecl::anchor() { }
+
ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
@@ -824,6 +933,12 @@ ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
ac, BW, synthesized);
}
+ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCIvarDecl));
+ return new (Mem) ObjCIvarDecl(0, SourceLocation(), SourceLocation(), 0,
+ QualType(), 0, ObjCIvarDecl::None, 0, false);
+}
+
const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
const ObjCContainerDecl *DC = cast<ObjCContainerDecl>(getDeclContext());
@@ -852,6 +967,8 @@ const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
// ObjCAtDefsFieldDecl
//===----------------------------------------------------------------------===//
+void ObjCAtDefsFieldDecl::anchor() { }
+
ObjCAtDefsFieldDecl
*ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
@@ -859,15 +976,46 @@ ObjCAtDefsFieldDecl
return new (C) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
}
+ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCAtDefsFieldDecl));
+ return new (Mem) ObjCAtDefsFieldDecl(0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0);
+}
+
//===----------------------------------------------------------------------===//
// ObjCProtocolDecl
//===----------------------------------------------------------------------===//
+void ObjCProtocolDecl::anchor() { }
+
+ObjCProtocolDecl::ObjCProtocolDecl(DeclContext *DC, IdentifierInfo *Id,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl)
+ : ObjCContainerDecl(ObjCProtocol, DC, Id, nameLoc, atStartLoc), Data()
+{
+ setPreviousDeclaration(PrevDecl);
+ if (PrevDecl)
+ Data = PrevDecl->Data;
+}
+
ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
SourceLocation nameLoc,
- SourceLocation atStartLoc) {
- return new (C) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc);
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl) {
+ ObjCProtocolDecl *Result
+ = new (C) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc, PrevDecl);
+
+ return Result;
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCProtocolDecl));
+ return new (Mem) ObjCProtocolDecl(0, 0, SourceLocation(), SourceLocation(),
+ 0);
}
ObjCProtocolDecl *ObjCProtocolDecl::lookupProtocolNamed(IdentifierInfo *Name) {
@@ -898,87 +1046,57 @@ ObjCMethodDecl *ObjCProtocolDecl::lookupMethod(Selector Sel,
return NULL;
}
-//===----------------------------------------------------------------------===//
-// ObjCClassDecl
-//===----------------------------------------------------------------------===//
-
-ObjCClassDecl::ObjCClassDecl(DeclContext *DC, SourceLocation L,
- ObjCInterfaceDecl *const Elt,
- const SourceLocation Loc,
- ASTContext &C)
- : Decl(ObjCClass, DC, L) {
- setClass(C, Elt, Loc);
+void ObjCProtocolDecl::allocateDefinitionData() {
+ assert(!Data && "Protocol already has a definition!");
+ Data = new (getASTContext()) DefinitionData;
+ Data->Definition = this;
}
-ObjCClassDecl *ObjCClassDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation L,
- ObjCInterfaceDecl *const Elt,
- const SourceLocation Loc) {
- return new (C) ObjCClassDecl(DC, L, Elt, Loc, C);
-}
-
-void ObjCClassDecl::setClass(ASTContext &C, ObjCInterfaceDecl*const Cls,
- const SourceLocation Loc) {
-
- ForwardDecl = (ObjCClassRef*) C.Allocate(sizeof(ObjCClassRef),
- llvm::alignOf<ObjCClassRef>());
- new (ForwardDecl) ObjCClassRef(Cls, Loc);
-}
-
-SourceRange ObjCClassDecl::getSourceRange() const {
- // FIXME: We should include the semicolon
- return SourceRange(getLocation(), ForwardDecl->getLocation());
-}
-
-//===----------------------------------------------------------------------===//
-// ObjCForwardProtocolDecl
-//===----------------------------------------------------------------------===//
-
-ObjCForwardProtocolDecl::
-ObjCForwardProtocolDecl(DeclContext *DC, SourceLocation L,
- ObjCProtocolDecl *const *Elts, unsigned nElts,
- const SourceLocation *Locs, ASTContext &C)
-: Decl(ObjCForwardProtocol, DC, L) {
- ReferencedProtocols.set(Elts, nElts, Locs, C);
-}
-
-
-ObjCForwardProtocolDecl *
-ObjCForwardProtocolDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation L,
- ObjCProtocolDecl *const *Elts,
- unsigned NumElts,
- const SourceLocation *Locs) {
- return new (C) ObjCForwardProtocolDecl(DC, L, Elts, NumElts, Locs, C);
+void ObjCProtocolDecl::startDefinition() {
+ allocateDefinitionData();
+
+ // Update all of the declarations with a pointer to the definition.
+ for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
+ RD != RDEnd; ++RD)
+ RD->Data = this->Data;
}
//===----------------------------------------------------------------------===//
// ObjCCategoryDecl
//===----------------------------------------------------------------------===//
+void ObjCCategoryDecl::anchor() { }
+
ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation AtLoc,
SourceLocation ClassNameLoc,
SourceLocation CategoryNameLoc,
IdentifierInfo *Id,
- ObjCInterfaceDecl *IDecl) {
+ ObjCInterfaceDecl *IDecl,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
ObjCCategoryDecl *CatDecl = new (C) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc,
CategoryNameLoc, Id,
- IDecl);
+ IDecl,
+ IvarLBraceLoc, IvarRBraceLoc);
if (IDecl) {
// Link this category into its class's category list.
CatDecl->NextClassCategory = IDecl->getCategoryList();
- IDecl->setCategoryList(CatDecl);
- if (ASTMutationListener *L = C.getASTMutationListener())
- L->AddedObjCCategoryToInterface(CatDecl, IDecl);
+ if (IDecl->hasDefinition()) {
+ IDecl->setCategoryList(CatDecl);
+ if (ASTMutationListener *L = C.getASTMutationListener())
+ L->AddedObjCCategoryToInterface(CatDecl, IDecl);
+ }
}
return CatDecl;
}
-ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(),
- SourceLocation(), 0, 0);
+ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryDecl));
+ return new (Mem) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(),
+ SourceLocation(), 0, 0);
}
ObjCCategoryImplDecl *ObjCCategoryDecl::getImplementation() const {
@@ -995,14 +1113,26 @@ void ObjCCategoryDecl::setImplementation(ObjCCategoryImplDecl *ImplD) {
// ObjCCategoryImplDecl
//===----------------------------------------------------------------------===//
+void ObjCCategoryImplDecl::anchor() { }
+
ObjCCategoryImplDecl *
ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
ObjCInterfaceDecl *ClassInterface,
SourceLocation nameLoc,
- SourceLocation atStartLoc) {
+ SourceLocation atStartLoc,
+ SourceLocation CategoryNameLoc) {
+ if (ClassInterface && ClassInterface->hasDefinition())
+ ClassInterface = ClassInterface->getDefinition();
return new (C) ObjCCategoryImplDecl(DC, Id, ClassInterface,
- nameLoc, atStartLoc);
+ nameLoc, atStartLoc, CategoryNameLoc);
+}
+
+ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryImplDecl));
+ return new (Mem) ObjCCategoryImplDecl(0, 0, 0, SourceLocation(),
+ SourceLocation(), SourceLocation());
}
ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const {
@@ -1013,6 +1143,8 @@ ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const {
}
+void ObjCImplDecl::anchor() { }
+
void ObjCImplDecl::addPropertyImplementation(ObjCPropertyImplDecl *property) {
// FIXME: The context should be correct before we get here.
property->setLexicalDeclContext(this);
@@ -1066,8 +1198,8 @@ FindPropertyImplDecl(IdentifierInfo *Id) const {
}
raw_ostream &clang::operator<<(raw_ostream &OS,
- const ObjCCategoryImplDecl *CID) {
- OS << CID->getName();
+ const ObjCCategoryImplDecl &CID) {
+ OS << CID.getName();
return OS;
}
@@ -1075,14 +1207,28 @@ raw_ostream &clang::operator<<(raw_ostream &OS,
// ObjCImplementationDecl
//===----------------------------------------------------------------------===//
+void ObjCImplementationDecl::anchor() { }
+
ObjCImplementationDecl *
ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC,
ObjCInterfaceDecl *ClassInterface,
ObjCInterfaceDecl *SuperDecl,
SourceLocation nameLoc,
- SourceLocation atStartLoc) {
+ SourceLocation atStartLoc,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
+ if (ClassInterface && ClassInterface->hasDefinition())
+ ClassInterface = ClassInterface->getDefinition();
return new (C) ObjCImplementationDecl(DC, ClassInterface, SuperDecl,
- nameLoc, atStartLoc);
+ nameLoc, atStartLoc,
+ IvarLBraceLoc, IvarRBraceLoc);
+}
+
+ObjCImplementationDecl *
+ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCImplementationDecl));
+ return new (Mem) ObjCImplementationDecl(0, 0, 0, SourceLocation(),
+ SourceLocation());
}
void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
@@ -1099,8 +1245,8 @@ void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
}
raw_ostream &clang::operator<<(raw_ostream &OS,
- const ObjCImplementationDecl *ID) {
- OS << ID->getName();
+ const ObjCImplementationDecl &ID) {
+ OS << ID.getName();
return OS;
}
@@ -1108,6 +1254,8 @@ raw_ostream &clang::operator<<(raw_ostream &OS,
// ObjCCompatibleAliasDecl
//===----------------------------------------------------------------------===//
+void ObjCCompatibleAliasDecl::anchor() { }
+
ObjCCompatibleAliasDecl *
ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
@@ -1116,17 +1264,34 @@ ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC,
return new (C) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
}
+ObjCCompatibleAliasDecl *
+ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCompatibleAliasDecl));
+ return new (Mem) ObjCCompatibleAliasDecl(0, SourceLocation(), 0, 0);
+}
+
//===----------------------------------------------------------------------===//
// ObjCPropertyDecl
//===----------------------------------------------------------------------===//
+void ObjCPropertyDecl::anchor() { }
+
ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
IdentifierInfo *Id,
SourceLocation AtLoc,
+ SourceLocation LParenLoc,
TypeSourceInfo *T,
PropertyControl propControl) {
- return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, T);
+ return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T);
+}
+
+ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void * Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyDecl));
+ return new (Mem) ObjCPropertyDecl(0, SourceLocation(), 0, SourceLocation(),
+ SourceLocation(),
+ 0);
}
//===----------------------------------------------------------------------===//
@@ -1145,6 +1310,13 @@ ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C,
ivarLoc);
}
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyImplDecl));
+ return new (Mem) ObjCPropertyImplDecl(0, SourceLocation(), SourceLocation(),
+ 0, Dynamic, 0, SourceLocation());
+}
+
SourceRange ObjCPropertyImplDecl::getSourceRange() const {
SourceLocation EndLoc = getLocation();
if (IvarLoc.isValid())
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
index 08a1ab5..74e1c1b 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -58,6 +59,7 @@ namespace {
void VisitLabelDecl(LabelDecl *D);
void VisitParmVarDecl(ParmVarDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitImportDecl(ImportDecl *D);
void VisitStaticAssertDecl(StaticAssertDecl *D);
void VisitNamespaceDecl(NamespaceDecl *D);
void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
@@ -68,10 +70,8 @@ namespace {
void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
void VisitObjCMethodDecl(ObjCMethodDecl *D);
- void VisitObjCClassDecl(ObjCClassDecl *D);
void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
- void VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
@@ -85,6 +85,7 @@ namespace {
void PrintTemplateParameters(const TemplateParameterList *Params,
const TemplateArgumentList *Args);
+ void prettyPrintAttributes(Decl *D);
};
}
@@ -182,6 +183,16 @@ raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
return Out;
}
+void DeclPrinter::prettyPrintAttributes(Decl *D) {
+ if (D->hasAttrs()) {
+ AttrVec &Attrs = D->getAttrs();
+ for (AttrVec::const_iterator i=Attrs.begin(), e=Attrs.end(); i!=e; ++i) {
+ Attr *A = *i;
+ A->printPretty(Out, Context);
+ }
+ }
+}
+
void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) {
this->Indent();
Decl::printGroup(Decls.data(), Decls.size(), Out, Policy, Indentation);
@@ -320,11 +331,11 @@ void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
Out << "__module_private__ ";
}
Out << S;
+ prettyPrintAttributes(D);
}
void DeclPrinter::VisitTypeAliasDecl(TypeAliasDecl *D) {
- Out << "using " << D->getNameAsString() << " = "
- << D->getUnderlyingType().getAsString(Policy);
+ Out << "using " << *D << " = " << D->getUnderlyingType().getAsString(Policy);
}
void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
@@ -350,6 +361,7 @@ void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
VisitDeclContext(D);
Indent() << "}";
}
+ prettyPrintAttributes(D);
}
void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
@@ -376,7 +388,7 @@ void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (!Policy.SuppressSpecifiers) {
- switch (D->getStorageClass()) {
+ switch (D->getStorageClassAsWritten()) {
case SC_None: break;
case SC_Extern: Out << "extern "; break;
case SC_Static: Out << "static "; break;
@@ -466,12 +478,6 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
}
}
- if (D->hasAttr<NoReturnAttr>())
- Proto += " __attribute((noreturn))";
-
- if (D->hasAttr<ReturnsTwiceAttr>())
- Proto += " __attribute((returns_twice))";
-
if (CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D)) {
bool HasInitializerList = false;
for (CXXConstructorDecl::init_const_iterator B = CDecl->init_begin(),
@@ -542,6 +548,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
}
Out << Proto;
+ prettyPrintAttributes(D);
if (D->isPure())
Out << " = 0";
@@ -588,16 +595,18 @@ void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
Out << " = ";
Init->printPretty(Out, Context, 0, Policy, Indentation);
}
+ prettyPrintAttributes(D);
}
void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
- Out << D->getNameAsString() << ":";
+ Out << *D << ":";
}
void DeclPrinter::VisitVarDecl(VarDecl *D) {
- if (!Policy.SuppressSpecifiers && D->getStorageClass() != SC_None)
- Out << VarDecl::getStorageClassSpecifierString(D->getStorageClass()) << " ";
+ StorageClass SCAsWritten = D->getStorageClassAsWritten();
+ if (!Policy.SuppressSpecifiers && SCAsWritten != SC_None)
+ Out << VarDecl::getStorageClassSpecifierString(SCAsWritten) << " ";
if (!Policy.SuppressSpecifiers && D->isThreadSpecified())
Out << "__thread ";
@@ -612,17 +621,22 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
Out << Name;
Expr *Init = D->getInit();
if (!Policy.SuppressInitializers && Init) {
- if (D->hasCXXDirectInitializer())
- Out << "(";
- else {
- CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init);
- if (!CCE || CCE->getConstructor()->isCopyConstructor())
- Out << " = ";
+ bool ImplicitInit = false;
+ if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+ ImplicitInit = D->getInitStyle() == VarDecl::CallInit &&
+ Construct->getNumArgs() == 0 && !Construct->isListInitialization();
+ if (!ImplicitInit) {
+ if (D->getInitStyle() == VarDecl::CallInit)
+ Out << "(";
+ else if (D->getInitStyle() == VarDecl::CInit) {
+ Out << " = ";
+ }
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
+ if (D->getInitStyle() == VarDecl::CallInit)
+ Out << ")";
}
- Init->printPretty(Out, Context, 0, Policy, Indentation);
- if (D->hasCXXDirectInitializer())
- Out << ")";
}
+ prettyPrintAttributes(D);
}
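The InitStyle-driven logic prints '= init' for C-style initialization, parentheses for call-style initialization, and nothing at all for an implicit zero-argument constructor call. A sketch of the three cases, with W hypothetical:

    struct W { W(); W(int); };

    int a = 1;  // CInit: printed as '= 1'
    W b(2);     // CallInit: printed with parentheses, 'W b(2)'
    W c;        // implicit zero-arg CallInit: no initializer is printed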
void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
@@ -635,6 +649,11 @@ void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
Out << ")";
}
+void DeclPrinter::VisitImportDecl(ImportDecl *D) {
+ Out << "@__experimental_modules_import " << D->getImportedModule()->getFullModuleName()
+ << ";\n";
+}
+
void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
Out << "static_assert(";
D->getAssertExpr()->printPretty(Out, Context, 0, Policy, Indentation);
@@ -745,7 +764,7 @@ void DeclPrinter::PrintTemplateParameters(
if (TTP->isParameterPack())
Out << "... ";
- Out << TTP->getNameAsString();
+ Out << *TTP;
if (Args) {
Out << " = ";
@@ -829,10 +848,6 @@ void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
// Objective-C declarations
//----------------------------------------------------------------------------
-void DeclPrinter::VisitObjCClassDecl(ObjCClassDecl *D) {
- Out << "@class " << *D->getForwardInterfaceDecl();
-}
-
void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
if (OMD->isInstanceMethod())
Out << "- ";
@@ -882,6 +897,11 @@ void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
std::string I = OID->getNameAsString();
ObjCInterfaceDecl *SID = OID->getSuperClass();
+ if (!OID->isThisDeclarationADefinition()) {
+ Out << "@class " << I << ";";
+ return;
+ }
+
if (SID)
Out << "@interface " << I << " : " << *SID;
else
@@ -914,17 +934,12 @@ void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
// FIXME: implement the rest...
}
-void DeclPrinter::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D) {
- Out << "@protocol ";
- for (ObjCForwardProtocolDecl::protocol_iterator I = D->protocol_begin(),
- E = D->protocol_end();
- I != E; ++I) {
- if (I != D->protocol_begin()) Out << ", ";
- Out << **I;
- }
-}
-
void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
+ if (!PID->isThisDeclarationADefinition()) {
+ Out << "@protocol " << PID->getIdentifier() << ";\n";
+ return;
+ }
+
Out << "@protocol " << *PID << '\n';
VisitDeclContext(PID, false);
Out << "@end";
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
index 558a4cc..4590195 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
@@ -112,42 +112,34 @@ static void AdoptTemplateParameterList(TemplateParameterList *Params,
//===----------------------------------------------------------------------===//
RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() {
- // Find the first declaration of this function template.
- RedeclarableTemplateDecl *First = getCanonicalDecl();
+ if (!Common) {
+ // Walk the previous-declaration chain until we either find a declaration
+ // with a common pointer or we run out of previous declarations.
+ llvm::SmallVector<RedeclarableTemplateDecl *, 2> PrevDecls;
+ for (RedeclarableTemplateDecl *Prev = getPreviousDecl(); Prev;
+ Prev = Prev->getPreviousDecl()) {
+ if (Prev->Common) {
+ Common = Prev->Common;
+ break;
+ }
+
+ PrevDecls.push_back(Prev);
+ }
- if (First->CommonOrPrev.isNull()) {
- CommonBase *CommonPtr = First->newCommon(getASTContext());
- First->CommonOrPrev = CommonPtr;
- CommonPtr->Latest = First;
+ // If we never found a common pointer, allocate one now.
+ if (!Common) {
+ // FIXME: If any of the declarations is from an AST file, we probably
+ // need an update record to add the common data.
+
+ Common = newCommon(getASTContext());
+ }
+
+ // Update any previous declarations we saw with the common pointer.
+ for (unsigned I = 0, N = PrevDecls.size(); I != N; ++I)
+ PrevDecls[I]->Common = Common;
}
- return First->CommonOrPrev.get<CommonBase*>();
-}
-
-
-RedeclarableTemplateDecl *RedeclarableTemplateDecl::getCanonicalDeclImpl() {
- RedeclarableTemplateDecl *Tmpl = this;
- while (Tmpl->getPreviousDeclaration())
- Tmpl = Tmpl->getPreviousDeclaration();
- return Tmpl;
-}
-void RedeclarableTemplateDecl::setPreviousDeclarationImpl(
- RedeclarableTemplateDecl *Prev) {
- if (Prev) {
- CommonBase *Common = Prev->getCommonPtr();
- Prev = Common->Latest;
- Common->Latest = this;
- CommonOrPrev = Prev;
- } else {
- assert(CommonOrPrev.is<CommonBase*>() && "Cannot reset TemplateDecl Prev");
- }
-}
-
-RedeclarableTemplateDecl *RedeclarableTemplateDecl::getNextRedeclaration() {
- if (CommonOrPrev.is<RedeclarableTemplateDecl*>())
- return CommonOrPrev.get<RedeclarableTemplateDecl*>();
- CommonBase *Common = CommonOrPrev.get<CommonBase*>();
- return Common ? Common->Latest : this;
+ return Common;
}
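The rewrite turns the common-pointer lookup into lazy memoization: walk back through previous declarations until a cached pointer turns up (or allocate one), then back-fill every declaration visited so later lookups are constant time. A minimal sketch of the same shape with a hypothetical Node type:

    #include <vector>

    struct Node { Node *Prev; int *Cache; };

    int *getCache(Node *N) {
      if (!N->Cache) {
        std::vector<Node *> Seen;                // decls without a cache
        for (Node *P = N->Prev; P; P = P->Prev) {
          if (P->Cache) { N->Cache = P->Cache; break; }
          Seen.push_back(P);
        }
        if (!N->Cache)
          N->Cache = new int(0);                 // first request: allocate
        for (size_t I = 0, E = Seen.size(); I != E; ++I)
          Seen[I]->Cache = N->Cache;             // back-fill the chain
      }
      return N->Cache;
    }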
template <class EntryType>
@@ -160,7 +152,7 @@ RedeclarableTemplateDecl::findSpecializationImpl(
llvm::FoldingSetNodeID ID;
EntryType::Profile(ID,Args,NumArgs, getASTContext());
EntryType *Entry = Specs.FindNodeOrInsertPos(ID, InsertPos);
- return Entry ? SETraits::getMostRecentDeclaration(Entry) : 0;
+ return Entry ? SETraits::getMostRecentDecl(Entry) : 0;
}
/// \brief Generate the injected template arguments for the given template
@@ -181,7 +173,7 @@ static void GenerateInjectedTemplateArgs(ASTContext &Context,
Arg = TemplateArgument(ArgType);
} else if (NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
- Expr *E = new (Context) DeclRefExpr(NTTP,
+ Expr *E = new (Context) DeclRefExpr(NTTP, /*enclosing*/ false,
NTTP->getType().getNonLValueExprType(Context),
Expr::getValueKindForType(NTTP->getType()),
NTTP->getLocation());
@@ -224,9 +216,11 @@ FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
return new (C) FunctionTemplateDecl(DC, L, Name, Params, Decl);
}
-FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C, EmptyShell) {
- return new (C) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(),
- 0, 0);
+FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionTemplateDecl));
+ return new (Mem) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(),
+ 0, 0);
}
RedeclarableTemplateDecl::CommonBase *
@@ -244,7 +238,10 @@ FunctionTemplateDecl::findSpecialization(const TemplateArgument *Args,
void FunctionTemplateDecl::addSpecialization(
FunctionTemplateSpecializationInfo *Info, void *InsertPos) {
- getSpecializations().InsertNode(Info, InsertPos);
+ if (InsertPos)
+ getSpecializations().InsertNode(Info, InsertPos);
+ else
+ getSpecializations().GetOrInsertNode(Info);
if (ASTMutationListener *L = getASTMutationListener())
L->AddedCXXTemplateSpecialization(this, Info->Function);
}
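The InsertPos hint from FoldingSet::FindNodeOrInsertPos is only valid immediately after that call; deserialization paths reach addSpecialization without one, so the change falls back to GetOrInsertNode, which re-profiles the node. The underlying idiom, as a fragment with hypothetical Entry, Set, and NewEntry names:

    llvm::FoldingSetNodeID NodeID;
    Entry::Profile(NodeID, Args, NumArgs, Ctx);
    void *InsertPos = 0;
    if (!Set.FindNodeOrInsertPos(NodeID, InsertPos))
      Set.InsertNode(NewEntry, InsertPos);  // hint still fresh, safe to use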
@@ -284,8 +281,10 @@ ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
return New;
}
-ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) ClassTemplateDecl(Empty);
+ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ClassTemplateDecl));
+ return new (Mem) ClassTemplateDecl(EmptyShell());
}
void ClassTemplateDecl::LoadLazySpecializations() {
@@ -326,7 +325,14 @@ ClassTemplateDecl::findSpecialization(const TemplateArgument *Args,
void ClassTemplateDecl::AddSpecialization(ClassTemplateSpecializationDecl *D,
void *InsertPos) {
- getSpecializations().InsertNode(D, InsertPos);
+ if (InsertPos)
+ getSpecializations().InsertNode(D, InsertPos);
+ else {
+ ClassTemplateSpecializationDecl *Existing
+ = getSpecializations().GetOrInsertNode(D);
+ (void)Existing;
+ assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
+ }
if (ASTMutationListener *L = getASTMutationListener())
L->AddedCXXTemplateSpecialization(this, D);
}
@@ -342,7 +348,15 @@ ClassTemplateDecl::findPartialSpecialization(const TemplateArgument *Args,
void ClassTemplateDecl::AddPartialSpecialization(
ClassTemplatePartialSpecializationDecl *D,
void *InsertPos) {
- getPartialSpecializations().InsertNode(D, InsertPos);
+ if (InsertPos)
+ getPartialSpecializations().InsertNode(D, InsertPos);
+ else {
+ ClassTemplatePartialSpecializationDecl *Existing
+ = getPartialSpecializations().GetOrInsertNode(D);
+ (void)Existing;
+ assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
+ }
+
if (ASTMutationListener *L = getASTMutationListener())
L->AddedCXXTemplateSpecialization(this, D);
}
@@ -357,7 +371,7 @@ void ClassTemplateDecl::getPartialSpecializations(
P = PartialSpecs.begin(), PEnd = PartialSpecs.end();
P != PEnd; ++P) {
assert(!PS[P->getSequenceNumber()]);
- PS[P->getSequenceNumber()] = P->getMostRecentDeclaration();
+ PS[P->getSequenceNumber()] = P->getMostRecentDecl();
}
}
@@ -370,7 +384,7 @@ ClassTemplateDecl::findPartialSpecialization(QualType T) {
PEnd = getPartialSpecializations().end();
P != PEnd; ++P) {
if (Context.hasSameType(P->getInjectedSpecializationType(), T))
- return P->getMostRecentDeclaration();
+ return P->getMostRecentDecl();
}
return 0;
@@ -385,7 +399,7 @@ ClassTemplateDecl::findPartialSpecInstantiatedFromMember(
PEnd = getPartialSpecializations().end();
P != PEnd; ++P) {
if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon)
- return P->getMostRecentDeclaration();
+ return P->getMostRecentDecl();
}
return 0;
@@ -433,9 +447,10 @@ TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
}
TemplateTypeParmDecl *
-TemplateTypeParmDecl::Create(const ASTContext &C, EmptyShell Empty) {
- return new (C) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(),
- 0, false);
+TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTypeParmDecl));
+ return new (Mem) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(),
+ 0, false);
}
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
@@ -520,6 +535,27 @@ NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
ExpandedTInfos);
}
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NonTypeTemplateParmDecl));
+ return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(),
+ SourceLocation(), 0, 0, 0,
+ QualType(), false, 0);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumExpandedTypes) {
+ unsigned Size = sizeof(NonTypeTemplateParmDecl)
+ + NumExpandedTypes * 2 * sizeof(void*);
+
+ void *Mem = AllocateDeserializedDecl(C, ID, Size);
+ return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(),
+ SourceLocation(), 0, 0, 0,
+ QualType(), 0, 0, NumExpandedTypes,
+ 0);
+}
+
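The second CreateDeserialized overload sizes the allocation as sizeof(NonTypeTemplateParmDecl) plus two pointer-sized slots per expanded type, reserving room directly behind the object for the type / type-source-info pairs of an expanded parameter pack. A stand-alone sketch of that trailing-storage layout (Node is a hypothetical stand-in):

    #include <cstddef>
    #include <new>

    struct Node {
      unsigned NumExpanded;
      explicit Node(unsigned N) : NumExpanded(N) {}
      // The pair array lives immediately after the object itself.
      void **trailingSlots() { return reinterpret_cast<void **>(this + 1); }

      static Node *create(unsigned NumExpanded) {
        std::size_t Size = sizeof(Node) + NumExpanded * 2 * sizeof(void *);
        void *Mem = ::operator new(Size);
        return new (Mem) Node(NumExpanded);
      }
    };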
SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
return SourceRange(getOuterLocStart(),
@@ -537,6 +573,8 @@ SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
// TemplateTemplateParmDecl Method Implementations
//===----------------------------------------------------------------------===//
+void TemplateTemplateParmDecl::anchor() { }
+
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
SourceLocation L, unsigned D, unsigned P,
@@ -546,6 +584,13 @@ TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
Params);
}
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTemplateParmDecl));
+ return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, false,
+ 0, 0);
+}
+
//===----------------------------------------------------------------------===//
// TemplateArgumentList Implementation
//===----------------------------------------------------------------------===//
@@ -582,6 +627,12 @@ FunctionTemplateSpecializationInfo::Create(ASTContext &C, FunctionDecl *FD,
}
//===----------------------------------------------------------------------===//
+// TemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void TemplateDecl::anchor() { }
+
+//===----------------------------------------------------------------------===//
// ClassTemplateSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
ClassTemplateSpecializationDecl::
@@ -628,9 +679,11 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
}
ClassTemplateSpecializationDecl *
-ClassTemplateSpecializationDecl::Create(ASTContext &Context, EmptyShell Empty) {
- return
- new (Context)ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
+ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassTemplateSpecializationDecl));
+ return new (Mem) ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
}
void
@@ -682,6 +735,8 @@ ClassTemplateSpecializationDecl::getSourceRange() const {
//===----------------------------------------------------------------------===//
// ClassTemplatePartialSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
+void ClassTemplatePartialSpecializationDecl::anchor() { }
+
ClassTemplatePartialSpecializationDecl::
ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
DeclContext *DC,
@@ -740,15 +795,19 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC,
}
ClassTemplatePartialSpecializationDecl *
-ClassTemplatePartialSpecializationDecl::Create(ASTContext &Context,
- EmptyShell Empty) {
- return new (Context)ClassTemplatePartialSpecializationDecl();
+ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassTemplatePartialSpecializationDecl));
+ return new (Mem) ClassTemplatePartialSpecializationDecl();
}
//===----------------------------------------------------------------------===//
// FriendTemplateDecl Implementation
//===----------------------------------------------------------------------===//
+void FriendTemplateDecl::anchor() { }
+
FriendTemplateDecl *FriendTemplateDecl::Create(ASTContext &Context,
DeclContext *DC,
SourceLocation L,
@@ -761,9 +820,10 @@ FriendTemplateDecl *FriendTemplateDecl::Create(ASTContext &Context,
return Result;
}
-FriendTemplateDecl *FriendTemplateDecl::Create(ASTContext &Context,
- EmptyShell Empty) {
- return new (Context) FriendTemplateDecl(Empty);
+FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendTemplateDecl));
+ return new (Mem) FriendTemplateDecl(EmptyShell());
}
//===----------------------------------------------------------------------===//
@@ -780,10 +840,11 @@ TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C,
return new (C) TypeAliasTemplateDecl(DC, L, Name, Params, Decl);
}
-TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C,
- EmptyShell) {
- return new (C) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(),
- 0, 0);
+TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasTemplateDecl));
+ return new (Mem) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(),
+ 0, 0);
}
void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) {
@@ -796,3 +857,16 @@ TypeAliasTemplateDecl::newCommon(ASTContext &C) {
return CommonPtr;
}
+//===----------------------------------------------------------------------===//
+// ClassScopeFunctionSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void ClassScopeFunctionSpecializationDecl::anchor() { }
+
+ClassScopeFunctionSpecializationDecl *
+ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassScopeFunctionSpecializationDecl));
+ return new (Mem) ClassScopeFunctionSpecializationDecl(0, SourceLocation(), 0);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
index bf647ed..64924ad 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
@@ -53,7 +53,7 @@ public:
void *FETokenInfo;
};
-/// CXXLiberalOperatorName - Contains the actual identifier that makes up the
+/// CXXLiteralOperatorName - Contains the actual identifier that makes up the
/// name.
///
/// This identifier is stored here rather than directly in DeclarationName so as
@@ -64,6 +64,10 @@ class CXXLiteralOperatorIdName
public:
IdentifierInfo *ID;
+ /// FETokenInfo - Extra information associated with this operator
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
void Profile(llvm::FoldingSetNodeID &FSID) {
FSID.AddPointer(ID);
}
@@ -125,8 +129,8 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
case DeclarationName::CXXUsingDirective:
return 0;
}
-
- return 0;
+
+ llvm_unreachable("Invalid DeclarationName Kind!");
}
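This hunk is one instance of a cleanup repeated throughout the import: a dead return after a fully covered switch becomes llvm_unreachable, so adding an enumerator later triggers a -Wswitch warning instead of silently returning a default, and a corrupt value traps in asserts builds. The shape of the pattern:

    #include "llvm/Support/ErrorHandling.h"

    enum Color { Red, Green, Blue };

    static int channelIndex(Color C) {
      switch (C) { // every enumerator handled, no default:
      case Red:   return 0;
      case Green: return 1;
      case Blue:  return 2;
      }
      llvm_unreachable("invalid Color"); // documents that falling out is a bug
    }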
} // end namespace clang
@@ -189,7 +193,6 @@ DeclarationName::NameKind DeclarationName::getNameKind() const {
return ObjCMultiArgSelector;
}
- break;
}
// Can't actually get here.
@@ -334,7 +337,7 @@ void *DeclarationName::getFETokenInfoAsVoid() const {
return getAsCXXOperatorIdName()->FETokenInfo;
case CXXLiteralOperatorName:
- return getCXXLiteralIdentifier()->getFETokenInfo<void>();
+ return getAsCXXLiteralOperatorIdName()->FETokenInfo;
default:
llvm_unreachable("Declaration name has no FETokenInfo");
@@ -358,7 +361,7 @@ void DeclarationName::setFETokenInfo(void *T) {
break;
case CXXLiteralOperatorName:
- getCXXLiteralIdentifier()->setFETokenInfo(T);
+ getAsCXXLiteralOperatorIdName()->FETokenInfo = T;
break;
default:
@@ -472,6 +475,7 @@ DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
CXXLiteralOperatorIdName *LiteralName = new (Ctx) CXXLiteralOperatorIdName;
LiteralName->ExtraKindOrNumArgs = DeclarationNameExtra::CXXLiteralOperator;
LiteralName->ID = II;
+ LiteralName->FETokenInfo = 0;
LiteralNames->InsertNode(LiteralName, InsertPos);
return DeclarationName(LiteralName);
diff --git a/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
index 2568ada..4c7cd8a 100644
--- a/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
@@ -39,8 +39,7 @@
#include "clang/AST/TypeVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -66,7 +65,6 @@ template <class Impl> struct XMLDeclVisitor {
void dispatch(Decl *D) {
switch (D->getKind()) {
- default: llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
#define DECL(DERIVED, BASE) \
case Decl::DERIVED: \
DISPATCH(dispatch##DERIVED##DeclAttrs, DERIVED##Decl); \
@@ -121,7 +119,6 @@ template <class Impl> struct XMLTypeVisitor {
void dispatch(Type *T) {
switch (T->getTypeClass()) {
- default: llvm_unreachable("Type that isn't part of TypeNodes.inc!");
#define TYPE(DERIVED, BASE) \
case Type::DERIVED: \
DISPATCH(dispatch##DERIVED##TypeAttrs, DERIVED##Type); \
@@ -167,7 +164,6 @@ static StringRef getTypeKindName(Type *T) {
}
llvm_unreachable("unknown type kind!");
- return "unknown_type";
}
struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
@@ -227,7 +223,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
//---- General utilities -------------------------------------------//
void setPointer(StringRef prop, const void *p) {
- llvm::SmallString<10> buffer;
+ SmallString<10> buffer;
llvm::raw_svector_ostream os(buffer);
os << p;
os.flush();
@@ -243,7 +239,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
void setInteger(StringRef prop, unsigned n) {
- llvm::SmallString<10> buffer;
+ SmallString<10> buffer;
llvm::raw_svector_ostream os(buffer);
os << n;
os.flush();
@@ -324,7 +320,8 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
break;
case TemplateArgument::Declaration: {
- visitDeclRef(A.getAsDecl());
+ if (Decl *D = A.getAsDecl())
+ visitDeclRef(D);
break;
}
case TemplateArgument::Integral: {
@@ -411,7 +408,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
template <class T> void visitRedeclarableAttrs(T *D) {
- if (T *Prev = D->getPreviousDeclaration())
+ if (T *Prev = D->getPreviousDecl())
setPointer("previous", Prev);
}
@@ -465,7 +462,13 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
if (D->getStorageClass() != SC_None)
set("storage",
VarDecl::getStorageClassSpecifierString(D->getStorageClass()));
- setFlag("directinit", D->hasCXXDirectInitializer());
+ StringRef initStyle = "";
+ switch (D->getInitStyle()) {
+ case VarDecl::CInit: initStyle = "c"; break;
+ case VarDecl::CallInit: initStyle = "call"; break;
+ case VarDecl::ListInit: initStyle = "list"; break;
+ }
+ set("initstyle", initStyle);
setFlag("nrvo", D->isNRVOVariable());
// TODO: instantiation, etc.
}
@@ -495,6 +498,10 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
for (FunctionDecl::param_iterator
I = D->param_begin(), E = D->param_end(); I != E; ++I)
dispatch(*I);
+ for (llvm::ArrayRef<NamedDecl*>::iterator
+ I = D->getDeclsInPrototypeScope().begin(), E = D->getDeclsInPrototypeScope().end();
+ I != E; ++I)
+ dispatch(*I);
if (D->doesThisDeclarationHaveABody())
dispatch(D->getBody());
}
@@ -740,11 +747,6 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
visitDeclContext(D);
}
- // ObjCClassDecl
- void visitObjCClassDeclChildren(ObjCClassDecl *D) {
- visitDeclRef(D->getForwardInterfaceDecl());
- }
-
// ObjCInterfaceDecl
void visitCategoryList(ObjCCategoryDecl *D) {
if (!D) return;
@@ -755,7 +757,7 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
void visitObjCInterfaceDeclAttrs(ObjCInterfaceDecl *D) {
setPointer("typeptr", D->getTypeForDecl());
- setFlag("forward_decl", D->isForwardDecl());
+ setFlag("forward_decl", !D->isThisDeclarationADefinition());
setFlag("implicit_interface", D->isImplicitInterfaceDecl());
}
void visitObjCInterfaceDeclChildren(ObjCInterfaceDecl *D) {
@@ -815,18 +817,11 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
}
- // ObjCForwardProtocolDecl
- void visitObjCForwardProtocolDeclChildren(ObjCForwardProtocolDecl *D) {
- for (ObjCForwardProtocolDecl::protocol_iterator
- I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
- visitDeclRef(*I);
- }
-
// ObjCProtocolDecl
- void visitObjCProtocolDeclAttrs(ObjCProtocolDecl *D) {
- setFlag("forward_decl", D->isForwardDecl());
- }
void visitObjCProtocolDeclChildren(ObjCProtocolDecl *D) {
+ if (!D->isThisDeclarationADefinition())
+ return;
+
if (D->protocol_begin() != D->protocol_end()) {
TemporaryContainer C(*this, "protocols");
for (ObjCInterfaceDecl::protocol_iterator
@@ -835,6 +830,9 @@ struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
}
}
void visitObjCProtocolDeclAsContext(ObjCProtocolDecl *D) {
+ if (!D->isThisDeclarationADefinition())
+ return;
+
visitDeclContext(D);
}
diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
index b0bcfe0..868109e 100644
--- a/contrib/llvm/tools/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Lex/LiteralSupport.h"
@@ -29,6 +30,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cstring>
using namespace clang;
/// isKnownToHaveBooleanValue - Return true if this is an integer expression
@@ -92,6 +94,8 @@ bool Expr::isKnownToHaveBooleanValue() const {
// Amusing macro metaprogramming hack: check whether a class provides
// a more specific implementation of getExprLoc().
+//
+// See also Stmt.cpp:{getLocStart(),getLocEnd()}.
namespace {
/// This implementation is used when a class provides a custom
/// implementation of getExprLoc.
@@ -108,7 +112,7 @@ namespace {
template <class E>
SourceLocation getExprLocImpl(const Expr *expr,
SourceLocation (Expr::*v)() const) {
- return static_cast<const E*>(expr)->getSourceRange().getBegin();
+ return static_cast<const E*>(expr)->getLocStart();
}
}
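The fallback getExprLocImpl above now calls getLocStart() rather than getSourceRange().getBegin(). The surrounding "macro metaprogramming hack" selects between two overloads by the static type of a member-function pointer, which names the derived class only when that class declares its own getExprLoc. A reduced stand-alone form of the trick (Base, getLoc, and the helper names are illustrative):

    struct Base {
      int getLoc() const { return 0; }
    };

    // Chosen when D declares its own getLoc: &D::getLoc then has the exact
    // type int (D::*)() const.
    template <class D>
    int getLocImpl(const Base *B, int (D::*)() const) {
      return static_cast<const D *>(B)->getLoc();
    }

    // Chosen when D merely inherits it: &D::getLoc then names the base class,
    // int (Base::*)() const, which matches this overload exactly.
    template <class D>
    int getLocImpl(const Base *B, int (Base::*)() const) {
      return B->getLoc();
    }

    // Usage: getLocImpl<SomeDerived>(b, &SomeDerived::getLoc);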
@@ -123,7 +127,6 @@ SourceLocation Expr::getExprLoc() const {
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind");
- return SourceLocation();
}
//===----------------------------------------------------------------------===//
@@ -133,7 +136,7 @@ SourceLocation Expr::getExprLoc() const {
/// \brief Compute the type-, value-, and instantiation-dependence of a
/// declaration reference
/// based on the declaration being referenced.
-static void computeDeclRefDependence(NamedDecl *D, QualType T,
+static void computeDeclRefDependence(ASTContext &Ctx, NamedDecl *D, QualType T,
bool &TypeDependent,
bool &ValueDependent,
bool &InstantiationDependent) {
@@ -184,21 +187,29 @@ static void computeDeclRefDependence(NamedDecl *D, QualType T,
// (VD) - a constant with integral or enumeration type and is
// initialized with an expression that is value-dependent.
+ // (VD) - a constant with literal type and is initialized with an
+ // expression that is value-dependent [C++11].
+ // (VD) - FIXME: Missing from the standard:
+ // - an entity with reference type and is initialized with an
+ // expression that is value-dependent [C++11]
if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
- if (Var->getType()->isIntegralOrEnumerationType() &&
- Var->getType().getCVRQualifiers() == Qualifiers::Const) {
+ if ((Ctx.getLangOpts().CPlusPlus0x ?
+ Var->getType()->isLiteralType() :
+ Var->getType()->isIntegralOrEnumerationType()) &&
+ (Var->getType().getCVRQualifiers() == Qualifiers::Const ||
+ Var->getType()->isReferenceType())) {
if (const Expr *Init = Var->getAnyInitializer())
if (Init->isValueDependent()) {
ValueDependent = true;
InstantiationDependent = true;
}
- }
-
+ }
+
// (VD) - FIXME: Missing from the standard:
// - a member function or a static data member of the current
// instantiation
- else if (Var->isStaticDataMember() &&
- Var->getDeclContext()->isDependentContext()) {
+ if (Var->isStaticDataMember() &&
+ Var->getDeclContext()->isDependentContext()) {
ValueDependent = true;
InstantiationDependent = true;
}
@@ -212,16 +223,15 @@ static void computeDeclRefDependence(NamedDecl *D, QualType T,
if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
ValueDependent = true;
InstantiationDependent = true;
- return;
- }
+ }
}
-void DeclRefExpr::computeDependence() {
+void DeclRefExpr::computeDependence(ASTContext &Ctx) {
bool TypeDependent = false;
bool ValueDependent = false;
bool InstantiationDependent = false;
- computeDeclRefDependence(getDecl(), getType(), TypeDependent, ValueDependent,
- InstantiationDependent);
+ computeDeclRefDependence(Ctx, getDecl(), getType(), TypeDependent,
+ ValueDependent, InstantiationDependent);
// (TD) C++ [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains:
@@ -250,8 +260,11 @@ void DeclRefExpr::computeDependence() {
ExprBits.ContainsUnexpandedParameterPack = true;
}
-DeclRefExpr::DeclRefExpr(NestedNameSpecifierLoc QualifierLoc,
- ValueDecl *D, const DeclarationNameInfo &NameInfo,
+DeclRefExpr::DeclRefExpr(ASTContext &Ctx,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D, bool RefersToEnclosingLocal,
+ const DeclarationNameInfo &NameInfo,
NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs,
QualType T, ExprValueKind VK)
@@ -263,38 +276,48 @@ DeclRefExpr::DeclRefExpr(NestedNameSpecifierLoc QualifierLoc,
DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
if (FoundD)
getInternalFoundDecl() = FoundD;
- DeclRefExprBits.HasExplicitTemplateArgs = TemplateArgs ? 1 : 0;
+ DeclRefExprBits.HasTemplateKWAndArgsInfo
+ = (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
+ DeclRefExprBits.RefersToEnclosingLocal = RefersToEnclosingLocal;
if (TemplateArgs) {
bool Dependent = false;
bool InstantiationDependent = false;
bool ContainsUnexpandedParameterPack = false;
- getExplicitTemplateArgs().initializeFrom(*TemplateArgs, Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
if (InstantiationDependent)
setInstantiationDependent(true);
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
DeclRefExprBits.HadMultipleCandidates = 0;
- computeDependence();
+ computeDependence(Ctx);
}
DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
ValueDecl *D,
+ bool RefersToEnclosingLocal,
SourceLocation NameLoc,
QualType T,
ExprValueKind VK,
NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs) {
- return Create(Context, QualifierLoc, D,
+ return Create(Context, QualifierLoc, TemplateKWLoc, D,
+ RefersToEnclosingLocal,
DeclarationNameInfo(D->getDeclName(), NameLoc),
T, VK, FoundD, TemplateArgs);
}
DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
ValueDecl *D,
+ bool RefersToEnclosingLocal,
const DeclarationNameInfo &NameInfo,
QualType T,
ExprValueKind VK,
@@ -310,25 +333,28 @@ DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
if (FoundD)
Size += sizeof(NamedDecl *);
if (TemplateArgs)
- Size += ASTTemplateArgumentListInfo::sizeFor(*TemplateArgs);
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
+ else if (TemplateKWLoc.isValid())
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
- return new (Mem) DeclRefExpr(QualifierLoc, D, NameInfo, FoundD, TemplateArgs,
- T, VK);
+ return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
+ RefersToEnclosingLocal,
+ NameInfo, FoundD, TemplateArgs, T, VK);
}
DeclRefExpr *DeclRefExpr::CreateEmpty(ASTContext &Context,
bool HasQualifier,
bool HasFoundDecl,
- bool HasExplicitTemplateArgs,
+ bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
std::size_t Size = sizeof(DeclRefExpr);
if (HasQualifier)
Size += sizeof(NestedNameSpecifierLoc);
if (HasFoundDecl)
Size += sizeof(NamedDecl *);
- if (HasExplicitTemplateArgs)
- Size += ASTTemplateArgumentListInfo::sizeFor(NumTemplateArgs);
+ if (HasTemplateKWAndArgsInfo)
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
return new (Mem) DeclRefExpr(EmptyShell());
@@ -342,6 +368,16 @@ SourceRange DeclRefExpr::getSourceRange() const {
R.setEnd(getRAngleLoc());
return R;
}
+SourceLocation DeclRefExpr::getLocStart() const {
+ if (hasQualifier())
+ return getQualifierLoc().getBeginLoc();
+ return getNameInfo().getLocStart();
+}
+SourceLocation DeclRefExpr::getLocEnd() const {
+ if (hasExplicitTemplateArgs())
+ return getRAngleLoc();
+ return getNameInfo().getLocEnd();
+}
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
@@ -352,7 +388,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual)
return FD->getNameAsString();
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
@@ -362,22 +398,24 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
Out << "static ";
}
- PrintingPolicy Policy(Context.getLangOptions());
-
+ PrintingPolicy Policy(Context.getLangOpts());
std::string Proto = FD->getQualifiedNameAsString(Policy);
+ llvm::raw_string_ostream POut(Proto);
- const FunctionType *AFT = FD->getType()->getAs<FunctionType>();
+ const FunctionDecl *Decl = FD;
+ if (const FunctionDecl* Pattern = FD->getTemplateInstantiationPattern())
+ Decl = Pattern;
+ const FunctionType *AFT = Decl->getType()->getAs<FunctionType>();
const FunctionProtoType *FT = 0;
if (FD->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(AFT);
- Proto += "(";
+ POut << "(";
if (FT) {
- llvm::raw_string_ostream POut(Proto);
- for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
+ for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) {
if (i) POut << ", ";
std::string Param;
- FD->getParamDecl(i)->getType().getAsStringInternal(Param, Policy);
+ Decl->getParamDecl(i)->getType().getAsStringInternal(Param, Policy);
POut << Param;
}
@@ -386,16 +424,74 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
POut << "...";
}
}
- Proto += ")";
+ POut << ")";
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
Qualifiers ThisQuals = Qualifiers::fromCVRMask(MD->getTypeQualifiers());
if (ThisQuals.hasConst())
- Proto += " const";
+ POut << " const";
if (ThisQuals.hasVolatile())
- Proto += " volatile";
+ POut << " volatile";
+ RefQualifierKind Ref = MD->getRefQualifier();
+ if (Ref == RQ_LValue)
+ POut << " &";
+ else if (Ref == RQ_RValue)
+ POut << " &&";
+ }
+
+ typedef SmallVector<const ClassTemplateSpecializationDecl *, 8> SpecsTy;
+ SpecsTy Specs;
+ const DeclContext *Ctx = FD->getDeclContext();
+ while (Ctx && isa<NamedDecl>(Ctx)) {
+ const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
+ if (Spec && !Spec->isExplicitSpecialization())
+ Specs.push_back(Spec);
+ Ctx = Ctx->getParent();
}
+ std::string TemplateParams;
+ llvm::raw_string_ostream TOut(TemplateParams);
+ for (SpecsTy::reverse_iterator I = Specs.rbegin(), E = Specs.rend();
+ I != E; ++I) {
+ const TemplateParameterList *Params
+ = (*I)->getSpecializedTemplate()->getTemplateParameters();
+ const TemplateArgumentList &Args = (*I)->getTemplateArgs();
+ assert(Params->size() == Args.size());
+ for (unsigned i = 0, numParams = Params->size(); i != numParams; ++i) {
+ StringRef Param = Params->getParam(i)->getName();
+ if (Param.empty()) continue;
+ TOut << Param << " = ";
+ Args.get(i).print(Policy, TOut);
+ TOut << ", ";
+ }
+ }
+
+ FunctionTemplateSpecializationInfo *FSI
+ = FD->getTemplateSpecializationInfo();
+ if (FSI && !FSI->isExplicitSpecialization()) {
+ const TemplateParameterList* Params
+ = FSI->getTemplate()->getTemplateParameters();
+ const TemplateArgumentList* Args = FSI->TemplateArguments;
+ assert(Params->size() == Args->size());
+ for (unsigned i = 0, e = Params->size(); i != e; ++i) {
+ StringRef Param = Params->getParam(i)->getName();
+ if (Param.empty()) continue;
+ TOut << Param << " = ";
+ Args->get(i).print(Policy, TOut);
+ TOut << ", ";
+ }
+ }
+
+ TOut.flush();
+ if (!TemplateParams.empty()) {
+ // Remove the trailing comma and space.
+ TemplateParams.resize(TemplateParams.size() - 2);
+ POut << " [" << TemplateParams << "]";
+ }
+
+ POut.flush();
+
if (!isa<CXXConstructorDecl>(FD) && !isa<CXXDestructorDecl>(FD))
AFT->getResultType().getAsStringInternal(Proto, Policy);
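The long addition above teaches PredefinedExpr::ComputeName (the value of __PRETTY_FUNCTION__ and friends) to print the template pattern's signature through a raw_string_ostream, followed by a [Param = Arg, ...] suffix gathered from enclosing class template specializations and the function's own specialization info, in the style GCC uses. An illustrative use (the exact text depends on the type printer):

    #include <cstdio>

    template <typename T>
    void f(T) { std::puts(__PRETTY_FUNCTION__); }

    int main() {
      f(42); // with this change clang prints: void f(T) [T = int]
      return 0;
    }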
@@ -405,7 +501,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
return Name.str().str();
}
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurrentDecl)) {
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
Out << (MD->isInstanceMethod() ? '-' : '+');
Out << '[';
@@ -417,7 +513,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (const ObjCCategoryImplDecl *CID =
dyn_cast<ObjCCategoryImplDecl>(MD->getDeclContext()))
- Out << '(' << CID << ')';
+ Out << '(' << *CID << ')';
Out << ' ';
Out << MD->getSelector().getAsString();
@@ -468,7 +564,7 @@ FloatingLiteral::Create(ASTContext &C, const llvm::APFloat &V,
FloatingLiteral *
FloatingLiteral::Create(ASTContext &C, EmptyShell Empty) {
- return new (C) FloatingLiteral(Empty);
+ return new (C) FloatingLiteral(C, Empty);
}
/// getValueAsApproximateDouble - This returns the value as an inaccurate
@@ -482,6 +578,30 @@ double FloatingLiteral::getValueAsApproximateDouble() const {
return V.convertToDouble();
}
+int StringLiteral::mapCharByteWidth(TargetInfo const &target, StringKind k) {
+ int CharByteWidth = 0;
+ switch (k) {
+ case Ascii:
+ case UTF8:
+ CharByteWidth = target.getCharWidth();
+ break;
+ case Wide:
+ CharByteWidth = target.getWCharWidth();
+ break;
+ case UTF16:
+ CharByteWidth = target.getChar16Width();
+ break;
+ case UTF32:
+ CharByteWidth = target.getChar32Width();
+ break;
+ }
+ assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
+ CharByteWidth /= 8;
+ assert((CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4)
+ && "character byte widths supported are 1, 2, and 4 only");
+ return CharByteWidth;
+}
+
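mapCharByteWidth converts the target's bit width for each literal kind into a byte count and insists the result is 1, 2, or 4. A stand-alone version of the arithmetic, assuming a typical target where char is 8 bits and char16_t/char32_t are 16/32 bits (wchar_t genuinely varies by target):

    #include <cassert>

    enum StringKind { Ascii, Wide, UTF8, UTF16, UTF32 };

    static int charByteWidth(StringKind K) {
      int Bits = 0;
      switch (K) {
      case Ascii: case UTF8: Bits = 8;  break; // target.getCharWidth()
      case UTF16:            Bits = 16; break; // target.getChar16Width()
      case UTF32: case Wide: Bits = 32; break; // illustrative widths
      }
      assert((Bits & 7) == 0 && "character size must be a byte multiple");
      return Bits / 8;
    }

    int main() {
      // u"ab" arrives as 4 bytes of payload: 4 / 2 == 2 code units.
      assert(charByteWidth(UTF16) == 2);
      return 0;
    }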
StringLiteral *StringLiteral::Create(ASTContext &C, StringRef Str,
StringKind Kind, bool Pascal, QualType Ty,
const SourceLocation *Loc,
@@ -494,12 +614,8 @@ StringLiteral *StringLiteral::Create(ASTContext &C, StringRef Str,
StringLiteral *SL = new (Mem) StringLiteral(Ty);
// OPTIMIZE: could allocate this appended to the StringLiteral.
- char *AStrData = new (C, 1) char[Str.size()];
- memcpy(AStrData, Str.data(), Str.size());
- SL->StrData = AStrData;
- SL->ByteLength = Str.size();
- SL->Kind = Kind;
- SL->IsPascal = Pascal;
+ SL->setString(C, Str, Kind, Pascal);
+
SL->TokLocs[0] = Loc[0];
SL->NumConcatenated = NumStrs;
@@ -513,17 +629,46 @@ StringLiteral *StringLiteral::CreateEmpty(ASTContext &C, unsigned NumStrs) {
sizeof(SourceLocation)*(NumStrs-1),
llvm::alignOf<StringLiteral>());
StringLiteral *SL = new (Mem) StringLiteral(QualType());
- SL->StrData = 0;
- SL->ByteLength = 0;
+ SL->CharByteWidth = 0;
+ SL->Length = 0;
SL->NumConcatenated = NumStrs;
return SL;
}
-void StringLiteral::setString(ASTContext &C, StringRef Str) {
- char *AStrData = new (C, 1) char[Str.size()];
- memcpy(AStrData, Str.data(), Str.size());
- StrData = AStrData;
- ByteLength = Str.size();
+void StringLiteral::setString(ASTContext &C, StringRef Str,
+ StringKind Kind, bool IsPascal) {
+ // FIXME: we assume that the string data comes from a target that uses the
+ // same code unit size and endianness for the type of string.
+ this->Kind = Kind;
+ this->IsPascal = IsPascal;
+
+ CharByteWidth = mapCharByteWidth(C.getTargetInfo(), Kind);
+ assert((Str.size() % CharByteWidth == 0)
+ && "size of data must be multiple of CharByteWidth");
+ Length = Str.size() / CharByteWidth;
+
+ switch (CharByteWidth) {
+ case 1: {
+ char *AStrData = new (C) char[Length];
+ std::memcpy(AStrData, Str.data(), Str.size());
+ StrData.asChar = AStrData;
+ break;
+ }
+ case 2: {
+ uint16_t *AStrData = new (C) uint16_t[Length];
+ std::memcpy(AStrData, Str.data(), Str.size());
+ StrData.asUInt16 = AStrData;
+ break;
+ }
+ case 4: {
+ uint32_t *AStrData = new (C) uint32_t[Length];
+ std::memcpy(AStrData, Str.data(), Str.size());
+ StrData.asUInt32 = AStrData;
+ break;
+ }
+ default:
+ llvm_unreachable("unsupported CharByteWidth");
+ }
}
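setString now keys the copy on CharByteWidth and stores the buffer in a width-appropriate union member, with Length counted in code units rather than bytes. A compressed sketch of that storage scheme (LiteralStorage is a stand-in and, to stay short, never frees its buffers):

    #include <cstdint>
    #include <cstring>

    struct LiteralStorage {
      union {
        char     *asChar;
        uint16_t *asUInt16;
        uint32_t *asUInt32;
      } StrData;
      unsigned CharByteWidth;
      unsigned Length; // code units, not bytes

      void set(const char *Bytes, unsigned NumBytes, unsigned Width) {
        CharByteWidth = Width;
        Length = NumBytes / Width;
        switch (Width) {
        case 1:
          StrData.asChar = new char[Length];
          std::memcpy(StrData.asChar, Bytes, NumBytes);
          break;
        case 2:
          StrData.asUInt16 = new uint16_t[Length];
          std::memcpy(StrData.asUInt16, Bytes, NumBytes);
          break;
        case 4:
          StrData.asUInt32 = new uint32_t[Length];
          std::memcpy(StrData.asUInt32, Bytes, NumBytes);
          break;
        }
      }
    };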
/// getLocationOfByte - Return a source location that points to the specified
@@ -596,7 +741,6 @@ getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
/// corresponds to, e.g. "sizeof" or "[pre]++".
const char *UnaryOperator::getOpcodeStr(Opcode Op) {
switch (Op) {
- default: llvm_unreachable("Unknown unary operator");
case UO_PostInc: return "++";
case UO_PostDec: return "--";
case UO_PreInc: return "++";
@@ -611,6 +755,7 @@ const char *UnaryOperator::getOpcodeStr(Opcode Op) {
case UO_Imag: return "__imag";
case UO_Extension: return "__extension__";
}
+ llvm_unreachable("Unknown unary operator");
}
UnaryOperatorKind
@@ -778,7 +923,7 @@ void CallExpr::setNumArgs(ASTContext& C, unsigned NumArgs) {
/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
/// not, return 0.
-unsigned CallExpr::isBuiltinCall(const ASTContext &Context) const {
+unsigned CallExpr::isBuiltinCall() const {
// All simple function calls (e.g. func()) are implicitly cast to pointer to
// function. As a result, we try and obtain the DeclRefExpr from the
// ImplicitCastExpr.
@@ -826,6 +971,24 @@ SourceRange CallExpr::getSourceRange() const {
end = getArg(getNumArgs() - 1)->getLocEnd();
return SourceRange(begin, end);
}
+SourceLocation CallExpr::getLocStart() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getSourceRange().getBegin();
+
+ SourceLocation begin = getCallee()->getLocStart();
+ if (begin.isInvalid() && getNumArgs() > 0)
+ begin = getArg(0)->getLocStart();
+ return begin;
+}
+SourceLocation CallExpr::getLocEnd() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getSourceRange().getEnd();
+
+ SourceLocation end = getRParenLoc();
+ if (end.isInvalid() && getNumArgs() > 0)
+ end = getArg(getNumArgs() - 1)->getLocEnd();
+ return end;
+}
OffsetOfExpr *OffsetOfExpr::Create(ASTContext &C, QualType type,
SourceLocation OperatorLoc,
@@ -886,6 +1049,7 @@ IdentifierInfo *OffsetOfExpr::OffsetOfNode::getFieldName() const {
MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
ValueDecl *memberdecl,
DeclAccessPair founddecl,
DeclarationNameInfo nameinfo,
@@ -902,7 +1066,9 @@ MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
Size += sizeof(MemberNameQualifier);
if (targs)
- Size += ASTTemplateArgumentListInfo::sizeFor(*targs);
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(targs->size());
+ else if (TemplateKWLoc.isValid())
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>());
MemberExpr *E = new (Mem) MemberExpr(base, isarrow, memberdecl, nameinfo,
@@ -926,41 +1092,46 @@ MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
NQ->FoundDecl = founddecl;
}
+ E->HasTemplateKWAndArgsInfo = (targs || TemplateKWLoc.isValid());
+
if (targs) {
bool Dependent = false;
bool InstantiationDependent = false;
bool ContainsUnexpandedParameterPack = false;
- E->HasExplicitTemplateArgumentList = true;
- E->getExplicitTemplateArgs().initializeFrom(*targs, Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *targs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
if (InstantiationDependent)
E->setInstantiationDependent(true);
+ } else if (TemplateKWLoc.isValid()) {
+ E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
return E;
}
SourceRange MemberExpr::getSourceRange() const {
- SourceLocation StartLoc;
+ return SourceRange(getLocStart(), getLocEnd());
+}
+SourceLocation MemberExpr::getLocStart() const {
if (isImplicitAccess()) {
if (hasQualifier())
- StartLoc = getQualifierLoc().getBeginLoc();
- else
- StartLoc = MemberLoc;
- } else {
- // FIXME: We don't want this to happen. Rather, we should be able to
- // detect all kinds of implicit accesses more cleanly.
- StartLoc = getBase()->getLocStart();
- if (StartLoc.isInvalid())
- StartLoc = MemberLoc;
+ return getQualifierLoc().getBeginLoc();
+ return MemberLoc;
}
-
- SourceLocation EndLoc =
- HasExplicitTemplateArgumentList? getRAngleLoc()
- : getMemberNameInfo().getEndLoc();
-
- return SourceRange(StartLoc, EndLoc);
+
+ // FIXME: We don't want this to happen. Rather, we should be able to
+ // detect all kinds of implicit accesses more cleanly.
+ SourceLocation BaseStartLoc = getBase()->getLocStart();
+ if (BaseStartLoc.isValid())
+ return BaseStartLoc;
+ return MemberLoc;
+}
+SourceLocation MemberExpr::getLocEnd() const {
+ if (hasExplicitTemplateArgs())
+ return getRAngleLoc();
+ return getMemberNameInfo().getEndLoc();
}
void CastExpr::CheckCastConsistency() const {
@@ -983,6 +1154,11 @@ void CastExpr::CheckCastConsistency() const {
assert(getSubExpr()->getType()->isBlockPointerType());
goto CheckNoBasePath;
+ case CK_ReinterpretMemberPointer:
+ assert(getType()->isMemberPointerType());
+ assert(getSubExpr()->getType()->isMemberPointerType());
+ goto CheckNoBasePath;
+
case CK_BitCast:
// Arbitrary casts to C pointer types count as bitcasts.
// Otherwise, we should only have block and ObjC pointer casts
@@ -1001,6 +1177,11 @@ void CastExpr::CheckCastConsistency() const {
!getSubExpr()->getType()->isBlockPointerType());
goto CheckNoBasePath;
+ case CK_CopyAndAutoreleaseBlockObject:
+ assert(getType()->isBlockPointerType());
+ assert(getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
// These should not have an inheritance path.
case CK_Dynamic:
case CK_ToUnion:
@@ -1035,8 +1216,9 @@ void CastExpr::CheckCastConsistency() const {
case CK_Dependent:
case CK_LValueToRValue:
- case CK_GetObjCProperty:
case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_PointerToBoolean:
case CK_IntegralToBoolean:
case CK_FloatingToBoolean:
@@ -1061,8 +1243,6 @@ const char *CastExpr::getCastKindName() const {
return "LValueBitCast";
case CK_LValueToRValue:
return "LValueToRValue";
- case CK_GetObjCProperty:
- return "GetObjCProperty";
case CK_NoOp:
return "NoOp";
case CK_BaseToDerived:
@@ -1087,6 +1267,8 @@ const char *CastExpr::getCastKindName() const {
return "BaseToDerivedMemberPointer";
case CK_DerivedToBaseMemberPointer:
return "DerivedToBaseMemberPointer";
+ case CK_ReinterpretMemberPointer:
+ return "ReinterpretMemberPointer";
case CK_UserDefinedConversion:
return "UserDefinedConversion";
case CK_ConstructorConversion:
@@ -1151,10 +1333,15 @@ const char *CastExpr::getCastKindName() const {
return "ARCReclaimReturnedObject";
case CK_ARCExtendBlockObject:
return "ARCCExtendBlockObject";
+ case CK_AtomicToNonAtomic:
+ return "AtomicToNonAtomic";
+ case CK_NonAtomicToAtomic:
+ return "NonAtomicToAtomic";
+ case CK_CopyAndAutoreleaseBlockObject:
+ return "CopyAndAutoreleaseBlockObject";
}
llvm_unreachable("Unhandled cast kind!");
- return 0;
}
Expr *CastExpr::getSubExprAsWritten() {
@@ -1196,7 +1383,6 @@ CXXBaseSpecifier **CastExpr::path_buffer() {
#include "clang/AST/StmtNodes.inc"
default:
llvm_unreachable("non-cast expressions not possible here");
- return 0;
}
}
@@ -1284,7 +1470,7 @@ const char *BinaryOperator::getOpcodeStr(Opcode Op) {
case BO_Comma: return ",";
}
- return "";
+ llvm_unreachable("Invalid OpCode!");
}
BinaryOperatorKind
@@ -1355,9 +1541,10 @@ InitListExpr::InitListExpr(ASTContext &C, SourceLocation lbraceloc,
: Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
false, false),
InitExprs(C, numInits),
- LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), SyntacticForm(0),
- HadArrayRangeDesignator(false)
-{
+ LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), SyntacticForm(0)
+{
+ sawArrayRangeDesignator(false);
+ setInitializesStdInitializerList(false);
for (unsigned I = 0; I != numInits; ++I) {
if (initExprs[I]->isTypeDependent())
ExprBits.TypeDependent = true;
@@ -1394,6 +1581,7 @@ Expr *InitListExpr::updateInit(ASTContext &C, unsigned Init, Expr *expr) {
}
void InitListExpr::setArrayFiller(Expr *filler) {
+ assert(!hasArrayFiller() && "Filler already set!");
ArrayFillerOrUnionFieldInit = filler;
// Fill out any "holes" in the array due to designated initializers.
Expr **inits = getInits();
@@ -1433,9 +1621,10 @@ SourceRange InitListExpr::getSourceRange() const {
/// getFunctionType - Return the underlying function type for this block.
///
-const FunctionType *BlockExpr::getFunctionType() const {
- return getType()->getAs<BlockPointerType>()->
- getPointeeType()->getAs<FunctionType>();
+const FunctionProtoType *BlockExpr::getFunctionType() const {
+ // The block pointer is never sugared, but the function type might be.
+ return cast<BlockPointerType>(getType())
+ ->getPointeeType()->castAs<FunctionProtoType>();
}
SourceLocation BlockExpr::getCaretLocation() const {
@@ -1591,7 +1780,8 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
// Fallthrough for generic call handling.
}
case CallExprClass:
- case CXXMemberCallExprClass: {
+ case CXXMemberCallExprClass:
+ case UserDefinedLiteralClass: {
// If this is a direct call, get the callee.
const CallExpr *CE = cast<CallExpr>(this);
if (const Decl *FD = CE->getCalleeDecl()) {
@@ -1620,7 +1810,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
case ObjCMessageExprClass: {
const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(this);
- if (Ctx.getLangOptions().ObjCAutoRefCount &&
+ if (Ctx.getLangOpts().ObjCAutoRefCount &&
ME->isInstanceMessage() &&
!ME->getType()->isVoidType() &&
ME->getSelector().getIdentifierInfoForSlot(0) &&
@@ -1644,6 +1834,19 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
R1 = getSourceRange();
return true;
+ case PseudoObjectExprClass: {
+ const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
+
+ // Only complain about things that have the form of a getter.
+ if (isa<UnaryOperator>(PO->getSyntacticForm()) ||
+ isa<BinaryOperator>(PO->getSyntacticForm()))
+ return false;
+
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ }
+
case StmtExprClass: {
// Statement exprs don't logically have side effects themselves, but are
// sometimes used in macros in ways that give them a type that is unused.
@@ -1730,14 +1933,8 @@ bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
->isOBJCGCCandidate(Ctx);
case CStyleCastExprClass:
return cast<CStyleCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
- case BlockDeclRefExprClass:
case DeclRefExprClass: {
-
- const Decl *D;
- if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E))
- D = BDRE->getDecl();
- else
- D = cast<DeclRefExpr>(E)->getDecl();
+ const Decl *D = cast<DeclRefExpr>(E)->getDecl();
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->hasGlobalStorage())
@@ -1766,7 +1963,7 @@ bool Expr::isBoundMemberFunction(ASTContext &Ctx) const {
}
QualType Expr::findBoundMemberType(const Expr *expr) {
- assert(expr->getType()->isSpecificPlaceholderType(BuiltinType::BoundMember));
+ assert(expr->hasPlaceholderType(BuiltinType::BoundMember));
// Bound member expressions are always one of these possibilities:
// x->m x.m x->*y x.*y
@@ -1910,8 +2107,9 @@ Expr::CanThrowResult Expr::CanThrow(ASTContext &C) const {
// pointer, or member function pointer that does not have a non-throwing
// exception-specification
case CallExprClass:
+ case CXXMemberCallExprClass:
case CXXOperatorCallExprClass:
- case CXXMemberCallExprClass: {
+ case UserDefinedLiteralClass: {
const CallExpr *CE = cast<CallExpr>(this);
CanThrowResult CT;
if (isTypeDependent())
@@ -1934,15 +2132,22 @@ Expr::CanThrowResult Expr::CanThrow(ASTContext &C) const {
return MergeCanThrow(CT, CanSubExprsThrow(C, this));
}
+ case LambdaExprClass: {
+ const LambdaExpr *Lambda = cast<LambdaExpr>(this);
+ CanThrowResult CT = Expr::CT_Cannot;
+ for (LambdaExpr::capture_init_iterator Cap = Lambda->capture_init_begin(),
+ CapEnd = Lambda->capture_init_end();
+ Cap != CapEnd; ++Cap)
+ CT = MergeCanThrow(CT, (*Cap)->CanThrow(C));
+ return CT;
+ }
+
case CXXNewExprClass: {
CanThrowResult CT;
if (isTypeDependent())
CT = CT_Dependent;
else
- CT = MergeCanThrow(
- CanCalleeThrow(C, this, cast<CXXNewExpr>(this)->getOperatorNew()),
- CanCalleeThrow(C, this, cast<CXXNewExpr>(this)->getConstructor(),
- /*NullThrows*/false));
+ CT = CanCalleeThrow(C, this, cast<CXXNewExpr>(this)->getOperatorNew());
if (CT == CT_Can)
return CT;
return MergeCanThrow(CT, CanSubExprsThrow(C, this));
@@ -1979,38 +2184,47 @@ Expr::CanThrowResult Expr::CanThrow(ASTContext &C) const {
// specs.
case ObjCMessageExprClass:
case ObjCPropertyRefExprClass:
+ case ObjCSubscriptRefExprClass:
+ return CT_Can;
+
+ // All the ObjC literals that are implemented as calls are
+ // potentially throwing unless we decide to close off that
+ // possibility.
+ case ObjCArrayLiteralClass:
+ case ObjCDictionaryLiteralClass:
+ case ObjCNumericLiteralClass:
return CT_Can;
// Many other things have subexpressions, so we have to test those.
// Some are simple:
- case ParenExprClass:
- case MemberExprClass:
- case CXXReinterpretCastExprClass:
- case CXXConstCastExprClass:
case ConditionalOperatorClass:
case CompoundLiteralExprClass:
- case ExtVectorElementExprClass:
- case InitListExprClass:
- case DesignatedInitExprClass:
- case ParenListExprClass:
- case VAArgExprClass:
+ case CXXConstCastExprClass:
case CXXDefaultArgExprClass:
+ case CXXReinterpretCastExprClass:
+ case DesignatedInitExprClass:
case ExprWithCleanupsClass:
- case ObjCIvarRefExprClass:
+ case ExtVectorElementExprClass:
+ case InitListExprClass:
+ case MemberExprClass:
case ObjCIsaExprClass:
+ case ObjCIvarRefExprClass:
+ case ParenExprClass:
+ case ParenListExprClass:
case ShuffleVectorExprClass:
+ case VAArgExprClass:
return CanSubExprsThrow(C, this);
// Some might be dependent for other reasons.
- case UnaryOperatorClass:
case ArraySubscriptExprClass:
- case ImplicitCastExprClass:
+ case BinaryOperatorClass:
+ case CompoundAssignOperatorClass:
case CStyleCastExprClass:
case CXXStaticCastExprClass:
case CXXFunctionalCastExprClass:
- case BinaryOperatorClass:
- case CompoundAssignOperatorClass:
- case MaterializeTemporaryExprClass: {
+ case ImplicitCastExprClass:
+ case MaterializeTemporaryExprClass:
+ case UnaryOperatorClass: {
CanThrowResult CT = isTypeDependent() ? CT_Dependent : CT_Cannot;
return MergeCanThrow(CT, CanSubExprsThrow(C, this));
}
@@ -2030,16 +2244,71 @@ Expr::CanThrowResult Expr::CanThrow(ASTContext &C) const {
return cast<GenericSelectionExpr>(this)->getResultExpr()->CanThrow(C);
// Some expressions are always dependent.
- case DependentScopeDeclRefExprClass:
- case CXXUnresolvedConstructExprClass:
case CXXDependentScopeMemberExprClass:
+ case CXXUnresolvedConstructExprClass:
+ case DependentScopeDeclRefExprClass:
return CT_Dependent;
- default:
- // All other expressions don't have subexpressions, or else they are
- // unevaluated.
+ case AtomicExprClass:
+ case AsTypeExprClass:
+ case BinaryConditionalOperatorClass:
+ case BlockExprClass:
+ case CUDAKernelCallExprClass:
+ case DeclRefExprClass:
+ case ObjCBridgedCastExprClass:
+ case ObjCIndirectCopyRestoreExprClass:
+ case ObjCProtocolExprClass:
+ case ObjCSelectorExprClass:
+ case OffsetOfExprClass:
+ case PackExpansionExprClass:
+ case PseudoObjectExprClass:
+ case SubstNonTypeTemplateParmExprClass:
+ case SubstNonTypeTemplateParmPackExprClass:
+ case UnaryExprOrTypeTraitExprClass:
+ case UnresolvedLookupExprClass:
+ case UnresolvedMemberExprClass:
+ // FIXME: Can any of the above throw? If so, when?
return CT_Cannot;
+
+ case AddrLabelExprClass:
+ case ArrayTypeTraitExprClass:
+ case BinaryTypeTraitExprClass:
+ case TypeTraitExprClass:
+ case CXXBoolLiteralExprClass:
+ case CXXNoexceptExprClass:
+ case CXXNullPtrLiteralExprClass:
+ case CXXPseudoDestructorExprClass:
+ case CXXScalarValueInitExprClass:
+ case CXXThisExprClass:
+ case CXXUuidofExprClass:
+ case CharacterLiteralClass:
+ case ExpressionTraitExprClass:
+ case FloatingLiteralClass:
+ case GNUNullExprClass:
+ case ImaginaryLiteralClass:
+ case ImplicitValueInitExprClass:
+ case IntegerLiteralClass:
+ case ObjCEncodeExprClass:
+ case ObjCStringLiteralClass:
+ case ObjCBoolLiteralExprClass:
+ case OpaqueValueExprClass:
+ case PredefinedExprClass:
+ case SizeOfPackExprClass:
+ case StringLiteralClass:
+ case UnaryTypeTraitExprClass:
+ // These expressions can never throw.
+ return CT_Cannot;
+
+#define STMT(CLASS, PARENT) case CLASS##Class:
+#define STMT_RANGE(Base, First, Last)
+#define LAST_STMT_RANGE(BASE, FIRST, LAST)
+#define EXPR(CLASS, PARENT)
+#define ABSTRACT_STMT(STMT)
+#include "clang/AST/StmtNodes.inc"
+ case NoStmtClass:
+ llvm_unreachable("Invalid class for expression");
}
+ llvm_unreachable("Bogus StmtClass");
}
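The rewritten tail of Expr::CanThrow drops the old default: case and names every expression class explicitly; the StmtNodes.inc include then expands to one case label per non-expression statement, keeping the switch provably exhaustive. A reduced stand-alone form of that X-macro trick (nodes.inc is a hypothetical file containing lines such as NODE(Foo) and NODE(Bar)):

    #include "llvm/Support/ErrorHandling.h"

    enum NodeKind {
    #define NODE(N) N##Kind,
    #include "nodes.inc" // hypothetical list of NODE(...) entries
    #undef NODE
    };

    const char *kindName(NodeKind K) {
      switch (K) { // -Wswitch flags any NODE() added to the list later
    #define NODE(N) case N##Kind: return #N;
    #include "nodes.inc"
    #undef NODE
      }
      llvm_unreachable("unknown NodeKind");
    }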
Expr* Expr::IgnoreParens() {
@@ -2364,24 +2633,14 @@ bool Expr::isImplicitCXXThis() const {
/// hasAnyTypeDependentArguments - Determines if any of the expressions
/// in Exprs is type-dependent.
-bool Expr::hasAnyTypeDependentArguments(Expr** Exprs, unsigned NumExprs) {
- for (unsigned I = 0; I < NumExprs; ++I)
+bool Expr::hasAnyTypeDependentArguments(llvm::ArrayRef<Expr *> Exprs) {
+ for (unsigned I = 0; I < Exprs.size(); ++I)
if (Exprs[I]->isTypeDependent())
return true;
return false;
}
-/// hasAnyValueDependentArguments - Determines if any of the expressions
-/// in Exprs is value-dependent.
-bool Expr::hasAnyValueDependentArguments(Expr** Exprs, unsigned NumExprs) {
- for (unsigned I = 0; I < NumExprs; ++I)
- if (Exprs[I]->isValueDependent())
- return true;
-
- return false;
-}
-
bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
// This function is attempting to determine whether an expression is an
// initializer which can be evaluated at compile-time. isEvaluatable handles most
@@ -2399,6 +2658,8 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
switch (getStmtClass()) {
default: break;
+ case IntegerLiteralClass:
+ case FloatingLiteralClass:
case StringLiteralClass:
case ObjCStringLiteralClass:
case ObjCEncodeExprClass:
@@ -2408,15 +2669,20 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
// Only if it's
- // 1) an application of the trivial default constructor or
- if (!CE->getConstructor()->isTrivial()) return false;
- if (!CE->getNumArgs()) return true;
+ if (CE->getConstructor()->isTrivial()) {
+ // 1) an application of the trivial default constructor or
+ if (!CE->getNumArgs()) return true;
+
+ // 2) an elidable trivial copy construction of an operand which is
+ // itself a constant initializer. Note that we consider the
+ // operand on its own, *not* as a reference binding.
+ if (CE->isElidable() &&
+ CE->getArg(0)->isConstantInitializer(Ctx, false))
+ return true;
+ }
- // 2) an elidable trivial copy construction of an operand which is
- // itself a constant initializer. Note that we consider the
- // operand on its own, *not* as a reference binding.
- return CE->isElidable() &&
- CE->getArg(0)->isConstantInitializer(Ctx, false);
+ // 3) a foldable constexpr constructor.
+ break;
}
case CompoundLiteralExprClass: {
// This handles gcc's extension that allows global initializers like
@@ -2456,36 +2722,38 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
return Exp->getSubExpr()->isConstantInitializer(Ctx, false);
break;
}
- case BinaryOperatorClass: {
- // Special case &&foo - &&bar. It would be nice to generalize this somehow
- // but this handles the common case.
- const BinaryOperator *Exp = cast<BinaryOperator>(this);
- if (Exp->getOpcode() == BO_Sub &&
- isa<AddrLabelExpr>(Exp->getLHS()->IgnoreParenNoopCasts(Ctx)) &&
- isa<AddrLabelExpr>(Exp->getRHS()->IgnoreParenNoopCasts(Ctx)))
- return true;
- break;
- }
case CXXFunctionalCastExprClass:
case CXXStaticCastExprClass:
case ImplicitCastExprClass:
- case CStyleCastExprClass:
- // Handle casts with a destination that's a struct or union; this
- // deals with both the gcc no-op struct cast extension and the
- // cast-to-union extension.
- if (getType()->isRecordType())
- return cast<CastExpr>(this)->getSubExpr()
- ->isConstantInitializer(Ctx, false);
-
- // Integer->integer casts can be handled here, which is important for
- // things like (int)(&&x-&&y). Scary but true.
- if (getType()->isIntegerType() &&
- cast<CastExpr>(this)->getSubExpr()->getType()->isIntegerType())
- return cast<CastExpr>(this)->getSubExpr()
- ->isConstantInitializer(Ctx, false);
-
+ case CStyleCastExprClass: {
+ const CastExpr *CE = cast<CastExpr>(this);
+
+ // If we're promoting an integer to an _Atomic type then this is constant
+ // if the integer is constant. We also need to check the converse in case
+ // someone does something like:
+ //
+ // int a = (_Atomic(int))42;
+ //
+ // I doubt anyone would write code like this directly, but it's quite
+ // possible as the result of macro expansions.
+ if (CE->getCastKind() == CK_NonAtomicToAtomic ||
+ CE->getCastKind() == CK_AtomicToNonAtomic)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
+ // Handle bitcasts of vector constants.
+ if (getType()->isVectorType() && CE->getCastKind() == CK_BitCast)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
+ // Handle misc casts we want to ignore.
+ // FIXME: Is it really safe to ignore all these?
+ if (CE->getCastKind() == CK_NoOp ||
+ CE->getCastKind() == CK_LValueToRValue ||
+ CE->getCastKind() == CK_ToUnion ||
+ CE->getCastKind() == CK_ConstructorConversion)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
break;
-
+ }
case MaterializeTemporaryExprClass:
return cast<MaterializeTemporaryExpr>(this)->GetTemporaryExpr()
->isConstantInitializer(Ctx, false);
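After these hunks, isConstantInitializer recognizes three shapes of CXXConstructExpr and a short whitelist of cast kinds (including the _Atomic conversions motivated in the comment above), and anything it cannot prove structurally falls through to the constant evaluator. The accepted constructor shapes as plain C++ (a sketch; the third form is C++11 only):

    struct Trivial { int x; };
    struct Lit { int x; constexpr Lit(int v) : x(v) {} };

    Trivial t1;             // 1) trivial default construction
    Trivial t2 = Trivial(); // 2) elidable trivial copy of a constant initializer
    constexpr Lit l(42);    // 3) foldable constexpr constructor: not matched
                            //    structurally, handled by the evaluator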
@@ -2493,6 +2761,60 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
return isEvaluatable(Ctx);
}
+namespace {
+ /// \brief Look for a call to a non-trivial function within an expression.
+ class NonTrivialCallFinder : public EvaluatedExprVisitor<NonTrivialCallFinder>
+ {
+ typedef EvaluatedExprVisitor<NonTrivialCallFinder> Inherited;
+
+ bool NonTrivial;
+
+ public:
+ explicit NonTrivialCallFinder(ASTContext &Context)
+ : Inherited(Context), NonTrivial(false) { }
+
+ bool hasNonTrivialCall() const { return NonTrivial; }
+
+ void VisitCallExpr(CallExpr *E) {
+ if (CXXMethodDecl *Method
+ = dyn_cast_or_null<CXXMethodDecl>(E->getCalleeDecl())) {
+ if (Method->isTrivial()) {
+ // Recurse to children of the call.
+ Inherited::VisitStmt(E);
+ return;
+ }
+ }
+
+ NonTrivial = true;
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (E->getConstructor()->isTrivial()) {
+ // Recurse to children of the call.
+ Inherited::VisitStmt(E);
+ return;
+ }
+
+ NonTrivial = true;
+ }
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ if (E->getTemporary()->getDestructor()->isTrivial()) {
+ Inherited::VisitStmt(E);
+ return;
+ }
+
+ NonTrivial = true;
+ }
+ };
+}
+
+bool Expr::hasNonTrivialCall(ASTContext &Ctx) {
+ NonTrivialCallFinder Finder(Ctx);
+ Finder.Visit(this);
+ return Finder.hasNonTrivialCall();
+}
+
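NonTrivialCallFinder, added above, visits only the evaluated subexpressions and stops descending as soon as a call, construction, or temporary destruction is known to be non-trivial; trivial ones recurse into their children instead. The control flow, reduced to a stand-alone tree walk (Expr and Finder here are stand-ins, not the Clang types):

    #include <vector>

    struct Expr {
      bool NonTrivialCall;
      std::vector<Expr *> Children;
      Expr() : NonTrivialCall(false) {}
    };

    struct Finder {
      bool Found;
      Finder() : Found(false) {}
      void visit(const Expr *E) {
        if (E->NonTrivialCall) {
          Found = true;  // a trivial node would instead recurse below
          return;
        }
        for (unsigned I = 0, N = E->Children.size(); I != N; ++I)
          visit(E->Children[I]);
      }
    };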
/// isNullPointerConstant - C99 6.3.2.3p3 - Return whether this is a null
/// pointer constant or not, as well as the specific kind of constant detected.
/// Null pointer constants can be integer constant expressions with the
@@ -2518,7 +2840,7 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
// Strip off a cast to void*, if it exists. Except in C++.
if (const ExplicitCastExpr *CE = dyn_cast<ExplicitCastExpr>(this)) {
- if (!Ctx.getLangOptions().CPlusPlus) {
+ if (!Ctx.getLangOpts().CPlusPlus) {
// Check that it is a cast to void*.
if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
QualType Pointee = PT->getPointeeType();
@@ -2548,6 +2870,9 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
} else if (const MaterializeTemporaryExpr *M
= dyn_cast<MaterializeTemporaryExpr>(this)) {
return M->GetTemporaryExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(this)) {
+ if (const Expr *Source = OVE->getSourceExpr())
+ return Source->isNullPointerConstant(Ctx, NPC);
}
// C++0x nullptr_t is always a null pointer constant.
@@ -2563,15 +2888,22 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
}
// This expression must be an integer type.
if (!getType()->isIntegerType() ||
- (Ctx.getLangOptions().CPlusPlus && getType()->isEnumeralType()))
+ (Ctx.getLangOpts().CPlusPlus && getType()->isEnumeralType()))
return NPCK_NotNull;
// If we have an integer constant expression, we need to *evaluate* it and
- // test for the value 0.
- llvm::APSInt Result;
- bool IsNull = isIntegerConstantExpr(Result, Ctx) && Result == 0;
+ // test for the value 0. Don't use the C++11 constant expression semantics
+ // for this, for now; once the dust settles on core issue 903, we might only
+ // allow a literal 0 here in C++11 mode.
+ if (Ctx.getLangOpts().CPlusPlus0x) {
+ if (!isCXX98IntegralConstantExpr(Ctx))
+ return NPCK_NotNull;
+ } else {
+ if (!isIntegerConstantExpr(Ctx))
+ return NPCK_NotNull;
+ }
- return (IsNull ? NPCK_ZeroInteger : NPCK_NotNull);
+ return (EvaluateKnownConstInt(Ctx) == 0) ? NPCK_ZeroInteger : NPCK_NotNull;
}
/// \brief If this expression is an l-value for an Objective C
@@ -2722,7 +3054,8 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc)
+ SourceLocation RBracLoc,
+ bool isImplicit)
: Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
/*TypeDependent=*/false, /*ValueDependent=*/false,
/*InstantiationDependent=*/false,
@@ -2730,8 +3063,8 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
Kind(IsInstanceSuper? SuperInstance : SuperClass),
- HasMethod(Method != 0), IsDelegateInitCall(false), SuperLoc(SuperLoc),
- LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
+ SuperLoc(SuperLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
setReceiverPointer(SuperType.getAsOpaquePtr());
@@ -2746,14 +3079,15 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc)
+ SourceLocation RBracLoc,
+ bool isImplicit)
: Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
T->isDependentType(), T->isInstantiationDependentType(),
T->containsUnexpandedParameterPack()),
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
Kind(Class),
- HasMethod(Method != 0), IsDelegateInitCall(false),
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
@@ -2769,7 +3103,8 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
SelectorLocationsKind SelLocsK,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc)
+ SourceLocation RBracLoc,
+ bool isImplicit)
: Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, Receiver->isTypeDependent(),
Receiver->isTypeDependent(),
Receiver->isInstantiationDependent(),
@@ -2777,7 +3112,7 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
Kind(Instance),
- HasMethod(Method != 0), IsDelegateInitCall(false),
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
initArgsAndSelLocs(Args, SelLocs, SelLocsK);
@@ -2803,8 +3138,10 @@ void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
}
SelLocsKind = SelLocsK;
- if (SelLocsK == SelLoc_NonStandard)
- std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+ if (!isImplicit()) {
+ if (SelLocsK == SelLoc_NonStandard)
+ std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+ }
}
ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
@@ -2817,12 +3154,19 @@ ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
ArrayRef<SourceLocation> SelLocs,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc) {
- SelectorLocationsKind SelLocsK;
- ObjCMessageExpr *Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
SuperType, Sel, SelLocs, SelLocsK,
- Method, Args, RBracLoc);
+ Method, Args, RBracLoc, isImplicit);
}
ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
@@ -2833,11 +3177,19 @@ ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
ArrayRef<SourceLocation> SelLocs,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc) {
- SelectorLocationsKind SelLocsK;
- ObjCMessageExpr *Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
- SelLocs, SelLocsK, Method, Args, RBracLoc);
+ SelLocs, SelLocsK, Method, Args, RBracLoc,
+ isImplicit);
}
ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
@@ -2848,11 +3200,19 @@ ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
ArrayRef<SourceLocation> SelLocs,
ObjCMethodDecl *Method,
ArrayRef<Expr *> Args,
- SourceLocation RBracLoc) {
- SelectorLocationsKind SelLocsK;
- ObjCMessageExpr *Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
- SelLocs, SelLocsK, Method, Args, RBracLoc);
+ SelLocs, SelLocsK, Method, Args, RBracLoc,
+ isImplicit);
}
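
All three Create overloads share one allocation strategy: a single buffer holds the node plus its trailing argument and selector-location arrays, and an implicit message reserves no selector-location tail at all. A minimal standalone sketch of that pattern (the names here are illustrative, not Clang's API):

    #include <cstddef>
    #include <new>

    struct Node {
      unsigned NumArgs;
      explicit Node(unsigned N) : NumArgs(N) {}

      // Trailing storage begins immediately after the node itself.
      void **args() { return reinterpret_cast<void **>(this + 1); }

      static Node *create(unsigned numArgs, unsigned numSelLocs) {
        std::size_t size = sizeof(Node) + numArgs * sizeof(void *)
                                        + numSelLocs * sizeof(unsigned);
        void *mem = ::operator new(size); // stands in for ASTContext allocation
        return new (mem) Node(numArgs);   // placement-new into the buffer
      }
    };

For an implicit message the arity-only alloc overload is called with zero selector locations, so no tail bytes are reserved for them.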
ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(ASTContext &Context,
@@ -2902,7 +3262,7 @@ SourceRange ObjCMessageExpr::getReceiverRange() const {
return getSuperLoc();
}
- return SourceLocation();
+ llvm_unreachable("Invalid ReceiverKind!");
}
Selector ObjCMessageExpr::getSelector() const {
@@ -2951,8 +3311,8 @@ StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
case OBC_BridgeRetained:
return "__bridge_retained";
}
-
- return "__bridge";
+
+ llvm_unreachable("Invalid BridgeKind!");
}
bool ChooseExpr::isConditionTrue(const ASTContext &C) const {
@@ -3225,11 +3585,10 @@ void DesignatedInitExpr::ExpandDesignator(ASTContext &C, unsigned Idx,
ParenListExpr::ParenListExpr(ASTContext& C, SourceLocation lparenloc,
Expr **exprs, unsigned nexprs,
- SourceLocation rparenloc, QualType T)
- : Expr(ParenListExprClass, T, VK_RValue, OK_Ordinary,
+ SourceLocation rparenloc)
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
false, false, false, false),
NumExprs(nexprs), LParenLoc(lparenloc), RParenLoc(rparenloc) {
- assert(!T.isNull() && "ParenListExpr must have a valid type");
Exprs = new (C) Stmt*[nexprs];
for (unsigned i = 0; i != nexprs; ++i) {
if (exprs[i]->isTypeDependent())
@@ -3256,6 +3615,72 @@ const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
return cast<OpaqueValueExpr>(e);
}
+PseudoObjectExpr *PseudoObjectExpr::Create(ASTContext &Context, EmptyShell sh,
+ unsigned numSemanticExprs) {
+ void *buffer = Context.Allocate(sizeof(PseudoObjectExpr) +
+ (1 + numSemanticExprs) * sizeof(Expr*),
+ llvm::alignOf<PseudoObjectExpr>());
+ return new(buffer) PseudoObjectExpr(sh, numSemanticExprs);
+}
+
+PseudoObjectExpr::PseudoObjectExpr(EmptyShell shell, unsigned numSemanticExprs)
+ : Expr(PseudoObjectExprClass, shell) {
+ PseudoObjectExprBits.NumSubExprs = numSemanticExprs + 1;
+}
+
+PseudoObjectExpr *PseudoObjectExpr::Create(ASTContext &C, Expr *syntax,
+ ArrayRef<Expr*> semantics,
+ unsigned resultIndex) {
+ assert(syntax && "no syntactic expression!");
+ assert(semantics.size() && "no semantic expressions!");
+
+ QualType type;
+ ExprValueKind VK;
+ if (resultIndex == NoResult) {
+ type = C.VoidTy;
+ VK = VK_RValue;
+ } else {
+ assert(resultIndex < semantics.size());
+ type = semantics[resultIndex]->getType();
+ VK = semantics[resultIndex]->getValueKind();
+ assert(semantics[resultIndex]->getObjectKind() == OK_Ordinary);
+ }
+
+ void *buffer = C.Allocate(sizeof(PseudoObjectExpr) +
+ (1 + semantics.size()) * sizeof(Expr*),
+ llvm::alignOf<PseudoObjectExpr>());
+ return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics,
+ resultIndex);
+}
+
+PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
+ Expr *syntax, ArrayRef<Expr*> semantics,
+ unsigned resultIndex)
+ : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary,
+ /*filled in at end of ctor*/ false, false, false, false) {
+ PseudoObjectExprBits.NumSubExprs = semantics.size() + 1;
+ PseudoObjectExprBits.ResultIndex = resultIndex + 1;
+
+ for (unsigned i = 0, e = semantics.size() + 1; i != e; ++i) {
+ Expr *E = (i == 0 ? syntax : semantics[i-1]);
+ getSubExprsBuffer()[i] = E;
+
+ if (E->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (E->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (E->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (E->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ if (isa<OpaqueValueExpr>(E))
+ assert(cast<OpaqueValueExpr>(E)->getSourceExpr() != 0 &&
+ "opaque-value semantic expressions for pseudo-object "
+ "operations must have sources");
+ }
+}
+
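
Storing resultIndex + 1 lets a single unsigned bitfield encode "no result" as 0; note that NoResult is ~0U, so NoResult + 1 wraps to exactly 0. A minimal sketch of the encoding (illustrative):

    #include <cassert>

    struct OptionalIndex {
      static const unsigned None = ~0u;
      unsigned Stored;                             // 0 means "no result"

      void set(unsigned Idx) { Stored = Idx + 1; } // None wraps to 0
      bool hasValue() const { return Stored != 0; }
      unsigned get() const { assert(hasValue()); return Stored - 1; }
    };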
//===----------------------------------------------------------------------===//
// ExprIterator.
//===----------------------------------------------------------------------===//
@@ -3298,24 +3723,117 @@ Stmt::child_range ObjCMessageExpr::children() {
reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
}
-// Blocks
-BlockDeclRefExpr::BlockDeclRefExpr(VarDecl *d, QualType t, ExprValueKind VK,
- SourceLocation l, bool ByRef,
- bool constAdded)
- : Expr(BlockDeclRefExprClass, t, VK, OK_Ordinary, false, false, false,
- d->isParameterPack()),
- D(d), Loc(l), IsByRef(ByRef), ConstQualAdded(constAdded)
+ObjCArrayLiteral::ObjCArrayLiteral(llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl *Method,
+ SourceRange SR)
+ : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method)
{
- bool TypeDependent = false;
- bool ValueDependent = false;
- bool InstantiationDependent = false;
- computeDeclRefDependence(D, getType(), TypeDependent, ValueDependent,
- InstantiationDependent);
- ExprBits.TypeDependent = TypeDependent;
- ExprBits.ValueDependent = ValueDependent;
- ExprBits.InstantiationDependent = InstantiationDependent;
+ Expr **SaveElements = getElements();
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
+ if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Elements[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Elements[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SaveElements[I] = Elements[I];
+ }
}
+ObjCArrayLiteral *ObjCArrayLiteral::Create(ASTContext &C,
+ llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl * Method,
+ SourceRange SR) {
+ void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
+ + Elements.size() * sizeof(Expr *));
+ return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
+}
+
+ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(ASTContext &C,
+ unsigned NumElements) {
+
+ void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
+ + NumElements * sizeof(Expr *));
+ return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
+}
+
+ObjCDictionaryLiteral::ObjCDictionaryLiteral(
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR)
+ : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
+ DictWithObjectsMethod(method)
+{
+ KeyValuePair *KeyValues = getKeyValues();
+ ExpansionData *Expansions = getExpansionData();
+ for (unsigned I = 0; I < NumElements; I++) {
+ if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
+ VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (VK[I].Key->isInstantiationDependent() ||
+ VK[I].Value->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (VK[I].EllipsisLoc.isInvalid() &&
+ (VK[I].Key->containsUnexpandedParameterPack() ||
+ VK[I].Value->containsUnexpandedParameterPack()))
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ KeyValues[I].Key = VK[I].Key;
+ KeyValues[I].Value = VK[I].Value;
+ if (Expansions) {
+ Expansions[I].EllipsisLoc = VK[I].EllipsisLoc;
+ if (VK[I].NumExpansions)
+ Expansions[I].NumExpansionsPlusOne = *VK[I].NumExpansions + 1;
+ else
+ Expansions[I].NumExpansionsPlusOne = 0;
+ }
+ }
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::Create(ASTContext &C,
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR) {
+ unsigned ExpansionsSize = 0;
+ if (HasPackExpansions)
+ ExpansionsSize = sizeof(ExpansionData) * VK.size();
+
+ void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
+ sizeof(KeyValuePair) * VK.size() + ExpansionsSize);
+ return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::CreateEmpty(ASTContext &C, unsigned NumElements,
+ bool HasPackExpansions) {
+ unsigned ExpansionsSize = 0;
+ if (HasPackExpansions)
+ ExpansionsSize = sizeof(ExpansionData) * NumElements;
+ void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
+ sizeof(KeyValuePair) * NumElements + ExpansionsSize);
+ return new (Mem) ObjCDictionaryLiteral(EmptyShell(), NumElements,
+ HasPackExpansions);
+}
+
+ObjCSubscriptRefExpr *ObjCSubscriptRefExpr::Create(ASTContext &C,
+ Expr *base,
+ Expr *key, QualType T,
+ ObjCMethodDecl *getMethod,
+ ObjCMethodDecl *setMethod,
+ SourceLocation RB) {
+ void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr));
+ return new (Mem) ObjCSubscriptRefExpr(base, key, T, VK_LValue,
+ OK_ObjCSubscript,
+ getMethod, setMethod, RB);
+}
AtomicExpr::AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr,
QualType t, AtomicOp op, SourceLocation RP)
@@ -3323,6 +3841,7 @@ AtomicExpr::AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr,
false, false, false, false),
NumSubExprs(nexpr), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
{
+ assert(nexpr == getNumSubExprs(op) && "wrong number of subexpressions");
for (unsigned i = 0; i < nexpr; i++) {
if (args[i]->isTypeDependent())
ExprBits.TypeDependent = true;
@@ -3336,3 +3855,49 @@ AtomicExpr::AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr,
SubExprs[i] = args[i];
}
}
+
+unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
+ switch (Op) {
+ case AO__c11_atomic_init:
+ case AO__c11_atomic_load:
+ case AO__atomic_load_n:
+ return 2;
+
+ case AO__c11_atomic_store:
+ case AO__c11_atomic_exchange:
+ case AO__atomic_load:
+ case AO__atomic_store:
+ case AO__atomic_store_n:
+ case AO__atomic_exchange_n:
+ case AO__c11_atomic_fetch_add:
+ case AO__c11_atomic_fetch_sub:
+ case AO__c11_atomic_fetch_and:
+ case AO__c11_atomic_fetch_or:
+ case AO__c11_atomic_fetch_xor:
+ case AO__atomic_fetch_add:
+ case AO__atomic_fetch_sub:
+ case AO__atomic_fetch_and:
+ case AO__atomic_fetch_or:
+ case AO__atomic_fetch_xor:
+ case AO__atomic_fetch_nand:
+ case AO__atomic_add_fetch:
+ case AO__atomic_sub_fetch:
+ case AO__atomic_and_fetch:
+ case AO__atomic_or_fetch:
+ case AO__atomic_xor_fetch:
+ case AO__atomic_nand_fetch:
+ return 3;
+
+ case AO__atomic_exchange:
+ return 4;
+
+ case AO__c11_atomic_compare_exchange_strong:
+ case AO__c11_atomic_compare_exchange_weak:
+ return 5;
+
+ case AO__atomic_compare_exchange:
+ case AO__atomic_compare_exchange_n:
+ return 6;
+ }
+ llvm_unreachable("unknown atomic op");
+}
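
The table mirrors the source-level arity of each builtin (object pointer, value operands, return slot, and memory-order arguments); for reference:

    // Illustrative source-level forms and their sub-expression counts:
    //   __c11_atomic_load(obj, order)                              -> 2
    //   __c11_atomic_store(obj, desired, order)                    -> 3
    //   __atomic_exchange(ptr, val, ret, order)                    -> 4
    //   __c11_atomic_compare_exchange_strong(obj, expected,
    //                                        desired, succ, fail)  -> 5
    //   __atomic_compare_exchange(ptr, expected, desired,
    //                             weak, succ, fail)                -> 6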
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
index ad5ec8b..8cf519c 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
@@ -45,30 +45,26 @@ SourceRange CXXScalarValueInitExpr::getSourceRange() const {
// CXXNewExpr
CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
- Expr **placementArgs, unsigned numPlaceArgs,
- SourceRange TypeIdParens, Expr *arraySize,
- CXXConstructorDecl *constructor, bool initializer,
- Expr **constructorArgs, unsigned numConsArgs,
- bool HadMultipleCandidates,
FunctionDecl *operatorDelete,
- bool usualArrayDeleteWantsSize, QualType ty,
- TypeSourceInfo *AllocatedTypeInfo,
- SourceLocation startLoc, SourceLocation endLoc,
- SourceLocation constructorLParen,
- SourceLocation constructorRParen)
+ bool usualArrayDeleteWantsSize,
+ Expr **placementArgs, unsigned numPlaceArgs,
+ SourceRange typeIdParens, Expr *arraySize,
+ InitializationStyle initializationStyle,
+ Expr *initializer, QualType ty,
+ TypeSourceInfo *allocatedTypeInfo,
+ SourceLocation startLoc, SourceRange directInitRange)
: Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary,
ty->isDependentType(), ty->isDependentType(),
ty->isInstantiationDependentType(),
ty->containsUnexpandedParameterPack()),
- GlobalNew(globalNew), Initializer(initializer),
- UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize),
- HadMultipleCandidates(HadMultipleCandidates),
- SubExprs(0), OperatorNew(operatorNew),
- OperatorDelete(operatorDelete), Constructor(constructor),
- AllocatedTypeInfo(AllocatedTypeInfo), TypeIdParens(TypeIdParens),
- StartLoc(startLoc), EndLoc(endLoc), ConstructorLParen(constructorLParen),
- ConstructorRParen(constructorRParen) {
- AllocateArgsArray(C, arraySize != 0, numPlaceArgs, numConsArgs);
+ SubExprs(0), OperatorNew(operatorNew), OperatorDelete(operatorDelete),
+ AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens),
+ StartLoc(startLoc), DirectInitRange(directInitRange),
+ GlobalNew(globalNew), UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
+ assert((initializer != 0 || initializationStyle == NoInit) &&
+ "Only NoInit can have no initializer.");
+ StoredInitializationStyle = initializer ? initializationStyle + 1 : 0;
+ AllocateArgsArray(C, arraySize != 0, numPlaceArgs, initializer != 0);
unsigned i = 0;
if (Array) {
if (arraySize->isInstantiationDependent())
@@ -80,33 +76,33 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
SubExprs[i++] = arraySize;
}
- for (unsigned j = 0; j < NumPlacementArgs; ++j) {
- if (placementArgs[j]->isInstantiationDependent())
+ if (initializer) {
+ if (initializer->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
- if (placementArgs[j]->containsUnexpandedParameterPack())
+
+ if (initializer->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
- SubExprs[i++] = placementArgs[j];
+ SubExprs[i++] = initializer;
}
- for (unsigned j = 0; j < NumConstructorArgs; ++j) {
- if (constructorArgs[j]->isInstantiationDependent())
+ for (unsigned j = 0; j < NumPlacementArgs; ++j) {
+ if (placementArgs[j]->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
- if (constructorArgs[j]->containsUnexpandedParameterPack())
+ if (placementArgs[j]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
- SubExprs[i++] = constructorArgs[j];
+ SubExprs[i++] = placementArgs[j];
}
}
void CXXNewExpr::AllocateArgsArray(ASTContext &C, bool isArray,
- unsigned numPlaceArgs, unsigned numConsArgs){
+ unsigned numPlaceArgs, bool hasInitializer){
assert(SubExprs == 0 && "SubExprs already allocated");
Array = isArray;
NumPlacementArgs = numPlaceArgs;
- NumConstructorArgs = numConsArgs;
-
- unsigned TotalSize = Array + NumPlacementArgs + NumConstructorArgs;
+
+ unsigned TotalSize = Array + hasInitializer + NumPlacementArgs;
SubExprs = new (C) Stmt*[TotalSize];
}
@@ -115,6 +111,18 @@ bool CXXNewExpr::shouldNullCheckAllocation(ASTContext &Ctx) const {
castAs<FunctionProtoType>()->isNothrow(Ctx);
}
+SourceLocation CXXNewExpr::getEndLoc() const {
+ switch (getInitializationStyle()) {
+ case NoInit:
+ return AllocatedTypeInfo->getTypeLoc().getEndLoc();
+ case CallInit:
+ return DirectInitRange.getEnd();
+ case ListInit:
+ return getInitializer()->getSourceRange().getEnd();
+ }
+ llvm_unreachable("bogus initialization style");
+}
+
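
Two compact encodings appear in the rewritten constructor: the SubExprs array packs optional slots in a fixed order, and the initialization style is biased by one so that 0 can mean "no initializer". A sketch of the indexing (illustrative, not Clang's API):

    struct NewExprLayout {
      bool Array;           // optional array-size slot present?
      bool HasInitializer;  // optional initializer slot present?

      // SubExprs layout: [arraySize?][initializer?][placement args...]
      unsigned initializerIndex() const { return Array ? 1u : 0u; }
      unsigned placementBegin() const {
        return unsigned(Array) + unsigned(HasInitializer);
      }
    };
    // StoredInitializationStyle = initializer ? style + 1 : 0, so the
    // getter can decode a stored 0 back to NoInit.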
// CXXDeleteExpr
QualType CXXDeleteExpr::getDestroyedType() const {
const Expr *Arg = getArgument();
@@ -191,40 +199,45 @@ SourceRange CXXPseudoDestructorExpr::getSourceRange() const {
return SourceRange(Base->getLocStart(), End);
}
-
// UnresolvedLookupExpr
UnresolvedLookupExpr *
UnresolvedLookupExpr::Create(ASTContext &C,
CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool ADL,
- const TemplateArgumentListInfo &Args,
- UnresolvedSetIterator Begin,
- UnresolvedSetIterator End)
+ const TemplateArgumentListInfo *Args,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
{
- void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) +
- ASTTemplateArgumentListInfo::sizeFor(Args));
- return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc, NameInfo,
- ADL, /*Overload*/ true, &Args,
+ assert(Args || TemplateKWLoc.isValid());
+ unsigned num_args = Args ? Args->size() : 0;
+ void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) +
+ ASTTemplateKWAndArgsInfo::sizeFor(num_args));
+ return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
+ TemplateKWLoc, NameInfo,
+ ADL, /*Overload*/ true, Args,
Begin, End, /*StdIsAssociated=*/false);
}
UnresolvedLookupExpr *
-UnresolvedLookupExpr::CreateEmpty(ASTContext &C, bool HasExplicitTemplateArgs,
+UnresolvedLookupExpr::CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
std::size_t size = sizeof(UnresolvedLookupExpr);
- if (HasExplicitTemplateArgs)
- size += ASTTemplateArgumentListInfo::sizeFor(NumTemplateArgs);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedLookupExpr>());
UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
- E->HasExplicitTemplateArgs = HasExplicitTemplateArgs;
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
}
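
The migration from ASTTemplateArgumentListInfo to ASTTemplateKWAndArgsInfo means tail storage is now needed in two cases: explicit template arguments, or merely a bare 'template' keyword location. A sketch of the sizing rule (sizeFor stands in for ASTTemplateKWAndArgsInfo::sizeFor):

    #include <cstddef>

    std::size_t extraBytes(bool hasTemplateKW, bool hasArgs, unsigned numArgs,
                           std::size_t (*sizeFor)(unsigned)) {
      if (!hasArgs && !hasTemplateKW)
        return 0;                            // no tail storage at all
      return sizeFor(hasArgs ? numArgs : 0); // bare keyword: header only
    }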
OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin,
@@ -243,8 +256,9 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
(QualifierLoc &&
QualifierLoc.getNestedNameSpecifier()
->containsUnexpandedParameterPack()))),
- Results(0), NumResults(End - Begin), NameInfo(NameInfo),
- QualifierLoc(QualifierLoc), HasExplicitTemplateArgs(TemplateArgs != 0)
+ NameInfo(NameInfo), QualifierLoc(QualifierLoc),
+ Results(0), NumResults(End - Begin),
+ HasTemplateKWAndArgsInfo(TemplateArgs != 0 || TemplateKWLoc.isValid())
{
NumResults = End - Begin;
if (NumResults) {
@@ -271,9 +285,10 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
bool Dependent = false;
bool InstantiationDependent = false;
bool ContainsUnexpandedParameterPack = false;
- getExplicitTemplateArgs().initializeFrom(*TemplateArgs, Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
if (Dependent) {
ExprBits.TypeDependent = true;
@@ -283,6 +298,8 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
ExprBits.InstantiationDependent = true;
if (ContainsUnexpandedParameterPack)
ExprBits.ContainsUnexpandedParameterPack = true;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
if (isTypeDependent())
@@ -314,6 +331,7 @@ CXXRecordDecl *OverloadExpr::getNamingClass() const {
// DependentScopeDeclRefExpr
DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *Args)
: Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary,
@@ -326,47 +344,52 @@ DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
QualifierLoc.getNestedNameSpecifier()
->containsUnexpandedParameterPack()))),
QualifierLoc(QualifierLoc), NameInfo(NameInfo),
- HasExplicitTemplateArgs(Args != 0)
+ HasTemplateKWAndArgsInfo(Args != 0 || TemplateKWLoc.isValid())
{
if (Args) {
bool Dependent = true;
bool InstantiationDependent = true;
bool ContainsUnexpandedParameterPack
= ExprBits.ContainsUnexpandedParameterPack;
-
- reinterpret_cast<ASTTemplateArgumentListInfo*>(this+1)
- ->initializeFrom(*Args, Dependent, InstantiationDependent,
- ContainsUnexpandedParameterPack);
-
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *Args,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
}
DependentScopeDeclRefExpr *
DependentScopeDeclRefExpr::Create(ASTContext &C,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *Args) {
std::size_t size = sizeof(DependentScopeDeclRefExpr);
if (Args)
- size += ASTTemplateArgumentListInfo::sizeFor(*Args);
+ size += ASTTemplateKWAndArgsInfo::sizeFor(Args->size());
+ else if (TemplateKWLoc.isValid())
+ size += ASTTemplateKWAndArgsInfo::sizeFor(0);
void *Mem = C.Allocate(size);
- return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc,
- NameInfo, Args);
+ return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc,
+ TemplateKWLoc, NameInfo, Args);
}
DependentScopeDeclRefExpr *
DependentScopeDeclRefExpr::CreateEmpty(ASTContext &C,
- bool HasExplicitTemplateArgs,
+ bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
std::size_t size = sizeof(DependentScopeDeclRefExpr);
- if (HasExplicitTemplateArgs)
- size += ASTTemplateArgumentListInfo::sizeFor(NumTemplateArgs);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
void *Mem = C.Allocate(size);
- DependentScopeDeclRefExpr *E
+ DependentScopeDeclRefExpr *E
= new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
+ SourceLocation(),
DeclarationNameInfo(), 0);
- E->HasExplicitTemplateArgs = HasExplicitTemplateArgs;
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
}
@@ -601,6 +624,39 @@ CXXFunctionalCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize);
}
+UserDefinedLiteral::LiteralOperatorKind
+UserDefinedLiteral::getLiteralOperatorKind() const {
+ if (getNumArgs() == 0)
+ return LOK_Template;
+ if (getNumArgs() == 2)
+ return LOK_String;
+
+ assert(getNumArgs() == 1 && "unexpected #args in literal operator call");
+ QualType ParamTy =
+ cast<FunctionDecl>(getCalleeDecl())->getParamDecl(0)->getType();
+ if (ParamTy->isPointerType())
+ return LOK_Raw;
+ if (ParamTy->isAnyCharacterType())
+ return LOK_Character;
+ if (ParamTy->isIntegerType())
+ return LOK_Integer;
+ if (ParamTy->isFloatingType())
+ return LOK_Floating;
+
+ llvm_unreachable("unknown kind of literal operator");
+}
+
+Expr *UserDefinedLiteral::getCookedLiteral() {
+#ifndef NDEBUG
+ LiteralOperatorKind LOK = getLiteralOperatorKind();
+ assert(LOK != LOK_Template && LOK != LOK_Raw && "not a cooked literal");
+#endif
+ return getArg(0);
+}
+
+const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
+ return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
+}
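
Each kind corresponds to one of the forms a C++11 literal operator may take; illustrative declarations (distinct suffixes for clarity):

    #include <cstddef>

    template <char...> int operator"" _t();       // LOK_Template: no args
    int operator"" _r(const char *);              // LOK_Raw: pointer param
    int operator"" _c(char);                      // LOK_Character
    int operator"" _i(unsigned long long);        // LOK_Integer
    int operator"" _f(long double);               // LOK_Floating
    int operator"" _s(const char *, std::size_t); // LOK_String: two args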
CXXDefaultArgExpr *
CXXDefaultArgExpr::Create(ASTContext &C, SourceLocation Loc,
@@ -618,8 +674,9 @@ CXXTemporary *CXXTemporary::Create(ASTContext &C,
CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(ASTContext &C,
CXXTemporary *Temp,
Expr* SubExpr) {
- assert(SubExpr->getType()->isRecordType() &&
- "Expression bound to a temporary must have record type!");
+ assert((SubExpr->getType()->isRecordType() ||
+ SubExpr->getType()->isArrayType()) &&
+ "Expression bound to a temporary must have record or array type!");
return new (C) CXXBindTemporaryExpr(Temp, SubExpr);
}
@@ -636,7 +693,7 @@ CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(ASTContext &C,
Type->getType().getNonReferenceType(),
Type->getTypeLoc().getBeginLoc(),
Cons, false, Args, NumArgs,
- HadMultipleCandidates, ZeroInitialization,
+ HadMultipleCandidates, /*FIXME*/false, ZeroInitialization,
CXXConstructExpr::CK_Complete, parenRange),
Type(Type) {
}
@@ -651,13 +708,15 @@ CXXConstructExpr *CXXConstructExpr::Create(ASTContext &C, QualType T,
CXXConstructorDecl *D, bool Elidable,
Expr **Args, unsigned NumArgs,
bool HadMultipleCandidates,
+ bool ListInitialization,
bool ZeroInitialization,
ConstructionKind ConstructKind,
SourceRange ParenRange) {
return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc, D,
Elidable, Args, NumArgs,
- HadMultipleCandidates, ZeroInitialization,
- ConstructKind, ParenRange);
+ HadMultipleCandidates, ListInitialization,
+ ZeroInitialization, ConstructKind,
+ ParenRange);
}
CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
@@ -665,6 +724,7 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
CXXConstructorDecl *D, bool elidable,
Expr **args, unsigned numargs,
bool HadMultipleCandidates,
+ bool ListInitialization,
bool ZeroInitialization,
ConstructionKind ConstructKind,
SourceRange ParenRange)
@@ -674,6 +734,7 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
T->containsUnexpandedParameterPack()),
Constructor(D), Loc(Loc), ParenRange(ParenRange), NumArgs(numargs),
Elidable(elidable), HadMultipleCandidates(HadMultipleCandidates),
+ ListInitialization(ListInitialization),
ZeroInitialization(ZeroInitialization),
ConstructKind(ConstructKind), Args(0)
{
@@ -695,35 +756,228 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
}
}
-ExprWithCleanups::ExprWithCleanups(ASTContext &C,
- Expr *subexpr,
- CXXTemporary **temps,
- unsigned numtemps)
+LambdaExpr::Capture::Capture(SourceLocation Loc, bool Implicit,
+ LambdaCaptureKind Kind, VarDecl *Var,
+ SourceLocation EllipsisLoc)
+ : VarAndBits(Var, 0), Loc(Loc), EllipsisLoc(EllipsisLoc)
+{
+ unsigned Bits = 0;
+ if (Implicit)
+ Bits |= Capture_Implicit;
+
+ switch (Kind) {
+ case LCK_This:
+ assert(Var == 0 && "'this' capture cannot have a variable!");
+ break;
+
+ case LCK_ByCopy:
+ Bits |= Capture_ByCopy;
+ // Fall through
+ case LCK_ByRef:
+ assert(Var && "capture must have a variable!");
+ break;
+ }
+ VarAndBits.setInt(Bits);
+}
+
+LambdaCaptureKind LambdaExpr::Capture::getCaptureKind() const {
+ if (capturesThis())
+ return LCK_This;
+
+ return (VarAndBits.getInt() & Capture_ByCopy)? LCK_ByCopy : LCK_ByRef;
+}
+
+LambdaExpr::LambdaExpr(QualType T,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace)
+ : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), T->isDependentType(), T->isDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ IntroducerRange(IntroducerRange),
+ NumCaptures(Captures.size()),
+ CaptureDefault(CaptureDefault),
+ ExplicitParams(ExplicitParams),
+ ExplicitResultType(ExplicitResultType),
+ ClosingBrace(ClosingBrace)
+{
+ assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
+ CXXRecordDecl *Class = getLambdaClass();
+ CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();
+
+ // FIXME: Propagate "has unexpanded parameter pack" bit.
+
+ // Copy captures.
+ ASTContext &Context = Class->getASTContext();
+ Data.NumCaptures = NumCaptures;
+ Data.NumExplicitCaptures = 0;
+ Data.Captures = (Capture *)Context.Allocate(sizeof(Capture) * NumCaptures);
+ Capture *ToCapture = Data.Captures;
+ for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
+ if (Captures[I].isExplicit())
+ ++Data.NumExplicitCaptures;
+
+ *ToCapture++ = Captures[I];
+ }
+
+ // Copy initialization expressions for the non-static data members.
+ Stmt **Stored = getStoredStmts();
+ for (unsigned I = 0, N = CaptureInits.size(); I != N; ++I)
+ *Stored++ = CaptureInits[I];
+
+ // Copy the body of the lambda.
+ *Stored++ = getCallOperator()->getBody();
+
+ // Copy the array index variables, if any.
+ HasArrayIndexVars = !ArrayIndexVars.empty();
+ if (HasArrayIndexVars) {
+ assert(ArrayIndexStarts.size() == NumCaptures);
+ memcpy(getArrayIndexVars(), ArrayIndexVars.data(),
+ sizeof(VarDecl *) * ArrayIndexVars.size());
+ memcpy(getArrayIndexStarts(), ArrayIndexStarts.data(),
+ sizeof(unsigned) * Captures.size());
+ getArrayIndexStarts()[Captures.size()] = ArrayIndexVars.size();
+ }
+}
+
+LambdaExpr *LambdaExpr::Create(ASTContext &Context,
+ CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace) {
+ // Determine the type of the expression (i.e., the type of the
+ // function object we're creating).
+ QualType T = Context.getTypeDeclType(Class);
+
+ unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (Captures.size() + 1);
+ if (!ArrayIndexVars.empty())
+ Size += sizeof(VarDecl *) * ArrayIndexVars.size()
+ + sizeof(unsigned) * (Captures.size() + 1);
+ void *Mem = Context.Allocate(Size);
+ return new (Mem) LambdaExpr(T, IntroducerRange, CaptureDefault,
+ Captures, ExplicitParams, ExplicitResultType,
+ CaptureInits, ArrayIndexVars, ArrayIndexStarts,
+ ClosingBrace);
+}
+
+LambdaExpr *LambdaExpr::CreateDeserialized(ASTContext &C, unsigned NumCaptures,
+ unsigned NumArrayIndexVars) {
+ unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (NumCaptures + 1);
+ if (NumArrayIndexVars)
+ Size += sizeof(VarDecl *) * NumArrayIndexVars
+ + sizeof(unsigned) * (NumCaptures + 1);
+ void *Mem = C.Allocate(Size);
+ return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0);
+}
+
+LambdaExpr::capture_iterator LambdaExpr::capture_begin() const {
+ return getLambdaClass()->getLambdaData().Captures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::capture_end() const {
+ return capture_begin() + NumCaptures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::explicit_capture_begin() const {
+ return capture_begin();
+}
+
+LambdaExpr::capture_iterator LambdaExpr::explicit_capture_end() const {
+ struct CXXRecordDecl::LambdaDefinitionData &Data
+ = getLambdaClass()->getLambdaData();
+ return Data.Captures + Data.NumExplicitCaptures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::implicit_capture_begin() const {
+ return explicit_capture_end();
+}
+
+LambdaExpr::capture_iterator LambdaExpr::implicit_capture_end() const {
+ return capture_end();
+}
+
+ArrayRef<VarDecl *>
+LambdaExpr::getCaptureInitIndexVars(capture_init_iterator Iter) const {
+ assert(HasArrayIndexVars && "No array index-var data?");
+
+ unsigned Index = Iter - capture_init_begin();
+ assert(Index < getLambdaClass()->getLambdaData().NumCaptures &&
+ "Capture index out-of-range");
+ VarDecl **IndexVars = getArrayIndexVars();
+ unsigned *IndexStarts = getArrayIndexStarts();
+ return ArrayRef<VarDecl *>(IndexVars + IndexStarts[Index],
+ IndexVars + IndexStarts[Index + 1]);
+}
+
+CXXRecordDecl *LambdaExpr::getLambdaClass() const {
+ return getType()->getAsCXXRecordDecl();
+}
+
+CXXMethodDecl *LambdaExpr::getCallOperator() const {
+ CXXRecordDecl *Record = getLambdaClass();
+ DeclarationName Name
+ = Record->getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_result Calls = Record->lookup(Name);
+ assert(Calls.first != Calls.second && "Missing lambda call operator!");
+ CXXMethodDecl *Result = cast<CXXMethodDecl>(*Calls.first++);
+ assert(Calls.first == Calls.second && "More than one lambda call operator?");
+ return Result;
+}
+
+CompoundStmt *LambdaExpr::getBody() const {
+ if (!getStoredStmts()[NumCaptures])
+ getStoredStmts()[NumCaptures] = getCallOperator()->getBody();
+
+ return reinterpret_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
+}
+
+bool LambdaExpr::isMutable() const {
+ return (getCallOperator()->getTypeQualifiers() & Qualifiers::Const) == 0;
+}
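
getCaptureInitIndexVars above slices one flat VarDecl* array with an N+1-entry starts table; the extra entry written at Captures.size() in the constructor makes every group [Starts[i], Starts[i+1]) well defined. A generic sketch of the idiom:

    #include <utility>
    #include <vector>

    // N variable-length groups stored flat, addressed via N+1 offsets.
    struct FlatGroups {
      std::vector<int> Items;        // all elements, concatenated
      std::vector<unsigned> Starts;  // size N+1; Starts[N] == Items.size()

      std::pair<const int *, const int *> group(unsigned I) const {
        return {Items.data() + Starts[I], Items.data() + Starts[I + 1]};
      }
    };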
+
+ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
+ ArrayRef<CleanupObject> objects)
: Expr(ExprWithCleanupsClass, subexpr->getType(),
subexpr->getValueKind(), subexpr->getObjectKind(),
subexpr->isTypeDependent(), subexpr->isValueDependent(),
subexpr->isInstantiationDependent(),
subexpr->containsUnexpandedParameterPack()),
- SubExpr(subexpr), Temps(0), NumTemps(0) {
- if (numtemps) {
- setNumTemporaries(C, numtemps);
- for (unsigned i = 0; i != numtemps; ++i)
- Temps[i] = temps[i];
- }
+ SubExpr(subexpr) {
+ ExprWithCleanupsBits.NumObjects = objects.size();
+ for (unsigned i = 0, e = objects.size(); i != e; ++i)
+ getObjectsBuffer()[i] = objects[i];
}
-void ExprWithCleanups::setNumTemporaries(ASTContext &C, unsigned N) {
- assert(Temps == 0 && "Cannot resize with this");
- NumTemps = N;
- Temps = new (C) CXXTemporary*[NumTemps];
+ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C, Expr *subexpr,
+ ArrayRef<CleanupObject> objects) {
+ size_t size = sizeof(ExprWithCleanups)
+ + objects.size() * sizeof(CleanupObject);
+ void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
+ return new (buffer) ExprWithCleanups(subexpr, objects);
}
+ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
+ : Expr(ExprWithCleanupsClass, empty) {
+ ExprWithCleanupsBits.NumObjects = numObjects;
+}
-ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C,
- Expr *SubExpr,
- CXXTemporary **Temps,
- unsigned NumTemps) {
- return new (C) ExprWithCleanups(C, SubExpr, Temps, NumTemps);
+ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C, EmptyShell empty,
+ unsigned numObjects) {
+ size_t size = sizeof(ExprWithCleanups) + numObjects * sizeof(CleanupObject);
+ void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
+ return new (buffer) ExprWithCleanups(empty, numObjects);
}
CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
@@ -781,7 +1035,8 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
Expr *Base, QualType BaseType,
bool IsArrow,
SourceLocation OperatorLoc,
- NestedNameSpecifierLoc QualifierLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierFoundInScope,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs)
@@ -793,7 +1048,7 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
->containsUnexpandedParameterPack()) ||
MemberNameInfo.containsUnexpandedParameterPack())),
Base(Base), BaseType(BaseType), IsArrow(IsArrow),
- HasExplicitTemplateArgs(TemplateArgs != 0),
+ HasTemplateKWAndArgsInfo(TemplateArgs != 0 || TemplateKWLoc.isValid()),
OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
FirstQualifierFoundInScope(FirstQualifierFoundInScope),
MemberNameInfo(MemberNameInfo) {
@@ -801,11 +1056,14 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
bool Dependent = true;
bool InstantiationDependent = true;
bool ContainsUnexpandedParameterPack = false;
- getExplicitTemplateArgs().initializeFrom(*TemplateArgs, Dependent,
- InstantiationDependent,
- ContainsUnexpandedParameterPack);
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
if (ContainsUnexpandedParameterPack)
ExprBits.ContainsUnexpandedParameterPack = true;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
}
}
@@ -824,8 +1082,8 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
containsUnexpandedParameterPack()) ||
MemberNameInfo.containsUnexpandedParameterPack())),
Base(Base), BaseType(BaseType), IsArrow(IsArrow),
- HasExplicitTemplateArgs(false), OperatorLoc(OperatorLoc),
- QualifierLoc(QualifierLoc),
+ HasTemplateKWAndArgsInfo(false),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
FirstQualifierFoundInScope(FirstQualifierFoundInScope),
MemberNameInfo(MemberNameInfo) { }
@@ -834,47 +1092,50 @@ CXXDependentScopeMemberExpr::Create(ASTContext &C,
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierFoundInScope,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
- if (!TemplateArgs)
+ if (!TemplateArgs && !TemplateKWLoc.isValid())
return new (C) CXXDependentScopeMemberExpr(C, Base, BaseType,
IsArrow, OperatorLoc,
QualifierLoc,
FirstQualifierFoundInScope,
MemberNameInfo);
- std::size_t size = sizeof(CXXDependentScopeMemberExpr);
- if (TemplateArgs)
- size += ASTTemplateArgumentListInfo::sizeFor(*TemplateArgs);
+ unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0;
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr)
+ + ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
IsArrow, OperatorLoc,
QualifierLoc,
+ TemplateKWLoc,
FirstQualifierFoundInScope,
MemberNameInfo, TemplateArgs);
}
CXXDependentScopeMemberExpr *
CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C,
- bool HasExplicitTemplateArgs,
+ bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
- if (!HasExplicitTemplateArgs)
+ if (!HasTemplateKWAndArgsInfo)
return new (C) CXXDependentScopeMemberExpr(C, 0, QualType(),
- 0, SourceLocation(),
+ 0, SourceLocation(),
NestedNameSpecifierLoc(), 0,
DeclarationNameInfo());
std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
- ASTTemplateArgumentListInfo::sizeFor(NumTemplateArgs);
+ ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
CXXDependentScopeMemberExpr *E
= new (Mem) CXXDependentScopeMemberExpr(C, 0, QualType(),
- 0, SourceLocation(),
- NestedNameSpecifierLoc(), 0,
+ 0, SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0,
DeclarationNameInfo(), 0);
- E->HasExplicitTemplateArgs = true;
+ E->HasTemplateKWAndArgsInfo = true;
return E;
}
@@ -913,12 +1174,13 @@ UnresolvedMemberExpr::UnresolvedMemberExpr(ASTContext &C,
bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin,
UnresolvedSetIterator End)
- : OverloadExpr(UnresolvedMemberExprClass, C, QualifierLoc, MemberNameInfo,
- TemplateArgs, Begin, End,
+ : OverloadExpr(UnresolvedMemberExprClass, C, QualifierLoc, TemplateKWLoc,
+ MemberNameInfo, TemplateArgs, Begin, End,
// Dependent
((Base && Base->isTypeDependent()) ||
BaseType->isDependentType()),
@@ -949,31 +1211,34 @@ UnresolvedMemberExpr::Create(ASTContext &C,
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin,
UnresolvedSetIterator End) {
std::size_t size = sizeof(UnresolvedMemberExpr);
if (TemplateArgs)
- size += ASTTemplateArgumentListInfo::sizeFor(*TemplateArgs);
+ size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
+ else if (TemplateKWLoc.isValid())
+ size += ASTTemplateKWAndArgsInfo::sizeFor(0);
void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
return new (Mem) UnresolvedMemberExpr(C,
HasUnresolvedUsing, Base, BaseType,
- IsArrow, OperatorLoc, QualifierLoc,
+ IsArrow, OperatorLoc, QualifierLoc, TemplateKWLoc,
MemberNameInfo, TemplateArgs, Begin, End);
}
UnresolvedMemberExpr *
-UnresolvedMemberExpr::CreateEmpty(ASTContext &C, bool HasExplicitTemplateArgs,
+UnresolvedMemberExpr::CreateEmpty(ASTContext &C, bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
std::size_t size = sizeof(UnresolvedMemberExpr);
- if (HasExplicitTemplateArgs)
- size += ASTTemplateArgumentListInfo::sizeFor(NumTemplateArgs);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
- E->HasExplicitTemplateArgs = HasExplicitTemplateArgs;
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
}
@@ -1020,4 +1285,51 @@ TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
return TemplateArgument(Arguments, NumArguments);
}
+TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value)
+ : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false,
+ /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(Loc), RParenLoc(RParenLoc)
+{
+ TypeTraitExprBits.Kind = Kind;
+ TypeTraitExprBits.Value = Value;
+ TypeTraitExprBits.NumArgs = Args.size();
+
+ TypeSourceInfo **ToArgs = getTypeSourceInfos();
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isDependentType())
+ setValueDependent(true);
+ if (Args[I]->getType()->isInstantiationDependentType())
+ setInstantiationDependent(true);
+ if (Args[I]->getType()->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack(true);
+
+ ToArgs[I] = Args[I];
+ }
+}
+
+TypeTraitExpr *TypeTraitExpr::Create(ASTContext &C, QualType T,
+ SourceLocation Loc,
+ TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value) {
+ unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * Args.size();
+ void *Mem = C.Allocate(Size);
+ return new (Mem) TypeTraitExpr(T, Loc, Kind, Args, RParenLoc, Value);
+}
+
+TypeTraitExpr *TypeTraitExpr::CreateDeserialized(ASTContext &C,
+ unsigned NumArgs) {
+ unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * NumArgs;
+ void *Mem = C.Allocate(Size);
+ return new (Mem) TypeTraitExpr(EmptyShell());
+}
+void ArrayTypeTraitExpr::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
index 49c6821..b091e19 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
@@ -47,7 +47,6 @@ static Cl::Kinds ClassifyExprValueKind(const LangOptions &Lang,
return Cl::CL_XValue;
}
llvm_unreachable("Invalid value category of implicit cast.");
- return Cl::CL_PRValue;
}
Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
@@ -56,7 +55,7 @@ Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
Cl::Kinds kind = ClassifyInternal(Ctx, this);
// C99 6.3.2.1: An lvalue is an expression with an object type or an
// incomplete type other than void.
- if (!Ctx.getLangOptions().CPlusPlus) {
+ if (!Ctx.getLangOpts().CPlusPlus) {
// Thus, no functions.
if (TR->isFunctionType() || TR == Ctx.OverloadTy)
kind = Cl::CL_Function;
@@ -90,17 +89,17 @@ Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
// This function takes the first stab at classifying expressions.
- const LangOptions &Lang = Ctx.getLangOptions();
+ const LangOptions &Lang = Ctx.getLangOpts();
switch (E->getStmtClass()) {
- // First come the expressions that are always lvalues, unconditionally.
case Stmt::NoStmtClass:
#define ABSTRACT_STMT(Kind)
#define STMT(Kind, Base) case Expr::Kind##Class:
#define EXPR(Kind, Base)
#include "clang/AST/StmtNodes.inc"
llvm_unreachable("cannot classify a statement");
- break;
+
+ // First come the expressions that are always lvalues, unconditionally.
case Expr::ObjCIsaExprClass:
// C++ [expr.prim.general]p1: A string literal is an lvalue.
case Expr::StringLiteralClass:
@@ -109,6 +108,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
// __func__ and friends are too.
case Expr::PredefinedExprClass:
// Property references are lvalues
+ case Expr::ObjCSubscriptRefExprClass:
case Expr::ObjCPropertyRefExprClass:
// C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
case Expr::CXXTypeidExprClass:
@@ -122,10 +122,11 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
// FIXME: ObjC++0x might have different rules
case Expr::ObjCIvarRefExprClass:
return Cl::CL_LValue;
+
// C99 6.5.2.5p5 says that compound literals are lvalues.
// In C++, they're class temporaries.
case Expr::CompoundLiteralExprClass:
- return Ctx.getLangOptions().CPlusPlus? Cl::CL_ClassTemporary
+ return Ctx.getLangOpts().CPlusPlus? Cl::CL_ClassTemporary
: Cl::CL_LValue;
// Expressions that are prvalues.
@@ -151,13 +152,17 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CXXScalarValueInitExprClass:
case Expr::UnaryTypeTraitExprClass:
case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::ObjCSelectorExprClass:
case Expr::ObjCProtocolExprClass:
case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
case Expr::ParenListExprClass:
- case Expr::InitListExprClass:
case Expr::SizeOfPackExprClass:
case Expr::SubstNonTypeTemplateParmPackExprClass:
case Expr::AsTypeExprClass:
@@ -184,9 +189,6 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return isa<FunctionDecl>(cast<DeclRefExpr>(E)->getDecl())
? Cl::CL_PRValue : Cl::CL_LValue;
return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl());
- // We deal with names referenced from blocks the same way.
- case Expr::BlockDeclRefExprClass:
- return ClassifyDecl(Ctx, cast<BlockDeclRefExpr>(E)->getDecl());
// Member access is complex.
case Expr::MemberExprClass:
@@ -229,21 +231,24 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
}
case Expr::OpaqueValueExprClass:
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+
+ // Pseudo-object expressions can produce l-values with reference magic.
+ case Expr::PseudoObjectExprClass:
return ClassifyExprValueKind(Lang, E,
- cast<OpaqueValueExpr>(E)->getValueKind());
+ cast<PseudoObjectExpr>(E)->getValueKind());
// Implicit casts are lvalues if they're lvalue casts. Other than that, we
// only specifically record class temporaries.
case Expr::ImplicitCastExprClass:
- return ClassifyExprValueKind(Lang, E,
- cast<ImplicitCastExpr>(E)->getValueKind());
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
// C++ [expr.prim.general]p4: The presence of parentheses does not affect
// whether the expression is an lvalue.
case Expr::ParenExprClass:
return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr());
- // C1X 6.5.1.1p4: [A generic selection] is an lvalue, a function designator,
+ // C11 6.5.1.1p4: [A generic selection] is an lvalue, a function designator,
// or a void expression if its result expression is, respectively, an
// lvalue, a function designator, or a void expression.
case Expr::GenericSelectionExprClass:
@@ -261,6 +266,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CallExprClass:
case Expr::CXXOperatorCallExprClass:
case Expr::CXXMemberCallExprClass:
+ case Expr::UserDefinedLiteralClass:
case Expr::CUDAKernelCallExprClass:
return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType());
@@ -328,35 +334,46 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
// Some C++ expressions are always class temporaries.
case Expr::CXXConstructExprClass:
case Expr::CXXTemporaryObjectExprClass:
+ case Expr::LambdaExprClass:
return Cl::CL_ClassTemporary;
case Expr::VAArgExprClass:
return ClassifyUnnamed(Ctx, E->getType());
-
+
case Expr::DesignatedInitExprClass:
return ClassifyInternal(Ctx, cast<DesignatedInitExpr>(E)->getInit());
-
+
case Expr::StmtExprClass: {
const CompoundStmt *S = cast<StmtExpr>(E)->getSubStmt();
if (const Expr *LastExpr = dyn_cast_or_null<Expr>(S->body_back()))
return ClassifyUnnamed(Ctx, LastExpr->getType());
return Cl::CL_PRValue;
}
-
+
case Expr::CXXUuidofExprClass:
return Cl::CL_LValue;
-
+
case Expr::PackExpansionExprClass:
return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
-
+
case Expr::MaterializeTemporaryExprClass:
return cast<MaterializeTemporaryExpr>(E)->isBoundToLvalueReference()
? Cl::CL_LValue
: Cl::CL_XValue;
+
+ case Expr::InitListExprClass:
+ // An init list can be an lvalue if it is bound to a reference and
+ // contains only one element. In that case, we look at that element
+ // for an exact classification. Init list creation takes care of the
+ // value kind for us, so we only need to fine-tune.
+ if (E->isRValue())
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only 1-element init lists can be glvalues.");
+ return ClassifyInternal(Ctx, cast<InitListExpr>(E)->getInit(0));
}
-
+
llvm_unreachable("unhandled expression kind in classification");
- return Cl::CL_LValue;
}
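
The new InitListExpr case above covers C++11 list-initialization of references; as source-level examples:

    int x = 0;
    int  &r = {x};   // one-element list bound to an lvalue reference:
                     // classified by classifying 'x' itself (an lvalue)
    int &&t = {1};   // prvalue list: materializes a temporary as before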
/// ClassifyDecl - Return the classification of an expression referencing the
@@ -379,7 +396,7 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
else
islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
isa<IndirectFieldDecl>(D) ||
- (Ctx.getLangOptions().CPlusPlus &&
+ (Ctx.getLangOpts().CPlusPlus &&
(isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)));
return islvalue ? Cl::CL_LValue : Cl::CL_PRValue;
@@ -390,7 +407,7 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
/// calls and casts.
static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
// In C, function calls are always rvalues.
- if (!Ctx.getLangOptions().CPlusPlus) return Cl::CL_PRValue;
+ if (!Ctx.getLangOpts().CPlusPlus) return Cl::CL_PRValue;
// C++ [expr.call]p10: A function call is an lvalue if the result type is an
// lvalue reference type or an rvalue reference to function type, an xvalue
@@ -411,7 +428,7 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
? Cl::CL_PRValue : Cl::CL_LValue);
// Handle C first, it's easier.
- if (!Ctx.getLangOptions().CPlusPlus) {
+ if (!Ctx.getLangOpts().CPlusPlus) {
// C99 6.5.2.3p3
// For dot access, the expression is an lvalue if the first part is. For
// arrow access, it always is an lvalue.
@@ -463,7 +480,7 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
}
static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
- assert(Ctx.getLangOptions().CPlusPlus &&
+ assert(Ctx.getLangOpts().CPlusPlus &&
"This is only relevant for C++.");
// C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand.
// Except we override this for writes to ObjC properties.
@@ -480,14 +497,16 @@ static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
// is a pointer to a data member is of the same value category as its first
// operand.
if (E->getOpcode() == BO_PtrMemD)
- return (E->getType()->isFunctionType() || E->getType() == Ctx.BoundMemberTy)
+ return (E->getType()->isFunctionType() ||
+ E->hasPlaceholderType(BuiltinType::BoundMember))
? Cl::CL_MemberFunction
: ClassifyInternal(Ctx, E->getLHS());
// C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its
// second operand is a pointer to data member and a prvalue otherwise.
if (E->getOpcode() == BO_PtrMemI)
- return (E->getType()->isFunctionType() || E->getType() == Ctx.BoundMemberTy)
+ return (E->getType()->isFunctionType() ||
+ E->hasPlaceholderType(BuiltinType::BoundMember))
? Cl::CL_MemberFunction
: Cl::CL_LValue;
@@ -497,7 +516,7 @@ static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
static Cl::Kinds ClassifyConditional(ASTContext &Ctx, const Expr *True,
const Expr *False) {
- assert(Ctx.getLangOptions().CPlusPlus &&
+ assert(Ctx.getLangOpts().CPlusPlus &&
"This is only relevant for C++.");
// C++ [expr.cond]p2
@@ -536,18 +555,9 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
// This is the lvalue case.
// Functions are lvalues in C++, but not modifiable. (C++ [basic.lval]p6)
- if (Ctx.getLangOptions().CPlusPlus && E->getType()->isFunctionType())
+ if (Ctx.getLangOpts().CPlusPlus && E->getType()->isFunctionType())
return Cl::CM_Function;
- // You cannot assign to a variable outside a block from within the block if
- // it is not marked __block, e.g.
- // void takeclosure(void (^C)(void));
- // void func() { int x = 1; takeclosure(^{ x = 7; }); }
- if (const BlockDeclRefExpr *BDR = dyn_cast<BlockDeclRefExpr>(E)) {
- if (!BDR->isByRef() && isa<VarDecl>(BDR->getDecl()))
- return Cl::CM_NotBlockQualified;
- }
-
// Assignment to a property in ObjC is an implicit setter access. But a
// setter might not exist.
if (const ObjCPropertyRefExpr *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) {
@@ -559,6 +569,7 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
// Const stuff is obviously not modifiable.
if (CT.isConstQualified())
return Cl::CM_ConstQualified;
+
// Arrays are not modifiable, only their elements are.
if (CT->isArrayType())
return Cl::CM_ArrayType;
@@ -569,7 +580,7 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
// Records with any const fields (recursively) are not modifiable.
if (const RecordType *R = CT->getAs<RecordType>()) {
assert((E->getObjectKind() == OK_ObjCProperty ||
- !Ctx.getLangOptions().CPlusPlus) &&
+ !Ctx.getLangOpts().CPlusPlus) &&
"C++ struct assignment should be resolved by the "
"copy assignment operator.");
if (R->hasConstFields())
@@ -624,7 +635,6 @@ Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
case Cl::CM_Function: return MLV_NotObjectType;
case Cl::CM_LValueCast:
llvm_unreachable("CM_LValueCast and CL_LValue don't match");
- case Cl::CM_NotBlockQualified: return MLV_NotBlockQualified;
case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty;
case Cl::CM_ConstQualified: return MLV_ConstQualified;
case Cl::CM_ArrayType: return MLV_ArrayType;
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
index df75bc8..01c9fe7 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
@@ -9,6 +9,28 @@
//
// This file implements the Expr constant evaluator.
//
+// Constant expression evaluation produces four main results:
+//
+// * A success/failure flag indicating whether constant folding was successful.
+// This is the 'bool' return value used by most of the code in this file. A
+// 'false' return value indicates that constant folding has failed, and any
+// appropriate diagnostic has already been produced.
+//
+// * An evaluated result, valid only if constant folding has not failed.
+//
+// * A flag indicating whether evaluation encountered (unevaluated) side-effects.
+// These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
+// where it is possible to determine the evaluated result regardless.
+//
+// * A set of notes indicating why the evaluation was not a constant expression
+// (under the C++11 rules only, at the moment), or, if folding failed too,
+// why the expression could not be folded.
+//
+// If we are checking for a potential constant expression, failure to constant
+// fold a potential constant sub-expression will be indicated by a 'false'
+// return value (the expression could not be folded) and no diagnostic (the
+// expression is not necessarily non-constant).
+//
//===----------------------------------------------------------------------===//
#include "clang/AST/APValue.h"
@@ -23,46 +45,612 @@
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallString.h"
#include <cstring>
+#include <functional>
using namespace clang;
using llvm::APSInt;
using llvm::APFloat;
-/// EvalInfo - This is a private struct used by the evaluator to capture
-/// information about a subexpression as it is folded. It retains information
-/// about the AST context, but also maintains information about the folded
-/// expression.
-///
-/// If an expression could be evaluated, it is still possible it is not a C
-/// "integer constant expression" or constant expression. If not, this struct
-/// captures information about how and why not.
-///
-/// One bit of information passed *into* the request for constant folding
-/// indicates whether the subexpression is "evaluated" or not according to C
-/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
-/// evaluate the expression regardless of what the RHS is, but C only allows
-/// certain things in certain situations.
+static bool IsGlobalLValue(APValue::LValueBase B);
+
namespace {
+ struct LValue;
+ struct CallStackFrame;
+ struct EvalInfo;
+
+ static QualType getType(APValue::LValueBase B) {
+ if (!B) return QualType();
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>())
+ return D->getType();
+ return B.get<const Expr*>()->getType();
+ }
+
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// field or base class.
+ static
+ APValue::BaseOrMemberType getAsBaseOrMember(APValue::LValuePathEntry E) {
+ APValue::BaseOrMemberType Value;
+ Value.setFromOpaqueValue(E.BaseOrMember);
+ return Value;
+ }
+
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// field declaration.
+ static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
+ return dyn_cast<FieldDecl>(getAsBaseOrMember(E).getPointer());
+ }
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// base class declaration.
+ static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
+ return dyn_cast<CXXRecordDecl>(getAsBaseOrMember(E).getPointer());
+ }
+ /// Determine whether this LValue path entry for a base class names a virtual
+ /// base class.
+ static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
+ return getAsBaseOrMember(E).getInt();
+ }
+
+ /// Find the path length and type of the most-derived subobject in the given
+ /// path, and find the size of the containing array, if any.
+ static
+ unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
+ ArrayRef<APValue::LValuePathEntry> Path,
+ uint64_t &ArraySize, QualType &Type) {
+ unsigned MostDerivedLength = 0;
+ Type = Base;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (Type->isArrayType()) {
+ const ConstantArrayType *CAT =
+ cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
+ Type = CAT->getElementType();
+ ArraySize = CAT->getSize().getZExtValue();
+ MostDerivedLength = I + 1;
+ } else if (Type->isAnyComplexType()) {
+ const ComplexType *CT = Type->castAs<ComplexType>();
+ Type = CT->getElementType();
+ ArraySize = 2;
+ MostDerivedLength = I + 1;
+ } else if (const FieldDecl *FD = getAsField(Path[I])) {
+ Type = FD->getType();
+ ArraySize = 0;
+ MostDerivedLength = I + 1;
+ } else {
+ // Path[I] describes a base class.
+ ArraySize = 0;
+ }
+ }
+ return MostDerivedLength;
+ }
+
+ // The order of this enum is important for diagnostics.
+ enum CheckSubobjectKind {
+ CSK_Base, CSK_Derived, CSK_Field, CSK_ArrayToPointer, CSK_ArrayIndex,
+ CSK_This, CSK_Real, CSK_Imag
+ };
+
+ /// A path from a glvalue to a subobject of that glvalue.
+ struct SubobjectDesignator {
+ /// True if the subobject was named in a manner not supported by C++11. Such
+ /// lvalues can still be folded, but they are not core constant expressions
+ /// and we cannot perform lvalue-to-rvalue conversions on them.
+ bool Invalid : 1;
+
+ /// Is this a pointer one past the end of an object?
+ bool IsOnePastTheEnd : 1;
+
+ /// The length of the path to the most-derived object of which this is a
+ /// subobject.
+ unsigned MostDerivedPathLength : 30;
+
+ /// The size of the array of which the most-derived object is an element, or
+ /// 0 if the most-derived object is not an array element.
+ uint64_t MostDerivedArraySize;
+
+ /// The type of the most derived object referred to by this address.
+ QualType MostDerivedType;
+
+ typedef APValue::LValuePathEntry PathEntry;
+
+ /// The entries on the path from the glvalue to the designated subobject.
+ SmallVector<PathEntry, 8> Entries;
+
+ SubobjectDesignator() : Invalid(true) {}
+
+ explicit SubobjectDesignator(QualType T)
+ : Invalid(false), IsOnePastTheEnd(false), MostDerivedPathLength(0),
+ MostDerivedArraySize(0), MostDerivedType(T) {}
+
+ SubobjectDesignator(ASTContext &Ctx, const APValue &V)
+ : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
+ MostDerivedPathLength(0), MostDerivedArraySize(0) {
+ if (!Invalid) {
+ IsOnePastTheEnd = V.isLValueOnePastTheEnd();
+ ArrayRef<PathEntry> VEntries = V.getLValuePath();
+ Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
+ if (V.getLValueBase())
+ MostDerivedPathLength =
+ findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
+ V.getLValuePath(), MostDerivedArraySize,
+ MostDerivedType);
+ }
+ }
+
+ void setInvalid() {
+ Invalid = true;
+ Entries.clear();
+ }
+
+ /// Determine whether this is a one-past-the-end pointer.
+ bool isOnePastTheEnd() const {
+ if (IsOnePastTheEnd)
+ return true;
+ if (MostDerivedArraySize &&
+ Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
+ return true;
+ return false;
+ }
+
+ /// Check that this refers to a valid subobject.
+ bool isValidSubobject() const {
+ if (Invalid)
+ return false;
+ return !isOnePastTheEnd();
+ }
+ /// Check that this refers to a valid subobject, and if not, produce a
+ /// relevant diagnostic and set the designator as invalid.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
+
+ /// Update this designator to refer to the first element within this array.
+ void addArrayUnchecked(const ConstantArrayType *CAT) {
+ PathEntry Entry;
+ Entry.ArrayIndex = 0;
+ Entries.push_back(Entry);
+
+ // This is a most-derived object.
+ MostDerivedType = CAT->getElementType();
+ MostDerivedArraySize = CAT->getSize().getZExtValue();
+ MostDerivedPathLength = Entries.size();
+ }
+ /// Update this designator to refer to the given base or member of this
+ /// object.
+ void addDeclUnchecked(const Decl *D, bool Virtual = false) {
+ PathEntry Entry;
+ APValue::BaseOrMemberType Value(D, Virtual);
+ Entry.BaseOrMember = Value.getOpaqueValue();
+ Entries.push_back(Entry);
+
+ // If this isn't a base class, it's a new most-derived object.
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ MostDerivedType = FD->getType();
+ MostDerivedArraySize = 0;
+ MostDerivedPathLength = Entries.size();
+ }
+ }
+ /// Update this designator to refer to the given complex component.
+ void addComplexUnchecked(QualType EltTy, bool Imag) {
+ PathEntry Entry;
+ Entry.ArrayIndex = Imag;
+ Entries.push_back(Entry);
+
+ // This is technically a most-derived object, though in practice this
+ // is unlikely to matter.
+ MostDerivedType = EltTy;
+ MostDerivedArraySize = 2;
+ MostDerivedPathLength = Entries.size();
+ }
+ void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N);
+ /// Add N to the address of this subobject.
+ void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+ if (Invalid) return;
+ if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize) {
+ Entries.back().ArrayIndex += N;
+ if (Entries.back().ArrayIndex > MostDerivedArraySize) {
+ diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
+ setInvalid();
+ }
+ return;
+ }
+ // [expr.add]p4: For the purposes of these operators, a pointer to a
+ // nonarray object behaves the same as a pointer to the first element of
+ // an array of length one with the type of the object as its element type.
+ if (IsOnePastTheEnd && N == (uint64_t)-1)
+ IsOnePastTheEnd = false;
+ else if (!IsOnePastTheEnd && N == 1)
+ IsOnePastTheEnd = true;
+ else if (N != 0) {
+ diagnosePointerArithmetic(Info, E, uint64_t(IsOnePastTheEnd) + N);
+ setInvalid();
+ }
+ }
+ };
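The [expr.add]p4 handling in adjustIndex corresponds to source like this (a C++11 sketch):

    constexpr int n = 0;
    constexpr const int *p = &n + 1;   // OK: one past the end of a non-array
    constexpr const int *q = p - 1;    // OK: back to &n
    // constexpr const int *r = &n + 2; // error: diagnosed via
    //                                  // diagnosePointerArithmetic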
+
+ /// A stack frame in the constexpr call stack.
+ struct CallStackFrame {
+ EvalInfo &Info;
+
+ /// Parent - The caller of this stack frame.
+ CallStackFrame *Caller;
+
+ /// CallLoc - The location of the call expression for this call.
+ SourceLocation CallLoc;
+
+ /// Callee - The function which was called.
+ const FunctionDecl *Callee;
+
+ /// Index - The call index of this call.
+ unsigned Index;
+
+ /// This - The binding for the this pointer in this call, if any.
+ const LValue *This;
+
+ /// ParmBindings - Parameter bindings for this function call, indexed by
+ /// parameters' function scope indices.
+ const APValue *Arguments;
+
+ typedef llvm::DenseMap<const Expr*, APValue> MapTy;
+ typedef MapTy::const_iterator temp_iterator;
+ /// Temporaries - Temporary lvalues materialized within this stack frame.
+ MapTy Temporaries;
+
+ CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ const APValue *Arguments);
+ ~CallStackFrame();
+ };
+
+ /// A partial diagnostic which we might know in advance that we are not going
+ /// to emit.
+ class OptionalDiagnostic {
+ PartialDiagnostic *Diag;
+
+ public:
+ explicit OptionalDiagnostic(PartialDiagnostic *Diag = 0) : Diag(Diag) {}
+
+ template<typename T>
+ OptionalDiagnostic &operator<<(const T &v) {
+ if (Diag)
+ *Diag << v;
+ return *this;
+ }
+
+ OptionalDiagnostic &operator<<(const APSInt &I) {
+ if (Diag) {
+ llvm::SmallVector<char, 32> Buffer;
+ I.toString(Buffer);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
+
+ OptionalDiagnostic &operator<<(const APFloat &F) {
+ if (Diag) {
+ llvm::SmallVector<char, 32> Buffer;
+ F.toString(Buffer);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
+ };
+
+ /// EvalInfo - This is a private struct used by the evaluator to capture
+ /// information about a subexpression as it is folded. It retains information
+ /// about the AST context, but also maintains information about the folded
+ /// expression.
+ ///
+ /// If an expression could be evaluated, it is still possible it is not a C
+ /// "integer constant expression" or constant expression. If not, this struct
+ /// captures information about how and why not.
+ ///
+ /// One bit of information passed *into* the request for constant folding
+ /// indicates whether the subexpression is "evaluated" or not according to C
+ /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
+ /// evaluate the expression regardless of what the RHS is, but C only allows
+ /// certain things in certain situations.
struct EvalInfo {
- const ASTContext &Ctx;
+ ASTContext &Ctx;
+
+ /// EvalStatus - Contains information about the evaluation.
+ Expr::EvalStatus &EvalStatus;
+
+ /// CurrentCall - The top of the constexpr call stack.
+ CallStackFrame *CurrentCall;
- /// EvalResult - Contains information about the evaluation.
- Expr::EvalResult &EvalResult;
+ /// CallStackDepth - The number of calls in the call stack right now.
+ unsigned CallStackDepth;
+
+ /// NextCallIndex - The next call index to assign.
+ unsigned NextCallIndex;
typedef llvm::DenseMap<const OpaqueValueExpr*, APValue> MapTy;
+ /// OpaqueValues - Values used as the common expression in a
+ /// BinaryConditionalOperator.
MapTy OpaqueValues;
+
+ /// BottomFrame - The frame in which evaluation started. This must be
+ /// initialized after CurrentCall and CallStackDepth.
+ CallStackFrame BottomFrame;
+
+ /// EvaluatingDecl - This is the declaration whose initializer is being
+ /// evaluated, if any.
+ const VarDecl *EvaluatingDecl;
+
+ /// EvaluatingDeclValue - This is the value being constructed for the
+ /// declaration whose initializer is being evaluated, if any.
+ APValue *EvaluatingDeclValue;
+
+ /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
+ /// notes attached to it will also be stored, otherwise they will not be.
+ bool HasActiveDiagnostic;
+
+ /// CheckingPotentialConstantExpression - Are we checking whether the
+ /// expression is a potential constant expression? If so, some diagnostics
+ /// are suppressed.
+ bool CheckingPotentialConstantExpression;
+
+ EvalInfo(const ASTContext &C, Expr::EvalStatus &S)
+ : Ctx(const_cast<ASTContext&>(C)), EvalStatus(S), CurrentCall(0),
+ CallStackDepth(0), NextCallIndex(1),
+ BottomFrame(*this, SourceLocation(), 0, 0, 0),
+ EvaluatingDecl(0), EvaluatingDeclValue(0), HasActiveDiagnostic(false),
+ CheckingPotentialConstantExpression(false) {}
+
const APValue *getOpaqueValue(const OpaqueValueExpr *e) const {
MapTy::const_iterator i = OpaqueValues.find(e);
if (i == OpaqueValues.end()) return 0;
return &i->second;
}
- EvalInfo(const ASTContext &ctx, Expr::EvalResult &evalresult)
- : Ctx(ctx), EvalResult(evalresult) {}
+ void setEvaluatingDecl(const VarDecl *VD, APValue &Value) {
+ EvaluatingDecl = VD;
+ EvaluatingDeclValue = &Value;
+ }
+
+ const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
+
+ bool CheckCallLimit(SourceLocation Loc) {
+ // Don't perform any constexpr calls (other than the call we're checking)
+ // when checking a potential constant expression.
+ if (CheckingPotentialConstantExpression && CallStackDepth > 1)
+ return false;
+ if (NextCallIndex == 0) {
+ // NextCallIndex has wrapped around.
+ Diag(Loc, diag::note_constexpr_call_limit_exceeded);
+ return false;
+ }
+ if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
+ return true;
+ Diag(Loc, diag::note_constexpr_depth_limit_exceeded)
+ << getLangOpts().ConstexprCallDepth;
+ return false;
+    return false;
+  }
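The depth check corresponds to source like this (a sketch, assuming clang's default constexpr call-depth limit of 512, adjustable with -fconstexpr-depth):

    constexpr unsigned depth(unsigned n) { return n ? depth(n - 1) + 1 : 0; }
    static_assert(depth(100) == 100, "well within the limit");
    // static_assert(depth(600) == 600, ""); // error: exceeds the default
    //                                       // limit; the note above is emitted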
- const LangOptions &getLangOpts() { return Ctx.getLangOptions(); }
+ CallStackFrame *getCallFrame(unsigned CallIndex) {
+ assert(CallIndex && "no call index in getCallFrame");
+ // We will eventually hit BottomFrame, which has Index 1, so Frame can't
+ // be null in this loop.
+ CallStackFrame *Frame = CurrentCall;
+ while (Frame->Index > CallIndex)
+ Frame = Frame->Caller;
+ return (Frame->Index == CallIndex) ? Frame : 0;
+ }
+
+ private:
+ /// Add a diagnostic to the diagnostics list.
+ PartialDiagnostic &addDiag(SourceLocation Loc, diag::kind DiagId) {
+ PartialDiagnostic PD(DiagId, Ctx.getDiagAllocator());
+ EvalStatus.Diag->push_back(std::make_pair(Loc, PD));
+ return EvalStatus.Diag->back().second;
+ }
+
+ /// Add notes containing a call stack to the current point of evaluation.
+ void addCallStack(unsigned Limit);
+
+ public:
+ /// Diagnose that the evaluation cannot be folded.
+ OptionalDiagnostic Diag(SourceLocation Loc, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ // If we have a prior diagnostic, it will be noting that the expression
+ // isn't a constant expression. This diagnostic is more important.
+ // FIXME: We might want to show both diagnostics to the user.
+ if (EvalStatus.Diag) {
+ unsigned CallStackNotes = CallStackDepth - 1;
+ unsigned Limit = Ctx.getDiagnostics().getConstexprBacktraceLimit();
+ if (Limit)
+ CallStackNotes = std::min(CallStackNotes, Limit + 1);
+ if (CheckingPotentialConstantExpression)
+ CallStackNotes = 0;
+
+ HasActiveDiagnostic = true;
+ EvalStatus.Diag->clear();
+ EvalStatus.Diag->reserve(1 + ExtraNotes + CallStackNotes);
+ addDiag(Loc, DiagId);
+ if (!CheckingPotentialConstantExpression)
+ addCallStack(Limit);
+ return OptionalDiagnostic(&(*EvalStatus.Diag)[0].second);
+ }
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+
+ OptionalDiagnostic Diag(const Expr *E, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ if (EvalStatus.Diag)
+ return Diag(E->getExprLoc(), DiagId, ExtraNotes);
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+
+ /// Diagnose that the evaluation does not produce a C++11 core constant
+ /// expression.
+ template<typename LocArg>
+ OptionalDiagnostic CCEDiag(LocArg Loc, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ // Don't override a previous diagnostic.
+ if (!EvalStatus.Diag || !EvalStatus.Diag->empty()) {
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+ return Diag(Loc, DiagId, ExtraNotes);
+ }
+
+ /// Add a note to a prior diagnostic.
+ OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId) {
+ if (!HasActiveDiagnostic)
+ return OptionalDiagnostic();
+ return OptionalDiagnostic(&addDiag(Loc, DiagId));
+ }
+
+ /// Add a stack of notes to a prior diagnostic.
+ void addNotes(ArrayRef<PartialDiagnosticAt> Diags) {
+ if (HasActiveDiagnostic) {
+ EvalStatus.Diag->insert(EvalStatus.Diag->end(),
+ Diags.begin(), Diags.end());
+ }
+ }
+
+ /// Should we continue evaluation as much as possible after encountering a
+ /// construct which can't be folded?
+ bool keepEvaluatingAfterFailure() {
+ return CheckingPotentialConstantExpression &&
+ EvalStatus.Diag && EvalStatus.Diag->empty();
+ }
};
+ /// Object used to treat all foldable expressions as constant expressions.
+ struct FoldConstant {
+ bool Enabled;
+
+ explicit FoldConstant(EvalInfo &Info)
+ : Enabled(Info.EvalStatus.Diag && Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects) {
+ }
+ // Treat the value we've computed since this object was created as constant.
+ void Fold(EvalInfo &Info) {
+ if (Enabled && !Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects)
+ Info.EvalStatus.Diag->clear();
+ }
+ };
+
+ /// RAII object used to suppress diagnostics and side-effects from a
+ /// speculative evaluation.
+ class SpeculativeEvaluationRAII {
+ EvalInfo &Info;
+ Expr::EvalStatus Old;
+
+ public:
+ SpeculativeEvaluationRAII(EvalInfo &Info,
+ llvm::SmallVectorImpl<PartialDiagnosticAt>
+ *NewDiag = 0)
+ : Info(Info), Old(Info.EvalStatus) {
+ Info.EvalStatus.Diag = NewDiag;
+ }
+ ~SpeculativeEvaluationRAII() {
+ Info.EvalStatus = Old;
+ }
+ };
+}
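A sketch of the situation SpeculativeEvaluationRAII handles: when one operand cannot be folded but the other operand forces the result, the failed evaluation runs under this object so its diagnostics stay out of the final EvalStatus.

    int unknown();                // not evaluatable at compile time
    // 'unknown() && 0' still folds to false: the RHS forces the result, so
    // the LHS is evaluated speculatively and its diagnostics are discarded,
    // with only the side-effects flag surviving.
    int x = unknown() && 0;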
+
+bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ if (Invalid)
+ return false;
+ if (isOnePastTheEnd()) {
+ Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
+ << CSK;
+ setInvalid();
+ return false;
+ }
+ return true;
+}
+
+void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
+ const Expr *E, uint64_t N) {
+ if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize)
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << static_cast<int>(N) << /*array*/ 0
+ << static_cast<unsigned>(MostDerivedArraySize);
+ else
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << static_cast<int>(N) << /*non-array*/ 1;
+ setInvalid();
+}
+
+CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ const APValue *Arguments)
+ : Info(Info), Caller(Info.CurrentCall), CallLoc(CallLoc), Callee(Callee),
+ Index(Info.NextCallIndex++), This(This), Arguments(Arguments) {
+ Info.CurrentCall = this;
+ ++Info.CallStackDepth;
+}
+
+CallStackFrame::~CallStackFrame() {
+ assert(Info.CurrentCall == this && "calls retired out of order");
+ --Info.CallStackDepth;
+ Info.CurrentCall = Caller;
+}
+
+/// Produce a string describing the given constexpr call.
+static void describeCall(CallStackFrame *Frame, llvm::raw_ostream &Out) {
+ unsigned ArgIndex = 0;
+ bool IsMemberCall = isa<CXXMethodDecl>(Frame->Callee) &&
+ !isa<CXXConstructorDecl>(Frame->Callee) &&
+ cast<CXXMethodDecl>(Frame->Callee)->isInstance();
+
+ if (!IsMemberCall)
+ Out << *Frame->Callee << '(';
+
+ for (FunctionDecl::param_const_iterator I = Frame->Callee->param_begin(),
+ E = Frame->Callee->param_end(); I != E; ++I, ++ArgIndex) {
+ if (ArgIndex > (unsigned)IsMemberCall)
+ Out << ", ";
+
+ const ParmVarDecl *Param = *I;
+ const APValue &Arg = Frame->Arguments[ArgIndex];
+ Arg.printPretty(Out, Frame->Info.Ctx, Param->getType());
+
+ if (ArgIndex == 0 && IsMemberCall)
+ Out << "->" << *Frame->Callee << '(';
+ }
+
+ Out << ')';
+}
+
+void EvalInfo::addCallStack(unsigned Limit) {
+ // Determine which calls to skip, if any.
+ unsigned ActiveCalls = CallStackDepth - 1;
+ unsigned SkipStart = ActiveCalls, SkipEnd = SkipStart;
+ if (Limit && Limit < ActiveCalls) {
+ SkipStart = Limit / 2 + Limit % 2;
+ SkipEnd = ActiveCalls - Limit / 2;
+ }
+
+ // Walk the call stack and add the diagnostics.
+ unsigned CallIdx = 0;
+ for (CallStackFrame *Frame = CurrentCall; Frame != &BottomFrame;
+ Frame = Frame->Caller, ++CallIdx) {
+ // Skip this call?
+ if (CallIdx >= SkipStart && CallIdx < SkipEnd) {
+ if (CallIdx == SkipStart) {
+ // Note that we're skipping calls.
+ addDiag(Frame->CallLoc, diag::note_constexpr_calls_suppressed)
+ << unsigned(ActiveCalls - Limit);
+ }
+ continue;
+ }
+
+ llvm::SmallVector<char, 128> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ describeCall(Frame, Out);
+ addDiag(Frame->CallLoc, diag::note_constexpr_call_here) << Out.str();
+ }
+}
+
+namespace {
struct ComplexValue {
private:
bool IsInt;
@@ -104,28 +692,203 @@ namespace {
};
struct LValue {
- const Expr *Base;
+ APValue::LValueBase Base;
CharUnits Offset;
+ unsigned CallIndex;
+ SubobjectDesignator Designator;
+
+ const APValue::LValueBase getLValueBase() const { return Base; }
+ CharUnits &getLValueOffset() { return Offset; }
+ const CharUnits &getLValueOffset() const { return Offset; }
+ unsigned getLValueCallIndex() const { return CallIndex; }
+ SubobjectDesignator &getLValueDesignator() { return Designator; }
+ const SubobjectDesignator &getLValueDesignator() const { return Designator;}
+
+ void moveInto(APValue &V) const {
+ if (Designator.Invalid)
+ V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex);
+ else
+ V = APValue(Base, Offset, Designator.Entries,
+ Designator.IsOnePastTheEnd, CallIndex);
+ }
+ void setFrom(ASTContext &Ctx, const APValue &V) {
+ assert(V.isLValue());
+ Base = V.getLValueBase();
+ Offset = V.getLValueOffset();
+ CallIndex = V.getLValueCallIndex();
+ Designator = SubobjectDesignator(Ctx, V);
+ }
- const Expr *getLValueBase() { return Base; }
- CharUnits getLValueOffset() { return Offset; }
+ void set(APValue::LValueBase B, unsigned I = 0) {
+ Base = B;
+ Offset = CharUnits::Zero();
+ CallIndex = I;
+ Designator = SubobjectDesignator(getType(B));
+ }
- void moveInto(APValue &v) const {
- v = APValue(Base, Offset);
+ // Check that this LValue is not based on a null pointer. If it is, produce
+ // a diagnostic and mark the designator as invalid.
+ bool checkNullPointer(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ if (Designator.Invalid)
+ return false;
+ if (!Base) {
+ Info.CCEDiag(E, diag::note_constexpr_null_subobject)
+ << CSK;
+ Designator.setInvalid();
+ return false;
+ }
+ return true;
}
- void setFrom(const APValue &v) {
- assert(v.isLValue());
- Base = v.getLValueBase();
- Offset = v.getLValueOffset();
+
+ // Check this LValue refers to an object. If not, set the designator to be
+ // invalid and emit a diagnostic.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
+ // Outside C++11, do not build a designator referring to a subobject of
+ // any object: we won't use such a designator for anything.
+ if (!Info.getLangOpts().CPlusPlus0x)
+ Designator.setInvalid();
+ return checkNullPointer(Info, E, CSK) &&
+ Designator.checkSubobject(Info, E, CSK);
+ }
+
+ void addDecl(EvalInfo &Info, const Expr *E,
+ const Decl *D, bool Virtual = false) {
+ if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
+ Designator.addDeclUnchecked(D, Virtual);
+ }
+ void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
+ if (checkSubobject(Info, E, CSK_ArrayToPointer))
+ Designator.addArrayUnchecked(CAT);
+ }
+ void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
+ if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
+ Designator.addComplexUnchecked(EltTy, Imag);
}
+ void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+ if (checkNullPointer(Info, E, CSK_ArrayIndex))
+ Designator.adjustIndex(Info, E, N);
+ }
+ };
+
+ struct MemberPtr {
+ MemberPtr() {}
+ explicit MemberPtr(const ValueDecl *Decl) :
+ DeclAndIsDerivedMember(Decl, false), Path() {}
+
+ /// The member or (direct or indirect) field referred to by this member
+ /// pointer, or 0 if this is a null member pointer.
+ const ValueDecl *getDecl() const {
+ return DeclAndIsDerivedMember.getPointer();
+ }
+ /// Is this actually a member of some type derived from the relevant class?
+ bool isDerivedMember() const {
+ return DeclAndIsDerivedMember.getInt();
+ }
+ /// Get the class which the declaration actually lives in.
+ const CXXRecordDecl *getContainingRecord() const {
+ return cast<CXXRecordDecl>(
+ DeclAndIsDerivedMember.getPointer()->getDeclContext());
+ }
+
+ void moveInto(APValue &V) const {
+ V = APValue(getDecl(), isDerivedMember(), Path);
+ }
+ void setFrom(const APValue &V) {
+ assert(V.isMemberPointer());
+ DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
+ DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
+ Path.clear();
+ ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath();
+ Path.insert(Path.end(), P.begin(), P.end());
+ }
+
+ /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
+ /// whether the member is a member of some class derived from the class type
+ /// of the member pointer.
+ llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
+ /// Path - The path of base/derived classes from the member declaration's
+ /// class (exclusive) to the class type of the member pointer (inclusive).
+ SmallVector<const CXXRecordDecl*, 4> Path;
+
+ /// Perform a cast towards the class of the Decl (either up or down the
+ /// hierarchy).
+ bool castBack(const CXXRecordDecl *Class) {
+ assert(!Path.empty());
+ const CXXRecordDecl *Expected;
+ if (Path.size() >= 2)
+ Expected = Path[Path.size() - 2];
+ else
+ Expected = getContainingRecord();
+ if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
+ // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
+ // if B does not contain the original member and is not a base or
+ // derived class of the class containing the original member, the result
+ // of the cast is undefined.
+ // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
+ // (D::*). We consider that to be a language defect.
+ return false;
+ }
+ Path.pop_back();
+ return true;
+ }
+ /// Perform a base-to-derived member pointer cast.
+ bool castToDerived(const CXXRecordDecl *Derived) {
+ if (!getDecl())
+ return true;
+ if (!isDerivedMember()) {
+ Path.push_back(Derived);
+ return true;
+ }
+ if (!castBack(Derived))
+ return false;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(false);
+ return true;
+ }
+ /// Perform a derived-to-base member pointer cast.
+ bool castToBase(const CXXRecordDecl *Base) {
+ if (!getDecl())
+ return true;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(true);
+ if (isDerivedMember()) {
+ Path.push_back(Base);
+ return true;
+ }
+ return castBack(Base);
+ }
+ };
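The cast logic above models member pointer conversions such as (a C++11 sketch):

    struct B { int m; };
    struct D : B {};
    constexpr int D::*pd = &B::m;                     // base-to-derived ([conv.mem])
    constexpr int B::*pb = static_cast<int B::*>(pd); // cast back up the path
    static_assert(pb == &B::m, "round trip preserves the member");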
+
+ /// Compare two member pointers, which are assumed to be of the same type.
+ static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
+ if (!LHS.getDecl() || !RHS.getDecl())
+ return !LHS.getDecl() && !RHS.getDecl();
+ if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
+ return false;
+ return LHS.Path == RHS.Path;
+ }
+
+ /// Kinds of constant expression checking, for diagnostics.
+ enum CheckConstantExpressionKind {
+ CCEK_Constant, ///< A normal constant.
+ CCEK_ReturnValue, ///< A constexpr function return value.
+ CCEK_MemberInit ///< A constexpr constructor mem-initializer.
};
}
-static bool Evaluate(EvalInfo &info, const Expr *E);
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
+ const LValue &This, const Expr *E,
+ CheckConstantExpressionKind CCEK = CCEK_Constant,
+ bool AllowNonLiteralTypes = false);
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+ EvalInfo &Info);
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
-static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info);
static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
@@ -134,116 +897,306 @@ static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
// Misc utilities
//===----------------------------------------------------------------------===//
-static bool IsGlobalLValue(const Expr* E) {
- if (!E) return true;
+/// Should this call expression be treated as a string literal?
+static bool IsStringLiteralCall(const CallExpr *E) {
+ unsigned Builtin = E->isBuiltinCall();
+ return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
+ Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
+}
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
- if (isa<FunctionDecl>(DRE->getDecl()))
- return true;
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+static bool IsGlobalLValue(APValue::LValueBase B) {
+ // C++11 [expr.const]p3 An address constant expression is a prvalue core
+ // constant expression of pointer type that evaluates to...
+
+ // ... a null pointer value, or a prvalue core constant expression of type
+ // std::nullptr_t.
+ if (!B) return true;
+
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ // ... the address of an object with static storage duration,
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->hasGlobalStorage();
- return false;
+ // ... the address of a function,
+ return isa<FunctionDecl>(D);
}
- if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(E))
- return CLE->isFileScope();
+ const Expr *E = B.get<const Expr*>();
+ switch (E->getStmtClass()) {
+ default:
+ return false;
+ case Expr::CompoundLiteralExprClass: {
+ const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ return CLE->isFileScope() && CLE->isLValue();
+ }
+ // A string literal has static storage duration.
+ case Expr::StringLiteralClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::CXXTypeidExprClass:
+ return true;
+ case Expr::CallExprClass:
+ return IsStringLiteralCall(cast<CallExpr>(E));
+ // For GCC compatibility, &&label has static storage duration.
+ case Expr::AddrLabelExprClass:
+ return true;
+ // A Block literal expression may be used as the initialization value for
+ // Block variables at global or local static scope.
+ case Expr::BlockExprClass:
+ return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
+ case Expr::ImplicitValueInitExprClass:
+ // FIXME:
+ // We can never form an lvalue with an implicit value initialization as its
+ // base through expression evaluation, so these only appear in one case: the
+ // implicit variable declaration we invent when checking whether a constexpr
+ // constructor can produce a constant expression. We must assume that such
+ // an expression might be a global lvalue.
+ return true;
+ }
+}
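In source terms, the bases this function accepts include (a sketch):

    static int object;
    int function();
    constexpr int *p = &object;           // object with static storage duration
    constexpr int (*f)() = &function;     // address of a function
    constexpr const char *s = "literal";  // string literal
    constexpr int *null = nullptr;        // null pointer value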
- return true;
+static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
+ assert(Base && "no location for a null lvalue");
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ if (VD)
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ else
+ Info.Note(Base.dyn_cast<const Expr*>()->getExprLoc(),
+ diag::note_constexpr_temporary_here);
}
-static bool EvalPointerValueAsBool(LValue& Value, bool& Result) {
- const Expr* Base = Value.Base;
+/// Check that this reference or pointer core constant expression is a valid
+/// value for an address or reference constant expression. Return true if we
+/// can fold this expression, whether or not it's a constant expression.
+static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
+ QualType Type, const LValue &LVal) {
+ bool IsReferenceType = Type->isReferenceType();
- // A null base expression indicates a null pointer. These are always
- // evaluatable, and they are false unless the offset is zero.
+ APValue::LValueBase Base = LVal.getLValueBase();
+ const SubobjectDesignator &Designator = LVal.getLValueDesignator();
+
+ // Check that the object is a global. Note that the fake 'this' object we
+ // manufacture when checking potential constant expressions is conservatively
+ // assumed to be global here.
+ if (!IsGlobalLValue(Base)) {
+ if (Info.getLangOpts().CPlusPlus0x) {
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ Info.Diag(Loc, diag::note_constexpr_non_global, 1)
+ << IsReferenceType << !Designator.Entries.empty()
+ << !!VD << VD;
+ NoteLValueLocation(Info, Base);
+ } else {
+ Info.Diag(Loc);
+ }
+ // Don't allow references to temporaries to escape.
+ return false;
+ }
+ assert((Info.CheckingPotentialConstantExpression ||
+ LVal.getLValueCallIndex() == 0) &&
+ "have call index for global lvalue");
+
+ // Allow address constant expressions to be past-the-end pointers. This is
+ // an extension: the standard requires them to point to an object.
+ if (!IsReferenceType)
+ return true;
+
+ // A reference constant expression must refer to an object.
if (!Base) {
- Result = !Value.Offset.isZero();
+ // FIXME: diagnostic
+ Info.CCEDiag(Loc);
return true;
}
- // Require the base expression to be a global l-value.
- if (!IsGlobalLValue(Base)) return false;
+ // Does this refer one past the end of some object?
+ if (Designator.isOnePastTheEnd()) {
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ Info.Diag(Loc, diag::note_constexpr_past_end, 1)
+ << !Designator.Entries.empty() << !!VD << VD;
+ NoteLValueLocation(Info, Base);
+ }
- // We have a non-null base expression. These are generally known to
- // be true, but if it'a decl-ref to a weak symbol it can be null at
- // runtime.
- Result = true;
+ return true;
+}
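For example (a sketch): a reference or pointer constant expression may not refer to an object without static storage duration.

    int global;
    constexpr int &ok = global;  // OK: a global lvalue
    void f() {
      int local;
      // static constexpr int *bad = &local; // error: not a global lvalue
      (void)local;
    }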
- const DeclRefExpr* DeclRef = dyn_cast<DeclRefExpr>(Base);
- if (!DeclRef)
+/// Check that this core constant expression is of literal type, and if not,
+/// produce an appropriate diagnostic.
+static bool CheckLiteralType(EvalInfo &Info, const Expr *E) {
+ if (!E->isRValue() || E->getType()->isLiteralType())
return true;
- // If it's a weak symbol, it isn't constant-evaluable.
- const ValueDecl* Decl = DeclRef->getDecl();
- if (Decl->hasAttr<WeakAttr>() ||
- Decl->hasAttr<WeakRefAttr>() ||
- Decl->isWeakImported())
- return false;
+ // Prvalue constant expressions must be of literal types.
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.Diag(E, diag::note_constexpr_nonliteral)
+ << E->getType();
+ else
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
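A sketch of what this check rejects:

    struct Literal { int x; };                  // aggregate: a literal type
    struct NonLiteral { NonLiteral(); int x; }; // non-constexpr user ctor
    constexpr Literal l = {1};                  // OK
    // constexpr NonLiteral nl;                 // error: non-literal type
    //                                          // (note_constexpr_nonliteral)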
+/// Check that this core constant expression value is a valid value for a
+/// constant expression. If not, report an appropriate diagnostic. Does not
+/// check that the expression is of literal type.
+static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
+ QualType Type, const APValue &Value) {
+ // Core issue 1454: For a literal constant expression of array or class type,
+ // each subobject of its value shall have been initialized by a constant
+ // expression.
+ if (Value.isArray()) {
+ QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
+ for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
+ if (!CheckConstantExpression(Info, DiagLoc, EltTy,
+ Value.getArrayInitializedElt(I)))
+ return false;
+ }
+ if (!Value.hasArrayFiller())
+ return true;
+ return CheckConstantExpression(Info, DiagLoc, EltTy,
+ Value.getArrayFiller());
+ }
+ if (Value.isUnion() && Value.getUnionField()) {
+ return CheckConstantExpression(Info, DiagLoc,
+ Value.getUnionField()->getType(),
+ Value.getUnionValue());
+ }
+ if (Value.isStruct()) {
+ RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ unsigned BaseIndex = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
+ End = CD->bases_end(); I != End; ++I, ++BaseIndex) {
+ if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
+ Value.getStructBase(BaseIndex)))
+ return false;
+ }
+ }
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ if (!CheckConstantExpression(Info, DiagLoc, (*I)->getType(),
+ Value.getStructField((*I)->getFieldIndex())))
+ return false;
+ }
+ }
+
+ if (Value.isLValue()) {
+ LValue LVal;
+ LVal.setFrom(Info.Ctx, Value);
+ return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal);
+ }
+
+ // Everything else is fine.
return true;
}
-static bool HandleConversionToBool(const Expr* E, bool& Result,
- EvalInfo &Info) {
- if (E->getType()->isIntegralOrEnumerationType()) {
- APSInt IntResult;
- if (!EvaluateInteger(E, IntResult, Info))
- return false;
- Result = IntResult != 0;
+const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
+ return LVal.Base.dyn_cast<const ValueDecl*>();
+}
+
+static bool IsLiteralLValue(const LValue &Value) {
+ return Value.Base.dyn_cast<const Expr*>() && !Value.CallIndex;
+}
+
+static bool IsWeakLValue(const LValue &Value) {
+ const ValueDecl *Decl = GetLValueBaseDecl(Value);
+ return Decl && Decl->isWeak();
+}
+
+static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
+ // A null base expression indicates a null pointer. These are always
+ // evaluatable, and they are false unless the offset is zero.
+ if (!Value.getLValueBase()) {
+ Result = !Value.getLValueOffset().isZero();
return true;
- } else if (E->getType()->isRealFloatingType()) {
- APFloat FloatResult(0.0);
- if (!EvaluateFloat(E, FloatResult, Info))
- return false;
- Result = !FloatResult.isZero();
+ }
+
+ // We have a non-null base. These are generally known to be true, but if it's
+ // a weak declaration it can be null at runtime.
+ Result = true;
+ const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
+ return !Decl || !Decl->isWeak();
+}
+
+static bool HandleConversionToBool(const APValue &Val, bool &Result) {
+ switch (Val.getKind()) {
+ case APValue::Uninitialized:
+ return false;
+ case APValue::Int:
+ Result = Val.getInt().getBoolValue();
return true;
- } else if (E->getType()->hasPointerRepresentation()) {
- LValue PointerResult;
- if (!EvaluatePointer(E, PointerResult, Info))
- return false;
- return EvalPointerValueAsBool(PointerResult, Result);
- } else if (E->getType()->isAnyComplexType()) {
- ComplexValue ComplexResult;
- if (!EvaluateComplex(E, ComplexResult, Info))
- return false;
- if (ComplexResult.isComplexFloat()) {
- Result = !ComplexResult.getComplexFloatReal().isZero() ||
- !ComplexResult.getComplexFloatImag().isZero();
- } else {
- Result = ComplexResult.getComplexIntReal().getBoolValue() ||
- ComplexResult.getComplexIntImag().getBoolValue();
- }
+ case APValue::Float:
+ Result = !Val.getFloat().isZero();
+ return true;
+ case APValue::ComplexInt:
+ Result = Val.getComplexIntReal().getBoolValue() ||
+ Val.getComplexIntImag().getBoolValue();
return true;
+ case APValue::ComplexFloat:
+ Result = !Val.getComplexFloatReal().isZero() ||
+ !Val.getComplexFloatImag().isZero();
+ return true;
+ case APValue::LValue:
+ return EvalPointerValueAsBool(Val, Result);
+ case APValue::MemberPointer:
+ Result = Val.getMemberPointerDecl();
+ return true;
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ case APValue::AddrLabelDiff:
+ return false;
}
+ llvm_unreachable("unknown APValue kind");
+}
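The switch above covers conversions such as (a sketch):

    constexpr bool b1 = 42;    // Int: non-zero converts to true
    constexpr bool b2 = 0.0;   // Float: zero converts to false
    constexpr int *p = nullptr;
    constexpr bool b3 = p;     // LValue: a null pointer converts to false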
+
+static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
+ APValue Val;
+ if (!Evaluate(Val, Info, E))
+ return false;
+ return HandleConversionToBool(Val, Result);
+}
+
+template<typename T>
+static bool HandleOverflow(EvalInfo &Info, const Expr *E,
+ const T &SrcValue, QualType DestType) {
+ Info.Diag(E, diag::note_constexpr_overflow)
+ << SrcValue << DestType;
return false;
}
-static APSInt HandleFloatToIntCast(QualType DestType, QualType SrcType,
- APFloat &Value, const ASTContext &Ctx) {
- unsigned DestWidth = Ctx.getIntWidth(DestType);
+static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, const APFloat &Value,
+ QualType DestType, APSInt &Result) {
+ unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
// Determine whether we are converting to unsigned or signed.
bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
- // FIXME: Warning for overflow.
- APSInt Result(DestWidth, !DestSigned);
+ Result = APSInt(DestWidth, !DestSigned);
bool ignored;
- (void)Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored);
- return Result;
+ if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
+ & APFloat::opInvalidOp)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
}
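In effect (a sketch):

    constexpr int ok = (int)42.9;      // OK: truncates toward zero to 42
    // constexpr int bad = (int)1e100; // error: note_constexpr_overflow,
    //                                 // value is out of range for 'int'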
-static APFloat HandleFloatToFloatCast(QualType DestType, QualType SrcType,
- APFloat &Value, const ASTContext &Ctx) {
+static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, QualType DestType,
+ APFloat &Result) {
+ APFloat Value = Result;
bool ignored;
- APFloat Result = Value;
- Result.convert(Ctx.getFloatTypeSemantics(DestType),
- APFloat::rmNearestTiesToEven, &ignored);
- return Result;
+ if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
+ APFloat::rmNearestTiesToEven, &ignored)
+ & APFloat::opOverflow)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
}
-static APSInt HandleIntToIntCast(QualType DestType, QualType SrcType,
- APSInt &Value, const ASTContext &Ctx) {
- unsigned DestWidth = Ctx.getIntWidth(DestType);
+static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
+ QualType DestType, QualType SrcType,
+ APSInt &Value) {
+ unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
APSInt Result = Value;
// Figure out if this is a truncate, extend or noop cast.
// If the input is signed, do a sign extend, noop, or truncate.
@@ -252,22 +1205,1071 @@ static APSInt HandleIntToIntCast(QualType DestType, QualType SrcType,
return Result;
}
-static APFloat HandleIntToFloatCast(QualType DestType, QualType SrcType,
- APSInt &Value, const ASTContext &Ctx) {
+static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, const APSInt &Value,
+ QualType DestType, APFloat &Result) {
+ Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
+ if (Result.convertFromAPInt(Value, Value.isSigned(),
+ APFloat::rmNearestTiesToEven)
+ & APFloat::opOverflow)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
- APFloat Result(Ctx.getFloatTypeSemantics(DestType), 1);
- Result.convertFromAPInt(Value, Value.isSigned(),
- APFloat::rmNearestTiesToEven);
- return Result;
+static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
+ llvm::APInt &Res) {
+ APValue SVal;
+ if (!Evaluate(SVal, Info, E))
+ return false;
+ if (SVal.isInt()) {
+ Res = SVal.getInt();
+ return true;
+ }
+ if (SVal.isFloat()) {
+ Res = SVal.getFloat().bitcastToAPInt();
+ return true;
+ }
+ if (SVal.isVector()) {
+ QualType VecTy = E->getType();
+ unsigned VecSize = Info.Ctx.getTypeSize(VecTy);
+ QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
+ unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+ Res = llvm::APInt::getNullValue(VecSize);
+ for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
+ APValue &Elt = SVal.getVectorElt(i);
+ llvm::APInt EltAsInt;
+ if (Elt.isInt()) {
+ EltAsInt = Elt.getInt();
+ } else if (Elt.isFloat()) {
+ EltAsInt = Elt.getFloat().bitcastToAPInt();
+ } else {
+ // Don't try to handle vectors of anything other than int or float
+ // (not sure if it's possible to hit this case).
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ unsigned BaseEltSize = EltAsInt.getBitWidth();
+ if (BigEndian)
+ Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize);
+ else
+ Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize);
+ }
+ return true;
+ }
+ // Give up if the input isn't an int, float, or vector. For example, we
+ // reject "(v4i16)(intptr_t)&a".
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
+
+/// Cast an lvalue referring to a base subobject to a derived class, by
+/// truncating the lvalue's path to the given length.
+static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
+ const RecordDecl *TruncatedType,
+ unsigned TruncatedElements) {
+ SubobjectDesignator &D = Result.Designator;
+
+ // Check we actually point to a derived class object.
+ if (TruncatedElements == D.Entries.size())
+ return true;
+ assert(TruncatedElements >= D.MostDerivedPathLength &&
+ "not casting to a derived class");
+ if (!Result.checkSubobject(Info, E, CSK_Derived))
+ return false;
+
+ // Truncate the path to the subobject, and remove any derived-to-base offsets.
+ const RecordDecl *RD = TruncatedType;
+ for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+ const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]);
+ if (isVirtualBaseClass(D.Entries[I]))
+ Result.Offset -= Layout.getVBaseClassOffset(Base);
+ else
+ Result.Offset -= Layout.getBaseClassOffset(Base);
+ RD = Base;
+ }
+ D.Entries.resize(TruncatedElements);
+ return true;
+}
+
+static void HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ const ASTRecordLayout *RL = 0) {
+ if (!RL) RL = &Info.Ctx.getASTRecordLayout(Derived);
+ Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
+ Obj.addDecl(Info, E, Base, /*Virtual*/ false);
+}
+
+static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+ const CXXRecordDecl *DerivedDecl,
+ const CXXBaseSpecifier *Base) {
+ const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
+
+ if (!Base->isVirtual()) {
+ HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl);
+ return true;
+ }
+
+ SubobjectDesignator &D = Obj.Designator;
+ if (D.Invalid)
+ return false;
+
+ // Extract most-derived object and corresponding type.
+ DerivedDecl = D.MostDerivedType->getAsCXXRecordDecl();
+ if (!CastToDerivedClass(Info, E, Obj, DerivedDecl, D.MostDerivedPathLength))
+ return false;
+
+ // Find the virtual base class.
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
+ Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl);
+ Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true);
+ return true;
+}
+
+/// Update LVal to refer to the given field, which must be a member of the type
+/// currently described by LVal.
+static void HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
+ const FieldDecl *FD,
+ const ASTRecordLayout *RL = 0) {
+ if (!RL)
+ RL = &Info.Ctx.getASTRecordLayout(FD->getParent());
+
+ unsigned I = FD->getFieldIndex();
+ LVal.Offset += Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I));
+ LVal.addDecl(Info, E, FD);
+}
+
+/// Update LVal to refer to the given indirect field.
+static void HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
+ LValue &LVal,
+ const IndirectFieldDecl *IFD) {
+ for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
+ CE = IFD->chain_end(); C != CE; ++C)
+ HandleLValueMember(Info, E, LVal, cast<FieldDecl>(*C));
+}
+
+/// Get the size of the given type in char units.
+static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
+ QualType Type, CharUnits &Size) {
+ // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
+ // extension.
+ if (Type->isVoidType() || Type->isFunctionType()) {
+ Size = CharUnits::One();
+ return true;
+ }
+
+ if (!Type->isConstantSizeType()) {
+    // sizeof(vla) is not a constant expression: C99 6.5.3.4p2.
+ // FIXME: Better diagnostic.
+ Info.Diag(Loc);
+ return false;
+ }
+
+ Size = Info.Ctx.getTypeSizeInChars(Type);
+ return true;
+}
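A sketch of the extension being honored (with GNU extensions enabled, e.g. clang -std=gnu++11, which accepts this with a warning):

    static_assert(sizeof(void) == 1, "sizeof(void) is 1 as a GNU extension");
    // sizeof applied to a VLA is still rejected in constant expressions
    // (C99 6.5.3.4p2).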
+
+/// Update a pointer value to model pointer arithmetic.
+/// \param Info - Information about the ongoing evaluation.
+/// \param E - The expression being evaluated, for diagnostic purposes.
+/// \param LVal - The pointer value to be updated.
+/// \param EltTy - The pointee type represented by LVal.
+/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
+static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ int64_t Adjustment) {
+ CharUnits SizeOfPointee;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
+ return false;
+
+ // Compute the new offset in the appropriate width.
+ LVal.Offset += Adjustment * SizeOfPointee;
+ LVal.adjustIndex(Info, E, Adjustment);
+ return true;
+}
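The byte offset and the designator index move together; in source terms (a sketch):

    constexpr int arr[4] = {1, 2, 3, 4};
    constexpr const int *p = arr + 3;  // Offset += 3 * sizeof(int); index 3
    constexpr int v = *p;              // reads arr[3]
    static_assert(v == 4, "");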
+
+/// Update an lvalue to refer to a component of a complex number.
+/// \param Info - Information about the ongoing evaluation.
+/// \param LVal - The lvalue to be updated.
+/// \param EltTy - The complex number's component type.
+/// \param Imag - False for the real component, true for the imaginary.
+static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ bool Imag) {
+ if (Imag) {
+ CharUnits SizeOfComponent;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfComponent))
+ return false;
+ LVal.Offset += SizeOfComponent;
+ }
+ LVal.addComplex(Info, E, EltTy, Imag);
+ return true;
+}
+
+/// Try to evaluate the initializer for a variable declaration.
+static bool EvaluateVarDeclInit(EvalInfo &Info, const Expr *E,
+ const VarDecl *VD,
+ CallStackFrame *Frame, APValue &Result) {
+ // If this is a parameter to an active constexpr function call, perform
+ // argument substitution.
+ if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
+ // Assume arguments of a potential constant expression are unknown
+ // constant expressions.
+ if (Info.CheckingPotentialConstantExpression)
+ return false;
+ if (!Frame || !Frame->Arguments) {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ Result = Frame->Arguments[PVD->getFunctionScopeIndex()];
+ return true;
+ }
+
+ // Dig out the initializer, and use the declaration which it's attached to.
+ const Expr *Init = VD->getAnyInitializer(VD);
+ if (!Init || Init->isValueDependent()) {
+ // If we're checking a potential constant expression, the variable could be
+ // initialized later.
+ if (!Info.CheckingPotentialConstantExpression)
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ // If we're currently evaluating the initializer of this declaration, use that
+ // in-flight value.
+ if (Info.EvaluatingDecl == VD) {
+ Result = *Info.EvaluatingDeclValue;
+ return !Result.isUninit();
+ }
+
+ // Never evaluate the initializer of a weak variable. We can't be sure that
+ // this is the definition which will be used.
+ if (VD->isWeak()) {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ // Check that we can fold the initializer. In C++, we will have already done
+ // this in the cases where it matters for conformance.
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ if (!VD->evaluateValue(Notes)) {
+ Info.Diag(E, diag::note_constexpr_var_init_non_constant,
+ Notes.size() + 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ Info.addNotes(Notes);
+ return false;
+ } else if (!VD->checkInitIsICE()) {
+ Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
+ Notes.size() + 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ Info.addNotes(Notes);
+ }
+
+ Result = *VD->getEvaluatedValue();
+ return true;
+}
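In source terms (a sketch):

    constexpr int base = 10;
    constexpr int twice = base * 2;  // folds using base's evaluated initializer
    extern int external;             // no initializer visible here:
    // constexpr int bad = external; // error: not usable in a constant
    //                               // expression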
+
+static bool IsConstNonVolatile(QualType T) {
+ Qualifiers Quals = T.getQualifiers();
+ return Quals.hasConst() && !Quals.hasVolatile();
+}
+
+/// Get the base index of the given base class within an APValue representing
+/// the given derived class.
+static unsigned getBaseIndex(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base) {
+ Base = Base->getCanonicalDecl();
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
+ E = Derived->bases_end(); I != E; ++I, ++Index) {
+ if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
+ return Index;
+ }
+
+ llvm_unreachable("base class missing from derived class's bases list");
+}
+
+/// Extract the value of a character from a string literal.
+static APSInt ExtractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
+ uint64_t Index) {
+ // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant
+ const StringLiteral *S = dyn_cast<StringLiteral>(Lit);
+ assert(S && "unexpected string literal expression kind");
+
+ APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
+ Lit->getType()->getArrayElementTypeNoTypeQual()->isUnsignedIntegerType());
+ if (Index < S->getLength())
+ Value = S->getCodeUnit(Index);
+ return Value;
+}
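In source terms (a sketch):

    constexpr char c = "clang"[2];  // 'a': a code unit read from the literal
    constexpr char z = "hi"[2];     // '\0': an index at the length yields the
                                    // implicit terminator (value zero)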
+
+/// Extract the designated sub-object of an rvalue.
+static bool ExtractSubobject(EvalInfo &Info, const Expr *E,
+ APValue &Obj, QualType ObjType,
+ const SubobjectDesignator &Sub, QualType SubType) {
+ if (Sub.Invalid)
+ // A diagnostic will have already been produced.
+ return false;
+ if (Sub.isOnePastTheEnd()) {
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ if (Sub.Entries.empty())
+ return true;
+ if (Info.CheckingPotentialConstantExpression && Obj.isUninit())
+ // This object might be initialized later.
+ return false;
+
+ APValue *O = &Obj;
+ // Walk the designator's path to find the subobject.
+ for (unsigned I = 0, N = Sub.Entries.size(); I != N; ++I) {
+ if (ObjType->isArrayType()) {
+ // Next subobject is an array element.
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType);
+ assert(CAT && "vla in literal type?");
+ uint64_t Index = Sub.Entries[I].ArrayIndex;
+ if (CAT->getSize().ule(Index)) {
+ // Note, it should not be possible to form a pointer with a valid
+ // designator which points more than one past the end of the array.
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ // An array object is represented as either an Array APValue or as an
+ // LValue which refers to a string literal.
+ if (O->isLValue()) {
+ assert(I == N - 1 && "extracting subobject of character?");
+ assert(!O->hasLValuePath() || O->getLValuePath().empty());
+ Obj = APValue(ExtractStringLiteralCharacter(
+ Info, O->getLValueBase().get<const Expr*>(), Index));
+ return true;
+ } else if (O->getArrayInitializedElts() > Index)
+ O = &O->getArrayInitializedElt(Index);
+ else
+ O = &O->getArrayFiller();
+ ObjType = CAT->getElementType();
+ } else if (ObjType->isAnyComplexType()) {
+ // Next subobject is a complex number.
+ uint64_t Index = Sub.Entries[I].ArrayIndex;
+ if (Index > 1) {
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ assert(I == N - 1 && "extracting subobject of scalar?");
+ if (O->isComplexInt()) {
+ Obj = APValue(Index ? O->getComplexIntImag()
+ : O->getComplexIntReal());
+ } else {
+ assert(O->isComplexFloat());
+ Obj = APValue(Index ? O->getComplexFloatImag()
+ : O->getComplexFloatReal());
+ }
+ return true;
+ } else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
+ if (Field->isMutable()) {
+ Info.Diag(E, diag::note_constexpr_ltor_mutable, 1)
+ << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
+ // Next subobject is a class, struct or union field.
+ RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ const FieldDecl *UnionField = O->getUnionField();
+ if (!UnionField ||
+ UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) {
+ Info.Diag(E, diag::note_constexpr_read_inactive_union_member)
+ << Field << !UnionField << UnionField;
+ return false;
+ }
+ O = &O->getUnionValue();
+ } else
+ O = &O->getStructField(Field->getFieldIndex());
+ ObjType = Field->getType();
+
+ if (ObjType.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ // FIXME: Include a description of the path to the volatile subobject.
+ Info.Diag(E, diag::note_constexpr_ltor_volatile_obj, 1)
+ << 2 << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return false;
+ }
+ } else {
+ // Next subobject is a base class.
+ const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
+ const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
+ O = &O->getStructBase(getBaseIndex(Derived, Base));
+ ObjType = Info.Ctx.getRecordType(Base);
+ }
+
+ if (O->isUninit()) {
+ if (!Info.CheckingPotentialConstantExpression)
+ Info.Diag(E, diag::note_constexpr_read_uninit);
+ return false;
+ }
+ }
+
+ // This may look super-stupid, but it serves an important purpose: if we just
+ // swapped Obj and *O, we'd create an object which had itself as a subobject.
+ // To avoid the leak, we ensure that Tmp ends up owning the original complete
+ // object, which is destroyed by Tmp's destructor.
+ APValue Tmp;
+ O->swap(Tmp);
+ Obj.swap(Tmp);
+ return true;
+}
+
+/// Find the position where two subobject designators diverge, or equivalently
+/// the length of the common initial subsequence.
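+/// For example, the designators for '&s.a[0]' and '&s.a[2]' share the entry
+/// for field 'a' and diverge at position 1, with WasArrayIndex set to true.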
+static unsigned FindDesignatorMismatch(QualType ObjType,
+ const SubobjectDesignator &A,
+ const SubobjectDesignator &B,
+ bool &WasArrayIndex) {
+ unsigned I = 0, N = std::min(A.Entries.size(), B.Entries.size());
+ for (/**/; I != N; ++I) {
+ if (!ObjType.isNull() &&
+ (ObjType->isArrayType() || ObjType->isAnyComplexType())) {
+ // Next subobject is an array element.
+ if (A.Entries[I].ArrayIndex != B.Entries[I].ArrayIndex) {
+ WasArrayIndex = true;
+ return I;
+ }
+ if (ObjType->isAnyComplexType())
+ ObjType = ObjType->castAs<ComplexType>()->getElementType();
+ else
+ ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
+ } else {
+ if (A.Entries[I].BaseOrMember != B.Entries[I].BaseOrMember) {
+ WasArrayIndex = false;
+ return I;
+ }
+ if (const FieldDecl *FD = getAsField(A.Entries[I]))
+ // Next subobject is a field.
+ ObjType = FD->getType();
+ else
+ // Next subobject is a base class.
+ ObjType = QualType();
+ }
+ }
+ WasArrayIndex = false;
+ return I;
+}
+
+/// Determine whether the given subobject designators refer to elements of the
+/// same array object.
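+/// For example, '&arr[0]' and '&arr[2]' designate elements of the same array;
+/// '&arr[0].x' and '&arr[1].x' do not, since each is a subobject of its array
+/// element and so their complete paths would have to match.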
+static bool AreElementsOfSameArray(QualType ObjType,
+ const SubobjectDesignator &A,
+ const SubobjectDesignator &B) {
+ if (A.Entries.size() != B.Entries.size())
+ return false;
+
+ bool IsArray = A.MostDerivedArraySize != 0;
+ if (IsArray && A.MostDerivedPathLength != A.Entries.size())
+ // A is a subobject of the array element.
+ return false;
+
+ // If A (and B) designates an array element, the last entry will be the array
+ // index. That doesn't have to match. Otherwise, we're in the 'implicit array
+ // of length 1' case, and the entire path must match.
+ bool WasArrayIndex;
+ unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex);
+ return CommonLength >= A.Entries.size() - IsArray;
+}
+
+/// HandleLValueToRValueConversion - Perform an lvalue-to-rvalue conversion on
+/// the given lvalue. This can also be used for 'lvalue-to-lvalue' conversions
+/// for looking up the glvalue referred to by an entity of reference type.
+///
+/// \param Info - Information about the ongoing evaluation.
+/// \param Conv - The expression for which we are performing the conversion.
+/// Used for diagnostics.
+/// \param Type - The type we expect this conversion to produce, before
+/// stripping cv-qualifiers in the case of a non-class type.
+/// \param LVal - The glvalue on which we are attempting to perform this action.
+/// \param RVal - The produced value will be placed here.
+static bool HandleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
+ QualType Type,
+ const LValue &LVal, APValue &RVal) {
+ if (LVal.Designator.Invalid)
+ // A diagnostic will have already been produced.
+ return false;
+
+ const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
+
+ if (!LVal.Base) {
+ // FIXME: Indirection through a null pointer deserves a specific diagnostic.
+ Info.Diag(Conv, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ CallStackFrame *Frame = 0;
+ if (LVal.CallIndex) {
+ Frame = Info.getCallFrame(LVal.CallIndex);
+ if (!Frame) {
+ Info.Diag(Conv, diag::note_constexpr_lifetime_ended, 1) << !Base;
+ NoteLValueLocation(Info, LVal.Base);
+ return false;
+ }
+ }
+
+ // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
+ // is not a constant expression (even if the object is non-volatile). We also
+ // apply this rule to C++98, in order to conform to the expected 'volatile'
+ // semantics.
+ if (Type.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus)
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_type) << Type;
+ else
+ Info.Diag(Conv);
+ return false;
+ }
+
+ if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
+ // In C++98, const, non-volatile integers initialized with ICEs are ICEs.
+ // In C++11, constexpr, non-volatile variables initialized with constant
+ // expressions are constant expressions too. Inside constexpr functions,
+ // parameters are constant expressions even if they're non-const.
+ // In C, such things can also be folded, although they are not ICEs.
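+ // For example, 'const int n = 5; char buf[n];' is valid C++98, and a
+ // 'constexpr' variable's value can be read here in C++11.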
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD) {
+ if (const VarDecl *VDef = VD->getDefinition(Info.Ctx))
+ VD = VDef;
+ }
+ if (!VD || VD->isInvalidDecl()) {
+ Info.Diag(Conv);
+ return false;
+ }
+
+ // DR1313: If the object is volatile-qualified but the glvalue was not,
+ // behavior is undefined so the result is not a constant expression.
+ QualType VT = VD->getType();
+ if (VT.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_obj, 1) << 1 << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+
+ if (!isa<ParmVarDecl>(VD)) {
+ if (VD->isConstexpr()) {
+ // OK, we can read this variable.
+ } else if (VT->isIntegralOrEnumerationType()) {
+ if (!VT.isConstQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_non_const_int, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+ } else if (VT->isFloatingType() && VT.isConstQualified()) {
+ // We support folding of const floating-point types, in order to make
+ // static const data members of such types (supported as an extension)
+ // more useful.
+ if (Info.getLangOpts().CPlusPlus0x) {
+ Info.CCEDiag(Conv, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.CCEDiag(Conv);
+ }
+ } else {
+ // FIXME: Allow folding of values of any literal type in all languages.
+ if (Info.getLangOpts().CPlusPlus0x) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+ }
+
+ if (!EvaluateVarDeclInit(Info, Conv, VD, Frame, RVal))
+ return false;
+
+ if (isa<ParmVarDecl>(VD) || !VD->getAnyInitializer()->isLValue())
+ return ExtractSubobject(Info, Conv, RVal, VT, LVal.Designator, Type);
+
+ // The declaration was initialized by an lvalue, with no lvalue-to-rvalue
+ // conversion. This happens when the declaration and the lvalue should be
+ // considered synonymous, for instance when initializing an array of char
+ // from a string literal. Continue as if the initializer lvalue was the
+ // value we were originally given.
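+ // For example, 'constexpr char s[] = "str";' is initialized directly from
+ // the string literal lvalue, and a subsequent read of 's[0]' re-traverses
+ // that literal via ExtractSubobject below.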
+ assert(RVal.getLValueOffset().isZero() &&
+ "offset for lvalue init of non-reference");
+ Base = RVal.getLValueBase().get<const Expr*>();
+
+ if (unsigned CallIndex = RVal.getLValueCallIndex()) {
+ Frame = Info.getCallFrame(CallIndex);
+ if (!Frame) {
+ Info.Diag(Conv, diag::note_constexpr_lifetime_ended, 1) << !Base;
+ NoteLValueLocation(Info, RVal.getLValueBase());
+ return false;
+ }
+ } else {
+ Frame = 0;
+ }
+ }
+
+ // Volatile temporary objects cannot be read in constant expressions.
+ if (Base->getType().isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_obj, 1) << 0;
+ Info.Note(Base->getExprLoc(), diag::note_constexpr_temporary_here);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+
+ if (Frame) {
+ // If this is a temporary expression with a nontrivial initializer, grab the
+ // value from the relevant stack frame.
+ RVal = Frame->Temporaries[Base];
+ } else if (const CompoundLiteralExpr *CLE
+ = dyn_cast<CompoundLiteralExpr>(Base)) {
+ // In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
+ // initializer until now for such expressions. Such an expression can't be
+ // an ICE in C, so this only matters for fold.
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ if (!Evaluate(RVal, Info, CLE->getInitializer()))
+ return false;
+ } else if (isa<StringLiteral>(Base)) {
+ // We represent a string literal array as an lvalue pointing at the
+ // corresponding expression, rather than building an array of chars.
+ // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant
+ RVal = APValue(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0);
+ } else {
+ Info.Diag(Conv, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ return ExtractSubobject(Info, Conv, RVal, Base->getType(), LVal.Designator,
+ Type);
+}
+
+/// Build an lvalue for the object argument of a member function call.
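+/// For example, in 'p->f()' the object argument is the pointer 'p'; in
+/// 'x.f()' it is the glvalue 'x'; in 'S().f()' it is a temporary of literal
+/// type.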
+static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
+ LValue &This) {
+ if (Object->getType()->isPointerType())
+ return EvaluatePointer(Object, This, Info);
+
+ if (Object->isGLValue())
+ return EvaluateLValue(Object, This, Info);
+
+ if (Object->getType()->isLiteralType())
+ return EvaluateTemporary(Object, This, Info);
+
+ return false;
+}
+
+/// HandleMemberPointerAccess - Evaluate a member access operation and build an
+/// lvalue referring to the result.
+///
+/// \param Info - Information about the ongoing evaluation.
+/// \param BO - The member pointer access operation.
+/// \param LV - Filled in with a reference to the resulting object.
+/// \param IncludeMember - Specifies whether the member itself is included in
+/// the resulting LValue subobject designator. This is not possible when
+/// creating a bound member function.
+/// \return The field or method declaration to which the member pointer refers,
+/// or 0 if evaluation fails.
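+///
+/// For example, for a direct access 'obj.*&S::x' the member pointer's path is
+/// empty, so only the final HandleLValueMember step runs; casts between base
+/// and derived member pointer types add path entries that must be matched
+/// against (or appended to) the lvalue's designator first.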
+static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
+ const BinaryOperator *BO,
+ LValue &LV,
+ bool IncludeMember = true) {
+ assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI);
+
+ bool EvalObjOK = EvaluateObjectArgument(Info, BO->getLHS(), LV);
+ if (!EvalObjOK && !Info.keepEvaluatingAfterFailure())
+ return 0;
+
+ MemberPtr MemPtr;
+ if (!EvaluateMemberPointer(BO->getRHS(), MemPtr, Info))
+ return 0;
+
+ // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to
+ // member value, the behavior is undefined.
+ if (!MemPtr.getDecl())
+ return 0;
+
+ if (!EvalObjOK)
+ return 0;
+
+ if (MemPtr.isDerivedMember()) {
+ // This is a member of some derived class. Truncate LV appropriately.
+ // The end of the derived-to-base path for the base object must match the
+ // derived-to-base path for the member pointer.
+ if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() >
+ LV.Designator.Entries.size())
+ return 0;
+ unsigned PathLengthToMember =
+ LV.Designator.Entries.size() - MemPtr.Path.size();
+ for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
+ const CXXRecordDecl *LVDecl = getAsBaseClass(
+ LV.Designator.Entries[PathLengthToMember + I]);
+ const CXXRecordDecl *MPDecl = MemPtr.Path[I];
+ if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl())
+ return 0;
+ }
+
+ // Truncate the lvalue to the appropriate derived class.
+ if (!CastToDerivedClass(Info, BO, LV, MemPtr.getContainingRecord(),
+ PathLengthToMember))
+ return 0;
+ } else if (!MemPtr.Path.empty()) {
+ // Extend the LValue path with the member pointer's path.
+ LV.Designator.Entries.reserve(LV.Designator.Entries.size() +
+ MemPtr.Path.size() + IncludeMember);
+
+ // Walk down to the appropriate base class.
+ QualType LVType = BO->getLHS()->getType();
+ if (const PointerType *PT = LVType->getAs<PointerType>())
+ LVType = PT->getPointeeType();
+ const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl();
+ assert(RD && "member pointer access on non-class-type expression");
+ // The first class in the path is that of the lvalue.
+ for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
+ HandleLValueDirectBase(Info, BO, LV, RD, Base);
+ RD = Base;
+ }
+ // Finally cast to the class containing the member.
+ HandleLValueDirectBase(Info, BO, LV, RD, MemPtr.getContainingRecord());
+ }
+
+ // Add the member. Note that we cannot build bound member functions here.
+ if (IncludeMember) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl()))
+ HandleLValueMember(Info, BO, LV, FD);
+ else if (const IndirectFieldDecl *IFD =
+ dyn_cast<IndirectFieldDecl>(MemPtr.getDecl()))
+ HandleLValueIndirectMember(Info, BO, LV, IFD);
+ else
+ llvm_unreachable("can't construct reference to bound member function");
+ }
+
+ return MemPtr.getDecl();
+}
+
+/// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on
+/// the provided lvalue, which currently refers to the base object.
+static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
+ LValue &Result) {
+ SubobjectDesignator &D = Result.Designator;
+ if (D.Invalid || !Result.checkNullPointer(Info, E, CSK_Derived))
+ return false;
+
+ QualType TargetQT = E->getType();
+ if (const PointerType *PT = TargetQT->getAs<PointerType>())
+ TargetQT = PT->getPointeeType();
+
+ // Check that this cast lands within the final derived-to-base subobject path.
+ if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) {
+ Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << D.MostDerivedType << TargetQT;
+ return false;
+ }
+
+ // Check the type of the final cast. We don't need to check the path,
+ // since a cast can only be formed if the path is unique.
+ unsigned NewEntriesSize = D.Entries.size() - E->path_size();
+ const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl();
+ const CXXRecordDecl *FinalType;
+ if (NewEntriesSize == D.MostDerivedPathLength)
+ FinalType = D.MostDerivedType->getAsCXXRecordDecl();
+ else
+ FinalType = getAsBaseClass(D.Entries[NewEntriesSize - 1]);
+ if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) {
+ Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << D.MostDerivedType << TargetQT;
+ return false;
+ }
+
+ // Truncate the lvalue to the appropriate derived class.
+ return CastToDerivedClass(Info, E, Result, TargetType, NewEntriesSize);
+}
+
+namespace {
+enum EvalStmtResult {
+ /// Evaluation failed.
+ ESR_Failed,
+ /// Hit a 'return' statement.
+ ESR_Returned,
+ /// Evaluation succeeded.
+ ESR_Succeeded
+};
+}
+
+// Evaluate a statement.
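+// Only straight-line bodies are supported here: null statements,
+// declarations, compound statements and 'return'. For example,
+// '{ return n + 1; }' yields ESR_Returned; any loop or branch statement
+// falls into the default case and fails.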
+static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
+ const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default:
+ return ESR_Failed;
+
+ case Stmt::NullStmtClass:
+ case Stmt::DeclStmtClass:
+ return ESR_Succeeded;
+
+ case Stmt::ReturnStmtClass: {
+ const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
+ if (!Evaluate(Result, Info, RetExpr))
+ return ESR_Failed;
+ return ESR_Returned;
+ }
+
+ case Stmt::CompoundStmtClass: {
+ const CompoundStmt *CS = cast<CompoundStmt>(S);
+ for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
+ BE = CS->body_end(); BI != BE; ++BI) {
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI);
+ if (ESR != ESR_Succeeded)
+ return ESR;
+ }
+ return ESR_Succeeded;
+ }
+ }
+}
+
+/// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
+/// default constructor. If so, we'll fold it whether or not it's marked as
+/// constexpr. If it is marked as constexpr, we will never implicitly define it,
+/// so we need special handling.
+static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
+ const CXXConstructorDecl *CD,
+ bool IsValueInitialization) {
+ if (!CD->isTrivial() || !CD->isDefaultConstructor())
+ return false;
+
+ // Value-initialization does not call a trivial default constructor, so such a
+ // call is a core constant expression whether or not the constructor is
+ // constexpr.
+ if (!CD->isConstexpr() && !IsValueInitialization) {
+ if (Info.getLangOpts().CPlusPlus0x) {
+ // FIXME: If DiagDecl is an implicitly-declared special member function,
+ // we should be much more explicit about why it's not constexpr.
+ Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1)
+ << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
+ Info.Note(CD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
+ }
+ }
+ return true;
+}
+
+/// CheckConstexprFunction - Check that a function can be called in a constant
+/// expression.
+static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Declaration,
+ const FunctionDecl *Definition) {
+ // Potential constant expressions can contain calls to declared, but not yet
+ // defined, constexpr functions.
+ if (Info.CheckingPotentialConstantExpression && !Definition &&
+ Declaration->isConstexpr())
+ return false;
+
+ // Can we evaluate this function call?
+ if (Definition && Definition->isConstexpr() && !Definition->isInvalidDecl())
+ return true;
+
+ if (Info.getLangOpts().CPlusPlus0x) {
+ const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
+ // FIXME: If DiagDecl is an implicitly-declared special member function, we
+ // should be much more explicit about why it's not constexpr.
+ Info.Diag(CallLoc, diag::note_constexpr_invalid_function, 1)
+ << DiagDecl->isConstexpr() << isa<CXXConstructorDecl>(DiagDecl)
+ << DiagDecl;
+ Info.Note(DiagDecl->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return false;
+}
+
+namespace {
+typedef SmallVector<APValue, 8> ArgVector;
+}
+
+/// EvaluateArgs - Evaluate the arguments to a function call.
+static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues,
+ EvalInfo &Info) {
+ bool Success = true;
+ for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ if (!Evaluate(ArgValues[I - Args.begin()], Info, *I)) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+ return Success;
+}
+
+/// Evaluate a function call.
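+/// For example, a call 'f(1, 2)' first evaluates both arguments into
+/// ArgValues, then executes f's body in a fresh CallStackFrame; the call
+/// succeeds only if the body reaches a 'return' statement (ESR_Returned).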
+static bool HandleFunctionCall(SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ ArrayRef<const Expr*> Args, const Stmt *Body,
+ EvalInfo &Info, APValue &Result) {
+ ArgVector ArgValues(Args.size());
+ if (!EvaluateArgs(Args, ArgValues, Info))
+ return false;
+
+ if (!Info.CheckCallLimit(CallLoc))
+ return false;
+
+ CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data());
+ return EvaluateStmt(Result, Info, Body) == ESR_Returned;
+}
+
+/// Evaluate a constructor call.
+static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
+ ArrayRef<const Expr*> Args,
+ const CXXConstructorDecl *Definition,
+ EvalInfo &Info, APValue &Result) {
+ ArgVector ArgValues(Args.size());
+ if (!EvaluateArgs(Args, ArgValues, Info))
+ return false;
+
+ if (!Info.CheckCallLimit(CallLoc))
+ return false;
+
+ const CXXRecordDecl *RD = Definition->getParent();
+ if (RD->getNumVBases()) {
+ Info.Diag(CallLoc, diag::note_constexpr_virtual_base) << RD;
+ return false;
+ }
+
+ CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues.data());
+
+ // If it's a delegating constructor, just delegate.
+ if (Definition->isDelegatingConstructor()) {
+ CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
+ return EvaluateInPlace(Result, Info, This, (*I)->getInit());
+ }
+
+ // For a trivial copy or move constructor, perform an APValue copy. This is
+ // essential for unions, where the operations performed by the constructor
+ // cannot be represented by ctor-initializers.
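+ // For example, copying a 'union U { int a; float b; };' value must preserve
+ // which member is active, something no ctor-initializer list can express.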
+ if (Definition->isDefaulted() &&
+ ((Definition->isCopyConstructor() && Definition->isTrivial()) ||
+ (Definition->isMoveConstructor() && Definition->isTrivial()))) {
+ LValue RHS;
+ RHS.setFrom(Info.Ctx, ArgValues[0]);
+ return HandleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
+ RHS, Result);
+ }
+
+ // Reserve space for the struct members.
+ if (!RD->isUnion() && Result.isUninit())
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ bool Success = true;
+ unsigned BasesSeen = 0;
+#ifndef NDEBUG
+ CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
+#endif
+ for (CXXConstructorDecl::init_const_iterator I = Definition->init_begin(),
+ E = Definition->init_end(); I != E; ++I) {
+ LValue Subobject = This;
+ APValue *Value = &Result;
+
+ // Determine the subobject to initialize.
+ if ((*I)->isBaseInitializer()) {
+ QualType BaseType((*I)->getBaseClass(), 0);
+#ifndef NDEBUG
+ // Non-virtual base classes are initialized in the order in which they
+ // appear in the class definition. We have already checked for virtual
+ // base classes.
+ assert(!BaseIt->isVirtual() && "virtual base for literal type");
+ assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) &&
+ "base class initializers not in expected order");
+ ++BaseIt;
+#endif
+ HandleLValueDirectBase(Info, (*I)->getInit(), Subobject, RD,
+ BaseType->getAsCXXRecordDecl(), &Layout);
+ Value = &Result.getStructBase(BasesSeen++);
+ } else if (FieldDecl *FD = (*I)->getMember()) {
+ HandleLValueMember(Info, (*I)->getInit(), Subobject, FD, &Layout);
+ if (RD->isUnion()) {
+ Result = APValue(FD);
+ Value = &Result.getUnionValue();
+ } else {
+ Value = &Result.getStructField(FD->getFieldIndex());
+ }
+ } else if (IndirectFieldDecl *IFD = (*I)->getIndirectMember()) {
+ // Walk the indirect field decl's chain to find the object to initialize,
+ // and make sure we've initialized every step along it.
+ for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
+ CE = IFD->chain_end();
+ C != CE; ++C) {
+ FieldDecl *FD = cast<FieldDecl>(*C);
+ CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
+ // Switch the union field if it differs. This happens if we had
+ // preceding zero-initialization, and we're now initializing a union
+ // subobject other than the first.
+ // FIXME: In this case, the values of the other subobjects are
+ // specified, since zero-initialization sets all padding bits to zero.
+ if (Value->isUninit() ||
+ (Value->isUnion() && Value->getUnionField() != FD)) {
+ if (CD->isUnion())
+ *Value = APValue(FD);
+ else
+ *Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
+ std::distance(CD->field_begin(), CD->field_end()));
+ }
+ HandleLValueMember(Info, (*I)->getInit(), Subobject, FD);
+ if (CD->isUnion())
+ Value = &Value->getUnionValue();
+ else
+ Value = &Value->getStructField(FD->getFieldIndex());
+ }
+ } else {
+ llvm_unreachable("unknown base initializer kind");
+ }
+
+ if (!EvaluateInPlace(*Value, Info, Subobject, (*I)->getInit(),
+ (*I)->isBaseInitializer()
+ ? CCEK_Constant : CCEK_MemberInit)) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success;
}
namespace {
class HasSideEffect
: public ConstStmtVisitor<HasSideEffect, bool> {
- EvalInfo &Info;
+ const ASTContext &Ctx;
public:
- HasSideEffect(EvalInfo &info) : Info(info) {}
+ HasSideEffect(const ASTContext &C) : Ctx(C) {}
// Unhandled nodes conservatively default to having side effects.
bool VisitStmt(const Stmt *S) {
@@ -279,17 +2281,12 @@ public:
return Visit(E->getResultExpr());
}
bool VisitDeclRefExpr(const DeclRefExpr *E) {
- if (Info.Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
return true;
return false;
}
bool VisitObjCIvarRefExpr(const ObjCIvarRefExpr *E) {
- if (Info.Ctx.getCanonicalType(E->getType()).isVolatileQualified())
- return true;
- return false;
- }
- bool VisitBlockDeclRefExpr (const BlockDeclRefExpr *E) {
- if (Info.Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
return true;
return false;
}
@@ -310,7 +2307,7 @@ public:
bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E)
{ return Visit(E->getLHS()) || Visit(E->getRHS()); }
bool VisitChooseExpr(const ChooseExpr *E)
- { return Visit(E->getChosenSubExpr(Info.Ctx)); }
+ { return Visit(E->getChosenSubExpr(Ctx)); }
bool VisitCastExpr(const CastExpr *E) { return Visit(E->getSubExpr()); }
bool VisitBinAssign(const BinaryOperator *E) { return true; }
bool VisitCompoundAssignOperator(const BinaryOperator *E) { return true; }
@@ -321,7 +2318,7 @@ public:
bool VisitUnaryPreDec(const UnaryOperator *E) { return true; }
bool VisitUnaryPostDec(const UnaryOperator *E) { return true; }
bool VisitUnaryDeref(const UnaryOperator *E) {
- if (Info.Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
return true;
return Visit(E->getSubExpr());
}
@@ -349,16 +2346,18 @@ public:
: info(info), opaqueValue(opaqueValue) {
// If evaluation fails, fail immediately.
- if (!Evaluate(info, value)) {
+ if (!Evaluate(info.OpaqueValues[opaqueValue], info, value)) {
this->opaqueValue = 0;
return;
}
- info.OpaqueValues[opaqueValue] = info.EvalResult.Val;
}
bool hasError() const { return opaqueValue == 0; }
~OpaqueValueEvaluation() {
+ // FIXME: For a recursive constexpr call, an outer stack frame might have
+ // been using this opaque value too, and will now have to re-evaluate the
+ // source expression.
if (opaqueValue) info.OpaqueValues.erase(opaqueValue);
}
};
@@ -370,18 +2369,55 @@ public:
//===----------------------------------------------------------------------===//
namespace {
-template <class Derived, typename RetTy=void>
+// FIXME: RetTy is always bool. Remove it.
+template <class Derived, typename RetTy=bool>
class ExprEvaluatorBase
: public ConstStmtVisitor<Derived, RetTy> {
private:
RetTy DerivedSuccess(const APValue &V, const Expr *E) {
return static_cast<Derived*>(this)->Success(V, E);
}
- RetTy DerivedError(const Expr *E) {
- return static_cast<Derived*>(this)->Error(E);
+ RetTy DerivedZeroInitialization(const Expr *E) {
+ return static_cast<Derived*>(this)->ZeroInitialization(E);
}
- RetTy DerivedValueInitialization(const Expr *E) {
- return static_cast<Derived*>(this)->ValueInitialization(E);
+
+ // Check whether a conditional operator with a non-constant condition is a
+ // potential constant expression. If neither arm is a potential constant
+ // expression, then the conditional operator is not either.
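+ // For example, 'n ? 1 : throw 0' may still be a potential constant
+ // expression, since its true arm is; 'n ? throw 0 : throw 1' never can be,
+ // and is diagnosed below.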
+ template<typename ConditionalOperator>
+ void CheckPotentialConstantConditional(const ConditionalOperator *E) {
+ assert(Info.CheckingPotentialConstantExpression);
+
+ // Speculatively evaluate both arms.
+ {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Diag;
+ SpeculativeEvaluationRAII Speculate(Info, &Diag);
+
+ StmtVisitorTy::Visit(E->getFalseExpr());
+ if (Diag.empty())
+ return;
+
+ Diag.clear();
+ StmtVisitorTy::Visit(E->getTrueExpr());
+ if (Diag.empty())
+ return;
+ }
+
+ Error(E, diag::note_constexpr_conditional_never_const);
+ }
+
+ template<typename ConditionalOperator>
+ bool HandleConditionalOperator(const ConditionalOperator *E) {
+ bool BoolResult;
+ if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
+ if (Info.CheckingPotentialConstantExpression)
+ CheckPotentialConstantConditional(E);
+ return false;
+ }
+
+ Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
+ return StmtVisitorTy::Visit(EvalExpr);
}
protected:
@@ -389,16 +2425,32 @@ protected:
typedef ConstStmtVisitor<Derived, RetTy> StmtVisitorTy;
typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
- RetTy ValueInitialization(const Expr *E) { return DerivedError(E); }
+ OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
+ return Info.CCEDiag(E, D);
+ }
+
+ RetTy ZeroInitialization(const Expr *E) { return Error(E); }
public:
ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
+ EvalInfo &getEvalInfo() { return Info; }
+
+ /// Report an evaluation error. This should only be called when an error is
+ /// first discovered. When propagating an error, just return false.
+ bool Error(const Expr *E, diag::kind D) {
+ Info.Diag(E, D);
+ return false;
+ }
+ bool Error(const Expr *E) {
+ return Error(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+
RetTy VisitStmt(const Stmt *) {
llvm_unreachable("Expression evaluator should not be called on stmts");
}
RetTy VisitExpr(const Expr *E) {
- return DerivedError(E);
+ return Error(E);
}
RetTy VisitParenExpr(const ParenExpr *E)
@@ -413,186 +2465,573 @@ public:
{ return StmtVisitorTy::Visit(E->getResultExpr()); }
RetTy VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
{ return StmtVisitorTy::Visit(E->getReplacement()); }
+ RetTy VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
+ { return StmtVisitorTy::Visit(E->getExpr()); }
+ // We cannot create any objects for which cleanups are required, so there is
+ // nothing to do here; all cleanups must come from unevaluated subexpressions.
+ RetTy VisitExprWithCleanups(const ExprWithCleanups *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+
+ RetTy VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 0;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+ RetTy VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+
+ RetTy VisitBinaryOperator(const BinaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+
+ case BO_Comma:
+ VisitIgnoredValue(E->getLHS());
+ return StmtVisitorTy::Visit(E->getRHS());
+
+ case BO_PtrMemD:
+ case BO_PtrMemI: {
+ LValue Obj;
+ if (!HandleMemberPointerAccess(Info, E, Obj))
+ return false;
+ APValue Result;
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), Obj, Result))
+ return false;
+ return DerivedSuccess(Result, E);
+ }
+ }
+ }
RetTy VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
+ // Cache the value of the common expression.
OpaqueValueEvaluation opaque(Info, E->getOpaqueValue(), E->getCommon());
if (opaque.hasError())
- return DerivedError(E);
-
- bool cond;
- if (!HandleConversionToBool(E->getCond(), cond, Info))
- return DerivedError(E);
+ return false;
- return StmtVisitorTy::Visit(cond ? E->getTrueExpr() : E->getFalseExpr());
+ return HandleConditionalOperator(E);
}
RetTy VisitConditionalOperator(const ConditionalOperator *E) {
- bool BoolResult;
- if (!HandleConversionToBool(E->getCond(), BoolResult, Info))
- return DerivedError(E);
+ bool IsBcpCall = false;
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // the result is a constant expression if it can be folded without
+ // side-effects. This is an important GNU extension. See GCC PR38377
+ // for discussion.
+ if (const CallExpr *CallCE =
+ dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
+ if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+ IsBcpCall = true;
+
+ // Always assume __builtin_constant_p(...) ? ... : ... is a potential
+ // constant expression; we can't check whether it's potentially foldable.
+ if (Info.CheckingPotentialConstantExpression && IsBcpCall)
+ return false;
- Expr* EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
- return StmtVisitorTy::Visit(EvalExpr);
+ FoldConstant Fold(Info);
+
+ if (!HandleConditionalOperator(E))
+ return false;
+
+ if (IsBcpCall)
+ Fold.Fold(Info);
+
+ return true;
}
RetTy VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
- const APValue *value = Info.getOpaqueValue(E);
- if (!value)
- return (E->getSourceExpr() ? StmtVisitorTy::Visit(E->getSourceExpr())
- : DerivedError(E));
- return DerivedSuccess(*value, E);
+ const APValue *Value = Info.getOpaqueValue(E);
+ if (!Value) {
+ const Expr *Source = E->getSourceExpr();
+ if (!Source)
+ return Error(E);
+ if (Source == E) { // sanity checking.
+ assert(0 && "OpaqueValueExpr recursively refers to itself");
+ return Error(E);
+ }
+ return StmtVisitorTy::Visit(Source);
+ }
+ return DerivedSuccess(*Value, E);
}
+ RetTy VisitCallExpr(const CallExpr *E) {
+ const Expr *Callee = E->getCallee()->IgnoreParens();
+ QualType CalleeType = Callee->getType();
+
+ const FunctionDecl *FD = 0;
+ LValue *This = 0, ThisVal;
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ bool HasQualifier = false;
+
+ // Extract function decl and 'this' pointer from the callee.
+ if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) {
+ const ValueDecl *Member = 0;
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) {
+ // Explicit bound member calls, such as x.f() or p->g();
+ if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal))
+ return false;
+ Member = ME->getMemberDecl();
+ This = &ThisVal;
+ HasQualifier = ME->hasQualifier();
+ } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) {
+ // Indirect bound member calls ('.*' or '->*').
+ Member = HandleMemberPointerAccess(Info, BE, ThisVal, false);
+ if (!Member) return false;
+ This = &ThisVal;
+ } else
+ return Error(Callee);
+
+ FD = dyn_cast<FunctionDecl>(Member);
+ if (!FD)
+ return Error(Callee);
+ } else if (CalleeType->isFunctionPointerType()) {
+ LValue Call;
+ if (!EvaluatePointer(Callee, Call, Info))
+ return false;
+
+ if (!Call.getLValueOffset().isZero())
+ return Error(Callee);
+ FD = dyn_cast_or_null<FunctionDecl>(
+ Call.getLValueBase().dyn_cast<const ValueDecl*>());
+ if (!FD)
+ return Error(Callee);
+
+ // Overloaded operator calls to member functions are represented as normal
+ // calls with '*this' as the first argument.
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && !MD->isStatic()) {
+ // FIXME: When selecting an implicit conversion for an overloaded
+ // operator delete, we sometimes try to evaluate calls to conversion
+ // operators without a 'this' parameter!
+ if (Args.empty())
+ return Error(E);
+
+ if (!EvaluateObjectArgument(Info, Args[0], ThisVal))
+ return false;
+ This = &ThisVal;
+ Args = Args.slice(1);
+ }
+
+ // Don't call function pointers which have been cast to some other type.
+ if (!Info.Ctx.hasSameType(CalleeType->getPointeeType(), FD->getType()))
+ return Error(E);
+ } else
+ return Error(E);
+
+ if (This && !This->checkSubobject(Info, E, CSK_This))
+ return false;
+
+ // DR1358 allows virtual constexpr functions in some cases. Don't allow
+ // calls to such functions in constant expressions.
+ if (This && !HasQualifier &&
+ isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isVirtual())
+ return Error(E, diag::note_constexpr_virtual_call);
+
+ const FunctionDecl *Definition = 0;
+ Stmt *Body = FD->getBody(Definition);
+ APValue Result;
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition) ||
+ !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body,
+ Info, Result))
+ return false;
+
+ return DerivedSuccess(Result, E);
+ }
+
+ RetTy VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ return StmtVisitorTy::Visit(E->getInitializer());
+ }
RetTy VisitInitListExpr(const InitListExpr *E) {
- if (Info.getLangOpts().CPlusPlus0x) {
- if (E->getNumInits() == 0)
- return DerivedValueInitialization(E);
- if (E->getNumInits() == 1)
- return StmtVisitorTy::Visit(E->getInit(0));
- }
- return DerivedError(E);
+ if (E->getNumInits() == 0)
+ return DerivedZeroInitialization(E);
+ if (E->getNumInits() == 1)
+ return StmtVisitorTy::Visit(E->getInit(0));
+ return Error(E);
}
RetTy VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
- return DerivedValueInitialization(E);
+ return DerivedZeroInitialization(E);
}
RetTy VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
- return DerivedValueInitialization(E);
+ return DerivedZeroInitialization(E);
+ }
+ RetTy VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+
+ /// A member expression where the object is a prvalue is itself a prvalue.
+ RetTy VisitMemberExpr(const MemberExpr *E) {
+ assert(!E->isArrow() && "missing call to bound member function?");
+
+ APValue Val;
+ if (!Evaluate(Val, Info, E->getBase()))
+ return false;
+
+ QualType BaseTy = E->getBase()->getType();
+
+ const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
+ if (!FD) return Error(E);
+ assert(!FD->getType()->isReferenceType() && "prvalue reference?");
+ assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+
+ SubobjectDesignator Designator(BaseTy);
+ Designator.addDeclUnchecked(FD);
+
+ return ExtractSubobject(Info, E, Val, BaseTy, Designator, E->getType()) &&
+ DerivedSuccess(Val, E);
+ }
+
+ RetTy VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ break;
+
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ return StmtVisitorTy::Visit(E->getSubExpr());
+
+ case CK_LValueToRValue: {
+ LValue LVal;
+ if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
+ return false;
+ APValue RVal;
+ // Note, we use the subexpression's type in order to retain cv-qualifiers.
+ if (!HandleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
+ LVal, RVal))
+ return false;
+ return DerivedSuccess(RVal, E);
+ }
+ }
+
+ return Error(E);
}
+ /// Visit a value which is evaluated, but whose value is ignored.
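+ /// For example, the left-hand operand of a comma operator is evaluated
+ /// here solely so that its side effects are noted.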
+ void VisitIgnoredValue(const Expr *E) {
+ APValue Scratch;
+ if (!Evaluate(Scratch, Info, E))
+ Info.EvalStatus.HasSideEffects = true;
+ }
};
}
//===----------------------------------------------------------------------===//
-// LValue Evaluation
+// Common base class for lvalue and temporary evaluation.
//===----------------------------------------------------------------------===//
namespace {
-class LValueExprEvaluator
- : public ExprEvaluatorBase<LValueExprEvaluator, bool> {
+template<class Derived>
+class LValueExprEvaluatorBase
+ : public ExprEvaluatorBase<Derived, bool> {
+protected:
LValue &Result;
- const Decl *PrevDecl;
+ typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
+ typedef ExprEvaluatorBase<Derived, bool> ExprEvaluatorBaseTy;
- bool Success(const Expr *E) {
- Result.Base = E;
- Result.Offset = CharUnits::Zero();
+ bool Success(APValue::LValueBase B) {
+ Result.set(B);
return true;
}
-public:
- LValueExprEvaluator(EvalInfo &info, LValue &Result) :
- ExprEvaluatorBaseTy(info), Result(Result), PrevDecl(0) {}
+public:
+ LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result) :
+ ExprEvaluatorBaseTy(Info), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
- Result.setFrom(V);
+ Result.setFrom(this->Info.Ctx, V);
return true;
}
- bool Error(const Expr *E) {
- return false;
+
+ bool VisitMemberExpr(const MemberExpr *E) {
+ // Handle non-static data members.
+ QualType BaseTy;
+ if (E->isArrow()) {
+ if (!EvaluatePointer(E->getBase(), Result, this->Info))
+ return false;
+ BaseTy = E->getBase()->getType()->getAs<PointerType>()->getPointeeType();
+ } else if (E->getBase()->isRValue()) {
+ assert(E->getBase()->getType()->isRecordType());
+ if (!EvaluateTemporary(E->getBase(), Result, this->Info))
+ return false;
+ BaseTy = E->getBase()->getType();
+ } else {
+ if (!this->Visit(E->getBase()))
+ return false;
+ BaseTy = E->getBase()->getType();
+ }
+
+ const ValueDecl *MD = E->getMemberDecl();
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
+ assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+ (void)BaseTy;
+ HandleLValueMember(this->Info, E, Result, FD);
+ } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) {
+ HandleLValueIndirectMember(this->Info, E, Result, IFD);
+ } else
+ return this->Error(E);
+
+ if (MD->getType()->isReferenceType()) {
+ APValue RefValue;
+ if (!HandleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
+ RefValue))
+ return false;
+ return Success(RefValue, E);
+ }
+ return true;
}
-
+
+ bool VisitBinaryOperator(const BinaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ return HandleMemberPointerAccess(this->Info, E, Result);
+ }
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ if (!this->Visit(E->getSubExpr()))
+ return false;
+
+ // Now figure out the necessary offset to add to the base LV to get from
+ // the derived class to the base class.
+ QualType Type = E->getSubExpr()->getType();
+
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ if (!HandleLValueBase(this->Info, E, Result, Type->getAsCXXRecordDecl(),
+ *PathI))
+ return false;
+ Type = (*PathI)->getType();
+ }
+
+ return true;
+ }
+ }
+ }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// LValue Evaluation
+//
+// This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
+// function designators (in C), decl references to void objects (in C), and
+// temporaries (if building with -Wno-address-of-temporary).
+//
+// LValue evaluation produces values comprising a base expression of one of the
+// following types:
+// - Declarations
+// * VarDecl
+// * FunctionDecl
+// - Literals
+// * CompoundLiteralExpr in C
+// * StringLiteral
+// * CXXTypeidExpr
+// * PredefinedExpr
+// * ObjCStringLiteralExpr
+// * ObjCEncodeExpr
+// * AddrLabelExpr
+// * BlockExpr
+// * CallExpr for a MakeStringConstant builtin
+// - Locals and temporaries
+// * Any Expr, with a CallIndex indicating the function in which the temporary
+// was evaluated.
+// plus an offset in bytes.
+//===----------------------------------------------------------------------===//
+namespace {
+class LValueExprEvaluator
+ : public LValueExprEvaluatorBase<LValueExprEvaluator> {
+public:
+ LValueExprEvaluator(EvalInfo &Info, LValue &Result) :
+ LValueExprEvaluatorBaseTy(Info, Result) {}
+
+ bool VisitVarDecl(const Expr *E, const VarDecl *VD);
+
bool VisitDeclRefExpr(const DeclRefExpr *E);
bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
+ bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
bool VisitMemberExpr(const MemberExpr *E);
bool VisitStringLiteral(const StringLiteral *E) { return Success(E); }
bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); }
+ bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
bool VisitUnaryDeref(const UnaryOperator *E);
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
bool VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
- return false;
+ return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
- case CK_NoOp:
case CK_LValueBitCast:
- return Visit(E->getSubExpr());
+ this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ if (!Visit(E->getSubExpr()))
+ return false;
+ Result.Designator.setInvalid();
+ return true;
- // FIXME: Support CK_DerivedToBase and friends.
+ case CK_BaseToDerived:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ return HandleBaseToDerivedCast(Info, E, Result);
}
}
-
- // FIXME: Missing: __real__, __imag__
-
};
} // end anonymous namespace
+/// Evaluate an expression as an lvalue. This can be legitimately called on
+/// expressions which are not glvalues, in a few cases:
+/// * function designators in C,
+/// * "extern void" objects,
+/// * temporaries, if building with -Wno-address-of-temporary.
static bool EvaluateLValue(const Expr* E, LValue& Result, EvalInfo &Info) {
+ assert((E->isGLValue() || E->getType()->isFunctionType() ||
+ E->getType()->isVoidType() || isa<CXXTemporaryObjectExpr>(E)) &&
+ "can't evaluate expression as an lvalue");
return LValueExprEvaluator(Info, Result).Visit(E);
}
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
- if (isa<FunctionDecl>(E->getDecl())) {
- return Success(E);
- } else if (const VarDecl* VD = dyn_cast<VarDecl>(E->getDecl())) {
- if (!VD->getType()->isReferenceType())
- return Success(E);
- // Reference parameters can refer to anything even if they have an
- // "initializer" in the form of a default argument.
- if (!isa<ParmVarDecl>(VD)) {
- // FIXME: Check whether VD might be overridden!
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl()))
+ return Success(FD);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ return VisitVarDecl(E, VD);
+ return Error(E);
+}
- // Check for recursive initializers of references.
- if (PrevDecl == VD)
- return Error(E);
- PrevDecl = VD;
- if (const Expr *Init = VD->getAnyInitializer())
- return Visit(Init);
+bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
+ if (!VD->getType()->isReferenceType()) {
+ if (isa<ParmVarDecl>(VD)) {
+ Result.set(VD, Info.CurrentCall->Index);
+ return true;
}
+ return Success(VD);
}
- return ExprEvaluatorBaseTy::VisitDeclRefExpr(E);
+ APValue V;
+ if (!EvaluateVarDeclInit(Info, E, VD, Info.CurrentCall, V))
+ return false;
+ return Success(V, E);
+}
+
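+// For example, binding 'const int &r = 1 + 1;' materializes a temporary: the
+// prvalue is evaluated in place into the current frame's Temporaries map, and
+// the resulting lvalue refers to that slot.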
+bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ if (E->GetTemporaryExpr()->isRValue()) {
+ if (E->getType()->isRecordType())
+ return EvaluateTemporary(E->GetTemporaryExpr(), Result, Info);
+
+ Result.set(E, Info.CurrentCall->Index);
+ return EvaluateInPlace(Info.CurrentCall->Temporaries[E], Info,
+ Result, E->GetTemporaryExpr());
+ }
+
+ // Materialization of an lvalue temporary occurs when we need to force a copy
+ // (for instance, if it's a bitfield).
+ // FIXME: The AST should contain an lvalue-to-rvalue node for such cases.
+ if (!Visit(E->GetTemporaryExpr()))
+ return false;
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), Result,
+ Info.CurrentCall->Temporaries[E]))
+ return false;
+ Result.set(E, Info.CurrentCall->Index);
+ return true;
}
bool
LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ // Defer visiting the literal until the lvalue-to-rvalue conversion. We can
+ // only see this when folding in C, so there's no standard to follow here.
return Success(E);
}
-bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
- QualType Ty;
- if (E->isArrow()) {
- if (!EvaluatePointer(E->getBase(), Result, Info))
- return false;
- Ty = E->getBase()->getType()->getAs<PointerType>()->getPointeeType();
- } else {
- if (!Visit(E->getBase()))
- return false;
- Ty = E->getBase()->getType();
+bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ if (E->isTypeOperand())
+ return Success(E);
+ CXXRecordDecl *RD = E->getExprOperand()->getType()->getAsCXXRecordDecl();
+ if (RD && RD->isPolymorphic()) {
+ Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
+ << E->getExprOperand()->getType()
+ << E->getExprOperand()->getSourceRange();
+ return false;
}
+ return Success(E);
+}
- const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
- const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
-
- const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
- if (!FD) // FIXME: deal with other kinds of member expressions
- return false;
+bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
+ // Handle static data members.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(E->getMemberDecl())) {
+ VisitIgnoredValue(E->getBase());
+ return VisitVarDecl(E, VD);
+ }
- if (FD->getType()->isReferenceType())
- return false;
+ // Handle static member functions.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) {
+ if (MD->isStatic()) {
+ VisitIgnoredValue(E->getBase());
+ return Success(MD);
+ }
+ }
- unsigned i = FD->getFieldIndex();
- Result.Offset += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
- return true;
+ // Handle non-static data members.
+ return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
}
bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // FIXME: Deal with vectors as array subscript bases.
+ if (E->getBase()->getType()->isVectorType())
+ return Error(E);
+
if (!EvaluatePointer(E->getBase(), Result, Info))
return false;
APSInt Index;
if (!EvaluateInteger(E->getIdx(), Index, Info))
return false;
+ int64_t IndexValue
+ = Index.isSigned() ? Index.getSExtValue()
+ : static_cast<int64_t>(Index.getZExtValue());
- CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(E->getType());
- Result.Offset += Index.getSExtValue() * ElementSize;
- return true;
+ return HandleLValueArrayAdjustment(Info, E, Result, E->getType(), IndexValue);
}
bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
return EvaluatePointer(E->getSubExpr(), Result, Info);
}
+bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ // __real is a no-op on scalar lvalues.
+ if (E->getSubExpr()->getType()->isAnyComplexType())
+ HandleLValueComplexElement(Info, E, Result, E->getType(), false);
+ return true;
+}
+
+bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ assert(E->getSubExpr()->getType()->isAnyComplexType() &&
+ "lvalue __imag__ on scalar?");
+ if (!Visit(E->getSubExpr()))
+ return false;
+ HandleLValueComplexElement(Info, E, Result, E->getType(), true);
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// Pointer Evaluation
//===----------------------------------------------------------------------===//
@@ -603,8 +3042,7 @@ class PointerExprEvaluator
LValue &Result;
bool Success(const Expr *E) {
- Result.Base = E;
- Result.Offset = CharUnits::Zero();
+ Result.set(E);
return true;
}
public:
@@ -613,13 +3051,10 @@ public:
: ExprEvaluatorBaseTy(info), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
- Result.setFrom(V);
+ Result.setFrom(Info.Ctx, V);
return true;
}
- bool Error(const Stmt *S) {
- return false;
- }
- bool ValueInitialization(const Expr *E) {
+ bool ZeroInitialization(const Expr *E) {
return Success((Expr*)0);
}
@@ -628,71 +3063,64 @@ public:
bool VisitUnaryAddrOf(const UnaryOperator *E);
bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
{ return Success(E); }
+ bool VisitObjCNumericLiteral(const ObjCNumericLiteral *E)
+ { return Success(E); }
bool VisitAddrLabelExpr(const AddrLabelExpr *E)
{ return Success(E); }
bool VisitCallExpr(const CallExpr *E);
bool VisitBlockExpr(const BlockExpr *E) {
if (!E->getBlockDecl()->hasCaptures())
return Success(E);
- return false;
+ return Error(E);
+ }
+ bool VisitCXXThisExpr(const CXXThisExpr *E) {
+ if (!Info.CurrentCall->This)
+ return Error(E);
+ Result = *Info.CurrentCall->This;
+ return true;
}
- bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E)
- { return ValueInitialization(E); }
// FIXME: Missing: @protocol, @selector
};
} // end anonymous namespace
static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info) {
- assert(E->getType()->hasPointerRepresentation());
+ assert(E->isRValue() && E->getType()->hasPointerRepresentation());
return PointerExprEvaluator(Info, Result).Visit(E);
}
bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->getOpcode() != BO_Add &&
E->getOpcode() != BO_Sub)
- return false;
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
const Expr *PExp = E->getLHS();
const Expr *IExp = E->getRHS();
if (IExp->getType()->isPointerType())
std::swap(PExp, IExp);
- if (!EvaluatePointer(PExp, Result, Info))
+ bool EvalPtrOK = EvaluatePointer(PExp, Result, Info);
+ if (!EvalPtrOK && !Info.keepEvaluatingAfterFailure())
return false;
llvm::APSInt Offset;
- if (!EvaluateInteger(IExp, Offset, Info))
+ if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
return false;
int64_t AdditionalOffset
= Offset.isSigned() ? Offset.getSExtValue()
: static_cast<int64_t>(Offset.getZExtValue());
+ if (E->getOpcode() == BO_Sub)
+ AdditionalOffset = -AdditionalOffset;
- // Compute the new offset in the appropriate width.
-
- QualType PointeeType =
- PExp->getType()->getAs<PointerType>()->getPointeeType();
- CharUnits SizeOfPointee;
-
- // Explicitly handle GNU void* and function pointer arithmetic extensions.
- if (PointeeType->isVoidType() || PointeeType->isFunctionType())
- SizeOfPointee = CharUnits::One();
- else
- SizeOfPointee = Info.Ctx.getTypeSizeInChars(PointeeType);
-
- if (E->getOpcode() == BO_Add)
- Result.Offset += AdditionalOffset * SizeOfPointee;
- else
- Result.Offset -= AdditionalOffset * SizeOfPointee;
-
- return true;
+ QualType Pointee = PExp->getType()->getAs<PointerType>()->getPointeeType();
+ return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
+ AdditionalOffset);
}
bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
return EvaluateLValue(E->getSubExpr(), Result, Info);
}
-
bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
const Expr* SubExpr = E->getSubExpr();
@@ -700,260 +3128,654 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
default:
break;
- case CK_NoOp:
case CK_BitCast:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
- return Visit(SubExpr);
+ if (!Visit(SubExpr))
+ return false;
+ // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
+ // permitted in constant expressions in C++11. Bitcasts from cv void* are
+ // also static_casts, but we disallow them as a resolution to DR1312.
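+ // For example, '(void*)&i' is a permitted constant expression, while
+ // '(int*)(void*)&i' is diagnosed here.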
+ if (!E->getType()->isVoidPointerType()) {
+ Result.Designator.setInvalid();
+ if (SubExpr->getType()->isVoidPointerType())
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 3 << SubExpr->getType();
+ else
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ }
+ return true;
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase: {
- LValue BaseLV;
- if (!EvaluatePointer(E->getSubExpr(), BaseLV, Info))
+ if (!EvaluatePointer(E->getSubExpr(), Result, Info))
return false;
+ if (!Result.Base && Result.Offset.isZero())
+ return true;
- // Now figure out the necessary offset to add to the baseLV to get from
+ // Now figure out the necessary offset to add to the base LV to get from
// the derived class to the base class.
- CharUnits Offset = CharUnits::Zero();
+ QualType Type =
+ E->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
- QualType Ty = E->getSubExpr()->getType();
- const CXXRecordDecl *DerivedDecl =
- Ty->getAs<PointerType>()->getPointeeType()->getAsCXXRecordDecl();
-
- for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
PathE = E->path_end(); PathI != PathE; ++PathI) {
- const CXXBaseSpecifier *Base = *PathI;
-
- // FIXME: If the base is virtual, we'd need to determine the type of the
- // most derived class and we don't support that right now.
- if (Base->isVirtual())
+ if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(),
+ *PathI))
return false;
-
- const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
- const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
-
- Offset += Layout.getBaseClassOffset(BaseDecl);
- DerivedDecl = BaseDecl;
+ Type = (*PathI)->getType();
}
- Result.Base = BaseLV.getLValueBase();
- Result.Offset = BaseLV.getLValueOffset() + Offset;
return true;
}
- case CK_NullToPointer: {
- Result.Base = 0;
- Result.Offset = CharUnits::Zero();
- return true;
- }
+ case CK_BaseToDerived:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.Base && Result.Offset.isZero())
+ return true;
+ return HandleBaseToDerivedCast(Info, E, Result);
+
+ case CK_NullToPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
case CK_IntegralToPointer: {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+
APValue Value;
if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
break;
if (Value.isInt()) {
- Value.getInt() = Value.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
- Result.Base = 0;
- Result.Offset = CharUnits::fromQuantity(Value.getInt().getZExtValue());
+ unsigned Size = Info.Ctx.getTypeSize(E->getType());
+ uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue();
+ Result.Base = (Expr*)0;
+ Result.Offset = CharUnits::fromQuantity(N);
+ Result.CallIndex = 0;
+ Result.Designator.setInvalid();
return true;
} else {
// Cast is of an lvalue, no need to change value.
- Result.Base = Value.getLValueBase();
- Result.Offset = Value.getLValueOffset();
+ Result.setFrom(Info.Ctx, Value);
return true;
}
}
case CK_ArrayToPointerDecay:
+ if (SubExpr->isGLValue()) {
+ if (!EvaluateLValue(SubExpr, Result, Info))
+ return false;
+ } else {
+ Result.set(SubExpr, Info.CurrentCall->Index);
+ if (!EvaluateInPlace(Info.CurrentCall->Temporaries[SubExpr],
+ Info, Result, SubExpr))
+ return false;
+ }
+ // The result is a pointer to the first element of the array.
+ if (const ConstantArrayType *CAT
+ = Info.Ctx.getAsConstantArrayType(SubExpr->getType()))
+ Result.addArray(Info, E, CAT);
+ else
+ Result.Designator.setInvalid();
+ return true;
+
case CK_FunctionToPointerDecay:
return EvaluateLValue(SubExpr, Result, Info);
}
- return false;
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
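// Editor's note: a hedged example, not part of the patch, of the DR1312
// handling in VisitCastExpr above. Casts to cv void* stay usable in C++11
// constant expressions; casts back out of cv void* do not:
//
//   constexpr int n = 0;
//   constexpr const void *vp = &n;             // OK: a static_cast to void*
//   constexpr const int *ip = (const int*)vp;  // rejected per DR1312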
bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
- if (E->isBuiltinCall(Info.Ctx) ==
- Builtin::BI__builtin___CFStringMakeConstantString ||
- E->isBuiltinCall(Info.Ctx) ==
- Builtin::BI__builtin___NSStringMakeConstantString)
+ if (IsStringLiteralCall(E))
return Success(E);
return ExprEvaluatorBaseTy::VisitCallExpr(E);
}
//===----------------------------------------------------------------------===//
+// Member Pointer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MemberPointerExprEvaluator
+ : public ExprEvaluatorBase<MemberPointerExprEvaluator, bool> {
+ MemberPtr &Result;
+
+ bool Success(const ValueDecl *D) {
+ Result = MemberPtr(D);
+ return true;
+ }
+public:
+
+ MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
+ : ExprEvaluatorBaseTy(Info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(V);
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E) {
+ return Success((const ValueDecl*)0);
+ }
+
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitUnaryAddrOf(const UnaryOperator *E);
+};
+} // end anonymous namespace
+
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isMemberPointerType());
+ return MemberPointerExprEvaluator(Info, Result).Visit(E);
+}
+
+bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_NullToMemberPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+
+ case CK_BaseToDerivedMemberPointer: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (E->path_empty())
+ return true;
+ // Base-to-derived member pointer casts store the path in derived-to-base
+ // order, so iterate backwards. The CXXBaseSpecifier also provides us with
+ // the wrong end of the derived->base arc, so stagger the path by one class.
+ typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
+ for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
+ PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
+ const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
+ if (!Result.castToDerived(Derived))
+ return Error(E);
+ }
+ const Type *FinalTy = E->getType()->castAs<MemberPointerType>()->getClass();
+ if (!Result.castToDerived(FinalTy->getAsCXXRecordDecl()))
+ return Error(E);
+ return true;
+ }
+
+ case CK_DerivedToBaseMemberPointer:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
+ const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
+ if (!Result.castToBase(Base))
+ return Error(E);
+ }
+ return true;
+ }
+}
+
+bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
+ // member can be formed.
+ return Success(cast<DeclRefExpr>(E->getSubExpr())->getDecl());
+}
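// Editor's note: an illustrative sketch, not part of the patch, using a
// hypothetical hierarchy:
//
//   struct B { int m; };
//   struct D : B {};
//   constexpr int B::*pb = &B::m;   // VisitUnaryAddrOf: &DeclRefExpr only
//   constexpr int D::*pd = pb;      // CK_BaseToDerivedMemberPointer; the
//                                   // path is walked in reverse, as above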
+
+//===----------------------------------------------------------------------===//
+// Record Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class RecordExprEvaluator
+ : public ExprEvaluatorBase<RecordExprEvaluator, bool> {
+ const LValue &This;
+ APValue &Result;
+ public:
+
+ RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
+ : ExprEvaluatorBaseTy(info), This(This), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
+
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ };
+}
+
+/// Perform zero-initialization on an object of non-union class type.
+/// C++11 [dcl.init]p5:
+/// To zero-initialize an object or reference of type T means:
+/// [...]
+/// -- if T is a (possibly cv-qualified) non-union class type,
+/// each non-static data member and each base-class subobject is
+/// zero-initialized
+static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
+ const RecordDecl *RD,
+ const LValue &This, APValue &Result) {
+ assert(!RD->isUnion() && "Expected non-union class type");
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ if (CD) {
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
+ End = CD->bases_end(); I != End; ++I, ++Index) {
+ const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
+ LValue Subobject = This;
+ HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout);
+ if (!HandleClassZeroInitialization(Info, E, Base, Subobject,
+ Result.getStructBase(Index)))
+ return false;
+ }
+ }
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), End = RD->field_end();
+ I != End; ++I) {
+ // -- if T is a reference type, no initialization is performed.
+ if ((*I)->getType()->isReferenceType())
+ continue;
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, E, Subobject, *I, &Layout);
+
+ ImplicitValueInitExpr VIE((*I)->getType());
+ if (!EvaluateInPlace(
+ Result.getStructField((*I)->getFieldIndex()), Info, Subobject, &VIE))
+ return false;
+ }
+
+ return true;
+}
+
+bool RecordExprEvaluator::ZeroInitialization(const Expr *E) {
+ const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
+ // object's first non-static named data member is zero-initialized
+ RecordDecl::field_iterator I = RD->field_begin();
+ if (I == RD->field_end()) {
+ Result = APValue((const FieldDecl*)0);
+ return true;
+ }
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, E, Subobject, *I);
+ Result = APValue(*I);
+ ImplicitValueInitExpr VIE((*I)->getType());
+ return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE);
+ }
+
+ if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->getNumVBases()) {
+ Info.Diag(E, diag::note_constexpr_virtual_base) << RD;
+ return false;
+ }
+
+ return HandleClassZeroInitialization(Info, E, RD, This, Result);
+}
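// Editor's note: hedged examples, not part of the patch, of the
// zero-initialization rules implemented above:
//
//   union U { int a; float b; };   // hypothetical types
//   constexpr U u = U();           // only u.a, the first named member,
//                                  // is zero-initialized
//
// Reference members are skipped per the quoted wording, and classes with
// virtual bases are rejected with note_constexpr_virtual_base.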
+
+bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_ConstructorConversion:
+ return Visit(E->getSubExpr());
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ APValue DerivedObject;
+ if (!Evaluate(DerivedObject, Info, E->getSubExpr()))
+ return false;
+ if (!DerivedObject.isStruct())
+ return Error(E->getSubExpr());
+
+ // Derived-to-base rvalue conversion: just slice off the derived part.
+ APValue *Value = &DerivedObject;
+ const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
+ const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
+ Value = &Value->getStructBase(getBaseIndex(RD, Base));
+ RD = Base;
+ }
+ Result = *Value;
+ return true;
+ }
+ }
+}
+
+bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ // Cannot constant-evaluate std::initializer_list inits.
+ if (E->initializesStdInitializerList())
+ return false;
+
+ const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ if (RD->isUnion()) {
+ const FieldDecl *Field = E->getInitializedFieldInUnion();
+ Result = APValue(Field);
+ if (!Field)
+ return true;
+
+ // If the initializer list for a union does not contain any elements, the
+ // first element of the union is value-initialized.
+ ImplicitValueInitExpr VIE(Field->getType());
+ const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout);
+ return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
+ }
+
+ assert((!isa<CXXRecordDecl>(RD) || !cast<CXXRecordDecl>(RD)->getNumBases()) &&
+ "initializer list for class with base classes");
+ Result = APValue(APValue::UninitStruct(), 0,
+ std::distance(RD->field_begin(), RD->field_end()));
+ unsigned ElementNo = 0;
+ bool Success = true;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) {
+ // Anonymous bit-fields are not considered members of the class for
+ // purposes of aggregate initialization.
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ LValue Subobject = This;
+
+ bool HaveInit = ElementNo < E->getNumInits();
+
+ // FIXME: Diagnostics here should point to the end of the initializer
+ // list, not the start.
+ HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E, Subobject,
+ *Field, &Layout);
+
+ // Perform an implicit value-initialization for members beyond the end of
+ // the initializer list.
+ ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
+
+ if (!EvaluateInPlace(
+ Result.getStructField((*Field)->getFieldIndex()),
+ Info, Subobject, HaveInit ? E->getInit(ElementNo++) : &VIE)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success;
+}
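// Editor's note: an illustrative sketch, not part of the patch. Members past
// the end of the initializer list are value-initialized, and anonymous
// bit-fields are skipped:
//
//   struct A { int x; int : 4; int y; };   // hypothetical aggregate
//   constexpr A a = {1};                   // a.y is value-initialized
//   static_assert(a.y == 0, "");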
+
+bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ const CXXConstructorDecl *FD = E->getConstructor();
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
+ // If we've already performed zero-initialization, we're already done.
+ if (!Result.isUninit())
+ return true;
+
+ if (ZeroInit)
+ return ZeroInitialization(E);
+
+ const CXXRecordDecl *RD = FD->getParent();
+ if (RD->isUnion())
+ Result = APValue((FieldDecl*)0);
+ else
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+ return true;
+ }
+
+ const FunctionDecl *Definition = 0;
+ FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
+ return false;
+
+ // Avoid materializing a temporary for an elidable copy/move constructor.
+ if (E->isElidable() && !ZeroInit)
+ if (const MaterializeTemporaryExpr *ME
+ = dyn_cast<MaterializeTemporaryExpr>(E->getArg(0)))
+ return Visit(ME->GetTemporaryExpr());
+
+ if (ZeroInit && !ZeroInitialization(E))
+ return false;
+
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ return HandleConstructorCall(E->getExprLoc(), This, Args,
+ cast<CXXConstructorDecl>(Definition), Info,
+ Result);
+}
+
+static bool EvaluateRecord(const Expr *E, const LValue &This,
+ APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRecordType() &&
+ "can't evaluate expression as a record rvalue");
+ return RecordExprEvaluator(Info, This, Result).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Temporary Evaluation
+//
+// Temporaries are represented in the AST as rvalues, but generally behave like
+// lvalues. The full-object of which the temporary is a subobject is implicitly
+// materialized so that a reference can bind to it.
+//===----------------------------------------------------------------------===//
+namespace {
+class TemporaryExprEvaluator
+ : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
+public:
+ TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
+ LValueExprEvaluatorBaseTy(Info, Result) {}
+
+ /// Visit an expression which constructs the value of this temporary.
+ bool VisitConstructExpr(const Expr *E) {
+ Result.set(E, Info.CurrentCall->Index);
+ return EvaluateInPlace(Info.CurrentCall->Temporaries[E], Info, Result, E);
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_ConstructorConversion:
+ return VisitConstructExpr(E->getSubExpr());
+ }
+ }
+ bool VisitInitListExpr(const InitListExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCallExpr(const CallExpr *E) {
+ return VisitConstructExpr(E);
+ }
+};
+} // end anonymous namespace
+
+/// Evaluate an expression of record type as a temporary.
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRecordType());
+ return TemporaryExprEvaluator(Info, Result).Visit(E);
+}
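// Editor's note: a hedged example, not part of the patch. A temporary is
// materialized so a reference can bind to it during constant evaluation:
//
//   struct S { int v; constexpr S() : v(42) {} };
//   constexpr int f(const S &s) { return s.v; }
//   static_assert(f(S()) == 42, "");   // S() evaluated in place via
//                                      // TemporaryExprEvaluator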
+
+//===----------------------------------------------------------------------===//
// Vector Evaluation
//===----------------------------------------------------------------------===//
namespace {
class VectorExprEvaluator
- : public ExprEvaluatorBase<VectorExprEvaluator, APValue> {
- APValue GetZeroVector(QualType VecType);
+ : public ExprEvaluatorBase<VectorExprEvaluator, bool> {
+ APValue &Result;
public:
- VectorExprEvaluator(EvalInfo &info) : ExprEvaluatorBaseTy(info) {}
+ VectorExprEvaluator(EvalInfo &info, APValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
- APValue Success(const APValue &V, const Expr *E) { return V; }
- APValue Error(const Expr *E) { return APValue(); }
- APValue ValueInitialization(const Expr *E)
- { return GetZeroVector(E->getType()); }
+ bool Success(const ArrayRef<APValue> &V, const Expr *E) {
+ assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
+ // FIXME: remove this APValue copy.
+ Result = APValue(V.data(), V.size());
+ return true;
+ }
+ bool Success(const APValue &V, const Expr *E) {
+ assert(V.isVector());
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
- APValue VisitUnaryReal(const UnaryOperator *E)
+ bool VisitUnaryReal(const UnaryOperator *E)
{ return Visit(E->getSubExpr()); }
- APValue VisitCastExpr(const CastExpr* E);
- APValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
- APValue VisitInitListExpr(const InitListExpr *E);
- APValue VisitUnaryImag(const UnaryOperator *E);
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
// FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
// binary comparisons, binary and/or/xor,
// shufflevector, ExtVectorElementExpr
- // (Note that these require implementing conversions
- // between vector types.)
};
} // end anonymous namespace
static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
- if (!E->getType()->isVectorType())
- return false;
- Result = VectorExprEvaluator(Info).Visit(E);
- return !Result.isUninit();
+ assert(E->isRValue() && E->getType()->isVectorType() && "not a vector rvalue");
+ return VectorExprEvaluator(Info, Result).Visit(E);
}
-APValue VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
- const VectorType *VTy = E->getType()->getAs<VectorType>();
- QualType EltTy = VTy->getElementType();
+bool VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
+ const VectorType *VTy = E->getType()->castAs<VectorType>();
unsigned NElts = VTy->getNumElements();
- unsigned EltWidth = Info.Ctx.getTypeSize(EltTy);
- const Expr* SE = E->getSubExpr();
+ const Expr *SE = E->getSubExpr();
QualType SETy = SE->getType();
switch (E->getCastKind()) {
case CK_VectorSplat: {
- APValue Result = APValue();
+ APValue Val = APValue();
if (SETy->isIntegerType()) {
APSInt IntResult;
if (!EvaluateInteger(SE, IntResult, Info))
- return APValue();
- Result = APValue(IntResult);
+ return false;
+ Val = APValue(IntResult);
} else if (SETy->isRealFloatingType()) {
APFloat F(0.0);
if (!EvaluateFloat(SE, F, Info))
- return APValue();
- Result = APValue(F);
+ return false;
+ Val = APValue(F);
} else {
- return APValue();
+ return Error(E);
}
// Splat and create vector APValue.
- SmallVector<APValue, 4> Elts(NElts, Result);
- return APValue(&Elts[0], Elts.size());
+ SmallVector<APValue, 4> Elts(NElts, Val);
+ return Success(Elts, E);
}
case CK_BitCast: {
- if (SETy->isVectorType())
- return Visit(SE);
-
- if (!SETy->isIntegerType())
- return APValue();
-
- APSInt Init;
- if (!EvaluateInteger(SE, Init, Info))
- return APValue();
-
- assert((EltTy->isIntegerType() || EltTy->isRealFloatingType()) &&
- "Vectors must be composed of ints or floats");
-
+ // Evaluate the operand into an APInt we can extract from.
+ llvm::APInt SValInt;
+ if (!EvalAndBitcastToAPInt(Info, SE, SValInt))
+ return false;
+ // Extract the elements
+ QualType EltTy = VTy->getElementType();
+ unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
SmallVector<APValue, 4> Elts;
- for (unsigned i = 0; i != NElts; ++i) {
- APSInt Tmp = Init.extOrTrunc(EltWidth);
-
- if (EltTy->isIntegerType())
- Elts.push_back(APValue(Tmp));
- else
- Elts.push_back(APValue(APFloat(Tmp)));
-
- Init >>= EltWidth;
+ if (EltTy->isRealFloatingType()) {
+ const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy);
+ bool isIEESem = &Sem != &APFloat::PPCDoubleDouble;
+ unsigned FloatEltSize = EltSize;
+ if (&Sem == &APFloat::x87DoubleExtended)
+ FloatEltSize = 80;
+ for (unsigned i = 0; i < NElts; i++) {
+ llvm::APInt Elt;
+ if (BigEndian)
+ Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize);
+ else
+ Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize);
+ Elts.push_back(APValue(APFloat(Elt, isIEESem)));
+ }
+ } else if (EltTy->isIntegerType()) {
+ for (unsigned i = 0; i < NElts; i++) {
+ llvm::APInt Elt;
+ if (BigEndian)
+ Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize);
+ else
+ Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize);
+ Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType())));
+ }
+ } else {
+ return Error(E);
}
- return APValue(&Elts[0], Elts.size());
+ return Success(Elts, E);
}
- case CK_LValueToRValue:
- case CK_NoOp:
- return Visit(SE);
default:
- return APValue();
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
}
-APValue
-VectorExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
- return this->Visit(E->getInitializer());
-}
-
-APValue
+bool
VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
- const VectorType *VT = E->getType()->getAs<VectorType>();
+ const VectorType *VT = E->getType()->castAs<VectorType>();
unsigned NumInits = E->getNumInits();
unsigned NumElements = VT->getNumElements();
QualType EltTy = VT->getElementType();
SmallVector<APValue, 4> Elements;
- // If a vector is initialized with a single element, that value
- // becomes every element of the vector, not just the first.
- // This is the behavior described in the IBM AltiVec documentation.
- if (NumInits == 1) {
-
- // Handle the case where the vector is initialized by a another
- // vector (OpenCL 6.1.6).
- if (E->getInit(0)->getType()->isVectorType())
- return this->Visit(const_cast<Expr*>(E->getInit(0)));
-
- APValue InitValue;
- if (EltTy->isIntegerType()) {
+ // The number of initializers can be less than the number of
+ // vector elements. For OpenCL, this can be due to nested vector
+ // initialization. For GCC compatibility, missing trailing elements
+ // should be initialized with zeroes.
+ unsigned CountInits = 0, CountElts = 0;
+ while (CountElts < NumElements) {
+ // Handle nested vector initialization.
+ if (CountInits < NumInits
+ && E->getInit(CountInits)->getType()->isExtVectorType()) {
+ APValue v;
+ if (!EvaluateVector(E->getInit(CountInits), v, Info))
+ return Error(E);
+ unsigned vlen = v.getVectorLength();
+ for (unsigned j = 0; j < vlen; j++)
+ Elements.push_back(v.getVectorElt(j));
+ CountElts += vlen;
+ } else if (EltTy->isIntegerType()) {
llvm::APSInt sInt(32);
- if (!EvaluateInteger(E->getInit(0), sInt, Info))
- return APValue();
- InitValue = APValue(sInt);
+ if (CountInits < NumInits) {
+ if (!EvaluateInteger(E->getInit(CountInits), sInt, Info))
+ return false;
+ } else // trailing integer zero.
+ sInt = Info.Ctx.MakeIntValue(0, EltTy);
+ Elements.push_back(APValue(sInt));
+ CountElts++;
} else {
llvm::APFloat f(0.0);
- if (!EvaluateFloat(E->getInit(0), f, Info))
- return APValue();
- InitValue = APValue(f);
- }
- for (unsigned i = 0; i < NumElements; i++) {
- Elements.push_back(InitValue);
- }
- } else {
- for (unsigned i = 0; i < NumElements; i++) {
- if (EltTy->isIntegerType()) {
- llvm::APSInt sInt(32);
- if (i < NumInits) {
- if (!EvaluateInteger(E->getInit(i), sInt, Info))
- return APValue();
- } else {
- sInt = Info.Ctx.MakeIntValue(0, EltTy);
- }
- Elements.push_back(APValue(sInt));
- } else {
- llvm::APFloat f(0.0);
- if (i < NumInits) {
- if (!EvaluateFloat(E->getInit(i), f, Info))
- return APValue();
- } else {
- f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
- }
- Elements.push_back(APValue(f));
- }
+ if (CountInits < NumInits) {
+ if (!EvaluateFloat(E->getInit(CountInits), f, Info))
+ return false;
+ } else // trailing float zero.
+ f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ Elements.push_back(APValue(f));
+ CountElts++;
}
+ CountInits++;
}
- return APValue(&Elements[0], Elements.size());
+ return Success(Elements, E);
}
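// Editor's note: illustrative sketches, not part of the patch, assuming the
// GCC vector and OpenCL ext_vector_type extensions are enabled:
//
//   typedef int v4 __attribute__((vector_size(16)));
//   v4 a = {1, 2};                  // trailing elements zeroed: {1, 2, 0, 0}
//
//   typedef float float2 __attribute__((ext_vector_type(2)));
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float2 xy = {1, 2};
//   float4 b = (float4)(xy, 3, 4);  // nested vector init (OpenCL 6.1.6)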
-APValue
-VectorExprEvaluator::GetZeroVector(QualType T) {
- const VectorType *VT = T->getAs<VectorType>();
+bool
+VectorExprEvaluator::ZeroInitialization(const Expr *E) {
+ const VectorType *VT = E->getType()->getAs<VectorType>();
QualType EltTy = VT->getElementType();
APValue ZeroElement;
if (EltTy->isIntegerType())
@@ -963,17 +3785,175 @@ VectorExprEvaluator::GetZeroVector(QualType T) {
APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)));
SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
- return APValue(&Elements[0], Elements.size());
+ return Success(Elements, E);
+}
+
+bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Array Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class ArrayExprEvaluator
+ : public ExprEvaluatorBase<ArrayExprEvaluator, bool> {
+ const LValue &This;
+ APValue &Result;
+ public:
+
+ ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
+ : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ assert((V.isArray() || V.isLValue()) &&
+ "expected array or string literal");
+ Result = V;
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E) {
+ const ConstantArrayType *CAT =
+ Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ Result = APValue(APValue::UninitArray(), 0,
+ CAT->getSize().getZExtValue());
+ if (!Result.hasArrayFiller()) return true;
+
+ // Zero-initialize all elements.
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ }
+
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ };
+} // end anonymous namespace
+
+static bool EvaluateArray(const Expr *E, const LValue &This,
+ APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue");
+ return ArrayExprEvaluator(Info, This, Result).Visit(E);
}
-APValue VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
- if (!E->getSubExpr()->isEvaluatable(Info.Ctx))
- Info.EvalResult.HasSideEffects = true;
- return GetZeroVector(E->getType());
+bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
+ // an appropriately-typed string literal enclosed in braces.
+ if (E->getNumInits() == 1 && E->getInit(0)->isGLValue() &&
+ Info.Ctx.hasSameUnqualifiedType(E->getType(), E->getInit(0)->getType())) {
+ LValue LV;
+ if (!EvaluateLValue(E->getInit(0), LV, Info))
+ return false;
+ APValue Val;
+ LV.moveInto(Val);
+ return Success(Val, E);
+ }
+
+ bool Success = true;
+
+ Result = APValue(APValue::UninitArray(), E->getNumInits(),
+ CAT->getSize().getZExtValue());
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ unsigned Index = 0;
+ for (InitListExpr::const_iterator I = E->begin(), End = E->end();
+ I != End; ++I, ++Index) {
+ if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
+ Info, Subobject, cast<Expr>(*I)) ||
+ !HandleLValueArrayAdjustment(Info, cast<Expr>(*I), Subobject,
+ CAT->getElementType(), 1)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ if (!Result.hasArrayFiller()) return Success;
+ assert(E->hasArrayFiller() && "no array filler for incomplete init list");
+ // FIXME: The Subobject here isn't necessarily right. This rarely matters,
+ // but sometimes does:
+ // struct S { constexpr S() : p(&p) {} void *p; };
+ // S s[10] = {};
+ return EvaluateInPlace(Result.getArrayFiller(), Info,
+ Subobject, E->getArrayFiller()) && Success;
+}
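// Editor's note: hedged examples, not part of the patch, for the init-list
// handling above:
//
//   constexpr char s[6] = {"hello"};   // braced string literal, p1 above
//   constexpr int a[4] = {1, 2};       // a[2] and a[3] use the array filler
//   static_assert(a[3] == 0, "");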
+
+bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ bool HadZeroInit = !Result.isUninit();
+ if (!HadZeroInit)
+ Result = APValue(APValue::UninitArray(), 0, CAT->getSize().getZExtValue());
+ if (!Result.hasArrayFiller())
+ return true;
+
+ const CXXConstructorDecl *FD = E->getConstructor();
+
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
+ if (HadZeroInit)
+ return true;
+
+ if (ZeroInit) {
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ }
+
+ const CXXRecordDecl *RD = FD->getParent();
+ if (RD->isUnion())
+ Result.getArrayFiller() = APValue((FieldDecl*)0);
+ else
+ Result.getArrayFiller() =
+ APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+ return true;
+ }
+
+ const FunctionDecl *Definition = 0;
+ FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
+ return false;
+
+ // FIXME: The Subobject here isn't necessarily right. This rarely matters,
+ // but sometimes does:
+ // struct S { constexpr S() : p(&p) {} void *p; };
+ // S s[10];
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+
+ if (ZeroInit && !HadZeroInit) {
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ if (!EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE))
+ return false;
+ }
+
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ return HandleConstructorCall(E->getExprLoc(), Subobject, Args,
+ cast<CXXConstructorDecl>(Definition),
+ Info, Result.getArrayFiller());
}
//===----------------------------------------------------------------------===//
// Integer Evaluation
+//
+// As a GNU extension, we support casting pointers to sufficiently-wide integer
+// types and back in constant folding. Integer values are thus represented
+// either as an integer-valued APValue, or as an lvalue-valued APValue.
//===----------------------------------------------------------------------===//
namespace {
@@ -984,7 +3964,7 @@ public:
IntExprEvaluator(EvalInfo &info, APValue &result)
: ExprEvaluatorBaseTy(info), Result(result) {}
- bool Success(const llvm::APSInt &SI, const Expr *E) {
+ bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
@@ -994,8 +3974,11 @@ public:
Result = APValue(SI);
return true;
}
+ bool Success(const llvm::APSInt &SI, const Expr *E) {
+ return Success(SI, E, Result);
+ }
- bool Success(const llvm::APInt &I, const Expr *E) {
+ bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
@@ -1005,37 +3988,33 @@ public:
E->getType()->isUnsignedIntegerOrEnumerationType());
return true;
}
+ bool Success(const llvm::APInt &I, const Expr *E) {
+ return Success(I, E, Result);
+ }
- bool Success(uint64_t Value, const Expr *E) {
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
return true;
}
+ bool Success(uint64_t Value, const Expr *E) {
+ return Success(Value, E, Result);
+ }
bool Success(CharUnits Size, const Expr *E) {
return Success(Size.getQuantity(), E);
}
-
- bool Error(SourceLocation L, diag::kind D, const Expr *E) {
- // Take the first error.
- if (Info.EvalResult.Diag == 0) {
- Info.EvalResult.DiagLoc = L;
- Info.EvalResult.Diag = D;
- Info.EvalResult.DiagExpr = E;
- }
- return false;
- }
-
bool Success(const APValue &V, const Expr *E) {
+ if (V.isLValue() || V.isAddrLabelDiff()) {
+ Result = V;
+ return true;
+ }
return Success(V.getInt(), E);
}
- bool Error(const Expr *E) {
- return Error(E->getLocStart(), diag::note_invalid_subexpr_in_ice, E);
- }
- bool ValueInitialization(const Expr *E) { return Success(0, E); }
+ bool ZeroInitialization(const Expr *E) { return Success(0, E); }
//===--------------------------------------------------------------------===//
// Visitor Methods
@@ -1057,8 +4036,7 @@ public:
}
bool VisitMemberExpr(const MemberExpr *E) {
if (CheckReferencedDecl(E, E->getMemberDecl())) {
- // Conservatively assume a MemberExpr will have side-effects
- Info.EvalResult.HasSideEffects = true;
+ VisitIgnoredValue(E->getBase());
return true;
}
@@ -1077,9 +4055,13 @@ public:
return Success(E->getValue(), E);
}
+ bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
// Note, GNU defines __null as an integer, not a pointer.
bool VisitGNUNullExpr(const GNUNullExpr *E) {
- return ValueInitialization(E);
+ return ZeroInitialization(E);
}
bool VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
@@ -1090,6 +4072,10 @@ public:
return Success(E->getValue(), E);
}
+ bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
return Success(E->getValue(), E);
}
@@ -1107,27 +4093,43 @@ public:
private:
CharUnits GetAlignOfExpr(const Expr *E);
CharUnits GetAlignOfType(QualType T);
- static QualType GetObjectType(const Expr *E);
+ static QualType GetObjectType(APValue::LValueBase B);
bool TryEvaluateBuiltinObjectSize(const CallExpr *E);
// FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
-static bool EvaluateIntegerOrLValue(const Expr* E, APValue &Result, EvalInfo &Info) {
- assert(E->getType()->isIntegralOrEnumerationType());
+/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
+/// produce either the integer value or a pointer.
+///
+/// GCC has a heinous extension which folds casts between pointer types and
+/// pointer-sized integral types. We support this by allowing the evaluation of
+/// an integer rvalue to produce a pointer (represented as an lvalue) instead.
+/// Some simple arithmetic on such values is supported (they are treated much
+/// like char*).
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
return IntExprEvaluator(Info, Result).Visit(E);
}
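// Editor's note: an illustrative sketch, not part of the patch, of the GCC
// extension described above:
//
//   extern int x;
//   unsigned long n = (unsigned long)&x + 4;   // folds to an lvalue-valued
//                                              // APValue: &x plus 4 bytes
//
// Such values behave much like char* for the limited arithmetic supported.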
-static bool EvaluateInteger(const Expr* E, APSInt &Result, EvalInfo &Info) {
- assert(E->getType()->isIntegralOrEnumerationType());
-
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
APValue Val;
- if (!EvaluateIntegerOrLValue(E, Val, Info) || !Val.isInt())
+ if (!EvaluateIntegerOrLValue(E, Val, Info))
return false;
+ if (!Val.isInt()) {
+ // FIXME: It would be better to produce the diagnostic for casting
+ // a pointer to an integer.
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
Result = Val.getInt();
return true;
}
+/// Check whether the given declaration can be directly converted to an integral
+/// rvalue. If not, no diagnostic is produced; there are other things we can
+/// try.
bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
// Enums are integer constant exprs.
if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
@@ -1149,43 +4151,6 @@ bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
return Success(Val, E);
}
}
-
- // In C++, const, non-volatile integers initialized with ICEs are ICEs.
- // In C, they can also be folded, although they are not ICEs.
- if (Info.Ctx.getCanonicalType(E->getType()).getCVRQualifiers()
- == Qualifiers::Const) {
-
- if (isa<ParmVarDecl>(D))
- return false;
-
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- if (const Expr *Init = VD->getAnyInitializer()) {
- if (APValue *V = VD->getEvaluatedValue()) {
- if (V->isInt())
- return Success(V->getInt(), E);
- return false;
- }
-
- if (VD->isEvaluatingValue())
- return false;
-
- VD->setEvaluatingValue();
-
- Expr::EvalResult EResult;
- if (Init->Evaluate(EResult, Info.Ctx) && !EResult.HasSideEffects &&
- EResult.Val.isInt()) {
- // Cache the evaluated value in the variable declaration.
- Result = EResult.Val;
- VD->setEvaluatedValue(Result);
- return true;
- }
-
- VD->setEvaluatedValue(APValue());
- }
- }
- }
-
- // Otherwise, random variable references are not constants.
return false;
}
@@ -1242,17 +4207,72 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E) {
return union_type_class;
else // FIXME: offset_type_class, method_type_class, & lang_type_class?
llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
- return -1;
+}
+
+/// EvaluateBuiltinConstantPForLValue - Determine the result of
+/// __builtin_constant_p when applied to the given lvalue.
+///
+/// An lvalue is only "constant" if it is a pointer or reference to the first
+/// character of a string literal.
+template<typename LValue>
+static bool EvaluateBuiltinConstantPForLValue(const LValue &LV) {
+ const Expr *E = LV.getLValueBase().template dyn_cast<const Expr*>();
+ return E && isa<StringLiteral>(E) && LV.getLValueOffset().isZero();
+}
+
+/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
+/// GCC as we can manage.
+static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
+ QualType ArgType = Arg->getType();
+
+ // __builtin_constant_p always has one operand. The rules which gcc follows
+ // are not precisely documented, but are as follows:
+ //
+ // - If the operand is of integral, floating, complex or enumeration type,
+ // and can be folded to a known value of that type, it returns 1.
+ // - If the operand can be folded to a pointer to the first character
+ // of a string literal (or such a pointer cast to an integral type), it
+ // returns 1.
+ //
+ // Otherwise, it returns 0.
+ //
+ // FIXME: GCC also intends to return 1 for literals of aggregate types, but
+ // its support for this does not currently work.
+ if (ArgType->isIntegralOrEnumerationType()) {
+ Expr::EvalResult Result;
+ if (!Arg->EvaluateAsRValue(Result, Ctx) || Result.HasSideEffects)
+ return false;
+
+ APValue &V = Result.Val;
+ if (V.getKind() == APValue::Int)
+ return true;
+
+ return EvaluateBuiltinConstantPForLValue(V);
+ } else if (ArgType->isFloatingType() || ArgType->isAnyComplexType()) {
+ return Arg->isEvaluatable(Ctx);
+ } else if (ArgType->isPointerType() || Arg->isGLValue()) {
+ LValue LV;
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status);
+ if ((Arg->isGLValue() ? EvaluateLValue(Arg, LV, Info)
+ : EvaluatePointer(Arg, LV, Info)) &&
+ !Status.HasSideEffects)
+ return EvaluateBuiltinConstantPForLValue(LV);
+ }
+
+ // Anything else isn't considered to be sufficiently constant.
+ return false;
}
/// Retrieves the "underlying object type" of the given expression,
/// as used by __builtin_object_size.
-QualType IntExprEvaluator::GetObjectType(const Expr *E) {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+QualType IntExprEvaluator::GetObjectType(APValue::LValueBase B) {
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->getType();
- } else if (isa<CompoundLiteralExpr>(E)) {
- return E->getType();
+ } else if (const Expr *E = B.get<const Expr*>()) {
+ if (isa<CompoundLiteralExpr>(E))
+ return E->getType();
}
return QualType();
@@ -1265,16 +4285,15 @@ bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
return false;
// If we can prove the base is null, lower to zero now.
- const Expr *LVBase = Base.getLValueBase();
- if (!LVBase) return Success(0, E);
+ if (!Base.getLValueBase()) return Success(0, E);
- QualType T = GetObjectType(LVBase);
+ QualType T = GetObjectType(Base.getLValueBase());
if (T.isNull() ||
T->isIncompleteType() ||
T->isFunctionType() ||
T->isVariablyModifiedType() ||
T->isDependentType())
- return false;
+ return Error(E);
CharUnits Size = Info.Ctx.getTypeSizeInChars(T);
CharUnits Offset = Base.getLValueOffset();
@@ -1287,7 +4306,7 @@ bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
}
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
- switch (E->isBuiltinCall(Info.Ctx)) {
+ switch (unsigned BuiltinOp = E->isBuiltinCall()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -1303,17 +4322,15 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(0, E);
}
- return Error(E->getLocStart(), diag::note_invalid_subexpr_in_ice, E);
+ return Error(E);
}
case Builtin::BI__builtin_classify_type:
return Success(EvaluateBuiltinClassifyType(E), E);
case Builtin::BI__builtin_constant_p:
- // __builtin_constant_p always has one operand: it returns true if that
- // operand can be folded, false otherwise.
- return Success(E->getArg(0)->isEvaluatable(Info.Ctx), E);
-
+ return Success(EvaluateBuiltinConstantP(Info.Ctx, E->getArg(0)), E);
+
case Builtin::BI__builtin_eh_return_data_regno: {
int Operand = E->getArg(0)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(Operand);
@@ -1322,8 +4339,15 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_expect:
return Visit(E->getArg(0));
-
+
case Builtin::BIstrlen:
+ // A call to strlen is not a constant expression.
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.CCEDiag(E, diag::note_constexpr_invalid_function)
+ << /*isConstexpr*/0 << /*isConstructor*/0 << "'strlen'";
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ // Fall through.
case Builtin::BI__builtin_strlen:
// As an extension, we support strlen() and __builtin_strlen() as constant
// expressions when the argument is a string literal.
@@ -1339,57 +4363,482 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(Str.size(), E);
}
- return Error(E->getLocStart(), diag::note_invalid_subexpr_in_ice, E);
+ return Error(E);
+
+ case Builtin::BI__atomic_always_lock_free:
+ case Builtin::BI__atomic_is_lock_free:
+ case Builtin::BI__c11_atomic_is_lock_free: {
+ APSInt SizeVal;
+ if (!EvaluateInteger(E->getArg(0), SizeVal, Info))
+ return false;
+
+ // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
+ // of two less than the maximum inline atomic width, we know it is
+ // lock-free. If the size isn't a power of two, or greater than the
+ // maximum alignment where we promote atomics, we know it is not lock-free
+ // (at least not in the sense of atomic_is_lock_free). Otherwise,
+ // the answer can only be determined at runtime; for example, 16-byte
+ // atomics have lock-free implementations on some, but not all,
+ // x86-64 processors.
+
+ // Check power-of-two.
+ CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+ if (Size.isPowerOfTwo()) {
+ // Check against inlining width.
+ unsigned InlineWidthBits =
+ Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
+ if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
+ if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
+ Size == CharUnits::One() ||
+ E->getArg(1)->isNullPointerConstant(Info.Ctx,
+ Expr::NPC_NeverValueDependent))
+ // OK, we will inline appropriately-aligned operations of this size,
+ // and _Atomic(T) is appropriately-aligned.
+ return Success(1, E);
+
+ QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
+ castAs<PointerType>()->getPointeeType();
+ if (!PointeeType->isIncompleteType() &&
+ Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
+ // OK, we will inline operations on this object.
+ return Success(1, E);
+ }
+ }
+ }
+
+ return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
+ Success(0, E) : Error(E);
+ }
}
}
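// Editor's note: an illustrative sketch, not part of the patch, on a
// hypothetical target whose max inline atomic width is 8 bytes:
//
//   static_assert(__atomic_always_lock_free(4, 0), "");   // power of two,
//                                                         // within width
//
// On such a target __atomic_always_lock_free(16, 0) folds to 0, while
// __c11_atomic_is_lock_free(16) is left to a runtime library call.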
-bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
- if (E->getOpcode() == BO_Comma) {
- if (!Visit(E->getRHS()))
+static bool HasSameBase(const LValue &A, const LValue &B) {
+ if (!A.getLValueBase())
+ return !B.getLValueBase();
+ if (!B.getLValueBase())
+ return false;
+
+ if (A.getLValueBase().getOpaqueValue() !=
+ B.getLValueBase().getOpaqueValue()) {
+ const Decl *ADecl = GetLValueBaseDecl(A);
+ if (!ADecl)
+ return false;
+ const Decl *BDecl = GetLValueBaseDecl(B);
+ if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl())
return false;
+ }
- // If we can't evaluate the LHS, it might have side effects;
- // conservatively mark it.
- if (!E->getLHS()->isEvaluatable(Info.Ctx))
- Info.EvalResult.HasSideEffects = true;
+ return IsGlobalLValue(A.getLValueBase()) ||
+ A.getLValueCallIndex() == B.getLValueCallIndex();
+}
+/// Perform the given integer operation, which is known to need at most BitWidth
+/// bits, and check for overflow in the original type (if that type was not an
+/// unsigned type).
+template<typename Operation>
+static APSInt CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
+ const APSInt &LHS, const APSInt &RHS,
+ unsigned BitWidth, Operation Op) {
+ if (LHS.isUnsigned())
+ return Op(LHS, RHS);
+
+ APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false);
+ APSInt Result = Value.trunc(LHS.getBitWidth());
+ if (Result.extend(BitWidth) != Value)
+ HandleOverflow(Info, E, Value, E->getType());
+ return Result;
+}
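// Editor's note: a hedged sketch, not part of the patch. For a 32-bit int,
// INT_MAX + 1 is computed in 33 bits; the truncated result fails to
// round-trip, so HandleOverflow fires:
//
//   constexpr int bad = 0x7fffffff + 1;   // diagnosed: 2147483648 does not
//                                         // fit in 'int'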
+
+namespace {
+
+/// \brief Data recursive integer evaluator of certain binary operators.
+///
+/// We use a data recursive algorithm for binary operators so that we are able
+/// to handle extreme cases of chained binary operators without causing stack
+/// overflow.
+class DataRecursiveIntBinOpEvaluator {
+ struct EvalResult {
+ APValue Val;
+ bool Failed;
+
+ EvalResult() : Failed(false) { }
+
+ void swap(EvalResult &RHS) {
+ Val.swap(RHS.Val);
+ Failed = RHS.Failed;
+ RHS.Failed = false;
+ }
+ };
+
+ struct Job {
+ const Expr *E;
+ EvalResult LHSResult; // meaningful only for binary operator expression.
+ enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;
+
+ Job() : StoredInfo(0) { }
+ void startSpeculativeEval(EvalInfo &Info) {
+ OldEvalStatus = Info.EvalStatus;
+ Info.EvalStatus.Diag = 0;
+ StoredInfo = &Info;
+ }
+ ~Job() {
+ if (StoredInfo) {
+ StoredInfo->EvalStatus = OldEvalStatus;
+ }
+ }
+ private:
+ EvalInfo *StoredInfo; // non-null if status changed.
+ Expr::EvalStatus OldEvalStatus;
+ };
+
+ SmallVector<Job, 16> Queue;
+
+ IntExprEvaluator &IntEval;
+ EvalInfo &Info;
+ APValue &FinalResult;
+
+public:
+ DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
+ : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }
+
+ /// \brief True if \param E is a binary operator that we are going to handle
+ /// data recursively.
+ /// We handle binary operators that are comma, logical, or that have operands
+ /// with integral or enumeration type.
+ static bool shouldEnqueue(const BinaryOperator *E) {
+ return E->getOpcode() == BO_Comma ||
+ E->isLogicalOp() ||
+ (E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+ }
+
+ bool Traverse(const BinaryOperator *E) {
+ enqueue(E);
+ EvalResult PrevResult;
+ while (!Queue.empty())
+ process(PrevResult);
+
+ if (PrevResult.Failed) return false;
+
+ FinalResult.swap(PrevResult.Val);
return true;
}
- if (E->isLogicalOp()) {
- // These need to be handled specially because the operands aren't
- // necessarily integral
- bool lhsResult, rhsResult;
+private:
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
+ return IntEval.Success(Value, E, Result);
+ }
+ bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
+ return IntEval.Success(Value, E, Result);
+ }
+ bool Error(const Expr *E) {
+ return IntEval.Error(E);
+ }
+ bool Error(const Expr *E, diag::kind D) {
+ return IntEval.Error(E, D);
+ }
+
+ OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
+ return Info.CCEDiag(E, D);
+ }
+
+ // \brief Returns true if visiting the RHS is necessary, false otherwise.
+ bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
+ bool &SuppressRHSDiags);
+
+ bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
+ const BinaryOperator *E, APValue &Result);
+
+ void EvaluateExpr(const Expr *E, EvalResult &Result) {
+ Result.Failed = !Evaluate(Result.Val, Info, E);
+ if (Result.Failed)
+ Result.Val = APValue();
+ }
- if (HandleConversionToBool(E->getLHS(), lhsResult, Info)) {
+ void process(EvalResult &Result);
+
+ void enqueue(const Expr *E) {
+ E = E->IgnoreParens();
+ Queue.resize(Queue.size()+1);
+ Queue.back().E = E;
+ Queue.back().Kind = Job::AnyExprKind;
+ }
+};
+
+}
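// Editor's note, not part of the patch: the point of the data recursion is
// that a pathological chain such as
//
//   int k = 1 + 1 + 1 /* ...tens of thousands of terms... */ + 1;
//
// is driven by the explicit Job queue rather than nested Visit calls, so
// evaluation no longer consumes stack proportional to expression depth.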
+
+bool DataRecursiveIntBinOpEvaluator::
+ VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
+ bool &SuppressRHSDiags) {
+ if (E->getOpcode() == BO_Comma) {
+ // Ignore LHS but note if we could not evaluate it.
+ if (LHSResult.Failed)
+ Info.EvalStatus.HasSideEffects = true;
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ bool lhsResult;
+ if (HandleConversionToBool(LHSResult.Val, lhsResult)) {
// We were able to evaluate the LHS, see if we can get away with not
// evaluating the RHS: 0 && X -> 0, 1 || X -> 1
- if (lhsResult == (E->getOpcode() == BO_LOr))
- return Success(lhsResult, E);
+ if (lhsResult == (E->getOpcode() == BO_LOr)) {
+ Success(lhsResult, E, LHSResult.Val);
+ return false; // Ignore RHS
+ }
+ } else {
+ // Since we weren't able to evaluate the left hand side, it
+ // must have had side effects.
+ Info.EvalStatus.HasSideEffects = true;
+
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ // Don't ignore RHS and suppress diagnostics from this arm.
+ SuppressRHSDiags = true;
+ }
+
+ return true;
+ }
+
+ assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+
+ if (LHSResult.Failed && !Info.keepEvaluatingAfterFailure())
+ return false; // Ignore RHS.
+
+ return true;
+}
- if (HandleConversionToBool(E->getRHS(), rhsResult, Info)) {
+bool DataRecursiveIntBinOpEvaluator::
+ VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
+ const BinaryOperator *E, APValue &Result) {
+ if (E->getOpcode() == BO_Comma) {
+ if (RHSResult.Failed)
+ return false;
+ Result = RHSResult.Val;
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ bool lhsResult, rhsResult;
+ bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult);
+ bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult);
+
+ if (LHSIsOK) {
+ if (RHSIsOK) {
if (E->getOpcode() == BO_LOr)
- return Success(lhsResult || rhsResult, E);
+ return Success(lhsResult || rhsResult, E, Result);
else
- return Success(lhsResult && rhsResult, E);
+ return Success(lhsResult && rhsResult, E, Result);
}
} else {
- if (HandleConversionToBool(E->getRHS(), rhsResult, Info)) {
+ if (RHSIsOK) {
// We can't evaluate the LHS; however, sometimes the result
// is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
- if (rhsResult == (E->getOpcode() == BO_LOr) ||
- !rhsResult == (E->getOpcode() == BO_LAnd)) {
- // Since we weren't able to evaluate the left hand side, it
- // must have had side effects.
- Info.EvalResult.HasSideEffects = true;
+ if (rhsResult == (E->getOpcode() == BO_LOr))
+ return Success(rhsResult, E, Result);
+ }
+ }
+
+ return false;
+ }
+
+ assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+
+ if (LHSResult.Failed || RHSResult.Failed)
+ return false;
+
+ const APValue &LHSVal = LHSResult.Val;
+ const APValue &RHSVal = RHSResult.Val;
+
+ // Handle cases like (unsigned long)&a + 4.
+ if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
+ Result = LHSVal;
+ CharUnits AdditionalOffset = CharUnits::fromQuantity(
+ RHSVal.getInt().getZExtValue());
+ if (E->getOpcode() == BO_Add)
+ Result.getLValueOffset() += AdditionalOffset;
+ else
+ Result.getLValueOffset() -= AdditionalOffset;
+ return true;
+ }
+
+ // Handle cases like 4 + (unsigned long)&a
+ if (E->getOpcode() == BO_Add &&
+ RHSVal.isLValue() && LHSVal.isInt()) {
+ Result = RHSVal;
+ Result.getLValueOffset() += CharUnits::fromQuantity(
+ LHSVal.getInt().getZExtValue());
+ return true;
+ }
+
+ if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
+ // Handle (intptr_t)&&A - (intptr_t)&&B.
+ if (!LHSVal.getLValueOffset().isZero() ||
+ !RHSVal.getLValueOffset().isZero())
+ return false;
+ const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
+ const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
+ if (!LHSExpr || !RHSExpr)
+ return false;
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return false;
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return false;
+ Result = APValue(LHSAddrExpr, RHSAddrExpr);
+ return true;
+ }
+
+ // All the following cases expect both operands to be an integer
+ if (!LHSVal.isInt() || !RHSVal.isInt())
+ return Error(E);
+
+ const APSInt &LHS = LHSVal.getInt();
+ APSInt RHS = RHSVal.getInt();
+
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+ case BO_Mul:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() * 2,
+ std::multiplies<APSInt>()), E,
+ Result);
+ case BO_Add:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() + 1,
+ std::plus<APSInt>()), E, Result);
+ case BO_Sub:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() + 1,
+ std::minus<APSInt>()), E, Result);
+ case BO_And: return Success(LHS & RHS, E, Result);
+ case BO_Xor: return Success(LHS ^ RHS, E, Result);
+ case BO_Or: return Success(LHS | RHS, E, Result);
+ case BO_Div:
+ case BO_Rem:
+ if (RHS == 0)
+ return Error(E, diag::note_expr_divide_by_zero);
+ // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. The latter is
+ // not actually undefined behavior in C++11 due to a language defect.
+ if (RHS.isNegative() && RHS.isAllOnesValue() &&
+ LHS.isSigned() && LHS.isMinSignedValue())
+ HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1), E->getType());
+ return Success(E->getOpcode() == BO_Rem ? LHS % RHS : LHS / RHS, E,
+ Result);
+ case BO_Shl: {
+ // During constant-folding, a negative shift is an opposite shift. Such
+ // a shift is not a constant expression.
+ if (RHS.isSigned() && RHS.isNegative()) {
+ CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ RHS = -RHS;
+ goto shift_right;
+ }
+
+ shift_left:
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of
+ // the shifted type.
+ unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ if (SA != RHS) {
+ CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHS << E->getType() << LHS.getBitWidth();
+ } else if (LHS.isSigned()) {
+ // C++11 [expr.shift]p2: A signed left shift must have a non-negative
+ // operand, and must not overflow the corresponding unsigned type.
+ if (LHS.isNegative())
+ CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
+ else if (LHS.countLeadingZeros() < SA)
+ CCEDiag(E, diag::note_constexpr_lshift_discards);
+ }
+
+ return Success(LHS << SA, E, Result);
+ }
+ case BO_Shr: {
+ // During constant-folding, a negative shift is an opposite shift. Such a
+ // shift is not a constant expression.
+ if (RHS.isSigned() && RHS.isNegative()) {
+ CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ RHS = -RHS;
+ goto shift_left;
+ }
+
+ shift_right:
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of the
+ // shifted type.
+ unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ if (SA != RHS)
+ CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHS << E->getType() << LHS.getBitWidth();
+
+ return Success(LHS >> SA, E, Result);
+ }
+
+ case BO_LT: return Success(LHS < RHS, E, Result);
+ case BO_GT: return Success(LHS > RHS, E, Result);
+ case BO_LE: return Success(LHS <= RHS, E, Result);
+ case BO_GE: return Success(LHS >= RHS, E, Result);
+ case BO_EQ: return Success(LHS == RHS, E, Result);
+ case BO_NE: return Success(LHS != RHS, E, Result);
+ }
+}
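// Editor's note: hedged examples, not part of the patch, for the shift
// handling above (assuming 32-bit int):
//
//   enum { A = 1 << 3 };         // OK: 8
//   constexpr int a = 1 << -1;   // folded as 1 >> 1, but diagnosed with
//                                // note_constexpr_negative_shift
//   constexpr int b = 1 << 40;   // note_constexpr_large_shift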
- return Success(rhsResult, E);
+void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
+ Job &job = Queue.back();
+
+ switch (job.Kind) {
+ case Job::AnyExprKind: {
+ if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
+ if (shouldEnqueue(Bop)) {
+ job.Kind = Job::BinOpKind;
+ enqueue(Bop->getLHS());
+ return;
}
}
+
+ EvaluateExpr(job.E, Result);
+ Queue.pop_back();
+ return;
+ }
+
+ case Job::BinOpKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ bool SuppressRHSDiags = false;
+ if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) {
+ Queue.pop_back();
+ return;
+ }
+ if (SuppressRHSDiags)
+ job.startSpeculativeEval(Info);
+ job.LHSResult.swap(Result);
+ job.Kind = Job::BinOpVisitedLHSKind;
+ enqueue(Bop->getRHS());
+ return;
+ }
+
+ case Job::BinOpVisitedLHSKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ EvalResult RHS;
+ RHS.swap(Result);
+ Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val);
+ Queue.pop_back();
+ return;
}
-
- return false;
}
+
+ llvm_unreachable("Invalid Job::Kind!");
+}
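The process() state machine above drives evaluation from an explicit job stack instead of native recursion, so pathologically deep expressions such as 1+1+...+1 cannot overflow the compiler's own stack. A self-contained sketch of the same idea for a toy expression tree (hypothetical Node type, addition only):

  #include <vector>

  struct Node { int value; Node *lhs; Node *rhs; }; // leaf: lhs == nullptr

  int evaluate(Node *root) {
    enum Kind { Visit, Combine };
    struct Job { Node *n; Kind k; };
    std::vector<Job> jobs = {{root, Visit}};
    std::vector<int> results;
    while (!jobs.empty()) {
      Job j = jobs.back(); jobs.pop_back();
      if (j.k == Visit && j.n->lhs) {      // interior '+' node
        jobs.push_back({j.n, Combine});    // revisit once children are done
        jobs.push_back({j.n->rhs, Visit});
        jobs.push_back({j.n->lhs, Visit}); // LHS is evaluated first
      } else if (j.k == Visit) {
        results.push_back(j.n->value);     // leaf
      } else {
        int r = results.back(); results.pop_back();
        int l = results.back(); results.pop_back();
        results.push_back(l + r);
      }
    }
    return results.back();
  }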
+
+bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isAssignmentOp())
+ return Error(E);
+
+ if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
+ return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
QualType LHSTy = E->getLHS()->getType();
QualType RHSTy = E->getRHS()->getType();
@@ -1398,10 +4847,11 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
assert(RHSTy->isAnyComplexType() && "Invalid comparison");
ComplexValue LHS, RHS;
- if (!EvaluateComplex(E->getLHS(), LHS, Info))
+ bool LHSOK = EvaluateComplex(E->getLHS(), LHS, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
- if (!EvaluateComplex(E->getRHS(), RHS, Info))
+ if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
return false;
if (LHS.isComplexFloat()) {
@@ -1440,10 +4890,11 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
RHSTy->isRealFloatingType()) {
APFloat RHS(0.0), LHS(0.0);
- if (!EvaluateFloat(E->getRHS(), RHS, Info))
+ bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
- if (!EvaluateFloat(E->getLHS(), LHS, Info))
+ if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK)
return false;
APFloat::cmpResult CR = LHS.compare(RHS);
@@ -1470,155 +4921,232 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
}
if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
- if (E->getOpcode() == BO_Sub || E->isEqualityOp()) {
- LValue LHSValue;
- if (!EvaluatePointer(E->getLHS(), LHSValue, Info))
+ if (E->getOpcode() == BO_Sub || E->isComparisonOp()) {
+ LValue LHSValue, RHSValue;
+
+ bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
+      if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
- LValue RHSValue;
- if (!EvaluatePointer(E->getRHS(), RHSValue, Info))
+ if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
return false;
- // Reject any bases from the normal codepath; we special-case comparisons
- // to null.
- if (LHSValue.getLValueBase()) {
- if (!E->isEqualityOp())
- return false;
- if (RHSValue.getLValueBase() || !RHSValue.getLValueOffset().isZero())
- return false;
- bool bres;
- if (!EvalPointerValueAsBool(LHSValue, bres))
- return false;
- return Success(bres ^ (E->getOpcode() == BO_EQ), E);
- } else if (RHSValue.getLValueBase()) {
+ // Reject differing bases from the normal codepath; we special-case
+ // comparisons to null.
+ if (!HasSameBase(LHSValue, RHSValue)) {
+ if (E->getOpcode() == BO_Sub) {
+ // Handle &&A - &&B.
+ if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
+ return false;
+ const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
+        const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>();
+ if (!LHSExpr || !RHSExpr)
+ return false;
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return false;
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return false;
+ Result = APValue(LHSAddrExpr, RHSAddrExpr);
+ return true;
+ }
+ // Inequalities and subtractions between unrelated pointers have
+ // unspecified or undefined behavior.
if (!E->isEqualityOp())
- return false;
- if (LHSValue.getLValueBase() || !LHSValue.getLValueOffset().isZero())
- return false;
- bool bres;
- if (!EvalPointerValueAsBool(RHSValue, bres))
- return false;
- return Success(bres ^ (E->getOpcode() == BO_EQ), E);
+ return Error(E);
+ // A constant address may compare equal to the address of a symbol.
+      // The one exception is that the address of an object cannot compare
+      // equal to a null pointer constant.
+ if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
+ (!RHSValue.Base && !RHSValue.Offset.isZero()))
+ return Error(E);
+ // It's implementation-defined whether distinct literals will have
+ // distinct addresses. In clang, the result of such a comparison is
+ // unspecified, so it is not a constant expression. However, we do know
+ // that the address of a literal will be non-null.
+ if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
+ LHSValue.Base && RHSValue.Base)
+ return Error(E);
+ // We can't tell whether weak symbols will end up pointing to the same
+ // object.
+ if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
+ return Error(E);
+ // Pointers with different bases cannot represent the same object.
+ // (Note that clang defaults to -fmerge-all-constants, which can
+ // lead to inconsistent results for comparisons involving the address
+ // of a constant; this generally doesn't matter in practice.)
+ return Success(E->getOpcode() == BO_NE, E);
}
+ const CharUnits &LHSOffset = LHSValue.getLValueOffset();
+ const CharUnits &RHSOffset = RHSValue.getLValueOffset();
+
+ SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
+ SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
+
if (E->getOpcode() == BO_Sub) {
+ // C++11 [expr.add]p6:
+ // Unless both pointers point to elements of the same array object, or
+ // one past the last element of the array object, the behavior is
+ // undefined.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ !AreElementsOfSameArray(getType(LHSValue.Base),
+ LHSDesignator, RHSDesignator))
+ CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array);
+
QualType Type = E->getLHS()->getType();
QualType ElementType = Type->getAs<PointerType>()->getPointeeType();
- CharUnits ElementSize = CharUnits::One();
- if (!ElementType->isVoidType() && !ElementType->isFunctionType())
- ElementSize = Info.Ctx.getTypeSizeInChars(ElementType);
+ CharUnits ElementSize;
+ if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize))
+ return false;
- CharUnits Diff = LHSValue.getLValueOffset() -
- RHSValue.getLValueOffset();
- return Success(Diff / ElementSize, E);
+ // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
+ // and produce incorrect results when it overflows. Such behavior
+ // appears to be non-conforming, but is common, so perhaps we should
+ // assume the standard intended for such cases to be undefined behavior
+ // and check for them.
+
+ // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
+ // overflow in the final conversion to ptrdiff_t.
+ APSInt LHS(
+ llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
+ APSInt RHS(
+ llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
+ APSInt ElemSize(
+ llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), false);
+ APSInt TrueResult = (LHS - RHS) / ElemSize;
+ APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
+
+ if (Result.extend(65) != TrueResult)
+ HandleOverflow(Info, E, TrueResult, E->getType());
+ return Success(Result, E);
}
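The 65-bit width above is the smallest that makes LHSOffset - RHSOffset exact for 64-bit offsets; the truncate-and-extend round trip then catches quotients that do not fit in the ptrdiff_t-typed result. The same check with fixed-width types (hypothetical helper; assumes a compiler providing __int128):

  #include <cstdint>

  bool checked_ptr_diff(int64_t lhsOff, int64_t rhsOff, int64_t elemSize,
                        int64_t &out) {
    __int128 exact = ((__int128)lhsOff - rhsOff) / elemSize; // cannot overflow
    out = (int64_t)exact;           // truncate to the result type's width
    return (__int128)out == exact;  // round trip detects overflow
  }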
- bool Result;
- if (E->getOpcode() == BO_EQ) {
- Result = LHSValue.getLValueOffset() == RHSValue.getLValueOffset();
- } else {
- Result = LHSValue.getLValueOffset() != RHSValue.getLValueOffset();
- }
- return Success(Result, E);
- }
- }
- if (!LHSTy->isIntegralOrEnumerationType() ||
- !RHSTy->isIntegralOrEnumerationType()) {
- // We can't continue from here for non-integral types, and they
- // could potentially confuse the following operations.
- return false;
- }
-
- // The LHS of a constant expr is always evaluated and needed.
- if (!Visit(E->getLHS()))
- return false; // error in subexpression.
- APValue RHSVal;
- if (!EvaluateIntegerOrLValue(E->getRHS(), RHSVal, Info))
- return false;
+ // C++11 [expr.rel]p3:
+ // Pointers to void (after pointer conversions) can be compared, with a
+ // result defined as follows: If both pointers represent the same
+ // address or are both the null pointer value, the result is true if the
+ // operator is <= or >= and false otherwise; otherwise the result is
+ // unspecified.
+ // We interpret this as applying to pointers to *cv* void.
+ if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset &&
+ E->isRelationalOp())
+ CCEDiag(E, diag::note_constexpr_void_comparison);
+
+ // C++11 [expr.rel]p2:
+ // - If two pointers point to non-static data members of the same object,
+  //    or to subobjects or array elements of such members, recursively, the
+ // pointer to the later declared member compares greater provided the
+ // two members have the same access control and provided their class is
+ // not a union.
+ // [...]
+ // - Otherwise pointer comparisons are unspecified.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ E->isRelationalOp()) {
+ bool WasArrayIndex;
+ unsigned Mismatch =
+ FindDesignatorMismatch(getType(LHSValue.Base), LHSDesignator,
+ RHSDesignator, WasArrayIndex);
+ // At the point where the designators diverge, the comparison has a
+ // specified value if:
+ // - we are comparing array indices
+ // - we are comparing fields of a union, or fields with the same access
+ // Otherwise, the result is unspecified and thus the comparison is not a
+ // constant expression.
+ if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
+ Mismatch < RHSDesignator.Entries.size()) {
+ const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
+ const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
+ if (!LF && !RF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
+ else if (!LF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ << getAsBaseClass(LHSDesignator.Entries[Mismatch])
+ << RF->getParent() << RF;
+ else if (!RF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ << getAsBaseClass(RHSDesignator.Entries[Mismatch])
+ << LF->getParent() << LF;
+ else if (!LF->getParent()->isUnion() &&
+ LF->getAccess() != RF->getAccess())
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_differing_access)
+ << LF << LF->getAccess() << RF << RF->getAccess()
+ << LF->getParent();
+ }
+ }
- // Handle cases like (unsigned long)&a + 4.
- if (E->isAdditiveOp() && Result.isLValue() && RHSVal.isInt()) {
- CharUnits Offset = Result.getLValueOffset();
- CharUnits AdditionalOffset = CharUnits::fromQuantity(
- RHSVal.getInt().getZExtValue());
- if (E->getOpcode() == BO_Add)
- Offset += AdditionalOffset;
- else
- Offset -= AdditionalOffset;
- Result = APValue(Result.getLValueBase(), Offset);
- return true;
+ switch (E->getOpcode()) {
+ default: llvm_unreachable("missing comparison operator");
+ case BO_LT: return Success(LHSOffset < RHSOffset, E);
+ case BO_GT: return Success(LHSOffset > RHSOffset, E);
+ case BO_LE: return Success(LHSOffset <= RHSOffset, E);
+ case BO_GE: return Success(LHSOffset >= RHSOffset, E);
+ case BO_EQ: return Success(LHSOffset == RHSOffset, E);
+ case BO_NE: return Success(LHSOffset != RHSOffset, E);
+ }
+ }
}
- // Handle cases like 4 + (unsigned long)&a
- if (E->getOpcode() == BO_Add &&
- RHSVal.isLValue() && Result.isInt()) {
- CharUnits Offset = RHSVal.getLValueOffset();
- Offset += CharUnits::fromQuantity(Result.getInt().getZExtValue());
- Result = APValue(RHSVal.getLValueBase(), Offset);
- return true;
- }
+ if (LHSTy->isMemberPointerType()) {
+ assert(E->isEqualityOp() && "unexpected member pointer operation");
+ assert(RHSTy->isMemberPointerType() && "invalid comparison");
- // All the following cases expect both operands to be an integer
- if (!Result.isInt() || !RHSVal.isInt())
- return false;
+ MemberPtr LHSValue, RHSValue;
- APSInt& RHS = RHSVal.getInt();
+ bool LHSOK = EvaluateMemberPointer(E->getLHS(), LHSValue, Info);
+    if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
- switch (E->getOpcode()) {
- default:
- return Error(E->getOperatorLoc(), diag::note_invalid_subexpr_in_ice, E);
- case BO_Mul: return Success(Result.getInt() * RHS, E);
- case BO_Add: return Success(Result.getInt() + RHS, E);
- case BO_Sub: return Success(Result.getInt() - RHS, E);
- case BO_And: return Success(Result.getInt() & RHS, E);
- case BO_Xor: return Success(Result.getInt() ^ RHS, E);
- case BO_Or: return Success(Result.getInt() | RHS, E);
- case BO_Div:
- if (RHS == 0)
- return Error(E->getOperatorLoc(), diag::note_expr_divide_by_zero, E);
- return Success(Result.getInt() / RHS, E);
- case BO_Rem:
- if (RHS == 0)
- return Error(E->getOperatorLoc(), diag::note_expr_divide_by_zero, E);
- return Success(Result.getInt() % RHS, E);
- case BO_Shl: {
- // During constant-folding, a negative shift is an opposite shift.
- if (RHS.isSigned() && RHS.isNegative()) {
- RHS = -RHS;
- goto shift_right;
- }
+ if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
- shift_left:
- unsigned SA
- = (unsigned) RHS.getLimitedValue(Result.getInt().getBitWidth()-1);
- return Success(Result.getInt() << SA, E);
- }
- case BO_Shr: {
- // During constant-folding, a negative shift is an opposite shift.
- if (RHS.isSigned() && RHS.isNegative()) {
- RHS = -RHS;
- goto shift_left;
+ // C++11 [expr.eq]p2:
+ // If both operands are null, they compare equal. Otherwise if only one is
+ // null, they compare unequal.
+ if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
+ bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
+ return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
}
- shift_right:
- unsigned SA =
- (unsigned) RHS.getLimitedValue(Result.getInt().getBitWidth()-1);
- return Success(Result.getInt() >> SA, E);
- }
-
- case BO_LT: return Success(Result.getInt() < RHS, E);
- case BO_GT: return Success(Result.getInt() > RHS, E);
- case BO_LE: return Success(Result.getInt() <= RHS, E);
- case BO_GE: return Success(Result.getInt() >= RHS, E);
- case BO_EQ: return Success(Result.getInt() == RHS, E);
- case BO_NE: return Success(Result.getInt() != RHS, E);
- }
+ // Otherwise if either is a pointer to a virtual member function, the
+ // result is unspecified.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl()))
+ if (MD->isVirtual())
+ CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl()))
+ if (MD->isVirtual())
+ CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+
+ // Otherwise they compare equal if and only if they would refer to the
+ // same member of the same most derived object or the same subobject if
+ // they were dereferenced with a hypothetical object of the associated
+ // class type.
+ bool Equal = LHSValue == RHSValue;
+ return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ }
+
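An illustration of the member-pointer equality rules implemented above (C++11; f and g are non-virtual, so the unspecified-comparison note does not fire):

  struct A { void f(); void g(); };
  static_assert(&A::f == &A::f, "same member compares equal");
  static_assert(&A::f != &A::g, "distinct members compare unequal");
  static_assert(!(&A::f == nullptr), "only one null operand: unequal");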
+ if (LHSTy->isNullPtrType()) {
+ assert(E->isComparisonOp() && "unexpected nullptr operation");
+ assert(RHSTy->isNullPtrType() && "missing pointer conversion");
+ // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
+    // are compared, the result is true if the operator is <=, >= or ==, and
+ // false otherwise.
+ BinaryOperator::Opcode Opcode = E->getOpcode();
+ return Success(Opcode == BO_EQ || Opcode == BO_LE || Opcode == BO_GE, E);
+ }
+
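Under the rule above, <=, >= and == on two std::nullptr_t operands fold to true and the other comparisons to false. Only equality is shown below, since relational comparison of nullptr_t was later removed from the language:

  constexpr bool eq = nullptr == nullptr; // true
  constexpr bool ne = nullptr != nullptr; // false
  static_assert(eq && !ne, "nullptr_t comparison is a constant expression");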
+ assert((!LHSTy->isIntegralOrEnumerationType() ||
+ !RHSTy->isIntegralOrEnumerationType()) &&
+ "DataRecursiveIntBinOpEvaluator should have handled integral types");
+ // We can't continue from here for non-integral types.
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
}
CharUnits IntExprEvaluator::GetAlignOfType(QualType T) {
- // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
- // the result is the size of the referenced type."
// C++ [expr.alignof]p3: "When alignof is applied to a reference type, the
// result shall be the alignment of the referenced type."
if (const ReferenceType *Ref = T->getAs<ReferenceType>())
@@ -1678,34 +5206,24 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
QualType SrcTy = E->getTypeOfArgument();
// C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
// the result is the size of the referenced type."
- // C++ [expr.alignof]p3: "When alignof is applied to a reference type, the
- // result shall be the alignment of the referenced type."
if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>())
SrcTy = Ref->getPointeeType();
- // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
- // extension.
- if (SrcTy->isVoidType() || SrcTy->isFunctionType())
- return Success(1, E);
-
- // sizeof(vla) is not a constantexpr: C99 6.5.3.4p2.
- if (!SrcTy->isConstantSizeType())
+ CharUnits Sizeof;
+ if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof))
return false;
-
- // Get information about the size.
- return Success(Info.Ctx.getTypeSizeInChars(SrcTy), E);
+ return Success(Sizeof, E);
}
}
llvm_unreachable("unknown expr/type trait");
- return false;
}
bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
CharUnits Result;
unsigned n = OOE->getNumComponents();
if (n == 0)
- return false;
+ return Error(OOE);
QualType CurrentType = OOE->getTypeSourceInfo()->getType();
for (unsigned i = 0; i != n; ++i) {
OffsetOfExpr::OffsetOfNode ON = OOE->getComponent(i);
@@ -1717,18 +5235,18 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
return false;
const ArrayType *AT = Info.Ctx.getAsArrayType(CurrentType);
if (!AT)
- return false;
+ return Error(OOE);
CurrentType = AT->getElementType();
CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(CurrentType);
Result += IdxResult.getSExtValue() * ElementSize;
break;
}
-
+
case OffsetOfExpr::OffsetOfNode::Field: {
FieldDecl *MemberDecl = ON.getField();
const RecordType *RT = CurrentType->getAs<RecordType>();
- if (!RT)
- return false;
+ if (!RT)
+ return Error(OOE);
RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
unsigned i = MemberDecl->getFieldIndex();
@@ -1737,20 +5255,19 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
CurrentType = MemberDecl->getType().getNonReferenceType();
break;
}
-
+
case OffsetOfExpr::OffsetOfNode::Identifier:
llvm_unreachable("dependent __builtin_offsetof");
- return false;
-
+
case OffsetOfExpr::OffsetOfNode::Base: {
CXXBaseSpecifier *BaseSpec = ON.getBase();
if (BaseSpec->isVirtual())
- return false;
+ return Error(OOE);
// Find the layout of the class whose base we are looking into.
const RecordType *RT = CurrentType->getAs<RecordType>();
- if (!RT)
- return false;
+ if (!RT)
+ return Error(OOE);
RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
@@ -1758,7 +5275,7 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
CurrentType = BaseSpec->getType();
const RecordType *BaseRT = CurrentType->getAs<RecordType>();
if (!BaseRT)
- return false;
+ return Error(OOE);
// Add the offset to the base.
Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
@@ -1770,41 +5287,41 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
}
bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
- if (E->getOpcode() == UO_LNot) {
- // LNot's operand isn't necessarily an integer, so we handle it specially.
- bool bres;
- if (!HandleConversionToBool(E->getSubExpr(), bres, Info))
- return false;
- return Success(!bres, E);
- }
-
- // Only handle integral operations...
- if (!E->getSubExpr()->getType()->isIntegralOrEnumerationType())
- return false;
-
- // Get the operand value into 'Result'.
- if (!Visit(E->getSubExpr()))
- return false;
-
switch (E->getOpcode()) {
default:
// Address, indirect, pre/post inc/dec, etc are not valid constant exprs.
// See C99 6.6p3.
- return Error(E->getOperatorLoc(), diag::note_invalid_subexpr_in_ice, E);
+ return Error(E);
case UO_Extension:
// FIXME: Should extension allow i-c-e extension expressions in its scope?
// If so, we could clear the diagnostic ID.
- return true;
+ return Visit(E->getSubExpr());
case UO_Plus:
- // The result is always just the subexpr.
- return true;
- case UO_Minus:
- if (!Result.isInt()) return false;
- return Success(-Result.getInt(), E);
- case UO_Not:
- if (!Result.isInt()) return false;
+ // The result is just the value.
+ return Visit(E->getSubExpr());
+ case UO_Minus: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isInt()) return Error(E);
+ const APSInt &Value = Result.getInt();
+ if (Value.isSigned() && Value.isMinSignedValue())
+ HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
+ E->getType());
+ return Success(-Value, E);
+ }
+ case UO_Not: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isInt()) return Error(E);
return Success(~Result.getInt(), E);
}
+ case UO_LNot: {
+ bool bres;
+ if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
+ return false;
+ return Success(!bres, E);
+ }
+ }
}
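The UO_Minus overflow check above makes negating the most negative value non-constant, e.g. (assuming 32-bit int):

  constexpr int ok = -2147483647;             // representable: folds fine
  // constexpr int bad = -(-2147483647 - 1);  // error: -INT_MIN overflows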
/// HandleCast - This is used to evaluate implicit or explicit casts where the
@@ -1826,6 +5343,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_NullToMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
+ case CK_ReinterpretMemberPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_ToVoid:
@@ -1847,18 +5365,20 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_BitCast:
case CK_Dependent:
- case CK_GetObjCProperty:
case CK_LValueBitCast:
- case CK_UserDefinedConversion:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
- return false;
+ case CK_CopyAndAutoreleaseBlockObject:
+ return Error(E);
+ case CK_UserDefinedConversion:
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
- return Visit(E->getSubExpr());
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_MemberPointerToBoolean:
case CK_PointerToBoolean:
@@ -1867,7 +5387,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToBoolean: {
bool BoolResult;
- if (!HandleConversionToBool(SubExpr, BoolResult, Info))
+ if (!EvaluateAsBooleanCondition(SubExpr, BoolResult, Info))
return false;
return Success(BoolResult, E);
}
@@ -1877,31 +5397,44 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
return false;
if (!Result.isInt()) {
+ // Allow casts of address-of-label differences if they are no-ops
+ // or narrowing. (The narrowing case isn't actually guaranteed to
+ // be constant-evaluatable except in some narrow cases which are hard
+ // to detect here. We let it through on the assumption the user knows
+ // what they are doing.)
+ if (Result.isAddrLabelDiff())
+ return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType);
// Only allow casts of lvalues if they are lossless.
return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
}
- return Success(HandleIntToIntCast(DestType, SrcType,
- Result.getInt(), Info.Ctx), E);
+ return Success(HandleIntToIntCast(Info, E, DestType, SrcType,
+ Result.getInt()), E);
}
case CK_PointerToIntegral: {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+
LValue LV;
if (!EvaluatePointer(SubExpr, LV, Info))
return false;
if (LV.getLValueBase()) {
// Only allow based lvalue casts if they are lossless.
+ // FIXME: Allow a larger integer size than the pointer size, and allow
+ // narrowing back down to pointer width in subsequent integral casts.
+ // FIXME: Check integer type's active bits, not its type size.
if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType))
- return false;
+ return Error(E);
+ LV.Designator.setInvalid();
LV.moveInto(Result);
return true;
}
APSInt AsInt = Info.Ctx.MakeIntValue(LV.getLValueOffset().getQuantity(),
SrcType);
- return Success(HandleIntToIntCast(DestType, SrcType, AsInt, Info.Ctx), E);
+ return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E);
}
case CK_IntegralComplexToReal: {
@@ -1916,19 +5449,23 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (!EvaluateFloat(SubExpr, F, Info))
return false;
- return Success(HandleFloatToIntCast(DestType, SrcType, F, Info.Ctx), E);
+ APSInt Value;
+ if (!HandleFloatToIntCast(Info, E, SrcType, F, DestType, Value))
+ return false;
+ return Success(Value, E);
}
}
llvm_unreachable("unknown cast resulting in integral value");
- return false;
}
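HandleFloatToIntCast, as used above, folds in-range conversions by truncating toward zero and rejects values the destination type cannot represent:

  constexpr int a = (int)3.75;     // folds to 3
  // constexpr int b = (int)1e30;  // error: 1e30 not representable in 'int'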
bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isAnyComplexType()) {
ComplexValue LV;
- if (!EvaluateComplex(E->getSubExpr(), LV, Info) || !LV.isComplexInt())
- return Error(E->getExprLoc(), diag::note_invalid_subexpr_in_ice, E);
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info))
+ return false;
+ if (!LV.isComplexInt())
+ return Error(E);
return Success(LV.getComplexIntReal(), E);
}
@@ -1938,13 +5475,14 @@ bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isComplexIntegerType()) {
ComplexValue LV;
- if (!EvaluateComplex(E->getSubExpr(), LV, Info) || !LV.isComplexInt())
- return Error(E->getExprLoc(), diag::note_invalid_subexpr_in_ice, E);
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info))
+ return false;
+ if (!LV.isComplexInt())
+ return Error(E);
return Success(LV.getComplexIntImag(), E);
}
- if (!E->getSubExpr()->isEvaluatable(Info.Ctx))
- Info.EvalResult.HasSideEffects = true;
+ VisitIgnoredValue(E->getSubExpr());
return Success(0, E);
}
@@ -1972,11 +5510,8 @@ public:
Result = V.getFloat();
return true;
}
- bool Error(const Stmt *S) {
- return false;
- }
- bool ValueInitialization(const Expr *E) {
+ bool ZeroInitialization(const Expr *E) {
Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
return true;
}
@@ -1991,15 +5526,12 @@ public:
bool VisitUnaryReal(const UnaryOperator *E);
bool VisitUnaryImag(const UnaryOperator *E);
- bool VisitDeclRefExpr(const DeclRefExpr *E);
-
- // FIXME: Missing: array subscript of vector, member of vector,
- // ImplicitValueInitExpr
+ // FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
- assert(E->getType()->isRealFloatingType());
+ assert(E->isRValue() && E->getType()->isRealFloatingType());
return FloatExprEvaluator(Info, Result).Visit(E);
}
@@ -2029,7 +5561,7 @@ static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
}
bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
- switch (E->isBuiltinCall(Info.Ctx)) {
+ switch (E->isBuiltinCall()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -2048,16 +5580,20 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nans:
case Builtin::BI__builtin_nansf:
case Builtin::BI__builtin_nansl:
- return TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
- true, Result);
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ true, Result))
+ return Error(E);
+ return true;
case Builtin::BI__builtin_nan:
case Builtin::BI__builtin_nanf:
case Builtin::BI__builtin_nanl:
// If this is __builtin_nan() turn this into a nan, otherwise we
// can't constant fold it.
- return TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
- false, Result);
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ false, Result))
+ return Error(E);
+ return true;
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
@@ -2082,48 +5618,6 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
}
}
-bool FloatExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
- if (ExprEvaluatorBaseTy::VisitDeclRefExpr(E))
- return true;
-
- const Decl *D = E->getDecl();
- if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D)) return false;
- const VarDecl *VD = cast<VarDecl>(D);
-
- // Require the qualifiers to be const and not volatile.
- CanQualType T = Info.Ctx.getCanonicalType(E->getType());
- if (!T.isConstQualified() || T.isVolatileQualified())
- return false;
-
- const Expr *Init = VD->getAnyInitializer();
- if (!Init) return false;
-
- if (APValue *V = VD->getEvaluatedValue()) {
- if (V->isFloat()) {
- Result = V->getFloat();
- return true;
- }
- return false;
- }
-
- if (VD->isEvaluatingValue())
- return false;
-
- VD->setEvaluatingValue();
-
- Expr::EvalResult InitResult;
- if (Init->Evaluate(InitResult, Info.Ctx) && !InitResult.HasSideEffects &&
- InitResult.Val.isFloat()) {
- // Cache the evaluated value in the variable declaration.
- Result = InitResult.Val.getFloat();
- VD->setEvaluatedValue(InitResult.Val);
- return true;
- }
-
- VD->setEvaluatedValue(APValue());
- return false;
-}
-
bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isAnyComplexType()) {
ComplexValue CV;
@@ -2145,70 +5639,55 @@ bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
return true;
}
- if (!E->getSubExpr()->isEvaluatable(Info.Ctx))
- Info.EvalResult.HasSideEffects = true;
+ VisitIgnoredValue(E->getSubExpr());
const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType());
Result = llvm::APFloat::getZero(Sem);
return true;
}
bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
- if (E->getOpcode() == UO_Deref)
- return false;
-
- if (!EvaluateFloat(E->getSubExpr(), Result, Info))
- return false;
-
switch (E->getOpcode()) {
- default: return false;
+ default: return Error(E);
case UO_Plus:
- return true;
+ return EvaluateFloat(E->getSubExpr(), Result, Info);
case UO_Minus:
+ if (!EvaluateFloat(E->getSubExpr(), Result, Info))
+ return false;
Result.changeSign();
return true;
}
}
bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
- if (E->getOpcode() == BO_Comma) {
- if (!EvaluateFloat(E->getRHS(), Result, Info))
- return false;
-
- // If we can't evaluate the LHS, it might have side effects;
- // conservatively mark it.
- if (!E->getLHS()->isEvaluatable(Info.Ctx))
- Info.EvalResult.HasSideEffects = true;
-
- return true;
- }
-
- // We can't evaluate pointer-to-member operations.
- if (E->isPtrMemOp())
- return false;
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
- // FIXME: Diagnostics? I really don't understand how the warnings
- // and errors are supposed to work.
APFloat RHS(0.0);
- if (!EvaluateFloat(E->getLHS(), Result, Info))
+ bool LHSOK = EvaluateFloat(E->getLHS(), Result, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
- if (!EvaluateFloat(E->getRHS(), RHS, Info))
+ if (!EvaluateFloat(E->getRHS(), RHS, Info) || !LHSOK)
return false;
switch (E->getOpcode()) {
- default: return false;
+ default: return Error(E);
case BO_Mul:
Result.multiply(RHS, APFloat::rmNearestTiesToEven);
- return true;
+ break;
case BO_Add:
Result.add(RHS, APFloat::rmNearestTiesToEven);
- return true;
+ break;
case BO_Sub:
Result.subtract(RHS, APFloat::rmNearestTiesToEven);
- return true;
+ break;
case BO_Div:
Result.divide(RHS, APFloat::rmNearestTiesToEven);
- return true;
+ break;
}
+
+ if (Result.isInfinity() || Result.isNaN())
+ CCEDiag(E, diag::note_constexpr_float_arithmetic) << Result.isNaN();
+ return true;
}
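With the diagnostic above, arithmetic that produces an infinity or NaN can still be folded but is no longer a core constant expression, e.g.:

  constexpr double fine = 1.5 + 2.25;       // exact: folds normally
  // constexpr double inf = 1e308 + 1e308;  // error: result is +infinity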
bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
@@ -2221,27 +5700,20 @@ bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
- return false;
-
- case CK_LValueToRValue:
- case CK_NoOp:
- return Visit(SubExpr);
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_IntegralToFloating: {
APSInt IntResult;
- if (!EvaluateInteger(SubExpr, IntResult, Info))
- return false;
- Result = HandleIntToFloatCast(E->getType(), SubExpr->getType(),
- IntResult, Info.Ctx);
- return true;
+ return EvaluateInteger(SubExpr, IntResult, Info) &&
+ HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult,
+ E->getType(), Result);
}
case CK_FloatingCast: {
if (!Visit(SubExpr))
return false;
- Result = HandleFloatToFloatCast(E->getType(), SubExpr->getType(),
- Result, Info.Ctx);
- return true;
+ return HandleFloatToFloatCast(Info, E, SubExpr->getType(), E->getType(),
+ Result);
}
case CK_FloatingComplexToReal: {
@@ -2252,8 +5724,6 @@ bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
return true;
}
}
-
- return false;
}
//===----------------------------------------------------------------------===//
@@ -2273,30 +5743,43 @@ public:
Result.setFrom(V);
return true;
}
- bool Error(const Expr *E) {
- return false;
- }
+
+ bool ZeroInitialization(const Expr *E);
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
-
bool VisitCastExpr(const CastExpr *E);
-
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitUnaryOperator(const UnaryOperator *E);
- // FIXME Missing: ImplicitValueInitExpr, InitListExpr
+ bool VisitInitListExpr(const InitListExpr *E);
};
} // end anonymous namespace
static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
EvalInfo &Info) {
- assert(E->getType()->isAnyComplexType());
+ assert(E->isRValue() && E->getType()->isAnyComplexType());
return ComplexExprEvaluator(Info, Result).Visit(E);
}
+bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ if (ElemTy->isRealFloatingType()) {
+ Result.makeComplexFloat();
+ APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy));
+ Result.FloatReal = Zero;
+ Result.FloatImag = Zero;
+ } else {
+ Result.makeComplexInt();
+ APSInt Zero = Info.Ctx.MakeIntValue(0, ElemTy);
+ Result.IntReal = Zero;
+ Result.IntImag = Zero;
+ }
+ return true;
+}
+
bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
const Expr* SubExpr = E->getSubExpr();
@@ -2338,6 +5821,7 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
@@ -2362,17 +5846,19 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
- return Visit(E->getSubExpr());
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_Dependent:
- case CK_GetObjCProperty:
case CK_LValueBitCast:
case CK_UserDefinedConversion:
- return false;
+ return Error(E);
case CK_FloatingRealToComplex: {
APFloat &Real = Result.FloatReal;
@@ -2392,11 +5878,8 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
QualType From
= E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
- Result.FloatReal
- = HandleFloatToFloatCast(To, From, Result.FloatReal, Info.Ctx);
- Result.FloatImag
- = HandleFloatToFloatCast(To, From, Result.FloatImag, Info.Ctx);
- return true;
+ return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) &&
+ HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag);
}
case CK_FloatingComplexToIntegralComplex: {
@@ -2407,9 +5890,10 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
QualType From
= E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
Result.makeComplexInt();
- Result.IntReal = HandleFloatToIntCast(To, From, Result.FloatReal, Info.Ctx);
- Result.IntImag = HandleFloatToIntCast(To, From, Result.FloatImag, Info.Ctx);
- return true;
+ return HandleFloatToIntCast(Info, E, From, Result.FloatReal,
+ To, Result.IntReal) &&
+ HandleFloatToIntCast(Info, E, From, Result.FloatImag,
+ To, Result.IntImag);
}
case CK_IntegralRealToComplex: {
@@ -2430,8 +5914,8 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
QualType From
= E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
- Result.IntReal = HandleIntToIntCast(To, From, Result.IntReal, Info.Ctx);
- Result.IntImag = HandleIntToIntCast(To, From, Result.IntImag, Info.Ctx);
+ Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal);
+ Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag);
return true;
}
@@ -2443,39 +5927,32 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
QualType From
= E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
Result.makeComplexFloat();
- Result.FloatReal = HandleIntToFloatCast(To, From, Result.IntReal, Info.Ctx);
- Result.FloatImag = HandleIntToFloatCast(To, From, Result.IntImag, Info.Ctx);
- return true;
+ return HandleIntToFloatCast(Info, E, From, Result.IntReal,
+ To, Result.FloatReal) &&
+ HandleIntToFloatCast(Info, E, From, Result.IntImag,
+ To, Result.FloatImag);
}
}
llvm_unreachable("unknown cast resulting in complex value");
- return false;
}
bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
- if (E->getOpcode() == BO_Comma) {
- if (!Visit(E->getRHS()))
- return false;
-
- // If we can't evaluate the LHS, it might have side effects;
- // conservatively mark it.
- if (!E->getLHS()->isEvaluatable(Info.Ctx))
- Info.EvalResult.HasSideEffects = true;
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
- return true;
- }
- if (!Visit(E->getLHS()))
+ bool LHSOK = Visit(E->getLHS());
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
return false;
ComplexValue RHS;
- if (!EvaluateComplex(E->getRHS(), RHS, Info))
+ if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
return false;
assert(Result.isComplexFloat() == RHS.isComplexFloat() &&
"Invalid operands to binary operator.");
switch (E->getOpcode()) {
- default: return false;
+ default: return Error(E);
case BO_Add:
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
@@ -2559,10 +6036,9 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
Res_i.subtract(Tmp, APFloat::rmNearestTiesToEven);
Res_i.divide(Den, APFloat::rmNearestTiesToEven);
} else {
- if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0) {
- // FIXME: what about diagnostics?
- return false;
- }
+ if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0)
+ return Error(E, diag::note_expr_divide_by_zero);
+
ComplexValue LHS = Result;
APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
RHS.getComplexIntImag() * RHS.getComplexIntImag();
@@ -2586,8 +6062,7 @@ bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
switch (E->getOpcode()) {
default:
- // FIXME: what about diagnostics?
- return false;
+ return Error(E);
case UO_Extension:
return true;
case UO_Plus:
@@ -2612,109 +6087,279 @@ bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
}
}
+bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ if (E->getNumInits() == 2) {
+ if (E->getType()->isComplexType()) {
+ Result.makeComplexFloat();
+ if (!EvaluateFloat(E->getInit(0), Result.FloatReal, Info))
+ return false;
+ if (!EvaluateFloat(E->getInit(1), Result.FloatImag, Info))
+ return false;
+ } else {
+ Result.makeComplexInt();
+ if (!EvaluateInteger(E->getInit(0), Result.IntReal, Info))
+ return false;
+ if (!EvaluateInteger(E->getInit(1), Result.IntImag, Info))
+ return false;
+ }
+ return true;
+ }
+ return ExprEvaluatorBaseTy::VisitInitListExpr(E);
+}
+
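The two-element path above handles initializers such as the following (_Complex is a GNU extension in C++):

  static _Complex double cd = { 1.0, 2.0 }; // FloatReal = 1.0, FloatImag = 2.0
  static _Complex int    ci = { 3, 4 };     // IntReal = 3, IntImag = 4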
//===----------------------------------------------------------------------===//
-// Top level Expr::Evaluate method.
+// Void expression evaluation, primarily for a cast to void on the LHS of a
+// comma operator
//===----------------------------------------------------------------------===//
-static bool Evaluate(EvalInfo &Info, const Expr *E) {
- if (E->getType()->isVectorType()) {
- if (!EvaluateVector(E, Info.EvalResult.Val, Info))
+namespace {
+class VoidExprEvaluator
+ : public ExprEvaluatorBase<VoidExprEvaluator, bool> {
+public:
+ VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
+
+ bool Success(const APValue &V, const Expr *e) { return true; }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_ToVoid:
+ VisitIgnoredValue(E->getSubExpr());
+ return true;
+ }
+ }
+};
+} // end anonymous namespace
+
+static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isVoidType());
+ return VoidExprEvaluator(Info).Visit(E);
+}
+
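The void evaluator exists so that a discarded cast can sit inside a constant expression, typically on the LHS of a comma (C++11):

  constexpr int n = ((void)0, 42); // LHS evaluated for effects; result is 42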
+//===----------------------------------------------------------------------===//
+// Top level Expr::EvaluateAsRValue method.
+//===----------------------------------------------------------------------===//
+
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
+ // In C, function designators are not lvalues, but we evaluate them as if they
+ // are.
+ if (E->isGLValue() || E->getType()->isFunctionType()) {
+ LValue LV;
+ if (!EvaluateLValue(E, LV, Info))
return false;
- } else if (E->getType()->isIntegralOrEnumerationType()) {
- if (!IntExprEvaluator(Info, Info.EvalResult.Val).Visit(E))
+ LV.moveInto(Result);
+ } else if (E->getType()->isVectorType()) {
+ if (!EvaluateVector(E, Result, Info))
return false;
- if (Info.EvalResult.Val.isLValue() &&
- !IsGlobalLValue(Info.EvalResult.Val.getLValueBase()))
+ } else if (E->getType()->isIntegralOrEnumerationType()) {
+ if (!IntExprEvaluator(Info, Result).Visit(E))
return false;
} else if (E->getType()->hasPointerRepresentation()) {
LValue LV;
if (!EvaluatePointer(E, LV, Info))
return false;
- if (!IsGlobalLValue(LV.Base))
- return false;
- LV.moveInto(Info.EvalResult.Val);
+ LV.moveInto(Result);
} else if (E->getType()->isRealFloatingType()) {
llvm::APFloat F(0.0);
if (!EvaluateFloat(E, F, Info))
return false;
-
- Info.EvalResult.Val = APValue(F);
+ Result = APValue(F);
} else if (E->getType()->isAnyComplexType()) {
ComplexValue C;
if (!EvaluateComplex(E, C, Info))
return false;
- C.moveInto(Info.EvalResult.Val);
- } else
+ C.moveInto(Result);
+ } else if (E->getType()->isMemberPointerType()) {
+ MemberPtr P;
+ if (!EvaluateMemberPointer(E, P, Info))
+ return false;
+ P.moveInto(Result);
+ return true;
+ } else if (E->getType()->isArrayType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ if (!EvaluateArray(E, LV, Info.CurrentCall->Temporaries[E], Info))
+ return false;
+ Result = Info.CurrentCall->Temporaries[E];
+ } else if (E->getType()->isRecordType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ if (!EvaluateRecord(E, LV, Info.CurrentCall->Temporaries[E], Info))
+ return false;
+ Result = Info.CurrentCall->Temporaries[E];
+ } else if (E->getType()->isVoidType()) {
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.CCEDiag(E, diag::note_constexpr_nonliteral)
+ << E->getType();
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ if (!EvaluateVoid(E, Info))
+ return false;
+ } else if (Info.getLangOpts().CPlusPlus0x) {
+ Info.Diag(E, diag::note_constexpr_nonliteral) << E->getType();
return false;
+ } else {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
return true;
}
-/// Evaluate - Return true if this is a constant which we can fold using
+/// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
+/// cases, the in-place evaluation is essential, since later initializers for
+/// an object can indirectly refer to subobjects which were initialized earlier.
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
+ const Expr *E, CheckConstantExpressionKind CCEK,
+ bool AllowNonLiteralTypes) {
+ if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E))
+ return false;
+
+ if (E->isRValue()) {
+ // Evaluate arrays and record types in-place, so that later initializers can
+ // refer to earlier-initialized members of the object.
+ if (E->getType()->isArrayType())
+ return EvaluateArray(E, This, Result, Info);
+ else if (E->getType()->isRecordType())
+ return EvaluateRecord(E, This, Result, Info);
+ }
+
+ // For any other type, in-place evaluation is unimportant.
+ return Evaluate(Result, Info, E);
+}
+
+/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
+/// lvalue-to-rvalue cast if it is an lvalue.
+static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
+ if (!CheckLiteralType(Info, E))
+ return false;
+
+ if (!::Evaluate(Result, Info, E))
+ return false;
+
+ if (E->isGLValue()) {
+ LValue LV;
+ LV.setFrom(Info.Ctx, Result);
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
+ return false;
+ }
+
+ // Check this core constant expression is a constant expression.
+ return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
+}
+
+/// EvaluateAsRValue - Return true if this is a constant which we can fold using
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
-/// in Result.
-bool Expr::Evaluate(EvalResult &Result, const ASTContext &Ctx) const {
+/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
+/// will be applied to the result.
+bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
+ // Fast-path evaluations of integer literals, since we sometimes see files
+ // containing vast quantities of these.
+ if (const IntegerLiteral *L = dyn_cast<IntegerLiteral>(this)) {
+ Result.Val = APValue(APSInt(L->getValue(),
+ L->getType()->isUnsignedIntegerType()));
+ return true;
+ }
+
+ // FIXME: Evaluating values of large array and record types can cause
+ // performance problems. Only do so in C++11 for now.
+ if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
+ !Ctx.getLangOpts().CPlusPlus0x)
+ return false;
+
EvalInfo Info(Ctx, Result);
- return ::Evaluate(Info, this);
+ return ::EvaluateAsRValue(Info, this, Result.Val);
}
bool Expr::EvaluateAsBooleanCondition(bool &Result,
const ASTContext &Ctx) const {
EvalResult Scratch;
- EvalInfo Info(Ctx, Scratch);
-
- return HandleConversionToBool(this, Result, Info);
+ return EvaluateAsRValue(Scratch, Ctx) &&
+ HandleConversionToBool(Scratch.Val, Result);
}
-bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx) const {
- EvalResult Scratch;
- EvalInfo Info(Ctx, Scratch);
+bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
+ SideEffectsKind AllowSideEffects) const {
+ if (!getType()->isIntegralOrEnumerationType())
+ return false;
+
+ EvalResult ExprResult;
+ if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
+ (!AllowSideEffects && ExprResult.HasSideEffects))
+ return false;
- return EvaluateInteger(this, Result, Info) && !Scratch.HasSideEffects;
+ Result = ExprResult.Val.getInt();
+ return true;
}
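A hypothetical caller sketch for the entry point above, using only signatures defined in this patch (assumes the declaring header supplies a default for AllowSideEffects):

  #include "clang/AST/Expr.h"

  bool tryFoldToInt(const clang::Expr *E, const clang::ASTContext &Ctx,
                    llvm::APSInt &Out) {
    // Fails for non-integral types, side effects, or non-constant operands.
    return E->EvaluateAsInt(Out, Ctx);
  }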
bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
EvalInfo Info(Ctx, Result);
LValue LV;
- if (EvaluateLValue(this, LV, Info) &&
- !Result.HasSideEffects &&
- IsGlobalLValue(LV.Base)) {
- LV.moveInto(Result.Val);
- return true;
- }
- return false;
+ if (!EvaluateLValue(this, LV, Info) || Result.HasSideEffects ||
+ !CheckLValueConstantExpression(Info, getExprLoc(),
+ Ctx.getLValueReferenceType(getType()), LV))
+ return false;
+
+ LV.moveInto(Result.Val);
+ return true;
}
-bool Expr::EvaluateAsAnyLValue(EvalResult &Result,
- const ASTContext &Ctx) const {
- EvalInfo Info(Ctx, Result);
+bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
+ const VarDecl *VD,
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ // FIXME: Evaluating initializers for large array and record types can cause
+ // performance problems. Only do so in C++11 for now.
+ if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
+ !Ctx.getLangOpts().CPlusPlus0x)
+ return false;
- LValue LV;
- if (EvaluateLValue(this, LV, Info)) {
- LV.moveInto(Result.Val);
- return true;
+ Expr::EvalStatus EStatus;
+ EStatus.Diag = &Notes;
+
+ EvalInfo InitInfo(Ctx, EStatus);
+ InitInfo.setEvaluatingDecl(VD, Value);
+
+ LValue LVal;
+ LVal.set(VD);
+
+ // C++11 [basic.start.init]p2:
+ // Variables with static storage duration or thread storage duration shall be
+ // zero-initialized before any other initialization takes place.
+ // This behavior is not present in C.
+ if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
+ !VD->getType()->isReferenceType()) {
+ ImplicitValueInitExpr VIE(VD->getType());
+ if (!EvaluateInPlace(Value, InitInfo, LVal, &VIE, CCEK_Constant,
+ /*AllowNonLiteralTypes=*/true))
+ return false;
}
- return false;
+
+ if (!EvaluateInPlace(Value, InitInfo, LVal, this, CCEK_Constant,
+ /*AllowNonLiteralTypes=*/true) ||
+ EStatus.HasSideEffects)
+ return false;
+
+ return CheckConstantExpression(InitInfo, VD->getLocation(), VD->getType(),
+ Value);
}
-/// isEvaluatable - Call Evaluate to see if this expression can be constant
-/// folded, but discard the result.
+/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
+/// constant folded, but discard the result.
bool Expr::isEvaluatable(const ASTContext &Ctx) const {
EvalResult Result;
- return Evaluate(Result, Ctx) && !Result.HasSideEffects;
+ return EvaluateAsRValue(Result, Ctx) && !Result.HasSideEffects;
}
bool Expr::HasSideEffects(const ASTContext &Ctx) const {
- Expr::EvalResult Result;
- EvalInfo Info(Ctx, Result);
- return HasSideEffect(Info).Visit(this);
+ return HasSideEffect(Ctx).Visit(this);
}
APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx) const {
EvalResult EvalResult;
- bool Result = Evaluate(EvalResult, Ctx);
+ bool Result = EvaluateAsRValue(EvalResult, Ctx);
(void)Result;
assert(Result && "Could not evaluate expression");
assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
@@ -2746,7 +6391,7 @@ APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx) const {
// value, it calls into Evaluate.
//
// Meanings of Val:
-// 0: This expression is an ICE if it can be evaluated by Evaluate.
+// 0: This expression is an ICE.
// 1: This expression is not an ICE, but if it isn't evaluated, it's
// a legal subexpression for an ICE. This return value is used to handle
// the comma operator in C99 mode.
@@ -2769,7 +6414,7 @@ static ICEDiag NoDiag() { return ICEDiag(); }
static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) {
Expr::EvalResult EVResult;
- if (!E->Evaluate(EVResult, Ctx) || EVResult.HasSideEffects ||
+ if (!E->EvaluateAsRValue(EVResult, Ctx) || EVResult.HasSideEffects ||
!EVResult.Val.isInt()) {
return ICEDiag(2, E->getLocStart());
}
@@ -2808,6 +6453,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::CXXTypeidExprClass:
case Expr::CXXUuidofExprClass:
case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::UserDefinedLiteralClass:
case Expr::CXXThisExprClass:
case Expr::CXXThrowExprClass:
case Expr::CXXNewExprClass:
@@ -2823,16 +6469,19 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::CXXDependentScopeMemberExprClass:
case Expr::UnresolvedMemberExprClass:
case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
case Expr::ObjCEncodeExprClass:
case Expr::ObjCMessageExprClass:
case Expr::ObjCSelectorExprClass:
case Expr::ObjCProtocolExprClass:
case Expr::ObjCIvarRefExprClass:
case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCSubscriptRefExprClass:
case Expr::ObjCIsaExprClass:
case Expr::ShuffleVectorExprClass:
case Expr::BlockExprClass:
- case Expr::BlockDeclRefExprClass:
case Expr::NoStmtClass:
case Expr::OpaqueValueExprClass:
case Expr::PackExpansionExprClass:
@@ -2840,18 +6489,10 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::AsTypeExprClass:
case Expr::ObjCIndirectCopyRestoreExprClass:
case Expr::MaterializeTemporaryExprClass:
+ case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
- return ICEDiag(2, E->getLocStart());
-
case Expr::InitListExprClass:
- if (Ctx.getLangOptions().CPlusPlus0x) {
- const InitListExpr *ILE = cast<InitListExpr>(E);
- if (ILE->getNumInits() == 0)
- return NoDiag();
- if (ILE->getNumInits() == 1)
- return CheckICE(ILE->getInit(0), Ctx);
- // Fall through for more than 1 expression.
- }
+ case Expr::LambdaExprClass:
return ICEDiag(2, E->getLocStart());
case Expr::SizeOfPackExprClass:
@@ -2869,28 +6510,32 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx);
case Expr::IntegerLiteralClass:
case Expr::CharacterLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
case Expr::CXXBoolLiteralExprClass:
case Expr::CXXScalarValueInitExprClass:
case Expr::UnaryTypeTraitExprClass:
case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::CXXNoexceptExprClass:
return NoDiag();
case Expr::CallExprClass:
case Expr::CXXOperatorCallExprClass: {
+ // C99 6.6/3 allows function calls within unevaluated subexpressions of
+ // constant expressions, but they can never be ICEs because an ICE cannot
+ // contain an operand of (pointer to) function type.
const CallExpr *CE = cast<CallExpr>(E);
- if (CE->isBuiltinCall(Ctx))
+ if (CE->isBuiltinCall())
return CheckEvalInICE(E, Ctx);
return ICEDiag(2, E->getLocStart());
}
- case Expr::DeclRefExprClass:
+ case Expr::DeclRefExprClass: {
if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
return NoDiag();
- if (Ctx.getLangOptions().CPlusPlus &&
- E->getType().getCVRQualifiers() == Qualifiers::Const) {
- const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
-
+ const ValueDecl *D = dyn_cast<ValueDecl>(cast<DeclRefExpr>(E)->getDecl());
+ if (Ctx.getLangOpts().CPlusPlus &&
+ D && IsConstNonVolatile(D->getType())) {
// Parameter variables are never constants. Without this check,
// getAnyInitializer() can find a default argument, which leads
// to chaos.
@@ -2901,40 +6546,20 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
// A variable of non-volatile const-qualified integral or enumeration
// type initialized by an ICE can be used in ICEs.
if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
- Qualifiers Quals = Ctx.getCanonicalType(Dcl->getType()).getQualifiers();
- if (Quals.hasVolatile() || !Quals.hasConst())
+ if (!Dcl->getType()->isIntegralOrEnumerationType())
return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
-
- // Look for a declaration of this variable that has an initializer.
- const VarDecl *ID = 0;
- const Expr *Init = Dcl->getAnyInitializer(ID);
- if (Init) {
- if (ID->isInitKnownICE()) {
- // We have already checked whether this subexpression is an
- // integral constant expression.
- if (ID->isInitICE())
- return NoDiag();
- else
- return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
- }
-
- // It's an ICE whether or not the definition we found is
- // out-of-line. See DR 721 and the discussion in Clang PR
- // 6206 for details.
-
- if (Dcl->isCheckingICE()) {
- return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
- }
- Dcl->setCheckingICE();
- ICEDiag Result = CheckICE(Init, Ctx);
- // Cache the result of the ICE test.
- Dcl->setInitKnownICE(Result.Val == 0);
- return Result;
- }
+ const VarDecl *VD;
+ // Look for a declaration of this variable that has an initializer, and
+ // check whether it is an ICE.
+ if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE())
+ return NoDiag();
+ else
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
}
}
return ICEDiag(2, E->getLocStart());
+ }
case Expr::UnaryOperatorClass: {
const UnaryOperator *Exp = cast<UnaryOperator>(E);
switch (Exp->getOpcode()) {
@@ -2944,6 +6569,9 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case UO_PreDec:
case UO_AddrOf:
case UO_Deref:
+ // C99 6.6/3 allows increment and decrement within unevaluated
+ // subexpressions of constant expressions, but they can never be ICEs
+ // because an ICE cannot contain an lvalue operand.
return ICEDiag(2, E->getLocStart());
case UO_Extension:
case UO_LNot:
@@ -2959,8 +6587,8 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
}
case Expr::OffsetOfExprClass: {
// Note that per C99, offsetof must be an ICE. And AFAIK, using
- // Evaluate matches the proposed gcc behavior for cases like
- // "offsetof(struct s{int x[4];}, x[!.0])". This doesn't affect
+ // EvaluateAsRValue matches the proposed gcc behavior for cases like
+ // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
// compliance: we should warn earlier for offsetof expressions with
// array subscripts that aren't ICEs, and if the array subscripts
// are ICEs, the value of the offsetof must be an integer constant.
@@ -2989,6 +6617,9 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case BO_AndAssign:
case BO_XorAssign:
case BO_OrAssign:
+ // C99 6.6/3 allows assignments within unevaluated subexpressions of
+ // constant expressions, but they can never be ICEs because an ICE cannot
+ // contain an lvalue operand.
return ICEDiag(2, E->getLocStart());
case BO_Mul:
@@ -3012,7 +6643,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
if (Exp->getOpcode() == BO_Div ||
Exp->getOpcode() == BO_Rem) {
- // Evaluate gives an error for undefined Div/Rem, so make sure
+ // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
// we don't evaluate one.
if (LHSResult.Val == 0 && RHSResult.Val == 0) {
llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
@@ -3026,7 +6657,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
}
}
if (Exp->getOpcode() == BO_Comma) {
- if (Ctx.getLangOptions().C99) {
+ if (Ctx.getLangOpts().C99) {
// C99 6.6p3 introduces a strange edge case: comma can be in an ICE
// if it isn't evaluated.
if (LHSResult.Val == 0 && RHSResult.Val == 0)
@@ -3043,21 +6674,6 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case BO_LAnd:
case BO_LOr: {
ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
-
- // C++0x [expr.const]p2:
- // [...] subexpressions of logical AND (5.14), logical OR
- // (5.15), and condi- tional (5.16) operations that are not
- // evaluated are not considered.
- if (Ctx.getLangOptions().CPlusPlus0x && LHSResult.Val == 0) {
- if (Exp->getOpcode() == BO_LAnd &&
- Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0)
- return LHSResult;
-
- if (Exp->getOpcode() == BO_LOr &&
- Exp->getLHS()->EvaluateKnownConstInt(Ctx) != 0)
- return LHSResult;
- }
-
ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
if (LHSResult.Val == 0 && RHSResult.Val == 1) {
// Rare case where the RHS has a comma "side-effect"; we need
@@ -3080,18 +6696,35 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::CXXFunctionalCastExprClass:
case Expr::CXXStaticCastExprClass:
case Expr::CXXReinterpretCastExprClass:
- case Expr::CXXConstCastExprClass:
+ case Expr::CXXConstCastExprClass:
case Expr::ObjCBridgedCastExprClass: {
const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
+ if (isa<ExplicitCastExpr>(E)) {
+ if (const FloatingLiteral *FL
+ = dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
+ unsigned DestWidth = Ctx.getIntWidth(E->getType());
+ bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
+ APSInt IgnoredVal(DestWidth, !DestSigned);
+ bool Ignored;
+ // If the value does not fit in the destination type, the behavior is
+ // undefined, so we are not required to treat it as a constant
+ // expression.
+ if (FL->getValue().convertToInteger(IgnoredVal,
+ llvm::APFloat::rmTowardZero,
+ &Ignored) & APFloat::opInvalidOp)
+ return ICEDiag(2, E->getLocStart());
+ return NoDiag();
+ }
+ }
switch (cast<CastExpr>(E)->getCastKind()) {
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_IntegralToBoolean:
case CK_IntegralCast:
return CheckICE(SubExpr, Ctx);
default:
- if (isa<FloatingLiteral>(SubExpr->IgnoreParens()))
- return NoDiag();
return ICEDiag(2, E->getLocStart());
}
}
@@ -3114,30 +6747,14 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
// extension. See GCC PR38377 for discussion.
if (const CallExpr *CallCE
= dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
- if (CallCE->isBuiltinCall(Ctx) == Builtin::BI__builtin_constant_p) {
- Expr::EvalResult EVResult;
- if (!E->Evaluate(EVResult, Ctx) || EVResult.HasSideEffects ||
- !EVResult.Val.isInt()) {
- return ICEDiag(2, E->getLocStart());
- }
- return NoDiag();
- }
+ if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+ return CheckEvalInICE(E, Ctx);
ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
if (CondResult.Val == 2)
return CondResult;
- // C++0x [expr.const]p2:
- // subexpressions of [...] conditional (5.16) operations that
- // are not evaluated are not considered
- bool TrueBranch = Ctx.getLangOptions().CPlusPlus0x
- ? Exp->getCond()->EvaluateKnownConstInt(Ctx) != 0
- : false;
- ICEDiag TrueResult = NoDiag();
- if (!Ctx.getLangOptions().CPlusPlus0x || TrueBranch)
- TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
- ICEDiag FalseResult = NoDiag();
- if (!Ctx.getLangOptions().CPlusPlus0x || !TrueBranch)
- FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+ ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
if (TrueResult.Val == 2)
return TrueResult;
@@ -3162,22 +6779,116 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
}
}
- // Silence a GCC warning
- return ICEDiag(2, E->getLocStart());
+ llvm_unreachable("Invalid StmtClass!");
}
-bool Expr::isIntegerConstantExpr(llvm::APSInt &Result, ASTContext &Ctx,
- SourceLocation *Loc, bool isEvaluated) const {
+/// Evaluate an expression as a C++11 integral constant expression.
+static bool EvaluateCPlusPlus11IntegralConstantExpr(ASTContext &Ctx,
+ const Expr *E,
+ llvm::APSInt *Value,
+ SourceLocation *Loc) {
+ if (!E->getType()->isIntegralOrEnumerationType()) {
+ if (Loc) *Loc = E->getExprLoc();
+ return false;
+ }
+
+ APValue Result;
+ if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc))
+ return false;
+
+ assert(Result.isInt() && "pointer cast to int is not an ICE");
+ if (Value) *Value = Result.getInt();
+ return true;
+}
+
+bool Expr::isIntegerConstantExpr(ASTContext &Ctx, SourceLocation *Loc) const {
+ if (Ctx.getLangOpts().CPlusPlus0x)
+ return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, 0, Loc);
+
ICEDiag d = CheckICE(this, Ctx);
if (d.Val != 0) {
if (Loc) *Loc = d.Loc;
return false;
}
- EvalResult EvalResult;
- if (!Evaluate(EvalResult, Ctx))
+ return true;
+}
+
+bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, ASTContext &Ctx,
+ SourceLocation *Loc, bool isEvaluated) const {
+ if (Ctx.getLangOpts().CPlusPlus0x)
+ return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);
+
+ if (!isIntegerConstantExpr(Ctx, Loc))
+ return false;
+ if (!EvaluateAsInt(Value, Ctx))
llvm_unreachable("ICE cannot be evaluated!");
- assert(!EvalResult.HasSideEffects && "ICE with side effects!");
- assert(EvalResult.Val.isInt() && "ICE that isn't integer!");
- Result = EvalResult.Val.getInt();
return true;
}
+
+bool Expr::isCXX98IntegralConstantExpr(ASTContext &Ctx) const {
+ return CheckICE(this, Ctx).Val == 0;
+}
+
+bool Expr::isCXX11ConstantExpr(ASTContext &Ctx, APValue *Result,
+ SourceLocation *Loc) const {
+ // We support this checking in C++98 mode in order to diagnose compatibility
+ // issues.
+ assert(Ctx.getLangOpts().CPlusPlus);
+
+ // Build evaluation settings.
+ Expr::EvalStatus Status;
+ llvm::SmallVector<PartialDiagnosticAt, 8> Diags;
+ Status.Diag = &Diags;
+ EvalInfo Info(Ctx, Status);
+
+ APValue Scratch;
+ bool IsConstExpr = ::EvaluateAsRValue(Info, this, Result ? *Result : Scratch);
+
+ if (!Diags.empty()) {
+ IsConstExpr = false;
+ if (Loc) *Loc = Diags[0].first;
+ } else if (!IsConstExpr) {
+ // FIXME: This shouldn't happen.
+ if (Loc) *Loc = getExprLoc();
+ }
+
+ return IsConstExpr;
+}
+
+bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
+ llvm::SmallVectorImpl<
+ PartialDiagnosticAt> &Diags) {
+ // FIXME: It would be useful to check constexpr function templates, but at the
+ // moment the constant expression evaluator cannot cope with the non-rigorous
+ // ASTs which we build for dependent expressions.
+ if (FD->isDependentContext())
+ return true;
+
+ Expr::EvalStatus Status;
+ Status.Diag = &Diags;
+
+ EvalInfo Info(FD->getASTContext(), Status);
+ Info.CheckingPotentialConstantExpression = true;
+
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : 0;
+
+ // FIXME: Fabricate an arbitrary expression on the stack and pretend that it
+ // is a temporary being used as the 'this' pointer.
+ LValue This;
+ ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
+ This.set(&VIE, Info.CurrentCall->Index);
+
+ ArrayRef<const Expr*> Args;
+
+ SourceLocation Loc = FD->getLocation();
+
+ APValue Scratch;
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ HandleConstructorCall(Loc, This, Args, CD, Info, Scratch);
+ else
+ HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : 0,
+ Args, FD->getBody(), Info, Scratch);
+
+ return Diags.empty();
+}
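// A few small cases illustrating the ICE rules implemented above (a
// sketch; the exact diagnostics come from the evaluator and are not
// shown here):
//
//   enum E1 { A = (int)3.5 };        // ICE: explicit float-to-int cast of
//                                    // a literal whose value fits.
//   enum E2 { B = 1 ? 2 : (3, 4) };  // C99: accepted only because the
//                                    // comma operand is unevaluated (6.6p3).
//   int n = 4;
//   enum E3 { C = n };               // not an ICE: 'n' is not a constant.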
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
index acedf70..d7b6354 100644
--- a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
@@ -40,14 +40,38 @@ using namespace clang;
namespace {
+/// \brief Retrieve the declaration context that should be used when mangling
+/// the given declaration.
+static const DeclContext *getEffectiveDeclContext(const Decl *D) {
+ // The ABI assumes that lambda closure types that occur within
+ // default arguments live in the context of the function. However, due to
+ // the way in which Clang parses and creates function declarations, this is
+ // not the case: the lambda closure type ends up living in the context
+ // where the function itself resides, because the function declaration itself
+ // had not yet been created. Fix the context here.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (RD->isLambda())
+ if (ParmVarDecl *ContextParam
+ = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ return D->getDeclContext();
+}
+
+static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
+ return getEffectiveDeclContext(cast<Decl>(DC));
+}
+
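// Illustration (sketch): given a lambda in a default argument, e.g.
//
//   void f(int x = [] { return 1; }());
//
// the closure type is created before the FunctionDecl for 'f' exists, so
// its lexical DeclContext is the enclosing scope. The redirection above
// walks from the closure's context decl (the ParmVarDecl 'x') to that
// parameter's DeclContext, the function 'f', which is what the ABI expects.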
static const CXXRecordDecl *GetLocalClassDecl(const NamedDecl *ND) {
const DeclContext *DC = dyn_cast<DeclContext>(ND);
if (!DC)
- DC = ND->getDeclContext();
+ DC = getEffectiveDeclContext(ND);
while (!DC->isNamespace() && !DC->isTranslationUnit()) {
- if (isa<FunctionDecl>(DC->getParent()))
+ const DeclContext *Parent = getEffectiveDeclContext(cast<Decl>(DC));
+ if (isa<FunctionDecl>(Parent))
return dyn_cast<CXXRecordDecl>(DC);
- DC = DC->getParent();
+ DC = Parent;
}
return 0;
}
@@ -63,7 +87,7 @@ static const NamedDecl *getStructor(const NamedDecl *decl) {
const FunctionDecl *fn = dyn_cast_or_null<FunctionDecl>(decl);
return (fn ? getStructor(fn) : decl);
}
-
+
static const unsigned UnknownArity = ~0U;
class ItaniumMangleContext : public MangleContext {
@@ -122,6 +146,13 @@ public:
}
bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ // Lambda closure types with external linkage (indicated by a
+ // non-zero lambda mangling number) have their own numbering scheme, so
+ // they do not need a discriminator.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND))
+ if (RD->isLambda() && RD->getLambdaManglingNumber() > 0)
+ return false;
+
unsigned &discriminator = Uniquifier[ND];
if (!discriminator)
discriminator = ++Discriminator;
@@ -272,6 +303,7 @@ private:
void mangleUnscopedTemplateName(TemplateName);
void mangleSourceName(const IdentifierInfo *II);
void mangleLocalName(const NamedDecl *ND);
+ void mangleLambda(const CXXRecordDecl *Lambda);
void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
bool NoFunction=false);
void mangleNestedName(const TemplateDecl *TD,
@@ -332,8 +364,8 @@ private:
static bool isInCLinkageSpecification(const Decl *D) {
D = D->getCanonicalDecl();
- for (const DeclContext *DC = D->getDeclContext();
- !DC->isTranslationUnit(); DC = DC->getParent()) {
+ for (const DeclContext *DC = getEffectiveDeclContext(D);
+ !DC->isTranslationUnit(); DC = getEffectiveParentContext(DC)) {
if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
}
@@ -343,7 +375,7 @@ static bool isInCLinkageSpecification(const Decl *D) {
bool ItaniumMangleContext::shouldMangleDeclName(const NamedDecl *D) {
// In C, functions with no attributes never need to be mangled. Fastpath them.
- if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+ if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
return false;
// Any decl can be declared with __asm("foo") on it, and this takes precedence
@@ -360,22 +392,22 @@ bool ItaniumMangleContext::shouldMangleDeclName(const NamedDecl *D) {
return true;
// Otherwise, no mangling is done outside C++ mode.
- if (!getASTContext().getLangOptions().CPlusPlus)
+ if (!getASTContext().getLangOpts().CPlusPlus)
return false;
// Variables at global scope with non-internal linkage are not mangled
if (!FD) {
- const DeclContext *DC = D->getDeclContext();
+ const DeclContext *DC = getEffectiveDeclContext(D);
// Check for extern variable declared locally.
if (DC->isFunctionOrMethod() && D->hasLinkage())
while (!DC->isNamespace() && !DC->isTranslationUnit())
- DC = DC->getParent();
+ DC = getEffectiveParentContext(DC);
if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
return false;
}
// Class members are always mangled.
- if (D->getDeclContext()->isRecord())
+ if (getEffectiveDeclContext(D)->isRecord())
return true;
// C functions and "main" are not mangled.
@@ -458,7 +490,7 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
while (isa<LinkageSpecDecl>(DC)) {
- DC = DC->getParent();
+ DC = getEffectiveParentContext(DC);
}
return DC;
@@ -466,7 +498,8 @@ static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
/// isStd - Return whether a given namespace is the 'std' namespace.
static bool isStd(const NamespaceDecl *NS) {
- if (!IgnoreLinkageSpecDecls(NS->getParent())->isTranslationUnit())
+ if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS))
+ ->isTranslationUnit())
return false;
const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
@@ -502,26 +535,35 @@ isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
return 0;
}
+static bool isLambda(const NamedDecl *ND) {
+ const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+ if (!Record)
+ return false;
+
+ return Record->isLambda();
+}
+
void CXXNameMangler::mangleName(const NamedDecl *ND) {
// <name> ::= <nested-name>
// ::= <unscoped-name>
// ::= <unscoped-template-name> <template-args>
// ::= <local-name>
//
- const DeclContext *DC = ND->getDeclContext();
+ const DeclContext *DC = getEffectiveDeclContext(ND);
// If this is an extern variable declared locally, the relevant DeclContext
// is that of the containing namespace, or the translation unit.
- if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ // FIXME: This is a hack; extern variables declared locally should have
+ // a proper semantic declaration context!
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage() && !isLambda(ND))
while (!DC->isNamespace() && !DC->isTranslationUnit())
- DC = DC->getParent();
+ DC = getEffectiveParentContext(DC);
else if (GetLocalClassDecl(ND)) {
mangleLocalName(ND);
return;
}
- while (isa<LinkageSpecDecl>(DC))
- DC = DC->getParent();
+ DC = IgnoreLinkageSpecDecls(DC);
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
// Check if we have a template.
@@ -547,7 +589,7 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) {
void CXXNameMangler::mangleName(const TemplateDecl *TD,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs) {
- const DeclContext *DC = IgnoreLinkageSpecDecls(TD->getDeclContext());
+ const DeclContext *DC = IgnoreLinkageSpecDecls(getEffectiveDeclContext(TD));
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
mangleUnscopedTemplateName(TD);
@@ -561,7 +603,8 @@ void CXXNameMangler::mangleName(const TemplateDecl *TD,
void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
// <unscoped-name> ::= <unqualified-name>
// ::= St <unqualified-name> # ::std::
- if (isStdNamespace(ND->getDeclContext()))
+
+ if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
Out << "St";
mangleUnqualifiedName(ND);
@@ -610,17 +653,41 @@ void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
// representation (IEEE on Itanium), high-order bytes first,
// without leading zeroes. For example: "Lf bf800000 E" is -1.0f
// on Itanium.
- // APInt::toString uses uppercase hexadecimal, and it's not really
- // worth embellishing that interface for this use case, so we just
- // do a second pass to lowercase things.
- typedef llvm::SmallString<20> buffer_t;
- buffer_t buffer;
- f.bitcastToAPInt().toString(buffer, 16, false);
+ // The 'without leading zeroes' thing seems to be an editorial
+ // mistake; see the discussion on cxx-abi-dev beginning on
+ // 2012-01-16.
+
+ // Our requirements here are just barely weird enough to justify
+ // using a custom algorithm instead of post-processing APInt::toString().
+
+ llvm::APInt valueBits = f.bitcastToAPInt();
+ unsigned numCharacters = (valueBits.getBitWidth() + 3) / 4;
+ assert(numCharacters != 0);
- for (buffer_t::iterator i = buffer.begin(), e = buffer.end(); i != e; ++i)
- if (isupper(*i)) *i = tolower(*i);
+ // Allocate a buffer of the right number of characters.
+ llvm::SmallVector<char, 20> buffer;
+ buffer.set_size(numCharacters);
- Out.write(buffer.data(), buffer.size());
+ // Fill the buffer left-to-right.
+ for (unsigned stringIndex = 0; stringIndex != numCharacters; ++stringIndex) {
+ // The bit-index of the next hex digit.
+ unsigned digitBitIndex = 4 * (numCharacters - stringIndex - 1);
+
+ // Project out 4 bits starting at 'digitBitIndex'.
+ llvm::integerPart hexDigit
+ = valueBits.getRawData()[digitBitIndex / llvm::integerPartWidth];
+ hexDigit >>= (digitBitIndex % llvm::integerPartWidth);
+ hexDigit &= 0xF;
+
+ // Map that over to a lowercase hex digit.
+ static const char charForHex[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+ };
+ buffer[stringIndex] = charForHex[hexDigit];
+ }
+
+ Out.write(buffer.data(), numCharacters);
}
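// A standalone sketch of the nibble extraction above, assuming a 32-bit
// IEEE float in place of an arbitrary-width llvm::APInt:
#include <cstdint>
#include <cstring>
#include <string>

std::string mangleFloatBits(float f) {
  uint32_t valueBits;
  std::memcpy(&valueBits, &f, sizeof valueBits);
  std::string buffer(8, '0');
  for (unsigned stringIndex = 0; stringIndex != 8; ++stringIndex) {
    // Bit-index of the next hex digit; the buffer is filled left-to-right.
    unsigned digitBitIndex = 4 * (8 - stringIndex - 1);
    buffer[stringIndex] = "0123456789abcdef"[(valueBits >> digitBitIndex) & 0xF];
  }
  // mangleFloatBits(-1.0f) == "bf800000", matching "Lf bf800000 E" above.
  return buffer;
}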
void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
@@ -997,7 +1064,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// This naming convention is the same as that followed by GCC,
// though it shouldn't actually matter.
if (ND && ND->getLinkage() == InternalLinkage &&
- ND->getDeclContext()->isFileContext())
+ getEffectiveDeclContext(ND)->isFileContext())
Out << 'L';
mangleSourceName(II);
@@ -1052,13 +1119,24 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
break;
}
+ // <unnamed-type-name> ::= <closure-type-name>
+ //
+ // <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
+ // <lambda-sig> ::= <parameter-type>+ # Parameter types or 'v' for 'void'.
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
+ if (Record->isLambda() && Record->getLambdaManglingNumber()) {
+ mangleLambda(Record);
+ break;
+ }
+ }
+
// Get a unique id for the anonymous struct.
uint64_t AnonStructId = Context.getAnonymousStructId(TD);
// Mangle it as a source name in the form
// [n] $_<id>
// where n is the length of the string.
- llvm::SmallString<8> Str;
+ SmallString<8> Str;
Str += "$_";
Str += llvm::utostr(AnonStructId);
@@ -1180,8 +1258,10 @@ void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
// <local-name> := Z <function encoding> E <entity name> [<discriminator>]
// := Z <function encoding> E s [<discriminator>]
+ // <local-name> := Z <function encoding> E d [ <parameter number> ]
+ // _ <entity name>
// <discriminator> := _ <non-negative number>
- const DeclContext *DC = ND->getDeclContext();
+ const DeclContext *DC = getEffectiveDeclContext(ND);
if (isa<ObjCMethodDecl>(DC) && isa<FunctionDecl>(ND)) {
    // Don't add objc method name mangling to locally declared functions.
mangleUnqualifiedName(ND);
@@ -1193,23 +1273,46 @@ void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) {
mangleObjCMethodName(MD);
} else if (const CXXRecordDecl *RD = GetLocalClassDecl(ND)) {
- mangleFunctionEncoding(cast<FunctionDecl>(RD->getDeclContext()));
+ mangleFunctionEncoding(cast<FunctionDecl>(getEffectiveDeclContext(RD)));
Out << 'E';
+ // The parameter number is omitted for the last parameter, 0 for the
+ // second-to-last parameter, 1 for the third-to-last parameter, etc. The
+ // <entity name> will of course contain a <closure-type-name>: Its
+ // numbering will be local to the particular argument in which it appears
+ // -- other default arguments do not affect its encoding.
+ bool SkipDiscriminator = false;
+ if (RD->isLambda()) {
+ if (const ParmVarDecl *Parm
+ = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl())) {
+ if (const FunctionDecl *Func
+ = dyn_cast<FunctionDecl>(Parm->getDeclContext())) {
+ Out << 'd';
+ unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex();
+ if (Num > 1)
+ mangleNumber(Num - 2);
+ Out << '_';
+ SkipDiscriminator = true;
+ }
+ }
+ }
+
// Mangle the name relative to the closest enclosing function.
if (ND == RD) // equality ok because RD derived from ND above
mangleUnqualifiedName(ND);
else
mangleNestedName(ND, DC, true /*NoFunction*/);
- unsigned disc;
- if (Context.getNextDiscriminator(RD, disc)) {
- if (disc < 10)
- Out << '_' << disc;
- else
- Out << "__" << disc << '_';
+ if (!SkipDiscriminator) {
+ unsigned disc;
+ if (Context.getNextDiscriminator(RD, disc)) {
+ if (disc < 10)
+ Out << '_' << disc;
+ else
+ Out << "__" << disc << '_';
+ }
}
-
+
return;
}
else
@@ -1219,6 +1322,48 @@ void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
mangleUnqualifiedName(ND);
}
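// Worked example of the 'd' encoding above (derived from the comments in
// mangleLocalName; a sketch, not normative): for
//
//   void f(int a = 0, int b = 0, int c = 0);
//
// a closure type in c's default argument (the last parameter) encodes as
// "Z <f encoding> E d _ <closure-type-name>", with Num == 1 and the number
// omitted; one in b's default argument emits "d 0 _" (Num == 2), and one
// in a's emits "d 1 _" (Num == 3).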
+void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
+ // If the context of a closure type is an initializer for a class member
+ // (static or nonstatic), it is encoded in a qualified name with a final
+ // <prefix> of the form:
+ //
+ // <data-member-prefix> := <member source-name> M
+ //
+ // Technically, the data-member-prefix is part of the <prefix>. However,
+ // since a closure type will always be mangled with a prefix, it's easier
+ // to emit that last part of the prefix here.
+ if (Decl *Context = Lambda->getLambdaContextDecl()) {
+ if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
+ Context->getDeclContext()->isRecord()) {
+ if (const IdentifierInfo *Name
+ = cast<NamedDecl>(Context)->getIdentifier()) {
+ mangleSourceName(Name);
+ Out << 'M';
+ }
+ }
+ }
+
+ Out << "Ul";
+ DeclarationName Name
+ = getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ const FunctionProtoType *Proto
+ = cast<CXXMethodDecl>(*Lambda->lookup(Name).first)->getType()->
+ getAs<FunctionProtoType>();
+ mangleBareFunctionType(Proto, /*MangleReturnType=*/false);
+ Out << "E";
+
+ // The number is omitted for the first closure type with a given
+ // <lambda-sig> in a given context; it is n-2 for the nth closure type
+ // (in lexical order) with that same <lambda-sig> and context.
+ //
+ // The AST keeps track of the number for us.
+ unsigned Number = Lambda->getLambdaManglingNumber();
+ assert(Number > 0 && "Lambda should be mangled as an unnamed class");
+ if (Number > 1)
+ mangleNumber(Number - 2);
+ Out << '_';
+}
+
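// Example of the numbering (sketch, following the grammar comment above):
// for two lambdas with the same <lambda-sig> in one context,
//
//   auto a = [](int) {};   // first:  "UliE_"  (Number == 1, omitted)
//   auto b = [](int) {};   // second: "UliE0_" (Number == 2, emits 2 - 2)
//
// and a third such lambda would be "UliE1_".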
void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
switch (qualifier->getKind()) {
case NestedNameSpecifier::Global:
@@ -1258,15 +1403,14 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
// ::= # empty
// ::= <substitution>
- while (isa<LinkageSpecDecl>(DC))
- DC = DC->getParent();
+ DC = IgnoreLinkageSpecDecls(DC);
if (DC->isTranslationUnit())
return;
if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
- manglePrefix(DC->getParent(), NoFunction);
- llvm::SmallString<64> Name;
+ manglePrefix(getEffectiveParentContext(DC), NoFunction);
+ SmallString<64> Name;
llvm::raw_svector_ostream NameStream(Name);
Context.mangleBlock(Block, NameStream);
NameStream.flush();
@@ -1274,26 +1418,27 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
return;
}
- if (mangleSubstitution(cast<NamedDecl>(DC)))
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (mangleSubstitution(ND))
return;
-
+
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = 0;
- if (const TemplateDecl *TD = isTemplate(cast<NamedDecl>(DC), TemplateArgs)) {
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD);
TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
}
- else if(NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ else if(NoFunction && (isa<FunctionDecl>(ND) || isa<ObjCMethodDecl>(ND)))
return;
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
mangleObjCMethodName(Method);
else {
- manglePrefix(DC->getParent(), NoFunction);
- mangleUnqualifiedName(cast<NamedDecl>(DC));
+ manglePrefix(getEffectiveDeclContext(ND), NoFunction);
+ mangleUnqualifiedName(ND);
}
- addSubstitution(cast<NamedDecl>(DC));
+ addSubstitution(ND);
}
void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
@@ -1336,7 +1481,7 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
return;
}
- manglePrefix(ND->getDeclContext());
+ manglePrefix(getEffectiveDeclContext(ND));
mangleUnqualifiedName(ND->getTemplatedDecl());
addSubstitution(ND);
}
@@ -1370,7 +1515,6 @@ void CXXNameMangler::mangleType(TemplateName TN) {
case TemplateName::OverloadedTemplate:
llvm_unreachable("can't mangle an overloaded template name as a <type>");
- break;
case TemplateName::DependentTemplate: {
const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
@@ -1531,7 +1675,7 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
//
// where <address-space-number> is a source name consisting of 'AS'
// followed by the address space <number>.
- llvm::SmallString<64> ASString;
+ SmallString<64> ASString;
ASString = "AS" + llvm::utostr_32(Quals.getAddressSpace());
Out << 'U' << ASString.size() << ASString;
}
@@ -1632,8 +1776,8 @@ void CXXNameMangler::mangleType(QualType T) {
} while (true);
}
SplitQualType split = T.split();
- Qualifiers quals = split.second;
- const Type *ty = split.first;
+ Qualifiers quals = split.Quals;
+ const Type *ty = split.Ty;
bool isSubstitutable = quals || !isa<BuiltinType>(T);
if (isSubstitutable && mangleSubstitution(T))
@@ -1735,12 +1879,12 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::LongDouble: Out << 'e'; break;
case BuiltinType::NullPtr: Out << "Dn"; break;
- case BuiltinType::Overload:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Dependent:
- case BuiltinType::BoundMember:
- case BuiltinType::UnknownAny:
llvm_unreachable("mangling a placeholder type");
- break;
case BuiltinType::ObjCId: Out << "11objc_object"; break;
case BuiltinType::ObjCClass: Out << "10objc_class"; break;
case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
@@ -2217,15 +2361,12 @@ recurse:
// These all can only appear in local or variable-initialization
// contexts and so should never appear in a mangling.
case Expr::AddrLabelExprClass:
- case Expr::BlockDeclRefExprClass:
case Expr::CXXThisExprClass:
case Expr::DesignatedInitExprClass:
case Expr::ImplicitValueInitExprClass:
- case Expr::InitListExprClass:
case Expr::ParenListExprClass:
- case Expr::CXXScalarValueInitExprClass:
+ case Expr::LambdaExprClass:
llvm_unreachable("unexpected statement kind");
- break;
// FIXME: invent manglings for all these.
case Expr::BlockExprClass:
@@ -2242,6 +2383,10 @@ recurse:
case Expr::ObjCProtocolExprClass:
case Expr::ObjCSelectorExprClass:
case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCSubscriptRefExprClass:
case Expr::ObjCIndirectCopyRestoreExprClass:
case Expr::OffsetOfExprClass:
case Expr::PredefinedExprClass:
@@ -2249,6 +2394,7 @@ recurse:
case Expr::StmtExprClass:
case Expr::UnaryTypeTraitExprClass:
case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::VAArgExprClass:
@@ -2256,6 +2402,7 @@ recurse:
case Expr::CXXNoexceptExprClass:
case Expr::CUDAKernelCallExprClass:
case Expr::AsTypeExprClass:
+ case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
{
// As bad as this diagnostic is, it's better than crashing.
@@ -2282,6 +2429,16 @@ recurse:
case Expr::OpaqueValueExprClass:
llvm_unreachable("cannot mangle opaque value; mangling wrong thing?");
+ case Expr::InitListExprClass: {
+ // Proposal by Jason Merrill, 2012-01-03
+ Out << "il";
+ const InitListExpr *InitList = cast<InitListExpr>(E);
+ for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i)
+ mangleExpression(InitList->getInit(i));
+ Out << "E";
+ break;
+ }
+
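// Under this scheme (a sketch of the cited proposal), a braced list such
// as {1, 2} in a mangled expression becomes
//
//   il Li1E Li2E E   ==>   "ilLi1ELi2EE"
//
// i.e. "il", one mangled expression per element, then "E".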
case Expr::CXXDefaultArgExprClass:
mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
break;
@@ -2291,6 +2448,9 @@ recurse:
Arity);
break;
+ case Expr::UserDefinedLiteralClass:
+ // We follow g++'s approach of mangling a UDL as a call to the literal
+ // operator.
case Expr::CXXMemberCallExprClass: // fallthrough
case Expr::CallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
@@ -2318,7 +2478,6 @@ recurse:
}
case Expr::CXXNewExprClass: {
- // Proposal from David Vandervoorde, 2010.06.30
const CXXNewExpr *New = cast<CXXNewExpr>(E);
if (New->isGlobalNew()) Out << "gs";
Out << (New->isArray() ? "na" : "nw");
@@ -2328,10 +2487,29 @@ recurse:
Out << '_';
mangleType(New->getAllocatedType());
if (New->hasInitializer()) {
- Out << "pi";
- for (CXXNewExpr::const_arg_iterator I = New->constructor_arg_begin(),
- E = New->constructor_arg_end(); I != E; ++I)
- mangleExpression(*I);
+ // Proposal by Jason Merrill, 2012-01-03
+ if (New->getInitializationStyle() == CXXNewExpr::ListInit)
+ Out << "il";
+ else
+ Out << "pi";
+ const Expr *Init = New->getInitializer();
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ // Directly inline the initializers.
+ for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
+ E = CCE->arg_end();
+ I != E; ++I)
+ mangleExpression(*I);
+ } else if (const ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) {
+ for (unsigned i = 0, e = PLE->getNumExprs(); i != e; ++i)
+ mangleExpression(PLE->getExpr(i));
+ } else if (New->getInitializationStyle() == CXXNewExpr::ListInit &&
+ isa<InitListExpr>(Init)) {
+ // Only take InitListExprs apart for list-initialization.
+ const InitListExpr *InitList = cast<InitListExpr>(Init);
+ for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i)
+ mangleExpression(InitList->getInit(i));
+ } else
+ mangleExpression(Init);
}
Out << 'E';
break;
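// Putting the pieces together (sketch): with the list-init handling above,
//
//   new int(5)   mangles as   "nw_ipiLi5EE"   // pi = parenthesized init
//   new int{5}   mangles as   "nw_iilLi5EE"   // il = list init
//
// in both cases "nw", no placement arguments, "_", the type "i", the
// initializer expressions, and the final "E".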
@@ -2395,7 +2573,11 @@ recurse:
const CXXConstructExpr *CE = cast<CXXConstructExpr>(E);
unsigned N = CE->getNumArgs();
- Out << "cv";
+ // Proposal by Jason Merrill, 2012-01-03
+ if (CE->isListInitialization())
+ Out << "tl";
+ else
+ Out << "cv";
mangleType(CE->getType());
if (N != 1) Out << '_';
for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
@@ -2403,6 +2585,12 @@ recurse:
break;
}
+ case Expr::CXXScalarValueInitExprClass:
+ Out <<"cv";
+ mangleType(E->getType());
+ Out <<"_E";
+ break;
+
case Expr::UnaryExprOrTypeTraitExprClass: {
const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E);
@@ -2642,6 +2830,13 @@ recurse:
Out << 'E';
break;
+ // FIXME: __objc_yes/__objc_no are mangled the same as true/false.
+ case Expr::ObjCBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<ObjCBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
case Expr::CXXBoolLiteralExprClass:
Out << "Lb";
Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
@@ -2897,23 +3092,48 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
Out << "Dp";
mangleType(A.getAsTemplateOrTemplatePattern());
break;
- case TemplateArgument::Expression:
+ case TemplateArgument::Expression: {
+ // It's possible to end up with a DeclRefExpr here in certain
+ // dependent cases, in which case we should mangle as a
+ // declaration.
+ const Expr *E = A.getAsExpr()->IgnoreParens();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *D = DRE->getDecl();
+ if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
+ Out << "L";
+ mangle(D, "_Z");
+ Out << 'E';
+ break;
+ }
+ }
+
Out << 'X';
- mangleExpression(A.getAsExpr());
+ mangleExpression(E);
Out << 'E';
break;
+ }
case TemplateArgument::Integral:
mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
break;
case TemplateArgument::Declaration: {
assert(P && "Missing template parameter for declaration argument");
// <expr-primary> ::= L <mangled-name> E # external name
-
+ // <expr-primary> ::= L <type> 0 E
// Clang produces AST's where pointer-to-member-function expressions
// and pointer-to-function expressions are represented as a declaration not
// an expression. We compensate for it here to produce the correct mangling.
- NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
const NonTypeTemplateParmDecl *Parameter = cast<NonTypeTemplateParmDecl>(P);
+
+ // Handle NULL pointer arguments.
+ if (!A.getAsDecl()) {
+ Out << "L";
+ mangleType(Parameter->getType());
+ Out << "0E";
+ break;
+ }
+
+ NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
bool compensateMangling = !Parameter->getType()->isReferenceType();
if (compensateMangling) {
Out << 'X';
@@ -2982,8 +3202,15 @@ bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
}
+/// \brief Determine whether the given type has any qualifiers that are
+/// relevant for substitutions.
+static bool hasMangledSubstitutionQualifiers(QualType T) {
+ Qualifiers Qs = T.getQualifiers();
+ return Qs.getCVRQualifiers() || Qs.hasAddressSpace();
+}
+
bool CXXNameMangler::mangleSubstitution(QualType T) {
- if (!T.getCVRQualifiers()) {
+ if (!hasMangledSubstitutionQualifiers(T)) {
if (const RecordType *RT = T->getAs<RecordType>())
return mangleSubstitution(RT->getDecl());
}
@@ -3059,7 +3286,7 @@ static bool isCharSpecialization(QualType T, const char *Name) {
if (!SD)
return false;
- if (!isStdNamespace(SD->getDeclContext()))
+ if (!isStdNamespace(getEffectiveDeclContext(SD)))
return false;
const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
@@ -3101,7 +3328,7 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
}
if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
- if (!isStdNamespace(TD->getDeclContext()))
+ if (!isStdNamespace(getEffectiveDeclContext(TD)))
return false;
// <substitution> ::= Sa # ::std::allocator
@@ -3119,7 +3346,7 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
if (const ClassTemplateSpecializationDecl *SD =
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
- if (!isStdNamespace(SD->getDeclContext()))
+ if (!isStdNamespace(getEffectiveDeclContext(SD)))
return false;
// <substitution> ::= Ss # ::std::basic_string<char,
@@ -3169,7 +3396,7 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
}
void CXXNameMangler::addSubstitution(QualType T) {
- if (!T.getCVRQualifiers()) {
+ if (!hasMangledSubstitutionQualifiers(T)) {
if (const RecordType *RT = T->getAs<RecordType>()) {
addSubstitution(RT->getDecl());
return;
diff --git a/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp b/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp
new file mode 100644
index 0000000..f5272a7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp
@@ -0,0 +1,30 @@
+//===--- LambdaMangleContext.cpp - Context for mangling lambdas -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LambdaMangleContext class, which keeps track of
+// the Itanium C++ ABI mangling numbers for lambda expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/LambdaMangleContext.h"
+#include "clang/AST/DeclCXX.h"
+
+using namespace clang;
+
+unsigned LambdaMangleContext::getManglingNumber(CXXMethodDecl *CallOperator) {
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ ASTContext &Context = CallOperator->getASTContext();
+
+ QualType Key = Context.getFunctionType(Context.VoidTy,
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ FunctionProtoType::ExtProtoInfo());
+ Key = Context.getCanonicalType(Key);
+ return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
+}
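// Design note (inferred from the code above): the key replaces the call
// operator's return type with 'void' and canonicalizes it, so two lambdas
// whose operator() differs only in return type, e.g.
//
//   auto a = [](int x) { return x; };        // int (int)
//   auto b = [](int x) { return x * 0.5; };  // double (int)
//
// share the key 'void (int)' and draw from one numbering sequence. This
// matches <lambda-sig>, which mangles only the parameter types.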
diff --git a/contrib/llvm/tools/clang/lib/AST/Mangle.cpp b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
index 5cb8f47..73c9f57 100644
--- a/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
@@ -59,6 +59,8 @@ static void checkMangleDC(const DeclContext *DC, const BlockDecl *BD) {
}
+void MangleContext::anchor() { }
+
void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
raw_ostream &Out) {
Out << "__block_global_" << getBlockId(BD, false);
@@ -68,7 +70,7 @@ void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
CXXCtorType CT, const BlockDecl *BD,
raw_ostream &ResStream) {
checkMangleDC(CD, BD);
- llvm::SmallString<64> Buffer;
+ SmallString<64> Buffer;
llvm::raw_svector_ostream Out(Buffer);
mangleCXXCtor(CD, CT, Out);
Out.flush();
@@ -79,7 +81,7 @@ void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
CXXDtorType DT, const BlockDecl *BD,
raw_ostream &ResStream) {
checkMangleDC(DD, BD);
- llvm::SmallString<64> Buffer;
+ SmallString<64> Buffer;
llvm::raw_svector_ostream Out(Buffer);
mangleCXXDtor(DD, DT, Out);
Out.flush();
@@ -91,7 +93,7 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
assert(!isa<CXXConstructorDecl>(DC) && !isa<CXXDestructorDecl>(DC));
checkMangleDC(DC, BD);
- llvm::SmallString<64> Buffer;
+ SmallString<64> Buffer;
llvm::raw_svector_ostream Stream(Buffer);
if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
mangleObjCMethodName(Method, Stream);
@@ -114,7 +116,7 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
raw_ostream &Out) {
- llvm::SmallString<64> Name;
+ SmallString<64> Name;
llvm::raw_svector_ostream OS(Name);
const ObjCContainerDecl *CD =
@@ -122,7 +124,7 @@ void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
assert (CD && "Missing container decl in GetNameForMethod");
OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
- OS << '(' << CID << ')';
+ OS << '(' << *CID << ')';
OS << ' ' << MD->getSelector().getAsString() << ']';
Out << OS.str().size() << OS.str();
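// Shape of the emitted name (sketch): for an instance method -bar on a
// class Foo, the stream holds "-[Foo bar]" (10 characters), so the output
// is the length-prefixed "10-[Foo bar]"; within a category implementation
// it would be "-[Foo(Cat) bar]" instead.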
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
index 1515db4..ba9856a 100644
--- a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
@@ -119,7 +119,7 @@ static bool isInCLinkageSpecification(const Decl *D) {
bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
// In C, functions with no attributes never need to be mangled. Fastpath them.
- if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+ if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
return false;
// Any decl can be declared with __asm("foo") on it, and this takes precedence
@@ -136,7 +136,7 @@ bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
return true;
// Otherwise, no mangling is done outside C++ mode.
- if (!getASTContext().getLangOptions().CPlusPlus)
+ if (!getASTContext().getLangOpts().CPlusPlus)
return false;
// Variables at global scope with internal linkage are not mangled.
@@ -335,10 +335,12 @@ MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
llvm_unreachable("Can't mangle Objective-C selector names here!");
case DeclarationName::CXXConstructorName:
- llvm_unreachable("Can't mangle constructors yet!");
+ Out << "?0";
+ break;
case DeclarationName::CXXDestructorName:
- llvm_unreachable("Can't mangle destructors yet!");
+ Out << "?1";
+ break;
case DeclarationName::CXXConversionFunctionName:
// <operator-name> ::= ?B # (cast)
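// With "?0"/"?1" emitted above, constructors and destructors now flow
// through the rest of the mangler. For example, the decorated name of a
// public constructor S::S() on 32-bit x86 is "??0S@@QAE@XZ" (a sketch;
// the calling-convention and qualifier letters vary by target and method).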
@@ -701,12 +703,13 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::WChar_S:
case BuiltinType::WChar_U: Out << "_W"; break;
- case BuiltinType::Overload:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Dependent:
- case BuiltinType::UnknownAny:
- case BuiltinType::BoundMember:
- llvm_unreachable(
- "Overloaded and dependent types shouldn't get to name mangling");
+ llvm_unreachable("placeholder types shouldn't get to name mangling");
+
case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
@@ -715,7 +718,7 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::Char32:
case BuiltinType::Half:
case BuiltinType::NullPtr:
- llvm_unreachable("Don't know how to mangle this type");
+ assert(0 && "Don't know how to mangle this type yet");
}
}
@@ -1167,13 +1170,15 @@ void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
}
void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
CXXCtorType Type,
- raw_ostream &) {
- llvm_unreachable("Can't yet mangle constructors!");
+ raw_ostream & Out) {
+ MicrosoftCXXNameMangler mangler(*this, Out);
+ mangler.mangle(D);
}
void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
CXXDtorType Type,
- raw_ostream &) {
- llvm_unreachable("Can't yet mangle destructors!");
+ raw_ostream & Out) {
+ MicrosoftCXXNameMangler mangler(*this, Out);
+ mangler.mangle(D);
}
void MicrosoftMangleContext::mangleReferenceTemporary(const clang::VarDecl *,
raw_ostream &) {
diff --git a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
new file mode 100644
index 0000000..f5ea2c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
@@ -0,0 +1,312 @@
+//===--- NSAPI.cpp - NSFoundation APIs ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/NSAPI.h"
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+NSAPI::NSAPI(ASTContext &ctx)
+ : Ctx(ctx), ClassIds() {
+}
+
+IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const {
+ static const char *ClassName[NumClassIds] = {
+ "NSObject",
+ "NSString",
+ "NSArray",
+ "NSMutableArray",
+ "NSDictionary",
+ "NSMutableDictionary",
+ "NSNumber"
+ };
+
+ if (!ClassIds[K])
+ return (ClassIds[K] = &Ctx.Idents.get(ClassName[K]));
+
+ return ClassIds[K];
+}
+
+Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
+ if (NSStringSelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSStr_stringWithString:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithString"));
+ break;
+ case NSStr_initWithString:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithString"));
+ break;
+ }
+ return (NSStringSelectors[MK] = Sel);
+ }
+
+ return NSStringSelectors[MK];
+}
+
+Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
+ if (NSArraySelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSArr_array:
+ Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("array"));
+ break;
+ case NSArr_arrayWithArray:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithArray"));
+ break;
+ case NSArr_arrayWithObject:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObject"));
+ break;
+ case NSArr_arrayWithObjects:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects"));
+ break;
+ case NSArr_arrayWithObjectsCount: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("arrayWithObjects"),
+ &Ctx.Idents.get("count")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSArr_initWithArray:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithArray"));
+ break;
+ case NSArr_initWithObjects:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithObjects"));
+ break;
+ case NSArr_objectAtIndex:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex"));
+ break;
+ case NSMutableArr_replaceObjectAtIndex: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("replaceObjectAtIndex"),
+ &Ctx.Idents.get("withObject")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSArraySelectors[MK] = Sel);
+ }
+
+ return NSArraySelectors[MK];
+}
+
+llvm::Optional<NSAPI::NSArrayMethodKind>
+NSAPI::getNSArrayMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSArrayMethods; ++i) {
+ NSArrayMethodKind MK = NSArrayMethodKind(i);
+ if (Sel == getNSArraySelector(MK))
+ return MK;
+ }
+
+ return llvm::Optional<NSArrayMethodKind>();
+}
+
+Selector NSAPI::getNSDictionarySelector(
+ NSDictionaryMethodKind MK) const {
+ if (NSDictionarySelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSDict_dictionary:
+ Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("dictionary"));
+ break;
+ case NSDict_dictionaryWithDictionary:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("dictionaryWithDictionary"));
+ break;
+ case NSDict_dictionaryWithObjectForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObject"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsForKeys: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"),
+ &Ctx.Idents.get("forKeys")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsForKeysCount: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"),
+ &Ctx.Idents.get("forKeys"),
+ &Ctx.Idents.get("count")
+ };
+ Sel = Ctx.Selectors.getSelector(3, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsAndKeys:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("dictionaryWithObjectsAndKeys"));
+ break;
+ case NSDict_initWithDictionary:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithDictionary"));
+ break;
+ case NSDict_initWithObjectsAndKeys:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithObjectsAndKeys"));
+ break;
+ case NSDict_objectForKey:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey"));
+ break;
+ case NSMutableDict_setObjectForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSDictionarySelectors[MK] = Sel);
+ }
+
+ return NSDictionarySelectors[MK];
+}
+
+llvm::Optional<NSAPI::NSDictionaryMethodKind>
+NSAPI::getNSDictionaryMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSDictionaryMethods; ++i) {
+ NSDictionaryMethodKind MK = NSDictionaryMethodKind(i);
+ if (Sel == getNSDictionarySelector(MK))
+ return MK;
+ }
+
+ return llvm::Optional<NSDictionaryMethodKind>();
+}
+
+Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
+ bool Instance) const {
+ static const char *ClassSelectorName[NumNSNumberLiteralMethods] = {
+ "numberWithChar",
+ "numberWithUnsignedChar",
+ "numberWithShort",
+ "numberWithUnsignedShort",
+ "numberWithInt",
+ "numberWithUnsignedInt",
+ "numberWithLong",
+ "numberWithUnsignedLong",
+ "numberWithLongLong",
+ "numberWithUnsignedLongLong",
+ "numberWithFloat",
+ "numberWithDouble",
+ "numberWithBool",
+ "numberWithInteger",
+ "numberWithUnsignedInteger"
+ };
+ static const char *InstanceSelectorName[NumNSNumberLiteralMethods] = {
+ "initWithChar",
+ "initWithUnsignedChar",
+ "initWithShort",
+ "initWithUnsignedShort",
+ "initWithInt",
+ "initWithUnsignedInt",
+ "initWithLong",
+ "initWithUnsignedLong",
+ "initWithLongLong",
+ "initWithUnsignedLongLong",
+ "initWithFloat",
+ "initWithDouble",
+ "initWithBool",
+ "initWithInteger",
+ "initWithUnsignedInteger"
+ };
+
+ Selector *Sels;
+ const char **Names;
+ if (Instance) {
+ Sels = NSNumberInstanceSelectors;
+ Names = InstanceSelectorName;
+ } else {
+ Sels = NSNumberClassSelectors;
+ Names = ClassSelectorName;
+ }
+
+ if (Sels[MK].isNull())
+ Sels[MK] = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(Names[MK]));
+ return Sels[MK];
+}
+
+llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const {
+ for (unsigned i = 0; i != NumNSNumberLiteralMethods; ++i) {
+ NSNumberLiteralMethodKind MK = NSNumberLiteralMethodKind(i);
+ if (isNSNumberLiteralSelector(MK, Sel))
+ return MK;
+ }
+
+ return llvm::Optional<NSNumberLiteralMethodKind>();
+}
+
+llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+NSAPI::getNSNumberFactoryMethodKind(QualType T) {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if (!BT)
+ return llvm::Optional<NSAPI::NSNumberLiteralMethodKind>();
+
+ switch (BT->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return NSAPI::NSNumberWithChar;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return NSAPI::NSNumberWithUnsignedChar;
+ case BuiltinType::Short:
+ return NSAPI::NSNumberWithShort;
+ case BuiltinType::UShort:
+ return NSAPI::NSNumberWithUnsignedShort;
+ case BuiltinType::Int:
+ return NSAPI::NSNumberWithInt;
+ case BuiltinType::UInt:
+ return NSAPI::NSNumberWithUnsignedInt;
+ case BuiltinType::Long:
+ return NSAPI::NSNumberWithLong;
+ case BuiltinType::ULong:
+ return NSAPI::NSNumberWithUnsignedLong;
+ case BuiltinType::LongLong:
+ return NSAPI::NSNumberWithLongLong;
+ case BuiltinType::ULongLong:
+ return NSAPI::NSNumberWithUnsignedLongLong;
+ case BuiltinType::Float:
+ return NSAPI::NSNumberWithFloat;
+ case BuiltinType::Double:
+ return NSAPI::NSNumberWithDouble;
+ case BuiltinType::Bool:
+ return NSAPI::NSNumberWithBool;
+
+ case BuiltinType::Void:
+ case BuiltinType::WChar_U:
+ case BuiltinType::WChar_S:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::LongDouble:
+ case BuiltinType::UInt128:
+ case BuiltinType::NullPtr:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCSel:
+ case BuiltinType::BoundMember:
+ case BuiltinType::Dependent:
+ case BuiltinType::Overload:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ case BuiltinType::Half:
+ case BuiltinType::PseudoObject:
+ break;
+ }
+
+ return llvm::Optional<NSAPI::NSNumberLiteralMethodKind>();
+}
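// Usage sketch (hypothetical caller: 'API' is assumed to be an NSAPI
// instance and 'Ctx' its ASTContext): mapping the type of a numeric
// literal such as @42 to the NSNumber factory selector it implies.
llvm::Optional<NSAPI::NSNumberLiteralMethodKind> MK =
    API.getNSNumberFactoryMethodKind(Ctx.IntTy);
if (MK.hasValue()) {
  // For IntTy this is NSAPI::NSNumberWithInt, whose class-method
  // selector is "numberWithInt:".
  Selector Sel = API.getNSNumberLiteralSelector(MK.getValue(),
                                                /*Instance=*/false);
  (void)Sel;
}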
diff --git a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
index 1ff2e71..dbf267b 100644
--- a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
@@ -130,7 +130,7 @@ NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
return TypeSpecWithTemplate;
}
- return Global;
+ llvm_unreachable("Invalid NNS Kind!");
}
/// \brief Retrieve the namespace stored in this nested name
@@ -170,8 +170,7 @@ bool NestedNameSpecifier::isDependent() const {
return getAsType()->isDependentType();
}
- // Necessary to suppress a GCC warning.
- return false;
+ llvm_unreachable("Invalid NNS Kind!");
}
/// \brief Whether this nested name specifier refers to a dependent
@@ -191,9 +190,8 @@ bool NestedNameSpecifier::isInstantiationDependent() const {
case TypeSpecWithTemplate:
return getAsType()->isInstantiationDependentType();
}
-
- // Necessary to suppress a GCC warning.
- return false;
+
+ llvm_unreachable("Invalid NNS Kind!");
}
bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
@@ -211,8 +209,7 @@ bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
return getAsType()->containsUnexpandedParameterPack();
}
- // Necessary to suppress a GCC warning.
- return false;
+ llvm_unreachable("Invalid NNS Kind!");
}
/// \brief Print this nested name specifier to the given output
@@ -229,6 +226,9 @@ NestedNameSpecifier::print(raw_ostream &OS,
break;
case Namespace:
+ if (getAsNamespace()->isAnonymousNamespace())
+ return;
+
OS << getAsNamespace()->getName();
break;
@@ -379,8 +379,8 @@ SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
LoadSourceLocation(Data, Offset + sizeof(void*)));
}
}
-
- return SourceRange();
+
+ llvm_unreachable("Invalid NNS Kind!");
}
TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
@@ -434,9 +434,6 @@ namespace {
}
}
-NestedNameSpecifierLocBuilder::NestedNameSpecifierLocBuilder()
- : Representation(0), Buffer(0), BufferSize(0), BufferCapacity(0) { }
-
NestedNameSpecifierLocBuilder::
NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other)
: Representation(Other.Representation), Buffer(0),
@@ -499,11 +496,6 @@ operator=(const NestedNameSpecifierLocBuilder &Other) {
return *this;
}
-NestedNameSpecifierLocBuilder::~NestedNameSpecifierLocBuilder() {
- if (BufferCapacity)
- free(Buffer);
-}
-
void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
SourceLocation TemplateKWLoc,
TypeLoc TL,
diff --git a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
index 5eef83a..64016d9 100644
--- a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
@@ -26,6 +26,10 @@ static void BuildParentMap(MapTy& M, Stmt* S) {
M[*I] = S;
BuildParentMap(M, *I);
}
+
+ // Also include the source expr tree of an OpaqueValueExpr in the map.
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S))
+ BuildParentMap(M, OVE->getSourceExpr());
}
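// Rationale (sketch): an OpaqueValueExpr reports no children, so the
// ordinary traversal above never reaches its source expression. Without
// the extra walk, nodes reachable only through getSourceExpr(), such as
// those inside a PseudoObjectExpr, would have no parent-map entry.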
ParentMap::ParentMap(Stmt* S) : Impl(0) {
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
index ccc591a..0114eba 100644
--- a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
@@ -43,7 +43,8 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
// Constructor for C++ records.
ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CharUnits size, CharUnits alignment,
- CharUnits vbptroffset, CharUnits datasize,
+ CharUnits vfptroffset, CharUnits vbptroffset,
+ CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount,
CharUnits nonvirtualsize,
@@ -68,6 +69,7 @@ ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
CXXInfo->BaseOffsets = BaseOffsets;
CXXInfo->VBaseOffsets = VBaseOffsets;
+ CXXInfo->VFPtrOffset = vfptroffset;
CXXInfo->VBPtrOffset = vbptroffset;
#ifndef NDEBUG
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
index bbd3fc0..c2d9294 100644
--- a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -558,6 +558,14 @@ protected:
SmallVector<uint64_t, 16> FieldOffsets;
+ /// \brief Whether the external AST source has provided a layout for this
+ /// record.
+ unsigned ExternalLayout : 1;
+
+ /// \brief Whether we need to infer alignment, even when we have an
+ /// externally-provided layout.
+ unsigned InferAlignment : 1;
+
/// Packed - Whether the record is packed or not.
unsigned Packed : 1;
@@ -592,6 +600,9 @@ protected:
/// out is virtual.
bool PrimaryBaseIsVirtual;
+ /// VFPtrOffset - Virtual function table offset. Only for MS layout.
+ CharUnits VFPtrOffset;
+
/// VBPtrOffset - Virtual base table offset. Only for MS layout.
CharUnits VBPtrOffset;
@@ -615,19 +626,47 @@ protected:
/// avoid visiting virtual bases more than once.
llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
- RecordLayoutBuilder(const ASTContext &Context, EmptySubobjectMap
- *EmptySubobjects, CharUnits Alignment)
+ /// \brief Externally-provided size.
+ uint64_t ExternalSize;
+
+ /// \brief Externally-provided alignment.
+ uint64_t ExternalAlign;
+
+ /// \brief Externally-provided field offsets.
+ llvm::DenseMap<const FieldDecl *, uint64_t> ExternalFieldOffsets;
+
+ /// \brief Externally-provided direct, non-virtual base offsets.
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalBaseOffsets;
+
+ /// \brief Externally-provided virtual base offsets.
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalVirtualBaseOffsets;
+
+ RecordLayoutBuilder(const ASTContext &Context,
+ EmptySubobjectMap *EmptySubobjects)
: Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
- Alignment(Alignment), UnpackedAlignment(Alignment),
- Packed(false), IsUnion(false),
- IsMac68kAlign(false), IsMsStruct(false),
+ Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
+ ExternalLayout(false), InferAlignment(false),
+ Packed(false), IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
UnfilledBitsInLastByte(0), MaxFieldAlignment(CharUnits::Zero()),
DataSize(0), NonVirtualSize(CharUnits::Zero()),
NonVirtualAlignment(CharUnits::One()),
ZeroLengthBitfield(0), PrimaryBase(0),
- PrimaryBaseIsVirtual(false), VBPtrOffset(CharUnits::fromQuantity(-1)),
+ PrimaryBaseIsVirtual(false),
+ VFPtrOffset(CharUnits::fromQuantity(-1)),
+ VBPtrOffset(CharUnits::fromQuantity(-1)),
FirstNearlyEmptyVBase(0) { }
+ /// Reset this RecordLayoutBuilder to a fresh state, using the given
+ /// alignment as the initial alignment. This is used for the
+ /// correct layout of vb-table pointers in MSVC.
+ void resetWithTargetAlignment(CharUnits TargetAlignment) {
+ const ASTContext &Context = this->Context;
+ EmptySubobjectMap *EmptySubobjects = this->EmptySubobjects;
+ this->~RecordLayoutBuilder();
+ new (this) RecordLayoutBuilder(Context, EmptySubobjects);
+ Alignment = UnpackedAlignment = TargetAlignment;
+ }
+
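+ // Illustrative sketch (not part of the patch) of the destroy-and-placement-new
+ // reset used by resetWithTargetAlignment(): rerunning the constructor in place
+ // returns every member to its default without a hand-maintained list of
+ // assignments, after which the one value that must differ is applied.
+ //
+ //   #include <new>
+ //
+ //   struct Builder {
+ //     unsigned Size;
+ //     unsigned Alignment;
+ //     Builder() : Size(0), Alignment(1) {}
+ //
+ //     void resetWithAlignment(unsigned A) {
+ //       this->~Builder();     // end the old object's lifetime in place
+ //       new (this) Builder(); // construct a fresh one in the same storage
+ //       Alignment = A;        // seed the single value surviving the reset
+ //     }
+ //   };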
void Layout(const RecordDecl *D);
void Layout(const CXXRecordDecl *D);
void Layout(const ObjCInterfaceDecl *D);
@@ -637,8 +676,12 @@ protected:
void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
bool FieldPacked, const FieldDecl *D);
void LayoutBitField(const FieldDecl *D);
+
+ bool isMicrosoftCXXABI() const {
+ return Context.getTargetInfo().getCXXABI() == CXXABI_Microsoft;
+ }
+
void MSLayoutVirtualBases(const CXXRecordDecl *RD);
- void MSLayout(const CXXRecordDecl *RD);
/// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator;
@@ -669,7 +712,7 @@ protected:
void SelectPrimaryVBase(const CXXRecordDecl *RD);
- CharUnits GetVirtualPointersSize(const CXXRecordDecl *RD) const;
+ void EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign);
/// LayoutNonVirtualBases - Determines the primary base class (if any) and
/// lays it out. Will then proceed to lay out all non-virtual base classes.
@@ -681,6 +724,10 @@ protected:
void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
CharUnits Offset);
+ bool needsVFTable(const CXXRecordDecl *RD) const;
+ bool hasNewVirtualFunction(const CXXRecordDecl *RD) const;
+ bool isPossiblePrimaryBase(const CXXRecordDecl *Base) const;
+
/// LayoutVirtualBases - Lays out all the virtual bases.
void LayoutVirtualBases(const CXXRecordDecl *RD,
const CXXRecordDecl *MostDerivedClass);
@@ -704,6 +751,14 @@ protected:
UpdateAlignment(NewAlignment, NewAlignment);
}
+ /// \brief Retrieve the externally-supplied field offset for the given
+ /// field.
+ ///
+ /// \param Field The field whose offset is being queried.
+ /// \param ComputedOffset The offset that we've computed for this field.
+ uint64_t updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset);
+
void CheckFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset,
uint64_t UnpackedOffset, unsigned UnpackedAlign,
bool isPacked, const FieldDecl *D);
@@ -730,20 +785,10 @@ protected:
void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); }
void setDataSize(uint64_t NewSize) { DataSize = NewSize; }
- bool HasVBPtr(const CXXRecordDecl *RD) const;
- bool HasNewVirtualFunction(const CXXRecordDecl *RD) const;
-
- /// Add vbptr or vfptr to layout.
- void AddVPointer();
-
RecordLayoutBuilder(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
void operator=(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
public:
static const CXXMethodDecl *ComputeKeyFunction(const CXXRecordDecl *RD);
-
- virtual ~RecordLayoutBuilder() { }
-
- CharUnits GetVBPtrOffset() const { return VBPtrOffset; }
};
} // end anonymous namespace
@@ -778,11 +823,6 @@ RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
}
}
-CharUnits
-RecordLayoutBuilder::GetVirtualPointersSize(const CXXRecordDecl *RD) const {
- return Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
-}
-
/// DeterminePrimaryBase - Determine the primary base of the given class.
void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
// If the class isn't dynamic, it won't have a primary base.
@@ -805,7 +845,7 @@ void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
const CXXRecordDecl *Base =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- if (Base->isDynamicClass()) {
+ if (isPossiblePrimaryBase(Base)) {
// We found it.
PrimaryBase = Base;
PrimaryBaseIsVirtual = false;
@@ -813,44 +853,30 @@ void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
}
}
- // Otherwise, it is the first nearly empty virtual base that is not an
- // indirect primary virtual base class, if one exists.
+ // The Microsoft ABI doesn't have primary virtual bases.
+ if (isMicrosoftCXXABI()) {
+ assert(!PrimaryBase && "Should not get here with a primary base!");
+ return;
+ }
+
+ // Under the Itanium ABI, if there is no non-virtual primary base class,
+ // try to compute the primary virtual base. The primary virtual base is
+ // the first nearly empty virtual base that is not an indirect primary
+ // virtual base class, if one exists.
if (RD->getNumVBases() != 0) {
SelectPrimaryVBase(RD);
if (PrimaryBase)
return;
}
- // Otherwise, it is the first nearly empty virtual base that is not an
- // indirect primary virtual base class, if one exists.
+ // Otherwise, it is the first indirect primary virtual base class, if one exists.
if (FirstNearlyEmptyVBase) {
PrimaryBase = FirstNearlyEmptyVBase;
PrimaryBaseIsVirtual = true;
return;
}
- // Otherwise there is no primary base class.
assert(!PrimaryBase && "Should not get here with a primary base!");
-
- // Allocate the virtual table pointer at offset zero.
- assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
-
- // Update the size.
- setSize(getSize() + GetVirtualPointersSize(RD));
- setDataSize(getSize());
-
- CharUnits UnpackedBaseAlign =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
- CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
-
- // The maximum field alignment overrides base align.
- if (!MaxFieldAlignment.isZero()) {
- BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
- UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
- }
-
- // Update the alignment.
- UpdateAlignment(BaseAlign, UnpackedBaseAlign);
}
BaseSubobjectInfo *
@@ -959,6 +985,24 @@ void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
}
void
+RecordLayoutBuilder::EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign) {
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // The maximum field alignment overrides base align.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
+
+ // Round up the current record size to pointer alignment.
+ setSize(getSize().RoundUpToAlignment(BaseAlign));
+ setDataSize(getSize());
+
+ // Update the alignment.
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+}
+
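+// Illustrative sketch (hypothetical helper, not clang API) of the arithmetic
+// in EnsureVTablePointerAlignment(): clamp the pointer alignment by any
+// #pragma pack maximum, then round the running record size up to that
+// boundary before the vtable pointer is appended.
+//
+//   #include <algorithm>
+//
+//   unsigned roundUpTo(unsigned Value, unsigned Align) {
+//     return (Value + Align - 1) / Align * Align;
+//   }
+//
+//   unsigned vtablePointerOffset(unsigned CurrentSize, unsigned PtrAlign,
+//                                unsigned MaxFieldAlign /* 0 = none */) {
+//     unsigned Align = MaxFieldAlign ? std::min(PtrAlign, MaxFieldAlign)
+//                                    : PtrAlign;
+//     return roundUpTo(CurrentSize, Align); // e.g. size 3, align 4 -> 4
+//   }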
+void
RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
// Then, determine the primary base class.
DeterminePrimaryBase(RD);
@@ -990,20 +1034,45 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
LayoutNonVirtualBase(PrimaryBaseInfo);
}
+
+ // If this class needs a vtable/vf-table and didn't get one from a
+ // primary base, add it in now.
+ } else if (needsVFTable(RD)) {
+ assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
+ CharUnits PtrWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits PtrAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+ EnsureVTablePointerAlignment(PtrAlign);
+ if (isMicrosoftCXXABI())
+ VFPtrOffset = getSize();
+ setSize(getSize() + PtrWidth);
+ setDataSize(getSize());
}
+ bool HasDirectVirtualBases = false;
+ bool HasNonVirtualBaseWithVBTable = false;
+
// Now lay out the non-virtual bases.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
- // Ignore virtual bases.
- if (I->isVirtual())
+ // Ignore virtual bases, but remember that we saw one.
+ if (I->isVirtual()) {
+ HasDirectVirtualBases = true;
continue;
+ }
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Remember if this base has virtual bases itself.
+ if (BaseDecl->getNumVBases())
+ HasNonVirtualBaseWithVBTable = true;
- // Skip the primary base.
+ // Skip the primary base, because we've already laid it out. The
+ // !PrimaryBaseIsVirtual check is required because we might have a
+ // non-virtual base of the same type as a primary virtual base.
if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual)
continue;
@@ -1013,6 +1082,37 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
LayoutNonVirtualBase(BaseInfo);
}
+
+ // In the MS ABI, add the vb-table pointer if we need one, which is
+ // whenever we have a virtual base and we can't re-use a vb-table
+ // pointer from a non-virtual base.
+ if (isMicrosoftCXXABI() &&
+ HasDirectVirtualBases && !HasNonVirtualBaseWithVBTable) {
+ CharUnits PtrWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits PtrAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+
+ // MSVC potentially over-aligns the vb-table pointer by giving it
+ // the max alignment of all the non-virtual objects in the class.
+ // This is completely unnecessary, but we're not here to pass
+ // judgment.
+ //
+ // Note that we've only laid out the non-virtual bases, so on the
+ // first pass Alignment won't be set correctly here, but if the
+ // vb-table doesn't end up aligned correctly we'll come through
+ // and redo the layout from scratch with the right alignment.
+ //
+ // TODO: Instead of doing this, just lay out the fields as if the
+ // vb-table were at offset zero, then retroactively bump the field
+ // offsets up.
+ PtrAlign = std::max(PtrAlign, Alignment);
+
+ EnsureVTablePointerAlignment(PtrAlign);
+ VBPtrOffset = getSize();
+ setSize(getSize() + PtrWidth);
+ setDataSize(getSize());
+ }
}
void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
@@ -1061,43 +1161,81 @@ RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
}
}
-void RecordLayoutBuilder::AddVPointer() {
- CharUnits PtrWidth =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- setSize(getSize() + PtrWidth);
- setDataSize(getSize());
-
- if (Alignment > PtrWidth) {
- setSize(getSize() + (Alignment - PtrWidth));
- setDataSize(getSize());
- }
+/// needsVFTable - Return true if this class needs a vtable or vf-table
+/// when laid out as a base class. These are treated the same because
+/// they're both always laid out at offset zero.
+///
+/// This function assumes that the class has no primary base.
+bool RecordLayoutBuilder::needsVFTable(const CXXRecordDecl *RD) const {
+ assert(!PrimaryBase);
+
+ // In the Itanium ABI, every dynamic class needs a vtable: even if
+ // this class has no virtual functions as a base class (i.e. it's
+ // non-polymorphic or only has virtual functions from virtual
+ // bases), it still needs a vtable to locate its virtual bases.
+ if (!isMicrosoftCXXABI())
+ return RD->isDynamicClass();
+
+ // In the MS ABI, we need a vfptr if the class has virtual functions
+ // other than those declared by its virtual bases. The AST doesn't
+ // tell us that directly, and checking manually for virtual
+ // functions that aren't overrides is expensive, but there are
+ // some important shortcuts:
+
+ // - Non-polymorphic classes have no virtual functions at all.
+ if (!RD->isPolymorphic()) return false;
+
+ // - Polymorphic classes with no virtual bases must either declare
+ // virtual functions directly or inherit them, but in the latter
+ // case we would have a primary base.
+ if (RD->getNumVBases() == 0) return true;
+
+ return hasNewVirtualFunction(RD);
}
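// Illustrative sketch (hypothetical helper, not clang API) restating the
// MS-ABI shortcut chain above over plain flags: a class needs its own vfptr
// only when it has virtual functions that no static-offset base can host.
namespace {
bool needsOwnVFPtr(bool IsPolymorphic, unsigned NumVBases,
                   bool DeclaresNewVirtualFunction) {
  if (!IsPolymorphic)
    return false; // no virtual functions anywhere
  if (NumVBases == 0)
    return true;  // otherwise a primary base would have been found
  return DeclaresNewVirtualFunction; // the expensive check, done last
}
}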
+/// hasNewVirtualFunction - Does the given polymorphic class declare a
+/// virtual function that does not override a method from any of its
+/// base classes?
bool
-RecordLayoutBuilder::HasNewVirtualFunction(const CXXRecordDecl *RD) const {
+RecordLayoutBuilder::hasNewVirtualFunction(const CXXRecordDecl *RD) const {
+ assert(RD->isPolymorphic());
+ if (!RD->getNumBases())
+ return true;
+
for (CXXRecordDecl::method_iterator method = RD->method_begin();
method != RD->method_end();
++method) {
- if (method->isVirtual() &&
- !method->size_overridden_methods()) {
+ if (method->isVirtual() && !method->size_overridden_methods()) {
return true;
}
}
return false;
}
-bool
-RecordLayoutBuilder::HasVBPtr(const CXXRecordDecl *RD) const {
- if (!RD->getNumBases())
- return false;
-
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- if (!I->isVirtual()) {
- return false;
- }
- }
- return true;
+/// isPossiblePrimaryBase - Is the given base class an acceptable
+/// primary base class?
+bool
+RecordLayoutBuilder::isPossiblePrimaryBase(const CXXRecordDecl *Base) const {
+ // In the Itanium ABI, a class can be a primary base class if it has
+ // a vtable for any reason.
+ if (!isMicrosoftCXXABI())
+ return Base->isDynamicClass();
+
+ // In the MS ABI, a class can only be a primary base class if it
+ // provides a vf-table at a static offset. That means it has to be
+ // a non-virtual base. The existence of a separate vb-table means
+ // that it's possible to get virtual functions only from a virtual
+ // base, which we have to guard against.
+
+ // First off, it has to have virtual functions.
+ if (!Base->isPolymorphic()) return false;
+
+ // If it has no virtual bases, then everything is at a static offset.
+ if (!Base->getNumVBases()) return true;
+
+ // Okay, just ask the base class's layout.
+ return (Context.getASTRecordLayout(Base).getVFPtrOffset()
+ != CharUnits::fromQuantity(-1));
}
void
@@ -1121,7 +1259,7 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
"Cannot layout class with dependent bases.");
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
if (I->isVirtual()) {
if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
@@ -1149,6 +1287,23 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
}
}
+void RecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD) {
+
+ if (!RD->getNumVBases())
+ return;
+
+ // This is substantially simplified because there are no virtual
+ // primary bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find virtual base info!");
+
+ LayoutVirtualBase(BaseInfo);
+ }
+}
+
void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
assert(!Base->Derived && "Trying to lay out a primary virtual base!");
@@ -1165,8 +1320,31 @@ void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
+
+ CharUnits Offset;
+
+ // Query the external layout to see if it provides an offset.
+ bool HasExternalLayout = false;
+ if (ExternalLayout) {
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits>::iterator Known;
+ if (Base->IsVirtual) {
+ Known = ExternalVirtualBaseOffsets.find(Base->Class);
+ if (Known != ExternalVirtualBaseOffsets.end()) {
+ Offset = Known->second;
+ HasExternalLayout = true;
+ }
+ } else {
+ Known = ExternalBaseOffsets.find(Base->Class);
+ if (Known != ExternalBaseOffsets.end()) {
+ Offset = Known->second;
+ HasExternalLayout = true;
+ }
+ }
+ }
+
// If we have an empty base class, try to place it at offset 0.
if (Base->Class->isEmpty() &&
+ (!HasExternalLayout || Offset == CharUnits::Zero()) &&
EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
setSize(std::max(getSize(), Layout.getSize()));
@@ -1182,13 +1360,19 @@ CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
}
- // Round up the current record size to the base's alignment boundary.
- CharUnits Offset = getDataSize().RoundUpToAlignment(BaseAlign);
-
- // Try to place the base.
- while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
- Offset += BaseAlign;
+ if (!HasExternalLayout) {
+ // Round up the current record size to the base's alignment boundary.
+ Offset = getDataSize().RoundUpToAlignment(BaseAlign);
+ // Try to place the base.
+ while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
+ Offset += BaseAlign;
+ } else {
+ bool Allowed = EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset);
+ (void)Allowed;
+ assert(Allowed && "Base subobject externally placed at overlapping offset");
+ }
+
if (!Base->Class->isEmpty()) {
// Update the data size.
setDataSize(Offset + Layout.getNonVirtualSize());
@@ -1212,7 +1396,7 @@ void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
IsMsStruct = D->hasAttr<MsStructAttr>();
// Honor the default struct packing maximum alignment flag.
- if (unsigned DefaultMaxFieldAlignment = Context.getLangOptions().PackStruct) {
+ if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) {
MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
}
@@ -1231,6 +1415,28 @@ void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
if (unsigned MaxAlign = D->getMaxAlignment())
UpdateAlignment(Context.toCharUnitsFromBits(MaxAlign));
}
+
+ // If there is an external AST source, ask it for the various offsets.
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ if (ExternalASTSource *External = Context.getExternalSource()) {
+ ExternalLayout = External->layoutRecordType(RD,
+ ExternalSize,
+ ExternalAlign,
+ ExternalFieldOffsets,
+ ExternalBaseOffsets,
+ ExternalVirtualBaseOffsets);
+
+ // Update based on external alignment.
+ if (ExternalLayout) {
+ if (ExternalAlign > 0) {
+ Alignment = Context.toCharUnitsFromBits(ExternalAlign);
+ UnpackedAlignment = Alignment;
+ } else {
+ // The external source didn't have alignment information; infer it.
+ InferAlignment = true;
+ }
+ }
+ }
}
void RecordLayoutBuilder::Layout(const RecordDecl *D) {
@@ -1243,11 +1449,6 @@ void RecordLayoutBuilder::Layout(const RecordDecl *D) {
}
void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
- if (Context.getTargetInfo().getCXXABI() == CXXABI_Microsoft) {
- MSLayout(RD);
- return ;
- }
-
InitializeLayout(RD);
// Lay out the vtable and the non-virtual bases.
@@ -1260,13 +1461,27 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
Context.getTargetInfo().getCharAlign()));
NonVirtualAlignment = Alignment;
- // Lay out the virtual bases and add the primary virtual base offsets.
- LayoutVirtualBases(RD, RD);
+ if (isMicrosoftCXXABI() &&
+ NonVirtualSize != NonVirtualSize.RoundUpToAlignment(Alignment)) {
+ CharUnits AlignMember =
+ NonVirtualSize.RoundUpToAlignment(Alignment) - NonVirtualSize;
- VisitedVirtualBases.clear();
+ setSize(getSize() + AlignMember);
+ setDataSize(getSize());
- // Finally, round the size of the total struct up to the alignment of the
- // struct itself.
+ NonVirtualSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.getTargetInfo().getCharAlign()));
+
+ MSLayoutVirtualBases(RD);
+
+ } else {
+ // Lay out the virtual bases and add the primary virtual base offsets.
+ LayoutVirtualBases(RD, RD);
+ }
+
+ // Finally, round the size of the total struct up to the alignment
+ // of the struct itself.
FinishLayout(RD);
#ifndef NDEBUG
@@ -1350,13 +1565,21 @@ void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
uint64_t TypeSize = FieldInfo.first;
unsigned FieldAlign = FieldInfo.second;
// This check is needed for 'long long' in -m32 mode.
- if (TypeSize > FieldAlign)
+ if (TypeSize > FieldAlign &&
+ (Context.hasSameType(FD->getType(),
+ Context.UnsignedLongLongTy)
+ ||Context.hasSameType(FD->getType(),
+ Context.LongLongTy)))
FieldAlign = TypeSize;
FieldInfo = Context.getTypeInfo(LastFD->getType());
uint64_t TypeSizeLastFD = FieldInfo.first;
unsigned FieldAlignLastFD = FieldInfo.second;
// This check is needed for 'long long' in -m32 mode.
- if (TypeSizeLastFD > FieldAlignLastFD)
+ if (TypeSizeLastFD > FieldAlignLastFD &&
+ (Context.hasSameType(LastFD->getType(),
+ Context.UnsignedLongLongTy)
+ || Context.hasSameType(LastFD->getType(),
+ Context.LongLongTy)))
FieldAlignLastFD = TypeSizeLastFD;
if (TypeSizeLastFD != TypeSize) {
@@ -1435,7 +1658,7 @@ void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
uint64_t TypeSize,
bool FieldPacked,
const FieldDecl *D) {
- assert(Context.getLangOptions().CPlusPlus &&
+ assert(Context.getLangOpts().CPlusPlus &&
"Can only have wide bit-fields in C++!");
// Itanium C++ ABI 2.4:
@@ -1507,7 +1730,10 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
unsigned FieldAlign = FieldInfo.second;
// This check is needed for 'long long' in -m32 mode.
- if (IsMsStruct && (TypeSize > FieldAlign))
+ if (IsMsStruct && (TypeSize > FieldAlign) &&
+ (Context.hasSameType(D->getType(),
+ Context.UnsignedLongLongTy)
+ || Context.hasSameType(D->getType(), Context.LongLongTy)))
FieldAlign = TypeSize;
if (ZeroLengthBitfield) {
@@ -1557,18 +1783,21 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
UnpackedFieldAlign = std::max(UnpackedFieldAlign, D->getMaxAlignment());
// The maximum field alignment overrides the aligned attribute.
- if (!MaxFieldAlignment.isZero()) {
+ if (!MaxFieldAlignment.isZero() && FieldSize != 0) {
unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment);
FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
}
// Check if we need to add padding to give the field the correct alignment.
- if (FieldSize == 0 || (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)
+ if (FieldSize == 0 ||
+ (MaxFieldAlignment.isZero() &&
+ (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize))
FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
if (FieldSize == 0 ||
- (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize)
+ (MaxFieldAlignment.isZero() &&
+ (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
UnpackedFieldAlign);
@@ -1580,11 +1809,15 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
if (!IsMsStruct)
ZeroLengthBitfield = 0;
+ if (ExternalLayout)
+ FieldOffset = updateExternalFieldOffset(D, FieldOffset);
+
// Place this field at the current location.
FieldOffsets.push_back(FieldOffset);
- CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
- UnpackedFieldAlign, FieldPacked, D);
+ if (!ExternalLayout)
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
+ UnpackedFieldAlign, FieldPacked, D);
// Update DataSize to include the last byte containing (part of) the bitfield.
if (IsUnion) {
@@ -1606,7 +1839,7 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
Context.toCharUnitsFromBits(UnpackedFieldAlign));
}
-void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
+void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
if (D->isBitField()) {
LayoutBitField(D);
return;
@@ -1667,7 +1900,7 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
ZeroLengthBitfield = 0;
}
- if (Context.getLangOptions().MSBitfields || IsMsStruct) {
+ if (Context.getLangOpts().MSBitfields || IsMsStruct) {
// If MS bitfield layout is required, figure out what type is being
// laid out and align the field to the width of that type.
@@ -1705,136 +1938,56 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
UnpackedFieldOffset =
UnpackedFieldOffset.RoundUpToAlignment(UnpackedFieldAlign);
- if (!IsUnion && EmptySubobjects) {
- // Check if we can place the field at this offset.
- while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
- // We couldn't place the field at the offset. Try again at a new offset.
- FieldOffset += FieldAlign;
+ if (ExternalLayout) {
+ FieldOffset = Context.toCharUnitsFromBits(
+ updateExternalFieldOffset(D, Context.toBits(FieldOffset)));
+
+ if (!IsUnion && EmptySubobjects) {
+ // Record the fact that we're placing a field at this offset.
+ bool Allowed = EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset);
+ (void)Allowed;
+ assert(Allowed && "Externally-placed field cannot be placed here");
+ }
+ } else {
+ if (!IsUnion && EmptySubobjects) {
+ // Check if we can place the field at this offset.
+ while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
+ // We couldn't place the field at the offset. Try again at a new offset.
+ FieldOffset += FieldAlign;
+ }
}
}
-
+
// Place this field at the current location.
FieldOffsets.push_back(Context.toBits(FieldOffset));
- CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset,
- Context.toBits(UnpackedFieldOffset),
- Context.toBits(UnpackedFieldAlign), FieldPacked, D);
+ if (!ExternalLayout)
+ CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset,
+ Context.toBits(UnpackedFieldOffset),
+ Context.toBits(UnpackedFieldAlign), FieldPacked, D);
// Reserve space for this field.
uint64_t FieldSizeInBits = Context.toBits(FieldSize);
if (IsUnion)
- setSize(std::max(getSizeInBits(), FieldSizeInBits));
+ setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits));
else
- setSize(FieldOffset + FieldSize);
+ setDataSize(FieldOffset + FieldSize);
- // Update the data size.
- setDataSize(getSizeInBits());
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
// Remember max struct/class alignment.
UpdateAlignment(FieldAlign, UnpackedFieldAlign);
}
-void RecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD) {
-
- if (!RD->getNumVBases())
+void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
+ if (ExternalLayout) {
+ setSize(ExternalSize);
return;
-
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I) {
-
- const CXXRecordDecl* BaseDecl = I->getType()->getAsCXXRecordDecl();
- const BaseSubobjectInfo* BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
-
- assert(BaseInfo && "Did not find virtual base info!");
-
- LayoutVirtualBase(BaseInfo);
- }
-}
-
-void RecordLayoutBuilder::MSLayout(const CXXRecordDecl *RD) {
-
- bool IsVBPtrAddedToLayout = false;
-
- InitializeLayout(RD);
-
- if (HasVBPtr(RD)) {
- // If all bases are virtual and the class declares a new virtual function,
- // MSVC builds a vfptr.
- if (HasNewVirtualFunction(RD)) {
- AddVPointer();
- }
-
- VBPtrOffset = getSize();
- AddVPointer();
- IsVBPtrAddedToLayout = true;
-
- ComputeBaseSubobjectInfo(RD);
- } else {
- LayoutNonVirtualBases(RD);
- }
-
- if (RD->getNumVBases() &&
- !IsVBPtrAddedToLayout) {
- // Add vbptr.
- VBPtrOffset = getSize();
- AddVPointer();
- }
-
- LayoutFields(RD);
-
- NonVirtualSize = Context.toCharUnitsFromBits(
- llvm::RoundUpToAlignment(getSizeInBits(),
- Context.getTargetInfo().getCharAlign()));
- NonVirtualAlignment = Alignment;
-
- if (NonVirtualSize != NonVirtualSize.RoundUpToAlignment(Alignment)) {
- CharUnits AlignMember =
- NonVirtualSize.RoundUpToAlignment(Alignment) - NonVirtualSize;
-
- setSize(getSize() + AlignMember);
- setDataSize(getSize());
-
- NonVirtualSize = Context.toCharUnitsFromBits(
- llvm::RoundUpToAlignment(getSizeInBits(),
- Context.getTargetInfo().getCharAlign()));
- }
-
- MSLayoutVirtualBases(RD);
-
- VisitedVirtualBases.clear();
-
- // Finally, round the size of the total struct up to the alignment of the
- // struct itself.
- if (!RD->getNumVBases())
- FinishLayout(RD);
-
-#ifndef NDEBUG
- // Check that we have base offsets for all bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- assert(Bases.count(BaseDecl) && "Did not find base offset!");
- }
-
- // And all virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- assert(VBases.count(BaseDecl) && "Did not find base offset!");
}
-#endif
-}
-
-void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
+
// In C++, records cannot be of size 0.
- if (Context.getLangOptions().CPlusPlus && getSizeInBits() == 0) {
+ if (Context.getLangOpts().CPlusPlus && getSizeInBits() == 0) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
// Compatibility with gcc requires a class (pod or non-pod)
// which is not empty but of size 0; such as having fields of
@@ -1845,6 +1998,13 @@ void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
else
setSize(CharUnits::One());
}
+
+ // MSVC doesn't round up to the alignment of the record with virtual bases.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (isMicrosoftCXXABI() && RD->getNumVBases())
+ return;
+ }
+
// Finally, round the size of the record up to the alignment of the
// record itself.
uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastByte;
@@ -1881,8 +2041,9 @@ void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment,
CharUnits UnpackedNewAlignment) {
- // The alignment is not modified when using 'mac68k' alignment.
- if (IsMac68kAlign)
+ // The alignment is not modified when using 'mac68k' alignment or when
+ // we have an externally-supplied layout that also provides overall alignment.
+ if (IsMac68kAlign || (ExternalLayout && !InferAlignment))
return;
if (NewAlignment > Alignment) {
@@ -1898,6 +2059,25 @@ void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment,
}
}
+uint64_t
+RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset) {
+ assert(ExternalFieldOffsets.find(Field) != ExternalFieldOffsets.end() &&
+ "Field does not have an external offset");
+
+ uint64_t ExternalFieldOffset = ExternalFieldOffsets[Field];
+
+ if (InferAlignment && ExternalFieldOffset < ComputedOffset) {
+ // The externally-supplied field offset is before the field offset we
+ // computed. Assume that the structure is packed.
+ Alignment = CharUnits::fromQuantity(1);
+ InferAlignment = false;
+ }
+
+ // Use the externally-supplied field offset.
+ return ExternalFieldOffset;
+}
+
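+// Illustrative sketch (hypothetical standalone function, not clang API) of
+// the inference rule above: when the externally supplied offset (say, from
+// debug info) is smaller than natural layout would allow, the record must
+// have been packed, so the inferred alignment collapses to one byte.
+//
+//   #include <cstdint>
+//
+//   uint64_t reconcileFieldOffset(uint64_t ExternalBits,
+//                                 uint64_t ComputedBits,
+//                                 uint64_t &AlignmentChars,
+//                                 bool &InferAlignment) {
+//     if (InferAlignment && ExternalBits < ComputedBits) {
+//       AlignmentChars = 1;     // assume a packed layout
+//       InferAlignment = false; // settled for this record
+//     }
+//     return ExternalBits;      // the external offset always wins
+//   }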
void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
uint64_t UnpaddedOffset,
uint64_t UnpackedOffset,
@@ -2007,6 +2187,10 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
// as soon as we begin to parse the definition. That definition is
// not a complete definition (which is what isDefinition() tests)
// until we *finish* parsing the definition.
+
+ if (D->hasExternalLexicalStorage() && !D->getDefinition())
+ getExternalSource()->CompleteType(const_cast<RecordDecl*>(D));
+
D = D->getDefinition();
assert(D && "Cannot get layout of forward declarations!");
assert(D->isCompleteDefinition() && "Cannot layout type before complete!");
@@ -2021,36 +2205,21 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
EmptySubobjectMap EmptySubobjects(*this, RD);
-
- llvm::OwningPtr<RecordLayoutBuilder> Builder;
- CharUnits TargetAlign = CharUnits::One();
-
- Builder.reset(new RecordLayoutBuilder(*this,
- &EmptySubobjects,
- TargetAlign));
-
- // Recover resources if we crash before exiting this method.
- llvm::CrashRecoveryContextCleanupRegistrar<RecordLayoutBuilder>
- RecordBuilderCleanup(Builder.get());
-
- Builder->Layout(RD);
-
- TargetAlign = Builder->getAligment();
-
- if (getTargetInfo().getCXXABI() == CXXABI_Microsoft &&
- TargetAlign.getQuantity() > 4) {
- // MSVC rounds the vtable pointer to the struct alignment in what must
- // be a multi-pass operation. For now, let the builder figure out the
- // alignment and recalculate the layout once its known.
- Builder.reset(new RecordLayoutBuilder(*this,
- &EmptySubobjects,
- TargetAlign));
-
- Builder->Layout(RD);
-
- // Recover resources if we crash before exiting this method.
- llvm::CrashRecoveryContextCleanupRegistrar<RecordLayoutBuilder>
- RecordBuilderCleanup(Builder.get());
+ RecordLayoutBuilder Builder(*this, &EmptySubobjects);
+ Builder.Layout(RD);
+
+ // MSVC gives the vb-table pointer an alignment equal to that of
+ // the non-virtual part of the structure. That's an inherently
+ // multi-pass operation. If our first pass doesn't give us
+ // adequate alignment, try again with the specified minimum
+ // alignment. This is *much* more maintainable than computing the
+ // alignment in advance in a separately-coded pass; it's also
+ // significantly more efficient in the common case where the
+ // vb-table doesn't need extra padding.
+ if (Builder.VBPtrOffset != CharUnits::fromQuantity(-1) &&
+ (Builder.VBPtrOffset % Builder.NonVirtualAlignment) != 0) {
+ Builder.resetWithTargetAlignment(Builder.NonVirtualAlignment);
+ Builder.Layout(RD);
}
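// Illustrative sketch (hypothetical driver, not the clang entry point) of the
// retry just shown: lay out once with the default alignment, and only if the
// vb-table pointer missed the non-virtual alignment, redo the layout seeded
// with it.
#include <cstdint>

namespace {
struct LayoutResult {
  int64_t VBPtrOffset;     // -1 when there is no vb-table pointer
  int64_t NonVirtualAlign;
};

template <class BuilderT>
LayoutResult layoutWithRetry(BuilderT &Builder) {
  LayoutResult R = Builder.layout(/*SeedAlign=*/1);
  if (R.VBPtrOffset != -1 && (R.VBPtrOffset % R.NonVirtualAlign) != 0)
    R = Builder.layout(/*SeedAlign=*/R.NonVirtualAlign); // second pass
  return R;
}
}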
// FIXME: This is not always correct. See the part about bitfields at
@@ -2058,30 +2227,30 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
// FIXME: IsPODForThePurposeOfLayout should be stored in the record layout.
// This does not affect the calculations of MSVC layouts
bool IsPODForThePurposeOfLayout =
- (getTargetInfo().getCXXABI() == CXXABI_Microsoft) ||
- cast<CXXRecordDecl>(D)->isPOD();
+ (!Builder.isMicrosoftCXXABI() && cast<CXXRecordDecl>(D)->isPOD());
// FIXME: This should be done in FinalizeLayout.
CharUnits DataSize =
- IsPODForThePurposeOfLayout ? Builder->getSize() : Builder->getDataSize();
+ IsPODForThePurposeOfLayout ? Builder.getSize() : Builder.getDataSize();
CharUnits NonVirtualSize =
- IsPODForThePurposeOfLayout ? DataSize : Builder->NonVirtualSize;
+ IsPODForThePurposeOfLayout ? DataSize : Builder.NonVirtualSize;
NewEntry =
- new (*this) ASTRecordLayout(*this, Builder->getSize(),
- Builder->Alignment,
- Builder->GetVBPtrOffset(),
+ new (*this) ASTRecordLayout(*this, Builder.getSize(),
+ Builder.Alignment,
+ Builder.VFPtrOffset,
+ Builder.VBPtrOffset,
DataSize,
- Builder->FieldOffsets.data(),
- Builder->FieldOffsets.size(),
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size(),
NonVirtualSize,
- Builder->NonVirtualAlignment,
+ Builder.NonVirtualAlignment,
EmptySubobjects.SizeOfLargestEmptySubobject,
- Builder->PrimaryBase,
- Builder->PrimaryBaseIsVirtual,
- Builder->Bases, Builder->VBases);
+ Builder.PrimaryBase,
+ Builder.PrimaryBaseIsVirtual,
+ Builder.Bases, Builder.VBases);
} else {
- RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0, CharUnits::One());
+ RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
Builder.Layout(D);
NewEntry =
@@ -2094,9 +2263,9 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
ASTRecordLayouts[D] = NewEntry;
- if (getLangOptions().DumpRecordLayouts) {
+ if (getLangOpts().DumpRecordLayouts) {
llvm::errs() << "\n*** Dumping AST Record Layout\n";
- DumpRecordLayout(D, llvm::errs());
+ DumpRecordLayout(D, llvm::errs(), getLangOpts().DumpRecordLayoutsSimple);
}
return *NewEntry;
@@ -2113,6 +2282,28 @@ const CXXMethodDecl *ASTContext::getKeyFunction(const CXXRecordDecl *RD) {
return Entry;
}
+static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD) {
+ const ASTRecordLayout &Layout = C.getASTRecordLayout(FD->getParent());
+ return Layout.getFieldOffset(FD->getFieldIndex());
+}
+
+uint64_t ASTContext::getFieldOffset(const ValueDecl *VD) const {
+ uint64_t OffsetInBits;
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(VD)) {
+ OffsetInBits = ::getFieldOffset(*this, FD);
+ } else {
+ const IndirectFieldDecl *IFD = cast<IndirectFieldDecl>(VD);
+
+ OffsetInBits = 0;
+ for (IndirectFieldDecl::chain_iterator CI = IFD->chain_begin(),
+ CE = IFD->chain_end();
+ CI != CE; ++CI)
+ OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(*CI));
+ }
+
+ return OffsetInBits;
+}
+
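+// Illustrative sketch (hypothetical data, not the clang API) of the chain
+// walk above: for a member reached through anonymous structs/unions, the
+// absolute bit offset is the sum of each link's offset within its own parent.
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   int main() {
+//     // struct S { int a; struct { struct { int x; } in; } anon; };
+//     // chain for S::x on a 32-bit-int target: anon@32, in@0, x@0.
+//     const uint64_t Chain[] = { 32, 0, 0 };
+//     uint64_t OffsetInBits = 0;
+//     for (unsigned I = 0; I != sizeof(Chain) / sizeof(Chain[0]); ++I)
+//       OffsetInBits += Chain[I];
+//     assert(OffsetInBits == 32); // S::x lives 32 bits into S
+//     return 0;
+//   }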
/// getObjCLayout - Get or compute information about the layout of the
/// given interface.
///
@@ -2121,7 +2312,11 @@ const CXXMethodDecl *ASTContext::getKeyFunction(const CXXRecordDecl *RD) {
const ASTRecordLayout &
ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
const ObjCImplementationDecl *Impl) const {
- assert(!D->isForwardDecl() && "Invalid interface decl!");
+ // Retrieve the definition
+ if (D->hasExternalLexicalStorage() && !D->getDefinition())
+ getExternalSource()->CompleteType(const_cast<ObjCInterfaceDecl*>(D));
+ D = D->getDefinition();
+ assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!");
// Look up this layout, if already laid out, return what we have.
ObjCContainerDecl *Key =
@@ -2140,7 +2335,7 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
return getObjCLayout(D, 0);
}
- RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0, CharUnits::One());
+ RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
Builder.Layout(D);
const ASTRecordLayout *NewEntry =
@@ -2157,7 +2352,7 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
static void PrintOffset(raw_ostream &OS,
CharUnits Offset, unsigned IndentLevel) {
- OS << llvm::format("%4d | ", Offset.getQuantity());
+ OS << llvm::format("%4" PRId64 " | ", (int64_t)Offset.getQuantity());
OS.indent(IndentLevel * 2);
}
@@ -2180,22 +2375,16 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
IndentLevel++;
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ bool HasVfptr = Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1);
bool HasVbptr = Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1);
// Vtable pointer.
- if (RD->isDynamicClass() && !PrimaryBase) {
+ if (RD->isDynamicClass() && !PrimaryBase &&
+ C.getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
PrintOffset(OS, Offset, IndentLevel);
OS << '(' << *RD << " vtable pointer)\n";
}
- if (HasVbptr && !PrimaryBase) {
- PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
- OS << '(' << *RD << " vbtable pointer)\n";
-
- // one vbtable per class
- HasVbptr = false;
- }
-
// Dump (non-virtual) bases
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
@@ -2213,7 +2402,12 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
Base == PrimaryBase ? "(primary base)" : "(base)",
/*IncludeVirtualBases=*/false);
}
- // vbptr
+
+ // vfptr and vbptr (for Microsoft C++ ABI)
+ if (HasVfptr) {
+ PrintOffset(OS, Offset + Layout.getVFPtrOffset(), IndentLevel);
+ OS << '(' << *RD << " vftable pointer)\n";
+ }
if (HasVbptr) {
PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
OS << '(' << *RD << " vbtable pointer)\n";
@@ -2266,16 +2460,20 @@ static void DumpCXXRecordLayout(raw_ostream &OS,
}
void ASTContext::DumpRecordLayout(const RecordDecl *RD,
- raw_ostream &OS) const {
+ raw_ostream &OS,
+ bool Simple) const {
const ASTRecordLayout &Info = getASTRecordLayout(RD);
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- return DumpCXXRecordLayout(OS, CXXRD, *this, CharUnits(), 0, 0,
- /*IncludeVirtualBases=*/true);
+ if (!Simple)
+ return DumpCXXRecordLayout(OS, CXXRD, *this, CharUnits(), 0, 0,
+ /*IncludeVirtualBases=*/true);
OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
- OS << "Record: ";
- RD->dump();
+ if (!Simple) {
+ OS << "Record: ";
+ RD->dump();
+ }
OS << "\nLayout: ";
OS << "<ASTRecordLayout\n";
OS << " Size:" << toBits(Info.getSize()) << "\n";
diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
index e7b87e4..6af20df 100644
--- a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
@@ -78,11 +78,9 @@ void Stmt::addStmtClass(StmtClass s) {
++getStmtInfoTableEntry(s).Counter;
}
-static bool StatSwitch = false;
-
-bool Stmt::CollectingStats(bool Enable) {
- if (Enable) StatSwitch = true;
- return StatSwitch;
+bool Stmt::StatisticsEnabled = false;
+void Stmt::EnableStatistics() {
+ StatisticsEnabled = true;
}
Stmt *Stmt::IgnoreImplicit() {
@@ -164,7 +162,6 @@ Stmt::child_range Stmt::children() {
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind!");
- return child_range();
}
SourceRange Stmt::getSourceRange() const {
@@ -177,7 +174,72 @@ SourceRange Stmt::getSourceRange() const {
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind!");
- return SourceRange();
+}
+
+// Amusing macro metaprogramming hack: check whether a class provides
+// a more specific implementation of getLocStart() and getLocEnd().
+//
+// See also Expr.cpp:getExprLoc().
+namespace {
+ /// This implementation is used when a class provides a custom
+ /// implementation of getLocStart.
+ template <class S, class T>
+ SourceLocation getLocStartImpl(const Stmt *stmt,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const S*>(stmt)->getLocStart();
+ }
+
+ /// This implementation is used when a class doesn't provide a custom
+ /// implementation of getLocStart. Overload resolution should pick it over
+ /// the implementation above because it's more specialized according to
+ /// function template partial ordering.
+ template <class S>
+ SourceLocation getLocStartImpl(const Stmt *stmt,
+ SourceLocation (Stmt::*v)() const) {
+ return static_cast<const S*>(stmt)->getSourceRange().getBegin();
+ }
+
+ /// This implementation is used when a class provides a custom
+ /// implementation of getLocEnd.
+ template <class S, class T>
+ SourceLocation getLocEndImpl(const Stmt *stmt,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const S*>(stmt)->getLocEnd();
+ }
+
+ /// This implementation is used when a class doesn't provide a custom
+ /// implementation of getLocEnd. Overload resolution should pick it over
+ /// the implementation above because it's more specialized according to
+ /// function template partial ordering.
+ template <class S>
+ SourceLocation getLocEndImpl(const Stmt *stmt,
+ SourceLocation (Stmt::*v)() const) {
+ return static_cast<const S*>(stmt)->getSourceRange().getEnd();
+ }
+}
+
+SourceLocation Stmt::getLocStart() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return getLocStartImpl<type>(this, &type::getLocStart);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+SourceLocation Stmt::getLocEnd() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return getLocEndImpl<type>(this, &type::getLocEnd);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
}
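// Illustrative freestanding sketch of the dispatch trick above (toy types,
// not the clang hierarchy): when Derived does not declare loc() itself,
// &Derived::loc has type int (Base::*)() const, so the fallback overload is
// an exact match and wins by partial ordering; when Derived does declare it,
// only the generic overload can deduce the pointer's class type.
#include <iostream>

struct Base { int loc() const { return -1; } };
struct Custom : Base { int loc() const { return 42; } };
struct Plain : Base {};

template <class S, class T>
int locImpl(const S *s, int (T::*)() const) { return s->loc(); } // custom path

template <class S>
int locImpl(const S *, int (Base::*)() const) { return -1; }     // fallback

int main() {
  Custom c;
  Plain p;
  std::cout << locImpl<Custom>(&c, &Custom::loc) << ' '  // 42: custom impl
            << locImpl<Plain>(&p, &Plain::loc) << '\n';  // -1: fallback
}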
void CompoundStmt::setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts) {
@@ -631,9 +693,9 @@ void IfStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
return;
}
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
- V->getSourceRange().getBegin(),
- V->getSourceRange().getEnd());
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
}
ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
@@ -662,9 +724,9 @@ void ForStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
return;
}
- SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V),
- V->getSourceRange().getBegin(),
- V->getSourceRange().getEnd());
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
}
SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
@@ -689,9 +751,9 @@ void SwitchStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
return;
}
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
- V->getSourceRange().getBegin(),
- V->getSourceRange().getEnd());
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
}
Stmt *SwitchCase::getSubStmt() {
@@ -722,10 +784,10 @@ void WhileStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
SubExprs[VAR] = 0;
return;
}
-
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
- V->getSourceRange().getBegin(),
- V->getSourceRange().getEnd());
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
}
// IndirectGotoStmt
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
index 2968739..b5e298c 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
@@ -112,6 +112,7 @@ namespace {
case OK_Ordinary: break;
case OK_BitField: OS << " bitfield"; break;
case OK_ObjCProperty: OS << " objcproperty"; break;
+ case OK_ObjCSubscript: OS << " objcsubscript"; break;
case OK_VectorComponent: OS << " vectorcomponent"; break;
}
}
@@ -148,6 +149,7 @@ namespace {
void VisitCompoundAssignOperator(CompoundAssignOperator *Node);
void VisitAddrLabelExpr(AddrLabelExpr *Node);
void VisitBlockExpr(BlockExpr *Node);
+ void VisitOpaqueValueExpr(OpaqueValueExpr *Node);
// C++
void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
@@ -167,7 +169,9 @@ namespace {
void VisitObjCSelectorExpr(ObjCSelectorExpr *Node);
void VisitObjCProtocolExpr(ObjCProtocolExpr *Node);
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node);
+ void VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node);
+ void VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node);
};
}
@@ -250,7 +254,7 @@ void StmtDumper::DumpDeclarator(Decl *D) {
std::string Name = VD->getNameAsString();
VD->getType().getAsStringInternal(Name,
- PrintingPolicy(VD->getASTContext().getLangOptions()));
+ PrintingPolicy(VD->getASTContext().getLangOpts()));
OS << Name;
// If this is a vardecl with an initializer, emit it.
@@ -283,10 +287,10 @@ void StmtDumper::DumpDeclarator(Decl *D) {
const char *tn = UD->isTypeName() ? "typename " : "";
OS << '"' << UD->getDeclKindName() << tn;
UD->getQualifier()->print(OS,
- PrintingPolicy(UD->getASTContext().getLangOptions()));
+ PrintingPolicy(UD->getASTContext().getLangOpts()));
OS << ";\"";
} else if (LabelDecl *LD = dyn_cast<LabelDecl>(D)) {
- OS << "label " << LD->getNameAsString();
+ OS << "label " << *LD;
} else if (StaticAssertDecl *SAD = dyn_cast<StaticAssertDecl>(D)) {
OS << "\"static_assert(\n";
DumpSubTree(SAD->getAssertExpr());
@@ -425,7 +429,7 @@ void StmtDumper::VisitPredefinedExpr(PredefinedExpr *Node) {
void StmtDumper::VisitCharacterLiteral(CharacterLiteral *Node) {
DumpExpr(Node);
- OS << Node->getValue();
+ OS << " " << Node->getValue();
}
void StmtDumper::VisitIntegerLiteral(IntegerLiteral *Node) {
@@ -503,8 +507,10 @@ void StmtDumper::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
void StmtDumper::VisitBlockExpr(BlockExpr *Node) {
DumpExpr(Node);
- IndentLevel++;
BlockDecl *block = Node->getBlockDecl();
+ OS << " decl=" << block;
+
+ IndentLevel++;
if (block->capturesCXXThis()) {
OS << '\n'; Indent(); OS << "(capture this)";
}
@@ -515,15 +521,26 @@ void StmtDumper::VisitBlockExpr(BlockExpr *Node) {
OS << "(capture ";
if (i->isByRef()) OS << "byref ";
if (i->isNested()) OS << "nested ";
- DumpDeclRef(i->getVariable());
+ if (i->getVariable())
+ DumpDeclRef(i->getVariable());
if (i->hasCopyExpr()) DumpSubTree(i->getCopyExpr());
OS << ")";
}
IndentLevel--;
+ OS << '\n';
DumpSubTree(block->getBody());
}
+void StmtDumper::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
+ DumpExpr(Node);
+
+ if (Expr *Source = Node->getSourceExpr()) {
+ OS << '\n';
+ DumpSubTree(Source);
+ }
+}
+
// GNU extensions.
void StmtDumper::VisitAddrLabelExpr(AddrLabelExpr *Node) {
@@ -580,10 +597,12 @@ void StmtDumper::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
void StmtDumper::VisitExprWithCleanups(ExprWithCleanups *Node) {
DumpExpr(Node);
++IndentLevel;
- for (unsigned i = 0, e = Node->getNumTemporaries(); i != e; ++i) {
+ for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i) {
OS << "\n";
Indent();
- DumpCXXTemporary(Node->getTemporary(i));
+ OS << "(cleanup ";
+ DumpDeclRef(Node->getObject(i));
+ OS << ")";
}
--IndentLevel;
}
@@ -667,6 +686,40 @@ void StmtDumper::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
if (Node->isSuperReceiver())
OS << " super";
+
+ OS << " Messaging=";
+ if (Node->isMessagingGetter() && Node->isMessagingSetter())
+ OS << "Getter&Setter";
+ else if (Node->isMessagingGetter())
+ OS << "Getter";
+ else if (Node->isMessagingSetter())
+ OS << "Setter";
+}
+
+void StmtDumper::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
+ DumpExpr(Node);
+ if (Node->isArraySubscriptRefExpr())
+ OS << " Kind=ArraySubscript GetterForArray=\"";
+ else
+ OS << " Kind=DictionarySubscript GetterForDictionary=\"";
+ if (Node->getAtIndexMethodDecl())
+ OS << Node->getAtIndexMethodDecl()->getSelector().getAsString();
+ else
+ OS << "(null)";
+
+ if (Node->isArraySubscriptRefExpr())
+ OS << "\" SetterForArray=\"";
+ else
+ OS << "\" SetterForDictionary=\"";
+ if (Node->setAtIndexMethodDecl())
+ OS << Node->setAtIndexMethodDecl()->getSelector().getAsString();
+ else
+ OS << "(null)";
+}
+
+void StmtDumper::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
index daaa354..3a44183 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
@@ -17,9 +17,9 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyPrinter.h"
-#include "llvm/Support/Format.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -305,6 +305,22 @@ void StmtPrinter::VisitCXXForRangeStmt(CXXForRangeStmt *Node) {
Indent() << "}\n";
}
+void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
+ Indent();
+ if (Node->isIfExists())
+ OS << "__if_exists (";
+ else
+ OS << "__if_not_exists (";
+
+ if (NestedNameSpecifier *Qualifier
+ = Node->getQualifierLoc().getNestedNameSpecifier())
+ Qualifier->print(OS, Policy);
+
+ OS << Node->getNameInfo() << ") ";
+
+ PrintRawCompoundStmt(Node->getSubStmt());
+}
+
void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
Indent() << "goto " << Node->getLabel()->getName() << ";\n";
}
@@ -528,6 +544,8 @@ void StmtPrinter::VisitSEHFinallyStmt(SEHFinallyStmt *Node) {
void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
OS << Node->getNameInfo();
if (Node->hasExplicitTemplateArgs())
OS << TemplateSpecializationType::PrintTemplateArgumentList(
@@ -540,6 +558,8 @@ void StmtPrinter::VisitDependentScopeDeclRefExpr(
DependentScopeDeclRefExpr *Node) {
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
OS << Node->getNameInfo();
if (Node->hasExplicitTemplateArgs())
OS << TemplateSpecializationType::PrintTemplateArgumentList(
@@ -551,6 +571,8 @@ void StmtPrinter::VisitDependentScopeDeclRefExpr(
void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
if (Node->getQualifier())
Node->getQualifier()->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
OS << Node->getNameInfo();
if (Node->hasExplicitTemplateArgs())
OS << TemplateSpecializationType::PrintTemplateArgumentList(
@@ -581,6 +603,14 @@ void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
OS << Node->getExplicitProperty()->getName();
}
+void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
+
+ PrintExpr(Node->getBaseExpr());
+ OS << "[";
+ PrintExpr(Node->getKeyExpr());
+ OS << "]";
+}
+
void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
switch (Node->getIdentType()) {
default:
@@ -644,7 +674,8 @@ void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
if (value < 256 && isprint(value)) {
OS << "'" << (char)value << "'";
} else if (value < 256) {
- OS << "'\\x" << llvm::format("%x", value) << "'";
+ OS << "'\\x";
+ OS.write_hex(value) << "'";
} else {
// FIXME what to really do here?
OS << value;
@@ -659,16 +690,23 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
// Emit suffixes. Integer literals are always a builtin integer type.
switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
default: llvm_unreachable("Unexpected type for integer literal!");
+ // FIXME: The Short and UShort cases are to handle cases where a short
+ // integral literal is formed during template instantiation. They should
+ // be removed when template instantiation no longer needs integer literals.
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
case BuiltinType::Int: break; // no suffix.
case BuiltinType::UInt: OS << 'U'; break;
case BuiltinType::Long: OS << 'L'; break;
case BuiltinType::ULong: OS << "UL"; break;
case BuiltinType::LongLong: OS << "LL"; break;
case BuiltinType::ULongLong: OS << "ULL"; break;
+ case BuiltinType::Int128: OS << "i128"; break;
+ case BuiltinType::UInt128: OS << "Ui128"; break;
}
}
void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) {
- llvm::SmallString<16> Str;
+ SmallString<16> Str;
Node->getValue().toString(Str);
OS << Str;
}
@@ -687,22 +725,74 @@ void StmtPrinter::VisitStringLiteral(StringLiteral *Str) {
case StringLiteral::UTF32: OS << 'U'; break;
}
OS << '"';
+ static char Hex[] = "0123456789ABCDEF";
- // FIXME: this doesn't print wstrings right.
- StringRef StrData = Str->getString();
- for (StringRef::iterator I = StrData.begin(), E = StrData.end();
- I != E; ++I) {
- unsigned char Char = *I;
-
- switch (Char) {
+ unsigned LastSlashX = Str->getLength();
+ for (unsigned I = 0, N = Str->getLength(); I != N; ++I) {
+ switch (uint32_t Char = Str->getCodeUnit(I)) {
default:
- if (isprint(Char))
+ // FIXME: Convert UTF-8 back to codepoints before rendering.
+
+ // Convert UTF-16 surrogate pairs back to codepoints before rendering.
+ // Leave invalid surrogates alone; we'll use \x for those.
+ if (Str->getKind() == StringLiteral::UTF16 && I != N - 1 &&
+ Char >= 0xd800 && Char <= 0xdbff) {
+ uint32_t Trail = Str->getCodeUnit(I + 1);
+ if (Trail >= 0xdc00 && Trail <= 0xdfff) {
+ Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
+ ++I;
+ }
+ }
+
+ if (Char > 0xff) {
+ // If this is a wide string, output characters over 0xff using \x
+ // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
+ // codepoint: use \x escapes for invalid codepoints.
+ if (Str->getKind() == StringLiteral::Wide ||
+ (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
+ // FIXME: Is this the best way to print wchar_t?
+ OS << "\\x";
+ int Shift = 28;
+ while ((Char >> Shift) == 0)
+ Shift -= 4;
+ for (/**/; Shift >= 0; Shift -= 4)
+ OS << Hex[(Char >> Shift) & 15];
+ LastSlashX = I;
+ break;
+ }
+
+ if (Char > 0xffff)
+ OS << "\\U00"
+ << Hex[(Char >> 20) & 15]
+ << Hex[(Char >> 16) & 15];
+ else
+ OS << "\\u";
+ OS << Hex[(Char >> 12) & 15]
+ << Hex[(Char >> 8) & 15]
+ << Hex[(Char >> 4) & 15]
+ << Hex[(Char >> 0) & 15];
+ break;
+ }
+
+ // If we used \x... for the previous character, and this character is a
+ // hexadecimal digit, prevent it from being slurped as part of the \x.
+ if (LastSlashX + 1 == I) {
+ switch (Char) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ OS << "\"\"";
+ }
+ }
+
+ if (Char <= 0xff && isprint(Char))
OS << (char)Char;
else // Output anything hard as an octal escape.
OS << '\\'
- << (char)('0'+ ((Char >> 6) & 7))
- << (char)('0'+ ((Char >> 3) & 7))
- << (char)('0'+ ((Char >> 0) & 7));
+ << (char)('0' + ((Char >> 6) & 7))
+ << (char)('0' + ((Char >> 3) & 7))
+ << (char)('0' + ((Char >> 0) & 7));
break;
// Handle some common non-printable cases to make dumps prettier.
case '\\': OS << "\\\\"; break;
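// A standalone sketch of the surrogate-pair decoding used above
// (decodeUTF16Pair is a hypothetical helper, not part of this patch):
#include <cstdint>
static uint32_t decodeUTF16Pair(uint16_t Lead, uint16_t Trail) {
  // Lead in [0xD800,0xDBFF], Trail in [0xDC00,0xDFFF]; for example the
  // pair 0xD83D/0xDE00 decodes to U+1F600.
  return 0x10000 + ((uint32_t(Lead) - 0xD800) << 10) + (Trail - 0xDC00);
}
// The "" splice emitted when LastSlashX + 1 == I keeps a following hex
// digit out of the preceding escape: the printer produces "\x41" "B"
// rather than "\x41B", which would re-lex as a single larger escape.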
@@ -849,9 +939,9 @@ void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
OS << (Node->isArrow() ? "->" : ".");
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
-
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
OS << Node->getMemberNameInfo();
-
if (Node->hasExplicitTemplateArgs())
OS << TemplateSpecializationType::PrintTemplateArgumentList(
Node->getTemplateArgs(),
@@ -1011,52 +1101,42 @@ void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
OS << ")";
}
+void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) {
+ PrintExpr(Node->getSyntacticForm());
+}
+
void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
const char *Name = 0;
switch (Node->getOp()) {
- case AtomicExpr::Load:
- Name = "__atomic_load(";
- break;
- case AtomicExpr::Store:
- Name = "__atomic_store(";
- break;
- case AtomicExpr::CmpXchgStrong:
- Name = "__atomic_compare_exchange_strong(";
- break;
- case AtomicExpr::CmpXchgWeak:
- Name = "__atomic_compare_exchange_weak(";
- break;
- case AtomicExpr::Xchg:
- Name = "__atomic_exchange(";
- break;
- case AtomicExpr::Add:
- Name = "__atomic_fetch_add(";
- break;
- case AtomicExpr::Sub:
- Name = "__atomic_fetch_sub(";
- break;
- case AtomicExpr::And:
- Name = "__atomic_fetch_and(";
- break;
- case AtomicExpr::Or:
- Name = "__atomic_fetch_or(";
- break;
- case AtomicExpr::Xor:
- Name = "__atomic_fetch_xor(";
- break;
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case AtomicExpr::AO ## ID: \
+ Name = #ID "("; \
+ break;
+#include "clang/Basic/Builtins.def"
}
OS << Name;
+
+ // AtomicExpr stores its subexpressions in a permuted order.
PrintExpr(Node->getPtr());
OS << ", ";
- if (Node->getOp() != AtomicExpr::Load) {
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
+ Node->getOp() != AtomicExpr::AO__atomic_load_n) {
PrintExpr(Node->getVal1());
OS << ", ";
}
- if (Node->isCmpXChg()) {
+ if (Node->getOp() == AtomicExpr::AO__atomic_exchange ||
+ Node->isCmpXChg()) {
PrintExpr(Node->getVal2());
OS << ", ";
}
- PrintExpr(Node->getOrder());
+ if (Node->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+ Node->getOp() == AtomicExpr::AO__atomic_compare_exchange_n) {
+ PrintExpr(Node->getWeak());
+ OS << ", ";
+ }
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_init)
+ PrintExpr(Node->getOrder());
if (Node->isCmpXChg()) {
OS << ", ";
PrintExpr(Node->getOrderFail());
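// The BUILTIN/ATOMIC_BUILTIN block above is the X-macro pattern:
// Builtins.def re-expands every atomic builtin through whichever macro
// the includer defines. A minimal self-contained sketch of the same
// technique (OP_LIST and these names are hypothetical, not clang's):
#define OP_LIST(X) X(Add) X(Sub) X(Xchg)
enum class Op {
#define OP_ENUM(ID) ID,
  OP_LIST(OP_ENUM)
#undef OP_ENUM
};
static const char *getOpName(Op K) {
  switch (K) {
#define OP_NAME(ID) case Op::ID: return #ID;
  OP_LIST(OP_NAME)
#undef OP_NAME
  }
  return "?";
}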
@@ -1165,6 +1245,39 @@ void StmtPrinter::VisitCXXUuidofExpr(CXXUuidofExpr *Node) {
OS << ")";
}
+void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
+ switch (Node->getLiteralOperatorKind()) {
+ case UserDefinedLiteral::LOK_Raw:
+ OS << cast<StringLiteral>(Node->getArg(0)->IgnoreImpCasts())->getString();
+ break;
+ case UserDefinedLiteral::LOK_Template: {
+ DeclRefExpr *DRE = cast<DeclRefExpr>(Node->getCallee()->IgnoreImpCasts());
+ const TemplateArgumentList *Args =
+ cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs();
+ assert(Args);
+ const TemplateArgument &Pack = Args->get(0);
+ for (TemplateArgument::pack_iterator I = Pack.pack_begin(),
+ E = Pack.pack_end(); I != E; ++I) {
+ char C = (char)I->getAsIntegral()->getZExtValue();
+ OS << C;
+ }
+ break;
+ }
+ case UserDefinedLiteral::LOK_Integer: {
+ // Print integer literal without suffix.
+ IntegerLiteral *Int = cast<IntegerLiteral>(Node->getCookedLiteral());
+ OS << Int->getValue().toString(10, /*isSigned*/false);
+ break;
+ }
+ case UserDefinedLiteral::LOK_Floating:
+ case UserDefinedLiteral::LOK_String:
+ case UserDefinedLiteral::LOK_Character:
+ PrintExpr(Node->getCookedLiteral());
+ break;
+ }
+ OS << Node->getUDSuffix()->getName();
+}
+
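// Rough correspondence of the literal operator kinds handled above
// (illustrative, per C++11 [lex.ext]):
//   LOK_Raw       operator "" _x(const char *)        -> print the spelling
//   LOK_Template  operator "" _x<'1','2','3'>()       -> reassemble the chars
//   LOK_Integer   operator "" _x(unsigned long long)  -> value, no suffix
//   LOK_Floating / LOK_String / LOK_Character         -> print cooked literal
// so 123_x round-trips as the digits "123" followed by the suffix "_x".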
void StmtPrinter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
OS << (Node->getValue() ? "true" : "false");
}
@@ -1214,6 +1327,98 @@ void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
OS << ")";
}
+void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
+ OS << '[';
+ bool NeedComma = false;
+ switch (Node->getCaptureDefault()) {
+ case LCD_None:
+ break;
+
+ case LCD_ByCopy:
+ OS << '=';
+ NeedComma = true;
+ break;
+
+ case LCD_ByRef:
+ OS << '&';
+ NeedComma = true;
+ break;
+ }
+ for (LambdaExpr::capture_iterator C = Node->explicit_capture_begin(),
+ CEnd = Node->explicit_capture_end();
+ C != CEnd;
+ ++C) {
+ if (NeedComma)
+ OS << ", ";
+ NeedComma = true;
+
+ switch (C->getCaptureKind()) {
+ case LCK_This:
+ OS << "this";
+ break;
+
+ case LCK_ByRef:
+ if (Node->getCaptureDefault() != LCD_ByRef)
+ OS << '&';
+ OS << C->getCapturedVar()->getName();
+ break;
+
+ case LCK_ByCopy:
+ if (Node->getCaptureDefault() != LCD_ByCopy)
+ OS << '=';
+ OS << C->getCapturedVar()->getName();
+ break;
+ }
+ }
+ OS << ']';
+
+ if (Node->hasExplicitParameters()) {
+ OS << " (";
+ CXXMethodDecl *Method = Node->getCallOperator();
+ NeedComma = false;
+ for (CXXMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; ++P) {
+ if (NeedComma) {
+ OS << ", ";
+ } else {
+ NeedComma = true;
+ }
+ std::string ParamStr = (*P)->getNameAsString();
+ (*P)->getOriginalType().getAsStringInternal(ParamStr, Policy);
+ OS << ParamStr;
+ }
+ if (Method->isVariadic()) {
+ if (NeedComma)
+ OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+
+ if (Node->isMutable())
+ OS << " mutable";
+
+ const FunctionProtoType *Proto
+ = Method->getType()->getAs<FunctionProtoType>();
+ {
+ std::string ExceptionSpec;
+ Proto->printExceptionSpecification(ExceptionSpec, Policy);
+ OS << ExceptionSpec;
+ }
+
+ // FIXME: Attributes
+
+ // Print the trailing return type if it was specified in the source.
+ if (Node->hasExplicitResultType())
+ OS << " -> " << Proto->getResultType().getAsString(Policy);
+ }
+
+ // Print the body.
+ CompoundStmt *Body = Node->getBody();
+ OS << ' ';
+ PrintStmt(Body);
+}
+
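// For reference, the code above renders a lambda such as
//   [=, &v] (int x) mutable noexcept -> int { ... }
// in this order: capture default, explicit captures (suppressing the
// '&' or '=' marker when it matches the default), parameter list,
// 'mutable', the exception specification, the trailing return type
// (only when hasExplicitResultType() is true), and finally the body.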
void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
if (TypeSourceInfo *TSInfo = Node->getTypeSourceInfo())
OS << TSInfo->getType().getAsString(Policy) << "()";
@@ -1249,17 +1454,13 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
if (E->isParenTypeId())
OS << ")";
- if (E->hasInitializer()) {
- OS << "(";
- unsigned NumCons = E->getNumConstructorArgs();
- if (NumCons > 0) {
- PrintExpr(E->getConstructorArg(0));
- for (unsigned i = 1; i < NumCons; ++i) {
- OS << ", ";
- PrintExpr(E->getConstructorArg(i));
- }
- }
- OS << ")";
+ CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle();
+ if (InitStyle) {
+ if (InitStyle == CXXNewExpr::CallInit)
+ OS << "(";
+ PrintExpr(E->getInitializer());
+ if (InitStyle == CXXNewExpr::CallInit)
+ OS << ")";
}
}
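// The InitializationStyle check above folds the three C++11 new forms
// into one path (illustrative):
//   no initializer       -> new T
//   CXXNewExpr::CallInit -> new T(a, b)   (parens supplied here)
//   list-initialization  -> new T{a, b}   (the initializer prints its
//                                          own braces, hence no parens)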
@@ -1329,12 +1530,9 @@ void StmtPrinter::VisitCXXDependentScopeMemberExpr(
}
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
- else if (Node->hasExplicitTemplateArgs())
- // FIXME: Track use of "template" keyword explicitly?
+ if (Node->hasTemplateKeyword())
OS << "template ";
-
OS << Node->getMemberNameInfo();
-
if (Node->hasExplicitTemplateArgs()) {
OS << TemplateSpecializationType::PrintTemplateArgumentList(
Node->getTemplateArgs(),
@@ -1350,11 +1548,9 @@ void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
}
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
-
- // FIXME: this might originally have been written with 'template'
-
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
OS << Node->getMemberNameInfo();
-
if (Node->hasExplicitTemplateArgs()) {
OS << TemplateSpecializationType::PrintTemplateArgumentList(
Node->getTemplateArgs(),
@@ -1382,6 +1578,7 @@ static const char *getTypeTraitName(UnaryTypeTrait UTT) {
case UTT_IsConst: return "__is_const";
case UTT_IsEmpty: return "__is_empty";
case UTT_IsEnum: return "__is_enum";
+ case UTT_IsFinal: return "__is_final";
case UTT_IsFloatingPoint: return "__is_floating_point";
case UTT_IsFunction: return "__is_function";
case UTT_IsFundamental: return "__is_fundamental";
@@ -1412,15 +1609,23 @@ static const char *getTypeTraitName(UnaryTypeTrait UTT) {
static const char *getTypeTraitName(BinaryTypeTrait BTT) {
switch (BTT) {
- case BTT_IsBaseOf: return "__is_base_of";
- case BTT_IsConvertible: return "__is_convertible";
- case BTT_IsSame: return "__is_same";
- case BTT_TypeCompatible: return "__builtin_types_compatible_p";
- case BTT_IsConvertibleTo: return "__is_convertible_to";
+ case BTT_IsBaseOf: return "__is_base_of";
+ case BTT_IsConvertible: return "__is_convertible";
+ case BTT_IsSame: return "__is_same";
+ case BTT_TypeCompatible: return "__builtin_types_compatible_p";
+ case BTT_IsConvertibleTo: return "__is_convertible_to";
+ case BTT_IsTriviallyAssignable: return "__is_trivially_assignable";
}
llvm_unreachable("Binary type trait not covered by switch");
}
+static const char *getTypeTraitName(TypeTrait TT) {
+ switch (TT) {
+ case clang::TT_IsTriviallyConstructible:return "__is_trivially_constructible";
+ }
+ llvm_unreachable("Type trait not covered by switch");
+}
+
static const char *getTypeTraitName(ArrayTypeTrait ATT) {
switch (ATT) {
case ATT_ArrayRank: return "__array_rank";
@@ -1448,6 +1653,16 @@ void StmtPrinter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
<< E->getRhsType().getAsString(Policy) << ")";
}
+void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "(";
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
+ if (I > 0)
+ OS << ", ";
+ OS << E->getArg(I)->getType().getAsString(Policy);
+ }
+ OS << ")";
+}
+
void StmtPrinter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
OS << getTypeTraitName(E->getTrait()) << "("
<< E->getQueriedType().getAsString(Policy) << ")";
@@ -1471,12 +1686,12 @@ void StmtPrinter::VisitPackExpansionExpr(PackExpansionExpr *E) {
}
void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
- OS << "sizeof...(" << E->getPack()->getNameAsString() << ")";
+ OS << "sizeof...(" << *E->getPack() << ")";
}
void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *Node) {
- OS << Node->getParameterPack()->getNameAsString();
+ OS << *Node->getParameterPack();
}
void StmtPrinter::VisitSubstNonTypeTemplateParmExpr(
@@ -1495,6 +1710,41 @@ void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
VisitStringLiteral(Node->getString());
}
+void StmtPrinter::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ OS << "@";
+ Visit(E->getNumber());
+}
+
+void StmtPrinter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ OS << "@[ ";
+ StmtRange ch = E->children();
+ if (ch.first != ch.second) {
+ while (1) {
+ Visit(*ch.first);
+ ++ch.first;
+ if (ch.first == ch.second) break;
+ OS << ", ";
+ }
+ }
+ OS << " ]";
+}
+
+void StmtPrinter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ OS << "@{ ";
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ if (I > 0)
+ OS << ", ";
+
+ ObjCDictionaryElement Element = E->getKeyValueElement(I);
+ Visit(Element.Key);
+ OS << " : ";
+ Visit(Element.Value);
+ if (Element.isPackExpansion())
+ OS << "...";
+ }
+ OS << " }";
+}
+
void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
OS << "@encode(" << Node->getEncodedType().getAsString(Policy) << ')';
}
@@ -1545,6 +1795,10 @@ void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
OS << "]";
}
+void StmtPrinter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) {
+ OS << (Node->getValue() ? "__objc_yes" : "__objc_no");
+}
+
void
StmtPrinter::VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
PrintExpr(E->getSubExpr());
@@ -1585,12 +1839,10 @@ void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
}
}
-void StmtPrinter::VisitBlockDeclRefExpr(BlockDeclRefExpr *Node) {
- OS << *Node->getDecl();
+void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
+ PrintExpr(Node->getSourceExpr());
}
-void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {}
-
void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
OS << "__builtin_astype(";
PrintExpr(Node->getSrcExpr());
@@ -1604,7 +1856,7 @@ void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
void Stmt::dumpPretty(ASTContext& Context) const {
printPretty(llvm::errs(), Context, 0,
- PrintingPolicy(Context.getLangOptions()));
+ PrintingPolicy(Context.getLangOpts()));
}
void Stmt::printPretty(raw_ostream &OS, ASTContext& Context,
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
index 214378a..e5526ce 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
@@ -69,8 +69,12 @@ namespace {
void StmtProfiler::VisitStmt(const Stmt *S) {
ID.AddInteger(S->getStmtClass());
- for (Stmt::const_child_range C = S->children(); C; ++C)
- Visit(*C);
+ for (Stmt::const_child_range C = S->children(); C; ++C) {
+ if (*C)
+ Visit(*C);
+ else
+ ID.AddInteger(0);
+ }
}
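// Hashing a placeholder for null children keeps distinct statements
// from colliding: 'for (;;x)' and 'for (x;;)' visit the same non-null
// children in the same order, so only the positions of the nulls
// distinguish their profiles.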
void StmtProfiler::VisitDeclStmt(const DeclStmt *S) {
@@ -182,6 +186,13 @@ void StmtProfiler::VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
VisitStmt(S);
}
+void StmtProfiler::VisitMSDependentExistsStmt(const MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->isIfExists());
+ VisitNestedNameSpecifier(S->getQualifierLoc().getNestedNameSpecifier());
+ VisitName(S->getNameInfo().getName());
+}
+
void StmtProfiler::VisitSEHTryStmt(const SEHTryStmt *S) {
VisitStmt(S);
}
@@ -268,7 +279,7 @@ void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) {
void StmtProfiler::VisitStringLiteral(const StringLiteral *S) {
VisitExpr(S);
- ID.AddString(S->getString());
+ ID.AddString(S->getBytes());
ID.AddInteger(S->getKind());
}
@@ -449,13 +460,6 @@ void StmtProfiler::VisitBlockExpr(const BlockExpr *S) {
VisitDecl(S->getBlockDecl());
}
-void StmtProfiler::VisitBlockDeclRefExpr(const BlockDeclRefExpr *S) {
- VisitExpr(S);
- VisitDecl(S->getDecl());
- ID.AddBoolean(S->isByRef());
- ID.AddBoolean(S->isConstQualAdded());
-}
-
void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
VisitExpr(S);
for (unsigned i = 0; i != S->getNumAssocs(); ++i) {
@@ -468,6 +472,15 @@ void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
}
}
+void StmtProfiler::VisitPseudoObjectExpr(const PseudoObjectExpr *S) {
+ VisitExpr(S);
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = S->semantics_begin(), e = S->semantics_end(); i != e; ++i)
+ // Normally, we would not profile the source expressions of OVEs.
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(*i))
+ Visit(OVE->getSourceExpr());
+}
+
void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getOp());
@@ -487,7 +500,6 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
case OO_Conditional:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Invalid operator call kind");
- return Stmt::ArraySubscriptExprClass;
case OO_Plus:
if (S->getNumArgs() == 1) {
@@ -719,6 +731,10 @@ void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) {
VisitCXXNamedCastExpr(S);
}
+void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
+ VisitCallExpr(S);
+}
+
void StmtProfiler::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->getValue());
@@ -775,6 +791,24 @@ StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
}
void
+StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
+ VisitExpr(S);
+ for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(),
+ CEnd = S->explicit_capture_end();
+ C != CEnd; ++C) {
+ ID.AddInteger(C->getCaptureKind());
+ if (C->capturesVariable()) {
+ VisitDecl(C->getCapturedVar());
+ ID.AddBoolean(C->isPackExpansion());
+ }
+ }
+ // Note: If we actually needed to be able to match lambda
+ // expressions, we would have to consider parameters and return type
+ // here, among other things.
+ VisitStmt(S->getBody());
+}
+
+void
StmtProfiler::VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *S) {
VisitExpr(S);
}
@@ -792,13 +826,11 @@ void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) {
VisitType(S->getAllocatedType());
VisitDecl(S->getOperatorNew());
VisitDecl(S->getOperatorDelete());
- VisitDecl(S->getConstructor());
ID.AddBoolean(S->isArray());
ID.AddInteger(S->getNumPlacementArgs());
ID.AddBoolean(S->isGlobalNew());
ID.AddBoolean(S->isParenTypeId());
- ID.AddBoolean(S->hasInitializer());
- ID.AddInteger(S->getNumConstructorArgs());
+ ID.AddInteger(S->getInitializationStyle());
}
void
@@ -837,6 +869,14 @@ void StmtProfiler::VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *S) {
VisitType(S->getRhsType());
}
+void StmtProfiler::VisitTypeTraitExpr(const TypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ ID.AddInteger(S->getNumArgs());
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ VisitType(S->getArg(I)->getType());
+}
+
void StmtProfiler::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getTrait());
@@ -935,6 +975,18 @@ void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
VisitExpr(S);
}
+void StmtProfiler::VisitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+}
+
void StmtProfiler::VisitObjCEncodeExpr(const ObjCEncodeExpr *S) {
VisitExpr(S);
VisitType(S->getEncodedType());
@@ -971,6 +1023,12 @@ void StmtProfiler::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *S) {
}
}
+void StmtProfiler::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getAtIndexMethodDecl());
+ VisitDecl(S->setAtIndexMethodDecl());
+}
+
void StmtProfiler::VisitObjCMessageExpr(const ObjCMessageExpr *S) {
VisitExpr(S);
VisitName(S->getSelector());
@@ -982,6 +1040,11 @@ void StmtProfiler::VisitObjCIsaExpr(const ObjCIsaExpr *S) {
ID.AddBoolean(S->isArrow());
}
+void StmtProfiler::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->getValue());
+}
+
void StmtProfiler::VisitObjCIndirectCopyRestoreExpr(
const ObjCIndirectCopyRestoreExpr *S) {
VisitExpr(S);
@@ -1020,6 +1083,14 @@ void StmtProfiler::VisitDecl(const Decl *D) {
return;
}
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+
if (const TemplateTemplateParmDecl *TTP =
dyn_cast<TemplateTemplateParmDecl>(D)) {
ID.AddInteger(TTP->getDepth());
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
index 0c011a8..531e03e 100644
--- a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallString.h"
#include <algorithm>
#include <cctype>
@@ -40,10 +41,9 @@ static void printIntegral(const TemplateArgument &TemplArg,
if (T->isBooleanType()) {
Out << (Val->getBoolValue() ? "true" : "false");
} else if (T->isCharType()) {
- const unsigned char Ch = Val->getZExtValue();
- const std::string Str(1, Ch);
+ const char Ch = Val->getZExtValue();
Out << ((Ch == '\'') ? "'\\" : "'");
- Out.write_escaped(Str, /*UseHexEscapes=*/ true);
+ Out.write_escaped(StringRef(&Ch, 1), /*UseHexEscapes=*/ true);
Out << "'";
} else {
Out << Val->toString(10);
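// Effect of the StringRef-based escaping above (illustrative): a char
// template argument of '\n' prints as '\n', a single quote as '\''
// (via the "'\\" special case), and a non-printable byte such as 0x7F
// as '\x7F', courtesy of write_escaped's UseHexEscapes mode.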
@@ -80,9 +80,13 @@ bool TemplateArgument::isDependent() const {
return true;
case Declaration:
- if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
- return DC->isDependentContext();
- return getAsDecl()->getDeclContext()->isDependentContext();
+ if (Decl *D = getAsDecl()) {
+ if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ return DC->isDependentContext();
+ return D->getDeclContext()->isDependentContext();
+ }
+
+ return false;
case Integral:
// Never dependent
@@ -100,7 +104,7 @@ bool TemplateArgument::isDependent() const {
return false;
}
- return false;
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
bool TemplateArgument::isInstantiationDependent() const {
@@ -118,10 +122,13 @@ bool TemplateArgument::isInstantiationDependent() const {
return true;
case Declaration:
- if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
- return DC->isDependentContext();
- return getAsDecl()->getDeclContext()->isDependentContext();
-
+ if (Decl *D = getAsDecl()) {
+ if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ return DC->isDependentContext();
+ return D->getDeclContext()->isDependentContext();
+ }
+ return false;
+
case Integral:
// Never dependent
return false;
@@ -137,8 +144,8 @@ bool TemplateArgument::isInstantiationDependent() const {
return false;
}
-
- return false;
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
bool TemplateArgument::isPackExpansion() const {
@@ -159,8 +166,8 @@ bool TemplateArgument::isPackExpansion() const {
case Expression:
return isa<PackExpansionExpr>(getAsExpr());
}
-
- return false;
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
bool TemplateArgument::containsUnexpandedParameterPack() const {
@@ -278,8 +285,7 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
return true;
}
- // Suppress warnings.
- return false;
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
TemplateArgument TemplateArgument::getPackExpansionPattern() const {
@@ -302,8 +308,8 @@ TemplateArgument TemplateArgument::getPackExpansionPattern() const {
case Template:
return TemplateArgument();
}
-
- return TemplateArgument();
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
void TemplateArgument::print(const PrintingPolicy &Policy,
@@ -323,16 +329,14 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
}
case Declaration: {
- bool Unnamed = true;
if (NamedDecl *ND = dyn_cast_or_null<NamedDecl>(getAsDecl())) {
if (ND->getDeclName()) {
- Unnamed = false;
- Out << ND->getNameAsString();
+ Out << *ND;
+ } else {
+ Out << "<anonymous>";
}
- }
-
- if (Unnamed) {
- Out << "<anonymous>";
+ } else {
+ Out << "nullptr";
}
break;
}
@@ -412,8 +416,7 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
return SourceRange();
}
- // Silence bonus gcc warning.
- return SourceRange();
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
TemplateArgumentLoc
@@ -474,8 +477,8 @@ TemplateArgumentLoc::getPackExpansionPattern(SourceLocation &Ellipsis,
case TemplateArgument::Null:
return TemplateArgumentLoc();
}
-
- return TemplateArgumentLoc();
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
@@ -490,7 +493,9 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB << Arg.getAsType();
case TemplateArgument::Declaration:
- return DB << Arg.getAsDecl();
+ if (Decl *D = Arg.getAsDecl())
+ return DB << D;
+ return DB << "nullptr";
case TemplateArgument::Integral:
return DB << Arg.getAsIntegral()->toString(10);
@@ -505,7 +510,7 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
// This shouldn't actually ever happen, so it's okay that we're
// regurgitating an expression here.
// FIXME: We're guessing at LangOptions!
- llvm::SmallString<32> Str;
+ SmallString<32> Str;
llvm::raw_svector_ostream OS(Str);
LangOptions LangOpts;
LangOpts.CPlusPlus = true;
@@ -516,7 +521,7 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
case TemplateArgument::Pack: {
// FIXME: We're guessing at LangOptions!
- llvm::SmallString<32> Str;
+ SmallString<32> Str;
llvm::raw_svector_ostream OS(Str);
LangOptions LangOpts;
LangOpts.CPlusPlus = true;
@@ -525,15 +530,15 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
return DB << OS.str();
}
}
-
- return DB;
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
const ASTTemplateArgumentListInfo *
ASTTemplateArgumentListInfo::Create(ASTContext &C,
const TemplateArgumentListInfo &List) {
std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
- ASTTemplateArgumentListInfo::sizeFor(List);
+ ASTTemplateArgumentListInfo::sizeFor(List.size());
void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo();
TAI->initializeFrom(List);
@@ -586,7 +591,38 @@ std::size_t ASTTemplateArgumentListInfo::sizeFor(unsigned NumTemplateArgs) {
sizeof(TemplateArgumentLoc) * NumTemplateArgs;
}
-std::size_t ASTTemplateArgumentListInfo::sizeFor(
- const TemplateArgumentListInfo &Info) {
- return sizeFor(Info.size());
+void
+ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &Info) {
+ Base::initializeFrom(Info);
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+void
+ASTTemplateKWAndArgsInfo
+::initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &Info,
+ bool &Dependent,
+ bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack) {
+ Base::initializeFrom(Info, Dependent, InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+void
+ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
+ // No explicit template arguments, but template keyword loc is valid.
+ assert(TemplateKWLoc.isValid());
+ LAngleLoc = SourceLocation();
+ RAngleLoc = SourceLocation();
+ NumTemplateArgs = 0;
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+std::size_t
+ASTTemplateKWAndArgsInfo::sizeFor(unsigned NumTemplateArgs) {
+ // Add space for the template keyword location.
+ return Base::sizeFor(NumTemplateArgs) + sizeof(SourceLocation);
}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
index a0487ba..e89ba53 100644
--- a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
@@ -149,7 +149,7 @@ TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
subst->getReplacement().print(OS, Policy, SuppressNNS);
} else if (SubstTemplateTemplateParmPackStorage *SubstPack
= getAsSubstTemplateTemplateParmPack())
- OS << SubstPack->getParameterPack()->getNameAsString();
+ OS << *SubstPack->getParameterPack();
else {
OverloadedTemplateStorage *OTS = getAsOverloadedTemplate();
(*OTS->begin())->printName(OS);
diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp
index 44eeec0..c82aeaa 100644
--- a/contrib/llvm/tools/clang/lib/AST/Type.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp
@@ -197,27 +197,28 @@ const Type *Type::getArrayElementTypeNoTypeQual() const {
/// concrete.
QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) {
SplitQualType split = getSplitDesugaredType(T);
- return Context.getQualifiedType(split.first, split.second);
+ return Context.getQualifiedType(split.Ty, split.Quals);
}
-QualType QualType::getSingleStepDesugaredType(const ASTContext &Context) const {
- QualifierCollector Qs;
-
- const Type *CurTy = Qs.strip(*this);
- switch (CurTy->getTypeClass()) {
+QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
+ const ASTContext &Context) {
+ SplitQualType split = type.split();
+ QualType desugar = split.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
+ return Context.getQualifiedType(desugar, split.Quals);
+}
+
+QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
+ switch (getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
case Type::Class: { \
- const Class##Type *Ty = cast<Class##Type>(CurTy); \
- if (!Ty->isSugared()) \
- return *this; \
- return Context.getQualifiedType(Ty->desugar(), Qs); \
- break; \
+ const Class##Type *ty = cast<Class##Type>(this); \
+ if (!ty->isSugared()) return QualType(ty, 0); \
+ return ty->desugar(); \
}
#include "clang/AST/TypeNodes.def"
}
-
- return *this;
+ llvm_unreachable("bad type kind!");
}
SplitQualType QualType::getSplitDesugaredType(QualType T) {
@@ -245,21 +246,21 @@ SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
SplitQualType split = type.split();
// All the qualifiers we've seen so far.
- Qualifiers quals = split.second;
+ Qualifiers quals = split.Quals;
// The last type node we saw with any nodes inside it.
- const Type *lastTypeWithQuals = split.first;
+ const Type *lastTypeWithQuals = split.Ty;
while (true) {
QualType next;
// Do a single-step desugar, aborting the loop if the type isn't
// sugared.
- switch (split.first->getTypeClass()) {
+ switch (split.Ty->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
case Type::Class: { \
- const Class##Type *ty = cast<Class##Type>(split.first); \
+ const Class##Type *ty = cast<Class##Type>(split.Ty); \
if (!ty->isSugared()) goto done; \
next = ty->desugar(); \
break; \
@@ -270,9 +271,9 @@ SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
// Otherwise, split the underlying type. If that yields qualifiers,
// update the information.
split = next.split();
- if (!split.second.empty()) {
- lastTypeWithQuals = split.first;
- quals.addConsistentQualifiers(split.second);
+ if (!split.Quals.empty()) {
+ lastTypeWithQuals = split.Ty;
+ quals.addConsistentQualifiers(split.Quals);
}
}
@@ -308,13 +309,6 @@ const Type *Type::getUnqualifiedDesugaredType() const {
}
}
-/// isVoidType - Helper method to determine if this is the 'void' type.
-bool Type::isVoidType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::Void;
- return false;
-}
-
bool Type::isDerivedType() const {
switch (CanonicalType->getTypeClass()) {
case Pointer:
@@ -555,17 +549,6 @@ AutoType *Type::getContainedAutoType() const {
return GetContainedAutoVisitor().Visit(this);
}
-bool Type::isIntegerType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() >= BuiltinType::Bool &&
- BT->getKind() <= BuiltinType::Int128;
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
- // Incomplete enum types are not treated as integer types.
- // FIXME: In C++, enum types are never integer types.
- return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
- return false;
-}
-
bool Type::hasIntegerRepresentation() const {
if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isIntegerType();
@@ -597,25 +580,13 @@ bool Type::isIntegralType(ASTContext &Ctx) const {
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
- if (!Ctx.getLangOptions().CPlusPlus)
+ if (!Ctx.getLangOpts().CPlusPlus)
if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete(); // Complete enum types are integral in C.
return false;
}
-bool Type::isIntegralOrEnumerationType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() >= BuiltinType::Bool &&
- BT->getKind() <= BuiltinType::Int128;
-
- // Check for a complete enum type; incomplete enum types are not properly an
- // enumeration type in the sense required here.
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
- return ET->getDecl()->isComplete();
-
- return false;
-}
bool Type::isIntegralOrUnscopedEnumerationType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
@@ -633,11 +604,6 @@ bool Type::isIntegralOrUnscopedEnumerationType() const {
}
-bool Type::isBooleanType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::Bool;
- return false;
-}
bool Type::isCharType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
@@ -766,13 +732,6 @@ bool Type::hasUnsignedIntegerRepresentation() const {
return isUnsignedIntegerType();
}
-bool Type::isHalfType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() == BuiltinType::Half;
- // FIXME: Should we allow complex __fp16? Probably not.
- return false;
-}
-
bool Type::isFloatingType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Half &&
@@ -819,21 +778,6 @@ bool Type::isArithmeticType() const {
return isa<ComplexType>(CanonicalType);
}
-bool Type::isScalarType() const {
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
- return BT->getKind() > BuiltinType::Void &&
- BT->getKind() <= BuiltinType::NullPtr;
- if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
- // Enums are scalar types, but only if they are defined. Incomplete enums
- // are not treated as scalar types.
- return ET->getDecl()->isComplete();
- return isa<PointerType>(CanonicalType) ||
- isa<BlockPointerType>(CanonicalType) ||
- isa<MemberPointerType>(CanonicalType) ||
- isa<ComplexType>(CanonicalType) ||
- isa<ObjCObjectPointerType>(CanonicalType);
-}
-
Type::ScalarTypeKind Type::getScalarTypeKind() const {
assert(isScalarType());
@@ -897,37 +841,56 @@ bool Type::isConstantSizeType() const {
/// isIncompleteType - Return true if this is an incomplete type (C99 6.2.5p1)
/// - a type that can describe objects, but which lacks information needed to
/// determine its size.
-bool Type::isIncompleteType() const {
+bool Type::isIncompleteType(NamedDecl **Def) const {
+ if (Def)
+ *Def = 0;
+
switch (CanonicalType->getTypeClass()) {
default: return false;
case Builtin:
// Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never
// be completed.
return isVoidType();
- case Enum:
+ case Enum: {
+ EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = EnumD;
+
// An enumeration with fixed underlying type is complete (C++0x 7.2p3).
- if (cast<EnumType>(CanonicalType)->getDecl()->isFixed())
- return false;
- // Fall through.
- case Record:
+ if (EnumD->isFixed())
+ return false;
+
+ return !EnumD->isCompleteDefinition();
+ }
+ case Record: {
// A tagged type (struct/union/enum/class) is incomplete if the decl is a
// forward declaration, but not a full definition (C99 6.2.5p22).
- return !cast<TagType>(CanonicalType)->getDecl()->isCompleteDefinition();
+ RecordDecl *Rec = cast<RecordType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = Rec;
+ return !Rec->isCompleteDefinition();
+ }
case ConstantArray:
// An array is incomplete if its element type is incomplete
// (C++ [dcl.array]p1).
// We don't handle variable arrays (they're not allowed in C++) or
// dependent-sized arrays (dependent types are never treated as incomplete).
- return cast<ArrayType>(CanonicalType)->getElementType()->isIncompleteType();
+ return cast<ArrayType>(CanonicalType)->getElementType()
+ ->isIncompleteType(Def);
case IncompleteArray:
// An array of unknown size is an incomplete type (C99 6.2.5p22).
return true;
case ObjCObject:
return cast<ObjCObjectType>(CanonicalType)->getBaseType()
- ->isIncompleteType();
- case ObjCInterface:
+ ->isIncompleteType(Def);
+ case ObjCInterface: {
// ObjC interfaces are incomplete if they are @class, not @interface.
- return cast<ObjCInterfaceType>(CanonicalType)->getDecl()->isForwardDecl();
+ ObjCInterfaceDecl *Interface
+ = cast<ObjCInterfaceType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = Interface;
+ return !Interface->hasDefinition();
+ }
}
}
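// Sketch of how a caller might use the new out-parameter (the
// diagnostic shown is hypothetical): one query now yields both the
// answer and the declaration to point at in a note.
//   NamedDecl *Def = 0;
//   if (T->isIncompleteType(&Def) && Def)
//     Diag(Loc, diag::note_forward_declaration) << Def;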
@@ -944,7 +907,7 @@ bool QualType::isPODType(ASTContext &Context) const {
if ((*this)->isIncompleteType())
return false;
- if (Context.getLangOptions().ObjCAutoRefCount) {
+ if (Context.getLangOpts().ObjCAutoRefCount) {
switch (getObjCLifetime()) {
case Qualifiers::OCL_ExplicitNone:
return true;
@@ -1006,7 +969,7 @@ bool QualType::isTrivialType(ASTContext &Context) const {
if ((*this)->isIncompleteType())
return false;
- if (Context.getLangOptions().ObjCAutoRefCount) {
+ if (Context.getLangOpts().ObjCAutoRefCount) {
switch (getObjCLifetime()) {
case Qualifiers::OCL_ExplicitNone:
return true;
@@ -1056,7 +1019,7 @@ bool QualType::isTriviallyCopyableType(ASTContext &Context) const {
if ((*this)->isArrayType())
return Context.getBaseElementType(*this).isTrivialType(Context);
- if (Context.getLangOptions().ObjCAutoRefCount) {
+ if (Context.getLangOpts().ObjCAutoRefCount) {
switch (getObjCLifetime()) {
case Qualifiers::OCL_ExplicitNone:
return true;
@@ -1126,15 +1089,13 @@ bool Type::isLiteralType() const {
if (BaseTy->isIncompleteType())
return false;
- // Objective-C lifetime types are not literal types.
- if (BaseTy->isObjCRetainableType())
- return false;
-
// C++0x [basic.types]p10:
// A type is a literal type if it is:
// -- a scalar type; or
- // As an extension, Clang treats vector types as literal types.
- if (BaseTy->isScalarType() || BaseTy->isVectorType())
+ // As an extension, Clang treats vector types and complex types as
+ // literal types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType() ||
+ BaseTy->isAnyComplexType())
return true;
// -- a reference type; or
if (BaseTy->isReferenceType())
@@ -1203,7 +1164,7 @@ bool QualType::isCXX11PODType(ASTContext &Context) const {
if (ty->isDependentType())
return false;
- if (Context.getLangOptions().ObjCAutoRefCount) {
+ if (Context.getLangOpts().ObjCAutoRefCount) {
switch (getObjCLifetime()) {
case Qualifiers::OCL_ExplicitNone:
return true;
@@ -1272,6 +1233,10 @@ bool Type::isPromotableIntegerType() const {
case BuiltinType::UChar:
case BuiltinType::Short:
case BuiltinType::UShort:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
return true;
default:
return false;
@@ -1284,21 +1249,12 @@ bool Type::isPromotableIntegerType() const {
|| ET->getDecl()->isScoped())
return false;
- const BuiltinType *BT
- = ET->getDecl()->getPromotionType()->getAs<BuiltinType>();
- return BT->getKind() == BuiltinType::Int
- || BT->getKind() == BuiltinType::UInt;
+ return true;
}
return false;
}
-bool Type::isNullPtrType() const {
- if (const BuiltinType *BT = getAs<BuiltinType>())
- return BT->getKind() == BuiltinType::NullPtr;
- return false;
-}
-
bool Type::isSpecifierType() const {
// Note that this intentionally does not use the canonical type.
switch (getTypeClass()) {
@@ -1346,7 +1302,6 @@ TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
}
llvm_unreachable("Type specifier is not a tag type kind.");
- return TTK_Union;
}
ElaboratedTypeKeyword
@@ -1401,7 +1356,6 @@ TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
}
llvm_unreachable("Unknown elaborated type keyword.");
- return "";
}
DependentTemplateSpecializationType::DependentTemplateSpecializationType(
@@ -1461,7 +1415,6 @@ const char *Type::getTypeClassName() const {
}
llvm_unreachable("Invalid type class.");
- return 0;
}
const char *BuiltinType::getName(const PrintingPolicy &Policy) const {
@@ -1475,13 +1428,13 @@ const char *BuiltinType::getName(const PrintingPolicy &Policy) const {
case Int: return "int";
case Long: return "long";
case LongLong: return "long long";
- case Int128: return "__int128_t";
+ case Int128: return "__int128";
case UChar: return "unsigned char";
case UShort: return "unsigned short";
case UInt: return "unsigned int";
case ULong: return "unsigned long";
case ULongLong: return "unsigned long long";
- case UInt128: return "__uint128_t";
+ case UInt128: return "unsigned __int128";
case Half: return "half";
case Float: return "float";
case Double: return "double";
@@ -1493,15 +1446,16 @@ const char *BuiltinType::getName(const PrintingPolicy &Policy) const {
case NullPtr: return "nullptr_t";
case Overload: return "<overloaded function type>";
case BoundMember: return "<bound member function type>";
+ case PseudoObject: return "<pseudo-object type>";
case Dependent: return "<dependent type>";
case UnknownAny: return "<unknown type>";
+ case ARCUnbridgedCast: return "<ARC unbridged cast type>";
case ObjCId: return "id";
case ObjCClass: return "Class";
case ObjCSel: return "SEL";
}
llvm_unreachable("Invalid builtin type.");
- return 0;
}
QualType QualType::getNonLValueExprType(ASTContext &Context) const {
@@ -1513,7 +1467,7 @@ QualType QualType::getNonLValueExprType(ASTContext &Context) const {
// have cv-unqualified types.
//
// See also C99 6.3.2.1p2.
- if (!Context.getLangOptions().CPlusPlus ||
+ if (!Context.getLangOpts().CPlusPlus ||
(!getTypePtr()->isDependentType() && !getTypePtr()->isRecordType()))
return getUnqualifiedType();
@@ -1524,7 +1478,6 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
switch (CC) {
case CC_Default:
llvm_unreachable("no name for default cc");
- return "";
case CC_C: return "cdecl";
case CC_X86StdCall: return "stdcall";
@@ -1536,14 +1489,13 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
}
llvm_unreachable("Invalid calling convention.");
- return "";
}
FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
unsigned numArgs, QualType canonical,
const ExtProtoInfo &epi)
- : FunctionType(FunctionProto, result, epi.Variadic, epi.TypeQuals,
- epi.RefQualifier, canonical,
+ : FunctionType(FunctionProto, result, epi.TypeQuals, epi.RefQualifier,
+ canonical,
result->isDependentType(),
result->isInstantiationDependentType(),
result->isVariablyModifiedType(),
@@ -1551,7 +1503,8 @@ FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
epi.ExtInfo),
NumArgs(numArgs), NumExceptions(epi.NumExceptions),
ExceptionSpecType(epi.ExceptionSpecType),
- HasAnyConsumedArgs(epi.ConsumedArguments != 0)
+ HasAnyConsumedArgs(epi.ConsumedArguments != 0),
+ Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn)
{
// Fill in the trailing argument array.
QualType *argSlot = reinterpret_cast<QualType*>(this+1);
@@ -1649,8 +1602,8 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
// This is followed by an optional "consumed argument" section of the
// same length as the first type sequence:
// bool*
- // Finally, we have the ext info:
- // int
+ // Finally, we have the ext info and trailing return type flag:
+ // int bool
//
// There is no ambiguity between the consumed arguments and an empty EH
// spec because of the leading 'bool' which unambiguously indicates
@@ -1682,6 +1635,7 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
ID.AddBoolean(epi.ConsumedArguments[i]);
}
epi.ExtInfo.Profile(ID);
+ ID.AddBoolean(epi.HasTrailingReturn);
}
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
@@ -1719,7 +1673,10 @@ void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
}
DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
- : Type(Decltype, can, E->isTypeDependent(),
+ // C++11 [temp.type]p2: "If an expression e involves a template parameter,
+ // decltype(e) denotes a unique dependent type." Hence a decltype type is
+ // type-dependent even if its expression is only instantiation-dependent.
+ : Type(Decltype, can, E->isInstantiationDependent(),
E->isInstantiationDependent(),
E->getType()->isVariablyModifiedType(),
E->containsUnexpandedParameterPack()),
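// Example of the rule cited above: sizeof(T) has the non-dependent
// type size_t, yet because it involves the template parameter T,
// decltype(sizeof(T)) is treated as a unique dependent type.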
@@ -1785,14 +1742,6 @@ CXXRecordDecl *InjectedClassNameType::getDecl() const {
return cast<CXXRecordDecl>(getInterestingTagDecl(Decl));
}
-bool RecordType::classof(const TagType *TT) {
- return isa<RecordDecl>(TT->getDecl());
-}
-
-bool EnumType::classof(const TagType *TT) {
- return isa<EnumDecl>(TT->getDecl());
-}
-
IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
return isCanonicalUnqualified() ? 0 : getDecl()->getIdentifier();
}
@@ -1872,8 +1821,10 @@ TemplateSpecializationType(TemplateName T,
Canon.isNull()? T.isDependent() : Canon->isDependentType(),
Canon.isNull()? T.isDependent()
: Canon->isInstantiationDependentType(),
- false, T.containsUnexpandedParameterPack()),
- Template(T), NumArgs(NumArgs) {
+ false,
+ Canon.isNull()? T.containsUnexpandedParameterPack()
+ : Canon->containsUnexpandedParameterPack()),
+ Template(T), NumArgs(NumArgs), TypeAlias(!AliasedType.isNull()) {
assert(!T.getAsDependentTemplateName() &&
"Use DependentTemplateSpecializationType for dependent template-name");
assert((T.getKind() == TemplateName::Template ||
@@ -1905,17 +1856,14 @@ TemplateSpecializationType(TemplateName T,
if (Args[Arg].getKind() == TemplateArgument::Type &&
Args[Arg].getAsType()->isVariablyModifiedType())
setVariablyModified();
- if (Args[Arg].containsUnexpandedParameterPack())
+ if (Canon.isNull() && Args[Arg].containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
}
// Store the aliased type if this is a type alias template specialization.
- bool IsTypeAlias = !AliasedType.isNull();
- assert(IsTypeAlias == isTypeAlias() &&
- "allocated wrong size for type alias");
- if (IsTypeAlias) {
+ if (TypeAlias) {
TemplateArgument *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
*reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType;
}
@@ -1932,11 +1880,6 @@ TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
Args[Idx].Profile(ID, Context);
}
-bool TemplateSpecializationType::isTypeAlias() const {
- TemplateDecl *D = Template.getAsTemplateDecl();
- return D && isa<TypeAliasTemplateDecl>(D);
-}
-
QualType
QualifierCollector::apply(const ASTContext &Context, QualType QT) const {
if (!hasNonFastQualifiers())
@@ -1970,21 +1913,22 @@ namespace {
/// \brief The cached properties of a type.
class CachedProperties {
- char linkage;
- char visibility;
+ NamedDecl::LinkageInfo LV;
bool local;
public:
- CachedProperties(Linkage linkage, Visibility visibility, bool local)
- : linkage(linkage), visibility(visibility), local(local) {}
+ CachedProperties(NamedDecl::LinkageInfo LV, bool local)
+ : LV(LV), local(local) {}
- Linkage getLinkage() const { return (Linkage) linkage; }
- Visibility getVisibility() const { return (Visibility) visibility; }
+ Linkage getLinkage() const { return LV.linkage(); }
+ Visibility getVisibility() const { return LV.visibility(); }
+ bool isVisibilityExplicit() const { return LV.visibilityExplicit(); }
bool hasLocalOrUnnamedType() const { return local; }
friend CachedProperties merge(CachedProperties L, CachedProperties R) {
- return CachedProperties(minLinkage(L.getLinkage(), R.getLinkage()),
- minVisibility(L.getVisibility(), R.getVisibility()),
+ NamedDecl::LinkageInfo MergedLV = L.LV;
+ MergedLV.merge(R.LV);
+ return CachedProperties(MergedLV,
L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType());
}
};
@@ -2004,9 +1948,10 @@ public:
static CachedProperties get(const Type *T) {
ensure(T);
- return CachedProperties(T->TypeBits.getLinkage(),
- T->TypeBits.getVisibility(),
- T->TypeBits.hasLocalOrUnnamedType());
+ NamedDecl::LinkageInfo LV(T->TypeBits.getLinkage(),
+ T->TypeBits.getVisibility(),
+ T->TypeBits.isVisibilityExplicit());
+ return CachedProperties(LV, T->TypeBits.hasLocalOrUnnamedType());
}
static void ensure(const Type *T) {
@@ -2020,6 +1965,8 @@ public:
ensure(CT);
T->TypeBits.CacheValidAndVisibility =
CT->TypeBits.CacheValidAndVisibility;
+ T->TypeBits.CachedExplicitVisibility =
+ CT->TypeBits.CachedExplicitVisibility;
T->TypeBits.CachedLinkage = CT->TypeBits.CachedLinkage;
T->TypeBits.CachedLocalOrUnnamed = CT->TypeBits.CachedLocalOrUnnamed;
return;
@@ -2028,6 +1975,7 @@ public:
// Compute the cached properties and then set the cache.
CachedProperties Result = computeCachedProperties(T);
T->TypeBits.CacheValidAndVisibility = Result.getVisibility() + 1U;
+ T->TypeBits.CachedExplicitVisibility = Result.isVisibilityExplicit();
assert(T->TypeBits.isCacheValid() &&
T->TypeBits.getVisibility() == Result.getVisibility());
T->TypeBits.CachedLinkage = Result.getLinkage();
@@ -2055,13 +2003,13 @@ static CachedProperties computeCachedProperties(const Type *T) {
#include "clang/AST/TypeNodes.def"
// Treat instantiation-dependent types as external.
assert(T->isInstantiationDependentType());
- return CachedProperties(ExternalLinkage, DefaultVisibility, false);
+ return CachedProperties(NamedDecl::LinkageInfo(), false);
case Type::Builtin:
// C++ [basic.link]p8:
// A type is said to have linkage if and only if:
// - it is a fundamental type (3.9.1); or
- return CachedProperties(ExternalLinkage, DefaultVisibility, false);
+ return CachedProperties(NamedDecl::LinkageInfo(), false);
case Type::Record:
case Type::Enum: {
@@ -2075,7 +2023,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
bool IsLocalOrUnnamed =
Tag->getDeclContext()->isFunctionOrMethod() ||
(!Tag->getIdentifier() && !Tag->getTypedefNameForAnonDecl());
- return CachedProperties(LV.linkage(), LV.visibility(), IsLocalOrUnnamed);
+ return CachedProperties(LV, IsLocalOrUnnamed);
}
// C++ [basic.link]p8:
@@ -2115,7 +2063,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
case Type::ObjCInterface: {
NamedDecl::LinkageInfo LV =
cast<ObjCInterfaceType>(T)->getDecl()->getLinkageAndVisibility();
- return CachedProperties(LV.linkage(), LV.visibility(), false);
+ return CachedProperties(LV, false);
}
case Type::ObjCObject:
return Cache::get(cast<ObjCObjectType>(T)->getBaseType());
@@ -2126,10 +2074,6 @@ static CachedProperties computeCachedProperties(const Type *T) {
}
llvm_unreachable("unhandled type class");
-
- // C++ [basic.link]p8:
- // Names not covered by these rules have no linkage.
- return CachedProperties(NoLinkage, DefaultVisibility, false);
}
/// \brief Determine the linkage of this type.
@@ -2144,6 +2088,11 @@ Visibility Type::getVisibility() const {
return TypeBits.getVisibility();
}
+bool Type::isVisibilityExplicit() const {
+ Cache::ensure(this);
+ return TypeBits.isVisibilityExplicit();
+}
+
bool Type::hasUnnamedOrLocalType() const {
Cache::ensure(this);
return TypeBits.hasLocalOrUnnamedType();
@@ -2285,7 +2234,7 @@ bool QualType::hasTrivialAssignment(ASTContext &Context, bool Copying) const {
case Qualifiers::OCL_Autoreleasing:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Weak:
- return !Context.getLangOptions().ObjCAutoRefCount;
+ return !Context.getLangOpts().ObjCAutoRefCount;
}
if (const CXXRecordDecl *Record
diff --git a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
index 8e8b227..caa19b1 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
@@ -196,55 +196,54 @@ SourceRange TypeOfExprTypeLoc::getLocalSourceRange() const {
TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
if (needsExtraLocalData())
return static_cast<TypeSpecifierType>(getWrittenBuiltinSpecs().Type);
- else {
- switch (getTypePtr()->getKind()) {
- case BuiltinType::Void:
- return TST_void;
- case BuiltinType::Bool:
- return TST_bool;
- case BuiltinType::Char_U:
- case BuiltinType::Char_S:
- return TST_char;
- case BuiltinType::Char16:
- return TST_char16;
- case BuiltinType::Char32:
- return TST_char32;
- case BuiltinType::WChar_S:
- case BuiltinType::WChar_U:
- return TST_wchar;
-
- case BuiltinType::UChar:
- case BuiltinType::UShort:
- case BuiltinType::UInt:
- case BuiltinType::ULong:
- case BuiltinType::ULongLong:
- case BuiltinType::UInt128:
- case BuiltinType::SChar:
- case BuiltinType::Short:
- case BuiltinType::Int:
- case BuiltinType::Long:
- case BuiltinType::LongLong:
- case BuiltinType::Int128:
- case BuiltinType::Half:
- case BuiltinType::Float:
- case BuiltinType::Double:
- case BuiltinType::LongDouble:
- llvm_unreachable("Builtin type needs extra local data!");
- // Fall through, if the impossible happens.
-
- case BuiltinType::NullPtr:
- case BuiltinType::Overload:
- case BuiltinType::Dependent:
- case BuiltinType::BoundMember:
- case BuiltinType::UnknownAny:
- case BuiltinType::ObjCId:
- case BuiltinType::ObjCClass:
- case BuiltinType::ObjCSel:
- return TST_unspecified;
- }
+ switch (getTypePtr()->getKind()) {
+ case BuiltinType::Void:
+ return TST_void;
+ case BuiltinType::Bool:
+ return TST_bool;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ return TST_char;
+ case BuiltinType::Char16:
+ return TST_char16;
+ case BuiltinType::Char32:
+ return TST_char32;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ return TST_wchar;
+ case BuiltinType::UChar:
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::UInt128:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong:
+ case BuiltinType::Int128:
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ llvm_unreachable("Builtin type needs extra local data!");
+ // Fall through, if the impossible happens.
+
+ case BuiltinType::NullPtr:
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::BoundMember:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ case BuiltinType::PseudoObject:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return TST_unspecified;
}
-
- return TST_unspecified;
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
}
TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
@@ -255,7 +254,7 @@ TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
- setKeywordLoc(Loc);
+ setElaboratedKeywordLoc(Loc);
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
setQualifierLoc(Builder.getWithLocInContext(Context));
@@ -263,17 +262,17 @@ void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
void DependentNameTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
- setKeywordLoc(Loc);
+ setElaboratedKeywordLoc(Loc);
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
setQualifierLoc(Builder.getWithLocInContext(Context));
setNameLoc(Loc);
}
-void
-DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+void
+DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
- setKeywordLoc(Loc);
+ setElaboratedKeywordLoc(Loc);
if (getTypePtr()->getQualifier()) {
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
@@ -281,8 +280,8 @@ DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
} else {
setQualifierLoc(NestedNameSpecifierLoc());
}
-
- setNameLoc(Loc);
+ setTemplateKeywordLoc(Loc);
+ setTemplateNameLoc(Loc);
setLAngleLoc(Loc);
setRAngleLoc(Loc);
TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
@@ -302,8 +301,7 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
case TemplateArgument::Integral:
case TemplateArgument::Pack:
case TemplateArgument::Expression:
- // FIXME: Can we do better for declarations and integral values?
- ArgInfos[i] = TemplateArgumentLocInfo();
+ ArgInfos[i] = TemplateArgumentLocInfo(Args[i].getAsExpr());
break;
case TemplateArgument::Type:
@@ -332,4 +330,3 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
}
}
}
-
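The rewritten getTypeSpecType above replaces the old fall-through-and-trailing-return shape with a fully covered switch followed by llvm_unreachable, so -Wswitch can flag any BuiltinType kind added later. A minimal sketch of that idiom, using an invented enum rather than Clang's:

#include <cstdio>
#include <cstdlib>

enum class Kind { Void, Bool, Char };

// Covered switch with no default: the compiler can warn when a new
// Kind is left unhandled, and control never falls out of the switch.
static const char *name(Kind K) {
  switch (K) {
  case Kind::Void: return "void";
  case Kind::Bool: return "bool";
  case Kind::Char: return "char";
  }
  // Stands in for llvm_unreachable("Invalid BuiltinType Kind!").
  std::fprintf(stderr, "invalid Kind\n");
  std::abort();
}

int main() { std::puts(name(Kind::Bool)); }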
diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
index fb7b918..3bf80e7 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
@@ -75,7 +75,7 @@ static void AppendTypeQualList(std::string &S, unsigned TypeQuals) {
void TypePrinter::print(QualType t, std::string &buffer) {
SplitQualType split = t.split();
- print(split.first, split.second, buffer);
+ print(split.Ty, split.Quals, buffer);
}
void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
@@ -264,8 +264,9 @@ void TypePrinter::printRValueReference(const RValueReferenceType *T,
void TypePrinter::printMemberPointer(const MemberPointerType *T,
std::string &S) {
- std::string C;
- print(QualType(T->getClass(), 0), C);
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressTag = true;
+ std::string C = QualType(T->getClass(), 0).getAsString(InnerPolicy);
C += "::*";
S = C + S;
@@ -397,6 +398,35 @@ void TypePrinter::printExtVector(const ExtVectorType *T, std::string &S) {
print(T->getElementType(), S);
}
+void
+FunctionProtoType::printExceptionSpecification(std::string &S,
+ PrintingPolicy Policy) const {
+
+ if (hasDynamicExceptionSpec()) {
+ S += " throw(";
+ if (getExceptionSpecType() == EST_MSAny)
+ S += "...";
+ else
+ for (unsigned I = 0, N = getNumExceptions(); I != N; ++I) {
+ if (I)
+ S += ", ";
+
+ S += getExceptionType(I).getAsString(Policy);
+ }
+ S += ")";
+ } else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
+ S += " noexcept";
+ if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ S += "(";
+ llvm::raw_string_ostream EOut(S);
+ getNoexceptExpr()->printPretty(EOut, 0, Policy);
+ EOut.flush();
+ S += EOut.str();
+ S += ")";
+ }
+ }
+}
+
void TypePrinter::printFunctionProto(const FunctionProtoType *T,
std::string &S) {
// If needed for precedence reasons, wrap the inner part in grouping parens.
@@ -427,8 +457,7 @@ void TypePrinter::printFunctionProto(const FunctionProtoType *T,
FunctionType::ExtInfo Info = T->getExtInfo();
switch(Info.getCC()) {
- case CC_Default:
- default: break;
+ case CC_Default: break;
case CC_C:
S += " __attribute__((cdecl))";
break;
@@ -471,34 +500,13 @@ void TypePrinter::printFunctionProto(const FunctionProtoType *T,
S += " &&";
break;
}
-
- if (T->hasDynamicExceptionSpec()) {
- S += " throw(";
- if (T->getExceptionSpecType() == EST_MSAny)
- S += "...";
- else
- for (unsigned I = 0, N = T->getNumExceptions(); I != N; ++I) {
- if (I)
- S += ", ";
-
- std::string ExceptionType;
- print(T->getExceptionType(I), ExceptionType);
- S += ExceptionType;
- }
- S += ")";
- } else if (isNoexceptExceptionSpec(T->getExceptionSpecType())) {
- S += " noexcept";
- if (T->getExceptionSpecType() == EST_ComputedNoexcept) {
- S += "(";
- llvm::raw_string_ostream EOut(S);
- T->getNoexceptExpr()->printPretty(EOut, 0, Policy);
- EOut.flush();
- S += EOut.str();
- S += ")";
- }
- }
-
- print(T->getResultType(), S);
+ T->printExceptionSpecification(S, Policy);
+ if (T->hasTrailingReturn()) {
+ std::string ResultS;
+ print(T->getResultType(), ResultS);
+ S = "auto " + S + " -> " + ResultS;
+ } else
+ print(T->getResultType(), S);
}
void TypePrinter::printFunctionNoProto(const FunctionNoProtoType *T,
@@ -600,6 +608,9 @@ void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
unsigned OldSize = Buffer.size();
if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
+ if (Policy.SuppressUnwrittenScope &&
+ (NS->isAnonymousNamespace() || NS->isInline()))
+ return;
if (NS->getIdentifier())
Buffer += NS->getNameAsString();
else
@@ -620,6 +631,8 @@ void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
Buffer += Typedef->getIdentifier()->getName();
else if (Tag->getIdentifier())
Buffer += Tag->getIdentifier()->getName();
+ else
+ return;
}
if (Buffer.size() != OldSize)
@@ -660,8 +673,14 @@ void TypePrinter::printTag(TagDecl *D, std::string &InnerString) {
// Make an unambiguous representation for anonymous types, e.g.
// <anonymous enum at /usr/include/string.h:120:9>
llvm::raw_string_ostream OS(Buffer);
- OS << "<anonymous";
-
+
+ if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
+ OS << "<lambda";
+ HasKindDecoration = true;
+ } else {
+ OS << "<anonymous";
+ }
+
if (Policy.AnonymousTagLocations) {
// Suppress the redundant tag keyword if we just printed one.
// We don't have to worry about ElaboratedTypes here because you can't
@@ -930,7 +949,7 @@ void TypePrinter::printAttributed(const AttributedType *T,
case AttributedType::attr_objc_ownership:
S += "objc_ownership(";
switch (T->getEquivalentType().getObjCLifetime()) {
- case Qualifiers::OCL_None: llvm_unreachable("no ownership!"); break;
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
case Qualifiers::OCL_ExplicitNone: S += "none"; break;
case Qualifiers::OCL_Strong: S += "strong"; break;
case Qualifiers::OCL_Weak: S += "weak"; break;
@@ -1157,9 +1176,21 @@ void Qualifiers::getAsStringInternal(std::string &S,
AppendTypeQualList(S, getCVRQualifiers());
if (unsigned addrspace = getAddressSpace()) {
if (!S.empty()) S += ' ';
- S += "__attribute__((address_space(";
- S += llvm::utostr_32(addrspace);
- S += ")))";
+ switch (addrspace) {
+ case LangAS::opencl_global:
+ S += "__global";
+ break;
+ case LangAS::opencl_local:
+ S += "__local";
+ break;
+ case LangAS::opencl_constant:
+ S += "__constant";
+ break;
+ default:
+ S += "__attribute__((address_space(";
+ S += llvm::utostr_32(addrspace);
+ S += ")))";
+ }
}
if (Qualifiers::GC gc = getObjCGCAttr()) {
if (!S.empty()) S += ' ';
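printFunctionProto above now handles C++11 trailing return types: instead of prefixing the result type, it wraps the declarator built so far as auto <declarator> -> <result>. A self-contained illustration of the two layouts (names are invented for the sketch):

#include <iostream>
#include <string>

// Declarator printing works inside-out: S already holds e.g. "f(int)",
// and the result type is attached around it afterwards.
static std::string withResult(const std::string &S, const std::string &Result,
                              bool TrailingReturn) {
  if (TrailingReturn)
    return "auto " + S + " -> " + Result; // C++11 trailing form
  return Result + " " + S;                // classic prefix form
}

int main() {
  std::cout << withResult("f(int)", "int", false) << '\n'; // int f(int)
  std::cout << withResult("f(int)", "int", true) << '\n';  // auto f(int) -> int
}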
diff --git a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
index 7765817..7a45972 100644
--- a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
@@ -466,10 +466,10 @@ public:
static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
const CXXMethodDecl *RHS) {
- ASTContext &C = LHS->getASTContext(); // TODO: thread this down
- CanQual<FunctionProtoType>
- LT = C.getCanonicalType(LHS->getType()).getAs<FunctionProtoType>(),
- RT = C.getCanonicalType(RHS->getType()).getAs<FunctionProtoType>();
+ const FunctionProtoType *LT =
+ cast<FunctionProtoType>(LHS->getType().getCanonicalType());
+ const FunctionProtoType *RT =
+ cast<FunctionProtoType>(RHS->getType().getCanonicalType());
// Fast-path matches in the canonical types.
if (LT == RT) return true;
@@ -477,7 +477,7 @@ static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
// Force the signatures to match. We can't rely on the overrides
// list here because there isn't necessarily an inheritance
// relationship between the two methods.
- if (LT.getQualifiers() != RT.getQualifiers() ||
+ if (LT->getTypeQuals() != RT->getTypeQuals() ||
LT->getNumArgs() != RT->getNumArgs())
return false;
for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
@@ -996,7 +996,7 @@ public:
LayoutVTable();
- if (Context.getLangOptions().DumpVTableLayouts)
+ if (Context.getLangOpts().DumpVTableLayouts)
dumpLayout(llvm::errs());
}
@@ -1580,7 +1580,7 @@ void VTableBuilder::LayoutVTable() {
LayoutVTablesForVirtualBases(MostDerivedClass, VBases);
// -fapple-kext adds an extra entry at end of vtbl.
- bool IsAppleKext = Context.getLangOptions().AppleKext;
+ bool IsAppleKext = Context.getLangOpts().AppleKext;
if (IsAppleKext)
Components.push_back(VTableComponent::MakeVCallOffset(CharUnits::Zero()));
}
@@ -2136,7 +2136,8 @@ void VTableBuilder::dumpLayout(raw_ostream& Out) {
uint64_t VTableIndex = I->first;
const std::string &MethodName = I->second;
- Out << llvm::format(" %4u | ", VTableIndex) << MethodName << '\n';
+ Out << llvm::format(" %4" PRIu64 " | ", VTableIndex) << MethodName
+ << '\n';
}
}
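The dumpLayout change above swaps a %4u conversion for the <inttypes.h> PRIu64 macro, since uint64_t is not unsigned int on every target. A quick reminder of how the macro splices into a format string (the method name is made up):

#include <cinttypes>
#include <cstdio>

int main() {
  std::uint64_t VTableIndex = 42;
  // PRIu64 expands to this platform's conversion for uint64_t ("llu",
  // "lu", ...); adjacent string literals concatenate around it.
  std::printf(" %4" PRIu64 " | %s\n", VTableIndex, "~A::B()");
  return 0;
}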
diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
index 3dd194b..659cc6d 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/AnalysisContext.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -1,4 +1,4 @@
-//== AnalysisContext.cpp - Analysis context for Path Sens analysis -*- C++ -*-//
+//== AnalysisDeclContext.cpp - Analysis context for Path Sens analysis -*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines AnalysisContext, a class that manages the analysis context
+// This file defines AnalysisDeclContext, a class that manages the analysis context
// data for path sensitive analysis.
//
//===----------------------------------------------------------------------===//
@@ -24,18 +24,21 @@
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Support/BumpVector.h"
-#include "clang/Analysis/Support/SaveAndRestore.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
-AnalysisContext::AnalysisContext(const Decl *d,
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *d,
idx::TranslationUnit *tu,
const CFG::BuildOptions &buildOptions)
- : D(d), TU(tu),
+ : Manager(Mgr),
+ D(d),
+ TU(tu),
cfgBuildOptions(buildOptions),
forcedBlkExprs(0),
builtCFG(false),
@@ -46,9 +49,12 @@ AnalysisContext::AnalysisContext(const Decl *d,
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
-AnalysisContext::AnalysisContext(const Decl *d,
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *d,
idx::TranslationUnit *tu)
-: D(d), TU(tu),
+: Manager(Mgr),
+ D(d),
+ TU(tu),
forcedBlkExprs(0),
builtCFG(false),
builtCompleteCFG(false),
@@ -58,7 +64,7 @@ AnalysisContext::AnalysisContext(const Decl *d,
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
-AnalysisContextManager::AnalysisContextManager(bool useUnoptimizedCFG,
+AnalysisDeclContextManager::AnalysisDeclContextManager(bool useUnoptimizedCFG,
bool addImplicitDtors,
bool addInitializers) {
cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
@@ -66,13 +72,13 @@ AnalysisContextManager::AnalysisContextManager(bool useUnoptimizedCFG,
cfgBuildOptions.AddInitializers = addInitializers;
}
-void AnalysisContextManager::clear() {
+void AnalysisDeclContextManager::clear() {
for (ContextMap::iterator I = Contexts.begin(), E = Contexts.end(); I!=E; ++I)
delete I->second;
Contexts.clear();
}
-Stmt *AnalysisContext::getBody() const {
+Stmt *AnalysisDeclContext::getBody() const {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
return FD->getBody();
else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
@@ -86,14 +92,23 @@ Stmt *AnalysisContext::getBody() const {
llvm_unreachable("unknown code decl");
}
-const ImplicitParamDecl *AnalysisContext::getSelfDecl() const {
+const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getSelfDecl();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ // See if 'self' was captured by the block.
+ for (BlockDecl::capture_const_iterator it = BD->capture_begin(),
+ et = BD->capture_end(); it != et; ++it) {
+ const VarDecl *VD = it->getVariable();
+ if (VD->getName() == "self")
+ return dyn_cast<ImplicitParamDecl>(VD);
+ }
+ }
return NULL;
}
-void AnalysisContext::registerForcedBlockExpression(const Stmt *stmt) {
+void AnalysisDeclContext::registerForcedBlockExpression(const Stmt *stmt) {
if (!forcedBlkExprs)
forcedBlkExprs = new CFG::BuildOptions::ForcedBlkExprs();
// Default construct an entry for 'stmt'.
@@ -103,7 +118,7 @@ void AnalysisContext::registerForcedBlockExpression(const Stmt *stmt) {
}
const CFGBlock *
-AnalysisContext::getBlockForRegisteredExpression(const Stmt *stmt) {
+AnalysisDeclContext::getBlockForRegisteredExpression(const Stmt *stmt) {
assert(forcedBlkExprs);
if (const Expr *e = dyn_cast<Expr>(stmt))
stmt = e->IgnoreParens();
@@ -113,7 +128,7 @@ AnalysisContext::getBlockForRegisteredExpression(const Stmt *stmt) {
return itr->second;
}
-CFG *AnalysisContext::getCFG() {
+CFG *AnalysisDeclContext::getCFG() {
if (!cfgBuildOptions.PruneTriviallyFalseEdges)
return getUnoptimizedCFG();
@@ -127,7 +142,7 @@ CFG *AnalysisContext::getCFG() {
return cfg.get();
}
-CFG *AnalysisContext::getUnoptimizedCFG() {
+CFG *AnalysisDeclContext::getUnoptimizedCFG() {
if (!builtCompleteCFG) {
SaveAndRestore<bool> NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges,
false);
@@ -140,7 +155,7 @@ CFG *AnalysisContext::getUnoptimizedCFG() {
return completeCFG.get();
}
-CFGStmtMap *AnalysisContext::getCFGStmtMap() {
+CFGStmtMap *AnalysisDeclContext::getCFGStmtMap() {
if (cfgStmtMap)
return cfgStmtMap.get();
@@ -152,7 +167,7 @@ CFGStmtMap *AnalysisContext::getCFGStmtMap() {
return 0;
}
-CFGReverseBlockReachabilityAnalysis *AnalysisContext::getCFGReachablityAnalysis() {
+CFGReverseBlockReachabilityAnalysis *AnalysisDeclContext::getCFGReachablityAnalysis() {
if (CFA)
return CFA.get();
@@ -164,37 +179,49 @@ CFGReverseBlockReachabilityAnalysis *AnalysisContext::getCFGReachablityAnalysis(
return 0;
}
-void AnalysisContext::dumpCFG() {
- getCFG()->dump(getASTContext().getLangOptions());
+void AnalysisDeclContext::dumpCFG(bool ShowColors) {
+ getCFG()->dump(getASTContext().getLangOpts(), ShowColors);
}
-ParentMap &AnalysisContext::getParentMap() {
+ParentMap &AnalysisDeclContext::getParentMap() {
if (!PM)
PM.reset(new ParentMap(getBody()));
return *PM;
}
-PseudoConstantAnalysis *AnalysisContext::getPseudoConstantAnalysis() {
+PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
if (!PCA)
PCA.reset(new PseudoConstantAnalysis(getBody()));
return PCA.get();
}
-AnalysisContext *AnalysisContextManager::getContext(const Decl *D,
+AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D,
idx::TranslationUnit *TU) {
- AnalysisContext *&AC = Contexts[D];
+ AnalysisDeclContext *&AC = Contexts[D];
if (!AC)
- AC = new AnalysisContext(D, TU, cfgBuildOptions);
+ AC = new AnalysisDeclContext(this, D, TU, cfgBuildOptions);
return AC;
}
+const StackFrameContext *
+AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
+ const CFGBlock *Blk, unsigned Idx) {
+ return getLocationContextManager().getStackFrame(this, Parent, S, Blk, Idx);
+}
+
+LocationContextManager & AnalysisDeclContext::getLocationContextManager() {
+ assert(Manager &&
+ "Cannot create LocationContexts without an AnalysisDeclContextManager!");
+ return Manager->getLocationContextManager();
+}
+
//===----------------------------------------------------------------------===//
// FoldingSet profiling.
//===----------------------------------------------------------------------===//
void LocationContext::ProfileCommon(llvm::FoldingSetNodeID &ID,
ContextKind ck,
- AnalysisContext *ctx,
+ AnalysisDeclContext *ctx,
const LocationContext *parent,
const void *data) {
ID.AddInteger(ck);
@@ -204,15 +231,15 @@ void LocationContext::ProfileCommon(llvm::FoldingSetNodeID &ID,
}
void StackFrameContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisContext(), getParent(), CallSite, Block, Index);
+ Profile(ID, getAnalysisDeclContext(), getParent(), CallSite, Block, Index);
}
void ScopeContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisContext(), getParent(), Enter);
+ Profile(ID, getAnalysisDeclContext(), getParent(), Enter);
}
void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getAnalysisContext(), getParent(), BD);
+ Profile(ID, getAnalysisDeclContext(), getParent(), BD);
}
//===----------------------------------------------------------------------===//
@@ -221,7 +248,7 @@ void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
template <typename LOC, typename DATA>
const LOC*
-LocationContextManager::getLocationContext(AnalysisContext *ctx,
+LocationContextManager::getLocationContext(AnalysisDeclContext *ctx,
const LocationContext *parent,
const DATA *d) {
llvm::FoldingSetNodeID ID;
@@ -238,7 +265,7 @@ LocationContextManager::getLocationContext(AnalysisContext *ctx,
}
const StackFrameContext*
-LocationContextManager::getStackFrame(AnalysisContext *ctx,
+LocationContextManager::getStackFrame(AnalysisDeclContext *ctx,
const LocationContext *parent,
const Stmt *s,
const CFGBlock *blk, unsigned idx) {
@@ -255,7 +282,7 @@ LocationContextManager::getStackFrame(AnalysisContext *ctx,
}
const ScopeContext *
-LocationContextManager::getScope(AnalysisContext *ctx,
+LocationContextManager::getScope(AnalysisDeclContext *ctx,
const LocationContext *parent,
const Stmt *s) {
return getLocationContext<ScopeContext, Stmt>(ctx, parent, s);
@@ -308,8 +335,8 @@ namespace {
class FindBlockDeclRefExprsVals : public StmtVisitor<FindBlockDeclRefExprsVals>{
BumpVector<const VarDecl*> &BEVals;
BumpVectorContext &BC;
- llvm::DenseMap<const VarDecl*, unsigned> Visited;
- llvm::SmallSet<const DeclContext*, 4> IgnoredContexts;
+ llvm::SmallPtrSet<const VarDecl*, 4> Visited;
+ llvm::SmallPtrSet<const DeclContext*, 4> IgnoredContexts;
public:
FindBlockDeclRefExprsVals(BumpVector<const VarDecl*> &bevals,
BumpVectorContext &bc)
@@ -326,24 +353,14 @@ public:
Visit(child);
}
- void VisitDeclRefExpr(const DeclRefExpr *DR) {
+ void VisitDeclRefExpr(DeclRefExpr *DR) {
// Non-local variables are also directly modified.
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
if (!VD->hasLocalStorage()) {
- unsigned &flag = Visited[VD];
- if (!flag) {
- flag = 1;
+ if (Visited.insert(VD))
BEVals.push_back(VD, BC);
- }
- }
- }
-
- void VisitBlockDeclRefExpr(BlockDeclRefExpr *DR) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- unsigned &flag = Visited[VD];
- if (!flag) {
- flag = 1;
- if (IsTrackedDecl(VD))
+ } else if (DR->refersToEnclosingLocal()) {
+ if (Visited.insert(VD) && IsTrackedDecl(VD))
BEVals.push_back(VD, BC);
}
}
@@ -354,6 +371,16 @@ public:
IgnoredContexts.insert(BR->getBlockDecl());
Visit(BR->getBlockDecl()->getBody());
}
+
+ void VisitPseudoObjectExpr(PseudoObjectExpr *PE) {
+ for (PseudoObjectExpr::semantics_iterator it = PE->semantics_begin(),
+ et = PE->semantics_end(); it != et; ++it) {
+ Expr *Semantic = *it;
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
+ Semantic = OVE->getSourceExpr();
+ Visit(Semantic);
+ }
+ }
};
} // end anonymous namespace
@@ -377,9 +404,9 @@ static DeclVec* LazyInitializeReferencedDecls(const BlockDecl *BD,
return BV;
}
-std::pair<AnalysisContext::referenced_decls_iterator,
- AnalysisContext::referenced_decls_iterator>
-AnalysisContext::getReferencedBlockVars(const BlockDecl *BD) {
+std::pair<AnalysisDeclContext::referenced_decls_iterator,
+ AnalysisDeclContext::referenced_decls_iterator>
+AnalysisDeclContext::getReferencedBlockVars(const BlockDecl *BD) {
if (!ReferencedBlockVars)
ReferencedBlockVars = new llvm::DenseMap<const BlockDecl*,void*>();
@@ -387,7 +414,7 @@ AnalysisContext::getReferencedBlockVars(const BlockDecl *BD) {
return std::make_pair(V->begin(), V->end());
}
-ManagedAnalysis *&AnalysisContext::getAnalysisImpl(const void *tag) {
+ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) {
if (!ManagedAnalyses)
ManagedAnalyses = new ManagedAnalysisMap();
ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
@@ -400,7 +427,7 @@ ManagedAnalysis *&AnalysisContext::getAnalysisImpl(const void *tag) {
ManagedAnalysis::~ManagedAnalysis() {}
-AnalysisContext::~AnalysisContext() {
+AnalysisDeclContext::~AnalysisDeclContext() {
delete forcedBlkExprs;
delete ReferencedBlockVars;
// Release the managed analyses.
@@ -412,7 +439,7 @@ AnalysisContext::~AnalysisContext() {
}
}
-AnalysisContextManager::~AnalysisContextManager() {
+AnalysisDeclContextManager::~AnalysisDeclContextManager() {
for (ContextMap::iterator I = Contexts.begin(), E = Contexts.end(); I!=E; ++I)
delete I->second;
}
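AnalysisDeclContextManager::getContext keeps its one-context-per-Decl invariant by taking a reference into the map before testing it. A reduced model of that create-on-first-use idiom; the real code uses llvm::DenseMap and raw pointers, so std::map and unique_ptr here are stand-ins:

#include <map>
#include <memory>

struct Decl {};
struct Context {
  explicit Context(const Decl *D) : D(D) {}
  const Decl *D;
};

struct Manager {
  // operator[] default-constructs the slot; filling it through the
  // reference guarantees repeated lookups return the same Context.
  Context *getContext(const Decl *D) {
    std::unique_ptr<Context> &C = Contexts[D];
    if (!C)
      C.reset(new Context(D));
    return C.get();
  }
  std::map<const Decl *, std::unique_ptr<Context> > Contexts;
};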
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
index 83c7384..d1334a5 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Analysis/Support/SaveAndRestore.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "clang/Analysis/CFG.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
@@ -250,7 +250,7 @@ class CFGBuilder {
typedef BlockScopePosPair JumpSource;
ASTContext *Context;
- llvm::OwningPtr<CFG> cfg;
+ OwningPtr<CFG> cfg;
CFGBlock *Block;
CFGBlock *Succ;
@@ -286,6 +286,11 @@ class CFGBuilder {
CFG::BuildOptions::ForcedBlkExprs::value_type *cachedEntry;
const Stmt *lastLookup;
+ // Caches boolean evaluations of expressions to avoid multiple re-evaluations
+ // during construction of branches for chained logical operators.
+ typedef llvm::DenseMap<Expr *, TryResult> CachedBoolEvalsTy;
+ CachedBoolEvalsTy CachedBoolEvals;
+
public:
explicit CFGBuilder(ASTContext *astContext,
const CFG::BuildOptions &buildOpts)
@@ -305,7 +310,6 @@ private:
// Visitors to walk an AST and construct the CFG.
CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
CFGBlock *VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc);
- CFGBlock *VisitBlockExpr(BlockExpr *E, AddStmtChoice asc);
CFGBlock *VisitBreakStmt(BreakStmt *B);
CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E,
@@ -331,19 +335,23 @@ private:
CFGBlock *VisitDeclSubExpr(DeclStmt *DS);
CFGBlock *VisitDefaultStmt(DefaultStmt *D);
CFGBlock *VisitDoStmt(DoStmt *D);
+ CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
CFGBlock *VisitForStmt(ForStmt *F);
CFGBlock *VisitGotoStmt(GotoStmt *G);
CFGBlock *VisitIfStmt(IfStmt *I);
CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
CFGBlock *VisitLabelStmt(LabelStmt *L);
+ CFGBlock *VisitLambdaExpr(LambdaExpr *L);
CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
+ CFGBlock *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
CFGBlock *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
CFGBlock *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
CFGBlock *VisitReturnStmt(ReturnStmt *R);
+ CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
AddStmtChoice asc);
CFGBlock *VisitStmtExpr(StmtExpr *S, AddStmtChoice asc);
@@ -354,6 +362,7 @@ private:
CFGBlock *Visit(Stmt *S, AddStmtChoice asc = AddStmtChoice::NotAlwaysAdd);
CFGBlock *VisitStmt(Stmt *S, AddStmtChoice asc);
CFGBlock *VisitChildren(Stmt *S);
+ CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
// Visitors to walk an AST and generate destructors of temporaries in
// full expression.
@@ -431,18 +440,70 @@ private:
return false;
return !S->isTypeDependent() &&
!S->isValueDependent() &&
- S->Evaluate(outResult, *Context);
+ S->EvaluateAsRValue(outResult, *Context);
}
/// tryEvaluateBool - Try and evaluate the Stmt and return 0 or 1
/// if we can evaluate to a known value, otherwise return -1.
TryResult tryEvaluateBool(Expr *S) {
- bool Result;
if (!BuildOpts.PruneTriviallyFalseEdges ||
- S->isTypeDependent() || S->isValueDependent() ||
- !S->EvaluateAsBooleanCondition(Result, *Context))
+ S->isTypeDependent() || S->isValueDependent())
return TryResult();
- return Result;
+
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(S)) {
+ if (Bop->isLogicalOp()) {
+ // Check the cache first.
+ CachedBoolEvalsTy::iterator I = CachedBoolEvals.find(S);
+ if (I != CachedBoolEvals.end())
+ return I->second; // already in map;
+
+ // Compute the result first; the recursive evaluation may update the map.
+ TryResult Result = evaluateAsBooleanConditionNoCache(S);
+ CachedBoolEvals[S] = Result; // update or insert
+ return Result;
+ }
+ }
+
+ return evaluateAsBooleanConditionNoCache(S);
+ }
+
+ /// \brief Evaluate as boolean \param E without using the cache.
+ TryResult evaluateAsBooleanConditionNoCache(Expr *E) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(E)) {
+ if (Bop->isLogicalOp()) {
+ TryResult LHS = tryEvaluateBool(Bop->getLHS());
+ if (LHS.isKnown()) {
+ // We were able to evaluate the LHS, see if we can get away with not
+ // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
+ if (LHS.isTrue() == (Bop->getOpcode() == BO_LOr))
+ return LHS.isTrue();
+
+ TryResult RHS = tryEvaluateBool(Bop->getRHS());
+ if (RHS.isKnown()) {
+ if (Bop->getOpcode() == BO_LOr)
+ return LHS.isTrue() || RHS.isTrue();
+ else
+ return LHS.isTrue() && RHS.isTrue();
+ }
+ } else {
+ TryResult RHS = tryEvaluateBool(Bop->getRHS());
+ if (RHS.isKnown()) {
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ if (RHS.isTrue() == (Bop->getOpcode() == BO_LOr))
+ return RHS.isTrue();
+ }
+ }
+
+ return TryResult();
+ }
+ }
+
+ bool Result;
+ if (E->EvaluateAsBooleanCondition(Result, *Context))
+ return Result;
+
+ return TryResult();
}
};
@@ -638,6 +699,52 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
return Block;
}
+/// \brief Retrieve the type of the temporary object whose lifetime was
+/// extended by a local reference with the given initializer.
+static QualType getReferenceInitTemporaryType(ASTContext &Context,
+ const Expr *Init) {
+ while (true) {
+ // Skip parentheses.
+ Init = Init->IgnoreParens();
+
+ // Skip through cleanups.
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init)) {
+ Init = EWC->getSubExpr();
+ continue;
+ }
+
+ // Skip through the temporary-materialization expression.
+ if (const MaterializeTemporaryExpr *MTE
+ = dyn_cast<MaterializeTemporaryExpr>(Init)) {
+ Init = MTE->GetTemporaryExpr();
+ continue;
+ }
+
+ // Skip derived-to-base and no-op casts.
+ if (const CastExpr *CE = dyn_cast<CastExpr>(Init)) {
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) &&
+ Init->getType()->isRecordType()) {
+ Init = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ // Skip member accesses into rvalues.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Init)) {
+ if (!ME->isArrow() && ME->getBase()->isRValue()) {
+ Init = ME->getBase();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return Init->getType();
+}
+
/// addAutomaticObjDtors - Add to current block automatic objects destructors
/// for objects in range of local scope positions. Use S as trigger statement
/// for destructors.
@@ -649,8 +756,6 @@ void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
if (B == E)
return;
- CFGBlock::iterator InsertPos;
-
// We need to append the destructors in reverse order, but any one of them
// may be a no-return destructor which changes the CFG. As a result, buffer
// this sequence up and replay them in reverse order when appending onto the
@@ -666,9 +771,13 @@ void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
// If this destructor is marked as a no-return destructor, we need to
// create a new block for the destructor which does not have as a successor
// anything built thus far: control won't flow out of this block.
- QualType Ty = (*I)->getType().getNonReferenceType();
- if (const ArrayType *AT = Context->getAsArrayType(Ty))
- Ty = AT->getElementType();
+ QualType Ty;
+ if ((*I)->getType()->isReferenceType()) {
+ Ty = getReferenceInitTemporaryType(*Context, (*I)->getInit());
+ } else {
+ Ty = Context->getBaseElementType((*I)->getType());
+ }
+
const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
if (cast<FunctionType>(Dtor->getType())->getNoReturnAttr())
Block = createNoReturnBlock();
@@ -798,16 +907,15 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
// Check for const references bound to temporary. Set type to pointee.
QualType QT = VD->getType();
- if (const ReferenceType* RT = QT.getTypePtr()->getAs<ReferenceType>()) {
- QT = RT->getPointeeType();
- if (!QT.isConstQualified())
- return Scope;
+ if (QT.getTypePtr()->isReferenceType()) {
if (!VD->extendsLifetimeOfTemporary())
return Scope;
+
+ QT = getReferenceInitTemporaryType(*Context, VD->getInit());
}
// Check for constant size array. Set type to array element type.
- if (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
+ while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
if (AT->getSize() == 0)
return Scope;
QT = AT->getElementType();
@@ -878,7 +986,7 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
return VisitBinaryOperator(cast<BinaryOperator>(S), asc);
case Stmt::BlockExprClass:
- return VisitBlockExpr(cast<BlockExpr>(S), asc);
+ return VisitNoRecurse(cast<Expr>(S), asc);
case Stmt::BreakStmtClass:
return VisitBreakStmt(cast<BreakStmt>(S));
@@ -886,6 +994,7 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::CallExprClass:
case Stmt::CXXOperatorCallExprClass:
case Stmt::CXXMemberCallExprClass:
+ case Stmt::UserDefinedLiteralClass:
return VisitCallExpr(cast<CallExpr>(S), asc);
case Stmt::CaseStmtClass:
@@ -957,12 +1066,21 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::LabelStmtClass:
return VisitLabelStmt(cast<LabelStmt>(S));
+ case Stmt::LambdaExprClass:
+ return VisitLambdaExpr(cast<LambdaExpr>(S), asc);
+
case Stmt::MemberExprClass:
return VisitMemberExpr(cast<MemberExpr>(S), asc);
+ case Stmt::NullStmtClass:
+ return Block;
+
case Stmt::ObjCAtCatchStmtClass:
return VisitObjCAtCatchStmt(cast<ObjCAtCatchStmt>(S));
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ return VisitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(S));
+
case Stmt::ObjCAtSynchronizedStmtClass:
return VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S));
@@ -975,9 +1093,12 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::ObjCForCollectionStmtClass:
return VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S));
- case Stmt::NullStmtClass:
+ case Stmt::OpaqueValueExprClass:
return Block;
+ case Stmt::PseudoObjectExprClass:
+ return VisitPseudoObjectExpr(cast<PseudoObjectExpr>(S));
+
case Stmt::ReturnStmtClass:
return VisitReturnStmt(cast<ReturnStmt>(S));
@@ -1068,6 +1189,10 @@ CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
RHSBlock = createBlock();
}
+ // Generate the blocks for evaluating the LHS.
+ Block = LHSBlock;
+ CFGBlock *EntryLHSBlock = addStmt(B->getLHS());
+
// See if this is a known constant.
TryResult KnownVal = tryEvaluateBool(B->getLHS());
if (KnownVal.isKnown() && (B->getOpcode() == BO_LOr))
@@ -1083,9 +1208,7 @@ CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
}
- // Generate the blocks for evaluating the LHS.
- Block = LHSBlock;
- return addStmt(B->getLHS());
+ return EntryLHSBlock;
}
if (B->getOpcode() == BO_Comma) { // ,
@@ -1117,7 +1240,7 @@ CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
return (LBlock ? LBlock : RBlock);
}
-CFGBlock *CFGBuilder::VisitBlockExpr(BlockExpr *E, AddStmtChoice asc) {
+CFGBlock *CFGBuilder::VisitNoRecurse(Expr *E, AddStmtChoice asc) {
if (asc.alwaysAdd(*this, E)) {
autoCreateBlock();
appendStmt(Block, E);
@@ -1180,7 +1303,7 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
bool AddEHEdge = false;
// Languages without exceptions are assumed to not throw.
- if (Context->getLangOptions().Exceptions) {
+ if (Context->getLangOpts().Exceptions) {
if (BuildOpts.AddEHEdges)
AddEHEdge = true;
}
@@ -1405,14 +1528,24 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
autoCreateBlock();
appendStmt(Block, DS);
+
+ // Keep track of the last non-null block, as 'Block' can be nulled out
+ // if the initializer expression is something like a 'while' in a
+ // statement-expression.
+ CFGBlock *LastBlock = Block;
if (Init) {
- if (HasTemporaries)
+ if (HasTemporaries) {
// For expression with temporaries go directly to subexpression to omit
// generating destructors for the second time.
- Visit(cast<ExprWithCleanups>(Init)->getSubExpr());
- else
- Visit(Init);
+ ExprWithCleanups *EC = cast<ExprWithCleanups>(Init);
+ if (CFGBlock *newBlock = Visit(EC->getSubExpr()))
+ LastBlock = newBlock;
+ }
+ else {
+ if (CFGBlock *newBlock = Visit(Init))
+ LastBlock = newBlock;
+ }
}
// If the type of VD is a VLA, then we must process its size expressions.
@@ -1424,7 +1557,7 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
if (ScopePos && VD == *ScopePos)
++ScopePos;
- return Block;
+ return Block ? Block : LastBlock;
}
CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
@@ -1588,6 +1721,19 @@ CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) {
return LabelBlock;
}
+CFGBlock *CFGBuilder::VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc) {
+ CFGBlock *LastBlock = VisitNoRecurse(E, asc);
+ for (LambdaExpr::capture_init_iterator it = E->capture_init_begin(),
+ et = E->capture_init_end(); it != et; ++it) {
+ if (Expr *Init = *it) {
+ CFGBlock *Tmp = Visit(Init);
+ if (Tmp != 0)
+ LastBlock = Tmp;
+ }
+ }
+ return LastBlock;
+}
+
CFGBlock *CFGBuilder::VisitGotoStmt(GotoStmt *G) {
// Goto is a control-flow statement. Thus we stop processing the current
// block and create a new one.
@@ -1875,6 +2021,12 @@ CFGBlock *CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
return addStmt(S->getCollection());
}
+CFGBlock *CFGBuilder::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ // Inline the body.
+ return addStmt(S->getSubStmt());
+ // TODO: consider adding cleanups for the end of @autoreleasepool scope.
+}
+
CFGBlock *CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
// FIXME: Add locking 'primitives' to CFG for @synchronized.
@@ -1904,6 +2056,31 @@ CFGBlock *CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
return NYS();
}
+CFGBlock *CFGBuilder::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ autoCreateBlock();
+
+ // Add the PseudoObject as the last thing.
+ appendStmt(Block, E);
+
+ CFGBlock *lastBlock = Block;
+
+ // Before that, evaluate all of the semantics in order. In
+ // CFG-land, that means appending them in reverse order.
+ for (unsigned i = E->getNumSemanticExprs(); i != 0; ) {
+ Expr *Semantic = E->getSemanticExpr(--i);
+
+ // If the semantic is an opaque value, we're being asked to bind
+ // it to its source expression.
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
+ Semantic = OVE->getSourceExpr();
+
+ if (CFGBlock *B = Visit(Semantic))
+ lastBlock = B;
+ }
+
+ return lastBlock;
+}
+
CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
CFGBlock *LoopSuccessor = NULL;
@@ -2530,9 +2707,18 @@ CFGBlock *CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt *CS) {
CFGBlock *CatchBlock = Block;
if (!CatchBlock)
CatchBlock = createBlock();
-
+
+ // CXXCatchStmt is more than just a label. It has semantic meaning
+ // as well, as it implicitly "initializes" the catch variable. Add
+ // it to the CFG as a CFGElement so that the control-flow of these
+ // semantics gets captured.
+ appendStmt(CatchBlock, CS);
+
+ // Also add the CXXCatchStmt as a label, to mirror handling of regular
+ // labels.
CatchBlock->setLabel(CS);
+ // Bail out if the CFG is bad.
if (badCFG)
return 0;
@@ -2687,8 +2873,7 @@ CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
CFGBlock *CFGBuilder::VisitCXXConstructExpr(CXXConstructExpr *C,
AddStmtChoice asc) {
autoCreateBlock();
- if (!C->isElidable())
- appendStmt(Block, C);
+ appendStmt(Block, C);
return VisitChildren(C);
}
@@ -2958,7 +3143,7 @@ CFGBlock *CFG::createBlock() {
// Create the block.
CFGBlock *Mem = getAllocator().Allocate<CFGBlock>();
- new (Mem) CFGBlock(NumBlockIDs++, BlkBVC);
+ new (Mem) CFGBlock(NumBlockIDs++, BlkBVC, this);
Blocks.push_back(Mem, BlkBVC);
// If this is the first block, set it as the Entry and Exit.
@@ -2989,7 +3174,7 @@ CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
const VarDecl *var = cast<CFGAutomaticObjDtor>(this)->getVarDecl();
QualType ty = var->getType();
ty = ty.getNonReferenceType();
- if (const ArrayType *arrayType = astContext.getAsArrayType(ty)) {
+ while (const ArrayType *arrayType = astContext.getAsArrayType(ty)) {
ty = arrayType->getElementType();
}
const RecordType *recordType = ty->getAs<RecordType>();
@@ -3010,7 +3195,6 @@ CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
return 0;
}
llvm_unreachable("getKind() returned bogus value");
- return 0;
}
bool CFGImplicitDtor::isNoReturn(ASTContext &astContext) const {
@@ -3402,9 +3586,19 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper* Helper,
if (isa<CXXOperatorCallExpr>(S)) {
OS << " (OperatorCall)";
- } else if (isa<CXXBindTemporaryExpr>(S)) {
+ }
+ else if (isa<CXXBindTemporaryExpr>(S)) {
OS << " (BindTemporary)";
}
+ else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
+ OS << " (CXXConstructExpr, " << CCE->getType().getAsString() << ")";
+ }
+ else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
+ OS << " (" << CE->getStmtClassName() << ", "
+ << CE->getCastKindName()
+ << ", " << CE->getType().getAsString()
+ << ")";
+ }
// Expressions need a newline.
if (isa<Expr>(S))
@@ -3463,27 +3657,35 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper* Helper,
static void print_block(raw_ostream &OS, const CFG* cfg,
const CFGBlock &B,
- StmtPrinterHelper* Helper, bool print_edges) {
+ StmtPrinterHelper* Helper, bool print_edges,
+ bool ShowColors) {
- if (Helper) Helper->setBlockID(B.getBlockID());
+ if (Helper)
+ Helper->setBlockID(B.getBlockID());
// Print the header.
- OS << "\n [ B" << B.getBlockID();
+ if (ShowColors)
+ OS.changeColor(raw_ostream::YELLOW, true);
+
+ OS << "\n [B" << B.getBlockID();
if (&B == &cfg->getEntry())
- OS << " (ENTRY) ]\n";
+ OS << " (ENTRY)]\n";
else if (&B == &cfg->getExit())
- OS << " (EXIT) ]\n";
+ OS << " (EXIT)]\n";
else if (&B == cfg->getIndirectGotoBlock())
- OS << " (INDIRECT GOTO DISPATCH) ]\n";
+ OS << " (INDIRECT GOTO DISPATCH)]\n";
else
- OS << " ]\n";
+ OS << "]\n";
+
+ if (ShowColors)
+ OS.resetColor();
// Print the label of this block.
if (Stmt *Label = const_cast<Stmt*>(B.getLabel())) {
if (print_edges)
- OS << " ";
+ OS << " ";
if (LabelStmt *L = dyn_cast<LabelStmt>(Label))
OS << L->getName();
@@ -3521,22 +3723,22 @@ static void print_block(raw_ostream &OS, const CFG* cfg,
// Print the statement # in the basic block and the statement itself.
if (print_edges)
- OS << " ";
+ OS << " ";
OS << llvm::format("%3d", j) << ": ";
if (Helper)
Helper->setStmtID(j);
- print_elem(OS,Helper,*I);
+ print_elem(OS, Helper, *I);
}
// Print the terminator of this block.
if (B.getTerminator()) {
- if (print_edges)
- OS << " ";
+ if (ShowColors)
+ OS.changeColor(raw_ostream::GREEN);
- OS << " T: ";
+ OS << " T: ";
if (Helper) Helper->setBlockID(-1);
@@ -3544,54 +3746,86 @@ static void print_block(raw_ostream &OS, const CFG* cfg,
PrintingPolicy(Helper->getLangOpts()));
TPrinter.Visit(const_cast<Stmt*>(B.getTerminator().getStmt()));
OS << '\n';
+
+ if (ShowColors)
+ OS.resetColor();
}
if (print_edges) {
// Print the predecessors of this block.
- OS << " Predecessors (" << B.pred_size() << "):";
- unsigned i = 0;
+ if (!B.pred_empty()) {
+ const raw_ostream::Colors Color = raw_ostream::BLUE;
+ if (ShowColors)
+ OS.changeColor(Color);
+ OS << " Preds " ;
+ if (ShowColors)
+ OS.resetColor();
+ OS << '(' << B.pred_size() << "):";
+ unsigned i = 0;
+
+ if (ShowColors)
+ OS.changeColor(Color);
+
+ for (CFGBlock::const_pred_iterator I = B.pred_begin(), E = B.pred_end();
+ I != E; ++I, ++i) {
- for (CFGBlock::const_pred_iterator I = B.pred_begin(), E = B.pred_end();
- I != E; ++I, ++i) {
+ if (i == 8 || (i-8) == 0)
+ OS << "\n ";
- if (i == 8 || (i-8) == 0)
- OS << "\n ";
+ OS << " B" << (*I)->getBlockID();
+ }
+
+ if (ShowColors)
+ OS.resetColor();
- OS << " B" << (*I)->getBlockID();
+ OS << '\n';
}
- OS << '\n';
-
// Print the successors of this block.
- OS << " Successors (" << B.succ_size() << "):";
- i = 0;
-
- for (CFGBlock::const_succ_iterator I = B.succ_begin(), E = B.succ_end();
- I != E; ++I, ++i) {
-
- if (i == 8 || (i-8) % 10 == 0)
- OS << "\n ";
-
- if (*I)
- OS << " B" << (*I)->getBlockID();
- else
- OS << " NULL";
+ if (!B.succ_empty()) {
+ const raw_ostream::Colors Color = raw_ostream::MAGENTA;
+ if (ShowColors)
+ OS.changeColor(Color);
+ OS << " Succs ";
+ if (ShowColors)
+ OS.resetColor();
+ OS << '(' << B.succ_size() << "):";
+ unsigned i = 0;
+
+ if (ShowColors)
+ OS.changeColor(Color);
+
+ for (CFGBlock::const_succ_iterator I = B.succ_begin(), E = B.succ_end();
+ I != E; ++I, ++i) {
+
+ if (i == 8 || (i-8) % 10 == 0)
+ OS << "\n ";
+
+ if (*I)
+ OS << " B" << (*I)->getBlockID();
+ else
+ OS << " NULL";
+ }
+
+ if (ShowColors)
+ OS.resetColor();
+ OS << '\n';
}
-
- OS << '\n';
}
}
/// dump - A simple pretty printer of a CFG that outputs to stderr.
-void CFG::dump(const LangOptions &LO) const { print(llvm::errs(), LO); }
+void CFG::dump(const LangOptions &LO, bool ShowColors) const {
+ print(llvm::errs(), LO, ShowColors);
+}
/// print - A simple pretty printer of a CFG that outputs to an ostream.
-void CFG::print(raw_ostream &OS, const LangOptions &LO) const {
+void CFG::print(raw_ostream &OS, const LangOptions &LO, bool ShowColors) const {
StmtPrinterHelper Helper(this, LO);
// Print the entry block.
- print_block(OS, this, getEntry(), &Helper, true);
+ print_block(OS, this, getEntry(), &Helper, true, ShowColors);
// Iterate through the CFGBlocks and print them one by one.
for (const_iterator I = Blocks.begin(), E = Blocks.end() ; I != E ; ++I) {
@@ -3599,25 +3833,28 @@ void CFG::print(raw_ostream &OS, const LangOptions &LO) const {
if (&(**I) == &getEntry() || &(**I) == &getExit())
continue;
- print_block(OS, this, **I, &Helper, true);
+ print_block(OS, this, **I, &Helper, true, ShowColors);
}
// Print the exit block.
- print_block(OS, this, getExit(), &Helper, true);
+ print_block(OS, this, getExit(), &Helper, true, ShowColors);
+ OS << '\n';
OS.flush();
}
/// dump - A simple pretty printer of a CFGBlock that outputs to stderr.
-void CFGBlock::dump(const CFG* cfg, const LangOptions &LO) const {
- print(llvm::errs(), cfg, LO);
+void CFGBlock::dump(const CFG* cfg, const LangOptions &LO,
+ bool ShowColors) const {
+ print(llvm::errs(), cfg, LO, ShowColors);
}
/// print - A simple pretty printer of a CFGBlock that outputs to an ostream.
/// Generally this will only be called from CFG::print.
void CFGBlock::print(raw_ostream &OS, const CFG* cfg,
- const LangOptions &LO) const {
+ const LangOptions &LO, bool ShowColors) const {
StmtPrinterHelper Helper(cfg, LO);
- print_block(OS, cfg, *this, &Helper, true);
+ print_block(OS, cfg, *this, &Helper, true, ShowColors);
+ OS << '\n';
}
/// printTerminator - A simple pretty printer of the terminator of a CFGBlock.
@@ -3714,7 +3951,7 @@ struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
#ifndef NDEBUG
std::string OutSStr;
llvm::raw_string_ostream Out(OutSStr);
- print_block(Out,Graph, *Node, GraphHelper, false);
+ print_block(Out,Graph, *Node, GraphHelper, false, false);
std::string& OutStr = Out.str();
if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
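tryEvaluateBool above memoizes results for logical operators because CFG construction asks about the same condition once per branch, and evaluateAsBooleanConditionNoCache applies the dominance rules (0 && X -> 0, 1 || X -> 1, X && 0 -> 0, X || 1 -> 1). A reduced model of that cache-plus-short-circuit logic, with an int tri-state standing in for TryResult and a toy Expr:

#include <map>

// -1 = unknown, 0 = false, 1 = true (like the builder's TryResult).
struct Expr { bool IsAnd; const Expr *LHS, *RHS; int Leaf; };

static std::map<const Expr *, int> Cache;

static int tryEval(const Expr *E) {
  if (!E->LHS)
    return E->Leaf;                        // leaves are already tri-state
  std::map<const Expr *, int>::iterator I = Cache.find(E);
  if (I != Cache.end())
    return I->second;                      // reused across branch builds
  int L = tryEval(E->LHS);
  int Res = -1;
  if (L == (E->IsAnd ? 0 : 1)) {
    Res = L;                               // 0 && X -> 0, 1 || X -> 1
  } else {
    int R = tryEval(E->RHS);
    if (L != -1 && R != -1)
      Res = E->IsAnd ? (L && R) : (L || R);
    else if (R == (E->IsAnd ? 0 : 1))
      Res = R;                             // X && 0 -> 0, X || 1 -> 1
  }
  return Cache[E] = Res;
}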
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
new file mode 100644
index 0000000..96a16c3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
@@ -0,0 +1,184 @@
+//== CallGraph.cpp - AST-based Call graph ----------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AST-based CallGraph.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Analysis/CallGraph.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/StmtVisitor.h"
+
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+
+namespace {
+/// A helper class, which walks the AST and locates all the call sites in the
+/// given function body.
+class CGBuilder : public StmtVisitor<CGBuilder> {
+ CallGraph *G;
+ const Decl *FD;
+ CallGraphNode *CallerNode;
+
+public:
+ CGBuilder(CallGraph *g, const Decl *D, CallGraphNode *N)
+ : G(g), FD(D), CallerNode(N) {}
+
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ void VisitCallExpr(CallExpr *CE) {
+ // TODO: We need to handle ObjC method calls as well.
+ if (FunctionDecl *CalleeDecl = CE->getDirectCallee())
+ if (G->includeInGraph(CalleeDecl)) {
+ CallGraphNode *CalleeNode = G->getOrInsertNode(CalleeDecl);
+ CallerNode->addCallee(CalleeNode, G);
+ }
+ }
+
+ void VisitChildren(Stmt *S) {
+ for (Stmt::child_range I = S->children(); I; ++I)
+ if (*I)
+ static_cast<CGBuilder*>(this)->Visit(*I);
+ }
+};
+
+} // end anonymous namespace
+
+CallGraph::CallGraph() {
+ Root = getOrInsertNode(0);
+}
+
+CallGraph::~CallGraph() {
+ if (!FunctionMap.empty()) {
+ for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
+ I != E; ++I)
+ delete I->second;
+ FunctionMap.clear();
+ }
+}
+
+bool CallGraph::includeInGraph(const Decl *D) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // We skip function template definitions, as their semantics are
+ // only determined when they are instantiated.
+ if (!FD->isThisDeclarationADefinition() ||
+ FD->isDependentContext())
+ return false;
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II && II->getName().startswith("__inline"))
+ return false;
+ }
+
+ if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
+ if (!ID->isThisDeclarationADefinition())
+ return false;
+ }
+
+ return true;
+}
+
+void CallGraph::addNodeForDecl(Decl* D, bool IsGlobal) {
+ assert(D);
+
+ // Do nothing if the node already exists.
+ if (FunctionMap.find(D) != FunctionMap.end())
+ return;
+
+ // Allocate a new node, mark it as root, and process its calls.
+ CallGraphNode *Node = getOrInsertNode(D);
+ if (IsGlobal)
+ Root->addCallee(Node, this);
+
+ // Process all the calls by this function as well.
+ CGBuilder builder(this, D, Node);
+ if (Stmt *Body = D->getBody())
+ builder.Visit(Body);
+}
+
+CallGraphNode *CallGraph::getNode(const Decl *F) const {
+ FunctionMapTy::const_iterator I = FunctionMap.find(F);
+ if (I == FunctionMap.end()) return 0;
+ return I->second;
+}
+
+CallGraphNode *CallGraph::getOrInsertNode(Decl *F) {
+ CallGraphNode *&Node = FunctionMap[F];
+ if (Node)
+ return Node;
+
+ Node = new CallGraphNode(F);
+ // If not root, add to the parentless list.
+ if (F != 0)
+ ParentlessNodes.insert(Node);
+ return Node;
+}
+
+void CallGraph::print(raw_ostream &OS) const {
+ OS << " --- Call graph Dump --- \n";
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ OS << " Function: ";
+ if (I->second == Root)
+ OS << "< root >";
+ else
+ I->second->print(OS);
+ OS << " calls: ";
+ for (CallGraphNode::iterator CI = I->second->begin(),
+ CE = I->second->end(); CI != CE; ++CI) {
+ assert(*CI != Root && "No one can call the root node.");
+ (*CI)->print(OS);
+ OS << " ";
+ }
+ OS << '\n';
+ }
+ OS.flush();
+}
+
+void CallGraph::dump() const {
+ print(llvm::errs());
+}
+
+void CallGraph::viewGraph() const {
+ llvm::ViewGraph(this, "CallGraph");
+}
+
+StringRef CallGraphNode::getName() const {
+ if (const FunctionDecl *D = dyn_cast_or_null<FunctionDecl>(FD))
+ if (const IdentifierInfo *II = D->getIdentifier())
+ return II->getName();
+ return "< >";
+}
+
+void CallGraphNode::print(raw_ostream &os) const {
+ os << getName();
+}
+
+void CallGraphNode::dump() const {
+ print(llvm::errs());
+}
+
+namespace llvm {
+
+template <>
+struct DOTGraphTraits<const CallGraph*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const CallGraphNode *Node,
+ const CallGraph *CG) {
+ if (CG->getRoot() == Node) {
+ return "< root >";
+ }
+ return Node->getName();
+ }
+
+};
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
index 8acd189..7e9e38f 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
@@ -20,47 +20,6 @@
using namespace clang;
using namespace ento;
-// The "fundamental rule" for naming conventions of methods:
-// (url broken into two lines)
-// http://developer.apple.com/documentation/Cocoa/Conceptual/
-// MemoryMgmt/Tasks/MemoryManagementRules.html
-//
-// "You take ownership of an object if you create it using a method whose name
-// begins with "alloc" or "new" or contains "copy" (for example, alloc,
-// newObject, or mutableCopy), or if you send it a retain message. You are
-// responsible for relinquishing ownership of objects you own using release
-// or autorelease. Any other time you receive an object, you must
-// not release it."
-//
-
-cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S,
- const ObjCMethodDecl *MD) {
- switch (MD && MD->hasAttr<ObjCMethodFamilyAttr>()? MD->getMethodFamily()
- : S.getMethodFamily()) {
- case OMF_None:
- case OMF_autorelease:
- case OMF_dealloc:
- case OMF_finalize:
- case OMF_release:
- case OMF_retain:
- case OMF_retainCount:
- case OMF_self:
- case OMF_performSelector:
- return NoConvention;
-
- case OMF_init:
- return InitRule;
-
- case OMF_alloc:
- case OMF_copy:
- case OMF_mutableCopy:
- case OMF_new:
- return CreateRule;
- }
- llvm_unreachable("unexpected naming convention");
- return NoConvention;
-}
-
bool cocoa::isRefType(QualType RetTy, StringRef Prefix,
StringRef Name) {
// Recursively walk the typedef stack, allowing typedefs of reference types.
@@ -68,7 +27,9 @@ bool cocoa::isRefType(QualType RetTy, StringRef Prefix,
StringRef TDName = TD->getDecl()->getIdentifier()->getName();
if (TDName.startswith(Prefix) && TDName.endswith("Ref"))
return true;
-
+ // XPC unfortunately uses CF-style function names, but its types aren't CF types.
+ if (TDName.startswith("xpc_"))
+ return false;
RetTy = TD->getDecl()->getUnderlyingType();
}
@@ -115,7 +76,7 @@ bool cocoa::isCocoaObjectRef(QualType Ty) {
// Assume that anything declared with a forward declaration and no
// @interface subclasses NSObject.
- if (ID->isForwardDecl())
+ if (!ID->hasDefinition())
return true;
for ( ; ID ; ID = ID->getSuperClass())
@@ -174,6 +135,4 @@ bool coreFoundation::followsCreateRule(const FunctionDecl *fn) {
// If we matched a lowercase character, it isn't the end of the
// word. Keep scanning.
}
-
- return false;
}
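cocoa::isRefType above walks the typedef chain and now bails out early on xpc_* names, which imitate CF naming without being CF types. A string-only sketch of that scan, with the typedef stack flattened into a list of names:

#include <string>
#include <vector>

static bool startsWith(const std::string &S, const std::string &P) {
  return S.compare(0, P.size(), P) == 0;
}
static bool endsWith(const std::string &S, const std::string &Suf) {
  return S.size() >= Suf.size() &&
         S.compare(S.size() - Suf.size(), Suf.size(), Suf) == 0;
}

// Accept "<Prefix>...Ref" anywhere on the chain; stop immediately on
// an xpc_* name so later heuristics never treat it as a CF type.
static bool isRefType(const std::vector<std::string> &Chain,
                      const std::string &Prefix) {
  for (size_t I = 0; I != Chain.size(); ++I) {
    if (startsWith(Chain[I], Prefix) && endsWith(Chain[I], "Ref"))
      return true;            // e.g. "CFStringRef"
    if (startsWith(Chain[I], "xpc_"))
      return false;
  }
  return false;
}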
diff --git a/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp b/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp
new file mode 100644
index 0000000..0e02c6d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp
@@ -0,0 +1,14 @@
+//=- Dominators.cpp - Implementation of dominators tree for Clang CFG C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/Dominators.h"
+
+using namespace clang;
+
+void DominatorTree::anchor() { }
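The whole point of the new Dominators.cpp is the out-of-line anchor(): defining one virtual member out of line gives the class a home translation unit for its vtable and type info, instead of weak copies in every user. A sketch of the idiom (the member list here is invented):

// Header side: declare one virtual function without a body.
struct DominatorTree {
  virtual ~DominatorTree() {}
  virtual void anchor();
};

// Exactly one .cpp (here, Dominators.cpp) defines it, so the vtable
// is emitted once in that object file and merely referenced elsewhere.
void DominatorTree::anchor() {}

int main() {
  DominatorTree DT;
  DT.anchor();
  return 0;
}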
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
index a26f0ad..51fac49 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "FormatStringParsing.h"
+#include "clang/Basic/LangOptions.h"
using clang::analyze_format_string::ArgTypeResult;
using clang::analyze_format_string::FormatStringHandler;
@@ -155,6 +156,9 @@ clang::analyze_format_string::ParseArgPosition(FormatStringHandler &H,
}
if (Amt.getHowSpecified() == OptionalAmount::Constant && *(I++) == '$') {
+ // Warn that positional arguments are non-standard.
+ H.HandlePosition(Start, I - Start);
+
// Special case: '%0$', since this is an easy mistake.
if (Amt.getConstantAmount() == 0) {
H.HandleZeroPosition(Start, I - Start);
@@ -175,7 +179,9 @@ clang::analyze_format_string::ParseArgPosition(FormatStringHandler &H,
bool
clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
const char *&I,
- const char *E) {
+ const char *E,
+ const LangOptions &LO,
+ bool IsScanf) {
LengthModifier::Kind lmKind = LengthModifier::None;
const char *lmPosition = I;
switch (*I) {
@@ -183,19 +189,39 @@ clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
return false;
case 'h':
++I;
- lmKind = (I != E && *I == 'h') ?
- ++I, LengthModifier::AsChar : LengthModifier::AsShort;
+ lmKind = (I != E && *I == 'h') ? (++I, LengthModifier::AsChar)
+ : LengthModifier::AsShort;
break;
case 'l':
++I;
- lmKind = (I != E && *I == 'l') ?
- ++I, LengthModifier::AsLongLong : LengthModifier::AsLong;
+ lmKind = (I != E && *I == 'l') ? (++I, LengthModifier::AsLongLong)
+ : LengthModifier::AsLong;
break;
case 'j': lmKind = LengthModifier::AsIntMax; ++I; break;
case 'z': lmKind = LengthModifier::AsSizeT; ++I; break;
case 't': lmKind = LengthModifier::AsPtrDiff; ++I; break;
case 'L': lmKind = LengthModifier::AsLongDouble; ++I; break;
- case 'q': lmKind = LengthModifier::AsLongLong; ++I; break;
+ case 'q': lmKind = LengthModifier::AsQuad; ++I; break;
+ case 'a':
+ if (IsScanf && !LO.C99 && !LO.CPlusPlus0x) {
+ // For scanf in C90, look at the next character to see if this should
+ // be parsed as the GNU extension 'a' length modifier. If not, this
+ // will be parsed as a conversion specifier.
+ ++I;
+ if (I != E && (*I == 's' || *I == 'S' || *I == '[')) {
+ lmKind = LengthModifier::AsAllocate;
+ break;
+ }
+ --I;
+ }
+ return false;
+ case 'm':
+ if (IsScanf) {
+ lmKind = LengthModifier::AsMAllocate;
+ ++I;
+ break;
+ }
+ return false;
}
LengthModifier lm(lmPosition, lmKind);
FS.setLengthModifier(lm);
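ParseLengthModifier reads one character of lookahead for 'hh' and 'll', now parenthesizing the comma-operator form (++I, Kind) so the consume and the resulting value stay in one expression. A standalone cut of that scanning step, with parser state reduced to a pointer pair:

#include <cassert>

enum LengthKind { LK_None, LK_Char, LK_Short, LK_Long, LK_LongLong };

// Consume 'h'/'hh'/'l'/'ll' from [I, E); on 'hh' the inner ++I in the
// comma expression eats the second character before yielding the kind.
static LengthKind parseLength(const char *&I, const char *E) {
  if (I == E)
    return LK_None;
  switch (*I) {
  case 'h':
    ++I;
    return (I != E && *I == 'h') ? (++I, LK_Char) : LK_Short;
  case 'l':
    ++I;
    return (I != E && *I == 'l') ? (++I, LK_LongLong) : LK_Long;
  default:
    return LK_None;
  }
}

int main() {
  const char *S = "hhd";
  const char *I = S;
  assert(parseLength(I, S + 3) == LK_Char && *I == 'd');
  return 0;
}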
@@ -213,7 +239,21 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
case UnknownTy:
return true;
-
+
+ case AnyCharTy: {
+ if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U:
+ return true;
+ }
+ return false;
+ }
+
case SpecificTy: {
argTy = C.getCanonicalType(argTy).getUnqualifiedType();
if (T == argTy)
@@ -297,15 +337,28 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
case CPointerTy:
return argTy->isPointerType() || argTy->isObjCObjectPointerType() ||
- argTy->isNullPtrType();
+ argTy->isBlockPointerType() || argTy->isNullPtrType();
- case ObjCPointerTy:
- return argTy->getAs<ObjCObjectPointerType>() != NULL;
+ case ObjCPointerTy: {
+ if (argTy->getAs<ObjCObjectPointerType>() ||
+ argTy->getAs<BlockPointerType>())
+ return true;
+
+ // Handle implicit toll-free bridging.
+ if (const PointerType *PT = argTy->getAs<PointerType>()) {
+ // Things such as CFTypeRef are really just opaque pointers
+ // to C structs representing CF types that can often be bridged
+ // to Objective-C objects. Since the compiler doesn't know which
+ // structs can be toll-free bridged, we just accept them all.
+ QualType pointee = PT->getPointeeType();
+ if (pointee->getAsStructureType() || pointee->isVoidType())
+ return true;
+ }
+ return false;
+ }
}
- // FIXME: Should be unreachable, but Clang is currently emitting
- // a warning.
- return false;
+ llvm_unreachable("Invalid ArgTypeResult Kind!");
}
QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
@@ -314,6 +367,8 @@ QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
llvm_unreachable("No representative type for Invalid ArgTypeResult");
case UnknownTy:
return QualType();
+ case AnyCharTy:
+ return C.CharTy;
case SpecificTy:
return T;
case CStrTy:
@@ -330,11 +385,17 @@ QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
}
}
- // FIXME: Should be unreachable, but Clang is currently emitting
- // a warning.
- return QualType();
+ llvm_unreachable("Invalid ArgTypeResult Kind!");
}
+std::string ArgTypeResult::getRepresentativeTypeName(ASTContext &C) const {
+ std::string S = getRepresentativeType(C).getAsString();
+ if (Name && S != Name)
+ return std::string("'") + Name + "' (aka '" + S + "')";
+ return std::string("'") + S + "'";
+}
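
Editorial aside, not part of the commit: a sketch of how the new helper is expected to render names in diagnostics. The printed values are illustrative and target-dependent; the ArgTypeResult constructors are the same ones used elsewhere in this patch.

#include "clang/AST/ASTContext.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace clang::analyze_format_string;

void demo(ASTContext &Ctx) {
  // Named typedef: prints "'size_t' (aka 'unsigned long')" on a typical
  // 64-bit target, since the canonical spelling differs from Name.
  ArgTypeResult SizeT(Ctx.getSizeType(), "size_t");
  llvm::errs() << SizeT.getRepresentativeTypeName(Ctx) << "\n";
  // No Name attached: falls back to plain quoting, "'int'".
  ArgTypeResult Plain(Ctx.IntTy);
  llvm::errs() << Plain.getRepresentativeTypeName(Ctx) << "\n";
}
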
+
+
//===----------------------------------------------------------------------===//
// Methods on OptionalAmount.
//===----------------------------------------------------------------------===//
@@ -359,6 +420,8 @@ analyze_format_string::LengthModifier::toString() const {
return "l";
case AsLongLong:
return "ll";
+ case AsQuad:
+ return "q";
case AsIntMax:
return "j";
case AsSizeT:
@@ -367,6 +430,10 @@ analyze_format_string::LengthModifier::toString() const {
return "t";
case AsLongDouble:
return "L";
+ case AsAllocate:
+ return "a";
+ case AsMAllocate:
+ return "m";
case None:
return "";
}
@@ -374,6 +441,52 @@ analyze_format_string::LengthModifier::toString() const {
}
//===----------------------------------------------------------------------===//
+// Methods on ConversionSpecifier.
+//===----------------------------------------------------------------------===//
+
+const char *ConversionSpecifier::toString() const {
+ switch (kind) {
+ case dArg: return "d";
+ case iArg: return "i";
+ case oArg: return "o";
+ case uArg: return "u";
+ case xArg: return "x";
+ case XArg: return "X";
+ case fArg: return "f";
+ case FArg: return "F";
+ case eArg: return "e";
+ case EArg: return "E";
+ case gArg: return "g";
+ case GArg: return "G";
+ case aArg: return "a";
+ case AArg: return "A";
+ case cArg: return "c";
+ case sArg: return "s";
+ case pArg: return "p";
+ case nArg: return "n";
+ case PercentArg: return "%";
+ case ScanListArg: return "[";
+ case InvalidSpecifier: return NULL;
+
+ // MacOS X unicode extensions.
+ case CArg: return "C";
+ case SArg: return "S";
+
+ // Objective-C specific specifiers.
+ case ObjCObjArg: return "@";
+
+ // FreeBSD specific specifiers.
+ case bArg: return "b";
+ case DArg: return "D";
+ case rArg: return "r";
+
+ // GlibC specific specifiers.
+ case PrintErrno: return "m";
+ }
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
// Methods on OptionalAmount.
//===----------------------------------------------------------------------===//
@@ -398,19 +511,16 @@ void OptionalAmount::toString(raw_ostream &os) const {
}
}
-//===----------------------------------------------------------------------===//
-// Methods on ConversionSpecifier.
-//===----------------------------------------------------------------------===//
-
bool FormatSpecifier::hasValidLengthModifier() const {
switch (LM.getKind()) {
case LengthModifier::None:
return true;
- // Handle most integer flags
+ // Handle most integer flags
case LengthModifier::AsChar:
case LengthModifier::AsShort:
case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
case LengthModifier::AsIntMax:
case LengthModifier::AsSizeT:
case LengthModifier::AsPtrDiff:
@@ -428,7 +538,7 @@ bool FormatSpecifier::hasValidLengthModifier() const {
return false;
}
- // Handle 'l' flag
+ // Handle 'l' flag
case LengthModifier::AsLong:
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
@@ -449,6 +559,7 @@ bool FormatSpecifier::hasValidLengthModifier() const {
case ConversionSpecifier::cArg:
case ConversionSpecifier::sArg:
case ConversionSpecifier::rArg:
+ case ConversionSpecifier::ScanListArg:
return true;
default:
return false;
@@ -465,11 +576,113 @@ bool FormatSpecifier::hasValidLengthModifier() const {
case ConversionSpecifier::gArg:
case ConversionSpecifier::GArg:
return true;
+ // GNU extension.
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ return true;
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsAllocate:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ScanListArg:
+ return true;
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsMAllocate:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ScanListArg:
+ return true;
default:
return false;
}
}
- return false;
+ llvm_unreachable("Invalid LengthModifier Kind!");
}
+bool FormatSpecifier::hasStandardLengthModifier() const {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ case LengthModifier::AsChar:
+ case LengthModifier::AsShort:
+ case LengthModifier::AsLong:
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsIntMax:
+ case LengthModifier::AsSizeT:
+ case LengthModifier::AsPtrDiff:
+ case LengthModifier::AsLongDouble:
+ return true;
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsQuad:
+ return false;
+ }
+ llvm_unreachable("Invalid LengthModifier Kind!");
+}
+bool FormatSpecifier::hasStandardConversionSpecifier(
+    const LangOptions &LangOpt) const {
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::pArg:
+ case ConversionSpecifier::nArg:
+ case ConversionSpecifier::ObjCObjArg:
+ case ConversionSpecifier::ScanListArg:
+ case ConversionSpecifier::PercentArg:
+ return true;
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::SArg:
+ return LangOpt.ObjC1 || LangOpt.ObjC2;
+ case ConversionSpecifier::InvalidSpecifier:
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::PrintErrno:
+ return false;
+ }
+ llvm_unreachable("Invalid ConversionSpecifier Kind!");
+}
+
+bool FormatSpecifier::hasStandardLengthConversionCombination() const {
+ if (LM.getKind() == LengthModifier::AsLongDouble) {
+ switch(CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ return false;
+ default:
+ return true;
+ }
+ }
+ return true;
+}
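
Editorial aside, not part of the commit: the POSIX.1-2008 'm' modifier that AsMAllocate models, shown from the user's side. A minimal sketch assuming a POSIX-2008 libc; per the switch above, 'm' only combines with c, C, s, S, and scanlists.

#include <cstdio>
#include <cstdlib>

int main() {
  char *word = nullptr;
  // Valid: 'm' + 's'. The library allocates the buffer; the caller frees it.
  if (std::sscanf("hello world", "%ms", &word) == 1) {
    std::printf("%s\n", word);
    std::free(word);
  }
  // "%md" would be flagged: hasValidLengthModifier() rejects the combination.
  return 0;
}
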
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h b/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h
index 607e99c..f483ec6 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h
@@ -8,6 +8,8 @@
namespace clang {
+class LangOptions;
+
template <typename T>
class UpdateOnReturn {
T &ValueToUpdate;
@@ -42,7 +44,8 @@ bool ParseArgPosition(FormatStringHandler &H,
/// Returns true if a LengthModifier was parsed and installed in the
/// FormatSpecifier& argument, and false otherwise.
-bool ParseLengthModifier(FormatSpecifier &FS, const char *&Beg, const char *E);
+bool ParseLengthModifier(FormatSpecifier &FS, const char *&Beg, const char *E,
+ const LangOptions &LO, bool IsScanf = false);
template <typename T> class SpecifierResult {
T FS;
@@ -69,4 +72,3 @@ public:
} // end clang namespace
#endif
-
diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
index 62c5455..ff6607d 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
@@ -1,4 +1,6 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+
#include "clang/AST/Stmt.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/AnalysisContext.h"
@@ -15,113 +17,14 @@ using namespace clang;
namespace {
-// FIXME: This is copy-pasted from ThreadSafety.c. I wanted a patch that
-// contained working code before refactoring the implementation of both
-// files.
-class CFGBlockSet {
- llvm::BitVector VisitedBlockIDs;
-
-public:
- // po_iterator requires this iterator, but the only interface needed is the
- // value_type typedef.
- struct iterator {
- typedef const CFGBlock *value_type;
- };
-
- CFGBlockSet() {}
- CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}
-
- /// \brief Set the bit associated with a particular CFGBlock.
- /// This is the important method for the SetType template parameter.
- bool insert(const CFGBlock *Block) {
- // Note that insert() is called by po_iterator, which doesn't check to make
- // sure that Block is non-null. Moreover, the CFGBlock iterator will
- // occasionally hand out null pointers for pruned edges, so we catch those
- // here.
- if (Block == 0)
- return false; // if an edge is trivially false.
- if (VisitedBlockIDs.test(Block->getBlockID()))
- return false;
- VisitedBlockIDs.set(Block->getBlockID());
- return true;
- }
-
- /// \brief Check if the bit for a CFGBlock has been already set.
- /// This method is for tracking visited blocks in the main threadsafety loop.
- /// Block must not be null.
- bool alreadySet(const CFGBlock *Block) {
- return VisitedBlockIDs.test(Block->getBlockID());
- }
-};
-
-/// \brief We create a helper class which we use to iterate through CFGBlocks in
-/// the topological order.
-class TopologicallySortedCFG {
- typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator;
-
- std::vector<const CFGBlock*> Blocks;
-
- typedef llvm::DenseMap<const CFGBlock *, unsigned> BlockOrderTy;
- BlockOrderTy BlockOrder;
-
-
-public:
- typedef std::vector<const CFGBlock*>::reverse_iterator iterator;
-
- TopologicallySortedCFG(const CFG *CFGraph) {
- Blocks.reserve(CFGraph->getNumBlockIDs());
- CFGBlockSet BSet(CFGraph);
-
- for (po_iterator I = po_iterator::begin(CFGraph, BSet),
- E = po_iterator::end(CFGraph, BSet); I != E; ++I) {
- BlockOrder[*I] = Blocks.size() + 1;
- Blocks.push_back(*I);
- }
- }
-
- iterator begin() {
- return Blocks.rbegin();
- }
-
- iterator end() {
- return Blocks.rend();
- }
-
- bool empty() {
- return begin() == end();
- }
-
- struct BlockOrderCompare;
- friend struct BlockOrderCompare;
-
- struct BlockOrderCompare {
- const TopologicallySortedCFG &TSC;
- public:
- BlockOrderCompare(const TopologicallySortedCFG &tsc) : TSC(tsc) {}
-
- bool operator()(const CFGBlock *b1, const CFGBlock *b2) const {
- TopologicallySortedCFG::BlockOrderTy::const_iterator b1It = TSC.BlockOrder.find(b1);
- TopologicallySortedCFG::BlockOrderTy::const_iterator b2It = TSC.BlockOrder.find(b2);
-
- unsigned b1V = (b1It == TSC.BlockOrder.end()) ? 0 : b1It->second;
- unsigned b2V = (b2It == TSC.BlockOrder.end()) ? 0 : b2It->second;
- return b1V > b2V;
- }
- };
-
- BlockOrderCompare getComparator() const {
- return BlockOrderCompare(*this);
- }
-};
-
class DataflowWorklist {
SmallVector<const CFGBlock *, 20> worklist;
llvm::BitVector enqueuedBlocks;
- TopologicallySortedCFG TSC;
+ PostOrderCFGView *POV;
public:
- DataflowWorklist(const CFG &cfg)
+ DataflowWorklist(const CFG &cfg, AnalysisDeclContext &Ctx)
: enqueuedBlocks(cfg.getNumBlockIDs()),
- TSC(&cfg) {}
+ POV(Ctx.getAnalysis<PostOrderCFGView>()) {}
void enqueueBlock(const CFGBlock *block);
void enqueueSuccessors(const CFGBlock *block);
@@ -168,10 +71,9 @@ void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
}
void DataflowWorklist::sortWorklist() {
- std::sort(worklist.begin(), worklist.end(), TSC.getComparator());
+ std::sort(worklist.begin(), worklist.end(), POV->getComparator());
}
-
const CFGBlock *DataflowWorklist::dequeue() {
if (worklist.empty())
return 0;
@@ -184,7 +86,7 @@ const CFGBlock *DataflowWorklist::dequeue() {
namespace {
class LiveVariablesImpl {
public:
- AnalysisContext &analysisContext;
+ AnalysisDeclContext &analysisContext;
std::vector<LiveVariables::LivenessValues> cfgBlockValues;
llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
@@ -204,7 +106,7 @@ public:
void dumpBlockLiveness(const SourceManager& M);
- LiveVariablesImpl(AnalysisContext &ac, bool KillAtAssign)
+ LiveVariablesImpl(AnalysisDeclContext &ac, bool KillAtAssign)
: analysisContext(ac),
SSetFact(false), // Do not canonicalize ImmutableSets by default.
DSetFact(false), // This is a *major* performance win.
@@ -241,6 +143,8 @@ namespace {
}
}
+void LiveVariables::Observer::anchor() { }
+
LiveVariables::LivenessValues
LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
LiveVariables::LivenessValues valsB) {
@@ -329,6 +233,29 @@ static const VariableArrayType *FindVA(QualType Ty) {
return 0;
}
+static const Stmt *LookThroughStmt(const Stmt *S) {
+ while (S) {
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreParens();
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S)) {
+ S = EWC->getSubExpr();
+ continue;
+ }
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
+ S = OVE->getSourceExpr();
+ continue;
+ }
+ break;
+ }
+ return S;
+}
+
+static void AddLiveStmt(llvm::ImmutableSet<const Stmt *> &Set,
+ llvm::ImmutableSet<const Stmt *>::Factory &F,
+ const Stmt *S) {
+ Set = F.add(Set, LookThroughStmt(S));
+}
+
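
Editorial aside, not part of the commit: a worked trace of what LookThroughStmt peels, on a made-up AST shape.

// Given an expression wrapped like this:
//
//   ExprWithCleanups
//     ParenExpr
//       OpaqueValueExpr   (source expr: DeclRefExpr 'x')
//
// the loop strips parens, unwraps the cleanups node, follows the opaque
// value to its source expression, and returns the DeclRefExpr for 'x'.
// AddLiveStmt therefore records one canonical node, so liveness lookups on
// any of the wrapper nodes agree on the same representative statement.
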
void TransferFunctions::Visit(Stmt *S) {
if (observer)
observer->observeStmt(S, currentBlock, val);
@@ -353,8 +280,7 @@ void TransferFunctions::Visit(Stmt *S) {
// Include the implicit "this" pointer as being live.
CXXMemberCallExpr *CE = cast<CXXMemberCallExpr>(S);
if (Expr *ImplicitObj = CE->getImplicitObjectArgument()) {
- ImplicitObj = ImplicitObj->IgnoreParens();
- val.liveStmts = LV.SSetFact.add(val.liveStmts, ImplicitObj);
+ AddLiveStmt(val.liveStmts, LV.SSetFact, ImplicitObj);
}
break;
}
@@ -363,12 +289,23 @@ void TransferFunctions::Visit(Stmt *S) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) {
for (const VariableArrayType* VA = FindVA(VD->getType());
VA != 0; VA = FindVA(VA->getElementType())) {
- val.liveStmts = LV.SSetFact.add(val.liveStmts,
- VA->getSizeExpr()->IgnoreParens());
+ AddLiveStmt(val.liveStmts, LV.SSetFact, VA->getSizeExpr());
}
}
break;
}
+ case Stmt::PseudoObjectExprClass: {
+ // A pseudo-object operation only directly consumes its result
+ // expression.
+ Expr *child = cast<PseudoObjectExpr>(S)->getResultExpr();
+ if (!child) return;
+ if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(child))
+ child = OV->getSourceExpr();
+ child = child->IgnoreParens();
+ val.liveStmts = LV.SSetFact.add(val.liveStmts, child);
+ return;
+ }
+
// FIXME: These cases eventually shouldn't be needed.
case Stmt::ExprWithCleanupsClass: {
S = cast<ExprWithCleanups>(S)->getSubExpr();
@@ -386,12 +323,8 @@ void TransferFunctions::Visit(Stmt *S) {
for (Stmt::child_iterator it = S->child_begin(), ei = S->child_end();
it != ei; ++it) {
- if (Stmt *child = *it) {
- if (Expr *Ex = dyn_cast<Expr>(child))
- child = Ex->IgnoreParens();
-
- val.liveStmts = LV.SSetFact.add(val.liveStmts, child);
- }
+ if (Stmt *child = *it)
+ AddLiveStmt(val.liveStmts, LV.SSetFact, child);
}
}
@@ -421,7 +354,7 @@ void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
}
void TransferFunctions::VisitBlockExpr(BlockExpr *BE) {
- AnalysisContext::referenced_decls_iterator I, E;
+ AnalysisDeclContext::referenced_decls_iterator I, E;
llvm::tie(I, E) =
LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl());
for ( ; I != E ; ++I) {
@@ -545,7 +478,7 @@ LiveVariables::~LiveVariables() {
}
LiveVariables *
-LiveVariables::computeLiveness(AnalysisContext &AC,
+LiveVariables::computeLiveness(AnalysisDeclContext &AC,
bool killAtAssign) {
// No CFG? Bail out.
@@ -557,7 +490,7 @@ LiveVariables::computeLiveness(AnalysisContext &AC,
// Construct the dataflow worklist. Enqueue the exit block as the
// start of the analysis.
- DataflowWorklist worklist(*cfg);
+ DataflowWorklist worklist(*cfg, AC);
llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());
// FIXME: we should enqueue using post order.
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp b/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp
new file mode 100644
index 0000000..cfd66f7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp
@@ -0,0 +1,49 @@
+//===- PostOrderCFGView.cpp - Post order view of CFG blocks ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a post-order view of the blocks in a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+
+using namespace clang;
+
+void PostOrderCFGView::anchor() { }
+
+PostOrderCFGView::PostOrderCFGView(const CFG *cfg) {
+ Blocks.reserve(cfg->getNumBlockIDs());
+ CFGBlockSet BSet(cfg);
+
+ for (po_iterator I = po_iterator::begin(cfg, BSet),
+ E = po_iterator::end(cfg, BSet); I != E; ++I) {
+ BlockOrder[*I] = Blocks.size() + 1;
+ Blocks.push_back(*I);
+ }
+}
+
+PostOrderCFGView *PostOrderCFGView::create(AnalysisDeclContext &ctx) {
+ const CFG *cfg = ctx.getCFG();
+ if (!cfg)
+ return 0;
+ return new PostOrderCFGView(cfg);
+}
+
+const void *PostOrderCFGView::getTag() { static int x; return &x; }
+
+bool PostOrderCFGView::BlockOrderCompare::operator()(const CFGBlock *b1,
+ const CFGBlock *b2) const {
+ PostOrderCFGView::BlockOrderTy::const_iterator b1It = POV.BlockOrder.find(b1);
+ PostOrderCFGView::BlockOrderTy::const_iterator b2It = POV.BlockOrder.find(b2);
+
+ unsigned b1V = (b1It == POV.BlockOrder.end()) ? 0 : b1It->second;
+ unsigned b2V = (b2It == POV.BlockOrder.end()) ? 0 : b2It->second;
+ return b1V > b2V;
+}
+
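
Editorial aside, not part of the commit: a minimal sketch of how a dataflow pass consumes the new analysis, mirroring the DataflowWorklist change in LiveVariables.cpp above. The function name is illustrative.

#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
using namespace clang;

static void sortWorklist(AnalysisDeclContext &AC,
                         SmallVectorImpl<const CFGBlock *> &Worklist) {
  // Cached per declaration; create() above returns 0 when there is no CFG.
  PostOrderCFGView *POV = AC.getAnalysis<PostOrderCFGView>();
  if (!POV)
    return;
  // getComparator() orders blocks consistently with the post-order numbering
  // computed in the constructor above; blocks missing from the map sort last.
  std::sort(Worklist.begin(), Worklist.end(), POV->getComparator());
}
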
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
index 2bb39cf..4b2a19e 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
@@ -52,7 +52,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
const char *&Beg,
const char *E,
unsigned &argIndex,
- bool FormatExtensions) {
+ const LangOptions &LO) {
using namespace clang::analyze_format_string;
using namespace clang::analyze_printf;
@@ -151,7 +151,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
}
// Look for the length modifier.
- if (ParseLengthModifier(FS, I, E) && I == E) {
+ if (ParseLengthModifier(FS, I, E, LO) && I == E) {
// No more characters left?
H.HandleIncompleteSpecifier(Start, E - Start);
return true;
@@ -197,10 +197,10 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
// Glibc specific.
case 'm': k = ConversionSpecifier::PrintErrno; break;
// FreeBSD format extensions
- case 'b': if (FormatExtensions) k = ConversionSpecifier::bArg; break; /* check for int and then char * */
- case 'r': if (FormatExtensions) k = ConversionSpecifier::rArg; break;
- case 'y': if (FormatExtensions) k = ConversionSpecifier::iArg; break;
- case 'D': if (FormatExtensions) k = ConversionSpecifier::DArg; break; /* check for u_char * pointer and a char * string */
+ case 'b': if (LO.FormatExtensions) k = ConversionSpecifier::bArg; break; /* check for int and then char * */
+ case 'r': if (LO.FormatExtensions) k = ConversionSpecifier::rArg; break;
+ case 'y': if (LO.FormatExtensions) k = ConversionSpecifier::iArg; break;
+ case 'D': if (LO.FormatExtensions) k = ConversionSpecifier::DArg; break; /* check for u_char * pointer and a char * string */
}
PrintfConversionSpecifier CS(conversionPosition, k);
FS.setConversionSpecifier(CS);
@@ -220,14 +220,14 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
const char *I,
const char *E,
- bool FormatExtensions) {
+ const LangOptions &LO) {
unsigned argIndex = 0;
// Keep looking for a format specifier until we have exhausted the string.
while (I != E) {
const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
- FormatExtensions);
+ LO);
// Did a fail-stop error of any kind occur when parsing the specifier?
// If so, don't do any more processing.
if (FSR.shouldStop())
@@ -246,55 +246,11 @@ bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
}
//===----------------------------------------------------------------------===//
-// Methods on ConversionSpecifier.
-//===----------------------------------------------------------------------===//
-const char *ConversionSpecifier::toString() const {
- switch (kind) {
- case dArg: return "d";
- case iArg: return "i";
- case oArg: return "o";
- case uArg: return "u";
- case xArg: return "x";
- case XArg: return "X";
- case fArg: return "f";
- case FArg: return "F";
- case eArg: return "e";
- case EArg: return "E";
- case gArg: return "g";
- case GArg: return "G";
- case aArg: return "a";
- case AArg: return "A";
- case cArg: return "c";
- case sArg: return "s";
- case pArg: return "p";
- case nArg: return "n";
- case PercentArg: return "%";
- case ScanListArg: return "[";
- case InvalidSpecifier: return NULL;
-
- // MacOS X unicode extensions.
- case CArg: return "C";
- case SArg: return "S";
-
- // Objective-C specific specifiers.
- case ObjCObjArg: return "@";
-
- // FreeBSD specific specifiers.
- case bArg: return "b";
- case DArg: return "D";
- case rArg: return "r";
-
- // GlibC specific specifiers.
- case PrintErrno: return "m";
- }
- return NULL;
-}
-
-//===----------------------------------------------------------------------===//
// Methods on PrintfSpecifier.
//===----------------------------------------------------------------------===//
-ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx) const {
+ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
const PrintfConversionSpecifier &CS = getConversionSpecifier();
if (!CS.consumesDataArgument())
@@ -303,7 +259,8 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx) const {
if (CS.getKind() == ConversionSpecifier::cArg)
switch (LM.getKind()) {
case LengthModifier::None: return Ctx.IntTy;
- case LengthModifier::AsLong: return ArgTypeResult::WIntTy;
+ case LengthModifier::AsLong:
+ return ArgTypeResult(ArgTypeResult::WIntTy, "wint_t");
default:
return ArgTypeResult::Invalid();
}
@@ -311,39 +268,50 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx) const {
if (CS.isIntArg())
switch (LM.getKind()) {
case LengthModifier::AsLongDouble:
- return ArgTypeResult::Invalid();
+ // GNU extension.
+ return Ctx.LongLongTy;
case LengthModifier::None: return Ctx.IntTy;
- case LengthModifier::AsChar: return Ctx.SignedCharTy;
+ case LengthModifier::AsChar: return ArgTypeResult::AnyCharTy;
case LengthModifier::AsShort: return Ctx.ShortTy;
case LengthModifier::AsLong: return Ctx.LongTy;
- case LengthModifier::AsLongLong: return Ctx.LongLongTy;
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return Ctx.LongLongTy;
case LengthModifier::AsIntMax:
- // FIXME: Return unknown for now.
+ return ArgTypeResult(Ctx.getIntMaxType(), "intmax_t");
+ case LengthModifier::AsSizeT:
+ // FIXME: How to get the corresponding signed version of size_t?
return ArgTypeResult();
- case LengthModifier::AsSizeT: return Ctx.getSizeType();
- case LengthModifier::AsPtrDiff: return Ctx.getPointerDiffType();
+ case LengthModifier::AsPtrDiff:
+ return ArgTypeResult(Ctx.getPointerDiffType(), "ptrdiff_t");
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgTypeResult::Invalid();
}
if (CS.isUIntArg())
switch (LM.getKind()) {
case LengthModifier::AsLongDouble:
- return ArgTypeResult::Invalid();
+ // GNU extension.
+ return Ctx.UnsignedLongLongTy;
case LengthModifier::None: return Ctx.UnsignedIntTy;
case LengthModifier::AsChar: return Ctx.UnsignedCharTy;
case LengthModifier::AsShort: return Ctx.UnsignedShortTy;
case LengthModifier::AsLong: return Ctx.UnsignedLongTy;
- case LengthModifier::AsLongLong: return Ctx.UnsignedLongLongTy;
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return Ctx.UnsignedLongLongTy;
case LengthModifier::AsIntMax:
- // FIXME: Return unknown for now.
- return ArgTypeResult();
+ return ArgTypeResult(Ctx.getUIntMaxType(), "uintmax_t");
case LengthModifier::AsSizeT:
- // FIXME: How to get the corresponding unsigned
- // version of size_t?
- return ArgTypeResult();
+ return ArgTypeResult(Ctx.getSizeType(), "size_t");
case LengthModifier::AsPtrDiff:
// FIXME: How to get the corresponding unsigned
// version of ptrdiff_t?
return ArgTypeResult();
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgTypeResult::Invalid();
}
if (CS.isDoubleArg()) {
@@ -354,15 +322,24 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx) const {
switch (CS.getKind()) {
case ConversionSpecifier::sArg:
- return ArgTypeResult(LM.getKind() == LengthModifier::AsWideChar ?
- ArgTypeResult::WCStrTy : ArgTypeResult::CStrTy);
+ if (LM.getKind() == LengthModifier::AsWideChar) {
+ if (IsObjCLiteral)
+ return Ctx.getPointerType(Ctx.UnsignedShortTy.withConst());
+ return ArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t *");
+ }
+ return ArgTypeResult::CStrTy;
case ConversionSpecifier::SArg:
- // FIXME: This appears to be Mac OS X specific.
- return ArgTypeResult::WCStrTy;
+ if (IsObjCLiteral)
+ return Ctx.getPointerType(Ctx.UnsignedShortTy.withConst());
+ return ArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t *");
case ConversionSpecifier::CArg:
- return Ctx.WCharTy;
+ if (IsObjCLiteral)
+ return Ctx.UnsignedShortTy;
+ return ArgTypeResult(Ctx.WCharTy, "wchar_t");
case ConversionSpecifier::pArg:
return ArgTypeResult::CPointerTy;
+ case ConversionSpecifier::ObjCObjArg:
+ return ArgTypeResult::ObjCPointerTy;
default:
break;
}
@@ -371,7 +348,8 @@ ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx) const {
return ArgTypeResult();
}
-bool PrintfSpecifier::fixType(QualType QT) {
+bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
+ ASTContext &Ctx, bool IsObjCLiteral) {
// Handle strings first (char *, wchar_t *)
if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) {
CS.setKind(ConversionSpecifier::sArg);
@@ -383,16 +361,16 @@ bool PrintfSpecifier::fixType(QualType QT) {
// Set the long length modifier for wide characters
if (QT->getPointeeType()->isWideCharType())
LM.setKind(LengthModifier::AsWideChar);
+ else
+ LM.setKind(LengthModifier::None);
return true;
}
// We can only work with builtin types.
- if (!QT->isBuiltinType())
- return false;
-
- // Everything else should be a base type
const BuiltinType *BT = QT->getAs<BuiltinType>();
+ if (!BT)
+ return false;
// Set length modifier
switch (BT->getKind()) {
@@ -404,18 +382,15 @@ bool PrintfSpecifier::fixType(QualType QT) {
case BuiltinType::UInt128:
case BuiltinType::Int128:
case BuiltinType::Half:
- // Integral types which are non-trivial to correct.
+ // Various types which are non-trivial to correct.
return false;
- case BuiltinType::Void:
- case BuiltinType::NullPtr:
- case BuiltinType::ObjCId:
- case BuiltinType::ObjCClass:
- case BuiltinType::ObjCSel:
- case BuiltinType::Dependent:
- case BuiltinType::Overload:
- case BuiltinType::BoundMember:
- case BuiltinType::UnknownAny:
+#define SIGNED_TYPE(Id, SingletonId)
+#define UNSIGNED_TYPE(Id, SingletonId)
+#define FLOATING_TYPE(Id, SingletonId)
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
// Misc other stuff which doesn't make sense here.
return false;
@@ -453,6 +428,28 @@ bool PrintfSpecifier::fixType(QualType QT) {
break;
}
+ // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
+ if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) {
+ const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier();
+ if (Identifier->getName() == "size_t") {
+ LM.setKind(LengthModifier::AsSizeT);
+ } else if (Identifier->getName() == "ssize_t") {
+ // Not C99, but common in Unix.
+ LM.setKind(LengthModifier::AsSizeT);
+ } else if (Identifier->getName() == "intmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ } else if (Identifier->getName() == "uintmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ } else if (Identifier->getName() == "ptrdiff_t") {
+ LM.setKind(LengthModifier::AsPtrDiff);
+ }
+ }
+
+ // If fixing the length modifier was enough, we are done.
+ const analyze_printf::ArgTypeResult &ATR = getArgType(Ctx, IsObjCLiteral);
+ if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
+ return true;
+
// Set conversion specifier and disable any flags which do not apply to it.
// Let typedefs to char fall through to int, as %c is silly for uint8_t.
if (isa<TypedefType>(QT) && QT->isAnyCharacterType()) {
@@ -472,9 +469,7 @@ bool PrintfSpecifier::fixType(QualType QT) {
HasAlternativeForm = 0;
}
else if (QT->isUnsignedIntegerType()) {
- // Preserve the original formatting, e.g. 'X', 'o'.
- if (!cast<PrintfConversionSpecifier>(CS).isUIntArg())
- CS.setKind(ConversionSpecifier::uArg);
+ CS.setKind(ConversionSpecifier::uArg);
HasAlternativeForm = 0;
HasPlusPrefix = 0;
} else {
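
Editorial aside, not part of the commit: source that the new typedef handling in this hunk targets. A sketch of the expected -Wformat corrections in C99/C++11 modes; the exact fix-it text depends on the Sema caller.

#include <cstddef>
#include <cstdio>

int main() {
  std::size_t n = sizeof(int);
  // Length modifier is fixed to AsSizeT and the specifier kept: "%zd".
  std::printf("%d\n", n);
  std::ptrdiff_t d = 0;
  // Fixed to AsPtrDiff; getArgType() then matches, so "%ti" suffices and the
  // early return above skips any further specifier rewriting.
  std::printf("%i\n", d);
  return 0;
}
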
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
index 3a0bbd5..3f711b4 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
@@ -34,8 +34,6 @@ ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
return PostLoad(S, LC, tag);
case ProgramPoint::PreStoreKind:
return PreStore(S, LC, tag);
- case ProgramPoint::PostStoreKind:
- return PostStore(S, LC, tag);
case ProgramPoint::PostLValueKind:
return PostLValue(S, LC, tag);
case ProgramPoint::PostPurgeDeadSymbolsKind:
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
index 8f24c43..c8b491a 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
@@ -68,8 +68,6 @@ bool PseudoConstantAnalysis::wasReferenced(const VarDecl *VD) {
const Decl *PseudoConstantAnalysis::getDecl(const Expr *E) {
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
return DR->getDecl();
- else if (const BlockDeclRefExpr *BDR = dyn_cast<BlockDeclRefExpr>(E))
- return BDR->getDecl();
else
return 0;
}
@@ -198,18 +196,7 @@ void PseudoConstantAnalysis::RunAnalysis() {
break;
}
- // Case 4: Block variable references
- case Stmt::BlockDeclRefExprClass: {
- const BlockDeclRefExpr *BDR = cast<BlockDeclRefExpr>(Head);
- if (const VarDecl *VD = dyn_cast<VarDecl>(BDR->getDecl())) {
- // Add the Decl to the used list
- UsedVars->insert(VD);
- continue;
- }
- break;
- }
-
- // Case 5: Variable references
+ // Case 4: Variable references
case Stmt::DeclRefExprClass: {
const DeclRefExpr *DR = cast<DeclRefExpr>(Head);
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
@@ -220,7 +207,7 @@ void PseudoConstantAnalysis::RunAnalysis() {
break;
}
- // Case 6: Block expressions
+ // Case 5: Block expressions
case Stmt::BlockExprClass: {
const BlockExpr *B = cast<BlockExpr>(Head);
// Add the body of the block to the list
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
index 4931771..bb63e2c 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
@@ -251,7 +251,9 @@ void DeadCodeScan::reportDeadCode(const Stmt *S,
}
namespace clang { namespace reachable_code {
-
+
+void Callback::anchor() { }
+
unsigned ScanReachableFromBlock(const CFGBlock *Start,
llvm::BitVector &Reachable) {
unsigned count = 0;
@@ -287,7 +289,7 @@ unsigned ScanReachableFromBlock(const CFGBlock *Start,
return count;
}
-void FindUnreachableCode(AnalysisContext &AC, Callback &CB) {
+void FindUnreachableCode(AnalysisDeclContext &AC, Callback &CB) {
CFG *cfg = AC.getCFG();
if (!cfg)
return;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
index 6a8673a..6bc4adb 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
@@ -20,9 +20,11 @@ using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
using clang::analyze_format_string::ConversionSpecifier;
+using clang::analyze_scanf::ScanfArgTypeResult;
using clang::analyze_scanf::ScanfConversionSpecifier;
using clang::analyze_scanf::ScanfSpecifier;
using clang::UpdateOnReturn;
+using namespace clang;
typedef clang::analyze_format_string::SpecifierResult<ScanfSpecifier>
ScanfSpecifierResult;
@@ -65,7 +67,8 @@ static bool ParseScanList(FormatStringHandler &H,
static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
const char *&Beg,
const char *E,
- unsigned &argIndex) {
+ unsigned &argIndex,
+ const LangOptions &LO) {
using namespace clang::analyze_scanf;
const char *I = Beg;
@@ -130,7 +133,7 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
}
// Look for the length modifier.
- if (ParseLengthModifier(FS, I, E) && I == E) {
+ if (ParseLengthModifier(FS, I, E, LO, /*IsScanf=*/true) && I == E) {
// No more characters left?
H.HandleIncompleteSpecifier(Start, E - Start);
return true;
@@ -173,7 +176,7 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
}
ScanfConversionSpecifier CS(conversionPosition, k);
if (k == ScanfConversionSpecifier::ScanListArg) {
- if (!ParseScanList(H, CS, I, E))
+ if (ParseScanList(H, CS, I, E))
return true;
}
FS.setConversionSpecifier(CS);
@@ -190,16 +193,248 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
}
return ScanfSpecifierResult(Start, FS);
}
-
+
+ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
+ const ScanfConversionSpecifier &CS = getConversionSpecifier();
+
+ if (!CS.consumesDataArgument())
+ return ScanfArgTypeResult::Invalid();
+
+ switch(CS.getKind()) {
+ // Signed int.
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None: return ArgTypeResult(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgTypeResult(ArgTypeResult::AnyCharTy);
+ case LengthModifier::AsShort: return ArgTypeResult(Ctx.ShortTy);
+ case LengthModifier::AsLong: return ArgTypeResult(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgTypeResult(Ctx.LongLongTy);
+ case LengthModifier::AsIntMax:
+ return ScanfArgTypeResult(Ctx.getIntMaxType(), "intmax_t *");
+ case LengthModifier::AsSizeT:
+ // FIXME: ssize_t.
+ return ScanfArgTypeResult();
+ case LengthModifier::AsPtrDiff:
+ return ScanfArgTypeResult(Ctx.getPointerDiffType(), "ptrdiff_t *");
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return ArgTypeResult(Ctx.LongLongTy);
+ case LengthModifier::AsAllocate: return ScanfArgTypeResult::Invalid();
+ case LengthModifier::AsMAllocate: return ScanfArgTypeResult::Invalid();
+ }
+
+ // Unsigned int.
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None: return ArgTypeResult(Ctx.UnsignedIntTy);
+ case LengthModifier::AsChar: return ArgTypeResult(Ctx.UnsignedCharTy);
+ case LengthModifier::AsShort: return ArgTypeResult(Ctx.UnsignedShortTy);
+ case LengthModifier::AsLong: return ArgTypeResult(Ctx.UnsignedLongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgTypeResult(Ctx.UnsignedLongLongTy);
+ case LengthModifier::AsIntMax:
+ return ScanfArgTypeResult(Ctx.getUIntMaxType(), "uintmax_t *");
+ case LengthModifier::AsSizeT:
+ return ScanfArgTypeResult(Ctx.getSizeType(), "size_t *");
+ case LengthModifier::AsPtrDiff:
+ // FIXME: Unsigned version of ptrdiff_t?
+ return ScanfArgTypeResult();
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return ArgTypeResult(Ctx.UnsignedLongLongTy);
+ case LengthModifier::AsAllocate: return ScanfArgTypeResult::Invalid();
+ case LengthModifier::AsMAllocate: return ScanfArgTypeResult::Invalid();
+ }
+
+ // Float.
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None: return ArgTypeResult(Ctx.FloatTy);
+ case LengthModifier::AsLong: return ArgTypeResult(Ctx.DoubleTy);
+ case LengthModifier::AsLongDouble:
+ return ArgTypeResult(Ctx.LongDoubleTy);
+ default:
+ return ScanfArgTypeResult::Invalid();
+ }
+
+ // Char, string and scanlist.
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::ScanListArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None: return ScanfArgTypeResult::CStrTy;
+ case LengthModifier::AsLong:
+ return ScanfArgTypeResult(ScanfArgTypeResult::WCStrTy, "wchar_t *");
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ScanfArgTypeResult(ArgTypeResult::CStrTy);
+ default:
+ return ScanfArgTypeResult::Invalid();
+ }
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::SArg:
+ // FIXME: Mac OS X specific?
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ScanfArgTypeResult(ScanfArgTypeResult::WCStrTy, "wchar_t *");
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ScanfArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t **");
+ default:
+ return ScanfArgTypeResult::Invalid();
+ }
+
+ // Pointer.
+ case ConversionSpecifier::pArg:
+ return ScanfArgTypeResult(ArgTypeResult(ArgTypeResult::CPointerTy));
+
+ default:
+ break;
+ }
+
+ return ScanfArgTypeResult();
+}
+
+bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
+ ASTContext &Ctx) {
+ if (!QT->isPointerType())
+ return false;
+
+ QualType PT = QT->getPointeeType();
+ const BuiltinType *BT = PT->getAs<BuiltinType>();
+ if (!BT)
+ return false;
+
+ // Pointer to a character.
+ if (PT->isAnyCharacterType()) {
+ CS.setKind(ConversionSpecifier::sArg);
+ if (PT->isWideCharType())
+ LM.setKind(LengthModifier::AsWideChar);
+ else
+ LM.setKind(LengthModifier::None);
+ return true;
+ }
+
+ // Figure out the length modifier.
+ switch (BT->getKind()) {
+ // no modifier
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ case BuiltinType::Float:
+ LM.setKind(LengthModifier::None);
+ break;
+
+ // hh
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ LM.setKind(LengthModifier::AsChar);
+ break;
+
+ // h
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ LM.setKind(LengthModifier::AsShort);
+ break;
+
+ // l
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::Double:
+ LM.setKind(LengthModifier::AsLong);
+ break;
+
+ // ll
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ LM.setKind(LengthModifier::AsLongLong);
+ break;
+
+ // L
+ case BuiltinType::LongDouble:
+ LM.setKind(LengthModifier::AsLongDouble);
+ break;
+
+ // Don't know.
+ default:
+ return false;
+ }
+
+ // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
+ if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) {
+ const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier();
+ if (Identifier->getName() == "size_t") {
+ LM.setKind(LengthModifier::AsSizeT);
+ } else if (Identifier->getName() == "ssize_t") {
+ // Not C99, but common in Unix.
+ LM.setKind(LengthModifier::AsSizeT);
+ } else if (Identifier->getName() == "intmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ } else if (Identifier->getName() == "uintmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ } else if (Identifier->getName() == "ptrdiff_t") {
+ LM.setKind(LengthModifier::AsPtrDiff);
+ }
+ }
+
+ // If fixing the length modifier was enough, we are done.
+ const analyze_scanf::ScanfArgTypeResult &ATR = getArgType(Ctx);
+ if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
+ return true;
+
+ // Figure out the conversion specifier.
+ if (PT->isRealFloatingType())
+ CS.setKind(ConversionSpecifier::fArg);
+ else if (PT->isSignedIntegerType())
+ CS.setKind(ConversionSpecifier::dArg);
+ else if (PT->isUnsignedIntegerType())
+ CS.setKind(ConversionSpecifier::uArg);
+ else
+ llvm_unreachable("Unexpected type");
+
+ return true;
+}
+
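
Editorial aside, not part of the commit: inputs the scanf fixType above is meant to repair, with the expected corrections as comments.

#include <cstdio>

int main() {
  long n;
  std::scanf("%d", &n);   // pointee 'long'   -> length modifier fixed: "%ld"
  double d;
  std::scanf("%f", &d);   // pointee 'double' -> "%lf" (AsLong from the switch)
  char buf[32];
  std::scanf("%d", buf);  // pointee is a character type -> rewritten to "%s"
  return 0;
}
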
+void ScanfSpecifier::toString(raw_ostream &os) const {
+ os << "%";
+
+ if (usesPositionalArg())
+ os << getPositionalArgIndex() << "$";
+ if (SuppressAssignment)
+ os << "*";
+
+ FieldWidth.toString(os);
+ os << LM.toString();
+ os << CS.toString();
+}
+
bool clang::analyze_format_string::ParseScanfString(FormatStringHandler &H,
const char *I,
- const char *E) {
+ const char *E,
+ const LangOptions &LO) {
unsigned argIndex = 0;
// Keep looking for a format specifier until we have exhausted the string.
while (I != E) {
- const ScanfSpecifierResult &FSR = ParseScanfSpecifier(H, I, E, argIndex);
+ const ScanfSpecifierResult &FSR = ParseScanfSpecifier(H, I, E, argIndex,
+ LO);
// Did a fail-stop error of any kind occur when parsing the specifier?
// If so, don't do any more processing.
if (FSR.shouldStop())
@@ -218,4 +453,47 @@ bool clang::analyze_format_string::ParseScanfString(FormatStringHandler &H,
return false;
}
+bool ScanfArgTypeResult::matchesType(ASTContext& C, QualType argTy) const {
+ switch (K) {
+ case InvalidTy:
+ llvm_unreachable("ArgTypeResult must be valid");
+ case UnknownTy:
+ return true;
+ case CStrTy:
+ return ArgTypeResult(ArgTypeResult::CStrTy).matchesType(C, argTy);
+ case WCStrTy:
+ return ArgTypeResult(ArgTypeResult::WCStrTy).matchesType(C, argTy);
+ case PtrToArgTypeResultTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+ return A.matchesType(C, PT->getPointeeType());
+ }
+ }
+
+ llvm_unreachable("Invalid ScanfArgTypeResult Kind!");
+}
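
Editorial aside, not part of the commit: the PtrToArgTypeResultTy case wraps a printf-style ArgTypeResult and peels one pointer level off the actual argument, so scanf checking reuses the printf matching logic. A comment-only trace, assuming the wrapping constructor implied by the getArgType cases above:

//   // "%d" in scanf: the argument must be 'int *'.
//   ScanfArgTypeResult ATR = ArgTypeResult(Ctx.IntTy);
//   ATR.matchesType(Ctx, IntPtrTy);   // true: pointee 'int' matches IntTy
//   ATR.matchesType(Ctx, Ctx.IntTy);  // false: not a pointer at all
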
+
+QualType ScanfArgTypeResult::getRepresentativeType(ASTContext &C) const {
+ switch (K) {
+ case InvalidTy:
+ llvm_unreachable("No representative type for Invalid ArgTypeResult");
+ case UnknownTy:
+ return QualType();
+ case CStrTy:
+ return C.getPointerType(C.CharTy);
+ case WCStrTy:
+ return C.getPointerType(C.getWCharType());
+ case PtrToArgTypeResultTy:
+ return C.getPointerType(A.getRepresentativeType(C));
+ }
+ llvm_unreachable("Invalid ScanfArgTypeResult Kind!");
+}
+
+std::string ScanfArgTypeResult::getRepresentativeTypeName(ASTContext& C) const {
+ std::string S = getRepresentativeType(C).getAsString();
+ if (!Name)
+ return std::string("'") + S + "'";
+ return std::string("'") + Name + "' (aka '" + S + "')";
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
index 5a12913..2f7e794 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
@@ -16,6 +16,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ThreadSafety.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
@@ -31,7 +32,9 @@
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <utility>
#include <vector>
using namespace clang;
@@ -40,90 +43,7 @@ using namespace thread_safety;
// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() {}
-// Helper function
-static Expr *getParent(Expr *Exp) {
- if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
- return ME->getBase();
- if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp))
- return CE->getImplicitObjectArgument();
- return 0;
-}
-
namespace {
-/// \brief Implements a set of CFGBlocks using a BitVector.
-///
-/// This class contains a minimal interface, primarily dictated by the SetType
-/// template parameter of the llvm::po_iterator template, as used with external
-/// storage. We also use this set to keep track of which CFGBlocks we visit
-/// during the analysis.
-class CFGBlockSet {
- llvm::BitVector VisitedBlockIDs;
-
-public:
- // po_iterator requires this iterator, but the only interface needed is the
- // value_type typedef.
- struct iterator {
- typedef const CFGBlock *value_type;
- };
-
- CFGBlockSet() {}
- CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}
-
- /// \brief Set the bit associated with a particular CFGBlock.
- /// This is the important method for the SetType template parameter.
- bool insert(const CFGBlock *Block) {
- // Note that insert() is called by po_iterator, which doesn't check to make
- // sure that Block is non-null. Moreover, the CFGBlock iterator will
- // occasionally hand out null pointers for pruned edges, so we catch those
- // here.
- if (Block == 0)
- return false; // if an edge is trivially false.
- if (VisitedBlockIDs.test(Block->getBlockID()))
- return false;
- VisitedBlockIDs.set(Block->getBlockID());
- return true;
- }
-
- /// \brief Check if the bit for a CFGBlock has been already set.
- /// This method is for tracking visited blocks in the main threadsafety loop.
- /// Block must not be null.
- bool alreadySet(const CFGBlock *Block) {
- return VisitedBlockIDs.test(Block->getBlockID());
- }
-};
-
-/// \brief We create a helper class which we use to iterate through CFGBlocks in
-/// the topological order.
-class TopologicallySortedCFG {
- typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator;
-
- std::vector<const CFGBlock*> Blocks;
-
-public:
- typedef std::vector<const CFGBlock*>::reverse_iterator iterator;
-
- TopologicallySortedCFG(const CFG *CFGraph) {
- Blocks.reserve(CFGraph->getNumBlockIDs());
- CFGBlockSet BSet(CFGraph);
-
- for (po_iterator I = po_iterator::begin(CFGraph, BSet),
- E = po_iterator::end(CFGraph, BSet); I != E; ++I) {
- Blocks.push_back(*I);
- }
- }
-
- iterator begin() {
- return Blocks.rbegin();
- }
-
- iterator end() {
- return Blocks.rend();
- }
-
- bool empty() {
- return begin() == end();
- }
-};
/// \brief A MutexID object uniquely identifies a particular mutex, and
/// is built from an Expr* (i.e. calling a lock function).
@@ -136,16 +56,17 @@ public:
/// (1) Local variables in the expression, such as "x" have not changed.
/// (2) Values on the heap that affect the expression have not changed.
/// (3) The expression involves only pure function calls.
+///
/// The current implementation assumes, but does not verify, that multiple uses
/// of the same lock expression satisfy these criteria.
///
/// Clang introduces an additional wrinkle, which is that it is difficult to
/// derive canonical expressions, or compare expressions directly for equality.
-/// Thus, we identify a mutex not by an Expr, but by the set of named
+/// Thus, we identify a mutex not by an Expr, but by the list of named
/// declarations that are referenced by the Expr. In other words,
/// x->foo->bar.mu will be a four-element vector with the Decls for
/// mu, bar, foo, and x. The vector will uniquely identify the expression
-/// for all practical purposes.
+/// for all practical purposes. Null is used to denote 'this'.
///
/// Note we will need to perform substitution on "this" and function parameter
/// names when constructing a lock expression.
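
Editorial aside, not part of the commit: the decl sequence for the comment's example, spelled out with an illustrative Mutex type:

//   struct Mu  { Mutex mu; };
//   struct Mid { Mu bar; };
//   struct Top { Mid *foo; };
//   Top *x;
//
//   x->foo->bar.mu   =>   DeclSeq = [mu, bar, foo, x]
//
// Two attribute expressions that name the same chain compare equal via
// operator==/Profile even though their Expr nodes differ, and an implicit
// 'this' contributes a null entry rather than a NamedDecl.
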
@@ -164,38 +85,176 @@ class MutexID {
SmallVector<NamedDecl*, 2> DeclSeq;
/// Build a Decl sequence representing the lock from the given expression.
- /// Recursive function that bottoms out when the final DeclRefExpr is reached.
- // FIXME: Lock expressions that involve array indices or function calls.
- // FIXME: Deal with LockReturned attribute.
- void buildMutexID(Expr *Exp, Expr *Parent) {
+ /// Recursive function that terminates on DeclRefExpr.
+ /// Note: this function merely creates a MutexID; it does not check to
+ /// ensure that the original expression is a valid mutex expression.
+ void buildMutexID(Expr *Exp, const NamedDecl *D, Expr *Parent,
+ unsigned NumArgs, Expr **FunArgs) {
+ if (!Exp) {
+ DeclSeq.clear();
+ return;
+ }
+
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
+ ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
+ if (PV) {
+ FunctionDecl *FD =
+ cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
+ unsigned i = PV->getFunctionScopeIndex();
+
+ if (FunArgs && FD == D->getCanonicalDecl()) {
+ // Substitute call arguments for references to function parameters
+ assert(i < NumArgs);
+ buildMutexID(FunArgs[i], D, 0, 0, 0);
+ return;
+ }
+ // Map the param back to the param of the original function declaration.
+ DeclSeq.push_back(FD->getParamDecl(i));
+ return;
+ }
+ // Not a function parameter -- just store the reference.
DeclSeq.push_back(ND);
} else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
NamedDecl *ND = ME->getMemberDecl();
DeclSeq.push_back(ND);
- buildMutexID(ME->getBase(), Parent);
+ buildMutexID(ME->getBase(), D, Parent, NumArgs, FunArgs);
} else if (isa<CXXThisExpr>(Exp)) {
if (Parent)
- buildMutexID(Parent, 0);
- else
- return; // mutexID is still valid in this case
- } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp))
- buildMutexID(CE->getSubExpr(), Parent);
- else
- DeclSeq.clear(); // invalid lock expression
+ buildMutexID(Parent, D, 0, 0, 0);
+ else {
+ DeclSeq.push_back(0); // Use 0 to represent 'this'.
+ return; // mutexID is still valid in this case
+ }
+ } else if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
+ DeclSeq.push_back(CMCE->getMethodDecl()->getCanonicalDecl());
+ buildMutexID(CMCE->getImplicitObjectArgument(),
+ D, Parent, NumArgs, FunArgs);
+ unsigned NumCallArgs = CMCE->getNumArgs();
+ Expr** CallArgs = CMCE->getArgs();
+ for (unsigned i = 0; i < NumCallArgs; ++i) {
+ buildMutexID(CallArgs[i], D, Parent, NumArgs, FunArgs);
+ }
+ } else if (CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
+ buildMutexID(CE->getCallee(), D, Parent, NumArgs, FunArgs);
+ unsigned NumCallArgs = CE->getNumArgs();
+ Expr** CallArgs = CE->getArgs();
+ for (unsigned i = 0; i < NumCallArgs; ++i) {
+ buildMutexID(CallArgs[i], D, Parent, NumArgs, FunArgs);
+ }
+ } else if (BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
+ buildMutexID(BOE->getLHS(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(BOE->getRHS(), D, Parent, NumArgs, FunArgs);
+ } else if (UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
+ buildMutexID(UOE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ } else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(Exp)) {
+ buildMutexID(ASE->getBase(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(ASE->getIdx(), D, Parent, NumArgs, FunArgs);
+ } else if (AbstractConditionalOperator *CE =
+ dyn_cast<AbstractConditionalOperator>(Exp)) {
+ buildMutexID(CE->getCond(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(CE->getTrueExpr(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(CE->getFalseExpr(), D, Parent, NumArgs, FunArgs);
+ } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
+ buildMutexID(CE->getCond(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(CE->getLHS(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(CE->getRHS(), D, Parent, NumArgs, FunArgs);
+ } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
+ buildMutexID(CE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ } else if (ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
+ buildMutexID(PE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ } else if (isa<CharacterLiteral>(Exp) ||
+ isa<CXXNullPtrLiteralExpr>(Exp) ||
+ isa<GNUNullExpr>(Exp) ||
+ isa<CXXBoolLiteralExpr>(Exp) ||
+ isa<FloatingLiteral>(Exp) ||
+ isa<ImaginaryLiteral>(Exp) ||
+ isa<IntegerLiteral>(Exp) ||
+ isa<StringLiteral>(Exp) ||
+ isa<ObjCStringLiteral>(Exp)) {
+ return; // FIXME: Ignore literals for now
+ } else {
+ // Ignore. FIXME: mark as invalid expression?
+ }
+ }
+
+ /// \brief Construct a MutexID from an expression.
+ /// \param MutexExp The original mutex expression within an attribute
+ /// \param DeclExp An expression involving the Decl on which the attribute
+ /// occurs.
+ /// \param D The declaration to which the lock/unlock attribute is attached.
+ void buildMutexIDFromExp(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
+ Expr *Parent = 0;
+ unsigned NumArgs = 0;
+ Expr **FunArgs = 0;
+
+ // If we are processing a raw attribute expression, with no substitutions.
+ if (DeclExp == 0) {
+ buildMutexID(MutexExp, D, 0, 0, 0);
+ return;
+ }
+
+ // Examine DeclExp to find Parent and FunArgs, which are used to substitute
+ // for formal parameters when we call buildMutexID later.
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
+ Parent = ME->getBase();
+ } else if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
+ Parent = CE->getImplicitObjectArgument();
+ NumArgs = CE->getNumArgs();
+ FunArgs = CE->getArgs();
+ } else if (CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
+ NumArgs = CE->getNumArgs();
+ FunArgs = CE->getArgs();
+ } else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
+ Parent = 0; // FIXME -- get the parent from DeclStmt
+ NumArgs = CE->getNumArgs();
+ FunArgs = CE->getArgs();
+ } else if (D && isa<CXXDestructorDecl>(D)) {
+ // There's no such thing as a "destructor call" in the AST.
+ Parent = DeclExp;
+ }
+
+ // If the attribute has no arguments, then assume the argument is "this".
+ if (MutexExp == 0) {
+ buildMutexID(Parent, D, 0, 0, 0);
+ return;
+ }
+
+ buildMutexID(MutexExp, D, Parent, NumArgs, FunArgs);
}
public:
- MutexID(Expr *LExpr, Expr *ParentExpr) {
- buildMutexID(LExpr, ParentExpr);
+ explicit MutexID(clang::Decl::EmptyShell e) {
+ DeclSeq.clear();
}
- /// If we encounter part of a lock expression we cannot parse
+ /// \param MutexExp The original mutex expression within an attribute
+ /// \param DeclExp An expression involving the Decl on which the attribute
+ /// occurs.
+ /// \param D The declaration to which the lock/unlock attribute is attached.
+ /// Caller must check isValid() after construction.
+ MutexID(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
+ buildMutexIDFromExp(MutexExp, DeclExp, D);
+ }
+
+ /// Return true if this is a valid decl sequence.
+ /// Caller must call this by hand after construction to handle errors.
bool isValid() const {
return !DeclSeq.empty();
}
+ /// Issue a warning about an invalid lock expression
+ static void warnInvalidLock(ThreadSafetyHandler &Handler, Expr* MutexExp,
+ Expr *DeclExp, const NamedDecl* D) {
+ SourceLocation Loc;
+ if (DeclExp)
+ Loc = DeclExp->getExprLoc();
+
+ // FIXME: add a note about the attribute location in MutexExp or D
+ if (Loc.isValid())
+ Handler.handleInvalidLockExp(Loc);
+ }
+
bool operator==(const MutexID &other) const {
return DeclSeq == other.DeclSeq;
}
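
Editorial aside, not part of the commit: the substitution that buildMutexIDFromExp enables, using an illustrative attribute spelling:

//   void lockBoth(Mutex *a, Mutex *b) EXCLUSIVE_LOCK_FUNCTION(a, b);
//   Mutex m1, m2;
//   lockBoth(&m1, &m2);
//
// While checking the call, DeclExp is the CallExpr, so FunArgs = {&m1, &m2}.
// When buildMutexID reaches the ParmVarDecl for 'a' inside the attribute
// expression, FD == D and FunArgs is set, so it recurses on FunArgs[0] and
// records the sequence for 'm1' rather than for the formal parameter.
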
@@ -218,9 +277,11 @@ public:
/// The caret will point unambiguously to the lock expression, so using this
/// name in diagnostics is a way to get simple, and consistent, mutex names.
/// We do not want to output the entire expression text for security reasons.
- StringRef getName() const {
+ std::string getName() const {
assert(isValid());
- return DeclSeq.front()->getName();
+ if (!DeclSeq.front())
+ return "this"; // Use 0 to represent 'this'.
+ return DeclSeq.front()->getNameAsString();
}
void Profile(llvm::FoldingSetNodeID &ID) const {
@@ -231,6 +292,7 @@ public:
}
};
+
/// \brief This is a helper class that stores info about the most recent
/// acquire of a Lock.
///
@@ -245,9 +307,14 @@ struct LockData {
///
/// FIXME: add support for re-entrant locking and lock up/downgrading
LockKind LKind;
+ MutexID UnderlyingMutex; // for ScopedLockable objects
LockData(SourceLocation AcquireLoc, LockKind LKind)
- : AcquireLoc(AcquireLoc), LKind(LKind) {}
+ : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Decl::EmptyShell())
+ {}
+
+ LockData(SourceLocation AcquireLoc, LockKind LKind, const MutexID &Mu)
+ : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Mu) {}
bool operator==(const LockData &other) const {
return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
@@ -258,14 +325,567 @@ struct LockData {
}
void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(AcquireLoc.getRawEncoding());
- ID.AddInteger(LKind);
- }
+ ID.AddInteger(AcquireLoc.getRawEncoding());
+ ID.AddInteger(LKind);
+ }
};
+
/// A Lockset maps each MutexID (defined above) to information about how it has
/// been locked.
typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
+typedef llvm::ImmutableMap<NamedDecl*, unsigned> LocalVarContext;
+
+class LocalVariableMap;
+
+/// A side (entry or exit) of a CFG node.
+enum CFGBlockSide { CBS_Entry, CBS_Exit };
+
+/// CFGBlockInfo is a struct which contains all the information that is
+/// maintained for each block in the CFG. See LocalVariableMap for more
+/// information about the contexts.
+struct CFGBlockInfo {
+ Lockset EntrySet; // Lockset held at entry to block
+ Lockset ExitSet; // Lockset held at exit from block
+ LocalVarContext EntryContext; // Context held at entry to block
+ LocalVarContext ExitContext; // Context held at exit from block
+ SourceLocation EntryLoc; // Location of first statement in block
+ SourceLocation ExitLoc; // Location of last statement in block.
+ unsigned EntryIndex; // Used to replay contexts later
+
+ const Lockset &getSet(CFGBlockSide Side) const {
+ return Side == CBS_Entry ? EntrySet : ExitSet;
+ }
+ SourceLocation getLocation(CFGBlockSide Side) const {
+ return Side == CBS_Entry ? EntryLoc : ExitLoc;
+ }
+
+private:
+ CFGBlockInfo(Lockset EmptySet, LocalVarContext EmptyCtx)
+ : EntrySet(EmptySet), ExitSet(EmptySet),
+ EntryContext(EmptyCtx), ExitContext(EmptyCtx)
+ { }
+
+public:
+ static CFGBlockInfo getEmptyBlockInfo(Lockset::Factory &F,
+ LocalVariableMap &M);
+};
+
+
+
+// A LocalVariableMap maintains a map from local variables to their currently
+// valid definitions. It provides SSA-like functionality when traversing the
+// CFG. Like SSA, each definition or assignment to a variable is assigned a
+// unique name (an integer), which acts as the SSA name for that definition.
+// The total set of names is shared among all CFG basic blocks.
+// Unlike SSA, we do not rewrite expressions to replace local variable
+// DeclRefs with their SSA names. Instead, we compute a Context for each
+// point in the code, which maps local variables to the appropriate SSA name.
+// This map changes with each assignment.
+//
+// The map is computed in a single pass over the CFG. Subsequent analyses can
+// then query the map to find the appropriate Context for a statement, and use
+// that Context to look up the definitions of variables. (An illustrative
+// usage sketch follows the class definition below.)
+class LocalVariableMap {
+public:
+ typedef LocalVarContext Context;
+
+ /// A VarDefinition consists of an expression, representing the value of the
+ /// variable, along with the context in which that expression should be
+ /// interpreted. A reference VarDefinition does not itself contain this
+ /// information, but instead contains a pointer to a previous VarDefinition.
+ struct VarDefinition {
+ public:
+ friend class LocalVariableMap;
+
+ NamedDecl *Dec; // The original declaration for this variable.
+ Expr *Exp; // The expression for this variable, OR
+ unsigned Ref; // Reference to another VarDefinition
+ Context Ctx; // The map with which Exp should be interpreted.
+
+ bool isReference() { return !Exp; }
+
+ private:
+ // Create ordinary variable definition
+ VarDefinition(NamedDecl *D, Expr *E, Context C)
+ : Dec(D), Exp(E), Ref(0), Ctx(C)
+ { }
+
+ // Create reference to previous definition
+ VarDefinition(NamedDecl *D, unsigned R, Context C)
+ : Dec(D), Exp(0), Ref(R), Ctx(C)
+ { }
+ };
+
+private:
+ Context::Factory ContextFactory;
+ std::vector<VarDefinition> VarDefinitions;
+ std::vector<unsigned> CtxIndices;
+ std::vector<std::pair<Stmt*, Context> > SavedContexts;
+
+public:
+ LocalVariableMap() {
+ // index 0 is a placeholder for undefined variables (aka phi-nodes).
+ VarDefinitions.push_back(VarDefinition(0, 0u, getEmptyContext()));
+ }
+
+ /// Look up a definition, within the given context.
+ const VarDefinition* lookup(NamedDecl *D, Context Ctx) {
+ const unsigned *i = Ctx.lookup(D);
+ if (!i)
+ return 0;
+ assert(*i < VarDefinitions.size());
+ return &VarDefinitions[*i];
+ }
+
+ /// Look up the definition for D within the given context. Returns
+ /// NULL if the expression is not statically known. If successful, also
+  /// modifies Ctx to hold the context of the returned Expr.
+ Expr* lookupExpr(NamedDecl *D, Context &Ctx) {
+ const unsigned *P = Ctx.lookup(D);
+ if (!P)
+ return 0;
+
+ unsigned i = *P;
+ while (i > 0) {
+ if (VarDefinitions[i].Exp) {
+ Ctx = VarDefinitions[i].Ctx;
+ return VarDefinitions[i].Exp;
+ }
+ i = VarDefinitions[i].Ref;
+ }
+ return 0;
+ }
+
+ Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
+
+ /// Return the next context after processing S. This function is used by
+ /// clients of the class to get the appropriate context when traversing the
+ /// CFG. It must be called for every assignment or DeclStmt.
+ Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
+ if (SavedContexts[CtxIndex+1].first == S) {
+ CtxIndex++;
+ Context Result = SavedContexts[CtxIndex].second;
+ return Result;
+ }
+ return C;
+ }
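+
+  // Illustrative client pattern (a sketch): BuildLockset below seeds
+  // CtxIndex from CFGBlockInfo::EntryIndex and, for each assignment or
+  // DeclStmt S it visits, advances the context with
+  //
+  //   Ctx = Map.getNextContext(CtxIndex, S, Ctx);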
+
+ void dumpVarDefinitionName(unsigned i) {
+ if (i == 0) {
+ llvm::errs() << "Undefined";
+ return;
+ }
+ NamedDecl *Dec = VarDefinitions[i].Dec;
+ if (!Dec) {
+ llvm::errs() << "<<NULL>>";
+ return;
+ }
+ Dec->printName(llvm::errs());
+ llvm::errs() << "." << i << " " << ((void*) Dec);
+ }
+
+ /// Dumps an ASCII representation of the variable map to llvm::errs()
+ void dump() {
+ for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
+ Expr *Exp = VarDefinitions[i].Exp;
+ unsigned Ref = VarDefinitions[i].Ref;
+
+ dumpVarDefinitionName(i);
+ llvm::errs() << " = ";
+ if (Exp) Exp->dump();
+ else {
+ dumpVarDefinitionName(Ref);
+ llvm::errs() << "\n";
+ }
+ }
+ }
+
+ /// Dumps an ASCII representation of a Context to llvm::errs()
+ void dumpContext(Context C) {
+ for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
+ NamedDecl *D = I.getKey();
+ D->printName(llvm::errs());
+ const unsigned *i = C.lookup(D);
+ llvm::errs() << " -> ";
+ dumpVarDefinitionName(*i);
+ llvm::errs() << "\n";
+ }
+ }
+
+ /// Builds the variable map.
+ void traverseCFG(CFG *CFGraph, PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo);
+
+protected:
+ // Get the current context index
+ unsigned getContextIndex() { return SavedContexts.size()-1; }
+
+ // Save the current context for later replay
+ void saveContext(Stmt *S, Context C) {
+ SavedContexts.push_back(std::make_pair(S,C));
+ }
+
+ // Adds a new definition to the given context, and returns a new context.
+ // This method should be called when declaring a new variable.
+ Context addDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
+ assert(!Ctx.contains(D));
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.add(Ctx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
+ return NewCtx;
+ }
+
+ // Add a new reference to an existing definition.
+ Context addReference(NamedDecl *D, unsigned i, Context Ctx) {
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.add(Ctx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, i, Ctx));
+ return NewCtx;
+ }
+
+ // Updates a definition only if that definition is already in the map.
+ // This method should be called when assigning to an existing variable.
+ Context updateDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
+ if (Ctx.contains(D)) {
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.remove(Ctx, D);
+ NewCtx = ContextFactory.add(NewCtx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
+ return NewCtx;
+ }
+ return Ctx;
+ }
+
+ // Removes a definition from the context, but keeps the variable name
+ // as a valid variable. The index 0 is a placeholder for cleared definitions.
+ Context clearDefinition(NamedDecl *D, Context Ctx) {
+ Context NewCtx = Ctx;
+ if (NewCtx.contains(D)) {
+ NewCtx = ContextFactory.remove(NewCtx, D);
+ NewCtx = ContextFactory.add(NewCtx, D, 0);
+ }
+ return NewCtx;
+ }
+
+  // Remove a definition entirely from the context.
+ Context removeDefinition(NamedDecl *D, Context Ctx) {
+ Context NewCtx = Ctx;
+ if (NewCtx.contains(D)) {
+ NewCtx = ContextFactory.remove(NewCtx, D);
+ }
+ return NewCtx;
+ }
+
+ Context intersectContexts(Context C1, Context C2);
+ Context createReferenceContext(Context C);
+ void intersectBackEdge(Context C1, Context C2);
+
+ friend class VarMapBuilder;
+};
+
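+// Usage sketch (illustrative, not part of the original patch): given
+// "int x = 1; x = 2;", traverseCFG assigns distinct SSA names x1 and x2 to
+// the two definitions, and a client holding the Context for a program point
+// can recover the defining expression:
+//
+//   LocalVariableMap Map;                  // populated via traverseCFG
+//   LocalVariableMap::Context Ctx = ...;   // context at the query point
+//   if (Expr *E = Map.lookupExpr(XDecl, Ctx)) {
+//     // E is the Expr defining x here, e.g. the literal 2 after "x = 2".
+//   }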
+
+// This has to be defined after LocalVariableMap.
+CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(Lockset::Factory &F,
+ LocalVariableMap &M) {
+ return CFGBlockInfo(F.getEmptyMap(), M.getEmptyContext());
+}
+
+
+/// Visitor which builds a LocalVariableMap
+class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
+public:
+ LocalVariableMap* VMap;
+ LocalVariableMap::Context Ctx;
+
+ VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
+ : VMap(VM), Ctx(C) {}
+
+ void VisitDeclStmt(DeclStmt *S);
+ void VisitBinaryOperator(BinaryOperator *BO);
+};
+
+
+// Add new local variables to the variable map
+void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
+ bool modifiedCtx = false;
+ DeclGroupRef DGrp = S->getDeclGroup();
+ for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
+ if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
+ Expr *E = VD->getInit();
+
+ // Add local variables with trivial type to the variable map
+ QualType T = VD->getType();
+ if (T.isTrivialType(VD->getASTContext())) {
+ Ctx = VMap->addDefinition(VD, E, Ctx);
+ modifiedCtx = true;
+ }
+ }
+ }
+ if (modifiedCtx)
+ VMap->saveContext(S, Ctx);
+}
+
+// Update local variable definitions in variable map
+void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
+ if (!BO->isAssignmentOp())
+ return;
+
+ Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
+
+ // Update the variable map and current context.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
+ ValueDecl *VDec = DRE->getDecl();
+ if (Ctx.lookup(VDec)) {
+ if (BO->getOpcode() == BO_Assign)
+ Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
+ else
+ // FIXME -- handle compound assignment operators
+ Ctx = VMap->clearDefinition(VDec, Ctx);
+ VMap->saveContext(BO, Ctx);
+ }
+ }
+}
+
+
+// Computes the intersection of two contexts. The intersection is the
+// set of variables which have the same definition in both contexts;
+// variables with different definitions are discarded.
+LocalVariableMap::Context
+LocalVariableMap::intersectContexts(Context C1, Context C2) {
+ Context Result = C1;
+ for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
+ NamedDecl *Dec = I.getKey();
+ unsigned i1 = I.getData();
+ const unsigned *i2 = C2.lookup(Dec);
+ if (!i2) // variable doesn't exist on second path
+ Result = removeDefinition(Dec, Result);
+ else if (*i2 != i1) // variable exists, but has different definition
+ Result = clearDefinition(Dec, Result);
+ }
+ return Result;
+}
+
+// For every variable in C, create a new variable that refers to the
+// definition in C. Return a new context that contains these new variables.
+// (We use this for a naive implementation of SSA on loop back-edges.)
+LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
+ Context Result = getEmptyContext();
+ for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
+ NamedDecl *Dec = I.getKey();
+ unsigned i = I.getData();
+ Result = addReference(Dec, i, Result);
+ }
+ return Result;
+}
+
+// This routine also takes the intersection of C1 and C2, but it does so by
+// altering the VarDefinitions. C1 must be the result of an earlier call to
+// createReferenceContext.
+void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
+ for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
+ NamedDecl *Dec = I.getKey();
+ unsigned i1 = I.getData();
+ VarDefinition *VDef = &VarDefinitions[i1];
+ assert(VDef->isReference());
+
+ const unsigned *i2 = C2.lookup(Dec);
+ if (!i2 || (*i2 != i1))
+ VDef->Ref = 0; // Mark this variable as undefined
+ }
+}
+
+
+// Traverse the CFG in topological order, so all predecessors of a block
+// (excluding back-edges) are visited before the block itself. At
+// each point in the code, we calculate a Context, which holds the set of
+// variable definitions which are visible at that point in execution.
+// Visible variables are mapped to their definitions using an array that
+// contains all definitions.
+//
+// At join points in the CFG, the set is computed as the intersection of
+// the incoming sets along each edge, e.g.
+//
+// { Context | VarDefinitions }
+// int x = 0; { x -> x1 | x1 = 0 }
+// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
+// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
+// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
+// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
+//
+// This is essentially a simpler and more naive version of the standard SSA
+// algorithm. Those definitions that remain in the intersection are from blocks
+// that strictly dominate the current block. We do not bother to insert proper
+// phi nodes, because they are not used in our analysis; instead, wherever
+// a phi node would be required, we simply remove that definition from the
+// context (e.g. x above).
+//
+// The initial traversal does not capture back-edges, so those need to be
+// handled on a separate pass. Whenever the first pass encounters an
+// incoming back edge, it duplicates the context, creating new definitions
+// that refer back to the originals. (These correspond to places where SSA
+// might have to insert a phi node.) On the second pass, these definitions are
+// set to NULL if the variable has changed on the back-edge (i.e. a phi
+// node was actually required.) E.g.
+//
+// { Context | VarDefinitions }
+// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
+// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
+// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
+// ...                  { y -> y1  | x3 = x2 + 1, ... }
+//
+void LocalVariableMap::traverseCFG(CFG *CFGraph,
+ PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo) {
+ PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
+
+ CtxIndices.resize(CFGraph->getNumBlockIDs());
+
+ for (PostOrderCFGView::iterator I = SortedGraph->begin(),
+ E = SortedGraph->end(); I!= E; ++I) {
+ const CFGBlock *CurrBlock = *I;
+ int CurrBlockID = CurrBlock->getBlockID();
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
+
+ VisitedBlocks.insert(CurrBlock);
+
+ // Calculate the entry context for the current block
+ bool HasBackEdges = false;
+ bool CtxInit = true;
+ for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
+ PE = CurrBlock->pred_end(); PI != PE; ++PI) {
+      // If *PI -> CurrBlock is a back edge, skip it.
+ if (*PI == 0 || !VisitedBlocks.alreadySet(*PI)) {
+ HasBackEdges = true;
+ continue;
+ }
+
+ int PrevBlockID = (*PI)->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
+ if (CtxInit) {
+ CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
+ CtxInit = false;
+ }
+ else {
+ CurrBlockInfo->EntryContext =
+ intersectContexts(CurrBlockInfo->EntryContext,
+ PrevBlockInfo->ExitContext);
+ }
+ }
+
+ // Duplicate the context if we have back-edges, so we can call
+ // intersectBackEdges later.
+ if (HasBackEdges)
+ CurrBlockInfo->EntryContext =
+ createReferenceContext(CurrBlockInfo->EntryContext);
+
+ // Create a starting context index for the current block
+ saveContext(0, CurrBlockInfo->EntryContext);
+ CurrBlockInfo->EntryIndex = getContextIndex();
+
+ // Visit all the statements in the basic block.
+ VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
+ for (CFGBlock::const_iterator BI = CurrBlock->begin(),
+ BE = CurrBlock->end(); BI != BE; ++BI) {
+ switch (BI->getKind()) {
+ case CFGElement::Statement: {
+ const CFGStmt *CS = cast<CFGStmt>(&*BI);
+ VMapBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
+
+ // Mark variables on back edges as "unknown" if they've been changed.
+ for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
+ SE = CurrBlock->succ_end(); SI != SE; ++SI) {
+      // If CurrBlock -> *SI is *not* a back edge, skip it.
+ if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
+ continue;
+
+ CFGBlock *FirstLoopBlock = *SI;
+ Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
+ Context LoopEnd = CurrBlockInfo->ExitContext;
+ intersectBackEdge(LoopBegin, LoopEnd);
+ }
+ }
+
+ // Put an extra entry at the end of the indexed context array
+ unsigned exitID = CFGraph->getExit().getBlockID();
+ saveContext(0, BlockInfo[exitID].ExitContext);
+}
+
+/// Find the appropriate source locations to use when producing diagnostics for
+/// each block in the CFG.
+static void findBlockLocations(CFG *CFGraph,
+ PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo) {
+ for (PostOrderCFGView::iterator I = SortedGraph->begin(),
+ E = SortedGraph->end(); I!= E; ++I) {
+ const CFGBlock *CurrBlock = *I;
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
+
+ // Find the source location of the last statement in the block, if the
+ // block is not empty.
+ if (const Stmt *S = CurrBlock->getTerminator()) {
+ CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
+ } else {
+ for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
+ BE = CurrBlock->rend(); BI != BE; ++BI) {
+ // FIXME: Handle other CFGElement kinds.
+ if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
+ CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
+ break;
+ }
+ }
+ }
+
+    if (CurrBlockInfo->ExitLoc.isValid()) {
+ // This block contains at least one statement. Find the source location
+ // of the first statement in the block.
+ for (CFGBlock::const_iterator BI = CurrBlock->begin(),
+ BE = CurrBlock->end(); BI != BE; ++BI) {
+ // FIXME: Handle other CFGElement kinds.
+ if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
+ CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
+ break;
+ }
+ }
+ } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
+ CurrBlock != &CFGraph->getExit()) {
+ // The block is empty, and has a single predecessor. Use its exit
+ // location.
+ CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
+ BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
+ }
+ }
+}
+
+/// \brief Class which implements the core thread safety analysis routines.
+class ThreadSafetyAnalyzer {
+ friend class BuildLockset;
+
+ ThreadSafetyHandler &Handler;
+ Lockset::Factory LocksetFactory;
+ LocalVariableMap LocalVarMap;
+
+public:
+ ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}
+
+ Lockset intersectAndWarn(const CFGBlockInfo &Block1, CFGBlockSide Side1,
+ const CFGBlockInfo &Block2, CFGBlockSide Side2,
+ LockErrorKind LEK);
+
+ Lockset addLock(Lockset &LSet, Expr *MutexExp, const NamedDecl *D,
+ LockKind LK, SourceLocation Loc);
+
+ void runAnalysis(AnalysisDeclContext &AC);
+};
+
/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
@@ -273,32 +893,51 @@ typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
+ friend class ThreadSafetyAnalyzer;
+
ThreadSafetyHandler &Handler;
- Lockset LSet;
Lockset::Factory &LocksetFactory;
+ LocalVariableMap &LocalVarMap;
+
+ Lockset LSet;
+ LocalVariableMap::Context LVarCtx;
+ unsigned CtxIndex;
// Helper functions
- void removeLock(SourceLocation UnlockLoc, Expr *LockExp, Expr *Parent);
- void addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
- LockKind LK);
+ void addLock(const MutexID &Mutex, const LockData &LDat);
+ void removeLock(const MutexID &Mutex, SourceLocation UnlockLoc);
+
+ template <class AttrType>
+ void addLocksToSet(LockKind LK, AttrType *Attr,
+ Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
+ void removeLocksFromSet(UnlockFunctionAttr *Attr,
+ Expr *Exp, NamedDecl* FunDecl);
+
const ValueDecl *getValueDecl(Expr *Exp);
void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK,
Expr *MutexExp, ProtectedOperationKind POK);
void checkAccess(Expr *Exp, AccessKind AK);
void checkDereference(Expr *Exp, AccessKind AK);
+ void handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
template <class AttrType>
- void addLocksToSet(LockKind LK, Attr *Attr, CXXMemberCallExpr *Exp);
+ void addTrylock(LockKind LK, AttrType *Attr, Expr *Exp, NamedDecl *FunDecl,
+ const CFGBlock* PredBlock, const CFGBlock *CurrBlock,
+ Expr *BrE, bool Neg);
+ CallExpr* getTrylockCallExpr(Stmt *Cond, LocalVariableMap::Context C,
+ bool &Negate);
+ void handleTrylock(Stmt *Cond, const CFGBlock* PredBlock,
+ const CFGBlock *CurrBlock);
/// \brief Returns true if the lockset contains a lock, regardless of whether
/// the lock is held exclusively or shared.
- bool locksetContains(MutexID Lock) const {
+ bool locksetContains(const MutexID &Lock) const {
return LSet.lookup(Lock);
}
/// \brief Returns true if the lockset contains a lock with the passed in
/// locktype.
- bool locksetContains(MutexID Lock, LockKind KindRequested) const {
+ bool locksetContains(const MutexID &Lock, LockKind KindRequested) const {
const LockData *LockHeld = LSet.lookup(Lock);
return (LockHeld && KindRequested == LockHeld->LKind);
}
@@ -307,7 +946,8 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
/// passed in locktype. So for example, if we pass in LK_Shared, this function
/// returns true if the lock is held LK_Shared or LK_Exclusive. If we pass in
/// LK_Exclusive, this function returns true if the lock is held LK_Exclusive.
- bool locksetContainsAtLeast(MutexID Lock, LockKind KindRequested) const {
+ bool locksetContainsAtLeast(const MutexID &Lock,
+ LockKind KindRequested) const {
switch (KindRequested) {
case LK_Shared:
return locksetContains(Lock);
@@ -318,57 +958,121 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
}
public:
- BuildLockset(ThreadSafetyHandler &Handler, Lockset LS, Lockset::Factory &F)
- : StmtVisitor<BuildLockset>(), Handler(Handler), LSet(LS),
- LocksetFactory(F) {}
-
- Lockset getLockset() {
- return LSet;
- }
+ BuildLockset(ThreadSafetyAnalyzer *analyzer, CFGBlockInfo &Info)
+ : StmtVisitor<BuildLockset>(),
+ Handler(analyzer->Handler),
+ LocksetFactory(analyzer->LocksetFactory),
+ LocalVarMap(analyzer->LocalVarMap),
+ LSet(Info.EntrySet),
+ LVarCtx(Info.EntryContext),
+ CtxIndex(Info.EntryIndex)
+ {}
void VisitUnaryOperator(UnaryOperator *UO);
void VisitBinaryOperator(BinaryOperator *BO);
void VisitCastExpr(CastExpr *CE);
- void VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp);
+ void VisitCallExpr(CallExpr *Exp);
+ void VisitCXXConstructExpr(CXXConstructExpr *Exp);
+ void VisitDeclStmt(DeclStmt *S);
};
/// \brief Add a new lock to the lockset, warning if the lock is already there.
-/// \param LockLoc The source location of the acquire
-/// \param LockExp The lock expression corresponding to the lock to be added
-void BuildLockset::addLock(SourceLocation LockLoc, Expr *LockExp, Expr *Parent,
- LockKind LK) {
- // FIXME: deal with acquired before/after annotations. We can write a first
- // pass that does the transitive lookup lazily, and refine afterwards.
- MutexID Mutex(LockExp, Parent);
- if (!Mutex.isValid()) {
- Handler.handleInvalidLockExp(LockExp->getExprLoc());
- return;
- }
-
- LockData NewLock(LockLoc, LK);
-
+/// \param Mutex -- the Mutex expression for the lock
+/// \param LDat -- the LockData for the lock
+void BuildLockset::addLock(const MutexID &Mutex, const LockData& LDat) {
+ // FIXME: deal with acquired before/after annotations.
// FIXME: Don't always warn when we have support for reentrant locks.
if (locksetContains(Mutex))
- Handler.handleDoubleLock(Mutex.getName(), LockLoc);
- LSet = LocksetFactory.add(LSet, Mutex, NewLock);
+ Handler.handleDoubleLock(Mutex.getName(), LDat.AcquireLoc);
+ else
+ LSet = LocksetFactory.add(LSet, Mutex, LDat);
}
/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param LockExp The lock expression corresponding to the lock to be removed
/// \param UnlockLoc The source location of the unlock (only used in error msg)
-void BuildLockset::removeLock(SourceLocation UnlockLoc, Expr *LockExp,
- Expr *Parent) {
- MutexID Mutex(LockExp, Parent);
- if (!Mutex.isValid()) {
- Handler.handleInvalidLockExp(LockExp->getExprLoc());
+void BuildLockset::removeLock(const MutexID &Mutex, SourceLocation UnlockLoc) {
+ const LockData *LDat = LSet.lookup(Mutex);
+ if (!LDat)
+ Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
+ else {
+ // For scoped-lockable vars, remove the mutex associated with this var.
+ if (LDat->UnderlyingMutex.isValid())
+ removeLock(LDat->UnderlyingMutex, UnlockLoc);
+ LSet = LocksetFactory.remove(LSet, Mutex);
+ }
+}
+
+/// \brief This function, parameterized by an attribute type, is used to add a
+/// set of locks specified as attribute arguments to the lockset.
+template <typename AttrType>
+void BuildLockset::addLocksToSet(LockKind LK, AttrType *Attr,
+ Expr *Exp, NamedDecl* FunDecl, VarDecl *VD) {
+ typedef typename AttrType::args_iterator iterator_type;
+
+ SourceLocation ExpLocation = Exp->getExprLoc();
+
+  // Figure out if we're calling the constructor of a scoped lockable class.
+ bool isScopedVar = false;
+ if (VD) {
+ if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FunDecl)) {
+ CXXRecordDecl* PD = CD->getParent();
+ if (PD && PD->getAttr<ScopedLockableAttr>())
+ isScopedVar = true;
+ }
+ }
+
+ if (Attr->args_size() == 0) {
+ // The mutex held is the "this" object.
+ MutexID Mutex(0, Exp, FunDecl);
+ if (!Mutex.isValid())
+ MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
+ else
+ addLock(Mutex, LockData(ExpLocation, LK));
return;
}
- Lockset NewLSet = LocksetFactory.remove(LSet, Mutex);
- if(NewLSet == LSet)
- Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
+ for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
+ MutexID Mutex(*I, Exp, FunDecl);
+ if (!Mutex.isValid())
+ MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
+ else {
+ addLock(Mutex, LockData(ExpLocation, LK));
+ if (isScopedVar) {
+ // For scoped lockable vars, map this var to its underlying mutex.
+ DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
+ MutexID SMutex(&DRE, 0, 0);
+ addLock(SMutex, LockData(VD->getLocation(), LK, Mutex));
+ }
+ }
+ }
+}
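+
+// Sketch of the scoped-lockable pattern handled above (illustrative; the
+// guard class is hypothetical, annotated with the conventional macros):
+//
+//   class SCOPED_LOCKABLE MutexLock {
+//   public:
+//     MutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
+//     ~MutexLock() UNLOCK_FUNCTION();
+//   };
+//
+//   MutexLock l(&mu);  // adds 'mu' to the lockset and maps the guard
+//                      // variable 'l' to its underlying mutex, so that
+//                      // destroying 'l' also removes 'mu'.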
- LSet = NewLSet;
+/// \brief This function removes a set of locks specified as attribute
+/// arguments from the lockset.
+void BuildLockset::removeLocksFromSet(UnlockFunctionAttr *Attr,
+ Expr *Exp, NamedDecl* FunDecl) {
+ SourceLocation ExpLocation;
+ if (Exp) ExpLocation = Exp->getExprLoc();
+
+ if (Attr->args_size() == 0) {
+ // The mutex held is the "this" object.
+ MutexID Mu(0, Exp, FunDecl);
+ if (!Mu.isValid())
+ MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
+ else
+ removeLock(Mu, ExpLocation);
+ return;
+ }
+
+ for (UnlockFunctionAttr::args_iterator I = Attr->args_begin(),
+ E = Attr->args_end(); I != E; ++I) {
+ MutexID Mutex(*I, Exp, FunDecl);
+ if (!Mutex.isValid())
+ MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
+ else
+ removeLock(Mutex, ExpLocation);
+ }
}
/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
@@ -383,20 +1087,19 @@ const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
}
/// \brief Warn if the LSet does not contain a lock sufficient to protect access
-/// of at least the passed in AccessType.
+/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
AccessKind AK, Expr *MutexExp,
ProtectedOperationKind POK) {
LockKind LK = getLockKindFromAccessKind(AK);
- Expr *Parent = getParent(Exp);
- MutexID Mutex(MutexExp, Parent);
+
+ MutexID Mutex(MutexExp, Exp, D);
if (!Mutex.isValid())
- Handler.handleInvalidLockExp(MutexExp->getExprLoc());
+ MutexID::warnInvalidLock(Handler, MutexExp, Exp, D);
else if (!locksetContainsAtLeast(Mutex, LK))
Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
}
-
/// \brief This method identifies variable dereferences and checks pt_guarded_by
/// and pt_guarded_var annotations. Note that we only check these annotations
/// at the time a pointer is dereferenced.
@@ -440,71 +1143,10 @@ void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
}
-/// \brief For unary operations which read and write a variable, we need to
-/// check whether we hold any required mutexes. Reads are checked in
-/// VisitCastExpr.
-void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
- switch (UO->getOpcode()) {
- case clang::UO_PostDec:
- case clang::UO_PostInc:
- case clang::UO_PreDec:
- case clang::UO_PreInc: {
- Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
- checkAccess(SubExp, AK_Written);
- checkDereference(SubExp, AK_Written);
- break;
- }
- default:
- break;
- }
-}
-
-/// For binary operations which assign to a variable (writes), we need to check
-/// whether we hold any required mutexes.
-/// FIXME: Deal with non-primitive types.
-void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
- if (!BO->isAssignmentOp())
- return;
- Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
- checkAccess(LHSExp, AK_Written);
- checkDereference(LHSExp, AK_Written);
-}
-
-/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
-/// need to ensure we hold any required mutexes.
-/// FIXME: Deal with non-primitive types.
-void BuildLockset::VisitCastExpr(CastExpr *CE) {
- if (CE->getCastKind() != CK_LValueToRValue)
- return;
- Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
- checkAccess(SubExp, AK_Read);
- checkDereference(SubExp, AK_Read);
-}
-
-/// \brief This function, parameterized by an attribute type, is used to add a
-/// set of locks specified as attribute arguments to the lockset.
-template <typename AttrType>
-void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
- CXXMemberCallExpr *Exp) {
- typedef typename AttrType::args_iterator iterator_type;
- SourceLocation ExpLocation = Exp->getExprLoc();
- Expr *Parent = Exp->getImplicitObjectArgument();
- AttrType *SpecificAttr = cast<AttrType>(Attr);
-
- if (SpecificAttr->args_size() == 0) {
- // The mutex held is the "this" object.
- addLock(ExpLocation, Parent, 0, LK);
- return;
- }
-
- for (iterator_type I = SpecificAttr->args_begin(),
- E = SpecificAttr->args_end(); I != E; ++I)
- addLock(ExpLocation, *I, Parent, LK);
-}
-
-/// \brief When visiting CXXMemberCallExprs we need to examine the attributes on
-/// the method that is being called and add, remove or check locks in the
-/// lockset accordingly.
+/// \brief Process a function call, method call, constructor call,
+/// or destructor call. This involves looking at the attributes on the
+/// corresponding function/method/constructor/destructor, issuing warnings,
+/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
@@ -515,44 +1157,32 @@ void BuildLockset::addLocksToSet(LockKind LK, Attr *Attr,
///
/// FIXME: Do not flag an error for member variables accessed in constructors/
/// destructors
-void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
- NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
-
- SourceLocation ExpLocation = Exp->getExprLoc();
- Expr *Parent = Exp->getImplicitObjectArgument();
-
- if(!D || !D->hasAttrs())
- return;
-
+void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
AttrVec &ArgAttrs = D->getAttrs();
for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
Attr *Attr = ArgAttrs[i];
switch (Attr->getKind()) {
// When we encounter an exclusive lock function, we need to add the lock
// to our lockset with kind exclusive.
- case attr::ExclusiveLockFunction:
- addLocksToSet<ExclusiveLockFunctionAttr>(LK_Exclusive, Attr, Exp);
+ case attr::ExclusiveLockFunction: {
+ ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(Attr);
+ addLocksToSet(LK_Exclusive, A, Exp, D, VD);
break;
+ }
// When we encounter a shared lock function, we need to add the lock
// to our lockset with kind shared.
- case attr::SharedLockFunction:
- addLocksToSet<SharedLockFunctionAttr>(LK_Shared, Attr, Exp);
+ case attr::SharedLockFunction: {
+ SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(Attr);
+ addLocksToSet(LK_Shared, A, Exp, D, VD);
break;
+ }
// When we encounter an unlock function, we need to remove unlocked
// mutexes from the lockset, and flag a warning if they are not there.
case attr::UnlockFunction: {
UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
-
- if (UFAttr->args_size() == 0) { // The lock held is the "this" object.
- removeLock(ExpLocation, Parent, 0);
- break;
- }
-
- for (UnlockFunctionAttr::args_iterator I = UFAttr->args_begin(),
- E = UFAttr->args_end(); I != E; ++I)
- removeLock(ExpLocation, *I, Parent);
+ removeLocksFromSet(UFAttr, Exp, D);
break;
}
@@ -579,12 +1209,12 @@ void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
E = LEAttr->args_end(); I != E; ++I) {
- MutexID Mutex(*I, Parent);
+ MutexID Mutex(*I, Exp, D);
if (!Mutex.isValid())
- Handler.handleInvalidLockExp((*I)->getExprLoc());
+ MutexID::warnInvalidLock(Handler, *I, Exp, D);
else if (locksetContains(Mutex))
Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
- ExpLocation);
+ Exp->getExprLoc());
}
break;
}
@@ -596,7 +1226,180 @@ void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
}
}
-} // end anonymous namespace
+
+/// \brief Add lock to set, if the current block is in the taken branch of a
+/// trylock.
+template <class AttrType>
+void BuildLockset::addTrylock(LockKind LK, AttrType *Attr, Expr *Exp,
+ NamedDecl *FunDecl, const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock, Expr *BrE, bool Neg) {
+ // Find out which branch has the lock
+  bool branch = false;
+ if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
+ branch = BLE->getValue();
+ }
+ else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
+ branch = ILE->getValue().getBoolValue();
+ }
+ int branchnum = branch ? 0 : 1;
+ if (Neg) branchnum = !branchnum;
+
+ // If we've taken the trylock branch, then add the lock
+ int i = 0;
+ for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
+ SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
+ if (*SI == CurrBlock && i == branchnum) {
+ addLocksToSet(LK, Attr, Exp, FunDecl, 0);
+ }
+ }
+}
+
+
+// If Cond can be traced back to a function call, return the call expression.
+// The Negate argument should be initialized to false by the caller; it is
+// set to true if the function call is negated, e.g. if (!mu.tryLock(...))
+CallExpr* BuildLockset::getTrylockCallExpr(Stmt *Cond,
+ LocalVariableMap::Context C,
+ bool &Negate) {
+ if (!Cond)
+ return 0;
+
+ if (CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
+ return CallExp;
+ }
+ else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
+ return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
+ }
+ else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
+ Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
+ return getTrylockCallExpr(E, C, Negate);
+ }
+ else if (UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
+ if (UOP->getOpcode() == UO_LNot) {
+ Negate = !Negate;
+ return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
+ }
+ }
+ // FIXME -- handle && and || as well.
+  return 0;
+}
+
+
+/// \brief Process a conditional branch from a previous block to the current
+/// block, looking for trylock calls.
+void BuildLockset::handleTrylock(Stmt *Cond, const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock) {
+ bool Negate = false;
+ CallExpr *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
+ if (!Exp)
+ return;
+
+ NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+  if (!FunDecl || !FunDecl->hasAttrs())
+ return;
+
+ // If the condition is a call to a Trylock function, then grab the attributes
+ AttrVec &ArgAttrs = FunDecl->getAttrs();
+ for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
+ Attr *Attr = ArgAttrs[i];
+ switch (Attr->getKind()) {
+ case attr::ExclusiveTrylockFunction: {
+ ExclusiveTrylockFunctionAttr *A =
+ cast<ExclusiveTrylockFunctionAttr>(Attr);
+ addTrylock(LK_Exclusive, A, Exp, FunDecl, PredBlock, CurrBlock,
+ A->getSuccessValue(), Negate);
+ break;
+ }
+ case attr::SharedTrylockFunction: {
+ SharedTrylockFunctionAttr *A =
+ cast<SharedTrylockFunctionAttr>(Attr);
+ addTrylock(LK_Shared, A, Exp, FunDecl, PredBlock, CurrBlock,
+ A->getSuccessValue(), Negate);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
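+
+// Sketch of the user-level pattern handled here (illustrative; conventional
+// attribute macro, hypothetical Mutex class):
+//
+//   bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);  // member of Mutex
+//
+//   if (mu.TryLock()) {   // the lock is added only on the success branch
+//     guarded_var = 1;    // OK: mu is in the lockset here
+//     mu.Unlock();
+//   }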
+
+
+/// \brief For unary operations which read and write a variable, we need to
+/// check whether we hold any required mutexes. Reads are checked in
+/// VisitCastExpr.
+void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
+ switch (UO->getOpcode()) {
+ case clang::UO_PostDec:
+ case clang::UO_PostInc:
+ case clang::UO_PreDec:
+ case clang::UO_PreInc: {
+ Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
+ checkAccess(SubExp, AK_Written);
+ checkDereference(SubExp, AK_Written);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/// For binary operations which assign to a variable (writes), we need to check
+/// whether we hold any required mutexes.
+/// FIXME: Deal with non-primitive types.
+void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
+ if (!BO->isAssignmentOp())
+ return;
+
+ // adjust the context
+ LVarCtx = LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
+
+ Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
+ checkAccess(LHSExp, AK_Written);
+ checkDereference(LHSExp, AK_Written);
+}
+
+/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
+/// need to ensure we hold any required mutexes.
+/// FIXME: Deal with non-primitive types.
+void BuildLockset::VisitCastExpr(CastExpr *CE) {
+ if (CE->getCastKind() != CK_LValueToRValue)
+ return;
+ Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
+ checkAccess(SubExp, AK_Read);
+ checkDereference(SubExp, AK_Read);
+}
+
+
+void BuildLockset::VisitCallExpr(CallExpr *Exp) {
+ NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+  if (!D || !D->hasAttrs())
+ return;
+ handleCall(Exp, D);
+}
+
+void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
+ // FIXME -- only handles constructors in DeclStmt below.
+}
+
+void BuildLockset::VisitDeclStmt(DeclStmt *S) {
+ // adjust the context
+ LVarCtx = LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
+
+ DeclGroupRef DGrp = S->getDeclGroup();
+ for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
+ Decl *D = *I;
+ if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
+ Expr *E = VD->getInit();
+ if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
+ NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
+ if (!CtorD || !CtorD->hasAttrs())
+ return;
+ handleCall(CE, CtorD, VD);
+ }
+ }
+ }
+}
+
/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
@@ -606,9 +1409,14 @@ void BuildLockset::VisitCXXMemberCallExpr(CXXMemberCallExpr *Exp) {
/// A; if () then B; else C; D; we need to check that the lockset after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
-static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
- const Lockset LSet1, const Lockset LSet2,
- Lockset::Factory &Fact, LockErrorKind LEK) {
+Lockset ThreadSafetyAnalyzer::intersectAndWarn(const CFGBlockInfo &Block1,
+ CFGBlockSide Side1,
+ const CFGBlockInfo &Block2,
+ CFGBlockSide Side2,
+ LockErrorKind LEK) {
+ Lockset LSet1 = Block1.getSet(Side1);
+ Lockset LSet2 = Block2.getSet(Side2);
+
Lockset Intersection = LSet1;
for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
const MutexID &LSet2Mutex = I.getKey();
@@ -619,11 +1427,13 @@ static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
LSet2LockData.AcquireLoc,
LD->AcquireLoc);
if (LD->LKind != LK_Exclusive)
- Intersection = Fact.add(Intersection, LSet2Mutex, LSet2LockData);
+ Intersection = LocksetFactory.add(Intersection, LSet2Mutex,
+ LSet2LockData);
}
} else {
Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
- LSet2LockData.AcquireLoc, LEK);
+ LSet2LockData.AcquireLoc,
+ Block1.getLocation(Side1), LEK);
}
}
@@ -632,91 +1442,111 @@ static Lockset intersectAndWarn(ThreadSafetyHandler &Handler,
const MutexID &Mutex = I.getKey();
const LockData &MissingLock = I.getData();
Handler.handleMutexHeldEndOfScope(Mutex.getName(),
- MissingLock.AcquireLoc, LEK);
- Intersection = Fact.remove(Intersection, Mutex);
+ MissingLock.AcquireLoc,
+ Block2.getLocation(Side2), LEK);
+ Intersection = LocksetFactory.remove(Intersection, Mutex);
}
}
return Intersection;
}
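+
+// Sketch of code that triggers the symmetric-difference warning above
+// (illustrative):
+//
+//   mu.Lock();
+//   if (b) mu.Unlock();
+//   // At the join, mu is held on one predecessor but not the other, so
+//   // handleMutexHeldEndOfScope fires with LEK_LockedSomePredecessors and
+//   // the intersection (without mu) becomes the entry lockset.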
-static Lockset addLock(ThreadSafetyHandler &Handler,
- Lockset::Factory &LocksetFactory,
- Lockset &LSet, Expr *LockExp, LockKind LK,
- SourceLocation Loc) {
- MutexID Mutex(LockExp, 0);
+Lockset ThreadSafetyAnalyzer::addLock(Lockset &LSet, Expr *MutexExp,
+ const NamedDecl *D,
+ LockKind LK, SourceLocation Loc) {
+ MutexID Mutex(MutexExp, 0, D);
if (!Mutex.isValid()) {
- Handler.handleInvalidLockExp(LockExp->getExprLoc());
+ MutexID::warnInvalidLock(Handler, MutexExp, 0, D);
return LSet;
}
LockData NewLock(Loc, LK);
return LocksetFactory.add(LSet, Mutex, NewLock);
}
-namespace clang {
-namespace thread_safety {
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
-void runThreadSafetyAnalysis(AnalysisContext &AC,
- ThreadSafetyHandler &Handler) {
+void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CFG *CFGraph = AC.getCFG();
if (!CFGraph) return;
- const Decl *D = AC.getDecl();
- if (D && D->getAttr<NoThreadSafetyAnalysisAttr>()) return;
+ const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
- Lockset::Factory LocksetFactory;
-
- // FIXME: Swith to SmallVector? Otherwise improve performance impact?
- std::vector<Lockset> EntryLocksets(CFGraph->getNumBlockIDs(),
- LocksetFactory.getEmptyMap());
- std::vector<Lockset> ExitLocksets(CFGraph->getNumBlockIDs(),
- LocksetFactory.getEmptyMap());
+ if (!D)
+ return; // Ignore anonymous functions for now.
+ if (D->getAttr<NoThreadSafetyAnalysisAttr>())
+ return;
+ // FIXME: Do something a bit more intelligent inside constructor and
+ // destructor code. Constructors and destructors must assume unique access
+  // to 'this', so checks on member variable access are disabled, but we should
+ // still enable checks on other objects.
+ if (isa<CXXConstructorDecl>(D))
+ return; // Don't check inside constructors.
+ if (isa<CXXDestructorDecl>(D))
+ return; // Don't check inside destructors.
+
+ std::vector<CFGBlockInfo> BlockInfo(CFGraph->getNumBlockIDs(),
+ CFGBlockInfo::getEmptyBlockInfo(LocksetFactory, LocalVarMap));
// We need to explore the CFG via a "topological" ordering.
// That way, we will be guaranteed to have information about required
// predecessor locksets when exploring a new block.
- TopologicallySortedCFG SortedGraph(CFGraph);
- CFGBlockSet VisitedBlocks(CFGraph);
+ PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
+ PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
+
+ // Compute SSA names for local variables
+ LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
+
+ // Fill in source locations for all CFGBlocks.
+ findBlockLocations(CFGraph, SortedGraph, BlockInfo);
- if (!SortedGraph.empty() && D->hasAttrs()) {
- const CFGBlock *FirstBlock = *SortedGraph.begin();
- Lockset &InitialLockset = EntryLocksets[FirstBlock->getBlockID()];
+ // Add locks from exclusive_locks_required and shared_locks_required
+ // to initial lockset. Also turn off checking for lock and unlock functions.
+ // FIXME: is there a more intelligent way to check lock/unlock functions?
+ if (!SortedGraph->empty() && D->hasAttrs()) {
+ const CFGBlock *FirstBlock = *SortedGraph->begin();
+ Lockset &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
const AttrVec &ArgAttrs = D->getAttrs();
- for(unsigned i = 0; i < ArgAttrs.size(); ++i) {
+ for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
Attr *Attr = ArgAttrs[i];
SourceLocation AttrLoc = Attr->getLocation();
if (SharedLocksRequiredAttr *SLRAttr
= dyn_cast<SharedLocksRequiredAttr>(Attr)) {
for (SharedLocksRequiredAttr::args_iterator
- SLRIter = SLRAttr->args_begin(),
- SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
- InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
- *SLRIter, LK_Shared,
+ SLRIter = SLRAttr->args_begin(),
+ SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
+ InitialLockset = addLock(InitialLockset,
+ *SLRIter, D, LK_Shared,
AttrLoc);
} else if (ExclusiveLocksRequiredAttr *ELRAttr
= dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
for (ExclusiveLocksRequiredAttr::args_iterator
- ELRIter = ELRAttr->args_begin(),
- ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
- InitialLockset = addLock(Handler, LocksetFactory, InitialLockset,
- *ELRIter, LK_Exclusive,
+ ELRIter = ELRAttr->args_begin(),
+ ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
+ InitialLockset = addLock(InitialLockset,
+ *ELRIter, D, LK_Exclusive,
AttrLoc);
+ } else if (isa<UnlockFunctionAttr>(Attr)) {
+ // Don't try to check unlock functions for now
+ return;
+ } else if (isa<ExclusiveLockFunctionAttr>(Attr)) {
+ // Don't try to check lock functions for now
+ return;
+ } else if (isa<SharedLockFunctionAttr>(Attr)) {
+ // Don't try to check lock functions for now
+ return;
}
}
}
- for (TopologicallySortedCFG::iterator I = SortedGraph.begin(),
- E = SortedGraph.end(); I!= E; ++I) {
+ for (PostOrderCFGView::iterator I = SortedGraph->begin(),
+ E = SortedGraph->end(); I!= E; ++I) {
const CFGBlock *CurrBlock = *I;
int CurrBlockID = CurrBlock->getBlockID();
-
- VisitedBlocks.insert(CurrBlock);
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
// Use the default initial lockset in case there are no predecessors.
- Lockset &Entryset = EntryLocksets[CurrBlockID];
- Lockset &Exitset = ExitLocksets[CurrBlockID];
+ VisitedBlocks.insert(CurrBlock);
// Iterate through the predecessor blocks and warn if the lockset for all
// predecessors is not the same. We take the entry lockset of the current
@@ -732,6 +1562,7 @@ void runThreadSafetyAnalysis(AnalysisContext &AC,
// union because the real error is probably that we forgot to unlock M on
// all code paths.
bool LocksetInitialized = false;
+ llvm::SmallVector<CFGBlock*, 8> SpecialBlocks;
for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
PE = CurrBlock->pred_end(); PI != PE; ++PI) {
@@ -739,24 +1570,102 @@ void runThreadSafetyAnalysis(AnalysisContext &AC,
if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
continue;
+ // Ignore edges from blocks that can't return.
+ if ((*PI)->hasNoReturnElement())
+ continue;
+
+ // If the previous block ended in a 'continue' or 'break' statement, then
+ // a difference in locksets is probably due to a bug in that block, rather
+ // than in some other predecessor. In that case, keep the other
+ // predecessor's lockset.
+ if (const Stmt *Terminator = (*PI)->getTerminator()) {
+ if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
+ SpecialBlocks.push_back(*PI);
+ continue;
+ }
+ }
+
int PrevBlockID = (*PI)->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
if (!LocksetInitialized) {
- Entryset = ExitLocksets[PrevBlockID];
+ CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
LocksetInitialized = true;
} else {
- Entryset = intersectAndWarn(Handler, Entryset,
- ExitLocksets[PrevBlockID], LocksetFactory,
- LEK_LockedSomePredecessors);
+ CurrBlockInfo->EntrySet =
+ intersectAndWarn(*CurrBlockInfo, CBS_Entry,
+ *PrevBlockInfo, CBS_Exit,
+ LEK_LockedSomePredecessors);
}
}
- BuildLockset LocksetBuilder(Handler, Entryset, LocksetFactory);
+ // Process continue and break blocks. Assume that the lockset for the
+ // resulting block is unaffected by any discrepancies in them.
+ for (unsigned SpecialI = 0, SpecialN = SpecialBlocks.size();
+ SpecialI < SpecialN; ++SpecialI) {
+ CFGBlock *PrevBlock = SpecialBlocks[SpecialI];
+ int PrevBlockID = PrevBlock->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
+ if (!LocksetInitialized) {
+ CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
+ LocksetInitialized = true;
+ } else {
+ // Determine whether this edge is a loop terminator for diagnostic
+ // purposes. FIXME: A 'break' statement might be a loop terminator, but
+ // it might also be part of a switch. Also, a subsequent destructor
+ // might add to the lockset, in which case the real issue might be a
+ // double lock on the other path.
+ const Stmt *Terminator = PrevBlock->getTerminator();
+ bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
+
+ // Do not update EntrySet.
+ intersectAndWarn(*CurrBlockInfo, CBS_Entry, *PrevBlockInfo, CBS_Exit,
+ IsLoop ? LEK_LockedSomeLoopIterations
+ : LEK_LockedSomePredecessors);
+ }
+ }
+
+ BuildLockset LocksetBuilder(this, *CurrBlockInfo);
+ CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
+ PE = CurrBlock->pred_end();
+ if (PI != PE) {
+ // If the predecessor ended in a branch, then process any trylocks.
+ // FIXME -- check to make sure there's only one predecessor.
+ if (Stmt *TCE = (*PI)->getTerminatorCondition()) {
+ LocksetBuilder.handleTrylock(TCE, *PI, CurrBlock);
+ }
+ }
+
+ // Visit all the statements in the basic block.
for (CFGBlock::const_iterator BI = CurrBlock->begin(),
BE = CurrBlock->end(); BI != BE; ++BI) {
- if (const CFGStmt *CfgStmt = dyn_cast<CFGStmt>(&*BI))
- LocksetBuilder.Visit(const_cast<Stmt*>(CfgStmt->getStmt()));
+ switch (BI->getKind()) {
+ case CFGElement::Statement: {
+ const CFGStmt *CS = cast<CFGStmt>(&*BI);
+ LocksetBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
+ break;
+ }
+ // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
+ case CFGElement::AutomaticObjectDtor: {
+ const CFGAutomaticObjDtor *AD = cast<CFGAutomaticObjDtor>(&*BI);
+ CXXDestructorDecl *DD = const_cast<CXXDestructorDecl*>(
+ AD->getDestructorDecl(AC.getASTContext()));
+ if (!DD->hasAttrs())
+ break;
+
+        // Create a dummy DeclRefExpr for the destroyed variable.
+ VarDecl *VD = const_cast<VarDecl*>(AD->getVarDecl());
+ DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue,
+ AD->getTriggerStmt()->getLocEnd());
+ LocksetBuilder.handleCall(&DRE, DD);
+ break;
+ }
+ default:
+ break;
+ }
}
- Exitset = LocksetBuilder.getLockset();
+ CurrBlockInfo->ExitSet = LocksetBuilder.LSet;
// For every back edge from CurrBlock (the end of the loop) to another block
// (FirstLoopBlock) we need to check that the Lockset of Block is equal to
@@ -770,21 +1679,38 @@ void runThreadSafetyAnalysis(AnalysisContext &AC,
continue;
CFGBlock *FirstLoopBlock = *SI;
- Lockset PreLoop = EntryLocksets[FirstLoopBlock->getBlockID()];
- Lockset LoopEnd = ExitLocksets[CurrBlockID];
- intersectAndWarn(Handler, LoopEnd, PreLoop, LocksetFactory,
+ CFGBlockInfo &PreLoop = BlockInfo[FirstLoopBlock->getBlockID()];
+ CFGBlockInfo &LoopEnd = BlockInfo[CurrBlockID];
+ intersectAndWarn(LoopEnd, CBS_Exit, PreLoop, CBS_Entry,
LEK_LockedSomeLoopIterations);
}
}
- Lockset InitialLockset = EntryLocksets[CFGraph->getEntry().getBlockID()];
- Lockset FinalLockset = ExitLocksets[CFGraph->getExit().getBlockID()];
+ CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
+ CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
// FIXME: Should we call this function for all blocks which exit the function?
- intersectAndWarn(Handler, InitialLockset, FinalLockset, LocksetFactory,
+ intersectAndWarn(Initial, CBS_Entry, Final, CBS_Exit,
LEK_LockedAtEndOfFunction);
}
+} // end anonymous namespace
+
+
+namespace clang {
+namespace thread_safety {
+
+/// \brief Check a function's CFG for thread-safety violations.
+///
+/// We traverse the blocks in the CFG, compute the set of mutexes that are held
+/// at the end of each block, and issue warnings for thread safety violations.
+/// Each block in the CFG is traversed exactly once.
+void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
+ ThreadSafetyHandler &Handler) {
+ ThreadSafetyAnalyzer Analyzer(Handler);
+ Analyzer.runAnalysis(AC);
+}
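+
+// A client drives the analysis roughly as follows (a sketch; Reporter is a
+// hypothetical subclass overriding the handle* callbacks used above):
+//
+//   Reporter R;                      // derives from ThreadSafetyHandler
+//   runThreadSafetyAnalysis(AC, R);  // AC is the AnalysisDeclContext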
+
/// \brief Helper function that returns a LockKind required for the given level
/// of access.
LockKind getLockKindFromAccessKind(AccessKind AK) {
@@ -796,4 +1722,5 @@ LockKind getLockKindFromAccessKind(AccessKind AK) {
}
llvm_unreachable("Unknown AccessKind");
}
+
}} // end namespace clang::thread_safety
diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
index 9e98560..6e5da25 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
@@ -21,7 +21,7 @@
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
-#include "clang/Analysis/Support/SaveAndRestore.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -337,7 +337,7 @@ public:
class TransferFunctions : public StmtVisitor<TransferFunctions> {
CFGBlockValues &vals;
const CFG &cfg;
- AnalysisContext &ac;
+ AnalysisDeclContext &ac;
UninitVariablesHandler *handler;
/// The last DeclRefExpr seen when analyzing a block. Used to
@@ -356,7 +356,7 @@ class TransferFunctions : public StmtVisitor<TransferFunctions> {
public:
TransferFunctions(CFGBlockValues &vals, const CFG &cfg,
- AnalysisContext &ac,
+ AnalysisDeclContext &ac,
UninitVariablesHandler *handler)
: vals(vals), cfg(cfg), ac(ac), handler(handler),
lastDR(0), lastLoad(0),
@@ -615,7 +615,7 @@ void TransferFunctions::ProcessUses(Stmt *s) {
//====------------------------------------------------------------------------//
static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
- AnalysisContext &ac, CFGBlockValues &vals,
+ AnalysisDeclContext &ac, CFGBlockValues &vals,
llvm::BitVector &wasAnalyzed,
UninitVariablesHandler *handler = 0) {
@@ -672,7 +672,7 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
void clang::runUninitializedVariablesAnalysis(
const DeclContext &dc,
const CFG &cfg,
- AnalysisContext &ac,
+ AnalysisDeclContext &ac,
UninitVariablesHandler &handler,
UninitVariablesAnalysisStats &stats) {
CFGBlockValues vals(cfg);
diff --git a/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp b/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
index 7bdcdc6..c78a292 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
@@ -15,6 +15,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/SmallVector.h"
using namespace clang;
static const Builtin::Info BuiltinInfo[] = {
diff --git a/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c b/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
index 124e386..e197003 100644
--- a/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
+++ b/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
@@ -339,67 +339,6 @@ ConversionResult ConvertUTF32toUTF8 (
return result;
}
-/* --------------------------------------------------------------------- */
-
-ConversionResult ConvertUTF8toUTF32 (
- const UTF8** sourceStart, const UTF8* sourceEnd,
- UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
- ConversionResult result = conversionOK;
- const UTF8* source = *sourceStart;
- UTF32* target = *targetStart;
- while (source < sourceEnd) {
- UTF32 ch = 0;
- unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
- if (source + extraBytesToRead >= sourceEnd) {
- result = sourceExhausted; break;
- }
- /* Do this check whether lenient or strict */
- if (!isLegalUTF8(source, extraBytesToRead+1)) {
- result = sourceIllegal;
- break;
- }
- /*
- * The cases all fall through. See "Note A" below.
- */
- switch (extraBytesToRead) {
- case 5: ch += *source++; ch <<= 6;
- case 4: ch += *source++; ch <<= 6;
- case 3: ch += *source++; ch <<= 6;
- case 2: ch += *source++; ch <<= 6;
- case 1: ch += *source++; ch <<= 6;
- case 0: ch += *source++;
- }
- ch -= offsetsFromUTF8[extraBytesToRead];
-
- if (target >= targetEnd) {
- source -= (extraBytesToRead+1); /* Back up the source pointer! */
- result = targetExhausted; break;
- }
- if (ch <= UNI_MAX_LEGAL_UTF32) {
- /*
- * UTF-16 surrogate values are illegal in UTF-32, and anything
- * over Plane 17 (> 0x10FFFF) is illegal.
- */
- if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
- if (flags == strictConversion) {
- source -= (extraBytesToRead+1); /* return to the illegal value itself */
- result = sourceIllegal;
- break;
- } else {
- *target++ = UNI_REPLACEMENT_CHAR;
- }
- } else {
- *target++ = ch;
- }
- } else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */
- result = sourceIllegal;
- *target++ = UNI_REPLACEMENT_CHAR;
- }
- }
- *sourceStart = source;
- *targetStart = target;
- return result;
-}
#endif
/* --------------------------------------------------------------------- */
@@ -448,7 +387,7 @@ static Boolean isLegalUTF8(const UTF8 *source, int length) {
*/
Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) {
int length = trailingBytesForUTF8[*source]+1;
- if (source+length > sourceEnd) {
+ if (length > sourceEnd - source) {
return false;
}
return isLegalUTF8(source, length);
@@ -456,6 +395,22 @@ Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) {
/* --------------------------------------------------------------------- */
+/*
+ * Exported function to return whether a UTF-8 string is legal or not.
+ * This is not used here; it's just exported.
+ */
+Boolean isLegalUTF8String(const UTF8 *source, const UTF8 *sourceEnd) {
+ while (source != sourceEnd) {
+ int length = trailingBytesForUTF8[*source] + 1;
+ if (length > sourceEnd - source || !isLegalUTF8(source, length))
+ return false;
+ source += length;
+ }
+ return true;
+}
+
+/* --------------------------------------------------------------------- */
+
ConversionResult ConvertUTF8toUTF16 (
const UTF8** sourceStart, const UTF8* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
@@ -465,7 +420,7 @@ ConversionResult ConvertUTF8toUTF16 (
while (source < sourceEnd) {
UTF32 ch = 0;
unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
- if (source + extraBytesToRead >= sourceEnd) {
+ if (extraBytesToRead >= sourceEnd - source) {
result = sourceExhausted; break;
}
/* Do this check whether lenient or strict */
@@ -527,6 +482,68 @@ ConversionResult ConvertUTF8toUTF16 (
return result;
}
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF8toUTF32 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF8* source = *sourceStart;
+ UTF32* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch = 0;
+ unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
+ if (extraBytesToRead >= sourceEnd - source) {
+ result = sourceExhausted; break;
+ }
+ /* Do this check whether lenient or strict */
+ if (!isLegalUTF8(source, extraBytesToRead+1)) {
+ result = sourceIllegal;
+ break;
+ }
+ /*
+ * The cases all fall through. See "Note A" below.
+ */
+ switch (extraBytesToRead) {
+ case 5: ch += *source++; ch <<= 6;
+ case 4: ch += *source++; ch <<= 6;
+ case 3: ch += *source++; ch <<= 6;
+ case 2: ch += *source++; ch <<= 6;
+ case 1: ch += *source++; ch <<= 6;
+ case 0: ch += *source++;
+ }
+ ch -= offsetsFromUTF8[extraBytesToRead];
+
+ if (target >= targetEnd) {
+ source -= (extraBytesToRead+1); /* Back up the source pointer! */
+ result = targetExhausted; break;
+ }
+ if (ch <= UNI_MAX_LEGAL_UTF32) {
+ /*
+ * UTF-16 surrogate values are illegal in UTF-32, and anything
+ * over Plane 17 (> 0x10FFFF) is illegal.
+ */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ if (flags == strictConversion) {
+ source -= (extraBytesToRead+1); /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ *target++ = ch;
+ }
+ } else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */
+ result = sourceIllegal;
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
/* ---------------------------------------------------------------------
Note A.
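
The bounds checks rewritten in this file trade `source + length > sourceEnd` for `length > sourceEnd - source`: the old form can compute a pointer past the end of the buffer, which is undefined behavior and can wrap when the buffer sits near the top of the address space, while the new form compares only against a well-defined pointer difference. A minimal standalone sketch of the pattern (the names here are ours, not ConvertUTF's):

```c++
#include <cassert>
#include <cstddef>

// Decide whether `need` more bytes are available in [cur, end) without ever
// forming a pointer past the end of the buffer. `cur + need > end` can wrap
// when `cur` sits near the top of the address space; `need <= end - cur`
// cannot, because `end - cur` is an ordinary ptrdiff_t once cur <= end holds.
static bool hasBytes(const unsigned char *cur, const unsigned char *end,
                     size_t need) {
  assert(cur <= end && "cursor must not pass the end");
  return need <= static_cast<size_t>(end - cur);
}

int main() {
  unsigned char buf[4] = {0xE2, 0x82, 0xAC, 0x00}; // UTF-8 for U+20AC
  assert(hasBytes(buf, buf + 4, 3));      // a 3-byte sequence fits
  assert(!hasBytes(buf + 2, buf + 4, 3)); // truncated sequence detected
  return 0;
}
```

The same comparison shape appears in isLegalUTF8Sequence, ConvertUTF8toUTF16, and the relocated ConvertUTF8toUTF32 above.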
diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
index e5f3901..f7d5d87 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
@@ -14,7 +14,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/PartialDiagnostic.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/CrashRecoveryContext.h"
@@ -27,14 +27,14 @@ static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
unsigned NumPrevArgs,
SmallVectorImpl<char> &Output,
void *Cookie,
- SmallVectorImpl<intptr_t> &QualTypeVals) {
+ ArrayRef<intptr_t> QualTypeVals) {
const char *Str = "<can't format argument>";
Output.append(Str, Str+strlen(Str));
}
DiagnosticsEngine::DiagnosticsEngine(
- const llvm::IntrusiveRefCntPtr<DiagnosticIDs> &diags,
+ const IntrusiveRefCntPtr<DiagnosticIDs> &diags,
DiagnosticConsumer *client, bool ShouldOwnClient)
: Diags(diags), Client(client), OwnsDiagClient(ShouldOwnClient),
SourceMgr(0) {
@@ -53,6 +53,7 @@ DiagnosticsEngine::DiagnosticsEngine(
ErrorLimit = 0;
TemplateBacktraceLimit = 0;
+ ConstexprBacktraceLimit = 0;
Reset();
}
@@ -118,7 +119,7 @@ void DiagnosticsEngine::Reset() {
}
void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1,
- StringRef Arg2) {
+ StringRef Arg2) {
if (DelayedDiagID)
return;
@@ -161,7 +162,7 @@ DiagnosticsEngine::GetDiagStatePointForLoc(SourceLocation L) const {
/// \param L The source location at which this change of diagnostic state
/// should take effect. It can be null if we are setting the latest state.
void DiagnosticsEngine::setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
- SourceLocation L) {
+ SourceLocation L) {
assert(Diag < diag::DIAG_UPPER_LIMIT &&
"Can only map builtin diagnostics");
assert((Diags->isBuiltinWarningOrExtension(Diag) ||
@@ -169,18 +170,16 @@ void DiagnosticsEngine::setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
"Cannot map errors into warnings!");
assert(!DiagStatePoints.empty());
- bool isPragma = L.isValid();
FullSourceLoc Loc(L, *SourceMgr);
FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
- DiagnosticMappingInfo MappingInfo = DiagnosticMappingInfo::Make(
- Map, /*IsUser=*/true, isPragma);
-
- // If this is a pragma mapping, then set the diagnostic mapping flags so that
- // we override command line options.
- if (isPragma) {
- MappingInfo.setNoWarningAsError(true);
- MappingInfo.setNoErrorAsFatal(true);
+ // Don't allow a mapping to a warning to override an error/fatal mapping.
+ if (Map == diag::MAP_WARNING) {
+ DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(Diag);
+ if (Info.getMapping() == diag::MAP_ERROR ||
+ Info.getMapping() == diag::MAP_FATAL)
+ Map = Info.getMapping();
}
+ DiagnosticMappingInfo MappingInfo = makeMappingInfo(Map, L);
// Common case; setting all the diagnostics of a group in one place.
if (Loc.isInvalid() || Loc == LastStateChangePos) {
@@ -244,6 +243,24 @@ bool DiagnosticsEngine::setDiagnosticGroupMapping(
return false;
}
+void DiagnosticsEngine::setDiagnosticWarningAsError(diag::kind Diag,
+ bool Enabled) {
+ // If we are enabling this feature, just set the diagnostic mappings to map to
+ // errors.
+ if (Enabled) {
+ setDiagnosticMapping(Diag, diag::MAP_ERROR, SourceLocation());
+ return;
+ }
+
+ // Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and
+ // potentially downgrade anything already mapped to be a warning.
+ DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(Diag);
+
+ if (Info.getMapping() == diag::MAP_ERROR ||
+ Info.getMapping() == diag::MAP_FATAL)
+ Info.setMapping(diag::MAP_WARNING);
+
+ Info.setNoWarningAsError(true);
+}
+
bool DiagnosticsEngine::setDiagnosticGroupWarningAsError(StringRef Group,
bool Enabled) {
// If we are enabling this feature, just set the diagnostic mappings to map to
@@ -274,6 +291,23 @@ bool DiagnosticsEngine::setDiagnosticGroupWarningAsError(StringRef Group,
return false;
}
+void DiagnosticsEngine::setDiagnosticErrorAsFatal(diag::kind Diag,
+ bool Enabled) {
+ // If we are enabling this feature, just set the diagnostic mappings to map to
+ // fatal errors.
+ if (Enabled) {
+ setDiagnosticMapping(Diag, diag::MAP_FATAL, SourceLocation());
+ return;
+ }
+
+ // Otherwise, we want to set the diagnostic mapping's "no Wfatal-errors" bit,
+ // and downgrade anything already mapped to be fatal back to an error.
+ DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(Diag);
+
+ if (Info.getMapping() == diag::MAP_FATAL)
+ Info.setMapping(diag::MAP_ERROR);
+
+ Info.setNoErrorAsFatal(true);
+}
+
bool DiagnosticsEngine::setDiagnosticGroupErrorAsFatal(StringRef Group,
bool Enabled) {
// If we are enabling this feature, just set the diagnostic mappings to map to
@@ -303,6 +337,18 @@ bool DiagnosticsEngine::setDiagnosticGroupErrorAsFatal(StringRef Group,
return false;
}
+void DiagnosticsEngine::setMappingToAllDiagnostics(diag::Mapping Map,
+ SourceLocation Loc) {
+ // Get all the diagnostics.
+ llvm::SmallVector<diag::kind, 64> AllDiags;
+ Diags->getAllDiagnostics(AllDiags);
+
+ // Set the mapping.
+ for (unsigned i = 0, e = AllDiags.size(); i != e; ++i)
+ if (Diags->isBuiltinWarningOrExtension(AllDiags[i]))
+ setDiagnosticMapping(AllDiags[i], Map, Loc);
+}
+
void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
assert(CurDiagID == ~0U && "Multiple diagnostics in flight at once!");
@@ -311,7 +357,7 @@ void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
NumDiagArgs = 0;
NumDiagRanges = storedDiag.range_size();
- assert(NumDiagRanges < sizeof(DiagRanges)/sizeof(DiagRanges[0]) &&
+ assert(NumDiagRanges < DiagnosticsEngine::MaxRanges &&
"Too many arguments to diagnostic!");
unsigned i = 0;
for (StoredDiagnostic::range_iterator
@@ -319,14 +365,13 @@ void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
RE = storedDiag.range_end(); RI != RE; ++RI)
DiagRanges[i++] = *RI;
- NumFixItHints = storedDiag.fixit_size();
- assert(NumFixItHints < DiagnosticsEngine::MaxFixItHints &&
- "Too many fix-it hints!");
- i = 0;
+ assert(storedDiag.fixit_size() < DiagnosticsEngine::MaxFixItHints &&
+ "Too many fix-it hints!");
+ NumDiagFixItHints = 0;
for (StoredDiagnostic::fixit_iterator
FI = storedDiag.fixit_begin(),
FE = storedDiag.fixit_end(); FI != FE; ++FI)
- FixItHints[i++] = *FI;
+ DiagFixItHints[NumDiagFixItHints++] = *FI;
assert(Client && "DiagnosticConsumer not set!");
Level DiagLevel = storedDiag.getLevel();
@@ -340,35 +385,18 @@ void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
CurDiagID = ~0U;
}
-void DiagnosticBuilder::FlushCounts() {
- DiagObj->NumDiagArgs = NumArgs;
- DiagObj->NumDiagRanges = NumRanges;
- DiagObj->NumFixItHints = NumFixItHints;
-}
-
-bool DiagnosticBuilder::Emit() {
- // If DiagObj is null, then its soul was stolen by the copy ctor
- // or the user called Emit().
- if (DiagObj == 0) return false;
-
- // When emitting diagnostics, we set the final argument count into
- // the DiagnosticsEngine object.
- FlushCounts();
-
+bool DiagnosticsEngine::EmitCurrentDiagnostic() {
// Process the diagnostic, sending the accumulated information to the
// DiagnosticConsumer.
- bool Emitted = DiagObj->ProcessDiag();
+ bool Emitted = ProcessDiag();
// Clear out the current diagnostic object.
- unsigned DiagID = DiagObj->CurDiagID;
- DiagObj->Clear();
+ unsigned DiagID = CurDiagID;
+ Clear();
// If there was a delayed diagnostic, emit it now.
- if (DiagObj->DelayedDiagID && DiagObj->DelayedDiagID != DiagID)
- DiagObj->ReportDelayed();
-
- // This diagnostic is dead.
- DiagObj = 0;
+ if (DelayedDiagID && DelayedDiagID != DiagID)
+ ReportDelayed();
return Emitted;
}
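
The hunk above folds DiagnosticBuilder::Emit() into a new DiagnosticsEngine::EmitCurrentDiagnostic(), so the engine rather than the builder owns processing, clearing, and replaying delayed diagnostics. A rough sketch of the resulting control flow, with heavily simplified stand-in types rather than clang's real classes:

```c++
#include <iostream>

// Illustrative only: the engine processes the in-flight diagnostic, resets
// its state, and then replays any queued delayed diagnostic, exactly the
// sequence EmitCurrentDiagnostic performs above.
struct Engine {
  unsigned CurDiagID = ~0u, DelayedDiagID = 0;
  bool ProcessDiag() { std::cout << "emit " << CurDiagID << "\n"; return true; }
  void Clear() { CurDiagID = ~0u; }
  void ReportDelayed() { std::cout << "delayed " << DelayedDiagID << "\n"; }

  bool EmitCurrentDiagnostic() {
    bool Emitted = ProcessDiag();   // send to the consumer
    unsigned DiagID = CurDiagID;
    Clear();                        // reset in-flight state
    if (DelayedDiagID && DelayedDiagID != DiagID)
      ReportDelayed();              // replay a queued diagnostic
    return Emitted;
  }
};

int main() {
  Engine E;
  E.CurDiagID = 42;
  E.DelayedDiagID = 7;
  return E.EmitCurrentDiagnostic() ? 0 : 1;
}
```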
@@ -802,7 +830,7 @@ StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level,
"Valid source location without setting a source manager for diagnostic");
if (Info.getLocation().isValid())
Loc = FullSourceLoc(Info.getLocation(), Info.getSourceManager());
- llvm::SmallString<64> Message;
+ SmallString<64> Message;
Info.FormatDiagnostic(Message);
this->Message.assign(Message.begin(), Message.end());
@@ -833,6 +861,8 @@ StoredDiagnostic::~StoredDiagnostic() { }
/// reported by DiagnosticsEngine.
bool DiagnosticConsumer::IncludeInDiagnosticCounts() const { return true; }
+void IgnoringDiagConsumer::anchor() { }
+
PartialDiagnostic::StorageAllocator::StorageAllocator() {
for (unsigned I = 0; I != NumCached; ++I)
FreeList[I] = Cached + I;
@@ -840,7 +870,9 @@ PartialDiagnostic::StorageAllocator::StorageAllocator() {
}
PartialDiagnostic::StorageAllocator::~StorageAllocator() {
- // Don't assert if we are in a CrashRecovery context, as this
- // invariant may be invalidated during a crash.
- assert((NumFreeListEntries == NumCached || llvm::CrashRecoveryContext::isRecoveringFromCrash()) && "A partial is on the lamb");
+ // Don't assert if we are in a CrashRecovery context, as this invariant may
+ // be invalidated during a crash.
+ assert((NumFreeListEntries == NumCached ||
+ llvm::CrashRecoveryContext::isRecoveringFromCrash()) &&
+ "A partial is on the lamb");
}
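
The new per-diagnostic setDiagnosticWarningAsError and setDiagnosticErrorAsFatal entry points implement a small promotion/demotion rule: enabling promotes the mapping, while disabling demotes it one level and latches a bit so a blanket -Werror or -Wfatal-errors cannot re-promote it. A self-contained sketch of that rule; the enum values and struct shape are placeholders, not clang's:

```c++
#include <cassert>

// Placeholder mapping levels mirroring diag::MAP_* (values are ours).
enum Mapping { MapIgnore, MapWarning, MapError, MapFatal };

struct Info {
  Mapping Map = MapWarning;
  bool NoWarningAsError = false;
};

// Sketch of setDiagnosticWarningAsError: enabling promotes warnings to
// errors; disabling demotes error/fatal back to warning and records that
// a blanket -Werror must not re-promote this diagnostic.
static void setWarningAsError(Info &I, bool Enabled) {
  if (Enabled) {
    I.Map = MapError;
    return;
  }
  if (I.Map == MapError || I.Map == MapFatal)
    I.Map = MapWarning;
  I.NoWarningAsError = true;
}

int main() {
  Info I;
  setWarningAsError(I, true);
  assert(I.Map == MapError);
  setWarningAsError(I, false);
  assert(I.Map == MapWarning && I.NoWarningAsError);
  return 0;
}
```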
diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
index 9481287..8c33a96 100644
--- a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
@@ -11,16 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ASTDiagnostic.h"
-#include "clang/Analysis/AnalysisDiagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/AllDiagnostics.h"
#include "clang/Basic/DiagnosticCategories.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
-#include "clang/Lex/LexDiagnostic.h"
-#include "clang/Parse/ParseDiagnostic.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
@@ -51,107 +45,49 @@ struct StaticDiagInfoRec {
unsigned WarnShowInSystemHeader : 1;
unsigned Category : 5;
- uint8_t NameLen;
- uint8_t OptionGroupLen;
+ uint16_t OptionGroupIndex;
uint16_t DescriptionLen;
- uint16_t BriefExplanationLen;
- uint16_t FullExplanationLen;
-
- const char *NameStr;
- const char *OptionGroupStr;
-
const char *DescriptionStr;
- const char *BriefExplanationStr;
- const char *FullExplanationStr;
- StringRef getName() const {
- return StringRef(NameStr, NameLen);
- }
- StringRef getOptionGroup() const {
- return StringRef(OptionGroupStr, OptionGroupLen);
+ unsigned getOptionGroupIndex() const {
+ return OptionGroupIndex;
}
StringRef getDescription() const {
return StringRef(DescriptionStr, DescriptionLen);
}
- StringRef getBriefExplanation() const {
- return StringRef(BriefExplanationStr, BriefExplanationLen);
- }
- StringRef getFullExplanation() const {
- return StringRef(FullExplanationStr, FullExplanationLen);
- }
bool operator<(const StaticDiagInfoRec &RHS) const {
return DiagID < RHS.DiagID;
}
};
-struct StaticDiagNameIndexRec {
- const char *NameStr;
- unsigned short DiagID;
- uint8_t NameLen;
-
- StringRef getName() const {
- return StringRef(NameStr, NameLen);
- }
-
- bool operator<(const StaticDiagNameIndexRec &RHS) const {
- return getName() < RHS.getName();
- }
-
- bool operator==(const StaticDiagNameIndexRec &RHS) const {
- return getName() == RHS.getName();
- }
-};
-
-template <size_t SizeOfStr, typename FieldType>
-class StringSizerHelper {
- char FIELD_TOO_SMALL[SizeOfStr <= FieldType(~0U) ? 1 : -1];
-public:
- enum { Size = SizeOfStr };
-};
-
} // namespace anonymous
-#define STR_SIZE(str, fieldTy) StringSizerHelper<sizeof(str)-1, fieldTy>::Size
-
static const StaticDiagInfoRec StaticDiagInfo[] = {
#define DIAG(ENUM,CLASS,DEFAULT_MAPPING,DESC,GROUP, \
SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER, \
- CATEGORY,BRIEF,FULL) \
+ CATEGORY) \
{ diag::ENUM, DEFAULT_MAPPING, CLASS, SFINAE, ACCESS, \
- NOWERROR, SHOWINSYSHEADER, CATEGORY, \
- STR_SIZE(#ENUM, uint8_t), STR_SIZE(GROUP, uint8_t), \
- STR_SIZE(DESC, uint16_t), STR_SIZE(BRIEF, uint16_t), \
- STR_SIZE(FULL, uint16_t), \
- #ENUM, GROUP, DESC, BRIEF, FULL },
+ NOWERROR, SHOWINSYSHEADER, CATEGORY, GROUP, \
+ STR_SIZE(DESC, uint16_t), DESC },
#include "clang/Basic/DiagnosticCommonKinds.inc"
#include "clang/Basic/DiagnosticDriverKinds.inc"
#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#include "clang/Basic/DiagnosticSerializationKinds.inc"
#include "clang/Basic/DiagnosticLexKinds.inc"
#include "clang/Basic/DiagnosticParseKinds.inc"
#include "clang/Basic/DiagnosticASTKinds.inc"
#include "clang/Basic/DiagnosticSemaKinds.inc"
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#undef DIAG
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
};
static const unsigned StaticDiagInfoSize =
sizeof(StaticDiagInfo)/sizeof(StaticDiagInfo[0])-1;
-/// To be sorted before first use (since it's splitted among multiple files)
-static const StaticDiagNameIndexRec StaticDiagNameIndex[] = {
-#define DIAG_NAME_INDEX(ENUM) { #ENUM, diag::ENUM, STR_SIZE(#ENUM, uint8_t) },
-#include "clang/Basic/DiagnosticIndexName.inc"
-#undef DIAG_NAME_INDEX
- { 0, 0, 0 }
-};
-
-static const unsigned StaticDiagNameIndexSize =
- sizeof(StaticDiagNameIndex)/sizeof(StaticDiagNameIndex[0])-1;
-
/// GetDiagInfo - Return the StaticDiagInfoRec entry for the specified DiagID,
/// or null if the ID is invalid.
static const StaticDiagInfoRec *GetDiagInfo(unsigned DiagID) {
@@ -173,7 +109,7 @@ static const StaticDiagInfoRec *GetDiagInfo(unsigned DiagID) {
// Search the diagnostic table with a binary search.
StaticDiagInfoRec Find = { static_cast<unsigned short>(DiagID),
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const StaticDiagInfoRec *Found =
std::lower_bound(StaticDiagInfo, StaticDiagInfo + StaticDiagInfoSize, Find);
@@ -207,15 +143,6 @@ static DiagnosticMappingInfo GetDefaultDiagMappingInfo(unsigned DiagID) {
return Info;
}
-/// getWarningOptionForDiag - Return the lowest-level warning option that
-/// enables the specified diagnostic. If there is no -Wfoo flag that controls
-/// the diagnostic, this returns null.
-StringRef DiagnosticIDs::getWarningOptionForDiag(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->getOptionGroup();
- return StringRef();
-}
-
/// getCategoryNumberForDiag - Return the category number that a specified
/// DiagID belongs to, or 0 if no category.
unsigned DiagnosticIDs::getCategoryNumberForDiag(unsigned DiagID) {
@@ -295,50 +222,6 @@ DiagnosticIDs::getDiagnosticSFINAEResponse(unsigned DiagID) {
return SFINAE_Report;
}
-/// getName - Given a diagnostic ID, return its name
-StringRef DiagnosticIDs::getName(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->getName();
- return StringRef();
-}
-
-/// getIdFromName - Given a diagnostic name, return its ID, or 0
-unsigned DiagnosticIDs::getIdFromName(StringRef Name) {
- const StaticDiagNameIndexRec *StaticDiagNameIndexEnd =
- StaticDiagNameIndex + StaticDiagNameIndexSize;
-
- if (Name.empty()) { return diag::DIAG_UPPER_LIMIT; }
-
- assert(Name.size() == static_cast<uint8_t>(Name.size()) &&
- "Name is too long");
- StaticDiagNameIndexRec Find = { Name.data(), 0,
- static_cast<uint8_t>(Name.size()) };
-
- const StaticDiagNameIndexRec *Found =
- std::lower_bound( StaticDiagNameIndex, StaticDiagNameIndexEnd, Find);
- if (Found == StaticDiagNameIndexEnd ||
- Found->getName() != Name)
- return diag::DIAG_UPPER_LIMIT;
-
- return Found->DiagID;
-}
-
-/// getBriefExplanation - Given a diagnostic ID, return a brief explanation
-/// of the issue
-StringRef DiagnosticIDs::getBriefExplanation(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->getBriefExplanation();
- return StringRef();
-}
-
-/// getFullExplanation - Given a diagnostic ID, return a full explanation
-/// of the issue
-StringRef DiagnosticIDs::getFullExplanation(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->getFullExplanation();
- return StringRef();
-}
-
/// getBuiltinDiagClass - Return the class field of the diagnostic.
///
static unsigned getBuiltinDiagClass(unsigned DiagID) {
@@ -348,35 +231,6 @@ static unsigned getBuiltinDiagClass(unsigned DiagID) {
}
//===----------------------------------------------------------------------===//
-// diag_iterator
-//===----------------------------------------------------------------------===//
-
-llvm::StringRef DiagnosticIDs::diag_iterator::getDiagName() const {
- return static_cast<const StaticDiagNameIndexRec*>(impl)->getName();
-}
-
-unsigned DiagnosticIDs::diag_iterator::getDiagID() const {
- return static_cast<const StaticDiagNameIndexRec*>(impl)->DiagID;
-}
-
-DiagnosticIDs::diag_iterator &DiagnosticIDs::diag_iterator::operator++() {
- const StaticDiagNameIndexRec* ptr =
- static_cast<const StaticDiagNameIndexRec*>(impl);;
- ++ptr;
- impl = ptr;
- return *this;
-}
-
-DiagnosticIDs::diag_iterator DiagnosticIDs::diags_begin() {
- return DiagnosticIDs::diag_iterator(StaticDiagNameIndex);
-}
-
-DiagnosticIDs::diag_iterator DiagnosticIDs::diags_end() {
- return DiagnosticIDs::diag_iterator(StaticDiagNameIndex +
- StaticDiagNameIndexSize);
-}
-
-//===----------------------------------------------------------------------===//
// Custom Diagnostic information
//===----------------------------------------------------------------------===//
@@ -530,7 +384,6 @@ DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass,
(diag::kind)DiagID);
switch (MappingInfo.getMapping()) {
- default: llvm_unreachable("Unknown mapping!");
case diag::MAP_IGNORE:
Result = DiagnosticIDs::Ignored;
break;
@@ -553,7 +406,7 @@ DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass,
// Ignore -pedantic diagnostics inside __extension__ blocks.
// (The diagnostics controlled by -pedantic are the extension diagnostics
// that are not enabled by default.)
- bool EnabledByDefault;
+ bool EnabledByDefault = false;
bool IsExtensionDiag = isBuiltinExtensionDiag(DiagID, EnabledByDefault);
if (Diag.AllExtensionsSilenced && IsExtensionDiag && !EnabledByDefault)
return DiagnosticIDs::Ignored;
@@ -648,6 +501,15 @@ static bool WarningOptionCompare(const WarningOption &LHS,
return LHS.getName() < RHS.getName();
}
+/// getWarningOptionForDiag - Return the lowest-level warning option that
+/// enables the specified diagnostic. If there is no -Wfoo flag that controls
+/// the diagnostic, this returns an empty string.
+StringRef DiagnosticIDs::getWarningOptionForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return OptionTable[Info->getOptionGroupIndex()].getName();
+ return StringRef();
+}
+
void DiagnosticIDs::getDiagnosticsInGroup(
const WarningOption *Group,
llvm::SmallVectorImpl<diag::kind> &Diags) const
@@ -681,6 +543,35 @@ bool DiagnosticIDs::getDiagnosticsInGroup(
return false;
}
+void DiagnosticIDs::getAllDiagnostics(
+ llvm::SmallVectorImpl<diag::kind> &Diags) const {
+ for (unsigned i = 0; i != StaticDiagInfoSize; ++i)
+ Diags.push_back(StaticDiagInfo[i].DiagID);
+}
+
+StringRef DiagnosticIDs::getNearestWarningOption(StringRef Group) {
+ StringRef Best;
+ unsigned BestDistance = Group.size() + 1; // Sanity threshold.
+ for (const WarningOption *i = OptionTable, *e = OptionTable + OptionTableSize;
+ i != e; ++i) {
+ // Don't suggest ignored warning flags.
+ if (!i->Members && !i->SubGroups)
+ continue;
+
+ unsigned Distance = i->getName().edit_distance(Group, true, BestDistance);
+ if (Distance == BestDistance) {
+ // Two matches with the same distance, don't prefer one over the other.
+ Best = "";
+ } else if (Distance < BestDistance) {
+ // This is a better match.
+ Best = i->getName();
+ BestDistance = Distance;
+ }
+ }
+
+ return Best;
+}
+
/// ProcessDiag - This is the method used to report a diagnostic that is
/// finally fully formed.
bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
@@ -766,19 +657,6 @@ bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
}
}
- // If we have any Fix-Its, make sure that all of the Fix-Its point into
- // source locations that aren't macro expansions. If any point into macro
- // expansions, remove all of the Fix-Its.
- for (unsigned I = 0, N = Diag.NumFixItHints; I != N; ++I) {
- const FixItHint &FixIt = Diag.FixItHints[I];
- if (FixIt.RemoveRange.isInvalid() ||
- FixIt.RemoveRange.getBegin().isMacroID() ||
- FixIt.RemoveRange.getEnd().isMacroID()) {
- Diag.NumFixItHints = 0;
- break;
- }
- }
-
// Finally, report it.
Diag.Client->HandleDiagnostic((DiagnosticsEngine::Level)DiagLevel, Info);
if (Diag.Client->IncludeInDiagnosticCounts()) {
@@ -806,9 +684,14 @@ bool DiagnosticIDs::isUnrecoverable(unsigned DiagID) const {
return false;
// Currently we consider all ARC errors as recoverable.
- if (getCategoryNumberForDiag(DiagID) ==
- diag::DiagCat_Automatic_Reference_Counting_Issue)
+ if (isARCDiagnostic(DiagID))
return false;
return true;
}
+
+bool DiagnosticIDs::isARCDiagnostic(unsigned DiagID) {
+ unsigned cat = getCategoryNumberForDiag(DiagID);
+ return DiagnosticIDs::getCategoryNameFromID(cat).startswith("ARC ");
+}
+
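
getNearestWarningOption drives "did you mean -W...?" style suggestions: it keeps the closest known flag by edit distance, refuses to choose between tied candidates, and never suggests anything farther away than the query's own length. A hedged sketch of the same loop over plain strings (the distance helper below is ours; StringRef::edit_distance additionally takes an upper bound for early exit):

```c++
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Plain single-row Levenshtein distance; LLVM's version adds an UpperBound
// cutoff so hopeless candidates bail out early, which we omit for brevity.
static unsigned editDistance(const std::string &A, const std::string &B) {
  std::vector<unsigned> Row(B.size() + 1);
  for (unsigned j = 0; j <= B.size(); ++j) Row[j] = j;
  for (unsigned i = 1; i <= A.size(); ++i) {
    unsigned Prev = Row[0];
    Row[0] = i;
    for (unsigned j = 1; j <= B.size(); ++j) {
      unsigned Cur = Row[j];
      Row[j] = std::min({Row[j] + 1, Row[j - 1] + 1,
                         Prev + (A[i - 1] == B[j - 1] ? 0u : 1u)});
      Prev = Cur;
    }
  }
  return Row[B.size()];
}

// Mirrors the suggestion loop above: keep the closest known flag, drop the
// suggestion entirely on a tie, never suggest past the sanity threshold.
static std::string nearest(const std::string &Query,
                           const std::vector<std::string> &Options) {
  std::string Best;
  unsigned BestDistance = Query.size() + 1; // sanity threshold
  for (const std::string &Opt : Options) {
    unsigned D = editDistance(Opt, Query);
    if (D == BestDistance)
      Best.clear();                         // ambiguous: no suggestion
    else if (D < BestDistance) {
      Best = Opt;
      BestDistance = D;
    }
  }
  return Best;
}

int main() {
  std::vector<std::string> Opts = {"unused-parameter", "unused-variable"};
  assert(nearest("unused-paramter", Opts) == "unused-parameter");
  return 0;
}
```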
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
index c1f715e..fd6d334 100644
--- a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
@@ -20,13 +20,12 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemStatCache.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/system_error.h"
-#include "llvm/Config/config.h"
+#include "llvm/Config/llvm-config.h"
#include <map>
#include <set>
#include <string>
@@ -106,8 +105,8 @@ public:
FileEntry &getFile(const char *Name, const struct stat & /*StatBuf*/) {
std::string FullPath(GetFullPath(Name));
- // LowercaseString because Windows filesystem is case insensitive.
- FullPath = llvm::LowercaseString(FullPath);
+ // Lowercase string because Windows filesystem is case insensitive.
+ FullPath = StringRef(FullPath).lower();
return UniqueFiles.GetOrCreateValue(FullPath).getValue();
}
@@ -266,6 +265,12 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
///
const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
bool CacheFailure) {
+ // stat doesn't like trailing separators.
+ // At least, on Win32 MSVCRT, stat() cannot strip trailing '/'.
+ // (though it can strip '\\')
+ if (DirName.size() > 1 && llvm::sys::path::is_separator(DirName.back()))
+ DirName = DirName.substr(0, DirName.size()-1);
+
++NumDirLookups;
llvm::StringMapEntry<DirectoryEntry *> &NamedDirEnt =
SeenDirEntries.GetOrCreateValue(DirName);
@@ -472,14 +477,14 @@ void FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
|| llvm::sys::path::is_absolute(pathRef))
return;
- llvm::SmallString<128> NewPath(FileSystemOpts.WorkingDir);
+ SmallString<128> NewPath(FileSystemOpts.WorkingDir);
llvm::sys::path::append(NewPath, pathRef);
path = NewPath;
}
llvm::MemoryBuffer *FileManager::
getBufferForFile(const FileEntry *Entry, std::string *ErrorStr) {
- llvm::OwningPtr<llvm::MemoryBuffer> Result;
+ OwningPtr<llvm::MemoryBuffer> Result;
llvm::error_code ec;
const char *Filename = Entry->getName();
@@ -504,7 +509,7 @@ getBufferForFile(const FileEntry *Entry, std::string *ErrorStr) {
return Result.take();
}
- llvm::SmallString<128> FilePath(Entry->getName());
+ SmallString<128> FilePath(Entry->getName());
FixupRelativePath(FilePath);
ec = llvm::MemoryBuffer::getFile(FilePath.str(), Result, Entry->getSize());
if (ec && ErrorStr)
@@ -514,7 +519,7 @@ getBufferForFile(const FileEntry *Entry, std::string *ErrorStr) {
llvm::MemoryBuffer *FileManager::
getBufferForFile(StringRef Filename, std::string *ErrorStr) {
- llvm::OwningPtr<llvm::MemoryBuffer> Result;
+ OwningPtr<llvm::MemoryBuffer> Result;
llvm::error_code ec;
if (FileSystemOpts.WorkingDir.empty()) {
ec = llvm::MemoryBuffer::getFile(Filename, Result);
@@ -523,7 +528,7 @@ getBufferForFile(StringRef Filename, std::string *ErrorStr) {
return Result.take();
}
- llvm::SmallString<128> FilePath(Filename);
+ SmallString<128> FilePath(Filename);
FixupRelativePath(FilePath);
ec = llvm::MemoryBuffer::getFile(FilePath.c_str(), Result);
if (ec && ErrorStr)
@@ -544,7 +549,7 @@ bool FileManager::getStatValue(const char *Path, struct stat &StatBuf,
return FileSystemStatCache::get(Path, StatBuf, FileDescriptor,
StatCache.get());
- llvm::SmallString<128> FilePath(Path);
+ SmallString<128> FilePath(Path);
FixupRelativePath(FilePath);
return FileSystemStatCache::get(FilePath.c_str(), StatBuf, FileDescriptor,
@@ -553,7 +558,7 @@ bool FileManager::getStatValue(const char *Path, struct stat &StatBuf,
bool FileManager::getNoncachedStatValue(StringRef Path,
struct stat &StatBuf) {
- llvm::SmallString<128> FilePath(Path);
+ SmallString<128> FilePath(Path);
FixupRelativePath(FilePath);
return ::stat(FilePath.c_str(), &StatBuf) != 0;
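
The getDirectory change earlier in this file strips a single trailing path separator before consulting the stat cache, because some C runtimes (Win32's MSVCRT in particular) fail stat() on "dir/". A small sketch of the normalization rule, with is_separator approximated for illustration:

```c++
#include <cassert>
#include <string>

// Approximation of llvm::sys::path::is_separator, for illustration only.
static bool isSep(char C) { return C == '/' || C == '\\'; }

// Mirrors the fix: drop a single trailing separator, but never reduce the
// root directory "/" to an empty name.
static std::string normalizeDirName(std::string Name) {
  if (Name.size() > 1 && isSep(Name.back()))
    Name.pop_back();
  return Name;
}

int main() {
  assert(normalizeDirName("include/") == "include");
  assert(normalizeDirName("/") == "/"); // root is left alone
  return 0;
}
```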
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp b/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
index c8b07af..875d397 100644
--- a/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
@@ -28,6 +28,8 @@ using namespace clang;
#define S_ISDIR(s) ((_S_IFDIR & s) !=0)
#endif
+void FileSystemStatCache::anchor() { }
+
/// FileSystemStatCache::get - Get the 'stat' information for the specified
/// path, using the cache to accelerate it if possible. This returns true if
/// the path does not exist or false if it exists.
diff --git a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
index 38f09a0..43899f0 100644
--- a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
@@ -16,9 +16,10 @@
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cstdio>
using namespace clang;
@@ -37,7 +38,10 @@ IdentifierInfo::IdentifierInfo() {
IsCPPOperatorKeyword = false;
NeedsHandleIdentifier = false;
IsFromAST = false;
+ ChangedAfterLoad = false;
RevertedTokenID = false;
+ OutOfDate = false;
+ IsModulesImport = false;
FETokenInfo = 0;
Entry = 0;
}
@@ -74,6 +78,10 @@ IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
// Populate the identifier table with info about keywords for the current
// language.
AddKeywords(LangOpts);
+
+
+ // Add the '__experimental_modules_import' contextual keyword.
+ get("__experimental_modules_import").setModulesImport(true);
}
//===----------------------------------------------------------------------===//
@@ -93,7 +101,7 @@ namespace {
KEYNOCXX = 0x80,
KEYBORLAND = 0x100,
KEYOPENCL = 0x200,
- KEYC1X = 0x400,
+ KEYC11 = 0x400,
KEYARC = 0x800,
KEYALL = 0x0fff
};
@@ -105,7 +113,7 @@ namespace {
///
/// The C90/C99/CPP/CPP0x flags are set to 3 if the token is a keyword in a
/// future language standard, set to 2 if the token should be enabled in the
-/// specified langauge, set to 1 if it is an extension in the specified
+/// specified language, set to 1 if it is an extension in the specified
/// language, and set to 0 if disabled in the specified language.
static void AddKeyword(StringRef Keyword,
tok::TokenKind TokenCode, unsigned Flags,
@@ -122,8 +130,10 @@ static void AddKeyword(StringRef Keyword,
else if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) AddResult = 2;
else if (LangOpts.OpenCL && (Flags & KEYOPENCL)) AddResult = 2;
else if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) AddResult = 2;
- else if (LangOpts.C1X && (Flags & KEYC1X)) AddResult = 2;
- else if (LangOpts.ObjCAutoRefCount && (Flags & KEYARC)) AddResult = 2;
+ else if (LangOpts.C11 && (Flags & KEYC11)) AddResult = 2;
+ // We treat bridge casts as Objective-C keywords so we can warn on them
+ // in non-ARC mode.
+ else if (LangOpts.ObjC2 && (Flags & KEYARC)) AddResult = 2;
else if (LangOpts.CPlusPlus && (Flags & KEYCXX0X)) AddResult = 3;
// Don't add this keyword if disabled in this language.
@@ -212,7 +222,7 @@ tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
CASE( 6, 'i', 'n', ifndef);
CASE( 6, 'i', 'p', import);
CASE( 6, 'p', 'a', pragma);
-
+
CASE( 7, 'd', 'f', defined);
CASE( 7, 'i', 'c', include);
CASE( 7, 'w', 'r', warning);
@@ -220,8 +230,11 @@ tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
CASE( 8, 'u', 'a', unassert);
CASE(12, 'i', 'c', include_next);
+ CASE(14, '_', 'p', __public_macro);
+
+ CASE(15, '_', 'p', __private_macro);
+
CASE(16, '_', 'i', __include_macros);
- CASE(16, '_', 'e', __export_macro__);
#undef CASE
#undef HASH
}
@@ -347,7 +360,7 @@ StringRef Selector::getNameForSlot(unsigned int argIndex) const {
}
std::string MultiKeywordSelector::getName() const {
- llvm::SmallString<256> Str;
+ SmallString<256> Str;
llvm::raw_svector_ostream OS(Str);
for (keyword_iterator I = keyword_begin(), E = keyword_end(); I != E; ++I) {
if (*I)
@@ -445,6 +458,18 @@ static SelectorTableImpl &getSelectorTableImpl(void *P) {
return *static_cast<SelectorTableImpl*>(P);
}
+/*static*/ Selector
+SelectorTable::constructSetterName(IdentifierTable &Idents,
+ SelectorTable &SelTable,
+ const IdentifierInfo *Name) {
+ SmallString<100> SelectorName;
+ SelectorName = "set";
+ SelectorName += Name->getName();
+ SelectorName[3] = toupper(SelectorName[3]);
+ IdentifierInfo *SetterName = &Idents.get(SelectorName);
+ return SelTable.getUnarySelector(SetterName);
+}
+
size_t SelectorTable::getTotalMemory() const {
SelectorTableImpl &SelTabImpl = getSelectorTableImpl(Impl);
return SelTabImpl.Allocator.getTotalMemory();
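
constructSetterName builds an Objective-C setter selector from a property name by prefixing "set" and upper-casing the first letter of the name. A sketch of just the string manipulation (the real helper interns the result in the IdentifierTable and returns a unary selector):

```c++
#include <cassert>
#include <cctype>
#include <string>

// "frame" becomes the setter spelling "setFrame"; index 3 is the first
// character of the original property name after the "set" prefix.
static std::string setterSpelling(const std::string &Property) {
  if (Property.empty())
    return "set"; // degenerate input; the real code never sees this
  std::string Sel = "set" + Property;
  Sel[3] = static_cast<char>(std::toupper(static_cast<unsigned char>(Sel[3])));
  return Sel;
}

int main() {
  assert(setterSpelling("frame") == "setFrame");
  return 0;
}
```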
@@ -495,5 +520,5 @@ const char *clang::getOperatorSpelling(OverloadedOperatorKind Operator) {
#include "clang/Basic/OperatorKinds.def"
}
- return 0;
+ llvm_unreachable("Invalid OverloadedOperatorKind!");
}
diff --git a/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp b/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp
index 5f479db..991992a 100644
--- a/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp
@@ -26,5 +26,7 @@ void LangOptions::resetNonModularOptions() {
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
Name = Default;
#include "clang/Basic/LangOptions.def"
+
+ CurrentModule.clear();
}
diff --git a/contrib/llvm/tools/clang/lib/Basic/Module.cpp b/contrib/llvm/tools/clang/lib/Basic/Module.cpp
new file mode 100644
index 0000000..6348840
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Module.cpp
@@ -0,0 +1,274 @@
+//===--- Module.cpp - Describe a module -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Module class, which describes a module in the source
+// code.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/Module.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
+ bool IsFramework, bool IsExplicit)
+ : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent),
+ Umbrella(), IsAvailable(true), IsFromModuleFile(false),
+ IsFramework(IsFramework), IsExplicit(IsExplicit), IsSystem(false),
+ InferSubmodules(false), InferExplicitSubmodules(false),
+ InferExportWildcard(false), NameVisibility(Hidden)
+{
+ if (Parent) {
+ if (!Parent->isAvailable())
+ IsAvailable = false;
+ if (Parent->IsSystem)
+ IsSystem = true;
+
+ Parent->SubModuleIndex[Name] = Parent->SubModules.size();
+ Parent->SubModules.push_back(this);
+ }
+}
+
+Module::~Module() {
+ for (submodule_iterator I = submodule_begin(), IEnd = submodule_end();
+ I != IEnd; ++I) {
+ delete *I;
+ }
+
+}
+
+/// \brief Determine whether a translation unit built using the current
+/// language options has the given feature.
+static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
+ const TargetInfo &Target) {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("altivec", LangOpts.AltiVec)
+ .Case("blocks", LangOpts.Blocks)
+ .Case("cplusplus", LangOpts.CPlusPlus)
+ .Case("cplusplus11", LangOpts.CPlusPlus0x)
+ .Case("objc", LangOpts.ObjC1)
+ .Case("objc_arc", LangOpts.ObjCAutoRefCount)
+ .Case("opencl", LangOpts.OpenCL)
+ .Case("tls", Target.isTLSSupported())
+ .Default(Target.hasFeature(Feature));
+}
+
+bool
+Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
+ StringRef &Feature) const {
+ if (IsAvailable)
+ return true;
+
+ for (const Module *Current = this; Current; Current = Current->Parent) {
+ for (unsigned I = 0, N = Current->Requires.size(); I != N; ++I) {
+ if (!hasFeature(Current->Requires[I], LangOpts, Target)) {
+ Feature = Current->Requires[I];
+ return false;
+ }
+ }
+ }
+
+ llvm_unreachable("could not find a reason why module is unavailable");
+}
+
+bool Module::isSubModuleOf(Module *Other) const {
+ const Module *This = this;
+ do {
+ if (This == Other)
+ return true;
+
+ This = This->Parent;
+ } while (This);
+
+ return false;
+}
+
+const Module *Module::getTopLevelModule() const {
+ const Module *Result = this;
+ while (Result->Parent)
+ Result = Result->Parent;
+
+ return Result;
+}
+
+std::string Module::getFullModuleName() const {
+ llvm::SmallVector<StringRef, 2> Names;
+
+ // Build up the set of module names (from innermost to outermost).
+ for (const Module *M = this; M; M = M->Parent)
+ Names.push_back(M->Name);
+
+ std::string Result;
+ for (llvm::SmallVector<StringRef, 2>::reverse_iterator I = Names.rbegin(),
+ IEnd = Names.rend();
+ I != IEnd; ++I) {
+ if (!Result.empty())
+ Result += '.';
+
+ Result += *I;
+ }
+
+ return Result;
+}
+
+const DirectoryEntry *Module::getUmbrellaDir() const {
+ if (const FileEntry *Header = getUmbrellaHeader())
+ return Header->getDir();
+
+ return Umbrella.dyn_cast<const DirectoryEntry *>();
+}
+
+void Module::addRequirement(StringRef Feature, const LangOptions &LangOpts,
+ const TargetInfo &Target) {
+ Requires.push_back(Feature);
+
+ // If this feature is currently available, we're done.
+ if (hasFeature(Feature, LangOpts, Target))
+ return;
+
+ if (!IsAvailable)
+ return;
+
+ llvm::SmallVector<Module *, 2> Stack;
+ Stack.push_back(this);
+ while (!Stack.empty()) {
+ Module *Current = Stack.back();
+ Stack.pop_back();
+
+ if (!Current->IsAvailable)
+ continue;
+
+ Current->IsAvailable = false;
+ for (submodule_iterator Sub = Current->submodule_begin(),
+ SubEnd = Current->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ if ((*Sub)->IsAvailable)
+ Stack.push_back(*Sub);
+ }
+ }
+}
+
+Module *Module::findSubmodule(StringRef Name) const {
+ llvm::StringMap<unsigned>::const_iterator Pos = SubModuleIndex.find(Name);
+ if (Pos == SubModuleIndex.end())
+ return 0;
+
+ return SubModules[Pos->getValue()];
+}
+
+static void printModuleId(llvm::raw_ostream &OS, const ModuleId &Id) {
+ for (unsigned I = 0, N = Id.size(); I != N; ++I) {
+ if (I)
+ OS << ".";
+ OS << Id[I].first;
+ }
+}
+
+void Module::print(llvm::raw_ostream &OS, unsigned Indent) const {
+ OS.indent(Indent);
+ if (IsFramework)
+ OS << "framework ";
+ if (IsExplicit)
+ OS << "explicit ";
+ OS << "module " << Name;
+
+ if (IsSystem) {
+ OS << " [system]";
+ }
+
+ OS << " {\n";
+
+ if (!Requires.empty()) {
+ OS.indent(Indent + 2);
+ OS << "requires ";
+ for (unsigned I = 0, N = Requires.size(); I != N; ++I) {
+ if (I)
+ OS << ", ";
+ OS << Requires[I];
+ }
+ OS << "\n";
+ }
+
+ if (const FileEntry *UmbrellaHeader = getUmbrellaHeader()) {
+ OS.indent(Indent + 2);
+ OS << "umbrella header \"";
+ OS.write_escaped(UmbrellaHeader->getName());
+ OS << "\"\n";
+ } else if (const DirectoryEntry *UmbrellaDir = getUmbrellaDir()) {
+ OS.indent(Indent + 2);
+ OS << "umbrella \"";
+ OS.write_escaped(UmbrellaDir->getName());
+ OS << "\"\n";
+ }
+
+ for (unsigned I = 0, N = Headers.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "header \"";
+ OS.write_escaped(Headers[I]->getName());
+ OS << "\"\n";
+ }
+
+ for (submodule_const_iterator MI = submodule_begin(), MIEnd = submodule_end();
+ MI != MIEnd; ++MI)
+ (*MI)->print(OS, Indent + 2);
+
+ for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "export ";
+ if (Module *Restriction = Exports[I].getPointer()) {
+ OS << Restriction->getFullModuleName();
+ if (Exports[I].getInt())
+ OS << ".*";
+ } else {
+ OS << "*";
+ }
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = UnresolvedExports.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "export ";
+ printModuleId(OS, UnresolvedExports[I].Id);
+ if (UnresolvedExports[I].Wildcard) {
+ if (UnresolvedExports[I].Id.empty())
+ OS << "*";
+ else
+ OS << ".*";
+ }
+ OS << "\n";
+ }
+
+ if (InferSubmodules) {
+ OS.indent(Indent + 2);
+ if (InferExplicitSubmodules)
+ OS << "explicit ";
+ OS << "module * {\n";
+ if (InferExportWildcard) {
+ OS.indent(Indent + 4);
+ OS << "export *\n";
+ }
+ OS.indent(Indent + 2);
+ OS << "}\n";
+ }
+
+ OS.indent(Indent);
+ OS << "}\n";
+}
+
+void Module::dump() const {
+ print(llvm::errs());
+}
+
+
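
Module::print re-serializes the in-memory module graph in module-map syntax, indenting each nesting level by two spaces. A toy version of the recursive printer; the Mod type below is a stand-in, not clang's Module:

```c++
#include <iostream>
#include <string>
#include <vector>

// Minimal sketch of the two-space recursive indentation scheme used above.
struct Mod {
  std::string Name;
  bool IsExplicit;
  std::vector<Mod> Subs;
};

static void printMod(std::ostream &OS, const Mod &M, unsigned Indent = 0) {
  OS << std::string(Indent, ' ');
  if (M.IsExplicit)
    OS << "explicit ";
  OS << "module " << M.Name << " {\n";
  for (const Mod &S : M.Subs)
    printMod(OS, S, Indent + 2);
  OS << std::string(Indent, ' ') << "}\n";
}

int main() {
  Mod Foo{"Foo", false, {{"Bar", true, {}}}};
  printMod(std::cout, Foo);
  // module Foo {
  //   explicit module Bar {
  //   }
  // }
  return 0;
}
```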
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
index 6e4f3e6..bb5a10a 100644
--- a/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
@@ -115,6 +115,10 @@ bool FullSourceLoc::isBeforeInTranslationUnitThan(SourceLocation Loc) const {
return SrcMgr->isBeforeInTranslationUnit(*this, Loc);
}
+void FullSourceLoc::dump() const {
+ SourceLocation::dump(*SrcMgr);
+}
+
const char *FullSourceLoc::getCharacterData(bool *Invalid) const {
assert(isValid());
return SrcMgr->getCharacterData(*this, Invalid);
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
index 364663e..cef091c 100644
--- a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
@@ -71,7 +71,11 @@ unsigned ContentCache::getSize() const {
void ContentCache::replaceBuffer(const llvm::MemoryBuffer *B,
bool DoNotFree) {
- assert(B != Buffer.getPointer());
+ if (B == Buffer.getPointer()) {
+ assert(0 && "Replacing with the same buffer");
+ Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
+ return;
+ }
if (shouldFreeBuffer())
delete Buffer.getPointer();
@@ -366,7 +370,8 @@ LineTableInfo &SourceManager::getLineTable() {
SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr)
: Diag(Diag), FileMgr(FileMgr), OverridenFilesKeepOriginalName(true),
ExternalSLocEntries(0), LineTable(0), NumLinearScans(0),
- NumBinaryProbes(0), FakeBufferForRecovery(0) {
+ NumBinaryProbes(0), FakeBufferForRecovery(0),
+ FakeContentCacheForRecovery(0) {
clearIDTables();
Diag.setSourceManager(this);
}
@@ -378,16 +383,21 @@ SourceManager::~SourceManager() {
// content cache objects are bump pointer allocated, we just have to run the
// dtors, but we call the deallocate method for completeness.
for (unsigned i = 0, e = MemBufferInfos.size(); i != e; ++i) {
- MemBufferInfos[i]->~ContentCache();
- ContentCacheAlloc.Deallocate(MemBufferInfos[i]);
+ if (MemBufferInfos[i]) {
+ MemBufferInfos[i]->~ContentCache();
+ ContentCacheAlloc.Deallocate(MemBufferInfos[i]);
+ }
}
for (llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*>::iterator
I = FileInfos.begin(), E = FileInfos.end(); I != E; ++I) {
- I->second->~ContentCache();
- ContentCacheAlloc.Deallocate(I->second);
+ if (I->second) {
+ I->second->~ContentCache();
+ ContentCacheAlloc.Deallocate(I->second);
+ }
}
delete FakeBufferForRecovery;
+ delete FakeContentCacheForRecovery;
for (llvm::DenseMap<FileID, MacroArgsMap *>::iterator
I = MacroArgsCacheMap.begin(),E = MacroArgsCacheMap.end(); I!=E; ++I) {
@@ -461,6 +471,25 @@ SourceManager::createMemBufferContentCache(const MemoryBuffer *Buffer) {
return Entry;
}
+const SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index,
+ bool *Invalid) const {
+ assert(!SLocEntryLoaded[Index]);
+ if (ExternalSLocEntries->ReadSLocEntry(-(static_cast<int>(Index) + 2))) {
+ if (Invalid)
+ *Invalid = true;
+ // If the file of the SLocEntry changed, we could still have loaded it.
+ if (!SLocEntryLoaded[Index]) {
+ // Try to recover; create a SLocEntry so the rest of clang can handle it.
+ LoadedSLocEntryTable[Index] = SLocEntry::get(0,
+ FileInfo::get(SourceLocation(),
+ getFakeContentCacheForRecovery(),
+ SrcMgr::C_User));
+ }
+ }
+
+ return LoadedSLocEntryTable[Index];
+}
+
std::pair<int, unsigned>
SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
unsigned TotalSize) {
@@ -483,6 +512,18 @@ const llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
return FakeBufferForRecovery;
}
+/// \brief As part of recovering from missing or changed content, produce a
+/// fake content cache.
+const SrcMgr::ContentCache *
+SourceManager::getFakeContentCacheForRecovery() const {
+ if (!FakeContentCacheForRecovery) {
+ FakeContentCacheForRecovery = new ContentCache();
+ FakeContentCacheForRecovery->replaceBuffer(getFakeBufferForRecovery(),
+ /*DoNotFree=*/true);
+ }
+ return FakeContentCacheForRecovery;
+}
+
//===----------------------------------------------------------------------===//
// Methods to create new FileID's and macro expansions.
//===----------------------------------------------------------------------===//
@@ -580,6 +621,7 @@ void SourceManager::overrideFileContents(const FileEntry *SourceFile,
assert(IR && "getOrCreateContentCache() cannot return NULL");
const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(Buffer, DoNotFree);
+ const_cast<SrcMgr::ContentCache *>(IR)->BufferOverridden = true;
}
void SourceManager::overrideFileContents(const FileEntry *SourceFile,
@@ -730,11 +772,11 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
/// This function knows that the SourceLocation is in a loaded buffer, not a
/// local one.
FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
- assert(SLocOffset >= CurrentLoadedOffset && "Bad function choice");
-
// Sanity checking; otherwise a bug may lead to a hang in a release build.
- if (SLocOffset < CurrentLoadedOffset)
+ if (SLocOffset < CurrentLoadedOffset) {
+ assert(0 && "Invalid SLocOffset or bad function choice");
return FileID();
+ }
// Essentially the same as the local case, but the loaded array is sorted
// in the other direction.
@@ -946,13 +988,20 @@ const char *SourceManager::getCharacterData(SourceLocation SL,
unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
bool *Invalid) const {
bool MyInvalid = false;
- const char *Buf = getBuffer(FID, &MyInvalid)->getBufferStart();
+ const llvm::MemoryBuffer *MemBuf = getBuffer(FID, &MyInvalid);
if (Invalid)
*Invalid = MyInvalid;
if (MyInvalid)
return 1;
+ if (FilePos >= MemBuf->getBufferSize()) {
+ if (Invalid)
+ *Invalid = MyInvalid;
+ return 1;
+ }
+
+ const char *Buf = MemBuf->getBufferStart();
unsigned LineStart = FilePos;
while (LineStart && Buf[LineStart-1] != '\n' && Buf[LineStart-1] != '\r')
--LineStart;
@@ -988,6 +1037,10 @@ unsigned SourceManager::getPresumedColumnNumber(SourceLocation Loc,
return getPresumedLoc(Loc).getColumn();
}
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
static LLVM_ATTRIBUTE_NOINLINE void
ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
llvm::BumpPtrAllocator &Alloc,
@@ -1013,11 +1066,44 @@ static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
unsigned Offs = 0;
while (1) {
// Skip over the contents of the line.
- // TODO: Vectorize this? This is very performance sensitive for programs
- // with lots of diagnostics and in -E mode.
const unsigned char *NextBuf = (const unsigned char *)Buf;
+
+#ifdef __SSE2__
+ // Try to skip to the next newline using SSE instructions. This is very
+ // performance sensitive for programs with lots of diagnostics and in -E
+ // mode.
+ __m128i CRs = _mm_set1_epi8('\r');
+ __m128i LFs = _mm_set1_epi8('\n');
+
+ // First fix up the alignment to 16 bytes.
+ while (((uintptr_t)NextBuf & 0xF) != 0) {
+ if (*NextBuf == '\n' || *NextBuf == '\r' || *NextBuf == '\0')
+ goto FoundSpecialChar;
+ ++NextBuf;
+ }
+
+ // Scan 16 byte chunks for '\r' and '\n'. Ignore '\0'.
+ while (NextBuf+16 <= End) {
+ __m128i Chunk = *(__m128i*)NextBuf;
+ __m128i Cmp = _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs),
+ _mm_cmpeq_epi8(Chunk, LFs));
+ unsigned Mask = _mm_movemask_epi8(Cmp);
+
+ // If we found a newline, adjust the pointer and jump to the handling code.
+ if (Mask != 0) {
+ NextBuf += llvm::CountTrailingZeros_32(Mask);
+ goto FoundSpecialChar;
+ }
+ NextBuf += 16;
+ }
+#endif
+
while (*NextBuf != '\n' && *NextBuf != '\r' && *NextBuf != '\0')
++NextBuf;
+
+#ifdef __SSE2__
+FoundSpecialChar:
+#endif
Offs += NextBuf-Buf;
Buf = NextBuf;
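
The SSE2 fast path above compares 16 bytes at a time against '\r' and '\n', ORs the two comparison masks, and uses _mm_movemask_epi8 plus a count-trailing-zeros to locate the first match. A standalone sketch of the same idea that still builds without SSE2; unaligned loads here stand in for the alignment preamble the real code performs, and __builtin_ctz assumes GCC/Clang:

```c++
#include <cstring>
#include <iostream>

#ifdef __SSE2__
#include <emmintrin.h>
#endif

// Returns a pointer to the first '\n' or '\r' in [Buf, End), or End.
// Sketch of the vectorized scan in ComputeLineNumbers; the real code also
// handles '\0' and fixes up 16-byte alignment before the SIMD loop.
static const char *findNewline(const char *Buf, const char *End) {
#ifdef __SSE2__
  const __m128i CRs = _mm_set1_epi8('\r');
  const __m128i LFs = _mm_set1_epi8('\n');
  while (Buf + 16 <= End) {
    __m128i Chunk = _mm_loadu_si128(reinterpret_cast<const __m128i *>(Buf));
    int Mask = _mm_movemask_epi8(
        _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs), _mm_cmpeq_epi8(Chunk, LFs)));
    if (Mask != 0) // lowest set bit = first matching byte in the chunk
      return Buf + __builtin_ctz(static_cast<unsigned>(Mask));
    Buf += 16;
  }
#endif
  while (Buf != End && *Buf != '\n' && *Buf != '\r')
    ++Buf;
  return Buf;
}

int main() {
  const char *Text = "static const char *p; // comment\nnext line";
  const char *NL = findNewline(Text, Text + std::strlen(Text));
  std::cout << "newline at offset " << (NL - Text) << "\n"; // 32
  return 0;
}
```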
@@ -1508,9 +1594,10 @@ SourceLocation SourceManager::translateLineCol(FileID FID,
return FileLoc.getLocWithOffset(Size);
}
+ const llvm::MemoryBuffer *Buffer = Content->getBuffer(Diag, *this);
unsigned FilePos = Content->SourceLineCache[Line - 1];
- const char *Buf = Content->getBuffer(Diag, *this)->getBufferStart() + FilePos;
- unsigned BufLength = Content->getBuffer(Diag, *this)->getBufferEnd() - Buf;
+ const char *Buf = Buffer->getBufferStart() + FilePos;
+ unsigned BufLength = Buffer->getBufferSize() - FilePos;
if (BufLength == 0)
return FileLoc.getLocWithOffset(FilePos);
@@ -1568,14 +1655,31 @@ void SourceManager::computeMacroArgsCache(MacroArgsMap *&CachePtr,
continue;
}
- if (!Entry.getExpansion().isMacroArgExpansion())
+ const ExpansionInfo &ExpInfo = Entry.getExpansion();
+
+ if (ExpInfo.getExpansionLocStart().isFileID()) {
+ if (!isInFileID(ExpInfo.getExpansionLocStart(), FID))
+ return; // No more files/macros that may be "contained" in this file.
+ }
+
+ if (!ExpInfo.isMacroArgExpansion())
+ continue;
+
+ SourceLocation SpellLoc = ExpInfo.getSpellingLoc();
+ while (!SpellLoc.isFileID()) {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(SpellLoc);
+ const ExpansionInfo &Info = getSLocEntry(LocInfo.first).getExpansion();
+ if (!Info.isMacroArgExpansion())
+ break;
+ SpellLoc = Info.getSpellingLoc().getLocWithOffset(LocInfo.second);
+ }
+ if (!SpellLoc.isFileID())
continue;
-
- SourceLocation SpellLoc =
- getSpellingLoc(Entry.getExpansion().getSpellingLoc());
+
unsigned BeginOffs;
if (!isInFileID(SpellLoc, FID, &BeginOffs))
- return; // No more files/macros that may be "contained" in this file.
+ continue;
+
unsigned EndOffs = BeginOffs + getFileIDSize(FileID::get(ID));
// Add a new chunk for this macro argument. A previous macro argument chunk
@@ -1648,7 +1752,7 @@ static bool MoveUpIncludeHierarchy(std::pair<FileID, unsigned> &Loc,
SourceLocation UpperLoc;
const SrcMgr::SLocEntry &Entry = SM.getSLocEntry(Loc.first);
if (Entry.isExpansion())
- UpperLoc = Entry.getExpansion().getExpansionLocEnd();
+ UpperLoc = Entry.getExpansion().getExpansionLocStart();
else
UpperLoc = Entry.getFile().getIncludeLoc();
diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
index 593db2b..f938b5a 100644
--- a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
@@ -27,6 +27,7 @@ static const LangAS::Map DefaultAddrSpaceMap = { 0 };
TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
// Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
// SPARC. These should be overridden by concrete targets as needed.
+ BigEndian = true;
TLSSupported = true;
NoAsmVariants = false;
PointerWidth = PointerAlign = 32;
@@ -34,6 +35,7 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
IntWidth = IntAlign = 32;
LongWidth = LongAlign = 32;
LongLongWidth = LongLongAlign = 64;
+ SuitableAlign = 64;
HalfWidth = 16;
HalfAlign = 16;
FloatWidth = 32;
@@ -74,6 +76,9 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
// Default to no types using fpret.
RealTypeUsesObjCFPRet = 0;
+ // Default to not using fp2ret for __Complex long double
+ ComplexLongDoubleUsesFP2Ret = false;
+
// Default to using the Itanium ABI.
CXXABI = CXXABI_Itanium;
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
index ae5f270..1ad37c4 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -54,6 +54,14 @@ static void DefineStd(MacroBuilder &Builder, StringRef MacroName,
Builder.defineMacro("__" + MacroName + "__");
}
+static void defineCPUMacros(MacroBuilder &Builder, StringRef CPUName,
+ bool Tuning = true) {
+ Builder.defineMacro("__" + CPUName);
+ Builder.defineMacro("__" + CPUName + "__");
+ if (Tuning)
+ Builder.defineMacro("__tune_" + CPUName + "__");
+}
+
//===----------------------------------------------------------------------===//
// Defines specific to certain operating systems.
//===----------------------------------------------------------------------===//
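
defineCPUMacros, added just above, mirrors GCC's convention for CPU-specific predefines: for a CPU name like k8 it emits __k8 and __k8__, plus __tune_k8__ when tuning macros are wanted. A sketch with a plain string recorder standing in for clang's MacroBuilder (the recorder type is ours):

```c++
#include <iostream>
#include <string>

// Stand-in for clang's MacroBuilder: just records "#define NAME 1" lines.
struct MacroBuilder {
  std::string Out;
  void defineMacro(const std::string &Name) {
    Out += "#define " + Name + " 1\n";
  }
};

// Same shape as the helper added above.
static void defineCPUMacros(MacroBuilder &Builder, const std::string &CPUName,
                            bool Tuning = true) {
  Builder.defineMacro("__" + CPUName);
  Builder.defineMacro("__" + CPUName + "__");
  if (Tuning)
    Builder.defineMacro("__tune_" + CPUName + "__");
}

int main() {
  MacroBuilder B;
  defineCPUMacros(B, "k8");
  std::cout << B.Out; // __k8, __k8__, __tune_k8__
  return 0;
}
```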
@@ -99,13 +107,6 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
// allow this in C, since one might have block pointers in structs that
// are used in pure C code and in Objective-C ARC.
Builder.defineMacro("__unsafe_unretained", "");
-
- // The Objective-C bridged cast keywords are defined to nothing in non-ARC
- // mode; then they become normal, C-style casts.
- Builder.defineMacro("__bridge", "");
- Builder.defineMacro("__bridge_transfer", "");
- Builder.defineMacro("__bridge_retained", "");
- Builder.defineMacro("__bridge_retain", "");
}
if (Opts.Static)
@@ -118,36 +119,15 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
// Get the platform type and version number from the triple.
unsigned Maj, Min, Rev;
-
- // If no version was given, default to to 10.4.0, for simplifying tests.
- if (Triple.getOSName() == "darwin" || Triple.getOSName() == "osx") {
+ if (Triple.isMacOSX()) {
+ Triple.getMacOSXVersion(Maj, Min, Rev);
PlatformName = "macosx";
- Min = Rev = 0;
- Maj = 8;
} else {
- // Otherwise, honor all three triple forms ("-darwinNNN[-iphoneos]",
- // "-osxNNN", and "-iosNNN").
-
- if (Triple.getOS() == llvm::Triple::Darwin) {
- // For historical reasons that make little sense, the version passed here
- // is the "darwin" version, which drops the 10 and offsets by 4.
- Triple.getOSVersion(Maj, Min, Rev);
-
- if (Triple.getEnvironmentName() == "iphoneos") {
- PlatformName = "ios";
- } else {
- PlatformName = "macosx";
- Rev = Min;
- Min = Maj - 4;
- Maj = 10;
- }
- } else {
- Triple.getOSVersion(Maj, Min, Rev);
- PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
- }
+ Triple.getOSVersion(Maj, Min, Rev);
+ PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
}
- // If -ccc-host-triple arch-pc-win32-macho option specified, we're
+ // If -target arch-pc-win32-macho option specified, we're
// generating code for Win32 ABI. No need to emit
// __ENVIRONMENT_XX_OS_VERSION_MIN_REQUIRED__.
if (PlatformName == "win32") {
@@ -156,7 +136,7 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
}
// Set the appropriate OS version define.
- if (PlatformName == "ios") {
+ if (Triple.getOS() == llvm::Triple::IOS) {
assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
char Str[6];
Str[0] = '0' + Maj;
@@ -217,6 +197,12 @@ public:
return "__TEXT,__StaticInit,regular,pure_instructions";
}
+ /// Darwin does not support protected visibility. Darwin's "default"
+ /// is very similar to ELF's "protected"; Darwin requires a "weak"
+ /// attribute on declarations that can be dynamically replaced.
+ virtual bool hasProtectedVisibility() const {
+ return false;
+ }
};
@@ -236,7 +222,18 @@ protected:
}
public:
DragonFlyBSDTargetInfo(const std::string &triple)
- : OSTargetInfo<Target>(triple) {}
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+
+ llvm::Triple Triple(triple);
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->MCountName = ".mcount";
+ break;
+ }
+ }
};
// FreeBSD Target
@@ -249,7 +246,7 @@ protected:
unsigned Release = Triple.getOSMajorVersion();
if (Release == 0U)
- Release = 8U;
+ Release = 8;
Builder.defineMacro("__FreeBSD__", Twine(Release));
Builder.defineMacro("__FreeBSD_cc_version", Twine(Release * 100000U + 1U));
@@ -298,6 +295,7 @@ protected:
Builder.defineMacro("_EM_LSIZE", "4");
Builder.defineMacro("_EM_FSIZE", "4");
Builder.defineMacro("_EM_DSIZE", "8");
+ Builder.defineMacro("__ELF__");
DefineStd(Builder, "unix", Opts);
}
public:
@@ -329,6 +327,10 @@ public:
this->UserLabelPrefix = "";
this->WIntType = TargetInfo::UnsignedInt;
}
+
+ virtual const char *getStaticInitSectionSpecifier() const {
+ return ".text.startup";
+ }
};
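
The new Linux hook mirrors GCC's placement of static constructors; a small example of what it affects (hypothetical source file, standard Linux behaviour):

    // Globals with dynamic initializers get their synthesized init code
    // emitted into the section named above, e.g.:
    struct Logger { Logger() { /* runs before main */ } };
    Logger TheLogger; // initializer lands in .text.startup on Linux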
// NetBSD Target
@@ -367,7 +369,26 @@ protected:
}
public:
OpenBSDTargetInfo(const std::string &triple)
- : OSTargetInfo<Target>(triple) {}
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+
+ llvm::Triple Triple(triple);
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ case llvm::Triple::arm:
+ case llvm::Triple::sparc:
+ this->MCountName = "__mcount";
+ break;
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::ppc:
+ case llvm::Triple::sparcv9:
+ this->MCountName = "_mcount";
+ break;
+ }
+ }
};
// PSP Target
@@ -408,13 +429,14 @@ public:
PS3PPUTargetInfo(const std::string& triple)
: OSTargetInfo<Target>(triple) {
this->UserLabelPrefix = "";
- this->LongWidth = this->LongAlign = this->PointerWidth = this->PointerAlign = 32;
+ this->LongWidth = this->LongAlign = 32;
+ this->PointerWidth = this->PointerAlign = 32;
this->IntMaxType = TargetInfo::SignedLongLong;
this->UIntMaxType = TargetInfo::UnsignedLongLong;
this->Int64Type = TargetInfo::SignedLongLong;
this->SizeType = TargetInfo::UnsignedInt;
this->DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
}
};
@@ -468,12 +490,26 @@ protected:
Builder.defineMacro("__ELF__");
Builder.defineMacro("__svr4__");
Builder.defineMacro("__SVR4");
+ // Solaris headers require _XOPEN_SOURCE to be set to 600 for C99 and
+ // newer, but to 500 for everything else. feature_test.h has a check to
+ // ensure that you are not using C99 with an old version of X/Open or C89
+ // with a new version.
+ if (Opts.C99 || Opts.C11)
+ Builder.defineMacro("_XOPEN_SOURCE", "600");
+ else
+ Builder.defineMacro("_XOPEN_SOURCE", "500");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("__C99FEATURES__");
+ Builder.defineMacro("_LARGEFILE_SOURCE");
+ Builder.defineMacro("_LARGEFILE64_SOURCE");
+ Builder.defineMacro("__EXTENSIONS__");
+ Builder.defineMacro("_REENTRANT");
}
public:
SolarisTargetInfo(const std::string& triple)
: OSTargetInfo<Target>(triple) {
this->UserLabelPrefix = "";
- this->WCharType = this->SignedLong;
+ this->WCharType = this->SignedInt;
// FIXME: WIntType should be SignedLong
}
};
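
Taken together, the Solaris hooks predefine a fixed set of feature-test macros; a sketch of the expected preprocessor output, derived from the code above (hypothetical invocations):

    // clang -std=c99 -dM -E ... (Solaris triple):
    //   #define _XOPEN_SOURCE 600
    // clang -std=c89 -dM -E ...:
    //   #define _XOPEN_SOURCE 500
    // Both also get _LARGEFILE_SOURCE, _LARGEFILE64_SOURCE,
    // __EXTENSIONS__ and _REENTRANT.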
@@ -538,7 +574,10 @@ class PPCTargetInfo : public TargetInfo {
static const char * const GCCRegNames[];
static const TargetInfo::GCCRegAlias GCCRegAliases[];
public:
- PPCTargetInfo(const std::string& triple) : TargetInfo(triple) {}
+ PPCTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble;
+ }
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
@@ -546,9 +585,13 @@ public:
NumRecords = clang::PPC::LastTSBuiltin-Builtin::FirstTSBuiltin;
}
+ virtual bool isCLZForZeroUndef() const { return false; }
+
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ virtual bool hasFeature(StringRef Feature) const;
+
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
@@ -701,7 +744,11 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
}
}
+bool PPCTargetInfo::hasFeature(StringRef Feature) const {
+ return Feature == "powerpc";
+}
+
const char * const PPCTargetInfo::GCCRegNames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
@@ -814,13 +861,21 @@ public:
"i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
switch (getTriple().getOS()) {
+ case llvm::Triple::Linux:
case llvm::Triple::FreeBSD:
case llvm::Triple::NetBSD:
SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
break;
default:
break;
}
+
+ if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
}
virtual const char *getVAListDeclaration() const {
@@ -846,6 +901,11 @@ public:
Int64Type = SignedLong;
DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64";
+
+ if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
}
virtual const char *getVAListDeclaration() const {
return "typedef char* __builtin_va_list;";
@@ -862,6 +922,10 @@ public:
: DarwinTargetInfo<PPC32TargetInfo>(triple) {
HasAlignMac68kSupport = true;
BoolWidth = BoolAlign = 32; //XXX support -mone-byte-bool?
+ LongLongAlign = 32;
+ SuitableAlign = 128;
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:32:64-f32:32:32-f64:64:64-v128:128:128-n32";
}
virtual const char *getVAListDeclaration() const {
return "typedef char* __builtin_va_list;";
@@ -874,6 +938,7 @@ public:
DarwinPPC64TargetInfo(const std::string& triple)
: DarwinTargetInfo<PPC64TargetInfo>(triple) {
HasAlignMac68kSupport = true;
+ SuitableAlign = 128;
}
};
} // end anonymous namespace.
@@ -890,6 +955,7 @@ namespace {
std::vector<llvm::StringRef> AvailableFeatures;
public:
PTXTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ BigEndian = false;
TLSSupported = false;
LongWidth = LongAlign = 64;
AddrSpaceMap = &PTXAddrSpaceMap;
@@ -924,7 +990,10 @@ namespace {
Records = BuiltinInfo;
NumRecords = clang::PTX::LastTSBuiltin-Builtin::FirstTSBuiltin;
}
-
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "ptx";
+ }
+
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
@@ -948,7 +1017,7 @@ namespace {
}
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
- const std::string &Name,
+ StringRef Name,
bool Enabled) const;
};
@@ -970,7 +1039,7 @@ namespace {
}
bool PTXTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
- const std::string &Name,
+ StringRef Name,
bool Enabled) const {
if(std::binary_search(AvailableFeatures.begin(), AvailableFeatures.end(),
Name)) {
@@ -1023,6 +1092,10 @@ public:
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "mblaze";
+ }
+
virtual const char *getVAListDeclaration() const {
return "typedef char* __builtin_va_list;";
}
@@ -1146,6 +1219,8 @@ static const char* const GCCRegNames[] = {
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
+ "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7",
+ "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15",
};
const TargetInfo::AddlRegName AddlRegNames[] = {
@@ -1163,14 +1238,18 @@ const TargetInfo::AddlRegName AddlRegNames[] = {
// most of the implementation can be shared.
class X86TargetInfo : public TargetInfo {
enum X86SSEEnum {
- NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42
+ NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2
} SSELevel;
enum MMX3DNowEnum {
NoMMX3DNow, MMX, AMD3DNow, AMD3DNowAthlon
} MMX3DNowLevel;
bool HasAES;
- bool HasAVX;
+ bool HasLZCNT;
+ bool HasBMI;
+ bool HasBMI2;
+ bool HasPOPCNT;
+ bool HasFMA4;
/// \brief Enumeration of all of the X86 CPUs supported by Clang.
///
@@ -1252,6 +1331,7 @@ class X86TargetInfo : public TargetInfo {
CK_Corei7,
CK_Corei7AVX,
CK_CoreAVXi,
+ CK_CoreAVX2,
//@}
/// \name K6
@@ -1283,6 +1363,20 @@ class X86TargetInfo : public TargetInfo {
CK_Opteron,
CK_OpteronSSE3,
CK_AMDFAM10,
+ //@}
+
+ /// \name Bobcat
+ /// Bobcat architecture processors.
+ //@{
+ CK_BTVER1,
+ //@}
+
+ /// \name Bulldozer
+ /// Bulldozer architecture processors.
+ //@{
+ CK_BDVER1,
+ CK_BDVER2,
+ //@}
/// This specification is deprecated and will be removed in the future.
/// Users should prefer \see CK_K8.
@@ -1300,9 +1394,15 @@ class X86TargetInfo : public TargetInfo {
public:
X86TargetInfo(const std::string& triple)
: TargetInfo(triple), SSELevel(NoSSE), MMX3DNowLevel(NoMMX3DNow),
- HasAES(false), HasAVX(false), CPU(CK_Generic) {
+ HasAES(false), HasLZCNT(false), HasBMI(false), HasBMI2(false),
+ HasPOPCNT(false), HasFMA4(false), CPU(CK_Generic) {
+ BigEndian = false;
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
}
+ virtual unsigned getFloatEvalMethod() const {
+ // X87 evaluates with 80 bits "long double" precision.
+ return SSELevel == NoSSE ? 2 : 0;
+ }
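
The value returned here surfaces to user code as the C99 FLT_EVAL_METHOD macro; a quick way to observe it (minimal sketch):

    #include <cfloat>
    #include <cstdio>

    int main() {
      // 0: operations evaluate in their own type (SSE code generation);
      // 2: intermediates are kept in x87 80-bit precision (no SSE).
      std::printf("FLT_EVAL_METHOD = %d\n", (int)FLT_EVAL_METHOD);
    }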
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
Records = BuiltinInfo;
@@ -1332,12 +1432,17 @@ public:
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const;
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
- const std::string &Name,
+ StringRef Name,
bool Enabled) const;
virtual void getDefaultFeatures(llvm::StringMap<bool> &Features) const;
+ virtual bool hasFeature(StringRef Feature) const;
virtual void HandleTargetFeatures(std::vector<std::string> &Features);
virtual const char* getABI() const {
- return MMX3DNowLevel == NoMMX3DNow ? "no-mmx" : "";
+ if (PointerWidth == 64 && SSELevel >= AVX)
+ return "avx";
+ else if (PointerWidth == 32 && MMX3DNowLevel == NoMMX3DNow)
+ return "no-mmx";
+ return "";
}
virtual bool setCPU(const std::string &Name) {
CPU = llvm::StringSwitch<CPUKind>(Name)
@@ -1367,6 +1472,7 @@ public:
.Case("corei7", CK_Corei7)
.Case("corei7-avx", CK_Corei7AVX)
.Case("core-avx-i", CK_CoreAVXi)
+ .Case("core-avx2", CK_CoreAVX2)
.Case("k6", CK_K6)
.Case("k6-2", CK_K6_2)
.Case("k6-3", CK_K6_3)
@@ -1383,6 +1489,9 @@ public:
.Case("opteron", CK_Opteron)
.Case("opteron-sse3", CK_OpteronSSE3)
.Case("amdfam10", CK_AMDFAM10)
+ .Case("btver1", CK_BTVER1)
+ .Case("bdver1", CK_BDVER1)
+ .Case("bdver2", CK_BDVER2)
.Case("x86-64", CK_x86_64)
.Case("geode", CK_Geode)
.Default(CK_Generic);
@@ -1436,6 +1545,7 @@ public:
case CK_Corei7:
case CK_Corei7AVX:
case CK_CoreAVXi:
+ case CK_CoreAVX2:
case CK_Athlon64:
case CK_Athlon64SSE3:
case CK_AthlonFX:
@@ -1444,6 +1554,9 @@ public:
case CK_Opteron:
case CK_OpteronSSE3:
case CK_AMDFAM10:
+ case CK_BTVER1:
+ case CK_BDVER1:
+ case CK_BDVER2:
case CK_x86_64:
return true;
}
@@ -1465,6 +1578,12 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
Features["sse4a"] = false;
Features["aes"] = false;
Features["avx"] = false;
+ Features["avx2"] = false;
+ Features["lzcnt"] = false;
+ Features["bmi"] = false;
+ Features["bmi2"] = false;
+ Features["popcnt"] = false;
+ Features["fma4"] = false;
// FIXME: This *really* should not be here.
@@ -1509,8 +1628,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
break;
case CK_Penryn:
setFeatureEnabled(Features, "mmx", true);
- setFeatureEnabled(Features, "sse4", true);
- Features["sse42"] = false;
+ setFeatureEnabled(Features, "sse4.1", true);
break;
case CK_Atom:
setFeatureEnabled(Features, "mmx", true);
@@ -1528,6 +1646,15 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabled(Features, "aes", true);
//setFeatureEnabled(Features, "avx", true);
break;
+ case CK_CoreAVX2:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse4", true);
+ setFeatureEnabled(Features, "aes", true);
+ setFeatureEnabled(Features, "lzcnt", true);
+ setFeatureEnabled(Features, "bmi", true);
+ setFeatureEnabled(Features, "bmi2", true);
+ //setFeatureEnabled(Features, "avx2", true);
+ break;
case CK_K6:
case CK_WinChipC6:
setFeatureEnabled(Features, "mmx", true);
@@ -1567,6 +1694,15 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
setFeatureEnabled(Features, "sse4a", true);
setFeatureEnabled(Features, "3dnowa", true);
break;
+ case CK_BTVER1:
+ setFeatureEnabled(Features, "ssse3", true);
+ setFeatureEnabled(Features, "sse4a", true);
+ case CK_BDVER1:
+ case CK_BDVER2:
+ setFeatureEnabled(Features, "sse4", true);
+ setFeatureEnabled(Features, "sse4a", true);
+ setFeatureEnabled(Features, "aes", true);
+ break;
case CK_C3_2:
setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse", true);
@@ -1575,7 +1711,7 @@ void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
}
bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
- const std::string &Name,
+ StringRef Name,
bool Enabled) const {
// FIXME: This *really* should not be here. We need some way of translating
// options into llvm subtarget features.
@@ -1600,7 +1736,8 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
Features["ssse3"] = true;
else if (Name == "sse4" || Name == "sse4.2")
Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
- Features["ssse3"] = Features["sse41"] = Features["sse42"] = true;
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = true;
else if (Name == "sse4.1")
Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = true;
@@ -1613,21 +1750,39 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
else if (Name == "avx")
Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = Features["sse42"] =
- Features["avx"] = true;
+ Features["popcnt"] = Features["avx"] = true;
+ else if (Name == "avx2")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = Features["avx"] = Features["avx2"] = true;
+ else if (Name == "fma4")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = Features["avx"] = Features["fma4"] = true;
else if (Name == "sse4a")
- Features["mmx"] = Features["sse4a"] = true;
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["lzcnt"] = Features["popcnt"] = Features["sse4a"] = true;
+ else if (Name == "lzcnt")
+ Features["lzcnt"] = true;
+ else if (Name == "bmi")
+ Features["bmi"] = true;
+ else if (Name == "bmi2")
+ Features["bmi2"] = true;
+ else if (Name == "popcnt")
+ Features["popcnt"] = true;
} else {
if (Name == "mmx")
Features["mmx"] = Features["3dnow"] = Features["3dnowa"] = false;
else if (Name == "sse")
Features["sse"] = Features["sse2"] = Features["sse3"] =
- Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["sse4a"] = false;
else if (Name == "sse2")
Features["sse2"] = Features["sse3"] = Features["ssse3"] =
- Features["sse41"] = Features["sse42"] = false;
+ Features["sse41"] = Features["sse42"] = Features["sse4a"] = false;
else if (Name == "sse3")
Features["sse3"] = Features["ssse3"] = Features["sse41"] =
- Features["sse42"] = false;
+ Features["sse42"] = Features["sse4a"] = false;
else if (Name == "ssse3")
Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
else if (Name == "sse4" || Name == "sse4.1")
@@ -1641,9 +1796,21 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
else if (Name == "aes")
Features["aes"] = false;
else if (Name == "avx")
- Features["avx"] = false;
+ Features["avx"] = Features["avx2"] = Features["fma4"] = false;
+ else if (Name == "avx2")
+ Features["avx2"] = false;
else if (Name == "sse4a")
Features["sse4a"] = false;
+ else if (Name == "lzcnt")
+ Features["lzcnt"] = false;
+ else if (Name == "bmi")
+ Features["bmi"] = false;
+ else if (Name == "bmi2")
+ Features["bmi2"] = false;
+ else if (Name == "popcnt")
+ Features["popcnt"] = false;
+ else if (Name == "fma4")
+ Features["fma4"] = false;
}
return true;
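
The long else-if chains above encode a dependency closure: switching a feature on pulls in everything it implies, and switching one off cascades downward. A compact sketch of the same idea (illustrative data, not the full x86 table):

    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    static const std::map<std::string, std::vector<std::string>> Implies = {
        {"avx2", {"avx"}},    {"avx", {"sse42", "popcnt"}},
        {"sse42", {"sse41"}}, {"sse41", {"ssse3"}},
        {"ssse3", {"sse3"}},  {"sse3", {"sse2"}},
        {"sse2", {"sse"}},    {"sse", {"mmx"}},
    };

    static void enableWithDeps(const std::string &F, std::set<std::string> &On) {
      if (!On.insert(F).second)
        return; // already on, stop the recursion
      auto It = Implies.find(F);
      if (It != Implies.end())
        for (const auto &Dep : It->second)
          enableWithDeps(Dep, On);
    }

    int main() {
      std::set<std::string> On;
      enableWithDeps("avx", On); // avx, sse42, popcnt, sse41, ..., mmx
    }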
@@ -1658,20 +1825,42 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
if (Features[i][0] == '-')
continue;
- if (Features[i].substr(1) == "aes") {
+ StringRef Feature = StringRef(Features[i]).substr(1);
+
+ if (Feature == "aes") {
HasAES = true;
continue;
}
- // FIXME: Not sure yet how to treat AVX in regard to SSE levels.
- // For now let it be enabled together with other SSE levels.
- if (Features[i].substr(1) == "avx") {
- HasAVX = true;
+ if (Feature == "lzcnt") {
+ HasLZCNT = true;
+ continue;
+ }
+
+ if (Feature == "bmi") {
+ HasBMI = true;
+ continue;
+ }
+
+ if (Feature == "bmi2") {
+ HasBMI2 = true;
+ continue;
+ }
+
+ if (Feature == "popcnt") {
+ HasPOPCNT = true;
+ continue;
+ }
+
+ if (Feature == "fma4") {
+ HasFMA4 = true;
continue;
}
assert(Features[i][0] == '+' && "Invalid target feature!");
- X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Features[i].substr(1))
+ X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
+ .Case("avx2", AVX2)
+ .Case("avx", AVX)
.Case("sse42", SSE42)
.Case("sse41", SSE41)
.Case("ssse3", SSSE3)
@@ -1682,7 +1871,7 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
SSELevel = std::max(SSELevel, Level);
MMX3DNowEnum ThreeDNowLevel =
- llvm::StringSwitch<MMX3DNowEnum>(Features[i].substr(1))
+ llvm::StringSwitch<MMX3DNowEnum>(Feature)
.Case("3dnowa", AMD3DNowAthlon)
.Case("3dnow", AMD3DNow)
.Case("mmx", MMX)
@@ -1705,8 +1894,10 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
if (PointerWidth == 64) {
- Builder.defineMacro("_LP64");
- Builder.defineMacro("__LP64__");
+ if (getLongWidth() == 64) {
+ Builder.defineMacro("_LP64");
+ Builder.defineMacro("__LP64__");
+ }
Builder.defineMacro("__amd64__");
Builder.defineMacro("__amd64");
Builder.defineMacro("__x86_64");
@@ -1729,9 +1920,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_WinChipC6:
case CK_WinChip2:
case CK_C3:
- Builder.defineMacro("__i486");
- Builder.defineMacro("__i486__");
- Builder.defineMacro("__tune_i486__");
+ defineCPUMacros(Builder, "i486");
break;
case CK_PentiumMMX:
Builder.defineMacro("__pentium_mmx__");
@@ -1739,12 +1928,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
// Fallthrough
case CK_i586:
case CK_Pentium:
- Builder.defineMacro("__i586");
- Builder.defineMacro("__i586__");
- Builder.defineMacro("__tune_i586__");
- Builder.defineMacro("__pentium");
- Builder.defineMacro("__pentium__");
- Builder.defineMacro("__tune_pentium__");
+ defineCPUMacros(Builder, "i586");
+ defineCPUMacros(Builder, "pentium");
break;
case CK_Pentium3:
case CK_Pentium3M:
@@ -1768,34 +1953,25 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
break;
case CK_Pentium4:
case CK_Pentium4M:
- Builder.defineMacro("__pentium4");
- Builder.defineMacro("__pentium4__");
- Builder.defineMacro("__tune_pentium4__");
+ defineCPUMacros(Builder, "pentium4");
break;
case CK_Yonah:
case CK_Prescott:
case CK_Nocona:
- Builder.defineMacro("__nocona");
- Builder.defineMacro("__nocona__");
- Builder.defineMacro("__tune_nocona__");
+ defineCPUMacros(Builder, "nocona");
break;
case CK_Core2:
case CK_Penryn:
- Builder.defineMacro("__core2");
- Builder.defineMacro("__core2__");
- Builder.defineMacro("__tune_core2__");
+ defineCPUMacros(Builder, "core2");
break;
case CK_Atom:
- Builder.defineMacro("__atom");
- Builder.defineMacro("__atom__");
- Builder.defineMacro("__tune_atom__");
+ defineCPUMacros(Builder, "atom");
break;
case CK_Corei7:
case CK_Corei7AVX:
case CK_CoreAVXi:
- Builder.defineMacro("__corei7");
- Builder.defineMacro("__corei7__");
- Builder.defineMacro("__tune_corei7__");
+ case CK_CoreAVX2:
+ defineCPUMacros(Builder, "corei7");
break;
case CK_K6_2:
Builder.defineMacro("__k6_2__");
@@ -1811,18 +1987,14 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
}
// Fallthrough
case CK_K6:
- Builder.defineMacro("__k6");
- Builder.defineMacro("__k6__");
- Builder.defineMacro("__tune_k6__");
+ defineCPUMacros(Builder, "k6");
break;
case CK_Athlon:
case CK_AthlonThunderbird:
case CK_Athlon4:
case CK_AthlonXP:
case CK_AthlonMP:
- Builder.defineMacro("__athlon");
- Builder.defineMacro("__athlon__");
- Builder.defineMacro("__tune_athlon__");
+ defineCPUMacros(Builder, "athlon");
if (SSELevel != NoSSE) {
Builder.defineMacro("__athlon_sse__");
Builder.defineMacro("__tune_athlon_sse__");
@@ -1836,19 +2008,22 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Athlon64:
case CK_Athlon64SSE3:
case CK_AthlonFX:
- Builder.defineMacro("__k8");
- Builder.defineMacro("__k8__");
- Builder.defineMacro("__tune_k8__");
+ defineCPUMacros(Builder, "k8");
break;
case CK_AMDFAM10:
- Builder.defineMacro("__amdfam10");
- Builder.defineMacro("__amdfam10__");
- Builder.defineMacro("__tune_amdfam10__");
+ defineCPUMacros(Builder, "amdfam10");
+ break;
+ case CK_BTVER1:
+ defineCPUMacros(Builder, "btver1");
+ break;
+ case CK_BDVER1:
+ defineCPUMacros(Builder, "bdver1");
+ break;
+ case CK_BDVER2:
+ defineCPUMacros(Builder, "bdver2");
break;
case CK_Geode:
- Builder.defineMacro("__geode");
- Builder.defineMacro("__geode__");
- Builder.defineMacro("__tune_geode__");
+ defineCPUMacros(Builder, "geode");
break;
}
@@ -1864,11 +2039,27 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasAES)
Builder.defineMacro("__AES__");
- if (HasAVX)
- Builder.defineMacro("__AVX__");
+ if (HasLZCNT)
+ Builder.defineMacro("__LZCNT__");
+
+ if (HasBMI)
+ Builder.defineMacro("__BMI__");
+
+ if (HasBMI2)
+ Builder.defineMacro("__BMI2__");
+
+ if (HasPOPCNT)
+ Builder.defineMacro("__POPCNT__");
+
+ if (HasFMA4)
+ Builder.defineMacro("__FMA4__");
// Each case falls through to the previous one here.
switch (SSELevel) {
+ case AVX2:
+ Builder.defineMacro("__AVX2__");
+ case AVX:
+ Builder.defineMacro("__AVX__");
case SSE42:
Builder.defineMacro("__SSE4_2__");
case SSE41:
@@ -1889,6 +2080,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (Opts.MicrosoftExt && PointerWidth == 32) {
switch (SSELevel) {
+ case AVX2:
+ case AVX:
case SSE42:
case SSE41:
case SSSE3:
@@ -1917,6 +2110,30 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
}
}
+bool X86TargetInfo::hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("aes", HasAES)
+ .Case("avx", SSELevel >= AVX)
+ .Case("avx2", SSELevel >= AVX2)
+ .Case("bmi", HasBMI)
+ .Case("bmi2", HasBMI2)
+ .Case("fma4", HasFMA4)
+ .Case("lzcnt", HasLZCNT)
+ .Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
+ .Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon)
+ .Case("mmx", MMX3DNowLevel >= MMX)
+ .Case("popcnt", HasPOPCNT)
+ .Case("sse", SSELevel >= SSE1)
+ .Case("sse2", SSELevel >= SSE2)
+ .Case("sse3", SSELevel >= SSE3)
+ .Case("ssse3", SSELevel >= SSSE3)
+ .Case("sse41", SSELevel >= SSE41)
+ .Case("sse42", SSELevel >= SSE42)
+ .Case("x86", true)
+ .Case("x86_32", PointerWidth == 32)
+ .Case("x86_64", PointerWidth == 64)
+ .Default(false);
+}
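
hasFeature answers cumulative queries, matching the fall-through macro block above; a usage sketch (hypothetical caller TI):

    // Because SSE levels are ordered, a target built with -mavx satisfies
    // every lower query as well (illustrative calls):
    //   TI.hasFeature("avx")   -> true
    //   TI.hasFeature("sse42") -> true   (AVX >= SSE42)
    //   TI.hasFeature("avx2")  -> false  (AVX < AVX2)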
bool
X86TargetInfo::validateAsmConstraint(const char *&Name,
@@ -1959,7 +2176,6 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
// x86_64 instructions.
return true;
}
- return false;
}
@@ -1992,6 +2208,7 @@ public:
DoubleAlign = LongLongAlign = 32;
LongDoubleWidth = 96;
LongDoubleAlign = 32;
+ SuitableAlign = 128;
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-"
"a0:0:64-f80:32:32-n8:16:32-S128";
@@ -2023,6 +2240,20 @@ public:
} // end anonymous namespace
namespace {
+class NetBSDI386TargetInfo : public NetBSDTargetInfo<X86_32TargetInfo> {
+public:
+ NetBSDI386TargetInfo(const std::string &triple) :
+ NetBSDTargetInfo<X86_32TargetInfo>(triple) {
+ }
+
+ virtual unsigned getFloatEvalMethod() const {
+ // NetBSD defaults to "double" precision float evaluation (FLT_EVAL_METHOD 1)
+ return 1;
+ }
+};
+} // end anonymous namespace
+
+namespace {
class OpenBSDI386TargetInfo : public OpenBSDTargetInfo<X86_32TargetInfo> {
public:
OpenBSDI386TargetInfo(const std::string& triple) :
@@ -2041,6 +2272,7 @@ public:
DarwinTargetInfo<X86_32TargetInfo>(triple) {
LongDoubleWidth = 128;
LongDoubleAlign = 128;
+ SuitableAlign = 128;
SizeType = UnsignedLong;
IntPtrType = SignedLong;
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
@@ -2233,6 +2465,7 @@ public:
LongDoubleAlign = 128;
LargeArrayMinWidth = 128;
LargeArrayAlign = 128;
+ SuitableAlign = 128;
IntMaxType = SignedLong;
UIntMaxType = UnsignedLong;
Int64Type = SignedLong;
@@ -2245,6 +2478,9 @@ public:
// Use fpret only for long double.
RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
+ // Use fp2ret for _Complex long double.
+ ComplexLongDoubleUsesFP2Ret = true;
+
// x86-64 has atomics up to 16 bytes.
// FIXME: Once the backend is fixed, increase MaxAtomicInlineWidth to 128
// on CPUs with cmpxchg16b
@@ -2399,8 +2635,11 @@ public:
ARMTargetInfo(const std::string &TripleStr)
: TargetInfo(TripleStr), ABI("aapcs-linux"), CPU("arm1136j-s")
{
+ BigEndian = false;
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
+ // AAPCS 7.1.1, ARM-Linux ABI 2.4: type of wchar_t is unsigned int.
+ WCharType = UnsignedInt;
// {} in inline assembly are neon specifiers, not assembly variant
// specifiers.
@@ -2426,6 +2665,12 @@ public:
// ARM has atomics up to 8 bytes
// FIXME: Set MaxAtomicInlineWidth if we have the feature v6e
MaxAtomicPromoteWidth = 64;
+
+ // Do force alignment of members that follow zero-length bitfields. If
+ // the alignment of the zero-length bitfield is greater than that of the
+ // member that follows it, `bar', then `bar' will be aligned to the type
+ // of the zero-length bitfield.
+ UseZeroLengthBitfieldAlignment = true;
}
virtual const char *getABI() const { return ABI.c_str(); }
virtual bool setABI(const std::string &Name) {
@@ -2436,19 +2681,16 @@ public:
// FIXME: We need support for -meabi... we could just mangle it into the
// name.
if (Name == "apcs-gnu") {
- DoubleAlign = LongLongAlign = LongDoubleAlign = 32;
+ DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
SizeType = UnsignedLong;
+ // Revert to using SignedInt on apcs-gnu to comply with existing behaviour.
+ WCharType = SignedInt;
+
// Do not respect the alignment of bit-field types when laying out
// structures. This corresponds to PCC_BITFIELD_TYPE_MATTERS in gcc.
UseBitFieldTypeAlignment = false;
- /// Do force alignment of members that follow zero length bitfields. If
- /// the alignment of the zero-length bitfield is greater than the member
- /// that follows it, `bar', `bar' will be aligned as the type of the
- /// zero length bitfield.
- UseZeroLengthBitfieldAlignment = true;
-
/// gcc forces the alignment to 4 bytes, regardless of the type of the
/// zero length bitfield. This corresponds to EMPTY_FIELD_BOUNDARY in
/// gcc.
@@ -2485,10 +2727,11 @@ public:
}
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
- const std::string &Name,
+ StringRef Name,
bool Enabled) const {
if (Name == "soft-float" || Name == "soft-float-abi" ||
- Name == "vfp2" || Name == "vfp3" || Name == "neon") {
+ Name == "vfp2" || Name == "vfp3" || Name == "neon" || Name == "d16" ||
+ Name == "neonfp") {
Features[Name] = Enabled;
} else
return false;
@@ -2522,6 +2765,15 @@ public:
Features.erase(it);
}
+ virtual bool hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("arm", true)
+ .Case("softfloat", SoftFloat)
+ .Case("thumb", IsThumb)
+ .Case("neon", FPU == NeonFPU && !SoftFloat &&
+ StringRef(getCPUDefineSuffix(CPU)).startswith("7"))
+ .Default(false);
+ }
static const char *getCPUDefineSuffix(StringRef Name) {
return llvm::StringSwitch<const char*>(Name)
.Cases("arm8", "arm810", "4")
@@ -2540,6 +2792,7 @@ public:
.Cases("arm1156t2-s", "arm1156t2f-s", "6T2")
.Cases("cortex-a8", "cortex-a9", "7A")
.Case("cortex-m3", "7M")
+ .Case("cortex-m4", "7M")
.Case("cortex-m0", "6M")
.Default(0);
}
@@ -2606,6 +2859,7 @@ public:
Records = BuiltinInfo;
NumRecords = clang::ARM::LastTSBuiltin-Builtin::FirstTSBuiltin;
}
+ virtual bool isCLZForZeroUndef() const { return false; }
virtual const char *getVAListDeclaration() const {
return "typedef void* __builtin_va_list;";
}
@@ -2729,7 +2983,6 @@ const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
};
} // end anonymous namespace.
-
namespace {
class DarwinARMTargetInfo :
public DarwinTargetInfo<ARMTargetInfo> {
@@ -2750,6 +3003,154 @@ public:
};
} // end anonymous namespace.
+
+namespace {
+// Hexagon abstract base class
+class HexagonTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+ static const char * const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ std::string CPU;
+public:
+ HexagonTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ BigEndian = false;
+ DescriptionString = ("e-p:32:32:32-"
+ "i64:64:64-i32:32:32-"
+ "i16:16:16-i1:32:32-a:0:0");
+
+ // {} in inline assembly are packet specifiers, not assembly variant
+ // specifiers.
+ NoAsmVariants = true;
+ }
+
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = BuiltinInfo;
+ NumRecords = clang::Hexagon::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ }
+
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ return true;
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "hexagon";
+ }
+
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual const char *getClobbers() const {
+ return "";
+ }
+
+ static const char *getHexagonCPUSuffix(StringRef Name) {
+ return llvm::StringSwitch<const char*>(Name)
+ .Case("hexagonv2", "2")
+ .Case("hexagonv3", "3")
+ .Case("hexagonv4", "4")
+ .Default(0);
+ }
+
+ virtual bool setCPU(const std::string &Name) {
+ if (!getHexagonCPUSuffix(Name))
+ return false;
+
+ CPU = Name;
+ return true;
+ }
+};
+
+void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("qdsp6");
+ Builder.defineMacro("__qdsp6", "1");
+ Builder.defineMacro("__qdsp6__", "1");
+
+ Builder.defineMacro("hexagon");
+ Builder.defineMacro("__hexagon", "1");
+ Builder.defineMacro("__hexagon__", "1");
+
+ if(CPU == "hexagonv1") {
+ Builder.defineMacro("__HEXAGON_V1__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "1");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V1__");
+ Builder.defineMacro("__QDSP6_ARCH__", "1");
+ }
+ }
+ else if(CPU == "hexagonv2") {
+ Builder.defineMacro("__HEXAGON_V2__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "2");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V2__");
+ Builder.defineMacro("__QDSP6_ARCH__", "2");
+ }
+ }
+ else if(CPU == "hexagonv3") {
+ Builder.defineMacro("__HEXAGON_V3__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "3");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V3__");
+ Builder.defineMacro("__QDSP6_ARCH__", "3");
+ }
+ }
+ else if(CPU == "hexagonv4") {
+ Builder.defineMacro("__HEXAGON_V4__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "4");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V4__");
+ Builder.defineMacro("__QDSP6_ARCH__", "4");
+ }
+ }
+}
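
For a concrete case, building with -mcpu=hexagonv4 should yield, following the chain above:

    // Expected predefines for CPU == "hexagonv4":
    //   #define __HEXAGON_V4__ 1
    //   #define __HEXAGON_ARCH__ 4
    // plus __QDSP6_V4__ / __QDSP6_ARCH__ 4 when HexagonQdsp6Compat is set.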
+
+const char * const HexagonTargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "p0", "p1", "p2", "p3",
+ "sa0", "lc0", "sa1", "lc1", "m0", "m1", "usr", "ugp"
+};
+
+void HexagonTargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+
+const TargetInfo::GCCRegAlias HexagonTargetInfo::GCCRegAliases[] = {
+ { { "sp" }, "r29" },
+ { { "fp" }, "r30" },
+ { { "lr" }, "r31" },
+ };
+
+void HexagonTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+
+
+const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsHexagon.def"
+};
+}
+
+
namespace {
class SparcV8TargetInfo : public TargetInfo {
static const TargetInfo::GCCRegAlias GCCRegAliases[];
@@ -2758,11 +3159,12 @@ class SparcV8TargetInfo : public TargetInfo {
public:
SparcV8TargetInfo(const std::string& triple) : TargetInfo(triple) {
// FIXME: Support Sparc quad-precision long double?
+ BigEndian = false;
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
}
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
- const std::string &Name,
+ StringRef Name,
bool Enabled) const {
if (Name == "soft-float")
Features[Name] = Enabled;
@@ -2786,6 +3188,14 @@ public:
if (SoftFloat)
Builder.defineMacro("SOFT_FLOAT", "1");
}
+
+ virtual bool hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("softfloat", SoftFloat)
+ .Case("sparc", true)
+ .Default(false);
+ }
+
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
// FIXME: Implement!
@@ -2887,11 +3297,13 @@ namespace {
static const char * const GCCRegNames[];
public:
MSP430TargetInfo(const std::string& triple) : TargetInfo(triple) {
+ BigEndian = false;
TLSSupported = false;
IntWidth = 16; IntAlign = 16;
LongWidth = 32; LongLongWidth = 64;
LongAlign = LongLongAlign = 16;
PointerWidth = 16; PointerAlign = 16;
+ SuitableAlign = 16;
SizeType = UnsignedInt;
IntMaxType = SignedLong;
UIntMaxType = UnsignedLong;
@@ -2912,6 +3324,9 @@ namespace {
Records = 0;
NumRecords = 0;
}
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "msp430";
+ }
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const;
virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
@@ -2947,140 +3362,6 @@ namespace {
}
}
-
-namespace {
- class SystemZTargetInfo : public TargetInfo {
- static const char * const GCCRegNames[];
- public:
- SystemZTargetInfo(const std::string& triple) : TargetInfo(triple) {
- TLSSupported = false;
- IntWidth = IntAlign = 32;
- LongWidth = LongLongWidth = LongAlign = LongLongAlign = 64;
- PointerWidth = PointerAlign = 64;
- DescriptionString = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16-n32:64";
- }
- virtual void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- Builder.defineMacro("__s390__");
- Builder.defineMacro("__s390x__");
- }
- virtual void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const {
- // FIXME: Implement.
- Records = 0;
- NumRecords = 0;
- }
-
- virtual void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const;
- virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {
- // No aliases.
- Aliases = 0;
- NumAliases = 0;
- }
- virtual bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const {
- // FIXME: implement
- return true;
- }
- virtual const char *getClobbers() const {
- // FIXME: Is this really right?
- return "";
- }
- virtual const char *getVAListDeclaration() const {
- // FIXME: implement
- return "typedef char* __builtin_va_list;";
- }
- };
-
- const char * const SystemZTargetInfo::GCCRegNames[] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
- };
-
- void SystemZTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
- }
-}
-
-namespace {
- class BlackfinTargetInfo : public TargetInfo {
- static const char * const GCCRegNames[];
- public:
- BlackfinTargetInfo(const std::string& triple) : TargetInfo(triple) {
- TLSSupported = false;
- DoubleAlign = 32;
- LongLongAlign = 32;
- LongDoubleAlign = 32;
- DescriptionString = "e-p:32:32-i64:32-f64:32-n32";
- }
-
- virtual void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- DefineStd(Builder, "bfin", Opts);
- DefineStd(Builder, "BFIN", Opts);
- Builder.defineMacro("__ADSPBLACKFIN__");
- // FIXME: This one is really dependent on -mcpu
- Builder.defineMacro("__ADSPLPBLACKFIN__");
- // FIXME: Add cpu-dependent defines and __SILICON_REVISION__
- }
-
- virtual void getTargetBuiltins(const Builtin::Info *&Records,
- unsigned &NumRecords) const {
- // FIXME: Implement.
- Records = 0;
- NumRecords = 0;
- }
-
- virtual void getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const;
-
- virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
- unsigned &NumAliases) const {
- // No aliases.
- Aliases = 0;
- NumAliases = 0;
- }
-
- virtual bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &Info) const {
- if (strchr("adzDWeABbvfcCtukxywZY", Name[0])) {
- Info.setAllowsRegister();
- return true;
- }
- return false;
- }
-
- virtual const char *getClobbers() const {
- return "";
- }
-
- virtual const char *getVAListDeclaration() const {
- return "typedef char* __builtin_va_list;";
- }
- };
-
- const char * const BlackfinTargetInfo::GCCRegNames[] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "p0", "p1", "p2", "p3", "p4", "p5", "sp", "fp",
- "i0", "i1", "i2", "i3", "b0", "b1", "b2", "b3",
- "l0", "l1", "l2", "l3", "m0", "m1", "m2", "m3",
- "a0", "a1", "cc",
- "rets", "reti", "retx", "retn", "rete", "astat", "seqstat", "usp",
- "argp", "lt0", "lt1", "lc0", "lc1", "lb0", "lb1"
- };
-
- void BlackfinTargetInfo::getGCCRegNames(const char * const *&Names,
- unsigned &NumNames) const {
- Names = GCCRegNames;
- NumNames = llvm::array_lengthof(GCCRegNames);
- }
-}
-
namespace {
// LLVM and Clang cannot be used directly to output native binaries for
@@ -3107,6 +3388,7 @@ namespace {
IntAlign = 32;
LongAlign = LongLongAlign = 32;
PointerAlign = 32;
+ SuitableAlign = 32;
SizeType = UnsignedInt;
IntMaxType = SignedLong;
UIntMaxType = UnsignedLong;
@@ -3134,6 +3416,10 @@ namespace {
Builder.defineMacro("__TCE__");
Builder.defineMacro("__TCE_V1__");
}
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "tce";
+ }
+
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {}
virtual const char *getClobbers() const {
@@ -3156,14 +3442,22 @@ namespace {
namespace {
class MipsTargetInfoBase : public TargetInfo {
std::string CPU;
+ bool SoftFloat;
+ bool SingleFloat;
+
protected:
std::string ABI;
+
public:
- MipsTargetInfoBase(const std::string& triple, const std::string& ABIStr)
- : TargetInfo(triple), ABI(ABIStr) {
- SizeType = UnsignedInt;
- PtrDiffType = SignedInt;
- }
+ MipsTargetInfoBase(const std::string& triple,
+ const std::string& ABIStr,
+ const std::string& CPUStr)
+ : TargetInfo(triple),
+ CPU(CPUStr),
+ SoftFloat(false), SingleFloat(false),
+ ABI(ABIStr)
+ {}
+
virtual const char *getABI() const { return ABI.c_str(); }
virtual bool setABI(const std::string &Name) = 0;
virtual bool setCPU(const std::string &Name) {
@@ -3174,28 +3468,50 @@ public:
Features[ABI] = true;
Features[CPU] = true;
}
+
virtual void getArchDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const = 0;
+ MacroBuilder &Builder) const {
+ if (SoftFloat)
+ Builder.defineMacro("__mips_soft_float", Twine(1));
+ else if (SingleFloat)
+ Builder.defineMacro("__mips_single_float", Twine(1));
+ else if (!SoftFloat && !SingleFloat)
+ Builder.defineMacro("__mips_hard_float", Twine(1));
+ else
+ llvm_unreachable("Invalid float ABI for Mips.");
+
+ Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0)));
+ Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth()));
+ Builder.defineMacro("_MIPS_SZLONG", Twine(getLongWidth()));
+ }
+
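
The size macros come straight from the target's layout fields; for the two ABIs defined below, the expected values (derived from the constructors that follow) are:

    // o32 (Mips32TargetInfoBase): 32-bit pointers and longs
    //   _MIPS_SZPTR 32, _MIPS_SZINT 32, _MIPS_SZLONG 32
    // n64 (Mips64TargetInfoBase): 64-bit pointers and longs
    //   _MIPS_SZPTR 64, _MIPS_SZINT 32, _MIPS_SZLONG 64
    // n32 narrows pointers and longs back to 32 in setABI().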
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const = 0;
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
// FIXME: Implement!
}
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "mips";
+ }
virtual const char *getVAListDeclaration() const {
return "typedef void* __builtin_va_list;";
}
virtual void getGCCRegNames(const char * const *&Names,
unsigned &NumNames) const {
static const char * const GCCRegNames[] = {
+ // CPU register names
+ // Must match second column of GCCRegAliases
"$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
"$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
- "$24", "$25", "$26", "$27", "$28", "$sp", "$fp", "$31",
+ "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
+ // Floating point register names
"$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
"$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
"$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
"$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
+ // Hi/lo and condition register names
"hi", "lo", "", "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4",
"$fcc5","$fcc6","$fcc7"
};
@@ -3208,26 +3524,64 @@ public:
TargetInfo::ConstraintInfo &Info) const {
switch (*Name) {
default:
+ return false;
+
case 'r': // CPU registers.
case 'd': // Equivalent to "r" unless generating MIPS16 code.
case 'y': // Equivalent to "r", backwards compatibility only.
case 'f': // floating-point registers.
+ case 'c': // $25 for indirect jumps
+ case 'l': // lo register
+ case 'x': // hilo register pair
Info.setAllowsRegister();
return true;
}
- return false;
}
virtual const char *getClobbers() const {
// FIXME: Implement!
return "";
}
+
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
+ if (Name == "soft-float" || Name == "single-float") {
+ Features[Name] = Enabled;
+ return true;
+ }
+ return false;
+ }
+
+ virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
+ SoftFloat = false;
+ SingleFloat = false;
+
+ for (std::vector<std::string>::iterator it = Features.begin(),
+ ie = Features.end(); it != ie; ++it) {
+ if (*it == "+single-float") {
+ SingleFloat = true;
+ break;
+ }
+
+ if (*it == "+soft-float") {
+ SoftFloat = true;
+ // This option is front-end specific; there is no
+ // need to pass it to the backend.
+ Features.erase(it);
+ break;
+ }
+ }
+ }
};
class Mips32TargetInfoBase : public MipsTargetInfoBase {
public:
Mips32TargetInfoBase(const std::string& triple) :
- MipsTargetInfoBase(triple, "o32") {}
+ MipsTargetInfoBase(triple, "o32", "mips32") {
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ }
virtual bool setABI(const std::string &Name) {
if ((Name == "o32") || (Name == "eabi")) {
ABI = Name;
@@ -3237,6 +3591,8 @@ public:
}
virtual void getArchDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ MipsTargetInfoBase::getArchDefines(Opts, Builder);
+
if (ABI == "o32") {
Builder.defineMacro("__mips_o32");
Builder.defineMacro("_ABIO32", "1");
@@ -3278,8 +3634,8 @@ public:
{ { "k0" }, "$26" },
{ { "k1" }, "$27" },
{ { "gp" }, "$28" },
- { { "sp" }, "$29" },
- { { "fp" }, "$30" },
+ { { "sp","$sp" }, "$29" },
+ { { "fp","$fp" }, "$30" },
{ { "ra" }, "$31" }
};
Aliases = GCCRegAliases;
@@ -3307,6 +3663,7 @@ public:
class Mips32ELTargetInfo : public Mips32TargetInfoBase {
public:
Mips32ELTargetInfo(const std::string& triple) : Mips32TargetInfoBase(triple) {
+ BigEndian = false;
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
}
@@ -3325,17 +3682,32 @@ class Mips64TargetInfoBase : public MipsTargetInfoBase {
virtual void SetDescriptionString(const std::string &Name) = 0;
public:
Mips64TargetInfoBase(const std::string& triple) :
- MipsTargetInfoBase(triple, "n64") {}
+ MipsTargetInfoBase(triple, "n64", "mips64") {
+ LongWidth = LongAlign = 64;
+ PointerWidth = PointerAlign = 64;
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ SuitableAlign = 128;
+ }
virtual bool setABI(const std::string &Name) {
SetDescriptionString(Name);
- if ((Name == "n32") || (Name == "n64")) {
- ABI = Name;
- return true;
- } else
+
+ if (Name != "n32" && Name != "n64")
return false;
+
+ ABI = Name;
+
+ if (Name == "n32") {
+ LongWidth = LongAlign = 32;
+ PointerWidth = PointerAlign = 32;
+ }
+
+ return true;
}
virtual void getArchDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ MipsTargetInfoBase::getArchDefines(Opts, Builder);
+
if (ABI == "n32") {
Builder.defineMacro("__mips_n32");
Builder.defineMacro("_ABIN32", "2");
@@ -3380,8 +3752,8 @@ public:
{ { "k0" }, "$26" },
{ { "k1" }, "$27" },
{ { "gp" }, "$28" },
- { { "sp" }, "$29" },
- { { "fp" }, "$30" },
+ { { "sp","$sp" }, "$29" },
+ { { "fp","$fp" }, "$30" },
{ { "ra" }, "$31" }
};
Aliases = GCCRegAliases;
@@ -3394,13 +3766,15 @@ class Mips64EBTargetInfo : public Mips64TargetInfoBase {
// Change DescriptionString only if ABI is n32.
if (Name == "n32")
DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v64:64:64-n32";
}
public:
Mips64EBTargetInfo(const std::string& triple) : Mips64TargetInfoBase(triple) {
// Default ABI is n64.
DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v64:64:64-n32";
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
@@ -3418,13 +3792,16 @@ class Mips64ELTargetInfo : public Mips64TargetInfoBase {
// Change DescriptionString only if ABI is n32.
if (Name == "n32")
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128"
+ "-v64:64:64-n32";
}
public:
Mips64ELTargetInfo(const std::string& triple) : Mips64TargetInfoBase(triple) {
- // Default ABI is n64.
+ // Default ABI is n64.
+ BigEndian = false;
DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v64:64:64-n32";
}
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
@@ -3442,6 +3819,7 @@ namespace {
class PNaClTargetInfo : public TargetInfo {
public:
PNaClTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ BigEndian = false;
this->UserLabelPrefix = "";
this->LongAlign = 32;
this->LongWidth = 32;
@@ -3477,9 +3855,13 @@ public:
if (Opts.CPlusPlus)
Builder.defineMacro("_GNU_SOURCE");
+ Builder.defineMacro("__LITTLE_ENDIAN__");
Builder.defineMacro("__native_client__");
getArchDefines(Opts, Builder);
}
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "pnacl";
+ }
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
}
@@ -3526,6 +3908,9 @@ static TargetInfo *AllocateTarget(const std::string &T) {
default:
return NULL;
+ case llvm::Triple::hexagon:
+ return new HexagonTargetInfo(T);
+
case llvm::Triple::arm:
case llvm::Triple::thumb:
if (Triple.isOSDarwin())
@@ -3544,11 +3929,6 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new ARMTargetInfo(T);
}
- case llvm::Triple::bfin:
- if ( os == llvm::Triple::RTEMS )
- return new RTEMSTargetInfo<BlackfinTargetInfo>(T);
- return new BlackfinTargetInfo(T);
-
case llvm::Triple::msp430:
return new MSP430TargetInfo(T);
@@ -3676,9 +4056,6 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::cellspu:
return new PS3SPUTargetInfo<PPC64TargetInfo>(T);
- case llvm::Triple::systemz:
- return new SystemZTargetInfo(T);
-
case llvm::Triple::tce:
return new TCETargetInfo(T);
@@ -3694,7 +4071,7 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::DragonFly:
return new DragonFlyBSDTargetInfo<X86_32TargetInfo>(T);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<X86_32TargetInfo>(T);
+ return new NetBSDI386TargetInfo(T);
case llvm::Triple::OpenBSD:
return new OpenBSDI386TargetInfo(T);
case llvm::Triple::FreeBSD:
@@ -3753,7 +4130,7 @@ TargetInfo *TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
llvm::Triple Triple(Opts.Triple);
// Construct the target
- llvm::OwningPtr<TargetInfo> Target(AllocateTarget(Triple.str()));
+ OwningPtr<TargetInfo> Target(AllocateTarget(Triple.str()));
if (!Target) {
Diags.Report(diag::err_target_unknown_triple) << Triple.str();
return 0;
@@ -3821,8 +4198,7 @@ TargetInfo *TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
Opts.Features.clear();
for (llvm::StringMap<bool>::const_iterator it = Features.begin(),
ie = Features.end(); it != ie; ++it)
- Opts.Features.push_back(std::string(it->second ? "+" : "-") +
- it->first().str());
+ Opts.Features.push_back((it->second ? "+" : "-") + it->first().str());
Target->HandleTargetFeatures(Opts.Features);
return Target.take();
diff --git a/contrib/llvm/tools/clang/lib/Basic/Version.cpp b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
index 526b2b4..36138ac 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Version.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
@@ -19,7 +19,7 @@
#include <cstdlib>
namespace clang {
-
+
std::string getClangRepositoryPath() {
#if defined(CLANG_REPOSITORY_STRING)
return CLANG_REPOSITORY_STRING;
@@ -32,7 +32,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- static StringRef SVNRepository("$URL: http://llvm.org/svn/llvm-project/cfe/tags/RELEASE_30/final/lib/Basic/Version.cpp $");
+ static StringRef SVNRepository("$URL: http://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
@@ -50,6 +50,23 @@ std::string getClangRepositoryPath() {
#endif
}
+std::string getLLVMRepositoryPath() {
+#ifdef LLVM_REPOSITORY
+ StringRef URL(LLVM_REPOSITORY);
+#else
+ StringRef URL("");
+#endif
+
+ // Trim the path prefix off, assuming the path came from a standard llvm layout.
+ // Leave "llvm/" prefix to distinguish the following llvm revision from the
+ // clang revision.
+ size_t Start = URL.find("llvm/");
+ if (Start != StringRef::npos)
+ URL = URL.substr(Start);
+
+ return URL;
+}
+
std::string getClangRevision() {
#ifdef SVN_REVISION
return SVN_REVISION;
@@ -58,29 +75,50 @@ std::string getClangRevision() {
#endif
}
+std::string getLLVMRevision() {
+#ifdef LLVM_REVISION
+ return LLVM_REVISION;
+#else
+ return "";
+#endif
+}
+
std::string getClangFullRepositoryVersion() {
std::string buf;
llvm::raw_string_ostream OS(buf);
std::string Path = getClangRepositoryPath();
std::string Revision = getClangRevision();
- if (!Path.empty())
- OS << Path;
- if (!Revision.empty()) {
+ if (!Path.empty() || !Revision.empty()) {
+ OS << '(';
if (!Path.empty())
- OS << ' ';
- OS << Revision;
+ OS << Path;
+ if (!Revision.empty()) {
+ if (!Path.empty())
+ OS << ' ';
+ OS << Revision;
+ }
+ OS << ')';
+ }
+ // Support LLVM in a separate repository.
+ std::string LLVMRev = getLLVMRevision();
+ if (!LLVMRev.empty() && LLVMRev != Revision) {
+ OS << " (";
+ std::string LLVMRepo = getLLVMRepositoryPath();
+ if (!LLVMRepo.empty())
+ OS << LLVMRepo << ' ';
+ OS << LLVMRev << ')';
}
return OS.str();
}
-
+
std::string getClangFullVersion() {
std::string buf;
llvm::raw_string_ostream OS(buf);
#ifdef CLANG_VENDOR
OS << CLANG_VENDOR;
#endif
- OS << "clang version " CLANG_VERSION_STRING " ("
- << getClangFullRepositoryVersion() << ')';
+ OS << "clang version " CLANG_VERSION_STRING " "
+ << getClangFullRepositoryVersion();
#ifdef CLANG_VENDOR_SUFFIX
OS << CLANG_VENDOR_SUFFIX;
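
With the parenthesized pieces now built inside getClangFullRepositoryVersion, a development build carrying a separate LLVM checkout would print something like (hypothetical revision numbers):

    clang version 3.1 (trunk 150000) (llvm/trunk 150002)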
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
index 1381238..2853bc8 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -42,7 +42,8 @@ namespace clang {
/// type, or by coercing to another specified type stored in
/// 'CoerceToType'). If an offset is specified (in UIntData), then the
/// argument passed is offset by some number of bytes in the memory
- /// representation.
+ /// representation. A dummy argument is emitted before the real argument
+ /// if the type stored in "PaddingType" is non-null.
Direct,
/// Extend - Valid only for integer argument types. Same as 'direct'
@@ -69,19 +70,22 @@ namespace clang {
private:
Kind TheKind;
llvm::Type *TypeData;
+ llvm::Type *PaddingType; // Currently allowed only for Direct.
unsigned UIntData;
bool BoolData0;
bool BoolData1;
- ABIArgInfo(Kind K, llvm::Type *TD=0,
- unsigned UI=0, bool B0 = false, bool B1 = false)
- : TheKind(K), TypeData(TD), UIntData(UI), BoolData0(B0), BoolData1(B1) {}
+ ABIArgInfo(Kind K, llvm::Type *TD=0, unsigned UI=0,
+ bool B0 = false, bool B1 = false, llvm::Type* P = 0)
+ : TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
+ BoolData1(B1) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
- static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0) {
- return ABIArgInfo(Direct, T, Offset);
+ static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
+ llvm::Type *Padding = 0) {
+ return ABIArgInfo(Direct, T, Offset, false, false, Padding);
}
static ABIArgInfo getExtend(llvm::Type *T = 0) {
return ABIArgInfo(Extend, T, 0);
@@ -113,6 +117,11 @@ namespace clang {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
return UIntData;
}
+
+ llvm::Type *getPaddingType() const {
+ return PaddingType;
+ }
+
llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
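
A sketch of how a target ABI might request the new padding, mirroring the getDirect overload above (Ctx is an assumed llvm::LLVMContext, not taken from the patch):

    // Pass a dummy i32 ahead of the real argument so the backend keeps
    // the next slot aligned (illustrative only):
    ABIArgInfo Info =
        ABIArgInfo::getDirect(/*T=*/0, /*Offset=*/0,
                              /*Padding=*/llvm::Type::getInt32Ty(Ctx));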
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
index b9e3ed9..2f44711 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -43,7 +43,7 @@ namespace {
class EmitAssemblyHelper {
DiagnosticsEngine &Diags;
const CodeGenOptions &CodeGenOpts;
- const TargetOptions &TargetOpts;
+ const clang::TargetOptions &TargetOpts;
const LangOptions &LangOpts;
Module *TheModule;
@@ -87,7 +87,8 @@ private:
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
- const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
+ const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
const LangOptions &LOpts,
Module *M)
: Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
@@ -105,6 +106,11 @@ public:
}
+static void addObjCARCAPElimPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCAPElimPass());
+}
+
static void addObjCARCExpandPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
if (Builder.OptLevel > 0)
PM.add(createObjCARCExpandPass());
@@ -115,6 +121,16 @@ static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase
PM.add(createObjCARCOptPass());
}
+static void addAddressSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createAddressSanitizerPass());
+}
+
+static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createThreadSanitizerPass());
+}
+
void EmitAssemblyHelper::CreatePasses() {
unsigned OptLevel = CodeGenOpts.OptimizationLevel;
CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
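
The sanitizer hookup relies on PassManagerBuilder's extension mechanism: addExtension() records a (point, callback) pair, and the builder fires the callbacks while assembling the pipeline, so registering at both EP_ScalarOptimizerLate and EP_EnabledOnOptLevel0 covers optimized and -O0 builds alike. A standalone model of that mechanism, with all names (PipelineBuilder, Hook, populate) illustrative rather than LLVM's:

#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

enum ExtensionPoint { ScalarOptimizerLate, EnabledOnOptLevel0 };

struct PipelineBuilder {
  using Hook = std::function<void(const PipelineBuilder &)>;
  unsigned OptLevel = 2;
  std::vector<std::pair<ExtensionPoint, Hook>> Hooks;

  void addExtension(ExtensionPoint EP, Hook H) {
    Hooks.emplace_back(EP, std::move(H));
  }
  void runHooks(ExtensionPoint EP) const {
    for (const auto &H : Hooks)
      if (H.first == EP)
        H.second(*this);
  }
  void populate() const {
    // The real builder interleaves these with the standard passes.
    if (OptLevel == 0)
      runHooks(EnabledOnOptLevel0);
    else
      runHooks(ScalarOptimizerLate);
  }
};

int main() {
  PipelineBuilder B;
  auto AddASan = [](const PipelineBuilder &) { std::puts("add asan pass"); };
  // Registering at both points guarantees the pass runs at any -O level.
  B.addExtension(ScalarOptimizerLate, AddASan);
  B.addExtension(EnabledOnOptLevel0, AddASan);
  B.populate();      // -O2 path
  B.OptLevel = 0;
  B.populate();      // -O0 path
}
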
@@ -138,10 +154,26 @@ void EmitAssemblyHelper::CreatePasses() {
if (LangOpts.ObjCAutoRefCount) {
PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
addObjCARCExpandPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
+ addObjCARCAPElimPass);
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addObjCARCOptPass);
}
-
+
+ if (LangOpts.AddressSanitizer) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addAddressSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addAddressSanitizerPass);
+ }
+
+ if (LangOpts.ThreadSanitizer) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addThreadSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addThreadSanitizerPass);
+ }
+
// Figure out TargetLibraryInfo.
Triple TargetTriple(TheModule->getTargetTriple());
PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
@@ -164,7 +196,11 @@ void EmitAssemblyHelper::CreatePasses() {
}
case CodeGenOptions::OnlyAlwaysInlining:
// Respect always_inline.
- PMBuilder.Inliner = createAlwaysInlinerPass();
+ if (OptLevel == 0)
+ // Do not insert lifetime intrinsics at -O0.
+ PMBuilder.Inliner = createAlwaysInlinerPass(false);
+ else
+ PMBuilder.Inliner = createAlwaysInlinerPass();
break;
}
@@ -206,35 +242,6 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
// being gross, this is also totally broken if we ever care about
// concurrency.
- // Set frame pointer elimination mode.
- if (!CodeGenOpts.DisableFPElim) {
- llvm::NoFramePointerElim = false;
- llvm::NoFramePointerElimNonLeaf = false;
- } else if (CodeGenOpts.OmitLeafFramePointer) {
- llvm::NoFramePointerElim = false;
- llvm::NoFramePointerElimNonLeaf = true;
- } else {
- llvm::NoFramePointerElim = true;
- llvm::NoFramePointerElimNonLeaf = true;
- }
-
- // Set float ABI type.
- if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
- llvm::FloatABIType = llvm::FloatABI::Soft;
- else if (CodeGenOpts.FloatABI == "hard")
- llvm::FloatABIType = llvm::FloatABI::Hard;
- else {
- assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
- llvm::FloatABIType = llvm::FloatABI::Default;
- }
-
- llvm::LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
- llvm::NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
- llvm::NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
- NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- llvm::UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
- llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
-
TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
@@ -255,7 +262,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
CM = llvm::CodeModel::Default;
}
- std::vector<const char *> BackendArgs;
+ SmallVector<const char *, 16> BackendArgs;
BackendArgs.push_back("clang"); // Fake program name.
if (!CodeGenOpts.DebugPass.empty()) {
BackendArgs.push_back("-debug-pass");
@@ -273,7 +280,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
BackendArgs.push_back("-global-merge=false");
BackendArgs.push_back(0);
llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
- const_cast<char **>(&BackendArgs[0]));
+ BackendArgs.data());
std::string FeaturesStr;
if (TargetOpts.Features.size()) {
@@ -296,8 +303,52 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
RM = llvm::Reloc::DynamicNoPIC;
}
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ llvm::TargetOptions Options;
+
+ // Set frame pointer elimination mode.
+ if (!CodeGenOpts.DisableFPElim) {
+ Options.NoFramePointerElim = false;
+ Options.NoFramePointerElimNonLeaf = false;
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ Options.NoFramePointerElim = false;
+ Options.NoFramePointerElimNonLeaf = true;
+ } else {
+ Options.NoFramePointerElim = true;
+ Options.NoFramePointerElimNonLeaf = true;
+ }
+
+ // Set float ABI type.
+ if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
+ Options.FloatABIType = llvm::FloatABI::Soft;
+ else if (CodeGenOpts.FloatABI == "hard")
+ Options.FloatABIType = llvm::FloatABI::Hard;
+ else {
+ assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
+ Options.FloatABIType = llvm::FloatABI::Default;
+ }
+
+ Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
+ Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
+ Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+ Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+ Options.UseSoftFloat = CodeGenOpts.SoftFloat;
+ Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
+ Options.RealignStack = CodeGenOpts.StackRealignment;
+ Options.DisableTailCalls = CodeGenOpts.DisableTailCalls;
+ Options.TrapFuncName = CodeGenOpts.TrapFuncName;
+ Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
+
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
- FeaturesStr, RM, CM);
+ FeaturesStr, Options,
+ RM, CM, OptLevel);
if (CodeGenOpts.RelaxAll)
TM->setMCRelaxAll(true);
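
This hunk is the payoff of the deleted global-flag block above: the same frame-pointer and floating-point settings now travel in an llvm::TargetOptions value handed to createTargetMachine, so the configuration is per-target-machine instead of process-wide mutable state (the "totally broken if we ever care about concurrency" comment earlier in this function). A standalone sketch of why that matters; the Options and Machine types are illustrative:

#include <cstdio>

// Before: one process-wide setting shared by every compilation.
namespace globals { bool NoFramePointerElim = false; }

// After: each target machine carries its own copy of the options.
struct Options { bool NoFramePointerElim = false; };

struct Machine {
  Options Opts;
  explicit Machine(const Options &O) : Opts(O) {}
};

int main() {
  Options Debug;   Debug.NoFramePointerElim = true;  // -O0-style settings
  Options Release;                                   // defaults
  Machine A(Debug), B(Release);
  // Two machines with different settings can coexist; the global version
  // would impose the last writer's choice on both.
  std::printf("A=%d B=%d global=%d\n", A.Opts.NoFramePointerElim,
              B.Opts.NoFramePointerElim, globals::NoFramePointerElim);
}
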
@@ -305,18 +356,19 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
TM->setMCSaveTempLabels(true);
if (CodeGenOpts.NoDwarf2CFIAsm)
TM->setMCUseCFI(false);
+ if (!CodeGenOpts.NoDwarfDirectoryAsm)
+ TM->setMCUseDwarfDirectory(true);
if (CodeGenOpts.NoExecStack)
TM->setMCNoExecStack(true);
// Create the code generator passes.
PassManager *PM = getCodeGenPasses();
- CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
- switch (CodeGenOpts.OptimizationLevel) {
- default: break;
- case 0: OptLevel = CodeGenOpt::None; break;
- case 3: OptLevel = CodeGenOpt::Aggressive; break;
- }
+ // Add LibraryInfo.
+ TargetLibraryInfo *TLI = new TargetLibraryInfo();
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ PM->add(TLI);
// Normal mode, emit a .s or .o file by running the code generator. Note,
// this also adds codegenerator level optimization passes.
@@ -331,10 +383,11 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
// Add ObjC ARC final-cleanup optimizations. This is done as part of the
// "codegen" passes so that it isn't run multiple times when there is
// inlining happening.
- if (LangOpts.ObjCAutoRefCount)
+ if (LangOpts.ObjCAutoRefCount &&
+ CodeGenOpts.OptimizationLevel > 0)
PM->add(createObjCARCContractPass());
- if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel,
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT,
/*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
Diags.Report(diag::err_fe_unable_to_interface_with_target);
return false;
@@ -397,7 +450,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const CodeGenOptions &CGOpts,
- const TargetOptions &TOpts,
+ const clang::TargetOptions &TOpts,
const LangOptions &LOpts,
Module *M,
BackendAction Action, raw_ostream *OS) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
index 9694953..27bb4ef 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -25,13 +25,15 @@
using namespace clang;
using namespace CodeGen;
-CGBlockInfo::CGBlockInfo(const BlockExpr *blockExpr, const char *N)
- : Name(N), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
- HasCXXObject(false), UsesStret(false), StructureType(0), Block(blockExpr) {
+CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
+ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ HasCXXObject(false), UsesStret(false), StructureType(0), Block(block),
+ DominatingIP(0) {
- // Skip asm prefix, if any.
- if (Name && Name[0] == '\01')
- ++Name;
+ // Skip asm prefix, if any. 'name' is usually taken directly from
+ // the mangled name of the enclosing function.
+ if (!name.empty() && name[0] == '\01')
+ name = name.substr(1);
}
// Anchor the vtable to this translation unit.
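
The constructor now trims the '\01' byte that the mangler prepends when a name must reach the assembler unmodified. A standalone sketch of the same check using std::string_view in place of llvm::StringRef (skipAsmPrefix is an illustrative name):

#include <cstdio>
#include <string_view>

// A leading '\01' tells the assembler layer to use the name verbatim;
// for a human-readable block name we simply drop it.
std::string_view skipAsmPrefix(std::string_view name) {
  if (!name.empty() && name[0] == '\01')
    name = name.substr(1);
  return name;
}

int main() {
  std::string_view Raw = "\01_foo_block_invoke";
  std::string_view Clean = skipAsmPrefix(Raw);
  std::printf("%.*s\n", (int)Clean.size(), Clean.data());
  // prints "_foo_block_invoke"
}
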
@@ -90,7 +92,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
// GC layout.
- if (C.getLangOptions().ObjC1)
+ if (C.getLangOpts().ObjC1)
elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
else
elements.push_back(llvm::Constant::getNullValue(i8p));
@@ -213,6 +215,7 @@ static bool isSafeForCXXConstantCapture(QualType type) {
/// acceptable because we make no promises about address stability of
/// captured variables.
static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
const VarDecl *var) {
QualType type = var->getType();
@@ -224,7 +227,7 @@ static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
// Except that any class member declared mutable can be
// modified, any attempt to modify a const object during its
// lifetime results in undefined behavior.
- if (CGM.getLangOptions().CPlusPlus && !isSafeForCXXConstantCapture(type))
+ if (CGM.getLangOpts().CPlusPlus && !isSafeForCXXConstantCapture(type))
return 0;
// If the variable doesn't have any initializer (shouldn't this be
@@ -233,7 +236,7 @@ static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
const Expr *init = var->getInit();
if (!init) return 0;
- return CGM.EmitConstantExpr(init, var->getType());
+ return CGM.EmitConstantInit(*var, CGF);
}
/// Get the low bit of a nonzero character count. This is the
@@ -276,7 +279,8 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
/// Compute the layout of the given block. Attempts to lay the block
/// out with minimal space requirements.
-static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
+static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
+ CGBlockInfo &info) {
ASTContext &C = CGM.getContext();
const BlockDecl *block = info.getBlockDecl();
@@ -340,7 +344,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
// Otherwise, build a layout chunk with the size and alignment of
// the declaration.
- if (llvm::Constant *constant = tryCaptureAsConstant(CGM, variable)) {
+ if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) {
info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
continue;
}
@@ -370,7 +374,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
info.HasCXXObject = true;
// And so do types with destructors.
- } else if (CGM.getLangOptions().CPlusPlus) {
+ } else if (CGM.getLangOpts().CPlusPlus) {
if (const CXXRecordDecl *record =
variable->getType()->getAsCXXRecordDecl()) {
if (!record->hasTrivialDestructor()) {
@@ -380,13 +384,15 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
}
}
- CharUnits size = C.getTypeSizeInChars(variable->getType());
+ QualType VT = variable->getType();
+ CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
+
maxFieldAlign = std::max(maxFieldAlign, align);
llvm::Type *llvmType =
- CGM.getTypes().ConvertTypeForMem(variable->getType());
-
+ CGM.getTypes().ConvertTypeForMem(VT);
+
layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
}
@@ -481,18 +487,146 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
+/// Enter the scope of a block. This should be run at the entrance to
+/// a full-expression so that the block's cleanups are pushed at the
+/// right place in the stack.
+static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
+ // Allocate the block info and place it at the head of the list.
+ CGBlockInfo &blockInfo =
+ *new CGBlockInfo(block, CGF.CurFn->getName());
+ blockInfo.NextBlockInfo = CGF.FirstBlockInfo;
+ CGF.FirstBlockInfo = &blockInfo;
+
+ // Compute information about the layout, etc., of this block,
+ // pushing cleanups as necessary.
+ computeBlockInfo(CGF.CGM, &CGF, blockInfo);
+
+ // Nothing else to do if it can be global.
+ if (blockInfo.CanBeGlobal) return;
+
+ // Make the allocation for the block.
+ blockInfo.Address =
+ CGF.CreateTempAlloca(blockInfo.StructureType, "block");
+ blockInfo.Address->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // If there are cleanups to emit, enter them (but inactive).
+ if (!blockInfo.NeedsCopyDispose) return;
+
+ // Walk through the captures (in order) and find the ones not
+ // captured by constant.
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ // Ignore __block captures; there's nothing special in the
+ // on-stack block that we need to do for them.
+ if (ci->isByRef()) continue;
+
+ // Ignore variables that are constant-captured.
+ const VarDecl *variable = ci->getVariable();
+ CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ // Ignore objects that aren't destructed.
+ QualType::DestructionKind dtorKind =
+ variable->getType().isDestructedType();
+ if (dtorKind == QualType::DK_none) continue;
+
+ CodeGenFunction::Destroyer *destroyer;
+
+ // Block captures count as local values and have imprecise semantics.
+ // They also can't be arrays, so need to worry about that.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ } else {
+ destroyer = CGF.getDestroyer(dtorKind);
+ }
+
+ // GEP down to the address.
+ llvm::Value *addr = CGF.Builder.CreateStructGEP(blockInfo.Address,
+ capture.getIndex());
+
+ // We can use that GEP as the dominating IP.
+ if (!blockInfo.DominatingIP)
+ blockInfo.DominatingIP = cast<llvm::Instruction>(addr);
+
+ CleanupKind cleanupKind = InactiveNormalCleanup;
+ bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
+ if (useArrayEHCleanup)
+ cleanupKind = InactiveNormalAndEHCleanup;
+
+ CGF.pushDestroy(cleanupKind, addr, variable->getType(),
+ destroyer, useArrayEHCleanup);
+
+ // Remember where that cleanup was.
+ capture.setCleanup(CGF.EHStack.stable_begin());
+ }
+}
+
+/// Enter a full-expression with a non-trivial number of objects to
+/// clean up. This is in this file because, at the moment, the only
+/// kind of cleanup object is a BlockDecl*.
+void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) {
+ assert(E->getNumObjects() != 0);
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups = E->getObjects();
+ for (ArrayRef<ExprWithCleanups::CleanupObject>::iterator
+ i = cleanups.begin(), e = cleanups.end(); i != e; ++i) {
+ enterBlockScope(*this, *i);
+ }
+}
+
+/// Find the layout for the given block in a linked list and remove it.
+static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head,
+ const BlockDecl *block) {
+ while (true) {
+ assert(head && *head);
+ CGBlockInfo *cur = *head;
+
+ // If this is the block we're looking for, splice it out of the list.
+ if (cur->getBlockDecl() == block) {
+ *head = cur->NextBlockInfo;
+ return cur;
+ }
+
+ head = &cur->NextBlockInfo;
+ }
+}
+
+/// Destroy a chain of block layouts.
+void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
+ assert(head && "destroying an empty chain");
+ do {
+ CGBlockInfo *cur = head;
+ head = cur->NextBlockInfo;
+ delete cur;
+ } while (head != 0);
+}
+
/// Emit a block literal expression in the current function.
llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
- std::string Name = CurFn->getName();
- CGBlockInfo blockInfo(blockExpr, Name.c_str());
+ // If the block has no captures, we won't have a pre-computed
+ // layout for it.
+ if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
+ computeBlockInfo(CGM, this, blockInfo);
+ blockInfo.BlockExpression = blockExpr;
+ return EmitBlockLiteral(blockInfo);
+ }
- // Compute information about the layout, etc., of this block.
- computeBlockInfo(CGM, blockInfo);
+ // Find the block info for this block and take ownership of it.
+ OwningPtr<CGBlockInfo> blockInfo;
+ blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
+ blockExpr->getBlockDecl()));
- // Using that metadata, generate the actual block function.
+ blockInfo->BlockExpression = blockExpr;
+ return EmitBlockLiteral(*blockInfo);
+}
+
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
+ // Using the computed layout, generate the actual block function.
+ bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
llvm::Constant *blockFn
= CodeGenFunction(CGM).GenerateBlockFunction(CurGD, blockInfo,
- CurFuncDecl, LocalDeclMap);
+ CurFuncDecl, LocalDeclMap,
+ isLambdaConv);
blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
// If there is nothing to capture, we can emit this as a global block.
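
findAndRemoveBlockInfo() above uses the pointer-to-pointer list idiom: by walking a pointer to the link field rather than to the node, removing the head needs no special case. A standalone sketch with an illustrative Node type:

#include <cassert>
#include <cstdio>

struct Node {
  int Key;
  Node *Next = nullptr;
};

// Walk the chain through a pointer-to-link; when the match is found,
// redirect that link past the node and hand the node to the caller.
Node *findAndRemove(Node **head, int key) {
  while (true) {
    assert(head && *head && "key must be present");
    Node *cur = *head;
    if (cur->Key == key) {
      *head = cur->Next;
      return cur;
    }
    head = &cur->Next;
  }
}

int main() {
  Node C{3}, B{2, &C}, A{1, &B};
  Node *List = &A;
  Node *Removed = findAndRemove(&List, 1);  // removing the head also works
  std::printf("removed %d; head now %d\n", Removed->Key, List->Key);
}
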
@@ -507,11 +641,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// Build the block descriptor.
llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
- llvm::Type *intTy = ConvertType(getContext().IntTy);
-
- llvm::AllocaInst *blockAddr =
- CreateTempAlloca(blockInfo.StructureType, "block");
- blockAddr->setAlignment(blockInfo.BlockAlign.getQuantity());
+ llvm::AllocaInst *blockAddr = blockInfo.Address;
+ assert(blockAddr && "block has no address!");
// Compute the initial on-stack block flags.
BlockFlags flags = BLOCK_HAS_SIGNATURE;
@@ -521,9 +652,9 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// Initialize the block literal.
Builder.CreateStore(isa, Builder.CreateStructGEP(blockAddr, 0, "block.isa"));
- Builder.CreateStore(llvm::ConstantInt::get(intTy, flags.getBitMask()),
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
Builder.CreateStructGEP(blockAddr, 1, "block.flags"));
- Builder.CreateStore(llvm::ConstantInt::get(intTy, 0),
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, 0),
Builder.CreateStructGEP(blockAddr, 2, "block.reserved"));
Builder.CreateStore(blockFn, Builder.CreateStructGEP(blockAddr, 3,
"block.invoke"));
@@ -570,6 +701,10 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
src = Builder.CreateStructGEP(LoadBlockStruct(),
enclosingCapture.getIndex(),
"block.capture.addr");
+ } else if (blockDecl->isConversionFromLambda()) {
+ // The lambda capture in a lambda's conversion-to-block-pointer is
+ // special; we'll simply emit it directly.
+ src = 0;
} else {
// This is a [[type]]*.
src = LocalDeclMap[variable];
@@ -591,7 +726,19 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// If we have a copy constructor, evaluate that into the block field.
} else if (const Expr *copyExpr = ci->getCopyExpr()) {
- EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+ if (blockDecl->isConversionFromLambda()) {
+ // If we have a lambda conversion, emit the expression
+ // directly into the block instead.
+ CharUnits Align = getContext().getTypeAlignInChars(type);
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(blockField, Align, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(copyExpr, Slot);
+ } else {
+ EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+ }
// If it's a reference variable, copy the reference into the block field.
} else if (type->isReferenceType()) {
@@ -606,45 +753,23 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// We use one of these or the other depending on whether the
// reference is nested.
- DeclRefExpr notNested(const_cast<VarDecl*>(variable), type, VK_LValue,
- SourceLocation());
- BlockDeclRefExpr nested(const_cast<VarDecl*>(variable), type,
- VK_LValue, SourceLocation(), /*byref*/ false);
-
- Expr *declRef =
- (ci->isNested() ? static_cast<Expr*>(&nested) : &notNested);
+ DeclRefExpr declRef(const_cast<VarDecl*>(variable),
+ /*refersToEnclosing*/ ci->isNested(), type,
+ VK_LValue, SourceLocation());
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
- declRef, VK_RValue);
+ &declRef, VK_RValue);
EmitExprAsInit(&l2r, &blockFieldPseudoVar,
MakeAddrLValue(blockField, type,
- getContext().getDeclAlign(variable)
- .getQuantity()),
+ getContext().getDeclAlign(variable)),
/*captured by init*/ false);
}
- // Push a destructor if necessary. The semantics for when this
- // actually gets run are really obscure.
+ // Activate the cleanup if layout pushed one.
if (!ci->isByRef()) {
- switch (QualType::DestructionKind dtorKind = type.isDestructedType()) {
- case QualType::DK_none:
- break;
-
- // Block captures count as local values and have imprecise semantics.
- // They also can't be arrays, so need to worry about that.
- case QualType::DK_objc_strong_lifetime: {
- // This local is a GCC and MSVC compiler workaround.
- Destroyer *destroyer = &destroyARCStrongImprecise;
- pushDestroy(getCleanupKind(dtorKind), blockField, type,
- *destroyer, /*useEHCleanupForArray*/ false);
- break;
- }
-
- case QualType::DK_objc_weak_lifetime:
- case QualType::DK_cxx_destructor:
- pushDestroy(dtorKind, blockField, type);
- break;
- }
+ EHScopeStack::stable_iterator cleanup = capture.getCleanup();
+ if (cleanup.isValid())
+ ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
}
}
@@ -744,11 +869,11 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
llvm::Value *Func = Builder.CreateLoad(FuncPtr);
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
- const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FuncTy);
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FuncTy);
// Cast the function pointer to the right type.
- llvm::Type *BlockFTy =
- CGM.getTypes().GetFunctionType(FnInfo, false);
+ llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
@@ -798,10 +923,11 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
llvm::Constant *
CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
const char *name) {
- CGBlockInfo blockInfo(blockExpr, name);
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), name);
+ blockInfo.BlockExpression = blockExpr;
// Compute information about the layout, etc., of this block.
- computeBlockInfo(*this, blockInfo);
+ computeBlockInfo(*this, 0, blockInfo);
// Using that metadata, generate the actual block function.
llvm::Constant *blockFn;
@@ -809,7 +935,8 @@ CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
blockInfo,
- 0, LocalDeclMap);
+ 0, LocalDeclMap,
+ false);
}
blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
@@ -863,7 +990,8 @@ llvm::Function *
CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &blockInfo,
const Decl *outerFnDecl,
- const DeclMapTy &ldm) {
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock) {
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
// Check if we should generate debug info for this block function.
@@ -901,16 +1029,15 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
args.push_back(*i);
// Create the function declaration.
- const FunctionProtoType *fnType =
- cast<FunctionProtoType>(blockInfo.getBlockExpr()->getFunctionType());
+ const FunctionProtoType *fnType = blockInfo.getBlockExpr()->getFunctionType();
const CGFunctionInfo &fnInfo =
- CGM.getTypes().getFunctionInfo(fnType->getResultType(), args,
- fnType->getExtInfo());
+ CGM.getTypes().arrangeFunctionDeclaration(fnType->getResultType(), args,
+ fnType->getExtInfo(),
+ fnType->isVariadic());
if (CGM.ReturnTypeUsesSRet(fnInfo))
blockInfo.UsesStret = true;
- llvm::FunctionType *fnLLVMType =
- CGM.getTypes().GetFunctionType(fnInfo, fnType->isVariadic());
+ llvm::FunctionType *fnLLVMType = CGM.getTypes().GetFunctionType(fnInfo);
MangleBuffer name;
CGM.getBlockMangledName(GD, name, blockDecl);
@@ -976,12 +1103,15 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
LocalDeclMap[variable] = alloca;
}
- // Save a spot to insert the debug information for all the BlockDeclRefDecls.
+ // Save a spot to insert the debug information for all the DeclRefExprs.
llvm::BasicBlock *entry = Builder.GetInsertBlock();
llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
--entry_ptr;
- EmitStmt(blockDecl->getBody());
+ if (IsLambdaConversionToBlock)
+ EmitLambdaBlockInvokeBody();
+ else
+ EmitStmt(blockDecl->getBody());
// Remember where we were...
llvm::BasicBlock *resume = Builder.GetInsertBlock();
@@ -990,7 +1120,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
++entry_ptr;
Builder.SetInsertPoint(entry, entry_ptr);
- // Emit debug information for all the BlockDeclRefDecls.
+ // Emit debug information for all the DeclRefExprs.
// FIXME: also for 'this'
if (CGDebugInfo *DI = getDebugInfo()) {
for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
@@ -1052,11 +1182,13 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
args.push_back(&srcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
// FIXME: it would be nice if these were mergeable with things with
// identical semantics.
- llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1076,7 +1208,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
SC_Static,
SC_None,
false,
- true);
+ false);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1119,7 +1251,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
flags = BLOCK_FIELD_IS_BLOCK;
// Special rules for ARC captures:
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
Qualifiers qs = type.getQualifiers();
// Don't generate special copy logic for a captured object
@@ -1167,11 +1299,13 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
args.push_back(&srcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
- llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1189,7 +1323,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
SC_None,
- false, true);
+ false, false);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1229,7 +1363,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
flags = BLOCK_FIELD_IS_BLOCK;
// Special rules for ARC captures.
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
Qualifiers qs = type.getQualifiers();
// Don't generate special dispose logic for a captured object
@@ -1340,15 +1474,23 @@ public:
// Do a "move" by copying the value and then zeroing out the old
// variable.
- llvm::Value *value = CGF.Builder.CreateLoad(srcField);
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(srcField);
+ value->setAlignment(Alignment.getQuantity());
+
llvm::Value *null =
llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
- CGF.Builder.CreateStore(value, destField);
- CGF.Builder.CreateStore(null, srcField);
+
+ llvm::StoreInst *store = CGF.Builder.CreateStore(value, destField);
+ store->setAlignment(Alignment.getQuantity());
+
+ store = CGF.Builder.CreateStore(null, srcField);
+ store->setAlignment(Alignment.getQuantity());
}
void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
- llvm::Value *value = CGF.Builder.CreateLoad(field);
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
+ value->setAlignment(Alignment.getQuantity());
+
CGF.EmitARCRelease(value, /*precise*/ false);
}
@@ -1358,6 +1500,39 @@ public:
}
};
+/// Emits the copy/dispose helpers for an ARC __block __strong
+/// variable that's of block-pointer type.
+class ARCStrongBlockByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongBlockByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do the copy with objc_retainBlock; that's all that
+ // _Block_object_assign would do anyway, and we'd have to pass the
+ // right arguments to make sure it doesn't get no-op'ed.
+ llvm::LoadInst *oldValue = CGF.Builder.CreateLoad(srcField);
+ oldValue->setAlignment(Alignment.getQuantity());
+
+ llvm::Value *copy = CGF.EmitARCRetainBlock(oldValue, /*mandatory*/ true);
+
+ llvm::StoreInst *store = CGF.Builder.CreateStore(copy, destField);
+ store->setAlignment(Alignment.getQuantity());
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
+ value->setAlignment(Alignment.getQuantity());
+
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 2 is distinguishable from all pointers and byref flags
+ id.AddInteger(2);
+ }
+};
+
/// Emits the copy/dispose helpers for a __block variable with a
/// nontrivial copy constructor or destructor.
class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
@@ -1404,10 +1579,12 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
args.push_back(&src);
const CGFunctionInfo &FI =
- CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
+ CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
CodeGenTypes &Types = CGF.CGM.getTypes();
- llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -1424,7 +1601,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SourceLocation(), II, R, 0,
SC_Static,
SC_None,
- false, true);
+ false, false);
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
@@ -1472,10 +1649,12 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
args.push_back(&src);
const CGFunctionInfo &FI =
- CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
+ CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
CodeGenTypes &Types = CGF.CGM.getTypes();
- llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -1493,7 +1672,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
SourceLocation(), II, R, 0,
SC_Static,
SC_None,
- false, true);
+ false, false);
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsDispose()) {
@@ -1565,7 +1744,7 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
// If we have lifetime, that dominates.
if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
- assert(getLangOptions().ObjCAutoRefCount);
+ assert(getLangOpts().ObjCAutoRefCount);
switch (lifetime) {
case Qualifiers::OCL_None: llvm_unreachable("impossible");
@@ -1584,13 +1763,10 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
// ARC __strong __block variables need to be retained.
case Qualifiers::OCL_Strong:
- // Block-pointers need to be _Block_copy'ed, so we let the
- // runtime be in charge. But we can't use the code below
- // because we don't want to set BYREF_CALLER, which will
- // just make the runtime ignore us.
+ // Block pointers need to be copied, and there's no direct
+ // transfer possible.
if (type->isBlockPointerType()) {
- BlockFieldFlags flags = BLOCK_FIELD_IS_BLOCK;
- ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
+ ARCStrongBlockByrefHelpers byrefInfo(emission.Alignment);
return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
// Otherwise, we transfer ownership of the retain from the stack
@@ -1674,7 +1850,8 @@ llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
// int32_t __size;
types.push_back(Int32Ty);
- bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty);
+ bool HasCopyAndDispose =
+ (Ty->isObjCRetainableType()) || getContext().getBlockVarCopyInits(D);
if (HasCopyAndDispose) {
/// void *__copy_helper;
types.push_back(Int8PtrTy);
@@ -1701,7 +1878,7 @@ llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
if (NumPaddingBytes > 0) {
- llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
+ llvm::Type *Ty = Int8Ty;
// FIXME: We need a sema error for alignment larger than the minimum of
// the maximal stack alignment and the alignment of malloc on the system.
if (NumPaddingBytes > 1)
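
The padding computation above rounds the running offset up to the variable's alignment and fills the gap with an i8 array. A standalone sketch of that round-up arithmetic, assuming power-of-two alignments (as type alignments always are):

#include <cstdio>

// Bytes of padding needed to move 'offset' up to the next multiple of
// 'align'; 'align' must be a power of two.
unsigned paddingBytes(unsigned offset, unsigned align) {
  unsigned aligned = (offset + align - 1) & ~(align - 1);
  return aligned - offset;
}

int main() {
  // e.g. the byref header ends at byte 20, variable wants 16-byte alignment:
  std::printf("%u\n", paddingBytes(20, 16));  // 12 bytes of i8 padding
  std::printf("%u\n", paddingBytes(32, 16));  // already aligned: 0
}
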
@@ -1803,7 +1980,7 @@ namespace {
/// to be done externally.
void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
// We don't enter this cleanup if we're in pure-GC mode.
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly)
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly)
return;
EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
@@ -1812,7 +1989,7 @@ void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
/// Adjust the declaration of something from the blocks API.
static void configureBlocksRuntimeObject(CodeGenModule &CGM,
llvm::Constant *C) {
- if (!CGM.getLangOptions().BlocksRuntimeOptional) return;
+ if (!CGM.getLangOpts().BlocksRuntimeOptional) return;
llvm::GlobalValue *GV = cast<llvm::GlobalValue>(C->stripPointerCasts());
if (GV->isDeclaration() &&
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
index 6e71c1f..095cfdb 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -23,6 +23,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "CodeGenFunction.h"
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"
@@ -128,13 +129,14 @@ inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
class CGBlockInfo {
public:
/// Name - The name of the block, kindof.
- const char *Name;
+ llvm::StringRef Name;
/// The field index of 'this' within the block, if there is one.
unsigned CXXThisIndex;
class Capture {
uintptr_t Data;
+ EHScopeStack::stable_iterator Cleanup;
public:
bool isIndex() const { return (Data & 1) != 0; }
@@ -144,6 +146,14 @@ public:
assert(isConstant());
return reinterpret_cast<llvm::Value*>(Data);
}
+ EHScopeStack::stable_iterator getCleanup() const {
+ assert(isIndex());
+ return Cleanup;
+ }
+ void setCleanup(EHScopeStack::stable_iterator cleanup) {
+ assert(isIndex());
+ Cleanup = cleanup;
+ }
static Capture makeIndex(unsigned index) {
Capture v;
@@ -158,9 +168,6 @@ public:
}
};
- /// The mapping of allocated indexes within the block.
- llvm::DenseMap<const VarDecl*, Capture> Captures;
-
/// CanBeGlobal - True if the block can be global, i.e. it has
/// no non-constant captures.
bool CanBeGlobal : 1;
@@ -176,22 +183,44 @@ public:
/// because it gets set later in the block-creation process.
mutable bool UsesStret : 1;
+ /// The mapping of allocated indexes within the block.
+ llvm::DenseMap<const VarDecl*, Capture> Captures;
+
+ llvm::AllocaInst *Address;
llvm::StructType *StructureType;
- const BlockExpr *Block;
+ const BlockDecl *Block;
+ const BlockExpr *BlockExpression;
CharUnits BlockSize;
CharUnits BlockAlign;
+ /// An instruction which dominates the full-expression that the
+ /// block is inside.
+ llvm::Instruction *DominatingIP;
+
+ /// The next block in the block-info chain. Invalid if this block
+ /// info is not part of the CGF's block-info chain, which is true
+ /// if it corresponds to a global block or a block whose expression
+ /// has been encountered.
+ CGBlockInfo *NextBlockInfo;
+
const Capture &getCapture(const VarDecl *var) const {
- llvm::DenseMap<const VarDecl*, Capture>::const_iterator
+ return const_cast<CGBlockInfo*>(this)->getCapture(var);
+ }
+ Capture &getCapture(const VarDecl *var) {
+ llvm::DenseMap<const VarDecl*, Capture>::iterator
it = Captures.find(var);
assert(it != Captures.end() && "no entry for variable!");
return it->second;
}
- const BlockDecl *getBlockDecl() const { return Block->getBlockDecl(); }
- const BlockExpr *getBlockExpr() const { return Block; }
+ const BlockDecl *getBlockDecl() const { return Block; }
+ const BlockExpr *getBlockExpr() const {
+ assert(BlockExpression);
+ assert(BlockExpression->getBlockDecl() == Block);
+ return BlockExpression;
+ }
- CGBlockInfo(const BlockExpr *blockExpr, const char *Name);
+ CGBlockInfo(const BlockDecl *blockDecl, llvm::StringRef Name);
};
} // end namespace CodeGen
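
Capture packs its two cases into a single uintptr_t: a constant llvm::Value* is stored directly (its low bit is clear because the pointer is aligned), while a field index is shifted left and tagged with the low bit. A standalone model of the encoding; CaptureModel and the exact shift are illustrative, inferred from the isIndex()/makeIndex() lines shown:

#include <cassert>
#include <cstdint>
#include <cstdio>

class CaptureModel {
  uintptr_t Data = 0;
public:
  bool isIndex() const { return (Data & 1) != 0; }
  bool isConstant() const { return !isIndex(); }

  unsigned getIndex() const {
    assert(isIndex());
    return unsigned(Data >> 1);
  }
  const void *getConstant() const {
    assert(isConstant());
    return reinterpret_cast<const void *>(Data);
  }

  static CaptureModel makeIndex(unsigned index) {
    CaptureModel V;
    V.Data = (uintptr_t(index) << 1) | 1;  // tag bit marks "index"
    return V;
  }
  static CaptureModel makeConstant(const void *value) {
    // Relies on 'value' being at least 2-byte aligned (low bit clear).
    CaptureModel V;
    V.Data = reinterpret_cast<uintptr_t>(value);
    return V;
  }
};

int main() {
  static int Dummy;
  CaptureModel Idx = CaptureModel::makeIndex(5);
  CaptureModel Cst = CaptureModel::makeConstant(&Dummy);
  std::printf("index? %d value=%u\n", Idx.isIndex(), Idx.getIndex());
  std::printf("constant? %d\n", Cst.isConstant());
}
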
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
index ec0ca42..e30b513 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -16,7 +16,6 @@
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
@@ -176,7 +175,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
- if (E->Evaluate(Result, CGM.getContext()) &&
+ if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
!Result.hasSideEffects()) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
@@ -215,7 +214,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
DstPtr, SrcPtr));
}
- case Builtin::BI__builtin_abs: {
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
@@ -228,6 +229,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll: {
@@ -237,12 +239,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue);
+ Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_clzs:
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll: {
@@ -252,7 +256,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue);
+ Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -268,7 +273,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue),
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
+ Builder.getTrue()),
llvm::ConstantInt::get(ArgType, 1));
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
@@ -534,7 +540,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_bzero: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
- Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, 1, false);
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, Align, false);
return RValue::get(Address);
}
case Builtin::BImemcpy:
@@ -542,7 +549,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateMemCpy(Address, SrcAddr, SizeVal, 1, false);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemCpy(Address, SrcAddr, SizeVal, Align, false);
return RValue::get(Address);
}
@@ -557,7 +566,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Dest = EmitScalarExpr(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemCpy(Dest, Src, SizeVal, 1, false);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemCpy(Dest, Src, SizeVal, Align, false);
return RValue::get(Dest);
}
@@ -581,7 +592,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Dest = EmitScalarExpr(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemMove(Dest, Src, SizeVal, 1, false);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemMove(Dest, Src, SizeVal, Align, false);
return RValue::get(Dest);
}
@@ -590,7 +603,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateMemMove(Address, SrcAddr, SizeVal, 1, false);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemMove(Address, SrcAddr, SizeVal, Align, false);
return RValue::get(Address);
}
case Builtin::BImemset:
@@ -599,7 +614,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
return RValue::get(Address);
}
case Builtin::BI__builtin___memset_chk: {
@@ -614,7 +630,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
- Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
return RValue::get(Address);
}
@@ -925,12 +942,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
- llvm::Type *ElLLVMTy =
- cast<llvm::PointerType>(Ptr->getType())->getElementType();
- llvm::StoreInst *Store =
- Builder.CreateStore(llvm::Constant::getNullValue(ElLLVMTy), Ptr);
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ StoreSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
Store->setAlignment(StoreSize.getQuantity());
Store->setAtomic(llvm::Release);
return RValue::get(0);
@@ -948,10 +966,186 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
+ case Builtin::BI__c11_atomic_is_lock_free:
+ case Builtin::BI__atomic_is_lock_free: {
+ // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
+ // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
+ // _Atomic(T) is always properly-aligned.
+ const char *LibCallName = "__atomic_is_lock_free";
+ CallArgList Args;
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
+ getContext().getSizeType());
+ if (BuiltinID == Builtin::BI__atomic_is_lock_free)
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
+ getContext().VoidPtrTy);
+ else
+ Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
+ getContext().VoidPtrTy);
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ }
+
+ case Builtin::BI__atomic_test_and_set: {
+ // Look at the argument type to determine whether this is a volatile
+ // operation. The parameter type is always volatile.
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(1);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ AtomicRMWInst *Result = 0;
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::SequentiallyConsistent);
+ break;
+ }
+ Result->setVolatile(Volatile);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[5] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("acquire", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("acqrel", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[5] = {
+ llvm::Monotonic, llvm::Acquire, llvm::Release,
+ llvm::AcquireRelease, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ Builder.SetInsertPoint(ContBB);
+ PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
+
+ for (unsigned i = 0; i < 5; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal, Orders[i]);
+ RMW->setVolatile(Volatile);
+ Result->addIncoming(RMW, BBs[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(1), BBs[1]);
+ SI->addCase(Builder.getInt32(2), BBs[1]);
+ SI->addCase(Builder.getInt32(3), BBs[2]);
+ SI->addCase(Builder.getInt32(4), BBs[3]);
+ SI->addCase(Builder.getInt32(5), BBs[4]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ case Builtin::BI__atomic_clear: {
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(0);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Store->setOrdering(llvm::Monotonic);
+ break;
+ case 3: // memory_order_release
+ Store->setOrdering(llvm::Release);
+ break;
+ case 5: // memory_order_seq_cst
+ Store->setOrdering(llvm::SequentiallyConsistent);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[3] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[3] = {
+ llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ for (unsigned i = 0; i < 3; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ Store->setOrdering(Orders[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(3), BBs[1]);
+ SI->addCase(Builder.getInt32(5), BBs[2]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(0);
+ }
+
case Builtin::BI__atomic_thread_fence:
- case Builtin::BI__atomic_signal_fence: {
+ case Builtin::BI__atomic_signal_fence:
+ case Builtin::BI__c11_atomic_thread_fence:
+ case Builtin::BI__c11_atomic_signal_fence: {
llvm::SynchronizationScope Scope;
- if (BuiltinID == Builtin::BI__atomic_signal_fence)
+ if (BuiltinID == Builtin::BI__atomic_signal_fence ||
+ BuiltinID == Builtin::BI__c11_atomic_signal_fence)
Scope = llvm::SingleThread;
else
Scope = llvm::CrossThread;
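
The __atomic_test_and_set and __atomic_clear lowerings above both map the C11 memory_order argument (0 through 5, as in <stdatomic.h>) onto LLVM's ordering kinds, strengthening consume to acquire and degrading invalid values to relaxed. A standalone sketch of that mapping with an illustrative Ordering enum:

#include <cstdio>

enum Ordering { Monotonic, Acquire, Release, AcquireRelease, SeqCst };

// memory_order_relaxed=0, consume=1, acquire=2, release=3,
// acq_rel=4, seq_cst=5.
Ordering mapOrder(int ord) {
  switch (ord) {
  case 1:                       // consume is strengthened to acquire
  case 2: return Acquire;
  case 3: return Release;
  case 4: return AcquireRelease;
  case 5: return SeqCst;
  case 0:
  default: return Monotonic;    // invalid orders degrade to relaxed
  }
}

int main() {
  for (int I = 0; I <= 5; ++I)
    std::printf("order %d -> %d\n", I, mapOrder(I));
}
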
@@ -1145,8 +1339,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
- llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
- if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
+ llvm::Type *RetTy = VoidTy;
+ if (!BuiltinRetType->isVoidType())
+ RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
@@ -1181,30 +1376,37 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
return EmitPPCBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::hexagon:
+ return EmitHexagonBuiltinExpr(BuiltinID, E);
default:
return 0;
}
}
-static llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
- switch (type) {
- default: break;
- case 0:
- case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
- case 6:
- case 7:
- case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q);
- case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q);
- case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q);
- case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q);
- };
- return 0;
+static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
+ NeonTypeFlags TypeFlags) {
+ int IsQuad = TypeFlags.isQuad();
+ switch (TypeFlags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return llvm::VectorType::get(CGF->Int8Ty, 8 << IsQuad);
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ case NeonTypeFlags::Float16:
+ return llvm::VectorType::get(CGF->Int16Ty, 4 << IsQuad);
+ case NeonTypeFlags::Int32:
+ return llvm::VectorType::get(CGF->Int32Ty, 2 << IsQuad);
+ case NeonTypeFlags::Int64:
+ return llvm::VectorType::get(CGF->Int64Ty, 1 << IsQuad);
+ case NeonTypeFlags::Float32:
+ return llvm::VectorType::get(CGF->FloatTy, 2 << IsQuad);
+ }
+ llvm_unreachable("Invalid NeonTypeFlags element type!");
}
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
- SmallVector<Constant*, 16> Indices(nElts, C);
- Value* SV = llvm::ConstantVector::get(Indices);
+ Value* SV = llvm::ConstantVector::getSplat(nElts, C);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
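
The rewritten GetNeonType makes the vector shape explicit: the element kind fixes the lane width, and the quad flag selects a 64-bit D register or a 128-bit Q register, so the lane count is simply the register width over the element width; that is what the 8<<q / 4<<q / 2<<q / 1<<q ladder encodes. A standalone check of that identity (laneCount is an illustrative helper):

#include <cstdio>

// Lane count for a NEON vector: register width in bits divided by the
// element width in bits.
unsigned laneCount(unsigned elemBits, bool quad) {
  unsigned regBits = quad ? 128 : 64;  // Q vs. D register
  return regBits / elemBits;
}

int main() {
  std::printf("i8  D:%u Q:%u\n", laneCount(8, false), laneCount(8, true));
  std::printf("i16 D:%u Q:%u\n", laneCount(16, false), laneCount(16, true));
  std::printf("i32 D:%u Q:%u\n", laneCount(32, false), laneCount(32, true));
  std::printf("i64 D:%u Q:%u\n", laneCount(64, false), laneCount(64, true));
  // Matches 8<<q, 4<<q, 2<<q, 1<<q in the patch.
}
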
@@ -1224,26 +1426,28 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
- ConstantInt *CI = cast<ConstantInt>(V);
- int SV = CI->getSExtValue();
+ int SV = cast<ConstantInt>(V)->getSExtValue();
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
- SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
- return llvm::ConstantVector::get(CV);
+ return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}
/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer. Skip over implicit
/// casts.
-static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
+unsigned CodeGenFunction::GetPointeeAlignment(const Expr *Addr) {
unsigned Align = 1;
// Check if the type is a pointer. The implicit cast operand might not be.
while (Addr->getType()->isPointerType()) {
QualType PtTy = Addr->getType()->getPointeeType();
- unsigned NewA = CGF.getContext().getTypeAlignInChars(PtTy).getQuantity();
- if (NewA > Align)
- Align = NewA;
+
+ // Can't get alignment of incomplete types.
+ if (!PtTy->isIncompleteType()) {
+ unsigned NewA = getContext().getTypeAlignInChars(PtTy).getQuantity();
+ if (NewA > Align)
+ Align = NewA;
+ }
// If the address is an implicit cast, repeat with the cast operand.
if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
@@ -1252,7 +1456,14 @@ static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
}
break;
}
- return llvm::ConstantInt::get(CGF.Int32Ty, Align);
+ return Align;
+}
+
+/// GetPointeeAlignmentValue - Given an expression with a pointer type, find
+/// the alignment of the type referenced by the pointer. Skip over implicit
+/// casts. Return the alignment as an llvm::Value.
+Value *CodeGenFunction::GetPointeeAlignmentValue(const Expr *Addr) {
+ return llvm::ConstantInt::get(Int32Ty, GetPointeeAlignment(Addr));
}
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
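
The memcpy/memmove/memset changes earlier in this file feed the intrinsics the alignment recoverable from the pointer operands instead of a pessimistic 1; since the intrinsic carries a single alignment covering both pointers in this LLVM version, the two-pointer forms must take the minimum of the two sides. A standalone sketch, with pointeeAlign standing in for GetPointeeAlignment:

#include <algorithm>
#include <cstdio>

// Stand-in for GetPointeeAlignment: clang walks implicit casts and asks
// ASTContext for the pointee type's alignment in bytes.
unsigned pointeeAlign(unsigned typeAlignBytes) { return typeAlignBytes; }

int main() {
  unsigned DstAlign = pointeeAlign(16);  // destination type: 16-byte aligned
  unsigned SrcAlign = pointeeAlign(4);   // source type: 4-byte aligned
  // One alignment attribute covers both operands, so take the weaker one.
  unsigned Align = std::min(DstAlign, SrcAlign);
  std::printf("memcpy alignment: %u\n", Align);
}
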
@@ -1349,9 +1560,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Determine the overloaded type of this builtin.
llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
- Ty = llvm::Type::getFloatTy(getLLVMContext());
+ Ty = FloatTy;
else
- Ty = llvm::Type::getDoubleTy(getLLVMContext());
+ Ty = DoubleTy;
// Determine whether this is an unsigned conversion or not.
bool usgn = Result.getZExtValue() == 1;
@@ -1363,14 +1574,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
// Determine the type of this overloaded NEON intrinsic.
- unsigned type = Result.getZExtValue();
- bool usgn = type & 0x08;
- bool quad = type & 0x10;
- bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
- (void)poly; // Only used in assert()s.
+ NeonTypeFlags Type(Result.getZExtValue());
+ bool usgn = Type.isUnsigned();
+ bool quad = Type.isQuad();
bool rightShift = false;
- llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
+ llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return 0;
@@ -1429,34 +1638,40 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(F, Ops, "vcnt");
}
case ARM::BI__builtin_neon_vcvt_f16_v: {
- assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f16_v builtin");
+ assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
+ "unexpected vcvt_f16_v builtin");
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
return EmitNeonCall(F, Ops, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_f32_f16: {
- assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f32_f16 builtin");
+ assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
+ "unexpected vcvt_f32_f16 builtin");
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
return EmitNeonCall(F, Ops, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_f32_v:
- case ARM::BI__builtin_neon_vcvtq_f32_v: {
+ case ARM::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ty = GetNeonType(getLLVMContext(), 4, quad);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
- }
case ARM::BI__builtin_neon_vcvt_s32_v:
case ARM::BI__builtin_neon_vcvt_u32_v:
case ARM::BI__builtin_neon_vcvtq_s32_v:
case ARM::BI__builtin_neon_vcvtq_u32_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(getLLVMContext(), 4, quad));
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case ARM::BI__builtin_neon_vcvt_n_f32_v:
case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
- llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
- Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ llvm::Type *Tys[2] = { FloatTy, Ty };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
+ : Intrinsic::arm_neon_vcvtfxs2fp;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
@@ -1464,8 +1679,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vcvt_n_u32_v:
case ARM::BI__builtin_neon_vcvtq_n_s32_v:
case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
- llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
- Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ llvm::Type *Tys[2] = { Ty, FloatTy };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
+ : Intrinsic::arm_neon_vcvtfp2fxs;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
@@ -1491,30 +1709,35 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
case ARM::BI__builtin_neon_vld1_v:
case ARM::BI__builtin_neon_vld1q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
Ops, "vld1");
case ARM::BI__builtin_neon_vld1_lane_v:
- case ARM::BI__builtin_neon_vld1q_lane_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateLoad(Ops[0]);
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
+ }
case ARM::BI__builtin_neon_vld1_dup_v:
case ARM::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateLoad(Ops[0]);
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
- Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
+ Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
return EmitNeonSplat(Ops[0], CI);
}
case ARM::BI__builtin_neon_vld2_v:
case ARM::BI__builtin_neon_vld2q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
- Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1523,7 +1746,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld3_v:
case ARM::BI__builtin_neon_vld3q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
- Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1532,7 +1755,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_v:
case ARM::BI__builtin_neon_vld4q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
- Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1543,7 +1766,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1555,7 +1778,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1568,7 +1791,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1584,15 +1807,15 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::arm_neon_vld2;
break;
case ARM::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld2;
+ Int = Intrinsic::arm_neon_vld3;
break;
case ARM::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld2;
+ Int = Intrinsic::arm_neon_vld4;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
- Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1603,10 +1826,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::arm_neon_vld2lane;
break;
case ARM::BI__builtin_neon_vld3_dup_v:
- Int = Intrinsic::arm_neon_vld2lane;
+ Int = Intrinsic::arm_neon_vld3lane;
break;
case ARM::BI__builtin_neon_vld4_dup_v:
- Int = Intrinsic::arm_neon_vld2lane;
+ Int = Intrinsic::arm_neon_vld4lane;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
@@ -1619,7 +1842,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Args.push_back(CI);
- Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));
+ Args.push_back(GetPointeeAlignmentValue(E->getArg(1)));
Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// Splat lane 0 to all elements in each vector of the result.
@@ -1656,12 +1879,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vmul_v:
case ARM::BI__builtin_neon_vmulq_v:
- assert(poly && "vmul builtin only supported for polynomial types");
+ assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
Ops, "vmul");
case ARM::BI__builtin_neon_vmull_v:
Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
- Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+ Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case ARM::BI__builtin_neon_vpadal_v:
case ARM::BI__builtin_neon_vpadalq_v: {
@@ -1852,43 +2075,48 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateAdd(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vst1_v:
case ARM::BI__builtin_neon_vst1q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst1_lane_v:
- case ARM::BI__builtin_neon_vst1q_lane_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
+ StoreInst *St = Builder.CreateStore(Ops[1],
+ Builder.CreateBitCast(Ops[0], Ty));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return St;
+ }
case ARM::BI__builtin_neon_vst2_v:
case ARM::BI__builtin_neon_vst2q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst2_lane_v:
case ARM::BI__builtin_neon_vst2q_lane_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_v:
case ARM::BI__builtin_neon_vst3q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_lane_v:
case ARM::BI__builtin_neon_vst3q_lane_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_v:
case ARM::BI__builtin_neon_vst4q_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_lane_v:
case ARM::BI__builtin_neon_vst4q_lane_v:
- Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vsubhn_v:
@@ -1937,8 +2165,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
- Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
+ Indices.push_back(Builder.getInt32(i+vi));
+ Indices.push_back(Builder.getInt32(i+e+vi));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
@@ -1990,7 +2218,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
llvm::Value *CodeGenFunction::
-BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) {
+BuildVector(ArrayRef<llvm::Value*> Ops) {
assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
@@ -1999,7 +2227,7 @@ BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) {
// If this is a constant vector, create a ConstantVector.
if (AllConstants) {
- std::vector<llvm::Constant*> CstOps;
+ SmallVector<llvm::Constant*, 16> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
return llvm::ConstantVector::get(CstOps);
@@ -2010,8 +2238,7 @@ BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) {
llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- Result = Builder.CreateInsertElement(Result, Ops[i],
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), i));
+ Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
return Result;
}
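
Taking ArrayRef<llvm::Value*> instead of const SmallVectorImpl& lets callers hand in any contiguous storage without committing to SmallVector. A usage sketch (the element names are illustrative):

    // A C array, SmallVector, or std::vector all convert implicitly.
    llvm::Value *Elts[4] = { A, B, C, D };
    llvm::Value *V = CGF.BuildVector(Elts);
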
@@ -2043,61 +2270,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: return 0;
- case X86::BI__builtin_ia32_pslldi128:
- case X86::BI__builtin_ia32_psllqi128:
- case X86::BI__builtin_ia32_psllwi128:
- case X86::BI__builtin_ia32_psradi128:
- case X86::BI__builtin_ia32_psrawi128:
- case X86::BI__builtin_ia32_psrldi128:
- case X86::BI__builtin_ia32_psrlqi128:
- case X86::BI__builtin_ia32_psrlwi128: {
- Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
- llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
- Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
- Ops[1], Zero, "insert");
- Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
- const char *name = 0;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported shift intrinsic!");
- case X86::BI__builtin_ia32_pslldi128:
- name = "pslldi";
- ID = Intrinsic::x86_sse2_psll_d;
- break;
- case X86::BI__builtin_ia32_psllqi128:
- name = "psllqi";
- ID = Intrinsic::x86_sse2_psll_q;
- break;
- case X86::BI__builtin_ia32_psllwi128:
- name = "psllwi";
- ID = Intrinsic::x86_sse2_psll_w;
- break;
- case X86::BI__builtin_ia32_psradi128:
- name = "psradi";
- ID = Intrinsic::x86_sse2_psra_d;
- break;
- case X86::BI__builtin_ia32_psrawi128:
- name = "psrawi";
- ID = Intrinsic::x86_sse2_psra_w;
- break;
- case X86::BI__builtin_ia32_psrldi128:
- name = "psrldi";
- ID = Intrinsic::x86_sse2_psrl_d;
- break;
- case X86::BI__builtin_ia32_psrlqi128:
- name = "psrlqi";
- ID = Intrinsic::x86_sse2_psrl_q;
- break;
- case X86::BI__builtin_ia32_psrlwi128:
- name = "psrlwi";
- ID = Intrinsic::x86_sse2_psrl_w;
- break;
- }
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, name);
- }
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
case X86::BI__builtin_ia32_vec_init_v2si:
@@ -2106,66 +2278,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_ext_v2si:
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
- case X86::BI__builtin_ia32_pslldi:
- case X86::BI__builtin_ia32_psllqi:
- case X86::BI__builtin_ia32_psllwi:
- case X86::BI__builtin_ia32_psradi:
- case X86::BI__builtin_ia32_psrawi:
- case X86::BI__builtin_ia32_psrldi:
- case X86::BI__builtin_ia32_psrlqi:
- case X86::BI__builtin_ia32_psrlwi: {
- Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
- const char *name = 0;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
- switch (BuiltinID) {
- default: llvm_unreachable("Unsupported shift intrinsic!");
- case X86::BI__builtin_ia32_pslldi:
- name = "pslldi";
- ID = Intrinsic::x86_mmx_psll_d;
- break;
- case X86::BI__builtin_ia32_psllqi:
- name = "psllqi";
- ID = Intrinsic::x86_mmx_psll_q;
- break;
- case X86::BI__builtin_ia32_psllwi:
- name = "psllwi";
- ID = Intrinsic::x86_mmx_psll_w;
- break;
- case X86::BI__builtin_ia32_psradi:
- name = "psradi";
- ID = Intrinsic::x86_mmx_psra_d;
- break;
- case X86::BI__builtin_ia32_psrawi:
- name = "psrawi";
- ID = Intrinsic::x86_mmx_psra_w;
- break;
- case X86::BI__builtin_ia32_psrldi:
- name = "psrldi";
- ID = Intrinsic::x86_mmx_psrl_d;
- break;
- case X86::BI__builtin_ia32_psrlqi:
- name = "psrlqi";
- ID = Intrinsic::x86_mmx_psrl_q;
- break;
- case X86::BI__builtin_ia32_psrlwi:
- name = "psrlwi";
- ID = Intrinsic::x86_mmx_psrl_w;
- break;
- }
- llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Ops, name);
- }
- case X86::BI__builtin_ia32_cmpps: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
- return Builder.CreateCall(F, Ops, "cmpps");
- }
- case X86::BI__builtin_ia32_cmpss: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
- return Builder.CreateCall(F, Ops, "cmpss");
- }
case X86::BI__builtin_ia32_ldmxcsr: {
llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
@@ -2182,14 +2294,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.CreateBitCast(Tmp, PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
- case X86::BI__builtin_ia32_cmppd: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
- return Builder.CreateCall(F, Ops, "cmppd");
- }
- case X86::BI__builtin_ia32_cmpsd: {
- llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
- return Builder.CreateCall(F, Ops, "cmpsd");
- }
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
@@ -2268,6 +2372,44 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
+ case X86::BI__builtin_ia32_palignr256: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ SmallVector<llvm::Constant*, 32> Indices;
+ // 256-bit palignr operates on 128-bit lanes, so handle each lane separately.
+ for (unsigned l = 0; l != 2; ++l) {
+ unsigned LaneStart = l * 16;
+ unsigned LaneEnd = (l+1) * 16;
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = shiftVal + i + LaneStart;
+ if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
+ }
+ }
+
+ Value* SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 32) {
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ // Shift amount in bits, as an i32 constant.
+ Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
+
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
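
For a concrete feel of the per-lane indices: shuffle elements 0-31 select from the first operand (Ops[1]) and 32-63 from the second (Ops[0]), so crossing LaneEnd skips 16 slots into the matching lane of the other source. A standalone illustration (not Clang code) that prints the pattern for shiftVal == 1:

    #include <cstdio>

    int main() {
      unsigned shiftVal = 1;                     // example byte shift
      for (unsigned l = 0; l != 2; ++l) {        // two 128-bit lanes
        unsigned LaneStart = l * 16, LaneEnd = (l + 1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16;         // cross into other operand
          printf("%u ", Idx);                    // lane 0: 1..15 32
        }
        printf("\n");                            // lane 1: 17..31 48
      }
    }
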
case X86::BI__builtin_ia32_movntps:
case X86::BI__builtin_ia32_movntpd:
case X86::BI__builtin_ia32_movntdq:
@@ -2285,140 +2427,2013 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return SI;
}
// 3DNow!
- case X86::BI__builtin_ia32_pavgusb:
- case X86::BI__builtin_ia32_pf2id:
- case X86::BI__builtin_ia32_pfacc:
- case X86::BI__builtin_ia32_pfadd:
- case X86::BI__builtin_ia32_pfcmpeq:
- case X86::BI__builtin_ia32_pfcmpge:
- case X86::BI__builtin_ia32_pfcmpgt:
- case X86::BI__builtin_ia32_pfmax:
- case X86::BI__builtin_ia32_pfmin:
- case X86::BI__builtin_ia32_pfmul:
- case X86::BI__builtin_ia32_pfrcp:
- case X86::BI__builtin_ia32_pfrcpit1:
- case X86::BI__builtin_ia32_pfrcpit2:
- case X86::BI__builtin_ia32_pfrsqrt:
- case X86::BI__builtin_ia32_pfrsqit1:
- case X86::BI__builtin_ia32_pfrsqrtit1:
- case X86::BI__builtin_ia32_pfsub:
- case X86::BI__builtin_ia32_pfsubr:
- case X86::BI__builtin_ia32_pi2fd:
- case X86::BI__builtin_ia32_pmulhrw:
- case X86::BI__builtin_ia32_pf2iw:
- case X86::BI__builtin_ia32_pfnacc:
- case X86::BI__builtin_ia32_pfpnacc:
- case X86::BI__builtin_ia32_pi2fw:
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch(BuiltinID) {
- case X86::BI__builtin_ia32_pavgusb:
- name = "pavgusb";
- ID = Intrinsic::x86_3dnow_pavgusb;
- break;
- case X86::BI__builtin_ia32_pf2id:
- name = "pf2id";
- ID = Intrinsic::x86_3dnow_pf2id;
- break;
- case X86::BI__builtin_ia32_pfacc:
- name = "pfacc";
- ID = Intrinsic::x86_3dnow_pfacc;
- break;
- case X86::BI__builtin_ia32_pfadd:
- name = "pfadd";
- ID = Intrinsic::x86_3dnow_pfadd;
- break;
- case X86::BI__builtin_ia32_pfcmpeq:
- name = "pfcmpeq";
- ID = Intrinsic::x86_3dnow_pfcmpeq;
- break;
- case X86::BI__builtin_ia32_pfcmpge:
- name = "pfcmpge";
- ID = Intrinsic::x86_3dnow_pfcmpge;
- break;
- case X86::BI__builtin_ia32_pfcmpgt:
- name = "pfcmpgt";
- ID = Intrinsic::x86_3dnow_pfcmpgt;
- break;
- case X86::BI__builtin_ia32_pfmax:
- name = "pfmax";
- ID = Intrinsic::x86_3dnow_pfmax;
- break;
- case X86::BI__builtin_ia32_pfmin:
- name = "pfmin";
- ID = Intrinsic::x86_3dnow_pfmin;
- break;
- case X86::BI__builtin_ia32_pfmul:
- name = "pfmul";
- ID = Intrinsic::x86_3dnow_pfmul;
- break;
- case X86::BI__builtin_ia32_pfrcp:
- name = "pfrcp";
- ID = Intrinsic::x86_3dnow_pfrcp;
- break;
- case X86::BI__builtin_ia32_pfrcpit1:
- name = "pfrcpit1";
- ID = Intrinsic::x86_3dnow_pfrcpit1;
- break;
- case X86::BI__builtin_ia32_pfrcpit2:
- name = "pfrcpit2";
- ID = Intrinsic::x86_3dnow_pfrcpit2;
- break;
- case X86::BI__builtin_ia32_pfrsqrt:
- name = "pfrsqrt";
- ID = Intrinsic::x86_3dnow_pfrsqrt;
- break;
- case X86::BI__builtin_ia32_pfrsqit1:
- case X86::BI__builtin_ia32_pfrsqrtit1:
- name = "pfrsqit1";
- ID = Intrinsic::x86_3dnow_pfrsqit1;
- break;
- case X86::BI__builtin_ia32_pfsub:
- name = "pfsub";
- ID = Intrinsic::x86_3dnow_pfsub;
- break;
- case X86::BI__builtin_ia32_pfsubr:
- name = "pfsubr";
- ID = Intrinsic::x86_3dnow_pfsubr;
- break;
- case X86::BI__builtin_ia32_pi2fd:
- name = "pi2fd";
- ID = Intrinsic::x86_3dnow_pi2fd;
- break;
- case X86::BI__builtin_ia32_pmulhrw:
- name = "pmulhrw";
- ID = Intrinsic::x86_3dnow_pmulhrw;
- break;
- case X86::BI__builtin_ia32_pf2iw:
- name = "pf2iw";
- ID = Intrinsic::x86_3dnowa_pf2iw;
- break;
- case X86::BI__builtin_ia32_pfnacc:
- name = "pfnacc";
- ID = Intrinsic::x86_3dnowa_pfnacc;
- break;
- case X86::BI__builtin_ia32_pfpnacc:
- name = "pfpnacc";
- ID = Intrinsic::x86_3dnowa_pfpnacc;
- break;
- case X86::BI__builtin_ia32_pi2fw:
- name = "pi2fw";
- ID = Intrinsic::x86_3dnowa_pi2fw;
- break;
+ default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi:
name = "pswapd";
ID = Intrinsic::x86_3dnowa_pswapd;
break;
}
+ llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
+ Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, name);
}
}
}
+
+Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return 0;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeq:
+ ID = Intrinsic::hexagon_C2_cmpeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgt:
+ ID = Intrinsic::hexagon_C2_cmpgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtu:
+ ID = Intrinsic::hexagon_C2_cmpgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeqp:
+ ID = Intrinsic::hexagon_C2_cmpeqp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtp:
+ ID = Intrinsic::hexagon_C2_cmpgtp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtup:
+ ID = Intrinsic::hexagon_C2_cmpgtup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsset:
+ ID = Intrinsic::hexagon_C2_bitsset; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsclr:
+ ID = Intrinsic::hexagon_C2_bitsclr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeqi:
+ ID = Intrinsic::hexagon_C2_cmpeqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgti:
+ ID = Intrinsic::hexagon_C2_cmpgti; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtui:
+ ID = Intrinsic::hexagon_C2_cmpgtui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgei:
+ ID = Intrinsic::hexagon_C2_cmpgei; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgeui:
+ ID = Intrinsic::hexagon_C2_cmpgeui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmplt:
+ ID = Intrinsic::hexagon_C2_cmplt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpltu:
+ ID = Intrinsic::hexagon_C2_cmpltu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsclri:
+ ID = Intrinsic::hexagon_C2_bitsclri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_and:
+ ID = Intrinsic::hexagon_C2_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_or:
+ ID = Intrinsic::hexagon_C2_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_xor:
+ ID = Intrinsic::hexagon_C2_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_andn:
+ ID = Intrinsic::hexagon_C2_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_not:
+ ID = Intrinsic::hexagon_C2_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_orn:
+ ID = Intrinsic::hexagon_C2_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_pxfer_map:
+ ID = Intrinsic::hexagon_C2_pxfer_map; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_any8:
+ ID = Intrinsic::hexagon_C2_any8; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_all8:
+ ID = Intrinsic::hexagon_C2_all8; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_vitpack:
+ ID = Intrinsic::hexagon_C2_vitpack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_mux:
+ ID = Intrinsic::hexagon_C2_mux; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxii:
+ ID = Intrinsic::hexagon_C2_muxii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxir:
+ ID = Intrinsic::hexagon_C2_muxir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxri:
+ ID = Intrinsic::hexagon_C2_muxri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_vmux:
+ ID = Intrinsic::hexagon_C2_vmux; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_mask:
+ ID = Intrinsic::hexagon_C2_mask; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpbeq:
+ ID = Intrinsic::hexagon_A2_vcmpbeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpbgtu:
+ ID = Intrinsic::hexagon_A2_vcmpbgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpheq:
+ ID = Intrinsic::hexagon_A2_vcmpheq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmphgt:
+ ID = Intrinsic::hexagon_A2_vcmphgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmphgtu:
+ ID = Intrinsic::hexagon_A2_vcmphgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpweq:
+ ID = Intrinsic::hexagon_A2_vcmpweq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgt:
+ ID = Intrinsic::hexagon_A2_vcmpwgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgtu:
+ ID = Intrinsic::hexagon_A2_vcmpwgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_tfrpr:
+ ID = Intrinsic::hexagon_C2_tfrpr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_tfrrp:
+ ID = Intrinsic::hexagon_C2_tfrrp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpysmi:
+ ID = Intrinsic::hexagon_M2_mpysmi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_macsip:
+ ID = Intrinsic::hexagon_M2_macsip; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_macsin:
+ ID = Intrinsic::hexagon_M2_macsin; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_acc_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_acc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_nac_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_nac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_acc_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_acc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_nac_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_nac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_up:
+ ID = Intrinsic::hexagon_M2_mpy_up; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_up:
+ ID = Intrinsic::hexagon_M2_mpyu_up; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_rnd_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_rnd_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyi:
+ ID = Intrinsic::hexagon_M2_mpyi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyui:
+ ID = Intrinsic::hexagon_M2_mpyui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_maci:
+ ID = Intrinsic::hexagon_M2_maci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_acci:
+ ID = Intrinsic::hexagon_M2_acci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_accii:
+ ID = Intrinsic::hexagon_M2_accii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_nacci:
+ ID = Intrinsic::hexagon_M2_nacci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_naccii:
+ ID = Intrinsic::hexagon_M2_naccii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_subacc:
+ ID = Intrinsic::hexagon_M2_subacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s0:
+ ID = Intrinsic::hexagon_M2_vmac2s_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s1:
+ ID = Intrinsic::hexagon_M2_vmac2s_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0pack:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s0pack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1pack:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s1pack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2:
+ ID = Intrinsic::hexagon_M2_vmac2; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s0:
+ ID = Intrinsic::hexagon_M2_vmpy2es_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s1:
+ ID = Intrinsic::hexagon_M2_vmpy2es_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s0:
+ ID = Intrinsic::hexagon_M2_vmac2es_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s1:
+ ID = Intrinsic::hexagon_M2_vmac2es_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es:
+ ID = Intrinsic::hexagon_M2_vmac2es; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrmac_s0:
+ ID = Intrinsic::hexagon_M2_vrmac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrmpy_s0:
+ ID = Intrinsic::hexagon_M2_vrmpy_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s0:
+ ID = Intrinsic::hexagon_M2_vdmpyrs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s1:
+ ID = Intrinsic::hexagon_M2_vdmpyrs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s0:
+ ID = Intrinsic::hexagon_M2_vdmacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s1:
+ ID = Intrinsic::hexagon_M2_vdmacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s0:
+ ID = Intrinsic::hexagon_M2_vdmpys_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s1:
+ ID = Intrinsic::hexagon_M2_vdmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s0:
+ ID = Intrinsic::hexagon_M2_cmpyrs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s1:
+ ID = Intrinsic::hexagon_M2_cmpyrs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s0:
+ ID = Intrinsic::hexagon_M2_cmpyrsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s1:
+ ID = Intrinsic::hexagon_M2_cmpyrsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s0:
+ ID = Intrinsic::hexagon_M2_cmacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s1:
+ ID = Intrinsic::hexagon_M2_cmacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s0:
+ ID = Intrinsic::hexagon_M2_cmacsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s1:
+ ID = Intrinsic::hexagon_M2_cmacsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s0:
+ ID = Intrinsic::hexagon_M2_cmpys_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s1:
+ ID = Intrinsic::hexagon_M2_cmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s0:
+ ID = Intrinsic::hexagon_M2_cmpysc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s1:
+ ID = Intrinsic::hexagon_M2_cmpysc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s0:
+ ID = Intrinsic::hexagon_M2_cnacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s1:
+ ID = Intrinsic::hexagon_M2_cnacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s0:
+ ID = Intrinsic::hexagon_M2_cnacsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s1:
+ ID = Intrinsic::hexagon_M2_cnacsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1:
+ ID = Intrinsic::hexagon_M2_vrcmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_acc_s1:
+ ID = Intrinsic::hexagon_M2_vrcmpys_acc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1rp:
+ ID = Intrinsic::hexagon_M2_vrcmpys_s1rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s0:
+ ID = Intrinsic::hexagon_M2_mmacls_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s1:
+ ID = Intrinsic::hexagon_M2_mmacls_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s0:
+ ID = Intrinsic::hexagon_M2_mmachs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s1:
+ ID = Intrinsic::hexagon_M2_mmachs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s0:
+ ID = Intrinsic::hexagon_M2_mmpyl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s1:
+ ID = Intrinsic::hexagon_M2_mmpyl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s0:
+ ID = Intrinsic::hexagon_M2_mmpyh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s1:
+ ID = Intrinsic::hexagon_M2_mmpyh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs0:
+ ID = Intrinsic::hexagon_M2_mmacls_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs1:
+ ID = Intrinsic::hexagon_M2_mmacls_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs0:
+ ID = Intrinsic::hexagon_M2_mmachs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs1:
+ ID = Intrinsic::hexagon_M2_mmachs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyl_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyl_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyh_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_hmmpyl_rs1:
+ ID = Intrinsic::hexagon_M2_hmmpyl_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_hmmpyh_rs1:
+ ID = Intrinsic::hexagon_M2_hmmpyh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s0:
+ ID = Intrinsic::hexagon_M2_mmaculs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s1:
+ ID = Intrinsic::hexagon_M2_mmaculs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s0:
+ ID = Intrinsic::hexagon_M2_mmacuhs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s1:
+ ID = Intrinsic::hexagon_M2_mmacuhs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s0:
+ ID = Intrinsic::hexagon_M2_mmpyul_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s1:
+ ID = Intrinsic::hexagon_M2_mmpyul_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s0:
+ ID = Intrinsic::hexagon_M2_mmpyuh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s1:
+ ID = Intrinsic::hexagon_M2_mmpyuh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs0:
+ ID = Intrinsic::hexagon_M2_mmaculs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs1:
+ ID = Intrinsic::hexagon_M2_mmaculs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs0:
+ ID = Intrinsic::hexagon_M2_mmacuhs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs1:
+ ID = Intrinsic::hexagon_M2_mmacuhs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyul_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyul_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyuh_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyuh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0:
+ ID = Intrinsic::hexagon_M2_vrcmaci_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0:
+ ID = Intrinsic::hexagon_M2_vrcmacr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmaci_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmacr_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmaci_s0:
+ ID = Intrinsic::hexagon_M2_cmaci_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacr_s0:
+ ID = Intrinsic::hexagon_M2_cmacr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0:
+ ID = Intrinsic::hexagon_M2_vrcmpyi_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0:
+ ID = Intrinsic::hexagon_M2_vrcmpyr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmpyi_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmpyr_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyi_s0:
+ ID = Intrinsic::hexagon_M2_cmpyi_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyr_s0:
+ ID = Intrinsic::hexagon_M2_cmpyr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmac_s0_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmac_s0_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vcrotate:
+ ID = Intrinsic::hexagon_S2_vcrotate; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_add:
+ ID = Intrinsic::hexagon_A2_add; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sub:
+ ID = Intrinsic::hexagon_A2_sub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addsat:
+ ID = Intrinsic::hexagon_A2_addsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subsat:
+ ID = Intrinsic::hexagon_A2_subsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addi:
+ ID = Intrinsic::hexagon_A2_addi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_ll:
+ ID = Intrinsic::hexagon_A2_addh_l16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_hl:
+ ID = Intrinsic::hexagon_A2_addh_l16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_ll:
+ ID = Intrinsic::hexagon_A2_addh_l16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_hl:
+ ID = Intrinsic::hexagon_A2_addh_l16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_ll:
+ ID = Intrinsic::hexagon_A2_subh_l16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_hl:
+ ID = Intrinsic::hexagon_A2_subh_l16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_ll:
+ ID = Intrinsic::hexagon_A2_subh_l16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_hl:
+ ID = Intrinsic::hexagon_A2_subh_l16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_ll:
+ ID = Intrinsic::hexagon_A2_addh_h16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_lh:
+ ID = Intrinsic::hexagon_A2_addh_h16_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hl:
+ ID = Intrinsic::hexagon_A2_addh_h16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hh:
+ ID = Intrinsic::hexagon_A2_addh_h16_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_ll:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_lh:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hl:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hh:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_ll:
+ ID = Intrinsic::hexagon_A2_subh_h16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_lh:
+ ID = Intrinsic::hexagon_A2_subh_h16_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hl:
+ ID = Intrinsic::hexagon_A2_subh_h16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hh:
+ ID = Intrinsic::hexagon_A2_subh_h16_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_ll:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_lh:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hl:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hh:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_aslh:
+ ID = Intrinsic::hexagon_A2_aslh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_asrh:
+ ID = Intrinsic::hexagon_A2_asrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addp:
+ ID = Intrinsic::hexagon_A2_addp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addpsat:
+ ID = Intrinsic::hexagon_A2_addpsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addsp:
+ ID = Intrinsic::hexagon_A2_addsp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subp:
+ ID = Intrinsic::hexagon_A2_subp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_neg:
+ ID = Intrinsic::hexagon_A2_neg; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_negsat:
+ ID = Intrinsic::hexagon_A2_negsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_abs:
+ ID = Intrinsic::hexagon_A2_abs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_abssat:
+ ID = Intrinsic::hexagon_A2_abssat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vconj:
+ ID = Intrinsic::hexagon_A2_vconj; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_negp:
+ ID = Intrinsic::hexagon_A2_negp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_absp:
+ ID = Intrinsic::hexagon_A2_absp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_max:
+ ID = Intrinsic::hexagon_A2_max; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxu:
+ ID = Intrinsic::hexagon_A2_maxu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_min:
+ ID = Intrinsic::hexagon_A2_min; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minu:
+ ID = Intrinsic::hexagon_A2_minu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxp:
+ ID = Intrinsic::hexagon_A2_maxp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxup:
+ ID = Intrinsic::hexagon_A2_maxup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minp:
+ ID = Intrinsic::hexagon_A2_minp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minup:
+ ID = Intrinsic::hexagon_A2_minup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfr:
+ ID = Intrinsic::hexagon_A2_tfr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrsi:
+ ID = Intrinsic::hexagon_A2_tfrsi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrp:
+ ID = Intrinsic::hexagon_A2_tfrp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrpi:
+ ID = Intrinsic::hexagon_A2_tfrpi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_zxtb:
+ ID = Intrinsic::hexagon_A2_zxtb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxtb:
+ ID = Intrinsic::hexagon_A2_sxtb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_zxth:
+ ID = Intrinsic::hexagon_A2_zxth; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxth:
+ ID = Intrinsic::hexagon_A2_sxth; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combinew:
+ ID = Intrinsic::hexagon_A2_combinew; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combineii:
+ ID = Intrinsic::hexagon_A2_combineii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_hh:
+ ID = Intrinsic::hexagon_A2_combine_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_hl:
+ ID = Intrinsic::hexagon_A2_combine_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_lh:
+ ID = Intrinsic::hexagon_A2_combine_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_ll:
+ ID = Intrinsic::hexagon_A2_combine_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfril:
+ ID = Intrinsic::hexagon_A2_tfril; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrih:
+ ID = Intrinsic::hexagon_A2_tfrih; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_and:
+ ID = Intrinsic::hexagon_A2_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_or:
+ ID = Intrinsic::hexagon_A2_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_xor:
+ ID = Intrinsic::hexagon_A2_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_not:
+ ID = Intrinsic::hexagon_A2_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_xor_xacc:
+ ID = Intrinsic::hexagon_M2_xor_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subri:
+ ID = Intrinsic::hexagon_A2_subri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_andir:
+ ID = Intrinsic::hexagon_A2_andir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_orir:
+ ID = Intrinsic::hexagon_A2_orir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_andp:
+ ID = Intrinsic::hexagon_A2_andp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_orp:
+ ID = Intrinsic::hexagon_A2_orp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_xorp:
+ ID = Intrinsic::hexagon_A2_xorp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_notp:
+ ID = Intrinsic::hexagon_A2_notp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxtw:
+ ID = Intrinsic::hexagon_A2_sxtw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sat:
+ ID = Intrinsic::hexagon_A2_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sath:
+ ID = Intrinsic::hexagon_A2_sath; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satuh:
+ ID = Intrinsic::hexagon_A2_satuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satub:
+ ID = Intrinsic::hexagon_A2_satub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satb:
+ ID = Intrinsic::hexagon_A2_satb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddub:
+ ID = Intrinsic::hexagon_A2_vaddub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddubs:
+ ID = Intrinsic::hexagon_A2_vaddubs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddh:
+ ID = Intrinsic::hexagon_A2_vaddh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddhs:
+ ID = Intrinsic::hexagon_A2_vaddhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vadduhs:
+ ID = Intrinsic::hexagon_A2_vadduhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddw:
+ ID = Intrinsic::hexagon_A2_vaddw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddws:
+ ID = Intrinsic::hexagon_A2_vaddws; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svavgh:
+ ID = Intrinsic::hexagon_A2_svavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svavghs:
+ ID = Intrinsic::hexagon_A2_svavghs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svnavgh:
+ ID = Intrinsic::hexagon_A2_svnavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svaddh:
+ ID = Intrinsic::hexagon_A2_svaddh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svaddhs:
+ ID = Intrinsic::hexagon_A2_svaddhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svadduhs:
+ ID = Intrinsic::hexagon_A2_svadduhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubh:
+ ID = Intrinsic::hexagon_A2_svsubh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubhs:
+ ID = Intrinsic::hexagon_A2_svsubhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubuhs:
+ ID = Intrinsic::hexagon_A2_svsubuhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vraddub:
+ ID = Intrinsic::hexagon_A2_vraddub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vraddub_acc:
+ ID = Intrinsic::hexagon_A2_vraddub_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vradduh:
+ ID = Intrinsic::hexagon_M2_vradduh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubub:
+ ID = Intrinsic::hexagon_A2_vsubub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsububs:
+ ID = Intrinsic::hexagon_A2_vsububs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubh:
+ ID = Intrinsic::hexagon_A2_vsubh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubhs:
+ ID = Intrinsic::hexagon_A2_vsubhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubuhs:
+ ID = Intrinsic::hexagon_A2_vsubuhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubw:
+ ID = Intrinsic::hexagon_A2_vsubw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubws:
+ ID = Intrinsic::hexagon_A2_vsubws; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabsh:
+ ID = Intrinsic::hexagon_A2_vabsh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabshsat:
+ ID = Intrinsic::hexagon_A2_vabshsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabsw:
+ ID = Intrinsic::hexagon_A2_vabsw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabswsat:
+ ID = Intrinsic::hexagon_A2_vabswsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffw:
+ ID = Intrinsic::hexagon_M2_vabsdiffw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffh:
+ ID = Intrinsic::hexagon_M2_vabsdiffh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vrsadub:
+ ID = Intrinsic::hexagon_A2_vrsadub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vrsadub_acc:
+ ID = Intrinsic::hexagon_A2_vrsadub_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgub:
+ ID = Intrinsic::hexagon_A2_vavgub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguh:
+ ID = Intrinsic::hexagon_A2_vavguh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgh:
+ ID = Intrinsic::hexagon_A2_vavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgh:
+ ID = Intrinsic::hexagon_A2_vnavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgw:
+ ID = Intrinsic::hexagon_A2_vavgw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgw:
+ ID = Intrinsic::hexagon_A2_vnavgw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgwr:
+ ID = Intrinsic::hexagon_A2_vavgwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgwr:
+ ID = Intrinsic::hexagon_A2_vnavgwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgwcr:
+ ID = Intrinsic::hexagon_A2_vavgwcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgwcr:
+ ID = Intrinsic::hexagon_A2_vnavgwcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavghcr:
+ ID = Intrinsic::hexagon_A2_vavghcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavghcr:
+ ID = Intrinsic::hexagon_A2_vnavghcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguw:
+ ID = Intrinsic::hexagon_A2_vavguw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguwr:
+ ID = Intrinsic::hexagon_A2_vavguwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgubr:
+ ID = Intrinsic::hexagon_A2_vavgubr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguhr:
+ ID = Intrinsic::hexagon_A2_vavguhr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavghr:
+ ID = Intrinsic::hexagon_A2_vavghr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavghr:
+ ID = Intrinsic::hexagon_A2_vnavghr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminh:
+ ID = Intrinsic::hexagon_A2_vminh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxh:
+ ID = Intrinsic::hexagon_A2_vmaxh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminub:
+ ID = Intrinsic::hexagon_A2_vminub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxub:
+ ID = Intrinsic::hexagon_A2_vmaxub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminuh:
+ ID = Intrinsic::hexagon_A2_vminuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxuh:
+ ID = Intrinsic::hexagon_A2_vmaxuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminw:
+ ID = Intrinsic::hexagon_A2_vminw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxw:
+ ID = Intrinsic::hexagon_A2_vmaxw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminuw:
+ ID = Intrinsic::hexagon_A2_vminuw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxuw:
+ ID = Intrinsic::hexagon_A2_vmaxuw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r:
+ ID = Intrinsic::hexagon_S2_asr_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r:
+ ID = Intrinsic::hexagon_S2_asl_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r:
+ ID = Intrinsic::hexagon_S2_lsr_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r:
+ ID = Intrinsic::hexagon_S2_lsl_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p:
+ ID = Intrinsic::hexagon_S2_asr_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p:
+ ID = Intrinsic::hexagon_S2_asl_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p:
+ ID = Intrinsic::hexagon_S2_lsr_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p:
+ ID = Intrinsic::hexagon_S2_lsl_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_acc:
+ ID = Intrinsic::hexagon_S2_asr_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_acc:
+ ID = Intrinsic::hexagon_S2_asl_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_acc:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_acc:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_acc:
+ ID = Intrinsic::hexagon_S2_asr_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_acc:
+ ID = Intrinsic::hexagon_S2_asl_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_acc:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_acc:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_nac:
+ ID = Intrinsic::hexagon_S2_asr_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_nac:
+ ID = Intrinsic::hexagon_S2_asl_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_nac:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_nac:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_nac:
+ ID = Intrinsic::hexagon_S2_asr_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_nac:
+ ID = Intrinsic::hexagon_S2_asl_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_nac:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_nac:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_and:
+ ID = Intrinsic::hexagon_S2_asr_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_and:
+ ID = Intrinsic::hexagon_S2_asl_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_and:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_and:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_or:
+ ID = Intrinsic::hexagon_S2_asr_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_or:
+ ID = Intrinsic::hexagon_S2_asl_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_or:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_or:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_and:
+ ID = Intrinsic::hexagon_S2_asr_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_and:
+ ID = Intrinsic::hexagon_S2_asl_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_and:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_and:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_or:
+ ID = Intrinsic::hexagon_S2_asr_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_or:
+ ID = Intrinsic::hexagon_S2_asl_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_or:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_or:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_sat:
+ ID = Intrinsic::hexagon_S2_asr_r_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_sat:
+ ID = Intrinsic::hexagon_S2_asl_r_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r:
+ ID = Intrinsic::hexagon_S2_asr_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r:
+ ID = Intrinsic::hexagon_S2_lsr_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r:
+ ID = Intrinsic::hexagon_S2_asl_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p:
+ ID = Intrinsic::hexagon_S2_asr_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p:
+ ID = Intrinsic::hexagon_S2_lsr_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p:
+ ID = Intrinsic::hexagon_S2_asl_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc:
+ ID = Intrinsic::hexagon_S2_asr_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc:
+ ID = Intrinsic::hexagon_S2_asl_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc:
+ ID = Intrinsic::hexagon_S2_asr_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc:
+ ID = Intrinsic::hexagon_S2_asl_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac:
+ ID = Intrinsic::hexagon_S2_asr_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac:
+ ID = Intrinsic::hexagon_S2_asl_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac:
+ ID = Intrinsic::hexagon_S2_asr_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac:
+ ID = Intrinsic::hexagon_S2_asl_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc:
+ ID = Intrinsic::hexagon_S2_asl_i_r_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc:
+ ID = Intrinsic::hexagon_S2_asl_i_p_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and:
+ ID = Intrinsic::hexagon_S2_asr_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and:
+ ID = Intrinsic::hexagon_S2_asl_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or:
+ ID = Intrinsic::hexagon_S2_asr_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or:
+ ID = Intrinsic::hexagon_S2_asl_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and:
+ ID = Intrinsic::hexagon_S2_asr_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and:
+ ID = Intrinsic::hexagon_S2_asl_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or:
+ ID = Intrinsic::hexagon_S2_asr_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or:
+ ID = Intrinsic::hexagon_S2_asl_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat:
+ ID = Intrinsic::hexagon_S2_asl_i_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd:
+ ID = Intrinsic::hexagon_S2_asr_i_r_rnd; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax:
+ ID = Intrinsic::hexagon_S2_asr_i_r_rnd_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri:
+ ID = Intrinsic::hexagon_S2_addasl_rrri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_valignib:
+ ID = Intrinsic::hexagon_S2_valignib; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_valignrb:
+ ID = Intrinsic::hexagon_S2_valignrb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vspliceib:
+ ID = Intrinsic::hexagon_S2_vspliceib; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplicerb:
+ ID = Intrinsic::hexagon_S2_vsplicerb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplatrh:
+ ID = Intrinsic::hexagon_S2_vsplatrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplatrb:
+ ID = Intrinsic::hexagon_S2_vsplatrb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insert:
+ ID = Intrinsic::hexagon_S2_insert; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxb_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxh_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxw_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxd_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractu:
+ ID = Intrinsic::hexagon_S2_extractu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insertp:
+ ID = Intrinsic::hexagon_S2_insertp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractup:
+ ID = Intrinsic::hexagon_S2_extractup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insert_rp:
+ ID = Intrinsic::hexagon_S2_insert_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractu_rp:
+ ID = Intrinsic::hexagon_S2_extractu_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insertp_rp:
+ ID = Intrinsic::hexagon_S2_insertp_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractup_rp:
+ ID = Intrinsic::hexagon_S2_extractup_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tstbit_i:
+ ID = Intrinsic::hexagon_S2_tstbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_setbit_i:
+ ID = Intrinsic::hexagon_S2_setbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_togglebit_i:
+ ID = Intrinsic::hexagon_S2_togglebit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clrbit_i:
+ ID = Intrinsic::hexagon_S2_clrbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tstbit_r:
+ ID = Intrinsic::hexagon_S2_tstbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_setbit_r:
+ ID = Intrinsic::hexagon_S2_setbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_togglebit_r:
+ ID = Intrinsic::hexagon_S2_togglebit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clrbit_r:
+ ID = Intrinsic::hexagon_S2_clrbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh:
+ ID = Intrinsic::hexagon_S2_asr_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh:
+ ID = Intrinsic::hexagon_S2_lsr_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh:
+ ID = Intrinsic::hexagon_S2_asl_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vh:
+ ID = Intrinsic::hexagon_S2_asr_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vh:
+ ID = Intrinsic::hexagon_S2_asl_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vh:
+ ID = Intrinsic::hexagon_S2_lsr_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vh:
+ ID = Intrinsic::hexagon_S2_lsl_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw:
+ ID = Intrinsic::hexagon_S2_asr_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun:
+ ID = Intrinsic::hexagon_S2_asr_i_svw_trun; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_svw_trun:
+ ID = Intrinsic::hexagon_S2_asr_r_svw_trun; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw:
+ ID = Intrinsic::hexagon_S2_lsr_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw:
+ ID = Intrinsic::hexagon_S2_asl_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vw:
+ ID = Intrinsic::hexagon_S2_asr_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vw:
+ ID = Intrinsic::hexagon_S2_asl_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vw:
+ ID = Intrinsic::hexagon_S2_lsr_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vw:
+ ID = Intrinsic::hexagon_S2_lsl_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwh:
+ ID = Intrinsic::hexagon_S2_vrndpackwh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwhs:
+ ID = Intrinsic::hexagon_S2_vrndpackwhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsxtbh:
+ ID = Intrinsic::hexagon_S2_vsxtbh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vzxtbh:
+ ID = Intrinsic::hexagon_S2_vzxtbh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathub:
+ ID = Intrinsic::hexagon_S2_vsathub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_svsathub:
+ ID = Intrinsic::hexagon_S2_svsathub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_svsathb:
+ ID = Intrinsic::hexagon_S2_svsathb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathb:
+ ID = Intrinsic::hexagon_S2_vsathb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunohb:
+ ID = Intrinsic::hexagon_S2_vtrunohb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunewh:
+ ID = Intrinsic::hexagon_S2_vtrunewh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunowh:
+ ID = Intrinsic::hexagon_S2_vtrunowh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunehb:
+ ID = Intrinsic::hexagon_S2_vtrunehb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsxthw:
+ ID = Intrinsic::hexagon_S2_vsxthw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vzxthw:
+ ID = Intrinsic::hexagon_S2_vzxthw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwh:
+ ID = Intrinsic::hexagon_S2_vsatwh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh:
+ ID = Intrinsic::hexagon_S2_vsatwuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_packhl:
+ ID = Intrinsic::hexagon_S2_packhl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_swiz:
+ ID = Intrinsic::hexagon_A2_swiz; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathub_nopack:
+ ID = Intrinsic::hexagon_S2_vsathub_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathb_nopack:
+ ID = Intrinsic::hexagon_S2_vsathb_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwh_nopack:
+ ID = Intrinsic::hexagon_S2_vsatwh_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh_nopack:
+ ID = Intrinsic::hexagon_S2_vsatwuh_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffob:
+ ID = Intrinsic::hexagon_S2_shuffob; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffeb:
+ ID = Intrinsic::hexagon_S2_shuffeb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffoh:
+ ID = Intrinsic::hexagon_S2_shuffoh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffeh:
+ ID = Intrinsic::hexagon_S2_shuffeh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_parityp:
+ ID = Intrinsic::hexagon_S2_parityp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lfsp:
+ ID = Intrinsic::hexagon_S2_lfsp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clbnorm:
+ ID = Intrinsic::hexagon_S2_clbnorm; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clb:
+ ID = Intrinsic::hexagon_S2_clb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl0:
+ ID = Intrinsic::hexagon_S2_cl0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl1:
+ ID = Intrinsic::hexagon_S2_cl1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clbp:
+ ID = Intrinsic::hexagon_S2_clbp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl0p:
+ ID = Intrinsic::hexagon_S2_cl0p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl1p:
+ ID = Intrinsic::hexagon_S2_cl1p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_brev:
+ ID = Intrinsic::hexagon_S2_brev; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_ct0:
+ ID = Intrinsic::hexagon_S2_ct0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_ct1:
+ ID = Intrinsic::hexagon_S2_ct1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_interleave:
+ ID = Intrinsic::hexagon_S2_interleave; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_deinterleave:
+ ID = Intrinsic::hexagon_S2_deinterleave; break;
+
+ case Hexagon::BI__builtin_SI_to_SXTHI_asrh:
+ ID = Intrinsic::hexagon_SI_to_SXTHI_asrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_orn:
+ ID = Intrinsic::hexagon_A4_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_andn:
+ ID = Intrinsic::hexagon_A4_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_ornp:
+ ID = Intrinsic::hexagon_A4_ornp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_andnp:
+ ID = Intrinsic::hexagon_A4_andnp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_combineir:
+ ID = Intrinsic::hexagon_A4_combineir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_combineri:
+ ID = Intrinsic::hexagon_A4_combineri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpneqi:
+ ID = Intrinsic::hexagon_C4_cmpneqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpneq:
+ ID = Intrinsic::hexagon_C4_cmpneq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpltei:
+ ID = Intrinsic::hexagon_C4_cmpltei; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplte:
+ ID = Intrinsic::hexagon_C4_cmplte; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplteui:
+ ID = Intrinsic::hexagon_C4_cmplteui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplteu:
+ ID = Intrinsic::hexagon_C4_cmplteu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpneq:
+ ID = Intrinsic::hexagon_A4_rcmpneq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpneqi:
+ ID = Intrinsic::hexagon_A4_rcmpneqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpeq:
+ ID = Intrinsic::hexagon_A4_rcmpeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpeqi:
+ ID = Intrinsic::hexagon_A4_rcmpeqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9:
+ ID = Intrinsic::hexagon_C4_fastcorner9; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9_not:
+ ID = Intrinsic::hexagon_C4_fastcorner9_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_andn:
+ ID = Intrinsic::hexagon_C4_and_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_and:
+ ID = Intrinsic::hexagon_C4_and_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_orn:
+ ID = Intrinsic::hexagon_C4_and_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_or:
+ ID = Intrinsic::hexagon_C4_and_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_andn:
+ ID = Intrinsic::hexagon_C4_or_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_and:
+ ID = Intrinsic::hexagon_C4_or_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_orn:
+ ID = Intrinsic::hexagon_C4_or_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_or:
+ ID = Intrinsic::hexagon_C4_or_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_addaddi:
+ ID = Intrinsic::hexagon_S4_addaddi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_subaddi:
+ ID = Intrinsic::hexagon_S4_subaddi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_xacc:
+ ID = Intrinsic::hexagon_M4_xor_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_and:
+ ID = Intrinsic::hexagon_M4_and_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_or:
+ ID = Intrinsic::hexagon_M4_and_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_xor:
+ ID = Intrinsic::hexagon_M4_and_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_andn:
+ ID = Intrinsic::hexagon_M4_and_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_and:
+ ID = Intrinsic::hexagon_M4_xor_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_or:
+ ID = Intrinsic::hexagon_M4_xor_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_andn:
+ ID = Intrinsic::hexagon_M4_xor_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_and:
+ ID = Intrinsic::hexagon_M4_or_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_or:
+ ID = Intrinsic::hexagon_M4_or_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_xor:
+ ID = Intrinsic::hexagon_M4_or_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_andn:
+ ID = Intrinsic::hexagon_M4_or_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_andix:
+ ID = Intrinsic::hexagon_S4_or_andix; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_andi:
+ ID = Intrinsic::hexagon_S4_or_andi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_ori:
+ ID = Intrinsic::hexagon_S4_or_ori; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_modwrapu:
+ ID = Intrinsic::hexagon_A4_modwrapu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_cround_rr:
+ ID = Intrinsic::hexagon_A4_cround_rr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_ri:
+ ID = Intrinsic::hexagon_A4_round_ri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_rr:
+ ID = Intrinsic::hexagon_A4_round_rr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat:
+ ID = Intrinsic::hexagon_A4_round_ri_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_rr_sat:
+ ID = Intrinsic::hexagon_A4_round_rr_sat; break;
+
+ }
+
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+}
+
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
@@ -2506,5 +4521,4 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Ops, "");
}
}
- return 0;
}
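
The Hexagon switch above is purely mechanical: each __builtin_HEXAGON_* builtin ID selects the like-named llvm.hexagon.* intrinsic, and the shared tail emits a single call with the operands gathered earlier. As a hedged illustration (not part of this diff), source code reaching the A2_addsat case handled above might look like:

    // Illustrative only: with a Hexagon-targeted clang this should lower,
    // via the switch above, to one call of the llvm.hexagon.A2.addsat
    // intrinsic; the exact IR spelling is assumed, not taken from the diff.
    int saturating_add(int a, int b) {
      return __builtin_HEXAGON_A2_addsat(a, b);
    }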
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
index b5e6e0d..7c08650 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -196,7 +196,8 @@ void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
GlobalDecl(ctor, Ctor_Base)))
return;
- const CGFunctionInfo &fnInfo = getTypes().getFunctionInfo(ctor, ctorType);
+ const CGFunctionInfo &fnInfo =
+ getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
llvm::Function *fn =
cast<llvm::Function>(GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo));
@@ -218,11 +219,10 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
if (llvm::GlobalValue *existing = GetGlobalValue(name))
return existing;
- if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(ctor, ctorType);
+ if (!fnInfo)
+ fnInfo = &getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
- const FunctionProtoType *proto = ctor->getType()->castAs<FunctionProtoType>();
- llvm::FunctionType *fnType =
- getTypes().GetFunctionType(*fnInfo, proto->isVariadic());
+ llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
}
@@ -260,7 +260,8 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
if (dtorType == Dtor_Base && !TryEmitBaseDestructorAsAlias(dtor))
return;
- const CGFunctionInfo &fnInfo = getTypes().getFunctionInfo(dtor, dtorType);
+ const CGFunctionInfo &fnInfo =
+ getTypes().arrangeCXXDestructor(dtor, dtorType);
llvm::Function *fn =
cast<llvm::Function>(GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo));
@@ -282,11 +283,9 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
if (llvm::GlobalValue *existing = GetGlobalValue(name))
return existing;
- if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(dtor, dtorType);
-
- llvm::FunctionType *fnType =
- getTypes().GetFunctionType(*fnInfo, false);
+ if (!fnInfo) fnInfo = &getTypes().arrangeCXXDestructor(dtor, dtorType);
+ llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
}
@@ -359,12 +358,10 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
// -O does that. But need to support -O0 as well.
if (MD->isVirtual() && Type != Dtor_Base) {
// Compute the function type we're calling.
- const CGFunctionInfo *FInfo =
- &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
- Dtor_Complete);
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- llvm::Type *Ty
- = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
+ const CGFunctionInfo &FInfo =
+ CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
Ty = Ty->getPointerTo()->getPointerTo();
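
The hunks above migrate CGCXX.cpp from the old getFunctionInfo entry points to the new arrange* family and drop the separate isVariadic flag from GetFunctionType, since arity information now travels inside CGFunctionInfo. A sketch of the call-site pattern, using identifiers from the hunks with the surrounding declarations elided (so not compilable on its own):

    // Before: variadic-ness supplied separately, next to the info.
    //   const CGFunctionInfo &fnInfo = getTypes().getFunctionInfo(ctor, ctorType);
    //   llvm::FunctionType *fnType =
    //       getTypes().GetFunctionType(fnInfo, proto->isVariadic());
    //
    // After: RequiredArgs is recorded inside CGFunctionInfo, so the LLVM
    // function type is derived from the info alone.
    //   const CGFunctionInfo &fnInfo =
    //       getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
    //   llvm::FunctionType *fnType = getTypes().GetFunctionType(fnInfo);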
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
index 248448c..befebbe 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
@@ -49,9 +49,8 @@ llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
- FPT->isVariadic());
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
return llvm::Constant::getNullValue(FTy->getPointerTo());
}
@@ -71,6 +70,11 @@ llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
return GetBogusMemberPointer(CGM, E->getType());
}
+llvm::Constant *CGCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src) {
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
llvm::Value *
CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
@@ -90,11 +94,6 @@ CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
}
llvm::Constant *
-CGCXXABI::EmitMemberPointerConversion(llvm::Constant *C, const CastExpr *E) {
- return GetBogusMemberPointer(CGM, E->getType());
-}
-
-llvm::Constant *
CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
return GetBogusMemberPointer(CGM, QualType(MPT, 0));
}
@@ -110,6 +109,10 @@ llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
return GetBogusMemberPointer(CGM, QualType(MPT, 0));
}
+llvm::Constant *CGCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
+ return GetBogusMemberPointer(CGM, MPT);
+}
+
bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
// Fake answer.
return true;
@@ -169,6 +172,28 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
const VarDecl &D,
- llvm::GlobalVariable *GV) {
+ llvm::GlobalVariable *GV,
+ bool PerformInit) {
ErrorUnsupportedABI(CGF, "static local variable initialization");
}
+
+/// Returns the adjustment, in bytes, required for the given
+/// member-pointer operation. Returns null if no adjustment is
+/// required.
+llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer);
+
+ QualType derivedType;
+ if (E->getCastKind() == CK_DerivedToBaseMemberPointer)
+ derivedType = E->getSubExpr()->getType();
+ else
+ derivedType = E->getType();
+
+ const CXXRecordDecl *derivedClass =
+ derivedType->castAs<MemberPointerType>()->getClass()->getAsCXXRecordDecl();
+
+ return CGM.GetNonVirtualBaseClassOffset(derivedClass,
+ E->path_begin(),
+ E->path_end());
+}
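
getMemberPointerAdjustment, added above, computes the constant non-virtual base-class offset a member-pointer cast must fold in; the cast direction decides whether the derived class is named by the sub-expression or by the result type. A self-contained, hedged example of C++ source that produces such a cast (not from the diff):

    #include <cassert>

    struct Base1 { int a; };
    struct Base2 { int b; };
    struct Derived : Base1, Base2 { int c; };

    int main() {
      int Base2::*pb = &Base2::b;
      // Base-to-derived member-pointer conversion: the stored offset must be
      // adjusted by Base2's offset inside Derived, which is the constant
      // getMemberPointerAdjustment returns for this kind of cast.
      int Derived::*pd = pb;
      Derived d;
      d.b = 7;
      assert(d.*pd == 7);   // the adjusted pointer still designates b
      return 0;
    }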
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
index c2abf35..4e045f5 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -42,17 +42,17 @@ namespace CodeGen {
class CGCXXABI {
protected:
CodeGenModule &CGM;
- llvm::OwningPtr<MangleContext> MangleCtx;
+ OwningPtr<MangleContext> MangleCtx;
CGCXXABI(CodeGenModule &CGM)
: CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
protected:
ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
- return CGF.CXXThisDecl;
+ return CGF.CXXABIThisDecl;
}
llvm::Value *&getThisValue(CodeGenFunction &CGF) {
- return CGF.CXXThisValue;
+ return CGF.CXXABIThisValue;
}
ImplicitParamDecl *&getVTTDecl(CodeGenFunction &CGF) {
@@ -100,16 +100,16 @@ public:
llvm::Value *MemPtr,
const MemberPointerType *MPT);
- /// Perform a derived-to-base or base-to-derived member pointer
- /// conversion.
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion.
virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src);
- /// Perform a derived-to-base or base-to-derived member pointer
- /// conversion on a constant member pointer.
- virtual llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
- const CastExpr *E);
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion on a constant value.
+ virtual llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
/// Return true if the given member pointer can be zero-initialized
/// (in the C++ sense) with an LLVM zeroinitializer.
@@ -125,6 +125,9 @@ public:
virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset);
+ /// Create a member pointer for the given member pointer constant.
+ virtual llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+
/// Emit a comparison between two member pointers. Returns an i1.
virtual llvm::Value *
EmitMemberPointerComparison(CodeGenFunction &CGF,
@@ -139,6 +142,15 @@ public:
llvm::Value *MemPtr,
const MemberPointerType *MPT);
+protected:
+ /// A utility method for computing the offset required for the given
+ /// base-to-derived or derived-to-base member-pointer conversion.
+ /// Does not handle virtual conversions (in case we ever fully
+ /// support an ABI that allows this). Returns null if no adjustment
+ /// is required.
+ llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
+
+public:
/// Build the signature of the given constructor variant by adding
/// any required parameters. For convenience, ResTy has been
/// initialized to 'void', and ArgTys has been initialized with the
@@ -228,12 +240,14 @@ public:
/// Emits the guarded initializer and destructor setup for the given
/// variable, given that it couldn't be emitted as a constant.
+ /// If \p PerformInit is false, the initialization has been folded to a
+ /// constant and should not be performed.
///
/// The variable may be:
/// - a static local variable
/// - a static data member of a class template instantiation
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
- llvm::GlobalVariable *DeclPtr);
+ llvm::GlobalVariable *DeclPtr, bool PerformInit);
};
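
The new PerformInit flag on EmitGuardedInit separates two situations, per the doc comment above: an initializer the front end already folded to a constant (only guarded destructor registration remains) versus one that must genuinely run under the guard. A hedged source-level illustration, inferred from that comment rather than taken from the diff:

    extern int runtimeValue();       // assumed: some non-constant source

    struct Widget {
      int v;
      constexpr Widget(int x) : v(x) {}
      ~Widget();                     // user-provided, so teardown is registered
    };

    Widget &get() {
      static Widget w(42);           // constant-foldable: PerformInit == false
      return w;
    }

    int &getDynamic() {
      static int i = runtimeValue(); // dynamic: PerformInit == true
      return i;
    }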
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 6ae2d0c..4455f1a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -17,6 +17,7 @@
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -66,29 +67,39 @@ static CanQualType GetReturnType(QualType RetTy) {
return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
+/// Arrange the argument and result information for a value of the
+/// given unprototyped function type.
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
- return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
- SmallVector<CanQualType, 16>(),
- FTNP->getExtInfo());
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
+ // When translating an unprototyped function type, always use a
+ // variadic type.
+ return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
+ ArrayRef<CanQualType>(),
+ FTNP->getExtInfo(),
+ RequiredArgs(0));
}
-/// \param Args - contains any initial parameters besides those
-/// in the formal type
-static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &ArgTys,
+/// Arrange the argument and result information for a value of the
+/// given function type, on top of any implicit parameters already
+/// stored.
+static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &argTypes,
CanQual<FunctionProtoType> FTP) {
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
- CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
- return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ argTypes.push_back(FTP->getArgType(i));
+ CanQualType resultType = FTP->getResultType().getUnqualifiedType();
+ return CGT.arrangeFunctionType(resultType, argTypes,
+ FTP->getExtInfo(), required);
}
+/// Arrange the argument and result information for a value of the
+/// given function type.
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
- SmallVector<CanQualType, 16> ArgTys;
- return ::getFunctionInfo(*this, ArgTys, FTP);
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
+ SmallVector<CanQualType, 16> argTypes;
+ return ::arrangeFunctionType(*this, argTypes, FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D) {
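
The hunk above threads a new RequiredArgs value through arrangeFunctionType: RequiredArgs::All for fixed-arity functions, RequiredArgs(N) when only the first N arguments are required (variadic prototypes), and RequiredArgs(0) for unprototyped function types, which are treated as fully variadic. A minimal self-contained model of that encoding (an assumption for illustration; the real class lives in Clang's CodeGen library):

    #include <cassert>

    class RequiredArgsModel {
      unsigned NumRequired;                     // ~0U encodes "all required"
    public:
      enum All_t { All };
      RequiredArgsModel(All_t) : NumRequired(~0U) {}
      explicit RequiredArgsModel(unsigned N) : NumRequired(N) {}
      bool allowsOptionalArgs() const { return NumRequired != ~0U; }
      unsigned getNumRequiredArgs() const {
        assert(allowsOptionalArgs());
        return NumRequired;
      }
    };

    int main() {
      RequiredArgsModel fixed(RequiredArgsModel::All);  // void f(int, int)
      RequiredArgsModel variadic(1);                    // void g(int, ...)
      RequiredArgsModel unprototyped(0);                // K&R-style declaration
      assert(!fixed.allowsOptionalArgs());
      assert(variadic.getNumRequiredArgs() == 1);
      assert(unprototyped.getNumRequiredArgs() == 0);
      return 0;
    }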
@@ -111,162 +122,250 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
return CC_C;
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
- const FunctionProtoType *FTP) {
- SmallVector<CanQualType, 16> ArgTys;
+/// Arrange the argument and result information for a call to an
+/// unknown C++ non-static member function of the given abstract type.
+/// The member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP) {
+ SmallVector<CanQualType, 16> argTypes;
// Add the 'this' pointer.
- ArgTys.push_back(GetThisType(Context, RD));
+ argTypes.push_back(GetThisType(Context, RD));
- return ::getFunctionInfo(*this, ArgTys,
+ return ::arrangeFunctionType(*this, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
- SmallVector<CanQualType, 16> ArgTys;
-
+/// Arrange the argument and result information for a declaration or
+/// definition of the given C++ non-static member function. The
+/// member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
- // Add the 'this' pointer unless this is a static method.
- if (MD->isInstance())
- ArgTys.push_back(GetThisType(Context, MD->getParent()));
+ CanQual<FunctionProtoType> prototype = GetFormalType(MD);
+
+ if (MD->isInstance()) {
+ // The abstract case is perfectly fine.
+ return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
+ }
- return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
+ return arrangeFunctionType(prototype);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
- CXXCtorType Type) {
- SmallVector<CanQualType, 16> ArgTys;
- ArgTys.push_back(GetThisType(Context, D->getParent()));
- CanQualType ResTy = Context.VoidTy;
+/// Arrange the argument and result information for a declaration
+/// or definition to the given constructor variant.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
+ CXXCtorType ctorKind) {
+ SmallVector<CanQualType, 16> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
- TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
+ TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+
// Add the formal parameters.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
+ argTypes.push_back(FTP->getArgType(i));
- return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
- CXXDtorType Type) {
- SmallVector<CanQualType, 2> ArgTys;
- ArgTys.push_back(GetThisType(Context, D->getParent()));
- CanQualType ResTy = Context.VoidTy;
+/// Arrange the argument and result information for a declaration,
+/// definition, or call to the given destructor variant. It so
+/// happens that all three cases produce the same information.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType dtorKind) {
+ SmallVector<CanQualType, 2> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
- TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
+ TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
CanQual<FunctionProtoType> FTP = GetFormalType(D);
assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
- return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
+ RequiredArgs::All);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
+/// Arrange the argument and result information for the declaration or
+/// definition of the given function.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isInstance())
- return getFunctionInfo(MD);
+ return arrangeCXXMethodDeclaration(MD);
CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+
assert(isa<FunctionType>(FTy));
- if (isa<FunctionNoProtoType>(FTy))
- return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
+
+ // When declaring a function without a prototype, always use a
+ // non-variadic type.
+ if (isa<FunctionNoProtoType>(FTy)) {
+ CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
+ return arrangeFunctionType(noProto->getResultType(),
+ ArrayRef<CanQualType>(),
+ noProto->getExtInfo(),
+ RequiredArgs::All);
+ }
+
assert(isa<FunctionProtoType>(FTy));
- return getFunctionInfo(FTy.getAs<FunctionProtoType>());
+ return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of an Objective-C method.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
+ // It happens that this is the same as a call with no optional
+ // arguments, except also using the formal 'self' type.
+ return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
- SmallVector<CanQualType, 16> ArgTys;
- ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
- ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+/// Arrange the argument and result information for the function type
+/// through which to perform a send to the given Objective-C method,
+/// using the given receiver type. The receiver type is not always
+/// the 'self' type of the method or even an Objective-C pointer type.
+/// This is *not* the right method for actually performing such a
+/// message send, due to the possibility of optional arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType) {
+ SmallVector<CanQualType, 16> argTys;
+ argTys.push_back(Context.getCanonicalParamType(receiverType));
+ argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
// FIXME: Kill copy?
for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
e = MD->param_end(); i != e; ++i) {
- ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+ argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
}
FunctionType::ExtInfo einfo;
einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
- if (getContext().getLangOptions().ObjCAutoRefCount &&
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
MD->hasAttr<NSReturnsRetainedAttr>())
einfo = einfo.withProducesResult(true);
- return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
+ RequiredArgs required =
+ (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
+
+ return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
+ einfo, required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
// FIXME: Do we need to handle ObjCMethodDecl?
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
- return getFunctionInfo(CD, GD.getCtorType());
+ return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
- return getFunctionInfo(DD, GD.getDtorType());
+ return arrangeCXXDestructor(DD, GD.getDtorType());
+
+ return arrangeFunctionDeclaration(FD);
+}
- return getFunctionInfo(FD);
+/// Figure out the rules for calling a function with the given formal
+/// type using the given arguments. The arguments are necessary
+/// because the function might be unprototyped, in which case it's
+/// target-dependent in crazy ways.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
+ const FunctionType *fnType) {
+ RequiredArgs required = RequiredArgs::All;
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
+ if (proto->isVariadic())
+ required = RequiredArgs(proto->getNumArgs());
+ } else if (CGM.getTargetCodeGenInfo()
+ .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
+ required = RequiredArgs(0);
+ }
+
+ return arrangeFunctionCall(fnType->getResultType(), args,
+ fnType->getExtInfo(), required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const CallArgList &Args,
- const FunctionType::ExtInfo &Info) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(QualType resultType,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
// FIXME: Kill copy.
- SmallVector<CanQualType, 16> ArgTys;
- for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ SmallVector<CanQualType, 16> argTypes;
+ for (CallArgList::const_iterator i = args.begin(), e = args.end();
i != e; ++i)
- ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
- return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+ argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const FunctionArgList &Args,
- const FunctionType::ExtInfo &Info) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
+ const FunctionArgList &args,
+ const FunctionType::ExtInfo &info,
+ bool isVariadic) {
// FIXME: Kill copy.
- SmallVector<CanQualType, 16> ArgTys;
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ SmallVector<CanQualType, 16> argTypes;
+ for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
i != e; ++i)
- ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
- return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+ argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
+
+ RequiredArgs required =
+ (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
}
-const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
- SmallVector<CanQualType, 1> args;
- return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
+const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
+ return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
+ FunctionType::ExtInfo(), RequiredArgs::All);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
- const SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info) {
+/// Arrange the argument and result information for an abstract value
+/// of a given function type. This is the method which all of the
+/// above functions ultimately defer to.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
#ifndef NDEBUG
- for (SmallVectorImpl<CanQualType>::const_iterator
- I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
+ for (ArrayRef<CanQualType>::const_iterator
+ I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
assert(I->isCanonicalAsParam());
#endif
- unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
+ unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());
+ CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
- void *InsertPos = 0;
- CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
+ void *insertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
if (FI)
return *FI;
- // Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
- Info.getHasRegParm(), Info.getRegParm(), ResTy,
- ArgTys.data(), ArgTys.size());
- FunctionInfos.InsertNode(FI, InsertPos);
+ // Construct the function info. We co-allocate the ArgInfos.
+ FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
+ FunctionInfos.InsertNode(FI, insertPos);
- bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
- assert(Inserted && "Recursively being processed?");
+ bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
+ assert(inserted && "Recursively being processed?");
// Compute ABI information.
getABIInfo().computeInfo(*FI);
@@ -274,39 +373,42 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// Loop over all of the computed argument and return value info. If any of
// them are direct or extend without a specified coerce type, specify the
// default now.
- ABIArgInfo &RetInfo = FI->getReturnInfo();
- if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
- RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));
+ ABIArgInfo &retInfo = FI->getReturnInfo();
+ if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
+ retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
I != E; ++I)
if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
I->info.setCoerceToType(ConvertType(I->type));
- bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
- assert(Erased && "Not in set?");
+ bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
+ assert(erased && "Not in set?");
return *FI;
}
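The lookup-or-create dance above (Profile, FindNodeOrInsertPos, InsertNode) is LLVM's standard FoldingSet interning idiom. A self-contained sketch with a toy node type; CGFunctionInfo follows the same shape:

    #include "llvm/ADT/FoldingSet.h"

    struct ToyInfo : llvm::FoldingSetNode {
      unsigned Key;
      // Must hash exactly the fields that make a node unique.
      void Profile(llvm::FoldingSetNodeID &ID) { ID.AddInteger(Key); }
    };

    ToyInfo *getOrCreate(llvm::FoldingSet<ToyInfo> &Set, unsigned Key) {
      llvm::FoldingSetNodeID ID;
      ID.AddInteger(Key);                   // mirror ToyInfo::Profile
      void *InsertPos = 0;
      if (ToyInfo *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
        return Existing;                    // already interned
      ToyInfo *FI = new ToyInfo();
      FI->Key = Key;
      Set.InsertNode(FI, InsertPos);        // insert at remembered position
      return FI;
    }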
-CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn, bool returnsRetained,
- bool _HasRegParm, unsigned _RegParm,
- CanQualType ResTy,
- const CanQualType *ArgTys,
- unsigned NumArgTys)
- : CallingConvention(_CallingConvention),
- EffectiveCallingConvention(_CallingConvention),
- NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
- HasRegParm(_HasRegParm), RegParm(_RegParm)
-{
- NumArgs = NumArgTys;
-
- // FIXME: Coallocate with the CGFunctionInfo object.
- Args = new ArgInfo[1 + NumArgTys];
- Args[0].type = ResTy;
- for (unsigned i = 0; i != NumArgTys; ++i)
- Args[1 + i].type = ArgTys[i];
+CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
+ const FunctionType::ExtInfo &info,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required) {
+ void *buffer = operator new(sizeof(CGFunctionInfo) +
+ sizeof(ArgInfo) * (argTypes.size() + 1));
+ CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
+ FI->CallingConvention = llvmCC;
+ FI->EffectiveCallingConvention = llvmCC;
+ FI->ASTCallingConvention = info.getCC();
+ FI->NoReturn = info.getNoReturn();
+ FI->ReturnsRetained = info.getProducesResult();
+ FI->Required = required;
+ FI->HasRegParm = info.getHasRegParm();
+ FI->RegParm = info.getRegParm();
+ FI->NumArgs = argTypes.size();
+ FI->getArgsBuffer()[0].type = resultType;
+ for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
+ FI->getArgsBuffer()[i + 1].type = argTypes[i];
+ return FI;
}
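CGFunctionInfo::create replaces the old separately-new'd Args array with a single allocation holding the object plus a trailing ArgInfo buffer; getArgsBuffer() (see the CGCall.h hunk below) recovers it as `this + 1`. The idiom in miniature, with a toy payload type:

    #include <new>

    struct Header {
      unsigned NumElts;
      // The trailing buffer starts right after the object. This assumes
      // the element type's alignment does not exceed the header's,
      // which holds for ArgInfo inside CGFunctionInfo.
      int *buffer() { return reinterpret_cast<int *>(this + 1); }

      static Header *create(unsigned n) {
        void *mem = operator new(sizeof(Header) + sizeof(int) * n);
        Header *h = new (mem) Header();     // placement-new the header
        h->NumElts = n;
        for (unsigned i = 0; i != n; ++i)
          h->buffer()[i] = 0;               // initialize trailing elements
        return h;
      }
    };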
/***/
@@ -366,7 +468,7 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
QualType EltTy = CT->getElementType();
llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
- llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 0, "imag");
+ llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 1, "imag");
EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
} else {
EmitStoreThroughLValue(RValue::get(AI), LV);
@@ -521,7 +623,9 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
SI->setAlignment(1);
}
} else {
- CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ if (LowAlignment)
+ SI->setAlignment(1);
}
}
@@ -611,20 +715,24 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
return false;
}
-llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
- const CGFunctionInfo &FI = getFunctionInfo(GD);
+bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
+ if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
+ if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::LongDouble)
+ return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
+ }
+ }
- // For definition purposes, don't consider a K&R function variadic.
- bool Variadic = false;
- if (const FunctionProtoType *FPT =
- cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
- Variadic = FPT->isVariadic();
+ return false;
+}
- return GetFunctionType(FI, Variadic);
+llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
+ return GetFunctionType(FI);
}
llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
assert(Inserted && "Recursively being processed?");
@@ -675,6 +783,9 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Insert a padding type to ensure proper alignment.
+ if (llvm::Type *PaddingType = argAI.getPaddingType())
+ argTypes.push_back(PaddingType);
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
@@ -697,7 +808,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
assert(Erased && "Not in set?");
- return llvm::FunctionType::get(resultType, argTypes, isVariadic);
+ return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
@@ -709,18 +820,18 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
- Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
else
- Info = &getFunctionInfo(MD);
- return GetFunctionType(*Info, FPT->isVariadic());
+ Info = &arrangeCXXMethodDeclaration(MD);
+ return GetFunctionType(*Info);
}
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
AttributeListType &PAL,
unsigned &CallingConv) {
- unsigned FuncAttrs = 0;
- unsigned RetAttrs = 0;
+ llvm::Attributes FuncAttrs;
+ llvm::Attributes RetAttrs;
CallingConv = FI.getEffectiveCallingConvention();
@@ -806,7 +917,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
const ABIArgInfo &AI = it->info;
- unsigned Attributes = 0;
+ llvm::Attributes Attrs;
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
@@ -814,20 +925,24 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (AI.getKind()) {
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerOrEnumerationType())
- Attributes |= llvm::Attribute::SExt;
+ Attrs |= llvm::Attribute::SExt;
else if (ParamType->isUnsignedIntegerOrEnumerationType())
- Attributes |= llvm::Attribute::ZExt;
+ Attrs |= llvm::Attribute::ZExt;
// FALL THROUGH
case ABIArgInfo::Direct:
if (RegParm > 0 &&
- (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ (ParamType->isIntegerType() || ParamType->isPointerType() ||
+ ParamType->isReferenceType())) {
RegParm -=
(Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
if (RegParm >= 0)
- Attributes |= llvm::Attribute::InReg;
+ Attrs |= llvm::Attribute::InReg;
}
// FIXME: handle sseregparm someday...
+ // Increment Index if there is padding.
+ Index += (AI.getPaddingType() != 0);
+
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType()))
Index += STy->getNumElements()-1; // 1 will be added below.
@@ -835,9 +950,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
if (AI.getIndirectByVal())
- Attributes |= llvm::Attribute::ByVal;
+ Attrs |= llvm::Attribute::ByVal;
- Attributes |=
+ Attrs |=
llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
// byval disables readnone and readonly.
FuncAttrs &= ~(llvm::Attribute::ReadOnly |
@@ -859,8 +974,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
}
}
- if (Attributes)
- PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
+ if (Attrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
++Index;
}
if (FuncAttrs)
@@ -970,6 +1085,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Skip the dummy padding argument.
+ if (ArgI.getPaddingType())
+ ++AI;
+
// If we have the trivial case, handle it with no muss and fuss.
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
ArgI.getCoerceToType() == ConvertType(Ty) &&
@@ -991,7 +1110,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
break;
}
- llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
// The alignment we need to use is the max of the requested alignment for
// the argument plus the alignment required by our access code below.
@@ -1015,15 +1134,36 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
- if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
- Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ if (STy && STy->getNumElements() > 1) {
+ uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
+ llvm::Type *DstTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ if (SrcSize <= DstSize) {
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+ } else {
+ llvm::AllocaInst *TempAlloca =
+ CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
+ TempAlloca->setAlignment(AlignmentToUse);
+ llvm::Value *TempV = TempAlloca;
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
@@ -1047,10 +1187,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If this structure was expanded into multiple arguments then
// we need to create a temporary and reconstruct it from the
// arguments.
- llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
- llvm::Function::arg_iterator End =
- ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
- EmitParmDecl(*Arg, Temp, ArgNo);
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
+ CharUnits Align = getContext().getDeclAlign(Arg);
+ Alloca->setAlignment(Align.getQuantity());
+ LValue LV = MakeAddrLValue(Alloca, Ty, Align);
+ llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
+ EmitParmDecl(*Arg, Alloca, ArgNo);
// Name the arguments used in expansion and increment AI.
unsigned Index = 0;
@@ -1076,6 +1218,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(AI == Fn->arg_end() && "Argument mismatch!");
}
+static void eraseUnusedBitCasts(llvm::Instruction *insn) {
+ while (insn->use_empty()) {
+ llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
+ if (!bitcast) return;
+
+ // This is "safe" because we would have used a ConstantExpr otherwise.
+ insn = cast<llvm::Instruction>(bitcast->getOperand(0));
+ bitcast->eraseFromParent();
+ }
+}
+
/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
llvm::Value *result) {
@@ -1154,9 +1307,54 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(result, resultType);
}
+/// If this is a +1 of the value of an immutable 'self', remove it.
+static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // This is only applicable to a method with an immutable 'self'.
+ const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+ if (!method) return 0;
+ const VarDecl *self = method->getSelfDecl();
+ if (!self->getType().isConstQualified()) return 0;
+
+ // Look for a retain call.
+ llvm::CallInst *retainCall =
+ dyn_cast<llvm::CallInst>(result->stripPointerCasts());
+ if (!retainCall ||
+ retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
+ return 0;
+
+ // Look for an ordinary load of 'self'.
+ llvm::Value *retainedValue = retainCall->getArgOperand(0);
+ llvm::LoadInst *load =
+ dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
+ if (!load || load->isAtomic() || load->isVolatile() ||
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
+ return 0;
+
+ // Okay! Burn it all down. This relies for correctness on the
+ // assumption that the retain is emitted as part of the return and
+ // that thereafter everything is used "linearly".
+ llvm::Type *resultType = result->getType();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(result));
+ assert(retainCall->use_empty());
+ retainCall->eraseFromParent();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
+
+ return CGF.Builder.CreateBitCast(load, resultType);
+}
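Each step of the match above looks through pointer casts before testing the instruction's shape. Factored out as a hypothetical helper (the name is not in the diff):

    // Recognize "a plain, non-atomic, non-volatile load of `slot`"
    // hiding behind any number of bitcasts -- the shape that
    // tryRemoveRetainOfSelf requires for the retained operand.
    static llvm::LoadInst *asSimpleLoadOf(llvm::Value *v, llvm::Value *slot) {
      llvm::LoadInst *load =
        llvm::dyn_cast<llvm::LoadInst>(v->stripPointerCasts());
      if (!load || load->isAtomic() || load->isVolatile())
        return 0;
      if (load->getPointerOperand() != slot)
        return 0;
      return load;
    }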
+
/// Emit an ARC autorelease of the result of a function.
+///
+/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
llvm::Value *result) {
+ // If we're returning 'self', kill the initial retain. This is a
+ // heuristic attempt to "encourage correctness" in the really unfortunate
+ // case where we have a return of self during a dealloc and we desperately
+ // need to avoid the possible autorelease.
+ if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
+ return self;
+
// At -O0, try to emit a fused retain/autorelease.
if (CGF.shouldUseFusedARCCalls())
if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
@@ -1165,6 +1363,44 @@ static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
return CGF.EmitARCAutoreleaseReturnValue(result);
}
+/// Heuristically search for a dominating store to the return-value slot.
+static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+ // If there are multiple uses of the return-value slot, just check
+ // for something immediately preceding the IP. Sometimes this can
+ // happen with how we generate implicit-returns; it can also happen
+ // with noreturn cleanups.
+ if (!CGF.ReturnValue->hasOneUse()) {
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ if (IP->empty()) return 0;
+ llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
+ if (!store) return 0;
+ if (store->getPointerOperand() != CGF.ReturnValue) return 0;
+ assert(!store->isAtomic() && !store->isVolatile()); // see below
+ return store;
+ }
+
+ llvm::StoreInst *store =
+ dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
+ if (!store) return 0;
+
+ // These aren't actually possible for non-coerced returns, and we
+ // only care about non-coerced returns on this code path.
+ assert(!store->isAtomic() && !store->isVolatile());
+
+ // Now do a first-and-dirty dominance check: just walk up the
+ // single-predecessors chain from the current insertion point.
+ llvm::BasicBlock *StoreBB = store->getParent();
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ while (IP != StoreBB) {
+ if (!(IP = IP->getSinglePredecessor()))
+ return 0;
+ }
+
+ // Okay, the store's basic block dominates the insertion point; we
+ // can do our thing.
+ return store;
+}
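The dominance walk above is deliberately conservative: it only succeeds when every block between the store and the insertion point has a unique predecessor. Restated as a standalone predicate (a sketch; the diff inlines this logic):

    // True if `def` is reached by walking unique predecessors up from
    // `use`. Any merge point makes the answer "don't know", which the
    // caller must treat as "not dominated".
    static bool reachedByUniquePredecessors(llvm::BasicBlock *def,
                                            llvm::BasicBlock *use) {
      while (use != def) {
        use = use->getSinglePredecessor();
        if (!use) return false;             // merge point: give up
      }
      return true;
    }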
+
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Functions with no result always return void.
if (ReturnValue == 0) {
@@ -1199,16 +1435,9 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// The internal return value temp always will have pointer-to-return-type
// type, just do a load.
- // If the instruction right before the insertion point is a store to the
- // return value, we can elide the load, zap the store, and usually zap the
- // alloca.
- llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
- llvm::StoreInst *SI = 0;
- if (InsertBB->empty() ||
- !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
- SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
- RV = Builder.CreateLoad(ReturnValue);
- } else {
+ // If there is a dominating store to ReturnValue, we can elide
+ // the load, zap the store, and usually zap the alloca.
+ if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
// Get the stored value and nuke the now-dead store.
RetDbgLoc = SI->getDebugLoc();
RV = SI->getValueOperand();
@@ -1219,6 +1448,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
ReturnValue = 0;
}
+
+ // Otherwise, we have to do a simple load.
+ } else {
+ RV = Builder.CreateLoad(ReturnValue);
}
} else {
llvm::Value *V = ReturnValue;
@@ -1236,7 +1469,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// In ARC, end functions that return a retainable type with a call
// to objc_autoreleaseReturnValue.
if (AutoreleaseResult) {
- assert(getLangOptions().ObjCAutoRefCount &&
+ assert(getLangOpts().ObjCAutoRefCount &&
!FI.isReturnsRetained() &&
RetTy->isObjCRetainableType());
RV = emitAutoreleaseOfResult(*this, RV);
@@ -1431,7 +1664,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
if (const ObjCIndirectCopyRestoreExpr *CRE
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
- assert(getContext().getLangOptions().ObjCAutoRefCount);
+ assert(getContext().getLangOpts().ObjCAutoRefCount);
assert(getContext().hasSameType(E->getType(), type));
return emitWritebackArg(*this, args, CRE);
}
@@ -1450,14 +1683,23 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
- args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
- type, /*NeedsCopy*/true);
+ args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
return;
}
args.add(EmitAnyExprToTemp(E), type);
}
+// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+// optimizer it can aggressively ignore unwind edges.
+void
+CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
+ !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
+ Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
+ CGM.getNoObjCARCExceptionsMetadata());
+}
+
/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
@@ -1465,14 +1707,22 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
+
+ llvm::Instruction *Inst;
if (!InvokeDest)
- return Builder.CreateCall(Callee, Args, Name);
+ Inst = Builder.CreateCall(Callee, Args, Name);
+ else {
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
+ EmitBlock(ContBB);
+ }
- llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
- llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
- Args, Name);
- EmitBlock(ContBB);
- return Invoke;
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(Inst);
+
+ return Inst;
}
llvm::CallSite
@@ -1501,8 +1751,11 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
LValue LV = MakeAddrLValue(EltAddr, EltTy);
RValue EltRV;
- if (CodeGenFunction::hasAggregateLLVMType(EltTy))
- EltRV = RValue::getAggregate(LV.getAddress());
+ if (EltTy->isAnyComplexType())
+ // FIXME: Volatile?
+ EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
+ EltRV = LV.asAggregateRValue();
else
EltRV = EmitLoadOfLValue(LV);
ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
@@ -1519,13 +1772,16 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
// FIXME: What are the right qualifiers here?
LValue LV = EmitLValueForField(Addr, FD, 0);
RValue FldRV;
- if (CodeGenFunction::hasAggregateLLVMType(FT))
- FldRV = RValue::getAggregate(LV.getAddress());
+ if (FT->isAnyComplexType())
+ // FIXME: Volatile?
+ FldRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(FT))
+ FldRV = LV.asAggregateRValue();
else
FldRV = EmitLoadOfLValue(LV);
ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
}
- } else if (isa<ComplexType>(Ty)) {
+ } else if (Ty->isAnyComplexType()) {
ComplexPairTy CV = RV.getComplexVal();
Args.push_back(CV.first);
Args.push_back(CV.second);
@@ -1639,6 +1895,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Insert a padding argument to ensure proper alignment.
+ if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
+ Args.push_back(llvm::UndefValue::get(PaddingType));
+ ++IRArgNo;
+ }
+
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
@@ -1769,6 +2031,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(CS.getInstruction());
+
// If the call doesn't return, finish the basic block and clear the
// insertion point; this allows the rest of IRgen to discard
// unreachable code.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
index 24ed366..dead7bd 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
@@ -98,6 +98,55 @@ namespace CodeGen {
SmallVector<Writeback, 1> Writebacks;
};
+ /// A class for recording the number of arguments that a function
+ /// signature requires.
+ class RequiredArgs {
+ /// The number of required arguments, or ~0 if the signature does
+ /// not permit optional arguments.
+ unsigned NumRequired;
+ public:
+ enum All_t { All };
+
+ RequiredArgs(All_t _) : NumRequired(~0U) {}
+ explicit RequiredArgs(unsigned n) : NumRequired(n) {
+ assert(n != ~0U);
+ }
+
+ /// Compute the arguments required by the given formal prototype,
+ /// given that there may be some additional, non-formal arguments
+ /// in play.
+ static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype,
+ unsigned additional) {
+ if (!prototype->isVariadic()) return All;
+ return RequiredArgs(prototype->getNumArgs() + additional);
+ }
+
+ static RequiredArgs forPrototype(const FunctionProtoType *prototype) {
+ return forPrototypePlus(prototype, 0);
+ }
+
+ static RequiredArgs forPrototype(CanQual<FunctionProtoType> prototype) {
+ return forPrototype(prototype.getTypePtr());
+ }
+
+ static RequiredArgs forPrototypePlus(CanQual<FunctionProtoType> prototype,
+ unsigned additional) {
+ return forPrototypePlus(prototype.getTypePtr(), additional);
+ }
+
+ bool allowsOptionalArgs() const { return NumRequired != ~0U; }
+ unsigned getNumRequiredArgs() const {
+ assert(allowsOptionalArgs());
+ return NumRequired;
+ }
+
+ unsigned getOpaqueData() const { return NumRequired; }
+ static RequiredArgs getFromOpaqueData(unsigned value) {
+ if (value == ~0U) return All;
+ return RequiredArgs(value);
+ }
+ };
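The `additional` parameter of forPrototypePlus accounts for arguments that are passed but absent from the formal prototype, such as `this`. A sketch of its use for a variadic member call (the prototype shown is illustrative; compare the `forPrototypePlus(FPT, 1)` call in the lambda-forwarding code later in this diff):

    // For `int C::f(int, ...)` called as obj.f(1, 2.0): the callee
    // receives `this` plus one fixed int, so two arguments are required
    // and 2.0 travels as a variadic extra.
    void forPrototypePlusSketch(CanQual<FunctionProtoType> proto) {
      RequiredArgs required = RequiredArgs::forPrototypePlus(proto, 1);
      assert(required.allowsOptionalArgs());
      assert(required.getNumRequiredArgs() == 2);  // `this` + one int
    }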
+
/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
@@ -114,50 +163,71 @@ namespace CodeGen {
/// The LLVM::CallingConv to use for this function (as specified by the
/// user).
- unsigned CallingConvention;
+ unsigned CallingConvention : 8;
/// The LLVM::CallingConv to actually use for this function, which may
/// depend on the ABI.
- unsigned EffectiveCallingConvention;
+ unsigned EffectiveCallingConvention : 8;
+
+ /// The clang::CallingConv that this was originally created with.
+ unsigned ASTCallingConvention : 8;
/// Whether this function is noreturn.
- bool NoReturn;
+ unsigned NoReturn : 1;
/// Whether this function is returns-retained.
- bool ReturnsRetained;
+ unsigned ReturnsRetained : 1;
+
+ /// How many arguments to pass inreg.
+ unsigned HasRegParm : 1;
+ unsigned RegParm : 4;
+
+ RequiredArgs Required;
unsigned NumArgs;
- ArgInfo *Args;
+ ArgInfo *getArgsBuffer() {
+ return reinterpret_cast<ArgInfo*>(this+1);
+ }
+ const ArgInfo *getArgsBuffer() const {
+ return reinterpret_cast<const ArgInfo*>(this + 1);
+ }
- /// How many arguments to pass inreg.
- bool HasRegParm;
- unsigned RegParm;
+ CGFunctionInfo() : Required(RequiredArgs::All) {}
public:
+ static CGFunctionInfo *create(unsigned llvmCC,
+ const FunctionType::ExtInfo &extInfo,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required);
+
typedef const ArgInfo *const_arg_iterator;
typedef ArgInfo *arg_iterator;
- CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
- bool ReturnsRetained, bool HasRegParm, unsigned RegParm,
- CanQualType ResTy,
- const CanQualType *ArgTys, unsigned NumArgTys);
- ~CGFunctionInfo() { delete[] Args; }
-
- const_arg_iterator arg_begin() const { return Args + 1; }
- const_arg_iterator arg_end() const { return Args + 1 + NumArgs; }
- arg_iterator arg_begin() { return Args + 1; }
- arg_iterator arg_end() { return Args + 1 + NumArgs; }
+ const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; }
+ const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; }
+ arg_iterator arg_begin() { return getArgsBuffer() + 1; }
+ arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; }
unsigned arg_size() const { return NumArgs; }
+ bool isVariadic() const { return Required.allowsOptionalArgs(); }
+ RequiredArgs getRequiredArgs() const { return Required; }
+
bool isNoReturn() const { return NoReturn; }
- /// In ARR, whether this function retains its return value. This
+ /// In ARC, whether this function retains its return value. This
/// is not always reliable for call sites.
bool isReturnsRetained() const { return ReturnsRetained; }
- /// getCallingConvention - Return the user specified calling
+ /// getASTCallingConvention() - Return the AST-specified calling
/// convention.
+ CallingConv getASTCallingConvention() const {
+ return CallingConv(ASTCallingConvention);
+ }
+
+ /// getCallingConvention - Return the user specified calling
+ /// convention, which has been translated into an LLVM CC.
unsigned getCallingConvention() const { return CallingConvention; }
/// getEffectiveCallingConvention - Return the actual calling convention to
@@ -172,36 +242,44 @@ namespace CodeGen {
bool getHasRegParm() const { return HasRegParm; }
unsigned getRegParm() const { return RegParm; }
- CanQualType getReturnType() const { return Args[0].type; }
+ FunctionType::ExtInfo getExtInfo() const {
+ return FunctionType::ExtInfo(isNoReturn(),
+ getHasRegParm(), getRegParm(),
+ getASTCallingConvention(),
+ isReturnsRetained());
+ }
+
+ CanQualType getReturnType() const { return getArgsBuffer()[0].type; }
- ABIArgInfo &getReturnInfo() { return Args[0].info; }
- const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
+ ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; }
+ const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; }
void Profile(llvm::FoldingSetNodeID &ID) {
- ID.AddInteger(getCallingConvention());
+ ID.AddInteger(getASTCallingConvention());
ID.AddBoolean(NoReturn);
ID.AddBoolean(ReturnsRetained);
ID.AddBoolean(HasRegParm);
ID.AddInteger(RegParm);
+ ID.AddInteger(Required.getOpaqueData());
getReturnType().Profile(ID);
for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
it->type.Profile(ID);
}
- template<class Iterator>
static void Profile(llvm::FoldingSetNodeID &ID,
- const FunctionType::ExtInfo &Info,
- CanQualType ResTy,
- Iterator begin,
- Iterator end) {
- ID.AddInteger(Info.getCC());
- ID.AddBoolean(Info.getNoReturn());
- ID.AddBoolean(Info.getProducesResult());
- ID.AddBoolean(Info.getHasRegParm());
- ID.AddInteger(Info.getRegParm());
- ResTy.Profile(ID);
- for (; begin != end; ++begin) {
- CanQualType T = *begin; // force iterator to be over canonical types
- T.Profile(ID);
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes) {
+ ID.AddInteger(info.getCC());
+ ID.AddBoolean(info.getNoReturn());
+ ID.AddBoolean(info.getProducesResult());
+ ID.AddBoolean(info.getHasRegParm());
+ ID.AddInteger(info.getRegParm());
+ ID.AddInteger(required.getOpaqueData());
+ resultType.Profile(ID);
+ for (ArrayRef<CanQualType>::iterator
+ i = argTypes.begin(), e = argTypes.end(); i != e; ++i) {
+ i->Profile(ID);
}
}
};
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
index c28ecc0..6303e20 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CGBlocks.h"
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
@@ -95,7 +96,6 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
// TODO: for complete types, this should be possible with a GEP.
llvm::Value *V = This;
if (Offset.isPositive()) {
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
V = Builder.CreateBitCast(V, Int8PtrTy);
V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
}
@@ -125,8 +125,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
BaseOffset = NonVirtualOffset;
// Apply the base offset.
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
+ ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, CGF.Int8PtrTy);
ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
return ThisPtr;
@@ -250,9 +249,9 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
}
// Apply the offset.
- Value = Builder.CreatePtrToInt(Value, NonVirtualOffset->getType());
- Value = Builder.CreateSub(Value, NonVirtualOffset);
- Value = Builder.CreateIntToPtr(Value, DerivedPtrTy);
+ Value = Builder.CreateBitCast(Value, Int8PtrTy);
+ Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
+ "sub.ptr");
// Just cast.
Value = Builder.CreateBitCast(Value, DerivedPtrTy);
@@ -397,16 +396,16 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
BaseClassDecl,
isBaseVirtual);
-
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(V, Qualifiers(),
+ AggValueSlot::forAddr(V, Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
- if (CGF.CGM.getLangOptions().Exceptions &&
+ if (CGF.CGM.getLangOpts().Exceptions &&
!BaseClassDecl->hasTrivialDestructor())
CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
isBaseVirtual);
@@ -414,47 +413,59 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
LValue LHS,
+ Expr *Init,
llvm::Value *ArrayIndexVar,
- CXXCtorInitializer *MemberInit,
QualType T,
+ ArrayRef<VarDecl *> ArrayIndexes,
unsigned Index) {
- if (Index == MemberInit->getNumArrayIndices()) {
- CodeGenFunction::RunCleanupsScope Cleanups(CGF);
-
- llvm::Value *Dest = LHS.getAddress();
- if (ArrayIndexVar) {
- // If we have an array index variable, load it and use it as an offset.
- // Then, increment the value.
- llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
- Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
- llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
- Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
- CGF.Builder.CreateStore(Next, ArrayIndexVar);
- }
+ if (Index == ArrayIndexes.size()) {
+ LValue LV = LHS;
+ { // Scope for Cleanups.
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *Dest = LHS.getAddress();
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+
+ // Update the LValue.
+ LV.setAddress(Dest);
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
+ LV.setAlignment(std::min(Align, LV.getAlignment()));
+ }
- if (!CGF.hasAggregateLLVMType(T)) {
- LValue lvalue = CGF.MakeAddrLValue(Dest, T);
- CGF.EmitScalarInit(MemberInit->getInit(), /*decl*/ 0, lvalue, false);
- } else if (T->isAnyComplexType()) {
- CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), Dest,
- LHS.isVolatileQualified());
- } else {
- AggValueSlot Slot =
- AggValueSlot::forAddr(Dest, LHS.getQuals(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
-
- CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ if (!CGF.hasAggregateLLVMType(T)) {
+ CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, LV.getAddress(),
+ LV.isVolatileQualified());
+ } else {
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(Init, Slot);
+ }
}
-
+
+ // Now, outside of the initializer cleanup scope, destroy the backing array
+ // for a std::initializer_list member.
+ CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);
+
return;
}
const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
assert(Array && "Array initialization without the array type?");
llvm::Value *IndexVar
- = CGF.GetAddrOfLocalVar(MemberInit->getArrayIndex(Index));
+ = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
assert(IndexVar && "Array index variable not loaded");
// Initialize this index variable to zero.
@@ -490,8 +501,8 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
// Inside the loop body recurse to emit the inner loop or, eventually, the
// constructor call.
- EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit,
- Array->getElementType(), Index + 1);
+ EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
+ Array->getElementType(), ArrayIndexes, Index + 1);
}
CGF.EmitBlock(ContinueBlock);
@@ -511,19 +522,15 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
namespace {
struct CallMemberDtor : EHScopeStack::Cleanup {
- FieldDecl *Field;
+ llvm::Value *V;
CXXDestructorDecl *Dtor;
- CallMemberDtor(FieldDecl *Field, CXXDestructorDecl *Dtor)
- : Field(Field), Dtor(Dtor) {}
+ CallMemberDtor(llvm::Value *V, CXXDestructorDecl *Dtor)
+ : V(V), Dtor(Dtor) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
- // FIXME: Is this OK for C++0x delegating constructors?
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
-
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- LHS.getAddress());
+ V);
}
};
}
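CallMemberDtor now captures the member's address directly instead of re-deriving it from `this`, which also makes it correct for delegating constructors. The general cleanup idiom it instantiates, sketched with a hypothetical payload:

    // A cleanup is a small struct whose Emit() runs when the enclosing
    // scope unwinds (normally, exceptionally, or both).
    struct CallRelease : EHScopeStack::Cleanup {
      llvm::Value *Ptr;
      CallRelease(llvm::Value *ptr) : Ptr(ptr) {}
      void Emit(CodeGenFunction &CGF, Flags flags) {
        // Hypothetical body: emit whatever teardown `Ptr` needs here.
      }
    };
    // Usage: CGF.EHStack.pushCleanup<CallRelease>(EHCleanup, addr);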
@@ -533,7 +540,7 @@ static bool hasTrivialCopyOrMoveConstructor(const CXXRecordDecl *Record,
return Moving ? Record->hasTrivialMoveConstructor() :
Record->hasTrivialCopyConstructor();
}
-
+
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
CXXCtorInitializer *MemberInit,
@@ -545,7 +552,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// non-static data member initializers.
FieldDecl *Field = MemberInit->getAnyMember();
- QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());
+ QualType FieldType = Field->getType();
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS;
@@ -559,67 +566,83 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
}
- if (!CGF.hasAggregateLLVMType(Field->getType())) {
+ // Special case: if we are in a copy or move constructor, and we are copying
+ // an array of PODs or classes with trivial copy constructors, ignore the
+ // AST and perform the copy we know is equivalent.
+ // FIXME: This is hacky at best... if we had a bit more explicit information
+ // in the AST, we could generalize it more easily.
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isImplicitlyDefined() &&
+ Constructor->isCopyOrMoveConstructor()) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (Record && hasTrivialCopyOrMoveConstructor(Record,
+ Constructor->isMoveConstructor()))) {
+ // Find the source pointer. We know it's the last argument because
+ // we know we're in a copy constructor.
+ unsigned SrcArgIndex = Args.size() - 1;
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
+ LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ return;
+ }
+ }
+
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (MemberInit->getNumArrayIndices())
+ ArrayIndexes = MemberInit->getArrayIndexes();
+ CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
+}
+
+void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
+ LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes) {
+ QualType FieldType = Field->getType();
+ if (!hasAggregateLLVMType(FieldType)) {
if (LHS.isSimple()) {
- CGF.EmitExprAsInit(MemberInit->getInit(), Field, LHS, false);
+ EmitExprAsInit(Init, Field, LHS, false);
} else {
- RValue RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
- CGF.EmitStoreThroughLValue(RHS, LHS);
+ RValue RHS = RValue::get(EmitScalarExpr(Init));
+ EmitStoreThroughLValue(RHS, LHS);
}
- } else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
- CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
- LHS.isVolatileQualified());
+ } else if (FieldType->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, LHS.getAddress(), LHS.isVolatileQualified());
} else {
llvm::Value *ArrayIndexVar = 0;
- const ConstantArrayType *Array
- = CGF.getContext().getAsConstantArrayType(FieldType);
- if (Array && Constructor->isImplicitlyDefined() &&
- Constructor->isCopyOrMoveConstructor()) {
- llvm::Type *SizeTy
- = CGF.ConvertType(CGF.getContext().getSizeType());
+ if (ArrayIndexes.size()) {
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
// The LHS is a pointer to the first object we'll be constructing, as
// a flat array.
- QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
- llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ QualType BaseElementTy = getContext().getBaseElementType(FieldType);
+ llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
- BasePtr);
- LHS = CGF.MakeAddrLValue(BaseAddrPtr, BaseElementTy);
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
+ BasePtr);
+ LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
// Create an array index that will be used to walk over all of the
// objects we're constructing.
- ArrayIndexVar = CGF.CreateTempAlloca(SizeTy, "object.index");
+ ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
- CGF.Builder.CreateStore(Zero, ArrayIndexVar);
+ Builder.CreateStore(Zero, ArrayIndexVar);
- // If we are copying an array of PODs or classes with trivial copy
- // constructors, perform a single aggregate copy.
- const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
- if (BaseElementTy.isPODType(CGF.getContext()) ||
- (Record && hasTrivialCopyOrMoveConstructor(Record,
- Constructor->isMoveConstructor()))) {
- // Find the source pointer. We knows it's the last argument because
- // we know we're in a copy constructor.
- unsigned SrcArgIndex = Args.size() - 1;
- llvm::Value *SrcPtr
- = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
- LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
-
- // Copy the aggregate.
- CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
- LHS.isVolatileQualified());
- return;
- }
// Emit the block variables for the array indices, if any.
- for (unsigned I = 0, N = MemberInit->getNumArrayIndices(); I != N; ++I)
- CGF.EmitAutoVarDecl(*MemberInit->getArrayIndex(I));
+ for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
+ EmitAutoVarDecl(*ArrayIndexes[I]);
}
- EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
+ EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
+ ArrayIndexes, 0);
- if (!CGF.CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOpts().Exceptions)
return;
// FIXME: If we have an array of classes w/ non-trivial destructors,
@@ -631,8 +654,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (!RD->hasTrivialDestructor())
- CGF.EHStack.pushCleanup<CallMemberDtor>(EHCleanup, Field,
- RD->getDestructor());
+ EHStack.pushCleanup<CallMemberDtor>(EHCleanup, LHS.getAddress(),
+ RD->getDestructor());
}
}
@@ -708,6 +731,9 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+ // TODO: in restricted cases, we can emit the vbase initializers of
+ // a complete ctor and then delegate to the base ctor.
+
// Emit the constructor prologue, i.e. the base and member
// initializers.
EmitCtorPrologue(Ctor, CtorType, Args);
@@ -912,7 +938,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
}
// -fapple-kext must inline any call to this dtor into
// the caller's body.
- if (getContext().getLangOptions().AppleKext)
+ if (getContext().getLangOpts().AppleKext)
CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
break;
}
@@ -940,13 +966,13 @@ namespace {
class DestroyField : public EHScopeStack::Cleanup {
const FieldDecl *field;
- CodeGenFunction::Destroyer &destroyer;
+ CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
public:
DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
- : field(field), destroyer(*destroyer),
+ : field(field), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
@@ -1039,6 +1065,10 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
QualType::DestructionKind dtorKind = type.isDestructedType();
if (!dtorKind) continue;
+ // Anonymous union members do not have their destructors called.
+ const RecordType *RT = type->getAsUnionType();
+ if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
+
CleanupKind cleanupKind = getCleanupKind(dtorKind);
EHStack.pushCleanup<DestroyField>(cleanupKind, field,
getDestroyer(dtorKind),
@@ -1145,7 +1175,7 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
// Evaluate the constructor and its arguments in a regular
// partial-destroy cleanup.
- if (getLangOptions().Exceptions &&
+ if (getLangOpts().Exceptions &&
!ctor->getParent()->hasTrivialDestructor()) {
Destroyer *destroyer = destroyCXXObject;
pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
@@ -1192,8 +1222,8 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CGDebugInfo *DI = getDebugInfo();
if (DI && CGM.getCodeGenOpts().LimitDebugInfo) {
- // If debug info for this class has been emitted then this is the right time
- // to do so.
+ // If debug info for this class has not been emitted then this is the
+ // right time to do so.
const CXXRecordDecl *Parent = D->getParent();
DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
Parent->getLocation());
@@ -1273,7 +1303,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
EmitCallArg(Args, *Arg, ArgType);
}
- EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
+ EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
ReturnValueSlot(), Args, D);
}
@@ -1309,7 +1339,7 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
EmitDelegateCallArg(DelegateArgs, param);
}
- EmitCall(CGM.getTypes().getFunctionInfo(Ctor, CtorType),
+ EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
ReturnValueSlot(), DelegateArgs, Ctor);
}
@@ -1338,8 +1368,10 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
llvm::Value *ThisPtr = LoadCXXThis();
+ QualType Ty = getContext().getTagDeclType(Ctor->getParent());
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(ThisPtr, Qualifiers(),
+ AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -1347,7 +1379,7 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
const CXXRecordDecl *ClassDecl = Ctor->getParent();
- if (CGM.getLangOptions().Exceptions && !ClassDecl->hasTrivialDestructor()) {
+ if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
CXXDtorType Type =
CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
@@ -1364,7 +1396,7 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
ForVirtualBase);
llvm::Value *Callee = 0;
- if (getContext().getLangOptions().AppleKext)
+ if (getContext().getLangOpts().AppleKext)
Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
DD->getParent());
@@ -1485,7 +1517,8 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
llvm::Type *AddressPointPtrTy =
VTableAddressPoint->getType()->getPointerTo();
VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
- Builder.CreateStore(VTableAddressPoint, VTableField);
+ llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
+ CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
void
@@ -1568,7 +1601,9 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
llvm::Type *Ty) {
llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
- return Builder.CreateLoad(VTablePtrSrc, "vtable");
+ llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
+ CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
+ return VTable;
}
static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
@@ -1682,7 +1717,7 @@ static bool UseVirtualCall(ASTContext &Context,
// When building with -fapple-kext, all calls must go through the vtable since
// the kernel linker can do runtime patching of vtables.
- if (Context.getLangOptions().AppleKext)
+ if (Context.getLangOpts().AppleKext)
return true;
return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
@@ -1692,13 +1727,110 @@ llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
llvm::Value *This) {
- const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
+ llvm::FunctionType *fnType =
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodDeclaration(MD));
if (UseVirtualCall(getContext(), E, MD))
- return BuildVirtualCall(MD, This, Ty);
+ return BuildVirtualCall(MD, This, fnType);
+
+ return CGM.GetAddrOfFunction(MD, fnType);
+}
+
+void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs) {
+ // Lookup the call operator
+ DeclarationName Name
+ = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_const_result Calls = Lambda->lookup(Name);
+ CXXMethodDecl *CallOperator = cast<CXXMethodDecl>(*Calls.first++);
+ const FunctionProtoType *FPT =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+
+ // Get the address of the call operator.
+ GlobalDecl GD(CallOperator);
+ const CGFunctionInfo &CalleeFnInfo =
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(CalleeFnInfo);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(CalleeFnInfo, Callee, Slot, CallArgs, CallOperator);
+
+ // Forward the returned value
+ if (!ResultType->isVoidType() && Slot.isNull())
+ EmitReturnOfRValue(RV, ResultType);
+}
+
+void CodeGenFunction::EmitLambdaBlockInvokeBody() {
+ const BlockDecl *BD = BlockInfo->getBlockDecl();
+ const VarDecl *variable = BD->capture_begin()->getVariable();
+ const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (BlockDecl::param_const_iterator I = BD->param_begin(),
+ E = BD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
+ if (cast<CXXMethodDecl>(CurFuncDecl)->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(CurFuncDecl, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitFunctionBody(Args);
+}
+
+void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+ const CXXRecordDecl *Lambda = MD->getParent();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
+ if (MD->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
+ return;
+ }
- return CGM.GetAddrOfFunction(MD, Ty);
+ EmitLambdaDelegatingInvokeBody(MD);
}
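For orientation, a captureless C++11 lambda that exercises these paths: converting it to a function pointer emits a static invoker, whose body (via EmitLambdaDelegatingInvokeBody) forwards to the lambda's operator() with an undef `this` -- safe because a captureless lambda's call operator never touches its object. Illustrative source, not part of the diff:

    int (*fp)(int) = [](int x) { return x + 1; };  // conversion emits the invoker
    int use() { return fp(2); }                    // calls through the invoker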
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
index b2d0786..b00e2a2 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
@@ -84,7 +84,6 @@ RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
}
llvm_unreachable("bad saved r-value kind");
- return RValue();
}
/// Push an entry of the given size onto this protected-scope stack.
@@ -251,8 +250,7 @@ void CodeGenFunction::initFullExprCleanup() {
// Initialize it to false at a site that's guaranteed to be run
// before each evaluation.
- llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
- new llvm::StoreInst(Builder.getFalse(), active, &block->back());
+ setBeforeOutermostConditional(Builder.getFalse(), active);
// Initialize it to true at the current location.
Builder.CreateStore(Builder.getTrue(), active);
@@ -504,9 +502,9 @@ static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
// The only uses should be fixup switches.
llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
- if (si->getNumCases() == 2 && si->getDefaultDest() == unreachableBB) {
+ if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
// Replace the switch with a branch.
- llvm::BranchInst::Create(si->getSuccessor(1), si);
+ llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);
// The switch operand is a load from the cleanup-dest alloca.
llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());
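This hunk tracks an LLVM API change: SwitchInst::getNumCases() no longer counts the default destination, and case successors are reached through case iterators rather than raw successor indices. A sketch of the new idiom (assuming LLVM 3.1-era headers):

    #include "llvm/Instructions.h"

    // Return the sole non-default case target of a switch, or null.
    // Under the old API the guard here was getNumCases() == 2, because
    // the default destination was counted as a case.
    static llvm::BasicBlock *getOnlyCaseTarget(llvm::SwitchInst *si) {
      if (si->getNumCases() != 1)
        return 0;
      return si->case_begin().getCaseSuccessor();
    }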
@@ -1000,57 +998,75 @@ enum ForActivation_t {
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
EHScopeStack::stable_iterator C,
- ForActivation_t Kind) {
+ ForActivation_t kind,
+ llvm::Instruction *dominatingIP) {
EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
- // We always need the flag if we're activating the cleanup, because
- // we have to assume that the current location doesn't necessarily
- // dominate all future uses of the cleanup.
- bool NeedFlag = (Kind == ForActivation);
+ // We always need the flag if we're activating the cleanup in a
+ // conditional context, because we have to assume that the current
+ // location doesn't necessarily dominate the cleanup's code.
+ bool isActivatedInConditional =
+ (kind == ForActivation && CGF.isInConditionalBranch());
+
+ bool needFlag = false;
// Calculate whether the cleanup was used:
// - as a normal cleanup
- if (Scope.isNormalCleanup() && IsUsedAsNormalCleanup(CGF.EHStack, C)) {
+ if (Scope.isNormalCleanup() &&
+ (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
Scope.setTestFlagInNormalCleanup();
- NeedFlag = true;
+ needFlag = true;
}
// - as an EH cleanup
- if (Scope.isEHCleanup() && IsUsedAsEHCleanup(CGF.EHStack, C)) {
+ if (Scope.isEHCleanup() &&
+ (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
Scope.setTestFlagInEHCleanup();
- NeedFlag = true;
+ needFlag = true;
}
// If it hasn't yet been used as either, we're done.
- if (!NeedFlag) return;
+ if (!needFlag) return;
- llvm::AllocaInst *Var = Scope.getActiveFlag();
- if (!Var) {
- Var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
- Scope.setActiveFlag(Var);
+ llvm::AllocaInst *var = Scope.getActiveFlag();
+ if (!var) {
+ var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
+ Scope.setActiveFlag(var);
+
+ assert(dominatingIP && "no existing variable and no dominating IP!");
// Initialize to true or false depending on whether it was
// active up to this point.
- CGF.InitTempAlloca(Var, CGF.Builder.getInt1(Kind == ForDeactivation));
+ llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);
+
+ // If we're in a conditional block, ignore the dominating IP and
+ // use the outermost conditional branch.
+ if (CGF.isInConditionalBranch()) {
+ CGF.setBeforeOutermostConditional(value, var);
+ } else {
+ new llvm::StoreInst(value, var, dominatingIP);
+ }
}
- CGF.Builder.CreateStore(CGF.Builder.getInt1(Kind == ForActivation), Var);
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}
/// Activate a cleanup that was created in an inactivated state.
-void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C) {
+void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
assert(C != EHStack.stable_end() && "activating bottom of stack?");
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
assert(!Scope.isActive() && "double activation");
- SetupCleanupBlockActivation(*this, C, ForActivation);
+ SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);
Scope.setActive(true);
}
/// Deactivate a cleanup that was created in an active state.
-void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
+void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
assert(Scope.isActive() && "double deactivation");
@@ -1066,7 +1082,7 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C) {
}
// Otherwise, follow the general case.
- SetupCleanupBlockActivation(*this, C, ForDeactivation);
+ SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);
Scope.setActive(false);
}
@@ -1077,3 +1093,11 @@ llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
return NormalCleanupDest;
}
+
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ QualType TempType,
+ llvm::Value *Ptr) {
+ pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
+ /*useEHCleanup*/ true);
+}
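The dominatingIP parameter threaded through the activation logic above matters when a cleanup is activated or deactivated from conditionally-evaluated code: the i1 active flag has to be initialized at a point that dominates every later test of it. A user-level sketch of code exercising the conditional path:

    struct T { T(); ~T(); };
    bool flag();
    void use(const T &);

    void demo() {
      // Only one arm constructs the temporary, so its destructor cleanup
      // starts out inactive and is activated on that arm; the store to
      // "cleanup.isactive" is placed before the outermost conditional.
      flag() ? use(T()) : (void)0;
    }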
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
index c7a9b40..7301d20 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -35,7 +35,6 @@
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
using namespace clang;
using namespace clang::CodeGen;
@@ -75,7 +74,7 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
llvm::DILexicalBlockFile LBF = llvm::DILexicalBlockFile(LB);
llvm::DIDescriptor D
= DBuilder.createLexicalBlockFile(LBF.getScope(),
- getOrCreateFile(CurLoc));
+ getOrCreateFile(CurLoc));
llvm::MDNode *N = D;
LexicalBlockStack.pop_back();
LexicalBlockStack.push_back(N);
@@ -118,12 +117,25 @@ llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
assert (FD && "Invalid FunctionDecl!");
IdentifierInfo *FII = FD->getIdentifier();
- if (FII)
+ FunctionTemplateSpecializationInfo *Info
+ = FD->getTemplateSpecializationInfo();
+ if (!Info && FII)
return FII->getName();
// Otherwise construct human readable name for debug info.
std::string NS = FD->getNameAsString();
+ // Add any template specialization args.
+ if (Info) {
+ const TemplateArgumentList *TArgs = Info->TemplateArguments;
+ const TemplateArgument *Args = TArgs->data();
+ unsigned NumArgs = TArgs->size();
+ PrintingPolicy Policy(CGM.getLangOpts());
+ NS += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+ }
+
// Copy this name on the side and use its reference.
char *StrPtr = DebugInfoNames.Allocate<char>(NS.length());
memcpy(StrPtr, NS.data(), NS.length());
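With the specialization check above, an instantiated function template's debug name carries its template arguments. An illustrative example:

    template <typename T> T identity(T v) { return v; }
    int caller() { return identity(1); }  // debug name reads "identity<int>"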
@@ -131,7 +143,7 @@ StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
}
StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
- llvm::SmallString<256> MethodName;
+ SmallString<256> MethodName;
llvm::raw_svector_ostream OS(MethodName);
OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
const DeclContext *DC = OMD->getDeclContext();
@@ -164,8 +176,8 @@ StringRef CGDebugInfo::getSelectorName(Selector S) {
/// getClassName - Get class name including template argument list.
StringRef
-CGDebugInfo::getClassName(RecordDecl *RD) {
- ClassTemplateSpecializationDecl *Spec
+CGDebugInfo::getClassName(const RecordDecl *RD) {
+ const ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(RD);
if (!Spec)
return RD->getName();
@@ -184,7 +196,7 @@ CGDebugInfo::getClassName(RecordDecl *RD) {
NumArgs = TemplateArgs.size();
}
Buffer = RD->getIdentifier()->getNameStart();
- PrintingPolicy Policy(CGM.getLangOptions());
+ PrintingPolicy Policy(CGM.getLangOpts());
Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
NumArgs,
Policy);
@@ -223,7 +235,6 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
DIFileCache[fname] = F;
return F;
-
}
/// getOrCreateMainFile - Get the file info for main compile unit.
@@ -234,7 +245,8 @@ llvm::DIFile CGDebugInfo::getOrCreateMainFile() {
/// getLineNumber - Get line number for the location. If location is invalid
/// then use current location.
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
- assert((Loc.isValid() || CurLoc.isValid()) && "Invalid current location!");
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
return PLoc.isValid()? PLoc.getLine() : 0;
@@ -243,16 +255,20 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
/// getColumnNumber - Get column number for the location. If location is
/// invalid then use current location.
unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
- assert((Loc.isValid() || CurLoc.isValid()) && "Invalid current location!");
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
return PLoc.isValid()? PLoc.getColumn() : 0;
}
StringRef CGDebugInfo::getCurrentDirname() {
+ if (!CGM.getCodeGenOpts().DebugCompilationDir.empty())
+ return CGM.getCodeGenOpts().DebugCompilationDir;
+
if (!CWDName.empty())
return CWDName;
- llvm::SmallString<256> CWD;
+ SmallString<256> CWD;
llvm::sys::fs::current_path(CWD);
char *CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
memcpy(CompDirnamePtr, CWD.data(), CWD.size());
@@ -285,7 +301,7 @@ void CGDebugInfo::CreateCompileUnit() {
StringRef Filename(FilenamePtr, MainFileName.length());
unsigned LangTag;
- const LangOptions &LO = CGM.getLangOptions();
+ const LangOptions &LO = CGM.getLangOpts();
if (LO.CPlusPlus) {
if (LO.ObjC1)
LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
@@ -321,35 +337,32 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
unsigned Encoding = 0;
const char *BTName = NULL;
switch (BT->getKind()) {
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Dependent:
- llvm_unreachable("Unexpected builtin type Dependent");
- case BuiltinType::Overload:
- llvm_unreachable("Unexpected builtin type Overload");
- case BuiltinType::BoundMember:
- llvm_unreachable("Unexpected builtin type BoundMember");
- case BuiltinType::UnknownAny:
- llvm_unreachable("Unexpected builtin type UnknownAny");
+ llvm_unreachable("Unexpected builtin type");
case BuiltinType::NullPtr:
return DBuilder.
- createNullPtrType(BT->getName(CGM.getContext().getLangOptions()));
+ createNullPtrType(BT->getName(CGM.getContext().getLangOpts()));
case BuiltinType::Void:
return llvm::DIType();
case BuiltinType::ObjCClass:
- return DBuilder.createStructType(TheCU, "objc_class",
- getOrCreateMainFile(), 0, 0, 0,
- llvm::DIDescriptor::FlagFwdDecl,
- llvm::DIArray());
+ return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", getOrCreateMainFile(),
+ 0);
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
// typedef struct objc_object {
// Class isa;
// } *id;
- llvm::DIType OCTy =
- DBuilder.createStructType(TheCU, "objc_class",
- getOrCreateMainFile(), 0, 0, 0,
- llvm::DIDescriptor::FlagFwdDecl,
- llvm::DIArray());
+ // TODO: Cache these two types to avoid duplicates.
+ llvm::DIType OCTy =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", getOrCreateMainFile(),
+ 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
@@ -367,10 +380,10 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
0, 0, 0, 0, Elements);
}
case BuiltinType::ObjCSel: {
- return DBuilder.createStructType(TheCU, "objc_selector",
- getOrCreateMainFile(), 0, 0, 0,
- llvm::DIDescriptor::FlagFwdDecl,
- llvm::DIArray());
+ return
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_selector", getOrCreateMainFile(),
+ 0);
}
case BuiltinType::UChar:
case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
@@ -403,7 +416,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::ULong: BTName = "long unsigned int"; break;
case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
default:
- BTName = BT->getName(CGM.getContext().getLangOptions());
+ BTName = BT->getName(CGM.getContext().getLangOpts());
break;
}
// Bit size, align and offset of the type.
@@ -479,34 +492,82 @@ llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
Ty->getPointeeType(), Unit);
}
+// Creates a forward declaration for a RecordDecl in the given context.
+llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
+ llvm::DIDescriptor Ctx) {
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = RD->getName();
+
+ // Get the tag.
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ unsigned Tag = 0;
+ if (CXXDecl) {
+ RDName = getClassName(RD);
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+ else if (RD->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (RD->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else
+ llvm_unreachable("Unknown RecordDecl type!");
+
+ // Create the type.
+ return DBuilder.createForwardDecl(Tag, RDName, DefUnit, Line);
+}
+
+// Walk up the context chain and create forward decls for record decls,
+// and normal descriptors for namespaces.
+llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
+ if (!Context)
+ return TheCU;
+
+ // See if we already have the parent.
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+ I = RegionMap.find(Context);
+ if (I != RegionMap.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
+
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(Context)) {
+ if (!RD->isDependentType()) {
+ llvm::DIType Ty = getOrCreateLimitedType(CGM.getContext().getTypeDeclType(RD),
+ getOrCreateMainFile());
+ return llvm::DIDescriptor(Ty);
+ }
+ }
+ return TheCU;
+}
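createRecordFwdDecl and createContextChain back -flimit-debug-info: record types reached only through a pointer, and record scopes appearing in a context chain, get forward declarations instead of full definitions. A sketch of code where the pointee stays a forward declaration:

    struct Big { int payload[64]; };

    // With debug-info limiting enabled, "Big" is emitted here as a
    // DW_TAG_structure_type forward declaration until something else in
    // the translation unit requires the complete type.
    Big *global_ptr;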
+
/// CreatePointeeType - Create Pointee type. If Pointee is a record
/// then emit record's fwd if debug info size reduction is enabled.
llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
llvm::DIFile Unit) {
if (!CGM.getCodeGenOpts().LimitDebugInfo)
return getOrCreateType(PointeeTy, Unit);
-
+
+ // Limit debug info for the pointee type.
+
+ // If we have an existing type, use that, it's still smaller than creating
+ // a new type.
+ llvm::DIType Ty = getTypeOrNull(PointeeTy);
+ if (Ty.Verify()) return Ty;
+
+ // Handle qualifiers.
+ if (PointeeTy.hasLocalQualifiers())
+ return CreateQualifiedType(PointeeTy, Unit);
+
if (const RecordType *RTy = dyn_cast<RecordType>(PointeeTy)) {
RecordDecl *RD = RTy->getDecl();
- llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
- unsigned Line = getLineNumber(RD->getLocation());
llvm::DIDescriptor FDContext =
getContextDescriptor(cast<Decl>(RD->getDeclContext()));
-
- if (RD->isStruct())
- return DBuilder.createStructType(FDContext, RD->getName(), DefUnit,
- Line, 0, 0, llvm::DIType::FlagFwdDecl,
- llvm::DIArray());
- else if (RD->isUnion())
- return DBuilder.createUnionType(FDContext, RD->getName(), DefUnit,
- Line, 0, 0, llvm::DIType::FlagFwdDecl,
- llvm::DIArray());
- else {
- assert(RD->isClass() && "Unknown RecordType!");
- return DBuilder.createClassType(FDContext, RD->getName(), DefUnit,
- Line, 0, 0, 0, llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray());
- }
+ llvm::DIType RetTy = createRecordFwdDecl(RD, FDContext);
+ TypeCache[QualType(RTy, 0).getAsOpaquePtr()] = RetTy;
+ return RetTy;
}
return getOrCreateType(PointeeTy, Unit);
@@ -516,7 +577,6 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
const Type *Ty,
QualType PointeeTy,
llvm::DIFile Unit) {
-
if (Tag == llvm::dwarf::DW_TAG_reference_type)
return DBuilder.createReferenceType(CreatePointeeType(PointeeTy, Unit));
@@ -527,8 +587,8 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- return
- DBuilder.createPointerType(CreatePointeeType(PointeeTy, Unit), Size, Align);
+ return DBuilder.createPointerType(CreatePointeeType(PointeeTy, Unit),
+ Size, Align);
}
llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
@@ -594,8 +654,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
return BlockLiteralGeneric;
}
-llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile Unit) {
// Typedefs are derived from some other type. If we have a typedef of a
// typedef, make sure to emit the whole chain.
llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
@@ -605,11 +664,12 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
// declared.
unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
const TypedefNameDecl *TyDecl = Ty->getDecl();
- llvm::DIDescriptor TydefContext =
+
+ llvm::DIDescriptor TypedefContext =
getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
-
- return
- DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TydefContext);
+
+ return
+ DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TypedefContext);
}
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
@@ -634,6 +694,34 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
return DbgTy;
}
+
+void CGDebugInfo::
+CollectRecordStaticVars(const RecordDecl *RD, llvm::DIType FwdDecl) {
+
+ for (RecordDecl::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
+ I != E; ++I)
+ if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
+ if (V->getInit()) {
+ const APValue *Value = V->evaluateValue();
+ if (Value && Value->isInt()) {
+ llvm::ConstantInt *CI
+ = llvm::ConstantInt::get(CGM.getLLVMContext(), Value->getInt());
+
+ // Create the descriptor for static variable.
+ llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
+ StringRef VName = V->getName();
+ llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
+ // Do not use DIGlobalVariable for enums.
+ if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
+ DBuilder.createStaticVariable(FwdDecl, VName, VName, VUnit,
+ getLineNumber(V->getLocation()),
+ VTy, true, CI);
+ }
+ }
+ }
+ }
+}
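CollectRecordStaticVars keeps in-class constant initializers visible to debuggers. For instance:

    struct S {
      // Picked up by the loop above and emitted via createStaticVariable
      // with its evaluated constant value (42).
      static const int Answer = 42;
    };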
+
llvm::DIType CGDebugInfo::createFieldType(StringRef name,
QualType type,
uint64_t sizeInBitsOverride,
@@ -674,44 +762,75 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
SmallVectorImpl<llvm::Value *> &elements,
llvm::DIType RecordTy) {
unsigned fieldNo = 0;
- const FieldDecl *LastFD = 0;
- bool IsMsStruct = record->hasAttr<MsStructAttr>();
-
const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
- for (RecordDecl::field_iterator I = record->field_begin(),
- E = record->field_end();
- I != E; ++I, ++fieldNo) {
- FieldDecl *field = *I;
- if (IsMsStruct) {
- // Zero-length bitfields following non-bitfield members are ignored
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD)) {
- --fieldNo;
- continue;
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
+
+ // For C++11 lambdas, a field will be the same as a capture, but the capture
+ // has the name and the location of the variable, so we should iterate over
+ // both concurrently.
+ if (CXXDecl && CXXDecl->isLambda()) {
+ RecordDecl::field_iterator Field = CXXDecl->field_begin();
+ unsigned fieldno = 0;
+ for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
+ E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) {
+ const LambdaExpr::Capture C = *I;
+ // TODO: Need to handle 'this' in some way, probably by renaming the
+ // 'this' of the lambda class and having a field member of 'this'.
+ if (C.capturesVariable()) {
+ VarDecl *V = C.getCapturedVar();
+ llvm::DIFile VUnit = getOrCreateFile(C.getLocation());
+ StringRef VName = V->getName();
+ uint64_t SizeInBitsOverride = 0;
+ if (Field->isBitField()) {
+ SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+ llvm::DIType fieldType
+ = createFieldType(VName, Field->getType(), SizeInBitsOverride, C.getLocation(),
+ Field->getAccess(), layout.getFieldOffset(fieldno),
+ VUnit, RecordTy);
+ elements.push_back(fieldType);
}
- LastFD = field;
}
+ } else {
+ bool IsMsStruct = record->hasAttr<MsStructAttr>();
+ const FieldDecl *LastFD = 0;
+ for (RecordDecl::field_iterator I = record->field_begin(),
+ E = record->field_end();
+ I != E; ++I, ++fieldNo) {
+ FieldDecl *field = *I;
+
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD)) {
+ --fieldNo;
+ continue;
+ }
+ LastFD = field;
+ }
- StringRef name = field->getName();
- QualType type = field->getType();
+ StringRef name = field->getName();
+ QualType type = field->getType();
- // Ignore unnamed fields unless they're anonymous structs/unions.
- if (name.empty() && !type->isRecordType()) {
- LastFD = field;
- continue;
- }
+ // Ignore unnamed fields unless they're anonymous structs/unions.
+ if (name.empty() && !type->isRecordType()) {
+ LastFD = field;
+ continue;
+ }
- uint64_t SizeInBitsOverride = 0;
- if (field->isBitField()) {
- SizeInBitsOverride = field->getBitWidthValue(CGM.getContext());
- assert(SizeInBitsOverride && "found named 0-width bitfield");
- }
+ uint64_t SizeInBitsOverride = 0;
+ if (field->isBitField()) {
+ SizeInBitsOverride = field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
- llvm::DIType fieldType
- = createFieldType(name, type, SizeInBitsOverride,
- field->getLocation(), field->getAccess(),
- layout.getFieldOffset(fieldNo), tunit, RecordTy);
+ llvm::DIType fieldType
+ = createFieldType(name, type, SizeInBitsOverride,
+ field->getLocation(), field->getAccess(),
+ layout.getFieldOffset(fieldNo), tunit, RecordTy);
- elements.push_back(fieldType);
+ elements.push_back(fieldType);
+ }
}
}
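In the lambda branch above, each closure member is described with the captured variable's name and capture location, so debuggers present captures naturally. A sketch:

    void demo() {
      int a = 1, b = 2;
      auto l = [a, &b] { return a + b; };  // the closure's fields appear
      (void)l();                           // in debug info as "a" and "b"
    }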
@@ -725,7 +844,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
= getOrCreateType(QualType(Method->getType()->getAs<FunctionProtoType>(),
0),
Unit);
-
+
// Add "this" pointer.
llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
assert (Args.getNumElements() && "Invalid number of arguments!");
@@ -738,11 +857,29 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
if (!Method->isStatic()) {
// "this" pointer is always first argument.
QualType ThisPtr = Method->getThisType(CGM.getContext());
- llvm::DIType ThisPtrType =
- DBuilder.createArtificialType(getOrCreateType(ThisPtr, Unit));
-
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
- Elts.push_back(ThisPtrType);
+
+ const CXXRecordDecl *RD = Method->getParent();
+ if (isa<ClassTemplateSpecializationDecl>(RD)) {
+ // Create pointer type directly in this case.
+ const PointerType *ThisPtrTy = cast<PointerType>(ThisPtr);
+ QualType PointeeTy = ThisPtrTy->getPointeeType();
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
+ uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
+ llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit);
+ llvm::DIType ThisPtrType = DBuilder.createPointerType(PointeeType, Size, Align);
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ // TODO: This and the artificial type below are misleading; the
+ // types aren't artificial, the argument is, but the current
+ // metadata doesn't represent that.
+ ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ } else {
+ llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit);
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ }
}
// Copy rest of the arguments.
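The direct pointer construction above avoids re-entering getOrCreateType on a class template specialization whose debug node may still be mid-emission. Conceptually it covers methods like:

    template <typename T> struct Box {
      T get() const { return v; }  // "this" has type const Box<int> *;
      T v;                         // the pointer node is built directly
    };                             // rather than via getOrCreateType
    int use(Box<int> &b) { return b.get(); }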
@@ -757,14 +894,13 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
/// inside a function.
static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
- if (const CXXRecordDecl *NRD =
- dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
+ if (const CXXRecordDecl *NRD = dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
return isFunctionLocalClass(NRD);
- else if (isa<FunctionDecl>(RD->getDeclContext()))
+ if (isa<FunctionDecl>(RD->getDeclContext()))
return true;
return false;
-
}
+
/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
/// a single member function GlobalDecl.
llvm::DISubprogram
@@ -823,35 +959,48 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
}
if (Method->hasPrototype())
Flags |= llvm::DIDescriptor::FlagPrototyped;
-
+
+ llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
llvm::DISubprogram SP =
DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
MethodDefUnit, MethodLine,
MethodTy, /*isLocalToUnit=*/false,
/* isDefinition=*/ false,
Virtuality, VIndex, ContainingType,
- Flags, CGM.getLangOptions().Optimize);
+ Flags, CGM.getLangOpts().Optimize, NULL,
+ TParamsArray);
- SPCache[Method] = llvm::WeakVH(SP);
+ SPCache[Method->getCanonicalDecl()] = llvm::WeakVH(SP);
return SP;
}
/// CollectCXXMemberFunctions - A helper function to collect debug info for
-/// C++ member functions.This is used while creating debug info entry for
+/// C++ member functions. This is used while creating debug info entry for
/// a Record.
void CGDebugInfo::
CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
- for(CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *Method = *I;
-
- if (Method->isImplicit() && !Method->isUsed())
+
+ // Since we want more than just the individual member decls if we
+ // have templated functions, iterate over every declaration to gather
+ // the functions.
+ for(DeclContext::decl_iterator I = RD->decls_begin(),
+ E = RD->decls_end(); I != E; ++I) {
+ Decl *D = *I;
+ if (D->isImplicit() && !D->isUsed())
continue;
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
+ SE = FTD->spec_end(); SI != SE; ++SI) {
+ FunctionDecl *FD = *SI;
+ if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(FD))
+ EltTys.push_back(CreateCXXMemberFunction(M, Unit, RecordTy));
+ }
}
}
@@ -862,7 +1011,7 @@ void CGDebugInfo::
CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
- for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
+ for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
BE = RD->friend_end(); BI != BE; ++BI) {
if ((*BI)->isUnsupportedFriend())
continue;
@@ -992,7 +1141,7 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
/// getVTableName - Get vtable name for the given Class.
StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
- // Otherwise construct gdb compatible name name.
+ // Construct a gdb-compatible name.
std::string Name = "_vptr$" + RD->getNameAsString();
// Copy this name on the side and use its reference.
@@ -1028,7 +1177,15 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
SourceLocation Loc) {
- llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ return T;
+}
+
+/// getOrCreateInterfaceType - Emit standalone debug info for an
+/// Objective-C interface type.
+llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
+ SourceLocation Loc) {
+ llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
DBuilder.retainType(T);
return T;
}
@@ -1036,11 +1193,9 @@ llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
/// CreateType - get structure or union type.
llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
- llvm::DIFile Unit = getOrCreateFile(RD->getLocation());
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
- unsigned Line = getLineNumber(RD->getLocation());
// Records and classes and unions can all be recursive. To handle them, we
// first generate a debug descriptor for the struct as a forward declaration.
@@ -1048,126 +1203,63 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DIDescriptor FDContext =
- getContextDescriptor(cast<Decl>(RD->getDeclContext()));
- // If this is just a forward declaration, construct an appropriately
- // marked node and just return it.
- if (!RD->getDefinition()) {
- llvm::DIType FwdDecl =
- DBuilder.createStructType(FDContext, RD->getName(),
- DefUnit, Line, 0, 0,
- llvm::DIDescriptor::FlagFwdDecl,
- llvm::DIArray());
+ llvm::DIType FwdDecl = getOrCreateLimitedType(QualType(Ty, 0), DefUnit);
- return FwdDecl;
- }
+ if (FwdDecl.isForwardDecl())
+ return FwdDecl;
- llvm::DIType FwdDecl = DBuilder.createTemporaryType(DefUnit);
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode(FwdDecl);
- llvm::MDNode *MN = FwdDecl;
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
- // Otherwise, insert it into the TypeCache so that recursive uses will find
- // it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
// Push the struct on region stack.
LexicalBlockStack.push_back(FwdDeclNode);
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ // Add this to the completed types cache since we're completing it.
+ CompletedTypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+
// Convert all the elements.
SmallVector<llvm::Value *, 16> EltTys;
+ // Note: The split of CXXDecl information here is intentional; the
+ // gdb tests depend on a certain ordering at printout. The debug
+ // information offsets are still correct if we merge them all together
+ // though.
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
- CollectCXXBases(CXXDecl, Unit, EltTys, FwdDecl);
- CollectVTableInfo(CXXDecl, Unit, EltTys);
+ CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectVTableInfo(CXXDecl, DefUnit, EltTys);
}
-
- // Collect static variables with initializers.
- for (RecordDecl::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
- I != E; ++I)
- if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
- if (const Expr *Init = V->getInit()) {
- Expr::EvalResult Result;
- if (Init->Evaluate(Result, CGM.getContext()) && Result.Val.isInt()) {
- llvm::ConstantInt *CI
- = llvm::ConstantInt::get(CGM.getLLVMContext(), Result.Val.getInt());
-
- // Create the descriptor for static variable.
- llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
- StringRef VName = V->getName();
- llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
- // Do not use DIGlobalVariable for enums.
- if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
- DBuilder.createStaticVariable(FwdDecl, VName, VName, VUnit,
- getLineNumber(V->getLocation()),
- VTy, true, CI);
- }
- }
- }
- }
- CollectRecordFields(RD, Unit, EltTys, FwdDecl);
+ // Collect static variables with initializers and other fields.
+ CollectRecordStaticVars(RD, FwdDecl);
+ CollectRecordFields(RD, DefUnit, EltTys, FwdDecl);
llvm::DIArray TParamsArray;
if (CXXDecl) {
- CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
- CollectCXXFriends(CXXDecl, Unit, EltTys, FwdDecl);
+ CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectCXXFriends(CXXDecl, DefUnit, EltTys, FwdDecl);
if (const ClassTemplateSpecializationDecl *TSpecial
= dyn_cast<ClassTemplateSpecializationDecl>(RD))
- TParamsArray = CollectCXXTemplateParams(TSpecial, Unit);
+ TParamsArray = CollectCXXTemplateParams(TSpecial, DefUnit);
}
LexicalBlockStack.pop_back();
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
- RegionMap.find(Ty->getDecl());
- if (RI != RegionMap.end())
- RegionMap.erase(RI);
+ RegionMap.erase(Ty->getDecl());
- llvm::DIDescriptor RDContext =
- getContextDescriptor(cast<Decl>(RD->getDeclContext()));
- StringRef RDName = RD->getName();
- uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- llvm::MDNode *RealDecl = NULL;
-
+ // FIXME: Magic numbers ahoy! These should be changed when we
+ // get some enums in llvm/Analysis/DebugInfo.h to refer to
+ // them.
if (RD->isUnion())
- RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, Elements);
+ FwdDeclNode->replaceOperandWith(10, Elements);
else if (CXXDecl) {
- RDName = getClassName(RD);
- // A class's primary base or the class itself contains the vtable.
- llvm::MDNode *ContainingType = NULL;
- const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
- // Seek non virtual primary base root.
- while (1) {
- const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
- const CXXRecordDecl *PBT = BRL.getPrimaryBase();
- if (PBT && !BRL.isPrimaryBaseVirtual())
- PBase = PBT;
- else
- break;
- }
- ContainingType =
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
- }
- else if (CXXDecl->isDynamicClass())
- ContainingType = FwdDecl;
-
- RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, 0, llvm::DIType(),
- Elements, ContainingType,
- TParamsArray);
- } else
- RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, Elements);
+ FwdDeclNode->replaceOperandWith(10, Elements);
+ FwdDeclNode->replaceOperandWith(13, TParamsArray);
+ } else
+ FwdDeclNode->replaceOperandWith(10, Elements);
- // Now that we have a real decl for the struct, replace anything using the
- // old decl with the new one. This will recursively update the debug info.
- llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
- RegionMap[RD] = llvm::WeakVH(RealDecl);
- return llvm::DIType(RealDecl);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDeclNode);
+ return llvm::DIType(FwdDeclNode);
}
/// CreateType - get objective-c object type.
@@ -1191,30 +1283,38 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// If this is just a forward declaration return a special forward-declaration
// debug type since we won't be able to lay out the entire type.
- if (ID->isForwardDecl()) {
+ ObjCInterfaceDecl *Def = ID->getDefinition();
+ if (!Def) {
llvm::DIType FwdDecl =
- DBuilder.createStructType(Unit, ID->getName(),
- DefUnit, Line, 0, 0, 0,
- llvm::DIArray(), RuntimeLang);
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ ID->getName(), DefUnit, Line,
+ RuntimeLang);
return FwdDecl;
}
- // To handle a recursive interface, we first generate a debug descriptor
- // for the struct as a forward declaration. Then (if it is a definition)
- // we go through and get debug info for all of its members. Finally, we
- // create a descriptor for the complete type (which may refer to the
- // forward decl if the struct is recursive) and replace all uses of the
- // forward declaration with the final definition.
- llvm::DIType FwdDecl = DBuilder.createTemporaryType(DefUnit);
-
- llvm::MDNode *MN = FwdDecl;
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
- // Otherwise, insert it into the TypeCache so that recursive uses will find
- // it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+ ID = Def;
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ unsigned Flags = 0;
+ if (ID->getImplementation())
+ Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
+
+ llvm::DIType RealDecl =
+ DBuilder.createStructType(Unit, ID->getName(), DefUnit,
+ Line, Size, Align, Flags,
+ llvm::DIArray(), RuntimeLang);
+
+ // Otherwise, insert it into the CompletedTypeCache so that recursive uses
+ // will find it while we're emitting the complete type.
+ CompletedTypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
// Push the struct on region stack.
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode(RealDecl);
+
LexicalBlockStack.push_back(FwdDeclNode);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
// Convert all the elements.
SmallVector<llvm::Value *, 16> EltTys;
@@ -1227,12 +1327,31 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return llvm::DIType();
llvm::DIType InhTag =
- DBuilder.createInheritance(FwdDecl, SClassTy, 0, 0);
+ DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
EltTys.push_back(InhTag);
}
+ for (ObjCContainerDecl::prop_iterator I = ID->prop_begin(),
+ E = ID->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ llvm::MDNode *PropertyNode =
+ DBuilder.createObjCProperty(PD->getName(),
+ PUnit, PLine,
+ (Getter && Getter->isImplicit()) ? "" :
+ getSelectorName(PD->getGetterName()),
+ (Setter && Setter->isImplicit()) ? "" :
+ getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ EltTys.push_back(PropertyNode);
+ }
+
const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
- ObjCImplementationDecl *ImpD = ID->getImplementation();
unsigned FieldNo = 0;
for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
Field = Field->getNextIvar(), ++FieldNo) {
@@ -1266,7 +1385,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// the non-fragile abi and the debugger should ignore the value anyways.
// Call it the FieldNo+1 due to how debuggers use the information,
// e.g. negating the value when it needs a lookup in the dynamic table.
- uint64_t FieldOffset = CGM.getLangOptions().ObjCNonFragileABI ? FieldNo+1
+ uint64_t FieldOffset = CGM.getLangOpts().ObjCNonFragileABI ? FieldNo+1
: RL.getFieldOffset(FieldNo);
unsigned Flags = 0;
@@ -1275,69 +1394,43 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
Flags = llvm::DIDescriptor::FlagPrivate;
- StringRef PropertyName;
- StringRef PropertyGetter;
- StringRef PropertySetter;
- unsigned PropertyAttributes = 0;
- ObjCPropertyDecl *PD = NULL;
- if (ImpD)
+ llvm::MDNode *PropertyNode = NULL;
+ if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
if (ObjCPropertyImplDecl *PImpD =
- ImpD->FindPropertyImplIvarDecl(Field->getIdentifier()))
- PD = PImpD->getPropertyDecl();
- if (PD) {
- PropertyName = PD->getName();
- PropertyGetter = getSelectorName(PD->getGetterName());
- PropertySetter = getSelectorName(PD->getSetterName());
- PropertyAttributes = PD->getPropertyAttributes();
- }
+ ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
+ if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ PropertyNode =
+ DBuilder.createObjCProperty(PD->getName(),
+ PUnit, PLine,
+ (Getter && Getter->isImplicit()) ? "" :
+ getSelectorName(PD->getGetterName()),
+ (Setter && Setter->isImplicit()) ? "" :
+ getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ }
+ }
+ }
FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit,
FieldLine, FieldSize, FieldAlign,
FieldOffset, Flags, FieldTy,
- PropertyName, PropertyGetter,
- PropertySetter, PropertyAttributes);
+ PropertyNode);
EltTys.push_back(FieldTy);
}
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
-
+ FwdDeclNode->replaceOperandWith(10, Elements);
+
LexicalBlockStack.pop_back();
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
- RegionMap.find(Ty->getDecl());
- if (RI != RegionMap.end())
- RegionMap.erase(RI);
-
- // Bit size, align and offset of the type.
- uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
-
- unsigned Flags = 0;
- if (ID->getImplementation())
- Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
-
- llvm::DIType RealDecl =
- DBuilder.createStructType(Unit, ID->getName(), DefUnit,
- Line, Size, Align, Flags,
- Elements, RuntimeLang);
-
- // Now that we have a real decl for the struct, replace anything using the
- // old decl with the new one. This will recursively update the debug info.
- llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
- RegionMap[ID] = llvm::WeakVH(RealDecl);
-
- return RealDecl;
-}
-
-llvm::DIType CGDebugInfo::CreateType(const TagType *Ty) {
- if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- return CreateType(RT);
- else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
- return CreateEnumType(ET->getDecl());
-
- return llvm::DIType();
+ return llvm::DIType(FwdDeclNode);
}
-llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
int64_t NumElems = Ty->getNumElements();
int64_t LowerBound = 0;
@@ -1545,20 +1638,14 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
if (T == LastT)
return T;
} while (true);
-
- return T;
}
-/// getOrCreateType - Get the type from the cache or create a new
-/// one if necessary.
-llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
- llvm::DIFile Unit) {
- if (Ty.isNull())
- return llvm::DIType();
+/// getTypeOrNull - Get the type from the cache or return a null type if it doesn't exist.
+llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) {
// Unwrap the type as needed for debug information.
Ty = UnwrapTypeForDebugInfo(Ty);
-
+
// Check for existing entry.
llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
TypeCache.find(Ty.getAsOpaquePtr());
@@ -1568,17 +1655,59 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
return llvm::DIType(cast<llvm::MDNode>(it->second));
}
+ return llvm::DIType();
+}
+
+/// getCompletedTypeOrNull - Get the type from the cache or return null if it
+/// doesn't exist.
+llvm::DIType CGDebugInfo::getCompletedTypeOrNull(QualType Ty) {
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ // Check for existing entry.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ CompletedTypeCache.find(Ty.getAsOpaquePtr());
+ if (it != CompletedTypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ return llvm::DIType();
+}
+
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ llvm::DIType T = getCompletedTypeOrNull(Ty);
+
+ if (T.Verify()) return T;
+
// Otherwise create the type.
llvm::DIType Res = CreateTypeNode(Ty, Unit);
+ llvm::DIType TC = getTypeOrNull(Ty);
+ if (TC.Verify() && TC.isForwardDecl())
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), TC));
+
// And update the type cache.
- TypeCache[Ty.getAsOpaquePtr()] = Res;
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+
+ if (!Res.isForwardDecl())
+ CompletedTypeCache[Ty.getAsOpaquePtr()] = Res;
return Res;
}
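The split between TypeCache and CompletedTypeCache is what lets recursive types terminate: while a record's members are being emitted, the record is registered as completed, so self-references resolve to the in-progress node instead of recursing. The classic case:

    struct Node {
      Node *next;  // re-entering getOrCreateType(Node) here finds the
      int value;   // node already registered in the completed-type cache
    };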
/// CreateTypeNode - Create a new debug type node.
-llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
// Handle qualifiers, which recursively handles what they refer to.
if (Ty.hasLocalQualifiers())
return CreateQualifiedType(Ty, Unit);
@@ -1603,15 +1732,20 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
return CreateType(cast<ObjCObjectType>(Ty), Unit);
case Type::ObjCInterface:
return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
- case Type::Builtin: return CreateType(cast<BuiltinType>(Ty));
- case Type::Complex: return CreateType(cast<ComplexType>(Ty));
- case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit);
+ case Type::Builtin:
+ return CreateType(cast<BuiltinType>(Ty));
+ case Type::Complex:
+ return CreateType(cast<ComplexType>(Ty));
+ case Type::Pointer:
+ return CreateType(cast<PointerType>(Ty), Unit);
case Type::BlockPointer:
return CreateType(cast<BlockPointerType>(Ty), Unit);
- case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Typedef:
+ return CreateType(cast<TypedefType>(Ty), Unit);
case Type::Record:
+ return CreateType(cast<RecordType>(Ty));
case Type::Enum:
- return CreateType(cast<TagType>(Ty));
+ return CreateEnumType(cast<EnumType>(Ty)->getDecl());
case Type::FunctionProto:
case Type::FunctionNoProto:
return CreateType(cast<FunctionType>(Ty), Unit);
@@ -1642,7 +1776,6 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
case Type::UnaryTransform:
case Type::Auto:
llvm_unreachable("type should have been unwrapped!");
- return llvm::DIType();
}
assert(Diag && "Fall through without a diagnostic?");
@@ -1653,6 +1786,123 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
return llvm::DIType();
}
+/// getOrCreateLimitedType - Get the type from the cache or create a new
+/// limited type if necessary.
+llvm::DIType CGDebugInfo::getOrCreateLimitedType(QualType Ty,
+ llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ llvm::DIType T = getTypeOrNull(Ty);
+
+ // We may have cached a forward decl when we could have created
+ // a non-forward decl. Go ahead and create a non-forward decl
+ // now.
+ if (T.Verify() && !T.isForwardDecl()) return T;
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateLimitedTypeNode(Ty, Unit);
+
+ if (T.Verify() && T.isForwardDecl())
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), T));
+
+ // And update the type cache.
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+ return Res;
+}
+
+// TODO: Currently used for context chains when limiting debug info.
+llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = RD->getName();
+
+ llvm::DIDescriptor RDContext;
+ if (CGM.getCodeGenOpts().LimitDebugInfo)
+ RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
+ else
+ RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (!RD->getDefinition())
+ return createRecordFwdDecl(RD, RDContext);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ llvm::TrackingVH<llvm::MDNode> RealDecl;
+
+ if (RD->isUnion())
+ RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, llvm::DIArray());
+ else if (CXXDecl) {
+ RDName = getClassName(RD);
+
+ // FIXME: This could be a struct type giving a default visibility different
+ // than C++ class type, but needs llvm metadata changes first.
+ RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, 0, llvm::DIType(),
+ llvm::DIArray(), llvm::DIType(),
+ llvm::DIArray());
+ } else
+ RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, llvm::DIArray());
+
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = llvm::DIType(RealDecl);
+
+ if (CXXDecl) {
+ // A class's primary base or the class itself contains the vtable.
+ llvm::MDNode *ContainingType = NULL;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
+ // Seek non virtual primary base root.
+ while (1) {
+ const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
+ const CXXRecordDecl *PBT = BRL.getPrimaryBase();
+ if (PBT && !BRL.isPrimaryBaseVirtual())
+ PBase = PBT;
+ else
+ break;
+ }
+ ContainingType =
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), DefUnit);
+ }
+ else if (CXXDecl->isDynamicClass())
+ ContainingType = RealDecl;
+
+ RealDecl->replaceOperandWith(12, ContainingType);
+ }
+ return llvm::DIType(RealDecl);
+}
+
+/// CreateLimitedTypeNode - Create a new debug type node, but only forward
+/// declare composite types that haven't been processed yet.
+llvm::DIType CGDebugInfo::CreateLimitedTypeNode(QualType Ty, llvm::DIFile Unit) {
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+ #include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Dependent types cannot show up in debug information");
+
+ case Type::Record:
+ return CreateLimitedType(cast<RecordType>(Ty));
+ default:
+ return CreateTypeNode(Ty, Unit);
+ }
+}
+
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
StringRef Name,
@@ -1677,7 +1927,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
getContextDescriptor(cast<Decl>(D->getDeclContext()));
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- MI = SPCache.find(FD);
+ MI = SPCache.find(FD->getCanonicalDecl());
if (MI != SPCache.end()) {
llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
@@ -1688,7 +1938,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
E = FD->redecls_end(); I != E; ++I) {
const FunctionDecl *NextFD = *I;
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- MI = SPCache.find(NextFD);
+ MI = SPCache.find(NextFD->getCanonicalDecl());
if (MI != SPCache.end()) {
llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
@@ -1705,7 +1955,7 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
llvm::DIFile F) {
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
- else if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
// Add "self" and "_cmd"
SmallVector<llvm::Value *, 16> Elts;
@@ -1726,8 +1976,7 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
return getOrCreateType(FnType, F);
}
-/// EmitFunctionStart - Constructs the debug code for entering a function -
-/// "llvm.dbg.func.start.".
+/// EmitFunctionStart - Constructs the debug code for entering a function.
void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::Function *Fn,
CGBuilderTy &Builder) {
@@ -1738,15 +1987,17 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
FnBeginRegionCount.push_back(LexicalBlockStack.size());
const Decl *D = GD.getDecl();
+ // Use the location of the declaration.
+ SourceLocation Loc = D->getLocation();
unsigned Flags = 0;
- llvm::DIFile Unit = getOrCreateFile(CurLoc);
+ llvm::DIFile Unit = getOrCreateFile(Loc);
llvm::DIDescriptor FDContext(Unit);
llvm::DIArray TParamsArray;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- // If there is a DISubprogram for this function available then use it.
+ // If there is a DISubprogram available for this function, use it.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- FI = SPCache.find(FD);
+ FI = SPCache.find(FD->getCanonicalDecl());
if (FI != SPCache.end()) {
llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(&*FI->second));
if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
@@ -1758,12 +2009,13 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
}
Name = getFunctionName(FD);
// Use mangled name as linkage name for c/c++ functions.
- if (!Fn->hasInternalLinkage())
+ if (FD->hasPrototype()) {
LinkageName = CGM.getMangledName(GD);
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ }
if (LinkageName == Name)
LinkageName = StringRef();
- if (FD->hasPrototype())
- Flags |= llvm::DIDescriptor::FlagPrototyped;
+
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNameSpace(NSDecl);
@@ -1784,18 +2036,17 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (!Name.empty() && Name[0] == '\01')
Name = Name.substr(1);
- // It is expected that CurLoc is set before using EmitFunctionStart.
- // Usually, CurLoc points to the left bracket location of compound
- // statement representing function body.
- unsigned LineNo = getLineNumber(CurLoc);
+ unsigned LineNo = getLineNumber(Loc);
if (D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
+
llvm::DISubprogram SPDecl = getFunctionDeclaration(D);
llvm::DISubprogram SP =
DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
LineNo, getOrCreateFunctionType(D, FnType, Unit),
Fn->hasInternalLinkage(), true/*definition*/,
- Flags, CGM.getLangOptions().Optimize, Fn,
+ getLineNumber(CurLoc),
+ Flags, CGM.getLangOpts().Optimize, Fn,
TParamsArray, SPDecl);
// Push function on region stack.
@@ -1835,11 +2086,11 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
llvm::DIDescriptor D =
DBuilder.createLexicalBlock(LexicalBlockStack.empty() ?
- llvm::DIDescriptor() :
- llvm::DIDescriptor(LexicalBlockStack.back()),
- getOrCreateFile(CurLoc),
- getLineNumber(CurLoc),
- getColumnNumber(CurLoc));
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(LexicalBlockStack.back()),
+ getOrCreateFile(CurLoc),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
llvm::MDNode *DN = D;
LexicalBlockStack.push_back(DN);
}
@@ -1855,8 +2106,8 @@ void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc
// Emit a line table change for the current location inside the new scope.
Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(Loc),
- getColumnNumber(Loc),
- LexicalBlockStack.back()));
+ getColumnNumber(Loc),
+ LexicalBlockStack.back()));
}
/// EmitLexicalBlockEnd - Constructs the debug code for exiting a declarative
@@ -1997,7 +2248,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
if (VD->hasAttr<BlocksAttr>()) {
CharUnits offset = CharUnits::fromQuantity(32);
SmallVector<llvm::Value *, 9> addr;
- llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::Type *Int64Ty = CGM.Int64Ty;
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
offset = CGM.getContext().toCharUnitsFromBits(
@@ -2017,7 +2268,6 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
addr, ArgNo);
// Insert an llvm.dbg.declare into the current block.
- // Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
@@ -2027,7 +2277,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
Name, Unit, Line, Ty,
- CGM.getLangOptions().Optimize, Flags, ArgNo);
+ CGM.getLangOpts().Optimize, Flags, ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
@@ -2056,7 +2306,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
FieldName, Unit, Line, FieldTy,
- CGM.getLangOptions().Optimize, Flags,
+ CGM.getLangOpts().Optimize, Flags,
ArgNo);
// Insert an llvm.dbg.declare into the current block.
@@ -2103,7 +2353,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
SmallVector<llvm::Value *, 9> addr;
- llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::Type *Int64Ty = CGM.Int64Ty;
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
if (isByRef) {
@@ -2261,7 +2511,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
fields.push_back(fieldType);
}
- llvm::SmallString<36> typeName;
+ SmallString<36> typeName;
llvm::raw_svector_ostream(typeName)
<< "__block_literal_" << CGM.getUniqueBlockCount();
@@ -2284,7 +2534,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
llvm::DIDescriptor(scope),
name, tunit, line, type,
- CGM.getLangOptions().Optimize, flags,
+ CGM.getLangOpts().Optimize, flags,
cast<llvm::Argument>(addr)->getArgNo() + 1);
// Insert an llvm.dbg.value into the current block.
@@ -2297,7 +2547,6 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
/// EmitGlobalVariable - Emit information about a global variable.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
-
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
unsigned LineNo = getLineNumber(D->getLocation());
@@ -2314,7 +2563,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
- ArrayType::Normal, 0);
+ ArrayType::Normal, 0);
}
StringRef DeclName = D->getName();
StringRef LinkageName;
@@ -2395,16 +2644,25 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
return NS;
}
-/// UpdateCompletedType - Update type cache because the type is now
-/// translated.
-void CGDebugInfo::UpdateCompletedType(const TagDecl *TD) {
- QualType Ty = CGM.getContext().getTagDeclType(TD);
-
- // If the type exist in type cache then remove it from the cache.
- // There is no need to prepare debug info for the completed type
- // right now. It will be generated on demand lazily.
- llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
- TypeCache.find(Ty.getAsOpaquePtr());
- if (it != TypeCache.end())
- TypeCache.erase(it);
+void CGDebugInfo::finalize(void) {
+ for (std::vector<std::pair<void *, llvm::WeakVH> >::const_iterator VI
+ = ReplaceMap.begin(), VE = ReplaceMap.end(); VI != VE; ++VI) {
+ llvm::DIType Ty, RepTy;
+ // Verify that the debug info still exists.
+ if (&*VI->second)
+ Ty = llvm::DIType(cast<llvm::MDNode>(VI->second));
+
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(VI->first);
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ RepTy = llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify()) {
+ Ty.replaceAllUsesWith(RepTy);
+ }
+ }
+ DBuilder.finalize();
}
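The rewritten finalize() implements the deferred fix-up that the new ReplaceMap exists for: any type emitted as a forward declaration is replaced, via replaceAllUsesWith, with the completed type found in TypeCache once the whole translation unit has been seen. A sketch of the kind of source that forces a forward declaration in the first place (illustrative type):

    struct Node {
      Node *next;   // mentions Node while Node is still incomplete,
                    // so debug info starts from a forward declaration
      int value;
    };
    // finalize() later RAUWs that forward declaration with the
    // completed DIType for Node.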
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
index a4533a8..ec7705c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
@@ -53,6 +53,13 @@ class CGDebugInfo {
/// TypeCache - Cache of previously constructed Types.
llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
+ /// CompletedTypeCache - Cache of previously constructed complete RecordTypes.
+ llvm::DenseMap<void *, llvm::WeakVH> CompletedTypeCache;
+
+ /// ReplaceMap - Cache of forward declared types to RAUW at the end of
+ /// compilation.
+ std::vector<std::pair<void *, llvm::WeakVH> > ReplaceMap;
+
bool BlockLiteralGenericSet;
llvm::DIType BlockLiteralGeneric;
@@ -83,8 +90,8 @@ class CGDebugInfo {
llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const TagType *Ty);
llvm::DIType CreateType(const RecordType *Ty);
+ llvm::DIType CreateLimitedType(const RecordType *Ty);
llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
@@ -94,6 +101,8 @@ class CGDebugInfo {
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const AtomicType *Ty, llvm::DIFile F);
llvm::DIType CreateEnumType(const EnumDecl *ED);
+ llvm::DIType getTypeOrNull(const QualType);
+ llvm::DIType getCompletedTypeOrNull(const QualType);
llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile F);
llvm::DIType getOrCreateFunctionType(const Decl *D, QualType FnType,
@@ -139,6 +148,7 @@ class CGDebugInfo {
AccessSpecifier AS, uint64_t offsetInBits,
llvm::DIFile tunit,
llvm::DIDescriptor scope);
+ void CollectRecordStaticVars(const RecordDecl *, llvm::DIType);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
SmallVectorImpl<llvm::Value *> &E,
llvm::DIType RecordTy);
@@ -154,7 +164,8 @@ class CGDebugInfo {
public:
CGDebugInfo(CodeGenModule &CGM);
~CGDebugInfo();
- void finalize() { DBuilder.finalize(); }
+
+ void finalize(void);
/// setLocation - Update the current source location. If \arg loc is
/// invalid it is ignored.
@@ -172,10 +183,6 @@ public:
/// EmitFunctionEnd - Constructs the debug code for exiting a function.
void EmitFunctionEnd(CGBuilderTy &Builder);
- /// UpdateCompletedType - Update type cache because the type is now
- /// translated.
- void UpdateCompletedType(const TagDecl *TD);
-
/// EmitLexicalBlockStart - Emit metadata to indicate the beginning of a
/// new lexical block and push the block onto the stack.
void EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc);
@@ -219,6 +226,12 @@ public:
/// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L);
+
+ /// getOrCreateInterfaceType - Emit standalone debug info for an
+ /// Objective-C interface type.
+ llvm::DIType getOrCreateInterfaceType(QualType Ty,
+ SourceLocation Loc);
+
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
@@ -232,6 +245,13 @@ private:
/// getContextDescriptor - Get context info for the decl.
llvm::DIDescriptor getContextDescriptor(const Decl *Decl);
+ /// createRecordFwdDecl - Create a forward decl for a RecordType in a given
+ /// context.
+ llvm::DIType createRecordFwdDecl(const RecordDecl *, llvm::DIDescriptor);
+
+ /// createContextChain - Create a set of decls for the context chain.
+ llvm::DIDescriptor createContextChain(const Decl *Decl);
+
/// getCurrentDirname - Return current directory name.
StringRef getCurrentDirname();
@@ -249,9 +269,17 @@ private:
/// necessary.
llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
+ /// getOrCreateLimitedType - Get the type from the cache or create a new
+ /// partial type if necessary.
+ llvm::DIType getOrCreateLimitedType(QualType Ty, llvm::DIFile F);
+
/// CreateTypeNode - Create type metadata for a source language type.
llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile F);
+ /// CreateLimitedTypeNode - Create type metadata for a source language
+ /// type, but only partial types for records.
+ llvm::DIType CreateLimitedTypeNode(QualType Ty, llvm::DIFile F);
+
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
StringRef Name, uint64_t *Offset);
@@ -274,7 +302,7 @@ private:
StringRef getSelectorName(Selector S);
/// getClassName - Get class name including template argument list.
- StringRef getClassName(RecordDecl *RD);
+ StringRef getClassName(const RecordDecl *RD);
/// getVTableName - Get vtable name for the given Class.
StringRef getVTableName(const CXXRecordDecl *Decl);
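The header now splits record emission into the full CreateType(const RecordType *) and a new CreateLimitedType, with CompletedTypeCache recording which records already carry complete debug info. A limited (declaration-only) type suffices whenever the record is only ever reached through a pointer or reference, e.g. (illustrative):

    struct Big;      // never completed in this translation unit
    Big *handle;     // 'Big *' needs only a limited DIType for Big:
                     // its name and context, not a member list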
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
index a6147ea..8c154f0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -65,8 +65,6 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::AccessSpec:
case Decl::LinkageSpec:
case Decl::ObjCPropertyImpl:
- case Decl::ObjCClass:
- case Decl::ObjCForwardProtocol:
case Decl::FileScopeAsm:
case Decl::Friend:
case Decl::FriendTemplate:
@@ -84,6 +82,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::NamespaceAlias:
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
case Decl::Label: // __label__ x;
+ case Decl::Import:
// None of these decls require codegen support.
return;
@@ -122,7 +121,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
// uniqued. We can't do this in C, though, because there's no
// standard way to agree on which variables are the same (i.e.
// there's no mangling).
- if (getContext().getLangOptions().CPlusPlus)
+ if (getContext().getLangOpts().CPlusPlus)
if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
Linkage = CurFn->getLinkage();
@@ -142,7 +141,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
const char *Separator) {
CodeGenModule &CGM = CGF.CGM;
- if (CGF.getContext().getLangOptions().CPlusPlus) {
+ if (CGF.getContext().getLangOpts().CPlusPlus) {
StringRef Name = CGM.getMangledName(&D);
return Name.str();
}
@@ -177,7 +176,12 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
QualType Ty = D.getType();
assert(Ty->isConstantSizeType() && "VLAs can't be static");
- std::string Name = GetStaticDeclName(*this, D, Separator);
+ // Use the label if the variable is renamed with the asm-label extension.
+ std::string Name;
+ if (D.hasAttr<AsmLabelAttr>())
+ Name = CGM.getMangledName(&D);
+ else
+ Name = GetStaticDeclName(*this, D, Separator);
llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
llvm::GlobalVariable *GV =
@@ -192,6 +196,14 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
return GV;
}
+/// hasNontrivialDestruction - Determine whether a type's destruction is
+/// non-trivial. If so, and the variable uses static initialization, we must
+/// register its destructor to run on exit.
+static bool hasNontrivialDestruction(QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return RD && !RD->hasTrivialDestructor();
+}
+
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
@@ -199,19 +211,19 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable *GV) {
- llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
+ llvm::Constant *Init = CGM.EmitConstantInit(D, this);
// If constant emission failed, then this should be a C++ static
// initializer.
if (!Init) {
- if (!getContext().getLangOptions().CPlusPlus)
+ if (!getContext().getLangOpts().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
else if (Builder.GetInsertBlock()) {
// Since we have a static initializer, this global variable can't
// be constant.
GV->setConstant(false);
- EmitCXXGuardedInit(D, GV);
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
}
return GV;
}
@@ -243,7 +255,16 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
OldGV->eraseFromParent();
}
+ GV->setConstant(CGM.isTypeConstant(D.getType(), true));
GV->setInitializer(Init);
+
+ if (hasNontrivialDestruction(D.getType())) {
+ // We have a constant initializer, but a nontrivial destructor. We still
+ // need to perform a guarded "initialization" in order to register the
+ // destructor.
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
+ }
+
return GV;
}
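AddInitializerToStaticVarDecl can now fold the initializer to a constant even when the type has a nontrivial destructor; in that case the guarded "initialization" at first use performs no stores and merely registers the destructor, which is what the new PerformInit parameter of EmitCXXGuardedInit expresses. For example (hypothetical type):

    struct Logger { ~Logger(); };     // nontrivial destructor
    void use() {
      static const Logger log = {};   // the initializer folds to a
                                      // constant, but ~Logger() must
                                      // still be registered, so guarded
                                      // init runs with PerformInit=false
    }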
@@ -252,11 +273,23 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
- llvm::GlobalVariable *GV = CreateStaticVarDecl(D, ".", Linkage);
+ // Check to see if we already have a global variable for this
+ // declaration. This can happen when double-emitting function
+ // bodies, e.g. with complete and base constructors.
+ llvm::Constant *addr =
+ CGM.getStaticLocalDeclAddress(&D);
+
+ llvm::GlobalVariable *var;
+ if (addr) {
+ var = cast<llvm::GlobalVariable>(addr->stripPointerCasts());
+ } else {
+ addr = var = CreateStaticVarDecl(D, ".", Linkage);
+ }
// Store into LocalDeclMap before generating initializer to handle
// circular references.
- DMEntry = GV;
+ DMEntry = addr;
+ CGM.setStaticLocalDeclAddress(&D, addr);
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -264,40 +297,38 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
if (D.getType()->isVariablyModifiedType())
EmitVariablyModifiedType(D.getType());
- // Local static block variables must be treated as globals as they may be
- // referenced in their RHS initializer block-literal expression.
- CGM.setStaticLocalDeclAddress(&D, GV);
+ // Save the type in case adding the initializer forces a type change.
+ llvm::Type *expectedType = addr->getType();
// If this value has an initializer, emit it.
if (D.getInit())
- GV = AddInitializerToStaticVarDecl(D, GV);
+ var = AddInitializerToStaticVarDecl(D, var);
- GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ var->setAlignment(getContext().getDeclAlign(&D).getQuantity());
if (D.hasAttr<AnnotateAttr>())
- CGM.AddGlobalAnnotations(&D, GV);
+ CGM.AddGlobalAnnotations(&D, var);
if (const SectionAttr *SA = D.getAttr<SectionAttr>())
- GV->setSection(SA->getName());
+ var->setSection(SA->getName());
if (D.hasAttr<UsedAttr>())
- CGM.AddUsedGlobal(GV);
+ CGM.AddUsedGlobal(var);
// We may have to cast the constant because of the initializer
// mismatch above.
//
// FIXME: It is really dangerous to store this in the map; if anyone
// RAUW's the GV uses of this constant will be invalid.
- llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
- llvm::Type *LPtrTy =
- LTy->getPointerTo(CGM.getContext().getTargetAddressSpace(D.getType()));
- DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
+ llvm::Constant *castedAddr = llvm::ConstantExpr::getBitCast(var, expectedType);
+ DMEntry = castedAddr;
+ CGM.setStaticLocalDeclAddress(&D, castedAddr);
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
if (DI) {
DI->setLocation(D.getLocation());
- DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
+ DI->EmitGlobalVariable(var, &D);
}
}
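EmitStaticVarDecl consults getStaticLocalDeclAddress first because a function body containing a static local can be emitted more than once, and every emission must agree on a single global. The classic trigger is a constructor, which the Itanium ABI emits in complete-object (C1) and base-object (C2) variants (illustrative):

    struct Base { };
    struct Derived : virtual Base {
      Derived() { static int calls = 0; ++calls; }
      // Both emitted constructor bodies must share one
      // global backing 'calls'.
    };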
@@ -306,12 +337,12 @@ namespace {
DestroyObject(llvm::Value *addr, QualType type,
CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
- : addr(addr), type(type), destroyer(*destroyer),
+ : addr(addr), type(type), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
llvm::Value *addr;
QualType type;
- CodeGenFunction::Destroyer &destroyer;
+ CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
void Emit(CodeGenFunction &CGF, Flags flags) {
@@ -371,8 +402,8 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) {
// Compute the address of the local variable, in case it's a
// byref or something.
- DeclRefExpr DRE(const_cast<VarDecl*>(&Var), Var.getType(), VK_LValue,
- SourceLocation());
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE));
CGF.EmitExtendGCLifetime(value);
}
@@ -388,8 +419,8 @@ namespace {
: CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
- DeclRefExpr DRE(const_cast<VarDecl*>(&Var), Var.getType(), VK_LValue,
- SourceLocation());
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
// Compute the address of the local variable, in case it's a byref
// or something.
llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getAddress();
@@ -426,7 +457,7 @@ static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
break;
case Qualifiers::OCL_Strong: {
- CodeGenFunction::Destroyer &destroyer =
+ CodeGenFunction::Destroyer *destroyer =
(var.hasAttr<ObjCPreciseLifetimeAttr>()
? CodeGenFunction::destroyARCStrongPrecise
: CodeGenFunction::destroyARCStrongImprecise);
@@ -490,15 +521,17 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
llvm::Value *value = EmitScalarExpr(init);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
- EmitStoreThroughLValue(RValue::get(value), lvalue);
+ EmitStoreThroughLValue(RValue::get(value), lvalue, true);
return;
}
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
- CodeGenFunction::RunCleanupsScope Scope(*this);
- if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init))
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
+ enterFullExpression(ewc);
init = ewc->getSubExpr();
+ }
+ CodeGenFunction::RunCleanupsScope Scope(*this);
// We have to maintain the illusion that the variable is
// zero-initialized. If the variable might be accessed in its
@@ -529,7 +562,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
// Otherwise just do a simple store.
else
- EmitStoreOfScalar(zero, tempLV);
+ EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
}
// Emit the initializer.
@@ -575,19 +608,19 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
// both __weak and __strong, but __weak got filtered out above.
if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
- EmitStoreOfScalar(value, lvalue);
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
EmitARCRelease(oldValue, /*precise*/ false);
return;
}
- EmitStoreOfScalar(value, lvalue);
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
/// EmitScalarInit - Initialize the given lvalue with the given object.
void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
if (!lifetime)
- return EmitStoreThroughLValue(RValue::get(init), lvalue);
+ return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
switch (lifetime) {
case Qualifiers::OCL_None:
@@ -611,7 +644,7 @@ void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
break;
}
- EmitStoreOfScalar(init, lvalue);
+ EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
}
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
@@ -638,6 +671,16 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
}
return true;
}
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
// Anything else is hard and scary.
return false;
@@ -648,17 +691,26 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
/// stores that would be required.
static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
bool isVolatile, CGBuilderTy &Builder) {
- // Zero doesn't require any stores.
- if (isa<llvm::ConstantAggregateZero>(Init) ||
- isa<llvm::ConstantPointerNull>(Init) ||
- isa<llvm::UndefValue>(Init))
+ // Zero doesn't require a store.
+ if (Init->isNullValue() || isa<llvm::UndefValue>(Init))
return;
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init)) {
- if (!Init->isNullValue())
- Builder.CreateStore(Init, Loc, isVolatile);
+ Builder.CreateStore(Init, Loc, isVolatile);
+ return;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+
+ // Get a pointer to the element and emit it.
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
+ }
return;
}
@@ -667,9 +719,7 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
- if (Elt->isNullValue()) continue;
-
- // Otherwise, get a pointer to the element and emit it.
+ // Get a pointer to the element and emit it.
emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
isVolatile, Builder);
}
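Both helpers learn about llvm::ConstantDataSequential, the compact representation for constant arrays of simple element types, and the null-element filtering moves to the top of emitStoresForInitAfterMemset via isNullValue(): since the preceding memset has already zeroed the object, only the sparse nonzero elements need explicit stores. Conceptually, for source like (illustrative):

    int table[64] = { 0, 0, 7, 0, 0, 0, 0, 42 };
    // lowered as one memset(table, 0, sizeof(table)) followed by
    // exactly two stores: table[2] = 7 and table[7] = 42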
@@ -728,15 +778,17 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
- bool NRVO = getContext().getLangOptions().ElideConstructors &&
+ bool NRVO = getContext().getLangOpts().ElideConstructors &&
D.isNRVOVariable();
// If this value is a POD array or struct with a statically
- // determinable constant initializer, there are optimizations we
- // can do.
- // TODO: we can potentially constant-evaluate non-POD structs and
- // arrays as long as the initialization is trivial (e.g. if they
- // have a non-trivial destructor, but not a non-trivial constructor).
+ // determinable constant initializer, there are optimizations we can do.
+ //
+ // TODO: We should constant-evaluate the initializer of any variable,
+ // as long as it is initialized by a constant expression. Currently,
+ // isConstantInitializer produces wrong answers for structs with
+ // reference or bitfield members, and a few other cases, and checking
+ // for POD-ness protects us from some of these.
if (D.getInit() &&
(Ty->isArrayType() || Ty->isRecordType()) &&
(Ty.isPODType(getContext()) ||
@@ -744,9 +796,10 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
D.getInit()->isConstantInitializer(getContext(), false)) {
// If the variable's a const type, and it's neither an NRVO
- // candidate nor a __block variable, emit it as a global instead.
- if (CGM.getCodeGenOpts().MergeAllConstants && Ty.isConstQualified() &&
- !NRVO && !isByRef) {
+ // candidate nor a __block variable and has no mutable members,
+ // emit it as a global instead.
+ if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+ CGM.isTypeConstant(Ty, true)) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
emission.Address = 0; // signal this condition to later callbacks
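The promotion of a const automatic local to a global is now gated on CGM.isTypeConstant(Ty, true) instead of plain const-qualification, because a mutable member leaves a const object writable. Illustration:

    struct Counter {
      mutable int hits;   // writable even through a const object
      int limit;
    };
    void f() {
      const Counter c = { 0, 10 };  // const-qualified, yet no longer
                                    // eligible to become a read-only
                                    // global
    }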
@@ -788,7 +841,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
LTy = BuildByRefType(&D);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
- Alloc->setName(D.getNameAsString());
+ Alloc->setName(D.getName());
CharUnits allocaAlignment = alignment;
if (isByRef)
@@ -960,21 +1013,22 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::Value *Loc =
capturedByInit ? emission.Address : emission.getObjectAddress(*this);
- if (!emission.IsConstantAggregate) {
- LValue lv = MakeAddrLValue(Loc, type, alignment.getQuantity());
+ llvm::Constant *constant = 0;
+ if (emission.IsConstantAggregate) {
+ assert(!capturedByInit && "constant init contains a capturing block?");
+ constant = CGM.EmitConstantInit(D, this);
+ }
+
+ if (!constant) {
+ LValue lv = MakeAddrLValue(Loc, type, alignment);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
}
// If this is a simple aggregate initialization, we can optimize it
// in various ways.
- assert(!capturedByInit && "constant init contains a capturing block?");
-
bool isVolatile = type.isVolatileQualified();
- llvm::Constant *constant = CGM.EmitConstantExpr(D.getInit(), type, this);
- assert(constant != 0 && "Wasn't a simple constant init?");
-
llvm::Value *SizeVal =
llvm::ConstantInt::get(IntPtrTy,
getContext().getTypeSizeInChars(type).getQuantity());
@@ -1035,7 +1089,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
RValue rvalue = EmitReferenceBindingToExpr(init, D);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
- EmitStoreThroughLValue(rvalue, lvalue);
+ EmitStoreThroughLValue(rvalue, lvalue, true);
} else if (!hasAggregateLLVMType(type)) {
EmitScalarInit(init, D, lvalue, capturedByInit);
} else if (type->isAnyComplexType()) {
@@ -1049,6 +1103,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
+ MaybeEmitStdInitializerListCleanup(lvalue.getAddress(), init);
}
}
@@ -1101,7 +1156,7 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
}
// If we haven't chosen a more specific destroyer, use the default.
- if (!destroyer) destroyer = &getDestroyer(dtorKind);
+ if (!destroyer) destroyer = getDestroyer(dtorKind);
// Use an EH cleanup in array destructors iff the destructor itself
// is being pushed as an EH cleanup.
@@ -1123,7 +1178,7 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
emitAutoVarTypeCleanup(emission, dtorKind);
// In GC mode, honor objc_precise_lifetime.
- if (getLangOptions().getGC() != LangOptions::NonGC &&
+ if (getLangOpts().getGC() != LangOptions::NonGC &&
D.hasAttr<ObjCPreciseLifetimeAttr>()) {
EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
}
@@ -1135,7 +1190,7 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
llvm::Constant *F = CGM.GetAddrOfFunction(FD);
assert(F && "Could not find function!");
- const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
+ const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
}
@@ -1145,25 +1200,18 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
enterByrefCleanup(emission);
}
-CodeGenFunction::Destroyer &
+CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
- // This is surprisingly compiler-dependent. GCC 4.2 can't bind
- // references to functions directly in returns, and using '*&foo'
- // confuses MSVC. Luckily, the following code pattern works in both.
- Destroyer *destroyer = 0;
switch (kind) {
case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
case QualType::DK_cxx_destructor:
- destroyer = &destroyCXXObject;
- break;
+ return destroyCXXObject;
case QualType::DK_objc_strong_lifetime:
- destroyer = &destroyARCStrongPrecise;
- break;
+ return destroyARCStrongPrecise;
case QualType::DK_objc_weak_lifetime:
- destroyer = &destroyARCWeak;
- break;
+ return destroyARCWeak;
}
- return *destroyer;
+ llvm_unreachable("Unknown DestructionKind");
}
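Destroyer changes from a function reference to a plain function pointer throughout this file, which is what lets getDestroyer return straight out of each switch case and deletes the old compiler-workaround comment. The usage pattern, as a sketch:

    // A function pointer can be returned and stored without the
    // reference-binding quirks the old code danced around:
    typedef void Destroyer(CodeGenFunction &, llvm::Value *, QualType);
    Destroyer *d = CGF.getDestroyer(QualType::DK_cxx_destructor);
    d(CGF, addr, type);   // ordinary indirect call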
/// pushDestroy - Push the standard destructor for the given type.
@@ -1177,7 +1225,7 @@ void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
}
void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
- QualType type, Destroyer &destroyer,
+ QualType type, Destroyer *destroyer,
bool useEHCleanupForArray) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
destroyer, useEHCleanupForArray);
@@ -1195,7 +1243,7 @@ void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
/// used when destroying array elements, in case one of the
/// destructions throws an exception
void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
- Destroyer &destroyer,
+ Destroyer *destroyer,
bool useEHCleanupForArray) {
const ArrayType *arrayType = getContext().getAsArrayType(type);
if (!arrayType)
@@ -1232,7 +1280,7 @@ void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
llvm::Value *end,
QualType type,
- Destroyer &destroyer,
+ Destroyer *destroyer,
bool checkZeroLength,
bool useEHCleanup) {
assert(!type->isArrayType());
@@ -1283,7 +1331,7 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *begin, llvm::Value *end,
QualType type,
- CodeGenFunction::Destroyer &destroyer) {
+ CodeGenFunction::Destroyer *destroyer) {
// If the element type is itself an array, drill down.
unsigned arrayDepth = 0;
while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
@@ -1316,13 +1364,13 @@ namespace {
llvm::Value *ArrayBegin;
llvm::Value *ArrayEnd;
QualType ElementType;
- CodeGenFunction::Destroyer &Destroyer;
+ CodeGenFunction::Destroyer *Destroyer;
public:
RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
QualType elementType,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
- ElementType(elementType), Destroyer(*destroyer) {}
+ ElementType(elementType), Destroyer(destroyer) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
@@ -1337,14 +1385,14 @@ namespace {
llvm::Value *ArrayBegin;
llvm::Value *ArrayEndPointer;
QualType ElementType;
- CodeGenFunction::Destroyer &Destroyer;
+ CodeGenFunction::Destroyer *Destroyer;
public:
IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
- ElementType(elementType), Destroyer(*destroyer) {}
+ ElementType(elementType), Destroyer(destroyer) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
@@ -1367,10 +1415,10 @@ namespace {
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
- Destroyer &destroyer) {
+ Destroyer *destroyer) {
pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEndPointer,
- elementType, &destroyer);
+ elementType, destroyer);
}
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
@@ -1386,10 +1434,10 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
- Destroyer &destroyer) {
+ Destroyer *destroyer) {
pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEnd,
- elementType, &destroyer);
+ elementType, destroyer);
}
namespace {
@@ -1442,7 +1490,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
DeclPtr = Arg;
} else {
// Otherwise, create a temporary to hold the value.
- DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
+ D.getName() + ".addr");
+ Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ DeclPtr = Alloc;
bool doStore = true;
@@ -1480,7 +1531,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
if (lt == Qualifiers::OCL_Weak) {
EmitARCInitWeak(DeclPtr, Arg);
- doStore = false; // The weak init is a store, no need to do two
+ doStore = false; // The weak init is a store, no need to do two.
}
}
@@ -1491,8 +1542,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Store the initial value into the alloca.
if (doStore) {
LValue lv = MakeAddrLValue(DeclPtr, Ty,
- getContext().getDeclAlign(&D).getQuantity());
- EmitStoreOfScalar(Arg, lv);
+ getContext().getDeclAlign(&D));
+ EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
}
}
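EmitParmDecl now builds the parameter's temporary by hand instead of through CreateMemTemp, so the alloca's alignment comes from getDeclAlign(&D) rather than the type's default, and the store through MakeAddrLValue carries the same CharUnits alignment. A case where that plausibly differs (illustrative, assuming the usual GCC attribute):

    typedef float float4 __attribute__((aligned(16)));
    void scale(float4 v);   // the 'v.addr' slot should now be emitted
                            // with the declared 16-byte alignment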
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
index 3b8f830..10f0b83 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -28,7 +28,7 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
ASTContext &Context = CGF.getContext();
- unsigned alignment = Context.getDeclAlign(&D).getQuantity();
+ CharUnits alignment = Context.getDeclAlign(&D);
QualType type = D.getType();
LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
@@ -101,60 +101,159 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
CGF.EmitCXXGlobalDtorRegistration(function, argument);
}
+/// Emit code to cause the variable at the given address to be considered as
+/// constant from this point onwards.
+static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Addr) {
+ // Don't emit the intrinsic if we're not optimizing.
+ if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // Grab the llvm.invariant.start intrinsic.
+ llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
+ llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
+
+ // Emit a call with the size in bytes of the object.
+ CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
+ uint64_t Width = WidthChars.getQuantity();
+ llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
+ llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
+ CGF.Builder.CreateCall(InvariantStart, Args);
+}
+
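EmitDeclInvariant is new: once a global of constant type has been dynamically initialized, it tells the optimizer the object will never change again by calling the llvm.invariant.start intrinsic with the object's size in bytes; at -O0 the call is omitted because nothing would consume it. A source-level case that reaches it (illustrative):

    extern int seed();
    const int cfg = seed();  // needs dynamic initialization, but is
                             // never written afterwards: the generated
                             // __cxx_global_var_init stores the value,
                             // then calls llvm.invariant.start with
                             // size 4 and the address of cfg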
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
- llvm::Constant *DeclPtr) {
+ llvm::Constant *DeclPtr,
+ bool PerformInit) {
const Expr *Init = D.getInit();
QualType T = D.getType();
if (!T->isReferenceType()) {
- EmitDeclInit(*this, D, DeclPtr);
- EmitDeclDestroy(*this, D, DeclPtr);
+ if (PerformInit)
+ EmitDeclInit(*this, D, DeclPtr);
+ if (CGM.isTypeConstant(D.getType(), true))
+ EmitDeclInvariant(*this, D, DeclPtr);
+ else
+ EmitDeclDestroy(*this, D, DeclPtr);
return;
}
+ assert(PerformInit && "cannot have constant initializer which needs "
+ "destruction for reference");
unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
RValue RV = EmitReferenceBindingToExpr(Init, &D);
EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
-void
-CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
- llvm::Constant *DeclPtr) {
- // Generate a global destructor entry if not using __cxa_atexit.
- if (!CGM.getCodeGenOpts().CXAAtExit) {
- CGM.AddCXXDtorEntry(DtorFn, DeclPtr);
- return;
- }
+/// Register a global destructor using __cxa_atexit.
+static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the default CC. Go ahead and cast it to the
+ // right prototype.
+ llvm::Type *dtorTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, paramTys, false);
+
+ // Fetch the actual function.
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
+ if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
+ fn->setDoesNotThrow();
+
+ // Create a variable that binds the atexit to this shared object.
+ llvm::Constant *handle =
+ CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
+
+ llvm::Value *args[] = {
+ llvm::ConstantExpr::getBitCast(dtor, dtorTy),
+ llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
+ handle
+ };
+ CGF.Builder.CreateCall(atexit, args);
+}
- // Get the destructor function type
- llvm::Type *DtorFnTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- Int8PtrTy, false);
- DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ llvm::FunctionType *ty,
+ const Twine &name);
+
+/// Create a stub function, suitable for being passed to atexit,
+/// which passes the given address to the given destructor function.
+static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Get the destructor function type, void(*)(void).
+ llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
+ llvm::Function *fn =
+ CreateGlobalInitOrDestructFunction(CGM, ty,
+ Twine("__dtor_", addr->getName()));
+
+ CodeGenFunction CGF(CGM);
+
+ CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, fn,
+ CGM.getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
+
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *dtorFn =
+ dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
+ call->setCallingConv(dtorFn->getCallingConv());
+
+ CGF.FinishFunction();
- llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy };
+ return fn;
+}
- // Get the __cxa_atexit function type
- // extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
- llvm::FunctionType *AtExitFnTy =
- llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
+/// Register a global destructor using atexit.
+static void emitGlobalDtorWithAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Create a function which calls the destructor.
+ llvm::Constant *dtorStub = createAtExitStub(CGF.CGM, dtor, addr);
- llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
- "__cxa_atexit");
- if (llvm::Function *Fn = dyn_cast<llvm::Function>(AtExitFn))
- Fn->setDoesNotThrow();
+ // extern "C" int atexit(void (*f)(void));
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, dtorStub->getType(), false);
+
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
+ atexitFn->setDoesNotThrow();
+
+ CGF.Builder.CreateCall(atexit, dtorStub);
+}
+
+void CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Use __cxa_atexit if available.
+ if (CGM.getCodeGenOpts().CXAAtExit) {
+ emitGlobalDtorWithCXAAtExit(*this, dtor, addr);
+ return;
+ }
+
+ // In Apple kexts, we want to add a global destructor entry.
+ // FIXME: shouldn't this be guarded by some variable?
+ if (CGM.getContext().getLangOpts().AppleKext) {
+ // Generate a global destructor entry.
+ CGM.AddCXXDtorEntry(dtor, addr);
+ return;
+ }
- llvm::Constant *Handle = CGM.CreateRuntimeVariable(Int8PtrTy,
- "__dso_handle");
- llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
- llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
- llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
- Builder.CreateCall(AtExitFn, Args);
+ // Otherwise, we just use atexit.
+ emitGlobalDtorWithAtExit(*this, dtor, addr);
}
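Destructor registration is now split three ways: __cxa_atexit where available (passing the destructor, the object address, and __dso_handle directly), the kext destructor list for Apple kernel extensions, and plain atexit otherwise. Because atexit takes a zero-argument callback, createAtExitStub synthesizes a __dtor_<name> wrapper that bakes the address in. The equivalent source-level pattern, as a sketch with hypothetical names:

    struct Obj { /* ... */ };
    extern Obj obj;
    extern "C" void obj_destructor(Obj *);  // stand-in for ~Obj()
    extern "C" int atexit(void (*f)(void));

    static void __dtor_obj(void) {
      obj_destructor(&obj);   // the stub captures the object address
    }
    // run at the end of obj's dynamic initialization:
    //   atexit(__dtor_obj);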
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
- llvm::GlobalVariable *DeclPtr) {
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) {
// If we've been asked to forbid guard variables, emit an error now.
// This diagnostic is hard-coded for Darwin's use case; we can find
// better phrasing if someone else needs it.
@@ -163,24 +262,24 @@ void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
"this initialization requires a guard variable, which "
"the kernel does not support");
- CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr);
+ CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
llvm::FunctionType *FTy,
- StringRef Name) {
+ const Twine &Name) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
- if (!CGM.getContext().getLangOptions().AppleKext) {
+ if (!CGM.getContext().getLangOpts().AppleKext) {
// Set the section if needed.
if (const char *Section =
CGM.getContext().getTargetInfo().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
- if (!CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOpts().Exceptions)
Fn->setDoesNotThrow();
return Fn;
@@ -188,16 +287,16 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
- llvm::GlobalVariable *Addr) {
- llvm::FunctionType *FTy
- = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- false);
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create a variable initialization function.
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
- CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr);
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
+ PerformInit);
if (D->hasAttr<InitPriorityAttr>()) {
unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
@@ -226,9 +325,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
- llvm::FunctionType *FTy
- = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global initialization function.
llvm::Function *Fn =
@@ -260,24 +357,23 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
if (CXXGlobalDtors.empty())
return;
- llvm::FunctionType *FTy
- = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global destructor function.
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
- CodeGenFunction(*this).GenerateCXXGlobalDtorFunc(Fn, CXXGlobalDtors);
+ CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
AddGlobalDtor(Fn);
}
/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
- llvm::GlobalVariable *Addr) {
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
- getTypes().getNullaryFunctionInfo(),
+ getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
// Use guarded initialization if the global variable is weak. This
@@ -285,9 +381,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
// definitions explicitly marked weak.
if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage ||
Addr->getLinkage() == llvm::GlobalValue::WeakAnyLinkage) {
- EmitCXXGuardedInit(*D, Addr);
+ EmitCXXGuardedInit(*D, Addr, PerformInit);
} else {
- EmitCXXGlobalVarDeclInit(*D, Addr);
+ EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
}
FinishFunction();
@@ -297,14 +393,14 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
llvm::Constant **Decls,
unsigned NumDecls) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
- getTypes().getNullaryFunctionInfo(),
+ getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
RunCleanupsScope Scope(*this);
// When building in Objective-C++ ARC mode, create an autorelease pool
// around the global initializers.
- if (getLangOptions().ObjCAutoRefCount && getLangOptions().CPlusPlus) {
+ if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EmitObjCAutoreleasePoolCleanup(token);
}
@@ -318,11 +414,11 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
FinishFunction();
}
-void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
+void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
- getTypes().getNullaryFunctionInfo(),
+ getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
// Emit the dtors, in reverse order from construction.
@@ -343,16 +439,17 @@ void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
llvm::Function *
CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
QualType type,
- Destroyer &destroyer,
+ Destroyer *destroyer,
bool useEHCleanupForArray) {
FunctionArgList args;
ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
args.push_back(&dst);
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(getContext().VoidTy, args,
- FunctionType::ExtInfo());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ CGM.getTypes().arrangeFunctionDeclaration(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
@@ -365,4 +462,3 @@ CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
return fn;
}
-
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
index 5e4fb98..95e0030 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -11,17 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/StmtCXX.h"
-
-#include "llvm/Intrinsics.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Support/CallSite.h"
-
-#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
-#include "CGException.h"
#include "CGCleanup.h"
+#include "CGObjCRuntime.h"
#include "TargetInfo.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
using namespace clang;
using namespace CodeGen;
@@ -104,7 +100,7 @@ llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
- if (CGM.getLangOptions().SjLjExceptions)
+ if (CGM.getLangOpts().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume");
}
@@ -113,7 +109,7 @@ llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
- if (CGM.getLangOptions().SjLjExceptions)
+ if (CGM.getLangOpts().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
}
@@ -127,9 +123,9 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
StringRef name;
// In C++, use std::terminate().
- if (CGF.getLangOptions().CPlusPlus)
+ if (CGF.getLangOpts().CPlusPlus)
name = "_ZSt9terminatev"; // FIXME: mangling!
- else if (CGF.getLangOptions().ObjC1 &&
+ else if (CGF.getLangOpts().ObjC1 &&
CGF.CGM.getCodeGenOpts().ObjCRuntimeHasTerminate)
name = "objc_terminate";
else
@@ -145,14 +141,37 @@ static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
return CGF.CGM.CreateRuntimeFunction(FTy, Name);
}
-const EHPersonality EHPersonality::GNU_C("__gcc_personality_v0");
-const EHPersonality EHPersonality::GNU_C_SJLJ("__gcc_personality_sj0");
-const EHPersonality EHPersonality::NeXT_ObjC("__objc_personality_v0");
-const EHPersonality EHPersonality::GNU_CPlusPlus("__gxx_personality_v0");
-const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ("__gxx_personality_sj0");
-const EHPersonality EHPersonality::GNU_ObjC("__gnu_objc_personality_v0",
- "objc_exception_throw");
-const EHPersonality EHPersonality::GNU_ObjCXX("__gnustep_objcxx_personality_v0");
+namespace {
+ /// The exceptions personality for a function.
+ struct EHPersonality {
+ const char *PersonalityFn;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *CatchallRethrowFn;
+
+ static const EHPersonality &get(const LangOptions &Lang);
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality GNU_ObjCXX;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+ };
+}
+
+const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", 0 };
+const EHPersonality EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", 0 };
+const EHPersonality EHPersonality::NeXT_ObjC = { "__objc_personality_v0", 0 };
+const EHPersonality EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", 0};
+const EHPersonality
+EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", 0 };
+const EHPersonality
+EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
+const EHPersonality
+EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", 0 };
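EHPersonality moves out of the deleted CGException.h into an anonymous namespace here, and turns from a class holding StringRefs (requiring a constructor) into a plain struct of const char * filled by aggregate initialization, so the seven personality globals become pure static data with no startup construction. The minimal shape of the pattern (sketch):

    struct Personality {
      const char *PersonalityFn;
      const char *CatchallRethrowFn;  // null when the generic
                                      // rethrow path suffices
    };
    // Aggregate initialization: constant data, no global constructor.
    const Personality GNU_C = { "__gcc_personality_v0", 0 };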
static const EHPersonality &getCPersonality(const LangOptions &L) {
if (L.SjLjExceptions)
@@ -211,10 +230,8 @@ const EHPersonality &EHPersonality::get(const LangOptions &L) {
static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
llvm::Constant *Fn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::Type::getInt32Ty(CGM.getLLVMContext()),
- true),
- Personality.getPersonalityFnName());
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
+ Personality.PersonalityFn);
return Fn;
}
@@ -283,17 +300,18 @@ void CodeGenModule::SimplifyPersonality() {
return;
// If we're not in ObjC++ -fexceptions, there's nothing to do.
- if (!Features.CPlusPlus || !Features.ObjC1 || !Features.Exceptions)
+ if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
return;
- const EHPersonality &ObjCXX = EHPersonality::get(Features);
- const EHPersonality &CXX = getCXXPersonality(Features);
- if (&ObjCXX == &CXX ||
- ObjCXX.getPersonalityFnName() == CXX.getPersonalityFnName())
+ const EHPersonality &ObjCXX = EHPersonality::get(LangOpts);
+ const EHPersonality &CXX = getCXXPersonality(LangOpts);
+ if (&ObjCXX == &CXX)
return;
- llvm::Function *Fn =
- getModule().getFunction(ObjCXX.getPersonalityFnName());
+ assert(std::strcmp(ObjCXX.PersonalityFn, CXX.PersonalityFn) != 0 &&
+ "Different EHPersonalities using the same personality function.");
+
+ llvm::Function *Fn = getModule().getFunction(ObjCXX.PersonalityFn);
// Nothing to do if it's unused.
if (!Fn || Fn->use_empty()) return;
@@ -359,7 +377,7 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
/*IsInit*/ true);
// Deactivate the cleanup block.
- CGF.DeactivateCleanupBlock(cleanup);
+ CGF.DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr));
}
llvm::Value *CodeGenFunction::getExceptionSlot() {
@@ -452,7 +470,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
}
void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
- if (!CGM.getLangOptions().CXXExceptions)
+ if (!CGM.getLangOpts().CXXExceptions)
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -520,7 +538,7 @@ static void emitFilterDispatchBlock(CodeGenFunction &CGF,
}
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
- if (!CGM.getLangOptions().CXXExceptions)
+ if (!CGM.getLangOpts().CXXExceptions)
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
@@ -636,15 +654,14 @@ static bool isNonEHScope(const EHScope &S) {
return false;
}
- // Suppress warning.
- return false;
+ llvm_unreachable("Invalid EHScope Kind!");
}
llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
- if (!CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOpts().Exceptions)
return 0;
// Check the innermost scope for a cached landing pad. If this is
@@ -734,7 +751,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Save the current IR generation state.
CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
- const EHPersonality &personality = EHPersonality::get(getLangOptions());
+ const EHPersonality &personality = EHPersonality::get(getLangOpts());
// Create and configure the landing pad.
llvm::BasicBlock *lpad = createBasicBlock("lpad");
@@ -984,8 +1001,23 @@ static void InitCatchParam(CodeGenFunction &CGF,
if (CatchType->hasPointerRepresentation()) {
llvm::Value *CastExn =
CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
- CGF.Builder.CreateStore(CastExn, ParamAddr);
- return;
+
+ switch (CatchType.getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(ParamAddr, CastExn);
+ return;
+ }
+ llvm_unreachable("bad ownership qualifier!");
}
// Otherwise, it returns a pointer into the exception object.
@@ -1039,10 +1071,12 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EHStack.pushTerminate();
// Perform the copy construction.
- CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, Qualifiers(),
- AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased));
+ CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam);
+ CGF.EmitAggExpr(copyExpr,
+ AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
// Leave the terminate scope.
CGF.EHStack.popTerminate();
@@ -1170,14 +1204,10 @@ static void emitCatchDispatchBlock(CodeGenFunction &CGF,
if (nextIsEnd) {
CGF.Builder.restoreIP(savedIP);
return;
-
- // Otherwise we need to emit and continue at that block.
- } else {
- CGF.EmitBlock(nextBlock);
}
+ // Otherwise we need to emit and continue at that block.
+ CGF.EmitBlock(nextBlock);
}
-
- llvm_unreachable("fell out of loop!");
}
void CodeGenFunction::popCatchScope() {
@@ -1464,7 +1494,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
Builder.SetInsertPoint(TerminateLandingPad);
// Tell the backend that this is a landing pad.
- const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
llvm::LandingPadInst *LPadInst =
Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
getOpaquePersonalityFn(CGM, Personality), 0);
@@ -1511,24 +1541,23 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
EHResumeBlock = createBasicBlock("eh.resume");
Builder.SetInsertPoint(EHResumeBlock);
- const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
- StringRef RethrowName = Personality.getCatchallRethrowFnName();
- if (!RethrowName.empty()) {
+ const char *RethrowName = Personality.CatchallRethrowFn;
+ if (RethrowName != 0) {
Builder.CreateCall(getCatchallRethrowFn(*this, RethrowName),
getExceptionFromSlot())
->setDoesNotReturn();
} else {
- llvm::Value *Exn = getExceptionFromSlot();
-
switch (CleanupHackLevel) {
case CHL_MandatoryCatchall:
// In mandatory-catchall mode, we need to use
// _Unwind_Resume_or_Rethrow, or whatever the personality's
// equivalent is.
- Builder.CreateCall(getUnwindResumeOrRethrowFn(), Exn)
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ getExceptionFromSlot())
->setDoesNotReturn();
break;
case CHL_MandatoryCleanup: {
@@ -1552,7 +1581,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
// In an idealized mode where we don't have to worry about the
// optimizer combining landing pads, we should just use
// _Unwind_Resume (or the personality's equivalent).
- Builder.CreateCall(getUnwindResumeFn(), Exn)
+ Builder.CreateCall(getUnwindResumeFn(), getExceptionFromSlot())
->setDoesNotReturn();
break;
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.h b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
deleted file mode 100644
index d021616..0000000
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- CGException.h - Classes for exceptions IR generation ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// These classes support the generation of LLVM IR for exceptions in
-// C++ and Objective C.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_CODEGEN_CGEXCEPTION_H
-#define CLANG_CODEGEN_CGEXCEPTION_H
-
-#include "llvm/ADT/StringRef.h"
-
-namespace clang {
-class LangOptions;
-
-namespace CodeGen {
-
-/// The exceptions personality for a function.
-class EHPersonality {
- StringRef PersonalityFn;
-
- // If this is non-null, this personality requires a non-standard
- // function for rethrowing an exception after a catchall cleanup.
- // This function must have prototype void(void*).
- StringRef CatchallRethrowFn;
-
- EHPersonality(StringRef PersonalityFn,
- StringRef CatchallRethrowFn = StringRef())
- : PersonalityFn(PersonalityFn),
- CatchallRethrowFn(CatchallRethrowFn) {}
-
-public:
- static const EHPersonality &get(const LangOptions &Lang);
- static const EHPersonality GNU_C;
- static const EHPersonality GNU_C_SJLJ;
- static const EHPersonality GNU_ObjC;
- static const EHPersonality GNU_ObjCXX;
- static const EHPersonality NeXT_ObjC;
- static const EHPersonality GNU_CPlusPlus;
- static const EHPersonality GNU_CPlusPlus_SJLJ;
-
- StringRef getPersonalityFnName() const { return PersonalityFn; }
- StringRef getCatchallRethrowFnName() const { return CatchallRethrowFn; }
-};
-
-}
-}
-
-#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
index bd4e553..08970fd 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -134,14 +135,16 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
llvm::Value *Location,
Qualifiers Quals,
bool IsInit) {
- if (E->getType()->isAnyComplexType())
+ // FIXME: This function should take an LValue as an argument.
+ if (E->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
- else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
+ } else if (hasAggregateLLVMType(E->getType())) {
+ CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
AggValueSlot::IsDestructed_t(IsInit),
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsAliased_t(!IsInit)));
- else {
+ } else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
EmitStoreThroughLValue(RV, LV);
@@ -182,7 +185,7 @@ CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
const NamedDecl *InitializedDecl) {
if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
if (VD->hasGlobalStorage()) {
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
Out.flush();
@@ -209,13 +212,20 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
+ // Look through single-element init lists that claim to be lvalues. They're
+ // just syntactic wrappers in this case.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->getNumInits() == 1 && ILE->isGLValue())
+ E = ILE->getInit(0);
+ }
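
A hedged illustration of the case this peephole targets (editor's sketch, not part of the patch): binding a reference through a one-element braced list is the same as binding it to the wrapped lvalue, so the wrapper can be stripped before reference-binding codegen runs.

    int a = 0;
    int &r = {a};  // the InitListExpr is a glvalue wrapper; codegen binds 'r' to 'a'
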
+
// Look through expressions for materialized temporaries (for now).
if (const MaterializeTemporaryExpr *M
= dyn_cast<MaterializeTemporaryExpr>(E)) {
// Objective-C++ ARC:
// If we are binding a reference to a temporary that has ownership, we
// need to perform retain/release operations on the temporary.
- if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
+ if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
E->getType()->isObjCLifetimeType() &&
(E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
@@ -228,29 +238,21 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
- if (const ExprWithCleanups *TE = dyn_cast<ExprWithCleanups>(E)) {
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
+ CGF.enterFullExpression(EWC);
CodeGenFunction::RunCleanupsScope Scope(CGF);
- return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
+ return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
ReferenceTemporary,
ReferenceTemporaryDtor,
ObjCARCReferenceLifetimeType,
InitializedDecl);
}
- if (const ObjCPropertyRefExpr *PRE =
- dyn_cast<ObjCPropertyRefExpr>(E->IgnoreParenImpCasts()))
- if (PRE->getGetterResultType()->isReferenceType())
- E = PRE;
-
RValue RV;
if (E->isGLValue()) {
// Emit the expression as an lvalue.
LValue LV = CGF.EmitLValue(E);
- if (LV.isPropertyRef()) {
- RV = CGF.EmitLoadOfPropertyRefLValue(LV);
- return RV.getScalarVal();
- }
if (LV.isSimple())
return LV.getAddress();
@@ -358,10 +360,11 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
!E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
AggValueSlot::IsDestructed_t isDestructed
= AggValueSlot::IsDestructed_t(InitializedDecl != 0);
- AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
- isDestructed,
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
+ Qualifiers(), isDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
}
@@ -484,21 +487,17 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
case Qualifiers::OCL_Strong: {
bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
CleanupKind cleanupKind = getARCCleanupKind();
- // This local is a GCC and MSVC compiler workaround.
- Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
- &destroyARCStrongImprecise;
pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
- *destroyer, cleanupKind & EHCleanup);
+ precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
break;
}
case Qualifiers::OCL_Weak: {
- // This local is a GCC and MSVC compiler workaround.
- Destroyer *destroyer = &destroyARCWeak;
// __weak objects always get EH cleanups; otherwise, exceptions
// could cause really nasty crashes instead of mere leaks.
pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
- ObjCARCReferenceLifetimeType, *destroyer, true);
+ ObjCARCReferenceLifetimeType, destroyARCWeak, true);
break;
}
}
@@ -512,10 +511,8 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
const llvm::Constant *Elts) {
- if (isa<llvm::ConstantAggregateZero>(Elts))
- return 0;
-
- return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
+ return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
+ ->getZExtValue();
}
void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
@@ -643,6 +640,9 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
+ case Expr::ObjCPropertyRefExprClass:
+ llvm_unreachable("cannot emit a property reference directly");
+
case Expr::ObjCSelectorExprClass:
return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
case Expr::ObjCIsaExprClass:
@@ -656,6 +656,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
+ case Expr::UserDefinedLiteralClass:
return EmitCallExprLValue(cast<CallExpr>(E));
case Expr::VAArgExprClass:
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
@@ -671,17 +672,28 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitStringLiteralLValue(cast<StringLiteral>(E));
case Expr::ObjCEncodeExprClass:
return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
-
- case Expr::BlockDeclRefExprClass:
- return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
+ case Expr::PseudoObjectExprClass:
+ return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
+ case Expr::InitListExprClass:
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only single-element init list can be lvalue.");
+ return EmitLValue(cast<InitListExpr>(E)->getInit(0));
case Expr::CXXTemporaryObjectExprClass:
case Expr::CXXConstructExprClass:
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
- case Expr::ExprWithCleanupsClass:
- return EmitExprWithCleanupsLValue(cast<ExprWithCleanups>(E));
+ case Expr::LambdaExprClass:
+ return EmitLambdaLValue(cast<LambdaExpr>(E));
+
+ case Expr::ExprWithCleanupsClass: {
+ const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
+ enterFullExpression(cleanups);
+ RunCleanupsScope Scope(*this);
+ return EmitLValue(cleanups->getSubExpr());
+ }
+
case Expr::CXXScalarValueInitExprClass:
return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
@@ -693,8 +705,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
case Expr::ObjCIvarRefExprClass:
return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
- case Expr::ObjCPropertyRefExprClass:
- return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
case Expr::StmtExprClass:
return EmitStmtExprLValue(cast<StmtExpr>(E));
case Expr::UnaryOperatorClass:
@@ -726,16 +736,188 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXConstCastExprClass:
case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
-
+
case Expr::MaterializeTemporaryExprClass:
return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
}
}
+/// Given an object of the given canonical type, can we safely copy a
+/// value out of it based on its initializer?
+static bool isConstantEmittableObjectType(QualType type) {
+ assert(type.isCanonical());
+ assert(!type->isReferenceType());
+
+ // Must be const-qualified but non-volatile.
+ Qualifiers qs = type.getLocalQualifiers();
+ if (!qs.hasConst() || qs.hasVolatile()) return false;
+
+ // Otherwise, all object types satisfy this except C++ classes with
+ // mutable subobjects or non-trivial copy/destroy behavior.
+ if (const RecordType *RT = dyn_cast<RecordType>(type))
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (RD->hasMutableFields() || !RD->isTrivial())
+ return false;
+
+ return true;
+}
+
+/// Can we constant-emit a load of a reference to a variable of the
+/// given type? This is different from predicates like
+/// Decl::isUsableInConstantExpressions because we do want it to apply
+/// in situations that don't necessarily satisfy the language's rules
+/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
+/// to do this with const float variables even if those variables
+/// aren't marked 'constexpr'.
+enum ConstantEmissionKind {
+ CEK_None,
+ CEK_AsReferenceOnly,
+ CEK_AsValueOrReference,
+ CEK_AsValueOnly
+};
+static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
+ type = type.getCanonicalType();
+ if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
+ if (isConstantEmittableObjectType(ref->getPointeeType()))
+ return CEK_AsValueOrReference;
+ return CEK_AsReferenceOnly;
+ }
+ if (isConstantEmittableObjectType(type))
+ return CEK_AsValueOnly;
+ return CEK_None;
+}
+
+/// Try to emit a reference to the given value without producing it as
+/// an l-value. This is actually more than an optimization: we can't
+/// produce an l-value for variables that we never actually captured
+/// in a block or lambda, which means const int variables or constexpr
+/// literals or similar.
+CodeGenFunction::ConstantEmission
+CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
+ ValueDecl *value = refExpr->getDecl();
+
+ // The value needs to be an enum constant or a constant variable.
+ ConstantEmissionKind CEK;
+ if (isa<ParmVarDecl>(value)) {
+ CEK = CEK_None;
+ } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
+ CEK = checkVarTypeForConstantEmission(var->getType());
+ } else if (isa<EnumConstantDecl>(value)) {
+ CEK = CEK_AsValueOnly;
+ } else {
+ CEK = CEK_None;
+ }
+ if (CEK == CEK_None) return ConstantEmission();
+
+ Expr::EvalResult result;
+ bool resultIsReference;
+ QualType resultType;
+
+ // It's best to evaluate all the way as an r-value if that's permitted.
+ if (CEK != CEK_AsReferenceOnly &&
+ refExpr->EvaluateAsRValue(result, getContext())) {
+ resultIsReference = false;
+ resultType = refExpr->getType();
+
+ // Otherwise, try to evaluate as an l-value.
+ } else if (CEK != CEK_AsValueOnly &&
+ refExpr->EvaluateAsLValue(result, getContext())) {
+ resultIsReference = true;
+ resultType = value->getType();
+
+ // Failure.
+ } else {
+ return ConstantEmission();
+ }
+
+ // In any case, if the initializer has side-effects, abandon ship.
+ if (result.HasSideEffects)
+ return ConstantEmission();
+
+ // Emit as a constant.
+ llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
+
+ // Make sure we emit a debug reference to the global variable.
+  // This should probably fire even for variables that must be emitted.
+ if (isa<VarDecl>(value)) {
+ if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
+ EmitDeclRefExprDbgValue(refExpr, C);
+ } else {
+ assert(isa<EnumConstantDecl>(value));
+ EmitDeclRefExprDbgValue(refExpr, C);
+ }
+
+ // If we emitted a reference constant, we need to dereference that.
+ if (resultIsReference)
+ return ConstantEmission::forReference(C);
+
+ return ConstantEmission::forValue(C);
+}
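
To make the motivation concrete (assumed example, not from the patch): a C++11 lambda may read a const variable with a constant initializer without capturing it, so no l-value for the variable exists in the lambda's frame and the load must be folded to the initializer value.

    void f() {
      const int n = 4;            // constant initializer, never ODR-used below
      auto g = [] { return n; };  // no capture: the read folds to the constant 4
      (void)g;
    }
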
+
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment(), lvalue.getType(),
- lvalue.getTBAAInfo());
+ lvalue.getAlignment().getQuantity(),
+ lvalue.getType(), lvalue.getTBAAInfo());
+}
+
+static bool hasBooleanRepresentation(QualType Ty) {
+ if (Ty->isBooleanType())
+ return true;
+
+ if (const EnumType *ET = Ty->getAs<EnumType>())
+ return ET->getDecl()->getIntegerType()->isBooleanType();
+
+ if (const AtomicType *AT = Ty->getAs<AtomicType>())
+ return hasBooleanRepresentation(AT->getValueType());
+
+ return false;
+}
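
For reference, a small sketch of the less obvious cases this predicate covers (the fixed underlying type requires C++11; _Atomic is a Clang extension):

    enum Flag : bool { Off, On };  // enum whose memory representation is boolean
    // An _Atomic(bool) value likewise reports a boolean representation via the
    // AtomicType arm above.
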
+
+llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
+ const EnumType *ET = Ty->getAs<EnumType>();
+ bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
+ CGM.getCodeGenOpts().StrictEnums &&
+ !ET->getDecl()->isFixed());
+ bool IsBool = hasBooleanRepresentation(Ty);
+ llvm::Type *LTy;
+ if (!IsBool && !IsRegularCPlusPlusEnum)
+ return NULL;
+
+ llvm::APInt Min;
+ llvm::APInt End;
+ if (IsBool) {
+ Min = llvm::APInt(8, 0);
+ End = llvm::APInt(8, 2);
+ LTy = Int8Ty;
+ } else {
+ const EnumDecl *ED = ET->getDecl();
+ LTy = ConvertTypeForMem(ED->getIntegerType());
+ unsigned Bitwidth = LTy->getScalarSizeInBits();
+ unsigned NumNegativeBits = ED->getNumNegativeBits();
+ unsigned NumPositiveBits = ED->getNumPositiveBits();
+
+ if (NumNegativeBits) {
+ unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
+ assert(NumBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
+ Min = -End;
+ } else {
+ assert(NumPositiveBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
+ Min = llvm::APInt(Bitwidth, 0);
+ }
+ }
+
+ if (End == Min)
+ return NULL;
+
+ llvm::Value *LowAndHigh[2];
+ LowAndHigh[0] = llvm::ConstantInt::get(LTy, Min);
+ LowAndHigh[1] = llvm::ConstantInt::get(LTy, End);
+
+ llvm::LLVMContext &C = getLLVMContext();
+ llvm::MDNode *Range = llvm::MDNode::get(C, LowAndHigh);
+ return Range;
}
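
A worked example of the computation above (editor's sketch; the metadata shown assumes -fstrict-enums and optimization enabled): an unfixed C++ enum whose largest enumerator needs two positive bits gets the half-open range [0, 4) attached to its loads.

    enum Color { Red, Green, Blue };     // NumPositiveBits == 2, no negative bits
    Color load(Color *p) { return *p; }  // load i32 ... !range !{i32 0, i32 4}
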
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
@@ -748,19 +930,20 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
Load->setAlignment(Alignment);
if (TBAAInfo)
CGM.DecorateInstruction(Load, TBAAInfo);
+ // If this is an atomic type, all normal reads must be atomic
+ if (Ty->isAtomicType())
+ Load->setAtomic(llvm::SequentiallyConsistent);
- return EmitFromMemory(Load, Ty);
-}
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0)
+ if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
+ Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
-static bool isBooleanUnderlyingType(QualType Ty) {
- if (const EnumType *ET = dyn_cast<EnumType>(Ty))
- return ET->getDecl()->getIntegerType()->isBooleanType();
- return false;
+ return EmitFromMemory(Load, Ty);
}
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
// Bool has a different representation in memory than in registers.
- if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ if (hasBooleanRepresentation(Ty)) {
// This should really always be an i1, but sometimes it's already
// an i8, and it's awkward to track those cases down.
if (Value->getType()->isIntegerTy(1))
@@ -773,7 +956,7 @@ llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// Bool has a different representation in memory than in registers.
- if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ if (hasBooleanRepresentation(Ty)) {
assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
}
@@ -784,7 +967,8 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment,
QualType Ty,
- llvm::MDNode *TBAAInfo) {
+ llvm::MDNode *TBAAInfo,
+ bool isInit) {
Value = EmitToMemory(Value, Ty);
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
@@ -792,12 +976,15 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
Store->setAlignment(Alignment);
if (TBAAInfo)
CGM.DecorateInstruction(Store, TBAAInfo);
+ if (!isInit && Ty->isAtomicType())
+ Store->setAtomic(llvm::SequentiallyConsistent);
}
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+ bool isInit) {
EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment(), lvalue.getType(),
- lvalue.getTBAAInfo());
+ lvalue.getAlignment().getQuantity(), lvalue.getType(),
+ lvalue.getTBAAInfo(), isInit);
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -821,9 +1008,10 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
}
if (LV.isVectorElt()) {
- llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
- LV.isVolatileQualified());
- return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
"vecext"));
}
@@ -832,11 +1020,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
if (LV.isExtVectorElt())
return EmitLoadOfExtVectorElementLValue(LV);
- if (LV.isBitField())
- return EmitLoadOfBitfieldLValue(LV);
-
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- return EmitLoadOfPropertyRefLValue(LV);
+ assert(LV.isBitField() && "Unknown LValue type!");
+ return EmitLoadOfBitfieldLValue(LV);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
@@ -867,8 +1052,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
}
// Cast to the access type.
- llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
- AI.AccessWidth,
+ llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
CGM.getContext().getTargetAddressSpace(LV.getType()));
Ptr = Builder.CreateBitCast(Ptr, PTy);
@@ -914,8 +1098,10 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
- llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
- LV.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
const llvm::Constant *Elts = LV.getExtVectorElts();
@@ -932,10 +1118,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
unsigned NumResultElts = ExprVT->getNumElements();
SmallVector<llvm::Constant*, 4> Mask;
- for (unsigned i = 0; i != NumResultElts; ++i) {
- unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
- }
+ for (unsigned i = 0; i != NumResultElts; ++i)
+ Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
@@ -948,15 +1132,19 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
- llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
- Dst.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
- Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
return;
}
@@ -965,11 +1153,8 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
if (Dst.isExtVectorElt())
return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
- if (Dst.isBitField())
- return EmitStoreThroughBitfieldLValue(Src, Dst);
-
- assert(Dst.isPropertyRef() && "Unknown LValue type");
- return EmitStoreThroughPropertyRefLValue(Src, Dst);
+ assert(Dst.isBitField() && "Unknown LValue type");
+ return EmitStoreThroughBitfieldLValue(Src, Dst);
}
// There's special magic for assigning into an ARC-qualified l-value.
@@ -1031,7 +1216,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
}
assert(Src.isScalar() && "Can't emit an agg store with this method");
- EmitStoreOfScalar(Src.getScalarVal(), Dst);
+ EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
@@ -1045,7 +1230,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
- if (Dst.getType()->isBooleanType())
+ if (hasBooleanRepresentation(Dst.getType()))
SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
@@ -1143,8 +1328,10 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst) {
// This access turns into a read/modify/write of the vector. Load the input
// value now.
- llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
- Dst.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
const llvm::Constant *Elts = Dst.getExtVectorElts();
llvm::Value *SrcVal = Src.getScalarVal();
@@ -1158,10 +1345,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// elements and restore the vector mask since it is on the side it will be
// stored.
SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
- for (unsigned i = 0; i != NumSrcElts; ++i) {
- unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
- }
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(SrcVal,
@@ -1173,11 +1358,9 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
SmallVector<llvm::Constant*, 4> ExtMask;
- unsigned i;
- for (i = 0; i != NumSrcElts; ++i)
- ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
- for (; i != NumDstElts; ++i)
- ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(Builder.getInt32(i));
+ ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
@@ -1186,13 +1369,11 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// build identity
SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i)
- Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+ Mask.push_back(Builder.getInt32(i));
 // modify the mask where new elements get shuffled in
- for (unsigned i = 0; i != NumSrcElts; ++i) {
- unsigned Idx = getAccessedFieldNo(i, Elts);
- Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
- }
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
} else {
@@ -1206,7 +1387,9 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
}
- Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
}
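
The read/modify/write path above is what Clang's ext_vector_type swizzle stores lower to; a minimal assumed example:

    typedef float float4 __attribute__((ext_vector_type(4)));
    void swap_xy(float4 &v) {
      v.yx = v.xy;  // load the vector, shuffle via getAccessedFieldNo, store back
    }
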
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
@@ -1215,7 +1398,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
LValue &LV,
bool IsMemberAccess=false) {
- if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
+ if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
return;
if (isa<ObjCIvarRefExpr>(E)) {
@@ -1323,14 +1506,19 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
"Var decl must have external storage or be a file var decl!");
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
- if (VD->getType()->isReferenceType())
- V = CGF.Builder.CreateLoad(V);
-
- V = EmitBitCastOfLValueToProperType(CGF, V,
- CGF.getTypes().ConvertTypeForMem(E->getType()));
-
- unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
- LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+ V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ QualType T = E->getType();
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = CGF.MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ }
setObjCGCLValueClass(CGF.getContext(), E, LV);
return LV;
}
@@ -1350,13 +1538,21 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
}
}
- unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
+ CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
- unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();
+ CharUnits Alignment = getContext().getDeclAlign(ND);
+ QualType T = E->getType();
+
+ // FIXME: We should be able to assert this for FunctionDecls as well!
+ // FIXME: We should be able to assert this for all DeclRefExprs, not just
+ // those with a valid source location.
+ assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
+ !E->getLocation().isValid()) &&
+ "Should not use decl without marking it used!");
if (ND->hasAttr<WeakRefAttr>()) {
const ValueDecl *VD = cast<ValueDecl>(ND);
@@ -1365,30 +1561,46 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
-
// Check if this is a global variable.
if (VD->hasExternalStorage() || VD->isFileVarDecl())
return EmitGlobalVarDeclLValue(*this, E, VD);
+ bool isBlockVariable = VD->hasAttr<BlocksAttr>();
+
bool NonGCable = VD->hasLocalStorage() &&
!VD->getType()->isReferenceType() &&
- !VD->hasAttr<BlocksAttr>();
+ !isBlockVariable;
llvm::Value *V = LocalDeclMap[VD];
if (!V && VD->isStaticLocal())
V = CGM.getStaticLocalDeclAddress(VD);
+
+ // Use special handling for lambdas.
+ if (!V) {
+ if (FieldDecl *FD = LambdaCaptureFields.lookup(VD))
+ return EmitLValueForField(CXXABIThisValue, FD, 0);
+
+ assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
+ CharUnits alignment = getContext().getDeclAlign(VD);
+ return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
+ E->getType(), alignment);
+ }
+
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
- if (VD->hasAttr<BlocksAttr>())
+ if (isBlockVariable)
V = BuildBlockByrefAddress(V, VD);
-
- if (VD->getType()->isReferenceType())
- V = Builder.CreateLoad(V);
- V = EmitBitCastOfLValueToProperType(*this, V,
- getTypes().ConvertTypeForMem(E->getType()));
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = MakeAddrLValue(V, T, Alignment);
+ }
- LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
LV.setNonGC(true);
@@ -1401,16 +1613,6 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
return EmitFunctionDeclLValue(*this, E, fn);
llvm_unreachable("Unhandled DeclRefExpr");
-
- // an invalid LValue, but the assert will
- // ensure that this point is never reached.
- return LValue();
-}
-
-LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
- unsigned Alignment =
- getContext().getDeclAlign(E->getDecl()).getQuantity();
- return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
@@ -1425,15 +1627,15 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
QualType T = E->getSubExpr()->getType()->getPointeeType();
assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
- LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
// We should not generate __weak write barrier on indirect reference
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But, we continue to generate __strong write barrier on indirect write
// into a pointer to object.
- if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGC() != LangOptions::NonGC &&
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
return LV;
@@ -1444,9 +1646,10 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
assert(LV.isSimple() && "real/imag on non-ordinary l-value");
llvm::Value *Addr = LV.getAddress();
- // real and imag are valid on scalars. This is a faster way of
- // testing that.
- if (!cast<llvm::PointerType>(Addr->getType())
+ // __real is valid on scalars. This is a faster way of testing that.
+ // __imag can only produce an rvalue on scalars.
+ if (E->getOpcode() == UO_Real &&
+ !cast<llvm::PointerType>(Addr->getType())
->getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
@@ -1588,7 +1791,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
- E->getBase()->getType());
+ E->getBase()->getType(), LHS.getAlignment());
}
// Extend or truncate the index type to 32 or 64-bits.
@@ -1617,7 +1820,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// We know that the pointer points to a type of the correct size, unless the
// size is a VLA or Objective-C interface.
llvm::Value *Address = 0;
- unsigned ArrayAlignment = 0;
+ CharUnits ArrayAlignment;
if (const VariableArrayType *vla =
getContext().getAsVariableArrayType(E->getType())) {
// The base must be a pointer, which is not an aggregate. Emit
@@ -1632,7 +1835,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- if (getLangOptions().isSignedOverflowDefined()) {
+ if (getLangOpts().isSignedOverflowDefined()) {
Idx = Builder.CreateMul(Idx, numElements);
Address = Builder.CreateGEP(Address, Idx, "arrayidx");
} else {
@@ -1667,14 +1870,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Propagate the alignment from the array itself to the result.
ArrayAlignment = ArrayLV.getAlignment();
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ if (getContext().getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
} else {
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ if (getContext().getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(Base, Idx, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
@@ -1684,17 +1887,21 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(!T.isNull() &&
"CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
// Limit the alignment to that of the result type.
- if (ArrayAlignment) {
- unsigned Align = getContext().getTypeAlignInChars(T).getQuantity();
+ LValue LV;
+ if (!ArrayAlignment.isZero()) {
+ CharUnits Align = getContext().getTypeAlignInChars(T);
ArrayAlignment = std::min(Align, ArrayAlignment);
+ LV = MakeAddrLValue(Address, T, ArrayAlignment);
+ } else {
+ LV = MakeNaturalAlignAddrLValue(Address, T);
}
- LValue LV = MakeAddrLValue(Address, T, ArrayAlignment);
LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
- if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGC() != LangOptions::NonGC) {
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
}
@@ -1702,13 +1909,11 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
}
static
-llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
+llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
SmallVector<unsigned, 4> &Elts) {
SmallVector<llvm::Constant*, 4> CElts;
-
- llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
- CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
+ CElts.push_back(Builder.getInt32(Elts[i]));
return llvm::ConstantVector::get(CElts);
}
@@ -1751,22 +1956,20 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
- llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
- return LValue::MakeExtVectorElt(Base.getAddress(), CV, type);
+ llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
+ Base.getAlignment());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
llvm::Constant *BaseElts = Base.getExtVectorElts();
SmallVector<llvm::Constant *, 4> CElts;
- for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- if (isa<llvm::ConstantAggregateZero>(BaseElts))
- CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
- else
- CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
- }
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
llvm::Constant *CV = llvm::ConstantVector::get(CElts);
- return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type);
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
+ Base.getAlignment());
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
@@ -1847,6 +2050,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
const RecordDecl *rec = field->getParent();
QualType type = field->getType();
+ CharUnits alignment = getContext().getDeclAlign(field);
bool mayAlias = rec->hasAttr<MayAliasAttr>();
@@ -1863,6 +2067,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
if (cvr & Qualifiers::Volatile) load->setVolatile(true);
+ load->setAlignment(alignment.getQuantity());
if (CGM.shouldUseTBAA()) {
llvm::MDNode *tbaa;
@@ -1876,6 +2081,10 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
addr = load;
mayAlias = false;
type = refType->getPointeeType();
+ if (type->isIncompleteType())
+ alignment = CharUnits();
+ else
+ alignment = getContext().getTypeAlignInChars(type);
cvr = 0; // qualifiers don't recursively apply to referencee
}
}
@@ -1891,7 +2100,6 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
if (field->hasAttr<AnnotateAttr>())
addr = EmitFieldAnnotations(field, addr);
- unsigned alignment = getContext().getDeclAlign(field).getQuantity();
LValue LV = MakeAddrLValue(addr, type, alignment);
LV.getQuals().addCVRQualifiers(cvr);
@@ -1932,11 +2140,16 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
- unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
+ CharUnits Alignment = getContext().getDeclAlign(Field);
return MakeAddrLValue(V, FieldType, Alignment);
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
+ if (E->isFileScope()) {
+ llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
+ return MakeAddrLValue(GlobalPtr, E->getType());
+ }
+
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
@@ -1957,6 +2170,8 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
return EmitAggExprToLValue(expr);
}
+ OpaqueValueMapping binding(*this, expr);
+
const Expr *condExpr = expr->getCond();
bool CondExprBool;
if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
@@ -1967,8 +2182,6 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
return EmitLValue(live);
}
- OpaqueValueMapping binding(*this, expr);
-
llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
@@ -2020,21 +2233,11 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dependent:
llvm_unreachable("dependent cast kind in IR gen!");
-
- case CK_GetObjCProperty: {
- LValue LV = EmitLValue(E->getSubExpr());
- assert(LV.isPropertyRef());
- RValue RV = EmitLoadOfPropertyRefLValue(LV);
-
- // Property is an aggregate r-value.
- if (RV.isAggregate()) {
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
- }
-
- // Implicit property returns an l-value.
- assert(RV.isScalar());
- return MakeAddrLValue(RV.getScalarVal(), E->getSubExpr()->getType());
- }
+
+ // These two casts are currently treated as no-ops, although they could
+ // potentially be real operations depending on the target's ABI.
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
case CK_NoOp:
case CK_LValueToRValue:
@@ -2071,11 +2274,13 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
case CK_AnyPointerToBlockPointerCast:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
- case CK_ARCExtendBlockObject: {
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject: {
// These casts only produce lvalues when we're binding a reference to a
// temporary realized from a (converted) pure rvalue. Emit the expression
// as a value, copy it into a temporary, and return an lvalue referring to
@@ -2163,7 +2368,7 @@ LValue CodeGenFunction::EmitNullInitializationLValue(
}
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
- assert(e->isGLValue() || e->getType()->isRecordType());
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
return getOpaqueLValueMapping(e);
}
@@ -2206,7 +2411,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXPseudoDestructorExpr *PseudoDtor
= dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
QualType DestroyedType = PseudoDtor->getDestroyedType();
- if (getContext().getLangOptions().ObjCAutoRefCount &&
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
DestroyedType->isObjCLifetimeType() &&
(DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
@@ -2342,7 +2547,14 @@ CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
Slot.setExternallyDestructed();
EmitAggExpr(E->getSubExpr(), Slot);
- EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
+ EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ EmitLambdaExpr(E, Slot);
return MakeAddrLValue(Slot.getAddr(), E->getType());
}
@@ -2427,7 +2639,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
CallArgList Args;
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
- const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType);
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FnType);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
@@ -2446,11 +2659,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
// through an unprototyped function type works like a *non-variadic*
// call. The way we make this work is to cast to the exact type
// of the promoted arguments.
- if (isa<FunctionNoProtoType>(FnType) &&
- !getTargetHooks().isNoProtoCallVariadic(FnType->getCallConv())) {
- assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0))
- ->isVarArg());
- llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false);
+ if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
+ llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
CalleeTy = CalleeTy->getPointerTo();
Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
}
@@ -2481,7 +2691,17 @@ static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
- if (E->isCmpXChg()) {
+ llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n: {
// Note that cmpxchg only supports specifying one ordering and
// doesn't support weak cmpxchg, at least at the moment.
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@@ -2498,7 +2718,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- if (E->getOp() == AtomicExpr::Load) {
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
Load->setAtomic(Order);
Load->setAlignment(Size);
@@ -2508,7 +2730,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- if (E->getOp() == AtomicExpr::Store) {
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n: {
assert(!Dest && "Store does not return a value");
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
LoadVal1->setAlignment(Align);
@@ -2519,25 +2743,74 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
- switch (E->getOp()) {
- case AtomicExpr::CmpXchgWeak:
- case AtomicExpr::CmpXchgStrong:
- case AtomicExpr::Store:
- case AtomicExpr::Load: assert(0 && "Already handled!");
- case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
- case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;
- case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break;
- case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break;
- case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break;
- case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break;
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ Op = llvm::AtomicRMWInst::Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ Op = llvm::AtomicRMWInst::Add;
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ Op = llvm::AtomicRMWInst::Sub;
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ Op = llvm::AtomicRMWInst::And;
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ Op = llvm::AtomicRMWInst::Or;
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ Op = llvm::AtomicRMWInst::Xor;
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ Op = llvm::AtomicRMWInst::Nand;
+ break;
}
+
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
LoadVal1->setAlignment(Align);
llvm::AtomicRMWInst *RMWI =
CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
RMWI->setVolatile(E->isVolatile());
- llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
+
+ // For __atomic_*_fetch operations, perform the operation again to
+ // determine the value which was written.
+ llvm::Value *Result = RMWI;
+ if (PostOp)
+ Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ Result = CGF.Builder.CreateNot(Result);
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
StoreDest->setAlignment(Align);
}
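
To make the PostOp handling concrete (illustrative sketch using the GNU builtins; the values shown assume no concurrent writers): the *_fetch forms return the updated value, which the code above recomputes by re-applying the operation to the atomicrmw result.

    int x = 0;
    int old_val = __atomic_fetch_add(&x, 5, __ATOMIC_SEQ_CST);  // returns 0,  x == 5
    int new_val = __atomic_add_fetch(&x, 5, __ATOMIC_SEQ_CST);  // returns 10, x == 10
    // __atomic_nand_fetch is the one case that also negates: ~(x & operand).
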
@@ -2562,7 +2835,9 @@ static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
- QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
+ QualType MemTy = AtomicTy;
+ if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
+ MemTy = AT->getValueType();
CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
uint64_t Size = sizeChars.getQuantity();
CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
@@ -2571,84 +2846,202 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
getContext().getTargetInfo().getMaxAtomicInlineWidth();
bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
+
+
llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
Ptr = EmitScalarExpr(E->getPtr());
+
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ assert(!Dest && "Init does not return a value");
+ if (!hasAggregateLLVMType(E->getVal1()->getType())) {
+ QualType PointeeType
+ = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
+ EmitScalarInit(EmitScalarExpr(E->getVal1()),
+ LValue::MakeAddr(Ptr, PointeeType, alignChars,
+ getContext()));
+ } else if (E->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
+ } else {
+ AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
+ AtomicTy.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(E->getVal1(), Slot);
+ }
+ return RValue::get(0);
+ }
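
A minimal use of the builtin handled here (assumed example; __c11_atomic_init and _Atomic are Clang spellings of the C11 facilities):

    _Atomic(int) counter;
    void start() {
      __c11_atomic_init(&counter, 0);  // plain initializing store, no ordering
    }
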
+
Order = EmitScalarExpr(E->getOrder());
- if (E->isCmpXChg()) {
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ Dest = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ Val1 = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
Val1 = EmitScalarExpr(E->getVal1());
- Val2 = EmitValToTemp(*this, E->getVal2());
+ Dest = EmitScalarExpr(E->getVal2());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ Val1 = EmitScalarExpr(E->getVal1());
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ Val2 = EmitScalarExpr(E->getVal2());
+ else
+ Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
- (void)OrderFail; // OrderFail is unused at the moment
- } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
- MemTy->isPointerType()) {
- // For pointers, we're required to do a bit of math: adding 1 to an int*
- // is not the same as adding 1 to a uintptr_t.
- QualType Val1Ty = E->getVal1()->getType();
- llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
- CharUnits PointeeIncAmt =
- getContext().getTypeSizeInChars(MemTy->getPointeeType());
- Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
- Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
- EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
- } else if (E->getOp() != AtomicExpr::Load) {
+ // Evaluate and discard the 'weak' argument.
+ if (E->getNumSubExprs() == 6)
+ EmitScalarExpr(E->getWeak());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (MemTy->isPointerType()) {
+ // For pointer arithmetic, we're required to do a bit of math:
+ // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ // ... but only for the C11 builtins. The GNU builtins expect the
+ // user to multiply by sizeof(T).
+ QualType Val1Ty = E->getVal1()->getType();
+ llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+ CharUnits PointeeIncAmt =
+ getContext().getTypeSizeInChars(MemTy->getPointeeType());
+ Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+ Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ break;
+ }
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
Val1 = EmitValToTemp(*this, E->getVal1());
+ break;
}
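
A hedged sketch of the scaling difference described above (the __c11_* spellings are Clang builtins):

    int arr[4];
    _Atomic(int *) p = arr;
    __c11_atomic_fetch_add(&p, 1, __ATOMIC_SEQ_CST);        // element-wise: p == &arr[1]

    int *q = arr;
    __atomic_fetch_add(&q, sizeof(int), __ATOMIC_SEQ_CST);  // GNU form: caller scales by sizeof(int)
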
- if (E->getOp() != AtomicExpr::Store && !Dest)
+ if (!E->getType()->isVoidType() && !Dest)
Dest = CreateMemTemp(E->getType(), ".atomicdst");
+ // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
- // FIXME: Finalize what the libcalls are actually supposed to look like.
- // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
- return EmitUnsupportedRValue(E, "atomic library call");
- }
-#if 0
- if (UseLibcall) {
- const char* LibCallName;
- switch (E->getOp()) {
- case AtomicExpr::CmpXchgWeak:
- LibCallName = "__atomic_compare_exchange_generic"; break;
- case AtomicExpr::CmpXchgStrong:
- LibCallName = "__atomic_compare_exchange_generic"; break;
- case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
- case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
- case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
- case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
- case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
- case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break;
- case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break;
- case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break;
- }
- llvm::SmallVector<QualType, 4> Params;
+
+ llvm::SmallVector<QualType, 5> Params;
CallArgList Args;
- QualType RetTy = getContext().VoidTy;
- if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg())
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
- getContext().VoidPtrTy);
+ // Size is always the first parameter
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+ // Atomic address is always the second parameter
Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
getContext().VoidPtrTy);
- if (E->getOp() != AtomicExpr::Load)
+
+ const char* LibCallName;
+ QualType RetTy = getContext().VoidTy;
+ switch (E->getOp()) {
+ // There is only one libcall for compare and exchange, because there is no
+ // optimisation benefit possible from a libcall version of a weak compare
+ // and exchange.
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure)
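+ // (Illustrative.) A compare-exchange on an object too wide for native
+ // atomics, say 16 bytes, would come out as roughly
+ //   __atomic_compare_exchange(16, &obj, &expected, &desired, succ, fail);
+ // with the arguments assembled below.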
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ LibCallName = "__atomic_compare_exchange";
+ RetTy = getContext().BoolTy;
Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
getContext().VoidPtrTy);
- if (E->isCmpXChg()) {
Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
getContext().VoidPtrTy);
- RetTy = getContext().IntTy;
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+ Order = OrderFail;
+ break;
+ // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
+ // int order)
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ LibCallName = "__atomic_exchange";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ LibCallName = "__atomic_store";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_load(size_t size, void *mem, void *return, int order)
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ LibCallName = "__atomic_load";
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+#if 0
+ // These are only defined for 1-16 byte integers. It is not clear what
+ // their semantics would be on anything else...
+ case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
+ case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
+ case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
+ case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
+ case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
+#endif
+ default: return EmitUnsupportedRValue(E, "atomic library call");
}
- Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
- getContext().getSizeType());
+ // order is always the last parameter
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+
const CGFunctionInfo &FuncInfo =
- CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false);
+ CGM.getTypes().arrangeFunctionCall(RetTy, Args,
+ FunctionType::ExtInfo(), RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
if (E->isCmpXChg())
return Res;
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), Dest);
}
-#endif
+
llvm::Type *IPtrTy =
llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
llvm::Value *OrigDest = Dest;
@@ -2684,24 +3077,31 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
default: // invalid order
// We should not ever get here normally, but it's hard to
// enforce that in general.
- break;
+ break;
}
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
// Long case, when Order isn't obviously constant.
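+ // (Illustrative.) This lowers to a switch over the dynamic order value,
+ // roughly:
+ //   switch i32 %order, label %monotonic [ i32 1, label %acquire
+ //                                         i32 2, label %acquire
+ //                                         i32 3, label %release ... ]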
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
// Create all the relevant BB's
llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
*AcqRelBB = 0, *SeqCstBB = 0;
MonotonicBB = createBasicBlock("monotonic", CurFn);
- if (E->getOp() != AtomicExpr::Store)
+ if (!IsStore)
AcquireBB = createBasicBlock("acquire", CurFn);
- if (E->getOp() != AtomicExpr::Load)
+ if (!IsLoad)
ReleaseBB = createBasicBlock("release", CurFn);
- if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
+ if (!IsLoad && !IsStore)
AcqRelBB = createBasicBlock("acqrel", CurFn);
SeqCstBB = createBasicBlock("seqcst", CurFn);
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
@@ -2718,7 +3118,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Monotonic);
Builder.CreateBr(ContBB);
- if (E->getOp() != AtomicExpr::Store) {
+ if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Acquire);
@@ -2726,14 +3126,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
SI->addCase(Builder.getInt32(1), AcquireBB);
SI->addCase(Builder.getInt32(2), AcquireBB);
}
- if (E->getOp() != AtomicExpr::Load) {
+ if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Release);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(3), ReleaseBB);
}
- if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
+ if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::AcquireRelease);
@@ -2748,7 +3148,102 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Cleanup and return
Builder.SetInsertPoint(ContBB);
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
+
+void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
+ assert(Val->getType()->isFPOrFPVectorTy());
+ if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
+ return;
+
+ llvm::Value *ULPs = llvm::ConstantFP::get(Builder.getFloatTy(), Accuracy);
+ llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), ULPs);
+
+ cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy,
+ Node);
+}
+
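+// (Illustrative usage sketch.) Tagging a division that must be accurate to
+// 2.5 ulps, as in SetFPAccuracy(Div, 2.5f), yields IR along the lines of:
+//   %div = fdiv float %a, %b, !fpaccuracy !0
+//   !0 = metadata !{float 2.500000e+00}
+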
+namespace {
+ struct LValueOrRValue {
+ LValue LV;
+ RValue RV;
+ };
+}
+
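+// (Illustrative.) A PseudoObjectExpr pairs a syntactic form with a list of
+// semantic expressions; for an ObjC property update such as
+//   obj.count += 1;
+// the semantics are roughly: opaque values binding the receiver and the
+// fetched value, plus the getter and setter sends, one of which is
+// designated the result expression consumed below.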
+static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E,
+ bool forLValue,
+ AggValueSlot slot) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression, if any.
+ const Expr *resultExpr = E->getResultExpr();
+ LValueOrRValue result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+
+ // If this is the result expression, we may need to evaluate
+ // directly into the slot.
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+ if (ov == resultExpr && ov->isRValue() && !forLValue &&
+ CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
+ !ov->getType()->isAnyComplexType()) {
+ CGF.EmitAggExpr(ov->getSourceExpr(), slot);
+
+ LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
+ opaqueData = OVMA::bind(CGF, ov, LV);
+ result.RV = slot.asRValue();
+
+ // Otherwise, emit as normal.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+
+ // If this is the result, also evaluate the result now.
+ if (ov == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(ov);
+ else
+ result.RV = CGF.EmitAnyExpr(ov, slot);
+ }
+ }
+
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(semantic);
+ else
+ result.RV = CGF.EmitAnyExpr(semantic, slot);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
+
+RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
+ AggValueSlot slot) {
+ return emitPseudoObjectExpr(*this, E, false, slot).RV;
+}
+
+LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
+ return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
index 97754d5..b6efc1c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -16,6 +16,7 @@
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
@@ -74,12 +75,17 @@ public:
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
- void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
+ void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
+ unsigned Alignment = 0);
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
+ void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
+ void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E);
+
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
- if (CGF.getLangOptions().getGC() && TypeRequiresGCollection(T))
+ if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
return AggValueSlot::NeedsGCBarriers;
return AggValueSlot::DoesNotNeedGCBarriers;
}
@@ -103,7 +109,24 @@ public:
}
// l-values.
- void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // For aggregates, we should always be able to emit the variable
+ // as an l-value unless it's a reference. This is due to the fact
+ // that we can't actually ever see a normal l2r conversion on an
+ // aggregate in C++, and in C there's no language standard
+ // actively preventing us from listing variables in the captures
+ // list of a block.
+ if (E->getDecl()->getType()->isReferenceType()) {
+ if (CodeGenFunction::ConstantEmission result
+ = CGF.tryEmitAsConstant(E)) {
+ EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
+ return;
+ }
+ }
+
+ EmitAggLoadOfLValue(E);
+ }
+
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
@@ -111,9 +134,6 @@ public:
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
EmitAggLoadOfLValue(E);
}
- void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
- EmitAggLoadOfLValue(E);
- }
void VisitPredefinedExpr(const PredefinedExpr *E) {
EmitAggLoadOfLValue(E);
}
@@ -131,7 +151,6 @@ public:
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
EmitAggLoadOfLValue(E);
}
- void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
@@ -142,12 +161,22 @@ public:
}
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitLambdaExpr(LambdaExpr *E);
void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+ void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ if (E->isGLValue()) {
+ LValue LV = CGF.EmitPseudoObjectLValue(E);
+ return EmitFinalDestCopy(E, LV);
+ }
+
+ CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
+ }
+
void VisitVAArgExpr(VAArgExpr *E);
void EmitInitializationToLValue(Expr *E, LValue Address);
@@ -213,7 +242,8 @@ void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
+ unsigned Alignment) {
assert(Src.isAggregate() && "value must be aggregate value!");
// If Dest is ignored, then we're evaluating an aggregate expression
@@ -224,7 +254,7 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
// volatile.
if (Dest.isIgnored()) {
if (!Src.isVolatileQualified() ||
- CGF.CGM.getLangOptions().CPlusPlus ||
+ CGF.CGM.getLangOpts().CPlusPlus ||
(IgnoreResult && Ignore))
return;
@@ -248,16 +278,239 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
// from the source as well, as we can't eliminate it if either operand
// is volatile, unless copy has volatile for both source and destination..
CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
- Dest.isVolatile()|Src.isVolatileQualified());
+ Dest.isVolatile()|Src.isVolatileQualified(),
+ Alignment);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
- EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
- Src.isVolatileQualified()),
- Ignore);
+ CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
+ EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
+}
+
+static QualType GetStdInitializerListElementType(QualType T) {
+ // Just assume that this is really std::initializer_list.
+ ClassTemplateSpecializationDecl *specialization =
+ cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
+ return specialization->getTemplateArgs()[0].getAsType();
+}
+
+/// \brief Prepare cleanup for the temporary array.
+static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
+ QualType arrayType,
+ llvm::Value *addr,
+ const InitListExpr *initList) {
+ QualType::DestructionKind dtorKind = arrayType.isDestructedType();
+ if (!dtorKind)
+ return; // Type doesn't need destroying.
+ if (dtorKind != QualType::DK_cxx_destructor) {
+ CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
+ return;
+ }
+
+ CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
+ CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
+ /*EHCleanup=*/true);
+}
+
+/// \brief Emit the initializer for a std::initializer_list initialized with a
+/// real initializer list.
+void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
+ InitListExpr *initList) {
+ // We emit an array containing the elements, then have the init list point
+ // at the array.
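+ // (Illustrative.) For
+ //   std::initializer_list<int> il = { 1, 2, 3 };
+ // we materialize a temporary 'const int [3]' and then fill in il's two
+ // fields: the array's start pointer, plus either an end pointer or a
+ // length, depending on the library's representation.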
+ ASTContext &ctx = CGF.getContext();
+ unsigned numInits = initList->getNumInits();
+ QualType element = GetStdInitializerListElementType(initList->getType());
+ llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
+ llvm::Type *LTy = CGF.ConvertTypeForMem(array);
+ llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
+ alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
+ alloc->setName(".initlist.");
+
+ EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);
+
+ // FIXME: The diagnostics are somewhat out of place here.
+ RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator field = record->field_begin();
+ if (field == record->field_end()) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+
+ QualType elementPtr = ctx.getPointerType(element.withConst());
+
+ // Start pointer.
+ if (!ctx.hasSameType(field->getType(), elementPtr)) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+ LValue start = CGF.EmitLValueForFieldInitialization(destPtr, *field, 0);
+ llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
+ CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
+ ++field;
+
+ if (field == record->field_end()) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+ LValue endOrLength = CGF.EmitLValueForFieldInitialization(destPtr, *field, 0);
+ if (ctx.hasSameType(field->getType(), elementPtr)) {
+ // End pointer.
+ llvm::Value *arrayEnd = Builder.CreateStructGEP(alloc, numInits, "arrayend");
+ CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
+ } else if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ // Length.
+ CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
+ } else {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+
+ if (!Dest.isExternallyDestructed())
+ EmitStdInitializerListCleanup(CGF, array, alloc, initList);
+}
+
+/// \brief Emit initialization of an array from an initializer list.
+void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E) {
+ uint64_t NumInitElements = E->getNumInits();
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ assert(NumInitElements <= NumArrayElements);
+
+ // DestPtr is an array*. Construct an elementType* by drilling
+ // down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = { zero, zero };
+ llvm::Value *begin =
+ Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
+
+ // Exception safety requires us to destroy all the
+ // already-constructed members if an initializer throws.
+ // For that, we'll need an EH cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ llvm::AllocaInst *endOfInit = 0;
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ if (CGF.needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CGF.CreateTempAlloca(begin->getType(),
+ "arrayinit.endOfInit");
+ cleanupDominator = Builder.CreateStore(begin, endOfInit);
+ CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+
+ // Otherwise, remember that we didn't need a cleanup.
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
+
+ // The 'current element to initialize'. The invariants on this
+ // variable are complicated. Essentially, after each iteration of
+ // the loop, it points to the last initialized element, except
+ // that it points to the beginning of the array before any
+ // elements have been initialized.
+ llvm::Value *element = begin;
+
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ // Advance to the next element.
+ if (i > 0) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
+
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // If these are nested std::initializer_list inits, do them directly,
+ // because they are conceptually the same "location".
+ InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
+ if (initList && initList->initializesStdInitializerList()) {
+ EmitStdInitializerList(element, initList);
+ } else {
+ LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ EmitInitializationToLValue(E->getInit(i), elementLV);
+ }
+ }
+
+ // Check whether there's a non-trivial array-fill expression.
+ // Note that this will be a CXXConstructExpr even if the element
+ // type is an array (or array of array, etc.) of class type.
+ Expr *filler = E->getArrayFiller();
+ bool hasTrivialFiller = true;
+ if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
+ assert(cons->getConstructor()->isDefaultConstructor());
+ hasTrivialFiller = cons->getConstructor()->isTrivial();
+ }
+
+ // Any remaining elements need to be zero-initialized, possibly
+ // using the filler expression. We can skip this if we're
+ // emitting to zeroed memory.
+ if (NumInitElements != NumArrayElements &&
+ !(Dest.isZeroed() && hasTrivialFiller &&
+ CGF.getTypes().isZeroInitializable(elementType))) {
+
+ // Use an actual loop. This is basically
+ // do { *array++ = filler; } while (array != end);
+
+ // Advance to the start of the rest of the array.
+ if (NumInitElements) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // Compute the end of the array.
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin,
+ llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
+ "arrayinit.end");
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *currentElement =
+ Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+ currentElement->addIncoming(element, entryBB);
+
+ // Emit the actual filler expression.
+ LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
+ if (filler)
+ EmitInitializationToLValue(filler, elementLV);
+ else
+ EmitNullInitializationToLValue(elementLV);
+
+ // Move on to the next element.
+ llvm::Value *nextElement =
+ Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+ currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
+
+ CGF.EmitBlock(endBB);
+ }
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
//===----------------------------------------------------------------------===//
@@ -325,16 +578,10 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
"should have been unpacked before we got here");
}
- case CK_GetObjCProperty: {
- LValue LV = CGF.EmitLValue(E->getSubExpr());
- assert(LV.isPropertyRef());
- RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
- EmitMoveFromReturnSlot(E, RV);
- break;
- }
-
case CK_LValueToRValue: // hope for downstream optimization
case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
@@ -345,7 +592,6 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_LValueBitCast:
llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
- break;
case CK_Dependent:
case CK_BitCast:
@@ -356,6 +602,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
case CK_PointerToBoolean:
@@ -385,6 +632,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -404,11 +652,6 @@ void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
EmitMoveFromReturnSlot(E, RV);
}
-void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- llvm_unreachable("direct property access not surrounded by "
- "lvalue-to-rvalue cast");
-}
-
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
CGF.EmitIgnoredExpr(E->getLHS());
Visit(E->getRHS());
@@ -456,29 +699,13 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
LValue LHS = CGF.EmitLValue(E->getLHS());
- // We have to special case property setters, otherwise we must have
- // a simple lvalue (no aggregates inside vectors, bitfields).
- if (LHS.isPropertyRef()) {
- const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
- QualType ArgType = RE->getSetterArgType();
- RValue Src;
- if (ArgType->isReferenceType())
- Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
- else {
- AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
- CGF.EmitAggExpr(E->getRHS(), Slot);
- Src = Slot.asRValue();
- }
- CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
- } else {
- // Codegen the RHS so that it stores directly into the LHS.
- AggValueSlot LHSSlot =
- AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
- needsGC(E->getLHS()->getType()),
- AggValueSlot::IsAliased);
- CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
- EmitFinalDestCopy(E, LHS, true);
- }
+ // Codegen the RHS so that it stores directly into the LHS.
+ AggValueSlot LHSSlot =
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
+ EmitFinalDestCopy(E, LHS, true);
}
void AggExprEmitter::
@@ -547,7 +774,7 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Push that destructor we promised.
if (!wasExternallyDestructed)
- CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
+ CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}
void
@@ -556,8 +783,16 @@ AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
CGF.EmitCXXConstructExpr(E, Slot);
}
+void
+AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitLambdaExpr(E, Slot);
+}
+
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.EmitExprWithCleanups(E, Dest);
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+ Visit(E->getSubExpr());
}
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
@@ -636,9 +871,16 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
return;
if (!CGF.hasAggregateLLVMType(type)) {
- // For non-aggregates, we can store zero
+ // For non-aggregates, we can store zero.
llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
- CGF.EmitStoreThroughLValue(RValue::get(null), lv);
+ // Note that the following is not equivalent to
+ // EmitStoreThroughBitfieldLValue for ARC types.
+ if (lv.isBitField()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
+ } else {
+ assert(lv.isSimple());
+ CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
+ }
} else {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
@@ -665,17 +907,15 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
- llvm::Value *DestPtr = Dest.getAddr();
+ if (E->initializesStdInitializerList()) {
+ EmitStdInitializerList(Dest.getAddr(), E);
+ return;
+ }
+
+ llvm::Value *DestPtr = EnsureSlot(E->getType()).getAddr();
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
- llvm::PointerType *APType =
- cast<llvm::PointerType>(DestPtr->getType());
- llvm::ArrayType *AType =
- cast<llvm::ArrayType>(APType->getElementType());
-
- uint64_t NumInitElements = E->getNumInits();
-
if (E->getNumInits() > 0) {
QualType T1 = E->getType();
QualType T2 = E->getInit(0)->getType();
@@ -685,136 +925,15 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
}
- uint64_t NumArrayElements = AType->getNumElements();
- assert(NumInitElements <= NumArrayElements);
-
- QualType elementType = E->getType().getCanonicalType();
- elementType = CGF.getContext().getQualifiedType(
- cast<ArrayType>(elementType)->getElementType(),
- elementType.getQualifiers() + Dest.getQualifiers());
-
- // DestPtr is an array*. Construct an elementType* by drilling
- // down a level.
- llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
- llvm::Value *indices[] = { zero, zero };
- llvm::Value *begin =
- Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
-
- // Exception safety requires us to destroy all the
- // already-constructed members if an initializer throws.
- // For that, we'll need an EH cleanup.
- QualType::DestructionKind dtorKind = elementType.isDestructedType();
- llvm::AllocaInst *endOfInit = 0;
- EHScopeStack::stable_iterator cleanup;
- if (CGF.needsEHCleanup(dtorKind)) {
- // In principle we could tell the cleanup where we are more
- // directly, but the control flow can get so varied here that it
- // would actually be quite complex. Therefore we go through an
- // alloca.
- endOfInit = CGF.CreateTempAlloca(begin->getType(),
- "arrayinit.endOfInit");
- Builder.CreateStore(begin, endOfInit);
- CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
- CGF.getDestroyer(dtorKind));
- cleanup = CGF.EHStack.stable_begin();
-
- // Otherwise, remember that we didn't need a cleanup.
- } else {
- dtorKind = QualType::DK_none;
- }
-
- llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
-
- // The 'current element to initialize'. The invariants on this
- // variable are complicated. Essentially, after each iteration of
- // the loop, it points to the last initialized element, except
- // that it points to the beginning of the array before any
- // elements have been initialized.
- llvm::Value *element = begin;
-
- // Emit the explicit initializers.
- for (uint64_t i = 0; i != NumInitElements; ++i) {
- // Advance to the next element.
- if (i > 0) {
- element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
-
- // Tell the cleanup that it needs to destroy up to this
- // element. TODO: some of these stores can be trivially
- // observed to be unnecessary.
- if (endOfInit) Builder.CreateStore(element, endOfInit);
- }
-
- LValue elementLV = CGF.MakeAddrLValue(element, elementType);
- EmitInitializationToLValue(E->getInit(i), elementLV);
- }
-
- // Check whether there's a non-trivial array-fill expression.
- // Note that this will be a CXXConstructExpr even if the element
- // type is an array (or array of array, etc.) of class type.
- Expr *filler = E->getArrayFiller();
- bool hasTrivialFiller = true;
- if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
- assert(cons->getConstructor()->isDefaultConstructor());
- hasTrivialFiller = cons->getConstructor()->isTrivial();
- }
-
- // Any remaining elements need to be zero-initialized, possibly
- // using the filler expression. We can skip this if the we're
- // emitting to zeroed memory.
- if (NumInitElements != NumArrayElements &&
- !(Dest.isZeroed() && hasTrivialFiller &&
- CGF.getTypes().isZeroInitializable(elementType))) {
-
- // Use an actual loop. This is basically
- // do { *array++ = filler; } while (array != end);
-
- // Advance to the start of the rest of the array.
- if (NumInitElements) {
- element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
- if (endOfInit) Builder.CreateStore(element, endOfInit);
- }
-
- // Compute the end of the array.
- llvm::Value *end = Builder.CreateInBoundsGEP(begin,
- llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
- "arrayinit.end");
-
- llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
- llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
-
- // Jump into the body.
- CGF.EmitBlock(bodyBB);
- llvm::PHINode *currentElement =
- Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
- currentElement->addIncoming(element, entryBB);
-
- // Emit the actual filler expression.
- LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
- if (filler)
- EmitInitializationToLValue(filler, elementLV);
- else
- EmitNullInitializationToLValue(elementLV);
-
- // Move on to the next element.
- llvm::Value *nextElement =
- Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
-
- // Tell the EH cleanup that we finished with the last element.
- if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
-
- // Leave the loop if we're done.
- llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
- "arrayinit.done");
- llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
- Builder.CreateCondBr(done, endBB, bodyBB);
- currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
-
- CGF.EmitBlock(endBB);
- }
+ QualType elementType =
+ CGF.getContext().getAsArrayType(E->getType())->getElementType();
- // Leave the partial-array cleanup if we entered one.
- if (dtorKind) CGF.DeactivateCleanupBlock(cleanup);
+ llvm::PointerType *APType =
+ cast<llvm::PointerType>(DestPtr->getType());
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(APType->getElementType());
+ EmitArrayInit(DestPtr, AType, elementType, E);
return;
}
@@ -862,6 +981,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// We'll need to enter cleanup scopes in case any of the member
// initializers throw an exception.
SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+ llvm::Instruction *cleanupDominator = 0;
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
@@ -905,6 +1025,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
= field->getType().isDestructedType()) {
assert(LV.isSimple());
if (CGF.needsEHCleanup(dtorKind)) {
+ if (!cleanupDominator)
+ cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
+
CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
CGF.getDestroyer(dtorKind), false);
cleanups.push_back(CGF.EHStack.stable_begin());
@@ -924,7 +1047,11 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Deactivate all the partial cleanups in reverse order, which
// generally means popping them.
for (unsigned i = cleanups.size(); i != 0; --i)
- CGF.DeactivateCleanupBlock(cleanups[i-1]);
+ CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
+
+ // Destroy the placeholder if we made one.
+ if (cleanupDominator)
+ cleanupDominator->eraseFromParent();
}
//===----------------------------------------------------------------------===//
@@ -996,7 +1123,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
// C++ objects with a user-declared constructor don't need zero'ing.
- if (CGF.getContext().getLangOptions().CPlusPlus)
+ if (CGF.getContext().getLangOpts().CPlusPlus)
if (const RecordType *RT = CGF.getContext()
.getBaseElementType(E->getType())->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -1021,9 +1148,8 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
CharUnits Align = TypeInfo.second;
llvm::Value *Loc = Slot.getAddr();
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- Loc = CGF.Builder.CreateBitCast(Loc, BP);
+ Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
Align.getQuantity(), false);
@@ -1066,10 +1192,10 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
- bool isVolatile) {
+ bool isVolatile, unsigned Alignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- if (getContext().getLangOptions().CPlusPlus) {
+ if (getContext().getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
assert((Record->hasTrivialCopyConstructor() ||
@@ -1099,6 +1225,9 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
std::pair<CharUnits, CharUnits> TypeInfo =
getContext().getTypeInfoInChars(Ty);
+ if (!Alignment)
+ Alignment = TypeInfo.second.getQuantity();
+
// FIXME: Handle variable sized types.
// FIXME: If we have a volatile struct, the optimizer can remove what might
@@ -1125,7 +1254,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
// Don't do any of the memmove_collectable tests if GC isn't set.
- if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
// fall through
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
@@ -1155,5 +1284,60 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
Builder.CreateMemCpy(DestPtr, SrcPtr,
llvm::ConstantInt::get(IntPtrTy,
TypeInfo.first.getQuantity()),
- TypeInfo.second.getQuantity(), isVolatile);
+ Alignment, isVolatile);
+}
+
+void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
+ const Expr *init) {
+ const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
+ if (cleanups)
+ init = cleanups->getSubExpr();
+
+ if (isa<InitListExpr>(init) &&
+ cast<InitListExpr>(init)->initializesStdInitializerList()) {
+ // We initialized this std::initializer_list with an initializer list.
+ // A backing array was created. Push a cleanup for it.
+ EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
+ }
+}
+
+static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
+ llvm::Value *arrayStart,
+ const InitListExpr *init) {
+ // Check if there are any recursive cleanups to do, i.e. if we have
+ // std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
+ // then we need to destroy the inner array as well.
+ for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
+ const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
+ if (!subInit || !subInit->initializesStdInitializerList())
+ continue;
+
+ // This one needs to be destroyed. Get the address of the std::init_list.
+ llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
+ llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
+ "std.initlist");
+ CGF.EmitStdInitializerListCleanup(loc, subInit);
+ }
+}
+
+void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
+ const InitListExpr *init) {
+ ASTContext &ctx = getContext();
+ QualType element = GetStdInitializerListElementType(init->getType());
+ unsigned numInits = init->getNumInits();
+ llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
+ QualType arrayPtr = ctx.getPointerType(array);
+ llvm::Type *arrayPtrType = ConvertType(arrayPtr);
+
+ // lvalue is the location of a std::initializer_list, which as its first
+ // element has a pointer to the array we want to destroy.
+ llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
+ llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");
+
+ ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);
+
+ llvm::Value *arrayAddress =
+ Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
+ ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
index 78db590..d3ba770 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -33,8 +33,6 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
-
CallArgList Args;
// Push the this ptr.
@@ -45,13 +43,16 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
QualType T = getContext().getPointerType(getContext().VoidPtrTy);
Args.add(RValue::get(VTT), T);
}
+
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
- // And the rest of the call args
+ // And the rest of the call args.
EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
- QualType ResultType = FPT->getResultType();
- return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
- FPT->getExtInfo()),
+ return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
+ FPT->getExtInfo(),
+ required),
Callee, ReturnValue, Args, MD);
}
@@ -112,7 +113,7 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
// When building with -fapple-kext, all calls must go through the vtable since
// the kernel linker can do runtime patching of vtables.
- if (Context.getLangOptions().AppleKext)
+ if (Context.getLangOpts().AppleKext)
return false;
// If the most derived class is marked final, we know that no subclass can
@@ -229,17 +230,16 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
// Compute the function type we're calling.
const CGFunctionInfo *FInfo = 0;
if (isa<CXXDestructorDecl>(MD))
- FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
- Dtor_Complete);
+ FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
else if (isa<CXXConstructorDecl>(MD))
- FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
- Ctor_Complete);
+ FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
+ cast<CXXConstructorDecl>(MD),
+ Ctor_Complete);
else
- FInfo = &CGM.getTypes().getFunctionInfo(MD);
+ FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- llvm::Type *Ty
- = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
@@ -256,7 +256,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (UseVirtualCall) {
Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
} else {
- if (getContext().getLangOptions().AppleKext &&
+ if (getContext().getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
@@ -269,7 +269,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
} else if (UseVirtualCall) {
Callee = BuildVirtualCall(MD, This, Ty);
} else {
- if (getContext().getLangOptions().AppleKext &&
+ if (getContext().getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
@@ -322,7 +322,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- return EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
+ return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
ReturnValue, Args);
}
@@ -427,7 +427,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// Elide the constructor if we're constructing from a temporary.
// The temporary check is required because Sema sets this on NRVO
// returns.
- if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
+ if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
assert(getContext().hasSameUnqualifiedType(E->getType(),
E->getArg(0)->getType()));
if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
@@ -508,6 +508,7 @@ static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
const CXXNewExpr *e,
+ unsigned minElements,
llvm::Value *&numElements,
llvm::Value *&sizeWithoutCookie) {
QualType type = e->getAllocatedType();
@@ -581,6 +582,11 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// Okay, compute a count at the right width.
llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
+ // If there is a brace-initializer, we cannot allocate fewer elements than
+ // there are initializers. If we do, that's treated like an overflow.
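+ // (Illustrative.) E.g. a count that constant-folds to 2 in
+ // 'new int[count]{1, 2, 3}' is rejected here the same way an
+ // allocation size overflow is.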
+ if (adjustedCount.ult(minElements))
+ hasAnyOverflow = true;
+
// Scale numElements by that. This might overflow, but we don't
// care because it only overflows if allocationSize does, too, and
// if that overflows then we shouldn't use this.
@@ -612,14 +618,16 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// Otherwise, we might need to use the overflow intrinsics.
} else {
- // There are up to four conditions we need to test for:
+ // There are up to five conditions we need to test for:
// 1) if isSigned, we need to check whether numElements is negative;
// 2) if numElementsWidth > sizeWidth, we need to check whether
// numElements is larger than something representable in size_t;
- // 3) we need to compute
+ // 3) if minElements > 0, we need to check whether numElements is smaller
+ // than that.
+ // 4) we need to compute
// sizeWithoutCookie := numElements * typeSizeMultiplier
// and check whether it overflows; and
- // 4) if we need a cookie, we need to compute
+ // 5) if we need a cookie, we need to compute
// size := sizeWithoutCookie + cookieSize
// and check whether it overflows.
@@ -646,10 +654,11 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// If there's a non-1 type size multiplier, then we can do the
// signedness check at the same time as we do the multiply
// because a negative number times anything will cause an
- // unsigned overflow. Otherwise, we have to do it here.
+ // unsigned overflow. Otherwise, we have to do it here. But at least
+ // in this case, we can subsume the >= minElements check.
if (typeSizeMultiplier == 1)
hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
- llvm::ConstantInt::get(CGF.SizeTy, 0));
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
// Otherwise, zext up to size_t if necessary.
} else if (numElementsWidth < sizeWidth) {
@@ -658,6 +667,21 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
assert(numElements->getType() == CGF.SizeTy);
+ if (minElements) {
+ // Don't allow allocation of fewer elements than we have initializers.
+ if (!hasOverflow) {
+ hasOverflow = CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
+ } else if (numElementsWidth > sizeWidth) {
+ // The other existing overflow subsumes this check.
+ // We do an unsigned comparison, since any signed value < -1 is
+ // taken care of either above or below.
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow,
+ CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements)));
+ }
+ }
+
size = numElements;
// Multiply by the type size if necessary. This multiplier
@@ -741,30 +765,26 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
return size;
}
-static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
- llvm::Value *NewPtr) {
-
- assert(E->getNumConstructorArgs() == 1 &&
- "Can only have one argument to initializer of POD type.");
-
- const Expr *Init = E->getConstructorArg(0);
- QualType AllocType = E->getAllocatedType();
+static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
+ QualType AllocType, llvm::Value *NewPtr) {
- unsigned Alignment =
- CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
if (!CGF.hasAggregateLLVMType(AllocType))
- CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType, Alignment),
+ CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
+ Alignment),
false);
else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
AllocType.isVolatileQualified());
else {
AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
+ = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(Init, Slot);
+
+ CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
}
}
@@ -773,30 +793,67 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
QualType elementType,
llvm::Value *beginPtr,
llvm::Value *numElements) {
- // We have a POD type.
- if (E->getNumConstructorArgs() == 0)
- return;
-
- // Check if the number of elements is constant.
- bool checkZero = true;
- if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
- // If it's constant zero, skip the whole loop.
- if (constNum->isZero()) return;
-
- checkZero = false;
- }
+ if (!E->hasInitializer())
+ return; // We have a POD type.
+ llvm::Value *explicitPtr = beginPtr;
// Find the end of the array, hoisted out of the loop.
llvm::Value *endPtr =
Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
+ unsigned initializerElements = 0;
+
+ const Expr *Init = E->getInitializer();
+ llvm::AllocaInst *endOfInit = 0;
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ // If the initializer is an initializer list, first do the explicit elements.
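+ // (Illustrative.) For 'new int[n]{1, 2}', the two explicit elements are
+ // stored one by one here; the loop further below then fills the
+ // remaining n - 2 slots from the array filler.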
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ initializerElements = ILE->getNumInits();
+
+ // Enter a partial-destruction cleanup if necessary.
+ if (needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
+ cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
+ pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
+ getDestroyer(dtorKind));
+ cleanup = EHStack.stable_begin();
+ }
+
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
+ StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
+ explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
+ }
+
+ // The remaining elements are filled with the array filler expression.
+ Init = ILE->getArrayFiller();
+ }
+
// Create the continuation block.
llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
- // If we need to check for zero, do so now.
- if (checkZero) {
+ // If the number of elements isn't constant, we have to now check if there is
+ // anything left to initialize.
+ if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
+ // If all elements have already been initialized, skip the whole loop.
+ if (constNum->getZExtValue() <= initializerElements) {
+ // If there was a cleanup, deactivate it.
+ if (cleanupDominator)
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
+ return;
+ }
+ } else {
llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
- llvm::Value *isEmpty = Builder.CreateICmpEQ(beginPtr, endPtr,
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
"array.isempty");
Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
EmitBlock(nonEmptyBB);
@@ -810,24 +867,28 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
// Set up the current-element phi.
llvm::PHINode *curPtr =
- Builder.CreatePHI(beginPtr->getType(), 2, "array.cur");
- curPtr->addIncoming(beginPtr, entryBB);
+ Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
+ curPtr->addIncoming(explicitPtr, entryBB);
+
+ // Store the new cleanup position for irregular cleanups.
+ if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
// Enter a partial-destruction cleanup if necessary.
- QualType::DestructionKind dtorKind = elementType.isDestructedType();
- EHScopeStack::stable_iterator cleanup;
- if (needsEHCleanup(dtorKind)) {
+ if (!cleanupDominator && needsEHCleanup(dtorKind)) {
pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
getDestroyer(dtorKind));
cleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
}
// Emit the initializer into this element.
- StoreAnyExprIntoOneUnit(*this, E, curPtr);
+ StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
// Leave the cleanup if we entered one.
- if (cleanup != EHStack.stable_end())
- DeactivateCleanupBlock(cleanup);
+ if (cleanupDominator) {
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
// Advance to the next element.
llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
@@ -854,13 +915,15 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *NewPtr,
llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
+ const Expr *Init = E->getInitializer();
if (E->isArray()) {
- if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)) {
+ CXXConstructorDecl *Ctor = CCE->getConstructor();
bool RequiresZeroInitialization = false;
- if (Ctor->getParent()->hasTrivialDefaultConstructor()) {
+ if (Ctor->isTrivial()) {
// If new expression did not specify value-initialization, then there
// is no initialization.
- if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
+ if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
return;
if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
@@ -873,43 +936,25 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
RequiresZeroInitialization = true;
}
- CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
- E->constructor_arg_begin(),
- E->constructor_arg_end(),
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
+ CCE->arg_begin(), CCE->arg_end(),
RequiresZeroInitialization);
return;
- } else if (E->getNumConstructorArgs() == 1 &&
- isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
+ } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
+ CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
// Optimization: since zero initialization will just set the memory
// to all zeroes, generate a single memset to do it in one shot.
EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
return;
- } else {
- CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
- return;
}
- }
-
- if (CXXConstructorDecl *Ctor = E->getConstructor()) {
- // Per C++ [expr.new]p15, if we have an initializer, then we're performing
- // direct initialization. C++ [dcl.init]p5 requires that we
- // zero-initialize storage if there are no user-declared constructors.
- if (E->hasInitializer() &&
- !Ctor->getParent()->hasUserDeclaredConstructor() &&
- !Ctor->getParent()->isEmpty())
- CGF.EmitNullInitialization(NewPtr, ElementType);
-
- CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- NewPtr, E->constructor_arg_begin(),
- E->constructor_arg_end());
-
+ CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
return;
}
- // We have a POD type.
- if (E->getNumConstructorArgs() == 0)
+
+ if (!Init)
return;
-
- StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
+
+ StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}
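
A hedged sketch of the three initializer shapes the rewritten dispatch distinguishes (type name hypothetical); note the patch now keys on E->getInitializer() rather than the old E->getConstructor():

    #include <cstddef>

    struct T { T(); };

    void initForms(std::size_t n) {
      new T[n];          // CXXConstructExpr: per-element constructor calls
      new int[n]();      // typically an ImplicitValueInitExpr over a
                         // zero-initializable type: lowered to one memset
      new int[n]{1, 2};  // anything else (e.g. C++11 braced lists) falls
                         // through to EmitNewArrayInitializer
    }                    // (results leaked; illustration only)
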
namespace {
@@ -961,7 +1006,7 @@ namespace {
DeleteArgs.add(getPlacementArgs()[I], *AI++);
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), DeleteArgs, OperatorDelete);
}
@@ -1022,7 +1067,7 @@ namespace {
}
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), DeleteArgs, OperatorDelete);
}
@@ -1057,7 +1102,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
+ .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
E->getNumPlacementArgs(),
E->getOperatorDelete(),
SavedNewPtr,
@@ -1066,7 +1111,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
Cleanup->setPlacementArg(I,
DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
- CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
+ CGF.initFullExprCleanup();
}
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
@@ -1083,10 +1128,18 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// The allocation size is the first argument.
QualType sizeType = getContext().getSizeType();
+ // If there is a brace-initializer, we cannot allocate fewer elements than inits.
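
Illustrative source for the new minElements computation (assuming the usual C++11 semantics): the count of braced initializers becomes a lower bound that EmitCXXNewAllocSize can fold into its size/overflow check, so a too-small runtime length cannot silently under-allocate.

    #include <cstddef>

    int *threeOrMore(std::size_t n) {
      // minElements == 3 here: any n that cannot hold the three initializers
      // must make the allocation fail rather than write past the end.
      return new int[n]{1, 2, 3};
    }
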
+ unsigned minElements = 0;
+ if (E->isArray() && E->hasInitializer()) {
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
+ minElements = ILE->getNumInits();
+ }
+
llvm::Value *numElements = 0;
llvm::Value *allocSizeWithoutCookie = 0;
llvm::Value *allocSize =
- EmitCXXNewAllocSize(*this, E, numElements, allocSizeWithoutCookie);
+ EmitCXXNewAllocSize(*this, E, minElements, numElements,
+ allocSizeWithoutCookie);
allocatorArgs.add(RValue::get(allocSize), sizeType);
@@ -1129,7 +1182,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// TODO: kill any unnecessary computations done for the size
// argument.
} else {
- RV = EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
+ RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
+ allocatorType),
CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
allocatorArgs, allocator);
}
@@ -1140,7 +1194,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// CXXNewExpr::shouldNullCheckAllocation()) and we have an
// interesting initializer.
bool nullCheck = allocatorType->isNothrow(getContext()) &&
- !(allocType.isPODType(getContext()) && !E->hasInitializer());
+ (!allocType.isPODType(getContext()) || E->hasInitializer());
llvm::BasicBlock *nullCheckBB = 0;
llvm::BasicBlock *contBB = 0;
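
The rewritten nullCheck condition is a pure De Morgan transformation of the old one; a tiny exhaustive check (standalone, names invented) confirms the equivalence:

    #include <cassert>
    #include <initializer_list>

    int main() {
      //   !(isPOD && !hasInit)  ==  (!isPOD || hasInit)
      for (bool isPOD : {false, true})
        for (bool hasInit : {false, true})
          assert((!(isPOD && !hasInit)) == (!isPOD || hasInit));
      return 0;
    }
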
@@ -1168,10 +1222,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// If there's an operator delete, enter a cleanup to call it if an
// exception is thrown.
EHScopeStack::stable_iterator operatorDeleteCleanup;
+ llvm::Instruction *cleanupDominator = 0;
if (E->getOperatorDelete() &&
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
operatorDeleteCleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
}
assert((allocSize == allocSizeWithoutCookie) ==
@@ -1200,9 +1256,11 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// Deactivate the 'operator delete' cleanup if we finished
// initialization.
- if (operatorDeleteCleanup.isValid())
- DeactivateCleanupBlock(operatorDeleteCleanup);
-
+ if (operatorDeleteCleanup.isValid()) {
+ DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
+
if (nullCheck) {
conditional.end(*this);
@@ -1248,7 +1306,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
DeleteArgs.add(RValue::get(Size), SizeTy);
// Emit the call to delete.
- EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
+ EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
DeleteArgs, DeleteFD);
}
@@ -1295,9 +1353,8 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
}
llvm::Type *Ty =
- CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
- Dtor_Complete),
- /*isVariadic=*/false);
+ CGF.getTypes().GetFunctionType(
+ CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));
llvm::Value *Callee
= CGF.BuildVirtualCall(Dtor,
@@ -1324,7 +1381,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false, Ptr);
- else if (CGF.getLangOptions().ObjCAutoRefCount &&
+ else if (CGF.getLangOpts().ObjCAutoRefCount &&
ElementType->isObjCLifetimeType()) {
switch (ElementType.getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -1405,7 +1462,7 @@ namespace {
}
// Emit the call to delete.
- CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
CGF.CGM.GetAddrOfFunction(OperatorDelete),
ReturnValueSlot(), Args, OperatorDelete);
}
@@ -1514,10 +1571,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
// void __cxa_bad_typeid();
-
- llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}
@@ -1598,7 +1652,7 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
// const abi::__class_type_info *dst,
// std::ptrdiff_t src2dst_offset);
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
@@ -1612,11 +1666,7 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
// void __cxa_bad_cast();
-
- llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, false);
-
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}
@@ -1762,3 +1812,19 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
return Value;
}
+
+void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
+ RunCleanupsScope Scope(*this);
+
+ CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
+ for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
+ e = E->capture_init_end();
+ i != e; ++i, ++CurField) {
+ // Emit initialization
+ LValue LV = EmitLValueForFieldInitialization(Slot.getAddr(), *CurField, 0);
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (CurField->getType()->isArrayType())
+ ArrayIndexes = E->getCaptureInitIndexVars(i);
+ EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ }
+}
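
A small source-level example (hypothetical) of what the new EmitLambdaExpr walks: one closure field per capture, initialized in capture order, with array captures being the case that needs the per-element index variables from getCaptureInitIndexVars.

    void closureFields() {
      int x = 1;
      int a[4] = {1, 2, 3, 4};
      // Closure fields: int x; int a[4]; each is initialized by the loop in
      // EmitLambdaExpr, the array field element by element.
      auto l = [x, a] { return x + a[0]; };
      (void)l;
    }
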
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
index 4a31bcf..0233745 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -64,11 +64,8 @@ public:
}
ComplexPairTy EmitLoadOfLValue(LValue LV) {
- if (LV.isSimple())
- return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
-
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
+ assert(LV.isSimple() && "complex l-value must be simple");
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
}
/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
@@ -78,11 +75,8 @@ public:
/// EmitStoreThroughLValue - Given an l-value of complex type, store
/// a complex number into it.
void EmitStoreThroughLValue(ComplexPairTy Val, LValue LV) {
- if (LV.isSimple())
- return EmitStoreOfComplex(Val, LV.getAddress(), LV.isVolatileQualified());
-
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- CGF.EmitStoreThroughPropertyRefLValue(RValue::getComplex(Val), LV);
+ assert(LV.isSimple() && "complex l-value must be simple");
+ return EmitStoreOfComplex(Val, LV.getAddress(), LV.isVolatileQualified());
}
/// EmitStoreOfComplex - Store the specified real/imag parts into the
@@ -117,13 +111,18 @@ public:
}
// l-values.
- ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
- ComplexPairTy VisitBlockDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
- ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ ComplexPairTy VisitDeclRefExpr(DeclRefExpr *E) {
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+
+ llvm::ConstantStruct *pair =
+ cast<llvm::ConstantStruct>(result.getValue());
+ return ComplexPairTy(pair->getOperand(0), pair->getOperand(1));
+ }
return EmitLoadOfLValue(E);
}
- ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- assert(E->getObjectKind() == OK_Ordinary);
+ ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -137,6 +136,10 @@ public:
return CGF.getOpaqueRValueMapping(E).getComplexVal();
}
+ ComplexPairTy VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getComplexVal();
+ }
+
// FIXME: CompoundLiteralExpr
ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
@@ -185,7 +188,9 @@ public:
return Visit(DAE->getExpr());
}
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
- return CGF.EmitExprWithCleanups(E).getComplexVal();
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
}
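
The same enterFullExpression/RunCleanupsScope rewrite recurs in CGExprScalar.cpp below. A standard-C++ analogue of the design (illustrative only, not the CodeGen classes themselves): the scope object's destructor runs the deferred cleanups even on early exits, so the visit can return from anywhere inside it.

    struct Scope {
      ~Scope() { /* run deferred cleanups, like RunCleanupsScope */ }
    };

    int visitFullExpr() {
      Scope scope;  // cleanups bound to this full-expression
      return 42;    // stands in for Visit(E->getSubExpr())
    }
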
ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
@@ -265,6 +270,10 @@ public:
ComplexPairTy VisitInitListExpr(InitListExpr *E);
+ ComplexPairTy VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
@@ -362,12 +371,10 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
switch (CK) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
- case CK_GetObjCProperty: {
- LValue LV = CGF.EmitLValue(Op);
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- return CGF.EmitLoadOfPropertyRefLValue(LV).getComplexVal();
- }
-
+ // Atomic to non-atomic casts may be more than a no-op for some platforms and
+ // for some types.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_LValueToRValue:
case CK_UserDefinedConversion:
@@ -394,6 +401,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
@@ -418,6 +426,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -631,11 +640,7 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
LValue LV = EmitCompoundAssignLValue(E, Func, Val);
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOptions().CPlusPlus)
- return Val;
-
- // Objective-C property assignment never reloads the value following a store.
- if (LV.isPropertyRef())
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -670,11 +675,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
LValue LV = EmitBinAssignLValue(E, Val);
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOptions().CPlusPlus)
- return Val;
-
- // Objective-C property assignment never reloads the value following a store.
- if (LV.isPropertyRef())
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -831,7 +832,6 @@ EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
default:
llvm_unreachable("unexpected complex compound assignment");
- Op = 0;
}
ComplexPairTy Val; // ignored
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
index 3997866..d528e0c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -40,20 +40,27 @@ class ConstStructBuilder {
bool Packed;
CharUnits NextFieldOffsetInChars;
CharUnits LLVMStructAlignment;
- std::vector<llvm::Constant *> Elements;
+ SmallVector<llvm::Constant *, 32> Elements;
public:
static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
InitListExpr *ILE);
-
-private:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ const APValue &Value, QualType ValTy);
+
+private:
ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
: CGM(CGM), CGF(CGF), Packed(false),
NextFieldOffsetInChars(CharUnits::Zero()),
LLVMStructAlignment(CharUnits::One()) { }
- bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ void AppendVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitExpr);
+ void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
+
void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::ConstantInt *InitExpr);
@@ -62,8 +69,12 @@ private:
void AppendTailPadding(CharUnits RecordSize);
void ConvertStructToPacked();
-
+
bool Build(InitListExpr *ILE);
+ void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
+ llvm::Constant *VTable, const CXXRecordDecl *VTableClass,
+ CharUnits BaseOffset);
+ llvm::Constant *Finalize(QualType Ty);
CharUnits getAlignment(const llvm::Constant *C) const {
if (Packed) return CharUnits::One();
@@ -77,14 +88,36 @@ private:
}
};
-bool ConstStructBuilder::
+void ConstStructBuilder::AppendVTablePointer(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ // Find the appropriate vtable within the vtable group.
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
+ llvm::Value *Indices[] = {
+ llvm::ConstantInt::get(CGM.Int64Ty, 0),
+ llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
+ };
+ llvm::Constant *VTableAddressPoint =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Indices);
+
+ // Add the vtable at the start of the object.
+ AppendBytes(Base.getBaseOffset(), VTableAddressPoint);
+}
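
The two constant indices select the address point within the vtable object, roughly `getelementptr inbounds @vtable, i64 0, i64 AddressPoint`. A plain-C++ analogue of that address computation (array and names invented):

    #include <cstddef>

    static const void *const vtable[3] = {nullptr, nullptr, nullptr};

    const void *const *addressPoint(std::size_t ap) {
      return &vtable[ap];  // step to element ap, as the inbounds GEP does
    }
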
+
+void ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitCst) {
-
const ASTContext &Context = CGM.getContext();
CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
+ AppendBytes(FieldOffsetInChars, InitCst);
+}
+
+void ConstStructBuilder::
+AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
+
assert(NextFieldOffsetInChars <= FieldOffsetInChars
&& "Field offset mismatch!");
@@ -99,14 +132,13 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset,
// Convert the struct to a packed struct.
ConvertStructToPacked();
-
+
AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
}
if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
// We need to append padding.
- AppendPadding(
- FieldOffsetInChars - NextFieldOffsetInChars);
+ AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
assert(NextFieldOffsetInChars == FieldOffsetInChars &&
"Did not add enough padding!");
@@ -118,14 +150,12 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset,
Elements.push_back(InitCst);
NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
getSizeInChars(InitCst);
-
+
if (Packed)
- assert(LLVMStructAlignment == CharUnits::One() &&
+ assert(LLVMStructAlignment == CharUnits::One() &&
"Packed struct not byte-aligned!");
else
LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
-
- return true;
}
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
@@ -280,7 +310,7 @@ void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
if (PadSize.isZero())
return;
- llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *Ty = CGM.Int8Ty;
if (PadSize > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
@@ -300,7 +330,7 @@ void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
}
void ConstStructBuilder::ConvertStructToPacked() {
- std::vector<llvm::Constant *> PackedElements;
+ SmallVector<llvm::Constant *, 16> PackedElements;
CharUnits ElementOffsetInChars = CharUnits::Zero();
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
@@ -316,7 +346,7 @@ void ConstStructBuilder::ConvertStructToPacked() {
CharUnits NumChars =
AlignedElementOffsetInChars - ElementOffsetInChars;
- llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *Ty = CGM.Int8Ty;
if (NumChars > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
@@ -332,12 +362,17 @@ void ConstStructBuilder::ConvertStructToPacked() {
assert(ElementOffsetInChars == NextFieldOffsetInChars &&
"Packing the struct changed its size!");
- Elements = PackedElements;
+ Elements.swap(PackedElements);
LLVMStructAlignment = CharUnits::One();
Packed = true;
}
bool ConstStructBuilder::Build(InitListExpr *ILE) {
+ if (ILE->initializesStdInitializerList()) {
+ //CGM.ErrorUnsupported(ILE, "global std::initializer_list");
+ return false;
+ }
+
RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
@@ -382,8 +417,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
if (!Field->isBitField()) {
// Handle non-bitfield members.
- if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
- return false;
+ AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
} else {
// Otherwise we have a bitfield.
AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
@@ -391,6 +425,106 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
}
}
+ return true;
+}
+
+namespace {
+struct BaseInfo {
+ BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
+ : Decl(Decl), Offset(Offset), Index(Index) {
+ }
+
+ const CXXRecordDecl *Decl;
+ CharUnits Offset;
+ unsigned Index;
+
+ bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
+};
+}
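
A self-contained sketch of the visit-order computation that follows (simplified struct, same comparison): bases arrive in declaration order but must be emitted in address order, and stable_sort preserves declaration order among equal offsets.

    #include <algorithm>
    #include <vector>

    struct Info {
      unsigned Offset, Index;
      bool operator<(const Info &O) const { return Offset < O.Offset; }
    };

    std::vector<Info> addressOrder(std::vector<Info> bases) {
      std::stable_sort(bases.begin(), bases.end());
      return bases;
    }
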
+
+void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
+ bool IsPrimaryBase, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ CharUnits Offset) {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Add a vtable pointer, if we need one and it hasn't already been added.
+ if (CD->isDynamicClass() && !IsPrimaryBase)
+ AppendVTablePointer(BaseSubobject(CD, Offset), VTable, VTableClass);
+
+ // Accumulate and sort bases, in order to visit them in address order, which
+ // may not be the same as declaration order.
+ llvm::SmallVector<BaseInfo, 8> Bases;
+ Bases.reserve(CD->getNumBases());
+ unsigned BaseNo = 0;
+ for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
+ BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
+ assert(!Base->isVirtual() && "should not have virtual bases here");
+ const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
+ CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
+ Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
+ }
+ std::stable_sort(Bases.begin(), Bases.end());
+
+ for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
+ BaseInfo &Base = Bases[I];
+
+ bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
+ Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
+ VTable, VTableClass, Offset + Base.Offset);
+ }
+ }
+
+ unsigned FieldNo = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ uint64_t OffsetBits = CGM.getContext().toBits(Offset);
+
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = (*Field);
+ }
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && Val.getUnionField() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields; they just affect layout.
+ if (Field->isUnnamedBitfield()) {
+ LastFD = (*Field);
+ continue;
+ }
+
+ // Emit the value of the initializer.
+ const APValue &FieldValue =
+ RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
+ llvm::Constant *EltInit =
+ CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
+ assert(EltInit && "EmitConstantValue can't fail");
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
+ } else {
+ // Otherwise we have a bitfield.
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
+ cast<llvm::ConstantInt>(EltInit));
+ }
+ }
+}
+
+llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
+ RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
CharUnits LayoutSizeInChars = Layout.getSize();
if (NextFieldOffsetInChars > LayoutSizeInChars) {
@@ -398,66 +532,85 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
// we must have a flexible array member at the end.
assert(RD->hasFlexibleArrayMember() &&
"Must have flexible array member if struct is bigger than type!");
-
+
// No tail padding is necessary.
- return true;
- }
+ } else {
+ // Append tail padding if necessary.
+ AppendTailPadding(LayoutSizeInChars);
- CharUnits LLVMSizeInChars =
- NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+ CharUnits LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
- // Check if we need to convert the struct to a packed struct.
- if (NextFieldOffsetInChars <= LayoutSizeInChars &&
- LLVMSizeInChars > LayoutSizeInChars) {
- assert(!Packed && "Size mismatch!");
-
- ConvertStructToPacked();
- assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
- "Converting to packed did not help!");
- }
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInChars <= LayoutSizeInChars &&
+ LLVMSizeInChars > LayoutSizeInChars) {
+ assert(!Packed && "Size mismatch!");
- // Append tail padding if necessary.
- AppendTailPadding(LayoutSizeInChars);
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
+ "Converting to packed did not help!");
+ }
- assert(LayoutSizeInChars == NextFieldOffsetInChars &&
- "Tail padding mismatch!");
+ assert(LayoutSizeInChars == NextFieldOffsetInChars &&
+ "Tail padding mismatch!");
+ }
- return true;
-}
-
-llvm::Constant *ConstStructBuilder::
- BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
- ConstStructBuilder Builder(CGM, CGF);
-
- if (!Builder.Build(ILE))
- return 0;
-
// Pick the type to use. If the type is layout identical to the ConvertType
// type then use it, otherwise use whatever the builder produced for us.
llvm::StructType *STy =
llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
- Builder.Elements,Builder.Packed);
- llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
- if (llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
- if (ILESTy->isLayoutIdentical(STy))
- STy = ILESTy;
+ Elements, Packed);
+ llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
+ if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
+ if (ValSTy->isLayoutIdentical(STy))
+ STy = ValSTy;
}
-
- llvm::Constant *Result =
- llvm::ConstantStruct::get(STy, Builder.Elements);
-
- assert(Builder.NextFieldOffsetInChars.RoundUpToAlignment(
- Builder.getAlignment(Result)) ==
- Builder.getSizeInChars(Result) && "Size mismatch!");
-
+
+ llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
+
+ assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
+ getSizeInChars(Result) && "Size mismatch!");
+
return Result;
}
-
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return 0;
+
+ return Builder.Finalize(ILE->getType());
+}
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ const APValue &Val,
+ QualType ValTy) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ llvm::Constant *VTable = 0;
+ if (CD && CD->isDynamicClass())
+ VTable = CGM.getVTables().GetAddrOfVTable(CD);
+
+ Builder.Build(Val, RD, false, VTable, CD, CharUnits::Zero());
+
+ return Builder.Finalize(ValTy);
+}
+
+
//===----------------------------------------------------------------------===//
// ConstExprEmitter
//===----------------------------------------------------------------------===//
-
+
+/// This class only needs to handle two cases:
+/// 1) Literals (this is used by APValue emission to emit literals).
+/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
+/// constant fold these types).
class ConstExprEmitter :
public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
CodeGenModule &CGM;
@@ -493,34 +646,6 @@ public:
return Visit(E->getInitializer());
}
- llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
- if (E->getType()->isMemberPointerType())
- return CGM.getMemberPointerConstant(E);
-
- return 0;
- }
-
- llvm::Constant *VisitBinSub(BinaryOperator *E) {
- // This must be a pointer/pointer subtraction. This only happens for
- // address of label.
- if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
- !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
- return 0;
-
- llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
- E->getLHS()->getType(), CGF);
- llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
- E->getRHS()->getType(), CGF);
-
- llvm::Type *ResultType = ConvertType(E->getType());
- LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
- RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
-
- // No need to divide by element size, since addr of label is always void*,
- // which has size 1 in GNUish.
- return llvm::ConstantExpr::getSub(LHS, RHS);
- }
-
llvm::Constant *VisitCastExpr(CastExpr* E) {
Expr *subExpr = E->getSubExpr();
llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
@@ -536,8 +661,8 @@ public:
// Build a struct with the union sub-element as the first member,
// and padded to the appropriate size
- std::vector<llvm::Constant*> Elts;
- std::vector<llvm::Type*> Types;
+ SmallVector<llvm::Constant*, 2> Elts;
+ SmallVector<llvm::Type*, 2> Types;
Elts.push_back(C);
Types.push_back(C->getType());
unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
@@ -545,7 +670,7 @@ public:
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
- llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ llvm::Type *Ty = CGM.Int8Ty;
if (NumPadBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumPadBytes);
@@ -557,46 +682,41 @@ public:
llvm::StructType::get(C->getType()->getContext(), Types, false);
return llvm::ConstantStruct::get(STy, Elts);
}
- case CK_NullToMemberPointer: {
- const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
- return CGM.getCXXABI().EmitNullMemberPointer(MPT);
- }
-
- case CK_DerivedToBaseMemberPointer:
- case CK_BaseToDerivedMemberPointer:
- return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
return C;
- case CK_CPointerToObjCPointerCast:
- case CK_BlockPointerToObjCPointerCast:
- case CK_AnyPointerToBlockPointerCast:
- case CK_LValueBitCast:
- case CK_BitCast:
- if (C->getType() == destType) return C;
- return llvm::ConstantExpr::getBitCast(C, destType);
-
case CK_Dependent: llvm_unreachable("saw dependent cast!");
+ case CK_ReinterpretMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
+
// These will never be supported.
case CK_ObjCObjectLValueCast:
- case CK_GetObjCProperty:
- case CK_ToVoid:
- case CK_Dynamic:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
return 0;
- // These might need to be supported for constexpr.
+ // These don't need to be handled here because Evaluate knows how to
+ // evaluate them in the cases where they can be folded.
+ case CK_BitCast:
+ case CK_ToVoid:
+ case CK_Dynamic:
+ case CK_LValueBitCast:
+ case CK_NullToMemberPointer:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
- return 0;
-
- // These should eventually be supported.
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_BaseToDerived:
@@ -614,53 +734,17 @@ public:
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
- return 0;
-
case CK_PointerToIntegral:
- if (!E->getType()->isBooleanType())
- return llvm::ConstantExpr::getPtrToInt(C, destType);
- // fallthrough
-
case CK_PointerToBoolean:
- return llvm::ConstantExpr::getICmp(llvm::CmpInst::ICMP_EQ, C,
- llvm::ConstantPointerNull::get(cast<llvm::PointerType>(C->getType())));
-
case CK_NullToPointer:
- return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destType));
-
- case CK_IntegralCast: {
- bool isSigned = subExpr->getType()->isSignedIntegerOrEnumerationType();
- return llvm::ConstantExpr::getIntegerCast(C, destType, isSigned);
- }
-
- case CK_IntegralToPointer: {
- bool isSigned = subExpr->getType()->isSignedIntegerOrEnumerationType();
- C = llvm::ConstantExpr::getIntegerCast(C, CGM.IntPtrTy, isSigned);
- return llvm::ConstantExpr::getIntToPtr(C, destType);
- }
-
+ case CK_IntegralCast:
+ case CK_IntegralToPointer:
case CK_IntegralToBoolean:
- return llvm::ConstantExpr::getICmp(llvm::CmpInst::ICMP_EQ, C,
- llvm::Constant::getNullValue(C->getType()));
-
case CK_IntegralToFloating:
- if (subExpr->getType()->isSignedIntegerOrEnumerationType())
- return llvm::ConstantExpr::getSIToFP(C, destType);
- else
- return llvm::ConstantExpr::getUIToFP(C, destType);
-
case CK_FloatingToIntegral:
- if (E->getType()->isSignedIntegerOrEnumerationType())
- return llvm::ConstantExpr::getFPToSI(C, destType);
- else
- return llvm::ConstantExpr::getFPToUI(C, destType);
-
case CK_FloatingToBoolean:
- return llvm::ConstantExpr::getFCmp(llvm::CmpInst::FCMP_UNE, C,
- llvm::Constant::getNullValue(C->getType()));
-
case CK_FloatingCast:
- return llvm::ConstantExpr::getFPCast(C, destType);
+ return 0;
}
llvm_unreachable("Invalid CastKind");
}
@@ -675,12 +759,13 @@ public:
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
unsigned NumInitElements = ILE->getNumInits();
- if (NumInitElements == 1 && ILE->getType() == ILE->getInit(0)->getType() &&
+ if (NumInitElements == 1 &&
+ CGM.getContext().hasSameUnqualifiedType(ILE->getType(),
+ ILE->getInit(0)->getType()) &&
(isa<StringLiteral>(ILE->getInit(0)) ||
isa<ObjCEncodeExpr>(ILE->getInit(0))))
return Visit(ILE->getInit(0));
- std::vector<llvm::Constant*> Elts;
llvm::ArrayType *AType =
cast<llvm::ArrayType>(ConvertType(ILE->getType()));
llvm::Type *ElemTy = AType->getElementType();
@@ -691,9 +776,11 @@ public:
unsigned NumInitableElts = std::min(NumInitElements, NumElements);
// Copy initializer elements.
- unsigned i = 0;
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumInitableElts + NumElements);
+
bool RewriteType = false;
- for (; i < NumInitableElts; ++i) {
+ for (unsigned i = 0; i < NumInitableElts; ++i) {
Expr *Init = ILE->getInit(i);
llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
if (!C)
@@ -712,13 +799,13 @@ public:
if (!fillC)
return 0;
RewriteType |= (fillC->getType() != ElemTy);
- for (; i < NumElements; ++i)
- Elts.push_back(fillC);
+ Elts.resize(NumElements, fillC);
if (RewriteType) {
// FIXME: Try to avoid packing the array
std::vector<llvm::Type*> Types;
- for (unsigned i = 0; i < Elts.size(); ++i)
+ Types.reserve(NumInitableElts + NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
Types.push_back(Elts[i]->getType());
llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
Types, true);
@@ -741,31 +828,6 @@ public:
}
llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
- if (ILE->getType()->isAnyComplexType() && ILE->getNumInits() == 2) {
- // Complex type with element initializers
- Expr *Real = ILE->getInit(0);
- Expr *Imag = ILE->getInit(1);
- llvm::Constant *Complex[2];
- Complex[0] = CGM.EmitConstantExpr(Real, Real->getType(), CGF);
- if (!Complex[0])
- return 0;
- Complex[1] = CGM.EmitConstantExpr(Imag, Imag->getType(), CGF);
- if (!Complex[1])
- return 0;
- llvm::StructType *STy =
- cast<llvm::StructType>(ConvertType(ILE->getType()));
- return llvm::ConstantStruct::get(STy, Complex);
- }
-
- if (ILE->getType()->isScalarType()) {
- // We have a scalar in braces. Just use the first element.
- if (ILE->getNumInits() > 0) {
- Expr *Init = ILE->getInit(0);
- return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
- }
- return CGM.EmitNullConstant(ILE->getType());
- }
-
if (ILE->getType()->isArrayType())
return EmitArrayInitialization(ILE);
@@ -775,11 +837,7 @@ public:
if (ILE->getType()->isUnionType())
return EmitUnionInitialization(ILE);
- // If ILE was a constant vector, we would have handled it already.
- if (ILE->getType()->isVectorType())
- return 0;
-
- llvm_unreachable("Unable to handle InitListExpr");
+ return 0;
}
llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
@@ -817,13 +875,7 @@ public:
}
llvm::Constant *VisitStringLiteral(StringLiteral *E) {
- assert(!E->getType()->isPointerType() && "Strings are always arrays");
-
- // This must be a string initializing an array in a static initializer.
- // Don't emit it as the address of the string, emit the string data itself
- // as an inline array.
- return llvm::ConstantArray::get(VMContext,
- CGM.GetStringForStringLiteral(E), false);
+ return CGM.GetConstantArrayFromStringLiteral(E);
}
llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
@@ -837,7 +889,7 @@ public:
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
Str.resize(CAT->getSize().getZExtValue(), '\0');
- return llvm::ConstantArray::get(VMContext, Str, false);
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
@@ -850,25 +902,8 @@ public:
}
public:
- llvm::Constant *EmitLValue(Expr *E) {
- switch (E->getStmtClass()) {
- default: break;
- case Expr::CompoundLiteralExprClass: {
- // Note that due to the nature of compound literals, this is guaranteed
- // to be the only use of the variable, so we just generate it here.
- CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
- llvm::Constant* C = Visit(CLE->getInitializer());
- // FIXME: "Leaked" on failure.
- if (C)
- C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
- E->getType().isConstant(CGM.getContext()),
- llvm::GlobalValue::InternalLinkage,
- C, ".compoundliteral", 0, false,
- CGM.getContext().getTargetAddressSpace(E->getType()));
- return C;
- }
- case Expr::DeclRefExprClass: {
- ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
+ llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
+ if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
if (Decl->hasAttr<WeakRefAttr>())
return CGM.GetWeakRefReference(Decl);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
@@ -884,7 +919,26 @@ public:
}
}
}
- break;
+ return 0;
+ }
+
+ Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
+ CLE->getType(), CGF);
+ // FIXME: "Leaked" on failure.
+ if (C)
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ E->getType().isConstant(CGM.getContext()),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", 0, false,
+ CGM.getContext().getTargetAddressSpace(E->getType()));
+ return C;
}
case Expr::StringLiteralClass:
return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
@@ -915,7 +969,7 @@ public:
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
- unsigned builtin = CE->isBuiltinCall(CGM.getContext());
+ unsigned builtin = CE->isBuiltinCall();
if (builtin !=
Builtin::BI__builtin___CFStringMakeConstantString &&
builtin !=
@@ -939,6 +993,15 @@ public:
return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
}
+ case Expr::CXXTypeidExprClass: {
+ CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
+ QualType T;
+ if (Typeid->isTypeOperand())
+ T = Typeid->getTypeOperand();
+ else
+ T = Typeid->getExprOperand()->getType();
+ return CGM.GetAddrOfRTTIDescriptor(T);
+ }
}
return 0;
@@ -947,6 +1010,31 @@ public:
} // end anonymous namespace.
+llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
+ CodeGenFunction *CGF) {
+ if (const APValue *Value = D.evaluateValue())
+ return EmitConstantValueForMemory(*Value, D.getType(), CGF);
+
+ // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
+ // reference is a constant expression, and the reference binds to a temporary,
+ // then constant initialization is performed. ConstExprEmitter will
+ // incorrectly emit a prvalue constant in this case, and the calling code
+ // interprets that as the (pointer) value of the reference, rather than the
+ // desired value of the referee.
+ if (D.getType()->isReferenceType())
+ return 0;
+
+ const Expr *E = D.getInit();
+ assert(E && "No initializer to emit");
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
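
Hypothetical globals showing the two paths through the new EmitConstantInit: a successfully evaluated initializer is emitted from its APValue, while a reference bound to a temporary hits the bail-out described by the FIXME.

    constexpr int make() { return 42; }

    const int  g1 = make(); // evaluateValue() succeeds: emitted as a constant
    const int &g2 = make(); // reference to a temporary: per the FIXME above,
                            // constant emission is skipped and g2 is
                            // initialized dynamically instead
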
+
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
QualType DestType,
CodeGenFunction *CGF) {
@@ -957,142 +1045,207 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
if (DestType->isReferenceType())
Success = E->EvaluateAsLValue(Result, Context);
else
- Success = E->Evaluate(Result, Context);
-
- if (Success && !Result.HasSideEffects) {
- switch (Result.Val.getKind()) {
- case APValue::Uninitialized:
- llvm_unreachable("Constant expressions should be initialized.");
- case APValue::LValue: {
- llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
- llvm::Constant *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- Result.Val.getLValueOffset().getQuantity());
-
- llvm::Constant *C;
- if (const Expr *LVBase = Result.Val.getLValueBase()) {
- C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
-
- // Apply offset if necessary.
- if (!Offset->isNullValue()) {
- llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
- llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
- Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
- C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
- }
+ Success = E->EvaluateAsRValue(Result, Context);
- // Convert to the appropriate type; this could be an lvalue for
- // an integer.
- if (isa<llvm::PointerType>(DestTy))
- return llvm::ConstantExpr::getBitCast(C, DestTy);
+ llvm::Constant *C = 0;
+ if (Success && !Result.HasSideEffects)
+ C = EmitConstantValue(Result.Val, DestType, CGF);
+ else
+ C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
- return llvm::ConstantExpr::getPtrToInt(C, DestTy);
- } else {
- C = Offset;
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
- // Convert to the appropriate type; this could be an lvalue for
- // an integer.
- if (isa<llvm::PointerType>(DestTy))
- return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ switch (Value.getKind()) {
+ case APValue::Uninitialized:
+ llvm_unreachable("Constant expressions should be initialized.");
+ case APValue::LValue: {
+ llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
+
+ llvm::Constant *C;
+ if (APValue::LValueBase LVBase = Value.getLValueBase()) {
+ // An array can be represented as an lvalue referring to the base.
+ if (isa<llvm::ArrayType>(DestTy)) {
+ assert(Offset->isNullValue() && "offset on array initializer");
+ return ConstExprEmitter(*this, CGF).Visit(
+ const_cast<Expr*>(LVBase.get<const Expr*>()));
+ }
- // If the types don't match this should only be a truncate.
- if (C->getType() != DestTy)
- return llvm::ConstantExpr::getTrunc(C, DestTy);
+ C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
- return C;
+ // Apply offset if necessary.
+ if (!Offset->isNullValue()) {
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Int8PtrTy);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
+ C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
}
- }
- case APValue::Int: {
- llvm::Constant *C = llvm::ConstantInt::get(VMContext,
- Result.Val.getInt());
- if (C->getType()->isIntegerTy(1)) {
- llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
- C = llvm::ConstantExpr::getZExt(C, BoolTy);
- }
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+ // If the types don't match this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
return C;
}
- case APValue::ComplexInt: {
- llvm::Constant *Complex[2];
-
- Complex[0] = llvm::ConstantInt::get(VMContext,
- Result.Val.getComplexIntReal());
- Complex[1] = llvm::ConstantInt::get(VMContext,
- Result.Val.getComplexIntImag());
-
- // FIXME: the target may want to specify that this is packed.
- llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
- Complex[1]->getType(),
- NULL);
- return llvm::ConstantStruct::get(STy, Complex);
- }
- case APValue::Float: {
- const llvm::APFloat &Init = Result.Val.getFloat();
- if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf)
- return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
+ }
+ case APValue::Int:
+ return llvm::ConstantInt::get(VMContext, Value.getInt());
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Float: {
+ const llvm::APFloat &Init = Value.getFloat();
+ if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf)
+ return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
+ else
+ return llvm::ConstantFP::get(VMContext, Init);
+ }
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Vector: {
+ SmallVector<llvm::Constant *, 4> Inits;
+ unsigned NumElts = Value.getVectorLength();
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ const APValue &Elt = Value.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
else
- return llvm::ConstantFP::get(VMContext, Init);
- }
- case APValue::ComplexFloat: {
- llvm::Constant *Complex[2];
-
- Complex[0] = llvm::ConstantFP::get(VMContext,
- Result.Val.getComplexFloatReal());
- Complex[1] = llvm::ConstantFP::get(VMContext,
- Result.Val.getComplexFloatImag());
-
- // FIXME: the target may want to specify that this is packed.
- llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
- Complex[1]->getType(),
- NULL);
- return llvm::ConstantStruct::get(STy, Complex);
+ Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
}
- case APValue::Vector: {
- SmallVector<llvm::Constant *, 4> Inits;
- unsigned NumElts = Result.Val.getVectorLength();
-
- if (Context.getLangOptions().AltiVec &&
- isa<CastExpr>(E) &&
- cast<CastExpr>(E)->getCastKind() == CK_VectorSplat) {
- // AltiVec vector initialization with a single literal
- APValue &Elt = Result.Val.getVectorElt(0);
-
- llvm::Constant* InitValue = Elt.isInt()
- ? cast<llvm::Constant>
- (llvm::ConstantInt::get(VMContext, Elt.getInt()))
- : cast<llvm::Constant>
- (llvm::ConstantFP::get(VMContext, Elt.getFloat()));
-
- for (unsigned i = 0; i != NumElts; ++i)
- Inits.push_back(InitValue);
+ return llvm::ConstantVector::get(Inits);
+ }
+ case APValue::AddrLabelDiff: {
+ const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
+ const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
+ llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
+ llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);
+
+ // Compute difference
+ llvm::Type *ResultType = getTypes().ConvertType(DestType);
+ LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
+ RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
+ llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);
+
+ // LLVM is a bit sensitive about the exact format of the
+ // address-of-label difference; make sure to truncate after
+ // the subtraction.
+ return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
+ }
+ case APValue::Struct:
+ case APValue::Union:
+ return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
+ case APValue::Array: {
+ const ArrayType *CAT = Context.getAsArrayType(DestType);
+ unsigned NumElements = Value.getArraySize();
+ unsigned NumInitElts = Value.getArrayInitializedElts();
- } else {
- for (unsigned i = 0; i != NumElts; ++i) {
- APValue &Elt = Result.Val.getVectorElt(i);
- if (Elt.isInt())
- Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
- else
- Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
- }
- }
- return llvm::ConstantVector::get(Inits);
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumElements);
+
+ // Emit array filler, if there is one.
+ llvm::Constant *Filler = 0;
+ if (Value.hasArrayFiller())
+ Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
+ CAT->getElementType(), CGF);
+
+ // Emit initializer elements.
+ llvm::Type *CommonElementType = 0;
+ for (unsigned I = 0; I < NumElements; ++I) {
+ llvm::Constant *C = Filler;
+ if (I < NumInitElts)
+ C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
+ CAT->getElementType(), CGF);
+ if (I == 0)
+ CommonElementType = C->getType();
+ else if (C->getType() != CommonElementType)
+ CommonElementType = 0;
+ Elts.push_back(C);
}
+
+ if (!CommonElementType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<llvm::Type*> Types;
+ Types.reserve(NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
}
+
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(CommonElementType, NumElements);
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+ case APValue::MemberPointer:
+ return getCXXABI().EmitMemberPointer(Value, DestType);
}
+ llvm_unreachable("Unknown APValue kind");
+}
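
The AddrLabelDiff case above corresponds to GNU labels-as-values; a classic computed-goto offset table (GNU extension, illustrative) is the kind of source that produces it, emitted as the ptrtoint/sub/trunc constant the comment describes.

    #include <cstddef>

    int dispatch(int i) {
      static const std::ptrdiff_t offs[] = { &&L0 - &&L0, &&L1 - &&L0 };
      goto *(&&L0 + offs[i]);
    L0:
      return 0;
    L1:
      return 1;
    }
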
- llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
- if (C && C->getType()->isIntegerTy(1)) {
- llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+llvm::Constant *
+CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
+ if (C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
return C;
}
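
The i1-to-memory widening in EmitConstantValueForMemory exists because `bool` is i1 as an LLVM value but occupies a full byte in storage; a one-line illustration (assumed IR shapes):

    const bool flag = true;        // in-memory initializer: i8 1, not i1 true
    bool probe() { return flag; }  // load yields i8, truncated back to i1
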
-static uint64_t getFieldOffset(ASTContext &C, const FieldDecl *field) {
- const ASTRecordLayout &layout = C.getASTRecordLayout(field->getParent());
- return layout.getFieldOffset(field->getFieldIndex());
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
+ assert(E->isFileScope() && "not a file-scope compound literal expr");
+ return ConstExprEmitter(*this, 0).EmitLValue(E);
}
-
+
llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
// Member pointer constants always have a very particular form.
@@ -1104,25 +1257,14 @@ CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
return getCXXABI().EmitMemberPointer(method);
// Otherwise, a member data pointer.
- uint64_t fieldOffset;
- if (const FieldDecl *field = dyn_cast<FieldDecl>(decl))
- fieldOffset = getFieldOffset(getContext(), field);
- else {
- const IndirectFieldDecl *ifield = cast<IndirectFieldDecl>(decl);
-
- fieldOffset = 0;
- for (IndirectFieldDecl::chain_iterator ci = ifield->chain_begin(),
- ce = ifield->chain_end(); ci != ce; ++ci)
- fieldOffset += getFieldOffset(getContext(), cast<FieldDecl>(*ci));
- }
-
+ uint64_t fieldOffset = getContext().getFieldOffset(decl);
CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
return getCXXABI().EmitMemberDataPointer(type, chars);
}
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
- std::vector<llvm::Constant *> &Elements,
+ SmallVectorImpl<llvm::Constant *> &Elements,
uint64_t StartOffset) {
assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
"StartOffset not byte aligned!");
@@ -1189,8 +1331,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
// FIXME: hardcodes Itanium member pointer representation!
llvm::Constant *NegativeOne =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
- -1ULL, /*isSigned*/true);
+ llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);
// Fill in the null data member pointer.
for (CharUnits I = StartIndex; I != EndIndex; ++I)
@@ -1238,13 +1379,17 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
for (RecordDecl::field_iterator I = record->field_begin(),
E = record->field_end(); I != E; ++I) {
const FieldDecl *field = *I;
-
- // Ignore bit fields.
- if (field->isBitField())
- continue;
-
- unsigned fieldIndex = layout.getLLVMFieldNo(field);
- elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+
+ // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
+ // will fill in later.)
+ if (!field->isBitField()) {
+ unsigned fieldIndex = layout.getLLVMFieldNo(field);
+ elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+ }
+
+ // For unions, stop after the first named field.
+ if (record->isUnion() && field->getDeclName())
+ break;
}
// Fill in the virtual bases, if we're working with the complete object.
@@ -1299,14 +1444,13 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
unsigned numBaseElements = baseArrayType->getNumElements();
// Fill in null data member pointers.
- std::vector<llvm::Constant *> baseElements(numBaseElements);
+ SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
baseElements, 0);
// Now go through all other elements and zero them out.
if (numBaseElements) {
- llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
- llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
+ llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
for (unsigned i = 0; i != numBaseElements; ++i) {
if (!baseElements[i])
baseElements[i] = i8_zero;
@@ -1321,17 +1465,18 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+ llvm::ArrayType *ATy =
+ cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
QualType ElementTy = CAT->getElementType();
llvm::Constant *Element = EmitNullConstant(ElementTy);
unsigned NumElements = CAT->getSize().getZExtValue();
- std::vector<llvm::Constant *> Array(NumElements);
- for (unsigned i = 0; i != NumElements; ++i)
- Array[i] = Element;
-
- llvm::ArrayType *ATy =
- cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+
+ if (Element->isNullValue())
+ return llvm::ConstantAggregateZero::get(ATy);
+
+ SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
return llvm::ConstantArray::get(ATy, Array);
}
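
Illustrative effect of the new early-out (struct invented): when the element's null constant is itself all-zero, a large zeroed array collapses to a single ConstantAggregateZero instead of N copied elements. Types whose null value is not all-zero, such as Itanium pointers to data members, still take the explicit-array path.

    struct P {
      int x;
      int *p;
    };

    P table[1000] = {};  // one zeroinitializer, not 1000 explicit elements
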
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
index 3a9fbee..18891f7 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -177,6 +177,9 @@ public:
Value *VisitCharacterLiteral(const CharacterLiteral *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
+ Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
@@ -197,6 +200,10 @@ public:
return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
}
+ Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getScalarVal();
+ }
+
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
@@ -204,33 +211,17 @@ public:
// Otherwise, assume the mapping is the scalar directly.
return CGF.getOpaqueRValueMapping(E).getScalarVal();
}
-
+
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
- Expr::EvalResult Result;
- if (!E->Evaluate(Result, CGF.getContext()))
- return EmitLoadOfLValue(E);
-
- assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
-
- llvm::Constant *C;
- if (Result.Val.isInt())
- C = Builder.getInt(Result.Val.getInt());
- else if (Result.Val.isFloat())
- C = llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
- else
- return EmitLoadOfLValue(E);
-
- // Make sure we emit a debug reference to the global variable.
- if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) {
- if (!CGF.getContext().DeclMustBeEmitted(VD))
- CGF.EmitDeclRefExprDbgValue(E, C);
- } else if (isa<EnumConstantDecl>(E->getDecl())) {
- CGF.EmitDeclRefExprDbgValue(E, C);
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+ return result.getValue();
}
-
- return C;
+ return EmitLoadOfLValue(E);
}
+
Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
return CGF.EmitObjCSelectorExpr(E);
}
@@ -240,11 +231,6 @@ public:
Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
- Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- assert(E->getObjectKind() == OK_Ordinary &&
- "reached property reference without lvalue-to-rvalue");
- return EmitLoadOfLValue(E);
- }
Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
if (E->getMethodDecl() &&
E->getMethodDecl()->getResultType()->isReferenceType())
@@ -287,8 +273,6 @@ public:
Value *VisitStmtExpr(const StmtExpr *E);
- Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
-
// Unary Operators.
Value *VisitUnaryPostDec(const UnaryOperator *E) {
LValue LV = EmitLValue(E->getSubExpr());
@@ -354,7 +338,9 @@ public:
}
Value *VisitExprWithCleanups(ExprWithCleanups *E) {
- return CGF.EmitExprWithCleanups(E).getScalarVal();
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
}
Value *VisitCXXNewExpr(const CXXNewExpr *E) {
return CGF.EmitCXXNewExpr(E);
@@ -405,7 +391,7 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
case LangOptions::SOB_Defined:
@@ -420,7 +406,7 @@ public:
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
bool isTrapvOverflowBehavior() {
- return CGF.getContext().getLangOptions().getSignedOverflowBehavior()
+ return CGF.getContext().getLangOpts().getSignedOverflowBehavior()
== LangOptions::SOB_Trapping;
}
/// Create a binary op that checks for overflow.
@@ -512,6 +498,15 @@ public:
Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
return CGF.EmitObjCStringLiteral(E);
}
+ Value *VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ return CGF.EmitObjCNumericLiteral(E);
+ }
+ Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ return CGF.EmitObjCArrayLiteral(E);
+ }
+ Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ return CGF.EmitObjCDictionaryLiteral(E);
+ }
Value *VisitAsTypeExpr(AsTypeExpr *CE);
Value *VisitAtomicExpr(AtomicExpr *AE);
};
@@ -559,7 +554,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (SrcType->isHalfType()) {
Src = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16), Src);
SrcType = CGF.getContext().FloatTy;
- SrcTy = llvm::Type::getFloatTy(VMContext);
+ SrcTy = CGF.FloatTy;
}
// Handle conversions to bool first, they are special: comparisons against 0.
@@ -609,12 +604,9 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
// Splat the element across to all elements
- SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
- for (unsigned i = 0; i != NumElements; ++i)
- Args.push_back(Builder.getInt32(0));
-
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements,
+ Builder.getInt32(0));
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
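
The same two-line replacement recurs in the shuffle-mask and CK_VectorSplat hunks below: ConstantVector::getSplat(N, C) builds the <C, C, ..., C> mask that the deleted push_back loops assembled by hand. A hedged standalone sketch, again against the 3.x-era API:

// Sketch: splat a scalar across a vector via insertelement plus a
// shufflevector whose mask is built in one call instead of a loop.
llvm::Value *emitSplat(llvm::IRBuilder<> &B, llvm::Value *Scalar,
                       llvm::Type *VecTy, unsigned NumElements) {
  llvm::Value *Undef = llvm::UndefValue::get(VecTy);
  llvm::Value *V = B.CreateInsertElement(Undef, Scalar, B.getInt32(0));
  llvm::Constant *Mask =
      llvm::ConstantVector::getSplat(NumElements, B.getInt32(0));
  return B.CreateShuffleVector(V, Undef, Mask, "splat");
}
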
@@ -630,7 +622,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Cast to half via float
if (DstType->isHalfType())
- DstTy = llvm::Type::getFloatTy(VMContext);
+ DstTy = CGF.FloatTy;
if (isa<llvm::IntegerType>(SrcTy)) {
bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
@@ -748,11 +740,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
(1 << llvm::Log2_32(LHSElts))-1);
// Mask off the high bits of each shuffle index.
- SmallVector<llvm::Constant *, 32> MaskV;
- for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
- MaskV.push_back(EltMask);
-
- Value* MaskBits = llvm::ConstantVector::get(MaskV);
+ Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(),
+ EltMask);
Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
// newv = undef
@@ -765,8 +754,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
MTy->getNumElements());
Value* NewV = llvm::UndefValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
- Value *Indx = Builder.getInt32(i);
- Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
+ Value *IIndx = Builder.getInt32(i);
+ Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
// Handle vec3 special since the index will be off by one for the RHS.
@@ -778,7 +767,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
}
Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
- NewV = Builder.CreateInsertElement(NewV, VExt, Indx, "shuf_ins");
+ NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
}
return NewV;
}
@@ -800,13 +789,13 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
- Expr::EvalResult Result;
- if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
if (E->isArrow())
CGF.EmitScalarExpr(E->getBase());
else
EmitLValue(E->getBase());
- return Builder.getInt(Result.Val.getInt());
+ return Builder.getInt(Value);
}
// Emit debug info for aggregate now, if it was delayed to reduce
@@ -900,8 +889,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (CurIdx == 0) {
// insert into undef -> shuffle (src, undef)
Args.push_back(C);
- for (unsigned j = 1; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
LHS = EI->getVectorOperand();
RHS = V;
@@ -912,9 +900,8 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
for (unsigned j = 0; j != CurIdx; ++j)
Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
- for (unsigned j = CurIdx + 1; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
-
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
RHS = EI->getVectorOperand();
VIsUndefShuffle = false;
@@ -958,8 +945,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
for (unsigned j = 0, je = InitElts; j != je; ++j)
Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
- for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
if (VIsUndefShuffle)
V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
@@ -973,8 +959,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (Args.empty()) {
for (unsigned j = 0; j != InitElts; ++j)
Args.push_back(Builder.getInt32(j));
- for (unsigned j = InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
llvm::Constant *Mask = llvm::ConstantVector::get(Args);
Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
Mask, "vext");
@@ -984,8 +969,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.push_back(Builder.getInt32(j));
for (unsigned j = 0; j != InitElts; ++j)
Args.push_back(Builder.getInt32(j+Offset));
- for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
}
// If V is undef, make sure it ends up on the RHS of the shuffle to aid
@@ -1053,7 +1037,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *V = EmitLValue(E).getAddress();
V = Builder.CreateBitCast(V,
ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy));
+ return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy));
}
case CK_CPointerToObjCPointerCast:
@@ -1063,6 +1047,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *Src = Visit(const_cast<Expr*>(E));
return Builder.CreateBitCast(Src, ConvertType(DestTy));
}
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_UserDefinedConversion:
return Visit(const_cast<Expr*>(E));
@@ -1130,6 +1116,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
}
+ case CK_ReinterpretMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer: {
Value *Src = Visit(E);
@@ -1155,6 +1142,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_ARCExtendBlockObject:
return CGF.EmitARCExtendBlockObject(E);
+ case CK_CopyAndAutoreleaseBlockObject:
+ return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
@@ -1164,15 +1154,6 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_ConstructorConversion:
case CK_ToUnion:
llvm_unreachable("scalar cast to non-scalar value");
- break;
-
- case CK_GetObjCProperty: {
- assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
- assert(E->isGLValue() && E->getObjectKind() == OK_ObjCProperty &&
- "CK_GetObjCProperty for non-lvalue or non-ObjCProperty");
- RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E));
- return RV.getScalarVal();
- }
case CK_LValueToRValue:
assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
@@ -1202,6 +1183,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_VectorSplat: {
llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
+ Elt = EmitScalarConversion(Elt, E->getType(),
+ DestTy->getAs<VectorType>()->getElementType());
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
@@ -1209,13 +1192,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
// Splat the element across to all elements
- SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
llvm::Constant *Zero = Builder.getInt32(0);
- for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(Zero);
-
- llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements, Zero);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
@@ -1252,7 +1231,6 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
llvm_unreachable("unknown scalar cast");
- return 0;
}
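
The dropped `return 0;` here (and the `break`s removed from the AltiVec switch further down) were dead code after llvm_unreachable, which tells both the optimizer and return-type diagnostics that control cannot reach that point. A small sketch of the pattern, assuming llvm/Support/ErrorHandling.h is included:

int classify(int Kind) {
  switch (Kind) {
  case 0: return 10;
  case 1: return 20;
  }
  // Asserts in +Asserts builds, __builtin_unreachable() in release builds;
  // no trailing 'return 0;' is needed or wanted after it.
  llvm_unreachable("unknown kind");
}
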
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
@@ -1261,11 +1239,6 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
.getScalarVal();
}
-Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
- LValue LV = CGF.EmitBlockDeclRefLValue(E);
- return CGF.EmitLoadOfLValue(LV).getScalarVal();
-}
-
//===----------------------------------------------------------------------===//
// Unary Operators
//===----------------------------------------------------------------------===//
@@ -1274,13 +1247,11 @@ llvm::Value *ScalarExprEmitter::
EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
llvm::Value *InVal,
llvm::Value *NextVal, bool IsInc) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
- break;
case LangOptions::SOB_Defined:
return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
- break;
case LangOptions::SOB_Trapping:
BinOpInfo BinOp;
BinOp.LHS = InVal;
@@ -1300,9 +1271,21 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
QualType type = E->getSubExpr()->getType();
llvm::Value *value = EmitLoadOfLValue(LV);
llvm::Value *input = value;
+ llvm::PHINode *atomicPHI = 0;
int amount = (isInc ? 1 : -1);
+ if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(value->getType(), 2);
+ atomicPHI->addIncoming(value, startBB);
+ type = atomicTy->getValueType();
+ value = atomicPHI;
+ }
+
// Special case of integer increment that we have to check first: bool++.
// Due to promotion rules, we get:
// bool++ -> bool = bool + 1
@@ -1336,7 +1319,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).first;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
else
value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
@@ -1346,7 +1329,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = Builder.getInt32(amount);
value = CGF.EmitCastToVoidPtr(value);
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.funcptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
@@ -1355,7 +1338,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For everything else, we can just do a simple increment.
} else {
llvm::Value *amt = Builder.getInt32(amount);
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.ptr");
else
value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
@@ -1416,12 +1399,24 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *sizeValue =
llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
else
value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI,
+ value, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return isPre ? value : input;
+ }
// Store the updated result through the lvalue.
if (LV.isBitField())
@@ -1459,6 +1454,15 @@ Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
}
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+
+ // Perform vector logical not on comparison with zero vector.
+ if (E->getType()->isExtVectorType()) {
+ Value *Oper = Visit(E->getSubExpr());
+ Value *Zero = llvm::Constant::getNullValue(Oper->getType());
+ Value *Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+ }
+
// Compare operand to zero.
Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
@@ -1473,9 +1477,9 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Try folding the offsetof to a constant.
- Expr::EvalResult EvalResult;
- if (E->Evaluate(EvalResult, CGF.getContext()))
- return Builder.getInt(EvalResult.Val.getInt());
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext()))
+ return Builder.getInt(Value);
// Loop over the components of the offsetof to compute the value.
unsigned n = E->getNumComponents();
@@ -1596,9 +1600,7 @@ ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
// If this isn't sizeof(vla), the result must be constant; use the constant
// folding logic so we don't have to duplicate it here.
- Expr::EvalResult Result;
- E->Evaluate(Result, CGF.getContext());
- return Builder.getInt(Result.Val.getInt());
+ return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
@@ -1632,7 +1634,10 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
// __imag on a scalar returns zero. Emit the subexpr to ensure side
// effects are evaluated, but not the actual value.
- CGF.EmitScalarExpr(Op, true);
+ if (Op->isGLValue())
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
@@ -1679,12 +1684,38 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.LHS = EmitLoadOfLValue(LHSLV);
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
E->getComputationLHSType());
+
+ llvm::PHINode *atomicPHI = 0;
+ if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+ // FIXME: For floating point types, we should be saving and restoring the
+ // floating point environment in the loop.
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
+ atomicPHI->addIncoming(OpInfo.LHS, startBB);
+ OpInfo.Ty = atomicTy->getValueType();
+ OpInfo.LHS = atomicPHI;
+ }
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI,
+ Result, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return LHSLV;
+ }
// Store the result value into the LHS lvalue. Bit-fields are handled
// specially because the result is altered by the store, i.e., [C99 6.5.16p1]
@@ -1709,11 +1740,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOptions().CPlusPlus)
- return RHS;
-
- // Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef())
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -1772,8 +1799,18 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Builder.SetInsertPoint(DivCont);
}
}
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
+ llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ if (CGF.getContext().getLangOpts().OpenCL) {
+ // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
+ llvm::Type *ValTy = Val->getType();
+ if (ValTy->isFloatTy() ||
+ (isa<llvm::VectorType>(ValTy) &&
+ cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
+ CGF.SetFPAccuracy(Val, 2.5);
+ }
+ return Val;
+ }
else if (Ops.Ty->hasUnsignedIntegerRepresentation())
return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
else
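
SetFPAccuracy itself is outside this hunk; in this era it records the relaxed OpenCL requirement as !fpmath metadata on the fdiv, which a backend may use to pick a faster, less precise expansion. A hedged sketch using the Value-based metadata API of the time:

// Sketch: annotate a floating-point instruction with a 2.5 ulp bound.
void setFPAccuracy(llvm::Value *Val, llvm::LLVMContext &Ctx, float Accuracy) {
  llvm::Value *Acc =
      llvm::ConstantFP::get(llvm::Type::getFloatTy(Ctx), Accuracy);
  llvm::cast<llvm::Instruction>(Val)->setMetadata(
      "fpmath", llvm::MDNode::get(Ctx, Acc));
}
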
@@ -1817,7 +1854,6 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
break;
default:
llvm_unreachable("Unsupported operation for overflow detection");
- IID = 0;
}
OpID <<= 1;
OpID |= 1;
@@ -1841,7 +1877,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// Handle overflow with llvm.trap.
const std::string *handlerName =
- &CGF.getContext().getLangOptions().OverflowHandler;
+ &CGF.getContext().getLangOpts().OverflowHandler;
if (handlerName->empty()) {
EmitOverflowBB(overflowBB);
Builder.SetInsertPoint(continueBB);
@@ -1853,7 +1889,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.SetInsertPoint(overflowBB);
// Get the overflow handler.
- llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ llvm::Type *Int8Ty = CGF.Int8Ty;
llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
llvm::FunctionType *handlerTy =
llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
@@ -1940,7 +1976,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- if (CGF.getLangOptions().isSignedOverflowDefined()) {
+ if (CGF.getLangOpts().isSignedOverflowDefined()) {
index = CGF.Builder.CreateMul(index, numElements, "vla.index");
pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
} else {
@@ -1959,7 +1995,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(result, pointer->getType());
}
- if (CGF.getLangOptions().isSignedOverflowDefined())
+ if (CGF.getLangOpts().isSignedOverflowDefined())
return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
@@ -1971,7 +2007,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
case LangOptions::SOB_Defined:
@@ -1991,7 +2027,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
// The LHS is always a pointer if either side is.
if (!op.LHS->getType()->isPointerTy()) {
if (op.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Defined:
@@ -2117,36 +2153,28 @@ static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
case BuiltinType::UChar:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
- break;
case BuiltinType::Char_S:
case BuiltinType::SChar:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
- break;
case BuiltinType::UShort:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
- break;
case BuiltinType::Short:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
- break;
case BuiltinType::UInt:
case BuiltinType::ULong:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
- break;
case BuiltinType::Int:
case BuiltinType::Long:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
- break;
case BuiltinType::Float:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
- break;
}
- return llvm::Intrinsic::not_intrinsic;
}
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
@@ -2325,11 +2353,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
return 0;
// The result of an assignment in C is the assigned r-value.
- if (!CGF.getContext().getLangOptions().CPlusPlus)
- return RHS;
-
- // Objective-C property assignment never reloads the value following a store.
- if (LHS.isPropertyRef())
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
return RHS;
// If the lvalue is non-volatile, return the computed value of the assignment.
@@ -2341,6 +2365,18 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+
+ // Perform vector logical and on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ Value *And = Builder.CreateAnd(LHS, RHS);
+ return Builder.CreateSExt(And, Zero->getType(), "sext");
+ }
+
llvm::Type *ResTy = ConvertType(E->getType());
// If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
@@ -2396,6 +2432,18 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+
+ // Perform vector logical or on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ Value *Or = Builder.CreateOr(LHS, RHS);
+ return Builder.CreateSExt(Or, Zero->getType(), "sext");
+ }
+
llvm::Type *ResTy = ConvertType(E->getType());
// If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
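
The two vector paths added above share one shape: compare each operand against zero, combine the <N x i1> lanes, then sign-extend so true lanes come back as all-ones in the original vector type. Note that both operands are evaluated unconditionally; there is no lane-wise short circuit. A standalone sketch:

llvm::Value *emitVectorLogicalOp(llvm::IRBuilder<> &B, llvm::Value *LHS,
                                 llvm::Value *RHS, bool IsAnd) {
  llvm::Value *Zero = llvm::Constant::getNullValue(LHS->getType());
  llvm::Value *L = B.CreateICmpNE(LHS, Zero, "cmp");  // <N x i1>
  llvm::Value *R = B.CreateICmpNE(RHS, Zero, "cmp");
  llvm::Value *Bits = IsAnd ? B.CreateAnd(L, R) : B.CreateOr(L, R);
  // -1 in true lanes, 0 in false lanes, in the operands' element width.
  return B.CreateSExt(Bits, LHS->getType(), "sext");
}
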
@@ -2503,16 +2551,23 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
Expr *live = lhsExpr, *dead = rhsExpr;
if (!CondExprBool) std::swap(live, dead);
- // If the dead side doesn't have labels we need, and if the Live side isn't
- // the gnu missing ?: extension (which we could handle, but don't bother
- // to), just emit the Live part.
- if (!CGF.ContainsLabel(dead))
- return Visit(live);
+ // If the dead side doesn't have labels we need, just emit the Live part.
+ if (!CGF.ContainsLabel(dead)) {
+ Value *Result = Visit(live);
+
+ // If the live part is a throw expression, it acts like it has a void
+ // type, so evaluating it returns a null Value*. However, a conditional
+ // with non-void type must return a non-null Value*.
+ if (!Result && !E->getType()->isVoidType())
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+
+ return Result;
+ }
}
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if (CGF.getContext().getLangOptions().OpenCL
+ if (CGF.getContext().getLangOpts().OpenCL
&& condExpr->getType()->isVectorType()) {
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
llvm::Value *LHS = Visit(lhsExpr);
@@ -2524,11 +2579,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
unsigned numElem = vecTy->getNumElements();
llvm::Type *elemType = vecTy->getElementType();
- std::vector<llvm::Constant*> Zvals;
- for (unsigned i = 0; i < numElem; ++i)
- Zvals.push_back(llvm::ConstantInt::get(elemType, 0));
-
- llvm::Value *zeroVec = llvm::ConstantVector::get(Zvals);
+ llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
llvm::Value *tmp = Builder.CreateSExt(TestMSB,
llvm::VectorType::get(elemType,
@@ -2564,6 +2615,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
llvm::Value *LHS = Visit(lhsExpr);
llvm::Value *RHS = Visit(rhsExpr);
+ if (!LHS) {
+ // If the conditional has void type, make sure we return a null Value*.
+ assert(!RHS && "LHS and RHS types must match");
+ return 0;
+ }
return Builder.CreateSelect(CondV, LHS, RHS, "cond");
}
@@ -2661,8 +2717,7 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Args.push_back(Builder.getInt32(2));
if (numElementsDst == 4)
- Args.push_back(llvm::UndefValue::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext())));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
llvm::Constant *Mask = llvm::ConstantVector::get(Args);
@@ -2733,11 +2788,11 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
Expr *BaseExpr = E->getBase();
if (BaseExpr->isRValue()) {
- V = CreateTempAlloca(ClassPtrTy, "resval");
+ V = CreateMemTemp(E->getType(), "resval");
llvm::Value *Src = EmitScalarExpr(BaseExpr);
Builder.CreateStore(Src, V);
V = ScalarExprEmitter(*this).EmitLoadOfLValue(
- MakeAddrLValue(V, E->getType()));
+ MakeNaturalAlignAddrLValue(V, E->getType()));
} else {
if (E->isArrow())
V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
@@ -2748,7 +2803,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
// build Class* type
ClassPtrTy = ClassPtrTy->getPointerTo();
V = Builder.CreateBitCast(V, ClassPtrTy);
- return MakeAddrLValue(V, E->getType());
+ return MakeNaturalAlignAddrLValue(V, E->getType());
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
index 51f2053..d0aa0f5 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -29,6 +29,10 @@ using namespace CodeGen;
typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
+static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
+ const Expr *E,
+ const ObjCMethodDecl *Method,
+ RValue Result);
/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
@@ -47,6 +51,140 @@ llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
+/// EmitObjCNumericLiteral - This routine generates code for
+/// the appropriate +[NSNumber numberWith<Type>:] method.
+///
+llvm::Value *
+CodeGenFunction::EmitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+ // Generate the correct selector for this literal's concrete type.
+ const Expr *NL = E->getNumber();
+ // Get the method.
+ const ObjCMethodDecl *Method = E->getObjCNumericLiteralMethod();
+ assert(Method && "NSNumber method is null");
+ Selector Sel = Method->getSelector();
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ QualType ResultType = E->getType(); // should be NSNumber *
+ const ObjCObjectPointerType *InterfacePointerType =
+ ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *NSNumberDecl =
+ InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, NSNumberDecl);
+
+ const ParmVarDecl *argDecl = *Method->param_begin();
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ RValue RV = EmitAnyExpr(NL);
+ CallArgList Args;
+ Args.add(RV, ArgQT);
+
+ RValue result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ ResultType, Sel, Receiver, Args,
+ NSNumberDecl, Method);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
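
Behaviorally, the routine above turns a literal such as @42 into a single class message, as if the source had written [NSNumber numberWithInt:42]. A hedged illustration at the runtime level — real codegen dispatches through CGObjCRuntime, never a hand-written objc_msgSend; the function-pointer cast is the usual Darwin idiom:

#include <objc/message.h>  // id, Class, SEL, objc_msgSend

id boxInt(Class nsNumberClass, SEL numberWithIntSel, int value) {
  typedef id (*MsgSendIntFn)(id, SEL, int);
  MsgSendIntFn send = reinterpret_cast<MsgSendIntFn>(objc_msgSend);
  return send(reinterpret_cast<id>(nsNumberClass), numberWithIntSel, value);
}
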
+llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects) {
+ ASTContext &Context = CGM.getContext();
+ const ObjCDictionaryLiteral *DLE = 0;
+ const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
+ if (!ALE)
+ DLE = cast<ObjCDictionaryLiteral>(E);
+
+ // Compute the type of the array we're initializing.
+ uint64_t NumElements =
+ ALE ? ALE->getNumElements() : DLE->getNumElements();
+ llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
+ NumElements);
+ QualType ElementType = Context.getObjCIdType().withConst();
+ QualType ElementArrayType
+ = Context.getConstantArrayType(ElementType, APNumElements,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+
+ // Allocate the temporary array(s).
+ llvm::Value *Objects = CreateMemTemp(ElementArrayType, "objects");
+ llvm::Value *Keys = 0;
+ if (DLE)
+ Keys = CreateMemTemp(ElementArrayType, "keys");
+
+ // Perform the actual initialization of the array(s).
+ for (uint64_t i = 0; i < NumElements; i++) {
+ if (ALE) {
+ // Emit the initializer.
+ const Expr *Rhs = ALE->getElement(i);
+ LValue LV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
+ ElementType,
+ Context.getTypeAlignInChars(Rhs->getType()),
+ Context);
+ EmitScalarInit(Rhs, /*D=*/0, LV, /*capturedByInit=*/false);
+ } else {
+ // Emit the key initializer.
+ const Expr *Key = DLE->getKeyValueElement(i).Key;
+ LValue KeyLV = LValue::MakeAddr(Builder.CreateStructGEP(Keys, i),
+ ElementType,
+ Context.getTypeAlignInChars(Key->getType()),
+ Context);
+ EmitScalarInit(Key, /*D=*/0, KeyLV, /*capturedByInit=*/false);
+
+ // Emit the value initializer.
+ const Expr *Value = DLE->getKeyValueElement(i).Value;
+ LValue ValueLV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
+ ElementType,
+ Context.getTypeAlignInChars(Value->getType()),
+ Context);
+ EmitScalarInit(Value, /*D=*/0, ValueLV, /*capturedByInit=*/false);
+ }
+ }
+
+ // Generate the argument list.
+ CallArgList Args;
+ ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
+ const ParmVarDecl *argDecl = *PI++;
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Objects), ArgQT);
+ if (DLE) {
+ argDecl = *PI++;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Keys), ArgQT);
+ }
+ argDecl = *PI;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ llvm::Value *Count =
+ llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
+ Args.add(RValue::get(Count), ArgQT);
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ Selector Sel = MethodWithObjects->getSelector();
+ QualType ResultType = E->getType();
+ const ObjCObjectPointerType *InterfacePointerType
+ = ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *Class
+ = InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, Class);
+
+ // Generate the message send.
+ RValue result
+ = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ MethodWithObjects->getResultType(),
+ Sel,
+ Receiver, Args, Class,
+ MethodWithObjects);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
+ const ObjCDictionaryLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
+}
+
/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
// Untyped selector.
@@ -143,7 +281,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// though.
bool retainSelf =
(!isDelegateInit &&
- CGM.getLangOptions().ObjCAutoRefCount &&
+ CGM.getLangOpts().ObjCAutoRefCount &&
method &&
method->hasAttr<NSConsumesSelfAttr>());
@@ -197,7 +335,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// In ARC, we sometimes want to "extend the lifetime"
// (i.e. retain+autorelease) of receivers of returns-inner-pointer
// messages.
- if (getLangOptions().ObjCAutoRefCount && method &&
+ if (getLangOpts().ObjCAutoRefCount && method &&
method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
shouldExtendReceiverForInnerPointerMessage(E))
Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
@@ -216,7 +354,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// be an undefined read and write of an object in unordered
// expressions.
if (isDelegateInit) {
- assert(getLangOptions().ObjCAutoRefCount &&
+ assert(getLangOpts().ObjCAutoRefCount &&
"delegate init calls should only be marked in ARC");
// Do an unsafe store of null into self.
@@ -307,14 +445,14 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
- const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
args.push_back(OMD->getSelfDecl());
args.push_back(OMD->getCmdDecl());
for (ObjCMethodDecl::param_const_iterator PI = OMD->param_begin(),
- E = OMD->param_end(); PI != E; ++PI)
+ E = OMD->param_end(); PI != E; ++PI)
args.push_back(*PI);
CurGD = OMD;
@@ -322,7 +460,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
// In ARC, certain methods get an extra cleanup.
- if (CGM.getLangOptions().ObjCAutoRefCount &&
+ if (CGM.getLangOpts().ObjCAutoRefCount &&
OMD->isInstanceMethod() &&
OMD->getSelector().isUnarySelector()) {
const IdentifierInfo *ident =
@@ -369,8 +507,9 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
- CGF.EmitCall(CGF.getTypes().getFunctionInfo(Context.VoidTy, args,
- FunctionType::ExtInfo()),
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Context.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
fn, ReturnValueSlot(), args);
}
@@ -467,13 +606,13 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// Handle retain.
if (setterKind == ObjCPropertyDecl::Retain) {
// In GC-only, there's nothing special that needs to be done.
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
// fallthrough
// In ARC, if the property is non-atomic, use expression emission,
// which translates to objc_storeStrong. This isn't required, but
// it's slightly nicer.
- } else if (CGM.getLangOptions().ObjCAutoRefCount && !IsAtomic) {
+ } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
Kind = Expression;
return;
@@ -508,14 +647,14 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// expressions. This actually works out to being atomic anyway,
// except for ARC __strong, but that should trigger the above code.
if (ivarType.hasNonTrivialObjCLifetime() ||
- (CGM.getLangOptions().getGC() &&
+ (CGM.getLangOpts().getGC() &&
CGM.getContext().getObjCGCAttrKind(ivarType))) {
Kind = Expression;
return;
}
// Compute whether the ivar has strong members.
- if (CGM.getLangOptions().getGC())
+ if (CGM.getLangOpts().getGC())
if (const RecordType *recordType = ivarType->getAs<RecordType>())
HasStrong = recordType->getDecl()->hasObjectMember();
@@ -564,12 +703,14 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ GenerateObjCAtomicGetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
+ StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
- generateObjCGetterBody(IMP, PID);
+ generateObjCGetterBody(IMP, PID, AtomicHelperFn);
FinishFunction();
}
@@ -597,14 +738,53 @@ static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
return false;
}
+/// emitCPPObjectAtomicGetterCall - Call the runtime function to
+/// copy the ivar into the return slot.
+static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
+ llvm::Value *returnAddr,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
+ // AtomicHelperFn);
+ CallArgList args;
+
+ // The 1st argument is the return Slot.
+ args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
+
+ // The 2nd argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // Third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
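
For reference, the runtime hook targeted here takes the two object addresses plus the per-property helper that GenerateObjCAtomicGetterCopyHelperFunction emits. The prototype below is an assumption drawn from the objc4 runtime of this period, not quoted from the patch:

extern "C" void objc_copyCppObjectAtomic(
    void *dest, const void *src,
    void (*copyHelper)(void *dest, const void *source));
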
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
- const ObjCPropertyImplDecl *propImpl) {
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
// If there's a non-trivial 'get' expression, we just have to emit that.
if (!hasTrivialGetExpr(propImpl)) {
- ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
- /*nrvo*/ 0);
- EmitReturnStmt(ret);
+ if (!AtomicHelperFn) {
+ ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
+ /*nrvo*/ 0);
+ EmitReturnStmt(ret);
+ }
+ else {
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue,
+ ivar, AtomicHelperFn);
+ }
return;
}
@@ -670,8 +850,9 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- RValue RV = EmitCall(getTypes().getFunctionInfo(propType, args,
- FunctionType::ExtInfo()),
+ RValue RV = EmitCall(getTypes().arrangeFunctionCall(propType, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
getPropertyFn, ReturnValueSlot(), args);
// We need to fix the type here. Ivars with copy & retain are
@@ -752,7 +933,8 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// The second argument is the address of the parameter variable.
ParmVarDecl *argVar = *OMD->param_begin();
- DeclRefExpr argRef(argVar, argVar->getType(), VK_LValue, SourceLocation());
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
@@ -770,11 +952,52 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
- CGF.EmitCall(CGF.getTypes().getFunctionInfo(CGF.getContext().VoidTy, args,
- FunctionType::ExtInfo()),
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
copyStructFn, ReturnValueSlot(), args);
}
+/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
+/// the value from the first formal parameter into the given ivar, using
+/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
+static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
+ ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
+ // AtomicHelperFn);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+ // Third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
Expr *setter = PID->getSetterCXXAssignment();
if (!setter) return true;
@@ -799,20 +1022,38 @@ static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
return false;
}
+static bool UseOptimizedSetter(CodeGenModule &CGM) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
+ return false;
+ const TargetInfo &Target = CGM.getContext().getTargetInfo();
+
+ if (Target.getPlatformName() != "macosx")
+ return false;
+
+ return Target.getPlatformMinVersion() >= VersionTuple(10, 8);
+}
+
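
The 10.8 gate exists because that release added specialized setter entry points, letting codegen bake the (atomic, copy) pair into the callee name rather than passing two i1 flags to objc_setProperty. A sketch of the selection; the names match the objc4 runtime, though their exact availability is an assumption here:

const char *optimizedSetterName(bool IsAtomic, bool IsCopy) {
  if (IsCopy)
    return IsAtomic ? "objc_setProperty_atomic_copy"
                    : "objc_setProperty_nonatomic_copy";
  return IsAtomic ? "objc_setProperty_atomic"
                  : "objc_setProperty_nonatomic";
}
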
void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
- const ObjCPropertyImplDecl *propImpl) {
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
+
// Just use the setter expression if Sema gave us one and it's
- // non-trivial. There's no way to do this atomically.
+ // non-trivial.
if (!hasTrivialSetExpr(propImpl)) {
- EmitStmt(propImpl->getSetterCXXAssignment());
+ if (!AtomicHelperFn)
+ // If non-atomic, assignment is called directly.
+ EmitStmt(propImpl->getSetterCXXAssignment());
+ else
+ // If atomic, assignment is called via a locking api.
+ emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
+ AtomicHelperFn);
return;
}
- const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
- ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
- ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
-
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
case PropertyImplStrategy::Native: {
@@ -845,13 +1086,28 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
case PropertyImplStrategy::GetSetProperty:
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
- llvm::Value *setPropertyFn =
- CGM.getObjCRuntime().GetPropertySetFunction();
- if (!setPropertyFn) {
- CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
- return;
+
+ llvm::Value *setOptimizedPropertyFn = 0;
+ llvm::Value *setPropertyFn = 0;
+ if (UseOptimizedSetter(CGM)) {
+ // 10.8 code and GC is off
+ setOptimizedPropertyFn =
+ CGM.getObjCRuntime()
+ .GetOptimizedPropertySetFunction(strategy.isAtomic(),
+ strategy.isCopy());
+ if (!setOptimizedPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
+ return;
+ }
}
-
+ else {
+ setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
+ if (!setPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
+ return;
+ }
+ }
+
// Emit objc_setProperty((id) self, _cmd, offset, arg,
// <is-atomic>, <is-copy>).
llvm::Value *cmd =
@@ -866,17 +1122,28 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
CallArgList args;
args.add(RValue::get(self), getContext().getObjCIdType());
args.add(RValue::get(cmd), getContext().getObjCSelType());
- args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
- args.add(RValue::get(arg), getContext().getObjCIdType());
- args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
- getContext().BoolTy);
- args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
- getContext().BoolTy);
- // FIXME: We shouldn't need to get the function info here, the runtime
- // already should have computed it to build the function.
- EmitCall(getTypes().getFunctionInfo(getContext().VoidTy, args,
- FunctionType::ExtInfo()),
- setPropertyFn, ReturnValueSlot(), args);
+ if (setOptimizedPropertyFn) {
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setOptimizedPropertyFn, ReturnValueSlot(), args);
+ } else {
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+ args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
+ getContext().BoolTy);
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setPropertyFn, ReturnValueSlot(), args);
+ }
+
return;
}
@@ -890,7 +1157,8 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
// Otherwise, fake up some ASTs and emit a normal assignment.
ValueDecl *selfDecl = setterMethod->getSelfDecl();
- DeclRefExpr self(selfDecl, selfDecl->getType(), VK_LValue, SourceLocation());
+ DeclRefExpr self(selfDecl, false, selfDecl->getType(),
+ VK_LValue, SourceLocation());
ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
selfDecl->getType(), CK_LValueToRValue, &self,
VK_RValue);
@@ -899,7 +1167,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
ParmVarDecl *argDecl = *setterMethod->param_begin();
QualType argType = argDecl->getType().getNonReferenceType();
- DeclRefExpr arg(argDecl, argType, VK_LValue, SourceLocation());
+ DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
argType.getUnqualifiedType(), CK_LValueToRValue,
&arg, VK_RValue);
@@ -943,12 +1211,14 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ GenerateObjCAtomicSetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
+ StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
- generateObjCSetterBody(IMP, PID);
+ generateObjCSetterBody(IMP, PID, AtomicHelperFn);
FinishFunction();
}
@@ -958,13 +1228,13 @@ namespace {
private:
llvm::Value *addr;
const ObjCIvarDecl *ivar;
- CodeGenFunction::Destroyer &destroyer;
+ CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
public:
DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
- : addr(addr), ivar(ivar), destroyer(*destroyer),
+ : addr(addr), ivar(ivar), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
@@ -1004,11 +1274,11 @@ static void emitCXXDestructMethod(CodeGenFunction &CGF,
// Use a call to objc_storeStrong to destroy strong ivars, for the
// general benefit of the tools.
if (dtorKind == QualType::DK_objc_strong_lifetime) {
- destroyer = &destroyARCStrongWithStore;
+ destroyer = destroyARCStrongWithStore;
// Otherwise use the default for the destruction kind.
} else {
- destroyer = &CGF.getDestroyer(dtorKind);
+ destroyer = CGF.getDestroyer(dtorKind);
}
CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
@@ -1067,7 +1337,7 @@ bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
}
bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
- if (CGM.getLangOptions().getGC() == LangOptions::NonGC)
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
return false;
if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
return FDTTy->getDecl()->hasObjectMember();
@@ -1087,117 +1357,6 @@ QualType CodeGenFunction::TypeOfSelfObject() {
return PTy->getPointeeType();
}
-LValue
-CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
- // This is a special l-value that just issues sends when we load or
- // store through it.
-
- // For certain base kinds, we need to emit the base immediately.
- llvm::Value *Base;
- if (E->isSuperReceiver())
- Base = LoadObjCSelf();
- else if (E->isClassReceiver())
- Base = CGM.getObjCRuntime().GetClass(Builder, E->getClassReceiver());
- else
- Base = EmitScalarExpr(E->getBase());
- return LValue::MakePropertyRef(E, Base);
-}
-
-static RValue GenerateMessageSendSuper(CodeGenFunction &CGF,
- ReturnValueSlot Return,
- QualType ResultType,
- Selector S,
- llvm::Value *Receiver,
- const CallArgList &CallArgs) {
- const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CGF.CurFuncDecl);
- bool isClassMessage = OMD->isClassMethod();
- bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return CGF.CGM.getObjCRuntime()
- .GenerateMessageSendSuper(CGF, Return, ResultType,
- S, OMD->getClassInterface(),
- isCategoryImpl, Receiver,
- isClassMessage, CallArgs);
-}
-
-RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
- ReturnValueSlot Return) {
- const ObjCPropertyRefExpr *E = LV.getPropertyRefExpr();
- QualType ResultType = E->getGetterResultType();
- Selector S;
- const ObjCMethodDecl *method;
- if (E->isExplicitProperty()) {
- const ObjCPropertyDecl *Property = E->getExplicitProperty();
- S = Property->getGetterName();
- method = Property->getGetterMethodDecl();
- } else {
- method = E->getImplicitPropertyGetter();
- S = method->getSelector();
- }
-
- llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
-
- if (CGM.getLangOptions().ObjCAutoRefCount) {
- QualType receiverType;
- if (E->isSuperReceiver())
- receiverType = E->getSuperReceiverType();
- else if (E->isClassReceiver())
- receiverType = getContext().getObjCClassType();
- else
- receiverType = E->getBase()->getType();
- }
-
- // Accesses to 'super' follow a different code path.
- if (E->isSuperReceiver())
- return AdjustRelatedResultType(*this, E, method,
- GenerateMessageSendSuper(*this, Return,
- ResultType,
- S, Receiver,
- CallArgList()));
- const ObjCInterfaceDecl *ReceiverClass
- = (E->isClassReceiver() ? E->getClassReceiver() : 0);
- return AdjustRelatedResultType(*this, E, method,
- CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, ResultType, S,
- Receiver, CallArgList(), ReceiverClass));
-}
-
-void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
- LValue Dst) {
- const ObjCPropertyRefExpr *E = Dst.getPropertyRefExpr();
- Selector S = E->getSetterSelector();
- QualType ArgType = E->getSetterArgType();
-
- // FIXME. Other than scalars, AST is not adequate for setter and
- // getter type mismatches which require conversion.
- if (Src.isScalar()) {
- llvm::Value *SrcVal = Src.getScalarVal();
- QualType DstType = getContext().getCanonicalType(ArgType);
- llvm::Type *DstTy = ConvertType(DstType);
- if (SrcVal->getType() != DstTy)
- Src =
- RValue::get(EmitScalarConversion(SrcVal, E->getType(), DstType));
- }
-
- CallArgList Args;
- Args.add(Src, ArgType);
-
- llvm::Value *Receiver = Dst.getPropertyRefBaseAddr();
- QualType ResultType = getContext().VoidTy;
-
- if (E->isSuperReceiver()) {
- GenerateMessageSendSuper(*this, ReturnValueSlot(),
- ResultType, S, Receiver, Args);
- return;
- }
-
- const ObjCInterfaceDecl *ReceiverClass
- = (E->isClassReceiver() ? E->getClassReceiver() : 0);
-
- CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
- ResultType, S, Receiver, Args,
- ReceiverClass);
-}
-
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Constant *EnumerationMutationFn =
CGM.getObjCRuntime().EnumerationMutationFunction();
@@ -1243,7 +1402,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Emit the collection pointer. In ARC, we do a retain.
llvm::Value *Collection;
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
Collection = EmitARCRetainScalarExpr(S.getCollection());
// Enter a cleanup to do the release.
@@ -1343,8 +1502,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Args2.add(RValue::get(V), getContext().getObjCIdType());
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
- EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2,
- FunctionType::ExtInfo()),
+ EmitCall(CGM.getTypes().arrangeFunctionCall(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
EnumerationMutationFn, ReturnValueSlot(), Args2);
// Otherwise, or if the mutation function returns, just continue.
@@ -1360,7 +1520,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
EmitAutoVarInit(variable);
const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
- DeclRefExpr tempDRE(const_cast<VarDecl*>(D), D->getType(),
+ DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
VK_LValue, SourceLocation());
elementLValue = EmitLValue(&tempDRE);
elementType = D->getType();
@@ -1467,7 +1627,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
// Leave the cleanup we entered in ARC.
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
PopCleanupBlock();
EmitBlock(LoopEnd.getBlock());
@@ -1604,9 +1764,7 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
== value->getType());
if (!fn) {
- std::vector<llvm::Type*> argTypes(2);
- argTypes[0] = CGF.Int8PtrPtrTy;
- argTypes[1] = CGF.Int8PtrTy;
+ llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
llvm::FunctionType *fnType
= llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
@@ -1720,8 +1878,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
// in a moment.
} else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
llvm::FunctionType *type =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- /*variadic*/ false);
+ llvm::FunctionType::get(VoidTy, /*variadic*/false);
marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
@@ -1813,7 +1970,8 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
// lvalue is inadequately aligned.
if (shouldUseFusedARCCalls() &&
!isBlock &&
- !(dst.getAlignment() && dst.getAlignment() < PointerAlignInBytes)) {
+ (dst.getAlignment().isZero() ||
+ dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
}
@@ -2086,7 +2244,7 @@ namespace {
}
void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
- if (CGM.getLangOptions().ObjCAutoRefCount)
+ if (CGM.getLangOpts().ObjCAutoRefCount)
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
else
EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
@@ -2109,7 +2267,6 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
}
llvm_unreachable("impossible lifetime!");
- return TryEmitResult();
}
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
@@ -2138,7 +2295,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
// As a very special optimization, in ARC++, if the l-value is the
// result of a non-volatile assignment, do a simple retain of the
// result of the call to objc_storeWeak instead of reloading.
- if (CGF.getLangOptions().CPlusPlus &&
+ if (CGF.getLangOpts().CPlusPlus &&
!type.isVolatileQualified() &&
type.getObjCLifetime() == Qualifiers::OCL_Weak &&
isa<BinaryOperator>(e) &&
@@ -2233,10 +2390,64 @@ static bool shouldEmitSeparateBlockRetain(const Expr *e) {
return true;
}
+/// Try to emit a PseudoObjectExpr at +1.
+///
+/// This massively duplicates emitPseudoObjectRValue.
+static TryEmitResult tryEmitARCRetainPseudoObject(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression.
+ const Expr *resultExpr = E->getResultExpr();
+ assert(resultExpr);
+ TryEmitResult result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+
+ // If this semantic is the result of the pseudo-object
+ // expression, try to evaluate the source as +1.
+ if (ov == resultExpr) {
+ assert(!OVMA::shouldBindAsLValue(ov));
+ result = tryEmitARCRetainScalarExpr(CGF, ov->getSourceExpr());
+ opaqueData = OVMA::bind(CGF, ov, RValue::get(result.getPointer()));
+
+ // Otherwise, just bind it.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+ }
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ result = tryEmitARCRetainScalarExpr(CGF, semantic);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
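+
+// A rough sketch (hypothetical example, not part of this change): for a
+// load of 'obj.prop', the pseudo-object expression's semantic form is
+// approximately
+//   ov = obj;      // opaque value for the receiver, bound normally
+//   [ov prop];     // the result expression; its source is emitted at +1
+// Only the result expression is retained; every other semantic expression
+// is emitted in an ignored context.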
+
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
// Look through cleanups.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
+ CGF.enterFullExpression(cleanups);
CodeGenFunction::RunCleanupsScope scope(CGF);
return tryEmitARCRetainScalarExpr(CGF, cleanups->getSubExpr());
}
@@ -2331,12 +2542,6 @@ tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
return TryEmitResult(result, true);
}
- case CK_GetObjCProperty: {
- llvm::Value *result = emitARCRetainCall(CGF, ce);
- if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
- return TryEmitResult(result, true);
- }
-
default:
break;
}
@@ -2358,6 +2563,17 @@ tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
llvm::Value *result = emitARCRetainCall(CGF, e);
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
+
+ // Look through pseudo-object expressions.
+ } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
+ TryEmitResult result
+ = tryEmitARCRetainPseudoObject(CGF, pseudo);
+ if (resultType) {
+ llvm::Value *value = result.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ result.setPointer(value);
+ }
+ return result;
}
// Conservatively halt the search at any other expression kind.
@@ -2424,17 +2640,19 @@ llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
// In ARC, retain and autorelease the expression.
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
// Do so before running any cleanups for the full-expression.
// tryEmitARCRetainScalarExpr does make an effort to do things
// inside cleanups, but there are crazy cases like
// @throw A().foo;
// where a full retain+autorelease is required and would
// otherwise happen after the destructor for the temporary.
- CodeGenFunction::RunCleanupsScope cleanups(*this);
- if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr))
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr)) {
+ enterFullExpression(ewc);
expr = ewc->getSubExpr();
+ }
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
return EmitARCRetainAutoreleaseScalarExpr(expr);
}
@@ -2468,12 +2686,8 @@ CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
// If the RHS was emitted retained, expand this.
if (hasImmediateRetain) {
llvm::Value *oldValue =
- EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatileQualified(),
- lvalue.getAlignment(), e->getType(),
- lvalue.getTBAAInfo());
- EmitStoreOfScalar(value, lvalue.getAddress(),
- lvalue.isVolatileQualified(), lvalue.getAlignment(),
- e->getType(), lvalue.getTBAAInfo());
+ EmitLoadOfScalar(lvalue);
+ EmitStoreOfScalar(value, lvalue);
EmitARCRelease(oldValue, /*precise*/ false);
} else {
value = EmitARCStoreStrong(lvalue, value, ignored);
@@ -2487,15 +2701,13 @@ CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
LValue lvalue = EmitLValue(e->getLHS());
- EmitStoreOfScalar(value, lvalue.getAddress(),
- lvalue.isVolatileQualified(), lvalue.getAlignment(),
- e->getType(), lvalue.getTBAAInfo());
+ EmitStoreOfScalar(value, lvalue);
return std::pair<LValue,llvm::Value*>(lvalue, value);
}
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
- const ObjCAutoreleasePoolStmt &ARPS) {
+ const ObjCAutoreleasePoolStmt &ARPS) {
const Stmt *subStmt = ARPS.getSubStmt();
const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
@@ -2526,7 +2738,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
// We just use an inline assembly.
llvm::FunctionType *extenderType
- = llvm::FunctionType::get(VoidTy, VoidPtrTy, /*variadic*/ false);
+ = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
llvm::Value *extender
= llvm::InlineAsm::get(extenderType,
/* assembly */ "",
@@ -2537,4 +2749,226 @@ void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
Builder.CreateCall(extender, object)->setDoesNotThrow();
}
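
// The IR emitted here is approximately (a sketch, not verbatim output):
//   call void asm sideeffect "", "r"(i8* %object)
// an empty inline asm that claims to read the pointer, which suffices to
// keep the object alive up to this point when GC is enabled.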
+/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
+/// a non-trivial copy assignment operator, produce the following helper
+/// function:
+/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
+///
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+ // FIXME: This API is for the NeXT runtime only for now.
+ if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ return 0;
+ QualType Ty = PID->getPropertyIvarDecl()->getType();
+ if (!Ty->isRecordType())
+ return 0;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ return 0;
+ llvm::Constant * HelperFn = 0;
+ if (hasTrivialSetExpr(PID))
+ return 0;
+ assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
+ if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
+ return HelperFn;
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__assign_helper_atomic_property_",
+ &CGM.getModule());
+
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ Expr *Args[2] = { &DST, &SRC };
+ CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
+ CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
+ Args, 2, DestTy->getPointeeType(),
+ VK_LValue, SourceLocation());
+
+ EmitStmt(&TheCall);
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
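+
+// For orientation, a sketch of the source-level situation that reaches this
+// code (hypothetical types, not part of this change):
+//   struct Wrapped { Wrapped &operator=(const Wrapped &); };
+//   @interface Foo : NSObject
+//   @property (atomic) Wrapped w;
+//   @end
+// The synthesized atomic setter cannot copy 'w' bytewise, so it hands the
+// helper emitted above to the runtime's atomic-copy entry point, which
+// performs '*dest = *source' through the user-defined assignment operator.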
+
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+ // FIXME: This API is for the NeXT runtime only for now.
+ if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ return 0;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ QualType Ty = PD->getType();
+ if (!Ty->isRecordType())
+ return 0;
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ return 0;
+ llvm::Constant * HelperFn = 0;
+
+ if (hasTrivialGetExpr(PID))
+ return 0;
+ assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
+ if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
+ return HelperFn;
+
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_atomic_property_", &CGM.getModule());
+
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ CXXConstructExpr *CXXConstExpr =
+ cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
+
+ SmallVector<Expr*, 4> ConstructorArgs;
+ ConstructorArgs.push_back(&SRC);
+ CXXConstructExpr::arg_iterator A = CXXConstExpr->arg_begin();
+ ++A;
+
+ for (CXXConstructExpr::arg_iterator AEnd = CXXConstExpr->arg_end();
+ A != AEnd; ++A)
+ ConstructorArgs.push_back(*A);
+
+ CXXConstructExpr *TheCXXConstructExpr =
+ CXXConstructExpr::Create(C, Ty, SourceLocation(),
+ CXXConstExpr->getConstructor(),
+ CXXConstExpr->isElidable(),
+ &ConstructorArgs[0], ConstructorArgs.size(),
+ CXXConstExpr->hadMultipleCandidates(),
+ CXXConstExpr->isListInitialization(),
+ CXXConstExpr->requiresZeroInitialization(),
+ CXXConstExpr->getConstructionKind(),
+ SourceRange());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+
+ RValue DV = EmitAnyExpr(&DstExpr);
+ CharUnits Alignment
+ = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
+ EmitAggExpr(TheCXXConstructExpr,
+ AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
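+
+// The getter-side analogue of the sketch above: for the same hypothetical
+// 'Wrapped' property, the helper copy-constructs the returned temporary
+// from the ivar, roughly
+//   void copyHelper(Wrapped *dest, const Wrapped *src) {
+//     new (dest) Wrapped(*src);  // PID->getGetterCXXConstructor()
+//   }
+// emitted here by evaluating the rebuilt CXXConstructExpr into the
+// destination slot.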
+
+llvm::Value *
+CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
+ // Get selectors for copy/autorelease.
+ IdentifierInfo *CopyID = &getContext().Idents.get("copy");
+ Selector CopySelector =
+ getContext().Selectors.getNullarySelector(CopyID);
+ IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
+ Selector AutoreleaseSelector =
+ getContext().Selectors.getNullarySelector(AutoreleaseID);
+
+ // Emit calls to copy/autorelease.
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Val = Block;
+ RValue Result;
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, CopySelector,
+ Val, CallArgList(), 0, 0);
+ Val = Result.getScalarVal();
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, AutoreleaseSelector,
+ Val, CallArgList(), 0, 0);
+ Val = Result.getScalarVal();
+ return Val;
+}
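+
+// A minimal sketch of what the two message sends above amount to at the
+// source level:
+//   id result = [[block copy] autorelease];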
+
+
CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
index 688e992..db0bd95 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -18,7 +18,6 @@
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
#include "CGCleanup.h"
-
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -104,8 +103,6 @@ class LazyRuntimeFunction {
/// GNUstep).
class CGObjCGNU : public CGObjCRuntime {
protected:
- /// The module that is using this class
- CodeGenModule &CGM;
/// The LLVM module into which output is inserted
llvm::Module &TheModule;
/// struct objc_super. Used for sending messages to super. This structure
@@ -187,7 +184,7 @@ protected:
std::string name = prefix + Str;
llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
if (!ConstStr) {
- llvm::Constant *value = llvm::ConstantArray::get(VMContext, Str, true);
+ llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
}
@@ -288,6 +285,10 @@ private:
LazyRuntimeFunction IvarAssignFn, StrongCastAssignFn, MemMoveFn, WeakReadFn,
WeakAssignFn, GlobalAssignFn;
+ typedef std::pair<std::string, std::string> ClassAliasPair;
+ /// All classes that have aliases set for them.
+ std::vector<ClassAliasPair> ClassAliases;
+
protected:
/// Function used for throwing Objective-C exceptions.
LazyRuntimeFunction ExceptionThrowFn;
@@ -336,10 +337,9 @@ private:
/// containing a size and an array of structures containing instance variable
/// metadata. This is used purely for introspection in the fragile ABI. In
/// the non-fragile ABI, it's used for instance variable fixup.
- llvm::Constant *GenerateIvarList(
- const SmallVectorImpl<llvm::Constant *> &IvarNames,
- const SmallVectorImpl<llvm::Constant *> &IvarTypes,
- const SmallVectorImpl<llvm::Constant *> &IvarOffsets);
+ llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets);
/// Generates a method list structure. This is a structure containing a size
/// and an array of structures containing method metadata.
///
@@ -347,8 +347,8 @@ private:
/// pointer allowing them to be chained together in a linked list.
llvm::Constant *GenerateMethodList(const StringRef &ClassName,
const StringRef &CategoryName,
- const SmallVectorImpl<Selector> &MethodSels,
- const SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
bool isClassMethodList);
/// Emits an empty protocol. This is used for @protocol() where no protocol
/// is found. The runtime will (hopefully) fix up the pointer to refer to the
@@ -361,8 +361,7 @@ private:
SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
/// Generates a list of referenced protocols. Classes, categories, and
/// protocols all use this structure.
- llvm::Constant *GenerateProtocolList(
- const SmallVectorImpl<std::string> &Protocols);
+ llvm::Constant *GenerateProtocolList(ArrayRef<std::string> Protocols);
/// To ensure that all protocols are seen by the runtime, we add a category on
/// a class defined in the runtime, declaring no methods, but adopting the
/// protocols. This is a horribly ugly hack, but it allows us to collect all
@@ -387,8 +386,8 @@ private:
/// Generates a method list. This is used by protocols to define the required
/// and optional methods.
llvm::Constant *GenerateProtocolMethodList(
- const SmallVectorImpl<llvm::Constant *> &MethodNames,
- const SmallVectorImpl<llvm::Constant *> &MethodTypes);
+ ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes);
/// Returns a selector with the specified type encoding. An empty string is
/// used to return an untyped selector (with the types field set to NULL).
llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
@@ -427,7 +426,7 @@ protected:
/// significant bit being assumed to come first in the bitfield. Therefore,
/// a bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] },
/// while a bitfield with the 63rd bit set will be 1<<64.
- llvm::Constant *MakeBitField(llvm::SmallVectorImpl<bool> &bits);
+ llvm::Constant *MakeBitField(ArrayRef<bool> bits);
public:
CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
unsigned protocolClassVersion);
@@ -466,13 +465,17 @@ public:
const ObjCContainerDecl *CD);
virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD);
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
virtual llvm::Function *ModuleInitFunction();
virtual llvm::Constant *GetPropertyGetFunction();
virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy);
virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetCppAtomicObjectFunction();
virtual llvm::Constant *GetGetStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
@@ -539,8 +542,8 @@ protected:
llvm::MDNode *node) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *args[] = {
- EnforceType(Builder, Receiver, IdTy),
- EnforceType(Builder, cmd, SelectorTy) };
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy) };
llvm::CallSite imp = CGF.EmitCallOrInvoke(MsgLookupFn, args);
imp->setMetadata(msgSendMDKind, node);
return imp.getInstruction();
@@ -599,9 +602,9 @@ class CGObjCGNUstep : public CGObjCGNU {
LookupFn->setDoesNotCapture(1);
llvm::Value *args[] = {
- EnforceType(Builder, ReceiverPtr, PtrToIdTy),
- EnforceType(Builder, cmd, SelectorTy),
- EnforceType(Builder, self, IdTy) };
+ EnforceType(Builder, ReceiverPtr, PtrToIdTy),
+ EnforceType(Builder, cmd, SelectorTy),
+ EnforceType(Builder, self, IdTy) };
llvm::CallSite slot = CGF.EmitCallOrInvoke(LookupFn, args);
slot.setOnlyReadsMemory();
slot->setMetadata(msgSendMDKind, node);
@@ -638,7 +641,7 @@ class CGObjCGNUstep : public CGObjCGNU {
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy, NULL);
// If we're in ObjC++ mode, then we want to make use of the C++ exception handling machinery.
- if (CGM.getLangOptions().CPlusPlus) {
+ if (CGM.getLangOpts().CPlusPlus) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL);
@@ -683,9 +686,9 @@ static std::string SymbolNameForMethod(const StringRef &ClassName,
CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
unsigned protocolClassVersion)
- : CGM(cgm), TheModule(CGM.getModule()), VMContext(cgm.getLLVMContext()),
- ClassPtrAlias(0), MetaClassPtrAlias(0), RuntimeVersion(runtimeABIVersion),
- ProtocolVersion(protocolClassVersion) {
+ : CGObjCRuntime(cgm), TheModule(CGM.getModule()),
+ VMContext(cgm.getLLVMContext()), ClassPtrAlias(0), MetaClassPtrAlias(0),
+ RuntimeVersion(runtimeABIVersion), ProtocolVersion(protocolClassVersion) {
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
@@ -770,7 +773,7 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
true));
- const LangOptions &Opts = CGM.getLangOptions();
+ const LangOptions &Opts = CGM.getLangOpts();
if ((Opts.getGC() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
RuntimeVersion = 10;
@@ -879,14 +882,14 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
}
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
- if (!CGM.getLangOptions().CPlusPlus) {
+ if (!CGM.getLangOpts().CPlusPlus) {
if (T->isObjCIdType()
|| T->isObjCQualifiedIdType()) {
// With the old ABI, there was only one kind of catchall, which broke
// foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
// a pointer indicating object catchalls, and NULL to indicate real
// catchalls
- if (CGM.getLangOptions().ObjCNonFragileABI) {
+ if (CGM.getLangOpts().ObjCNonFragileABI) {
return MakeConstantString("@id");
} else {
return 0;
@@ -930,7 +933,8 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
// Return the existing typeinfo if it exists
llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName);
- if (typeinfo) return typeinfo;
+ if (typeinfo)
+ return llvm::ConstantExpr::getBitCast(typeinfo, PtrToInt8Ty);
// Otherwise create it.
@@ -970,12 +974,27 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
if (old != ObjCStrings.end())
return old->getValue();
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NXConstantString";
+
+ std::string Sym = "_OBJC_CLASS_";
+ Sym += StringClass;
+
+ llvm::Constant *isa = TheModule.getNamedGlobal(Sym);
+
+ if (!isa)
+ isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false,
+ llvm::GlobalValue::ExternalWeakLinkage, 0, Sym);
+ else if (isa->getType() != PtrToIdTy)
+ isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
+
std::vector<llvm::Constant*> Ivars;
- Ivars.push_back(NULLPtr);
+ Ivars.push_back(isa);
Ivars.push_back(MakeConstantString(Str));
Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
llvm::Constant *ObjCStr = MakeGlobal(
- llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, NULL),
Ivars, ".objc_str");
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
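
// The emitted constant is, schematically (a sketch of the layout, not
// verbatim IR):
//   struct { id isa; const char *str; int length; }
//     .objc_str = { _OBJC_CLASS_NXConstantString, "...", length };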
@@ -998,7 +1017,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method) {
CGBuilderTy &Builder = CGF.Builder;
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
return RValue::get(EnforceType(Builder, Receiver,
CGM.getTypes().ConvertType(ResultType)));
@@ -1017,9 +1036,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
ActualArgs.addFrom(CallArgs);
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- FunctionType::ExtInfo());
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
llvm::Value *ReceiverClass = 0;
if (isCategoryImpl) {
@@ -1072,12 +1089,10 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
- llvm::FunctionType *impType =
- Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
// Get the IMP
llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd);
- imp = EnforceType(Builder, imp, llvm::PointerType::getUnqual(impType));
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
llvm::Value *impMD[] = {
llvm::MDString::get(VMContext, Sel.getAsString()),
@@ -1087,8 +1102,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
- 0, &call);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, 0, &call);
call->setMetadata(msgSendMDKind, node);
return msgRet;
}
@@ -1106,7 +1120,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
CGBuilderTy &Builder = CGF.Builder;
// Strip out message sends to retain / release in GC mode
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
return RValue::get(EnforceType(Builder, Receiver,
CGM.getTypes().ConvertType(ResultType)));
@@ -1161,27 +1175,46 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
};
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
- // Get the IMP to call
- llvm::Value *imp = LookupIMP(CGF, Receiver, cmd, node);
-
CallArgList ActualArgs;
ActualArgs.add(RValue::get(Receiver), ASTIdTy);
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
ActualArgs.addFrom(CallArgs);
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- FunctionType::ExtInfo());
- llvm::FunctionType *impType =
- Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
- imp = EnforceType(Builder, imp, llvm::PointerType::getUnqual(impType));
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ // Get the IMP to call
+ llvm::Value *imp;
+
+ // If we have non-legacy dispatch specified, we try using the objc_msgSend()
+ // functions. These are not supported on all platforms (or all runtimes on a
+ // given platform), so we only use them when they are explicitly requested.
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ imp = LookupIMP(CGF, Receiver, cmd, node);
+ break;
+ case CodeGenOptions::Mixed:
+ case CodeGenOptions::NonLegacy:
+ if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_fpret");
+ } else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ // The actual types here don't matter - we're going to bitcast the
+ // function anyway
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_stret");
+ } else {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend");
+ }
+ }
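+ // The three entry points above all share the loose variadic shape
+ // (sketch):
+ //   id objc_msgSend(id self, SEL op, ...);
+ //   id objc_msgSend_stret(id self, SEL op, ...); // large struct returns
+ //   id objc_msgSend_fpret(id self, SEL op, ...); // x87 fp returns
+ // The declared types are deliberately inaccurate; the EnforceType bitcast
+ // through MSI.MessengerType below imposes the real signature.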
+
+ // Reset the receiver in case the lookup modified it
+ ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false);
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
- // For sender-aware dispatch, we pass the sender as the third argument to a
- // lookup function. When sending messages from C code, the sender is nil.
- // objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
0, &call);
call->setMetadata(msgSendMDKind, node);
@@ -1225,11 +1258,12 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
/// Generates a MethodList. Used in construction of a objc_class and
/// objc_category structures.
-llvm::Constant *CGObjCGNU::GenerateMethodList(const StringRef &ClassName,
- const StringRef &CategoryName,
- const SmallVectorImpl<Selector> &MethodSels,
- const SmallVectorImpl<llvm::Constant *> &MethodTypes,
- bool isClassMethodList) {
+llvm::Constant *CGObjCGNU::
+GenerateMethodList(const StringRef &ClassName,
+ const StringRef &CategoryName,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
+ bool isClassMethodList) {
if (MethodSels.empty())
return NULLPtr;
// Get the method structure type.
@@ -1282,10 +1316,10 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const StringRef &ClassName,
}
/// Generates an IvarList. Used in construction of a objc_class.
-llvm::Constant *CGObjCGNU::GenerateIvarList(
- const SmallVectorImpl<llvm::Constant *> &IvarNames,
- const SmallVectorImpl<llvm::Constant *> &IvarTypes,
- const SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
+llvm::Constant *CGObjCGNU::
+GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets) {
if (IvarNames.size() == 0)
return NULLPtr;
// Get the method structure type.
@@ -1345,7 +1379,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// anyway; the classes will still work with the GNU runtime, they will just
// be ignored.
llvm::StructType *ClassTy = llvm::StructType::get(
- PtrToInt8Ty, // class_pointer
+ PtrToInt8Ty, // isa
PtrToInt8Ty, // super_class
PtrToInt8Ty, // name
LongTy, // version
@@ -1396,14 +1430,25 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.push_back(WeakIvarBitmap);
// Create an instance of the structure
// This is now an externally visible symbol, so that we can speed up class
- // messages in the next ABI.
- return MakeGlobal(ClassTy, Elements, (isMeta ? "_OBJC_METACLASS_":
- "_OBJC_CLASS_") + std::string(Name), llvm::GlobalValue::ExternalLinkage);
+ // messages in the next ABI. We may already have some weak references to
+ // this, so check and fix them properly.
+ std::string ClassSym((isMeta ? "_OBJC_METACLASS_": "_OBJC_CLASS_") +
+ std::string(Name));
+ llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
+ llvm::Constant *Class = MakeGlobal(ClassTy, Elements, ClassSym,
+ llvm::GlobalValue::ExternalLinkage);
+ if (ClassRef) {
+ ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
+ ClassRef->getType()));
+ ClassRef->removeFromParent();
+ Class->setName(ClassSym);
+ }
+ return Class;
}
-llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
- const SmallVectorImpl<llvm::Constant *> &MethodNames,
- const SmallVectorImpl<llvm::Constant *> &MethodTypes) {
+llvm::Constant *CGObjCGNU::
+GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes) {
// Get the method structure type.
llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
@@ -1430,8 +1475,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
}
// Create the protocol list structure used in classes, categories and so on
-llvm::Constant *CGObjCGNU::GenerateProtocolList(
- const SmallVectorImpl<std::string> &Protocols) {
+llvm::Constant *CGObjCGNU::
+GenerateProtocolList(ArrayRef<std::string> Protocols) {
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
Protocols.size());
llvm::StructType *ProtocolListTy = llvm::StructType::get(
@@ -1506,6 +1550,11 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
ASTContext &Context = CGM.getContext();
std::string ProtocolName = PD->getNameAsString();
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
SmallVector<std::string, 16> Protocols;
for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
E = PD->protocol_end(); PI != E; ++PI)
@@ -1723,7 +1772,7 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
/// significant bit being assumed to come first in the bitfield. Therefore, a
/// bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] }, while a
/// bitfield with the 63rd bit set will be 1<<64.
-llvm::Constant *CGObjCGNU::MakeBitField(llvm::SmallVectorImpl<bool> &bits) {
+llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
int bitCount = bits.size();
int ptrBits =
(TheModule.getPointerSize() == llvm::Module::Pointer32) ? 32 : 64;
@@ -1881,6 +1930,15 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
".objc_property_list");
}
+void CGObjCGNU::RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {
+ // Get the class declaration for which the alias is specified.
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OAD->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ std::string AliasName = OAD->getNameAsString();
+ ClassAliases.push_back(ClassAliasPair(ClassName,AliasName));
+}
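+
+// This is driven by @compatibility_alias declarations, e.g. (hypothetical):
+//   @compatibility_alias OldName NewName;
+// which records the pair ("NewName", "OldName") here, to be registered with
+// the runtime in ModuleInitFunction() below.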
+
void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ASTContext &Context = CGM.getContext();
@@ -1926,7 +1984,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
- if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
instanceSize = 0 - (instanceSize - superInstanceSize);
}
@@ -1941,7 +1999,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Get the offset
uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
uint64_t Offset = BaseOffset;
- if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
Offset = BaseOffset - superInstanceSize;
}
llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
@@ -2013,9 +2071,9 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
}
// Collect the names of referenced protocols
SmallVector<std::string, 16> Protocols;
- const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
- E = Protos.end(); I != E; ++I)
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = ClassDecl->protocol_begin(),
+ E = ClassDecl->protocol_end(); I != E; ++I)
Protocols.push_back((*I)->getNameAsString());
@@ -2063,15 +2121,16 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Get the existing variable, if one exists.
llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
if (offset) {
- offset->setInitializer(offsetValue);
- // If this is the real definition, change its linkage type so that
- // different modules will use this one, rather than their private
- // copy.
- offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ offset->setInitializer(offsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
} else {
- // Add a new alias if there isn't one already.
- offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
- false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ // Add a new alias if there isn't one already.
+ offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ (void) offset; // Silence dead store warning.
}
++ivarIndex;
}
@@ -2135,7 +2194,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
ConstantStrings.size() + 1);
ConstantStrings.push_back(NULLPtr);
- StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass;
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
if (StringClass.empty()) StringClass = "NXConstantString";
@@ -2263,12 +2322,12 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(SymTab);
if (RuntimeVersion >= 10)
- switch (CGM.getLangOptions().getGC()) {
+ switch (CGM.getLangOpts().getGC()) {
case LangOptions::GCOnly:
Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
break;
case LangOptions::NonGC:
- if (CGM.getLangOptions().ObjCAutoRefCount)
+ if (CGM.getLangOpts().ObjCAutoRefCount)
Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
else
Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
@@ -2296,6 +2355,46 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::PointerType::getUnqual(ModuleTy), true);
llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
Builder.CreateCall(Register, Module);
+
+ if (!ClassAliases.empty()) {
+ llvm::Type *ArgTypes[2] = {PtrTy, PtrToInt8Ty};
+ llvm::FunctionType *RegisterAliasTy =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ ArgTypes, false);
+ llvm::Function *RegisterAlias = llvm::Function::Create(
+ RegisterAliasTy,
+ llvm::GlobalValue::ExternalWeakLinkage, "class_registerAlias_np",
+ &TheModule);
+ llvm::BasicBlock *AliasBB =
+ llvm::BasicBlock::Create(VMContext, "alias", LoadFunction);
+ llvm::BasicBlock *NoAliasBB =
+ llvm::BasicBlock::Create(VMContext, "no_alias", LoadFunction);
+
+ // Branch based on whether the runtime provided class_registerAlias_np()
+ llvm::Value *HasRegisterAlias = Builder.CreateICmpNE(RegisterAlias,
+ llvm::Constant::getNullValue(RegisterAlias->getType()));
+ Builder.CreateCondBr(HasRegisterAlias, AliasBB, NoAliasBB);
+
+ // The true branch (has alias registration function):
+ Builder.SetInsertPoint(AliasBB);
+ // Emit alias registration calls:
+ for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin();
+ iter != ClassAliases.end(); ++iter) {
+ llvm::Constant *TheClass =
+ TheModule.getGlobalVariable(("_OBJC_CLASS_" + iter->first).c_str(),
+ true);
+ if (0 != TheClass) {
+ TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy);
+ Builder.CreateCall2(RegisterAlias, TheClass,
+ MakeConstantString(iter->second));
+ }
+ }
+ // Jump to end:
+ Builder.CreateBr(NoAliasBB);
+
+ // Missing alias registration function, just return from the function:
+ Builder.SetInsertPoint(NoAliasBB);
+ }
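+
+ // A sketch of the code generated for one alias, assuming the runtime
+ // exports class_registerAlias_np():
+ //   if (&class_registerAlias_np) {
+ //     class_registerAlias_np((Class)&_OBJC_CLASS_Original, "Alias");
+ //   }
+ // The weak linkage on RegisterAlias is what makes the null check legal
+ // on runtimes that lack this function.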
Builder.CreateRetVoid();
return LoadFunction;
@@ -2312,7 +2411,7 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
- Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
MethodName, isClassMethod);
@@ -2332,12 +2431,20 @@ llvm::Constant *CGObjCGNU::GetPropertySetFunction() {
return SetPropertyFn;
}
+llvm::Constant *CGObjCGNU::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return 0;
+}
+
llvm::Constant *CGObjCGNU::GetGetStructFunction() {
return GetStructPropertyFn;
}
llvm::Constant *CGObjCGNU::GetSetStructFunction() {
return SetStructPropertyFn;
}
+llvm::Constant *CGObjCGNU::GetCppAtomicObjectFunction() {
+ return 0;
+}
llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
return EnumerationMutationFn;
@@ -2487,7 +2594,7 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
// to replace it with the real version for a library. In non-PIC code you
// must compile with the fragile ABI if you want to use ivars from a
// GCC-compiled class.
- if (CGM.getLangOptions().PICLevel) {
+ if (CGM.getLangOpts().PICLevel || CGM.getLangOpts().PIELevel) {
llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
Int32Ty, false,
llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
@@ -2533,7 +2640,7 @@ static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
- if (CGM.getLangOptions().ObjCNonFragileABI) {
+ if (CGM.getLangOpts().ObjCNonFragileABI) {
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
if (RuntimeVersion < 10)
return CGF.Builder.CreateZExtOrBitCast(
@@ -2547,7 +2654,10 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
Offset = new llvm::GlobalVariable(TheModule, IntTy,
false, llvm::GlobalValue::LinkOnceAnyLinkage,
llvm::Constant::getNullValue(IntTy), name);
- return CGF.Builder.CreateLoad(Offset);
+ Offset = CGF.Builder.CreateLoad(Offset);
+ if (Offset->getType() != PtrDiffTy)
+ Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
+ return Offset;
}
uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
return llvm::ConstantInt::get(PtrDiffTy, Offset, /*isSigned*/true);
@@ -2555,7 +2665,7 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
CGObjCRuntime *
clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
- if (CGM.getLangOptions().ObjCNonFragileABI)
+ if (CGM.getLangOpts().ObjCNonFragileABI)
return new CGObjCGNUstep(CGM);
return new CGObjCGCC(CGM);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
index 308e0c7..e5246f1 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -42,11 +42,8 @@
using namespace clang;
using namespace CodeGen;
-
namespace {
-typedef std::vector<llvm::Constant*> ConstantVector;
-
// FIXME: We should find a nicer way to make the labels for metadata, string
// concatenation is lame.
@@ -92,13 +89,28 @@ private:
/// would be unbalanced.
llvm::Constant *getMessageSendFpretFn() const {
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::Type::getDoubleTy(VMContext),
- params, true),
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.DoubleTy,
+ params, true),
"objc_msgSend_fpret");
}
+ /// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned in two values on the
+ /// x87 floating point stack; without a special entrypoint, the nil case
+ /// would be unbalanced. Only used on 64-bit X86.
+ llvm::Constant *getMessageSendFp2retFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext);
+ llvm::Type *resultType =
+ llvm::StructType::get(longDoubleType, longDoubleType, NULL);
+
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(resultType,
+ params, true),
+ "objc_msgSend_fp2ret");
+ }
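+
+ // A method that would route through this entry point (hypothetical):
+ //   @interface M : NSObject
+ //   - (_Complex long double)val;
+ //   @end
+ //   ... = [m val]; // returned in two x87 registers on x86-64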
+
/// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
///
/// The messenger used for super calls, which have different dispatch
@@ -159,7 +171,7 @@ protected:
public:
llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
- llvm::Type *Int8PtrTy;
+ llvm::Type *Int8PtrTy, *Int8PtrPtrTy;
/// ObjectPtrTy - LLVM type for object handles (typeof(id))
llvm::Type *ObjectPtrTy;
@@ -169,10 +181,26 @@ public:
/// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
llvm::Type *SelectorPtrTy;
+
+private:
/// ProtocolPtrTy - LLVM type for external protocol handles
/// (typeof(Protocol))
llvm::Type *ExternalProtocolPtrTy;
-
+
+public:
+ llvm::Type *getExternalProtocolPtrTy() {
+ if (!ExternalProtocolPtrTy) {
+ // FIXME: It would be nice to unify this with the opaque type, so that the
+ // IR comes out a bit cleaner.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+ }
+
+ return ExternalProtocolPtrTy;
+ }
+
// SuperCTy - clang type for struct objc_super.
QualType SuperCTy;
// SuperPtrCTy - clang type for struct objc_super *.
@@ -213,9 +241,9 @@ public:
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
- FunctionType::ExtInfo()),
- false);
+ Types.GetFunctionType(Types.arrangeFunctionType(IdType, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
@@ -233,12 +261,47 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- FunctionType::ExtInfo()),
- false);
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
+ llvm::Constant *getOptimizedSetPropertyFn(bool atomic, bool copy) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty_atomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_atomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+
+ SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ const char *name;
+ if (atomic && copy)
+ name = "objc_setProperty_atomic_copy";
+ else if (atomic && !copy)
+ name = "objc_setProperty_atomic";
+ else if (!atomic && copy)
+ name = "objc_setProperty_nonatomic_copy";
+ else
+ name = "objc_setProperty_nonatomic";
+
+ return CGM.CreateRuntimeFunction(FTy, name);
+ }
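+
+ // Mapping from the property's attributes to the entry point chosen above
+ // (sketch):
+ //   atomic    + copy   -> objc_setProperty_atomic_copy
+ //   atomic    + retain -> objc_setProperty_atomic
+ //   nonatomic + copy   -> objc_setProperty_nonatomic_copy
+ //   nonatomic + retain -> objc_setProperty_nonatomic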
llvm::Constant *getCopyStructFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -251,12 +314,31 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- FunctionType::ExtInfo()),
- false);
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
}
+ /// This routine declares and returns address of:
+ /// void objc_copyCppObjectAtomic(
+ /// void *dest, const void *src,
+ /// void (*copyHelper) (void *dest, const void *source));
+ llvm::Constant *getCppAtomicObjectFunction() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ /// void objc_copyCppObjectAtomic(void *dest, const void *src, void *helper);
+ SmallVector<CanQualType,3> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
+ }
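+
+ // A sketch of how the runtime is expected to use this, assuming a
+ // lock-guarded implementation:
+ //   void objc_copyCppObjectAtomic(void *dest, const void *src,
+ //                                 void (*copyHelper)(void *, const void *)) {
+ //     lock(src); copyHelper(dest, src); unlock(src);
+ //   }
+ // The helpers passed in are the atomic getter/setter copy helpers
+ // emitted in CGObjC.cpp.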
+
llvm::Constant *getEnumerationMutationFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -264,9 +346,9 @@ public:
SmallVector<CanQualType,1> Params;
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- FunctionType::ExtInfo()),
- false);
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
@@ -391,6 +473,14 @@ public:
return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
}
+ llvm::Constant *getSendFp2retFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFp2retFn();
+ }
+
+ llvm::Constant *getSendFp2RetFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFp2retFn();
+ }
+
ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
~ObjCCommonTypesHelper(){}
};
@@ -492,7 +582,8 @@ public:
llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
params, false),
- "_setjmp");
+ "_setjmp",
+ llvm::Attribute::ReturnsTwice);
}
public:
@@ -663,7 +754,6 @@ public:
};
protected:
- CodeGen::CodeGenModule &CGM;
llvm::LLVMContext &VMContext;
// FIXME! May not be needing this after all.
unsigned ObjCABI;
@@ -718,16 +808,16 @@ protected:
llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
/// DefinedClasses - List of defined classes.
- std::vector<llvm::GlobalValue*> DefinedClasses;
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedClasses;
/// DefinedNonLazyClasses - List of defined "non-lazy" classes.
- std::vector<llvm::GlobalValue*> DefinedNonLazyClasses;
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyClasses;
/// DefinedCategories - List of defined categories.
- std::vector<llvm::GlobalValue*> DefinedCategories;
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedCategories;
/// DefinedNonLazyCategories - List of defined "non-lazy" categories.
- std::vector<llvm::GlobalValue*> DefinedNonLazyCategories;
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories;
/// GetNameForMethod - Return a name for the given method.
/// \param[out] NameOut - The return value.
@@ -741,10 +831,11 @@ protected:
llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
/// GetMethodVarType - Return a unique constant for the given
- /// selector's name. The return value has type char *.
+ /// method's type encoding string. The return value has type char *.
// FIXME: This is a horrible name.
- llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D);
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended = false);
llvm::Constant *GetMethodVarType(const FieldDecl *D);
/// GetPropertyName - Return a unique constant for the given
@@ -775,7 +866,7 @@ protected:
void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
const llvm::StructLayout *Layout,
const RecordDecl *RD,
- const SmallVectorImpl<const FieldDecl*> &RecFields,
+ ArrayRef<const FieldDecl*> RecFields,
unsigned int BytePos, bool ForStrongLayout,
bool &HasUnion);
@@ -791,12 +882,19 @@ protected:
const ObjCContainerDecl *OCD,
const ObjCCommonTypesHelper &ObjCTypes);
+ /// EmitProtocolMethodTypes - Generate the array of extended method type
+ /// strings. The return value has type Int8PtrPtrTy.
+ llvm::Constant *EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
/// PushProtocolProperties - Push protocol's property on the input stack.
- void PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
- std::vector<llvm::Constant*> &Properties,
- const Decl *Container,
- const ObjCProtocolDecl *PROTO,
- const ObjCCommonTypesHelper &ObjCTypes);
+ void PushProtocolProperties(
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ llvm::SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes);
/// GetProtocolRef - Return a reference to the internal protocol
/// description, creating an empty one if it has not been
@@ -840,10 +938,10 @@ protected:
public:
CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
- CGM(cgm), VMContext(cgm.getLLVMContext()) { }
+ CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);
-
+
virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD=0);
@@ -915,7 +1013,7 @@ private:
/// given implementation. The return value has type ClassPtrTy.
llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
- const ConstantVector &Methods);
+ ArrayRef<llvm::Constant*> Methods);
llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
@@ -925,7 +1023,7 @@ private:
/// implementation. The return value has type MethodListPtrTy.
llvm::Constant *EmitMethodList(Twine Name,
const char *Section,
- const ConstantVector &Methods);
+ ArrayRef<llvm::Constant*> Methods);
/// EmitMethodDescList - Emit a method description list for a list of
/// method declarations.
@@ -940,7 +1038,7 @@ private:
/// The return value has type MethodDescriptionListPtrTy.
llvm::Constant *EmitMethodDescList(Twine Name,
const char *Section,
- const ConstantVector &Methods);
+ ArrayRef<llvm::Constant*> Methods);
/// GetOrEmitProtocol - Get the protocol object for the given
/// declaration, emitting it if necessary. The return value has type
@@ -959,8 +1057,9 @@ private:
/// ProtocolExtensionPtrTy.
llvm::Constant *
EmitProtocolExtension(const ObjCProtocolDecl *PD,
- const ConstantVector &OptInstanceMethods,
- const ConstantVector &OptClassMethods);
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt);
/// EmitProtocolList - Generate the list of referenced
/// protocols. The return value has type ProtocolListPtrTy.
@@ -1016,13 +1115,18 @@ public:
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
virtual llvm::Constant *GetPropertyGetFunction();
virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy);
virtual llvm::Constant *GetGetStructFunction();
virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetCppAtomicObjectFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -1096,7 +1200,7 @@ private:
/// AddModuleClassList - Add the given list of class pointers to the
/// module with the provided symbol and section names.
- void AddModuleClassList(const std::vector<llvm::GlobalValue*> &Container,
+ void AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
const char *SymbolName,
const char *SectionName);
@@ -1118,7 +1222,7 @@ private:
/// implementation. The return value has type MethodListnfABITy.
llvm::Constant *EmitMethodList(Twine Name,
const char *Section,
- const ConstantVector &Methods);
+ ArrayRef<llvm::Constant*> Methods);
/// EmitIvarList - Emit the ivar list for the given
/// implementation. If ForClass is true the list of class ivars
/// (i.e. metaclass ivars) is emitted, otherwise the list of
@@ -1267,6 +1371,9 @@ public:
virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
@@ -1279,12 +1386,20 @@ public:
return ObjCTypes.getSetPropertyFn();
}
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+ }
+
virtual llvm::Constant *GetSetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
virtual llvm::Constant *GetGetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
+ virtual llvm::Constant *GetCppAtomicObjectFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+ }
virtual llvm::Constant *EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
@@ -1325,13 +1440,13 @@ public:
/// value.
struct NullReturnState {
llvm::BasicBlock *NullBB;
-
- NullReturnState() : NullBB(0) {}
+ llvm::BasicBlock *callBB;
+ NullReturnState() : NullBB(0), callBB(0) {}
void init(CodeGenFunction &CGF, llvm::Value *receiver) {
// Make blocks for the null-init and call edges.
NullBB = CGF.createBasicBlock("msgSend.nullinit");
- llvm::BasicBlock *callBB = CGF.createBasicBlock("msgSend.call");
+ callBB = CGF.createBasicBlock("msgSend.call");
// Check for a null receiver and, if there is one, jump to the
// null-init test.
@@ -1342,8 +1457,16 @@ struct NullReturnState {
CGF.EmitBlock(callBB);
}
- RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType) {
+ RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
if (!NullBB) return result;
+
+ llvm::Value *NullInitPtr = 0;
+ if (result.isScalar() && !resultType->isVoidType()) {
+ NullInitPtr = CGF.CreateTempAlloca(result.getScalarVal()->getType());
+ CGF.Builder.CreateStore(result.getScalarVal(), NullInitPtr);
+ }
// Finish the call path.
llvm::BasicBlock *contBB = CGF.createBasicBlock("msgSend.cont");
@@ -1351,13 +1474,54 @@ struct NullReturnState {
// Emit the null-init block and perform the null-initialization there.
CGF.EmitBlock(NullBB);
- assert(result.isAggregate() && "null init of non-aggregate result?");
- CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+
+ // Release consumed arguments along the null-receiver path.
+ if (Method) {
+ CallArgList::const_iterator I = CallArgs.begin();
+ for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+ e = Method->param_end(); i != e; ++i, ++I) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ RValue RV = I->RV;
+ assert(RV.isScalar() &&
+ "NullReturnState::complete - arg not on object");
+ CGF.EmitARCRelease(RV.getScalarVal(), true);
+ }
+ }
+ }
+
+ if (result.isScalar()) {
+ if (NullInitPtr)
+ CGF.EmitNullInitialization(NullInitPtr, resultType);
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+ return NullInitPtr ? RValue::get(CGF.Builder.CreateLoad(NullInitPtr))
+ : result;
+ }
+
+ if (!resultType->isAnyComplexType()) {
+ assert(result.isAggregate() && "null init of non-aggregate result?");
+ CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+ return result;
+ }
- // Jump to the continuation block.
+ // _Complex type
+ // FIXME: It should now be easy to handle any other scalar type whose
+ // result is returned in memory due to ABI limitations.
CGF.EmitBlock(contBB);
-
- return result;
+ CodeGenFunction::ComplexPairTy CallCV = result.getComplexVal();
+ llvm::Type *MemberType = CallCV.first->getType();
+ llvm::Constant *ZeroCV = llvm::Constant::getNullValue(MemberType);
+ // Create phi instruction for scalar complex value.
+ llvm::PHINode *PHIReal = CGF.Builder.CreatePHI(MemberType, 2);
+ PHIReal->addIncoming(ZeroCV, NullBB);
+ PHIReal->addIncoming(CallCV.first, callBB);
+ llvm::PHINode *PHIImag = CGF.Builder.CreatePHI(MemberType, 2);
+ PHIImag->addIncoming(ZeroCV, NullBB);
+ PHIImag->addIncoming(CallCV.second, callBB);
+ return RValue::getComplex(PHIReal, PHIImag);
}
};
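// The _Complex branch of complete() above merges the two edges with phis:
// each component is zero when the receiver was nil and the call result
// otherwise. Reduced to bare IRBuilder calls (a sketch, not from the patch;
// block names are hypothetical):
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
static llvm::PHINode *mergeComponent(llvm::IRBuilder<> &B,
                                     llvm::Value *CallVal,
                                     llvm::BasicBlock *NullBB,
                                     llvm::BasicBlock *CallBB) {
  llvm::Type *Ty = CallVal->getType();
  llvm::PHINode *Phi = B.CreatePHI(Ty, /*NumReservedValues=*/2);
  Phi->addIncoming(llvm::Constant::getNullValue(Ty), NullBB); // nil receiver
  Phi->addIncoming(CallVal, CallBB);                          // real result
  return Phi;
}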
@@ -1428,7 +1592,6 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true);
llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
- return 0;
}
/// Generate a constant CFString object.
@@ -1452,11 +1615,15 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
llvm::Constant *CGObjCCommonMac::GenerateConstantString(
const StringLiteral *SL) {
- return (CGM.getLangOptions().NoConstantCFStrings == 0 ?
+ return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
CGM.GetAddrOfConstantCFString(SL) :
CGM.GetAddrOfConstantString(SL));
}
+enum {
+ kCFTaggedObjectID_Integer = (1 << 1) + 1
+};
+
/// Generates a message send where the super is the receiver. This is
/// a message send to self with special delivery semantics indicating
/// which class's method should be called.
@@ -1553,11 +1720,8 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
ActualArgs.addFrom(CallArgs);
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- FunctionType::ExtInfo());
- llvm::FunctionType *FTy =
- Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+ // If we're calling a method, use the formal signature.
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
if (Method)
assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
@@ -1567,20 +1731,38 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
NullReturnState nullReturn;
llvm::Constant *Fn = NULL;
- if (CGM.ReturnTypeUsesSRet(FnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
if (!IsSuper) nullReturn.init(CGF, Arg0);
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
: ObjCTypes.getSendStretFn(IsSuper);
} else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
: ObjCTypes.getSendFpretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFP2Ret(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFp2RetFn2(IsSuper)
+ : ObjCTypes.getSendFp2retFn(IsSuper);
} else {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
: ObjCTypes.getSendFn(IsSuper);
}
- Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
- RValue rvalue = CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
- return nullReturn.complete(CGF, rvalue, ResultType);
+
+ bool requiresnullCheck = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount && Method)
+ for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+ e = Method->param_end(); i != e; ++i) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, Arg0);
+ requiresnullCheck = true;
+ break;
+ }
+ }
+
+ Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
+ RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs);
+ return nullReturn.complete(CGF, rvalue, ResultType, CallArgs,
+ requiresnullCheck ? Method : 0);
}
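// The dispatch ladder above, flattened (a sketch, not from the patch):
// sret results go through objc_msgSend_stret, x87 fp returns through
// objc_msgSend_fpret, and the newly handled _Complex long double returns
// through objc_msgSend_fp2ret; everything else uses plain objc_msgSend.
static const char *messengerName(bool UsesSRet, bool UsesFPRet,
                                 bool UsesFP2Ret) {
  if (UsesSRet)   return "objc_msgSend_stret";
  if (UsesFPRet)  return "objc_msgSend_fpret";
  if (UsesFP2Ret) return "objc_msgSend_fp2ret";
  return "objc_msgSend";
}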
static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
@@ -1590,6 +1772,10 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak)
return Qualifiers::Weak;
+ // Check for __unsafe_unretained.
+ if (FQT.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
+ return Qualifiers::GCNone;
+
if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
return Qualifiers::Strong;
@@ -1601,11 +1787,10 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
- llvm::Constant *nullPtr =
- llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
- if (CGM.getLangOptions().getGC() == LangOptions::NonGC &&
- !CGM.getLangOptions().ObjCAutoRefCount)
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount)
return nullPtr;
bool hasUnion = false;
@@ -1673,10 +1858,10 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
std::string BitMap;
llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
- if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
printf("\n block variable layout for block: ");
const unsigned char *s = (unsigned char*)BitMap.c_str();
- for (unsigned i = 0; i < BitMap.size(); i++)
+ for (unsigned i = 0, e = BitMap.size(); i < e; i++)
if (!(s[i] & 0xf0))
printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
else
@@ -1694,7 +1879,7 @@ llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
- ObjCTypes.ExternalProtocolPtrTy);
+ ObjCTypes.getExternalProtocolPtrTy());
}
void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
@@ -1729,12 +1914,16 @@ struct _objc__method_prototype_list *class_methods
See EmitProtocolExtension().
*/
llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
- llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
// Early exit if a defining object has already been generated.
if (Entry && Entry->hasInitializer())
return Entry;
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
// FIXME: I don't understand why gcc generates this, or where it is
// resolved. Investigate. It's also wasteful to look this up over and over.
LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
@@ -1742,6 +1931,7 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
// Construct method lists.
std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
for (ObjCProtocolDecl::instmeth_iterator
i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
ObjCMethodDecl *MD = *i;
@@ -1751,8 +1941,10 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
} else {
InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
}
}
@@ -1765,26 +1957,30 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
} else {
ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
}
}
- std::vector<llvm::Constant*> Values(5);
- Values[0] = EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods);
- Values[1] = GetClassName(PD->getIdentifier());
- Values[2] =
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[] = {
+ EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
+ MethodTypesExt),
+ GetClassName(PD->getIdentifier()),
EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
PD->protocol_begin(),
- PD->protocol_end());
- Values[3] =
+ PD->protocol_end()),
EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
"__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods);
- Values[4] =
+ InstanceMethods),
EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
"__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods);
+ ClassMethods)
+ };
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
Values);
@@ -1801,6 +1997,8 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
// FIXME: Is this necessary? Why only for protocol?
Entry->setAlignment(4);
+
+ Protocols[PD->getIdentifier()] = Entry;
}
CGM.AddUsedGlobal(Entry);
@@ -1833,31 +2031,34 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
struct objc_method_description_list *optional_instance_methods;
struct objc_method_description_list *optional_class_methods;
struct objc_property_list *instance_properties;
+ const char ** extendedMethodTypes;
};
*/
llvm::Constant *
CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
- const ConstantVector &OptInstanceMethods,
- const ConstantVector &OptClassMethods) {
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt) {
uint64_t Size =
CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
- std::vector<llvm::Constant*> Values(4);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Values[1] =
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+ PD->getName(),
"__OBJC,__cat_inst_meth,regular,no_dead_strip",
- OptInstanceMethods);
- Values[2] =
+ OptInstanceMethods),
EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
"__OBJC,__cat_cls_meth,regular,no_dead_strip",
- OptClassMethods);
- Values[3] = EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(),
- 0, PD, ObjCTypes);
+ OptClassMethods),
+ EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(), 0, PD,
+ ObjCTypes),
+ EmitProtocolMethodTypes("\01L_OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ MethodTypesExt, ObjCTypes)
+ };
// Return null if no extension bits are used.
if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
- Values[3]->isNullValue())
+ Values[3]->isNullValue() && Values[4]->isNullValue())
return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
llvm::Constant *Init =
@@ -1871,16 +2072,16 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
/*
struct objc_protocol_list {
- struct objc_protocol_list *next;
- long count;
- Protocol *list[];
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
};
*/
llvm::Constant *
CGObjCMac::EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
- std::vector<llvm::Constant*> ProtocolRefs;
+ llvm::SmallVector<llvm::Constant*, 16> ProtocolRefs;
for (; begin != end; ++begin)
ProtocolRefs.push_back(GetProtocolRef(*begin));
@@ -1909,12 +2110,12 @@ CGObjCMac::EmitProtocolList(Twine Name,
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
}
-void CGObjCCommonMac::PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
- std::vector<llvm::Constant*> &Properties,
- const Decl *Container,
- const ObjCProtocolDecl *PROTO,
- const ObjCCommonTypesHelper &ObjCTypes) {
- std::vector<llvm::Constant*> Prop(2);
+void CGObjCCommonMac::
+PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
+ llvm::SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes) {
for (ObjCProtocolDecl::protocol_iterator P = PROTO->protocol_begin(),
E = PROTO->protocol_end(); P != E; ++P)
PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
@@ -1923,36 +2124,40 @@ void CGObjCCommonMac::PushProtocolProperties(llvm::SmallPtrSet<const IdentifierI
const ObjCPropertyDecl *PD = *I;
if (!PropertySet.insert(PD->getIdentifier()))
continue;
- Prop[0] = GetPropertyName(PD->getIdentifier());
- Prop[1] = GetPropertyTypeString(PD, Container);
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
}
}
/*
struct _objc_property {
- const char * const name;
- const char * const attributes;
+ const char * const name;
+ const char * const attributes;
};
struct _objc_property_list {
- uint32_t entsize; // sizeof (struct _objc_property)
- uint32_t prop_count;
- struct _objc_property[prop_count];
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
};
*/
llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
const Decl *Container,
const ObjCContainerDecl *OCD,
const ObjCCommonTypesHelper &ObjCTypes) {
- std::vector<llvm::Constant*> Properties, Prop(2);
+ llvm::SmallVector<llvm::Constant*, 16> Properties;
llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(),
E = OCD->prop_end(); I != E; ++I) {
const ObjCPropertyDecl *PD = *I;
PropertySet.insert(PD->getIdentifier());
- Prop[0] = GetPropertyName(PD->getIdentifier());
- Prop[1] = GetPropertyTypeString(PD, Container);
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
Prop));
}
@@ -1993,6 +2198,26 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
}
+llvm::Constant *
+CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ // Return null for empty list.
+ if (MethodTypes.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrPtrTy);
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ MethodTypes.size());
+ llvm::Constant *Init = llvm::ConstantArray::get(AT, MethodTypes);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" : 0,
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
+}
+
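// Shape of what EmitProtocolMethodTypes builds: an [N x i8*] of extended
// @encode strings, returned as i8** (or a null i8** when the list is empty).
// Stand-alone equivalent under 3.x-era APIs (a sketch, not from the patch;
// names are hypothetical):
#include "llvm/IR/Constants.h"
#include "llvm/IR/Module.h"
static llvm::Constant *
emitStringPtrArray(llvm::Module &M, llvm::LLVMContext &Ctx,
                   llvm::ArrayRef<llvm::Constant*> Ptrs,
                   llvm::Type *Int8PtrPtrTy) {
  if (Ptrs.empty())
    return llvm::Constant::getNullValue(Int8PtrPtrTy);
  llvm::ArrayType *AT =
      llvm::ArrayType::get(llvm::Type::getInt8PtrTy(Ctx), Ptrs.size());
  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      M, AT, /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
      llvm::ConstantArray::get(AT, Ptrs), "method_types");
  return llvm::ConstantExpr::getBitCast(GV, Int8PtrPtrTy);
}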
/*
struct objc_method_description_list {
int count;
@@ -2001,11 +2226,11 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
*/
llvm::Constant *
CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
- std::vector<llvm::Constant*> Desc(2);
- Desc[0] =
+ llvm::Constant *Desc[] = {
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
- Desc[1] = GetMethodVarType(MD);
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD)
+ };
if (!Desc[1])
return 0;
@@ -2013,9 +2238,9 @@ CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
Desc);
}
-llvm::Constant *CGObjCMac::EmitMethodDescList(Twine Name,
- const char *Section,
- const ConstantVector &Methods) {
+llvm::Constant *
+CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
// Return null for empty list.
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
@@ -2054,11 +2279,11 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
const ObjCCategoryDecl *Category =
Interface->FindCategoryDeclaration(OCD->getIdentifier());
- llvm::SmallString<256> ExtName;
+ SmallString<256> ExtName;
llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
<< OCD->getName();
- std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
for (ObjCCategoryImplDecl::instmeth_iterator
i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
// Instance methods should always be defined.
@@ -2164,7 +2389,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= eClassFlags_Hidden;
- std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
for (ObjCImplementationDecl::instmeth_iterator
i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
// Instance methods should always be defined.
@@ -2244,7 +2469,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
- const ConstantVector &Methods) {
+ ArrayRef<llvm::Constant*> Methods) {
unsigned Flags = eClassFlags_Meta;
unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
@@ -2384,19 +2609,19 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
/*
struct objc_ivar {
- char *ivar_name;
- char *ivar_type;
- int ivar_offset;
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
};
struct objc_ivar_list {
- int ivar_count;
- struct objc_ivar list[count];
+ int ivar_count;
+ struct objc_ivar list[count];
};
*/
llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
bool ForClass) {
- std::vector<llvm::Constant*> Ivars, Ivar(3);
+ std::vector<llvm::Constant*> Ivars;
// When emitting the root class GCC emits ivar entries for the
// actual class structure. It is not clear if we need to follow this
@@ -2413,10 +2638,12 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
- Ivar[0] = GetMethodVarName(IVD->getIdentifier());
- Ivar[1] = GetMethodVarType(IVD);
- Ivar[2] = llvm::ConstantInt::get(ObjCTypes.IntTy,
- ComputeIvarBaseOffset(CGM, OID, IVD));
+ llvm::Constant *Ivar[] = {
+ GetMethodVarName(IVD->getIdentifier()),
+ GetMethodVarType(IVD),
+ llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD))
+ };
Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
}
@@ -2465,18 +2692,18 @@ llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
if (!Fn)
return 0;
- std::vector<llvm::Constant*> Method(3);
- Method[0] =
+ llvm::Constant *Method[] = {
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
- Method[1] = GetMethodVarType(MD);
- Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
}
llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
const char *Section,
- const ConstantVector &Methods) {
+ ArrayRef<llvm::Constant*> Methods) {
// Return null for empty list.
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
@@ -2495,12 +2722,12 @@ llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
GetNameForMethod(OMD, CD, Name);
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
- Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
llvm::Function *Method =
llvm::Function::Create(MethodTy,
llvm::GlobalValue::InternalLinkage,
@@ -2544,6 +2771,11 @@ llvm::Constant *CGObjCMac::GetPropertySetFunction() {
return ObjCTypes.getSetPropertyFn();
}
+llvm::Constant *CGObjCMac::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+}
+
llvm::Constant *CGObjCMac::GetGetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
@@ -2551,6 +2783,10 @@ llvm::Constant *CGObjCMac::GetSetStructFunction() {
return ObjCTypes.getCopyStructFn();
}
+llvm::Constant *CGObjCMac::GetCppAtomicObjectFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+}
+
llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
}
@@ -2959,6 +3195,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::CallInst *SetJmpResult =
CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
SetJmpResult->setDoesNotThrow();
+ SetJmpResult->setCanReturnTwice();
// If setjmp returned 0, enter the protected block; otherwise,
// branch to the handler.
@@ -3024,6 +3261,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
"setjmp.result");
SetJmpResult->setDoesNotThrow();
+ SetJmpResult->setCanReturnTwice();
llvm::Value *Threw =
CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
@@ -3379,41 +3617,48 @@ enum ImageInfoFlags {
void CGObjCCommonMac::EmitImageInfo() {
unsigned version = 0; // Version is unused?
- unsigned flags = 0;
-
- // FIXME: Fix and continue?
- if (CGM.getLangOptions().getGC() != LangOptions::NonGC)
- flags |= eImageInfo_GarbageCollected;
- if (CGM.getLangOptions().getGC() == LangOptions::GCOnly)
- flags |= eImageInfo_GCOnly;
-
- // We never allow @synthesize of a superclass property.
- flags |= eImageInfo_CorrectedSynthesize;
-
- llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
-
- // Emitted as int[2];
- llvm::Constant *values[2] = {
- llvm::ConstantInt::get(Int32Ty, version),
- llvm::ConstantInt::get(Int32Ty, flags)
- };
- llvm::ArrayType *AT = llvm::ArrayType::get(Int32Ty, 2);
-
- const char *Section;
- if (ObjCABI == 1)
- Section = "__OBJC, __image_info,regular";
- else
- Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
- llvm::GlobalVariable *GV =
- CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
- llvm::ConstantArray::get(AT, values),
- Section,
- 0,
- true);
- GV->setConstant(true);
+ const char *Section = (ObjCABI == 1) ?
+ "__OBJC, __image_info,regular" :
+ "__DATA, __objc_imageinfo, regular, no_dead_strip";
+
+ // Generate module-level named metadata to convey this information to the
+ // linker and code-gen.
+ llvm::Module &Mod = CGM.getModule();
+
+ // Add the ObjC ABI version to the module flags.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Version", ObjCABI);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Version",
+ version);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section",
+ llvm::MDString::get(VMContext, Section));
+
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ // Non-GC overrides those files which specify GC.
+ Mod.addModuleFlag(llvm::Module::Override,
+ "Objective-C Garbage Collection", (uint32_t)0);
+ } else {
+ // Add the ObjC garbage collection value.
+ Mod.addModuleFlag(llvm::Module::Error,
+ "Objective-C Garbage Collection",
+ eImageInfo_GarbageCollected);
+
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ // Add the ObjC GC Only value.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C GC Only",
+ eImageInfo_GCOnly);
+
+ // Require that GC be specified and set to eImageInfo_GarbageCollected.
+ llvm::Value *Ops[2] = {
+ llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ eImageInfo_GarbageCollected)
+ };
+ Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
+ llvm::MDNode::get(VMContext, Ops));
+ }
+ }
}
-
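// The module-flag encoding above, in isolation (a sketch, not from the
// patch; values here are illustrative, the real code passes ObjCABI and
// friends). Error makes conflicting values an IR-link/LTO error, Override
// lets a Non-GC translation unit win over GC inputs, and Require (used for
// "Objective-C GC Only" above) demands another flag hold a specific value.
#include "llvm/IR/Module.h"
static void addImageInfoFlags(llvm::Module &M, llvm::LLVMContext &Ctx) {
  M.addModuleFlag(llvm::Module::Error, "Objective-C Version", 2);
  M.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Version", 0);
  M.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section",
                  llvm::MDString::get(Ctx,
                      "__DATA, __objc_imageinfo, regular, no_dead_strip"));
  M.addModuleFlag(llvm::Module::Override,
                  "Objective-C Garbage Collection", (uint32_t)0);
}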
// struct objc_module {
// unsigned long version;
// unsigned long size;
@@ -3427,12 +3672,13 @@ static const int ModuleVersion = 7;
void CGObjCMac::EmitModuleInfo() {
uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
- std::vector<llvm::Constant*> Values(4);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
- // This used to be the filename, now it is unused. <rdr://4327263>
- Values[2] = GetClassName(&CGM.getContext().Idents.get(""));
- Values[3] = EmitModuleSymbols();
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
+ llvm::ConstantInt::get(ObjCTypes.LongTy, Size),
+ // This used to be the filename; it is now unused. <rdr://4327263>
+ GetClassName(&CGM.getContext().Idents.get("")),
+ EmitModuleSymbols()
+ };
CreateMetadataVar("\01L_OBJC_MODULES",
llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
"__OBJC,__module_info,regular,no_dead_strip",
@@ -3455,7 +3701,7 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
// The runtime expects exactly the list of defined classes followed
// by the list of defined categories, in a single array.
- std::vector<llvm::Constant*> Symbols(NumClasses + NumCategories);
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories);
for (unsigned i=0; i<NumClasses; i++)
Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
ObjCTypes.Int8PtrTy);
@@ -3466,7 +3712,7 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
Values[4] =
llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
- NumClasses + NumCategories),
+ Symbols.size()),
Symbols);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
@@ -3531,8 +3777,8 @@ llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantArray::get(VMContext,
- Ident->getNameStart()),
+ llvm::ConstantDataArray::getString(VMContext,
+ Ident->getNameStart()),
((ObjCABI == 2) ?
"__TEXT,__objc_classname,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -3575,7 +3821,7 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
const llvm::StructLayout *Layout,
const RecordDecl *RD,
- const SmallVectorImpl<const FieldDecl*> &RecFields,
+ ArrayRef<const FieldDecl*> RecFields,
unsigned int BytePos, bool ForStrongLayout,
bool &HasUnion) {
bool IsUnion = (RD && RD->isUnion());
@@ -3593,7 +3839,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
return;
unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
- if (!RD && CGM.getLangOptions().ObjCAutoRefCount) {
+ if (!RD && CGM.getLangOpts().ObjCAutoRefCount) {
const FieldDecl *FirstField = RecFields[0];
FirstFieldDelta =
ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
@@ -3740,9 +3986,9 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
/// the given argument BitMap string container. Routine reads
/// two containers, IvarsInfo and SkipIvars which are assumed to be
/// filled already by the caller.
-llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
+llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
unsigned int WordsToScan, WordsToSkip;
- llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
// Build the string of skip/scan nibbles
SmallVector<SKIP_SCAN, 32> SkipScanIvars;
@@ -3857,7 +4103,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
llvm::GlobalVariable * Entry =
CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantArray::get(VMContext, BitMap.c_str()),
+ llvm::ConstantDataArray::getString(VMContext, BitMap, false),
((ObjCABI == 2) ?
"__TEXT,__objc_classname,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -3886,14 +4132,14 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
bool ForStrongLayout) {
bool hasUnion = false;
- llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- if (CGM.getLangOptions().getGC() == LangOptions::NonGC &&
- !CGM.getLangOptions().ObjCAutoRefCount)
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount)
return llvm::Constant::getNullValue(PtrTy);
const ObjCInterfaceDecl *OI = OMD->getClassInterface();
SmallVector<const FieldDecl*, 32> RecFields;
- if (CGM.getLangOptions().ObjCAutoRefCount) {
+ if (CGM.getLangOpts().ObjCAutoRefCount) {
for (const ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar())
RecFields.push_back(cast<FieldDecl>(IVD));
@@ -3925,12 +4171,12 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
std::string BitMap;
llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
- if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
printf("\n%s ivar layout for class '%s': ",
ForStrongLayout ? "strong" : "weak",
OMD->getClassInterface()->getName().data());
const unsigned char *s = (unsigned char*)BitMap.c_str();
- for (unsigned i = 0; i < BitMap.size(); i++)
+ for (unsigned i = 0, e = BitMap.size(); i < e; i++)
if (!(s[i] & 0xf0))
printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
else
@@ -3943,10 +4189,10 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
- // FIXME: Avoid std::string copying.
+ // FIXME: Avoid std::string in "Sel.getAsString()"
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
- llvm::ConstantArray::get(VMContext, Sel.getAsString()),
+ llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
((ObjCABI == 2) ?
"__TEXT,__objc_methname,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -3968,7 +4214,7 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
- llvm::ConstantArray::get(VMContext, TypeStr),
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
((ObjCABI == 2) ?
"__TEXT,__objc_methtype,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -3977,18 +4223,17 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
return getConstantGEP(VMContext, Entry, 0, 0);
}
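// The ConstantArray::get(Ctx, Str) -> ConstantDataArray::getString(...)
// migrations in this hunk and the surrounding ones use the packed string
// representation LLVM 3.1 introduced: raw bytes instead of one Constant
// node per character. The third argument controls the implicit NUL.
// Sketch, not from the patch:
#include "llvm/IR/Constants.h"
static llvm::Constant *makeCString(llvm::LLVMContext &Ctx, llvm::StringRef S,
                                   bool AddNul) {
  return llvm::ConstantDataArray::getString(Ctx, S, AddNul);
}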
-llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended) {
std::string TypeStr;
- if (CGM.getContext().getObjCEncodingForMethodDecl(
- const_cast<ObjCMethodDecl*>(D),
- TypeStr))
+ if (CGM.getContext().getObjCEncodingForMethodDecl(D, TypeStr, Extended))
return 0;
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
- llvm::ConstantArray::get(VMContext, TypeStr),
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
((ObjCABI == 2) ?
"__TEXT,__objc_methtype,cstring_literals" :
"__TEXT,__cstring,cstring_literals"),
@@ -4003,8 +4248,8 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
if (!Entry)
Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_",
- llvm::ConstantArray::get(VMContext,
- Ident->getNameStart()),
+ llvm::ConstantDataArray::getString(VMContext,
+ Ident->getNameStart()),
"__TEXT,__cstring,cstring_literals",
1, true);
@@ -4030,7 +4275,7 @@ void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
<< '[' << CD->getName();
if (const ObjCCategoryImplDecl *CID =
dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
- OS << '(' << CID << ')';
+ OS << '(' << *CID << ')';
OS << ' ' << D->getSelector().getAsString() << ']';
}
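// Why the one-character "<< CID" -> "<< *CID" fix above matters: CID is a
// pointer, so streaming it prints an address; dereferencing it hits the
// NamedDecl stream overload and prints the category name. The same trap in
// plain C++ (a sketch, not from the patch):
#include <sstream>
#include <string>
struct Named { const char *Name; };
static std::ostream &operator<<(std::ostream &OS, const Named &D) {
  return OS << D.Name;
}
static std::string demo(const Named *CID) {
  std::ostringstream OS;
  OS << '(' << *CID << ')';  // "(Foo)"; "OS << CID" would print an address
  return OS.str();
}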
@@ -4044,7 +4289,7 @@ void CGObjCMac::FinishModule() {
if (I->second->hasInitializer())
continue;
- std::vector<llvm::Constant*> Values(5);
+ llvm::Constant *Values[5];
Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
Values[1] = GetClassName(I->first);
Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
@@ -4062,7 +4307,7 @@ void CGObjCMac::FinishModule() {
//
// FIXME: It would be nice if we had an LLVM construct for this.
if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
- llvm::SmallString<256> Asm;
+ SmallString<256> Asm;
Asm += CGM.getModule().getModuleInlineAsm();
if (!Asm.empty() && Asm.back() != '\n')
Asm += '\n';
@@ -4077,7 +4322,7 @@ void CGObjCMac::FinishModule() {
OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
}
- for (size_t i = 0; i < DefinedCategoryNames.size(); ++i) {
+ for (size_t i = 0, e = DefinedCategoryNames.size(); i < e; ++i) {
OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
<< "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
}
@@ -4096,7 +4341,8 @@ CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
/* *** */
ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
- : VMContext(cgm.getLLVMContext()), CGM(cgm) {
+ : VMContext(cgm.getLLVMContext()), CGM(cgm), ExternalProtocolPtrTy(0)
+{
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -4104,17 +4350,13 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
IntTy = Types.ConvertType(Ctx.IntTy);
LongTy = Types.ConvertType(Ctx.LongTy);
LongLongTy = Types.ConvertType(Ctx.LongLongTy);
- Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Int8PtrTy = CGM.Int8PtrTy;
+ Int8PtrPtrTy = CGM.Int8PtrPtrTy;
ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
- // FIXME: It would be nice to unify this with the opaque type, so that the IR
- // comes out a bit cleaner.
- llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
- ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
-
// I'm not sure I like this. The implicit coordination is a bit
// gross. We should solve this in a reasonable fashion because this
// is a pretty common task (match some runtime data structure with
@@ -4206,12 +4448,13 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_method_description_list *optional_instance_methods;
// struct _objc_method_description_list *optional_class_methods;
// struct _objc_property_list *instance_properties;
+ // const char ** extendedMethodTypes;
// }
ProtocolExtensionTy =
llvm::StructType::create("struct._objc_protocol_extension",
IntTy, MethodDescriptionListPtrTy,
MethodDescriptionListPtrTy, PropertyListPtrTy,
- NULL);
+ Int8PtrPtrTy, NULL);
// struct _objc_protocol_extension *
ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
@@ -4349,14 +4592,12 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
uint64_t SetJmpBufferSize = 18;
// Exceptions
- llvm::Type *StackPtrTy = llvm::ArrayType::get(
- llvm::Type::getInt8PtrTy(VMContext), 4);
+ llvm::Type *StackPtrTy = llvm::ArrayType::get(CGM.Int8PtrTy, 4);
ExceptionDataTy =
llvm::StructType::create("struct._objc_exception_data",
- llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
- SetJmpBufferSize),
- StackPtrTy, NULL);
+ llvm::ArrayType::get(CGM.Int32Ty, SetJmpBufferSize),
+ StackPtrTy, NULL);
}
@@ -4384,6 +4625,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// const struct _prop_list_t * properties;
// const uint32_t size; // sizeof(struct _protocol_t)
// const uint32_t flags; // = 0
+ // const char ** extendedMethodTypes;
// }
// Holder for struct _protocol_list_t *
@@ -4395,7 +4637,8 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
llvm::PointerType::getUnqual(ProtocolListnfABITy),
MethodListnfABIPtrTy, MethodListnfABIPtrTy,
MethodListnfABIPtrTy, MethodListnfABIPtrTy,
- PropertyListPtrTy, IntTy, IntTy, NULL);
+ PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy,
+ NULL);
// struct _protocol_t*
ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
@@ -4555,23 +4798,22 @@ llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
return NULL;
}
-void CGObjCNonFragileABIMac::AddModuleClassList(const
- std::vector<llvm::GlobalValue*>
- &Container,
- const char *SymbolName,
- const char *SectionName) {
+void CGObjCNonFragileABIMac::
+AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
+ const char *SymbolName,
+ const char *SectionName) {
unsigned NumClasses = Container.size();
if (!NumClasses)
return;
- std::vector<llvm::Constant*> Symbols(NumClasses);
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses);
for (unsigned i=0; i<NumClasses; i++)
Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
ObjCTypes.Int8PtrTy);
- llvm::Constant* Init =
+ llvm::Constant *Init =
llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
- NumClasses),
+ Symbols.size()),
Symbols);
llvm::GlobalVariable *GV =
@@ -4593,14 +4835,14 @@ void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
"\01L_OBJC_LABEL_CLASS_$",
"__DATA, __objc_classlist, regular, no_dead_strip");
- for (unsigned i = 0; i < DefinedClasses.size(); i++) {
+ for (unsigned i = 0, e = DefinedClasses.size(); i < e; i++) {
llvm::GlobalValue *IMPLGV = DefinedClasses[i];
if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
continue;
IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
}
- for (unsigned i = 0; i < DefinedMetaClasses.size(); i++) {
+ for (unsigned i = 0, e = DefinedMetaClasses.size(); i < e; i++) {
llvm::GlobalValue *IMPLGV = DefinedMetaClasses[i];
if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
continue;
@@ -4631,8 +4873,6 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
// At various points we've experimented with using vtable-based
// dispatch for all methods.
switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
- default:
- llvm_unreachable("Invalid dispatch method!");
case CodeGenOptions::Legacy:
return false;
case CodeGenOptions::NonLegacy:
@@ -4653,7 +4893,7 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
// These are vtable-based if GC is disabled.
// Optimistically use vtable dispatch for hybrid compiles.
- if (CGM.getLangOptions().getGC() != LangOptions::GCOnly) {
+ if (CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
VTableDispatchMethods.insert(GetNullarySelector("retain"));
VTableDispatchMethods.insert(GetNullarySelector("release"));
VTableDispatchMethods.insert(GetNullarySelector("autorelease"));
@@ -4669,7 +4909,7 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
// These are vtable-based if GC is enabled.
// Optimistically use vtable dispatch for hybrid compiles.
- if (CGM.getLangOptions().getGC() != LangOptions::NonGC) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC) {
VTableDispatchMethods.insert(GetNullarySelector("hash"));
VTableDispatchMethods.insert(GetUnarySelector("addObject"));
@@ -4721,9 +4961,9 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
unsigned InstanceSize,
const ObjCImplementationDecl *ID) {
std::string ClassName = ID->getNameAsString();
- std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+ llvm::Constant *Values[10]; // 11 for 64bit targets!
- if (CGM.getLangOptions().ObjCAutoRefCount)
+ if (CGM.getLangOpts().ObjCAutoRefCount)
flags |= CLS_COMPILED_BY_ARC;
Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
@@ -4819,14 +5059,15 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
llvm::Constant *SuperClassGV,
llvm::Constant *ClassRoGV,
bool HiddenVisibility) {
- std::vector<llvm::Constant*> Values(5);
- Values[0] = IsAGV;
- Values[1] = SuperClassGV;
+ llvm::Constant *Values[] = {
+ IsAGV,
+ SuperClassGV,
+ ObjCEmptyCacheVar, // &ObjCEmptyCacheVar
+ ObjCEmptyVtableVar, // &ObjCEmptyVtableVar
+ ClassRoGV // &CLASS_RO_GV
+ };
if (!Values[1])
Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
- Values[2] = ObjCEmptyCacheVar; // &ObjCEmptyCacheVar
- Values[3] = ObjCEmptyVtableVar; // &ObjCEmptyVtableVar
- Values[4] = ClassRoGV; // &CLASS_RO_GV
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
Values);
llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
@@ -4987,7 +5228,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
//
llvm::Constant *Init =
llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
- ObjCTypes.ExternalProtocolPtrTy);
+ ObjCTypes.getExternalProtocolPtrTy());
std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
ProtocolName += PD->getName();
@@ -5025,7 +5266,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
std::string ExtClassName(getClassSymbolPrefix() +
Interface->getNameAsString());
- std::vector<llvm::Constant*> Values(6);
+ llvm::Constant *Values[6];
Values[0] = GetClassName(OCD->getIdentifier());
// meta-class entry symbol
llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
@@ -5064,7 +5305,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
const ObjCCategoryDecl *Category =
Interface->FindCategoryDeclaration(OCD->getIdentifier());
if (Category) {
- llvm::SmallString<256> ExtName;
+ SmallString<256> ExtName;
llvm::raw_svector_ostream(ExtName) << Interface->getName() << "_$_"
<< OCD->getName();
Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
@@ -5110,12 +5351,12 @@ llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
if (!Fn)
return 0;
- std::vector<llvm::Constant*> Method(3);
- Method[0] =
+ llvm::Constant *Method[] = {
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
- Method[1] = GetMethodVarType(MD);
- Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
}
@@ -5126,9 +5367,10 @@ llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
/// struct _objc_method method_list[method_count];
/// }
///
-llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
- const char *Section,
- const ConstantVector &Methods) {
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
// Return null for empty list.
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
@@ -5215,7 +5457,7 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
const ObjCImplementationDecl *ID) {
- std::vector<llvm::Constant*> Ivars, Ivar(5);
+ std::vector<llvm::Constant*> Ivars;
const ObjCInterfaceDecl *OID = ID->getClassInterface();
assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
@@ -5227,6 +5469,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
+ llvm::Constant *Ivar[5];
Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
ComputeIvarBaseOffset(CGM, ID, IVD));
Ivar[1] = GetMethodVarName(IVD->getIdentifier());
@@ -5304,21 +5547,27 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
/// const struct _prop_list_t * properties;
/// const uint32_t size; // sizeof(struct _protocol_t)
/// const uint32_t flags; // = 0
+/// const char ** extendedMethodTypes;
/// }
/// @endcode
///
llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
const ObjCProtocolDecl *PD) {
- llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
// Early exit if a defining object has already been generated.
if (Entry && Entry->hasInitializer())
return Entry;
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
// Construct method lists.
std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
for (ObjCProtocolDecl::instmeth_iterator
i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
ObjCMethodDecl *MD = *i;
@@ -5328,8 +5577,10 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
} else {
InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
}
}
@@ -5342,12 +5593,17 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
} else {
ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
}
}
- std::vector<llvm::Constant*> Values(10);
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[11];
// isa is NULL
Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
Values[1] = GetClassName(PD->getIdentifier());
@@ -5377,6 +5633,9 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ + PD->getName(),
+ MethodTypesExt, ObjCTypes);
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
Values);
@@ -5392,6 +5651,8 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
Entry->setAlignment(
CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
Entry->setSection("__DATA,__datacoal_nt,coalesced");
+
+ Protocols[PD->getIdentifier()] = Entry;
}
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.AddUsedGlobal(Entry);
@@ -5422,14 +5683,14 @@ llvm::Constant *
CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
- std::vector<llvm::Constant*> ProtocolRefs;
+ llvm::SmallVector<llvm::Constant*, 16> ProtocolRefs;
// Just return null for empty protocol lists
if (begin == end)
return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
// FIXME: We shouldn't need to do this lookup here, should we?
- llvm::SmallString<256> TmpName;
+ SmallString<256> TmpName;
Name.toVector(TmpName);
llvm::GlobalVariable *GV =
CGM.getModule().getGlobalVariable(TmpName.str(), true);
@@ -5447,10 +5708,9 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
Values[0] =
llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
Values[1] =
- llvm::ConstantArray::get(
- llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
- ProtocolRefs.size()),
- ProtocolRefs);
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
@@ -5473,7 +5733,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
llvm::Constant *
CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
- std::vector<llvm::Constant*> Desc(3);
+ llvm::Constant *Desc[3];
Desc[0] =
llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
ObjCTypes.SelectorPtrTy);
@@ -5499,8 +5759,13 @@ LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers) {
ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ llvm::Value *Offset = EmitIvarOffset(CGF, ID, Ivar);
+ if (llvm::LoadInst *LI = dyn_cast<llvm::LoadInst>(Offset))
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext,
+ ArrayRef<llvm::Value*>()));
return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
- EmitIvarOffset(CGF, ID, Ivar));
+ Offset);
}
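// Tagging the ivar-offset load with !invariant.load, as above, promises the
// optimizer that the loaded memory never changes, so the load can be freely
// hoisted and combined. Stand-alone form under the 3.x-era metadata API the
// diff uses (a sketch, not from the patch):
static void markInvariantLoad(llvm::LoadInst *LI, llvm::Module &M,
                              llvm::LLVMContext &Ctx) {
  LI->setMetadata(M.getMDKindID("invariant.load"),
                  llvm::MDNode::get(Ctx, llvm::ArrayRef<llvm::Value*>()));
}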
llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
@@ -5557,9 +5822,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
args.insert(args.end(), formalArgs.begin(), formalArgs.end());
- const CGFunctionInfo &fnInfo =
- CGM.getTypes().getFunctionInfo(resultType, args,
- FunctionType::ExtInfo());
+ MessageSendInfo MSI = getMessageSendInfo(method, resultType, args);
NullReturnState nullReturn;
@@ -5572,7 +5835,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
// FIXME: don't use this for that.
llvm::Constant *fn = 0;
std::string messageRefName("\01l_");
- if (CGM.ReturnTypeUsesSRet(fnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
if (isSuper) {
fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
messageRefName += "objc_msgSendSuper2_stret_fixup";
@@ -5616,6 +5879,20 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
messageRef->setAlignment(16);
messageRef->setSection("__DATA, __objc_msgrefs, coalesced");
}
+
+ bool requiresnullCheck = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount && method)
+ for (ObjCMethodDecl::param_const_iterator i = method->param_begin(),
+ e = method->param_end(); i != e; ++i) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, arg0);
+ requiresnullCheck = true;
+ break;
+ }
+ }
+
llvm::Value *mref =
CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy);
@@ -5626,14 +5903,11 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
llvm::Value *callee = CGF.Builder.CreateStructGEP(mref, 0);
callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
- bool variadic = method ? method->isVariadic() : false;
- llvm::FunctionType *fnType =
- CGF.getTypes().GetFunctionType(fnInfo, variadic);
- callee = CGF.Builder.CreateBitCast(callee,
- llvm::PointerType::getUnqual(fnType));
+ callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
- RValue result = CGF.EmitCall(fnInfo, callee, returnSlot, args);
- return nullReturn.complete(CGF, result, resultType);
+ RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);
+ return nullReturn.complete(CGF, result, resultType, formalArgs,
+ requiresnullCheck ? method : 0);
}
/// Generate code for a message send expression in the nonfragile abi.
@@ -5839,7 +6113,12 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
if (lval)
return Entry;
- return Builder.CreateLoad(Entry);
+ llvm::LoadInst* LI = Builder.CreateLoad(Entry);
+
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext,
+ ArrayRef<llvm::Value*>()));
+ return LI;
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
@@ -6049,13 +6328,13 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
llvm::GlobalValue::ExternalLinkage,
0, VTableName);
- llvm::Value *VTableIdx =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
+ llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2);
- std::vector<llvm::Constant*> Values(3);
- Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, VTableIdx);
- Values[1] = GetClassName(ID->getIdentifier());
- Values[2] = GetClassGlobal(ClassName);
+ llvm::Constant *Values[] = {
+ llvm::ConstantExpr::getGetElementPtr(VTableGV, VTableIdx),
+ GetClassName(ID->getIdentifier()),
+ GetClassGlobal(ClassName)
+ };
llvm::Constant *Init =
llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
@@ -6069,7 +6348,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
ID->getIdentifier()->getName()));
}
- if (CGM.getLangOptions().getVisibilityMode() == HiddenVisibility)
+ if (CGM.getLangOpts().getVisibilityMode() == HiddenVisibility)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
ObjCTypes.EHTypeTy));
@@ -6088,7 +6367,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
CodeGen::CGObjCRuntime *
CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
- if (CGM.getLangOptions().ObjCNonFragileABI)
+ if (CGM.getLangOpts().ObjCNonFragileABI)
return new CGObjCNonFragileABIMac(CGM);
return new CGObjCMac(CGM);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
index ef426ce..9370096 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -41,7 +41,7 @@ static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
// If we know we have an implementation (and the ivar is in it) then
// look up in the implementation layout.
const ASTRecordLayout *RL;
- if (ID && ID->getClassInterface() == Container)
+ if (ID && declaresSameEntity(ID->getClassInterface(), Container))
RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
else
RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
@@ -85,7 +85,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *I8Ptr = CGF.Int8PtrTy;
QualType IvarTy = Ivar->getType();
llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
@@ -93,7 +93,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
if (!Ivar->isBitField()) {
- LValue LV = CGF.MakeAddrLValue(V, IvarTy);
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
LV.getQuals().addCVRQualifiers(CVRQualifiers);
return LV;
}
@@ -229,7 +229,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
cast<llvm::CallInst>(Exn)->setDoesNotThrow();
}
- CodeGenFunction::RunCleanupsScope cleanups(CGF);
+ CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
if (endCatchFn) {
// Add a cleanup to leave the catch.
@@ -246,7 +246,24 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
CGF.EmitAutoVarDecl(*CatchParam);
- CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
+
+ llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);
+
+ switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, CatchParamAddr);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
+ break;
+ }
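+      // For illustration (assumed source, not from this patch), the cases
+      // above correspond to catch-parameter qualifiers such as:
+      //   @catch (id e)                      // __strong under ARC: retain + store
+      //   @catch (__unsafe_unretained id e)  // plain store
+      //   @catch (__weak id e)               // objc_initWeak on the slot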
}
CGF.ObjCEHValueStack.push_back(Exn);
@@ -293,7 +310,7 @@ void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
// ARC release and lock-release cleanups.
const Expr *lockExpr = S.getSynchExpr();
llvm::Value *lock;
- if (CGF.getLangOptions().ObjCAutoRefCount) {
+ if (CGF.getLangOpts().ObjCAutoRefCount) {
lock = CGF.EmitARCRetainScalarExpr(lockExpr);
lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
} else {
@@ -310,3 +327,48 @@ void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
// Emit the body of the statement.
CGF.EmitStmt(S.getSynchBody());
}
+
+/// Compute the pointer-to-function type to which a message send
+/// should be cast in order to correctly call the given method
+/// with the given arguments.
+///
+/// \param method - may be null
+/// \param resultType - the result type to use if there's no method
+/// \param argInfo - the actual arguments, including implicit ones
+CGObjCRuntime::MessageSendInfo
+CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs) {
+ // If there's a method, use information from that.
+ if (method) {
+ const CGFunctionInfo &signature =
+ CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);
+
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(signature)->getPointerTo();
+
+ // If that's not variadic, there's no need to recompute the ABI
+ // arrangement.
+ if (!signature.isVariadic())
+ return MessageSendInfo(signature, signatureType);
+
+ // Otherwise, there is.
+ FunctionType::ExtInfo einfo = signature.getExtInfo();
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFunctionCall(resultType, callArgs, einfo,
+ signature.getRequiredArgs());
+
+ return MessageSendInfo(argsInfo, signatureType);
+ }
+
+ // There's no method; just use a default CC.
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFunctionCall(resultType, callArgs,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ // Derive the signature to call from that.
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
+ return MessageSendInfo(argsInfo, signatureType);
+}
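+// A minimal usage sketch (assuming the CodeGenFunction context and variables
+// seen in EmitVTableMessageSend above; illustrative, not part of the patch):
+//   MessageSendInfo MSI = getMessageSendInfo(method, resultType, args);
+//   callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
+//   RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);
+// This replaces per-call-site recomputation of the CGFunctionInfo and of the
+// messenger's function type.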
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
index 4fa47a7..ccf4d4d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -63,6 +63,9 @@ namespace CodeGen {
/// Implements runtime-specific code generation functions.
class CGObjCRuntime {
protected:
+ CodeGen::CodeGenModule &CGM;
+ CGObjCRuntime(CodeGen::CodeGenModule &CGM) : CGM(CGM) {}
+
// Utility functions for unified ivar access. These need to
// eventually be folded into other places (the structure layout
// code).
@@ -132,7 +135,7 @@ public:
/// Generate a constant string object.
virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
-
+
/// Generate a category. A category contains a list of methods (and
/// accompanying metadata) and a list of protocols.
virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
@@ -140,6 +143,9 @@ public:
/// Generate a class structure for this class.
virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+ /// Register an class alias.
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) = 0;
+
/// Generate an Objective-C message send operation.
///
/// \param Method - The method being called, this may be null if synthesizing
@@ -196,10 +202,17 @@ public:
/// Return the runtime function for setting properties.
virtual llvm::Constant *GetPropertySetFunction() = 0;
+ /// Return the runtime function for optimized setting properties.
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) = 0;
+
// API for atomic copying of qualified aggregates in getter.
virtual llvm::Constant *GetGetStructFunction() = 0;
// API for atomic copying of qualified aggregates in setter.
virtual llvm::Constant *GetSetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates with non-trivial copy
+ // assignment (c++) in setter/getter.
+ virtual llvm::Constant *GetCppAtomicObjectFunction() = 0;
/// GetClass - Return a reference to the class for the given
/// interface decl.
@@ -249,6 +262,19 @@ public:
virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) = 0;
+
+ struct MessageSendInfo {
+ const CGFunctionInfo &CallInfo;
+ llvm::PointerType *MessengerType;
+
+ MessageSendInfo(const CGFunctionInfo &callInfo,
+ llvm::PointerType *messengerType)
+ : CallInfo(callInfo), MessengerType(messengerType) {}
+ };
+
+ MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs);
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
index fbdb298..19973b4 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -26,8 +26,6 @@ class RTTIBuilder {
CodeGenModule &CGM; // Per-module state.
llvm::LLVMContext &VMContext;
- llvm::Type *Int8PtrTy;
-
/// Fields - The fields of the RTTI descriptor currently being built.
SmallVector<llvm::Constant *, 16> Fields;
@@ -65,8 +63,7 @@ class RTTIBuilder {
public:
RTTIBuilder(CodeGenModule &CGM) : CGM(CGM),
- VMContext(CGM.getModule().getContext()),
- Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
+ VMContext(CGM.getModule().getContext()) { }
// Pointer type info flags.
enum {
@@ -116,7 +113,7 @@ public:
llvm::GlobalVariable *
RTTIBuilder::GetAddrOfTypeName(QualType Ty,
llvm::GlobalVariable::LinkageTypes Linkage) {
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
Out.flush();
@@ -125,7 +122,8 @@ RTTIBuilder::GetAddrOfTypeName(QualType Ty,
// We know that the mangled name of the type starts at index 4 of the
// mangled name of the typename, so we can just index into it in order to
// get the mangled name of the type.
- llvm::Constant *Init = llvm::ConstantArray::get(VMContext, Name.substr(4));
+ llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
+ Name.substr(4));
llvm::GlobalVariable *GV =
CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
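// Concrete example (assumption, for illustration): the RTTI name for `int`
// mangles to "_ZTSi"; dropping the 4-character "_ZTS" prefix leaves "i", the
// mangled name of the type itself, which is the layout substr(4) exploits.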
@@ -137,7 +135,7 @@ RTTIBuilder::GetAddrOfTypeName(QualType Ty,
llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Mangle the RTTI name.
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
Out.flush();
@@ -148,11 +146,12 @@ llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
if (!GV) {
// Create a new global variable.
- GV = new llvm::GlobalVariable(CGM.getModule(), Int8PtrTy, /*Constant=*/true,
+ GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
+ /*Constant=*/true,
llvm::GlobalValue::ExternalLinkage, 0, Name);
}
- return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
@@ -195,10 +194,11 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::UInt128:
return true;
- case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::BoundMember:
- case BuiltinType::UnknownAny:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
llvm_unreachable("asking for RRTI for a placeholder type!");
case BuiltinType::ObjCId:
@@ -206,9 +206,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::ObjCSel:
llvm_unreachable("FIXME: Objective-C types are unsupported!");
}
-
- // Silent gcc.
- return false;
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
}
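// Sketch of the X-macro expansion (assuming the usual layout of
// clang/AST/BuiltinTypes.def): BUILTIN_TYPE is defined to expand to nothing,
// so only PLACEHOLDER_TYPE entries survive the include, yielding roughly
//   case BuiltinType::Overload:
//   case BuiltinType::BoundMember:
//   case BuiltinType::UnknownAny:
// which keeps the switch in sync automatically as placeholder kinds are added.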
static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
@@ -250,7 +249,7 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
ASTContext &Context = CGM.getContext();
// If RTTI is disabled, don't consider key functions.
- if (!Context.getLangOptions().RTTI) return false;
+ if (!Context.getLangOpts().RTTI) return false;
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
@@ -327,7 +326,7 @@ getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
return llvm::GlobalValue::InternalLinkage;
case ExternalLinkage:
- if (!CGM.getLangOptions().RTTI) {
+ if (!CGM.getLangOpts().RTTI) {
// RTTI is not enabled, which means that this type info struct is going
// to be used for exception handling. Give it linkonce_odr linkage.
return llvm::GlobalValue::LinkOnceODRLinkage;
@@ -335,6 +334,8 @@ getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (RD->hasAttr<WeakAttr>())
+ return llvm::GlobalValue::WeakODRLinkage;
if (RD->isDynamicClass())
return CGM.getVTableLinkage(RD);
}
@@ -342,7 +343,7 @@ getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
return llvm::GlobalValue::LinkOnceODRLinkage;
}
- return llvm::GlobalValue::LinkOnceODRLinkage;
+ llvm_unreachable("Invalid linkage!");
}
// CanUseSingleInheritance - Return whether the given record decl has a "single,
@@ -479,7 +480,7 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
}
llvm::Constant *VTable =
- CGM.getModule().getOrInsertGlobal(VTableName, Int8PtrTy);
+ CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
@@ -487,7 +488,7 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
// The vtable address point is 2.
llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Two);
- VTable = llvm::ConstantExpr::getBitCast(VTable, Int8PtrTy);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
Fields.push_back(VTable);
}
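// Why index 2 (Itanium C++ ABI vtable layout, stated as an assumption here):
// a vtable begins with the offset-to-top slot and the type_info pointer, so
// component 2 is the first virtual-function slot -- the "address point" that
// object vptrs reference:
//   [0] offset-to-top
//   [1] type_info pointer
//   [2] first virtual function   <-- address point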
@@ -531,7 +532,7 @@ maybeUpdateRTTILinkage(CodeGenModule &CGM, llvm::GlobalVariable *GV,
GV->setLinkage(Linkage);
// Get the typename global.
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
Out.flush();
@@ -551,7 +552,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
Ty = CGM.getContext().getCanonicalType(Ty);
// Check if we've already emitted an RTTI descriptor for this type.
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
Out.flush();
@@ -561,7 +562,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
if (OldGV && !OldGV->isDeclaration()) {
maybeUpdateRTTILinkage(CGM, OldGV, Ty);
- return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
+ return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
}
// Check if there is already an external RTTI descriptor for this type.
@@ -582,8 +583,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// And the name.
llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, Int8PtrTy));
+ Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy));
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
@@ -705,7 +705,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
GV->setUnnamedAddr(true);
- return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
/// ComputeQualifierFlags - Compute the pointer type info flags from the
@@ -982,14 +982,11 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if (!ForEH && !getContext().getLangOptions().RTTI) {
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ if (!ForEH && !getContext().getLangOpts().RTTI)
return llvm::Constant::getNullValue(Int8PtrTy);
- }
- if (ForEH && Ty->isObjCObjectPointerType() && !Features.NeXTRuntime) {
+ if (ForEH && Ty->isObjCObjectPointerType() && !LangOpts.NeXTRuntime)
return ObjCRuntime->GetEHType(Ty);
- }
return RTTIBuilder(*this).BuildTypeInfo(Ty);
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 6475cca..1193e97 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -120,24 +120,29 @@ private:
bool LayoutFields(const RecordDecl *D);
/// Layout a single base, virtual or non-virtual
- void LayoutBase(const CXXRecordDecl *base,
+ bool LayoutBase(const CXXRecordDecl *base,
const CGRecordLayout &baseLayout,
CharUnits baseOffset);
/// LayoutVirtualBase - layout a single virtual base.
- void LayoutVirtualBase(const CXXRecordDecl *base,
+ bool LayoutVirtualBase(const CXXRecordDecl *base,
CharUnits baseOffset);
/// LayoutVirtualBases - layout the virtual bases of a record decl.
- void LayoutVirtualBases(const CXXRecordDecl *RD,
+ bool LayoutVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout);
+
+ /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
+ /// like MSVC.
+ bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
/// LayoutNonVirtualBase - layout a single non-virtual base.
- void LayoutNonVirtualBase(const CXXRecordDecl *base,
+ bool LayoutNonVirtualBase(const CXXRecordDecl *base,
CharUnits baseOffset);
/// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
- void LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout);
/// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
@@ -526,6 +531,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
CharUnits unionAlign = CharUnits::Zero();
bool hasOnlyZeroSizedBitFields = true;
+ bool checkedFirstFieldZeroInit = false;
unsigned fieldNo = 0;
for (RecordDecl::field_iterator field = D->field_begin(),
@@ -537,6 +543,11 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
if (!fieldType)
continue;
+ if (field->getDeclName() && !checkedFirstFieldZeroInit) {
+ CheckZeroInitializable(field->getType());
+ checkedFirstFieldZeroInit = true;
+ }
+
hasOnlyZeroSizedBitFields = false;
CharUnits fieldAlign = CharUnits::fromQuantity(
@@ -565,6 +576,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
}
}
if (unionAlign.isZero()) {
+ (void)hasOnlyZeroSizedBitFields;
assert(hasOnlyZeroSizedBitFields &&
"0-align record did not have all zero-sized bit-fields!");
unionAlign = CharUnits::One();
@@ -576,7 +588,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
AppendPadding(recordSize, unionAlign);
}
-void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
+bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
const CGRecordLayout &baseLayout,
CharUnits baseOffset) {
ResizeLastBaseFieldIfNecessary(baseOffset);
@@ -589,22 +601,18 @@ void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
LastLaidOutBase.Offset = NextFieldOffset;
LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();
- // Fields and bases can be laid out in the tail padding of previous
- // bases. If this happens, we need to allocate the base as an i8
- // array; otherwise, we can use the subobject type. However,
- // actually doing that would require knowledge of what immediately
- // follows this base in the layout, so instead we do a conservative
- // approximation, which is to use the base subobject type if it
- // has the same LLVM storage size as the nvsize.
-
llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
+ if (getTypeAlignment(subobjectType) > Alignment)
+ return false;
+
AppendField(baseOffset, subobjectType);
+ return true;
}
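// Returning false instead of asserting lets the layout fail soft: the
// failure propagates up through LayoutFields, and the builder is presumed to
// retry with a packed LLVM struct, where explicit byte padding can represent
// any base offset regardless of natural alignment.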
-void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
+bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
CharUnits baseOffset) {
// Ignore empty bases.
- if (base->isEmpty()) return;
+ if (base->isEmpty()) return true;
const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
if (IsZeroInitializableAsBase) {
@@ -615,26 +623,51 @@ void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
baseLayout.isZeroInitializableAsBase();
}
- LayoutBase(base, baseLayout, baseOffset);
+ if (!LayoutBase(base, baseLayout, baseOffset))
+ return false;
NonVirtualBases[base] = (FieldTypes.size() - 1);
+ return true;
}
-void
+bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
CharUnits baseOffset) {
// Ignore empty bases.
- if (base->isEmpty()) return;
+ if (base->isEmpty()) return true;
const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
if (IsZeroInitializable)
IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
- LayoutBase(base, baseLayout, baseOffset);
+ if (!LayoutBase(base, baseLayout, baseOffset))
+ return false;
VirtualBases[base] = (FieldTypes.size() - 1);
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ if (!RD->getNumVBases())
+ return true;
+
+ // The vbases list is uniqued and ordered by a depth-first
+ // traversal, which is what we need here.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
+ return false;
+ }
+ return true;
}
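// For illustration (assumed hierarchy): given
//   struct A {};  struct B : virtual A {};  struct C : virtual A, B {};
// C's vbases list holds A exactly once, in depth-first order, which is the
// order this MS-style walk lays the virtual bases out.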
/// LayoutVirtualBases - layout the virtual bases of a record decl.
-void
+bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout) {
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
@@ -650,7 +683,8 @@ CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
continue;
CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
- LayoutVirtualBase(BaseDecl, vbaseOffset);
+ if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
+ return false;
}
if (!BaseDecl->getNumVBases()) {
@@ -658,32 +692,39 @@ CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
continue;
}
- LayoutVirtualBases(BaseDecl, Layout);
+ if (!LayoutVirtualBases(BaseDecl, Layout))
+ return false;
}
+ return true;
}
-void
+bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout) {
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- // Check if we need to add a vtable pointer.
- if (RD->isDynamicClass()) {
- if (!PrimaryBase) {
- llvm::Type *FunctionType =
- llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
- /*isVarArg=*/true);
- llvm::Type *VTableTy = FunctionType->getPointerTo();
-
- assert(NextFieldOffset.isZero() &&
- "VTable pointer must come first!");
- AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
+ // If we have a primary base, lay it out first.
+ if (PrimaryBase) {
+ if (!Layout.isPrimaryBaseVirtual()) {
+ if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
+ return false;
} else {
- if (!Layout.isPrimaryBaseVirtual())
- LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
- else
- LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
+ if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
+ return false;
}
+
+ // Otherwise, add a vtable / vf-table if the layout says to do so.
+ } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
+ ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
+ : RD->isDynamicClass()) {
+ llvm::Type *FunctionType =
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
+ /*isVarArg=*/true);
+ llvm::Type *VTableTy = FunctionType->getPointerTo();
+
+ assert(NextFieldOffset.isZero() &&
+ "VTable pointer must come first!");
+ AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
}
// Layout the non-virtual bases.
@@ -699,8 +740,19 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
continue;
- LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
+ if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
+ return false;
+ }
+
+ // Add a vb-table pointer if the layout insists.
+ if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
+ CharUnits VBPtrOffset = Layout.getVBPtrOffset();
+ llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
+ AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
+ AppendField(VBPtrOffset, Vbptr);
}
+
+ return true;
}
bool
@@ -731,7 +783,6 @@ CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
FieldTypes.push_back(getByteArrayType(NumBytes));
}
-
BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
FieldTypes, "", Packed);
@@ -752,7 +803,8 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
if (RD)
- LayoutNonVirtualBases(RD, Layout);
+ if (!LayoutNonVirtualBases(RD, Layout))
+ return false;
unsigned FieldNo = 0;
const FieldDecl *LastFD = 0;
@@ -785,11 +837,19 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
return false;
}
- // And lay out the virtual bases.
- RD->getIndirectPrimaryBases(IndirectPrimaryBases);
- if (Layout.isPrimaryBaseVirtual())
- IndirectPrimaryBases.insert(Layout.getPrimaryBase());
- LayoutVirtualBases(RD, Layout);
+ // Lay out the virtual bases. The MS ABI uses a different
+ // algorithm here due to the lack of primary virtual bases.
+ if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+ if (Layout.isPrimaryBaseVirtual())
+ IndirectPrimaryBases.insert(Layout.getPrimaryBase());
+
+ if (!LayoutVirtualBases(RD, Layout))
+ return false;
+ } else {
+ if (!MSLayoutVirtualBases(RD, Layout))
+ return false;
+ }
}
// Append tail padding if necessary.
@@ -831,17 +891,24 @@ void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
assert(NextFieldOffset <= fieldOffset &&
"Incorrect field layout!");
- // Round up the field offset to the alignment of the field type.
- CharUnits alignedNextFieldOffset =
- NextFieldOffset.RoundUpToAlignment(fieldAlignment);
+ // Do nothing if we're already at the right offset.
+ if (fieldOffset == NextFieldOffset) return;
- if (alignedNextFieldOffset < fieldOffset) {
- // Even with alignment, the field offset is not at the right place,
- // insert padding.
- CharUnits padding = fieldOffset - NextFieldOffset;
+ // If we're not emitting a packed LLVM type, try to avoid adding
+ // unnecessary padding fields.
+ if (!Packed) {
+ // Round up the field offset to the alignment of the field type.
+ CharUnits alignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(fieldAlignment);
+ assert(alignedNextFieldOffset <= fieldOffset);
- AppendBytes(padding);
+ // If that's the right offset, we're done.
+ if (alignedNextFieldOffset == fieldOffset) return;
}
+
+ // Otherwise we need explicit padding.
+ CharUnits padding = fieldOffset - NextFieldOffset;
+ AppendBytes(padding);
}
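// Worked example (assumed numbers): with NextFieldOffset == 5, fieldOffset
// == 8, and fieldAlignment == 4, the unpacked path rounds 5 up to 8, which
// already matches, so no padding field is appended. With fieldOffset == 12
// instead, 8 != 12, and an explicit 7-byte (12 - 5) padding array is added.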
bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
@@ -905,7 +972,7 @@ void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
return;
// Can only have member pointers if we're compiling C++.
- if (!Types.getContext().getLangOptions().CPlusPlus)
+ if (!Types.getContext().getLangOpts().CPlusPlus)
return;
const Type *elementType = T->getBaseElementTypeUnsafe();
@@ -931,7 +998,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
// If we're in C++, compute the base subobject type.
llvm::StructType *BaseTy = 0;
- if (isa<CXXRecordDecl>(D)) {
+ if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
BaseTy = Builder.BaseSubobjectType;
if (!BaseTy) BaseTy = Ty;
}
@@ -950,7 +1017,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
RL->BitFields.swap(Builder.BitFields);
// Dump the layout, if requested.
- if (getContext().getLangOptions().DumpRecordLayouts) {
+ if (getContext().getLangOpts().DumpRecordLayouts) {
llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
llvm::errs() << "Record: ";
D->dump();
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
index c56931b..bf42dcb 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -73,6 +73,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::CXXCatchStmtClass:
case Stmt::SEHExceptStmtClass:
case Stmt::SEHFinallyStmtClass:
+ case Stmt::MSDependentExistsStmtClass:
llvm_unreachable("invalid statement class to emit generically");
case Stmt::NullStmtClass:
case Stmt::CompoundStmtClass:
@@ -190,20 +191,13 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
- CGDebugInfo *DI = getDebugInfo();
- if (DI)
- DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
-
- // Keep track of the current cleanup stack depth.
- RunCleanupsScope Scope(*this);
+ // Keep track of the current cleanup stack depth, including debug scopes.
+ LexicalScope Scope(*this, S.getSourceRange());
for (CompoundStmt::const_body_iterator I = S.body_begin(),
E = S.body_end()-GetLast; I != E; ++I)
EmitStmt(*I);
- if (DI)
- DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
-
RValue RV;
if (!GetLast)
RV = RValue::get(0);
@@ -771,7 +765,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Qualifiers(),
+ CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
@@ -783,7 +778,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
// As long as debug info is modeled with instructions, we have to ensure we
// have a place to insert here and write the stop point here.
- if (getDebugInfo() && HaveInsertPoint())
+ if (HaveInsertPoint())
EmitStopPoint(&S);
for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
@@ -876,6 +871,16 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
}
void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ // If there is no enclosing switch instance that we're aware of, then this
+ // case statement and its block can be elided. This situation only happens
+ // when we've constant-folded the switch, are emitting the constant case,
+ // and part of the constant case includes another case statement. For
+ // instance: switch (4) { case 4: do { case 5: } while (1); }
+ if (!SwitchInsn) {
+ EmitStmt(S.getSubStmt());
+ return;
+ }
+
// Handle case ranges.
if (S.getRHS()) {
EmitCaseStmtRange(S);
@@ -887,7 +892,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
// If the body of the case is just a 'break', and if there was no fallthrough,
// try to not emit an empty block.
- if (isa<BreakStmt>(S.getSubStmt())) {
+ if ((CGM.getCodeGenOpts().OptimizationLevel > 0) && isa<BreakStmt>(S.getSubStmt())) {
JumpDest Block = BreakContinueStack.back().BreakBlock;
// Only do this optimization if there are no cleanups that need emitting.
@@ -1150,6 +1155,10 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
if (S.getConditionVariable())
EmitAutoVarDecl(*S.getConditionVariable());
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
// See if we can constant fold the condition of the switch and therefore only
// emit the live case statement (if any) of the switch.
llvm::APInt ConstantCondValue;
@@ -1159,20 +1168,26 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
getContext())) {
RunCleanupsScope ExecutedScope(*this);
+ // At this point, we are no longer "within" a switch instance, so
+ // we can temporarily enforce this to ensure that any embedded case
+ // statements are not emitted.
+ SwitchInsn = 0;
+
// Okay, we can dead code eliminate everything except this case. Emit the
// specified series of statements and we're good.
for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
EmitStmt(CaseStmts[i]);
+
+ // Now we want to restore the saved switch instance so that nested
+    // switches continue to function properly.
+ SwitchInsn = SavedSwitchInsn;
+
return;
}
}
llvm::Value *CondV = EmitScalarExpr(S.getCond());
- // Handle nested switch statements.
- llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
- llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
-
// Create basic block to hold stuff that comes after switch
// statement. We also need to create a default block now so that
// explicit case ranges tests can have a place to jump to on
@@ -1199,7 +1214,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// Update the default block in case explicit case range tests have
// been chained on top.
- SwitchInsn->setSuccessor(0, CaseRangeBlock);
+ SwitchInsn->setDefaultDest(CaseRangeBlock);
// If a default was never emitted:
if (!DefaultBlock->getParent()) {
@@ -1280,6 +1295,8 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
if (!Variable)
return Constraint;
+ if (Variable->getStorageClass() != SC_Register)
+ return Constraint;
AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
if (!Attr)
return Constraint;
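// Illustrative input (assumed example): the substitution now applies only to
// explicit register variables, e.g.
//   register int counter asm("ebx");
//   asm("incl %0" : "+r"(counter));
// whereas an asm label on an ordinary variable merely renames its symbol and
// should not be folded into a register constraint.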
@@ -1355,7 +1372,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
StringRef StrVal = Str->getString();
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
- const LangOptions &LangOpts = CGF.CGM.getLangOptions();
+ const LangOptions &LangOpts = CGF.CGM.getLangOpts();
// Add the location of the start of each subsequent line of the asm to the
// MDNode.
@@ -1490,6 +1507,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
InOutConstraints);
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
+
if (Info.allowsRegister())
InOutConstraints += llvm::utostr(i);
else
@@ -1548,7 +1570,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
}
- if (llvm::Type* AdjTy =
+ if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
Arg->getType()))
Arg = Builder.CreateBitCast(Arg, AdjTy);
@@ -1590,7 +1612,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::Type *ResultType;
if (ResultRegTypes.empty())
- ResultType = llvm::Type::getVoidTy(getLLVMContext());
+ ResultType = VoidTy;
else if (ResultRegTypes.size() == 1)
ResultType = ResultRegTypes[0];
else
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
deleted file mode 100644
index 0387dae..0000000
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-//===--- CGTemporaries.cpp - Emit LLVM Code for C++ temporaries -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This contains code dealing with C++ code generation of temporaries
-//
-//===----------------------------------------------------------------------===//
-
-#include "CodeGenFunction.h"
-using namespace clang;
-using namespace CodeGen;
-
-namespace {
- struct DestroyTemporary : EHScopeStack::Cleanup {
- const CXXDestructorDecl *dtor;
- llvm::Value *addr;
- DestroyTemporary(const CXXDestructorDecl *dtor, llvm::Value *addr)
- : dtor(dtor), addr(addr) {}
- void Emit(CodeGenFunction &CGF, Flags flags) {
- CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- addr);
- }
- };
-}
-
-/// Emits all the code to cause the given temporary to be cleaned up.
-void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
- llvm::Value *Ptr) {
- pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup,
- Temporary->getDestructor(),
- Ptr);
-}
-
-RValue
-CodeGenFunction::EmitExprWithCleanups(const ExprWithCleanups *E,
- AggValueSlot Slot) {
- RunCleanupsScope Scope(*this);
- return EmitAnyExpr(E->getSubExpr(), Slot);
-}
-
-LValue CodeGenFunction::EmitExprWithCleanupsLValue(const ExprWithCleanups *E) {
- RunCleanupsScope Scope(*this);
- return EmitLValue(E->getSubExpr());
-}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
index ea7b8cb..98be872 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
@@ -18,13 +18,11 @@
using namespace clang;
using namespace CodeGen;
-#define D1(x)
-
-llvm::Constant *GetAddrOfVTTVTable(CodeGenVTables &CGVT,
- const CXXRecordDecl *MostDerivedClass,
- const VTTVTable &VTable,
- llvm::GlobalVariable::LinkageTypes Linkage,
- llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
+static llvm::Constant *
+GetAddrOfVTTVTable(CodeGenVTables &CGVT, const CXXRecordDecl *MostDerivedClass,
+ const VTTVTable &VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
if (VTable.getBase() == MostDerivedClass) {
assert(VTable.getBaseOffset().isZero() &&
"Most derived class vtable must have a zero offset!");
@@ -45,8 +43,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()),
- *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy, *Int64Ty = CGM.Int64Ty;
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
@@ -102,7 +99,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT");
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, Out);
Out.flush();
@@ -113,10 +110,8 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
- llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+ llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
llvm::GlobalVariable *GV =
CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
index a306c85..17a0537 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -52,7 +52,7 @@ bool CodeGenVTables::ShouldEmitVTableInThisTU(const CXXRecordDecl *RD) {
// If we're building with optimization, we always emit VTables since that
// allows for virtual function calls to be devirtualized.
// (We don't want to do this in -fapple-kext mode however).
- if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOptions().AppleKext)
+ if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOpts().AppleKext)
return true;
return KeyFunction->hasBody();
@@ -63,7 +63,7 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
// Compute the mangled name.
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
@@ -83,9 +83,7 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
if (!NonVirtualAdjustment && !VirtualAdjustment)
return Ptr;
- llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
-
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
if (NonVirtualAdjustment) {
@@ -244,8 +242,8 @@ void CodeGenFunction::GenerateVarArgsThunk(
QualType ResultType = FPT->getResultType();
// Get the original function
- llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(FnInfo, /*IsVariadic*/true);
+ assert(FnInfo.isVariadic());
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
llvm::Function *BaseFn = cast<llvm::Function>(Callee);
@@ -331,6 +329,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
SourceLocation());
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ CXXThisValue = CXXABIThisValue;
// Adjust the 'this' pointer if necessary.
llvm::Value *AdjustedThisPtr =
@@ -352,13 +351,13 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
// Get our callee.
llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(GD),
- FPT->isVariadic());
+ CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
#ifndef NDEBUG
const CGFunctionInfo &CallFnInfo =
- CGM.getTypes().getFunctionInfo(ResultType, CallArgs, FPT->getExtInfo());
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
@@ -399,7 +398,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
bool UseAvailableExternallyLinkage)
{
- const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(GD);
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);
// FIXME: re-use FnInfo in this computation.
llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
@@ -511,7 +510,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
unsigned NumVTableThunks) {
SmallVector<llvm::Constant *, 64> Inits;
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy;
llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
@@ -571,8 +570,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
// We have a pure virtual member function.
if (!PureVirtualFn) {
llvm::FunctionType *Ty =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- /*isVarArg=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
PureVirtualFn =
CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
@@ -586,8 +584,8 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
VTableThunks[NextVTableThunkIndex].first == I) {
const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
- Init = CGM.GetAddrOfThunk(GD, Thunk);
MaybeEmitThunkAvailableExternally(GD, Thunk);
+ Init = CGM.GetAddrOfThunk(GD, Thunk);
NextVTableThunkIndex++;
} else {
@@ -622,15 +620,14 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
if (ShouldEmitVTableInThisTU(RD))
CGM.DeferredVTables.push_back(RD);
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
Out.flush();
StringRef Name = OutName.str();
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy,
+ llvm::ArrayType::get(CGM.Int8PtrTy,
VTContext.getVTableLayout(RD).getNumVTableComponents());
VTable =
@@ -668,7 +665,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
bool BaseIsVirtual,
llvm::GlobalVariable::LinkageTypes Linkage,
VTableAddressPointsMapTy& AddressPoints) {
- llvm::OwningPtr<VTableLayout> VTLayout(
+ OwningPtr<VTableLayout> VTLayout(
VTContext.createConstructionVTableLayout(Base.getBase(),
Base.getBaseOffset(),
BaseIsVirtual, RD));
@@ -677,7 +674,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
AddressPoints = VTLayout->getAddressPoints();
// Get the mangled construction vtable name.
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().
mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(),
@@ -685,9 +682,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
Out.flush();
StringRef Name = OutName.str();
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, VTLayout->getNumVTableComponents());
+ llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
index 489e600..ac704e7 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
@@ -16,6 +16,7 @@
#define CLANG_CODEGEN_CGVALUE_H
#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
namespace llvm {
@@ -24,9 +25,8 @@ namespace llvm {
}
namespace clang {
- class ObjCPropertyRefExpr;
-
namespace CodeGen {
+ class AggValueSlot;
class CGBitFieldInfo;
/// RValue - This trivial value class is used to represent the result of an
@@ -105,9 +105,7 @@ class LValue {
Simple, // This is a normal l-value, use getAddress().
VectorElt, // This is a vector element l-value (V[i]), use getVector*
BitField, // This is a bitfield l-value, use getBitfield*.
- ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
- PropertyRef // This is an Objective-C property reference, use
- // getPropertyRefExpr
+ ExtVectorElt // This is an extended vector subset, use getExtVectorComp
} LVType;
llvm::Value *V;
@@ -121,9 +119,6 @@ class LValue {
// BitField start bit and size
const CGBitFieldInfo *BitFieldInfo;
-
- // Obj-C property reference expression
- const ObjCPropertyRefExpr *PropertyRefExpr;
};
QualType Type;
@@ -131,7 +126,8 @@ class LValue {
// 'const' is unused here
Qualifiers Quals;
- /// The alignment to use when accessing this lvalue.
+ // The alignment to use when accessing this lvalue. (For vector elements,
+ // this is the alignment of the whole vector.)
unsigned short Alignment;
// objective-c's ivar
@@ -156,12 +152,14 @@ class LValue {
llvm::MDNode *TBAAInfo;
private:
- void Initialize(QualType Type, Qualifiers Quals, unsigned Alignment = 0,
+ void Initialize(QualType Type, Qualifiers Quals,
+ CharUnits Alignment = CharUnits(),
llvm::MDNode *TBAAInfo = 0) {
this->Type = Type;
this->Quals = Quals;
- this->Alignment = Alignment;
- assert(this->Alignment == Alignment && "Alignment exceeds allowed max!");
+ this->Alignment = Alignment.getQuantity();
+ assert(this->Alignment == Alignment.getQuantity() &&
+ "Alignment exceeds allowed max!");
// Initialize Objective-C flags.
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
@@ -175,7 +173,6 @@ public:
bool isVectorElt() const { return LVType == VectorElt; }
bool isBitField() const { return LVType == BitField; }
bool isExtVectorElt() const { return LVType == ExtVectorElt; }
- bool isPropertyRef() const { return LVType == PropertyRef; }
bool isVolatileQualified() const { return Quals.hasVolatile(); }
bool isRestrictQualified() const { return Quals.hasRestrict(); }
@@ -226,7 +223,8 @@ public:
unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
- unsigned getAlignment() const { return Alignment; }
+ CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
+ void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
// simple lvalue
llvm::Value *getAddress() const { assert(isSimple()); return V; }
@@ -256,18 +254,8 @@ public:
return *BitFieldInfo;
}
- // property ref lvalue
- llvm::Value *getPropertyRefBaseAddr() const {
- assert(isPropertyRef());
- return V;
- }
- const ObjCPropertyRefExpr *getPropertyRefExpr() const {
- assert(isPropertyRef());
- return PropertyRefExpr;
- }
-
static LValue MakeAddr(llvm::Value *address, QualType type,
- unsigned alignment, ASTContext &Context,
+ CharUnits alignment, ASTContext &Context,
llvm::MDNode *TBAAInfo = 0) {
Qualifiers qs = type.getQualifiers();
qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
@@ -280,22 +268,22 @@ public:
}
static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
- QualType type) {
+ QualType type, CharUnits Alignment) {
LValue R;
R.LVType = VectorElt;
R.V = Vec;
R.VectorIdx = Idx;
- R.Initialize(type, type.getQualifiers());
+ R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
- QualType type) {
+ QualType type, CharUnits Alignment) {
LValue R;
R.LVType = ExtVectorElt;
R.V = Vec;
R.VectorElts = Elts;
- R.Initialize(type, type.getQualifiers());
+ R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
@@ -316,17 +304,9 @@ public:
return R;
}
- // FIXME: It is probably bad that we aren't emitting the target when we build
- // the lvalue. However, this complicates the code a bit, and I haven't figured
- // out how to make it go wrong yet.
- static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
- llvm::Value *Base) {
- LValue R;
- R.LVType = PropertyRef;
- R.V = Base;
- R.PropertyRefExpr = E;
- R.Initialize(QualType(), Qualifiers());
- return R;
+ RValue asAggregateRValue() const {
+    // FIXME: Alignment
+ return RValue::getAggregate(getAddress(), isVolatileQualified());
}
};
@@ -338,6 +318,8 @@ class AggValueSlot {
// Qualifiers
Qualifiers Quals;
+ unsigned short Alignment;
+
/// DestructedFlag - This is set to true if some external code is
/// responsible for setting up a destructor for the slot. Otherwise
/// the code which constructs it should push the appropriate cleanup.
@@ -376,11 +358,8 @@ public:
/// ignored - Returns an aggregate value slot indicating that the
/// aggregate value is being ignored.
static AggValueSlot ignored() {
- AggValueSlot AV;
- AV.Addr = 0;
- AV.Quals = Qualifiers();
- AV.DestructedFlag = AV.ObjCGCFlag = AV.ZeroedFlag = AV.AliasedFlag = false;
- return AV;
+ return forAddr(0, CharUnits(), Qualifiers(), IsNotDestructed,
+ DoesNotNeedGCBarriers, IsNotAliased);
}
/// forAddr - Make a slot for an aggregate value.
@@ -393,13 +372,15 @@ public:
/// for calling destructors on this object
/// \param needsGC - true if the slot is potentially located
/// somewhere that ObjC GC calls should be emitted for
- static AggValueSlot forAddr(llvm::Value *addr, Qualifiers quals,
+ static AggValueSlot forAddr(llvm::Value *addr, CharUnits align,
+ Qualifiers quals,
IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
AggValueSlot AV;
AV.Addr = addr;
+ AV.Alignment = align.getQuantity();
AV.Quals = quals;
AV.DestructedFlag = isDestructed;
AV.ObjCGCFlag = needsGC;
@@ -412,8 +393,8 @@ public:
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
- return forAddr(LV.getAddress(), LV.getQuals(),
- isDestructed, needsGC, isAliased, isZeroed);
+ return forAddr(LV.getAddress(), LV.getAlignment(),
+ LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
}
IsDestructed_t isExternallyDestructed() const {
@@ -445,14 +426,19 @@ public:
return Addr == 0;
}
+ CharUnits getAlignment() const {
+ return CharUnits::fromQuantity(Alignment);
+ }
+
IsAliased_t isPotentiallyAliased() const {
return IsAliased_t(AliasedFlag);
}
+ // FIXME: Alignment?
RValue asRValue() const {
return RValue::getAggregate(getAddr(), isVolatile());
}
-
+
void setZeroed(bool V = true) { ZeroedFlag = V; }
IsZeroed_t isZeroed() const {
return IsZeroed_t(ZeroedFlag);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
index 68dd5c9..dd32167 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTConsumer.h"
@@ -18,9 +19,12 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/LLVMContext.h"
+#include "llvm/Linker.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/IRReader.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
@@ -30,6 +34,7 @@ using namespace llvm;
namespace clang {
class BackendConsumer : public ASTConsumer {
+ virtual void anchor();
DiagnosticsEngine &Diags;
BackendAction Action;
const CodeGenOptions &CodeGenOpts;
@@ -40,9 +45,9 @@ namespace clang {
Timer LLVMIRGeneration;
- llvm::OwningPtr<CodeGenerator> Gen;
+ OwningPtr<CodeGenerator> Gen;
- llvm::OwningPtr<llvm::Module> TheModule;
+ OwningPtr<llvm::Module> TheModule, LinkModule;
public:
BackendConsumer(BackendAction action, DiagnosticsEngine &_Diags,
@@ -50,7 +55,9 @@ namespace clang {
const TargetOptions &targetopts,
const LangOptions &langopts,
bool TimePasses,
- const std::string &infile, raw_ostream *OS,
+ const std::string &infile,
+ llvm::Module *LinkModule,
+ raw_ostream *OS,
LLVMContext &C) :
Diags(_Diags),
Action(action),
@@ -59,11 +66,17 @@ namespace clang {
LangOpts(langopts),
AsmOutStream(OS),
LLVMIRGeneration("LLVM IR Generation Time"),
- Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)) {
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)),
+ LinkModule(LinkModule) {
llvm::TimePassesIsEnabled = TimePasses;
}
llvm::Module *takeModule() { return TheModule.take(); }
+ llvm::Module *takeLinkModule() { return LinkModule.take(); }
+
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Gen->HandleCXXStaticMemberVarInstantiation(VD);
+ }
virtual void Initialize(ASTContext &Ctx) {
Context = &Ctx;
@@ -79,7 +92,7 @@ namespace clang {
LLVMIRGeneration.stopTimer();
}
- virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
Context->getSourceManager(),
"LLVM IR generation of declaration");
@@ -91,6 +104,8 @@ namespace clang {
if (llvm::TimePassesIsEnabled)
LLVMIRGeneration.stopTimer();
+
+ return true;
}
virtual void HandleTranslationUnit(ASTContext &C) {
@@ -111,7 +126,7 @@ namespace clang {
// Make sure IR generation is happy with the module. This is released by
// the module provider.
- Module *M = Gen->ReleaseModule();
+ llvm::Module *M = Gen->ReleaseModule();
if (!M) {
// The module has been released by IR gen on failures, do not double
// free.
@@ -122,6 +137,17 @@ namespace clang {
assert(TheModule.get() == M &&
"Unexpected module change during IR generation");
+ // Link LinkModule into this module if present, preserving its validity.
+ if (LinkModule) {
+ std::string ErrorMsg;
+ if (Linker::LinkModules(M, LinkModule.get(), Linker::PreserveSource,
+ &ErrorMsg)) {
+ Diags.Report(diag::err_fe_cannot_link_module)
+ << LinkModule->getModuleIdentifier() << ErrorMsg;
+ return;
+ }
+ }
+
// Install an inline asm handler so that diagnostics get printed through
// our diagnostics hooks.
LLVMContext &Ctx = TheModule->getContext();
@@ -160,6 +186,8 @@ namespace clang {
void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
SourceLocation LocCookie);
};
+
+ void BackendConsumer::anchor() {}
}
/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
@@ -215,8 +243,17 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
if (LocCookie.isValid()) {
Diags.Report(LocCookie, diag::err_fe_inline_asm).AddString(Message);
- if (D.getLoc().isValid())
- Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ if (D.getLoc().isValid()) {
+ DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ // Convert the SMDiagnostic ranges into SourceRange and attach them
+ // to the diagnostic.
+ for (unsigned i = 0, e = D.getRanges().size(); i != e; ++i) {
+ std::pair<unsigned, unsigned> Range = D.getRanges()[i];
+ unsigned Column = D.getColumnNo();
+ B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
+ Loc.getLocWithOffset(Range.second - Column));
+ }
+ }
return;
}
@@ -229,7 +266,8 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
//
CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
- : Act(_Act), VMContext(_VMContext ? _VMContext : new LLVMContext),
+ : Act(_Act), LinkModule(0),
+ VMContext(_VMContext ? _VMContext : new LLVMContext),
OwnsVMContext(!_VMContext) {}
CodeGenAction::~CodeGenAction() {
@@ -245,6 +283,10 @@ void CodeGenAction::EndSourceFileAction() {
if (!getCompilerInstance().hasASTConsumer())
return;
+ // If we were given a link module, release the consumer's ownership of it.
+ if (LinkModule)
+ BEConsumer->takeLinkModule();
+
// Steal the module from the consumer.
TheModule.reset(BEConsumer->takeModule());
}
@@ -281,16 +323,40 @@ static raw_ostream *GetOutputStream(CompilerInstance &CI,
ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
BackendAction BA = static_cast<BackendAction>(Act);
- llvm::OwningPtr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+ OwningPtr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
if (BA != Backend_EmitNothing && !OS)
return 0;
+ llvm::Module *LinkModuleToUse = LinkModule;
+
+ // If we were not given a link module, and the user requested that one be
+ // loaded from bitcode, do so now.
+ const std::string &LinkBCFile = CI.getCodeGenOpts().LinkBitcodeFile;
+ if (!LinkModuleToUse && !LinkBCFile.empty()) {
+ std::string ErrorStr;
+
+ llvm::MemoryBuffer *BCBuf =
+ CI.getFileManager().getBufferForFile(LinkBCFile, &ErrorStr);
+ if (!BCBuf) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << ErrorStr;
+ return 0;
+ }
+
+ LinkModuleToUse = getLazyBitcodeModule(BCBuf, *VMContext, &ErrorStr);
+ if (!LinkModuleToUse) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << ErrorStr;
+ return 0;
+ }
+ }
+
BEConsumer =
new BackendConsumer(BA, CI.getDiagnostics(),
CI.getCodeGenOpts(), CI.getTargetOpts(),
CI.getLangOpts(),
- CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
- *VMContext);
+ CI.getFrontendOpts().ShowTimers, InFile,
+ LinkModuleToUse, OS.take(), *VMContext);
return BEConsumer;
}
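Between them, the HandleTranslationUnit and CreateASTConsumer hunks wire up module linking: a module is lazily materialized from the LinkBitcodeFile buffer, handed to the consumer, and merged with Linker::LinkModules under PreserveSource so the linked module survives the merge. A condensed sketch of that flow, with Buf as a hypothetical stand-in for the buffer the FileManager returned and error reporting elided:

    #include "llvm/Bitcode/ReaderWriter.h"   // getLazyBitcodeModule
    #include "llvm/Linker.h"                 // Linker::LinkModules
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include <string>

    // Sketch of the load-then-link flow in this patch; returns true on error.
    static bool linkBitcodeInto(llvm::Module *Dest, llvm::MemoryBuffer *Buf,
                                llvm::LLVMContext &Ctx, std::string &Err) {
      // Lazy: function bodies stay in the buffer until first use.
      llvm::Module *Link = llvm::getLazyBitcodeModule(Buf, Ctx, &Err);
      if (!Link)
        return true;
      // PreserveSource keeps 'Link' valid after the merge, which the
      // consumer relies on when EndSourceFileAction() takes it back.
      return llvm::Linker::LinkModules(Dest, Link, llvm::Linker::PreserveSource,
                                       &Err);
    }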
@@ -328,8 +394,17 @@ void CodeGenAction::ExecuteAction() {
StringRef Msg = Err.getMessage();
if (Msg.startswith("error: "))
Msg = Msg.substr(7);
+
+ // Escape '%', which is interpreted as a format character.
+ llvm::SmallString<128> EscapedMessage;
+ for (unsigned i = 0, e = Msg.size(); i != e; ++i) {
+ if (Msg[i] == '%')
+ EscapedMessage += '%';
+ EscapedMessage += Msg[i];
+ }
+
unsigned DiagID = CI.getDiagnostics().getCustomDiagID(
- DiagnosticsEngine::Error, Msg);
+ DiagnosticsEngine::Error, EscapedMessage);
CI.getDiagnostics().Report(Loc, DiagID);
return;
@@ -348,20 +423,26 @@ void CodeGenAction::ExecuteAction() {
//
+void EmitAssemblyAction::anchor() { }
EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitAssembly, _VMContext) {}
+void EmitBCAction::anchor() { }
EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitBC, _VMContext) {}
+void EmitLLVMAction::anchor() { }
EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitLL, _VMContext) {}
+void EmitLLVMOnlyAction::anchor() { }
EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitNothing, _VMContext) {}
+void EmitCodeGenOnlyAction::anchor() { }
EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitMCNull, _VMContext) {}
+void EmitObjAction::anchor() { }
EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext)
: CodeGenAction(Backend_EmitObj, _VMContext) {}
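The anchor() definitions added throughout this file pin each class's vtable (and type info) to this one object file instead of letting the compiler emit weak copies in every translation unit. The idiom is just an out-of-line definition for one virtual member; a generic illustration, with hypothetical names:

    struct Anchored {
      virtual void anchor();            // declared in the header
      virtual int size() const { return 0; }
    };
    // Defined in exactly one .cpp: the vtable for Anchored is emitted
    // there once, rather than weakly wherever the header is included.
    void Anchored::anchor() {}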
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
index 8191f02..06e90b6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -16,9 +16,7 @@
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
-#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -31,20 +29,29 @@ using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: CodeGenTypeCache(cgm), CGM(cgm),
- Target(CGM.getContext().getTargetInfo()), Builder(cgm.getModule().getContext()),
+ Target(CGM.getContext().getTargetInfo()),
+ Builder(cgm.getModule().getContext()),
AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
- NormalCleanupDest(0), NextCleanupDestIndex(1),
- EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
+ LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
+ FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
- CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
- OutermostConditional(0), TerminateLandingPad(0), TerminateHandler(0),
- TrapBB(0) {
+ CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
+ CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
+ TerminateHandler(0), TrapBB(0) {
- CatchUndefined = getContext().getLangOptions().CatchUndefined;
+ CatchUndefined = getContext().getLangOpts().CatchUndefined;
CGM.getCXXABI().getMangleContext().startNewFunction();
}
+CodeGenFunction::~CodeGenFunction() {
+ // If there are any unclaimed block infos, go ahead and destroy them
+ // now. This can happen if IR-gen gets clever and skips evaluating
+ // something.
+ if (FirstBlockInfo)
+ destroyBlockInfos(FirstBlockInfo);
+}
+
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
@@ -222,8 +229,7 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
llvm::PointerType *PointerTy = Int8PtrTy;
llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
llvm::FunctionType *FunctionTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- ProfileFuncArgs, false);
+ llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);
llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
llvm::CallInst *CallSite = Builder.CreateCall(
@@ -237,8 +243,7 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
}
void CodeGenFunction::EmitMCountInstrumentation() {
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), false);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
Target.getMCountName());
@@ -261,15 +266,16 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Pass inline keyword to optimizer if it appears explicitly on any
// declaration.
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
- for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
- RE = FD->redecls_end(); RI != RE; ++RI)
- if (RI->isInlineSpecified()) {
- Fn->addFnAttr(llvm::Attribute::InlineHint);
- break;
- }
+ if (!CGM.getCodeGenOpts().NoInline)
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
+ RE = FD->redecls_end(); RI != RE; ++RI)
+ if (RI->isInlineSpecified()) {
+ Fn->addFnAttr(llvm::Attribute::InlineHint);
+ break;
+ }
- if (getContext().getLangOptions().OpenCL) {
+ if (getContext().getLangOpts().OpenCL) {
// Add metadata for a kernel function.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
if (FD->hasAttr<OpenCLKernelAttr>()) {
@@ -298,12 +304,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
- // FIXME: what is going on here and why does it ignore all these
- // interesting type properties?
+ unsigned NumArgs = 0;
+ QualType *ArgsArray = new QualType[Args.size()];
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ ArgsArray[NumArgs++] = (*i)->getType();
+ }
+
QualType FnType =
- getContext().getFunctionType(RetTy, 0, 0,
+ getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
FunctionProtoType::ExtProtoInfo());
+ delete[] ArgsArray;
+
DI->setLocation(StartLoc);
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
@@ -328,7 +341,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Tell the epilog emitter to autorelease the result. We do this
// now so that various specialized functions can suppress it
// during their IR-generation.
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
!CurFnInfo->isReturnsRetained() &&
RetTy->isObjCRetainableType())
AutoreleaseResult = true;
@@ -339,8 +352,27 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
PrologueCleanupDepth = EHStack.stable_begin();
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
- if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
+ if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ if (MD->getParent()->isLambda() &&
+ MD->getOverloadedOperator() == OO_Call) {
+ // We're in a lambda; figure out the captures.
+ MD->getParent()->getCaptureFields(LambdaCaptureFields,
+ LambdaThisCaptureField);
+ if (LambdaThisCaptureField) {
+ // If this lambda captures this, load it.
+ LValue ThisLValue = EmitLValueForField(CXXABIThisValue,
+ LambdaThisCaptureField, 0);
+ CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
+ }
+ } else {
+ // Not in a lambda; just use 'this' from the method.
+ // FIXME: Should we generate a new load for each use of 'this'? The
+ // fast register allocator would be happier...
+ CXXThisValue = CXXABIThisValue;
+ }
+ }
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
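The new code runs only for a lambda's call operator: the capture fields are computed once per function, and a captured 'this' is re-loaded from its closure member before any user code executes. The C++11 input that exercises the path:

    struct Widget {
      int x;
      int doubled() {
        // The lambda captures 'this' (not 'x'), so inside operator() the
        // member access goes through LambdaThisCaptureField, which the
        // prologue above loads into CXXThisValue.
        auto g = [this] { return 2 * x; };
        return g();
      }
    };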
@@ -397,9 +429,8 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);
- if (FD->getNumParams())
- for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
- Args.push_back(FD->getParamDecl(i));
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+ Args.push_back(FD->getParamDecl(i));
SourceRange BodyRange;
if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
@@ -412,10 +443,21 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
EmitDestructorBody(Args);
else if (isa<CXXConstructorDecl>(FD))
EmitConstructorBody(Args);
- else if (getContext().getLangOptions().CUDA &&
+ else if (getContext().getLangOpts().CUDA &&
!CGM.getCodeGenOpts().CUDAIsDevice &&
FD->hasAttr<CUDAGlobalAttr>())
CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
+ else if (isa<CXXConversionDecl>(FD) &&
+ cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
+ // The lambda conversion to block pointer is special; the semantics can't be
+ // expressed in the AST, so IRGen needs to special-case it.
+ EmitLambdaToBlockPointerBody(Args);
+ } else if (isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
+ // The lambda "__invoke" function is special, because it forwards or
+ // clones the body of the function call operator (but is actually static).
+ EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
+ }
else
EmitFunctionBody(Args);
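Two of the new branches handle lambda entry points that have no body in the AST: the conversion to a block or function pointer, and the static __invoke forwarder it returns. The C++11 source pattern that reaches them is simply:

    // A captureless lambda converts to a plain function pointer; the
    // conversion yields the static invoker whose body is cloned or
    // forwarded by EmitLambdaStaticInvokeFunction above.
    int (*fp)(int) = [](int v) { return v + 1; };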
@@ -505,15 +547,14 @@ bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
- Expr::EvalResult Result;
- if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
- Result.HasSideEffects)
+ llvm::APSInt Int;
+ if (!Cond->EvaluateAsInt(Int, getContext()))
return false; // Not foldable, not integer or not fully evaluatable.
-
+
if (CodeGenFunction::ContainsLabel(Cond))
return false; // Contains a label.
-
- ResultInt = Result.Val.getInt();
+
+ ResultInt = Int;
return true;
}
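EvaluateAsInt already refuses expressions with side effects, so the rewrite drops the manual HasSideEffects check without changing behavior: a foldable integer condition still lets the emitter skip dead arms, unless a label could make them reachable. At the source level:

    /* This condition folds to a constant, so CodeGen emits only the taken
       arm -- no compare, no branch.  A label inside the skipped code that
       is reachable some other way is what the ContainsLabel() check
       guards against. */
    void pick(void) {
      if (sizeof(void *) == 8) {
        /* 64-bit-only path */
      } else {
        /* 32-bit-only path */
      }
    }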
@@ -606,29 +647,24 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
}
if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
- // Handle ?: operator.
-
- // Just ignore GNU ?: extension.
- if (CondOp->getLHS()) {
- // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
- llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
- llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
- ConditionalEvaluation cond(*this);
- EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+ ConditionalEvaluation cond(*this);
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
- cond.begin(*this);
- EmitBlock(LHSBlock);
- EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
- cond.end(*this);
+ cond.begin(*this);
+ EmitBlock(LHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
- cond.begin(*this);
- EmitBlock(RHSBlock);
- EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
- cond.end(*this);
+ cond.begin(*this);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
- return;
- }
+ return;
}
// Emit the code with the fully general case.
@@ -696,7 +732,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Ignore empty classes in C++.
- if (getContext().getLangOptions().CPlusPlus) {
+ if (getContext().getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
return;
@@ -916,19 +952,19 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
// We're going to walk down into the type and look for VLA
// expressions.
- type = type.getCanonicalType();
do {
assert(type->isVariablyModifiedType());
const Type *ty = type.getTypePtr();
switch (ty->getTypeClass()) {
+
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
-#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
-#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
- llvm_unreachable("unexpected dependent or non-canonical type!");
+ llvm_unreachable("unexpected dependent type!");
// These types are never variably-modified.
case Type::Builtin:
@@ -937,6 +973,8 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::ExtVector:
case Type::Record:
case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
@@ -986,11 +1024,31 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
break;
}
- case Type::FunctionProto:
+ case Type::FunctionProto:
case Type::FunctionNoProto:
type = cast<FunctionType>(ty)->getResultType();
break;
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::SubstTemplateTypeParm:
+ // Keep walking after single level desugaring.
+ type = type.getSingleStepDesugaredType(getContext());
+ break;
+
+ case Type::Typedef:
+ case Type::Decltype:
+ case Type::Auto:
+ // Stop walking: nothing to do.
+ return;
+
+ case Type::TypeOfExpr:
+ // Stop walking: emit typeof expression.
+ EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
+ return;
+
case Type::Atomic:
type = cast<AtomicType>(ty)->getValueType();
break;
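Dropping the up-front getCanonicalType() is what makes the new cases meaningful: sugar is now peeled one level at a time, typedefs terminate the walk (their size expressions were already evaluated at the declaration), and a typeof's operand is emitted exactly once. A small C illustration using GNU typeof; the two local declarations hit the new Typedef and TypeOfExpr cases:

    void g(int n) {
      typedef int Row[n];  /* 'n' is evaluated here, at the typedef...   */
      Row r;               /* ...so the walk stops at Type::Typedef      */
      typeof(r) s;         /* Type::TypeOfExpr: the operand is emitted
                              via EmitIgnoredExpr, then the walk returns */
      (void)r; (void)s;
    }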
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
index 157623d..3e0cd14 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -25,8 +25,10 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Debug.h"
#include "CodeGenModule.h"
#include "CGBuilder.h"
+#include "CGDebugInfo.h"
#include "CGValue.h"
namespace llvm {
@@ -41,8 +43,8 @@ namespace llvm {
}
namespace clang {
- class APValue;
class ASTContext;
+ class BlockDecl;
class CXXDestructorDecl;
class CXXForRangeStmt;
class CXXTryStmt;
@@ -69,7 +71,6 @@ namespace clang {
namespace CodeGen {
class CodeGenTypes;
- class CGDebugInfo;
class CGFunctionInfo;
class CGRecordLayout;
class CGBlockInfo;
@@ -598,6 +599,9 @@ public:
const CodeGen::CGBlockInfo *BlockInfo;
llvm::Value *BlockPointer;
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+
/// \brief A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
@@ -609,6 +613,9 @@ public:
unsigned NextCleanupDestIndex;
+ /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
+ CGBlockInfo *FirstBlockInfo;
+
/// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
llvm::BasicBlock *EHResumeBlock;
@@ -625,10 +632,6 @@ public:
llvm::BasicBlock *getInvokeDestImpl();
- /// Set up the last cleaup that was pushed as a conditional
- /// full-expression cleanup.
- void initFullExprCleanup();
-
template <class T>
typename DominatingValue<T>::saved_type saveValueInCond(T value) {
return DominatingValue<T>::save(*this, value);
@@ -739,6 +742,10 @@ public:
initFullExprCleanup();
}
+ /// Set up the last cleanup that was pushed as a conditional
+ /// full-expression cleanup.
+ void initFullExprCleanup();
+
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
@@ -758,16 +765,27 @@ public:
/// DeactivateCleanupBlock - Deactivates the given cleanup block.
/// The block cannot be reactivated. Pops it if it's the top of the
/// stack.
- void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+ /// all paths of execution between the current IP and the
+ /// point at which the cleanup comes into scope.
+ void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
/// ActivateCleanupBlock - Activates an initially-inactive cleanup.
/// Cannot be used to resurrect a deactivated cleanup.
- void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+ /// all paths of execution between the current IP and the
+ /// point at which the cleanup comes into scope.
+ void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
/// \brief Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
class RunCleanupsScope {
- CodeGenFunction& CGF;
EHScopeStack::stable_iterator CleanupStackDepth;
bool OldDidCallStackSave;
bool PerformCleanup;
@@ -775,10 +793,13 @@ public:
RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ protected:
+ CodeGenFunction& CGF;
+
public:
/// \brief Enter a new cleanup scope.
explicit RunCleanupsScope(CodeGenFunction &CGF)
- : CGF(CGF), PerformCleanup(true)
+ : PerformCleanup(true), CGF(CGF)
{
CleanupStackDepth = CGF.EHStack.stable_begin();
OldDidCallStackSave = CGF.DidCallStackSave;
@@ -809,6 +830,41 @@ public:
}
};
+ class LexicalScope: protected RunCleanupsScope {
+ SourceRange Range;
+ bool PopDebugStack;
+
+ LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
+ LexicalScope &operator=(const LexicalScope &);
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
+ : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~LexicalScope() {
+ if (PopDebugStack) {
+ CGDebugInfo *DI = CGF.getDebugInfo();
+ if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ }
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ RunCleanupsScope::ForceCleanup();
+ if (CGDebugInfo *DI = CGF.getDebugInfo()) {
+ DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ PopDebugStack = false;
+ }
+ }
+ };
+
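LexicalScope layers debug-info bookkeeping onto RunCleanupsScope: construction opens a lexical block, and either the destructor or ForceCleanup() closes it, with PopDebugStack preventing a double close. A hypothetical use from inside a statement emitter (everything but LexicalScope itself is a stand-in):

    // Sketch only; assumes it lives in lib/CodeGen next to this header.
    void emitBody(clang::CodeGen::CodeGenFunction &CGF,
                  const clang::CompoundStmt &S) {
      clang::CodeGen::CodeGenFunction::LexicalScope
          Scope(CGF, S.getSourceRange());
      // ... emit child statements; cleanups and the debug lexical block
      // both end when 'Scope' is destroyed (or ForceCleanup() runs).
    }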
/// PopCleanupBlocks - Takes the old cleanup stack size and emits
/// the cleanup blocks that have been added.
@@ -881,6 +937,12 @@ public:
/// one branch or the other of a conditional expression.
bool isInConditionalBranch() const { return OutermostConditional != 0; }
+ void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
+ assert(isInConditionalBranch());
+ llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
+ new llvm::StoreInst(value, addr, &block->back());
+ }
+
/// An RAII object to record that we're evaluating a statement
/// expression.
class StmtExprEvaluation {
@@ -912,18 +974,91 @@ public:
public:
PeepholeProtection() : Inst(0) {}
- };
+ };
- /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
- class OpaqueValueMapping {
- CodeGenFunction &CGF;
+ /// A non-RAII class containing all the information about a bound
+ /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
+ /// this which makes individual mappings very simple; using this
+ /// class directly is useful when you have a variable number of
+ /// opaque values or don't want the RAII functionality for some
+ /// reason.
+ class OpaqueValueMappingData {
const OpaqueValueExpr *OpaqueValue;
bool BoundLValue;
CodeGenFunction::PeepholeProtection Protection;
+ OpaqueValueMappingData(const OpaqueValueExpr *ov,
+ bool boundLValue)
+ : OpaqueValue(ov), BoundLValue(boundLValue) {}
+ public:
+ OpaqueValueMappingData() : OpaqueValue(0) {}
+
+ static bool shouldBindAsLValue(const Expr *expr) {
+ // gl-values should be bound as l-values for obvious reasons.
+ // Records should be bound as l-values because IR generation
+ // always keeps them in memory. Expressions of function type
+ // act exactly like l-values but are formally required to be
+ // r-values in C.
+ return expr->isGLValue() ||
+ expr->getType()->isRecordType() ||
+ expr->getType()->isFunctionType();
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const Expr *e) {
+ if (shouldBindAsLValue(ov))
+ return bind(CGF, ov, CGF.EmitLValue(e));
+ return bind(CGF, ov, CGF.EmitAnyExpr(e));
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const LValue &lv) {
+ assert(shouldBindAsLValue(ov));
+ CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
+ return OpaqueValueMappingData(ov, true);
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const RValue &rv) {
+ assert(!shouldBindAsLValue(ov));
+ CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
+
+ OpaqueValueMappingData data(ov, false);
+
+ // Work around an extremely aggressive peephole optimization in
+ // EmitScalarConversion which assumes that all other uses of a
+ // value are extant.
+ data.Protection = CGF.protectFromPeepholes(rv);
+
+ return data;
+ }
+
+ bool isValid() const { return OpaqueValue != 0; }
+ void clear() { OpaqueValue = 0; }
+
+ void unbind(CodeGenFunction &CGF) {
+ assert(OpaqueValue && "no data to unbind!");
+
+ if (BoundLValue) {
+ CGF.OpaqueLValues.erase(OpaqueValue);
+ } else {
+ CGF.OpaqueRValues.erase(OpaqueValue);
+ CGF.unprotectFromPeepholes(Protection);
+ }
+ }
+ };
+
+ /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
+ class OpaqueValueMapping {
+ CodeGenFunction &CGF;
+ OpaqueValueMappingData Data;
+
public:
static bool shouldBindAsLValue(const Expr *expr) {
- return expr->isGLValue() || expr->getType()->isRecordType();
+ return OpaqueValueMappingData::shouldBindAsLValue(expr);
}
/// Build the opaque value mapping for the given conditional
@@ -933,75 +1068,34 @@ public:
///
OpaqueValueMapping(CodeGenFunction &CGF,
const AbstractConditionalOperator *op) : CGF(CGF) {
- if (isa<ConditionalOperator>(op)) {
- OpaqueValue = 0;
- BoundLValue = false;
+ if (isa<ConditionalOperator>(op))
+ // Leave Data empty.
return;
- }
const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
- init(e->getOpaqueValue(), e->getCommon());
+ Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
+ e->getCommon());
}
OpaqueValueMapping(CodeGenFunction &CGF,
const OpaqueValueExpr *opaqueValue,
LValue lvalue)
- : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(true) {
- assert(opaqueValue && "no opaque value expression!");
- assert(shouldBindAsLValue(opaqueValue));
- initLValue(lvalue);
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
}
OpaqueValueMapping(CodeGenFunction &CGF,
const OpaqueValueExpr *opaqueValue,
RValue rvalue)
- : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(false) {
- assert(opaqueValue && "no opaque value expression!");
- assert(!shouldBindAsLValue(opaqueValue));
- initRValue(rvalue);
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
}
void pop() {
- assert(OpaqueValue && "mapping already popped!");
- popImpl();
- OpaqueValue = 0;
+ Data.unbind(CGF);
+ Data.clear();
}
~OpaqueValueMapping() {
- if (OpaqueValue) popImpl();
- }
-
- private:
- void popImpl() {
- if (BoundLValue)
- CGF.OpaqueLValues.erase(OpaqueValue);
- else {
- CGF.OpaqueRValues.erase(OpaqueValue);
- CGF.unprotectFromPeepholes(Protection);
- }
- }
-
- void init(const OpaqueValueExpr *ov, const Expr *e) {
- OpaqueValue = ov;
- BoundLValue = shouldBindAsLValue(ov);
- assert(BoundLValue == shouldBindAsLValue(e)
- && "inconsistent expression value kinds!");
- if (BoundLValue)
- initLValue(CGF.EmitLValue(e));
- else
- initRValue(CGF.EmitAnyExpr(e));
- }
-
- void initLValue(const LValue &lv) {
- CGF.OpaqueLValues.insert(std::make_pair(OpaqueValue, lv));
- }
-
- void initRValue(const RValue &rv) {
- // Work around an extremely aggressive peephole optimization in
- // EmitScalarConversion which assumes that all other uses of a
- // value are extant.
- Protection = CGF.protectFromPeepholes(rv);
- CGF.OpaqueRValues.insert(std::make_pair(OpaqueValue, rv));
+ if (Data.isValid()) Data.unbind(CGF);
}
};
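With the data split out, call sites that juggle a variable number of opaque values (the pseudo-object paths declared later in this header) can bind and unbind without RAII, while OpaqueValueMapping stays a thin wrapper over a single OpaqueValueMappingData. A hedged sketch of the non-RAII pattern; 'CGF' and 'Pairs' are hypothetical stand-ins for the function and a list of (OpaqueValueExpr*, Expr*) sources:

    typedef clang::CodeGen::CodeGenFunction::OpaqueValueMappingData OVMD;
    llvm::SmallVector<OVMD, 4> Bindings;
    for (unsigned i = 0, e = Pairs.size(); i != e; ++i)
      Bindings.push_back(OVMD::bind(CGF, Pairs[i].first, Pairs[i].second));
    // ... emit the expression that mentions the opaque values ...
    for (unsigned i = Bindings.size(); i != 0; --i)
      Bindings[i - 1].unbind(CGF);   // reverse order, mirroring bind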
@@ -1046,7 +1140,7 @@ private:
};
SmallVector<BreakContinue, 8> BreakContinueStack;
- /// SwitchInsn - This is nearest current switch instruction. It is null if if
+ /// SwitchInsn - This is the nearest current switch instruction. It is null if
/// current context is not in a switch.
llvm::SwitchInst *SwitchInsn;
@@ -1073,7 +1167,8 @@ private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
- ImplicitParamDecl *CXXThisDecl;
+ ImplicitParamDecl *CXXABIThisDecl;
+ llvm::Value *CXXABIThisValue;
llvm::Value *CXXThisValue;
/// CXXVTTDecl - When generating code for a base object constructor or
@@ -1099,6 +1194,7 @@ private:
public:
CodeGenFunction(CodeGenModule &cgm);
+ ~CodeGenFunction();
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
ASTContext &getContext() const { return CGM.getContext(); }
@@ -1114,7 +1210,7 @@ public:
return CGM.getCodeGenOpts().OptimizationLevel == 0;
}
- const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
+ const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
/// Returns a pointer to the function's exception object and selector slot,
/// which is assigned in every landing pad.
@@ -1152,27 +1248,27 @@ public:
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
- Destroyer &destroyer);
+ Destroyer *destroyer);
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
- Destroyer &destroyer);
+ Destroyer *destroyer);
void pushDestroy(QualType::DestructionKind dtorKind,
llvm::Value *addr, QualType type);
void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
- Destroyer &destroyer, bool useEHCleanupForArray);
- void emitDestroy(llvm::Value *addr, QualType type, Destroyer &destroyer,
+ Destroyer *destroyer, bool useEHCleanupForArray);
+ void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
llvm::Function *generateDestroyHelper(llvm::Constant *addr,
QualType type,
- Destroyer &destroyer,
+ Destroyer *destroyer,
bool useEHCleanupForArray);
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
- QualType type, Destroyer &destroyer,
+ QualType type, Destroyer *destroyer,
bool checkZeroLength, bool useEHCleanup);
- Destroyer &getDestroyer(QualType::DestructionKind destructionKind);
+ Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
/// Determines whether an EH cleanup is required to destroy a type
/// with the given destruction kind.
@@ -1182,9 +1278,9 @@ public:
return false;
case QualType::DK_cxx_destructor:
case QualType::DK_objc_weak_lifetime:
- return getLangOptions().Exceptions;
+ return getLangOpts().Exceptions;
case QualType::DK_objc_strong_lifetime:
- return getLangOptions().Exceptions &&
+ return getLangOpts().Exceptions &&
CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
}
llvm_unreachable("bad destruction kind");
@@ -1208,7 +1304,8 @@ public:
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
- const ObjCPropertyImplDecl *propImpl);
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD, bool ctor);
@@ -1218,7 +1315,8 @@ public:
void GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
- const ObjCPropertyImplDecl *propImpl);
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
bool IvarTypeWithAggrGCObjects(QualType Ty);
@@ -1227,6 +1325,8 @@ public:
//===--------------------------------------------------------------------===//
llvm::Value *EmitBlockLiteral(const BlockExpr *);
+ llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
+ static void destroyBlockInfos(CGBlockInfo *info);
llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
const CGBlockInfo &Info,
llvm::StructType *,
@@ -1235,10 +1335,16 @@ public:
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &Info,
const Decl *OuterFuncDecl,
- const DeclMapTy &ldm);
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock);
llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
@@ -1253,10 +1359,7 @@ public:
}
void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
- void AllocateBlockDecl(const BlockDeclRefExpr *E);
- llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
- return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
- }
+ void AllocateBlockDecl(const DeclRefExpr *E);
llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
llvm::Type *BuildByRefType(const VarDecl *var);
@@ -1272,6 +1375,13 @@ public:
void EmitDestructorBody(FunctionArgList &Args);
void EmitFunctionBody(FunctionArgList &Args);
+ void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs);
+ void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
+ void EmitLambdaBlockInvokeBody();
+ void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
+ void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
+
/// EmitReturnBlock - Emit the unified return block, trying to avoid its
/// emission when possible.
void EmitReturnBlock();
@@ -1290,6 +1400,9 @@ public:
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
FunctionArgList &Args);
+ void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes);
+
/// InitializeVTablePointer - Initialize the vtable pointer of the given
/// subobject.
///
@@ -1442,7 +1555,15 @@ public:
// Helpers
//===--------------------------------------------------------------------===//
- LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
+ LValue MakeAddrLValue(llvm::Value *V, QualType T,
+ CharUnits Alignment = CharUnits()) {
+ return LValue::MakeAddr(V, T, Alignment, getContext(),
+ CGM.getTBAAInfo(T));
+ }
+ LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ CharUnits Alignment;
+ if (!T->isIncompleteType())
+ Alignment = getContext().getTypeAlignInChars(T);
return LValue::MakeAddr(V, T, Alignment, getContext(),
CGM.getTBAAInfo(T));
}
@@ -1470,7 +1591,9 @@ public:
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name), T.getQualifiers(),
+ CharUnits Alignment = getContext().getTypeAlignInChars(T);
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
+ T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -1519,7 +1642,8 @@ public:
/// \param isVolatile - True iff either the source or the destination is
/// volatile.
void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
- QualType EltTy, bool isVolatile=false);
+ QualType EltTy, bool isVolatile=false,
+ unsigned Alignment = 0);
/// StartBlock - Start new block named N. If insert block is a dummy block
/// then reuse it.
@@ -1677,7 +1801,8 @@ public:
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
llvm::Value *NewPtr, llvm::Value *NumElements);
- void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
+ void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
+ llvm::Value *Ptr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
@@ -1688,6 +1813,10 @@ public:
llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+ void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
+ void EmitStdInitializerListCleanup(llvm::Value *loc,
+ const InitListExpr *init);
+
void EmitCheck(llvm::Value *, unsigned Size);
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
@@ -1917,13 +2046,14 @@ public:
/// the LLVM value representation.
void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment, QualType Ty,
- llvm::MDNode *TBAAInfo = 0);
+ llvm::MDNode *TBAAInfo = 0, bool isInit=false);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation. The l-value must be a simple
- /// l-value.
- void EmitStoreOfScalar(llvm::Value *value, LValue lvalue);
+ /// l-value. The isInit flag indicates whether this is an initialization.
+ /// If so, atomic qualifiers are ignored and the store is always non-atomic.
+ void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
@@ -1931,15 +2061,12 @@ public:
RValue EmitLoadOfLValue(LValue V);
RValue EmitLoadOfExtVectorElementLValue(LValue V);
RValue EmitLoadOfBitfieldLValue(LValue LV);
- RValue EmitLoadOfPropertyRefLValue(LValue LV,
- ReturnValueSlot Return = ReturnValueSlot());
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
- void EmitStoreThroughLValue(RValue Src, LValue Dst);
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
- void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst);
/// EmitStoreThroughLValue - Store Src into Dst with same constraints as
/// EmitStoreThroughLValue.
@@ -1977,6 +2104,40 @@ public:
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
+ class ConstantEmission {
+ llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
+ ConstantEmission(llvm::Constant *C, bool isReference)
+ : ValueAndIsReference(C, isReference) {}
+ public:
+ ConstantEmission() {}
+ static ConstantEmission forReference(llvm::Constant *C) {
+ return ConstantEmission(C, true);
+ }
+ static ConstantEmission forValue(llvm::Constant *C) {
+ return ConstantEmission(C, false);
+ }
+
+ operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
+
+ bool isReference() const { return ValueAndIsReference.getInt(); }
+ LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
+ assert(isReference());
+ return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
+ refExpr->getType());
+ }
+
+ llvm::Constant *getValue() const {
+ assert(!isReference());
+ return ValueAndIsReference.getPointer();
+ }
+ };
+
+ ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
+
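ConstantEmission folds the constant and an is-reference flag into a single PointerIntPair, so a caller can test for success and then take exactly one of the two extraction paths. A hedged sketch of the intended use; 'CGF' and 'RefExpr' are stand-ins for the function and a DeclRefExpr*:

    CodeGenFunction::ConstantEmission CE = CGF.tryEmitAsConstant(RefExpr);
    if (CE) {                                  // operator bool: did it fold?
      if (CE.isReference())                    // folded to a constant address
        return CGF.EmitLoadOfLValue(CE.getReferenceLValue(CGF, RefExpr));
      llvm::Constant *Val = CE.getValue();     // folded to a constant value
      // ... wrap 'Val' in an RValue however the caller requires ...
    }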
+ RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
+ AggValueSlot slot = AggValueSlot::ignored());
+ LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
+
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
LValue EmitLValueForAnonRecordField(llvm::Value* Base,
@@ -1999,16 +2160,13 @@ public:
LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
unsigned CVRQualifiers);
- LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
-
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
- LValue EmitExprWithCleanupsLValue(const ExprWithCleanups *E);
+ LValue EmitLambdaLValue(const LambdaExpr *E);
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
- LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
@@ -2098,12 +2256,18 @@ public:
llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
bool negateForRightShift);
- llvm::Value *BuildVector(const SmallVectorImpl<llvm::Value*> &Ops);
+ llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCNumericLiteral(const ObjCNumericLiteral *E);
+ llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
+ llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
+ llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects);
llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return = ReturnValueSlot());
@@ -2243,7 +2407,8 @@ public:
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
- void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr);
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ bool PerformInit);
/// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
/// with the C++ runtime so that its destructor will be called at exit.
@@ -2255,7 +2420,8 @@ public:
/// possible to prove that an initialization will be done exactly
/// once, e.g. with a static local variable or a static data member
/// of a class template.
- void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr);
+ void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
+ bool PerformInit);
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
@@ -2263,26 +2429,32 @@ public:
llvm::Constant **Decls,
unsigned NumDecls);
- /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
+ /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
- void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
- const std::vector<std::pair<llvm::WeakVH,
- llvm::Constant*> > &DtorsAndObjects);
+ void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH,
+ llvm::Constant*> > &DtorsAndObjects);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
- llvm::GlobalVariable *Addr);
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
const Expr *Exp);
- RValue EmitExprWithCleanups(const ExprWithCleanups *E,
- AggValueSlot Slot =AggValueSlot::ignored());
+ void enterFullExpression(const ExprWithCleanups *E) {
+ if (E->getNumObjects() == 0) return;
+ enterNonTrivialFullExpression(E);
+ }
+ void enterNonTrivialFullExpression(const ExprWithCleanups *E);
void EmitCXXThrowExpr(const CXXThrowExpr *E);
+ void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
+
RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
//===--------------------------------------------------------------------===//
@@ -2344,7 +2516,12 @@ public:
/// an r-value suitable for passing the given parameter.
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
+ /// SetFPAccuracy - Set the minimum required accuracy of the given floating
+ /// point operation, expressed as the maximum relative error in ulp.
+ void SetFPAccuracy(llvm::Value *Val, float Accuracy);
+
private:
+ llvm::MDNode *getRangeForLoadFromType(QualType Ty);
void EmitReturnOfRValue(RValue RV, QualType Ty);
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
@@ -2432,6 +2609,18 @@ private:
CodeGenModule::ByrefHelpers *
buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission);
+
+ void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
+
+ /// GetPointeeAlignment - Given an expression with a pointer type, find the
+ /// alignment of the type referenced by the pointer. Skip over implicit
+ /// casts.
+ unsigned GetPointeeAlignment(const Expr *Addr);
+
+ /// GetPointeeAlignmentValue - Given an expression with a pointer type, find
+ /// the alignment of the type referenced by the pointer. Skip over implicit
+ /// casts. Return the alignment as an llvm::Value.
+ llvm::Value *GetPointeeAlignmentValue(const Expr *Addr);
};
/// Helper class with most of the code for saving a value for a
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
index 275045c..c0ccf4d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -30,6 +30,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -38,6 +39,7 @@
#include "llvm/Module.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
@@ -56,35 +58,53 @@ static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
}
llvm_unreachable("invalid C++ ABI kind");
- return *CreateItaniumCXXABI(CGM);
}
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
llvm::Module &M, const llvm::TargetData &TD,
DiagnosticsEngine &diags)
- : Context(C), Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
+ : Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
- Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI, CGO),
+ Types(*this),
TBAA(0),
VTables(*this), ObjCRuntime(0), OpenCLRuntime(0), CUDARuntime(0),
- DebugInfo(0), ARCData(0), RRData(0), CFConstantStringClassRef(0),
+ DebugInfo(0), ARCData(0), NoObjCARCExceptionsMetadata(0),
+ RRData(0), CFConstantStringClassRef(0),
ConstantStringClassRef(0), NSConstantStringType(0),
VMContext(M.getContext()),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
BlockObjectAssign(0), BlockObjectDispose(0),
BlockDescriptorType(0), GenericBlockLiteralType(0) {
- if (Features.ObjC1)
+
+ // Initialize the type cache.
+ llvm::LLVMContext &LLVMContext = M.getContext();
+ VoidTy = llvm::Type::getVoidTy(LLVMContext);
+ Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
+ Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ FloatTy = llvm::Type::getFloatTy(LLVMContext);
+ DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
+ PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
+ PointerAlignInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
+ Int8PtrTy = Int8Ty->getPointerTo(0);
+ Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
+
+ if (LangOpts.ObjC1)
createObjCRuntime();
- if (Features.OpenCL)
+ if (LangOpts.OpenCL)
createOpenCLRuntime();
- if (Features.CUDA)
+ if (LangOpts.CUDA)
createCUDARuntime();
// Enable TBAA unless it's suppressed.
if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
- TBAA = new CodeGenTBAA(Context, VMContext, getLangOptions(),
+ TBAA = new CodeGenTBAA(Context, VMContext, getLangOpts(),
ABI.getMangleContext());
// If debug info or coverage generation is enabled, create the CGDebugInfo
@@ -95,23 +115,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Block.GlobalUniqueCount = 0;
- if (C.getLangOptions().ObjCAutoRefCount)
+ if (C.getLangOpts().ObjCAutoRefCount)
ARCData = new ARCEntrypoints();
RRData = new RREntrypoints();
-
- // Initialize the type cache.
- llvm::LLVMContext &LLVMContext = M.getContext();
- VoidTy = llvm::Type::getVoidTy(LLVMContext);
- Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
- Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
- Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
- PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
- PointerAlignInBytes =
- C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
- IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
- IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
- Int8PtrTy = Int8Ty->getPointerTo(0);
- Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
}
CodeGenModule::~CodeGenModule() {
@@ -127,7 +133,7 @@ CodeGenModule::~CodeGenModule() {
}
void CodeGenModule::createObjCRuntime() {
- if (!Features.NeXTRuntime)
+ if (!LangOpts.NeXTRuntime)
ObjCRuntime = CreateGNUObjCRuntime(*this);
else
ObjCRuntime = CreateMacObjCRuntime(*this);
@@ -168,8 +174,6 @@ void CodeGenModule::Release() {
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
Types.UpdateCompletedType(TD);
- if (DebugInfo)
- DebugInfo->UpdateCompletedType(TD);
}
llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
@@ -178,6 +182,12 @@ llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
return TBAA->getTBAAInfo(QTy);
}
+llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAInfoForVTablePtr();
+}
+
void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo) {
Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
@@ -292,7 +302,7 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
// If there's a key function, there may be translation units
// that don't have the key function's definition. But ignore
// this if we're emitting RTTI under -fno-rtti.
- if (!(TVK != TVK_ForRTTI) || Features.RTTI) {
+ if (!(TVK != TVK_ForRTTI) || LangOpts.RTTI) {
if (Context.getKeyFunction(RD))
return;
}
@@ -317,7 +327,7 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
return Str;
}
- llvm::SmallString<256> Buffer;
+ SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
@@ -379,16 +389,15 @@ void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Get the type of a ctor entry, { i32, void ()* }.
llvm::StructType *CtorStructTy =
- llvm::StructType::get(llvm::Type::getInt32Ty(VMContext),
- llvm::PointerType::getUnqual(CtorFTy), NULL);
+ llvm::StructType::get(Int32Ty, llvm::PointerType::getUnqual(CtorFTy), NULL);
// Construct the constructor and destructor arrays.
- std::vector<llvm::Constant*> Ctors;
+ SmallVector<llvm::Constant*, 8> Ctors;
for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
- std::vector<llvm::Constant*> S;
- S.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- I->second, false));
- S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
+ llvm::Constant *S[] = {
+ llvm::ConstantInt::get(Int32Ty, I->second, false),
+ llvm::ConstantExpr::getBitCast(I->first, CtorPFTy)
+ };
Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
}
@@ -431,7 +440,7 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
// merged with other definitions. c) C++ has the ODR, so we know the
// definition is dependable.
if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
- return !Context.getLangOptions().AppleKext
+ return !Context.getLangOpts().AppleKext
? llvm::Function::LinkOnceODRLinkage
: llvm::Function::InternalLinkage;
@@ -440,7 +449,7 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
// and must all be equivalent. However, we are not allowed to
// throw away these explicit instantiations.
if (Linkage == GVA_ExplicitTemplateInstantiation)
- return !Context.getLangOptions().AppleKext
+ return !Context.getLangOpts().AppleKext
? llvm::Function::WeakODRLinkage
: llvm::Function::ExternalLinkage;
@@ -475,16 +484,16 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
/// except under the fragile ObjC ABI with only ObjC exceptions
/// enabled. This means, for example, that C with -fexceptions
/// enables this.
-static bool hasUnwindExceptions(const LangOptions &Features) {
+static bool hasUnwindExceptions(const LangOptions &LangOpts) {
// If exceptions are completely disabled, obviously this is false.
- if (!Features.Exceptions) return false;
+ if (!LangOpts.Exceptions) return false;
// If C++ exceptions are enabled, this is true.
- if (Features.CXXExceptions) return true;
+ if (LangOpts.CXXExceptions) return true;
// If ObjC exceptions are enabled, this depends on the ABI.
- if (Features.ObjCExceptions) {
- if (!Features.ObjCNonFragileABI) return false;
+ if (LangOpts.ObjCExceptions) {
+ if (!LangOpts.ObjCNonFragileABI) return false;
}
return true;
@@ -495,7 +504,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (CodeGenOpts.UnwindTables)
F->setHasUWTable();
- if (!hasUnwindExceptions(Features))
+ if (!hasUnwindExceptions(LangOpts))
F->addFnAttr(llvm::Attribute::NoUnwind);
if (D->hasAttr<NakedAttr>()) {
@@ -515,11 +524,18 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
- if (Features.getStackProtector() == LangOptions::SSPOn)
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
F->addFnAttr(llvm::Attribute::StackProtect);
- else if (Features.getStackProtector() == LangOptions::SSPReq)
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
F->addFnAttr(llvm::Attribute::StackProtectReq);
+ if (LangOpts.AddressSanitizer) {
+ // When AddressSanitizer is enabled, set the AddressSafety attribute
+ // unless __attribute__((no_address_safety_analysis)) is used.
+ if (!D->hasAttr<NoAddressSafetyAnalysisAttr>())
+ F->addFnAttr(llvm::Attribute::AddressSafety);
+ }
+
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
if (alignment)
F->setAlignment(alignment);
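The ASan hook is deliberately opt-out: with AddressSanitizer enabled, every definition is marked AddressSafety unless the source declines. In C (hedged: the attribute spelling matches the NoAddressSafetyAnalysisAttr this hunk checks; the enabling driver flag of this era was -faddress-sanitizer):

    /* Skipped by the instrumentation pass; every other function in the
       translation unit still gets the AddressSafety IR attribute. */
    __attribute__((no_address_safety_analysis))
    void poke_redzone(char *p) { p[-1] = 0; /* deliberately unchecked */ }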
@@ -569,7 +585,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (!IsIncompleteFunction)
- SetLLVMFunctionAttributes(FD, getTypes().getFunctionInfo(GD), F);
+ SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
// Only a few attributes are set on declarations; these may later be
// overridden by a definition.
@@ -605,20 +621,18 @@ void CodeGenModule::EmitLLVMUsed() {
if (LLVMUsed.empty())
return;
- llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
-
// Convert LLVMUsed to what ConstantArray needs.
- std::vector<llvm::Constant*> UsedArray;
+ SmallVector<llvm::Constant*, 8> UsedArray;
UsedArray.resize(LLVMUsed.size());
for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
UsedArray[i] =
llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
- i8PTy);
+ Int8PtrTy);
}
if (UsedArray.empty())
return;
- llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, UsedArray.size());
+ llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), ATy, false,
@@ -689,7 +703,7 @@ llvm::Constant *CodeGenModule::EmitAnnotationString(llvm::StringRef Str) {
return i->second;
// Not found yet, create a new global.
- llvm::Constant *s = llvm::ConstantArray::get(getLLVMContext(), Str, true);
+ llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
llvm::GlobalValue *gv = new llvm::GlobalVariable(getModule(), s->getType(),
true, llvm::GlobalValue::PrivateLinkage, s, ".str");
gv->setSection(AnnotationSection);
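The cache above means source like this (illustrative) shares a single private ".str" global per distinct annotation string:

    int counter __attribute__((annotate("perf_counter")));
    int limit   __attribute__((annotate("perf_counter")));  // reuses the same
                                                            // ".str" global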
@@ -744,7 +758,7 @@ void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
// Never defer when EmitAllDecls is specified.
- if (Features.EmitAllDecls)
+ if (LangOpts.EmitAllDecls)
return false;
return !getContext().DeclMustBeEmitted(Global);
@@ -788,7 +802,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
return EmitAliasDefinition(GD);
// If this is CUDA, be selective about which declarations we emit.
- if (Features.CUDA) {
+ if (LangOpts.CUDA) {
if (CodeGenOpts.CUDAIsDevice) {
if (!Global->hasAttr<CUDADeviceAttr>() &&
!Global->hasAttr<CUDAGlobalAttr>() &&
@@ -815,10 +829,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
FD->getBody(InlineDefinition);
StringRef MangledName = getMangledName(GD);
- llvm::StringMap<GlobalDecl>::iterator DDI =
- DeferredDecls.find(MangledName);
- if (DDI != DeferredDecls.end())
- DeferredDecls.erase(DDI);
+ DeferredDecls.erase(MangledName);
EmitGlobalDefinition(InlineDefinition);
return;
}
@@ -840,7 +851,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// If we're deferring emission of a C++ variable with an
// initializer, remember the order in which it appeared in the file.
- if (getLangOptions().CPlusPlus && isa<VarDecl>(Global) &&
+ if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
cast<VarDecl>(Global)->hasInit()) {
DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
CXXGlobalInits.push_back(0);
@@ -863,20 +874,28 @@ namespace {
struct FunctionIsDirectlyRecursive :
public RecursiveASTVisitor<FunctionIsDirectlyRecursive> {
const StringRef Name;
+ const Builtin::Context &BI;
bool Result;
- FunctionIsDirectlyRecursive(const FunctionDecl *F) :
- Name(F->getName()), Result(false) {
+ FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) :
+ Name(N), BI(C), Result(false) {
}
typedef RecursiveASTVisitor<FunctionIsDirectlyRecursive> Base;
bool TraverseCallExpr(CallExpr *E) {
- const Decl *D = E->getCalleeDecl();
- if (!D)
+ const FunctionDecl *FD = E->getDirectCallee();
+ if (!FD)
return true;
- AsmLabelAttr *Attr = D->getAttr<AsmLabelAttr>();
- if (!Attr)
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (Attr && Name == Attr->getLabel()) {
+ Result = true;
+ return false;
+ }
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (!BuiltinID)
return true;
- if (Name == Attr->getLabel()) {
+ StringRef BuiltinName = BI.GetName(BuiltinID);
+ if (BuiltinName.startswith("__builtin_") &&
+ Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
Result = true;
return false;
}
@@ -885,15 +904,24 @@ namespace {
};
}
-// isTriviallyRecursiveViaAsm - Check if this function calls another
-// decl that, because of the asm attribute, ends up pointing to itself.
+// isTriviallyRecursive - Check if this function calls another
+// decl that, because of the asm attribute or the other decl being a builtin,
+// ends up pointing to itself.
bool
-CodeGenModule::isTriviallyRecursiveViaAsm(const FunctionDecl *F) {
- if (getCXXABI().getMangleContext().shouldMangleDeclName(F))
- return false;
+CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
+ StringRef Name;
+ if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
+ // asm labels are a special kind of mangling we have to support.
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return false;
+ Name = Attr->getLabel();
+ } else {
+ Name = FD->getName();
+ }
- FunctionIsDirectlyRecursive Walker(F);
- Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
+ FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
+ Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(FD));
return Walker.Result;
}
@@ -909,7 +937,7 @@ CodeGenModule::shouldEmitFunction(const FunctionDecl *F) {
// but a function that calls itself is clearly not equivalent to the real
// implementation.
// This happens in glibc's btowc and in some configure checks.
- return !isTriviallyRecursiveViaAsm(F);
+ return !isTriviallyRecursive(F);
}
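The pattern the walker above catches looks roughly like this (a simplified sketch, not actual glibc source):

    #include <stddef.h>
    // An inline wrapper whose body forwards to the builtin of the same name.
    // Clang ultimately lowers __builtin_memcpy back to a memcpy libcall, so
    // emitting this body would yield a function that calls itself; the walker
    // matches the function's name against the builtin's name minus the
    // "__builtin_" prefix, and shouldEmitFunction() then skips the body.
    extern "C" inline void *memcpy(void *d, const void *s, size_t n) {
      return __builtin_memcpy(d, s, n);
    }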
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
@@ -1023,7 +1051,7 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
//
// We also don't emit a definition for a function if it's going to be an entry
// in a vtable, unless it's already marked as used.
- } else if (getLangOptions().CPlusPlus && D.getDecl()) {
+ } else if (getLangOpts().CPlusPlus && D.getDecl()) {
// Look for a declaration that's lexically in a record.
const FunctionDecl *FD = cast<FunctionDecl>(D.getDecl());
do {
@@ -1037,7 +1065,7 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
break;
}
}
- FD = FD->getPreviousDeclaration();
+ FD = FD->getPreviousDecl();
} while (FD);
}
@@ -1075,19 +1103,23 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
ExtraAttrs);
}
-static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D,
- bool ConstantInit) {
- if (!D->getType().isConstant(Context) && !D->getType()->isReferenceType())
+/// isTypeConstant - Determine whether an object of this type can be emitted
+/// as a constant.
+///
+/// If ExcludeCtor is true, the duration when the object's constructor runs
+/// will not be considered. The caller will need to verify that the object is
+/// not written to during its construction.
+bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
+ if (!Ty.isConstant(Context) && !Ty->isReferenceType())
return false;
-
- if (Context.getLangOptions().CPlusPlus) {
- if (const RecordType *Record
- = Context.getBaseElementType(D->getType())->getAs<RecordType>())
- return ConstantInit &&
- cast<CXXRecordDecl>(Record->getDecl())->isPOD() &&
- !cast<CXXRecordDecl>(Record->getDecl())->hasMutableFields();
+
+ if (Context.getLangOpts().CPlusPlus) {
+ if (const CXXRecordDecl *Record
+ = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
+ return ExcludeCtor && !Record->hasMutableFields() &&
+ Record->hasTrivialDestructor();
}
-
+
return true;
}
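In source terms, the rules above classify globals like these (illustrative sketch):

    const int table[4] = {1, 2, 3, 4};  // constant: eligible for .rodata
    struct Cache { mutable int hits; };
    const Cache c = {};                 // not constant: a mutable field may
                                        // still be written
    struct File { ~File(); };
    const File f = {};                  // not constant: nontrivial destructor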
@@ -1144,7 +1176,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
if (D) {
// FIXME: This code is overly simple and should be merged with other global
// handling.
- GV->setConstant(DeclIsConstantGlobal(Context, D, false));
+ GV->setConstant(isTypeConstant(D->getType(), false));
// Set linkage and visibility in case we never see a definition.
NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
@@ -1280,19 +1312,19 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return llvm::GlobalVariable::AvailableExternallyLinkage;
if (KeyFunction->isInlined())
- return !Context.getLangOptions().AppleKext ?
+ return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
llvm::Function::InternalLinkage;
return llvm::GlobalVariable::ExternalLinkage;
case TSK_ImplicitInstantiation:
- return !Context.getLangOptions().AppleKext ?
+ return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
llvm::Function::InternalLinkage;
case TSK_ExplicitInstantiationDefinition:
- return !Context.getLangOptions().AppleKext ?
+ return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::WeakODRLinkage :
llvm::Function::InternalLinkage;
@@ -1300,13 +1332,13 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
// FIXME: Use available_externally linkage. However, this currently
// breaks LLVM's build due to undefined symbols.
// return llvm::GlobalVariable::AvailableExternallyLinkage;
- return !Context.getLangOptions().AppleKext ?
+ return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
llvm::Function::InternalLinkage;
}
}
- if (Context.getLangOptions().AppleKext)
+ if (Context.getLangOpts().AppleKext)
return llvm::Function::InternalLinkage;
switch (RD->getTemplateSpecializationKind()) {
@@ -1322,9 +1354,8 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
case TSK_ExplicitInstantiationDefinition:
return llvm::GlobalVariable::WeakODRLinkage;
}
-
- // Silence GCC warning.
- return llvm::GlobalVariable::LinkOnceODRLinkage;
+
+ llvm_unreachable("Invalid TemplateSpecializationKind!");
}
CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
@@ -1332,13 +1363,134 @@ CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
TheTargetData.getTypeStoreSizeInBits(Ty));
}
+llvm::Constant *
+CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
+ const Expr *rawInit) {
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups;
+ if (const ExprWithCleanups *withCleanups =
+ dyn_cast<ExprWithCleanups>(rawInit)) {
+ cleanups = withCleanups->getObjects();
+ rawInit = withCleanups->getSubExpr();
+ }
+
+ const InitListExpr *init = dyn_cast<InitListExpr>(rawInit);
+ if (!init || !init->initializesStdInitializerList() ||
+ init->getNumInits() == 0)
+ return 0;
+
+ ASTContext &ctx = getContext();
+ unsigned numInits = init->getNumInits();
+ // FIXME: This check is here because we would otherwise silently miscompile
+ // nested global std::initializer_lists. Better would be to have a real
+ // implementation.
+ for (unsigned i = 0; i < numInits; ++i) {
+ const InitListExpr *inner = dyn_cast<InitListExpr>(init->getInit(i));
+ if (inner && inner->initializesStdInitializerList()) {
+ ErrorUnsupported(inner, "nested global std::initializer_list");
+ return 0;
+ }
+ }
+
+ // Synthesize a fake VarDecl for the array and initialize that.
+ QualType elementType = init->getInit(0)->getType();
+ llvm::APInt numElements(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType arrayType = ctx.getConstantArrayType(elementType, numElements,
+ ArrayType::Normal, 0);
+
+ IdentifierInfo *name = &ctx.Idents.get(D->getNameAsString() + "__initlist");
+ TypeSourceInfo *sourceInfo = ctx.getTrivialTypeSourceInfo(
+ arrayType, D->getLocation());
+ VarDecl *backingArray = VarDecl::Create(ctx, const_cast<DeclContext*>(
+ D->getDeclContext()),
+ D->getLocStart(), D->getLocation(),
+ name, arrayType, sourceInfo,
+ SC_Static, SC_Static);
+
+ // Now clone the InitListExpr to initialize the array instead.
+ // Incredible hack: we want to use the existing InitListExpr here, so we need
+ // to tell it that it no longer initializes a std::initializer_list.
+ Expr *arrayInit = new (ctx) InitListExpr(ctx, init->getLBraceLoc(),
+ const_cast<InitListExpr*>(init)->getInits(),
+ init->getNumInits(),
+ init->getRBraceLoc());
+ arrayInit->setType(arrayType);
+
+ if (!cleanups.empty())
+ arrayInit = ExprWithCleanups::Create(ctx, arrayInit, cleanups);
+
+ backingArray->setInit(arrayInit);
+
+ // Emit the definition of the array.
+ EmitGlobalVarDefinition(backingArray);
+
+ // Inspect the initializer list to validate it and determine its type.
+ // FIXME: doing this every time is probably inefficient; caching would be nice
+ RecordDecl *record = init->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator field = record->field_begin();
+ if (field == record->field_end()) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ QualType elementPtr = ctx.getPointerType(elementType.withConst());
+ // Start pointer.
+ if (!ctx.hasSameType(field->getType(), elementPtr)) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ ++field;
+ if (field == record->field_end()) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ bool isStartEnd = false;
+ if (ctx.hasSameType(field->getType(), elementPtr)) {
+ // End pointer.
+ isStartEnd = true;
+ } else if (!ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+
+ // Now build an APValue representing the std::initializer_list.
+ APValue initListValue(APValue::UninitStruct(), 0, 2);
+ APValue &startField = initListValue.getStructField(0);
+ APValue::LValuePathEntry startOffsetPathEntry;
+ startOffsetPathEntry.ArrayIndex = 0;
+ startField = APValue(APValue::LValueBase(backingArray),
+ CharUnits::fromQuantity(0),
+ llvm::makeArrayRef(startOffsetPathEntry),
+ /*IsOnePastTheEnd=*/false, 0);
+
+ if (isStartEnd) {
+ APValue &endField = initListValue.getStructField(1);
+ APValue::LValuePathEntry endOffsetPathEntry;
+ endOffsetPathEntry.ArrayIndex = numInits;
+ endField = APValue(APValue::LValueBase(backingArray),
+ ctx.getTypeSizeInChars(elementType) * numInits,
+ llvm::makeArrayRef(endOffsetPathEntry),
+ /*IsOnePastTheEnd=*/true, 0);
+ } else {
+ APValue &sizeField = initListValue.getStructField(1);
+ sizeField = APValue(llvm::APSInt(numElements));
+ }
+
+ // Emit the constant for the initializer_list.
+ llvm::Constant *llvmInit =
+ EmitConstantValueForMemory(initListValue, D->getType());
+ assert(llvmInit && "failed to initialize as constant");
+ return llvmInit;
+}
+
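For example (a sketch of the intended effect; the exact array name is an internal detail following the getNameAsString() + "__initlist" scheme above):

    #include <initializer_list>
    // clang synthesizes a hidden static array, roughly "il__initlist",
    // holding {1, 2, 3}, emits it via EmitGlobalVarDefinition, and then
    // emits 'il' itself as a constant {start, end} or {start, size} pair
    // pointing into that array, depending on the library's field layout.
    std::initializer_list<int> il = {1, 2, 3};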
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = 0;
QualType ASTTy = D->getType();
- bool NonConstInit = false;
- const Expr *InitExpr = D->getAnyInitializer();
-
+ CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ bool NeedsGlobalCtor = false;
+ bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor();
+
+ const VarDecl *InitDecl;
+ const Expr *InitExpr = D->getAnyInitializer(InitDecl);
if (!InitExpr) {
// This is a tentative definition; tentative definitions are
// implicitly initialized with { 0 }.
@@ -1352,23 +1504,32 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
Init = EmitNullConstant(D->getType());
} else {
- Init = EmitConstantExpr(InitExpr, D->getType());
+ // If this is a std::initializer_list, emit the special initializer.
+ Init = MaybeEmitGlobalStdInitializerListInitializer(D, InitExpr);
+ // An empty init list will perform zero-initialization, which happens
+ // to be exactly what we want.
+ // FIXME: It does so in a global constructor, which is *not* what we
+ // want.
+
+ if (!Init)
+ Init = EmitConstantInit(*InitDecl);
if (!Init) {
QualType T = InitExpr->getType();
if (D->getType()->isReferenceType())
T = D->getType();
-
- if (getLangOptions().CPlusPlus) {
+
+ if (getLangOpts().CPlusPlus) {
Init = EmitNullConstant(T);
- NonConstInit = true;
+ NeedsGlobalCtor = true;
} else {
ErrorUnsupported(D, "static initializer");
Init = llvm::UndefValue::get(getTypes().ConvertType(T));
}
} else {
// We don't need an initializer, so remove the entry for the delayed
- // initializer position (just in case this entry was delayed).
- if (getLangOptions().CPlusPlus)
+ // initializer position (just in case this entry was delayed) if we
+ // also don't need to register a destructor.
+ if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
DelayedCXXInitPosition.erase(D);
}
}
@@ -1422,12 +1583,11 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setInitializer(Init);
// If it is safe to mark the global 'constant', do so now.
- GV->setConstant(false);
- if (!NonConstInit && DeclIsConstantGlobal(Context, D, true))
- GV->setConstant(true);
+ GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
+ isTypeConstant(D->getType(), true));
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
-
+
// Set the llvm linkage type as appropriate.
llvm::GlobalValue::LinkageTypes Linkage =
GetLLVMLinkageVarDefinition(D, GV);
@@ -1439,8 +1599,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
SetCommonAttributes(D, GV);
// Emit the initializer function if necessary.
- if (NonConstInit)
- EmitCXXGlobalVarDeclInitFunc(D, GV);
+ if (NeedsGlobalCtor || NeedsGlobalDtor)
+ EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
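The NeedsGlobalCtor/NeedsGlobalDtor split matters for cases like this (illustrative):

    struct Logger { ~Logger(); };
    Logger log_obj;  // the initializer folds to a constant zero pattern, so
                     // no dynamic initialization is needed (PerformInit is
                     // false), but an init function is still emitted to
                     // register ~Logger with the atexit machinery, and the
                     // global can never be marked 'constant'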
@@ -1465,7 +1625,7 @@ CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
} else if (Linkage == GVA_TemplateInstantiation ||
Linkage == GVA_ExplicitTemplateInstantiation)
return llvm::GlobalVariable::WeakODRLinkage;
- else if (!getLangOptions().CPlusPlus &&
+ else if (!getLangOpts().CPlusPlus &&
((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
D->getAttr<CommonAttr>()) &&
!D->hasExternalStorage() && !D->getInit() &&
@@ -1565,16 +1725,20 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
}
}
+void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
+ // If we have a definition, this might be a deferred decl. If the
+ // instantiation is explicit, make sure we emit it at the end.
+ if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
+ GetAddrOfGlobalVar(VD);
+}
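A minimal case the new hook handles (sketch):

    template <typename T> struct Holder { static T value; };
    template <typename T> T Holder<T>::value = T();
    // Explicit instantiation definition: the hook above runs for
    // Holder<int>::value, and GetAddrOfGlobalVar ensures it is emitted
    // rather than left in the deferred-decl set.
    template struct Holder<int>;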
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
// Compute the function info and LLVM type.
- const CGFunctionInfo &FI = getTypes().getFunctionInfo(GD);
- bool variadic = false;
- if (const FunctionProtoType *fpt = D->getType()->getAs<FunctionProtoType>())
- variadic = fpt->isVariadic();
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic);
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1739,8 +1903,10 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
return Map.GetOrCreateValue(String);
}
- // Otherwise, convert the UTF8 literals into a byte string.
- SmallVector<UTF16, 128> ToBuf(NumBytes);
+ // Otherwise, convert the UTF8 literals into a string of shorts.
+ IsUTF16 = true;
+
+ SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
const UTF8 *FromPtr = (UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
@@ -1751,38 +1917,20 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
// ConvertUTF8toUTF16 returns the length in ToPtr.
StringLength = ToPtr - &ToBuf[0];
- // Render the UTF-16 string into a byte array and convert to the target byte
- // order.
- //
- // FIXME: This isn't something we should need to do here.
- llvm::SmallString<128> AsBytes;
- AsBytes.reserve(StringLength * 2);
- for (unsigned i = 0; i != StringLength; ++i) {
- unsigned short Val = ToBuf[i];
- if (TargetIsLSB) {
- AsBytes.push_back(Val & 0xFF);
- AsBytes.push_back(Val >> 8);
- } else {
- AsBytes.push_back(Val >> 8);
- AsBytes.push_back(Val & 0xFF);
- }
- }
- // Append one extra null character, the second is automatically added by our
- // caller.
- AsBytes.push_back(0);
-
- IsUTF16 = true;
- return Map.GetOrCreateValue(StringRef(AsBytes.data(), AsBytes.size()));
+ // Add an explicit null.
+ *ToPtr = 0;
+ return Map.
+ GetOrCreateValue(StringRef(reinterpret_cast<const char *>(ToBuf.data()),
+ (StringLength + 1) * 2));
}
static llvm::StringMapEntry<llvm::Constant*> &
GetConstantStringEntry(llvm::StringMap<llvm::Constant*> &Map,
- const StringLiteral *Literal,
- unsigned &StringLength)
-{
- StringRef String = Literal->getString();
- StringLength = String.size();
- return Map.GetOrCreateValue(String);
+ const StringLiteral *Literal,
+ unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ StringLength = String.size();
+ return Map.GetOrCreateValue(String);
}
llvm::Constant *
@@ -1797,8 +1945,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
if (llvm::Constant *C = Entry.getValue())
return C;
- llvm::Constant *Zero =
- llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
// If we don't already have it, get __CFConstantStringClassReference.
@@ -1817,7 +1964,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::StructType *STy =
cast<llvm::StructType>(getTypes().ConvertType(CFTy));
- std::vector<llvm::Constant*> Fields(4);
+ llvm::Constant *Fields[4];
// Class pointer.
Fields[0] = CFConstantStringClassRef;
@@ -1828,27 +1975,31 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::ConstantInt::get(Ty, 0x07C8);
// String pointer.
- llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+ llvm::Constant *C = 0;
+ if (isUTF16) {
+ ArrayRef<uint16_t> Arr =
+ llvm::makeArrayRef<uint16_t>((uint16_t*)Entry.getKey().data(),
+ Entry.getKey().size() / 2);
+ C = llvm::ConstantDataArray::get(VMContext, Arr);
+ } else {
+ C = llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
+ }
llvm::GlobalValue::LinkageTypes Linkage;
- bool isConstant;
- if (isUTF16) {
+ if (isUTF16)
// FIXME: why do utf strings get "_" labels instead of "L" labels?
Linkage = llvm::GlobalValue::InternalLinkage;
- // Note: -fwritable-strings doesn't make unicode CFStrings writable, but
- // does make plain ascii ones writable.
- isConstant = true;
- } else {
+ else
// FIXME: With OS X ld 123.2 (xcode 4) and LTO we would get a linker error
// when using private linkage. It is not clear if this is a bug in ld
// or a reasonable new restriction.
Linkage = llvm::GlobalValue::LinkerPrivateLinkage;
- isConstant = !Features.WritableStrings;
- }
+ // Note: -fwritable-strings doesn't make the backing store strings of
+ // CFStrings writable. (See <rdar://problem/10657500>)
llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
- ".str");
+ new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
+ Linkage, C, ".str");
GV->setUnnamedAddr(true);
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
@@ -1857,8 +2008,14 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
GV->setAlignment(Align.getQuantity());
}
+
+ // String.
Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ if (isUTF16)
+ // Cast the UTF16 string to the correct type.
+ Fields[2] = llvm::ConstantExpr::getBitCast(Fields[2], Int8PtrTy);
+
// String length.
Ty = getTypes().ConvertType(getContext().LongTy);
Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
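Putting the four fields together, the emitted constant has roughly this shape (an approximate C++ view; field types are illustrative, and 0x07C8 is the flags value shown above for 8-bit payloads):

    struct ApproxCFConstantString {
      const void *isa;   // &__CFConstantStringClassReference
      int flags;         // e.g. 0x07C8 for an ASCII/UTF-8 payload
      const char *str;   // the ".str" global (bitcast to i8* when UTF-16)
      long length;       // code-unit count, not byte count
    };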
@@ -1879,7 +2036,7 @@ static RecordDecl *
CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
DeclContext *DC, IdentifierInfo *Id) {
SourceLocation Loc;
- if (Ctx.getLangOptions().CPlusPlus)
+ if (Ctx.getLangOpts().CPlusPlus)
return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
else
return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
@@ -1894,16 +2051,15 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
if (llvm::Constant *C = Entry.getValue())
return C;
- llvm::Constant *Zero =
- llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
// If we don't already have it, get _NSConstantStringClassReference.
if (!ConstantStringClassRef) {
- std::string StringClass(getLangOptions().ObjCConstantStringClass);
+ std::string StringClass(getLangOpts().ObjCConstantStringClass);
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
llvm::Constant *GV;
- if (Features.ObjCNonFragileABI) {
+ if (LangOpts.ObjCNonFragileABI) {
std::string str =
StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
: "OBJC_CLASS_$_" + StringClass;
@@ -1958,18 +2114,19 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
NSConstantStringType = cast<llvm::StructType>(getTypes().ConvertType(NSTy));
}
- std::vector<llvm::Constant*> Fields(3);
+ llvm::Constant *Fields[3];
// Class pointer.
Fields[0] = ConstantStringClassRef;
// String pointer.
- llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
llvm::GlobalValue::LinkageTypes Linkage;
bool isConstant;
Linkage = llvm::GlobalValue::PrivateLinkage;
- isConstant = !Features.WritableStrings;
+ isConstant = !LangOpts.WritableStrings;
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
@@ -1990,7 +2147,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
"_unnamed_nsstring_");
// FIXME. Fix section.
if (const char *Sect =
- Features.ObjCNonFragileABI
+ LangOpts.ObjCNonFragileABI
? getContext().getTargetInfo().getNSStringNonFragileABISection()
: getContext().getTargetInfo().getNSStringSection())
GV->setSection(Sect);
@@ -2034,54 +2191,72 @@ QualType CodeGenModule::getObjCFastEnumerationStateType() {
return ObjCFastEnumerationStateType;
}
-/// GetStringForStringLiteral - Return the appropriate bytes for a
-/// string literal, properly padded to match the literal type.
-std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
- const ASTContext &Context = getContext();
- const ConstantArrayType *CAT =
- Context.getAsConstantArrayType(E->getType());
- assert(CAT && "String isn't pointer or array!");
-
- // Resize the string to the right size.
- uint64_t RealLen = CAT->getSize().getZExtValue();
- switch (E->getKind()) {
- case StringLiteral::Ascii:
- case StringLiteral::UTF8:
- break;
- case StringLiteral::Wide:
- RealLen *= Context.getTargetInfo().getWCharWidth() / Context.getCharWidth();
- break;
- case StringLiteral::UTF16:
- RealLen *= Context.getTargetInfo().getChar16Width() / Context.getCharWidth();
- break;
- case StringLiteral::UTF32:
- RealLen *= Context.getTargetInfo().getChar32Width() / Context.getCharWidth();
- break;
+llvm::Constant *
+CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ if (E->getCharByteWidth() == 1) {
+ SmallString<64> Str(E->getString());
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
- std::string Str = E->getString().str();
- Str.resize(RealLen, '\0');
- return Str;
+
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
+ llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+ // Wide strings have either 2-byte or 4-byte elements.
+ if (ElemTy->getPrimitiveSizeInBits() == 16) {
+ SmallVector<uint16_t, 32> Elements;
+ Elements.reserve(NumElements);
+ for (unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
+ }
+
+ assert(ElemTy->getPrimitiveSizeInBits() == 32);
+ SmallVector<uint32_t, 32> Elements;
+ Elements.reserve(NumElements);
+
+ for (unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
}
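The effect of the element-width dispatch above, in source terms (illustrative; assumes C++11 literals and a 4-byte wchar_t):

    const char     a[] = "hi";   // ConstantDataArray of i8:  c"hi\00"
    const char16_t b[] = u"hi";  // 16-bit path: array of i16 code units
    const wchar_t  c[] = L"hi";  // 32-bit path: array of i32 code units,
                                 // padded out to the array type's size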
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal.
llvm::Constant *
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
- // FIXME: This can be more efficient.
- // FIXME: We shouldn't need to bitcast the constant in the wide string case.
CharUnits Align = getContext().getTypeAlignInChars(S->getType());
- llvm::Constant *C = GetAddrOfConstantString(GetStringForStringLiteral(S),
- /* GlobalName */ 0,
- Align.getQuantity());
- if (S->isWide() || S->isUTF16() || S->isUTF32()) {
- llvm::Type *DestTy =
- llvm::PointerType::getUnqual(getTypes().ConvertType(S->getType()));
- C = llvm::ConstantExpr::getBitCast(C, DestTy);
+ if (S->isAscii() || S->isUTF8()) {
+ SmallString<64> Str(S->getString());
+
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(S->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return GetAddrOfConstantString(Str, /*GlobalName*/ 0, Align.getQuantity());
}
- return C;
+
+ // FIXME: the following does not memoize wide strings.
+ llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(),
+ !LangOpts.WritableStrings,
+ llvm::GlobalValue::PrivateLinkage,
+ C, ".str");
+
+ GV->setAlignment(Align.getQuantity());
+ GV->setUnnamedAddr(true);
+ return GV;
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
@@ -2103,7 +2278,7 @@ static llvm::GlobalVariable *GenerateStringLiteral(StringRef str,
unsigned Alignment) {
// Create Constant for this string literal. Don't add a '\0'.
llvm::Constant *C =
- llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
+ llvm::ConstantDataArray::getString(CGM.getLLVMContext(), str, false);
// Create a global variable for this string
llvm::GlobalVariable *GV =
@@ -2126,14 +2301,12 @@ static llvm::GlobalVariable *GenerateStringLiteral(StringRef str,
llvm::Constant *CodeGenModule::GetAddrOfConstantString(StringRef Str,
const char *GlobalName,
unsigned Alignment) {
- bool IsConstant = !Features.WritableStrings;
-
// Get the default prefix if a name wasn't specified.
if (!GlobalName)
GlobalName = ".str";
// Don't share any string literals if strings aren't constant.
- if (!IsConstant)
+ if (LangOpts.WritableStrings)
return GenerateStringLiteral(Str, false, *this, GlobalName, Alignment);
llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
@@ -2147,7 +2320,8 @@ llvm::Constant *CodeGenModule::GetAddrOfConstantString(StringRef Str,
}
// Create a global variable for this.
- llvm::GlobalVariable *GV = GenerateStringLiteral(Str, true, *this, GlobalName, Alignment);
+ llvm::GlobalVariable *GV = GenerateStringLiteral(Str, true, *this, GlobalName,
+ Alignment);
Entry.setValue(GV);
return GV;
}
@@ -2308,6 +2482,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::TypeAliasTemplate:
case Decl::NamespaceAlias:
case Decl::Block:
+ case Decl::Import:
break;
case Decl::CXXConstructor:
// Skip function templates
@@ -2330,8 +2505,6 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// Objective-C Decls
// Forward declarations, no (immediate) code generation.
- case Decl::ObjCClass:
- case Decl::ObjCForwardProtocol:
case Decl::ObjCInterface:
break;
@@ -2342,10 +2515,13 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
}
- case Decl::ObjCProtocol:
- ObjCRuntime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
+ case Decl::ObjCProtocol: {
+ ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(D);
+ if (Proto->isThisDeclarationADefinition())
+ ObjCRuntime->GenerateProtocol(Proto);
break;
-
+ }
+
case Decl::ObjCCategoryImpl:
// Categories have properties but don't support synthesize so we
// can ignore them here.
@@ -2354,11 +2530,16 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::ObjCImplementation: {
ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
- if (Features.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
+ if (LangOpts.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
Context.ResetObjCLayout(OMD->getClassInterface());
EmitObjCPropertyImplementations(OMD);
EmitObjCIvarInitializations(OMD);
ObjCRuntime->GenerateClass(OMD);
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(OMD->getClassInterface()),
+ OMD->getLocation());
+
break;
}
case Decl::ObjCMethod: {
@@ -2369,7 +2550,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
}
case Decl::ObjCCompatibleAlias:
- // compatibility-alias is a directive and has no code gen.
+ ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
break;
case Decl::LinkageSpec:
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
index dbc6a87..38f5008 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -26,7 +26,6 @@
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ValueHandle.h"
@@ -56,6 +55,7 @@ namespace clang {
class Decl;
class Expr;
class Stmt;
+ class InitListExpr;
class StringLiteral;
class NamedDecl;
class ValueDecl;
@@ -103,8 +103,10 @@ namespace CodeGen {
/// void
llvm::Type *VoidTy;
- /// i8, i32, and i64
- llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
+ /// i8, i16, i32, and i64
+ llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
+ /// float, double
+ llvm::Type *FloatTy, *DoubleTy;
/// int
llvm::IntegerType *IntTy;
@@ -213,7 +215,7 @@ class CodeGenModule : public CodeGenTypeCache {
typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
ASTContext &Context;
- const LangOptions &Features;
+ const LangOptions &LangOpts;
const CodeGenOptions &CodeGenOpts;
llvm::Module &TheModule;
const llvm::TargetData &TheTargetData;
@@ -232,6 +234,7 @@ class CodeGenModule : public CodeGenTypeCache {
CGCUDARuntime* CUDARuntime;
CGDebugInfo* DebugInfo;
ARCEntrypoints *ARCData;
+ llvm::MDNode *NoObjCARCExceptionsMetadata;
RREntrypoints *RRData;
// WeakRefReferences - A set of references that have only been seen via
@@ -276,7 +279,11 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::StringMap<llvm::Constant*> CFConstantStringMap;
llvm::StringMap<llvm::GlobalVariable*> ConstantStringMap;
- llvm::DenseMap<const Decl*, llvm::Value*> StaticLocalDeclMap;
+ llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
+ llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap;
+
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicSetterHelperFnMap;
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicGetterHelperFnMap;
/// CXXGlobalInits - Global variables with initializers that need to run
/// before main.
@@ -324,7 +331,7 @@ class CodeGenModule : public CodeGenTypeCache {
void createOpenCLRuntime();
void createCUDARuntime();
- bool isTriviallyRecursiveViaAsm(const FunctionDecl *F);
+ bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(const FunctionDecl *F);
llvm::LLVMContext &VMContext;
@@ -382,7 +389,7 @@ public:
CGCXXABI &getCXXABI() { return ABI; }
ARCEntrypoints &getARCEntrypoints() const {
- assert(getLangOptions().ObjCAutoRefCount && ARCData != 0);
+ assert(getLangOpts().ObjCAutoRefCount && ARCData != 0);
return *ARCData;
}
@@ -391,19 +398,51 @@ public:
return *RRData;
}
- llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
- return StaticLocalDeclMap[VD];
+ llvm::Constant *getStaticLocalDeclAddress(const VarDecl *D) {
+ return StaticLocalDeclMap[D];
}
void setStaticLocalDeclAddress(const VarDecl *D,
- llvm::GlobalVariable *GV) {
- StaticLocalDeclMap[D] = GV;
+ llvm::Constant *C) {
+ StaticLocalDeclMap[D] = C;
+ }
+
+ llvm::GlobalVariable *getStaticLocalDeclGuardAddress(const VarDecl *D) {
+ return StaticLocalDeclGuardMap[D];
+ }
+ void setStaticLocalDeclGuardAddress(const VarDecl *D,
+ llvm::GlobalVariable *C) {
+ StaticLocalDeclGuardMap[D] = C;
+ }
+
+ llvm::Constant *getAtomicSetterHelperFnMap(QualType Ty) {
+ return AtomicSetterHelperFnMap[Ty];
+ }
+ void setAtomicSetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicSetterHelperFnMap[Ty] = Fn;
+ }
+
+ llvm::Constant *getAtomicGetterHelperFnMap(QualType Ty) {
+ return AtomicGetterHelperFnMap[Ty];
+ }
+ void setAtomicGetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicGetterHelperFnMap[Ty] = Fn;
}
CGDebugInfo *getModuleDebugInfo() { return DebugInfo; }
+ llvm::MDNode *getNoObjCARCExceptionsMetadata() {
+ if (!NoObjCARCExceptionsMetadata)
+ NoObjCARCExceptionsMetadata =
+ llvm::MDNode::get(getLLVMContext(),
+ SmallVector<llvm::Value*,1>());
+ return NoObjCARCExceptionsMetadata;
+ }
+
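A sketch of how the lazily-created node would be attached (the helper and call site are assumptions, and the metadata kind string is quoted here as an assumption about the ARC optimizer's convention, not from the patch):

    // Hypothetical helper: mark a call so the ARC optimizer knows exceptions
    // cannot leak through it when -fobjc-arc-exceptions is off.
    void markNoARCExceptions(CodeGenModule &CGM, llvm::CallInst *call) {
      unsigned kind =
          CGM.getModule().getMDKindID("clang.arc.no_objc_arc_exceptions");
      call->setMetadata(kind, CGM.getNoObjCARCExceptionsMetadata());
    }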
ASTContext &getContext() const { return Context; }
const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
- const LangOptions &getLangOptions() const { return Features; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
llvm::Module &getModule() const { return TheModule; }
CodeGenTypes &getTypes() { return Types; }
CodeGenVTables &getVTables() { return VTables; }
@@ -418,6 +457,9 @@ public:
bool shouldUseTBAA() const { return TBAA != 0; }
llvm::MDNode *getTBAAInfo(QualType QTy);
+ llvm::MDNode *getTBAAInfoForVTablePtr();
+
+ bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
static void DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo);
@@ -451,7 +493,6 @@ public:
case ProtectedVisibility: return llvm::GlobalValue::ProtectedVisibility;
}
llvm_unreachable("unknown visibility!");
- return llvm::GlobalValue::DefaultVisibility;
}
llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
@@ -551,11 +592,6 @@ public:
/// requires no captures.
llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
- /// GetStringForStringLiteral - Return the appropriate bytes for a string
- /// literal, properly padded to match the literal type. If only the address of
- /// a constant is needed consider using GetAddrOfConstantStringLiteral.
- std::string GetStringForStringLiteral(const StringLiteral *E);
-
/// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
/// for the given string.
llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
@@ -565,6 +601,10 @@ public:
/// -fconstant-string-class=class_name option.
llvm::Constant *GetAddrOfConstantString(const StringLiteral *Literal);
+ /// GetConstantArrayFromStringLiteral - Return a constant array for the given
+ /// string.
+ llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
+
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
/// for the given string literal.
llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
@@ -596,6 +636,10 @@ public:
llvm::Constant *GetAddrOfConstantCString(const std::string &str,
const char *GlobalName=0,
unsigned Alignment=1);
+
+ /// GetAddrOfConstantCompoundLiteral - Returns a pointer to a constant global
+ /// variable for the given file-scope compound literal expression.
+ llvm::Constant *GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
/// \brief Retrieve the record type that describes the state of an
/// Objective-C fast enumeration loop (for..in).
@@ -624,6 +668,10 @@ public:
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
+ /// HandleCXXStaticMemberVarInstantiation - Tell the consumer that this
+ /// variable has been instantiated.
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
+
/// AddUsedGlobal - Add a global which should be forced to be
/// present in the object file; these are emitted to the llvm.used
/// metadata global.
@@ -661,12 +709,28 @@ public:
llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
+ /// EmitConstantInit - Try to emit the initializer for the given declaration
+ /// as a constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantInit(const VarDecl &D, CodeGenFunction *CGF = 0);
+
/// EmitConstantExpr - Try to emit the given expression as a
/// constant; returns 0 if the expression cannot be emitted as a
/// constant.
llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
CodeGenFunction *CGF = 0);
+ /// EmitConstantValue - Emit the given constant value as a constant, in the
+ /// type's scalar representation.
+ llvm::Constant *EmitConstantValue(const APValue &Value, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitConstantValueForMemory - Emit the given constant value as a constant,
+ /// in the type's memory representation.
+ llvm::Constant *EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF = 0);
+
/// EmitNullConstant - Return the result of value-initializing the given
/// type, i.e. a null expression of the given type. This is usually,
/// but not always, an LLVM null constant.
@@ -715,10 +779,14 @@ public:
/// as a return type.
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
- /// ReturnTypeUsesSret - Return true iff the given type uses 'fpret' when used
- /// as a return type.
+ /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when
+ /// used as a return type.
bool ReturnTypeUsesFPRet(QualType ResultType);
+ /// ReturnTypeUsesFP2Ret - Return true iff the given type uses 'fp2ret' when
+ /// used as a return type.
+ bool ReturnTypeUsesFP2Ret(QualType ResultType);
+
/// ConstructAttributeList - Get the LLVM attributes and calling convention to
/// use for a particular function type.
///
@@ -830,6 +898,8 @@ private:
void EmitGlobalFunctionDefinition(GlobalDecl GD);
void EmitGlobalVarDefinition(const VarDecl *D);
+ llvm::Constant *MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
+ const Expr *init);
void EmitAliasDefinition(GlobalDecl GD);
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
@@ -864,8 +934,11 @@ private:
/// EmitCXXGlobalDtorFunc - Emit the function that destroys C++ globals.
void EmitCXXGlobalDtorFunc();
+ /// EmitCXXGlobalVarDeclInitFunc - Emit the function that initializes the
+ /// specified global (if PerformInit is true) and registers its destructor.
void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
- llvm::GlobalVariable *Addr);
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
// FIXME: Hardcoding priority here is gross.
void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
index 887c1ea..9ee3f1d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -169,7 +169,7 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
// TODO: This is using the RTTI name. Is there a better way to get
// a unique string for a type?
- llvm::SmallString<256> OutName;
+ SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
MContext.mangleCXXRTTIName(QualType(ETy, 0), Out);
Out.flush();
@@ -179,3 +179,7 @@ CodeGenTBAA::getTBAAInfo(QualType QTy) {
// For now, handle any other kind of type conservatively.
return MetadataCache[Ty] = getChar();
}
+
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() {
+ return getTBAAInfoForNamedType("vtable pointer", getRoot());
+}
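Intended usage is along these lines (a sketch; Builder, ThisAddr, and CGM are assumed names, but DecorateInstruction is the decoration hook declared on CodeGenModule above):

    // When emitting a virtual call, tag the vtable-pointer load so TBAA
    // knows vtable slots never alias ordinary user memory.
    llvm::LoadInst *VTable = Builder.CreateLoad(ThisAddr, "vtable");
    CodeGenModule::DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());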
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
index 9fe51fb..8e08498 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
@@ -68,6 +68,10 @@ public:
/// getTBAAInfo - Get the TBAA MDNode to be used for a dereference
/// of the given type.
llvm::MDNode *getTBAAInfo(QualType QTy);
+
+ /// getTBAAInfoForVTablePtr - Get the TBAA MDNode to be used for a
+ /// dereference of a vtable pointer.
+ llvm::MDNode *getTBAAInfoForVTablePtr();
};
} // end namespace CodeGen
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
index e0d9218..41fd536 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -15,6 +15,7 @@
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
@@ -26,11 +27,12 @@
using namespace clang;
using namespace CodeGen;
-CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
- const llvm::TargetData &TD, const ABIInfo &Info,
- CGCXXABI &CXXABI, const CodeGenOptions &CGO)
- : Context(Ctx), Target(Ctx.getTargetInfo()), TheModule(M), TheTargetData(TD),
- TheABIInfo(Info), TheCXXABI(CXXABI), CodeGenOpts(CGO) {
+CodeGenTypes::CodeGenTypes(CodeGenModule &CGM)
+ : Context(CGM.getContext()), Target(Context.getTargetInfo()),
+ TheModule(CGM.getModule()), TheTargetData(CGM.getTargetData()),
+ TheABIInfo(CGM.getTargetCodeGenInfo().getABIInfo()),
+ TheCXXABI(CGM.getCXXABI()),
+ CodeGenOpts(CGM.getCodeGenOpts()), CGM(CGM) {
SkippedLayout = false;
}
@@ -48,7 +50,7 @@ CodeGenTypes::~CodeGenTypes() {
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
llvm::StructType *Ty,
StringRef suffix) {
- llvm::SmallString<256> TypeName;
+ SmallString<256> TypeName;
llvm::raw_svector_ostream OS(TypeName);
OS << RD->getKindName() << '.';
@@ -193,11 +195,9 @@ bool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty) {
// If this isn't a tagged type, we can convert it!
const TagType *TT = Ty->getAs<TagType>();
if (TT == 0) return true;
-
-
- // If it's a tagged type used by-value, but is just a forward decl, we can't
- // convert it. Note that getDefinition()==0 is not the same as !isDefinition.
- if (TT->getDecl()->getDefinition() == 0)
+
+ // Incomplete types cannot be converted.
+ if (TT->isIncompleteType())
return false;
// If this is an enum, then it is always safe to convert.
@@ -305,7 +305,6 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
llvm_unreachable("Non-canonical or dependent types aren't possible.");
- break;
case Type::Builtin: {
switch (cast<BuiltinType>(Ty)->getKind()) {
@@ -368,12 +367,12 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
break;
- case BuiltinType::Overload:
case BuiltinType::Dependent:
- case BuiltinType::BoundMember:
- case BuiltinType::UnknownAny:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
llvm_unreachable("Unexpected placeholder builtin type!");
- break;
}
break;
}
@@ -474,16 +473,13 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// The function type can be built; call the appropriate routines to
// build it.
const CGFunctionInfo *FI;
- bool isVariadic;
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
- FI = &getFunctionInfo(
+ FI = &arrangeFunctionType(
CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
- isVariadic = FPT->isVariadic();
} else {
const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
- FI = &getFunctionInfo(
+ FI = &arrangeFunctionType(
CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
- isVariadic = true;
}
// If there is something higher level prodding our CGFunctionInfo, then
@@ -495,7 +491,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
} else {
// Otherwise, we're good to go, go ahead and convert it.
- ResultType = GetFunctionType(*FI, isVariadic);
+ ResultType = GetFunctionType(*FI);
}
RecordsBeingLaidOut.erase(Ty);
@@ -560,7 +556,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::Atomic: {
- ResultType = ConvertTypeForMem(cast<AtomicType>(Ty)->getValueType());
+ ResultType = ConvertType(cast<AtomicType>(Ty)->getValueType());
break;
}
}
@@ -655,7 +651,7 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
bool CodeGenTypes::isZeroInitializable(QualType T) {
// No need to check for member pointers when not compiling C++.
- if (!Context.getLangOptions().CPlusPlus)
+ if (!Context.getLangOpts().CPlusPlus)
return true;
T = Context.getBaseElementType(T);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
index 7f0f8ac..ba2b3ae 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -52,10 +52,13 @@ namespace clang {
namespace CodeGen {
class CGCXXABI;
class CGRecordLayout;
+ class CodeGenModule;
+ class RequiredArgs;
/// CodeGenTypes - This class organizes the cross-module state that is used
/// while lowering AST types to LLVM types.
class CodeGenTypes {
+ // Some of this stuff should probably be left on the CGM.
ASTContext &Context;
const TargetInfo &Target;
llvm::Module &TheModule;
@@ -63,6 +66,7 @@ class CodeGenTypes {
const ABIInfo &TheABIInfo;
CGCXXABI &TheCXXABI;
const CodeGenOptions &CodeGenOpts;
+ CodeGenModule &CGM;
/// The opaque type map for Objective-C interfaces. All direct
/// manipulation is done by the runtime interfaces, which are
@@ -101,9 +105,7 @@ private:
llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
public:
- CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
- const ABIInfo &Info, CGCXXABI &CXXABI,
- const CodeGenOptions &Opts);
+ CodeGenTypes(CodeGenModule &CGM);
~CodeGenTypes();
const llvm::TargetData &getTargetData() const { return TheTargetData; }
@@ -124,8 +126,7 @@ public:
llvm::Type *ConvertTypeForMem(QualType T);
/// GetFunctionType - Get the LLVM function type for \arg Info.
- llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
- bool IsVariadic);
+ llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
llvm::FunctionType *GetFunctionType(GlobalDecl GD);
@@ -148,50 +149,66 @@ public:
/// getNullaryFunctionInfo - Get the function info for a void()
/// function with standard CC.
- const CGFunctionInfo &getNullaryFunctionInfo();
-
- /// getFunctionInfo - Get the function info for the specified function decl.
- const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
-
- const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
- const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
- const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
- const CGFunctionInfo &getFunctionInfo(const CXXConstructorDecl *D,
- CXXCtorType Type);
- const CGFunctionInfo &getFunctionInfo(const CXXDestructorDecl *D,
- CXXDtorType Type);
-
- const CGFunctionInfo &getFunctionInfo(const CallArgList &Args,
- const FunctionType *Ty) {
- return getFunctionInfo(Ty->getResultType(), Args,
- Ty->getExtInfo());
- }
-
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
-
- /// getFunctionInfo - Get the function info for a member function of
- /// the given type. This is used for calls through member function
- /// pointers.
- const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
- const FunctionProtoType *FTP);
-
- /// getFunctionInfo - Get the function info for a function described by a
- /// return type and argument types. If the calling convention is not
- /// specified, the "C" calling convention will be used.
- const CGFunctionInfo &getFunctionInfo(QualType ResTy,
- const CallArgList &Args,
- const FunctionType::ExtInfo &Info);
- const CGFunctionInfo &getFunctionInfo(QualType ResTy,
- const FunctionArgList &Args,
- const FunctionType::ExtInfo &Info);
+ const CGFunctionInfo &arrangeNullaryFunction();
+
+ // The arrangement methods are split into three families:
+ // - those meant to drive the signature and prologue/epilogue
+ // of a function declaration or definition,
+ // - those meant for the computation of the LLVM type for an abstract
+ // appearance of a function, and
+ // - those meant for performing the IR-generation of a call.
+ // They differ mainly in how they deal with optional (i.e. variadic)
+ // arguments, as well as unprototyped functions.
+ //
+ // Key points:
+ // - The CGFunctionInfo for emitting a specific call site must include
+ // entries for the optional arguments.
+ // - The function type used at the call site must reflect the formal
+ // signature of the declaration being called, or else the call will
+ // go awry.
+ // - For the most part, unprototyped functions are called by casting to
+ // a formal signature inferred from the specific argument types used
+ // at the call-site. However, some targets (e.g. x86-64) screw with
+ // this for compatibility reasons.
+
+ const CGFunctionInfo &arrangeGlobalDeclaration(GlobalDecl GD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(const FunctionDecl *FD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(QualType ResTy,
+ const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info,
+ bool isVariadic);
+
+ const CGFunctionInfo &arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType);
+
+ const CGFunctionInfo &arrangeCXXMethodDeclaration(const CXXMethodDecl *MD);
+ const CGFunctionInfo &arrangeCXXConstructorDeclaration(
+ const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ const CGFunctionInfo &arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ const CGFunctionInfo &arrangeFunctionCall(const CallArgList &Args,
+ const FunctionType *Ty);
+ const CGFunctionInfo &arrangeFunctionCall(QualType ResTy,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required);
+
+ const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionNoProtoType> Ty);
+ const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP);
/// Retrieves the ABI information for the given function signature.
+ /// This is the "core" routine to which all the others defer.
///
- /// \param ArgTys - must all actually be canonical as params
- const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
- const SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info);
+ /// \param argTypes - must all actually be canonical as params
+ const CGFunctionInfo &arrangeFunctionType(CanQualType returnType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs args);
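To ground the key points above (illustration):

    int printf(const char *fmt, ...);   // formal LLVM type: i32 (i8*, ...)
    void f(int x, double y) {
      printf("%d %f\n", x, y);
      // The callee's function type stays i32 (i8*, ...), matching the
      // declaration, but the CGFunctionInfo arranged for this specific call
      // lists all three arguments, with RequiredArgs recording that only
      // 'fmt' is formally required; the extras follow the variadic ABI
      // (on x86-64, %al must also carry the number of vector registers used).
    }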
/// \brief Compute a new LLVM record layout object for the given record.
CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
index c3f635a..98f67f3 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -73,15 +73,17 @@ public:
llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src);
-
- llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
- const CastExpr *E);
+ llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset);
+ llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+ llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment);
llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
@@ -121,7 +123,7 @@ public:
llvm::Value *&AllocPtr, CharUnits &CookieSize);
void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
- llvm::GlobalVariable *DeclPtr);
+ llvm::GlobalVariable *DeclPtr, bool PerformInit);
};
class ARMCXXABI : public ItaniumCXXABI {
@@ -216,8 +218,8 @@ ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
- FPT->isVariadic());
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
llvm::IntegerType *ptrdiff = getPtrDiffTy();
llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
@@ -312,7 +314,10 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
return Builder.CreateBitCast(Addr, PType);
}
-/// Perform a derived-to-base or base-to-derived member pointer conversion.
+/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
+/// conversion.
+///
+/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
/// <-- offset --> <-- adjustment -->
@@ -335,146 +340,106 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
- llvm::Value *Src) {
+ llvm::Value *src) {
assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
- E->getCastKind() == CK_BaseToDerivedMemberPointer);
-
- if (isa<llvm::Constant>(Src))
- return EmitMemberPointerConversion(cast<llvm::Constant>(Src), E);
-
- CGBuilderTy &Builder = CGF.Builder;
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
- const MemberPointerType *SrcTy =
- E->getSubExpr()->getType()->getAs<MemberPointerType>();
- const MemberPointerType *DestTy = E->getType()->getAs<MemberPointerType>();
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
- const CXXRecordDecl *SrcDecl = SrcTy->getClass()->getAsCXXRecordDecl();
- const CXXRecordDecl *DestDecl = DestTy->getClass()->getAsCXXRecordDecl();
+ // Use constant emission if we can.
+ if (isa<llvm::Constant>(src))
+ return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
- bool DerivedToBase =
- E->getCastKind() == CK_DerivedToBaseMemberPointer;
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
- const CXXRecordDecl *DerivedDecl;
- if (DerivedToBase)
- DerivedDecl = SrcDecl;
- else
- DerivedDecl = DestDecl;
+ CGBuilderTy &Builder = CGF.Builder;
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
- llvm::Constant *Adj =
- CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
- E->path_begin(),
- E->path_end());
- if (!Adj) return Src;
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
// For member data pointers, this is just a matter of adding the
// offset if the source is non-null.
- if (SrcTy->isMemberDataPointer()) {
- llvm::Value *Dst;
- if (DerivedToBase)
- Dst = Builder.CreateNSWSub(Src, Adj, "adj");
+ if (destTy->isMemberDataPointer()) {
+ llvm::Value *dst;
+ if (isDerivedToBase)
+ dst = Builder.CreateNSWSub(src, adj, "adj");
else
- Dst = Builder.CreateNSWAdd(Src, Adj, "adj");
+ dst = Builder.CreateNSWAdd(src, adj, "adj");
// Null check.
- llvm::Value *Null = llvm::Constant::getAllOnesValue(Src->getType());
- llvm::Value *IsNull = Builder.CreateICmpEQ(Src, Null, "memptr.isnull");
- return Builder.CreateSelect(IsNull, Src, Dst);
+ llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
+ llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
+ return Builder.CreateSelect(isNull, src, dst);
}
// The this-adjustment is left-shifted by 1 on ARM.
if (IsARM) {
- uint64_t Offset = cast<llvm::ConstantInt>(Adj)->getZExtValue();
- Offset <<= 1;
- Adj = llvm::ConstantInt::get(Adj->getType(), Offset);
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
}
- llvm::Value *SrcAdj = Builder.CreateExtractValue(Src, 1, "src.adj");
- llvm::Value *DstAdj;
- if (DerivedToBase)
- DstAdj = Builder.CreateNSWSub(SrcAdj, Adj, "adj");
+ llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
+ llvm::Value *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
else
- DstAdj = Builder.CreateNSWAdd(SrcAdj, Adj, "adj");
+ dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
- return Builder.CreateInsertValue(Src, DstAdj, 1);
+ return Builder.CreateInsertValue(src, dstAdj, 1);
}
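To make the member-data-pointer branch above concrete, a standalone sketch (plain C++) of the null-check-then-adjust rule, using the Itanium encoding of a null data member pointer as all-ones (-1):

#include <cstdint>

// Derived-to-base subtracts the base-class offset; base-to-derived adds it.
// The null value must be preserved unchanged.
std::int64_t convertDataMemberPointer(std::int64_t src,
                                      std::int64_t baseOffset,
                                      bool derivedToBase) {
  if (src == -1)                        // null converts to null
    return src;
  return derivedToBase ? src - baseOffset : src + baseOffset;
}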
llvm::Constant *
-ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
- const CastExpr *E) {
- const MemberPointerType *SrcTy =
- E->getSubExpr()->getType()->getAs<MemberPointerType>();
- const MemberPointerType *DestTy =
- E->getType()->getAs<MemberPointerType>();
-
- bool DerivedToBase =
- E->getCastKind() == CK_DerivedToBaseMemberPointer;
-
- const CXXRecordDecl *DerivedDecl;
- if (DerivedToBase)
- DerivedDecl = SrcTy->getClass()->getAsCXXRecordDecl();
- else
- DerivedDecl = DestTy->getClass()->getAsCXXRecordDecl();
-
- // Calculate the offset to the base class.
- llvm::Constant *Offset =
- CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
- E->path_begin(),
- E->path_end());
- // If there's no offset, we're done.
- if (!Offset) return C;
-
- // If the source is a member data pointer, we have to do a null
- // check and then add the offset. In the common case, we can fold
- // away the offset.
- if (SrcTy->isMemberDataPointer()) {
- assert(C->getType() == getPtrDiffTy());
-
- // If it's a constant int, just create a new constant int.
- if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
- int64_t Src = CI->getSExtValue();
-
- // Null converts to null.
- if (Src == -1) return CI;
-
- // Otherwise, just add the offset.
- int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
- int64_t Dst = (DerivedToBase ? Src - OffsetV : Src + OffsetV);
- return llvm::ConstantInt::get(CI->getType(), Dst, /*signed*/ true);
- }
+ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
- // Otherwise, we have to form a constant select expression.
- llvm::Constant *Null = llvm::Constant::getAllOnesValue(C->getType());
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
- llvm::Constant *IsNull =
- llvm::ConstantExpr::getICmp(llvm::ICmpInst::ICMP_EQ, C, Null);
+ // If the adjustment is trivial, we don't need to do anything.
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
- llvm::Constant *Dst;
- if (DerivedToBase)
- Dst = llvm::ConstantExpr::getNSWSub(C, Offset);
- else
- Dst = llvm::ConstantExpr::getNSWAdd(C, Offset);
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
+
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (destTy->isMemberDataPointer()) {
+ // null maps to null.
+ if (src->isAllOnesValue()) return src;
- return llvm::ConstantExpr::getSelect(IsNull, Null, Dst);
+ if (isDerivedToBase)
+ return llvm::ConstantExpr::getNSWSub(src, adj);
+ else
+ return llvm::ConstantExpr::getNSWAdd(src, adj);
}
// The this-adjustment is left-shifted by 1 on ARM.
if (IsARM) {
- int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
- OffsetV <<= 1;
- Offset = llvm::ConstantInt::get(Offset->getType(), OffsetV);
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
}
- llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
-
- llvm::Constant *Values[2] = { CS->getOperand(0), 0 };
- if (DerivedToBase)
- Values[1] = llvm::ConstantExpr::getSub(CS->getOperand(1), Offset);
+ llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
+ llvm::Constant *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
else
- Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
-
- return llvm::ConstantStruct::get(CS->getType(), Values);
-}
+ dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
+ return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
+}
llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
@@ -500,6 +465,11 @@ ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
}
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return BuildMemberPointer(MD, CharUnits::Zero());
+}
+
+llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment) {
assert(MD->isInstance() && "Member function must not be static!");
MD = MD->getCanonicalDecl();
@@ -524,14 +494,16 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
// discrimination as the least significant bit of ptr does for
// Itanium.
MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
- MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 1);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ 2 * ThisAdjustment.getQuantity() + 1);
} else {
// Itanium C++ ABI 2.3:
// For a virtual function, [the pointer field] is 1 plus the
// virtual table offset (in bytes) of the function,
// represented as a ptrdiff_t.
MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
- MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ ThisAdjustment.getQuantity());
}
} else {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
@@ -539,8 +511,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
// Check whether the function has a computable LLVM signature.
if (Types.isFuncTypeConvertible(FPT)) {
// The function has a computable LLVM signature; use the correct type.
- Ty = Types.GetFunctionType(Types.getFunctionInfo(MD),
- FPT->isVariadic());
+ Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
} else {
// Use an arbitrary non-function type to tell GetAddrOfFunction that the
// function type is incomplete.
@@ -549,12 +520,45 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, ptrdiff_t);
- MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, (IsARM ? 2 : 1) *
+ ThisAdjustment.getQuantity());
}
return llvm::ConstantStruct::getAnon(MemPtr);
}
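The {ptr, adj} encodings produced above, reduced to a standalone sketch (plain C++; generic Itanium versus the ARM variant):

#include <cstdint>

struct MemberFnPtrSketch { std::int64_t ptr; std::int64_t adj; };

// Generic ABI: the low bit of ptr flags a virtual function, so ptr holds
// either the function address or (vtable offset + 1), and adj holds the
// this-adjustment directly. ARM moves the flag into the low bit of adj,
// which is why the adjustment is doubled there.
MemberFnPtrSketch encode(bool isVirtual, std::int64_t addrOrVTableOffset,
                         std::int64_t thisAdjustment, bool isARM) {
  if (isARM)
    return { addrOrVTableOffset, 2 * thisAdjustment + (isVirtual ? 1 : 0) };
  return { isVirtual ? addrOrVTableOffset + 1 : addrOrVTableOffset,
           thisAdjustment };
}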
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
+ QualType MPType) {
+ const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ if (!MPD)
+ return EmitNullMemberPointer(MPT);
+
+ // Compute the this-adjustment.
+ CharUnits ThisAdjustment = CharUnits::Zero();
+ ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
+ bool DerivedMember = MP.isMemberPointerToDerivedMember();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = RD;
+ const CXXRecordDecl *Derived = Path[I];
+ if (DerivedMember)
+ std::swap(Base, Derived);
+ ThisAdjustment +=
+ getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
+ RD = Path[I];
+ }
+ if (DerivedMember)
+ ThisAdjustment = -ThisAdjustment;
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
+ return BuildMemberPointer(MD, ThisAdjustment);
+
+ CharUnits FieldOffset =
+ getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
+ return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
+}
+
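The this-adjustment computed by the path walk above is just a signed sum of base-class offsets; schematically (plain C++, hypothetical offsets):

#include <cstdint>
#include <vector>

// Sum the base-class offsets along the cast path; a pointer to a member
// of a derived class uses the negated adjustment.
std::int64_t accumulateThisAdjustment(const std::vector<std::int64_t> &baseOffsets,
                                      bool derivedMember) {
  std::int64_t adj = 0;
  for (std::int64_t off : baseOffsets)
    adj += off;
  return derivedMember ? -adj : adj;
}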
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
@@ -775,7 +779,7 @@ void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
/// Initialize the return slot to 'this' at the start of the
/// function.
if (HasThisReturn(CGF.CurGD))
- CGF.Builder.CreateStore(CGF.LoadCXXThis(), CGF.ReturnValue);
+ CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
@@ -801,7 +805,7 @@ bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
// Automatic Reference Counting:
// We need an array cookie for pointers with strong or weak lifetime.
QualType AllocatedType = expr->getAllocatedType();
- if (getContext().getLangOptions().ObjCAutoRefCount &&
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
AllocatedType->isObjCLifetimeType()) {
switch (AllocatedType.getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -1020,27 +1024,28 @@ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
GuardPtrTy, /*isVarArg=*/false);
- return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
+ llvm::Attribute::NoUnwind);
}
static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- GuardPtrTy, /*isVarArg=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
- return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
+ llvm::Attribute::NoUnwind);
}
static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_abort(__guard *guard_object);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- GuardPtrTy, /*isVarArg=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
- return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
+ llvm::Attribute::NoUnwind);
}
namespace {
@@ -1059,43 +1064,53 @@ namespace {
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
const VarDecl &D,
- llvm::GlobalVariable *GV) {
+ llvm::GlobalVariable *var,
+ bool shouldPerformInit) {
CGBuilderTy &Builder = CGF.Builder;
// We only need to use thread-safe statics for local variables;
// global initialization is always single-threaded.
bool threadsafe =
- (getContext().getLangOptions().ThreadsafeStatics && D.isLocalVarDecl());
-
- llvm::IntegerType *GuardTy;
+ (getContext().getLangOpts().ThreadsafeStatics && D.isLocalVarDecl());
// If we have a global variable with internal linkage and thread-safe statics
// are disabled, we can just let the guard variable be of type i8.
- bool useInt8GuardVariable = !threadsafe && GV->hasInternalLinkage();
+ bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
+
+ llvm::IntegerType *guardTy;
if (useInt8GuardVariable) {
- GuardTy = CGF.Int8Ty;
+ guardTy = CGF.Int8Ty;
} else {
// Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
- GuardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
+ guardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
+ }
+ llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
+
+ // Create the guard variable if we don't already have it (as we
+ // might if we're double-emitting this function body).
+ llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
+ if (!guard) {
+ // Mangle the name for the guard.
+ SmallString<256> guardName;
+ {
+ llvm::raw_svector_ostream out(guardName);
+ getMangleContext().mangleItaniumGuardVariable(&D, out);
+ out.flush();
+ }
+
+ // Create the guard variable with a zero-initializer.
+ // Just absorb linkage and visibility from the guarded variable.
+ guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
+ false, var->getLinkage(),
+ llvm::ConstantInt::get(guardTy, 0),
+ guardName.str());
+ guard->setVisibility(var->getVisibility());
+
+ CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
- llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
-
- // Create the guard variable.
- llvm::SmallString<256> GuardVName;
- llvm::raw_svector_ostream Out(GuardVName);
- getMangleContext().mangleItaniumGuardVariable(&D, Out);
- Out.flush();
-
- // Just absorb linkage and visibility from the variable.
- llvm::GlobalVariable *GuardVariable =
- new llvm::GlobalVariable(CGM.getModule(), GuardTy,
- false, GV->getLinkage(),
- llvm::ConstantInt::get(GuardTy, 0),
- GuardVName.str());
- GuardVariable->setVisibility(GV->getVisibility());
// Test whether the variable has completed initialization.
- llvm::Value *IsInitialized;
+ llvm::Value *isInitialized;
// ARM C++ ABI 3.2.3.1:
// To support the potential use of initialization guard variables
@@ -1109,9 +1124,9 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// ...
// }
if (IsARM && !useInt8GuardVariable) {
- llvm::Value *V = Builder.CreateLoad(GuardVariable);
+ llvm::Value *V = Builder.CreateLoad(guard);
V = Builder.CreateAnd(V, Builder.getInt32(1));
- IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+ isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
// Itanium C++ ABI 3.3.2:
// The following is pseudo-code showing how these functions can be used:
@@ -1129,9 +1144,8 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// }
} else {
// Load the first byte of the guard variable.
- llvm::Type *PtrTy = Builder.getInt8PtrTy();
llvm::LoadInst *LI =
- Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy));
+ Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
LI->setAlignment(1);
// Itanium ABI:
@@ -1143,14 +1157,14 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
if (threadsafe)
LI->setAtomic(llvm::Acquire);
- IsInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
+ isInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
}
llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
// Check if the first byte of the guard variable is zero.
- Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
+ Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
CGF.EmitBlock(InitCheckBlock);
@@ -1158,7 +1172,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
if (threadsafe) {
// Call __cxa_guard_acquire.
llvm::Value *V
- = Builder.CreateCall(getGuardAcquireFn(CGM, GuardPtrTy), GuardVariable);
+ = Builder.CreateCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
@@ -1166,22 +1180,22 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
InitBlock, EndBlock);
// Call __cxa_guard_abort along the exceptional edge.
- CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
+ CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
CGF.EmitBlock(InitBlock);
}
// Emit the initializer and add a global destructor if appropriate.
- CGF.EmitCXXGlobalVarDeclInit(D, GV);
+ CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
if (threadsafe) {
// Pop the guard-abort cleanup if we pushed one.
CGF.PopCleanupBlock();
// Call __cxa_guard_release. This cannot throw.
- Builder.CreateCall(getGuardReleaseFn(CGM, GuardPtrTy), GuardVariable);
+ Builder.CreateCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
} else {
- Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
+ Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
}
CGF.EmitBlock(EndBlock);
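Taken together, the control flow emitted above is the standard Itanium guard protocol. A rough standalone rendering (plain C++; signatures simplified, using the generic ABI's 64-bit guard word):

extern "C" {
int  __cxa_guard_acquire(long long *g);
void __cxa_guard_release(long long *g);
void __cxa_guard_abort(long long *g);
}

void initOnce(long long &guard, void (*init)()) {
  // Fast path: a non-zero first byte means "already initialized".
  if (*reinterpret_cast<char *>(&guard) == 0) {
    if (__cxa_guard_acquire(&guard)) {  // returns 1 if we should initialize
      try {
        init();
        __cxa_guard_release(&guard);    // publish "initialized"
      } catch (...) {
        __cxa_guard_abort(&guard);      // let another thread retry
        throw;
      }
    }
  }
}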
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index e200e79..825e041 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -78,6 +78,13 @@ public:
// delete[] p;
// }
// Whereas it prints "104" and "104" if you give A a destructor.
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ CGF.CGM.ErrorUnsupported(expr, "don't know how to handle array cookies "
+ "in the Microsoft C++ ABI");
+ }
};
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
index 793ee91..ea2389e 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -28,12 +28,12 @@ using namespace clang;
namespace {
class CodeGeneratorImpl : public CodeGenerator {
DiagnosticsEngine &Diags;
- llvm::OwningPtr<const llvm::TargetData> TD;
+ OwningPtr<const llvm::TargetData> TD;
ASTContext *Ctx;
const CodeGenOptions CodeGenOpts; // Intentionally copied in.
protected:
- llvm::OwningPtr<llvm::Module> M;
- llvm::OwningPtr<CodeGen::CodeGenModule> Builder;
+ OwningPtr<llvm::Module> M;
+ OwningPtr<CodeGen::CodeGenModule> Builder;
public:
CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string& ModuleName,
const CodeGenOptions &CGO, llvm::LLVMContext& C)
@@ -59,10 +59,15 @@ namespace {
*M, *TD, Diags));
}
- virtual void HandleTopLevelDecl(DeclGroupRef DG) {
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Builder->HandleCXXStaticMemberVarInstantiation(VD);
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
// Make sure to emit all elements of a Decl.
for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
Builder->EmitTopLevelDecl(*I);
+ return true;
}
/// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
@@ -74,7 +79,7 @@ namespace {
// In C++, we may have member functions that need to be emitted at this
// point.
- if (Ctx->getLangOptions().CPlusPlus && !D->isDependentContext()) {
+ if (Ctx->getLangOpts().CPlusPlus && !D->isDependentContext()) {
for (DeclContext::decl_iterator M = D->decls_begin(),
MEnd = D->decls_end();
M != MEnd; ++M)
@@ -112,6 +117,8 @@ namespace {
};
}
+void CodeGenerator::anchor() { }
+
CodeGenerator *clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags,
const std::string& ModuleName,
const CodeGenOptions &CGO,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
index e1dc8f7..3ed1778 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -98,7 +98,8 @@ unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
return 32;
}
-bool TargetCodeGenInfo::isNoProtoCallVariadic(CallingConv CC) const {
+bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
// The following conventions are known to require this to be false:
// x86_stdcall
// MIPS
@@ -117,10 +118,14 @@ static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
QualType FT = FD->getType();
- // Constant arrays of empty records count as empty, strip them off.
+  // Constant arrays of empty records count as empty; strip them off.
+ // Constant arrays of zero length always count as empty.
if (AllowArrays)
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize() == 0)
+ return true;
FT = AT->getElementType();
+ }
const RecordType *RT = FT->getAs<RecordType>();
if (!RT)
@@ -252,6 +257,11 @@ static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
}
}
+ // We don't consider a struct a single-element struct if it has
+ // padding beyond the element type.
+ if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
+ return 0;
+
return Found;
}
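The padding rule just added, in miniature (plain C++):

struct OneFloat { float f; };                 // size == sizeof(float): single element
struct PaddedFloat { alignas(8) float f; };   // tail padding: rejected
static_assert(sizeof(OneFloat) == sizeof(float), "no padding");
static_assert(sizeof(PaddedFloat) > sizeof(float), "padding beyond the element");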
@@ -287,6 +297,8 @@ static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
return false;
+ uint64_t Size = 0;
+
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
const FieldDecl *FD = *i;
@@ -299,8 +311,14 @@ static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
// counts as "basic" is more complicated than what we were doing previously.
if (FD->isBitField())
return false;
+
+ Size += Context.getTypeSize(FD->getType());
}
+  // Make sure there are no holes in the struct.
+ if (Size != Context.getTypeSize(Ty))
+ return false;
+
return true;
}
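For example (fixed-width fields so the arithmetic is portable):

#include <cstdint>

struct NoHole  { std::int32_t a, b; };       // 4 + 4 == 8 bytes: expandable
struct HasHole { char c; std::int32_t i; };  // 1 + 4 != 8 bytes: has a hole
static_assert(sizeof(NoHole) == 8, "fields cover the whole struct");
static_assert(sizeof(HasHole) == 8, "three padding bytes after c");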
@@ -339,8 +357,14 @@ llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
- if (isAggregateTypeForABI(Ty))
+ if (isAggregateTypeForABI(Ty)) {
+    // Records with non-trivial destructors or copy constructors should
+    // not be passed by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
return ABIArgInfo::getIndirect(0);
+ }
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
@@ -365,8 +389,8 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
-/// UseX86_MMXType - Return true if this is an MMX type that should use the special
-/// x86_mmx type.
+/// UseX86_MMXType - Return true if this is an MMX type that should use the
+/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
// If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
// special x86_mmx type.
@@ -394,12 +418,14 @@ class X86_32ABIInfo : public ABIInfo {
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
bool IsMMXDisabled;
+ bool IsWin32FloatStructABI;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
}
- static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
+ unsigned callingConvention);
/// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
@@ -410,11 +436,13 @@ class X86_32ABIInfo : public ABIInfo {
public:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ unsigned callingConvention) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ FI.getCallingConvention());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
it->info = classifyArgumentType(it->type);
@@ -423,15 +451,16 @@ public:
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w)
: ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
- IsMMXDisabled(m) {}
+ IsMMXDisabled(m), IsWin32FloatStructABI(w) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m)) {}
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ bool d, bool p, bool m, bool w)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
@@ -459,7 +488,8 @@ public:
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
- ASTContext &Context) {
+ ASTContext &Context,
+ unsigned callingConvention) {
uint64_t Size = Context.getTypeSize(Ty);
// Type must be register sized.
@@ -484,7 +514,8 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
// Arrays are treated like records.
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
- return shouldReturnTypeInRegister(AT->getElementType(), Context);
+ return shouldReturnTypeInRegister(AT->getElementType(), Context,
+ callingConvention);
// Otherwise, it must be a record type.
const RecordType *RT = Ty->getAs<RecordType>();
@@ -492,6 +523,13 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
// FIXME: Traverse bases here too.
+ // For thiscall conventions, structures will never be returned in
+  // a register. This is for compatibility with the MSVC ABI.
+ if (callingConvention == llvm::CallingConv::X86_ThisCall &&
+ RT->isStructureType()) {
+ return false;
+ }
+
// Structure types are passed in register if all fields would be
// passed in a register.
for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
@@ -503,14 +541,15 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
continue;
// Check fields recursively.
- if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ if (!shouldReturnTypeInRegister(FD->getType(), Context,
+ callingConvention))
return false;
}
-
return true;
}
-ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ unsigned callingConvention) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
@@ -555,51 +594,24 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
return ABIArgInfo::getIndirect(0);
- // Classify "single element" structs as their element type.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) {
- if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
- if (BT->isIntegerType()) {
- // We need to use the size of the structure, padding
- // bit-fields can adjust that to be larger than the single
- // element type.
- uint64_t Size = getContext().getTypeSize(RetTy);
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), (unsigned)Size));
- }
-
- if (BT->getKind() == BuiltinType::Float) {
- assert(getContext().getTypeSize(RetTy) ==
- getContext().getTypeSize(SeltTy) &&
- "Unexpect single element structure size!");
- return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
- }
-
- if (BT->getKind() == BuiltinType::Double) {
- assert(getContext().getTypeSize(RetTy) ==
- getContext().getTypeSize(SeltTy) &&
- "Unexpect single element structure size!");
- return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
- }
- } else if (SeltTy->isPointerType()) {
- // FIXME: It would be really nice if this could come out as the proper
- // pointer type.
- llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
- return ABIArgInfo::getDirect(PtrTy);
- } else if (SeltTy->isVectorType()) {
- // 64- and 128-bit vectors are never returned in a
- // register when inside a structure.
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size == 64 || Size == 128)
- return ABIArgInfo::getIndirect(0);
-
- return classifyReturnType(QualType(SeltTy, 0));
- }
- }
-
// Small structures which are register sized are generally returned
// in a register.
- if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
+ callingConvention)) {
uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // As a special-case, if the struct is a "single-element" struct, and
+ // the field is of type "float" or "double", return it in a
+ // floating-point register. (MSVC does not apply this special case.)
+ // We apply a similar transformation for pointer types to improve the
+ // quality of the generated IR.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
+ || SeltTy->hasPointerRepresentation())
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ // FIXME: We should be able to narrow this integer in cases with dead
+ // padding.
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
}
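A tiny illustration of the special case above (plain C++; behavior as on a typical i386 target):

struct WrappedFloat { float value; };

// Non-Windows i386: returned directly as a float (in %st(0)).
// With IsWin32FloatStructABI: returned as a 32-bit integer instead.
WrappedFloat wrap(float f) { return WrappedFloat{f}; }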
@@ -631,7 +643,7 @@ static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
i != e; ++i) {
QualType FT = i->getType();
- if (FT->getAs<VectorType>() && Context.getTypeSize(Ty) == 128)
+ if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
return true;
if (isRecordWithSSEVectorType(Context, FT))
@@ -655,7 +667,7 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
}
// Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (isRecordWithSSEVectorType(getContext(), Ty))
+ if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
return 16;
return MinABIStackAlignInBytes;
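For instance, with GNU vector extensions (alignments as on a typical i386 target):

typedef float v4sf __attribute__((vector_size(16)));

struct VecBox { v4sf v; };   // contains a 128-bit SSE vector field
static_assert(alignof(VecBox) == 16,
              "the vector field propagates 16-byte alignment");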
@@ -694,8 +706,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
return getIndirectResult(Ty);
}
- // Ignore empty structs.
- if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0)
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
// Expand small (<= 128-bit) record types when we know that the stack layout
@@ -743,19 +755,36 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+
+  // Compute whether the address needs to be aligned.
+ unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
+ Align = getTypeStackAlignInBytes(Ty, Align);
+ Align = std::max(Align, 4U);
+ if (Align > 4) {
+ // addr = (addr + align - 1) & -align;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
+ Addr = CGF.Builder.CreateGEP(Addr, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
+ CGF.Int32Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
+ Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ Addr->getType(),
+ "ap.cur.aligned");
+ }
+
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
@@ -782,10 +811,8 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-7 are the eight integer registers; the order is different
// on Darwin (for EH), but the range is the same.
@@ -796,7 +823,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
// 12-16 are st(0..4). Not sure why we stop at 4.
// These have size 16, which is sizeof(long double) on
// platforms with 8-byte alignment for that type.
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
} else {
@@ -807,7 +834,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
// 11-16 are st(0..5). Not sure why we stop at 5.
// These have size 12, which is sizeof(long double) on
// platforms with 4-byte alignment for that type.
- llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
+ llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
}
@@ -897,14 +924,20 @@ class X86_64ABIInfo : public ABIInfo {
/// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty) const;
+ ///
+ /// \param freeIntRegs - The number of free integer registers remaining
+ /// available.
+ ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty,
+ unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE) const;
+ bool IsIllegalVectorType(QualType Ty) const;
+
/// The 0.98 ABI revision clarified a lot of ambiguities,
/// unfortunately in ways that were not always consistent with
/// certain previous compilers. In particular, platforms which
@@ -914,8 +947,23 @@ class X86_64ABIInfo : public ABIInfo {
return !getContext().getTargetInfo().getTriple().isOSDarwin();
}
+ bool HasAVX;
+
public:
- X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
+ ABIInfo(CGT), HasAVX(hasavx) {}
+
+ bool isPassedUsingAVXType(QualType type) const {
+ unsigned neededInt, neededSSE;
+ // The freeIntRegs argument doesn't matter here.
+ ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
+ if (info.isDirect()) {
+ llvm::Type *ty = info.getCoerceToType();
+ if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
+ return (vectorTy->getBitWidth() > 128);
+ }
+ return false;
+ }
virtual void computeInfo(CGFunctionInfo &FI) const;
@@ -939,8 +987,12 @@ public:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+
+ const X86_64ABIInfo &getABIInfo() const {
+ return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 7;
@@ -948,16 +1000,11 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
-
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
// 0-15 are the 16 integer registers.
// 16 is %rip.
- AssignToArrayRange(Builder, Address, Eight8, 0, 16);
-
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
return false;
}
@@ -967,13 +1014,29 @@ public:
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
- bool isNoProtoCallVariadic(CallingConv CC) const {
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
// The default CC on x86-64 sets %al to the number of SSA
// registers used, and GCC sets this when calling an unprototyped
- // function, so we override the default behavior.
- if (CC == CC_Default || CC == CC_C) return true;
+ // function, so we override the default behavior. However, don't do
+ // that when AVX types are involved: the ABI explicitly states it is
+ // undefined, and it doesn't work in practice because of how the ABI
+ // defines varargs anyway.
+ if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
+ bool HasAVXType = false;
+ for (CallArgList::const_iterator
+ it = args.begin(), ie = args.end(); it != ie; ++it) {
+ if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
+ HasAVXType = true;
+ break;
+ }
+ }
+
+ if (!HasAVXType)
+ return true;
+ }
- return TargetCodeGenInfo::isNoProtoCallVariadic(CC);
+ return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
}
};
@@ -989,16 +1052,11 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
-
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
// 0-15 are the 16 integer registers.
// 16 is %rip.
- AssignToArrayRange(Builder, Address, Eight8, 0, 16);
-
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
return false;
}
};
@@ -1164,7 +1222,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// split.
if (OffsetBase && OffsetBase != 64)
Hi = Lo;
- } else if (Size == 128 || Size == 256) {
+ } else if (Size == 128 || (HasAVX && Size == 256)) {
// Arguments of 256-bits are split into four eightbyte chunks. The
// least significant one belongs to class SSE and all the others to class
// SSEUP. The original Lo and Hi design considers that types can't be
@@ -1377,10 +1435,28 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
return ABIArgInfo::getIndirect(0);
}
-ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
+bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VecTy);
+ unsigned LargestVector = HasAVX ? 256 : 128;
+ if (Size <= 64 || Size > LargestVector)
+ return true;
+ }
+
+ return false;
+}
+
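Expressed with GNU vector extensions, the bounds above sort vectors like so (a sketch; "legal" meaning passable in SSE/AVX registers):

typedef int v2si __attribute__((vector_size(8)));   // 64-bit: illegal, passed indirectly
typedef int v4si __attribute__((vector_size(16)));  // 128-bit: always legal
typedef int v8si __attribute__((vector_size(32)));  // 256-bit: legal only when HasAVX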
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ unsigned freeIntRegs) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
- if (!isAggregateTypeForABI(Ty)) {
+ //
+ // This assumption is optimistic, as there could be free registers available
+ // when we need to pass this argument in memory, and LLVM could try to pass
+ // the argument in the free register. This does not seem to happen currently,
+ // but this code would be much safer if we could mark the argument with
+ // 'onstack'. See PR12193.
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -1395,6 +1471,38 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
// Compute the byval alignment. We specify the alignment of the byval in all
// cases so that the mid-level optimizer knows the alignment of the byval.
unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
+
+ // Attempt to avoid passing indirect results using byval when possible. This
+ // is important for good codegen.
+ //
+ // We do this by coercing the value into a scalar type which the backend can
+ // handle naturally (i.e., without using byval).
+ //
+ // For simplicity, we currently only do this when we have exhausted all of the
+ // free integer registers. Doing this when there are free integer registers
+ // would require more care, as we would have to ensure that the coerced value
+  // did not claim the unused register. That would require either reordering the
+ // arguments to the function (so that any subsequent inreg values came first),
+ // or only doing this optimization when there were no following arguments that
+ // might be inreg.
+ //
+ // We currently expect it to be rare (particularly in well written code) for
+ // arguments to be passed on the stack when there are still free integer
+ // registers available (this would typically imply large structs being passed
+ // by value), so this seems like a fair tradeoff for now.
+ //
+ // We can revisit this if the backend grows support for 'onstack' parameter
+ // attributes. See PR12193.
+ if (freeIntRegs == 0) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // If this type fits in an eightbyte, coerce it into the matching integral
+ // type, which will end up on the stack (with alignment 8).
+ if (Align == 8 && Size <= 64)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
return ABIArgInfo::getIndirect(Align);
}
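Schematically, once the integer registers are gone (a sketch; names hypothetical):

#include <cstdint>

// 8 bytes with eightbyte-compatible alignment: coerced to a single i64
// rather than passed byval, so it lands on the stack with alignment 8.
struct SmallAgg { std::int32_t a, b; };
// conceptual lowering of callee(SmallAgg): callee(/* i64 */ std::int64_t coerced)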
@@ -1416,7 +1524,7 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
llvm::Type *EltTy = VT->getElementType();
unsigned BitWidth = VT->getBitWidth();
- if ((BitWidth == 128 || BitWidth == 256) &&
+ if ((BitWidth >= 128 && BitWidth <= 256) &&
(EltTy->isFloatTy() || EltTy->isDoubleTy() ||
EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
@@ -1810,8 +1918,10 @@ classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(ResType);
}
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
- unsigned &neededSSE) const {
+ABIArgInfo
+X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
+                                    unsigned &neededInt,
+                                    unsigned &neededSSE) const {
X86_64ABIInfo::Class Lo, Hi;
classify(Ty, 0, Lo, Hi);
@@ -1843,7 +1953,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
case ComplexX87:
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
++neededInt;
- return getIndirectResult(Ty);
+ return getIndirectResult(Ty, freeIntRegs);
case SSEUp:
case X87Up:
@@ -1951,7 +2061,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, neededInt, neededSSE);
+ it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
+ neededSSE);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
@@ -1961,7 +2072,7 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
freeIntRegs -= neededInt;
freeSSERegs -= neededSSE;
} else {
- it->info = getIndirectResult(it->type);
+ it->info = getIndirectResult(it->type, freeIntRegs);
}
}
}
@@ -1976,19 +2087,17 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
// AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
// byte boundary if alignment needed by type exceeds 8 byte boundary.
+ // It isn't stated explicitly in the standard, but in practice we use
+ // alignment greater than 16 where necessary.
uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
if (Align > 8) {
- // Note that we follow the ABI & gcc here, even though the type
- // could in theory have an alignment greater than 16. This case
- // shouldn't ever matter in practice.
-
- // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+ // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
llvm::Value *Offset =
- llvm::ConstantInt::get(CGF.Int32Ty, 15);
+ llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
CGF.Int64Ty);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
overflow_arg_area =
CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
overflow_arg_area->getType(),
@@ -2019,8 +2128,6 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::LLVMContext &VMContext = CGF.getLLVMContext();
-
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i32 gp_offset;
@@ -2031,7 +2138,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
unsigned neededInt, neededSSE;
Ty = CGF.getContext().getCanonicalType(Ty);
- ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
+ ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not go to step 7.
@@ -2129,7 +2236,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// area, we need to collect the two eightbytes together.
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
- llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+ llvm::Type *DoubleTy = CGF.DoubleTy;
llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
llvm::StructType *ST = llvm::StructType::get(DoubleTy,
@@ -2192,7 +2299,8 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
// FIXME: mingw-w64-gcc emits 128-bit struct as i128
if (Size == 128 &&
- getContext().getTargetInfo().getTriple().getOS() == llvm::Triple::MinGW32)
+ getContext().getTargetInfo().getTriple().getOS()
+ == llvm::Triple::MinGW32)
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
@@ -2224,8 +2332,7 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
@@ -2270,9 +2377,8 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// against gcc output. AFAIK all ABIs use the same encoding.
CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = CGF.Int8Ty;
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
@@ -2327,8 +2433,9 @@ public:
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
bool isEABI() const {
- StringRef Env = getContext().getTargetInfo().getTriple().getEnvironmentName();
- return (Env == "gnueabi" || Env == "eabi");
+ StringRef Env =
+ getContext().getTargetInfo().getTriple().getEnvironmentName();
+ return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi");
}
private:
@@ -2362,15 +2469,10 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
-
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-15 are the 16 integer registers.
- AssignToArrayRange(Builder, Address, Four8, 0, 15);
-
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
return false;
}
@@ -2671,6 +2773,14 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
+ // Check for homogeneous aggregates with AAPCS-VFP.
+ if (getABIKind() == AAPCS_VFP) {
+ const Type *Base = 0;
+ if (isHomogeneousAggregate(RetTy, Base, getContext()))
+ // Homogeneous Aggregates are returned directly.
+ return ABIArgInfo::getDirect();
+ }
+
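For reference, a homogeneous aggregate in this sense is a small struct whose fields are all the same floating-point (or vector) type, e.g.:

// Returned directly (in VFP registers) under AAPCS-VFP rather than
// indirectly through memory.
struct Vec2 { float x, y; };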
// Aggregates <= 4 bytes are returned in r0; other aggregates
// are returned indirectly.
uint64_t Size = getContext().getTypeSize(RetTy);
@@ -2688,12 +2798,11 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
// Handle address alignment for type alignment > 32 bits
uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
@@ -2773,7 +2882,7 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
// The calling convention used by default for this ABI.
llvm::CallingConv::ID DefaultCC;
- const LangOptions &LangOpts = getContext().getLangOptions();
+ const LangOptions &LangOpts = getContext().getLangOpts();
if (LangOpts.OpenCL || LangOpts.CUDA) {
// If we are in OpenCL or CUDA mode, then default to device functions
DefaultCC = llvm::CallingConv::PTX_Device;
@@ -2793,7 +2902,6 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CFG) const {
llvm_unreachable("PTX does not support varargs");
- return 0;
}
void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
@@ -2805,7 +2913,7 @@ void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
llvm::Function *F = cast<llvm::Function>(GV);
// Perform special handling in OpenCL mode
- if (M.getLangOptions().OpenCL) {
+ if (M.getLangOpts().OpenCL) {
// Use OpenCL function attributes to set proper calling conventions
// By default, all functions are device functions
if (FD->hasAttr<OpenCLKernelAttr>()) {
@@ -2817,7 +2925,7 @@ void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
// Perform special handling in CUDA mode.
- if (M.getLangOptions().CUDA) {
+ if (M.getLangOpts().CUDA) {
// CUDA __global__ functions get a kernel calling convention. Since
// __global__ functions cannot be called from the device, we do not
// need to set the noinline attribute.
@@ -2829,85 +2937,6 @@ void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
//===----------------------------------------------------------------------===//
-// SystemZ ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class SystemZABIInfo : public ABIInfo {
-public:
- SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
-
- bool isPromotableIntegerType(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
-
- virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
-};
-
-}
-
-bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
- // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Bool:
- case BuiltinType::Char_S:
- case BuiltinType::Char_U:
- case BuiltinType::SChar:
- case BuiltinType::UChar:
- case BuiltinType::Short:
- case BuiltinType::UShort:
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- return false;
- }
- return false;
-}
-
-llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // FIXME: Implement
- return 0;
-}
-
-
-ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
- if (isAggregateTypeForABI(RetTy))
- return ABIArgInfo::getIndirect(0);
-
- return (isPromotableIntegerType(RetTy) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
- if (isAggregateTypeForABI(Ty))
- return ABIArgInfo::getIndirect(0);
-
- return (isPromotableIntegerType(Ty) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-}
-
-//===----------------------------------------------------------------------===//
// MBlaze ABI Implementation
//===----------------------------------------------------------------------===//
@@ -3063,24 +3092,28 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
namespace {
class MipsABIInfo : public ABIInfo {
- static const unsigned MinABIStackAlignInBytes = 4;
+ bool IsO32;
+ unsigned MinABIStackAlignInBytes;
+ llvm::Type* HandleAggregates(QualType Ty) const;
+ llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
+ llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
- MipsABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
};
-const unsigned MipsABIInfo::MinABIStackAlignInBytes;
-
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
unsigned SizeOfUnwindException;
public:
- MIPSTargetCodeGenInfo(CodeGenTypes &CGT, unsigned SZ)
- : TargetCodeGenInfo(new MipsABIInfo(CGT)), SizeOfUnwindException(SZ) {}
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
+ : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 29;
@@ -3095,35 +3128,184 @@ public:
};
}
-ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty) const {
+// In N32/64, an aligned double precision floating point field is passed in
+// a register.
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
+ if (IsO32)
+ return 0;
+
+ if (Ty->isComplexType())
+ return CGT.ConvertType(Ty);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ // Unions are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType())
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ uint64_t StructSize = getContext().getTypeSize(Ty);
+ assert(!(StructSize % 8) && "Size of structure must be multiple of 8.");
+
+ uint64_t LastOffset = 0;
+ unsigned idx = 0;
+ llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
+ SmallVector<llvm::Type*, 8> ArgList;
+
+ // Iterate over fields in the struct/class and check if there are any aligned
+ // double fields.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const QualType Ty = (*i)->getType();
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+
+ if (!BT || BT->getKind() != BuiltinType::Double)
+ continue;
+
+ uint64_t Offset = Layout.getFieldOffset(idx);
+ if (Offset % 64) // Ignore doubles that are not aligned.
+ continue;
+
+ // Add ((Offset - LastOffset) / 64) args of type i64.
+ for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
+ ArgList.push_back(I64);
+
+ // Add double type.
+ ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
+ LastOffset = Offset + 64;
+ }
+
+ // This struct/class doesn't have an aligned double field.
+ if (!LastOffset)
+ return 0;
+
+ // Add ((StructSize - LastOffset) / 64) args of type i64.
+ for (unsigned N = (StructSize - LastOffset) / 64; N; --N)
+ ArgList.push_back(I64);
+
+ // If the size of the remainder is not zero, add one more integer type to
+ // ArgList.
+ unsigned R = (StructSize - LastOffset) % 64;
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+
+ return llvm::StructType::get(getVMContext(), ArgList);
+}
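
To make the N32/64 slot arithmetic above concrete, here is a standalone C++ sketch (not clang code; Field and layoutN64 are invented names for illustration) that reproduces the i64/double member list HandleAggregates builds from a struct's field bit offsets:

// Standalone illustration only -- not clang code. Reproduces the slot
// layout HandleAggregates computes, given each field's bit offset and
// whether it is a 64-bit-aligned double.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct Field { uint64_t Offset; bool IsDouble; };

std::vector<std::string> layoutN64(const std::vector<Field> &Fields,
                                   uint64_t StructSize) {
  std::vector<std::string> ArgList;
  uint64_t LastOffset = 0;
  for (const Field &F : Fields) {
    if (!F.IsDouble || F.Offset % 64)
      continue;                          // only aligned doubles matter
    for (uint64_t J = (F.Offset - LastOffset) / 64; J > 0; --J)
      ArgList.push_back("i64");          // fill the gap with i64 slots
    ArgList.push_back("double");
    LastOffset = F.Offset + 64;
  }
  if (!LastOffset)                       // no aligned double: caller
    return {};                           // falls back to byval
  for (uint64_t N = (StructSize - LastOffset) / 64; N; --N)
    ArgList.push_back("i64");
  if (uint64_t R = (StructSize - LastOffset) % 64)
    ArgList.push_back("i" + std::to_string(R));
  return ArgList;
}

int main() {
  // struct { int a; double d; }: int at bit 0, double at bit 64,
  // 128 bits total -> coerced to { i64, double }.
  for (const std::string &T : layoutN64({{0, false}, {64, true}}, 128))
    std::cout << T << ' ';
  std::cout << '\n';
}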
+
+llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
+ // Padding is inserted only for N32/64.
+ if (IsO32)
+ return 0;
+
+ assert(Align <= 16 && "Alignment larger than 16 not handled.");
+ return (Align == 16 && Offset & 0xf) ?
+ llvm::IntegerType::get(getVMContext(), 64) : 0;
+}
+
+ABIArgInfo
+MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ uint64_t OrigOffset = Offset;
+ uint64_t TySize =
+ llvm::RoundUpToAlignment(getContext().getTypeSize(Ty), 64) / 8;
+ uint64_t Align = getContext().getTypeAlign(Ty) / 8;
+ Offset = llvm::RoundUpToAlignment(Offset, std::max(Align, (uint64_t)8));
+ Offset += TySize;
+
if (isAggregateTypeForABI(Ty)) {
// Ignore empty aggregates.
- if (getContext().getTypeSize(Ty) == 0)
+ if (TySize == 0)
return ABIArgInfo::getIgnore();
    // Records with non-trivial destructors/constructors should not be passed
// by value.
- if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
+ Offset = OrigOffset + 8;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ }
- return ABIArgInfo::getIndirect(0);
+ // If we have reached here, aggregates are passed either indirectly via a
+ // byval pointer or directly by coercing to another structure type. In the
+ // latter case, padding is inserted if the offset of the aggregate is
+ // unaligned.
+ llvm::Type *ResType = HandleAggregates(Ty);
+
+ if (!ResType)
+ return ABIArgInfo::getIndirect(0);
+
+ return ABIArgInfo::getDirect(ResType, 0, getPaddingType(Align, OrigOffset));
}
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ if (Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+
+ return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset));
+}
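
The offset bookkeeping above is plain round-up arithmetic. A minimal standalone sketch (roundUp mirrors llvm::RoundUpToAlignment, assuming power-of-two alignments) of how a 128-bit, 16-byte-aligned argument arriving at byte offset 8 advances the running offset:

// Standalone illustration only -- roundUp mirrors llvm::RoundUpToAlignment.
#include <algorithm>
#include <cstdint>
#include <iostream>

uint64_t roundUp(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align;
}

int main() {
  // A 128-bit argument with 16-byte alignment arriving at byte offset 8:
  // the offset rounds to 16, so getPaddingType reports one i64 of padding.
  uint64_t Offset = 8;
  uint64_t TySize = roundUp(128, 64) / 8;          // size in bytes
  Offset = roundUp(Offset, std::max<uint64_t>(16, 8));
  std::cout << "arg at " << Offset << ", next arg at " << Offset + TySize
            << '\n';                               // arg at 16, next at 32
}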
+
+llvm::Type*
+MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
+ const RecordType *RT = RetTy->getAs<RecordType>();
+ SmallVector<llvm::Type*, 2> RTList;
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ unsigned FieldCnt = Layout.getFieldCount();
+
+ // N32/64 returns struct/classes in floating point registers if the
+ // following conditions are met:
+ // 1. The size of the struct/class is no larger than 128-bit.
+ // 2. The struct/class has one or two fields all of which are floating
+ // point types.
+ // 3. The offset of the first field is zero (this follows what gcc does).
+ //
+ // Any other composite results are returned in integer registers.
+ //
+ if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
+ RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
+ for (; b != e; ++b) {
+ const BuiltinType *BT = (*b)->getType()->getAs<BuiltinType>();
+
+ if (!BT || !BT->isFloatingPoint())
+ break;
+
+ RTList.push_back(CGT.ConvertType((*b)->getType()));
+ }
+
+ if (b == e)
+ return llvm::StructType::get(getVMContext(), RTList,
+ RD->hasAttr<PackedAttr>());
+
+ RTList.clear();
+ }
+ }
+
+ RTList.push_back(llvm::IntegerType::get(getVMContext(),
+ std::min(Size, (uint64_t)64)));
+ if (Size > 64)
+ RTList.push_back(llvm::IntegerType::get(getVMContext(), Size - 64));
+
+ return llvm::StructType::get(getVMContext(), RTList);
}
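
For the integer-register fallback at the end of returnAggregateInRegs, a small standalone sketch (intReturn is an invented name) of the i64-plus-remainder split:

// Standalone illustration only -- the integer-register split above.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> intReturn(uint64_t Size) {
  std::vector<std::string> RTList;
  RTList.push_back("i" + std::to_string(std::min<uint64_t>(Size, 64)));
  if (Size > 64)                       // spill the rest into a second member
    RTList.push_back("i" + std::to_string(Size - 64));
  return RTList;
}

int main() {
  // A 96-bit struct of three ints -> { i64, i32 }.
  for (const std::string &T : intReturn(96))
    std::cout << T << ' ';
  std::cout << '\n';
}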
ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->isVoidType() || Size == 0)
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy)) {
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect();
+ if (Size <= 128) {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+ }
return ABIArgInfo::getIndirect(0);
}
@@ -3137,29 +3319,36 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
}
void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ ABIArgInfo &RetInfo = FI.getReturnInfo();
+ RetInfo = classifyReturnType(FI.getReturnType());
+
+ // Check if a pointer to an aggregate is passed as a hidden argument.
+ uint64_t Offset = RetInfo.isIndirect() ? 8 : 0;
+
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ it->info = classifyArgumentType(it->type, Offset);
}
llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped;
+ unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
+ llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
if (TypeAlign > MinABIStackAlignInBytes) {
- llvm::Value *AddrAsInt32 = CGF.Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
- llvm::Value *Inc = llvm::ConstantInt::get(CGF.Int32Ty, TypeAlign - 1);
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -TypeAlign);
- llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt32, Inc);
+ llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
+ llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
+ llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
+ llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
}
@@ -3167,11 +3356,11 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
AddrTyped = Builder.CreateBitCast(Addr, PTy);
llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
- TypeAlign = std::max(TypeAlign, MinABIStackAlignInBytes);
+ TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
llvm::Value *NextAddr =
- Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
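
The add/and sequence above is the standard trick for rounding an address up to a power-of-two alignment. A standalone sketch (alignUp is an invented name):

// Standalone illustration only -- the add-and-mask rounding used above.
#include <cstdint>
#include <iostream>

uint64_t alignUp(uint64_t Addr, uint64_t TypeAlign) {
  // TypeAlign must be a power of two; -TypeAlign == ~(TypeAlign - 1).
  return (Addr + TypeAlign - 1) & ~(TypeAlign - 1);
}

int main() {
  std::cout << std::hex << alignUp(0x1004, 8) << '\n'; // prints 1008
}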
@@ -3184,19 +3373,15 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
- llvm::LLVMContext &Context = CGF.getLLVMContext();
-
// Everything on MIPS is 4 bytes. Double-precision FP registers
// are aliased to pairs of single-precision FP registers.
- llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-31 are the general purpose registers, $0 - $31.
// 32-63 are the floating-point registers, $f0 - $f31.
// 64 and 65 are the multiply/divide registers, $hi and $lo.
// 66 is the (notional, I think) register for signal-handler return.
- AssignToArrayRange(Builder, Address, Four8, 0, 65);
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
// 67-74 are the floating-point status registers, $fcc0 - $fcc7.
// They are one bit wide and ignored here.
@@ -3206,8 +3391,7 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
// 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
// 176-181 are the DSP accumulator registers.
- AssignToArrayRange(Builder, Address, Four8, 80, 181);
-
+ AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
return false;
}
@@ -3236,7 +3420,7 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
llvm::Function *F = cast<llvm::Function>(GV);
- if (M.getLangOptions().OpenCL) {
+ if (M.getLangOpts().OpenCL) {
if (FD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL C Kernel functions are not subject to inlining
F->addFnAttr(llvm::Attribute::NoInline);
@@ -3251,27 +3435,20 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
SmallVector<llvm::Value*, 5> Operands;
Operands.push_back(F);
- Operands.push_back(llvm::Constant::getIntegerValue(
- llvm::Type::getInt32Ty(Context),
- llvm::APInt(
- 32,
- FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
- Operands.push_back(llvm::Constant::getIntegerValue(
- llvm::Type::getInt32Ty(Context),
- llvm::APInt(
- 32,
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
- Operands.push_back(llvm::Constant::getIntegerValue(
- llvm::Type::getInt32Ty(Context),
- llvm::APInt(
- 32,
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
// Add a boolean constant operand for "required" (true) or "hint" (false)
// for implementing the work_group_size_hint attr later. Currently
// always true as the hint is not yet implemented.
- Operands.push_back(llvm::ConstantInt::getTrue(llvm::Type::getInt1Ty(Context)));
-
+ Operands.push_back(llvm::ConstantInt::getTrue(Context));
OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
}
}
@@ -3280,6 +3457,147 @@ void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
+//===----------------------------------------------------------------------===//
+// Hexagon ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HexagonABIInfo : public ABIInfo {
+
+
+public:
+ HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+private:
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
+ :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return 29;
+ }
+};
+
+}
+
+void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+}
+
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 64)
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ // Pass in the smallest viable integer type.
+ else if (Size > 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ else if (Size > 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ else if (Size > 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ else
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+}
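
A quick standalone sketch (coerceType is an invented name) of the size-to-type ladder Hexagon uses above, with sizes in bits:

// Standalone illustration only -- Hexagon's size-to-type ladder (bits).
#include <cstdint>
#include <iostream>

const char *coerceType(uint64_t Size) {
  if (Size > 64) return "indirect (byval)";
  if (Size > 32) return "i64";
  if (Size > 16) return "i32";
  if (Size > 8)  return "i16";
  return "i8";
}

int main() {
  for (uint64_t S : {8, 24, 33, 64, 65})
    std::cout << S << " bits -> " << coerceType(S) << '\n';
}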
+
+ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
+ return ABIArgInfo::getIndirect(0);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 8 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 64) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ if (Size <= 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ }
+
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+}
+
+llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (TheTargetCodeGenInfo)
return *TheTargetCodeGenInfo;
@@ -3291,11 +3609,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::mips:
case llvm::Triple::mipsel:
- return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, 24));
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
- return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, 32));
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
case llvm::Triple::arm:
case llvm::Triple::thumb:
@@ -3317,9 +3635,6 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::ptx64:
return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
- case llvm::Triple::systemz:
- return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
-
case llvm::Triple::mblaze:
return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
@@ -3334,7 +3649,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX));
+ new X86_32TargetCodeGenInfo(
+ Types, true, true, DisableMMX, false));
switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
@@ -3343,24 +3659,36 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
- case llvm::Triple::NetBSD:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX));
+ new X86_32TargetCodeGenInfo(
+ Types, false, true, DisableMMX, false));
+
+ case llvm::Triple::Win32:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, false, true, DisableMMX, true));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX));
+ new X86_32TargetCodeGenInfo(
+ Types, false, false, DisableMMX, false));
}
}
- case llvm::Triple::x86_64:
+ case llvm::Triple::x86_64: {
+ bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;
+
switch (Triple.getOS()) {
case llvm::Triple::Win32:
case llvm::Triple::MinGW32:
case llvm::Triple::Cygwin:
return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
default:
- return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
+ HasAVX));
}
}
+ case llvm::Triple::hexagon:
+ return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
+ }
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
index 8f90c7b..88b4997 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
@@ -30,8 +30,10 @@ namespace clang {
class Decl;
namespace CodeGen {
+ class CallArgList;
class CodeGenModule;
class CodeGenFunction;
+ class CGFunctionInfo;
}
/// TargetCodeGenInfo - This class organizes various target-specific
@@ -160,7 +162,8 @@ namespace clang {
/// same way and some out-of-band information is passed for the
/// benefit of variadic callees, as is the case for x86-64.
/// In this case the ABI should be consulted.
- virtual bool isNoProtoCallVariadic(CallingConv CC) const;
+ virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
+ const FunctionNoProtoType *fnType) const;
};
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Action.cpp b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
index 52c0dbb..d7b4bc7 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Action.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
@@ -27,6 +27,7 @@ const char *Action::getClassName(ActionClass AC) {
case PreprocessJobClass: return "preprocessor";
case PrecompileJobClass: return "precompiler";
case AnalyzeJobClass: return "analyzer";
+ case MigrateJobClass: return "migrator";
case CompileJobClass: return "compiler";
case AssembleJobClass: return "assembler";
case LinkJobClass: return "linker";
@@ -38,14 +39,20 @@ const char *Action::getClassName(ActionClass AC) {
llvm_unreachable("invalid class");
}
+void InputAction::anchor() {}
+
InputAction::InputAction(const Arg &_Input, types::ID _Type)
: Action(InputClass, _Type), Input(_Input) {
}
+void BindArchAction::anchor() {}
+
BindArchAction::BindArchAction(Action *Input, const char *_ArchName)
: Action(BindArchClass, Input, Input->getType()), ArchName(_ArchName) {
}
+void JobAction::anchor() {}
+
JobAction::JobAction(ActionClass Kind, Action *Input, types::ID Type)
: Action(Kind, Input, Type) {
}
@@ -54,38 +61,62 @@ JobAction::JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type)
: Action(Kind, Inputs, Type) {
}
+void PreprocessJobAction::anchor() {}
+
PreprocessJobAction::PreprocessJobAction(Action *Input, types::ID OutputType)
: JobAction(PreprocessJobClass, Input, OutputType) {
}
+void PrecompileJobAction::anchor() {}
+
PrecompileJobAction::PrecompileJobAction(Action *Input, types::ID OutputType)
: JobAction(PrecompileJobClass, Input, OutputType) {
}
+void AnalyzeJobAction::anchor() {}
+
AnalyzeJobAction::AnalyzeJobAction(Action *Input, types::ID OutputType)
: JobAction(AnalyzeJobClass, Input, OutputType) {
}
+void MigrateJobAction::anchor() {}
+
+MigrateJobAction::MigrateJobAction(Action *Input, types::ID OutputType)
+ : JobAction(MigrateJobClass, Input, OutputType) {
+}
+
+void CompileJobAction::anchor() {}
+
CompileJobAction::CompileJobAction(Action *Input, types::ID OutputType)
: JobAction(CompileJobClass, Input, OutputType) {
}
+void AssembleJobAction::anchor() {}
+
AssembleJobAction::AssembleJobAction(Action *Input, types::ID OutputType)
: JobAction(AssembleJobClass, Input, OutputType) {
}
+void LinkJobAction::anchor() {}
+
LinkJobAction::LinkJobAction(ActionList &Inputs, types::ID Type)
: JobAction(LinkJobClass, Inputs, Type) {
}
+void LipoJobAction::anchor() {}
+
LipoJobAction::LipoJobAction(ActionList &Inputs, types::ID Type)
: JobAction(LipoJobClass, Inputs, Type) {
}
+void DsymutilJobAction::anchor() {}
+
DsymutilJobAction::DsymutilJobAction(ActionList &Inputs, types::ID Type)
: JobAction(DsymutilJobClass, Inputs, Type) {
}
+void VerifyJobAction::anchor() {}
+
VerifyJobAction::VerifyJobAction(ActionList &Inputs, types::ID Type)
: JobAction(VerifyJobClass, Inputs, Type) {
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
index 39b7e55..c0a2a50 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
@@ -61,7 +61,7 @@ void Arg::dump() const {
}
std::string Arg::getAsString(const ArgList &Args) const {
- llvm::SmallString<256> Res;
+ SmallString<256> Res;
llvm::raw_svector_ostream OS(Res);
ArgStringList ASL;
@@ -94,7 +94,7 @@ void Arg::render(const ArgList &Args, ArgStringList &Output) const {
break;
case Option::RenderCommaJoinedStyle: {
- llvm::SmallString<256> Res;
+ SmallString<256> Res;
llvm::raw_svector_ostream OS(Res);
OS << getOption().getName();
for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
diff --git a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
index ca9b944..55a0ddf 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
@@ -122,6 +122,24 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
return Res;
}
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2, OptSpecifier Id3,
+ OptSpecifier Id4) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2) ||
+ (*it)->getOption().matches(Id3) ||
+ (*it)->getOption().matches(Id4)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
+}
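+
The new overload follows the driver's usual pattern: remember the last matching argument but claim every match so none is reported as unused. A generic standalone sketch with plain ints standing in for OptSpecifier (all names here are hypothetical):

// Standalone illustration only -- plain ints stand in for OptSpecifier.
#include <initializer_list>
#include <iostream>
#include <vector>

struct Arg { int Opt; bool Claimed = false; };

Arg *getLastArg(std::vector<Arg> &Args, std::initializer_list<int> Ids) {
  Arg *Res = nullptr;
  for (Arg &A : Args)
    for (int Id : Ids)
      if (A.Opt == Id) {
        Res = &A;           // the last match wins...
        A.Claimed = true;   // ...but every match is claimed, so none is
        break;              // reported as an unused argument
      }
  return Res;
}

int main() {
  std::vector<Arg> Args = {{1}, {3}, {2}, {3}};
  std::cout << getLastArg(Args, {2, 3})->Opt << '\n'; // prints 3
}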
+
bool ArgList::hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default) const {
if (Arg *A = getLastArg(Pos, Neg))
return A->getOption().matches(Pos);
@@ -136,13 +154,15 @@ StringRef ArgList::getLastArgValue(OptSpecifier Id,
}
int ArgList::getLastArgIntValue(OptSpecifier Id, int Default,
- clang::DiagnosticsEngine &Diags) const {
+ clang::DiagnosticsEngine *Diags) const {
int Res = Default;
if (Arg *A = getLastArg(Id)) {
- if (StringRef(A->getValue(*this)).getAsInteger(10, Res))
- Diags.Report(diag::err_drv_invalid_int_value)
- << A->getAsString(*this) << A->getValue(*this);
+ if (StringRef(A->getValue(*this)).getAsInteger(10, Res)) {
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_int_value)
+ << A->getAsString(*this) << A->getValue(*this);
+ }
}
return Res;
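
Switching Diags to a pointer makes diagnostics optional: callers without an engine pass null and simply get the default back on parse failure. A standalone sketch of the same pattern (parseIntArg and Diags here are invented, with std::stoi standing in for StringRef::getAsInteger):

// Standalone illustration only -- optional diagnostics via a null pointer.
#include <iostream>
#include <string>

struct Diags {
  void report(const std::string &Msg) { std::cerr << Msg << '\n'; }
};

int parseIntArg(const std::string &Value, int Default, Diags *D) {
  try {
    return std::stoi(Value);           // stands in for getAsInteger(10, Res)
  } catch (...) {
    if (D)                             // only report when a sink exists
      D->report("invalid integer value: " + Value);
    return Default;
  }
}

int main() {
  std::cout << parseIntArg("42", 0, nullptr) << '\n';   // 42
  std::cout << parseIntArg("abc", 7, nullptr) << '\n';  // 7, silently
}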
@@ -210,7 +230,7 @@ void ArgList::ClaimAllArgs() const {
}
const char *ArgList::MakeArgString(const Twine &T) const {
- llvm::SmallString<256> Str;
+ SmallString<256> Str;
T.toVector(Str);
return MakeArgString(Str.str());
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp b/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp
index 90c69ff..ea80f5a 100644
--- a/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp
@@ -18,7 +18,7 @@ using namespace clang::driver::cc1asoptions;
static const OptTable::Info CC1AsInfoTable[] = {
#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
HELPTEXT, METAVAR) \
- { NAME, HELPTEXT, METAVAR, Option::KIND##Class, FLAGS, PARAM, \
+ { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
OPT_##GROUP, OPT_##ALIAS },
#include "clang/Driver/CC1AsOptions.inc"
};
diff --git a/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp b/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp
index 14cf090..884b363 100644
--- a/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp
@@ -18,7 +18,7 @@ using namespace clang::driver::cc1options;
static const OptTable::Info CC1InfoTable[] = {
#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
HELPTEXT, METAVAR) \
- { NAME, HELPTEXT, METAVAR, Option::KIND##Class, FLAGS, PARAM, \
+ { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
OPT_##GROUP, OPT_##ALIAS },
#include "clang/Driver/CC1Options.inc"
};
diff --git a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
index d02da95..42c8449 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
@@ -217,8 +217,12 @@ void Compilation::initCompilationForDiagnostics(void) {
// Remove any user specified output. Claim any unclaimed arguments, so as
// to avoid emitting warnings about unused args.
- if (TranslatedArgs->hasArg(options::OPT_o))
- TranslatedArgs->eraseArg(options::OPT_o);
+ OptSpecifier OutputOpts[] = { options::OPT_o, options::OPT_MD,
+ options::OPT_MMD };
+ for (unsigned i = 0; i != sizeof(OutputOpts)/sizeof(OutputOpts[0]); ++i) {
+ if (TranslatedArgs->hasArg(OutputOpts[i]))
+ TranslatedArgs->eraseArg(OutputOpts[i]);
+ }
TranslatedArgs->ClaimAllArgs();
// Redirect stdout/stderr to /dev/null.
diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
index 4e2d7b6..40e0c00 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
@@ -7,10 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#ifdef HAVE_CLANG_CONFIG_H
-# include "clang/Config/config.h"
-#endif
-
#include "clang/Driver/Driver.h"
#include "clang/Driver/Action.h"
@@ -18,7 +14,6 @@
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/HostInfo.h"
#include "clang/Driver/Job.h"
#include "clang/Driver/OptTable.h"
#include "clang/Driver/Option.h"
@@ -28,7 +23,6 @@
#include "clang/Basic/Version.h"
-#include "llvm/Config/config.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/OwningPtr.h"
@@ -40,22 +34,25 @@
#include "llvm/Support/Program.h"
#include "InputInfo.h"
+#include "ToolChains.h"
#include <map>
+#include "clang/Config/config.h"
+
using namespace clang::driver;
using namespace clang;
Driver::Driver(StringRef ClangExecutable,
- StringRef DefaultHostTriple,
+ StringRef DefaultTargetTriple,
StringRef DefaultImageName,
bool IsProduction,
DiagnosticsEngine &Diags)
: Opts(createDriverOptTable()), Diags(Diags),
ClangExecutable(ClangExecutable), UseStdLib(true),
- DefaultHostTriple(DefaultHostTriple), DefaultImageName(DefaultImageName),
+ DefaultTargetTriple(DefaultTargetTriple),
+ DefaultImageName(DefaultImageName),
DriverTitle("clang \"gcc-compatible\" driver"),
- Host(0),
CCPrintOptionsFilename(0), CCPrintHeadersFilename(0),
CCLogDiagnosticsFilename(0), CCCIsCXX(false),
CCCIsCPP(false),CCCEcho(false), CCCPrintBindings(false),
@@ -65,7 +62,7 @@ Driver::Driver(StringRef ClangExecutable,
CCCUsePCH(true), SuppressMissingInputWarning(false) {
if (IsProduction) {
// In a "production" build, only use clang on architectures we expect to
- // work, and don't use clang C++.
+ // work.
//
    // During development it's more convenient to always have the driver use
// clang, but we don't want users to be confused when things don't work, or
@@ -80,7 +77,7 @@ Driver::Driver(StringRef ClangExecutable,
// Compute the path to the resource directory.
StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
- llvm::SmallString<128> P(Dir);
+ SmallString<128> P(Dir);
if (ClangResourceDir != "")
llvm::sys::path::append(P, ClangResourceDir);
else
@@ -90,7 +87,11 @@ Driver::Driver(StringRef ClangExecutable,
Driver::~Driver() {
delete Opts;
- delete Host;
+
+ for (llvm::StringMap<ToolChain *>::iterator I = ToolChains.begin(),
+ E = ToolChains.end();
+ I != E; ++I)
+ delete I->second;
}
InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgList) {
@@ -112,6 +113,12 @@ InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgList) {
Diag(clang::diag::err_drv_unsupported_opt) << A->getAsString(*Args);
continue;
}
+
+ // Warn about -mcpu= without an argument.
+ if (A->getOption().matches(options::OPT_mcpu_EQ) &&
+ A->containsValue("")) {
+ Diag(clang::diag::warn_drv_empty_joined_argument) << A->getAsString(*Args);
+ }
}
return Args;
@@ -134,8 +141,10 @@ const {
// -{fsyntax-only,-analyze,emit-ast,S} only run up to the compiler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_objc)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
(PhaseArg = DAL.getLastArg(options::OPT__analyze,
- options::OPT__analyze_auto)) ||
+ options::OPT__analyze_auto)) ||
(PhaseArg = DAL.getLastArg(options::OPT_emit_ast)) ||
(PhaseArg = DAL.getLastArg(options::OPT_S))) {
FinalPhase = phases::Compile;
@@ -239,7 +248,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
llvm::PrettyStackTraceString CrashInfo("Compilation construction");
// FIXME: Handle environment options which affect driver behavior, somewhere
- // (client?). GCC_EXEC_PREFIX, LIBRARY_PATH, LPATH, CC_PRINT_OPTIONS.
+ // (client?). GCC_EXEC_PREFIX, LPATH, CC_PRINT_OPTIONS.
if (char *env = ::getenv("COMPILER_PATH")) {
StringRef CompilerPath = env;
@@ -303,10 +312,10 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
Cur = Split.second;
}
}
- // FIXME: We shouldn't overwrite the default host triple here, but we have
- // nowhere else to put this currently.
- if (const Arg *A = Args->getLastArg(options::OPT_ccc_host_triple))
- DefaultHostTriple = A->getValue(*Args);
+ // FIXME: DefaultTargetTriple is used by the target-prefixed calls to as/ld
+ // and getToolChain is const.
+ if (const Arg *A = Args->getLastArg(options::OPT_target))
+ DefaultTargetTriple = A->getValue(*Args);
if (const Arg *A = Args->getLastArg(options::OPT_ccc_install_dir))
Dir = InstalledDir = A->getValue(*Args);
for (arg_iterator it = Args->filtered_begin(options::OPT_B),
@@ -320,14 +329,14 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
if (Args->hasArg(options::OPT_nostdlib))
UseStdLib = false;
- Host = GetHostInfo(DefaultHostTriple.c_str());
-
// Perform the default argument translations.
DerivedArgList *TranslatedArgs = TranslateInputArgs(*Args);
+ // Owned by the host.
+ const ToolChain &TC = getToolChain(*Args);
+
// The compilation takes ownership of Args.
- Compilation *C = new Compilation(*this, *Host->CreateToolChain(*Args), Args,
- TranslatedArgs);
+ Compilation *C = new Compilation(*this, TC, Args, TranslatedArgs);
// FIXME: This behavior shouldn't be here.
if (CCCPrintOptions) {
@@ -342,8 +351,9 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
InputList Inputs;
BuildInputs(C->getDefaultToolChain(), C->getArgs(), Inputs);
- // Construct the list of abstract actions to perform for this compilation.
- if (Host->useDriverDriver())
+ // Construct the list of abstract actions to perform for this compilation. On
+ // Darwin target OSes this uses the driver-driver and universal actions.
+ if (TC.getTriple().isOSDarwin())
BuildUniversalActions(C->getDefaultToolChain(), C->getArgs(),
Inputs, C->getActions());
else
@@ -365,6 +375,13 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// diagnostic information to a bug report.
void Driver::generateCompilationDiagnostics(Compilation &C,
const Command *FailingCommand) {
+ if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
+ return;
+
+ // Don't try to generate diagnostics for link jobs.
+ if (FailingCommand->getCreator().isLinkJob())
+ return;
+
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Please submit a bug report to " BUG_REPORT_URL " and include command"
" line arguments and all diagnostic information.";
@@ -373,6 +390,12 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
CCCIsCPP = true;
CCGenDiagnostics = true;
+ // Save the original job command(s).
+ std::string Cmd;
+ llvm::raw_string_ostream OS(Cmd);
+ C.PrintJob(OS, C.getJobs(), "\n", false);
+ OS.flush();
+
// Clear stale state and suppress tool output.
C.initCompilationForDiagnostics();
Diags.Reset();
@@ -403,21 +426,22 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
}
// Don't attempt to generate preprocessed files if multiple -arch options are
- // used.
- int Archs = 0;
+ // used, unless they're all duplicates.
+ llvm::StringSet<> ArchNames;
for (ArgList::const_iterator it = C.getArgs().begin(), ie = C.getArgs().end();
it != ie; ++it) {
Arg *A = *it;
if (A->getOption().matches(options::OPT_arch)) {
- Archs++;
- if (Archs > 1) {
- Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "Error generating preprocessed source(s) - cannot generate "
- "preprocessed source with multiple -arch options.";
- return;
- }
+ StringRef ArchName = A->getValue(C.getArgs());
+ ArchNames.insert(ArchName);
}
}
+ if (ArchNames.size() > 1) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s) - cannot generate "
+ "preprocessed source with multiple -arch options.";
+ return;
+ }
if (Inputs.empty()) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
@@ -425,13 +449,13 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
return;
}
- // Construct the list of abstract actions to perform for this compilation.
- if (Host->useDriverDriver())
- BuildUniversalActions(C.getDefaultToolChain(), C.getArgs(),
- Inputs, C.getActions());
+ // Construct the list of abstract actions to perform for this compilation. On
+ // Darwin OSes this uses the driver-driver and builds universal actions.
+ const ToolChain &TC = C.getDefaultToolChain();
+ if (TC.getTriple().isOSDarwin())
+ BuildUniversalActions(TC, C.getArgs(), Inputs, C.getActions());
else
- BuildActions(C.getDefaultToolChain(), C.getArgs(), Inputs,
- C.getActions());
+ BuildActions(TC, C.getArgs(), Inputs, C.getActions());
BuildJobs(C);
@@ -449,11 +473,26 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
// If the command succeeded, we are done.
if (Res == 0) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "Preprocessed source(s) are located at:";
+ << "Preprocessed source(s) and associated run script(s) are located at:";
ArgStringList Files = C.getTempFiles();
for (ArgStringList::const_iterator it = Files.begin(), ie = Files.end();
- it != ie; ++it)
+ it != ie; ++it) {
Diag(clang::diag::note_drv_command_failed_diag_msg) << *it;
+
+ std::string Err;
+ std::string Script = StringRef(*it).rsplit('.').first;
+ Script += ".sh";
+ llvm::raw_fd_ostream ScriptOS(Script.c_str(), Err,
+ llvm::raw_fd_ostream::F_Excl |
+ llvm::raw_fd_ostream::F_Binary);
+ if (!Err.empty()) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating run script: " + Script + " " + Err;
+ } else {
+ ScriptOS << Cmd;
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
+ }
+ }
} else {
// Failure, remove preprocessed files.
if (!C.getArgs().hasArg(options::OPT_save_temps))
@@ -486,9 +525,20 @@ int Driver::ExecuteCompilation(const Compilation &C,
return Res;
// Otherwise, remove result files as well.
- if (!C.getArgs().hasArg(options::OPT_save_temps))
+ if (!C.getArgs().hasArg(options::OPT_save_temps)) {
C.CleanupFileList(C.getResultFiles(), true);
+ // Failure result files are valid unless we crashed.
+ if (Res < 0) {
+ C.CleanupFileList(C.getFailureResultFiles(), true);
+#ifdef _WIN32
+ // Exit status should not be negative on Win32,
+ // unless abnormal termination.
+ Res = 1;
+#endif
+ }
+ }
+
// Print extra information about abnormal failures, if possible.
//
// This is ad-hoc, but we don't want to be excessively noisy. If the result
@@ -502,7 +552,7 @@ int Driver::ExecuteCompilation(const Compilation &C,
// FIXME: See FIXME above regarding result code interpretation.
if (Res < 0)
Diag(clang::diag::err_drv_command_signalled)
- << FailingTool.getShortName() << -Res;
+ << FailingTool.getShortName();
else
Diag(clang::diag::err_drv_command_failed)
<< FailingTool.getShortName() << Res;
@@ -820,35 +870,32 @@ void Driver::BuildUniversalActions(const ToolChain &TC,
else
Actions.push_back(new LipoJobAction(Inputs, Act->getType()));
- // Add a 'dsymutil' step if necessary, when debug info is enabled and we
- // have a compile input. We need to run 'dsymutil' ourselves in such cases
- // because the debug info will refer to a temporary object file which is
- // will be removed at the end of the compilation process.
- if (Act->getType() == types::TY_Image) {
- Arg *A = Args.getLastArg(options::OPT_g_Group);
+ // Handle debug info queries.
+ Arg *A = Args.getLastArg(options::OPT_g_Group);
if (A && !A->getOption().matches(options::OPT_g0) &&
!A->getOption().matches(options::OPT_gstabs) &&
ContainsCompileOrAssembleAction(Actions.back())) {
- ActionList Inputs;
- Inputs.push_back(Actions.back());
- Actions.pop_back();
-
- Actions.push_back(new DsymutilJobAction(Inputs, types::TY_dSYM));
-
- // Verify the debug output if we're in assert mode.
- // TODO: The verifier is noisy by default so put this under an
- // option for now.
- #ifndef NDEBUG
- if (Args.hasArg(options::OPT_verify)) {
- ActionList VerifyInputs;
+
+ // Add a 'dsymutil' step if necessary, when debug info is enabled and we
+ // have a compile input. We need to run 'dsymutil' ourselves in such cases
+      // because the debug info will refer to a temporary object file which
+      // will be removed at the end of the compilation process.
+ if (Act->getType() == types::TY_Image) {
+ ActionList Inputs;
+ Inputs.push_back(Actions.back());
+ Actions.pop_back();
+ Actions.push_back(new DsymutilJobAction(Inputs, types::TY_dSYM));
+ }
+
+ // Verify the output (debug information only) if we passed '-verify'.
+ if (Args.hasArg(options::OPT_verify)) {
+ ActionList VerifyInputs;
VerifyInputs.push_back(Actions.back());
Actions.pop_back();
Actions.push_back(new VerifyJobAction(VerifyInputs,
types::TY_Nothing));
}
- #endif
}
- }
}
}
@@ -931,15 +978,17 @@ void Driver::BuildInputs(const ToolChain &TC, const DerivedArgList &Args,
// Check that the file exists, if enabled.
if (CheckInputsExist && memcmp(Value, "-", 2) != 0) {
- llvm::SmallString<64> Path(Value);
- if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory))
- if (llvm::sys::path::is_absolute(Path.str())) {
- Path = WorkDir->getValue(Args);
- llvm::sys::path::append(Path, Value);
+ SmallString<64> Path(Value);
+ if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory)) {
+ SmallString<64> Directory(WorkDir->getValue(Args));
+ if (llvm::sys::path::is_absolute(Directory.str())) {
+ llvm::sys::path::append(Directory, Value);
+ Path.assign(Directory);
}
+ }
bool exists = false;
- if (/*error_code ec =*/llvm::sys::fs::exists(Value, exists) || !exists)
+ if (llvm::sys::fs::exists(Path.c_str(), exists) || !exists)
Diag(clang::diag::err_drv_no_such_file) << Path.str();
else
Inputs.push_back(std::make_pair(Ty, A));
@@ -954,6 +1003,7 @@ void Driver::BuildInputs(const ToolChain &TC, const DerivedArgList &Args,
} else if (A->getOption().matches(options::OPT_x)) {
InputTypeArg = A;
InputType = types::lookupTypeForTypeSpecifier(A->getValue(Args));
+ A->claim();
// Follow gcc behavior and treat as linker input for invalid -x
      // options. It's not clear why we shouldn't just revert to unknown; but
@@ -993,11 +1043,12 @@ void Driver::BuildActions(const ToolChain &TC, const DerivedArgList &Args,
// Construct the actions to perform.
ActionList LinkerInputs;
+ unsigned NumSteps = 0;
for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
types::ID InputType = Inputs[i].first;
const Arg *InputArg = Inputs[i].second;
- unsigned NumSteps = types::getNumCompilationPhases(InputType);
+ NumSteps = types::getNumCompilationPhases(InputType);
assert(NumSteps && "Invalid number of steps!");
// If the first step comes after the final phase we are doing as part of
@@ -1027,7 +1078,7 @@ void Driver::BuildActions(const ToolChain &TC, const DerivedArgList &Args,
}
// Build the pipeline for this file.
- llvm::OwningPtr<Action> Current(new InputAction(*InputArg, InputType));
+ OwningPtr<Action> Current(new InputAction(*InputArg, InputType));
for (unsigned i = 0; i != NumSteps; ++i) {
phases::ID Phase = types::getCompilationPhase(InputType, i);
@@ -1065,7 +1116,7 @@ void Driver::BuildActions(const ToolChain &TC, const DerivedArgList &Args,
// If we are linking, claim any options which are obviously only used for
// compilation.
- if (FinalPhase == phases::Link)
+ if (FinalPhase == phases::Link && (NumSteps == 1))
Args.ClaimAllArgs(options::OPT_CompileOnly_Group);
}
@@ -1094,8 +1145,12 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
return new CompileJobAction(Input, types::TY_Nothing);
} else if (Args.hasArg(options::OPT_rewrite_objc)) {
return new CompileJobAction(Input, types::TY_RewrittenObjC);
+ } else if (Args.hasArg(options::OPT_rewrite_legacy_objc)) {
+ return new CompileJobAction(Input, types::TY_RewrittenLegacyObjC);
} else if (Args.hasArg(options::OPT__analyze, options::OPT__analyze_auto)) {
return new AnalyzeJobAction(Input, types::TY_Plist);
+ } else if (Args.hasArg(options::OPT__migrate)) {
+ return new MigrateJobAction(Input, types::TY_Remap);
} else if (Args.hasArg(options::OPT_emit_ast)) {
return new CompileJobAction(Input, types::TY_AST);
} else if (IsUsingLTO(Args)) {
@@ -1225,17 +1280,9 @@ static const Tool &SelectToolForJob(Compilation &C, const ToolChain *TC,
// bottom up, so what we are actually looking for is an assembler job with a
// compiler input.
- // FIXME: This doesn't belong here, but ideally we will support static soon
- // anyway.
- bool HasStatic = (C.getArgs().hasArg(options::OPT_mkernel) ||
- C.getArgs().hasArg(options::OPT_static) ||
- C.getArgs().hasArg(options::OPT_fapple_kext));
- bool IsDarwin = TC->getTriple().isOSDarwin();
- bool IsIADefault = TC->IsIntegratedAssemblerDefault() &&
- !(HasStatic && IsDarwin);
if (C.getArgs().hasFlag(options::OPT_integrated_as,
- options::OPT_no_integrated_as,
- IsIADefault) &&
+ options::OPT_no_integrated_as,
+ TC->IsIntegratedAssemblerDefault()) &&
!C.getArgs().hasArg(options::OPT_save_temps) &&
isa<AssembleJobAction>(JA) &&
Inputs->size() == 1 && isa<CompileJobAction>(*Inputs->begin())) {
@@ -1289,9 +1336,8 @@ void Driver::BuildJobsForAction(Compilation &C,
if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
const ToolChain *TC = &C.getDefaultToolChain();
- std::string Arch;
if (BAA->getArchName())
- TC = Host->CreateToolChain(C.getArgs(), BAA->getArchName());
+ TC = &getToolChain(C.getArgs(), BAA->getArchName());
BuildJobsForAction(C, *BAA->begin(), TC, BAA->getArchName(),
AtTopLevel, LinkingOutput, Result);
@@ -1383,7 +1429,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
}
- llvm::SmallString<128> BasePath(BaseInput);
+ SmallString<128> BasePath(BaseInput);
StringRef BaseName;
// Dsymutil actions should use the full path.
@@ -1482,7 +1528,8 @@ static bool isPathExecutable(llvm::sys::Path &P, bool WantFile) {
std::string Driver::GetProgramPath(const char *Name, const ToolChain &TC,
bool WantFile) const {
- std::string TargetSpecificExecutable(DefaultHostTriple + "-" + Name);
+ // FIXME: Needs a better variable than DefaultTargetTriple
+ std::string TargetSpecificExecutable(DefaultTargetTriple + "-" + Name);
// Respect a limited subset of the '-Bprefix' functionality in GCC by
  // attempting to use this prefix when looking up program paths.
for (Driver::prefix_list::const_iterator it = PrefixDirs.begin(),
@@ -1545,40 +1592,125 @@ std::string Driver::GetTemporaryPath(StringRef Prefix, const char *Suffix)
return P.str();
}
-const HostInfo *Driver::GetHostInfo(const char *TripleStr) const {
- llvm::PrettyStackTraceString CrashInfo("Constructing host");
- llvm::Triple Triple(llvm::Triple::normalize(TripleStr).c_str());
-
- // TCE is an osless target
- if (Triple.getArchName() == "tce")
- return createTCEHostInfo(*this, Triple);
-
- switch (Triple.getOS()) {
- case llvm::Triple::AuroraUX:
- return createAuroraUXHostInfo(*this, Triple);
- case llvm::Triple::Darwin:
- case llvm::Triple::MacOSX:
- case llvm::Triple::IOS:
- return createDarwinHostInfo(*this, Triple);
- case llvm::Triple::DragonFly:
- return createDragonFlyHostInfo(*this, Triple);
- case llvm::Triple::OpenBSD:
- return createOpenBSDHostInfo(*this, Triple);
- case llvm::Triple::NetBSD:
- return createNetBSDHostInfo(*this, Triple);
- case llvm::Triple::FreeBSD:
- return createFreeBSDHostInfo(*this, Triple);
- case llvm::Triple::Minix:
- return createMinixHostInfo(*this, Triple);
- case llvm::Triple::Linux:
- return createLinuxHostInfo(*this, Triple);
- case llvm::Triple::Win32:
- return createWindowsHostInfo(*this, Triple);
- case llvm::Triple::MinGW32:
- return createMinGWHostInfo(*this, Triple);
- default:
- return createUnknownHostInfo(*this, Triple);
+/// \brief Compute target triple from args.
+///
+/// This routine provides the logic to compute a target triple from various
+/// args passed to the driver and the default triple string.
+static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
+ const ArgList &Args,
+ StringRef DarwinArchName) {
+ // FIXME: Already done in Compilation *Driver::BuildCompilation
+ if (const Arg *A = Args.getLastArg(options::OPT_target))
+ DefaultTargetTriple = A->getValue(Args);
+
+ llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
+
+ // Handle Darwin-specific options available here.
+ if (Target.isOSDarwin()) {
+    // If an explicit Darwin arch name is given, that trumps all.
+ if (!DarwinArchName.empty()) {
+ Target.setArch(
+ llvm::Triple::getArchTypeForDarwinArchName(DarwinArchName));
+ return Target;
+ }
+
+ // Handle the Darwin '-arch' flag.
+ if (Arg *A = Args.getLastArg(options::OPT_arch)) {
+ llvm::Triple::ArchType DarwinArch
+ = llvm::Triple::getArchTypeForDarwinArchName(A->getValue(Args));
+ if (DarwinArch != llvm::Triple::UnknownArch)
+ Target.setArch(DarwinArch);
+ }
+ }
+
+ // Skip further flag support on OSes which don't support '-m32' or '-m64'.
+ if (Target.getArchName() == "tce" ||
+ Target.getOS() == llvm::Triple::AuroraUX ||
+ Target.getOS() == llvm::Triple::Minix)
+ return Target;
+
+ // Handle pseudo-target flags '-m32' and '-m64'.
+ // FIXME: Should this information be in llvm::Triple?
+ if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
+ if (A->getOption().matches(options::OPT_m32)) {
+ if (Target.getArch() == llvm::Triple::x86_64)
+ Target.setArch(llvm::Triple::x86);
+ if (Target.getArch() == llvm::Triple::ppc64)
+ Target.setArch(llvm::Triple::ppc);
+ } else {
+ if (Target.getArch() == llvm::Triple::x86)
+ Target.setArch(llvm::Triple::x86_64);
+ if (Target.getArch() == llvm::Triple::ppc)
+ Target.setArch(llvm::Triple::ppc64);
+ }
+ }
+
+ return Target;
+}
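+
The -m32/-m64 handling only flips between the 32- and 64-bit variants of the same architecture family. A standalone sketch of that remapping on plain strings (remapArch is an invented name; the real code goes through llvm::Triple::setArch):

// Standalone illustration only -- the -m32/-m64 remapping on plain strings.
#include <iostream>
#include <string>

std::string remapArch(const std::string &Arch, bool M32) {
  if (M32) {
    if (Arch == "x86_64") return "i386";
    if (Arch == "ppc64")  return "ppc";
  } else {
    if (Arch == "i386")   return "x86_64";
    if (Arch == "ppc")    return "ppc64";
  }
  return Arch;                         // other arches are left alone
}

int main() {
  std::cout << remapArch("x86_64", /*M32=*/true) << '\n'; // i386
}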
+
+const ToolChain &Driver::getToolChain(const ArgList &Args,
+ StringRef DarwinArchName) const {
+ llvm::Triple Target = computeTargetTriple(DefaultTargetTriple, Args,
+ DarwinArchName);
+
+ ToolChain *&TC = ToolChains[Target.str()];
+ if (!TC) {
+ switch (Target.getOS()) {
+ case llvm::Triple::AuroraUX:
+ TC = new toolchains::AuroraUX(*this, Target, Args);
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ case llvm::Triple::IOS:
+ if (Target.getArch() == llvm::Triple::x86 ||
+ Target.getArch() == llvm::Triple::x86_64 ||
+ Target.getArch() == llvm::Triple::arm ||
+ Target.getArch() == llvm::Triple::thumb)
+ TC = new toolchains::DarwinClang(*this, Target);
+ else
+ TC = new toolchains::Darwin_Generic_GCC(*this, Target, Args);
+ break;
+ case llvm::Triple::DragonFly:
+ TC = new toolchains::DragonFly(*this, Target, Args);
+ break;
+ case llvm::Triple::OpenBSD:
+ TC = new toolchains::OpenBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::NetBSD:
+ TC = new toolchains::NetBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::FreeBSD:
+ TC = new toolchains::FreeBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::Minix:
+ TC = new toolchains::Minix(*this, Target, Args);
+ break;
+ case llvm::Triple::Linux:
+ if (Target.getArch() == llvm::Triple::hexagon)
+ TC = new toolchains::Hexagon_TC(*this, Target);
+ else
+ TC = new toolchains::Linux(*this, Target, Args);
+ break;
+ case llvm::Triple::Solaris:
+ TC = new toolchains::Solaris(*this, Target, Args);
+ break;
+ case llvm::Triple::Win32:
+ TC = new toolchains::Windows(*this, Target);
+ break;
+ case llvm::Triple::MinGW32:
+ // FIXME: We need a MinGW toolchain. Fallthrough for now.
+ default:
+ // TCE is an OSless target
+ if (Target.getArchName() == "tce") {
+ TC = new toolchains::TCEToolChain(*this, Target);
+ break;
+ }
+
+ TC = new toolchains::Generic_GCC(*this, Target, Args);
+ break;
+ }
}
+ return *TC;
}
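
getToolChain now memoizes tool chains keyed by the normalized triple string, replacing the per-HostInfo caches deleted later in this diff. The caching idiom, reduced to a standalone sketch (names invented; the real map stores raw pointers deleted in ~Driver):

// Standalone illustration only -- construct-on-first-use caching by key.
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct ToolChain { std::string Triple; };

const ToolChain &
getToolChain(std::map<std::string, std::unique_ptr<ToolChain>> &Cache,
             const std::string &Triple) {
  std::unique_ptr<ToolChain> &TC = Cache[Triple];
  if (!TC)                              // first request builds the tool chain
    TC = std::make_unique<ToolChain>(ToolChain{Triple});
  return *TC;
}

int main() {
  std::map<std::string, std::unique_ptr<ToolChain>> Cache;
  const ToolChain &A = getToolChain(Cache, "mips64el-unknown-freebsd");
  const ToolChain &B = getToolChain(Cache, "mips64el-unknown-freebsd");
  std::cout << (&A == &B) << '\n';      // 1: the same cached instance
}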
bool Driver::ShouldUseClangCompiler(const Compilation &C, const JobAction &JA,
diff --git a/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
index f2d9af8..715819d 100644
--- a/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
@@ -17,7 +17,7 @@ using namespace clang::driver::options;
static const OptTable::Info InfoTable[] = {
#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
HELPTEXT, METAVAR) \
- { NAME, HELPTEXT, METAVAR, Option::KIND##Class, FLAGS, PARAM, \
+ { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
OPT_##GROUP, OPT_##ALIAS },
#include "clang/Driver/Options.inc"
};
diff --git a/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp b/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp
deleted file mode 100644
index 292678b..0000000
--- a/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp
+++ /dev/null
@@ -1,731 +0,0 @@
-//===--- HostInfo.cpp - Host specific information -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Driver/HostInfo.h"
-
-#include "clang/Driver/Arg.h"
-#include "clang/Driver/ArgList.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/Option.h"
-#include "clang/Driver/Options.h"
-
-#include "llvm/ADT/StringMap.h"
-#include "llvm/Support/Compiler.h"
-
-#include "ToolChains.h"
-
-#include <cassert>
-
-using namespace clang::driver;
-
-HostInfo::HostInfo(const Driver &D, const llvm::Triple &_Triple)
- : TheDriver(D), Triple(_Triple) {
-}
-
-HostInfo::~HostInfo() {
-}
-
-namespace {
-
-// Darwin Host Info
-
-/// DarwinHostInfo - Darwin host information implementation.
-class DarwinHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::DenseMap<unsigned, ToolChain*> ToolChains;
-
-public:
- DarwinHostInfo(const Driver &D, const llvm::Triple &Triple);
- ~DarwinHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-DarwinHostInfo::DarwinHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {
-}
-
-DarwinHostInfo::~DarwinHostInfo() {
- for (llvm::DenseMap<unsigned, ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool DarwinHostInfo::useDriverDriver() const {
- return true;
-}
-
-ToolChain *DarwinHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- llvm::Triple::ArchType Arch;
-
- if (!ArchName) {
- // If we aren't looking for a specific arch, infer the default architecture
- // based on -arch and -m32/-m64 command line options.
- if (Arg *A = Args.getLastArg(options::OPT_arch)) {
- // The gcc driver behavior with multiple -arch flags wasn't consistent for
- // things which rely on a default architecture. We just use the last -arch
- // to find the default tool chain (assuming it is valid).
- Arch = llvm::Triple::getArchTypeForDarwinArchName(A->getValue(Args));
-
- // If it was invalid just use the host, we will reject this command line
- // later.
- if (Arch == llvm::Triple::UnknownArch)
- Arch = getTriple().getArch();
- } else {
- // Otherwise default to the arch of the host.
- Arch = getTriple().getArch();
- }
-
- // Honor -m32 and -m64 when finding the default tool chain.
- //
- // FIXME: Should this information be in llvm::Triple?
- if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
- if (A->getOption().matches(options::OPT_m32)) {
- if (Arch == llvm::Triple::x86_64)
- Arch = llvm::Triple::x86;
- if (Arch == llvm::Triple::ppc64)
- Arch = llvm::Triple::ppc;
- } else {
- if (Arch == llvm::Triple::x86)
- Arch = llvm::Triple::x86_64;
- if (Arch == llvm::Triple::ppc)
- Arch = llvm::Triple::ppc64;
- }
- }
- } else
- Arch = llvm::Triple::getArchTypeForDarwinArchName(ArchName);
-
- assert(Arch != llvm::Triple::UnknownArch && "Unexpected arch!");
- ToolChain *&TC = ToolChains[Arch];
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArch(Arch);
-
- // If we recognized the arch, match it to the toolchains we support.
- if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64 ||
- Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb) {
- TC = new toolchains::DarwinClang(*this, TCTriple);
- } else
- TC = new toolchains::Darwin_Generic_GCC(*this, TCTriple);
- }
-
- return TC;
-}
-
-// TCE Host Info
-
-/// TCEHostInfo - TCE host information implementation (see http://tce.cs.tut.fi)
-class TCEHostInfo : public HostInfo {
-
-public:
- TCEHostInfo(const Driver &D, const llvm::Triple &Triple);
- ~TCEHostInfo() {}
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-TCEHostInfo::TCEHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {
-}
-
-bool TCEHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *TCEHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- llvm::Triple TCTriple(getTriple());
-// TCTriple.setArchName(ArchName);
- return new toolchains::TCEToolChain(*this, TCTriple);
-}
-
-
-// Unknown Host Info
-
-/// UnknownHostInfo - Generic host information to use for unknown hosts.
-class UnknownHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- UnknownHostInfo(const Driver &D, const llvm::Triple& Triple);
- ~UnknownHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-UnknownHostInfo::UnknownHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {
-}
-
-UnknownHostInfo::~UnknownHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool UnknownHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *UnknownHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- assert(!ArchName &&
- "Unexpected arch name on platform without driver support.");
-
- // Automatically handle some instances of -m32/-m64 we know about.
- std::string Arch = getArchName();
- ArchName = Arch.c_str();
- if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
- if (Triple.getArch() == llvm::Triple::x86 ||
- Triple.getArch() == llvm::Triple::x86_64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "i386" : "x86_64";
- } else if (Triple.getArch() == llvm::Triple::ppc ||
- Triple.getArch() == llvm::Triple::ppc64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "powerpc" : "powerpc64";
- }
- }
-
- ToolChain *&TC = ToolChains[ArchName];
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArchName(ArchName);
-
- TC = new toolchains::Generic_GCC(*this, TCTriple);
- }
-
- return TC;
-}
-
-// OpenBSD Host Info
-
-/// OpenBSDHostInfo - OpenBSD host information implementation.
-class OpenBSDHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- OpenBSDHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {}
- ~OpenBSDHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-OpenBSDHostInfo::~OpenBSDHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool OpenBSDHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *OpenBSDHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- assert(!ArchName &&
- "Unexpected arch name on platform without driver driver support.");
-
- std::string Arch = getArchName();
- ArchName = Arch.c_str();
-
- ToolChain *&TC = ToolChains[ArchName];
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArchName(ArchName);
-
- TC = new toolchains::OpenBSD(*this, TCTriple);
- }
-
- return TC;
-}
-
-// AuroraUX Host Info
-
-/// AuroraUXHostInfo - AuroraUX host information implementation.
-class AuroraUXHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- AuroraUXHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {}
- ~AuroraUXHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-AuroraUXHostInfo::~AuroraUXHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool AuroraUXHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *AuroraUXHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- assert(!ArchName &&
- "Unexpected arch name on platform without driver driver support.");
-
- ToolChain *&TC = ToolChains[getArchName()];
-
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArchName(getArchName());
-
- TC = new toolchains::AuroraUX(*this, TCTriple);
- }
-
- return TC;
-}
-
-// FreeBSD Host Info
-
-/// FreeBSDHostInfo - FreeBSD host information implementation.
-class FreeBSDHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- FreeBSDHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {}
- ~FreeBSDHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-FreeBSDHostInfo::~FreeBSDHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool FreeBSDHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *FreeBSDHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- assert(!ArchName &&
- "Unexpected arch name on platform without driver driver support.");
-
- // Automatically handle some instances of -m32/-m64 we know about.
- std::string Arch = getArchName();
- ArchName = Arch.c_str();
- if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
- if (Triple.getArch() == llvm::Triple::x86 ||
- Triple.getArch() == llvm::Triple::x86_64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "i386" : "x86_64";
- } else if (Triple.getArch() == llvm::Triple::ppc ||
- Triple.getArch() == llvm::Triple::ppc64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "powerpc" : "powerpc64";
- }
- }
-
- ToolChain *&TC = ToolChains[ArchName];
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArchName(ArchName);
-
- TC = new toolchains::FreeBSD(*this, TCTriple);
- }
-
- return TC;
-}
-
-// NetBSD Host Info
-
-/// NetBSDHostInfo - NetBSD host information implementation.
-class NetBSDHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- NetBSDHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {}
- ~NetBSDHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-NetBSDHostInfo::~NetBSDHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool NetBSDHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *NetBSDHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- assert(!ArchName &&
- "Unexpected arch name on platform without driver driver support.");
-
- // Automatically handle some instances of -m32/-m64 we know about.
- std::string Arch = getArchName();
- ArchName = Arch.c_str();
- if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
- if (Triple.getArch() == llvm::Triple::x86 ||
- Triple.getArch() == llvm::Triple::x86_64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "i386" : "x86_64";
- } else if (Triple.getArch() == llvm::Triple::ppc ||
- Triple.getArch() == llvm::Triple::ppc64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "powerpc" : "powerpc64";
- }
- }
- llvm::Triple TargetTriple(getTriple());
- TargetTriple.setArchName(ArchName);
-
- ToolChain *TC;
-
- // XXX Cache toolchain even if -m32 is used
- if (Arch == ArchName) {
- TC = ToolChains[ArchName];
- if (TC)
- return TC;
- }
-
- TC = new toolchains::NetBSD(*this, TargetTriple, getTriple());
-
- return TC;
-}
-
-// Minix Host Info
-
-/// MinixHostInfo - Minix host information implementation.
-class MinixHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- MinixHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {}
- ~MinixHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-MinixHostInfo::~MinixHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it){
- delete it->second;
- }
-}
-
-bool MinixHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *MinixHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- assert(!ArchName &&
- "Unexpected arch name on platform without driver driver support.");
-
- std::string Arch = getArchName();
- ArchName = Arch.c_str();
-
- ToolChain *&TC = ToolChains[ArchName];
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArchName(ArchName);
-
- TC = new toolchains::Minix(*this, TCTriple);
- }
-
- return TC;
-}
-
-// DragonFly Host Info
-
-/// DragonFlyHostInfo - DragonFly host information implementation.
-class DragonFlyHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- DragonFlyHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {}
- ~DragonFlyHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-DragonFlyHostInfo::~DragonFlyHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool DragonFlyHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *DragonFlyHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- assert(!ArchName &&
- "Unexpected arch name on platform without driver driver support.");
-
- ToolChain *&TC = ToolChains[getArchName()];
-
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArchName(getArchName());
-
- TC = new toolchains::DragonFly(*this, TCTriple);
- }
-
- return TC;
-}
-
-// Linux Host Info
-
-/// LinuxHostInfo - Linux host information implementation.
-class LinuxHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- LinuxHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {}
- ~LinuxHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-LinuxHostInfo::~LinuxHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool LinuxHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *LinuxHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
-
- assert(!ArchName &&
- "Unexpected arch name on platform without driver driver support.");
-
- // Automatically handle some instances of -m32/-m64 we know about.
- std::string Arch = getArchName();
- ArchName = Arch.c_str();
- if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
- if (Triple.getArch() == llvm::Triple::x86 ||
- Triple.getArch() == llvm::Triple::x86_64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "i386" : "x86_64";
- } else if (Triple.getArch() == llvm::Triple::ppc ||
- Triple.getArch() == llvm::Triple::ppc64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "powerpc" : "powerpc64";
- }
- }
-
- ToolChain *&TC = ToolChains[ArchName];
-
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArchName(ArchName);
-
- TC = new toolchains::Linux(*this, TCTriple);
- }
-
- return TC;
-}
-
-// Windows Host Info
-
-/// WindowsHostInfo - Host information to use on Microsoft Windows.
-class WindowsHostInfo : public HostInfo {
- /// Cache of tool chains we have created.
- mutable llvm::StringMap<ToolChain*> ToolChains;
-
-public:
- WindowsHostInfo(const Driver &D, const llvm::Triple& Triple);
- ~WindowsHostInfo();
-
- virtual bool useDriverDriver() const;
-
- virtual types::ID lookupTypeForExtension(const char *Ext) const {
- return types::lookupTypeForExtension(Ext);
- }
-
- virtual ToolChain *CreateToolChain(const ArgList &Args,
- const char *ArchName) const;
-};
-
-WindowsHostInfo::WindowsHostInfo(const Driver &D, const llvm::Triple& Triple)
- : HostInfo(D, Triple) {
-}
-
-WindowsHostInfo::~WindowsHostInfo() {
- for (llvm::StringMap<ToolChain*>::iterator
- it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
- delete it->second;
-}
-
-bool WindowsHostInfo::useDriverDriver() const {
- return false;
-}
-
-ToolChain *WindowsHostInfo::CreateToolChain(const ArgList &Args,
- const char *ArchName) const {
- assert(!ArchName &&
- "Unexpected arch name on platform without driver driver support.");
-
- // Automatically handle some instances of -m32/-m64 we know about.
- std::string Arch = getArchName();
- ArchName = Arch.c_str();
- if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
- if (Triple.getArch() == llvm::Triple::x86 ||
- Triple.getArch() == llvm::Triple::x86_64) {
- ArchName =
- (A->getOption().matches(options::OPT_m32)) ? "i386" : "x86_64";
- }
- }
-
- ToolChain *&TC = ToolChains[ArchName];
- if (!TC) {
- llvm::Triple TCTriple(getTriple());
- TCTriple.setArchName(ArchName);
-
- TC = new toolchains::Windows(*this, TCTriple);
- }
-
- return TC;
-}
-
-// FIXME: This is a placeholder.
-class MinGWHostInfo : public UnknownHostInfo {
-public:
- MinGWHostInfo(const Driver &D, const llvm::Triple& Triple);
-};
-
-MinGWHostInfo::MinGWHostInfo(const Driver &D, const llvm::Triple& Triple)
- : UnknownHostInfo(D, Triple) {}
-
-} // end anon namespace
-
-const HostInfo *
-clang::driver::createAuroraUXHostInfo(const Driver &D,
- const llvm::Triple& Triple){
- return new AuroraUXHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createDarwinHostInfo(const Driver &D,
- const llvm::Triple& Triple){
- return new DarwinHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createOpenBSDHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new OpenBSDHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createFreeBSDHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new FreeBSDHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createNetBSDHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new NetBSDHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createMinixHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new MinixHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createDragonFlyHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new DragonFlyHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createLinuxHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new LinuxHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createTCEHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new TCEHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createWindowsHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new WindowsHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createMinGWHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new MinGWHostInfo(D, Triple);
-}
-
-const HostInfo *
-clang::driver::createUnknownHostInfo(const Driver &D,
- const llvm::Triple& Triple) {
- return new UnknownHostInfo(D, Triple);
-}
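
Every HostInfo subclass deleted above repeats the same lazy, per-architecture ToolChain cache; after this commit the driver constructs toolchains directly from the Driver and target Triple. A minimal, self-contained sketch of the recurring pattern (names hypothetical, using llvm::StringMap as the originals do):

    #include "llvm/ADT/StringMap.h"
    #include "llvm/ADT/StringRef.h"

    struct ToolChain {};  // stand-in for the real class

    struct ToolChainCache {
      llvm::StringMap<ToolChain*> Cache;

      ToolChain *get(llvm::StringRef Arch) {
        ToolChain *&TC = Cache[Arch];  // null on first lookup
        if (!TC)
          TC = new ToolChain();        // the real code picks a subclass here
        return TC;
      }

      ~ToolChainCache() {
        for (llvm::StringMap<ToolChain*>::iterator I = Cache.begin(),
             E = Cache.end(); I != E; ++I)
          delete I->second;
      }
    };
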
diff --git a/contrib/llvm/tools/clang/lib/Driver/Job.cpp b/contrib/llvm/tools/clang/lib/Driver/Job.cpp
index 5443d70..825c86a 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Job.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Job.cpp
@@ -16,6 +16,8 @@ using namespace clang::driver;
Job::~Job() {}
+void Command::anchor() {}
+
Command::Command(const Action &_Source, const Tool &_Creator,
const char *_Executable, const ArgStringList &_Arguments)
: Job(CommandClass), Source(_Source), Creator(_Creator),
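
The new Command::anchor() is the usual LLVM vtable-anchor idiom: giving a class one deliberately out-of-line virtual function pins its vtable and RTTI to a single object file instead of emitting weak copies in every translation unit that uses the class. A minimal sketch with a hypothetical class:

    // Header: declare one out-of-line virtual function (the "key function").
    struct Command {
      virtual ~Command();
      virtual void anchor();
    };

    // Exactly one .cpp defines them, so the vtable is emitted only there.
    Command::~Command() {}
    void Command::anchor() {}
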
diff --git a/contrib/llvm/tools/clang/lib/Driver/Option.cpp b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
index ee1963f..03360ea 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Option.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
@@ -61,8 +61,6 @@ Option::~Option() {
void Option::dump() const {
llvm::errs() << "<";
switch (Kind) {
- default:
- llvm_unreachable("Invalid kind");
#define P(N) case N: llvm::errs() << #N; break
P(GroupClass);
P(InputClass);
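
Dropping the default case from a switch that covers every enumerator lets the compiler's exhaustiveness checking do its job: clang's -Wcovered-switch-default flags the redundant default, and with it gone, -Wswitch warns if a new Option kind is ever added without a corresponding case. A small sketch of the idiom (hypothetical enum):

    enum OptionKind { GroupClass, InputClass, FlagClass };

    const char *kindName(OptionKind K) {
      switch (K) {              // fully covered, no default:
      case GroupClass: return "GroupClass";
      case InputClass: return "InputClass";
      case FlagClass:  return "FlagClass";
      }                         // -Wswitch flags any enumerator added later
      return 0;                 // keeps older compilers quiet about fallthrough
    }
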
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
index 9453848..db4d2a8 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
@@ -14,31 +14,31 @@
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/HostInfo.h"
#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang::driver;
using namespace clang;
-ToolChain::ToolChain(const HostInfo &H, const llvm::Triple &T)
- : Host(H), Triple(T) {
+ToolChain::ToolChain(const Driver &D, const llvm::Triple &T)
+ : D(D), Triple(T) {
}
ToolChain::~ToolChain() {
}
const Driver &ToolChain::getDriver() const {
- return Host.getDriver();
+ return D;
}
std::string ToolChain::GetFilePath(const char *Name) const {
- return Host.getDriver().GetFilePath(Name, *this);
+ return D.GetFilePath(Name, *this);
}
std::string ToolChain::GetProgramPath(const char *Name, bool WantFile) const {
- return Host.getDriver().GetProgramPath(Name, *this, WantFile);
+ return D.GetProgramPath(Name, *this, WantFile);
}
types::ID ToolChain::LookupTypeForExtension(const char *Ext) const {
@@ -55,6 +55,7 @@ void ToolChain::configureObjCRuntime(ObjCRuntime &runtime) const {
// Assume a minimal NeXT runtime.
runtime.HasARC = false;
runtime.HasWeak = false;
+ runtime.HasSubscripting = false;
runtime.HasTerminate = false;
return;
@@ -62,6 +63,7 @@ void ToolChain::configureObjCRuntime(ObjCRuntime &runtime) const {
// Assume a maximal GNU runtime.
runtime.HasARC = true;
runtime.HasWeak = true;
+ runtime.HasSubscripting = false; // to be added
runtime.HasTerminate = false; // to be added
return;
}
@@ -73,11 +75,15 @@ void ToolChain::configureObjCRuntime(ObjCRuntime &runtime) const {
// FIXME: tblgen this.
static const char *getARMTargetCPU(const ArgList &Args,
const llvm::Triple &Triple) {
- // FIXME: Warn on inconsistent use of -mcpu and -march.
-
- // If we have -mcpu=, use that.
- if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
- return A->getValue(Args);
+ // For Darwin targets, the -arch option (which is translated to a
+ // corresponding -march option) should determine the architecture
+ // (and the Mach-O slice) regardless of any -mcpu options.
+ if (!Triple.isOSDarwin()) {
+ // FIXME: Warn on inconsistent use of -mcpu and -march.
+ // If we have -mcpu=, use that.
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ return A->getValue(Args);
+ }
StringRef MArch;
if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
@@ -88,45 +94,27 @@ static const char *getARMTargetCPU(const ArgList &Args,
MArch = Triple.getArchName();
}
- if (MArch == "armv2" || MArch == "armv2a")
- return "arm2";
- if (MArch == "armv3")
- return "arm6";
- if (MArch == "armv3m")
- return "arm7m";
- if (MArch == "armv4" || MArch == "armv4t")
- return "arm7tdmi";
- if (MArch == "armv5" || MArch == "armv5t")
- return "arm10tdmi";
- if (MArch == "armv5e" || MArch == "armv5te")
- return "arm1026ejs";
- if (MArch == "armv5tej")
- return "arm926ej-s";
- if (MArch == "armv6" || MArch == "armv6k")
- return "arm1136jf-s";
- if (MArch == "armv6j")
- return "arm1136j-s";
- if (MArch == "armv6z" || MArch == "armv6zk")
- return "arm1176jzf-s";
- if (MArch == "armv6t2")
- return "arm1156t2-s";
- if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a")
- return "cortex-a8";
- if (MArch == "armv7r" || MArch == "armv7-r")
- return "cortex-r4";
- if (MArch == "armv7m" || MArch == "armv7-m")
- return "cortex-m3";
- if (MArch == "ep9312")
- return "ep9312";
- if (MArch == "iwmmxt")
- return "iwmmxt";
- if (MArch == "xscale")
- return "xscale";
- if (MArch == "armv6m" || MArch == "armv6-m")
- return "cortex-m0";
-
- // If all else failed, return the most base CPU LLVM supports.
- return "arm7tdmi";
+ return llvm::StringSwitch<const char *>(MArch)
+    .Cases("armv2", "armv2a", "arm2")
+ .Case("armv3", "arm6")
+ .Case("armv3m", "arm7m")
+ .Cases("armv4", "armv4t", "arm7tdmi")
+ .Cases("armv5", "armv5t", "arm10tdmi")
+ .Cases("armv5e", "armv5te", "arm1026ejs")
+ .Case("armv5tej", "arm926ej-s")
+ .Cases("armv6", "armv6k", "arm1136jf-s")
+ .Case("armv6j", "arm1136j-s")
+ .Cases("armv6z", "armv6zk", "arm1176jzf-s")
+ .Case("armv6t2", "arm1156t2-s")
+ .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
+ .Cases("armv7r", "armv7-r", "cortex-r4")
+ .Cases("armv7m", "armv7-m", "cortex-m3")
+ .Case("ep9312", "ep9312")
+ .Case("iwmmxt", "iwmmxt")
+ .Case("xscale", "xscale")
+ .Cases("armv6m", "armv6-m", "cortex-m0")
+    // If all else failed, return the most basic CPU LLVM supports.
+ .Default("arm7tdmi");
}
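
The replacement relies on llvm::StringSwitch, which evaluates to the value attached to the first matching Case/Cases entry, or to the Default. A minimal sketch of the same pattern (CPU names taken from the table above):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    const char *cpuForMArch(llvm::StringRef MArch) {
      return llvm::StringSwitch<const char *>(MArch)
        .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")  // any of three keys
        .Case("armv6t2", "arm1156t2-s")                    // a single key
        .Default("arm7tdmi");                              // fallback value
    }
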
/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
@@ -135,38 +123,23 @@ static const char *getARMTargetCPU(const ArgList &Args,
// FIXME: This is redundant with -mcpu; why does LLVM use this?
// FIXME: tblgen this, or kill it!
static const char *getLLVMArchSuffixForARM(StringRef CPU) {
- if (CPU == "arm7tdmi" || CPU == "arm7tdmi-s" || CPU == "arm710t" ||
- CPU == "arm720t" || CPU == "arm9" || CPU == "arm9tdmi" ||
- CPU == "arm920" || CPU == "arm920t" || CPU == "arm922t" ||
- CPU == "arm940t" || CPU == "ep9312")
- return "v4t";
-
- if (CPU == "arm10tdmi" || CPU == "arm1020t")
- return "v5";
-
- if (CPU == "arm9e" || CPU == "arm926ej-s" || CPU == "arm946e-s" ||
- CPU == "arm966e-s" || CPU == "arm968e-s" || CPU == "arm10e" ||
- CPU == "arm1020e" || CPU == "arm1022e" || CPU == "xscale" ||
- CPU == "iwmmxt")
- return "v5e";
-
- if (CPU == "arm1136j-s" || CPU == "arm1136jf-s" || CPU == "arm1176jz-s" ||
- CPU == "arm1176jzf-s" || CPU == "mpcorenovfp" || CPU == "mpcore")
- return "v6";
-
- if (CPU == "arm1156t2-s" || CPU == "arm1156t2f-s")
- return "v6t2";
-
- if (CPU == "cortex-a8" || CPU == "cortex-a9")
- return "v7";
-
- if (CPU == "cortex-m3")
- return "v7m";
-
- if (CPU == "cortex-m0")
- return "v6m";
-
- return "";
+ return llvm::StringSwitch<const char *>(CPU)
+ .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
+ .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
+ .Cases("arm920", "arm920t", "arm922t", "v4t")
+    .Cases("arm940t", "ep9312", "v4t")
+ .Cases("arm10tdmi", "arm1020t", "v5")
+ .Cases("arm9e", "arm926ej-s", "arm946e-s", "v5e")
+ .Cases("arm966e-s", "arm968e-s", "arm10e", "v5e")
+ .Cases("arm1020e", "arm1022e", "xscale", "iwmmxt", "v5e")
+ .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
+ .Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
+ .Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
+ .Cases("cortex-a8", "cortex-a9", "v7")
+ .Case("cortex-m3", "v7m")
+ .Case("cortex-m4", "v7m")
+ .Case("cortex-m0", "v6m")
+ .Default("");
}
std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
@@ -216,6 +189,22 @@ void ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Each toolchain should provide the appropriate include flags.
}
+ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
+ const ArgList &Args) const
+{
+ if (Arg *A = Args.getLastArg(options::OPT_rtlib_EQ)) {
+ StringRef Value = A->getValue(Args);
+ if (Value == "compiler-rt")
+ return ToolChain::RLT_CompilerRT;
+ if (Value == "libgcc")
+ return ToolChain::RLT_Libgcc;
+ getDriver().Diag(diag::err_drv_invalid_rtlib_name)
+ << A->getAsString(Args);
+ }
+
+ return GetDefaultRuntimeLibType();
+}
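
This is the standard last-arg-wins pattern for driver flags: given both -rtlib=libgcc and -rtlib=compiler-rt on one command line, the final flag selects RLT_CompilerRT, while an unrecognized value such as -rtlib=foo is reported through err_drv_invalid_rtlib_name before falling back to GetDefaultRuntimeLibType(). (The example invocations are hypothetical; only compiler-rt and libgcc are recognized at this point.)
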
+
ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
StringRef Value = A->getValue(Args);
@@ -230,6 +219,40 @@ ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
return ToolChain::CST_Libstdcxx;
}
+/// \brief Utility function to add a system include directory to CC1 arguments.
+/*static*/ void ToolChain::addSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path) {
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
+}
+
+/// \brief Utility function to add a system include directory with extern "C"
+/// semantics to CC1 arguments.
+///
+/// Note that this should be used rarely, and only for directories that
+/// historically and for legacy reasons are treated as having implicit extern
+/// "C" semantics. These semantics are *ignored* by and large today, but it's
+/// important to preserve the preprocessor changes resulting from the
+/// classification.
+/*static*/ void ToolChain::addExternCSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path) {
+ CC1Args.push_back("-internal-externc-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
+}
+
+/// \brief Utility function to add a list of system include directories to CC1.
+/*static*/ void ToolChain::addSystemIncludes(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ ArrayRef<StringRef> Paths) {
+ for (ArrayRef<StringRef>::iterator I = Paths.begin(), E = Paths.end();
+ I != E; ++I) {
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(*I));
+ }
+}
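
As a worked example (path hypothetical): addSystemInclude(DriverArgs, CC1Args, "/usr/include") appends the two cc1 arguments -internal-isystem and /usr/include; the extern "C" variant appends -internal-externc-isystem instead, and addSystemIncludes repeats the pair for each path in the list. Hoisting these helpers onto ToolChain lets every toolchain share them instead of the file-static copies removed from ToolChains.cpp below.
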
+
void ToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Header search paths should be handled by each of the subclasses.
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
index 80394ac..fa9ed49 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
@@ -9,16 +9,11 @@
#include "ToolChains.h"
-#ifdef HAVE_CLANG_CONFIG_H
-# include "clang/Config/config.h"
-#endif
-
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/HostInfo.h"
#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/OptTable.h"
#include "clang/Driver/Option.h"
@@ -38,16 +33,7 @@
#include <cstdlib> // ::getenv
-#include "llvm/Config/config.h" // for CXX_INCLUDE_ROOT
-
-// Include the necessary headers to interface with the Windows registry and
-// environment.
-#ifdef _MSC_VER
- #define WIN32_LEAN_AND_MEAN 1
- #include <Windows.h>
- #undef min
- #undef max
-#endif
+#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
#ifndef CLANG_PREFIX
#define CLANG_PREFIX
@@ -57,56 +43,26 @@ using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace clang;
-/// \brief Utility function to add a system include directory to CC1 arguments.
-static void addSystemInclude(const ArgList &DriverArgs, ArgStringList &CC1Args,
- const Twine &Path) {
- CC1Args.push_back("-internal-isystem");
- CC1Args.push_back(DriverArgs.MakeArgString(Path));
-}
-
-/// \brief Utility function to add a system include directory with extern "C"
-/// semantics to CC1 arguments.
-///
-/// Note that this should be used rarely, and only for directories that
-/// historically and for legacy reasons are treated as having implicit extern
-/// "C" semantics. These semantics are *ignored* by and large today, but its
-/// important to preserve the preprocessor changes resulting from the
-/// classification.
-static void addExternCSystemInclude(const ArgList &DriverArgs,
- ArgStringList &CC1Args, const Twine &Path) {
- CC1Args.push_back("-internal-externc-isystem");
- CC1Args.push_back(DriverArgs.MakeArgString(Path));
-}
-
-/// \brief Utility function to add a list of system include directories to CC1.
-static void addSystemIncludes(const ArgList &DriverArgs,
- ArgStringList &CC1Args,
- ArrayRef<StringRef> Paths) {
- for (ArrayRef<StringRef>::iterator I = Paths.begin(), E = Paths.end();
- I != E; ++I) {
- CC1Args.push_back("-internal-isystem");
- CC1Args.push_back(DriverArgs.MakeArgString(*I));
- }
-}
-
/// Darwin - Darwin tool chain for i386 and x86_64.
-Darwin::Darwin(const HostInfo &Host, const llvm::Triple& Triple)
- : ToolChain(Host, Triple), TargetInitialized(false),
+Darwin::Darwin(const Driver &D, const llvm::Triple& Triple)
+ : ToolChain(D, Triple), TargetInitialized(false),
ARCRuntimeForSimulator(ARCSimulator_None),
LibCXXForSimulator(LibCXXSimulator_None)
{
- // Compute the initial Darwin version based on the host.
- bool HadExtra;
- std::string OSName = Triple.getOSName();
- if (!Driver::GetReleaseVersion(&OSName.c_str()[6],
- DarwinVersion[0], DarwinVersion[1],
- DarwinVersion[2], HadExtra))
- getDriver().Diag(diag::err_drv_invalid_darwin_version) << OSName;
-
+ // Compute the initial Darwin version from the triple
+ unsigned Major, Minor, Micro;
+ if (!Triple.getMacOSXVersion(Major, Minor, Micro))
+ getDriver().Diag(diag::err_drv_invalid_darwin_version) <<
+ Triple.getOSName();
llvm::raw_string_ostream(MacosxVersionMin)
- << "10." << std::max(0, (int)DarwinVersion[0] - 4) << '.'
- << DarwinVersion[1];
+ << Major << '.' << Minor << '.' << Micro;
+
+ // FIXME: DarwinVersion is only used to find GCC's libexec directory.
+ // It should be removed when we stop supporting that.
+ DarwinVersion[0] = Minor + 4;
+ DarwinVersion[1] = Micro;
+ DarwinVersion[2] = 0;
}
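
A traced example of the new computation (inferred from the code, not stated in the commit): for the triple x86_64-apple-darwin11, Triple::getMacOSXVersion applies the usual darwin-major-minus-4 mapping to yield 10.7.0, so MacosxVersionMin becomes "10.7.0" and the legacy DarwinVersion array is reconstructed as {11, 0, 0} (Minor + 4, Micro, 0).
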
types::ID Darwin::LookupTypeForExtension(const char *Ext) const {
@@ -141,12 +97,17 @@ bool Darwin::hasARCRuntime() const {
return !isMacosxVersionLT(10, 7);
}
+bool Darwin::hasSubscriptingRuntime() const {
+ return !isTargetIPhoneOS() && !isMacosxVersionLT(10, 8);
+}
+
/// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
void Darwin::configureObjCRuntime(ObjCRuntime &runtime) const {
if (runtime.getKind() != ObjCRuntime::NeXT)
return ToolChain::configureObjCRuntime(runtime);
runtime.HasARC = runtime.HasWeak = hasARCRuntime();
+ runtime.HasSubscripting = hasSubscriptingRuntime();
// So far, objc_terminate is only available in iOS 5.
// FIXME: do the simulator logic properly.
@@ -225,18 +186,16 @@ std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
if (!isTargetInitialized())
return Triple.getTriple();
- unsigned Version[3];
- getTargetVersion(Version);
-
- llvm::SmallString<16> Str;
- llvm::raw_svector_ostream(Str)
- << (isTargetIPhoneOS() ? "ios" : "macosx")
- << Version[0] << "." << Version[1] << "." << Version[2];
- Triple.setOSName(Str.str());
+ SmallString<16> Str;
+ Str += isTargetIPhoneOS() ? "ios" : "macosx";
+ Str += getTargetVersion().getAsString();
+ Triple.setOSName(Str);
return Triple.getTriple();
}
+void Generic_ELF::anchor() {}
+
Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const {
Action::ActionClass Key;
@@ -255,15 +214,9 @@ Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA,
} else
Key = JA.getKind();
- // FIXME: This doesn't belong here, but ideally we will support static soon
- // anyway.
- bool HasStatic = (C.getArgs().hasArg(options::OPT_mkernel) ||
- C.getArgs().hasArg(options::OPT_static) ||
- C.getArgs().hasArg(options::OPT_fapple_kext));
- bool IsIADefault = IsIntegratedAssemblerDefault() && !HasStatic;
bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
options::OPT_no_integrated_as,
- IsIADefault);
+ IsIntegratedAssemblerDefault());
Tool *&T = Tools[Key];
if (!T) {
@@ -274,6 +227,7 @@ Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA,
case Action::PreprocessJobClass:
T = new tools::darwin::Preprocess(*this); break;
case Action::AnalyzeJobClass:
+ case Action::MigrateJobClass:
T = new tools::Clang(*this); break;
case Action::PrecompileJobClass:
case Action::CompileJobClass:
@@ -300,8 +254,8 @@ Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA,
}
-DarwinClang::DarwinClang(const HostInfo &Host, const llvm::Triple& Triple)
- : Darwin(Host, Triple)
+DarwinClang::DarwinClang(const Driver &D, const llvm::Triple& Triple)
+ : Darwin(D, Triple)
{
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
@@ -344,8 +298,11 @@ void DarwinClang::AddLinkSearchPathArgs(const ArgList &Args,
// Unfortunately, we still might depend on a few of the libraries that are
// only available in the gcc library directory (in particular
// libstdc++.dylib). For now, hardcode the path to the known install location.
+ // FIXME: This should get ripped out someday. However, when building on
+ // 10.6 (darwin10), we're still relying on this to find libstdc++.dylib.
llvm::sys::Path P(getDriver().Dir);
P.eraseComponent(); // .../usr/bin -> ../usr
+ P.appendComponent("llvm-gcc-4.2");
P.appendComponent("lib");
P.appendComponent("gcc");
switch (getTriple().getArch()) {
@@ -416,9 +373,11 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
P.appendComponent("libarclite_");
std::string s = P.str();
// Mash in the platform.
- if (isTargetIPhoneOS())
+ if (isTargetIOSSimulator())
+ s += "iphonesimulator";
+ else if (isTargetIPhoneOS())
s += "iphoneos";
- // FIXME: isTargetIphoneOSSimulator() is not returning true.
+ // FIXME: Remove this once we depend fully on -mios-simulator-version-min.
else if (ARCRuntimeForSimulator != ARCSimulator_None)
s += "iphonesimulator";
else
@@ -445,6 +404,16 @@ void DarwinClang::AddLinkRuntimeLib(const ArgList &Args,
void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
+ // Darwin only supports the compiler-rt based runtime libraries.
+ switch (GetRuntimeLibType(Args)) {
+ case ToolChain::RLT_CompilerRT:
+ break;
+ default:
+ getDriver().Diag(diag::err_drv_unsupported_rtlib_for_platform)
+ << Args.getLastArg(options::OPT_rtlib_EQ)->getValue(Args) << "darwin";
+ return;
+ }
+
// Darwin doesn't support real static executables, don't link any runtime
// libraries with -static.
if (Args.hasArg(options::OPT_static))
@@ -459,6 +428,38 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
return;
}
+ // If we are building profile support, link that library in.
+ if (Args.hasArg(options::OPT_fprofile_arcs) ||
+ Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage)) {
+ // Select the appropriate runtime library for the target.
+ if (isTargetIPhoneOS()) {
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_ios.a");
+ } else {
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_osx.a");
+ }
+ }
+
+ // Add ASAN runtime library, if required. Dynamic libraries and bundles
+ // should not be linked with the runtime library.
+ if (Args.hasFlag(options::OPT_faddress_sanitizer,
+ options::OPT_fno_address_sanitizer, false)) {
+ if (Args.hasArg(options::OPT_dynamiclib) ||
+ Args.hasArg(options::OPT_bundle)) return;
+ if (isTargetIPhoneOS()) {
+ getDriver().Diag(diag::err_drv_clang_unsupported_per_platform)
+ << "-faddress-sanitizer";
+ } else {
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.asan_osx.a");
+
+ // The ASAN runtime library requires C++ and CoreFoundation.
+ AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("CoreFoundation");
+ }
+ }
+
// Otherwise link libSystem, then the dynamic runtime library, and finally any
// target specific static runtime library.
CmdArgs.push_back("-lSystem");
@@ -540,7 +541,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
// '-miphoneos-version-min' to help us know whether there is an ARC runtime
// or not; try to parse a __IPHONE_OS_VERSION_MIN_REQUIRED
// define passed in command-line.
- if (!iOSVersion) {
+ if (!iOSVersion && !iOSSimVersion) {
for (arg_iterator it = Args.filtered_begin(options::OPT_D),
ie = Args.filtered_end(); it != ie; ++it) {
StringRef define = (*it)->getValue(Args);
@@ -717,6 +718,8 @@ void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
}
// Otherwise, look in the root.
+ // FIXME: This should be removed someday when we don't have to care about
+ // 10.6 and earlier, where /usr/lib/libstdc++.dylib does not exist.
if ((llvm::sys::fs::exists("/usr/lib/libstdc++.dylib", Exists) || !Exists)&&
(!llvm::sys::fs::exists("/usr/lib/libstdc++.6.dylib", Exists) && Exists)){
CmdArgs.push_back("/usr/lib/libstdc++.6.dylib");
@@ -1022,6 +1025,10 @@ bool Darwin::SupportsObjCGC() const {
return !isTargetIPhoneOS();
}
+bool Darwin::SupportsObjCARC() const {
+ return isTargetIPhoneOS() || !isMacosxVersionLT(10, 6);
+}
+
std::string
Darwin_Generic_GCC::ComputeEffectiveClangTriple(const ArgList &Args,
types::ID InputType) const {
@@ -1032,8 +1039,345 @@ Darwin_Generic_GCC::ComputeEffectiveClangTriple(const ArgList &Args,
/// all subcommands; this relies on gcc translating the majority of
/// command line options.
-Generic_GCC::Generic_GCC(const HostInfo &Host, const llvm::Triple& Triple)
- : ToolChain(Host, Triple) {
+/// \brief Parse a GCCVersion object out of a string of text.
+///
+/// This is the primary means of forming GCCVersion objects.
+/*static*/
+Generic_GCC::GCCVersion Linux::GCCVersion::Parse(StringRef VersionText) {
+ const GCCVersion BadVersion = { VersionText.str(), -1, -1, -1, "" };
+ std::pair<StringRef, StringRef> First = VersionText.split('.');
+ std::pair<StringRef, StringRef> Second = First.second.split('.');
+
+ GCCVersion GoodVersion = { VersionText.str(), -1, -1, -1, "" };
+ if (First.first.getAsInteger(10, GoodVersion.Major) ||
+ GoodVersion.Major < 0)
+ return BadVersion;
+ if (Second.first.getAsInteger(10, GoodVersion.Minor) ||
+ GoodVersion.Minor < 0)
+ return BadVersion;
+
+ // First look for a number prefix and parse that if present. Otherwise just
+ // stash the entire patch string in the suffix, and leave the number
+  // unspecified. This covers version strings such as:
+ // 4.4
+ // 4.4.0
+ // 4.4.x
+ // 4.4.2-rc4
+ // 4.4.x-patched
+ // And retains any patch number it finds.
+ StringRef PatchText = GoodVersion.PatchSuffix = Second.second.str();
+ if (!PatchText.empty()) {
+ if (unsigned EndNumber = PatchText.find_first_not_of("0123456789")) {
+ // Try to parse the number and any suffix.
+ if (PatchText.slice(0, EndNumber).getAsInteger(10, GoodVersion.Patch) ||
+ GoodVersion.Patch < 0)
+ return BadVersion;
+ GoodVersion.PatchSuffix = PatchText.substr(EndNumber).str();
+ }
+ }
+
+ return GoodVersion;
+}
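
A few worked examples of the parse above (results traced from the code, not stated in the commit):

    "4.4"       -> Major 4, Minor 4, Patch -1, PatchSuffix ""
    "4.4.2"     -> Major 4, Minor 4, Patch  2, PatchSuffix ""
    "4.4.2-rc4" -> Major 4, Minor 4, Patch  2, PatchSuffix "-rc4"
    "4.4.x"     -> Major 4, Minor 4, Patch -1, PatchSuffix "x"
    "banana"    -> BadVersion (the major component fails getAsInteger)
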
+
+/// \brief Less-than for GCCVersion, implementing a Strict Weak Ordering.
+bool Generic_GCC::GCCVersion::operator<(const GCCVersion &RHS) const {
+ if (Major < RHS.Major) return true; if (Major > RHS.Major) return false;
+ if (Minor < RHS.Minor) return true; if (Minor > RHS.Minor) return false;
+
+  // Note that we rank versions with *no* patch specified as better than ones
+  // that hard-code a patch version. Thus if the RHS has no patch, it always
+ // wins, and the LHS only wins if it has no patch and the RHS does have
+ // a patch.
+ if (RHS.Patch == -1) return true; if (Patch == -1) return false;
+ if (Patch < RHS.Patch) return true; if (Patch > RHS.Patch) return false;
+
+ // Finally, between completely tied version numbers, the version with the
+ // suffix loses as we prefer full releases.
+ if (RHS.PatchSuffix.empty()) return true;
+ return false;
+}
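
Traced examples of the ordering (again inferred from the code): with equal major and minor numbers, 4.6 ranks above 4.6.2 because an unspecified patch always wins; 4.6.3 ranks above 4.6.2 numerically; and 4.6.2 ranks above 4.6.2-rc1 because on an otherwise exact tie a bare release beats a suffixed one.
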
+
+static StringRef getGCCToolchainDir(const ArgList &Args) {
+ const Arg *A = Args.getLastArg(options::OPT_gcc_toolchain);
+ if (A)
+ return A->getValue(Args);
+ return GCC_INSTALL_PREFIX;
+}
+
+/// \brief Construct a GCCInstallationDetector from the driver.
+///
+/// This performs all of the autodetection and sets up the various paths.
+/// Once constructed, a GCCInstallation is essentially immutable.
+///
+/// FIXME: We shouldn't need an explicit TargetTriple parameter here, and
+/// should instead pull the target out of the driver. This is currently
+/// necessary because the driver doesn't store the final version of the target
+/// triple.
+Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
+ const Driver &D,
+ const llvm::Triple &TargetTriple,
+ const ArgList &Args)
+ : IsValid(false) {
+ llvm::Triple MultiarchTriple
+ = TargetTriple.isArch32Bit() ? TargetTriple.get64BitArchVariant()
+ : TargetTriple.get32BitArchVariant();
+ llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
+ // The library directories which may contain GCC installations.
+ SmallVector<StringRef, 4> CandidateLibDirs, CandidateMultiarchLibDirs;
+ // The compatible GCC triples for this particular architecture.
+ SmallVector<StringRef, 10> CandidateTripleAliases;
+ SmallVector<StringRef, 10> CandidateMultiarchTripleAliases;
+ CollectLibDirsAndTriples(TargetTriple, MultiarchTriple, CandidateLibDirs,
+ CandidateTripleAliases,
+ CandidateMultiarchLibDirs,
+ CandidateMultiarchTripleAliases);
+
+ // Compute the set of prefixes for our search.
+ SmallVector<std::string, 8> Prefixes(D.PrefixDirs.begin(),
+ D.PrefixDirs.end());
+
+ StringRef GCCToolchainDir = getGCCToolchainDir(Args);
+ if (GCCToolchainDir != "") {
+ if (GCCToolchainDir.back() == '/')
+ GCCToolchainDir = GCCToolchainDir.drop_back(); // remove the /
+
+ Prefixes.push_back(GCCToolchainDir);
+ } else {
+ Prefixes.push_back(D.SysRoot);
+ Prefixes.push_back(D.SysRoot + "/usr");
+ Prefixes.push_back(D.InstalledDir + "/..");
+ }
+
+ // Loop over the various components which exist and select the best GCC
+ // installation available. GCC installs are ranked by version number.
+ Version = GCCVersion::Parse("0.0.0");
+ for (unsigned i = 0, ie = Prefixes.size(); i < ie; ++i) {
+ if (!llvm::sys::fs::exists(Prefixes[i]))
+ continue;
+ for (unsigned j = 0, je = CandidateLibDirs.size(); j < je; ++j) {
+ const std::string LibDir = Prefixes[i] + CandidateLibDirs[j].str();
+ if (!llvm::sys::fs::exists(LibDir))
+ continue;
+ for (unsigned k = 0, ke = CandidateTripleAliases.size(); k < ke; ++k)
+ ScanLibDirForGCCTriple(TargetArch, LibDir, CandidateTripleAliases[k]);
+ }
+ for (unsigned j = 0, je = CandidateMultiarchLibDirs.size(); j < je; ++j) {
+ const std::string LibDir
+ = Prefixes[i] + CandidateMultiarchLibDirs[j].str();
+ if (!llvm::sys::fs::exists(LibDir))
+ continue;
+ for (unsigned k = 0, ke = CandidateMultiarchTripleAliases.size(); k < ke;
+ ++k)
+ ScanLibDirForGCCTriple(TargetArch, LibDir,
+ CandidateMultiarchTripleAliases[k],
+ /*NeedsMultiarchSuffix=*/true);
+ }
+ }
+}
+
+/*static*/ void Generic_GCC::GCCInstallationDetector::CollectLibDirsAndTriples(
+ const llvm::Triple &TargetTriple,
+ const llvm::Triple &MultiarchTriple,
+ SmallVectorImpl<StringRef> &LibDirs,
+ SmallVectorImpl<StringRef> &TripleAliases,
+ SmallVectorImpl<StringRef> &MultiarchLibDirs,
+ SmallVectorImpl<StringRef> &MultiarchTripleAliases) {
+ // Declare a bunch of static data sets that we'll select between below. These
+ // are specifically designed to always refer to string literals to avoid any
+ // lifetime or initialization issues.
+ static const char *const ARMLibDirs[] = { "/lib" };
+ static const char *const ARMTriples[] = {
+ "arm-linux-gnueabi",
+ "arm-linux-androideabi"
+ };
+
+ static const char *const X86_64LibDirs[] = { "/lib64", "/lib" };
+ static const char *const X86_64Triples[] = {
+ "x86_64-linux-gnu",
+ "x86_64-unknown-linux-gnu",
+ "x86_64-pc-linux-gnu",
+ "x86_64-redhat-linux6E",
+ "x86_64-redhat-linux",
+ "x86_64-suse-linux",
+ "x86_64-manbo-linux-gnu",
+ "x86_64-linux-gnu",
+ "x86_64-slackware-linux"
+ };
+ static const char *const X86LibDirs[] = { "/lib32", "/lib" };
+ static const char *const X86Triples[] = {
+ "i686-linux-gnu",
+ "i686-pc-linux-gnu",
+ "i486-linux-gnu",
+ "i386-linux-gnu",
+ "i686-redhat-linux",
+ "i586-redhat-linux",
+ "i386-redhat-linux",
+ "i586-suse-linux",
+ "i486-slackware-linux"
+ };
+
+ static const char *const MIPSLibDirs[] = { "/lib" };
+ static const char *const MIPSTriples[] = { "mips-linux-gnu" };
+ static const char *const MIPSELLibDirs[] = { "/lib" };
+ static const char *const MIPSELTriples[] = { "mipsel-linux-gnu" };
+
+ static const char *const PPCLibDirs[] = { "/lib32", "/lib" };
+ static const char *const PPCTriples[] = {
+ "powerpc-linux-gnu",
+ "powerpc-unknown-linux-gnu",
+ "powerpc-suse-linux"
+ };
+ static const char *const PPC64LibDirs[] = { "/lib64", "/lib" };
+ static const char *const PPC64Triples[] = {
+ "powerpc64-linux-gnu",
+ "powerpc64-unknown-linux-gnu",
+ "powerpc64-suse-linux",
+ "ppc64-redhat-linux"
+ };
+
+ switch (TargetTriple.getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ LibDirs.append(ARMLibDirs, ARMLibDirs + llvm::array_lengthof(ARMLibDirs));
+ TripleAliases.append(
+ ARMTriples, ARMTriples + llvm::array_lengthof(ARMTriples));
+ break;
+ case llvm::Triple::x86_64:
+ LibDirs.append(
+ X86_64LibDirs, X86_64LibDirs + llvm::array_lengthof(X86_64LibDirs));
+ TripleAliases.append(
+ X86_64Triples, X86_64Triples + llvm::array_lengthof(X86_64Triples));
+ MultiarchLibDirs.append(
+ X86LibDirs, X86LibDirs + llvm::array_lengthof(X86LibDirs));
+ MultiarchTripleAliases.append(
+ X86Triples, X86Triples + llvm::array_lengthof(X86Triples));
+ break;
+ case llvm::Triple::x86:
+ LibDirs.append(X86LibDirs, X86LibDirs + llvm::array_lengthof(X86LibDirs));
+ TripleAliases.append(
+ X86Triples, X86Triples + llvm::array_lengthof(X86Triples));
+ MultiarchLibDirs.append(
+ X86_64LibDirs, X86_64LibDirs + llvm::array_lengthof(X86_64LibDirs));
+ MultiarchTripleAliases.append(
+ X86_64Triples, X86_64Triples + llvm::array_lengthof(X86_64Triples));
+ break;
+ case llvm::Triple::mips:
+ LibDirs.append(
+ MIPSLibDirs, MIPSLibDirs + llvm::array_lengthof(MIPSLibDirs));
+ TripleAliases.append(
+ MIPSTriples, MIPSTriples + llvm::array_lengthof(MIPSTriples));
+ break;
+ case llvm::Triple::mipsel:
+ LibDirs.append(
+ MIPSELLibDirs, MIPSELLibDirs + llvm::array_lengthof(MIPSELLibDirs));
+ TripleAliases.append(
+ MIPSELTriples, MIPSELTriples + llvm::array_lengthof(MIPSELTriples));
+ break;
+ case llvm::Triple::ppc:
+ LibDirs.append(PPCLibDirs, PPCLibDirs + llvm::array_lengthof(PPCLibDirs));
+ TripleAliases.append(
+ PPCTriples, PPCTriples + llvm::array_lengthof(PPCTriples));
+ MultiarchLibDirs.append(
+ PPC64LibDirs, PPC64LibDirs + llvm::array_lengthof(PPC64LibDirs));
+ MultiarchTripleAliases.append(
+ PPC64Triples, PPC64Triples + llvm::array_lengthof(PPC64Triples));
+ break;
+ case llvm::Triple::ppc64:
+ LibDirs.append(
+ PPC64LibDirs, PPC64LibDirs + llvm::array_lengthof(PPC64LibDirs));
+ TripleAliases.append(
+ PPC64Triples, PPC64Triples + llvm::array_lengthof(PPC64Triples));
+ MultiarchLibDirs.append(
+ PPCLibDirs, PPCLibDirs + llvm::array_lengthof(PPCLibDirs));
+ MultiarchTripleAliases.append(
+ PPCTriples, PPCTriples + llvm::array_lengthof(PPCTriples));
+ break;
+
+ default:
+ // By default, just rely on the standard lib directories and the original
+ // triple.
+ break;
+ }
+
+  // Always append the driver's target triple to the end, in case it doesn't
+ // match any of our aliases.
+ TripleAliases.push_back(TargetTriple.str());
+
+ // Also include the multiarch variant if it's different.
+ if (TargetTriple.str() != MultiarchTriple.str())
+ MultiarchTripleAliases.push_back(MultiarchTriple.str());
+}
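
For a concrete x86_64-linux-gnu target (a traced case, not stated in the commit): LibDirs becomes { /lib64, /lib } paired with the x86_64 triple aliases, while the multiarch lists get { /lib32, /lib } and the i?86 aliases, so a 64-bit driver can still locate a 32-bit GCC installation and vice versa.
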
+
+void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
+ llvm::Triple::ArchType TargetArch, const std::string &LibDir,
+ StringRef CandidateTriple, bool NeedsMultiarchSuffix) {
+ // There are various different suffixes involving the triple we
+ // check for. We also record what is necessary to walk from each back
+ // up to the lib directory.
+ const std::string LibSuffixes[] = {
+ "/gcc/" + CandidateTriple.str(),
+ "/" + CandidateTriple.str() + "/gcc/" + CandidateTriple.str(),
+
+ // Ubuntu has a strange mis-matched pair of triples that this happens to
+ // match.
+ // FIXME: It may be worthwhile to generalize this and look for a second
+ // triple.
+ "/i386-linux-gnu/gcc/" + CandidateTriple.str()
+ };
+ const std::string InstallSuffixes[] = {
+ "/../../..",
+ "/../../../..",
+ "/../../../.."
+ };
+ // Only look at the final, weird Ubuntu suffix for i386-linux-gnu.
+ const unsigned NumLibSuffixes = (llvm::array_lengthof(LibSuffixes) -
+ (TargetArch != llvm::Triple::x86));
+ for (unsigned i = 0; i < NumLibSuffixes; ++i) {
+ StringRef LibSuffix = LibSuffixes[i];
+ llvm::error_code EC;
+ for (llvm::sys::fs::directory_iterator LI(LibDir + LibSuffix, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
+ static const GCCVersion MinVersion = { "4.1.1", 4, 1, 1, "" };
+ if (CandidateVersion < MinVersion)
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+
+ // Some versions of SUSE and Fedora on ppc64 put 32-bit libs
+ // in what would normally be GCCInstallPath and put the 64-bit
+ // libs in a subdirectory named 64. The simple logic we follow is that
+ // *if* there is a subdirectory of the right name with crtbegin.o in it,
+ // we use that. If not, and if not a multiarch triple, we look for
+ // crtbegin.o without the subdirectory.
+ StringRef MultiarchSuffix
+ = (TargetArch == llvm::Triple::x86_64 ||
+ TargetArch == llvm::Triple::ppc64) ? "/64" : "/32";
+ if (llvm::sys::fs::exists(LI->path() + MultiarchSuffix + "/crtbegin.o")) {
+ GCCMultiarchSuffix = MultiarchSuffix.str();
+ } else {
+ if (NeedsMultiarchSuffix ||
+ !llvm::sys::fs::exists(LI->path() + "/crtbegin.o"))
+ continue;
+ GCCMultiarchSuffix.clear();
+ }
+
+ Version = CandidateVersion;
+ GCCTriple.setTriple(CandidateTriple);
+ // FIXME: We hack together the directory name here instead of
+ // using LI to ensure stable path separators across Windows and
+ // Linux.
+ GCCInstallPath = LibDir + LibSuffixes[i] + "/" + VersionText.str();
+ GCCParentLibPath = GCCInstallPath + InstallSuffixes[i];
+ IsValid = true;
+ }
+ }
+}
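
A worked example of the scan (paths hypothetical): with prefix /usr, lib dir /lib, and candidate triple x86_64-linux-gnu, the directory iterator enumerates /usr/lib/gcc/x86_64-linux-gnu/*; a 4.6.1 subdirectory containing crtbegin.o beats a 4.4 sibling, since both clear the 4.1.1 minimum, yielding

    GCCInstallPath   = /usr/lib/gcc/x86_64-linux-gnu/4.6.1
    GCCParentLibPath = GCCInstallPath + "/../../.."   (i.e. /usr/lib)
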
+
+Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple), GCCInstallation(getDriver(), Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
@@ -1066,6 +1410,7 @@ Tool &Generic_GCC::SelectTool(const Compilation &C,
case Action::PrecompileJobClass:
T = new tools::gcc::Precompile(*this); break;
case Action::AnalyzeJobClass:
+ case Action::MigrateJobClass:
T = new tools::Clang(*this); break;
case Action::CompileJobClass:
T = new tools::gcc::Compile(*this); break;
@@ -1101,13 +1446,80 @@ const char *Generic_GCC::GetDefaultRelocationModel() const {
const char *Generic_GCC::GetForcedPicModel() const {
return 0;
}
+/// Hexagon Toolchain
+
+Hexagon_TC::Hexagon_TC(const Driver &D, const llvm::Triple& Triple)
+ : ToolChain(D, Triple) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir.c_str())
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+Hexagon_TC::~Hexagon_TC() {
+ // Free tool implementations.
+ for (llvm::DenseMap<unsigned, Tool*>::iterator
+ it = Tools.begin(), ie = Tools.end(); it != ie; ++it)
+ delete it->second;
+}
+
+Tool &Hexagon_TC::SelectTool(const Compilation &C,
+ const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ // if (JA.getKind () == Action::CompileJobClass)
+ // Key = JA.getKind ();
+ // else
+
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+ // if ((JA.getKind () == Action::CompileJobClass)
+ // && (JA.getType () != types::TY_LTO_BC)) {
+ // Key = JA.getKind ();
+ // }
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::InputClass:
+ case Action::BindArchClass:
+ assert(0 && "Invalid tool kind.");
+ case Action::AnalyzeJobClass:
+ T = new tools::Clang(*this); break;
+ case Action::AssembleJobClass:
+ T = new tools::hexagon::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::hexagon::Link(*this); break;
+ default:
+ assert(false && "Unsupported action for Hexagon target.");
+ }
+ }
+
+ return *T;
+}
+
+bool Hexagon_TC::IsUnwindTablesDefault() const {
+ // FIXME: Gross; we should probably have some separate target
+ // definition, possibly even reusing the one in clang.
+ return getArchName() == "x86_64";
+}
+
+const char *Hexagon_TC::GetDefaultRelocationModel() const {
+ return "static";
+}
+
+const char *Hexagon_TC::GetForcedPicModel() const {
+ return 0;
+} // End Hexagon
+
/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
/// Currently does not support anything else but compilation.
-TCEToolChain::TCEToolChain(const HostInfo &Host, const llvm::Triple& Triple)
- : ToolChain(Host, Triple) {
+TCEToolChain::TCEToolChain(const Driver &D, const llvm::Triple& Triple)
+ : ToolChain(D, Triple) {
// Path mangling to find libexec
std::string Path(getDriver().Dir);
@@ -1159,8 +1571,8 @@ Tool &TCEToolChain::SelectTool(const Compilation &C,
/// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly.
-OpenBSD::OpenBSD(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_ELF(Host, Triple) {
+OpenBSD::OpenBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
getFilePaths().push_back(getDriver().Dir + "/../lib");
getFilePaths().push_back("/usr/lib");
}
@@ -1199,26 +1611,17 @@ Tool &OpenBSD::SelectTool(const Compilation &C, const JobAction &JA,
/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
-FreeBSD::FreeBSD(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_ELF(Host, Triple) {
-
- // Determine if we are compiling 32-bit code on an x86_64 platform.
- bool Lib32 = false;
- if (Triple.getArch() == llvm::Triple::x86 &&
- llvm::Triple(getDriver().DefaultHostTriple).getArch() ==
- llvm::Triple::x86_64)
- Lib32 = true;
+FreeBSD::FreeBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
- if (Triple.getArch() == llvm::Triple::ppc &&
- llvm::Triple(getDriver().DefaultHostTriple).getArch() ==
- llvm::Triple::ppc64)
- Lib32 = true;
-
- if (Lib32) {
- getFilePaths().push_back(CLANG_PREFIX "/usr/lib32");
- } else {
- getFilePaths().push_back(CLANG_PREFIX "/usr/lib");
- }
+ // When targeting 32-bit platforms, look for '/usr/lib32/crt1.o' and fall
+ // back to '/usr/lib' if it doesn't exist.
+ if ((Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::ppc) &&
+ llvm::sys::fs::exists(getDriver().SysRoot + CLANG_PREFIX "/usr/lib32/crt1.o"))
+ getFilePaths().push_back(getDriver().SysRoot + CLANG_PREFIX "/usr/lib32");
+ else
+ getFilePaths().push_back(getDriver().SysRoot + CLANG_PREFIX "/usr/lib");
}
Tool &FreeBSD::SelectTool(const Compilation &C, const JobAction &JA,
@@ -1254,21 +1657,19 @@ Tool &FreeBSD::SelectTool(const Compilation &C, const JobAction &JA,
/// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
-NetBSD::NetBSD(const HostInfo &Host, const llvm::Triple& Triple,
- const llvm::Triple& ToolTriple)
- : Generic_ELF(Host, Triple), ToolTriple(ToolTriple) {
-
- // Determine if we are compiling 32-bit code on an x86_64 platform.
- bool Lib32 = false;
- if (ToolTriple.getArch() == llvm::Triple::x86_64 &&
- Triple.getArch() == llvm::Triple::x86)
- Lib32 = true;
+NetBSD::NetBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
if (getDriver().UseStdLib) {
- if (Lib32)
+ // When targeting a 32-bit platform, try the special directory used on
+ // 64-bit hosts, and only fall back to the main library directory if that
+ // doesn't work.
+ // FIXME: It'd be nicer to test if this directory exists, but I'm not sure
+  // what logic is needed to emulate the '=' prefix here.
+ if (Triple.getArch() == llvm::Triple::x86)
getFilePaths().push_back("=/usr/lib/i386");
- else
- getFilePaths().push_back("=/usr/lib");
+
+ getFilePaths().push_back("=/usr/lib");
}
}
@@ -1291,10 +1692,10 @@ Tool &NetBSD::SelectTool(const Compilation &C, const JobAction &JA,
if (UseIntegratedAs)
T = new tools::ClangAs(*this);
else
- T = new tools::netbsd::Assemble(*this, ToolTriple);
+ T = new tools::netbsd::Assemble(*this);
break;
case Action::LinkJobClass:
- T = new tools::netbsd::Link(*this, ToolTriple);
+ T = new tools::netbsd::Link(*this);
break;
default:
T = &Generic_GCC::SelectTool(C, JA, Inputs);
@@ -1306,12 +1707,10 @@ Tool &NetBSD::SelectTool(const Compilation &C, const JobAction &JA,
/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
-Minix::Minix(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_GCC(Host, Triple) {
+Minix::Minix(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
getFilePaths().push_back(getDriver().Dir + "/../lib");
getFilePaths().push_back("/usr/lib");
- getFilePaths().push_back("/usr/gnu/lib");
- getFilePaths().push_back("/usr/gnu/lib/gcc/i686-pc-minix/4.4.3");
}
Tool &Minix::SelectTool(const Compilation &C, const JobAction &JA,
@@ -1339,8 +1738,9 @@ Tool &Minix::SelectTool(const Compilation &C, const JobAction &JA,
/// AuroraUX - AuroraUX tool chain which can call as(1) and ld(1) directly.
-AuroraUX::AuroraUX(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_GCC(Host, Triple) {
+AuroraUX::AuroraUX(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
@@ -1377,6 +1777,42 @@ Tool &AuroraUX::SelectTool(const Compilation &C, const JobAction &JA,
return *T;
}
+/// Solaris - Solaris tool chain which can call as(1) and ld(1) directly.
+
+Solaris::Solaris(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {
+
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+}
+
+Tool &Solaris::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ T = new tools::solaris::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::solaris::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
/// Linux toolchain (very bare-bones at the moment).
@@ -1392,6 +1828,7 @@ enum LinuxDistro {
Fedora13,
Fedora14,
Fedora15,
+ Fedora16,
FedoraRawhide,
OpenSuse11_3,
OpenSuse11_4,
@@ -1404,62 +1841,55 @@ enum LinuxDistro {
UbuntuMaverick,
UbuntuNatty,
UbuntuOneiric,
+ UbuntuPrecise,
UnknownDistro
};
static bool IsRedhat(enum LinuxDistro Distro) {
- return Distro == Fedora13 || Distro == Fedora14 ||
- Distro == Fedora15 || Distro == FedoraRawhide ||
- Distro == RHEL4 || Distro == RHEL5 || Distro == RHEL6;
+ return (Distro >= Fedora13 && Distro <= FedoraRawhide) ||
+ (Distro >= RHEL4 && Distro <= RHEL6);
}
static bool IsOpenSuse(enum LinuxDistro Distro) {
- return Distro == OpenSuse11_3 || Distro == OpenSuse11_4 ||
- Distro == OpenSuse12_1;
+ return Distro >= OpenSuse11_3 && Distro <= OpenSuse12_1;
}
static bool IsDebian(enum LinuxDistro Distro) {
- return Distro == DebianLenny || Distro == DebianSqueeze ||
- Distro == DebianWheezy;
+ return Distro >= DebianLenny && Distro <= DebianWheezy;
}
static bool IsUbuntu(enum LinuxDistro Distro) {
- return Distro == UbuntuHardy || Distro == UbuntuIntrepid ||
- Distro == UbuntuLucid || Distro == UbuntuMaverick ||
- Distro == UbuntuJaunty || Distro == UbuntuKarmic ||
- Distro == UbuntuNatty || Distro == UbuntuOneiric;
+ return Distro >= UbuntuHardy && Distro <= UbuntuPrecise;
}
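
These rewritten predicates compress the old equality chains into range checks, which stay correct only while the LinuxDistro enumerators remain declared in release order. A stand-alone sketch of the idiom and its ordering assumption (the enum here is a pared-down stand-in, not the real one):

    #include <cassert>

    // Pared-down stand-in for LinuxDistro; declaration order is the contract:
    // releases must stay listed oldest to newest for the range check to hold.
    enum Distro { UbuntuHardy, UbuntuLucid, UbuntuOneiric, UbuntuPrecise,
                  UnknownDistro };

    static bool IsUbuntu(Distro D) {
      return D >= UbuntuHardy && D <= UbuntuPrecise;
    }

    int main() {
      assert(IsUbuntu(UbuntuLucid));
      assert(!IsUbuntu(UnknownDistro));
      return 0;
    }
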
static LinuxDistro DetectLinuxDistro(llvm::Triple::ArchType Arch) {
- llvm::OwningPtr<llvm::MemoryBuffer> File;
+ OwningPtr<llvm::MemoryBuffer> File;
if (!llvm::MemoryBuffer::getFile("/etc/lsb-release", File)) {
StringRef Data = File.get()->getBuffer();
SmallVector<StringRef, 8> Lines;
Data.split(Lines, "\n");
- for (unsigned int i = 0, s = Lines.size(); i < s; ++ i) {
- if (Lines[i] == "DISTRIB_CODENAME=hardy")
- return UbuntuHardy;
- else if (Lines[i] == "DISTRIB_CODENAME=intrepid")
- return UbuntuIntrepid;
- else if (Lines[i] == "DISTRIB_CODENAME=jaunty")
- return UbuntuJaunty;
- else if (Lines[i] == "DISTRIB_CODENAME=karmic")
- return UbuntuKarmic;
- else if (Lines[i] == "DISTRIB_CODENAME=lucid")
- return UbuntuLucid;
- else if (Lines[i] == "DISTRIB_CODENAME=maverick")
- return UbuntuMaverick;
- else if (Lines[i] == "DISTRIB_CODENAME=natty")
- return UbuntuNatty;
- else if (Lines[i] == "DISTRIB_CODENAME=oneiric")
- return UbuntuOneiric;
- }
- return UnknownDistro;
+ LinuxDistro Version = UnknownDistro;
+ for (unsigned i = 0, s = Lines.size(); i != s; ++i)
+ if (Version == UnknownDistro && Lines[i].startswith("DISTRIB_CODENAME="))
+ Version = llvm::StringSwitch<LinuxDistro>(Lines[i].substr(17))
+ .Case("hardy", UbuntuHardy)
+ .Case("intrepid", UbuntuIntrepid)
+ .Case("jaunty", UbuntuJaunty)
+ .Case("karmic", UbuntuKarmic)
+ .Case("lucid", UbuntuLucid)
+ .Case("maverick", UbuntuMaverick)
+ .Case("natty", UbuntuNatty)
+ .Case("oneiric", UbuntuOneiric)
+ .Case("precise", UbuntuPrecise)
+ .Default(UnknownDistro);
+ return Version;
}
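
llvm::StringSwitch, used above, chains string cases that are tested in order, collapsing the old if/else ladder into a single expression. A self-contained sketch of the pattern, assuming only the LLVM ADT headers (the cases shown are a subset, purely illustrative):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    enum Codename { Lucid, Oneiric, Precise, Unknown };

    // Maps a "DISTRIB_CODENAME=<name>" line to an enumerator; substr(17)
    // skips past the fixed-length key and the '=', as in the patch above.
    static Codename parseCodename(llvm::StringRef Line) {
      if (!Line.startswith("DISTRIB_CODENAME="))
        return Unknown;
      return llvm::StringSwitch<Codename>(Line.substr(17))
                 .Case("lucid", Lucid)
                 .Case("oneiric", Oneiric)
                 .Case("precise", Precise)
                 .Default(Unknown);
    }
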
if (!llvm::MemoryBuffer::getFile("/etc/redhat-release", File)) {
StringRef Data = File.get()->getBuffer();
- if (Data.startswith("Fedora release 15"))
+ if (Data.startswith("Fedora release 16"))
+ return Fedora16;
+ else if (Data.startswith("Fedora release 15"))
return Fedora15;
else if (Data.startswith("Fedora release 14"))
return Fedora14;
@@ -1486,23 +1916,19 @@ static LinuxDistro DetectLinuxDistro(llvm::Triple::ArchType Arch) {
StringRef Data = File.get()->getBuffer();
if (Data[0] == '5')
return DebianLenny;
- else if (Data.startswith("squeeze/sid"))
+ else if (Data.startswith("squeeze/sid") || Data[0] == '6')
return DebianSqueeze;
- else if (Data.startswith("wheezy/sid"))
+ else if (Data.startswith("wheezy/sid") || Data[0] == '7')
return DebianWheezy;
return UnknownDistro;
}
- if (!llvm::MemoryBuffer::getFile("/etc/SuSE-release", File)) {
- StringRef Data = File.get()->getBuffer();
- if (Data.startswith("openSUSE 11.3"))
- return OpenSuse11_3;
- else if (Data.startswith("openSUSE 11.4"))
- return OpenSuse11_4;
- else if (Data.startswith("openSUSE 12.1"))
- return OpenSuse12_1;
- return UnknownDistro;
- }
+ if (!llvm::MemoryBuffer::getFile("/etc/SuSE-release", File))
+ return llvm::StringSwitch<LinuxDistro>(File.get()->getBuffer())
+ .StartsWith("openSUSE 11.3", OpenSuse11_3)
+ .StartsWith("openSUSE 11.4", OpenSuse11_4)
+ .StartsWith("openSUSE 12.1", OpenSuse12_1)
+ .Default(UnknownDistro);
bool Exists;
if (!llvm::sys::fs::exists("/etc/exherbo-release", Exists) && Exists)
@@ -1514,247 +1940,6 @@ static LinuxDistro DetectLinuxDistro(llvm::Triple::ArchType Arch) {
return UnknownDistro;
}
-/// \brief Parse a GCCVersion object out of a string of text.
-///
-/// This is the primary means of forming GCCVersion objects.
-/*static*/ Linux::GCCVersion Linux::GCCVersion::Parse(StringRef VersionText) {
- const GCCVersion BadVersion = { VersionText.str(), -1, -1, -1, "" };
- std::pair<StringRef, StringRef> First = VersionText.split('.');
- std::pair<StringRef, StringRef> Second = First.second.split('.');
-
- GCCVersion GoodVersion = { VersionText.str(), -1, -1, -1, "" };
- if (First.first.getAsInteger(10, GoodVersion.Major) ||
- GoodVersion.Major < 0)
- return BadVersion;
- if (Second.first.getAsInteger(10, GoodVersion.Minor) ||
- GoodVersion.Minor < 0)
- return BadVersion;
-
- // First look for a number prefix and parse that if present. Otherwise just
- // stash the entire patch string in the suffix, and leave the number
- // unspecified. This covers versions strings such as:
- // 4.4
- // 4.4.0
- // 4.4.x
- // 4.4.2-rc4
- // 4.4.x-patched
- // And retains any patch number it finds.
- StringRef PatchText = GoodVersion.PatchSuffix = Second.second.str();
- if (!PatchText.empty()) {
- if (unsigned EndNumber = PatchText.find_first_not_of("0123456789")) {
- // Try to parse the number and any suffix.
- if (PatchText.slice(0, EndNumber).getAsInteger(10, GoodVersion.Patch) ||
- GoodVersion.Patch < 0)
- return BadVersion;
- GoodVersion.PatchSuffix = PatchText.substr(EndNumber).str();
- }
- }
-
- return GoodVersion;
-}
-
-/// \brief Less-than for GCCVersion, implementing a Strict Weak Ordering.
-bool Linux::GCCVersion::operator<(const GCCVersion &RHS) const {
- if (Major < RHS.Major) return true; if (Major > RHS.Major) return false;
- if (Minor < RHS.Minor) return true; if (Minor > RHS.Minor) return false;
-
-  // Note that we rank versions with *no* patch specified as better than ones
- // hard-coding a patch version. Thus if the RHS has no patch, it always
- // wins, and the LHS only wins if it has no patch and the RHS does have
- // a patch.
- if (RHS.Patch == -1) return true; if (Patch == -1) return false;
- if (Patch < RHS.Patch) return true; if (Patch > RHS.Patch) return false;
-
- // Finally, between completely tied version numbers, the version with the
- // suffix loses as we prefer full releases.
- if (RHS.PatchSuffix.empty()) return true;
- return false;
-}
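
To make the parsing and ordering rules concrete, a hypothetical test; it assumes the nested GCCVersion type were reachable from test code, which the real headers do not promise:

    #include <cassert>

    void gccVersionExamples() {
      typedef Linux::GCCVersion GV;  // hypothetical access to the nested type
      GV V = GV::Parse("4.4.2-rc4");
      assert(V.Major == 4 && V.Minor == 4 && V.Patch == 2);
      assert(V.PatchSuffix == "-rc4");
      // An unspecified patch outranks a pinned one...
      assert(GV::Parse("4.4.2") < GV::Parse("4.4"));
      // ...and a suffixed patch ranks below the bare release.
      assert(GV::Parse("4.4.2-rc4") < GV::Parse("4.4.2"));
    }
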
-
-/// \brief Construct a GCCInstallationDetector from the driver.
-///
-/// This performs all of the autodetection and sets up the various paths.
- /// Once constructed, a GCCInstallation is essentially immutable.
-Linux::GCCInstallationDetector::GCCInstallationDetector(const Driver &D)
- : IsValid(false),
- GccTriple(D.DefaultHostTriple) {
-  // FIXME: Using CXX_INCLUDE_ROOT here is a bit of a hack, but
- // avoids adding yet another option to configure/cmake.
- // It would probably be cleaner to break it in two variables
- // CXX_GCC_ROOT with just /foo/bar
- // CXX_GCC_VER with 4.5.2
- // Then we would have
- // CXX_INCLUDE_ROOT = CXX_GCC_ROOT/include/c++/CXX_GCC_VER
- // and this function would return
- // CXX_GCC_ROOT/lib/gcc/CXX_INCLUDE_ARCH/CXX_GCC_VER
- llvm::SmallString<128> CxxIncludeRoot(CXX_INCLUDE_ROOT);
- if (CxxIncludeRoot != "") {
- // This is of the form /foo/bar/include/c++/4.5.2/
- if (CxxIncludeRoot.back() == '/')
- llvm::sys::path::remove_filename(CxxIncludeRoot); // remove the /
- StringRef Version = llvm::sys::path::filename(CxxIncludeRoot);
- llvm::sys::path::remove_filename(CxxIncludeRoot); // remove the version
- llvm::sys::path::remove_filename(CxxIncludeRoot); // remove the c++
- llvm::sys::path::remove_filename(CxxIncludeRoot); // remove the include
- GccInstallPath = CxxIncludeRoot.str();
- GccInstallPath.append("/lib/gcc/");
- GccInstallPath.append(CXX_INCLUDE_ARCH);
- GccInstallPath.append("/");
- GccInstallPath.append(Version);
- GccParentLibPath = GccInstallPath + "/../../..";
- IsValid = true;
- return;
- }
-
- llvm::Triple::ArchType HostArch = llvm::Triple(GccTriple).getArch();
- // The library directories which may contain GCC installations.
- SmallVector<StringRef, 4> CandidateLibDirs;
- // The compatible GCC triples for this particular architecture.
- SmallVector<StringRef, 10> CandidateTriples;
- CollectLibDirsAndTriples(HostArch, CandidateLibDirs, CandidateTriples);
-
- // Always include the default host triple as the final fallback if no
- // specific triple is detected.
- CandidateTriples.push_back(D.DefaultHostTriple);
-
- // Compute the set of prefixes for our search.
- SmallVector<std::string, 8> Prefixes(D.PrefixDirs.begin(),
- D.PrefixDirs.end());
- Prefixes.push_back(D.SysRoot);
- Prefixes.push_back(D.SysRoot + "/usr");
- Prefixes.push_back(D.InstalledDir + "/..");
-
- // Loop over the various components which exist and select the best GCC
- // installation available. GCC installs are ranked by version number.
- Version = GCCVersion::Parse("0.0.0");
- for (unsigned i = 0, ie = Prefixes.size(); i < ie; ++i) {
- if (!llvm::sys::fs::exists(Prefixes[i]))
- continue;
- for (unsigned j = 0, je = CandidateLibDirs.size(); j < je; ++j) {
- const std::string LibDir = Prefixes[i] + CandidateLibDirs[j].str();
- if (!llvm::sys::fs::exists(LibDir))
- continue;
- for (unsigned k = 0, ke = CandidateTriples.size(); k < ke; ++k)
- ScanLibDirForGCCTriple(HostArch, LibDir, CandidateTriples[k]);
- }
- }
-}
-
-/*static*/ void Linux::GCCInstallationDetector::CollectLibDirsAndTriples(
- llvm::Triple::ArchType HostArch, SmallVectorImpl<StringRef> &LibDirs,
- SmallVectorImpl<StringRef> &Triples) {
- if (HostArch == llvm::Triple::arm || HostArch == llvm::Triple::thumb) {
- static const char *const ARMLibDirs[] = { "/lib" };
- static const char *const ARMTriples[] = { "arm-linux-gnueabi" };
- LibDirs.append(ARMLibDirs, ARMLibDirs + llvm::array_lengthof(ARMLibDirs));
- Triples.append(ARMTriples, ARMTriples + llvm::array_lengthof(ARMTriples));
- } else if (HostArch == llvm::Triple::x86_64) {
- static const char *const X86_64LibDirs[] = { "/lib64", "/lib" };
- static const char *const X86_64Triples[] = {
- "x86_64-linux-gnu",
- "x86_64-unknown-linux-gnu",
- "x86_64-pc-linux-gnu",
- "x86_64-redhat-linux6E",
- "x86_64-redhat-linux",
- "x86_64-suse-linux",
- "x86_64-manbo-linux-gnu",
- "x86_64-linux-gnu",
- "x86_64-slackware-linux"
- };
- LibDirs.append(X86_64LibDirs,
- X86_64LibDirs + llvm::array_lengthof(X86_64LibDirs));
- Triples.append(X86_64Triples,
- X86_64Triples + llvm::array_lengthof(X86_64Triples));
- } else if (HostArch == llvm::Triple::x86) {
- static const char *const X86LibDirs[] = { "/lib32", "/lib" };
- static const char *const X86Triples[] = {
- "i686-linux-gnu",
- "i686-pc-linux-gnu",
- "i486-linux-gnu",
- "i386-linux-gnu",
- "i686-redhat-linux",
- "i586-redhat-linux",
- "i386-redhat-linux",
- "i586-suse-linux",
- "i486-slackware-linux"
- };
- LibDirs.append(X86LibDirs, X86LibDirs + llvm::array_lengthof(X86LibDirs));
- Triples.append(X86Triples, X86Triples + llvm::array_lengthof(X86Triples));
- } else if (HostArch == llvm::Triple::ppc) {
- static const char *const PPCLibDirs[] = { "/lib32", "/lib" };
- static const char *const PPCTriples[] = {
- "powerpc-linux-gnu",
- "powerpc-unknown-linux-gnu"
- };
- LibDirs.append(PPCLibDirs, PPCLibDirs + llvm::array_lengthof(PPCLibDirs));
- Triples.append(PPCTriples, PPCTriples + llvm::array_lengthof(PPCTriples));
- } else if (HostArch == llvm::Triple::ppc64) {
- static const char *const PPC64LibDirs[] = { "/lib64", "/lib" };
- static const char *const PPC64Triples[] = {
- "powerpc64-unknown-linux-gnu"
- };
- LibDirs.append(PPC64LibDirs,
- PPC64LibDirs + llvm::array_lengthof(PPC64LibDirs));
- Triples.append(PPC64Triples,
- PPC64Triples + llvm::array_lengthof(PPC64Triples));
- }
-}
-
-void Linux::GCCInstallationDetector::ScanLibDirForGCCTriple(
- llvm::Triple::ArchType HostArch, const std::string &LibDir,
- StringRef CandidateTriple) {
- // There are various different suffixes involving the triple we
- // check for. We also record what is necessary to walk from each back
- // up to the lib directory.
- const std::string Suffixes[] = {
- "/gcc/" + CandidateTriple.str(),
- "/" + CandidateTriple.str() + "/gcc/" + CandidateTriple.str(),
-
- // Ubuntu has a strange mis-matched pair of triples that this happens to
- // match.
- // FIXME: It may be worthwhile to generalize this and look for a second
- // triple.
- "/i386-linux-gnu/gcc/" + CandidateTriple.str()
- };
- const std::string InstallSuffixes[] = {
- "/../../..",
- "/../../../..",
- "/../../../.."
- };
- // Only look at the final, weird Ubuntu suffix for i386-linux-gnu.
- const unsigned NumSuffixes = (llvm::array_lengthof(Suffixes) -
- (HostArch != llvm::Triple::x86));
- for (unsigned i = 0; i < NumSuffixes; ++i) {
- StringRef Suffix = Suffixes[i];
- llvm::error_code EC;
- for (llvm::sys::fs::directory_iterator LI(LibDir + Suffix, EC), LE;
- !EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->path());
- GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
- static const GCCVersion MinVersion = { "4.1.1", 4, 1, 1, "" };
- if (CandidateVersion < MinVersion)
- continue;
- if (CandidateVersion <= Version)
- continue;
- if (!llvm::sys::fs::exists(LI->path() + "/crtbegin.o"))
- continue;
-
- Version = CandidateVersion;
- GccTriple = CandidateTriple.str();
- // FIXME: We hack together the directory name here instead of
- // using LI to ensure stable path separators across Windows and
- // Linux.
- GccInstallPath = LibDir + Suffixes[i] + "/" + VersionText.str();
- GccParentLibPath = GccInstallPath + InstallSuffixes[i];
- IsValid = true;
- }
- }
-}
-
-static void addPathIfExists(Twine Path, ToolChain::path_list &Paths) {
- if (llvm::sys::fs::exists(Path)) Paths.push_back(Path.str());
-}
-
/// \brief Get our best guess at the multiarch triple for a target.
///
/// Debian-based systems are starting to use a multiarch setup where they use
@@ -1771,36 +1956,49 @@ static std::string getMultiarchTriple(const llvm::Triple TargetTriple,
// We use the existence of '/lib/<triple>' as a directory to detect some
// common linux triples that don't quite match the Clang triple for both
- // 32-bit and 64-bit targets. This works around annoying discrepancies on
- // Debian-based systems.
+ // 32-bit and 64-bit targets. Multiarch fixes its install triples to these
+ // regardless of what the actual target triple is.
case llvm::Triple::x86:
- if (llvm::sys::fs::exists(SysRoot + "/lib/i686-linux-gnu"))
- return "i686-linux-gnu";
if (llvm::sys::fs::exists(SysRoot + "/lib/i386-linux-gnu"))
return "i386-linux-gnu";
return TargetTriple.str();
case llvm::Triple::x86_64:
if (llvm::sys::fs::exists(SysRoot + "/lib/x86_64-linux-gnu"))
return "x86_64-linux-gnu";
- if (llvm::sys::fs::exists(SysRoot + "/lib/x86_64-pc-linux-gnu"))
- return "x86_64-pc-linux-gnu";
- if (llvm::sys::fs::exists(SysRoot + "/lib/x86_64-unknown-linux-gnu"))
- return "x86_64-unknown-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::mips:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/mips-linux-gnu"))
+ return "mips-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::mipsel:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/mipsel-linux-gnu"))
+ return "mipsel-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::ppc:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/powerpc-linux-gnu"))
+ return "powerpc-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::ppc64:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/powerpc64-linux-gnu"))
+ return "powerpc64-linux-gnu";
return TargetTriple.str();
}
}
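
Every case above performs the same probe; reduced to its skeleton (names illustrative, not from the patch):

    #include "llvm/Support/FileSystem.h"
    #include <string>

    // Prefer the fixed multiarch spelling when "<sysroot>/lib/<name>" exists;
    // otherwise keep the Clang target triple unchanged.
    static std::string pickMultiarchName(const std::string &SysRoot,
                                         const std::string &MultiarchName,
                                         const std::string &TargetTriple) {
      if (llvm::sys::fs::exists(SysRoot + "/lib/" + MultiarchName))
        return MultiarchName;
      return TargetTriple;
    }
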
-Linux::Linux(const HostInfo &Host, const llvm::Triple &Triple)
- : Generic_ELF(Host, Triple), GCCInstallation(getDriver()) {
- llvm::Triple::ArchType Arch =
- llvm::Triple(getDriver().DefaultHostTriple).getArch();
+static void addPathIfExists(Twine Path, ToolChain::path_list &Paths) {
+ if (llvm::sys::fs::exists(Path)) Paths.push_back(Path.str());
+}
+
+Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ llvm::Triple::ArchType Arch = Triple.getArch();
const std::string &SysRoot = getDriver().SysRoot;
// OpenSuse stores the linker with the compiler, add that to the search
// path.
ToolChain::path_list &PPaths = getProgramPaths();
PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
- GCCInstallation.getTriple() + "/bin").str());
+ GCCInstallation.getTriple().str() + "/bin").str());
Linker = GetProgramPath("ld");
@@ -1814,13 +2012,27 @@ Linux::Linux(const HostInfo &Host, const llvm::Triple &Triple)
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)
ExtraOpts.push_back("-X");
- if (IsRedhat(Distro) || IsOpenSuse(Distro) || Distro == UbuntuMaverick ||
- Distro == UbuntuNatty || Distro == UbuntuOneiric)
- ExtraOpts.push_back("--hash-style=gnu");
+ const bool IsMips = Arch == llvm::Triple::mips ||
+ Arch == llvm::Triple::mipsel ||
+ Arch == llvm::Triple::mips64 ||
+ Arch == llvm::Triple::mips64el;
- if (IsDebian(Distro) || IsOpenSuse(Distro) || Distro == UbuntuLucid ||
- Distro == UbuntuJaunty || Distro == UbuntuKarmic)
- ExtraOpts.push_back("--hash-style=both");
+ const bool IsAndroid = Triple.getEnvironment() == llvm::Triple::ANDROIDEABI;
+
+ // Do not use 'gnu' hash style for Mips targets because .gnu.hash
+ // and the MIPS ABI require .dynsym to be sorted in different ways.
+ // .gnu.hash needs symbols to be grouped by hash code whereas the MIPS
+ // ABI requires a mapping between the GOT and the symbol table.
+  // The Android loader does not support .gnu.hash.
+ if (!IsMips && !IsAndroid) {
+ if (IsRedhat(Distro) || IsOpenSuse(Distro) ||
+ (IsUbuntu(Distro) && Distro >= UbuntuMaverick))
+ ExtraOpts.push_back("--hash-style=gnu");
+
+ if (IsDebian(Distro) || IsOpenSuse(Distro) || Distro == UbuntuLucid ||
+ Distro == UbuntuJaunty || Distro == UbuntuKarmic)
+ ExtraOpts.push_back("--hash-style=both");
+ }
if (IsRedhat(Distro))
ExtraOpts.push_back("--no-add-needed");
@@ -1828,9 +2040,7 @@ Linux::Linux(const HostInfo &Host, const llvm::Triple &Triple)
if (Distro == DebianSqueeze || Distro == DebianWheezy ||
IsOpenSuse(Distro) ||
(IsRedhat(Distro) && Distro != RHEL4 && Distro != RHEL5) ||
- Distro == UbuntuLucid ||
- Distro == UbuntuMaverick || Distro == UbuntuKarmic ||
- Distro == UbuntuNatty || Distro == UbuntuOneiric)
+ (IsUbuntu(Distro) && Distro >= UbuntuKarmic))
ExtraOpts.push_back("--build-id");
if (IsOpenSuse(Distro))
@@ -1842,24 +2052,31 @@ Linux::Linux(const HostInfo &Host, const llvm::Triple &Triple)
// possible permutations of these directories, and seeing which ones it added
// to the link paths.
path_list &Paths = getFilePaths();
- const bool Is32Bits = (getArch() == llvm::Triple::x86 ||
- getArch() == llvm::Triple::ppc);
- const std::string Suffix32 = Arch == llvm::Triple::x86_64 ? "/32" : "";
- const std::string Suffix64 = Arch == llvm::Triple::x86_64 ? "" : "/64";
- const std::string Suffix = Is32Bits ? Suffix32 : Suffix64;
- const std::string Multilib = Is32Bits ? "lib32" : "lib64";
+ const std::string Multilib = Triple.isArch32Bit() ? "lib32" : "lib64";
const std::string MultiarchTriple = getMultiarchTriple(Triple, SysRoot);
// Add the multilib suffixed paths where they are available.
if (GCCInstallation.isValid()) {
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
const std::string &LibPath = GCCInstallation.getParentLibPath();
- const std::string &GccTriple = GCCInstallation.getTriple();
- addPathIfExists(GCCInstallation.getInstallPath() + Suffix, Paths);
- addPathIfExists(LibPath + "/../" + GccTriple + "/lib/../" + Multilib,
+ addPathIfExists((GCCInstallation.getInstallPath() +
+ GCCInstallation.getMultiarchSuffix()),
Paths);
- addPathIfExists(LibPath + "/" + MultiarchTriple, Paths);
- addPathIfExists(LibPath + "/../" + Multilib, Paths);
+
+ // If the GCC installation we found is inside of the sysroot, we want to
+ // prefer libraries installed in the parent prefix of the GCC installation.
+ // It is important to *not* use these paths when the GCC installation is
+  // outside of the system root, as that can pick up unintended libraries.
+  // This usually happens when there is an external cross compiler on the
+  // host system, and a more minimal sysroot available that is the target
+  // of the cross compile.
+ if (StringRef(LibPath).startswith(SysRoot)) {
+ addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib/../" + Multilib,
+ Paths);
+ addPathIfExists(LibPath + "/" + MultiarchTriple, Paths);
+ addPathIfExists(LibPath + "/../" + Multilib, Paths);
+ }
}
addPathIfExists(SysRoot + "/lib/" + MultiarchTriple, Paths);
addPathIfExists(SysRoot + "/lib/../" + Multilib, Paths);
@@ -1869,22 +2086,22 @@ Linux::Linux(const HostInfo &Host, const llvm::Triple &Triple)
// Try walking via the GCC triple path in case of multiarch GCC
// installations with strange symlinks.
if (GCCInstallation.isValid())
- addPathIfExists(SysRoot + "/usr/lib/" + GCCInstallation.getTriple() +
+ addPathIfExists(SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
"/../../" + Multilib, Paths);
// Add the non-multilib suffixed paths (if potentially different).
if (GCCInstallation.isValid()) {
const std::string &LibPath = GCCInstallation.getParentLibPath();
- const std::string &GccTriple = GCCInstallation.getTriple();
- if (!Suffix.empty())
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ if (!GCCInstallation.getMultiarchSuffix().empty())
addPathIfExists(GCCInstallation.getInstallPath(), Paths);
- addPathIfExists(LibPath + "/../" + GccTriple + "/lib", Paths);
- addPathIfExists(LibPath + "/" + MultiarchTriple, Paths);
- addPathIfExists(LibPath, Paths);
+
+ if (StringRef(LibPath).startswith(SysRoot)) {
+ addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib", Paths);
+ addPathIfExists(LibPath, Paths);
+ }
}
- addPathIfExists(SysRoot + "/lib/" + MultiarchTriple, Paths);
addPathIfExists(SysRoot + "/lib", Paths);
- addPathIfExists(SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
addPathIfExists(SysRoot + "/usr/lib", Paths);
}
@@ -1981,6 +2198,18 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
const StringRef ARMMultiarchIncludeDirs[] = {
"/usr/include/arm-linux-gnueabi"
};
+ const StringRef MIPSMultiarchIncludeDirs[] = {
+ "/usr/include/mips-linux-gnu"
+ };
+ const StringRef MIPSELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsel-linux-gnu"
+ };
+ const StringRef PPCMultiarchIncludeDirs[] = {
+ "/usr/include/powerpc-linux-gnu"
+ };
+ const StringRef PPC64MultiarchIncludeDirs[] = {
+ "/usr/include/powerpc64-linux-gnu"
+ };
ArrayRef<StringRef> MultiarchIncludeDirs;
if (getTriple().getArch() == llvm::Triple::x86_64) {
MultiarchIncludeDirs = X86_64MultiarchIncludeDirs;
@@ -1988,11 +2217,19 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
MultiarchIncludeDirs = X86MultiarchIncludeDirs;
} else if (getTriple().getArch() == llvm::Triple::arm) {
MultiarchIncludeDirs = ARMMultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::mips) {
+ MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::mipsel) {
+ MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::ppc) {
+ MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::ppc64) {
+ MultiarchIncludeDirs = PPC64MultiarchIncludeDirs;
}
for (ArrayRef<StringRef>::iterator I = MultiarchIncludeDirs.begin(),
E = MultiarchIncludeDirs.end();
I != E; ++I) {
- if (llvm::sys::fs::exists(*I)) {
+ if (llvm::sys::fs::exists(D.SysRoot + *I)) {
addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + *I);
break;
}
@@ -2001,12 +2238,18 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (getTriple().getOS() == llvm::Triple::RTEMS)
return;
+ // Add an include of '/include' directly. This isn't provided by default by
+ // system GCCs, but is often used with cross-compiling GCCs, and harmless to
+ // add even when Clang is acting as-if it were a system compiler.
+ addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/include");
+
addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/usr/include");
}
-static bool addLibStdCXXIncludePaths(Twine Base, Twine TargetArchDir,
- const ArgList &DriverArgs,
- ArgStringList &CC1Args) {
+/// \brief Helper to add the three variant paths for a libstdc++ installation.
+/*static*/ bool Linux::addLibStdCXXIncludePaths(Twine Base, Twine TargetArchDir,
+ const ArgList &DriverArgs,
+ ArgStringList &CC1Args) {
if (!llvm::sys::fs::exists(Base))
return false;
addSystemInclude(DriverArgs, CC1Args, Base);
@@ -2029,37 +2272,10 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
return;
}
- const llvm::Triple &TargetTriple = getTriple();
- const llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
- bool IsTarget64Bit = (TargetArch == llvm::Triple::x86_64 ||
- TargetArch == llvm::Triple::ppc64);
-
- StringRef CxxIncludeRoot(CXX_INCLUDE_ROOT);
- if (!CxxIncludeRoot.empty()) {
- StringRef CxxIncludeArch(CXX_INCLUDE_ARCH);
- if (CxxIncludeArch.empty())
- CxxIncludeArch = TargetTriple.str();
-
- addLibStdCXXIncludePaths(
- CxxIncludeRoot,
- CxxIncludeArch + (IsTarget64Bit ? CXX_INCLUDE_64BIT_DIR
- : CXX_INCLUDE_32BIT_DIR),
- DriverArgs, CC1Args);
+ // We need a detected GCC installation on Linux to provide libstdc++'s
+ // headers. We handled the libc++ case above.
+ if (!GCCInstallation.isValid())
return;
- }
-
- // Check if the target architecture specific dirs need a suffix. Note that we
- // only support the suffix-based bi-arch-like header scheme for host/target
- // mismatches of just bit width.
- llvm::Triple::ArchType HostArch =
- llvm::Triple(getDriver().DefaultHostTriple).getArch();
- StringRef Suffix;
- if ((HostArch == llvm::Triple::x86 && TargetArch == llvm::Triple::x86_64) ||
- (HostArch == llvm::Triple::ppc && TargetArch == llvm::Triple::ppc64))
- Suffix = "/64";
- if ((HostArch == llvm::Triple::x86_64 && TargetArch == llvm::Triple::x86) ||
- (HostArch == llvm::Triple::ppc64 && TargetArch == llvm::Triple::ppc))
- Suffix = "/32";
// By default, look for the C++ headers in an include directory adjacent to
  // the lib directory of the GCC installation. Note that this is expected to be
@@ -2068,20 +2284,22 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
StringRef InstallDir = GCCInstallation.getInstallPath();
StringRef Version = GCCInstallation.getVersion();
if (!addLibStdCXXIncludePaths(LibDir + "/../include/c++/" + Version,
- GCCInstallation.getTriple() + Suffix,
+ (GCCInstallation.getTriple().str() +
+ GCCInstallation.getMultiarchSuffix()),
DriverArgs, CC1Args)) {
// Gentoo is weird and places its headers inside the GCC install, so if the
// first attempt to find the headers fails, try this pattern.
addLibStdCXXIncludePaths(InstallDir + "/include/g++-v4",
- GCCInstallation.getTriple() + Suffix,
+ (GCCInstallation.getTriple().str() +
+ GCCInstallation.getMultiarchSuffix()),
DriverArgs, CC1Args);
}
}
/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
-DragonFly::DragonFly(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_ELF(Host, Triple) {
+DragonFly::DragonFly(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
// Path mangling to find libexec
getProgramPaths().push_back(getDriver().getInstalledDir());
@@ -2115,322 +2333,3 @@ Tool &DragonFly::SelectTool(const Compilation &C, const JobAction &JA,
return *T;
}
-
-Windows::Windows(const HostInfo &Host, const llvm::Triple& Triple)
- : ToolChain(Host, Triple) {
-}
-
-Tool &Windows::SelectTool(const Compilation &C, const JobAction &JA,
- const ActionList &Inputs) const {
- Action::ActionClass Key;
- if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
- Key = Action::AnalyzeJobClass;
- else
- Key = JA.getKind();
-
- bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
- options::OPT_no_integrated_as,
- IsIntegratedAssemblerDefault());
-
- Tool *&T = Tools[Key];
- if (!T) {
- switch (Key) {
- case Action::InputClass:
- case Action::BindArchClass:
- case Action::LipoJobClass:
- case Action::DsymutilJobClass:
- case Action::VerifyJobClass:
- llvm_unreachable("Invalid tool kind.");
- case Action::PreprocessJobClass:
- case Action::PrecompileJobClass:
- case Action::AnalyzeJobClass:
- case Action::CompileJobClass:
- T = new tools::Clang(*this); break;
- case Action::AssembleJobClass:
- if (!UseIntegratedAs && getTriple().getEnvironment() == llvm::Triple::MachO)
- T = new tools::darwin::Assemble(*this);
- else
- T = new tools::ClangAs(*this);
- break;
- case Action::LinkJobClass:
- T = new tools::visualstudio::Link(*this); break;
- }
- }
-
- return *T;
-}
-
-bool Windows::IsIntegratedAssemblerDefault() const {
- return true;
-}
-
-bool Windows::IsUnwindTablesDefault() const {
- // FIXME: Gross; we should probably have some separate target
- // definition, possibly even reusing the one in clang.
- return getArchName() == "x86_64";
-}
-
-const char *Windows::GetDefaultRelocationModel() const {
- return "static";
-}
-
-const char *Windows::GetForcedPicModel() const {
- if (getArchName() == "x86_64")
- return "pic";
- return 0;
-}
-
-// FIXME: This probably should go to some platform utils place.
-#ifdef _MSC_VER
-
-/// \brief Read registry string.
-/// This also supports a means to look for high-versioned keys by use
-/// of a $VERSION placeholder in the key path.
-/// $VERSION in the key path is a placeholder for the version number,
-/// causing the highest value path to be searched for and used.
-/// I.e. "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION".
-/// There can be additional characters in the component. Only the numeric
-/// characters are compared.
-static bool getSystemRegistryString(const char *keyPath, const char *valueName,
- char *value, size_t maxLength) {
- HKEY hRootKey = NULL;
- HKEY hKey = NULL;
- const char* subKey = NULL;
- DWORD valueType;
- DWORD valueSize = maxLength - 1;
- long lResult;
- bool returnValue = false;
-
- if (strncmp(keyPath, "HKEY_CLASSES_ROOT\\", 18) == 0) {
- hRootKey = HKEY_CLASSES_ROOT;
- subKey = keyPath + 18;
- } else if (strncmp(keyPath, "HKEY_USERS\\", 11) == 0) {
- hRootKey = HKEY_USERS;
- subKey = keyPath + 11;
- } else if (strncmp(keyPath, "HKEY_LOCAL_MACHINE\\", 19) == 0) {
- hRootKey = HKEY_LOCAL_MACHINE;
- subKey = keyPath + 19;
- } else if (strncmp(keyPath, "HKEY_CURRENT_USER\\", 18) == 0) {
- hRootKey = HKEY_CURRENT_USER;
- subKey = keyPath + 18;
- } else {
- return false;
- }
-
- const char *placeHolder = strstr(subKey, "$VERSION");
- char bestName[256];
- bestName[0] = '\0';
- // If we have a $VERSION placeholder, do the highest-version search.
- if (placeHolder) {
- const char *keyEnd = placeHolder - 1;
- const char *nextKey = placeHolder;
- // Find end of previous key.
- while ((keyEnd > subKey) && (*keyEnd != '\\'))
- keyEnd--;
- // Find end of key containing $VERSION.
- while (*nextKey && (*nextKey != '\\'))
- nextKey++;
- size_t partialKeyLength = keyEnd - subKey;
- char partialKey[256];
- if (partialKeyLength > sizeof(partialKey))
- partialKeyLength = sizeof(partialKey);
- strncpy(partialKey, subKey, partialKeyLength);
- partialKey[partialKeyLength] = '\0';
- HKEY hTopKey = NULL;
- lResult = RegOpenKeyEx(hRootKey, partialKey, 0, KEY_READ, &hTopKey);
- if (lResult == ERROR_SUCCESS) {
- char keyName[256];
- int bestIndex = -1;
- double bestValue = 0.0;
- DWORD index, size = sizeof(keyName) - 1;
- for (index = 0; RegEnumKeyEx(hTopKey, index, keyName, &size, NULL,
- NULL, NULL, NULL) == ERROR_SUCCESS; index++) {
- const char *sp = keyName;
- while (*sp && !isdigit(*sp))
- sp++;
- if (!*sp)
- continue;
- const char *ep = sp + 1;
- while (*ep && (isdigit(*ep) || (*ep == '.')))
- ep++;
- char numBuf[32];
- strncpy(numBuf, sp, sizeof(numBuf) - 1);
- numBuf[sizeof(numBuf) - 1] = '\0';
- double value = strtod(numBuf, NULL);
- if (value > bestValue) {
- bestIndex = (int)index;
- bestValue = value;
- strcpy(bestName, keyName);
- }
- size = sizeof(keyName) - 1;
- }
- // If we found the highest versioned key, open the key and get the value.
- if (bestIndex != -1) {
- // Append rest of key.
- strncat(bestName, nextKey, sizeof(bestName) - 1);
- bestName[sizeof(bestName) - 1] = '\0';
- // Open the chosen key path remainder.
- lResult = RegOpenKeyEx(hTopKey, bestName, 0, KEY_READ, &hKey);
- if (lResult == ERROR_SUCCESS) {
- lResult = RegQueryValueEx(hKey, valueName, NULL, &valueType,
- (LPBYTE)value, &valueSize);
- if (lResult == ERROR_SUCCESS)
- returnValue = true;
- RegCloseKey(hKey);
- }
- }
- RegCloseKey(hTopKey);
- }
- } else {
- lResult = RegOpenKeyEx(hRootKey, subKey, 0, KEY_READ, &hKey);
- if (lResult == ERROR_SUCCESS) {
- lResult = RegQueryValueEx(hKey, valueName, NULL, &valueType,
- (LPBYTE)value, &valueSize);
- if (lResult == ERROR_SUCCESS)
- returnValue = true;
- RegCloseKey(hKey);
- }
- }
- return returnValue;
-}
-
-/// \brief Get Windows SDK installation directory.
-static bool getWindowsSDKDir(std::string &path) {
- char windowsSDKInstallDir[256];
- // Try the Windows registry.
- bool hasSDKDir = getSystemRegistryString(
- "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
- "InstallationFolder",
- windowsSDKInstallDir,
- sizeof(windowsSDKInstallDir) - 1);
-  // Use the registry value if the lookup succeeded and is non-empty.
- if (hasSDKDir && windowsSDKInstallDir[0]) {
- path = windowsSDKInstallDir;
- return true;
- }
- return false;
-}
-
- // Get Visual Studio installation directory.
-static bool getVisualStudioDir(std::string &path) {
- // First check the environment variables that vsvars32.bat sets.
- const char* vcinstalldir = getenv("VCINSTALLDIR");
- if (vcinstalldir) {
- char *p = const_cast<char *>(strstr(vcinstalldir, "\\VC"));
- if (p)
- *p = '\0';
- path = vcinstalldir;
- return true;
- }
-
- char vsIDEInstallDir[256];
- char vsExpressIDEInstallDir[256];
- // Then try the windows registry.
- bool hasVCDir = getSystemRegistryString(
- "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION",
- "InstallDir", vsIDEInstallDir, sizeof(vsIDEInstallDir) - 1);
- bool hasVCExpressDir = getSystemRegistryString(
- "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VCExpress\\$VERSION",
- "InstallDir", vsExpressIDEInstallDir, sizeof(vsExpressIDEInstallDir) - 1);
- // If we have both vc80 and vc90, pick version we were compiled with.
- if (hasVCDir && vsIDEInstallDir[0]) {
- char *p = (char*)strstr(vsIDEInstallDir, "\\Common7\\IDE");
- if (p)
- *p = '\0';
- path = vsIDEInstallDir;
- return true;
- }
-
- if (hasVCExpressDir && vsExpressIDEInstallDir[0]) {
- char *p = (char*)strstr(vsExpressIDEInstallDir, "\\Common7\\IDE");
- if (p)
- *p = '\0';
- path = vsExpressIDEInstallDir;
- return true;
- }
-
- // Try the environment.
- const char *vs100comntools = getenv("VS100COMNTOOLS");
- const char *vs90comntools = getenv("VS90COMNTOOLS");
- const char *vs80comntools = getenv("VS80COMNTOOLS");
- const char *vscomntools = NULL;
-
- // Try to find the version that we were compiled with
- if(false) {}
- #if (_MSC_VER >= 1600) // VC100
- else if(vs100comntools) {
- vscomntools = vs100comntools;
- }
-  #elif (_MSC_VER == 1500) // VC90
- else if(vs90comntools) {
- vscomntools = vs90comntools;
- }
- #elif (_MSC_VER == 1400) // VC80
- else if(vs80comntools) {
- vscomntools = vs80comntools;
- }
- #endif
- // Otherwise find any version we can
- else if (vs100comntools)
- vscomntools = vs100comntools;
- else if (vs90comntools)
- vscomntools = vs90comntools;
- else if (vs80comntools)
- vscomntools = vs80comntools;
-
- if (vscomntools && *vscomntools) {
- const char *p = strstr(vscomntools, "\\Common7\\Tools");
- path = p ? std::string(vscomntools, p) : vscomntools;
- return true;
- }
- return false;
-}
-
-#endif // _MSC_VER
-
-void Windows::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdinc))
- return;
-
- if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
- llvm::sys::Path P(getDriver().ResourceDir);
- P.appendComponent("include");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- }
-
- if (DriverArgs.hasArg(options::OPT_nostdlibinc))
- return;
-
- std::string VSDir;
- std::string WindowsSDKDir;
-
-#ifdef _MSC_VER
- // When built with access to the proper Windows APIs, try to actually find
- // the correct include paths first.
- if (getVisualStudioDir(VSDir)) {
- addSystemInclude(DriverArgs, CC1Args, VSDir + "\\VC\\include");
- if (getWindowsSDKDir(WindowsSDKDir))
- addSystemInclude(DriverArgs, CC1Args, WindowsSDKDir + "\\include");
- else
- addSystemInclude(DriverArgs, CC1Args,
- VSDir + "\\VC\\PlatformSDK\\Include");
- return;
- }
-#endif // _MSC_VER
-
- // As a fallback, select default install paths.
- const StringRef Paths[] = {
- "C:/Program Files/Microsoft Visual Studio 10.0/VC/include",
- "C:/Program Files/Microsoft Visual Studio 9.0/VC/include",
- "C:/Program Files/Microsoft Visual Studio 9.0/VC/PlatformSDK/Include",
- "C:/Program Files/Microsoft Visual Studio 8/VC/include",
- "C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include"
- };
- addSystemIncludes(DriverArgs, CC1Args, Paths);
-}
-
-void Windows::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- // FIXME: There should probably be logic here to find libc++ on Windows.
-}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
index 0e4d67c..eaa6be1 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
@@ -13,6 +13,7 @@
#include "clang/Driver/Action.h"
#include "clang/Driver/ToolChain.h"
+#include "clang/Basic/VersionTuple.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Compiler.h"
@@ -27,10 +28,100 @@ namespace toolchains {
/// command line options.
class LLVM_LIBRARY_VISIBILITY Generic_GCC : public ToolChain {
protected:
+ /// \brief Struct to store and manipulate GCC versions.
+ ///
+ /// We rely on assumptions about the form and structure of GCC version
+ /// numbers: they consist of at most three '.'-separated components, and each
+ /// component is a non-negative integer except for the last component. For
+ /// the last component we are very flexible in order to tolerate release
+ /// candidates or 'x' wildcards.
+ ///
+ /// Note that the ordering established among GCCVersions is based on the
+  /// preferred version string to use. For example, we prefer versions without
+  /// a hard-coded patch number to those with a hard-coded patch number.
+ ///
+ /// Currently this doesn't provide any logic for textual suffixes to patches
+ /// in the way that (for example) Debian's version format does. If that ever
+ /// becomes necessary, it can be added.
+ struct GCCVersion {
+ /// \brief The unparsed text of the version.
+ std::string Text;
+
+ /// \brief The parsed major, minor, and patch numbers.
+ int Major, Minor, Patch;
+
+ /// \brief Any textual suffix on the patch number.
+ std::string PatchSuffix;
+
+ static GCCVersion Parse(StringRef VersionText);
+ bool operator<(const GCCVersion &RHS) const;
+ bool operator>(const GCCVersion &RHS) const { return RHS < *this; }
+ bool operator<=(const GCCVersion &RHS) const { return !(*this > RHS); }
+ bool operator>=(const GCCVersion &RHS) const { return !(*this < RHS); }
+ };
+
+
+ /// \brief This is a class to find a viable GCC installation for Clang to
+ /// use.
+ ///
+ /// This class tries to find a GCC installation on the system, and report
+ /// information about it. It starts from the host information provided to the
+ /// Driver, and has logic for fuzzing that where appropriate.
+ class GCCInstallationDetector {
+
+ bool IsValid;
+ llvm::Triple GCCTriple;
+
+ // FIXME: These might be better as path objects.
+ std::string GCCInstallPath;
+ std::string GCCMultiarchSuffix;
+ std::string GCCParentLibPath;
+
+ GCCVersion Version;
+
+ public:
+ GCCInstallationDetector(const Driver &D, const llvm::Triple &TargetTriple,
+ const ArgList &Args);
+
+ /// \brief Check whether we detected a valid GCC install.
+ bool isValid() const { return IsValid; }
+
+ /// \brief Get the GCC triple for the detected install.
+ const llvm::Triple &getTriple() const { return GCCTriple; }
+
+ /// \brief Get the detected GCC installation path.
+ StringRef getInstallPath() const { return GCCInstallPath; }
+
+ /// \brief Get the detected GCC installation path suffix for multiarch GCCs.
+ StringRef getMultiarchSuffix() const { return GCCMultiarchSuffix; }
+
+ /// \brief Get the detected GCC parent lib path.
+ StringRef getParentLibPath() const { return GCCParentLibPath; }
+
+ /// \brief Get the detected GCC version string.
+ StringRef getVersion() const { return Version.Text; }
+
+ private:
+ static void CollectLibDirsAndTriples(
+ const llvm::Triple &TargetTriple,
+ const llvm::Triple &MultiarchTriple,
+ SmallVectorImpl<StringRef> &LibDirs,
+ SmallVectorImpl<StringRef> &TripleAliases,
+ SmallVectorImpl<StringRef> &MultiarchLibDirs,
+ SmallVectorImpl<StringRef> &MultiarchTripleAliases);
+
+ void ScanLibDirForGCCTriple(llvm::Triple::ArchType TargetArch,
+ const std::string &LibDir,
+ StringRef CandidateTriple,
+ bool NeedsMultiarchSuffix = false);
+ };
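
After construction the detector is consumed purely through these accessors, as the Linux toolchain code earlier in the diff does; a minimal usage sketch (the helper's name and shape are illustrative):

    // Append the GCC install directory (plus any multiarch suffix) to a
    // toolchain's file search paths, guarded by the validity check.
    static void addGCCInstallPath(const GCCInstallationDetector &GCCInstallation,
                                  ToolChain::path_list &Paths) {
      if (!GCCInstallation.isValid())
        return;
      // StringRef + StringRef yields an llvm::Twine; materialize it once.
      Paths.push_back((GCCInstallation.getInstallPath() +
                       GCCInstallation.getMultiarchSuffix()).str());
    }
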
+
+ GCCInstallationDetector GCCInstallation;
+
mutable llvm::DenseMap<unsigned, Tool*> Tools;
public:
- Generic_GCC(const HostInfo &Host, const llvm::Triple& Triple);
+ Generic_GCC(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
~Generic_GCC();
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
@@ -39,9 +130,37 @@ public:
virtual bool IsUnwindTablesDefault() const;
virtual const char *GetDefaultRelocationModel() const;
virtual const char *GetForcedPicModel() const;
+
+protected:
+ /// \name ToolChain Implementation Helper Functions
+ /// @{
+
+ /// \brief Check whether the target triple's architecture is 64-bits.
+ bool isTarget64Bit() const { return getTriple().isArch64Bit(); }
+
+ /// \brief Check whether the target triple's architecture is 32-bits.
+ bool isTarget32Bit() const { return getTriple().isArch32Bit(); }
+
+ /// @}
+};
+
+class LLVM_LIBRARY_VISIBILITY Hexagon_TC : public ToolChain {
+protected:
+ mutable llvm::DenseMap<unsigned, Tool*> Tools;
+
+public:
+ Hexagon_TC(const Driver &D, const llvm::Triple& Triple);
+ ~Hexagon_TC();
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+
+ virtual bool IsUnwindTablesDefault() const;
+ virtual const char *GetDefaultRelocationModel() const;
+ virtual const char *GetForcedPicModel() const;
};
-/// Darwin - The base Darwin tool chain.
+ /// Darwin - The base Darwin tool chain.
class LLVM_LIBRARY_VISIBILITY Darwin : public ToolChain {
public:
/// The host version.
@@ -80,19 +199,20 @@ private:
mutable bool TargetIsIPhoneOSSimulator;
/// The OS version we are targeting.
- mutable unsigned TargetVersion[3];
+ mutable VersionTuple TargetVersion;
/// The default macosx-version-min of this tool chain; empty until
/// initialized.
std::string MacosxVersionMin;
bool hasARCRuntime() const;
+ bool hasSubscriptingRuntime() const;
private:
void AddDeploymentTarget(DerivedArgList &Args) const;
public:
- Darwin(const HostInfo &Host, const llvm::Triple& Triple);
+ Darwin(const Driver &D, const llvm::Triple& Triple);
~Darwin();
std::string ComputeEffectiveClangTriple(const ArgList &Args,
@@ -111,17 +231,14 @@ public:
// change. This will go away when we move away from argument translation.
if (TargetInitialized && TargetIsIPhoneOS == IsIPhoneOS &&
TargetIsIPhoneOSSimulator == IsIOSSim &&
- TargetVersion[0] == Major && TargetVersion[1] == Minor &&
- TargetVersion[2] == Micro)
+ TargetVersion == VersionTuple(Major, Minor, Micro))
return;
assert(!TargetInitialized && "Target already initialized!");
TargetInitialized = true;
TargetIsIPhoneOS = IsIPhoneOS;
TargetIsIPhoneOSSimulator = IsIOSSim;
- TargetVersion[0] = Major;
- TargetVersion[1] = Minor;
- TargetVersion[2] = Micro;
+ TargetVersion = VersionTuple(Major, Minor, Micro);
}
bool isTargetIPhoneOS() const {
@@ -134,13 +251,17 @@ public:
return TargetIsIPhoneOSSimulator;
}
+ bool isTargetMacOS() const {
+ return !isTargetIOSSimulator() &&
+ !isTargetIPhoneOS() &&
+ ARCRuntimeForSimulator == ARCSimulator_None;
+ }
+
bool isTargetInitialized() const { return TargetInitialized; }
- void getTargetVersion(unsigned (&Res)[3]) const {
+ VersionTuple getTargetVersion() const {
assert(TargetInitialized && "Target not initialized!");
- Res[0] = TargetVersion[0];
- Res[1] = TargetVersion[1];
- Res[2] = TargetVersion[2];
+ return TargetVersion;
}
/// getDarwinArchName - Get the "Darwin" arch name for a particular compiler
@@ -148,24 +269,14 @@ public:
/// distinct architectures.
StringRef getDarwinArchName(const ArgList &Args) const;
- static bool isVersionLT(unsigned (&A)[3], unsigned (&B)[3]) {
- for (unsigned i=0; i < 3; ++i) {
- if (A[i] > B[i]) return false;
- if (A[i] < B[i]) return true;
- }
- return false;
- }
-
bool isIPhoneOSVersionLT(unsigned V0, unsigned V1=0, unsigned V2=0) const {
assert(isTargetIPhoneOS() && "Unexpected call for OS X target!");
- unsigned B[3] = { V0, V1, V2 };
- return isVersionLT(TargetVersion, B);
+ return TargetVersion < VersionTuple(V0, V1, V2);
}
bool isMacosxVersionLT(unsigned V0, unsigned V1=0, unsigned V2=0) const {
assert(!isTargetIPhoneOS() && "Unexpected call for iPhoneOS target!");
- unsigned B[3] = { V0, V1, V2 };
- return isVersionLT(TargetVersion, B);
+ return TargetVersion < VersionTuple(V0, V1, V2);
}
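
clang::VersionTuple (pulled in by the new VersionTuple.h include at the top of this header diff) supplies the lexicographic ordering the old hand-rolled isVersionLT loop implemented; absent minor/subminor components compare as zero. A tiny stand-alone illustration:

    #include "clang/Basic/VersionTuple.h"
    #include <cassert>

    void versionTupleExamples() {
      // 10.6 (== 10.6.0) precedes 10.6.8, which precedes 10.7.
      assert(clang::VersionTuple(10, 6) < clang::VersionTuple(10, 6, 8));
      assert(clang::VersionTuple(10, 6, 8) < clang::VersionTuple(10, 7));
      assert(!(clang::VersionTuple(10, 7) < clang::VersionTuple(10, 7)));
    }
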
/// AddLinkSearchPathArgs - Add the linker search paths to \arg CmdArgs.
@@ -211,9 +322,8 @@ public:
#ifdef DISABLE_DEFAULT_INTEGRATED_ASSEMBLER
return false;
#else
- // Default integrated assembler to on for x86.
- return (getTriple().getArch() == llvm::Triple::x86 ||
- getTriple().getArch() == llvm::Triple::x86_64);
+ // Default integrated assembler to on for Darwin.
+ return true;
#endif
}
virtual bool IsStrictAliasingDefault() const {
@@ -225,7 +335,7 @@ public:
}
virtual bool IsObjCDefaultSynthPropertiesDefault() const {
- return false;
+ return true;
}
virtual bool IsObjCNonFragileABIDefault() const {
@@ -248,10 +358,13 @@ public:
virtual unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const {
// Stack protectors default to on for user code on 10.5,
// and for everything in 10.6 and beyond
- return !isTargetIPhoneOS() &&
+ return isTargetIPhoneOS() ||
(!isMacosxVersionLT(10, 6) ||
(!isMacosxVersionLT(10, 5) && !KernelOrKext));
}
+ virtual RuntimeLibType GetDefaultRuntimeLibType() const {
+ return ToolChain::RLT_CompilerRT;
+ }
virtual const char *GetDefaultRelocationModel() const;
virtual const char *GetForcedPicModel() const;
@@ -259,6 +372,8 @@ public:
virtual bool SupportsObjCGC() const;
+ virtual bool SupportsObjCARC() const;
+
virtual bool UseDwarfDebugFlags() const;
virtual bool UseSjLjExceptions() const;
@@ -272,7 +387,7 @@ private:
void AddGCCLibexecPath(unsigned darwinVersion);
public:
- DarwinClang(const HostInfo &Host, const llvm::Triple& Triple);
+ DarwinClang(const Driver &D, const llvm::Triple& Triple);
/// @name Darwin ToolChain Implementation
/// {
@@ -299,8 +414,8 @@ public:
/// Darwin_Generic_GCC - Generic Darwin tool chain using gcc.
class LLVM_LIBRARY_VISIBILITY Darwin_Generic_GCC : public Generic_GCC {
public:
- Darwin_Generic_GCC(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_GCC(Host, Triple) {}
+ Darwin_Generic_GCC(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {}
std::string ComputeEffectiveClangTriple(const ArgList &Args,
types::ID InputType) const;
@@ -309,9 +424,10 @@ public:
};
class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC {
- public:
- Generic_ELF(const HostInfo &Host, const llvm::Triple& Triple)
- : Generic_GCC(Host, Triple) {}
+ virtual void anchor();
+public:
+ Generic_ELF(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {}
virtual bool IsIntegratedAssemblerDefault() const {
// Default integrated assembler to on for x86.
@@ -322,15 +438,36 @@ class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC {
class LLVM_LIBRARY_VISIBILITY AuroraUX : public Generic_GCC {
public:
- AuroraUX(const HostInfo &Host, const llvm::Triple& Triple);
+ AuroraUX(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+};
+
+class LLVM_LIBRARY_VISIBILITY Solaris : public Generic_GCC {
+public:
+ Solaris(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
+
+ virtual bool IsIntegratedAssemblerDefault() const { return true; }
};
+
class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_ELF {
public:
- OpenBSD(const HostInfo &Host, const llvm::Triple& Triple);
+ OpenBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual bool IsObjCNonFragileABIDefault() const { return true; }
+ virtual bool IsObjCLegacyDispatchDefault() const {
+ llvm::Triple::ArchType Arch = getTriple().getArch();
+ if (Arch == llvm::Triple::arm ||
+ Arch == llvm::Triple::x86 ||
+ Arch == llvm::Triple::x86_64)
+ return false;
+ return true;
+ }
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
@@ -338,26 +475,43 @@ public:
class LLVM_LIBRARY_VISIBILITY FreeBSD : public Generic_ELF {
public:
- FreeBSD(const HostInfo &Host, const llvm::Triple& Triple);
+ FreeBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual bool IsObjCNonFragileABIDefault() const { return true; }
+ virtual bool IsObjCLegacyDispatchDefault() const {
+ llvm::Triple::ArchType Arch = getTriple().getArch();
+ if (Arch == llvm::Triple::arm ||
+ Arch == llvm::Triple::x86 ||
+ Arch == llvm::Triple::x86_64)
+ return false;
+ return true;
+ }
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
};
class LLVM_LIBRARY_VISIBILITY NetBSD : public Generic_ELF {
- const llvm::Triple ToolTriple;
-
public:
- NetBSD(const HostInfo &Host, const llvm::Triple& Triple,
- const llvm::Triple& ToolTriple);
+ NetBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual bool IsObjCNonFragileABIDefault() const { return true; }
+ virtual bool IsObjCLegacyDispatchDefault() const {
+ llvm::Triple::ArchType Arch = getTriple().getArch();
+ if (Arch == llvm::Triple::arm ||
+ Arch == llvm::Triple::x86 ||
+ Arch == llvm::Triple::x86_64)
+ return false;
+ return true;
+ }
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
};
-class LLVM_LIBRARY_VISIBILITY Minix : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY Minix : public Generic_ELF {
public:
- Minix(const HostInfo &Host, const llvm::Triple& Triple);
+ Minix(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
@@ -365,95 +519,15 @@ public:
class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_ELF {
public:
- DragonFly(const HostInfo &Host, const llvm::Triple& Triple);
+ DragonFly(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
};
class LLVM_LIBRARY_VISIBILITY Linux : public Generic_ELF {
- /// \brief Struct to store and manipulate GCC versions.
- ///
- /// We rely on assumptions about the form and structure of GCC version
- /// numbers: they consist of at most three '.'-separated components, and each
- /// component is a non-negative integer except for the last component. For
- /// the last component we are very flexible in order to tolerate release
- /// candidates or 'x' wildcards.
- ///
- /// Note that the ordering established among GCCVersions is based on the
- /// preferred version string to use. For example we prefer versions without
- /// a hard-coded patch number to those with a hard coded patch number.
- ///
- /// Currently this doesn't provide any logic for textual suffixes to patches
- /// in the way that (for example) Debian's version format does. If that ever
- /// becomes necessary, it can be added.
- struct GCCVersion {
- /// \brief The unparsed text of the version.
- std::string Text;
-
- /// \brief The parsed major, minor, and patch numbers.
- int Major, Minor, Patch;
-
- /// \brief Any textual suffix on the patch number.
- std::string PatchSuffix;
-
- static GCCVersion Parse(StringRef VersionText);
- bool operator<(const GCCVersion &RHS) const;
- bool operator>(const GCCVersion &RHS) const { return RHS < *this; }
- bool operator<=(const GCCVersion &RHS) const { return !(*this > RHS); }
- bool operator>=(const GCCVersion &RHS) const { return !(*this < RHS); }
- };
-
-
- /// \brief This is a class to find a viable GCC installation for Clang to
- /// use.
- ///
- /// This class tries to find a GCC installation on the system, and report
- /// information about it. It starts from the host information provided to the
- /// Driver, and has logic for fuzzing that where appropriate.
- class GCCInstallationDetector {
-
- bool IsValid;
- std::string GccTriple;
-
- // FIXME: These might be better as path objects.
- std::string GccInstallPath;
- std::string GccParentLibPath;
-
- GCCVersion Version;
-
- public:
- GCCInstallationDetector(const Driver &D);
-
- /// \brief Check whether we detected a valid GCC install.
- bool isValid() const { return IsValid; }
-
- /// \brief Get the GCC triple for the detected install.
- StringRef getTriple() const { return GccTriple; }
-
- /// \brief Get the detected GCC installation path.
- StringRef getInstallPath() const { return GccInstallPath; }
-
- /// \brief Get the detected GCC parent lib path.
- StringRef getParentLibPath() const { return GccParentLibPath; }
-
- /// \brief Get the detected GCC version string.
- StringRef getVersion() const { return Version.Text; }
-
- private:
- static void CollectLibDirsAndTriples(llvm::Triple::ArchType HostArch,
- SmallVectorImpl<StringRef> &LibDirs,
- SmallVectorImpl<StringRef> &Triples);
-
- void ScanLibDirForGCCTriple(llvm::Triple::ArchType HostArch,
- const std::string &LibDir,
- StringRef CandidateTriple);
- };
-
- GCCInstallationDetector GCCInstallation;
-
public:
- Linux(const HostInfo &Host, const llvm::Triple& Triple);
+ Linux(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
virtual bool HasNativeLLVMSupport() const;
@@ -467,6 +541,11 @@ public:
std::string Linker;
std::vector<std::string> ExtraOpts;
+
+private:
+ static bool addLibStdCXXIncludePaths(Twine Base, Twine TargetArchDir,
+ const ArgList &DriverArgs,
+ ArgStringList &CC1Args);
};
@@ -474,7 +553,7 @@ public:
/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
class LLVM_LIBRARY_VISIBILITY TCEToolChain : public ToolChain {
public:
- TCEToolChain(const HostInfo &Host, const llvm::Triple& Triple);
+ TCEToolChain(const Driver &D, const llvm::Triple& Triple);
~TCEToolChain();
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
@@ -493,7 +572,7 @@ class LLVM_LIBRARY_VISIBILITY Windows : public ToolChain {
mutable llvm::DenseMap<unsigned, Tool*> Tools;
public:
- Windows(const HostInfo &Host, const llvm::Triple& Triple);
+ Windows(const Driver &D, const llvm::Triple& Triple);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
const ActionList &Inputs) const;
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
index d9423d2..d3dab19 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
@@ -16,7 +16,6 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Job.h"
-#include "clang/Driver/HostInfo.h"
#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/Option.h"
#include "clang/Driver/Options.h"
@@ -36,13 +35,6 @@
#include "InputInfo.h"
#include "ToolChains.h"
-#ifdef __CYGWIN__
-#include <cygwin/version.h>
-#if defined(CYGWIN_VERSION_DLL_MAJOR) && CYGWIN_VERSION_DLL_MAJOR<1007
-#define IS_CYGWIN15 1
-#endif
-#endif
-
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang;
@@ -96,6 +88,39 @@ static void QuoteTarget(StringRef Target,
}
}
+static void addDirectoryList(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const char *ArgName,
+ const char *EnvVar) {
+ const char *DirList = ::getenv(EnvVar);
+ if (!DirList)
+ return; // Nothing to do.
+
+ StringRef Dirs(DirList);
+ if (Dirs.empty()) // Empty string should not add '.'.
+ return;
+
+ StringRef::size_type Delim;
+ while ((Delim = Dirs.find(llvm::sys::PathSeparator)) != StringRef::npos) {
+ if (Delim == 0) { // Leading colon.
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(".");
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(Args.MakeArgString(Dirs.substr(0, Delim)));
+ }
+ Dirs = Dirs.substr(Delim + 1);
+ }
+
+ if (Dirs.empty()) { // Trailing colon.
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(".");
+ } else { // Add the last path.
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(Args.MakeArgString(Dirs));
+ }
+}
+
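[Editor's note: the helper above generalizes the splitting logic that AddIncludeDirectoryList used to apply to CPATH-style variables (that function is deleted further down) so the same code can also feed -L options from LIBRARY_PATH. A standalone sketch of the splitting rules, using only the standard library — a hypothetical illustration, not code from this commit:

    #include <iostream>
    #include <string>
    #include <vector>

    // Each separator-delimited component becomes "<ArgName> <dir>"; an empty
    // component (a leading, doubled, or trailing separator) stands for ".".
    static void splitDirList(const std::string &Dirs, const std::string &ArgName,
                             char Sep, std::vector<std::string> &Out) {
      if (Dirs.empty())
        return; // An empty string adds nothing, not ".".
      std::string::size_type Begin = 0, Delim;
      while ((Delim = Dirs.find(Sep, Begin)) != std::string::npos) {
        Out.push_back(ArgName);
        Out.push_back(Delim == Begin ? "." : Dirs.substr(Begin, Delim - Begin));
        Begin = Delim + 1;
      }
      Out.push_back(ArgName);
      Out.push_back(Begin == Dirs.size() ? "." : Dirs.substr(Begin));
    }

    int main() {
      std::vector<std::string> Args;
      splitDirList(":/usr/lib::/opt/lib:", "-L", ':', Args);
      for (const std::string &S : Args)
        std::cout << S << '\n'; // -L . -L /usr/lib -L . -L /opt/lib -L .
    }
]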
static void AddLinkerInputs(const ToolChain &TC,
const InputInfoList &Inputs, const ArgList &Args,
ArgStringList &CmdArgs) {
@@ -136,6 +161,9 @@ static void AddLinkerInputs(const ToolChain &TC,
} else
A.renderAsInput(Args, CmdArgs);
}
+
+ // LIBRARY_PATH - included following the user specified library paths.
+ addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
}
/// \brief Determine whether Objective-C automated reference counting is
@@ -144,6 +172,13 @@ static bool isObjCAutoRefCount(const ArgList &Args) {
return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false);
}
+/// \brief Determine whether we are linking the ObjC runtime.
+static bool isObjCRuntimeLinked(const ArgList &Args) {
+ if (isObjCAutoRefCount(Args))
+ return true;
+ return Args.hasArg(options::OPT_fobjc_link_runtime);
+}
+
static void addProfileRT(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs,
llvm::Triple Triple) {
@@ -157,53 +192,14 @@ static void addProfileRT(const ToolChain &TC, const ArgList &Args,
// the link line. We cannot do the same thing because unlike gcov there is a
// libprofile_rt.so. We used to use the -l:libprofile_rt.a syntax, but that is
// not supported by old linkers.
- Twine ProfileRT =
- Twine(TC.getDriver().Dir) + "/../lib/" + "libprofile_rt.a";
-
- if (Triple.isOSDarwin()) {
- // On Darwin, if the static library doesn't exist try the dylib.
- bool Exists;
- if (llvm::sys::fs::exists(ProfileRT.str(), Exists) || !Exists)
- ProfileRT =
- Twine(TC.getDriver().Dir) + "/../lib/" + "libprofile_rt.dylib";
- }
+ std::string ProfileRT =
+ std::string(TC.getDriver().Dir) + "/../lib/libprofile_rt.a";
CmdArgs.push_back(Args.MakeArgString(ProfileRT));
}
-static void AddIncludeDirectoryList(const ArgList &Args,
- ArgStringList &CmdArgs,
- const char *ArgName,
- const char *DirList) {
- if (!DirList)
- return; // Nothing to do.
-
- StringRef Dirs(DirList);
- if (Dirs.empty()) // Empty string should not add '.'.
- return;
-
- StringRef::size_type Delim;
- while ((Delim = Dirs.find(llvm::sys::PathSeparator)) != StringRef::npos) {
- if (Delim == 0) { // Leading colon.
- CmdArgs.push_back(ArgName);
- CmdArgs.push_back(".");
- } else {
- CmdArgs.push_back(ArgName);
- CmdArgs.push_back(Args.MakeArgString(Dirs.substr(0, Delim)));
- }
- Dirs = Dirs.substr(Delim + 1);
- }
-
- if (Dirs.empty()) { // Trailing colon.
- CmdArgs.push_back(ArgName);
- CmdArgs.push_back(".");
- } else { // Add the last path.
- CmdArgs.push_back(ArgName);
- CmdArgs.push_back(Args.MakeArgString(Dirs));
- }
-}
-
-void Clang::AddPreprocessingOptions(const Driver &D,
+void Clang::AddPreprocessingOptions(Compilation &C,
+ const Driver &D,
const ArgList &Args,
ArgStringList &CmdArgs,
const InputInfo &Output,
@@ -225,11 +221,13 @@ void Clang::AddPreprocessingOptions(const Driver &D,
DepFile = Output.getFilename();
} else if (Arg *MF = Args.getLastArg(options::OPT_MF)) {
DepFile = MF->getValue(Args);
+ C.addFailureResultFile(DepFile);
} else if (A->getOption().matches(options::OPT_M) ||
A->getOption().matches(options::OPT_MM)) {
DepFile = "-";
} else {
DepFile = darwin::CC1::getDependencyFileName(Args, Inputs);
+ C.addFailureResultFile(DepFile);
}
CmdArgs.push_back("-dependency-file");
CmdArgs.push_back(DepFile);
@@ -247,13 +245,13 @@ void Clang::AddPreprocessingOptions(const Driver &D,
// Otherwise derive from the base input.
//
// FIXME: This should use the computed output file location.
- llvm::SmallString<128> P(Inputs[0].getBaseInput());
+ SmallString<128> P(Inputs[0].getBaseInput());
llvm::sys::path::replace_extension(P, "o");
DepTarget = Args.MakeArgString(llvm::sys::path::filename(P));
}
CmdArgs.push_back("-MT");
- llvm::SmallString<128> Quoted;
+ SmallString<128> Quoted;
QuoteTarget(DepTarget, Quoted);
CmdArgs.push_back(Args.MakeArgString(Quoted));
}
@@ -281,7 +279,7 @@ void Clang::AddPreprocessingOptions(const Driver &D,
if (A->getOption().matches(options::OPT_MQ)) {
CmdArgs.push_back("-MT");
- llvm::SmallString<128> Quoted;
+ SmallString<128> Quoted;
QuoteTarget(A->getValue(Args), Quoted);
CmdArgs.push_back(Args.MakeArgString(Quoted));
@@ -392,7 +390,7 @@ void Clang::AddPreprocessingOptions(const Driver &D,
A->claim();
A->render(Args, CmdArgs);
} else {
- llvm::SmallString<128> DefaultModuleCache;
+ SmallString<128> DefaultModuleCache;
llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false,
DefaultModuleCache);
llvm::sys::path::append(DefaultModuleCache, "clang-module-cache");
@@ -400,24 +398,20 @@ void Clang::AddPreprocessingOptions(const Driver &D,
CmdArgs.push_back(Args.MakeArgString(DefaultModuleCache));
}
- Args.AddAllArgs(CmdArgs, options::OPT_fauto_module_import);
-
// Parse additional include paths from environment variables.
+ // FIXME: We should probably sink the logic for handling these from the
+ // frontend into the driver. It will allow deleting 4 otherwise unused flags.
// CPATH - included following the user specified includes (but prior to
// builtin and standard includes).
- AddIncludeDirectoryList(Args, CmdArgs, "-I", ::getenv("CPATH"));
+ addDirectoryList(Args, CmdArgs, "-I", "CPATH");
// C_INCLUDE_PATH - system includes enabled when compiling C.
- AddIncludeDirectoryList(Args, CmdArgs, "-c-isystem",
- ::getenv("C_INCLUDE_PATH"));
+ addDirectoryList(Args, CmdArgs, "-c-isystem", "C_INCLUDE_PATH");
// CPLUS_INCLUDE_PATH - system includes enabled when compiling C++.
- AddIncludeDirectoryList(Args, CmdArgs, "-cxx-isystem",
- ::getenv("CPLUS_INCLUDE_PATH"));
+ addDirectoryList(Args, CmdArgs, "-cxx-isystem", "CPLUS_INCLUDE_PATH");
// OBJC_INCLUDE_PATH - system includes enabled when compiling ObjC.
- AddIncludeDirectoryList(Args, CmdArgs, "-objc-isystem",
- ::getenv("OBJC_INCLUDE_PATH"));
+ addDirectoryList(Args, CmdArgs, "-objc-isystem", "OBJC_INCLUDE_PATH");
// OBJCPLUS_INCLUDE_PATH - system includes enabled when compiling ObjC++.
- AddIncludeDirectoryList(Args, CmdArgs, "-objcxx-isystem",
- ::getenv("OBJCPLUS_INCLUDE_PATH"));
+ addDirectoryList(Args, CmdArgs, "-objcxx-isystem", "OBJCPLUS_INCLUDE_PATH");
// Add C++ include arguments, if needed.
if (types::isCXX(Inputs[0].getType()))
@@ -489,6 +483,9 @@ static const char *getLLVMArchSuffixForARM(StringRef CPU) {
.Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
.Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
.Cases("cortex-a8", "cortex-a9", "v7")
+ .Case("cortex-m3", "v7m")
+ .Case("cortex-m4", "v7m")
+ .Case("cortex-m0", "v6m")
.Default("");
}
@@ -504,46 +501,78 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
if (Triple.isOSDarwin())
return true;
return false;
-
- case llvm::Triple::systemz:
- return false;
}
}
-void Clang::AddARMTargetArgs(const ArgList &Args,
- ArgStringList &CmdArgs,
- bool KernelOrKext) const {
- const Driver &D = getToolChain().getDriver();
- llvm::Triple Triple = getToolChain().getTriple();
+// Handle -mfpu=.
+//
+// FIXME: Centralize feature selection; defaulting shouldn't also live in the
+// frontend target.
+static void addFPUArgs(const Driver &D, const Arg *A, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ StringRef FPU = A->getValue(Args);
+
+ // Set the target features based on the FPU.
+ if (FPU == "fpa" || FPU == "fpe2" || FPU == "fpe3" || FPU == "maverick") {
+ // Disable any default FPU support.
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-vfp2");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-vfp3");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ } else if (FPU == "vfp3-d16" || FPU == "vfpv3-d16") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+vfp3");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+d16");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ } else if (FPU == "vfp") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+vfp2");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ } else if (FPU == "vfp3" || FPU == "vfpv3") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+vfp3");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ } else if (FPU == "neon") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+neon");
+ } else
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+}
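[Editor's note: relative to the inline code this replaces (visible in the hunk further down), the vfp3-d16/vfpv3-d16 and vfpv3 spellings and the explicit -neon disables are new. As an illustration of the mapping — an expectation from reading the code above, not captured output — a command like `clang -target armv7-unknown-linux-gnueabi -mfpu=vfpv3-d16 -### -c t.c` should show `-target-feature +vfp3 -target-feature +d16 -target-feature -neon` on the resulting cc1 line.]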
- // Select the ABI to use.
- //
- // FIXME: Support -meabi.
- const char *ABIName = 0;
- if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
- ABIName = A->getValue(Args);
- } else {
- // Select the default based on the platform.
- switch(Triple.getEnvironment()) {
- case llvm::Triple::GNUEABI:
- ABIName = "aapcs-linux";
- break;
- case llvm::Triple::EABI:
- ABIName = "aapcs";
- break;
- default:
- ABIName = "apcs-gnu";
- }
- }
- CmdArgs.push_back("-target-abi");
- CmdArgs.push_back(ABIName);
+// Handle -mfpmath=.
+static void addFPMathArgs(const Driver &D, const Arg *A, const ArgList &Args,
+ ArgStringList &CmdArgs, StringRef CPU) {
+ StringRef FPMath = A->getValue(Args);
+
+ // Set the target features based on the FPMath.
+ if (FPMath == "neon") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+neonfp");
+
+ if (CPU != "cortex-a8" && CPU != "cortex-a9" && CPU != "cortex-a9-mp")
+ D.Diag(diag::err_drv_invalid_feature) << "-mfpmath=neon" << CPU;
+
+ } else if (FPMath == "vfp" || FPMath == "vfp2" || FPMath == "vfp3" ||
+ FPMath == "vfp4") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neonfp");
- // Set the CPU based on -march= and -mcpu=.
- CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(getARMTargetCPU(Args, Triple));
+ // FIXME: Add warnings when disabling a feature not present for a given CPU.
+ } else
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+}
- // Select the float ABI as determined by -msoft-float, -mhard-float, and
- // -mfloat-abi=.
+// Select the float ABI as determined by -msoft-float, -mhard-float, and
+// -mfloat-abi=.
+static StringRef getARMFloatABI(const Driver &D,
+ const ArgList &Args,
+ const llvm::Triple &Triple) {
StringRef FloatABI;
if (Arg *A = Args.getLastArg(options::OPT_msoft_float,
options::OPT_mhard_float,
@@ -564,7 +593,6 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// If unspecified, choose the default based on the platform.
if (FloatABI.empty()) {
- const llvm::Triple &Triple = getToolChain().getTriple();
switch (Triple.getOS()) {
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
@@ -582,7 +610,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
}
case llvm::Triple::Linux: {
- if (getToolChain().getTriple().getEnvironment() == llvm::Triple::GNUEABI) {
+ if (Triple.getEnvironment() == llvm::Triple::GNUEABI) {
FloatABI = "softfp";
break;
}
@@ -598,6 +626,15 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// EABI is always AAPCS, and if it was not marked 'hard', it's softfp
FloatABI = "softfp";
break;
+ case llvm::Triple::ANDROIDEABI: {
+ StringRef ArchName =
+ getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
+ if (ArchName.startswith("v7"))
+ FloatABI = "softfp";
+ else
+ FloatABI = "soft";
+ break;
+ }
default:
// Assume "soft", but warn the user we are guessing.
FloatABI = "soft";
@@ -607,6 +644,45 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
}
}
+ return FloatABI;
+}
+
+
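[Editor's note: factoring the float-ABI computation into a standalone helper leaves AddARMTargetArgs (re-added below) with a single call. The defaults it falls back to when none of -msoft-float, -mhard-float, or -mfloat-abi= is given, as read from the code above (the Darwin per-CPU cases are elided by the diff context): GNUEABI Linux and v7-or-newer ANDROIDEABI targets get "softfp" (VFP instructions with the soft-float calling convention), bare EABI targets get "softfp" since EABI is always AAPCS, older Android targets get "soft", and anything unrecognized gets "soft" plus a warning that the driver is guessing.]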
+void Clang::AddARMTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ bool KernelOrKext) const {
+ const Driver &D = getToolChain().getDriver();
+ llvm::Triple Triple = getToolChain().getTriple();
+
+ // Select the ABI to use.
+ //
+ // FIXME: Support -meabi.
+ const char *ABIName = 0;
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
+ ABIName = A->getValue(Args);
+ } else {
+ // Select the default based on the platform.
+ switch(Triple.getEnvironment()) {
+ case llvm::Triple::ANDROIDEABI:
+ case llvm::Triple::GNUEABI:
+ ABIName = "aapcs-linux";
+ break;
+ case llvm::Triple::EABI:
+ ABIName = "aapcs";
+ break;
+ default:
+ ABIName = "apcs-gnu";
+ }
+ }
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName);
+
+ // Set the CPU based on -march= and -mcpu=.
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(getARMTargetCPU(Args, Triple));
+
+ // Determine floating point ABI from the options & target defaults.
+ StringRef FloatABI = getARMFloatABI(D, Args, Triple);
if (FloatABI == "soft") {
// Floating point operations and argument passing are soft.
//
@@ -644,33 +720,12 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
}
// Honor -mfpu=.
- //
- // FIXME: Centralize feature selection, defaulting shouldn't be also in the
- // frontend target.
- if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ)) {
- StringRef FPU = A->getValue(Args);
-
- // Set the target features based on the FPU.
- if (FPU == "fpa" || FPU == "fpe2" || FPU == "fpe3" || FPU == "maverick") {
- // Disable any default FPU support.
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("-vfp2");
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("-vfp3");
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("-neon");
- } else if (FPU == "vfp") {
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("+vfp2");
- } else if (FPU == "vfp3") {
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("+vfp3");
- } else if (FPU == "neon") {
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("+neon");
- } else
- D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
- }
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
+ addFPUArgs(D, A, Args, CmdArgs);
+
+ // Honor -mfpmath=.
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpmath_EQ))
+ addFPMathArgs(D, A, Args, CmdArgs, getARMTargetCPU(Args, Triple));
// Setting -msoft-float effectively disables NEON because of the GCC
// implementation, although the same isn't true of VFP or VFP3.
@@ -703,25 +758,32 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// Get default architecture.
static const char* getMipsArchFromCPU(StringRef CPUName) {
- if (CPUName == "mips32r1" || CPUName == "4ke")
+ if (CPUName == "mips32" || CPUName == "mips32r2")
return "mips";
- assert((CPUName == "mips64r1" || CPUName == "mips64r2") &&
+ assert((CPUName == "mips64" || CPUName == "mips64r2") &&
"Unexpected cpu name.");
return "mips64";
}
+// Check that ArchName is a known Mips architecture name.
+static bool checkMipsArchName(StringRef ArchName) {
+ return ArchName == "mips" ||
+ ArchName == "mipsel" ||
+ ArchName == "mips64" ||
+ ArchName == "mips64el";
+}
+
// Get default target cpu.
-static const char* getMipsCPUFromArch(StringRef ArchName, const Driver &D) {
+static const char* getMipsCPUFromArch(StringRef ArchName) {
if (ArchName == "mips" || ArchName == "mipsel")
- return "mips32r1";
- else if (ArchName == "mips64" || ArchName == "mips64el")
- return "mips64r1";
- else
- D.Diag(diag::err_drv_invalid_arch_name) << ArchName;
+ return "mips32";
+
+ assert((ArchName == "mips64" || ArchName == "mips64el") &&
+ "Unexpected arch name.");
- return 0;
+ return "mips64";
}
// Get default ABI.
@@ -734,61 +796,98 @@ static const char* getMipsABIFromArch(StringRef ArchName) {
return "n64";
}
-void Clang::AddMIPSTargetArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
- const Driver &D = getToolChain().getDriver();
-
+// Get CPU and ABI names. They are not independent
+// so we have to calculate them together.
+static void getMipsCPUAndABI(const ArgList &Args,
+ const ToolChain &TC,
+ StringRef &CPUName,
+ StringRef &ABIName) {
StringRef ArchName;
- const char *CPUName;
- // Set target cpu and architecture.
+ // Select target cpu and architecture.
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
CPUName = A->getValue(Args);
ArchName = getMipsArchFromCPU(CPUName);
}
else {
- ArchName = Args.MakeArgString(getToolChain().getArchName());
- CPUName = getMipsCPUFromArch(ArchName, D);
+ ArchName = Args.MakeArgString(TC.getArchName());
+ if (!checkMipsArchName(ArchName))
+ TC.getDriver().Diag(diag::err_drv_invalid_arch_name) << ArchName;
+ else
+ CPUName = getMipsCPUFromArch(ArchName);
}
-
- CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(CPUName);
// Select the ABI to use.
- const char *ABIName = 0;
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
ABIName = A->getValue(Args);
else
ABIName = getMipsABIFromArch(ArchName);
+}
+
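[Editor's note: with CPU and ABI now computed together, the two stay consistent. For example, `clang -target mips64el-unknown-linux -### -c t.c` should select `-target-cpu mips64` with `-target-abi n64` (the 64-bit tail of getMipsABIFromArch is visible in the earlier context), while adding `-mcpu=mips32r2` flips the inferred arch to "mips" and with it the 32-bit default ABI; an explicit -mabi= always overrides only the ABI half. These are expectations from reading the code, not captured driver output.]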
+void Clang::AddMIPSTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ StringRef CPUName;
+ StringRef ABIName;
+ getMipsCPUAndABI(Args, getToolChain(), CPUName, ABIName);
+
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(CPUName.data());
CmdArgs.push_back("-target-abi");
- CmdArgs.push_back(ABIName);
+ CmdArgs.push_back(ABIName.data());
- // Select the float ABI as determined by -msoft-float, -mhard-float, and
+ // Select the float ABI as determined by -msoft-float, -mhard-float,
+ // and -mfloat-abi=.
StringRef FloatABI;
if (Arg *A = Args.getLastArg(options::OPT_msoft_float,
- options::OPT_mhard_float)) {
+ options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
if (A->getOption().matches(options::OPT_msoft_float))
FloatABI = "soft";
else if (A->getOption().matches(options::OPT_mhard_float))
FloatABI = "hard";
+ else {
+ FloatABI = A->getValue(Args);
+ if (FloatABI != "soft" && FloatABI != "single" && FloatABI != "hard") {
+ D.Diag(diag::err_drv_invalid_mfloat_abi)
+ << A->getAsString(Args);
+ FloatABI = "hard";
+ }
+ }
}
// If unspecified, choose the default based on the platform.
if (FloatABI.empty()) {
- // Assume "soft", but warn the user we are guessing.
- FloatABI = "soft";
- D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
+ // Assume "hard", since that is the default gcc uses.
+ // When we start to recognize specific target MIPS processors,
+ // we will be able to select the default more correctly.
+ FloatABI = "hard";
}
if (FloatABI == "soft") {
// Floating point operations and argument passing are soft.
- //
- // FIXME: This changes CPP defines, we need -target-soft-float.
CmdArgs.push_back("-msoft-float");
- } else {
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+
+ // FIXME: This is a hack. We need to pass the selected float mode to
+ // MipsTargetInfoBase so the appropriate macros get defined there; for
+ // now this target feature is the only way to do that.
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+soft-float");
+ }
+ else if (FloatABI == "single") {
+ // Restrict the use of hardware floating-point
+ // instructions to 32-bit operations.
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+single-float");
+ }
+ else {
+ // Floating point operations and argument passing are hard.
assert(FloatABI == "hard" && "Invalid float abi!");
- CmdArgs.push_back("-mhard-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
}
}
@@ -797,9 +896,8 @@ void Clang::AddSparcTargetArgs(const ArgList &Args,

const Driver &D = getToolChain().getDriver();
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
- StringRef MArch = A->getValue(Args);
CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(MArch.str().c_str());
+ CmdArgs.push_back(A->getValue(Args));
}
// Select the float ABI as determined by -msoft-float, -mhard-float, and
@@ -941,6 +1039,70 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
}
}
+static Arg* getLastHexagonArchArg (const ArgList &Args)
+{
+ Arg * A = NULL;
+
+ for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
+ it != ie; ++it) {
+ if ((*it)->getOption().matches(options::OPT_march_EQ) ||
+ (*it)->getOption().matches(options::OPT_mcpu_EQ)) {
+ A = *it;
+ A->claim();
+ }
+ else if ((*it)->getOption().matches(options::OPT_m_Joined)){
+ StringRef Value = (*it)->getValue(Args,0);
+ if (Value.startswith("v")) {
+ A = *it;
+ A->claim();
+ }
+ }
+ }
+ return A;
+}
+
+static StringRef getHexagonTargetCPU(const ArgList &Args)
+{
+ Arg *A;
+ llvm::StringRef WhichHexagon;
+
+ // Select the default CPU (v4) if none was given or detection failed.
+ if ((A = getLastHexagonArchArg (Args))) {
+ WhichHexagon = A->getValue(Args);
+ if (WhichHexagon == "")
+ return "v4";
+ else
+ return WhichHexagon;
+ }
+ else
+ return "v4";
+}
+
+void Clang::AddHexagonTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ llvm::Triple Triple = getToolChain().getTriple();
+
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString("hexagon" + getHexagonTargetCPU(Args)));
+ CmdArgs.push_back("-fno-signed-char");
+ CmdArgs.push_back("-nobuiltininc");
+
+ if (Args.hasArg(options::OPT_mqdsp6_compat))
+ CmdArgs.push_back("-mqdsp6-compat");
+
+ if (Arg *A = Args.getLastArg(options::OPT_G,
+ options::OPT_msmall_data_threshold_EQ)) {
+ std::string SmallDataThreshold="-small-data-threshold=";
+ SmallDataThreshold += A->getValue(Args);
+ CmdArgs.push_back ("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString(SmallDataThreshold));
+ A->claim();
+ }
+
+ CmdArgs.push_back ("-mllvm");
+ CmdArgs.push_back ("-machine-sink-split=0");
+}
+
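[Editor's note: Hexagon accepts its architecture either as -march=/-mcpu= or as a bare joined -mv<N> flag, all funneled through getLastHexagonArchArg so the last one on the command line wins; the raw value is then prefixed with "hexagon". So `clang -target hexagon-unknown-linux -mv2 -### -c t.c` should carry `-target-cpu hexagonv2`, and with no flag at all the default is hexagonv4 — again an expectation from the code above, not captured output.]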
static bool
shouldUseExceptionTablesForObjCExceptions(unsigned objcABIVersion,
const llvm::Triple &Triple) {
@@ -966,11 +1128,20 @@ shouldUseExceptionTablesForObjCExceptions(unsigned objcABIVersion,
/// Objective-C exceptions.
static void addExceptionArgs(const ArgList &Args, types::ID InputType,
const llvm::Triple &Triple,
- bool KernelOrKext, bool IsRewriter,
+ bool KernelOrKext,
unsigned objcABIVersion,
ArgStringList &CmdArgs) {
- if (KernelOrKext)
+ if (KernelOrKext) {
+ // -mkernel and -fapple-kext imply no exceptions, so claim exception-related
+ // arguments now to avoid warnings about unused arguments.
+ Args.ClaimAllArgs(options::OPT_fexceptions);
+ Args.ClaimAllArgs(options::OPT_fno_exceptions);
+ Args.ClaimAllArgs(options::OPT_fobjc_exceptions);
+ Args.ClaimAllArgs(options::OPT_fno_objc_exceptions);
+ Args.ClaimAllArgs(options::OPT_fcxx_exceptions);
+ Args.ClaimAllArgs(options::OPT_fno_cxx_exceptions);
return;
+ }
// Exceptions are enabled by default.
bool ExceptionsEnabled = true;
@@ -1033,28 +1204,29 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
static bool ShouldDisableCFI(const ArgList &Args,
const ToolChain &TC) {
+ bool Default = true;
if (TC.getTriple().isOSDarwin()) {
// The native darwin assembler doesn't support cfi directives, so
// we disable them if we think the .s file will be passed to it.
+ Default = Args.hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ TC.IsIntegratedAssemblerDefault());
+ }
+ return !Args.hasFlag(options::OPT_fdwarf2_cfi_asm,
+ options::OPT_fno_dwarf2_cfi_asm,
+ Default);
+}
- // FIXME: Duplicated code with ToolChains.cpp
- // FIXME: This doesn't belong here, but ideally we will support static soon
- // anyway.
- bool HasStatic = (Args.hasArg(options::OPT_mkernel) ||
- Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_fapple_kext));
- bool IsIADefault = TC.IsIntegratedAssemblerDefault() && !HasStatic;
- bool UseIntegratedAs = Args.hasFlag(options::OPT_integrated_as,
- options::OPT_no_integrated_as,
- IsIADefault);
- bool UseCFI = Args.hasFlag(options::OPT_fdwarf2_cfi_asm,
- options::OPT_fno_dwarf2_cfi_asm,
- UseIntegratedAs);
- return !UseCFI;
- }
-
- // For now we assume that every other assembler support CFI.
- return false;
+static bool ShouldDisableDwarfDirectory(const ArgList &Args,
+ const ToolChain &TC) {
+ bool IsIADefault = TC.IsIntegratedAssemblerDefault();
+ bool UseIntegratedAs = Args.hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIADefault);
+ bool UseDwarfDirectory = Args.hasFlag(options::OPT_fdwarf_directory_asm,
+ options::OPT_fno_dwarf_directory_asm,
+ UseIntegratedAs);
+ return !UseDwarfDirectory;
}
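[Editor's note: both predicates hinge on ArgList::hasFlag(Pos, Neg, Default), which returns Default when neither flag appears and otherwise follows the last of the pair on the command line. A minimal standalone model of the new ShouldDisableCFI — a hypothetical sketch, with std::optional standing in for "flag present or not":

    #include <optional>

    // Model: IntegratedAsFlag/CFIFlag are the last -(no-)integrated-as and
    // -f(no-)dwarf2-cfi-asm flags if present; IADefault is the toolchain default.
    static bool shouldDisableCFI(bool IsDarwin, bool IADefault,
                                 std::optional<bool> IntegratedAsFlag,
                                 std::optional<bool> CFIFlag) {
      // Every assembler other than Apple's is assumed to support CFI directives;
      // on Darwin the default tracks whether the integrated assembler is used.
      bool Default = true;
      if (IsDarwin)
        Default = IntegratedAsFlag.value_or(IADefault);
      return !CFIFlag.value_or(Default); // An explicit CFI flag always wins.
    }

    int main() {
      // Darwin with the integrated assembler off and no explicit flags:
      // CFI directives get disabled, matching the code above.
      return shouldDisableCFI(true, false, {}, {}) ? 0 : 1;
    }
]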
/// \brief Check whether the given input tree contains any compilation actions.
@@ -1092,6 +1264,46 @@ static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
RelaxDefault);
}
+/// If AddressSanitizer is enabled, add appropriate linker flags (Linux).
+/// This needs to be called before we add the C run-time (malloc, etc).
+static void addAsanRTLinux(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // Add asan linker flags when linking an executable, but not a shared object.
+ if (Args.hasArg(options::OPT_shared) ||
+ !Args.hasFlag(options::OPT_faddress_sanitizer,
+ options::OPT_fno_address_sanitizer, false))
+ return;
+
+ // LibAsan is "libclang_rt.asan-<ArchName>.a" in the Linux library resource
+ // directory.
+ SmallString<128> LibAsan(TC.getDriver().ResourceDir);
+ llvm::sys::path::append(LibAsan, "lib", "linux",
+ (Twine("libclang_rt.asan-") +
+ TC.getArchName() + ".a"));
+ CmdArgs.push_back(Args.MakeArgString(LibAsan));
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-ldl");
+ CmdArgs.push_back("-export-dynamic");
+}
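[Editor's note: per the comment above, callers are expected to invoke this before adding the C runtime, so a link such as `clang -faddress-sanitizer main.o -o main` on i386 Linux should gain something like `<resource-dir>/lib/linux/libclang_rt.asan-i386.a -lpthread -ldl -export-dynamic` ahead of the standard libraries, while `-shared` links are deliberately left alone so the runtime ends up only in the executable. The path is shown schematically; the actual resource directory depends on the install.]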
+
+static bool shouldUseFramePointer(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
+ options::OPT_fomit_frame_pointer))
+ return A->getOption().matches(options::OPT_fno_omit_frame_pointer);
+
+ // Don't use a frame pointer on linux x86 and x86_64 if optimizing.
+ if ((Triple.getArch() == llvm::Triple::x86_64 ||
+ Triple.getArch() == llvm::Triple::x86) &&
+ Triple.getOS() == llvm::Triple::Linux) {
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group))
+ if (!A->getOption().matches(options::OPT_O0))
+ return false;
+ }
+
+ return true;
+}
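[Editor's note: the effect is that on x86 and x86_64 Linux an optimized build (-O1 and up) now omits the frame pointer by default, matching gcc, while -O0 builds, other OSes and architectures, and any explicit -f(no-)omit-frame-pointer keep the old behavior — with the last of the two explicit flags winning.]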
+
void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -1116,9 +1328,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Select the appropriate action.
bool IsRewriter = false;
+ bool IsModernRewriter = false;
+
if (isa<AnalyzeJobAction>(JA)) {
assert(JA.getType() == types::TY_Plist && "Invalid output type.");
CmdArgs.push_back("-analyze");
+ } else if (isa<MigrateJobAction>(JA)) {
+ CmdArgs.push_back("-migrate");
} else if (isa<PreprocessJobAction>(JA)) {
if (Output.getType() == types::TY_Dependencies)
CmdArgs.push_back("-Eonly");
@@ -1184,6 +1400,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-emit-pch");
} else if (JA.getType() == types::TY_RewrittenObjC) {
CmdArgs.push_back("-rewrite-objc");
+ IsModernRewriter = true;
+ } else if (JA.getType() == types::TY_RewrittenLegacyObjC) {
+ CmdArgs.push_back("-rewrite-objc");
IsRewriter = true;
} else {
assert(JA.getType() == types::TY_PP_Asm &&
@@ -1218,17 +1437,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-analyzer-eagerly-assume");
+ CmdArgs.push_back("-analyzer-ipa=inlining");
+
// Add default argument set.
if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
CmdArgs.push_back("-analyzer-checker=core");
- CmdArgs.push_back("-analyzer-checker=deadcode");
- CmdArgs.push_back("-analyzer-checker=security");
if (getToolChain().getTriple().getOS() != llvm::Triple::Win32)
CmdArgs.push_back("-analyzer-checker=unix");
if (getToolChain().getTriple().getVendor() == llvm::Triple::Apple)
CmdArgs.push_back("-analyzer-checker=osx");
+
+ CmdArgs.push_back("-analyzer-checker=deadcode");
+
+ // Enable the following experimental checkers for testing.
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.UncheckedReturn");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
}
// Set the output format. The default is plist, for (lame) historical
@@ -1260,15 +1489,43 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// would do to enable flag_pic.
//
// FIXME: Centralize this code.
- bool PICEnabled = (Args.hasArg(options::OPT_fPIC) ||
- Args.hasArg(options::OPT_fpic) ||
- Args.hasArg(options::OPT_fPIE) ||
- Args.hasArg(options::OPT_fpie));
- bool PICDisabled = (Args.hasArg(options::OPT_mkernel) ||
- Args.hasArg(options::OPT_static));
+ Arg *LastPICArg = 0;
+ for (ArgList::const_iterator I = Args.begin(), E = Args.end(); I != E; ++I) {
+ if ((*I)->getOption().matches(options::OPT_fPIC) ||
+ (*I)->getOption().matches(options::OPT_fno_PIC) ||
+ (*I)->getOption().matches(options::OPT_fpic) ||
+ (*I)->getOption().matches(options::OPT_fno_pic) ||
+ (*I)->getOption().matches(options::OPT_fPIE) ||
+ (*I)->getOption().matches(options::OPT_fno_PIE) ||
+ (*I)->getOption().matches(options::OPT_fpie) ||
+ (*I)->getOption().matches(options::OPT_fno_pie)) {
+ LastPICArg = *I;
+ (*I)->claim();
+ }
+ }
+ bool PICDisabled = false;
+ bool PICEnabled = false;
+ bool PICForPIE = false;
+ if (LastPICArg) {
+ PICForPIE = (LastPICArg->getOption().matches(options::OPT_fPIE) ||
+ LastPICArg->getOption().matches(options::OPT_fpie));
+ PICEnabled = (PICForPIE ||
+ LastPICArg->getOption().matches(options::OPT_fPIC) ||
+ LastPICArg->getOption().matches(options::OPT_fpic));
+ PICDisabled = !PICEnabled;
+ }
+ // Note that these flags are trump-cards. Regardless of the order w.r.t. the
+ // PIC or PIE options above, if these show up, PIC is disabled.
+ if (Args.hasArg(options::OPT_mkernel))
+ PICDisabled = true;
+ if (Args.hasArg(options::OPT_static))
+ PICDisabled = true;
+ bool DynamicNoPIC = Args.hasArg(options::OPT_mdynamic_no_pic);
+
+ // Select the relocation model.
const char *Model = getToolChain().GetForcedPicModel();
if (!Model) {
- if (Args.hasArg(options::OPT_mdynamic_no_pic))
+ if (DynamicNoPIC)
Model = "dynamic-no-pic";
else if (PICDisabled)
Model = "static";
@@ -1277,19 +1534,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
Model = getToolChain().GetDefaultRelocationModel();
}
- if (StringRef(Model) != "pic") {
+ StringRef ModelStr = Model ? Model : "";
+ if (Model && ModelStr != "pic") {
CmdArgs.push_back("-mrelocation-model");
CmdArgs.push_back(Model);
}
- // Infer the __PIC__ value.
- //
- // FIXME: This isn't quite right on Darwin, which always sets
- // __PIC__=2.
- if (strcmp(Model, "pic") == 0 || strcmp(Model, "dynamic-no-pic") == 0) {
+ // Infer the __PIC__ and __PIE__ values.
+ if (ModelStr == "pic" && PICForPIE) {
+ CmdArgs.push_back("-pie-level");
+ CmdArgs.push_back((LastPICArg &&
+ LastPICArg->getOption().matches(options::OPT_fPIE)) ?
+ "2" : "1");
+ } else if (ModelStr == "pic" || ModelStr == "dynamic-no-pic") {
CmdArgs.push_back("-pic-level");
- CmdArgs.push_back(Args.hasArg(options::OPT_fPIC) ? "2" : "1");
+ CmdArgs.push_back(((ModelStr != "dynamic-no-pic" && LastPICArg &&
+ LastPICArg->getOption().matches(options::OPT_fPIC)) ||
+ getToolChain().getTriple().isOSDarwin()) ? "2" : "1");
}
+
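[Editor's note: worked examples of the new last-flag-wins scan, as read from the code above: `-fPIC -fpie` selects PIC-for-PIE and yields `-pie-level 1` (a capital -fPIE as the last flag would give 2); `-fPIC -fno-pic` disables PIC entirely; and `-fPIC -static` also disables it, since -mkernel and -static trump whatever PIC/PIE flag came last. On Darwin `-pic-level` is pinned at 2 regardless of which positive flag won, matching the FIXME this replaces.]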
if (!Args.hasFlag(options::OPT_fmerge_all_constants,
options::OPT_fno_merge_all_constants))
CmdArgs.push_back("-fno-merge-all-constants");
@@ -1304,9 +1567,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
CmdArgs.push_back("-mrtd");
- // FIXME: Set --enable-unsafe-fp-math.
- if (Args.hasFlag(options::OPT_fno_omit_frame_pointer,
- options::OPT_fomit_frame_pointer))
+ if (shouldUseFramePointer(Args, getToolChain().getTriple()))
CmdArgs.push_back("-mdisable-fp-elim");
if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
options::OPT_fno_zero_initialized_in_bss))
@@ -1315,6 +1576,96 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_strict_aliasing,
getToolChain().IsStrictAliasingDefault()))
CmdArgs.push_back("-relaxed-aliasing");
+ if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums,
+ false))
+ CmdArgs.push_back("-fstrict-enums");
+ if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
+ options::OPT_fno_optimize_sibling_calls))
+ CmdArgs.push_back("-mdisable-tail-calls");
+
+ // Handle various floating point optimization flags, mapping them to the
+ // appropriate LLVM code generation flags. The pattern for all of these is:
+ // each codegen optimization defaults to off, and is turned on only if some
+ // flag enables it and no later flag disables it again. This is complicated
+ // by several "umbrella" flags.
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_ffinite_math_only,
+ options::OPT_fno_finite_math_only,
+ options::OPT_fhonor_infinities,
+ options::OPT_fno_honor_infinities))
+ if (A->getOption().getID() != options::OPT_fno_finite_math_only &&
+ A->getOption().getID() != options::OPT_fhonor_infinities)
+ CmdArgs.push_back("-menable-no-infs");
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_ffinite_math_only,
+ options::OPT_fno_finite_math_only,
+ options::OPT_fhonor_nans,
+ options::OPT_fno_honor_nans))
+ if (A->getOption().getID() != options::OPT_fno_finite_math_only &&
+ A->getOption().getID() != options::OPT_fhonor_nans)
+ CmdArgs.push_back("-menable-no-nans");
+
+ // -fno-math-errno is default.
+ bool MathErrno = false;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fmath_errno,
+ options::OPT_fno_math_errno)) {
+ if (A->getOption().getID() == options::OPT_fmath_errno) {
+ CmdArgs.push_back("-fmath-errno");
+ MathErrno = true;
+ }
+ }
+
+ // There are several flags which require disabling very specific
+ // optimizations. Any of these being disabled forces us to turn off the
+ // entire set of LLVM optimizations, so collect them through all the flag
+ // madness.
+ bool AssociativeMath = false;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_fassociative_math,
+ options::OPT_fno_associative_math))
+ if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fno_associative_math)
+ AssociativeMath = true;
+ bool ReciprocalMath = false;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_freciprocal_math,
+ options::OPT_fno_reciprocal_math))
+ if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fno_reciprocal_math)
+ ReciprocalMath = true;
+ bool SignedZeros = true;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_fsigned_zeros,
+ options::OPT_fno_signed_zeros))
+ if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fsigned_zeros)
+ SignedZeros = false;
+ bool TrappingMath = true;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_ftrapping_math,
+ options::OPT_fno_trapping_math))
+ if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_ftrapping_math)
+ TrappingMath = false;
+ if (!MathErrno && AssociativeMath && ReciprocalMath && !SignedZeros &&
+ !TrappingMath)
+ CmdArgs.push_back("-menable-unsafe-fp-math");
+
+ // We separately look for the '-ffast-math' flag, and if we find it, tell the
+ // frontend to provide the appropriate preprocessor macros. This is distinct
+ // from enabling any optimizations as it induces a language change which must
+ // survive serialization and deserialization, etc.
+ if (Args.hasArg(options::OPT_ffast_math))
+ CmdArgs.push_back("-ffast-math");
// Decide whether to use verbose asm. Verbose assembly is the default on
// toolchains which have the integrated assembler on by default.
@@ -1397,8 +1748,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::x86_64:
AddX86TargetArgs(Args, CmdArgs);
break;
+
+ case llvm::Triple::hexagon:
+ AddHexagonTargetArgs(Args, CmdArgs);
+ break;
}
+
+
// Pass the linker version in use.
if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
CmdArgs.push_back("-target-linker-version");
@@ -1411,21 +1768,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
!getToolChain().getTriple().isOSDarwin()))
CmdArgs.push_back("-momit-leaf-frame-pointer");
- // -fno-math-errno is default.
- if (Args.hasFlag(options::OPT_fmath_errno,
- options::OPT_fno_math_errno,
- false))
- CmdArgs.push_back("-fmath-errno");
-
// Explicitly error on some things we know we don't support and can't just
// ignore.
types::ID InputType = Inputs[0].getType();
if (!Args.hasArg(options::OPT_fallow_unsupported)) {
Arg *Unsupported;
- if ((Unsupported = Args.getLastArg(options::OPT_iframework)))
- D.Diag(diag::err_drv_clang_unsupported)
- << Unsupported->getOption().getName();
-
if (types::isCXX(InputType) &&
getToolChain().getTriple().isOSDarwin() &&
getToolChain().getTriple().getArch() == llvm::Triple::x86) {
@@ -1456,8 +1803,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// wrong.
Args.ClaimAllArgs(options::OPT_g_Group);
if (Arg *A = Args.getLastArg(options::OPT_g_Group))
- if (!A->getOption().matches(options::OPT_g0))
+ if (!A->getOption().matches(options::OPT_g0)) {
CmdArgs.push_back("-g");
+ }
Args.AddAllArgs(CmdArgs, options::OPT_ffunction_sections);
Args.AddAllArgs(CmdArgs, options::OPT_fdata_sections);
@@ -1496,10 +1844,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_working_directory);
+ bool ARCMTEnabled = false;
if (!Args.hasArg(options::OPT_fno_objc_arc)) {
if (const Arg *A = Args.getLastArg(options::OPT_ccc_arcmt_check,
options::OPT_ccc_arcmt_modify,
options::OPT_ccc_arcmt_migrate)) {
+ ARCMTEnabled = true;
switch (A->getOption().getID()) {
default:
llvm_unreachable("missed a case");
@@ -1511,7 +1861,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
break;
case options::OPT_ccc_arcmt_migrate:
CmdArgs.push_back("-arcmt-migrate");
- CmdArgs.push_back("-arcmt-migrate-directory");
+ CmdArgs.push_back("-mt-migrate-directory");
CmdArgs.push_back(A->getValue(Args));
Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_report_output);
@@ -1521,12 +1871,31 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (const Arg *A = Args.getLastArg(options::OPT_ccc_objcmt_migrate)) {
+ if (ARCMTEnabled) {
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-ccc-arcmt-migrate";
+ }
+ CmdArgs.push_back("-mt-migrate-directory");
+ CmdArgs.push_back(A->getValue(Args));
+
+ if (!Args.hasArg(options::OPT_objcmt_migrate_literals,
+ options::OPT_objcmt_migrate_subscripting)) {
+ // None specified, means enable them all.
+ CmdArgs.push_back("-objcmt-migrate-literals");
+ CmdArgs.push_back("-objcmt-migrate-subscripting");
+ } else {
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_literals);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_subscripting);
+ }
+ }
+
// Add preprocessing options like -I, -D, etc. if we are using the
// preprocessor.
//
// FIXME: Support -fpreprocessed
if (types::getPreprocessedType(InputType) != types::TY_INVALID)
- AddPreprocessingOptions(D, Args, CmdArgs, Output, Inputs);
+ AddPreprocessingOptions(C, D, Args, CmdArgs, Output, Inputs);
// Don't warn about "clang -c -DPIC -fPIC test.i" because libtool.m4 assumes
// that "The compiler can only warn and ignore the option if not recognized".
@@ -1611,11 +1980,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (ShouldDisableCFI(Args, getToolChain()))
CmdArgs.push_back("-fno-dwarf2-cfi-asm");
- if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_)) {
+ if (ShouldDisableDwarfDirectory(Args, getToolChain()))
+ CmdArgs.push_back("-fno-dwarf-directory-asm");
+
+ if (const char *pwd = ::getenv("PWD")) {
+ // GCC also verifies that stat(pwd) and stat(".") have the same inode
+ // number. We skip that check because stat calls are slow, but we could.
+ if (llvm::sys::path::is_absolute(pwd)) {
+ std::string CompDir = pwd;
+ CmdArgs.push_back("-fdebug-compilation-dir");
+ CmdArgs.push_back(Args.MakeArgString(CompDir));
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
+ options::OPT_ftemplate_depth_EQ)) {
CmdArgs.push_back("-ftemplate-depth");
CmdArgs.push_back(A->getValue(Args));
}
+ if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_depth_EQ)) {
+ CmdArgs.push_back("-fconstexpr-depth");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_Wlarge_by_value_copy_EQ,
options::OPT_Wlarge_by_value_copy_def)) {
CmdArgs.push_back("-Wlarge-by-value-copy");
@@ -1654,6 +2042,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue(Args));
}
+ if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-fconstexpr-backtrace-limit");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
// Pass -fmessage-length=.
CmdArgs.push_back("-fmessage-length");
if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
@@ -1673,9 +2066,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
// -fhosted is default.
- if (KernelOrKext || Args.hasFlag(options::OPT_ffreestanding,
- options::OPT_fhosted,
- false))
+ if (Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false) ||
+ KernelOrKext)
CmdArgs.push_back("-ffreestanding");
// Forward -f (flag) options which we can pass directly.
@@ -1684,9 +2076,28 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fformat_extensions);
Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
Args.AddLastArg(CmdArgs, options::OPT_flimit_debug_info);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_limit_debug_info);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
+ Args.AddLastArg(CmdArgs, options::OPT_faltivec);
+
+ // Report an error for -faltivec on anything other than PowerPC.
+ if (const Arg *A = Args.getLastArg(options::OPT_faltivec))
+ if (!(getToolChain().getTriple().getArch() == llvm::Triple::ppc ||
+ getToolChain().getTriple().getArch() == llvm::Triple::ppc64))
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "ppc/ppc64";
+
if (getToolChain().SupportsProfiling())
Args.AddLastArg(CmdArgs, options::OPT_pg);
+ if (Args.hasFlag(options::OPT_faddress_sanitizer,
+ options::OPT_fno_address_sanitizer, false))
+ CmdArgs.push_back("-faddress-sanitizer");
+
+ if (Args.hasFlag(options::OPT_fthread_sanitizer,
+ options::OPT_fno_thread_sanitizer, false))
+ CmdArgs.push_back("-fthread-sanitizer");
+
// -flax-vector-conversions is default.
if (!Args.hasFlag(options::OPT_flax_vector_conversions,
options::OPT_fno_lax_vector_conversions))
@@ -1706,12 +2117,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue(Args));
}
- // Forward -ftrap_function= options to the backend.
- if (Arg *A = Args.getLastArg(options::OPT_ftrap_function_EQ)) {
- StringRef FuncName = A->getValue(Args);
- CmdArgs.push_back("-backend-option");
- CmdArgs.push_back(Args.MakeArgString("-trap-func=" + FuncName));
- }
+ Args.AddLastArg(CmdArgs, options::OPT_ftrap_function_EQ);
// -fno-strict-overflow implies -fwrapv if it isn't disabled, but
// -fstrict-overflow won't turn off an explicitly enabled -fwrapv.
@@ -1748,10 +2154,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Translate -mstackrealign
- if (Args.hasArg(options::OPT_mstackrealign)) {
+ if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign,
+ false)) {
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-force-align-stack");
}
+ if (!Args.hasFlag(options::OPT_mno_stackrealign, options::OPT_mstackrealign,
+ false)) {
+ CmdArgs.push_back(Args.MakeArgString("-mstackrealign"));
+ }
+
+ if (Args.hasArg(options::OPT_mstack_alignment)) {
+ StringRef alignment = Args.getLastArgValue(options::OPT_mstack_alignment);
+ CmdArgs.push_back(Args.MakeArgString("-mstack-alignment=" + alignment));
+ }
// Forward -f options with positive and negative forms; we translate
// these by hand.
@@ -1761,6 +2177,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fapple-kext");
if (!Args.hasArg(options::OPT_fbuiltin))
CmdArgs.push_back("-fno-builtin");
+ Args.ClaimAllArgs(options::OPT_fno_builtin);
}
// -fbuiltin is default.
else if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin))
@@ -1783,6 +2200,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fblocks-runtime-optional");
}
+ // -fmodules enables modules (off by default). However, for C++/Objective-C++,
+ // users must also pass -fcxx-modules. The latter flag will disappear once the
+ // modules implementation is solid for C++/Objective-C++ programs as well.
+ if (Args.hasFlag(options::OPT_fmodules, options::OPT_fno_modules, false)) {
+ bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules,
+ options::OPT_fno_cxx_modules,
+ false);
+ if (AllowedInCXX || !types::isCXX(InputType))
+ CmdArgs.push_back("-fmodules");
+ }
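[Editor's note: in practice `clang -fmodules t.c` forwards -fmodules to cc1, while `clang -fmodules t.cpp` silently drops it unless -fcxx-modules is also given (and a later -fno-cxx-modules withdraws that opt-in again), so C and Objective-C users get modules without opting into the still-maturing C++ support.]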
+
// -faccess-control is default.
if (Args.hasFlag(options::OPT_fno_access_control,
options::OPT_faccess_control,
@@ -1796,14 +2224,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-elide-constructors");
// -frtti is default.
- if (KernelOrKext ||
- !Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti))
+ if (!Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti) ||
+ KernelOrKext)
CmdArgs.push_back("-fno-rtti");
- // -fshort-enums=0 is default.
- // FIXME: Are there targers where -fshort-enums is on by default ?
+ // -fshort-enums=0 is default for all architectures except Hexagon.
if (Args.hasFlag(options::OPT_fshort_enums,
- options::OPT_fno_short_enums, false))
+ options::OPT_fno_short_enums,
+ getToolChain().getTriple().getArch() ==
+ llvm::Triple::hexagon))
CmdArgs.push_back("-fshort-enums");
// -fsigned-char is default.
@@ -1817,10 +2246,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-threadsafe-statics");
// -fuse-cxa-atexit is default.
- if (KernelOrKext ||
- !Args.hasFlag(options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
- getToolChain().getTriple().getOS() != llvm::Triple::Cygwin &&
- getToolChain().getTriple().getOS() != llvm::Triple::MinGW32))
+ if (!Args.hasFlag(options::OPT_fuse_cxa_atexit,
+ options::OPT_fno_use_cxa_atexit,
+ getToolChain().getTriple().getOS() != llvm::Triple::Cygwin &&
+ getToolChain().getTriple().getOS() != llvm::Triple::MinGW32 &&
+ getToolChain().getTriple().getArch() != llvm::Triple::hexagon) ||
+ KernelOrKext)
CmdArgs.push_back("-fno-use-cxa-atexit");
// -fms-extensions=0 is default.
@@ -1829,8 +2260,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fms-extensions");
// -fms-compatibility=0 is default.
- if (Args.hasFlag(options::OPT_fms_compatibility, options::OPT_fno_ms_compatibility,
- getToolChain().getTriple().getOS() == llvm::Triple::Win32))
+ if (Args.hasFlag(options::OPT_fms_compatibility,
+ options::OPT_fno_ms_compatibility,
+ (getToolChain().getTriple().getOS() == llvm::Triple::Win32 &&
+ Args.hasFlag(options::OPT_fms_extensions,
+ options::OPT_fno_ms_extensions,
+ true))))
CmdArgs.push_back("-fms-compatibility");
// -fmsc-version=1300 is default.
@@ -1868,11 +2303,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fgnu89-inline");
+ if (Args.hasArg(options::OPT_fno_inline))
+ CmdArgs.push_back("-fno-inline");
+
+ if (Args.hasArg(options::OPT_fno_inline_functions))
+ CmdArgs.push_back("-fno-inline-functions");
+
// -fobjc-nonfragile-abi=0 is default.
ObjCRuntime objCRuntime;
unsigned objcABIVersion = 0;
bool NeXTRuntimeIsDefault
- = (IsRewriter || getToolChain().getTriple().isOSDarwin());
+ = (IsRewriter || IsModernRewriter ||
+ getToolChain().getTriple().isOSDarwin());
if (Args.hasFlag(options::OPT_fnext_runtime, options::OPT_fgnu_runtime,
NeXTRuntimeIsDefault)) {
objCRuntime.setKind(ObjCRuntime::NeXT);
@@ -1906,8 +2348,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
} else {
// Otherwise, determine if we are using the non-fragile ABI.
- bool NonFragileABIIsDefault
- = (!IsRewriter && getToolChain().IsObjCNonFragileABIDefault());
+ bool NonFragileABIIsDefault =
+ (IsModernRewriter ||
+ (!IsRewriter && getToolChain().IsObjCNonFragileABIDefault()));
if (Args.hasFlag(options::OPT_fobjc_nonfragile_abi,
options::OPT_fno_objc_nonfragile_abi,
NonFragileABIIsDefault)) {
@@ -1950,10 +2393,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- // -fobjc-default-synthesize-properties=0 is default.
- if (Args.hasFlag(options::OPT_fobjc_default_synthesize_properties,
- options::OPT_fno_objc_default_synthesize_properties,
- getToolChain().IsObjCDefaultSynthPropertiesDefault())) {
+ // -fobjc-default-synthesize-properties=1 is default. This only has an effect
+ // if the nonfragile objc abi is used.
+ if (getToolChain().IsObjCDefaultSynthPropertiesDefault()) {
CmdArgs.push_back("-fobjc-default-synthesize-properties");
}
@@ -1961,6 +2403,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// NOTE: This logic is duplicated in ToolChains.cpp.
bool ARC = isObjCAutoRefCount(Args);
if (ARC) {
+ if (!getToolChain().SupportsObjCARC())
+ D.Diag(diag::err_arc_unsupported);
+
CmdArgs.push_back("-fobjc-arc");
// FIXME: It seems like this entire block, and several around it should be
@@ -1983,7 +2428,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fobjc-infer-related-result-type is the default, except in the Objective-C
// rewriter.
- if (IsRewriter)
+ if (IsRewriter || IsModernRewriter)
CmdArgs.push_back("-fno-objc-infer-related-result-type");
// Handle -fobjc-gc and -fobjc-gc-only. They are exclusive, and -fobjc-gc-only
@@ -2006,7 +2451,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add exception args.
addExceptionArgs(Args, InputType, getToolChain().getTriple(),
- KernelOrKext, IsRewriter, objcABIVersion, CmdArgs);
+ KernelOrKext, objcABIVersion, CmdArgs);
if (getToolChain().UseSjLjExceptions())
CmdArgs.push_back("-fsjlj-exceptions");
@@ -2057,6 +2502,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.hasArg(options::OPT_fapple_kext)) {
if (!Args.hasArg(options::OPT_fcommon))
CmdArgs.push_back("-fno-common");
+ Args.ClaimAllArgs(options::OPT_fno_common);
}
// -fcommon is default, only pass non-default.
@@ -2086,11 +2532,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_diagnostics_fixit_info))
CmdArgs.push_back("-fno-diagnostics-fixit-info");
- // Enable -fdiagnostics-show-name by default.
- if (Args.hasFlag(options::OPT_fdiagnostics_show_name,
- options::OPT_fno_diagnostics_show_name, false))
- CmdArgs.push_back("-fdiagnostics-show-name");
-
// Enable -fdiagnostics-show-option by default.
if (Args.hasFlag(options::OPT_fdiagnostics_show_option,
options::OPT_fno_diagnostics_show_option))
@@ -2164,6 +2605,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(diag::warn_drv_clang_unsupported) << A->getAsString(Args);
}
+ if (Args.hasFlag(options::OPT_fapple_pragma_pack,
+ options::OPT_fno_apple_pragma_pack, false))
+ CmdArgs.push_back("-fapple-pragma-pack");
+
// Default to -fno-builtin-str{cat,cpy} on Darwin for ARM.
//
// FIXME: This is disabled until clang -cc1 supports -fno-builtin-foo. PR4941.
@@ -2189,6 +2634,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_dM);
Args.AddLastArg(CmdArgs, options::OPT_dD);
+
+ // Handle serialized diagnostics.
+ if (Arg *A = Args.getLastArg(options::OPT__serialize_diags)) {
+ CmdArgs.push_back("-serialize-diagnostic-file");
+ CmdArgs.push_back(Args.MakeArgString(A->getValue(Args)));
+ }
// Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
// parser.
@@ -2237,7 +2688,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
ie = Args.end(); it != ie; ++it)
(*it)->render(Args, OriginalArgs);
- llvm::SmallString<256> Flags;
+ SmallString<256> Flags;
Flags += Exec;
for (unsigned i = 0, e = OriginalArgs.size(); i != e; ++i) {
Flags += " ";
@@ -2271,6 +2722,24 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.ClaimAllArgs(options::OPT_emit_llvm);
}
+void ClangAs::AddARMTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ llvm::Triple Triple = getToolChain().getTriple();
+
+ // Set the CPU based on -march= and -mcpu=.
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(getARMTargetCPU(Args, Triple));
+
+ // Honor -mfpu=.
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
+ addFPUArgs(D, A, Args, CmdArgs);
+
+ // Honor -mfpmath=.
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpmath_EQ))
+ addFPMathArgs(D, A, Args, CmdArgs, getARMTargetCPU(Args, Triple));
+}
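As a rough illustration of what this helper contributes to a cc1as command line (values assumed for illustration; the actual CPU and feature names come from getARMTargetCPU() and addFPUArgs(), not from this patch):

// Sketch only -- for `clang -c foo.s -mcpu=cortex-a8 -mfpu=neon` the helper
// above would append roughly:
//   -target-cpu cortex-a8 -target-feature +neon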
+
void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -2307,10 +2776,54 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
if (UseRelaxAll(C, Args))
CmdArgs.push_back("-relax-all");
+ // Add target specific cpu and features flags.
+ switch(getToolChain().getTriple().getArch()) {
+ default:
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ AddARMTargetArgs(Args, CmdArgs);
+ break;
+ }
+
// Ignore explicit -force_cpusubtype_ALL option.
(void) Args.hasArg(options::OPT_force__cpusubtype__ALL);
- // FIXME: Add -g support, once we have it.
+ // Determine the original source input.
+ const Action *SourceAction = &JA;
+ while (SourceAction->getKind() != Action::InputClass) {
+ assert(!SourceAction->getInputs().empty() && "unexpected root action!");
+ SourceAction = SourceAction->getInputs()[0];
+ }
+
+ // Forward -g, assuming we are dealing with an actual assembly file.
+ if (SourceAction->getType() == types::TY_Asm ||
+ SourceAction->getType() == types::TY_PP_Asm) {
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group))
+ if (!A->getOption().matches(options::OPT_g0))
+ CmdArgs.push_back("-g");
+ }
+
+ // Optionally embed the -cc1as level arguments into the debug info, for build
+ // analysis.
+ if (getToolChain().UseDwarfDebugFlags()) {
+ ArgStringList OriginalArgs;
+ for (ArgList::const_iterator it = Args.begin(),
+ ie = Args.end(); it != ie; ++it)
+ (*it)->render(Args, OriginalArgs);
+
+ SmallString<256> Flags;
+ const char *Exec = getToolChain().getDriver().getClangProgramPath();
+ Flags += Exec;
+ for (unsigned i = 0, e = OriginalArgs.size(); i != e; ++i) {
+ Flags += " ";
+ Flags += OriginalArgs[i];
+ }
+ CmdArgs.push_back("-dwarf-debug-flags");
+ CmdArgs.push_back(Args.MakeArgString(Flags.str()));
+ }
// FIXME: Add -static support, once we have it.
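The -dwarf-debug-flags payload built above is nothing more than the original argument vector re-joined with single spaces after the program path. A minimal standalone sketch of that step (illustrative only; names invented, not part of the patch):

#include <string>
#include <vector>

// Mirror the Flags construction above: program path, then each rendered
// argument separated by a single space.
static std::string joinArgs(const std::string &Exec,
                            const std::vector<std::string> &Rendered) {
  std::string Flags = Exec;
  for (size_t i = 0, e = Rendered.size(); i != e; ++i) {
    Flags += " ";
    Flags += Rendered[i];
  }
  return Flags; // e.g. "/usr/bin/clang -cc1as -triple ... foo.s"
}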
@@ -2389,6 +2902,8 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fsyntax-only");
}
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
// Only pass -x if gcc will understand it; otherwise hope gcc
// understands the suffix correctly. The main use case this would go
@@ -2437,12 +2952,7 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
if (!customGCCName.empty())
GCCName = customGCCName.c_str();
else if (D.CCCIsCXX) {
-#ifdef IS_CYGWIN15
- // FIXME: Detect the version of Cygwin at runtime?
- GCCName = "g++-4";
-#else
GCCName = "g++";
-#endif
} else
GCCName = "gcc";
@@ -2488,6 +2998,155 @@ void gcc::Link::RenderExtraToolArgs(const JobAction &JA,
// The types are (hopefully) good enough.
}
+// Hexagon tools start.
+void hexagon::Assemble::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+
+}
+void hexagon::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ std::string MarchString = "-march=";
+ MarchString += getHexagonTargetCPU(Args);
+ CmdArgs.push_back(Args.MakeArgString(MarchString));
+
+ RenderExtraToolArgs(JA, CmdArgs);
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Unexpected output");
+ CmdArgs.push_back("-fsyntax-only");
+ }
+
+
+  // Only pass -x if gcc will understand it; otherwise hope gcc
+  // understands the suffix correctly. The main case where this goes
+  // wrong is for linker inputs that happen to have an odd suffix;
+  // really the only way to trigger it is a command like
+  // '-x foobar a.c', which treats a.c as a linker input.
+ //
+ // FIXME: For the linker case specifically, can we safely convert
+ // inputs into '-Wl,' options?
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM or AST inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString();
+ else if (II.getType() == types::TY_AST)
+ D.Diag(clang::diag::err_drv_no_ast_support)
+ << getToolChain().getTripleString();
+
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+      // Don't render as input; we need gcc to do the translations. FIXME: Pranav: What is this?
+ II.getInputArg().render(Args, CmdArgs);
+ }
+
+ const char *GCCName = "hexagon-as";
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
+  C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+void hexagon::Link::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ // The types are (hopefully) good enough.
+}
+
+void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ for (ArgList::const_iterator
+ it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ Arg *A = *it;
+ if (A->getOption().hasForwardToGCC()) {
+ // Don't forward any -g arguments to assembly steps.
+ if (isa<AssembleJobAction>(JA) &&
+ A->getOption().matches(options::OPT_g_Group))
+ continue;
+
+ // It is unfortunate that we have to claim here, as this means
+ // we will basically never report anything interesting for
+ // platforms using a generic gcc, even if we are just using gcc
+ // to get to the assembler.
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ RenderExtraToolArgs(JA, CmdArgs);
+
+  // Add architecture information.
+  if (Arg *A = getLastHexagonArchArg(Args)) {
+    if (A->getOption().matches(options::OPT_m_Joined))
+      A->render(Args, CmdArgs);
+    else
+      CmdArgs.push_back(Args.MakeArgString("-m" + getHexagonTargetCPU(Args)));
+  } else {
+    CmdArgs.push_back(Args.MakeArgString("-m" + getHexagonTargetCPU(Args)));
+  }
+
+ CmdArgs.push_back("-mqdsp6-compat");
+
+ const char *GCCName;
+ if (C.getDriver().CCCIsCXX)
+ GCCName = "hexagon-g++";
+ else
+ GCCName = "hexagon-gcc";
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM or AST inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString();
+ else if (II.getType() == types::TY_AST)
+ D.Diag(clang::diag::err_drv_no_ast_support)
+ << getToolChain().getTripleString();
+
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+      // Don't render as input; we need gcc to do the translations. FIXME: Pranav: What is this?
+ II.getInputArg().render(Args, CmdArgs);
+ }
+  C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+// Hexagon tools end.
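Putting the pieces together, the jobs constructed above invoke the external Hexagon tools along these lines (CPU spelling assumed; it comes from getHexagonTargetCPU(), illustration only):

//   hexagon-as -march=<cpu> -o foo.o foo.s
//   hexagon-gcc -m<cpu> -mqdsp6-compat -o a.out foo.o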
+
+
const char *darwin::CC1::getCC1Name(types::ID Type) const {
switch (Type) {
default:
@@ -2510,6 +3169,8 @@ const char *darwin::CC1::getCC1Name(types::ID Type) const {
}
}
+void darwin::CC1::anchor() {}
+
const char *darwin::CC1::getBaseInputName(const ArgList &Args,
const InputInfoList &Inputs) {
return Args.MakeArgString(
@@ -2548,13 +3209,27 @@ void darwin::CC1::RemoveCC1UnsupportedArgs(ArgStringList &CmdArgs) const {
StringRef Option = *it;
bool RemoveOption = false;
- // Remove -faltivec
- if (Option.equals("-faltivec")) {
- it = CmdArgs.erase(it);
+ // Erase both -fmodule-cache-path and its argument.
+ if (Option.equals("-fmodule-cache-path") && it+2 != ie) {
+ it = CmdArgs.erase(it, it+2);
ie = CmdArgs.end();
continue;
}
+ // Remove unsupported -f options.
+ if (Option.startswith("-f")) {
+ // Remove -f/-fno- to reduce the number of cases.
+ if (Option.startswith("-fno-"))
+ Option = Option.substr(5);
+ else
+ Option = Option.substr(2);
+ RemoveOption = llvm::StringSwitch<bool>(Option)
+ .Case("altivec", true)
+ .Case("modules", true)
+ .Case("diagnostics-show-note-include-stack", true)
+ .Default(false);
+ }
+
// Handle machine specific options.
if (Option.startswith("-m")) {
RemoveOption = llvm::StringSwitch<bool>(Option)
@@ -2595,6 +3270,7 @@ void darwin::CC1::RemoveCC1UnsupportedArgs(ArgStringList &CmdArgs) const {
.Case("c++11-narrowing", true)
.Case("conditional-uninitialized", true)
.Case("constant-conversion", true)
+ .Case("conversion-null", true)
.Case("CFString-literal", true)
.Case("constant-logical-operand", true)
.Case("custom-atomic-properties", true)
@@ -3000,9 +3676,11 @@ void darwin::Compile::ConstructJob(Compilation &C, const JobAction &JA,
assert(Inputs.size() == 1 && "Unexpected number of inputs!");
+ // Silence warning about unused --serialize-diagnostics
+ Args.ClaimAllArgs(options::OPT__serialize_diags);
+
types::ID InputType = Inputs[0].getType();
- const Arg *A;
- if ((A = Args.getLastArg(options::OPT_traditional)))
+ if (const Arg *A = Args.getLastArg(options::OPT_traditional))
D.Diag(diag::err_drv_argument_only_allowed_with)
<< A->getAsString(Args) << "-E";
@@ -3119,7 +3797,7 @@ void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_gstabs))
CmdArgs.push_back("--gstabs");
else if (Args.hasArg(options::OPT_g_Group))
- CmdArgs.push_back("--gdwarf2");
+ CmdArgs.push_back("-g");
}
// Derived from asm spec.
@@ -3154,6 +3832,8 @@ void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
+void darwin::DarwinTool::anchor() {}
+
void darwin::DarwinTool::AddDarwinArch(const ArgList &Args,
ArgStringList &CmdArgs) const {
StringRef ArchName = getDarwinToolChain().getDarwinArchName(Args);
@@ -3185,13 +3865,7 @@ void darwin::Link::AddLinkArgs(Compilation &C,
// Newer linkers support -demangle, pass it if supported and not disabled by
// the user.
- //
- // FIXME: We temporarily avoid passing -demangle to any iOS linker, because
- // unfortunately we can't be guaranteed that the linker version used there
- // will match the linker version detected at configure time. We need the
- // universal driver.
- if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle) &&
- !DarwinTC.isTargetIPhoneOS()) {
+ if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) {
// Don't pass -demangle to ld_classic.
//
// FIXME: This is a temporary workaround, ld should be handling this.
@@ -3291,8 +3965,7 @@ void darwin::Link::AddLinkArgs(Compilation &C,
Args.AddAllArgs(CmdArgs, options::OPT_init);
// Add the deployment target.
- unsigned TargetVersion[3];
- DarwinTC.getTargetVersion(TargetVersion);
+ VersionTuple TargetVersion = DarwinTC.getTargetVersion();
// If we had an explicit -mios-simulator-version-min argument, honor that,
// otherwise use the traditional deployment targets. We can't just check the
@@ -3307,9 +3980,7 @@ void darwin::Link::AddLinkArgs(Compilation &C,
CmdArgs.push_back("-iphoneos_version_min");
else
CmdArgs.push_back("-macosx_version_min");
- CmdArgs.push_back(Args.MakeArgString(Twine(TargetVersion[0]) + "." +
- Twine(TargetVersion[1]) + "." +
- Twine(TargetVersion[2])));
+ CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
Args.AddLastArg(CmdArgs, options::OPT_nomultidefs);
Args.AddLastArg(CmdArgs, options::OPT_multi__module);
@@ -3492,7 +4163,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lcrt1.o");
else if (getDarwinToolChain().isMacosxVersionLT(10, 6))
CmdArgs.push_back("-lcrt1.10.5.o");
- else
+ else if (getDarwinToolChain().isMacosxVersionLT(10, 8))
CmdArgs.push_back("-lcrt1.10.6.o");
// darwin_crt2 spec is empty.
@@ -3513,20 +4184,42 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
+ // If we're building a dynamic lib with -faddress-sanitizer, unresolved
+ // symbols may appear. Mark all of them as dynamic_lookup.
+ // Linking executables is handled in lib/Driver/ToolChains.cpp.
+ if (Args.hasFlag(options::OPT_faddress_sanitizer,
+ options::OPT_fno_address_sanitizer, false)) {
+ if (Args.hasArg(options::OPT_dynamiclib) ||
+ Args.hasArg(options::OPT_bundle)) {
+ CmdArgs.push_back("-undefined");
+ CmdArgs.push_back("dynamic_lookup");
+ }
+ }
+
if (Args.hasArg(options::OPT_fopenmp))
// This is more complicated in gcc...
CmdArgs.push_back("-lgomp");
getDarwinToolChain().AddLinkSearchPathArgs(Args, CmdArgs);
- // In ARC, if we don't have runtime support, link in the runtime
- // stubs. We have to do this *before* adding any of the normal
- // linker inputs so that its initializer gets run first.
- if (isObjCAutoRefCount(Args)) {
- ObjCRuntime runtime;
- getDarwinToolChain().configureObjCRuntime(runtime);
- if (!runtime.HasARC)
- getDarwinToolChain().AddLinkARCArgs(Args, CmdArgs);
+ if (isObjCRuntimeLinked(Args)) {
+ // Avoid linking compatibility stubs on i386 mac.
+ if (!getDarwinToolChain().isTargetMacOS() ||
+ getDarwinToolChain().getArchName() != "i386") {
+ // If we don't have ARC or subscripting runtime support, link in the
+ // runtime stubs. We have to do this *before* adding any of the normal
+ // linker inputs so that its initializer gets run first.
+ ObjCRuntime runtime;
+ getDarwinToolChain().configureObjCRuntime(runtime);
+ // We use arclite library for both ARC and subscripting support.
+ if ((!runtime.HasARC && isObjCAutoRefCount(Args)) ||
+ !runtime.HasSubscripting)
+ getDarwinToolChain().AddLinkARCArgs(Args, CmdArgs);
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("Foundation");
+ }
+    // Link libobjc.
+ CmdArgs.push_back("-lobjc");
}
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
@@ -3557,8 +4250,6 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
// endfile_spec is empty.
}
- addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
-
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
Args.AddAllArgs(CmdArgs, options::OPT_F);
@@ -3618,6 +4309,9 @@ void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
ArgStringList CmdArgs;
CmdArgs.push_back("--verify");
+ CmdArgs.push_back("--debug-info");
+ CmdArgs.push_back("--eh-frame");
+ CmdArgs.push_back("--quiet");
assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
const InputInfo &Input = Inputs[0];
@@ -3631,6 +4325,137 @@ void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
+void solaris::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+
+void solaris::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ // FIXME: Find a real GCC, don't hard-code versions here
+ std::string GCCLibPath = "/usr/gcc/4.5/lib/gcc/";
+ const llvm::Triple &T = getToolChain().getTriple();
+ std::string LibPath = "/usr/lib/";
+ llvm::Triple::ArchType Arch = T.getArch();
+ switch (Arch) {
+ case llvm::Triple::x86:
+ GCCLibPath += ("i386-" + T.getVendorName() + "-" +
+ T.getOSName()).str() + "/4.5.2/";
+ break;
+ case llvm::Triple::x86_64:
+ GCCLibPath += ("i386-" + T.getVendorName() + "-" +
+ T.getOSName()).str();
+ GCCLibPath += "/4.5.2/amd64/";
+ LibPath += "amd64/";
+ break;
+ default:
+ assert(0 && "Unsupported architecture");
+ }
+
+ ArgStringList CmdArgs;
+
+ // Demangle C++ names in errors
+ CmdArgs.push_back("-C");
+
+ if ((!Args.hasArg(options::OPT_nostdlib)) &&
+ (!Args.hasArg(options::OPT_shared))) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("_start");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ CmdArgs.push_back("-dn");
+ } else {
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ } else {
+ CmdArgs.push_back("--dynamic-linker");
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "ld.so.1"));
+ }
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "crt1.o"));
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "crti.o"));
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "values-Xa.o"));
+ CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtbegin.o"));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "crti.o"));
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "values-Xa.o"));
+ CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtbegin.o"));
+ }
+ if (getToolChain().getDriver().CCCIsCXX)
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "cxa_finalize.o"));
+ }
+
+ CmdArgs.push_back(Args.MakeArgString("-L" + GCCLibPath));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (getToolChain().getDriver().CCCIsCXX)
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lgcc_s");
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lm");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtend.o"));
+ }
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "crtn.o"));
+
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
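Traced end to end for the x86 case (paths hard-coded above; vendor/OS components abbreviated, illustration only), the resulting link command looks roughly like:

//   ld -C -e _start -Bdynamic --dynamic-linker /usr/lib/ld.so.1 -o a.out
//      /usr/lib/crt1.o /usr/lib/crti.o /usr/lib/values-Xa.o
//      /usr/gcc/4.5/lib/gcc/i386-<vendor>-<os>/4.5.2/crtbegin.o foo.o
//      -lgcc_s -lgcc -lc -lm .../crtend.o /usr/lib/crtn.o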
+
void auroraux::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -3806,8 +4631,12 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(
- getToolChain().GetFilePath("crt0.o")));
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("gcrt0.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt0.o")));
CmdArgs.push_back(Args.MakeArgString(
getToolChain().GetFilePath("crtbegin.o")));
} else {
@@ -3832,7 +4661,10 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
!Args.hasArg(options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX) {
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
- CmdArgs.push_back("-lm");
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
}
// FIXME: For some reason GCC passes -lgcc before adding
@@ -3841,8 +4673,12 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
- if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back("-lc");
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lc_p");
+ else
+ CmdArgs.push_back("-lc");
+ }
CmdArgs.push_back("-lgcc");
}
@@ -4062,11 +4898,9 @@ void netbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
// When building 32-bit code on NetBSD/amd64, we have to explicitly
// instruct as in the base system to assemble 32-bit code.
- if (ToolTriple.getArch() == llvm::Triple::x86_64 &&
- getToolChain().getArch() == llvm::Triple::x86)
+ if (getToolChain().getArch() == llvm::Triple::x86)
CmdArgs.push_back("--32");
-
// Set byte order explicitly
if (getToolChain().getArchName() == "mips")
CmdArgs.push_back("-EB");
@@ -4116,8 +4950,7 @@ void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
// When building 32-bit code on NetBSD/amd64, we have to explicitly
// instruct ld in the base system to link 32-bit code.
- if (ToolTriple.getArch() == llvm::Triple::x86_64 &&
- getToolChain().getArch() == llvm::Triple::x86) {
+ if (getToolChain().getArch() == llvm::Triple::x86) {
CmdArgs.push_back("-m");
CmdArgs.push_back("elf_i386");
}
@@ -4218,12 +5051,49 @@ void linuxtools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--32");
} else if (getToolChain().getArch() == llvm::Triple::x86_64) {
CmdArgs.push_back("--64");
+ } else if (getToolChain().getArch() == llvm::Triple::ppc) {
+ CmdArgs.push_back("-a32");
+ CmdArgs.push_back("-mppc");
+ CmdArgs.push_back("-many");
+ } else if (getToolChain().getArch() == llvm::Triple::ppc64) {
+ CmdArgs.push_back("-a64");
+ CmdArgs.push_back("-mppc64");
+ CmdArgs.push_back("-many");
} else if (getToolChain().getArch() == llvm::Triple::arm) {
StringRef MArch = getToolChain().getArchName();
if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a")
CmdArgs.push_back("-mfpu=neon");
+ } else if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mipsel ||
+ getToolChain().getArch() == llvm::Triple::mips64 ||
+ getToolChain().getArch() == llvm::Triple::mips64el) {
+ StringRef CPUName;
+ StringRef ABIName;
+ getMipsCPUAndABI(Args, getToolChain(), CPUName, ABIName);
+
+ CmdArgs.push_back("-march");
+ CmdArgs.push_back(CPUName.data());
+
+ // Convert ABI name to the GNU tools acceptable variant.
+ if (ABIName == "o32")
+ ABIName = "32";
+ else if (ABIName == "n64")
+ ABIName = "64";
+
+ CmdArgs.push_back("-mabi");
+ CmdArgs.push_back(ABIName.data());
+
+ if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("-EB");
+ else
+ CmdArgs.push_back("-EL");
}
+ Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
+
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
options::OPT_Xassembler);
@@ -4241,6 +5111,30 @@ void linuxtools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
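For the MIPS branch above, the ABI-name conversion means GNU as sees the binutils spellings ("32"/"64") rather than the driver's ("o32"/"n64"). Assuming getMipsCPUAndABI() yields CPU "mips32" and ABI "o32" for a mipsel target (an assumption for illustration):

//   as -march mips32 -mabi 32 -EL ...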
+static void AddLibgcc(const Driver &D, ArgStringList &CmdArgs,
+ const ArgList &Args) {
+ bool StaticLibgcc = Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_static_libgcc);
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+
+ if (StaticLibgcc) {
+ if (D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+ } else {
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (StaticLibgcc)
+ CmdArgs.push_back("-lgcc_eh");
+ else if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+}
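The helper's effect is easiest to see by enumerating the four driver modes it distinguishes (derived by tracing the code above; not separate patch content):

// Link-line fragments emitted by AddLibgcc:
//   C,   dynamic:  -lgcc --as-needed -lgcc_s --no-as-needed
//   C,   static:   -lgcc -lgcc_eh
//   C++, dynamic:  -lgcc_s -lgcc   (trailing -lgcc only when not -shared)
//   C++, static:   -lgcc -lgcc_eh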
+
void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -4290,6 +5184,14 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("elf32ppclinux");
else if (ToolChain.getArch() == llvm::Triple::ppc64)
CmdArgs.push_back("elf64ppc");
+ else if (ToolChain.getArch() == llvm::Triple::mips)
+ CmdArgs.push_back("elf32btsmip");
+ else if (ToolChain.getArch() == llvm::Triple::mipsel)
+ CmdArgs.push_back("elf32ltsmip");
+ else if (ToolChain.getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("elf64btsmip");
+ else if (ToolChain.getArch() == llvm::Triple::mips64el)
+ CmdArgs.push_back("elf64ltsmip");
else
CmdArgs.push_back("elf_x86_64");
@@ -4313,6 +5215,12 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
else if (ToolChain.getArch() == llvm::Triple::arm ||
ToolChain.getArch() == llvm::Triple::thumb)
CmdArgs.push_back("/lib/ld-linux.so.3");
+ else if (ToolChain.getArch() == llvm::Triple::mips ||
+ ToolChain.getArch() == llvm::Triple::mipsel)
+ CmdArgs.push_back("/lib/ld.so.1");
+ else if (ToolChain.getArch() == llvm::Triple::mips64 ||
+ ToolChain.getArch() == llvm::Triple::mips64el)
+ CmdArgs.push_back("/lib64/ld.so.1");
else if (ToolChain.getArch() == llvm::Triple::ppc)
CmdArgs.push_back("/lib/ld.so.1");
else if (ToolChain.getArch() == llvm::Triple::ppc64)
@@ -4356,35 +5264,36 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
i != e; ++i)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + *i));
+ // Tell the linker to load the plugin. This has to come before AddLinkerInputs
+ // as gold requires -plugin to come before any -plugin-opt that -Wl might
+ // forward.
+ if (D.IsUsingLTO(Args) || Args.hasArg(options::OPT_use_gold_plugin)) {
+ CmdArgs.push_back("-plugin");
+ std::string Plugin = ToolChain.getDriver().Dir + "/../lib/LLVMgold.so";
+ CmdArgs.push_back(Args.MakeArgString(Plugin));
+ }
+
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
if (D.CCCIsCXX && !Args.hasArg(options::OPT_nostdlib)) {
+ bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bstatic");
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bdynamic");
CmdArgs.push_back("-lm");
}
+ // Call this before we add the C run-time.
+ addAsanRTLinux(getToolChain(), Args, CmdArgs);
+
if (!Args.hasArg(options::OPT_nostdlib)) {
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("--start-group");
- if (!D.CCCIsCXX)
- CmdArgs.push_back("-lgcc");
-
- if (Args.hasArg(options::OPT_static)) {
- if (D.CCCIsCXX)
- CmdArgs.push_back("-lgcc");
- } else {
- if (!D.CCCIsCXX)
- CmdArgs.push_back("--as-needed");
- CmdArgs.push_back("-lgcc_s");
- if (!D.CCCIsCXX)
- CmdArgs.push_back("--no-as-needed");
- }
-
- if (Args.hasArg(options::OPT_static))
- CmdArgs.push_back("-lgcc_eh");
- else if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX)
- CmdArgs.push_back("-lgcc");
+ AddLibgcc(D, CmdArgs, Args);
if (Args.hasArg(options::OPT_pthread) ||
Args.hasArg(options::OPT_pthreads))
@@ -4394,19 +5303,8 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("--end-group");
- else {
- if (!D.CCCIsCXX)
- CmdArgs.push_back("-lgcc");
-
- if (!D.CCCIsCXX)
- CmdArgs.push_back("--as-needed");
- CmdArgs.push_back("-lgcc_s");
- if (!D.CCCIsCXX)
- CmdArgs.push_back("--no-as-needed");
-
- if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX)
- CmdArgs.push_back("-lgcc");
- }
+ else
+ AddLibgcc(D, CmdArgs, Args);
if (!Args.hasArg(options::OPT_nostartfiles)) {
@@ -4423,12 +5321,6 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
- if (Args.hasArg(options::OPT_use_gold_plugin)) {
- CmdArgs.push_back("-plugin");
- std::string Plugin = ToolChain.getDriver().Dir + "/../lib/LLVMgold.so";
- CmdArgs.push_back(Args.MakeArgString(Plugin));
- }
-
C.addCommand(new Command(JA, *this, ToolChain.Linker.c_str(), CmdArgs));
}
@@ -4452,7 +5344,7 @@ void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("gas"));
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -4472,9 +5364,12 @@ void minix::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nostartfiles))
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
- "/usr/gnu/lib/crtso.o")));
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
+ }
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
@@ -4482,33 +5377,28 @@ void minix::Link::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX) {
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
-
- if (Args.hasArg(options::OPT_pthread))
- CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lgcc");
- CmdArgs.push_back("-L/usr/gnu/lib");
- // FIXME: fill in the correct search path for the final
- // support libraries.
- CmdArgs.push_back("-L/usr/gnu/lib/gcc/i686-pc-minix/4.4.3");
}
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
- "/usr/gnu/lib/libend.a")));
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lCompilerRT-Generic");
+ CmdArgs.push_back("-L/usr/pkg/compiler-rt/lib");
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
}
- addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
-
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("/usr/gnu/bin/gld"));
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("ld"));
C.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h
index a4f732e..651a8f2 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.h
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h
@@ -29,7 +29,8 @@ namespace tools {
/// \brief Clang compiler tool.
class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
- void AddPreprocessingOptions(const Driver &D,
+ void AddPreprocessingOptions(Compilation &C,
+ const Driver &D,
const ArgList &Args,
ArgStringList &CmdArgs,
const InputInfo &Output,
@@ -40,6 +41,7 @@ namespace tools {
void AddMIPSTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
void AddSparcTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+    void AddHexagonTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
public:
Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC) {}
@@ -57,6 +59,7 @@ namespace tools {
/// \brief Clang integrated assembler tool.
class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
+ void AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
public:
ClangAs(const ToolChain &TC) : Tool("clang::as",
"clang integrated assembler", TC) {}
@@ -145,14 +148,54 @@ namespace gcc {
"linker (via gcc)", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void RenderExtraToolArgs(const JobAction &JA,
ArgStringList &CmdArgs) const;
};
} // end namespace gcc
+namespace hexagon {
+  // For Hexagon, we do not need to instantiate tools for PreProcess,
+  // PreCompile and Compile; we simply use "clang -cc1" for those actions.
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("hexagon::Assemble",
+ "hexagon-as", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("hexagon::Link",
+ "hexagon-ld", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace hexagon.
+
+
namespace darwin {
class LLVM_LIBRARY_VISIBILITY DarwinTool : public Tool {
+ virtual void anchor();
protected:
void AddDarwinArch(const ArgList &Args, ArgStringList &CmdArgs) const;
@@ -166,6 +209,7 @@ namespace darwin {
};
class LLVM_LIBRARY_VISIBILITY CC1 : public DarwinTool {
+ virtual void anchor();
public:
static const char *getBaseInputName(const ArgList &Args,
const InputInfoList &Input);
@@ -243,6 +287,7 @@ namespace darwin {
Link(const ToolChain &TC) : DarwinTool("darwin::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -314,6 +359,7 @@ namespace openbsd {
Link(const ToolChain &TC) : Tool("openbsd::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -343,6 +389,7 @@ namespace freebsd {
Link(const ToolChain &TC) : Tool("freebsd::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -355,12 +402,10 @@ namespace freebsd {
/// netbsd -- Directly call GNU Binutils assembler and linker
namespace netbsd {
class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
- private:
- const llvm::Triple ToolTriple;
public:
- Assemble(const ToolChain &TC, const llvm::Triple &ToolTriple)
- : Tool("netbsd::Assemble", "assembler", TC), ToolTriple(ToolTriple) {}
+ Assemble(const ToolChain &TC)
+ : Tool("netbsd::Assemble", "assembler", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
@@ -371,14 +416,13 @@ namespace netbsd {
const char *LinkingOutput) const;
};
class LLVM_LIBRARY_VISIBILITY Link : public Tool {
- private:
- const llvm::Triple ToolTriple;
public:
- Link(const ToolChain &TC, const llvm::Triple &ToolTriple)
- : Tool("netbsd::Link", "linker", TC), ToolTriple(ToolTriple) {}
+ Link(const ToolChain &TC)
+ : Tool("netbsd::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -408,6 +452,7 @@ namespace linuxtools {
Link(const ToolChain &TC) : Tool("linux::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -436,6 +481,7 @@ namespace minix {
Link(const ToolChain &TC) : Tool("minix::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -445,6 +491,36 @@ namespace minix {
};
} // end namespace minix
+/// solaris -- Directly call Solaris assembler and linker
+namespace solaris {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("solaris::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("solaris::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace solaris
+
/// auroraux -- Directly call GNU Binutils assembler and linker
namespace auroraux {
class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
@@ -465,6 +541,7 @@ namespace auroraux {
Link(const ToolChain &TC) : Tool("auroraux::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -494,6 +571,7 @@ namespace dragonfly {
Link(const ToolChain &TC) : Tool("dragonfly::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -510,6 +588,7 @@ namespace visualstudio {
Link(const ToolChain &TC) : Tool("visualstudio::Link", "linker", TC) {}
virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
virtual void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
index d61ab68..50742fe 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
@@ -102,6 +102,7 @@ bool types::isOnlyAcceptedByClang(ID Id) {
case TY_LLVM_IR:
case TY_LLVM_BC:
case TY_RewrittenObjC:
+ case TY_RewrittenLegacyObjC:
return true;
}
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp
new file mode 100644
index 0000000..6827034
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp
@@ -0,0 +1,368 @@
+//===--- WindowsToolChain.cpp - Windows ToolChain Implementation ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "clang/Basic/Version.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Path.h"
+
+// Include the necessary headers to interface with the Windows registry and
+// environment.
+#ifdef _MSC_VER
+ #define WIN32_LEAN_AND_MEAN
+ #define NOGDI
+ #define NOMINMAX
+ #include <Windows.h>
+#endif
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+
+Windows::Windows(const Driver &D, const llvm::Triple& Triple)
+ : ToolChain(D, Triple) {
+}
+
+Tool &Windows::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::InputClass:
+ case Action::BindArchClass:
+ case Action::LipoJobClass:
+ case Action::DsymutilJobClass:
+ case Action::VerifyJobClass:
+ llvm_unreachable("Invalid tool kind.");
+ case Action::PreprocessJobClass:
+ case Action::PrecompileJobClass:
+ case Action::AnalyzeJobClass:
+ case Action::MigrateJobClass:
+ case Action::CompileJobClass:
+ T = new tools::Clang(*this); break;
+ case Action::AssembleJobClass:
+ if (!UseIntegratedAs && getTriple().getEnvironment() == llvm::Triple::MachO)
+ T = new tools::darwin::Assemble(*this);
+ else
+ T = new tools::ClangAs(*this);
+ break;
+ case Action::LinkJobClass:
+ T = new tools::visualstudio::Link(*this); break;
+ }
+ }
+
+ return *T;
+}
+
+bool Windows::IsIntegratedAssemblerDefault() const {
+ return true;
+}
+
+bool Windows::IsUnwindTablesDefault() const {
+ // FIXME: Gross; we should probably have some separate target
+ // definition, possibly even reusing the one in clang.
+ return getArchName() == "x86_64";
+}
+
+const char *Windows::GetDefaultRelocationModel() const {
+ return "static";
+}
+
+const char *Windows::GetForcedPicModel() const {
+ if (getArchName() == "x86_64")
+ return "pic";
+ return 0;
+}
+
+// FIXME: This probably should go to some platform utils place.
+#ifdef _MSC_VER
+
+/// \brief Read registry string.
+/// This also supports a means to look for high-versioned keys by use
+/// of a $VERSION placeholder in the key path.
+/// $VERSION in the key path is a placeholder for the version number,
+/// causing the highest value path to be searched for and used.
+/// I.e. "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION".
+/// There can be additional characters in the component. Only the numeric
+/// characters are compared.
+static bool getSystemRegistryString(const char *keyPath, const char *valueName,
+ char *value, size_t maxLength) {
+ HKEY hRootKey = NULL;
+ HKEY hKey = NULL;
+ const char* subKey = NULL;
+ DWORD valueType;
+ DWORD valueSize = maxLength - 1;
+ long lResult;
+ bool returnValue = false;
+
+ if (strncmp(keyPath, "HKEY_CLASSES_ROOT\\", 18) == 0) {
+ hRootKey = HKEY_CLASSES_ROOT;
+ subKey = keyPath + 18;
+ } else if (strncmp(keyPath, "HKEY_USERS\\", 11) == 0) {
+ hRootKey = HKEY_USERS;
+ subKey = keyPath + 11;
+ } else if (strncmp(keyPath, "HKEY_LOCAL_MACHINE\\", 19) == 0) {
+ hRootKey = HKEY_LOCAL_MACHINE;
+ subKey = keyPath + 19;
+ } else if (strncmp(keyPath, "HKEY_CURRENT_USER\\", 18) == 0) {
+ hRootKey = HKEY_CURRENT_USER;
+ subKey = keyPath + 18;
+ } else {
+ return false;
+ }
+
+ const char *placeHolder = strstr(subKey, "$VERSION");
+ char bestName[256];
+ bestName[0] = '\0';
+ // If we have a $VERSION placeholder, do the highest-version search.
+ if (placeHolder) {
+ const char *keyEnd = placeHolder - 1;
+ const char *nextKey = placeHolder;
+ // Find end of previous key.
+ while ((keyEnd > subKey) && (*keyEnd != '\\'))
+ keyEnd--;
+ // Find end of key containing $VERSION.
+ while (*nextKey && (*nextKey != '\\'))
+ nextKey++;
+ size_t partialKeyLength = keyEnd - subKey;
+ char partialKey[256];
+    // Clamp to leave room for the NUL terminator written below.
+    if (partialKeyLength >= sizeof(partialKey))
+      partialKeyLength = sizeof(partialKey) - 1;
+    strncpy(partialKey, subKey, partialKeyLength);
+    partialKey[partialKeyLength] = '\0';
+ HKEY hTopKey = NULL;
+ lResult = RegOpenKeyEx(hRootKey, partialKey, 0, KEY_READ, &hTopKey);
+ if (lResult == ERROR_SUCCESS) {
+ char keyName[256];
+ int bestIndex = -1;
+ double bestValue = 0.0;
+ DWORD index, size = sizeof(keyName) - 1;
+ for (index = 0; RegEnumKeyEx(hTopKey, index, keyName, &size, NULL,
+ NULL, NULL, NULL) == ERROR_SUCCESS; index++) {
+ const char *sp = keyName;
+ while (*sp && !isdigit(*sp))
+ sp++;
+ if (!*sp)
+ continue;
+ const char *ep = sp + 1;
+ while (*ep && (isdigit(*ep) || (*ep == '.')))
+ ep++;
+ char numBuf[32];
+ strncpy(numBuf, sp, sizeof(numBuf) - 1);
+ numBuf[sizeof(numBuf) - 1] = '\0';
+ double value = strtod(numBuf, NULL);
+ if (value > bestValue) {
+ bestIndex = (int)index;
+ bestValue = value;
+ strcpy(bestName, keyName);
+ }
+ size = sizeof(keyName) - 1;
+ }
+ // If we found the highest versioned key, open the key and get the value.
+ if (bestIndex != -1) {
+ // Append rest of key.
+      strncat(bestName, nextKey, sizeof(bestName) - strlen(bestName) - 1);
+ bestName[sizeof(bestName) - 1] = '\0';
+ // Open the chosen key path remainder.
+ lResult = RegOpenKeyEx(hTopKey, bestName, 0, KEY_READ, &hKey);
+ if (lResult == ERROR_SUCCESS) {
+ lResult = RegQueryValueEx(hKey, valueName, NULL, &valueType,
+ (LPBYTE)value, &valueSize);
+ if (lResult == ERROR_SUCCESS)
+ returnValue = true;
+ RegCloseKey(hKey);
+ }
+ }
+ RegCloseKey(hTopKey);
+ }
+ } else {
+ lResult = RegOpenKeyEx(hRootKey, subKey, 0, KEY_READ, &hKey);
+ if (lResult == ERROR_SUCCESS) {
+ lResult = RegQueryValueEx(hKey, valueName, NULL, &valueType,
+ (LPBYTE)value, &valueSize);
+ if (lResult == ERROR_SUCCESS)
+ returnValue = true;
+ RegCloseKey(hKey);
+ }
+ }
+ return returnValue;
+}
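A hypothetical call site (not in the patch) showing how the $VERSION placeholder is meant to be used:

// Resolve the InstallDir of the highest-versioned Visual Studio key.
char vsDir[256];
if (getSystemRegistryString(
        "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION",
        "InstallDir", vsDir, sizeof(vsDir) - 1)) {
  // vsDir now holds the value from e.g. ...\VisualStudio\10.0\InstallDir.
}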
+
+/// \brief Get Windows SDK installation directory.
+static bool getWindowsSDKDir(std::string &path) {
+ char windowsSDKInstallDir[256];
+ // Try the Windows registry.
+ bool hasSDKDir = getSystemRegistryString(
+ "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
+ "InstallationFolder",
+ windowsSDKInstallDir,
+ sizeof(windowsSDKInstallDir) - 1);
+  // If the registry lookup succeeded and gave us a path, use it.
+ if (hasSDKDir && windowsSDKInstallDir[0]) {
+ path = windowsSDKInstallDir;
+ return true;
+ }
+ return false;
+}
+
+ // Get Visual Studio installation directory.
+static bool getVisualStudioDir(std::string &path) {
+ // First check the environment variables that vsvars32.bat sets.
+ const char* vcinstalldir = getenv("VCINSTALLDIR");
+ if (vcinstalldir) {
+ char *p = const_cast<char *>(strstr(vcinstalldir, "\\VC"));
+ if (p)
+ *p = '\0';
+ path = vcinstalldir;
+ return true;
+ }
+
+ char vsIDEInstallDir[256];
+ char vsExpressIDEInstallDir[256];
+ // Then try the windows registry.
+ bool hasVCDir = getSystemRegistryString(
+ "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION",
+ "InstallDir", vsIDEInstallDir, sizeof(vsIDEInstallDir) - 1);
+ bool hasVCExpressDir = getSystemRegistryString(
+ "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VCExpress\\$VERSION",
+ "InstallDir", vsExpressIDEInstallDir, sizeof(vsExpressIDEInstallDir) - 1);
+ // If we have both vc80 and vc90, pick version we were compiled with.
+ if (hasVCDir && vsIDEInstallDir[0]) {
+ char *p = (char*)strstr(vsIDEInstallDir, "\\Common7\\IDE");
+ if (p)
+ *p = '\0';
+ path = vsIDEInstallDir;
+ return true;
+ }
+
+ if (hasVCExpressDir && vsExpressIDEInstallDir[0]) {
+ char *p = (char*)strstr(vsExpressIDEInstallDir, "\\Common7\\IDE");
+ if (p)
+ *p = '\0';
+ path = vsExpressIDEInstallDir;
+ return true;
+ }
+
+ // Try the environment.
+ const char *vs100comntools = getenv("VS100COMNTOOLS");
+ const char *vs90comntools = getenv("VS90COMNTOOLS");
+ const char *vs80comntools = getenv("VS80COMNTOOLS");
+ const char *vscomntools = NULL;
+
+ // Try to find the version that we were compiled with
+  if (false) {}
+  #if (_MSC_VER >= 1600) // VC100
+  else if (vs100comntools) {
+    vscomntools = vs100comntools;
+  }
+  #elif (_MSC_VER == 1500) // VC90
+  else if (vs90comntools) {
+    vscomntools = vs90comntools;
+  }
+  #elif (_MSC_VER == 1400) // VC80
+  else if (vs80comntools) {
+    vscomntools = vs80comntools;
+  }
+  #endif
+ // Otherwise find any version we can
+ else if (vs100comntools)
+ vscomntools = vs100comntools;
+ else if (vs90comntools)
+ vscomntools = vs90comntools;
+ else if (vs80comntools)
+ vscomntools = vs80comntools;
+
+ if (vscomntools && *vscomntools) {
+ const char *p = strstr(vscomntools, "\\Common7\\Tools");
+ path = p ? std::string(vscomntools, p) : vscomntools;
+ return true;
+ }
+ return false;
+}
+
+#endif // _MSC_VER
+
+void Windows::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ llvm::sys::Path P(getDriver().ResourceDir);
+ P.appendComponent("include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+#ifdef _MSC_VER
+  // Honor %INCLUDE%. It typically holds the essential search paths set up by vcvarsall.bat.
+ if (const char *cl_include_dir = getenv("INCLUDE")) {
+ SmallVector<StringRef, 8> Dirs;
+ StringRef(cl_include_dir).split(Dirs, ";");
+ int n = 0;
+ for (SmallVectorImpl<StringRef>::iterator I = Dirs.begin(), E = Dirs.end();
+ I != E; ++I) {
+ StringRef d = *I;
+ if (d.size() == 0)
+ continue;
+ ++n;
+ addSystemInclude(DriverArgs, CC1Args, d);
+ }
+ if (n) return;
+ }
+
+ std::string VSDir;
+ std::string WindowsSDKDir;
+
+ // When built with access to the proper Windows APIs, try to actually find
+ // the correct include paths first.
+ if (getVisualStudioDir(VSDir)) {
+ addSystemInclude(DriverArgs, CC1Args, VSDir + "\\VC\\include");
+ if (getWindowsSDKDir(WindowsSDKDir))
+ addSystemInclude(DriverArgs, CC1Args, WindowsSDKDir + "\\include");
+ else
+ addSystemInclude(DriverArgs, CC1Args,
+ VSDir + "\\VC\\PlatformSDK\\Include");
+ return;
+ }
+#endif // _MSC_VER
+
+ // As a fallback, select default install paths.
+ const StringRef Paths[] = {
+ "C:/Program Files/Microsoft Visual Studio 10.0/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 9.0/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 9.0/VC/PlatformSDK/Include",
+ "C:/Program Files/Microsoft Visual Studio 8/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include"
+ };
+ addSystemIncludes(DriverArgs, CC1Args, Paths);
+}
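Concretely, when %INCLUDE% is honored the loop above turns the semicolon-separated value into one cc1 flag per non-empty directory; assuming addSystemInclude() emits -internal-isystem (its usual behavior, but an assumption here):

// INCLUDE=C:\VS\VC\include;C:\SDK\include   becomes, roughly:
//   -internal-isystem C:\VS\VC\include
//   -internal-isystem C:\SDK\include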
+
+void Windows::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // FIXME: There should probably be logic here to find libc++ on Windows.
+}
diff --git a/contrib/llvm/tools/clang/lib/Edit/Commit.cpp b/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
new file mode 100644
index 0000000..c45ee1f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
@@ -0,0 +1,345 @@
+//===----- Commit.cpp - A unit of edits -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace edit;
+
+SourceLocation Commit::Edit::getFileLocation(SourceManager &SM) const {
+ SourceLocation Loc = SM.getLocForStartOfFile(Offset.getFID());
+ Loc = Loc.getLocWithOffset(Offset.getOffset());
+ assert(Loc.isFileID());
+ return Loc;
+}
+
+CharSourceRange Commit::Edit::getFileRange(SourceManager &SM) const {
+ SourceLocation Loc = getFileLocation(SM);
+ return CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(Length));
+}
+
+CharSourceRange Commit::Edit::getInsertFromRange(SourceManager &SM) const {
+ SourceLocation Loc = SM.getLocForStartOfFile(InsertFromRangeOffs.getFID());
+ Loc = Loc.getLocWithOffset(InsertFromRangeOffs.getOffset());
+ assert(Loc.isFileID());
+ return CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(Length));
+}
+
+Commit::Commit(EditedSource &Editor)
+ : SourceMgr(Editor.getSourceManager()), LangOpts(Editor.getLangOpts()),
+ PPRec(Editor.getPreprocessingRecord()),
+ Editor(&Editor), IsCommitable(true) { }
+
+bool Commit::insert(SourceLocation loc, StringRef text,
+ bool afterToken, bool beforePreviousInsertions) {
+ if (text.empty())
+ return true;
+
+ FileOffset Offs;
+ if ((!afterToken && !canInsert(loc, Offs)) ||
+ ( afterToken && !canInsertAfterToken(loc, Offs, loc))) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addInsert(loc, Offs, text, beforePreviousInsertions);
+ return true;
+}
+
+bool Commit::insertFromRange(SourceLocation loc,
+ CharSourceRange range,
+ bool afterToken, bool beforePreviousInsertions) {
+ FileOffset RangeOffs;
+ unsigned RangeLen;
+ if (!canRemoveRange(range, RangeOffs, RangeLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset Offs;
+ if ((!afterToken && !canInsert(loc, Offs)) ||
+ ( afterToken && !canInsertAfterToken(loc, Offs, loc))) {
+ IsCommitable = false;
+ return false;
+ }
+
+ if (PPRec &&
+ PPRec->areInDifferentConditionalDirectiveRegion(loc, range.getBegin())) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addInsertFromRange(loc, Offs, RangeOffs, RangeLen, beforePreviousInsertions);
+ return true;
+}
+
+bool Commit::remove(CharSourceRange range) {
+ FileOffset Offs;
+ unsigned Len;
+ if (!canRemoveRange(range, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(), Offs, Len);
+ return true;
+}
+
+bool Commit::insertWrap(StringRef before, CharSourceRange range,
+ StringRef after) {
+ bool commitableBefore = insert(range.getBegin(), before, /*afterToken=*/false,
+ /*beforePreviousInsertions=*/true);
+ bool commitableAfter;
+ if (range.isTokenRange())
+ commitableAfter = insertAfterToken(range.getEnd(), after);
+ else
+ commitableAfter = insert(range.getEnd(), after);
+
+ return commitableBefore && commitableAfter;
+}
+
+bool Commit::replace(CharSourceRange range, StringRef text) {
+ if (text.empty())
+ return remove(range);
+
+ FileOffset Offs;
+ unsigned Len;
+ if (!canInsert(range.getBegin(), Offs) || !canRemoveRange(range, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(), Offs, Len);
+ addInsert(range.getBegin(), Offs, text, false);
+ return true;
+}
+
+bool Commit::replaceWithInner(CharSourceRange range,
+ CharSourceRange replacementRange) {
+ FileOffset OuterBegin;
+ unsigned OuterLen;
+ if (!canRemoveRange(range, OuterBegin, OuterLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset InnerBegin;
+ unsigned InnerLen;
+ if (!canRemoveRange(replacementRange, InnerBegin, InnerLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset OuterEnd = OuterBegin.getWithOffset(OuterLen);
+ FileOffset InnerEnd = InnerBegin.getWithOffset(InnerLen);
+ if (OuterBegin.getFID() != InnerBegin.getFID() ||
+ InnerBegin < OuterBegin ||
+ InnerBegin > OuterEnd ||
+ InnerEnd > OuterEnd) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(),
+ OuterBegin, InnerBegin.getOffset() - OuterBegin.getOffset());
+ addRemove(replacementRange.getEnd(),
+ InnerEnd, OuterEnd.getOffset() - InnerEnd.getOffset());
+ return true;
+}
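A worked example makes the two addRemove() calls concrete (offsets invented for illustration):

// Outer range covers file offsets [10,30); the inner (replacement) range
// covers [15,20). The containment checks pass, and the commit queues:
//   addRemove(..., Offs=10, Len=5)   // strip [10,15), before the inner range
//   addRemove(..., Offs=20, Len=10)  // strip [20,30), after the inner range
// leaving only the inner text [15,20) in place.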
+
+bool Commit::replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText) {
+ if (text.empty() || replacementText.empty())
+ return true;
+
+ FileOffset Offs;
+ unsigned Len;
+ if (!canReplaceText(loc, replacementText, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(loc, Offs, Len);
+ addInsert(loc, Offs, text, false);
+ return true;
+}
+
+void Commit::addInsert(SourceLocation OrigLoc, FileOffset Offs, StringRef text,
+ bool beforePreviousInsertions) {
+ if (text.empty())
+ return;
+
+ Edit data;
+ data.Kind = Act_Insert;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.Text = text;
+ data.BeforePrev = beforePreviousInsertions;
+ CachedEdits.push_back(data);
+}
+
+void Commit::addInsertFromRange(SourceLocation OrigLoc, FileOffset Offs,
+ FileOffset RangeOffs, unsigned RangeLen,
+ bool beforePreviousInsertions) {
+ if (RangeLen == 0)
+ return;
+
+ Edit data;
+ data.Kind = Act_InsertFromRange;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.InsertFromRangeOffs = RangeOffs;
+ data.Length = RangeLen;
+ data.BeforePrev = beforePreviousInsertions;
+ CachedEdits.push_back(data);
+}
+
+void Commit::addRemove(SourceLocation OrigLoc,
+ FileOffset Offs, unsigned Len) {
+ if (Len == 0)
+ return;
+
+ Edit data;
+ data.Kind = Act_Remove;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.Length = Len;
+ CachedEdits.push_back(data);
+}
+
+bool Commit::canInsert(SourceLocation loc, FileOffset &offs) {
+ if (loc.isInvalid())
+ return false;
+
+ if (loc.isMacroID())
+ isAtStartOfMacroExpansion(loc, &loc);
+
+ const SourceManager &SM = SourceMgr;
+ while (SM.isMacroArgExpansion(loc))
+ loc = SM.getImmediateSpellingLoc(loc);
+
+ if (loc.isMacroID())
+ if (!isAtStartOfMacroExpansion(loc, &loc))
+ return false;
+
+ if (SM.isInSystemHeader(loc))
+ return false;
+
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+ if (locInfo.first.isInvalid())
+ return false;
+ offs = FileOffset(locInfo.first, locInfo.second);
+ return canInsertInOffset(loc, offs);
+}
+
+bool Commit::canInsertAfterToken(SourceLocation loc, FileOffset &offs,
+ SourceLocation &AfterLoc) {
+  if (loc.isInvalid())
+    return false;
+
+ SourceLocation spellLoc = SourceMgr.getSpellingLoc(loc);
+ unsigned tokLen = Lexer::MeasureTokenLength(spellLoc, SourceMgr, LangOpts);
+ AfterLoc = loc.getLocWithOffset(tokLen);
+
+ if (loc.isMacroID())
+ isAtEndOfMacroExpansion(loc, &loc);
+
+ const SourceManager &SM = SourceMgr;
+ while (SM.isMacroArgExpansion(loc))
+ loc = SM.getImmediateSpellingLoc(loc);
+
+ if (loc.isMacroID())
+ if (!isAtEndOfMacroExpansion(loc, &loc))
+ return false;
+
+ if (SM.isInSystemHeader(loc))
+ return false;
+
+ loc = Lexer::getLocForEndOfToken(loc, 0, SourceMgr, LangOpts);
+ if (loc.isInvalid())
+ return false;
+
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+ if (locInfo.first.isInvalid())
+ return false;
+ offs = FileOffset(locInfo.first, locInfo.second);
+ return canInsertInOffset(loc, offs);
+}
+
+bool Commit::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
+ for (unsigned i = 0, e = CachedEdits.size(); i != e; ++i) {
+ Edit &act = CachedEdits[i];
+ if (act.Kind == Act_Remove) {
+ if (act.Offset.getFID() == Offs.getFID() &&
+ Offs > act.Offset && Offs < act.Offset.getWithOffset(act.Length))
+ return false; // position has been removed.
+ }
+ }
+
+ if (!Editor)
+ return true;
+ return Editor->canInsertInOffset(OrigLoc, Offs);
+}
+
+bool Commit::canRemoveRange(CharSourceRange range,
+ FileOffset &Offs, unsigned &Len) {
+ const SourceManager &SM = SourceMgr;
+ range = Lexer::makeFileCharRange(range, SM, LangOpts);
+ if (range.isInvalid())
+ return false;
+
+ if (range.getBegin().isMacroID() || range.getEnd().isMacroID())
+ return false;
+ if (SM.isInSystemHeader(range.getBegin()) ||
+ SM.isInSystemHeader(range.getEnd()))
+ return false;
+
+ if (PPRec && PPRec->rangeIntersectsConditionalDirective(range.getAsRange()))
+ return false;
+
+ std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(range.getBegin());
+ std::pair<FileID, unsigned> endInfo = SM.getDecomposedLoc(range.getEnd());
+ if (beginInfo.first != endInfo.first ||
+ beginInfo.second > endInfo.second)
+ return false;
+
+ Offs = FileOffset(beginInfo.first, beginInfo.second);
+ Len = endInfo.second - beginInfo.second;
+ return true;
+}
+
+bool Commit::canReplaceText(SourceLocation loc, StringRef text,
+ FileOffset &Offs, unsigned &Len) {
+ assert(!text.empty());
+
+ if (!canInsert(loc, Offs))
+ return false;
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SourceMgr.getBufferData(Offs.getFID(), &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ return file.substr(Offs.getOffset()).startswith(text);
+}
+
+bool Commit::isAtStartOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroBegin) const {
+ return Lexer::isAtStartOfMacroExpansion(loc, SourceMgr, LangOpts, MacroBegin);
+}
+bool Commit::isAtEndOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroEnd) const {
+ return Lexer::isAtEndOfMacroExpansion(loc, SourceMgr, LangOpts, MacroEnd);
+}
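
The Commit class above only caches edits; each mutator validates its
location against macros, system headers, and preprocessor conditionals and
clears IsCommitable on failure. A minimal usage sketch, not part of the
patch (the Commit and EditedSource constructor shapes are assumed from the
members these files use):

    #include "clang/Edit/Commit.h"
    #include "clang/Edit/EditedSource.h"

    using namespace clang;

    static bool rewriteOnce(SourceManager &SM, const LangOptions &LangOpts,
                            SourceLocation Loc, CharSourceRange Range) {
      edit::EditedSource Editor(SM, LangOpts);
      edit::Commit commit(Editor);
      commit.insert(Loc, "/*marker*/"); // cached, nothing applied yet
      commit.remove(Range);             // any edit can veto the whole batch
      if (!commit.isCommitable())
        return false;                   // e.g. a macro or system-header loc
      return Editor.commit(commit);     // apply all cached edits together
    }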
diff --git a/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
new file mode 100644
index 0000000..5b7fa4a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
@@ -0,0 +1,329 @@
+//===----- EditedSource.cpp - Collection of source edits ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+
+using namespace clang;
+using namespace edit;
+
+void EditsReceiver::remove(CharSourceRange range) {
+ replace(range, StringRef());
+}
+
+StringRef EditedSource::copyString(const Twine &twine) {
+ llvm::SmallString<128> Data;
+ return copyString(twine.toStringRef(Data));
+}
+
+bool EditedSource::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
+ FileEditsTy::iterator FA = getActionForOffset(Offs);
+ if (FA != FileEdits.end()) {
+ if (FA->first != Offs)
+ return false; // position has been removed.
+ }
+
+ if (SourceMgr.isMacroArgExpansion(OrigLoc)) {
+ SourceLocation
+ DefArgLoc = SourceMgr.getImmediateExpansionRange(OrigLoc).first;
+ SourceLocation
+ ExpLoc = SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
+ llvm::DenseMap<unsigned, SourceLocation>::iterator
+ I = ExpansionToArgMap.find(ExpLoc.getRawEncoding());
+ if (I != ExpansionToArgMap.end() && I->second != DefArgLoc)
+      return false; // Trying to write in a macro argument input that has
+                    // already been written for another argument of the same macro.
+ }
+
+ return true;
+}
+
+bool EditedSource::commitInsert(SourceLocation OrigLoc,
+ FileOffset Offs, StringRef text,
+ bool beforePreviousInsertions) {
+ if (!canInsertInOffset(OrigLoc, Offs))
+ return false;
+ if (text.empty())
+ return true;
+
+ if (SourceMgr.isMacroArgExpansion(OrigLoc)) {
+ SourceLocation
+ DefArgLoc = SourceMgr.getImmediateExpansionRange(OrigLoc).first;
+ SourceLocation
+ ExpLoc = SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
+ ExpansionToArgMap[ExpLoc.getRawEncoding()] = DefArgLoc;
+ }
+
+ FileEdit &FA = FileEdits[Offs];
+ if (FA.Text.empty()) {
+ FA.Text = copyString(text);
+ return true;
+ }
+
+  // Concatenate and copy in a single full expression; a named Twine would
+  // dangle, since a Twine only references its operands.
+  if (beforePreviousInsertions)
+    FA.Text = copyString(Twine(text) + FA.Text);
+  else
+    FA.Text = copyString(Twine(FA.Text) + text);
+ return true;
+}
+
+bool EditedSource::commitInsertFromRange(SourceLocation OrigLoc,
+ FileOffset Offs,
+ FileOffset InsertFromRangeOffs, unsigned Len,
+ bool beforePreviousInsertions) {
+ if (Len == 0)
+ return true;
+
+ llvm::SmallString<128> StrVec;
+ FileOffset BeginOffs = InsertFromRangeOffs;
+ FileOffset EndOffs = BeginOffs.getWithOffset(Len);
+ FileEditsTy::iterator I = FileEdits.upper_bound(BeginOffs);
+ if (I != FileEdits.begin())
+ --I;
+
+ for (; I != FileEdits.end(); ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs < E) {
+ if (BeginOffs >= B) {
+ BeginOffs = E;
+ ++I;
+ }
+ break;
+ }
+ }
+
+ for (; I != FileEdits.end() && EndOffs > I->first; ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs < B) {
+ bool Invalid = false;
+ StringRef text = getSourceText(BeginOffs, B, Invalid);
+ if (Invalid)
+ return false;
+ StrVec += text;
+ }
+ StrVec += FA.Text;
+ BeginOffs = E;
+ }
+
+ if (BeginOffs < EndOffs) {
+ bool Invalid = false;
+ StringRef text = getSourceText(BeginOffs, EndOffs, Invalid);
+ if (Invalid)
+ return false;
+ StrVec += text;
+ }
+
+ return commitInsert(OrigLoc, Offs, StrVec.str(), beforePreviousInsertions);
+}
+
+void EditedSource::commitRemove(SourceLocation OrigLoc,
+ FileOffset BeginOffs, unsigned Len) {
+ if (Len == 0)
+ return;
+
+ FileOffset EndOffs = BeginOffs.getWithOffset(Len);
+ FileEditsTy::iterator I = FileEdits.upper_bound(BeginOffs);
+ if (I != FileEdits.begin())
+ --I;
+
+ for (; I != FileEdits.end(); ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs < E)
+ break;
+ }
+
+ FileOffset TopBegin, TopEnd;
+ FileEdit *TopFA = 0;
+
+ if (I == FileEdits.end()) {
+ FileEditsTy::iterator
+ NewI = FileEdits.insert(I, std::make_pair(BeginOffs, FileEdit()));
+ NewI->second.RemoveLen = Len;
+ return;
+ }
+
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+ if (BeginOffs < B) {
+ FileEditsTy::iterator
+ NewI = FileEdits.insert(I, std::make_pair(BeginOffs, FileEdit()));
+ TopBegin = BeginOffs;
+ TopEnd = EndOffs;
+ TopFA = &NewI->second;
+ TopFA->RemoveLen = Len;
+ } else {
+ TopBegin = B;
+ TopEnd = E;
+ TopFA = &I->second;
+ if (TopEnd >= EndOffs)
+ return;
+ unsigned diff = EndOffs.getOffset() - TopEnd.getOffset();
+ TopEnd = EndOffs;
+ TopFA->RemoveLen += diff;
+ ++I;
+ }
+
+ while (I != FileEdits.end()) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (B >= TopEnd)
+ break;
+
+ if (E <= TopEnd) {
+ FileEdits.erase(I++);
+ continue;
+ }
+
+ if (B < TopEnd) {
+ unsigned diff = E.getOffset() - TopEnd.getOffset();
+ TopEnd = E;
+ TopFA->RemoveLen += diff;
+ FileEdits.erase(I);
+ }
+
+ break;
+ }
+}
+
+bool EditedSource::commit(const Commit &commit) {
+ if (!commit.isCommitable())
+ return false;
+
+ for (edit::Commit::edit_iterator
+ I = commit.edit_begin(), E = commit.edit_end(); I != E; ++I) {
+ const edit::Commit::Edit &edit = *I;
+ switch (edit.Kind) {
+ case edit::Commit::Act_Insert:
+ commitInsert(edit.OrigLoc, edit.Offset, edit.Text, edit.BeforePrev);
+ break;
+ case edit::Commit::Act_InsertFromRange:
+ commitInsertFromRange(edit.OrigLoc, edit.Offset,
+ edit.InsertFromRangeOffs, edit.Length,
+ edit.BeforePrev);
+ break;
+ case edit::Commit::Act_Remove:
+ commitRemove(edit.OrigLoc, edit.Offset, edit.Length);
+ break;
+ }
+ }
+
+ return true;
+}
+
+static void applyRewrite(EditsReceiver &receiver,
+ StringRef text, FileOffset offs, unsigned len,
+ const SourceManager &SM) {
+ assert(!offs.getFID().isInvalid());
+ SourceLocation Loc = SM.getLocForStartOfFile(offs.getFID());
+ Loc = Loc.getLocWithOffset(offs.getOffset());
+ assert(Loc.isFileID());
+ CharSourceRange range = CharSourceRange::getCharRange(Loc,
+ Loc.getLocWithOffset(len));
+
+ if (text.empty()) {
+ assert(len);
+ receiver.remove(range);
+ return;
+ }
+
+ if (len)
+ receiver.replace(range, text);
+ else
+ receiver.insert(Loc, text);
+}
+
+void EditedSource::applyRewrites(EditsReceiver &receiver) {
+ llvm::SmallString<128> StrVec;
+ FileOffset CurOffs, CurEnd;
+ unsigned CurLen;
+
+ if (FileEdits.empty())
+ return;
+
+ FileEditsTy::iterator I = FileEdits.begin();
+ CurOffs = I->first;
+ StrVec = I->second.Text;
+ CurLen = I->second.RemoveLen;
+ CurEnd = CurOffs.getWithOffset(CurLen);
+ ++I;
+
+ for (FileEditsTy::iterator E = FileEdits.end(); I != E; ++I) {
+ FileOffset offs = I->first;
+ FileEdit act = I->second;
+ assert(offs >= CurEnd);
+
+ if (offs == CurEnd) {
+ StrVec += act.Text;
+ CurLen += act.RemoveLen;
+      CurEnd = CurEnd.getWithOffset(act.RemoveLen);
+ continue;
+ }
+
+ applyRewrite(receiver, StrVec.str(), CurOffs, CurLen, SourceMgr);
+ CurOffs = offs;
+ StrVec = act.Text;
+ CurLen = act.RemoveLen;
+ CurEnd = CurOffs.getWithOffset(CurLen);
+ }
+
+ applyRewrite(receiver, StrVec.str(), CurOffs, CurLen, SourceMgr);
+}
+
+void EditedSource::clearRewrites() {
+ FileEdits.clear();
+ StrAlloc.Reset();
+}
+
+StringRef EditedSource::getSourceText(FileOffset BeginOffs, FileOffset EndOffs,
+ bool &Invalid) {
+ assert(BeginOffs.getFID() == EndOffs.getFID());
+ assert(BeginOffs <= EndOffs);
+ SourceLocation BLoc = SourceMgr.getLocForStartOfFile(BeginOffs.getFID());
+ BLoc = BLoc.getLocWithOffset(BeginOffs.getOffset());
+ assert(BLoc.isFileID());
+ SourceLocation
+ ELoc = BLoc.getLocWithOffset(EndOffs.getOffset() - BeginOffs.getOffset());
+ return Lexer::getSourceText(CharSourceRange::getCharRange(BLoc, ELoc),
+ SourceMgr, LangOpts, &Invalid);
+}
+
+EditedSource::FileEditsTy::iterator
+EditedSource::getActionForOffset(FileOffset Offs) {
+ FileEditsTy::iterator I = FileEdits.upper_bound(Offs);
+ if (I == FileEdits.begin())
+ return FileEdits.end();
+ --I;
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+ if (Offs >= B && Offs < E)
+ return I;
+
+ return FileEdits.end();
+}
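
applyRewrites() above coalesces adjacent FileEdits and reports each merged
run to an EditsReceiver as an insert, replace, or remove. A sketch of a
receiver, assuming only the virtual methods that applyRewrite() calls:

    #include "clang/Edit/EditsReceiver.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    class DumpingReceiver : public edit::EditsReceiver {
    public:
      virtual void insert(SourceLocation Loc, StringRef Text) {
        llvm::errs() << "insert: " << Text << "\n";
      }
      virtual void replace(CharSourceRange Range, StringRef Text) {
        llvm::errs() << "replace: " << Text << "\n";
      }
      // remove() defaults to replace(Range, "") per the base class above.
    };

A typical drive is Editor.applyRewrites(Receiver) followed by
Editor.clearRewrites().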
diff --git a/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
new file mode 100644
index 0000000..24a0db1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -0,0 +1,587 @@
+//===--- RewriteObjCFoundationAPI.cpp - Foundation API Rewriter -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Rewrites legacy method calls to modern syntax.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/Rewriters.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/NSAPI.h"
+
+using namespace clang;
+using namespace edit;
+
+static bool checkForLiteralCreation(const ObjCMessageExpr *Msg,
+ IdentifierInfo *&ClassId) {
+ if (!Msg || Msg->isImplicit() || !Msg->getMethodDecl())
+ return false;
+
+ const ObjCInterfaceDecl *Receiver = Msg->getReceiverInterface();
+ if (!Receiver)
+ return false;
+ ClassId = Receiver->getIdentifier();
+
+ if (Msg->getReceiverKind() == ObjCMessageExpr::Class)
+ return true;
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteObjCRedundantCallWithLiteral.
+//===----------------------------------------------------------------------===//
+
+bool edit::rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ IdentifierInfo *II = 0;
+ if (!checkForLiteralCreation(Msg, II))
+ return false;
+ if (Msg->getNumArgs() != 1)
+ return false;
+
+ const Expr *Arg = Msg->getArg(0)->IgnoreParenImpCasts();
+ Selector Sel = Msg->getSelector();
+
+ if ((isa<ObjCStringLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSString) == II &&
+ NS.getNSStringSelector(NSAPI::NSStr_stringWithString) == Sel) ||
+
+ (isa<ObjCArrayLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSArray) == II &&
+ NS.getNSArraySelector(NSAPI::NSArr_arrayWithArray) == Sel) ||
+
+ (isa<ObjCDictionaryLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSDictionary) == II &&
+ NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithDictionary) == Sel)) {
+
+ commit.replaceWithInner(Msg->getSourceRange(),
+ Msg->getArg(0)->getSourceRange());
+ return true;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToObjCSubscriptSyntax.
+//===----------------------------------------------------------------------===//
+
+static void maybePutParensOnReceiver(const Expr *Receiver, Commit &commit) {
+ Receiver = Receiver->IgnoreImpCasts();
+ if (isa<BinaryOperator>(Receiver) || isa<UnaryOperator>(Receiver)) {
+ SourceRange RecRange = Receiver->getSourceRange();
+ commit.insertWrap("(", RecRange, ")");
+ }
+}
+
+static bool rewriteToSubscriptGet(const ObjCMessageExpr *Msg, Commit &commit) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange ArgRange = Msg->getArg(0)->getSourceRange();
+
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ ArgRange.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(SourceRange(ArgRange.getBegin(), MsgRange.getEnd()),
+ ArgRange);
+ commit.insertWrap("[", ArgRange, "]");
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
+
+static bool rewriteToArraySubscriptSet(const ObjCMessageExpr *Msg,
+ Commit &commit) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange Arg0Range = Msg->getArg(0)->getSourceRange();
+ SourceRange Arg1Range = Msg->getArg(1)->getSourceRange();
+
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ Arg0Range.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(CharSourceRange::getCharRange(Arg0Range.getBegin(),
+ Arg1Range.getBegin()),
+ CharSourceRange::getTokenRange(Arg0Range));
+ commit.replaceWithInner(SourceRange(Arg1Range.getBegin(), MsgRange.getEnd()),
+ Arg1Range);
+ commit.insertWrap("[", CharSourceRange::getCharRange(Arg0Range.getBegin(),
+ Arg1Range.getBegin()),
+ "] = ");
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
+
+static bool rewriteToDictionarySubscriptSet(const ObjCMessageExpr *Msg,
+ Commit &commit) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange Arg0Range = Msg->getArg(0)->getSourceRange();
+ SourceRange Arg1Range = Msg->getArg(1)->getSourceRange();
+
+ SourceLocation LocBeforeVal = Arg0Range.getBegin();
+ commit.insertBefore(LocBeforeVal, "] = ");
+ commit.insertFromRange(LocBeforeVal, Arg1Range, /*afterToken=*/false,
+ /*beforePreviousInsertions=*/true);
+ commit.insertBefore(LocBeforeVal, "[");
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ Arg0Range.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(SourceRange(Arg0Range.getBegin(), MsgRange.getEnd()),
+ Arg0Range);
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
+
+bool edit::rewriteToObjCSubscriptSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ if (!Msg || Msg->isImplicit() ||
+ Msg->getReceiverKind() != ObjCMessageExpr::Instance)
+ return false;
+ const ObjCMethodDecl *Method = Msg->getMethodDecl();
+ if (!Method)
+ return false;
+
+ const ObjCInterfaceDecl *
+ IFace = NS.getASTContext().getObjContainingInterface(
+ const_cast<ObjCMethodDecl *>(Method));
+ if (!IFace)
+ return false;
+ IdentifierInfo *II = IFace->getIdentifier();
+ Selector Sel = Msg->getSelector();
+
+ if ((II == NS.getNSClassId(NSAPI::ClassId_NSArray) &&
+ Sel == NS.getNSArraySelector(NSAPI::NSArr_objectAtIndex)) ||
+ (II == NS.getNSClassId(NSAPI::ClassId_NSDictionary) &&
+ Sel == NS.getNSDictionarySelector(NSAPI::NSDict_objectForKey)))
+ return rewriteToSubscriptGet(Msg, commit);
+
+ if (Msg->getNumArgs() != 2)
+ return false;
+
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSMutableArray) &&
+ Sel == NS.getNSArraySelector(NSAPI::NSMutableArr_replaceObjectAtIndex))
+ return rewriteToArraySubscriptSet(Msg, commit);
+
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSMutableDictionary) &&
+ Sel == NS.getNSDictionarySelector(NSAPI::NSMutableDict_setObjectForKey))
+ return rewriteToDictionarySubscriptSet(Msg, commit);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToObjCLiteralSyntax.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+bool edit::rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ IdentifierInfo *II = 0;
+ if (!checkForLiteralCreation(Msg, II))
+ return false;
+
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSArray))
+ return rewriteToArrayLiteral(Msg, NS, commit);
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSDictionary))
+ return rewriteToDictionaryLiteral(Msg, NS, commit);
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSNumber))
+ return rewriteToNumberLiteral(Msg, NS, commit);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToArrayLiteral.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ Selector Sel = Msg->getSelector();
+ SourceRange MsgRange = Msg->getSourceRange();
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_array)) {
+ if (Msg->getNumArgs() != 0)
+ return false;
+ commit.replace(MsgRange, "@[]");
+ return true;
+ }
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObject)) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ SourceRange ArgRange = Msg->getArg(0)->getSourceRange();
+ commit.replaceWithInner(MsgRange, ArgRange);
+ commit.insertWrap("@[", ArgRange, "]");
+ return true;
+ }
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObjects)) {
+ if (Msg->getNumArgs() == 0)
+ return false;
+ const Expr *SentinelExpr = Msg->getArg(Msg->getNumArgs() - 1);
+ if (!NS.getASTContext().isSentinelNullExpr(SentinelExpr))
+ return false;
+
+ if (Msg->getNumArgs() == 1) {
+ commit.replace(MsgRange, "@[]");
+ return true;
+ }
+ SourceRange ArgRange(Msg->getArg(0)->getLocStart(),
+ Msg->getArg(Msg->getNumArgs()-2)->getLocEnd());
+ commit.replaceWithInner(MsgRange, ArgRange);
+ commit.insertWrap("@[", ArgRange, "]");
+ return true;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToDictionaryLiteral.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ Selector Sel = Msg->getSelector();
+ SourceRange MsgRange = Msg->getSourceRange();
+
+ if (Sel == NS.getNSDictionarySelector(NSAPI::NSDict_dictionary)) {
+ if (Msg->getNumArgs() != 0)
+ return false;
+ commit.replace(MsgRange, "@{}");
+ return true;
+ }
+
+ if (Sel == NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectForKey)) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+ SourceRange ValRange = Msg->getArg(0)->getSourceRange();
+ SourceRange KeyRange = Msg->getArg(1)->getSourceRange();
+ // Insert key before the value.
+ commit.insertBefore(ValRange.getBegin(), ": ");
+ commit.insertFromRange(ValRange.getBegin(),
+ CharSourceRange::getTokenRange(KeyRange),
+ /*afterToken=*/false, /*beforePreviousInsertions=*/true);
+ commit.insertBefore(ValRange.getBegin(), "@{");
+ commit.insertAfterToken(ValRange.getEnd(), "}");
+ commit.replaceWithInner(MsgRange, ValRange);
+ return true;
+ }
+
+ if (Sel == NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectsAndKeys)) {
+ if (Msg->getNumArgs() % 2 != 1)
+ return false;
+ unsigned SentinelIdx = Msg->getNumArgs() - 1;
+ const Expr *SentinelExpr = Msg->getArg(SentinelIdx);
+ if (!NS.getASTContext().isSentinelNullExpr(SentinelExpr))
+ return false;
+
+ if (Msg->getNumArgs() == 1) {
+ commit.replace(MsgRange, "@{}");
+ return true;
+ }
+
+ for (unsigned i = 0; i < SentinelIdx; i += 2) {
+ SourceRange ValRange = Msg->getArg(i)->getSourceRange();
+ SourceRange KeyRange = Msg->getArg(i+1)->getSourceRange();
+ // Insert value after key.
+ commit.insertAfterToken(KeyRange.getEnd(), ": ");
+ commit.insertFromRange(KeyRange.getEnd(), ValRange, /*afterToken=*/true);
+ commit.remove(CharSourceRange::getCharRange(ValRange.getBegin(),
+ KeyRange.getBegin()));
+ }
+    // Range of arguments up to and including the last key. The sentinel and
+    // the first value are cut off; the loop above moved each value after its
+    // key.
+ SourceRange ArgRange(Msg->getArg(1)->getLocStart(),
+ Msg->getArg(SentinelIdx-1)->getLocEnd());
+ commit.insertWrap("@{", ArgRange, "}");
+ commit.replaceWithInner(MsgRange, ArgRange);
+ return true;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToNumberLiteral.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToCharLiteral(const ObjCMessageExpr *Msg,
+ const CharacterLiteral *Arg,
+ const NSAPI &NS, Commit &commit) {
+ if (Arg->getKind() != CharacterLiteral::Ascii)
+ return false;
+ if (NS.isNSNumberLiteralSelector(NSAPI::NSNumberWithChar,
+ Msg->getSelector())) {
+ SourceRange ArgRange = Arg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ return false;
+}
+
+static bool rewriteToBoolLiteral(const ObjCMessageExpr *Msg,
+ const Expr *Arg,
+ const NSAPI &NS, Commit &commit) {
+ if (NS.isNSNumberLiteralSelector(NSAPI::NSNumberWithBool,
+ Msg->getSelector())) {
+ SourceRange ArgRange = Arg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ return false;
+}
+
+namespace {
+
+struct LiteralInfo {
+ bool Hex, Octal;
+ StringRef U, F, L, LL;
+ CharSourceRange WithoutSuffRange;
+};
+
+}
+
+static bool getLiteralInfo(SourceRange literalRange,
+ bool isFloat, bool isIntZero,
+ ASTContext &Ctx, LiteralInfo &Info) {
+ if (literalRange.getBegin().isMacroID() ||
+ literalRange.getEnd().isMacroID())
+ return false;
+ StringRef text = Lexer::getSourceText(
+ CharSourceRange::getTokenRange(literalRange),
+ Ctx.getSourceManager(), Ctx.getLangOpts());
+ if (text.empty())
+ return false;
+
+ llvm::Optional<bool> UpperU, UpperL;
+ bool UpperF = false;
+
+ struct Suff {
+ static bool has(StringRef suff, StringRef &text) {
+ if (text.endswith(suff)) {
+ text = text.substr(0, text.size()-suff.size());
+ return true;
+ }
+ return false;
+ }
+ };
+
+ while (1) {
+ if (Suff::has("u", text)) {
+ UpperU = false;
+ } else if (Suff::has("U", text)) {
+ UpperU = true;
+ } else if (Suff::has("ll", text)) {
+ UpperL = false;
+ } else if (Suff::has("LL", text)) {
+ UpperL = true;
+ } else if (Suff::has("l", text)) {
+ UpperL = false;
+ } else if (Suff::has("L", text)) {
+ UpperL = true;
+ } else if (isFloat && Suff::has("f", text)) {
+ UpperF = false;
+ } else if (isFloat && Suff::has("F", text)) {
+ UpperF = true;
+ } else
+ break;
+ }
+
+ if (!UpperU.hasValue() && !UpperL.hasValue())
+ UpperU = UpperL = true;
+ else if (UpperU.hasValue() && !UpperL.hasValue())
+ UpperL = UpperU;
+ else if (UpperL.hasValue() && !UpperU.hasValue())
+ UpperU = UpperL;
+
+ Info.U = *UpperU ? "U" : "u";
+ Info.L = *UpperL ? "L" : "l";
+ Info.LL = *UpperL ? "LL" : "ll";
+ Info.F = UpperF ? "F" : "f";
+
+ Info.Hex = Info.Octal = false;
+ if (text.startswith("0x"))
+ Info.Hex = true;
+ else if (!isFloat && !isIntZero && text.startswith("0"))
+ Info.Octal = true;
+
+ SourceLocation B = literalRange.getBegin();
+ Info.WithoutSuffRange =
+ CharSourceRange::getCharRange(B, B.getLocWithOffset(text.size()));
+ return true;
+}
+
+static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+
+ const Expr *Arg = Msg->getArg(0)->IgnoreParenImpCasts();
+ if (const CharacterLiteral *CharE = dyn_cast<CharacterLiteral>(Arg))
+ return rewriteToCharLiteral(Msg, CharE, NS, commit);
+ if (const ObjCBoolLiteralExpr *BE = dyn_cast<ObjCBoolLiteralExpr>(Arg))
+ return rewriteToBoolLiteral(Msg, BE, NS, commit);
+ if (const CXXBoolLiteralExpr *BE = dyn_cast<CXXBoolLiteralExpr>(Arg))
+ return rewriteToBoolLiteral(Msg, BE, NS, commit);
+
+ const Expr *literalE = Arg;
+ if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(literalE)) {
+ if (UOE->getOpcode() == UO_Plus || UOE->getOpcode() == UO_Minus)
+ literalE = UOE->getSubExpr();
+ }
+
+  // Only integer and floating literals; non-literals or imaginary literals
+  // cannot be rewritten.
+ if (!isa<IntegerLiteral>(literalE) && !isa<FloatingLiteral>(literalE))
+ return false;
+
+ ASTContext &Ctx = NS.getASTContext();
+ Selector Sel = Msg->getSelector();
+ llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+ MKOpt = NS.getNSNumberLiteralMethodKind(Sel);
+ if (!MKOpt)
+ return false;
+ NSAPI::NSNumberLiteralMethodKind MK = *MKOpt;
+
+ bool CallIsUnsigned = false, CallIsLong = false, CallIsLongLong = false;
+ bool CallIsFloating = false, CallIsDouble = false;
+
+ switch (MK) {
+ // We cannot have these calls with int/float literals.
+ case NSAPI::NSNumberWithChar:
+ case NSAPI::NSNumberWithUnsignedChar:
+ case NSAPI::NSNumberWithShort:
+ case NSAPI::NSNumberWithUnsignedShort:
+ case NSAPI::NSNumberWithBool:
+ return false;
+
+ case NSAPI::NSNumberWithUnsignedInt:
+ case NSAPI::NSNumberWithUnsignedInteger:
+ CallIsUnsigned = true;
+ case NSAPI::NSNumberWithInt:
+ case NSAPI::NSNumberWithInteger:
+ break;
+
+ case NSAPI::NSNumberWithUnsignedLong:
+ CallIsUnsigned = true;
+ case NSAPI::NSNumberWithLong:
+ CallIsLong = true;
+ break;
+
+ case NSAPI::NSNumberWithUnsignedLongLong:
+ CallIsUnsigned = true;
+ case NSAPI::NSNumberWithLongLong:
+ CallIsLongLong = true;
+ break;
+
+ case NSAPI::NSNumberWithDouble:
+ CallIsDouble = true;
+ case NSAPI::NSNumberWithFloat:
+ CallIsFloating = true;
+ break;
+ }
+
+ SourceRange ArgRange = Arg->getSourceRange();
+ QualType ArgTy = Arg->getType();
+ QualType CallTy = Msg->getArg(0)->getType();
+
+ // Check for the easy case, the literal maps directly to the call.
+ if (Ctx.hasSameType(ArgTy, CallTy)) {
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ // We will need to modify the literal suffix to get the same type as the call.
+ // Don't even try if it came from a macro.
+ if (ArgRange.getBegin().isMacroID())
+ return false;
+
+ bool LitIsFloat = ArgTy->isFloatingType();
+  // For a float passed to an integer call, don't try rewriting. It is
+  // difficult and a very uncommon case anyway.
+ if (LitIsFloat && !CallIsFloating)
+ return false;
+
+  // Try to modify the literal to make it the same type as the method call:
+  //  - modify the suffix, and/or
+  //  - change the integer to a float.
+
+ LiteralInfo LitInfo;
+ bool isIntZero = false;
+ if (const IntegerLiteral *IntE = dyn_cast<IntegerLiteral>(literalE))
+ isIntZero = !IntE->getValue().getBoolValue();
+ if (!getLiteralInfo(ArgRange, LitIsFloat, isIntZero, Ctx, LitInfo))
+ return false;
+
+ // Not easy to do int -> float with hex/octal and uncommon anyway.
+ if (!LitIsFloat && CallIsFloating && (LitInfo.Hex || LitInfo.Octal))
+ return false;
+
+ SourceLocation LitB = LitInfo.WithoutSuffRange.getBegin();
+ SourceLocation LitE = LitInfo.WithoutSuffRange.getEnd();
+
+ commit.replaceWithInner(CharSourceRange::getTokenRange(Msg->getSourceRange()),
+ LitInfo.WithoutSuffRange);
+ commit.insert(LitB, "@");
+
+ if (!LitIsFloat && CallIsFloating)
+ commit.insert(LitE, ".0");
+
+ if (CallIsFloating) {
+ if (!CallIsDouble)
+ commit.insert(LitE, LitInfo.F);
+ } else {
+ if (CallIsUnsigned)
+ commit.insert(LitE, LitInfo.U);
+
+ if (CallIsLong)
+ commit.insert(LitE, LitInfo.L);
+ else if (CallIsLongLong)
+ commit.insert(LitE, LitInfo.LL);
+ }
+ return true;
+}
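
Together these entry points modernize Foundation calls, e.g.
[NSArray arrayWithObjects:a, b, nil] becomes @[a, b],
[dict objectForKey:key] becomes dict[key], and
[NSNumber numberWithInt:42] becomes @42. An illustrative driver (obtaining
the ObjCMessageExpr, NSAPI, and Commit is assumed to happen elsewhere):

    #include "clang/Edit/Commit.h"
    #include "clang/Edit/Rewriters.h"

    using namespace clang;

    static void tryModernize(const ObjCMessageExpr *Msg, const NSAPI &NS,
                             edit::Commit &commit) {
      if (edit::rewriteObjCRedundantCallWithLiteral(Msg, NS, commit))
        return; // e.g. [NSString stringWithString:@"x"] -> @"x"
      if (edit::rewriteToObjCSubscriptSyntax(Msg, NS, commit))
        return;
      (void)edit::rewriteToObjCLiteralSyntax(Msg, NS, commit);
    }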
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
index 54bb282..390ae09 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
@@ -66,9 +66,10 @@ namespace {
this->Context = &Context;
}
- virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
HandleTopLevelSingleDecl(*I);
+ return true;
}
void HandleTopLevelSingleDecl(Decl *D);
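
The new bool return value lets a consumer ask the parser to stop handing it
further top-level declarations; returning true keeps the old behavior. A
sketch under that assumption (the cutoff is hypothetical):

    class EarlyExitConsumer : public ASTConsumer {
      unsigned SeenDecls;
    public:
      EarlyExitConsumer() : SeenDecls(0) {}
      virtual bool HandleTopLevelDecl(DeclGroupRef D) {
        for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
          ++SeenDecls;
        return SeenDecls < 1000; // false: stop delivering decls
      }
    };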
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
index cb195d1..9feb3de 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
@@ -26,8 +26,7 @@ bool ASTMergeAction::BeginSourceFileAction(CompilerInstance &CI,
// FIXME: This is a hack. We need a better way to communicate the
// AST file, compiler instance, and file name than member variables
// of FrontendAction.
- AdaptedAction->setCurrentFile(getCurrentFile(), getCurrentFileKind(),
- takeCurrentASTUnit());
+ AdaptedAction->setCurrentInput(getCurrentInput(), takeCurrentASTUnit());
AdaptedAction->setCompilerInstance(&CI);
return AdaptedAction->BeginSourceFileAction(CI, Filename);
}
@@ -35,13 +34,13 @@ bool ASTMergeAction::BeginSourceFileAction(CompilerInstance &CI,
void ASTMergeAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
CI.getDiagnostics().getClient()->BeginSourceFile(
- CI.getASTContext().getLangOptions());
+ CI.getASTContext().getLangOpts());
CI.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument,
&CI.getASTContext());
- llvm::IntrusiveRefCntPtr<DiagnosticIDs>
+ IntrusiveRefCntPtr<DiagnosticIDs>
DiagIDs(CI.getDiagnostics().getDiagnosticIDs());
for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine>
+ IntrusiveRefCntPtr<DiagnosticsEngine>
Diags(new DiagnosticsEngine(DiagIDs, CI.getDiagnostics().getClient(),
/*ShouldOwnClient=*/false));
ASTUnit *Unit = ASTUnit::LoadFromASTFile(ASTFiles[I], Diags,
@@ -80,8 +79,8 @@ void ASTMergeAction::EndSourceFileAction() {
}
ASTMergeAction::ASTMergeAction(FrontendAction *AdaptedAction,
- std::string *ASTFiles, unsigned NumASTFiles)
- : AdaptedAction(AdaptedAction), ASTFiles(ASTFiles, ASTFiles + NumASTFiles) {
+ ArrayRef<std::string> ASTFiles)
+ : AdaptedAction(AdaptedAction), ASTFiles(ASTFiles.begin(), ASTFiles.end()) {
assert(AdaptedAction && "ASTMergeAction needs an action to adapt");
}
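
With ArrayRef<std::string>, call sites can pass any contiguous sequence of
strings without spelling out a pointer/length pair. A hypothetical call
site (WrappedAction stands in for the adapted FrontendAction):

    std::vector<std::string> ASTFiles;
    ASTFiles.push_back("a.ast");
    ASTFiles.push_back("b.ast");
    // ArrayRef binds implicitly to the vector's contiguous storage.
    FrontendAction *Merge = new ASTMergeAction(WrappedAction, ASTFiles);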
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
index 032adf3..e32fa63 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
@@ -27,6 +27,7 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Frontend/Utils.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
@@ -46,6 +47,7 @@
#include "llvm/Support/Timer.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include <cstdlib>
#include <cstdio>
@@ -81,6 +83,121 @@ namespace {
}
}
};
+
+ struct OnDiskData {
+ /// \brief The file in which the precompiled preamble is stored.
+ std::string PreambleFile;
+
+ /// \brief Temporary files that should be removed when the ASTUnit is
+ /// destroyed.
+ SmallVector<llvm::sys::Path, 4> TemporaryFiles;
+
+ /// \brief Erase temporary files.
+ void CleanTemporaryFiles();
+
+ /// \brief Erase the preamble file.
+ void CleanPreambleFile();
+
+ /// \brief Erase temporary files and the preamble file.
+ void Cleanup();
+ };
+}
+
+static llvm::sys::SmartMutex<false> &getOnDiskMutex() {
+ static llvm::sys::SmartMutex<false> M(/* recursive = */ true);
+ return M;
+}
+
+static void cleanupOnDiskMapAtExit(void);
+
+typedef llvm::DenseMap<const ASTUnit *, OnDiskData *> OnDiskDataMap;
+static OnDiskDataMap &getOnDiskDataMap() {
+ static OnDiskDataMap M;
+ static bool hasRegisteredAtExit = false;
+ if (!hasRegisteredAtExit) {
+ hasRegisteredAtExit = true;
+ atexit(cleanupOnDiskMapAtExit);
+ }
+ return M;
+}
+
+static void cleanupOnDiskMapAtExit(void) {
+ // No mutex required here since we are leaving the program.
+ OnDiskDataMap &M = getOnDiskDataMap();
+ for (OnDiskDataMap::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ // We don't worry about freeing the memory associated with OnDiskDataMap.
+ // All we care about is erasing stale files.
+ I->second->Cleanup();
+ }
+}
+
+static OnDiskData &getOnDiskData(const ASTUnit *AU) {
+ // We require the mutex since we are modifying the structure of the
+ // DenseMap.
+ llvm::MutexGuard Guard(getOnDiskMutex());
+ OnDiskDataMap &M = getOnDiskDataMap();
+ OnDiskData *&D = M[AU];
+ if (!D)
+ D = new OnDiskData();
+ return *D;
+}
+
+static void erasePreambleFile(const ASTUnit *AU) {
+ getOnDiskData(AU).CleanPreambleFile();
+}
+
+static void removeOnDiskEntry(const ASTUnit *AU) {
+ // We require the mutex since we are modifying the structure of the
+ // DenseMap.
+ llvm::MutexGuard Guard(getOnDiskMutex());
+ OnDiskDataMap &M = getOnDiskDataMap();
+ OnDiskDataMap::iterator I = M.find(AU);
+ if (I != M.end()) {
+ I->second->Cleanup();
+ delete I->second;
+ M.erase(AU);
+ }
+}
+
+static void setPreambleFile(const ASTUnit *AU, llvm::StringRef preambleFile) {
+ getOnDiskData(AU).PreambleFile = preambleFile;
+}
+
+static const std::string &getPreambleFile(const ASTUnit *AU) {
+ return getOnDiskData(AU).PreambleFile;
+}
+
+void OnDiskData::CleanTemporaryFiles() {
+ for (unsigned I = 0, N = TemporaryFiles.size(); I != N; ++I)
+ TemporaryFiles[I].eraseFromDisk();
+ TemporaryFiles.clear();
+}
+
+void OnDiskData::CleanPreambleFile() {
+ if (!PreambleFile.empty()) {
+ llvm::sys::Path(PreambleFile).eraseFromDisk();
+ PreambleFile.clear();
+ }
+}
+
+void OnDiskData::Cleanup() {
+ CleanTemporaryFiles();
+ CleanPreambleFile();
+}
+
+void ASTUnit::clearFileLevelDecls() {
+ for (FileDeclsTy::iterator
+ I = FileDecls.begin(), E = FileDecls.end(); I != E; ++I)
+ delete I->second;
+ FileDecls.clear();
+}
+
+void ASTUnit::CleanTemporaryFiles() {
+ getOnDiskData(this).CleanTemporaryFiles();
+}
+
+void ASTUnit::addTemporaryFile(const llvm::sys::Path &TempFile) {
+ getOnDiskData(this).TemporaryFiles.push_back(TempFile);
}
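
The helpers above keep per-ASTUnit temporary-file state in a lazily
created, mutex-guarded process-wide map, registering an atexit hook so
stale preamble files are erased even if destructors never run. The same
idiom reduced to a skeleton (all names hypothetical):

    #include <cstdlib>
    #include <map>

    struct Data { void cleanup() {} };

    static void flushAll();

    static std::map<const void *, Data *> &getRegistry() {
      static std::map<const void *, Data *> M; // intentionally leaked
      static bool Registered = false;
      if (!Registered) {
        Registered = true;
        std::atexit(flushAll); // runs while the process shuts down
      }
      return M;
    }

    static void flushAll() {
      // No lock needed: we are leaving the program, as the code above notes.
      for (std::map<const void *, Data *>::iterator I = getRegistry().begin(),
             E = getRegistry().end(); I != E; ++I)
        I->second->cleanup();
    }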
/// \brief After failing to build a precompiled preamble (due to
@@ -95,14 +212,14 @@ const unsigned DefaultPreambleRebuildInterval = 5;
static llvm::sys::cas_flag ActiveASTUnitObjects;
ASTUnit::ASTUnit(bool _MainFileIsAST)
- : OnlyLocalDecls(false), CaptureDiagnostics(false),
+ : Reader(0), OnlyLocalDecls(false), CaptureDiagnostics(false),
MainFileIsAST(_MainFileIsAST),
TUKind(TU_Complete), WantTiming(getenv("LIBCLANG_TIMING")),
OwnsRemappedFileBuffers(true),
NumStoredDiagnosticsFromDriver(0),
PreambleRebuildCounter(0), SavedMainFileBuffer(0), PreambleBuffer(0),
+ NumWarningsInPreamble(0),
ShouldCacheCodeCompletionResults(false),
- NestedMacroExpansions(true),
CompletionCacheTopLevelHashValue(0),
PreambleTopLevelHashValue(0),
CurrentTopLevelHashValue(0),
@@ -114,10 +231,11 @@ ASTUnit::ASTUnit(bool _MainFileIsAST)
}
ASTUnit::~ASTUnit() {
- CleanTemporaryFiles();
- if (!PreambleFile.empty())
- llvm::sys::Path(PreambleFile).eraseFromDisk();
-
+ clearFileLevelDecls();
+
+ // Clean up the temporary files and the preamble file.
+ removeOnDiskEntry(this);
+
// Free the buffers associated with remapped files. We are required to
// perform this operation here because we explicitly request that the
// compiler instance *not* free these buffers for each invocation of the
@@ -143,11 +261,7 @@ ASTUnit::~ASTUnit() {
}
}
-void ASTUnit::CleanTemporaryFiles() {
- for (unsigned I = 0, N = TemporaryFiles.size(); I != N; ++I)
- TemporaryFiles[I].eraseFromDisk();
- TemporaryFiles.clear();
-}
+void ASTUnit::setPreprocessor(Preprocessor *pp) { PP = pp; }
/// \brief Determine the set of code-completion contexts in which this
/// declaration should be shown.
@@ -237,7 +351,8 @@ void ASTUnit::CacheCodeCompletionResults() {
typedef CodeCompletionResult Result;
SmallVector<Result, 8> Results;
CachedCompletionAllocator = new GlobalCodeCompletionAllocator;
- TheSema->GatherGlobalCodeCompletions(*CachedCompletionAllocator, Results);
+ TheSema->GatherGlobalCodeCompletions(*CachedCompletionAllocator,
+ getCodeCompletionTUInfo(), Results);
// Translate global code completions into cached completions.
llvm::DenseMap<CanQualType, unsigned> CompletionTypes;
@@ -248,9 +363,10 @@ void ASTUnit::CacheCodeCompletionResults() {
bool IsNestedNameSpecifier = false;
CachedCodeCompletionResult CachedResult;
CachedResult.Completion = Results[I].CreateCodeCompletionString(*TheSema,
- *CachedCompletionAllocator);
+ *CachedCompletionAllocator,
+ getCodeCompletionTUInfo());
CachedResult.ShowInContexts = getDeclShowContexts(Results[I].Declaration,
- Ctx->getLangOptions(),
+ Ctx->getLangOpts(),
IsNestedNameSpecifier);
CachedResult.Priority = Results[I].Priority;
CachedResult.Kind = Results[I].CursorKind;
@@ -283,7 +399,7 @@ void ASTUnit::CacheCodeCompletionResults() {
CachedCompletionResults.push_back(CachedResult);
/// Handle nested-name-specifiers in C++.
- if (TheSema->Context.getLangOptions().CPlusPlus &&
+ if (TheSema->Context.getLangOpts().CPlusPlus &&
IsNestedNameSpecifier && !Results[I].StartsNestedNameSpecifier) {
// The contexts in which a nested-name-specifier can appear in C++.
unsigned NNSContexts
@@ -312,7 +428,8 @@ void ASTUnit::CacheCodeCompletionResults() {
Results[I].StartsNestedNameSpecifier = true;
CachedResult.Completion
= Results[I].CreateCodeCompletionString(*TheSema,
- *CachedCompletionAllocator);
+ *CachedCompletionAllocator,
+ getCodeCompletionTUInfo());
CachedResult.ShowInContexts = RemainingContexts;
CachedResult.Priority = CCP_NestedNameSpecifier;
CachedResult.TypeClass = STC_Void;
@@ -333,7 +450,8 @@ void ASTUnit::CacheCodeCompletionResults() {
CachedCodeCompletionResult CachedResult;
CachedResult.Completion
= Results[I].CreateCodeCompletionString(*TheSema,
- *CachedCompletionAllocator);
+ *CachedCompletionAllocator,
+ getCodeCompletionTUInfo());
CachedResult.ShowInContexts
= (1 << (CodeCompletionContext::CCC_TopLevel - 1))
| (1 << (CodeCompletionContext::CCC_ObjCInterface - 1))
@@ -378,7 +496,7 @@ class ASTInfoCollector : public ASTReaderListener {
ASTContext &Context;
LangOptions &LangOpt;
HeaderSearch &HSI;
- llvm::IntrusiveRefCntPtr<TargetInfo> &Target;
+ IntrusiveRefCntPtr<TargetInfo> &Target;
std::string &Predefines;
unsigned &Counter;
@@ -388,7 +506,7 @@ class ASTInfoCollector : public ASTReaderListener {
public:
ASTInfoCollector(Preprocessor &PP, ASTContext &Context, LangOptions &LangOpt,
HeaderSearch &HSI,
- llvm::IntrusiveRefCntPtr<TargetInfo> &Target,
+ IntrusiveRefCntPtr<TargetInfo> &Target,
std::string &Predefines,
unsigned &Counter)
: PP(PP), Context(Context), LangOpt(LangOpt), HSI(HSI), Target(Target),
@@ -461,6 +579,9 @@ public:
DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const {
// Just drop any diagnostics that come from cloned consumers; they'll
// have different source managers anyway.
+ // FIXME: We'd like to be able to capture these somehow, even if it's just
+ // file/line/column, because they could occur when parsing module maps or
+ // building modules on-demand.
return new IgnoringDiagConsumer();
}
};
@@ -512,7 +633,7 @@ llvm::MemoryBuffer *ASTUnit::getBufferForFile(StringRef Filename,
}
/// \brief Configure the diagnostics object for use with ASTUnit.
-void ASTUnit::ConfigureDiags(llvm::IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
+void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
const char **ArgBegin, const char **ArgEnd,
ASTUnit &AST, bool CaptureDiagnostics) {
if (!Diags.getPtr()) {
@@ -530,13 +651,14 @@ void ASTUnit::ConfigureDiags(llvm::IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
}
ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts,
bool OnlyLocalDecls,
RemappedFile *RemappedFiles,
unsigned NumRemappedFiles,
- bool CaptureDiagnostics) {
- llvm::OwningPtr<ASTUnit> AST(new ASTUnit(true));
+ bool CaptureDiagnostics,
+ bool AllowPCHWithCompilerErrors) {
+ OwningPtr<ASTUnit> AST(new ASTUnit(true));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -553,7 +675,10 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
AST->FileMgr = new FileManager(FileSystemOpts);
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
AST->getFileManager());
- AST->HeaderInfo.reset(new HeaderSearch(AST->getFileManager()));
+ AST->HeaderInfo.reset(new HeaderSearch(AST->getFileManager(),
+ AST->getDiagnostics(),
+ AST->ASTFileLangOpts,
+ /*Target=*/0));
for (unsigned I = 0; I != NumRemappedFiles; ++I) {
FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
@@ -608,7 +733,7 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
std::string Predefines;
unsigned Counter;
- llvm::OwningPtr<ASTReader> Reader;
+ OwningPtr<ASTReader> Reader;
AST->PP = new Preprocessor(AST->getDiagnostics(), AST->ASTFileLangOpts,
/*Target=*/0, AST->getSourceManager(), HeaderInfo,
@@ -628,7 +753,11 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
/*DelayInitialization=*/true);
ASTContext &Context = *AST->Ctx;
- Reader.reset(new ASTReader(PP, Context));
+ Reader.reset(new ASTReader(PP, Context,
+ /*isysroot=*/"",
+ /*DisableValidation=*/false,
+ /*DisableStatCache=*/false,
+ AllowPCHWithCompilerErrors));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTReader>
@@ -657,7 +786,7 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
// source, so that declarations will be deserialized from the
// AST file as needed.
ASTReader *ReaderPtr = Reader.get();
- llvm::OwningPtr<ExternalASTSource> Source(Reader.take());
+ OwningPtr<ExternalASTSource> Source(Reader.take());
// Unregister the cleanup for ASTReader. It will get cleaned up
// by the ASTUnit cleanup.
@@ -672,6 +801,7 @@ ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
AST->TheSema.reset(new Sema(PP, Context, *AST->Consumer));
AST->TheSema->Initialize();
ReaderPtr->InitializeSema(*AST->TheSema);
+ AST->Reader = ReaderPtr;
return AST.take();
}
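
With the added AllowPCHWithCompilerErrors flag, an AST file produced from a
translation unit that had errors can still be loaded. A call matching the
new signature (argument values are illustrative only):

    ASTUnit *AU = ASTUnit::LoadFromASTFile(Filename, Diags, FileSystemOpts,
                                           /*OnlyLocalDecls=*/false,
                                           /*RemappedFiles=*/0,
                                           /*NumRemappedFiles=*/0,
                                           /*CaptureDiagnostics=*/false,
                                           /*AllowPCHWithCompilerErrors=*/true);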
@@ -711,22 +841,7 @@ void AddTopLevelDeclarationToHash(Decl *D, unsigned &Hash) {
Hash = llvm::HashString(NameStr, Hash);
}
return;
- }
-
- if (ObjCForwardProtocolDecl *Forward
- = dyn_cast<ObjCForwardProtocolDecl>(D)) {
- for (ObjCForwardProtocolDecl::protocol_iterator
- P = Forward->protocol_begin(),
- PEnd = Forward->protocol_end();
- P != PEnd; ++P)
- AddTopLevelDeclarationToHash(*P, Hash);
- return;
- }
-
- if (ObjCClassDecl *Class = dyn_cast<ObjCClassDecl>(D)) {
- AddTopLevelDeclarationToHash(Class->getForwardInterfaceDecl(), Hash);
- return;
- }
+ }
}
class TopLevelDeclTrackerConsumer : public ASTConsumer {
@@ -738,24 +853,46 @@ public:
: Unit(_Unit), Hash(Hash) {
Hash = 0;
}
-
- void HandleTopLevelDecl(DeclGroupRef D) {
- for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) {
- Decl *D = *it;
- // FIXME: Currently ObjC method declarations are incorrectly being
- // reported as top-level declarations, even though their DeclContext
- // is the containing ObjC @interface/@implementation. This is a
- // fundamental problem in the parser right now.
- if (isa<ObjCMethodDecl>(D))
- continue;
- AddTopLevelDeclarationToHash(D, Hash);
- Unit.addTopLevelDecl(D);
+ void handleTopLevelDecl(Decl *D) {
+ if (!D)
+ return;
+
+ // FIXME: Currently ObjC method declarations are incorrectly being
+ // reported as top-level declarations, even though their DeclContext
+ // is the containing ObjC @interface/@implementation. This is a
+ // fundamental problem in the parser right now.
+ if (isa<ObjCMethodDecl>(D))
+ return;
+
+ AddTopLevelDeclarationToHash(D, Hash);
+ Unit.addTopLevelDecl(D);
+
+ handleFileLevelDecl(D);
+ }
+
+ void handleFileLevelDecl(Decl *D) {
+ Unit.addFileLevelDecl(D);
+ if (NamespaceDecl *NSD = dyn_cast<NamespaceDecl>(D)) {
+ for (NamespaceDecl::decl_iterator
+ I = NSD->decls_begin(), E = NSD->decls_end(); I != E; ++I)
+ handleFileLevelDecl(*I);
}
}
+ bool HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it)
+ handleTopLevelDecl(*it);
+ return true;
+ }
+
// We're not interested in "interesting" decls.
void HandleInterestingDecl(DeclGroupRef) {}
+
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
+ for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it)
+ handleTopLevelDecl(*it);
+ }
};
class TopLevelDeclTrackerAction : public ASTFrontendAction {
@@ -787,12 +924,12 @@ class PrecompilePreambleConsumer : public PCHGenerator {
public:
PrecompilePreambleConsumer(ASTUnit &Unit, const Preprocessor &PP,
StringRef isysroot, raw_ostream *Out)
- : PCHGenerator(PP, "", /*IsModule=*/false, isysroot, Out), Unit(Unit),
+ : PCHGenerator(PP, "", 0, isysroot, Out), Unit(Unit),
Hash(Unit.getCurrentTopLevelHashValue()) {
Hash = 0;
}
- virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) {
Decl *D = *it;
// FIXME: Currently ObjC method declarations are incorrectly being
@@ -804,6 +941,7 @@ public:
AddTopLevelDeclarationToHash(D, Hash);
TopLevelDecls.push_back(D);
}
+ return true;
}
virtual void HandleTranslationUnit(ASTContext &Ctx) {
@@ -852,6 +990,34 @@ public:
}
+static void checkAndRemoveNonDriverDiags(SmallVectorImpl<StoredDiagnostic> &
+ StoredDiagnostics) {
+  // Get rid of the stored diagnostics, except the ones from the driver,
+  // which do not have a source location.
+ for (unsigned I = 0; I < StoredDiagnostics.size(); ++I) {
+ if (StoredDiagnostics[I].getLocation().isValid()) {
+ StoredDiagnostics.erase(StoredDiagnostics.begin()+I);
+ --I;
+ }
+ }
+}
+
+static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &
+ StoredDiagnostics,
+ SourceManager &SM) {
+  // The stored diagnostics have the old source manager in them; update
+  // the locations to refer into the new source manager. We've been careful
+  // to keep the source manager's state identical before and after, so the
+  // source locations themselves can be reused.
+ for (unsigned I = 0, N = StoredDiagnostics.size(); I < N; ++I) {
+ if (StoredDiagnostics[I].getLocation().isValid()) {
+ FullSourceLoc Loc(StoredDiagnostics[I].getLocation(), SM);
+ StoredDiagnostics[I].setLocation(Loc);
+ }
+ }
+}
+
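
checkAndRemoveNonDriverDiags erases from the vector while indexing it,
stepping the index back after each erase so the element that slid into the
vacated slot is revisited. The same pattern in isolation:

    #include <vector>

    static void eraseEvens(std::vector<int> &V) {
      for (unsigned I = 0; I < V.size(); ++I) {
        if (V[I] % 2 == 0) {
          V.erase(V.begin() + I);
          --I; // revisit slot I: the next element slid into it
        }
      }
    }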
/// Parse the source file into a translation unit using the given compiler
/// invocation, replacing the current translation unit.
///
@@ -867,17 +1033,17 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
}
// Create the compiler instance to use for building the AST.
- llvm::OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
- llvm::IntrusiveRefCntPtr<CompilerInvocation>
+ IntrusiveRefCntPtr<CompilerInvocation>
CCInvocation(new CompilerInvocation(*Invocation));
Clang->setInvocation(CCInvocation.getPtr());
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].second;
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
@@ -900,28 +1066,29 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].first != IK_AST &&
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
"FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].first != IK_LLVM_IR &&
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
"IR inputs not support here!");
// Configure the various subsystems.
// FIXME: Should we retain the previous file manager?
+ LangOpts = &Clang->getLangOpts();
FileSystemOpts = Clang->getFileSystemOpts();
FileMgr = new FileManager(FileSystemOpts);
SourceMgr = new SourceManager(getDiagnostics(), *FileMgr);
TheSema.reset();
Ctx = 0;
PP = 0;
+ Reader = 0;
// Clear out old caches and data.
TopLevelDecls.clear();
+ clearFileLevelDecls();
CleanTemporaryFiles();
if (!OverrideMainBuffer) {
- StoredDiagnostics.erase(
- StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver,
- StoredDiagnostics.end());
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
TopLevelDeclsInPreamble.clear();
}
@@ -934,14 +1101,12 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
// If the main file has been overridden due to the use of a preamble,
// make that override happen and introduce the preamble.
PreprocessorOptions &PreprocessorOpts = Clang->getPreprocessorOpts();
- PreprocessorOpts.DetailedRecordIncludesNestedMacroExpansions
- = NestedMacroExpansions;
if (OverrideMainBuffer) {
PreprocessorOpts.addRemappedFile(OriginalSourceFile, OverrideMainBuffer);
PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
PreprocessorOpts.PrecompiledPreambleBytes.second
= PreambleEndsAtStartOfLine;
- PreprocessorOpts.ImplicitPCHInclude = PreambleFile;
+ PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
PreprocessorOpts.DisablePCHValidation = true;
// The stored diagnostic has the old source manager in it; update
@@ -949,49 +1114,37 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
// been careful to make sure that the source manager's state
// before and after are identical, so that we can reuse the source
// location itself.
- for (unsigned I = NumStoredDiagnosticsFromDriver,
- N = StoredDiagnostics.size();
- I < N; ++I) {
- FullSourceLoc Loc(StoredDiagnostics[I].getLocation(),
- getSourceManager());
- StoredDiagnostics[I].setLocation(Loc);
- }
+ checkAndSanitizeDiags(StoredDiagnostics, getSourceManager());
// Keep track of the override buffer;
SavedMainFileBuffer = OverrideMainBuffer;
}
- llvm::OwningPtr<TopLevelDeclTrackerAction> Act(
+ OwningPtr<TopLevelDeclTrackerAction> Act(
new TopLevelDeclTrackerAction(*this));
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
ActCleanup(Act.get());
- if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0].second,
- Clang->getFrontendOpts().Inputs[0].first))
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
goto error;
if (OverrideMainBuffer) {
- std::string ModName = PreambleFile;
+ std::string ModName = getPreambleFile(this);
TranslateStoredDiagnostics(Clang->getModuleManager(), ModName,
getSourceManager(), PreambleDiagnostics,
StoredDiagnostics);
}
Act->Execute();
-
- // Steal the created target, context, and preprocessor.
- TheSema.reset(Clang->takeSema());
- Consumer.reset(Clang->takeASTConsumer());
- Ctx = &Clang->getASTContext();
- PP = &Clang->getPreprocessor();
- Clang->setSourceManager(0);
- Clang->setFileManager(0);
- Target = &Clang->getTarget();
+
+ transferASTDataFromCompilerInstance(*Clang);
Act->EndSourceFile();
+ FailedParseDiagnostics.clear();
+
return false;
error:
@@ -1000,8 +1153,13 @@ error:
delete OverrideMainBuffer;
SavedMainFileBuffer = 0;
}
-
+
+ // Keep the ownership of the data in the ASTUnit because the client may
+ // want to see the diagnostics.
+ transferASTDataFromCompilerInstance(*Clang);
+ FailedParseDiagnostics.swap(StoredDiagnostics);
StoredDiagnostics.clear();
+ NumStoredDiagnosticsFromDriver = 0;
return true;
}
@@ -1053,7 +1211,7 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
// command line (to another file) or directly through the compiler invocation
// (to a memory buffer).
llvm::MemoryBuffer *Buffer = 0;
- llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].second);
+ llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].File);
if (const llvm::sys::FileStatus *MainFileStatus = MainFilePath.getFileStatus()) {
// Check whether there is a file-file remapping of the main file
for (PreprocessorOptions::remapped_file_iterator
@@ -1103,7 +1261,7 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
// If the main source file was not remapped, load it now.
if (!Buffer) {
- Buffer = getBufferForFile(FrontendOpts.Inputs[0].second);
+ Buffer = getBufferForFile(FrontendOpts.Inputs[0].File);
if (!Buffer)
return std::make_pair((llvm::MemoryBuffer*)0, std::make_pair(0, true));
@@ -1111,7 +1269,7 @@ ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
}
return std::make_pair(Buffer, Lexer::ComputePreamble(Buffer,
- Invocation.getLangOpts(),
+ *Invocation.getLangOpts(),
MaxLines));
}
@@ -1154,7 +1312,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
bool AllowRebuild,
unsigned MaxLines) {
- llvm::IntrusiveRefCntPtr<CompilerInvocation>
+ IntrusiveRefCntPtr<CompilerInvocation>
PreambleInvocation(new CompilerInvocation(PreambleInvocationIn));
FrontendOptions &FrontendOpts = PreambleInvocation->getFrontendOpts();
PreprocessorOptions &PreprocessorOpts
@@ -1165,7 +1323,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
= ComputePreamble(*PreambleInvocation, MaxLines, CreatedPreambleBuffer);
// If ComputePreamble() created the preamble buffer, take ownership of it.
- llvm::OwningPtr<llvm::MemoryBuffer> OwnedPreambleBuffer;
+ OwningPtr<llvm::MemoryBuffer> OwnedPreambleBuffer;
if (CreatedPreambleBuffer)
OwnedPreambleBuffer.reset(NewPreamble.first);
@@ -1173,10 +1331,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// We couldn't find a preamble in the main source. Clear out the current
// preamble, if we have one. It's obviously no good any more.
Preamble.clear();
- if (!PreambleFile.empty()) {
- llvm::sys::Path(PreambleFile).eraseFromDisk();
- PreambleFile.clear();
- }
+ erasePreambleFile(this);
// The next time we actually see a preamble, precompile it.
PreambleRebuildCounter = 1;
@@ -1259,8 +1414,6 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// Set the state of the diagnostic object to mimic its state
// after parsing the preamble.
- // FIXME: This won't catch any #pragma push warning changes that
- // have occurred in the preamble.
getDiagnostics().Reset();
ProcessWarningOptions(getDiagnostics(),
PreambleInvocation->getDiagnosticOpts());
@@ -1270,7 +1423,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// buffer size we reserved when creating the preamble.
return CreatePaddedMainFileBuffer(NewPreamble.first,
PreambleReservedSize,
- FrontendOpts.Inputs[0].second);
+ FrontendOpts.Inputs[0].File);
}
}
@@ -1282,7 +1435,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// We can't reuse the previously-computed preamble. Build a new one.
Preamble.clear();
PreambleDiagnostics.clear();
- llvm::sys::Path(PreambleFile).eraseFromDisk();
+ erasePreambleFile(this);
PreambleRebuildCounter = 1;
} else if (!AllowRebuild) {
// We aren't allowed to rebuild the precompiled preamble; just
@@ -1323,7 +1476,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// Save the preamble text for later; we'll need to compare against it for
// subsequent reparses.
- StringRef MainFilename = PreambleInvocation->getFrontendOpts().Inputs[0].second;
+ StringRef MainFilename = PreambleInvocation->getFrontendOpts().Inputs[0].File;
Preamble.assign(FileMgr->getFile(MainFilename),
NewPreamble.first->getBufferStart(),
NewPreamble.first->getBufferStart()
@@ -1333,7 +1486,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
delete PreambleBuffer;
PreambleBuffer
= llvm::MemoryBuffer::getNewUninitMemBuffer(PreambleReservedSize,
- FrontendOpts.Inputs[0].second);
+ FrontendOpts.Inputs[0].File);
memcpy(const_cast<char*>(PreambleBuffer->getBufferStart()),
NewPreamble.first->getBufferStart(), Preamble.size());
memset(const_cast<char*>(PreambleBuffer->getBufferStart()) + Preamble.size(),
@@ -1341,7 +1494,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
const_cast<char*>(PreambleBuffer->getBufferEnd())[-1] = '\n';
// Remap the main source file to the preamble buffer.
- llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].second);
+ llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].File);
PreprocessorOpts.addRemappedFile(MainFilePath.str(), PreambleBuffer);
// Tell the compiler invocation to generate a temporary precompiled header.
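The memcpy/memset sequence above is the preamble-padding trick: the main file is stood in for by a buffer of exactly PreambleReservedSize bytes, so source offsets past the preamble stay stable when the precompiled preamble is reused. A minimal sketch in plain C++ (std::vector standing in for llvm::MemoryBuffer; names are illustrative, and PreambleLen is assumed to be smaller than ReservedSize):

    #include <cstring>
    #include <vector>

    // Copy the preamble into a fixed-size buffer, pad the remainder with
    // spaces, and terminate with a newline so the lexer always sees a
    // complete final line. Assumes PreambleLen < ReservedSize.
    std::vector<char> padPreamble(const char *Preamble, size_t PreambleLen,
                                  size_t ReservedSize) {
      std::vector<char> Buf(ReservedSize);
      std::memcpy(&Buf[0], Preamble, PreambleLen);
      std::memset(&Buf[0] + PreambleLen, ' ', ReservedSize - PreambleLen - 1);
      Buf[ReservedSize - 1] = '\n';
      return Buf;
    }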
@@ -1352,14 +1505,14 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
// Create the compiler instance to use for building the precompiled preamble.
- llvm::OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
Clang->setInvocation(&*PreambleInvocation);
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].second;
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
// Set up diagnostics, capturing all of the diagnostics produced.
Clang->setDiagnostics(&getDiagnostics());
@@ -1385,17 +1538,15 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].first != IK_AST &&
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
"FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].first != IK_LLVM_IR &&
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
"IR inputs not support here!");
// Clear out old caches and data.
getDiagnostics().Reset();
ProcessWarningOptions(getDiagnostics(), Clang->getDiagnosticOpts());
- StoredDiagnostics.erase(
- StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver,
- StoredDiagnostics.end());
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
TopLevelDecls.clear();
TopLevelDeclsInPreamble.clear();
@@ -1406,10 +1557,9 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
Clang->setSourceManager(new SourceManager(getDiagnostics(),
Clang->getFileManager()));
- llvm::OwningPtr<PrecompilePreambleAction> Act;
+ OwningPtr<PrecompilePreambleAction> Act;
Act.reset(new PrecompilePreambleAction(*this));
- if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0].second,
- Clang->getFrontendOpts().Inputs[0].first)) {
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
llvm::sys::Path(FrontendOpts.OutputFile).eraseFromDisk();
Preamble.clear();
PreambleRebuildCounter = DefaultPreambleRebuildInterval;
@@ -1438,14 +1588,11 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// of preamble diagnostics.
PreambleDiagnostics.clear();
PreambleDiagnostics.insert(PreambleDiagnostics.end(),
- StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver,
- StoredDiagnostics.end());
- StoredDiagnostics.erase(
- StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver,
- StoredDiagnostics.end());
+ stored_diag_afterDriver_begin(), stored_diag_end());
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
// Keep track of the preamble we precompiled.
- PreambleFile = FrontendOpts.OutputFile;
+ setPreambleFile(this, FrontendOpts.OutputFile);
NumWarningsInPreamble = getDiagnostics().getNumWarnings();
// Keep track of all of the files that the source manager knows about,
@@ -1480,7 +1627,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
return CreatePaddedMainFileBuffer(NewPreamble.first,
PreambleReservedSize,
- FrontendOpts.Inputs[0].second);
+ FrontendOpts.Inputs[0].File);
}
void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
@@ -1498,15 +1645,28 @@ void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
TopLevelDecls.insert(TopLevelDecls.begin(), Resolved.begin(), Resolved.end());
}
+void ASTUnit::transferASTDataFromCompilerInstance(CompilerInstance &CI) {
+ // Steal the created target, context, and preprocessor.
+ TheSema.reset(CI.takeSema());
+ Consumer.reset(CI.takeASTConsumer());
+ Ctx = &CI.getASTContext();
+ PP = &CI.getPreprocessor();
+ CI.setSourceManager(0);
+ CI.setFileManager(0);
+ Target = &CI.getTarget();
+ Reader = CI.getModuleManager();
+}
+
StringRef ASTUnit::getMainFileName() const {
- return Invocation->getFrontendOpts().Inputs[0].second;
+ return Invocation->getFrontendOpts().Inputs[0].File;
}
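Throughout this patch, frontend inputs change from std::pair<InputKind, std::string> (accessed as .first/.second) to a named FrontendInputFile type (accessed as .Kind/.File), which is also why BeginSourceFile now takes the input object directly. A sketch of the shape implied by the usage in the diff (illustrative only; the real class may carry additional state):

    // Reconstructed from the .File/.Kind accesses and the
    // FrontendInputFile(path, kind) construction seen in this patch.
    struct FrontendInputFile {
      std::string File;  // was pair.second: the input path
      InputKind Kind;    // was pair.first: the input language kind
      FrontendInputFile(StringRef Path, InputKind IK)
          : File(Path), Kind(IK) {}
    };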
ASTUnit *ASTUnit::create(CompilerInvocation *CI,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags) {
- llvm::OwningPtr<ASTUnit> AST;
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ bool CaptureDiagnostics) {
+ OwningPtr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
- ConfigureDiags(Diags, 0, 0, *AST, /*CaptureDiagnostics=*/false);
+ ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
AST->Invocation = CI;
AST->FileSystemOpts = CI->getFileSystemOpts();
@@ -1517,23 +1677,36 @@ ASTUnit *ASTUnit::create(CompilerInvocation *CI,
}
ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
ASTFrontendAction *Action,
- ASTUnit *Unit) {
+ ASTUnit *Unit,
+ bool Persistent,
+ StringRef ResourceFilesPath,
+ bool OnlyLocalDecls,
+ bool CaptureDiagnostics,
+ bool PrecompilePreamble,
+ bool CacheCodeCompletionResults,
+ OwningPtr<ASTUnit> *ErrAST) {
assert(CI && "A CompilerInvocation is required");
- llvm::OwningPtr<ASTUnit> OwnAST;
+ OwningPtr<ASTUnit> OwnAST;
ASTUnit *AST = Unit;
if (!AST) {
// Create the AST unit.
- OwnAST.reset(create(CI, Diags));
+ OwnAST.reset(create(CI, Diags, CaptureDiagnostics));
AST = OwnAST.get();
}
- AST->OnlyLocalDecls = false;
- AST->CaptureDiagnostics = false;
+ if (!ResourceFilesPath.empty()) {
+ // Override the resources path.
+ CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+ }
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ if (PrecompilePreamble)
+ AST->PreambleRebuildCounter = 2;
AST->TUKind = Action ? Action->getTranslationUnitKind() : TU_Complete;
- AST->ShouldCacheCodeCompletionResults = false;
+ AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -1551,14 +1724,14 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
AST->TargetFeatures = CI->getTargetOpts().Features;
// Create the compiler instance to use for building the AST.
- llvm::OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
Clang->setInvocation(CI);
- AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].second;
+ AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
@@ -1579,15 +1752,16 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].first != IK_AST &&
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
"FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].first != IK_LLVM_IR &&
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
"IR inputs not supported here!");
// Configure the various subsystems.
AST->TheSema.reset();
AST->Ctx = 0;
AST->PP = 0;
+ AST->Reader = 0;
// Create a file manager object to provide access to and cache the filesystem.
Clang->setFileManager(&AST->getFileManager());
@@ -1597,7 +1771,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
ASTFrontendAction *Act = Action;
- llvm::OwningPtr<TopLevelDeclTrackerAction> TrackerAct;
+ OwningPtr<TopLevelDeclTrackerAction> TrackerAct;
if (!Act) {
TrackerAct.reset(new TopLevelDeclTrackerAction(*AST));
Act = TrackerAct.get();
@@ -1607,21 +1781,28 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
ActCleanup(TrackerAct.get());
- if (!Act->BeginSourceFile(*Clang.get(),
- Clang->getFrontendOpts().Inputs[0].second,
- Clang->getFrontendOpts().Inputs[0].first))
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
+ AST->transferASTDataFromCompilerInstance(*Clang);
+ if (OwnAST && ErrAST)
+ ErrAST->swap(OwnAST);
+
return 0;
-
+ }
+
+ if (Persistent && !TrackerAct) {
+ Clang->getPreprocessor().addPPCallbacks(
+ new MacroDefinitionTrackerPPCallbacks(AST->getCurrentTopLevelHashValue()));
+ std::vector<ASTConsumer*> Consumers;
+ if (Clang->hasASTConsumer())
+ Consumers.push_back(Clang->takeASTConsumer());
+ Consumers.push_back(new TopLevelDeclTrackerConsumer(*AST,
+ AST->getCurrentTopLevelHashValue()));
+ Clang->setASTConsumer(new MultiplexConsumer(Consumers));
+ }
Act->Execute();
-
+
// Steal the created target, context, and preprocessor.
- AST->TheSema.reset(Clang->takeSema());
- AST->Consumer.reset(Clang->takeASTConsumer());
- AST->Ctx = &Clang->getASTContext();
- AST->PP = &Clang->getPreprocessor();
- Clang->setSourceManager(0);
- Clang->setFileManager(0);
- AST->Target = &Clang->getTarget();
+ AST->transferASTDataFromCompilerInstance(*Clang);
Act->EndSourceFile();
@@ -1661,15 +1842,14 @@ bool ASTUnit::LoadFromCompilerInvocation(bool PrecompilePreamble) {
}
ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
bool OnlyLocalDecls,
bool CaptureDiagnostics,
bool PrecompilePreamble,
TranslationUnitKind TUKind,
- bool CacheCodeCompletionResults,
- bool NestedMacroExpansions) {
+ bool CacheCodeCompletionResults) {
// Create the AST unit.
- llvm::OwningPtr<ASTUnit> AST;
+ OwningPtr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
@@ -1678,7 +1858,6 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
AST->TUKind = TUKind;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
AST->Invocation = CI;
- AST->NestedMacroExpansions = NestedMacroExpansions;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -1692,7 +1871,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
const char **ArgEnd,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
StringRef ResourceFilesPath,
bool OnlyLocalDecls,
bool CaptureDiagnostics,
@@ -1702,7 +1881,9 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
bool PrecompilePreamble,
TranslationUnitKind TUKind,
bool CacheCodeCompletionResults,
- bool NestedMacroExpansions) {
+ bool AllowPCHWithCompilerErrors,
+ bool SkipFunctionBodies,
+ OwningPtr<ASTUnit> *ErrAST) {
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
@@ -1713,7 +1894,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
- llvm::IntrusiveRefCntPtr<CompilerInvocation> CI;
+ IntrusiveRefCntPtr<CompilerInvocation> CI;
{
@@ -1738,18 +1919,21 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
CI->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first, fname);
}
}
- CI->getPreprocessorOpts().RemappedFilesKeepOriginalName =
- RemappedFilesKeepOriginalName;
+ PreprocessorOptions &PPOpts = CI->getPreprocessorOpts();
+ PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName;
+ PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors;
// Override the resources path.
CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+ CI->getFrontendOpts().SkipFunctionBodies = SkipFunctionBodies;
+
// Create the AST unit.
- llvm::OwningPtr<ASTUnit> AST;
+ OwningPtr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
ConfigureDiags(Diags, ArgBegin, ArgEnd, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
-
+ Diags = 0; // Zero out now to ease cleanup during crash recovery.
AST->FileSystemOpts = CI->getFileSystemOpts();
AST->FileMgr = new FileManager(AST->FileSystemOpts);
AST->OnlyLocalDecls = OnlyLocalDecls;
@@ -1759,24 +1943,30 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
AST->StoredDiagnostics.swap(StoredDiagnostics);
AST->Invocation = CI;
- AST->NestedMacroExpansions = NestedMacroExpansions;
+ CI = 0; // Zero out now to ease cleanup during crash recovery.
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
ASTUnitCleanup(AST.get());
- llvm::CrashRecoveryContextCleanupRegistrar<CompilerInvocation,
- llvm::CrashRecoveryContextReleaseRefCleanup<CompilerInvocation> >
- CICleanup(CI.getPtr());
- llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
- llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
- DiagCleanup(Diags.getPtr());
- return AST->LoadFromCompilerInvocation(PrecompilePreamble) ? 0 : AST.take();
+ if (AST->LoadFromCompilerInvocation(PrecompilePreamble)) {
+ // An error occurred; if the caller wants to examine the diagnostics,
+ // pass back the ASTUnit.
+ if (ErrAST) {
+ AST->StoredDiagnostics.swap(AST->FailedParseDiagnostics);
+ ErrAST->swap(AST);
+ }
+ return 0;
+ }
+
+ return AST.take();
}
bool ASTUnit::Reparse(RemappedFile *RemappedFiles, unsigned NumRemappedFiles) {
if (!Invocation)
return true;
+
+ clearFileLevelDecls();
SimpleTimer ParsingTimer(WantTiming);
ParsingTimer.setOutput("Reparsing " + getMainFileName());
@@ -1808,15 +1998,15 @@ bool ASTUnit::Reparse(RemappedFile *RemappedFiles, unsigned NumRemappedFiles) {
// If we have a preamble file lying around, or if we might try to
// build a precompiled preamble, do so now.
llvm::MemoryBuffer *OverrideMainBuffer = 0;
- if (!PreambleFile.empty() || PreambleRebuildCounter > 0)
+ if (!getPreambleFile(this).empty() || PreambleRebuildCounter > 0)
OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(*Invocation);
// Clear out the diagnostics state.
- if (!OverrideMainBuffer) {
- getDiagnostics().Reset();
- ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
- }
-
+ getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
+ if (OverrideMainBuffer)
+ getDiagnostics().setNumWarnings(NumWarningsInPreamble);
+
// Parse the sources
bool Result = Parse(OverrideMainBuffer);
@@ -1826,9 +2016,9 @@ bool ASTUnit::Reparse(RemappedFile *RemappedFiles, unsigned NumRemappedFiles) {
CurrentTopLevelHashValue != CompletionCacheTopLevelHashValue)
CacheCodeCompletionResults();
- // We now need to clear out the completion allocator for
- // clang_getCursorCompletionString; it'll be recreated if necessary.
- CursorCompletionAllocator = 0;
+ // We now need to clear out the completion info related to this translation
+ // unit; it'll be recreated if necessary.
+ CCTUInfo.reset();
return Result;
}
@@ -1870,7 +2060,7 @@ namespace {
| (1LL << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
| (1LL << (CodeCompletionContext::CCC_Recovery - 1));
- if (AST.getASTContext().getLangOptions().CPlusPlus)
+ if (AST.getASTContext().getLangOpts().CPlusPlus)
NormalContexts |= (1LL << (CodeCompletionContext::CCC_EnumTag - 1))
| (1LL << (CodeCompletionContext::CCC_UnionTag - 1))
| (1LL << (CodeCompletionContext::CCC_ClassOrStructTag - 1));
@@ -1890,6 +2080,10 @@ namespace {
virtual CodeCompletionAllocator &getAllocator() {
return Next.getAllocator();
}
+
+ virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() {
+ return Next.getCodeCompletionTUInfo();
+ }
};
}
@@ -1961,7 +2155,7 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
unsigned HiddenIDNS = (Decl::IDNS_Type | Decl::IDNS_Member |
Decl::IDNS_Namespace | Decl::IDNS_Ordinary |
Decl::IDNS_NonMemberOperator);
- if (Ctx.getLangOptions().CPlusPlus)
+ if (Ctx.getLangOpts().CPlusPlus)
HiddenIDNS |= Decl::IDNS_Tag;
Hiding = (IDNS & HiddenIDNS);
}
@@ -2021,7 +2215,7 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
if (!Context.getPreferredType().isNull()) {
if (C->Kind == CXCursor_MacroDefinition) {
Priority = getMacroUsagePriority(C->Completion->getTypedText(),
- S.getLangOptions(),
+ S.getLangOpts(),
Context.getPreferredType()->isAnyPointerType());
} else if (C->Type) {
CanQualType Expected
@@ -2047,8 +2241,8 @@ void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
Context.getKind() == CodeCompletionContext::CCC_MacroNameUse) {
// Create a new code-completion string that just contains the
// macro name, without its arguments.
- CodeCompletionBuilder Builder(getAllocator(), CCP_CodePattern,
- C->Availability);
+ CodeCompletionBuilder Builder(getAllocator(), getCodeCompletionTUInfo(),
+ CCP_CodePattern, C->Availability);
Builder.AddTypedTextChunk(C->Completion->getTypedText());
CursorKind = CXCursor_NotImplemented;
Priority = CCP_CodePattern;
@@ -2089,7 +2283,7 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
CompletionTimer.setOutput("Code completion @ " + File + ":" +
Twine(Line) + ":" + Twine(Column));
- llvm::IntrusiveRefCntPtr<CompilerInvocation>
+ IntrusiveRefCntPtr<CompilerInvocation>
CCInvocation(new CompilerInvocation(*Invocation));
FrontendOptions &FrontendOpts = CCInvocation->getFrontendOpts();
@@ -2105,16 +2299,16 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
FrontendOpts.CodeCompletionAt.Column = Column;
// Set the language options appropriately.
- LangOpts = CCInvocation->getLangOpts();
+ LangOpts = *CCInvocation->getLangOpts();
- llvm::OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
Clang->setInvocation(&*CCInvocation);
- OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].second;
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
// Set up diagnostics, capturing any diagnostics produced.
Clang->setDiagnostics(&Diag);
@@ -2140,9 +2334,9 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang->getFrontendOpts().Inputs[0].first != IK_AST &&
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
"FIXME: AST inputs not yet supported here!");
- assert(Clang->getFrontendOpts().Inputs[0].first != IK_LLVM_IR &&
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
"IR inputs not support here!");
@@ -2174,12 +2368,14 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
FrontendOpts.ShowGlobalSymbolsInCodeCompletion);
Clang->setCodeCompletionConsumer(AugmentedConsumer);
+ Clang->getFrontendOpts().SkipFunctionBodies = true;
+
// If we have a precompiled preamble, try to use it. We only allow
// the use of the precompiled preamble if the completion
// point is within the main file, after the end of the precompiled
// preamble.
llvm::MemoryBuffer *OverrideMainBuffer = 0;
- if (!PreambleFile.empty()) {
+ if (!getPreambleFile(this).empty()) {
using llvm::sys::FileStatus;
llvm::sys::PathWithStatus CompleteFilePath(File);
llvm::sys::PathWithStatus MainPath(OriginalSourceFile);
@@ -2196,14 +2392,14 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
// make that override happen and introduce the preamble.
PreprocessorOpts.DisableStatCache = true;
StoredDiagnostics.insert(StoredDiagnostics.end(),
- this->StoredDiagnostics.begin(),
- this->StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver);
+ stored_diag_begin(),
+ stored_diag_afterDriver_begin());
if (OverrideMainBuffer) {
PreprocessorOpts.addRemappedFile(OriginalSourceFile, OverrideMainBuffer);
PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
PreprocessorOpts.PrecompiledPreambleBytes.second
= PreambleEndsAtStartOfLine;
- PreprocessorOpts.ImplicitPCHInclude = PreambleFile;
+ PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
PreprocessorOpts.DisablePCHValidation = true;
OwnedBuffers.push_back(OverrideMainBuffer);
@@ -2215,12 +2411,11 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
// Disable the preprocessing record
PreprocessorOpts.DetailedRecord = false;
- llvm::OwningPtr<SyntaxOnlyAction> Act;
+ OwningPtr<SyntaxOnlyAction> Act;
Act.reset(new SyntaxOnlyAction);
- if (Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0].second,
- Clang->getFrontendOpts().Inputs[0].first)) {
+ if (Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
if (OverrideMainBuffer) {
- std::string ModName = PreambleFile;
+ std::string ModName = getPreambleFile(this);
TranslateStoredDiagnostics(Clang->getModuleManager(), ModName,
getSourceManager(), PreambleDiagnostics,
StoredDiagnostics);
@@ -2228,15 +2423,14 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
Act->Execute();
Act->EndSourceFile();
}
+
+ checkAndSanitizeDiags(StoredDiagnostics, getSourceManager());
}
CXSaveError ASTUnit::Save(StringRef File) {
- if (getDiagnostics().hasUnrecoverableErrorOccurred())
- return CXSaveError_TranslationErrors;
-
// Write to a temporary file and later rename it to the actual file, to avoid
// possible race conditions.
- llvm::SmallString<128> TempPath;
+ SmallString<128> TempPath;
TempPath = File;
TempPath += "-%%%%%%%%";
int fd;
@@ -2250,10 +2444,12 @@ CXSaveError ASTUnit::Save(StringRef File) {
serialize(Out);
Out.close();
- if (Out.has_error())
+ if (Out.has_error()) {
+ Out.clear_error();
return CXSaveError_Unknown;
+ }
- if (llvm::error_code ec = llvm::sys::fs::rename(TempPath.str(), File)) {
+ if (llvm::sys::fs::rename(TempPath.str(), File)) {
bool exists;
llvm::sys::fs::remove(TempPath.str(), exists);
return CXSaveError_Unknown;
@@ -2263,14 +2459,13 @@ CXSaveError ASTUnit::Save(StringRef File) {
}
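Save() relies on the standard write-to-temporary-then-rename idiom so that readers of File never observe a half-written AST. A self-contained sketch of the idiom, assuming POSIX rename() semantics (on Windows, rename fails if the target exists); names are illustrative:

    #include <cstdio>
    #include <fstream>
    #include <string>

    bool atomicSave(const std::string &Path, const std::string &Data) {
      // In production the suffix would be randomized (cf. the "-%%%%%%%%"
      // pattern above) to avoid collisions between concurrent writers.
      std::string Temp = Path + ".tmp";
      {
        std::ofstream Out(Temp.c_str(), std::ios::binary);
        Out << Data;
        if (!Out) {                      // write failed: clean up and bail
          std::remove(Temp.c_str());
          return false;
        }
      }                                  // stream flushed and closed here
      if (std::rename(Temp.c_str(), Path.c_str()) != 0) {
        std::remove(Temp.c_str());       // rename failed: drop the temp file
        return false;
      }
      return true;                       // target is replaced in one step
    }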
bool ASTUnit::serialize(raw_ostream &OS) {
- if (getDiagnostics().hasErrorOccurred())
- return true;
+ bool hasErrors = getDiagnostics().hasErrorOccurred();
- std::vector<unsigned char> Buffer;
+ SmallString<128> Buffer;
llvm::BitstreamWriter Stream(Buffer);
ASTWriter Writer(Stream);
// FIXME: Handle modules
- Writer.WriteAST(getSema(), 0, std::string(), /*IsModule=*/false, "");
+ Writer.WriteAST(getSema(), 0, std::string(), 0, "", hasErrors);
// Write the generated bitstream to "Out".
if (!Buffer.empty())
@@ -2303,7 +2498,7 @@ void ASTUnit::TranslateStoredDiagnostics(
SmallVector<StoredDiagnostic, 4> Result;
Result.reserve(Diags.size());
assert(MMan && "Don't have a module manager");
- serialization::Module *Mod = MMan->ModuleMgr.lookup(ModName);
+ serialization::ModuleFile *Mod = MMan->ModuleMgr.lookup(ModName);
assert(Mod && "Don't have preamble module");
SLocRemap &Remap = Mod->SLocRemap;
for (unsigned I = 0, N = Diags.size(); I != N; ++I) {
@@ -2347,6 +2542,95 @@ void ASTUnit::TranslateStoredDiagnostics(
Result.swap(Out);
}
+static inline bool compLocDecl(std::pair<unsigned, Decl *> L,
+ std::pair<unsigned, Decl *> R) {
+ return L.first < R.first;
+}
+
+void ASTUnit::addFileLevelDecl(Decl *D) {
+ assert(D);
+
+ // We only care about local declarations.
+ if (D->isFromASTFile())
+ return;
+
+ SourceManager &SM = *SourceMgr;
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid() || !SM.isLocalSourceLocation(Loc))
+ return;
+
+ // We only keep track of the file-level declarations of each file.
+ if (!D->getLexicalDeclContext()->isFileContext())
+ return;
+
+ SourceLocation FileLoc = SM.getFileLoc(Loc);
+ assert(SM.isLocalSourceLocation(FileLoc));
+ FileID FID;
+ unsigned Offset;
+ llvm::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
+ if (FID.isInvalid())
+ return;
+
+ LocDeclsTy *&Decls = FileDecls[FID];
+ if (!Decls)
+ Decls = new LocDeclsTy();
+
+ std::pair<unsigned, Decl *> LocDecl(Offset, D);
+
+ if (Decls->empty() || Decls->back().first <= Offset) {
+ Decls->push_back(LocDecl);
+ return;
+ }
+
+ LocDeclsTy::iterator
+ I = std::upper_bound(Decls->begin(), Decls->end(), LocDecl, compLocDecl);
+
+ Decls->insert(I, LocDecl);
+}
+
+void ASTUnit::findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls) {
+ if (File.isInvalid())
+ return;
+
+ if (SourceMgr->isLoadedFileID(File)) {
+ assert(Ctx->getExternalSource() && "No external source!");
+ return Ctx->getExternalSource()->FindFileRegionDecls(File, Offset, Length,
+ Decls);
+ }
+
+ FileDeclsTy::iterator I = FileDecls.find(File);
+ if (I == FileDecls.end())
+ return;
+
+ LocDeclsTy &LocDecls = *I->second;
+ if (LocDecls.empty())
+ return;
+
+ LocDeclsTy::iterator
+ BeginIt = std::lower_bound(LocDecls.begin(), LocDecls.end(),
+ std::make_pair(Offset, (Decl*)0), compLocDecl);
+ if (BeginIt != LocDecls.begin())
+ --BeginIt;
+
+ // If we are pointing at a top-level decl inside an objc container, we need
+ // to backtrack until we find it; otherwise we will fail to report that the
+ // region overlaps with an objc container.
+ while (BeginIt != LocDecls.begin() &&
+ BeginIt->second->isTopLevelDeclInObjCContainer())
+ --BeginIt;
+
+ LocDeclsTy::iterator
+ EndIt = std::upper_bound(LocDecls.begin(), LocDecls.end(),
+ std::make_pair(Offset+Length, (Decl*)0),
+ compLocDecl);
+ if (EndIt != LocDecls.end())
+ ++EndIt;
+
+ for (LocDeclsTy::iterator DIt = BeginIt; DIt != EndIt; ++DIt)
+ Decls.push_back(DIt->second);
+}
+
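addFileLevelDecl and findFileRegionDecls above keep one vector per FileID, sorted by source offset: appends are O(1) for the common in-order case, and region queries are two binary searches. A reduced STL-only sketch of the same bookkeeping (int stands in for Decl*; the ObjC-container backtracking is omitted):

    #include <algorithm>
    #include <utility>
    #include <vector>

    typedef std::vector<std::pair<unsigned, int> > LocDecls;

    // Order by offset only, exactly like compLocDecl above.
    static bool compLoc(std::pair<unsigned, int> L,
                        std::pair<unsigned, int> R) {
      return L.first < R.first;
    }

    // Insert while keeping the vector sorted by offset; appending is the
    // fast path when decls arrive in source order.
    void addDecl(LocDecls &Decls, unsigned Offset, int D) {
      std::pair<unsigned, int> LocDecl(Offset, D);
      if (Decls.empty() || Decls.back().first <= Offset) {
        Decls.push_back(LocDecl);
        return;
      }
      Decls.insert(std::upper_bound(Decls.begin(), Decls.end(), LocDecl,
                                    compLoc),
                   LocDecl);
    }

    // Collect decls whose offsets fall within [Offset, Offset + Length].
    void findRegion(const LocDecls &Decls, unsigned Offset, unsigned Length,
                    std::vector<int> &Out) {
      LocDecls::const_iterator B =
          std::lower_bound(Decls.begin(), Decls.end(),
                           std::make_pair(Offset, 0), compLoc);
      LocDecls::const_iterator E =
          std::upper_bound(Decls.begin(), Decls.end(),
                           std::make_pair(Offset + Length, 0), compLoc);
      for (; B != E; ++B)
        Out.push_back(B->second);
    }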
SourceLocation ASTUnit::getLocation(const FileEntry *File,
unsigned Line, unsigned Col) const {
const SourceManager &SM = getSourceManager();
@@ -2403,6 +2687,50 @@ SourceLocation ASTUnit::mapLocationToPreamble(SourceLocation Loc) {
return Loc;
}
+bool ASTUnit::isInPreambleFileID(SourceLocation Loc) {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getPreambleFileID();
+
+ if (Loc.isInvalid() || FID.isInvalid())
+ return false;
+
+ return SourceMgr->isInFileID(Loc, FID);
+}
+
+bool ASTUnit::isInMainFileID(SourceLocation Loc) {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getMainFileID();
+
+ if (Loc.isInvalid() || FID.isInvalid())
+ return false;
+
+ return SourceMgr->isInFileID(Loc, FID);
+}
+
+SourceLocation ASTUnit::getEndOfPreambleFileID() {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getPreambleFileID();
+
+ if (FID.isInvalid())
+ return SourceLocation();
+
+ return SourceMgr->getLocForEndOfFile(FID);
+}
+
+SourceLocation ASTUnit::getStartOfMainFileID() {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getMainFileID();
+
+ if (FID.isInvalid())
+ return SourceLocation();
+
+ return SourceMgr->getLocForStartOfFile(FID);
+}
+
void ASTUnit::PreambleData::countLines() const {
NumLines = 0;
if (empty())
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
index 8195445..58a6b8d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
@@ -467,7 +467,7 @@ void PTHWriter::GeneratePTH(const std::string &MainFile) {
// Iterate over all the files in SourceManager. Create a lexer
// for each file and cache the tokens.
SourceManager &SM = PP.getSourceManager();
- const LangOptions &LOpts = PP.getLangOptions();
+ const LangOptions &LOpts = PP.getLangOpts();
for (SourceManager::fileinfo_iterator I = SM.fileinfo_begin(),
E = SM.fileinfo_end(); I != E; ++I) {
@@ -540,7 +540,7 @@ void clang::CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS) {
// Get the name of the main file.
const SourceManager &SrcMgr = PP.getSourceManager();
const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID());
- llvm::SmallString<128> MainFilePath(MainFile->getName());
+ SmallString<128> MainFilePath(MainFile->getName());
llvm::sys::fs::make_absolute(MainFilePath);
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
new file mode 100644
index 0000000..c1d3db8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
@@ -0,0 +1,14 @@
+//===- ChainedDiagnosticConsumer.cpp - Chain Diagnostic Clients -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ChainedDiagnosticConsumer.h"
+
+using namespace clang;
+
+void ChainedDiagnosticConsumer::anchor() { }
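The new file consists of a single out-of-line definition because anchor() serves as the class's key function: defining one virtual member in exactly one .cpp file makes the compiler emit the vtable there once, instead of as a weak symbol in every translation unit that includes the header. A generic sketch of the idiom:

    // Widget.h -- the anchor is declared but deliberately not defined inline.
    struct Widget {
      virtual void anchor();
    };

    // Widget.cpp -- the vtable for Widget is emitted in this file, once.
    void Widget::anchor() { }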
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ChainedIncludesSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
index 5fcf11a..dbb06bd 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ChainedIncludesSource.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -12,12 +12,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Serialization/ChainedIncludesSource.h"
-#include "clang/Serialization/ASTReader.h"
-#include "clang/Serialization/ASTWriter.h"
+#include "clang/Frontend/ChainedIncludesSource.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/ASTUnit.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTWriter.h"
#include "clang/Parse/ParseAST.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/TargetInfo.h"
@@ -31,7 +31,7 @@ static ASTReader *createASTReader(CompilerInstance &CI,
SmallVector<std::string, 4> &bufNames,
ASTDeserializationListener *deserialListener = 0) {
Preprocessor &PP = CI.getPreprocessor();
- llvm::OwningPtr<ASTReader> Reader;
+ OwningPtr<ASTReader> Reader;
Reader.reset(new ASTReader(PP, CI.getASTContext(), /*isysroot=*/"",
/*DisableValidation=*/true));
for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
@@ -62,15 +62,15 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
std::vector<std::string> &includes = CI.getPreprocessorOpts().ChainedIncludes;
assert(!includes.empty() && "No '-chain-include' in options!");
- llvm::OwningPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
- InputKind IK = CI.getFrontendOpts().Inputs[0].first;
+ OwningPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
+ InputKind IK = CI.getFrontendOpts().Inputs[0].Kind;
SmallVector<llvm::MemoryBuffer *, 4> serialBufs;
SmallVector<std::string, 4> serialBufNames;
for (unsigned i = 0, e = includes.size(); i != e; ++i) {
bool firstInclude = (i == 0);
- llvm::OwningPtr<CompilerInvocation> CInvok;
+ OwningPtr<CompilerInvocation> CInvok;
CInvok.reset(new CompilerInvocation(CI.getInvocation()));
CInvok->getPreprocessorOpts().ChainedIncludes.clear();
@@ -82,15 +82,16 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
CInvok->getPreprocessorOpts().Macros.clear();
CInvok->getFrontendOpts().Inputs.clear();
- CInvok->getFrontendOpts().Inputs.push_back(std::make_pair(IK, includes[i]));
+ CInvok->getFrontendOpts().Inputs.push_back(FrontendInputFile(includes[i],
+ IK));
TextDiagnosticPrinter *DiagClient =
new TextDiagnosticPrinter(llvm::errs(), DiagnosticOptions());
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine(DiagID, DiagClient));
- llvm::OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
Clang->setInvocation(CInvok.take());
Clang->setDiagnostics(Diags.getPtr());
Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
@@ -104,9 +105,9 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
SmallVector<char, 256> serialAST;
llvm::raw_svector_ostream OS(serialAST);
- llvm::OwningPtr<ASTConsumer> consumer;
- consumer.reset(new PCHGenerator(Clang->getPreprocessor(), "-",
- /*IsModule=*/false, /*isysroot=*/"", &OS));
+ OwningPtr<ASTConsumer> consumer;
+ consumer.reset(new PCHGenerator(Clang->getPreprocessor(), "-", 0,
+ /*isysroot=*/"", &OS));
Clang->getASTContext().setASTMutationListener(
consumer->GetASTMutationListener());
Clang->setASTConsumer(consumer.take());
@@ -115,7 +116,7 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
if (firstInclude) {
Preprocessor &PP = Clang->getPreprocessor();
PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
- PP.getLangOptions());
+ PP.getLangOpts());
} else {
assert(!serialBufs.empty());
SmallVector<llvm::MemoryBuffer *, 4> bufs;
@@ -131,7 +132,7 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
serialBufNames.push_back(pchName);
- llvm::OwningPtr<ExternalASTSource> Reader;
+ OwningPtr<ExternalASTSource> Reader;
Reader.reset(createASTReader(*Clang, pchName, bufs, serialBufNames,
Clang->getASTConsumer().GetASTDeserializationListener()));
@@ -155,7 +156,7 @@ ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
assert(!serialBufs.empty());
std::string pchName = includes.back() + ".pch-final";
serialBufNames.push_back(pchName);
- llvm::OwningPtr<ASTReader> Reader;
+ OwningPtr<ASTReader> Reader;
Reader.reset(createASTReader(CI, pchName, serialBufs, serialBufNames));
if (!Reader)
return 0;
@@ -230,9 +231,8 @@ void ChainedIncludesSource::InitializeSema(Sema &S) {
void ChainedIncludesSource::ForgetSema() {
return getFinalReader().ForgetSema();
}
-std::pair<ObjCMethodList,ObjCMethodList>
-ChainedIncludesSource::ReadMethodPool(Selector Sel) {
- return getFinalReader().ReadMethodPool(Sel);
+void ChainedIncludesSource::ReadMethodPool(Selector Sel) {
+ getFinalReader().ReadMethodPool(Sel);
}
bool ChainedIncludesSource::LookupUnqualified(LookupResult &R, Scope *S) {
return getFinalReader().LookupUnqualified(R, S);
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
index 5526487..cab6b90 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
@@ -11,6 +11,7 @@
#include "clang/Sema/Sema.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
@@ -24,6 +25,7 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/LogDiagnosticPrinter.h"
+#include "clang/Frontend/SerializedDiagnosticPrinter.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/VerifyDiagnosticConsumer.h"
#include "clang/Frontend/Utils.h"
@@ -35,6 +37,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/Host.h"
+#include "llvm/Support/LockFileManager.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Signals.h"
@@ -42,18 +45,6 @@
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Config/config.h"
-// Support for FileLockManager
-#include <fstream>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#if LLVM_ON_WIN32
-#include <windows.h>
-#endif
-#if LLVM_ON_UNIX
-#include <unistd.h>
-#endif
-
using namespace clang;
CompilerInstance::CompilerInstance()
@@ -97,6 +88,7 @@ void CompilerInstance::setASTConsumer(ASTConsumer *Value) {
void CompilerInstance::setCodeCompletionConsumer(CodeCompleteConsumer *Value) {
CompletionConsumer.reset(Value);
+ getFrontendOpts().SkipFunctionBodies = Value != 0;
}
// Diagnostics
@@ -104,7 +96,7 @@ static void SetUpBuildDumpLog(const DiagnosticOptions &DiagOpts,
unsigned argc, const char* const *argv,
DiagnosticsEngine &Diags) {
std::string ErrorInfo;
- llvm::OwningPtr<raw_ostream> OS(
+ OwningPtr<raw_ostream> OS(
new llvm::raw_fd_ostream(DiagOpts.DumpBuildInformation.c_str(), ErrorInfo));
if (!ErrorInfo.empty()) {
Diags.Report(diag::err_fe_unable_to_open_logfile)
@@ -153,6 +145,28 @@ static void SetUpDiagnosticLog(const DiagnosticOptions &DiagOpts,
Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(), Logger));
}
+static void SetupSerializedDiagnostics(const DiagnosticOptions &DiagOpts,
+ DiagnosticsEngine &Diags,
+ StringRef OutputFile) {
+ std::string ErrorInfo;
+ OwningPtr<llvm::raw_fd_ostream> OS;
+ OS.reset(new llvm::raw_fd_ostream(OutputFile.str().c_str(), ErrorInfo,
+ llvm::raw_fd_ostream::F_Binary));
+
+ if (!ErrorInfo.empty()) {
+ Diags.Report(diag::warn_fe_serialized_diag_failure)
+ << OutputFile << ErrorInfo;
+ return;
+ }
+
+ DiagnosticConsumer *SerializedConsumer =
+ clang::serialized_diags::create(OS.take(), DiagOpts);
+
+ Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(),
+ SerializedConsumer));
+}
+
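SetupSerializedDiagnostics never replaces the current diagnostic client; it wraps it, so the serialized writer observes the same stream of diagnostics as the original consumer. The underlying fan-out pattern in a generic, self-contained sketch (interface names are illustrative, not the clang API):

    // A consumer interface plus a chaining wrapper that owns two consumers
    // and forwards every event to both, mirroring ChainedDiagnosticConsumer.
    struct Consumer {
      virtual ~Consumer() {}
      virtual void handle(const char *Msg) = 0;
    };

    struct Chained : Consumer {
      Consumer *Primary, *Secondary;
      Chained(Consumer *P, Consumer *S) : Primary(P), Secondary(S) {}
      virtual ~Chained() { delete Primary; delete Secondary; }
      virtual void handle(const char *Msg) {
        Primary->handle(Msg);    // e.g. the original text printer
        Secondary->handle(Msg);  // e.g. the serialized-diagnostics writer
      }
    };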
void CompilerInstance::createDiagnostics(int Argc, const char* const *Argv,
DiagnosticConsumer *Client,
bool ShouldOwnClient,
@@ -162,15 +176,15 @@ void CompilerInstance::createDiagnostics(int Argc, const char* const *Argv,
&getCodeGenOpts());
}
-llvm::IntrusiveRefCntPtr<DiagnosticsEngine>
+IntrusiveRefCntPtr<DiagnosticsEngine>
CompilerInstance::createDiagnostics(const DiagnosticOptions &Opts,
int Argc, const char* const *Argv,
DiagnosticConsumer *Client,
bool ShouldOwnClient,
bool ShouldCloneClient,
const CodeGenOptions *CodeGenOpts) {
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine>
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine>
Diags(new DiagnosticsEngine(DiagID));
// Create the diagnostic client for reporting errors or for
@@ -194,6 +208,10 @@ CompilerInstance::createDiagnostics(const DiagnosticOptions &Opts,
if (!Opts.DumpBuildInformation.empty())
SetUpBuildDumpLog(Opts, Argc, Argv, *Diags);
+ if (!Opts.DiagnosticSerializationFile.empty())
+ SetupSerializedDiagnostics(Opts, *Diags,
+ Opts.DiagnosticSerializationFile);
+
// Configure our handling of diagnostics.
ProcessWarningOptions(*Diags, Opts);
@@ -223,7 +241,10 @@ void CompilerInstance::createPreprocessor() {
PTHMgr = PTHManager::Create(PPOpts.TokenCache, getDiagnostics());
// Create the Preprocessor.
- HeaderSearch *HeaderInfo = new HeaderSearch(getFileManager());
+ HeaderSearch *HeaderInfo = new HeaderSearch(getFileManager(),
+ getDiagnostics(),
+ getLangOpts(),
+ &getTarget());
PP = new Preprocessor(getDiagnostics(), getLangOpts(), &getTarget(),
getSourceManager(), *HeaderInfo, *this, PTHMgr,
/*OwnsHeaderSearch=*/true);
@@ -237,28 +258,28 @@ void CompilerInstance::createPreprocessor() {
}
if (PPOpts.DetailedRecord)
- PP->createPreprocessingRecord(
- PPOpts.DetailedRecordIncludesNestedMacroExpansions);
+ PP->createPreprocessingRecord(PPOpts.DetailedRecordConditionalDirectives);
InitializePreprocessor(*PP, PPOpts, getHeaderSearchOpts(), getFrontendOpts());
// Set up the module path, including the hash for the
// module-creation options.
- llvm::SmallString<256> SpecificModuleCache(
+ SmallString<256> SpecificModuleCache(
getHeaderSearchOpts().ModuleCachePath);
if (!getHeaderSearchOpts().DisableModuleHash)
llvm::sys::path::append(SpecificModuleCache,
getInvocation().getModuleHash());
- PP->getHeaderSearchInfo().configureModules(SpecificModuleCache,
- getPreprocessorOpts().ModuleBuildPath.empty()
- ? std::string()
- : getPreprocessorOpts().ModuleBuildPath.back());
+ PP->getHeaderSearchInfo().setModuleCachePath(SpecificModuleCache);
// Handle generating dependencies, if requested.
const DependencyOutputOptions &DepOpts = getDependencyOutputOpts();
if (!DepOpts.OutputFile.empty())
AttachDependencyFileGen(*PP, DepOpts);
+ if (!DepOpts.DOTOutputFile.empty())
+ AttachDependencyGraphGen(*PP, DepOpts.DOTOutputFile,
+ getHeaderSearchOpts().Sysroot);
+
// Handle generating header include information, if requested.
if (DepOpts.ShowHeaderIncludes)
AttachHeaderIncludeGen(*PP);
@@ -286,12 +307,14 @@ void CompilerInstance::createASTContext() {
void CompilerInstance::createPCHExternalASTSource(StringRef Path,
bool DisablePCHValidation,
bool DisableStatCache,
+ bool AllowPCHWithCompilerErrors,
void *DeserializationListener){
- llvm::OwningPtr<ExternalASTSource> Source;
+ OwningPtr<ExternalASTSource> Source;
bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
Source.reset(createPCHExternalASTSource(Path, getHeaderSearchOpts().Sysroot,
DisablePCHValidation,
DisableStatCache,
+ AllowPCHWithCompilerErrors,
getPreprocessor(), getASTContext(),
DeserializationListener,
Preamble));
@@ -304,14 +327,16 @@ CompilerInstance::createPCHExternalASTSource(StringRef Path,
const std::string &Sysroot,
bool DisablePCHValidation,
bool DisableStatCache,
+ bool AllowPCHWithCompilerErrors,
Preprocessor &PP,
ASTContext &Context,
void *DeserializationListener,
bool Preamble) {
- llvm::OwningPtr<ASTReader> Reader;
+ OwningPtr<ASTReader> Reader;
Reader.reset(new ASTReader(PP, Context,
Sysroot.empty() ? "" : Sysroot.c_str(),
- DisablePCHValidation, DisableStatCache));
+ DisablePCHValidation, DisableStatCache,
+ AllowPCHWithCompilerErrors));
Reader->setDeserializationListener(
static_cast<ASTDeserializationListener *>(DeserializationListener));
@@ -359,7 +384,7 @@ static bool EnableCodeCompletion(Preprocessor &PP,
void CompilerInstance::createCodeCompletionConsumer() {
const ParsedSourceLocation &Loc = getFrontendOpts().CodeCompletionAt;
if (!CompletionConsumer) {
- CompletionConsumer.reset(
+ setCodeCompletionConsumer(
createCodeCompletionConsumer(getPreprocessor(),
Loc.FileName, Loc.Line, Loc.Column,
getFrontendOpts().ShowMacrosInCodeCompletion,
@@ -370,14 +395,14 @@ void CompilerInstance::createCodeCompletionConsumer() {
return;
} else if (EnableCodeCompletion(getPreprocessor(), Loc.FileName,
Loc.Line, Loc.Column)) {
- CompletionConsumer.reset();
+ setCodeCompletionConsumer(0);
return;
}
if (CompletionConsumer->isOutputBinary() &&
llvm::sys::Program::ChangeStdoutToBinary()) {
getPreprocessor().getDiagnostics().Report(diag::err_fe_stdout_binary);
- CompletionConsumer.reset();
+ setCodeCompletionConsumer(0);
}
}
@@ -424,7 +449,7 @@ void CompilerInstance::clearOutputFiles(bool EraseFiles) {
bool existed;
llvm::sys::fs::remove(it->TempFilename, existed);
} else {
- llvm::SmallString<128> NewOutFile(it->Filename);
+ SmallString<128> NewOutFile(it->Filename);
// If '-working-directory' was passed, the output filename should be
// relative to that.
@@ -450,7 +475,8 @@ CompilerInstance::createDefaultOutputFile(bool Binary,
StringRef InFile,
StringRef Extension) {
return createOutputFile(getFrontendOpts().OutputFile, Binary,
- /*RemoveFileOnSignal=*/true, InFile, Extension);
+ /*RemoveFileOnSignal=*/true, InFile, Extension,
+ /*UseTemporary=*/true);
}
llvm::raw_fd_ostream *
@@ -458,12 +484,14 @@ CompilerInstance::createOutputFile(StringRef OutputPath,
bool Binary, bool RemoveFileOnSignal,
StringRef InFile,
StringRef Extension,
- bool UseTemporary) {
+ bool UseTemporary,
+ bool CreateMissingDirectories) {
std::string Error, OutputPathName, TempPathName;
llvm::raw_fd_ostream *OS = createOutputFile(OutputPath, Error, Binary,
RemoveFileOnSignal,
InFile, Extension,
UseTemporary,
+ CreateMissingDirectories,
&OutputPathName,
&TempPathName);
if (!OS) {
@@ -488,8 +516,12 @@ CompilerInstance::createOutputFile(StringRef OutputPath,
StringRef InFile,
StringRef Extension,
bool UseTemporary,
+ bool CreateMissingDirectories,
std::string *ResultPathName,
std::string *TempPathName) {
+ assert((!CreateMissingDirectories || UseTemporary) &&
+ "CreateMissingDirectories is only allowed when using temporary files");
+
std::string OutFile, TempFile;
if (!OutputPath.empty()) {
OutFile = OutputPath;
@@ -504,18 +536,26 @@ CompilerInstance::createOutputFile(StringRef OutputPath,
OutFile = "-";
}
- llvm::OwningPtr<llvm::raw_fd_ostream> OS;
+ OwningPtr<llvm::raw_fd_ostream> OS;
std::string OSFile;
if (UseTemporary && OutFile != "-") {
- llvm::sys::Path OutPath(OutFile);
- // Only create the temporary if we can actually write to OutPath, otherwise
- // we want to fail early.
+ // Only create the temporary if the parent directory exists (or create
+ // missing directories is true) and we can actually write to OutPath;
+ // otherwise we want to fail early.
+ SmallString<256> AbsPath(OutputPath);
+ llvm::sys::fs::make_absolute(AbsPath);
+ llvm::sys::Path OutPath(AbsPath);
+ bool ParentExists = false;
+ if (llvm::sys::fs::exists(llvm::sys::path::parent_path(AbsPath.str()),
+ ParentExists))
+ ParentExists = false;
bool Exists;
- if ((llvm::sys::fs::exists(OutPath.str(), Exists) || !Exists) ||
- (OutPath.isRegularFile() && OutPath.canWrite())) {
+ if ((CreateMissingDirectories || ParentExists) &&
+ ((llvm::sys::fs::exists(AbsPath.str(), Exists) || !Exists) ||
+ (OutPath.isRegularFile() && OutPath.canWrite()))) {
// Create a temporary file.
- llvm::SmallString<128> TempPath;
+ SmallString<128> TempPath;
TempPath = OutFile;
TempPath += "-%%%%%%%%";
int fd;
@@ -550,12 +590,15 @@ CompilerInstance::createOutputFile(StringRef OutputPath,
// Initialization Utilities
-bool CompilerInstance::InitializeSourceManager(StringRef InputFile) {
- return InitializeSourceManager(InputFile, getDiagnostics(), getFileManager(),
- getSourceManager(), getFrontendOpts());
+bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
+ SrcMgr::CharacteristicKind Kind){
+ return InitializeSourceManager(InputFile, Kind, getDiagnostics(),
+ getFileManager(), getSourceManager(),
+ getFrontendOpts());
}
bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
+ SrcMgr::CharacteristicKind Kind,
DiagnosticsEngine &Diags,
FileManager &FileMgr,
SourceManager &SourceMgr,
@@ -567,9 +610,9 @@ bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
Diags.Report(diag::err_fe_error_reading) << InputFile;
return false;
}
- SourceMgr.createMainFileID(File);
+ SourceMgr.createMainFileID(File, Kind);
} else {
- llvm::OwningPtr<llvm::MemoryBuffer> SB;
+ OwningPtr<llvm::MemoryBuffer> SB;
if (llvm::MemoryBuffer::getSTDIN(SB)) {
// FIXME: Give ec.message() in this diag.
Diags.Report(diag::err_fe_error_reading_stdin);
@@ -577,7 +620,7 @@ bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
}
const FileEntry *File = FileMgr.getVirtualFile(SB->getBufferIdentifier(),
SB->getBufferSize(), 0);
- SourceMgr.createMainFileID(File);
+ SourceMgr.createMainFileID(File, Kind);
SourceMgr.overrideFileContents(File, SB.take());
}
@@ -612,7 +655,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if (getHeaderSearchOpts().Verbose)
OS << "clang -cc1 version " CLANG_VERSION_STRING
<< " based upon " << PACKAGE_STRING
- << " hosted on " << llvm::sys::getHostTriple() << "\n";
+ << " default target " << llvm::sys::getDefaultTargetTriple() << "\n";
if (getFrontendOpts().ShowTimers)
createFrontendTimer();
@@ -621,18 +664,19 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
llvm::EnableStatistics();
for (unsigned i = 0, e = getFrontendOpts().Inputs.size(); i != e; ++i) {
- const std::string &InFile = getFrontendOpts().Inputs[i].second;
-
// Reset the ID tables if we are reusing the SourceManager.
if (hasSourceManager())
getSourceManager().clearIDTables();
- if (Act.BeginSourceFile(*this, InFile, getFrontendOpts().Inputs[i].first)) {
+ if (Act.BeginSourceFile(*this, getFrontendOpts().Inputs[i])) {
Act.Execute();
Act.EndSourceFile();
}
}
+ // Notify the diagnostic client that all files were processed.
+ getDiagnostics().getClient()->finish();
+
if (getDiagnosticOpts().ShowCarets) {
// We can have multiple diagnostics sharing one diagnostic client.
// Get the total number of warnings/errors from the client.
@@ -670,320 +714,105 @@ static InputKind getSourceInputKindFromOptions(const LangOptions &LangOpts) {
}
namespace {
- struct CompileModuleData {
+ struct CompileModuleMapData {
CompilerInstance &Instance;
- GeneratePCHAction &CreateModuleAction;
+ GenerateModuleAction &CreateModuleAction;
};
}
/// \brief Helper function that executes the module-generating action under
/// a crash recovery context.
-static void doCompileModule(void *UserData) {
- CompileModuleData &Data = *reinterpret_cast<CompileModuleData *>(UserData);
+static void doCompileMapModule(void *UserData) {
+ CompileModuleMapData &Data
+ = *reinterpret_cast<CompileModuleMapData *>(UserData);
Data.Instance.ExecuteAction(Data.CreateModuleAction);
}
-namespace {
- /// \brief Class that manages the creation of a lock file to aid
- /// implicit coordination between different processes.
- ///
- /// The implicit coordination works by creating a ".lock" file alongside
- /// the file that we're coordinating for, using the atomicity of the file
- /// system to ensure that only a single process can create that ".lock" file.
- /// When the lock file is removed, the owning process has finished the
- /// operation.
- class LockFileManager {
- public:
- /// \brief Describes the state of a lock file.
- enum LockFileState {
- /// \brief The lock file has been created and is owned by this instance
- /// of the object.
- LFS_Owned,
- /// \brief The lock file already exists and is owned by some other
- /// instance.
- LFS_Shared,
- /// \brief An error occurred while trying to create or find the lock
- /// file.
- LFS_Error
- };
-
- private:
- llvm::SmallString<128> LockFileName;
- llvm::SmallString<128> UniqueLockFileName;
-
- llvm::Optional<std::pair<std::string, int> > Owner;
- llvm::Optional<llvm::error_code> Error;
-
- LockFileManager(const LockFileManager &);
- LockFileManager &operator=(const LockFileManager &);
-
- static llvm::Optional<std::pair<std::string, int> >
- readLockFile(StringRef LockFileName);
-
- static bool processStillExecuting(StringRef Hostname, int PID);
-
- public:
-
- LockFileManager(StringRef FileName);
- ~LockFileManager();
-
- /// \brief Determine the state of the lock file.
- LockFileState getState() const;
-
- operator LockFileState() const { return getState(); }
-
- /// \brief For a shared lock, wait until the owner releases the lock.
- void waitForUnlock();
- };
-}
-
-/// \brief Attempt to read the lock file with the given name, if it exists.
-///
-/// \param LockFileName The name of the lock file to read.
-///
-/// \returns The process ID of the process that owns this lock file
-llvm::Optional<std::pair<std::string, int> >
-LockFileManager::readLockFile(StringRef LockFileName) {
- // Check whether the lock file exists. If not, clearly there's nothing
- // to read, so we just return.
- bool Exists = false;
- if (llvm::sys::fs::exists(LockFileName, Exists) || !Exists)
- return llvm::Optional<std::pair<std::string, int> >();
-
- // Read the owning host and PID out of the lock file. If it appears that the
- // owning process is dead, the lock file is invalid.
- int PID = 0;
- std::string Hostname;
- std::ifstream Input(LockFileName.str().c_str());
- if (Input >> Hostname >> PID && PID > 0 &&
- processStillExecuting(Hostname, PID))
- return std::make_pair(Hostname, PID);
-
- // Delete the lock file. It's invalid anyway.
- bool Existed;
- llvm::sys::fs::remove(LockFileName, Existed);
- return llvm::Optional<std::pair<std::string, int> >();
-}
-
-bool LockFileManager::processStillExecuting(StringRef Hostname, int PID) {
-#if LLVM_ON_UNIX
- char MyHostname[256];
- MyHostname[255] = 0;
- MyHostname[0] = 0;
- gethostname(MyHostname, 255);
- // Check whether the process is dead. If so, we're done.
- if (MyHostname == Hostname && getsid(PID) == -1 && errno == ESRCH)
- return false;
-#endif
-
- return true;
-}
-
-LockFileManager::LockFileManager(StringRef FileName)
-{
- LockFileName = FileName;
- LockFileName += ".lock";
-
- // If the lock file already exists, don't bother to try to create our own
- // lock file; it won't work anyway. Just figure out who owns this lock file.
- if ((Owner = readLockFile(LockFileName)))
- return;
-
- // Create a lock file that is unique to this instance.
- UniqueLockFileName = LockFileName;
- UniqueLockFileName += "-%%%%%%%%";
- int UniqueLockFileID;
- if (llvm::error_code EC
- = llvm::sys::fs::unique_file(UniqueLockFileName.str(),
- UniqueLockFileID,
- UniqueLockFileName,
- /*makeAbsolute=*/false)) {
- Error = EC;
- return;
- }
-
- // Write our process ID to our unique lock file.
- {
- llvm::raw_fd_ostream Out(UniqueLockFileID, /*shouldClose=*/true);
-
-#if LLVM_ON_UNIX
- // FIXME: move getpid() call into LLVM
- char hostname[256];
- hostname[255] = 0;
- hostname[0] = 0;
- gethostname(hostname, 255);
- Out << hostname << ' ' << getpid();
-#else
- Out << "localhost 1";
-#endif
- Out.close();
-
- if (Out.has_error()) {
- // We failed to write out PID, so make up an excuse, remove the
- // unique lock file, and fail.
- Error = llvm::make_error_code(llvm::errc::no_space_on_device);
- bool Existed;
- llvm::sys::fs::remove(UniqueLockFileName.c_str(), Existed);
- return;
- }
- }
-
- // Create a hard link from the lock file name. If this succeeds, we're done.
- llvm::error_code EC
- = llvm::sys::fs::create_hard_link(UniqueLockFileName.str(),
- LockFileName.str());
- if (EC == llvm::errc::success)
- return;
-
- // Creating the hard link failed.
-
-#ifdef LLVM_ON_UNIX
- // The creation of the hard link may appear to fail, but if stat'ing the
- // unique file returns a link count of 2, then we can still declare success.
- struct stat StatBuf;
- if (stat(UniqueLockFileName.c_str(), &StatBuf) == 0 &&
- StatBuf.st_nlink == 2)
- return;
-#endif
-
- // Someone else managed to create the lock file first. Wipe out our unique
- // lock file (it's useless now) and read the process ID from the lock file.
- bool Existed;
- llvm::sys::fs::remove(UniqueLockFileName.str(), Existed);
- if ((Owner = readLockFile(LockFileName)))
- return;
-
- // There is a lock file that nobody owns; try to clean it up and report
- // an error.
- llvm::sys::fs::remove(LockFileName.str(), Existed);
- Error = EC;
-}
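Two details make the constructor above work. First, sys::fs::unique_file() replaces each % in the "-%%%%%%%%" suffix with a random character and creates the file atomically, so every contender gets a private file. Second, link(2) is atomic: all contenders race to hard-link their private file onto the well-known lock name, exactly one wins, and a link count of 2 on the private file proves victory even when link() itself reports a spurious failure (as it can over NFS). A minimal POSIX sketch of just the acquisition step (the function name is ours):

    #include <sys/stat.h>
    #include <unistd.h>

    // Sketch: atomically claim LockPath by hard-linking our private file
    // (already created, unique to this process) onto it. Returns true if
    // this process now owns the lock.
    static bool tryAcquireLock(const char *UniquePath, const char *LockPath) {
      if (link(UniquePath, LockPath) == 0)
        return true;                  // the clean win
      // Over NFS the reply to a successful link() can be lost; st_nlink == 2
      // on our private file shows the link was created, so we still won.
      struct stat SB;
      return stat(UniquePath, &SB) == 0 && SB.st_nlink == 2;
    }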
-
-LockFileManager::LockFileState LockFileManager::getState() const {
- if (Owner)
- return LFS_Shared;
-
- if (Error)
- return LFS_Error;
-
- return LFS_Owned;
-}
-
-LockFileManager::~LockFileManager() {
- if (getState() != LFS_Owned)
- return;
-
- // Since we own the lock, remove the lock file and our own unique lock file.
- bool Existed;
- llvm::sys::fs::remove(LockFileName.str(), Existed);
- llvm::sys::fs::remove(UniqueLockFileName.str(), Existed);
-}
-
-void LockFileManager::waitForUnlock() {
- if (getState() != LFS_Shared)
- return;
-
-#if LLVM_ON_WIN32
- unsigned long Interval = 1;
-#else
- struct timespec Interval;
- Interval.tv_sec = 0;
- Interval.tv_nsec = 1000000;
-#endif
- // Don't wait more than an hour for the file to appear.
- const unsigned MaxSeconds = 3600;
- do {
- // Sleep for the designated interval, to allow the owning process time to
- // finish up and remove the lock file.
- // FIXME: Should we hook in to system APIs to get a notification when the
- // lock file is deleted?
-#if LLVM_ON_WIN32
- Sleep(Interval);
-#else
- nanosleep(&Interval, NULL);
-#endif
- // If the file no longer exists, we're done.
- bool Exists = false;
- if (!llvm::sys::fs::exists(LockFileName.str(), Exists) && !Exists)
- return;
-
- if (!processStillExecuting((*Owner).first, (*Owner).second))
- return;
-
- // Exponentially increase the time we wait for the lock to be removed.
-#if LLVM_ON_WIN32
- Interval *= 2;
-#else
- Interval.tv_sec *= 2;
- Interval.tv_nsec *= 2;
- if (Interval.tv_nsec >= 1000000000) {
- ++Interval.tv_sec;
- Interval.tv_nsec -= 1000000000;
- }
-#endif
- } while (
-#if LLVM_ON_WIN32
- Interval < MaxSeconds * 1000
-#else
- Interval.tv_sec < (time_t)MaxSeconds
-#endif
- );
-
- // Give up.
-}
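waitForUnlock() above is a capped exponential backoff: sleep 1 ms, double the interval each round, and give up once the interval itself reaches an hour. Because the loop bounds the interval rather than the elapsed time, the worst-case total sleep is roughly twice MaxSeconds. The same schedule is much shorter in C++11, which this code predates; a sketch under that assumption, with names of our own:

    #include <chrono>
    #include <thread>

    // Sketch: poll Done() with exponential backoff, mirroring the loop
    // above: 1 ms starting interval, doubled each round, capped when the
    // interval itself reaches MaxSeconds.
    template <typename Pred>
    static bool waitWithBackoff(Pred Done, unsigned MaxSeconds = 3600) {
      std::chrono::milliseconds Interval(1);
      while (Interval < std::chrono::seconds(MaxSeconds)) {
        std::this_thread::sleep_for(Interval);
        if (Done())
          return true;
        Interval *= 2; // double the wait each round
      }
      return false; // gave up, as the code above does
    }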
-
-/// \brief Compile a module file for the given module name with the given
-/// umbrella header, using the options provided by the importing compiler
-/// instance.
+/// \brief Compile a module file for the given module, using the options
+/// provided by the importing compiler instance.
static void compileModule(CompilerInstance &ImportingInstance,
- StringRef ModuleName,
- StringRef ModuleFileName,
- StringRef UmbrellaHeader) {
- LockFileManager Locked(ModuleFileName);
+ Module *Module,
+ StringRef ModuleFileName) {
+ llvm::LockFileManager Locked(ModuleFileName);
switch (Locked) {
- case LockFileManager::LFS_Error:
+ case llvm::LockFileManager::LFS_Error:
return;
- case LockFileManager::LFS_Owned:
+ case llvm::LockFileManager::LFS_Owned:
// We're responsible for building the module ourselves. Do so below.
break;
- case LockFileManager::LFS_Shared:
+ case llvm::LockFileManager::LFS_Shared:
// Someone else is responsible for building the module. Wait for them to
// finish.
Locked.waitForUnlock();
break;
}
+ ModuleMap &ModMap
+ = ImportingInstance.getPreprocessor().getHeaderSearchInfo().getModuleMap();
+
// Construct a compiler invocation for creating this module.
- llvm::IntrusiveRefCntPtr<CompilerInvocation> Invocation
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation
(new CompilerInvocation(ImportingInstance.getInvocation()));
+ PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
+
// For any options that aren't intended to affect how a module is built,
// reset them to their default values.
- Invocation->getLangOpts().resetNonModularOptions();
- Invocation->getPreprocessorOpts().resetNonModularOptions();
+ Invocation->getLangOpts()->resetNonModularOptions();
+ PPOpts.resetNonModularOptions();
+
+ // Note the name of the module we're building.
+ Invocation->getLangOpts()->CurrentModule = Module->getTopLevelModuleName();
// Note that this module is part of the module build path, so that we
// can detect cycles in the module graph.
- Invocation->getPreprocessorOpts().ModuleBuildPath.push_back(ModuleName);
+ PPOpts.ModuleBuildPath.push_back(Module->getTopLevelModuleName());
+ // If there is a module map file, build the module using the module map.
// Set up the inputs/outputs so that we build the module from its umbrella
// header.
FrontendOptions &FrontendOpts = Invocation->getFrontendOpts();
FrontendOpts.OutputFile = ModuleFileName.str();
FrontendOpts.DisableFree = false;
FrontendOpts.Inputs.clear();
- FrontendOpts.Inputs.push_back(
- std::make_pair(getSourceInputKindFromOptions(Invocation->getLangOpts()),
- UmbrellaHeader));
+ InputKind IK = getSourceInputKindFromOptions(*Invocation->getLangOpts());
+
+ // Get or create the module map that we'll use to build this module.
+ SmallString<128> TempModuleMapFileName;
+ if (const FileEntry *ModuleMapFile
+ = ModMap.getContainingModuleMapFile(Module)) {
+ // Use the module map where this module resides.
+ FrontendOpts.Inputs.push_back(FrontendInputFile(ModuleMapFile->getName(),
+ IK));
+ } else {
+ // Create a temporary module map file.
+ TempModuleMapFileName = Module->Name;
+ TempModuleMapFileName += "-%%%%%%%%.map";
+ int FD;
+ if (llvm::sys::fs::unique_file(TempModuleMapFileName.str(), FD,
+ TempModuleMapFileName,
+ /*makeAbsolute=*/true)
+ != llvm::errc::success) {
+ ImportingInstance.getDiagnostics().Report(diag::err_module_map_temp_file)
+ << TempModuleMapFileName;
+ return;
+ }
+ // Print the module map to this file.
+ llvm::raw_fd_ostream OS(FD, /*shouldClose=*/true);
+ Module->print(OS);
+ FrontendOpts.Inputs.push_back(
+ FrontendInputFile(TempModuleMapFileName.str().str(), IK));
+ }
+ // Don't free the remapped file buffers; they are owned by our caller.
+ PPOpts.RetainRemappedFileBuffers = true;
+
Invocation->getDiagnosticOpts().VerifyDiagnostics = 0;
-
-
assert(ImportingInstance.getInvocation().getModuleHash() ==
- Invocation->getModuleHash() && "Module hash mismatch!");
-
+ Invocation->getModuleHash() && "Module hash mismatch!");
+
// Construct a compiler instance that will be used to actually create the
// module.
CompilerInstance Instance;
@@ -992,21 +821,39 @@ static void compileModule(CompilerInstance &ImportingInstance,
&ImportingInstance.getDiagnosticClient(),
/*ShouldOwnClient=*/true,
/*ShouldCloneClient=*/true);
-
+
// Construct a module-generating action.
- GeneratePCHAction CreateModuleAction(true);
-
+ GenerateModuleAction CreateModuleAction;
+
// Execute the action to actually build the module in-place. Use a separate
// thread so that we get a stack large enough.
const unsigned ThreadStackSize = 8 << 20;
llvm::CrashRecoveryContext CRC;
- CompileModuleData Data = { Instance, CreateModuleAction };
- CRC.RunSafelyOnThread(&doCompileModule, &Data, ThreadStackSize);
+ CompileModuleMapData Data = { Instance, CreateModuleAction };
+ CRC.RunSafelyOnThread(&doCompileMapModule, &Data, ThreadStackSize);
+
+ // Delete the temporary module map file.
+ // FIXME: Even though we're executing under crash protection, it would still
+ // be nice to do this with RemoveFileOnSignal when we can. However, that
+ // doesn't make sense for all clients, so clean this up manually.
+ if (!TempModuleMapFileName.empty())
+ llvm::sys::Path(TempModuleMapFileName).eraseFromDisk();
}
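Note the CrashRecoveryContext::RunSafelyOnThread() call above: building a module can recurse deeply (template instantiation, nested includes), so the work runs on a fresh thread with an explicit 8 MiB stack (8 << 20) rather than inheriting the caller's stack, and a crash in the worker becomes a failed return instead of taking the whole process down. A stripped-down sketch of that pattern (the WorkData struct and doWork body here are placeholders of ours):

    #include "llvm/Support/CrashRecoveryContext.h"

    struct WorkData { int Input; int Result; };

    static void doWork(void *UserData) {
      WorkData &D = *static_cast<WorkData *>(UserData);
      D.Result = D.Input * 2; // stand-in for the deeply recursive build
    }

    static bool runGuarded(WorkData &D) {
      const unsigned ThreadStackSize = 8 << 20; // 8 MiB, as above
      llvm::CrashRecoveryContext CRC;
      // Runs doWork(&D) on a new thread with the requested stack size;
      // returns false if the worker crashed instead of returning.
      return CRC.RunSafelyOnThread(doWork, &D, ThreadStackSize);
    }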
-ModuleKey CompilerInstance::loadModule(SourceLocation ImportLoc,
- IdentifierInfo &ModuleName,
- SourceLocation ModuleNameLoc) {
+Module *CompilerInstance::loadModule(SourceLocation ImportLoc,
+ ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective) {
+ // If we've already handled this import, just return the cached result.
+ // This one-element cache is important to eliminate redundant diagnostics
+ // when both the preprocessor and parser see the same import declaration.
+ if (!ImportLoc.isInvalid() && LastModuleImportLoc == ImportLoc) {
+ // Make the named module visible.
+ if (LastModuleImportResult)
+ ModuleManager->makeModuleVisible(LastModuleImportResult, Visibility);
+ return LastModuleImportResult;
+ }
+
// Determine what file we're searching from.
SourceManager &SourceMgr = getSourceManager();
SourceLocation ExpandedImportLoc = SourceMgr.getExpansionLoc(ImportLoc);
@@ -1015,98 +862,236 @@ ModuleKey CompilerInstance::loadModule(SourceLocation ImportLoc,
if (!CurFile)
CurFile = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID());
- // Search for a module with the given name.
- std::string UmbrellaHeader;
- std::string ModuleFileName;
- const FileEntry *ModuleFile
- = PP->getHeaderSearchInfo().lookupModule(ModuleName.getName(),
- &ModuleFileName,
- &UmbrellaHeader);
-
- bool BuildingModule = false;
- if (!ModuleFile && !UmbrellaHeader.empty()) {
- // We didn't find the module, but there is an umbrella header that
- // can be used to create the module file. Create a separate compilation
- // module to do so.
-
- // Check whether there is a cycle in the module graph.
- SmallVectorImpl<std::string> &ModuleBuildPath
- = getPreprocessorOpts().ModuleBuildPath;
- SmallVectorImpl<std::string>::iterator Pos
- = std::find(ModuleBuildPath.begin(), ModuleBuildPath.end(),
- ModuleName.getName());
- if (Pos != ModuleBuildPath.end()) {
- llvm::SmallString<256> CyclePath;
- for (; Pos != ModuleBuildPath.end(); ++Pos) {
- CyclePath += *Pos;
- CyclePath += " -> ";
- }
- CyclePath += ModuleName.getName();
-
- getDiagnostics().Report(ModuleNameLoc, diag::err_module_cycle)
- << ModuleName.getName() << CyclePath;
+ StringRef ModuleName = Path[0].first->getName();
+ SourceLocation ModuleNameLoc = Path[0].second;
+
+ clang::Module *Module = 0;
+
+ // If we don't already have information on this module, load the module now.
+ llvm::DenseMap<const IdentifierInfo *, clang::Module *>::iterator Known
+ = KnownModules.find(Path[0].first);
+ if (Known != KnownModules.end()) {
+ // Retrieve the cached top-level module.
+ Module = Known->second;
+ } else if (ModuleName == getLangOpts().CurrentModule) {
+ // This is the module we're building.
+ Module = PP->getHeaderSearchInfo().getModuleMap().findModule(ModuleName);
+ Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
+ } else {
+ // Search for a module with the given name.
+ Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
+ std::string ModuleFileName;
+ if (Module)
+ ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(Module);
+ else
+ ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(ModuleName);
+
+ if (ModuleFileName.empty()) {
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
+ << ModuleName
+ << SourceRange(ImportLoc, ModuleNameLoc);
+ LastModuleImportLoc = ImportLoc;
+ LastModuleImportResult = 0;
return 0;
}
+
+ const FileEntry *ModuleFile
+ = getFileManager().getFile(ModuleFileName, /*OpenFile=*/false,
+ /*CacheFailure=*/false);
+ bool BuildingModule = false;
+ if (!ModuleFile && Module) {
+ // The module is not cached, but we have a module map from which we can
+ // build the module.
+
+ // Check whether there is a cycle in the module graph.
+ SmallVectorImpl<std::string> &ModuleBuildPath
+ = getPreprocessorOpts().ModuleBuildPath;
+ SmallVectorImpl<std::string>::iterator Pos
+ = std::find(ModuleBuildPath.begin(), ModuleBuildPath.end(), ModuleName);
+ if (Pos != ModuleBuildPath.end()) {
+ SmallString<256> CyclePath;
+ for (; Pos != ModuleBuildPath.end(); ++Pos) {
+ CyclePath += *Pos;
+ CyclePath += " -> ";
+ }
+ CyclePath += ModuleName;
- getDiagnostics().Report(ModuleNameLoc, diag::warn_module_build)
- << ModuleName.getName();
- BuildingModule = true;
- compileModule(*this, ModuleName.getName(), ModuleFileName, UmbrellaHeader);
- ModuleFile = PP->getHeaderSearchInfo().lookupModule(ModuleName.getName());
- }
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_cycle)
+ << ModuleName << CyclePath;
+ return 0;
+ }
- if (!ModuleFile) {
- getDiagnostics().Report(ModuleNameLoc,
- BuildingModule? diag::err_module_not_built
- : diag::err_module_not_found)
- << ModuleName.getName()
- << SourceRange(ImportLoc, ModuleNameLoc);
- return 0;
- }
+ getDiagnostics().Report(ModuleNameLoc, diag::warn_module_build)
+ << ModuleName;
+ BuildingModule = true;
+ compileModule(*this, Module, ModuleFileName);
+ ModuleFile = FileMgr->getFile(ModuleFileName);
+ }
- // If we don't already have an ASTReader, create one now.
- if (!ModuleManager) {
- if (!hasASTContext())
- createASTContext();
-
- std::string Sysroot = getHeaderSearchOpts().Sysroot;
- const PreprocessorOptions &PPOpts = getPreprocessorOpts();
- ModuleManager = new ASTReader(getPreprocessor(), *Context,
- Sysroot.empty() ? "" : Sysroot.c_str(),
- PPOpts.DisablePCHValidation,
- PPOpts.DisableStatCache);
- if (hasASTConsumer()) {
- ModuleManager->setDeserializationListener(
- getASTConsumer().GetASTDeserializationListener());
- getASTContext().setASTMutationListener(
- getASTConsumer().GetASTMutationListener());
+ if (!ModuleFile) {
+ getDiagnostics().Report(ModuleNameLoc,
+ BuildingModule? diag::err_module_not_built
+ : diag::err_module_not_found)
+ << ModuleName
+ << SourceRange(ImportLoc, ModuleNameLoc);
+ return 0;
}
- llvm::OwningPtr<ExternalASTSource> Source;
- Source.reset(ModuleManager);
- getASTContext().setExternalSource(Source);
- if (hasSema())
- ModuleManager->InitializeSema(getSema());
- if (hasASTConsumer())
- ModuleManager->StartTranslationUnit(&getASTConsumer());
- }
- // Try to load the module we found.
- switch (ModuleManager->ReadAST(ModuleFile->getName(),
- serialization::MK_Module)) {
- case ASTReader::Success:
- break;
+ // If we don't already have an ASTReader, create one now.
+ if (!ModuleManager) {
+ if (!hasASTContext())
+ createASTContext();
+
+ std::string Sysroot = getHeaderSearchOpts().Sysroot;
+ const PreprocessorOptions &PPOpts = getPreprocessorOpts();
+ ModuleManager = new ASTReader(getPreprocessor(), *Context,
+ Sysroot.empty() ? "" : Sysroot.c_str(),
+ PPOpts.DisablePCHValidation,
+ PPOpts.DisableStatCache);
+ if (hasASTConsumer()) {
+ ModuleManager->setDeserializationListener(
+ getASTConsumer().GetASTDeserializationListener());
+ getASTContext().setASTMutationListener(
+ getASTConsumer().GetASTMutationListener());
+ }
+ OwningPtr<ExternalASTSource> Source;
+ Source.reset(ModuleManager);
+ getASTContext().setExternalSource(Source);
+ if (hasSema())
+ ModuleManager->InitializeSema(getSema());
+ if (hasASTConsumer())
+ ModuleManager->StartTranslationUnit(&getASTConsumer());
+ }
- case ASTReader::IgnorePCH:
- // FIXME: The ASTReader will already have complained, but can we shoehorn
- // that diagnostic information into a more useful form?
- return 0;
+ // Try to load the module we found.
+ switch (ModuleManager->ReadAST(ModuleFile->getName(),
+ serialization::MK_Module)) {
+ case ASTReader::Success:
+ break;
- case ASTReader::Failure:
- // Already complained.
+ case ASTReader::IgnorePCH:
+ // FIXME: The ASTReader will already have complained, but can we shoehorn
+ // that diagnostic information into a more useful form?
+ KnownModules[Path[0].first] = 0;
+ return 0;
+
+ case ASTReader::Failure:
+ // Already complained, but note now that we failed.
+ KnownModules[Path[0].first] = 0;
+ return 0;
+ }
+
+ if (!Module) {
+ // If we loaded the module directly, without finding a module map first,
+ // we'll have loaded the module's information from the module itself.
+ Module = PP->getHeaderSearchInfo().getModuleMap()
+ .findModule(Path[0].first->getName());
+ }
+
+ // Cache the result of this top-level module lookup for later.
+ Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
+ }
+
+ // If we never found the module, fail.
+ if (!Module)
return 0;
+
+ // Verify that the rest of the module path actually corresponds to
+ // a submodule.
+ if (Path.size() > 1) {
+ for (unsigned I = 1, N = Path.size(); I != N; ++I) {
+ StringRef Name = Path[I].first->getName();
+ clang::Module *Sub = Module->findSubmodule(Name);
+
+ if (!Sub) {
+ // Attempt to perform typo correction to find a module name that works.
+ llvm::SmallVector<StringRef, 2> Best;
+ unsigned BestEditDistance = (std::numeric_limits<unsigned>::max)();
+
+ for (clang::Module::submodule_iterator J = Module->submodule_begin(),
+ JEnd = Module->submodule_end();
+ J != JEnd; ++J) {
+ unsigned ED = Name.edit_distance((*J)->Name,
+ /*AllowReplacements=*/true,
+ BestEditDistance);
+ if (ED <= BestEditDistance) {
+ if (ED < BestEditDistance) {
+ Best.clear();
+ BestEditDistance = ED;
+ }
+
+ Best.push_back((*J)->Name);
+ }
+ }
+
+ // If there was a clear winner, use it.
+ if (Best.size() == 1) {
+ getDiagnostics().Report(Path[I].second,
+ diag::err_no_submodule_suggest)
+ << Path[I].first << Module->getFullModuleName() << Best[0]
+ << SourceRange(Path[0].second, Path[I-1].second)
+ << FixItHint::CreateReplacement(SourceRange(Path[I].second),
+ Best[0]);
+
+ Sub = Module->findSubmodule(Best[0]);
+ }
+ }
+
+ if (!Sub) {
+ // No submodule by this name. Complain, and don't look for further
+ // submodules.
+ getDiagnostics().Report(Path[I].second, diag::err_no_submodule)
+ << Path[I].first << Module->getFullModuleName()
+ << SourceRange(Path[0].second, Path[I-1].second);
+ break;
+ }
+
+ Module = Sub;
+ }
}
+
+ // Make the named module visible, if it's not already part of the module
+ // we are parsing.
+ if (ModuleName != getLangOpts().CurrentModule) {
+ if (!Module->IsFromModuleFile) {
+ // We have an umbrella header or directory that doesn't actually include
+ // all of the headers within the directory it covers. Complain about
+ // this missing submodule and recover by forgetting that we ever saw
+ // this submodule.
+ // FIXME: Should we detect this at module load time? It seems fairly
+ // expensive (and rare).
+ getDiagnostics().Report(ImportLoc, diag::warn_missing_submodule)
+ << Module->getFullModuleName()
+ << SourceRange(Path.front().second, Path.back().second);
+
+ return 0;
+ }
- // FIXME: The module file's FileEntry makes a poor key indeed!
- return (ModuleKey)ModuleFile;
-}
+ // Check whether this module is available.
+ StringRef Feature;
+ if (!Module->isAvailable(getLangOpts(), getTarget(), Feature)) {
+ getDiagnostics().Report(ImportLoc, diag::err_module_unavailable)
+ << Module->getFullModuleName()
+ << Feature
+ << SourceRange(Path.front().second, Path.back().second);
+ LastModuleImportLoc = ImportLoc;
+ LastModuleImportResult = 0;
+ return 0;
+ }
+ ModuleManager->makeModuleVisible(Module, Visibility);
+ }
+
+ // If this module import was due to an inclusion directive, create an
+ // implicit import declaration to capture it in the AST.
+ if (IsInclusionDirective && hasASTContext()) {
+ TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
+ TU->addDecl(ImportDecl::CreateImplicit(getASTContext(), TU,
+ ImportLoc, Module,
+ Path.back().second));
+ }
+
+ LastModuleImportLoc = ImportLoc;
+ LastModuleImportResult = Module;
+ return Module;
+}
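The typo-correction loop in the submodule search above leans on StringRef::edit_distance(), whose third parameter is an upper bound: once the running distance for a candidate exceeds the best score seen so far, the dynamic program bails out early, so hopeless candidates are rejected cheaply, and a suggestion is offered only when exactly one candidate wins. A condensed sketch of that selection logic (the helper name is ours):

    #include "llvm/ADT/StringRef.h"
    #include <vector>

    // Sketch: return the unique nearest candidate to Name, or an empty
    // StringRef when the winner is ambiguous (two or more ties).
    static llvm::StringRef suggestNearest(
        llvm::StringRef Name, const std::vector<llvm::StringRef> &Candidates) {
      llvm::StringRef Winner;
      unsigned Best = ~0U, Ties = 0;
      for (unsigned I = 0, N = Candidates.size(); I != N; ++I) {
        // Passing Best as the bound lets edit_distance() give up early on
        // candidates that cannot beat the current leader.
        unsigned ED = Name.edit_distance(Candidates[I],
                                         /*AllowReplacements=*/true, Best);
        if (ED < Best) { Best = ED; Winner = Candidates[I]; Ties = 1; }
        else if (ED == Best) ++Ties;
      }
      return Ties == 1 ? Winner : llvm::StringRef();
    }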
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
index e926b89..612a0d8 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -30,6 +30,21 @@
#include "llvm/Support/Path.h"
using namespace clang;
+//===----------------------------------------------------------------------===//
+// Initialization.
+//===----------------------------------------------------------------------===//
+
+CompilerInvocationBase::CompilerInvocationBase()
+ : LangOpts(new LangOptions()) {}
+
+CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
+ : RefCountedBase<CompilerInvocation>(),
+ LangOpts(new LangOptions(*X.getLangOpts())) {}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
static const char *getAnalysisStoreName(AnalysisStores Kind) {
switch (Kind) {
default:
@@ -63,41 +78,81 @@ static const char *getAnalysisDiagClientName(AnalysisDiagClients Kind) {
static const char *getAnalysisPurgeModeName(AnalysisPurgeMode Kind) {
switch (Kind) {
default:
- llvm_unreachable("Unknown analysis client!");
+ llvm_unreachable("Unknown analysis purge mode!");
#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) \
case NAME: return CMDFLAG;
#include "clang/Frontend/Analyses.def"
}
}
+static const char *getAnalysisIPAModeName(AnalysisIPAMode Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown analysis ipa mode!");
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) \
+ case NAME: return CMDFLAG;
+#include "clang/Frontend/Analyses.def"
+ }
+}
+
+static const char *
+ getAnalysisInliningModeName(AnalysisInliningMode Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown analysis inlining mode!");
+#define ANALYSIS_INLINE_SELECTION(NAME, CMDFLAG, DESC) \
+ case NAME: return CMDFLAG;
+#include "clang/Frontend/Analyses.def"
+ }
+}
+
//===----------------------------------------------------------------------===//
// Serialization (to args)
//===----------------------------------------------------------------------===//
-static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts,
- std::vector<std::string> &Res) {
+namespace {
+ /// ToArgsList - Helper class to create a list of std::strings.
+ class ToArgsList {
+ std::vector<std::string> &Res;
+ public:
+ explicit ToArgsList(std::vector<std::string> &Res) : Res(Res) {}
+
+ void push_back(StringRef Str) {
+ // Avoid creating a temporary string.
+ Res.push_back(std::string());
+ Res.back().assign(Str.data(), Str.size());
+ }
+
+ void push_back(StringRef Str1, StringRef Str2) {
+ push_back(Str1);
+ push_back(Str2);
+ }
+ };
+}
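ToArgsList is a small adapter with two jobs: the StringRef overload copies the characters straight into a default-constructed element, avoiding one temporary std::string per argument, and the two-argument overload turns the flag/value pairs that dominate this file into single calls. Usage looks like this (the values are made up):

    std::vector<std::string> Strings;
    ToArgsList Res(Strings);
    Res.push_back("-w");                       // a lone flag
    Res.push_back("-target-cpu", "cortex-a9"); // a flag/value pair, one call
    // Strings now holds: "-w", "-target-cpu", "cortex-a9"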
+
+static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts, ToArgsList &Res) {
if (Opts.ShowCheckerHelp)
Res.push_back("-analyzer-checker-help");
- if (Opts.AnalysisStoreOpt != RegionStoreModel) {
- Res.push_back("-analyzer-store");
- Res.push_back(getAnalysisStoreName(Opts.AnalysisStoreOpt));
- }
- if (Opts.AnalysisConstraintsOpt != RangeConstraintsModel) {
- Res.push_back("-analyzer-constraints");
- Res.push_back(getAnalysisConstraintName(Opts.AnalysisConstraintsOpt));
- }
- if (Opts.AnalysisDiagOpt != PD_HTML) {
- Res.push_back("-analyzer-output");
- Res.push_back(getAnalysisDiagClientName(Opts.AnalysisDiagOpt));
- }
- if (Opts.AnalysisPurgeOpt != PurgeStmt) {
- Res.push_back("-analyzer-purge");
- Res.push_back(getAnalysisPurgeModeName(Opts.AnalysisPurgeOpt));
- }
- if (!Opts.AnalyzeSpecificFunction.empty()) {
- Res.push_back("-analyze-function");
- Res.push_back(Opts.AnalyzeSpecificFunction);
- }
+ if (Opts.AnalysisStoreOpt != RegionStoreModel)
+ Res.push_back("-analyzer-store",
+ getAnalysisStoreName(Opts.AnalysisStoreOpt));
+ if (Opts.AnalysisConstraintsOpt != RangeConstraintsModel)
+ Res.push_back("-analyzer-constraints",
+ getAnalysisConstraintName(Opts.AnalysisConstraintsOpt));
+ if (Opts.AnalysisDiagOpt != PD_HTML)
+ Res.push_back("-analyzer-output",
+ getAnalysisDiagClientName(Opts.AnalysisDiagOpt));
+ if (Opts.AnalysisPurgeOpt != PurgeStmt)
+ Res.push_back("-analyzer-purge",
+ getAnalysisPurgeModeName(Opts.AnalysisPurgeOpt));
+ if (!Opts.AnalyzeSpecificFunction.empty())
+ Res.push_back("-analyze-function", Opts.AnalyzeSpecificFunction);
+ if (Opts.IPAMode != Inlining)
+ Res.push_back("-analyzer-ipa", getAnalysisIPAModeName(Opts.IPAMode));
+ if (Opts.InliningMode != NoRedundancy)
+ Res.push_back("-analyzer-inlining-mode",
+ getAnalysisInliningModeName(Opts.InliningMode));
+
if (Opts.AnalyzeAll)
Res.push_back("-analyzer-opt-analyze-headers");
if (Opts.AnalyzerDisplayProgress)
@@ -112,6 +167,8 @@ static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts,
Res.push_back("-analyzer-viz-egraph-graphviz");
if (Opts.VisualizeEGUbi)
Res.push_back("-analyzer-viz-egraph-ubigraph");
+ if (Opts.NoRetryExhausted)
+ Res.push_back("-analyzer-disable-retry-exhausted");
for (unsigned i = 0, e = Opts.CheckersControlList.size(); i != e; ++i) {
const std::pair<std::string, bool> &opt = Opts.CheckersControlList[i];
@@ -123,18 +180,19 @@ static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts,
}
}
-static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
- std::vector<std::string> &Res) {
+static void CodeGenOptsToArgs(const CodeGenOptions &Opts, ToArgsList &Res) {
if (Opts.DebugInfo)
Res.push_back("-g");
if (Opts.DisableLLVMOpts)
Res.push_back("-disable-llvm-optzns");
if (Opts.DisableRedZone)
Res.push_back("-disable-red-zone");
- if (!Opts.DwarfDebugFlags.empty()) {
- Res.push_back("-dwarf-debug-flags");
- Res.push_back(Opts.DwarfDebugFlags);
- }
+ if (Opts.DisableTailCalls)
+ Res.push_back("-mdisable-tail-calls");
+ if (!Opts.DebugCompilationDir.empty())
+ Res.push_back("-fdebug-compilation-dir", Opts.DebugCompilationDir);
+ if (!Opts.DwarfDebugFlags.empty())
+ Res.push_back("-dwarf-debug-flags", Opts.DwarfDebugFlags);
if (Opts.ObjCRuntimeHasARC)
Res.push_back("-fobjc-runtime-has-arc");
if (Opts.ObjCRuntimeHasTerminate)
@@ -160,10 +218,12 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Opts.OptimizeSize == 1 ? Res.push_back("-Os") : Res.push_back("-Oz");
} else if (Opts.OptimizationLevel != 0)
Res.push_back("-O" + llvm::utostr(Opts.OptimizationLevel));
- if (!Opts.MainFileName.empty()) {
- Res.push_back("-main-file-name");
- Res.push_back(Opts.MainFileName);
- }
+ if (!Opts.MainFileName.empty())
+ Res.push_back("-main-file-name", Opts.MainFileName);
+ if (Opts.NoInfsFPMath)
+ Res.push_back("-menable-no-infinities");
+ if (Opts.NoNaNsFPMath)
+ Res.push_back("-menable-no-nans");
// SimplifyLibCalls is only derived.
// TimePasses is only derived.
// UnitAtATime is unused.
@@ -179,10 +239,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-ffunction-sections");
if (Opts.AsmVerbose)
Res.push_back("-masm-verbose");
- if (!Opts.CodeModel.empty()) {
- Res.push_back("-mcode-model");
- Res.push_back(Opts.CodeModel);
- }
+ if (!Opts.CodeModel.empty())
+ Res.push_back("-mcode-model", Opts.CodeModel);
if (Opts.CUDAIsDevice)
Res.push_back("-fcuda-is-device");
if (!Opts.CXAAtExit)
@@ -192,19 +250,14 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
if (Opts.ObjCAutoRefCountExceptions)
Res.push_back("-fobjc-arc-eh");
if (!Opts.DebugPass.empty()) {
- Res.push_back("-mdebug-pass");
- Res.push_back(Opts.DebugPass);
+ Res.push_back("-mdebug-pass", Opts.DebugPass);
}
if (Opts.DisableFPElim)
Res.push_back("-mdisable-fp-elim");
- if (!Opts.FloatABI.empty()) {
- Res.push_back("-mfloat-abi");
- Res.push_back(Opts.FloatABI);
- }
- if (!Opts.LimitFloatPrecision.empty()) {
- Res.push_back("-mlimit-float-precision");
- Res.push_back(Opts.LimitFloatPrecision);
- }
+ if (!Opts.FloatABI.empty())
+ Res.push_back("-mfloat-abi", Opts.FloatABI);
+ if (!Opts.LimitFloatPrecision.empty())
+ Res.push_back("-mlimit-float-precision", Opts.LimitFloatPrecision);
if (Opts.NoZeroInitializedInBSS)
Res.push_back("-mno-zero-initialized-bss");
switch (Opts.getObjCDispatchMethod()) {
@@ -217,10 +270,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-fobjc-dispatch-method=non-legacy");
break;
}
- if (Opts.NumRegisterParameters) {
- Res.push_back("-mregparm");
- Res.push_back(llvm::utostr(Opts.NumRegisterParameters));
- }
+ if (Opts.NumRegisterParameters)
+ Res.push_back("-mregparm", llvm::utostr(Opts.NumRegisterParameters));
if (Opts.NoGlobalMerge)
Res.push_back("-mno-global-merge");
if (Opts.NoExecStack)
@@ -231,46 +282,40 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-msave-temp-labels");
if (Opts.NoDwarf2CFIAsm)
Res.push_back("-fno-dwarf2-cfi-asm");
+ if (Opts.NoDwarfDirectoryAsm)
+ Res.push_back("-fno-dwarf-directory-asm");
if (Opts.SoftFloat)
Res.push_back("-msoft-float");
+ if (Opts.StrictEnums)
+ Res.push_back("-fstrict-enums");
if (Opts.UnwindTables)
Res.push_back("-munwind-tables");
- if (Opts.RelocationModel != "pic") {
- Res.push_back("-mrelocation-model");
- Res.push_back(Opts.RelocationModel);
- }
+ if (Opts.RelocationModel != "pic")
+ Res.push_back("-mrelocation-model", Opts.RelocationModel);
if (!Opts.VerifyModule)
Res.push_back("-disable-llvm-verifier");
- for (unsigned i = 0, e = Opts.BackendOptions.size(); i != e; ++i) {
- Res.push_back("-backend-option");
- Res.push_back(Opts.BackendOptions[i]);
- }
+ for (unsigned i = 0, e = Opts.BackendOptions.size(); i != e; ++i)
+ Res.push_back("-backend-option", Opts.BackendOptions[i]);
}
static void DependencyOutputOptsToArgs(const DependencyOutputOptions &Opts,
- std::vector<std::string> &Res) {
+ ToArgsList &Res) {
if (Opts.IncludeSystemHeaders)
Res.push_back("-sys-header-deps");
if (Opts.ShowHeaderIncludes)
Res.push_back("-H");
- if (!Opts.HeaderIncludeOutputFile.empty()) {
- Res.push_back("-header-include-file");
- Res.push_back(Opts.HeaderIncludeOutputFile);
- }
+ if (!Opts.HeaderIncludeOutputFile.empty())
+ Res.push_back("-header-include-file", Opts.HeaderIncludeOutputFile);
if (Opts.UsePhonyTargets)
Res.push_back("-MP");
- if (!Opts.OutputFile.empty()) {
- Res.push_back("-dependency-file");
- Res.push_back(Opts.OutputFile);
- }
- for (unsigned i = 0, e = Opts.Targets.size(); i != e; ++i) {
- Res.push_back("-MT");
- Res.push_back(Opts.Targets[i]);
- }
+ if (!Opts.OutputFile.empty())
+ Res.push_back("-dependency-file", Opts.OutputFile);
+ for (unsigned i = 0, e = Opts.Targets.size(); i != e; ++i)
+ Res.push_back("-MT", Opts.Targets[i]);
}
static void DiagnosticOptsToArgs(const DiagnosticOptions &Opts,
- std::vector<std::string> &Res) {
+ ToArgsList &Res) {
if (Opts.IgnoreWarnings)
Res.push_back("-w");
if (Opts.NoRewriteMacros)
@@ -295,8 +340,6 @@ static void DiagnosticOptsToArgs(const DiagnosticOptions &Opts,
Res.push_back("-fcolor-diagnostics");
if (Opts.VerifyDiagnostics)
Res.push_back("-verify");
- if (Opts.ShowNames)
- Res.push_back("-fdiagnostics-show-name");
if (Opts.ShowOptionNames)
Res.push_back("-fdiagnostics-show-option");
if (Opts.ShowCategories == 1)
@@ -311,37 +354,29 @@ static void DiagnosticOptsToArgs(const DiagnosticOptions &Opts,
case DiagnosticOptions::Vi:
Res.push_back("-fdiagnostics-format=vi"); break;
}
- if (Opts.ErrorLimit) {
- Res.push_back("-ferror-limit");
- Res.push_back(llvm::utostr(Opts.ErrorLimit));
- }
- if (!Opts.DiagnosticLogFile.empty()) {
- Res.push_back("-diagnostic-log-file");
- Res.push_back(Opts.DiagnosticLogFile);
- }
+ if (Opts.ErrorLimit)
+ Res.push_back("-ferror-limit", llvm::utostr(Opts.ErrorLimit));
+ if (!Opts.DiagnosticLogFile.empty())
+ Res.push_back("-diagnostic-log-file", Opts.DiagnosticLogFile);
if (Opts.MacroBacktraceLimit
- != DiagnosticOptions::DefaultMacroBacktraceLimit) {
- Res.push_back("-fmacro-backtrace-limit");
- Res.push_back(llvm::utostr(Opts.MacroBacktraceLimit));
- }
+ != DiagnosticOptions::DefaultMacroBacktraceLimit)
+ Res.push_back("-fmacro-backtrace-limit",
+ llvm::utostr(Opts.MacroBacktraceLimit));
if (Opts.TemplateBacktraceLimit
- != DiagnosticOptions::DefaultTemplateBacktraceLimit) {
- Res.push_back("-ftemplate-backtrace-limit");
- Res.push_back(llvm::utostr(Opts.TemplateBacktraceLimit));
- }
-
- if (Opts.TabStop != DiagnosticOptions::DefaultTabStop) {
- Res.push_back("-ftabstop");
- Res.push_back(llvm::utostr(Opts.TabStop));
- }
- if (Opts.MessageLength) {
- Res.push_back("-fmessage-length");
- Res.push_back(llvm::utostr(Opts.MessageLength));
- }
- if (!Opts.DumpBuildInformation.empty()) {
- Res.push_back("-dump-build-information");
- Res.push_back(Opts.DumpBuildInformation);
- }
+ != DiagnosticOptions::DefaultTemplateBacktraceLimit)
+ Res.push_back("-ftemplate-backtrace-limit",
+ llvm::utostr(Opts.TemplateBacktraceLimit));
+ if (Opts.ConstexprBacktraceLimit
+ != DiagnosticOptions::DefaultConstexprBacktraceLimit)
+ Res.push_back("-fconstexpr-backtrace-limit",
+ llvm::utostr(Opts.ConstexprBacktraceLimit));
+
+ if (Opts.TabStop != DiagnosticOptions::DefaultTabStop)
+ Res.push_back("-ftabstop", llvm::utostr(Opts.TabStop));
+ if (Opts.MessageLength)
+ Res.push_back("-fmessage-length", llvm::utostr(Opts.MessageLength));
+ if (!Opts.DumpBuildInformation.empty())
+ Res.push_back("-dump-build-information", Opts.DumpBuildInformation);
for (unsigned i = 0, e = Opts.Warnings.size(); i != e; ++i)
Res.push_back("-W" + Opts.Warnings[i]);
}
@@ -365,7 +400,6 @@ static const char *getInputKindName(InputKind Kind) {
}
llvm_unreachable("Unexpected language kind!");
- return 0;
}
static const char *getActionName(frontend::ActionKind Kind) {
@@ -395,27 +429,24 @@ static const char *getActionName(frontend::ActionKind Kind) {
case frontend::PrintDeclContext: return "-print-decl-contexts";
case frontend::PrintPreamble: return "-print-preamble";
case frontend::PrintPreprocessedInput: return "-E";
+ case frontend::PubnamesDump: return "-pubnames-dump";
case frontend::RewriteMacros: return "-rewrite-macros";
case frontend::RewriteObjC: return "-rewrite-objc";
case frontend::RewriteTest: return "-rewrite-test";
case frontend::RunAnalysis: return "-analyze";
+ case frontend::MigrateSource: return "-migrate";
case frontend::RunPreprocessorOnly: return "-Eonly";
}
llvm_unreachable("Unexpected language kind!");
- return 0;
}
-static void FileSystemOptsToArgs(const FileSystemOptions &Opts,
- std::vector<std::string> &Res) {
- if (!Opts.WorkingDir.empty()) {
- Res.push_back("-working-directory");
- Res.push_back(Opts.WorkingDir);
- }
+static void FileSystemOptsToArgs(const FileSystemOptions &Opts, ToArgsList &Res){
+ if (!Opts.WorkingDir.empty())
+ Res.push_back("-working-directory", Opts.WorkingDir);
}
-static void FrontendOptsToArgs(const FrontendOptions &Opts,
- std::vector<std::string> &Res) {
+static void FrontendOptsToArgs(const FrontendOptions &Opts, ToArgsList &Res) {
if (Opts.DisableFree)
Res.push_back("-disable-free");
if (Opts.RelocatablePCH)
@@ -436,6 +467,12 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts,
Res.push_back("-version");
if (Opts.FixWhatYouCan)
Res.push_back("-fix-what-you-can");
+ if (Opts.FixOnlyWarnings)
+ Res.push_back("-fix-only-warnings");
+ if (Opts.FixAndRecompile)
+ Res.push_back("-fixit-recompile");
+ if (Opts.FixToTemporaries)
+ Res.push_back("-fixit-to-temporary");
switch (Opts.ARCMTAction) {
case FrontendOptions::ARCMT_None:
break;
@@ -449,76 +486,63 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts,
Res.push_back("-arcmt-migrate");
break;
}
- if (!Opts.ARCMTMigrateDir.empty()) {
- Res.push_back("-arcmt-migrate-directory");
- Res.push_back(Opts.ARCMTMigrateDir);
- }
- if (!Opts.ARCMTMigrateReportOut.empty()) {
- Res.push_back("-arcmt-migrate-report-output");
- Res.push_back(Opts.ARCMTMigrateReportOut);
- }
+ if (!Opts.MTMigrateDir.empty())
+ Res.push_back("-mt-migrate-directory", Opts.MTMigrateDir);
+ if (!Opts.ARCMTMigrateReportOut.empty())
+ Res.push_back("-arcmt-migrate-report-output", Opts.ARCMTMigrateReportOut);
if (Opts.ARCMTMigrateEmitARCErrors)
Res.push_back("-arcmt-migrate-emit-errors");
+ if (Opts.ObjCMTAction & FrontendOptions::ObjCMT_Literals)
+ Res.push_back("-objcmt-migrate-literals");
+ if (Opts.ObjCMTAction & FrontendOptions::ObjCMT_Subscripting)
+ Res.push_back("-objcmt-migrate-subscripting");
+
bool NeedLang = false;
for (unsigned i = 0, e = Opts.Inputs.size(); i != e; ++i)
- if (FrontendOptions::getInputKindForExtension(Opts.Inputs[i].second) !=
- Opts.Inputs[i].first)
+ if (FrontendOptions::getInputKindForExtension(Opts.Inputs[i].File) !=
+ Opts.Inputs[i].Kind)
NeedLang = true;
- if (NeedLang) {
- Res.push_back("-x");
- Res.push_back(getInputKindName(Opts.Inputs[0].first));
- }
+ if (NeedLang)
+ Res.push_back("-x", getInputKindName(Opts.Inputs[0].Kind));
for (unsigned i = 0, e = Opts.Inputs.size(); i != e; ++i) {
- assert((!NeedLang || Opts.Inputs[i].first == Opts.Inputs[0].first) &&
+ assert((!NeedLang || Opts.Inputs[i].Kind == Opts.Inputs[0].Kind) &&
"Unable to represent this input vector!");
- Res.push_back(Opts.Inputs[i].second);
+ Res.push_back(Opts.Inputs[i].File);
}
- if (!Opts.OutputFile.empty()) {
- Res.push_back("-o");
- Res.push_back(Opts.OutputFile);
- }
- if (!Opts.CodeCompletionAt.FileName.empty()) {
- Res.push_back("-code-completion-at");
- Res.push_back(Opts.CodeCompletionAt.FileName + ":" +
+ if (!Opts.OutputFile.empty())
+ Res.push_back("-o", Opts.OutputFile);
+ if (!Opts.CodeCompletionAt.FileName.empty())
+ Res.push_back("-code-completion-at",
+ Opts.CodeCompletionAt.FileName + ":" +
llvm::utostr(Opts.CodeCompletionAt.Line) + ":" +
llvm::utostr(Opts.CodeCompletionAt.Column));
- }
if (Opts.ProgramAction != frontend::PluginAction)
Res.push_back(getActionName(Opts.ProgramAction));
if (!Opts.ActionName.empty()) {
- Res.push_back("-plugin");
- Res.push_back(Opts.ActionName);
- for(unsigned i = 0, e = Opts.PluginArgs.size(); i != e; ++i) {
- Res.push_back("-plugin-arg-" + Opts.ActionName);
- Res.push_back(Opts.PluginArgs[i]);
- }
- }
- for (unsigned i = 0, e = Opts.Plugins.size(); i != e; ++i) {
- Res.push_back("-load");
- Res.push_back(Opts.Plugins[i]);
+ Res.push_back("-plugin", Opts.ActionName);
+ for(unsigned i = 0, e = Opts.PluginArgs.size(); i != e; ++i)
+ Res.push_back("-plugin-arg-" + Opts.ActionName, Opts.PluginArgs[i]);
}
+ for (unsigned i = 0, e = Opts.Plugins.size(); i != e; ++i)
+ Res.push_back("-load", Opts.Plugins[i]);
for (unsigned i = 0, e = Opts.AddPluginActions.size(); i != e; ++i) {
- Res.push_back("-add-plugin");
- Res.push_back(Opts.AddPluginActions[i]);
- for(unsigned ai = 0, ae = Opts.AddPluginArgs.size(); ai != ae; ++ai) {
- Res.push_back("-plugin-arg-" + Opts.AddPluginActions[i]);
- Res.push_back(Opts.AddPluginArgs[i][ai]);
- }
- }
- for (unsigned i = 0, e = Opts.ASTMergeFiles.size(); i != e; ++i) {
- Res.push_back("-ast-merge");
- Res.push_back(Opts.ASTMergeFiles[i]);
- }
- for (unsigned i = 0, e = Opts.LLVMArgs.size(); i != e; ++i) {
- Res.push_back("-mllvm");
- Res.push_back(Opts.LLVMArgs[i]);
- }
+ Res.push_back("-add-plugin", Opts.AddPluginActions[i]);
+ for(unsigned ai = 0, ae = Opts.AddPluginArgs.size(); ai != ae; ++ai)
+ Res.push_back("-plugin-arg-" + Opts.AddPluginActions[i],
+ Opts.AddPluginArgs[i][ai]);
+ }
+ for (unsigned i = 0, e = Opts.ASTMergeFiles.size(); i != e; ++i)
+ Res.push_back("-ast-merge", Opts.ASTMergeFiles[i]);
+ for (unsigned i = 0, e = Opts.LLVMArgs.size(); i != e; ++i)
+ Res.push_back("-mllvm", Opts.LLVMArgs[i]);
+ if (!Opts.OverrideRecordLayoutsFile.empty())
+ Res.push_back("-foverride-record-layout=" + Opts.OverrideRecordLayoutsFile);
}
static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
- std::vector<std::string> &Res) {
+ ToArgsList &Res) {
if (Opts.Sysroot != "/") {
Res.push_back("-isysroot");
Res.push_back(Opts.Sysroot);
@@ -585,14 +609,10 @@ static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
Res.push_back(E.Path);
}
- if (!Opts.ResourceDir.empty()) {
- Res.push_back("-resource-dir");
- Res.push_back(Opts.ResourceDir);
- }
- if (!Opts.ModuleCachePath.empty()) {
- Res.push_back("-fmodule-cache-path");
- Res.push_back(Opts.ModuleCachePath);
- }
+ if (!Opts.ResourceDir.empty())
+ Res.push_back("-resource-dir", Opts.ResourceDir);
+ if (!Opts.ModuleCachePath.empty())
+ Res.push_back("-fmodule-cache-path", Opts.ModuleCachePath);
if (!Opts.UseStandardSystemIncludes)
Res.push_back("-nostdsysteminc");
if (!Opts.UseStandardCXXIncludes)
@@ -603,8 +623,7 @@ static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
Res.push_back("-v");
}
-static void LangOptsToArgs(const LangOptions &Opts,
- std::vector<std::string> &Res) {
+static void LangOptsToArgs(const LangOptions &Opts, ToArgsList &Res) {
LangOptions DefaultLangOpts;
// FIXME: Need to set -std to get all the implicit options.
@@ -629,6 +648,8 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-fgnu-keywords");
if (Opts.MicrosoftExt)
Res.push_back("-fms-extensions");
+ if (Opts.MicrosoftMode)
+ Res.push_back("-fms-compatibility");
if (Opts.MSCVersion != 0)
Res.push_back("-fmsc-version=" + llvm::utostr(Opts.MSCVersion));
if (Opts.Borland)
@@ -644,6 +665,10 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-fpascal-strings");
if (Opts.CatchUndefined)
Res.push_back("-fcatch-undefined-behavior");
+ if (Opts.AddressSanitizer)
+ Res.push_back("-faddress-sanitizer");
+ if (Opts.ThreadSanitizer)
+ Res.push_back("-fthread-sanitizer");
if (Opts.WritableStrings)
Res.push_back("-fwritable-strings");
if (Opts.ConstStrings)
@@ -684,6 +709,8 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-fblocks");
if (Opts.BlocksRuntimeOptional)
Res.push_back("-fblocks-runtime-optional");
+ if (Opts.Modules)
+ Res.push_back("-fmodules");
if (Opts.EmitAllDecls)
Res.push_back("-femit-all-decls");
if (Opts.MathErrno)
@@ -692,28 +719,31 @@ static void LangOptsToArgs(const LangOptions &Opts,
case LangOptions::SOB_Undefined: break;
case LangOptions::SOB_Defined: Res.push_back("-fwrapv"); break;
case LangOptions::SOB_Trapping:
- Res.push_back("-ftrapv"); break;
- if (!Opts.OverflowHandler.empty()) {
- Res.push_back("-ftrapv-handler");
- Res.push_back(Opts.OverflowHandler);
- }
+ Res.push_back("-ftrapv");
+ if (!Opts.OverflowHandler.empty())
+ Res.push_back("-ftrapv-handler", Opts.OverflowHandler);
+ break;
}
if (Opts.HeinousExtensions)
Res.push_back("-fheinous-gnu-extensions");
// Optimize is implicit.
// OptimizeSize is implicit.
+ if (Opts.FastMath)
+ Res.push_back("-ffast-math");
if (Opts.Static)
Res.push_back("-static-define");
- if (Opts.DumpRecordLayouts)
+ if (Opts.DumpRecordLayoutsSimple)
+ Res.push_back("-fdump-record-layouts-simple");
+ else if (Opts.DumpRecordLayouts)
Res.push_back("-fdump-record-layouts");
if (Opts.DumpVTableLayouts)
Res.push_back("-fdump-vtable-layouts");
if (Opts.NoBitFieldTypeAlign)
Res.push_back("-fno-bitfield-type-alignment");
- if (Opts.PICLevel) {
- Res.push_back("-pic-level");
- Res.push_back(llvm::utostr(Opts.PICLevel));
- }
+ if (Opts.PICLevel)
+ Res.push_back("-pic-level", llvm::utostr(Opts.PICLevel));
+ if (Opts.PIELevel)
+ Res.push_back("-pie-level", llvm::utostr(Opts.PIELevel));
if (Opts.ObjCGCBitmapPrint)
Res.push_back("-print-ivar-layout");
if (Opts.NoConstantCFStrings)
@@ -757,77 +787,70 @@ static void LangOptsToArgs(const LangOptions &Opts,
if (Opts.InlineVisibilityHidden)
Res.push_back("-fvisibility-inlines-hidden");
- if (Opts.getStackProtector() != 0) {
- Res.push_back("-stack-protector");
- Res.push_back(llvm::utostr(Opts.getStackProtector()));
- }
- if (Opts.InstantiationDepth != DefaultLangOpts.InstantiationDepth) {
- Res.push_back("-ftemplate-depth");
- Res.push_back(llvm::utostr(Opts.InstantiationDepth));
- }
- if (!Opts.ObjCConstantStringClass.empty()) {
- Res.push_back("-fconstant-string-class");
- Res.push_back(Opts.ObjCConstantStringClass);
- }
+ if (Opts.getStackProtector() != 0)
+ Res.push_back("-stack-protector", llvm::utostr(Opts.getStackProtector()));
+ if (Opts.InstantiationDepth != DefaultLangOpts.InstantiationDepth)
+ Res.push_back("-ftemplate-depth", llvm::utostr(Opts.InstantiationDepth));
+ if (Opts.ConstexprCallDepth != DefaultLangOpts.ConstexprCallDepth)
+ Res.push_back("-fconstexpr-depth", llvm::utostr(Opts.ConstexprCallDepth));
+ if (!Opts.ObjCConstantStringClass.empty())
+ Res.push_back("-fconstant-string-class", Opts.ObjCConstantStringClass);
if (Opts.FakeAddressSpaceMap)
Res.push_back("-ffake-address-space-map");
if (Opts.ParseUnknownAnytype)
Res.push_back("-funknown-anytype");
if (Opts.DebuggerSupport)
Res.push_back("-fdebugger-support");
+ if (Opts.DebuggerCastResultToId)
+ Res.push_back("-fdebugger-cast-result-to-id");
+ if (Opts.DebuggerObjCLiteral)
+ Res.push_back("-fdebugger-objc-literal");
if (Opts.DelayedTemplateParsing)
Res.push_back("-fdelayed-template-parsing");
if (Opts.Deprecated)
Res.push_back("-fdeprecated-macro");
+ if (Opts.ApplePragmaPack)
+ Res.push_back("-fapple-pragma-pack");
+ if (!Opts.CurrentModule.empty())
+ Res.push_back("-fmodule-name=" + Opts.CurrentModule);
}
static void PreprocessorOptsToArgs(const PreprocessorOptions &Opts,
- std::vector<std::string> &Res) {
+ ToArgsList &Res) {
for (unsigned i = 0, e = Opts.Macros.size(); i != e; ++i)
Res.push_back(std::string(Opts.Macros[i].second ? "-U" : "-D") +
Opts.Macros[i].first);
for (unsigned i = 0, e = Opts.Includes.size(); i != e; ++i) {
// FIXME: We need to avoid reincluding the implicit PCH and PTH includes.
- Res.push_back("-include");
- Res.push_back(Opts.Includes[i]);
- }
- for (unsigned i = 0, e = Opts.MacroIncludes.size(); i != e; ++i) {
- Res.push_back("-imacros");
- Res.push_back(Opts.MacroIncludes[i]);
+ Res.push_back("-include", Opts.Includes[i]);
}
+ for (unsigned i = 0, e = Opts.MacroIncludes.size(); i != e; ++i)
+ Res.push_back("-imacros", Opts.MacroIncludes[i]);
if (!Opts.UsePredefines)
Res.push_back("-undef");
if (Opts.DetailedRecord)
Res.push_back("-detailed-preprocessing-record");
- if (!Opts.ImplicitPCHInclude.empty()) {
- Res.push_back("-include-pch");
- Res.push_back(Opts.ImplicitPCHInclude);
- }
- if (!Opts.ImplicitPTHInclude.empty()) {
- Res.push_back("-include-pth");
- Res.push_back(Opts.ImplicitPTHInclude);
- }
+ if (!Opts.ImplicitPCHInclude.empty())
+ Res.push_back("-include-pch", Opts.ImplicitPCHInclude);
+ if (!Opts.ImplicitPTHInclude.empty())
+ Res.push_back("-include-pth", Opts.ImplicitPTHInclude);
if (!Opts.TokenCache.empty()) {
- if (Opts.ImplicitPTHInclude.empty()) {
- Res.push_back("-token-cache");
- Res.push_back(Opts.TokenCache);
- } else
+ if (Opts.ImplicitPTHInclude.empty())
+ Res.push_back("-token-cache", Opts.TokenCache);
+ else
assert(Opts.ImplicitPTHInclude == Opts.TokenCache &&
"Unsupported option combination!");
}
- for (unsigned i = 0, e = Opts.ChainedIncludes.size(); i != e; ++i) {
- Res.push_back("-chain-include");
- Res.push_back(Opts.ChainedIncludes[i]);
- }
+ for (unsigned i = 0, e = Opts.ChainedIncludes.size(); i != e; ++i)
+ Res.push_back("-chain-include", Opts.ChainedIncludes[i]);
for (unsigned i = 0, e = Opts.RemappedFiles.size(); i != e; ++i) {
- Res.push_back("-remap-file");
- Res.push_back(Opts.RemappedFiles[i].first + ";" +
- Opts.RemappedFiles[i].second);
+ Res.push_back("-remap-file", Opts.RemappedFiles[i].first + ";" +
+ Opts.RemappedFiles[i].second);
}
}
static void PreprocessorOutputOptsToArgs(const PreprocessorOutputOptions &Opts,
- std::vector<std::string> &Res) {
+ ToArgsList &Res) {
if (!Opts.ShowCPP && !Opts.ShowMacros)
llvm::report_fatal_error("Invalid option combination!");
@@ -845,43 +868,34 @@ static void PreprocessorOutputOptsToArgs(const PreprocessorOutputOptions &Opts,
}
static void TargetOptsToArgs(const TargetOptions &Opts,
- std::vector<std::string> &Res) {
+ ToArgsList &Res) {
Res.push_back("-triple");
Res.push_back(Opts.Triple);
- if (!Opts.CPU.empty()) {
- Res.push_back("-target-cpu");
- Res.push_back(Opts.CPU);
- }
- if (!Opts.ABI.empty()) {
- Res.push_back("-target-abi");
- Res.push_back(Opts.ABI);
- }
- if (!Opts.LinkerVersion.empty()) {
- Res.push_back("-target-linker-version");
- Res.push_back(Opts.LinkerVersion);
- }
- if (!Opts.CXXABI.empty()) {
- Res.push_back("-cxx-abi");
- Res.push_back(Opts.CXXABI);
- }
- for (unsigned i = 0, e = Opts.Features.size(); i != e; ++i) {
- Res.push_back("-target-feature");
- Res.push_back(Opts.Features[i]);
- }
+ if (!Opts.CPU.empty())
+ Res.push_back("-target-cpu", Opts.CPU);
+ if (!Opts.ABI.empty())
+ Res.push_back("-target-abi", Opts.ABI);
+ if (!Opts.LinkerVersion.empty())
+ Res.push_back("-target-linker-version", Opts.LinkerVersion);
+ if (!Opts.CXXABI.empty())
+ Res.push_back("-cxx-abi", Opts.CXXABI);
+ for (unsigned i = 0, e = Opts.Features.size(); i != e; ++i)
+ Res.push_back("-target-feature", Opts.Features[i]);
}
void CompilerInvocation::toArgs(std::vector<std::string> &Res) {
- AnalyzerOptsToArgs(getAnalyzerOpts(), Res);
- CodeGenOptsToArgs(getCodeGenOpts(), Res);
- DependencyOutputOptsToArgs(getDependencyOutputOpts(), Res);
- DiagnosticOptsToArgs(getDiagnosticOpts(), Res);
- FileSystemOptsToArgs(getFileSystemOpts(), Res);
- FrontendOptsToArgs(getFrontendOpts(), Res);
- HeaderSearchOptsToArgs(getHeaderSearchOpts(), Res);
- LangOptsToArgs(getLangOpts(), Res);
- PreprocessorOptsToArgs(getPreprocessorOpts(), Res);
- PreprocessorOutputOptsToArgs(getPreprocessorOutputOpts(), Res);
- TargetOptsToArgs(getTargetOpts(), Res);
+ ToArgsList List(Res);
+ AnalyzerOptsToArgs(getAnalyzerOpts(), List);
+ CodeGenOptsToArgs(getCodeGenOpts(), List);
+ DependencyOutputOptsToArgs(getDependencyOutputOpts(), List);
+ DiagnosticOptsToArgs(getDiagnosticOpts(), List);
+ FileSystemOptsToArgs(getFileSystemOpts(), List);
+ FrontendOptsToArgs(getFrontendOpts(), List);
+ HeaderSearchOptsToArgs(getHeaderSearchOpts(), List);
+ LangOptsToArgs(*getLangOpts(), List);
+ PreprocessorOptsToArgs(getPreprocessorOpts(), List);
+ PreprocessorOutputOptsToArgs(getPreprocessorOutputOpts(), List);
+ TargetOptsToArgs(getTargetOpts(), List);
}
//===----------------------------------------------------------------------===//
@@ -903,10 +917,10 @@ static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
Args.getLastArgIntValue(OPT_O, DefaultOpt, Diags);
}
-static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
+static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
using namespace cc1options;
-
+ bool Success = true;
if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
StringRef Name = A->getValue(Args);
AnalysisStores Value = llvm::StringSwitch<AnalysisStores>(Name)
@@ -914,12 +928,13 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
.Case(CMDFLAG, NAME##Model)
#include "clang/Frontend/Analyses.def"
.Default(NumStores);
- // FIXME: Error handling.
- if (Value == NumStores)
+ if (Value == NumStores) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- else
+ Success = false;
+ } else {
Opts.AnalysisStoreOpt = Value;
+ }
}
if (Arg *A = Args.getLastArg(OPT_analyzer_constraints)) {
@@ -929,12 +944,13 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
.Case(CMDFLAG, NAME##Model)
#include "clang/Frontend/Analyses.def"
.Default(NumConstraints);
- // FIXME: Error handling.
- if (Value == NumConstraints)
+ if (Value == NumConstraints) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- else
+ Success = false;
+ } else {
Opts.AnalysisConstraintsOpt = Value;
+ }
}
if (Arg *A = Args.getLastArg(OPT_analyzer_output)) {
@@ -944,12 +960,13 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
.Case(CMDFLAG, PD_##NAME)
#include "clang/Frontend/Analyses.def"
.Default(NUM_ANALYSIS_DIAG_CLIENTS);
- // FIXME: Error handling.
- if (Value == NUM_ANALYSIS_DIAG_CLIENTS)
+ if (Value == NUM_ANALYSIS_DIAG_CLIENTS) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- else
+ Success = false;
+ } else {
Opts.AnalysisDiagOpt = Value;
+ }
}
if (Arg *A = Args.getLastArg(OPT_analyzer_purge)) {
@@ -959,17 +976,51 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
.Case(CMDFLAG, NAME)
#include "clang/Frontend/Analyses.def"
.Default(NumPurgeModes);
- // FIXME: Error handling.
- if (Value == NumPurgeModes)
+ if (Value == NumPurgeModes) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- else
+ Success = false;
+ } else {
Opts.AnalysisPurgeOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_ipa)) {
+ StringRef Name = A->getValue(Args);
+ AnalysisIPAMode Value = llvm::StringSwitch<AnalysisIPAMode>(Name)
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) \
+ .Case(CMDFLAG, NAME)
+#include "clang/Frontend/Analyses.def"
+ .Default(NumIPAModes);
+ if (Value == NumIPAModes) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.IPAMode = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_inlining_mode)) {
+ StringRef Name = A->getValue(Args);
+ AnalysisInliningMode Value = llvm::StringSwitch<AnalysisInliningMode>(Name)
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) \
+ .Case(CMDFLAG, NAME)
+#include "clang/Frontend/Analyses.def"
+ .Default(NumInliningModes);
+ if (Value == NumInliningModes) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.InliningMode = Value;
+ }
}
Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
Opts.VisualizeEGDot = Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
Opts.VisualizeEGUbi = Args.hasArg(OPT_analyzer_viz_egraph_ubigraph);
+ Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
Opts.AnalyzeNestedBlocks =
@@ -983,7 +1034,13 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Opts.MaxNodes = Args.getLastArgIntValue(OPT_analyzer_max_nodes, 150000,Diags);
Opts.MaxLoop = Args.getLastArgIntValue(OPT_analyzer_max_loop, 4, Diags);
Opts.EagerlyTrimEGraph = !Args.hasArg(OPT_analyzer_no_eagerly_trim_egraph);
- Opts.InlineCall = Args.hasArg(OPT_analyzer_inline_call);
+ Opts.PrintStats = Args.hasArg(OPT_analyzer_stats);
+ Opts.InlineMaxStackDepth =
+ Args.getLastArgIntValue(OPT_analyzer_inline_max_stack_depth,
+ Opts.InlineMaxStackDepth, Diags);
+ Opts.InlineMaxFunctionSize =
+ Args.getLastArgIntValue(OPT_analyzer_inline_max_function_size,
+ Opts.InlineMaxFunctionSize, Diags);
Opts.CheckersControlList.clear();
for (arg_iterator it = Args.filtered_begin(OPT_analyzer_checker,
@@ -1000,25 +1057,41 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
for (unsigned i = 0, e = checkers.size(); i != e; ++i)
Opts.CheckersControlList.push_back(std::make_pair(checkers[i], enable));
}
+
+ return Success;
}
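Each option table parsed above is written exactly once, in clang/Frontend/Analyses.def, and expanded through X-macros: the same .def file generates the enum, the StringSwitch cases here, and the *ToArgs printers earlier, so the lists cannot drift apart. A self-contained miniature of the pattern, with the .def contents inlined as a macro list rather than a separate file (all names are ours):

    #include "llvm/ADT/StringSwitch.h"

    // Stand-in for an Analyses.def-style table: one X(NAME, CMDFLAG) per row.
    #define MY_MODES(X) X(Shallow, "shallow") X(Deep, "deep")

    // Expansion 1: the enum, with a sentinel meaning "not found".
    enum MyMode {
    #define X(NAME, CMDFLAG) NAME,
      MY_MODES(X)
    #undef X
      NumMyModes
    };

    // Expansion 2: the parser, the same StringSwitch shape used above.
    static MyMode parseMode(llvm::StringRef Name) {
      return llvm::StringSwitch<MyMode>(Name)
    #define X(NAME, CMDFLAG) .Case(CMDFLAG, NAME)
        MY_MODES(X)
    #undef X
        .Default(NumMyModes); // caller reports err_drv_invalid_value
    }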
-static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
+static bool ParseMigratorArgs(MigratorOptions &Opts, ArgList &Args) {
+ Opts.NoNSAllocReallocError = Args.hasArg(OPT_migrator_no_nsalloc_error);
+ Opts.NoFinalizeRemoval = Args.hasArg(OPT_migrator_no_finalize_removal);
+ return true;
+}
+
+static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags) {
using namespace cc1options;
+ bool Success = true;
- Opts.OptimizationLevel = getOptimizationLevel(Args, IK, Diags);
- if (Opts.OptimizationLevel > 3) {
+ unsigned OptLevel = getOptimizationLevel(Args, IK, Diags);
+ if (OptLevel > 3) {
Diags.Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_O)->getAsString(Args) << Opts.OptimizationLevel;
- Opts.OptimizationLevel = 3;
+ << Args.getLastArg(OPT_O)->getAsString(Args) << OptLevel;
+ OptLevel = 3;
+ Success = false;
}
+ Opts.OptimizationLevel = OptLevel;
// We must always run at least the always inlining pass.
Opts.Inlining = (Opts.OptimizationLevel > 1) ? CodeGenOptions::NormalInlining
: CodeGenOptions::OnlyAlwaysInlining;
+ // -fno-inline-functions overrides OptimizationLevel > 1.
+ Opts.NoInline = Args.hasArg(OPT_fno_inline);
+ Opts.Inlining = Args.hasArg(OPT_fno_inline_functions) ?
+ CodeGenOptions::OnlyAlwaysInlining : Opts.Inlining;
Opts.DebugInfo = Args.hasArg(OPT_g);
- Opts.LimitDebugInfo = Args.hasArg(OPT_flimit_debug_info);
+ Opts.LimitDebugInfo = !Args.hasArg(OPT_fno_limit_debug_info)
+ || Args.hasArg(OPT_flimit_debug_info);
Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
@@ -1046,12 +1119,17 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.CodeModel = Args.getLastArgValue(OPT_mcode_model);
Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
Opts.DisableFPElim = Args.hasArg(OPT_mdisable_fp_elim);
+ Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
Opts.HiddenWeakVTables = Args.hasArg(OPT_fhidden_weak_vtables);
Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable);
Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
- Opts.NoInfsFPMath = Opts.NoNaNsFPMath = Args.hasArg(OPT_cl_finite_math_only)||
- Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math));
+ Opts.NoNaNsFPMath = (Args.hasArg(OPT_menable_no_nans) ||
+ Args.hasArg(OPT_cl_finite_math_only) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math));
Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
Opts.BackendOptions = Args.getAllArgValues(OPT_backend_option);
Opts.NumRegisterParameters = Args.getLastArgIntValue(OPT_mregparm, 0, Diags);
@@ -1061,11 +1139,15 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
Opts.SaveTempLabels = Args.hasArg(OPT_msave_temp_labels);
Opts.NoDwarf2CFIAsm = Args.hasArg(OPT_fno_dwarf2_cfi_asm);
+ Opts.NoDwarfDirectoryAsm = Args.hasArg(OPT_fno_dwarf_directory_asm);
Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
- Opts.UnsafeFPMath = Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Opts.StrictEnums = Args.hasArg(OPT_fstrict_enums);
+ Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic");
+ Opts.TrapFuncName = Args.getLastArgValue(OPT_ftrap_function_EQ);
Opts.FunctionSections = Args.hasArg(OPT_ffunction_sections);
Opts.DataSections = Args.hasArg(OPT_fdata_sections);
@@ -1078,6 +1160,13 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
Opts.CoverageFile = Args.getLastArgValue(OPT_coverage_file);
+ Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
+ Opts.LinkBitcodeFile = Args.getLastArgValue(OPT_mlink_bitcode_file);
+ Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
+ if (Arg *A = Args.getLastArg(OPT_mstack_alignment)) {
+ StringRef Val = A->getValue(Args);
+ Val.getAsInteger(10, Opts.StackAlignment);
+ }
if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
StringRef Name = A->getValue(Args);
@@ -1086,11 +1175,15 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
.Case("non-legacy", CodeGenOptions::NonLegacy)
.Case("mixed", CodeGenOptions::Mixed)
.Default(~0U);
- if (Method == ~0U)
+ if (Method == ~0U) {
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
- else
+ Success = false;
+ } else {
Opts.ObjCDispatchMethod = Method;
+ }
}
+
+ return Success;
}
static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
@@ -1103,12 +1196,17 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
Opts.AddMissingHeaderDeps = Args.hasArg(OPT_MG);
+ Opts.DOTOutputFile = Args.getLastArgValue(OPT_dependency_dot);
}
-static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
- DiagnosticsEngine &Diags) {
+bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
+ DiagnosticsEngine *Diags) {
using namespace cc1options;
+ bool Success = true;
+
Opts.DiagnosticLogFile = Args.getLastArgValue(OPT_diagnostic_log_file);
+ Opts.DiagnosticSerializationFile =
+ Args.getLastArgValue(OPT_diagnostic_serialized_file);
Opts.IgnoreWarnings = Args.hasArg(OPT_w);
Opts.NoRewriteMacros = Args.hasArg(OPT_Wno_rewrite_macros);
Opts.Pedantic = Args.hasArg(OPT_pedantic);
@@ -1120,7 +1218,6 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
/*Default=*/true);
Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info);
Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location);
- Opts.ShowNames = Args.hasArg(OPT_fdiagnostics_show_name);
Opts.ShowOptionNames = Args.hasArg(OPT_fdiagnostics_show_option);
// Default behavior is to not show note include stacks.
@@ -1136,10 +1233,13 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.ShowOverloads = DiagnosticsEngine::Ovl_Best;
else if (ShowOverloads == "all")
Opts.ShowOverloads = DiagnosticsEngine::Ovl_All;
- else
- Diags.Report(diag::err_drv_invalid_value)
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_fshow_overloads_EQ)->getAsString(Args)
<< ShowOverloads;
+ }
StringRef ShowCategory =
Args.getLastArgValue(OPT_fdiagnostics_show_category, "none");
@@ -1149,23 +1249,29 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.ShowCategories = 1;
else if (ShowCategory == "name")
Opts.ShowCategories = 2;
- else
- Diags.Report(diag::err_drv_invalid_value)
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_fdiagnostics_show_category)->getAsString(Args)
<< ShowCategory;
+ }
StringRef Format =
Args.getLastArgValue(OPT_fdiagnostics_format, "clang");
if (Format == "clang")
Opts.Format = DiagnosticOptions::Clang;
- else if (Format == "msvc")
+ else if (Format == "msvc")
Opts.Format = DiagnosticOptions::Msvc;
- else if (Format == "vi")
+ else if (Format == "vi")
Opts.Format = DiagnosticOptions::Vi;
- else
- Diags.Report(diag::err_drv_invalid_value)
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_fdiagnostics_format)->getAsString(Args)
<< Format;
+ }
Opts.ShowSourceRanges = Args.hasArg(OPT_fdiagnostics_print_source_range_info);
Opts.ShowParseableFixits = Args.hasArg(OPT_fdiagnostics_parseable_fixits);
@@ -1178,16 +1284,32 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
= Args.getLastArgIntValue(OPT_ftemplate_backtrace_limit,
DiagnosticOptions::DefaultTemplateBacktraceLimit,
Diags);
+ Opts.ConstexprBacktraceLimit
+ = Args.getLastArgIntValue(OPT_fconstexpr_backtrace_limit,
+ DiagnosticOptions::DefaultConstexprBacktraceLimit,
+ Diags);
Opts.TabStop = Args.getLastArgIntValue(OPT_ftabstop,
DiagnosticOptions::DefaultTabStop, Diags);
if (Opts.TabStop == 0 || Opts.TabStop > DiagnosticOptions::MaxTabStop) {
- Diags.Report(diag::warn_ignoring_ftabstop_value)
- << Opts.TabStop << DiagnosticOptions::DefaultTabStop;
Opts.TabStop = DiagnosticOptions::DefaultTabStop;
+ if (Diags)
+ Diags->Report(diag::warn_ignoring_ftabstop_value)
+ << Opts.TabStop << DiagnosticOptions::DefaultTabStop;
}
Opts.MessageLength = Args.getLastArgIntValue(OPT_fmessage_length, 0, Diags);
Opts.DumpBuildInformation = Args.getLastArgValue(OPT_dump_build_information);
- Opts.Warnings = Args.getAllArgValues(OPT_W);
+
+ for (arg_iterator it = Args.filtered_begin(OPT_W),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ StringRef V = (*it)->getValue(Args);
+ // "-Wl," and such are not warnings options.
+ if (V.startswith("l,") || V.startswith("a,") || V.startswith("p,"))
+ continue;
+
+ Opts.Warnings.push_back(V);
+ }
+
+ return Success;
}
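The -W loop above replaces the old getAllArgValues call so that tool pass-throughs are no longer treated as warning flags: for a driver-forwarded line such as -Wall -Wl,-z,relro -Wextra, only "all" and "extra" end up in Opts.Warnings. A self-contained sketch of the same filter (function and variable names are hypothetical):

    #include "llvm/ADT/StringRef.h"
    #include <string>
    #include <vector>

    // Keep only genuine -W<warning> values; drop -Wl,/-Wa,/-Wp, pass-throughs.
    static void filterWarnings(const std::vector<std::string> &WValues,
                               std::vector<std::string> &Warnings) {
      for (unsigned i = 0, e = WValues.size(); i != e; ++i) {
        llvm::StringRef V(WValues[i]);
        if (V.startswith("l,") || V.startswith("a,") || V.startswith("p,"))
          continue;
        Warnings.push_back(V.str());
      }
    }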
static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
@@ -1249,6 +1371,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::PrintPreamble; break;
case OPT_E:
Opts.ProgramAction = frontend::PrintPreprocessedInput; break;
+ case OPT_pubnames_dump:
+ Opts.ProgramAction = frontend::PubnamesDump; break;
case OPT_rewrite_macros:
Opts.ProgramAction = frontend::RewriteMacros; break;
case OPT_rewrite_objc:
@@ -1257,6 +1381,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::RewriteTest; break;
case OPT_analyze:
Opts.ProgramAction = frontend::RunAnalysis; break;
+ case OPT_migrate:
+ Opts.ProgramAction = frontend::MigrateSource; break;
case OPT_Eonly:
Opts.ProgramAction = frontend::RunPreprocessorOnly; break;
}
@@ -1308,8 +1434,11 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ASTMergeFiles = Args.getAllArgValues(OPT_ast_merge);
Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm);
Opts.FixWhatYouCan = Args.hasArg(OPT_fix_what_you_can);
-
- Opts.ARCMTAction = FrontendOptions::ARCMT_None;
+ Opts.FixOnlyWarnings = Args.hasArg(OPT_fix_only_warnings);
+ Opts.FixAndRecompile = Args.hasArg(OPT_fixit_recompile);
+ Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
+ Opts.OverrideRecordLayoutsFile
+ = Args.getLastArgValue(OPT_foverride_record_layout_EQ);
if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
OPT_arcmt_modify,
OPT_arcmt_migrate)) {
@@ -1327,12 +1456,23 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
break;
}
}
- Opts.ARCMTMigrateDir = Args.getLastArgValue(OPT_arcmt_migrate_directory);
+ Opts.MTMigrateDir = Args.getLastArgValue(OPT_mt_migrate_directory);
Opts.ARCMTMigrateReportOut
= Args.getLastArgValue(OPT_arcmt_migrate_report_output);
Opts.ARCMTMigrateEmitARCErrors
= Args.hasArg(OPT_arcmt_migrate_emit_arc_errors);
+ if (Args.hasArg(OPT_objcmt_migrate_literals))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Literals;
+ if (Args.hasArg(OPT_objcmt_migrate_subscripting))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Subscripting;
+
+ if (Opts.ARCMTAction != FrontendOptions::ARCMT_None &&
+ Opts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << "ARC migration" << "ObjC migration";
+ }
+
InputKind DashX = IK_None;
if (const Arg *A = Args.getLastArg(OPT_x)) {
DashX = llvm::StringSwitch<InputKind>(A->getValue(Args))
@@ -1376,7 +1516,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (i == 0)
DashX = IK;
}
- Opts.Inputs.push_back(std::make_pair(IK, Inputs[i]));
+ Opts.Inputs.push_back(FrontendInputFile(Inputs[i], IK));
}
return DashX;
@@ -1457,6 +1597,10 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
OPT_iwithsysroot), ie = Args.filtered_end(); it != ie; ++it)
Opts.AddPath((*it)->getValue(Args), frontend::System, true, false,
!(*it)->getOption().matches(OPT_iwithsysroot));
+ for (arg_iterator it = Args.filtered_begin(OPT_iframework),
+ ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::System, true, true,
+ true);
// Add the paths for the various language specific isystem flags.
for (arg_iterator it = Args.filtered_begin(OPT_c_isystem),
@@ -1529,7 +1673,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
Opts.BCPLComment = Std.hasBCPLComments();
Opts.C99 = Std.isC99();
- Opts.C1X = Std.isC1X();
+ Opts.C11 = Std.isC11();
Opts.CPlusPlus = Std.isCPlusPlus();
Opts.CPlusPlus0x = Std.isCPlusPlus0x();
Opts.Digraphs = Std.hasDigraphs();
@@ -1680,6 +1824,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
else if (Vis == "hidden")
Opts.setVisibilityMode(HiddenVisibility);
else if (Vis == "protected")
+ // FIXME: diagnose if target does not support protected visibility
Opts.setVisibilityMode(ProtectedVisibility);
else
Diags.Report(diag::err_drv_invalid_value)
@@ -1704,7 +1849,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
OPT_fno_dollars_in_identifiers,
Opts.DollarIdents);
Opts.PascalStrings = Args.hasArg(OPT_fpascal_strings);
- Opts.MicrosoftExt = Args.hasArg(OPT_fms_extensions);
+ Opts.MicrosoftExt
+ = Args.hasArg(OPT_fms_extensions) || Args.hasArg(OPT_fms_compatibility);
Opts.MicrosoftMode = Args.hasArg(OPT_fms_compatibility);
Opts.MSCVersion = Args.getLastArgIntValue(OPT_fmsc_version, 0, Diags);
Opts.Borland = Args.hasArg(OPT_fborland_extensions);
@@ -1724,6 +1870,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.RTTI = !Args.hasArg(OPT_fno_rtti);
Opts.Blocks = Args.hasArg(OPT_fblocks);
Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
+ Opts.Modules = Args.hasArg(OPT_fmodules);
Opts.CharIsSigned = !Args.hasArg(OPT_fno_signed_char);
Opts.ShortWChar = Args.hasArg(OPT_fshort_wchar);
Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
@@ -1736,7 +1883,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors);
Opts.MathErrno = Args.hasArg(OPT_fmath_errno);
Opts.InstantiationDepth = Args.getLastArgIntValue(OPT_ftemplate_depth, 1024,
- Diags);
+ Diags);
+ Opts.ConstexprCallDepth = Args.getLastArgIntValue(OPT_fconstexpr_depth, 512,
+ Diags);
Opts.DelayedTemplateParsing = Args.hasArg(OPT_fdelayed_template_parsing);
Opts.NumLargeByValueCopy = Args.getLastArgIntValue(OPT_Wlarge_by_value_copy,
0, Diags);
@@ -1753,18 +1902,27 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.EmitAllDecls = Args.hasArg(OPT_femit_all_decls);
Opts.PackStruct = Args.getLastArgIntValue(OPT_fpack_struct, 0, Diags);
Opts.PICLevel = Args.getLastArgIntValue(OPT_pic_level, 0, Diags);
+ Opts.PIELevel = Args.getLastArgIntValue(OPT_pie_level, 0, Diags);
Opts.Static = Args.hasArg(OPT_static_define);
- Opts.DumpRecordLayouts = Args.hasArg(OPT_fdump_record_layouts);
+ Opts.DumpRecordLayoutsSimple = Args.hasArg(OPT_fdump_record_layouts_simple);
+ Opts.DumpRecordLayouts = Opts.DumpRecordLayoutsSimple
+ || Args.hasArg(OPT_fdump_record_layouts);
Opts.DumpVTableLayouts = Args.hasArg(OPT_fdump_vtable_layouts);
Opts.SpellChecking = !Args.hasArg(OPT_fno_spell_checking);
Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align);
Opts.SinglePrecisionConstants = Args.hasArg(OPT_cl_single_precision_constant);
Opts.FastRelaxedMath = Args.hasArg(OPT_cl_fast_relaxed_math);
- Opts.OptimizeSize = 0;
Opts.MRTD = Args.hasArg(OPT_mrtd);
+ Opts.HexagonQdsp6Compat = Args.hasArg(OPT_mqdsp6_compat);
Opts.FakeAddressSpaceMap = Args.hasArg(OPT_ffake_address_space_map);
Opts.ParseUnknownAnytype = Args.hasArg(OPT_funknown_anytype);
Opts.DebuggerSupport = Args.hasArg(OPT_fdebugger_support);
+ Opts.DebuggerCastResultToId = Args.hasArg(OPT_fdebugger_cast_result_to_id);
+ Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
+ Opts.AddressSanitizer = Args.hasArg(OPT_faddress_sanitizer);
+ Opts.ThreadSanitizer = Args.hasArg(OPT_fthread_sanitizer);
+ Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
+ Opts.CurrentModule = Args.getLastArgValue(OPT_fmodule_name);
// Record whether the __DEPRECATED define was requested.
Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
@@ -1774,13 +1932,14 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// FIXME: Eliminate this dependency.
unsigned Opt = getOptimizationLevel(Args, IK, Diags);
Opts.Optimize = Opt != 0;
+ Opts.OptimizeSize = Args.hasArg(OPT_Os) || Args.hasArg(OPT_Oz);
// This is the __NO_INLINE__ define, which just depends on things like the
// optimization level and -fno-inline, not actually whether the backend has
// inlining enabled.
- //
- // FIXME: This is affected by other options (-fno-inline).
- Opts.NoInline = !Opt;
+ Opts.NoInlineDefine = !Opt || Args.hasArg(OPT_fno_inline);
+
+ Opts.FastMath = Args.hasArg(OPT_ffast_math);
unsigned SSP = Args.getLastArgIntValue(OPT_stack_protector, 0, Diags);
switch (SSP) {
@@ -1806,7 +1965,6 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.TokenCache = Opts.ImplicitPTHInclude;
Opts.UsePredefines = !Args.hasArg(OPT_undef);
Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
- Opts.AutoModuleImport = Args.hasArg(OPT_fauto_module_import);
Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
Opts.DumpDeserializedPCHDecls = Args.hasArg(OPT_dump_deserialized_pch_decls);
@@ -1917,45 +2075,54 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
Opts.LinkerVersion = Args.getLastArgValue(OPT_target_linker_version);
Opts.Triple = llvm::Triple::normalize(Args.getLastArgValue(OPT_triple));
- // Use the host triple if unspecified.
+ // Use the default target triple if unspecified.
if (Opts.Triple.empty())
- Opts.Triple = llvm::sys::getHostTriple();
+ Opts.Triple = llvm::sys::getDefaultTargetTriple();
}
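One behavioral consequence of the hunk above: a cross-compiled clang now falls back to the triple it was configured for, rather than the triple of the machine it happens to run on. A small sketch of the fallback (assuming the same llvm::Triple and llvm::sys APIs; the helper name is hypothetical):

    #include "llvm/ADT/Triple.h"
    #include "llvm/Support/Host.h"
    #include <string>

    static std::string pickTriple(const std::string &FromArgs) {
      // Normalize whatever -triple supplied; use the compiled-in default
      // target triple when the option was absent.
      std::string T = llvm::Triple::normalize(FromArgs);
      if (T.empty())
        T = llvm::sys::getDefaultTargetTriple();
      return T;
    }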
//
-void CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
+bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
const char *const *ArgBegin,
const char *const *ArgEnd,
DiagnosticsEngine &Diags) {
+ bool Success = true;
+
// Parse the arguments.
- llvm::OwningPtr<OptTable> Opts(createCC1OptTable());
+ OwningPtr<OptTable> Opts(createCC1OptTable());
unsigned MissingArgIndex, MissingArgCount;
- llvm::OwningPtr<InputArgList> Args(
+ OwningPtr<InputArgList> Args(
Opts->ParseArgs(ArgBegin, ArgEnd,MissingArgIndex, MissingArgCount));
// Check for missing argument error.
- if (MissingArgCount)
+ if (MissingArgCount) {
Diags.Report(diag::err_drv_missing_argument)
<< Args->getArgString(MissingArgIndex) << MissingArgCount;
+ Success = false;
+ }
// Issue errors on unknown arguments.
for (arg_iterator it = Args->filtered_begin(OPT_UNKNOWN),
- ie = Args->filtered_end(); it != ie; ++it)
+ ie = Args->filtered_end(); it != ie; ++it) {
Diags.Report(diag::err_drv_unknown_argument) << (*it)->getAsString(*Args);
+ Success = false;
+ }
- ParseAnalyzerArgs(Res.getAnalyzerOpts(), *Args, Diags);
+ Success = ParseAnalyzerArgs(Res.getAnalyzerOpts(), *Args, Diags) && Success;
+ Success = ParseMigratorArgs(Res.getMigratorOpts(), *Args) && Success;
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), *Args);
- ParseDiagnosticArgs(Res.getDiagnosticOpts(), *Args, Diags);
+ Success = ParseDiagnosticArgs(Res.getDiagnosticOpts(), *Args, &Diags)
+ && Success;
ParseFileSystemArgs(Res.getFileSystemOpts(), *Args);
// FIXME: We shouldn't have to pass the DashX option around here
InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), *Args, Diags);
- ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, DashX, Diags);
+ Success = ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, DashX, Diags)
+ && Success;
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), *Args);
if (DashX != IK_AST && DashX != IK_LLVM_IR) {
- ParseLangArgs(Res.getLangOpts(), *Args, DashX, Diags);
+ ParseLangArgs(*Res.getLangOpts(), *Args, DashX, Diags);
if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
- Res.getLangOpts().ObjCExceptions = 1;
+ Res.getLangOpts()->ObjCExceptions = 1;
}
// FIXME: ParsePreprocessorArgs uses the FileManager to read the contents of
// PCH file and find the original header name. Remove the need to do that in
@@ -1965,6 +2132,8 @@ void CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
ParsePreprocessorArgs(Res.getPreprocessorOpts(), *Args, FileMgr, Diags);
ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), *Args);
ParseTargetArgs(Res.getTargetOpts(), *Args);
+
+ return Success;
}
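Since CreateFromArgs now reports success, a caller can tell a clean parse from one that emitted argument errors. A hedged caller-side sketch:

    // Hypothetical caller; diagnostics already went through Diags on failure.
    CompilerInvocation Invocation;
    if (!CompilerInvocation::CreateFromArgs(Invocation, ArgBegin, ArgEnd, Diags))
      return false;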
namespace {
@@ -2024,13 +2193,16 @@ std::string CompilerInvocation::getModuleHash() const {
ModuleSignature Signature;
// Start the signature with the compiler version.
- Signature.add(getClangFullRepositoryVersion());
+ // FIXME: The full version string can be quite long. Omit it from the
+ // module hash for now to avoid failures where the path name becomes too
+ // long. An MD5 or similar checksum would work well here.
+ // Signature.add(getClangFullRepositoryVersion());
// Extend the signature with the language options
#define LANGOPT(Name, Bits, Default, Description) \
- Signature.add(LangOpts.Name, Bits);
+ Signature.add(LangOpts->Name, Bits);
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
- Signature.add(static_cast<unsigned>(LangOpts.get##Name()), Bits);
+ Signature.add(static_cast<unsigned>(LangOpts->get##Name()), Bits);
#define BENIGN_LANGOPT(Name, Bits, Default, Description)
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
#include "clang/Basic/LangOptions.def"
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index fc15081..b477ade 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -30,7 +30,7 @@ using namespace clang;
/// argument vector.
CompilerInvocation *
clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags) {
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags) {
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
@@ -48,13 +48,13 @@ clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
Args.push_back("-fsyntax-only");
// FIXME: We shouldn't have to pass in the path info.
- driver::Driver TheDriver("clang", llvm::sys::getHostTriple(),
+ driver::Driver TheDriver("clang", llvm::sys::getDefaultTargetTriple(),
"a.out", false, *Diags);
// Don't check that inputs exist, they may have been remapped.
TheDriver.setCheckInputsExist(false);
- llvm::OwningPtr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
+ OwningPtr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
// Just print the cc1 options if -### was present.
if (C->getArgs().hasArg(driver::options::OPT__HASH_HASH_HASH)) {
@@ -66,7 +66,7 @@ clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
// failed.
const driver::JobList &Jobs = C->getJobs();
if (Jobs.size() != 1 || !isa<driver::Command>(*Jobs.begin())) {
- llvm::SmallString<256> Msg;
+ SmallString<256> Msg;
llvm::raw_svector_ostream OS(Msg);
C->PrintJob(OS, C->getJobs(), "; ", true);
Diags->Report(diag::err_fe_expected_compiler_job) << OS.str();
@@ -80,11 +80,12 @@ clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
}
const driver::ArgStringList &CCArgs = Cmd->getArguments();
- CompilerInvocation *CI = new CompilerInvocation();
- CompilerInvocation::CreateFromArgs(*CI,
+ OwningPtr<CompilerInvocation> CI(new CompilerInvocation());
+ if (!CompilerInvocation::CreateFromArgs(*CI,
const_cast<const char **>(CCArgs.data()),
const_cast<const char **>(CCArgs.data()) +
CCArgs.size(),
- *Diags);
- return CI;
+ *Diags))
+ return 0;
+ return CI.take();
}
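A hedged usage sketch for the updated helper; with the OwningPtr change it now returns null, rather than a half-initialized invocation, when the cc1 arguments fail to parse:

    // Hypothetical caller; ArrayRef is constructed from the C array directly.
    const char *Argv[] = { "-fsyntax-only", "input.c" };
    OwningPtr<CompilerInvocation> CI(
        clang::createInvocationFromCommandLine(Argv));
    if (!CI) {
      // Argument errors were already reported through the diagnostics engine.
    }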
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
index ff3a123..21f5daa 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
@@ -31,11 +31,12 @@ class DependencyFileCallback : public PPCallbacks {
std::vector<std::string> Files;
llvm::StringSet<> FilesSet;
const Preprocessor *PP;
+ std::string OutputFile;
std::vector<std::string> Targets;
- raw_ostream *OS;
bool IncludeSystemHeaders;
bool PhonyTarget;
bool AddMissingHeaderDeps;
+ bool SeenMissingHeader;
private:
bool FileMatchesDepCriteria(const char *Filename,
SrcMgr::CharacteristicKind FileType);
@@ -44,12 +45,12 @@ private:
public:
DependencyFileCallback(const Preprocessor *_PP,
- raw_ostream *_OS,
const DependencyOutputOptions &Opts)
- : PP(_PP), Targets(Opts.Targets), OS(_OS),
+ : PP(_PP), OutputFile(Opts.OutputFile), Targets(Opts.Targets),
IncludeSystemHeaders(Opts.IncludeSystemHeaders),
PhonyTarget(Opts.UsePhonyTargets),
- AddMissingHeaderDeps(Opts.AddMissingHeaderDeps) {}
+ AddMissingHeaderDeps(Opts.AddMissingHeaderDeps),
+ SeenMissingHeader(false) {}
virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -65,8 +66,6 @@ public:
virtual void EndOfMainFile() {
OutputDependencyFile();
- delete OS;
- OS = 0;
}
};
}
@@ -78,19 +77,11 @@ void clang::AttachDependencyFileGen(Preprocessor &PP,
return;
}
- std::string Err;
- raw_ostream *OS(new llvm::raw_fd_ostream(Opts.OutputFile.c_str(), Err));
- if (!Err.empty()) {
- PP.getDiagnostics().Report(diag::err_fe_error_opening)
- << Opts.OutputFile << Err;
- return;
- }
-
// Disable the "file not found" diagnostic if the -MG option was given.
if (Opts.AddMissingHeaderDeps)
PP.SetSuppressIncludeNotFoundError(true);
- PP.addPPCallbacks(new DependencyFileCallback(&PP, OS, Opts));
+ PP.addPPCallbacks(new DependencyFileCallback(&PP, Opts));
}
/// FileMatchesDepCriteria - Determine whether the given Filename should be
@@ -145,8 +136,12 @@ void DependencyFileCallback::InclusionDirective(SourceLocation HashLoc,
SourceLocation EndLoc,
StringRef SearchPath,
StringRef RelativePath) {
- if (AddMissingHeaderDeps && !File)
- AddFilename(FileName);
+ if (!File) {
+ if (AddMissingHeaderDeps)
+ AddFilename(FileName);
+ else
+ SeenMissingHeader = true;
+ }
}
void DependencyFileCallback::AddFilename(StringRef Filename) {
@@ -165,6 +160,19 @@ static void PrintFilename(raw_ostream &OS, StringRef Filename) {
}
void DependencyFileCallback::OutputDependencyFile() {
+ if (SeenMissingHeader) {
+ llvm::sys::Path(OutputFile).eraseFromDisk();
+ return;
+ }
+
+ std::string Err;
+ llvm::raw_fd_ostream OS(OutputFile.c_str(), Err);
+ if (!Err.empty()) {
+ PP->getDiagnostics().Report(diag::err_fe_error_opening)
+ << OutputFile << Err;
+ return;
+ }
+
// Write out the dependency targets, trying to avoid overly long
// lines when possible. We try our best to emit exactly the same
// dependency file as GCC (4.2), assuming the included files are the
@@ -179,16 +187,16 @@ void DependencyFileCallback::OutputDependencyFile() {
Columns += N;
} else if (Columns + N + 2 > MaxColumns) {
Columns = N + 2;
- *OS << " \\\n ";
+ OS << " \\\n ";
} else {
Columns += N + 1;
- *OS << ' ';
+ OS << ' ';
}
// Targets already quoted as needed.
- *OS << *I;
+ OS << *I;
}
- *OS << ':';
+ OS << ':';
Columns += 1;
// Now add each dependency in the order it was seen, but avoiding
@@ -200,23 +208,23 @@ void DependencyFileCallback::OutputDependencyFile() {
// break the line on the next iteration.
unsigned N = I->length();
if (Columns + (N + 1) + 2 > MaxColumns) {
- *OS << " \\\n ";
+ OS << " \\\n ";
Columns = 2;
}
- *OS << ' ';
- PrintFilename(*OS, *I);
+ OS << ' ';
+ PrintFilename(OS, *I);
Columns += N + 1;
}
- *OS << '\n';
+ OS << '\n';
// Create phony targets if requested.
if (PhonyTarget && !Files.empty()) {
// Skip the first entry, this is always the input file itself.
for (std::vector<std::string>::iterator I = Files.begin() + 1,
E = Files.end(); I != E; ++I) {
- *OS << '\n';
- PrintFilename(*OS, *I);
- *OS << ":\n";
+ OS << '\n';
+ PrintFilename(OS, *I);
+ OS << ":\n";
}
}
}
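To make the GCC-compatible wrapping concrete: for a target foo.o whose input foo.c pulled in a.h and b.h, with phony targets enabled, the rewritten OutputDependencyFile emits roughly (long lines would additionally be wrapped with backslash continuations):

    foo.o: foo.c a.h b.h

    a.h:

    b.h: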
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
new file mode 100644
index 0000000..eebaf0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
@@ -0,0 +1,140 @@
+//===--- DependencyGraph.cpp - Generate dependency graph ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code generates a header dependency graph in DOT format, for use
+// with, e.g., GraphViz.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+namespace DOT = llvm::DOT;
+
+namespace {
+class DependencyGraphCallback : public PPCallbacks {
+ const Preprocessor *PP;
+ std::string OutputFile;
+ std::string SysRoot;
+ llvm::SetVector<const FileEntry *> AllFiles;
+ typedef llvm::DenseMap<const FileEntry *,
+ llvm::SmallVector<const FileEntry *, 2> >
+ DependencyMap;
+
+ DependencyMap Dependencies;
+
+private:
+ llvm::raw_ostream &writeNodeReference(llvm::raw_ostream &OS,
+ const FileEntry *Node);
+ void OutputGraphFile();
+
+public:
+ DependencyGraphCallback(const Preprocessor *_PP, StringRef OutputFile,
+ StringRef SysRoot)
+ : PP(_PP), OutputFile(OutputFile.str()), SysRoot(SysRoot.str()) { }
+
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath);
+
+ virtual void EndOfMainFile() {
+ OutputGraphFile();
+ }
+
+};
+}
+
+void clang::AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
+ StringRef SysRoot) {
+ PP.addPPCallbacks(new DependencyGraphCallback(&PP, OutputFile, SysRoot));
+}
+
+void DependencyGraphCallback::InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath) {
+ if (!File)
+ return;
+
+ SourceManager &SM = PP->getSourceManager();
+ const FileEntry *FromFile
+ = SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(HashLoc)));
+ if (FromFile == 0)
+ return;
+
+ Dependencies[FromFile].push_back(File);
+
+ AllFiles.insert(File);
+ AllFiles.insert(FromFile);
+}
+
+llvm::raw_ostream &
+DependencyGraphCallback::writeNodeReference(llvm::raw_ostream &OS,
+ const FileEntry *Node) {
+ OS << "header_" << Node->getUID();
+ return OS;
+}
+
+void DependencyGraphCallback::OutputGraphFile() {
+ std::string Err;
+ llvm::raw_fd_ostream OS(OutputFile.c_str(), Err);
+ if (!Err.empty()) {
+ PP->getDiagnostics().Report(diag::err_fe_error_opening)
+ << OutputFile << Err;
+ return;
+ }
+
+ OS << "digraph \"dependencies\" {\n";
+
+ // Write the nodes
+ for (unsigned I = 0, N = AllFiles.size(); I != N; ++I) {
+ // Write the node itself.
+ OS.indent(2);
+ writeNodeReference(OS, AllFiles[I]);
+ OS << " [ shape=\"box\", label=\"";
+ StringRef FileName = AllFiles[I]->getName();
+ if (FileName.startswith(SysRoot))
+ FileName = FileName.substr(SysRoot.size());
+
+ OS << DOT::EscapeString(FileName)
+ << "\"];\n";
+ }
+
+ // Write the edges
+ for (DependencyMap::iterator F = Dependencies.begin(),
+ FEnd = Dependencies.end();
+ F != FEnd; ++F) {
+ for (unsigned I = 0, N = F->second.size(); I != N; ++I) {
+ OS.indent(2);
+ writeNodeReference(OS, F->first);
+ OS << " -> ";
+ writeNodeReference(OS, F->second[I]);
+ OS << ";\n";
+ }
+ }
+ OS << "}\n";
+}
+
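For reference, the emitted file is plain GraphViz DOT. Assuming a main.c that includes a.h, which in turn includes b.h, the output of OutputGraphFile would look roughly like this (the header_N ids come from FileEntry UIDs, so the exact numbers vary):

    digraph "dependencies" {
      header_0 [ shape="box", label="main.c"];
      header_1 [ shape="box", label="a.h"];
      header_2 [ shape="box", label="b.h"];
      header_0 -> header_1;
      header_1 -> header_2;
    }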
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
new file mode 100644
index 0000000..6c3bb1d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
@@ -0,0 +1,386 @@
+//===--- DiagnosticRenderer.cpp - Diagnostic Pretty-Printing --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/DiagnosticRenderer.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+using namespace clang;
+
+/// Look through spelling locations for a macro argument expansion, and
+/// if found skip to it so that we can trace the argument rather than the macros
+/// in which that argument is used. If no macro argument expansion is found,
+/// don't skip anything and return the starting location.
+static SourceLocation skipToMacroArgExpansion(const SourceManager &SM,
+ SourceLocation StartLoc) {
+ for (SourceLocation L = StartLoc; L.isMacroID();
+ L = SM.getImmediateSpellingLoc(L)) {
+ if (SM.isMacroArgExpansion(L))
+ return L;
+ }
+
+ // Otherwise just return the initial location; there's nothing to skip.
+ return StartLoc;
+}
+
+/// Gets the location of the immediate macro caller, one level up the stack
+/// toward the initial macro typed into the source.
+static SourceLocation getImmediateMacroCallerLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ if (!Loc.isMacroID()) return Loc;
+
+ // When we have the location of (part of) an expanded parameter, its spelling
+ // location points to the argument as typed into the macro call, and
+ // therefore is used to locate the macro caller.
+ if (SM.isMacroArgExpansion(Loc))
+ return SM.getImmediateSpellingLoc(Loc);
+
+ // Otherwise, the caller of the macro is located where this macro is
+ // expanded (while the spelling is part of the macro definition).
+ return SM.getImmediateExpansionRange(Loc).first;
+}
+
+/// Gets the location of the immediate macro callee, one level down the stack
+/// toward the leaf macro.
+static SourceLocation getImmediateMacroCalleeLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ if (!Loc.isMacroID()) return Loc;
+
+ // When we have the location of (part of) an expanded parameter, its
+ // expansion location points to the unexpanded parameter reference within
+ // the macro definition (or callee).
+ if (SM.isMacroArgExpansion(Loc))
+ return SM.getImmediateExpansionRange(Loc).first;
+
+ // Otherwise, the callee of the macro is located where this location was
+ // spelled inside the macro definition.
+ return SM.getImmediateSpellingLoc(Loc);
+}
+
+/// \brief Retrieve the name of the immediate macro expansion.
+///
+/// This routine starts from a source location, and finds the name of the macro
+/// responsible for its immediate expansion. It looks through any intervening
+/// macro argument expansions to compute this. It returns a StringRef which
+/// refers to the SourceManager-owned buffer of the source where that macro
+/// name is spelled. Thus, the result shouldn't outlive that SourceManager.
+///
+/// This differs from Lexer::getImmediateMacroName in that any macro argument
+/// location will resolve to the topmost function-like macro that accepted it.
+/// e.g.
+/// \code
+/// MAC1( MAC2(foo) )
+/// \endcode
+/// for location of 'foo' token, this function will return "MAC1" while
+/// Lexer::getImmediateMacroName will return "MAC2".
+static StringRef getImmediateMacroName(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ assert(Loc.isMacroID() && "Only reasonable to call this on macros");
+ // Walk past macro argument expansions.
+ while (SM.isMacroArgExpansion(Loc))
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+
+ // Find the spelling location of the start of the non-argument expansion
+ // range. This is where the macro name was spelled in order to begin
+ // expanding this macro.
+ Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).first);
+
+ // Dig out the buffer where the macro name was spelled and the extents of the
+ // name so that we can render it into the expansion note.
+ std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
+ unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+ StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
+ return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
+}
+
+/// Get the presumed location of a diagnostic message. This computes the
+/// presumed location for the top of any macro backtrace when present.
+static PresumedLoc getDiagnosticPresumedLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ // This is a condensed form of the algorithm used by emitCaretDiagnostic to
+ // walk to the top of the macro call stack.
+ while (Loc.isMacroID()) {
+ Loc = skipToMacroArgExpansion(SM, Loc);
+ Loc = getImmediateMacroCallerLoc(SM, Loc);
+ }
+
+ return SM.getPresumedLoc(Loc);
+}
+
+DiagnosticRenderer::DiagnosticRenderer(const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts)
+: SM(SM), LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
+
+DiagnosticRenderer::~DiagnosticRenderer() {}
+
+namespace {
+
+class FixitReceiver : public edit::EditsReceiver {
+ SmallVectorImpl<FixItHint> &MergedFixits;
+
+public:
+ FixitReceiver(SmallVectorImpl<FixItHint> &MergedFixits)
+ : MergedFixits(MergedFixits) { }
+ virtual void insert(SourceLocation loc, StringRef text) {
+ MergedFixits.push_back(FixItHint::CreateInsertion(loc, text));
+ }
+ virtual void replace(CharSourceRange range, StringRef text) {
+ MergedFixits.push_back(FixItHint::CreateReplacement(range, text));
+ }
+};
+
+}
+
+static void mergeFixits(ArrayRef<FixItHint> FixItHints,
+ const SourceManager &SM, const LangOptions &LangOpts,
+ SmallVectorImpl<FixItHint> &MergedFixits) {
+ edit::Commit commit(SM, LangOpts);
+ for (ArrayRef<FixItHint>::const_iterator
+ I = FixItHints.begin(), E = FixItHints.end(); I != E; ++I) {
+ const FixItHint &Hint = *I;
+ if (Hint.CodeToInsert.empty()) {
+ if (Hint.InsertFromRange.isValid())
+ commit.insertFromRange(Hint.RemoveRange.getBegin(),
+ Hint.InsertFromRange, /*afterToken=*/false,
+ Hint.BeforePreviousInsertions);
+ else
+ commit.remove(Hint.RemoveRange);
+ } else {
+ if (Hint.RemoveRange.isTokenRange() ||
+ Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd())
+ commit.replace(Hint.RemoveRange, Hint.CodeToInsert);
+ else
+ commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
+ /*afterToken=*/false, Hint.BeforePreviousInsertions);
+ }
+ }
+
+ edit::EditedSource Editor(SM, LangOpts);
+ if (Editor.commit(commit)) {
+ FixitReceiver Rec(MergedFixits);
+ Editor.applyRewrites(Rec);
+ }
+}
+
+void DiagnosticRenderer::emitDiagnostic(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> FixItHints,
+ DiagOrStoredDiag D) {
+
+ beginDiagnostic(D, Level);
+
+ PresumedLoc PLoc = getDiagnosticPresumedLoc(SM, Loc);
+
+ // First, if this diagnostic is not in the main file, print out the
+ // "included from" lines.
+ emitIncludeStack(PLoc.getIncludeLoc(), Level);
+
+ // Next, emit the actual diagnostic message.
+ emitDiagnosticMessage(Loc, PLoc, Level, Message, Ranges, D);
+
+ // Only recurse if we have a valid location.
+ if (Loc.isValid()) {
+ // Get the ranges into a local array we can hack on.
+ SmallVector<CharSourceRange, 20> MutableRanges(Ranges.begin(),
+ Ranges.end());
+
+ llvm::SmallVector<FixItHint, 8> MergedFixits;
+ if (!FixItHints.empty()) {
+ mergeFixits(FixItHints, SM, LangOpts, MergedFixits);
+ FixItHints = MergedFixits;
+ }
+
+ for (ArrayRef<FixItHint>::const_iterator I = FixItHints.begin(),
+ E = FixItHints.end();
+ I != E; ++I)
+ if (I->RemoveRange.isValid())
+ MutableRanges.push_back(I->RemoveRange);
+
+ unsigned MacroDepth = 0;
+ emitMacroExpansionsAndCarets(Loc, Level, MutableRanges, FixItHints,
+ MacroDepth);
+ }
+
+ LastLoc = Loc;
+ LastLevel = Level;
+
+ endDiagnostic(D, Level);
+}
+
+
+void DiagnosticRenderer::emitStoredDiagnostic(StoredDiagnostic &Diag) {
+ emitDiagnostic(Diag.getLocation(), Diag.getLevel(), Diag.getMessage(),
+ Diag.getRanges(), Diag.getFixIts(),
+ &Diag);
+}
+
+/// \brief Prints an include stack when appropriate for a particular
+/// diagnostic level and location.
+///
+/// This routine handles all the logic of suppressing particular include
+/// stacks (such as those for notes) and duplicate include stacks when
+/// repeated warnings occur within the same file. It also handles the logic
+/// of customizing the formatting and display of the include stack.
+///
+/// \param Level The diagnostic level of the message this stack pertains to.
+/// \param Loc The include location of the current file (not the diagnostic
+/// location).
+void DiagnosticRenderer::emitIncludeStack(SourceLocation Loc,
+ DiagnosticsEngine::Level Level) {
+ // Skip redundant include stacks altogether.
+ if (LastIncludeLoc == Loc)
+ return;
+ LastIncludeLoc = Loc;
+
+ if (!DiagOpts.ShowNoteIncludeStack && Level == DiagnosticsEngine::Note)
+ return;
+
+ emitIncludeStackRecursively(Loc);
+}
+
+/// \brief Helper to recursively walk up the include stack and print each layer
+/// on the way back down.
+void DiagnosticRenderer::emitIncludeStackRecursively(SourceLocation Loc) {
+ if (Loc.isInvalid())
+ return;
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isInvalid())
+ return;
+
+ // Emit the other include frames first.
+ emitIncludeStackRecursively(PLoc.getIncludeLoc());
+
+ // Emit the inclusion text/note.
+ emitIncludeLocation(Loc, PLoc);
+}
+
+/// \brief Recursively emit notes for each macro expansion and caret
+/// diagnostics where appropriate.
+///
+/// Walks up the macro expansion stack printing expansion notes, the code
+/// snippet, caret, underlines and FixItHint display as appropriate at each
+/// level.
+///
+/// \param Loc The location for this caret.
+/// \param Level The diagnostic level currently being emitted.
+/// \param Ranges The underlined ranges for this code snippet.
+/// \param Hints The FixIt hints active for this diagnostic.
+/// \param MacroDepth The depth of the macro expansion stack, filled in once
+/// the walk reaches a file (non-macro) location.
+/// \param OnMacroInst The current depth of the macro expansion stack.
+void DiagnosticRenderer::emitMacroExpansionsAndCarets(
+ SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints,
+ unsigned &MacroDepth,
+ unsigned OnMacroInst)
+{
+ assert(!Loc.isInvalid() && "must have a valid source location here");
+
+ // If this is a file source location, directly emit the source snippet and
+ // caret line. Also record the macro depth reached.
+ if (Loc.isFileID()) {
+ assert(MacroDepth == 0 && "We shouldn't hit a leaf node twice!");
+ MacroDepth = OnMacroInst;
+ emitCodeContext(Loc, Level, Ranges, Hints);
+ return;
+ }
+ // Otherwise recurse through each macro expansion layer.
+
+ // When processing macros, skip over the expansions leading up to
+ // a macro argument, and trace the argument's expansion stack instead.
+ Loc = skipToMacroArgExpansion(SM, Loc);
+
+ SourceLocation OneLevelUp = getImmediateMacroCallerLoc(SM, Loc);
+
+ // FIXME: Map ranges?
+ emitMacroExpansionsAndCarets(OneLevelUp, Level, Ranges, Hints, MacroDepth,
+ OnMacroInst + 1);
+
+ // Save the original location so we can find the spelling of the macro call.
+ SourceLocation MacroLoc = Loc;
+
+ // Map the location.
+ Loc = getImmediateMacroCalleeLoc(SM, Loc);
+
+ unsigned MacroSkipStart = 0, MacroSkipEnd = 0;
+ if (MacroDepth > DiagOpts.MacroBacktraceLimit &&
+ DiagOpts.MacroBacktraceLimit != 0) {
+ MacroSkipStart = DiagOpts.MacroBacktraceLimit / 2 +
+ DiagOpts.MacroBacktraceLimit % 2;
+ MacroSkipEnd = MacroDepth - DiagOpts.MacroBacktraceLimit / 2;
+ }
+
+ // Whether to suppress printing this macro expansion.
+ bool Suppressed = (OnMacroInst >= MacroSkipStart &&
+ OnMacroInst < MacroSkipEnd);
+
+ // Map the ranges.
+ for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I) {
+ SourceLocation Start = I->getBegin(), End = I->getEnd();
+ if (Start.isMacroID())
+ I->setBegin(getImmediateMacroCalleeLoc(SM, Start));
+ if (End.isMacroID())
+ I->setEnd(getImmediateMacroCalleeLoc(SM, End));
+ }
+
+ if (Suppressed) {
+ // Tell the user that we've skipped contexts.
+ if (OnMacroInst == MacroSkipStart) {
+ SmallString<200> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "(skipping " << (MacroSkipEnd - MacroSkipStart)
+ << " expansions in backtrace; use -fmacro-backtrace-limit=0 to "
+ "see all)";
+ emitBasicNote(Message.str());
+ }
+ return;
+ }
+
+ SmallString<100> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "expanded from macro '"
+ << getImmediateMacroName(MacroLoc, SM, LangOpts) << "'";
+ emitDiagnostic(SM.getSpellingLoc(Loc), DiagnosticsEngine::Note,
+ Message.str(),
+ Ranges, ArrayRef<FixItHint>());
+}
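A worked example of the skip window computed above, for -fmacro-backtrace-limit=2 and a macro depth of 5:

    // MacroSkipStart = 2/2 + 2%2 = 1
    // MacroSkipEnd   = 5 - 2/2   = 4
    // Levels 1..3 are suppressed and collapse into a single
    // "(skipping 3 expansions in backtrace ...)" note; the innermost (0) and
    // outermost (4) expansion notes still print.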
+
+DiagnosticNoteRenderer::~DiagnosticNoteRenderer() {}
+
+void DiagnosticNoteRenderer::emitIncludeLocation(SourceLocation Loc,
+ PresumedLoc PLoc) {
+ // Generate a note indicating the include location.
+ SmallString<200> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "in file included from " << PLoc.getFilename() << ':'
+ << PLoc.getLine() << ":";
+ emitNote(Loc, Message.str());
+}
+
+void DiagnosticNoteRenderer::emitBasicNote(StringRef Message) {
+ emitNote(SourceLocation(), Message);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
index ba2d63b..da4bdfa 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
@@ -14,14 +14,15 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/ChainedIncludesSource.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendPluginRegistry.h"
+#include "clang/Frontend/LayoutOverrideSource.h"
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Parse/ParseAST.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/ASTReader.h"
-#include "clang/Serialization/ChainedIncludesSource.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/ErrorHandling.h"
@@ -30,37 +31,70 @@ using namespace clang;
namespace {
-/// \brief Dumps deserialized declarations.
-class DeserializedDeclsDumper : public ASTDeserializationListener {
+class DelegatingDeserializationListener : public ASTDeserializationListener {
ASTDeserializationListener *Previous;
public:
- DeserializedDeclsDumper(ASTDeserializationListener *Previous)
+ explicit DelegatingDeserializationListener(
+ ASTDeserializationListener *Previous)
: Previous(Previous) { }
+ virtual void ReaderInitialized(ASTReader *Reader) {
+ if (Previous)
+ Previous->ReaderInitialized(Reader);
+ }
+ virtual void IdentifierRead(serialization::IdentID ID,
+ IdentifierInfo *II) {
+ if (Previous)
+ Previous->IdentifierRead(ID, II);
+ }
+ virtual void TypeRead(serialization::TypeIdx Idx, QualType T) {
+ if (Previous)
+ Previous->TypeRead(Idx, T);
+ }
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+ if (Previous)
+ Previous->DeclRead(ID, D);
+ }
+ virtual void SelectorRead(serialization::SelectorID ID, Selector Sel) {
+ if (Previous)
+ Previous->SelectorRead(ID, Sel);
+ }
+ virtual void MacroDefinitionRead(serialization::PreprocessedEntityID PPID,
+ MacroDefinition *MD) {
+ if (Previous)
+ Previous->MacroDefinitionRead(PPID, MD);
+ }
+};
+
+/// \brief Dumps deserialized declarations.
+class DeserializedDeclsDumper : public DelegatingDeserializationListener {
+public:
+ explicit DeserializedDeclsDumper(ASTDeserializationListener *Previous)
+ : DelegatingDeserializationListener(Previous) { }
+
virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
llvm::outs() << "PCH DECL: " << D->getDeclKindName();
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
- llvm::outs() << " - " << ND->getNameAsString();
+ llvm::outs() << " - " << *ND;
llvm::outs() << "\n";
- if (Previous)
- Previous->DeclRead(ID, D);
+ DelegatingDeserializationListener::DeclRead(ID, D);
}
};
/// \brief Checks deserialized declarations and emits error if a name
/// matches one given in command-line using -error-on-deserialized-decl.
- class DeserializedDeclsChecker : public ASTDeserializationListener {
+ class DeserializedDeclsChecker : public DelegatingDeserializationListener {
ASTContext &Ctx;
std::set<std::string> NamesToCheck;
- ASTDeserializationListener *Previous;
public:
DeserializedDeclsChecker(ASTContext &Ctx,
const std::set<std::string> &NamesToCheck,
ASTDeserializationListener *Previous)
- : Ctx(Ctx), NamesToCheck(NamesToCheck), Previous(Previous) { }
+ : DelegatingDeserializationListener(Previous),
+ Ctx(Ctx), NamesToCheck(NamesToCheck) { }
virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
@@ -72,8 +106,7 @@ public:
<< ND->getNameAsString();
}
- if (Previous)
- Previous->DeclRead(ID, D);
+ DelegatingDeserializationListener::DeclRead(ID, D);
}
};
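The refactoring above is a straightforward decorator: DelegatingDeserializationListener forwards every callback to Previous, so subclasses override only the callbacks they care about and finish with a call to the base method to keep the chain intact. A minimal standalone sketch of the shape (hypothetical, trimmed interface):

    struct Listener {
      virtual ~Listener() {}
      virtual void DeclRead(unsigned ID) {}
    };

    struct Delegating : Listener {
      explicit Delegating(Listener *Previous) : Previous(Previous) {}
      virtual void DeclRead(unsigned ID) {
        if (Previous)
          Previous->DeclRead(ID);       // always forward down the chain
      }
    private:
      Listener *Previous;
    };

    struct Dumper : Delegating {
      explicit Dumper(Listener *Previous) : Delegating(Previous) {}
      virtual void DeclRead(unsigned ID) {
        // ... do the dumping work here ...
        Delegating::DeclRead(ID);       // then keep delegating
      }
    };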
@@ -83,10 +116,9 @@ FrontendAction::FrontendAction() : Instance(0) {}
FrontendAction::~FrontendAction() {}
-void FrontendAction::setCurrentFile(StringRef Value, InputKind Kind,
- ASTUnit *AST) {
- CurrentFile = Value;
- CurrentFileKind = Kind;
+void FrontendAction::setCurrentInput(const FrontendInputFile &CurrentInput,
+ ASTUnit *AST) {
+ this->CurrentInput = CurrentInput;
CurrentASTUnit.reset(AST);
}
@@ -112,7 +144,7 @@ ASTConsumer* FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
ie = FrontendPluginRegistry::end();
it != ie; ++it) {
if (it->getName() == CI.getFrontendOpts().AddPluginActions[i]) {
- llvm::OwningPtr<PluginASTAction> P(it->instantiate());
+ OwningPtr<PluginASTAction> P(it->instantiate());
FrontendAction* c = P.get();
if (P->ParseArgs(CI, CI.getFrontendOpts().AddPluginArgs[i]))
Consumers.push_back(c->CreateASTConsumer(CI, InFile));
@@ -124,11 +156,10 @@ ASTConsumer* FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
}
bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
- StringRef Filename,
- InputKind InputKind) {
+ const FrontendInputFile &Input) {
assert(!Instance && "Already processing a source file!");
- assert(!Filename.empty() && "Unexpected empty filename!");
- setCurrentFile(Filename, InputKind);
+ assert(!Input.File.empty() && "Unexpected empty filename!");
+ setCurrentInput(Input);
setCompilerInstance(&CI);
if (!BeginInvocation(CI))
@@ -136,20 +167,20 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// AST files follow a very different path, since they share objects via the
// AST unit.
- if (InputKind == IK_AST) {
+ if (Input.Kind == IK_AST) {
assert(!usesPreprocessorOnly() &&
"Attempt to pass AST file to preprocessor only action!");
assert(hasASTFileSupport() &&
"This action does not have AST file support!");
- llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
std::string Error;
- ASTUnit *AST = ASTUnit::LoadFromASTFile(Filename, Diags,
+ ASTUnit *AST = ASTUnit::LoadFromASTFile(Input.File, Diags,
CI.getFileSystemOpts());
if (!AST)
goto failure;
- setCurrentFile(Filename, InputKind, AST);
+ setCurrentInput(Input, AST);
// Set the shared objects, these are reset when we finish processing the
// file, otherwise the CompilerInstance will happily destroy them.
@@ -159,11 +190,11 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.setASTContext(&AST->getASTContext());
// Initialize the action.
- if (!BeginSourceFileAction(CI, Filename))
+ if (!BeginSourceFileAction(CI, Input.File))
goto failure;
/// Create the AST consumer.
- CI.setASTConsumer(CreateWrappedASTConsumer(CI, Filename));
+ CI.setASTConsumer(CreateWrappedASTConsumer(CI, Input.File));
if (!CI.hasASTConsumer())
goto failure;
@@ -177,7 +208,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.createSourceManager(CI.getFileManager());
// IR files bypass the rest of initialization.
- if (InputKind == IK_LLVM_IR) {
+ if (Input.Kind == IK_LLVM_IR) {
assert(hasIRSupport() &&
"This action does not have IR file support!");
@@ -185,7 +216,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), 0);
// Initialize the action.
- if (!BeginSourceFileAction(CI, Filename))
+ if (!BeginSourceFileAction(CI, Input.File))
goto failure;
return true;
@@ -199,7 +230,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
&CI.getPreprocessor());
// Initialize the action.
- if (!BeginSourceFileAction(CI, Filename))
+ if (!BeginSourceFileAction(CI, Input.File))
goto failure;
/// Create the AST context and consumer unless this is a preprocessor only
@@ -207,8 +238,8 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!usesPreprocessorOnly()) {
CI.createASTContext();
- llvm::OwningPtr<ASTConsumer> Consumer(
- CreateWrappedASTConsumer(CI, Filename));
+ OwningPtr<ASTConsumer> Consumer(
+ CreateWrappedASTConsumer(CI, Input.File));
if (!Consumer)
goto failure;
@@ -216,7 +247,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!CI.getPreprocessorOpts().ChainedIncludes.empty()) {
// Convert headers to PCH and chain them.
- llvm::OwningPtr<ExternalASTSource> source;
+ OwningPtr<ExternalASTSource> source;
source.reset(ChainedIncludesSource::create(CI));
if (!source)
goto failure;
@@ -237,6 +268,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.getPreprocessorOpts().ImplicitPCHInclude,
CI.getPreprocessorOpts().DisablePCHValidation,
CI.getPreprocessorOpts().DisableStatCache,
+ CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
DeserialListener);
if (!CI.getASTContext().getExternalSource())
goto failure;
@@ -252,9 +284,19 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!CI.hasASTContext() || !CI.getASTContext().getExternalSource()) {
Preprocessor &PP = CI.getPreprocessor();
PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
- PP.getLangOptions());
+ PP.getLangOpts());
}
+ // If there is a layout overrides file, attach an external AST source that
+ // provides the layouts from that file.
+ if (!CI.getFrontendOpts().OverrideRecordLayoutsFile.empty() &&
+ CI.hasASTContext() && !CI.getASTContext().getExternalSource()) {
+ OwningPtr<ExternalASTSource>
+ Override(new LayoutOverrideSource(
+ CI.getFrontendOpts().OverrideRecordLayoutsFile));
+ CI.getASTContext().setExternalSource(Override);
+ }
+
return true;
// If we failed, reset state since the client will not end up calling the
@@ -268,7 +310,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
}
CI.getDiagnosticClient().EndSourceFile();
- setCurrentFile("", IK_None);
+ setCurrentInput(FrontendInputFile());
setCompilerInstance(0);
return false;
}
@@ -278,17 +320,11 @@ void FrontendAction::Execute() {
// Initialize the main file entry. This needs to be delayed until after PCH
// has loaded.
- if (isCurrentFileAST()) {
- // Set the main file ID to an empty file.
- //
- // FIXME: We probably shouldn't need this, but for now this is the
- // simplest way to reuse the logic in ParseAST.
- const char *EmptyStr = "";
- llvm::MemoryBuffer *SB =
- llvm::MemoryBuffer::getMemBuffer(EmptyStr, "<dummy input>");
- CI.getSourceManager().createMainFileIDForMemBuffer(SB);
- } else {
- if (!CI.InitializeSourceManager(getCurrentFile()))
+ if (!isCurrentFileAST()) {
+ if (!CI.InitializeSourceManager(getCurrentFile(),
+ getCurrentInput().IsSystem
+ ? SrcMgr::C_System
+ : SrcMgr::C_User))
return;
}
@@ -352,7 +388,7 @@ void FrontendAction::EndSourceFile() {
}
setCompilerInstance(0);
- setCurrentFile("", IK_None);
+ setCurrentInput(FrontendInputFile());
}
//===----------------------------------------------------------------------===//
@@ -376,9 +412,12 @@ void ASTFrontendAction::ExecuteAction() {
if (!CI.hasSema())
CI.createSema(getTranslationUnitKind(), CompletionConsumer);
- ParseAST(CI.getSema(), CI.getFrontendOpts().ShowStats);
+ ParseAST(CI.getSema(), CI.getFrontendOpts().ShowStats,
+ CI.getFrontendOpts().SkipFunctionBodies);
}
+void PluginASTAction::anchor() { }
+
ASTConsumer *
PreprocessorFrontendAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
@@ -394,7 +433,7 @@ bool WrapperFrontendAction::BeginInvocation(CompilerInstance &CI) {
}
bool WrapperFrontendAction::BeginSourceFileAction(CompilerInstance &CI,
StringRef Filename) {
- WrappedAction->setCurrentFile(getCurrentFile(), getCurrentFileKind());
+ WrappedAction->setCurrentInput(getCurrentInput());
WrappedAction->setCompilerInstance(&CI);
return WrappedAction->BeginSourceFileAction(CI, Filename);
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
index 6f84da9..b4a439d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
@@ -9,6 +9,7 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/AST/ASTConsumer.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Parse/Parser.h"
@@ -20,9 +21,11 @@
#include "clang/Frontend/Utils.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
+#include <set>
using namespace clang;
@@ -85,8 +88,7 @@ ASTConsumer *GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI,
if (!CI.getFrontendOpts().RelocatablePCH)
Sysroot.clear();
- return new PCHGenerator(CI.getPreprocessor(), OutputFile, MakeModule,
- Sysroot, OS);
+ return new PCHGenerator(CI.getPreprocessor(), OutputFile, 0, Sysroot, OS);
}
bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
@@ -113,11 +115,317 @@ bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
return false;
}
+ASTConsumer *GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::string Sysroot;
+ std::string OutputFile;
+ raw_ostream *OS = 0;
+ if (ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile, OS))
+ return 0;
+
+ return new PCHGenerator(CI.getPreprocessor(), OutputFile, Module,
+ Sysroot, OS);
+}
+
+/// \brief Collect the set of header includes needed to construct the given
+/// module.
+///
+/// \param Module The module we're collecting includes from.
+///
+/// \param Includes Will be augmented with the set of #includes or #imports
+/// needed to load all of the named headers.
+static void collectModuleHeaderIncludes(const LangOptions &LangOpts,
+ FileManager &FileMgr,
+ ModuleMap &ModMap,
+ clang::Module *Module,
+ SmallString<256> &Includes) {
+ // Don't collect any headers for unavailable modules.
+ if (!Module->isAvailable())
+ return;
+
+ // Add includes for each of these headers.
+ for (unsigned I = 0, N = Module->Headers.size(); I != N; ++I) {
+ if (LangOpts.ObjC1)
+ Includes += "#import \"";
+ else
+ Includes += "#include \"";
+ Includes += Module->Headers[I]->getName();
+ Includes += "\"\n";
+ }
+
+ if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader()) {
+ if (Module->Parent) {
+ // Include the umbrella header for submodules.
+ if (LangOpts.ObjC1)
+ Includes += "#import \"";
+ else
+ Includes += "#include \"";
+ Includes += UmbrellaHeader->getName();
+ Includes += "\"\n";
+ }
+ } else if (const DirectoryEntry *UmbrellaDir = Module->getUmbrellaDir()) {
+ // Add all of the headers we find in this subdirectory.
+ llvm::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(UmbrellaDir->getName(), DirNative);
+ for (llvm::sys::fs::recursive_directory_iterator Dir(DirNative.str(), EC),
+ DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ // Check whether this entry has an extension typically associated with
+ // headers.
+ if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
+ .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Default(false))
+ continue;
+
+ // If this header is marked 'unavailable' in this module, don't include
+ // it.
+ if (const FileEntry *Header = FileMgr.getFile(Dir->path()))
+ if (ModMap.isHeaderInUnavailableModule(Header))
+ continue;
+
+ // Include this header, which we found inside the umbrella directory.
+ if (LangOpts.ObjC1)
+ Includes += "#import \"";
+ else
+ Includes += "#include \"";
+ Includes += Dir->path();
+ Includes += "\"\n";
+ }
+ }
+
+ // Recurse into submodules.
+ for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
+ SubEnd = Module->submodule_end();
+ Sub != SubEnd; ++Sub)
+ collectModuleHeaderIncludes(LangOpts, FileMgr, ModMap, *Sub, Includes);
+}
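For illustration (hypothetical module, not output from any real invocation): given an Objective-C module MyKit whose map lists headers A.h and B.h plus a submodule with umbrella header Sub.h, the routine above would synthesize roughly

    #import "A.h"
    #import "B.h"
    #import "Sub.h"

with #include substituted for #import when Objective-C support is off, and with full FileEntry paths in place of the bare names shown here.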
+
+bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ // Find the module map file.
+ const FileEntry *ModuleMap = CI.getFileManager().getFile(Filename);
+ if (!ModuleMap) {
+ CI.getDiagnostics().Report(diag::err_module_map_not_found)
+ << Filename;
+ return false;
+ }
+
+ // Parse the module map file.
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ if (HS.loadModuleMapFile(ModuleMap))
+ return false;
+
+ if (CI.getLangOpts().CurrentModule.empty()) {
+ CI.getDiagnostics().Report(diag::err_missing_module_name);
+
+ // FIXME: Eventually, we could consider asking whether there was just
+ // a single module described in the module map, and use that as a
+ // default. Then it would be fairly trivial to just "compile" a module
+ // map with a single module (the common case).
+ return false;
+ }
+
+ // Dig out the module definition.
+ Module = HS.lookupModule(CI.getLangOpts().CurrentModule,
+ /*AllowSearch=*/false);
+ if (!Module) {
+ CI.getDiagnostics().Report(diag::err_missing_module)
+ << CI.getLangOpts().CurrentModule << Filename;
+
+ return false;
+ }
+
+ // Check whether we can build this module at all.
+ StringRef Feature;
+ if (!Module->isAvailable(CI.getLangOpts(), CI.getTarget(), Feature)) {
+ CI.getDiagnostics().Report(diag::err_module_unavailable)
+ << Module->getFullModuleName()
+ << Feature;
+
+ return false;
+ }
+
+ // Do we have an umbrella header for this module?
+ const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader();
+
+ // Collect the set of #includes we need to build the module.
+ SmallString<256> HeaderContents;
+ collectModuleHeaderIncludes(CI.getLangOpts(), CI.getFileManager(),
+ CI.getPreprocessor().getHeaderSearchInfo().getModuleMap(),
+ Module, HeaderContents);
+ if (UmbrellaHeader && HeaderContents.empty()) {
+ // Simple case: we have an umbrella header and there are no additional
+ // includes, so we can just parse the umbrella header directly.
+ setCurrentInput(FrontendInputFile(UmbrellaHeader->getName(),
+ getCurrentFileKind(),
+ Module->IsSystem));
+ return true;
+ }
+
+ FileManager &FileMgr = CI.getFileManager();
+ SmallString<128> HeaderName;
+ time_t ModTime;
+ if (UmbrellaHeader) {
+ // Read in the umbrella header.
+ // FIXME: Go through the source manager; the umbrella header may have
+ // been overridden.
+ std::string ErrorStr;
+ llvm::MemoryBuffer *UmbrellaContents
+ = FileMgr.getBufferForFile(UmbrellaHeader, &ErrorStr);
+ if (!UmbrellaContents) {
+ CI.getDiagnostics().Report(diag::err_missing_umbrella_header)
+ << UmbrellaHeader->getName() << ErrorStr;
+ return false;
+ }
+
+ // Combine the contents of the umbrella header with the automatically-
+ // generated includes.
+ SmallString<256> OldContents = HeaderContents;
+ HeaderContents = UmbrellaContents->getBuffer();
+ HeaderContents += "\n\n";
+ HeaderContents += "/* Module includes */\n";
+ HeaderContents += OldContents;
+
+ // Pretend that we're parsing the umbrella header.
+ HeaderName = UmbrellaHeader->getName();
+ ModTime = UmbrellaHeader->getModificationTime();
+
+ delete UmbrellaContents;
+ } else {
+ // Pick an innocuous-sounding name for the umbrella header.
+ HeaderName = Module->Name + ".h";
+ if (FileMgr.getFile(HeaderName, /*OpenFile=*/false,
+ /*CacheFailure=*/false)) {
+ // Try again!
+ HeaderName = Module->Name + "-module.h";
+ if (FileMgr.getFile(HeaderName, /*OpenFile=*/false,
+ /*CacheFailure=*/false)) {
+ // Pick something ridiculous and go with it.
+ HeaderName = Module->Name + "-module.hmod";
+ }
+ }
+ ModTime = time(0);
+ }
+
+ // Remap the contents of the header name we're using to our synthesized
+ // buffer.
+ const FileEntry *HeaderFile = FileMgr.getVirtualFile(HeaderName,
+ HeaderContents.size(),
+ ModTime);
+ llvm::MemoryBuffer *HeaderContentsBuf
+ = llvm::MemoryBuffer::getMemBufferCopy(HeaderContents);
+ CI.getSourceManager().overrideFileContents(HeaderFile, HeaderContentsBuf);
+ setCurrentInput(FrontendInputFile(HeaderName, getCurrentFileKind(),
+ Module->IsSystem));
+ return true;
+}
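A minimal sketch of the remap idiom used above, with illustrative names (FileMgr, SourceMgr, and Contents are assumed to be in scope; this is not clang's exact setup code):

    // Register a virtual file of the right size, then serve the synthesized
    // buffer as its contents whenever that file is opened.
    const FileEntry *FE =
        FileMgr.getVirtualFile("MyKit.h", Contents.size(), time(0));
    SourceMgr.overrideFileContents(
        FE, llvm::MemoryBuffer::getMemBufferCopy(Contents));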
+
+bool GenerateModuleAction::ComputeASTConsumerArguments(CompilerInstance &CI,
+ StringRef InFile,
+ std::string &Sysroot,
+ std::string &OutputFile,
+ raw_ostream *&OS) {
+ // If no output file was provided, figure out where this module would go
+ // in the module cache.
+ if (CI.getFrontendOpts().OutputFile.empty()) {
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ SmallString<256> ModuleFileName(HS.getModuleCachePath());
+ llvm::sys::path::append(ModuleFileName,
+ CI.getLangOpts().CurrentModule + ".pcm");
+ CI.getFrontendOpts().OutputFile = ModuleFileName.str();
+ }
+
+ // We use createOutputFile here because this is exposed via libclang, and we
+ // must disable the RemoveFileOnSignal behavior.
+ // We use a temporary to avoid race conditions.
+ OS = CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
+ /*RemoveFileOnSignal=*/false, InFile,
+ /*Extension=*/"", /*useTemporary=*/true,
+ /*CreateMissingDirectories=*/true);
+ if (!OS)
+ return true;
+
+ OutputFile = CI.getFrontendOpts().OutputFile;
+ return false;
+}
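A standalone sketch of the default-output computation above, assuming a hypothetical cache directory /tmp/modcache and a current module named MyKit:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"

    void defaultModulePath() {
      llvm::SmallString<256> ModuleFileName("/tmp/modcache");
      // Joins with the native separator: "/tmp/modcache/MyKit.pcm".
      llvm::sys::path::append(ModuleFileName, "MyKit.pcm");
    }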
+
ASTConsumer *SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
return new ASTConsumer();
}
+namespace {
+ class PubnamesDumpConsumer : public ASTConsumer {
+ Preprocessor &PP;
+
+ /// \brief Determine whether the given identifier provides a 'public' name.
+ bool isPublicName(IdentifierInfo *II) {
+ // If there are any top-level declarations associated with this
+ // identifier, it is a public name.
+ if (II->getFETokenInfo<void>())
+ return true;
+
+ // If this identifier is the name of a non-builtin macro that isn't
+ // defined on the command line or implicitly by the front end, it is a
+ // public name.
+ if (II->hasMacroDefinition()) {
+ if (MacroInfo *M = PP.getMacroInfo(II))
+ if (!M->isBuiltinMacro()) {
+ SourceLocation Loc = M->getDefinitionLoc();
+ FileID File = PP.getSourceManager().getFileID(Loc);
+ if (PP.getSourceManager().getFileEntryForID(File))
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ public:
+ PubnamesDumpConsumer(Preprocessor &PP) : PP(PP) { }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ std::set<StringRef> Pubnames;
+
+ // Add the names of any non-builtin macros.
+ for (IdentifierTable::iterator I = Ctx.Idents.begin(),
+ IEnd = Ctx.Idents.end();
+ I != IEnd; ++I) {
+ if (isPublicName(I->second))
+ Pubnames.insert(I->first());
+ }
+
+ // If there is an external identifier lookup source, consider those
+ // identifiers as well.
+ if (IdentifierInfoLookup *External
+ = Ctx.Idents.getExternalIdentifierLookup()) {
+ OwningPtr<IdentifierIterator> Iter(External->getIdentifiers());
+ do {
+ StringRef Name = Iter->Next();
+ if (Name.empty())
+ break;
+
+ if (isPublicName(PP.getIdentifierInfo(Name)))
+ Pubnames.insert(Name);
+ } while (true);
+ }
+
+ // Print the names, in lexicographical order.
+ for (std::set<StringRef>::iterator N = Pubnames.begin(),
+ NEnd = Pubnames.end();
+ N != NEnd; ++N) {
+ llvm::outs() << *N << '\n';
+ }
+ }
+ };
+}
+
+ASTConsumer *PubnamesDumpAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return new PubnamesDumpConsumer(CI.getPreprocessor());
+}
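The consumer above leans on std::set for both uniquing and ordering. A self-contained sketch of that choice (plain std::string instead of StringRef, so the container owns its data):

    #include <iostream>
    #include <set>
    #include <string>

    int main() {
      std::set<std::string> Pubnames;
      Pubnames.insert("printf");
      Pubnames.insert("MY_MACRO");
      Pubnames.insert("main");
      Pubnames.insert("printf"); // duplicate; silently dropped

      // Prints MY_MACRO, main, printf: lexicographic order, no duplicates.
      for (std::set<std::string>::iterator N = Pubnames.begin(),
                                           NEnd = Pubnames.end();
           N != NEnd; ++N)
        std::cout << *N << '\n';
      return 0;
    }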
+
//===----------------------------------------------------------------------===//
// Preprocessor Actions
//===----------------------------------------------------------------------===//
@@ -128,7 +436,7 @@ void DumpRawTokensAction::ExecuteAction() {
// Start lexing the specified input file.
const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
- Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOptions());
+ Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
RawLex.SetKeepWhitespaceMode(true);
Token RawTok;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
index 3b41d89..79920df 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -11,6 +11,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -107,10 +108,10 @@ void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
// are showing all headers.
if (ShowHeader && Reason == PPCallbacks::EnterFile) {
// Write to a temporary string to avoid unnecessary flushing on errs().
- llvm::SmallString<512> Filename(UserLoc.getFilename());
+ SmallString<512> Filename(UserLoc.getFilename());
Lexer::Stringify(Filename);
- llvm::SmallString<256> Msg;
+ SmallString<256> Msg;
if (ShowDepth) {
// The main source file is at depth 1, so skip one dot.
for (unsigned i = 1; i != CurrentIncludeDepth; ++i)
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
index 1b1c551..7f01cd9 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -11,10 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#ifdef HAVE_CLANG_CONFIG_H
-# include "clang/Config/config.h"
-#endif
-
#include "clang/Frontend/Utils.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
@@ -30,7 +26,9 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
-#include "llvm/Config/config.h"
+
+#include "clang/Config/config.h" // C_INCLUDE_DIRS
+
#ifndef CLANG_PREFIX
#define CLANG_PREFIX
#endif
@@ -112,7 +110,7 @@ void InitHeaderSearch::AddPath(const Twine &Path,
FileManager &FM = Headers.getFileMgr();
// Compute the actual path, taking into consideration -isysroot.
- llvm::SmallString<256> MappedPathStorage;
+ SmallString<256> MappedPathStorage;
StringRef MappedPathStr = Path.toStringRef(MappedPathStorage);
// Handle isysroot.
@@ -336,19 +334,6 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
void InitHeaderSearch::
AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOptions &HSOpts) {
llvm::Triple::OSType os = triple.getOS();
- StringRef CxxIncludeRoot(CXX_INCLUDE_ROOT);
- if (CxxIncludeRoot != "") {
- StringRef CxxIncludeArch(CXX_INCLUDE_ARCH);
- if (CxxIncludeArch == "")
- AddGnuCPlusPlusIncludePaths(CxxIncludeRoot, triple.str().c_str(),
- CXX_INCLUDE_32BIT_DIR, CXX_INCLUDE_64BIT_DIR,
- triple);
- else
- AddGnuCPlusPlusIncludePaths(CxxIncludeRoot, CXX_INCLUDE_ARCH,
- CXX_INCLUDE_32BIT_DIR, CXX_INCLUDE_64BIT_DIR,
- triple);
- return;
- }
// FIXME: temporary hack: hard-coded paths.
if (triple.isOSDarwin()) {
@@ -391,11 +376,10 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
case llvm::Triple::Cygwin:
// Cygwin-1.7
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.5.3");
AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.4");
// g++-4 / Cygwin-1.5
AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.2");
- // FIXME: Do we support g++-3.4.4?
- AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "3.4.4");
break;
case llvm::Triple::MinGW32:
// mingw-w64 C++ include paths (i686-w64-mingw32 and x86_64-w64-mingw32)
@@ -403,12 +387,17 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.1");
AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.2");
AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.3");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.4");
AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.0");
AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.1");
AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.2");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.3");
AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.0");
// mingw.org C++ include paths
AddMinGWCPlusPlusIncludePaths("/mingw/lib/gcc", "mingw32", "4.5.2"); //MSYS
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.6.2");
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.6.1");
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.2");
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.0");
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.4.0");
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.3.0");
@@ -440,6 +429,8 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOp
"", "", "", triple);
break;
case llvm::Triple::Solaris:
+ AddGnuCPlusPlusIncludePaths("/usr/gcc/4.5/include/c++/4.5.2/",
+ "i386-pc-solaris2.11", "", "", triple);
// Solaris - Fall through.
case llvm::Triple::AuroraUX:
// AuroraUX
@@ -484,6 +475,11 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
AddPath(P.str(), CXXSystem, true, false, false, true);
}
}
+ // On Solaris, include the support directory for things like xlocale and
+ // fudged system headers.
+ if (triple.getOS() == llvm::Triple::Solaris)
+ AddPath("/usr/include/c++/v1/support/solaris", CXXSystem, true, false,
+ false);
AddPath("/usr/include/c++/v1", CXXSystem, true, false, false);
} else {
@@ -675,5 +671,13 @@ void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
Init.AddDefaultIncludePaths(Lang, Triple, HSOpts);
+ if (HSOpts.UseBuiltinIncludes) {
+ // Set up the builtin include directory in the module map.
+ llvm::sys::Path P(HSOpts.ResourceDir);
+ P.appendComponent("include");
+ if (const DirectoryEntry *Dir = HS.getFileMgr().getDirectory(P.str()))
+ HS.getModuleMap().setBuiltinIncludeDir(Dir);
+ }
+
Init.Realize(Lang);
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
index 6f49ec4..93d49b0 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
@@ -18,6 +18,7 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/PreprocessorOptions.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
@@ -48,39 +49,19 @@ static void DefineBuiltinMacro(MacroBuilder &Builder, StringRef Macro,
}
}
-std::string clang::NormalizeDashIncludePath(StringRef File,
- FileManager &FileMgr) {
- // Implicit include paths should be resolved relative to the current
- // working directory first, and then use the regular header search
- // mechanism. The proper way to handle this is to have the
- // predefines buffer located at the current working directory, but
- // it has no file entry. For now, workaround this by using an
- // absolute path if we find the file here, and otherwise letting
- // header search handle it.
- llvm::SmallString<128> Path(File);
- llvm::sys::fs::make_absolute(Path);
- bool exists;
- if (llvm::sys::fs::exists(Path.str(), exists) || !exists)
- Path = File;
- else if (exists)
- FileMgr.getFile(File);
-
- return Lexer::Stringify(Path.str());
-}
-
/// AddImplicitInclude - Add an implicit #include of the specified file to the
/// predefines buffer.
static void AddImplicitInclude(MacroBuilder &Builder, StringRef File,
FileManager &FileMgr) {
- Builder.append("#include \"" +
- Twine(NormalizeDashIncludePath(File, FileMgr)) + "\"");
+ Builder.append(Twine("#include \"") +
+ HeaderSearch::NormalizeDashIncludePath(File, FileMgr) + "\"");
}
static void AddImplicitIncludeMacros(MacroBuilder &Builder,
StringRef File,
FileManager &FileMgr) {
- Builder.append("#__include_macros \"" +
- Twine(NormalizeDashIncludePath(File, FileMgr)) + "\"");
+ Builder.append(Twine("#__include_macros \"") +
+ HeaderSearch::NormalizeDashIncludePath(File, FileMgr) + "\"");
// Marker token to stop the __include_macros fetch loop.
Builder.append("##"); // ##?
}
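For a command line carrying -include foo.h, the builder above contributes, in effect,

    #include "foo.h"

to the predefines buffer (after normalization by HeaderSearch::NormalizeDashIncludePath); -imacros instead uses the internal #__include_macros directive, followed by the ## marker token that terminates the macro-fetch loop.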
@@ -146,7 +127,7 @@ static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix,
"1.79769313486231580793728971405301e+308L",
"1.18973149535723176508575932662800702e+4932L");
- llvm::SmallString<32> DefPrefix;
+ SmallString<32> DefPrefix;
DefPrefix = "__";
DefPrefix += Prefix;
DefPrefix += "_";
@@ -221,6 +202,20 @@ static void DefineExactWidthIntType(TargetInfo::IntType Ty,
ConstSuffix);
}
+/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
+/// the specified properties.
+static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
+ unsigned InlineWidth) {
+ // Fully-aligned, power-of-2 sizes no larger than the inline
+ // width will be inlined as lock-free operations.
+ if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
+ TypeWidth <= InlineWidth)
+ return "2"; // "always lock free"
+ // We cannot be certain what operations the lib calls might be
+ // able to implement as lock-free on future processors.
+ return "1"; // "sometimes lock free"
+}
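A worked example of the predicate, as a standalone copy with asserted results (the widths are hypothetical target properties):

    #include <cassert>

    static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
                                        unsigned InlineWidth) {
      if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
          TypeWidth <= InlineWidth)
        return "2"; // always lock free
      return "1";   // sometimes lock free
    }

    int main() {
      // Fully aligned 32-bit type on a target that inlines 64-bit atomics.
      assert(*getLockFreeValue(32, 32, 64) == '2');
      // 96-bit type: under-aligned, not a power of two, wider than inline.
      assert(*getLockFreeValue(96, 32, 64) == '1');
      return 0;
    }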
+
/// \brief Add definitions required for a smooth interaction between
/// Objective-C++ automated reference counting and libstdc++ (4.2).
static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
@@ -278,7 +273,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
MacroBuilder &Builder) {
- if (!LangOpts.MicrosoftExt && !LangOpts.TraditionalCPP)
+ if (!LangOpts.MicrosoftMode && !LangOpts.TraditionalCPP)
Builder.defineMacro("__STDC__");
if (LangOpts.Freestanding)
Builder.defineMacro("__STDC_HOSTED__", "0");
@@ -286,7 +281,9 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__STDC_HOSTED__");
if (!LangOpts.CPlusPlus) {
- if (LangOpts.C99)
+ if (LangOpts.C11)
+ Builder.defineMacro("__STDC_VERSION__", "201112L");
+ else if (LangOpts.C99)
Builder.defineMacro("__STDC_VERSION__", "199901L");
else if (!LangOpts.GNUMode && LangOpts.Digraphs)
Builder.defineMacro("__STDC_VERSION__", "199409L");
@@ -336,11 +333,25 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
+ getClangFullRepositoryVersion() + ")\"");
#undef TOSTR
#undef TOSTR2
- // Currently claim to be compatible with GCC 4.2.1-5621.
- Builder.defineMacro("__GNUC_MINOR__", "2");
- Builder.defineMacro("__GNUC_PATCHLEVEL__", "1");
- Builder.defineMacro("__GNUC__", "4");
- Builder.defineMacro("__GXX_ABI_VERSION", "1002");
+ if (!LangOpts.MicrosoftMode) {
+ // Currently claim to be compatible with GCC 4.2.1-5621, but only if we're
+ // not compiling for MSVC compatibility.
+ Builder.defineMacro("__GNUC_MINOR__", "2");
+ Builder.defineMacro("__GNUC_PATCHLEVEL__", "1");
+ Builder.defineMacro("__GNUC__", "4");
+ Builder.defineMacro("__GXX_ABI_VERSION", "1002");
+ }
+
+ // Define macros for the C11 / C++11 memory orderings
+ Builder.defineMacro("__ATOMIC_RELAXED", "0");
+ Builder.defineMacro("__ATOMIC_CONSUME", "1");
+ Builder.defineMacro("__ATOMIC_ACQUIRE", "2");
+ Builder.defineMacro("__ATOMIC_RELEASE", "3");
+ Builder.defineMacro("__ATOMIC_ACQ_REL", "4");
+ Builder.defineMacro("__ATOMIC_SEQ_CST", "5");
+
+ // Support for #pragma redefine_extname (Sun compatibility)
+ Builder.defineMacro("__PRAGMA_REDEFINE_EXTNAME", "1");
// As sad as it is, enough software depends on the __VERSION__ for version
// checks that it is necessary to report 4.2.1 (the base GCC version we claim
@@ -428,6 +439,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.OptimizeSize)
Builder.defineMacro("__OPTIMIZE_SIZE__");
+ if (LangOpts.FastMath)
+ Builder.defineMacro("__FAST_MATH__");
+
// Initialize target-specific preprocessor defines.
// Define type sizing macros based on the target properties.
@@ -521,16 +535,46 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
else
Builder.defineMacro("__GNUC_STDC_INLINE__");
- if (LangOpts.NoInline)
+ // The value written by __atomic_test_and_set.
+ // FIXME: This is target-dependent.
+ Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1");
+
+ // Used by libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
+ unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth();
+#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
+ Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
+ getLockFreeValue(TI.get##Type##Width(), \
+ TI.get##Type##Align(), \
+ InlineWidthBits));
+ DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
+ DEFINE_LOCK_FREE_MACRO(CHAR, Char);
+ DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
+ DEFINE_LOCK_FREE_MACRO(CHAR32_T, Char32);
+ DEFINE_LOCK_FREE_MACRO(WCHAR_T, WChar);
+ DEFINE_LOCK_FREE_MACRO(SHORT, Short);
+ DEFINE_LOCK_FREE_MACRO(INT, Int);
+ DEFINE_LOCK_FREE_MACRO(LONG, Long);
+ DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
+ Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
+ getLockFreeValue(TI.getPointerWidth(0),
+ TI.getPointerAlign(0),
+ InlineWidthBits));
+#undef DEFINE_LOCK_FREE_MACRO
+
+ if (LangOpts.NoInlineDefine)
Builder.defineMacro("__NO_INLINE__");
if (unsigned PICLevel = LangOpts.PICLevel) {
Builder.defineMacro("__PIC__", Twine(PICLevel));
Builder.defineMacro("__pic__", Twine(PICLevel));
}
+ if (unsigned PIELevel = LangOpts.PIELevel) {
+ Builder.defineMacro("__PIE__", Twine(PIELevel));
+ Builder.defineMacro("__pie__", Twine(PIELevel));
+ }
// Macros to control C99 numerics and <float.h>
- Builder.defineMacro("__FLT_EVAL_METHOD__", "0");
+ Builder.defineMacro("__FLT_EVAL_METHOD__", Twine(TI.getFloatEvalMethod()));
Builder.defineMacro("__FLT_RADIX__", "2");
int Dig = PickFP(&TI.getLongDoubleFormat(), -1/*FIXME*/, 17, 21, 33, 36);
Builder.defineMacro("__DECIMAL_DIG__", Twine(Dig));
@@ -632,7 +676,7 @@ void clang::InitializePreprocessor(Preprocessor &PP,
const PreprocessorOptions &InitOpts,
const HeaderSearchOptions &HSOpts,
const FrontendOptions &FEOpts) {
- const LangOptions &LangOpts = PP.getLangOptions();
+ const LangOptions &LangOpts = PP.getLangOpts();
std::string PredefineBuffer;
PredefineBuffer.reserve(4080);
llvm::raw_string_ostream Predefines(PredefineBuffer);
@@ -641,14 +685,10 @@ void clang::InitializePreprocessor(Preprocessor &PP,
InitializeFileRemapping(PP.getDiagnostics(), PP.getSourceManager(),
PP.getFileManager(), InitOpts);
- // Specify whether the preprocessor should replace #include/#import with
- // module imports when plausible.
- PP.setAutoModuleImport(InitOpts.AutoModuleImport);
-
// Emit line markers for various builtin sections of the file. We don't do
// this in asm preprocessor mode, because "# 4" is not a line marker directive
// in this mode.
- if (!PP.getLangOptions().AsmPreprocessor)
+ if (!PP.getLangOpts().AsmPreprocessor)
Builder.append("# 1 \"<built-in>\" 3");
// Install things like __POWERPC__, __GNUC__, etc into the macro table.
@@ -673,12 +713,12 @@ void clang::InitializePreprocessor(Preprocessor &PP,
// Even with predefines off, some macros are still predefined.
// These should all be defined in the preprocessor according to the
// current language configuration.
- InitializeStandardPredefinedMacros(PP.getTargetInfo(), PP.getLangOptions(),
+ InitializeStandardPredefinedMacros(PP.getTargetInfo(), PP.getLangOpts(),
FEOpts, Builder);
// Add on the predefines from the driver. Wrap in a #line directive to report
// that they come from the command line.
- if (!PP.getLangOptions().AsmPreprocessor)
+ if (!PP.getLangOpts().AsmPreprocessor)
Builder.append("# 1 \"<command line>\" 1");
// Process #define's and #undef's in the order they are given.
@@ -706,7 +746,7 @@ void clang::InitializePreprocessor(Preprocessor &PP,
}
// Exit the command line and go back to <built-in> (2 is LC_LEAVE).
- if (!PP.getLangOptions().AsmPreprocessor)
+ if (!PP.getLangOpts().AsmPreprocessor)
Builder.append("# 1 \"<built-in>\" 2");
// Instruct the preprocessor to skip the preamble.
@@ -718,6 +758,6 @@ void clang::InitializePreprocessor(Preprocessor &PP,
// Initialize the header search object.
ApplyHeaderSearchOptions(PP.getHeaderSearchInfo(), HSOpts,
- PP.getLangOptions(),
+ PP.getLangOpts(),
PP.getTargetInfo().getTriple());
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp b/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp
index abb521b..f86a574 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp
@@ -19,14 +19,13 @@ using namespace clang::frontend;
const LangStandard &LangStandard::getLangStandardForKind(Kind K) {
switch (K) {
- default:
- llvm_unreachable("Invalid language kind!");
case lang_unspecified:
llvm::report_fatal_error("getLangStandardForKind() on unspecified kind");
#define LANGSTANDARD(id, name, desc, features) \
case lang_##id: return Lang_##id;
#include "clang/Frontend/LangStandards.def"
}
+ llvm_unreachable("Invalid language kind!");
}
const LangStandard *LangStandard::getLangStandardForName(StringRef Name) {
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
new file mode 100644
index 0000000..eb7865e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
@@ -0,0 +1,206 @@
+//===--- LayoutOverrideSource.cpp - Override Record Layouts --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Frontend/LayoutOverrideSource.h"
+#include "clang/AST/Decl.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fstream>
+#include <string>
+
+using namespace clang;
+
+/// \brief Parse a simple identifier.
+static std::string parseName(StringRef S) {
+ unsigned Offset = 0;
+ while (Offset < S.size() &&
+ (isalpha(S[Offset]) || S[Offset] == '_' ||
+ (Offset > 0 && isdigit(S[Offset]))))
+ ++Offset;
+
+ return S.substr(0, Offset).str();
+}
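Worked examples of this scanner (restated here, not taken from the source):

    parseName("Point3D  (|rest|)") == "Point3D"  // digits allowed after the first character
    parseName("3DPoint")           == ""         // a leading digit stops the scan immediately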
+
+LayoutOverrideSource::LayoutOverrideSource(llvm::StringRef Filename) {
+ std::ifstream Input(Filename.str().c_str());
+ if (!Input.is_open())
+ return;
+
+ // Parse the output of -fdump-record-layouts.
+ std::string CurrentType;
+ Layout CurrentLayout;
+ bool ExpectingType = false;
+
+ while (Input.good()) {
+ std::string Line;
+ getline(Input, Line);
+
+ StringRef LineStr(Line);
+
+ // Determine whether this line starts a new record layout dump.
+ if (LineStr.find("*** Dumping AST Record Layout") != StringRef::npos) {
+ // Flush the last type/layout, if there is one.
+ if (!CurrentType.empty())
+ Layouts[CurrentType] = CurrentLayout;
+ CurrentLayout = Layout();
+
+ ExpectingType = true;
+ continue;
+ }
+
+ // If we're expecting a type, grab it.
+ if (ExpectingType) {
+ ExpectingType = false;
+
+ StringRef::size_type Pos;
+ if ((Pos = LineStr.find("struct ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("struct "));
+ else if ((Pos = LineStr.find("class ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("class "));
+ else if ((Pos = LineStr.find("union ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("union "));
+ else
+ continue;
+
+ // Find the name of the type.
+ CurrentType = parseName(LineStr);
+ CurrentLayout = Layout();
+ continue;
+ }
+
+ // Check for the size of the type.
+ StringRef::size_type Pos = LineStr.find(" Size:");
+ if (Pos != StringRef::npos) {
+ // Skip past the " Size:" prefix.
+ LineStr = LineStr.substr(Pos + strlen(" Size:"));
+
+ unsigned long long Size = 0;
+ (void)LineStr.getAsInteger(10, Size);
+ CurrentLayout.Size = Size;
+ continue;
+ }
+
+ // Check for the alignment of the type.
+ Pos = LineStr.find("Alignment:");
+ if (Pos != StringRef::npos) {
+ // Skip past the "Alignment:" prefix.
+ LineStr = LineStr.substr(Pos + strlen("Alignment:"));
+
+ unsigned long long Alignment = 0;
+ (void)LineStr.getAsInteger(10, Alignment);
+ CurrentLayout.Align = Alignment;
+ continue;
+ }
+
+ // Check for the size/alignment of the type.
+ Pos = LineStr.find("sizeof=");
+ if (Pos != StringRef::npos) {
+ // Skip past the "sizeof=" prefix.
+ LineStr = LineStr.substr(Pos + strlen("sizeof="));
+
+ // Parse size.
+ unsigned long long Size = 0;
+ (void)LineStr.getAsInteger(10, Size);
+ CurrentLayout.Size = Size;
+
+ Pos = LineStr.find("align=");
+ if (Pos != StringRef::npos) {
+ // Skip past the "align=" prefix.
+ LineStr = LineStr.substr(Pos + strlen("align="));
+
+ // Parse alignment.
+ unsigned long long Alignment = 0;
+ (void)LineStr.getAsInteger(10, Alignment);
+ CurrentLayout.Align = Alignment;
+ }
+
+ continue;
+ }
+
+ // Check for the field offsets of the type.
+ Pos = LineStr.find("FieldOffsets: [");
+ if (Pos == StringRef::npos)
+ continue;
+
+ LineStr = LineStr.substr(Pos + strlen("FieldOffsets: ["));
+ while (!LineStr.empty() && isdigit(LineStr[0])) {
+ // Parse this offset.
+ unsigned Idx = 1;
+ while (Idx < LineStr.size() && isdigit(LineStr[Idx]))
+ ++Idx;
+
+ unsigned long long Offset = 0;
+ (void)LineStr.substr(0, Idx).getAsInteger(10, Offset);
+
+ CurrentLayout.FieldOffsets.push_back(Offset);
+
+ // Skip over this offset, the following comma, and any spaces.
+ LineStr = LineStr.substr(Idx + 1);
+ while (!LineStr.empty() && isspace(LineStr[0]))
+ LineStr = LineStr.substr(1);
+ }
+ }
+
+ // Flush the last type/layout, if there is one.
+ if (!CurrentType.empty())
+ Layouts[CurrentType] = CurrentLayout;
+}
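The parser above keys on a handful of substrings rather than a full grammar. An illustrative fragment in the shape it accepts (hypothetical, not verbatim -fdump-record-layouts output; note that the literal " Size:" requires a leading space):

    *** Dumping AST Record Layout
    Dumping struct Point
      Size:64
      Alignment:32
      FieldOffsets: [0, 32]

This would record a layout for Point with a 64-bit size, 32-bit alignment, and two field offsets.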
+
+bool
+LayoutOverrideSource::layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets)
+{
+ // We can't override unnamed declarations.
+ if (!Record->getIdentifier())
+ return false;
+
+ // Check whether we have a layout for this record.
+ llvm::StringMap<Layout>::iterator Known = Layouts.find(Record->getName());
+ if (Known == Layouts.end())
+ return false;
+
+ // Provide field layouts.
+ unsigned NumFields = 0;
+ for (RecordDecl::field_iterator F = Record->field_begin(),
+ FEnd = Record->field_end();
+ F != FEnd; ++F, ++NumFields) {
+ if (NumFields >= Known->second.FieldOffsets.size())
+ continue;
+
+ FieldOffsets[*F] = Known->second.FieldOffsets[NumFields];
+ }
+
+ // Wrong number of fields.
+ if (NumFields != Known->second.FieldOffsets.size())
+ return false;
+
+ Size = Known->second.Size;
+ Alignment = Known->second.Align;
+ return true;
+}
+
+void LayoutOverrideSource::dump() {
+ llvm::raw_ostream &OS = llvm::errs();
+ for (llvm::StringMap<Layout>::iterator L = Layouts.begin(),
+ LEnd = Layouts.end();
+ L != LEnd; ++L) {
+ OS << "Type: blah " << L->first() << '\n';
+ OS << " Size:" << L->second.Size << '\n';
+ OS << " Alignment:" << L->second.Align << '\n';
+ OS << " FieldOffsets: [";
+ for (unsigned I = 0, N = L->second.FieldOffsets.size(); I != N; ++I) {
+ if (I)
+ OS << ", ";
+ OS << L->second.FieldOffsets[I];
+ }
+ OS << "]\n";
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
index 8b585be..3fee957 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -12,6 +12,7 @@
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace clang;
LogDiagnosticPrinter::LogDiagnosticPrinter(raw_ostream &os,
@@ -28,14 +29,13 @@ LogDiagnosticPrinter::~LogDiagnosticPrinter() {
static StringRef getLevelName(DiagnosticsEngine::Level Level) {
switch (Level) {
- default:
- return "<unknown>";
case DiagnosticsEngine::Ignored: return "ignored";
case DiagnosticsEngine::Note: return "note";
case DiagnosticsEngine::Warning: return "warning";
case DiagnosticsEngine::Error: return "error";
case DiagnosticsEngine::Fatal: return "fatal error";
}
+ llvm_unreachable("Invalid DiagnosticsEngine level!");
}
// Escape XML characters inside the raw string.
@@ -64,7 +64,7 @@ void LogDiagnosticPrinter::EndSourceFile() {
return;
// Write to a temporary string to ensure atomic write of diagnostic object.
- llvm::SmallString<512> Msg;
+ SmallString<512> Msg;
llvm::raw_svector_ostream OS(Msg);
OS << "<dict>\n";
@@ -140,7 +140,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
DE.DiagnosticLevel = Level;
// Format the message.
- llvm::SmallString<100> MessageStr;
+ SmallString<100> MessageStr;
Info.FormatDiagnostic(MessageStr);
DE.Message = MessageStr.str();
diff --git a/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
index 8e746f6..992eeb0 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -89,7 +89,7 @@ void MultiplexASTDeserializationListener::MacroDefinitionRead(
class MultiplexASTMutationListener : public ASTMutationListener {
public:
// Does NOT take ownership of the elements in L.
- MultiplexASTMutationListener(const std::vector<ASTMutationListener*>& L);
+ MultiplexASTMutationListener(ArrayRef<ASTMutationListener*> L);
virtual void CompletedTagDefinition(const TagDecl *D);
virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D);
virtual void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D);
@@ -99,13 +99,18 @@ public:
const FunctionDecl *D);
virtual void CompletedImplicitDefinition(const FunctionDecl *D);
virtual void StaticDataMemberInstantiated(const VarDecl *D);
+ virtual void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD);
+ virtual void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt);
private:
std::vector<ASTMutationListener*> Listeners;
};
MultiplexASTMutationListener::MultiplexASTMutationListener(
- const std::vector<ASTMutationListener*>& L)
- : Listeners(L) {
+ ArrayRef<ASTMutationListener*> L)
+ : Listeners(L.begin(), L.end()) {
}
void MultiplexASTMutationListener::CompletedTagDefinition(const TagDecl *D) {
@@ -144,12 +149,26 @@ void MultiplexASTMutationListener::StaticDataMemberInstantiated(
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->StaticDataMemberInstantiated(D);
}
+void MultiplexASTMutationListener::AddedObjCCategoryToInterface(
+ const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedObjCCategoryToInterface(CatD, IFD);
+}
+void MultiplexASTMutationListener::AddedObjCPropertyInClassExtension(
+ const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedObjCPropertyInClassExtension(Prop, OrigProp, ClassExt);
+}
} // end namespace clang
-MultiplexConsumer::MultiplexConsumer(const std::vector<ASTConsumer*>& C)
- : Consumers(C), MutationListener(0), DeserializationListener(0) {
+MultiplexConsumer::MultiplexConsumer(ArrayRef<ASTConsumer*> C)
+ : Consumers(C.begin(), C.end()),
+ MutationListener(0), DeserializationListener(0) {
// Collect the mutation listeners and deserialization listeners of all
// children, and create a multiplex listener each if so.
std::vector<ASTMutationListener*> mutationListeners;
@@ -183,9 +202,16 @@ void MultiplexConsumer::Initialize(ASTContext &Context) {
Consumers[i]->Initialize(Context);
}
-void MultiplexConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+bool MultiplexConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ bool Continue = true;
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Continue = Continue && Consumers[i]->HandleTopLevelDecl(D);
+ return Continue;
+}
+
+void MultiplexConsumer::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
for (size_t i = 0, e = Consumers.size(); i != e; ++i)
- Consumers[i]->HandleTopLevelDecl(D);
+ Consumers[i]->HandleCXXStaticMemberVarInstantiation(VD);
}
void MultiplexConsumer::HandleInterestingDecl(DeclGroupRef D) {
@@ -203,6 +229,16 @@ void MultiplexConsumer::HandleTagDeclDefinition(TagDecl *D) {
Consumers[i]->HandleTagDeclDefinition(D);
}
+void MultiplexConsumer::HandleCXXImplicitFunctionInstantiation(FunctionDecl *D){
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleCXXImplicitFunctionInstantiation(D);
+}
+
+void MultiplexConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleTopLevelDeclInObjCContainer(D);
+}
+
void MultiplexConsumer::CompleteTentativeDefinition(VarDecl *D) {
for (size_t i = 0, e = Consumers.size(); i != e; ++i)
Consumers[i]->CompleteTentativeDefinition(D);
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 8a61f96..9e1587c 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -24,7 +24,6 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Config/config.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdio>
@@ -63,7 +62,7 @@ static void PrintMacroDefinition(const IdentifierInfo &II, const MacroInfo &MI,
if (MI.tokens_empty() || !MI.tokens_begin()->hasLeadingSpace())
OS << ' ';
- llvm::SmallString<128> SpellingBuffer;
+ SmallString<128> SpellingBuffer;
for (MacroInfo::tokens_iterator I = MI.tokens_begin(), E = MI.tokens_end();
I != E; ++I) {
if (I->hasLeadingSpace())
@@ -90,7 +89,7 @@ private:
bool EmittedTokensOnThisLine;
bool EmittedMacroOnThisLine;
SrcMgr::CharacteristicKind FileType;
- llvm::SmallString<512> CurFilename;
+ SmallString<512> CurFilename;
bool Initialized;
bool DisableLineMarkers;
bool DumpDefines;
@@ -109,7 +108,7 @@ public:
Initialized = false;
// If we're in microsoft mode, use normal #line instead of line markers.
- UseLineDirective = PP.getLangOptions().MicrosoftExt;
+ UseLineDirective = PP.getLangOpts().MicrosoftExt;
}
void SetEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
@@ -390,7 +389,6 @@ PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
MoveToLine(Loc);
OS << "#pragma " << Namespace << " diagnostic ";
switch (Map) {
- default: llvm_unreachable("unexpected diagnostic kind");
case diag::MAP_WARNING:
OS << "warning";
break;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
new file mode 100644
index 0000000..7bf8742
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -0,0 +1,592 @@
+//===--- SerializedDiagnosticPrinter.cpp - Serializer for diagnostics -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include <vector>
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/DenseSet.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/Version.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Frontend/SerializedDiagnosticPrinter.h"
+#include "clang/Frontend/DiagnosticRenderer.h"
+
+using namespace clang;
+using namespace clang::serialized_diags;
+
+namespace {
+
+class AbbreviationMap {
+ llvm::DenseMap<unsigned, unsigned> Abbrevs;
+public:
+ AbbreviationMap() {}
+
+ void set(unsigned recordID, unsigned abbrevID) {
+ assert(Abbrevs.find(recordID) == Abbrevs.end()
+ && "Abbreviation already set.");
+ Abbrevs[recordID] = abbrevID;
+ }
+
+ unsigned get(unsigned recordID) {
+ assert(Abbrevs.find(recordID) != Abbrevs.end() &&
+ "Abbreviation not set.");
+ return Abbrevs[recordID];
+ }
+};
+
+typedef llvm::SmallVector<uint64_t, 64> RecordData;
+typedef llvm::SmallVectorImpl<uint64_t> RecordDataImpl;
+
+class SDiagsWriter;
+
+class SDiagsRenderer : public DiagnosticNoteRenderer {
+ SDiagsWriter &Writer;
+ RecordData &Record;
+public:
+ SDiagsRenderer(SDiagsWriter &Writer, RecordData &Record,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts)
+ : DiagnosticNoteRenderer(SM, LangOpts, DiagOpts),
+ Writer(Writer), Record(Record){}
+
+ virtual ~SDiagsRenderer() {}
+
+protected:
+ virtual void emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ DiagOrStoredDiag D);
+
+ virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges) {}
+
+ void emitNote(SourceLocation Loc, StringRef Message);
+
+ virtual void emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints);
+
+ virtual void beginDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level);
+ virtual void endDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level);
+};
+
+class SDiagsWriter : public DiagnosticConsumer {
+ friend class SDiagsRenderer;
+public:
+ explicit SDiagsWriter(llvm::raw_ostream *os, const DiagnosticOptions &diags)
+ : LangOpts(0), DiagOpts(diags),
+ Stream(Buffer), OS(os), inNonNoteDiagnostic(false)
+ {
+ EmitPreamble();
+ }
+
+ ~SDiagsWriter() {}
+
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info);
+
+ void BeginSourceFile(const LangOptions &LO,
+ const Preprocessor *PP) {
+ LangOpts = &LO;
+ }
+
+ virtual void finish();
+
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const {
+ // It makes no sense to clone this.
+ return 0;
+ }
+
+private:
+ /// \brief Emit the preamble for the serialized diagnostics.
+ void EmitPreamble();
+
+ /// \brief Emit the BLOCKINFO block.
+ void EmitBlockInfoBlock();
+
+ /// \brief Emit the META data block.
+ void EmitMetaBlock();
+
+ /// \brief Emit a record for a CharSourceRange.
+ void EmitCharSourceRange(CharSourceRange R, const SourceManager &SM);
+
+ /// \brief Emit the string information for the category.
+ unsigned getEmitCategory(unsigned category = 0);
+
+ /// \brief Emit the string information for diagnostic flags.
+ unsigned getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
+ unsigned DiagID = 0);
+
+ /// \brief Emit (lazily) the file string and retrieve the file identifier.
+ unsigned getEmitFile(const char *Filename);
+
+ /// \brief Add SourceLocation information to the specified record.
+ void AddLocToRecord(SourceLocation Loc, const SourceManager &SM,
+ PresumedLoc PLoc, RecordDataImpl &Record,
+ unsigned TokSize = 0);
+
+ /// \brief Add SourceLocation information to the specified record.
+ void AddLocToRecord(SourceLocation Loc, RecordDataImpl &Record,
+ const SourceManager &SM,
+ unsigned TokSize = 0) {
+ AddLocToRecord(Loc, SM, SM.getPresumedLoc(Loc), Record, TokSize);
+ }
+
+ /// \brief Add CharSourceRange information to the specified record.
+ void AddCharSourceRangeToRecord(CharSourceRange R, RecordDataImpl &Record,
+ const SourceManager &SM);
+
+ /// \brief The version of the diagnostics file.
+ enum { Version = 1 };
+
+ const LangOptions *LangOpts;
+ const DiagnosticOptions &DiagOpts;
+
+ /// \brief The byte buffer for the serialized content.
+ SmallString<1024> Buffer;
+
+ /// \brief The BitStreamWriter for the serialized diagnostics.
+ llvm::BitstreamWriter Stream;
+
+ /// \brief The name of the diagnostics file.
+ OwningPtr<llvm::raw_ostream> OS;
+
+ /// \brief The set of constructed record abbreviations.
+ AbbreviationMap Abbrevs;
+
+ /// \brief A utility buffer for constructing record content.
+ RecordData Record;
+
+ /// \brief A text buffer for rendering diagnostic text.
+ SmallString<256> diagBuf;
+
+ /// \brief The collection of diagnostic categories used.
+ llvm::DenseSet<unsigned> Categories;
+
+ /// \brief The collection of files used.
+ llvm::DenseMap<const char *, unsigned> Files;
+
+ typedef llvm::DenseMap<const void *, std::pair<unsigned, llvm::StringRef> >
+ DiagFlagsTy;
+
+ /// \brief Map for uniquing strings.
+ DiagFlagsTy DiagFlags;
+
+ /// \brief Flag indicating whether or not we are in the process of
+ /// emitting a non-note diagnostic.
+ bool inNonNoteDiagnostic;
+};
+} // end anonymous namespace
+
+namespace clang {
+namespace serialized_diags {
+DiagnosticConsumer *create(llvm::raw_ostream *OS,
+ const DiagnosticOptions &diags) {
+ return new SDiagsWriter(OS, diags);
+}
+} // end namespace serialized_diags
+} // end namespace clang
+
+//===----------------------------------------------------------------------===//
+// Serialization methods.
+//===----------------------------------------------------------------------===//
+
+/// \brief Emits a block ID in the BLOCKINFO block.
+static void EmitBlockID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ RecordDataImpl &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID, Record);
+
+ // Emit the block name if present.
+ if (Name == 0 || Name[0] == 0)
+ return;
+
+ Record.clear();
+
+ while (*Name)
+ Record.push_back(*Name++);
+
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_BLOCKNAME, Record);
+}
+
+/// \brief Emits a record ID in the BLOCKINFO block.
+static void EmitRecordID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ RecordDataImpl &Record){
+ Record.clear();
+ Record.push_back(ID);
+
+ while (*Name)
+ Record.push_back(*Name++);
+
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETRECORDNAME, Record);
+}
+
+void SDiagsWriter::AddLocToRecord(SourceLocation Loc,
+ const SourceManager &SM,
+ PresumedLoc PLoc,
+ RecordDataImpl &Record,
+ unsigned TokSize) {
+ if (PLoc.isInvalid()) {
+ // Emit a "sentinel" location.
+ Record.push_back((unsigned)0); // File.
+ Record.push_back((unsigned)0); // Line.
+ Record.push_back((unsigned)0); // Column.
+ Record.push_back((unsigned)0); // Offset.
+ return;
+ }
+
+ Record.push_back(getEmitFile(PLoc.getFilename()));
+ Record.push_back(PLoc.getLine());
+ Record.push_back(PLoc.getColumn()+TokSize);
+ Record.push_back(SM.getFileOffset(Loc));
+}
+
+void SDiagsWriter::AddCharSourceRangeToRecord(CharSourceRange Range,
+ RecordDataImpl &Record,
+ const SourceManager &SM) {
+ AddLocToRecord(Range.getBegin(), Record, SM);
+ unsigned TokSize = 0;
+ if (Range.isTokenRange())
+ TokSize = Lexer::MeasureTokenLength(Range.getEnd(),
+ SM, *LangOpts);
+
+ AddLocToRecord(Range.getEnd(), Record, SM, TokSize);
+}
+
+unsigned SDiagsWriter::getEmitFile(const char *FileName){
+ if (!FileName)
+ return 0;
+
+ unsigned &entry = Files[FileName];
+ if (entry)
+ return entry;
+
+ // Lazily generate the record for the file.
+ entry = Files.size();
+ RecordData Record;
+ Record.push_back(RECORD_FILENAME);
+ Record.push_back(entry);
+ Record.push_back(0); // For legacy.
+ Record.push_back(0); // For legacy.
+ StringRef Name(FileName);
+ Record.push_back(Name.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_FILENAME), Record, Name);
+
+ return entry;
+}
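The uniquing idiom above in miniature (std::map keyed by string to keep the sketch self-contained; the real code uses llvm::DenseMap keyed on pointer identity):

    #include <cassert>
    #include <map>
    #include <string>

    static std::map<std::string, unsigned> IDs;

    unsigned getID(const std::string &Name) {
      unsigned &Entry = IDs[Name]; // value-initialized to 0 on first access
      if (Entry)
        return Entry;              // already numbered
      Entry = IDs.size();          // IDs start at 1; 0 means "no file"
      return Entry;
    }

    int main() {
      assert(getID("a.c") == 1);
      assert(getID("b.c") == 2);
      assert(getID("a.c") == 1);   // repeat lookups reuse the first ID
      return 0;
    }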
+
+void SDiagsWriter::EmitCharSourceRange(CharSourceRange R,
+ const SourceManager &SM) {
+ Record.clear();
+ Record.push_back(RECORD_SOURCE_RANGE);
+ AddCharSourceRangeToRecord(R, Record, SM);
+ Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_SOURCE_RANGE), Record);
+}
+
+/// \brief Emits the preamble of the diagnostics file.
+void SDiagsWriter::EmitPreamble() {
+ // Emit the file header.
+ Stream.Emit((unsigned)'D', 8);
+ Stream.Emit((unsigned)'I', 8);
+ Stream.Emit((unsigned)'A', 8);
+ Stream.Emit((unsigned)'G', 8);
+
+ EmitBlockInfoBlock();
+ EmitMetaBlock();
+}
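A sketch of how a reader could recognize the resulting file by its four-byte magic (a hypothetical helper, not part of this patch):

    #include <cstdio>
    #include <cstring>

    bool looksLikeSerializedDiags(const char *Path) {
      char Magic[4] = {0, 0, 0, 0};
      std::FILE *F = std::fopen(Path, "rb");
      if (!F)
        return false;
      size_t N = std::fread(Magic, 1, 4, F);
      std::fclose(F);
      return N == 4 && std::memcmp(Magic, "DIAG", 4) == 0;
    }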
+
+static void AddSourceLocationAbbrev(llvm::BitCodeAbbrev *Abbrev) {
+ using namespace llvm;
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // File ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Line.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Column.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Offset.
+}
+
+static void AddRangeLocationAbbrev(llvm::BitCodeAbbrev *Abbrev) {
+ AddSourceLocationAbbrev(Abbrev);
+ AddSourceLocationAbbrev(Abbrev);
+}
+
+void SDiagsWriter::EmitBlockInfoBlock() {
+ Stream.EnterBlockInfoBlock(3);
+
+ using namespace llvm;
+
+ // ==---------------------------------------------------------------------==//
+ // The subsequent records and Abbrevs are for the "Meta" block.
+ // ==---------------------------------------------------------------------==//
+
+ EmitBlockID(BLOCK_META, "Meta", Stream, Record);
+ EmitRecordID(RECORD_VERSION, "Version", Stream, Record);
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_VERSION));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrevs.set(RECORD_VERSION, Stream.EmitBlockInfoAbbrev(BLOCK_META, Abbrev));
+
+ // ==---------------------------------------------------------------------==//
+ // The subsequent records and Abbrevs are for the "Diagnostic" block.
+ // ==---------------------------------------------------------------------==//
+
+ EmitBlockID(BLOCK_DIAG, "Diag", Stream, Record);
+ EmitRecordID(RECORD_DIAG, "DiagInfo", Stream, Record);
+ EmitRecordID(RECORD_SOURCE_RANGE, "SrcRange", Stream, Record);
+ EmitRecordID(RECORD_CATEGORY, "CatName", Stream, Record);
+ EmitRecordID(RECORD_DIAG_FLAG, "DiagFlag", Stream, Record);
+ EmitRecordID(RECORD_FILENAME, "FileName", Stream, Record);
+ EmitRecordID(RECORD_FIXIT, "FixIt", Stream, Record);
+
+ // Emit abbreviation for RECORD_DIAG.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_DIAG));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Diag level.
+ AddSourceLocationAbbrev(Abbrev);
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Category.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped Diag ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Diagnostic text.
+ Abbrevs.set(RECORD_DIAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+ // Emit abbreviation for RECORD_CATEGORY.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_CATEGORY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Category ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Category text.
+ Abbrevs.set(RECORD_CATEGORY, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+ // Emit abbreviation for RECORD_SOURCE_RANGE.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_SOURCE_RANGE));
+ AddRangeLocationAbbrev(Abbrev);
+ Abbrevs.set(RECORD_SOURCE_RANGE,
+ Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+ // Emit the abbreviation for RECORD_DIAG_FLAG.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_DIAG_FLAG));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped Diag ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Flag name text.
+ Abbrevs.set(RECORD_DIAG_FLAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ // Emit the abbreviation for RECORD_FILENAME.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_FILENAME));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped file ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Modification time.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name text.
+ Abbrevs.set(RECORD_FILENAME, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ // Emit the abbreviation for RECORD_FIXIT.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_FIXIT));
+ AddRangeLocationAbbrev(Abbrev);
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // FixIt text.
+ Abbrevs.set(RECORD_FIXIT, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ Stream.ExitBlock();
+}
+
+void SDiagsWriter::EmitMetaBlock() {
+ Stream.EnterSubblock(BLOCK_META, 3);
+ Record.clear();
+ Record.push_back(RECORD_VERSION);
+ Record.push_back(Version);
+ Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_VERSION), Record);
+ Stream.ExitBlock();
+}
+
+unsigned SDiagsWriter::getEmitCategory(unsigned int category) {
+ if (Categories.count(category))
+ return category;
+
+ Categories.insert(category);
+
+ // We use a local version of 'Record' so that we can be generating
+ // another record when we lazily generate one for the category entry.
+ RecordData Record;
+ Record.push_back(RECORD_CATEGORY);
+ Record.push_back(category);
+ StringRef catName = DiagnosticIDs::getCategoryNameFromID(category);
+ Record.push_back(catName.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_CATEGORY), Record, catName);
+
+ return category;
+}
+
+unsigned SDiagsWriter::getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
+ unsigned DiagID) {
+ if (DiagLevel == DiagnosticsEngine::Note)
+ return 0; // No flag for notes.
+
+ StringRef FlagName = DiagnosticIDs::getWarningOptionForDiag(DiagID);
+ if (FlagName.empty())
+ return 0;
+
+ // Here we assume that FlagName points to static data whose pointer
+ // value is fixed. This allows us to unique by diagnostic groups.
+ const void *data = FlagName.data();
+ std::pair<unsigned, StringRef> &entry = DiagFlags[data];
+ if (entry.first == 0) {
+ entry.first = DiagFlags.size();
+ entry.second = FlagName;
+
+ // Lazily emit the string in a separate record.
+ RecordData Record;
+ Record.push_back(RECORD_DIAG_FLAG);
+ Record.push_back(entry.first);
+ Record.push_back(FlagName.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_DIAG_FLAG),
+ Record, FlagName);
+ }
+
+ return entry.first;
+}
+
+void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ if (DiagLevel != DiagnosticsEngine::Note) {
+ if (inNonNoteDiagnostic) {
+ // We have encountered a non-note diagnostic. Finish up the previous
+ // diagnostic block before starting a new one.
+ Stream.ExitBlock();
+ }
+ inNonNoteDiagnostic = true;
+ }
+
+ // Compute the diagnostic text.
+ diagBuf.clear();
+ Info.FormatDiagnostic(diagBuf);
+
+ SourceManager &SM = Info.getSourceManager();
+ SDiagsRenderer Renderer(*this, Record, SM, *LangOpts, DiagOpts);
+ Renderer.emitDiagnostic(Info.getLocation(), DiagLevel,
+ diagBuf.str(),
+ Info.getRanges(),
+ llvm::makeArrayRef(Info.getFixItHints(),
+ Info.getNumFixItHints()),
+ &Info);
+}
+
+void
+SDiagsRenderer::emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<clang::CharSourceRange> Ranges,
+ DiagOrStoredDiag D) {
+ // Emit the RECORD_DIAG record.
+ Writer.Record.clear();
+ Writer.Record.push_back(RECORD_DIAG);
+ Writer.Record.push_back(Level);
+ Writer.AddLocToRecord(Loc, SM, PLoc, Record);
+
+ if (const Diagnostic *Info = D.dyn_cast<const Diagnostic*>()) {
+ // Emit the category string lazily and get the category ID.
+ unsigned DiagID = DiagnosticIDs::getCategoryNumberForDiag(Info->getID());
+ Writer.Record.push_back(Writer.getEmitCategory(DiagID));
+ // Emit the diagnostic flag string lazily and get the mapped ID.
+ Writer.Record.push_back(Writer.getEmitDiagnosticFlag(Level, Info->getID()));
+ }
+ else {
+ Writer.Record.push_back(Writer.getEmitCategory());
+ Writer.Record.push_back(Writer.getEmitDiagnosticFlag(Level));
+ }
+
+ Writer.Record.push_back(Message.size());
+ Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_DIAG),
+ Writer.Record, Message);
+}
+
+void SDiagsRenderer::beginDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {
+ Writer.Stream.EnterSubblock(BLOCK_DIAG, 4);
+}
+
+void SDiagsRenderer::endDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {
+ if (D && Level != DiagnosticsEngine::Note)
+ return;
+ Writer.Stream.ExitBlock();
+}
+
+void SDiagsRenderer::emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints) {
+ // Emit Source Ranges.
+ for (ArrayRef<CharSourceRange>::iterator it=Ranges.begin(), ei=Ranges.end();
+ it != ei; ++it) {
+ if (it->isValid())
+ Writer.EmitCharSourceRange(*it, SM);
+ }
+
+ // Emit FixIts.
+ for (ArrayRef<FixItHint>::iterator it = Hints.begin(), et = Hints.end();
+ it != et; ++it) {
+ const FixItHint &fix = *it;
+ if (fix.isNull())
+ continue;
+ Writer.Record.clear();
+ Writer.Record.push_back(RECORD_FIXIT);
+ Writer.AddCharSourceRangeToRecord(fix.RemoveRange, Record, SM);
+ Writer.Record.push_back(fix.CodeToInsert.size());
+ Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_FIXIT), Record,
+ fix.CodeToInsert);
+ }
+}
+
+void SDiagsRenderer::emitNote(SourceLocation Loc, StringRef Message) {
+ Writer.Stream.EnterSubblock(BLOCK_DIAG, 4);
+ RecordData Record;
+ Record.push_back(RECORD_DIAG);
+ Record.push_back(DiagnosticsEngine::Note);
+ Writer.AddLocToRecord(Loc, Record, SM);
+ Record.push_back(Writer.getEmitCategory());
+ Record.push_back(Writer.getEmitDiagnosticFlag(DiagnosticsEngine::Note));
+ Record.push_back(Message.size());
+ Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_DIAG),
+ Record, Message);
+ Writer.Stream.ExitBlock();
+}
+
+void SDiagsWriter::finish() {
+ if (inNonNoteDiagnostic) {
+ // Finish off any diagnostics we were in the process of emitting.
+ Stream.ExitBlock();
+ inNonNoteDiagnostic = false;
+ }
+
+ // Write the generated bitstream to the output stream.
+ OS->write((char *)&Buffer.front(), Buffer.size());
+ OS->flush();
+
+ OS.reset(0);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
new file mode 100644
index 0000000..9f5dcb4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
@@ -0,0 +1,881 @@
+//===--- TextDiagnostic.cpp - Text Diagnostic Pretty-Printing -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+using namespace clang;
+
+static const enum raw_ostream::Colors noteColor =
+ raw_ostream::BLACK;
+static const enum raw_ostream::Colors fixitColor =
+ raw_ostream::GREEN;
+static const enum raw_ostream::Colors caretColor =
+ raw_ostream::GREEN;
+static const enum raw_ostream::Colors warningColor =
+ raw_ostream::MAGENTA;
+static const enum raw_ostream::Colors errorColor = raw_ostream::RED;
+static const enum raw_ostream::Colors fatalColor = raw_ostream::RED;
+// Used for changing only the bold attribute.
+static const enum raw_ostream::Colors savedColor =
+ raw_ostream::SAVEDCOLOR;
+
+/// \brief Number of spaces to indent when word-wrapping.
+const unsigned WordWrapIndentation = 6;
+
+/// \brief When the source code line we want to print is too long for
+/// the terminal, select the "interesting" region.
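+///
+/// For example (illustrative), a 120-column source line whose caret falls
+/// near column 100 is sliced to something like
+///   ...int value = compute(alpha, beta, gamma);
+///                                 ^
+/// so the caret region stays visible in an 80-column terminal.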
+static void selectInterestingSourceRegion(std::string &SourceLine,
+ std::string &CaretLine,
+ std::string &FixItInsertionLine,
+ unsigned EndOfCaretToken,
+ unsigned Columns) {
+ unsigned MaxSize = std::max(SourceLine.size(),
+ std::max(CaretLine.size(),
+ FixItInsertionLine.size()));
+ if (MaxSize > SourceLine.size())
+ SourceLine.resize(MaxSize, ' ');
+ if (MaxSize > CaretLine.size())
+ CaretLine.resize(MaxSize, ' ');
+ if (!FixItInsertionLine.empty() && MaxSize > FixItInsertionLine.size())
+ FixItInsertionLine.resize(MaxSize, ' ');
+
+ // Find the slice that we need to display the full caret line
+ // correctly.
+ unsigned CaretStart = 0, CaretEnd = CaretLine.size();
+ for (; CaretStart != CaretEnd; ++CaretStart)
+ if (!isspace(CaretLine[CaretStart]))
+ break;
+
+ for (; CaretEnd != CaretStart; --CaretEnd)
+ if (!isspace(CaretLine[CaretEnd - 1]))
+ break;
+
+ // Make sure we don't chop the string shorter than the caret token
+ // itself.
+ if (CaretEnd < EndOfCaretToken)
+ CaretEnd = EndOfCaretToken;
+
+ // If we have a fix-it line, make sure the slice includes all of the
+ // fix-it information.
+ if (!FixItInsertionLine.empty()) {
+ unsigned FixItStart = 0, FixItEnd = FixItInsertionLine.size();
+ for (; FixItStart != FixItEnd; ++FixItStart)
+ if (!isspace(FixItInsertionLine[FixItStart]))
+ break;
+
+ for (; FixItEnd != FixItStart; --FixItEnd)
+ if (!isspace(FixItInsertionLine[FixItEnd - 1]))
+ break;
+
+ if (FixItStart < CaretStart)
+ CaretStart = FixItStart;
+ if (FixItEnd > CaretEnd)
+ CaretEnd = FixItEnd;
+ }
+
+ // CaretLine[CaretStart, CaretEnd) contains all of the interesting
+ // parts of the caret line. While this slice is smaller than the
+ // number of columns we have, try to grow the slice to encompass
+ // more context.
+
+ // If the end of the interesting region comes before we run out of
+ // space in the terminal, start at the beginning of the line.
+ if (Columns > 3 && CaretEnd < Columns - 3)
+ CaretStart = 0;
+
+ unsigned TargetColumns = Columns;
+ if (TargetColumns > 8)
+ TargetColumns -= 8; // Give us extra room for the ellipses.
+ unsigned SourceLength = SourceLine.size();
+ while ((CaretEnd - CaretStart) < TargetColumns) {
+ bool ExpandedRegion = false;
+ // Move the start of the interesting region left until we've
+ // pulled in something else interesting.
+ if (CaretStart == 1)
+ CaretStart = 0;
+ else if (CaretStart > 1) {
+ unsigned NewStart = CaretStart - 1;
+
+ // Skip over any whitespace we see here; we're looking for
+ // another bit of interesting text.
+ while (NewStart && isspace(SourceLine[NewStart]))
+ --NewStart;
+
+ // Skip over this bit of "interesting" text.
+ while (NewStart && !isspace(SourceLine[NewStart]))
+ --NewStart;
+
+ // Move up to the non-whitespace character we just saw.
+ if (NewStart)
+ ++NewStart;
+
+ // If we're still within our limit, update the starting
+ // position within the source/caret line.
+ if (CaretEnd - NewStart <= TargetColumns) {
+ CaretStart = NewStart;
+ ExpandedRegion = true;
+ }
+ }
+
+ // Move the end of the interesting region right until we've
+ // pulled in something else interesting.
+ if (CaretEnd != SourceLength) {
+ assert(CaretEnd < SourceLength && "Unexpected caret position!");
+ unsigned NewEnd = CaretEnd;
+
+ // Skip over any whitespace we see here; we're looking for
+ // another bit of interesting text.
+ while (NewEnd != SourceLength && isspace(SourceLine[NewEnd - 1]))
+ ++NewEnd;
+
+ // Skip over this bit of "interesting" text.
+ while (NewEnd != SourceLength && !isspace(SourceLine[NewEnd - 1]))
+ ++NewEnd;
+
+ if (NewEnd - CaretStart <= TargetColumns) {
+ CaretEnd = NewEnd;
+ ExpandedRegion = true;
+ }
+ }
+
+ if (!ExpandedRegion)
+ break;
+ }
+
+ // [CaretStart, CaretEnd) is the slice we want. Update the various
+ // output lines to show only this slice, with two-space padding
+ // before the lines so that it looks nicer.
+ if (CaretEnd < SourceLine.size())
+ SourceLine.replace(CaretEnd, std::string::npos, "...");
+ if (CaretEnd < CaretLine.size())
+ CaretLine.erase(CaretEnd, std::string::npos);
+ if (FixItInsertionLine.size() > CaretEnd)
+ FixItInsertionLine.erase(CaretEnd, std::string::npos);
+
+ if (CaretStart > 2) {
+ SourceLine.replace(0, CaretStart, " ...");
+ CaretLine.replace(0, CaretStart, " ");
+ if (FixItInsertionLine.size() >= CaretStart)
+ FixItInsertionLine.replace(0, CaretStart, " ");
+ }
+}
+
+/// \brief Skip over whitespace in the string, starting at the given
+/// index.
+///
+/// \returns The index of the first non-whitespace character that is
+/// greater than or equal to Idx or, if no such character exists,
+/// returns the end of the string.
+static unsigned skipWhitespace(unsigned Idx, StringRef Str, unsigned Length) {
+ while (Idx < Length && isspace(Str[Idx]))
+ ++Idx;
+ return Idx;
+}
+
+/// \brief If the given character is the start of some kind of
+/// balanced punctuation (e.g., quotes or parentheses), return the
+/// character that will terminate the punctuation.
+///
+/// \returns The ending punctuation character, if any, or the NULL
+/// character if the input character does not start any punctuation.
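+///
+/// For example, findMatchingPunctuation('(') returns ')', and
+/// findMatchingPunctuation('x') returns '\0'.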
+static inline char findMatchingPunctuation(char c) {
+ switch (c) {
+ case '\'': return '\'';
+ case '`': return '\'';
+ case '"': return '"';
+ case '(': return ')';
+ case '[': return ']';
+ case '{': return '}';
+ default: break;
+ }
+
+ return 0;
+}
+
+/// \brief Find the end of the word starting at the given offset
+/// within a string.
+///
+/// \returns the index pointing one character past the end of the
+/// word.
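+///
+/// For example (illustrative), in "call (a, b) now" the word starting at
+/// '(' runs through the matching ')', so the parenthesized group is
+/// wrapped as a single unit whenever it fits.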
+static unsigned findEndOfWord(unsigned Start, StringRef Str,
+ unsigned Length, unsigned Column,
+ unsigned Columns) {
+ assert(Start < Str.size() && "Invalid start position!");
+ unsigned End = Start + 1;
+
+ // If we are already at the end of the string, take that as the word.
+ if (End == Str.size())
+ return End;
+
+ // Determine if the start of the string is actually opening
+ // punctuation, e.g., a quote or parentheses.
+ char EndPunct = findMatchingPunctuation(Str[Start]);
+ if (!EndPunct) {
+ // This is a normal word. Just find the first space character.
+ while (End < Length && !isspace(Str[End]))
+ ++End;
+ return End;
+ }
+
+ // We have the start of a balanced punctuation sequence (quotes,
+ // parentheses, etc.). Determine where the full sequence ends.
+ SmallString<16> PunctuationEndStack;
+ PunctuationEndStack.push_back(EndPunct);
+ while (End < Length && !PunctuationEndStack.empty()) {
+ if (Str[End] == PunctuationEndStack.back())
+ PunctuationEndStack.pop_back();
+ else if (char SubEndPunct = findMatchingPunctuation(Str[End]))
+ PunctuationEndStack.push_back(SubEndPunct);
+
+ ++End;
+ }
+
+ // Find the first space character after the punctuation ended.
+ while (End < Length && !isspace(Str[End]))
+ ++End;
+
+ unsigned PunctWordLength = End - Start;
+ if (// If the word fits on this line
+ Column + PunctWordLength <= Columns ||
+ // ... or the word is "short enough" to take up the next line
+ // without too much ugly white space
+ PunctWordLength < Columns/3)
+ return End; // Take the whole thing as a single "word".
+
+ // The whole quoted/parenthesized string is too long to print as a
+ // single "word". Instead, find the "word" that starts just after
+ // the punctuation and use that end-point instead. This will recurse
+ // until it finds something small enough to consider a word.
+ return findEndOfWord(Start + 1, Str, Length, Column + 1, Columns);
+}
+
+/// \brief Print the given string to a stream, word-wrapping it to
+/// some number of columns in the process.
+///
+/// \param OS the stream to which the word-wrapping string will be
+/// emitted.
+/// \param Str the string to word-wrap and output.
+/// \param Columns the number of columns to word-wrap to.
+/// \param Column the column number at which the first character of \p
+/// Str will be printed. This will be non-zero when part of the first
+/// line has already been printed.
+/// \param Indentation the number of spaces to indent any lines beyond
+/// the first line.
+/// \returns true if word-wrapping was required, or false if the
+/// string fit on the first line.
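+///
+/// For example, printWordWrapped(OS, "aaa bbb ccc", 8) prints
+///   aaa bbb
+///         ccc
+/// wrapping the last word onto a continuation line indented by
+/// WordWrapIndentation spaces.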
+static bool printWordWrapped(raw_ostream &OS, StringRef Str,
+ unsigned Columns,
+ unsigned Column = 0,
+ unsigned Indentation = WordWrapIndentation) {
+ const unsigned Length = std::min(Str.find('\n'), Str.size());
+
+ // The string used to indent each line.
+ SmallString<16> IndentStr;
+ IndentStr.assign(Indentation, ' ');
+ bool Wrapped = false;
+ for (unsigned WordStart = 0, WordEnd; WordStart < Length;
+ WordStart = WordEnd) {
+ // Find the beginning of the next word.
+ WordStart = skipWhitespace(WordStart, Str, Length);
+ if (WordStart == Length)
+ break;
+
+ // Find the end of this word.
+ WordEnd = findEndOfWord(WordStart, Str, Length, Column, Columns);
+
+ // Does this word fit on the current line?
+ unsigned WordLength = WordEnd - WordStart;
+ if (Column + WordLength < Columns) {
+ // This word fits on the current line; print it there.
+ if (WordStart) {
+ OS << ' ';
+ Column += 1;
+ }
+ OS << Str.substr(WordStart, WordLength);
+ Column += WordLength;
+ continue;
+ }
+
+ // This word does not fit on the current line, so wrap to the next
+ // line.
+ OS << '\n';
+ OS.write(&IndentStr[0], Indentation);
+ OS << Str.substr(WordStart, WordLength);
+ Column = Indentation + WordLength;
+ Wrapped = true;
+ }
+
+ // Append any remaining text from the message with its existing formatting.
+ OS << Str.substr(Length);
+
+ return Wrapped;
+}
+
+TextDiagnostic::TextDiagnostic(raw_ostream &OS,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts)
+ : DiagnosticRenderer(SM, LangOpts, DiagOpts), OS(OS) {}
+
+TextDiagnostic::~TextDiagnostic() {}
+
+void
+TextDiagnostic::emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<clang::CharSourceRange> Ranges,
+ DiagOrStoredDiag D) {
+ uint64_t StartOfLocationInfo = OS.tell();
+
+ // Emit the location of this particular diagnostic.
+ emitDiagnosticLoc(Loc, PLoc, Level, Ranges);
+
+ if (DiagOpts.ShowColors)
+ OS.resetColor();
+
+ printDiagnosticLevel(OS, Level, DiagOpts.ShowColors);
+ printDiagnosticMessage(OS, Level, Message,
+ OS.tell() - StartOfLocationInfo,
+ DiagOpts.MessageLength, DiagOpts.ShowColors);
+}
+
+/*static*/ void
+TextDiagnostic::printDiagnosticLevel(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ bool ShowColors) {
+ if (ShowColors) {
+ // Print diagnostic category in bold and color
+ switch (Level) {
+ case DiagnosticsEngine::Ignored:
+ llvm_unreachable("Invalid diagnostic type");
+ case DiagnosticsEngine::Note: OS.changeColor(noteColor, true); break;
+ case DiagnosticsEngine::Warning: OS.changeColor(warningColor, true); break;
+ case DiagnosticsEngine::Error: OS.changeColor(errorColor, true); break;
+ case DiagnosticsEngine::Fatal: OS.changeColor(fatalColor, true); break;
+ }
+ }
+
+ switch (Level) {
+ case DiagnosticsEngine::Ignored:
+ llvm_unreachable("Invalid diagnostic type");
+ case DiagnosticsEngine::Note: OS << "note: "; break;
+ case DiagnosticsEngine::Warning: OS << "warning: "; break;
+ case DiagnosticsEngine::Error: OS << "error: "; break;
+ case DiagnosticsEngine::Fatal: OS << "fatal error: "; break;
+ }
+
+ if (ShowColors)
+ OS.resetColor();
+}
+
+/*static*/ void
+TextDiagnostic::printDiagnosticMessage(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ unsigned CurrentColumn, unsigned Columns,
+ bool ShowColors) {
+ if (ShowColors) {
+ // Print warnings, errors and fatal errors in bold, no color
+ switch (Level) {
+ case DiagnosticsEngine::Warning: OS.changeColor(savedColor, true); break;
+ case DiagnosticsEngine::Error: OS.changeColor(savedColor, true); break;
+ case DiagnosticsEngine::Fatal: OS.changeColor(savedColor, true); break;
+ default: break; //don't bold notes
+ }
+ }
+
+ if (Columns)
+ printWordWrapped(OS, Message, Columns, CurrentColumn);
+ else
+ OS << Message;
+
+ if (ShowColors)
+ OS.resetColor();
+ OS << '\n';
+}
+
+/// \brief Print out the file/line/column information and include trace.
+///
+/// This method handles the emission of the diagnostic location information.
+/// This includes extracting as much location information as is present for
+/// the diagnostic and printing it, as well as any include stack or source
+/// ranges necessary.
+void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges) {
+ if (PLoc.isInvalid()) {
+ // At least print the file name if available:
+ FileID FID = SM.getFileID(Loc);
+ if (!FID.isInvalid()) {
+ const FileEntry* FE = SM.getFileEntryForID(FID);
+ if (FE && FE->getName()) {
+ OS << FE->getName();
+ if (FE->getDevice() == 0 && FE->getInode() == 0
+ && FE->getFileMode() == 0) {
+ // "(in PCH)" is a guess, but a good one:
+ OS << " (in PCH)";
+ }
+ OS << ": ";
+ }
+ }
+ return;
+ }
+ unsigned LineNo = PLoc.getLine();
+
+ if (!DiagOpts.ShowLocation)
+ return;
+
+ if (DiagOpts.ShowColors)
+ OS.changeColor(savedColor, true);
+
+ OS << PLoc.getFilename();
+ switch (DiagOpts.Format) {
+ case DiagnosticOptions::Clang: OS << ':' << LineNo; break;
+ case DiagnosticOptions::Msvc: OS << '(' << LineNo; break;
+ case DiagnosticOptions::Vi: OS << " +" << LineNo; break;
+ }
+
+ if (DiagOpts.ShowColumn)
+ // Compute the column number.
+ if (unsigned ColNo = PLoc.getColumn()) {
+ if (DiagOpts.Format == DiagnosticOptions::Msvc) {
+ OS << ',';
+ ColNo--;
+ } else
+ OS << ':';
+ OS << ColNo;
+ }
+ switch (DiagOpts.Format) {
+ case DiagnosticOptions::Clang:
+ case DiagnosticOptions::Vi: OS << ':'; break;
+ case DiagnosticOptions::Msvc: OS << ") : "; break;
+ }
+
+ if (DiagOpts.ShowSourceRanges && !Ranges.empty()) {
+ FileID CaretFileID =
+ SM.getFileID(SM.getExpansionLoc(Loc));
+ bool PrintedRange = false;
+
+ for (ArrayRef<CharSourceRange>::const_iterator RI = Ranges.begin(),
+ RE = Ranges.end();
+ RI != RE; ++RI) {
+ // Ignore invalid ranges.
+ if (!RI->isValid()) continue;
+
+ SourceLocation B = SM.getExpansionLoc(RI->getBegin());
+ SourceLocation E = SM.getExpansionLoc(RI->getEnd());
+
+ // If the End location and the start location are the same and are a
+ // macro location, then the range was something that came from a
+ // macro expansion or _Pragma. If this is an object-like macro, the
+ // best we can do is to highlight the range. If this is a
+ // function-like macro, we'd also like to highlight the arguments.
+ if (B == E && RI->getEnd().isMacroID())
+ E = SM.getExpansionRange(RI->getEnd()).second;
+
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
+
+ // If the start or end of the range is in another file, just discard
+ // it.
+ if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
+ continue;
+
+ // Add in the length of the token, so that we cover multi-char
+ // tokens.
+ unsigned TokSize = 0;
+ if (RI->isTokenRange())
+ TokSize = Lexer::MeasureTokenLength(E, SM, LangOpts);
+
+ OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
+ << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
+ << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
+ << (SM.getColumnNumber(EInfo.first, EInfo.second)+TokSize)
+ << '}';
+ PrintedRange = true;
+ }
+
+ if (PrintedRange)
+ OS << ':';
+ }
+ OS << ' ';
+}
+
+void TextDiagnostic::emitBasicNote(StringRef Message) {
+ // FIXME: Emit this as a real note diagnostic.
+ // FIXME: Format an actual diagnostic rather than a hard coded string.
+ OS << "note: " << Message << "\n";
+}
+
+void TextDiagnostic::emitIncludeLocation(SourceLocation Loc,
+ PresumedLoc PLoc) {
+ if (DiagOpts.ShowLocation)
+ OS << "In file included from " << PLoc.getFilename() << ':'
+ << PLoc.getLine() << ":\n";
+ else
+ OS << "In included file:\n";
+}
+
+/// \brief Emit a code snippet and caret line.
+///
+/// This routine emits a single line's code snippet and caret line.
+///
+/// \param Loc The location for the caret.
+/// \param Ranges The underlined ranges for this code snippet.
+/// \param Hints The FixIt hints active for this diagnostic.
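+///
+/// For example (illustrative), a diagnostic at the 'b' in "int a = b;"
+/// emits:
+///   int a = b;
+///           ^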
+void TextDiagnostic::emitSnippetAndCaret(
+ SourceLocation Loc, DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints) {
+ assert(!Loc.isInvalid() && "must have a valid source location here");
+ assert(Loc.isFileID() && "must have a file location here");
+
+ // If caret diagnostics are enabled and we have a location, we want to
+ // emit the caret. However, we only do this if the location moved
+ // from the last diagnostic, if the last diagnostic was a note that
+ // was part of a different warning or error diagnostic, or if the
+ // diagnostic has ranges. We don't want to emit the same caret
+ // multiple times if one loc has multiple diagnostics.
+ if (!DiagOpts.ShowCarets)
+ return;
+ if (Loc == LastLoc && Ranges.empty() && Hints.empty() &&
+ (LastLevel != DiagnosticsEngine::Note || Level == LastLevel))
+ return;
+
+ // Decompose the location into a FID/Offset pair.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ FileID FID = LocInfo.first;
+ unsigned FileOffset = LocInfo.second;
+
+ // Get information about the buffer it points into.
+ bool Invalid = false;
+ const char *BufStart = SM.getBufferData(FID, &Invalid).data();
+ if (Invalid)
+ return;
+
+ unsigned LineNo = SM.getLineNumber(FID, FileOffset);
+ unsigned ColNo = SM.getColumnNumber(FID, FileOffset);
+ unsigned CaretEndColNo
+ = ColNo + Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+
+ // Rewind from the current position to the start of the line.
+ const char *TokPtr = BufStart+FileOffset;
+ const char *LineStart = TokPtr-ColNo+1; // Column # is 1-based.
+
+
+ // Compute the line end. Scan forward from the error position to the end of
+ // the line.
+ const char *LineEnd = TokPtr;
+ while (*LineEnd != '\n' && *LineEnd != '\r' && *LineEnd != '\0')
+ ++LineEnd;
+
+ // FIXME: This shouldn't be necessary, but the CaretEndColNo can extend past
+ // the source line length as currently being computed. See
+ // test/Misc/message-length.c.
+ CaretEndColNo = std::min(CaretEndColNo, unsigned(LineEnd - LineStart));
+
+ // Copy the line of code into an std::string for ease of manipulation.
+ std::string SourceLine(LineStart, LineEnd);
+
+ // Create a caret line filled with spaces, the same length as the line
+ // of source code.
+ std::string CaretLine(LineEnd-LineStart, ' ');
+
+ // Highlight all of the characters covered by Ranges with ~ characters.
+ for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I)
+ highlightRange(*I, LineNo, FID, SourceLine, CaretLine);
+
+ // Next, insert the caret itself.
+ if (ColNo-1 < CaretLine.size())
+ CaretLine[ColNo-1] = '^';
+ else
+ CaretLine.push_back('^');
+
+ expandTabs(SourceLine, CaretLine);
+
+ // If we are in -fdiagnostics-print-source-range-info mode, we are trying
+ // to produce easily machine parsable output. Add a space before the
+ // source line and the caret to make it trivial to tell the main diagnostic
+ // line from what the user is intended to see.
+ if (DiagOpts.ShowSourceRanges) {
+ SourceLine = ' ' + SourceLine;
+ CaretLine = ' ' + CaretLine;
+ }
+
+ std::string FixItInsertionLine = buildFixItInsertionLine(LineNo,
+ LineStart, LineEnd,
+ Hints);
+
+ // If the source line is too long for our terminal, select only the
+ // "interesting" source region within that line.
+ unsigned Columns = DiagOpts.MessageLength;
+ if (Columns && SourceLine.size() > Columns)
+ selectInterestingSourceRegion(SourceLine, CaretLine, FixItInsertionLine,
+ CaretEndColNo, Columns);
+
+ // Finally, remove any blank spaces from the end of CaretLine.
+ while (CaretLine[CaretLine.size()-1] == ' ')
+ CaretLine.erase(CaretLine.end()-1);
+
+ // Emit what we have computed.
+ OS << SourceLine << '\n';
+
+ if (DiagOpts.ShowColors)
+ OS.changeColor(caretColor, true);
+ OS << CaretLine << '\n';
+ if (DiagOpts.ShowColors)
+ OS.resetColor();
+
+ if (!FixItInsertionLine.empty()) {
+ if (DiagOpts.ShowColors)
+ // Print fixit line in color
+ OS.changeColor(fixitColor, false);
+ if (DiagOpts.ShowSourceRanges)
+ OS << ' ';
+ OS << FixItInsertionLine << '\n';
+ if (DiagOpts.ShowColors)
+ OS.resetColor();
+ }
+
+ // Print out any parseable fixit information requested by the options.
+ emitParseableFixits(Hints);
+}
+
+/// \brief Highlight a SourceRange (with ~'s) for any characters on LineNo.
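+/// For example (illustrative), a range covering "b + c" in
+/// "int a = b + c;" turns the caret line into "        ~~~~~".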
+void TextDiagnostic::highlightRange(const CharSourceRange &R,
+ unsigned LineNo, FileID FID,
+ const std::string &SourceLine,
+ std::string &CaretLine) {
+ assert(CaretLine.size() == SourceLine.size() &&
+ "Expect a correspondence between source and caret line!");
+ if (!R.isValid()) return;
+
+ SourceLocation Begin = SM.getExpansionLoc(R.getBegin());
+ SourceLocation End = SM.getExpansionLoc(R.getEnd());
+
+ // If the End location and the start location are the same and are a macro
+ // location, then the range was something that came from a macro expansion
+ // or _Pragma. If this is an object-like macro, the best we can do is to
+ // highlight the range. If this is a function-like macro, we'd also like to
+ // highlight the arguments.
+ if (Begin == End && R.getEnd().isMacroID())
+ End = SM.getExpansionRange(R.getEnd()).second;
+
+ unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
+ if (StartLineNo > LineNo || SM.getFileID(Begin) != FID)
+ return; // No intersection.
+
+ unsigned EndLineNo = SM.getExpansionLineNumber(End);
+ if (EndLineNo < LineNo || SM.getFileID(End) != FID)
+ return; // No intersection.
+
+ // Compute the column number of the start.
+ unsigned StartColNo = 0;
+ if (StartLineNo == LineNo) {
+ StartColNo = SM.getExpansionColumnNumber(Begin);
+ if (StartColNo) --StartColNo; // Zero base the col #.
+ }
+
+ // Compute the column number of the end.
+ unsigned EndColNo = CaretLine.size();
+ if (EndLineNo == LineNo) {
+ EndColNo = SM.getExpansionColumnNumber(End);
+ if (EndColNo) {
+ --EndColNo; // Zero base the col #.
+
+ // Add in the length of the token, so that we cover multi-char tokens if
+ // this is a token range.
+ if (R.isTokenRange())
+ EndColNo += Lexer::MeasureTokenLength(End, SM, LangOpts);
+ } else {
+ EndColNo = CaretLine.size();
+ }
+ }
+
+ assert(StartColNo <= EndColNo && "Invalid range!");
+
+ // Check that a token range does not highlight only whitespace.
+ if (R.isTokenRange()) {
+ // Pick the first non-whitespace column.
+ while (StartColNo < SourceLine.size() &&
+ (SourceLine[StartColNo] == ' ' || SourceLine[StartColNo] == '\t'))
+ ++StartColNo;
+
+ // Pick the last non-whitespace column.
+ if (EndColNo > SourceLine.size())
+ EndColNo = SourceLine.size();
+ while (EndColNo-1 &&
+ (SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t'))
+ --EndColNo;
+
+ // If the start/end passed each other, then we are trying to highlight a
+ // range that just exists in whitespace, which must be some sort of other
+ // bug.
+ assert(StartColNo <= EndColNo && "Trying to highlight whitespace??");
+ }
+
+ // Fill the range with ~'s.
+ for (unsigned i = StartColNo; i < EndColNo; ++i)
+ CaretLine[i] = '~';
+}
+
+std::string TextDiagnostic::buildFixItInsertionLine(unsigned LineNo,
+ const char *LineStart,
+ const char *LineEnd,
+ ArrayRef<FixItHint> Hints) {
+ std::string FixItInsertionLine;
+ if (Hints.empty() || !DiagOpts.ShowFixits)
+ return FixItInsertionLine;
+
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ if (!I->CodeToInsert.empty()) {
+ // We have an insertion hint. Determine whether the inserted
+ // code is on the same line as the caret.
+ std::pair<FileID, unsigned> HintLocInfo
+ = SM.getDecomposedExpansionLoc(I->RemoveRange.getBegin());
+ if (LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second)) {
+ // Insert the new code into the line just below the code
+ // that the user wrote.
+ unsigned HintColNo
+ = SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second);
+ unsigned LastColumnModified
+ = HintColNo - 1 + I->CodeToInsert.size();
+ if (LastColumnModified > FixItInsertionLine.size())
+ FixItInsertionLine.resize(LastColumnModified, ' ');
+ std::copy(I->CodeToInsert.begin(), I->CodeToInsert.end(),
+ FixItInsertionLine.begin() + HintColNo - 1);
+ } else {
+ FixItInsertionLine.clear();
+ break;
+ }
+ }
+ }
+
+ if (FixItInsertionLine.empty())
+ return FixItInsertionLine;
+
+ // Now that we have the entire fixit line, expand the tabs in it.
+ // Since we don't want to insert spaces in the middle of a word,
+ // find each word and the column it should line up with and insert
+ // spaces until they match.
+ unsigned FixItPos = 0;
+ unsigned LinePos = 0;
+ unsigned TabExpandedCol = 0;
+ unsigned LineLength = LineEnd - LineStart;
+
+ while (FixItPos < FixItInsertionLine.size() && LinePos < LineLength) {
+ // Find the next word in the FixIt line.
+ while (FixItPos < FixItInsertionLine.size() &&
+ FixItInsertionLine[FixItPos] == ' ')
+ ++FixItPos;
+ unsigned CharDistance = FixItPos - TabExpandedCol;
+
+ // Walk forward in the source line, keeping track of
+ // the tab-expanded column.
+ for (unsigned I = 0; I < CharDistance; ++I, ++LinePos)
+ if (LinePos >= LineLength || LineStart[LinePos] != '\t')
+ ++TabExpandedCol;
+ else
+ TabExpandedCol =
+ (TabExpandedCol/DiagOpts.TabStop + 1) * DiagOpts.TabStop;
+
+ // Adjust the fixit line to match this column.
+ FixItInsertionLine.insert(FixItPos, TabExpandedCol-FixItPos, ' ');
+ FixItPos = TabExpandedCol;
+
+ // Walk to the end of the word.
+ while (FixItPos < FixItInsertionLine.size() &&
+ FixItInsertionLine[FixItPos] != ' ')
+ ++FixItPos;
+ }
+
+ return FixItInsertionLine;
+}
+
+void TextDiagnostic::expandTabs(std::string &SourceLine,
+ std::string &CaretLine) {
+ // Scan the source line, looking for tabs. If we find any, manually expand
+ // them to spaces and update the CaretLine to match.
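+ // For example, with TabStop == 8 a tab at index 2 expands to six spaces
+ // in total, so the character after it lands at column 8.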
+ for (unsigned i = 0; i != SourceLine.size(); ++i) {
+ if (SourceLine[i] != '\t') continue;
+
+ // Replace this tab with at least one space.
+ SourceLine[i] = ' ';
+
+ // Compute the number of spaces we need to insert.
+ unsigned TabStop = DiagOpts.TabStop;
+ assert(0 < TabStop && TabStop <= DiagnosticOptions::MaxTabStop &&
+ "Invalid -ftabstop value");
+ unsigned NumSpaces = ((i+TabStop)/TabStop * TabStop) - (i+1);
+ assert(NumSpaces < TabStop && "Invalid computation of space amt");
+
+ // Insert spaces into the SourceLine.
+ SourceLine.insert(i+1, NumSpaces, ' ');
+
+ // Insert spaces or ~'s into CaretLine.
+ CaretLine.insert(i+1, NumSpaces, CaretLine[i] == '~' ? '~' : ' ');
+ }
+}
+
+void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints) {
+ if (!DiagOpts.ShowParseableFixits)
+ return;
+
+ // We follow FixItRewriter's example in not (yet) handling
+ // fix-its in macros.
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ if (I->RemoveRange.isInvalid() ||
+ I->RemoveRange.getBegin().isMacroID() ||
+ I->RemoveRange.getEnd().isMacroID())
+ return;
+ }
+
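+ // Each remaining hint is printed in a machine-parseable form, e.g.
+ // (illustrative): fix-it:"t.c":{3:11-3:11}:";"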
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ SourceLocation BLoc = I->RemoveRange.getBegin();
+ SourceLocation ELoc = I->RemoveRange.getEnd();
+
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
+
+ // Adjust for token ranges.
+ if (I->RemoveRange.isTokenRange())
+ EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, LangOpts);
+
+ // We specifically do not do word-wrapping or tab-expansion here,
+ // because this is supposed to be easy to parse.
+ PresumedLoc PLoc = SM.getPresumedLoc(BLoc);
+ if (PLoc.isInvalid())
+ break;
+
+ OS << "fix-it:\"";
+ OS.write_escaped(PLoc.getFilename());
+ OS << "\":{" << SM.getLineNumber(BInfo.first, BInfo.second)
+ << ':' << SM.getColumnNumber(BInfo.first, BInfo.second)
+ << '-' << SM.getLineNumber(EInfo.first, EInfo.second)
+ << ':' << SM.getColumnNumber(EInfo.first, EInfo.second)
+ << "}:\"";
+ OS.write_escaped(I->CodeToInsert);
+ OS << "\"\n";
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
index f8ea9f1..57105f1 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -24,7 +24,7 @@ void TextDiagnosticBuffer::HandleDiagnostic(DiagnosticsEngine::Level Level,
// Default implementation (Warnings/errors count).
DiagnosticConsumer::HandleDiagnostic(Level, Info);
- llvm::SmallString<100> Buf;
+ SmallString<100> Buf;
Info.FormatDiagnostic(Buf);
switch (Level) {
default: llvm_unreachable(
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
index 10e7238..6445a0c 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -15,6 +15,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Frontend/TextDiagnostic.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
@@ -23,28 +24,10 @@
#include <algorithm>
using namespace clang;
-static const enum raw_ostream::Colors noteColor =
- raw_ostream::BLACK;
-static const enum raw_ostream::Colors fixitColor =
- raw_ostream::GREEN;
-static const enum raw_ostream::Colors caretColor =
- raw_ostream::GREEN;
-static const enum raw_ostream::Colors warningColor =
- raw_ostream::MAGENTA;
-static const enum raw_ostream::Colors errorColor = raw_ostream::RED;
-static const enum raw_ostream::Colors fatalColor = raw_ostream::RED;
-// Used for changing only the bold attribute.
-static const enum raw_ostream::Colors savedColor =
- raw_ostream::SAVEDCOLOR;
-
-/// \brief Number of spaces to indent when word-wrapping.
-const unsigned WordWrapIndentation = 6;
-
TextDiagnosticPrinter::TextDiagnosticPrinter(raw_ostream &os,
const DiagnosticOptions &diags,
bool _OwnsOutputStream)
- : OS(os), LangOpts(0), DiagOpts(&diags),
- LastCaretDiagnosticWasNote(0),
+ : OS(os), LangOpts(0), DiagOpts(&diags), SM(0),
OwnsOutputStream(_OwnsOutputStream) {
}
@@ -53,878 +36,14 @@ TextDiagnosticPrinter::~TextDiagnosticPrinter() {
delete &OS;
}
-/// \brief Helper to recursivly walk up the include stack and print each layer
-/// on the way back down.
-static void PrintIncludeStackRecursively(raw_ostream &OS,
- const SourceManager &SM,
- SourceLocation Loc,
- bool ShowLocation) {
- if (Loc.isInvalid())
- return;
-
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- if (PLoc.isInvalid())
- return;
-
- // Print out the other include frames first.
- PrintIncludeStackRecursively(OS, SM, PLoc.getIncludeLoc(), ShowLocation);
-
- if (ShowLocation)
- OS << "In file included from " << PLoc.getFilename()
- << ':' << PLoc.getLine() << ":\n";
- else
- OS << "In included file:\n";
-}
-
-/// \brief Prints an include stack when appropriate for a particular diagnostic
-/// level and location.
-///
-/// This routine handles all the logic of suppressing particular include stacks
-/// (such as those for notes) and duplicate include stacks when repeated
-/// warnings occur within the same file. It also handles the logic of
-/// customizing the formatting and display of the include stack.
-///
-/// \param Level The diagnostic level of the message this stack pertains to.
-/// \param Loc The include location of the current file (not the diagnostic
-/// location).
-void TextDiagnosticPrinter::PrintIncludeStack(DiagnosticsEngine::Level Level,
- SourceLocation Loc,
- const SourceManager &SM) {
- // Skip redundant include stacks altogether.
- if (LastWarningLoc == Loc)
- return;
- LastWarningLoc = Loc;
-
- if (!DiagOpts->ShowNoteIncludeStack && Level == DiagnosticsEngine::Note)
- return;
-
- PrintIncludeStackRecursively(OS, SM, Loc, DiagOpts->ShowLocation);
-}
-
-/// \brief When the source code line we want to print is too long for
-/// the terminal, select the "interesting" region.
-static void SelectInterestingSourceRegion(std::string &SourceLine,
- std::string &CaretLine,
- std::string &FixItInsertionLine,
- unsigned EndOfCaretToken,
- unsigned Columns) {
- unsigned MaxSize = std::max(SourceLine.size(),
- std::max(CaretLine.size(),
- FixItInsertionLine.size()));
- if (MaxSize > SourceLine.size())
- SourceLine.resize(MaxSize, ' ');
- if (MaxSize > CaretLine.size())
- CaretLine.resize(MaxSize, ' ');
- if (!FixItInsertionLine.empty() && MaxSize > FixItInsertionLine.size())
- FixItInsertionLine.resize(MaxSize, ' ');
-
- // Find the slice that we need to display the full caret line
- // correctly.
- unsigned CaretStart = 0, CaretEnd = CaretLine.size();
- for (; CaretStart != CaretEnd; ++CaretStart)
- if (!isspace(CaretLine[CaretStart]))
- break;
-
- for (; CaretEnd != CaretStart; --CaretEnd)
- if (!isspace(CaretLine[CaretEnd - 1]))
- break;
-
- // Make sure we don't chop the string shorter than the caret token
- // itself.
- if (CaretEnd < EndOfCaretToken)
- CaretEnd = EndOfCaretToken;
-
- // If we have a fix-it line, make sure the slice includes all of the
- // fix-it information.
- if (!FixItInsertionLine.empty()) {
- unsigned FixItStart = 0, FixItEnd = FixItInsertionLine.size();
- for (; FixItStart != FixItEnd; ++FixItStart)
- if (!isspace(FixItInsertionLine[FixItStart]))
- break;
-
- for (; FixItEnd != FixItStart; --FixItEnd)
- if (!isspace(FixItInsertionLine[FixItEnd - 1]))
- break;
-
- if (FixItStart < CaretStart)
- CaretStart = FixItStart;
- if (FixItEnd > CaretEnd)
- CaretEnd = FixItEnd;
- }
-
- // CaretLine[CaretStart, CaretEnd) contains all of the interesting
- // parts of the caret line. While this slice is smaller than the
- // number of columns we have, try to grow the slice to encompass
- // more context.
-
- // If the end of the interesting region comes before we run out of
- // space in the terminal, start at the beginning of the line.
- if (Columns > 3 && CaretEnd < Columns - 3)
- CaretStart = 0;
-
- unsigned TargetColumns = Columns;
- if (TargetColumns > 8)
- TargetColumns -= 8; // Give us extra room for the ellipses.
- unsigned SourceLength = SourceLine.size();
- while ((CaretEnd - CaretStart) < TargetColumns) {
- bool ExpandedRegion = false;
- // Move the start of the interesting region left until we've
- // pulled in something else interesting.
- if (CaretStart == 1)
- CaretStart = 0;
- else if (CaretStart > 1) {
- unsigned NewStart = CaretStart - 1;
-
- // Skip over any whitespace we see here; we're looking for
- // another bit of interesting text.
- while (NewStart && isspace(SourceLine[NewStart]))
- --NewStart;
-
- // Skip over this bit of "interesting" text.
- while (NewStart && !isspace(SourceLine[NewStart]))
- --NewStart;
-
- // Move up to the non-whitespace character we just saw.
- if (NewStart)
- ++NewStart;
-
- // If we're still within our limit, update the starting
- // position within the source/caret line.
- if (CaretEnd - NewStart <= TargetColumns) {
- CaretStart = NewStart;
- ExpandedRegion = true;
- }
- }
-
- // Move the end of the interesting region right until we've
- // pulled in something else interesting.
- if (CaretEnd != SourceLength) {
- assert(CaretEnd < SourceLength && "Unexpected caret position!");
- unsigned NewEnd = CaretEnd;
-
- // Skip over any whitespace we see here; we're looking for
- // another bit of interesting text.
- while (NewEnd != SourceLength && isspace(SourceLine[NewEnd - 1]))
- ++NewEnd;
-
- // Skip over this bit of "interesting" text.
- while (NewEnd != SourceLength && !isspace(SourceLine[NewEnd - 1]))
- ++NewEnd;
-
- if (NewEnd - CaretStart <= TargetColumns) {
- CaretEnd = NewEnd;
- ExpandedRegion = true;
- }
- }
-
- if (!ExpandedRegion)
- break;
- }
-
- // [CaretStart, CaretEnd) is the slice we want. Update the various
- // output lines to show only this slice, with two-space padding
- // before the lines so that it looks nicer.
- if (CaretEnd < SourceLine.size())
- SourceLine.replace(CaretEnd, std::string::npos, "...");
- if (CaretEnd < CaretLine.size())
- CaretLine.erase(CaretEnd, std::string::npos);
- if (FixItInsertionLine.size() > CaretEnd)
- FixItInsertionLine.erase(CaretEnd, std::string::npos);
-
- if (CaretStart > 2) {
- SourceLine.replace(0, CaretStart, " ...");
- CaretLine.replace(0, CaretStart, " ");
- if (FixItInsertionLine.size() >= CaretStart)
- FixItInsertionLine.replace(0, CaretStart, " ");
- }
-}
-
-/// Look through spelling locations for a macro argument expansion, and
-/// if found skip to it so that we can trace the argument rather than the macros
-/// in which that argument is used. If no macro argument expansion is found,
-/// don't skip anything and return the starting location.
-static SourceLocation skipToMacroArgExpansion(const SourceManager &SM,
- SourceLocation StartLoc) {
- for (SourceLocation L = StartLoc; L.isMacroID();
- L = SM.getImmediateSpellingLoc(L)) {
- if (SM.isMacroArgExpansion(L))
- return L;
- }
-
- // Otherwise just return initial location, there's nothing to skip.
- return StartLoc;
-}
-
-/// Gets the location of the immediate macro caller, one level up the stack
-/// toward the initial macro typed into the source.
-static SourceLocation getImmediateMacroCallerLoc(const SourceManager &SM,
- SourceLocation Loc) {
- if (!Loc.isMacroID()) return Loc;
-
- // When we have the location of (part of) an expanded parameter, its spelling
- // location points to the argument as typed into the macro call, and
- // therefore is used to locate the macro caller.
- if (SM.isMacroArgExpansion(Loc))
- return SM.getImmediateSpellingLoc(Loc);
-
- // Otherwise, the caller of the macro is located where this macro is
- // expanded (while the spelling is part of the macro definition).
- return SM.getImmediateExpansionRange(Loc).first;
-}
-
-/// Gets the location of the immediate macro callee, one level down the stack
-/// toward the leaf macro.
-static SourceLocation getImmediateMacroCalleeLoc(const SourceManager &SM,
- SourceLocation Loc) {
- if (!Loc.isMacroID()) return Loc;
-
- // When we have the location of (part of) an expanded parameter, its
- // expansion location points to the unexpanded paramater reference within
- // the macro definition (or callee).
- if (SM.isMacroArgExpansion(Loc))
- return SM.getImmediateExpansionRange(Loc).first;
-
- // Otherwise, the callee of the macro is located where this location was
- // spelled inside the macro definition.
- return SM.getImmediateSpellingLoc(Loc);
-}
-
-namespace {
-
-/// \brief Class to encapsulate the logic for formatting and printing a textual
-/// diagnostic message.
-///
-/// This class provides an interface for building and emitting a textual
-/// diagnostic, including all of the macro backtraces, caret diagnostics, FixIt
-/// Hints, and code snippets. In the presence of macros this involves
-/// a recursive process, synthesizing notes for each macro expansion.
-///
-/// The purpose of this class is to isolate the implementation of printing
-/// beautiful text diagnostics from any particular interfaces. The Clang
-/// DiagnosticClient is implemented through this class as is diagnostic
-/// printing coming out of libclang.
-///
-/// A brief worklist:
-/// FIXME: Sink the printing of the diagnostic message itself into this class.
-/// FIXME: Sink the printing of the include stack into this class.
-/// FIXME: Remove the TextDiagnosticPrinter as an input.
-/// FIXME: Sink the recursive printing of template instantiations into this
-/// class.
-class TextDiagnostic {
- TextDiagnosticPrinter &Printer;
- raw_ostream &OS;
- const SourceManager &SM;
- const LangOptions &LangOpts;
- const DiagnosticOptions &DiagOpts;
-
-public:
- TextDiagnostic(TextDiagnosticPrinter &Printer,
- raw_ostream &OS,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- const DiagnosticOptions &DiagOpts)
- : Printer(Printer), OS(OS), SM(SM), LangOpts(LangOpts), DiagOpts(DiagOpts) {
- }
-
- /// \brief Emit the caret and underlining text.
- ///
- /// Walks up the macro expansion stack printing the code snippet, caret,
- /// underlines and FixItHint display as appropriate at each level. Walk is
- /// accomplished by calling itself recursively.
- ///
- /// FIXME: Remove macro expansion from this routine, it shouldn't be tied to
- /// caret diagnostics.
- /// FIXME: Break up massive function into logical units.
- ///
- /// \param Loc The location for this caret.
- /// \param Ranges The underlined ranges for this code snippet.
- /// \param Hints The FixIt hints active for this diagnostic.
- /// \param MacroSkipEnd The depth to stop skipping macro expansions.
- /// \param OnMacroInst The current depth of the macro expansion stack.
- void EmitCaret(SourceLocation Loc,
- SmallVectorImpl<CharSourceRange>& Ranges,
- ArrayRef<FixItHint> Hints,
- unsigned &MacroDepth,
- unsigned OnMacroInst = 0) {
- assert(!Loc.isInvalid() && "must have a valid source location here");
-
- // If this is a file source location, directly emit the source snippet and
- // caret line. Also record the macro depth reached.
- if (Loc.isFileID()) {
- assert(MacroDepth == 0 && "We shouldn't hit a leaf node twice!");
- MacroDepth = OnMacroInst;
- EmitSnippetAndCaret(Loc, Ranges, Hints);
- return;
- }
- // Otherwise recurse through each macro expansion layer.
-
- // When processing macros, skip over the expansions leading up to
- // a macro argument, and trace the argument's expansion stack instead.
- Loc = skipToMacroArgExpansion(SM, Loc);
-
- SourceLocation OneLevelUp = getImmediateMacroCallerLoc(SM, Loc);
-
- // FIXME: Map ranges?
- EmitCaret(OneLevelUp, Ranges, Hints, MacroDepth, OnMacroInst + 1);
-
- // Map the location.
- Loc = getImmediateMacroCalleeLoc(SM, Loc);
-
- unsigned MacroSkipStart = 0, MacroSkipEnd = 0;
- if (MacroDepth > DiagOpts.MacroBacktraceLimit) {
- MacroSkipStart = DiagOpts.MacroBacktraceLimit / 2 +
- DiagOpts.MacroBacktraceLimit % 2;
- MacroSkipEnd = MacroDepth - DiagOpts.MacroBacktraceLimit / 2;
- }
-
- // Whether to suppress printing this macro expansion.
- bool Suppressed = (OnMacroInst >= MacroSkipStart &&
- OnMacroInst < MacroSkipEnd);
-
- // Map the ranges.
- for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I) {
- SourceLocation Start = I->getBegin(), End = I->getEnd();
- if (Start.isMacroID())
- I->setBegin(getImmediateMacroCalleeLoc(SM, Start));
- if (End.isMacroID())
- I->setEnd(getImmediateMacroCalleeLoc(SM, End));
- }
-
- if (!Suppressed) {
- // Don't print recursive expansion notes from an expansion note.
- Loc = SM.getSpellingLoc(Loc);
-
- // Get the pretty name, according to #line directives etc.
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- if (PLoc.isInvalid())
- return;
-
- // If this diagnostic is not in the main file, print out the
- // "included from" lines.
- Printer.PrintIncludeStack(DiagnosticsEngine::Note, PLoc.getIncludeLoc(),
- SM);
-
- if (DiagOpts.ShowLocation) {
- // Emit the file/line/column that this expansion came from.
- OS << PLoc.getFilename() << ':' << PLoc.getLine() << ':';
- if (DiagOpts.ShowColumn)
- OS << PLoc.getColumn() << ':';
- OS << ' ';
- }
- OS << "note: expanded from:\n";
-
- EmitSnippetAndCaret(Loc, Ranges, ArrayRef<FixItHint>());
- return;
- }
-
- if (OnMacroInst == MacroSkipStart) {
- // Tell the user that we've skipped contexts.
- OS << "note: (skipping " << (MacroSkipEnd - MacroSkipStart)
- << " expansions in backtrace; use -fmacro-backtrace-limit=0 to see "
- "all)\n";
- }
- }
-
- /// \brief Emit a code snippet and caret line.
- ///
- /// This routine emits a single line's code snippet and caret line..
- ///
- /// \param Loc The location for the caret.
- /// \param Ranges The underlined ranges for this code snippet.
- /// \param Hints The FixIt hints active for this diagnostic.
- void EmitSnippetAndCaret(SourceLocation Loc,
- SmallVectorImpl<CharSourceRange>& Ranges,
- ArrayRef<FixItHint> Hints) {
- assert(!Loc.isInvalid() && "must have a valid source location here");
- assert(Loc.isFileID() && "must have a file location here");
-
- // Decompose the location into a FID/Offset pair.
- std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
- FileID FID = LocInfo.first;
- unsigned FileOffset = LocInfo.second;
-
- // Get information about the buffer it points into.
- bool Invalid = false;
- const char *BufStart = SM.getBufferData(FID, &Invalid).data();
- if (Invalid)
- return;
-
- unsigned LineNo = SM.getLineNumber(FID, FileOffset);
- unsigned ColNo = SM.getColumnNumber(FID, FileOffset);
- unsigned CaretEndColNo
- = ColNo + Lexer::MeasureTokenLength(Loc, SM, LangOpts);
-
- // Rewind from the current position to the start of the line.
- const char *TokPtr = BufStart+FileOffset;
- const char *LineStart = TokPtr-ColNo+1; // Column # is 1-based.
-
-
- // Compute the line end. Scan forward from the error position to the end of
- // the line.
- const char *LineEnd = TokPtr;
- while (*LineEnd != '\n' && *LineEnd != '\r' && *LineEnd != '\0')
- ++LineEnd;
-
- // FIXME: This shouldn't be necessary, but the CaretEndColNo can extend past
- // the source line length as currently being computed. See
- // test/Misc/message-length.c.
- CaretEndColNo = std::min(CaretEndColNo, unsigned(LineEnd - LineStart));
-
- // Copy the line of code into an std::string for ease of manipulation.
- std::string SourceLine(LineStart, LineEnd);
-
- // Create a line for the caret that is filled with spaces that is the same
- // length as the line of source code.
- std::string CaretLine(LineEnd-LineStart, ' ');
-
- // Highlight all of the characters covered by Ranges with ~ characters.
- for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I)
- HighlightRange(*I, LineNo, FID, SourceLine, CaretLine);
-
- // Next, insert the caret itself.
- if (ColNo-1 < CaretLine.size())
- CaretLine[ColNo-1] = '^';
- else
- CaretLine.push_back('^');
-
- ExpandTabs(SourceLine, CaretLine);
-
- // If we are in -fdiagnostics-print-source-range-info mode, we are trying
- // to produce easily machine parsable output. Add a space before the
- // source line and the caret to make it trivial to tell the main diagnostic
- // line from what the user is intended to see.
- if (DiagOpts.ShowSourceRanges) {
- SourceLine = ' ' + SourceLine;
- CaretLine = ' ' + CaretLine;
- }
-
- std::string FixItInsertionLine = BuildFixItInsertionLine(LineNo,
- LineStart, LineEnd,
- Hints);
-
- // If the source line is too long for our terminal, select only the
- // "interesting" source region within that line.
- unsigned Columns = DiagOpts.MessageLength;
- if (Columns && SourceLine.size() > Columns)
- SelectInterestingSourceRegion(SourceLine, CaretLine, FixItInsertionLine,
- CaretEndColNo, Columns);
-
- // Finally, remove any blank spaces from the end of CaretLine.
- while (CaretLine[CaretLine.size()-1] == ' ')
- CaretLine.erase(CaretLine.end()-1);
-
- // Emit what we have computed.
- OS << SourceLine << '\n';
-
- if (DiagOpts.ShowColors)
- OS.changeColor(caretColor, true);
- OS << CaretLine << '\n';
- if (DiagOpts.ShowColors)
- OS.resetColor();
-
- if (!FixItInsertionLine.empty()) {
- if (DiagOpts.ShowColors)
- // Print fixit line in color
- OS.changeColor(fixitColor, false);
- if (DiagOpts.ShowSourceRanges)
- OS << ' ';
- OS << FixItInsertionLine << '\n';
- if (DiagOpts.ShowColors)
- OS.resetColor();
- }
-
- // Print out any parseable fixit information requested by the options.
- EmitParseableFixits(Hints);
- }
-
-private:
- /// \brief Highlight a SourceRange (with ~'s) for any characters on LineNo.
- void HighlightRange(const CharSourceRange &R,
- unsigned LineNo, FileID FID,
- const std::string &SourceLine,
- std::string &CaretLine) {
- assert(CaretLine.size() == SourceLine.size() &&
- "Expect a correspondence between source and caret line!");
- if (!R.isValid()) return;
-
- SourceLocation Begin = SM.getExpansionLoc(R.getBegin());
- SourceLocation End = SM.getExpansionLoc(R.getEnd());
-
- // If the End location and the start location are the same and are a macro
- // location, then the range was something that came from a macro expansion
- // or _Pragma. If this is an object-like macro, the best we can do is to
- // highlight the range. If this is a function-like macro, we'd also like to
- // highlight the arguments.
- if (Begin == End && R.getEnd().isMacroID())
- End = SM.getExpansionRange(R.getEnd()).second;
-
- unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
- if (StartLineNo > LineNo || SM.getFileID(Begin) != FID)
- return; // No intersection.
-
- unsigned EndLineNo = SM.getExpansionLineNumber(End);
- if (EndLineNo < LineNo || SM.getFileID(End) != FID)
- return; // No intersection.
-
- // Compute the column number of the start.
- unsigned StartColNo = 0;
- if (StartLineNo == LineNo) {
- StartColNo = SM.getExpansionColumnNumber(Begin);
- if (StartColNo) --StartColNo; // Zero base the col #.
- }
-
- // Compute the column number of the end.
- unsigned EndColNo = CaretLine.size();
- if (EndLineNo == LineNo) {
- EndColNo = SM.getExpansionColumnNumber(End);
- if (EndColNo) {
- --EndColNo; // Zero base the col #.
-
- // Add in the length of the token, so that we cover multi-char tokens if
- // this is a token range.
- if (R.isTokenRange())
- EndColNo += Lexer::MeasureTokenLength(End, SM, LangOpts);
- } else {
- EndColNo = CaretLine.size();
- }
- }
-
- assert(StartColNo <= EndColNo && "Invalid range!");
-
- // Check that a token range does not highlight only whitespace.
- if (R.isTokenRange()) {
- // Pick the first non-whitespace column.
- while (StartColNo < SourceLine.size() &&
- (SourceLine[StartColNo] == ' ' || SourceLine[StartColNo] == '\t'))
- ++StartColNo;
-
- // Pick the last non-whitespace column.
- if (EndColNo > SourceLine.size())
- EndColNo = SourceLine.size();
- while (EndColNo-1 &&
- (SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t'))
- --EndColNo;
-
- // If the start/end passed each other, then we are trying to highlight a
- // range that just exists in whitespace, which must be some sort of other
- // bug.
- assert(StartColNo <= EndColNo && "Trying to highlight whitespace??");
- }
-
- // Fill the range with ~'s.
- for (unsigned i = StartColNo; i < EndColNo; ++i)
- CaretLine[i] = '~';
- }
-
- std::string BuildFixItInsertionLine(unsigned LineNo,
- const char *LineStart,
- const char *LineEnd,
- ArrayRef<FixItHint> Hints) {
- std::string FixItInsertionLine;
- if (Hints.empty() || !DiagOpts.ShowFixits)
- return FixItInsertionLine;
-
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- if (!I->CodeToInsert.empty()) {
- // We have an insertion hint. Determine whether the inserted
- // code is on the same line as the caret.
- std::pair<FileID, unsigned> HintLocInfo
- = SM.getDecomposedExpansionLoc(I->RemoveRange.getBegin());
- if (LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second)) {
- // Insert the new code into the line just below the code
- // that the user wrote.
- unsigned HintColNo
- = SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second);
- unsigned LastColumnModified
- = HintColNo - 1 + I->CodeToInsert.size();
- if (LastColumnModified > FixItInsertionLine.size())
- FixItInsertionLine.resize(LastColumnModified, ' ');
- std::copy(I->CodeToInsert.begin(), I->CodeToInsert.end(),
- FixItInsertionLine.begin() + HintColNo - 1);
- } else {
- FixItInsertionLine.clear();
- break;
- }
- }
- }
-
- if (FixItInsertionLine.empty())
- return FixItInsertionLine;
-
- // Now that we have the entire fixit line, expand the tabs in it.
- // Since we don't want to insert spaces in the middle of a word,
- // find each word and the column it should line up with and insert
- // spaces until they match.
- unsigned FixItPos = 0;
- unsigned LinePos = 0;
- unsigned TabExpandedCol = 0;
- unsigned LineLength = LineEnd - LineStart;
-
- while (FixItPos < FixItInsertionLine.size() && LinePos < LineLength) {
- // Find the next word in the FixIt line.
- while (FixItPos < FixItInsertionLine.size() &&
- FixItInsertionLine[FixItPos] == ' ')
- ++FixItPos;
- unsigned CharDistance = FixItPos - TabExpandedCol;
-
- // Walk forward in the source line, keeping track of
- // the tab-expanded column.
- for (unsigned I = 0; I < CharDistance; ++I, ++LinePos)
- if (LinePos >= LineLength || LineStart[LinePos] != '\t')
- ++TabExpandedCol;
- else
- TabExpandedCol =
- (TabExpandedCol/DiagOpts.TabStop + 1) * DiagOpts.TabStop;
-
- // Adjust the fixit line to match this column.
- FixItInsertionLine.insert(FixItPos, TabExpandedCol-FixItPos, ' ');
- FixItPos = TabExpandedCol;
-
- // Walk to the end of the word.
- while (FixItPos < FixItInsertionLine.size() &&
- FixItInsertionLine[FixItPos] != ' ')
- ++FixItPos;
- }
-
- return FixItInsertionLine;
- }
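[editor's note] The core of BuildFixItInsertionLine, before the tab-alignment pass, is "grow a blank line and splat the suggested text at the hint's column". A hedged sketch of that step only (toy code, 1-based HintColNo as in the function above):

    #include <algorithm>
    #include <iostream>
    #include <string>

    // Place CodeToInsert at 1-based column HintColNo in a growable blank
    // line, mirroring the resize + std::copy above (tab handling omitted).
    static std::string buildFixItLine(unsigned HintColNo,
                                      const std::string &CodeToInsert) {
      std::string Line;
      unsigned LastCol = HintColNo - 1 + CodeToInsert.size();
      if (LastCol > Line.size())
        Line.resize(LastCol, ' ');
      std::copy(CodeToInsert.begin(), CodeToInsert.end(),
                Line.begin() + HintColNo - 1);
      return Line;
    }

    int main() {
      // Suggest inserting ";" at column 14, just past "int x = y + z".
      std::cout << "int x = y + z\n" << buildFixItLine(14, ";") << '\n';
    }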
-
- void ExpandTabs(std::string &SourceLine, std::string &CaretLine) {
- // Scan the source line, looking for tabs. If we find any, manually expand
- // them to spaces and update the CaretLine to match.
- for (unsigned i = 0; i != SourceLine.size(); ++i) {
- if (SourceLine[i] != '\t') continue;
-
- // Replace this tab with at least one space.
- SourceLine[i] = ' ';
-
- // Compute the number of spaces we need to insert.
- unsigned TabStop = DiagOpts.TabStop;
- assert(0 < TabStop && TabStop <= DiagnosticOptions::MaxTabStop &&
- "Invalid -ftabstop value");
- unsigned NumSpaces = ((i+TabStop)/TabStop * TabStop) - (i+1);
- assert(NumSpaces < TabStop && "Invalid computation of space amt");
-
- // Insert spaces into the SourceLine.
- SourceLine.insert(i+1, NumSpaces, ' ');
-
- // Insert spaces or ~'s into CaretLine.
- CaretLine.insert(i+1, NumSpaces, CaretLine[i] == '~' ? '~' : ' ');
- }
- }
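[editor's note] The NumSpaces arithmetic above is worth checking once by hand: for a tab at 0-based index i, (i+TabStop)/TabStop*TabStop is the next multiple of TabStop past i, so after replacing the tab with one space and inserting NumSpaces more, the following character lands exactly on a tab stop. A quick self-contained check:

    #include <cassert>

    int main() {
      const unsigned TabStop = 8; // same role as DiagOpts.TabStop
      for (unsigned i = 0; i != 16; ++i) {
        unsigned NumSpaces = ((i + TabStop) / TabStop * TabStop) - (i + 1);
        assert(NumSpaces < TabStop);                 // matches the assert above
        assert((i + 1 + NumSpaces) % TabStop == 0);  // next char on a tab stop
      }
    }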
-
- void EmitParseableFixits(ArrayRef<FixItHint> Hints) {
- if (!DiagOpts.ShowParseableFixits)
- return;
-
- // We follow FixItRewriter's example in not (yet) handling
- // fix-its in macros.
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- if (I->RemoveRange.isInvalid() ||
- I->RemoveRange.getBegin().isMacroID() ||
- I->RemoveRange.getEnd().isMacroID())
- return;
- }
-
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- SourceLocation BLoc = I->RemoveRange.getBegin();
- SourceLocation ELoc = I->RemoveRange.getEnd();
-
- std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
- std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
-
- // Adjust for token ranges.
- if (I->RemoveRange.isTokenRange())
- EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, LangOpts);
-
- // We specifically do not do word-wrapping or tab-expansion here,
- // because this is supposed to be easy to parse.
- PresumedLoc PLoc = SM.getPresumedLoc(BLoc);
- if (PLoc.isInvalid())
- break;
-
- OS << "fix-it:\"";
- OS.write_escaped(PLoc.getFilename());
- OS << "\":{" << SM.getLineNumber(BInfo.first, BInfo.second)
- << ':' << SM.getColumnNumber(BInfo.first, BInfo.second)
- << '-' << SM.getLineNumber(EInfo.first, EInfo.second)
- << ':' << SM.getColumnNumber(EInfo.first, EInfo.second)
- << "}:\"";
- OS.write_escaped(I->CodeToInsert);
- OS << "\"\n";
- }
- }
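[editor's note] For reference, each hint above comes out as one machine-readable line; a made-up sample (line/column numbers are 1-based and the end column is exclusive, since the token length is added to it):

    fix-it:"t.c":{10:4-10:7}:"bar"

i.e. replace the text in columns 4-6 of line 10 of t.c with "bar".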
-};
-
-} // end namespace
-
-/// Get the presumed location of a diagnostic message. This computes the
-/// presumed location for the top of any macro backtrace when present.
-static PresumedLoc getDiagnosticPresumedLoc(const SourceManager &SM,
- SourceLocation Loc) {
- // This is a condensed form of the algorithm used by EmitCaretDiagnostic to
- // walk to the top of the macro call stack.
- while (Loc.isMacroID()) {
- Loc = skipToMacroArgExpansion(SM, Loc);
- Loc = getImmediateMacroCallerLoc(SM, Loc);
- }
-
- return SM.getPresumedLoc(Loc);
-}
-
-/// \brief Print out the file/line/column information and include trace.
-///
-/// This method handles the emission of the diagnostic location information.
-/// This includes extracting as much location information as is present for the
-/// diagnostic and printing it, as well as any include stack or source ranges
-/// necessary.
-void TextDiagnosticPrinter::EmitDiagnosticLoc(DiagnosticsEngine::Level Level,
- const Diagnostic &Info,
- const SourceManager &SM,
- PresumedLoc PLoc) {
- if (PLoc.isInvalid()) {
- // At least print the file name if available:
- FileID FID = SM.getFileID(Info.getLocation());
- if (!FID.isInvalid()) {
- const FileEntry* FE = SM.getFileEntryForID(FID);
- if (FE && FE->getName()) {
- OS << FE->getName();
- if (FE->getDevice() == 0 && FE->getInode() == 0
- && FE->getFileMode() == 0) {
-          // "(in PCH)" is a guess, but a good one:
- OS << " (in PCH)";
- }
- OS << ": ";
- }
- }
- return;
- }
- unsigned LineNo = PLoc.getLine();
-
- if (!DiagOpts->ShowLocation)
- return;
-
- if (DiagOpts->ShowColors)
- OS.changeColor(savedColor, true);
-
- OS << PLoc.getFilename();
- switch (DiagOpts->Format) {
- case DiagnosticOptions::Clang: OS << ':' << LineNo; break;
- case DiagnosticOptions::Msvc: OS << '(' << LineNo; break;
- case DiagnosticOptions::Vi: OS << " +" << LineNo; break;
- }
-
- if (DiagOpts->ShowColumn)
- // Compute the column number.
- if (unsigned ColNo = PLoc.getColumn()) {
- if (DiagOpts->Format == DiagnosticOptions::Msvc) {
- OS << ',';
- ColNo--;
- } else
- OS << ':';
- OS << ColNo;
- }
- switch (DiagOpts->Format) {
- case DiagnosticOptions::Clang:
- case DiagnosticOptions::Vi: OS << ':'; break;
- case DiagnosticOptions::Msvc: OS << ") : "; break;
- }
-
- if (DiagOpts->ShowSourceRanges && Info.getNumRanges()) {
- FileID CaretFileID =
- SM.getFileID(SM.getExpansionLoc(Info.getLocation()));
- bool PrintedRange = false;
-
- for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i) {
- // Ignore invalid ranges.
- if (!Info.getRange(i).isValid()) continue;
-
- SourceLocation B = Info.getRange(i).getBegin();
- SourceLocation E = Info.getRange(i).getEnd();
- B = SM.getExpansionLoc(B);
- E = SM.getExpansionLoc(E);
-
- // If the End location and the start location are the same and are a
- // macro location, then the range was something that came from a
- // macro expansion or _Pragma. If this is an object-like macro, the
- // best we can do is to highlight the range. If this is a
- // function-like macro, we'd also like to highlight the arguments.
- if (B == E && Info.getRange(i).getEnd().isMacroID())
- E = SM.getExpansionRange(Info.getRange(i).getEnd()).second;
-
- std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
- std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
-
- // If the start or end of the range is in another file, just discard
- // it.
- if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
- continue;
-
- // Add in the length of the token, so that we cover multi-char
- // tokens.
- unsigned TokSize = 0;
- if (Info.getRange(i).isTokenRange())
- TokSize = Lexer::MeasureTokenLength(E, SM, *LangOpts);
-
- OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
- << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
- << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
- << (SM.getColumnNumber(EInfo.first, EInfo.second)+TokSize)
- << '}';
- PrintedRange = true;
- }
-
- if (PrintedRange)
- OS << ':';
- }
- OS << ' ';
-}
-
-/// \brief Print the diagnostic level to a raw_ostream.
-///
-/// Handles colorizing the level and formatting.
-static void printDiagnosticLevel(raw_ostream &OS,
- DiagnosticsEngine::Level Level,
- bool ShowColors) {
- if (ShowColors) {
- // Print diagnostic category in bold and color
- switch (Level) {
- case DiagnosticsEngine::Ignored:
- llvm_unreachable("Invalid diagnostic type");
- case DiagnosticsEngine::Note: OS.changeColor(noteColor, true); break;
- case DiagnosticsEngine::Warning: OS.changeColor(warningColor, true); break;
- case DiagnosticsEngine::Error: OS.changeColor(errorColor, true); break;
- case DiagnosticsEngine::Fatal: OS.changeColor(fatalColor, true); break;
- }
- }
-
- switch (Level) {
- case DiagnosticsEngine::Ignored: llvm_unreachable("Invalid diagnostic type");
- case DiagnosticsEngine::Note: OS << "note: "; break;
- case DiagnosticsEngine::Warning: OS << "warning: "; break;
- case DiagnosticsEngine::Error: OS << "error: "; break;
- case DiagnosticsEngine::Fatal: OS << "fatal error: "; break;
- }
-
- if (ShowColors)
- OS.resetColor();
+void TextDiagnosticPrinter::BeginSourceFile(const LangOptions &LO,
+ const Preprocessor *PP) {
+ LangOpts = &LO;
}
-/// \brief Print the diagnostic name to a raw_ostream.
-///
-/// This prints the diagnostic name to a raw_ostream if it has one. It formats
-/// the name according to the expected diagnostic message formatting:
-/// " [diagnostic_name_here]"
-static void printDiagnosticName(raw_ostream &OS, const Diagnostic &Info) {
- if (!DiagnosticIDs::isBuiltinNote(Info.getID()))
- OS << " [" << DiagnosticIDs::getName(Info.getID()) << "]";
+void TextDiagnosticPrinter::EndSourceFile() {
+ LangOpts = 0;
+ TextDiag.reset(0);
}
/// \brief Print any diagnostic option information to a raw_ostream.
@@ -996,182 +115,6 @@ static void printDiagnosticOptions(raw_ostream &OS,
OS << ']';
}
-/// \brief Skip over whitespace in the string, starting at the given
-/// index.
-///
-/// \returns The index of the first non-whitespace character that is
-/// greater than or equal to Idx or, if no such character exists,
-/// returns the end of the string.
-static unsigned skipWhitespace(unsigned Idx, StringRef Str, unsigned Length) {
- while (Idx < Length && isspace(Str[Idx]))
- ++Idx;
- return Idx;
-}
-
-/// \brief If the given character is the start of some kind of
-/// balanced punctuation (e.g., quotes or parentheses), return the
-/// character that will terminate the punctuation.
-///
-/// \returns The ending punctuation character, if any, or the NULL
-/// character if the input character does not start any punctuation.
-static inline char findMatchingPunctuation(char c) {
- switch (c) {
- case '\'': return '\'';
- case '`': return '\'';
- case '"': return '"';
- case '(': return ')';
- case '[': return ']';
- case '{': return '}';
- default: break;
- }
-
- return 0;
-}
-
-/// \brief Find the end of the word starting at the given offset
-/// within a string.
-///
-/// \returns the index pointing one character past the end of the
-/// word.
-static unsigned findEndOfWord(unsigned Start, StringRef Str,
- unsigned Length, unsigned Column,
- unsigned Columns) {
- assert(Start < Str.size() && "Invalid start position!");
- unsigned End = Start + 1;
-
- // If we are already at the end of the string, take that as the word.
- if (End == Str.size())
- return End;
-
- // Determine if the start of the string is actually opening
- // punctuation, e.g., a quote or parentheses.
- char EndPunct = findMatchingPunctuation(Str[Start]);
- if (!EndPunct) {
- // This is a normal word. Just find the first space character.
- while (End < Length && !isspace(Str[End]))
- ++End;
- return End;
- }
-
- // We have the start of a balanced punctuation sequence (quotes,
- // parentheses, etc.). Determine where the full sequence ends.
- llvm::SmallString<16> PunctuationEndStack;
- PunctuationEndStack.push_back(EndPunct);
- while (End < Length && !PunctuationEndStack.empty()) {
- if (Str[End] == PunctuationEndStack.back())
- PunctuationEndStack.pop_back();
- else if (char SubEndPunct = findMatchingPunctuation(Str[End]))
- PunctuationEndStack.push_back(SubEndPunct);
-
- ++End;
- }
-
- // Find the first space character after the punctuation ended.
- while (End < Length && !isspace(Str[End]))
- ++End;
-
- unsigned PunctWordLength = End - Start;
- if (// If the word fits on this line
- Column + PunctWordLength <= Columns ||
- // ... or the word is "short enough" to take up the next line
- // without too much ugly white space
- PunctWordLength < Columns/3)
- return End; // Take the whole thing as a single "word".
-
- // The whole quoted/parenthesized string is too long to print as a
- // single "word". Instead, find the "word" that starts just after
- // the punctuation and use that end-point instead. This will recurse
- // until it finds something small enough to consider a word.
- return findEndOfWord(Start + 1, Str, Length, Column + 1, Columns);
-}
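[editor's note] The punctuation-stack scan above keeps a quoted or parenthesized run together as one "word" when it fits the budget. A standalone sketch of just that scan, assuming the same matching rules as findMatchingPunctuation (toy code, not the clang implementation):

    #include <iostream>
    #include <string>
    #include <vector>

    // Matching close character, or 0 if c opens nothing.
    static char closer(char c) {
      switch (c) {
      case '\'': case '`': return '\'';
      case '"':  return '"';
      case '(':  return ')';
      case '[':  return ']';
      case '{':  return '}';
      default:   return 0;
      }
    }

    // Index one past the end of the balanced run starting at Start.
    static size_t endOfPunctuatedWord(const std::string &S, size_t Start) {
      std::vector<char> Stack;
      Stack.push_back(closer(S[Start]));
      size_t End = Start + 1;
      while (End < S.size() && !Stack.empty()) {
        if (S[End] == Stack.back())
          Stack.pop_back();
        else if (char C = closer(S[End]))
          Stack.push_back(C);
        ++End;
      }
      return End;
    }

    int main() {
      std::string S = "call 'f(a, b)' here";
      std::cout << S.substr(5, endOfPunctuatedWord(S, 5) - 5) << '\n';
      // 'f(a, b)'  -- treated as a single word for wrapping purposes
    }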
-
-/// \brief Print the given string to a stream, word-wrapping it to
-/// some number of columns in the process.
-///
-/// \param OS the stream to which the word-wrapping string will be
-/// emitted.
-/// \param Str the string to word-wrap and output.
-/// \param Columns the number of columns to word-wrap to.
-/// \param Column the column number at which the first character of \p
-/// Str will be printed. This will be non-zero when part of the first
-/// line has already been printed.
-/// \param Indentation the number of spaces to indent any lines beyond
-/// the first line.
-/// \returns true if word-wrapping was required, or false if the
-/// string fit on the first line.
-static bool printWordWrapped(raw_ostream &OS, StringRef Str,
- unsigned Columns,
- unsigned Column = 0,
- unsigned Indentation = WordWrapIndentation) {
- const unsigned Length = std::min(Str.find('\n'), Str.size());
-
- // The string used to indent each line.
- llvm::SmallString<16> IndentStr;
- IndentStr.assign(Indentation, ' ');
- bool Wrapped = false;
- for (unsigned WordStart = 0, WordEnd; WordStart < Length;
- WordStart = WordEnd) {
- // Find the beginning of the next word.
- WordStart = skipWhitespace(WordStart, Str, Length);
- if (WordStart == Length)
- break;
-
- // Find the end of this word.
- WordEnd = findEndOfWord(WordStart, Str, Length, Column, Columns);
-
- // Does this word fit on the current line?
- unsigned WordLength = WordEnd - WordStart;
- if (Column + WordLength < Columns) {
- // This word fits on the current line; print it there.
- if (WordStart) {
- OS << ' ';
- Column += 1;
- }
- OS << Str.substr(WordStart, WordLength);
- Column += WordLength;
- continue;
- }
-
- // This word does not fit on the current line, so wrap to the next
- // line.
- OS << '\n';
- OS.write(&IndentStr[0], Indentation);
- OS << Str.substr(WordStart, WordLength);
- Column = Indentation + WordLength;
- Wrapped = true;
- }
-
-  // Append any remaining text from the message with its existing formatting.
- OS << Str.substr(Length);
-
- return Wrapped;
-}
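[editor's note] For a feel of the result, a made-up diagnostic wrapped to Columns = 40 looks roughly like this (continuation lines are indented by WordWrapIndentation spaces; the exact constant is defined earlier in this file):

    t.c:3:9: warning: comparison between
        pointer and integer ('int *' and
        'int')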
-
-static void printDiagnosticMessage(raw_ostream &OS,
- DiagnosticsEngine::Level Level,
- StringRef Message,
- unsigned CurrentColumn, unsigned Columns,
- bool ShowColors) {
- if (ShowColors) {
- // Print warnings, errors and fatal errors in bold, no color
- switch (Level) {
- case DiagnosticsEngine::Warning: OS.changeColor(savedColor, true); break;
- case DiagnosticsEngine::Error: OS.changeColor(savedColor, true); break;
- case DiagnosticsEngine::Fatal: OS.changeColor(savedColor, true); break;
- default: break; //don't bold notes
- }
- }
-
- if (Columns)
- printWordWrapped(OS, Message, Columns, CurrentColumn);
- else
- OS << Message;
-
- if (ShowColors)
- OS.resetColor();
- OS << '\n';
-}
-
void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
const Diagnostic &Info) {
// Default implementation (Warnings/errors count).
@@ -1179,15 +122,12 @@ void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
// Render the diagnostic message into a temporary buffer eagerly. We'll use
// this later as we print out the diagnostic to the terminal.
- llvm::SmallString<100> OutStr;
+ SmallString<100> OutStr;
Info.FormatDiagnostic(OutStr);
llvm::raw_svector_ostream DiagMessageStream(OutStr);
- if (DiagOpts->ShowNames)
- printDiagnosticName(DiagMessageStream, Info);
printDiagnosticOptions(DiagMessageStream, Level, Info, *DiagOpts);
-
  // Keeps track of the starting position
// information (e.g., "foo.c:10:4:") that precedes the error
// message. We use this information to determine how long the
@@ -1202,10 +142,11 @@ void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
// diagnostics in a context that lacks language options, a source manager, or
// other infrastructure necessary when emitting more rich diagnostics.
if (!Info.getLocation().isValid()) {
- printDiagnosticLevel(OS, Level, DiagOpts->ShowColors);
- printDiagnosticMessage(OS, Level, DiagMessageStream.str(),
- OS.tell() - StartOfLocationInfo,
- DiagOpts->MessageLength, DiagOpts->ShowColors);
+ TextDiagnostic::printDiagnosticLevel(OS, Level, DiagOpts->ShowColors);
+ TextDiagnostic::printDiagnosticMessage(OS, Level, DiagMessageStream.str(),
+ OS.tell() - StartOfLocationInfo,
+ DiagOpts->MessageLength,
+ DiagOpts->ShowColors);
OS.flush();
return;
}
@@ -1215,60 +156,19 @@ void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
assert(DiagOpts && "Unexpected diagnostic without options set");
assert(Info.hasSourceManager() &&
"Unexpected diagnostic with no source manager");
- const SourceManager &SM = Info.getSourceManager();
- TextDiagnostic TextDiag(*this, OS, SM, *LangOpts, *DiagOpts);
-
- PresumedLoc PLoc = getDiagnosticPresumedLoc(SM, Info.getLocation());
-
- // First, if this diagnostic is not in the main file, print out the
- // "included from" lines.
- PrintIncludeStack(Level, PLoc.getIncludeLoc(), SM);
- StartOfLocationInfo = OS.tell();
-
- // Next emit the location of this particular diagnostic.
- EmitDiagnosticLoc(Level, Info, SM, PLoc);
- if (DiagOpts->ShowColors)
- OS.resetColor();
-
- printDiagnosticLevel(OS, Level, DiagOpts->ShowColors);
- printDiagnosticMessage(OS, Level, DiagMessageStream.str(),
- OS.tell() - StartOfLocationInfo,
- DiagOpts->MessageLength, DiagOpts->ShowColors);
-
- // If caret diagnostics are enabled and we have location, we want to
- // emit the caret. However, we only do this if the location moved
- // from the last diagnostic, if the last diagnostic was a note that
- // was part of a different warning or error diagnostic, or if the
- // diagnostic has ranges. We don't want to emit the same caret
- // multiple times if one loc has multiple diagnostics.
- if (DiagOpts->ShowCarets &&
- ((LastLoc != Info.getLocation()) || Info.getNumRanges() ||
- (LastCaretDiagnosticWasNote && Level != DiagnosticsEngine::Note) ||
- Info.getNumFixItHints())) {
- // Cache the LastLoc, it allows us to omit duplicate source/caret spewage.
- LastLoc = FullSourceLoc(Info.getLocation(), Info.getSourceManager());
- LastCaretDiagnosticWasNote = (Level == DiagnosticsEngine::Note);
-
- // Get the ranges into a local array we can hack on.
- SmallVector<CharSourceRange, 20> Ranges;
- Ranges.reserve(Info.getNumRanges());
- for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
- Ranges.push_back(Info.getRange(i));
-
- for (unsigned i = 0, e = Info.getNumFixItHints(); i != e; ++i) {
- const FixItHint &Hint = Info.getFixItHint(i);
- if (Hint.RemoveRange.isValid())
- Ranges.push_back(Hint.RemoveRange);
- }
-
- unsigned MacroDepth = 0;
- TextDiag.EmitCaret(LastLoc, Ranges,
- llvm::makeArrayRef(Info.getFixItHints(),
- Info.getNumFixItHints()),
- MacroDepth);
+ // Rebuild the TextDiagnostic utility if missing or the source manager has
+ // changed.
+ if (!TextDiag || SM != &Info.getSourceManager()) {
+ SM = &Info.getSourceManager();
+ TextDiag.reset(new TextDiagnostic(OS, *SM, *LangOpts, *DiagOpts));
}
+ TextDiag->emitDiagnostic(Info.getLocation(), Level, DiagMessageStream.str(),
+ Info.getRanges(),
+ llvm::makeArrayRef(Info.getFixItHints(),
+ Info.getNumFixItHints()));
+
OS.flush();
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index cf35c8e..552282d 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -18,6 +18,8 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
+#include <climits>
+
using namespace clang;
VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &_Diags)
@@ -38,7 +40,7 @@ VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
// DiagnosticConsumer interface.
void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
- const Preprocessor *PP) {
+ const Preprocessor *PP) {
  // FIXME: Const hack, we screw up the preprocessor but in practice it's ok
// because it doesn't get reused. It would be better if we could make a copy
// though.
@@ -82,6 +84,9 @@ public:
static Directive* Create(bool RegexKind, const SourceLocation &Location,
const std::string &Text, unsigned Count);
public:
+  /// Constant representing one or more matches, i.e. the regex "+" quantifier.
+ static const unsigned OneOrMoreCount = UINT_MAX;
+
SourceLocation Location;
const std::string Text;
unsigned Count;
@@ -119,8 +124,7 @@ public:
}
virtual bool Match(const std::string &S) {
- return S.find(Text) != std::string::npos ||
- Text.find(S) != std::string::npos;
+ return S.find(Text) != std::string::npos;
}
};
@@ -277,10 +281,14 @@ static void ParseDirective(const char *CommentStart, unsigned CommentLen,
// skip optional whitespace
PH.SkipWhitespace();
- // next optional token: positive integer
+ // next optional token: positive integer or a '+'.
unsigned Count = 1;
if (PH.Next(Count))
PH.Advance();
+ else if (PH.Next("+")) {
+ Count = Directive::OneOrMoreCount;
+ PH.Advance();
+ }
// skip optional whitespace
PH.SkipWhitespace();
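[editor's note] With the '+' token accepted where the count used to go, a verify-mode test can demand "one or more" matches instead of an exact count; a hypothetical test line:

    int x; // expected-warning + {{unused variable 'x'}}

An exact count (e.g. "expected-warning 2 {{...}}") continues to work as before.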
@@ -340,7 +348,7 @@ static void FindExpectedDiags(Preprocessor &PP, ExpectedData &ED, FileID FID) {
SourceManager& SM = PP.getSourceManager();
// Create a lexer to lex all the tokens of the main file in raw mode.
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
- Lexer RawLex(FID, FromFile, SM, PP.getLangOptions());
+ Lexer RawLex(FID, FromFile, SM, PP.getLangOpts());
// Return comments as tokens, this is how we find expected diagnostics.
RawLex.SetCommentRetentionState(true);
@@ -370,7 +378,7 @@ static unsigned PrintProblem(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
const char *Kind, bool Expected) {
if (diag_begin == diag_end) return 0;
- llvm::SmallString<256> Fmt;
+ SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
for (const_diag_iterator I = diag_begin, E = diag_end; I != E; ++I) {
if (I->first.isInvalid() || !SourceMgr)
@@ -391,7 +399,7 @@ static unsigned PrintProblem(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
if (DL.empty())
return 0;
- llvm::SmallString<256> Fmt;
+ SmallString<256> Fmt;
llvm::raw_svector_ostream OS(Fmt);
for (DirectiveList::iterator I = DL.begin(), E = DL.end(); I != E; ++I) {
Directive& D = **I;
@@ -421,6 +429,7 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
for (DirectiveList::iterator I = Left.begin(), E = Left.end(); I != E; ++I) {
Directive& D = **I;
unsigned LineNo1 = SourceMgr.getPresumedLineNumber(D.Location);
+ bool FoundOnce = false;
for (unsigned i = 0; i < D.Count; ++i) {
DiagList::iterator II, IE;
@@ -434,19 +443,26 @@ static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
break;
}
if (II == IE) {
+ if (D.Count == D.OneOrMoreCount) {
+ if (!FoundOnce)
+ LeftOnly.push_back(*I);
+ // We are only interested in at least one match, so exit the loop.
+ break;
+ }
// Not found.
LeftOnly.push_back(*I);
} else {
// Found. The same cannot be found twice.
Right.erase(II);
+ FoundOnce = true;
}
}
}
// Now all that's left in Right are those that were not matched.
-
- return (PrintProblem(Diags, &SourceMgr, LeftOnly, Label, true) +
- PrintProblem(Diags, &SourceMgr, Right.begin(), Right.end(),
- Label, false));
+ unsigned num = PrintProblem(Diags, &SourceMgr, LeftOnly, Label, true);
+ num += PrintProblem(Diags, &SourceMgr, Right.begin(), Right.end(),
+ Label, false);
+ return num;
}
/// CheckResults - This compares the expected results to those that
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
index 8fbcd4b..ec5fde0 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
@@ -31,6 +31,22 @@
#include <algorithm>
using namespace clang;
+// EmitUnknownDiagWarning - Emit a warning and typo hint for unknown warning
+// opts
+static void EmitUnknownDiagWarning(DiagnosticsEngine &Diags,
+ StringRef Prefix, StringRef Opt,
+ bool isPositive) {
+ StringRef Suggestion = DiagnosticIDs::getNearestWarningOption(Opt);
+ if (!Suggestion.empty())
+ Diags.Report(isPositive? diag::warn_unknown_warning_option_suggest :
+ diag::warn_unknown_negative_warning_option_suggest)
+ << (Prefix.str() += Opt) << (Prefix.str() += Suggestion);
+ else
+ Diags.Report(isPositive? diag::warn_unknown_warning_option :
+ diag::warn_unknown_negative_warning_option)
+ << (Prefix.str() += Opt);
+}
+
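[editor's note] The practical effect is a typo suggestion for near-miss warning flags; for a misspelled group the output looks roughly like this (made-up spelling, exact diagnostic wording is defined in the .td files):

    warning: unknown warning option '-Wformat-securty'; did you mean '-Wformat-security'?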
void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
const DiagnosticOptions &Opts) {
Diags.setSuppressSystemWarnings(true); // Default to -Wno-system-headers
@@ -43,6 +59,8 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
Diags.setErrorLimit(Opts.ErrorLimit);
if (Opts.TemplateBacktraceLimit)
Diags.setTemplateBacktraceLimit(Opts.TemplateBacktraceLimit);
+ if (Opts.ConstexprBacktraceLimit)
+ Diags.setConstexprBacktraceLimit(Opts.ConstexprBacktraceLimit);
// If -pedantic or -pedantic-errors was specified, then we want to map all
// extension diagnostics onto WARNING or ERROR unless the user has futz'd
@@ -54,92 +72,120 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
else
Diags.setExtensionHandlingBehavior(DiagnosticsEngine::Ext_Ignore);
- for (unsigned i = 0, e = Opts.Warnings.size(); i != e; ++i) {
- StringRef Opt = Opts.Warnings[i];
+ llvm::SmallVector<diag::kind, 10> _Diags;
+ const IntrusiveRefCntPtr< DiagnosticIDs > DiagIDs =
+ Diags.getDiagnosticIDs();
+ // We parse the warning options twice. The first pass sets diagnostic state,
+ // while the second pass reports warnings/errors. This has the effect that
+ // we follow the more canonical "last option wins" paradigm when there are
+ // conflicting options.
+ for (unsigned Report = 0, ReportEnd = 2; Report != ReportEnd; ++Report) {
+ bool SetDiagnostic = (Report == 0);
+ for (unsigned i = 0, e = Opts.Warnings.size(); i != e; ++i) {
+ StringRef Opt = Opts.Warnings[i];
- // Check to see if this warning starts with "no-", if so, this is a negative
- // form of the option.
- bool isPositive = true;
- if (Opt.startswith("no-")) {
- isPositive = false;
- Opt = Opt.substr(3);
- }
+ // Treat -Wformat=0 as an alias for -Wno-format.
+ if (Opt == "format=0")
+ Opt = "no-format";
- // Figure out how this option affects the warning. If -Wfoo, map the
- // diagnostic to a warning, if -Wno-foo, map it to ignore.
- diag::Mapping Mapping = isPositive ? diag::MAP_WARNING : diag::MAP_IGNORE;
-
- // -Wsystem-headers is a special case, not driven by the option table. It
- // cannot be controlled with -Werror.
- if (Opt == "system-headers") {
- Diags.setSuppressSystemWarnings(!isPositive);
- continue;
- }
-
- // -Weverything is a special case as well. It implicitly enables all
- // warnings, including ones not explicitly in a warning group.
- if (Opt == "everything") {
- Diags.setEnableAllWarnings(true);
- continue;
- }
-
- // -Werror/-Wno-error is a special case, not controlled by the option table.
- // It also has the "specifier" form of -Werror=foo and -Werror-foo.
- if (Opt.startswith("error")) {
- StringRef Specifier;
- if (Opt.size() > 5) { // Specifier must be present.
- if ((Opt[5] != '=' && Opt[5] != '-') || Opt.size() == 6) {
- Diags.Report(diag::warn_unknown_warning_specifier)
- << "-Werror" << ("-W" + Opt.str());
- continue;
- }
- Specifier = Opt.substr(6);
+ // Check to see if this warning starts with "no-", if so, this is a
+ // negative form of the option.
+ bool isPositive = true;
+ if (Opt.startswith("no-")) {
+ isPositive = false;
+ Opt = Opt.substr(3);
}
- if (Specifier.empty()) {
- Diags.setWarningsAsErrors(isPositive);
+ // Figure out how this option affects the warning. If -Wfoo, map the
+ // diagnostic to a warning, if -Wno-foo, map it to ignore.
+ diag::Mapping Mapping = isPositive ? diag::MAP_WARNING : diag::MAP_IGNORE;
+
+ // -Wsystem-headers is a special case, not driven by the option table. It
+ // cannot be controlled with -Werror.
+ if (Opt == "system-headers") {
+ if (SetDiagnostic)
+ Diags.setSuppressSystemWarnings(!isPositive);
continue;
}
-
- // Set the warning as error flag for this specifier.
- if (Diags.setDiagnosticGroupWarningAsError(Specifier, isPositive)) {
- Diags.Report(isPositive ? diag::warn_unknown_warning_option :
- diag::warn_unknown_negative_warning_option)
- << ("-W" + Opt.str());
+
+ // -Weverything is a special case as well. It implicitly enables all
+ // warnings, including ones not explicitly in a warning group.
+ if (Opt == "everything") {
+ if (SetDiagnostic) {
+ if (isPositive) {
+ Diags.setEnableAllWarnings(true);
+ } else {
+ Diags.setEnableAllWarnings(false);
+ Diags.setMappingToAllDiagnostics(diag::MAP_IGNORE);
+ }
+ }
+ continue;
}
- continue;
- }
-
- // -Wfatal-errors is yet another special case.
- if (Opt.startswith("fatal-errors")) {
- StringRef Specifier;
- if (Opt.size() != 12) {
- if ((Opt[12] != '=' && Opt[12] != '-') || Opt.size() == 13) {
- Diags.Report(diag::warn_unknown_warning_specifier)
- << "-Wfatal-errors" << ("-W" + Opt.str());
+
+ // -Werror/-Wno-error is a special case, not controlled by the option
+ // table. It also has the "specifier" form of -Werror=foo and -Werror-foo.
+ if (Opt.startswith("error")) {
+ StringRef Specifier;
+ if (Opt.size() > 5) { // Specifier must be present.
+ if ((Opt[5] != '=' && Opt[5] != '-') || Opt.size() == 6) {
+ if (Report)
+ Diags.Report(diag::warn_unknown_warning_specifier)
+ << "-Werror" << ("-W" + Opt.str());
+ continue;
+ }
+ Specifier = Opt.substr(6);
+ }
+
+ if (Specifier.empty()) {
+ if (SetDiagnostic)
+ Diags.setWarningsAsErrors(isPositive);
continue;
}
- Specifier = Opt.substr(13);
+
+ if (SetDiagnostic) {
+ // Set the warning as error flag for this specifier.
+ Diags.setDiagnosticGroupWarningAsError(Specifier, isPositive);
+ } else if (DiagIDs->getDiagnosticsInGroup(Specifier, _Diags)) {
+ EmitUnknownDiagWarning(Diags, "-Werror=", Specifier, isPositive);
+ }
+ continue;
}
+
+ // -Wfatal-errors is yet another special case.
+ if (Opt.startswith("fatal-errors")) {
+ StringRef Specifier;
+ if (Opt.size() != 12) {
+ if ((Opt[12] != '=' && Opt[12] != '-') || Opt.size() == 13) {
+ if (Report)
+ Diags.Report(diag::warn_unknown_warning_specifier)
+ << "-Wfatal-errors" << ("-W" + Opt.str());
+ continue;
+ }
+ Specifier = Opt.substr(13);
+ }
- if (Specifier.empty()) {
- Diags.setErrorsAsFatal(isPositive);
+ if (Specifier.empty()) {
+ if (SetDiagnostic)
+ Diags.setErrorsAsFatal(isPositive);
+ continue;
+ }
+
+ if (SetDiagnostic) {
+ // Set the error as fatal flag for this specifier.
+ Diags.setDiagnosticGroupErrorAsFatal(Specifier, isPositive);
+ } else if (DiagIDs->getDiagnosticsInGroup(Specifier, _Diags)) {
+ EmitUnknownDiagWarning(Diags, "-Wfatal-errors=", Specifier,
+ isPositive);
+ }
continue;
}
-
- // Set the error as fatal flag for this specifier.
- if (Diags.setDiagnosticGroupErrorAsFatal(Specifier, isPositive)) {
- Diags.Report(isPositive ? diag::warn_unknown_warning_option :
- diag::warn_unknown_negative_warning_option)
- << ("-W" + Opt.str());
+
+ if (Report) {
+ if (DiagIDs->getDiagnosticsInGroup(Opt, _Diags))
+ EmitUnknownDiagWarning(Diags, "-W", Opt, isPositive);
+ } else {
+ Diags.setDiagnosticGroupMapping(Opt, Mapping);
}
- continue;
- }
-
- if (Diags.setDiagnosticGroupMapping(Opt, Mapping)) {
- Diags.Report(isPositive ? diag::warn_unknown_warning_option :
- diag::warn_unknown_negative_warning_option)
- << ("-W" + Opt.str());
}
}
}
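[editor's note] The visible effect of the two-pass scheme is that conflicting flags settle in command-line order before anything is reported; a hypothetical invocation:

    clang -cc1 ... -Werror=unused-variable -Wno-error=unused-variable
    # The later flag wins: unused-variable stays a plain warning, and any
    # unknown-option diagnostics are only emitted on the second pass.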
diff --git a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index c9af3cc..2066505 100644
--- a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -32,9 +32,6 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
using namespace clang::frontend;
switch (CI.getFrontendOpts().ProgramAction) {
- default:
- llvm_unreachable("Invalid program action!");
-
case ASTDump: return new ASTDumpAction();
case ASTDumpXML: return new ASTDumpXMLAction();
case ASTPrint: return new ASTPrintAction();
@@ -49,8 +46,8 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
case EmitCodeGenOnly: return new EmitCodeGenOnlyAction();
case EmitObj: return new EmitObjAction();
case FixIt: return new FixItAction();
- case GenerateModule: return new GeneratePCHAction(true);
- case GeneratePCH: return new GeneratePCHAction(false);
+ case GenerateModule: return new GenerateModuleAction;
+ case GeneratePCH: return new GeneratePCHAction;
case GeneratePTH: return new GeneratePTHAction();
case InitOnly: return new InitOnlyAction();
case ParseSyntaxOnly: return new SyntaxOnlyAction();
@@ -60,7 +57,7 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
FrontendPluginRegistry::begin(), ie = FrontendPluginRegistry::end();
it != ie; ++it) {
if (it->getName() == CI.getFrontendOpts().ActionName) {
- llvm::OwningPtr<PluginASTAction> P(it->instantiate());
+ OwningPtr<PluginASTAction> P(it->instantiate());
if (!P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs))
return 0;
return P.take();
@@ -75,12 +72,15 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
case PrintDeclContext: return new DeclContextPrintAction();
case PrintPreamble: return new PrintPreambleAction();
case PrintPreprocessedInput: return new PrintPreprocessedAction();
+ case PubnamesDump: return new PubnamesDumpAction();
case RewriteMacros: return new RewriteMacrosAction();
case RewriteObjC: return new RewriteObjCAction();
case RewriteTest: return new RewriteTestAction();
case RunAnalysis: return new ento::AnalysisAction();
+ case MigrateSource: return new arcmt::MigrateSourceAction();
case RunPreprocessorOnly: return new PreprocessOnlyAction();
}
+ llvm_unreachable("Invalid program action!");
}
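[editor's note] Moving llvm_unreachable after the switch (and dropping the default label) makes the switch fully covered: the compiler's -Wswitch can then flag any new ProgramAction enumerator that goes unhandled, while the trailing llvm_unreachable satisfies the missing-return check. A toy illustration of the idiom (not clang's enum):

    #include "llvm/Support/ErrorHandling.h"

    enum Shape { Circle, Square };

    static int sides(Shape S) {
      switch (S) {            // no default: -Wswitch flags missing cases
      case Circle: return 0;
      case Square: return 4;
      }
      llvm_unreachable("invalid Shape"); // only reachable via a bad value
    }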
static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
@@ -89,8 +89,14 @@ static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
if (!Act)
return 0;
+ const FrontendOptions &FEOpts = CI.getFrontendOpts();
+
+ if (FEOpts.FixAndRecompile) {
+ Act = new FixItRecompile(Act);
+ }
+
// Potentially wrap the base FE action in an ARC Migrate Tool action.
- switch (CI.getFrontendOpts().ARCMTAction) {
+ switch (FEOpts.ARCMTAction) {
case FrontendOptions::ARCMT_None:
break;
case FrontendOptions::ARCMT_Check:
@@ -101,17 +107,22 @@ static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
break;
case FrontendOptions::ARCMT_Migrate:
Act = new arcmt::MigrateAction(Act,
- CI.getFrontendOpts().ARCMTMigrateDir,
- CI.getFrontendOpts().ARCMTMigrateReportOut,
- CI.getFrontendOpts().ARCMTMigrateEmitARCErrors);
+ FEOpts.MTMigrateDir,
+ FEOpts.ARCMTMigrateReportOut,
+ FEOpts.ARCMTMigrateEmitARCErrors);
break;
}
+ if (FEOpts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
+ Act = new arcmt::ObjCMigrateAction(Act, FEOpts.MTMigrateDir,
+ FEOpts.ObjCMTAction & ~FrontendOptions::ObjCMT_Literals,
+ FEOpts.ObjCMTAction & ~FrontendOptions::ObjCMT_Subscripting);
+ }
+
// If there are any AST files to merge, create a frontend action
// adaptor to perform the merge.
- if (!CI.getFrontendOpts().ASTMergeFiles.empty())
- Act = new ASTMergeAction(Act, &CI.getFrontendOpts().ASTMergeFiles[0],
- CI.getFrontendOpts().ASTMergeFiles.size());
+ if (!FEOpts.ASTMergeFiles.empty())
+ Act = new ASTMergeAction(Act, FEOpts.ASTMergeFiles);
return Act;
}
@@ -119,7 +130,7 @@ static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -help.
if (Clang->getFrontendOpts().ShowHelp) {
- llvm::OwningPtr<driver::OptTable> Opts(driver::createCC1OptTable());
+ OwningPtr<driver::OptTable> Opts(driver::createCC1OptTable());
Opts->PrintHelp(llvm::outs(), "clang -cc1",
"LLVM 'Clang' Compiler: http://clang.llvm.org");
return 0;
@@ -154,7 +165,7 @@ bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
for (unsigned i = 0; i != NumArgs; ++i)
Args[i + 1] = Clang->getFrontendOpts().LLVMArgs[i].c_str();
Args[NumArgs + 1] = 0;
- llvm::cl::ParseCommandLineOptions(NumArgs + 1, const_cast<char **>(Args));
+ llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args);
}
// Honor -analyzer-checker-help.
@@ -168,7 +179,7 @@ bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
bool Success = false;
if (!Clang->getDiagnostics().hasErrorOccurred()) {
// Create and execute the frontend action.
- llvm::OwningPtr<FrontendAction> Act(CreateFrontendAction(*Clang));
+ OwningPtr<FrontendAction> Act(CreateFrontendAction(*Clang));
if (Act) {
Success = Clang->ExecuteAction(*Act);
if (Clang->getFrontendOpts().DisableFree)
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
new file mode 100644
index 0000000..d165f1f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
@@ -0,0 +1,961 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi8(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsb256((__v32qi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi16(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsw256((__v16hi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi32(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsd256((__v8si)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packsswb256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packssdw256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packuswb256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+ return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a + (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a + (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a + (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi64(__m256i a, __m256i b)
+{
+ return a + b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddusb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddusw256((__v16hi)a, (__v16hi)b);
+}
+
+#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+ __m256i __a = (a); \
+ __m256i __b = (b); \
+ (__m256i)__builtin_ia32_palignr256((__v32qi)__a, (__v32qi)__b, (n)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_and_si256(__m256i a, __m256i b)
+{
+ return a & b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_andnot_si256(__m256i a, __m256i b)
+{
+ return ~a & b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_avg_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pavgb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_avg_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pavgw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+ (__v32qi)__M);
+}
+
+#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_ia32_pblendw256((__v16hi)__V1, (__v16hi)__V2, (M)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a == (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a == (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a == (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)(a == b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a > (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a > (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a > (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)(a > b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadds_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsubs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maddubs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_madd_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxub256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxud256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminub256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminud256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm256_movemask_epi8(__m256i a)
+{
+ return __builtin_ia32_pmovmskb256((__v32qi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmuldq256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhrs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhi_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhi_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mullo_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a * (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mullo_epi32 (__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a * (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_epu32(__m256i a, __m256i b)
+{
+ return __builtin_ia32_pmuludq256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_or_si256(__m256i a, __m256i b)
+{
+ return a | b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sad_epu8(__m256i a, __m256i b)
+{
+ return __builtin_ia32_psadbw256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_shuffle_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pshufb256((__v32qi)a, (__v32qi)b);
+}
+
+#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)_mm256_set1_epi32(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
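[editor's note] The immediate is decoded as four 2-bit element selectors applied independently to each 128-bit half, mirroring SSE2's _mm_shuffle_epi32. A minimal usage sketch (assumes an AVX2 target, e.g. -mavx2; _MM_SHUFFLE comes from the usual x86 headers):

    #include <immintrin.h>

    // Replicate element 0 of each 128-bit lane across that lane.
    static __m256i broadcast_lane_lo(__m256i v) {
      return _mm256_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0));
    }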
+
+#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6), \
+ 8, 9, 10, 11, \
+ 12 + (((imm) & 0x03) >> 0), \
+ 12 + (((imm) & 0x0c) >> 2), \
+ 12 + (((imm) & 0x30) >> 4), \
+ 12 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+ (imm) & 0x3,((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7, \
+ 8 + (((imm) & 0x03) >> 0), \
+ 8 + (((imm) & 0x0c) >> 2), \
+ 8 + (((imm) & 0x30) >> 4), \
+ 8 + (((imm) & 0xc0) >> 6), \
+ 12, 13, 14, 15); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignd256((__v8si)a, (__v8si)b);
+}
+
+#define _mm256_slli_si256(a, count) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_ia32_pslldqi256(__a, (count)*8); })
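[editor's note] Note the (count)*8: the user-visible count is in bytes while the underlying builtin shifts in bits. The shift also happens within each 128-bit lane, not across the full 256-bit register. A small usage sketch (again assuming an AVX2 target):

    #include <immintrin.h>

    // Shift each 128-bit lane left by four bytes; bytes shifted out of a
    // lane are lost rather than carried into the neighboring lane.
    static __m256i shl_4_bytes(__m256i v) {
      return _mm256_slli_si256(v, 4);
    }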
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psllwi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psllw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_pslldi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_pslld256((__v8si)a, (__v4si)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi64(__m256i a, int count)
+{
+ return __builtin_ia32_psllqi256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi64(__m256i a, __m128i count)
+{
+ return __builtin_ia32_psllq256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srai_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrawi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sra_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psraw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srai_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psradi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sra_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrad256((__v8si)a, (__v4si)count);
+}
+
+#define _mm256_srli_si256(a, count) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_ia32_psrldqi256(__a, (count)*8); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrlwi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrlw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrldi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrld256((__v8si)a, (__v4si)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi64(__m256i a, int count)
+{
+ return __builtin_ia32_psrlqi256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi64(__m256i a, __m128i count)
+{
+ return __builtin_ia32_psrlq256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a - (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a - (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a - (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi64(__m256i a, __m256i b)
+{
+ return a - b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubusb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubusw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)a, (__v32qi)b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)a, (__v16hi)b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)a, (__v8si)b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector(a, b, 1, 4+1, 3, 4+3);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)a, (__v32qi)b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)a, (__v16hi)b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)a, (__v8si)b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector(a, b, 0, 4+0, 2, 4+2);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_xor_si256(__m256i a, __m256i b)
+{
+ return a ^ b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_load_si256(__m256i *__V)
+{
+ return (__m256i)__builtin_ia32_movntdqa256((__v4di *)__V);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastss_ps(__m128 __X)
+{
+ return (__m128)__builtin_ia32_vbroadcastss_ps((__v4sf)__X);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastss_ps(__m128 __X)
+{
+ return (__m256)__builtin_ia32_vbroadcastss_ps256((__v4sf)__X);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastsd_pd(__m128d __X)
+{
+ return (__m256d)__builtin_ia32_vbroadcastsd_pd256((__v2df)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastsi128_si256(__m128i const *a)
+{
+ return (__m256i)__builtin_ia32_vbroadcastsi256(a);
+}
+
+#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
+ __m128i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m128i)__builtin_ia32_pblendd128((__v4si)__V1, (__v4si)__V2, (M)); })
+
+#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_ia32_pblendd256((__v8si)__V1, (__v8si)__V2, (M)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastb_epi8(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastb256((__v16qi)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastw_epi16(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastw256((__v8hi)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastd_epi32(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastd256((__v4si)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastq_epi64(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastq256(__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastb_epi8(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastb128((__v16qi)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastw_epi16(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastw128((__v8hi)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastd_epi32(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastd128((__v4si)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastq_epi64(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastq128(__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar8x32_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_permvarsi256((__v8si)a, (__v8si)b);
+}
+
+#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
+ __m256d __V = (V); \
+ (__m256d)__builtin_ia32_permdf256((__v4df)__V, (M)); })
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar8x32_ps(__m256 a, __m256 b)
+{
+ return (__m256)__builtin_ia32_permvarsf256((__v8sf)a, (__v8sf)b);
+}
+
+#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
+ __m256i __V = (V); \
+ (__m256i)__builtin_ia32_permdi256(__V, (M)); })
+
+#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ __builtin_shufflevector(__V1, __V2, \
+ ((M) & 0x3) * 2, \
+ ((M) & 0x3) * 2 + 1, \
+ (((M) & 0x30) >> 4) * 2, \
+ (((M) & 0x30) >> 4) * 2 + 1); })
+
+#define _mm256_extracti128_si256(A, O) __extension__ ({ \
+ __m256i __A = (A); \
+ (__m128i)__builtin_ia32_extract128i256(__A, (O)); })
+
+#define _mm256_inserti128_si256(V1, V2, O) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m256i)__builtin_ia32_insert128i256(__V1, __V2, (O)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, __M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstoreq256((__v4di *)__X, __M, __Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstoreq((__v2di *)__X, __M, __Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv4di(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv2di(__X, __Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv4di(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv2di(__X, __Y);
+}
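
Unlike the immediate- and _mm-count shifts earlier in the file, the sllv/srlv/srav family closing it out takes a per-element count vector. A minimal sketch, assuming -mavx2 (helper name hypothetical):

#include <immintrin.h>

/* Shift element i of `values` left by i bits; a count of 32 or
   more would zero that lane. */
static __m256i scale_by_lane_index(__m256i values)
{
    __m256i counts = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
    return _mm256_sllv_epi32(values, counts);
}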
diff --git a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
index 0a0d2e4..7a0ec3f 100644
--- a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
@@ -145,17 +145,13 @@ _mm256_rcp_ps(__m256 a)
return (__m256)__builtin_ia32_rcpps256((__v8sf)a);
}
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_round_pd(__m256d v, const int m)
-{
- return (__m256d)__builtin_ia32_roundpd256((__v4df)v, m);
-}
+#define _mm256_round_pd(V, M) __extension__ ({ \
+ __m256d __V = (V); \
+ (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_round_ps(__m256 v, const int m)
-{
- return (__m256)__builtin_ia32_roundps256((__v8sf)v, m);
-}
+#define _mm256_round_ps(V, M) __extension__ ({ \
+ __m256 __V = (V); \
+ (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })
#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
@@ -262,60 +258,79 @@ _mm256_permutevar_ps(__m256 a, __m256i c)
(__v8si)c);
}
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
-_mm_permute_pd(__m128d a, const int c)
-{
- return (__m128d)__builtin_ia32_vpermilpd((__v2df)a, c);
-}
-
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_permute_pd(__m256d a, const int c)
-{
- return (__m256d)__builtin_ia32_vpermilpd256((__v4df)a, c);
-}
-
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
-_mm_permute_ps(__m128 a, const int c)
-{
- return (__m128)__builtin_ia32_vpermilps((__v4sf)a, c);
-}
-
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_permute_ps(__m256 a, const int c)
-{
- return (__m256)__builtin_ia32_vpermilps256((__v8sf)a, c);
-}
-
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_pd(__m256d a, __m256d b, const int c)
-{
- return (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)a, (__v4df)b, c);
-}
-
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_ps(__m256 a, __m256 b, const int c)
-{
- return (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)a, (__v8sf)b, c);
-}
-
-static __inline __m256i __attribute__((__always_inline__, __nodebug__))
-_mm256_permute2f128_si256(__m256i a, __m256i b, const int c)
-{
- return (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)a, (__v8si)b, c);
-}
+#define _mm_permute_pd(A, C) __extension__ ({ \
+ __m128d __A = (A); \
+ (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1); })
+
+#define _mm256_permute_pd(A, C) __extension__ ({ \
+ __m256d __A = (A); \
+ (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1, \
+ 2 + (((C) & 0x4) >> 2), \
+ 2 + (((C) & 0x8) >> 3)); })
+
+#define _mm_permute_ps(A, C) __extension__ ({ \
+ __m128 __A = (A); \
+ (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })
+
+#define _mm256_permute_ps(A, C) __extension__ ({ \
+ __m256 __A = (A); \
+ (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
+ 4 + (((C) & 0x03) >> 0), \
+ 4 + (((C) & 0x0c) >> 2), \
+ 4 + (((C) & 0x30) >> 4), \
+ 4 + (((C) & 0xc0) >> 6)); })
+
+#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m256d __V2 = (V2); \
+ (__m256d)__builtin_shufflevector((__v4df)__V1, (__v4df)__V2, \
+ ((M) & 0x3) * 2, \
+ ((M) & 0x3) * 2 + 1, \
+ (((M) & 0x30) >> 4) * 2, \
+ (((M) & 0x30) >> 4) * 2 + 1); })
+
+#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_shufflevector((__v8sf)__V1, (__v8sf)__V2, \
+ ((M) & 0x3) * 4, \
+ ((M) & 0x3) * 4 + 1, \
+ ((M) & 0x3) * 4 + 2, \
+ ((M) & 0x3) * 4 + 3, \
+ (((M) & 0x30) >> 4) * 4, \
+ (((M) & 0x30) >> 4) * 4 + 1, \
+ (((M) & 0x30) >> 4) * 4 + 2, \
+ (((M) & 0x30) >> 4) * 4 + 3); })
+
+#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_shufflevector((__v8si)__V1, (__v8si)__V2, \
+ ((M) & 0x3) * 4, \
+ ((M) & 0x3) * 4 + 1, \
+ ((M) & 0x3) * 4 + 2, \
+ ((M) & 0x3) * 4 + 3, \
+ (((M) & 0x30) >> 4) * 4, \
+ (((M) & 0x30) >> 4) * 4 + 1, \
+ (((M) & 0x30) >> 4) * 4 + 2, \
+ (((M) & 0x30) >> 4) * 4 + 3); })
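
In this expansion, bits 1:0 of M pick the source 128-bit lane for the low output half (0-1 from V1, 2-3 from V2) and bits 5:4 pick the high half; note that only the lane-select bits are decoded, so the instruction's zeroing bits (3 and 7) have no effect in this shufflevector emulation. A sketch, assuming AVX (helper name hypothetical):

#include <immintrin.h>

/* Swap the two 128-bit halves of v:
   0x01 = low output half from lane 1, high output half from lane 0. */
static __m256 swap_halves(__m256 v)
{
    return _mm256_permute2f128_ps(v, v, 0x01);
}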
/* Vector Blend */
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_blend_pd(__m256d a, __m256d b, const int c)
-{
- return (__m256d)__builtin_ia32_blendpd256((__v4df)a, (__v4df)b, c);
-}
+#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m256d __V2 = (V2); \
+ (__m256d)__builtin_ia32_blendpd256((__v4df)__V1, (__v4df)__V2, (M)); })
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_blend_ps(__m256 a, __m256 b, const int c)
-{
- return (__m256)__builtin_ia32_blendps256((__v8sf)a, (__v8sf)b, c);
-}
+#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_ia32_blendps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_blendv_pd(__m256d a, __m256d b, __m256d c)
@@ -330,26 +345,29 @@ _mm256_blendv_ps(__m256 a, __m256 b, __m256 c)
}
/* Vector Dot Product */
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_dp_ps(__m256 a, __m256 b, const int c)
-{
- return (__m256)__builtin_ia32_dpps256((__v8sf)a, (__v8sf)b, c);
-}
+#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
/* Vector shuffle */
-#define _mm256_shuffle_ps(a, b, mask) \
- (__builtin_shufflevector((__v8sf)(a), (__v8sf)(b), \
+#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
+ __m256 __a = (a); \
+ __m256 __b = (b); \
+ (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
(mask) & 0x3, ((mask) & 0xc) >> 2, \
(((mask) & 0x30) >> 4) + 8, (((mask) & 0xc0) >> 6) + 8, \
((mask) & 0x3) + 4, (((mask) & 0xc) >> 2) + 4, \
- (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12))
+ (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12); })
-#define _mm256_shuffle_pd(a, b, mask) \
- (__builtin_shufflevector((__v4df)(a), (__v4df)(b), \
+#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
+ __m256d __a = (a); \
+ __m256d __b = (b); \
+ (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
(mask) & 0x1, \
(((mask) & 0x2) >> 1) + 4, \
(((mask) & 0x4) >> 2) + 2, \
- (((mask) & 0x8) >> 3) + 6))
+ (((mask) & 0x8) >> 3) + 6); })
/* Compare */
#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
@@ -385,42 +403,48 @@ _mm256_dp_ps(__m256 a, __m256 b, const int c)
#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
-#define _mm_cmp_pd(a, b, c) \
- (__m128d)__builtin_ia32_cmppd((__v2df)(a), (__v2df)(b), (c))
+#define _mm_cmp_pd(a, b, c) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })
-#define _mm_cmp_ps(a, b, c) \
- (__m128)__builtin_ia32_cmpps((__v4sf)(a), (__v4sf)(b), (c))
+#define _mm_cmp_ps(a, b, c) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })
-#define _mm256_cmp_pd(a, b, c) \
- (__m256d)__builtin_ia32_cmppd256((__v4df)(a), (__v4df)(b), (c))
+#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
+ __m256d __a = (a); \
+ __m256d __b = (b); \
+ (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })
-#define _mm256_cmp_ps(a, b, c) \
- (__m256)__builtin_ia32_cmpps256((__v8sf)(a), (__v8sf)(b), (c))
+#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
+ __m256 __a = (a); \
+ __m256 __b = (b); \
+ (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })
-#define _mm_cmp_sd(a, b, c) \
- (__m128d)__builtin_ia32_cmpsd((__v2df)(a), (__v2df)(b), (c))
+#define _mm_cmp_sd(a, b, c) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })
-#define _mm_cmp_ss(a, b, c) \
- (__m128)__builtin_ia32_cmpss((__v4sf)(a), (__v4sf)(b), (c))
+#define _mm_cmp_ss(a, b, c) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })
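
A usage sketch tying these macros to the predicate table above, assuming AVX (helper name hypothetical); andnot clears the lanes where the comparison mask is all-ones:

#include <immintrin.h>

/* Replace negative lanes of v with 0.0f. */
static __m256 clamp_negatives(__m256 v)
{
    __m256 neg = _mm256_cmp_ps(v, _mm256_setzero_ps(), _CMP_LT_OQ);
    return _mm256_andnot_ps(neg, v);
}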
/* Vector extract */
-static __inline __m128d __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_pd(__m256d a, const int o)
-{
- return (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)a, o);
-}
+#define _mm256_extractf128_pd(A, O) __extension__ ({ \
+ __m256d __A = (A); \
+ (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__A, (O)); })
-static __inline __m128 __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_ps(__m256 a, const int o)
-{
- return (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)a, o);
-}
+#define _mm256_extractf128_ps(A, O) __extension__ ({ \
+ __m256 __A = (A); \
+ (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__A, (O)); })
-static __inline __m128i __attribute__((__always_inline__, __nodebug__))
-_mm256_extractf128_si256(__m256i a, const int o)
-{
- return (__m128i)__builtin_ia32_vextractf128_si256((__v8si)a, o);
-}
+#define _mm256_extractf128_si256(A, O) __extension__ ({ \
+ __m256i __A = (A); \
+ (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__A, (O)); })
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi32(__m256i a, int const imm)
@@ -453,23 +477,20 @@ _mm256_extract_epi64(__m256i a, const int imm)
#endif
/* Vector insert */
-static __inline __m256d __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_pd(__m256d a, __m128d b, const int o)
-{
- return (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)a, (__v2df)b, o);
-}
+#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m128d __V2 = (V2); \
+ (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
-static __inline __m256 __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_ps(__m256 a, __m128 b, const int o)
-{
- return (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)a, (__v4sf)b, o);
-}
+#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m128 __V2 = (V2); \
+ (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
-static __inline __m256i __attribute__((__always_inline__, __nodebug__))
-_mm256_insertf128_si256(__m256i a, __m128i b, const int o)
-{
- return (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)a, (__v4si)b, o);
-}
+#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi32(__m256i a, int b, int const imm)
@@ -762,13 +783,19 @@ _mm256_load_ps(float const *p)
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_pd(double const *p)
{
- return (__m256d)__builtin_ia32_loadupd256(p);
+ struct __loadu_pd {
+ __m256d v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_pd*)p)->v;
}
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_ps(float const *p)
{
- return (__m256)__builtin_ia32_loadups256(p);
+ struct __loadu_ps {
+ __m256 v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_ps*)p)->v;
}
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
@@ -780,7 +807,10 @@ _mm256_load_si256(__m256i const *p)
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_si256(__m256i const *p)
{
- return (__m256i)__builtin_ia32_loaddqu256((char const *)p);
+ struct __loadu_si256 {
+ __m256i v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_si256*)p)->v;
}
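
The rewrite replaces the unaligned-load builtins with a packed, may_alias struct read: packed drops the type's alignment requirement to 1, and may_alias exempts the access from strict-aliasing analysis, which lets the optimizer fold or combine these loads. The same idiom in isolation (a sketch, not part of the header):

static double load_unaligned_double(const void *p)
{
    struct wrap {
        double v;
    } __attribute__((__packed__, __may_alias__));
    /* byte-aligned, aliasing-safe read of a double */
    return ((const struct wrap *)p)->v;
}
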
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
@@ -1136,3 +1166,70 @@ _mm256_castsi128_si256(__m128i in)
__m128i zero = _mm_setzero_si128();
return __builtin_shufflevector(in, zero, 0, 1, 2, 2);
}
+
+/* SIMD load ops (unaligned) */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128(float const *addr_hi, float const *addr_lo)
+{
+ struct __loadu_ps {
+ __m128 v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256 v256 = _mm256_castps128_ps256(((struct __loadu_ps*)addr_lo)->v);
+ return _mm256_insertf128_ps(v256, ((struct __loadu_ps*)addr_hi)->v, 1);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128d(double const *addr_hi, double const *addr_lo)
+{
+ struct __loadu_pd {
+ __m128d v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256d v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)addr_lo)->v);
+ return _mm256_insertf128_pd(v256, ((struct __loadu_pd*)addr_hi)->v, 1);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128i(__m128i const *addr_hi, __m128i const *addr_lo)
+{
+ struct __loadu_si128 {
+ __m128i v;
+ } __attribute__((packed, may_alias));
+ __m256i v256 = _mm256_castsi128_si256(((struct __loadu_si128*)addr_lo)->v);
+ return _mm256_insertf128_si256(v256, ((struct __loadu_si128*)addr_hi)->v, 1);
+}
+
+/* SIMD store ops (unaligned) */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128(float *addr_hi, float *addr_lo, __m256 a)
+{
+ __m128 v128;
+
+ v128 = _mm256_castps256_ps128(a);
+ __builtin_ia32_storeups(addr_lo, v128);
+ v128 = _mm256_extractf128_ps(a, 1);
+ __builtin_ia32_storeups(addr_hi, v128);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128d(double *addr_hi, double *addr_lo, __m256d a)
+{
+ __m128d v128;
+
+ v128 = _mm256_castpd256_pd128(a);
+ __builtin_ia32_storeupd(addr_lo, v128);
+ v128 = _mm256_extractf128_pd(a, 1);
+ __builtin_ia32_storeupd(addr_hi, v128);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128i(__m128i *addr_hi, __m128i *addr_lo, __m256i a)
+{
+ __m128i v128;
+
+ v128 = _mm256_castsi256_si128(a);
+ __builtin_ia32_storedqu((char *)addr_lo, (__v16qi)v128);
+ v128 = _mm256_extractf128_si256(a, 1);
+ __builtin_ia32_storedqu((char *)addr_hi, (__v16qi)v128);
+}
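
A hypothetical round trip through the new split-half helpers, assuming AVX; each 128-bit half may live at an unrelated, unaligned address:

#include <immintrin.h>

static void copy_halves(float *dst_hi, float *dst_lo,
                        const float *src_hi, const float *src_lo)
{
    __m256 v = _mm256_loadu2_m128(src_hi, src_lo);
    _mm256_storeu2_m128(dst_hi, dst_lo, v);
}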
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
new file mode 100644
index 0000000..c60b0c4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
@@ -0,0 +1,75 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI2__
+# error "BMI2 instruction set not enabled"
+#endif /* __BMI2__ */
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bzhi_si(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pdep_si(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pext_si(__X, __Y);
+}
+
+#ifdef __x86_64__
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pext_di(__X, __Y);
+}
+
+#endif /* __x86_64__ */
+
+#endif /* __BMI2INTRIN_H */
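
PEXT gathers the bits selected by a mask into the low end of the result, and PDEP scatters them back out to the masked positions. A sketch, assuming -mbmi2 (helper names hypothetical):

#include <x86intrin.h>

/* Pack the low nibble of each byte of x into the low 16 bits. */
static unsigned int gather_nibbles(unsigned int x)
{
    return _pext_u32(x, 0x0f0f0f0fu);
}

/* Inverse: spread the low 16 bits back to the nibble positions. */
static unsigned int scatter_nibbles(unsigned int packed)
{
    return _pdep_u32(packed, 0x0f0f0f0fu);
}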
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
new file mode 100644
index 0000000..2f7db73
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
@@ -0,0 +1,115 @@
+/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI__
+# error "BMI instruction set not enabled"
+#endif /* __BMI__ */
+
+#ifndef __BMIINTRIN_H
+#define __BMIINTRIN_H
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__tzcnt16(unsigned short __X)
+{
+ return __builtin_ctzs(__X);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__andn_u32(unsigned int __X, unsigned int __Y)
+{
+ return ~__X & __Y;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__bextr_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsi_u32(unsigned int __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsmsk_u32(unsigned int __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsr_u32(unsigned int __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__tzcnt32(unsigned int __X)
+{
+ return __builtin_ctz(__X);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+ return ~__X & __Y;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__bextr_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsi_u64(unsigned long long __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsmsk_u64(unsigned long long __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsr_u64(unsigned long long __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__tzcnt64(unsigned long long __X)
+{
+ return __builtin_ctzll(__X);
+}
+#endif
+
+#endif /* __BMIINTRIN_H */
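
The bit-twiddling identities above compose into the usual set-bit iteration pattern. A sketch, assuming -mbmi (helper name hypothetical):

#include <x86intrin.h>

/* Count set bits by repeatedly clearing the lowest one. */
static unsigned int popcount_by_blsr(unsigned int x)
{
    unsigned int n = 0;
    while (x) {
        x = __blsr_u32(x);      /* clear the lowest set bit */
        ++n;
    }
    return n;
}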
diff --git a/contrib/llvm/tools/clang/lib/Headers/cpuid.h b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
new file mode 100644
index 0000000..05c293f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
@@ -0,0 +1,33 @@
+/*===---- cpuid.h - X86 cpu model detection --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !(__x86_64__ || __i386__)
+#error this header is for x86 only
+#endif
+
+static inline int __get_cpuid (unsigned int level, unsigned int *eax,
+ unsigned int *ebx, unsigned int *ecx,
+ unsigned int *edx) {
+ asm("cpuid" : "=a"(*eax), "=b" (*ebx), "=c"(*ecx), "=d"(*edx) : "0"(level));
+ return 1;
+}
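
A hedged usage sketch: leaf 1 returns the basic feature flags, with EDX bit 25 conventionally indicating SSE. Note that, unlike GCC's __get_cpuid, this minimal helper never validates the requested level against the maximum supported leaf and always returns 1.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    __get_cpuid(1, &eax, &ebx, &ecx, &edx);
    printf("SSE: %s\n", (edx & (1u << 25)) ? "yes" : "no");
    return 0;
}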
diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
index 903cfde..e10b77d 100644
--- a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
@@ -821,8 +821,9 @@ _mm_xor_si128(__m128i a, __m128i b)
return a ^ b;
}
-#define _mm_slli_si128(VEC, IMM) \
- ((__m128i)__builtin_ia32_pslldqi128((__m128i)(VEC), (IMM)*8))
+#define _mm_slli_si128(a, count) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_ia32_pslldqi128(__a, (count)*8); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi16(__m128i a, int count)
@@ -885,8 +886,9 @@ _mm_sra_epi32(__m128i a, __m128i count)
}
-#define _mm_srli_si128(VEC, IMM) \
- ((__m128i)__builtin_ia32_psrldqi128((__m128i)(VEC), (IMM)*8))
+#define _mm_srli_si128(a, count) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_ia32_psrldqi128(__a, (count)*8); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi16(__m128i a, int count)
@@ -945,7 +947,10 @@ _mm_cmpeq_epi32(__m128i a, __m128i b)
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi8(__m128i a, __m128i b)
{
- return (__m128i)((__v16qi)a > (__v16qi)b);
+ /* This function always performs a signed comparison, but __v16qi is a char
+ which may be signed or unsigned. */
+ typedef signed char __v16qs __attribute__((__vector_size__(16)));
+ return (__m128i)((__v16qs)a > (__v16qs)b);
}
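
The local signed-char vector type matters because plain char, and hence __v16qi, may be unsigned (for example under -funsigned-char, or on targets where char defaults to unsigned), which would silently turn > into an unsigned compare. A sketch of the now-guaranteed signed behavior (helper name hypothetical):

#include <emmintrin.h>

/* All-ones in each byte lane where the signed byte is negative. */
static __m128i negative_mask(__m128i v)
{
    return _mm_cmpgt_epi8(_mm_setzero_si128(), v);
}
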
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -1259,23 +1264,27 @@ _mm_movemask_epi8(__m128i a)
return __builtin_ia32_pmovmskb128((__v16qi)a);
}
-#define _mm_shuffle_epi32(a, imm) \
- ((__m128i)__builtin_shufflevector((__v4si)(a), (__v4si) _mm_set1_epi32(0), \
- (imm) & 0x3, ((imm) & 0xc) >> 2, \
- ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6))
+#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si) _mm_set1_epi32(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
+#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7); })
-#define _mm_shufflelo_epi16(a, imm) \
- ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) _mm_set1_epi16(0), \
- (imm) & 0x3, ((imm) & 0xc) >> 2, \
- ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
- 4, 5, 6, 7))
-#define _mm_shufflehi_epi16(a, imm) \
- ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) _mm_set1_epi16(0), 0, 1, 2, 3, \
- 4 + (((imm) & 0x03) >> 0), \
- 4 + (((imm) & 0x0c) >> 2), \
- 4 + (((imm) & 0x30) >> 4), \
- 4 + (((imm) & 0xc0) >> 6)))
+#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi8(__m128i a, __m128i b)
@@ -1361,9 +1370,10 @@ _mm_movemask_pd(__m128d a)
return __builtin_ia32_movmskpd(a);
}
-#define _mm_shuffle_pd(a, b, i) \
- (__builtin_shufflevector((__m128d)(a), (__m128d)(b), (i) & 1, \
- (((i) & 2) >> 1) + 2))
+#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ __builtin_shufflevector(__a, __b, (i) & 1, (((i) & 2) >> 1) + 2); })
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_castpd_ps(__m128d in)
diff --git a/contrib/llvm/tools/clang/lib/Headers/float.h b/contrib/llvm/tools/clang/lib/Headers/float.h
index b7cb73a..65b517d 100644
--- a/contrib/llvm/tools/clang/lib/Headers/float.h
+++ b/contrib/llvm/tools/clang/lib/Headers/float.h
@@ -64,6 +64,11 @@
# undef FLT_MIN
# undef DBL_MIN
# undef LDBL_MIN
+# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# undef FLT_TRUE_MIN
+# undef DBL_TRUE_MIN
+# undef LDBL_TRUE_MIN
+# endif
#endif
/* Characteristics of floating point types, C99 5.2.4.2.2 */
@@ -110,4 +115,10 @@
#define DBL_MIN __DBL_MIN__
#define LDBL_MIN __LDBL_MIN__
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# define FLT_TRUE_MIN __FLT_DENORM_MIN__
+# define DBL_TRUE_MIN __DBL_DENORM_MIN__
+# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#endif
+
#endif /* __FLOAT_H */
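
The *_TRUE_MIN names are the C11 spellings for the smallest positive subnormal value of each type; a brief sketch:

#include <float.h>
#include <stdio.h>

int main(void)
{
    printf("%a\n", FLT_TRUE_MIN);   /* 0x1p-149 on IEEE-754 binary32 */
    return 0;
}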
diff --git a/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h b/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h
new file mode 100644
index 0000000..c30920d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h
@@ -0,0 +1,231 @@
+/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __FMA4INTRIN_H
+#define __FMA4INTRIN_H
+
+#ifndef __FMA4__
+# error "FMA4 instruction set is not enabled"
+#else
+
+#include <pmmintrin.h>
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#endif /* __FMA4__ */
+
+#endif /* __FMA4INTRIN_H */
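
Each FMA4 intrinsic fuses a multiply with an add or subtract under a single rounding step per lane. A sketch, assuming -mfma4 on FMA4-capable hardware (helper name hypothetical):

#include <x86intrin.h>

/* a*x + y with one rounding per lane. */
static __m128 axpy(__m128 a, __m128 x, __m128 y)
{
    return _mm_macc_ps(a, x, y);
}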
diff --git a/contrib/llvm/tools/clang/lib/Headers/immintrin.h b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
index a19deaa..1605525 100644
--- a/contrib/llvm/tools/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
@@ -48,7 +48,7 @@
#include <smmintrin.h>
#endif
-#if defined (__AES__) || defined (__PCLMUL__)
+#if defined (__AES__)
#include <wmmintrin.h>
#endif
@@ -56,4 +56,20 @@
#include <avxintrin.h>
#endif
+#ifdef __AVX2__
+#include <avx2intrin.h>
+#endif
+
+#ifdef __BMI__
+#include <bmiintrin.h>
+#endif
+
+#ifdef __BMI2__
+#include <bmi2intrin.h>
+#endif
+
+#ifdef __LZCNT__
+#include <lzcntintrin.h>
+#endif
+
#endif /* __IMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h b/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h
new file mode 100644
index 0000000..62ab5ca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h
@@ -0,0 +1,55 @@
+/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LZCNT__
+# error "LZCNT instruction is not enabled"
+#endif /* __LZCNT__ */
+
+#ifndef __LZCNTINTRIN_H
+#define __LZCNTINTRIN_H
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__lzcnt16(unsigned short __X)
+{
+ return __builtin_clzs(__X);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__lzcnt32(unsigned int __X)
+{
+ return __builtin_clz(__X);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__lzcnt64(unsigned long long __X)
+{
+ return __builtin_clzll(__X);
+}
+#endif
+
+#endif /* __LZCNTINTRIN_H */
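
A sketch building floor(log2(x)) on the leading-zero count, assuming -mlzcnt (helper name hypothetical); the LZCNT instruction returns the operand width for a zero input, but __builtin_clz(0) is formally undefined, so the caller must guarantee x != 0 here:

#include <x86intrin.h>

static unsigned int ilog2(unsigned int x)
{
    return 31 - __lzcnt32(x);   /* caller guarantees x != 0 */
}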
diff --git a/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h b/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
index 2f456ad..d5236f8 100644
--- a/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
+++ b/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
@@ -105,7 +105,7 @@ _m_pfrsqrt(__m64 __m) {
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
- return (__m64)__builtin_ia32_pfrsqrtit1((__v2sf)__m1, (__v2sf)__m2);
+ return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
}
static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
diff --git a/contrib/llvm/tools/clang/lib/Headers/module.map b/contrib/llvm/tools/clang/lib/Headers/module.map
new file mode 100644
index 0000000..418ba50
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/module.map
@@ -0,0 +1,108 @@
+module _Builtin_intrinsics [system] {
+ explicit module altivec {
+ requires altivec
+ header "altivec.h"
+ }
+
+ explicit module intel {
+ requires x86
+ export *
+
+ header "immintrin.h"
+ header "x86intrin.h"
+
+ explicit module mm_malloc {
+ header "mm_malloc.h"
+ export * // note: for <stdlib.h> dependency
+ }
+
+ explicit module cpuid {
+ header "cpuid.h"
+ }
+
+ explicit module mmx {
+ requires mmx
+ header "mmintrin.h"
+ }
+
+ explicit module sse {
+ requires sse
+ export mmx
+ export * // note: for hackish <emmintrin.h> dependency
+ header "xmmintrin.h"
+ }
+
+ explicit module sse2 {
+ requires sse2
+ export sse
+ header "emmintrin.h"
+ }
+
+ explicit module sse3 {
+ requires sse3
+ export sse2
+ header "pmmintrin.h"
+ }
+
+ explicit module ssse3 {
+ requires ssse3
+ export sse3
+ header "tmmintrin.h"
+ }
+
+ explicit module sse4_1 {
+ requires sse41
+ export ssse3
+ header "smmintrin.h"
+ }
+
+ explicit module sse4_2 {
+ requires sse42
+ export sse4_1
+ header "nmmintrin.h"
+ }
+
+ explicit module avx {
+ requires avx
+ export sse4_2
+ header "avxintrin.h"
+ }
+
+ explicit module avx2 {
+ requires avx2
+ export avx
+ header "avx2intrin.h"
+ }
+
+ explicit module bmi {
+ requires bmi
+ header "bmiintrin.h"
+ }
+
+ explicit module bmi2 {
+ requires bmi2
+ header "bmi2intrin.h"
+ }
+
+ explicit module fma4 {
+ requires fma4
+ export sse3
+ header "fma4intrin.h"
+ }
+
+ explicit module lzcnt {
+ requires lzcnt
+ header "lzcntintrin.h"
+ }
+
+ explicit module popcnt {
+ requires popcnt
+ header "popcntintrin.h"
+ }
+
+ explicit module mm3dnow {
+ requires mm3dnow
+ header "mm3dnow.h"
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h b/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h
new file mode 100644
index 0000000..d439daa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h
@@ -0,0 +1,45 @@
+/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __POPCNT__
+#error "POPCNT instruction set not enabled"
+#endif
+
+#ifndef _POPCNTINTRIN_H
+#define _POPCNTINTRIN_H
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_popcnt_u32(unsigned int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+#ifdef __x86_64__
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_popcnt_u64(unsigned long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+#endif /* __x86_64__ */
+
+#endif /* _POPCNTINTRIN_H */
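
A sketch: Hamming distance via POPCNT, assuming -mpopcnt (helper name hypothetical):

#include <popcntintrin.h>

/* Number of bit positions where a and b differ. */
static int hamming32(unsigned int a, unsigned int b)
{
    return _mm_popcnt_u32(a ^ b);
}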
diff --git a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
index 2b8b321..2fab50e 100644
--- a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
@@ -57,23 +57,34 @@
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
-#define _mm_round_ps(X, Y) __builtin_ia32_roundps((X), (Y))
-#define _mm_round_ss(X, Y, M) __builtin_ia32_roundss((X), (Y), (M))
-#define _mm_round_pd(X, M) __builtin_ia32_roundpd((X), (M))
-#define _mm_round_sd(X, Y, M) __builtin_ia32_roundsd((X), (Y), (M))
+#define _mm_round_ps(X, M) __extension__ ({ \
+ __m128 __X = (X); \
+ (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })
+
+#define _mm_round_ss(X, Y, M) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_round_pd(X, M) __extension__ ({ \
+ __m128d __X = (X); \
+ (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })
+
+#define _mm_round_sd(X, Y, M) __extension__ ({ \
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
/* SSE4 Packed Blending Intrinsics. */
-static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
-_mm_blend_pd (__m128d __V1, __m128d __V2, const int __M)
-{
- return (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, __M);
-}
+#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
+ __m128d __V1 = (V1); \
+ __m128d __V2 = (V2); \
+ (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })
-static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
-_mm_blend_ps (__m128 __V1, __m128 __V2, const int __M)
-{
- return (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, __M);
-}
+#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
+ __m128 __V1 = (V1); \
+ __m128 __V2 = (V2); \
+ (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
@@ -96,11 +107,10 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
(__v16qi)__M);
}
-static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
-_mm_blend_epi16 (__m128i __V1, __m128i __V2, const int __M)
-{
- return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, __M);
-}
+#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
+ __m128i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
/* SSE4 Dword Multiply Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -116,8 +126,15 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
}
/* SSE4 Floating Point Dot Product Instructions. */
-#define _mm_dp_ps(X, Y, M) __builtin_ia32_dpps ((X), (Y), (M))
-#define _mm_dp_pd(X, Y, M) __builtin_ia32_dppd ((X), (Y), (M))
+#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_dp_pd(X, Y, M) __extension__ ({\
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
@@ -198,14 +215,14 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[N] = I; \
+ __a[(N)] = (I); \
__a;}))
#endif /* __x86_64__ */
@@ -213,12 +230,12 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
* as a zero extended value, so it is unsigned.
*/
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
- (unsigned char)__a[N];}))
+ (unsigned char)__a[(N)];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
- (unsigned)__a[N];}))
+ (unsigned)__a[(N)];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
- __a[N];}))
+ __a[(N)];}))
#endif /* __x86_64 */
/* SSE4 128-bit Packed Integer Comparisons. */
@@ -242,13 +259,13 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((V), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
/* SSE4 64-bit Packed Integer Comparisons. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pcmpeqq((__v2di)__V1, (__v2di)__V2);
+ return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
/* SSE4 Packed Integer Sign-Extension. */
@@ -333,7 +350,16 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
-#define _mm_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw128((X), (Y), (M))
+#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+ __m128i __X = (X); \
+ __m128i __Y = (Y); \
+ (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_minpos_epu16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+}
/* These definitions are normally in nmmintrin.h, but gcc puts them in here
so we'll do the same. */
@@ -371,20 +397,20 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
#define _mm_cmpestrm(A, LA, B, LB, M) \
__builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
-#define _mm_cmpestri(X, LX, Y, LY, M) \
+#define _mm_cmpestri(A, LA, B, LB, M) \
__builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
-#define _mm_cmpistra(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistria128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrc(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistric128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistro(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistrio128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrs(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistris128((A), (LA), (B), (LB), (M))
-#define _mm_cmpistrz(A, LA, B, LB, M) \
- __builtin_ia32_pcmpistriz128((A), (LA), (B), (LB), (M))
+#define _mm_cmpistra(A, B, M) \
+ __builtin_ia32_pcmpistria128((A), (B), (M))
+#define _mm_cmpistrc(A, B, M) \
+ __builtin_ia32_pcmpistric128((A), (B), (M))
+#define _mm_cmpistro(A, B, M) \
+ __builtin_ia32_pcmpistrio128((A), (B), (M))
+#define _mm_cmpistrs(A, B, M) \
+ __builtin_ia32_pcmpistris128((A), (B), (M))
+#define _mm_cmpistrz(A, B, M) \
+ __builtin_ia32_pcmpistriz128((A), (B), (M))
#define _mm_cmpestra(A, LA, B, LB, M) \
__builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
@@ -401,7 +427,7 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
- return __builtin_ia32_pcmpgtq((__v2di)__V1, (__v2di)__V2);
+ return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}
/* SSE4.2 Accumulate CRC32. */
@@ -431,20 +457,9 @@ _mm_crc32_u64(unsigned long long __C, unsigned long long __D)
}
#endif /* __x86_64__ */
-/* SSE4.2 Population Count. */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-_mm_popcnt_u32(unsigned int __A)
-{
- return __builtin_popcount(__A);
-}
-
-#ifdef __x86_64__
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
-_mm_popcnt_u64(unsigned long long __A)
-{
- return __builtin_popcountll(__A);
-}
-#endif /* __x86_64__ */
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
#endif /* __SSE4_2__ */
#endif /* __SSE4_1__ */
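The rewrites above share one pattern: intrinsics whose immediate operand M must be an integer constant expression become __extension__ ({ ... }) statement-expression macros that bind X and Y to locals, so each vector argument is evaluated exactly once while M is passed through verbatim to the builtin. The hunk also fixes _mm_test_all_zeros, which previously ignored its mask and tested V against itself. A sketch of the single-evaluation guarantee (assumes -msse4.1; next_vector is a hypothetical callee):

#include <smmintrin.h>

extern __m128 next_vector(void);

__m128 round_next(void) {
  /* next_vector() runs once: the macro copies it into a local (__X)
   * before use. _MM_FROUND_NINT stays a constant expression, as the
   * rounding builtin requires. */
  return _mm_round_ps(next_vector(), _MM_FROUND_NINT);
}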
diff --git a/contrib/llvm/tools/clang/lib/Headers/tgmath.h b/contrib/llvm/tools/clang/lib/Headers/tgmath.h
index 1b0b9d2..4fa1cf7 100644
--- a/contrib/llvm/tools/clang/lib/Headers/tgmath.h
+++ b/contrib/llvm/tools/clang/lib/Headers/tgmath.h
@@ -540,15 +540,15 @@ static long double
_TG_ATTRS
__tg_fabs(long double __x) {return fabsl(__x);}
-static float _Complex
+static float
_TG_ATTRS
__tg_fabs(float _Complex __x) {return cabsf(__x);}
-static double _Complex
+static double
_TG_ATTRS
__tg_fabs(double _Complex __x) {return cabs(__x);}
-static long double _Complex
+static long double
_TG_ATTRS
__tg_fabs(long double _Complex __x) {return cabsl(__x);}
@@ -976,6 +976,23 @@ static long double
#undef log2
#define log2(__x) __tg_log2(__tg_promote1((__x))(__x))
+// logb
+
+static float
+ _TG_ATTRS
+ __tg_logb(float __x) {return logbf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_logb(double __x) {return logb(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_logb(long double __x) {return logbl(__x);}
+
+#undef logb
+#define logb(__x) __tg_logb(__tg_promote1((__x))(__x))
+
// lrint
static long
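The tgmath.h hunk fixes __tg_fabs on complex arguments to return the real magnitude type (cabs yields double, not double _Complex) and adds the missing logb dispatcher. Sketched usage (C99; link with -lm; the values are ordinary examples):

#include <tgmath.h>
#include <assert.h>

int main(void) {
  assert(logb(8.0)  == 3.0);    /* double -> logb          */
  assert(logb(8.0f) == 3.0f);   /* float  -> logbf         */
  double complex z = 3.0 + 4.0 * I;
  assert(fabs(z) == 5.0);       /* complex -> cabs, now typed plain double */
  return 0;
}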
diff --git a/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
index 07fea1c..a62c6cc 100644
--- a/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
@@ -66,8 +66,15 @@ _mm_abs_epi32(__m128i a)
return (__m128i)__builtin_ia32_pabsd128((__v4si)a);
}
-#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
+ __m128i __a = (a); \
+ __m128i __b = (b); \
+ (__m128i)__builtin_ia32_palignr128((__v16qi)__a, (__v16qi)__b, (n)); })
+
+#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
+ __m64 __a = (a); \
+ __m64 __b = (b); \
+ (__m64)__builtin_ia32_palignr((__v8qi)__a, (__v8qi)__b, (n)); })
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadd_epi16(__m128i a, __m128i b)
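Besides switching to the statement-expression form, the palignr macros gain the vector casts the builtins expect. A usage sketch (assumes -mssse3; n must be a compile-time constant, which is why these stay macros):

#include <tmmintrin.h>

__m128i shift_window(__m128i hi, __m128i lo) {
  /* Concatenate hi:lo (hi in the upper half) and take 16 bytes starting
   * at byte offset 4, i.e. bytes 4..19 of the 32-byte pair. */
  return _mm_alignr_epi8(hi, lo, 4);
}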
diff --git a/contrib/llvm/tools/clang/lib/Headers/unwind.h b/contrib/llvm/tools/clang/lib/Headers/unwind.h
new file mode 100644
index 0000000..a065920
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/unwind.h
@@ -0,0 +1,124 @@
+/*===---- unwind.h - Stack unwinding ----------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
+
+#if __has_include_next(<unwind.h>)
+/* Darwin and libunwind provide an unwind.h. If that's available, use
+ * it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+ * so define that around the include.*/
+# ifndef _GNU_SOURCE
+# define _SHOULD_UNDEFINE_GNU_SOURCE
+# define _GNU_SOURCE
+# endif
+// libunwind's unwind.h reflects the current visibility. However, Mozilla
+// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
+// visibility to default and export its contents. gcc also allows users to
+// override its override by #defining HIDE_EXPORTS (but note, this only obeys
+// the user's -fvisibility setting; it doesn't hide any exports on its own). We
+// imitate gcc's header here:
+# ifdef HIDE_EXPORTS
+# include_next <unwind.h>
+# else
+# pragma GCC visibility push(default)
+# include_next <unwind.h>
+# pragma GCC visibility pop
+# endif
+# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
+# undef _GNU_SOURCE
+# undef _SHOULD_UNDEFINE_GNU_SOURCE
+# endif
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* It is a bit strange for a header to play with the visibility of the
+ symbols it declares, but this matches gcc's behavior and some programs
+ depend on it */
+#pragma GCC visibility push(default)
+
+struct _Unwind_Context;
+typedef enum {
+ _URC_NO_REASON = 0,
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8
+} _Unwind_Reason_Code;
+
+
+#ifdef __arm__
+
+typedef enum {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+} _Unwind_VRS_RegClass;
+
+typedef enum {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+} _Unwind_VRS_DataRepresentation;
+
+typedef enum {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+} _Unwind_VRS_Result;
+
+_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *context,
+ _Unwind_VRS_RegClass regclass,
+ uint32_t regno,
+ _Unwind_VRS_DataRepresentation representation,
+ void *valuep);
+
+#else
+
+uintptr_t _Unwind_GetIP(struct _Unwind_Context* context);
+
+#endif
+
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context*, void*);
+_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void*);
+
+#pragma GCC visibility pop
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
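A usage sketch for the declarations on the non-ARM fallback path (assumes a libgcc_s/libunwind runtime provides the symbols at link time):

#include <stdio.h>
#include <unwind.h>

static _Unwind_Reason_Code frame(struct _Unwind_Context *ctx, void *arg) {
  int *depth = (int *)arg;
  printf("#%d ip=%p\n", (*depth)++, (void *)_Unwind_GetIP(ctx));
  return _URC_NO_REASON;              /* keep walking the stack */
}

int main(void) {
  int depth = 0;
  _Unwind_Backtrace(frame, &depth);   /* visits each active frame */
  return 0;
}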
diff --git a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
index 6b2e468..8f58850 100644
--- a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
@@ -28,7 +28,7 @@
# error "AES instructions not enabled"
#else
-#include <smmintrin.h>
+#include <xmmintrin.h>
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_aesenc_si128(__m128i __V, __m128i __R)
diff --git a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
index e5e7a6a..f5e4d88 100644
--- a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
@@ -26,6 +26,30 @@
#include <immintrin.h>
-// FIXME: SSE4A, 3dNOW, FMA4, XOP, LWP, ABM, POPCNT
+#ifdef __3dNOW__
+#include <mm3dnow.h>
+#endif
+
+#ifdef __BMI__
+#include <bmiintrin.h>
+#endif
+
+#ifdef __BMI2__
+#include <bmi2intrin.h>
+#endif
+
+#ifdef __LZCNT__
+#include <lzcntintrin.h>
+#endif
+
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
+
+#ifdef __FMA4__
+#include <fma4intrin.h>
+#endif
+
+// FIXME: SSE4A, XOP, LWP, ABM
#endif /* __X86INTRIN_H */
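With these additions a single umbrella include is enough; which intrinsics it exposes follows the -m feature flags. Sketch:

#include <x86intrin.h>

unsigned bits_set(unsigned x) {
#ifdef __POPCNT__
  return _mm_popcnt_u32(x);      /* via the new popcntintrin.h */
#else
  return __builtin_popcount(x);  /* portable fallback */
#endif
}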
diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
index a0bc0bb..e616157 100644
--- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
@@ -664,7 +664,7 @@ _mm_storer_ps(float *p, __m128 a)
/* FIXME: We have to #define this because "sel" must be a constant integer, and
Sema doesn't do any form of constant propagation yet. */
-#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, sel))
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_pi(__m64 *p, __m64 a)
@@ -735,8 +735,9 @@ _mm_mulhi_pu16(__m64 a, __m64 b)
return (__m64)__builtin_ia32_pmulhuw((__v4hi)a, (__v4hi)b);
}
-#define _mm_shuffle_pi16(a, n) \
- ((__m64)__builtin_ia32_pshufw(a, n))
+#define _mm_shuffle_pi16(a, n) __extension__ ({ \
+ __m64 __a = (a); \
+ (__m64)__builtin_ia32_pshufw((__v4hi)__a, (n)); })
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_maskmove_si64(__m64 d, __m64 n, char *p)
@@ -774,11 +775,13 @@ _mm_setcsr(unsigned int i)
__builtin_ia32_ldmxcsr(i);
}
-#define _mm_shuffle_ps(a, b, mask) \
- (__builtin_shufflevector((__v4sf)(a), (__v4sf)(b), \
- (mask) & 0x3, ((mask) & 0xc) >> 2, \
- (((mask) & 0x30) >> 4) + 4, \
- (((mask) & 0xc0) >> 6) + 4))
+#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_shufflevector((__v4sf)__a, (__v4sf)__b, \
+ (mask) & 0x3, ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 4, \
+ (((mask) & 0xc0) >> 6) + 4); })
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_ps(__m128 a, __m128 b)
@@ -935,7 +938,7 @@ _mm_movemask_ps(__m128 a)
#define _MM_FLUSH_ZERO_MASK (0x8000)
#define _MM_FLUSH_ZERO_ON (0x8000)
-#define _MM_FLUSH_ZERO_OFF (0x8000)
+#define _MM_FLUSH_ZERO_OFF (0x0000)
#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
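The _MM_FLUSH_ZERO_OFF fix is load-bearing: with the old value (0x8000, identical to _MM_FLUSH_ZERO_ON), flush-to-zero could never actually be disabled through this interface. A sketch, assuming xmmintrin.h also provides the usual _MM_SET_FLUSH_ZERO_MODE helper:

#include <xmmintrin.h>

void disable_flush_to_zero(void) {
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_OFF);  /* clears MXCSR bit 15 */
  /* (_mm_getcsr() & _MM_FLUSH_ZERO_MASK) == _MM_FLUSH_ZERO_OFF now holds. */
}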
diff --git a/contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp b/contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp
index 66b393e..fce6099 100644
--- a/contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp
@@ -41,7 +41,6 @@ Decl *ASTLocation::getReferencedDecl() {
return 0;
switch (getKind()) {
- default: llvm_unreachable("Invalid Kind");
case N_Type:
return 0;
case N_Decl:
@@ -51,8 +50,8 @@ Decl *ASTLocation::getReferencedDecl() {
case N_Stmt:
return getDeclFromExpr(Stm);
}
-
- return 0;
+
+ llvm_unreachable("Invalid ASTLocation Kind!");
}
SourceRange ASTLocation::getSourceRange() const {
@@ -60,7 +59,6 @@ SourceRange ASTLocation::getSourceRange() const {
return SourceRange();
switch (getKind()) {
- default: llvm_unreachable("Invalid Kind");
case N_Decl:
return D->getSourceRange();
case N_Stmt:
@@ -70,8 +68,8 @@ SourceRange ASTLocation::getSourceRange() const {
case N_Type:
return AsTypeLoc().getLocalSourceRange();
}
-
- return SourceRange();
+
+ llvm_unreachable("Invalid ASTLocation Kind!");
}
void ASTLocation::print(raw_ostream &OS) const {
@@ -91,7 +89,7 @@ void ASTLocation::print(raw_ostream &OS) const {
case N_Stmt:
OS << "[Stmt: " << AsStmt()->getStmtClassName() << " ";
- AsStmt()->printPretty(OS, Ctx, 0, PrintingPolicy(Ctx.getLangOptions()));
+ AsStmt()->printPretty(OS, Ctx, 0, PrintingPolicy(Ctx.getLangOpts()));
break;
case N_NamedRef:
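This is the standard fix for covered-switch warnings: a default that merely asserts suppresses the compiler's missing-enumerator diagnostics, so the unreachable marker moves after the fully-covered switch. The idiom in miniature (plain C, using the compiler builtin in place of llvm_unreachable):

enum Kind { N_Decl, N_Stmt };

int describe(enum Kind k) {
  switch (k) {             /* no default: the compiler can now warn   */
  case N_Decl: return 1;   /* if a new Kind is added without a case   */
  case N_Stmt: return 2;
  }
  __builtin_unreachable(); /* trap marker for corrupt values */
}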
diff --git a/contrib/llvm/tools/clang/lib/Index/Analyzer.cpp b/contrib/llvm/tools/clang/lib/Index/Analyzer.cpp
index 6be35ab..f77e6ef 100644
--- a/contrib/llvm/tools/clang/lib/Index/Analyzer.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/Analyzer.cpp
@@ -205,7 +205,7 @@ public:
assert(MsgD);
// Same interface ? We have a winner!
- if (MsgD == IFace)
+ if (declaresSameEntity(MsgD, IFace))
return true;
// If the message interface is a superclass of the original interface,
@@ -220,7 +220,7 @@ public:
if (IFace) {
Selector Sel = Msg->getSelector();
for (ObjCInterfaceDecl *Cls = MsgD; Cls; Cls = Cls->getSuperClass()) {
- if (Cls == IFace)
+ if (declaresSameEntity(Cls, IFace))
return true;
if (Cls->getMethod(Sel, IsInstanceMethod))
return false;
diff --git a/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Index/GlobalCallGraph.cpp
index 741e781..a21b52a 100644
--- a/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/GlobalCallGraph.cpp
@@ -1,4 +1,4 @@
-//== CallGraph.cpp - Call graph building ------------------------*- C++ -*--==//
+//== GlobalCallGraph.cpp - Call graph building ------------------*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
@@ -11,15 +11,17 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Index/CallGraph.h"
+#include "clang/Index/GlobalCallGraph.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Support/GraphWriter.h"
-using namespace clang;
-using namespace idx;
+using namespace clang::idx;
+using clang::FunctionDecl;
+using clang::DeclContext;
+using clang::ASTContext;
namespace {
class CGBuilder : public StmtVisitor<CGBuilder> {
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
index 0cb564c..bbfc1df 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
@@ -81,7 +81,7 @@ const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
unsigned FileSize = FE->getSize();
if (FileSize <= sizeof(HMapHeader)) return 0;
- llvm::OwningPtr<const llvm::MemoryBuffer> FileBuffer(FM.getBufferForFile(FE));
+ OwningPtr<const llvm::MemoryBuffer> FileBuffer(FM.getBufferForFile(FE));
if (FileBuffer == 0) return 0; // Unreadable file?
const char *FileStart = FileBuffer->getBufferStart();
@@ -220,7 +220,7 @@ const FileEntry *HeaderMap::LookupFile(
// If so, we have a match in the hash table. Construct the destination
// path.
- llvm::SmallString<1024> DestPath;
+ SmallString<1024> DestPath;
DestPath += getString(B.Prefix);
DestPath += getString(B.Suffix);
return FM.getFile(DestPath.str());
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
index 931145a..d688e23 100644
--- a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
@@ -13,6 +13,8 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderMap.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/Support/FileSystem.h"
@@ -36,8 +38,12 @@ HeaderFileInfo::getControllingMacro(ExternalIdentifierLookup *External) {
ExternalHeaderFileInfoSource::~ExternalHeaderFileInfoSource() {}
-HeaderSearch::HeaderSearch(FileManager &FM)
- : FileMgr(FM), FrameworkMap(64) {
+HeaderSearch::HeaderSearch(FileManager &FM, DiagnosticsEngine &Diags,
+ const LangOptions &LangOpts,
+ const TargetInfo *Target)
+ : FileMgr(FM), Diags(Diags), FrameworkMap(64),
+ ModMap(FileMgr, *Diags.getClient(), LangOpts, Target)
+{
AngledDirIdx = 0;
SystemDirIdx = 0;
NoCurDirSearch = false;
@@ -98,58 +104,81 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
return 0;
}
-const FileEntry *HeaderSearch::lookupModule(StringRef ModuleName,
- std::string *ModuleFileName,
- std::string *UmbrellaHeader) {
+std::string HeaderSearch::getModuleFileName(Module *Module) {
// If we don't have a module cache path, we can't do anything.
- if (ModuleCachePath.empty()) {
- if (ModuleFileName)
- ModuleFileName->clear();
- return 0;
- }
+ if (ModuleCachePath.empty())
+ return std::string();
+
+ SmallString<256> Result(ModuleCachePath);
+ llvm::sys::path::append(Result, Module->getTopLevelModule()->Name + ".pcm");
+ return Result.str().str();
+}
+
+std::string HeaderSearch::getModuleFileName(StringRef ModuleName) {
+ // If we don't have a module cache path, we can't do anything.
+ if (ModuleCachePath.empty())
+ return std::string();
- // Try to find the module path.
- llvm::SmallString<256> FileName(ModuleCachePath);
- llvm::sys::path::append(FileName, ModuleName + ".pcm");
- if (ModuleFileName)
- *ModuleFileName = FileName.str();
-
- if (const FileEntry *ModuleFile
- = getFileMgr().getFile(FileName, /*OpenFile=*/false,
- /*CacheFailure=*/false))
- return ModuleFile;
- // We didn't find the module. If we're not supposed to look for an
- // umbrella header, this is the end of the road.
- if (!UmbrellaHeader)
- return 0;
+ SmallString<256> Result(ModuleCachePath);
+ llvm::sys::path::append(Result, ModuleName + ".pcm");
+ return Result.str().str();
+}
+
+Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch) {
+ // Look in the module map to determine if there is a module by this name.
+ Module *Module = ModMap.findModule(ModuleName);
+ if (Module || !AllowSearch)
+ return Module;
- // Look in each of the framework directories for an umbrella header with
- // the same name as the module.
- // FIXME: We need a way for non-frameworks to provide umbrella headers.
- llvm::SmallString<128> UmbrellaHeaderName;
- UmbrellaHeaderName = ModuleName;
- UmbrellaHeaderName += '/';
- UmbrellaHeaderName += ModuleName;
- UmbrellaHeaderName += ".h";
+ // Look through the various header search paths to load any available module
+ // maps, searching for a module map that describes this module.
for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
- // Skip non-framework include paths
- if (!SearchDirs[Idx].isFramework())
+ if (SearchDirs[Idx].isFramework()) {
+ // Search for or infer a module map for a framework.
+ SmallString<128> FrameworkDirName;
+ FrameworkDirName += SearchDirs[Idx].getFrameworkDir()->getName();
+ llvm::sys::path::append(FrameworkDirName, ModuleName + ".framework");
+ if (const DirectoryEntry *FrameworkDir
+ = FileMgr.getDirectory(FrameworkDirName)) {
+ bool IsSystem
+ = SearchDirs[Idx].getDirCharacteristic() != SrcMgr::C_User;
+ Module = loadFrameworkModule(ModuleName, FrameworkDir, IsSystem);
+ if (Module)
+ break;
+ }
+ }
+
+ // FIXME: Figure out how header maps and module maps will work together.
+
+ // Only deal with normal search directories.
+ if (!SearchDirs[Idx].isNormalDir())
continue;
- // Look for the umbrella header in this directory.
- if (const FileEntry *HeaderFile
- = SearchDirs[Idx].LookupFile(UmbrellaHeaderName, *this, 0, 0,
- StringRef(), 0)) {
- *UmbrellaHeader = HeaderFile->getName();
- return 0;
+ // Search for a module map file in this directory.
+ if (loadModuleMapFile(SearchDirs[Idx].getDir()) == LMM_NewlyLoaded) {
+ // We just loaded a module map file; check whether the module is
+ // available now.
+ Module = ModMap.findModule(ModuleName);
+ if (Module)
+ break;
+ }
+
+ // Search for a module map in a subdirectory with the same name as the
+ // module.
+ SmallString<128> NestedModuleMapDirName;
+ NestedModuleMapDirName = SearchDirs[Idx].getDir()->getName();
+ llvm::sys::path::append(NestedModuleMapDirName, ModuleName);
+ if (loadModuleMapFile(NestedModuleMapDirName) == LMM_NewlyLoaded) {
+ // If we just loaded a module map file, look for the module again.
+ Module = ModMap.findModule(ModuleName);
+ if (Module)
+ break;
}
}
- // We did not find an umbrella header. Clear out the UmbrellaHeader pointee
- // so our caller knows that we failed.
- UmbrellaHeader->clear();
- return 0;
+ return Module;
}
//===----------------------------------------------------------------------===//
@@ -175,9 +204,11 @@ const FileEntry *DirectoryLookup::LookupFile(
HeaderSearch &HS,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- StringRef BuildingModule,
- StringRef *SuggestedModule) const {
- llvm::SmallString<1024> TmpDir;
+ Module **SuggestedModule,
+ bool &InUserSpecifiedSystemFramework) const {
+ InUserSpecifiedSystemFramework = false;
+
+ SmallString<1024> TmpDir;
if (isNormalDir()) {
// Concatenate the requested file onto the directory.
TmpDir = getDir()->getName();
@@ -191,12 +222,27 @@ const FileEntry *DirectoryLookup::LookupFile(
RelativePath->clear();
RelativePath->append(Filename.begin(), Filename.end());
}
+
+ // If we have a module map that might map this header, load it and
+ // check whether we'll have a suggestion for a module.
+ if (SuggestedModule && HS.hasModuleMap(TmpDir, getDir())) {
+ const FileEntry *File = HS.getFileMgr().getFile(TmpDir.str(),
+ /*openFile=*/false);
+ if (!File)
+ return File;
+
+ // If there is a module that corresponds to this header,
+ // suggest it.
+ *SuggestedModule = HS.findModuleForHeader(File);
+ return File;
+ }
+
return HS.getFileMgr().getFile(TmpDir.str(), /*openFile=*/true);
}
if (isFramework())
return DoFrameworkLookup(Filename, HS, SearchPath, RelativePath,
- BuildingModule, SuggestedModule);
+ SuggestedModule, InUserSpecifiedSystemFramework);
assert(isHeaderMap() && "Unknown directory lookup");
const FileEntry * const Result = getHeaderMap()->LookupFile(
@@ -223,8 +269,8 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
HeaderSearch &HS,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- StringRef BuildingModule,
- StringRef *SuggestedModule) const
+ Module **SuggestedModule,
+ bool &InUserSpecifiedSystemFramework) const
{
FileManager &FileMgr = HS.getFileMgr();
@@ -233,49 +279,71 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
if (SlashPos == StringRef::npos) return 0;
// Find out if this is the home for the specified framework, by checking
- // HeaderSearch. Possible answer are yes/no and unknown.
- const DirectoryEntry *&FrameworkDirCache =
+ // HeaderSearch. Possible answers are yes/no and unknown.
+ HeaderSearch::FrameworkCacheEntry &CacheEntry =
HS.LookupFrameworkCache(Filename.substr(0, SlashPos));
// If it is known and in some other directory, fail.
- if (FrameworkDirCache && FrameworkDirCache != getFrameworkDir())
+ if (CacheEntry.Directory && CacheEntry.Directory != getFrameworkDir())
return 0;
// Otherwise, construct the path to this framework dir.
// FrameworkName = "/System/Library/Frameworks/"
- llvm::SmallString<1024> FrameworkName;
+ SmallString<1024> FrameworkName;
FrameworkName += getFrameworkDir()->getName();
if (FrameworkName.empty() || FrameworkName.back() != '/')
FrameworkName.push_back('/');
// FrameworkName = "/System/Library/Frameworks/Cocoa"
- FrameworkName.append(Filename.begin(), Filename.begin()+SlashPos);
+ StringRef ModuleName(Filename.begin(), SlashPos);
+ FrameworkName += ModuleName;
// FrameworkName = "/System/Library/Frameworks/Cocoa.framework/"
FrameworkName += ".framework/";
- // If the cache entry is still unresolved, query to see if the cache entry is
- // still unresolved. If so, check its existence now.
- if (FrameworkDirCache == 0) {
+ // If the cache entry was unresolved, populate it now.
+ if (CacheEntry.Directory == 0) {
HS.IncrementFrameworkLookupCount();
// If the framework dir doesn't exist, we fail.
- // FIXME: It's probably more efficient to query this with FileMgr.getDir.
- bool Exists;
- if (llvm::sys::fs::exists(FrameworkName.str(), Exists) || !Exists)
- return 0;
+ const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName.str());
+ if (Dir == 0) return 0;
// Otherwise, if it does, remember that this is the right direntry for this
// framework.
- FrameworkDirCache = getFrameworkDir();
+ CacheEntry.Directory = getFrameworkDir();
+
+ // If this is a user search directory, check if the framework has been
+ // user-specified as a system framework.
+ if (getDirCharacteristic() == SrcMgr::C_User) {
+ SmallString<1024> SystemFrameworkMarker(FrameworkName);
+ SystemFrameworkMarker += ".system_framework";
+ if (llvm::sys::fs::exists(SystemFrameworkMarker.str())) {
+ CacheEntry.IsUserSpecifiedSystemFramework = true;
+ }
+ }
}
+ // Set the 'user-specified system framework' flag.
+ InUserSpecifiedSystemFramework = CacheEntry.IsUserSpecifiedSystemFramework;
+
if (RelativePath != NULL) {
RelativePath->clear();
RelativePath->append(Filename.begin()+SlashPos+1, Filename.end());
}
+ // If we're allowed to look for modules, try to load or create the module
+ // corresponding to this framework.
+ Module *Module = 0;
+ if (SuggestedModule) {
+ if (const DirectoryEntry *FrameworkDir
+ = FileMgr.getDirectory(FrameworkName)) {
+ bool IsSystem = getDirCharacteristic() != SrcMgr::C_User;
+ Module = HS.loadFrameworkModule(ModuleName, FrameworkDir, IsSystem);
+ }
+ }
+
// Check "/System/Library/Frameworks/Cocoa.framework/Headers/file.h"
unsigned OrigSize = FrameworkName.size();
@@ -287,16 +355,13 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
SearchPath->append(FrameworkName.begin(), FrameworkName.end()-1);
}
- /// Determine whether this is the module we're building or not.
- bool AutomaticImport = SuggestedModule &&
- (BuildingModule != StringRef(Filename.begin(), SlashPos)) &&
- !Filename.substr(SlashPos + 1).startswith("..");
-
+ // If we found a module for this framework, import the header through it.
+ bool AutomaticImport = Module;
FrameworkName.append(Filename.begin()+SlashPos+1, Filename.end());
if (const FileEntry *FE = FileMgr.getFile(FrameworkName.str(),
/*openFile=*/!AutomaticImport)) {
if (AutomaticImport)
- *SuggestedModule = StringRef(Filename.begin(), SlashPos);
+ *SuggestedModule = HS.findModuleForHeader(FE);
return FE;
}
@@ -311,10 +376,14 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
const FileEntry *FE = FileMgr.getFile(FrameworkName.str(),
/*openFile=*/!AutomaticImport);
if (FE && AutomaticImport)
- *SuggestedModule = StringRef(Filename.begin(), SlashPos);
+ *SuggestedModule = HS.findModuleForHeader(FE);
return FE;
}
+void HeaderSearch::setTarget(const TargetInfo &Target) {
+ ModMap.setTarget(Target);
+}
+
//===----------------------------------------------------------------------===//
// Header File Location.
@@ -334,10 +403,11 @@ const FileEntry *HeaderSearch::LookupFile(
const FileEntry *CurFileEnt,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- StringRef *SuggestedModule)
+ Module **SuggestedModule,
+ bool SkipCache)
{
if (SuggestedModule)
- *SuggestedModule = StringRef();
+ *SuggestedModule = 0;
// If 'Filename' is absolute, check to see if it exists and no searching.
if (llvm::sys::path::is_absolute(Filename)) {
@@ -362,7 +432,7 @@ const FileEntry *HeaderSearch::LookupFile(
// a subsequent include of "baz.h" should resolve to "whatever/foo/baz.h".
// This search is not done for <> headers.
if (CurFileEnt && !isAngled && !NoCurDirSearch) {
- llvm::SmallString<1024> TmpDir;
+ SmallString<1024> TmpDir;
// Concatenate the requested file onto the directory.
// FIXME: Portability. Filename concatenation should be in sys::Path.
TmpDir += CurFileEnt->getDir()->getName();
@@ -410,7 +480,7 @@ const FileEntry *HeaderSearch::LookupFile(
// If the entry has been previously looked up, the first value will be
// non-zero. If the value is equal to i (the start point of our search), then
// this is a matching hit.
- if (CacheLookup.first == i+1) {
+ if (!SkipCache && CacheLookup.first == i+1) {
// Skip querying potentially lots of directories for this lookup.
i = CacheLookup.second;
} else {
@@ -422,9 +492,10 @@ const FileEntry *HeaderSearch::LookupFile(
// Check each directory in sequence to see if it contains this file.
for (; i != SearchDirs.size(); ++i) {
+ bool InUserSpecifiedSystemFramework = false;
const FileEntry *FE =
SearchDirs[i].LookupFile(Filename, *this, SearchPath, RelativePath,
- BuildingModule, SuggestedModule);
+ SuggestedModule, InUserSpecifiedSystemFramework);
if (!FE) continue;
CurDir = &SearchDirs[i];
@@ -433,6 +504,12 @@ const FileEntry *HeaderSearch::LookupFile(
HeaderFileInfo &HFI = getFileInfo(FE);
HFI.DirInfo = CurDir->getDirCharacteristic();
+ // If the directory characteristic is User but this framework was
+ // user-specified to be treated as a system framework, promote the
+ // characteristic.
+ if (HFI.DirInfo == SrcMgr::C_User && InUserSpecifiedSystemFramework)
+ HFI.DirInfo = SrcMgr::C_System;
+
// If this file is found in a header map and uses the framework style of
// includes, then this header is part of a framework we're building.
if (CurDir->isIndexHeaderMap()) {
@@ -456,7 +533,7 @@ const FileEntry *HeaderSearch::LookupFile(
if (CurFileEnt && !isAngled && Filename.find('/') == StringRef::npos) {
HeaderFileInfo &IncludingHFI = getFileInfo(CurFileEnt);
if (IncludingHFI.IndexHeaderMapHeader) {
- llvm::SmallString<128> ScratchFilename;
+ SmallString<128> ScratchFilename;
ScratchFilename += IncludingHFI.Framework;
ScratchFilename += '/';
ScratchFilename += Filename;
@@ -491,6 +568,7 @@ LookupSubframeworkHeader(StringRef Filename,
assert(ContextFileEnt && "No context file?");
// Framework names must have a '/' in the filename. Find it.
+ // FIXME: Should we permit '\' on Windows?
size_t SlashPos = Filename.find('/');
if (SlashPos == StringRef::npos) return 0;
@@ -498,30 +576,32 @@ LookupSubframeworkHeader(StringRef Filename,
const char *ContextName = ContextFileEnt->getName();
// If the context info wasn't a framework, couldn't be a subframework.
- const char *FrameworkPos = strstr(ContextName, ".framework/");
- if (FrameworkPos == 0)
+ const unsigned DotFrameworkLen = 10; // strlen(".framework")
+ const char *FrameworkPos = strstr(ContextName, ".framework");
+ if (FrameworkPos == 0 ||
+ (FrameworkPos[DotFrameworkLen] != '/' &&
+ FrameworkPos[DotFrameworkLen] != '\\'))
return 0;
- llvm::SmallString<1024> FrameworkName(ContextName,
- FrameworkPos+strlen(".framework/"));
+ SmallString<1024> FrameworkName(ContextName, FrameworkPos+DotFrameworkLen+1);
// Append Frameworks/HIToolbox.framework/
FrameworkName += "Frameworks/";
FrameworkName.append(Filename.begin(), Filename.begin()+SlashPos);
FrameworkName += ".framework/";
- llvm::StringMapEntry<const DirectoryEntry *> &CacheLookup =
+ llvm::StringMapEntry<FrameworkCacheEntry> &CacheLookup =
FrameworkMap.GetOrCreateValue(Filename.substr(0, SlashPos));
// Some other location?
- if (CacheLookup.getValue() &&
+ if (CacheLookup.getValue().Directory &&
CacheLookup.getKeyLength() == FrameworkName.size() &&
memcmp(CacheLookup.getKeyData(), &FrameworkName[0],
CacheLookup.getKeyLength()) != 0)
return 0;
// Cache subframework.
- if (CacheLookup.getValue() == 0) {
+ if (CacheLookup.getValue().Directory == 0) {
++NumSubFrameworkLookups;
// If the framework dir doesn't exist, we fail.
@@ -530,7 +610,7 @@ LookupSubframeworkHeader(StringRef Filename,
// Otherwise, if it does, remember that this is the right direntry for this
// framework.
- CacheLookup.setValue(Dir);
+ CacheLookup.getValue().Directory = Dir;
}
const FileEntry *FE = 0;
@@ -541,7 +621,7 @@ LookupSubframeworkHeader(StringRef Filename,
}
// Check ".../Frameworks/HIToolbox.framework/Headers/HIToolbox.h"
- llvm::SmallString<1024> HeadersFilename(FrameworkName);
+ SmallString<1024> HeadersFilename(FrameworkName);
HeadersFilename += "Headers/";
if (SearchPath != NULL) {
SearchPath->clear();
@@ -576,6 +656,28 @@ LookupSubframeworkHeader(StringRef Filename,
return FE;
}
+/// \brief Helper static function to normalize a path for injection into
+/// a synthetic header.
+/*static*/ std::string
+HeaderSearch::NormalizeDashIncludePath(StringRef File, FileManager &FileMgr) {
+ // Implicit include paths should be resolved relative to the current
+ // working directory first, and then use the regular header search
+ // mechanism. The proper way to handle this is to have the
+ // predefines buffer located at the current working directory, but
+ // it has no file entry. For now, workaround this by using an
+ // absolute path if we find the file here, and otherwise letting
+ // header search handle it.
+ SmallString<128> Path(File);
+ llvm::sys::fs::make_absolute(Path);
+ bool exists;
+ if (llvm::sys::fs::exists(Path.str(), exists) || !exists)
+ Path = File;
+ else
+ FileMgr.getFile(File);
+
+ return Lexer::Stringify(Path.str());
+}
+
//===----------------------------------------------------------------------===//
// File Info Management.
//===----------------------------------------------------------------------===//
@@ -687,3 +789,247 @@ size_t HeaderSearch::getTotalMemory() const {
StringRef HeaderSearch::getUniqueFrameworkName(StringRef Framework) {
return FrameworkNames.GetOrCreateValue(Framework).getKey();
}
+
+bool HeaderSearch::hasModuleMap(StringRef FileName,
+ const DirectoryEntry *Root) {
+ llvm::SmallVector<const DirectoryEntry *, 2> FixUpDirectories;
+
+ StringRef DirName = FileName;
+ do {
+ // Get the parent directory name.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ return false;
+
+ // Determine whether this directory exists.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(DirName);
+ if (!Dir)
+ return false;
+
+ // Try to load the module map file in this directory.
+ switch (loadModuleMapFile(Dir)) {
+ case LMM_NewlyLoaded:
+ case LMM_AlreadyLoaded:
+ // Success. All of the directories we stepped through inherit this module
+ // map file.
+ for (unsigned I = 0, N = FixUpDirectories.size(); I != N; ++I)
+ DirectoryHasModuleMap[FixUpDirectories[I]] = true;
+
+ return true;
+
+ case LMM_NoDirectory:
+ case LMM_InvalidModuleMap:
+ break;
+ }
+
+ // If we hit the top of our search, we're done.
+ if (Dir == Root)
+ return false;
+
+ // Keep track of all of the directories we checked, so we can mark them as
+ // having module maps if we eventually do find a module map.
+ FixUpDirectories.push_back(Dir);
+ } while (true);
+}
+
+Module *HeaderSearch::findModuleForHeader(const FileEntry *File) {
+ if (Module *Mod = ModMap.findModuleForHeader(File))
+ return Mod;
+
+ return 0;
+}
+
+bool HeaderSearch::loadModuleMapFile(const FileEntry *File) {
+ const DirectoryEntry *Dir = File->getDir();
+
+ llvm::DenseMap<const DirectoryEntry *, bool>::iterator KnownDir
+ = DirectoryHasModuleMap.find(Dir);
+ if (KnownDir != DirectoryHasModuleMap.end())
+ return !KnownDir->second;
+
+ bool Result = ModMap.parseModuleMapFile(File);
+ if (!Result && llvm::sys::path::filename(File->getName()) == "module.map") {
+ // If the file we loaded was a module.map, look for the corresponding
+ // module_private.map.
+ SmallString<128> PrivateFilename(Dir->getName());
+ llvm::sys::path::append(PrivateFilename, "module_private.map");
+ if (const FileEntry *PrivateFile = FileMgr.getFile(PrivateFilename))
+ Result = ModMap.parseModuleMapFile(PrivateFile);
+ }
+
+ DirectoryHasModuleMap[Dir] = !Result;
+ return Result;
+}
+
+Module *HeaderSearch::loadFrameworkModule(StringRef Name,
+ const DirectoryEntry *Dir,
+ bool IsSystem) {
+ if (Module *Module = ModMap.findModule(Name))
+ return Module;
+
+ // Try to load a module map file.
+ switch (loadModuleMapFile(Dir)) {
+ case LMM_InvalidModuleMap:
+ break;
+
+ case LMM_AlreadyLoaded:
+ case LMM_NoDirectory:
+ return 0;
+
+ case LMM_NewlyLoaded:
+ return ModMap.findModule(Name);
+ }
+
+ // The top-level framework directory, from which we'll infer a framework
+ // module.
+ const DirectoryEntry *TopFrameworkDir = Dir;
+
+ // The path from the module we're actually looking for back to the top-level
+ // framework name.
+ llvm::SmallVector<StringRef, 2> SubmodulePath;
+ SubmodulePath.push_back(Name);
+
+ // Walk the directory structure to find any enclosing frameworks.
+ StringRef DirName = Dir->getName();
+ do {
+ // Get the parent directory name.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Determine whether this directory exists.
+ Dir = FileMgr.getDirectory(DirName);
+ if (!Dir)
+ break;
+
+ // If this is a framework directory, then we're a subframework of this
+ // framework.
+ if (llvm::sys::path::extension(DirName) == ".framework") {
+ SubmodulePath.push_back(llvm::sys::path::stem(DirName));
+ TopFrameworkDir = Dir;
+ }
+ } while (true);
+
+ // Try to infer a module map from the top-level framework directory.
+ Module *Result = ModMap.inferFrameworkModule(SubmodulePath.back(),
+ TopFrameworkDir,
+ IsSystem,
+ /*Parent=*/0);
+
+ // Follow the submodule path to find the requested (sub)framework module
+ // within the top-level framework module.
+ SubmodulePath.pop_back();
+ while (!SubmodulePath.empty() && Result) {
+ Result = ModMap.lookupModuleQualified(SubmodulePath.back(), Result);
+ SubmodulePath.pop_back();
+ }
+ return Result;
+}
+
+
+HeaderSearch::LoadModuleMapResult
+HeaderSearch::loadModuleMapFile(StringRef DirName) {
+ if (const DirectoryEntry *Dir = FileMgr.getDirectory(DirName))
+ return loadModuleMapFile(Dir);
+
+ return LMM_NoDirectory;
+}
+
+HeaderSearch::LoadModuleMapResult
+HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir) {
+ llvm::DenseMap<const DirectoryEntry *, bool>::iterator KnownDir
+ = DirectoryHasModuleMap.find(Dir);
+ if (KnownDir != DirectoryHasModuleMap.end())
+ return KnownDir->second? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
+
+ SmallString<128> ModuleMapFileName;
+ ModuleMapFileName += Dir->getName();
+ unsigned ModuleMapDirNameLen = ModuleMapFileName.size();
+ llvm::sys::path::append(ModuleMapFileName, "module.map");
+ if (const FileEntry *ModuleMapFile = FileMgr.getFile(ModuleMapFileName)) {
+ // We have found a module map file. Try to parse it.
+ if (ModMap.parseModuleMapFile(ModuleMapFile)) {
+ // No suitable module map.
+ DirectoryHasModuleMap[Dir] = false;
+ return LMM_InvalidModuleMap;
+ }
+
+ // This directory has a module map.
+ DirectoryHasModuleMap[Dir] = true;
+
+ // Check whether there is a private module map that we need to load as well.
+ ModuleMapFileName.erase(ModuleMapFileName.begin() + ModuleMapDirNameLen,
+ ModuleMapFileName.end());
+ llvm::sys::path::append(ModuleMapFileName, "module_private.map");
+ if (const FileEntry *PrivateModuleMapFile
+ = FileMgr.getFile(ModuleMapFileName)) {
+ if (ModMap.parseModuleMapFile(PrivateModuleMapFile)) {
+ // No suitable module map.
+ DirectoryHasModuleMap[Dir] = false;
+ return LMM_InvalidModuleMap;
+ }
+ }
+
+ return LMM_NewlyLoaded;
+ }
+
+ // No suitable module map.
+ DirectoryHasModuleMap[Dir] = false;
+ return LMM_InvalidModuleMap;
+}
+
+void HeaderSearch::collectAllModules(llvm::SmallVectorImpl<Module *> &Modules) {
+ Modules.clear();
+
+ // Load module maps for each of the header search directories.
+ for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ if (SearchDirs[Idx].isFramework()) {
+ llvm::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(SearchDirs[Idx].getFrameworkDir()->getName(),
+ DirNative);
+
+ // Search each of the ".framework" directories to load them as modules.
+ bool IsSystem = SearchDirs[Idx].getDirCharacteristic() != SrcMgr::C_User;
+ for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (llvm::sys::path::extension(Dir->path()) != ".framework")
+ continue;
+
+ const DirectoryEntry *FrameworkDir = FileMgr.getDirectory(Dir->path());
+ if (!FrameworkDir)
+ continue;
+
+ // Load this framework module.
+ loadFrameworkModule(llvm::sys::path::stem(Dir->path()), FrameworkDir,
+ IsSystem);
+ }
+ continue;
+ }
+
+ // FIXME: Deal with header maps.
+ if (SearchDirs[Idx].isHeaderMap())
+ continue;
+
+ // Try to load a module map file for the search directory.
+ loadModuleMapFile(SearchDirs[Idx].getDir());
+
+ // Try to load module map files for immediate subdirectories of this search
+ // directory.
+ llvm::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(SearchDirs[Idx].getDir()->getName(), DirNative);
+ for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ loadModuleMapFile(Dir->path());
+ }
+ }
+
+ // Populate the list of modules.
+ for (ModuleMap::module_iterator M = ModMap.module_begin(),
+ MEnd = ModMap.module_end();
+ M != MEnd; ++M) {
+ Modules.push_back(M->getValue());
+ }
+}
+
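For orientation, hasModuleMap() above walks from the header's directory up toward the search-directory root, probing each level for module.map (and a sibling module_private.map). A standalone sketch of that walk, not the Clang code; the paths are hypothetical:

#include <stdio.h>
#include <string.h>

int main(void) {
  char path[] = "/usr/include/MyLib/detail/header.h";
  const char *root = "/usr/include";     /* the search directory */
  for (;;) {
    char *slash = strrchr(path, '/');
    if (!slash) break;
    *slash = '\0';                       /* drop the last component */
    if (path[0] == '\0') break;          /* hit the filesystem root */
    printf("try %s/module.map (and module_private.map)\n", path);
    if (strcmp(path, root) == 0) break;  /* stop at the root dir */
  }
  return 0;
}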
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
index a98d889..535a852 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -30,6 +30,7 @@
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstring>
@@ -59,6 +60,8 @@ tok::ObjCKeywordKind Token::getObjCKeywordID() const {
// Lexer Class Implementation
//===----------------------------------------------------------------------===//
+void Lexer::anchor() { }
+
void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
const char *BufEnd) {
InitCharacterInfo();
@@ -114,7 +117,7 @@ void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
: PreprocessorLexer(&PP, FID),
FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
- Features(PP.getLangOptions()) {
+ LangOpts(PP.getLangOpts()) {
InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
InputFile->getBufferEnd());
@@ -126,9 +129,9 @@ Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
-Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
+Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
const char *BufStart, const char *BufPtr, const char *BufEnd)
- : FileLoc(fileloc), Features(features) {
+ : FileLoc(fileloc), LangOpts(langOpts) {
InitLexer(BufStart, BufPtr, BufEnd);
@@ -140,8 +143,8 @@ Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
- const SourceManager &SM, const LangOptions &features)
- : FileLoc(SM.getLocForStartOfFile(FID)), Features(features) {
+ const SourceManager &SM, const LangOptions &langOpts)
+ : FileLoc(SM.getLocForStartOfFile(FID)), LangOpts(langOpts) {
InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(),
FromFile->getBufferEnd());
@@ -284,7 +287,7 @@ StringRef Lexer::getSpelling(SourceLocation loc,
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
- const LangOptions &Features, bool *Invalid) {
+ const LangOptions &LangOpts, bool *Invalid) {
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
// If this token contains nothing interesting, return it directly.
@@ -306,7 +309,7 @@ std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
Ptr != End; ) {
unsigned CharSize;
- Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features));
+ Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, LangOpts));
Ptr += CharSize;
}
assert(Result.size() != unsigned(Tok.getLength()) &&
@@ -326,7 +329,7 @@ std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
/// if an internal buffer is returned.
unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
const SourceManager &SourceMgr,
- const LangOptions &Features, bool *Invalid) {
+ const LangOptions &LangOpts, bool *Invalid) {
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
const char *TokStart = 0;
@@ -366,7 +369,7 @@ unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
Ptr != End; ) {
unsigned CharSize;
- *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features);
+ *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, LangOpts);
Ptr += CharSize;
}
assert(unsigned(OutBuf-Buffer) != Tok.getLength() &&
@@ -487,11 +490,11 @@ SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
SourceLocation FileLoc = SM.getSpellingLoc(Loc);
SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
- std::pair<FileID, unsigned> BeginFileLocInfo= SM.getDecomposedLoc(BeginFileLoc);
+ std::pair<FileID, unsigned> BeginFileLocInfo
+ = SM.getDecomposedLoc(BeginFileLoc);
assert(FileLocInfo.first == BeginFileLocInfo.first &&
FileLocInfo.second >= BeginFileLocInfo.second);
- return Loc.getLocWithOffset(SM.getDecomposedLoc(BeginFileLoc).second -
- SM.getDecomposedLoc(FileLoc).second);
+ return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
}
namespace {
@@ -505,13 +508,13 @@ namespace {
std::pair<unsigned, bool>
Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
- const LangOptions &Features, unsigned MaxLines) {
+ const LangOptions &LangOpts, unsigned MaxLines) {
// Create a lexer starting at the beginning of the file. Note that we use a
// "fake" file source location at offset 1 so that the lexer will track our
// position within the file.
const unsigned StartOffset = 1;
SourceLocation StartLoc = SourceLocation::getFromRawEncoding(StartOffset);
- Lexer TheLexer(StartLoc, Features, Buffer->getBufferStart(),
+ Lexer TheLexer(StartLoc, LangOpts, Buffer->getBufferStart(),
Buffer->getBufferStart(), Buffer->getBufferEnd());
bool InPreprocessorDirective = false;
@@ -655,7 +658,7 @@ Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
unsigned CharNo,
const SourceManager &SM,
- const LangOptions &Features) {
+ const LangOptions &LangOpts) {
// Figure out how many physical characters away the specified expansion
// character is. This needs to take into consideration newlines and
// trigraphs.
@@ -681,7 +684,7 @@ SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
// lexer to parse it correctly.
for (; CharNo; --CharNo) {
unsigned Size;
- Lexer::getCharAndSizeNoWarn(TokPtr, Size, Features);
+ Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
TokPtr += Size;
PhysOffset += Size;
}
@@ -713,19 +716,16 @@ SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
/// a source location pointing to the last character in the token, etc.
SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
const SourceManager &SM,
- const LangOptions &Features) {
+ const LangOptions &LangOpts) {
if (Loc.isInvalid())
return SourceLocation();
if (Loc.isMacroID()) {
- if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, Features))
+ if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
return SourceLocation(); // Points inside the macro expansion.
-
- // Continue and find the location just after the macro expansion.
- Loc = SM.getExpansionRange(Loc).second;
}
- unsigned Len = Lexer::MeasureTokenLength(Loc, SM, Features);
+ unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
if (Len > Offset)
Len = Len - Offset;
else
@@ -738,7 +738,8 @@ SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
/// token of the macro expansion.
bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
const SourceManager &SM,
- const LangOptions &LangOpts) {
+ const LangOptions &LangOpts,
+ SourceLocation *MacroBegin) {
assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
std::pair<FileID, unsigned> infoLoc = SM.getDecomposedLoc(loc);
@@ -749,17 +750,22 @@ bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
SourceLocation expansionLoc =
SM.getSLocEntry(infoLoc.first).getExpansion().getExpansionLocStart();
- if (expansionLoc.isFileID())
- return true; // No other macro expansions, this is the first.
+ if (expansionLoc.isFileID()) {
+ // No other macro expansions, this is the first.
+ if (MacroBegin)
+ *MacroBegin = expansionLoc;
+ return true;
+ }
- return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts);
+ return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
}
/// \brief Returns true if the given MacroID location points at the last
/// token of the macro expansion.
bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
- const SourceManager &SM,
- const LangOptions &LangOpts) {
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroEnd) {
assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
SourceLocation spellLoc = SM.getSpellingLoc(loc);
@@ -777,10 +783,192 @@ bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
SourceLocation expansionLoc =
SM.getSLocEntry(FID).getExpansion().getExpansionLocEnd();
- if (expansionLoc.isFileID())
- return true; // No other macro expansions.
+ if (expansionLoc.isFileID()) {
+ // No other macro expansions.
+ if (MacroEnd)
+ *MacroEnd = expansionLoc;
+ return true;
+ }
+
+ return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
+}
+
+static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Begin = Range.getBegin();
+ SourceLocation End = Range.getEnd();
+ assert(Begin.isFileID() && End.isFileID());
+ if (Range.isTokenRange()) {
+ End = Lexer::getLocForEndOfToken(End, 0, SM, LangOpts);
+ if (End.isInvalid())
+ return CharSourceRange();
+ }
+
+ // Break down the source locations.
+ FileID FID;
+ unsigned BeginOffs;
+ llvm::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
+ if (FID.isInvalid())
+ return CharSourceRange();
+
+ unsigned EndOffs;
+ if (!SM.isInFileID(End, FID, &EndOffs) ||
+ BeginOffs > EndOffs)
+ return CharSourceRange();
+
+ return CharSourceRange::getCharRange(Begin, End);
+}
+
+/// \brief Accepts a range and returns a character range with file locations.
+///
+/// Returns a null range if a part of the range resides inside a macro
+/// expansion or the range does not reside on the same FileID.
+CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Begin = Range.getBegin();
+ SourceLocation End = Range.getEnd();
+ if (Begin.isInvalid() || End.isInvalid())
+ return CharSourceRange();
+
+ if (Begin.isFileID() && End.isFileID())
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+
+ if (Begin.isMacroID() && End.isFileID()) {
+ if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
+ return CharSourceRange();
+ Range.setBegin(Begin);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ if (Begin.isFileID() && End.isMacroID()) {
+ if ((Range.isTokenRange() && !isAtEndOfMacroExpansion(End, SM, LangOpts,
+ &End)) ||
+ (Range.isCharRange() && !isAtStartOfMacroExpansion(End, SM, LangOpts,
+ &End)))
+ return CharSourceRange();
+ Range.setEnd(End);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ assert(Begin.isMacroID() && End.isMacroID());
+ SourceLocation MacroBegin, MacroEnd;
+ if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
+ ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
+ &MacroEnd)) ||
+ (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
+ &MacroEnd)))) {
+ Range.setBegin(MacroBegin);
+ Range.setEnd(MacroEnd);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
- return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts);
+ FileID FID;
+ unsigned BeginOffs;
+ llvm::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
+ if (FID.isInvalid())
+ return CharSourceRange();
+
+ unsigned EndOffs;
+ if (!SM.isInFileID(End, FID, &EndOffs) ||
+ BeginOffs > EndOffs)
+ return CharSourceRange();
+
+ const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
+ const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
+ if (Expansion.isMacroArgExpansion() &&
+ Expansion.getSpellingLoc().isFileID()) {
+ SourceLocation SpellLoc = Expansion.getSpellingLoc();
+ Range.setBegin(SpellLoc.getLocWithOffset(BeginOffs));
+ Range.setEnd(SpellLoc.getLocWithOffset(EndOffs));
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ return CharSourceRange();
+}
+
+StringRef Lexer::getSourceText(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool *Invalid) {
+ Range = makeFileCharRange(Range, SM, LangOpts);
+ if (Range.isInvalid()) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
+ if (beginInfo.first.isInvalid()) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ unsigned EndOffs;
+ if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
+ beginInfo.second > EndOffs) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
+ if (invalidTemp) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ if (Invalid) *Invalid = false;
+ return file.substr(beginInfo.second, EndOffs - beginInfo.second);
+}
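+
+// Usage sketch (hypothetical caller, not part of this change): recover the
+// spelled text of an expression even when its range starts or ends inside
+// a macro expansion:
+//   CharSourceRange R = CharSourceRange::getTokenRange(E->getSourceRange());
+//   bool Invalid = false;
+//   StringRef Text = Lexer::getSourceText(R, SM, LangOpts, &Invalid);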
+
+StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ assert(Loc.isMacroID() && "Only reasonable to call this on macros");
+
+ // Find the location of the immediate macro expansion.
+ while (1) {
+ FileID FID = SM.getFileID(Loc);
+ const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
+ const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
+ Loc = Expansion.getExpansionLocStart();
+ if (!Expansion.isMacroArgExpansion())
+ break;
+
+ // For macro arguments we need to check that the argument did not come
+ // from an inner macro, e.g.: "MAC1( MAC2(foo) )"
+
+ // Loc points to the argument id of the macro definition, move to the
+ // macro expansion.
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+ SourceLocation SpellLoc = Expansion.getSpellingLoc();
+ if (SpellLoc.isFileID())
+ break; // No inner macro.
+
+ // If spelling location resides in the same FileID as macro expansion
+ // location, it means there is no inner macro.
+ FileID MacroFID = SM.getFileID(Loc);
+ if (SM.isInFileID(SpellLoc, MacroFID))
+ break;
+
+ // Argument came from inner macro.
+ Loc = SpellLoc;
+ }
+
+ // Find the spelling location of the start of the non-argument expansion
+ // range. This is where the macro name was spelled in order to begin
+ // expanding this macro.
+ Loc = SM.getSpellingLoc(Loc);
+
+ // Dig out the buffer where the macro name was spelled and the extents of the
+ // name so that we can render it into the expansion note.
+ std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
+ unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+ StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
+ return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
//===----------------------------------------------------------------------===//
@@ -890,6 +1078,12 @@ static void InitCharacterInfo() {
}
+/// isIdentifierHead - Return true if this is the first character of an
+/// identifier, which is [a-zA-Z_].
+static inline bool isIdentifierHead(unsigned char c) {
+ return (CharInfo[c] & (CHAR_LETTER|CHAR_UNDER)) ? true : false;
+}
+
/// isIdentifierBody - Return true if this is the body character of an
/// identifier, which is [a-zA-Z0-9_].
static inline bool isIdentifierBody(unsigned char c) {
@@ -1018,7 +1212,7 @@ static char DecodeTrigraphChar(const char *CP, Lexer *L) {
char Res = GetTrigraphCharForLetter(*CP);
if (!Res || !L) return Res;
- if (!L->getFeatures().Trigraphs) {
+ if (!L->getLangOpts().Trigraphs) {
if (!L->isLexingRawMode())
L->Diag(CP-2, diag::trigraph_ignored);
return 0;
@@ -1085,9 +1279,8 @@ SourceLocation Lexer::findLocationAfterToken(SourceLocation Loc,
const LangOptions &LangOpts,
bool SkipTrailingWhitespaceAndNewLine) {
if (Loc.isMacroID()) {
- if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts))
+ if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
return SourceLocation();
- Loc = SM.getExpansionRange(Loc).second;
}
Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);
@@ -1169,6 +1362,13 @@ Slash:
// Found backslash<whitespace><newline>. Parse the char after it.
Size += EscapedNewLineSize;
Ptr += EscapedNewLineSize;
+
+ // If the char that we finally got was a \n, then we must have had
+ // something like \<newline><newline>. We don't want to consume the
+ // second newline.
+ if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
+ return ' ';
+
// Use slow version to accumulate a correct size field.
return getCharAndSizeSlow(Ptr, Size, Tok);
}
@@ -1205,7 +1405,7 @@ Slash:
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
- const LangOptions &Features) {
+ const LangOptions &LangOpts) {
// If we have a slash, look for an escaped newline.
if (Ptr[0] == '\\') {
++Size;
@@ -1220,8 +1420,14 @@ Slash:
Size += EscapedNewLineSize;
Ptr += EscapedNewLineSize;
+ // If the char that we finally got was a \n, then we must have had
+ // something like \<newline><newline>. We don't want to consume the
+ // second newline.
+ if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
+ return ' ';
+
// Use slow version to accumulate a correct size field.
- return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
+ return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
}
// Otherwise, this is not an escaped newline, just return the slash.
@@ -1229,7 +1435,7 @@ Slash:
}
// If this is a trigraph, process it.
- if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
+ if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
// If this is actually a legal trigraph (not something like "??x"), return
// it.
if (char C = GetTrigraphCharForLetter(Ptr[2])) {
@@ -1272,7 +1478,7 @@ void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
//
// TODO: Could merge these checks into a CharInfo flag to make the comparison
// cheaper
- if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
+ if (C != '\\' && C != '?' && (C != '$' || !LangOpts.DollarIdents)) {
FinishIdentifier:
const char *IdStart = BufferPtr;
FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
@@ -1301,7 +1507,7 @@ FinishIdentifier:
while (1) {
if (C == '$') {
// If we hit a $ and they are not supported in identifiers, we are done.
- if (!Features.DollarIdents) goto FinishIdentifier;
+ if (!LangOpts.DollarIdents) goto FinishIdentifier;
// Otherwise, emit a diagnostic and continue.
if (!isLexingRawMode())
@@ -1327,12 +1533,12 @@ FinishIdentifier:
/// isHexaLiteral - Return true if Start points to a hex constant, even in
/// Microsoft mode (where this is supposed to be several different tokens).
-static bool isHexaLiteral(const char *Start, const LangOptions &Features) {
+static bool isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
unsigned Size;
- char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, Features);
+ char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
if (C1 != '0')
return false;
- char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, Features);
+ char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
return (C2 == 'x' || C2 == 'X');
}
@@ -1343,7 +1549,7 @@ void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
unsigned Size;
char C = getCharAndSize(CurPtr, Size);
char PrevCh = 0;
- while (isNumberBody(C)) { // FIXME: UCNs?
+ while (isNumberBody(C)) { // FIXME: UCNs.
CurPtr = ConsumeChar(CurPtr, Size, Result);
PrevCh = C;
C = getCharAndSize(CurPtr, Size);
@@ -1353,7 +1559,7 @@ void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
// If we are in Microsoft mode, don't continue if the constant is hex.
// For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
- if (!Features.MicrosoftExt || !isHexaLiteral(BufferPtr, Features))
+ if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
}
@@ -1367,6 +1573,46 @@ void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
Result.setLiteralData(TokStart);
}
+/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
+/// in C++11, or warn on a ud-suffix in C++98.
+const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr) {
+ assert(getLangOpts().CPlusPlus);
+
+ // Maximally munch an identifier. FIXME: UCNs.
+ unsigned Size;
+ char C = getCharAndSize(CurPtr, Size);
+ if (isIdentifierHead(C)) {
+ if (!getLangOpts().CPlusPlus0x) {
+ if (!isLexingRawMode())
+ Diag(CurPtr,
+ C == '_' ? diag::warn_cxx11_compat_user_defined_literal
+ : diag::warn_cxx11_compat_reserved_user_defined_literal)
+ << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+ return CurPtr;
+ }
+
+ // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
+ // that does not start with an underscore is ill-formed. As a conforming
+ // extension, we treat all such suffixes as if they had whitespace before
+ // them.
+ if (C != '_') {
+ if (!isLexingRawMode())
+ Diag(CurPtr, getLangOpts().MicrosoftMode ?
+ diag::ext_ms_reserved_user_defined_literal :
+ diag::ext_reserved_user_defined_literal)
+ << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+ return CurPtr;
+ }
+
+ Result.setFlag(Token::HasUDSuffix);
+ do {
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ C = getCharAndSize(CurPtr, Size);
+ } while (isIdentifierBody(C));
+ }
+ return CurPtr;
+}
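+
+// (Illustrative: in C++11, `"km"_unit` lexes as one string-literal token
+// with Token::HasUDSuffix set, while `"km"unit` is diagnosed as a reserved
+// suffix and treated as if a space preceded `unit`.)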
+
/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L" or u8" or u" or U".
void Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
@@ -1388,7 +1634,7 @@ void Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
if (C == '\n' || C == '\r' || // Newline.
(C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
- if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
Diag(BufferPtr, diag::warn_unterminated_string);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
@@ -1406,6 +1652,10 @@ void Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
C = getAndAdvanceChar(CurPtr, Result);
}
+ // If we are in C++11, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr);
+
// If a nul character existed in the string, warn about it.
if (NulCharacter && !isLexingRawMode())
Diag(NulCharacter, diag::null_in_string);
@@ -1485,6 +1735,10 @@ void Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
}
}
+ // If we are in C++11, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr);
+
// Update the location of token as well as BufferPtr.
const char *TokStart = BufferPtr;
FormTokenWithChars(Result, CurPtr, Kind);
@@ -1538,7 +1792,7 @@ void Lexer::LexCharConstant(Token &Result, const char *CurPtr,
char C = getAndAdvanceChar(CurPtr, Result);
if (C == '\'') {
- if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
Diag(BufferPtr, diag::err_empty_character);
FormTokenWithChars(Result, CurPtr, tok::unknown);
return;
@@ -1552,7 +1806,7 @@ void Lexer::LexCharConstant(Token &Result, const char *CurPtr,
C = getAndAdvanceChar(CurPtr, Result);
} else if (C == '\n' || C == '\r' || // Newline.
(C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
- if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
Diag(BufferPtr, diag::warn_unterminated_char);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
@@ -1568,6 +1822,10 @@ void Lexer::LexCharConstant(Token &Result, const char *CurPtr,
C = getAndAdvanceChar(CurPtr, Result);
}
+ // If we are in C++11, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr);
+
// If a nul character existed in the character, warn about it.
if (NulCharacter && !isLexingRawMode())
Diag(NulCharacter, diag::null_in_char);
@@ -1633,12 +1891,12 @@ bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
// If BCPL comments aren't explicitly enabled for this language, emit an
// extension warning.
- if (!Features.BCPLComment && !isLexingRawMode()) {
+ if (!LangOpts.BCPLComment && !isLexingRawMode()) {
Diag(BufferPtr, diag::ext_bcpl_comment);
// Mark them enabled so we only emit one warning for this translation
// unit.
- Features.BCPLComment = true;
+ LangOpts.BCPLComment = true;
}
// Scan over the body of the comment. The common case, when scanning, is that
@@ -1687,14 +1945,6 @@ bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
break;
}
- // If the char that we finally got was a \n, then we must have had something
- // like \<newline><newline>. We don't want to have consumed the second
- // newline, we want CurPtr, to end up pointing to it down below.
- if (C == '\n' || C == '\r') {
- --CurPtr;
- C = 'x'; // doesn't matter what this is.
- }
-
// If we read multiple characters, and one of those characters was a \r or
// \n, then we had an escaped newline within the comment. Emit diagnostic
// unless the next line is also a // comment.
@@ -1833,7 +2083,7 @@ static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
// If no trigraphs are enabled, warn that we ignored this trigraph and
// ignore this * character.
- if (!L->getFeatures().Trigraphs) {
+ if (!L->getLangOpts().Trigraphs) {
if (!L->isLexingRawMode())
L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
return false;
@@ -1916,11 +2166,18 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
if (C == '/') goto FoundSlash;
#ifdef __SSE2__
- __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/',
- '/', '/', '/', '/', '/', '/', '/', '/');
- while (CurPtr+16 <= BufferEnd &&
- _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0)
+ __m128i Slashes = _mm_set1_epi8('/');
+ while (CurPtr+16 <= BufferEnd) {
+ int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes));
+ if (cmp != 0) {
+ // Adjust the pointer to point directly after the first slash. It's
+ // not necessary to set C here, it will be overwritten at the end of
+ // the outer loop.
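+ // (Illustrative: if cmp == 0x4, the first '/' in this 16-byte chunk is
+ // at byte offset 2, CountTrailingZeros_32 returns 2, and CurPtr lands
+ // one past that slash.)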
+ CurPtr += llvm::CountTrailingZeros_32(cmp) + 1;
+ goto FoundSlash;
+ }
CurPtr += 16;
+ }
#elif __ALTIVEC__
__vector unsigned char Slashes = {
'/', '/', '/', '/', '/', '/', '/', '/',
@@ -1948,8 +2205,8 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
while (C != '/' && C != '\0')
C = *CurPtr++;
- FoundSlash:
if (C == '/') {
+ FoundSlash:
if (CurPtr[-2] == '*') // We found the final */. We're done!
break;
@@ -2119,8 +2376,9 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
// C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
// a pedwarn.
if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
- Diag(BufferEnd, diag::ext_no_newline_eof)
- << FixItHint::CreateInsertion(getSourceLocation(BufferEnd), "\n");
+ Diag(BufferEnd, LangOpts.CPlusPlus0x ? // C++11 [lex.phases] 2.2 p2
+ diag::warn_cxx98_compat_no_newline_eof : diag::ext_no_newline_eof)
+ << FixItHint::CreateInsertion(getSourceLocation(BufferEnd), "\n");
BufferPtr = CurPtr;
@@ -2345,7 +2603,7 @@ LexNextToken:
case 26: // DOS & CP/M EOF: "^Z".
// If we're in Microsoft extensions mode, treat this as end of file.
- if (Features.MicrosoftExt) {
+ if (LangOpts.MicrosoftExt) {
// Read the PP instance variable into an automatic variable, because
// LexEndOfFile will often delete 'this'.
Preprocessor *PPCache = PP;
@@ -2398,7 +2656,7 @@ LexNextToken:
// If the next token is obviously a // or /* */ comment, skip it efficiently
// too (without going through the big switch stmt).
if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
- Features.BCPLComment && !Features.TraditionalCPP) {
+ LangOpts.BCPLComment && !LangOpts.TraditionalCPP) {
if (SkipBCPLComment(Result, CurPtr+2))
return; // There is a token to return.
goto SkipIgnoredUnits;
@@ -2423,7 +2681,7 @@ LexNextToken:
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
- if (Features.CPlusPlus0x) {
+ if (LangOpts.CPlusPlus0x) {
Char = getCharAndSize(CurPtr, SizeTmp);
// UTF-16 string literal
@@ -2475,7 +2733,7 @@ LexNextToken:
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
- if (Features.CPlusPlus0x) {
+ if (LangOpts.CPlusPlus0x) {
Char = getCharAndSize(CurPtr, SizeTmp);
// UTF-32 string literal
@@ -2503,7 +2761,7 @@ LexNextToken:
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
- if (Features.CPlusPlus0x) {
+ if (LangOpts.CPlusPlus0x) {
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '"')
@@ -2526,7 +2784,7 @@ LexNextToken:
tok::wide_string_literal);
// Wide raw string literal.
- if (Features.CPlusPlus0x && Char == 'R' &&
+ if (LangOpts.CPlusPlus0x && Char == 'R' &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
return LexRawStringLiteral(Result,
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
@@ -2554,7 +2812,7 @@ LexNextToken:
return LexIdentifier(Result, CurPtr);
case '$': // $ in identifiers.
- if (Features.DollarIdents) {
+ if (LangOpts.DollarIdents) {
if (!isLexingRawMode())
Diag(CurPtr-1, diag::ext_dollar_in_identifier);
// Notify MIOpt that we read a non-whitespace/non-comment token.
@@ -2606,7 +2864,7 @@ LexNextToken:
MIOpt.ReadToken();
return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
- } else if (Features.CPlusPlus && Char == '*') {
+ } else if (LangOpts.CPlusPlus && Char == '*') {
Kind = tok::periodstar;
CurPtr += SizeTmp;
} else if (Char == '.' &&
@@ -2655,7 +2913,7 @@ LexNextToken:
if (Char == '-') { // --
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::minusminus;
- } else if (Char == '>' && Features.CPlusPlus &&
+ } else if (Char == '>' && LangOpts.CPlusPlus &&
getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->*
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
@@ -2693,9 +2951,9 @@ LexNextToken:
// "foo". Check to see if the character after the second slash is a '*'.
// If so, we will lex that as a "/" instead of the start of a comment.
// However, we never do this in -traditional-cpp mode.
- if ((Features.BCPLComment ||
+ if ((LangOpts.BCPLComment ||
getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') &&
- !Features.TraditionalCPP) {
+ !LangOpts.TraditionalCPP) {
if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
return; // There is a token to return.
@@ -2724,20 +2982,20 @@ LexNextToken:
if (Char == '=') {
Kind = tok::percentequal;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
- } else if (Features.Digraphs && Char == '>') {
+ } else if (LangOpts.Digraphs && Char == '>') {
Kind = tok::r_brace; // '%>' -> '}'
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
- } else if (Features.Digraphs && Char == ':') {
+ } else if (LangOpts.Digraphs && Char == ':') {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
Kind = tok::hashhash; // '%:%:' -> '##'
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
- } else if (Char == '@' && Features.MicrosoftExt) {// %:@ -> #@ -> Charize
+ } else if (Char == '@' && LangOpts.MicrosoftExt) {// %:@ -> #@ -> Charize
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
if (!isLexingRawMode())
- Diag(BufferPtr, diag::charize_microsoft_ext);
+ Diag(BufferPtr, diag::ext_charize_microsoft);
Kind = tok::hashat;
} else { // '%:' -> '#'
// We parsed a # character. If this occurs at the start of the line,
@@ -2789,7 +3047,7 @@ LexNextToken:
// If this is '<<<<' and we're in a Perforce-style conflict marker,
// ignore it.
goto LexNextToken;
- } else if (Features.CUDA && After == '<') {
+ } else if (LangOpts.CUDA && After == '<') {
Kind = tok::lesslessless;
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
@@ -2800,8 +3058,8 @@ LexNextToken:
} else if (Char == '=') {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::lessequal;
- } else if (Features.Digraphs && Char == ':') { // '<:' -> '['
- if (Features.CPlusPlus0x &&
+ } else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '['
+ if (LangOpts.CPlusPlus0x &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
// C++0x [lex.pptoken]p3:
// Otherwise, if the next three characters are <:: and the subsequent
@@ -2820,7 +3078,7 @@ LexNextToken:
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::l_square;
- } else if (Features.Digraphs && Char == '%') { // '<%' -> '{'
+ } else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{'
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::l_brace;
} else {
@@ -2845,7 +3103,7 @@ LexNextToken:
} else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
// If this is '>>>>>>>' and we're in a conflict marker, ignore it.
goto LexNextToken;
- } else if (Features.CUDA && After == '>') {
+ } else if (LangOpts.CUDA && After == '>') {
Kind = tok::greatergreatergreater;
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
@@ -2884,10 +3142,10 @@ LexNextToken:
break;
case ':':
Char = getCharAndSize(CurPtr, SizeTmp);
- if (Features.Digraphs && Char == '>') {
+ if (LangOpts.Digraphs && Char == '>') {
Kind = tok::r_square; // ':>' -> ']'
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
- } else if (Features.CPlusPlus && Char == ':') {
+ } else if (LangOpts.CPlusPlus && Char == ':') {
Kind = tok::coloncolon;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
@@ -2918,10 +3176,10 @@ LexNextToken:
if (Char == '#') {
Kind = tok::hashhash;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
- } else if (Char == '@' && Features.MicrosoftExt) { // #@ -> Charize
+ } else if (Char == '@' && LangOpts.MicrosoftExt) { // #@ -> Charize
Kind = tok::hashat;
if (!isLexingRawMode())
- Diag(BufferPtr, diag::charize_microsoft_ext);
+ Diag(BufferPtr, diag::ext_charize_microsoft);
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
// We parsed a # character. If this occurs at the start of the line,
@@ -2954,7 +3212,7 @@ LexNextToken:
case '@':
// Objective C support.
- if (CurPtr[-1] == '@' && Features.ObjC1)
+ if (CurPtr[-1] == '@' && LangOpts.ObjC1)
Kind = tok::at;
else
Kind = tok::unknown;
diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
index 70183fd..c1d228b 100644
--- a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
@@ -16,6 +16,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -178,15 +179,16 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
/// return the UTF32.
-static bool ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
+static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd,
uint32_t &UcnVal, unsigned short &UcnLen,
FullSourceLoc Loc, DiagnosticsEngine *Diags,
- const LangOptions &Features) {
+ const LangOptions &Features,
+ bool in_char_string_literal = false) {
if (!Features.CPlusPlus && !Features.C99 && Diags)
Diags->Report(Loc, diag::warn_ucn_not_valid_in_c89);
- // Save the beginning of the string (for error diagnostics).
- const char *ThisTokBegin = ThisTokBuf;
+ const char *UcnBegin = ThisTokBuf;
// Skip the '\u' char's.
ThisTokBuf += 2;
@@ -208,22 +210,43 @@ static bool ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
if (UcnLenSave) {
if (Diags) {
SourceLocation L =
- Lexer::AdvanceToTokenCharacter(Loc, ThisTokBuf-ThisTokBegin,
+ Lexer::AdvanceToTokenCharacter(Loc, UcnBegin - ThisTokBegin,
Loc.getManager(), Features);
- Diags->Report(FullSourceLoc(L, Loc.getManager()),
- diag::err_ucn_escape_incomplete);
+ Diags->Report(L, diag::err_ucn_escape_incomplete);
}
return false;
}
- // Check UCN constraints (C99 6.4.3p2).
- if ((UcnVal < 0xa0 &&
- (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60 )) // $, @, `
- || (UcnVal >= 0xD800 && UcnVal <= 0xDFFF)
- || (UcnVal > 0x10FFFF)) /* the maximum legal UTF32 value */ {
+
+ // Check UCN constraints (C99 6.4.3p2) [C++11 lex.charset p2]
+ if ((0xD800 <= UcnVal && UcnVal <= 0xDFFF) || // surrogate codepoints
+ UcnVal > 0x10FFFF) { // maximum legal UTF32 value
if (Diags)
Diags->Report(Loc, diag::err_ucn_escape_invalid);
return false;
}
+
+ // C++11 allows UCNs that refer to control characters and basic source
+ // characters inside character and string literals
+ if (UcnVal < 0xa0 &&
+ (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60)) { // $, @, `
+ bool IsError = (!Features.CPlusPlus0x || !in_char_string_literal);
+ if (Diags) {
+ SourceLocation UcnBeginLoc =
+ Lexer::AdvanceToTokenCharacter(Loc, UcnBegin - ThisTokBegin,
+ Loc.getManager(), Features);
+ char BasicSCSChar = UcnVal;
+ if (UcnVal >= 0x20 && UcnVal < 0x7f)
+ Diags->Report(UcnBeginLoc, IsError ? diag::err_ucn_escape_basic_scs :
+ diag::warn_cxx98_compat_literal_ucn_escape_basic_scs)
+ << StringRef(&BasicSCSChar, 1);
+ else
+ Diags->Report(UcnBeginLoc, IsError ? diag::err_ucn_control_character :
+ diag::warn_cxx98_compat_literal_ucn_control_character);
+ }
+ if (IsError)
+ return false;
+ }
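+
+ // (Illustrative: `\u0041` names the basic source character 'A'; inside a
+ // C++11 character or string literal this is only a compatibility
+ // warning, anywhere else it remains an error.)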
+
return true;
}
@@ -231,7 +254,8 @@ static bool ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
/// convert the UTF32 to UTF8 or UTF16. This is a subroutine of
/// StringLiteralParser. When we decide to implement UCN's for identifiers,
/// we will likely rework our support for UCN's.
-static void EncodeUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
+static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd,
char *&ResultBuf, bool &HadError,
FullSourceLoc Loc, unsigned CharByteWidth,
DiagnosticsEngine *Diags,
@@ -239,8 +263,8 @@ static void EncodeUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
typedef uint32_t UTF32;
UTF32 UcnVal = 0;
unsigned short UcnLen = 0;
- if (!ProcessUCNEscape(ThisTokBuf, ThisTokEnd, UcnVal, UcnLen, Loc, Diags,
- Features)) {
+ if (!ProcessUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal, UcnLen,
+ Loc, Diags, Features, true)) {
HadError = 1;
return;
}
@@ -252,31 +276,30 @@ static void EncodeUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
assert((UcnLen== 4 || UcnLen== 8) && "only ucn length of 4 or 8 supported");
if (CharByteWidth == 4) {
- // Note: our internal rep of wide char tokens is always little-endian.
- *ResultBuf++ = (UcnVal & 0x000000FF);
- *ResultBuf++ = (UcnVal & 0x0000FF00) >> 8;
- *ResultBuf++ = (UcnVal & 0x00FF0000) >> 16;
- *ResultBuf++ = (UcnVal & 0xFF000000) >> 24;
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultPtr = reinterpret_cast<UTF32*>(ResultBuf);
+ *ResultPtr = UcnVal;
+ ResultBuf += 4;
return;
}
if (CharByteWidth == 2) {
- // Convert to UTF16.
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultPtr = reinterpret_cast<UTF16*>(ResultBuf);
+
if (UcnVal < (UTF32)0xFFFF) {
- *ResultBuf++ = (UcnVal & 0x000000FF);
- *ResultBuf++ = (UcnVal & 0x0000FF00) >> 8;
+ *ResultPtr = UcnVal;
+ ResultBuf += 2;
return;
}
- if (Diags) Diags->Report(Loc, diag::warn_ucn_escape_too_large);
- typedef uint16_t UTF16;
+ // Convert to UTF16.
UcnVal -= 0x10000;
- UTF16 surrogate1 = 0xD800 + (UcnVal >> 10);
- UTF16 surrogate2 = 0xDC00 + (UcnVal & 0x3FF);
- *ResultBuf++ = (surrogate1 & 0x000000FF);
- *ResultBuf++ = (surrogate1 & 0x0000FF00) >> 8;
- *ResultBuf++ = (surrogate2 & 0x000000FF);
- *ResultBuf++ = (surrogate2 & 0x0000FF00) >> 8;
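+ // Worked example (illustrative): U+1F600 - 0x10000 = 0xF600, so the
+ // high surrogate is 0xD800 + (0xF600 >> 10) = 0xD83D and the low
+ // surrogate is 0xDC00 + (0xF600 & 0x3FF) = 0xDE00.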
+ *ResultPtr = 0xD800 + (UcnVal >> 10);
+ *(ResultPtr+1) = 0xDC00 + (UcnVal & 0x3FF);
+ ResultBuf += 4;
return;
}
@@ -323,6 +346,10 @@ static void EncodeUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
/// decimal-constant integer-suffix
/// octal-constant integer-suffix
/// hexadecimal-constant integer-suffix
+/// user-defined-integer-literal: [C++11 lex.ext]
+/// decimal-literal ud-suffix
+/// octal-literal ud-suffix
+/// hexadecimal-literal ud-suffix
/// decimal-constant:
/// nonzero-digit
/// decimal-constant digit
@@ -372,6 +399,7 @@ NumericLiteralParser(const char *begin, const char *end,
s = DigitsBegin = begin;
saw_exponent = false;
saw_period = false;
+ saw_ud_suffix = false;
isLong = false;
isUnsigned = false;
isLongLong = false;
@@ -454,7 +482,7 @@ NumericLiteralParser(const char *begin, const char *end,
continue; // Success.
case 'i':
case 'I':
- if (PP.getLangOptions().MicrosoftExt) {
+ if (PP.getLangOpts().MicrosoftExt) {
if (isFPConstant || isLong || isLongLong) break;
// Allow i8, i16, i32, i64, and i128.
@@ -509,13 +537,20 @@ NumericLiteralParser(const char *begin, const char *end,
isImaginary = true;
continue; // Success.
}
- // If we reached here, there was an error.
+ // If we reached here, there was an error or a ud-suffix.
break;
}
- // Report an error if there are any.
if (s != ThisTokEnd) {
- PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
+ if (PP.getLangOpts().CPlusPlus0x && s == SuffixBegin && *s == '_') {
+ // We have a ud-suffix! By C++11 [lex.ext]p10, ud-suffixes not starting
+ // with an '_' are ill-formed.
+ saw_ud_suffix = true;
+ return;
+ }
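+ // (Illustrative: for `42_widgets` in C++11, SuffixBegin points at '_',
+ // so we record the suffix and return instead of emitting an
+ // invalid-suffix error.)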
+
+ // Report an error if there are any.
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, SuffixBegin-begin),
isFPConstant ? diag::err_invalid_suffix_float_constant :
diag::err_invalid_suffix_integer_constant)
<< StringRef(SuffixBegin, ThisTokEnd-SuffixBegin);
@@ -539,13 +574,24 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
radix = 16;
DigitsBegin = s;
s = SkipHexDigits(s);
+ bool noSignificand = (s == DigitsBegin);
if (s == ThisTokEnd) {
// Done.
} else if (*s == '.') {
s++;
saw_period = true;
+ const char *floatDigitsBegin = s;
s = SkipHexDigits(s);
+ noSignificand &= (floatDigitsBegin == s);
+ }
+
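+ // (Illustrative: both `0x.p1` and a bare `0xp2` reach this point with
+ // no significand digits and are rejected.)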
+ if (noSignificand) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_hexconstant_requires_digits);
+ hadError = true;
+ return;
}
+
// A binary exponent can appear with or without a '.'. If dotted, the
// binary exponent is required.
if (*s == 'p' || *s == 'P') {
@@ -562,7 +608,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
}
s = first_non_digit;
- if (!PP.getLangOptions().HexFloats)
+ if (!PP.getLangOpts().HexFloats)
PP.Diag(TokLoc, diag::ext_hexconstant_invalid);
} else if (saw_period) {
PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
@@ -710,7 +756,11 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
}
-/// character-literal: [C++0x lex.ccon]
+/// user-defined-character-literal: [C++11 lex.ext]
+/// character-literal ud-suffix
+/// ud-suffix:
+/// identifier
+/// character-literal: [C++11 lex.ccon]
/// ' c-char-sequence '
/// u' c-char-sequence '
/// U' c-char-sequence '
@@ -723,7 +773,7 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
/// backslash \, or new-line character
/// escape-sequence
/// universal-character-name
-/// escape-sequence: [C++0x lex.ccon]
+/// escape-sequence:
/// simple-escape-sequence
/// octal-escape-sequence
/// hexadecimal-escape-sequence
@@ -736,7 +786,7 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
/// hexadecimal-escape-sequence:
/// \x hexadecimal-digit
/// hexadecimal-escape-sequence hexadecimal-digit
-/// universal-character-name:
+/// universal-character-name: [C++11 lex.charset]
/// \u hex-quad
/// \U hex-quad hex-quad
/// hex-quad:
@@ -745,14 +795,15 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
SourceLocation Loc, Preprocessor &PP,
tok::TokenKind kind) {
- // At this point we know that the character matches the regex "L?'.*'".
+ // At this point we know that the character matches the regex "(L|u|U)?'.*'".
HadError = false;
Kind = kind;
- // Determine if this is a wide or UTF character.
- if (Kind == tok::wide_char_constant || Kind == tok::utf16_char_constant ||
- Kind == tok::utf32_char_constant) {
+ const char *TokBegin = begin;
+
+ // Skip over the L, u, or U prefix.
+ if (Kind != tok::char_constant) {
++begin;
}
@@ -760,6 +811,20 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
assert(begin[0] == '\'' && "Invalid token lexed");
++begin;
+ // Remove an optional ud-suffix.
+ if (end[-1] != '\'') {
+ const char *UDSuffixEnd = end;
+ do {
+ --end;
+ } while (end[-1] != '\'');
+ UDSuffixBuf.assign(end, UDSuffixEnd);
+ UDSuffixOffset = end - TokBegin;
+ }
+
+ // Trim the ending quote.
+ assert(end != begin && "Invalid token lexed");
+ --end;
+
// FIXME: The "Value" is a uint64_t so we can handle char literals of
// up to 64-bits.
// FIXME: This extensively assumes that 'char' is 8-bits.
@@ -771,76 +836,129 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
assert(PP.getTargetInfo().getWCharWidth() <= 64 &&
"Assumes sizeof(wchar) on target is <= 64");
- // This is what we will use for overflow detection
- llvm::APInt LitVal(PP.getTargetInfo().getIntWidth(), 0);
+ SmallVector<uint32_t,4> codepoint_buffer;
+ codepoint_buffer.resize(end-begin);
+ uint32_t *buffer_begin = &codepoint_buffer.front();
+ uint32_t *buffer_end = buffer_begin + codepoint_buffer.size();
+
+ // Unicode escapes representing characters that cannot be correctly
+ // represented in a single code unit are disallowed in character literals
+ // by this implementation.
+ uint32_t largest_character_for_kind;
+ if (tok::wide_char_constant == Kind) {
+ largest_character_for_kind = 0xFFFFFFFFu >> (32-PP.getTargetInfo().getWCharWidth());
+ } else if (tok::utf16_char_constant == Kind) {
+ largest_character_for_kind = 0xFFFF;
+ } else if (tok::utf32_char_constant == Kind) {
+ largest_character_for_kind = 0x10FFFF;
+ } else {
+ largest_character_for_kind = 0x7Fu;
+ }
- unsigned NumCharsSoFar = 0;
- bool Warned = false;
- while (begin[0] != '\'') {
- uint64_t ResultChar;
-
- // Is this a Universal Character Name escape?
- if (begin[0] != '\\') // If this is a normal character, consume it.
- ResultChar = (unsigned char)*begin++;
- else { // Otherwise, this is an escape character.
- unsigned CharWidth = getCharWidth(Kind, PP.getTargetInfo());
- // Check for UCN.
- if (begin[1] == 'u' || begin[1] == 'U') {
- uint32_t utf32 = 0;
- unsigned short UcnLen = 0;
- if (!ProcessUCNEscape(begin, end, utf32, UcnLen,
- FullSourceLoc(Loc, PP.getSourceManager()),
- &PP.getDiagnostics(), PP.getLangOptions())) {
- HadError = 1;
+ while (begin!=end) {
+ // Is this a span of non-escape characters?
+ if (begin[0] != '\\') {
+ char const *start = begin;
+ do {
+ ++begin;
+ } while (begin != end && *begin != '\\');
+
+ char const *tmp_in_start = start;
+ uint32_t *tmp_out_start = buffer_begin;
+ ConversionResult res =
+ ConvertUTF8toUTF32(reinterpret_cast<UTF8 const **>(&start),
+ reinterpret_cast<UTF8 const *>(begin),
+ &buffer_begin,buffer_end,strictConversion);
+ if (res!=conversionOK) {
+ // If we see bad encoding for unprefixed character literals, warn and
+ // simply copy the byte values, for compatibility with gcc and
+ // older versions of clang.
+ bool NoErrorOnBadEncoding = isAscii();
+ unsigned Msg = diag::err_bad_character_encoding;
+ if (NoErrorOnBadEncoding)
+ Msg = diag::warn_bad_character_encoding;
+ PP.Diag(Loc, Msg);
+ if (NoErrorOnBadEncoding) {
+ start = tmp_in_start;
+ buffer_begin = tmp_out_start;
+ for ( ; start != begin; ++start, ++buffer_begin)
+ *buffer_begin = static_cast<uint8_t>(*start);
+ } else {
+ HadError = true;
}
- ResultChar = utf32;
- if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
- PP.Diag(Loc, diag::warn_ucn_escape_too_large);
- ResultChar &= ~0U >> (32-CharWidth);
- }
- } else {
- // Otherwise, this is a non-UCN escape character. Process it.
- ResultChar = ProcessCharEscape(begin, end, HadError,
- FullSourceLoc(Loc,PP.getSourceManager()),
- CharWidth, &PP.getDiagnostics());
- }
- }
-
- // If this is a multi-character constant (e.g. 'abc'), handle it. These are
- // implementation defined (C99 6.4.4.4p10).
- if (NumCharsSoFar) {
- if (!isAscii()) {
- // Emulate GCC's (unintentional?) behavior: L'ab' -> L'b'.
- LitVal = 0;
} else {
- // Narrow character literals act as though their value is concatenated
- // in this implementation, but warn on overflow.
- if (LitVal.countLeadingZeros() < 8 && !Warned) {
- PP.Diag(Loc, diag::warn_char_constant_too_large);
- Warned = true;
+ for (; tmp_out_start <buffer_begin; ++tmp_out_start) {
+ if (*tmp_out_start > largest_character_for_kind) {
+ HadError = true;
+ PP.Diag(Loc, diag::err_character_too_large);
+ }
}
- LitVal <<= 8;
}
+
+ continue;
}
+ // Is this a Universal Character Name escape?
+ if (begin[1] == 'u' || begin[1] == 'U') {
+ unsigned short UcnLen = 0;
+ if (!ProcessUCNEscape(TokBegin, begin, end, *buffer_begin, UcnLen,
+ FullSourceLoc(Loc, PP.getSourceManager()),
+ &PP.getDiagnostics(), PP.getLangOpts(),
+ true))
+ {
+ HadError = true;
+ } else if (*buffer_begin > largest_character_for_kind) {
+ HadError = true;
+ PP.Diag(Loc,diag::err_character_too_large);
+ }
- LitVal = LitVal + ResultChar;
- ++NumCharsSoFar;
+ ++buffer_begin;
+ continue;
+ }
+ unsigned CharWidth = getCharWidth(Kind, PP.getTargetInfo());
+ uint64_t result =
+ ProcessCharEscape(begin, end, HadError,
+ FullSourceLoc(Loc,PP.getSourceManager()),
+ CharWidth, &PP.getDiagnostics());
+ *buffer_begin++ = result;
}
- // If this is the second character being processed, do special handling.
+ unsigned NumCharsSoFar = buffer_begin-&codepoint_buffer.front();
+
if (NumCharsSoFar > 1) {
- // Warn about discarding the top bits for multi-char wide-character
- // constants (L'abcd').
- if (!isAscii())
+ if (isWide())
PP.Diag(Loc, diag::warn_extraneous_char_constant);
- else if (NumCharsSoFar != 4)
+ else if (isAscii() && NumCharsSoFar == 4)
+ PP.Diag(Loc, diag::ext_four_char_character_literal);
+ else if (isAscii())
PP.Diag(Loc, diag::ext_multichar_character_literal);
else
- PP.Diag(Loc, diag::ext_four_char_character_literal);
+ PP.Diag(Loc, diag::err_multichar_utf_character_literal);
IsMultiChar = true;
} else
IsMultiChar = false;
+ llvm::APInt LitVal(PP.getTargetInfo().getIntWidth(), 0);
+
+ // Narrow character literals act as though their value is concatenated
+ // in this implementation, but warn on overflow.
+ bool multi_char_too_long = false;
+ if (isAscii() && isMultiChar()) {
+ LitVal = 0;
+ for (size_t i=0;i<NumCharsSoFar;++i) {
+ // Record overflow if there are not 8 leading zeros free to shift into.
+ multi_char_too_long |= (LitVal.countLeadingZeros() < 8);
+ LitVal <<= 8;
+ LitVal = LitVal + (codepoint_buffer[i] & 0xFF);
+ }
+ } else if (NumCharsSoFar > 0) {
+ // otherwise just take the last character
+ LitVal = buffer_begin[-1];
+ }
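+ // (Illustrative: the narrow multichar literal 'ab' concatenates to
+ // ('a' << 8) | 'b' == 0x6162, matching GCC.)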
+
+ if (!HadError && multi_char_too_long) {
+ PP.Diag(Loc,diag::warn_char_constant_too_large);
+ }
+
// Transfer the value from APInt to uint64_t
Value = LitVal.getZExtValue();
@@ -849,7 +967,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
// character constants are not sign extended in the this implementation:
// '\xFF\xFF' = 65536 and '\x0\xFF' = 255, which matches GCC.
if (isAscii() && NumCharsSoFar == 1 && (Value & 128) &&
- PP.getLangOptions().CharIsSigned)
+ PP.getLangOpts().CharIsSigned)
Value = (signed char)Value;
}
@@ -909,7 +1027,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
StringLiteralParser::
StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
Preprocessor &PP, bool Complain)
- : SM(PP.getSourceManager()), Features(PP.getLangOptions()),
+ : SM(PP.getSourceManager()), Features(PP.getLangOpts()),
Target(PP.getTargetInfo()), Diags(Complain ? &PP.getDiagnostics() : 0),
MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
ResultPtr(ResultBuf.data()), hadError(false), Pascal(false) {
@@ -985,7 +1103,7 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
ResultBuf.resize(SizeBound);
// Likewise, but for each string piece.
- llvm::SmallString<512> TokenBuf;
+ SmallString<512> TokenBuf;
TokenBuf.resize(MaxTokenLength);
// Loop over all the strings, getting their spelling, and expanding them to
@@ -994,6 +1112,8 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
Pascal = false;
+ SourceLocation UDSuffixTokLoc;
+
for (unsigned i = 0, e = NumStringToks; i != e; ++i) {
const char *ThisTokBuf = &TokenBuf[0];
// Get the spelling of the token, which eliminates trigraphs, etc. We know
@@ -1008,7 +1128,42 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
continue;
}
- const char *ThisTokEnd = ThisTokBuf+ThisTokLen-1; // Skip end quote.
+ const char *ThisTokBegin = ThisTokBuf;
+ const char *ThisTokEnd = ThisTokBuf+ThisTokLen;
+
+ // Remove an optional ud-suffix.
+ if (ThisTokEnd[-1] != '"') {
+ const char *UDSuffixEnd = ThisTokEnd;
+ do {
+ --ThisTokEnd;
+ } while (ThisTokEnd[-1] != '"');
+
+ StringRef UDSuffix(ThisTokEnd, UDSuffixEnd - ThisTokEnd);
+
+ if (UDSuffixBuf.empty()) {
+ UDSuffixBuf.assign(UDSuffix);
+ UDSuffixToken = i;
+ UDSuffixOffset = ThisTokEnd - ThisTokBuf;
+ UDSuffixTokLoc = StringToks[i].getLocation();
+ } else if (!UDSuffixBuf.equals(UDSuffix)) {
+ // C++11 [lex.ext]p8: At the end of phase 6, if a string literal is the
+ // result of a concatenation involving at least one user-defined-string-
+ // literal, all the participating user-defined-string-literals shall
+ // have the same ud-suffix.
+ if (Diags) {
+ SourceLocation TokLoc = StringToks[i].getLocation();
+ Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
+ << UDSuffixBuf << UDSuffix
+ << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc)
+ << SourceRange(TokLoc, TokLoc);
+ }
+ hadError = true;
+ }
+ }
+
+ // Strip the end quote.
+ --ThisTokEnd;
+
// TODO: Input character set mapping support.
// Skip marker for wide or unicode strings.
@@ -1028,12 +1183,14 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
++ThisTokBuf;
++ThisTokBuf; // skip '('
- // remove same number of characters from the end
- if (ThisTokEnd >= ThisTokBuf + (ThisTokBuf - Prefix))
- ThisTokEnd -= (ThisTokBuf - Prefix);
+ // Remove same number of characters from the end
+ ThisTokEnd -= ThisTokBuf - Prefix;
+ assert(ThisTokEnd >= ThisTokBuf && "malformed raw string literal");
// Copy the string over
- CopyStringFragment(StringRef(ThisTokBuf, ThisTokEnd - ThisTokBuf));
+ if (CopyStringFragment(StringRef(ThisTokBuf, ThisTokEnd - ThisTokBuf)))
+ if (DiagnoseBadString(StringToks[i]))
+ hadError = true;
} else {
assert(ThisTokBuf[0] == '"' && "Expected quote, lexer broken?");
++ThisTokBuf; // skip "
@@ -1060,13 +1217,16 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
} while (ThisTokBuf != ThisTokEnd && ThisTokBuf[0] != '\\');
// Copy the character span over.
- CopyStringFragment(StringRef(InStart, ThisTokBuf - InStart));
+ if (CopyStringFragment(StringRef(InStart, ThisTokBuf - InStart)))
+ if (DiagnoseBadString(StringToks[i]))
+ hadError = true;
continue;
}
// Is this a Universal Character Name escape?
if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
- EncodeUCNEscape(ThisTokBuf, ThisTokEnd, ResultPtr,
- hadError, FullSourceLoc(StringToks[i].getLocation(),SM),
+ EncodeUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd,
+ ResultPtr, hadError,
+ FullSourceLoc(StringToks[i].getLocation(), SM),
CharByteWidth, Diags, Features);
continue;
}
@@ -1076,18 +1236,41 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
FullSourceLoc(StringToks[i].getLocation(), SM),
CharByteWidth*8, Diags);
- // Note: our internal rep of wide char tokens is always little-endian.
- *ResultPtr++ = ResultChar & 0xFF;
-
- for (unsigned i = 1, e = CharByteWidth; i != e; ++i)
- *ResultPtr++ = ResultChar >> i*8;
+ if (CharByteWidth == 4) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultWidePtr = reinterpret_cast<UTF32*>(ResultPtr);
+ *ResultWidePtr = ResultChar;
+ ResultPtr += 4;
+ } else if (CharByteWidth == 2) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultWidePtr = reinterpret_cast<UTF16*>(ResultPtr);
+ *ResultWidePtr = ResultChar & 0xFFFF;
+ ResultPtr += 2;
+ } else {
+ assert(CharByteWidth == 1 && "Unexpected char width");
+ *ResultPtr++ = ResultChar & 0xFF;
+ }
}
}
}
if (Pascal) {
- ResultBuf[0] = ResultPtr-&ResultBuf[0]-1;
- ResultBuf[0] /= CharByteWidth;
+ if (CharByteWidth == 4) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultWidePtr = reinterpret_cast<UTF32*>(ResultBuf.data());
+ ResultWidePtr[0] = GetNumStringChars() - 1;
+ } else if (CharByteWidth == 2) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultWidePtr = reinterpret_cast<UTF16*>(ResultBuf.data());
+ ResultWidePtr[0] = GetNumStringChars() - 1;
+ } else {
+ assert(CharByteWidth == 1 && "Unexpected char width");
+ ResultBuf[0] = GetNumStringChars() - 1;
+ }
// Verify that pascal strings aren't too large.
if (GetStringLength() > 256) {
@@ -1116,22 +1299,55 @@ void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
/// copyStringFragment - This function copies from Start to End into ResultPtr.
/// Performs widening for multi-byte characters.
-void StringLiteralParser::CopyStringFragment(StringRef Fragment) {
+bool StringLiteralParser::CopyStringFragment(StringRef Fragment) {
+ assert(CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4);
+ ConversionResult result = conversionOK;
// Copy the character span over.
if (CharByteWidth == 1) {
+ if (!isLegalUTF8String(reinterpret_cast<const UTF8*>(Fragment.begin()),
+ reinterpret_cast<const UTF8*>(Fragment.end())))
+ result = sourceIllegal;
memcpy(ResultPtr, Fragment.data(), Fragment.size());
ResultPtr += Fragment.size();
- } else {
- // Note: our internal rep of wide char tokens is always little-endian.
- for (StringRef::iterator I=Fragment.begin(), E=Fragment.end(); I!=E; ++I) {
- *ResultPtr++ = *I;
- // Add zeros at the end.
- for (unsigned i = 1, e = CharByteWidth; i != e; ++i)
- *ResultPtr++ = 0;
- }
+ } else if (CharByteWidth == 2) {
+ UTF8 const *sourceStart = (UTF8 const *)Fragment.data();
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *targetStart = reinterpret_cast<UTF16*>(ResultPtr);
+ ConversionFlags flags = strictConversion;
+ result = ConvertUTF8toUTF16(
+ &sourceStart,sourceStart + Fragment.size(),
+ &targetStart,targetStart + 2*Fragment.size(),flags);
+ if (result==conversionOK)
+ ResultPtr = reinterpret_cast<char*>(targetStart);
+ } else if (CharByteWidth == 4) {
+ UTF8 const *sourceStart = (UTF8 const *)Fragment.data();
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *targetStart = reinterpret_cast<UTF32*>(ResultPtr);
+ ConversionFlags flags = strictConversion;
+ result = ConvertUTF8toUTF32(
+ &sourceStart,sourceStart + Fragment.size(),
+ &targetStart,targetStart + 4*Fragment.size(),flags);
+ if (result==conversionOK)
+ ResultPtr = reinterpret_cast<char*>(targetStart);
}
+ assert((result != targetExhausted)
+ && "ConvertUTF8toUTFXX exhausted target buffer");
+ return result != conversionOK;
}
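+
+// (Illustrative note: a true return from CopyStringFragment means the
+// fragment was not well-formed UTF-8; DiagnoseBadString below downgrades
+// that to a warning for unprefixed literals and keeps it an error for
+// prefixed ones.)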
+bool StringLiteralParser::DiagnoseBadString(const Token &Tok) {
+ // If we see bad encoding for unprefixed string literals, warn and
+ // simply copy the byte values, for compatibility with gcc and older
+ // versions of clang.
+ bool NoErrorOnBadEncoding = isAscii();
+ unsigned Msg = NoErrorOnBadEncoding ? diag::warn_bad_string_encoding :
+ diag::err_bad_string_encoding;
+ if (Diags)
+ Diags->Report(FullSourceLoc(Tok.getLocation(), SM), Msg);
+ return !NoErrorOnBadEncoding;
+}
/// getOffsetOfStringByte - This function returns the offset of the
/// specified byte of the string data represented by Token. This handles
@@ -1139,7 +1355,7 @@ void StringLiteralParser::CopyStringFragment(StringRef Fragment) {
unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
unsigned ByteNo) const {
// Get the spelling of the token.
- llvm::SmallString<32> SpellingBuffer;
+ SmallString<32> SpellingBuffer;
SpellingBuffer.resize(Tok.getLength());
bool StringInvalid = false;
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
index 1846d1c..e2b251a 100644
--- a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
@@ -15,7 +15,8 @@
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/LexDiagnostic.h"
-
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/SaveAndRestore.h"
#include <algorithm>
using namespace clang;
@@ -155,6 +156,8 @@ MacroArgs::getPreExpArgument(unsigned Arg, const MacroInfo *MI,
std::vector<Token> &Result = PreExpArgTokens[Arg];
if (!Result.empty()) return Result;
+ SaveAndRestore<bool> PreExpandingMacroArgs(PP.InMacroArgPreExpansion, true);
+
const Token *AT = getUnexpArgument(Arg);
unsigned NumToks = getArgLength(AT)+1; // Include the EOF.
@@ -177,6 +180,8 @@ MacroArgs::getPreExpArgument(unsigned Arg, const MacroInfo *MI,
// will not otherwise be popped until the next token is lexed. The problem is
// that the token may be lexed sometime after the vector of tokens itself is
// destroyed, which would be badness.
+ if (PP.InCachingLexMode())
+ PP.ExitCachingLexMode();
PP.RemoveTopOfLexerStack();
return Result;
}
@@ -198,7 +203,7 @@ Token MacroArgs::StringifyArgument(const Token *ArgToks,
const Token *ArgTokStart = ArgToks;
// Stringify all the tokens.
- llvm::SmallString<128> Result;
+ SmallString<128> Result;
Result += "\"";
bool isFirst = true;
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
index 5a7af56..3d0c9a1 100644
--- a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
@@ -27,7 +27,8 @@ MacroInfo::MacroInfo(SourceLocation DefLoc) : Location(DefLoc) {
IsAllowRedefinitionsWithoutWarning = false;
IsWarnIfUnused = false;
IsDefinitionLengthCached = false;
-
+ IsPublic = true;
+
ArgumentList = 0;
NumArguments = 0;
}
@@ -48,6 +49,8 @@ MacroInfo::MacroInfo(const MacroInfo &MI, llvm::BumpPtrAllocator &PPAllocator) {
IsWarnIfUnused = MI.IsWarnIfUnused;
IsDefinitionLengthCached = MI.IsDefinitionLengthCached;
DefinitionLength = MI.DefinitionLength;
+ IsPublic = MI.IsPublic;
+
ArgumentList = 0;
NumArguments = 0;
setArgumentList(MI.ArgumentList, MI.NumArguments, PPAllocator);
diff --git a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
new file mode 100644
index 0000000..5304311
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
@@ -0,0 +1,1437 @@
+//===--- ModuleMap.cpp - Describe the layout of modules ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleMap implementation, which describes the layout
+// of a module as it relates to headers.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Lex/ModuleMap.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/PathV2.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+Module::ExportDecl
+ModuleMap::resolveExport(Module *Mod,
+ const Module::UnresolvedExportDecl &Unresolved,
+ bool Complain) {
+ // We may have just a wildcard.
+ if (Unresolved.Id.empty()) {
+ assert(Unresolved.Wildcard && "Invalid unresolved export");
+ return Module::ExportDecl(0, true);
+ }
+
+ // Find the starting module.
+ Module *Context = lookupModuleUnqualified(Unresolved.Id[0].first, Mod);
+ if (!Context) {
+ if (Complain)
+ Diags->Report(Unresolved.Id[0].second,
+ diag::err_mmap_missing_module_unqualified)
+ << Unresolved.Id[0].first << Mod->getFullModuleName();
+
+ return Module::ExportDecl();
+ }
+
+ // Dig into the module path.
+ for (unsigned I = 1, N = Unresolved.Id.size(); I != N; ++I) {
+ Module *Sub = lookupModuleQualified(Unresolved.Id[I].first,
+ Context);
+ if (!Sub) {
+ if (Complain)
+ Diags->Report(Unresolved.Id[I].second,
+ diag::err_mmap_missing_module_qualified)
+ << Unresolved.Id[I].first << Context->getFullModuleName()
+ << SourceRange(Unresolved.Id[0].second, Unresolved.Id[I-1].second);
+
+ return Module::ExportDecl();
+ }
+
+ Context = Sub;
+ }
+
+ return Module::ExportDecl(Context, Unresolved.Wildcard);
+}
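+
+// (Illustrative: an `export *` line becomes ExportDecl(0, true), while a
+// qualified name such as `export Std.IO` (a made-up example) is resolved
+// one component at a time via lookupModuleUnqualified and
+// lookupModuleQualified.)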
+
+ModuleMap::ModuleMap(FileManager &FileMgr, const DiagnosticConsumer &DC,
+ const LangOptions &LangOpts, const TargetInfo *Target)
+ : LangOpts(LangOpts), Target(Target), BuiltinIncludeDir(0)
+{
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs(new DiagnosticIDs);
+ Diags = IntrusiveRefCntPtr<DiagnosticsEngine>(
+ new DiagnosticsEngine(DiagIDs));
+ Diags->setClient(DC.clone(*Diags), /*ShouldOwnClient=*/true);
+ SourceMgr = new SourceManager(*Diags, FileMgr);
+}
+
+ModuleMap::~ModuleMap() {
+ for (llvm::StringMap<Module *>::iterator I = Modules.begin(),
+ IEnd = Modules.end();
+ I != IEnd; ++I) {
+ delete I->getValue();
+ }
+
+ delete SourceMgr;
+}
+
+void ModuleMap::setTarget(const TargetInfo &Target) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Improper target override");
+ this->Target = &Target;
+}
+
+Module *ModuleMap::findModuleForHeader(const FileEntry *File) {
+ llvm::DenseMap<const FileEntry *, Module *>::iterator Known
+ = Headers.find(File);
+ if (Known != Headers.end()) {
+ // If a header corresponds to an unavailable module, don't report
+ // that it maps to anything.
+ if (!Known->second->isAvailable())
+ return 0;
+
+ return Known->second;
+ }
+
+ const DirectoryEntry *Dir = File->getDir();
+ llvm::SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ StringRef DirName = Dir->getName();
+
+ // Keep walking up the directory hierarchy, looking for a directory with
+ // an umbrella header.
+ do {
+ llvm::DenseMap<const DirectoryEntry *, Module *>::iterator KnownDir
+ = UmbrellaDirs.find(Dir);
+ if (KnownDir != UmbrellaDirs.end()) {
+ Module *Result = KnownDir->second;
+
+ // Search up the module stack until we find a module with an umbrella
+ // directory.
+ Module *UmbrellaModule = Result;
+ while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ UmbrellaModule = UmbrellaModule->Parent;
+
+ if (UmbrellaModule->InferSubmodules) {
+ // Infer submodules for each of the directories we found between
+ // the directory of the umbrella header and the directory where
+ // the actual header is located.
+ bool Explicit = UmbrellaModule->InferExplicitSubmodules;
+
+ for (unsigned I = SkippedDirs.size(); I != 0; --I) {
+ // Find or create the module that corresponds to this directory name.
+ StringRef Name = llvm::sys::path::stem(SkippedDirs[I-1]->getName());
+ Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
+ Explicit).first;
+
+ // Associate the module and the directory.
+ UmbrellaDirs[SkippedDirs[I-1]] = Result;
+
+ // If inferred submodules export everything they import, add a
+ // wildcard to the set of exports.
+ if (UmbrellaModule->InferExportWildcard && Result->Exports.empty())
+ Result->Exports.push_back(Module::ExportDecl(0, true));
+ }
+
+ // Infer a submodule with the same name as this header file.
+ StringRef Name = llvm::sys::path::stem(File->getName());
+ Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
+ Explicit).first;
+
+ // If inferred submodules export everything they import, add a
+ // wildcard to the set of exports.
+ if (UmbrellaModule->InferExportWildcard && Result->Exports.empty())
+ Result->Exports.push_back(Module::ExportDecl(0, true));
+ } else {
+ // Record each of the directories we stepped through as being part of
+ // the module we found, since the umbrella header covers them all.
+ for (unsigned I = 0, N = SkippedDirs.size(); I != N; ++I)
+ UmbrellaDirs[SkippedDirs[I]] = Result;
+ }
+
+ Headers[File] = Result;
+
+ // If a header corresponds to an unavailable module, don't report
+ // that it maps to anything.
+ if (!Result->isAvailable())
+ return 0;
+
+ return Result;
+ }
+
+ SkippedDirs.push_back(Dir);
+
+ // Retrieve our parent path.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Resolve the parent path to a directory entry.
+ Dir = SourceMgr->getFileManager().getDirectory(DirName);
+ } while (Dir);
+
+ return 0;
+}
+
+bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) {
+ llvm::DenseMap<const FileEntry *, Module *>::iterator Known
+ = Headers.find(Header);
+ if (Known != Headers.end())
+ return !Known->second->isAvailable();
+
+ const DirectoryEntry *Dir = Header->getDir();
+ llvm::SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ StringRef DirName = Dir->getName();
+
+ // Keep walking up the directory hierarchy, looking for a directory with
+ // an umbrella header.
+ do {
+ llvm::DenseMap<const DirectoryEntry *, Module *>::iterator KnownDir
+ = UmbrellaDirs.find(Dir);
+ if (KnownDir != UmbrellaDirs.end()) {
+ Module *Found = KnownDir->second;
+ if (!Found->isAvailable())
+ return true;
+
+ // Search up the module stack until we find a module with an umbrella
+ // directory.
+ Module *UmbrellaModule = Found;
+ while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ UmbrellaModule = UmbrellaModule->Parent;
+
+ if (UmbrellaModule->InferSubmodules) {
+ for (unsigned I = SkippedDirs.size(); I != 0; --I) {
+ // Find or create the module that corresponds to this directory name.
+ StringRef Name = llvm::sys::path::stem(SkippedDirs[I-1]->getName());
+ Found = lookupModuleQualified(Name, Found);
+ if (!Found)
+ return false;
+ if (!Found->isAvailable())
+ return true;
+ }
+
+ // Infer a submodule with the same name as this header file.
+ StringRef Name = llvm::sys::path::stem(Header->getName());
+ Found = lookupModuleQualified(Name, Found);
+ if (!Found)
+ return false;
+ }
+
+ return !Found->isAvailable();
+ }
+
+ SkippedDirs.push_back(Dir);
+
+ // Retrieve our parent path.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Resolve the parent path to a directory entry.
+ Dir = SourceMgr->getFileManager().getDirectory(DirName);
+ } while (Dir);
+
+ return false;
+}
+
+Module *ModuleMap::findModule(StringRef Name) {
+ llvm::StringMap<Module *>::iterator Known = Modules.find(Name);
+ if (Known != Modules.end())
+ return Known->getValue();
+
+ return 0;
+}
+
+Module *ModuleMap::lookupModuleUnqualified(StringRef Name, Module *Context) {
+ for (; Context; Context = Context->Parent) {
+ if (Module *Sub = lookupModuleQualified(Name, Context))
+ return Sub;
+ }
+
+ return findModule(Name);
+}
+
+Module *ModuleMap::lookupModuleQualified(StringRef Name, Module *Context) {
+ if (!Context)
+ return findModule(Name);
+
+ return Context->findSubmodule(Name);
+}
+
+std::pair<Module *, bool>
+ModuleMap::findOrCreateModule(StringRef Name, Module *Parent, bool IsFramework,
+ bool IsExplicit) {
+ // Try to find an existing module with this name.
+ if (Module *Sub = lookupModuleQualified(Name, Parent))
+ return std::make_pair(Sub, false);
+
+ // Create a new module with this name.
+ Module *Result = new Module(Name, SourceLocation(), Parent, IsFramework,
+ IsExplicit);
+ if (!Parent)
+ Modules[Name] = Result;
+ return std::make_pair(Result, true);
+}
+
+Module *
+ModuleMap::inferFrameworkModule(StringRef ModuleName,
+ const DirectoryEntry *FrameworkDir,
+ bool IsSystem,
+ Module *Parent) {
+ // Check whether we've already found this module.
+ if (Module *Mod = lookupModuleQualified(ModuleName, Parent))
+ return Mod;
+
+ FileManager &FileMgr = SourceMgr->getFileManager();
+
+ // Look for an umbrella header.
+ SmallString<128> UmbrellaName = StringRef(FrameworkDir->getName());
+ llvm::sys::path::append(UmbrellaName, "Headers");
+ llvm::sys::path::append(UmbrellaName, ModuleName + ".h");
+ const FileEntry *UmbrellaHeader = FileMgr.getFile(UmbrellaName);
+
+ // FIXME: If there's no umbrella header, we could probably scan the
+ // framework to load *everything*. But, it's not clear that this is a good
+ // idea.
+ if (!UmbrellaHeader)
+ return 0;
+
+ Module *Result = new Module(ModuleName, SourceLocation(), Parent,
+ /*IsFramework=*/true, /*IsExplicit=*/false);
+ if (IsSystem)
+ Result->IsSystem = IsSystem;
+
+ if (!Parent)
+ Modules[ModuleName] = Result;
+
+ // umbrella header "umbrella-header-name"
+ Result->Umbrella = UmbrellaHeader;
+ Headers[UmbrellaHeader] = Result;
+ UmbrellaDirs[UmbrellaHeader->getDir()] = Result;
+
+ // export *
+ Result->Exports.push_back(Module::ExportDecl(0, true));
+
+ // module * { export * }
+ Result->InferSubmodules = true;
+ Result->InferExportWildcard = true;
+
+ // Look for subframeworks.
+ llvm::error_code EC;
+ SmallString<128> SubframeworksDirName
+ = StringRef(FrameworkDir->getName());
+ llvm::sys::path::append(SubframeworksDirName, "Frameworks");
+ SmallString<128> SubframeworksDirNameNative;
+ llvm::sys::path::native(SubframeworksDirName.str(),
+ SubframeworksDirNameNative);
+ for (llvm::sys::fs::directory_iterator
+ Dir(SubframeworksDirNameNative.str(), EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (!StringRef(Dir->path()).endswith(".framework"))
+ continue;
+
+ if (const DirectoryEntry *SubframeworkDir
+ = FileMgr.getDirectory(Dir->path())) {
+ // FIXME: Do we want to warn about subframeworks without umbrella headers?
+ inferFrameworkModule(llvm::sys::path::stem(Dir->path()), SubframeworkDir,
+ IsSystem, Result);
+ }
+ }
+
+ return Result;
+}
+
+void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader){
+ Headers[UmbrellaHeader] = Mod;
+ Mod->Umbrella = UmbrellaHeader;
+ UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
+}
+
+void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir) {
+ Mod->Umbrella = UmbrellaDir;
+ UmbrellaDirs[UmbrellaDir] = Mod;
+}
+
+void ModuleMap::addHeader(Module *Mod, const FileEntry *Header) {
+ Mod->Headers.push_back(Header);
+ Headers[Header] = Mod;
+}
+
+const FileEntry *
+ModuleMap::getContainingModuleMapFile(Module *Module) {
+ if (Module->DefinitionLoc.isInvalid() || !SourceMgr)
+ return 0;
+
+ return SourceMgr->getFileEntryForID(
+ SourceMgr->getFileID(Module->DefinitionLoc));
+}
+
+void ModuleMap::dump() {
+ llvm::errs() << "Modules:";
+ for (llvm::StringMap<Module *>::iterator M = Modules.begin(),
+ MEnd = Modules.end();
+ M != MEnd; ++M)
+ M->getValue()->print(llvm::errs(), 2);
+
+ llvm::errs() << "Headers:";
+ for (llvm::DenseMap<const FileEntry *, Module *>::iterator
+ H = Headers.begin(),
+ HEnd = Headers.end();
+ H != HEnd; ++H) {
+ llvm::errs() << " \"" << H->first->getName() << "\" -> "
+ << H->second->getFullModuleName() << "\n";
+ }
+}
+
+bool ModuleMap::resolveExports(Module *Mod, bool Complain) {
+ bool HadError = false;
+ for (unsigned I = 0, N = Mod->UnresolvedExports.size(); I != N; ++I) {
+ Module::ExportDecl Export = resolveExport(Mod, Mod->UnresolvedExports[I],
+ Complain);
+ if (Export.getPointer() || Export.getInt())
+ Mod->Exports.push_back(Export);
+ else
+ HadError = true;
+ }
+ Mod->UnresolvedExports.clear();
+ return HadError;
+}
+
+Module *ModuleMap::inferModuleFromLocation(FullSourceLoc Loc) {
+ if (Loc.isInvalid())
+ return 0;
+
+ // Use the expansion location to determine which module we're in.
+ FullSourceLoc ExpansionLoc = Loc.getExpansionLoc();
+ if (!ExpansionLoc.isFileID())
+ return 0;
+
+ const SourceManager &SrcMgr = Loc.getManager();
+ FileID ExpansionFileID = ExpansionLoc.getFileID();
+
+ while (const FileEntry *ExpansionFile
+ = SrcMgr.getFileEntryForID(ExpansionFileID)) {
+ // Find the module that owns this header (if any).
+ if (Module *Mod = findModuleForHeader(ExpansionFile))
+ return Mod;
+
+ // No module owns this header, so look up the inclusion chain to see if
+ // any included header has an associated module.
+ SourceLocation IncludeLoc = SrcMgr.getIncludeLoc(ExpansionFileID);
+ if (IncludeLoc.isInvalid())
+ return 0;
+
+ ExpansionFileID = SrcMgr.getFileID(IncludeLoc);
+ }
+
+ return 0;
+}
+
+//----------------------------------------------------------------------------//
+// Module map file parser
+//----------------------------------------------------------------------------//
+
+namespace clang {
+ /// \brief A token in a module map file.
+ struct MMToken {
+ enum TokenKind {
+ Comma,
+ EndOfFile,
+ HeaderKeyword,
+ Identifier,
+ ExplicitKeyword,
+ ExportKeyword,
+ FrameworkKeyword,
+ ModuleKeyword,
+ Period,
+ UmbrellaKeyword,
+ RequiresKeyword,
+ Star,
+ StringLiteral,
+ LBrace,
+ RBrace,
+ LSquare,
+ RSquare
+ } Kind;
+
+ unsigned Location;
+ unsigned StringLength;
+ const char *StringData;
+
+ void clear() {
+ Kind = EndOfFile;
+ Location = 0;
+ StringLength = 0;
+ StringData = 0;
+ }
+
+ bool is(TokenKind K) const { return Kind == K; }
+
+ SourceLocation getLocation() const {
+ return SourceLocation::getFromRawEncoding(Location);
+ }
+
+ StringRef getString() const {
+ return StringRef(StringData, StringLength);
+ }
+ };
+
+ class ModuleMapParser {
+ Lexer &L;
+ SourceManager &SourceMgr;
+ DiagnosticsEngine &Diags;
+ ModuleMap &Map;
+
+ /// \brief The directory that this module map resides in.
+ const DirectoryEntry *Directory;
+
+ /// \brief The directory containing Clang-supplied headers.
+ const DirectoryEntry *BuiltinIncludeDir;
+
+ /// \brief Whether an error occurred.
+ bool HadError;
+
+ /// \brief Default target information, used only for string literal
+ /// parsing.
+ OwningPtr<TargetInfo> Target;
+
+ /// \brief Stores string data for the various string literals referenced
+ /// during parsing.
+ llvm::BumpPtrAllocator StringData;
+
+ /// \brief The current token.
+ MMToken Tok;
+
+ /// \brief The active module.
+ Module *ActiveModule;
+
+ /// \brief Consume the current token and return its location.
+ SourceLocation consumeToken();
+
+ /// \brief Skip tokens until we reach a token with the given kind
+ /// (or the end of the file).
+ void skipUntil(MMToken::TokenKind K);
+
+ typedef llvm::SmallVector<std::pair<std::string, SourceLocation>, 2>
+ ModuleId;
+ bool parseModuleId(ModuleId &Id);
+ void parseModuleDecl();
+ void parseRequiresDecl();
+ void parseHeaderDecl(SourceLocation UmbrellaLoc);
+ void parseUmbrellaDirDecl(SourceLocation UmbrellaLoc);
+ void parseExportDecl();
+ void parseInferredSubmoduleDecl(bool Explicit);
+
+ const DirectoryEntry *getOverriddenHeaderSearchDir();
+
+ public:
+ explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags,
+ ModuleMap &Map,
+ const DirectoryEntry *Directory,
+ const DirectoryEntry *BuiltinIncludeDir)
+ : L(L), SourceMgr(SourceMgr), Diags(Diags), Map(Map),
+ Directory(Directory), BuiltinIncludeDir(BuiltinIncludeDir),
+ HadError(false), ActiveModule(0)
+ {
+ TargetOptions TargetOpts;
+ TargetOpts.Triple = llvm::sys::getDefaultTargetTriple();
+ Target.reset(TargetInfo::CreateTargetInfo(Diags, TargetOpts));
+
+ Tok.clear();
+ consumeToken();
+ }
+
+ bool parseModuleMapFile();
+ };
+}
+
+SourceLocation ModuleMapParser::consumeToken() {
+retry:
+ SourceLocation Result = Tok.getLocation();
+ Tok.clear();
+
+ Token LToken;
+ L.LexFromRawLexer(LToken);
+ Tok.Location = LToken.getLocation().getRawEncoding();
+ switch (LToken.getKind()) {
+ case tok::raw_identifier:
+ Tok.StringData = LToken.getRawIdentifierData();
+ Tok.StringLength = LToken.getLength();
+ Tok.Kind = llvm::StringSwitch<MMToken::TokenKind>(Tok.getString())
+ .Case("header", MMToken::HeaderKeyword)
+ .Case("explicit", MMToken::ExplicitKeyword)
+ .Case("export", MMToken::ExportKeyword)
+ .Case("framework", MMToken::FrameworkKeyword)
+ .Case("module", MMToken::ModuleKeyword)
+ .Case("requires", MMToken::RequiresKeyword)
+ .Case("umbrella", MMToken::UmbrellaKeyword)
+ .Default(MMToken::Identifier);
+ break;
+
+ case tok::comma:
+ Tok.Kind = MMToken::Comma;
+ break;
+
+ case tok::eof:
+ Tok.Kind = MMToken::EndOfFile;
+ break;
+
+ case tok::l_brace:
+ Tok.Kind = MMToken::LBrace;
+ break;
+
+ case tok::l_square:
+ Tok.Kind = MMToken::LSquare;
+ break;
+
+ case tok::period:
+ Tok.Kind = MMToken::Period;
+ break;
+
+ case tok::r_brace:
+ Tok.Kind = MMToken::RBrace;
+ break;
+
+ case tok::r_square:
+ Tok.Kind = MMToken::RSquare;
+ break;
+
+ case tok::star:
+ Tok.Kind = MMToken::Star;
+ break;
+
+ case tok::string_literal: {
+ if (LToken.hasUDSuffix()) {
+ Diags.Report(LToken.getLocation(), diag::err_invalid_string_udl);
+ HadError = true;
+ goto retry;
+ }
+
+ // Parse the string literal.
+ LangOptions LangOpts;
+ StringLiteralParser StringLiteral(&LToken, 1, SourceMgr, LangOpts, *Target);
+ if (StringLiteral.hadError)
+ goto retry;
+
+ // Copy the string literal into our string data allocator.
+ unsigned Length = StringLiteral.GetStringLength();
+ char *Saved = StringData.Allocate<char>(Length + 1);
+ memcpy(Saved, StringLiteral.GetString().data(), Length);
+ Saved[Length] = 0;
+
+ // Form the token.
+ Tok.Kind = MMToken::StringLiteral;
+ Tok.StringData = Saved;
+ Tok.StringLength = Length;
+ break;
+ }
+
+ case tok::comment:
+ goto retry;
+
+ default:
+ Diags.Report(LToken.getLocation(), diag::err_mmap_unknown_token);
+ HadError = true;
+ goto retry;
+ }
+
+ return Result;
+}
+
+void ModuleMapParser::skipUntil(MMToken::TokenKind K) {
+ unsigned braceDepth = 0;
+ unsigned squareDepth = 0;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ return;
+
+ case MMToken::LBrace:
+ if (Tok.is(K) && braceDepth == 0 && squareDepth == 0)
+ return;
+
+ ++braceDepth;
+ break;
+
+ case MMToken::LSquare:
+ if (Tok.is(K) && braceDepth == 0 && squareDepth == 0)
+ return;
+
+ ++squareDepth;
+ break;
+
+ case MMToken::RBrace:
+ if (braceDepth > 0)
+ --braceDepth;
+ else if (Tok.is(K))
+ return;
+ break;
+
+ case MMToken::RSquare:
+ if (squareDepth > 0)
+ --squareDepth;
+ else if (Tok.is(K))
+ return;
+ break;
+
+ default:
+ if (braceDepth == 0 && squareDepth == 0 && Tok.is(K))
+ return;
+ break;
+ }
+
+ consumeToken();
+ } while (true);
+}
+
+/// \brief Parse a module-id.
+///
+/// module-id:
+/// identifier
+/// identifier '.' module-id
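+///
+/// For example (hypothetical names, for illustration only):
+/// \code
+///   std
+///   std.vector
+///   MyLib.Container.Vector
+/// \endcode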
+///
+/// \returns true if an error occurred, false otherwise.
+bool ModuleMapParser::parseModuleId(ModuleId &Id) {
+ Id.clear();
+ do {
+ if (Tok.is(MMToken::Identifier)) {
+ Id.push_back(std::make_pair(Tok.getString(), Tok.getLocation()));
+ consumeToken();
+ } else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module_name);
+ return true;
+ }
+
+ if (!Tok.is(MMToken::Period))
+ break;
+
+ consumeToken();
+ } while (true);
+
+ return false;
+}
+
+namespace {
+ /// \brief Enumerates the known attributes.
+ enum AttributeKind {
+ /// \brief An unknown attribute.
+ AT_unknown,
+ /// \brief The 'system' attribute.
+ AT_system
+ };
+}
+
+/// \brief Parse a module declaration.
+///
+/// module-declaration:
+/// 'explicit'[opt] 'framework'[opt] 'module' module-id attributes[opt]
+/// { module-member* }
+///
+/// attributes:
+/// attribute attributes
+/// attribute
+///
+/// attribute:
+/// [ identifier ]
+///
+/// module-member:
+/// requires-declaration
+/// header-declaration
+/// submodule-declaration
+/// export-declaration
+///
+/// submodule-declaration:
+/// module-declaration
+/// inferred-submodule-declaration
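+///
+/// For example, a hypothetical framework module (the names are illustrative,
+/// not taken from any real module map):
+/// \code
+///   framework module MyKit [system] {
+///     umbrella header "MyKit.h"
+///     explicit module Internals {
+///       header "MyKitInternals.h"
+///     }
+///     module * { export * }
+///   }
+/// \endcode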
+void ModuleMapParser::parseModuleDecl() {
+ assert(Tok.is(MMToken::ExplicitKeyword) || Tok.is(MMToken::ModuleKeyword) ||
+ Tok.is(MMToken::FrameworkKeyword));
+ // Parse 'explicit' or 'framework' keyword, if present.
+ SourceLocation ExplicitLoc;
+ bool Explicit = false;
+ bool Framework = false;
+
+ // Parse 'explicit' keyword, if present.
+ if (Tok.is(MMToken::ExplicitKeyword)) {
+ ExplicitLoc = consumeToken();
+ Explicit = true;
+ }
+
+ // Parse 'framework' keyword, if present.
+ if (Tok.is(MMToken::FrameworkKeyword)) {
+ consumeToken();
+ Framework = true;
+ }
+
+ // Parse 'module' keyword.
+ if (!Tok.is(MMToken::ModuleKeyword)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module);
+ consumeToken();
+ HadError = true;
+ return;
+ }
+ consumeToken(); // 'module' keyword
+
+ // If we have a wildcard for the module name, this is an inferred submodule.
+ // Parse it.
+ if (Tok.is(MMToken::Star))
+ return parseInferredSubmoduleDecl(Explicit);
+
+ // Parse the module name.
+ ModuleId Id;
+ if (parseModuleId(Id)) {
+ HadError = true;
+ return;
+ }
+
+ if (ActiveModule) {
+ if (Id.size() > 1) {
+ Diags.Report(Id.front().second, diag::err_mmap_nested_submodule_id)
+ << SourceRange(Id.front().second, Id.back().second);
+
+ HadError = true;
+ return;
+ }
+ } else if (Id.size() == 1 && Explicit) {
+ // Top-level modules can't be explicit.
+ Diags.Report(ExplicitLoc, diag::err_mmap_explicit_top_level);
+ Explicit = false;
+ ExplicitLoc = SourceLocation();
+ HadError = true;
+ }
+
+ Module *PreviousActiveModule = ActiveModule;
+ if (Id.size() > 1) {
+ // This module map defines a submodule. Go find the module of which it
+ // is a submodule.
+ ActiveModule = 0;
+ for (unsigned I = 0, N = Id.size() - 1; I != N; ++I) {
+ if (Module *Next = Map.lookupModuleQualified(Id[I].first, ActiveModule)) {
+ ActiveModule = Next;
+ continue;
+ }
+
+ if (ActiveModule) {
+ Diags.Report(Id[I].second, diag::err_mmap_missing_module_qualified)
+ << Id[I].first << ActiveModule->getTopLevelModule();
+ } else {
+ Diags.Report(Id[I].second, diag::err_mmap_expected_module_name);
+ }
+ HadError = true;
+ return;
+ }
+ }
+
+ StringRef ModuleName = Id.back().first;
+ SourceLocation ModuleNameLoc = Id.back().second;
+
+ // Parse the optional attribute list.
+ bool IsSystem = false;
+ while (Tok.is(MMToken::LSquare)) {
+ // Consume the '['.
+ SourceLocation LSquareLoc = consumeToken();
+
+ // Check whether we have an attribute name here.
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_attribute);
+ skipUntil(MMToken::RSquare);
+ if (Tok.is(MMToken::RSquare))
+ consumeToken();
+ continue;
+ }
+
+ // Decode the attribute name.
+ AttributeKind Attribute
+ = llvm::StringSwitch<AttributeKind>(Tok.getString())
+ .Case("system", AT_system)
+ .Default(AT_unknown);
+ switch (Attribute) {
+ case AT_unknown:
+ Diags.Report(Tok.getLocation(), diag::warn_mmap_unknown_attribute)
+ << Tok.getString();
+ break;
+
+ case AT_system:
+ IsSystem = true;
+ break;
+ }
+ consumeToken();
+
+ // Consume the ']'.
+ if (!Tok.is(MMToken::RSquare)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rsquare);
+ Diags.Report(LSquareLoc, diag::note_mmap_lsquare_match);
+ skipUntil(MMToken::RSquare);
+ }
+
+ if (Tok.is(MMToken::RSquare))
+ consumeToken();
+ }
+
+ // Parse the opening brace.
+ if (!Tok.is(MMToken::LBrace)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_lbrace)
+ << ModuleName;
+ HadError = true;
+ return;
+ }
+ SourceLocation LBraceLoc = consumeToken();
+
+ // Determine whether this (sub)module has already been defined.
+ if (Module *Existing = Map.lookupModuleQualified(ModuleName, ActiveModule)) {
+ if (Existing->DefinitionLoc.isInvalid() && !ActiveModule) {
+ // Skip the module definition.
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+ return;
+ }
+
+ Diags.Report(ModuleNameLoc, diag::err_mmap_module_redefinition)
+ << ModuleName;
+ Diags.Report(Existing->DefinitionLoc, diag::note_mmap_prev_definition);
+
+ // Skip the module definition.
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+
+ HadError = true;
+ return;
+ }
+
+ // Start defining this module.
+ ActiveModule = Map.findOrCreateModule(ModuleName, ActiveModule, Framework,
+ Explicit).first;
+ ActiveModule->DefinitionLoc = ModuleNameLoc;
+ if (IsSystem)
+ ActiveModule->IsSystem = true;
+
+ bool Done = false;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ case MMToken::RBrace:
+ Done = true;
+ break;
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::FrameworkKeyword:
+ case MMToken::ModuleKeyword:
+ parseModuleDecl();
+ break;
+
+ case MMToken::ExportKeyword:
+ parseExportDecl();
+ break;
+
+ case MMToken::RequiresKeyword:
+ parseRequiresDecl();
+ break;
+
+ case MMToken::UmbrellaKeyword: {
+ SourceLocation UmbrellaLoc = consumeToken();
+ if (Tok.is(MMToken::HeaderKeyword))
+ parseHeaderDecl(UmbrellaLoc);
+ else
+ parseUmbrellaDirDecl(UmbrellaLoc);
+ break;
+ }
+
+ case MMToken::HeaderKeyword:
+ parseHeaderDecl(SourceLocation());
+ break;
+
+ default:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_member);
+ consumeToken();
+ break;
+ }
+ } while (!Done);
+
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+
+ // We're done parsing this module. Pop back to the previous module.
+ ActiveModule = PreviousActiveModule;
+}
+
+/// \brief Parse a requires declaration.
+///
+/// requires-declaration:
+/// 'requires' feature-list
+///
+/// feature-list:
+/// identifier ',' feature-list
+/// identifier
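+///
+/// For example (illustrative feature names):
+/// \code
+///   requires objc, blocks
+/// \endcode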
+void ModuleMapParser::parseRequiresDecl() {
+ assert(Tok.is(MMToken::RequiresKeyword));
+
+ // Parse 'requires' keyword.
+ consumeToken();
+
+ // Parse the feature-list.
+ do {
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_feature);
+ HadError = true;
+ return;
+ }
+
+ // Consume the feature name.
+ std::string Feature = Tok.getString();
+ consumeToken();
+
+ // Add this feature.
+ ActiveModule->addRequirement(Feature, Map.LangOpts, *Map.Target);
+
+ if (!Tok.is(MMToken::Comma))
+ break;
+
+ // Consume the comma.
+ consumeToken();
+ } while (true);
+}
+
+/// \brief Append to \p Path the path components needed to reach the
+/// subframework in which the given module lives.
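+///
+/// For example (hypothetical frameworks): for a subframework module Bar
+/// nested within a top-level framework Foo, this appends
+/// "Frameworks/Bar.framework" to \p Path. The top-level framework itself is
+/// skipped, since \p Path already begins at its directory.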
+static void appendSubframeworkPaths(Module *Mod,
+ llvm::SmallVectorImpl<char> &Path) {
+ // Collect the framework names from the given module to the top-level module.
+ llvm::SmallVector<StringRef, 2> Paths;
+ for (; Mod; Mod = Mod->Parent) {
+ if (Mod->IsFramework)
+ Paths.push_back(Mod->Name);
+ }
+
+ if (Paths.empty())
+ return;
+
+ // Add Frameworks/Name.framework for each subframework.
+ for (unsigned I = Paths.size() - 1; I != 0; --I) {
+ llvm::sys::path::append(Path, "Frameworks");
+ llvm::sys::path::append(Path, Paths[I-1] + ".framework");
+ }
+}
+
+/// \brief Determine whether the given file name is the name of a builtin
+/// header, supplied by Clang to replace, override, or augment existing system
+/// headers.
+static bool isBuiltinHeader(StringRef FileName) {
+ return llvm::StringSwitch<bool>(FileName)
+ .Case("float.h", true)
+ .Case("iso646.h", true)
+ .Case("limits.h", true)
+ .Case("stdalign.h", true)
+ .Case("stdarg.h", true)
+ .Case("stdbool.h", true)
+ .Case("stddef.h", true)
+ .Case("stdint.h", true)
+ .Case("tgmath.h", true)
+ .Case("unwind.h", true)
+ .Default(false);
+}
+
+/// \brief Parse a header declaration.
+///
+/// header-declaration:
+/// 'umbrella'[opt] 'header' string-literal
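+///
+/// For example (illustrative header names):
+/// \code
+///   header "MyLib.h"
+///   umbrella header "MyLibUmbrella.h"
+/// \endcode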
+void ModuleMapParser::parseHeaderDecl(SourceLocation UmbrellaLoc) {
+ assert(Tok.is(MMToken::HeaderKeyword));
+ consumeToken();
+
+ bool Umbrella = UmbrellaLoc.isValid();
+
+ // Parse the header name.
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << "header";
+ HadError = true;
+ return;
+ }
+ std::string FileName = Tok.getString();
+ SourceLocation FileNameLoc = consumeToken();
+
+ // Check whether we already have an umbrella.
+ if (Umbrella && ActiveModule->Umbrella) {
+ Diags.Report(FileNameLoc, diag::err_mmap_umbrella_clash)
+ << ActiveModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Look for this file.
+ const FileEntry *File = 0;
+ const FileEntry *BuiltinFile = 0;
+ SmallString<128> PathName;
+ if (llvm::sys::path::is_absolute(FileName)) {
+ PathName = FileName;
+ File = SourceMgr.getFileManager().getFile(PathName);
+ } else if (const DirectoryEntry *Dir = getOverriddenHeaderSearchDir()) {
+ PathName = Dir->getName();
+ llvm::sys::path::append(PathName, FileName);
+ File = SourceMgr.getFileManager().getFile(PathName);
+ } else {
+ // Search for the header file within the search directory.
+ PathName = Directory->getName();
+ unsigned PathLength = PathName.size();
+
+ if (ActiveModule->isPartOfFramework()) {
+ appendSubframeworkPaths(ActiveModule, PathName);
+
+ // Check whether this file is in the public headers.
+ llvm::sys::path::append(PathName, "Headers");
+ llvm::sys::path::append(PathName, FileName);
+ File = SourceMgr.getFileManager().getFile(PathName);
+
+ if (!File) {
+ // Check whether this file is in the private headers.
+ PathName.resize(PathLength);
+ llvm::sys::path::append(PathName, "PrivateHeaders");
+ llvm::sys::path::append(PathName, FileName);
+ File = SourceMgr.getFileManager().getFile(PathName);
+ }
+ } else {
+ // Lookup for normal headers.
+ llvm::sys::path::append(PathName, FileName);
+ File = SourceMgr.getFileManager().getFile(PathName);
+
+ // If this is a system module with a top-level header, this header
+ // may have a counterpart (or replacement) in the set of headers
+ // supplied by Clang. Find that builtin header.
+ if (ActiveModule->IsSystem && !Umbrella && BuiltinIncludeDir &&
+ BuiltinIncludeDir != Directory && isBuiltinHeader(FileName)) {
+ SmallString<128> BuiltinPathName(BuiltinIncludeDir->getName());
+ llvm::sys::path::append(BuiltinPathName, FileName);
+ BuiltinFile = SourceMgr.getFileManager().getFile(BuiltinPathName);
+
+ // If Clang supplies this header but the underlying system does not,
+ // just silently swap in our builtin version. Otherwise, we'll end
+ // up adding both (later).
+ if (!File && BuiltinFile) {
+ File = BuiltinFile;
+ BuiltinFile = 0;
+ }
+ }
+ }
+ }
+
+ // FIXME: We shouldn't be eagerly stat'ing every file named in a module map.
+ // Come up with a lazy way to do this.
+ if (File) {
+ if (const Module *OwningModule = Map.Headers[File]) {
+ Diags.Report(FileNameLoc, diag::err_mmap_header_conflict)
+ << FileName << OwningModule->getFullModuleName();
+ HadError = true;
+ } else if (Umbrella) {
+ const DirectoryEntry *UmbrellaDir = File->getDir();
+ if ((OwningModule = Map.UmbrellaDirs[UmbrellaDir])) {
+ Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
+ << OwningModule->getFullModuleName();
+ HadError = true;
+ } else {
+ // Record this umbrella header.
+ Map.setUmbrellaHeader(ActiveModule, File);
+ }
+ } else {
+ // Record this header.
+ Map.addHeader(ActiveModule, File);
+
+ // If there is a builtin counterpart to this file, add it now.
+ if (BuiltinFile)
+ Map.addHeader(ActiveModule, BuiltinFile);
+ }
+ } else {
+ Diags.Report(FileNameLoc, diag::err_mmap_header_not_found)
+ << Umbrella << FileName;
+ HadError = true;
+ }
+}
+
+/// \brief Parse an umbrella directory declaration.
+///
+/// umbrella-dir-declaration:
+/// 'umbrella' string-literal
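+///
+/// For example (an illustrative directory name):
+/// \code
+///   umbrella "SubHeaders"
+/// \endcode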
+void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
+ // Parse the directory name.
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << "umbrella";
+ HadError = true;
+ return;
+ }
+
+ std::string DirName = Tok.getString();
+ SourceLocation DirNameLoc = consumeToken();
+
+ // Check whether we already have an umbrella.
+ if (ActiveModule->Umbrella) {
+ Diags.Report(DirNameLoc, diag::err_mmap_umbrella_clash)
+ << ActiveModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Look for this file.
+ const DirectoryEntry *Dir = 0;
+ if (llvm::sys::path::is_absolute(DirName))
+ Dir = SourceMgr.getFileManager().getDirectory(DirName);
+ else {
+ SmallString<128> PathName;
+ PathName = Directory->getName();
+ llvm::sys::path::append(PathName, DirName);
+ Dir = SourceMgr.getFileManager().getDirectory(PathName);
+ }
+
+ if (!Dir) {
+ Diags.Report(DirNameLoc, diag::err_mmap_umbrella_dir_not_found)
+ << DirName;
+ HadError = true;
+ return;
+ }
+
+ if (Module *OwningModule = Map.UmbrellaDirs[Dir]) {
+ Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
+ << OwningModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Record this umbrella directory.
+ Map.setUmbrellaDir(ActiveModule, Dir);
+}
+
+/// \brief Parse a module export declaration.
+///
+/// export-declaration:
+/// 'export' wildcard-module-id
+///
+/// wildcard-module-id:
+/// identifier
+/// '*'
+/// identifier '.' wildcard-module-id
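+///
+/// For example (hypothetical module names):
+/// \code
+///   export MyLib.Container
+///   export MyLib.*
+///   export *
+/// \endcode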
+void ModuleMapParser::parseExportDecl() {
+ assert(Tok.is(MMToken::ExportKeyword));
+ SourceLocation ExportLoc = consumeToken();
+
+ // Parse the module-id with an optional wildcard at the end.
+ ModuleId ParsedModuleId;
+ bool Wildcard = false;
+ do {
+ if (Tok.is(MMToken::Identifier)) {
+ ParsedModuleId.push_back(std::make_pair(Tok.getString(),
+ Tok.getLocation()));
+ consumeToken();
+
+ if (Tok.is(MMToken::Period)) {
+ consumeToken();
+ continue;
+ }
+
+ break;
+ }
+
+ if (Tok.is(MMToken::Star)) {
+ Wildcard = true;
+ consumeToken();
+ break;
+ }
+
+ Diags.Report(Tok.getLocation(), diag::err_mmap_export_module_id);
+ HadError = true;
+ return;
+ } while (true);
+
+ Module::UnresolvedExportDecl Unresolved = {
+ ExportLoc, ParsedModuleId, Wildcard
+ };
+ ActiveModule->UnresolvedExports.push_back(Unresolved);
+}
+
+void ModuleMapParser::parseInferredSubmoduleDecl(bool Explicit) {
+ assert(Tok.is(MMToken::Star));
+ SourceLocation StarLoc = consumeToken();
+ bool Failed = false;
+
+ // Inferred modules must be submodules.
+ if (!ActiveModule) {
+ Diags.Report(StarLoc, diag::err_mmap_top_level_inferred_submodule);
+ Failed = true;
+ }
+
+ // Inferred modules must have umbrella directories.
+ if (!Failed && !ActiveModule->getUmbrellaDir()) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_no_umbrella);
+ Failed = true;
+ }
+
+ // Check for redefinition of an inferred module.
+ if (!Failed && ActiveModule->InferSubmodules) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_redef);
+ if (ActiveModule->InferredSubmoduleLoc.isValid())
+ Diags.Report(ActiveModule->InferredSubmoduleLoc,
+ diag::note_mmap_prev_definition);
+ Failed = true;
+ }
+
+ // If there were any problems with this inferred submodule, skip its body.
+ if (Failed) {
+ if (Tok.is(MMToken::LBrace)) {
+ consumeToken();
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ }
+ HadError = true;
+ return;
+ }
+
+ // Note that we have an inferred submodule.
+ ActiveModule->InferSubmodules = true;
+ ActiveModule->InferredSubmoduleLoc = StarLoc;
+ ActiveModule->InferExplicitSubmodules = Explicit;
+
+ // Parse the opening brace.
+ if (!Tok.is(MMToken::LBrace)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_lbrace_wildcard);
+ HadError = true;
+ return;
+ }
+ SourceLocation LBraceLoc = consumeToken();
+
+ // Parse the body of the inferred submodule.
+ bool Done = false;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ case MMToken::RBrace:
+ Done = true;
+ break;
+
+ case MMToken::ExportKeyword: {
+ consumeToken();
+ if (Tok.is(MMToken::Star))
+ ActiveModule->InferExportWildcard = true;
+ else
+ Diags.Report(Tok.getLocation(),
+ diag::err_mmap_expected_export_wildcard);
+ consumeToken();
+ break;
+ }
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::ModuleKeyword:
+ case MMToken::HeaderKeyword:
+ case MMToken::UmbrellaKeyword:
+ default:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_wildcard_member);
+ consumeToken();
+ break;
+ }
+ } while (!Done);
+
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+}
+
+/// \brief If there is a specific header search directory due to the presence
+/// of an umbrella directory, retrieve that directory. Otherwise, return null.
+const DirectoryEntry *ModuleMapParser::getOverriddenHeaderSearchDir() {
+ for (Module *Mod = ActiveModule; Mod; Mod = Mod->Parent) {
+ // If we have an umbrella directory, use that.
+ if (Mod->hasUmbrellaDir())
+ return Mod->getUmbrellaDir();
+
+ // If we have a framework directory, stop looking.
+ if (Mod->IsFramework)
+ return 0;
+ }
+
+ return 0;
+}
+
+/// \brief Parse a module map file.
+///
+/// module-map-file:
+/// module-declaration*
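+///
+/// A minimal module map file might look like this (hypothetical names,
+/// for illustration only):
+/// \code
+///   module MyLib {
+///     umbrella header "MyLib.h"
+///     module * { export * }
+///   }
+/// \endcode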
+bool ModuleMapParser::parseModuleMapFile() {
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ return HadError;
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::ModuleKeyword:
+ case MMToken::FrameworkKeyword:
+ parseModuleDecl();
+ break;
+
+ case MMToken::Comma:
+ case MMToken::ExportKeyword:
+ case MMToken::HeaderKeyword:
+ case MMToken::Identifier:
+ case MMToken::LBrace:
+ case MMToken::LSquare:
+ case MMToken::Period:
+ case MMToken::RBrace:
+ case MMToken::RSquare:
+ case MMToken::RequiresKeyword:
+ case MMToken::Star:
+ case MMToken::StringLiteral:
+ case MMToken::UmbrellaKeyword:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module);
+ HadError = true;
+ consumeToken();
+ break;
+ }
+ } while (true);
+}
+
+bool ModuleMap::parseModuleMapFile(const FileEntry *File) {
+ assert(Target != 0 && "Missing target information");
+ FileID ID = SourceMgr->createFileID(File, SourceLocation(), SrcMgr::C_User);
+ const llvm::MemoryBuffer *Buffer = SourceMgr->getBuffer(ID);
+ if (!Buffer)
+ return true;
+
+ // Parse this module map file.
+ Lexer L(ID, SourceMgr->getBuffer(ID), *SourceMgr, MMapLangOpts);
+ Diags->getClient()->BeginSourceFile(MMapLangOpts);
+ ModuleMapParser Parser(L, *SourceMgr, *Diags, *this, File->getDir(),
+ BuiltinIncludeDir);
+ bool Result = Parser.parseModuleMapFile();
+ Diags->getClient()->EndSourceFile();
+
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
index 986341b..6f4c189 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
@@ -42,6 +42,7 @@ void Preprocessor::Backtrack() {
&& "EnableBacktrackAtThisPos was not called!");
CachedLexPos = BacktrackPositions.back();
BacktrackPositions.pop_back();
+ recomputeCurLexerKind();
}
void Preprocessor::CachingLex(Token &Result) {
@@ -56,17 +57,21 @@ void Preprocessor::CachingLex(Token &Result) {
ExitCachingLexMode();
Lex(Result);
- if (!isBacktrackEnabled()) {
+ if (isBacktrackEnabled()) {
+ // Cache the lexed token.
+ EnterCachingLexMode();
+ CachedTokens.push_back(Result);
+ ++CachedLexPos;
+ return;
+ }
+
+ if (CachedLexPos < CachedTokens.size()) {
+ EnterCachingLexMode();
+ } else {
// All cached tokens were consumed.
CachedTokens.clear();
CachedLexPos = 0;
- return;
}
-
- // Cache the lexed token.
- EnterCachingLexMode();
- CachedTokens.push_back(Result);
- ++CachedLexPos;
}
void Preprocessor::EnterCachingLexMode() {
@@ -74,8 +79,7 @@ void Preprocessor::EnterCachingLexMode() {
return;
PushIncludeMacroStack();
- if (CurLexerKind != CLK_LexAfterModuleImport)
- CurLexerKind = CLK_CachingLexer;
+ CurLexerKind = CLK_CachingLexer;
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp b/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp
new file mode 100644
index 0000000..952b926
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp
@@ -0,0 +1,14 @@
+//===--- PPCallbacks.cpp - Callbacks for Preprocessor actions ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/PPCallbacks.h"
+
+using namespace clang;
+
+void PPChainedCallbacks::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
index de50c75..625a204 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
@@ -22,6 +22,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -119,8 +120,15 @@ void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
std::string Spelling = getSpelling(MacroNameTok, &Invalid);
if (Invalid)
return;
-
+
const IdentifierInfo &Info = Identifiers.get(Spelling);
+
+ // Allow #defining |and| and friends in Microsoft mode.
+ if (Info.isCPlusPlusOperatorKeyword() && getLangOpts().MicrosoftMode) {
+ MacroNameTok.setIdentifierInfo(getIdentifierInfo(Spelling));
+ return;
+ }
+
if (Info.isCPlusPlusOperatorKeyword())
// C++ 2.5p2: An alternative token behaves the same as its primary token
// except for its spelling.
@@ -173,7 +181,7 @@ void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
// trouble than it is worth to insert /**/ and check that there is no /**/
// in the range also.
FixItHint Hint;
- if ((Features.GNUMode || Features.C99 || Features.CPlusPlus) &&
+ if ((LangOpts.GNUMode || LangOpts.C99 || LangOpts.CPlusPlus) &&
!CurTokenLexer)
Hint = FixItHint::CreateInsertion(Tmp.getLocation(),"//");
Diag(Tmp, diag::ext_pp_extra_tokens_at_eol) << DirType << Hint;
@@ -305,9 +313,6 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
CurPPLexer->pushConditionalLevel(Tok.getLocation(), /*wasskipping*/true,
/*foundnonskip*/false,
/*foundelse*/false);
-
- if (Callbacks)
- Callbacks->Endif();
}
} else if (Directive[0] == 'e') {
StringRef Sub = Directive.substr(1);
@@ -320,8 +325,11 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
assert(!InCond && "Can't be skipping if not in a conditional!");
// If we popped the outermost skipping block, we're done skipping!
- if (!CondInfo.WasSkipping)
+ if (!CondInfo.WasSkipping) {
+ if (Callbacks)
+ Callbacks->Endif(Tok.getLocation(), CondInfo.IfLoc);
break;
+ }
} else if (Sub == "lse") { // "else".
// #else directive in a skipping conditional. If not in some other
// skipping conditional, and if #else hasn't already been seen, enter it
@@ -334,14 +342,13 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// Note that we've seen a #else in this conditional.
CondInfo.FoundElse = true;
- if (Callbacks)
- Callbacks->Else();
-
// If the conditional is at the top level, and the #if block wasn't
// entered, enter the #else block now.
if (!CondInfo.WasSkipping && !CondInfo.FoundNonSkip) {
CondInfo.FoundNonSkip = true;
CheckEndOfDirective("else");
+ if (Callbacks)
+ Callbacks->Else(Tok.getLocation(), CondInfo.IfLoc);
break;
} else {
DiscardUntilEndOfDirective(); // C99 6.10p4.
@@ -370,12 +377,13 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// If this is a #elif with a #else before it, report the error.
if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
- if (Callbacks)
- Callbacks->Elif(SourceRange(ConditionalBegin, ConditionalEnd));
-
// If this condition is true, enter it!
if (ShouldEnter) {
CondInfo.FoundNonSkip = true;
+ if (Callbacks)
+ Callbacks->Elif(Tok.getLocation(),
+ SourceRange(ConditionalBegin, ConditionalEnd),
+ CondInfo.IfLoc);
break;
}
}
@@ -486,7 +494,8 @@ const FileEntry *Preprocessor::LookupFile(
const DirectoryLookup *&CurDir,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
- StringRef *SuggestedModule) {
+ Module **SuggestedModule,
+ bool SkipCache) {
// If the header lookup mechanism may be relative to the current file, pass in
// info about where the current file is.
const FileEntry *CurFileEnt = 0;
@@ -510,7 +519,7 @@ const FileEntry *Preprocessor::LookupFile(
CurDir = CurDirLookup;
const FileEntry *FE = HeaderInfo.LookupFile(
Filename, isAngled, FromDir, CurDir, CurFileEnt,
- SearchPath, RelativePath, SuggestedModule);
+ SearchPath, RelativePath, SuggestedModule, SkipCache);
if (FE) return FE;
// Otherwise, see if this is a subframework header. If so, this is relative
@@ -575,9 +584,25 @@ void Preprocessor::HandleDirective(Token &Result) {
// A(abc
// #warning blah
// def)
- // If so, the user is relying on non-portable behavior, emit a diagnostic.
- if (InMacroArgs)
+ // If so, the user is relying on undefined behavior, emit a diagnostic. Do
+ // not support this for #include-like directives, since that can result in
+ // terrible diagnostics, and does not work in GCC.
+ if (InMacroArgs) {
+ if (IdentifierInfo *II = Result.getIdentifierInfo()) {
+ switch (II->getPPKeywordID()) {
+ case tok::pp_include:
+ case tok::pp_import:
+ case tok::pp_include_next:
+ case tok::pp___include_macros:
+ Diag(Result, diag::err_embedded_include) << II->getName();
+ DiscardUntilEndOfDirective();
+ return;
+ default:
+ break;
+ }
+ }
Diag(Result, diag::ext_embedded_directive);
+ }
TryAgain:
switch (Result.getKind()) {
@@ -594,7 +619,7 @@ TryAgain:
setCodeCompletionReached();
return;
case tok::numeric_constant: // # 7 GNU line marker directive.
- if (getLangOptions().AsmPreprocessor)
+ if (getLangOpts().AsmPreprocessor)
break; // # 4 is not a preprocessor directive in .S files.
return HandleDigitDirective(Result);
default:
@@ -664,8 +689,15 @@ TryAgain:
//isExtension = true; // FIXME: implement #unassert
break;
- case tok::pp___export_macro__:
- return HandleMacroExportDirective(Result);
+ case tok::pp___public_macro:
+ if (getLangOpts().Modules)
+ return HandleMacroPublicDirective(Result);
+ break;
+
+ case tok::pp___private_macro:
+ if (getLangOpts().Modules)
+ return HandleMacroPrivateDirective(Result);
+ break;
}
break;
}
@@ -674,7 +706,7 @@ TryAgain:
// directives. This is important because # may be a comment or introduce
// various pseudo-ops. Just return the # token and push back the following
// token to be lexed next time.
- if (getLangOptions().AsmPreprocessor) {
+ if (getLangOpts().AsmPreprocessor) {
Token *Toks = new Token[2];
// Return the # and the token after it.
Toks[0] = SavedHash;
@@ -713,7 +745,7 @@ static bool GetLineValue(Token &DigitTok, unsigned &Val,
return true;
}
- llvm::SmallString<64> IntegerBuffer;
+ SmallString<64> IntegerBuffer;
IntegerBuffer.resize(DigitTok.getLength());
const char *DigitTokBegin = &IntegerBuffer[0];
bool Invalid = false;
@@ -773,11 +805,11 @@ void Preprocessor::HandleLineDirective(Token &Tok) {
// Enforce C99 6.10.4p3: "The digit sequence shall not specify ... a
// number greater than 2147483647". C90 requires that the line # be <= 32767.
unsigned LineLimit = 32768U;
- if (Features.C99 || Features.CPlusPlus0x)
+ if (LangOpts.C99 || LangOpts.CPlusPlus0x)
LineLimit = 2147483648U;
if (LineNo >= LineLimit)
Diag(DigitTok, diag::ext_pp_line_too_big) << LineLimit;
- else if (Features.CPlusPlus0x && LineNo >= 32768U)
+ else if (LangOpts.CPlusPlus0x && LineNo >= 32768U)
Diag(DigitTok, diag::warn_cxx98_compat_pp_line_too_big);
int FilenameID = -1;
@@ -790,8 +822,10 @@ void Preprocessor::HandleLineDirective(Token &Tok) {
; // ok
else if (StrTok.isNot(tok::string_literal)) {
Diag(StrTok, diag::err_pp_line_invalid_filename);
- DiscardUntilEndOfDirective();
- return;
+ return DiscardUntilEndOfDirective();
+ } else if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
} else {
// Parse and validate the string, converting it into a unique ID.
StringLiteralParser Literal(&StrTok, 1, *this);
@@ -925,6 +959,9 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
else if (StrTok.isNot(tok::string_literal)) {
Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
return DiscardUntilEndOfDirective();
+ } else if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
} else {
// Parse and validate the string, converting it into a unique ID.
StringLiteralParser Literal(&StrTok, 1, *this);
@@ -982,10 +1019,18 @@ void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
// collapse multiple consecutive whitespace between tokens, but this isn't
// specified by the standard.
std::string Message = CurLexer->ReadToEndOfLine();
+
+ // Find the first non-whitespace character, so that we can make the
+ // diagnostic more succinct.
+ StringRef Msg(Message);
+ size_t i = Msg.find_first_not_of(' ');
+ if (i < Msg.size())
+ Msg = Msg.substr(i);
+
if (isWarning)
- Diag(Tok, diag::pp_hash_warning) << Message;
+ Diag(Tok, diag::pp_hash_warning) << Msg;
else
- Diag(Tok, diag::err_pp_hash_error) << Message;
+ Diag(Tok, diag::err_pp_hash_error) << Msg;
}
/// HandleIdentSCCSDirective - Handle a #ident/#sccs directive.
@@ -1007,6 +1052,11 @@ void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
return;
}
+ if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
+ }
+
// Verify that there is nothing after the string, other than EOD.
CheckEndOfDirective("ident");
@@ -1018,8 +1068,8 @@ void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
}
}
-/// \brief Handle a #__export_macro__ directive.
-void Preprocessor::HandleMacroExportDirective(Token &Tok) {
+/// \brief Handle a #public directive.
+void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
Token MacroNameTok;
ReadMacroName(MacroNameTok, 2);
@@ -1027,21 +1077,52 @@ void Preprocessor::HandleMacroExportDirective(Token &Tok) {
if (MacroNameTok.is(tok::eod))
return;
- // Check to see if this is the last token on the #__export_macro__ line.
- CheckEndOfDirective("__export_macro__");
+ // Check to see if this is the last token on the #__public_macro line.
+ CheckEndOfDirective("__public_macro");
// Okay, we finally have a valid identifier to undef.
MacroInfo *MI = getMacroInfo(MacroNameTok.getIdentifierInfo());
// If the macro is not defined, this is an error.
if (MI == 0) {
- Diag(MacroNameTok, diag::err_pp_export_non_macro)
+ Diag(MacroNameTok, diag::err_pp_visibility_non_macro)
<< MacroNameTok.getIdentifierInfo();
return;
}
// Note that this macro has now been exported.
- MI->setExportLocation(MacroNameTok.getLocation());
+ MI->setVisibility(/*IsPublic=*/true, MacroNameTok.getLocation());
+
+ // If this macro definition came from a PCH file, mark it
+ // as having changed since serialization.
+ if (MI->isFromAST())
+ MI->setChangedAfterLoad();
+}
+
+/// \brief Handle a #private directive.
+void Preprocessor::HandleMacroPrivateDirective(Token &Tok) {
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, 2);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ // Check to see if this is the last token on the #__private_macro line.
+ CheckEndOfDirective("__private_macro");
+
+ // Okay, we finally have a valid identifier to undef.
+ MacroInfo *MI = getMacroInfo(MacroNameTok.getIdentifierInfo());
+
+ // If the macro is not defined, this is an error.
+ if (MI == 0) {
+ Diag(MacroNameTok, diag::err_pp_visibility_non_macro)
+ << MacroNameTok.getIdentifierInfo();
+ return;
+ }
+
+ // Note that this macro has now been marked private.
+ MI->setVisibility(/*IsPublic=*/false, MacroNameTok.getLocation());
// If this macro definition came from a PCH file, mark it
// as having changed since serialization.
@@ -1109,7 +1190,7 @@ bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
/// false if the > was found, otherwise it returns true if it finds and consumes
/// the EOD marker.
bool Preprocessor::ConcatenateIncludeName(
- llvm::SmallString<128> &FilenameBuffer,
+ SmallString<128> &FilenameBuffer,
SourceLocation &End) {
Token CurTok;
@@ -1171,9 +1252,10 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
CurPPLexer->LexIncludeFilename(FilenameTok);
// Reserve a buffer to get the spelling.
- llvm::SmallString<128> FilenameBuffer;
+ SmallString<128> FilenameBuffer;
StringRef Filename;
SourceLocation End;
+ SourceLocation CharEnd; // the end of this directive, in characters
switch (FilenameTok.getKind()) {
case tok::eod:
@@ -1184,6 +1266,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
case tok::string_literal:
Filename = getSpelling(FilenameTok, FilenameBuffer);
End = FilenameTok.getLocation();
+ CharEnd = End.getLocWithOffset(Filename.size());
break;
case tok::less:
@@ -1193,6 +1276,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (ConcatenateIncludeName(FilenameBuffer, End))
return; // Found <eod> but no ">"? Diagnostic already emitted.
Filename = FilenameBuffer.str();
+ CharEnd = getLocForEndOfToken(End);
break;
default:
Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
@@ -1200,6 +1284,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
return;
}
+ StringRef OriginalFilename = Filename;
bool isAngled =
GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
// If GetIncludeFilenameSpelling set the start ptr to null, there was an
@@ -1230,38 +1315,128 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
PragmaARCCFCodeAuditedLoc = SourceLocation();
}
+ if (HeaderInfo.HasIncludeAliasMap()) {
+ // Map the filename with the brackets still attached. If the name doesn't
+ // map to anything, fall back on the filename we've already gotten the
+ // spelling for.
+ StringRef NewName = HeaderInfo.MapHeaderToIncludeAlias(OriginalFilename);
+ if (!NewName.empty())
+ Filename = NewName;
+ }
+
// Search include directories.
const DirectoryLookup *CurDir;
- llvm::SmallString<1024> SearchPath;
- llvm::SmallString<1024> RelativePath;
+ SmallString<1024> SearchPath;
+ SmallString<1024> RelativePath;
// We get the raw path only if we have 'Callbacks' to which we later pass
// the path.
- StringRef SuggestedModule;
+ Module *SuggestedModule = 0;
const FileEntry *File = LookupFile(
Filename, isAngled, LookupFrom, CurDir,
Callbacks ? &SearchPath : NULL, Callbacks ? &RelativePath : NULL,
- AutoModuleImport? &SuggestedModule : 0);
+ getLangOpts().Modules? &SuggestedModule : 0);
- // If we are supposed to import a module rather than including the header,
- // do so now.
- if (!SuggestedModule.empty()) {
- TheModuleLoader.loadModule(IncludeTok.getLocation(),
- Identifiers.get(SuggestedModule),
- FilenameTok.getLocation());
- return;
- }
-
- // Notify the callback object that we've seen an inclusion directive.
- if (Callbacks)
+ if (Callbacks) {
+ if (!File) {
+ // Give the clients a chance to recover.
+ SmallString<128> RecoveryPath;
+ if (Callbacks->FileNotFound(Filename, RecoveryPath)) {
+ if (const DirectoryEntry *DE = FileMgr.getDirectory(RecoveryPath)) {
+ // Add the recovery path to the list of search paths.
+ DirectoryLookup DL(DE, SrcMgr::C_User, true, false);
+ HeaderInfo.AddSearchPath(DL, isAngled);
+
+ // Try the lookup again, skipping the cache.
+ File = LookupFile(Filename, isAngled, LookupFrom, CurDir, 0, 0,
+ getLangOpts().Modules? &SuggestedModule : 0,
+ /*SkipCache*/true);
+ }
+ }
+ }
+
+ // Notify the callback object that we've seen an inclusion directive.
Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled, File,
End, SearchPath, RelativePath);
-
+ }
+
if (File == 0) {
if (!SuppressIncludeNotFoundError)
Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
return;
}
+ // If we are supposed to import a module rather than including the header,
+ // do so now.
+ if (SuggestedModule) {
+ // Compute the module access path corresponding to this module.
+ // FIXME: Should we have a second loadModule() overload to avoid this
+ // extra lookup step?
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+ for (Module *Mod = SuggestedModule; Mod; Mod = Mod->Parent)
+ Path.push_back(std::make_pair(getIdentifierInfo(Mod->Name),
+ FilenameTok.getLocation()));
+ std::reverse(Path.begin(), Path.end());
+
+ // Warn that we're replacing the include/import with a module import.
+ SmallString<128> PathString;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (I)
+ PathString += '.';
+ PathString += Path[I].first->getName();
+ }
+ int IncludeKind = 0;
+
+ switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ IncludeKind = 0;
+ break;
+
+ case tok::pp_import:
+ IncludeKind = 1;
+ break;
+
+ case tok::pp_include_next:
+ IncludeKind = 2;
+ break;
+
+ case tok::pp___include_macros:
+ IncludeKind = 3;
+ break;
+
+ default:
+ llvm_unreachable("unknown include directive kind");
+ }
+
+ // Determine whether we are actually building the module that this
+ // include directive maps to.
+ bool BuildingImportedModule
+ = Path[0].first->getName() == getLangOpts().CurrentModule;
+
+ if (!BuildingImportedModule && getLangOpts().ObjC2) {
+ // If we're not building the imported module, warn that we're going
+ // to automatically turn this inclusion directive into a module import.
+ // We only do this in Objective-C, where we have a module-import syntax.
+ CharSourceRange ReplaceRange(SourceRange(HashLoc, CharEnd),
+ /*IsTokenRange=*/false);
+ Diag(HashLoc, diag::warn_auto_module_import)
+ << IncludeKind << PathString
+ << FixItHint::CreateReplacement(ReplaceRange,
+ "@__experimental_modules_import " + PathString.str().str() + ";");
+ }
+
+ // Load the module.
+ // If this was an #__include_macros directive, only make macros visible.
+ Module::NameVisibilityKind Visibility
+ = (IncludeKind == 3)? Module::MacrosVisible : Module::AllVisible;
+ Module *Imported
+ = TheModuleLoader.loadModule(IncludeTok.getLocation(), Path, Visibility,
+ /*IsIncludeDirective=*/true);
+
+ // If this header isn't part of the module we're building, we're done.
+ if (!BuildingImportedModule && Imported)
+ return;
+ }
+
// The #included file will be considered to be a system header if either it is
// in a system include directory, or if the #includer is a system include
// header.
@@ -1278,8 +1453,12 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
}
// Look up the file, create a File ID for it.
- FileID FID = SourceMgr.createFileID(File, FilenameTok.getLocation(),
- FileCharacter);
+ SourceLocation IncludePos = End;
+ // If the filename string was the result of macro expansions, set the include
+ // position on the file where it will be included and after the expansions.
+ if (IncludePos.isMacroID())
+ IncludePos = SourceMgr.getExpansionRange(IncludePos).second;
+ FileID FID = SourceMgr.createFileID(File, IncludePos, FileCharacter);
assert(!FID.isInvalid() && "Expected valid file ID");
// Finally, if all is good, enter the new file!
@@ -1309,13 +1488,29 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup);
}
+/// HandleMicrosoftImportDirective - Implements #import for Microsoft Mode
+void Preprocessor::HandleMicrosoftImportDirective(Token &Tok) {
+ // The Microsoft #import directive takes a type library and generates header
+ // files from it, and includes those. This is beyond the scope of what clang
+ // does, so we ignore it and error out. However, #import can optionally have
+ // trailing attributes that span multiple lines. We're going to eat those
+ // so we can continue processing from there.
+ Diag(Tok, diag::err_pp_import_directive_ms);
+
+ // Read tokens until we get to the end of the directive. Note that the
+ // directive can be split over multiple lines using the backslash character.
+ DiscardUntilEndOfDirective();
+}
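For reference, a sketch of the Microsoft-mode input this handler rejects (type library name and attributes hypothetical); the trailing attributes may continue across lines with backslashes, and DiscardUntilEndOfDirective() consumes all of them after the error:

    #import "progid:Hypothetical.TypeLib.1.0" \
            rename("OldName", "NewName") no_namespace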
+
/// HandleImportDirective - Implements #import.
///
void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
Token &ImportTok) {
- if (!Features.ObjC1) // #import is standard for ObjC.
+ if (!LangOpts.ObjC1) { // #import is standard for ObjC.
+ if (LangOpts.MicrosoftMode)
+ return HandleMicrosoftImportDirective(ImportTok);
Diag(ImportTok, diag::ext_pp_import_directive);
-
+ }
return HandleIncludeDirective(HashLoc, ImportTok, 0, true);
}
@@ -1354,10 +1549,9 @@ void Preprocessor::HandleIncludeMacrosDirective(SourceLocation HashLoc,
/// definition has just been read. Lex the rest of the arguments and the
/// closing ), updating MI with what we learn. Return true if an error occurs
/// parsing the arg list.
-bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI) {
+bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
SmallVector<IdentifierInfo*, 32> Arguments;
- Token Tok;
while (1) {
LexUnexpandedToken(Tok);
switch (Tok.getKind()) {
@@ -1369,8 +1563,8 @@ bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI) {
Diag(Tok, diag::err_pp_expected_ident_in_arg_list);
return true;
case tok::ellipsis: // #define X(... -> C99 varargs
- if (!Features.C99)
- Diag(Tok, Features.CPlusPlus0x ?
+ if (!LangOpts.C99)
+ Diag(Tok, LangOpts.CPlusPlus0x ?
diag::warn_cxx98_compat_variadic_macro :
diag::ext_variadic_macro);
@@ -1476,7 +1670,7 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
} else if (Tok.is(tok::l_paren)) {
// This is a function-like macro definition. Read the argument list.
MI->setIsFunctionLike();
- if (ReadMacroDefinitionArgList(MI)) {
+ if (ReadMacroDefinitionArgList(MI, LastTok)) {
// Forget about MI.
ReleaseMacroInfo(MI);
// Throw away the rest of the line.
@@ -1496,7 +1690,7 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
// Read the first token after the arg list for down below.
LexUnexpandedToken(Tok);
- } else if (Features.C99 || Features.CPlusPlus0x) {
+ } else if (LangOpts.C99 || LangOpts.CPlusPlus0x) {
// C99 requires whitespace between the macro definition and the body. Emit
// a diagnostic for something like "#define X+".
Diag(Tok, diag::ext_c99_whitespace_required_after_macro_name);
@@ -1561,7 +1755,7 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok) {
// the '#' because '#' is often a comment character. However, change
// the kind of the token to tok::unknown so that the preprocessor isn't
// confused.
- if (getLangOptions().AsmPreprocessor && Tok.isNot(tok::eod)) {
+ if (getLangOpts().AsmPreprocessor && Tok.isNot(tok::eod)) {
LastTok.setKind(tok::unknown);
} else {
Diag(Tok, diag::err_pp_stringize_not_parameter);
@@ -1732,6 +1926,13 @@ void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
if (MI) // Mark it used.
markMacroAsUsed(MI);
+ if (Callbacks) {
+ if (isIfndef)
+ Callbacks->Ifndef(DirectiveTok.getLocation(), MacroNameTok);
+ else
+ Callbacks->Ifdef(DirectiveTok.getLocation(), MacroNameTok);
+ }
+
// Should we include the stuff contained by this directive?
if (!MI == isIfndef) {
// Yes, remember that we are inside a conditional, then lex the next token.
@@ -1744,13 +1945,6 @@ void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
/*Foundnonskip*/false,
/*FoundElse*/false);
}
-
- if (Callbacks) {
- if (isIfndef)
- Callbacks->Ifndef(MacroNameTok);
- else
- Callbacks->Ifdef(MacroNameTok);
- }
}
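Because the Ifdef/Ifndef callbacks now fire before SkipExcludedConditionalBlock, clients observe directives in source order and also receive the directive's own location. A minimal client sketch against the signatures used in this hunk:

    #include "clang/Lex/PPCallbacks.h"

    // Hypothetical client of the revised callback interface.
    struct RecordingCallbacks : clang::PPCallbacks {
      virtual void Ifdef(clang::SourceLocation Loc,
                         const clang::Token &MacroNameTok) {
        // Invoked at the #ifdef itself, even when the guarded block is
        // about to be skipped.
      }
      virtual void Ifndef(clang::SourceLocation Loc,
                          const clang::Token &MacroNameTok) {
        // Same ordering guarantee for #ifndef.
      }
    };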
/// HandleIfDirective - Implements the #if directive.
@@ -1774,6 +1968,10 @@ void Preprocessor::HandleIfDirective(Token &IfToken,
CurPPLexer->MIOpt.EnterTopLevelConditional();
}
+ if (Callbacks)
+ Callbacks->If(IfToken.getLocation(),
+ SourceRange(ConditionalBegin, ConditionalEnd));
+
// Should we include the stuff contained by this directive?
if (ConditionalTrue) {
// Yes, remember that we are inside a conditional, then lex the next token.
@@ -1784,9 +1982,6 @@ void Preprocessor::HandleIfDirective(Token &IfToken,
SkipExcludedConditionalBlock(IfToken.getLocation(), /*Foundnonskip*/false,
/*FoundElse*/false);
}
-
- if (Callbacks)
- Callbacks->If(SourceRange(ConditionalBegin, ConditionalEnd));
}
/// HandleEndifDirective - Implements the #endif directive.
@@ -1812,7 +2007,7 @@ void Preprocessor::HandleEndifDirective(Token &EndifToken) {
"This code should only be reachable in the non-skipping case!");
if (Callbacks)
- Callbacks->Endif();
+ Callbacks->Endif(EndifToken.getLocation(), CondInfo.IfLoc);
}
/// HandleElseDirective - Implements the #else directive.
@@ -1836,12 +2031,12 @@ void Preprocessor::HandleElseDirective(Token &Result) {
// If this is a #else with a #else before it, report the error.
if (CI.FoundElse) Diag(Result, diag::pp_err_else_after_else);
+ if (Callbacks)
+ Callbacks->Else(Result.getLocation(), CI.IfLoc);
+
// Finally, skip the rest of the contents of this block.
SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
/*FoundElse*/true, Result.getLocation());
-
- if (Callbacks)
- Callbacks->Else();
}
/// HandleElifDirective - Implements the #elif directive.
@@ -1868,12 +2063,13 @@ void Preprocessor::HandleElifDirective(Token &ElifToken) {
// If this is a #elif with a #else before it, report the error.
if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
+
+ if (Callbacks)
+ Callbacks->Elif(ElifToken.getLocation(),
+ SourceRange(ConditionalBegin, ConditionalEnd), CI.IfLoc);
// Finally, skip the rest of the contents of this block.
SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
/*FoundElse*/CI.FoundElse,
ElifToken.getLocation());
-
- if (Callbacks)
- Callbacks->Elif(SourceRange(ConditionalBegin, ConditionalEnd));
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
index 20f624a..7cac63e 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
@@ -197,7 +197,7 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
PP.Diag(PeekTok, diag::err_pp_expected_value_in_expr);
return true;
case tok::numeric_constant: {
- llvm::SmallString<64> IntegerBuffer;
+ SmallString<64> IntegerBuffer;
bool NumberInvalid = false;
StringRef Spelling = PP.getSpelling(PeekTok, IntegerBuffer,
&NumberInvalid);
@@ -215,9 +215,13 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
}
assert(Literal.isIntegerLiteral() && "Unknown ppnumber");
+ // Complain about, and drop, any ud-suffix.
+ if (Literal.hasUDSuffix())
+ PP.Diag(PeekTok, diag::err_pp_invalid_udl) << /*integer*/1;
+
// long long is a C99 feature.
- if (!PP.getLangOptions().C99 && Literal.isLongLong)
- PP.Diag(PeekTok, PP.getLangOptions().CPlusPlus0x ?
+ if (!PP.getLangOpts().C99 && Literal.isLongLong)
+ PP.Diag(PeekTok, PP.getLangOpts().CPlusPlus0x ?
diag::warn_cxx98_compat_longlong : diag::ext_longlong);
// Parse the integer literal into Result.
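The new hasUDSuffix() check above rejects C++11 user-defined literals in preprocessor expressions; there is no way to call a literal operator at preprocessing time, so the suffix is diagnosed and dropped:

    #if 100_km > 50   // error: user-defined literal in preprocessor
    #endif            // expression (err_pp_invalid_udl); evaluation
                      // continues with the plain value 100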
@@ -251,7 +255,11 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
case tok::wide_char_constant: { // L'x'
case tok::utf16_char_constant: // u'x'
case tok::utf32_char_constant: // U'x'
- llvm::SmallString<32> CharBuffer;
+ // Complain about, and drop, any ud-suffix.
+ if (PeekTok.hasUDSuffix())
+ PP.Diag(PeekTok, diag::err_pp_invalid_udl) << /*character*/0;
+
+ SmallString<32> CharBuffer;
bool CharInvalid = false;
StringRef ThisTok = PP.getSpelling(PeekTok, CharBuffer, &CharInvalid);
if (CharInvalid)
@@ -282,7 +290,7 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
Val = Literal.getValue();
// Set the signedness. UTF-16 and UTF-32 are always unsigned
if (!Literal.isUTF16() && !Literal.isUTF32())
- Val.setIsUnsigned(!PP.getLangOptions().CharIsSigned);
+ Val.setIsUnsigned(!PP.getLangOpts().CharIsSigned);
if (Result.Val.getBitWidth() > Val.getBitWidth()) {
Result.Val = Val.extend(Result.Val.getBitWidth());
@@ -646,7 +654,7 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
case tok::comma:
// Comma is invalid in pp expressions in c89/c++ mode, but is valid in C99
// if not being evaluated.
- if (!PP.getLangOptions().C99 || ValueLive)
+ if (!PP.getLangOpts().C99 || ValueLive)
PP.Diag(OpLoc, diag::ext_pp_comma_expr)
<< LHS.getRange() << RHS.getRange();
Res = RHS.Val; // LHS = LHS,RHS -> RHS.
@@ -703,8 +711,6 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
LHS.Val = Res;
LHS.setEnd(RHS.getRange().getEnd());
}
-
- return false;
}
/// EvaluateDirectiveExpression - Evaluate an integer constant expression that
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
index 25a98ae..b6689df 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -16,8 +16,12 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PathV2.h"
+#include "llvm/ADT/StringSwitch.h"
using namespace clang;
PPCallbacks::~PPCallbacks() {}
@@ -198,6 +202,31 @@ void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
CurLexerKind = CLK_TokenLexer;
}
+/// \brief Compute the relative path that names the given file relative to
+/// the given directory.
+static void computeRelativePath(FileManager &FM, const DirectoryEntry *Dir,
+ const FileEntry *File,
+ SmallString<128> &Result) {
+ Result.clear();
+
+ StringRef FilePath = File->getDir()->getName();
+ StringRef Path = FilePath;
+ while (!Path.empty()) {
+ if (const DirectoryEntry *CurDir = FM.getDirectory(Path)) {
+ if (CurDir == Dir) {
+ Result = FilePath.substr(Path.size());
+ llvm::sys::path::append(Result,
+ llvm::sys::path::filename(File->getName()));
+ return;
+ }
+ }
+
+ Path = llvm::sys::path::parent_path(Path);
+ }
+
+ Result = File->getName();
+}
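A worked example of computeRelativePath with hypothetical paths: for Dir = /usr/include/Foo and File = /usr/include/Foo/Bar/Baz.h, the loop trims the file's directory one component at a time until it equals Dir, yielding Result = "Bar/Baz.h"; if no ancestor of the file's directory matches Dir, Result falls back to the file's full name.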
+
/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
/// the current file. This either returns the EOF token or pops a level off
/// the include stack and keeps going.
@@ -216,8 +245,11 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
}
}
- // Complain about reaching an EOF within arc_cf_code_audited.
- if (PragmaARCCFCodeAuditedLoc.isValid()) {
+ // Complain about reaching a true EOF within arc_cf_code_audited.
+ // We don't want to complain about reaching the end of a macro
+ // instantiation or a _Pragma.
+ if (PragmaARCCFCodeAuditedLoc.isValid() &&
+ !isEndOfMacro && !(CurLexer && CurLexer->Is_PragmaLexer)) {
Diag(PragmaARCCFCodeAuditedLoc, diag::err_pp_eof_in_arc_cf_code_audited);
// Recover by leaving immediately.
@@ -296,15 +328,17 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
CurLexer->BufferPtr = EndPos;
CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
- // We're done with the #included file.
- CurLexer.reset();
+ if (!isIncrementalProcessingEnabled())
+ // We're done with lexing.
+ CurLexer.reset();
} else {
assert(CurPTHLexer && "Got EOF but no current lexer set!");
CurPTHLexer->getEOF(Result);
CurPTHLexer.reset();
}
-
- CurPPLexer = 0;
+
+ if (!isIncrementalProcessingEnabled())
+ CurPPLexer = 0;
// This is the end of the top-level file. 'WarnUnusedMacroLocs' has collected
// all macro locations that we need to warn about because they are not used.
@@ -312,6 +346,48 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
I=WarnUnusedMacroLocs.begin(), E=WarnUnusedMacroLocs.end(); I!=E; ++I)
Diag(*I, diag::pp_macro_not_used);
+ // If we are building a module that has an umbrella header, make sure that
+ // each of the headers within the directory covered by the umbrella header
+ // was actually included by the umbrella header.
+ if (Module *Mod = getCurrentModule()) {
+ if (Mod->getUmbrellaHeader()) {
+ SourceLocation StartLoc
+ = SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+
+ if (getDiagnostics().getDiagnosticLevel(
+ diag::warn_uncovered_module_header,
+ StartLoc) != DiagnosticsEngine::Ignored) {
+ ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
+ typedef llvm::sys::fs::recursive_directory_iterator
+ recursive_directory_iterator;
+ const DirectoryEntry *Dir = Mod->getUmbrellaDir();
+ llvm::error_code EC;
+ for (recursive_directory_iterator Entry(Dir->getName(), EC), End;
+ Entry != End && !EC; Entry.increment(EC)) {
+ using llvm::StringSwitch;
+
+ // Check whether this entry has an extension typically associated with
+ // headers.
+ if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->path()))
+ .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Default(false))
+ continue;
+
+ if (const FileEntry *Header = getFileManager().getFile(Entry->path()))
+ if (!getSourceManager().hasFileInfo(Header)) {
+ if (!ModMap.isHeaderInUnavailableModule(Header)) {
+ // Find the relative path that would access this header.
+ SmallString<128> RelativePath;
+ computeRelativePath(FileMgr, Dir, Header, RelativePath);
+ Diag(StartLoc, diag::warn_uncovered_module_header)
+ << RelativePath;
+ }
+ }
+ }
+ }
+ }
+ }
+
return true;
}
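A sketch of what the new loop diagnoses, with hypothetical file names. Suppose module Foo has umbrella header Foo/Foo.h:

    // Foo/Foo.h (umbrella header)
    #include "Bar.h"
    // Foo/Baz.h also sits under the umbrella directory but is never
    // included, so at end-of-file warn_uncovered_module_header is emitted
    // with the relative path "Baz.h" computed by computeRelativePath().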
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
index e10c95c..fe70585 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -24,7 +24,7 @@
#include "clang/Lex/LiteralSupport.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Config/config.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdio>
@@ -47,13 +47,18 @@ MacroInfo *Preprocessor::getInfoForMacro(IdentifierInfo *II) const {
/// setMacroInfo - Specify a macro for this identifier.
///
-void Preprocessor::setMacroInfo(IdentifierInfo *II, MacroInfo *MI) {
+void Preprocessor::setMacroInfo(IdentifierInfo *II, MacroInfo *MI,
+ bool LoadedFromAST) {
if (MI) {
Macros[II] = MI;
II->setHasMacroDefinition(true);
+ if (II->isFromAST() && !LoadedFromAST)
+ II->setChangedSinceDeserialization();
} else if (II->hasMacroDefinition()) {
Macros.erase(II);
II->setHasMacroDefinition(false);
+ if (II->isFromAST() && !LoadedFromAST)
+ II->setChangedSinceDeserialization();
}
}
@@ -96,7 +101,7 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__has_warning = RegisterBuiltinMacro(*this, "__has_warning");
// Microsoft Extensions.
- if (Features.MicrosoftExt)
+ if (LangOpts.MicrosoftExt)
Ident__pragma = RegisterBuiltinMacro(*this, "__pragma");
else
Ident__pragma = 0;
@@ -112,6 +117,11 @@ static bool isTrivialSingleTokenExpansion(const MacroInfo *MI,
// If the token isn't an identifier, it's always literally expanded.
if (II == 0) return true;
+ // If the information about this identifier is out of date, update it from
+ // the external source.
+ if (II->isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(*II);
+
// If the identifier is a macro, and if that macro is enabled, it may be
// expanded so it's not a trivial expansion.
if (II->hasMacroDefinition() && PP.getMacroInfo(II)->isEnabled() &&
@@ -296,8 +306,10 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
// unexpandable.
if (IdentifierInfo *NewII = Identifier.getIdentifierInfo()) {
if (MacroInfo *NewMI = getMacroInfo(NewII))
- if (!NewMI->isEnabled() || NewMI == MI)
+ if (!NewMI->isEnabled() || NewMI == MI) {
Identifier.setFlag(Token::DisableExpand);
+ Diag(Identifier, diag::pp_disabled_macro_expansion);
+ }
}
// Since this is not an identifier token, it can't be macro expanded, so
@@ -421,8 +433,8 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
// Empty arguments are standard in C99 and C++0x, and are supported as an extension in
// other modes.
- if (ArgTokens.size() == ArgTokenStart && !Features.C99)
- Diag(Tok, Features.CPlusPlus0x ?
+ if (ArgTokens.size() == ArgTokenStart && !LangOpts.C99)
+ Diag(Tok, LangOpts.CPlusPlus0x ?
diag::warn_cxx98_compat_empty_fnmacro_arg :
diag::ext_empty_fnmacro_arg);
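For example (macro name hypothetical):

    #define STR(x) #x
    const char *s = STR();   // empty argument, expands to ""; standard in
                             // C99/C++11, otherwise diagnosed via
                             // ext_empty_fnmacro_arg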
@@ -576,9 +588,15 @@ static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
/// HasFeature - Return true if we recognize and implement the feature
/// specified by the identifier as a standard language feature.
static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
- const LangOptions &LangOpts = PP.getLangOptions();
+ const LangOptions &LangOpts = PP.getLangOpts();
+ StringRef Feature = II->getName();
- return llvm::StringSwitch<bool>(II->getName())
+ // Normalize the feature name, __foo__ becomes foo.
+ if (Feature.startswith("__") && Feature.endswith("__") && Feature.size() >= 4)
+ Feature = Feature.substr(2, Feature.size() - 4);
+
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("address_sanitizer", LangOpts.AddressSanitizer)
.Case("attribute_analyzer_noreturn", true)
.Case("attribute_availability", true)
.Case("attribute_cf_returns_not_retained", true)
@@ -603,48 +621,60 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("objc_arc", LangOpts.ObjCAutoRefCount)
.Case("objc_arc_weak", LangOpts.ObjCAutoRefCount &&
LangOpts.ObjCRuntimeHasWeak)
+ .Case("objc_default_synthesize_properties", LangOpts.ObjC2)
.Case("objc_fixed_enum", LangOpts.ObjC2)
.Case("objc_instancetype", LangOpts.ObjC2)
+ .Case("objc_modules", LangOpts.ObjC2 && LangOpts.Modules)
.Case("objc_nonfragile_abi", LangOpts.ObjCNonFragileABI)
.Case("objc_weak_class", LangOpts.ObjCNonFragileABI)
.Case("ownership_holds", true)
.Case("ownership_returns", true)
.Case("ownership_takes", true)
- // C1X features
- .Case("c_alignas", LangOpts.C1X)
- .Case("c_generic_selections", LangOpts.C1X)
- .Case("c_static_assert", LangOpts.C1X)
- // C++0x features
+ .Case("objc_bool", true)
+ .Case("objc_subscripting", LangOpts.ObjCNonFragileABI)
+ .Case("objc_array_literals", LangOpts.ObjC2)
+ .Case("objc_dictionary_literals", LangOpts.ObjC2)
+ .Case("arc_cf_code_audited", true)
+ // C11 features
+ .Case("c_alignas", LangOpts.C11)
+ .Case("c_atomic", LangOpts.C11)
+ .Case("c_generic_selections", LangOpts.C11)
+ .Case("c_static_assert", LangOpts.C11)
+ // C++11 features
.Case("cxx_access_control_sfinae", LangOpts.CPlusPlus0x)
.Case("cxx_alias_templates", LangOpts.CPlusPlus0x)
.Case("cxx_alignas", LangOpts.CPlusPlus0x)
+ .Case("cxx_atomic", LangOpts.CPlusPlus0x)
.Case("cxx_attributes", LangOpts.CPlusPlus0x)
.Case("cxx_auto_type", LangOpts.CPlusPlus0x)
- //.Case("cxx_constexpr", false);
+ .Case("cxx_constexpr", LangOpts.CPlusPlus0x)
.Case("cxx_decltype", LangOpts.CPlusPlus0x)
+ .Case("cxx_decltype_incomplete_return_types", LangOpts.CPlusPlus0x)
.Case("cxx_default_function_template_args", LangOpts.CPlusPlus0x)
+ .Case("cxx_defaulted_functions", LangOpts.CPlusPlus0x)
.Case("cxx_delegating_constructors", LangOpts.CPlusPlus0x)
.Case("cxx_deleted_functions", LangOpts.CPlusPlus0x)
.Case("cxx_explicit_conversions", LangOpts.CPlusPlus0x)
- //.Case("cxx_generalized_initializers", LangOpts.CPlusPlus0x)
+ .Case("cxx_generalized_initializers", LangOpts.CPlusPlus0x)
.Case("cxx_implicit_moves", LangOpts.CPlusPlus0x)
//.Case("cxx_inheriting_constructors", false)
.Case("cxx_inline_namespaces", LangOpts.CPlusPlus0x)
- //.Case("cxx_lambdas", false)
+ .Case("cxx_lambdas", LangOpts.CPlusPlus0x)
+ .Case("cxx_local_type_template_args", LangOpts.CPlusPlus0x)
.Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus0x)
.Case("cxx_noexcept", LangOpts.CPlusPlus0x)
.Case("cxx_nullptr", LangOpts.CPlusPlus0x)
.Case("cxx_override_control", LangOpts.CPlusPlus0x)
.Case("cxx_range_for", LangOpts.CPlusPlus0x)
- //.Case("cxx_raw_string_literals", false)
+ .Case("cxx_raw_string_literals", LangOpts.CPlusPlus0x)
.Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus0x)
.Case("cxx_rvalue_references", LangOpts.CPlusPlus0x)
.Case("cxx_strong_enums", LangOpts.CPlusPlus0x)
.Case("cxx_static_assert", LangOpts.CPlusPlus0x)
.Case("cxx_trailing_return", LangOpts.CPlusPlus0x)
- //.Case("cxx_unicode_literals", false)
- //.Case("cxx_unrestricted_unions", false)
- //.Case("cxx_user_literals", false)
+ .Case("cxx_unicode_literals", LangOpts.CPlusPlus0x)
+ .Case("cxx_unrestricted_unions", LangOpts.CPlusPlus0x)
+ .Case("cxx_user_literals", LangOpts.CPlusPlus0x)
.Case("cxx_variadic_templates", LangOpts.CPlusPlus0x)
// Type traits
.Case("has_nothrow_assign", LangOpts.CPlusPlus)
@@ -668,6 +698,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
PP.getIdentifierInfo("__is_empty")->getTokenID()
!= tok::identifier)
.Case("is_enum", LangOpts.CPlusPlus)
+ .Case("is_final", LangOpts.CPlusPlus)
.Case("is_literal", LangOpts.CPlusPlus)
.Case("is_standard_layout", LangOpts.CPlusPlus)
// __is_pod is available only if the horrible
@@ -680,8 +711,11 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
!= tok::identifier)
.Case("is_polymorphic", LangOpts.CPlusPlus)
.Case("is_trivial", LangOpts.CPlusPlus)
+ .Case("is_trivially_assignable", LangOpts.CPlusPlus)
+ .Case("is_trivially_constructible", LangOpts.CPlusPlus)
.Case("is_trivially_copyable", LangOpts.CPlusPlus)
.Case("is_union", LangOpts.CPlusPlus)
+ .Case("modules", LangOpts.Modules)
.Case("tls", PP.getTargetInfo().isTLSSupported())
.Case("underlying_type", LangOpts.CPlusPlus)
.Default(false);
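With the normalization above, the reserved double-underscore spelling of a feature name behaves exactly like the plain one, so headers can query features without worrying that the plain identifier is defined as a macro:

    #if __has_feature(__cxx_lambdas__)   // normalized to cxx_lambdas
      // ...
    #endif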
@@ -700,19 +734,28 @@ static bool HasExtension(const Preprocessor &PP, const IdentifierInfo *II) {
DiagnosticsEngine::Ext_Error)
return false;
- const LangOptions &LangOpts = PP.getLangOptions();
+ const LangOptions &LangOpts = PP.getLangOpts();
+ StringRef Extension = II->getName();
+
+ // Normalize the extension name, __foo__ becomes foo.
+ if (Extension.startswith("__") && Extension.endswith("__") &&
+ Extension.size() >= 4)
+ Extension = Extension.substr(2, Extension.size() - 4);
// Because we inherit the feature list from HasFeature, this string switch
// must be less restrictive than HasFeature's.
- return llvm::StringSwitch<bool>(II->getName())
- // C1X features supported by other languages as extensions.
+ return llvm::StringSwitch<bool>(Extension)
+ // C11 features supported by other languages as extensions.
.Case("c_alignas", true)
+ .Case("c_atomic", true)
.Case("c_generic_selections", true)
.Case("c_static_assert", true)
// C++0x features supported by other languages as extensions.
+ .Case("cxx_atomic", LangOpts.CPlusPlus)
.Case("cxx_deleted_functions", LangOpts.CPlusPlus)
.Case("cxx_explicit_conversions", LangOpts.CPlusPlus)
.Case("cxx_inline_namespaces", LangOpts.CPlusPlus)
+ .Case("cxx_local_type_template_args", LangOpts.CPlusPlus)
.Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus)
.Case("cxx_override_control", LangOpts.CPlusPlus)
.Case("cxx_range_for", LangOpts.CPlusPlus)
@@ -724,7 +767,12 @@ static bool HasExtension(const Preprocessor &PP, const IdentifierInfo *II) {
/// HasAttribute - Return true if we recognize and implement the attribute
/// specified by the given identifier.
static bool HasAttribute(const IdentifierInfo *II) {
- return llvm::StringSwitch<bool>(II->getName())
+ StringRef Name = II->getName();
+ // Normalize the attribute name, __foo__ becomes foo.
+ if (Name.startswith("__") && Name.endswith("__") && Name.size() >= 4)
+ Name = Name.substr(2, Name.size() - 4);
+
+ return llvm::StringSwitch<bool>(Name)
#include "clang/Lex/AttrSpellings.inc"
.Default(false);
}
@@ -753,7 +801,7 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
PP.getCurrentLexer()->LexIncludeFilename(Tok);
// Reserve a buffer to get the spelling.
- llvm::SmallString<128> FilenameBuffer;
+ SmallString<128> FilenameBuffer;
StringRef Filename;
SourceLocation EndLoc;
@@ -784,6 +832,16 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
return false;
}
+ // Get ')'.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a trailing ).
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pp_missing_rparen) << II->getName();
+ PP.Diag(LParenLoc, diag::note_matching) << "(";
+ return false;
+ }
+
bool isAngled = PP.GetIncludeFilenameSpelling(Tok.getLocation(), Filename);
// If GetIncludeFilenameSpelling set the start ptr to null, there was an
// error.
@@ -795,20 +853,8 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
const FileEntry *File =
PP.LookupFile(Filename, isAngled, LookupFrom, CurDir, NULL, NULL, NULL);
- // Get the result value. Result = true means the file exists.
- bool Result = File != 0;
-
- // Get ')'.
- PP.LexNonComment(Tok);
-
- // Ensure we have a trailing ).
- if (Tok.isNot(tok::r_paren)) {
- PP.Diag(Tok.getLocation(), diag::err_pp_missing_rparen) << II->getName();
- PP.Diag(LParenLoc, diag::note_matching) << "(";
- return false;
- }
-
- return Result;
+ // Get the result value. A result of true means the file exists.
+ return File != 0;
}
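Typical use of the builtin this function implements; with the reordering above, a missing ')' is now diagnosed before any file lookup takes place:

    #if __has_include(<stdint.h>)   // true iff the header can be found
    #  include <stdint.h>
    #endif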
/// EvaluateHasInclude - Process a '__has_include("path")' expression.
@@ -855,7 +901,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
++NumBuiltinMacroExpanded;
- llvm::SmallString<128> TmpBuffer;
+ SmallString<128> TmpBuffer;
llvm::raw_svector_ostream OS(TmpBuffer);
// Set up the return result.
@@ -902,7 +948,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
}
// Escape this filename. Turn '\' -> '\\' '"' -> '\"'
- llvm::SmallString<128> FN;
+ SmallString<128> FN;
if (PLoc.isValid()) {
FN += PLoc.getFilename();
Lexer::Stringify(FN);
@@ -1010,7 +1056,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
}
OS << (int)Value;
- Tok.setKind(tok::numeric_constant);
+ if (IsValid)
+ Tok.setKind(tok::numeric_constant);
} else if (II == Ident__has_include ||
II == Ident__has_include_next) {
// The argument to these two builtins should be a parenthesized
@@ -1049,6 +1096,9 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// from macro expansion.
SmallVector<Token, 4> StrToks;
while (Tok.is(tok::string_literal)) {
+ // Complain about, and drop, any ud-suffix.
+ if (Tok.hasUDSuffix())
+ Diag(Tok, diag::err_invalid_string_udl);
StrToks.push_back(Tok);
LexUnexpandedToken(Tok);
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
index e0c4cf0..f104f96 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
@@ -438,7 +438,7 @@ static void InvalidPTH(DiagnosticsEngine &Diags, const char *Msg) {
PTHManager *PTHManager::Create(const std::string &file,
DiagnosticsEngine &Diags) {
// Memory map the PTH file.
- llvm::OwningPtr<llvm::MemoryBuffer> File;
+ OwningPtr<llvm::MemoryBuffer> File;
if (llvm::MemoryBuffer::getFile(file, File)) {
// FIXME: Add ec.message() to this diag.
@@ -488,7 +488,7 @@ PTHManager *PTHManager::Create(const std::string &file,
return 0; // FIXME: Proper error diagnostic?
}
- llvm::OwningPtr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
+ OwningPtr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
// Warn if the PTH file is empty. We still want to create a PTHManager
// as the PTH could be used with -include-pth.
@@ -514,7 +514,7 @@ PTHManager *PTHManager::Create(const std::string &file,
return 0;
}
- llvm::OwningPtr<PTHStringIdLookup> SL(PTHStringIdLookup::Create(StringIdTable,
+ OwningPtr<PTHStringIdLookup> SL(PTHStringIdLookup::Create(StringIdTable,
BufBeg));
// Get the location of the spelling cache.
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
index f6532c2..e2a192b 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -115,10 +115,61 @@ void Preprocessor::HandlePragmaDirective(unsigned Introducer) {
DiscardUntilEndOfDirective();
}
+namespace {
+/// \brief Helper class for \see Preprocessor::Handle_Pragma.
+class LexingFor_PragmaRAII {
+ Preprocessor &PP;
+ bool InMacroArgPreExpansion;
+ bool Failed;
+ Token &OutTok;
+ Token PragmaTok;
+
+public:
+ LexingFor_PragmaRAII(Preprocessor &PP, bool InMacroArgPreExpansion,
+ Token &Tok)
+ : PP(PP), InMacroArgPreExpansion(InMacroArgPreExpansion),
+ Failed(false), OutTok(Tok) {
+ if (InMacroArgPreExpansion) {
+ PragmaTok = OutTok;
+ PP.EnableBacktrackAtThisPos();
+ }
+ }
+
+ ~LexingFor_PragmaRAII() {
+ if (InMacroArgPreExpansion) {
+ if (Failed) {
+ PP.CommitBacktrackedTokens();
+ } else {
+ PP.Backtrack();
+ OutTok = PragmaTok;
+ }
+ }
+ }
+
+ void failed() {
+ Failed = true;
+ }
+};
+}
+
/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
/// return the first token after the directive. The _Pragma token has just
/// been read into 'Tok'.
void Preprocessor::Handle_Pragma(Token &Tok) {
+
+ // This works differently if we are pre-expanding a macro argument.
+ // In that case we don't actually "activate" the pragma now, we only lex it
+ // until we are sure it is lexically correct and then we backtrack so that
+ // we activate the pragma whenever we encounter the tokens again in the token
+ // stream. This ensures that we will activate it in the correct location
+ // or that we will ignore it if it never enters the token stream, e.g.:
+ //
+ // #define EMPTY(x)
+ // #define INACTIVE(x) EMPTY(x)
+ // INACTIVE(_Pragma("clang diagnostic ignored \"-Wconversion\""))
+
+ LexingFor_PragmaRAII _PragmaLexing(*this, InMacroArgPreExpansion, Tok);
+
// Remember the pragma token location.
SourceLocation PragmaLoc = Tok.getLocation();
@@ -126,27 +177,45 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
Lex(Tok);
if (Tok.isNot(tok::l_paren)) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
- return;
+ return _PragmaLexing.failed();
}
// Read the '"..."'.
Lex(Tok);
if (Tok.isNot(tok::string_literal) && Tok.isNot(tok::wide_string_literal)) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
- return;
+ // Skip this token, and the ')', if present.
+ if (Tok.isNot(tok::r_paren))
+ Lex(Tok);
+ if (Tok.is(tok::r_paren))
+ Lex(Tok);
+ return _PragmaLexing.failed();
+ }
+
+ if (Tok.hasUDSuffix()) {
+ Diag(Tok, diag::err_invalid_string_udl);
+ // Skip this token, and the ')', if present.
+ Lex(Tok);
+ if (Tok.is(tok::r_paren))
+ Lex(Tok);
+ return _PragmaLexing.failed();
}
// Remember the string.
- std::string StrVal = getSpelling(Tok);
+ Token StrTok = Tok;
// Read the ')'.
Lex(Tok);
if (Tok.isNot(tok::r_paren)) {
Diag(PragmaLoc, diag::err__Pragma_malformed);
- return;
+ return _PragmaLexing.failed();
}
+ if (InMacroArgPreExpansion)
+ return;
+
SourceLocation RParenLoc = Tok.getLocation();
+ std::string StrVal = getSpelling(StrTok);
// The _Pragma is lexically sound. Destringize according to C99 6.10.9.1:
// "The string literal is destringized by deleting the L prefix, if present,
@@ -304,6 +373,8 @@ void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
// Finally, poison it!
II->setIsPoisoned();
+ if (II->isFromAST())
+ II->setChangedSinceDeserialization();
}
}
@@ -351,7 +422,7 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
return;
// Reserve a buffer to get the spelling.
- llvm::SmallString<128> FilenameBuffer;
+ SmallString<128> FilenameBuffer;
bool Invalid = false;
StringRef Filename = getSpelling(FilenameTok, FilenameBuffer, &Invalid);
if (Invalid)
@@ -440,6 +511,8 @@ void Preprocessor::HandlePragmaComment(Token &Tok) {
// "foo " "bar" "Baz"
SmallVector<Token, 4> StrToks;
while (Tok.is(tok::string_literal)) {
+ if (Tok.hasUDSuffix())
+ Diag(Tok, diag::err_invalid_string_udl);
StrToks.push_back(Tok);
Lex(Tok);
}
@@ -516,6 +589,8 @@ void Preprocessor::HandlePragmaMessage(Token &Tok) {
// "foo " "bar" "Baz"
SmallVector<Token, 4> StrToks;
while (Tok.is(tok::string_literal)) {
+ if (Tok.hasUDSuffix())
+ Diag(Tok, diag::err_invalid_string_udl);
StrToks.push_back(Tok);
Lex(Tok);
}
@@ -575,6 +650,11 @@ IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
return 0;
}
+ if (Tok.hasUDSuffix()) {
+ Diag(Tok, diag::err_invalid_string_udl);
+ return 0;
+ }
+
// Remember the macro string.
std::string StrVal = getSpelling(Tok);
@@ -661,6 +741,111 @@ void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
}
}
+void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) {
+ // We will either get a quoted filename or a bracketed filename, and we
+ // have to track which we got. The first filename is the source name,
+ // and the second name is the mapped filename. If the first is quoted,
+ // the second must be as well (cannot mix and match quotes and brackets).
+
+ // Get the open paren
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << "(";
+ return;
+ }
+
+ // We expect either a quoted string literal, or a bracketed name
+ Token SourceFilenameTok;
+ CurPPLexer->LexIncludeFilename(SourceFilenameTok);
+ if (SourceFilenameTok.is(tok::eod)) {
+ // The diagnostic has already been handled
+ return;
+ }
+
+ StringRef SourceFileName;
+ SmallString<128> FileNameBuffer;
+ if (SourceFilenameTok.is(tok::string_literal) ||
+ SourceFilenameTok.is(tok::angle_string_literal)) {
+ SourceFileName = getSpelling(SourceFilenameTok, FileNameBuffer);
+ } else if (SourceFilenameTok.is(tok::less)) {
+ // This could be a path instead of just a name
+ FileNameBuffer.push_back('<');
+ SourceLocation End;
+ if (ConcatenateIncludeName(FileNameBuffer, End))
+ return; // Diagnostic already emitted
+ SourceFileName = FileNameBuffer.str();
+ } else {
+ Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
+ return;
+ }
+ FileNameBuffer.clear();
+
+ // Now we expect a comma, followed by another include name
+ Lex(Tok);
+ if (Tok.isNot(tok::comma)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << ",";
+ return;
+ }
+
+ Token ReplaceFilenameTok;
+ CurPPLexer->LexIncludeFilename(ReplaceFilenameTok);
+ if (ReplaceFilenameTok.is(tok::eod)) {
+ // The diagnostic has already been handled
+ return;
+ }
+
+ StringRef ReplaceFileName;
+ if (ReplaceFilenameTok.is(tok::string_literal) ||
+ ReplaceFilenameTok.is(tok::angle_string_literal)) {
+ ReplaceFileName = getSpelling(ReplaceFilenameTok, FileNameBuffer);
+ } else if (ReplaceFilenameTok.is(tok::less)) {
+ // This could be a path instead of just a name
+ FileNameBuffer.push_back('<');
+ SourceLocation End;
+ if (ConcatenateIncludeName(FileNameBuffer, End))
+ return; // Diagnostic already emitted
+ ReplaceFileName = FileNameBuffer.str();
+ } else {
+ Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
+ return;
+ }
+
+ // Finally, we expect the closing paren
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << ")";
+ return;
+ }
+
+ // Now that we have the source and target filenames, we need to make sure
+ // they're both of the same type (angled vs non-angled)
+ StringRef OriginalSource = SourceFileName;
+
+ bool SourceIsAngled =
+ GetIncludeFilenameSpelling(SourceFilenameTok.getLocation(),
+ SourceFileName);
+ bool ReplaceIsAngled =
+ GetIncludeFilenameSpelling(ReplaceFilenameTok.getLocation(),
+ ReplaceFileName);
+ if (!SourceFileName.empty() && !ReplaceFileName.empty() &&
+ (SourceIsAngled != ReplaceIsAngled)) {
+ unsigned int DiagID;
+ if (SourceIsAngled)
+ DiagID = diag::warn_pragma_include_alias_mismatch_angle;
+ else
+ DiagID = diag::warn_pragma_include_alias_mismatch_quote;
+
+ Diag(SourceFilenameTok.getLocation(), DiagID)
+ << SourceFileName
+ << ReplaceFileName;
+
+ return;
+ }
+
+ // Now we can let the include handler know about this mapping
+ getHeaderSearchInfo().AddIncludeAlias(OriginalSource, ReplaceFileName);
+}
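Usage sketch for the new Microsoft-mode pragma (header names hypothetical). Quoted names map to quoted names and angled to angled; mixing the two forms triggers the mismatch warnings above:

    #pragma include_alias("legacy.h", "modern.h")
    #pragma include_alias(<oldapi.h>, <newapi.h>)
    #include "legacy.h"   // header search actually resolves "modern.h"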
+
/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
/// If 'Namespace' is non-null, then it is a token required to exist on the
/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
@@ -712,8 +897,10 @@ void Preprocessor::RemovePragmaHandler(StringRef Namespace,
// If this is a non-default namespace and it is now empty, remove
// it.
- if (NS != PragmaHandlers && NS->IsEmpty())
+ if (NS != PragmaHandlers && NS->IsEmpty()) {
PragmaHandlers->RemovePragmaHandler(NS);
+ delete NS;
+ }
}
bool Preprocessor::LexOnOffSwitch(tok::OnOffSwitch &Result) {
@@ -939,6 +1126,15 @@ struct PragmaCommentHandler : public PragmaHandler {
}
};
+/// PragmaIncludeAliasHandler - "#pragma include_alias("...")".
+struct PragmaIncludeAliasHandler : public PragmaHandler {
+ PragmaIncludeAliasHandler() : PragmaHandler("include_alias") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &IncludeAliasTok) {
+ PP.HandlePragmaIncludeAlias(IncludeAliasTok);
+ }
+};
+
/// PragmaMessageHandler - "#pragma message("...")".
struct PragmaMessageHandler : public PragmaHandler {
PragmaMessageHandler() : PragmaHandler("message") {}
@@ -1089,7 +1285,8 @@ void Preprocessor::RegisterBuiltinPragmas() {
AddPragmaHandler("STDC", new PragmaSTDC_UnknownHandler());
// MS extensions.
- if (Features.MicrosoftExt) {
+ if (LangOpts.MicrosoftExt) {
AddPragmaHandler(new PragmaCommentHandler());
+ AddPragmaHandler(new PragmaIncludeAliasHandler());
}
}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
index 2816609..89d19fd 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
@@ -38,10 +38,13 @@ InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
}
PreprocessingRecord::PreprocessingRecord(SourceManager &SM,
- bool IncludeNestedMacroExpansions)
- : SourceMgr(SM), IncludeNestedMacroExpansions(IncludeNestedMacroExpansions),
+ bool RecordConditionalDirectives)
+ : SourceMgr(SM),
+ RecordCondDirectives(RecordConditionalDirectives), CondDirectiveNextIdx(0),
ExternalSource(0)
{
+ if (RecordCondDirectives)
+ CondDirectiveStack.push_back(CondDirectiveNextIdx++);
}
/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
@@ -49,35 +52,108 @@ PreprocessingRecord::PreprocessingRecord(SourceManager &SM,
std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator>
PreprocessingRecord::getPreprocessedEntitiesInRange(SourceRange Range) {
if (Range.isInvalid())
- return std::make_pair(iterator(this, 0), iterator(this, 0));
- assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
+ return std::make_pair(iterator(), iterator());
+
+ if (CachedRangeQuery.Range == Range) {
+ return std::make_pair(iterator(this, CachedRangeQuery.Result.first),
+ iterator(this, CachedRangeQuery.Result.second));
+ }
+
+ std::pair<PPEntityID, PPEntityID>
+ Res = getPreprocessedEntitiesInRangeSlow(Range);
+
+ CachedRangeQuery.Range = Range;
+ CachedRangeQuery.Result = Res;
+
+ return std::make_pair(iterator(this, Res.first), iterator(this, Res.second));
+}
+
+static bool isPreprocessedEntityIfInFileID(PreprocessedEntity *PPE, FileID FID,
+ SourceManager &SM) {
+ assert(!FID.isInvalid());
+ if (!PPE)
+ return false;
+
+ SourceLocation Loc = PPE->getSourceRange().getBegin();
+ if (Loc.isInvalid())
+ return false;
+
+ if (SM.isInFileID(SM.getFileLoc(Loc), FID))
+ return true;
+ else
+ return false;
+}
+
+/// \brief Returns true if the preprocessed entity that the \arg PPEI iterator
+/// points to comes from the file \arg FID.
+///
+/// Can be used to avoid implicit deserializations of preallocated
+/// preprocessed entities if we only care about entities of a specific file
+/// and not from files #included in the range given at
+/// \see getPreprocessedEntitiesInRange.
+bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
+ if (FID.isInvalid())
+ return false;
+
+ PPEntityID PPID = PPEI.Position;
+ if (PPID < 0) {
+ assert(unsigned(-PPID-1) < LoadedPreprocessedEntities.size() &&
+ "Out-of bounds loaded preprocessed entity");
+ assert(ExternalSource && "No external source to load from");
+ unsigned LoadedIndex = LoadedPreprocessedEntities.size()+PPID;
+ if (PreprocessedEntity *PPE = LoadedPreprocessedEntities[LoadedIndex])
+ return isPreprocessedEntityIfInFileID(PPE, FID, SourceMgr);
+
+ // See if the external source can see if the entity is in the file without
+ // deserializing it.
+ llvm::Optional<bool>
+ IsInFile = ExternalSource->isPreprocessedEntityInFileID(LoadedIndex, FID);
+ if (IsInFile.hasValue())
+ return IsInFile.getValue();
+
+ // The external source did not provide a definite answer, go and deserialize
+ // the entity to check it.
+ return isPreprocessedEntityIfInFileID(
+ getLoadedPreprocessedEntity(LoadedIndex),
+ FID, SourceMgr);
+ }
+
+ assert(unsigned(PPID) < PreprocessedEntities.size() &&
+ "Out-of bounds local preprocessed entity");
+ return isPreprocessedEntityIfInFileID(PreprocessedEntities[PPID],
+ FID, SourceMgr);
+}
+
+/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
+/// that source range \arg Range encompasses.
+std::pair<PreprocessingRecord::PPEntityID, PreprocessingRecord::PPEntityID>
+PreprocessingRecord::getPreprocessedEntitiesInRangeSlow(SourceRange Range) {
+ assert(Range.isValid());
+ assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
+
std::pair<unsigned, unsigned>
Local = findLocalPreprocessedEntitiesInRange(Range);
-
+
// Check if range spans local entities.
if (!ExternalSource || SourceMgr.isLocalSourceLocation(Range.getBegin()))
- return std::make_pair(iterator(this, Local.first),
- iterator(this, Local.second));
-
+ return std::make_pair(Local.first, Local.second);
+
std::pair<unsigned, unsigned>
Loaded = ExternalSource->findPreprocessedEntitiesInRange(Range);
-
+
// Check if range spans local entities.
if (Loaded.first == Loaded.second)
- return std::make_pair(iterator(this, Local.first),
- iterator(this, Local.second));
-
+ return std::make_pair(Local.first, Local.second);
+
unsigned TotalLoaded = LoadedPreprocessedEntities.size();
-
+
// Check if range spans loaded entities.
if (Local.first == Local.second)
- return std::make_pair(iterator(this, int(Loaded.first)-TotalLoaded),
- iterator(this, int(Loaded.second)-TotalLoaded));
-
+ return std::make_pair(int(Loaded.first)-TotalLoaded,
+ int(Loaded.second)-TotalLoaded);
+
// Range spans loaded and local entities.
- return std::make_pair(iterator(this, int(Loaded.first)-TotalLoaded),
- iterator(this, Local.second));
+ return std::make_pair(int(Loaded.first)-TotalLoaded, Local.second);
}
std::pair<unsigned, unsigned>
@@ -168,33 +244,58 @@ unsigned PreprocessingRecord::findEndLocalPreprocessedEntity(
return I - PreprocessedEntities.begin();
}
-void PreprocessingRecord::addPreprocessedEntity(PreprocessedEntity *Entity) {
+PreprocessingRecord::PPEntityID
+PreprocessingRecord::addPreprocessedEntity(PreprocessedEntity *Entity) {
assert(Entity);
SourceLocation BeginLoc = Entity->getSourceRange().getBegin();
-
+
+ if (!isa<class InclusionDirective>(Entity)) {
+ assert((PreprocessedEntities.empty() ||
+ !SourceMgr.isBeforeInTranslationUnit(BeginLoc,
+ PreprocessedEntities.back()->getSourceRange().getBegin())) &&
+ "a macro directive was encountered out-of-order");
+ PreprocessedEntities.push_back(Entity);
+ return getPPEntityID(PreprocessedEntities.size()-1, /*isLoaded=*/false);
+ }
+
// Check normal case, this entity begin location is after the previous one.
if (PreprocessedEntities.empty() ||
!SourceMgr.isBeforeInTranslationUnit(BeginLoc,
PreprocessedEntities.back()->getSourceRange().getBegin())) {
PreprocessedEntities.push_back(Entity);
- return;
+ return getPPEntityID(PreprocessedEntities.size()-1, /*isLoaded=*/false);
}
- // The entity's location is not after the previous one; this can happen rarely
- // e.g. with "#include MACRO".
- // Iterate the entities vector in reverse until we find the right place to
- // insert the new entity.
- for (std::vector<PreprocessedEntity *>::iterator
- RI = PreprocessedEntities.end(), Begin = PreprocessedEntities.begin();
- RI != Begin; --RI) {
- std::vector<PreprocessedEntity *>::iterator I = RI;
+ // The entity's location is not after the previous one; this can happen with
+ // include directives that form the filename using macros, e.g:
+ // "#include MACRO(STUFF)".
+
+ typedef std::vector<PreprocessedEntity *>::iterator pp_iter;
+
+ // Usually there are few macro expansions when defining the filename, do a
+ // linear search for a few entities.
+ unsigned count = 0;
+ for (pp_iter RI = PreprocessedEntities.end(),
+ Begin = PreprocessedEntities.begin();
+ RI != Begin && count < 4; --RI, ++count) {
+ pp_iter I = RI;
--I;
if (!SourceMgr.isBeforeInTranslationUnit(BeginLoc,
(*I)->getSourceRange().getBegin())) {
- PreprocessedEntities.insert(RI, Entity);
- return;
+ pp_iter insertI = PreprocessedEntities.insert(RI, Entity);
+ return getPPEntityID(insertI - PreprocessedEntities.begin(),
+ /*isLoaded=*/false);
}
}
+
+ // Linear search unsuccessful. Do a binary search.
+ pp_iter I = std::upper_bound(PreprocessedEntities.begin(),
+ PreprocessedEntities.end(),
+ BeginLoc,
+ PPEntityComp<&SourceRange::getBegin>(SourceMgr));
+ pp_iter insertI = PreprocessedEntities.insert(I, Entity);
+ return getPPEntityID(insertI - PreprocessedEntities.begin(),
+ /*isLoaded=*/false);
}
void PreprocessingRecord::SetExternalSource(
@@ -258,7 +359,8 @@ MacroDefinition *PreprocessingRecord::findMacroDefinition(const MacroInfo *MI) {
void PreprocessingRecord::MacroExpands(const Token &Id, const MacroInfo* MI,
SourceRange Range) {
- if (!IncludeNestedMacroExpansions && Id.getLocation().isMacroID())
+ // We don't record nested macro expansions.
+ if (Id.getLocation().isMacroID())
return;
if (MI->isBuiltinMacro())
@@ -274,17 +376,12 @@ void PreprocessingRecord::MacroDefined(const Token &Id,
SourceRange R(MI->getDefinitionLoc(), MI->getDefinitionEndLoc());
MacroDefinition *Def
= new (*this) MacroDefinition(Id.getIdentifierInfo(), R);
- addPreprocessedEntity(Def);
- MacroDefinitions[MI] = getPPEntityID(PreprocessedEntities.size()-1,
- /*isLoaded=*/false);
+ MacroDefinitions[MI] = addPreprocessedEntity(Def);
}
void PreprocessingRecord::MacroUndefined(const Token &Id,
const MacroInfo *MI) {
- llvm::DenseMap<const MacroInfo *, PPEntityID>::iterator Pos
- = MacroDefinitions.find(MI);
- if (Pos != MacroDefinitions.end())
- MacroDefinitions.erase(Pos);
+ MacroDefinitions.erase(MI);
}
void PreprocessingRecord::InclusionDirective(
@@ -317,7 +414,6 @@ void PreprocessingRecord::InclusionDirective(
default:
llvm_unreachable("Unknown include directive kind");
- return;
}
clang::InclusionDirective *ID
@@ -326,6 +422,95 @@ void PreprocessingRecord::InclusionDirective(
addPreprocessedEntity(ID);
}
+bool PreprocessingRecord::rangeIntersectsConditionalDirective(
+ SourceRange Range) const {
+ if (Range.isInvalid())
+ return false;
+
+ CondDirectiveLocsTy::const_iterator
+ low = std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
+ Range.getBegin(), CondDirectiveLoc::Comp(SourceMgr));
+ if (low == CondDirectiveLocs.end())
+ return false;
+
+ if (SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), low->getLoc()))
+ return false;
+
+ CondDirectiveLocsTy::const_iterator
+ upp = std::upper_bound(low, CondDirectiveLocs.end(),
+ Range.getEnd(), CondDirectiveLoc::Comp(SourceMgr));
+ unsigned uppIdx;
+ if (upp != CondDirectiveLocs.end())
+ uppIdx = upp->getIdx();
+ else
+ uppIdx = 0;
+
+ return low->getIdx() != uppIdx;
+}
+
+unsigned PreprocessingRecord::findCondDirectiveIdx(SourceLocation Loc) const {
+ if (Loc.isInvalid())
+ return 0;
+
+ CondDirectiveLocsTy::const_iterator
+ low = std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
+ Loc, CondDirectiveLoc::Comp(SourceMgr));
+ if (low == CondDirectiveLocs.end())
+ return 0;
+ return low->getIdx();
+}
+
+void PreprocessingRecord::addCondDirectiveLoc(CondDirectiveLoc DirLoc) {
+ // Ignore directives in system headers.
+ if (SourceMgr.isInSystemHeader(DirLoc.getLoc()))
+ return;
+
+ assert(CondDirectiveLocs.empty() ||
+ SourceMgr.isBeforeInTranslationUnit(CondDirectiveLocs.back().getLoc(),
+ DirLoc.getLoc()));
+ CondDirectiveLocs.push_back(DirLoc);
+}
+
+void PreprocessingRecord::If(SourceLocation Loc, SourceRange ConditionRange) {
+ if (RecordCondDirectives) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(CondDirectiveNextIdx++);
+ }
+}
+
+void PreprocessingRecord::Ifdef(SourceLocation Loc, const Token &MacroNameTok) {
+ if (RecordCondDirectives) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(CondDirectiveNextIdx++);
+ }
+}
+
+void PreprocessingRecord::Ifndef(SourceLocation Loc,const Token &MacroNameTok) {
+ if (RecordCondDirectives) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(CondDirectiveNextIdx++);
+ }
+}
+
+void PreprocessingRecord::Elif(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) {
+ if (RecordCondDirectives)
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+}
+
+void PreprocessingRecord::Else(SourceLocation Loc, SourceLocation IfLoc) {
+ if (RecordCondDirectives)
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+}
+
+void PreprocessingRecord::Endif(SourceLocation Loc, SourceLocation IfLoc) {
+ if (RecordCondDirectives) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ assert(!CondDirectiveStack.empty());
+ CondDirectiveStack.pop_back();
+ }
+}
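A sketch of the recording scheme, assuming this reading of the code: the top-level region is index 0, each #if/#ifdef/#ifndef records its location with the current index and pushes a fresh one, and #endif records and pops, so rangeIntersectsConditionalDirective() returns true exactly when a range's endpoints fall in regions with different indices:

    #if FOO          // recorded with idx 0, pushes idx 1
    int a;           // inside idx 1
    #endif           // recorded with idx 1, pops back to idx 0
    int b;           // a range from 'int a;' to 'int b;' crosses the
                     // #endif boundary and therefore intersects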
+
size_t PreprocessingRecord::getTotalMemory() const {
return BumpAlloc.getTotalMemory()
+ llvm::capacity_in_bytes(MacroDefinitions)
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
index 31662ad..06e5685 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -40,7 +40,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Capacity.h"
@@ -54,18 +54,19 @@ Preprocessor::Preprocessor(DiagnosticsEngine &diags, LangOptions &opts,
HeaderSearch &Headers, ModuleLoader &TheModuleLoader,
IdentifierInfoLookup* IILookup,
bool OwnsHeaders,
- bool DelayInitialization)
- : Diags(&diags), Features(opts), Target(target),FileMgr(Headers.getFileMgr()),
+ bool DelayInitialization,
+ bool IncrProcessing)
+ : Diags(&diags), LangOpts(opts), Target(target),FileMgr(Headers.getFileMgr()),
SourceMgr(SM), HeaderInfo(Headers), TheModuleLoader(TheModuleLoader),
- ExternalSource(0),
- Identifiers(opts, IILookup), CodeComplete(0),
+ ExternalSource(0), Identifiers(opts, IILookup),
+ IncrementalProcessing(IncrProcessing), CodeComplete(0),
CodeCompletionFile(0), CodeCompletionOffset(0), CodeCompletionReached(0),
SkipMainFilePreamble(0, true), CurPPLexer(0),
CurDirLookup(0), CurLexerKind(CLK_Lexer), Callbacks(0), MacroArgCache(0),
Record(0), MIChainHead(0), MICache(0)
{
OwnsHeaderSearch = OwnsHeaders;
-
+
if (!DelayInitialization) {
assert(Target && "Must provide target information for PP initialization");
Initialize(*Target);
@@ -74,9 +75,6 @@ Preprocessor::Preprocessor(DiagnosticsEngine &diags, LangOptions &opts,
Preprocessor::~Preprocessor() {
assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");
- assert(((MacroExpandingLexersStack.empty() && MacroExpandedTokens.empty()) ||
- isCodeCompletionReached()) &&
- "Preprocessor::HandleEndOfTokenLexer should have cleared those");
while (!IncludeMacroStack.empty()) {
delete IncludeMacroStack.back().TheLexer;
@@ -133,11 +131,11 @@ void Preprocessor::Initialize(const TargetInfo &Target) {
KeepComments = false;
KeepMacroComments = false;
SuppressIncludeNotFoundError = false;
- AutoModuleImport = false;
// Macro expansion is enabled.
DisableMacroExpansion = false;
InMacroArgs = false;
+ InMacroArgPreExpansion = false;
NumCachedTokenLexers = 0;
CachedLexPos = 0;
@@ -157,7 +155,7 @@ void Preprocessor::Initialize(const TargetInfo &Target) {
// Initialize builtin macros like __LINE__ and friends.
RegisterBuiltinMacros();
- if(Features.Borland) {
+ if(LangOpts.Borland) {
Ident__exception_info = getIdentifierInfo("_exception_info");
Ident___exception_info = getIdentifierInfo("__exception_info");
Ident_GetExceptionInfo = getIdentifierInfo("GetExceptionInformation");
@@ -171,7 +169,9 @@ void Preprocessor::Initialize(const TargetInfo &Target) {
Ident__exception_info = Ident__exception_code = Ident__abnormal_termination = 0;
Ident___exception_info = Ident___exception_code = Ident___abnormal_termination = 0;
Ident_GetExceptionInfo = Ident_GetExceptionCode = Ident_AbnormalTermination = 0;
- }
+ }
+
+ HeaderInfo.setTarget(Target);
}
void Preprocessor::setPTHManager(PTHManager* pm) {
@@ -270,6 +270,17 @@ Preprocessor::macro_end(bool IncludeExternalMacros) const {
return Macros.end();
}
+void Preprocessor::recomputeCurLexerKind() {
+ if (CurLexer)
+ CurLexerKind = CLK_Lexer;
+ else if (CurPTHLexer)
+ CurLexerKind = CLK_PTHLexer;
+ else if (CurTokenLexer)
+ CurLexerKind = CLK_TokenLexer;
+ else
+ CurLexerKind = CLK_CachingLexer;
+}
+
bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
unsigned CompleteLine,
unsigned CompleteColumn) {
@@ -372,7 +383,12 @@ void Preprocessor::CreateString(const char *Buf, unsigned Len, Token &Tok,
Tok.setLiteralData(DestPtr);
}
-
+Module *Preprocessor::getCurrentModule() {
+ if (getLangOpts().CurrentModule.empty())
+ return 0;
+
+ return getHeaderSearchInfo().lookupModule(getLangOpts().CurrentModule);
+}
//===----------------------------------------------------------------------===//
// Preprocessor Initialization Methods
@@ -388,19 +404,23 @@ void Preprocessor::EnterMainSourceFile() {
assert(NumEnteredSourceFiles == 0 && "Cannot reenter the main file!");
FileID MainFileID = SourceMgr.getMainFileID();
- // Enter the main file source buffer.
- EnterSourceFile(MainFileID, 0, SourceLocation());
-
- // If we've been asked to skip bytes in the main file (e.g., as part of a
- // precompiled preamble), do so now.
- if (SkipMainFilePreamble.first > 0)
- CurLexer->SkipBytes(SkipMainFilePreamble.first,
- SkipMainFilePreamble.second);
+ // If MainFileID is loaded it means we loaded an AST file, no need to enter
+ // a main file.
+ if (!SourceMgr.isLoadedFileID(MainFileID)) {
+ // Enter the main file source buffer.
+ EnterSourceFile(MainFileID, 0, SourceLocation());
- // Tell the header info that the main file was entered. If the file is later
- // #imported, it won't be re-entered.
- if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
- HeaderInfo.IncrementIncludeCount(FE);
+ // If we've been asked to skip bytes in the main file (e.g., as part of a
+ // precompiled preamble), do so now.
+ if (SkipMainFilePreamble.first > 0)
+ CurLexer->SkipBytes(SkipMainFilePreamble.first,
+ SkipMainFilePreamble.second);
+
+ // Tell the header info that the main file was entered. If the file is later
+ // #imported, it won't be re-entered.
+ if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
+ HeaderInfo.IncrementIncludeCount(FE);
+ }
// Preprocess Predefines to populate the initial preprocessor state.
llvm::MemoryBuffer *SB =
@@ -437,7 +457,7 @@ IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier) const {
Identifier.getLength()));
} else {
// Cleaning needed, alloca a buffer, clean into it, then use the buffer.
- llvm::SmallString<64> IdentifierBuffer;
+ SmallString<64> IdentifierBuffer;
StringRef CleanedStr = getSpelling(Identifier, IdentifierBuffer);
II = getIdentifierInfo(CleanedStr);
}
@@ -492,6 +512,13 @@ void Preprocessor::HandleIdentifier(Token &Identifier) {
IdentifierInfo &II = *Identifier.getIdentifierInfo();
+ // If the information about this identifier is out of date, update it from
+ // the external source.
+ if (II.isOutOfDate()) {
+ ExternalSource->updateOutOfDateIdentifier(II);
+ Identifier.setKind(II.getTokenID());
+ }
+
// If this identifier was poisoned, and if it was not produced from a macro
// expansion, emit an error.
if (II.isPoisoned() && CurPPLexer) {
@@ -500,8 +527,10 @@ void Preprocessor::HandleIdentifier(Token &Identifier) {
// If this is a macro to be expanded, do it.
if (MacroInfo *MI = getMacroInfo(&II)) {
- if (!DisableMacroExpansion && !Identifier.isExpandDisabled()) {
- if (MI->isEnabled()) {
+ if (!DisableMacroExpansion) {
+ if (Identifier.isExpandDisabled()) {
+ Diag(Identifier, diag::pp_disabled_macro_expansion);
+ } else if (MI->isEnabled()) {
if (!HandleMacroExpandedIdentifier(Identifier, MI))
return;
} else {
@@ -509,6 +538,7 @@ void Preprocessor::HandleIdentifier(Token &Identifier) {
// expanded, even if it's in a context where it could be expanded in the
// future.
Identifier.setFlag(Token::DisableExpand);
+ Diag(Identifier, diag::pp_disabled_macro_expansion);
}
}
}
@@ -537,43 +567,59 @@ void Preprocessor::HandleIdentifier(Token &Identifier) {
if (II.isExtensionToken() && !DisableMacroExpansion)
Diag(Identifier, diag::ext_token_used);
- // If this is the '__import_module__' keyword, note that the next token
- // indicates a module name.
- if (II.getTokenID() == tok::kw___import_module__ &&
- !InMacroArgs && !DisableMacroExpansion) {
+ // If this is the '__experimental_modules_import' contextual keyword, note
+ // that the next token indicates a module name.
+ //
+ // Note that we do not treat '__experimental_modules_import' as a contextual
+ // keyword when we're in a caching lexer, because caching lexers only get
+ // used in contexts where import declarations are disallowed.
+ if (II.isModulesImport() && !InMacroArgs && !DisableMacroExpansion &&
+ getLangOpts().Modules && CurLexerKind != CLK_CachingLexer) {
ModuleImportLoc = Identifier.getLocation();
+ ModuleImportPath.clear();
+ ModuleImportExpectsIdentifier = true;
CurLexerKind = CLK_LexAfterModuleImport;
}
}
-/// \brief Lex a token following the __import_module__ keyword.
+/// \brief Lex a token following the 'import' contextual keyword.
+///
void Preprocessor::LexAfterModuleImport(Token &Result) {
// Figure out what kind of lexer we actually have.
- if (CurLexer)
- CurLexerKind = CLK_Lexer;
- else if (CurPTHLexer)
- CurLexerKind = CLK_PTHLexer;
- else if (CurTokenLexer)
- CurLexerKind = CLK_TokenLexer;
- else
- CurLexerKind = CLK_CachingLexer;
+ recomputeCurLexerKind();
// Lex the next token.
Lex(Result);
// The token sequence
//
- // __import_module__ identifier
+ // import identifier (. identifier)*
//
- // indicates a module import directive. We already saw the __import_module__
- // keyword, so now we're looking for the identifier.
- if (Result.getKind() != tok::identifier)
+ // indicates a module import directive. We already saw the 'import'
+ // contextual keyword, so now we're looking for the identifiers.
+ if (ModuleImportExpectsIdentifier && Result.getKind() == tok::identifier) {
+ // We expected to see an identifier here, and we did; continue handling
+ // identifiers.
+ ModuleImportPath.push_back(std::make_pair(Result.getIdentifierInfo(),
+ Result.getLocation()));
+ ModuleImportExpectsIdentifier = false;
+ CurLexerKind = CLK_LexAfterModuleImport;
return;
+ }
- // Load the module.
- (void)TheModuleLoader.loadModule(ModuleImportLoc,
- *Result.getIdentifierInfo(),
- Result.getLocation());
+ // If we're expecting a '.' or a ';', and we got a '.', then wait until we
+ // see the next identifier.
+ if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
+ ModuleImportExpectsIdentifier = true;
+ CurLexerKind = CLK_LexAfterModuleImport;
+ return;
+ }
+
+ // If we have a non-empty module path, load the named module.
+ if (!ModuleImportPath.empty())
+ (void)TheModuleLoader.loadModule(ModuleImportLoc, ModuleImportPath,
+ Module::MacrosVisible,
+ /*IsIncludeDirective=*/false);
}
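// Editor's sketch (not part of the diff): ModuleImportExpectsIdentifier drives
// a two-state machine over "identifier ('.' identifier)*". The same logic in
// miniature, with hypothetical names, independent of the Preprocessor:

#include <cstddef>
#include <string>
#include <vector>

// Returns true and fills Path if Toks spells a dotted module path like a.b.c.
static bool parseModulePath(const std::vector<std::string> &Toks,
                            std::vector<std::string> &Path) {
  bool ExpectsIdentifier = true;
  for (std::size_t I = 0; I != Toks.size(); ++I) {
    if (ExpectsIdentifier) {
      if (Toks[I] == ".")
        return false;              // leading dot, or two dots in a row
      Path.push_back(Toks[I]);
    } else if (Toks[I] != ".") {
      return false;                // identifiers must be '.'-separated
    }
    ExpectsIdentifier = !ExpectsIdentifier;
  }
  return !Path.empty() && !ExpectsIdentifier; // must end on an identifier
}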
void Preprocessor::AddCommentHandler(CommentHandler *Handler) {
@@ -610,12 +656,11 @@ CommentHandler::~CommentHandler() { }
CodeCompletionHandler::~CodeCompletionHandler() { }
-void Preprocessor::createPreprocessingRecord(
- bool IncludeNestedMacroExpansions) {
+void Preprocessor::createPreprocessingRecord(bool RecordConditionalDirectives) {
if (Record)
return;
Record = new PreprocessingRecord(getSourceManager(),
- IncludeNestedMacroExpansions);
+ RecordConditionalDirectives);
addPPCallbacks(Record);
}
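// Editor's note (not part of the diff): the constructor flag changes meaning
// here, from "include nested macro expansions" to "record conditional
// directives". A hypothetical call site under the new signature:
//
//   PP.createPreprocessingRecord(/*RecordConditionalDirectives=*/true);
//   PreprocessingRecord *Rec = PP.getPreprocessingRecord();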
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
index 0da9ef5..a72bbca 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
@@ -17,6 +17,8 @@
#include "clang/Basic/SourceManager.h"
using namespace clang;
+void PreprocessorLexer::anchor() { }
+
PreprocessorLexer::PreprocessorLexer(Preprocessor *pp, FileID fid)
: PP(pp), FID(fid), InitialNumSLocEntries(0),
ParsingPreprocessorDirective(false),
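// Editor's sketch (not part of the diff): an out-of-line anchor() pins a
// class's vtable to a single object file, silencing -Wweak-vtables and
// avoiding duplicate vtable emission. The general pattern, with a
// hypothetical class name:

// In the header:
class PLexerLike {
public:
  virtual ~PLexerLike() {}
private:
  virtual void anchor(); // deliberately defined out of line
};

// In exactly one .cpp file:
void PLexerLike::anchor() {}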
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
index dc6d686..84a46ed 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
@@ -45,7 +45,7 @@ static bool IsStringPrefix(StringRef Str, bool CPlusPlus0x) {
/// IsIdentifierStringPrefix - Return true if the spelling of the token
/// is literally 'L', 'u', 'U', or 'u8', including raw versions.
bool TokenConcatenation::IsIdentifierStringPrefix(const Token &Tok) const {
- const LangOptions &LangOpts = PP.getLangOptions();
+ const LangOptions &LangOpts = PP.getLangOpts();
if (!Tok.needsCleaning()) {
if (Tok.getLength() < 1 || Tok.getLength() > 3)
@@ -85,6 +85,19 @@ TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
TokenInfo[tok::hash ] |= aci_custom_firstchar;
TokenInfo[tok::arrow ] |= aci_custom_firstchar;
+ // These tokens have custom code in C++11 mode.
+ if (PP.getLangOpts().CPlusPlus0x) {
+ TokenInfo[tok::string_literal ] |= aci_custom;
+ TokenInfo[tok::wide_string_literal ] |= aci_custom;
+ TokenInfo[tok::utf8_string_literal ] |= aci_custom;
+ TokenInfo[tok::utf16_string_literal] |= aci_custom;
+ TokenInfo[tok::utf32_string_literal] |= aci_custom;
+ TokenInfo[tok::char_constant ] |= aci_custom;
+ TokenInfo[tok::wide_char_constant ] |= aci_custom;
+ TokenInfo[tok::utf16_char_constant ] |= aci_custom;
+ TokenInfo[tok::utf32_char_constant ] |= aci_custom;
+ }
+
// These tokens change behavior if followed by an '='.
TokenInfo[tok::amp ] |= aci_avoid_equal; // &=
TokenInfo[tok::plus ] |= aci_avoid_equal; // +=
@@ -179,12 +192,32 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
switch (PrevKind) {
default:
llvm_unreachable("InitAvoidConcatTokenInfo built wrong");
- return true;
case tok::raw_identifier:
llvm_unreachable("tok::raw_identifier in non-raw lexing mode!");
- return true;
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ case tok::char_constant:
+ case tok::wide_char_constant:
+ case tok::utf16_char_constant:
+ case tok::utf32_char_constant:
+ if (!PP.getLangOpts().CPlusPlus0x)
+ return false;
+
+ // In C++11, a string or character literal followed by an identifier is a
+ // single token.
+ if (Tok.getIdentifierInfo())
+ return true;
+
+ // A ud-suffix is an identifier. If the previous token ends with one, treat
+ // it as an identifier.
+ if (!PrevTok.hasUDSuffix())
+ return false;
+ // FALL THROUGH.
case tok::identifier: // id+id or id+number or id+L"foo".
// id+'.'... will not append.
if (Tok.is(tok::numeric_constant))
@@ -203,13 +236,15 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
// Otherwise, this is a narrow character or string. If the *identifier*
// is a literal 'L', 'u8', 'u' or 'U', avoid pasting L "foo" -> L"foo".
return IsIdentifierStringPrefix(PrevTok);
+
case tok::numeric_constant:
return isalnum(FirstChar) || Tok.is(tok::numeric_constant) ||
- FirstChar == '+' || FirstChar == '-' || FirstChar == '.';
+ FirstChar == '+' || FirstChar == '-' || FirstChar == '.' ||
+ (PP.getLangOpts().CPlusPlus0x && FirstChar == '_');
case tok::period: // ..., .*, .1234
return (FirstChar == '.' && PrevPrevTok.is(tok::period)) ||
isdigit(FirstChar) ||
- (PP.getLangOptions().CPlusPlus && FirstChar == '*');
+ (PP.getLangOpts().CPlusPlus && FirstChar == '*');
case tok::amp: // &&
return FirstChar == '&';
case tok::plus: // ++
@@ -228,10 +263,10 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
return FirstChar == '>' || FirstChar == ':';
case tok::colon: // ::, :>
return FirstChar == '>' ||
- (PP.getLangOptions().CPlusPlus && FirstChar == ':');
+ (PP.getLangOpts().CPlusPlus && FirstChar == ':');
case tok::hash: // ##, #@, %:%:
return FirstChar == '#' || FirstChar == '@' || FirstChar == '%';
case tok::arrow: // ->*
- return PP.getLangOptions().CPlusPlus && FirstChar == '*';
+ return PP.getLangOpts().CPlusPlus && FirstChar == '*';
}
}
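// Editor's sketch (not part of the diff): C++11 user-defined literals are why
// a literal followed by an identifier must keep its whitespace when tokens
// are printed back: a ud-suffix fuses with the literal. A minimal C++11
// illustration with a hypothetical suffix:

#include <cstddef>
#include <string>

std::string operator"" _tag(const char *S, std::size_t N) {
  return std::string(S, N) + "!";
}

int main() {
  std::string S = "abc"_tag; // one token in C++11: "abc" fused with _tag
  (void)S;                   // "abc" _tag (with a space) would be two tokens
  return 0;
}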
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
index a580544..696754c 100644
--- a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
@@ -17,7 +17,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/LexDiagnostic.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -450,7 +450,7 @@ void TokenLexer::Lex(Token &Tok) {
/// are more ## after it, chomp them iteratively. Return the result as Tok.
/// If this returns true, the caller should immediately return the token.
bool TokenLexer::PasteTokens(Token &Tok) {
- llvm::SmallString<128> Buffer;
+ SmallString<128> Buffer;
const char *ResultTokStrPtr = 0;
SourceLocation StartLoc = Tok.getLocation();
SourceLocation PasteOpLoc;
@@ -527,7 +527,7 @@ bool TokenLexer::PasteTokens(Token &Tok) {
// Make a lexer to lex this string from. Lex just this one token.
// Make a lexer object so that we lex and expand the paste result.
Lexer TL(SourceMgr.getLocForStartOfFile(LocFileID),
- PP.getLangOptions(), ScratchBufStart,
+ PP.getLangOpts(), ScratchBufStart,
ResultTokStrPtr, ResultTokStrPtr+LHSLen+RHSLen);
// Lex a token in raw mode. This way it won't look up identifiers
@@ -546,14 +546,14 @@ bool TokenLexer::PasteTokens(Token &Tok) {
if (isInvalid) {
// Test for the Microsoft extension of /##/ turning into // here on the
// error path.
- if (PP.getLangOptions().MicrosoftExt && Tok.is(tok::slash) &&
+ if (PP.getLangOpts().MicrosoftExt && Tok.is(tok::slash) &&
RHS.is(tok::slash)) {
HandleMicrosoftCommentPaste(Tok);
return true;
}
// Do not emit the error when preprocessing assembler code.
- if (!PP.getLangOptions().AsmPreprocessor) {
+ if (!PP.getLangOpts().AsmPreprocessor) {
// Explicitly convert the token location to have proper expansion
// information so that the user knows where it came from.
SourceManager &SM = PP.getSourceManager();
@@ -563,7 +563,7 @@ bool TokenLexer::PasteTokens(Token &Tok) {
// error to a warning that defaults to an error. This allows
// disabling it.
PP.Diag(Loc,
- PP.getLangOptions().MicrosoftExt ? diag::err_pp_bad_paste_ms
+ PP.getLangOpts().MicrosoftExt ? diag::err_pp_bad_paste_ms
: diag::err_pp_bad_paste)
<< Buffer.str();
}
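// Editor's sketch (not part of the diff): SmallString<N> is a SmallVector of
// char with StringRef conveniences, which is why swapping the include above
// suffices. A typical paste-buffer use, assuming LLVM's ADT headers:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"

void demoPasteBuffer() {
  llvm::SmallString<128> Buffer;                 // stack storage, heap fallback
  (llvm::Twine("lhs") + "rhs").toVector(Buffer); // build the pasted spelling
  llvm::StringRef Pasted = Buffer.str();         // view, no copy
  (void)Pasted;
}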
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
index fdd7d0f..d1c2624 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
@@ -38,23 +38,24 @@ using namespace clang;
void clang::ParseAST(Preprocessor &PP, ASTConsumer *Consumer,
ASTContext &Ctx, bool PrintStats,
TranslationUnitKind TUKind,
- CodeCompleteConsumer *CompletionConsumer) {
+ CodeCompleteConsumer *CompletionConsumer,
+ bool SkipFunctionBodies) {
- llvm::OwningPtr<Sema> S(new Sema(PP, Ctx, *Consumer,
+ OwningPtr<Sema> S(new Sema(PP, Ctx, *Consumer,
TUKind,
CompletionConsumer));
// Recover resources if we crash before exiting this method.
- llvm::CrashRecoveryContextCleanupRegistrar<Sema> CleaupSema(S.get());
+ llvm::CrashRecoveryContextCleanupRegistrar<Sema> CleanupSema(S.get());
- ParseAST(*S.get(), PrintStats);
+ ParseAST(*S.get(), PrintStats, SkipFunctionBodies);
}
-void clang::ParseAST(Sema &S, bool PrintStats) {
+void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
// Collect global stats on Decls/Stmts (until we have a module streamer).
if (PrintStats) {
- Decl::CollectingStats(true);
- Stmt::CollectingStats(true);
+ Decl::EnableStatistics();
+ Stmt::EnableStatistics();
}
// Also turn on collection of stats inside of the Sema object.
@@ -63,14 +64,15 @@ void clang::ParseAST(Sema &S, bool PrintStats) {
ASTConsumer *Consumer = &S.getASTConsumer();
- llvm::OwningPtr<Parser> ParseOP(new Parser(S.getPreprocessor(), S));
+ OwningPtr<Parser> ParseOP(new Parser(S.getPreprocessor(), S,
+ SkipFunctionBodies));
Parser &P = *ParseOP.get();
PrettyStackTraceParserEntry CrashInfo(P);
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<Parser>
- CleaupParser(ParseOP.get());
+ CleanupParser(ParseOP.get());
S.getPreprocessor().EnterMainSourceFile();
P.Initialize();
@@ -79,18 +81,23 @@ void clang::ParseAST(Sema &S, bool PrintStats) {
if (ExternalASTSource *External = S.getASTContext().getExternalSource())
External->StartTranslationUnit(Consumer);
+ bool Abort = false;
Parser::DeclGroupPtrTy ADecl;
while (!P.ParseTopLevelDecl(ADecl)) { // Not end of file.
// If we got a null return and something *was* parsed, ignore it. This
// is due to a top-level semicolon, an action override, or a parse error
// skipping something.
- if (ADecl)
- Consumer->HandleTopLevelDecl(ADecl.get());
+ if (ADecl) {
+ if (!Consumer->HandleTopLevelDecl(ADecl.get())) {
+ Abort = true;
+ break;
+ }
+ }
};
- // Check for any pending objective-c implementation decl.
- while ((ADecl = P.FinishPendingObjCActions()))
- Consumer->HandleTopLevelDecl(ADecl.get());
+
+ if (Abort)
+ return;
// Process any TopLevelDecls generated by #pragma weak.
for (SmallVector<Decl*,2>::iterator
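// Editor's sketch (not part of the diff): HandleTopLevelDecl now returns
// bool, and a false return makes ParseAST stop early via the new Abort path
// above. A minimal consumer honoring the new contract (hypothetical class):

#include "clang/AST/ASTConsumer.h"
#include "clang/AST/DeclGroup.h"

class BailOnRequest : public clang::ASTConsumer {
  bool StopRequested;
public:
  BailOnRequest() : StopRequested(false) {}
  virtual bool HandleTopLevelDecl(clang::DeclGroupRef DG) {
    (void)DG;
    return !StopRequested; // returning false aborts the ParseAST loop
  }
};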
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
index b387e9e..c000f69 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -24,8 +24,10 @@ using namespace clang;
Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
AttributeList *AccessAttrs,
ParsingDeclarator &D,
- const ParsedTemplateInfo &TemplateInfo,
- const VirtSpecifiers& VS, ExprResult& Init) {
+ const ParsedTemplateInfo &TemplateInfo,
+ const VirtSpecifiers& VS,
+ FunctionDefinitionKind DefinitionKind,
+ ExprResult& Init) {
assert(D.isFunctionDeclarator() && "This isn't a function declarator!");
assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try) ||
Tok.is(tok::equal)) &&
@@ -36,20 +38,20 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->size() : 0);
Decl *FnD;
- D.setFunctionDefinition(true);
+ D.setFunctionDefinitionKind(DefinitionKind);
if (D.getDeclSpec().isFriendSpecified())
FnD = Actions.ActOnFriendFunctionDecl(getCurScope(), D,
move(TemplateParams));
else {
FnD = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, D,
move(TemplateParams), 0,
- VS, /*HasInit=*/false);
+ VS, /*HasDeferredInit=*/false);
if (FnD) {
Actions.ProcessDeclAttributeList(getCurScope(), FnD, AccessAttrs,
false, true);
bool TypeSpecContainsAuto
= D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
- if (Init.get())
+ if (Init.isUsable())
Actions.AddInitializerToDecl(FnD, Init.get(), false,
TypeSpecContainsAuto);
else
@@ -64,18 +66,25 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
if (Tok.is(tok::equal)) {
ConsumeToken();
+ if (!FnD) {
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
bool Delete = false;
SourceLocation KWLoc;
if (Tok.is(tok::kw_delete)) {
- if (!getLang().CPlusPlus0x)
- Diag(Tok, diag::warn_deleted_function_accepted_as_extension);
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_deleted_function :
+ diag::ext_deleted_function);
KWLoc = ConsumeToken();
Actions.SetDeclDeleted(FnD, KWLoc);
Delete = true;
} else if (Tok.is(tok::kw_default)) {
- if (!getLang().CPlusPlus0x)
- Diag(Tok, diag::warn_defaulted_function_accepted_as_extension);
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_defaulted_function :
+ diag::ext_defaulted_function);
KWLoc = ConsumeToken();
Actions.SetDeclDefaulted(FnD, KWLoc);
@@ -98,15 +107,13 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
// In delayed template parsing mode, if we are within a class template
// or if we are about to parse function member template then consume
// the tokens and store them for parsing at the end of the translation unit.
- if (getLang().DelayedTemplateParsing &&
+ if (getLangOpts().DelayedTemplateParsing &&
((Actions.CurContext->isDependentContext() ||
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) &&
- !Actions.IsInsideALocalClassWithinATemplateFunction()) &&
- !D.getDeclSpec().isFriendSpecified()) {
+ !Actions.IsInsideALocalClassWithinATemplateFunction())) {
if (FnD) {
- LateParsedTemplatedFunction *LPT =
- new LateParsedTemplatedFunction(this, FnD);
+ LateParsedTemplatedFunction *LPT = new LateParsedTemplatedFunction(FnD);
FunctionDecl *FD = 0;
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(FnD))
@@ -138,16 +145,14 @@ Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
// function body.
if (ConsumeAndStoreFunctionPrologue(Toks)) {
// We didn't find the left-brace we expected after the
- // constructor initializer.
- if (Tok.is(tok::semi)) {
- // We found a semicolon; complain, consume the semicolon, and
- // don't try to parse this method later.
- Diag(Tok.getLocation(), diag::err_expected_lbrace);
- ConsumeAnyToken();
- delete getCurrentClass().LateParsedDeclarations.back();
- getCurrentClass().LateParsedDeclarations.pop_back();
- return FnD;
- }
+ // constructor initializer; we already printed an error, and it's likely
+ // impossible to recover, so don't try to parse this method later.
+ // If we stopped at a semicolon, consume it to avoid an extra warning.
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ delete getCurrentClass().LateParsedDeclarations.back();
+ getCurrentClass().LateParsedDeclarations.pop_back();
+ return FnD;
} else {
// Consume everything up to (and including) the matching right brace.
ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
@@ -188,7 +193,7 @@ void Parser::ParseCXXNonStaticMemberInitializer(Decl *VarD) {
tok::TokenKind kind = Tok.getKind();
if (kind == tok::equal) {
Toks.push_back(Tok);
- ConsumeAnyToken();
+ ConsumeToken();
}
if (kind == tok::l_brace) {
@@ -290,7 +295,8 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
Scope::FunctionPrototypeScope|Scope::DeclScope);
for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
// Introduce the parameter into scope.
- Actions.ActOnDelayedCXXMethodParameter(getCurScope(), LM.DefaultArgs[I].Param);
+ Actions.ActOnDelayedCXXMethodParameter(getCurScope(),
+ LM.DefaultArgs[I].Param);
if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
// Save the current token position.
@@ -310,9 +316,15 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// The argument isn't actually potentially evaluated unless it is
// used.
EnterExpressionEvaluationContext Eval(Actions,
- Sema::PotentiallyEvaluatedIfUsed);
-
- ExprResult DefArgResult(ParseAssignmentExpression());
+ Sema::PotentiallyEvaluatedIfUsed,
+ LM.DefaultArgs[I].Param);
+
+ ExprResult DefArgResult;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ DefArgResult = ParseBraceInitializer();
+ } else
+ DefArgResult = ParseAssignmentExpression();
if (DefArgResult.isInvalid())
Actions.ActOnParamDefaultArgumentError(LM.DefaultArgs[I].Param);
else {
@@ -469,7 +481,8 @@ void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
ConsumeAnyToken();
SourceLocation EqualLoc;
- ExprResult Init = ParseCXXMemberInitializer(/*IsFunction=*/false, EqualLoc);
+ ExprResult Init = ParseCXXMemberInitializer(MI.Field, /*IsFunction=*/false,
+ EqualLoc);
Actions.ActOnCXXInClassMemberInitializer(MI.Field, EqualLoc, Init.release());
@@ -596,6 +609,7 @@ bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
Toks.push_back(Tok);
ConsumeToken();
}
+ bool ReadInitializer = false;
if (Tok.is(tok::colon)) {
// Initializers can contain braces too.
Toks.push_back(Tok);
@@ -603,37 +617,52 @@ bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
while (Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) {
if (Tok.is(tok::eof) || Tok.is(tok::semi))
- return true;
+ return Diag(Tok.getLocation(), diag::err_expected_lbrace);
// Grab the identifier.
if (!ConsumeAndStoreUntil(tok::l_paren, tok::l_brace, Toks,
/*StopAtSemi=*/true,
/*ConsumeFinalToken=*/false))
- return true;
+ return Diag(Tok.getLocation(), diag::err_expected_lparen);
tok::TokenKind kind = Tok.getKind();
Toks.push_back(Tok);
- if (kind == tok::l_paren)
+ bool IsLParen = (kind == tok::l_paren);
+ SourceLocation LOpen = Tok.getLocation();
+
+ if (IsLParen) {
ConsumeParen();
- else {
+ } else {
assert(kind == tok::l_brace && "Must be left paren or brace here.");
ConsumeBrace();
// In C++03, this has to be the start of the function body, which
- // means the initializer is malformed.
- if (!getLang().CPlusPlus0x)
+ // means the initializer is malformed; we'll diagnose it later.
+ if (!getLangOpts().CPlusPlus0x)
return false;
}
// Grab the initializer
- if (!ConsumeAndStoreUntil(kind == tok::l_paren ? tok::r_paren :
- tok::r_brace,
- Toks, /*StopAtSemi=*/true))
+ if (!ConsumeAndStoreUntil(IsLParen ? tok::r_paren : tok::r_brace,
+ Toks, /*StopAtSemi=*/true)) {
+ Diag(Tok, IsLParen ? diag::err_expected_rparen :
+ diag::err_expected_rbrace);
+ Diag(LOpen, diag::note_matching) << (IsLParen ? "(" : "{");
return true;
+ }
+
+ // Grab pack ellipsis, if present
+ if (Tok.is(tok::ellipsis)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
// Grab the separating comma, if any.
if (Tok.is(tok::comma)) {
Toks.push_back(Tok);
ConsumeToken();
+ } else if (Tok.isNot(tok::l_brace)) {
+ ReadInitializer = true;
+ break;
}
}
}
@@ -641,11 +670,14 @@ bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
// Grab any remaining garbage to be diagnosed later. We stop when we reach a
// brace: an opening one is the function body, while a closing one probably
// means we've reached the end of the class.
- if (!ConsumeAndStoreUntil(tok::l_brace, tok::r_brace, Toks,
- /*StopAtSemi=*/true, /*ConsumeFinalToken=*/false))
- return true;
- if(Tok.isNot(tok::l_brace))
- return true;
+ ConsumeAndStoreUntil(tok::l_brace, tok::r_brace, Toks,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/false);
+ if (Tok.isNot(tok::l_brace)) {
+ if (ReadInitializer)
+ return Diag(Tok.getLocation(), diag::err_expected_lbrace_or_comma);
+ return Diag(Tok.getLocation(), diag::err_expected_lbrace);
+ }
Toks.push_back(Tok);
ConsumeBrace();
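// Editor's sketch (not part of the diff): the retargeted diagnostics above
// distinguish "C++11 feature used as an extension in C++98/03" from "C++11
// feature that breaks C++98 compatibility". The constructs in question:

struct Widget {
  Widget() = default;              // ext_defaulted_function in C++98/03,
                                   // warn_cxx98_compat_* in C++11 mode
  Widget(const Widget &) = delete; // same pairing for deleted functions
};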
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
index 2aa178f..cf3dca2 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
@@ -19,6 +19,7 @@
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "RAIIObjectsForParser.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -35,9 +36,11 @@ TypeResult Parser::ParseTypeName(SourceRange *Range,
Declarator::TheContext Context,
AccessSpecifier AS,
Decl **OwnedType) {
+ DeclSpecContext DSC = getDeclSpecContextFromDeclaratorContext(Context);
+
// Parse the common declaration-specifiers piece.
DeclSpec DS(AttrFactory);
- ParseSpecifierQualifierList(DS, AS);
+ ParseSpecifierQualifierList(DS, AS, DSC);
if (OwnedType)
*OwnedType = DS.isTypeSpecOwned() ? DS.getRepAsDecl() : 0;
@@ -94,6 +97,8 @@ static bool isAttributeLateParsed(const IdentifierInfo &II) {
/// which is followed by a comma or close parenthesis, then the arguments
/// start with that identifier; otherwise they are an expression list."
///
+/// GCC does not require the ',' between attributes in an attribute-list.
+///
/// At the moment, I am not doing 2 token lookahead. I am also unaware of
/// any attributes that don't work (based on my limited testing). Most
/// attributes are very simple in practice. Until we find a bug, I don't see
@@ -129,14 +134,15 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
if (Tok.is(tok::l_paren)) {
// handle "parameterized" attributes
- if (LateAttrs && !ClassStack.empty() &&
- isAttributeLateParsed(*AttrName)) {
- // Delayed parsing is only available for attributes that occur
- // in certain locations within a class scope.
+ if (LateAttrs && isAttributeLateParsed(*AttrName)) {
LateParsedAttribute *LA =
new LateParsedAttribute(this, *AttrName, AttrNameLoc);
LateAttrs->push_back(LA);
- getCurrentClass().LateParsedDeclarations.push_back(LA);
+
+ // Attributes in a class are parsed at the end of the class, along
+ // with other late-parsed declarations.
+ if (!ClassStack.empty())
+ getCurrentClass().LateParsedDeclarations.push_back(LA);
// consume everything up to and including the matching right parens
ConsumeAndStoreUntil(tok::r_paren, LA->Toks, true, false);
@@ -187,107 +193,89 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
ConsumeParen(); // ignore the left paren loc for now
- if (Tok.is(tok::identifier)) {
- IdentifierInfo *ParmName = Tok.getIdentifierInfo();
- SourceLocation ParmLoc = ConsumeToken();
+ IdentifierInfo *ParmName = 0;
+ SourceLocation ParmLoc;
+ bool BuiltinType = false;
- if (Tok.is(tok::r_paren)) {
- // __attribute__(( mode(byte) ))
- SourceLocation RParen = ConsumeParen();
- Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), 0, AttrNameLoc,
- ParmName, ParmLoc, 0, 0);
- } else if (Tok.is(tok::comma)) {
+ switch (Tok.getKind()) {
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::kw_typeof:
+ // __attribute__(( vec_type_hint(char) ))
+ // FIXME: Don't just discard the builtin type token.
+ ConsumeToken();
+ BuiltinType = true;
+ break;
+
+ case tok::identifier:
+ ParmName = Tok.getIdentifierInfo();
+ ParmLoc = ConsumeToken();
+ break;
+
+ default:
+ break;
+ }
+
+ ExprVector ArgExprs(Actions);
+
+ if (!BuiltinType &&
+ (ParmLoc.isValid() ? Tok.is(tok::comma) : Tok.isNot(tok::r_paren))) {
+ // Eat the comma.
+ if (ParmLoc.isValid())
ConsumeToken();
- // __attribute__(( format(printf, 1, 2) ))
- ExprVector ArgExprs(Actions);
- bool ArgExprsOk = true;
-
- // now parse the non-empty comma separated list of expressions
- while (1) {
- ExprResult ArgExpr(ParseAssignmentExpression());
- if (ArgExpr.isInvalid()) {
- ArgExprsOk = false;
- SkipUntil(tok::r_paren);
- break;
- } else {
- ArgExprs.push_back(ArgExpr.release());
- }
- if (Tok.isNot(tok::comma))
- break;
- ConsumeToken(); // Eat the comma, move to the next argument
- }
- if (ArgExprsOk && Tok.is(tok::r_paren)) {
- SourceLocation RParen = ConsumeParen();
- Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), 0, AttrNameLoc,
- ParmName, ParmLoc, ArgExprs.take(), ArgExprs.size());
+
+ // Parse the non-empty comma-separated list of expressions.
+ while (1) {
+ ExprResult ArgExpr(ParseAssignmentExpression());
+ if (ArgExpr.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return;
}
+ ArgExprs.push_back(ArgExpr.release());
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // Eat the comma, move to the next argument
}
- } else { // not an identifier
- switch (Tok.getKind()) {
- case tok::r_paren: {
- // parse a possibly empty comma separated list of expressions
- // __attribute__(( nonnull() ))
- SourceLocation RParen = ConsumeParen();
- Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), 0, AttrNameLoc,
- 0, SourceLocation(), 0, 0);
- break;
- }
- case tok::kw_char:
- case tok::kw_wchar_t:
- case tok::kw_char16_t:
- case tok::kw_char32_t:
- case tok::kw_bool:
- case tok::kw_short:
- case tok::kw_int:
- case tok::kw_long:
- case tok::kw___int64:
- case tok::kw_signed:
- case tok::kw_unsigned:
- case tok::kw_float:
- case tok::kw_double:
- case tok::kw_void:
- case tok::kw_typeof: {
- // If it's a builtin type name, eat it and expect a rparen
- // __attribute__(( vec_type_hint(char) ))
- SourceLocation EndLoc = ConsumeToken();
- if (Tok.is(tok::r_paren))
- EndLoc = ConsumeParen();
- AttributeList *attr
- = Attrs.addNew(AttrName, SourceRange(AttrNameLoc, EndLoc), 0,
- AttrNameLoc, 0, SourceLocation(), 0, 0);
- if (attr->getKind() == AttributeList::AT_IBOutletCollection)
- Diag(Tok, diag::err_iboutletcollection_builtintype);
- break;
- }
- default:
- // __attribute__(( aligned(16) ))
- ExprVector ArgExprs(Actions);
- bool ArgExprsOk = true;
-
- // now parse the list of expressions
- while (1) {
- ExprResult ArgExpr(ParseAssignmentExpression());
- if (ArgExpr.isInvalid()) {
- ArgExprsOk = false;
- SkipUntil(tok::r_paren);
+ }
+ else if (Tok.is(tok::less) && AttrName->isStr("iboutletcollection")) {
+ if (!ExpectAndConsume(tok::less, diag::err_expected_less_after, "<",
+ tok::greater)) {
+ while (Tok.is(tok::identifier)) {
+ ConsumeToken();
+ if (Tok.is(tok::greater))
break;
- } else {
- ArgExprs.push_back(ArgExpr.release());
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ continue;
}
- if (Tok.isNot(tok::comma))
- break;
- ConsumeToken(); // Eat the comma, move to the next argument
- }
- // Match the ')'.
- if (ArgExprsOk && Tok.is(tok::r_paren)) {
- SourceLocation RParen = ConsumeParen();
- Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), 0,
- AttrNameLoc, 0, SourceLocation(),
- ArgExprs.take(), ArgExprs.size());
}
- break;
+ if (Tok.isNot(tok::greater))
+ Diag(Tok, diag::err_iboutletcollection_with_protocol);
+ SkipUntil(tok::r_paren, false, true); // skip until ')'
}
}
+
+ SourceLocation RParen = Tok.getLocation();
+ if (!ExpectAndConsume(tok::r_paren, diag::err_expected_rparen)) {
+ AttributeList *attr =
+ Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), 0, AttrNameLoc,
+ ParmName, ParmLoc, ArgExprs.take(), ArgExprs.size());
+ if (BuiltinType && attr->getKind() == AttributeList::AT_iboutletcollection)
+ Diag(Tok, diag::err_iboutletcollection_builtintype);
+ }
}
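// Editor's note (not part of the diff): the rewritten argument parser folds
// the old identifier/expression branches into one pass. The argument shapes
// involved, taken from the branch comments preserved above:
//
//   __attribute__((mode(byte)))            // bare parameter name
//   __attribute__((format(printf, 1, 2)))  // name plus expression list
//   __attribute__((aligned(16)))           // expression list only
//   __attribute__((nonnull()))             // empty list
//   __attribute__((vec_type_hint(char)))   // builtin type name, still discarded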
@@ -450,7 +438,7 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
// are stored in the numeric constant. We utilize a quirk of the
// lexer, which is that it handles something like 1.2.3 as a single
// numeric constant, rather than two separate tokens.
- llvm::SmallString<512> Buffer;
+ SmallString<512> Buffer;
Buffer.resize(Tok.getLength()+1);
const char *ThisTokBegin = &Buffer[0];
@@ -539,7 +527,7 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
/// \brief Parse the contents of the "availability" attribute.
///
/// availability-attribute:
-/// 'availability' '(' platform ',' version-arg-list ')'
+/// 'availability' '(' platform ',' version-arg-list, opt-message')'
///
/// platform:
/// identifier
@@ -551,8 +539,10 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
/// version-arg:
/// 'introduced' '=' version
/// 'deprecated' '=' version
-/// 'removed' = version
+/// 'obsoleted' '=' version
/// 'unavailable'
+/// opt-message:
+/// 'message' '=' <string>
void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
@@ -562,6 +552,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
enum { Introduced, Deprecated, Obsoleted, Unknown };
AvailabilityChange Changes[Unknown];
+ ExprResult MessageExpr;
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -590,6 +581,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
Ident_deprecated = PP.getIdentifierInfo("deprecated");
Ident_obsoleted = PP.getIdentifierInfo("obsoleted");
Ident_unavailable = PP.getIdentifierInfo("unavailable");
+ Ident_message = PP.getIdentifierInfo("message");
}
// Parse the set of introductions/deprecations/removals.
@@ -616,7 +608,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
ConsumeToken();
continue;
}
-
+
if (Tok.isNot(tok::equal)) {
Diag(Tok, diag::err_expected_equal_after)
<< Keyword;
@@ -624,6 +616,15 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
return;
}
ConsumeToken();
+ if (Keyword == Ident_message) {
+ if (!isTokenStringLiteral()) {
+ Diag(Tok, diag::err_expected_string_literal);
+ SkipUntil(tok::r_paren);
+ return;
+ }
+ MessageExpr = ParseStringLiteralExpression();
+ break;
+ }
SourceRange VersionRange;
VersionTuple Version = ParseVersionTuple(VersionRange);
@@ -694,12 +695,13 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
// Record this attribute
attrs.addNew(&Availability,
SourceRange(AvailabilityLoc, T.getCloseLocation()),
- 0, SourceLocation(),
+ 0, AvailabilityLoc,
Platform, PlatformLoc,
Changes[Introduced],
Changes[Deprecated],
Changes[Obsoleted],
- UnavailableLoc, false, false);
+ UnavailableLoc, MessageExpr.take(),
+ false, false);
}
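// Editor's sketch (not part of the diff): the grammar gains an optional
// trailing message clause, surfaced when the deprecated or obsoleted entity
// is used. A declaration exercising every version-arg plus the new message
// (hypothetical function name):

void legacyAPI(void)
    __attribute__((availability(macosx, introduced=10.4, deprecated=10.6,
                                obsoleted=10.7, message="use modernAPI()")));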
@@ -713,7 +715,7 @@ void Parser::LateParsedClass::ParseLexedAttributes() {
}
void Parser::LateParsedAttribute::ParseLexedAttributes() {
- Self->ParseLexedAttribute(*this);
+ Self->ParseLexedAttribute(*this, true, false);
}
/// Wrapper class which calls ParseLexedAttribute, after setting up the
@@ -733,17 +735,39 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) {
ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
+ // Enter the scope of nested classes
+ if (!AlreadyHasClassScope)
+ Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+
for (unsigned i = 0, ni = Class.LateParsedDeclarations.size(); i < ni; ++i) {
Class.LateParsedDeclarations[i]->ParseLexedAttributes();
}
+
+ if (!AlreadyHasClassScope)
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+}
+
+
+/// \brief Parse all attributes in LAs, and attach them to Decl D.
+void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
+ bool EnterScope, bool OnDefinition) {
+ for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
+ LAs[i]->addDecl(D);
+ ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition);
+ }
+ LAs.clear();
}
+
/// \brief Finish parsing an attribute for which parsing was delayed.
/// This will be called at the end of parsing a class declaration
/// for each LateParsedAttribute. We consume the saved tokens and
/// create an attribute with the arguments filled in. We add this
/// to the Attribute list for the decl.
-void Parser::ParseLexedAttribute(LateParsedAttribute &LA) {
+void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
+ bool EnterScope, bool OnDefinition) {
// Save the current token position.
SourceLocation OrigLoc = Tok.getLocation();
@@ -754,35 +778,49 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA) {
// Consume the previously pushed token.
ConsumeAnyToken();
+ if (OnDefinition && !IsThreadSafetyAttribute(LA.AttrName.getName())) {
+ Diag(Tok, diag::warn_attribute_on_function_definition)
+ << LA.AttrName.getName();
+ }
+
ParsedAttributes Attrs(AttrFactory);
SourceLocation endLoc;
- // If the Decl is templatized, add template parameters to scope.
- bool HasTemplateScope = LA.D && LA.D->isTemplateDecl();
- ParseScope TempScope(this, Scope::TemplateParamScope, HasTemplateScope);
- if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(Actions.CurScope, LA.D);
+ if (LA.Decls.size() == 1) {
+ Decl *D = LA.Decls[0];
- // If the Decl is on a function, add function parameters to the scope.
- bool HasFunctionScope = LA.D && LA.D->isFunctionOrFunctionTemplate();
- ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope, HasFunctionScope);
- if (HasFunctionScope)
- Actions.ActOnReenterFunctionContext(Actions.CurScope, LA.D);
+ // If the Decl is templatized, add template parameters to scope.
+ bool HasTemplateScope = EnterScope && D->isTemplateDecl();
+ ParseScope TempScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(Actions.CurScope, D);
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc);
+ // If the Decl is on a function, add function parameters to the scope.
+ bool HasFunctionScope = EnterScope && D->isFunctionOrFunctionTemplate();
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope, HasFunctionScope);
+ if (HasFunctionScope)
+ Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
- if (HasFunctionScope) {
- Actions.ActOnExitFunctionContext();
- FnScope.Exit(); // Pop scope, and remove Decls from IdResolver
- }
- if (HasTemplateScope) {
- TempScope.Exit();
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc);
+
+ if (HasFunctionScope) {
+ Actions.ActOnExitFunctionContext();
+ FnScope.Exit(); // Pop scope, and remove Decls from IdResolver
+ }
+ if (HasTemplateScope) {
+ TempScope.Exit();
+ }
+ } else if (LA.Decls.size() > 0) {
+ // If there are multiple decls, then the decl cannot be within the
+ // function scope.
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc);
+ } else {
+ Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
}
- // Late parsed attributes must be attached to Decls by hand. If the
- // LA.D is not set, then this was not done properly.
- assert(LA.D && "No decl attached to late parsed attribute");
- Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.D, Attrs);
+ for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i) {
+ Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
+ }
if (Tok.getLocation() != OrigLoc) {
// Due to a parsing error, we either went over the cached tokens or
@@ -792,7 +830,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA) {
if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
OrigLoc))
while (Tok.getLocation() != OrigLoc && Tok.isNot(tok::eof))
- ConsumeAnyToken();
+ ConsumeAnyToken();
}
}
@@ -846,7 +884,7 @@ void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
bool ArgExprsOk = true;
// now parse the list of expressions
- while (1) {
+ while (Tok.isNot(tok::r_paren)) {
ExprResult ArgExpr(ParseAssignmentExpression());
if (ArgExpr.isInvalid()) {
ArgExprsOk = false;
@@ -868,6 +906,40 @@ void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
*EndLoc = T.getCloseLocation();
}
+/// DiagnoseProhibitedCXX11Attribute - We have found the opening square brackets
+/// of a C++11 attribute-specifier in a location where an attribute is not
+/// permitted. By C++11 [dcl.attr.grammar]p6, this is ill-formed. Diagnose this
+/// situation.
+///
+/// \return \c true if we skipped an attribute-like chunk of tokens, \c false if
+/// this doesn't appear to actually be an attribute-specifier, and the caller
+/// should try to parse it.
+bool Parser::DiagnoseProhibitedCXX11Attribute() {
+ assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square));
+
+ switch (isCXX11AttributeSpecifier(/*Disambiguate*/true)) {
+ case CAK_NotAttributeSpecifier:
+ // No diagnostic: we're in Obj-C++11 and this is not actually an attribute.
+ return false;
+
+ case CAK_InvalidAttributeSpecifier:
+ Diag(Tok.getLocation(), diag::err_l_square_l_square_not_attribute);
+ return false;
+
+ case CAK_AttributeSpecifier:
+ // Parse and discard the attributes.
+ SourceLocation BeginLoc = ConsumeBracket();
+ ConsumeBracket();
+ SkipUntil(tok::r_square, /*StopAtSemi*/ false);
+ assert(Tok.is(tok::r_square) && "isCXX11AttributeSpecifier lied");
+ SourceLocation EndLoc = ConsumeBracket();
+ Diag(BeginLoc, diag::err_attributes_not_allowed)
+ << SourceRange(BeginLoc, EndLoc);
+ return true;
+ }
+ llvm_unreachable("All cases handled above.");
+}
+
void Parser::DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs) {
Diag(attrs.Range.getBegin(), diag::err_attributes_not_allowed)
<< attrs.Range;
@@ -886,7 +958,7 @@ void Parser::DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs) {
/// [C++] namespace-definition
/// [C++] using-directive
/// [C++] using-declaration
-/// [C++0x/C1X] static_assert-declaration
+/// [C++0x/C11] static_assert-declaration
/// others... [FIXME]
///
Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
@@ -908,7 +980,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
break;
case tok::kw_inline:
// Could be the start of an inline namespace. Allowed as an ext in C++03.
- if (getLang().CPlusPlus && NextToken().is(tok::kw_namespace)) {
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_namespace)) {
ProhibitAttributes(attrs);
SourceLocation InlineLoc = ConsumeToken();
SingleDecl = ParseNamespace(Context, DeclEnd, InlineLoc);
@@ -965,10 +1037,7 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(StmtVector &Stmts,
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none,
getDeclSpecContextFromDeclaratorContext(Context));
- StmtResult R = Actions.ActOnVlaStmt(DS);
- if (R.isUsable())
- Stmts.push_back(R.release());
-
+
// C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
// declaration-specifiers init-declarator-list[opt] ';'
if (Tok.is(tok::semi)) {
@@ -982,6 +1051,133 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(StmtVector &Stmts,
return ParseDeclGroup(DS, Context, /*FunctionDefs=*/ false, &DeclEnd, FRI);
}
+/// Returns true if this might be the start of a declarator, or a common typo
+/// for a declarator.
+bool Parser::MightBeDeclarator(unsigned Context) {
+ switch (Tok.getKind()) {
+ case tok::annot_cxxscope:
+ case tok::annot_template_id:
+ case tok::caret:
+ case tok::code_completion:
+ case tok::coloncolon:
+ case tok::ellipsis:
+ case tok::kw___attribute:
+ case tok::kw_operator:
+ case tok::l_paren:
+ case tok::star:
+ return true;
+
+ case tok::amp:
+ case tok::ampamp:
+ return getLangOpts().CPlusPlus;
+
+ case tok::l_square: // Might be an attribute on an unnamed bit-field.
+ return Context == Declarator::MemberContext && getLangOpts().CPlusPlus0x &&
+ NextToken().is(tok::l_square);
+
+ case tok::colon: // Might be a typo for '::' or an unnamed bit-field.
+ return Context == Declarator::MemberContext || getLangOpts().CPlusPlus;
+
+ case tok::identifier:
+ switch (NextToken().getKind()) {
+ case tok::code_completion:
+ case tok::coloncolon:
+ case tok::comma:
+ case tok::equal:
+ case tok::equalequal: // Might be a typo for '='.
+ case tok::kw_alignas:
+ case tok::kw_asm:
+ case tok::kw___attribute:
+ case tok::l_brace:
+ case tok::l_paren:
+ case tok::l_square:
+ case tok::less:
+ case tok::r_brace:
+ case tok::r_paren:
+ case tok::r_square:
+ case tok::semi:
+ return true;
+
+ case tok::colon:
+ // At namespace scope, 'identifier:' is probably a typo for 'identifier::'
+ // and in block scope it's probably a label. Inside a class definition,
+ // this is a bit-field.
+ return Context == Declarator::MemberContext ||
+ (getLangOpts().CPlusPlus && Context == Declarator::FileContext);
+
+ case tok::identifier: // Possible virt-specifier.
+ return getLangOpts().CPlusPlus0x && isCXX0XVirtSpecifier(NextToken());
+
+ default:
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
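// Editor's sketch (not part of the diff): MightBeDeclarator is a one- or
// two-token lookahead heuristic. The same shape in miniature over a toy
// token enum (hypothetical, far smaller than the real predicate):

#include <cstddef>
#include <vector>

enum ToyTok { Ident, Star, LParen, Comma, Semi, Other };

static bool mightBeDeclarator(const std::vector<ToyTok> &Toks, std::size_t I) {
  switch (Toks[I]) {
  case Star:
  case LParen:
    return true;                       // '*x', '(*x)' can start a declarator
  case Ident:                          // one more token of lookahead decides
    return I + 1 < Toks.size() &&
           (Toks[I + 1] == Comma || Toks[I + 1] == Semi ||
            Toks[I + 1] == LParen);
  default:
    return false;
  }
}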
+/// Skip until we reach something which seems like a sensible place to pick
+/// up parsing after a malformed declaration. This will sometimes stop sooner
+/// than SkipUntil(tok::r_brace) would, but will never stop later.
+void Parser::SkipMalformedDecl() {
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::l_brace:
+ // Skip until matching }, then stop. We've probably skipped over
+ // a malformed class or function definition or similar.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi*/false);
+ if (Tok.is(tok::comma) || Tok.is(tok::l_brace) || Tok.is(tok::kw_try)) {
+ // This declaration isn't over yet. Keep skipping.
+ continue;
+ }
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return;
+
+ case tok::l_square:
+ ConsumeBracket();
+ SkipUntil(tok::r_square, /*StopAtSemi*/false);
+ continue;
+
+ case tok::l_paren:
+ ConsumeParen();
+ SkipUntil(tok::r_paren, /*StopAtSemi*/false);
+ continue;
+
+ case tok::r_brace:
+ return;
+
+ case tok::semi:
+ ConsumeToken();
+ return;
+
+ case tok::kw_inline:
+ // 'inline namespace' at the start of a line is almost certainly
+ // a good place to pick back up parsing.
+ if (Tok.isAtStartOfLine() && NextToken().is(tok::kw_namespace))
+ return;
+ break;
+
+ case tok::kw_namespace:
+ // 'namespace' at the start of a line is almost certainly a good
+ // place to pick back up parsing.
+ if (Tok.isAtStartOfLine())
+ return;
+ break;
+
+ case tok::eof:
+ return;
+
+ default:
+ break;
+ }
+
+ ConsumeAnyToken();
+ }
+}
+
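// Editor's sketch (not part of the diff): the recovery loop above skips
// balanced (), [], {} regions and stops at ';', a stray '}', or EOF. A toy
// character-level model of the same idea:

#include <cstddef>
#include <string>

// Returns the index just past a malformed "declaration" in Src (toy model).
static std::size_t skipMalformedDecl(const std::string &Src, std::size_t I) {
  int Depth = 0;
  for (; I < Src.size(); ++I) {
    char C = Src[I];
    if (C == '{' || C == '(' || C == '[') {
      ++Depth;
    } else if (C == '}' || C == ')' || C == ']') {
      if (C == '}' && Depth == 0)
        return I;    // closing brace of the enclosing scope: stop before it
      if (Depth > 0)
        --Depth;
    } else if (C == ';' && Depth == 0) {
      return I + 1;  // consume the semicolon and stop
    }
  }
  return I;          // EOF
}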
/// ParseDeclGroup - Having concluded that this is either a function
/// definition or a group of object declarations, actually parse the
/// result.
@@ -996,13 +1192,16 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// Bail out if the first declarator didn't seem well-formed.
if (!D.hasName() && !D.mayOmitIdentifier()) {
- // Skip until ; or }.
- SkipUntil(tok::r_brace, true, true);
- if (Tok.is(tok::semi))
- ConsumeToken();
+ SkipMalformedDecl();
return DeclGroupPtrTy();
}
+ // Save late-parsed attributes for now; they need to be parsed in the
+ // appropriate function scope after the function Decl has been constructed.
+ LateParsedAttrList LateParsedAttrs;
+ if (D.isFunctionDeclarator())
+ MaybeParseGNUAttributes(D, &LateParsedAttrs);
+
// Check to see if we have a function *definition* which must have a body.
if (AllowFunctionDefinitions && D.isFunctionDeclarator() &&
// Look at the next token to make sure that this isn't a function
@@ -1018,7 +1217,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
DS.ClearStorageClassSpecs();
}
- Decl *TheDecl = ParseFunctionDefinition(D);
+ Decl *TheDecl =
+ ParseFunctionDefinition(D, ParsedTemplateInfo(), &LateParsedAttrs);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
@@ -1035,7 +1235,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
}
}
- if (ParseAttributesAfterDeclarator(D))
+ if (ParseAsmAttributesAfterDeclarator(D))
return DeclGroupPtrTy();
// C++0x [stmt.iter]p1: Check if we have a for-range-declarator. If so, we
@@ -1050,23 +1250,38 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
Decl *ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
Actions.ActOnCXXForRangeDecl(ThisDecl);
Actions.FinalizeDeclaration(ThisDecl);
+ D.complete(ThisDecl);
return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, &ThisDecl, 1);
}
SmallVector<Decl *, 8> DeclsInGroup;
Decl *FirstDecl = ParseDeclarationAfterDeclaratorAndAttributes(D);
+ if (LateParsedAttrs.size() > 0)
+ ParseLexedAttributeList(LateParsedAttrs, FirstDecl, true, false);
D.complete(FirstDecl);
if (FirstDecl)
DeclsInGroup.push_back(FirstDecl);
+ bool ExpectSemi = Context != Declarator::ForContext;
+
// If we don't have a comma, it is either the end of the list (a ';') or an
// error, bail out.
while (Tok.is(tok::comma)) {
- // Consume the comma.
- ConsumeToken();
+ SourceLocation CommaLoc = ConsumeToken();
+
+ if (Tok.isAtStartOfLine() && ExpectSemi && !MightBeDeclarator(Context)) {
+ // This comma was followed by a line-break and something which can't be
+ // the start of a declarator. The comma was probably a typo for a
+ // semicolon.
+ Diag(CommaLoc, diag::err_expected_semi_declaration)
+ << FixItHint::CreateReplacement(CommaLoc, ";");
+ ExpectSemi = false;
+ break;
+ }
// Parse the next declarator.
D.clear();
+ D.setCommaLoc(CommaLoc);
// Accept attributes in an init-declarator. In the first declarator in a
// declaration, these would be part of the declspec. In subsequent
@@ -1078,17 +1293,18 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
MaybeParseGNUAttributes(D);
ParseDeclarator(D);
-
- Decl *ThisDecl = ParseDeclarationAfterDeclarator(D);
- D.complete(ThisDecl);
- if (ThisDecl)
- DeclsInGroup.push_back(ThisDecl);
+ if (!D.isInvalidType()) {
+ Decl *ThisDecl = ParseDeclarationAfterDeclarator(D);
+ D.complete(ThisDecl);
+ if (ThisDecl)
+ DeclsInGroup.push_back(ThisDecl);
+ }
}
if (DeclEnd)
*DeclEnd = Tok.getLocation();
- if (Context != Declarator::ForContext &&
+ if (ExpectSemi &&
ExpectAndConsume(tok::semi,
Context == Declarator::FileContext
? diag::err_invalid_token_after_toplevel_declarator
@@ -1110,7 +1326,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
/// Parse an optional simple-asm-expr and attributes, and attach them to a
/// declarator. Returns true on an error.
-bool Parser::ParseAttributesAfterDeclarator(Declarator &D) {
+bool Parser::ParseAsmAttributesAfterDeclarator(Declarator &D) {
// If a simple-asm-expr is present, parse it.
if (Tok.is(tok::kw_asm)) {
SourceLocation Loc;
@@ -1152,7 +1368,7 @@ bool Parser::ParseAttributesAfterDeclarator(Declarator &D) {
///
Decl *Parser::ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo) {
- if (ParseAttributesAfterDeclarator(D))
+ if (ParseAsmAttributesAfterDeclarator(D))
return 0;
return ParseDeclarationAfterDeclaratorAndAttributes(D, TemplateInfo);
@@ -1196,8 +1412,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
// Parse declarator '=' initializer.
- if (isTokenEqualOrMistypedEqualEqual(
- diag::err_invalid_equalequal_after_declarator)) {
+ // If a '==' or '+=' is found, suggest a fixit to '='.
+ if (isTokenEqualOrEqualTypo()) {
ConsumeToken();
if (Tok.is(tok::kw_delete)) {
if (D.isFunctionDeclarator())
@@ -1207,12 +1423,12 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
Diag(ConsumeToken(), diag::err_deleted_non_function);
} else if (Tok.is(tok::kw_default)) {
if (D.isFunctionDeclarator())
- Diag(Tok, diag::err_default_delete_in_multiple_declaration)
- << 1 /* delete */;
+ Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
+ << 0 /* default */;
else
Diag(ConsumeToken(), diag::err_default_special_members);
} else {
- if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
EnterScope(0);
Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
}
@@ -1225,7 +1441,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
ExprResult Init(ParseInitializer());
- if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
ExitScope();
}
@@ -1245,7 +1461,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
ExprVector Exprs(Actions);
CommaLocsTy CommaLocs;
- if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
EnterScope(0);
Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
}
@@ -1253,7 +1469,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
if (ParseExpressionList(Exprs, CommaLocs)) {
SkipUntil(tok::r_paren);
- if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
ExitScope();
}
@@ -1264,18 +1480,21 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
assert(!Exprs.empty() && Exprs.size()-1 == CommaLocs.size() &&
"Unexpected number of commas!");
- if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
ExitScope();
}
- Actions.AddCXXDirectInitializerToDecl(ThisDecl, T.getOpenLocation(),
- move_arg(Exprs),
- T.getCloseLocation(),
- TypeContainsAuto);
+ ExprResult Initializer = Actions.ActOnParenListExpr(T.getOpenLocation(),
+ T.getCloseLocation(),
+ move_arg(Exprs));
+ Actions.AddInitializerToDecl(ThisDecl, Initializer.take(),
+ /*DirectInit=*/true, TypeContainsAuto);
}
- } else if (getLang().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ } else if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
// Parse C++0x braced-init-list.
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
if (D.getCXXScopeSpec().isSet()) {
EnterScope(0);
Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
@@ -1309,17 +1528,24 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
/// type-qualifier specifier-qualifier-list[opt]
/// [GNU] attributes specifier-qualifier-list[opt]
///
-void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS) {
+void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
+ DeclSpecContext DSC) {
/// specifier-qualifier-list is a subset of declaration-specifiers. Just
/// parse declaration-specifiers and complain about extra stuff.
/// TODO: diagnose attribute-specifiers and alignment-specifiers.
- ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS);
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC);
// Validate declspec for type-name.
unsigned Specs = DS.getParsedSpecifiers();
- if (Specs == DeclSpec::PQ_None && !DS.getNumProtocolQualifiers() &&
- !DS.hasAttributes())
+ if (DSC == DSC_type_specifier && !DS.hasTypeSpecifier()) {
+ Diag(Tok, diag::err_expected_type);
+ DS.SetTypeSpecError();
+ } else if (Specs == DeclSpec::PQ_None && !DS.getNumProtocolQualifiers() &&
+ !DS.hasAttributes()) {
Diag(Tok, diag::err_typename_requires_specqual);
+ if (!DS.hasTypeSpecifier())
+ DS.SetTypeSpecError();
+ }
// Issue diagnostic and remove storage class if present.
if (Specs & DeclSpec::PQ_StorageClassSpecifier) {
@@ -1340,6 +1566,12 @@ void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS) {
Diag(DS.getExplicitSpecLoc(), diag::err_typename_invalid_functionspec);
DS.ClearFunctionSpecs();
}
+
+ // Issue diagnostic and remove constexpr specifier if present.
+ if (DS.isConstexprSpecified()) {
+ Diag(DS.getConstexprSpecLoc(), diag::err_typename_invalid_constexpr);
+ DS.ClearConstexprSpec();
+ }
}
/// isValidAfterIdentifierInDeclaratorAfterDeclSpec - Return true if the
@@ -1378,7 +1610,7 @@ static bool isValidAfterIdentifierInDeclarator(const Token &T) {
///
bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
- AccessSpecifier AS) {
+ AccessSpecifier AS, DeclSpecContext DSC) {
assert(Tok.is(tok::identifier) && "should have identifier");
SourceLocation Loc = Tok.getLocation();
@@ -1397,8 +1629,13 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
assert(!DS.hasTypeSpecifier() && "Type specifier checked above");
// Since we know that this is either implicit int (which is rare) or an
- // error, we'd do lookahead to try to do better recovery.
- if (isValidAfterIdentifierInDeclarator(NextToken())) {
+ // error, do lookahead to try to do better recovery. This never applies within
+ // a type specifier.
+ // FIXME: Don't bail out here in languages with no implicit int (like
+ // C++ with no -fms-extensions). This is much more likely to be an undeclared
+ // type or typo than a use of implicit int.
+ if (DSC != DSC_type_specifier &&
+ isValidAfterIdentifierInDeclarator(NextToken())) {
// If this token is valid for implicit int, e.g. "static x = 4", then
// we just avoid eating the identifier, so it will be parsed as the
// identifier in the declarator.
@@ -1429,14 +1666,15 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
if (TagName) {
Diag(Loc, diag::err_use_of_tag_name_without_tag)
- << Tok.getIdentifierInfo() << TagName << getLang().CPlusPlus
+ << Tok.getIdentifierInfo() << TagName << getLangOpts().CPlusPlus
<< FixItHint::CreateInsertion(Tok.getLocation(),FixitTagName);
// Parse this as a tag as if the missing tag were present.
if (TagKind == tok::kw_enum)
- ParseEnumSpecifier(Loc, DS, TemplateInfo, AS);
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS, DSC_normal);
else
- ParseClassSpecifier(TagKind, Loc, DS, TemplateInfo, AS);
+ ParseClassSpecifier(TagKind, Loc, DS, TemplateInfo, AS,
+ /*EnteringContext*/ false, DSC_normal);
return true;
}
}
@@ -1470,9 +1708,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
}
// Mark this as an error.
- const char *PrevSpec;
- unsigned DiagID;
- DS.SetTypeSpecType(DeclSpec::TST_error, Loc, PrevSpec, DiagID);
+ DS.SetTypeSpecError();
DS.SetRangeEnd(Tok.getLocation());
ConsumeToken();
@@ -1493,6 +1729,8 @@ Parser::getDeclSpecContextFromDeclaratorContext(unsigned Context) {
return DSC_class;
if (Context == Declarator::FileContext)
return DSC_top_level;
+ if (Context == Declarator::TrailingReturnContext)
+ return DSC_trailing;
return DSC_normal;
}
@@ -1501,29 +1739,36 @@ Parser::getDeclSpecContextFromDeclaratorContext(unsigned Context) {
/// FIXME: Simply returns an alignof() expression if the argument is a
/// type. Ideally, the type should be propagated directly into Sema.
///
-/// [C1X/C++0x] type-id
-/// [C1X] constant-expression
-/// [C++0x] assignment-expression
-ExprResult Parser::ParseAlignArgument(SourceLocation Start) {
+/// [C11] type-id
+/// [C11] constant-expression
+/// [C++0x] type-id ...[opt]
+/// [C++0x] assignment-expression ...[opt]
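+///
+/// e.g. (illustrative) "_Alignas(double)" passes a type-id, while
+/// "alignas(2 * alignof(int))" passes an assignment-expression; in C++0x,
+/// "alignas(Ts...)" adds the trailing ellipsis.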
+ExprResult Parser::ParseAlignArgument(SourceLocation Start,
+ SourceLocation &EllipsisLoc) {
+ ExprResult ER;
if (isTypeIdInParens()) {
- EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
SourceLocation TypeLoc = Tok.getLocation();
ParsedType Ty = ParseTypeName().get();
SourceRange TypeRange(Start, Tok.getLocation());
- return Actions.ActOnUnaryExprOrTypeTraitExpr(TypeLoc, UETT_AlignOf, true,
- Ty.getAsOpaquePtr(), TypeRange);
+ ER = Actions.ActOnUnaryExprOrTypeTraitExpr(TypeLoc, UETT_AlignOf, true,
+ Ty.getAsOpaquePtr(), TypeRange);
} else
- return ParseConstantExpression();
+ ER = ParseConstantExpression();
+
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
+ return ER;
}
/// ParseAlignmentSpecifier - Parse an alignment-specifier, and add the
/// attribute to Attrs.
///
/// alignment-specifier:
-/// [C1X] '_Alignas' '(' type-id ')'
-/// [C1X] '_Alignas' '(' constant-expression ')'
-/// [C++0x] 'alignas' '(' type-id ')'
-/// [C++0x] 'alignas' '(' assignment-expression ')'
+/// [C11] '_Alignas' '(' type-id ')'
+/// [C11] '_Alignas' '(' constant-expression ')'
+/// [C++0x] 'alignas' '(' type-id ...[opt] ')'
+/// [C++0x] 'alignas' '(' assignment-expression ...[opt] ')'
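+///
+/// e.g. (illustrative) "_Alignas(16) char buf[64];" in C11, or
+/// "alignas(16) char buf[64];" in C++0x.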
void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc) {
assert((Tok.is(tok::kw_alignas) || Tok.is(tok::kw__Alignas)) &&
@@ -1536,7 +1781,8 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
if (T.expectAndConsume(diag::err_expected_lparen))
return;
- ExprResult ArgExpr = ParseAlignArgument(T.getOpenLocation());
+ SourceLocation EllipsisLoc;
+ ExprResult ArgExpr = ParseAlignArgument(T.getOpenLocation(), EllipsisLoc);
if (ArgExpr.isInvalid()) {
SkipUntil(tok::r_paren);
return;
@@ -1546,6 +1792,12 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
if (endLoc)
*endLoc = T.getCloseLocation();
+ // FIXME: Handle pack-expansions here.
+ if (EllipsisLoc.isValid()) {
+ Diag(EllipsisLoc, diag::err_alignas_pack_exp_unsupported);
+ return;
+ }
+
ExprVector ArgExprs(Actions);
ArgExprs.push_back(ArgExpr.release());
Attrs.addNew(PP.getIdentifierInfo("aligned"), KWLoc, 0, KWLoc,
@@ -1557,7 +1809,7 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
/// storage-class-specifier declaration-specifiers[opt]
/// type-specifier declaration-specifiers[opt]
/// [C99] function-specifier declaration-specifiers[opt]
-/// [C1X] alignment-specifier declaration-specifiers[opt]
+/// [C11] alignment-specifier declaration-specifiers[opt]
/// [GNU] attributes declaration-specifiers[opt]
/// [Clang] '__module_private__' declaration-specifiers[opt]
///
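/// e.g. (illustrative) "static _Alignas(8) unsigned counter;" combines a
/// storage-class-specifier, an alignment-specifier, and type-specifiers.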
@@ -1581,12 +1833,14 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS,
- DeclSpecContext DSContext) {
+ DeclSpecContext DSContext,
+ LateParsedAttrList *LateAttrs) {
if (DS.getSourceRange().isInvalid()) {
DS.SetRangeStart(Tok.getLocation());
DS.SetRangeEnd(Tok.getLocation());
}
+ bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
while (1) {
bool isInvalid = false;
const char *PrevSpec = 0;
@@ -1631,7 +1885,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
: Sema::PCC_Template;
else if (DSContext == DSC_class)
CCC = Sema::PCC_Class;
- else if (ObjCImpDecl)
+ else if (CurParsedObjCImpl)
CCC = Sema::PCC_ObjCImplementation;
Actions.CodeCompleteOrdinaryName(getCurScope(), CCC);
@@ -1755,6 +2009,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
Next.getLocation(),
getCurScope(), &SS,
false, false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
/*NonTrivialSourceInfo=*/true);
// If the referenced identifier is not a type, then this declspec is
@@ -1763,7 +2018,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// typename.
if (TypeRep == 0) {
ConsumeToken(); // Eat the scope spec so the identifier is current.
- if (ParseImplicitInt(DS, &SS, TemplateInfo, AS)) continue;
+ if (ParseImplicitInt(DS, &SS, TemplateInfo, AS, DSContext)) continue;
goto DoneWithDeclSpec;
}
@@ -1798,7 +2053,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
// is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
// Objective-C interface.
- if (Tok.is(tok::less) && getLang().ObjC1)
+ if (Tok.is(tok::less) && getLangOpts().ObjC1)
ParseObjCProtocolQualifiers(DS);
continue;
@@ -1823,10 +2078,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
goto DoneWithDeclSpec;
// typedef-name
+ case tok::kw_decltype:
case tok::identifier: {
// In C++, check to see if this is a scope specifier like foo::bar::, if
// so handle it as such. This is important for ctor parsing.
- if (getLang().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (TryAnnotateCXXScopeToken(true)) {
if (!DS.hasTypeSpecifier())
DS.SetTypeSpecError();
@@ -1846,7 +2102,6 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (TryAltiVecToken(DS, Loc, PrevSpec, DiagID, isInvalid))
break;
- // It has to be available as a typedef too!
ParsedType TypeRep =
Actions.getTypeName(*Tok.getIdentifierInfo(),
Tok.getLocation(), getCurScope());
@@ -1854,13 +2109,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// If this is not a typedef name, don't parse it as part of the declspec,
// it must be an implicit int or an error.
if (!TypeRep) {
- if (ParseImplicitInt(DS, 0, TemplateInfo, AS)) continue;
+ if (ParseImplicitInt(DS, 0, TemplateInfo, AS, DSContext)) continue;
goto DoneWithDeclSpec;
}
// If we're in a context where the identifier could be a class name,
// check whether this is a constructor declaration.
- if (getLang().CPlusPlus && DSContext == DSC_class &&
+ if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) &&
isConstructorDeclarator())
goto DoneWithDeclSpec;
@@ -1876,7 +2131,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
// is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
// Objective-C interface.
- if (Tok.is(tok::less) && getLang().ObjC1)
+ if (Tok.is(tok::less) && getLangOpts().ObjC1)
ParseObjCProtocolQualifiers(DS);
// Need to support trailing type qualifiers (e.g. "id<p> const").
@@ -1896,7 +2151,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// If we're in a context where the template-id could be a
// constructor name or specialization, check whether this is a
// constructor declaration.
- if (getLang().CPlusPlus && DSContext == DSC_class &&
+ if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) &&
isConstructorDeclarator())
goto DoneWithDeclSpec;
@@ -1909,7 +2164,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// GNU attributes support.
case tok::kw___attribute:
- ParseGNUAttributes(DS.getAttributes());
+ ParseGNUAttributes(DS.getAttributes(), 0, LateAttrs);
continue;
// Microsoft declspec support.
@@ -1965,7 +2220,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
PrevSpec, DiagID);
break;
case tok::kw_auto:
- if (getLang().CPlusPlus0x) {
+ if (getLangOpts().CPlusPlus0x) {
if (isKnownToBeTypeSpecifier(GetLookAheadToken(1))) {
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
PrevSpec, DiagID);
@@ -2004,8 +2259,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// alignment-specifier
case tok::kw__Alignas:
- if (!getLang().C1X)
- Diag(Tok, diag::ext_c1x_alignas);
+ if (!getLangOpts().C11)
+ Diag(Tok, diag::ext_c11_alignas);
ParseAlignmentSpecifier(DS.getAttributes());
continue;
@@ -2075,10 +2330,14 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec,
DiagID);
break;
- case tok::kw_half:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec,
- DiagID);
- break;
+ case tok::kw___int128:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_half:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec,
+ DiagID);
+ break;
case tok::kw_float:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec,
DiagID);
@@ -2143,28 +2402,29 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw_union: {
tok::TokenKind Kind = Tok.getKind();
ConsumeToken();
- ParseClassSpecifier(Kind, Loc, DS, TemplateInfo, AS);
+ ParseClassSpecifier(Kind, Loc, DS, TemplateInfo, AS,
+ EnteringContext, DSContext);
continue;
}
// enum-specifier:
case tok::kw_enum:
ConsumeToken();
- ParseEnumSpecifier(Loc, DS, TemplateInfo, AS);
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS, DSContext);
continue;
// cv-qualifier:
case tok::kw_const:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_const, Loc, PrevSpec, DiagID,
- getLang());
+ getLangOpts());
break;
case tok::kw_volatile:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
- getLang());
+ getLangOpts());
break;
case tok::kw_restrict:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
- getLang());
+ getLangOpts());
break;
// C++ typename-specifier:
@@ -2182,7 +2442,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ParseTypeofSpecifier(DS);
continue;
- case tok::kw_decltype:
+ case tok::annot_decltype:
ParseDecltypeSpecifier(DS);
continue;
@@ -2196,7 +2456,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// OpenCL qualifiers:
case tok::kw_private:
- if (!getLang().OpenCL)
+ if (!getLangOpts().OpenCL)
goto DoneWithDeclSpec;
case tok::kw___private:
case tok::kw___global:
@@ -2212,7 +2472,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// GCC ObjC supports types like "<SomeProtocol>" as a synonym for
// "id<SomeProtocol>". This is hopelessly old fashioned and dangerous,
// but we support it.
- if (DS.hasTypeSpecifier() || !getLang().ObjC1)
+ if (DS.hasTypeSpecifier() || !getLangOpts().ObjC1)
goto DoneWithDeclSpec;
if (!ParseObjCProtocolQualifiers(DS))
@@ -2242,299 +2502,6 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
}
}
-/// ParseOptionalTypeSpecifier - Try to parse a single type-specifier. We
-/// primarily follow the C++ grammar with additions for C99 and GNU,
-/// which together subsume the C grammar. Note that the C++
-/// type-specifier also includes the C type-qualifier (for const,
-/// volatile, and C99 restrict). Returns true if a type-specifier was
-/// found (and parsed), false otherwise.
-///
-/// type-specifier: [C++ 7.1.5]
-/// simple-type-specifier
-/// class-specifier
-/// enum-specifier
-/// elaborated-type-specifier [TODO]
-/// cv-qualifier
-///
-/// cv-qualifier: [C++ 7.1.5.1]
-/// 'const'
-/// 'volatile'
-/// [C99] 'restrict'
-///
-/// simple-type-specifier: [ C++ 7.1.5.2]
-/// '::'[opt] nested-name-specifier[opt] type-name [TODO]
-/// '::'[opt] nested-name-specifier 'template' template-id [TODO]
-/// 'char'
-/// 'wchar_t'
-/// 'bool'
-/// 'short'
-/// 'int'
-/// 'long'
-/// 'signed'
-/// 'unsigned'
-/// 'float'
-/// 'double'
-/// 'void'
-/// [C99] '_Bool'
-/// [C99] '_Complex'
-/// [C99] '_Imaginary' // Removed in TC2?
-/// [GNU] '_Decimal32'
-/// [GNU] '_Decimal64'
-/// [GNU] '_Decimal128'
-/// [GNU] typeof-specifier
-/// [OBJC] class-name objc-protocol-refs[opt] [TODO]
-/// [OBJC] typedef-name objc-protocol-refs[opt] [TODO]
-/// [C++0x] 'decltype' ( expression )
-/// [AltiVec] '__vector'
-bool Parser::ParseOptionalTypeSpecifier(DeclSpec &DS, bool& isInvalid,
- const char *&PrevSpec,
- unsigned &DiagID,
- const ParsedTemplateInfo &TemplateInfo,
- bool SuppressDeclarations) {
- SourceLocation Loc = Tok.getLocation();
-
- switch (Tok.getKind()) {
- case tok::identifier: // foo::bar
- // If we already have a type specifier, this identifier is not a type.
- if (DS.getTypeSpecType() != DeclSpec::TST_unspecified ||
- DS.getTypeSpecWidth() != DeclSpec::TSW_unspecified ||
- DS.getTypeSpecSign() != DeclSpec::TSS_unspecified)
- return false;
- // Check for need to substitute AltiVec keyword tokens.
- if (TryAltiVecToken(DS, Loc, PrevSpec, DiagID, isInvalid))
- break;
- // Fall through.
- case tok::kw_typename: // typename foo::bar
- // Annotate typenames and C++ scope specifiers. If we get one, just
- // recurse to handle whatever we get.
- if (TryAnnotateTypeOrScopeToken(/*EnteringContext=*/false,
- /*NeedType=*/true))
- return true;
- if (Tok.is(tok::identifier))
- return false;
- return ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
- TemplateInfo, SuppressDeclarations);
- case tok::coloncolon: // ::foo::bar
- if (NextToken().is(tok::kw_new) || // ::new
- NextToken().is(tok::kw_delete)) // ::delete
- return false;
-
- // Annotate typenames and C++ scope specifiers. If we get one, just
- // recurse to handle whatever we get.
- if (TryAnnotateTypeOrScopeToken(/*EnteringContext=*/false,
- /*NeedType=*/true))
- return true;
- return ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
- TemplateInfo, SuppressDeclarations);
-
- // simple-type-specifier:
- case tok::annot_typename: {
- if (ParsedType T = getTypeAnnotation(Tok)) {
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
- Tok.getAnnotationEndLoc(), PrevSpec,
- DiagID, T);
- } else
- DS.SetTypeSpecError();
- DS.SetRangeEnd(Tok.getAnnotationEndLoc());
- ConsumeToken(); // The typename
-
- // Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
- // is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
- // Objective-C interface. If we don't have Objective-C or a '<', this is
- // just a normal reference to a typedef name.
- if (Tok.is(tok::less) && getLang().ObjC1)
- ParseObjCProtocolQualifiers(DS);
-
- return true;
- }
-
- case tok::kw_short:
- isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_long:
- if (DS.getTypeSpecWidth() != DeclSpec::TSW_long)
- isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec,
- DiagID);
- else
- isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
- DiagID);
- break;
- case tok::kw___int64:
- isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
- DiagID);
- break;
- case tok::kw_signed:
- isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_unsigned:
- isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec,
- DiagID);
- break;
- case tok::kw__Complex:
- isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_complex, Loc, PrevSpec,
- DiagID);
- break;
- case tok::kw__Imaginary:
- isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_imaginary, Loc, PrevSpec,
- DiagID);
- break;
- case tok::kw_void:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_char:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_int:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_half:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_float:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_double:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_wchar_t:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_char16_t:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_char32_t:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char32, Loc, PrevSpec, DiagID);
- break;
- case tok::kw_bool:
- case tok::kw__Bool:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec, DiagID);
- break;
- case tok::kw__Decimal32:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal32, Loc, PrevSpec,
- DiagID);
- break;
- case tok::kw__Decimal64:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal64, Loc, PrevSpec,
- DiagID);
- break;
- case tok::kw__Decimal128:
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal128, Loc, PrevSpec,
- DiagID);
- break;
- case tok::kw___vector:
- isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
- break;
- case tok::kw___pixel:
- isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID);
- break;
-
- // class-specifier:
- case tok::kw_class:
- case tok::kw_struct:
- case tok::kw_union: {
- tok::TokenKind Kind = Tok.getKind();
- ConsumeToken();
- ParseClassSpecifier(Kind, Loc, DS, TemplateInfo, AS_none,
- SuppressDeclarations);
- return true;
- }
-
- // enum-specifier:
- case tok::kw_enum:
- ConsumeToken();
- ParseEnumSpecifier(Loc, DS, TemplateInfo, AS_none);
- return true;
-
- // cv-qualifier:
- case tok::kw_const:
- isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec,
- DiagID, getLang());
- break;
- case tok::kw_volatile:
- isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec,
- DiagID, getLang());
- break;
- case tok::kw_restrict:
- isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec,
- DiagID, getLang());
- break;
-
- // GNU typeof support.
- case tok::kw_typeof:
- ParseTypeofSpecifier(DS);
- return true;
-
- // C++0x decltype support.
- case tok::kw_decltype:
- ParseDecltypeSpecifier(DS);
- return true;
-
- // C++0x type traits support.
- case tok::kw___underlying_type:
- ParseUnderlyingTypeSpecifier(DS);
- return true;
-
- case tok::kw__Atomic:
- ParseAtomicSpecifier(DS);
- return true;
-
- // OpenCL qualifiers:
- case tok::kw_private:
- if (!getLang().OpenCL)
- return false;
- case tok::kw___private:
- case tok::kw___global:
- case tok::kw___local:
- case tok::kw___constant:
- case tok::kw___read_only:
- case tok::kw___write_only:
- case tok::kw___read_write:
- ParseOpenCLQualifiers(DS);
- break;
-
- // C++0x auto support.
- case tok::kw_auto:
- // This is only called in situations where a storage-class specifier is
- // illegal, so we can assume an auto type specifier was intended even in
- // C++98. In C++98 mode, DeclSpec::Finish will produce an appropriate
- // extension diagnostic.
- if (!getLang().CPlusPlus)
- return false;
-
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_auto, Loc, PrevSpec, DiagID);
- break;
-
- case tok::kw___ptr64:
- case tok::kw___ptr32:
- case tok::kw___w64:
- case tok::kw___cdecl:
- case tok::kw___stdcall:
- case tok::kw___fastcall:
- case tok::kw___thiscall:
- case tok::kw___unaligned:
- ParseMicrosoftTypeAttributes(DS.getAttributes());
- return true;
-
- case tok::kw___pascal:
- ParseBorlandTypeAttributes(DS.getAttributes());
- return true;
-
- default:
- // Not a type-specifier; do nothing.
- return false;
- }
-
- // If the specifier combination wasn't legal, issue a diagnostic.
- if (isInvalid) {
- assert(PrevSpec && "Method did not return previous specifier!");
- // Pick between error or extwarn.
- Diag(Tok, DiagID) << PrevSpec;
- }
- DS.SetRangeEnd(Tok.getLocation());
- ConsumeToken(); // whatever we parsed above.
- return true;
-}
-
/// ParseStructDeclaration - Parse a struct declaration without the terminating
/// semicolon.
///
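/// e.g. (illustrative) "unsigned flags : 3, : 0, mode : 2;" declares three
/// struct-declarators, one an unnamed zero-width bit-field.
///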
@@ -2574,9 +2541,11 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
// Read struct-declarators until we find the semicolon.
bool FirstDeclarator = true;
+ SourceLocation CommaLoc;
while (1) {
ParsingDeclRAIIObject PD(*this);
FieldDeclarator DeclaratorInfo(DS);
+ DeclaratorInfo.D.setCommaLoc(CommaLoc);
// Attributes are only allowed here on successive declarators.
if (!FirstDeclarator)
@@ -2612,7 +2581,7 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
return;
// Consume the comma.
- ConsumeToken();
+ CommaLoc = ConsumeToken();
FirstDeclarator = false;
}
@@ -2642,9 +2611,10 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// Empty structs are an extension in C (C99 6.7.2.1p7), but are allowed in
// C++.
- if (Tok.is(tok::r_brace) && !getLang().CPlusPlus)
- Diag(Tok, diag::ext_empty_struct_union)
- << (TagType == TST_union);
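+ // e.g. (illustrative) "struct empty {};" reaches these diagnostics in C: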
+ if (Tok.is(tok::r_brace) && !getLangOpts().CPlusPlus) {
+ Diag(Tok, diag::ext_empty_struct_union) << (TagType == TST_union);
+ Diag(Tok, diag::warn_empty_struct_union_compat) << (TagType == TST_union);
+ }
SmallVector<Decl *, 32> FieldDecls;
@@ -2742,22 +2712,25 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
/// [C99/C++] 'enum' identifier[opt] '{' enumerator-list ',' '}'
/// [GNU] 'enum' attributes[opt] identifier[opt] '{' enumerator-list ',' [opt]
/// '}' attributes[opt]
+/// [MS] 'enum' __declspec[opt] identifier[opt] '{' enumerator-list ',' [opt]
+/// '}'
/// 'enum' identifier
/// [GNU] 'enum' attributes[opt] identifier
///
-/// [C++0x] enum-head '{' enumerator-list[opt] '}'
-/// [C++0x] enum-head '{' enumerator-list ',' '}'
+/// [C++11] enum-head '{' enumerator-list[opt] '}'
+/// [C++11] enum-head '{' enumerator-list ',' '}'
///
-/// enum-head: [C++0x]
-/// enum-key attributes[opt] identifier[opt] enum-base[opt]
-/// enum-key attributes[opt] nested-name-specifier identifier enum-base[opt]
+/// enum-head: [C++11]
+/// enum-key attribute-specifier-seq[opt] identifier[opt] enum-base[opt]
+/// enum-key attribute-specifier-seq[opt] nested-name-specifier
+/// identifier enum-base[opt]
///
-/// enum-key: [C++0x]
+/// enum-key: [C++11]
/// 'enum'
/// 'enum' 'class'
/// 'enum' 'struct'
///
-/// enum-base: [C++0x]
+/// enum-base: [C++11]
/// ':' type-specifier-seq
///
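/// e.g. (illustrative) "enum class Color : unsigned char { Red, Green };"
/// uses the C++11 'enum class' key, an identifier, and an enum-base.
///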
/// [C++] elaborated-type-specifier:
@@ -2765,7 +2738,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
///
void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
- AccessSpecifier AS) {
+ AccessSpecifier AS, DeclSpecContext DSC) {
// Parse the tag portion of this.
if (Tok.is(tok::code_completion)) {
// Code completion for an enum name.
@@ -2773,30 +2746,46 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return cutOffParsing();
}
- bool IsScopedEnum = false;
+ SourceLocation ScopedEnumKWLoc;
bool IsScopedUsingClassTag = false;
- if (getLang().CPlusPlus0x &&
+ if (getLangOpts().CPlusPlus0x &&
(Tok.is(tok::kw_class) || Tok.is(tok::kw_struct))) {
- IsScopedEnum = true;
+ Diag(Tok, diag::warn_cxx98_compat_scoped_enum);
IsScopedUsingClassTag = Tok.is(tok::kw_class);
- ConsumeToken();
+ ScopedEnumKWLoc = ConsumeToken();
}
-
+
+ // C++11 [temp.explicit]p12: The usual access controls do not apply to names
+ // used to specify explicit instantiations. We extend this to also cover
+ // explicit specializations.
+ Sema::SuppressAccessChecksRAII SuppressAccess(Actions,
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+
// If attributes exist after tag, parse them.
ParsedAttributes attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
- bool AllowFixedUnderlyingType
- = getLang().CPlusPlus0x || getLang().MicrosoftExt || getLang().ObjC2;
+ // If declspecs exist after tag, parse them.
+ while (Tok.is(tok::kw___declspec))
+ ParseMicrosoftDeclSpec(attrs);
+
+ // Enum definitions should not be parsed in a trailing-return-type.
+ bool AllowDeclaration = DSC != DSC_trailing;
+
+ bool AllowFixedUnderlyingType = AllowDeclaration &&
+ (getLangOpts().CPlusPlus0x || getLangOpts().MicrosoftExt ||
+ getLangOpts().ObjC2);
CXXScopeSpec &SS = DS.getTypeSpecScope();
- if (getLang().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// "enum foo : bar;" is not a potential typo for "enum foo::bar;"
// if a fixed underlying type is allowed.
ColonProtectionRAIIObject X(*this, AllowFixedUnderlyingType);
- if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false))
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false))
return;
if (SS.isSet() && Tok.isNot(tok::identifier)) {
@@ -2812,7 +2801,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Must have either 'enum name' or 'enum {...}'.
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace) &&
- (AllowFixedUnderlyingType && Tok.isNot(tok::colon))) {
+ !(AllowFixedUnderlyingType && Tok.is(tok::colon))) {
Diag(Tok, diag::err_expected_ident_lbrace);
// Skip the rest of this declarator, up until the comma or semicolon.
@@ -2828,14 +2817,17 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
NameLoc = ConsumeToken();
}
- if (!Name && IsScopedEnum) {
+ if (!Name && ScopedEnumKWLoc.isValid()) {
// C++0x 7.2p2: The optional identifier shall not be omitted in the
// declaration of a scoped enumeration.
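// e.g. (illustrative) "enum class { A };" is rejected here and then
// handled as if it were an unscoped enum.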
Diag(Tok, diag::err_scoped_enum_missing_identifier);
- IsScopedEnum = false;
+ ScopedEnumKWLoc = SourceLocation();
IsScopedUsingClassTag = false;
}
+ // Stop suppressing access control now we've parsed the enum name.
+ SuppressAccess.done();
+
TypeResult BaseType;
// Parse the fixed underlying type.
@@ -2870,10 +2862,16 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Consume the ':'.
ConsumeToken();
-
- if ((getLang().CPlusPlus &&
- isCXXDeclarationSpecifier() != TPResult::True()) ||
- (!getLang().CPlusPlus && !isDeclarationSpecifier(true))) {
+
+ // If we see a type specifier followed by an open-brace, we have an
+ // ambiguity between an underlying type and a C++11 braced
+ // function-style cast. Resolve this by always treating it as an
+ // underlying type.
+ // FIXME: The standard is not entirely clear on how to disambiguate in
+ // this case.
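+ // e.g. (illustrative) for a member "enum E : int {};", the ": int {}" part
+ // could also be read as an unnamed bit-field of width "int{}"; we prefer
+ // the enum-base reading.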
+ if ((getLangOpts().CPlusPlus &&
+ isCXXDeclarationSpecifier(TPResult::True()) != TPResult::True()) ||
+ (!getLangOpts().CPlusPlus && !isDeclarationSpecifier(true))) {
// We'll parse this as a bitfield later.
PossibleBitfield = true;
TPA.Revert();
@@ -2891,56 +2889,74 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SourceRange Range;
BaseType = ParseTypeName(&Range);
- if (!getLang().CPlusPlus0x && !getLang().ObjC2)
+ if (!getLangOpts().CPlusPlus0x && !getLangOpts().ObjC2)
Diag(StartLoc, diag::ext_ms_enum_fixed_underlying_type)
<< Range;
+ if (getLangOpts().CPlusPlus0x)
+ Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
}
}
- // There are three options here. If we have 'enum foo;', then this is a
- // forward declaration. If we have 'enum foo {...' then this is a
- // definition. Otherwise we have something like 'enum foo xyz', a reference.
+ // There are four options here. If we have 'friend enum foo;' then this is a
+ // friend declaration, and cannot have an accompanying definition. If we have
+ // 'enum foo;', then this is a forward declaration. If we have
+ // 'enum foo {...' then this is a definition. Otherwise we have something
+ // like 'enum foo xyz', a reference.
//
// This is needed to handle stuff like this right (C99 6.7.2.3p11):
// enum foo {..}; void bar() { enum foo; } <- new foo in bar.
// enum foo {..}; void bar() { enum foo x; } <- use of old foo.
//
Sema::TagUseKind TUK;
- if (Tok.is(tok::l_brace))
+ if (DS.isFriendSpecified())
+ TUK = Sema::TUK_Friend;
+ else if (!AllowDeclaration)
+ TUK = Sema::TUK_Reference;
+ else if (Tok.is(tok::l_brace))
TUK = Sema::TUK_Definition;
- else if (Tok.is(tok::semi))
+ else if (Tok.is(tok::semi) && DSC != DSC_type_specifier)
TUK = Sema::TUK_Declaration;
else
TUK = Sema::TUK_Reference;
-
- // enums cannot be templates, although they can be referenced from a
- // template.
+
+ MultiTemplateParamsArg TParams;
if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
TUK != Sema::TUK_Reference) {
- Diag(Tok, diag::err_enum_template);
-
- // Skip the rest of this declarator, up until the comma or semicolon.
- SkipUntil(tok::comma, true);
- return;
+ if (!getLangOpts().CPlusPlus0x || !SS.isSet()) {
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ Diag(Tok, diag::err_enum_template);
+ SkipUntil(tok::comma, true);
+ return;
+ }
+
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ // Enumerations can't be explicitly instantiated.
+ DS.SetTypeSpecError();
+ Diag(StartLoc, diag::err_explicit_instantiation_enum);
+ return;
+ }
+
+ assert(TemplateInfo.TemplateParams && "no template parameters");
+ TParams = MultiTemplateParamsArg(TemplateInfo.TemplateParams->data(),
+ TemplateInfo.TemplateParams->size());
}
-
+
if (!Name && TUK != Sema::TUK_Definition) {
Diag(Tok, diag::err_enumerator_unnamed_no_def);
-
+
// Skip the rest of this declarator, up until the comma or semicolon.
SkipUntil(tok::comma, true);
return;
}
-
+
bool Owned = false;
bool IsDependent = false;
const char *PrevSpec = 0;
unsigned DiagID;
Decl *TagDecl = Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK,
StartLoc, SS, Name, NameLoc, attrs.getList(),
- AS, DS.getModulePrivateSpecLoc(),
- MultiTemplateParamsArg(Actions),
- Owned, IsDependent, IsScopedEnum,
+ AS, DS.getModulePrivateSpecLoc(), TParams,
+ Owned, IsDependent, ScopedEnumKWLoc,
IsScopedUsingClassTag, BaseType);
if (IsDependent) {
@@ -2971,7 +2987,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (!TagDecl) {
// The action failed to produce an enumeration tag. If this is a
// definition, consume the entire definition.
- if (Tok.is(tok::l_brace)) {
+ if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
ConsumeBrace();
SkipUntil(tok::r_brace);
}
@@ -2979,9 +2995,17 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
DS.SetTypeSpecError();
return;
}
-
- if (Tok.is(tok::l_brace))
- ParseEnumBody(StartLoc, TagDecl);
+
+ if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ if (TUK == Sema::TUK_Friend) {
+ Diag(Tok, diag::err_friend_decl_defines_type)
+ << SourceRange(DS.getFriendSpecLoc());
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+ } else {
+ ParseEnumBody(StartLoc, TagDecl);
+ }
+ }
if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc,
NameLoc.isValid() ? NameLoc : StartLoc,
@@ -3008,7 +3032,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
T.consumeOpen();
// C does not allow an empty enumerator-list, C++ does [dcl.enum].
- if (Tok.is(tok::r_brace) && !getLang().CPlusPlus)
+ if (Tok.is(tok::r_brace) && !getLangOpts().CPlusPlus)
Diag(Tok, diag::error_empty_enum);
SmallVector<Decl *, 32> EnumConstantDecls;
@@ -3026,6 +3050,8 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
SourceLocation EqualLoc;
ExprResult AssignedVal;
+ ParsingDeclRAIIObject PD(*this);
+
if (Tok.is(tok::equal)) {
EqualLoc = ConsumeToken();
AssignedVal = ParseConstantExpression();
@@ -3039,6 +3065,8 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
IdentLoc, Ident,
attrs.getList(), EqualLoc,
AssignedVal.release());
+ PD.complete(EnumConstDecl);
+
EnumConstantDecls.push_back(EnumConstDecl);
LastEnumConstDecl = EnumConstDecl;
@@ -3054,11 +3082,15 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
break;
SourceLocation CommaLoc = ConsumeToken();
- if (Tok.isNot(tok::identifier) &&
- !(getLang().C99 || getLang().CPlusPlus0x))
- Diag(CommaLoc, diag::ext_enumerator_list_comma)
- << getLang().CPlusPlus
- << FixItHint::CreateRemoval(CommaLoc);
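+ // e.g. (illustrative) the trailing comma in "enum E { a, b, };" is valid
+ // in C99 and C++11 but only an extension in C89 and C++98: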
+ if (Tok.isNot(tok::identifier)) {
+ if (!getLangOpts().C99 && !getLangOpts().CPlusPlus0x)
+ Diag(CommaLoc, diag::ext_enumerator_list_comma)
+ << getLangOpts().CPlusPlus
+ << FixItHint::CreateRemoval(CommaLoc);
+ else if (getLangOpts().CPlusPlus0x)
+ Diag(CommaLoc, diag::warn_cxx98_compat_enumerator_list_comma)
+ << FixItHint::CreateRemoval(CommaLoc);
+ }
}
// Eat the }.
@@ -3086,7 +3118,7 @@ bool Parser::isTypeQualifier() const {
// type-qualifier only in OpenCL
case tok::kw_private:
- return getLang().OpenCL;
+ return getLangOpts().OpenCL;
// type-qualifier
case tok::kw_const:
@@ -3113,6 +3145,7 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
case tok::kw_short:
case tok::kw_long:
case tok::kw___int64:
+ case tok::kw___int128:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw__Complex:
@@ -3183,6 +3216,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw_short:
case tok::kw_long:
case tok::kw___int64:
+ case tok::kw___int128:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw__Complex:
@@ -3221,7 +3255,7 @@ bool Parser::isTypeSpecifierQualifier() {
// GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
case tok::less:
- return getLang().ObjC1;
+ return getLangOpts().ObjC1;
case tok::kw___cdecl:
case tok::kw___stdcall:
@@ -3244,9 +3278,9 @@ bool Parser::isTypeSpecifierQualifier() {
return true;
case tok::kw_private:
- return getLang().OpenCL;
+ return getLangOpts().OpenCL;
- // C1x _Atomic()
+ // C11 _Atomic()
case tok::kw__Atomic:
return true;
}
@@ -3262,15 +3296,16 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
default: return false;
case tok::kw_private:
- return getLang().OpenCL;
+ return getLangOpts().OpenCL;
case tok::identifier: // foo::bar
// Unfortunate hack to support "Class.factoryMethod" notation.
- if (getLang().ObjC1 && NextToken().is(tok::period))
+ if (getLangOpts().ObjC1 && NextToken().is(tok::period))
return false;
if (TryAltiVecVectorToken())
return true;
// Fall through.
+ case tok::kw_decltype: // decltype(T())::type
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
@@ -3317,6 +3352,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_short:
case tok::kw_long:
case tok::kw___int64:
+ case tok::kw___int128:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw__Complex:
@@ -3366,16 +3402,16 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
return true;
// C++0x decltype.
- case tok::kw_decltype:
+ case tok::annot_decltype:
return true;
- // C1x _Atomic()
+ // C11 _Atomic()
case tok::kw__Atomic:
return true;
// GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
case tok::less:
- return getLang().ObjC1;
+ return getLangOpts().ObjC1;
// typedef-name
case tok::annot_typename:
@@ -3411,7 +3447,8 @@ bool Parser::isConstructorDeclarator() {
// Parse the C++ scope specifier.
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), true)) {
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/true)) {
TPA.Revert();
return false;
}
@@ -3426,15 +3463,17 @@ bool Parser::isConstructorDeclarator() {
return false;
}
- // Current class name must be followed by a left parentheses.
+ // Current class name must be followed by a left parenthesis.
if (Tok.isNot(tok::l_paren)) {
TPA.Revert();
return false;
}
ConsumeParen();
- // A right parentheses or ellipsis signals that we have a constructor.
- if (Tok.is(tok::r_paren) || Tok.is(tok::ellipsis)) {
+ // A right parenthesis, or ellipsis followed by a right parenthesis signals
+ // that we have a constructor.
+ if (Tok.is(tok::r_paren) ||
+ (Tok.is(tok::ellipsis) && NextToken().is(tok::r_paren))) {
TPA.Revert();
return true;
}
@@ -3451,7 +3490,43 @@ bool Parser::isConstructorDeclarator() {
// Check whether the next token(s) are part of a declaration
// specifier, in which case we have the start of a parameter and,
// therefore, we know that this is a constructor.
- bool IsConstructor = isDeclarationSpecifier();
+ bool IsConstructor = false;
+ if (isDeclarationSpecifier())
+ IsConstructor = true;
+ else if (Tok.is(tok::identifier) ||
+ (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier))) {
+ // We've seen "C ( X" or "C ( X::Y", but "X" / "X::Y" is not a type.
+ // This might be a parenthesized member name, but is more likely to
+ // be a constructor declaration with an invalid argument type. Keep
+ // looking.
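+ // e.g. (illustrative) "C(istream_t is);" with "istream_t" undeclared is
+ // still recognized as a constructor by the checks below.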
+ if (Tok.is(tok::annot_cxxscope))
+ ConsumeToken();
+ ConsumeToken();
+
+ // If this is not a constructor, we must be parsing a declarator,
+ // which must have one of the following syntactic forms (see the
+ // grammar extract at the start of ParseDirectDeclarator):
+ switch (Tok.getKind()) {
+ case tok::l_paren:
+ // C(X ( int));
+ case tok::l_square:
+ // C(X [ 5]);
+ // C(X [ [attribute]]);
+ case tok::coloncolon:
+ // C(X :: Y);
+ // C(X :: *p);
+ case tok::r_paren:
+ // C(X )
+ // Assume this isn't a constructor, rather than assuming it's a
+ // constructor with an unnamed parameter of an ill-formed type.
+ break;
+
+ default:
+ IsConstructor = true;
+ break;
+ }
+ }
+
TPA.Revert();
return IsConstructor;
}
@@ -3470,15 +3545,12 @@ bool Parser::isConstructorDeclarator() {
///
void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
bool VendorAttributesAllowed,
- bool CXX0XAttributesAllowed) {
- if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) {
- SourceLocation Loc = Tok.getLocation();
+ bool CXX11AttributesAllowed) {
+ if (getLangOpts().CPlusPlus0x && CXX11AttributesAllowed &&
+ isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
- ParseCXX0XAttributes(attrs);
- if (CXX0XAttributesAllowed)
- DS.takeAttributesFrom(attrs);
- else
- Diag(Loc, diag::err_attributes_not_allowed);
+ ParseCXX11Attributes(attrs);
+ DS.takeAttributesFrom(attrs);
}
SourceLocation EndLoc;
@@ -3496,20 +3568,20 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
case tok::kw_const:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec, DiagID,
- getLang());
+ getLangOpts());
break;
case tok::kw_volatile:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
- getLang());
+ getLangOpts());
break;
case tok::kw_restrict:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
- getLang());
+ getLangOpts());
break;
// OpenCL qualifiers:
case tok::kw_private:
- if (!getLang().OpenCL)
+ if (!getLangOpts().OpenCL)
goto DoneWithTypeQuals;
case tok::kw___private:
case tok::kw___global:
@@ -3574,11 +3646,26 @@ void Parser::ParseDeclarator(Declarator &D) {
ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
}
+static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang) {
+ if (Kind == tok::star || Kind == tok::caret)
+ return true;
+
+ // We parse rvalue refs in C++03, because otherwise the errors are scary.
+ if (!Lang.CPlusPlus)
+ return false;
+
+ return Kind == tok::amp || Kind == tok::ampamp;
+}
+
/// ParseDeclaratorInternal - Parse a C or C++ declarator. The direct-declarator
/// is parsed by the function passed to it. Pass null, and the direct-declarator
/// isn't parsed at all, making this function effectively parse the C++
/// ptr-operator production.
///
+/// If the grammar of this construct is extended, matching changes must also be
+/// made to TryParseDeclarator and MightBeDeclarator, and possibly to
+/// isConstructorDeclarator.
+///
/// declarator: [C99 6.7.5] [C++ 8p4, dcl.decl]
/// [C] pointer[opt] direct-declarator
/// [C++] direct-declarator
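/// e.g. (illustrative) "int *const p;" is parsed as the ptr-operator
/// '* const' followed by the direct-declarator 'p'.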
@@ -3603,11 +3690,13 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// C++ member pointers start with a '::' or a nested-name.
// Member pointers get special handling, since there's no place for the
// scope spec in the generic path below.
- if (getLang().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
(Tok.is(tok::coloncolon) || Tok.is(tok::identifier) ||
Tok.is(tok::annot_cxxscope))) {
+ bool EnteringContext = D.getContext() == Declarator::FileContext ||
+ D.getContext() == Declarator::MemberContext;
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), true); // ignore fail
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext);
if (SS.isNotEmpty()) {
if (Tok.isNot(tok::star)) {
@@ -3639,10 +3728,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
tok::TokenKind Kind = Tok.getKind();
// Not a pointer, C++ reference, or block.
- if (Kind != tok::star && Kind != tok::caret &&
- (Kind != tok::amp || !getLang().CPlusPlus) &&
- // We parse rvalue refs in C++03, because otherwise the errors are scary.
- (Kind != tok::ampamp || !getLang().CPlusPlus)) {
+ if (!isPtrOperatorToken(Kind, getLangOpts())) {
if (DirectDeclParser)
(this->*DirectDeclParser)(D);
return;
@@ -3657,6 +3743,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Is a pointer.
DeclSpec DS(AttrFactory);
+ // FIXME: GNU attributes are not allowed here in a new-type-id.
ParseTypeQualifierListOpt(DS);
D.ExtendWithDeclSpec(DS);
@@ -3682,19 +3769,18 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Complain about rvalue references in C++03, but then go on and build
// the declarator.
- if (Kind == tok::ampamp && !getLang().CPlusPlus0x)
- Diag(Loc, diag::ext_rvalue_reference);
+ if (Kind == tok::ampamp)
+ Diag(Loc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_rvalue_reference :
+ diag::ext_rvalue_reference);
+
+ // GNU-style and C++11 attributes are allowed here, as is restrict.
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
// C++ 8.3.2p1: cv-qualified references are ill-formed except when the
// cv-qualifiers are introduced through the use of a typedef or of a
// template type argument, in which case the cv-qualifiers are ignored.
- //
- // [GNU] Retricted references are allowed.
- // [GNU] Attributes on references are allowed.
- // [C++0x] Attributes on references are not allowed.
- ParseTypeQualifierListOpt(DS, true, false);
- D.ExtendWithDeclSpec(DS);
-
if (DS.getTypeQualifiers() != DeclSpec::TQ_unspecified) {
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
Diag(DS.getConstSpecLoc(),
@@ -3732,6 +3818,19 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
}
}
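+// Illustrative example: in "template<class ...T> void f(T ...*p);" the
+// ellipsis should follow the ptr-operator, as in "void f(T *...p);"; this
+// helper diagnoses the misplacement and emits fix-it hints.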
+static void diagnoseMisplacedEllipsis(Parser &P, Declarator &D,
+ SourceLocation EllipsisLoc) {
+ if (EllipsisLoc.isValid()) {
+ FixItHint Insertion;
+ if (!D.getEllipsisLoc().isValid()) {
+ Insertion = FixItHint::CreateInsertion(D.getIdentifierLoc(), "...");
+ D.setEllipsisLoc(EllipsisLoc);
+ }
+ P.Diag(EllipsisLoc, diag::err_misplaced_ellipsis_in_declaration)
+ << FixItHint::CreateRemoval(EllipsisLoc) << Insertion << !D.hasName();
+ }
+}
+
/// ParseDirectDeclarator
/// direct-declarator: [C99 6.7.5]
/// [C99] identifier
@@ -3742,13 +3841,19 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
/// [C99] direct-declarator '[' 'static' type-qual-list[opt] assign-expr ']'
/// [C99] direct-declarator '[' type-qual-list 'static' assignment-expr ']'
/// [C99] direct-declarator '[' type-qual-list[opt] '*' ']'
+/// [C++11] direct-declarator '[' constant-expression[opt] ']'
+/// attribute-specifier-seq[opt]
/// direct-declarator '(' parameter-type-list ')'
/// direct-declarator '(' identifier-list[opt] ')'
/// [GNU] direct-declarator '(' parameter-forward-declarations
/// parameter-type-list[opt] ')'
/// [C++] direct-declarator '(' parameter-declaration-clause ')'
/// cv-qualifier-seq[opt] exception-specification[opt]
+/// [C++11] direct-declarator '(' parameter-declaration-clause ')'
+/// attribute-specifier-seq[opt] cv-qualifier-seq[opt]
+/// ref-qualifier[opt] exception-specification[opt]
/// [C++] declarator-id
+/// [C++11] declarator-id attribute-specifier-seq[opt]
///
/// declarator-id: [C++ 8]
/// '...'[opt] id-expression
@@ -3765,13 +3870,18 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
/// '~' class-name
/// template-id
///
+/// Note, any additional constructs added here may need corresponding changes
+/// in isConstructorDeclarator.
void Parser::ParseDirectDeclarator(Declarator &D) {
DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
- if (getLang().CPlusPlus && D.mayHaveIdentifier()) {
+ if (getLangOpts().CPlusPlus && D.mayHaveIdentifier()) {
// ParseDeclaratorInternal might already have parsed the scope.
if (D.getCXXScopeSpec().isEmpty()) {
- ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), ParsedType(), true);
+ bool EnteringContext = D.getContext() == Declarator::FileContext ||
+ D.getContext() == Declarator::MemberContext;
+ ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), ParsedType(),
+ EnteringContext);
}
if (D.getCXXScopeSpec().isValid()) {
@@ -3788,13 +3898,26 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// abstract-declarator if the type of the parameter names a template
// parameter pack that has not been expanded; otherwise, it is parsed
// as part of the parameter-declaration-clause.
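// e.g. (illustrative) in "void f(T...);" where T names an unexpanded pack,
// the ellipsis expands the pack; in "void f(int...);" it instead ends the
// parameter-declaration-clause of a variadic function.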
- if (Tok.is(tok::ellipsis) &&
+ if (Tok.is(tok::ellipsis) && D.getCXXScopeSpec().isEmpty() &&
!((D.getContext() == Declarator::PrototypeContext ||
D.getContext() == Declarator::BlockLiteralContext) &&
NextToken().is(tok::r_paren) &&
- !Actions.containsUnexpandedParameterPacks(D)))
- D.setEllipsisLoc(ConsumeToken());
-
+ !Actions.containsUnexpandedParameterPacks(D))) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ if (isPtrOperatorToken(Tok.getKind(), getLangOpts())) {
+ // The ellipsis was put in the wrong place. Recover, and explain to
+ // the user what they should have done.
+ ParseDeclarator(D);
+ diagnoseMisplacedEllipsis(*this, D, EllipsisLoc);
+ return;
+ } else
+ D.setEllipsisLoc(EllipsisLoc);
+
+ // The ellipsis can't be followed by a parenthesized declarator. We
+ // check for that in ParseParenDeclarator, after we have disambiguated
+ // the l_paren token.
+ }
+
if (Tok.is(tok::identifier) || Tok.is(tok::kw_operator) ||
Tok.is(tok::annot_template_id) || Tok.is(tok::tilde)) {
// We found something that indicates the start of an unqualified-id.
@@ -3810,11 +3933,13 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
else
AllowConstructorName = (D.getContext() == Declarator::MemberContext);
+ SourceLocation TemplateKWLoc;
if (ParseUnqualifiedId(D.getCXXScopeSpec(),
/*EnteringContext=*/true,
/*AllowDestructorName=*/true,
AllowConstructorName,
ParsedType(),
+ TemplateKWLoc,
D.getName()) ||
// Once we're past the identifier, if the scope was bad, mark the
// whole declarator bad.
@@ -3830,14 +3955,14 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
goto PastIdentifier;
}
} else if (Tok.is(tok::identifier) && D.mayHaveIdentifier()) {
- assert(!getLang().CPlusPlus &&
+ assert(!getLangOpts().CPlusPlus &&
"There's a C++-specific check for tok::identifier above");
assert(Tok.getIdentifierInfo() && "Not an identifier?");
D.SetIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
ConsumeToken();
goto PastIdentifier;
}
-
+
if (Tok.is(tok::l_paren)) {
// direct-declarator: '(' declarator ')'
// direct-declarator: '(' attributes declarator ')'
@@ -3849,7 +3974,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// the scope already. Re-enter the scope, if we need to.
if (D.getCXXScopeSpec().isSet()) {
// If there was an error parsing parenthesized declarator, declarator
- // scope may have been enterred before. Don't do it again.
+ // scope may have been entered before. Don't do it again.
if (!D.isInvalidType() &&
Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec()))
// Change the declaration context for name lookup, until this function
@@ -3864,8 +3989,8 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
if (D.getContext() == Declarator::MemberContext)
Diag(Tok, diag::err_expected_member_name_or_semi)
<< D.getDeclSpec().getSourceRange();
- else if (getLang().CPlusPlus)
- Diag(Tok, diag::err_expected_unqualified_id) << getLang().CPlusPlus;
+ else if (getLangOpts().CPlusPlus)
+ Diag(Tok, diag::err_expected_unqualified_id) << getLangOpts().CPlusPlus;
else
Diag(Tok, diag::err_expected_ident_lparen);
D.SetIdentifier(0, Tok.getLocation());
@@ -3876,16 +4001,20 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
assert(D.isPastIdentifier() &&
"Haven't past the location of the identifier yet?");
- // Don't parse attributes unless we have an identifier.
- if (D.getIdentifier())
+ // Don't parse attributes unless we have parsed an unparenthesized name.
+ if (D.hasName() && !D.getNumTypeObjects())
MaybeParseCXX0XAttributes(D);
while (1) {
if (Tok.is(tok::l_paren)) {
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
// The paren may be part of a C++ direct initializer, e.g. "int x(1);".
// In such a case, check if we actually have a function declarator; if
// not, the declarator has been fully parsed.
- if (getLang().CPlusPlus && D.mayBeFollowedByCXXDirectInit()) {
+ if (getLangOpts().CPlusPlus && D.mayBeFollowedByCXXDirectInit()) {
// When not in file scope, warn for ambiguous function declarators, just
// in case the author intended it as a variable definition.
bool warnIfAmbiguous = D.getContext() != Declarator::FileContext;
@@ -3896,13 +4025,14 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
ParseFunctionDeclarator(D, attrs, T);
+ PrototypeScope.Exit();
} else if (Tok.is(tok::l_square)) {
ParseBracketDeclarator(D);
} else {
break;
}
}
-}
+}
/// ParseParenDeclarator - We parsed the declarator D up to a paren. This is
/// only called before the identifier, so these are most likely just grouping
@@ -3964,8 +4094,10 @@ void Parser::ParseParenDeclarator(Declarator &D) {
// paren, because we haven't seen the identifier yet.
isGrouping = true;
} else if (Tok.is(tok::r_paren) || // 'int()' is a function.
- (getLang().CPlusPlus && Tok.is(tok::ellipsis)) || // C++ int(...)
- isDeclarationSpecifier()) { // 'int(int)' is a function.
+ (getLangOpts().CPlusPlus && Tok.is(tok::ellipsis) &&
+ NextToken().is(tok::r_paren)) || // C++ int(...)
+ isDeclarationSpecifier() || // 'int(int)' is a function.
+ isCXX11AttributeSpecifier()) { // 'int([[]]int)' is a function.
// This handles C99 6.7.5.3p11: in "typedef int X; void foo(X)", X is
// considered to be a type, not a K&R identifier-list.
isGrouping = false;
@@ -3978,9 +4110,11 @@ void Parser::ParseParenDeclarator(Declarator &D) {
// direct-declarator: '(' declarator ')'
// direct-declarator: '(' attributes declarator ')'
if (isGrouping) {
+ SourceLocation EllipsisLoc = D.getEllipsisLoc();
+ D.setEllipsisLoc(SourceLocation());
+
bool hadGroupingParens = D.hasGroupingParens();
D.setGroupingParens(true);
-
ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
// Match the ')'.
T.consumeClose();
@@ -3989,6 +4123,11 @@ void Parser::ParseParenDeclarator(Declarator &D) {
attrs, T.getCloseLocation());
D.setGroupingParens(hadGroupingParens);
+
+ // An ellipsis cannot be placed outside parentheses.
+ if (EllipsisLoc.isValid())
+ diagnoseMisplacedEllipsis(*this, D, EllipsisLoc);
+
return;
}
@@ -3998,31 +4137,39 @@ void Parser::ParseParenDeclarator(Declarator &D) {
// ParseFunctionDeclarator to handle the argument list.
D.SetIdentifier(0, Tok.getLocation());
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
ParseFunctionDeclarator(D, attrs, T, RequiresArg);
+ PrototypeScope.Exit();
}
/// ParseFunctionDeclarator - We are after the identifier and have parsed the
/// declarator D up to a paren, which indicates that we are parsing function
/// arguments.
///
-/// If attrs is non-null, then the caller parsed those arguments immediately
-/// after the open paren - they should be considered to be the first argument of
-/// a parameter. If RequiresArg is true, then the first argument of the
-/// function is required to be present and required to not be an identifier
-/// list.
+/// If FirstArgAttrs is non-null, then the caller parsed those arguments
+/// immediately after the open paren - they should be considered to be part
+/// of the first parameter.
+///
+/// If RequiresArg is true, then the first argument of the function is required
+/// to be present and required to not be an identifier list.
///
-/// For C++, after the parameter-list, it also parses cv-qualifier-seq[opt],
-/// (C++0x) ref-qualifier[opt], exception-specification[opt], and
-/// (C++0x) trailing-return-type[opt].
+/// For C++, after the parameter-list, it also parses the cv-qualifier-seq[opt],
+/// (C++11) ref-qualifier[opt], exception-specification[opt],
+/// (C++11) attribute-specifier-seq[opt], and (C++11) trailing-return-type[opt].
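+///
+/// e.g. (illustrative) "auto f() const & noexcept -> int;" as a member
+/// declaration supplies the cv-qualifier-seq, ref-qualifier,
+/// exception-specification, and trailing-return-type in the order parsed below.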
///
-/// [C++0x] exception-specification:
+/// [C++11] exception-specification:
/// dynamic-exception-specification
/// noexcept-specification
///
void Parser::ParseFunctionDeclarator(Declarator &D,
- ParsedAttributes &attrs,
+ ParsedAttributes &FirstArgAttrs,
BalancedDelimiterTracker &Tracker,
bool RequiresArg) {
+ assert(getCurScope()->isFunctionPrototypeScope() &&
+ "Should call from a Function scope");
// lparen is already consumed!
assert(D.isPastIdentifier() && "Should not call before identifier!");
@@ -4037,13 +4184,18 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
DeclSpec DS(AttrFactory);
bool RefQualifierIsLValueRef = true;
SourceLocation RefQualifierLoc;
+ SourceLocation ConstQualifierLoc;
+ SourceLocation VolatileQualifierLoc;
ExceptionSpecificationType ESpecType = EST_None;
SourceRange ESpecRange;
SmallVector<ParsedType, 2> DynamicExceptions;
SmallVector<SourceRange, 2> DynamicExceptionRanges;
ExprResult NoexceptExpr;
+ ParsedAttributes FnAttrs(AttrFactory);
ParsedType TrailingReturnType;
-
+
+ Actions.ActOnStartFunctionDeclarator();
+
SourceLocation EndLoc;
if (isFunctionDeclaratorIdentifierList()) {
if (RequiresArg)
@@ -4054,35 +4206,36 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
Tracker.consumeClose();
EndLoc = Tracker.getCloseLocation();
} else {
- // Enter function-declaration scope, limiting any declarators to the
- // function prototype scope, including parameter declarators.
- ParseScope PrototypeScope(this,
- Scope::FunctionPrototypeScope|Scope::DeclScope);
-
if (Tok.isNot(tok::r_paren))
- ParseParameterDeclarationClause(D, attrs, ParamInfo, EllipsisLoc);
+ ParseParameterDeclarationClause(D, FirstArgAttrs, ParamInfo, EllipsisLoc);
else if (RequiresArg)
Diag(Tok, diag::err_argument_required_after_attribute);
- HasProto = ParamInfo.size() || getLang().CPlusPlus;
+ HasProto = ParamInfo.size() || getLangOpts().CPlusPlus;
// If we have the closing ')', eat it.
Tracker.consumeClose();
EndLoc = Tracker.getCloseLocation();
- if (getLang().CPlusPlus) {
- MaybeParseCXX0XAttributes(attrs);
+ if (getLangOpts().CPlusPlus) {
+ // FIXME: Accept these components in any order, and produce fixits to
+ // correct the order if the user gets it wrong. Ideally we should deal
+ // with the virt-specifier-seq and pure-specifier in the same way.
// Parse cv-qualifier-seq[opt].
- ParseTypeQualifierListOpt(DS, false /*no attributes*/);
- if (!DS.getSourceRange().getEnd().isInvalid())
- EndLoc = DS.getSourceRange().getEnd();
+ ParseTypeQualifierListOpt(DS, false /*no attributes*/, false);
+ if (!DS.getSourceRange().getEnd().isInvalid()) {
+ EndLoc = DS.getSourceRange().getEnd();
+ ConstQualifierLoc = DS.getConstSpecLoc();
+ VolatileQualifierLoc = DS.getVolatileSpecLoc();
+ }
// Parse ref-qualifier[opt].
if (Tok.is(tok::amp) || Tok.is(tok::ampamp)) {
- if (!getLang().CPlusPlus0x)
- Diag(Tok, diag::ext_ref_qualifier);
-
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_ref_qualifier :
+ diag::ext_ref_qualifier);
+
RefQualifierIsLValueRef = Tok.is(tok::amp);
RefQualifierLoc = ConsumeToken();
EndLoc = RefQualifierLoc;
@@ -4096,17 +4249,19 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
if (ESpecType != EST_None)
EndLoc = ESpecRange.getEnd();
+ // Parse attribute-specifier-seq[opt]. Per DR 979 and DR 1297, this goes
+ // after the exception-specification.
+ MaybeParseCXX0XAttributes(FnAttrs);
+
// Parse trailing-return-type[opt].
- if (getLang().CPlusPlus0x && Tok.is(tok::arrow)) {
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::arrow)) {
+ Diag(Tok, diag::warn_cxx98_compat_trailing_return_type);
SourceRange Range;
TrailingReturnType = ParseTrailingReturnType(Range).get();
if (Range.getEnd().isValid())
EndLoc = Range.getEnd();
}
}
-
- // Leave prototype scope.
- PrototypeScope.Exit();
}
// Remember that we parsed a function type, and remember the attributes.
@@ -4116,7 +4271,8 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
ParamInfo.data(), ParamInfo.size(),
DS.getTypeQualifiers(),
RefQualifierIsLValueRef,
- RefQualifierLoc,
+ RefQualifierLoc, ConstQualifierLoc,
+ VolatileQualifierLoc,
/*MutableLoc=*/SourceLocation(),
ESpecType, ESpecRange.getBegin(),
DynamicExceptions.data(),
@@ -4127,7 +4283,9 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
Tracker.getOpenLocation(),
EndLoc, D,
TrailingReturnType),
- attrs, EndLoc);
+ FnAttrs, EndLoc);
+
+ Actions.ActOnEndFunctionDeclarator();
}
/// isFunctionDeclaratorIdentifierList - This parameter list may have an
@@ -4136,7 +4294,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
/// Note that identifier-lists are only allowed for normal declarators, not for
/// abstract-declarators.
bool Parser::isFunctionDeclaratorIdentifierList() {
- return !getLang().CPlusPlus
+ return !getLangOpts().CPlusPlus
&& Tok.is(tok::identifier)
&& !TryAltiVecVectorToken()
// K&R identifier lists can't have typedefs as identifiers, per C99
@@ -4219,9 +4377,9 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
/// after the opening parenthesis. This function will not parse a K&R-style
/// identifier list.
///
-/// D is the declarator being parsed. If attrs is non-null, then the caller
-/// parsed those arguments immediately after the open paren - they should be
-/// considered to be the first argument of a parameter.
+/// D is the declarator being parsed. If FirstArgAttrs is non-null, then the
+/// caller parsed those arguments immediately after the open paren - they should
+/// be considered to be part of the first parameter.
///
/// After returning, ParamInfo will hold the parsed parameters. EllipsisLoc will
/// be the location of the ellipsis, if any was parsed.
@@ -4238,20 +4396,24 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
/// parameter-declaration: [C99 6.7.5]
/// declaration-specifiers declarator
/// [C++] declaration-specifiers declarator '=' assignment-expression
+/// [C++11] initializer-clause
/// [GNU] declaration-specifiers declarator attributes
/// declaration-specifiers abstract-declarator[opt]
/// [C++] declaration-specifiers abstract-declarator[opt]
/// '=' assignment-expression
/// [GNU] declaration-specifiers abstract-declarator[opt] attributes
+/// [C++11] attribute-specifier-seq parameter-declaration
///
void Parser::ParseParameterDeclarationClause(
Declarator &D,
- ParsedAttributes &attrs,
+ ParsedAttributes &FirstArgAttrs,
SmallVector<DeclaratorChunk::ParamInfo, 16> &ParamInfo,
SourceLocation &EllipsisLoc) {
while (1) {
if (Tok.is(tok::ellipsis)) {
+ // FIXME: Issue a diagnostic if we parsed an attribute-specifier-seq
+ // before deciding this was a parameter-declaration-clause.
EllipsisLoc = ConsumeToken(); // Consume the ellipsis.
break;
}
@@ -4260,20 +4422,21 @@ void Parser::ParseParameterDeclarationClause(
// Just use the ParsingDeclaration "scope" of the declarator.
DeclSpec DS(AttrFactory);
+ // Parse any C++11 attributes.
+ MaybeParseCXX0XAttributes(DS.getAttributes());
+
// Skip any Microsoft attributes before a param.
- if (getLang().MicrosoftExt && Tok.is(tok::l_square))
+ if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(DS.getAttributes());
SourceLocation DSStart = Tok.getLocation();
// If the caller parsed attributes for the first argument, add them now.
// Take them so that we only apply the attributes to the first parameter.
- // FIXME: If we saw an ellipsis first, this code is not reached. Are the
- // attributes lost? Should they even be allowed?
// FIXME: If we can leave the attributes in the token stream somehow, we can
- // get rid of a parameter (attrs) and this statement. It might be too much
- // hassle.
- DS.takeAttributesFrom(attrs);
+ // get rid of a parameter (FirstArgAttrs) and this statement. It might be
+ // too much hassle.
+ DS.takeAttributesFrom(FirstArgAttrs);
ParseDeclarationSpecifiers(DS);
@@ -4346,9 +4509,15 @@ void Parser::ParseParameterDeclarationClause(
// The argument isn't actually potentially evaluated unless it is
// used.
EnterExpressionEvaluationContext Eval(Actions,
- Sema::PotentiallyEvaluatedIfUsed);
-
- ExprResult DefArgResult(ParseAssignmentExpression());
+ Sema::PotentiallyEvaluatedIfUsed,
+ Param);
+
+ ExprResult DefArgResult;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ DefArgResult = ParseBraceInitializer();
+ } else
+ DefArgResult = ParseAssignmentExpression();
if (DefArgResult.isInvalid()) {
Actions.ActOnParamDefaultArgumentError(Param);
SkipUntil(tok::comma, tok::r_paren, true, true);
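A minimal sketch (annotation, not part of the diff) of the C++11
initializer-clause default argument the hunk above now parses with
ParseBraceInitializer(); 'plot' is a hypothetical function:

    #include <vector>
    void plot(std::vector<int> points = {1, 2, 3});  // braced-init-list default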
@@ -4370,7 +4539,7 @@ void Parser::ParseParameterDeclarationClause(
if (Tok.is(tok::ellipsis)) {
EllipsisLoc = ConsumeToken(); // Consume the ellipsis.
- if (!getLang().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
// We have ellipsis without a preceding ',', which is ill-formed
// in C. Complain and provide the fix.
Diag(EllipsisLoc, diag::err_missing_comma_before_ellipsis)
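A minimal sketch (annotation, not part of the diff) of the C/C++ divergence the
fixit above handles: C++ lets the ellipsis follow the last parameter directly,
while C requires the comma:

    int sum(int count, ...);  // valid in both C and C++
    int max(int count ...);   // C++ only; in C the ',' must be inserted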
@@ -4392,7 +4561,12 @@ void Parser::ParseParameterDeclarationClause(
/// [C99] direct-declarator '[' 'static' type-qual-list[opt] assign-expr ']'
/// [C99] direct-declarator '[' type-qual-list 'static' assignment-expr ']'
/// [C99] direct-declarator '[' type-qual-list[opt] '*' ']'
+/// [C++11] direct-declarator '[' constant-expression[opt] ']'
+/// attribute-specifier-seq[opt]
void Parser::ParseBracketDeclarator(Declarator &D) {
+ if (CheckProhibitedCXX11Attribute())
+ return;
+
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
@@ -4413,7 +4587,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
} else if (Tok.getKind() == tok::numeric_constant &&
GetLookAheadToken(1).is(tok::r_square)) {
// [4] is very common. Parse the numeric constant expression.
- ExprResult ExprRes(Actions.ActOnNumericConstant(Tok));
+ ExprResult ExprRes(Actions.ActOnNumericConstant(Tok, getCurScope()));
ConsumeToken();
T.consumeClose();
@@ -4468,10 +4642,13 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
// Parse the constant-expression or assignment-expression now (depending
// on dialect).
- if (getLang().CPlusPlus)
+ if (getLangOpts().CPlusPlus) {
NumElements = ParseConstantExpression();
- else
+ } else {
+ EnterExpressionEvaluationContext Unevaluated(Actions,
+ Sema::ConstantEvaluated);
NumElements = ParseAssignmentExpression();
+ }
}
// If there was an error parsing the assignment-expression, recover.
@@ -4508,6 +4685,8 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
const bool hasParens = Tok.is(tok::l_paren);
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+
bool isCastExpr;
ParsedType CastTy;
SourceRange CastRange;
@@ -4543,6 +4722,13 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
return;
}
+ // We might need to transform the operand if it is potentially evaluated.
+ Operand = Actions.HandleExprEvaluationContextForTypeof(Operand.get());
+ if (Operand.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
const char *PrevSpec = 0;
unsigned DiagID;
// Check for duplicate type specifiers (e.g. "int typeof(int)").
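A minimal sketch (annotation, not part of the diff) of the effect of entering
an unevaluated context for GNU __typeof__, assuming the usual GNU-extension
behavior; 'next_id' is a hypothetical function:

    int next_id();
    __typeof__(next_id()) cached = 0;  // type is int; next_id() is never called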
@@ -4551,7 +4737,7 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
Diag(StartLoc, DiagID) << PrevSpec;
}
-/// [C1X] atomic-specifier:
+/// [C11] atomic-specifier:
/// _Atomic ( type-name )
///
void Parser::ParseAtomicSpecifier(DeclSpec &DS) {
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
index 4339047..b2a65ff 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
@@ -18,6 +18,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "llvm/ADT/SmallString.h"
#include "RAIIObjectsForParser.h"
using namespace clang;
@@ -148,8 +149,9 @@ Decl *Parser::ParseNamespace(unsigned Context,
}
// If we're still good, complain about inline namespaces in non-C++0x now.
- if (!getLang().CPlusPlus0x && InlineLoc.isValid())
- Diag(InlineLoc, diag::ext_inline_namespace);
+ if (InlineLoc.isValid())
+ Diag(InlineLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_inline_namespace : diag::ext_inline_namespace);
// Enter a scope for the namespace.
ParseScope NamespaceScope(this, Scope::DeclScope);
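A minimal sketch (annotation, not part of the diff) of the C++11 inline
namespace the diagnostic above covers; 'lib' and 'api' are hypothetical names:

    namespace lib {
      inline namespace v2 {
        void api();  // callable as lib::api() as well as lib::v2::api()
      }
    }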
@@ -233,7 +235,7 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
if (SS.isInvalid() || Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected_namespace_name);
@@ -264,12 +266,17 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
///
Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
assert(Tok.is(tok::string_literal) && "Not a string literal!");
- llvm::SmallString<8> LangBuffer;
+ SmallString<8> LangBuffer;
bool Invalid = false;
StringRef Lang = PP.getSpelling(Tok, LangBuffer, &Invalid);
if (Invalid)
return 0;
+ // FIXME: This is incorrect: linkage-specifiers are parsed in translation
+ // phase 7, so string-literal concatenation is supposed to occur.
+ // extern "" "C" "" "+" "+" { } is legal.
+ if (Tok.hasUDSuffix())
+ Diag(Tok, diag::err_invalid_string_udl);
SourceLocation Loc = ConsumeStringToken();
ParseScope LinkageScope(this, Scope::DeclScope);
@@ -381,7 +388,7 @@ Decl *Parser::ParseUsingDirective(unsigned Context,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
IdentifierInfo *NamespcName = 0;
SourceLocation IdentLoc = SourceLocation();
@@ -449,7 +456,7 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
IsTypeName = false;
// Parse nested-name-specifier.
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
// Check nested-name specifier.
if (SS.isInvalid()) {
@@ -460,12 +467,14 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
// Parse the unqualified-id. We allow parsing of both constructor and
// destructor names and allow the action module to diagnose any semantic
// errors.
+ SourceLocation TemplateKWLoc;
UnqualifiedId Name;
if (ParseUnqualifiedId(SS,
/*EnteringContext=*/false,
/*AllowDestructorName=*/true,
/*AllowConstructorName=*/true,
ParsedType(),
+ TemplateKWLoc,
Name)) {
SkipUntil(tok::semi);
return 0;
@@ -481,8 +490,9 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
// Where can GNU attributes appear?
ConsumeToken();
- if (!getLang().CPlusPlus0x)
- Diag(Tok.getLocation(), diag::ext_alias_declaration);
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_alias_declaration :
+ diag::ext_alias_declaration);
// Type alias templates cannot be specialized.
int SpecKind = -1;
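A minimal sketch (annotation, not part of the diff) of the C++11
alias-declaration forms the warning above concerns; all names are hypothetical:

    using Handler = void (*)(int);                 // alias-declaration
    template <typename T> struct Box { T value; };
    template <typename T> using BoxPtr = Box<T>*;  // alias template; not specializable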
@@ -571,20 +581,22 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
IsTypeName, TypenameLoc);
}
-/// ParseStaticAssertDeclaration - Parse C++0x or C1X static_assert-declaration.
+/// ParseStaticAssertDeclaration - Parse C++0x or C11 static_assert-declaration.
///
/// [C++0x] static_assert-declaration:
/// static_assert ( constant-expression , string-literal ) ;
///
-/// [C1X] static_assert-declaration:
+/// [C11] static_assert-declaration:
/// _Static_assert ( constant-expression , string-literal ) ;
///
Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
assert((Tok.is(tok::kw_static_assert) || Tok.is(tok::kw__Static_assert)) &&
"Not a static_assert declaration");
- if (Tok.is(tok::kw__Static_assert) && !getLang().C1X)
- Diag(Tok, diag::ext_c1x_static_assert);
+ if (Tok.is(tok::kw__Static_assert) && !getLangOpts().C11)
+ Diag(Tok, diag::ext_c11_static_assert);
+ if (Tok.is(tok::kw_static_assert))
+ Diag(Tok, diag::warn_cxx98_compat_static_assert);
SourceLocation StaticAssertLoc = ConsumeToken();
@@ -603,15 +615,17 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "", tok::semi))
return 0;
- if (Tok.isNot(tok::string_literal)) {
+ if (!isTokenStringLiteral()) {
Diag(Tok, diag::err_expected_string_literal);
SkipUntil(tok::semi);
return 0;
}
ExprResult AssertMessage(ParseStringLiteralExpression());
- if (AssertMessage.isInvalid())
+ if (AssertMessage.isInvalid()) {
+ SkipUntil(tok::semi);
return 0;
+ }
T.consumeClose();
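A minimal sketch (annotation, not part of the diff) of the
static_assert-declaration parsed above; the message must be a string-literal,
and the new isTokenStringLiteral() check accepts any string-literal kind
rather than only a plain narrow literal:

    static_assert(sizeof(long) >= 4, "long is too narrow");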
@@ -628,39 +642,94 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
///
/// 'decltype' ( expression )
///
-void Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
- assert(Tok.is(tok::kw_decltype) && "Not a decltype specifier");
+SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
+ assert((Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype))
+ && "Not a decltype specifier");
+
- SourceLocation StartLoc = ConsumeToken();
- BalancedDelimiterTracker T(*this, tok::l_paren);
- if (T.expectAndConsume(diag::err_expected_lparen_after,
- "decltype", tok::r_paren)) {
- return;
- }
+ ExprResult Result;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
- // Parse the expression
+ if (Tok.is(tok::annot_decltype)) {
+ Result = getExprAnnotation(Tok);
+ EndLoc = Tok.getAnnotationEndLoc();
+ ConsumeToken();
+ if (Result.isInvalid()) {
+ DS.SetTypeSpecError();
+ return EndLoc;
+ }
+ } else {
+ if (Tok.getIdentifierInfo()->isStr("decltype"))
+ Diag(Tok, diag::warn_cxx98_compat_decltype);
- // C++0x [dcl.type.simple]p4:
- // The operand of the decltype specifier is an unevaluated operand.
- EnterExpressionEvaluationContext Unevaluated(Actions,
- Sema::Unevaluated);
- ExprResult Result = ParseExpression();
- if (Result.isInvalid()) {
- SkipUntil(tok::r_paren);
- return;
- }
+ ConsumeToken();
- // Match the ')'
- T.consumeClose();
- if (T.getCloseLocation().isInvalid())
- return;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ "decltype", tok::r_paren)) {
+ DS.SetTypeSpecError();
+ return T.getOpenLocation() == Tok.getLocation() ?
+ StartLoc : T.getOpenLocation();
+ }
+
+ // Parse the expression
+
+ // C++0x [dcl.type.simple]p4:
+ // The operand of the decltype specifier is an unevaluated operand.
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ 0, /*IsDecltype=*/true);
+ Result = ParseExpression();
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ DS.SetTypeSpecError();
+ return StartLoc;
+ }
+
+ // Match the ')'
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid()) {
+ DS.SetTypeSpecError();
+ // FIXME: this should return the location of the last token
+ // that was consumed (by "consumeClose()")
+ return T.getCloseLocation();
+ }
+
+ Result = Actions.ActOnDecltypeExpression(Result.take());
+ if (Result.isInvalid()) {
+ DS.SetTypeSpecError();
+ return T.getCloseLocation();
+ }
+
+ EndLoc = T.getCloseLocation();
+ }
const char *PrevSpec = 0;
unsigned DiagID;
// Check for duplicate type specifiers (e.g. "int decltype(a)").
if (DS.SetTypeSpecType(DeclSpec::TST_decltype, StartLoc, PrevSpec,
- DiagID, Result.release()))
+ DiagID, Result.release())) {
Diag(StartLoc, DiagID) << PrevSpec;
+ DS.SetTypeSpecError();
+ }
+ return EndLoc;
+}
+
+void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // make sure we have a token we can turn into an annotation token
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+
+ Tok.setKind(tok::annot_decltype);
+ setExprAnnotation(Tok, DS.getTypeSpecType() == TST_decltype ?
+ DS.getRepAsExpr() : ExprResult());
+ Tok.setAnnotationEndLoc(EndLoc);
+ Tok.setLocation(StartLoc);
+ PP.AnnotateCachedTokens(Tok);
}
void Parser::ParseUnderlyingTypeSpecifier(DeclSpec &DS) {
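A minimal sketch (annotation, not part of the diff) of the decltype semantics
the rewritten code above implements - the operand is unevaluated, and an extra
set of parentheses changes the result:

    int n = 0;
    decltype(n) a = n;    // int; the operand 'n' is never evaluated
    decltype((n)) b = n;  // int&; '(n)' is a parenthesized lvalue expression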
@@ -692,18 +761,52 @@ void Parser::ParseUnderlyingTypeSpecifier(DeclSpec &DS) {
Diag(StartLoc, DiagID) << PrevSpec;
}
-/// ParseClassName - Parse a C++ class-name, which names a class. Note
-/// that we only check that the result names a type; semantic analysis
-/// will need to verify that the type names a class. The result is
-/// either a type or NULL, depending on whether a type name was
-/// found.
-///
+/// ParseBaseTypeSpecifier - Parse a C++ base-type-specifier which is either a
+/// class name or decltype-specifier. Note that we only check that the result
+/// names a type; semantic analysis will need to verify that the type names a
+/// class. The result is either a type or null, depending on whether a type
+/// name was found.
+///
+/// base-type-specifier: [C++ 10.1]
+/// class-or-decltype
+/// class-or-decltype: [C++ 10.1]
+/// nested-name-specifier[opt] class-name
+/// decltype-specifier
/// class-name: [C++ 9.1]
/// identifier
/// simple-template-id
///
-Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
- CXXScopeSpec &SS) {
+Parser::TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
+ SourceLocation &EndLocation) {
+ // Ignore attempts to use typename
+ if (Tok.is(tok::kw_typename)) {
+ Diag(Tok, diag::err_expected_class_name_not_template)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ }
+
+ // Parse optional nested-name-specifier
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ BaseLoc = Tok.getLocation();
+
+ // Parse decltype-specifier
+ // tok == kw_decltype is just error recovery, it can only happen when SS
+ // isn't empty
+ if (Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype)) {
+ if (SS.isNotEmpty())
+ Diag(SS.getBeginLoc(), diag::err_unexpected_scope_on_base_decltype)
+ << FixItHint::CreateRemoval(SS.getRange());
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+
+ EndLocation = ParseDecltypeSpecifier(DS);
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+
// Check whether we have a template-id that names a type.
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
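A minimal sketch (annotation, not part of the diff) of a decltype-specifier
used as a base-type-specifier, which ParseBaseTypeSpecifier above now accepts;
'Base' and 'pick' are hypothetical names:

    struct Base { int id; };
    Base pick();
    struct Derived : decltype(pick()) {};  // inherits from Base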
@@ -751,8 +854,8 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
TemplateName.setIdentifier(Id, IdLoc);
// Parse the full template-id, then turn it into a type.
- if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateName,
- SourceLocation(), true))
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, true))
return true;
if (TNK == TNK_Dependent_template_name)
AnnotateTemplateIdTokenAsType();
@@ -773,6 +876,7 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
// We have an identifier; check whether it is actually a type.
ParsedType Type = Actions.getTypeName(*Id, IdLoc, getCurScope(), &SS, true,
false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
/*NonTrivialTypeSourceInfo=*/true);
if (!Type) {
Diag(IdLoc, diag::err_expected_class_name);
@@ -799,7 +903,7 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
/// ParseClassSpecifier - Parse a C++ class-specifier [C++ class] or
/// elaborated-type-specifier [C++ dcl.type.elab]; we can't tell which
/// until we reach the start of a definition or see a token that
-/// cannot start a definition. If SuppressDeclarations is true, we do know.
+/// cannot start a definition.
///
/// class-specifier: [C++ class]
/// class-head '{' member-specification[opt] '}'
@@ -839,7 +943,8 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
SourceLocation StartLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
- AccessSpecifier AS, bool SuppressDeclarations){
+ AccessSpecifier AS,
+ bool EnteringContext, DeclSpecContext DSC) {
DeclSpec::TST TagType;
if (TagTokKind == tok::kw_struct)
TagType = DeclSpec::TST_struct;
@@ -863,12 +968,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// As an extension we do not perform access checking on the names used to
// specify explicit specializations either. This is important to allow
// specializing traits classes for private types.
- bool SuppressingAccessChecks = false;
- if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
- TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization) {
- Actions.ActOnStartSuppressingAccessChecks();
- SuppressingAccessChecks = true;
- }
+ Sema::SuppressAccessChecksRAII SuppressAccess(Actions,
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
ParsedAttributes attrs(AttrFactory);
// If attributes exist after tag, parse them.
@@ -914,11 +1016,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Parse the (optional) nested-name-specifier.
CXXScopeSpec &SS = DS.getTypeSpecScope();
- if (getLang().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// "FOO : BAR" is not a potential typo for "FOO::BAR".
ColonProtectionRAIIObject X(*this);
- if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), true))
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
DS.SetTypeSpecError();
if (SS.isSet())
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id))
@@ -935,7 +1037,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Name = Tok.getIdentifierInfo();
NameLoc = ConsumeToken();
- if (Tok.is(tok::less) && getLang().CPlusPlus) {
+ if (Tok.is(tok::less) && getLangOpts().CPlusPlus) {
// The name was supposed to refer to a template, but didn't.
// Eat the template argument list and try to continue parsing this as
// a class (or template thereof).
@@ -997,39 +1099,41 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
DS.SetTypeSpecError();
SkipUntil(tok::semi, false, true);
- if (SuppressingAccessChecks)
- Actions.ActOnStopSuppressingAccessChecks();
-
return;
}
}
// As soon as we're finished parsing the class's template-id, turn access
// checking back on.
- if (SuppressingAccessChecks)
- Actions.ActOnStopSuppressingAccessChecks();
-
- // There are four options here. If we have 'struct foo;', then this
- // is either a forward declaration or a friend declaration, which
- // have to be treated differently. If we have 'struct foo {...',
- // 'struct foo :...' or 'struct foo final[opt]' then this is a
- // definition. Otherwise we have something like 'struct foo xyz', a reference.
- // However, in some contexts, things look like declarations but are just
- // references, e.g.
- // new struct s;
+ SuppressAccess.done();
+
+ // There are four options here.
+ // - If we are in a trailing return type, this is always just a reference,
+ // and we must not try to parse a definition. For instance,
+ // [] () -> struct S { };
+ // does not define a type.
+ // - If we have 'struct foo {...', 'struct foo :...',
+ // 'struct foo final :' or 'struct foo final {', then this is a definition.
+ // - If we have 'struct foo;', then this is either a forward declaration
+ // or a friend declaration, which have to be treated differently.
+ // - Otherwise we have something like 'struct foo xyz', a reference.
+ // However, in type-specifier-seq's, things look like declarations but are
+ // just references, e.g.
+ // new struct s;
// or
- // &T::operator struct s;
- // For these, SuppressDeclarations is true.
+ // &T::operator struct s;
+ // For these, DSC is DSC_type_specifier.
Sema::TagUseKind TUK;
- if (SuppressDeclarations)
+ if (DSC == DSC_trailing)
TUK = Sema::TUK_Reference;
- else if (Tok.is(tok::l_brace) ||
- (getLang().CPlusPlus && Tok.is(tok::colon)) ||
- isCXX0XFinalKeyword()) {
+ else if (Tok.is(tok::l_brace) ||
+ (getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
+ (isCXX0XFinalKeyword() &&
+ (NextToken().is(tok::l_brace) || NextToken().is(tok::colon)))) {
if (DS.isFriendSpecified()) {
// C++ [class.friend]p2:
// A class shall not be defined in a friend declaration.
- Diag(Tok.getLocation(), diag::err_friend_decl_defines_class)
+ Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
<< SourceRange(DS.getFriendSpecLoc());
// Skip everything up to the semicolon, so that this looks like a proper
@@ -1040,7 +1144,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Okay, this is a class definition.
TUK = Sema::TUK_Definition;
}
- } else if (Tok.is(tok::semi))
+ } else if (Tok.is(tok::semi) && DSC != DSC_type_specifier)
TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
else
TUK = Sema::TUK_Reference;
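A minimal sketch (annotation, not part of the diff) of the trailing-return-type
case called out above: in DSC_trailing the tag is always a reference, so the
brace after 'struct S' opens the function body rather than a class definition:

    struct S { int v; };
    auto make() -> struct S { return S(); }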
@@ -1092,14 +1196,14 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (TUK == Sema::TUK_Reference ||
(TUK == Sema::TUK_Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
- TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType,
- StartLoc,
+ TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType, StartLoc,
TemplateId->SS,
+ TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->TemplateNameLoc,
TemplateId->LAngleLoc,
TemplateArgsPtr,
- TemplateId->RAngleLoc);
+ TemplateId->RAngleLoc);
} else {
// This is an explicit specialization or a class template
// partial specialization.
@@ -1191,8 +1295,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc,
SS, Name, NameLoc, attrs.getList(), AS,
DS.getModulePrivateSpecLoc(),
- TParams, Owned, IsDependent, false,
- false, clang::TypeResult());
+ TParams, Owned, IsDependent,
+ SourceLocation(), false,
+ clang::TypeResult());
// If ActOnTag said the type was dependent, try again with the
// less common call.
@@ -1206,9 +1311,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If there is a body, parse it and inform the actions module.
if (TUK == Sema::TUK_Definition) {
assert(Tok.is(tok::l_brace) ||
- (getLang().CPlusPlus && Tok.is(tok::colon)) ||
+ (getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
isCXX0XFinalKeyword());
- if (getLang().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
ParseCXXMemberSpecification(StartLoc, TagType, TagOrTempResult.get());
else
ParseStructUnionBody(StartLoc, TagType, TagOrTempResult.get());
@@ -1290,7 +1395,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
case tok::r_brace: // struct bar { struct foo {...} }
// Missing ';' at end of struct is accepted as an extension in C mode.
- if (!getLang().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
ExpectedSemi = false;
break;
}
@@ -1359,9 +1464,9 @@ void Parser::ParseBaseClause(Decl *ClassDecl) {
/// base-specifier: [C++ class.derived]
/// ::[opt] nested-name-specifier[opt] class-name
/// 'virtual' access-specifier[opt] ::[opt] nested-name-specifier[opt]
-/// class-name
+/// base-type-specifier
/// access-specifier 'virtual'[opt] ::[opt] nested-name-specifier[opt]
-/// class-name
+/// base-type-specifier
Parser::BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
bool IsVirtual = false;
SourceLocation StartLoc = Tok.getLocation();
@@ -1390,16 +1495,10 @@ Parser::BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
IsVirtual = true;
}
- // Parse optional '::' and optional nested-name-specifier.
- CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
-
- // The location of the base class itself.
- SourceLocation BaseLoc = Tok.getLocation();
-
// Parse the class-name.
SourceLocation EndLocation;
- TypeResult BaseType = ParseClassName(EndLocation, SS);
+ SourceLocation BaseLoc;
+ TypeResult BaseType = ParseBaseTypeSpecifier(BaseLoc, EndLocation);
if (BaseType.isInvalid())
return true;
@@ -1468,14 +1567,14 @@ void Parser::HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo,
}
}
-/// isCXX0XVirtSpecifier - Determine whether the next token is a C++0x
+/// isCXX0XVirtSpecifier - Determine whether the given token is a C++0x
/// virt-specifier.
///
/// virt-specifier:
/// override
/// final
-VirtSpecifiers::Specifier Parser::isCXX0XVirtSpecifier() const {
- if (!getLang().CPlusPlus)
+VirtSpecifiers::Specifier Parser::isCXX0XVirtSpecifier(const Token &Tok) const {
+ if (!getLangOpts().CPlusPlus)
return VirtSpecifiers::VS_None;
if (Tok.is(tok::identifier)) {
@@ -1516,9 +1615,10 @@ void Parser::ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS) {
<< PrevSpec
<< FixItHint::CreateRemoval(Tok.getLocation());
- if (!getLang().CPlusPlus0x)
- Diag(Tok.getLocation(), diag::ext_override_control_keyword)
- << VirtSpecifiers::getSpecifierName(Specifier);
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_override_control_keyword :
+ diag::ext_override_control_keyword)
+ << VirtSpecifiers::getSpecifierName(Specifier);
ConsumeToken();
}
}
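A minimal sketch (annotation, not part of the diff) of the contextual
virt-specifiers recognized above; 'override' and 'final' are keywords only in
this position:

    struct Base { virtual void run(); };
    struct Leaf final : Base {   // 'final': Leaf cannot be derived from
      void run() override;       // 'override': must override a virtual function
    };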
@@ -1526,7 +1626,7 @@ void Parser::ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS) {
/// isCXX0XFinalKeyword - Determine whether the next token is a C++0x
/// contextual 'final' keyword.
bool Parser::isCXX0XFinalKeyword() const {
- if (!getLang().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
return false;
if (!Tok.is(tok::identifier))
@@ -1581,7 +1681,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject *TemplateDiags) {
if (Tok.is(tok::at)) {
- if (getLang().ObjC1 && NextToken().isObjCAtKeyword(tok::objc_defs))
+ if (getLangOpts().ObjC1 && NextToken().isObjCAtKeyword(tok::objc_defs))
Diag(Tok, diag::err_at_defs_cxx);
else
Diag(Tok, diag::err_at_in_class);
@@ -1605,11 +1705,14 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (isAccessDecl) {
// Collect the scope specifier token we annotated earlier.
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false);
// Try to parse an unqualified-id.
+ SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (ParseUnqualifiedId(SS, false, true, true, ParsedType(), Name)) {
+ if (ParseUnqualifiedId(SS, false, true, true, ParsedType(),
+ TemplateKWLoc, Name)) {
SkipUntil(tok::semi);
return;
}
@@ -1684,11 +1787,15 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return;
}
+ // Hold late-parsed attributes so we can attach a Decl to them later.
+ LateParsedAttrList CommonLateParsedAttrs;
+
// decl-specifier-seq:
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this, TemplateDiags);
DS.takeAttributesFrom(attrs);
- ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class);
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class,
+ &CommonLateParsedAttrs);
MultiTemplateParamsArg TemplateParams(Actions,
TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->data() : 0,
@@ -1708,6 +1815,9 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Hold late-parsed attributes so we can attach a Decl to them later.
LateParsedAttrList LateParsedAttrs;
+ SourceLocation EqualLoc;
+ bool HasInitializer = false;
+ ExprResult Init;
if (Tok.isNot(tok::colon)) {
// Don't parse FOO:BAR as if it were a typo for FOO::BAR.
ColonProtectionRAIIObject X(*this);
@@ -1730,39 +1840,42 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// MSVC permits pure specifier on inline functions declared at class scope.
// Hence check for =0 before checking for function definition.
- ExprResult Init;
- if (getLang().MicrosoftExt && Tok.is(tok::equal) &&
+ if (getLangOpts().MicrosoftExt && Tok.is(tok::equal) &&
DeclaratorInfo.isFunctionDeclarator() &&
NextToken().is(tok::numeric_constant)) {
- ConsumeToken();
+ EqualLoc = ConsumeToken();
Init = ParseInitializer();
if (Init.isInvalid())
SkipUntil(tok::comma, true, true);
+ else
+ HasInitializer = true;
}
- bool IsDefinition = false;
+ FunctionDefinitionKind DefinitionKind = FDK_Declaration;
// function-definition:
//
// In C++11, a non-function declarator followed by an open brace is a
// braced-init-list for an in-class member initialization, not an
// erroneous function definition.
- if (Tok.is(tok::l_brace) && !getLang().CPlusPlus0x) {
- IsDefinition = true;
+ if (Tok.is(tok::l_brace) && !getLangOpts().CPlusPlus0x) {
+ DefinitionKind = FDK_Definition;
} else if (DeclaratorInfo.isFunctionDeclarator()) {
if (Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try)) {
- IsDefinition = true;
+ DefinitionKind = FDK_Definition;
} else if (Tok.is(tok::equal)) {
const Token &KW = NextToken();
- if (KW.is(tok::kw_default) || KW.is(tok::kw_delete))
- IsDefinition = true;
+ if (KW.is(tok::kw_default))
+ DefinitionKind = FDK_Defaulted;
+ else if (KW.is(tok::kw_delete))
+ DefinitionKind = FDK_Deleted;
}
}
- if (IsDefinition) {
+ if (DefinitionKind) {
if (!DeclaratorInfo.isFunctionDeclarator()) {
- Diag(Tok, diag::err_func_def_no_params);
+ Diag(DeclaratorInfo.getIdentifierLoc(), diag::err_func_def_no_params);
ConsumeBrace();
- SkipUntil(tok::r_brace, true);
+ SkipUntil(tok::r_brace, /*StopAtSemi*/false);
// Consume the optional ';'
if (Tok.is(tok::semi))
@@ -1771,12 +1884,13 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
- Diag(Tok, diag::err_function_declared_typedef);
+ Diag(DeclaratorInfo.getIdentifierLoc(),
+ diag::err_function_declared_typedef);
// This recovery skips the entire function body. It would be nice
// to simply call ParseCXXInlineMethodDef() below, however Sema
// assumes the declarator represents a function, not a typedef.
ConsumeBrace();
- SkipUntil(tok::r_brace, true);
+ SkipUntil(tok::r_brace, /*StopAtSemi*/false);
// Consume the optional ';'
if (Tok.is(tok::semi))
@@ -1786,10 +1900,13 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
Decl *FunDecl =
ParseCXXInlineMethodDef(AS, AccessAttrs, DeclaratorInfo, TemplateInfo,
- VS, Init);
+ VS, DefinitionKind, Init);
+ for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i) {
+ CommonLateParsedAttrs[i]->addDecl(FunDecl);
+ }
for (unsigned i = 0, ni = LateParsedAttrs.size(); i < ni; ++i) {
- LateParsedAttrs[i]->setDecl(FunDecl);
+ LateParsedAttrs[i]->addDecl(FunDecl);
}
LateParsedAttrs.clear();
@@ -1808,6 +1925,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SmallVector<Decl *, 8> DeclsInGroup;
ExprResult BitfieldSize;
+ bool ExpectSemi = true;
while (1) {
// member-declarator:
@@ -1839,9 +1957,8 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// goes before or after the GNU attributes and __asm__.
ParseOptionalCXX0XVirtSpecifierSeq(VS);
- bool HasInitializer = false;
bool HasDeferredInitializer = false;
- if (Tok.is(tok::equal) || Tok.is(tok::l_brace)) {
+ if ((Tok.is(tok::equal) || Tok.is(tok::l_brace)) && !HasInitializer) {
if (BitfieldSize.get()) {
Diag(Tok, diag::err_bitfield_member_init);
SkipUntil(tok::comma, true, true);
@@ -1876,40 +1993,45 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
// Set the Decl for any late parsed attributes
+ for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i) {
+ CommonLateParsedAttrs[i]->addDecl(ThisDecl);
+ }
for (unsigned i = 0, ni = LateParsedAttrs.size(); i < ni; ++i) {
- LateParsedAttrs[i]->setDecl(ThisDecl);
+ LateParsedAttrs[i]->addDecl(ThisDecl);
}
LateParsedAttrs.clear();
// Handle the initializer.
if (HasDeferredInitializer) {
// The initializer was deferred; parse it and cache the tokens.
- if (!getLang().CPlusPlus0x)
- Diag(Tok, diag::warn_nonstatic_member_init_accepted_as_extension);
-
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_nonstatic_member_init :
+ diag::ext_nonstatic_member_init);
+
if (DeclaratorInfo.isArrayOfUnknownBound()) {
// C++0x [dcl.array]p3: An array bound may also be omitted when the
// declarator is followed by an initializer.
//
// A brace-or-equal-initializer for a member-declarator is not an
- // initializer in the gramamr, so this is ill-formed.
+ // initializer in the grammar, so this is ill-formed.
Diag(Tok, diag::err_incomplete_array_member_init);
SkipUntil(tok::comma, true, true);
- // Avoid later warnings about a class member of incomplete type.
- ThisDecl->setInvalidDecl();
+ if (ThisDecl)
+ // Avoid later warnings about a class member of incomplete type.
+ ThisDecl->setInvalidDecl();
} else
ParseCXXNonStaticMemberInitializer(ThisDecl);
} else if (HasInitializer) {
// Normal initializer.
- SourceLocation EqualLoc;
- ExprResult Init
- = ParseCXXMemberInitializer(DeclaratorInfo.isDeclarationOfFunction(),
- EqualLoc);
+ if (!Init.isUsable())
+ Init = ParseCXXMemberInitializer(ThisDecl,
+ DeclaratorInfo.isDeclarationOfFunction(), EqualLoc);
+
if (Init.isInvalid())
SkipUntil(tok::comma, true, true);
else if (ThisDecl)
- Actions.AddInitializerToDecl(ThisDecl, Init.get(), false,
- DS.getTypeSpecType() == DeclSpec::TST_auto);
+ Actions.AddInitializerToDecl(ThisDecl, Init.get(), EqualLoc.isInvalid(),
+ DS.getTypeSpecType() == DeclSpec::TST_auto);
} else if (ThisDecl && DS.getStorageClassSpec() == DeclSpec::SCS_static) {
// No initializer.
Actions.ActOnUninitializedDecl(ThisDecl,
@@ -1935,12 +2057,26 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
break;
// Consume the comma.
- ConsumeToken();
+ SourceLocation CommaLoc = ConsumeToken();
+
+ if (Tok.isAtStartOfLine() &&
+ !MightBeDeclarator(Declarator::MemberContext)) {
+ // This comma was followed by a line-break and something which can't be
+ // the start of a declarator. The comma was probably a typo for a
+ // semicolon.
+ Diag(CommaLoc, diag::err_expected_semi_declaration)
+ << FixItHint::CreateReplacement(CommaLoc, ";");
+ ExpectSemi = false;
+ break;
+ }
// Parse the next declarator.
DeclaratorInfo.clear();
VS.clear();
BitfieldSize = true;
+ Init = true;
+ HasInitializer = false;
+ DeclaratorInfo.setCommaLoc(CommaLoc);
// Attributes are only allowed on the second declarator.
MaybeParseGNUAttributes(DeclaratorInfo);
@@ -1949,7 +2085,8 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
ParseDeclarator(DeclaratorInfo);
}
- if (ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list)) {
+ if (ExpectSemi &&
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list)) {
// Skip to end of block or statement.
SkipUntil(tok::r_brace, true, true);
// If we stopped at a ';', eat it.
@@ -1968,26 +2105,29 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
///
/// pure-specifier:
/// '= 0'
-///
+///
/// brace-or-equal-initializer:
/// '=' initializer-expression
-/// braced-init-list [TODO]
-///
+/// braced-init-list
+///
/// initializer-clause:
/// assignment-expression
-/// braced-init-list [TODO]
-///
+/// braced-init-list
+///
/// defaulted/deleted function-definition:
/// '=' 'default'
/// '=' 'delete'
///
/// Prior to C++0x, the assignment-expression in an initializer-clause must
/// be a constant-expression.
-ExprResult Parser::ParseCXXMemberInitializer(bool IsFunction,
+ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc) {
assert((Tok.is(tok::equal) || Tok.is(tok::l_brace))
&& "Data member initializer not starting with '=' or '{'");
+ EnterExpressionEvaluationContext Context(Actions,
+ Sema::PotentiallyEvaluated,
+ D);
if (Tok.is(tok::equal)) {
EqualLoc = ConsumeToken();
if (Tok.is(tok::kw_delete)) {
@@ -2015,9 +2155,8 @@ ExprResult Parser::ParseCXXMemberInitializer(bool IsFunction,
return ExprResult();
}
- return ParseInitializer();
- } else
- return ExprError(Diag(Tok, diag::err_generalized_initializer_lists));
+ }
+ return ParseInitializer();
}
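A minimal sketch (annotation, not part of the diff) of the
brace-or-equal-initializer forms ParseCXXMemberInitializer now handles - the
braced-init-list alternative is no longer a TODO; 'Config' is a hypothetical
type:

    struct Config {
      int retries = 3;       // '=' initializer-clause
      int limits[2]{8, 16};  // braced-init-list
    };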
/// ParseCXXMemberSpecification - Parse the class definition.
@@ -2071,20 +2210,13 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
SourceLocation FinalLoc;
// Parse the optional 'final' keyword.
- if (getLang().CPlusPlus && Tok.is(tok::identifier)) {
- IdentifierInfo *II = Tok.getIdentifierInfo();
-
- // Initialize the contextual keywords.
- if (!Ident_final) {
- Ident_final = &PP.getIdentifierTable().get("final");
- Ident_override = &PP.getIdentifierTable().get("override");
- }
-
- if (II == Ident_final)
- FinalLoc = ConsumeToken();
+ if (getLangOpts().CPlusPlus && Tok.is(tok::identifier)) {
+ assert(isCXX0XFinalKeyword() && "not a class definition");
+ FinalLoc = ConsumeToken();
- if (!getLang().CPlusPlus0x)
- Diag(FinalLoc, diag::ext_override_control_keyword) << "final";
+ Diag(FinalLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_override_control_keyword :
+ diag::ext_override_control_keyword) << "final";
}
if (Tok.is(tok::colon)) {
@@ -2122,7 +2254,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
// Each iteration of this loop reads one member-declaration.
- if (getLang().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
+ if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
Tok.is(tok::kw___if_not_exists))) {
ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType, CurAS);
continue;
@@ -2137,6 +2269,16 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
continue;
}
+ if (Tok.is(tok::annot_pragma_vis)) {
+ HandlePragmaVisibility();
+ continue;
+ }
+
+ if (Tok.is(tok::annot_pragma_pack)) {
+ HandlePragmaPack();
+ continue;
+ }
+
AccessSpecifier AS = getAccessSpecifierIfPresent();
if (AS != AS_none) {
// Current token is a C++ access specifier.
@@ -2150,11 +2292,6 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
SourceLocation EndLoc;
if (Tok.is(tok::colon)) {
EndLoc = Tok.getLocation();
- if (Actions.ActOnAccessSpecifier(AS, ASLoc, EndLoc,
- AccessAttrs.getList())) {
- // found another attribute than only annotations
- AccessAttrs.clear();
- }
ConsumeToken();
} else if (Tok.is(tok::semi)) {
EndLoc = Tok.getLocation();
@@ -2166,7 +2303,13 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
Diag(EndLoc, diag::err_expected_colon)
<< FixItHint::CreateInsertion(EndLoc, ":");
}
- Actions.ActOnAccessSpecifier(AS, ASLoc, EndLoc);
+
+ if (Actions.ActOnAccessSpecifier(AS, ASLoc, EndLoc,
+ AccessAttrs.getList())) {
+ // found another attribute than only annotations
+ AccessAttrs.clear();
+ }
+
continue;
}
@@ -2303,7 +2446,7 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// parse '::'[opt] nested-name-specifier[opt]
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
ParsedType TemplateTypeTy;
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
@@ -2314,18 +2457,33 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
TemplateTypeTy = getTypeAnnotation(Tok);
}
}
- if (!TemplateTypeTy && Tok.isNot(tok::identifier)) {
+ // Uses of decltype will already have been converted to annot_decltype by
+ // ParseOptionalCXXScopeSpecifier at this point.
+ if (!TemplateTypeTy && Tok.isNot(tok::identifier)
+ && Tok.isNot(tok::annot_decltype)) {
Diag(Tok, diag::err_expected_member_or_base_name);
return true;
}
- // Get the identifier. This may be a member name or a class name,
- // but we'll let the semantic analysis determine which it is.
- IdentifierInfo *II = Tok.is(tok::identifier) ? Tok.getIdentifierInfo() : 0;
- SourceLocation IdLoc = ConsumeToken();
+ IdentifierInfo *II = 0;
+ DeclSpec DS(AttrFactory);
+ SourceLocation IdLoc = Tok.getLocation();
+ if (Tok.is(tok::annot_decltype)) {
+ // Get the decltype expression, if there is one.
+ ParseDecltypeSpecifier(DS);
+ } else {
+ if (Tok.is(tok::identifier))
+ // Get the identifier. This may be a member name or a class name,
+ // but we'll let the semantic analysis determine which it is.
+ II = Tok.getIdentifierInfo();
+ ConsumeToken();
+ }
+
// Parse the '('.
- if (getLang().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
ExprResult InitList = ParseBraceInitializer();
if (InitList.isInvalid())
return true;
@@ -2335,8 +2493,8 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
EllipsisLoc = ConsumeToken();
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
- TemplateTypeTy, IdLoc, InitList.take(),
- EllipsisLoc);
+ TemplateTypeTy, DS, IdLoc,
+ InitList.take(), EllipsisLoc);
} else if(Tok.is(tok::l_paren)) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -2356,13 +2514,13 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
EllipsisLoc = ConsumeToken();
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
- TemplateTypeTy, IdLoc,
+ TemplateTypeTy, DS, IdLoc,
T.getOpenLocation(), ArgExprs.take(),
ArgExprs.size(), T.getCloseLocation(),
EllipsisLoc);
}
- Diag(Tok, getLang().CPlusPlus0x ? diag::err_expected_lparen_or_lbrace
+ Diag(Tok, getLangOpts().CPlusPlus0x ? diag::err_expected_lparen_or_lbrace
: diag::err_expected_lparen);
return true;
}
@@ -2396,6 +2554,8 @@ Parser::MaybeParseExceptionSpecification(SourceRange &SpecificationRange,
if (Tok.isNot(tok::kw_noexcept))
return Result;
+ Diag(Tok, diag::warn_cxx98_compat_noexcept_decl);
+
// If we already had a dynamic specification, parse the noexcept for,
// recovery, but emit a diagnostic and don't store the results.
SourceRange NoexceptRange;
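A minimal sketch (annotation, not part of the diff) of the noexcept
exception-specifications whose C++98-compatibility warning is added above;
names are hypothetical:

    void swap_fast(int &a, int &b) noexcept;       // unconditional
    template <typename T>
    void relay(T &t) noexcept(noexcept(t.run()));  // noexcept(constant-expression)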
@@ -2468,7 +2628,7 @@ ExceptionSpecificationType Parser::ParseDynamicExceptionSpecification(
// can throw anything".
if (Tok.is(tok::ellipsis)) {
SourceLocation EllipsisLoc = ConsumeToken();
- if (!getLang().MicrosoftExt)
+ if (!getLangOpts().MicrosoftExt)
Diag(EllipsisLoc, diag::ext_ellipsis_exception_spec);
T.consumeClose();
SpecificationRange.setEnd(T.getCloseLocation());
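A minimal sketch (annotation, not part of the diff) of the Microsoft extension
accepted above - an ellipsis as the whole dynamic exception specification,
meaning "may throw anything"; diagnosed as an extension outside MS mode:

    void legacy_api() throw(...);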
@@ -2513,14 +2673,7 @@ TypeResult Parser::ParseTrailingReturnType(SourceRange &Range) {
ConsumeToken();
- // FIXME: Need to suppress declarations when parsing this typename.
- // Otherwise in this function definition:
- //
- // auto f() -> struct X {}
- //
- // struct X is parsed as class definition because of the trailing
- // brace.
- return ParseTypeName(&Range);
+ return ParseTypeName(&Range, Declarator::TrailingReturnContext);
}
/// \brief We have just started parsing the definition of a new class,
@@ -2583,43 +2736,87 @@ void Parser::PopParsingClass(Sema::ParsingClassState state) {
Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope();
}
-/// ParseCXX0XAttributeSpecifier - Parse a C++0x attribute-specifier. Currently
+/// \brief Try to parse an 'identifier' which appears within an attribute-token.
+///
+/// \return the parsed identifier on success, and 0 if the next token is not an
+/// attribute-token.
+///
+/// C++11 [dcl.attr.grammar]p3:
+/// If a keyword or an alternative token that satisfies the syntactic
+/// requirements of an identifier is contained in an attribute-token,
+/// it is considered an identifier.
+IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
+ switch (Tok.getKind()) {
+ default:
+ // Identifiers and keywords have identifier info attached.
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ Loc = ConsumeToken();
+ return II;
+ }
+ return 0;
+
+ case tok::ampamp: // 'and'
+ case tok::pipe: // 'bitor'
+ case tok::pipepipe: // 'or'
+ case tok::caret: // 'xor'
+ case tok::tilde: // 'compl'
+ case tok::amp: // 'bitand'
+ case tok::ampequal: // 'and_eq'
+ case tok::pipeequal: // 'or_eq'
+ case tok::caretequal: // 'xor_eq'
+ case tok::exclaim: // 'not'
+ case tok::exclaimequal: // 'not_eq'
+ // Alternative tokens do not have identifier info, but their spelling
+ // starts with an alphabetical character.
+ llvm::SmallString<8> SpellingBuf;
+ StringRef Spelling = PP.getSpelling(Tok.getLocation(), SpellingBuf);
+ if (std::isalpha(Spelling[0])) {
+ Loc = ConsumeToken();
+ return &PP.getIdentifierTable().get(Spelling.data());
+ }
+ return 0;
+ }
+}
+
+/// ParseCXX11AttributeSpecifier - Parse a C++11 attribute-specifier. Currently
/// only parses standard attributes.
///
-/// [C++0x] attribute-specifier:
+/// [C++11] attribute-specifier:
/// '[' '[' attribute-list ']' ']'
/// alignment-specifier
///
-/// [C++0x] attribute-list:
+/// [C++11] attribute-list:
/// attribute[opt]
/// attribute-list ',' attribute[opt]
+/// attribute '...'
+/// attribute-list ',' attribute '...'
///
-/// [C++0x] attribute:
+/// [C++11] attribute:
/// attribute-token attribute-argument-clause[opt]
///
-/// [C++0x] attribute-token:
+/// [C++11] attribute-token:
/// identifier
/// attribute-scoped-token
///
-/// [C++0x] attribute-scoped-token:
+/// [C++11] attribute-scoped-token:
/// attribute-namespace '::' identifier
///
-/// [C++0x] attribute-namespace:
+/// [C++11] attribute-namespace:
/// identifier
///
-/// [C++0x] attribute-argument-clause:
+/// [C++11] attribute-argument-clause:
/// '(' balanced-token-seq ')'
///
-/// [C++0x] balanced-token-seq:
+/// [C++11] balanced-token-seq:
/// balanced-token
/// balanced-token-seq balanced-token
///
-/// [C++0x] balanced-token:
+/// [C++11] balanced-token:
/// '(' balanced-token-seq ')'
/// '[' balanced-token-seq ']'
/// '{' balanced-token-seq '}'
/// any token but '(', ')', '[', ']', '{', or '}'
-void Parser::ParseCXX0XAttributeSpecifier(ParsedAttributes &attrs,
+void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *endLoc) {
if (Tok.is(tok::kw_alignas)) {
Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
@@ -2628,56 +2825,53 @@ void Parser::ParseCXX0XAttributeSpecifier(ParsedAttributes &attrs,
}
assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square)
- && "Not a C++0x attribute list");
+ && "Not a C++11 attribute list");
Diag(Tok.getLocation(), diag::warn_cxx98_compat_attribute);
ConsumeBracket();
ConsumeBracket();
- if (Tok.is(tok::comma)) {
- Diag(Tok.getLocation(), diag::err_expected_ident);
- ConsumeToken();
- }
-
- while (Tok.is(tok::identifier) || Tok.is(tok::comma)) {
+ while (Tok.isNot(tok::r_square)) {
// attribute not present
if (Tok.is(tok::comma)) {
ConsumeToken();
continue;
}
- IdentifierInfo *ScopeName = 0, *AttrName = Tok.getIdentifierInfo();
- SourceLocation ScopeLoc, AttrLoc = ConsumeToken();
+ SourceLocation ScopeLoc, AttrLoc;
+ IdentifierInfo *ScopeName = 0, *AttrName = 0;
+
+ AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
+ if (!AttrName)
+ // Break out to the "expected ']'" diagnostic.
+ break;
// scoped attribute
if (Tok.is(tok::coloncolon)) {
ConsumeToken();
- if (!Tok.is(tok::identifier)) {
+ ScopeName = AttrName;
+ ScopeLoc = AttrLoc;
+
+ AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
+ if (!AttrName) {
Diag(Tok.getLocation(), diag::err_expected_ident);
SkipUntil(tok::r_square, tok::comma, true, true);
continue;
}
-
- ScopeName = AttrName;
- ScopeLoc = AttrLoc;
-
- AttrName = Tok.getIdentifierInfo();
- AttrLoc = ConsumeToken();
}
bool AttrParsed = false;
// No scoped names are supported; ideally we could put all non-standard
// attributes into namespaces.
if (!ScopeName) {
- switch(AttributeList::getKind(AttrName))
- {
+ switch (AttributeList::getKind(AttrName)) {
// No arguments
case AttributeList::AT_carries_dependency:
case AttributeList::AT_noreturn: {
if (Tok.is(tok::l_paren)) {
- Diag(Tok.getLocation(), diag::err_cxx0x_attribute_forbids_arguments)
+ Diag(Tok.getLocation(), diag::err_cxx11_attribute_forbids_arguments)
<< AttrName->getName();
break;
}
@@ -2699,6 +2893,13 @@ void Parser::ParseCXX0XAttributeSpecifier(ParsedAttributes &attrs,
// SkipUntil maintains the balancedness of tokens.
SkipUntil(tok::r_paren, false);
}
+
+ if (Tok.is(tok::ellipsis)) {
+ if (AttrParsed)
+ Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
+ << AttrName->getName();
+ ConsumeToken();
+ }
}
if (ExpectAndConsume(tok::r_square, diag::err_expected_rsquare))
@@ -2709,19 +2910,19 @@ void Parser::ParseCXX0XAttributeSpecifier(ParsedAttributes &attrs,
SkipUntil(tok::r_square, false);
}
-/// ParseCXX0XAttributes - Parse a C++0x attribute-specifier-seq.
+/// ParseCXX11Attributes - Parse a C++0x attribute-specifier-seq.
///
/// attribute-specifier-seq:
/// attribute-specifier-seq[opt] attribute-specifier
-void Parser::ParseCXX0XAttributes(ParsedAttributesWithRange &attrs,
+void Parser::ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc) {
SourceLocation StartLoc = Tok.getLocation(), Loc;
if (!endLoc)
endLoc = &Loc;
do {
- ParseCXX0XAttributeSpecifier(attrs, endLoc);
- } while (isCXX0XAttributeSpecifier());
+ ParseCXX11AttributeSpecifier(attrs, endLoc);
+ } while (isCXX11AttributeSpecifier());
attrs.Range = SourceRange(StartLoc, *endLoc);
}
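A minimal sketch (annotation, not part of the diff) of attribute-specifiers the
renamed ParseCXX11AttributeSpecifier/ParseCXX11Attributes routines handle; per
the switch above, only these two standard attributes are recognized:

    [[noreturn]] void fail();
    [[carries_dependency]] int *load_ptr();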
@@ -2739,6 +2940,7 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
assert(Tok.is(tok::l_square) && "Not a Microsoft attribute list");
while (Tok.is(tok::l_square)) {
+ // FIXME: If this is actually a C++11 attribute, parse it as one.
ConsumeBracket();
SkipUntil(tok::r_square, true, true);
if (endLoc) *endLoc = Tok.getLocation();
@@ -2748,25 +2950,32 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS) {
- bool Result;
+ IfExistsCondition Result;
if (ParseMicrosoftIfExistsCondition(Result))
return;
- if (Tok.isNot(tok::l_brace)) {
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
Diag(Tok, diag::err_expected_lbrace);
return;
}
- ConsumeBrace();
- // Condition is false skip all inside the {}.
- if (!Result) {
- SkipUntil(tok::r_brace, false);
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the declarations below.
+ break;
+
+ case IEB_Dependent:
+ Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
+ << Result.IsIfExists;
+ // Fall through to skip.
+
+ case IEB_Skip:
+ Braces.skipToEnd();
return;
}
- // Condition is true, parse the declaration.
- while (Tok.isNot(tok::r_brace)) {
-
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
// __if_exists, __if_not_exists can nest.
if ((Tok.is(tok::kw___if_exists) || Tok.is(tok::kw___if_not_exists))) {
ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType, CurAS);
@@ -2799,10 +3008,6 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
// Parse all the comma separated declarators.
ParseCXXClassMemberDeclaration(CurAS, 0);
}
-
- if (Tok.isNot(tok::r_brace)) {
- Diag(Tok, diag::err_expected_rbrace);
- return;
- }
- ConsumeBrace();
+
+ Braces.consumeClose();
}
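A sketch (annotation, not part of the diff, and assuming Microsoft-extensions
mode) of the class-scope __if_exists/__if_not_exists construct the rewritten
routine parses; in a dependent context it now warns and skips the braces:

    struct Api { void call(); };
    struct Shim {
      __if_exists(Api::call) { int forwarded; }        // kept: Api::call exists
      __if_not_exists(Api::missing) { int fallback; }  // kept: no such member
    };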
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
index bc8bbf5..7f3a815 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
@@ -23,6 +23,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/TypoCorrection.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "RAIIObjectsForParser.h"
#include "llvm/ADT/SmallVector.h"
@@ -174,8 +175,8 @@ static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
/// expression: [C99 6.5.17]
/// assignment-expression ...[opt]
/// expression ',' assignment-expression ...[opt]
-ExprResult Parser::ParseExpression() {
- ExprResult LHS(ParseAssignmentExpression());
+ExprResult Parser::ParseExpression(TypeCastState isTypeCast) {
+ ExprResult LHS(ParseAssignmentExpression(isTypeCast));
return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
}
@@ -211,7 +212,7 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
}
/// ParseAssignmentExpression - Parse an expr that doesn't include commas.
-ExprResult Parser::ParseAssignmentExpression() {
+ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
cutOffParsing();
@@ -221,7 +222,9 @@ ExprResult Parser::ParseAssignmentExpression() {
if (Tok.is(tok::kw_throw))
return ParseThrowExpression();
- ExprResult LHS = ParseCastExpression(/*isUnaryExpression=*/false);
+ ExprResult LHS = ParseCastExpression(/*isUnaryExpression=*/false,
+ /*isAddressOfOperand=*/false,
+ isTypeCast);
return ParseRHSOfBinaryExpression(move(LHS), prec::Assignment);
}
@@ -246,15 +249,17 @@ Parser::ParseAssignmentExprWithObjCMessageExprStart(SourceLocation LBracLoc,
}
-ExprResult Parser::ParseConstantExpression() {
- // C++ [basic.def.odr]p2:
+ExprResult Parser::ParseConstantExpression(TypeCastState isTypeCast) {
+ // C++03 [basic.def.odr]p2:
// An expression is potentially evaluated unless it appears where an
// integral constant expression is required (see 5.19) [...].
+ // C++98 and C++11 have no such rule, but this is only a defect in C++98.
EnterExpressionEvaluationContext Unevaluated(Actions,
- Sema::Unevaluated);
+ Sema::ConstantEvaluated);
- ExprResult LHS(ParseCastExpression(false));
- return ParseRHSOfBinaryExpression(LHS, prec::Conditional);
+ ExprResult LHS(ParseCastExpression(false, false, isTypeCast));
+ ExprResult Res(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ return Actions.ActOnConstantExpression(Res);
}
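A minimal sketch (annotation, not part of the diff) of a constant-expression
now parsed in a ConstantEvaluated context and funneled through
ActOnConstantExpression; 'kLanes' is a hypothetical name:

    constexpr int kLanes = 4;
    int buffer[kLanes * 2];  // array bound is a constant-expression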
/// ParseRHSOfBinaryExpression - Parse a binary expression that starts with
@@ -263,7 +268,7 @@ ExprResult
Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
prec::Level NextTokPrec = getBinOpPrecedence(Tok.getKind(),
GreaterThanIsOperator,
- getLang().CPlusPlus0x);
+ getLangOpts().CPlusPlus0x);
SourceLocation ColonLoc;
while (1) {
@@ -311,8 +316,8 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
SourceLocation FILoc = Tok.getLocation();
const char *FIText = ": ";
const SourceManager &SM = PP.getSourceManager();
- if (FILoc.isFileID() || PP.isAtStartOfMacroExpansion(FILoc)) {
- FILoc = SM.getExpansionLoc(FILoc);
+ if (FILoc.isFileID() || PP.isAtStartOfMacroExpansion(FILoc, &FILoc)) {
+ assert(FILoc.isFileID());
bool IsInvalid = false;
const char *SourcePtr =
SM.getCharacterData(FILoc.getLocWithOffset(-1), &IsInvalid);
@@ -347,9 +352,16 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// be a throw-expression, which is not a valid cast-expression.
// Therefore we need some special-casing here.
// Also note that the third operand of the conditional operator is
- // an assignment-expression in C++.
+ // an assignment-expression in C++, and in C++11, we can have a
+ // braced-init-list on the RHS of an assignment. For better diagnostics,
+ // parse as if braced-init-lists were allowed everywhere, and check later
+ // that they appear only on the RHS of assignments.
ExprResult RHS;
- if (getLang().CPlusPlus && NextTokPrec <= prec::Conditional)
+ bool RHSIsInitList = false;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ RHS = ParseBraceInitializer();
+ RHSIsInitList = true;
+ } else if (getLangOpts().CPlusPlus && NextTokPrec <= prec::Conditional)
RHS = ParseAssignmentExpression();
else
RHS = ParseCastExpression(false);
@@ -361,7 +373,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// operator immediately to the right of the RHS.
prec::Level ThisPrec = NextTokPrec;
NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
- getLang().CPlusPlus0x);
+ getLangOpts().CPlusPlus0x);
// Assignment and conditional expressions are right-associative.
bool isRightAssoc = ThisPrec == prec::Conditional ||
@@ -371,6 +383,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// more tightly with RHS than we do, evaluate it completely first.
if (ThisPrec < NextTokPrec ||
(ThisPrec == NextTokPrec && isRightAssoc)) {
+ if (!RHS.isInvalid() && RHSIsInitList) {
+ Diag(Tok, diag::err_init_list_bin_op)
+ << /*LHS*/0 << PP.getSpelling(Tok) << Actions.getExprRange(RHS.get());
+ RHS = ExprError();
+ }
// If this is left-associative, only parse things on the RHS that bind
// more tightly than the current operator. If it is left-associative, it
// is okay, to bind exactly as tightly. For example, compile A=B=C=D as
@@ -378,15 +395,28 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
// The function takes ownership of the RHS.
RHS = ParseRHSOfBinaryExpression(RHS,
static_cast<prec::Level>(ThisPrec + !isRightAssoc));
+ RHSIsInitList = false;
if (RHS.isInvalid())
LHS = ExprError();
NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
- getLang().CPlusPlus0x);
+ getLangOpts().CPlusPlus0x);
}
assert(NextTokPrec <= ThisPrec && "Recursion didn't work!");
+ if (!RHS.isInvalid() && RHSIsInitList) {
+ if (ThisPrec == prec::Assignment) {
+ Diag(OpToken, diag::warn_cxx98_compat_generalized_initializer_lists)
+ << Actions.getExprRange(RHS.get());
+ } else {
+ Diag(OpToken, diag::err_init_list_bin_op)
+ << /*RHS*/1 << PP.getSpelling(OpToken)
+ << Actions.getExprRange(RHS.get());
+ LHS = ExprError();
+ }
+ }
+
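// Illustration (not part of the patch): under this rule, in C++11
//   x = {1, 2};   // accepted, with a C++98-compat warning
//   x + {1, 2};   // rejected with err_init_list_bin_op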
if (!LHS.isInvalid()) {
// Combine the LHS and RHS into the LHS (e.g. build AST).
if (TernaryMiddle.isInvalid()) {
@@ -416,7 +446,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
///
ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
- bool isTypeCast) {
+ TypeCastState isTypeCast) {
bool NotCastExpr;
ExprResult Res = ParseCastExpression(isUnaryExpression,
isAddressOfOperand,
@@ -427,6 +457,29 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return move(Res);
}
+namespace {
+class CastExpressionIdValidator : public CorrectionCandidateCallback {
+ public:
+ CastExpressionIdValidator(bool AllowTypes, bool AllowNonTypes)
+ : AllowNonTypes(AllowNonTypes) {
+ WantTypeSpecifiers = AllowTypes;
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ if (!ND)
+ return candidate.isKeyword();
+
+ if (isa<TypeDecl>(ND))
+ return WantTypeSpecifiers;
+ return AllowNonTypes;
+ }
+
+ private:
+ bool AllowNonTypes;
+};
+}
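// A sketch of the intended effect (not part of the patch): while parsing a
// possible cast such as '(flaot)x', typo correction may suggest the type
// 'float' only when a type is permitted here (AllowTypes), and a plain
// variable or function name only when the context does not require a type
// (AllowNonTypes).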
+
/// ParseCastExpression - Parse a cast-expression, or, if isUnaryExpression is
/// true, parse a unary-expression. isAddressOfOperand exists because an
/// id-expression that is the operand of address-of gets special treatment
@@ -444,14 +497,14 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
/// unary-operator cast-expression
/// 'sizeof' unary-expression
/// 'sizeof' '(' type-name ')'
-/// [C++0x] 'sizeof' '...' '(' identifier ')'
+/// [C++11] 'sizeof' '...' '(' identifier ')'
/// [GNU] '__alignof' unary-expression
/// [GNU] '__alignof' '(' type-name ')'
-/// [C++0x] 'alignof' '(' type-id ')'
+/// [C++11] 'alignof' '(' type-id ')'
/// [GNU] '&&' identifier
+/// [C++11] 'noexcept' '(' expression ')' [C++11 5.3.7]
/// [C++] new-expression
/// [C++] delete-expression
-/// [C++0x] 'noexcept' '(' expression ')'
///
/// unary-operator: one of
/// '&' '*' '+' '-' '~' '!'
@@ -463,9 +516,10 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
/// constant
/// string-literal
/// [C++] boolean-literal [C++ 2.13.5]
-/// [C++0x] 'nullptr' [C++0x 2.14.7]
+/// [C++11] 'nullptr' [C++11 2.14.7]
+/// [C++11] user-defined-literal
/// '(' expression ')'
-/// [C1X] generic-selection
+/// [C11] generic-selection
/// '__func__' [C99 6.4.2.2]
/// [GNU] '__FUNCTION__'
/// [GNU] '__PRETTY_FUNCTION__'
@@ -482,9 +536,9 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
/// [OBJC] '@encode' '(' type-name ')'
/// [OBJC] objc-string-literal
/// [C++] simple-type-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
-/// [C++0x] simple-type-specifier braced-init-list [C++ 5.2.3]
+/// [C++11] simple-type-specifier braced-init-list [C++11 5.2.3]
/// [C++] typename-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
-/// [C++0x] typename-specifier braced-init-list [C++ 5.2.3]
+/// [C++11] typename-specifier braced-init-list [C++11 5.2.3]
/// [C++] 'const_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
/// [C++] 'dynamic_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
/// [C++] 'reinterpret_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
@@ -565,6 +619,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
/// '__is_class'
/// '__is_empty' [TODO]
/// '__is_enum'
+/// '__is_final'
/// '__is_pod'
/// '__is_polymorphic'
/// '__is_trivial'
@@ -590,7 +645,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
- bool isTypeCast) {
+ TypeCastState isTypeCast) {
ExprResult Res;
tok::TokenKind SavedKind = Tok.getKind();
NotCastExpr = false;
@@ -611,7 +666,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// If this expression is limited to being a unary-expression, the parent can
// not start a cast expression.
ParenParseOption ParenExprType =
- (isUnaryExpression && !getLang().CPlusPlus)? CompoundLiteral : CastExpr;
+ (isUnaryExpression && !getLangOpts().CPlusPlus)? CompoundLiteral : CastExpr;
ParsedType CastTy;
SourceLocation RParenLoc;
@@ -621,7 +676,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ColonProtectionRAIIObject X(*this, false);
Res = ParseParenExpression(ParenExprType, false/*stopIfCastExr*/,
- isTypeCast, CastTy, RParenLoc);
+ isTypeCast == IsTypeCast, CastTy, RParenLoc);
}
switch (ParenExprType) {
@@ -645,15 +700,20 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// constant: integer-constant
// constant: floating-constant
- Res = Actions.ActOnNumericConstant(Tok);
+ Res = Actions.ActOnNumericConstant(Tok, /*UDLScope*/getCurScope());
ConsumeToken();
break;
case tok::kw_true:
case tok::kw_false:
return ParseCXXBoolLiteral();
+
+ case tok::kw___objc_yes:
+ case tok::kw___objc_no:
+ return ParseObjCBoolLiteral();
case tok::kw_nullptr:
+ Diag(Tok, diag::warn_cxx98_compat_nullptr);
return Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
case tok::annot_primary_expr:
@@ -662,19 +722,21 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ConsumeToken();
break;
+ case tok::kw_decltype:
case tok::identifier: { // primary-expression: identifier
// unqualified-id: identifier
// constant: enumeration-constant
// Turn a potentially qualified name into a annot_typename or
// annot_cxxscope if it would be valid. This handles things like x::y, etc.
- if (getLang().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Avoid the unnecessary parse-time lookup in the common case
// where the syntax forbids a type.
const Token &Next = NextToken();
if (Next.is(tok::coloncolon) ||
(!ColonIsSacred && Next.is(tok::colon)) ||
Next.is(tok::less) ||
- Next.is(tok::l_paren)) {
+ Next.is(tok::l_paren) ||
+ Next.is(tok::l_brace)) {
// If TryAnnotateTypeOrScopeToken annotates the token, tail recurse.
if (TryAnnotateTypeOrScopeToken())
return ExprError();
@@ -689,13 +751,15 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
SourceLocation ILoc = ConsumeToken();
// Support 'Class.property' and 'super.property' notation.
- if (getLang().ObjC1 && Tok.is(tok::period) &&
+ if (getLangOpts().ObjC1 && Tok.is(tok::period) &&
(Actions.getTypeName(II, ILoc, getCurScope()) ||
// Allow the base to be 'super' if in an objc-method.
(&II == Ident_super && getCurScope()->isInObjcMethodScope()))) {
ConsumeToken();
- if (Tok.isNot(tok::identifier)) {
+ // Allow either an identifier or the keyword 'class' (in C++).
+ if (Tok.isNot(tok::identifier) &&
+ !(getLangOpts().CPlusPlus && Tok.is(tok::kw_class))) {
Diag(Tok, diag::err_expected_property_name);
return ExprError();
}
@@ -711,7 +775,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// the token sequence is ill-formed. However, if there's a ':' or ']' after
// that identifier, this is probably a message send with a missing open
// bracket. Treat it as such.
- if (getLang().ObjC1 && &II == Ident_super && !InMessageExpression &&
+ if (getLangOpts().ObjC1 && &II == Ident_super && !InMessageExpression &&
getCurScope()->isInObjcMethodScope() &&
((Tok.is(tok::identifier) &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) ||
@@ -726,7 +790,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// send that's missing the opening '['. Recover
// appropriately. Also take this path if we're performing code
// completion after an Objective-C class name.
- if (getLang().ObjC1 &&
+ if (getLangOpts().ObjC1 &&
((Tok.is(tok::identifier) && !InMessageExpression) ||
Tok.is(tok::code_completion))) {
const Token& Next = NextToken();
@@ -764,16 +828,20 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// not.
UnqualifiedId Name;
CXXScopeSpec ScopeSpec;
+ SourceLocation TemplateKWLoc;
+ CastExpressionIdValidator Validator(isTypeCast != NotTypeCast,
+ isTypeCast != IsTypeCast);
Name.setIdentifier(&II, ILoc);
- Res = Actions.ActOnIdExpression(getCurScope(), ScopeSpec, Name,
- Tok.is(tok::l_paren), isAddressOfOperand);
+ Res = Actions.ActOnIdExpression(getCurScope(), ScopeSpec, TemplateKWLoc,
+ Name, Tok.is(tok::l_paren),
+ isAddressOfOperand, &Validator);
break;
}
case tok::char_constant: // constant: character-constant
case tok::wide_char_constant:
case tok::utf16_char_constant:
case tok::utf32_char_constant:
- Res = Actions.ActOnCharacterConstant(Tok);
+ Res = Actions.ActOnCharacterConstant(Tok, /*UDLScope*/getCurScope());
ConsumeToken();
break;
case tok::kw___func__: // primary-expression: __func__ [C99 6.4.2.2]
@@ -787,9 +855,9 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::utf8_string_literal:
case tok::utf16_string_literal:
case tok::utf32_string_literal:
- Res = ParseStringLiteralExpression();
+ Res = ParseStringLiteralExpression(true);
break;
- case tok::kw__Generic: // primary-expression: generic-selection [C1X 6.5.1]
+ case tok::kw__Generic: // primary-expression: generic-selection [C11 6.5.1]
Res = ParseGenericSelectionExpression();
break;
case tok::kw___builtin_va_arg:
@@ -807,7 +875,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// ++ cast-expression
// -- cast-expression
SourceLocation SavedLoc = ConsumeToken();
- Res = ParseCastExpression(!getLang().CPlusPlus);
+ Res = ParseCastExpression(!getLangOpts().CPlusPlus);
if (!Res.isInvalid())
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
return move(Res);
@@ -909,6 +977,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
}
// Fall through
+ case tok::annot_decltype:
case tok::kw_char:
case tok::kw_wchar_t:
case tok::kw_char16_t:
@@ -918,6 +987,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw_int:
case tok::kw_long:
case tok::kw___int64:
+ case tok::kw___int128:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_half:
@@ -927,7 +997,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw_typename:
case tok::kw_typeof:
case tok::kw___vector: {
- if (!getLang().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
Diag(Tok, diag::err_expected_expression);
return ExprError();
}
@@ -945,10 +1015,13 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
DeclSpec DS(AttrFactory);
ParseCXXSimpleTypeSpecifier(DS);
if (Tok.isNot(tok::l_paren) &&
- (!getLang().CPlusPlus0x || Tok.isNot(tok::l_brace)))
+ (!getLangOpts().CPlusPlus0x || Tok.isNot(tok::l_brace)))
return ExprError(Diag(Tok, diag::err_expected_lparen_after_type)
<< DS.getSourceRange());
+ if (Tok.is(tok::l_brace))
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
Res = ParseCXXTypeConstructExpression(DS);
break;
}
@@ -970,7 +1043,8 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// type, translate it into a type and continue parsing as a
// cast expression.
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false);
AnnotateTemplateIdTokenAsType();
return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
NotCastExpr, isTypeCast);
@@ -1028,12 +1102,13 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return ParseCXXDeleteExpression(false, Tok.getLocation());
case tok::kw_noexcept: { // [C++0x] 'noexcept' '(' expression ')'
+ Diag(Tok, diag::warn_cxx98_compat_noexcept_expr);
SourceLocation KeyLoc = ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.expectAndConsume(diag::err_expected_lparen_after, "noexcept"))
return ExprError();
- // C++ [expr.unary.noexcept]p1:
+ // C++11 [expr.unary.noexcept]p1:
// The noexcept operator determines whether the evaluation of its operand,
// which is an unevaluated operand, can throw an exception.
EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
@@ -1081,6 +1156,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw___is_trivial:
case tok::kw___is_trivially_copyable:
case tok::kw___is_union:
+ case tok::kw___is_final:
case tok::kw___has_trivial_constructor:
case tok::kw___has_trivial_copy:
case tok::kw___has_trivial_assign:
@@ -1096,8 +1172,12 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw___is_same:
case tok::kw___is_convertible:
case tok::kw___is_convertible_to:
+ case tok::kw___is_trivially_assignable:
return ParseBinaryTypeTrait();
+ case tok::kw___is_trivially_constructible:
+ return ParseTypeTrait();
+
case tok::kw___array_rank:
case tok::kw___array_extent:
return ParseArrayTypeTrait();
@@ -1119,17 +1199,22 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return ExprError();
}
case tok::l_square:
- if (getLang().CPlusPlus0x) {
- if (getLang().ObjC1) {
+ if (getLangOpts().CPlusPlus0x) {
+ if (getLangOpts().ObjC1) {
+ // C++11 lambda expressions and Objective-C message sends both start with a
+ // square bracket. There are three possibilities here: we have a valid
+ // lambda expression, we have an invalid lambda expression, or we have
+ // something that doesn't appear to be a lambda. If we're in the last case,
+ // we fall back to ParseObjCMessageExpression.
Res = TryParseLambdaExpression();
- if (Res.isInvalid())
+ if (!Res.isInvalid() && !Res.get())
Res = ParseObjCMessageExpression();
break;
}
Res = ParseLambdaExpression();
break;
}
- if (getLang().ObjC1) {
+ if (getLangOpts().ObjC1) {
Res = ParseObjCMessageExpression();
break;
}
@@ -1181,7 +1266,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// If we see identifier: after an expression, and we're not already in a
// message send, then this is probably a message send with a missing
// opening bracket '['.
- if (getLang().ObjC1 && !InMessageExpression &&
+ if (getLangOpts().ObjC1 && !InMessageExpression &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
LHS = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
ParsedType(), LHS.get());
@@ -1199,17 +1284,23 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// actually another message send. In this case, do some look-ahead to see
// if the contents of the square brackets are obviously not a valid
// expression and recover by pretending there is no suffix.
- if (getLang().ObjC1 && Tok.isAtStartOfLine() &&
+ if (getLangOpts().ObjC1 && Tok.isAtStartOfLine() &&
isSimpleObjCMessageExpression())
return move(LHS);
-
+
+ // Reject array indices starting with a lambda-expression. '[[' is
+ // reserved for attributes.
+ if (CheckProhibitedCXX11Attribute())
+ return ExprError();
+
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
Loc = T.getOpenLocation();
ExprResult Idx;
- if (getLang().CPlusPlus0x && Tok.is(tok::l_brace))
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
Idx = ParseBraceInitializer();
- else
+ } else
Idx = ParseExpression();
SourceLocation RLoc = Tok.getLocation();
@@ -1233,22 +1324,27 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
Expr *ExecConfig = 0;
- BalancedDelimiterTracker LLLT(*this, tok::lesslessless);
BalancedDelimiterTracker PT(*this, tok::l_paren);
if (OpKind == tok::lesslessless) {
ExprVector ExecConfigExprs(Actions);
CommaLocsTy ExecConfigCommaLocs;
- LLLT.consumeOpen();
+ SourceLocation OpenLoc = ConsumeToken();
if (ParseExpressionList(ExecConfigExprs, ExecConfigCommaLocs)) {
LHS = ExprError();
}
- if (LHS.isInvalid()) {
+ SourceLocation CloseLoc = Tok.getLocation();
+ if (Tok.is(tok::greatergreatergreater)) {
+ ConsumeToken();
+ } else if (LHS.isInvalid()) {
SkipUntil(tok::greatergreatergreater);
- } else if (LLLT.consumeClose()) {
+ } else {
// There was an error closing the brackets
+ Diag(Tok, diag::err_expected_ggg);
+ Diag(OpenLoc, diag::note_matching) << "<<<";
+ SkipUntil(tok::greatergreatergreater);
LHS = ExprError();
}
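// e.g. (sketch, not from the patch): for a CUDA launch
//   kernel<<<grid, block>>>(args);
// OpenLoc and CloseLoc now record the '<<<' and '>>>' locations directly,
// replacing the BalancedDelimiterTracker previously used for them.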
@@ -1261,9 +1357,9 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (!LHS.isInvalid()) {
ExprResult ECResult = Actions.ActOnCUDAExecConfigExpr(getCurScope(),
- LLLT.getOpenLocation(),
+ OpenLoc,
move_arg(ExecConfigExprs),
- LLLT.getCloseLocation());
+ CloseLoc);
if (ECResult.isInvalid())
LHS = ExprError();
else
@@ -1278,7 +1374,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
CommaLocsTy CommaLocs;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteCall(getCurScope(), LHS.get(), 0, 0);
+ Actions.CodeCompleteCall(getCurScope(), LHS.get(),
+ llvm::ArrayRef<Expr *>());
cutOffParsing();
return ExprError();
}
@@ -1320,14 +1417,15 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
CXXScopeSpec SS;
ParsedType ObjectType;
bool MayBePseudoDestructor = false;
- if (getLang().CPlusPlus && !LHS.isInvalid()) {
+ if (getLangOpts().CPlusPlus && !LHS.isInvalid()) {
LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), LHS.take(),
OpLoc, OpKind, ObjectType,
MayBePseudoDestructor);
if (LHS.isInvalid())
break;
- ParseOptionalCXXScopeSpecifier(SS, ObjectType, false,
+ ParseOptionalCXXScopeSpecifier(SS, ObjectType,
+ /*EnteringContext=*/false,
&MayBePseudoDestructor);
if (SS.isNotEmpty())
ObjectType = ParsedType();
@@ -1355,18 +1453,31 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// names a real destructor.
// Allow explicit constructor calls in Microsoft mode.
// FIXME: Add support for explicit call of template constructor.
+ SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (ParseUnqualifiedId(SS,
- /*EnteringContext=*/false,
- /*AllowDestructorName=*/true,
- /*AllowConstructorName=*/ getLang().MicrosoftExt,
- ObjectType,
- Name))
+ if (getLangOpts().ObjC2 && OpKind == tok::period && Tok.is(tok::kw_class)) {
+ // Objective-C++:
+ // After a '.' in a member access expression, treat the keyword
+ // 'class' as if it were an identifier.
+ //
+ // This hack allows property access to the 'class' method because it is
+ // such a common method name. For other C++ keywords that are
+ // Objective-C method names, one must use the message send syntax.
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ SourceLocation Loc = ConsumeToken();
+ Name.setIdentifier(Id, Loc);
+ } else if (ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/true,
+ /*AllowConstructorName=*/
+ getLangOpts().MicrosoftExt,
+ ObjectType, TemplateKWLoc, Name))
LHS = ExprError();
if (!LHS.isInvalid())
LHS = Actions.ActOnMemberAccessExpr(getCurScope(), LHS.take(), OpLoc,
- OpKind, SS, Name, ObjCImpDecl,
+ OpKind, SS, TemplateKWLoc, Name,
+ CurParsedObjCImpl ? CurParsedObjCImpl->Dcl : 0,
Tok.is(tok::l_paren));
break;
}
@@ -1419,19 +1530,11 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
// If the operand doesn't start with an '(', it must be an expression.
if (Tok.isNot(tok::l_paren)) {
isCastExpr = false;
- if (OpTok.is(tok::kw_typeof) && !getLang().CPlusPlus) {
+ if (OpTok.is(tok::kw_typeof) && !getLangOpts().CPlusPlus) {
Diag(Tok,diag::err_expected_lparen_after_id) << OpTok.getIdentifierInfo();
return ExprError();
}
- // C++0x [expr.sizeof]p1:
- // [...] The operand is either an expression, which is an unevaluated
- // operand (Clause 5) [...]
- //
- // The GNU typeof and alignof extensions also behave as unevaluated
- // operands.
- EnterExpressionEvaluationContext Unevaluated(Actions,
- Sema::Unevaluated);
Operand = ParseCastExpression(true/*isUnaryExpression*/);
} else {
// If it starts with a '(', we know that it is either a parenthesized
@@ -1441,14 +1544,6 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
ParenParseOption ExprType = CastExpr;
SourceLocation LParenLoc = Tok.getLocation(), RParenLoc;
- // C++0x [expr.sizeof]p1:
- // [...] The operand is either an expression, which is an unevaluated
- // operand (Clause 5) [...]
- //
- // The GNU typeof and alignof extensions also behave as unevaluated
- // operands.
- EnterExpressionEvaluationContext Unevaluated(Actions,
- Sema::Unevaluated);
Operand = ParseParenExpression(ExprType, true/*stopIfCastExpr*/,
false, CastTy, RParenLoc);
CastRange = SourceRange(LParenLoc, RParenLoc);
@@ -1460,7 +1555,7 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
return ExprEmpty();
}
- if (getLang().CPlusPlus || OpTok.isNot(tok::kw_typeof)) {
+ if (getLangOpts().CPlusPlus || OpTok.isNot(tok::kw_typeof)) {
// GNU typeof in C requires the expression to be parenthesized. Not so for
// sizeof/alignof or in C++. Therefore, the parenthesized expression is
// the start of a unary-expression, but doesn't include any postfix
@@ -1533,7 +1628,12 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
*Name, NameLoc,
RParenLoc);
}
-
+
+ if (OpTok.is(tok::kw_alignof))
+ Diag(OpTok, diag::warn_cxx98_compat_alignof);
+
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+
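// With the unevaluated context hoisted to the caller, it now covers both
// operand forms; e.g. (sketch) in 'sizeof(f())' the call is parsed as an
// unevaluated operand, so 'f' is not odr-used.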
bool isCastExpr;
ParsedType CastTy;
SourceRange CastRange;
@@ -1573,7 +1673,7 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
/// assign-expr ')'
/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
-/// [OCL] '__builtin_astype' '(' type-name expr ')'
+/// [OCL] '__builtin_astype' '(' assignment-expression ',' type-name ')'
///
/// [GNU] offsetof-member-designator:
/// [GNU] identifier
@@ -1661,6 +1761,9 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
Comps.back().LocEnd = ConsumeToken();
} else if (Tok.is(tok::l_square)) {
+ if (CheckProhibitedCXX11Attribute())
+ return ExprError();
+
// offsetof-member-designator: offsetof-member-design '[' expression ']'
Comps.push_back(Sema::OffsetOfComponent());
Comps.back().isBrackets = true;
@@ -1803,23 +1906,40 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
return ExprError();
}
+ // Diagnose use of bridge casts in non-ARC mode.
+ bool BridgeCast = (getLangOpts().ObjC2 &&
+ (Tok.is(tok::kw___bridge) ||
+ Tok.is(tok::kw___bridge_transfer) ||
+ Tok.is(tok::kw___bridge_retained) ||
+ Tok.is(tok::kw___bridge_retain)));
+ if (BridgeCast && !getLangOpts().ObjCAutoRefCount) {
+ StringRef BridgeCastName = Tok.getName();
+ SourceLocation BridgeKeywordLoc = ConsumeToken();
+ if (!PP.getSourceManager().isInSystemHeader(BridgeKeywordLoc))
+ Diag(BridgeKeywordLoc, diag::warn_arc_bridge_cast_nonarc)
+ << BridgeCastName
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "");
+ BridgeCast = false;
+ }
+
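// e.g. (illustration): outside ARC, '(__bridge id)cf' now draws
// warn_arc_bridge_cast_nonarc with a fixit removing '__bridge', and the
// expression is then parsed as an ordinary cast.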
// None of these cases should fall through with an invalid Result
// unless they've already reported an error.
-
if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
Diag(Tok, diag::ext_gnu_statement_expr);
+
+ Actions.ActOnStartStmtExpr();
+
ParsedAttributes attrs(AttrFactory);
StmtResult Stmt(ParseCompoundStatement(attrs, true));
ExprType = CompoundStmt;
// If the substmt parsed correctly, build the AST node.
- if (!Stmt.isInvalid())
+ if (!Stmt.isInvalid()) {
Result = Actions.ActOnStmtExpr(OpenLoc, Stmt.take(), Tok.getLocation());
- } else if (ExprType >= CompoundLiteral &&
- (Tok.is(tok::kw___bridge) ||
- Tok.is(tok::kw___bridge_transfer) ||
- Tok.is(tok::kw___bridge_retained) ||
- Tok.is(tok::kw___bridge_retain))) {
+ } else {
+ Actions.ActOnStmtExprError();
+ }
+ } else if (ExprType >= CompoundLiteral && BridgeCast) {
tok::TokenKind tokenKind = Tok.getKind();
SourceLocation BridgeKeywordLoc = ConsumeToken();
@@ -1879,7 +1999,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// this is probably an Objective-C message send where the leading '[' is
// missing. Recover as if that were the case.
if (!DeclaratorInfo.isInvalidType() && Tok.is(tok::identifier) &&
- !InMessageExpression && getLang().ObjC1 &&
+ !InMessageExpression && getLangOpts().ObjC1 &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
TypeResult Ty;
{
@@ -1922,7 +2042,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
}
// Reject the cast of super idiom in ObjC.
- if (Tok.is(tok::identifier) && getLang().ObjC1 &&
+ if (Tok.is(tok::identifier) && getLangOpts().ObjC1 &&
Tok.getIdentifierInfo() == Ident_super &&
getCurScope()->isInObjcMethodScope() &&
GetLookAheadToken(1).isNot(tok::period)) {
@@ -1935,7 +2055,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// TODO: For cast expression with CastTy.
Result = ParseCastExpression(/*isUnaryExpression=*/false,
/*isAddressOfOperand=*/false,
- /*isTypeCast=*/true);
+ /*isTypeCast=*/IsTypeCast);
if (!Result.isInvalid()) {
Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc,
DeclaratorInfo, CastTy,
@@ -1956,13 +2076,13 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
if (!ParseExpressionList(ArgExprs, CommaLocs)) {
ExprType = SimpleExpr;
- Result = Actions.ActOnParenOrParenListExpr(OpenLoc, Tok.getLocation(),
- move_arg(ArgExprs));
+ Result = Actions.ActOnParenListExpr(OpenLoc, Tok.getLocation(),
+ move_arg(ArgExprs));
}
} else {
InMessageExpressionRAIIObject InMessage(*this, false);
- Result = ParseExpression();
+ Result = ParseExpression(MaybeTypeCast);
ExprType = SimpleExpr;
// Don't build a paren expression unless we actually match a ')'.
@@ -1993,7 +2113,7 @@ Parser::ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc) {
assert(Tok.is(tok::l_brace) && "Not a compound literal!");
- if (!getLang().C99) // Compound literals don't exist in C90.
+ if (!getLangOpts().C99) // Compound literals don't exist in C90.
Diag(LParenLoc, diag::ext_c99_compound_literal);
ExprResult Result = ParseInitializer();
if (!Result.isInvalid() && Ty)
@@ -2007,7 +2127,7 @@ Parser::ParseCompoundLiteralExpression(ParsedType Ty,
///
/// primary-expression: [C99 6.5.1]
/// string-literal
-ExprResult Parser::ParseStringLiteralExpression() {
+ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
assert(isTokenStringLiteral() && "Not a string literal!");
// String concat. Note that keywords like __func__ and __FUNCTION__ are not
@@ -2020,11 +2140,12 @@ ExprResult Parser::ParseStringLiteralExpression() {
} while (isTokenStringLiteral());
// Pass the set of string tokens, ready for concatenation, to the actions.
- return Actions.ActOnStringLiteral(&StringToks[0], StringToks.size());
+ return Actions.ActOnStringLiteral(&StringToks[0], StringToks.size(),
+ AllowUserDefinedLiteral ? getCurScope() : 0);
}
-/// ParseGenericSelectionExpression - Parse a C1X generic-selection
-/// [C1X 6.5.1.1].
+/// ParseGenericSelectionExpression - Parse a C11 generic-selection
+/// [C11 6.5.1.1].
///
/// generic-selection:
/// _Generic ( assignment-expression , generic-assoc-list )
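///
/// For example (illustrative only):
///   _Generic(x, int: "int", float: "float", default: "other")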
@@ -2038,8 +2159,8 @@ ExprResult Parser::ParseGenericSelectionExpression() {
assert(Tok.is(tok::kw__Generic) && "_Generic keyword expected");
SourceLocation KeyLoc = ConsumeToken();
- if (!getLang().C1X)
- Diag(KeyLoc, diag::ext_c1x_generic_selection);
+ if (!getLangOpts().C11)
+ Diag(KeyLoc, diag::ext_c11_generic_selection);
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.expectAndConsume(diag::err_expected_lparen))
@@ -2047,7 +2168,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
ExprResult ControllingExpr;
{
- // C1X 6.5.1.1p3 "The controlling expression of a generic selection is
+ // C11 6.5.1.1p3 "The controlling expression of a generic selection is
// not evaluated."
EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
ControllingExpr = ParseAssignmentExpression();
@@ -2068,7 +2189,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
while (1) {
ParsedType Ty;
if (Tok.is(tok::kw_default)) {
- // C1X 6.5.1.1p2 "A generic selection shall have no more than one default
+ // C11 6.5.1.1p2 "A generic selection shall have no more than one default
// generic association."
if (!DefaultLoc.isInvalid()) {
Diag(Tok, diag::err_duplicate_default_assoc);
@@ -2143,13 +2264,12 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
void (Sema::*Completer)(Scope *S,
Expr *Data,
- Expr **Args,
- unsigned NumArgs),
+ llvm::ArrayRef<Expr *> Args),
Expr *Data) {
while (1) {
if (Tok.is(tok::code_completion)) {
if (Completer)
- (Actions.*Completer)(getCurScope(), Data, Exprs.data(), Exprs.size());
+ (Actions.*Completer)(getCurScope(), Data, Exprs);
else
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
cutOffParsing();
@@ -2157,9 +2277,10 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr*> &Exprs,
}
ExprResult Expr;
- if (getLang().CPlusPlus0x && Tok.is(tok::l_brace))
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
Expr = ParseBraceInitializer();
- else
+ } else
Expr = ParseAssignmentExpression();
if (Tok.is(tok::ellipsis))
@@ -2225,7 +2346,6 @@ ExprResult Parser::ParseBlockLiteralExpression() {
// allows determining whether a variable reference inside the block is
// within or outside of the block.
ParseScope BlockScope(this, Scope::BlockScope | Scope::FnScope |
- Scope::BreakScope | Scope::ContinueScope |
Scope::DeclScope);
// Inform sema that we are starting a block.
@@ -2270,6 +2390,8 @@ ExprResult Parser::ParseBlockLiteralExpression() {
0, 0, 0,
true, SourceLocation(),
SourceLocation(),
+ SourceLocation(),
+ SourceLocation(),
EST_None,
SourceLocation(),
0, 0, 0, 0,
@@ -2300,3 +2422,12 @@ ExprResult Parser::ParseBlockLiteralExpression() {
Actions.ActOnBlockError(CaretLoc, getCurScope());
return move(Result);
}
+
+/// ParseObjCBoolLiteral - This handles the objective-c Boolean literals.
+///
+/// '__objc_yes'
+/// '__objc_no'
+ExprResult Parser::ParseObjCBoolLiteral() {
+ tok::TokenKind Kind = Tok.getKind();
+ return Actions.ActOnObjCBoolLiteral(ConsumeToken(), Kind);
+}
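// Usage sketch (Objective-C, illustration only):
//   BOOL b1 = __objc_yes;   // builds the 'true' Objective-C bool literal
//   BOOL b2 = __objc_no;    // likewise for 'false'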
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
index 60166e8..2af7482 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
@@ -14,6 +14,8 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "RAIIObjectsForParser.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Lex/LiteralSupport.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -136,7 +138,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
bool EnteringContext,
bool *MayBePseudoDestructor,
bool IsTypename) {
- assert(getLang().CPlusPlus &&
+ assert(getLangOpts().CPlusPlus &&
"Call sites of this function should be guarded by checking for C++");
if (Tok.is(tok::annot_cxxscope)) {
@@ -168,6 +170,22 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
*MayBePseudoDestructor = false;
}
+ if (Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype)) {
+ DeclSpec DS(AttrFactory);
+ SourceLocation DeclLoc = Tok.getLocation();
+ SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
+ if (Tok.isNot(tok::coloncolon)) {
+ AnnotateExistingDecltypeSpecifier(DS, DeclLoc, EndLoc);
+ return false;
+ }
+
+ SourceLocation CCLoc = ConsumeToken();
+ if (Actions.ActOnCXXNestedNameSpecifierDecltype(SS, DS, CCLoc))
+ SS.SetInvalid(SourceRange(DeclLoc, CCLoc));
+
+ HasScopeSpecifier = true;
+ }
+
while (true) {
if (HasScopeSpecifier) {
// C++ [basic.lookup.classref]p5:
@@ -247,15 +265,13 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// Commit to parsing the template-id.
TPA.Commit();
TemplateTy Template;
- if (TemplateNameKind TNK = Actions.ActOnDependentTemplateName(getCurScope(),
- TemplateKWLoc,
- SS,
- TemplateName,
- ObjectType,
- EnteringContext,
- Template)) {
- if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateName,
- TemplateKWLoc, false))
+ if (TemplateNameKind TNK
+ = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, TemplateName,
+ ObjectType, EnteringContext,
+ Template)) {
+ if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateKWLoc,
+ TemplateName, false))
return true;
} else
return true;
@@ -283,16 +299,15 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
assert(Tok.is(tok::coloncolon) && "NextToken() not working properly!");
SourceLocation CCLoc = ConsumeToken();
- if (!HasScopeSpecifier)
- HasScopeSpecifier = true;
+ HasScopeSpecifier = true;
ASTTemplateArgsPtr TemplateArgsPtr(Actions,
TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(),
- /*FIXME:*/SourceLocation(),
- SS,
+ SS,
+ TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->TemplateNameLoc,
TemplateId->LAngleLoc,
@@ -381,15 +396,15 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
EnteringContext,
Template,
MemberOfUnknownSpecialization)) {
- // We have found a template name, so annotate this this token
+ // We have found a template name, so annotate this token
// with a template-id annotation. We do not permit the
// template-id to be translated into a type annotation,
// because some clients (e.g., the parsing of class template
// specializations) still want to see the original template-id
// token.
ConsumeToken();
- if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateName,
- SourceLocation(), false))
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, false))
return true;
continue;
}
@@ -401,7 +416,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// parse correctly as a template, so suggest the keyword 'template'
// before 'getAs' and treat this as a dependent template name.
unsigned DiagID = diag::err_missing_dependent_template_keyword;
- if (getLang().MicrosoftExt)
+ if (getLangOpts().MicrosoftExt)
DiagID = diag::warn_missing_dependent_template_keyword;
Diag(Tok.getLocation(), DiagID)
@@ -410,14 +425,14 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (TemplateNameKind TNK
= Actions.ActOnDependentTemplateName(getCurScope(),
- Tok.getLocation(), SS,
+ SS, SourceLocation(),
TemplateName, ObjectType,
EnteringContext, Template)) {
// Consume the identifier.
ConsumeToken();
- if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateName,
- SourceLocation(), false))
- return true;
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, false))
+ return true;
}
else
return true;
@@ -488,14 +503,16 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
// '::' unqualified-id
//
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
-
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (ParseUnqualifiedId(SS,
- /*EnteringContext=*/false,
- /*AllowDestructorName=*/false,
- /*AllowConstructorName=*/false,
+ if (ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
/*ObjectType=*/ ParsedType(),
+ TemplateKWLoc,
Name))
return ExprError();
@@ -503,10 +520,9 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
// followed by a postfix-expression suffix.
if (isAddressOfOperand && isPostfixExpressionSuffixStart())
isAddressOfOperand = false;
-
- return Actions.ActOnIdExpression(getCurScope(), SS, Name, Tok.is(tok::l_paren),
- isAddressOfOperand);
-
+
+ return Actions.ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Name,
+ Tok.is(tok::l_paren), isAddressOfOperand);
}
/// ParseLambdaExpression - Parse a C++0x lambda expression.
@@ -548,6 +564,9 @@ ExprResult Parser::ParseLambdaExpression() {
if (DiagID) {
Diag(Tok, DiagID.getValue());
SkipUntil(tok::r_square);
+ SkipUntil(tok::l_brace);
+ SkipUntil(tok::r_brace);
+ return ExprError();
}
return ParseLambdaExpressionAfterIntroducer(Intro);
@@ -559,7 +578,7 @@ ExprResult Parser::ParseLambdaExpression() {
///
/// If we are not looking at a lambda expression, returns ExprError().
ExprResult Parser::TryParseLambdaExpression() {
- assert(getLang().CPlusPlus0x
+ assert(getLangOpts().CPlusPlus0x
&& Tok.is(tok::l_square)
&& "Not at the start of a possible lambda expression.");
@@ -576,21 +595,28 @@ ExprResult Parser::TryParseLambdaExpression() {
return ParseLambdaExpression();
}
- // If lookahead indicates this is an Objective-C message...
+ // If lookahead indicates an ObjC message send...
+ // [identifier identifier
if (Next.is(tok::identifier) && After.is(tok::identifier)) {
- return ExprError();
+ return ExprEmpty();
}
+ // Here, we're stuck: lambda introducers and Objective-C message sends are
+ // unambiguous, but distinguishing them requires arbitrary lookahead.
+ // [a,b,c,d,e,f,g] is a lambda, and [a,b,c,d,e,f,g h] is an Objective-C
+ // message send. Instead of writing two routines to parse a lambda
+ // introducer, just try to parse a lambda introducer first, and fall back
+ // if that fails.
+ // (TryParseLambdaIntroducer never produces any diagnostic output.)
LambdaIntroducer Intro;
if (TryParseLambdaIntroducer(Intro))
- return ExprError();
+ return ExprEmpty();
return ParseLambdaExpressionAfterIntroducer(Intro);
}
/// ParseLambdaIntroducer - Parse a lambda introducer.
///
/// Returns a DiagnosticID if it hit something unexpected.
-llvm::Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro) {
+llvm::Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro){
typedef llvm::Optional<unsigned> DiagResult;
assert(Tok.is(tok::l_square) && "Lambda expressions begin with '['.");
@@ -605,28 +631,49 @@ llvm::Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro)
if (Tok.is(tok::amp) &&
(NextToken().is(tok::comma) || NextToken().is(tok::r_square))) {
Intro.Default = LCD_ByRef;
- ConsumeToken();
+ Intro.DefaultLoc = ConsumeToken();
first = false;
} else if (Tok.is(tok::equal)) {
Intro.Default = LCD_ByCopy;
- ConsumeToken();
+ Intro.DefaultLoc = ConsumeToken();
first = false;
}
while (Tok.isNot(tok::r_square)) {
if (!first) {
- if (Tok.isNot(tok::comma))
+ if (Tok.isNot(tok::comma)) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/false);
+ ConsumeCodeCompletionToken();
+ break;
+ }
+
return DiagResult(diag::err_expected_comma_or_rsquare);
+ }
ConsumeToken();
}
- first = false;
+ if (Tok.is(tok::code_completion)) {
+ // If we're in Objective-C++ and we have a bare '[', then this is more
+ // likely to be a message receiver.
+ if (getLangOpts().ObjC1 && first)
+ Actions.CodeCompleteObjCMessageReceiver(getCurScope());
+ else
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/false);
+ ConsumeCodeCompletionToken();
+ break;
+ }
+ first = false;
+
// Parse capture.
LambdaCaptureKind Kind = LCK_ByCopy;
SourceLocation Loc;
IdentifierInfo* Id = 0;
-
+ SourceLocation EllipsisLoc;
+
if (Tok.is(tok::kw_this)) {
Kind = LCK_This;
Loc = ConsumeToken();
@@ -634,11 +681,21 @@ llvm::Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro)
if (Tok.is(tok::amp)) {
Kind = LCK_ByRef;
ConsumeToken();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/true);
+ ConsumeCodeCompletionToken();
+ break;
+ }
}
if (Tok.is(tok::identifier)) {
Id = Tok.getIdentifierInfo();
Loc = ConsumeToken();
+
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
} else if (Tok.is(tok::kw_this)) {
// FIXME: If we want to suggest a fixit here, will need to return more
// than just DiagnosticID. Perhaps full DiagnosticBuilder that can be
@@ -649,7 +706,7 @@ llvm::Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro)
}
}
- Intro.addCapture(Kind, Loc, Id);
+ Intro.addCapture(Kind, Loc, Id, EllipsisLoc);
}
T.consumeClose();
@@ -658,7 +715,7 @@ llvm::Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro)
return DiagResult();
}
-/// TryParseLambdaExpression - Tentatively parse a lambda introducer.
+/// TryParseLambdaIntroducer - Tentatively parse a lambda introducer.
///
/// Returns true if it hit something unexpected.
bool Parser::TryParseLambdaIntroducer(LambdaIntroducer &Intro) {
@@ -679,9 +736,15 @@ bool Parser::TryParseLambdaIntroducer(LambdaIntroducer &Intro) {
/// expression.
ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro) {
+ SourceLocation LambdaBeginLoc = Intro.Range.getBegin();
+ Diag(LambdaBeginLoc, diag::warn_cxx98_compat_lambda);
+
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), LambdaBeginLoc,
+ "lambda expression parsing");
+
// Parse lambda-declarator[opt].
DeclSpec DS(AttrFactory);
- Declarator D(DS, Declarator::PrototypeContext);
+ Declarator D(DS, Declarator::LambdaExprContext);
if (Tok.is(tok::l_paren)) {
ParseScope PrototypeScope(this,
@@ -746,6 +809,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
DS.getTypeQualifiers(),
/*RefQualifierIsLValueRef=*/true,
/*RefQualifierLoc=*/SourceLocation(),
+ /*ConstQualifierLoc=*/SourceLocation(),
+ /*VolatileQualifierLoc=*/SourceLocation(),
MutableLoc,
ESpecType, ESpecRange.getBegin(),
DynamicExceptions.data(),
@@ -756,24 +821,78 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
DeclLoc, DeclEndLoc, D,
TrailingReturnType),
Attr, DeclEndLoc);
+ } else if (Tok.is(tok::kw_mutable) || Tok.is(tok::arrow)) {
+ // It's common to forget that one needs '()' before 'mutable' or the
+ // result type. Deal with this.
+ Diag(Tok, diag::err_lambda_missing_parens)
+ << Tok.is(tok::arrow)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
+ SourceLocation DeclLoc = Tok.getLocation();
+ SourceLocation DeclEndLoc = DeclLoc;
+
+ // Parse 'mutable', if it's there.
+ SourceLocation MutableLoc;
+ if (Tok.is(tok::kw_mutable)) {
+ MutableLoc = ConsumeToken();
+ DeclEndLoc = MutableLoc;
+ }
+
+ // Parse the return type, if there is one.
+ ParsedType TrailingReturnType;
+ if (Tok.is(tok::arrow)) {
+ SourceRange Range;
+ TrailingReturnType = ParseTrailingReturnType(Range).get();
+ if (Range.getEnd().isValid())
+ DeclEndLoc = Range.getEnd();
+ }
+
+ ParsedAttributes Attr(AttrFactory);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
+ /*isVariadic=*/false,
+ /*EllipsisLoc=*/SourceLocation(),
+ /*Params=*/0, /*NumParams=*/0,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierLoc=*/SourceLocation(),
+ /*ConstQualifierLoc=*/SourceLocation(),
+ /*VolatileQualifierLoc=*/SourceLocation(),
+ MutableLoc,
+ EST_None,
+ /*ESpecLoc=*/SourceLocation(),
+ /*Exceptions=*/0,
+ /*ExceptionRanges=*/0,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/0,
+ DeclLoc, DeclEndLoc, D,
+ TrailingReturnType),
+ Attr, DeclEndLoc);
}
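// Recovery sketch (illustration only): '[x] mutable { ... }' and
// '[x] -> int { ... }' are accepted after err_lambda_missing_parens,
// with a fixit inserting the missing '()'.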
+
- // Parse compound-statement.
- if (Tok.is(tok::l_brace)) {
- // FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
- // it.
- ParseScope BodyScope(this, Scope::BlockScope | Scope::FnScope |
- Scope::BreakScope | Scope::ContinueScope |
- Scope::DeclScope);
+ // FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
+ // it.
+ unsigned ScopeFlags = Scope::BlockScope | Scope::FnScope | Scope::DeclScope;
+ if (getCurScope()->getFlags() & Scope::ThisScope)
+ ScopeFlags |= Scope::ThisScope;
+ ParseScope BodyScope(this, ScopeFlags);
- StmtResult Stmt(ParseCompoundStatementBody());
+ Actions.ActOnStartOfLambdaDefinition(Intro, D, getCurScope());
- BodyScope.Exit();
- } else {
+ // Parse compound-statement.
+ if (!Tok.is(tok::l_brace)) {
Diag(Tok, diag::err_expected_lambda_body);
+ Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
+ return ExprError();
}
- return ExprEmpty();
+ StmtResult Stmt(ParseCompoundStatementBody());
+ BodyScope.Exit();
+
+ if (!Stmt.isInvalid())
+ return Actions.ActOnLambdaExpr(LambdaBeginLoc, Stmt.take(), getCurScope());
+
+ Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
+ return ExprError();
}
/// ParseCXXCasts - This handles the various ways to cast expressions to another
@@ -882,10 +1001,10 @@ ExprResult Parser::ParseCXXTypeid() {
// operand (Clause 5).
//
// Note that we can't tell whether the expression is an lvalue of a
- // polymorphic class type until after we've parsed the expression, so
- // we the expression is potentially potentially evaluated.
- EnterExpressionEvaluationContext Unevaluated(Actions,
- Sema::PotentiallyPotentiallyEvaluated);
+ // polymorphic class type until after we've parsed the expression; we
+ // speculatively assume the subexpression is unevaluated, and fix it up
+ // later.
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
Result = ParseExpression();
// Match the ')'.
@@ -988,6 +1107,8 @@ Parser::ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
assert(Tok.is(tok::coloncolon) &&"ParseOptionalCXXScopeSpecifier fail");
CCLoc = ConsumeToken();
} else if (Tok.is(tok::annot_template_id)) {
+ // FIXME: retrieve TemplateKWLoc from template-id annotation and
+ // store it in the pseudo-dtor node (to be used when instantiating it).
FirstTypeName.setTemplateId(
(TemplateIdAnnotation *)Tok.getAnnotationValue());
ConsumeToken();
@@ -1000,6 +1121,17 @@ Parser::ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
// Parse the tilde.
assert(Tok.is(tok::tilde) && "ParseOptionalCXXScopeSpecifier fail");
SourceLocation TildeLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_decltype) && !FirstTypeName.isValid() && SS.isEmpty()) {
+ DeclSpec DS(AttrFactory);
+ ParseDecltypeSpecifier(DS);
+ if (DS.getTypeSpecType() == TST_error)
+ return ExprError();
+ return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc,
+ OpKind, TildeLoc, DS,
+ Tok.is(tok::l_paren));
+ }
+
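// e.g. (sketch): this accepts C++11 pseudo-destructor calls of the form
// 'p->~decltype(*p)()' when no scope specifier or first type name precedes
// the tilde.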
if (!Tok.is(tok::identifier)) {
Diag(Tok, diag::err_destructor_tilde_identifier);
return ExprError();
@@ -1014,9 +1146,10 @@ Parser::ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
// If there is a '<', the second type name is a template-id. Parse
// it as such.
if (Tok.is(tok::less) &&
- ParseUnqualifiedIdTemplateId(SS, Name, NameLoc, false, ObjectType,
- SecondTypeName, /*AssumeTemplateName=*/true,
- /*TemplateKWLoc*/SourceLocation()))
+ ParseUnqualifiedIdTemplateId(SS, SourceLocation(),
+ Name, NameLoc,
+ false, ObjectType, SecondTypeName,
+ /*AssumeTemplateName=*/true))
return ExprError();
return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base,
@@ -1092,14 +1225,17 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
assert((Tok.is(tok::l_paren) ||
- (getLang().CPlusPlus0x && Tok.is(tok::l_brace)))
+ (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)))
&& "Expected '(' or '{'!");
if (Tok.is(tok::l_brace)) {
-
- // FIXME: Convert to a proper type construct expression.
- return ParseBraceInitializer();
-
+ ExprResult Init = ParseBraceInitializer();
+ if (Init.isInvalid())
+ return Init;
+ Expr *InitList = Init.take();
+ return Actions.ActOnCXXTypeConstructExpr(TypeRep, SourceLocation(),
+ MultiExprArg(&InitList, 1),
+ SourceLocation());
} else {
GreaterThanIsOperatorScope G(GreaterThanIsOperator, true);
@@ -1136,6 +1272,8 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
/// condition:
/// expression
/// type-specifier-seq declarator '=' assignment-expression
+/// [C++11] type-specifier-seq declarator '=' initializer-clause
+/// [C++11] type-specifier-seq declarator braced-init-list
/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
/// '=' assignment-expression
///
@@ -1206,18 +1344,35 @@ bool Parser::ParseCXXCondition(ExprResult &ExprOut,
ExprOut = ExprError();
// '=' assignment-expression
- if (isTokenEqualOrMistypedEqualEqual(
- diag::err_invalid_equalequal_after_declarator)) {
+ // If a '==' or '+=' is found, suggest a fixit to '='.
+ bool CopyInitialization = isTokenEqualOrEqualTypo();
+ if (CopyInitialization)
ConsumeToken();
- ExprResult AssignExpr(ParseAssignmentExpression());
- if (!AssignExpr.isInvalid())
- Actions.AddInitializerToDecl(DeclOut, AssignExpr.take(), false,
- DS.getTypeSpecType() == DeclSpec::TST_auto);
+
+ ExprResult InitExpr = ExprError();
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok.getLocation(),
+ diag::warn_cxx98_compat_generalized_initializer_lists);
+ InitExpr = ParseBraceInitializer();
+ } else if (CopyInitialization) {
+ InitExpr = ParseAssignmentExpression();
+ } else if (Tok.is(tok::l_paren)) {
+ // This was probably an attempt to initialize the variable.
+ SourceLocation LParen = ConsumeParen(), RParen = LParen;
+ if (SkipUntil(tok::r_paren, true, /*DontConsume=*/true))
+ RParen = ConsumeParen();
+ Diag(DeclOut ? DeclOut->getLocation() : LParen,
+ diag::err_expected_init_in_condition_lparen)
+ << SourceRange(LParen, RParen);
} else {
- // FIXME: C++0x allows a braced-init-list
- Diag(Tok, diag::err_expected_equal_after_declarator);
+ Diag(DeclOut ? DeclOut->getLocation() : Tok.getLocation(),
+ diag::err_expected_init_in_condition);
}
-
+
+ if (!InitExpr.isInvalid())
+ Actions.AddInitializerToDecl(DeclOut, InitExpr.take(), !CopyInitialization,
+ DS.getTypeSpecType() == DeclSpec::TST_auto);
+
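// Examples of the paths above (illustration only):
//   if (int x = 1) ...    // ordinary copy-initialization
//   if (int x == 1) ...   // typo: fixit suggests '='
//   if (int x{1}) ...     // C++11 braced-init-list (C++98-compat warning)
//   if (int x(1)) ...     // err_expected_init_in_condition_lparen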
// FIXME: Build a reference to this declaration? Convert it to bool?
// (This is currently handled by Sema).
@@ -1234,6 +1389,7 @@ bool Parser::isCXXSimpleTypeSpecifier() const {
case tok::kw_short:
case tok::kw_long:
case tok::kw___int64:
+ case tok::kw___int128:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_void:
@@ -1312,7 +1468,7 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
// is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
// Objective-C interface. If we don't have Objective-C or a '<', this is
// just a normal reference to a typedef name.
- if (Tok.is(tok::less) && getLang().ObjC1)
+ if (Tok.is(tok::less) && getLangOpts().ObjC1)
ParseObjCProtocolQualifiers(DS);
DS.Finish(Diags, PP);
@@ -1344,6 +1500,9 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw_int:
DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec, DiagID);
break;
+ case tok::kw___int128:
+ DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec, DiagID);
+ break;
case tok::kw_half:
DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec, DiagID);
break;
@@ -1365,8 +1524,11 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw_bool:
DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec, DiagID);
break;
+ case tok::annot_decltype:
+ case tok::kw_decltype:
+ DS.SetRangeEnd(ParseDecltypeSpecifier(DS));
+ return DS.Finish(Diags, PP);
- // FIXME: C++0x decltype support.
// GNU typeof support.
case tok::kw_typeof:
ParseTypeofSpecifier(DS);
@@ -1393,22 +1555,7 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
/// type-specifier type-specifier-seq[opt]
///
bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
- DS.SetRangeStart(Tok.getLocation());
- const char *PrevSpec = 0;
- unsigned DiagID;
- bool isInvalid = 0;
-
- // Parse one or more of the type specifiers.
- if (!ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
- ParsedTemplateInfo(), /*SuppressDeclarations*/true)) {
- Diag(Tok, diag::err_expected_type);
- return true;
- }
-
- while (ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
- ParsedTemplateInfo(), /*SuppressDeclarations*/true))
- {}
-
+ ParseSpecifierQualifierList(DS, AS_none, DSC_type_specifier);
DS.Finish(Diags, PP);
return false;
}
@@ -1446,13 +1593,13 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
///
/// \returns true if a parse error occurred, false otherwise.
bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
- bool AssumeTemplateId,
- SourceLocation TemplateKWLoc) {
+ bool AssumeTemplateId) {
assert((AssumeTemplateId || Tok.is(tok::less)) &&
"Expected '<' to finish parsing a template-id");
@@ -1463,7 +1610,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
case UnqualifiedId::IK_OperatorFunctionId:
case UnqualifiedId::IK_LiteralOperatorId:
if (AssumeTemplateId) {
- TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc, SS,
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(), SS, TemplateKWLoc,
Id, ObjectType, EnteringContext,
Template);
if (TNK == TNK_Non_template)
@@ -1494,9 +1641,10 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword)
<< Name
<< FixItHint::CreateInsertion(Id.StartLocation, "template ");
- TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc,
- SS, Id, ObjectType,
- EnteringContext, Template);
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, Id,
+ ObjectType, EnteringContext,
+ Template);
if (TNK == TNK_Non_template)
return true;
}
@@ -1519,9 +1667,10 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
bool MemberOfUnknownSpecialization;
TemplateName.setIdentifier(Name, NameLoc);
if (ObjectType) {
- TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc, SS,
- TemplateName, ObjectType,
- EnteringContext, Template);
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, TemplateName,
+ ObjectType, EnteringContext,
+ Template);
if (TNK == TNK_Non_template)
return true;
} else {
@@ -1575,6 +1724,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
}
TemplateId->SS = SS;
+ TemplateId->TemplateKWLoc = TemplateKWLoc;
TemplateId->Template = Template;
TemplateId->Kind = TNK;
TemplateId->LAngleLoc = LAngleLoc;
@@ -1591,12 +1741,13 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
// Bundle the template arguments together.
ASTTemplateArgsPtr TemplateArgsPtr(Actions, TemplateArgs.data(),
TemplateArgs.size());
-
+
// Constructor and destructor names.
TypeResult Type
- = Actions.ActOnTemplateIdType(SS, Template, NameLoc,
- LAngleLoc, TemplateArgsPtr,
- RAngleLoc);
+ = Actions.ActOnTemplateIdType(SS, TemplateKWLoc,
+ Template, NameLoc,
+ LAngleLoc, TemplateArgsPtr, RAngleLoc,
+ /*IsCtorOrDtorName=*/true);
if (Type.isInvalid())
return true;
@@ -1666,7 +1817,9 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
bool isNew = Tok.getKind() == tok::kw_new;
// Consume the 'new' or 'delete'.
SymbolLocations[SymbolIdx++] = ConsumeToken();
- if (Tok.is(tok::l_square)) {
+ // Check for array new/delete.
+ if (Tok.is(tok::l_square) &&
+ (!getLangOpts().CPlusPlus0x || NextToken().isNot(tok::l_square))) {
// Consume the '[' and ']'.
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
@@ -1742,18 +1895,68 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// literal-operator-id: [C++0x 13.5.8]
// operator "" identifier
- if (getLang().CPlusPlus0x && Tok.is(tok::string_literal)) {
- if (Tok.getLength() != 2)
- Diag(Tok.getLocation(), diag::err_operator_string_not_empty);
- ConsumeStringToken();
+ if (getLangOpts().CPlusPlus0x && isTokenStringLiteral()) {
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_literal_operator);
- if (Tok.isNot(tok::identifier)) {
+ SourceLocation DiagLoc;
+ unsigned DiagId = 0;
+
+ // We're past translation phase 6, so perform string literal concatenation
+ // before checking for "".
+ llvm::SmallVector<Token, 4> Toks;
+ llvm::SmallVector<SourceLocation, 4> TokLocs;
+ while (isTokenStringLiteral()) {
+ if (!Tok.is(tok::string_literal) && !DiagId) {
+ DiagLoc = Tok.getLocation();
+ DiagId = diag::err_literal_operator_string_prefix;
+ }
+ Toks.push_back(Tok);
+ TokLocs.push_back(ConsumeStringToken());
+ }
+
+ StringLiteralParser Literal(Toks.data(), Toks.size(), PP);
+ if (Literal.hadError)
+ return true;
+
+ // Grab the literal operator's suffix, which will be either the next token
+ // or a ud-suffix from the string literal.
+ IdentifierInfo *II = 0;
+ SourceLocation SuffixLoc;
+ if (!Literal.getUDSuffix().empty()) {
+ II = &PP.getIdentifierTable().get(Literal.getUDSuffix());
+ SuffixLoc =
+ Lexer::AdvanceToTokenCharacter(TokLocs[Literal.getUDSuffixToken()],
+ Literal.getUDSuffixOffset(),
+ PP.getSourceManager(), getLangOpts());
+ // This form is not permitted by the standard (yet).
+ DiagLoc = SuffixLoc;
+ DiagId = diag::err_literal_operator_missing_space;
+ } else if (Tok.is(tok::identifier)) {
+ II = Tok.getIdentifierInfo();
+ SuffixLoc = ConsumeToken();
+ TokLocs.push_back(SuffixLoc);
+ } else {
Diag(Tok.getLocation(), diag::err_expected_ident);
return true;
}
- IdentifierInfo *II = Tok.getIdentifierInfo();
- Result.setLiteralOperatorId(II, KeywordLoc, ConsumeToken());
+ // The string literal must be empty.
+ if (!Literal.GetString().empty() || Literal.Pascal) {
+ DiagLoc = TokLocs.front();
+ DiagId = diag::err_literal_operator_string_not_empty;
+ }
+
+ if (DiagId) {
+ // This isn't a valid literal-operator-id, but we think we know
+ // what the user meant. Tell them what they should have written.
+ llvm::SmallString<32> Str;
+ Str += "\"\" ";
+ Str += II->getName();
+ Diag(DiagLoc, DiagId) << FixItHint::CreateReplacement(
+ SourceRange(TokLocs.front(), TokLocs.back()), Str);
+ }
+
+ Result.setLiteralOperatorId(II, KeywordLoc, SuffixLoc);
return false;
}
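A sketch of the literal-operator-id forms the code above handles; the declarations are illustrative and the suffix name _kb is hypothetical:

    // Accepted: empty string literal, whitespace, then the suffix identifier.
    unsigned long long operator "" _kb(unsigned long long v) {
      return v * 1024;
    }
    auto size = 4_kb;          // calls operator "" _kb(4)

    // Diagnosed with a fix-it suggesting the '"" _kb' spelling:
    //   operator "x" _kb(...)   string literal must be empty
    //   operator ""_kb(...)     ud-suffix form, not yet permitted by the standard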
@@ -1823,13 +2026,13 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
ParsedType ObjectType,
+ SourceLocation& TemplateKWLoc,
UnqualifiedId &Result) {
// Handle 'A::template B'. This is for template-ids which have not
// already been annotated by ParseOptionalCXXScopeSpecifier().
bool TemplateSpecified = false;
- SourceLocation TemplateKWLoc;
- if (getLang().CPlusPlus && Tok.is(tok::kw_template) &&
+ if (getLangOpts().CPlusPlus && Tok.is(tok::kw_template) &&
(ObjectType || SS.isSet())) {
TemplateSpecified = true;
TemplateKWLoc = ConsumeToken();
@@ -1843,7 +2046,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
IdentifierInfo *Id = Tok.getIdentifierInfo();
SourceLocation IdLoc = ConsumeToken();
- if (!getLang().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
// If we're not in C++, only identifiers matter. Record the
// identifier and return.
Result.setIdentifier(Id, IdLoc);
@@ -1853,11 +2056,12 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
if (AllowConstructorName &&
Actions.isCurrentClassName(*Id, getCurScope(), &SS)) {
// We have parsed a constructor name.
- Result.setConstructorName(Actions.getTypeName(*Id, IdLoc, getCurScope(),
- &SS, false, false,
- ParsedType(),
- /*NonTrivialTypeSourceInfo=*/true),
- IdLoc, IdLoc);
+ ParsedType Ty = Actions.getTypeName(*Id, IdLoc, getCurScope(),
+ &SS, false, false,
+ ParsedType(),
+ /*IsCtorOrDtorName=*/true,
+ /*NonTrivialTypeSourceInfo=*/true);
+ Result.setConstructorName(Ty, IdLoc, IdLoc);
} else {
// We have parsed an identifier.
Result.setIdentifier(Id, IdLoc);
@@ -1865,9 +2069,9 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// If the next token is a '<', we may have a template.
if (TemplateSpecified || Tok.is(tok::less))
- return ParseUnqualifiedIdTemplateId(SS, Id, IdLoc, EnteringContext,
- ObjectType, Result,
- TemplateSpecified, TemplateKWLoc);
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc, Id, IdLoc,
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
return false;
}
@@ -1890,13 +2094,14 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
<< TemplateId->Name
<< FixItHint::CreateRemoval(
SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc));
- Result.setConstructorName(Actions.getTypeName(*TemplateId->Name,
- TemplateId->TemplateNameLoc,
- getCurScope(),
- &SS, false, false,
- ParsedType(),
- /*NontrivialTypeSourceInfo=*/true),
- TemplateId->TemplateNameLoc,
+ ParsedType Ty = Actions.getTypeName(*TemplateId->Name,
+ TemplateId->TemplateNameLoc,
+ getCurScope(),
+ &SS, false, false,
+ ParsedType(),
+ /*IsCtorOrDtorName=*/true,
+ /*NontrivialTypeSourceInfo=*/true);
+ Result.setConstructorName(Ty, TemplateId->TemplateNameLoc,
TemplateId->RAngleLoc);
ConsumeToken();
return false;
@@ -1910,6 +2115,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// We have already parsed a template-id; consume the annotation token as
// our unqualified-id.
Result.setTemplateId(TemplateId);
+ TemplateKWLoc = TemplateId->TemplateKWLoc;
ConsumeToken();
return false;
}
@@ -1929,15 +2135,15 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
if ((Result.getKind() == UnqualifiedId::IK_OperatorFunctionId ||
Result.getKind() == UnqualifiedId::IK_LiteralOperatorId) &&
(TemplateSpecified || Tok.is(tok::less)))
- return ParseUnqualifiedIdTemplateId(SS, 0, SourceLocation(),
- EnteringContext, ObjectType,
- Result,
- TemplateSpecified, TemplateKWLoc);
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
+ 0, SourceLocation(),
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
return false;
}
- if (getLang().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
(AllowDestructorName || SS.isSet()) && Tok.is(tok::tilde)) {
// C++ [expr.unary.op]p10:
// There is an ambiguity in the unary-expression ~X(), where X is a
@@ -1946,6 +2152,16 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// Parse the '~'.
SourceLocation TildeLoc = ConsumeToken();
+
+ if (SS.isEmpty() && Tok.is(tok::kw_decltype)) {
+ DeclSpec DS(AttrFactory);
+ SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
+ if (ParsedType Type = Actions.getDestructorType(DS, ObjectType)) {
+ Result.setDestructorName(TildeLoc, Type, EndLoc);
+ return false;
+ }
+ return true;
+ }
// Parse the class-name.
if (Tok.isNot(tok::identifier)) {
@@ -1959,9 +2175,10 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
if (TemplateSpecified || Tok.is(tok::less)) {
Result.setDestructorName(TildeLoc, ParsedType(), ClassNameLoc);
- return ParseUnqualifiedIdTemplateId(SS, ClassName, ClassNameLoc,
- EnteringContext, ObjectType, Result,
- TemplateSpecified, TemplateKWLoc);
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
+ ClassName, ClassNameLoc,
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
}
// Note that this is a destructor name.
@@ -1977,7 +2194,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
}
Diag(Tok, diag::err_expected_unqualified_id)
- << getLang().CPlusPlus;
+ << getLangOpts().CPlusPlus;
return true;
}
@@ -2083,10 +2300,11 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
return ExprError();
}
- ExprVector ConstructorArgs(Actions);
- SourceLocation ConstructorLParen, ConstructorRParen;
+ ExprResult Initializer;
if (Tok.is(tok::l_paren)) {
+ SourceLocation ConstructorLParen, ConstructorRParen;
+ ExprVector ConstructorArgs(Actions);
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
ConstructorLParen = T.getOpenLocation();
@@ -2103,15 +2321,20 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
return ExprError();
}
- } else if (Tok.is(tok::l_brace)) {
- // FIXME: Have to communicate the init-list to ActOnCXXNew.
- ParseBraceInitializer();
+ Initializer = Actions.ActOnParenListExpr(ConstructorLParen,
+ ConstructorRParen,
+ move_arg(ConstructorArgs));
+ } else if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus0x) {
+ Diag(Tok.getLocation(),
+ diag::warn_cxx98_compat_generalized_initializer_lists);
+ Initializer = ParseBraceInitializer();
}
+ if (Initializer.isInvalid())
+ return Initializer;
return Actions.ActOnCXXNew(Start, UseGlobal, PlacementLParen,
move_arg(PlacementArgs), PlacementRParen,
- TypeIdParens, DeclaratorInfo, ConstructorLParen,
- move_arg(ConstructorArgs), ConstructorRParen);
+ TypeIdParens, DeclaratorInfo, Initializer.take());
}
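Both initializer forms of a new-expression now reach ActOnCXXNew as a single Initializer expression; an illustrative pair:

    int *a = new int(5);   // paren form, bundled via ActOnParenListExpr
    int *b = new int{5};   // C++11 braced form, via ParseBraceInitializer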
/// ParseDirectNewDeclarator - Parses a direct-new-declarator. Intended to be
@@ -2125,6 +2348,10 @@ void Parser::ParseDirectNewDeclarator(Declarator &D) {
// Parse the array dimensions.
bool first = true;
while (Tok.is(tok::l_square)) {
+ // An array-size expression can't start with a lambda.
+ if (CheckProhibitedCXX11Attribute())
+ continue;
+
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
@@ -2139,13 +2366,16 @@ void Parser::ParseDirectNewDeclarator(Declarator &D) {
T.consumeClose();
- ParsedAttributes attrs(AttrFactory);
+ // Attributes here appertain to the array type. C++11 [expr.new]p5.
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(Attrs);
+
D.AddTypeInfo(DeclaratorChunk::getArray(0,
/*static=*/false, /*star=*/false,
Size.release(),
T.getOpenLocation(),
T.getCloseLocation()),
- attrs, T.getCloseLocation());
+ Attrs, T.getCloseLocation());
if (T.getCloseLocation().isInvalid())
return;
@@ -2197,7 +2427,11 @@ Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
// Array delete?
bool ArrayDelete = false;
- if (Tok.is(tok::l_square)) {
+ if (Tok.is(tok::l_square) && NextToken().is(tok::r_square)) {
+ // FIXME: This could be the start of a lambda-expression. We should
+ // disambiguate this, but that will require arbitrary lookahead if
+ // the next token is '(':
+ // delete [](int*){ /* ... */
ArrayDelete = true;
BalancedDelimiterTracker T(*this, tok::l_square);
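The tightened check above only commits to array delete when ']' immediately follows '['; a non-empty capture list now parses as a lambda. Illustrative snippet, assuming C++11 mode:

    void g(int *p, int *q) {
      delete [] p;                     // '[' ']' : array delete
      delete [q]() { return q; }();    // non-empty capture: lambda call, not array delete
      // delete [](){ return p; }();   // empty capture still reads as array delete;
      //                               // see the FIXME above
    }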
@@ -2235,6 +2469,7 @@ static UnaryTypeTrait UnaryTypeTraitFromTokKind(tok::TokenKind kind) {
case tok::kw___is_const: return UTT_IsConst;
case tok::kw___is_empty: return UTT_IsEmpty;
case tok::kw___is_enum: return UTT_IsEnum;
+ case tok::kw___is_final: return UTT_IsFinal;
case tok::kw___is_floating_point: return UTT_IsFloatingPoint;
case tok::kw___is_function: return UTT_IsFunction;
case tok::kw___is_fundamental: return UTT_IsFundamental;
@@ -2271,6 +2506,15 @@ static BinaryTypeTrait BinaryTypeTraitFromTokKind(tok::TokenKind kind) {
case tok::kw___is_same: return BTT_IsSame;
case tok::kw___builtin_types_compatible_p: return BTT_TypeCompatible;
case tok::kw___is_convertible_to: return BTT_IsConvertibleTo;
+ case tok::kw___is_trivially_assignable: return BTT_IsTriviallyAssignable;
+ }
+}
+
+static TypeTrait TypeTraitFromTokKind(tok::TokenKind kind) {
+ switch (kind) {
+ default: llvm_unreachable("Not a known type trait");
+ case tok::kw___is_trivially_constructible:
+ return TT_IsTriviallyConstructible;
}
}
@@ -2356,6 +2600,58 @@ ExprResult Parser::ParseBinaryTypeTrait() {
T.getCloseLocation());
}
+/// \brief Parse the built-in type-trait pseudo-functions that allow
+/// implementation of the TR1/C++11 type traits templates.
+///
+/// primary-expression:
+/// type-trait '(' type-id-seq ')'
+///
+/// type-id-seq:
+/// type-id ...[opt] type-id-seq[opt]
+///
+ExprResult Parser::ParseTypeTrait() {
+ TypeTrait Kind = TypeTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ if (Parens.expectAndConsume(diag::err_expected_lparen))
+ return ExprError();
+
+ llvm::SmallVector<ParsedType, 2> Args;
+ do {
+ // Parse the next type.
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ Parens.skipToEnd();
+ return ExprError();
+ }
+
+ // Parse the ellipsis, if present.
+ if (Tok.is(tok::ellipsis)) {
+ Ty = Actions.ActOnPackExpansion(Ty.get(), ConsumeToken());
+ if (Ty.isInvalid()) {
+ Parens.skipToEnd();
+ return ExprError();
+ }
+ }
+
+ // Add this type to the list of arguments.
+ Args.push_back(Ty.get());
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ continue;
+ }
+
+ break;
+ } while (true);
+
+ if (Parens.consumeClose())
+ return ExprError();
+
+ return Actions.ActOnTypeTrait(Kind, Loc, Args, Parens.getCloseLocation());
+}
+
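A sketch of the variadic trait this parses, with an optional pack expansion (illustrative; is_triv_ctor is a hypothetical wrapper):

    static_assert(__is_trivially_constructible(int, int),
                  "int is trivially constructible from int");

    template<typename T, typename ...Args>
    struct is_triv_ctor {
      static const bool value = __is_trivially_constructible(T, Args...);
    };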
/// ParseArrayTypeTrait - Parse the built-in array type-trait
/// pseudo-functions.
///
@@ -2396,10 +2692,8 @@ ExprResult Parser::ParseArrayTypeTrait() {
return Actions.ActOnArrayTypeTrait(ATT, Loc, Ty.get(), DimExpr.get(),
T.getCloseLocation());
}
- default:
- break;
}
- return ExprError();
+ llvm_unreachable("Invalid ArrayTypeTrait!");
}
/// ParseExpressionTrait - Parse built-in expression-trait
@@ -2432,7 +2726,7 @@ ExprResult
Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
ParsedType &CastTy,
BalancedDelimiterTracker &Tracker) {
- assert(getLang().CPlusPlus && "Should only be called for C++!");
+ assert(getLangOpts().CPlusPlus && "Should only be called for C++!");
assert(ExprType == CastExpr && "Compound literals are not ambiguous!");
assert(isTypeIdInParens() && "Not a type-id!");
@@ -2484,7 +2778,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
false/*isAddressofOperand*/,
NotCastExpr,
// type-id has priority.
- true/*isTypeCast*/);
+ IsTypeCast);
}
// If we parsed a cast-expression, it's really a type-id, otherwise it's
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
index 33abc93..1c349fd 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
@@ -21,18 +21,92 @@
using namespace clang;
-/// MayBeDesignationStart - Return true if this token might be the start of a
-/// designator. If we can tell it is impossible that it is a designator, return
-/// false.
-static bool MayBeDesignationStart(tok::TokenKind K, Preprocessor &PP) {
- switch (K) {
- default: return false;
+/// MayBeDesignationStart - Return true if the current token might be the start
+/// of a designator. If we can tell it is impossible that it is a designator,
+/// return false.
+bool Parser::MayBeDesignationStart() {
+ switch (Tok.getKind()) {
+ default:
+ return false;
+
case tok::period: // designator: '.' identifier
- case tok::l_square: // designator: array-designator
+ return true;
+
+ case tok::l_square: { // designator: array-designator
+ if (!PP.getLangOpts().CPlusPlus0x)
return true;
+
+ // C++11 lambda expressions and C99 designators can be ambiguous all the
+ // way through the closing ']' and to the next character. Handle the easy
+ // cases here, and fall back to tentative parsing if those fail.
+ switch (PP.LookAhead(0).getKind()) {
+ case tok::equal:
+ case tok::r_square:
+ // Definitely starts a lambda expression.
+ return false;
+
+ case tok::amp:
+ case tok::kw_this:
+ case tok::identifier:
+ // We have to do additional analysis, because these could be the
+ // start of a constant expression or a lambda capture list.
+ break;
+
+ default:
+ // Anything not mentioned above cannot occur following a '[' in a
+ // lambda expression.
+ return true;
+ }
+
+ // Handle the complicated case below.
+ break;
+ }
case tok::identifier: // designation: identifier ':'
return PP.LookAhead(0).is(tok::colon);
}
+
+ // Parse up to (at most) the token after the closing ']' to determine
+ // whether this is a C99 designator or a lambda.
+ TentativeParsingAction Tentative(*this);
+ ConsumeBracket();
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::equal:
+ case tok::amp:
+ case tok::identifier:
+ case tok::kw_this:
+ // These tokens can occur in a capture list or a constant-expression.
+ // Keep looking.
+ ConsumeToken();
+ continue;
+
+ case tok::comma:
+ // Since a comma cannot occur in a constant-expression, this must
+ // be a lambda.
+ Tentative.Revert();
+ return false;
+
+ case tok::r_square: {
+ // Once we hit the closing square bracket, we look at the next
+ // token. If it's an '=', this is a designator. Otherwise, it's a
+ // lambda expression. This decision favors lambdas over the older
+ // GNU designator syntax, which allows one to omit the '=', but is
+ // consistent with GCC.
+ ConsumeBracket();
+ tok::TokenKind Kind = Tok.getKind();
+ Tentative.Revert();
+ return Kind == tok::equal;
+ }
+
+ default:
+ // Anything else cannot occur in a lambda capture list, so it
+ // must be a designator.
+ Tentative.Revert();
+ return true;
+ }
+ }
+
+ return true;
}
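Illustrative inputs for the disambiguation above (hypothetical snippet, C++11 with GNU designators):

    int arr[3] = { [2] = 1 };       // '[' ... ']' '=' : C99-style designator
    auto fn    = [] { return 1; };  // '[' ']' : trivially a lambda
    // { [x] = 1 }  vs.  [x]{ ... } // the tentative parse past ']' decides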
static void CheckArrayDesignatorSyntax(Parser &P, SourceLocation Loc,
@@ -81,7 +155,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
if (Tok.is(tok::identifier)) {
const IdentifierInfo *FieldName = Tok.getIdentifierInfo();
- llvm::SmallString<256> NewSyntax;
+ SmallString<256> NewSyntax;
llvm::raw_svector_ostream(NewSyntax) << '.' << FieldName->getName()
<< " = ";
@@ -137,6 +211,11 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// [foo ... bar] -> array designator
// [4][foo bar] -> obsolete GNU designation with objc message send.
//
+ // We do not need to check for an expression starting with [[ here. If it
+ // contains an Objective-C message send, then it is not an ill-formed
+ // attribute. If it is a lambda-expression within an array-designator, then
+ // it will be rejected because a constant-expression cannot begin with a
+ // lambda-expression.
InMessageExpressionRAIIObject InMessage(*this, true);
BalancedDelimiterTracker T(*this, tok::l_square);
@@ -149,7 +228,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// send) or send to 'super', parse this as a message send
// expression. We handle C++ and C separately, since C++ requires
// much more complicated parsing.
- if (getLang().ObjC1 && getLang().CPlusPlus) {
+ if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus) {
// Send to 'super'.
if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
NextToken().isNot(tok::period) &&
@@ -184,7 +263,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// adopt the expression for further analysis below.
// FIXME: potentially-potentially evaluated expression above?
Idx = ExprResult(static_cast<Expr*>(TypeOrExpr));
- } else if (getLang().ObjC1 && Tok.is(tok::identifier)) {
+ } else if (getLangOpts().ObjC1 && Tok.is(tok::identifier)) {
IdentifierInfo *II = Tok.getIdentifierInfo();
SourceLocation IILoc = Tok.getLocation();
ParsedType ReceiverType;
@@ -242,7 +321,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// tokens are '...' or ']' or an objc message send. If this is an objc
// message send, handle it now. An objc-message send is the start of
// an assignment-expression production.
- if (getLang().ObjC1 && Tok.isNot(tok::ellipsis) &&
+ if (getLangOpts().ObjC1 && Tok.isNot(tok::ellipsis) &&
Tok.isNot(tok::r_square)) {
CheckArrayDesignatorSyntax(*this, Tok.getLocation(), Desig);
return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
@@ -330,7 +409,7 @@ ExprResult Parser::ParseBraceInitializer() {
if (Tok.is(tok::r_brace)) {
// Empty initializers are a C++ feature and a GNU extension to C.
- if (!getLang().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
Diag(LBraceLoc, diag::ext_gnu_empty_initializer);
// Match the '}'.
return Actions.ActOnInitList(LBraceLoc, MultiExprArg(Actions),
@@ -340,12 +419,23 @@ ExprResult Parser::ParseBraceInitializer() {
bool InitExprsOk = true;
while (1) {
+ // Handle Microsoft __if_exists/__if_not_exists if necessary.
+ if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
+ Tok.is(tok::kw___if_not_exists))) {
+ if (ParseMicrosoftIfExistsBraceInitializer(InitExprs, InitExprsOk)) {
+ if (Tok.isNot(tok::comma)) break;
+ ConsumeToken();
+ }
+ if (Tok.is(tok::r_brace)) break;
+ continue;
+ }
+
// Parse: designation[opt] initializer
// If we know that this cannot be a designation, just parse the nested
// initializer directly.
ExprResult SubElt;
- if (MayBeDesignationStart(Tok.getKind(), PP))
+ if (MayBeDesignationStart())
SubElt = ParseInitializerWithPotentialDesignator();
else
SubElt = ParseInitializer();
@@ -392,3 +482,66 @@ ExprResult Parser::ParseBraceInitializer() {
return ExprError(); // an error occurred.
}
+
+// Return true if a comma (or closing brace) is necessary after the
+// __if_exists/__if_not_exists statement.
+bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
+ bool &InitExprsOk) {
+ bool trailingComma = false;
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return false;
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return false;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the declarations below.
+ break;
+
+ case IEB_Dependent:
+ Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
+ << Result.IsIfExists;
+ // Fall through to skip.
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return false;
+ }
+
+ while (Tok.isNot(tok::eof)) {
+ trailingComma = false;
+ // If we know that this cannot be a designation, just parse the nested
+ // initializer directly.
+ ExprResult SubElt;
+ if (MayBeDesignationStart())
+ SubElt = ParseInitializerWithPotentialDesignator();
+ else
+ SubElt = ParseInitializer();
+
+ if (Tok.is(tok::ellipsis))
+ SubElt = Actions.ActOnPackExpansion(SubElt.get(), ConsumeToken());
+
+ // If we couldn't parse the subelement, bail out.
+ if (!SubElt.isInvalid())
+ InitExprs.push_back(SubElt.release());
+ else
+ InitExprsOk = false;
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ trailingComma = true;
+ }
+
+ if (Tok.is(tok::r_brace))
+ break;
+ }
+
+ Braces.consumeClose();
+
+ return !trailingComma;
+}
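A sketch of the Microsoft extension this routine admits inside a braced initializer (illustrative, assuming -fms-extensions; f is hypothetical):

    int f();
    int vals[] = {
      1,
      __if_exists(f) { 2, }   // the group's elements are kept only if 'f' is declared
      3
    };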
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
index 88044d1..789a8ae 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
@@ -42,7 +42,6 @@ Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
switch (Tok.getObjCKeywordID()) {
case tok::objc_class:
return ParseObjCAtClassDeclaration(AtLoc);
- break;
case tok::objc_interface: {
ParsedAttributes attrs(AttrFactory);
SingleDecl = ParseObjCAtInterfaceDeclaration(AtLoc, attrs);
@@ -50,15 +49,12 @@ Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
}
case tok::objc_protocol: {
ParsedAttributes attrs(AttrFactory);
- SingleDecl = ParseObjCAtProtocolDeclaration(AtLoc, attrs);
- break;
+ return ParseObjCAtProtocolDeclaration(AtLoc, attrs);
}
case tok::objc_implementation:
- SingleDecl = ParseObjCAtImplementationDeclaration(AtLoc);
- break;
+ return ParseObjCAtImplementationDeclaration(AtLoc);
case tok::objc_end:
return ParseObjCAtEndDeclaration(AtLoc);
- break;
case tok::objc_compatibility_alias:
SingleDecl = ParseObjCAtAliasDeclaration(AtLoc);
break;
@@ -68,6 +64,12 @@ Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
case tok::objc_dynamic:
SingleDecl = ParseObjCPropertyDynamic(AtLoc);
break;
+ case tok::objc___experimental_modules_import:
+ if (getLangOpts().Modules)
+ return ParseModuleImport(AtLoc);
+
+ // Fall through
+
default:
Diag(AtLoc, diag::err_unexpected_at);
SkipUntil(tok::semi);
@@ -113,6 +115,25 @@ Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
ClassNames.size());
}
+void Parser::CheckNestedObjCContexts(SourceLocation AtLoc)
+{
+ Sema::ObjCContainerKind ock = Actions.getObjCContainerKind();
+ if (ock == Sema::OCK_None)
+ return;
+
+ Decl *Decl = Actions.getObjCDeclContext();
+ if (CurParsedObjCImpl) {
+ CurParsedObjCImpl->finish(AtLoc);
+ } else {
+ Actions.ActOnAtEnd(getCurScope(), AtLoc);
+ }
+ Diag(AtLoc, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(AtLoc, "@end\n");
+ if (Decl)
+ Diag(Decl->getLocStart(), diag::note_objc_container_start)
+ << (int) ock;
+}
+
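Illustrative Objective-C that the new check diagnoses (hypothetical interfaces):

    @interface A        // note: objc container started here
    - (void)m;
    @interface B        // error: missing '@end'; the fix-it inserts it before '@interface B'
    @end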
///
/// objc-interface:
/// objc-class-interface-attributes[opt] objc-class-interface
@@ -140,11 +161,13 @@ Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
/// __attribute__((deprecated))
/// __attribute__((unavailable))
/// __attribute__((objc_exception)) - used by NSException on 64-bit
+/// __attribute__((objc_root_class))
///
-Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
+Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &attrs) {
assert(Tok.isObjCAtKeyword(tok::objc_interface) &&
"ParseObjCAtInterfaceDeclaration(): Expected @interface");
+ CheckNestedObjCContexts(AtLoc);
ConsumeToken(); // the "interface" identifier
// Code completion after '@interface'.
@@ -181,7 +204,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
categoryId = Tok.getIdentifierInfo();
categoryLoc = ConsumeToken();
}
- else if (!getLang().ObjC2) {
+ else if (!getLangOpts().ObjC2) {
Diag(Tok, diag::err_expected_ident); // missing category name.
return 0;
}
@@ -205,7 +228,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
return 0;
Decl *CategoryType =
- Actions.ActOnStartCategoryInterface(atLoc,
+ Actions.ActOnStartCategoryInterface(AtLoc,
nameId, nameLoc,
categoryId, categoryLoc,
ProtocolRefs.data(),
@@ -214,7 +237,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
EndProtoLoc);
if (Tok.is(tok::l_brace))
- ParseObjCClassInstanceVariables(CategoryType, tok::objc_private, atLoc);
+ ParseObjCClassInstanceVariables(CategoryType, tok::objc_private, AtLoc);
ParseObjCInterfaceDeclList(tok::objc_not_keyword, CategoryType);
return CategoryType;
@@ -250,14 +273,14 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
return 0;
Decl *ClsType =
- Actions.ActOnStartClassInterface(atLoc, nameId, nameLoc,
+ Actions.ActOnStartClassInterface(AtLoc, nameId, nameLoc,
superClassId, superClassLoc,
ProtocolRefs.data(), ProtocolRefs.size(),
ProtocolLocs.data(),
EndProtoLoc, attrs.getList());
if (Tok.is(tok::l_brace))
- ParseObjCClassInstanceVariables(ClsType, tok::objc_protected, atLoc);
+ ParseObjCClassInstanceVariables(ClsType, tok::objc_protected, AtLoc);
ParseObjCInterfaceDeclList(tok::objc_interface, ClsType);
return ClsType;
@@ -266,17 +289,22 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation atLoc,
/// The Objective-C property callback. This should be defined where
/// it's used, but instead it's been lifted to here to support VS2005.
struct Parser::ObjCPropertyCallback : FieldCallback {
+private:
+ virtual void anchor();
+public:
Parser &P;
SmallVectorImpl<Decl *> &Props;
ObjCDeclSpec &OCDS;
SourceLocation AtLoc;
+ SourceLocation LParenLoc;
tok::ObjCKeywordKind MethodImplKind;
ObjCPropertyCallback(Parser &P,
SmallVectorImpl<Decl *> &Props,
ObjCDeclSpec &OCDS, SourceLocation AtLoc,
+ SourceLocation LParenLoc,
tok::ObjCKeywordKind MethodImplKind) :
- P(P), Props(Props), OCDS(OCDS), AtLoc(AtLoc),
+ P(P), Props(Props), OCDS(OCDS), AtLoc(AtLoc), LParenLoc(LParenLoc),
MethodImplKind(MethodImplKind) {
}
@@ -308,7 +336,8 @@ struct Parser::ObjCPropertyCallback : FieldCallback {
FD.D.getIdentifier());
bool isOverridingProperty = false;
Decl *Property =
- P.Actions.ActOnProperty(P.getCurScope(), AtLoc, FD, OCDS,
+ P.Actions.ActOnProperty(P.getCurScope(), AtLoc, LParenLoc,
+ FD, OCDS,
GetterSel, SetterSel,
&isOverridingProperty,
MethodImplKind);
@@ -319,6 +348,9 @@ struct Parser::ObjCPropertyCallback : FieldCallback {
}
};
+void Parser::ObjCPropertyCallback::anchor() {
+}
+
/// objc-interface-decl-list:
/// empty
/// objc-interface-decl-list objc-property-decl [OBJC2]
@@ -348,8 +380,12 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
allMethods.push_back(methodPrototype);
// Consume the ';' here, since ParseObjCMethodPrototype() is re-used for
// method definitions.
- ExpectAndConsume(tok::semi, diag::err_expected_semi_after_method_proto,
- "", tok::semi);
+ if (ExpectAndConsumeSemi(diag::err_expected_semi_after_method_proto)) {
+ // We didn't find a semi and we errored out. Skip until a ';' or '@'.
+ SkipUntil(tok::at, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
continue;
}
if (Tok.is(tok::l_paren)) {
@@ -372,7 +408,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// Code completion within an Objective-C interface.
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteOrdinaryName(getCurScope(),
- ObjCImpDecl? Sema::PCC_ObjCImplementation
+ CurParsedObjCImpl? Sema::PCC_ObjCImplementation
: Sema::PCC_ObjCInterface);
return cutOffParsing();
}
@@ -394,7 +430,6 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCAtDirective(getCurScope());
return cutOffParsing();
- break;
}
tok::ObjCKeywordKind DirectiveKind = Tok.getObjCKeywordID();
@@ -425,7 +460,10 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
case tok::objc_implementation:
case tok::objc_interface:
- Diag(Tok, diag::err_objc_missing_end);
+ Diag(AtLoc, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(AtLoc, "@end\n");
+ Diag(CDecl->getLocStart(), diag::note_objc_container_start)
+ << (int) Actions.getObjCContainerKind();
ConsumeToken();
break;
@@ -440,16 +478,19 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
break;
case tok::objc_property:
- if (!getLang().ObjC2)
+ if (!getLangOpts().ObjC2)
Diag(AtLoc, diag::err_objc_properties_require_objc2);
ObjCDeclSpec OCDS;
+ SourceLocation LParenLoc;
// Parse property attribute list, if any.
- if (Tok.is(tok::l_paren))
+ if (Tok.is(tok::l_paren)) {
+ LParenLoc = Tok.getLocation();
ParseObjCPropertyAttribute(OCDS);
+ }
ObjCPropertyCallback Callback(*this, allProperties,
- OCDS, AtLoc, MethodImplKind);
+ OCDS, AtLoc, LParenLoc, MethodImplKind);
// Parse all the comma separated declarators.
DeclSpec DS(AttrFactory);
@@ -465,10 +506,16 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCAtDirective(getCurScope());
return cutOffParsing();
- } else if (Tok.isObjCAtKeyword(tok::objc_end))
+ } else if (Tok.isObjCAtKeyword(tok::objc_end)) {
ConsumeToken(); // the "end" identifier
- else
- Diag(Tok, diag::err_objc_missing_end);
+ } else {
+ Diag(Tok, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "\n@end\n");
+ Diag(CDecl->getLocStart(), diag::note_objc_container_start)
+ << (int) Actions.getObjCContainerKind();
+ AtEnd.setBegin(Tok.getLocation());
+ AtEnd.setEnd(Tok.getLocation());
+ }
// Insert collected methods declarations into the @interface object.
// This passes in an invalid SourceLocation for AtEndLoc when EOF is hit.
@@ -732,7 +779,7 @@ bool Parser::isTokIdentifier_in() const {
// FIXME: May have to do additional look-ahead to only allow for
// valid tokens following an 'in'; such as an identifier, unary operators,
// '[' etc.
- return (getLang().ObjC2 && Tok.is(tok::identifier) &&
+ return (getLangOpts().ObjC2 && Tok.is(tok::identifier) &&
Tok.getIdentifierInfo() == ObjCTypeQuals[objc_in]);
}
@@ -935,7 +982,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the method, parse them.
ParsedAttributes methodAttrs(AttrFactory);
- if (getLang().ObjC2)
+ if (getLangOpts().ObjC2)
MaybeParseGNUAttributes(methodAttrs);
if (Tok.is(tok::code_completion)) {
@@ -961,7 +1008,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
SmallVector<DeclaratorChunk::ParamInfo, 8> CParamInfo;
if (Tok.isNot(tok::colon)) {
// If attributes exist after the method, parse them.
- if (getLang().ObjC2)
+ if (getLangOpts().ObjC2)
MaybeParseGNUAttributes(methodAttrs);
Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
@@ -1004,7 +1051,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the argument name, parse them.
// Regardless, collect all the attributes we've parsed so far.
ArgInfo.ArgAttrs = 0;
- if (getLang().ObjC2) {
+ if (getLangOpts().ObjC2) {
MaybeParseGNUAttributes(paramAttrs);
ArgInfo.ArgAttrs = paramAttrs.getList();
}
@@ -1083,7 +1130,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// FIXME: Add support for optional parameter list...
// If attributes exist after the method, parse them.
- if (getLang().ObjC2)
+ if (getLangOpts().ObjC2)
MaybeParseGNUAttributes(methodAttrs);
if (KeyIdents.size() == 0)
@@ -1146,7 +1193,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
return true;
}
- EndLoc = ConsumeAnyToken();
+ EndLoc = ConsumeToken();
// Convert the list of protocols identifiers into a list of protocol decls.
Actions.FindProtocolDeclaration(WarnOnDeclarations,
@@ -1159,7 +1206,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
/// in a decl-specifier-seq, starting at the '<'.
bool Parser::ParseObjCProtocolQualifiers(DeclSpec &DS) {
assert(Tok.is(tok::less) && "Protocol qualifiers start with '<'");
- assert(getLang().ObjC1 && "Protocol qualifiers only exist in Objective-C");
+ assert(getLangOpts().ObjC1 && "Protocol qualifiers only exist in Objective-C");
SourceLocation LAngleLoc, EndProtoLoc;
SmallVector<Decl *, 8> ProtocolDecl;
SmallVector<SourceLocation, 8> ProtocolLocs;
@@ -1312,8 +1359,9 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
/// "@protocol identifier ;" should be resolved as "@protocol
/// identifier-list ;": objc-interface-decl-list may not start with a
/// semicolon in the first alternative if objc-protocol-refs are omitted.
-Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
- ParsedAttributes &attrs) {
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
+ ParsedAttributes &attrs) {
assert(Tok.isObjCAtKeyword(tok::objc_protocol) &&
"ParseObjCAtProtocolDeclaration(): Expected @protocol");
ConsumeToken(); // the "protocol" identifier
@@ -1321,12 +1369,12 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCProtocolDecl(getCurScope());
cutOffParsing();
- return 0;
+ return DeclGroupPtrTy();
}
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected_ident); // missing protocol name.
- return 0;
+ return DeclGroupPtrTy();
}
// Save the protocol name, then consume it.
IdentifierInfo *protocolName = Tok.getIdentifierInfo();
@@ -1339,6 +1387,8 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
attrs.getList());
}
+ CheckNestedObjCContexts(AtLoc);
+
if (Tok.is(tok::comma)) { // list of forward declarations.
SmallVector<IdentifierLocPair, 8> ProtocolRefs;
ProtocolRefs.push_back(std::make_pair(protocolName, nameLoc));
@@ -1349,7 +1399,7 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected_ident);
SkipUntil(tok::semi);
- return 0;
+ return DeclGroupPtrTy();
}
ProtocolRefs.push_back(IdentifierLocPair(Tok.getIdentifierInfo(),
Tok.getLocation()));
@@ -1360,7 +1410,7 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
}
// Consume the ';'.
if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@protocol"))
- return 0;
+ return DeclGroupPtrTy();
return Actions.ActOnForwardProtocolDeclaration(AtLoc,
&ProtocolRefs[0],
@@ -1376,7 +1426,7 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
if (Tok.is(tok::less) &&
ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, false,
LAngleLoc, EndProtoLoc))
- return 0;
+ return DeclGroupPtrTy();
Decl *ProtoType =
Actions.ActOnStartProtocolInterface(AtLoc, protocolName, nameLoc,
@@ -1386,7 +1436,7 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
EndProtoLoc, attrs.getList());
ParseObjCInterfaceDeclList(tok::objc_protocol, ProtoType);
- return ProtoType;
+ return Actions.ConvertDeclToDeclGroup(ProtoType);
}
/// objc-implementation:
@@ -1399,26 +1449,28 @@ Decl *Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
///
/// objc-category-implementation-prologue:
/// @implementation identifier ( identifier )
-Decl *Parser::ParseObjCAtImplementationDeclaration(
- SourceLocation atLoc) {
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc) {
assert(Tok.isObjCAtKeyword(tok::objc_implementation) &&
"ParseObjCAtImplementationDeclaration(): Expected @implementation");
+ CheckNestedObjCContexts(AtLoc);
ConsumeToken(); // the "implementation" identifier
// Code completion after '@implementation'.
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCImplementationDecl(getCurScope());
cutOffParsing();
- return 0;
+ return DeclGroupPtrTy();
}
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected_ident); // missing class or category name.
- return 0;
+ return DeclGroupPtrTy();
}
// We have a class or category name - consume it.
IdentifierInfo *nameId = Tok.getIdentifierInfo();
SourceLocation nameLoc = ConsumeToken(); // consume class or category name
+ Decl *ObjCImpDecl = 0;
if (Tok.is(tok::l_paren)) {
// we have a category implementation.
@@ -1429,7 +1481,7 @@ Decl *Parser::ParseObjCAtImplementationDeclaration(
if (Tok.is(tok::code_completion)) {
Actions.CodeCompleteObjCImplementationCategory(getCurScope(), nameId, nameLoc);
cutOffParsing();
- return 0;
+ return DeclGroupPtrTy();
}
if (Tok.is(tok::identifier)) {
@@ -1437,45 +1489,57 @@ Decl *Parser::ParseObjCAtImplementationDeclaration(
categoryLoc = ConsumeToken();
} else {
Diag(Tok, diag::err_expected_ident); // missing category name.
- return 0;
+ return DeclGroupPtrTy();
}
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_expected_rparen);
SkipUntil(tok::r_paren, false); // don't stop at ';'
- return 0;
+ return DeclGroupPtrTy();
}
rparenLoc = ConsumeParen();
- Decl *ImplCatType = Actions.ActOnStartCategoryImplementation(
- atLoc, nameId, nameLoc, categoryId,
+ ObjCImpDecl = Actions.ActOnStartCategoryImplementation(
+ AtLoc, nameId, nameLoc, categoryId,
categoryLoc);
- ObjCImpDecl = ImplCatType;
- PendingObjCImpDecl.push_back(ObjCImpDecl);
- return 0;
- }
- // We have a class implementation
- SourceLocation superClassLoc;
- IdentifierInfo *superClassId = 0;
- if (Tok.is(tok::colon)) {
- // We have a super class
- ConsumeToken();
- if (Tok.isNot(tok::identifier)) {
- Diag(Tok, diag::err_expected_ident); // missing super class name.
- return 0;
+ } else {
+ // We have a class implementation
+ SourceLocation superClassLoc;
+ IdentifierInfo *superClassId = 0;
+ if (Tok.is(tok::colon)) {
+ // We have a super class
+ ConsumeToken();
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing super class name.
+ return DeclGroupPtrTy();
+ }
+ superClassId = Tok.getIdentifierInfo();
+ superClassLoc = ConsumeToken(); // Consume super class name
}
- superClassId = Tok.getIdentifierInfo();
- superClassLoc = ConsumeToken(); // Consume super class name
+ ObjCImpDecl = Actions.ActOnStartClassImplementation(
+ AtLoc, nameId, nameLoc,
+ superClassId, superClassLoc);
+
+ if (Tok.is(tok::l_brace)) // we have ivars
+ ParseObjCClassInstanceVariables(ObjCImpDecl, tok::objc_private, AtLoc);
}
- Decl *ImplClsType = Actions.ActOnStartClassImplementation(
- atLoc, nameId, nameLoc,
- superClassId, superClassLoc);
+ assert(ObjCImpDecl);
- if (Tok.is(tok::l_brace)) // we have ivars
- ParseObjCClassInstanceVariables(ImplClsType, tok::objc_private, atLoc);
+ SmallVector<Decl *, 8> DeclsInGroup;
- ObjCImpDecl = ImplClsType;
- PendingObjCImpDecl.push_back(ObjCImpDecl);
- return 0;
+ {
+ ObjCImplParsingDataRAII ObjCImplParsing(*this, ObjCImpDecl);
+ while (!ObjCImplParsing.isFinished() && Tok.isNot(tok::eof)) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ if (DeclGroupPtrTy DGP = ParseExternalDeclaration(attrs)) {
+ DeclGroupRef DG = DGP.get();
+ DeclsInGroup.append(DG.begin(), DG.end());
+ }
+ }
+ }
+
+ return Actions.ActOnFinishObjCImplementation(ObjCImpDecl, DeclsInGroup);
}
Parser::DeclGroupPtrTy
@@ -1483,35 +1547,44 @@ Parser::ParseObjCAtEndDeclaration(SourceRange atEnd) {
assert(Tok.isObjCAtKeyword(tok::objc_end) &&
"ParseObjCAtEndDeclaration(): Expected @end");
ConsumeToken(); // the "end" identifier
- SmallVector<Decl *, 8> DeclsInGroup;
- Actions.DefaultSynthesizeProperties(getCurScope(), ObjCImpDecl);
- for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i) {
- Decl *D = ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i]);
- DeclsInGroup.push_back(D);
- }
- DeclsInGroup.push_back(ObjCImpDecl);
-
- if (ObjCImpDecl) {
- Actions.ActOnAtEnd(getCurScope(), atEnd);
- PendingObjCImpDecl.pop_back();
- }
+ if (CurParsedObjCImpl)
+ CurParsedObjCImpl->finish(atEnd);
else
// missing @implementation
- Diag(atEnd.getBegin(), diag::err_expected_implementation);
-
- LateParsedObjCMethods.clear();
- ObjCImpDecl = 0;
- return Actions.BuildDeclaratorGroup(
- DeclsInGroup.data(), DeclsInGroup.size(), false);
+ Diag(atEnd.getBegin(), diag::err_expected_objc_container);
+ return DeclGroupPtrTy();
}
-Parser::DeclGroupPtrTy Parser::FinishPendingObjCActions() {
- Actions.DiagnoseUseOfUnimplementedSelectors();
- if (PendingObjCImpDecl.empty())
- return Actions.ConvertDeclToDeclGroup(0);
- Decl *ImpDecl = PendingObjCImpDecl.pop_back_val();
- Actions.ActOnAtEnd(getCurScope(), SourceRange());
- return Actions.ConvertDeclToDeclGroup(ImpDecl);
+Parser::ObjCImplParsingDataRAII::~ObjCImplParsingDataRAII() {
+ if (!Finished) {
+ finish(P.Tok.getLocation());
+ if (P.Tok.is(tok::eof)) {
+ P.Diag(P.Tok, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(P.Tok.getLocation(), "\n@end\n");
+ P.Diag(Dcl->getLocStart(), diag::note_objc_container_start)
+ << Sema::OCK_Implementation;
+ }
+ }
+ P.CurParsedObjCImpl = 0;
+ assert(LateParsedObjCMethods.empty());
+}
+
+void Parser::ObjCImplParsingDataRAII::finish(SourceRange AtEnd) {
+ assert(!Finished);
+ P.Actions.DefaultSynthesizeProperties(P.getCurScope(), Dcl);
+ for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i)
+ P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i]);
+
+ P.Actions.ActOnAtEnd(P.getCurScope(), AtEnd);
+
+ /// \brief Clear and free the cached objc methods.
+ for (LateParsedObjCMethodContainer::iterator
+ I = LateParsedObjCMethods.begin(),
+ E = LateParsedObjCMethods.end(); I != E; ++I)
+ delete *I;
+ LateParsedObjCMethods.clear();
+
+ Finished = true;
}
/// compatibility-alias-decl:
@@ -1850,7 +1923,7 @@ Decl *Parser::ParseObjCMethodDefinition() {
// parse optional ';'
if (Tok.is(tok::semi)) {
- if (ObjCImpDecl) {
+ if (CurParsedObjCImpl) {
Diag(Tok, diag::warn_semicolon_before_method_body)
<< FixItHint::CreateRemoval(Tok.getLocation());
}
@@ -1868,19 +1941,32 @@ Decl *Parser::ParseObjCMethodDefinition() {
if (Tok.isNot(tok::l_brace))
return 0;
}
+
+ if (!MDecl) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/false);
+ return 0;
+ }
+
// Allow the rest of sema to find private method decl implementations.
- if (MDecl)
- Actions.AddAnyMethodToGlobalPool(MDecl);
-
- // Consume the tokens and store them for later parsing.
- LexedMethod* LM = new LexedMethod(this, MDecl);
- LateParsedObjCMethods.push_back(LM);
- CachedTokens &Toks = LM->Toks;
- // Begin by storing the '{' token.
- Toks.push_back(Tok);
- ConsumeBrace();
- // Consume everything up to (and including) the matching right brace.
- ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ Actions.AddAnyMethodToGlobalPool(MDecl);
+
+ if (CurParsedObjCImpl) {
+ // Consume the tokens and store them for later parsing.
+ LexedMethod* LM = new LexedMethod(this, MDecl);
+ CurParsedObjCImpl->LateParsedObjCMethods.push_back(LM);
+ CachedTokens &Toks = LM->Toks;
+ // Begin by storing the '{' token.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+
+ } else {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/false);
+ }
+
return MDecl;
}
@@ -1924,9 +2010,62 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
cutOffParsing();
return ExprError();
+ case tok::minus:
+ case tok::plus: {
+ tok::TokenKind Kind = Tok.getKind();
+ SourceLocation OpLoc = ConsumeToken();
+
+ if (!Tok.is(tok::numeric_constant)) {
+ const char *Symbol = 0;
+ switch (Kind) {
+ case tok::minus: Symbol = "-"; break;
+ case tok::plus: Symbol = "+"; break;
+ default: llvm_unreachable("missing unary operator case");
+ }
+ Diag(Tok, diag::err_nsnumber_nonliteral_unary)
+ << Symbol;
+ return ExprError();
+ }
+
+ ExprResult Lit(Actions.ActOnNumericConstant(Tok));
+ if (Lit.isInvalid()) {
+ return move(Lit);
+ }
+ ConsumeToken(); // Consume the literal token.
+
+ Lit = Actions.ActOnUnaryOp(getCurScope(), OpLoc, Kind, Lit.take());
+ if (Lit.isInvalid())
+ return move(Lit);
+
+ return ParsePostfixExpressionSuffix(
+ Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
+ }
+
case tok::string_literal: // primary-expression: string-literal
case tok::wide_string_literal:
return ParsePostfixExpressionSuffix(ParseObjCStringLiteral(AtLoc));
+
+ case tok::char_constant:
+ return ParsePostfixExpressionSuffix(ParseObjCCharacterLiteral(AtLoc));
+
+ case tok::numeric_constant:
+ return ParsePostfixExpressionSuffix(ParseObjCNumericLiteral(AtLoc));
+
+ case tok::kw_true: // Objective-C++, etc.
+ case tok::kw___objc_yes: // c/c++/objc/objc++ __objc_yes
+ return ParsePostfixExpressionSuffix(ParseObjCBooleanLiteral(AtLoc, true));
+ case tok::kw_false: // Objective-C++, etc.
+ case tok::kw___objc_no: // c/c++/objc/objc++ __objc_no
+ return ParsePostfixExpressionSuffix(ParseObjCBooleanLiteral(AtLoc, false));
+
+ case tok::l_square:
+ // Objective-C array literal
+ return ParsePostfixExpressionSuffix(ParseObjCArrayLiteral(AtLoc));
+
+ case tok::l_brace:
+ // Objective-C dictionary literal
+ return ParsePostfixExpressionSuffix(ParseObjCDictionaryLiteral(AtLoc));
+
default:
if (Tok.getIdentifierInfo() == 0)
return ExprError(Diag(AtLoc, diag::err_unexpected_at));
@@ -2037,14 +2176,14 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
/// This routine will only return true for a subset of valid message-send
/// expressions.
bool Parser::isSimpleObjCMessageExpression() {
- assert(Tok.is(tok::l_square) && getLang().ObjC1 &&
+ assert(Tok.is(tok::l_square) && getLangOpts().ObjC1 &&
"Incorrect start for isSimpleObjCMessageExpression");
return GetLookAheadToken(1).is(tok::identifier) &&
GetLookAheadToken(2).is(tok::identifier);
}
bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
- if (!getLang().ObjC1 || !NextToken().is(tok::identifier) ||
+ if (!getLangOpts().ObjC1 || !NextToken().is(tok::identifier) ||
InMessageExpression)
return false;
@@ -2093,7 +2232,7 @@ ExprResult Parser::ParseObjCMessageExpression() {
InMessageExpressionRAIIObject InMessage(*this, true);
- if (getLang().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// We completely separate the C and C++ cases because C++ requires
// more complicated (read: slower) parsing.
@@ -2404,6 +2543,134 @@ ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
AtStrings.size()));
}
+/// ParseObjCBooleanLiteral -
+/// objc-scalar-literal : '@' boolean-keyword
+/// ;
+/// boolean-keyword: 'true' | 'false' | '__objc_yes' | '__objc_no'
+/// ;
+ExprResult Parser::ParseObjCBooleanLiteral(SourceLocation AtLoc,
+ bool ArgValue) {
+ SourceLocation EndLoc = ConsumeToken(); // consume the keyword.
+ return Actions.ActOnObjCBoolLiteral(AtLoc, EndLoc, ArgValue);
+}
+
+/// ParseObjCCharacterLiteral -
+/// objc-scalar-literal : '@' character-literal
+/// ;
+ExprResult Parser::ParseObjCCharacterLiteral(SourceLocation AtLoc) {
+ ExprResult Lit(Actions.ActOnCharacterConstant(Tok));
+ if (Lit.isInvalid()) {
+ return move(Lit);
+ }
+ ConsumeToken(); // Consume the literal token.
+ return Owned(Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
+}
+
+/// ParseObjCNumericLiteral -
+/// objc-scalar-literal : '@' scalar-literal
+/// ;
+/// scalar-literal : numeric-constant /* any numeric constant. */
+/// ;
+ExprResult Parser::ParseObjCNumericLiteral(SourceLocation AtLoc) {
+ ExprResult Lit(Actions.ActOnNumericConstant(Tok));
+ if (Lit.isInvalid()) {
+ return move(Lit);
+ }
+ ConsumeToken(); // Consume the literal token.
+ return Owned(Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
+}
+
+ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
+ ExprVector ElementExprs(Actions); // array elements.
+ ConsumeBracket(); // consume the l_square.
+
+ while (Tok.isNot(tok::r_square)) {
+ // Parse list of array element expressions (all must be id types).
+ ExprResult Res(ParseAssignmentExpression());
+ if (Res.isInvalid()) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return move(Res);
+ }
+
+ // Parse the ellipsis that indicates a pack expansion.
+ if (Tok.is(tok::ellipsis))
+ Res = Actions.ActOnPackExpansion(Res.get(), ConsumeToken());
+ if (Res.isInvalid())
+ return true;
+
+ ElementExprs.push_back(Res.release());
+
+ if (Tok.is(tok::comma))
+ ConsumeToken(); // Eat the ','.
+ else if (Tok.isNot(tok::r_square))
+ return ExprError(Diag(Tok, diag::err_expected_rsquare_or_comma));
+ }
+ SourceLocation EndLoc = ConsumeBracket(); // location of ']'
+ MultiExprArg Args(Actions, ElementExprs.take(), ElementExprs.size());
+ return Owned(Actions.BuildObjCArrayLiteral(SourceRange(AtLoc, EndLoc), Args));
+}
+
+ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
+ SmallVector<ObjCDictionaryElement, 4> Elements; // dictionary elements.
+ ConsumeBrace(); // consume the l_brace.
+ while (Tok.isNot(tok::r_brace)) {
+ // Parse the comma separated key : value expressions.
+ ExprResult KeyExpr;
+ {
+ ColonProtectionRAIIObject X(*this);
+ KeyExpr = ParseAssignmentExpression();
+ if (KeyExpr.isInvalid()) {
+ // We must manually skip to a '}', otherwise the expression skipper will
+ // stop at the '}' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_brace);
+ return move(KeyExpr);
+ }
+ }
+
+ if (Tok.is(tok::colon)) {
+ ConsumeToken();
+ } else {
+ return ExprError(Diag(Tok, diag::err_expected_colon));
+ }
+
+ ExprResult ValueExpr(ParseAssignmentExpression());
+ if (ValueExpr.isInvalid()) {
+ // We must manually skip to a '}', otherwise the expression skipper will
+ // stop at the '}' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_brace);
+ return move(ValueExpr);
+ }
+
+ // Parse the ellipsis that designates this as a pack expansion.
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis) && getLangOpts().CPlusPlus)
+ EllipsisLoc = ConsumeToken();
+
+ // We have a valid expression. Collect it in a vector so we can
+ // build the argument list.
+ ObjCDictionaryElement Element = {
+ KeyExpr.get(), ValueExpr.get(), EllipsisLoc, llvm::Optional<unsigned>()
+ };
+ Elements.push_back(Element);
+
+ if (Tok.is(tok::comma))
+ ConsumeToken(); // Eat the ','.
+ else if (Tok.isNot(tok::r_brace))
+ return ExprError(Diag(Tok, diag::err_expected_rbrace_or_comma));
+ }
+ SourceLocation EndLoc = ConsumeBrace();
+
+ // Create the ObjCDictionaryLiteral.
+ return Owned(Actions.BuildObjCDictionaryLiteral(SourceRange(AtLoc, EndLoc),
+ Elements.data(),
+ Elements.size()));
+}
+
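The new '@' literal forms handled above, in one illustrative Objective-C snippet (NSNumber and friends assumed from Foundation):

    NSNumber *n = @42;                 // numeric literal
    NSNumber *m = @-1;                 // unary minus folded into the literal
    NSNumber *c = @'x';                // character literal
    NSNumber *b = @YES;                // boolean; __objc_yes underneath
    NSArray  *a = @[ n, m ];           // array literal
    NSDictionary *d = @{ @"key" : n }; // dictionary literal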
/// objc-encode-expression:
/// @encode ( type-name )
ExprResult
@@ -2519,7 +2786,10 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
}
Decl *Parser::ParseLexedObjCMethodDefs(LexedMethod &LM) {
-
+
+ // Save the current token position.
+ SourceLocation OrigLoc = Tok.getLocation();
+
assert(!LM.Toks.empty() && "ParseLexedObjCMethodDef - Empty body!");
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
@@ -2541,22 +2811,36 @@ Decl *Parser::ParseLexedObjCMethodDefs(LexedMethod &LM) {
// specified Declarator for the method.
Actions.ActOnStartOfObjCMethodDef(getCurScope(), MDecl);
- if (PP.isCodeCompletionEnabled()) {
- if (trySkippingFunctionBodyForCodeCompletion()) {
- BodyScope.Exit();
- return Actions.ActOnFinishFunctionBody(MDecl, 0);
- }
+ if (SkipFunctionBodies && trySkippingFunctionBody()) {
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(MDecl, 0);
}
StmtResult FnBody(ParseCompoundStatementBody());
// If the function body could not be parsed, make a bogus compoundstmt.
- if (FnBody.isInvalid())
+ if (FnBody.isInvalid()) {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
FnBody = Actions.ActOnCompoundStmt(BraceLoc, BraceLoc,
MultiStmtArg(Actions), false);
+ }
// Leave the function body scope.
BodyScope.Exit();
- return Actions.ActOnFinishFunctionBody(MDecl, FnBody.take());
+ MDecl = Actions.ActOnFinishFunctionBody(MDecl, FnBody.take());
+
+ if (Tok.getLocation() != OrigLoc) {
+ // Due to parsing error, we either went over the cached tokens or
+ // there are still cached tokens left. If it's the latter case skip the
+ // leftover tokens.
+ // Since this is an uncommon situation that should be avoided, use the
+ // expensive isBeforeInTranslationUnit call.
+ if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
+ OrigLoc))
+ while (Tok.getLocation() != OrigLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+
+ return MDecl;
}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
index 2ccb6ea..eb13e0d 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
@@ -29,6 +29,31 @@ void Parser::HandlePragmaUnused() {
ConsumeToken(); // The argument token.
}
+void Parser::HandlePragmaVisibility() {
+ assert(Tok.is(tok::annot_pragma_vis));
+ const IdentifierInfo *VisType =
+ static_cast<IdentifierInfo *>(Tok.getAnnotationValue());
+ SourceLocation VisLoc = ConsumeToken();
+ Actions.ActOnPragmaVisibility(VisType, VisLoc);
+}
+
+struct PragmaPackInfo {
+ Sema::PragmaPackKind Kind;
+ IdentifierInfo *Name;
+ Expr *Alignment;
+ SourceLocation LParenLoc;
+ SourceLocation RParenLoc;
+};
+
+void Parser::HandlePragmaPack() {
+ assert(Tok.is(tok::annot_pragma_pack));
+ PragmaPackInfo *Info =
+ static_cast<PragmaPackInfo *>(Tok.getAnnotationValue());
+ SourceLocation PragmaLoc = ConsumeToken();
+ Actions.ActOnPragmaPack(Info->Kind, Info->Name, Info->Alignment, PragmaLoc,
+ Info->LParenLoc, Info->RParenLoc);
+}
+
// #pragma GCC visibility comes in two variants:
// 'push' '(' [visibility] ')'
// 'pop'
@@ -42,13 +67,10 @@ void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP,
const IdentifierInfo *PushPop = Tok.getIdentifierInfo();
- bool IsPush;
const IdentifierInfo *VisType;
if (PushPop && PushPop->isStr("pop")) {
- IsPush = false;
VisType = 0;
} else if (PushPop && PushPop->isStr("push")) {
- IsPush = true;
PP.LexUnexpandedToken(Tok);
if (Tok.isNot(tok::l_paren)) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen)
@@ -80,7 +102,14 @@ void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP,
return;
}
- Actions.ActOnPragmaVisibility(IsPush, VisType, VisLoc);
+ Token *Toks = new Token[1];
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_vis);
+ Toks[0].setLocation(VisLoc);
+ Toks[0].setAnnotationValue(
+ const_cast<void*>(static_cast<const void*>(VisType)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/true);
}
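
For reference, the source pattern this rewrite reorders around declarations (the annotation token makes the pragma take effect where it appears in parse order; visibility only matters on ELF-style targets, and the names below are illustrative):

    #pragma GCC visibility push(hidden)
    int internal_helper(int x) { return x + 1; }  // hidden from the dynamic symbol table
    #pragma GCC visibility pop

    int public_api(int x) { return internal_helper(x); }  // default visibility
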
// #pragma pack(...) comes in the following delicious flavors:
@@ -110,6 +139,12 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
return;
PP.Lex(Tok);
+
+ // In MSVC/gcc, #pragma pack(4) sets the alignment without affecting
+ // the push/pop stack.
+ // In Apple gcc, #pragma pack(4) is equivalent to #pragma pack(push, 4)
+ if (PP.getLangOpts().ApplePragmaPack)
+ Kind = Sema::PPK_Push;
} else if (Tok.is(tok::identifier)) {
const IdentifierInfo *II = Tok.getIdentifierInfo();
if (II->isStr("show")) {
@@ -159,6 +194,11 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
}
}
}
+ } else if (PP.getLangOpts().ApplePragmaPack) {
+ // In MSVC/gcc, #pragma pack() resets the alignment without affecting
+ // the push/pop stack.
+ // In Apple gcc #pragma pack() is equivalent to #pragma pack(pop).
+ Kind = Sema::PPK_Pop;
}
if (Tok.isNot(tok::r_paren)) {
@@ -173,8 +213,26 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
return;
}
- Actions.ActOnPragmaPack(Kind, Name, Alignment.release(), PackLoc,
- LParenLoc, RParenLoc);
+ PragmaPackInfo *Info =
+ (PragmaPackInfo*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(PragmaPackInfo), llvm::alignOf<PragmaPackInfo>());
+ new (Info) PragmaPackInfo();
+ Info->Kind = Kind;
+ Info->Name = Name;
+ Info->Alignment = Alignment.release();
+ Info->LParenLoc = LParenLoc;
+ Info->RParenLoc = RParenLoc;
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_pack);
+ Toks[0].setLocation(PackLoc);
+ Toks[0].setAnnotationValue(static_cast<void*>(Info));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
}
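
The ApplePragmaPack branches above encode a real dialect split; a sketch of what each form means (sizeof values assume a typical target with 4-byte int):

    struct Natural { char c; int i; };   // padded: sizeof == 8 on most targets

    #pragma pack(1)                      // MSVC/gcc: set alignment, stack untouched
                                         // Apple gcc: acts like pack(push, 1)
    struct Packed { char c; int i; };    // sizeof == 5

    #pragma pack()                       // MSVC/gcc: reset to the default
                                         // Apple gcc: acts like pack(pop)
    struct Restored { char c; int i; };  // natural alignment again
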
// #pragma ms_struct on
@@ -203,7 +261,8 @@ void PragmaMSStructHandler::HandlePragma(Preprocessor &PP,
}
if (Tok.isNot(tok::eod)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) << "ms_struct";
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "ms_struct";
return;
}
Actions.ActOnPragmaMSStruct(Kind);
@@ -348,7 +407,9 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP,
// This allows us to cache a "#pragma unused" that occurs inside an inline
// C++ member function.
- Token *Toks = new Token[2*Identifiers.size()];
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 2 * Identifiers.size(), llvm::alignOf<Token>());
for (unsigned i=0; i != Identifiers.size(); i++) {
Token &pragmaUnusedTok = Toks[2*i], &idTok = Toks[2*i+1];
pragmaUnusedTok.startToken();
@@ -356,7 +417,8 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP,
pragmaUnusedTok.setLocation(UnusedLoc);
idTok = Identifiers[i];
}
- PP.EnterTokenStream(Toks, 2*Identifiers.size(), /*DisableMacroExpansion=*/true, /*OwnsTokens=*/true);
+ PP.EnterTokenStream(Toks, 2*Identifiers.size(),
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
}
// #pragma weak identifier
@@ -403,6 +465,44 @@ void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
}
}
+// #pragma redefine_extname identifier identifier
+void PragmaRedefineExtnameHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &RedefToken) {
+ SourceLocation RedefLoc = RedefToken.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) <<
+ "redefine_extname";
+ return;
+ }
+
+ IdentifierInfo *RedefName = Tok.getIdentifierInfo(), *AliasName = 0;
+ SourceLocation RedefNameLoc = Tok.getLocation(), AliasNameLoc;
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "redefine_extname";
+ return;
+ }
+ AliasName = Tok.getIdentifierInfo();
+ AliasNameLoc = Tok.getLocation();
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) <<
+ "redefine_extname";
+ return;
+ }
+
+ Actions.ActOnPragmaRedefineExtname(RedefName, AliasName, RedefLoc,
+ RedefNameLoc, AliasNameLoc);
+}
+
+
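
Usage the new handler accepts (a Solaris-compatibility pragma; the function and symbol names below are illustrative only):

    extern "C" int my_open(const char *path, int flags);
    #pragma redefine_extname my_open open64
    // References to my_open() are now emitted against the symbol "open64".
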
void
PragmaFPContractHandler::HandlePragma(Preprocessor &PP,
PragmaIntroducerKind Introducer,
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
index 1d3138f..ebb185a 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
@@ -90,6 +90,16 @@ public:
Token &FirstToken);
};
+class PragmaRedefineExtnameHandler : public PragmaHandler {
+ Sema &Actions;
+public:
+ explicit PragmaRedefineExtnameHandler(Sema &A)
+ : PragmaHandler("redefine_extname"), Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
class PragmaOpenCLExtensionHandler : public PragmaHandler {
Sema &Actions;
Parser &parser;
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
index a2b7cdd..fdb9788 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
@@ -76,14 +76,15 @@ using namespace clang;
/// [OBC] '@' 'throw' ';'
///
StmtResult
-Parser::ParseStatementOrDeclaration(StmtVector &Stmts, bool OnlyStatement) {
+Parser::ParseStatementOrDeclaration(StmtVector &Stmts, bool OnlyStatement,
+ SourceLocation *TrailingElseLoc) {
const char *SemiError = 0;
StmtResult Res;
ParenBraceBracketBalancer BalancerRAIIObj(*this);
ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX0XAttributes(attrs);
+ MaybeParseCXX0XAttributes(attrs, 0, /*MightBeObjCMessageSend*/ true);
// Cases in this switch statement should fall through if the parser expects
// the token to end in a semicolon (in which case SemiError should be set),
@@ -115,7 +116,7 @@ Retry:
IdentifierInfo *Name = Tok.getIdentifierInfo();
SourceLocation NameLoc = Tok.getLocation();
- if (getLang().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
CheckForTemplateAndDigraph(Next, ParsedType(),
/*EnteringContext=*/false, *Name, SS);
@@ -170,7 +171,7 @@ Retry:
if (AnnotateTemplateIdToken(
TemplateTy::make(Classification.getTemplateName()),
Classification.getTemplateNameKind(),
- SS, Id, SourceLocation(),
+ SS, SourceLocation(), Id,
/*AllowTypeAnnotation=*/false)) {
// Handle errors here by skipping up to the next semicolon or '}', and
// eat the semicolon if that's what stopped us.
@@ -206,7 +207,7 @@ Retry:
}
default: {
- if ((getLang().CPlusPlus || !OnlyStatement) && isDeclarationStatement()) {
+ if ((getLangOpts().CPlusPlus || !OnlyStatement) && isDeclarationStatement()) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy Decl = ParseDeclaration(Stmts, Declarator::BlockContext,
DeclEnd, attrs);
@@ -234,18 +235,18 @@ Retry:
}
case tok::kw_if: // C99 6.8.4.1: if-statement
- return ParseIfStatement(attrs);
+ return ParseIfStatement(attrs, TrailingElseLoc);
case tok::kw_switch: // C99 6.8.4.2: switch-statement
- return ParseSwitchStatement(attrs);
+ return ParseSwitchStatement(attrs, TrailingElseLoc);
case tok::kw_while: // C99 6.8.5.1: while-statement
- return ParseWhileStatement(attrs);
+ return ParseWhileStatement(attrs, TrailingElseLoc);
case tok::kw_do: // C99 6.8.5.2: do-statement
Res = ParseDoStatement(attrs);
SemiError = "do/while";
break;
case tok::kw_for: // C99 6.8.5.3: for-statement
- return ParseForStatement(attrs);
+ return ParseForStatement(attrs, TrailingElseLoc);
case tok::kw_goto: // C99 6.8.6.1: goto-statement
Res = ParseGotoStatement(attrs);
@@ -279,6 +280,14 @@ Retry:
case tok::kw___try:
return ParseSEHTryBlock(attrs);
+
+ case tok::annot_pragma_vis:
+ HandlePragmaVisibility();
+ return StmtEmpty();
+
+ case tok::annot_pragma_pack:
+ HandlePragmaPack();
+ return StmtEmpty();
}
// If we reached this code, the statement must end in a semicolon.
@@ -355,7 +364,8 @@ StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) {
return move(TryBlock);
StmtResult Handler;
- if(Tok.is(tok::kw___except)) {
+ if (Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == getSEHExceptKeyword()) {
SourceLocation Loc = ConsumeToken();
Handler = ParseSEHExceptBlock(Loc);
} else if (Tok.is(tok::kw___finally)) {
@@ -389,14 +399,14 @@ StmtResult Parser::ParseSEHExceptBlock(SourceLocation ExceptLoc) {
ParseScope ExpectScope(this, Scope::DeclScope | Scope::ControlScope);
- if (getLang().Borland) {
+ if (getLangOpts().Borland) {
Ident__exception_info->setIsPoisoned(false);
Ident___exception_info->setIsPoisoned(false);
Ident_GetExceptionInfo->setIsPoisoned(false);
}
ExprResult FilterExpr(ParseExpression());
- if (getLang().Borland) {
+ if (getLangOpts().Borland) {
Ident__exception_info->setIsPoisoned(true);
Ident___exception_info->setIsPoisoned(true);
Ident_GetExceptionInfo->setIsPoisoned(true);
@@ -594,7 +604,8 @@ StmtResult Parser::ParseCaseStatement(ParsedAttributes &attrs, bool MissingCase,
// Nicely diagnose the common error "switch (X) { case 4: }", which is
// not valid.
SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
- Diag(AfterColonLoc, diag::err_label_end_of_compound_statement);
+ Diag(AfterColonLoc, diag::err_label_end_of_compound_statement)
+ << FixItHint::CreateInsertion(AfterColonLoc, " ;");
SubStmt = true;
}
@@ -636,16 +647,22 @@ StmtResult Parser::ParseDefaultStatement(ParsedAttributes &attrs) {
ColonLoc = ExpectedLoc;
}
- // Diagnose the common error "switch (X) {... default: }", which is not valid.
- if (Tok.is(tok::r_brace)) {
+ StmtResult SubStmt;
+
+ if (Tok.isNot(tok::r_brace)) {
+ SubStmt = ParseStatement();
+ } else {
+ // Diagnose the common error "switch (X) {... default: }", which is
+ // not valid.
SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
- Diag(AfterColonLoc, diag::err_label_end_of_compound_statement);
- return StmtError();
+ Diag(AfterColonLoc, diag::err_label_end_of_compound_statement)
+ << FixItHint::CreateInsertion(AfterColonLoc, " ;");
+ SubStmt = true;
}
- StmtResult SubStmt(ParseStatement());
+ // A broken sub-stmt shouldn't prevent forming the default statement properly.
if (SubStmt.isInvalid())
- return StmtError();
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
return Actions.ActOnDefaultStmt(DefaultLoc, ColonLoc,
SubStmt.get(), getCurScope());
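
The recovery above targets this common slip; with the change, the diagnostic carries a fixit inserting " ;" and the default statement is still formed instead of returning StmtError():

    int classify(int x) {
      switch (x) {
      default: ;  // without the ';', clang reports a label at the end of a
                  // compound statement and now suggests inserting " ;"
      }
      return x;
    }
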
@@ -698,7 +715,6 @@ StmtResult Parser::ParseCompoundStatement(ParsedAttributes &attrs,
return ParseCompoundStatementBody(isStmtExpr);
}
-
/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
/// ActOnCompoundStmt action. This expects the '{' to be the current token, and
/// consume the '}' at the end of the block. It does not manipulate the scope
@@ -712,6 +728,8 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
if (T.consumeOpen())
return StmtError();
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+
StmtVector Stmts(Actions);
// "__label__ X, Y, Z;" is the GNU "Local Label" extension. These are
@@ -752,7 +770,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
continue;
}
- if (getLang().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
+ if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
Tok.is(tok::kw___if_not_exists))) {
ParseMicrosoftIfExistsStatement(Stmts);
continue;
@@ -771,7 +789,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
ConsumeToken();
ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX0XAttributes(attrs);
+ MaybeParseCXX0XAttributes(attrs, 0, /*MightBeObjCMessageSend*/ true);
// If this is the start of a declaration, parse it as such.
if (isDeclarationStatement()) {
@@ -805,17 +823,20 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
Stmts.push_back(R.release());
}
+ SourceLocation CloseLoc = Tok.getLocation();
+
// We broke out of the while loop because we found a '}' or EOF.
if (Tok.isNot(tok::r_brace)) {
Diag(Tok, diag::err_expected_rbrace);
Diag(T.getOpenLocation(), diag::note_matching) << "{";
- return StmtError();
+ // Recover by creating a compound statement with what we parsed so far,
+ // instead of dropping everything and returning StmtError();
+ } else {
+ if (!T.consumeClose())
+ CloseLoc = T.getCloseLocation();
}
- if (T.consumeClose())
- return StmtError();
-
- return Actions.ActOnCompoundStmt(T.getOpenLocation(), T.getCloseLocation(),
+ return Actions.ActOnCompoundStmt(T.getOpenLocation(), CloseLoc,
move_arg(Stmts), isStmtExpr);
}
@@ -837,7 +858,7 @@ bool Parser::ParseParenExprOrCondition(ExprResult &ExprResult,
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- if (getLang().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
ParseCXXCondition(ExprResult, DeclResult, Loc, ConvertToBoolean);
else {
ExprResult = ParseExpression();
@@ -873,7 +894,8 @@ bool Parser::ParseParenExprOrCondition(ExprResult &ExprResult,
/// [C++] 'if' '(' condition ')' statement
/// [C++] 'if' '(' condition ')' statement 'else' statement
///
-StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs) {
+StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs,
+ SourceLocation *TrailingElseLoc) {
// FIXME: Use attributes?
assert(Tok.is(tok::kw_if) && "Not an if stmt!");
@@ -885,7 +907,7 @@ StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs) {
return StmtError();
}
- bool C99orCXX = getLang().C99 || getLang().CPlusPlus;
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
// C99 6.8.4p3 - In C99, the if statement is a block. This is not
// the case for C90.
@@ -932,7 +954,9 @@ StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs) {
// Read the 'then' stmt.
SourceLocation ThenStmtLoc = Tok.getLocation();
- StmtResult ThenStmt(ParseStatement());
+
+ SourceLocation InnerStatementTrailingElseLoc;
+ StmtResult ThenStmt(ParseStatement(&InnerStatementTrailingElseLoc));
// Pop the 'if' scope if needed.
InnerScope.Exit();
@@ -943,6 +967,9 @@ StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs) {
StmtResult ElseStmt;
if (Tok.is(tok::kw_else)) {
+ if (TrailingElseLoc)
+ *TrailingElseLoc = Tok.getLocation();
+
ElseLoc = ConsumeToken();
ElseStmtLoc = Tok.getLocation();
@@ -966,6 +993,8 @@ StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs) {
Actions.CodeCompleteAfterIf(getCurScope());
cutOffParsing();
return StmtError();
+ } else if (InnerStatementTrailingElseLoc.isValid()) {
+ Diag(InnerStatementTrailingElseLoc, diag::warn_dangling_else);
}
IfScope.Exit();
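
What the new TrailingElseLoc plumbing lets the parser warn about, sketched:

    int pick(bool a, bool b) {
      if (a)
        if (b)
          return 1;
        else     // -Wdangling-else: binds to the inner 'if', not the outer one
          return 2;
      return 0;
    }
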
@@ -999,7 +1028,8 @@ StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs) {
/// switch-statement:
/// 'switch' '(' expression ')' statement
/// [C++] 'switch' '(' condition ')' statement
-StmtResult Parser::ParseSwitchStatement(ParsedAttributes &attrs) {
+StmtResult Parser::ParseSwitchStatement(ParsedAttributes &attrs,
+ SourceLocation *TrailingElseLoc) {
// FIXME: Use attributes?
assert(Tok.is(tok::kw_switch) && "Not a switch stmt!");
@@ -1011,7 +1041,7 @@ StmtResult Parser::ParseSwitchStatement(ParsedAttributes &attrs) {
return StmtError();
}
- bool C99orCXX = getLang().C99 || getLang().CPlusPlus;
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
// C99 6.8.4p3 - In C99, the switch statement is a block. This is
// not the case for C90. Start the switch scope.
@@ -1067,15 +1097,20 @@ StmtResult Parser::ParseSwitchStatement(ParsedAttributes &attrs) {
C99orCXX && Tok.isNot(tok::l_brace));
// Read the body statement.
- StmtResult Body(ParseStatement());
+ StmtResult Body(ParseStatement(TrailingElseLoc));
// Pop the scopes.
InnerScope.Exit();
SwitchScope.Exit();
- if (Body.isInvalid())
+ if (Body.isInvalid()) {
// FIXME: Remove the case statement list from the Switch statement.
- Body = Actions.ActOnNullStmt(Tok.getLocation());
+
+ // Put the synthesized null statement on the same line as the end of switch
+ // condition.
+ SourceLocation SynthesizedNullStmtLocation = Cond.get()->getLocEnd();
+ Body = Actions.ActOnNullStmt(SynthesizedNullStmtLocation);
+ }
return Actions.ActOnFinishSwitchStmt(SwitchLoc, Switch.get(), Body.get());
}
@@ -1084,7 +1119,8 @@ StmtResult Parser::ParseSwitchStatement(ParsedAttributes &attrs) {
/// while-statement: [C99 6.8.5.1]
/// 'while' '(' expression ')' statement
/// [C++] 'while' '(' condition ')' statement
-StmtResult Parser::ParseWhileStatement(ParsedAttributes &attrs) {
+StmtResult Parser::ParseWhileStatement(ParsedAttributes &attrs,
+ SourceLocation *TrailingElseLoc) {
// FIXME: Use attributes?
assert(Tok.is(tok::kw_while) && "Not a while stmt!");
@@ -1097,7 +1133,7 @@ StmtResult Parser::ParseWhileStatement(ParsedAttributes &attrs) {
return StmtError();
}
- bool C99orCXX = getLang().C99 || getLang().CPlusPlus;
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
// C99 6.8.5p5 - In C99, the while statement is a block. This is not
// the case for C90. Start the loop scope.
@@ -1142,7 +1178,7 @@ StmtResult Parser::ParseWhileStatement(ParsedAttributes &attrs) {
C99orCXX && Tok.isNot(tok::l_brace));
// Read the body statement.
- StmtResult Body(ParseStatement());
+ StmtResult Body(ParseStatement(TrailingElseLoc));
// Pop the body scope if needed.
InnerScope.Exit();
@@ -1167,7 +1203,7 @@ StmtResult Parser::ParseDoStatement(ParsedAttributes &attrs) {
// C99 6.8.5p5 - In C99, the do statement is a block. This is not
// the case for C90. Start the loop scope.
unsigned ScopeFlags;
- if (getLang().C99)
+ if (getLangOpts().C99)
ScopeFlags = Scope::BreakScope | Scope::ContinueScope | Scope::DeclScope;
else
ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
@@ -1183,7 +1219,7 @@ StmtResult Parser::ParseDoStatement(ParsedAttributes &attrs) {
// which is entered and exited each time through the loop.
//
ParseScope InnerScope(this, Scope::DeclScope,
- (getLang().C99 || getLang().CPlusPlus) &&
+ (getLangOpts().C99 || getLangOpts().CPlusPlus) &&
Tok.isNot(tok::l_brace));
// Read the body statement.
@@ -1241,7 +1277,8 @@ StmtResult Parser::ParseDoStatement(ParsedAttributes &attrs) {
/// [C++0x] for-range-initializer:
/// [C++0x] expression
/// [C++0x] braced-init-list [TODO]
-StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
+StmtResult Parser::ParseForStatement(ParsedAttributes &attrs,
+ SourceLocation *TrailingElseLoc) {
// FIXME: Use attributes?
assert(Tok.is(tok::kw_for) && "Not a for stmt!");
@@ -1253,7 +1290,7 @@ StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
return StmtError();
}
- bool C99orCXXorObjC = getLang().C99 || getLang().CPlusPlus || getLang().ObjC1;
+ bool C99orCXXorObjC = getLangOpts().C99 || getLangOpts().CPlusPlus || getLangOpts().ObjC1;
// C99 6.8.5p5 - In C99, the for statement is a block. This is not
// the case for C90. Start the loop scope.
@@ -1305,7 +1342,7 @@ StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
if (Tok.is(tok::semi)) { // for (;
// no first part, eat the ';'.
ConsumeToken();
- } else if (isSimpleDeclaration()) { // for (int X = 4;
+ } else if (isForInitDeclaration()) { // for (int X = 4;
// Parse declaration, which eats the ';'.
if (!C99orCXXorObjC) // Use of C99-style for loops in C90 mode?
Diag(Tok, diag::ext_c99_variable_decl_in_for_loop);
@@ -1314,7 +1351,7 @@ StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
MaybeParseCXX0XAttributes(attrs);
// In C++0x, "for (T NS:a" might not be a typo for ::
- bool MightBeForRangeStmt = getLang().CPlusPlus;
+ bool MightBeForRangeStmt = getLangOpts().CPlusPlus;
ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
@@ -1326,8 +1363,8 @@ StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
if (ForRangeInit.ParsedForRangeDecl()) {
- if (!getLang().CPlusPlus0x)
- Diag(ForRangeInit.ColonLoc, diag::ext_for_range);
+ Diag(ForRangeInit.ColonLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_for_range : diag::ext_for_range);
ForRange = true;
} else if (Tok.is(tok::semi)) { // for (int x = 4;
@@ -1370,6 +1407,13 @@ StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
return StmtError();
}
Collection = ParseExpression();
+ } else if (getLangOpts().CPlusPlus0x && Tok.is(tok::colon) && FirstPart.get()) {
+ // User tried to write the reasonable, but ill-formed, for-range-statement
+ // for (expr : expr) { ... }
+ Diag(Tok, diag::err_for_range_expected_decl)
+ << FirstPart.get()->getSourceRange();
+ SkipUntil(tok::r_paren, false, true);
+ SecondPartIsInvalid = true;
} else {
if (!Value.isInvalid()) {
Diag(Tok, diag::err_expected_semi_for);
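
The newly diagnosed form and its fix, sketched:

    #include <vector>

    int sum(const std::vector<int> &v) {
      int total = 0;
      // Ill-formed, now diagnosed specifically:  for (total : v) ...
      // A range-based for must declare its loop variable:
      for (int e : v)
        total += e;
      return total;
    }
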
@@ -1390,7 +1434,7 @@ StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
// missing both semicolons.
} else {
ExprResult Second;
- if (getLang().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
ParseCXXCondition(Second, SecondVar, ForLoc, true);
else {
Second = ParseExpression();
@@ -1458,7 +1502,7 @@ StmtResult Parser::ParseForStatement(ParsedAttributes &attrs) {
C99orCXXorObjC && Tok.isNot(tok::l_brace));
// Read the body statement.
- StmtResult Body(ParseStatement());
+ StmtResult Body(ParseStatement(TrailingElseLoc));
// Pop the body scope if needed.
InnerScope.Exit();
@@ -1564,13 +1608,12 @@ StmtResult Parser::ParseReturnStatement(ParsedAttributes &attrs) {
return StmtError();
}
- // FIXME: This is a hack to allow something like C++0x's generalized
- // initializer lists, but only enough of this feature to allow Clang to
- // parse libstdc++ 4.5's headers.
- if (Tok.is(tok::l_brace) && getLang().CPlusPlus) {
+ if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus) {
R = ParseInitializer();
- if (R.isUsable() && !getLang().CPlusPlus0x)
- Diag(R.get()->getLocStart(), diag::ext_generalized_initializer_lists)
+ if (R.isUsable())
+ Diag(R.get()->getLocStart(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_generalized_initializer_lists :
+ diag::ext_generalized_initializer_lists)
<< R.get()->getSourceRange();
} else
R = ParseExpression();
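
The construct involved, for reference: fine in C++11, an extension warning in C++98 mode, and a -Wc++98-compat warning in C++11 mode, all pointing at the braced list:

    #include <utility>

    std::pair<int, int> origin() {
      return {0, 0};  // generalized initializer list in a return statement
    }
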
@@ -1731,7 +1774,7 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
assert(Tok.is(tok::kw_asm) && "Not an asm stmt");
SourceLocation AsmLoc = ConsumeToken();
- if (getLang().MicrosoftExt && Tok.isNot(tok::l_paren) && !isTypeQualifier()) {
+ if (getLangOpts().MicrosoftExt && Tok.isNot(tok::l_paren) && !isTypeQualifier()) {
msAsm = true;
return ParseMicrosoftAsmStatement(AsmLoc);
}
@@ -1756,8 +1799,11 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
T.consumeOpen();
ExprResult AsmString(ParseAsmStringLiteral());
- if (AsmString.isInvalid())
+ if (AsmString.isInvalid()) {
+ // Consume up to and including the closing paren.
+ T.skipToEnd();
return StmtError();
+ }
SmallVector<IdentifierInfo *, 4> Names;
ExprVector Constraints(Actions);
@@ -1906,19 +1952,15 @@ bool Parser::ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
if (Tok.isNot(tok::comma)) return false;
ConsumeToken();
}
-
- return true;
}
Decl *Parser::ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope) {
assert(Tok.is(tok::l_brace));
SourceLocation LBraceLoc = Tok.getLocation();
- if (PP.isCodeCompletionEnabled()) {
- if (trySkippingFunctionBodyForCodeCompletion()) {
- BodyScope.Exit();
- return Actions.ActOnFinishFunctionBody(Decl, 0);
- }
+ if (SkipFunctionBodies && trySkippingFunctionBody()) {
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(Decl, 0);
}
PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, LBraceLoc,
@@ -1930,9 +1972,11 @@ Decl *Parser::ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope) {
StmtResult FnBody(ParseCompoundStatementBody());
// If the function body could not be parsed, make a bogus compoundstmt.
- if (FnBody.isInvalid())
+ if (FnBody.isInvalid()) {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc,
MultiStmtArg(Actions), false);
+ }
BodyScope.Exit();
return Actions.ActOnFinishFunctionBody(Decl, FnBody.take());
@@ -1956,36 +2000,36 @@ Decl *Parser::ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope) {
else
Actions.ActOnDefaultCtorInitializers(Decl);
- if (PP.isCodeCompletionEnabled()) {
- if (trySkippingFunctionBodyForCodeCompletion()) {
- BodyScope.Exit();
- return Actions.ActOnFinishFunctionBody(Decl, 0);
- }
+ if (SkipFunctionBodies && trySkippingFunctionBody()) {
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(Decl, 0);
}
SourceLocation LBraceLoc = Tok.getLocation();
StmtResult FnBody(ParseCXXTryBlockCommon(TryLoc));
// If we failed to parse the try-catch, we just give the function an empty
// compound statement as the body.
- if (FnBody.isInvalid())
+ if (FnBody.isInvalid()) {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc,
MultiStmtArg(Actions), false);
+ }
BodyScope.Exit();
return Actions.ActOnFinishFunctionBody(Decl, FnBody.take());
}
-bool Parser::trySkippingFunctionBodyForCodeCompletion() {
+bool Parser::trySkippingFunctionBody() {
assert(Tok.is(tok::l_brace));
- assert(PP.isCodeCompletionEnabled() &&
- "Should only be called when in code-completion mode");
+ assert(SkipFunctionBodies &&
+ "Should only be called when SkipFunctionBodies is enabled");
// Skip over the function body. If code completion is enabled, stop at the
// code-completion point so a body containing it still gets parsed for real.
TentativeParsingAction PA(*this);
ConsumeBrace();
if (SkipUntil(tok::r_brace, /*StopAtSemi=*/false, /*DontConsume=*/false,
- /*StopAtCodeCompletion=*/true)) {
+ /*StopAtCodeCompletion=*/PP.isCodeCompletionEnabled())) {
PA.Commit();
return true;
}
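
A toy sketch of the brace-balanced skip (clang's version works on tokens and, as above, also stops at the code-completion point; this character-level sketch ignores braces inside strings and comments):

    #include <cassert>
    #include <string>

    // Returns true and advances Pos past the body if a matching '}' is found;
    // returns false without moving Pos (the "revert" path) otherwise.
    bool trySkipBody(const std::string &Src, std::size_t &Pos) {
      assert(Pos < Src.size() && Src[Pos] == '{');
      std::size_t Depth = 0, P = Pos;
      do {
        if (P == Src.size())
          return false;          // ran off the end: caller re-parses for real
        if (Src[P] == '{') ++Depth;
        if (Src[P] == '}') --Depth;
        ++P;
      } while (Depth != 0);
      Pos = P;                   // commit: the body's tokens were skipped
      return true;
    }

    int main() {
      std::string Src = "{ if (x) { y(); } }";
      std::size_t Pos = 0;
      return trySkipBody(Src, Pos) && Pos == Src.size() ? 0 : 1;
    }
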
@@ -2035,10 +2079,13 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
return move(TryBlock);
// Borland allows SEH-handlers with 'try'
- if(Tok.is(tok::kw___except) || Tok.is(tok::kw___finally)) {
+
+ if((Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == getSEHExceptKeyword()) ||
+ Tok.is(tok::kw___finally)) {
// TODO: Factor into common return ParseSEHHandlerCommon(...)
StmtResult Handler;
- if(Tok.is(tok::kw___except)) {
+ if(Tok.getIdentifierInfo() == getSEHExceptKeyword()) {
SourceLocation Loc = ConsumeToken();
Handler = ParseSEHExceptBlock(Loc);
}
@@ -2130,19 +2177,51 @@ StmtResult Parser::ParseCXXCatchBlock() {
}
void Parser::ParseMicrosoftIfExistsStatement(StmtVector &Stmts) {
- bool Result;
+ IfExistsCondition Result;
if (ParseMicrosoftIfExistsCondition(Result))
return;
- if (Tok.isNot(tok::l_brace)) {
+ // Handle dependent statements by parsing the braces as a compound statement.
+ // This is not the same behavior as Visual C++, which doesn't treat this as a
+ // compound statement, but for Clang's type checking we can't have anything
+ // inside these braces escaping to the surrounding code.
+ if (Result.Behavior == IEB_Dependent) {
+ if (!Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return;
+ }
+
+ ParsedAttributes Attrs(AttrFactory);
+ StmtResult Compound = ParseCompoundStatement(Attrs);
+ if (Compound.isInvalid())
+ return;
+
+ StmtResult DepResult = Actions.ActOnMSDependentExistsStmt(Result.KeywordLoc,
+ Result.IsIfExists,
+ Result.SS,
+ Result.Name,
+ Compound.get());
+ if (DepResult.isUsable())
+ Stmts.push_back(DepResult.get());
+ return;
+ }
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
Diag(Tok, diag::err_expected_lbrace);
return;
}
- ConsumeBrace();
- // Condition is false skip all inside the {}.
- if (!Result) {
- SkipUntil(tok::r_brace, false);
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the statements below.
+ break;
+
+ case IEB_Dependent:
+ llvm_unreachable("Dependent case handled above");
+
+ case IEB_Skip:
+ Braces.skipToEnd();
return;
}
@@ -2152,10 +2231,5 @@ void Parser::ParseMicrosoftIfExistsStatement(StmtVector &Stmts) {
if (R.isUsable())
Stmts.push_back(R.release());
}
-
- if (Tok.isNot(tok::r_brace)) {
- Diag(Tok, diag::err_expected_rbrace);
- return;
- }
- ConsumeBrace();
+ Braces.consumeClose();
}
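
The Microsoft-mode construct being handled, sketched (requires -fms-extensions; member names are illustrative):

    struct S { void run(); };

    void drive(S &s) {
      __if_exists(S::run) {
        s.run();               // parsed: S::run exists
      }
      __if_not_exists(S::stop) {
        // taken: S has no member named 'stop'
      }
    }
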
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
index 3d68a4a..61cd9f2 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
@@ -31,8 +31,9 @@ Parser::ParseDeclarationStartingWithTemplate(unsigned Context,
ObjCDeclContextSwitch ObjCDC(*this);
if (Tok.is(tok::kw_template) && NextToken().isNot(tok::less)) {
- return ParseExplicitInstantiation(SourceLocation(), ConsumeToken(),
- DeclEnd);
+ return ParseExplicitInstantiation(Context,
+ SourceLocation(), ConsumeToken(),
+ DeclEnd, AS);
}
return ParseTemplateDeclarationOrSpecialization(Context, DeclEnd, AS,
AccessAttrs);
@@ -240,6 +241,10 @@ Parser::ParseSingleDeclarationAfterTemplate(
return 0;
}
+ LateParsedAttrList LateParsedAttrs;
+ if (DeclaratorInfo.isFunctionDeclarator())
+ MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+
// If we have a declaration or declarator list, handle it.
if (isDeclarationAfterDeclarator()) {
// Parse this declaration.
@@ -255,6 +260,8 @@ Parser::ParseSingleDeclarationAfterTemplate(
// Eat the semi colon after the declaration.
ExpectAndConsume(tok::semi, diag::err_expected_semi_declaration);
+ if (LateParsedAttrs.size() > 0)
+ ParseLexedAttributeList(LateParsedAttrs, ThisDecl, true, false);
DeclaratorInfo.complete(ThisDecl);
return ThisDecl;
}
@@ -262,20 +269,15 @@ Parser::ParseSingleDeclarationAfterTemplate(
if (DeclaratorInfo.isFunctionDeclarator() &&
isStartOfFunctionDefinition(DeclaratorInfo)) {
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
- Diag(Tok, diag::err_function_declared_typedef);
-
- if (Tok.is(tok::l_brace)) {
- // This recovery skips the entire function body. It would be nice
- // to simply call ParseFunctionDefinition() below, however Sema
- // assumes the declarator represents a function, not a typedef.
- ConsumeBrace();
- SkipUntil(tok::r_brace, true);
- } else {
- SkipUntil(tok::semi);
- }
- return 0;
+ // Recover by ignoring the 'typedef'. This was probably supposed to be
+ // the 'typename' keyword, which we should have already suggested adding
+ // if it's appropriate.
+ Diag(DS.getStorageClassSpecLoc(), diag::err_function_declared_typedef)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+ DS.ClearStorageClassSpecs();
}
- return ParseFunctionDefinition(DeclaratorInfo, TemplateInfo);
+ return ParseFunctionDefinition(DeclaratorInfo, TemplateInfo,
+ &LateParsedAttrs);
}
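
The recovery target, sketched: previously the whole body was skipped; now the stray 'typedef' is dropped with a removal fixit and the definition is parsed normally.

    // template <typename T> typedef T identity(T x) { return x; }  // error + fixit
    template <typename T> T identity(T x) { return x; }             // after the fixit
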
if (DeclaratorInfo.isFunctionDeclarator())
@@ -307,14 +309,19 @@ bool Parser::ParseTemplateParameters(unsigned Depth,
LAngleLoc = ConsumeToken();
// Try to parse the template parameter list.
- if (Tok.is(tok::greater))
- RAngleLoc = ConsumeToken();
- else if (ParseTemplateParameterList(Depth, TemplateParams)) {
- if (!Tok.is(tok::greater)) {
- Diag(Tok.getLocation(), diag::err_expected_greater);
- return true;
- }
+ bool Failed = false;
+ if (!Tok.is(tok::greater) && !Tok.is(tok::greatergreater))
+ Failed = ParseTemplateParameterList(Depth, TemplateParams);
+
+ if (Tok.is(tok::greatergreater)) {
+ Tok.setKind(tok::greater);
+ RAngleLoc = Tok.getLocation();
+ Tok.setLocation(Tok.getLocation().getLocWithOffset(1));
+ } else if (Tok.is(tok::greater))
RAngleLoc = ConsumeToken();
+ else if (Failed) {
+ Diag(Tok.getLocation(), diag::err_expected_greater);
+ return true;
}
return false;
}
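
What the tok::greatergreater splitting accepts: a '>>' that closes both a nested template-id and the enclosing template parameter list, for example via a default argument:

    #include <vector>

    // C++11 splits the '>>' into two '>' tokens (C++98 required '> >'):
    template <typename T = std::vector<int>> struct Holder { T Data; };

    std::vector<std::vector<int>> Nested;  // same splitting in argument lists
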
@@ -337,23 +344,21 @@ Parser::ParseTemplateParameterList(unsigned Depth,
} else {
// If we failed to parse a template parameter, skip until we find
// a comma or closing brace.
- SkipUntil(tok::comma, tok::greater, true, true);
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater, true, true);
}
// Did we find a comma or the end of the template parameter list?
if (Tok.is(tok::comma)) {
ConsumeToken();
- } else if (Tok.is(tok::greater)) {
+ } else if (Tok.is(tok::greater) || Tok.is(tok::greatergreater)) {
// Don't consume this... that's done by template parser.
break;
} else {
// Somebody probably forgot to close the template. Skip ahead and
// try to get out of the expression. This error is currently
// subsumed by whatever goes on in ParseTemplateParameter.
- // TODO: This could match >>, and it would be nice to avoid those
- // silly errors with template <vec<T>>.
Diag(Tok.getLocation(), diag::err_expected_comma_greater);
- SkipUntil(tok::greater, true, true);
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater, true, true);
return false;
}
}
@@ -476,7 +481,7 @@ Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
EllipsisLoc = ConsumeToken();
Diag(EllipsisLoc,
- getLang().CPlusPlus0x
+ getLangOpts().CPlusPlus0x
? diag::warn_cxx98_compat_variadic_templates
: diag::ext_variadic_templates);
}
@@ -488,7 +493,7 @@ Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
ParamName = Tok.getIdentifierInfo();
NameLoc = ConsumeToken();
} else if (Tok.is(tok::equal) || Tok.is(tok::comma) ||
- Tok.is(tok::greater)) {
+ Tok.is(tok::greater) || Tok.is(tok::greatergreater)) {
// Unnamed template parameter. Don't have to do anything here, just
// don't consume this token.
} else {
@@ -503,9 +508,10 @@ Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
ParsedType DefaultArg;
if (Tok.is(tok::equal)) {
EqualLoc = ConsumeToken();
- DefaultArg = ParseTypeName().get();
+ DefaultArg = ParseTypeName(/*Range=*/0,
+ Declarator::TemplateTypeArgContext).get();
}
-
+
return Actions.ActOnTypeParameter(getCurScope(), TypenameKeyword, Ellipsis,
EllipsisLoc, KeyLoc, ParamName, NameLoc,
Depth, Position, EqualLoc, DefaultArg);
@@ -536,13 +542,25 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
}
// Generate a meaningful error if the user forgot to put class before the
- // identifier, comma, or greater.
+ // identifier, comma, or greater. Provide a fixit if the identifier, comma,
+ // or greater appear immediately or after 'typename' or 'struct'. In the
+ // latter case, replace the keyword with 'class'.
if (!Tok.is(tok::kw_class)) {
- Diag(Tok.getLocation(), diag::err_expected_class_before)
- << PP.getSpelling(Tok);
- return 0;
- }
- ConsumeToken();
+ bool Replace = Tok.is(tok::kw_typename) || Tok.is(tok::kw_struct);
+ const Token& Next = Replace ? NextToken() : Tok;
+ if (Next.is(tok::identifier) || Next.is(tok::comma) ||
+ Next.is(tok::greater) || Next.is(tok::greatergreater) ||
+ Next.is(tok::ellipsis))
+ Diag(Tok.getLocation(), diag::err_class_on_template_template_param)
+ << (Replace ? FixItHint::CreateReplacement(Tok.getLocation(), "class")
+ : FixItHint::CreateInsertion(Tok.getLocation(), "class "));
+ else
+ Diag(Tok.getLocation(), diag::err_class_on_template_template_param);
+
+ if (Replace)
+ ConsumeToken();
+ } else
+ ConsumeToken();
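
The improved diagnostic in action, sketched: in this dialect a template template parameter must be introduced with 'class', so 'typename' or 'struct' in that position now gets a replacement fixit rather than a bare error.

    // template <template <typename> typename C> struct UseA;  // fixit: 'class'
    // template <template <typename> struct C>   struct UseB;  // fixit: 'class'
    template <template <typename> class C> struct Use { C<int> Value; };
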
// Parse the ellipsis, if given.
SourceLocation EllipsisLoc;
@@ -550,7 +568,7 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
EllipsisLoc = ConsumeToken();
Diag(EllipsisLoc,
- getLang().CPlusPlus0x
+ getLangOpts().CPlusPlus0x
? diag::warn_cxx98_compat_variadic_templates
: diag::ext_variadic_templates);
}
@@ -561,7 +579,8 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
if (Tok.is(tok::identifier)) {
ParamName = Tok.getIdentifierInfo();
NameLoc = ConsumeToken();
- } else if (Tok.is(tok::equal) || Tok.is(tok::comma) || Tok.is(tok::greater)) {
+ } else if (Tok.is(tok::equal) || Tok.is(tok::comma) ||
+ Tok.is(tok::greater) || Tok.is(tok::greatergreater)) {
// Unnamed template parameter. Don't have to do anything here, just
// don't consume this token.
} else {
@@ -587,10 +606,7 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
if (DefaultArg.isInvalid()) {
Diag(Tok.getLocation(),
diag::err_default_template_template_parameter_not_template);
- static const tok::TokenKind EndToks[] = {
- tok::comma, tok::greater, tok::greatergreater
- };
- SkipUntil(EndToks, 3, true, true);
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater, true, true);
}
}
@@ -618,12 +634,7 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
Declarator ParamDecl(DS, Declarator::TemplateParamContext);
ParseDeclarator(ParamDecl);
if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
- // This probably shouldn't happen - and it's more of a Sema thing, but
- // basically we didn't parse the type name because we couldn't associate
- // it with an AST node. we should just skip to the comma or greater.
- // TODO: This is currently a placeholder for some kind of Sema Error.
- Diag(Tok.getLocation(), diag::err_parse_error);
- SkipUntil(tok::comma, tok::greater, true, true);
+ Diag(Tok.getLocation(), diag::err_expected_template_parameter);
return 0;
}
@@ -709,15 +720,15 @@ Parser::ParseTemplateIdAfterTemplateName(TemplateTy Template,
RAngleLoc = Tok.getLocation();
if (Tok.is(tok::greatergreater)) {
- if (!getLang().CPlusPlus0x) {
- const char *ReplaceStr = "> >";
- if (NextToken().is(tok::greater) || NextToken().is(tok::greatergreater))
- ReplaceStr = "> > ";
-
- Diag(Tok.getLocation(), diag::err_two_right_angle_brackets_need_space)
- << FixItHint::CreateReplacement(
- SourceRange(Tok.getLocation()), ReplaceStr);
- }
+ const char *ReplaceStr = "> >";
+ if (NextToken().is(tok::greater) || NextToken().is(tok::greatergreater))
+ ReplaceStr = "> > ";
+
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_two_right_angle_brackets :
+ diag::err_two_right_angle_brackets_need_space)
+ << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()),
+ ReplaceStr);
Tok.setKind(tok::greater);
if (!ConsumeLastToken) {
@@ -770,10 +781,10 @@ Parser::ParseTemplateIdAfterTemplateName(TemplateTy Template,
///
bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
- UnqualifiedId &TemplateName,
SourceLocation TemplateKWLoc,
+ UnqualifiedId &TemplateName,
bool AllowTypeAnnotation) {
- assert(getLang().CPlusPlus && "Can only annotate template-ids in C++");
+ assert(getLangOpts().CPlusPlus && "Can only annotate template-ids in C++");
assert(Template && Tok.is(tok::less) &&
"Parser isn't at the beginning of a template-id");
@@ -803,10 +814,9 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
// Build the annotation token.
if (TNK == TNK_Type_template && AllowTypeAnnotation) {
TypeResult Type
- = Actions.ActOnTemplateIdType(SS,
+ = Actions.ActOnTemplateIdType(SS, TemplateKWLoc,
Template, TemplateNameLoc,
- LAngleLoc, TemplateArgsPtr,
- RAngleLoc);
+ LAngleLoc, TemplateArgsPtr, RAngleLoc);
if (Type.isInvalid()) {
// If we failed to parse the template ID but skipped ahead to a >, we're not
// going to be able to form a token annotation. Eat the '>' if present.
@@ -838,6 +848,7 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
TemplateId->Operator = TemplateName.OperatorFunctionId.Operator;
}
TemplateId->SS = SS;
+ TemplateId->TemplateKWLoc = TemplateKWLoc;
TemplateId->Template = Template;
TemplateId->Kind = TNK;
TemplateId->LAngleLoc = LAngleLoc;
@@ -883,6 +894,7 @@ void Parser::AnnotateTemplateIdTokenAsType() {
TypeResult Type
= Actions.ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->TemplateNameLoc,
TemplateId->LAngleLoc,
@@ -932,7 +944,7 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
if (SS.isSet() && Tok.is(tok::kw_template)) {
// Parse the optional 'template' keyword following the
// nested-name-specifier.
- SourceLocation TemplateLoc = ConsumeToken();
+ SourceLocation TemplateKWLoc = ConsumeToken();
if (Tok.is(tok::identifier)) {
// We appear to have a dependent template name.
@@ -949,8 +961,8 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
// template argument.
TemplateTy Template;
if (isEndOfTemplateArgument(Tok) &&
- Actions.ActOnDependentTemplateName(getCurScope(), TemplateLoc,
- SS, Name,
+ Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, Name,
/*ObjectType=*/ ParsedType(),
/*EnteringContext=*/false,
Template))
@@ -1033,7 +1045,7 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
// Parse a non-type template argument.
SourceLocation Loc = Tok.getLocation();
- ExprResult ExprArg = ParseConstantExpression();
+ ExprResult ExprArg = ParseConstantExpression(MaybeTypeCast);
if (ExprArg.isInvalid() || !ExprArg.get())
return ParsedTemplateArgument();
@@ -1113,17 +1125,19 @@ Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
/// 'extern' [opt] 'template' declaration
///
/// Note that the 'extern' is a GNU extension and C++0x feature.
-Decl *Parser::ParseExplicitInstantiation(SourceLocation ExternLoc,
+Decl *Parser::ParseExplicitInstantiation(unsigned Context,
+ SourceLocation ExternLoc,
SourceLocation TemplateLoc,
- SourceLocation &DeclEnd) {
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS) {
// This isn't really required here.
ParsingDeclRAIIObject ParsingTemplateParams(*this);
- return ParseSingleDeclarationAfterTemplate(Declarator::FileContext,
+ return ParseSingleDeclarationAfterTemplate(Context,
ParsedTemplateInfo(ExternLoc,
TemplateLoc),
ParsingTemplateParams,
- DeclEnd, AS_none);
+ DeclEnd, AS);
}
SourceRange Parser::ParsedTemplateInfo::getSourceRange() const {
@@ -1157,29 +1171,27 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplatedFunction &LMT) {
if(!LMT.D)
return;
- // If this is a member template, introduce the template parameter scope.
- ParseScope TemplateScope(this, Scope::TemplateParamScope);
-
// Get the FunctionDecl.
FunctionDecl *FD = 0;
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(LMT.D))
FD = FunTmpl->getTemplatedDecl();
else
FD = cast<FunctionDecl>(LMT.D);
-
- // Reinject the template parameters.
+
+ // To restore the context after late parsing.
+ Sema::ContextRAII GlobalSavedContext(Actions, Actions.CurContext);
+
SmallVector<ParseScope*, 4> TemplateParamScopeStack;
DeclaratorDecl* Declarator = dyn_cast<DeclaratorDecl>(FD);
if (Declarator && Declarator->getNumTemplateParameterLists() != 0) {
+ TemplateParamScopeStack.push_back(new ParseScope(this, Scope::TemplateParamScope));
Actions.ActOnReenterDeclaratorTemplateScope(getCurScope(), Declarator);
Actions.ActOnReenterTemplateScope(getCurScope(), LMT.D);
} else {
- Actions.ActOnReenterTemplateScope(getCurScope(), LMT.D);
-
// Get the list of DeclContext to reenter.
SmallVector<DeclContext*, 4> DeclContextToReenter;
DeclContext *DD = FD->getLexicalParent();
- while (DD && DD->isRecord()) {
+ while (DD && !DD->isTranslationUnit()) {
DeclContextToReenter.push_back(DD);
DD = DD->getLexicalParent();
}
@@ -1190,7 +1202,7 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplatedFunction &LMT) {
for (; II != DeclContextToReenter.rend(); ++II) {
if (ClassTemplatePartialSpecializationDecl* MD =
dyn_cast_or_null<ClassTemplatePartialSpecializationDecl>(*II)) {
- TemplateParamScopeStack.push_back(new ParseScope(this,
+ TemplateParamScopeStack.push_back(new ParseScope(this,
Scope::TemplateParamScope));
Actions.ActOnReenterTemplateScope(getCurScope(), MD);
} else if (CXXRecordDecl* MD = dyn_cast_or_null<CXXRecordDecl>(*II)) {
@@ -1200,8 +1212,14 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplatedFunction &LMT) {
Actions.ActOnReenterTemplateScope(getCurScope(),
MD->getDescribedClassTemplate());
}
+ TemplateParamScopeStack.push_back(new ParseScope(this, Scope::DeclScope));
+ Actions.PushDeclContext(Actions.getCurScope(), *II);
}
+ TemplateParamScopeStack.push_back(new ParseScope(this,
+ Scope::TemplateParamScope));
+ Actions.ActOnReenterTemplateScope(getCurScope(), LMT.D);
}
+
assert(!LMT.Toks.empty() && "Empty body!");
// Append the current token at the end of the new token stream so that it
@@ -1218,8 +1236,8 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplatedFunction &LMT) {
// to be re-used for method bodies as well.
ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
- // Recreate the DeclContext.
- Sema::ContextRAII SavedContext(Actions, Actions.getContainingDC(FD));
+ // Recreate the containing function DeclContext.
+ Sema::ContextRAII FunctionSavedContext(Actions, Actions.getContainingDC(FD));
if (FunctionTemplateDecl *FunctionTemplate
= dyn_cast_or_null<FunctionTemplateDecl>(LMT.D))
@@ -1227,7 +1245,7 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplatedFunction &LMT) {
FunctionTemplate->getTemplatedDecl());
if (FunctionDecl *Function = dyn_cast_or_null<FunctionDecl>(LMT.D))
Actions.ActOnStartOfFunctionDef(getCurScope(), Function);
-
+
if (Tok.is(tok::kw_try)) {
ParseFunctionTryBlock(LMT.D, FnScope);
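
This code path serves late-parsed templates (for example under -fdelayed-template-parsing, clang's MSVC-compatibility mode): bodies are cached as tokens and parsed on demand, so every enclosing context, namespaces now included, must be re-entered first. A shape that exercises the new namespace walk:

    namespace lib {
    struct Widget {
      template <typename T>
      T twice(T v);             // the body below may be cached and parsed late
    };

    template <typename T>
    T Widget::twice(T v) { return v + v; }
    } // namespace lib

    int main() {
      lib::Widget w;
      return w.twice(0);        // instantiation forces the late parse
    }
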
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
index d53839f..28c5e8b 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
@@ -62,7 +62,7 @@ bool Parser::isCXXDeclarationStatement() {
return true;
// simple-declaration
default:
- return isCXXSimpleDeclaration();
+ return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/false);
}
}
@@ -75,7 +75,11 @@ bool Parser::isCXXDeclarationStatement() {
/// simple-declaration:
/// decl-specifier-seq init-declarator-list[opt] ';'
///
-bool Parser::isCXXSimpleDeclaration() {
+/// (if AllowForRangeDecl specified)
+/// for ( for-range-declaration : for-range-initializer ) statement
+/// for-range-declaration:
+/// attribute-specifier-seq[opt] type-specifier-seq declarator
+bool Parser::isCXXSimpleDeclaration(bool AllowForRangeDecl) {
// C++ 6.8p1:
// There is an ambiguity in the grammar involving expression-statements and
// declarations: An expression-statement with a function-style explicit type
@@ -112,7 +116,7 @@ bool Parser::isCXXSimpleDeclaration() {
// We need tentative parsing...
TentativeParsingAction PA(*this);
- TPR = TryParseSimpleDeclaration();
+ TPR = TryParseSimpleDeclaration(AllowForRangeDecl);
PA.Revert();
// In case of an error, let the declaration parsing code handle it.
@@ -130,7 +134,12 @@ bool Parser::isCXXSimpleDeclaration() {
/// simple-declaration:
/// decl-specifier-seq init-declarator-list[opt] ';'
///
-Parser::TPResult Parser::TryParseSimpleDeclaration() {
+/// (if AllowForRangeDecl specified)
+/// for ( for-range-declaration : for-range-initializer ) statement
+/// for-range-declaration:
+/// attribute-specifier-seq[opt] type-specifier-seq declarator
+///
+Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
// We know that we have a simple-type-specifier/typename-specifier followed
// by a '('.
assert(isCXXDeclarationSpecifier() == TPResult::Ambiguous());
@@ -140,7 +149,7 @@ Parser::TPResult Parser::TryParseSimpleDeclaration() {
else {
ConsumeToken();
- if (getLang().ObjC1 && Tok.is(tok::less))
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
TryParseProtocolQualifiers();
}
@@ -150,7 +159,7 @@ Parser::TPResult Parser::TryParseSimpleDeclaration() {
if (TPR != TPResult::Ambiguous())
return TPR;
- if (Tok.isNot(tok::semi))
+ if (Tok.isNot(tok::semi) && (!AllowForRangeDecl || Tok.isNot(tok::colon)))
return TPResult::False();
return TPResult::Ambiguous();
@@ -224,6 +233,8 @@ Parser::TPResult Parser::TryParseInitDeclaratorList() {
/// condition:
/// expression
/// type-specifier-seq declarator '=' assignment-expression
+/// [C++11] type-specifier-seq declarator '=' initializer-clause
+/// [C++11] type-specifier-seq declarator braced-init-list
/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
/// '=' assignment-expression
///
@@ -247,7 +258,7 @@ bool Parser::isCXXConditionDeclaration() {
else {
ConsumeToken();
- if (getLang().ObjC1 && Tok.is(tok::less))
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
TryParseProtocolQualifiers();
}
assert(Tok.is(tok::l_paren) && "Expected '('");
@@ -265,6 +276,8 @@ bool Parser::isCXXConditionDeclaration() {
if (Tok.is(tok::equal) ||
Tok.is(tok::kw_asm) || Tok.is(tok::kw___attribute))
TPR = TPResult::True();
+ else if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace))
+ TPR = TPResult::True();
else
TPR = TPResult::False();
}
@@ -322,7 +335,7 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
else {
ConsumeToken();
- if (getLang().ObjC1 && Tok.is(tok::less))
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
TryParseProtocolQualifiers();
}
@@ -347,7 +360,7 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
// ',', this is a type-id. Otherwise, it's an expression.
} else if (Context == TypeIdAsTemplateArgument &&
(Tok.is(tok::greater) || Tok.is(tok::comma) ||
- (getLang().CPlusPlus0x && Tok.is(tok::greatergreater)))) {
+ (getLangOpts().CPlusPlus0x && Tok.is(tok::greatergreater)))) {
TPR = TPResult::True();
isAmbiguous = true;
@@ -361,91 +374,166 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
return TPR == TPResult::True();
}
-/// isCXX0XAttributeSpecifier - returns true if this is a C++0x
-/// attribute-specifier. By default, unless in Obj-C++, only a cursory check is
-/// performed that will simply return true if a [[ is seen. Currently C++ has no
-/// syntactical ambiguities from this check, but it may inhibit error recovery.
-/// If CheckClosing is true, a check is made for closing ]] brackets.
+/// \brief Returns true if this is a C++11 attribute-specifier. Per
+/// C++11 [dcl.attr.grammar]p6, two consecutive left square bracket tokens
+/// always introduce an attribute. In Objective-C++11, this rule does not
+/// apply if either '[' begins a message-send.
+///
+/// If Disambiguate is true, we try harder to determine whether a '[[' starts
+/// an attribute-specifier, and return CAK_InvalidAttributeSpecifier if not.
///
-/// If given, After is set to the token after the attribute-specifier so that
-/// appropriate parsing decisions can be made; it is left untouched if false is
-/// returned.
+/// If OuterMightBeMessageSend is true, we assume the outer '[' is either an
+/// Obj-C message send or the start of an attribute. Otherwise, we assume it
+/// is not an Obj-C message send.
///
-/// FIXME: If an error is in the closing ]] brackets, the program assumes
-/// the absence of an attribute-specifier, which can cause very yucky errors
-/// to occur.
+/// C++11 [dcl.attr.grammar]:
///
-/// [C++0x] attribute-specifier:
+/// attribute-specifier:
/// '[' '[' attribute-list ']' ']'
/// alignment-specifier
///
-/// [C++0x] attribute-list:
+/// attribute-list:
/// attribute[opt]
/// attribute-list ',' attribute[opt]
+/// attribute '...'
+/// attribute-list ',' attribute '...'
///
-/// [C++0x] attribute:
+/// attribute:
/// attribute-token attribute-argument-clause[opt]
///
-/// [C++0x] attribute-token:
-/// identifier
-/// attribute-scoped-token
-///
-/// [C++0x] attribute-scoped-token:
-/// attribute-namespace '::' identifier
-///
-/// [C++0x] attribute-namespace:
+/// attribute-token:
/// identifier
+/// identifier '::' identifier
///
-/// [C++0x] attribute-argument-clause:
+/// attribute-argument-clause:
/// '(' balanced-token-seq ')'
-///
-/// [C++0x] balanced-token-seq:
-/// balanced-token
-/// balanced-token-seq balanced-token
-///
-/// [C++0x] balanced-token:
-/// '(' balanced-token-seq ')'
-/// '[' balanced-token-seq ']'
-/// '{' balanced-token-seq '}'
-/// any token but '(', ')', '[', ']', '{', or '}'
-bool Parser::isCXX0XAttributeSpecifier (bool CheckClosing,
- tok::TokenKind *After) {
+Parser::CXX11AttributeKind
+Parser::isCXX11AttributeSpecifier(bool Disambiguate,
+ bool OuterMightBeMessageSend) {
if (Tok.is(tok::kw_alignas))
- return true;
+ return CAK_AttributeSpecifier;
if (Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square))
- return false;
-
- // No tentative parsing if we don't need to look for ]]
- if (!CheckClosing && !getLang().ObjC1)
- return true;
-
- struct TentativeReverter {
- TentativeParsingAction PA;
+ return CAK_NotAttributeSpecifier;
- TentativeReverter (Parser& P)
- : PA(P)
- {}
- ~TentativeReverter () {
- PA.Revert();
- }
- } R(*this);
+ // No tentative parsing if we don't need to look for ']]' or a lambda.
+ if (!Disambiguate && !getLangOpts().ObjC1)
+ return CAK_AttributeSpecifier;
+
+ TentativeParsingAction PA(*this);
// Opening brackets were checked for above.
ConsumeBracket();
- ConsumeBracket();
- // SkipUntil will handle balanced tokens, which are guaranteed in attributes.
- SkipUntil(tok::r_square, false);
+ // Outside Obj-C++11, treat anything with a matching ']]' as an attribute.
+ if (!getLangOpts().ObjC1) {
+ ConsumeBracket();
+
+ bool IsAttribute = SkipUntil(tok::r_square, false);
+ IsAttribute &= Tok.is(tok::r_square);
+
+ PA.Revert();
+
+ return IsAttribute ? CAK_AttributeSpecifier : CAK_InvalidAttributeSpecifier;
+ }
+
+ // In Obj-C++11, we need to distinguish four situations:
+ // 1a) int x[[attr]]; C++11 attribute.
+ // 1b) [[attr]]; C++11 statement attribute.
+ // 2) int x[[obj](){ return 1; }()]; Lambda in array size/index.
+ // 3a) int x[[obj get]]; Message send in array size/index.
+ // 3b) [[Class alloc] init]; Message send in message send.
+ // 4) [[obj]{ return self; }() doStuff]; Lambda in message send.
+ // (1) is an attribute, (2) is ill-formed, and (3) and (4) are accepted.
+
+ // If we have a lambda-introducer, then this is definitely not a message send.
+ // FIXME: If this disambiguation is too slow, fold the tentative lambda parse
+ // into the tentative attribute parse below.
+ LambdaIntroducer Intro;
+ if (!TryParseLambdaIntroducer(Intro)) {
+ // A lambda cannot end with ']]', and an attribute must.
+ bool IsAttribute = Tok.is(tok::r_square);
+
+ PA.Revert();
+
+ if (IsAttribute)
+ // Case 1: C++11 attribute.
+ return CAK_AttributeSpecifier;
+
+ if (OuterMightBeMessageSend)
+ // Case 4: Lambda in message send.
+ return CAK_NotAttributeSpecifier;
+
+ // Case 2: Lambda in array size / index.
+ return CAK_InvalidAttributeSpecifier;
+ }
- if (Tok.isNot(tok::r_square))
- return false;
ConsumeBracket();
- if (After)
- *After = Tok.getKind();
+ // If we don't have a lambda-introducer, then we have an attribute or a
+ // message-send.
+ bool IsAttribute = true;
+ while (Tok.isNot(tok::r_square)) {
+ if (Tok.is(tok::comma)) {
+ // Case 1: Stray commas can only occur in attributes.
+ PA.Revert();
+ return CAK_AttributeSpecifier;
+ }
+
+ // Parse the attribute-token, if present.
+ // C++11 [dcl.attr.grammar]:
+ // If a keyword or an alternative token that satisfies the syntactic
+ // requirements of an identifier is contained in an attribute-token,
+ // it is considered an identifier.
+ SourceLocation Loc;
+ if (!TryParseCXX11AttributeIdentifier(Loc)) {
+ IsAttribute = false;
+ break;
+ }
+ if (Tok.is(tok::coloncolon)) {
+ ConsumeToken();
+ if (!TryParseCXX11AttributeIdentifier(Loc)) {
+ IsAttribute = false;
+ break;
+ }
+ }
- return true;
+ // Parse the attribute-argument-clause, if present.
+ if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren, false)) {
+ IsAttribute = false;
+ break;
+ }
+ }
+
+ if (Tok.is(tok::ellipsis))
+ ConsumeToken();
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ }
+
+ // An attribute must end ']]'.
+ if (IsAttribute) {
+ if (Tok.is(tok::r_square)) {
+ ConsumeBracket();
+ IsAttribute = Tok.is(tok::r_square);
+ } else {
+ IsAttribute = false;
+ }
+ }
+
+ PA.Revert();
+
+ if (IsAttribute)
+ // Case 1: C++11 statement attribute.
+ return CAK_AttributeSpecifier;
+
+ // Case 3: Message send.
+ return CAK_NotAttributeSpecifier;
}
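
The non-Objective-C++ half of the predicate in standard terms: per C++11 [dcl.attr.grammar]p6, '[[' always begins an attribute, which is why case (2) in the comments above is ill-formed. A sketch:

    [[noreturn]] void fail();              // case 1: attribute-specifier

    int arr[3] = {1, 2, 3};
    // int x = arr[[]{ return 1; }()];     // case 2: ill-formed, '[[' wins
    int y = arr[([] { return 1; }())];     // OK: parenthesized lambda
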
/// declarator:
@@ -540,7 +628,8 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
ConsumeParen();
if (mayBeAbstract &&
(Tok.is(tok::r_paren) || // 'int()' is a function.
- Tok.is(tok::ellipsis) || // 'int(...)' is a function.
+ // 'int(...)' is a function.
+ (Tok.is(tok::ellipsis) && NextToken().is(tok::r_paren)) ||
isDeclarationSpecifier())) { // 'int(int)' is a function.
// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
// exception-specification[opt]
@@ -670,11 +759,14 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw___is_convertible_to:
case tok::kw___is_empty:
case tok::kw___is_enum:
+ case tok::kw___is_final:
case tok::kw___is_literal:
case tok::kw___is_literal_type:
case tok::kw___is_pod:
case tok::kw___is_polymorphic:
case tok::kw___is_trivial:
+ case tok::kw___is_trivially_assignable:
+ case tok::kw___is_trivially_constructible:
case tok::kw___is_trivially_copyable:
case tok::kw___is_union:
case tok::kw___uuidof:
@@ -690,6 +782,7 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw_int:
case tok::kw_long:
case tok::kw___int64:
+ case tok::kw___int128:
case tok::kw_restrict:
case tok::kw_short:
case tok::kw_signed:
@@ -705,7 +798,6 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw_wchar_t:
case tok::kw_char16_t:
case tok::kw_char32_t:
- case tok::kw_decltype:
case tok::kw___underlying_type:
case tok::kw_thread_local:
case tok::kw__Decimal32:
@@ -825,7 +917,8 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
/// 'volatile'
/// [GNU] restrict
///
-Parser::TPResult Parser::isCXXDeclarationSpecifier() {
+Parser::TPResult
+Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult) {
switch (Tok.getKind()) {
case tok::identifier: // foo::bar
// Check for need to substitute AltiVec __vector keyword
@@ -840,21 +933,22 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
return TPResult::Error();
if (Tok.is(tok::identifier))
return TPResult::False();
- return isCXXDeclarationSpecifier();
+ return isCXXDeclarationSpecifier(BracedCastResult);
case tok::coloncolon: { // ::foo::bar
const Token &Next = NextToken();
if (Next.is(tok::kw_new) || // ::new
Next.is(tok::kw_delete)) // ::delete
return TPResult::False();
-
+ }
+ // Fall through.
+ case tok::kw_decltype:
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error();
- return isCXXDeclarationSpecifier();
- }
-
+ return isCXXDeclarationSpecifier(BracedCastResult);
+
// decl-specifier:
// storage-class-specifier
// type-specifier
@@ -940,8 +1034,31 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
// We've already annotated a scope; try to annotate a type.
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error();
- if (!Tok.is(tok::annot_typename))
+ if (!Tok.is(tok::annot_typename)) {
+ // If the next token is an identifier or a type qualifier, then this
+ // can't possibly be a valid expression either.
+ if (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier)) {
+ CXXScopeSpec SS;
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+ if (SS.getScopeRep() && SS.getScopeRep()->isDependent()) {
+ TentativeParsingAction PA(*this);
+ ConsumeToken();
+ ConsumeToken();
+ bool isIdentifier = Tok.is(tok::identifier);
+ TPResult TPR = TPResult::False();
+ if (!isIdentifier)
+ TPR = isCXXDeclarationSpecifier(BracedCastResult);
+ PA.Revert();
+
+ if (isIdentifier ||
+ TPR == TPResult::True() || TPR == TPResult::Error())
+ return TPResult::Error();
+ }
+ }
return TPResult::False();
+ }
// If that succeeded, fallthrough into the generic simple-type-id case.
// The ambiguity resides in a simple-type-specifier/typename-specifier
@@ -965,13 +1082,14 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
case tok::annot_typename:
case_typename:
// In Objective-C, we might have a protocol-qualified type.
- if (getLang().ObjC1 && NextToken().is(tok::less)) {
+ if (getLangOpts().ObjC1 && NextToken().is(tok::less)) {
// Tentatively parse the protocol qualifiers.
TentativeParsingAction PA(*this);
ConsumeToken(); // The type token
TPResult TPR = TryParseProtocolQualifiers();
bool isFollowedByParen = Tok.is(tok::l_paren);
+ bool isFollowedByBrace = Tok.is(tok::l_brace);
PA.Revert();
@@ -980,6 +1098,9 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
if (isFollowedByParen)
return TPResult::Ambiguous();
+
+ if (getLangOpts().CPlusPlus0x && isFollowedByBrace)
+ return BracedCastResult;
return TPResult::True();
}
@@ -993,15 +1114,26 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
case tok::kw_int:
case tok::kw_long:
case tok::kw___int64:
+ case tok::kw___int128:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
case tok::kw_void:
+ case tok::annot_decltype:
if (NextToken().is(tok::l_paren))
return TPResult::Ambiguous();
+ // This is a function-style cast in all cases we disambiguate other than
+ // one:
+ // struct S {
+ // enum E : int { a = 4 }; // enum
+ // enum E : int { 4 }; // bit-field
+ // };
+ if (getLangOpts().CPlusPlus0x && NextToken().is(tok::l_brace))
+ return BracedCastResult;
+
if (isStartOfObjCClassMessageMissingOpenBracket())
return TPResult::False();
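
For readers tracking the new BracedCastResult parameter: in C++11 the same 'type {' pair can open an enumerator list or a functional cast on a braced-init-list, which is why callers above must say how an ambiguous brace should be classified. Two compilable lines, separate from the bit-field corner case recorded in the comment:

// C++11: identical 'int {' prefixes, different constructs.
enum E : int { a = 4 };   // declaration: 'int' is the fixed underlying type
int n = int{42};          // expression: function-style cast of a braced list
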
@@ -1016,6 +1148,7 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
TPResult TPR = TryParseTypeofSpecifier();
bool isFollowedByParen = Tok.is(tok::l_paren);
+ bool isFollowedByBrace = Tok.is(tok::l_brace);
PA.Revert();
@@ -1025,18 +1158,17 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
if (isFollowedByParen)
return TPResult::Ambiguous();
- return TPResult::True();
- }
+ if (getLangOpts().CPlusPlus0x && isFollowedByBrace)
+ return BracedCastResult;
- // C++0x decltype support.
- case tok::kw_decltype:
return TPResult::True();
+ }
// C++0x type traits support
case tok::kw___underlying_type:
return TPResult::True();
- // C1x _Atomic
+ // C11 _Atomic
case tok::kw__Atomic:
return TPResult::True();
@@ -1096,7 +1228,7 @@ Parser::TPResult Parser::TryParseDeclarationSpecifier() {
else {
ConsumeToken();
- if (getLang().ObjC1 && Tok.is(tok::less))
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
TryParseProtocolQualifiers();
}
@@ -1160,11 +1292,13 @@ bool Parser::isCXXFunctionDeclarator(bool warnIfAmbiguous) {
/// parameter-declaration-list ',' parameter-declaration
///
/// parameter-declaration:
-/// decl-specifier-seq declarator attributes[opt]
-/// decl-specifier-seq declarator attributes[opt] '=' assignment-expression
-/// decl-specifier-seq abstract-declarator[opt] attributes[opt]
-/// decl-specifier-seq abstract-declarator[opt] attributes[opt]
+/// attribute-specifier-seq[opt] decl-specifier-seq declarator attributes[opt]
+/// attribute-specifier-seq[opt] decl-specifier-seq declarator attributes[opt]
/// '=' assignment-expression
+/// attribute-specifier-seq[opt] decl-specifier-seq abstract-declarator[opt]
+/// attributes[opt]
+/// attribute-specifier-seq[opt] decl-specifier-seq abstract-declarator[opt]
+/// attributes[opt] '=' assignment-expression
///
Parser::TPResult Parser::TryParseParameterDeclarationClause() {
@@ -1182,13 +1316,23 @@ Parser::TPResult Parser::TryParseParameterDeclarationClause() {
// '...'[opt]
if (Tok.is(tok::ellipsis)) {
ConsumeToken();
- return TPResult::True(); // '...' is a sign of a function declarator.
+ if (Tok.is(tok::r_paren))
+ return TPResult::True(); // '...)' is a sign of a function declarator.
+ else
+ return TPResult::False();
}
+ // An attribute-specifier-seq here is a sign of a function declarator.
+ if (isCXX11AttributeSpecifier(/*Disambiguate*/false,
+ /*OuterMightBeMessageSend*/true))
+ return TPResult::True();
+
ParsedAttributes attrs(AttrFactory);
MaybeParseMicrosoftAttributes(attrs);
// decl-specifier-seq
+ // A parameter-declaration's initializer must be preceded by an '=', so
+ // decl-specifier-seq '{' is not a parameter in C++11.
TPResult TPR = TryParseDeclarationSpecifier();
if (TPR != TPResult::Ambiguous())
return TPR;
@@ -1206,14 +1350,17 @@ Parser::TPResult Parser::TryParseParameterDeclarationClause() {
if (Tok.is(tok::equal)) {
// '=' assignment-expression
// Parse through assignment-expression.
- tok::TokenKind StopToks[2] ={ tok::comma, tok::r_paren };
- if (!SkipUntil(StopToks, 2, true/*StopAtSemi*/, true/*DontConsume*/))
+ if (!SkipUntil(tok::comma, tok::r_paren, true/*StopAtSemi*/,
+ true/*DontConsume*/))
return TPResult::Error();
}
if (Tok.is(tok::ellipsis)) {
ConsumeToken();
- return TPResult::True(); // '...' is a sign of a function declarator.
+ if (Tok.is(tok::r_paren))
+ return TPResult::True(); // '...)' is a sign of a function declarator.
+ else
+ return TPResult::False();
}
if (Tok.isNot(tok::comma))
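
The two '...)' checks above make the heuristic stricter: a bare ellipsis proves a function declarator only when ')' follows, and a leading C++11 attribute-specifier now proves one as well. A few compilable declarations of the kind the tentative parse must separate; these are illustrative, not from the patch's tests, and '[[maybe_unused]]' is only a stand-in attribute since the disambiguation is purely syntactic:

struct S { S(int); };
int  f(int, ...);               // '...)' after a parameter: function declarator
S    g(42);                     // direct-initialization of a variable 'g'
void h([[maybe_unused]] int);   // attribute-specifier first: must be a function
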
diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
index c909643..054a8fd 100644
--- a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
@@ -23,15 +23,24 @@
#include "clang/AST/ASTConsumer.h"
using namespace clang;
-Parser::Parser(Preprocessor &pp, Sema &actions)
+IdentifierInfo *Parser::getSEHExceptKeyword() {
+ // __except is accepted as a (contextual) keyword
+ if (!Ident__except && (getLangOpts().MicrosoftExt || getLangOpts().Borland))
+ Ident__except = PP.getIdentifierInfo("__except");
+
+ return Ident__except;
+}
+
+Parser::Parser(Preprocessor &pp, Sema &actions, bool SkipFunctionBodies)
: PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
GreaterThanIsOperator(true), ColonIsSacred(false),
- InMessageExpression(false), TemplateParameterDepth(0) {
+ InMessageExpression(false), TemplateParameterDepth(0),
+ SkipFunctionBodies(SkipFunctionBodies) {
Tok.setKind(tok::eof);
Actions.CurScope = 0;
NumCachedScopes = 0;
ParenCount = BracketCount = BraceCount = 0;
- ObjCImpDecl = 0;
+ CurParsedObjCImpl = 0;
// Add #pragma handlers. These are removed and destroyed in the
// destructor.
@@ -56,10 +65,13 @@ Parser::Parser(Preprocessor &pp, Sema &actions)
WeakHandler.reset(new PragmaWeakHandler(actions));
PP.AddPragmaHandler(WeakHandler.get());
+ RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler(actions));
+ PP.AddPragmaHandler(RedefineExtnameHandler.get());
+
FPContractHandler.reset(new PragmaFPContractHandler(actions, *this));
PP.AddPragmaHandler("STDC", FPContractHandler.get());
- if (getLang().OpenCL) {
+ if (getLangOpts().OpenCL) {
OpenCLExtensionHandler.reset(
new PragmaOpenCLExtensionHandler(actions, *this));
PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
@@ -202,15 +214,14 @@ bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
-bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
- bool StopAtSemi, bool DontConsume,
- bool StopAtCodeCompletion) {
+bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, bool StopAtSemi,
+ bool DontConsume, bool StopAtCodeCompletion) {
// We always want this function to skip at least one token if the first token
// isn't T and if not at EOF.
bool isFirstTokenSkipped = true;
while (1) {
// If we found one of the tokens, stop and return true.
- for (unsigned i = 0; i != NumToks; ++i) {
+ for (unsigned i = 0, NumToks = Toks.size(); i != NumToks; ++i) {
if (Tok.is(Toks[i])) {
if (DontConsume) {
// Noop, don't consume the token.
@@ -276,9 +287,6 @@ bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
ConsumeStringToken();
break;
- case tok::at:
- return false;
-
case tok::semi:
if (StopAtSemi)
return false;
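
The SkipUntil change above is a pure signature refactor: ArrayRef<tok::TokenKind> replaces the pointer-plus-count pair, so call sites can pass one, two, or N stop tokens without declaring a local array (see the TryParseParameterDeclarationClause hunk earlier). A sketch of the same shape in portable C++, with std::initializer_list standing in for llvm::ArrayRef and a toy token stream standing in for the Parser:

#include <cstddef>
#include <initializer_list>
#include <utility>
#include <vector>

enum class Tok { Comma, RParen, Semi, Ident, Eof };

class ToySkipper {
  std::vector<Tok> Toks;
  std::size_t Pos = 0;

public:
  explicit ToySkipper(std::vector<Tok> T) : Toks(std::move(T)) {}
  Tok tok() const { return Pos < Toks.size() ? Toks[Pos] : Tok::Eof; }

  // Returns true if a stop token was found (and left unconsumed),
  // mirroring SkipUntil's DontConsume behaviour at the call sites above.
  bool skipUntil(std::initializer_list<Tok> Stops, bool StopAtSemi = true) {
    for (; tok() != Tok::Eof; ++Pos) {
      for (Tok S : Stops)
        if (tok() == S)
          return true;
      if (StopAtSemi && tok() == Tok::Semi)
        return false;
    }
    return false;
  }
};

// Usage, matching the new two-token call sites:
//   ToySkipper P({Tok::Ident, Tok::Comma});
//   bool Found = P.skipUntil({Tok::Comma, Tok::RParen});
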
@@ -377,8 +385,10 @@ Parser::~Parser() {
UnusedHandler.reset();
PP.RemovePragmaHandler(WeakHandler.get());
WeakHandler.reset();
+ PP.RemovePragmaHandler(RedefineExtnameHandler.get());
+ RedefineExtnameHandler.reset();
- if (getLang().OpenCL) {
+ if (getLangOpts().OpenCL) {
PP.RemovePragmaHandler("OPENCL", OpenCLExtensionHandler.get());
OpenCLExtensionHandler.reset();
PP.RemovePragmaHandler("OPENCL", FPContractHandler.get());
@@ -401,12 +411,12 @@ void Parser::Initialize() {
ConsumeToken();
if (Tok.is(tok::eof) &&
- !getLang().CPlusPlus) // Empty source file is an extension in C
+ !getLangOpts().CPlusPlus) // Empty source file is an extension in C
Diag(Tok, diag::ext_empty_source_file);
// Initialization for Objective-C context sensitive keywords recognition.
// Referenced in Parser::ParseObjCTypeQualifierList.
- if (getLang().ObjC1) {
+ if (getLangOpts().ObjC1) {
ObjCTypeQuals[objc_in] = &PP.getIdentifierTable().get("in");
ObjCTypeQuals[objc_out] = &PP.getIdentifierTable().get("out");
ObjCTypeQuals[objc_inout] = &PP.getIdentifierTable().get("inout");
@@ -421,7 +431,7 @@ void Parser::Initialize() {
Ident_super = &PP.getIdentifierTable().get("super");
- if (getLang().AltiVec) {
+ if (getLangOpts().AltiVec) {
Ident_vector = &PP.getIdentifierTable().get("vector");
Ident_pixel = &PP.getIdentifierTable().get("pixel");
}
@@ -431,11 +441,13 @@ void Parser::Initialize() {
Ident_obsoleted = 0;
Ident_unavailable = 0;
+ Ident__except = 0;
+
Ident__exception_code = Ident__exception_info = Ident__abnormal_termination = 0;
Ident___exception_code = Ident___exception_info = Ident___abnormal_termination = 0;
Ident_GetExceptionCode = Ident_GetExceptionInfo = Ident_AbnormalTermination = 0;
- if(getLang().Borland) {
+ if(getLangOpts().Borland) {
Ident__exception_info = PP.getIdentifierInfo("_exception_info");
Ident___exception_info = PP.getIdentifierInfo("__exception_info");
Ident_GetExceptionInfo = PP.getIdentifierInfo("GetExceptionInformation");
@@ -463,23 +475,30 @@ void Parser::Initialize() {
bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
DelayedCleanupPoint CleanupRAII(TopLevelDeclCleanupPool);
+ // Skip over the EOF token, flagging end of previous input for incremental
+ // processing
+ if (PP.isIncrementalProcessingEnabled() && Tok.is(tok::eof))
+ ConsumeToken();
+
while (Tok.is(tok::annot_pragma_unused))
HandlePragmaUnused();
Result = DeclGroupPtrTy();
if (Tok.is(tok::eof)) {
// Late template parsing can begin.
- if (getLang().DelayedTemplateParsing)
+ if (getLangOpts().DelayedTemplateParsing)
Actions.SetLateTemplateParser(LateTemplateParserCallback, this);
+ if (!PP.isIncrementalProcessingEnabled())
+ Actions.ActOnEndOfTranslationUnit();
+    // else don't tell Sema that we ended parsing: more input might come.
- Actions.ActOnEndOfTranslationUnit();
return true;
}
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX0XAttributes(attrs);
MaybeParseMicrosoftAttributes(attrs);
-
+
Result = ParseExternalDeclaration(attrs);
return false;
}
@@ -534,16 +553,22 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Decl *SingleDecl = 0;
switch (Tok.getKind()) {
+ case tok::annot_pragma_vis:
+ HandlePragmaVisibility();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_pack:
+ HandlePragmaPack();
+ return DeclGroupPtrTy();
case tok::semi:
- if (!getLang().CPlusPlus0x)
- Diag(Tok, diag::ext_top_level_semi)
- << FixItHint::CreateRemoval(Tok.getLocation());
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_top_level_semi : diag::ext_top_level_semi)
+ << FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
// TODO: Invoke action for top-level semicolon.
return DeclGroupPtrTy();
case tok::r_brace:
- Diag(Tok, diag::err_expected_external_declaration);
+ Diag(Tok, diag::err_extraneous_closing_brace);
ConsumeBrace();
return DeclGroupPtrTy();
case tok::eof:
@@ -572,10 +597,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
}
case tok::at:
return ParseObjCAtDirectives();
- break;
case tok::minus:
case tok::plus:
- if (!getLang().ObjC1) {
+ if (!getLangOpts().ObjC1) {
Diag(Tok, diag::err_expected_external_declaration);
ConsumeToken();
return DeclGroupPtrTy();
@@ -584,7 +608,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
break;
case tok::code_completion:
Actions.CodeCompleteOrdinaryName(getCurScope(),
- ObjCImpDecl? Sema::PCC_ObjCImplementation
+ CurParsedObjCImpl? Sema::PCC_ObjCImplementation
: Sema::PCC_Namespace);
cutOffParsing();
return DeclGroupPtrTy();
@@ -605,7 +629,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::kw_static:
// Parse (then ignore) 'static' prior to a template instantiation. This is
// a GCC extension that we intentionally do not support.
- if (getLang().CPlusPlus && NextToken().is(tok::kw_template)) {
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_template)) {
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 0;
SourceLocation DeclEnd;
@@ -615,7 +639,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
goto dont_know;
case tok::kw_inline:
- if (getLang().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
tok::TokenKind NextKind = NextToken().getKind();
// Inline namespaces. Allowed as an extension even in C++03.
@@ -638,13 +662,17 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
goto dont_know;
case tok::kw_extern:
- if (getLang().CPlusPlus && NextToken().is(tok::kw_template)) {
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_template)) {
// Extern templates
SourceLocation ExternLoc = ConsumeToken();
SourceLocation TemplateLoc = ConsumeToken();
+ Diag(ExternLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_extern_template :
+ diag::ext_extern_template) << SourceRange(ExternLoc, TemplateLoc);
SourceLocation DeclEnd;
return Actions.ConvertDeclToDeclGroup(
- ParseExplicitInstantiation(ExternLoc, TemplateLoc, DeclEnd));
+ ParseExplicitInstantiation(Declarator::FileContext,
+ ExternLoc, TemplateLoc, DeclEnd));
}
// FIXME: Detect C++ linkage specifications here?
goto dont_know;
@@ -653,9 +681,6 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::kw___if_not_exists:
ParseMicrosoftIfExistsExternalDeclaration();
return DeclGroupPtrTy();
-
- case tok::kw___import_module__:
- return ParseModuleImport();
default:
dont_know:
@@ -677,7 +702,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
/// declarator, continues a declaration or declaration list.
bool Parser::isDeclarationAfterDeclarator() {
// Check for '= delete' or '= default'
- if (getLang().CPlusPlus && Tok.is(tok::equal)) {
+ if (getLangOpts().CPlusPlus && Tok.is(tok::equal)) {
const Token &KW = NextToken();
if (KW.is(tok::kw_default) || KW.is(tok::kw_delete))
return false;
@@ -688,7 +713,7 @@ bool Parser::isDeclarationAfterDeclarator() {
Tok.is(tok::semi) || // int X(); -> not a function def
Tok.is(tok::kw_asm) || // int X() __asm__ -> not a function def
Tok.is(tok::kw___attribute) || // int X() __attr__ -> not a function def
- (getLang().CPlusPlus &&
+ (getLangOpts().CPlusPlus &&
Tok.is(tok::l_paren)); // int X(0) -> not a function def [C++]
}
@@ -700,11 +725,11 @@ bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
return true;
// Handle K&R C argument lists: int X(f) int f; {}
- if (!getLang().CPlusPlus &&
+ if (!getLangOpts().CPlusPlus &&
Declarator.getFunctionTypeInfo().isKNRPrototype())
return isDeclarationSpecifier();
- if (getLang().CPlusPlus && Tok.is(tok::equal)) {
+ if (getLangOpts().CPlusPlus && Tok.is(tok::equal)) {
const Token &KW = NextToken();
return KW.is(tok::kw_default) || KW.is(tok::kw_delete);
}
@@ -747,7 +772,7 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
// ObjC2 allows prefix attributes on class interfaces and protocols.
// FIXME: This still needs better diagnostics. We should only accept
// attributes here, no types, etc.
- if (getLang().ObjC2 && Tok.is(tok::at)) {
+ if (getLangOpts().ObjC2 && Tok.is(tok::at)) {
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
!Tok.isObjCAtKeyword(tok::objc_protocol)) {
@@ -763,18 +788,17 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
if (DS.SetTypeSpecType(DeclSpec::TST_unspecified, AtLoc, PrevSpec, DiagID))
Diag(AtLoc, DiagID) << PrevSpec;
- Decl *TheDecl = 0;
if (Tok.isObjCAtKeyword(tok::objc_protocol))
- TheDecl = ParseObjCAtProtocolDeclaration(AtLoc, DS.getAttributes());
- else
- TheDecl = ParseObjCAtInterfaceDeclaration(AtLoc, DS.getAttributes());
- return Actions.ConvertDeclToDeclGroup(TheDecl);
+ return ParseObjCAtProtocolDeclaration(AtLoc, DS.getAttributes());
+
+ return Actions.ConvertDeclToDeclGroup(
+ ParseObjCAtInterfaceDeclaration(AtLoc, DS.getAttributes()));
}
// If the declspec consisted only of 'extern' and we have a string
// literal following it, this must be a C++ linkage specifier like
// 'extern "C"'.
- if (Tok.is(tok::string_literal) && getLang().CPlusPlus &&
+ if (Tok.is(tok::string_literal) && getLangOpts().CPlusPlus &&
DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
Decl *TheDecl = ParseLinkage(DS, Declarator::FileContext);
@@ -812,7 +836,8 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributes &attrs,
/// decl-specifier-seq[opt] declarator function-try-block
///
Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
- const ParsedTemplateInfo &TemplateInfo) {
+ const ParsedTemplateInfo &TemplateInfo,
+ LateParsedAttrList *LateParsedAttrs) {
// Poison the SEH identifiers so they are flagged as illegal in function bodies
PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
@@ -820,7 +845,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// If this is C90 and the declspecs were completely missing, fudge in an
// implicit int. We do this here because this is the only place where
// declaration-specifiers are completely optional in the grammar.
- if (getLang().ImplicitInt && D.getDeclSpec().isEmpty()) {
+ if (getLangOpts().ImplicitInt && D.getDeclSpec().isEmpty()) {
const char *PrevSpec;
unsigned DiagID;
D.getMutableDeclSpec().SetTypeSpecType(DeclSpec::TST_int,
@@ -835,11 +860,10 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
if (FTI.isKNRPrototype())
ParseKNRParamDeclarations(D);
-
// We should have either an opening brace or, in a C++ constructor,
// a colon.
if (Tok.isNot(tok::l_brace) &&
- (!getLang().CPlusPlus ||
+ (!getLangOpts().CPlusPlus ||
(Tok.isNot(tok::colon) && Tok.isNot(tok::kw_try) &&
Tok.isNot(tok::equal)))) {
Diag(Tok, diag::err_expected_fn_body);
@@ -852,9 +876,22 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
return 0;
}
+ // Check to make sure that any normal attributes are allowed to be on
+ // a definition. Late parsed attributes are checked at the end.
+ if (Tok.isNot(tok::equal)) {
+ AttributeList *DtorAttrs = D.getAttributes();
+ while (DtorAttrs) {
+ if (!IsThreadSafetyAttribute(DtorAttrs->getName()->getName())) {
+ Diag(DtorAttrs->getLoc(), diag::warn_attribute_on_function_definition)
+ << DtorAttrs->getName()->getName();
+ }
+ DtorAttrs = DtorAttrs->getNext();
+ }
+ }
+
// In delayed template parsing mode, for function template we consume the
// tokens and store them for late parsing at the end of the translation unit.
- if (getLang().DelayedTemplateParsing &&
+ if (getLangOpts().DelayedTemplateParsing &&
TemplateInfo.Kind == ParsedTemplateInfo::Template) {
MultiTemplateParamsArg TemplateParameterLists(Actions,
TemplateInfo.TemplateParams->data(),
@@ -863,14 +900,14 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
Scope *ParentScope = getCurScope()->getParent();
- D.setFunctionDefinition(true);
+ D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = Actions.HandleDeclarator(ParentScope, D,
move(TemplateParameterLists));
D.complete(DP);
D.getMutableDeclSpec().abort();
if (DP) {
- LateParsedTemplatedFunction *LPT = new LateParsedTemplatedFunction(this, DP);
+ LateParsedTemplatedFunction *LPT = new LateParsedTemplatedFunction(DP);
FunctionDecl *FnD = 0;
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(DP))
@@ -910,7 +947,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
D.getMutableDeclSpec().abort();
if (Tok.is(tok::equal)) {
- assert(getLang().CPlusPlus && "Only C++ function definitions have '='");
+ assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='");
ConsumeToken();
Actions.ActOnFinishFunctionBody(Res, 0, false);
@@ -918,15 +955,17 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
bool Delete = false;
SourceLocation KWLoc;
if (Tok.is(tok::kw_delete)) {
- if (!getLang().CPlusPlus0x)
- Diag(Tok, diag::warn_deleted_function_accepted_as_extension);
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_deleted_function :
+ diag::ext_deleted_function);
KWLoc = ConsumeToken();
Actions.SetDeclDeleted(Res, KWLoc);
Delete = true;
} else if (Tok.is(tok::kw_default)) {
- if (!getLang().CPlusPlus0x)
- Diag(Tok, diag::warn_defaulted_function_accepted_as_extension);
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_defaulted_function :
+ diag::ext_defaulted_function);
KWLoc = ConsumeToken();
Actions.SetDeclDefaulted(Res, KWLoc);
@@ -963,6 +1002,10 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
} else
Actions.ActOnDefaultCtorInitializers(Res);
+ // Late attributes are parsed in the same scope as the function body.
+ if (LateParsedAttrs)
+ ParseLexedAttributeList(*LateParsedAttrs, Res, false, true);
+
return ParseFunctionStatementBody(Res, BodyScope);
}
@@ -1056,18 +1099,19 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
if (Tok.isNot(tok::comma))
break;
+ ParmDeclarator.clear();
+
// Consume the comma.
- ConsumeToken();
+ ParmDeclarator.setCommaLoc(ConsumeToken());
// Parse the next declarator.
- ParmDeclarator.clear();
ParseDeclarator(ParmDeclarator);
}
if (Tok.is(tok::semi)) {
ConsumeToken();
} else {
- Diag(Tok, diag::err_parse_error);
+ Diag(Tok, diag::err_expected_semi_declaration);
// Skip to end of block or statement
SkipUntil(tok::semi, true);
if (Tok.is(tok::semi))
@@ -1087,17 +1131,25 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
/// string-literal
///
Parser::ExprResult Parser::ParseAsmStringLiteral() {
- if (!isTokenStringLiteral()) {
- Diag(Tok, diag::err_expected_string_literal);
- return ExprError();
+ switch (Tok.getKind()) {
+ case tok::string_literal:
+ break;
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ case tok::wide_string_literal: {
+ SourceLocation L = Tok.getLocation();
+ Diag(Tok, diag::err_asm_operand_wide_string_literal)
+ << (Tok.getKind() == tok::wide_string_literal)
+ << SourceRange(L, L);
+ return ExprError();
+ }
+ default:
+ Diag(Tok, diag::err_expected_string_literal);
+ return ExprError();
}
- ExprResult Res(ParseStringLiteralExpression());
- if (Res.isInvalid()) return move(Res);
-
- // TODO: Diagnose: wide string literal in 'asm'
-
- return move(Res);
+ return ParseStringLiteralExpression();
}
/// ParseSimpleAsm
@@ -1178,8 +1230,8 @@ TemplateIdAnnotation *Parser::takeTemplateIdAnnotation(const Token &tok) {
/// as the current tokens, so only call it in contexts where these are invalid.
bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)
- || Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope)) &&
- "Cannot be a type or scope token!");
+ || Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope)
+ || Tok.is(tok::kw_decltype)) && "Cannot be a type or scope token!");
if (Tok.is(tok::kw_typename)) {
// Parse a C++ typename-specifier, e.g., "typename T::type".
@@ -1190,11 +1242,12 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
// simple-template-id
SourceLocation TypenameLoc = ConsumeToken();
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/ParsedType(), false,
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/ParsedType(),
+ /*EnteringContext=*/false,
0, /*IsTypename*/true))
return true;
if (!SS.isSet()) {
- if (getLang().MicrosoftExt)
+ if (getLangOpts().MicrosoftExt)
Diag(Tok.getLocation(), diag::warn_expected_qualified_after_typename);
else
Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
@@ -1218,13 +1271,13 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
ASTTemplateArgsPtr TemplateArgsPtr(Actions,
TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
-
+
Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
- /*FIXME:*/SourceLocation(),
+ TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->TemplateNameLoc,
TemplateId->LAngleLoc,
- TemplateArgsPtr,
+ TemplateArgsPtr,
TemplateId->RAngleLoc);
} else {
Diag(Tok, diag::err_expected_type_name_after_typename)
@@ -1245,7 +1298,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
bool wasScopeAnnotation = Tok.is(tok::annot_cxxscope);
CXXScopeSpec SS;
- if (getLang().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
return true;
@@ -1257,6 +1310,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
&SS, false,
NextToken().is(tok::period),
ParsedType(),
+ /*IsCtorOrDtorName=*/false,
/*NonTrivialTypeSourceInfo*/true,
NeedType ? &CorrectedII : NULL)) {
// A FixIt was applied as a result of typo correction
@@ -1276,7 +1330,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
return false;
}
- if (!getLang().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
// If we're in C, we can't have :: tokens at all (the lexer won't return
// them). If the identifier is not a type, then it can't be scope either,
// just early exit.
@@ -1297,7 +1351,8 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
Template, MemberOfUnknownSpecialization)) {
// Consume the identifier.
ConsumeToken();
- if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateName)) {
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName)) {
// If an unrecoverable error occurred, we need to return true here,
// because the token stream is in a damaged state. We may not return
// a valid identifier.
@@ -1354,11 +1409,11 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
/// Note that this routine emits an error if you call it with ::new or ::delete
/// as the current tokens, so only call it in contexts where these are invalid.
bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
- assert(getLang().CPlusPlus &&
+ assert(getLangOpts().CPlusPlus &&
"Call sites of this function should be guarded by checking for C++");
assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
- (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)))&&
- "Cannot be a type or scope token!");
+ (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) ||
+ Tok.is(tok::kw_decltype)) && "Cannot be a type or scope token!");
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
@@ -1382,18 +1437,31 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
return false;
}
-bool Parser::isTokenEqualOrMistypedEqualEqual(unsigned DiagID) {
- if (Tok.is(tok::equalequal)) {
- // We have '==' in a context that we would expect a '='.
- // The user probably made a typo, intending to type '='. Emit diagnostic,
- // fixit hint to turn '==' -> '=' and continue as if the user typed '='.
- Diag(Tok, DiagID)
- << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()),
- getTokenSimpleSpelling(tok::equal));
+bool Parser::isTokenEqualOrEqualTypo() {
+ tok::TokenKind Kind = Tok.getKind();
+ switch (Kind) {
+ default:
+ return false;
+ case tok::ampequal: // &=
+ case tok::starequal: // *=
+ case tok::plusequal: // +=
+ case tok::minusequal: // -=
+ case tok::exclaimequal: // !=
+ case tok::slashequal: // /=
+ case tok::percentequal: // %=
+ case tok::lessequal: // <=
+ case tok::lesslessequal: // <<=
+ case tok::greaterequal: // >=
+ case tok::greatergreaterequal: // >>=
+ case tok::caretequal: // ^=
+ case tok::pipeequal: // |=
+ case tok::equalequal: // ==
+ Diag(Tok, diag::err_invalid_token_after_declarator_suggest_equal)
+ << getTokenSimpleSpelling(Kind)
+ << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()), "=");
+ case tok::equal:
return true;
}
-
- return Tok.is(tok::equal);
}
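
isTokenEqualOrEqualTypo generalizes the old equalequal check: any compound-assignment operator (or '==') right after a declarator is diagnosed once, with a fixit suggesting '=', and parsing continues as if '=' had been written. A compilable sketch of that diagnose-then-fall-through recovery, with toy tokens and a string in place of clang's diagnostics:

#include <cstdio>
#include <string>

enum class Tok { Equal, EqualEqual, PlusEqual, LessLessEqual, Semi };

// An operator like '==' or '+=' after a declarator is reported once, then
// the parse proceeds exactly as if '=' had been written (the fall-through).
static bool isTokenEqualOrEqualTypo(Tok K, std::string &Note) {
  switch (K) {
  case Tok::EqualEqual:
  case Tok::PlusEqual:
  case Tok::LessLessEqual:
    Note = "invalid token after declarator; did you mean '='?";
    [[fallthrough]];            // recover: treat the token as '='
  case Tok::Equal:
    return true;
  default:
    return false;
  }
}

int main() {
  std::string Note;
  if (isTokenEqualOrEqualTypo(Tok::EqualEqual, Note))  // e.g. 'int x == 4;'
    std::printf("parsing initializer; %s\n", Note.c_str());
}
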
SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
@@ -1455,98 +1523,134 @@ void Parser::CodeCompleteNaturalLanguage() {
Actions.CodeCompleteNaturalLanguage();
}
-bool Parser::ParseMicrosoftIfExistsCondition(bool& Result) {
+bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
assert((Tok.is(tok::kw___if_exists) || Tok.is(tok::kw___if_not_exists)) &&
"Expected '__if_exists' or '__if_not_exists'");
- Token Condition = Tok;
- SourceLocation IfExistsLoc = ConsumeToken();
+ Result.IsIfExists = Tok.is(tok::kw___if_exists);
+ Result.KeywordLoc = ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.consumeOpen()) {
- Diag(Tok, diag::err_expected_lparen_after) << IfExistsLoc;
- SkipUntil(tok::semi);
+ Diag(Tok, diag::err_expected_lparen_after)
+ << (Result.IsIfExists? "__if_exists" : "__if_not_exists");
return true;
}
// Parse nested-name-specifier.
- CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
+ ParseOptionalCXXScopeSpecifier(Result.SS, ParsedType(),
+ /*EnteringContext=*/false);
// Check nested-name specifier.
- if (SS.isInvalid()) {
- SkipUntil(tok::semi);
+ if (Result.SS.isInvalid()) {
+ T.skipToEnd();
return true;
}
- // Parse the unqualified-id.
- UnqualifiedId Name;
- if (ParseUnqualifiedId(SS, false, true, true, ParsedType(), Name)) {
- SkipUntil(tok::semi);
+ // Parse the unqualified-id.
+ SourceLocation TemplateKWLoc; // FIXME: parsed, but unused.
+ if (ParseUnqualifiedId(Result.SS, false, true, true, ParsedType(),
+ TemplateKWLoc, Result.Name)) {
+ T.skipToEnd();
return true;
}
- T.consumeClose();
- if (T.getCloseLocation().isInvalid())
+ if (T.consumeClose())
return true;
-
+
// Check if the symbol exists.
- bool Exist = Actions.CheckMicrosoftIfExistsSymbol(SS, Name);
+ switch (Actions.CheckMicrosoftIfExistsSymbol(getCurScope(), Result.KeywordLoc,
+ Result.IsIfExists, Result.SS,
+ Result.Name)) {
+ case Sema::IER_Exists:
+ Result.Behavior = Result.IsIfExists ? IEB_Parse : IEB_Skip;
+ break;
- Result = ((Condition.is(tok::kw___if_exists) && Exist) ||
- (Condition.is(tok::kw___if_not_exists) && !Exist));
+ case Sema::IER_DoesNotExist:
+ Result.Behavior = !Result.IsIfExists ? IEB_Parse : IEB_Skip;
+ break;
+
+ case Sema::IER_Dependent:
+ Result.Behavior = IEB_Dependent;
+ break;
+
+ case Sema::IER_Error:
+ return true;
+ }
return false;
}
void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
- bool Result;
+ IfExistsCondition Result;
if (ParseMicrosoftIfExistsCondition(Result))
return;
- if (Tok.isNot(tok::l_brace)) {
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
Diag(Tok, diag::err_expected_lbrace);
return;
}
- ConsumeBrace();
- // Condition is false skip all inside the {}.
- if (!Result) {
- SkipUntil(tok::r_brace, false);
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse declarations below.
+ break;
+
+ case IEB_Dependent:
+ llvm_unreachable("Cannot have a dependent external declaration");
+
+ case IEB_Skip:
+ Braces.skipToEnd();
return;
}
- // Condition is true, parse the declaration.
- while (Tok.isNot(tok::r_brace)) {
+ // Parse the declarations.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX0XAttributes(attrs);
MaybeParseMicrosoftAttributes(attrs);
DeclGroupPtrTy Result = ParseExternalDeclaration(attrs);
if (Result && !getCurScope()->getParent())
Actions.getASTConsumer().HandleTopLevelDecl(Result.get());
- }
-
- if (Tok.isNot(tok::r_brace)) {
- Diag(Tok, diag::err_expected_rbrace);
- return;
- }
- ConsumeBrace();
+ }
+ Braces.consumeClose();
}
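
The rewrite above upgrades __if_exists/__if_not_exists from a bool to a three-valued answer, because inside a template the symbol's existence can be unknowable until instantiation. A small sketch of the resulting dispatch; the enum names mirror the patch, the bodies are placeholders:

#include <cstdio>

enum IfExistsBehavior {
  IEB_Parse,     // symbol status known: parse the braced body normally
  IEB_Skip,      // symbol status known: balanced-skip the braced body
  IEB_Dependent  // inside a template: defer until instantiation
};

static void handleIfExistsBody(IfExistsBehavior B) {
  switch (B) {
  case IEB_Parse:
    std::puts("parse the declarations between the braces");
    break;
  case IEB_Skip:
    std::puts("skip to the matching '}'");   // Braces.skipToEnd() above
    break;
  case IEB_Dependent:
    std::puts("condition depends on a template parameter; defer");
    break;
  }
}

int main() { handleIfExistsBody(IEB_Dependent); }
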
-Parser::DeclGroupPtrTy Parser::ParseModuleImport() {
- assert(Tok.is(tok::kw___import_module__) &&
+Parser::DeclGroupPtrTy Parser::ParseModuleImport(SourceLocation AtLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc___experimental_modules_import) &&
"Improper start to module import");
SourceLocation ImportLoc = ConsumeToken();
- // Parse the module name.
- if (!Tok.is(tok::identifier)) {
- Diag(Tok, diag::err_module_expected_ident);
- SkipUntil(tok::semi);
- return DeclGroupPtrTy();
- }
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
- IdentifierInfo &ModuleName = *Tok.getIdentifierInfo();
- SourceLocation ModuleNameLoc = ConsumeToken();
- DeclResult Import = Actions.ActOnModuleImport(ImportLoc, ModuleName, ModuleNameLoc);
+ // Parse the module path.
+ do {
+ if (!Tok.is(tok::identifier)) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteModuleImport(ImportLoc, Path);
+ ConsumeCodeCompletionToken();
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ Diag(Tok, diag::err_module_expected_ident);
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ // Record this part of the module path.
+ Path.push_back(std::make_pair(Tok.getIdentifierInfo(), Tok.getLocation()));
+ ConsumeToken();
+
+ if (Tok.is(tok::period)) {
+ ConsumeToken();
+ continue;
+ }
+
+ break;
+ } while (true);
+
+ DeclResult Import = Actions.ActOnModuleImport(AtLoc, ImportLoc, Path);
ExpectAndConsumeSemi(diag::err_module_expected_semi);
if (Import.isInvalid())
return DeclGroupPtrTy();
@@ -1554,63 +1658,43 @@ Parser::DeclGroupPtrTy Parser::ParseModuleImport() {
return Actions.ConvertDeclToDeclGroup(Import.get());
}
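
ParseModuleImport now accepts a dotted module path ('a.b.c') rather than a single identifier, collecting every component before acting on the import. A self-contained sketch of that grammar (one identifier, then zero or more '.' identifier pairs) over a plain string; clang does this on the token stream and records IdentifierInfo/SourceLocation pairs instead of strings:

#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

static bool parseModulePath(const std::string &Text,
                            std::vector<std::string> &Path) {
  std::size_t I = 0, N = Text.size();
  while (true) {
    std::size_t Start = I;
    while (I < N && (std::isalnum((unsigned char)Text[I]) || Text[I] == '_'))
      ++I;
    if (I == Start || std::isdigit((unsigned char)Text[Start]))
      return false;                  // analogue of err_module_expected_ident
    Path.push_back(Text.substr(Start, I - Start));
    if (I < N && Text[I] == '.') {   // '.' continues the module path
      ++I;
      continue;
    }
    return I == N;                   // anything else after a component fails
  }
}

int main() {
  std::vector<std::string> Path;
  if (parseModulePath("std.vector", Path))
    for (const std::string &P : Path)
      std::cout << P << '\n';        // prints "std", then "vector"
}
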
-bool Parser::BalancedDelimiterTracker::consumeOpen() {
- // Try to consume the token we are holding
- if (P.Tok.is(Kind)) {
- P.QuantityTracker.push(Kind);
- Cleanup = true;
- if (P.QuantityTracker.getDepth(Kind) < MaxDepth) {
- LOpen = P.ConsumeAnyToken();
- return false;
- } else {
- P.Diag(P.Tok, diag::err_parser_impl_limit_overflow);
- P.SkipUntil(tok::eof);
- }
- }
- return true;
+bool Parser::BalancedDelimiterTracker::diagnoseOverflow() {
+ P.Diag(P.Tok, diag::err_parser_impl_limit_overflow);
+ P.SkipUntil(tok::eof);
+ return true;
}
bool Parser::BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
const char *Msg,
tok::TokenKind SkipToToc ) {
LOpen = P.Tok.getLocation();
- if (!P.ExpectAndConsume(Kind, DiagID, Msg, SkipToToc)) {
- P.QuantityTracker.push(Kind);
- Cleanup = true;
- if (P.QuantityTracker.getDepth(Kind) < MaxDepth) {
- return false;
- } else {
- P.Diag(P.Tok, diag::err_parser_impl_limit_overflow);
- P.SkipUntil(tok::eof);
- }
- }
- return true;
+ if (P.ExpectAndConsume(Kind, DiagID, Msg, SkipToToc))
+ return true;
+
+ if (getDepth() < MaxDepth)
+ return false;
+
+ return diagnoseOverflow();
}
-bool Parser::BalancedDelimiterTracker::consumeClose() {
- if (P.Tok.is(Close)) {
- LClose = P.ConsumeAnyToken();
- if (Cleanup)
- P.QuantityTracker.pop(Kind);
-
- Cleanup = false;
- return false;
- } else {
- const char *LHSName = "unknown";
- diag::kind DID = diag::err_parse_error;
- switch (Close) {
- default: break;
- case tok::r_paren : LHSName = "("; DID = diag::err_expected_rparen; break;
- case tok::r_brace : LHSName = "{"; DID = diag::err_expected_rbrace; break;
- case tok::r_square: LHSName = "["; DID = diag::err_expected_rsquare; break;
- case tok::greater: LHSName = "<"; DID = diag::err_expected_greater; break;
- case tok::greatergreatergreater:
- LHSName = "<<<"; DID = diag::err_expected_ggg; break;
- }
- P.Diag(P.Tok, DID);
- P.Diag(LOpen, diag::note_matching) << LHSName;
- if (P.SkipUntil(Close))
- LClose = P.Tok.getLocation();
+bool Parser::BalancedDelimiterTracker::diagnoseMissingClose() {
+ assert(!P.Tok.is(Close) && "Should have consumed closing delimiter");
+
+ const char *LHSName = "unknown";
+ diag::kind DID;
+ switch (Close) {
+ default: llvm_unreachable("Unexpected balanced token");
+ case tok::r_paren : LHSName = "("; DID = diag::err_expected_rparen; break;
+ case tok::r_brace : LHSName = "{"; DID = diag::err_expected_rbrace; break;
+ case tok::r_square: LHSName = "["; DID = diag::err_expected_rsquare; break;
}
+ P.Diag(P.Tok, DID);
+ P.Diag(LOpen, diag::note_matching) << LHSName;
+ if (P.SkipUntil(Close))
+ LClose = P.Tok.getLocation();
return true;
}
+
+void Parser::BalancedDelimiterTracker::skipToEnd() {
+ P.SkipUntil(Close, false);
+}
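
The BalancedDelimiterTracker rework above drops the shared QuantityTracker, tracks depth directly, and splits the two failure paths into named helpers, so consumeOpen, expectAndConsume, and consumeClose stay short; skipToEnd provides the balanced skip used by the __if_exists code earlier. A toy equivalent that keeps the patch's false-means-success convention; the depth bound is an assumption, not clang's actual limit:

#include <cstdio>

class ToyDelimTracker {
  static const int MaxDepth = 256;   // illustrative bound only
  int &Depth;                        // shared across trackers of one kind
  char Open, Close;

public:
  ToyDelimTracker(int &D, char O, char C) : Depth(D), Open(O), Close(C) {}

  bool consumeOpen(char Tok) {
    if (Tok != Open)
      return true;                   // not our delimiter
    if (++Depth >= MaxDepth)
      return diagnoseOverflow();
    return false;                    // success
  }
  bool consumeClose(char Tok) {      // assumes consumeOpen succeeded
    --Depth;
    if (Tok == Close)
      return false;                  // success
    return diagnoseMissingClose();
  }
  bool diagnoseOverflow() {
    std::fprintf(stderr, "error: nesting exceeds implementation limit\n");
    return true;
  }
  bool diagnoseMissingClose() {
    std::fprintf(stderr, "error: expected '%c'\n", Close);
    return true;                     // callers then often skipToEnd()
  }
};
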
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
index 632c0de..3863adb 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
@@ -14,6 +14,8 @@
//===----------------------------------------------------------------------===//
#include "clang/Rewrite/FixItRewriter.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -29,16 +31,19 @@ FixItRewriter::FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
const LangOptions &LangOpts,
FixItOptions *FixItOpts)
: Diags(Diags),
+ Editor(SourceMgr, LangOpts),
Rewrite(SourceMgr, LangOpts),
FixItOpts(FixItOpts),
- NumFailures(0) {
+ NumFailures(0),
+ PrevDiagSilenced(false) {
+ OwnsClient = Diags.ownsClient();
Client = Diags.takeClient();
Diags.setClient(this);
}
FixItRewriter::~FixItRewriter() {
Diags.takeClient();
- Diags.setClient(Client);
+ Diags.setClient(Client, OwnsClient);
}
bool FixItRewriter::WriteFixedFile(FileID ID, raw_ostream &OS) {
@@ -49,26 +54,57 @@ bool FixItRewriter::WriteFixedFile(FileID ID, raw_ostream &OS) {
return false;
}
-bool FixItRewriter::WriteFixedFiles() {
+namespace {
+
+class RewritesReceiver : public edit::EditsReceiver {
+ Rewriter &Rewrite;
+
+public:
+ RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
+
+ virtual void insert(SourceLocation loc, StringRef text) {
+ Rewrite.InsertText(loc, text);
+ }
+ virtual void replace(CharSourceRange range, StringRef text) {
+ Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
+ }
+};
+
+}
+
+bool FixItRewriter::WriteFixedFiles(
+ std::vector<std::pair<std::string, std::string> > *RewrittenFiles) {
if (NumFailures > 0 && !FixItOpts->FixWhatYouCan) {
Diag(FullSourceLoc(), diag::warn_fixit_no_changes);
return true;
}
+ RewritesReceiver Rec(Rewrite);
+ Editor.applyRewrites(Rec);
+
for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first);
- std::string Filename = FixItOpts->RewriteFilename(Entry->getName());
+ int fd;
+ std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd);
std::string Err;
- llvm::raw_fd_ostream OS(Filename.c_str(), Err,
- llvm::raw_fd_ostream::F_Binary);
+ OwningPtr<llvm::raw_fd_ostream> OS;
+ if (fd != -1) {
+ OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
+ } else {
+ OS.reset(new llvm::raw_fd_ostream(Filename.c_str(), Err,
+ llvm::raw_fd_ostream::F_Binary));
+ }
if (!Err.empty()) {
Diags.Report(clang::diag::err_fe_unable_to_open_output)
<< Filename << Err;
continue;
}
RewriteBuffer &RewriteBuf = I->second;
- RewriteBuf.write(OS);
- OS.flush();
+ RewriteBuf.write(*OS);
+ OS->flush();
+
+ if (RewrittenFiles)
+ RewrittenFiles->push_back(std::make_pair(Entry->getName(), Filename));
}
return false;
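
WriteFixedFiles now replays accumulated edits through an edit::EditsReceiver before writing buffers out, so the edit engine never learns about the Rewriter. A minimal sketch of that seam; the offset-based interface here is hypothetical, clang's works in SourceLocation and CharSourceRange terms:

#include <iostream>
#include <string>

// The edit engine publishes abstract callbacks and never sees the
// concrete buffer implementation behind them.
struct EditsReceiver {
  virtual ~EditsReceiver() = default;
  virtual void insert(unsigned Offset, const std::string &Text) = 0;
  virtual void replace(unsigned Begin, unsigned End,
                       const std::string &Text) = 0;
};

// Stand-in for the patch's RewritesReceiver, logging instead of rewriting.
struct PrintingReceiver : EditsReceiver {
  void insert(unsigned Offset, const std::string &Text) override {
    std::cout << "insert @" << Offset << ": " << Text << '\n';
  }
  void replace(unsigned Begin, unsigned End,
               const std::string &Text) override {
    std::cout << "replace [" << Begin << ',' << End << "): " << Text << '\n';
  }
};

int main() {
  PrintingReceiver R;
  R.replace(10, 14, "getLangOpts()");  // the kind of call applyRewrites makes
}

Any number of receivers can observe the same set of edits; the patch's adaptor forwards into a Rewriter, but a test harness could log instead.
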
@@ -83,58 +119,63 @@ void FixItRewriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
// Default implementation (Warnings/errors count).
DiagnosticConsumer::HandleDiagnostic(DiagLevel, Info);
- Client->HandleDiagnostic(DiagLevel, Info);
+ if (!FixItOpts->Silent ||
+ DiagLevel >= DiagnosticsEngine::Error ||
+ (DiagLevel == DiagnosticsEngine::Note && !PrevDiagSilenced) ||
+ (DiagLevel > DiagnosticsEngine::Note && Info.getNumFixItHints())) {
+ Client->HandleDiagnostic(DiagLevel, Info);
+ PrevDiagSilenced = false;
+ } else {
+ PrevDiagSilenced = true;
+ }
// Skip over any diagnostics that are ignored or notes.
if (DiagLevel <= DiagnosticsEngine::Note)
return;
+ // Skip over errors if we are only fixing warnings.
+ if (DiagLevel >= DiagnosticsEngine::Error && FixItOpts->FixOnlyWarnings) {
+ ++NumFailures;
+ return;
+ }
// Make sure that we can perform all of the modifications we
// wish to perform in this diagnostic.
- bool CanRewrite = Info.getNumFixItHints() > 0;
+ edit::Commit commit(Editor);
for (unsigned Idx = 0, Last = Info.getNumFixItHints();
Idx < Last; ++Idx) {
const FixItHint &Hint = Info.getFixItHint(Idx);
- if (Hint.RemoveRange.isValid() &&
- Rewrite.getRangeSize(Hint.RemoveRange) == -1) {
- CanRewrite = false;
- break;
+
+ if (Hint.CodeToInsert.empty()) {
+ if (Hint.InsertFromRange.isValid())
+ commit.insertFromRange(Hint.RemoveRange.getBegin(),
+ Hint.InsertFromRange, /*afterToken=*/false,
+ Hint.BeforePreviousInsertions);
+ else
+ commit.remove(Hint.RemoveRange);
+ } else {
+ if (Hint.RemoveRange.isTokenRange() ||
+ Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd())
+ commit.replace(Hint.RemoveRange, Hint.CodeToInsert);
+ else
+ commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
+ /*afterToken=*/false, Hint.BeforePreviousInsertions);
}
}
+ bool CanRewrite = Info.getNumFixItHints() > 0 && commit.isCommitable();
if (!CanRewrite) {
if (Info.getNumFixItHints() > 0)
Diag(Info.getLocation(), diag::note_fixit_in_macro);
// If this was an error, refuse to perform any rewriting.
- if (DiagLevel == DiagnosticsEngine::Error ||
- DiagLevel == DiagnosticsEngine::Fatal) {
+ if (DiagLevel >= DiagnosticsEngine::Error) {
if (++NumFailures == 1)
Diag(Info.getLocation(), diag::note_fixit_unfixed_error);
}
return;
}
-
- bool Failed = false;
- for (unsigned Idx = 0, Last = Info.getNumFixItHints();
- Idx < Last; ++Idx) {
- const FixItHint &Hint = Info.getFixItHint(Idx);
-
- if (Hint.CodeToInsert.empty()) {
- // We're removing code.
- if (Rewrite.RemoveText(Hint.RemoveRange))
- Failed = true;
- continue;
- }
-
- // We're replacing code.
- if (Rewrite.ReplaceText(Hint.RemoveRange.getBegin(),
- Rewrite.getRangeSize(Hint.RemoveRange),
- Hint.CodeToInsert))
- Failed = true;
- }
-
- if (Failed) {
+
+ if (!Editor.commit(commit)) {
++NumFailures;
Diag(Info.getLocation(), diag::note_fixit_failed);
return;
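
The HandleDiagnostic rewrite above replaces per-hint, partial application with an all-or-nothing edit::Commit: every fixit is staged, the group is validated, and nothing touches the buffer unless the whole commit is committable. A compact sketch of that discipline over toy byte-offset edits; the real validation also rejects edits inside macro expansions:

#include <string>
#include <utility>
#include <vector>

// Assumptions, unlike the real edit::Commit: edits are staged
// left-to-right, don't overlap, and the only validity check is a
// well-formed range.
struct ToyCommit {
  struct Edit { unsigned Begin, End; std::string Text; };
  std::vector<Edit> Edits;
  bool Commitable = true;

  void replace(unsigned B, unsigned E, std::string T) {
    if (B > E) { Commitable = false; return; }  // stage, but poison the group
    Edits.push_back(Edit{B, E, std::move(T)});
  }
  bool isCommitable() const { return Commitable && !Edits.empty(); }
};

// All-or-nothing application, mirroring 'if (!Editor.commit(commit))' above.
static bool applyAll(std::string &Buf, const ToyCommit &C) {
  if (!C.isCommitable())
    return false;
  // Apply right-to-left so earlier offsets stay valid (relies on the
  // left-to-right staging assumption above).
  for (auto It = C.Edits.rbegin(); It != C.Edits.rend(); ++It)
    Buf.replace(It->Begin, It->End - It->Begin, It->Text);
  return true;
}

int main() {
  ToyCommit C;
  C.replace(0, 4, "its");               // e.g. fix "it's" -> "its"
  std::string Buf = "it's rewritten form";
  return applyAll(Buf, C) ? 0 : 1;
}
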
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
index f00e7fd..1753325 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
@@ -12,6 +12,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Parse/Parser.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
@@ -21,6 +22,8 @@
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/FileSystem.h"
+
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -45,7 +48,10 @@ ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI,
namespace {
class FixItRewriteInPlace : public FixItOptions {
public:
- std::string RewriteFilename(const std::string &Filename) { return Filename; }
+ std::string RewriteFilename(const std::string &Filename, int &fd) {
+ fd = -1;
+ return Filename;
+ }
};
class FixItActionSuffixInserter : public FixItOptions {
@@ -57,13 +63,27 @@ public:
this->FixWhatYouCan = FixWhatYouCan;
}
- std::string RewriteFilename(const std::string &Filename) {
- llvm::SmallString<128> Path(Filename);
+ std::string RewriteFilename(const std::string &Filename, int &fd) {
+ fd = -1;
+ SmallString<128> Path(Filename);
llvm::sys::path::replace_extension(Path,
NewSuffix + llvm::sys::path::extension(Path));
return Path.str();
}
};
+
+class FixItRewriteToTemp : public FixItOptions {
+public:
+ std::string RewriteFilename(const std::string &Filename, int &fd) {
+ SmallString<128> Path;
+ Path = llvm::sys::path::filename(Filename);
+ Path += "-%%%%%%%%";
+ Path += llvm::sys::path::extension(Filename);
+ SmallString<128> NewPath;
+ llvm::sys::fs::unique_file(Path.str(), fd, NewPath);
+ return NewPath.str();
+ }
+};
} // end anonymous namespace
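
FixItRewriteToTemp writes fixed output to a uniquely named sibling file and returns the already-open descriptor, which WriteFixedFiles then prefers over reopening by name, avoiding a create/open race. A POSIX sketch of the same policy, with mkstemp standing in for llvm::sys::fs::unique_file; unlike the patch, the sketch does not preserve the file extension:

#include <stdlib.h>
#include <string>

static std::string rewriteToTemp(const std::string &Filename, int &fd) {
  std::string Templ = Filename + "-XXXXXX";  // mkstemp fills in the X's
  fd = mkstemp(&Templ[0]);                   // fd == -1 on failure, as above
  return Templ;
}
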
bool FixItAction::BeginSourceFileAction(CompilerInstance &CI,
@@ -86,16 +106,63 @@ void FixItAction::EndSourceFileAction() {
Rewriter->WriteFixedFiles();
}
+bool FixItRecompile::BeginInvocation(CompilerInstance &CI) {
+
+ std::vector<std::pair<std::string, std::string> > RewrittenFiles;
+ bool err = false;
+ {
+ const FrontendOptions &FEOpts = CI.getFrontendOpts();
+ OwningPtr<FrontendAction> FixAction(new SyntaxOnlyAction());
+ if (FixAction->BeginSourceFile(CI, FEOpts.Inputs[0])) {
+ OwningPtr<FixItOptions> FixItOpts;
+ if (FEOpts.FixToTemporaries)
+ FixItOpts.reset(new FixItRewriteToTemp());
+ else
+ FixItOpts.reset(new FixItRewriteInPlace());
+ FixItOpts->Silent = true;
+ FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan;
+ FixItOpts->FixOnlyWarnings = FEOpts.FixOnlyWarnings;
+ FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(),
+ CI.getLangOpts(), FixItOpts.get());
+ FixAction->Execute();
+
+ err = Rewriter.WriteFixedFiles(&RewrittenFiles);
+
+ FixAction->EndSourceFile();
+ CI.setSourceManager(0);
+ CI.setFileManager(0);
+ } else {
+ err = true;
+ }
+ }
+ if (err)
+ return false;
+ CI.getDiagnosticClient().clear();
+ CI.getDiagnostics().Reset();
+
+ PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+ PPOpts.RemappedFiles.insert(PPOpts.RemappedFiles.end(),
+ RewrittenFiles.begin(), RewrittenFiles.end());
+ PPOpts.RemappedFilesKeepOriginalName = false;
+
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// Preprocessor Actions
//===----------------------------------------------------------------------===//
ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
- if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp"))
+ if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) {
+ if (CI.getLangOpts().ObjCNonFragileABI)
+ return CreateModernObjCRewriter(InFile, OS,
+ CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getDiagnosticOpts().NoRewriteMacros);
return CreateObjCRewriter(InFile, OS,
CI.getDiagnostics(), CI.getLangOpts(),
CI.getDiagnosticOpts().NoRewriteMacros);
+ }
return 0;
}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp
index 6a89265..3d190ab 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp
@@ -55,7 +55,7 @@ ASTConsumer* clang::CreateHTMLPrinter(raw_ostream *OS,
}
void HTMLPrinter::Initialize(ASTContext &context) {
- R.setSourceMgr(context.getSourceManager(), context.getLangOptions());
+ R.setSourceMgr(context.getSourceManager(), context.getLangOpts());
}
void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) {
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
index ba39602..dc39dde 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -209,7 +209,7 @@ std::string html::EscapeText(const std::string& s, bool EscapeSpaces,
static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
unsigned B, unsigned E) {
- llvm::SmallString<256> Str;
+ SmallString<256> Str;
llvm::raw_svector_ostream OS(Str);
OS << "<tr><td class=\"num\" id=\"LN"
@@ -292,7 +292,7 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
" body { font-family:Helvetica, sans-serif; font-size:10pt }\n"
" h1 { font-size:14pt }\n"
" .code { border-collapse:collapse; width:100%; }\n"
- " .code { font-family: \"Andale Mono\", monospace; font-size:10pt }\n"
+ " .code { font-family: \"Monospace\", monospace; font-size:10pt }\n"
" .code { line-height: 1.2em }\n"
" .comment { color: green; font-style: oblique }\n"
" .keyword { color: blue }\n"
@@ -360,7 +360,7 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
const SourceManager &SM = PP.getSourceManager();
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
- Lexer L(FID, FromFile, SM, PP.getLangOptions());
+ Lexer L(FID, FromFile, SM, PP.getLangOpts());
const char *BufferStart = L.getBufferStart();
// Inform the preprocessor that we want to retain comments as tokens, so we
@@ -381,7 +381,6 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
default: break;
case tok::identifier:
llvm_unreachable("tok::identifier in raw lexing mode!");
- break;
case tok::raw_identifier: {
// Fill in Result.IdentifierInfo and update the token kind,
// looking up the identifier in the identifier table.
@@ -410,6 +409,7 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
--TokLen;
// FALL THROUGH.
case tok::string_literal:
+ // FIXME: Exclude the optional ud-suffix from the highlighted range.
HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
"<span class='string_literal'>", "</span>");
break;
@@ -450,7 +450,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
std::vector<Token> TokenStream;
const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
- Lexer L(FID, FromFile, SM, PP.getLangOptions());
+ Lexer L(FID, FromFile, SM, PP.getLangOpts());
// Lex all the tokens in raw mode, to avoid entering #includes or expanding
// macros.
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
index d569100..3fa0bdb 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
@@ -66,7 +66,7 @@ static void LexRawTokensFromMainFile(Preprocessor &PP,
// Create a lexer to lex all the tokens of the main file in raw mode. Even
// though it is in raw mode, it will not return comments.
const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
- Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOptions());
+ Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
// Switch on comment lexing because we really do want them.
RawLex.SetCommentRetentionState(true);
@@ -91,7 +91,7 @@ void clang::RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS) {
SourceManager &SM = PP.getSourceManager();
Rewriter Rewrite;
- Rewrite.setSourceMgr(SM, PP.getLangOptions());
+ Rewrite.setSourceMgr(SM, PP.getLangOpts());
RewriteBuffer &RB = Rewrite.getEditBuffer(SM.getMainFileID());
std::vector<Token> RawTokens;
@@ -167,7 +167,7 @@ void clang::RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS) {
// Comment out a whole run of tokens instead of bracketing each one with
// comments. Add a leading space if RawTok didn't have one.
bool HasSpace = RawTok.hasLeadingSpace();
- RB.InsertTextAfter(RawOffs, " /*"+HasSpace);
+ RB.InsertTextAfter(RawOffs, &" /*"[HasSpace]);
unsigned EndPos;
do {
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp
new file mode 100644
index 0000000..57109de
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp
@@ -0,0 +1,7275 @@
+//===--- RewriteModernObjC.cpp - Playground for the code rewriter ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hacks and fun related to the code rewriter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/ASTConsumers.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace clang;
+using llvm::utostr;
+
+namespace {
+ class RewriteModernObjC : public ASTConsumer {
+ protected:
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the
+ __block variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+ };
+
+ enum {
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GC = (1 << 27),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_HAS_DESCRIPTOR = (1 << 29)
+ };
+ static const int OBJC_ABI_VERSION = 7;
+
+ Rewriter Rewrite;
+ DiagnosticsEngine &Diags;
+ const LangOptions &LangOpts;
+ ASTContext *Context;
+ SourceManager *SM;
+ TranslationUnitDecl *TUDecl;
+ FileID MainFileID;
+ const char *MainFileStart, *MainFileEnd;
+ Stmt *CurrentBody;
+ ParentMap *PropParentMap; // created lazily.
+ std::string InFileName;
+ raw_ostream* OutFile;
+ std::string Preamble;
+
+ TypeDecl *ProtocolTypeDecl;
+ VarDecl *GlobalVarDecl;
+ Expr *GlobalConstructionExp;
+ unsigned RewriteFailedDiag;
+ unsigned GlobalBlockRewriteFailedDiag;
+ // ObjC string constant support.
+ unsigned NumObjCStringLiterals;
+ VarDecl *ConstantStringClassReference;
+ RecordDecl *NSStringRecord;
+
+ // ObjC foreach break/continue generation support.
+ int BcLabelCount;
+
+ unsigned TryFinallyContainsReturnDiag;
+ // Needed for super.
+ ObjCMethodDecl *CurMethodDef;
+ RecordDecl *SuperStructDecl;
+ RecordDecl *ConstantStringDecl;
+
+ FunctionDecl *MsgSendFunctionDecl;
+ FunctionDecl *MsgSendSuperFunctionDecl;
+ FunctionDecl *MsgSendStretFunctionDecl;
+ FunctionDecl *MsgSendSuperStretFunctionDecl;
+ FunctionDecl *MsgSendFpretFunctionDecl;
+ FunctionDecl *GetClassFunctionDecl;
+ FunctionDecl *GetMetaClassFunctionDecl;
+ FunctionDecl *GetSuperClassFunctionDecl;
+ FunctionDecl *SelGetUidFunctionDecl;
+ FunctionDecl *CFStringFunctionDecl;
+ FunctionDecl *SuperContructorFunctionDecl;
+ FunctionDecl *CurFunctionDef;
+ FunctionDecl *CurFunctionDeclToDeclareForBlock;
+
+ /* Misc. containers needed for meta-data rewrite. */
+ SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
+ SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCWrittenInterfaces;
+ llvm::SmallPtrSet<TagDecl*, 8> TagsDefinedInIvarDecls;
+ SmallVector<ObjCInterfaceDecl*, 32> ObjCInterfacesSeen;
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ SmallVector<ObjCInterfaceDecl*, 8> DefinedNonLazyClasses;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ llvm::SmallVector<ObjCCategoryDecl*, 8> DefinedNonLazyCategories;
+
+ SmallVector<Stmt *, 32> Stmts;
+ SmallVector<int, 8> ObjCBcLabelNo;
+ // Remember all the @protocol(<expr>) expressions.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
+
+ llvm::DenseSet<uint64_t> CopyDestroyCache;
+
+ // Block expressions.
+ SmallVector<BlockExpr *, 32> Blocks;
+ SmallVector<int, 32> InnerDeclRefsCount;
+ SmallVector<DeclRefExpr *, 32> InnerDeclRefs;
+
+ SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
+
+ // Block related declarations.
+ SmallVector<ValueDecl *, 8> BlockByCopyDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDeclsPtrSet;
+ SmallVector<ValueDecl *, 8> BlockByRefDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDeclsPtrSet;
+ llvm::DenseMap<ValueDecl *, unsigned> BlockByRefDeclNo;
+ llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
+ llvm::SmallPtrSet<VarDecl *, 8> ImportedLocalExternalDecls;
+
+ llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
+ llvm::DenseMap<ObjCInterfaceDecl *,
+ llvm::SmallPtrSet<ObjCIvarDecl *, 8> > ReferencedIvars;
+
+    // This maps an original source AST to its rewritten form. This allows
+ // us to avoid rewriting the same node twice (which is very uncommon).
+ // This is needed to support some of the exotic property rewriting.
+ llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
+
+    // Needed for header files being rewritten.
+ bool IsHeader;
+ bool SilenceRewriteMacroWarning;
+ bool objc_impl_method;
+
+ bool DisableReplaceStmt;
+ class DisableReplaceStmtScope {
+ RewriteModernObjC &R;
+ bool SavedValue;
+
+ public:
+ DisableReplaceStmtScope(RewriteModernObjC &R)
+ : R(R), SavedValue(R.DisableReplaceStmt) {
+ R.DisableReplaceStmt = true;
+ }
+ ~DisableReplaceStmtScope() {
+ R.DisableReplaceStmt = SavedValue;
+ }
+ };
+ void InitializeCommon(ASTContext &context);
+
+ public:
+ llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
+ // Top Level Driver code.
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
+ if (!Class->isThisDeclarationADefinition()) {
+ RewriteForwardClassDecl(D);
+ break;
+ } else {
+ // Keep track of all interface declarations seen.
+ ObjCInterfacesSeen.push_back(Class);
+ break;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*I)) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ RewriteForwardProtocolDecl(D);
+ break;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*I);
+ }
+ return true;
+ }
+ void HandleTopLevelSingleDecl(Decl *D);
+ void HandleDeclInMainFile(Decl *D);
+ RewriteModernObjC(std::string inFile, raw_ostream *OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn);
+
+ ~RewriteModernObjC() {}
+
+ virtual void HandleTranslationUnit(ASTContext &C);
+
+ void ReplaceStmt(Stmt *Old, Stmt *New) {
+ Stmt *ReplacingStmt = ReplacedNodes[Old];
+
+ if (ReplacingStmt)
+ return; // We can't rewrite the same node twice.
+
+ if (DisableReplaceStmt)
+ return;
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceStmt(Old, New)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
+ if (DisableReplaceStmt)
+ return;
+
+ // Measure the old text.
+ int Size = Rewrite.getRangeSize(SrcRange);
+ if (Size == -1) {
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ return;
+ }
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ New->printPretty(S, *Context, 0, PrintingPolicy(LangOpts));
+ const std::string &Str = S.str();
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void InsertText(SourceLocation Loc, StringRef Str,
+ bool InsertAfter = true) {
+ // If insertion succeeded or warning disabled return with no warning.
+ if (!Rewrite.InsertText(Loc, Str, InsertAfter) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
+ }
+
+ void ReplaceText(SourceLocation Start, unsigned OrigLength,
+ StringRef Str) {
+      // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(Start, OrigLength, Str) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
+ }
+
+ // Syntactic Rewriting.
+ void RewriteRecordBody(RecordDecl *RD);
+ void RewriteInclude();
+ void RewriteForwardClassDecl(DeclGroupRef D);
+ void RewriteForwardClassDecl(const llvm::SmallVector<Decl*, 8> &DG);
+ void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString);
+ void RewriteImplementations();
+ void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID);
+ void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
+ void RewriteImplementationDecl(Decl *Dcl);
+ void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *MDecl, std::string &ResultStr);
+ void RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType);
+ void RewriteByRefString(std::string &ResultStr, const std::string &Name,
+ ValueDecl *VD, bool def=false);
+ void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
+ void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
+ void RewriteForwardProtocolDecl(DeclGroupRef D);
+ void RewriteForwardProtocolDecl(const llvm::SmallVector<Decl*, 8> &DG);
+ void RewriteMethodDeclaration(ObjCMethodDecl *Method);
+ void RewriteProperty(ObjCPropertyDecl *prop);
+ void RewriteFunctionDecl(FunctionDecl *FD);
+ void RewriteBlockPointerType(std::string& Str, QualType Type);
+ void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
+ void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
+ void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
+ void RewriteTypeOfDecl(VarDecl *VD);
+ void RewriteObjCQualifiedInterfaceTypes(Expr *E);
+
+ // Expression Rewriting.
+ Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
+ Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
+ Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
+ Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
+ Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
+ Stmt *RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp);
+ Stmt *RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp);
+ Stmt *RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp);
+ Stmt *RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp);
+ Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
+ Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
+ Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
+ Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd);
+ Stmt *RewriteBreakStmt(BreakStmt *S);
+ Stmt *RewriteContinueStmt(ContinueStmt *S);
+ void RewriteCastExpr(CStyleCastExpr *CE);
+ void RewriteImplicitCastObjCExpr(CastExpr *IE);
+ void RewriteLinkageSpec(LinkageSpecDecl *LSD);
+
+ // Block rewriting.
+ void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
+
+ // Block specific rewrite rules.
+ void RewriteBlockPointerDecl(NamedDecl *VD);
+ void RewriteByRefVar(VarDecl *VD);
+ Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
+ Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
+ void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
+
+ void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ void RewriteObjCFieldDecl(FieldDecl *fieldDecl, std::string &Result);
+
+ bool RewriteObjCFieldDeclType(QualType &Type, std::string &Result);
+
+ void RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ virtual void Initialize(ASTContext &context);
+
+    // Misc. AST transformation routines. Sometimes they end up calling
+ // rewriting routines on the new ASTs.
+ CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ Expr **args, unsigned nargs,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ void SynthCountByEnumWithState(std::string &buf);
+ void SynthMsgSendFunctionDecl();
+ void SynthMsgSendSuperFunctionDecl();
+ void SynthMsgSendStretFunctionDecl();
+ void SynthMsgSendFpretFunctionDecl();
+ void SynthMsgSendSuperStretFunctionDecl();
+ void SynthGetClassFunctionDecl();
+ void SynthGetMetaClassFunctionDecl();
+ void SynthGetSuperClassFunctionDecl();
+ void SynthSelGetUidFunctionDecl();
+ void SynthSuperContructorFunctionDecl();
+
+ // Rewriting metadata
+ template<typename MethodIterator>
+ void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result);
+ void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ std::string &Result);
+ virtual void RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix, StringRef ClassName, std::string &Result);
+ virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result);
+ virtual void RewriteClassSetupInitHook(std::string &Result);
+
+ virtual void RewriteMetaDataIntoBuffer(std::string &Result);
+ virtual void WriteImageInfo(std::string &Result);
+ virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result);
+ virtual void RewriteCategorySetupInitHook(std::string &Result);
+
+ // Rewriting ivar
+ virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result);
+ virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
+
+
+ std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
+ std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockImpl(BlockExpr *CE,
+ std::string Tag, std::string Desc);
+ std::string SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag,
+ int i, StringRef funcName,
+ unsigned hasCopy);
+ Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
+ void SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName);
+ FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
+ Stmt *SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs);
+
+ // Misc. helper routines.
+ QualType getProtocolType();
+ void WarnAboutReturnGotoStmts(Stmt *S);
+ void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
+ void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
+ void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
+
+ bool IsDeclStmtInForeachHeader(DeclStmt *DS);
+ void CollectBlockDeclRefInfo(BlockExpr *Exp);
+ void GetBlockDeclRefExprs(Stmt *S);
+ void GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs,
+ llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts);
+
+ // We avoid calling Type::isBlockPointerType(), since it operates on the
+ // canonical type. We only care if the top-level type is a closure pointer.
+ bool isTopLevelBlockPointerType(QualType T) {
+ return isa<BlockPointerType>(T);
+ }
+
+ /// convertBlockPointerToFunctionPointer - Converts a block-pointer type
+ /// to a function pointer type and upon success, returns true; false
+ /// otherwise.
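+    /// For illustration: a parameter typed "int (^)(float)" is rewritten
+    /// to "int (*)(float)"; any other type is left untouched.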
+ bool convertBlockPointerToFunctionPointer(QualType &T) {
+ if (isTopLevelBlockPointerType(T)) {
+ const BlockPointerType *BPT = T->getAs<BlockPointerType>();
+ T = Context->getPointerType(BPT->getPointeeType());
+ return true;
+ }
+ return false;
+ }
+
+ bool convertObjCTypeToCStyleType(QualType &T);
+
+ bool needToScanForQualifiers(QualType T);
+ QualType getSuperStructType();
+ QualType getConstantStringStructType();
+ QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
+ bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
+
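+    // Strips protocol qualifiers from ObjC object types. For illustration,
+    // with a hypothetical protocol P: "id<P>" becomes "id", "Class<P>"
+    // becomes "Class", and "Foo<P> *" becomes "Foo *".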
+ void convertToUnqualifiedObjCType(QualType &T) {
+ if (T->isObjCQualifiedIdType()) {
+ bool isConst = T.isConstQualified();
+ T = isConst ? Context->getObjCIdType().withConst()
+ : Context->getObjCIdType();
+ }
+ else if (T->isObjCQualifiedClassType())
+ T = Context->getObjCClassType();
+ else if (T->isObjCObjectPointerType() &&
+ T->getPointeeType()->isObjCQualifiedInterfaceType()) {
+ if (const ObjCObjectPointerType * OBJPT =
+ T->getAsObjCInterfacePointerType()) {
+ const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
+ T = QualType(IFaceT, 0);
+ T = Context->getPointerType(T);
+ }
+ }
+ }
+
+ // FIXME: This predicate seems like it would be useful to add to ASTContext.
+ bool isObjCType(QualType T) {
+ if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ return false;
+
+ QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
+
+ if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
+ OCT == Context->getCanonicalType(Context->getObjCClassType()))
+ return true;
+
+ if (const PointerType *PT = OCT->getAs<PointerType>()) {
+ if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
+ PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ return false;
+ }
+ bool PointerTypeTakesAnyBlockArguments(QualType QT);
+ bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
+ void GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen);
+
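+    // Escapes embedded double quotes so the result can be placed inside a
+    // C string literal; e.g. the text  say "hi"  becomes  say \"hi\" .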
+ void QuoteDoublequotes(std::string &From, std::string &To) {
+ for (unsigned i = 0; i < From.length(); i++) {
+ if (From[i] == '"')
+ To += "\\\"";
+ else
+ To += From[i];
+ }
+ }
+
+ QualType getSimpleFunctionType(QualType result,
+ const QualType *args,
+ unsigned numArgs,
+ bool variadic = false) {
+ if (result == Context->getObjCInstanceType())
+ result = Context->getObjCIdType();
+ FunctionProtoType::ExtProtoInfo fpi;
+ fpi.Variadic = variadic;
+ return Context->getFunctionType(result, args, numArgs, fpi);
+ }
+
+ // Helper function: create a CStyleCastExpr with trivial type source info.
+ CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
+ CastKind Kind, Expr *E) {
+ TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo,
+ SourceLocation(), SourceLocation());
+ }
+
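+    // A class or category implementation is "non-lazy" when it declares a
+    // +load class method, which the runtime must invoke eagerly.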
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ IdentifierInfo* II = &Context->Idents.get("load");
+ Selector LoadSel = Context->Selectors.getSelector(0, &II);
+ return OD->getClassMethod(LoadSel) != 0;
+ }
+ };
+
+}
+
+void RewriteModernObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
+ NamedDecl *D) {
+ if (const FunctionProtoType *fproto
+ = dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
+ for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(),
+ E = fproto->arg_type_end(); I && (I != E); ++I)
+ if (isTopLevelBlockPointerType(*I)) {
+ // All the args are checked/rewritten. Don't call twice!
+ RewriteBlockPointerDecl(D);
+ break;
+ }
+ }
+}
+
+void RewriteModernObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
+ const PointerType *PT = funcType->getAs<PointerType>();
+ if (PT && PointerTypeTakesAnyBlockArguments(funcType))
+ RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND);
+}
+
+static bool IsHeaderFile(const std::string &Filename) {
+ std::string::size_type DotPos = Filename.rfind('.');
+
+ if (DotPos == std::string::npos) {
+ // no file extension
+ return false;
+ }
+
+ std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+ // C header: .h
+  // C++ header: .hh or .H
+ return Ext == "h" || Ext == "hh" || Ext == "H";
+}
+
+RewriteModernObjC::RewriteModernObjC(std::string inFile, raw_ostream* OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn)
+ : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS),
+ SilenceRewriteMacroWarning(silenceMacroWarn) {
+ IsHeader = IsHeaderFile(inFile);
+ RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "rewriting sub-expression within a macro (may not be correct)");
+  // FIXME. This should be an error. But if the block is not called, it is OK.
+  // And it may break inclusion of some headers.
+ GlobalBlockRewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "rewriting block literal declared in global scope is not implemented");
+
+ TryFinallyContainsReturnDiag = Diags.getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "rewriter doesn't support user-specified control flow semantics "
+ "for @try/@finally (code may not execute properly)");
+}
+
+ASTConsumer *clang::CreateModernObjCRewriter(const std::string& InFile,
+ raw_ostream* OS,
+ DiagnosticsEngine &Diags,
+ const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning) {
+ return new RewriteModernObjC(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning);
+}
+
+void RewriteModernObjC::InitializeCommon(ASTContext &context) {
+ Context = &context;
+ SM = &Context->getSourceManager();
+ TUDecl = Context->getTranslationUnitDecl();
+ MsgSendFunctionDecl = 0;
+ MsgSendSuperFunctionDecl = 0;
+ MsgSendStretFunctionDecl = 0;
+ MsgSendSuperStretFunctionDecl = 0;
+ MsgSendFpretFunctionDecl = 0;
+ GetClassFunctionDecl = 0;
+ GetMetaClassFunctionDecl = 0;
+ GetSuperClassFunctionDecl = 0;
+ SelGetUidFunctionDecl = 0;
+ CFStringFunctionDecl = 0;
+ ConstantStringClassReference = 0;
+ NSStringRecord = 0;
+ CurMethodDef = 0;
+ CurFunctionDef = 0;
+ CurFunctionDeclToDeclareForBlock = 0;
+ GlobalVarDecl = 0;
+ GlobalConstructionExp = 0;
+ SuperStructDecl = 0;
+ ProtocolTypeDecl = 0;
+ ConstantStringDecl = 0;
+ BcLabelCount = 0;
+ SuperContructorFunctionDecl = 0;
+ NumObjCStringLiterals = 0;
+ PropParentMap = 0;
+ CurrentBody = 0;
+ DisableReplaceStmt = false;
+ objc_impl_method = false;
+
+ // Get the ID and start/end of the main file.
+ MainFileID = SM->getMainFileID();
+ const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
+ MainFileStart = MainBuf->getBufferStart();
+ MainFileEnd = MainBuf->getBufferEnd();
+
+ Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Driver Code
+//===----------------------------------------------------------------------===//
+
+void RewriteModernObjC::HandleTopLevelSingleDecl(Decl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Two cases: either the decl could be in the main file, or it could be in a
+  // #included file. If the former, rewrite it now. If the latter, check to see
+ // if we rewrote the #include/#import.
+ SourceLocation Loc = D->getLocation();
+ Loc = SM->getExpansionLoc(Loc);
+
+ // If this is for a builtin, ignore it.
+ if (Loc.isInvalid()) return;
+
+  // Look for built-in declarations that we need to refer to during the rewrite.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ RewriteFunctionDecl(FD);
+ } else if (VarDecl *FVD = dyn_cast<VarDecl>(D)) {
+ // declared in <Foundation/NSString.h>
+ if (FVD->getName() == "_NSConstantStringClassReference") {
+ ConstantStringClassReference = FVD;
+ return;
+ }
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
+ RewriteCategoryDecl(CD);
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ if (PD->isThisDeclarationADefinition())
+ RewriteProtocolDecl(PD);
+ } else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
+ // FIXME. This will not work in all situations and leaving it out
+ // is harmless.
+ // RewriteLinkageSpec(LSD);
+
+ // Recurse into linkage specifications
+ for (DeclContext::decl_iterator DI = LSD->decls_begin(),
+ DIEnd = LSD->decls_end();
+ DI != DIEnd; ) {
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
+ if (!IFace->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = IFace->getLocStart();
+ do {
+ if (isa<ObjCInterfaceDecl>(*DI) &&
+ !cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardClassDecl(DG);
+ continue;
+ }
+ else {
+ // Keep track of all interface declarations seen.
+ ObjCInterfacesSeen.push_back(IFace);
+ ++DI;
+ continue;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = Proto->getLocStart();
+ do {
+ if (isa<ObjCProtocolDecl>(*DI) &&
+ !cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardProtocolDecl(DG);
+ continue;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*DI);
+ ++DI;
+ }
+ }
+ // If we have a decl in the main file, see if we should rewrite it.
+ if (SM->isFromMainFile(Loc))
+ return HandleDeclInMainFile(D);
+}
+
+//===----------------------------------------------------------------------===//
+// Syntactic (non-AST) Rewriting Code
+//===----------------------------------------------------------------------===//
+
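+// RewriteInclude - Scan the main file for preprocessor directives and turn
+// each "#import" into "#include" (e.g. "#import <Foo/Bar.h>" becomes
+// "#include <Foo/Bar.h>", with Foo/Bar.h purely illustrative), since the
+// rewritten output is compiled as plain C/C++.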
+void RewriteModernObjC::RewriteInclude() {
+ SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID);
+ StringRef MainBuf = SM->getBufferData(MainFileID);
+ const char *MainBufStart = MainBuf.begin();
+ const char *MainBufEnd = MainBuf.end();
+ size_t ImportLen = strlen("import");
+
+ // Loop over the whole file, looking for includes.
+ for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) {
+ if (*BufPtr == '#') {
+ if (++BufPtr == MainBufEnd)
+ return;
+ while (*BufPtr == ' ' || *BufPtr == '\t')
+ if (++BufPtr == MainBufEnd)
+ return;
+ if (!strncmp(BufPtr, "import", ImportLen)) {
+ // replace import with include
+ SourceLocation ImportLoc =
+ LocStart.getLocWithOffset(BufPtr-MainBufStart);
+ ReplaceText(ImportLoc, ImportLen, "include");
+ BufPtr += ImportLen;
+ }
+ }
+ }
+}
+
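+// Builds the C access path for an ivar. For illustration, a hypothetical
+// ivar "_bar" of class "Foo" yields "((struct Foo_IMPL *)self)->_bar".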
+static std::string getIvarAccessString(ObjCIvarDecl *OID) {
+ const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface();
+ std::string S;
+ S = "((struct ";
+ S += ClassDecl->getIdentifier()->getName();
+ S += "_IMPL *)self)->";
+ S += OID->getName();
+ return S;
+}
+
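+// Comments out the @synthesize directive and emits getter/setter bodies
+// after it. Rough sketch (hypothetical class Foo, nonatomic int property
+// "bar" backed by ivar "_bar"):
+//   static int _I_Foo_bar(Foo * self, SEL _cmd)
+//   { return ((struct Foo_IMPL *)self)->_bar; }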
+void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID) {
+ static bool objcGetPropertyDefined = false;
+ static bool objcSetPropertyDefined = false;
+ SourceLocation startLoc = PID->getLocStart();
+ InsertText(startLoc, "// ");
+ const char *startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @synthesize location");
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@synthesize: can't find ';'");
+ SourceLocation onePastSemiLoc =
+ startLoc.getLocWithOffset(semiBuf-startBuf+1);
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ return; // FIXME: is this correct?
+
+ // Generate the 'getter' function.
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
+
+ if (!OID)
+ return;
+ unsigned Attributes = PD->getPropertyAttributes();
+ if (!PD->getGetterMethodDecl()->isDefined()) {
+ bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy));
+ std::string Getr;
+ if (GenGetProperty && !objcGetPropertyDefined) {
+ objcGetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Getr = "\nextern \"C\" __declspec(dllimport) "
+ "id objc_getProperty(id, SEL, long, bool);\n";
+ }
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getGetterMethodDecl(), Getr);
+ Getr += "{ ";
+ // Synthesize an explicit cast to gain access to the ivar.
+ // See objc-act.c:objc_synthesize_new_getter() for details.
+ if (GenGetProperty) {
+ // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
+ Getr += "typedef ";
+ const FunctionType *FPRetType = 0;
+ RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr,
+ FPRetType);
+ Getr += " _TYPE";
+ if (FPRetType) {
+ Getr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
+ Getr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) Getr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->getPrintingPolicy());
+ Getr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) Getr += ", ";
+ Getr += "...";
+ }
+ Getr += ")";
+ } else
+ Getr += "()";
+ }
+ Getr += ";\n";
+ Getr += "return (_TYPE)";
+ Getr += "objc_getProperty(self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Getr);
+ Getr += ", 1)";
+ }
+ else
+ Getr += "return " + getIvarAccessString(OID);
+ Getr += "; }";
+ InsertText(onePastSemiLoc, Getr);
+ }
+
+ if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined())
+ return;
+
+ // Generate the 'setter' function.
+ std::string Setr;
+ bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy);
+ if (GenSetProperty && !objcSetPropertyDefined) {
+ objcSetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Setr = "\nextern \"C\" __declspec(dllimport) "
+ "void objc_setProperty (id, SEL, long, id, bool, bool);\n";
+ }
+
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getSetterMethodDecl(), Setr);
+ Setr += "{ ";
+ // Synthesize an explicit cast to initialize the ivar.
+ // See objc-act.c:objc_synthesize_new_setter() for details.
+ if (GenSetProperty) {
+ Setr += "objc_setProperty (self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Setr);
+ Setr += ", (id)";
+ Setr += PD->getName();
+ Setr += ", ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ Setr += "0, ";
+ else
+ Setr += "1, ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ Setr += "1)";
+ else
+ Setr += "0)";
+ }
+ else {
+ Setr += getIvarAccessString(OID) + " = ";
+ Setr += PD->getName();
+ }
+ Setr += "; }";
+ InsertText(onePastSemiLoc, Setr);
+}
+
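+// For illustration, a forward declaration "@class Foo;" is rewritten to the
+// following, wrapped in a "#ifndef _REWRITER_typedef_Foo" guard:
+//   typedef struct objc_object Foo;
+//   typedef struct {} _objc_exc_Foo;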
+static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
+ std::string &typedefString) {
+ typedefString += "#ifndef _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "#define _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "typedef struct objc_object ";
+ typedefString += ForwardDecl->getNameAsString();
+ // typedef struct { } _objc_exc_Classname;
+ typedefString += ";\ntypedef struct {} _objc_exc_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n#endif\n";
+}
+
+void RewriteModernObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString) {
+ SourceLocation startLoc = ClassDecl->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *semiPtr = strchr(startBuf, ';');
+ // Replace the @class with typedefs corresponding to the classes.
+ ReplaceText(startLoc, semiPtr-startBuf+1, typedefString);
+}
+
+void RewriteModernObjC::RewriteForwardClassDecl(DeclGroupRef D) {
+ std::string typedefString;
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(*I);
+ if (I == D.begin()) {
+      // Translate to typedefs that forward-reference structs with the same name
+ // as the class. As a convenience, we include the original declaration
+ // as a comment.
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ DeclGroupRef::iterator I = D.begin();
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(*I), typedefString);
+}
+
+void RewriteModernObjC::RewriteForwardClassDecl(
+ const llvm::SmallVector<Decl*, 8> &D) {
+ std::string typedefString;
+ for (unsigned i = 0; i < D.size(); i++) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(D[i]);
+ if (i == 0) {
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(D[0]), typedefString);
+}
+
+void RewriteModernObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
+  // When the method is a synthesized one, such as a getter/setter, there is
+ // nothing to rewrite.
+ if (Method->isImplicit())
+ return;
+ SourceLocation LocStart = Method->getLocStart();
+ SourceLocation LocEnd = Method->getLocEnd();
+
+ if (SM->getExpansionLineNumber(LocEnd) >
+ SM->getExpansionLineNumber(LocStart)) {
+ InsertText(LocStart, "#if 0\n");
+ ReplaceText(LocEnd, 1, ";\n#endif\n");
+ } else {
+ InsertText(LocStart, "// ");
+ }
+}
+
+void RewriteModernObjC::RewriteProperty(ObjCPropertyDecl *prop) {
+ SourceLocation Loc = prop->getAtLoc();
+
+ ReplaceText(Loc, 0, "// ");
+ // FIXME: handle properties that are declared across multiple lines.
+}
+
+void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
+ SourceLocation LocStart = CatDecl->getLocStart();
+
+ // FIXME: handle category headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+ if (CatDecl->getIvarLBraceLoc().isValid())
+ InsertText(CatDecl->getIvarLBraceLoc(), "// ");
+ for (ObjCCategoryDecl::ivar_iterator
+ I = CatDecl->ivar_begin(), E = CatDecl->ivar_end(); I != E; ++I) {
+ ObjCIvarDecl *Ivar = (*I);
+ SourceLocation LocStart = Ivar->getLocStart();
+ ReplaceText(LocStart, 0, "// ");
+ }
+ if (CatDecl->getIvarRBraceLoc().isValid())
+ InsertText(CatDecl->getIvarRBraceLoc(), "// ");
+
+ for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(),
+ E = CatDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+
+ for (ObjCCategoryDecl::instmeth_iterator
+ I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCCategoryDecl::classmeth_iterator
+ I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(CatDecl->getAtEndRange().getBegin(),
+ strlen("@end"), "/* @end */");
+}
+
+void RewriteModernObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
+ SourceLocation LocStart = PDecl->getLocStart();
+ assert(PDecl->isThisDeclarationADefinition());
+
+ // FIXME: handle protocol headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(),
+ E = PDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+
+ // Lastly, comment out the @end.
+ SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
+ ReplaceText(LocEnd, strlen("@end"), "/* @end */");
+
+ // Must comment out @optional/@required
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ for (const char *p = startBuf; p < endBuf; p++) {
+ if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */");
+
+ }
+ else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@required"), "/* @required */");
+
+ }
+ }
+}
+
+void RewriteModernObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
+ SourceLocation LocStart = (*D.begin())->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+  // FIXME: handle forward protocols that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteModernObjC::RewriteForwardProtocolDecl(const llvm::SmallVector<Decl*, 8> &DG) {
+ SourceLocation LocStart = DG[0]->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+  // FIXME: handle forward protocols that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteModernObjC::RewriteLinkageSpec(LinkageSpecDecl *LSD) {
+ SourceLocation LocStart = LSD->getExternLoc();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid extern SourceLocation");
+
+ ReplaceText(LocStart, 0, "// ");
+ if (!LSD->hasBraces())
+ return;
+ // FIXME. We don't rewrite well if '{' is not on same line as 'extern'.
+ SourceLocation LocRBrace = LSD->getRBraceLoc();
+ if (LocRBrace.isInvalid())
+ llvm_unreachable("Invalid rbrace SourceLocation");
+ ReplaceText(LocRBrace, 0, "// ");
+}
+
+void RewriteModernObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType) {
+ if (T->isObjCQualifiedIdType())
+ ResultStr += "id";
+ else if (T->isFunctionPointerType() ||
+ T->isBlockPointerType()) {
+    // needs special handling, since pointers to functions have special
+    // syntax (where a declaration models use).
+ QualType retType = T;
+ QualType PointeeTy;
+ if (const PointerType* PT = retType->getAs<PointerType>())
+ PointeeTy = PT->getPointeeType();
+ else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
+ PointeeTy = BPT->getPointeeType();
+ if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
+ ResultStr += FPRetType->getResultType().getAsString(
+ Context->getPrintingPolicy());
+ ResultStr += "(*";
+ }
+ } else
+ ResultStr += T.getAsString(Context->getPrintingPolicy());
+}
+
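+// Rewrites an ObjC method declaration as a static C function whose name is
+// mangled "_I_<Class>_<selector>" for instance methods ("_C_..." for class
+// methods), with each ':' replaced by '_'. For illustration, a hypothetical
+// "- (void)setFoo:(int)f" in class Bar becomes roughly
+// "static void _I_Bar_setFoo_(Bar * self, SEL _cmd, int f)".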
+void RewriteModernObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *OMD,
+ std::string &ResultStr) {
+ //fprintf(stderr,"In RewriteObjCMethodDecl\n");
+ const FunctionType *FPRetType = 0;
+ ResultStr += "\nstatic ";
+ RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType);
+ ResultStr += " ";
+
+ // Unique method name
+ std::string NameStr;
+
+ if (OMD->isInstanceMethod())
+ NameStr += "_I_";
+ else
+ NameStr += "_C_";
+
+ NameStr += IDecl->getNameAsString();
+ NameStr += "_";
+
+ if (ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
+ NameStr += CID->getNameAsString();
+ NameStr += "_";
+ }
+ // Append selector names, replacing ':' with '_'
+ {
+ std::string selString = OMD->getSelector().getAsString();
+ int len = selString.size();
+ for (int i = 0; i < len; i++)
+ if (selString[i] == ':')
+ selString[i] = '_';
+ NameStr += selString;
+ }
+ // Remember this name for metadata emission
+ MethodInternalNames[OMD] = NameStr;
+ ResultStr += NameStr;
+
+ // Rewrite arguments
+ ResultStr += "(";
+
+ // invisible arguments
+ if (OMD->isInstanceMethod()) {
+ QualType selfTy = Context->getObjCInterfaceType(IDecl);
+ selfTy = Context->getPointerType(selfTy);
+ if (!LangOpts.MicrosoftExt) {
+ if (ObjCSynthesizedStructs.count(const_cast<ObjCInterfaceDecl*>(IDecl)))
+ ResultStr += "struct ";
+ }
+ // When rewriting for Microsoft, explicitly omit the structure name.
+ ResultStr += IDecl->getNameAsString();
+ ResultStr += " *";
+ }
+ else
+ ResultStr += Context->getObjCClassType().getAsString(
+ Context->getPrintingPolicy());
+
+ ResultStr += " self, ";
+ ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy());
+ ResultStr += " _cmd";
+
+ // Method arguments.
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PDecl = *PI;
+ ResultStr += ", ";
+ if (PDecl->getType()->isObjCQualifiedIdType()) {
+ ResultStr += "id ";
+ ResultStr += PDecl->getNameAsString();
+ } else {
+ std::string Name = PDecl->getNameAsString();
+ QualType QT = PDecl->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ ResultStr += Name;
+ }
+ }
+ if (OMD->isVariadic())
+ ResultStr += ", ...";
+ ResultStr += ") ";
+
+ if (FPRetType) {
+ ResultStr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
+ ResultStr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) ResultStr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->getPrintingPolicy());
+ ResultStr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) ResultStr += ", ";
+ ResultStr += "...";
+ }
+ ResultStr += ")";
+ } else {
+ ResultStr += "()";
+ }
+ }
+}
+void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) {
+ ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
+ ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
+
+ if (IMD) {
+ InsertText(IMD->getLocStart(), "// ");
+ if (IMD->getIvarLBraceLoc().isValid())
+ InsertText(IMD->getIvarLBraceLoc(), "// ");
+ for (ObjCImplementationDecl::ivar_iterator
+ I = IMD->ivar_begin(), E = IMD->ivar_end(); I != E; ++I) {
+ ObjCIvarDecl *Ivar = (*I);
+ SourceLocation LocStart = Ivar->getLocStart();
+ ReplaceText(LocStart, 0, "// ");
+ }
+ if (IMD->getIvarRBraceLoc().isValid())
+ InsertText(IMD->getIvarRBraceLoc(), "// ");
+ }
+ else
+ InsertText(CID->getLocStart(), "// ");
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(),
+ E = IMD ? IMD->instmeth_end() : CID->instmeth_end();
+ I != E; ++I) {
+ std::string ResultStr;
+ ObjCMethodDecl *OMD = *I;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(),
+ E = IMD ? IMD->classmeth_end() : CID->classmeth_end();
+ I != E; ++I) {
+ std::string ResultStr;
+ ObjCMethodDecl *OMD = *I;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(),
+ E = IMD ? IMD->propimpl_end() : CID->propimpl_end();
+ I != E; ++I) {
+ RewritePropertyImplDecl(*I, IMD, CID);
+ }
+
+ InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
+}
+
+void RewriteModernObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
+ // Do not synthesize more than once.
+ if (ObjCSynthesizedStructs.count(ClassDecl))
+ return;
+  // Make sure superclasses are written before the current class is written.
+ ObjCInterfaceDecl *SuperClass = ClassDecl->getSuperClass();
+ while (SuperClass) {
+ RewriteInterfaceDecl(SuperClass);
+ SuperClass = SuperClass->getSuperClass();
+ }
+ std::string ResultStr;
+ if (!ObjCWrittenInterfaces.count(ClassDecl->getCanonicalDecl())) {
+    // We haven't seen a forward decl; generate a typedef.
+ RewriteOneForwardClassDecl(ClassDecl, ResultStr);
+ RewriteIvarOffsetSymbols(ClassDecl, ResultStr);
+
+ RewriteObjCInternalStruct(ClassDecl, ResultStr);
+ // Mark this typedef as having been written into its c++ equivalent.
+ ObjCWrittenInterfaces.insert(ClassDecl->getCanonicalDecl());
+
+ for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(),
+ E = ClassDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+ for (ObjCInterfaceDecl::instmeth_iterator
+ I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCInterfaceDecl::classmeth_iterator
+ I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"),
+ "/* @end */");
+ }
+}
+
+Stmt *RewriteModernObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getSemanticExpr(
+ PseudoOp->getNumSemanticExprs() - 1));
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base;
+ SmallVector<Expr*, 2> Args;
+ {
+ DisableReplaceStmtScope S(*this);
+
+ // Rebuild the base expression if we have one.
+ Base = 0;
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+
+ unsigned numArgs = OldMsg->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ Expr *Arg = OldMsg->getArg(i);
+ if (isa<OpaqueValueExpr>(Arg))
+ Arg = cast<OpaqueValueExpr>(Arg)->getSourceExpr();
+ Arg = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Arg));
+ Args.push_back(Arg);
+ }
+ }
+
+ // TODO: avoid this copy.
+ SmallVector<SourceLocation, 1> SelLocs;
+ OldMsg->getSelectorLocs(SelLocs);
+
+ ObjCMessageExpr *NewMsg = 0;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+Stmt *RewriteModernObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getResultExpr()->IgnoreImplicit());
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base = 0;
+ SmallVector<Expr*, 1> Args;
+ {
+ DisableReplaceStmtScope S(*this);
+ // Rebuild the base expression if we have one.
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+ unsigned numArgs = OldMsg->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ Expr *Arg = OldMsg->getArg(i);
+ if (isa<OpaqueValueExpr>(Arg))
+ Arg = cast<OpaqueValueExpr>(Arg)->getSourceExpr();
+ Arg = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Arg));
+ Args.push_back(Arg);
+ }
+ }
+
+ // Intentionally empty.
+ SmallVector<SourceLocation, 1> SelLocs;
+
+ ObjCMessageExpr *NewMsg = 0;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+/// SynthCountByEnumWithState - To print:
+/// ((unsigned int (*)
+/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+/// (void *)objc_msgSend)((id)l_collection,
+/// sel_registerName(
+/// "countByEnumeratingWithState:objects:count:"),
+/// &enumState,
+/// (id *)__rw_items, (unsigned int)16)
+///
+void RewriteModernObjC::SynthCountByEnumWithState(std::string &buf) {
+ buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, "
+ "id *, unsigned int))(void *)objc_msgSend)";
+ buf += "\n\t\t";
+ buf += "((id)l_collection,\n\t\t";
+ buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
+ buf += "\n\t\t";
+ buf += "&enumState, "
+ "(id *)__rw_items, (unsigned int)16)";
+}
+
+/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2 foreach
+/// statement to exit to its outer synthesized loop.
+///
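+/// For example, "break;" in the foreach body is rewritten to
+/// "goto __break_label_N;", where N is the label number of the enclosing
+/// foreach.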
+Stmt *RewriteModernObjC::RewriteBreakStmt(BreakStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace break with goto __break_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("break"), buf);
+
+ return 0;
+}
+
+/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2 foreach
+/// statement to continue with its inner synthesized loop.
+///
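+/// For example, "continue;" is rewritten to "goto __continue_label_N;",
+/// targeting the label emitted at the end of the synthesized inner loop.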
+Stmt *RewriteModernObjC::RewriteContinueStmt(ContinueStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace continue with goto __continue_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("continue"), buf);
+
+ return 0;
+}
+
+/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement.
+/// It rewrites:
+/// for ( type elem in collection) { stmts; }
+///
+/// Into:
+/// {
+/// type elem;
+/// struct __objcFastEnumerationState enumState = { 0 };
+/// id __rw_items[16];
+/// id l_collection = (id)collection;
+/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16];
+/// if (limit) {
+/// unsigned long startMutations = *enumState.mutationsPtr;
+/// do {
+/// unsigned long counter = 0;
+/// do {
+/// if (startMutations != *enumState.mutationsPtr)
+/// objc_enumerationMutation(l_collection);
+/// elem = (type)enumState.itemsPtr[counter++];
+/// stmts;
+/// __continue_label: ;
+/// } while (counter < limit);
+/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16]);
+/// elem = nil;
+/// __break_label: ;
+/// }
+/// else
+/// elem = nil;
+/// }
+///
+Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd) {
+ assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty");
+ assert(isa<ObjCForCollectionStmt>(Stmts.back()) &&
+ "ObjCForCollectionStmt Statement stack mismatch");
+ assert(!ObjCBcLabelNo.empty() &&
+ "ObjCForCollectionStmt - Label No stack empty");
+
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ StringRef elementName;
+ std::string elementTypeAsString;
+ std::string buf;
+ buf = "\n{\n\t";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
+ // type elem;
+ NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
+ QualType ElementType = cast<ValueDecl>(D)->getType();
+ if (ElementType->isObjCQualifiedIdType() ||
+ ElementType->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy());
+ buf += elementTypeAsString;
+ buf += " ";
+ elementName = D->getName();
+ buf += elementName;
+ buf += ";\n\t";
+ }
+ else {
+ DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
+ elementName = DR->getDecl()->getName();
+ ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
+ if (VD->getType()->isObjCQualifiedIdType() ||
+ VD->getType()->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy());
+ }
+
+ // struct __objcFastEnumerationState enumState = { 0 };
+ buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
+ // id __rw_items[16];
+ buf += "id __rw_items[16];\n\t";
+ // id l_collection = (id)
+ buf += "id l_collection = (id)";
+ // Find start location of 'collection' the hard way!
+ const char *startCollectionBuf = startBuf;
+ startCollectionBuf += 3; // skip 'for'
+ startCollectionBuf = strchr(startCollectionBuf, '(');
+ startCollectionBuf++; // skip '('
+ // find 'in' and skip it.
+ while (*startCollectionBuf != ' ' ||
+ *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' ||
+ (*(startCollectionBuf+3) != ' ' &&
+ *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '('))
+ startCollectionBuf++;
+ startCollectionBuf += 3;
+
+ // Replace: "for (type element in" with string constructed thus far.
+ ReplaceText(startLoc, startCollectionBuf - startBuf, buf);
+ // Replace ')' in for '(' type elem in collection ')' with ';'
+ SourceLocation rightParenLoc = S->getRParenLoc();
+ const char *rparenBuf = SM->getCharacterData(rightParenLoc);
+ SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf);
+ buf = ";\n\t";
+
+ // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+ // objects:__rw_items count:16];
+ // which is synthesized into:
+ // unsigned int limit =
+ // ((unsigned int (*)
+ // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+ // (void *)objc_msgSend)((id)l_collection,
+ // sel_registerName(
+ // "countByEnumeratingWithState:objects:count:"),
+ // (struct __objcFastEnumerationState *)&state,
+ // (id *)__rw_items, (unsigned int)16);
+ buf += "unsigned long limit =\n\t\t";
+ SynthCountByEnumWithState(buf);
+ buf += ";\n\t";
+ /// if (limit) {
+ /// unsigned long startMutations = *enumState.mutationsPtr;
+ /// do {
+ /// unsigned long counter = 0;
+ /// do {
+ /// if (startMutations != *enumState.mutationsPtr)
+ /// objc_enumerationMutation(l_collection);
+ /// elem = (type)enumState.itemsPtr[counter++];
+ buf += "if (limit) {\n\t";
+ buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t";
+ buf += "do {\n\t\t";
+ buf += "unsigned long counter = 0;\n\t\t";
+ buf += "do {\n\t\t\t";
+ buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t";
+ buf += "objc_enumerationMutation(l_collection);\n\t\t\t";
+ buf += elementName;
+ buf += " = (";
+ buf += elementTypeAsString;
+ buf += ")enumState.itemsPtr[counter++];";
+ // Replace ')' in for '(' type elem in collection ')' with all of these.
+ ReplaceText(lparenLoc, 1, buf);
+
+ /// __continue_label: ;
+ /// } while (counter < limit);
+ /// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+ /// objects:__rw_items count:16]);
+ /// elem = nil;
+ /// __break_label: ;
+ /// }
+ /// else
+ /// elem = nil;
+ /// }
+ ///
+ buf = ";\n\t";
+ buf += "__continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;";
+ buf += "\n\t\t";
+ buf += "} while (counter < limit);\n\t";
+ buf += "} while (limit = ";
+ SynthCountByEnumWithState(buf);
+ buf += ");\n\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "__break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;\n\t";
+ buf += "}\n\t";
+ buf += "else\n\t\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "}\n";
+
+ // Insert all these *after* the statement body.
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (isa<CompoundStmt>(S->getBody())) {
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1);
+ InsertText(endBodyLoc, buf);
+ } else {
+ /* Need to treat single statements specially. For example:
+ *
+ * for (A *a in b) if (stuff()) break;
+ * for (A *a in b) xxxyy;
+ *
+ * The following code simply scans ahead to the semi to find the actual end.
+ */
+ const char *stmtBuf = SM->getCharacterData(OrigEnd);
+ const char *semiBuf = strchr(stmtBuf, ';');
+ assert(semiBuf && "Can't find ';'");
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1);
+ InsertText(endBodyLoc, buf);
+ }
+ Stmts.pop_back();
+ ObjCBcLabelNo.pop_back();
+ return 0;
+}
+
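+// Appends a local RAII struct whose destructor rethrows a pending ObjC
+// exception, roughly:
+//   { struct _FIN { _FIN(id reth) : rethrow(reth) {}
+//     ~_FIN() { if (rethrow) objc_exception_throw(rethrow); }
+//     id rethrow;
+//   } _fin_force_rethrow(_rethrow);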
+static void Write_RethrowObject(std::string &buf) {
+ buf += "{ struct _FIN { _FIN(id reth) : rethrow(reth) {}\n";
+ buf += "\t~_FIN() { if (rethrow) objc_exception_throw(rethrow); }\n";
+ buf += "\tid rethrow;\n";
+  buf += "\t} _fin_force_rethrow(_rethrow);";
+}
+
+/// RewriteObjCSynchronizedStmt -
+/// This routine rewrites @synchronized(expr) stmt;
+/// into:
+/// objc_sync_enter(expr);
+/// @try stmt @finally { objc_sync_exit(expr); }
+///
+Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @synchronized location");
+
+ std::string buf;
+ buf = "{ id _rethrow = 0; id _sync_obj = ";
+
+ const char *lparenBuf = startBuf;
+ while (*lparenBuf != '(') lparenBuf++;
+ ReplaceText(startLoc, lparenBuf-startBuf+1, buf);
+
+ buf = "; objc_sync_enter(_sync_obj);\n";
+ buf += "try {\n\tstruct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}";
+ buf += "\n\t~_SYNC_EXIT() {objc_sync_exit(sync_exit);}";
+ buf += "\n\tid sync_exit;";
+ buf += "\n\t} _sync_exit(_sync_obj);\n";
+
+ // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
+ // the sync expression is typically a message expression that's already
+ // been rewritten (which implies the SourceLocations are invalid).
+ SourceLocation RParenExprLoc = S->getSynchBody()->getLocStart();
+ const char *RParenExprLocBuf = SM->getCharacterData(RParenExprLoc);
+ while (*RParenExprLocBuf != ')') RParenExprLocBuf--;
+ RParenExprLoc = startLoc.getLocWithOffset(RParenExprLocBuf-startBuf);
+
+ SourceLocation LBraceLoc = S->getSynchBody()->getLocStart();
+ const char *LBraceLocBuf = SM->getCharacterData(LBraceLoc);
+ assert(*LBraceLocBuf == '{');
+ ReplaceText(RParenExprLoc, (LBraceLocBuf - SM->getCharacterData(RParenExprLoc) + 1), buf);
+
+ SourceLocation startRBraceLoc = S->getSynchBody()->getLocEnd();
+ assert((*SM->getCharacterData(startRBraceLoc) == '}') &&
+ "bogus @synchronized block");
+
+ buf = "} catch (id e) {_rethrow = e;}\n";
+ Write_RethrowObject(buf);
+ buf += "}\n";
+ buf += "}\n";
+
+ ReplaceText(startRBraceLoc, 1, buf);
+
+ return 0;
+}
+
+void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S)
+{
+ // Perform a bottom up traversal of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI)
+ WarnAboutReturnGotoStmts(*CI);
+
+ if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
+ Diags.Report(Context->getFullLoc(S->getLocStart()),
+ TryFinallyContainsReturnDiag);
+ }
+ return;
+}
+
+Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
+ ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt();
+ bool noCatch = S->getNumCatchStmts() == 0;
+ std::string buf;
+
+ if (finalStmt) {
+ if (noCatch)
+ buf = "{ id volatile _rethrow = 0;\n";
+ else {
+ buf = "{ id volatile _rethrow = 0;\ntry {\n";
+ }
+ }
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @try location");
+ if (finalStmt)
+ ReplaceText(startLoc, 1, buf);
+ else
+ // @try -> try
+ ReplaceText(startLoc, 1, "");
+
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
+ VarDecl *catchDecl = Catch->getCatchParamDecl();
+
+ startLoc = Catch->getLocStart();
+ bool AtRemoved = false;
+ if (catchDecl) {
+ QualType t = catchDecl->getType();
+ if (const ObjCObjectPointerType *Ptr = t->getAs<ObjCObjectPointerType>()) {
+ // Should be a pointer to a class.
+ ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
+ if (IDecl) {
+ std::string Result;
+ startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @catch location");
+ SourceLocation rParenLoc = Catch->getRParenLoc();
+ const char *rParenBuf = SM->getCharacterData(rParenLoc);
+
+ // _objc_exc_Foo *_e as argument to catch.
+ Result = "catch (_objc_exc_"; Result += IDecl->getNameAsString();
+ Result += " *_"; Result += catchDecl->getNameAsString();
+ Result += ")";
+ ReplaceText(startLoc, rParenBuf-startBuf+1, Result);
+ // Foo *e = (Foo *)_e;
+ Result.clear();
+ Result = "{ ";
+ Result += IDecl->getNameAsString();
+ Result += " *"; Result += catchDecl->getNameAsString();
+ Result += " = ("; Result += IDecl->getNameAsString(); Result += "*)";
+ Result += "_"; Result += catchDecl->getNameAsString();
+
+ Result += "; ";
+ SourceLocation lBraceLoc = Catch->getCatchBody()->getLocStart();
+ ReplaceText(lBraceLoc, 1, Result);
+ AtRemoved = true;
+ }
+ }
+ }
+ if (!AtRemoved)
+ // @catch -> catch
+ ReplaceText(startLoc, 1, "");
+
+ }
+ if (finalStmt) {
+ buf.clear();
+ if (noCatch)
+ buf = "catch (id e) {_rethrow = e;}\n";
+ else
+ buf = "}\ncatch (id e) {_rethrow = e;}\n";
+
+ SourceLocation startFinalLoc = finalStmt->getLocStart();
+ ReplaceText(startFinalLoc, 8, buf);
+ Stmt *body = finalStmt->getFinallyBody();
+ SourceLocation startFinalBodyLoc = body->getLocStart();
+ buf.clear();
+ Write_RethrowObject(buf);
+ ReplaceText(startFinalBodyLoc, 1, buf);
+
+ SourceLocation endFinalBodyLoc = body->getLocEnd();
+ ReplaceText(endFinalBodyLoc, 1, "}\n}");
+ // Now check for any return/goto statements within the @try.
+ WarnAboutReturnGotoStmts(S->getTryBody());
+ }
+
+ return 0;
+}
+
+// This can't be done with ReplaceStmt(S, ThrowExpr), since
+// the throw expression is typically a message expression that's already
+// been rewritten (which implies the SourceLocations are invalid).
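+//
+// As a sketch of the rewrite performed here (spacing is illustrative):
+//   @throw expr;   ->   objc_exception_throw(expr);
+//   @throw;        ->   throw;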
+Stmt *RewriteModernObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @throw location");
+
+ std::string buf;
+ /* void objc_exception_throw(id) __attribute__((noreturn)); */
+ if (S->getThrowExpr())
+ buf = "objc_exception_throw(";
+ else
+ buf = "throw";
+
+ // handle "@ throw" correctly.
+ const char *wBuf = strchr(startBuf, 'w');
+ assert((*wBuf == 'w') && "@throw: can't find 'w'");
+ ReplaceText(startLoc, wBuf-startBuf+1, buf);
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@throw: can't find ';'");
+ SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf);
+ if (S->getThrowExpr())
+ ReplaceText(semiLoc, 1, ");");
+ return 0;
+}
+
+Stmt *RewriteModernObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
+ // Create a new string expression.
+ QualType StrType = Context->getPointerType(Context->CharTy);
+ std::string StrEncoding;
+ Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
+ Expr *Replacement = StringLiteral::Create(*Context, StrEncoding,
+ StringLiteral::Ascii, false,
+ StrType, SourceLocation());
+ ReplaceStmt(Exp, Replacement);
+
+ // Replace this subexpr in the parent.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return Replacement;
+}
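+
+// A sketch of the effect of RewriteAtEncode above; the encoding string is
+// whatever getObjCEncodingForType() produces ("i" for int is illustrative):
+//   @encode(int)   ->   "i"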
+
+Stmt *RewriteModernObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
+ // Create a call to sel_registerName("selName").
+ SmallVector<Expr*, 8> SelExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ Exp->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size());
+ ReplaceStmt(Exp, SelExp);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return SelExp;
+}
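+
+// A sketch of the effect of RewriteAtSelector above (the selector name is
+// illustrative):
+//   @selector(foo:bar:)   ->   sel_registerName("foo:bar:")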
+
+CallExpr *RewriteModernObjC::SynthesizeCallToFunctionDecl(
+ FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = FD->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE =
+ new (Context) DeclRefExpr(FD, false, msgSendType, VK_LValue, SourceLocation());
+
+ // Now, we cast the reference to a pointer to the objc_msgSend type.
+ QualType pToFunc = Context->getPointerType(msgSendType);
+ ImplicitCastExpr *ICE =
+ ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
+ DRE, 0, VK_RValue);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+
+ CallExpr *Exp =
+ new (Context) CallExpr(*Context, ICE, args, nargs,
+ FT->getCallResultType(*Context),
+ VK_RValue, EndLoc);
+ return Exp;
+}
+
+static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
+ const char *&startRef, const char *&endRef) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '<')
+ startRef = startBuf; // mark the start.
+ if (*startBuf == '>') {
+ if (startRef && *startRef == '<') {
+ endRef = startBuf; // mark the end.
+ return true;
+ }
+ return false;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+static void scanToNextArgument(const char *&argRef) {
+ int angle = 0;
+ while (*argRef != ')' && (*argRef != ',' || angle > 0)) {
+ if (*argRef == '<')
+ angle++;
+ else if (*argRef == '>')
+ angle--;
+ argRef++;
+ }
+ assert(angle == 0 && "scanToNextArgument - bad protocol type syntax");
+}
+
+bool RewriteModernObjC::needToScanForQualifiers(QualType T) {
+ if (T->isObjCQualifiedIdType())
+ return true;
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ if (PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ if (T->isObjCObjectPointerType()) {
+ T = T->getPointeeType();
+ return T->isObjCQualifiedInterfaceType();
+ }
+ if (T->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(T);
+ return needToScanForQualifiers(ElemTy);
+ }
+ return false;
+}
+
+void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
+ QualType Type = E->getType();
+ if (needToScanForQualifiers(Type)) {
+ SourceLocation Loc, EndLoc;
+
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E)) {
+ Loc = ECE->getLParenLoc();
+ EndLoc = ECE->getRParenLoc();
+ } else {
+ Loc = E->getLocStart();
+ EndLoc = E->getLocEnd();
+ }
+ // This will defend against trying to rewrite synthesized expressions.
+ if (Loc.isInvalid() || EndLoc.isInvalid())
+ return;
+
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *endBuf = SM->getCharacterData(EndLoc);
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+}
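+
+// A sketch of the qualifier rewrite above (the cast shown is illustrative):
+// the protocol list is commented out so the type degrades to plain 'id':
+//   (id<NSCopying>)x   ->   (id/*<NSCopying>*/)x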
+
+void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) {
+ SourceLocation Loc;
+ QualType Type;
+ const FunctionProtoType *proto = 0;
+ if (VarDecl *VD = dyn_cast<VarDecl>(Dcl)) {
+ Loc = VD->getLocation();
+ Type = VD->getType();
+ }
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ // Check for ObjC 'id' and class types that have been adorned with protocol
+ // information (id<p>, C<p>*). The protocol references need to be rewritten!
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ assert(funcType && "missing function type");
+ proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ Type = proto->getResultType();
+ }
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ Type = FD->getType();
+ }
+ else
+ return;
+
+ if (needToScanForQualifiers(Type)) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = SM->getCharacterData(Loc);
+ const char *startBuf = endBuf;
+ while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart)
+ startBuf--; // scan backward (from the decl location) for return type.
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+ if (!proto)
+ return; // most likely, was a variable
+ // Now check arguments.
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *startFuncBuf = startBuf;
+ for (unsigned i = 0; i < proto->getNumArgs(); i++) {
+ if (needToScanForQualifiers(proto->getArgType(i))) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = startBuf;
+ // scan forward (from the decl location) for argument types.
+ scanToNextArgument(endBuf);
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc =
+ Loc.getLocWithOffset(startRef-startFuncBuf);
+ SourceLocation GreaterLoc =
+ Loc.getLocWithOffset(endRef-startFuncBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ startBuf = ++endBuf;
+ }
+ else {
+ // If the function name is derived from a macro expansion, then the
+ // argument buffer will not follow the name. Need to speak with Chris.
+ while (*startBuf && *startBuf != ')' && *startBuf != ',')
+ startBuf++; // scan forward (from the decl location) for argument types.
+ startBuf++;
+ }
+ }
+}
+
+void RewriteModernObjC::RewriteTypeOfDecl(VarDecl *ND) {
+ QualType QT = ND->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (!isa<TypeOfExprType>(TypePtr))
+ return;
+ while (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ TypePtr = QT->getAs<Type>();
+ }
+ // FIXME: This will not work for multiple declarators, as in:
+ // __typeof__(a) b,c,d;
+ std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy()));
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ if (ND->getInit()) {
+ std::string Name(ND->getNameAsString());
+ TypeAsString += " " + Name + " = ";
+ Expr *E = ND->getInit();
+ SourceLocation startLoc;
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ const char *endBuf = SM->getCharacterData(startLoc);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+ else {
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+}
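+
+// A sketch of the effect of RewriteTypeOfDecl above, assuming 'a' has type
+// int (names are illustrative):
+//   __typeof__(a) b = expr;   ->   int b = expr;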
+
+// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str);
+void RewriteModernObjC::SynthSelGetUidFunctionDecl() {
+ IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getFuncType =
+ getSimpleFunctionType(Context->getObjCSelType(), &ArgTys[0], ArgTys.size());
+ SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ SelGetUidIdent, getFuncType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+void RewriteModernObjC::RewriteFunctionDecl(FunctionDecl *FD) {
+ // declared in <objc/objc.h>
+ if (FD->getIdentifier() &&
+ FD->getName() == "sel_registerName") {
+ SelGetUidFunctionDecl = FD;
+ return;
+ }
+ RewriteObjCQualifiedInterfaceTypes(FD);
+}
+
+void RewriteModernObjC::RewriteBlockPointerType(std::string& Str, QualType Type) {
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ if (!strchr(argPtr, '^')) {
+ Str += TypeString;
+ return;
+ }
+ while (*argPtr) {
+ Str += (*argPtr == '^' ? '*' : *argPtr);
+ argPtr++;
+ }
+}
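+
+// A sketch of RewriteBlockPointerType's effect: each '^' in the printed type
+// is replaced with '*' (the signature shown is illustrative):
+//   int (^)(char, float)   ->   int (*)(char, float)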
+
+// FIXME. Consolidate this routine with RewriteBlockPointerType.
+void RewriteModernObjC::RewriteBlockPointerTypeVariable(std::string& Str,
+ ValueDecl *VD) {
+ QualType Type = VD->getType();
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ int paren = 0;
+ while (*argPtr) {
+ switch (*argPtr) {
+ case '(':
+ Str += *argPtr;
+ paren++;
+ break;
+ case ')':
+ Str += *argPtr;
+ paren--;
+ break;
+ case '^':
+ Str += '*';
+ if (paren == 1)
+ Str += VD->getNameAsString();
+ break;
+ default:
+ Str += *argPtr;
+ break;
+ }
+ argPtr++;
+ }
+}
+
+
+void RewriteModernObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ QualType Type = proto->getResultType();
+ std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
+ FdStr += " ";
+ FdStr += FD->getName();
+ FdStr += "(";
+ unsigned numArgs = proto->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ QualType ArgType = proto->getArgType(i);
+ RewriteBlockPointerType(FdStr, ArgType);
+ if (i+1 < numArgs)
+ FdStr += ", ";
+ }
+ FdStr += ");\n";
+ InsertText(FunLocStart, FdStr);
+ CurFunctionDeclToDeclareForBlock = 0;
+}
+
+// SynthSuperContructorFunctionDecl - id __rw_objc_super(id obj, id super);
+void RewriteModernObjC::SynthSuperContructorFunctionDecl() {
+ if (SuperContructorFunctionDecl)
+ return;
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(struct objc_super *, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendSuperFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
+ SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendSuperStretFunctionDecl -
+// id objc_msgSendSuper_stret(struct objc_super *, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendSuperStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent =
+ &Context->Idents.get("objc_msgSendSuper_stret");
+ SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendFpretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthGetClassFunctionDecl - id objc_getClass(const char *name);
+void RewriteModernObjC::SynthGetClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls);
+void RewriteModernObjC::SynthGetSuperClassFunctionDecl() {
+ IdentifierInfo *getSuperClassIdent =
+ &Context->Idents.get("class_getSuperclass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getObjCClassType());
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
+ &ArgTys[0], ArgTys.size());
+ GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getSuperClassIdent,
+ getClassType, 0,
+ SC_Extern,
+ SC_None,
+ false);
+}
+
+// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name);
+void RewriteModernObjC::SynthGetMetaClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
+ QualType strType = getConstantStringStructType();
+
+ std::string S = "__NSConstantStringImpl_";
+
+ std::string tmpName = InFileName;
+ unsigned i;
+ for (i=0; i < tmpName.length(); i++) {
+ char c = tmpName.at(i);
+ // Replace any non-alphanumeric characters with '_'.
+ if (!isalpha(c) && (c < '0' || c > '9'))
+ tmpName[i] = '_';
+ }
+ S += tmpName;
+ S += "_";
+ S += utostr(NumObjCStringLiterals++);
+
+ Preamble += "static __NSConstantStringImpl " + S;
+ Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,";
+ Preamble += "0x000007c8,"; // utf8_str
+ // The pretty printer for StringLiteral handles escape characters properly.
+ std::string prettyBufS;
+ llvm::raw_string_ostream prettyBuf(prettyBufS);
+ Exp->getString()->printPretty(prettyBuf, *Context, 0,
+ PrintingPolicy(LangOpts));
+ Preamble += prettyBuf.str();
+ Preamble += ",";
+ Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(S),
+ strType, 0, SC_Static, SC_None);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
+ SourceLocation());
+ Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ // cast to NSConstantString *
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
+ CK_CPointerToObjCPointerCast, Unop);
+ ReplaceStmt(Exp, cast);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return cast;
+}
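+
+// A sketch of the string-literal rewrite above (file suffix, counter, and
+// cast type are illustrative): @"hello" becomes
+//   (NSString *)&__NSConstantStringImpl_foo_m_0
+// backed by a preamble definition along the lines of:
+//   static __NSConstantStringImpl __NSConstantStringImpl_foo_m_0
+//     __attribute__ ((section ("__DATA, __cfstring"))) =
+//     {__CFConstantStringClassReference,0x000007c8,"hello",5};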
+
+Stmt *RewriteModernObjC::RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp) {
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+
+ Expr *FlagExp = IntegerLiteral::Create(*Context,
+ llvm::APInt(IntSize, Exp->getValue()),
+ Context->IntTy, Exp->getLocation());
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Context->ObjCBuiltinBoolTy,
+ CK_BitCast, FlagExp);
+ ParenExpr *PE = new (Context) ParenExpr(Exp->getLocation(), Exp->getExprLoc(),
+ cast);
+ ReplaceStmt(Exp, PE);
+ return PE;
+}
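+
+// A sketch of the bool-literal rewrite above; the printed cast type depends
+// on how ObjCBuiltinBoolTy is spelled for the target:
+//   @YES   ->   ((bool)1)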
+
+Stmt *RewriteModernObjC::RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 4> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ QualType expType = Exp->getType();
+
+ // Create a call to objc_getClass("NSNumber"). It will be th 1st argument.
+ ObjCInterfaceDecl *Class =
+ expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("numberWithBool:"), etc.
+ // It will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ ObjCMethodDecl *NumericMethod = Exp->getObjCNumericLiteralMethod();
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ NumericMethod->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // The user-provided numeric literal is the 3rd, and last, argument.
+ Expr *userExpr = Exp->getNumber();
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ QualType type = ICE->getType();
+ const Expr *SubExpr = ICE->IgnoreParenImpCasts();
+ CastKind CK = CK_BitCast;
+ if (SubExpr->getType()->isIntegralType(*Context) && type->isBooleanType())
+ CK = CK_IntegralToBoolean;
+ userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
+ }
+ MsgExprs.push_back(userExpr);
+
+ SmallVector<QualType, 4> ArgTypes;
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (ObjCMethodDecl::param_iterator PI = NumericMethod->param_begin(),
+ E = NumericMethod->param_end(); PI != E; ++PI)
+ ArgTypes.push_back((*PI)->getType());
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ NumericMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
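+
+// A sketch of the numeric-literal rewrite above (class, selector, and
+// argument type come from the literal's method; the ones shown are
+// illustrative):
+//   @42
+// becomes roughly:
+//   ((NSNumber *(*)(id, SEL, int))(void *)objc_msgSend)
+//       (objc_getClass("NSNumber"), sel_registerName("numberWithInt:"), 42)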
+
+Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Build the expression: __NSContainer_literal(int, ...).arr
+ QualType IntQT = Context->IntTy;
+ QualType NSArrayFType =
+ getSimpleFunctionType(Context->VoidTy, &IntQT, 1, true);
+ std::string NSArrayFName("__NSContainer_literal");
+ FunctionDecl *NSArrayFD = SynthBlockInitFunctionDecl(NSArrayFName);
+ DeclRefExpr *NSArrayDRE =
+ new (Context) DeclRefExpr(NSArrayFD, false, NSArrayFType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 16> InitExprs;
+ unsigned NumElements = Exp->getNumElements();
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *count = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ InitExprs.push_back(count);
+ for (unsigned i = 0; i < NumElements; i++)
+ InitExprs.push_back(Exp->getElement(i));
+ Expr *NSArrayCallExpr =
+ new (Context) CallExpr(*Context, NSArrayDRE, &InitExprs[0], InitExprs.size(),
+ NSArrayFType, VK_LValue, SourceLocation());
+
+ FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("arr"),
+ Context->getPointerType(Context->VoidPtrTy), 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *ArrayLiteralME =
+ new (Context) MemberExpr(NSArrayCallExpr, false, ARRFD,
+ SourceLocation(),
+ ARRFD->getType(), VK_LValue,
+ OK_Ordinary);
+ QualType ConstIdT = Context->getObjCIdType().withConst();
+ CStyleCastExpr * ArrayLiteralObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ ArrayLiteralME);
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 32> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ QualType expType = Exp->getType();
+
+ // Create a call to objc_getClass("NSArray"). It will be th 1st argument.
+ ObjCInterfaceDecl *Class =
+ expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("arrayWithObjects:count:").
+ // It will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ ObjCMethodDecl *ArrayMethod = Exp->getArrayWithObjectsMethod();
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ ArrayMethod->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // (const id [])objects
+ MsgExprs.push_back(ArrayLiteralObjects);
+
+ // (NSUInteger)cnt
+ Expr *cnt = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ MsgExprs.push_back(cnt);
+
+
+ SmallVector<QualType, 4> ArgTypes;
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (ObjCMethodDecl::param_iterator PI = ArrayMethod->param_begin(),
+ E = ArrayMethod->param_end(); PI != E; ++PI)
+ ArgTypes.push_back((*PI)->getType());
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ ArrayMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
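+
+// A sketch of the array-literal rewrite above (types abbreviated, names
+// illustrative):
+//   @[ a, b ]
+// becomes roughly:
+//   ((NSArray *(*)(id, SEL, const id *, NSUInteger))(void *)objc_msgSend)
+//       (objc_getClass("NSArray"),
+//        sel_registerName("arrayWithObjects:count:"),
+//        (const id *)__NSContainer_literal(2, a, b).arr, 2)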
+
+Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Build the expression: __NSContainer_literal(int, ...).arr
+ QualType IntQT = Context->IntTy;
+ QualType NSDictFType =
+ getSimpleFunctionType(Context->VoidTy, &IntQT, 1, true);
+ std::string NSDictFName("__NSContainer_literal");
+ FunctionDecl *NSDictFD = SynthBlockInitFunctionDecl(NSDictFName);
+ DeclRefExpr *NSDictDRE =
+ new (Context) DeclRefExpr(NSDictFD, false, NSDictFType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 16> KeyExprs;
+ SmallVector<Expr*, 16> ValueExprs;
+
+ unsigned NumElements = Exp->getNumElements();
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *count = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ KeyExprs.push_back(count);
+ ValueExprs.push_back(count);
+ for (unsigned i = 0; i < NumElements; i++) {
+ ObjCDictionaryElement Element = Exp->getKeyValueElement(i);
+ KeyExprs.push_back(Element.Key);
+ ValueExprs.push_back(Element.Value);
+ }
+
+ // (const id [])objects
+ Expr *NSValueCallExpr =
+ new (Context) CallExpr(*Context, NSDictDRE, &ValueExprs[0], ValueExprs.size(),
+ NSDictFType, VK_LValue, SourceLocation());
+
+ FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("arr"),
+ Context->getPointerType(Context->VoidPtrTy), 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *DictLiteralValueME =
+ new (Context) MemberExpr(NSValueCallExpr, false, ARRFD,
+ SourceLocation(),
+ ARRFD->getType(), VK_LValue,
+ OK_Ordinary);
+ QualType ConstIdT = Context->getObjCIdType().withConst();
+ CStyleCastExpr * DictValueObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ DictLiteralValueME);
+ // (const id <NSCopying> [])keys
+ Expr *NSKeyCallExpr =
+ new (Context) CallExpr(*Context, NSDictDRE, &KeyExprs[0], KeyExprs.size(),
+ NSDictFType, VK_LValue, SourceLocation());
+
+ MemberExpr *DictLiteralKeyME =
+ new (Context) MemberExpr(NSKeyCallExpr, false, ARRFD,
+ SourceLocation(),
+ ARRFD->getType(), VK_LValue,
+ OK_Ordinary);
+
+ CStyleCastExpr * DictKeyObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ DictLiteralKeyME);
+
+
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 32> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ QualType expType = Exp->getType();
+
+ // Create a call to objc_getClass("NSArray"). It will be th 1st argument.
+ ObjCInterfaceDecl *Class =
+ expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("arrayWithObjects:count:").
+ // it will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ ObjCMethodDecl *DictMethod = Exp->getDictWithObjectsMethod();
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ DictMethod->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // (const id [])objects
+ MsgExprs.push_back(DictValueObjects);
+
+ // (const id <NSCopying> [])keys
+ MsgExprs.push_back(DictKeyObjects);
+
+ // (NSUInteger)cnt
+ Expr *cnt = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ MsgExprs.push_back(cnt);
+
+
+ SmallVector<QualType, 8> ArgTypes;
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (ObjCMethodDecl::param_iterator PI = DictMethod->param_begin(),
+ E = DictMethod->param_end(); PI != E; ++PI) {
+ QualType T = (*PI)->getType();
+ if (const PointerType* PT = T->getAs<PointerType>()) {
+ QualType PointeeTy = PT->getPointeeType();
+ convertToUnqualifiedObjCType(PointeeTy);
+ T = Context->getPointerType(PointeeTy);
+ }
+ ArgTypes.push_back(T);
+ }
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ DictMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
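+
+// A sketch of the dictionary-literal rewrite above (types abbreviated, names
+// illustrative):
+//   @{ k : v }
+// becomes roughly:
+//   ((NSDictionary *(*)(id, SEL, const id *, const id *, NSUInteger))
+//        (void *)objc_msgSend)
+//       (objc_getClass("NSDictionary"),
+//        sel_registerName("dictionaryWithObjects:forKeys:count:"),
+//        (const id *)__NSContainer_literal(1, v).arr,
+//        (const id *)__NSContainer_literal(1, k).arr, 1)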
+
+// struct objc_super { struct objc_object *receiver; struct objc_class *super; };
+QualType RewriteModernObjC::getSuperStructType() {
+ if (!SuperStructDecl) {
+ SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType FieldTypes[2];
+
+ // struct objc_object *receiver;
+ FieldTypes[0] = Context->getObjCIdType();
+ // struct objc_class *super;
+ FieldTypes[1] = Context->getObjCClassType();
+
+ // Create fields
+ for (unsigned i = 0; i < 2; ++i) {
+ SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], 0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false));
+ }
+
+ SuperStructDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(SuperStructDecl);
+}
+
+QualType RewriteModernObjC::getConstantStringStructType() {
+ if (!ConstantStringDecl) {
+ ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__NSConstantStringImpl"));
+ QualType FieldTypes[4];
+
+ // struct objc_object *receiver;
+ FieldTypes[0] = Context->getObjCIdType();
+ // int flags;
+ FieldTypes[1] = Context->IntTy;
+ // char *str;
+ FieldTypes[2] = Context->getPointerType(Context->CharTy);
+ // long length;
+ FieldTypes[3] = Context->LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ ConstantStringDecl->addDecl(FieldDecl::Create(*Context,
+ ConstantStringDecl,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], 0,
+ /*BitWidth=*/0,
+ /*Mutable=*/true,
+ /*HasInit=*/false));
+ }
+
+ ConstantStringDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(ConstantStringDecl);
+}
+
+Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!MsgSendSuperFunctionDecl)
+ SynthMsgSendSuperFunctionDecl();
+ if (!MsgSendStretFunctionDecl)
+ SynthMsgSendStretFunctionDecl();
+ if (!MsgSendSuperStretFunctionDecl)
+ SynthMsgSendSuperStretFunctionDecl();
+ if (!MsgSendFpretFunctionDecl)
+ SynthMsgSendFpretFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+ if (!GetSuperClassFunctionDecl)
+ SynthGetSuperClassFunctionDecl();
+ if (!GetMetaClassFunctionDecl)
+ SynthGetMetaClassFunctionDecl();
+
+ // default to objc_msgSend().
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ // May need to use objc_msgSend_stret() as well.
+ FunctionDecl *MsgSendStretFlavor = 0;
+ if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
+ QualType resultType = mDecl->getResultType();
+ if (resultType->isRecordType())
+ MsgSendStretFlavor = MsgSendStretFunctionDecl;
+ else if (resultType->isRealFloatingType())
+ MsgSendFlavor = MsgSendFpretFunctionDecl;
+ }
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 8> MsgExprs;
+ switch (Exp->getReceiverKind()) {
+ case ObjCMessageExpr::SuperClass: {
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // set the receiver to self, the first argument to all methods.
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue,
+ SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ ClassDecl->getIdentifier()->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc,
+ EndLoc);
+ // (Class)objc_getClass("CurrentClass")
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCClassType(),
+ CK_BitCast, Cls);
+ ClsExprs.clear();
+ ClsExprs.push_back(ArgExpr);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
+ &ClsExprs[0], ClsExprs.size(),
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'.
+ InitExprs.push_back( // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperContructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
+ InitExprs.size(),
+ superType, VK_LValue,
+ SourceLocation());
+ // The code for super is a little tricky to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(),
+ &InitExprs[0], InitExprs.size(),
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_LValue,
+ ILE, false);
+ // struct objc_super *
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Class: {
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ObjCInterfaceDecl *Class
+ = Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:{
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+ SmallVector<Expr*, 4> InitExprs;
+
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue, SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ ClassDecl->getIdentifier()->getName(),
+ StringLiteral::Ascii, false, argType,
+ SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ // (Class)objc_getClass("CurrentClass")
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCClassType(),
+ CK_BitCast, Cls);
+ ClsExprs.clear();
+ ClsExprs.push_back(ArgExpr);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
+ &ClsExprs[0], ClsExprs.size(),
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'.
+ InitExprs.push_back(
+ // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperContructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
+ InitExprs.size(),
+ superType, VK_LValue, SourceLocation());
+ // The code for super is a little tricky to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(),
+ &InitExprs[0], InitExprs.size(),
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_RValue, ILE,
+ false);
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Instance: {
+ // Remove all type-casts because they may contain objc-style types; e.g.
+ // Foo<Proto> *.
+ Expr *recExpr = Exp->getInstanceReceiver();
+ while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
+ recExpr = CE->getSubExpr();
+ CastKind CK = recExpr->getType()->isObjCObjectPointerType()
+ ? CK_BitCast : recExpr->getType()->isBlockPointerType()
+ ? CK_BlockPointerToObjCPointerCast
+ : CK_CPointerToObjCPointerCast;
+
+ recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, recExpr);
+ MsgExprs.push_back(recExpr);
+ break;
+ }
+ }
+
+ // Create a call to sel_registerName("selName"), it will be the 2nd argument.
+ SmallVector<Expr*, 8> SelExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ Exp->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc,
+ EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // Now push any user supplied arguments.
+ for (unsigned i = 0; i < Exp->getNumArgs(); i++) {
+ Expr *userExpr = Exp->getArg(i);
+ // Make all implicit casts explicit...ICE comes in handy:-)
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ // Reuse the ICE type, it is exactly what the doctor ordered.
+ QualType type = ICE->getType();
+ if (needToScanForQualifiers(type))
+ type = Context->getObjCIdType();
+ // Make sure we convert "type (^)(...)" to "type (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(type);
+ const Expr *SubExpr = ICE->IgnoreParenImpCasts();
+ CastKind CK;
+ if (SubExpr->getType()->isIntegralType(*Context) &&
+ type->isBooleanType()) {
+ CK = CK_IntegralToBoolean;
+ } else if (type->isObjCObjectPointerType()) {
+ if (SubExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (SubExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ } else {
+ CK = CK_BitCast;
+ }
+
+ userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
+ }
+ // Make id<P...> cast into an 'id' cast.
+ else if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(userExpr)) {
+ if (CE->getType()->isObjCQualifiedIdType()) {
+ while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
+ userExpr = CE->getSubExpr();
+ CastKind CK;
+ if (userExpr->getType()->isIntegralType(*Context)) {
+ CK = CK_IntegralToPointer;
+ } else if (userExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (userExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, userExpr);
+ }
+ }
+ MsgExprs.push_back(userExpr);
+ // We've transferred the ownership to MsgExprs. For now, we *don't* null
+ // out the argument in the original expression (since we aren't deleting
+ // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info.
+ //Exp->setArg(i, 0);
+ }
+ // Generate the funky cast.
+ CastExpr *cast;
+ SmallVector<QualType, 8> ArgTypes;
+ QualType returnType;
+
+ // Push 'id' and 'SEL', the 2 implicit arguments.
+ if (MsgSendFlavor == MsgSendSuperFunctionDecl)
+ ArgTypes.push_back(Context->getPointerType(getSuperStructType()));
+ else
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
+ // Push any user argument types.
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ QualType t = (*PI)->getType()->isObjCQualifiedIdType()
+ ? Context->getObjCIdType()
+ : (*PI)->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(t);
+ ArgTypes.push_back(t);
+ }
+ returnType = Exp->getType();
+ convertToUnqualifiedObjCType(returnType);
+ (void)convertBlockPointerToFunctionPointer(returnType);
+ } else {
+ returnType = Context->getObjCIdType();
+ }
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ // Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid).
+ // If we don't do this cast, we get the following bizarre warning/note:
+ // xx.m:13: warning: function called through a non-compatible type
+ // xx.m:13: note: if this code is reached, the program will abort
+ cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ // If we don't have a method decl, force a variadic cast.
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ Stmt *ReplacingStmt = CE;
+ if (MsgSendStretFlavor) {
+ // We have a method which returns a struct/union. We must also generate a
+ // call to objc_msgSend_stret and hang both varieties on a conditional
+ // expression which dictates which one to invoke depending on the size of
+ // the method's return type.
+
+ // Create a reference to the objc_msgSend_stret() declaration.
+ DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
+ false, msgSendType,
+ VK_LValue, SourceLocation());
+ // Need to cast objc_msgSend_stret to "void *" (see above comment).
+ cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, STDRE);
+ // Now do the "normal" pointer to function cast.
+ castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
+
+ FT = msgSendType->getAs<FunctionType>();
+ CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ SourceLocation());
+
+ // Build sizeof(returnType)
+ UnaryExprOrTypeTraitExpr *sizeofExpr =
+ new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf,
+ Context->getTrivialTypeSourceInfo(returnType),
+ Context->getSizeType(), SourceLocation(),
+ SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+ // FIXME: The value of 8 is based on the ppc32/x86 ABI for the most common cases.
+ // For X86 it is more complicated and some kind of target specific routine
+ // is needed to decide what to do.
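+ // For reference, the synthesized dispatch looks roughly like this sketch,
+ // where T stands for the method's return type:
+ // (sizeof(T) <= 8 ? ((T (*)(id, SEL))(void *)objc_msgSend)(obj, sel)
+ // : ((T (*)(id, SEL))(void *)objc_msgSend_stret)(obj, sel))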
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ IntegerLiteral *limit = IntegerLiteral::Create(*Context,
+ llvm::APInt(IntSize, 8),
+ Context->IntTy,
+ SourceLocation());
+ BinaryOperator *lessThanExpr =
+ new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
+ VK_RValue, OK_Ordinary, SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(lessThanExpr,
+ SourceLocation(), CE,
+ SourceLocation(), STCE,
+ returnType, VK_RValue, OK_Ordinary);
+ ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ CondExpr);
+ }
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
+
+Stmt *RewriteModernObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
+ Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
+ Exp->getLocEnd());
+
+ // Now do the actual rewrite.
+ ReplaceStmt(Exp, ReplacingStmt);
+
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
+
+// typedef struct objc_object Protocol;
+QualType RewriteModernObjC::getProtocolType() {
+ if (!ProtocolTypeDecl) {
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(Context->getObjCIdType());
+ ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("Protocol"),
+ TInfo);
+ }
+ return Context->getTypeDeclType(ProtocolTypeDecl);
+}
+
+/// RewriteObjCProtocolExpr - Rewrite a protocol expression into
+/// a synthesized/forward data reference (to the protocol's metadata).
+/// The forward references (and metadata) are generated in
+/// RewriteModernObjC::HandleTranslationUnit().
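+/// For example (a sketch), '@protocol(Foo)' is rewritten to roughly:
+/// (Protocol *)&_OBJC_PROTOCOL_REFERENCE_$_Foo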
+Stmt *RewriteModernObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
+ std::string Name = "_OBJC_PROTOCOL_REFERENCE_$_" +
+ Exp->getProtocol()->getNameAsString();
+ IdentifierInfo *ID = &Context->Idents.get(Name);
+ VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, getProtocolType(), 0,
+ SC_Extern, SC_None);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
+ VK_LValue, SourceLocation());
+ Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
+ CK_BitCast,
+ DerefExpr);
+ ReplaceStmt(Exp, castExpr);
+ ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return castExpr;
+
+}
+
+bool RewriteModernObjC::BufferContainsPPDirectives(const char *startBuf,
+ const char *endBuf) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '#') {
+ // Skip whitespace.
+ for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf)
+ ;
+ if (!strncmp(startBuf, "if", strlen("if")) ||
+ !strncmp(startBuf, "ifdef", strlen("ifdef")) ||
+ !strncmp(startBuf, "ifndef", strlen("ifndef")) ||
+ !strncmp(startBuf, "define", strlen("define")) ||
+ !strncmp(startBuf, "undef", strlen("undef")) ||
+ !strncmp(startBuf, "else", strlen("else")) ||
+ !strncmp(startBuf, "elif", strlen("elif")) ||
+ !strncmp(startBuf, "endif", strlen("endif")) ||
+ !strncmp(startBuf, "pragma", strlen("pragma")) ||
+ !strncmp(startBuf, "include", strlen("include")) ||
+ !strncmp(startBuf, "import", strlen("import")) ||
+ !strncmp(startBuf, "include_next", strlen("include_next")))
+ return true;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+/// RewriteObjCFieldDeclType - This routine rewrites a type into the buffer.
+/// It handles elaborated types, as well as enum types in the process.
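+/// For example (a sketch; formatting approximate), an ivar declared as
+/// 'enum E { A, B } e;' is written out as:
+/// enum E {
+/// A = 0,
+/// B = 1,
+/// } e;
+/// with the tag definition emitted only once per interface.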
+bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
+ std::string &Result) {
+ if (Type->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(Type);
+ return RewriteObjCFieldDeclType(ElemTy, Result);
+ }
+ else if (Type->isRecordType()) {
+ RecordDecl *RD = Type->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition()) {
+ if (RD->isStruct())
+ Result += "\n\tstruct ";
+ else if (RD->isUnion())
+ Result += "\n\tunion ";
+ else
+ assert(false && "class not allowed as an ivar type");
+
+ Result += RD->getName();
+ if (TagsDefinedInIvarDecls.count(RD)) {
+ // This struct is already defined. Do not write its definition again.
+ Result += " ";
+ return true;
+ }
+ TagsDefinedInIvarDecls.insert(RD);
+ Result += " {\n";
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i) {
+ FieldDecl *FD = *i;
+ RewriteObjCFieldDecl(FD, Result);
+ }
+ Result += "\t} ";
+ return true;
+ }
+ }
+ else if (Type->isEnumeralType()) {
+ EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
+ if (ED->isCompleteDefinition()) {
+ Result += "\n\tenum ";
+ Result += ED->getName();
+ if (TagsDefinedInIvarDecls.count(ED)) {
+ // This enum is already defined. Do not write its definition again.
+ Result += " ";
+ return true;
+ }
+ TagsDefinedInIvarDecls.insert(ED);
+
+ Result += " {\n";
+ for (EnumDecl::enumerator_iterator EC = ED->enumerator_begin(),
+ ECEnd = ED->enumerator_end(); EC != ECEnd; ++EC) {
+ Result += "\t"; Result += EC->getName(); Result += " = ";
+ llvm::APSInt Val = EC->getInitVal();
+ Result += Val.toString(10);
+ Result += ",\n";
+ }
+ Result += "\t} ";
+ return true;
+ }
+ }
+
+ Result += "\t";
+ convertObjCTypeToCStyleType(Type);
+ return false;
+}
+
+
+/// RewriteObjCFieldDecl - This routine rewrites a field into the buffer.
+/// It handles elaborated types, as well as enum types in the process.
+void RewriteModernObjC::RewriteObjCFieldDecl(FieldDecl *fieldDecl,
+ std::string &Result) {
+ QualType Type = fieldDecl->getType();
+ std::string Name = fieldDecl->getNameAsString();
+
+ bool ElaboratedType = RewriteObjCFieldDeclType(Type, Result);
+ if (!ElaboratedType)
+ Type.getAsStringInternal(Name, Context->getPrintingPolicy());
+ Result += Name;
+ if (fieldDecl->isBitField()) {
+ Result += " : "; Result += utostr(fieldDecl->getBitWidthValue(*Context));
+ }
+ else if (ElaboratedType && Type->isArrayType()) {
+ CanQualType CType = Context->getCanonicalType(Type);
+ while (isa<ArrayType>(CType)) {
+ if (const ConstantArrayType *CAT = Context->getAsConstantArrayType(CType)) {
+ Result += "[";
+ llvm::APInt Dim = CAT->getSize();
+ Result += utostr(Dim.getZExtValue());
+ Result += "]";
+ }
+ CType = CType->getAs<ArrayType>()->getElementType();
+ }
+ }
+
+ Result += ";\n";
+}
+
+/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
+/// an objective-c class with ivars.
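+/// A sketch of the output for '@interface Foo : NSObject { int x; } @end',
+/// assuming the NSObject struct was itself synthesized:
+/// struct Foo_IMPL {
+/// struct NSObject_IMPL NSObject_IVARS;
+/// int x;
+/// };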
+void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ assert(CDecl && "Class missing in SynthesizeObjCInternalStruct");
+ assert(CDecl->getName() != "" &&
+ "Name missing in SynthesizeObjCInternalStruct");
+ ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+ for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ IVars.push_back(IVD);
+
+ SourceLocation LocStart = CDecl->getLocStart();
+ SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+ // If this class has no ivars and either has no root class or its root,
+ // directly or indirectly, has no ivars (and thus was not synthesized),
+ // then there is no need to synthesize this class.
+ if ((!CDecl->isThisDeclarationADefinition() || IVars.size() == 0) &&
+ (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ return;
+ }
+
+ Result += "\nstruct ";
+ Result += CDecl->getNameAsString();
+ Result += "_IMPL {\n";
+
+ if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) {
+ Result += "\tstruct "; Result += RCDecl->getNameAsString();
+ Result += "_IMPL "; Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n";
+ }
+ TagsDefinedInIvarDecls.clear();
+ for (unsigned i = 0, e = IVars.size(); i < e; i++)
+ RewriteObjCFieldDecl(IVars[i], Result);
+
+ Result += "};\n";
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ // Mark this struct as having been generated.
+ if (!ObjCSynthesizedStructs.insert(CDecl))
+ llvm_unreachable("struct already synthesize- RewriteObjCInternalStruct");
+}
+
+static void WriteInternalIvarName(ObjCInterfaceDecl *IDecl,
+ ObjCIvarDecl *IvarDecl, std::string &Result) {
+ Result += "OBJC_IVAR_$_";
+ Result += IDecl->getName();
+ Result += "$";
+ Result += IvarDecl->getName();
+}
+
+/// RewriteIvarOffsetSymbols - Rewrite ivar offset symbols of those ivars which
+/// have been referenced in an ivar access expression.
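+/// For example (a sketch), a referenced ivar 'x' of class 'Foo' produces:
+/// extern "C" unsigned long OBJC_IVAR_$_Foo$x;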
+void RewriteModernObjC::RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ // write out ivar offset symbols which have been referenced in an ivar
+ // access expression.
+ llvm::SmallPtrSet<ObjCIvarDecl *, 8> Ivars = ReferencedIvars[CDecl];
+ if (Ivars.empty())
+ return;
+ for (llvm::SmallPtrSet<ObjCIvarDecl *, 8>::iterator i = Ivars.begin(),
+ e = Ivars.end(); i != e; i++) {
+ ObjCIvarDecl *IvarDecl = (*i);
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_ivar$B\")) ";
+ Result += "extern \"C\" ";
+ if (LangOpts.MicrosoftExt &&
+ IvarDecl->getAccessControl() != ObjCIvarDecl::Private &&
+ IvarDecl->getAccessControl() != ObjCIvarDecl::Package)
+ Result += "__declspec(dllimport) ";
+
+ Result += "unsigned long ";
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += ";";
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Meta Data Emission
+//===----------------------------------------------------------------------===//
+
+
+/// RewriteImplementations - This routine rewrites all method implementations
+/// and emits meta-data.
+
+void RewriteModernObjC::RewriteImplementations() {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // Rewrite implemented methods
+ for (int i = 0; i < ClsDefCount; i++) {
+ ObjCImplementationDecl *OIMP = ClassImplementation[i];
+ ObjCInterfaceDecl *CDecl = OIMP->getClassInterface();
+ if (CDecl->isImplicitInterfaceDecl())
+ assert(false &&
+ "Legacy implicit interface rewriting not supported in modern ABI");
+ RewriteImplementationDecl(OIMP);
+ }
+
+ for (int i = 0; i < CatDefCount; i++) {
+ ObjCCategoryImplDecl *CIMP = CategoryImplementation[i];
+ ObjCInterfaceDecl *CDecl = CIMP->getClassInterface();
+ if (CDecl->isImplicitInterfaceDecl())
+ assert(false &&
+ "Legacy implicit interface rewriting not supported in modern ABI");
+ RewriteImplementationDecl(CIMP);
+ }
+}
+
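+/// RewriteByRefString - Emits the name of the synthesized byref struct for a
+/// __block variable, e.g. (a sketch) 'struct __Block_byref_x_0' when 'def'
+/// is true, or '__Block_byref_x_0' otherwise.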
+void RewriteModernObjC::RewriteByRefString(std::string &ResultStr,
+ const std::string &Name,
+ ValueDecl *VD, bool def) {
+ assert(BlockByRefDeclNo.count(VD) &&
+ "RewriteByRefString: ByRef decl missing");
+ if (def)
+ ResultStr += "struct ";
+ ResultStr += "__Block_byref_" + Name +
+ "_" + utostr(BlockByRefDeclNo[VD]) ;
+}
+
+static bool HasLocalVariableExternalStorage(ValueDecl *VD) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage());
+ return false;
+}
+
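+/// SynthesizeBlockFunc - Synthesizes the static function that implements a
+/// block's body. A minimal sketch for an argument-less block (block 0 in
+/// main) returning void:
+/// static void __main_block_func_0(struct __main_block_impl_0 *__cself) {
+/// /* rewritten block body */
+/// }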
+std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ const FunctionType *AFT = CE->getFunctionType();
+ QualType RT = AFT->getResultType();
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
+ funcName.str() + "_block_func_" + utostr(i);
+
+ BlockDecl *BD = CE->getBlockDecl();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ // No user-supplied arguments. Still need to pass in a pointer to the
+ // block (to reference imported block decl refs).
+ S += "(" + StructRef + " *__cself)";
+ } else if (BD->param_empty()) {
+ S += "(" + StructRef + " *__cself)";
+ } else {
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ assert(FT && "SynthesizeBlockFunc: No function proto");
+ S += '(';
+ // first add the implicit argument.
+ S += StructRef + " *__cself, ";
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) S += ", ";
+ ParamStr = (*AI)->getNameAsString();
+ QualType QT = (*AI)->getType();
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
+ S += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) S += ", ";
+ S += "...";
+ }
+ S += ')';
+ }
+ S += " {\n";
+
+ // Create local declarations to avoid rewriting all closure decl ref exprs.
+ // First, emit a declaration for all "by ref" decls.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ std::string TypeString;
+ RewriteByRefString(TypeString, Name, (*I));
+ TypeString += " *";
+ Name = TypeString + Name;
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
+ }
+ // Next, emit a declaration for all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedClosure)(void);
+ // myImportedClosure = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherClosure)(void);
+ // anotherClosure = ^(void) {
+ // myImportedClosure(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ RewriteBlockPointerTypeVariable(S, (*I));
+ S += " = (";
+ RewriteBlockPointerType(S, (*I)->getType());
+ S += ")";
+ S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ else {
+ std::string Name = (*I)->getNameAsString();
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ S += Name + " = __cself->" +
+ (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ }
+ std::string RewrittenStr = RewrittenBlockExprs[CE];
+ const char *cstr = RewrittenStr.c_str();
+ while (*cstr++ != '{') ;
+ S += cstr;
+ S += "\n";
+ return S;
+}
+
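+/// SynthesizeBlockHelperFuncs - Synthesizes the copy/dispose helpers for a
+/// block that imports objects. A sketch for a block importing an object
+/// pointer 'obj' (block 0 in main):
+/// static void __main_block_copy_0(struct __main_block_impl_0*dst,
+/// struct __main_block_impl_0*src) {
+/// _Block_object_assign((void*)&dst->obj, (void*)src->obj,
+/// 3/*BLOCK_FIELD_IS_OBJECT*/);
+/// }
+/// static void __main_block_dispose_0(struct __main_block_impl_0*src) {
+/// _Block_object_dispose((void*)src->obj, 3/*BLOCK_FIELD_IS_OBJECT*/);
+/// }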
+std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static void __";
+
+ S += funcName;
+ S += "_block_copy_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*dst, " + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ ValueDecl *VD = (*I);
+ S += "_Block_object_assign((void*)&dst->";
+ S += (*I)->getNameAsString();
+ S += ", (void*)src->";
+ S += (*I)->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count((*I)))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+
+ S += "\nstatic void __";
+ S += funcName;
+ S += "_block_dispose_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ ValueDecl *VD = (*I);
+ S += "_Block_object_dispose((void*)src->";
+ S += (*I)->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count((*I)))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+ return S;
+}
+
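+/// SynthesizeBlockImpl - Synthesizes the struct type for a block literal,
+/// including its constructor. A minimal sketch for a capture-free block
+/// (block 0 in main):
+/// struct __main_block_impl_0 {
+/// struct __block_impl impl;
+/// struct __main_block_desc_0* Desc;
+/// __main_block_impl_0(void *fp, struct __main_block_desc_0 *desc,
+/// int flags=0) {
+/// impl.isa = &_NSConcreteStackBlock;
+/// impl.Flags = flags;
+/// impl.FuncPtr = fp;
+/// Desc = desc;
+/// }
+/// };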
+std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ std::string Desc) {
+ std::string S = "\nstruct " + Tag;
+ std::string Constructor = " " + Tag;
+
+ S += " {\n struct __block_impl impl;\n";
+ S += " struct " + Desc;
+ S += "* Desc;\n";
+
+ Constructor += "(void *fp, "; // Invoke function pointer.
+ Constructor += "struct " + Desc; // Descriptor pointer.
+ Constructor += " *desc";
+
+ if (BlockDeclRefs.size()) {
+ // Output all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(FieldName, Context->getPrintingPolicy());
+ QT.getAsStringInternal(ArgName, Context->getPrintingPolicy());
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + ";\n";
+ }
+ // Output all "by ref" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ {
+ std::string TypeString;
+ RewriteByRefString(TypeString, FieldName, (*I));
+ TypeString += " *";
+ FieldName = TypeString + FieldName;
+ ArgName = TypeString + ArgName;
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + "; // by ref\n";
+ }
+ // Finish writing the constructor.
+ Constructor += ", int flags=0)";
+ // Initialize all "by copy" arguments.
+ bool firstTime = true;
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ if (firstTime) {
+ Constructor += " : ";
+ firstTime = false;
+ }
+ else
+ Constructor += ", ";
+ if (isTopLevelBlockPointerType((*I)->getType()))
+ Constructor += Name + "((struct __block_impl *)_" + Name + ")";
+ else
+ Constructor += Name + "(_" + Name + ")";
+ }
+ // Initialize all "by ref" arguments.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ if (firstTime) {
+ Constructor += " : ";
+ firstTime = false;
+ }
+ else
+ Constructor += ", ";
+ Constructor += Name + "(_" + Name + "->__forwarding)";
+ }
+
+ Constructor += " {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+
+ Constructor += " Desc = desc;\n";
+ } else {
+ // Finish writing the constructor.
+ Constructor += ", int flags=0) {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+ Constructor += " Desc = desc;\n";
+ }
+ Constructor += " ";
+ Constructor += "}\n";
+ S += Constructor;
+ S += "};\n";
+ return S;
+}
+
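+/// SynthesizeBlockDescriptor - Synthesizes the block's static descriptor. A
+/// sketch for a block with no copy/dispose helpers (block 0 in main):
+/// static struct __main_block_desc_0 {
+/// unsigned long reserved;
+/// unsigned long Block_size;
+/// } __main_block_desc_0_DATA = { 0, sizeof(struct __main_block_impl_0) };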
+std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag, int i,
+ StringRef FunName,
+ unsigned hasCopy) {
+ std::string S = "\nstatic struct " + DescTag;
+
+ S += " {\n unsigned long reserved;\n";
+ S += " unsigned long Block_size;\n";
+ if (hasCopy) {
+ S += " void (*copy)(struct ";
+ S += ImplTag; S += "*, struct ";
+ S += ImplTag; S += "*);\n";
+
+ S += " void (*dispose)(struct ";
+ S += ImplTag; S += "*);\n";
+ }
+ S += "} ";
+
+ S += DescTag + "_DATA = { 0, sizeof(struct ";
+ S += ImplTag + ")";
+ if (hasCopy) {
+ S += ", __" + FunName.str() + "_block_copy_" + utostr(i);
+ S += ", __" + FunName.str() + "_block_dispose_" + utostr(i);
+ }
+ S += "};\n";
+ return S;
+}
+
+void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName) {
+ // Insert declaration for the function in which block literal is used.
+ if (CurFunctionDeclToDeclareForBlock && !Blocks.empty())
+ RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
+ bool RewriteSC = (GlobalVarDecl &&
+ !Blocks.empty() &&
+ GlobalVarDecl->getStorageClass() == SC_Static &&
+ GlobalVarDecl->getType().getCVRQualifiers());
+ if (RewriteSC) {
+ std::string SC(" void __");
+ SC += GlobalVarDecl->getNameAsString();
+ SC += "() {}";
+ InsertText(FunLocStart, SC);
+ }
+
+ // Insert closures that were part of the function.
+ for (unsigned i = 0, count=0; i < Blocks.size(); i++) {
+ CollectBlockDeclRefInfo(Blocks[i]);
+ // Need to copy in the inner blocks' copied-in variables that are not
+ // actually used in this block.
+ for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
+ DeclRefExpr *Exp = InnerDeclRefs[count++];
+ ValueDecl *VD = Exp->getDecl();
+ BlockDeclRefs.push_back(Exp);
+ if (!VD->hasAttr<BlocksAttr>()) {
+ if (!BlockByCopyDeclsPtrSet.count(VD)) {
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ continue;
+ }
+
+ if (!BlockByRefDeclsPtrSet.count(VD)) {
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+
+ // Imported objects in the inner blocks that are not used in the outer
+ // block must be copied/disposed in the outer block as well.
+ if (VD->getType()->isObjCObjectPointerType() ||
+ VD->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(VD);
+ }
+
+ std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i);
+ std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i);
+
+ std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag);
+
+ InsertText(FunLocStart, CI);
+
+ std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag);
+
+ InsertText(FunLocStart, CF);
+
+ if (ImportedBlockDecls.size()) {
+ std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag);
+ InsertText(FunLocStart, HF);
+ }
+ std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName,
+ ImportedBlockDecls.size() > 0);
+ InsertText(FunLocStart, BD);
+
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ }
+ if (RewriteSC) {
+ // Must insert any 'const/volatile/static' here, since it has been
+ // removed as a result of rewriting the block literals.
+ std::string SC;
+ if (GlobalVarDecl->getStorageClass() == SC_Static)
+ SC = "static ";
+ if (GlobalVarDecl->getType().isConstQualified())
+ SC += "const ";
+ if (GlobalVarDecl->getType().isVolatileQualified())
+ SC += "volatile ";
+ if (GlobalVarDecl->getType().isRestrictQualified())
+ SC += "restrict ";
+ InsertText(FunLocStart, SC);
+ }
+ if (GlobalConstructionExp) {
+ // Extra fancy dance for the global block literal expression.
+
+ // The tag always refers to the latest block expression on the block stack.
+ std::string Tag = "__";
+ Tag += FunName;
+ Tag += "_block_impl_";
+ Tag += utostr(Blocks.size()-1);
+ std::string globalBuf = "static ";
+ globalBuf += Tag; globalBuf += " ";
+ std::string SStr;
+
+ llvm::raw_string_ostream constructorExprBuf(SStr);
+ GlobalConstructionExp->printPretty(constructorExprBuf, *Context, 0,
+ PrintingPolicy(LangOpts));
+ globalBuf += constructorExprBuf.str();
+ globalBuf += ";\n";
+ InsertText(FunLocStart, globalBuf);
+ GlobalConstructionExp = 0;
+ }
+
+ Blocks.clear();
+ InnerDeclRefsCount.clear();
+ InnerDeclRefs.clear();
+ RewrittenBlockExprs.clear();
+}
+
+void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ StringRef FuncName = FD->getName();
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
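+// For example (a sketch), the method -[Foo doX:withY:] yields the name
+// "Foo__doX_withY_".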
+static void BuildUniqueMethodName(std::string &Name,
+ ObjCMethodDecl *MD) {
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ Name = IFace->getName();
+ Name += "__" + MD->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = Name.find(":", loc)) != std::string::npos)
+ Name.replace(loc, 1, "_");
+}
+
+void RewriteModernObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
+ //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
+ //SourceLocation FunLocStart = MD->getLocStart();
+ SourceLocation FunLocStart = MD->getLocStart();
+ std::string FuncName;
+ BuildUniqueMethodName(FuncName, MD);
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+void RewriteModernObjC::GetBlockDeclRefExprs(Stmt *S) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
+ GetBlockDeclRefExprs(CBE->getBody());
+ else
+ GetBlockDeclRefExprs(*CI);
+ }
+ // Handle specific things.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
+ if (DRE->refersToEnclosingLocal() &&
+ HasLocalVariableExternalStorage(DRE->getDecl())) {
+ BlockDeclRefs.push_back(DRE);
+ }
+
+ return;
+}
+
+void RewriteModernObjC::GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs,
+ llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI)) {
+ InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
+ GetInnerBlockDeclRefExprs(CBE->getBody(),
+ InnerBlockDeclRefs,
+ InnerContexts);
+ }
+ else
+ GetInnerBlockDeclRefExprs(*CI,
+ InnerBlockDeclRefs,
+ InnerContexts);
+
+ }
+ // Handle specific things.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ if (DRE->refersToEnclosingLocal()) {
+ if (!isa<FunctionDecl>(DRE->getDecl()) &&
+ !InnerContexts.count(DRE->getDecl()->getDeclContext()))
+ InnerBlockDeclRefs.push_back(DRE);
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (Var->isFunctionOrMethodVarDecl())
+ ImportedLocalExternalDecls.insert(Var);
+ }
+ }
+
+ return;
+}
+
+/// convertObjCTypeToCStyleType - This routine converts ObjC types, such as
+/// qualified objects and blocks, to the closest C/C++ types it can. It
+/// returns true if the input type was modified.
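+/// For example (a sketch): 'id<NSCopying>' becomes 'id', and
+/// 'void (^)(int)' becomes 'void (*)(int)'.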
+bool RewriteModernObjC::convertObjCTypeToCStyleType(QualType &T) {
+ QualType oldT = T;
+ convertBlockPointerToFunctionPointer(T);
+ if (T->isFunctionPointerType()) {
+ QualType PointeeTy;
+ if (const PointerType* PT = T->getAs<PointerType>()) {
+ PointeeTy = PT->getPointeeType();
+ if (const FunctionType *FT = PointeeTy->getAs<FunctionType>()) {
+ T = convertFunctionTypeOfBlocks(FT);
+ T = Context->getPointerType(T);
+ }
+ }
+ }
+
+ convertToUnqualifiedObjCType(T);
+ return T != oldT;
+}
+
+/// convertFunctionTypeOfBlocks - This routine converts a function type
+/// whose result type may be a block pointer or whose argument type(s)
+/// might be block pointers to an equivalent function type replacing
+/// all block pointers to function pointers.
+QualType RewriteModernObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) {
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+ QualType Res = FT->getResultType();
+ bool modified = convertObjCTypeToCStyleType(Res);
+
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I && (I != E); ++I) {
+ QualType t = *I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (convertObjCTypeToCStyleType(t))
+ modified = true;
+ ArgTypes.push_back(t);
+ }
+ }
+ QualType FuncType;
+ if (modified)
+ FuncType = getSimpleFunctionType(Res, &ArgTypes[0], ArgTypes.size());
+ else FuncType = QualType(FT, 0);
+ return FuncType;
+}
+
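+/// SynthesizeBlockCall - Rewrites a call through a block pointer into a call
+/// through the block's FuncPtr. A sketch for 'blk(x)', where blk has type
+/// 'int (^)(int)':
+/// ((int (*)(struct __block_impl *, int))
+/// ((struct __block_impl *)blk)->FuncPtr)((struct __block_impl *)blk, x)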
+Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
+ // Navigate to relevant type information.
+ const BlockPointerType *CPT = 0;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BlockExp)) {
+ CPT = DRE->getType()->getAs<BlockPointerType>();
+ } else if (const MemberExpr *MExpr = dyn_cast<MemberExpr>(BlockExp)) {
+ CPT = MExpr->getType()->getAs<BlockPointerType>();
+ }
+ else if (const ParenExpr *PRE = dyn_cast<ParenExpr>(BlockExp)) {
+ return SynthesizeBlockCall(Exp, PRE->getSubExpr());
+ }
+ else if (const ImplicitCastExpr *IEXPR = dyn_cast<ImplicitCastExpr>(BlockExp))
+ CPT = IEXPR->getType()->getAs<BlockPointerType>();
+ else if (const ConditionalOperator *CEXPR =
+ dyn_cast<ConditionalOperator>(BlockExp)) {
+ Expr *LHSExp = CEXPR->getLHS();
+ Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp);
+ Expr *RHSExp = CEXPR->getRHS();
+ Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
+ Expr *CONDExp = CEXPR->getCond();
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(CONDExp,
+ SourceLocation(), cast<Expr>(LHSStmt),
+ SourceLocation(), cast<Expr>(RHSStmt),
+ Exp->getType(), VK_RValue, OK_Ordinary);
+ return CondExpr;
+ } else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
+ CPT = IRE->getType()->getAs<BlockPointerType>();
+ } else if (const PseudoObjectExpr *POE
+ = dyn_cast<PseudoObjectExpr>(BlockExp)) {
+ CPT = POE->getType()->castAs<BlockPointerType>();
+ } else {
+ assert(1 && "RewriteBlockClass: Bad type");
+ }
+ assert(CPT && "RewriteBlockClass: Bad type");
+ const FunctionType *FT = CPT->getPointeeType()->getAs<FunctionType>();
+ assert(FT && "RewriteBlockClass: Bad type");
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__block_impl"));
+ QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+
+ // Push the block argument type.
+ ArgTypes.push_back(PtrBlock);
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I && (I != E); ++I) {
+ QualType t = *I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (!convertBlockPointerToFunctionPointer(t))
+ convertToUnqualifiedObjCType(t);
+ ArgTypes.push_back(t);
+ }
+ }
+ // Now do the pointer to function cast.
+ QualType PtrToFuncCastType
+ = getSimpleFunctionType(Exp->getType(), &ArgTypes[0], ArgTypes.size());
+
+ PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
+
+ CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock,
+ CK_BitCast,
+ const_cast<Expr*>(BlockExp));
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ BlkCast);
+ //PE->dump();
+
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("FuncPtr"),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+
+
+ CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
+ CK_BitCast, ME);
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
+
+ SmallVector<Expr*, 8> BlkExprs;
+ // Add the implicit argument.
+ BlkExprs.push_back(BlkCast);
+ // Add the user arguments.
+ for (CallExpr::arg_iterator I = Exp->arg_begin(),
+ E = Exp->arg_end(); I != E; ++I) {
+ BlkExprs.push_back(*I);
+ }
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0],
+ BlkExprs.size(),
+ Exp->getType(), VK_RValue,
+ SourceLocation());
+ return CE;
+}
+
+// We need to return the rewritten expression to handle cases where the
+// DeclRefExpr is embedded in another expression being rewritten.
+// For example:
+//
+// int main() {
+// __block Foo *f;
+// __block int i;
+//
+// void (^myblock)() = ^() {
+// [f test]; // f is a DeclRefExpr embedded in a message (which is being rewritten).
+// i = 77;
+// };
+//}
+Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
+ // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
+ // for each DeclRefExp, where BYREFVAR is the name of the variable.
+ ValueDecl *VD = DeclRefExp->getDecl();
+ bool isArrow = DeclRefExp->refersToEnclosingLocal();
+
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("__forwarding"),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow,
+ FD, SourceLocation(),
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+
+ StringRef Name = VD->getName();
+ FD = FieldDecl::Create(*Context, 0, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(Name),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(),
+ DeclRefExp->getType(), VK_LValue, OK_Ordinary);
+
+
+
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
+ DeclRefExp->getExprLoc(),
+ ME);
+ ReplaceStmt(DeclRefExp, PE);
+ return PE;
+}
+
+// Rewrites the imported local variable V with external storage
+// (static, extern, etc.) as *V
+//
+Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ if (!ImportedLocalExternalDecls.count(Var))
+ return DRE;
+ Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary,
+ DRE->getLocation());
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Exp);
+ ReplaceStmt(DRE, PE);
+ return PE;
+}
+
+void RewriteModernObjC::RewriteCastExpr(CStyleCastExpr *CE) {
+ SourceLocation LocStart = CE->getLParenLoc();
+ SourceLocation LocEnd = CE->getRParenLoc();
+
+ // Need to avoid trying to rewrite synthesized casts.
+ if (LocStart.isInvalid())
+ return;
+ // Need to avoid trying to rewrite casts contained in macros.
+ if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
+ return;
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ QualType QT = CE->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ std::string TypeAsString = "(";
+ RewriteBlockPointerType(TypeAsString, QT);
+ TypeAsString += ")";
+ ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString);
+ return;
+ }
+ // Scan the cast text, rewriting any block '^' to '*'.
+ const char *argPtr = startBuf;
+
+ while (*argPtr++ && (argPtr < endBuf)) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ LocStart = LocStart.getLocWithOffset(argPtr-startBuf);
+ ReplaceText(LocStart, 1, "*");
+ break;
+ }
+ }
+ return;
+}
+
+void RewriteModernObjC::RewriteImplicitCastObjCExpr(CastExpr *IC) {
+ CastKind CastKind = IC->getCastKind();
+
+ if (CastKind == CK_BlockPointerToObjCPointerCast) {
+ CStyleCastExpr * CastExpr =
+ NoTypeInfoCStyleCastExpr(Context, IC->getType(), CK_BitCast, IC);
+ ReplaceStmt(IC, CastExpr);
+ }
+ else if (CastKind == CK_AnyPointerToBlockPointerCast) {
+ QualType BlockT = IC->getType();
+ (void)convertBlockPointerToFunctionPointer(BlockT);
+ CStyleCastExpr * CastExpr =
+ NoTypeInfoCStyleCastExpr(Context, BlockT, CK_BitCast, IC);
+ ReplaceStmt(IC, CastExpr);
+ }
+ return;
+}
+
+void RewriteModernObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
+ SourceLocation DeclLoc = FD->getLocation();
+ unsigned parenCount = 0;
+
+ // We have 1 or more arguments that have closure pointers.
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *startArgList = strchr(startBuf, '(');
+
+ assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
+
+ parenCount++;
+ // advance the location to startArgList.
+ DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf);
+ assert((DeclLoc.isValid()) && "Invalid DeclLoc");
+
+ const char *argPtr = startArgList;
+
+ while (*argPtr++ && parenCount) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList);
+ ReplaceText(DeclLoc, 1, "*");
+ break;
+ case '(':
+ parenCount++;
+ break;
+ case ')':
+ parenCount--;
+ break;
+ }
+ }
+ return;
+}
+
+bool RewriteModernObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I)
+ if (isTopLevelBlockPointerType(*I))
+ return true;
+ }
+ return false;
+}
+
+bool RewriteModernObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I) {
+ if ((*I)->isObjCQualifiedIdType())
+ return true;
+ if ((*I)->isObjCObjectPointerType() &&
+ (*I)->getPointeeType()->isObjCQualifiedInterfaceType())
+ return true;
+ }
+
+ }
+ return false;
+}
+
+void RewriteModernObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen) {
+ const char *argPtr = strchr(Name, '(');
+ assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
+
+ LParen = argPtr; // output the start.
+ argPtr++; // skip past the left paren.
+ unsigned parenCount = 1;
+
+ while (*argPtr && parenCount) {
+ switch (*argPtr) {
+ case '(': parenCount++; break;
+ case ')': parenCount--; break;
+ default: break;
+ }
+ if (parenCount) argPtr++;
+ }
+ assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
+ RParen = argPtr; // output the end
+}
+
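+/// RewriteBlockPointerDecl - Rewrites a declaration involving block
+/// pointers. For example (a sketch), 'int (^blk)(int);' becomes
+/// 'int (*blk)(int);', and an 'id<P>' argument becomes 'id/*<P>*/'.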
+void RewriteModernObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ RewriteBlockPointerFunctionArgs(FD);
+ return;
+ }
+ // Handle Variables and Typedefs.
+ SourceLocation DeclLoc = ND->getLocation();
+ QualType DeclT;
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ DeclT = VD->getType();
+ else if (TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(ND))
+ DeclT = TDD->getUnderlyingType();
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
+ DeclT = FD->getType();
+ else
+ llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled");
+
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *endBuf = startBuf;
+ // scan backward (from the decl location) for the end of the previous decl.
+ while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
+ startBuf--;
+ SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf);
+ std::string buf;
+ unsigned OrigLength=0;
+ // *startBuf != '^' if we are dealing with a pointer to function that
+ // may take block argument types (which will be handled below).
+ if (*startBuf == '^') {
+ // Replace the '^' with '*', computing a negative offset.
+ buf = '*';
+ startBuf++;
+ OrigLength++;
+ }
+ while (*startBuf != ')') {
+ buf += *startBuf;
+ startBuf++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+
+ if (PointerTypeTakesAnyBlockArguments(DeclT) ||
+ PointerTypeTakesAnyObjCQualifiedType(DeclT)) {
+ // Replace the '^' with '*' for arguments.
+ // Replace id<P> with id/*<>*/
+ DeclLoc = ND->getLocation();
+ startBuf = SM->getCharacterData(DeclLoc);
+ const char *argListBegin, *argListEnd;
+ GetExtentOfArgList(startBuf, argListBegin, argListEnd);
+ while (argListBegin < argListEnd) {
+ if (*argListBegin == '^')
+ buf += '*';
+ else if (*argListBegin == '<') {
+ buf += "/*";
+ buf += *argListBegin++;
+ OrigLength++;
+ while (*argListBegin != '>') {
+ buf += *argListBegin++;
+ OrigLength++;
+ }
+ buf += *argListBegin;
+ buf += "*/";
+ }
+ else
+ buf += *argListBegin;
+ argListBegin++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+ }
+ ReplaceText(Start, OrigLength, buf);
+
+ return;
+}
+
+
+/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes:
+/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst,
+/// struct Block_byref_id_object *src) {
+/// _Block_object_assign (&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_assign(&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+/// And:
+/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) {
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+
+std::string RewriteModernObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD,
+ int flag) {
+ std::string S;
+ if (CopyDestroyCache.count(flag))
+ return S;
+ CopyDestroyCache.insert(flag);
+ S = "static void __Block_byref_id_object_copy_";
+ S += utostr(flag);
+ S += "(void *dst, void *src) {\n";
+
+ // offset into the object pointer is computed as:
+ // void * + void* + int + int + void* + void *
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ unsigned VoidPtrSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->VoidPtrTy));
+
+ unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth();
+ S += " _Block_object_assign((char*)dst + ";
+ S += utostr(offset);
+ S += ", *(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+
+ S += "static void __Block_byref_id_object_dispose_";
+ S += utostr(flag);
+ S += "(void *src) {\n";
+ S += " _Block_object_dispose(*(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+ return S;
+}
+
+/// RewriteByRefVar - For each __block typex ND variable this routine transforms
+/// the declaration into:
+/// struct __Block_byref_ND {
+/// void *__isa; // NULL for everything except __weak pointers
+/// struct __Block_byref_ND *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object
+/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object
+/// typex ND;
+/// };
+///
+/// It then replaces declaration of ND variable with:
+/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag,
+/// __size=sizeof(struct __Block_byref_ND),
+/// ND=initializer-if-any};
+///
+///
+void RewriteModernObjC::RewriteByRefVar(VarDecl *ND) {
+ // Insert declaration for the function in which block literal is
+ // used.
+ if (CurFunctionDeclToDeclareForBlock)
+ RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
+ int flag = 0;
+ int isa = 0;
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ if (DeclLoc.isInvalid())
+ // If the type location is missing, it is because the type is missing
+ // (a warning). Use the variable's location, which is good for this case.
+ DeclLoc = ND->getLocation();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ std::string Name(ND->getNameAsString());
+ std::string ByrefType;
+ RewriteByRefString(ByrefType, Name, ND, true);
+ ByrefType += " {\n";
+ ByrefType += " void *__isa;\n";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += " *__forwarding;\n";
+ ByrefType += " int __flags;\n";
+ ByrefType += " int __size;\n";
+ // Add void *__Block_byref_id_object_copy;
+ // void *__Block_byref_id_object_dispose; if needed.
+ QualType Ty = ND->getType();
+ bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty);
+ if (HasCopyAndDispose) {
+ ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n";
+ ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n";
+ }
+
+ QualType T = Ty;
+ (void)convertBlockPointerToFunctionPointer(T);
+ T.getAsStringInternal(Name, Context->getPrintingPolicy());
+
+ ByrefType += " " + Name + ";\n";
+ ByrefType += "};\n";
+ // Insert this type in global scope. It is needed by helper function.
+ SourceLocation FunLocStart;
+ if (CurFunctionDef)
+ FunLocStart = CurFunctionDef->getTypeSpecStartLoc();
+ else {
+ assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
+ FunLocStart = CurMethodDef->getLocStart();
+ }
+ InsertText(FunLocStart, ByrefType);
+ if (Ty.isObjCGCWeak()) {
+ flag |= BLOCK_FIELD_IS_WEAK;
+ isa = 1;
+ }
+
+ if (HasCopyAndDispose) {
+ flag = BLOCK_BYREF_CALLER;
+ QualType Ty = ND->getType();
+ // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well.
+ if (Ty->isBlockPointerType())
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ else
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag);
+ if (!HF.empty())
+ InsertText(FunLocStart, HF);
+ }
+
+ // struct __Block_byref_ND ND =
+ // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND),
+ // initializer-if-any};
+ bool hasInit = (ND->getInit() != 0);
+ // FIXME: The rewriter does not support __block C++ objects that
+ // require construction.
+ if (hasInit && dyn_cast<CXXConstructExpr>(ND->getInit()))
+ hasInit = false;
+ unsigned flags = 0;
+ if (HasCopyAndDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ Name = ND->getNameAsString();
+ ByrefType.clear();
+ RewriteByRefString(ByrefType, Name, ND);
+ std::string ForwardingCastType("(");
+ ForwardingCastType += ByrefType + " *)";
+ if (!hasInit) {
+ ByrefType += " " + Name + " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += ")";
+ if (HasCopyAndDispose) {
+ ByrefType += ", __Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ }
+ ByrefType += "};\n";
+ unsigned nameSize = Name.size();
+ // For a block or function pointer declaration, the name is already
+ // part of the declaration.
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType())
+ nameSize = 1;
+ ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType);
+ }
+ else {
+ SourceLocation startLoc;
+ Expr *E = ND->getInit();
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ endBuf = SM->getCharacterData(startLoc);
+ ByrefType += " " + Name;
+ ByrefType += " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += "), ";
+ if (HasCopyAndDispose) {
+ ByrefType += "__Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ ByrefType += ", ";
+ }
+ ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
+
+ // Complete the newly synthesized compound expression by inserting a right
+ // curly brace before the end of the declaration.
+ // FIXME: This approach avoids rewriting the initializer expression. It
+ // also assumes there is only one declarator. For example, the following
+ // isn't currently supported by this routine (in general):
+ //
+ // double __block BYREFVAR = 1.34, BYREFVAR2 = 1.37;
+ //
+ const char *startInitializerBuf = SM->getCharacterData(startLoc);
+ const char *semiBuf = strchr(startInitializerBuf, ';');
+ assert((*semiBuf == ';') && "RewriteByRefVar: can't find ';'");
+ SourceLocation semiLoc =
+ startLoc.getLocWithOffset(semiBuf-startInitializerBuf);
+
+ InsertText(semiLoc, "}");
+ }
+ return;
+}
+
+void RewriteModernObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
+ // Add initializers for any closure decl refs.
+ GetBlockDeclRefExprs(Exp->getBody());
+ if (BlockDeclRefs.size()) {
+ // Unique all "by copy" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Unique all "by ref" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ BlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+}
+
+FunctionDecl *RewriteModernObjC::SynthBlockInitFunctionDecl(StringRef name) {
+ IdentifierInfo *ID = &Context->Idents.get(name);
+ QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy);
+ return FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, FType, 0, SC_Extern,
+ SC_None, false, false);
+}
+
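+/// SynthBlockInitExpr - Synthesizes the expression that initializes a block
+/// literal. A sketch of the result for a capture-free block 0 in main:
+/// &__main_block_impl_0((void *)__main_block_func_0,
+/// &__main_block_desc_0_DATA)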
+Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs) {
+
+ const BlockDecl *block = Exp->getBlockDecl();
+
+ Blocks.push_back(Exp);
+
+ CollectBlockDeclRefInfo(Exp);
+
+ // Add inner imported variables now used in current block.
+ int countOfInnerDecls = 0;
+ if (!InnerBlockDeclRefs.empty()) {
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
+ DeclRefExpr *Exp = InnerBlockDeclRefs[i];
+ ValueDecl *VD = Exp->getDecl();
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ // We need to save the copied-in variables of nested blocks because
+ // they are needed at the end for some of the API generation. See the
+ // SynthesizeBlockLiterals routine.
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
+ if (InnerBlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
+ }
+ InnerDeclRefsCount.push_back(countOfInnerDecls);
+
+ std::string FuncName;
+
+ if (CurFunctionDef)
+ FuncName = CurFunctionDef->getNameAsString();
+ else if (CurMethodDef)
+ BuildUniqueMethodName(FuncName, CurMethodDef);
+ else if (GlobalVarDecl)
+ FuncName = std::string(GlobalVarDecl->getNameAsString());
+
+ bool GlobalBlockExpr =
+ block->getDeclContext()->getRedeclContext()->isFileContext();
+
+ if (GlobalBlockExpr && !GlobalVarDecl) {
+ Diags.Report(block->getLocation(), GlobalBlockRewriteFailedDiag);
+ GlobalBlockExpr = false;
+ }
+
+ std::string BlockNumber = utostr(Blocks.size()-1);
+
+ std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
+
+ // Get a pointer to the function type so we can cast appropriately.
+ QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType());
+ QualType FType = Context->getPointerType(BFT);
+
+ FunctionDecl *FD;
+ Expr *NewRep;
+
+ // Simulate a constructor call...
+ std::string Tag;
+
+ if (GlobalBlockExpr)
+ Tag = "__global_";
+ else
+ Tag = "__";
+ Tag += FuncName + "_block_impl_" + BlockNumber;
+
+ FD = SynthBlockInitFunctionDecl(Tag);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // Initialize the block function.
+ FD = SynthBlockInitFunctionDecl(Func);
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ InitExprs.push_back(castExpr);
+
+ // Initialize the block descriptor.
+ std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(DescData.c_str()),
+ Context->VoidPtrTy, 0,
+ SC_Static, SC_None);
+ UnaryOperator *DescRefExpr =
+ new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
+ Context->VoidPtrTy,
+ VK_LValue,
+ SourceLocation()),
+ UO_AddrOf,
+ Context->getPointerType(Context->VoidPtrTy),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ InitExprs.push_back(DescRefExpr);
+
+ // Add initializers for any closure decl refs.
+ if (BlockDeclRefs.size()) {
+ Expr *Exp;
+ // Output all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ if (isObjCType((*I)->getType())) {
+ // FIXME: Conform to ABI ([[obj retain] autorelease]).
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+ } else if (isTopLevelBlockPointerType((*I)->getType())) {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ } else {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+
+ }
+ InitExprs.push_back(Exp);
+ }
+ // Output all "by ref" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ ValueDecl *ND = (*I);
+ std::string Name(ND->getNameAsString());
+ std::string RecName;
+ RewriteByRefString(RecName, Name, ND, true);
+ IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ + sizeof("struct"));
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ bool isNestedCapturedVar = false;
+ if (block)
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ if (variable == ND && ci->isNested()) {
+ assert (ci->isByRef() &&
+ "SynthBlockInitExpr - captured block variable is not byref");
+ isNestedCapturedVar = true;
+ break;
+ }
+ }
+      // A captured nested byref variable already has its address passed.
+      // Do not take its address again.
+ if (!isNestedCapturedVar)
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
+ InitExprs.push_back(Exp);
+ }
+ }
+ if (ImportedBlockDecls.size()) {
+    // Generate BLOCK_HAS_COPY_DISPOSE (has helper funcs) | BLOCK_HAS_DESCRIPTOR.
+ int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR);
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag),
+ Context->IntTy, SourceLocation());
+ InitExprs.push_back(FlagExp);
+ }
+ NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(),
+ FType, VK_LValue, SourceLocation());
+
+ if (GlobalBlockExpr) {
+ assert (GlobalConstructionExp == 0 &&
+ "SynthBlockInitExpr - GlobalConstructionExp must be null");
+ GlobalConstructionExp = NewRep;
+ NewRep = DRE;
+ }
+
+ NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
+ NewRep);
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ return NewRep;
+}
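+
+// Illustrative sketch only (not emitted text): for a block literal in main()
+// such as ^{ printf("%d", x); } that captures 'x' by value, the expression
+// synthesized above is roughly:
+//   (void (*)())&__main_block_impl_0((void *)__main_block_func_0,
+//                                    &__main_block_desc_0_DATA, x)
+// where the impl/func/desc names are the Tag, Func, and DescData strings
+// built above.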
+
+bool RewriteModernObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) {
+ if (const ObjCForCollectionStmt * CS =
+ dyn_cast<ObjCForCollectionStmt>(Stmts.back()))
+ return CS->getElement() == DS;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Body / Expression rewriting
+//===----------------------------------------------------------------------===//
+
+Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S))
+ Stmts.push_back(S);
+ else if (isa<ObjCForCollectionStmt>(S)) {
+ Stmts.push_back(S);
+ ObjCBcLabelNo.push_back(++BcLabelCount);
+ }
+
+ // Pseudo-object operations and ivar references need special
+ // treatment because we're going to recursively rewrite them.
+ if (PseudoObjectExpr *PseudoOp = dyn_cast<PseudoObjectExpr>(S)) {
+ if (isa<BinaryOperator>(PseudoOp->getSyntacticForm())) {
+ return RewritePropertyOrImplicitSetter(PseudoOp);
+ } else {
+ return RewritePropertyOrImplicitGetter(PseudoOp);
+ }
+ } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
+ return RewriteObjCIvarRefExpr(IvarRefExpr);
+ }
+
+ SourceRange OrigStmtRange = S->getSourceRange();
+
+ // Perform a bottom up rewrite of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ Stmt *childStmt = (*CI);
+ Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt);
+ if (newStmt) {
+ *CI = newStmt;
+ }
+ }
+
+ if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ SmallVector<DeclRefExpr *, 8> InnerBlockDeclRefs;
+ llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
+ InnerContexts.insert(BE->getBlockDecl());
+ ImportedLocalExternalDecls.clear();
+ GetInnerBlockDeclRefExprs(BE->getBody(),
+ InnerBlockDeclRefs, InnerContexts);
+ // Rewrite the block body in place.
+ Stmt *SaveCurrentBody = CurrentBody;
+ CurrentBody = BE->getBody();
+ PropParentMap = 0;
+    // A block literal on the RHS of a property-dot-syntax assignment
+    // must be replaced by its synthesized AST so that getRewrittenText
+    // works as expected. In this case, what actually ends up on the RHS
+    // is blockTranscribed, the helper function for the block literal;
+    // as in: self.c = ^() {[ace ARR];};
+ bool saveDisableReplaceStmt = DisableReplaceStmt;
+ DisableReplaceStmt = false;
+ RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
+ DisableReplaceStmt = saveDisableReplaceStmt;
+ CurrentBody = SaveCurrentBody;
+ PropParentMap = 0;
+ ImportedLocalExternalDecls.clear();
+ // Now we snarf the rewritten text and stash it away for later use.
+ std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
+ RewrittenBlockExprs[BE] = Str;
+
+ Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs);
+
+ //blockTranscribed->dump();
+ ReplaceStmt(S, blockTranscribed);
+ return blockTranscribed;
+ }
+ // Handle specific things.
+ if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
+ return RewriteAtEncode(AtEncode);
+
+ if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
+ return RewriteAtSelector(AtSelector);
+
+ if (ObjCStringLiteral *AtString = dyn_cast<ObjCStringLiteral>(S))
+ return RewriteObjCStringLiteral(AtString);
+
+ if (ObjCBoolLiteralExpr *BoolLitExpr = dyn_cast<ObjCBoolLiteralExpr>(S))
+ return RewriteObjCBoolLiteralExpr(BoolLitExpr);
+
+ if (ObjCNumericLiteral *NumericLitExpr = dyn_cast<ObjCNumericLiteral>(S))
+ return RewriteObjCNumericLiteralExpr(NumericLitExpr);
+
+ if (ObjCArrayLiteral *ArrayLitExpr = dyn_cast<ObjCArrayLiteral>(S))
+ return RewriteObjCArrayLiteralExpr(ArrayLitExpr);
+
+ if (ObjCDictionaryLiteral *DictionaryLitExpr =
+ dyn_cast<ObjCDictionaryLiteral>(S))
+ return RewriteObjCDictionaryLiteralExpr(DictionaryLitExpr);
+
+ if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
+#if 0
+ // Before we rewrite it, put the original message expression in a comment.
+ SourceLocation startLoc = MessExpr->getLocStart();
+ SourceLocation endLoc = MessExpr->getLocEnd();
+
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *endBuf = SM->getCharacterData(endLoc);
+
+ std::string messString;
+ messString += "// ";
+ messString.append(startBuf, endBuf-startBuf+1);
+ messString += "\n";
+
+ // FIXME: Missing definition of
+ // InsertText(clang::SourceLocation, char const*, unsigned int).
+ // InsertText(startLoc, messString.c_str(), messString.size());
+ // Tried this, but it didn't work either...
+ // ReplaceText(startLoc, 0, messString.c_str(), messString.size());
+#endif
+ return RewriteMessageExpr(MessExpr);
+ }
+
+ if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
+ return RewriteObjCTryStmt(StmtTry);
+
+ if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast<ObjCAtSynchronizedStmt>(S))
+ return RewriteObjCSynchronizedStmt(StmtTry);
+
+ if (ObjCAtThrowStmt *StmtThrow = dyn_cast<ObjCAtThrowStmt>(S))
+ return RewriteObjCThrowStmt(StmtThrow);
+
+ if (ObjCProtocolExpr *ProtocolExp = dyn_cast<ObjCProtocolExpr>(S))
+ return RewriteObjCProtocolExpr(ProtocolExp);
+
+ if (ObjCForCollectionStmt *StmtForCollection =
+ dyn_cast<ObjCForCollectionStmt>(S))
+ return RewriteObjCForCollectionStmt(StmtForCollection,
+ OrigStmtRange.getEnd());
+ if (BreakStmt *StmtBreakStmt =
+ dyn_cast<BreakStmt>(S))
+ return RewriteBreakStmt(StmtBreakStmt);
+ if (ContinueStmt *StmtContinueStmt =
+ dyn_cast<ContinueStmt>(S))
+ return RewriteContinueStmt(StmtContinueStmt);
+
+ // Need to check for protocol refs (id <P>, Foo <P> *) in variable decls
+ // and cast exprs.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ // FIXME: What we're doing here is modifying the type-specifier that
+ // precedes the first Decl. In the future the DeclGroup should have
+ // a separate type-specifier that we can rewrite.
+ // NOTE: We need to avoid rewriting the DeclStmt if it is within
+ // the context of an ObjCForCollectionStmt. For example:
+ // NSArray *someArray;
+ // for (id <FooProtocol> index in someArray) ;
+ // This is because RewriteObjCForCollectionStmt() does textual rewriting
+ // and it depends on the original text locations/positions.
+ if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS))
+ RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
+
+ // Blocks rewrite rules.
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ Decl *SD = *DI;
+ if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
+ if (isTopLevelBlockPointerType(ND->getType()))
+ RewriteBlockPointerDecl(ND);
+ else if (ND->getType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(ND->getType(), ND);
+ if (VarDecl *VD = dyn_cast<VarDecl>(SD)) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ static unsigned uniqueByrefDeclCount = 0;
+ assert(!BlockByRefDeclNo.count(ND) &&
+ "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl");
+ BlockByRefDeclNo[ND] = uniqueByrefDeclCount++;
+ RewriteByRefVar(VD);
+ }
+ else
+ RewriteTypeOfDecl(VD);
+ }
+ }
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ }
+ }
+
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S))
+ RewriteObjCQualifiedInterfaceTypes(CE);
+
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S)) {
+ assert(!Stmts.empty() && "Statement stack is empty");
+ assert ((isa<SwitchStmt>(Stmts.back()) || isa<WhileStmt>(Stmts.back()) ||
+ isa<DoStmt>(Stmts.back()) || isa<ForStmt>(Stmts.back()))
+ && "Statement stack mismatch");
+ Stmts.pop_back();
+ }
+ // Handle blocks rewriting.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VD->hasAttr<BlocksAttr>())
+ return RewriteBlockDeclRefExpr(DRE);
+ if (HasLocalVariableExternalStorage(VD))
+ return RewriteLocalVariableExternalStorage(DRE);
+ }
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee());
+ ReplaceStmt(S, BlockCall);
+ return BlockCall;
+ }
+ }
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S)) {
+ RewriteCastExpr(CE);
+ }
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ RewriteImplicitCastObjCExpr(ICE);
+ }
+#if 0
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ CastExpr *Replacement = new (Context) CastExpr(ICE->getType(),
+ ICE->getSubExpr(),
+ SourceLocation());
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream Buf(SStr);
+ Replacement->printPretty(Buf, *Context);
+ const std::string &Str = Buf.str();
+
+ printf("CAST = %s\n", &Str[0]);
+ InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size());
+ delete S;
+ return Replacement;
+ }
+#endif
+ // Return this stmt unmodified.
+ return S;
+}
+
+void RewriteModernObjC::RewriteRecordBody(RecordDecl *RD) {
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i) {
+ FieldDecl *FD = *i;
+ if (isTopLevelBlockPointerType(FD->getType()))
+ RewriteBlockPointerDecl(FD);
+ if (FD->getType()->isObjCQualifiedIdType() ||
+ FD->getType()->isObjCQualifiedInterfaceType())
+ RewriteObjCQualifiedInterfaceTypes(FD);
+ }
+}
+
+/// HandleDeclInMainFile - This is called for each top-level decl defined in the
+/// main file of the input.
+void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
+ switch (D->getKind()) {
+ case Decl::Function: {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (FD->isOverloadedOperator())
+ return;
+
+ // Since function prototypes don't have ParmDecl's, we check the function
+ // prototype. This enables us to rewrite function declarations and
+ // definitions using the same code.
+ RewriteBlocksInFunctionProtoType(FD->getType(), FD);
+
+ if (!FD->isThisDeclarationADefinition())
+ break;
+
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
+ CurFunctionDef = FD;
+ CurFunctionDeclToDeclareForBlock = FD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ FD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ // This synthesizes and inserts the block "impl" struct, invoke function,
+ // and any copy/dispose helper functions.
+ InsertBlockLiteralsWithinFunction(FD);
+ CurFunctionDef = 0;
+ CurFunctionDeclToDeclareForBlock = 0;
+ }
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *MD = cast<ObjCMethodDecl>(D);
+ if (CompoundStmt *Body = MD->getCompoundBody()) {
+ CurMethodDef = MD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ MD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ InsertBlockLiteralsWithinMethod(MD);
+ CurMethodDef = 0;
+ }
+ break;
+ }
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *CI = cast<ObjCImplementationDecl>(D);
+ ClassImplementation.push_back(CI);
+ break;
+ }
+ case Decl::ObjCCategoryImpl: {
+ ObjCCategoryImplDecl *CI = cast<ObjCCategoryImplDecl>(D);
+ CategoryImplementation.push_back(CI);
+ break;
+ }
+ case Decl::Var: {
+ VarDecl *VD = cast<VarDecl>(D);
+ RewriteObjCQualifiedInterfaceTypes(VD);
+ if (isTopLevelBlockPointerType(VD->getType()))
+ RewriteBlockPointerDecl(VD);
+ else if (VD->getType()->isFunctionPointerType()) {
+ CheckFunctionPointerDecl(VD->getType(), VD);
+ if (VD->getInit()) {
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ } else if (VD->getType()->isRecordType()) {
+ RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ }
+ if (VD->getInit()) {
+ GlobalVarDecl = VD;
+ CurrentBody = VD->getInit();
+ RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName());
+ GlobalVarDecl = 0;
+
+ // This is needed for blocks.
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ break;
+ }
+ case Decl::TypeAlias:
+ case Decl::Typedef: {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ break;
+ }
+ case Decl::CXXRecord:
+ case Decl::Record: {
+ RecordDecl *RD = cast<RecordDecl>(D);
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ break;
+ }
+ default:
+ break;
+ }
+ // Nothing yet.
+}
+
+/// Write_ProtocolExprReferencedMetadata - This routine writes out the
+/// protocol reference symbols in the form of:
+/// struct _protocol_t *PROTOCOL_REF = &PROTOCOL_METADATA.
+static void Write_ProtocolExprReferencedMetadata(ASTContext *Context,
+ ObjCProtocolDecl *PDecl,
+ std::string &Result) {
+ // Also output .objc_protorefs$B section and its meta-data.
+ if (Context->getLangOpts().MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_protorefs$B\")) ";
+ Result += "struct _protocol_t *";
+ Result += "_OBJC_PROTOCOL_REFERENCE_$_";
+ Result += PDecl->getNameAsString();
+ Result += " = &";
+ Result += "_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString();
+ Result += ";\n";
+}
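+
+// For example, for a protocol named P the routine above appends (modulo the
+// optional __declspec(allocate(".objc_protorefs$B")) prefix under
+// -fms-extensions):
+//   struct _protocol_t *_OBJC_PROTOCOL_REFERENCE_$_P = &_OBJC_PROTOCOL_P;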
+
+void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ RewriteInclude();
+
+ // Here's a great place to add any extra declarations that may be needed.
+ // Write out meta data for each @protocol(<expr>).
+ for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
+ E = ProtocolExprDecls.end(); I != E; ++I) {
+ RewriteObjCProtocolMetaData(*I, Preamble);
+ Write_ProtocolExprReferencedMetadata(Context, (*I), Preamble);
+ }
+
+ InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
+ for (unsigned i = 0, e = ObjCInterfacesSeen.size(); i < e; i++) {
+ ObjCInterfaceDecl *CDecl = ObjCInterfacesSeen[i];
+    // Write the struct declaration for the class, matching its ivar
+    // declarations. Note that for the modern ABI this is postponed until
+    // the end of the TU, because class extensions and the implementation
+    // might declare their own private ivars.
+ RewriteInterfaceDecl(CDecl);
+ }
+
+ if (ClassImplementation.size() || CategoryImplementation.size())
+ RewriteImplementations();
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(MainFileID)) {
+ //printf("Changed:\n");
+ *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ llvm::errs() << "No changes\n";
+ }
+
+ if (ClassImplementation.size() || CategoryImplementation.size() ||
+ ProtocolExprDecls.size()) {
+    // Rewrite Objective-C meta-data.
+ std::string ResultStr;
+ RewriteMetaDataIntoBuffer(ResultStr);
+ // Emit metadata.
+ *OutFile << ResultStr;
+ }
+  // Emit ImageInfo.
+ {
+ std::string ResultStr;
+ WriteImageInfo(ResultStr);
+ *OutFile << ResultStr;
+ }
+ OutFile->flush();
+}
+
+void RewriteModernObjC::Initialize(ASTContext &context) {
+ InitializeCommon(context);
+
+ Preamble += "#ifndef __OBJC2__\n";
+ Preamble += "#define __OBJC2__\n";
+ Preamble += "#endif\n";
+
+  // Declaring objc_selector outside the parameter list removes a silly
+  // scope-related warning...
+ if (IsHeader)
+ Preamble = "#pragma once\n";
+ Preamble += "struct objc_selector; struct objc_class;\n";
+ Preamble += "struct __rw_objc_super { \n\tstruct objc_object *object; ";
+ Preamble += "\n\tstruct objc_object *superClass; ";
+ // Add a constructor for creating temporary objects.
+ Preamble += "\n\t__rw_objc_super(struct objc_object *o, struct objc_object *s) ";
+ Preamble += ": object(o), superClass(s) {} ";
+ Preamble += "\n};\n";
+
+ if (LangOpts.MicrosoftExt) {
+ // Define all sections using syntax that makes sense.
+ // These are currently generated.
+ Preamble += "\n#pragma section(\".objc_classlist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_catlist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_protolist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_imageinfo$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_nlclslist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_nlcatlist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_protorefs$B\", long, read, write)\n";
+ // These are generated but not necessary for functionality.
+ Preamble += "#pragma section(\".datacoal_nt$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".cat_cls_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".inst_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".cls_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_ivar$B\", long, read, write)\n";
+
+    // These need to be generated for performance. Currently they are not;
+    // API calls are used instead.
+ Preamble += "#pragma section(\".objc_selrefs$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_classrefs$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_superrefs$B\", long, read, write)\n";
+
+ }
+ Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
+ Preamble += "typedef struct objc_object Protocol;\n";
+ Preamble += "#define _REWRITER_typedef_Protocol\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
+ Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
+ }
+ else
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
+
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_stret(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper_stret(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_fpret(void);\n";
+
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
+ Preamble += "(struct objc_class *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw( struct objc_object *);\n";
+ // @synchronized hooks.
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter( struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit( struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
+ Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
+ Preamble += "struct __objcFastEnumerationState {\n\t";
+ Preamble += "unsigned long state;\n\t";
+ Preamble += "void **itemsPtr;\n\t";
+ Preamble += "unsigned long *mutationsPtr;\n\t";
+ Preamble += "unsigned long extra[5];\n};\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
+ Preamble += "#define __FASTENUMERATIONSTATE\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "struct __NSConstantStringImpl {\n";
+ Preamble += " int *isa;\n";
+ Preamble += " int flags;\n";
+ Preamble += " char *str;\n";
+ Preamble += " long length;\n";
+ Preamble += "};\n";
+ Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
+ Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "#endif\n";
+ // Blocks preamble.
+ Preamble += "#ifndef BLOCK_IMPL\n";
+ Preamble += "#define BLOCK_IMPL\n";
+ Preamble += "struct __block_impl {\n";
+ Preamble += " void *isa;\n";
+ Preamble += " int Flags;\n";
+ Preamble += " int Reserved;\n";
+ Preamble += " void *FuncPtr;\n";
+ Preamble += "};\n";
+ Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
+ Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
+ Preamble += "extern \"C\" __declspec(dllexport) "
+ "void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#endif\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
+ Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
+ Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
+ Preamble += "#define __attribute__(X)\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __weak\n";
+ Preamble += "#define __weak\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __block\n";
+ Preamble += "#define __block\n";
+ Preamble += "#endif\n";
+ }
+ else {
+ Preamble += "#define __block\n";
+ Preamble += "#define __weak\n";
+ }
+
+  // Declarations required for modern Objective-C array and dictionary literals.
+ Preamble += "\n#include <stdarg.h>\n";
+ Preamble += "struct __NSContainer_literal {\n";
+ Preamble += " void * *arr;\n";
+ Preamble += " __NSContainer_literal (unsigned int count, ...) {\n";
+ Preamble += "\tva_list marker;\n";
+ Preamble += "\tva_start(marker, count);\n";
+ Preamble += "\tarr = new void *[count];\n";
+ Preamble += "\tfor (unsigned i = 0; i < count; i++)\n";
+ Preamble += "\t arr[i] = va_arg(marker, void *);\n";
+ Preamble += "\tva_end( marker );\n";
+ Preamble += " };\n";
+  Preamble += " ~__NSContainer_literal() {\n";
+ Preamble += "\tdelete[] arr;\n";
+ Preamble += " }\n";
+ Preamble += "};\n";
+
+  // NOTE: Windows uses LLP64 in 64-bit mode, so cast the pointer to long long;
+  // this avoids warnings in both the 64-bit and 32-bit compilation models.
+ Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
+}
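+
+// Hedged usage sketch for the __NSContainer_literal helper declared above:
+// for a container literal with two elements the rewritten code is expected
+// to pack the elements along the lines of
+//   __NSContainer_literal(2U, obj1, obj2).arr
+// and pass that buffer to the corresponding objects:count: style factory
+// message ('obj1'/'obj2' are placeholder names).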
+
+/// RewriteIvarOffsetComputation - This routine synthesizes computation of
+/// the ivar offset.
+void RewriteModernObjC::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) {
+ if (ivar->isBitField()) {
+ // FIXME: The hack below doesn't work for bitfields. For now, we simply
+ // place all bitfields at offset 0.
+ Result += "0";
+ } else {
+ Result += "__OFFSETOFIVAR__(struct ";
+ Result += ivar->getContainingInterface()->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ", ";
+ Result += ivar->getNameAsString();
+ Result += ")";
+ }
+}
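+
+// Example for a hypothetical class Foo with a non-bitfield ivar 'x': the
+// routine above produces __OFFSETOFIVAR__(struct Foo_IMPL, x) under
+// -fms-extensions (no _IMPL suffix otherwise), which the preamble macro
+// expands to:
+//   ((long long) &((struct Foo_IMPL *)0)->x)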
+
+/// WriteModernMetadataDeclarations - Writes out metadata declarations for modern ABI.
+/// struct _prop_t {
+/// const char *name;
+/// char *attributes;
+/// }
+
+/// struct _prop_list_t {
+/// uint32_t entsize; // sizeof(struct _prop_t)
+/// uint32_t count_of_properties;
+/// struct _prop_t prop_list[count_of_properties];
+/// }
+
+/// struct _protocol_t;
+
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+/// struct _protocol_t * protocol_list[protocol_count];
+/// }
+
+/// struct _objc_method {
+/// SEL _cmd;
+/// const char *method_type;
+/// char *_imp;
+/// }
+
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char *protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t *instance_methods;
+/// const struct method_list_t *class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// const char ** extendedMethodTypes;
+/// }
+
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// const char *name;
+/// const char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+
+/// struct _ivar_list_t {
+/// uint32 entsize; // sizeof(struct _ivar_t)
+/// uint32 count;
+/// struct _ivar_t list[count];
+/// }
+
+/// struct _class_ro_t {
+/// uint32_t flags;
+/// uint32_t instanceStart;
+/// uint32_t instanceSize;
+/// uint32_t reserved; // only when building for 64bit targets
+/// const uint8_t *ivarLayout;
+/// const char *name;
+/// const struct _method_list_t *baseMethods;
+/// const struct _protocol_list_t *baseProtocols;
+/// const struct _ivar_list_t *ivars;
+/// const uint8_t *weakIvarLayout;
+/// const struct _prop_list_t *properties;
+/// }
+
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t *superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct _class_ro_t *ro;
+/// }
+
+/// struct _category_t {
+/// const char *name;
+/// struct _class_t *cls;
+/// const struct _method_list_t *instance_methods;
+/// const struct _method_list_t *class_methods;
+/// const struct _protocol_list_t *protocols;
+/// const struct _prop_list_t *properties;
+/// }
+
+/// MessageRefTy - LLVM for:
+/// struct _message_ref_t {
+/// IMP messenger;
+/// SEL name;
+/// };
+
+/// SuperMessageRefTy - LLVM for:
+/// struct _super_message_ref_t {
+/// SUPER_IMP messenger;
+/// SEL name;
+/// };
+
+static void WriteModernMetadataDeclarations(ASTContext *Context, std::string &Result) {
+ static bool meta_data_declared = false;
+ if (meta_data_declared)
+ return;
+
+ Result += "\nstruct _prop_t {\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst char *attributes;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _protocol_t;\n";
+
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tstruct objc_selector * _cmd;\n";
+ Result += "\tconst char *method_type;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _protocol_t {\n";
+ Result += "\tvoid * isa; // NULL\n";
+ Result += "\tconst char *protocol_name;\n";
+ Result += "\tconst struct _protocol_list_t * protocol_list; // super protocols\n";
+ Result += "\tconst struct method_list_t *instance_methods;\n";
+ Result += "\tconst struct method_list_t *class_methods;\n";
+ Result += "\tconst struct method_list_t *optionalInstanceMethods;\n";
+ Result += "\tconst struct method_list_t *optionalClassMethods;\n";
+ Result += "\tconst struct _prop_list_t * properties;\n";
+ Result += "\tconst unsigned int size; // sizeof(struct _protocol_t)\n";
+ Result += "\tconst unsigned int flags; // = 0\n";
+ Result += "\tconst char ** extendedMethodTypes;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _ivar_t {\n";
+ Result += "\tunsigned long int *offset; // pointer to ivar offset location\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst char *type;\n";
+ Result += "\tunsigned int alignment;\n";
+ Result += "\tunsigned int size;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _class_ro_t {\n";
+ Result += "\tunsigned int flags;\n";
+ Result += "\tunsigned int instanceStart;\n";
+ Result += "\tunsigned int instanceSize;\n";
+ const llvm::Triple &Triple(Context->getTargetInfo().getTriple());
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ Result += "\tunsigned int reserved;\n";
+ Result += "\tconst unsigned char *ivarLayout;\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst struct _method_list_t *baseMethods;\n";
+ Result += "\tconst struct _objc_protocol_list *baseProtocols;\n";
+ Result += "\tconst struct _ivar_list_t *ivars;\n";
+ Result += "\tconst unsigned char *weakIvarLayout;\n";
+ Result += "\tconst struct _prop_list_t *properties;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _class_t {\n";
+ Result += "\tstruct _class_t *isa;\n";
+ Result += "\tstruct _class_t *superclass;\n";
+ Result += "\tvoid *cache;\n";
+ Result += "\tvoid *vtable;\n";
+ Result += "\tstruct _class_ro_t *ro;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _category_t {\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tstruct _class_t *cls;\n";
+ Result += "\tconst struct _method_list_t *instance_methods;\n";
+ Result += "\tconst struct _method_list_t *class_methods;\n";
+ Result += "\tconst struct _protocol_list_t *protocols;\n";
+ Result += "\tconst struct _prop_list_t *properties;\n";
+ Result += "};\n";
+
+ Result += "extern \"C\" __declspec(dllimport) struct objc_cache _objc_empty_cache;\n";
+ Result += "#pragma warning(disable:4273)\n";
+ meta_data_declared = true;
+}
+
+static void Write_protocol_list_t_TypeDecl(std::string &Result,
+ long super_protocol_count) {
+ Result += "struct /*_protocol_list_t*/"; Result += " {\n";
+ Result += "\tlong protocol_count; // Note, this is 32/64 bit\n";
+ Result += "\tstruct _protocol_t *super_protocols[";
+ Result += utostr(super_protocol_count); Result += "];\n";
+ Result += "}";
+}
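+
+// E.g., Write_protocol_list_t_TypeDecl(Result, 2) appends:
+//   struct /*_protocol_list_t*/ {
+//     long protocol_count; // Note, this is 32/64 bit
+//     struct _protocol_t *super_protocols[2];
+//   }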
+
+static void Write_method_list_t_TypeDecl(std::string &Result,
+ unsigned int method_count) {
+ Result += "struct /*_method_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _objc_method)\n";
+ Result += "\tunsigned int method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(method_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write__prop_list_t_TypeDecl(std::string &Result,
+ unsigned int property_count) {
+ Result += "struct /*_prop_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n";
+ Result += "\tunsigned int count_of_properties;\n";
+ Result += "\tstruct _prop_t prop_list[";
+ Result += utostr(property_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write__ivar_list_t_TypeDecl(std::string &Result,
+ unsigned int ivar_count) {
+ Result += "struct /*_ivar_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n";
+ Result += "\tunsigned int count;\n";
+ Result += "\tstruct _ivar_t ivar_list[";
+ Result += utostr(ivar_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write_protocol_list_initializer(ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCProtocolDecl *> SuperProtocols,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (SuperProtocols.size() > 0) {
+ Result += "\nstatic ";
+ Write_protocol_list_t_TypeDecl(Result, SuperProtocols.size());
+ Result += " "; Result += VarName;
+ Result += ProtocolName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += utostr(SuperProtocols.size()); Result += ",\n";
+ for (unsigned i = 0, e = SuperProtocols.size(); i < e; i++) {
+ ObjCProtocolDecl *SuperPD = SuperProtocols[i];
+ Result += "\t&"; Result += "_OBJC_PROTOCOL_";
+ Result += SuperPD->getNameAsString();
+ if (i == e-1)
+ Result += "\n};\n";
+ else
+ Result += ",\n";
+ }
+ }
+}
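+
+// Sketch of the output for a hypothetical protocol P whose super-protocol
+// list is <Q, R>, with VarName "_OBJC_PROTOCOL_REFS_" as passed by the caller:
+//   static struct /*_protocol_list_t*/ { ... } _OBJC_PROTOCOL_REFS_P
+//       __attribute__ ((used, section ("__DATA,__objc_const"))) = {
+//     2,
+//     &_OBJC_PROTOCOL_Q,
+//     &_OBJC_PROTOCOL_R
+//   };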
+
+static void Write_method_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCMethodDecl *> Methods,
+ StringRef VarName,
+ StringRef TopLevelDeclName,
+ bool MethodImpl) {
+ if (Methods.size() > 0) {
+ Result += "\nstatic ";
+ Write_method_list_t_TypeDecl(Result, Methods.size());
+ Result += " "; Result += VarName;
+ Result += TopLevelDeclName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_objc_method)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Methods.size()); Result += ",\n";
+ for (unsigned i = 0, e = Methods.size(); i < e; i++) {
+ ObjCMethodDecl *MD = Methods[i];
+ if (i == 0)
+ Result += "\t{{(struct objc_selector *)\"";
+ else
+ Result += "\t{(struct objc_selector *)\"";
+ Result += (MD)->getSelector().getAsString(); Result += "\"";
+ Result += ", ";
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(MD, MethodTypeString);
+ Result += "\""; Result += MethodTypeString; Result += "\"";
+ Result += ", ";
+ if (!MethodImpl)
+ Result += "0";
+ else {
+ Result += "(void *)";
+ Result += RewriteObj.MethodInternalNames[MD];
+ }
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
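+
+// Sketch for a hypothetical protocol P declaring one required instance method
+// -foo; "v16@0:8" is a plausible 64-bit type encoding, and with MethodImpl
+// false the IMP slot is 0:
+//   static struct /*_method_list_t*/ { ... } _OBJC_PROTOCOL_INSTANCE_METHODS_P
+//       __attribute__ ((used, section ("__DATA,__objc_const"))) = {
+//     sizeof(_objc_method),
+//     1,
+//     {{(struct objc_selector *)"foo", "v16@0:8", 0}}
+//   };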
+
+static void Write_prop_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCPropertyDecl *> Properties,
+ const Decl *Container,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (Properties.size() > 0) {
+ Result += "\nstatic ";
+ Write__prop_list_t_TypeDecl(Result, Properties.size());
+ Result += " "; Result += VarName;
+ Result += ProtocolName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_prop_t)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Properties.size()); Result += ",\n";
+ for (unsigned i = 0, e = Properties.size(); i < e; i++) {
+ ObjCPropertyDecl *PropDecl = Properties[i];
+ if (i == 0)
+ Result += "\t{{\"";
+ else
+ Result += "\t{\"";
+ Result += PropDecl->getName(); Result += "\",";
+ std::string PropertyTypeString, QuotePropertyTypeString;
+ Context->getObjCEncodingForPropertyDecl(PropDecl, Container, PropertyTypeString);
+ RewriteObj.QuoteDoublequotes(PropertyTypeString, QuotePropertyTypeString);
+ Result += "\""; Result += QuotePropertyTypeString; Result += "\"";
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
+
+// Metadata flags
+enum MetaDataFlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ CLS_HAS_IVAR_RELEASER = 0x40,
+ /// class was compiled with -fobjc-arr
+ CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
+};
+
+static void Write__class_ro_t_initializer(ASTContext *Context, std::string &Result,
+ unsigned int flags,
+ const std::string &InstanceStart,
+ const std::string &InstanceSize,
+ ArrayRef<ObjCMethodDecl *>baseMethods,
+ ArrayRef<ObjCProtocolDecl *>baseProtocols,
+ ArrayRef<ObjCIvarDecl *>ivars,
+ ArrayRef<ObjCPropertyDecl *>Properties,
+ StringRef VarName,
+ StringRef ClassName) {
+ Result += "\nstatic struct _class_ro_t ";
+ Result += VarName; Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t";
+ Result += llvm::utostr(flags); Result += ", ";
+ Result += InstanceStart; Result += ", ";
+ Result += InstanceSize; Result += ", \n";
+ Result += "\t";
+ const llvm::Triple &Triple(Context->getTargetInfo().getTriple());
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ // uint32_t const reserved; // only when building for 64bit targets
+ Result += "(unsigned int)0, \n\t";
+ // const uint8_t * const ivarLayout;
+ Result += "0, \n\t";
+ Result += "\""; Result += ClassName; Result += "\",\n\t";
+ bool metaclass = ((flags & CLS_META) != 0);
+ if (baseMethods.size() > 0) {
+ Result += "(const struct _method_list_t *)&";
+ if (metaclass)
+ Result += "_OBJC_$_CLASS_METHODS_";
+ else
+ Result += "_OBJC_$_INSTANCE_METHODS_";
+ Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ if (!metaclass && baseProtocols.size() > 0) {
+ Result += "(const struct _objc_protocol_list *)&";
+ Result += "_OBJC_CLASS_PROTOCOLS_$_"; Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ if (!metaclass && ivars.size() > 0) {
+ Result += "(const struct _ivar_list_t *)&";
+ Result += "_OBJC_$_INSTANCE_VARIABLES_"; Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ // weakIvarLayout
+ Result += "0, \n\t";
+ if (!metaclass && Properties.size() > 0) {
+ Result += "(const struct _prop_list_t *)&";
+ Result += "_OBJC_$_PROP_LIST_"; Result += ClassName;
+ Result += ",\n";
+ }
+ else
+ Result += "0, \n";
+
+ Result += "};\n";
+}
+
+static void Write_class_t(ASTContext *Context, std::string &Result,
+ StringRef VarName,
+ const ObjCInterfaceDecl *CDecl, bool metaclass) {
+ bool rootClass = (!CDecl->getSuperClass());
+ const ObjCInterfaceDecl *RootClass = CDecl;
+
+ if (!rootClass) {
+ // Find the Root class
+ RootClass = CDecl->getSuperClass();
+ while (RootClass->getSuperClass()) {
+ RootClass = RootClass->getSuperClass();
+ }
+ }
+
+ if (metaclass && rootClass) {
+    // Need to handle the case where a forward declaration is used.
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (CDecl->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t OBJC_CLASS_$_";
+ Result += CDecl->getNameAsString();
+ Result += ";\n";
+ }
+  // Also handle the possibility of the 'super' metadata class not having been defined yet.
+ if (!rootClass) {
+ ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (SuperClass->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += VarName;
+ Result += SuperClass->getNameAsString();
+ Result += ";\n";
+
+ if (metaclass && RootClass != SuperClass) {
+ Result += "extern \"C\" ";
+ if (RootClass->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += VarName;
+ Result += RootClass->getNameAsString();
+ Result += ";\n";
+ }
+ }
+
+ Result += "\nextern \"C\" __declspec(dllexport) struct _class_t ";
+ Result += VarName; Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_data\"))) = {\n";
+ Result += "\t";
+ if (metaclass) {
+ if (!rootClass) {
+ Result += "0, // &"; Result += VarName;
+ Result += RootClass->getNameAsString();
+ Result += ",\n\t";
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getSuperClass()->getNameAsString();
+ Result += ",\n\t";
+ }
+ else {
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ Result += "0, // &OBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ }
+ }
+ else {
+ Result += "0, // &OBJC_METACLASS_$_";
+ Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ if (!rootClass) {
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getSuperClass()->getNameAsString();
+ Result += ",\n\t";
+ }
+ else
+ Result += "0,\n\t";
+ }
+ Result += "0, // (void *)&_objc_empty_cache,\n\t";
+ Result += "0, // unused, was (void *)&_objc_empty_vtable,\n\t";
+ if (metaclass)
+ Result += "&_OBJC_METACLASS_RO_$_";
+ else
+ Result += "&_OBJC_CLASS_RO_$_";
+ Result += CDecl->getNameAsString();
+ Result += ",\n};\n";
+
+  // Add a static function to initialize some of the meta-data fields;
+  // avoid doing it twice.
+ if (metaclass)
+ return;
+
+ const ObjCInterfaceDecl *SuperClass =
+ rootClass ? CDecl : CDecl->getSuperClass();
+
+ Result += "static void OBJC_CLASS_SETUP_$_";
+ Result += CDecl->getNameAsString();
+ Result += "(void ) {\n";
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".isa = "; Result += "&OBJC_METACLASS_$_";
+ Result += RootClass->getNameAsString(); Result += ";\n";
+
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".superclass = ";
+ if (rootClass)
+ Result += "&OBJC_CLASS_$_";
+ else
+ Result += "&OBJC_METACLASS_$_";
+
+ Result += SuperClass->getNameAsString(); Result += ";\n";
+
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n";
+
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".isa = "; Result += "&OBJC_METACLASS_$_";
+ Result += CDecl->getNameAsString(); Result += ";\n";
+
+ if (!rootClass) {
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".superclass = "; Result += "&OBJC_CLASS_$_";
+ Result += SuperClass->getNameAsString(); Result += ";\n";
+ }
+
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n";
+ Result += "}\n";
+}
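+
+// Sketch of the setup function emitted above for a hypothetical class
+// Foo : NSObject (NSObject being both the superclass and the root class):
+//   static void OBJC_CLASS_SETUP_$_Foo(void ) {
+//     OBJC_METACLASS_$_Foo.isa = &OBJC_METACLASS_$_NSObject;
+//     OBJC_METACLASS_$_Foo.superclass = &OBJC_METACLASS_$_NSObject;
+//     OBJC_METACLASS_$_Foo.cache = &_objc_empty_cache;
+//     OBJC_CLASS_$_Foo.isa = &OBJC_METACLASS_$_Foo;
+//     OBJC_CLASS_$_Foo.superclass = &OBJC_CLASS_$_NSObject;
+//     OBJC_CLASS_$_Foo.cache = &_objc_empty_cache;
+//   }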
+
+static void Write_category_t(RewriteModernObjC &RewriteObj, ASTContext *Context,
+ std::string &Result,
+ ObjCCategoryDecl *CatDecl,
+ ObjCInterfaceDecl *ClassDecl,
+ ArrayRef<ObjCMethodDecl *> InstanceMethods,
+ ArrayRef<ObjCMethodDecl *> ClassMethods,
+ ArrayRef<ObjCProtocolDecl *> RefedProtocols,
+ ArrayRef<ObjCPropertyDecl *> ClassProperties) {
+ StringRef CatName = CatDecl->getName();
+ StringRef ClassName = ClassDecl->getName();
+ // must declare an extern class object in case this class is not implemented
+ // in this TU.
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (ClassDecl->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += "OBJC_CLASS_$_"; Result += ClassName;
+ Result += ";\n";
+
+ Result += "\nstatic struct _category_t ";
+ Result += "_OBJC_$_CATEGORY_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n";
+ Result += "{\n";
+ Result += "\t\""; Result += ClassName; Result += "\",\n";
+ Result += "\t0, // &"; Result += "OBJC_CLASS_$_"; Result += ClassName;
+ Result += ",\n";
+ if (InstanceMethods.size() > 0) {
+ Result += "\t(const struct _method_list_t *)&";
+ Result += "_OBJC_$_CATEGORY_INSTANCE_METHODS_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassMethods.size() > 0) {
+ Result += "\t(const struct _method_list_t *)&";
+ Result += "_OBJC_$_CATEGORY_CLASS_METHODS_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (RefedProtocols.size() > 0) {
+ Result += "\t(const struct _protocol_list_t *)&";
+ Result += "_OBJC_CATEGORY_PROTOCOLS_$_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassProperties.size() > 0) {
+ Result += "\t(const struct _prop_list_t *)&"; Result += "_OBJC_$_PROP_LIST_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ Result += "};\n";
+
+ // Add static function to initialize the class pointer in the category structure.
+ Result += "static void OBJC_CATEGORY_SETUP_$_";
+ Result += ClassDecl->getNameAsString();
+ Result += "_$_";
+ Result += CatName;
+ Result += "(void ) {\n";
+ Result += "\t_OBJC_$_CATEGORY_";
+ Result += ClassDecl->getNameAsString();
+ Result += "_$_";
+ Result += CatName;
+ Result += ".cls = "; Result += "&OBJC_CLASS_$_"; Result += ClassName;
+ Result += ";\n}\n";
+}
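+
+// Sketch of the setup function emitted above for a hypothetical category
+// Bar on class Foo:
+//   static void OBJC_CATEGORY_SETUP_$_Foo_$_Bar(void ) {
+//     _OBJC_$_CATEGORY_Foo_$_Bar.cls = &OBJC_CLASS_$_Foo;
+//   }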
+
+static void Write__extendedMethodTypes_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCMethodDecl *> Methods,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (Methods.size() == 0)
+ return;
+
+ Result += "\nstatic const char *";
+ Result += VarName; Result += ProtocolName;
+ Result += " [] __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n";
+ Result += "{\n";
+ for (unsigned i = 0, e = Methods.size(); i < e; i++) {
+ ObjCMethodDecl *MD = Methods[i];
+ std::string MethodTypeString, QuoteMethodTypeString;
+ Context->getObjCEncodingForMethodDecl(MD, MethodTypeString, true);
+ RewriteObj.QuoteDoublequotes(MethodTypeString, QuoteMethodTypeString);
+ Result += "\t\""; Result += QuoteMethodTypeString; Result += "\"";
+ if (i == e-1)
+ Result += "\n};\n";
+ else {
+ Result += ",\n";
+ }
+ }
+}
+
+static void Write_IvarOffsetVar(RewriteModernObjC &RewriteObj,
+ ASTContext *Context,
+ std::string &Result,
+ ArrayRef<ObjCIvarDecl *> Ivars,
+ ObjCInterfaceDecl *CDecl) {
+  // FIXME: visibility of offset symbols may have to be set; for Darwin
+  // this is what happens:
+  /**
+   if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+       Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+       Class->getVisibility() == HiddenVisibility)
+     Visibility should be: HiddenVisibility;
+   else
+     Visibility should be: DefaultVisibility;
+  */
+
+ Result += "\n";
+ for (unsigned i =0, e = Ivars.size(); i < e; i++) {
+ ObjCIvarDecl *IvarDecl = Ivars[i];
+ if (Context->getLangOpts().MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_ivar$B\")) ";
+
+ if (!Context->getLangOpts().MicrosoftExt ||
+ IvarDecl->getAccessControl() == ObjCIvarDecl::Private ||
+ IvarDecl->getAccessControl() == ObjCIvarDecl::Package)
+ Result += "extern \"C\" unsigned long int ";
+ else
+ Result += "extern \"C\" __declspec(dllexport) unsigned long int ";
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_ivar\")))";
+ Result += " = ";
+ RewriteObj.RewriteIvarOffsetComputation(IvarDecl, Result);
+ Result += ";\n";
+ }
+}
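+
+// Sketch of one emitted offset variable, assuming WriteInternalIvarName
+// (defined elsewhere) produces names of the form OBJC_IVAR_$_<Class>$<ivar>;
+// for an ivar 'x' of class Foo on a non-Microsoft target:
+//   extern "C" unsigned long int OBJC_IVAR_$_Foo$x
+//       __attribute__ ((used, section ("__DATA,__objc_ivar"))) =
+//       __OFFSETOFIVAR__(struct Foo, x);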
+
+static void Write__ivar_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCIvarDecl *> Ivars,
+ StringRef VarName,
+ ObjCInterfaceDecl *CDecl) {
+ if (Ivars.size() > 0) {
+ Write_IvarOffsetVar(RewriteObj, Context, Result, Ivars, CDecl);
+
+ Result += "\nstatic ";
+ Write__ivar_list_t_TypeDecl(Result, Ivars.size());
+ Result += " "; Result += VarName;
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_ivar_t)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Ivars.size()); Result += ",\n";
+ for (unsigned i =0, e = Ivars.size(); i < e; i++) {
+ ObjCIvarDecl *IvarDecl = Ivars[i];
+ if (i == 0)
+ Result += "\t{{";
+ else
+ Result += "\t {";
+ Result += "(unsigned long int *)&";
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += ", ";
+
+ Result += "\""; Result += IvarDecl->getName(); Result += "\", ";
+ std::string IvarTypeString, QuoteIvarTypeString;
+ Context->getObjCEncodingForType(IvarDecl->getType(), IvarTypeString,
+ IvarDecl);
+ RewriteObj.QuoteDoublequotes(IvarTypeString, QuoteIvarTypeString);
+ Result += "\""; Result += QuoteIvarTypeString; Result += "\", ";
+
+      // FIXME: this alignment represents the host alignment and needs to be
+      // changed to represent the target alignment.
+ unsigned Align = Context->getTypeAlign(IvarDecl->getType())/8;
+ Align = llvm::Log2_32(Align);
+ Result += llvm::utostr(Align); Result += ", ";
+ CharUnits Size = Context->getTypeSizeInChars(IvarDecl->getType());
+ Result += llvm::utostr(Size.getQuantity());
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
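+
+// Sketch for a hypothetical class Foo with a single int ivar 'x' (encoding
+// "i"; 4-byte alignment gives the log2 value 2, size 4 on a typical host),
+// again assuming the OBJC_IVAR_$_<Class>$<ivar> internal naming:
+//   static struct /*_ivar_list_t*/ { ... } _OBJC_$_INSTANCE_VARIABLES_Foo
+//       __attribute__ ((used, section ("__DATA,__objc_const"))) = {
+//     sizeof(_ivar_t),
+//     1,
+//     {{(unsigned long int *)&OBJC_IVAR_$_Foo$x, "x", "i", 2, 4}}
+//   };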
+
+/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data.
+void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl,
+ std::string &Result) {
+
+ // Do not synthesize the protocol more than once.
+ if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl()))
+ return;
+ WriteModernMetadataDeclarations(Context, Result);
+
+ if (ObjCProtocolDecl *Def = PDecl->getDefinition())
+ PDecl = Def;
+  // Must write out all protocol definitions in the current qualifier list,
+  // and in their nested qualifiers, before writing out the current definition.
+ for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); I != E; ++I)
+ RewriteObjCProtocolMetaData(*I, Result);
+
+ // Construct method lists.
+ std::vector<ObjCMethodDecl *> InstanceMethods, ClassMethods;
+ std::vector<ObjCMethodDecl *> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
+ I != E; ++I) {
+ ObjCMethodDecl *MD = *I;
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(MD);
+ } else {
+ InstanceMethods.push_back(MD);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I) {
+ ObjCMethodDecl *MD = *I;
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(MD);
+ } else {
+ ClassMethods.push_back(MD);
+ }
+ }
+ std::vector<ObjCMethodDecl *> AllMethods;
+ for (unsigned i = 0, e = InstanceMethods.size(); i < e; i++)
+ AllMethods.push_back(InstanceMethods[i]);
+ for (unsigned i = 0, e = ClassMethods.size(); i < e; i++)
+ AllMethods.push_back(ClassMethods[i]);
+ for (unsigned i = 0, e = OptInstanceMethods.size(); i < e; i++)
+ AllMethods.push_back(OptInstanceMethods[i]);
+ for (unsigned i = 0, e = OptClassMethods.size(); i < e; i++)
+ AllMethods.push_back(OptClassMethods[i]);
+
+ Write__extendedMethodTypes_initializer(*this, Context, Result,
+ AllMethods,
+ "_OBJC_PROTOCOL_METHOD_TYPES_",
+ PDecl->getNameAsString());
+ // Protocol's super protocol list
+ std::vector<ObjCProtocolDecl *> SuperProtocols;
+ for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); I != E; ++I)
+ SuperProtocols.push_back(*I);
+
+ Write_protocol_list_initializer(Context, Result, SuperProtocols,
+ "_OBJC_PROTOCOL_REFS_",
+ PDecl->getNameAsString());
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_PROTOCOL_INSTANCE_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_PROTOCOL_CLASS_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, OptInstanceMethods,
+ "_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, OptClassMethods,
+ "_OBJC_PROTOCOL_OPT_CLASS_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ // Protocol's property metadata.
+ std::vector<ObjCPropertyDecl *> ProtocolProperties;
+ for (ObjCContainerDecl::prop_iterator I = PDecl->prop_begin(),
+ E = PDecl->prop_end(); I != E; ++I)
+ ProtocolProperties.push_back(*I);
+
+ Write_prop_list_t_initializer(*this, Context, Result, ProtocolProperties,
+ /* Container */0,
+ "_OBJC_PROTOCOL_PROPERTIES_",
+ PDecl->getNameAsString());
+
+  // Write out the root meta-data for the current protocol: struct _protocol_t
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".datacoal_nt$B\")) ";
+ Result += "struct _protocol_t _OBJC_PROTOCOL_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__datacoal_nt,coalesced\"))) = {\n";
+  Result += "\t0,\n"; // id isa; is null
+ Result += "\t\""; Result += PDecl->getNameAsString(); Result += "\",\n";
+ if (SuperProtocols.size() > 0) {
+ Result += "\t(const struct _protocol_list_t *)&"; Result += "_OBJC_PROTOCOL_REFS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+ if (InstanceMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (OptInstanceMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (OptClassMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_CLASS_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ProtocolProperties.size() > 0) {
+ Result += "\t(const struct _prop_list_t *)&_OBJC_PROTOCOL_PROPERTIES_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ Result += "\t"; Result += "sizeof(_protocol_t)"; Result += ",\n";
+ Result += "\t0,\n";
+
+ if (AllMethods.size() > 0) {
+ Result += "\t(const char **)&"; Result += "_OBJC_PROTOCOL_METHOD_TYPES_";
+ Result += PDecl->getNameAsString();
+ Result += "\n};\n";
+ }
+ else
+ Result += "\t0\n};\n";
+
+ // Use this protocol meta-data to build protocol list table in section
+ // .objc_protolist$B
+ // Unspecified visibility means 'private extern'.
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_protolist$B\")) ";
+ Result += "struct _protocol_t *";
+ Result += "_OBJC_LABEL_PROTOCOL_$_"; Result += PDecl->getNameAsString();
+ Result += " = &_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString();
+ Result += ";\n";
+
+ // Mark this protocol as having been generated.
+ if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()))
+ llvm_unreachable("protocol already synthesized");
+
+}
+
+void RewriteModernObjC::RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Protocols,
+ StringRef prefix, StringRef ClassName,
+ std::string &Result) {
+ if (Protocols.empty()) return;
+
+ for (unsigned i = 0; i != Protocols.size(); i++)
+ RewriteObjCProtocolMetaData(Protocols[i], Result);
+
+  // Output the top level protocol meta-data for the class.
+ /* struct _objc_protocol_list {
+ struct _objc_protocol_list *next;
+ int protocol_count;
+ struct _objc_protocol *class_protocols[];
+ }
+ */
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".cat_cls_meth$B\")) ";
+ Result += "static struct {\n";
+ Result += "\tstruct _objc_protocol_list *next;\n";
+ Result += "\tint protocol_count;\n";
+ Result += "\tstruct _objc_protocol *class_protocols[";
+ Result += utostr(Protocols.size());
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += "_PROTOCOLS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t0, ";
+ Result += utostr(Protocols.size());
+ Result += "\n";
+
+ Result += "\t,{&_OBJC_PROTOCOL_";
+ Result += Protocols[0]->getNameAsString();
+ Result += " \n";
+
+ for (unsigned i = 1; i != Protocols.size(); i++) {
+ Result += "\t ,&_OBJC_PROTOCOL_";
+ Result += Protocols[i]->getNameAsString();
+ Result += "\n";
+ }
+ Result += "\t }\n};\n";
+}
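+
+// E.g., with prefix "CLASS" and two adopted protocols P and Q on class Foo,
+// the code above emits roughly:
+//
+//   static struct {
+//     struct _objc_protocol_list *next;
+//     int protocol_count;
+//     struct _objc_protocol *class_protocols[2];
+//   } _OBJC_CLASS_PROTOCOLS_Foo
+//       __attribute__ ((used, section ("__OBJC, __cat_cls_meth")))= {
+//     0, 2
+//     ,{&_OBJC_PROTOCOL_P
+//      ,&_OBJC_PROTOCOL_Q
+//     }
+//   };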
+
+/// hasObjCExceptionAttribute - Return true if this class or any superclass
+/// has the __objc_exception__ attribute.
+/// FIXME: Move this to ASTContext.cpp as it is also used for IRGen.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Context, Super);
+ return false;
+}
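+
+// E.g., given a hypothetical hierarchy
+//   __attribute__((__objc_exception__)) @interface Base @end
+//   @interface Derived : Base @end
+// the recursion above returns true for both Base and Derived, since the
+// attribute is inherited through the superclass chain.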
+
+void RewriteModernObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+
+  // Explicitly declared @interfaces are already synthesized.
+  if (CDecl->isImplicitInterfaceDecl())
+    assert(false &&
+           "Legacy implicit interface rewriting not supported in modern abi");
+
+ WriteModernMetadataDeclarations(Context, Result);
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+
+ for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ IVars.push_back(IVD);
+ }
+
+ Write__ivar_list_t_initializer(*this, Context, Result, IVars,
+ "_OBJC_$_INSTANCE_VARIABLES_",
+ CDecl);
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
+ PropEnd = IDecl->propimpl_end();
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ if (!Getter->isDefined())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ if (!Setter->isDefined())
+ InstanceMethods.push_back(Setter);
+ }
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_$_INSTANCE_METHODS_",
+ IDecl->getNameAsString(), true);
+
+ SmallVector<ObjCMethodDecl *, 32>
+ ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end());
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_$_CLASS_METHODS_",
+ IDecl->getNameAsString(), true);
+
+  // Write out metadata for protocols referenced in the class declaration,
+  // including each protocol's super-protocol list.
+ std::vector<ObjCProtocolDecl *> RefedProtocols;
+ const ObjCList<ObjCProtocolDecl> &Protocols = CDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I) {
+ RefedProtocols.push_back(*I);
+    // Must write out all protocol definitions in the current qualifier list,
+    // and in their nested qualifiers, before writing out the current definition.
+ RewriteObjCProtocolMetaData(*I, Result);
+ }
+
+ Write_protocol_list_initializer(Context, Result,
+ RefedProtocols,
+ "_OBJC_CLASS_PROTOCOLS_$_",
+ IDecl->getNameAsString());
+
+  // Class's property metadata.
+ std::vector<ObjCPropertyDecl *> ClassProperties;
+ for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(),
+ E = CDecl->prop_end(); I != E; ++I)
+ ClassProperties.push_back(*I);
+
+ Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
+ /* Container */IDecl,
+ "_OBJC_$_PROP_LIST_",
+ CDecl->getNameAsString());
+
+  // Data for initializing the metaclass's _class_ro_t meta-data
+  uint32_t flags = CLS_META;
+  std::string InstanceSize;
+  std::string InstanceStart;
+
+ bool classIsHidden = CDecl->getVisibility() == HiddenVisibility;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+
+ if (!CDecl->getSuperClass())
+ // class is root
+ flags |= CLS_ROOT;
+ InstanceSize = "sizeof(struct _class_t)";
+ InstanceStart = InstanceSize;
+ Write__class_ro_t_initializer(Context, Result, flags,
+ InstanceStart, InstanceSize,
+ ClassMethods,
+ 0,
+ 0,
+ 0,
+ "_OBJC_METACLASS_RO_$_",
+ CDecl->getNameAsString());
+
+ // Data for initializing _class_ro_t meta-data
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+
+ if (hasObjCExceptionAttribute(*Context, CDecl))
+ flags |= CLS_EXCEPTION;
+
+ if (!CDecl->getSuperClass())
+ // class is root
+ flags |= CLS_ROOT;
+
+ InstanceSize.clear();
+ InstanceStart.clear();
+ if (!ObjCSynthesizedStructs.count(CDecl)) {
+ InstanceSize = "0";
+ InstanceStart = "0";
+ }
+ else {
+ InstanceSize = "sizeof(struct ";
+ InstanceSize += CDecl->getNameAsString();
+ InstanceSize += "_IMPL)";
+
+ ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ if (IVD) {
+ RewriteIvarOffsetComputation(IVD, InstanceStart);
+ }
+ else
+ InstanceStart = InstanceSize;
+ }
+ Write__class_ro_t_initializer(Context, Result, flags,
+ InstanceStart, InstanceSize,
+ InstanceMethods,
+ RefedProtocols,
+ IVars,
+ ClassProperties,
+ "_OBJC_CLASS_RO_$_",
+ CDecl->getNameAsString());
+
+ Write_class_t(Context, Result,
+ "OBJC_METACLASS_$_",
+ CDecl, /*metaclass*/true);
+
+ Write_class_t(Context, Result,
+ "OBJC_CLASS_$_",
+ CDecl, /*metaclass*/false);
+
+ if (ImplementationIsNonLazy(IDecl))
+ DefinedNonLazyClasses.push_back(CDecl);
+
+}
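+
+// Net effect (sketch, for a hypothetical class Foo): the function above emits
+// _OBJC_$_INSTANCE_VARIABLES_Foo, _OBJC_$_INSTANCE_METHODS_Foo,
+// _OBJC_$_CLASS_METHODS_Foo, the protocol/property lists, and then roughly:
+//
+//   static struct _class_ro_t _OBJC_METACLASS_RO_$_Foo = {
+//     CLS_META /*| OBJC2_CLS_HIDDEN | CLS_ROOT as computed above*/,
+//     sizeof(struct _class_t), sizeof(struct _class_t), ...
+//   };
+//   static struct _class_ro_t _OBJC_CLASS_RO_$_Foo = {
+//     CLS /*| flags as computed above*/,
+//     /*instanceStart*/ offset of first ivar, sizeof(struct Foo_IMPL), ...
+//   };
+//
+// followed by OBJC_METACLASS_$_Foo and OBJC_CLASS_$_Foo from Write_class_t().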
+
+void RewriteModernObjC::RewriteClassSetupInitHook(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ if (!ClsDefCount)
+ return;
+ Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n";
+ Result += "__declspec(allocate(\".objc_inithooks$B\")) ";
+ Result += "static void *OBJC_CLASS_SETUP[] = {\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ ObjCImplementationDecl *IDecl = ClassImplementation[i];
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+ Result += "\t(void *)&OBJC_CLASS_SETUP_$_";
+ Result += CDecl->getName(); Result += ",\n";
+ }
+ Result += "};\n";
+}
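+
+// Sketch of the emitted hook table for two implemented classes Foo and Bar:
+//
+//   #pragma section(".objc_inithooks$B", long, read, write)
+//   __declspec(allocate(".objc_inithooks$B"))
+//   static void *OBJC_CLASS_SETUP[] = {
+//     (void *)&OBJC_CLASS_SETUP_$_Foo,
+//     (void *)&OBJC_CLASS_SETUP_$_Bar,
+//   };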
+
+void RewriteModernObjC::RewriteMetaDataIntoBuffer(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // For each implemented class, write out all its meta data.
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteObjCClassMetaData(ClassImplementation[i], Result);
+
+ RewriteClassSetupInitHook(Result);
+
+ // For each implemented category, write out all its meta data.
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
+
+ RewriteCategorySetupInitHook(Result);
+
+ if (ClsDefCount > 0) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_classlist$B\")) ";
+ Result += "static struct _class_t *L_OBJC_LABEL_CLASS_$ [";
+ Result += llvm::utostr(ClsDefCount); Result += "]";
+ Result +=
+ " __attribute__((used, section (\"__DATA, __objc_classlist,"
+ "regular,no_dead_strip\")))= {\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ Result += "\t&OBJC_CLASS_$_";
+ Result += ClassImplementation[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+
+ if (!DefinedNonLazyClasses.empty()) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_nlclslist$B\")) \n";
+ Result += "static struct _class_t *_OBJC_LABEL_NONLAZY_CLASS_$[] = {\n\t";
+ for (unsigned i = 0, e = DefinedNonLazyClasses.size(); i < e; i++) {
+ Result += "\t&OBJC_CLASS_$_"; Result += DefinedNonLazyClasses[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+ }
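+
+  // E.g., for ClsDefCount == 2 (classes Foo and Bar) the block above yields:
+  //
+  //   static struct _class_t *L_OBJC_LABEL_CLASS_$ [2]
+  //       __attribute__((used, section ("__DATA, __objc_classlist,"
+  //                                     "regular,no_dead_strip")))= {
+  //     &OBJC_CLASS_$_Foo,
+  //     &OBJC_CLASS_$_Bar,
+  //   };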
+
+ if (CatDefCount > 0) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_catlist$B\")) ";
+ Result += "static struct _category_t *L_OBJC_LABEL_CATEGORY_$ [";
+ Result += llvm::utostr(CatDefCount); Result += "]";
+ Result +=
+ " __attribute__((used, section (\"__DATA, __objc_catlist,"
+ "regular,no_dead_strip\")))= {\n";
+ for (int i = 0; i < CatDefCount; i++) {
+ Result += "\t&_OBJC_$_CATEGORY_";
+ Result +=
+ CategoryImplementation[i]->getClassInterface()->getNameAsString();
+ Result += "_$_";
+ Result += CategoryImplementation[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+
+ if (!DefinedNonLazyCategories.empty()) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_nlcatlist$B\")) \n";
+ Result += "static struct _category_t *_OBJC_LABEL_NONLAZY_CATEGORY_$[] = {\n\t";
+ for (unsigned i = 0, e = DefinedNonLazyCategories.size(); i < e; i++) {
+ Result += "\t&_OBJC_$_CATEGORY_";
+ Result +=
+ DefinedNonLazyCategories[i]->getClassInterface()->getNameAsString();
+ Result += "_$_";
+ Result += DefinedNonLazyCategories[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+}
+
+void RewriteModernObjC::WriteImageInfo(std::string &Result) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_imageinfo$B\")) \n";
+
+ Result += "static struct IMAGE_INFO { unsigned version; unsigned flag; } ";
+ // version 0, ObjCABI is 2
+ Result += "_OBJC_IMAGE_INFO = { 0, 2 };\n";
+}
+
+/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
+/// implementation.
+void RewriteModernObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
+ std::string &Result) {
+ WriteModernMetadataDeclarations(Context, Result);
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ // Find category declaration for this implementation.
+  ObjCCategoryDecl *CDecl = 0;
+ for (CDecl = ClassDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->getIdentifier() == IDecl->getIdentifier())
+ break;
+
+ std::string FullCategoryName = ClassDecl->getNameAsString();
+ FullCategoryName += "_$_";
+ FullCategoryName += CDecl->getNameAsString();
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
+ PropEnd = IDecl->propimpl_end();
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ InstanceMethods.push_back(Setter);
+ }
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_$_CATEGORY_INSTANCE_METHODS_",
+ FullCategoryName, true);
+
+ SmallVector<ObjCMethodDecl *, 32>
+ ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end());
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_$_CATEGORY_CLASS_METHODS_",
+ FullCategoryName, true);
+
+  // Write out metadata for protocols referenced in the category declaration,
+  // including each protocol's super-protocol list.
+ std::vector<ObjCProtocolDecl *> RefedProtocols;
+  for (ObjCInterfaceDecl::protocol_iterator I = CDecl->protocol_begin(),
+       E = CDecl->protocol_end();
+       I != E; ++I) {
+ RefedProtocols.push_back(*I);
+    // Must write out all protocol definitions in the current qualifier list,
+    // and in their nested qualifiers, before writing out the current definition.
+ RewriteObjCProtocolMetaData(*I, Result);
+ }
+
+ Write_protocol_list_initializer(Context, Result,
+ RefedProtocols,
+ "_OBJC_CATEGORY_PROTOCOLS_$_",
+ FullCategoryName);
+
+  // Category's property metadata.
+ std::vector<ObjCPropertyDecl *> ClassProperties;
+ for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(),
+ E = CDecl->prop_end(); I != E; ++I)
+ ClassProperties.push_back(*I);
+
+ Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
+ /* Container */0,
+ "_OBJC_$_PROP_LIST_",
+ FullCategoryName);
+
+ Write_category_t(*this, Context, Result,
+ CDecl,
+ ClassDecl,
+ InstanceMethods,
+ ClassMethods,
+ RefedProtocols,
+ ClassProperties);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(IDecl))
+ DefinedNonLazyCategories.push_back(CDecl);
+
+}
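+
+// Sketch: for @implementation Foo (Bar) the function above emits the method,
+// protocol and property lists keyed by the name "Foo_$_Bar", and then a
+//
+//   static struct _category_t _OBJC_$_CATEGORY_Foo_$_Bar = { ... };
+//
+// definition via Write_category_t(), which RewriteMetaDataIntoBuffer() later
+// references from the L_OBJC_LABEL_CATEGORY_$ table.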
+
+void RewriteModernObjC::RewriteCategorySetupInitHook(std::string &Result) {
+ int CatDefCount = CategoryImplementation.size();
+ if (!CatDefCount)
+ return;
+ Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n";
+ Result += "__declspec(allocate(\".objc_inithooks$B\")) ";
+ Result += "static void *OBJC_CATEGORY_SETUP[] = {\n";
+ for (int i = 0; i < CatDefCount; i++) {
+ ObjCCategoryImplDecl *IDecl = CategoryImplementation[i];
+    ObjCCategoryDecl *CatDecl = IDecl->getCategoryDecl();
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ Result += "\t(void *)&OBJC_CATEGORY_SETUP_$_";
+ Result += ClassDecl->getName();
+ Result += "_$_";
+ Result += CatDecl->getName();
+ Result += ",\n";
+ }
+ Result += "};\n";
+}
+
+/// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
+/// class methods.
+template<typename MethodIterator>
+void RewriteModernObjC::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) {
+ if (MethodBegin == MethodEnd) return;
+
+ if (!objc_impl_method) {
+ /* struct _objc_method {
+ SEL _cmd;
+ char *method_types;
+ void *_imp;
+ }
+ */
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tSEL _cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ objc_impl_method = true;
+ }
+
+ // Build _objc_method_list for class's methods if needed
+
+ /* struct {
+ struct _objc_method_list *next_method;
+ int method_count;
+ struct _objc_method method_list[];
+ }
+ */
+ unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
+ Result += "\n";
+ if (LangOpts.MicrosoftExt) {
+ if (IsInstanceMethod)
+ Result += "__declspec(allocate(\".inst_meth$B\")) ";
+ else
+ Result += "__declspec(allocate(\".cls_meth$B\")) ";
+ }
+ Result += "static struct {\n";
+ Result += "\tstruct _objc_method_list *next_method;\n";
+ Result += "\tint method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
+ Result += "_METHODS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __";
+ Result += IsInstanceMethod ? "inst" : "cls";
+ Result += "_meth\")))= ";
+ Result += "{\n\t0, " + utostr(NumMethods) + "\n";
+
+ Result += "\t,{{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
+ Result += "\t ,{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ }
+ Result += "\t }\n};\n";
+}
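+
+// E.g., two instance methods on class Foo come out roughly as (internal
+// names and type encodings here are illustrative):
+//
+//   static struct {
+//     struct _objc_method_list *next_method;
+//     int method_count;
+//     struct _objc_method method_list[2];
+//   } _OBJC_INSTANCE_METHODS_Foo
+//       __attribute__ ((used, section ("__OBJC, __inst_meth")))= {
+//     0, 2
+//     ,{{(SEL)"doIt", "v8@0:4", (void *)_I_Foo_doIt}
+//      ,{(SEL)"undoIt", "v8@0:4", (void *)_I_Foo_undoIt}
+//     }
+//   };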
+
+Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
+ SourceRange OldRange = IV->getSourceRange();
+ Expr *BaseExpr = IV->getBase();
+
+ // Rewrite the base, but without actually doing replaces.
+ {
+ DisableReplaceStmtScope S(*this);
+ BaseExpr = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(BaseExpr));
+ IV->setBase(BaseExpr);
+ }
+
+ ObjCIvarDecl *D = IV->getDecl();
+
+ Expr *Replacement = IV;
+
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
+ // lookup which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = 0;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Build name of symbol holding ivar offset.
+ std::string IvarOffsetName;
+ WriteInternalIvarName(clsDeclared, D, IvarOffsetName);
+
+ ReferencedIvars[clsDeclared].insert(D);
+
+ // cast offset to "char *".
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->CharTy),
+ CK_BitCast,
+ BaseExpr);
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(IvarOffsetName),
+ Context->UnsignedLongTy, 0, SC_Extern, SC_None);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false,
+ Context->UnsignedLongTy, VK_LValue,
+ SourceLocation());
+ BinaryOperator *addExpr =
+ new (Context) BinaryOperator(castExpr, DRE, BO_Add,
+ Context->getPointerType(Context->CharTy),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(),
+ SourceLocation(),
+ addExpr);
+ QualType IvarT = D->getType();
+ convertObjCTypeToCStyleType(IvarT);
+ QualType castT = Context->getPointerType(IvarT);
+
+ castExpr = NoTypeInfoCStyleCastExpr(Context,
+ castT,
+ CK_BitCast,
+ PE);
+ Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT,
+ VK_LValue, OK_Ordinary,
+ SourceLocation());
+ PE = new (Context) ParenExpr(OldRange.getBegin(),
+ OldRange.getEnd(),
+ Exp);
+
+ Replacement = PE;
+ }
+
+ ReplaceStmtWithRange(IV, Replacement, OldRange);
+ return Replacement;
+}
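+
+// Net effect (sketch): an ivar access such as "obj->ivar", with ivar of type
+// int declared on class Foo, is rewritten to
+//
+//   (*(int *)((char *)obj + OBJC_IVAR_$_Foo$ivar))
+//
+// where the offset symbol is the extern unsigned long whose exact spelling
+// is whatever WriteInternalIvarName() produces above.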
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
index 6a392ea..9c0737f 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
@@ -32,6 +32,8 @@ using llvm::utostr;
namespace {
class RewriteObjC : public ASTConsumer {
+ protected:
+
enum {
BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
block, ... */
@@ -53,35 +55,39 @@ namespace {
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_HAS_DESCRIPTOR = (1 << 29)
};
+ static const int OBJC_ABI_VERSION = 7;
Rewriter Rewrite;
DiagnosticsEngine &Diags;
const LangOptions &LangOpts;
- unsigned RewriteFailedDiag;
- unsigned TryFinallyContainsReturnDiag;
-
ASTContext *Context;
SourceManager *SM;
TranslationUnitDecl *TUDecl;
FileID MainFileID;
const char *MainFileStart, *MainFileEnd;
- SourceLocation LastIncLoc;
-
- SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
- SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
- llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
- llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
- llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCForwardDecls;
- llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
- SmallVector<Stmt *, 32> Stmts;
- SmallVector<int, 8> ObjCBcLabelNo;
- // Remember all the @protocol(<expr>) expressions.
- llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
-
- llvm::DenseSet<uint64_t> CopyDestroyCache;
+ Stmt *CurrentBody;
+ ParentMap *PropParentMap; // created lazily.
+ std::string InFileName;
+ raw_ostream* OutFile;
+ std::string Preamble;
+ TypeDecl *ProtocolTypeDecl;
+ VarDecl *GlobalVarDecl;
+ unsigned RewriteFailedDiag;
+ // ObjC string constant support.
unsigned NumObjCStringLiterals;
+ VarDecl *ConstantStringClassReference;
+ RecordDecl *NSStringRecord;
+ // ObjC foreach break/continue generation support.
+ int BcLabelCount;
+
+ unsigned TryFinallyContainsReturnDiag;
+ // Needed for super.
+ ObjCMethodDecl *CurMethodDef;
+ RecordDecl *SuperStructDecl;
+ RecordDecl *ConstantStringDecl;
+
FunctionDecl *MsgSendFunctionDecl;
FunctionDecl *MsgSendSuperFunctionDecl;
FunctionDecl *MsgSendStretFunctionDecl;
@@ -93,39 +99,29 @@ namespace {
FunctionDecl *SelGetUidFunctionDecl;
FunctionDecl *CFStringFunctionDecl;
FunctionDecl *SuperContructorFunctionDecl;
+ FunctionDecl *CurFunctionDef;
+ FunctionDecl *CurFunctionDeclToDeclareForBlock;
- // ObjC string constant support.
- VarDecl *ConstantStringClassReference;
- RecordDecl *NSStringRecord;
-
- // ObjC foreach break/continue generation support.
- int BcLabelCount;
-
- // Needed for super.
- ObjCMethodDecl *CurMethodDef;
- RecordDecl *SuperStructDecl;
- RecordDecl *ConstantStringDecl;
-
- TypeDecl *ProtocolTypeDecl;
- QualType getProtocolType();
-
- // Needed for header files being rewritten
- bool IsHeader;
-
- std::string InFileName;
- raw_ostream* OutFile;
-
- bool SilenceRewriteMacroWarning;
- bool objc_impl_method;
-
- std::string Preamble;
+ /* Misc. containers needed for meta-data rewrite. */
+ SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
+ SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCForwardDecls;
+ llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
+ SmallVector<Stmt *, 32> Stmts;
+ SmallVector<int, 8> ObjCBcLabelNo;
+ // Remember all the @protocol(<expr>) expressions.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
+
+ llvm::DenseSet<uint64_t> CopyDestroyCache;
// Block expressions.
SmallVector<BlockExpr *, 32> Blocks;
SmallVector<int, 32> InnerDeclRefsCount;
- SmallVector<BlockDeclRefExpr *, 32> InnerDeclRefs;
+ SmallVector<DeclRefExpr *, 32> InnerDeclRefs;
- SmallVector<BlockDeclRefExpr *, 32> BlockDeclRefs;
+ SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
// Block related declarations.
SmallVector<ValueDecl *, 8> BlockByCopyDecls;
@@ -138,36 +134,54 @@ namespace {
llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
- // This maps a property to it's assignment statement.
- llvm::DenseMap<Expr *, BinaryOperator *> PropSetters;
- // This maps a property to it's synthesied message expression.
- // This allows us to rewrite chained getters (e.g. o.a.b.c).
- llvm::DenseMap<Expr *, Stmt *> PropGetters;
-
// This maps an original source AST to it's rewritten form. This allows
// us to avoid rewriting the same node twice (which is very uncommon).
// This is needed to support some of the exotic property rewriting.
llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
- FunctionDecl *CurFunctionDef;
- FunctionDecl *CurFunctionDeclToDeclareForBlock;
- VarDecl *GlobalVarDecl;
-
+ // Needed for header files being rewritten
+ bool IsHeader;
+ bool SilenceRewriteMacroWarning;
+ bool objc_impl_method;
+
bool DisableReplaceStmt;
+ class DisableReplaceStmtScope {
+ RewriteObjC &R;
+ bool SavedValue;
+
+ public:
+ DisableReplaceStmtScope(RewriteObjC &R)
+ : R(R), SavedValue(R.DisableReplaceStmt) {
+ R.DisableReplaceStmt = true;
+ }
+ ~DisableReplaceStmtScope() {
+ R.DisableReplaceStmt = SavedValue;
+ }
+ };
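+    // Usage sketch for the RAII scope above: suppress buffer replacements
+    // while recursively rewriting a subexpression, e.g.
+    //   {
+    //     DisableReplaceStmtScope S(*this);
+    //     Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+    //   }
+    // so the nested rewrite builds new ASTs without touching the buffer.
+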
+ void InitializeCommon(ASTContext &context);
- static const int OBJC_ABI_VERSION = 7;
public:
- virtual void Initialize(ASTContext &context);
// Top Level Driver code.
- virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
- if (isa<ObjCClassDecl>((*I))) {
- RewriteForwardClassDecl(D);
- break;
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
+ if (!Class->isThisDeclarationADefinition()) {
+ RewriteForwardClassDecl(D);
+ break;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*I)) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ RewriteForwardProtocolDecl(D);
+ break;
+ }
}
+
HandleTopLevelSingleDecl(*I);
}
+ return true;
}
void HandleTopLevelSingleDecl(Decl *D);
void HandleDeclInMainFile(Decl *D);
@@ -186,7 +200,7 @@ namespace {
return; // We can't rewrite the same node twice.
if (DisableReplaceStmt)
- return; // Used when rewriting the assignment of a property setter.
+ return;
// If replacement succeeded or warning disabled return with no warning.
if (!Rewrite.ReplaceStmt(Old, New)) {
@@ -200,6 +214,9 @@ namespace {
}
void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
+ if (DisableReplaceStmt)
+ return;
+
// Measure the old text.
int Size = Rewrite.getRangeSize(SrcRange);
if (Size == -1) {
@@ -245,12 +262,13 @@ namespace {
}
// Syntactic Rewriting.
+ void RewriteRecordBody(RecordDecl *RD);
void RewriteInclude();
void RewriteForwardClassDecl(DeclGroupRef D);
void RewriteForwardClassDecl(const llvm::SmallVector<Decl*, 8> &DG);
- void RewriteForwardClassEpilogue(ObjCClassDecl *ClassDecl,
+ void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
const std::string &typedefString);
-
+ void RewriteImplementations();
void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
ObjCImplementationDecl *IMD,
ObjCCategoryImplDecl *CID);
@@ -264,7 +282,8 @@ namespace {
ValueDecl *VD, bool def=false);
void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
- void RewriteForwardProtocolDecl(ObjCForwardProtocolDecl *Dcl);
+ void RewriteForwardProtocolDecl(DeclGroupRef D);
+ void RewriteForwardProtocolDecl(const llvm::SmallVector<Decl*, 8> &DG);
void RewriteMethodDeclaration(ObjCMethodDecl *Method);
void RewriteProperty(ObjCPropertyDecl *prop);
void RewriteFunctionDecl(FunctionDecl *FD);
@@ -274,32 +293,16 @@ namespace {
void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
void RewriteTypeOfDecl(VarDecl *VD);
void RewriteObjCQualifiedInterfaceTypes(Expr *E);
- bool needToScanForQualifiers(QualType T);
- QualType getSuperStructType();
- QualType getConstantStringStructType();
- QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
- bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
-
+
// Expression Rewriting.
Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
- void CollectPropertySetters(Stmt *S);
-
- Stmt *CurrentBody;
- ParentMap *PropParentMap; // created lazily.
-
Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
- Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV, SourceLocation OrigStart,
- bool &replaced);
- Stmt *RewriteObjCNestedIvarRefExpr(Stmt *S, bool &replaced);
- Stmt *RewritePropertyOrImplicitGetter(Expr *PropOrGetterRefExpr);
- Stmt *RewritePropertyOrImplicitSetter(BinaryOperator *BinOp, Expr *newStmt,
- SourceRange SrcRange);
+ Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo);
Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
- void WarnAboutReturnGotoStmts(Stmt *S);
- void HasReturnStmts(Stmt *S, bool &hasReturns);
void RewriteTryReturnStmts(Stmt *S);
void RewriteSyncReturnStmts(Stmt *S, std::string buf);
Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
@@ -307,18 +310,57 @@ namespace {
Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
SourceLocation OrigEnd);
- bool IsDeclStmtInForeachHeader(DeclStmt *DS);
+ Stmt *RewriteBreakStmt(BreakStmt *S);
+ Stmt *RewriteContinueStmt(ContinueStmt *S);
+ void RewriteCastExpr(CStyleCastExpr *CE);
+
+ // Block rewriting.
+ void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
+
+ // Block specific rewrite rules.
+ void RewriteBlockPointerDecl(NamedDecl *VD);
+ void RewriteByRefVar(VarDecl *VD);
+ Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
+ Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
+ void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
+
+ void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ virtual void Initialize(ASTContext &context) = 0;
+
+ // Metadata Rewriting.
+ virtual void RewriteMetaDataIntoBuffer(std::string &Result) = 0;
+ virtual void RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) = 0;
+ virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result) = 0;
+ virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) = 0;
+ virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) = 0;
+
+ // Rewriting ivar access
+ virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) = 0;
+ virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) = 0;
+
+    // Misc. AST transformation routines. Sometimes they end up calling
+ // rewriting routines on the new ASTs.
CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
- Expr **args, unsigned nargs,
- SourceLocation StartLoc=SourceLocation(),
- SourceLocation EndLoc=SourceLocation());
+ Expr **args, unsigned nargs,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation StartLoc=SourceLocation(),
SourceLocation EndLoc=SourceLocation());
- Stmt *RewriteBreakStmt(BreakStmt *S);
- Stmt *RewriteContinueStmt(ContinueStmt *S);
+
void SynthCountByEnumWithState(std::string &buf);
-
void SynthMsgSendFunctionDecl();
void SynthMsgSendSuperFunctionDecl();
void SynthMsgSendStretFunctionDecl();
@@ -329,52 +371,8 @@ namespace {
void SynthGetSuperClassFunctionDecl();
void SynthSelGetUidFunctionDecl();
void SynthSuperContructorFunctionDecl();
-
- // Metadata emission.
- void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
- std::string &Result);
-
- void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
- std::string &Result);
-
- template<typename MethodIterator>
- void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
- MethodIterator MethodEnd,
- bool IsInstanceMethod,
- StringRef prefix,
- StringRef ClassName,
- std::string &Result);
-
- void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
- StringRef prefix,
- StringRef ClassName,
- std::string &Result);
- void RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Prots,
- StringRef prefix,
- StringRef ClassName,
- std::string &Result);
- void SynthesizeObjCInternalStruct(ObjCInterfaceDecl *CDecl,
- std::string &Result);
- void SynthesizeIvarOffsetComputation(ObjCIvarDecl *ivar,
- std::string &Result);
- void RewriteImplementations();
- void SynthesizeMetaDataIntoBuffer(std::string &Result);
-
- // Block rewriting.
- void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
- void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
-
- void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
- void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
-
- // Block specific rewrite rules.
- void RewriteBlockPointerDecl(NamedDecl *VD);
- void RewriteByRefVar(VarDecl *VD);
+
std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
- Stmt *RewriteBlockDeclRefExpr(Expr *VD);
- Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
- void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
-
std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
StringRef funcName, std::string Tag);
std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
@@ -388,12 +386,23 @@ namespace {
Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
void SynthesizeBlockLiterals(SourceLocation FunLocStart,
StringRef FunName);
- void RewriteRecordBody(RecordDecl *RD);
+ FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
+ Stmt *SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs);
+ // Misc. helper routines.
+ QualType getProtocolType();
+ void WarnAboutReturnGotoStmts(Stmt *S);
+ void HasReturnStmts(Stmt *S, bool &hasReturns);
+ void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
+ void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
+ void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
+
+ bool IsDeclStmtInForeachHeader(DeclStmt *DS);
void CollectBlockDeclRefInfo(BlockExpr *Exp);
void GetBlockDeclRefExprs(Stmt *S);
void GetInnerBlockDeclRefExprs(Stmt *S,
- SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs,
+ SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs,
llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts);
// We avoid calling Type::isBlockPointerType(), since it operates on the
@@ -414,6 +423,12 @@ namespace {
return false;
}
+ bool needToScanForQualifiers(QualType T);
+ QualType getSuperStructType();
+ QualType getConstantStringStructType();
+ QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
+ bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
+
void convertToUnqualifiedObjCType(QualType &T) {
if (T->isObjCQualifiedIdType())
T = Context->getObjCIdType();
@@ -452,12 +467,7 @@ namespace {
bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
void GetExtentOfArgList(const char *Name, const char *&LParen,
const char *&RParen);
- void RewriteCastExpr(CStyleCastExpr *CE);
-
- FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
- Stmt *SynthBlockInitExpr(BlockExpr *Exp,
- const SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs);
-
+
void QuoteDoublequotes(std::string &From, std::string &To) {
for (unsigned i = 0; i < From.length(); i++) {
if (From[i] == '"')
@@ -477,15 +487,54 @@ namespace {
fpi.Variadic = variadic;
return Context->getFunctionType(result, args, numArgs, fpi);
}
- };
- // Helper function: create a CStyleCastExpr with trivial type source info.
- CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
- CastKind Kind, Expr *E) {
- TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
- return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo,
- SourceLocation(), SourceLocation());
- }
+ // Helper function: create a CStyleCastExpr with trivial type source info.
+ CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
+ CastKind Kind, Expr *E) {
+ TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo,
+ SourceLocation(), SourceLocation());
+ }
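+    // E.g., the ivar rewrite calls the helper above as
+    //   NoTypeInfoCStyleCastExpr(Context,
+    //                            Context->getPointerType(Context->CharTy),
+    //                            CK_BitCast, BaseExpr)
+    // to build the "(char *)base" cast without fabricating type source info.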
+ };
+
+ class RewriteObjCFragileABI : public RewriteObjC {
+ public:
+
+ RewriteObjCFragileABI(std::string inFile, raw_ostream *OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn) : RewriteObjC(inFile, OS,
+ D, LOpts,
+ silenceMacroWarn) {}
+
+ ~RewriteObjCFragileABI() {}
+ virtual void Initialize(ASTContext &context);
+
+ // Rewriting metadata
+ template<typename MethodIterator>
+ void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result);
+ virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result);
+ virtual void RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix, StringRef ClassName, std::string &Result);
+ virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result);
+ virtual void RewriteMetaDataIntoBuffer(std::string &Result);
+ virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result);
+
+ // Rewriting ivar
+ virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result);
+ virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
+ };
}
void RewriteObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
@@ -541,10 +590,10 @@ ASTConsumer *clang::CreateObjCRewriter(const std::string& InFile,
DiagnosticsEngine &Diags,
const LangOptions &LOpts,
bool SilenceRewriteMacroWarning) {
- return new RewriteObjC(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning);
+ return new RewriteObjCFragileABI(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning);
}
-void RewriteObjC::Initialize(ASTContext &context) {
+void RewriteObjC::InitializeCommon(ASTContext &context) {
Context = &context;
SM = &Context->getSourceManager();
TUDecl = Context->getTranslationUnitDecl();
@@ -581,121 +630,9 @@ void RewriteObjC::Initialize(ASTContext &context) {
MainFileStart = MainBuf->getBufferStart();
MainFileEnd = MainBuf->getBufferEnd();
- Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOptions());
-
- // declaring objc_selector outside the parameter list removes a silly
- // scope related warning...
- if (IsHeader)
- Preamble = "#pragma once\n";
- Preamble += "struct objc_selector; struct objc_class;\n";
- Preamble += "struct __rw_objc_super { struct objc_object *object; ";
- Preamble += "struct objc_object *superClass; ";
- if (LangOpts.MicrosoftExt) {
- // Add a constructor for creating temporary objects.
- Preamble += "__rw_objc_super(struct objc_object *o, struct objc_object *s) "
- ": ";
- Preamble += "object(o), superClass(s) {} ";
- }
- Preamble += "};\n";
- Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
- Preamble += "typedef struct objc_object Protocol;\n";
- Preamble += "#define _REWRITER_typedef_Protocol\n";
- Preamble += "#endif\n";
- if (LangOpts.MicrosoftExt) {
- Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
- Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
- } else
- Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend";
- Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper";
- Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSend_stret";
- Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSendSuper_stret";
- Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
- Preamble += "__OBJC_RW_DLLIMPORT double objc_msgSend_fpret";
- Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
- Preamble += "(const char *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
- Preamble += "(struct objc_class *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
- Preamble += "(const char *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw(struct objc_object *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_enter(void *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_exit(void *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_exception_extract(void *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match";
- Preamble += "(struct objc_class *, struct objc_object *);\n";
- // @synchronized hooks.
- Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter(struct objc_object *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit(struct objc_object *);\n";
- Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
- Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
- Preamble += "struct __objcFastEnumerationState {\n\t";
- Preamble += "unsigned long state;\n\t";
- Preamble += "void **itemsPtr;\n\t";
- Preamble += "unsigned long *mutationsPtr;\n\t";
- Preamble += "unsigned long extra[5];\n};\n";
- Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
- Preamble += "#define __FASTENUMERATIONSTATE\n";
- Preamble += "#endif\n";
- Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
- Preamble += "struct __NSConstantStringImpl {\n";
- Preamble += " int *isa;\n";
- Preamble += " int flags;\n";
- Preamble += " char *str;\n";
- Preamble += " long length;\n";
- Preamble += "};\n";
- Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
- Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
- Preamble += "#else\n";
- Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
- Preamble += "#endif\n";
- Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
- Preamble += "#endif\n";
- // Blocks preamble.
- Preamble += "#ifndef BLOCK_IMPL\n";
- Preamble += "#define BLOCK_IMPL\n";
- Preamble += "struct __block_impl {\n";
- Preamble += " void *isa;\n";
- Preamble += " int Flags;\n";
- Preamble += " int Reserved;\n";
- Preamble += " void *FuncPtr;\n";
- Preamble += "};\n";
- Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
- Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
- Preamble += "extern \"C\" __declspec(dllexport) "
- "void _Block_object_assign(void *, const void *, const int);\n";
- Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
- Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
- Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
- Preamble += "#else\n";
- Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
- Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
- Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
- Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
- Preamble += "#endif\n";
- Preamble += "#endif\n";
- if (LangOpts.MicrosoftExt) {
- Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
- Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
- Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
- Preamble += "#define __attribute__(X)\n";
- Preamble += "#endif\n";
- Preamble += "#define __weak\n";
- }
- else {
- Preamble += "#define __block\n";
- Preamble += "#define __weak\n";
- }
- // NOTE! Windows uses LLP64 for 64bit mode. So, cast pointer to long long
- // as this avoids warning in any 64bit/32bit compilation model.
- Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
+ Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
}
-
//===----------------------------------------------------------------------===//
// Top Level Driver Code
//===----------------------------------------------------------------------===//
@@ -722,33 +659,57 @@ void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) {
ConstantStringClassReference = FVD;
return;
}
- } else if (ObjCInterfaceDecl *MD = dyn_cast<ObjCInterfaceDecl>(D)) {
- RewriteInterfaceDecl(MD);
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (ID->isThisDeclarationADefinition())
+ RewriteInterfaceDecl(ID);
} else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
RewriteCategoryDecl(CD);
} else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
- RewriteProtocolDecl(PD);
- } else if (ObjCForwardProtocolDecl *FP =
- dyn_cast<ObjCForwardProtocolDecl>(D)){
- RewriteForwardProtocolDecl(FP);
+ if (PD->isThisDeclarationADefinition())
+ RewriteProtocolDecl(PD);
} else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
// Recurse into linkage specifications
for (DeclContext::decl_iterator DI = LSD->decls_begin(),
DIEnd = LSD->decls_end();
DI != DIEnd; ) {
- if (isa<ObjCClassDecl>((*DI))) {
- SmallVector<Decl *, 8> DG;
- Decl *D = (*DI);
- SourceLocation Loc = D->getLocation();
- while (DI != DIEnd &&
- isa<ObjCClassDecl>(D) && D->getLocation() == Loc) {
- DG.push_back(D);
- ++DI;
- D = (*DI);
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
+ if (!IFace->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = IFace->getLocStart();
+ do {
+ if (isa<ObjCInterfaceDecl>(*DI) &&
+ !cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardClassDecl(DG);
+ continue;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = Proto->getLocStart();
+ do {
+ if (isa<ObjCProtocolDecl>(*DI) &&
+ !cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardProtocolDecl(DG);
+ continue;
}
- RewriteForwardClassDecl(DG);
- continue;
}
+
HandleTopLevelSingleDecl(*DI);
++DI;
}
@@ -868,7 +829,7 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
Getr += ";\n";
Getr += "return (_TYPE)";
Getr += "objc_getProperty(self, _cmd, ";
- SynthesizeIvarOffsetComputation(OID, Getr);
+ RewriteIvarOffsetComputation(OID, Getr);
Getr += ", 1)";
}
else
@@ -898,7 +859,7 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
// See objc-act.c:objc_synthesize_new_setter() for details.
if (GenSetProperty) {
Setr += "objc_setProperty (self, _cmd, ";
- SynthesizeIvarOffsetComputation(OID, Setr);
+ RewriteIvarOffsetComputation(OID, Setr);
Setr += ", (id)";
Setr += PD->getName();
Setr += ", ";
@@ -932,9 +893,9 @@ static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
typedefString += ";\n#endif\n";
}
-void RewriteObjC::RewriteForwardClassEpilogue(ObjCClassDecl *ClassDecl,
+void RewriteObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
const std::string &typedefString) {
- SourceLocation startLoc = ClassDecl->getLocation();
+ SourceLocation startLoc = ClassDecl->getLocStart();
const char *startBuf = SM->getCharacterData(startLoc);
const char *semiPtr = strchr(startBuf, ';');
// Replace the @class with typedefs corresponding to the classes.
@@ -944,8 +905,7 @@ void RewriteObjC::RewriteForwardClassEpilogue(ObjCClassDecl *ClassDecl,
void RewriteObjC::RewriteForwardClassDecl(DeclGroupRef D) {
std::string typedefString;
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
- ObjCClassDecl *ClassDecl = cast<ObjCClassDecl>(*I);
- ObjCInterfaceDecl *ForwardDecl = ClassDecl->getForwardInterfaceDecl();
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(*I);
if (I == D.begin()) {
// Translate to typedef's that forward reference structs with the same name
// as the class. As a convenience, we include the original declaration
@@ -957,15 +917,14 @@ void RewriteObjC::RewriteForwardClassDecl(DeclGroupRef D) {
RewriteOneForwardClassDecl(ForwardDecl, typedefString);
}
DeclGroupRef::iterator I = D.begin();
- RewriteForwardClassEpilogue(cast<ObjCClassDecl>(*I), typedefString);
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(*I), typedefString);
}
void RewriteObjC::RewriteForwardClassDecl(
const llvm::SmallVector<Decl*, 8> &D) {
std::string typedefString;
for (unsigned i = 0; i < D.size(); i++) {
- ObjCClassDecl *ClassDecl = cast<ObjCClassDecl>(D[i]);
- ObjCInterfaceDecl *ForwardDecl = ClassDecl->getForwardInterfaceDecl();
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(D[i]);
if (i == 0) {
typedefString += "// @class ";
typedefString += ForwardDecl->getNameAsString();
@@ -973,7 +932,7 @@ void RewriteObjC::RewriteForwardClassDecl(
}
RewriteOneForwardClassDecl(ForwardDecl, typedefString);
}
- RewriteForwardClassEpilogue(cast<ObjCClassDecl>(D[0]), typedefString);
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(D[0]), typedefString);
}
void RewriteObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
@@ -1026,7 +985,8 @@ void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
SourceLocation LocStart = PDecl->getLocStart();
-
+ assert(PDecl->isThisDeclarationADefinition());
+
// FIXME: handle protocol headers that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
@@ -1064,8 +1024,17 @@ void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
}
}
-void RewriteObjC::RewriteForwardProtocolDecl(ObjCForwardProtocolDecl *PDecl) {
- SourceLocation LocStart = PDecl->getLocation();
+void RewriteObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
+ SourceLocation LocStart = (*D.begin())->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+  // FIXME: handle forward protocols that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteObjC::RewriteForwardProtocolDecl(const llvm::SmallVector<Decl*, 8> &DG) {
+ SourceLocation LocStart = DG[0]->getLocStart();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
// FIXME: handle forward protocol that are declared across multiple lines.
@@ -1168,10 +1137,8 @@ void RewriteObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
std::string Name = PDecl->getNameAsString();
QualType QT = PDecl->getType();
// Make sure we convert "t (^)(...)" to "t (*)(...)".
- if (convertBlockPointerToFunctionPointer(QT))
- QT.getAsStringInternal(Name, Context->getPrintingPolicy());
- else
- PDecl->getType().getAsStringInternal(Name, Context->getPrintingPolicy());
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
ResultStr += Name;
}
}
@@ -1248,7 +1215,7 @@ void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
std::string ResultStr;
- if (!ObjCForwardDecls.count(ClassDecl)) {
+ if (!ObjCForwardDecls.count(ClassDecl->getCanonicalDecl())) {
// we haven't seen a forward decl - generate a typedef.
ResultStr = "#ifndef _REWRITER_typedef_";
ResultStr += ClassDecl->getNameAsString();
@@ -1260,9 +1227,9 @@ void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
ResultStr += ClassDecl->getNameAsString();
ResultStr += ";\n#endif\n";
// Mark this typedef as having been generated.
- ObjCForwardDecls.insert(ClassDecl);
+ ObjCForwardDecls.insert(ClassDecl->getCanonicalDecl());
}
- SynthesizeObjCInternalStruct(ClassDecl, ResultStr);
+ RewriteObjCInternalStruct(ClassDecl, ResultStr);
for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(),
E = ClassDecl->prop_end(); I != E; ++I)
@@ -1281,287 +1248,163 @@ void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
"/* @end */");
}
-Stmt *RewriteObjC::RewritePropertyOrImplicitSetter(BinaryOperator *BinOp, Expr *newStmt,
- SourceRange SrcRange) {
- ObjCMethodDecl *OMD = 0;
- QualType Ty;
- Selector Sel;
- Stmt *Receiver = 0;
- bool Super = false;
- QualType SuperTy;
- SourceLocation SuperLocation;
- SourceLocation SelectorLoc;
- // Synthesize a ObjCMessageExpr from a ObjCPropertyRefExpr or ObjCImplicitSetterGetterRefExpr.
- // This allows us to reuse all the fun and games in SynthMessageExpr().
- if (ObjCPropertyRefExpr *PropRefExpr =
- dyn_cast<ObjCPropertyRefExpr>(BinOp->getLHS())) {
- SelectorLoc = PropRefExpr->getLocation();
- if (PropRefExpr->isExplicitProperty()) {
- ObjCPropertyDecl *PDecl = PropRefExpr->getExplicitProperty();
- OMD = PDecl->getSetterMethodDecl();
- Ty = PDecl->getType();
- Sel = PDecl->getSetterName();
- } else {
- OMD = PropRefExpr->getImplicitPropertySetter();
- Sel = OMD->getSelector();
- Ty = PropRefExpr->getType();
- }
- Super = PropRefExpr->isSuperReceiver();
- if (!Super) {
- Receiver = PropRefExpr->getBase();
- } else {
- SuperTy = PropRefExpr->getSuperReceiverType();
- SuperLocation = PropRefExpr->getReceiverLocation();
- }
- }
-
- assert(OMD && "RewritePropertyOrImplicitSetter - null OMD");
-
- ObjCMessageExpr *MsgExpr;
- if (Super)
- MsgExpr = ObjCMessageExpr::Create(*Context,
- Ty.getNonReferenceType(),
- Expr::getValueKindForType(Ty),
- /*FIXME?*/SourceLocation(),
- SuperLocation,
- /*IsInstanceSuper=*/true,
- SuperTy,
- Sel, SelectorLoc, OMD,
- newStmt,
- /*FIXME:*/SourceLocation());
- else {
- // FIXME. Refactor this into common code with that in
- // RewritePropertyOrImplicitGetter
- assert(Receiver && "RewritePropertyOrImplicitSetter - null Receiver");
- if (Expr *Exp = dyn_cast<Expr>(Receiver))
- if (PropGetters[Exp])
- // This allows us to handle chain/nested property/implicit getters.
- Receiver = PropGetters[Exp];
-
- MsgExpr = ObjCMessageExpr::Create(*Context,
- Ty.getNonReferenceType(),
- Expr::getValueKindForType(Ty),
- /*FIXME: */SourceLocation(),
- cast<Expr>(Receiver),
- Sel, SelectorLoc, OMD,
- newStmt,
- /*FIXME:*/SourceLocation());
- }
- Stmt *ReplacingStmt = SynthMessageExpr(MsgExpr);
+Stmt *RewriteObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
- // Now do the actual rewrite.
- ReplaceStmtWithRange(BinOp, ReplacingStmt, SrcRange);
- //delete BinOp;
- // NOTE: We don't want to call MsgExpr->Destroy(), as it holds references
- // to things that stay around.
- Context->Deallocate(MsgExpr);
- return ReplacingStmt;
-}
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getSemanticExpr(
+ PseudoOp->getNumSemanticExprs() - 1));
-Stmt *RewriteObjC::RewritePropertyOrImplicitGetter(Expr *PropOrGetterRefExpr) {
- // Synthesize a ObjCMessageExpr from a ObjCPropertyRefExpr or ImplicitGetter.
- // This allows us to reuse all the fun and games in SynthMessageExpr().
- Stmt *Receiver = 0;
- ObjCMethodDecl *OMD = 0;
- QualType Ty;
- Selector Sel;
- bool Super = false;
- QualType SuperTy;
- SourceLocation SuperLocation;
- SourceLocation SelectorLoc;
- if (ObjCPropertyRefExpr *PropRefExpr =
- dyn_cast<ObjCPropertyRefExpr>(PropOrGetterRefExpr)) {
- SelectorLoc = PropRefExpr->getLocation();
- if (PropRefExpr->isExplicitProperty()) {
- ObjCPropertyDecl *PDecl = PropRefExpr->getExplicitProperty();
- OMD = PDecl->getGetterMethodDecl();
- Ty = PDecl->getType();
- Sel = PDecl->getGetterName();
- } else {
- OMD = PropRefExpr->getImplicitPropertyGetter();
- Sel = OMD->getSelector();
- Ty = PropRefExpr->getType();
- }
- Super = PropRefExpr->isSuperReceiver();
- if (!Super)
- Receiver = PropRefExpr->getBase();
- else {
- SuperTy = PropRefExpr->getSuperReceiverType();
- SuperLocation = PropRefExpr->getReceiverLocation();
- }
- }
-
- assert (OMD && "RewritePropertyOrImplicitGetter - OMD is null");
-
- ObjCMessageExpr *MsgExpr;
- if (Super)
- MsgExpr = ObjCMessageExpr::Create(*Context,
- Ty.getNonReferenceType(),
- Expr::getValueKindForType(Ty),
- PropOrGetterRefExpr->getLocStart(),
- SuperLocation,
- /*IsInstanceSuper=*/true,
- SuperTy,
- Sel, SelectorLoc, OMD,
- ArrayRef<Expr*>(),
- PropOrGetterRefExpr->getLocEnd());
- else {
- assert (Receiver && "RewritePropertyOrImplicitGetter - Receiver is null");
- if (Expr *Exp = dyn_cast<Expr>(Receiver))
- if (PropGetters[Exp])
- // This allows us to handle chain/nested property/implicit getters.
- Receiver = PropGetters[Exp];
- MsgExpr = ObjCMessageExpr::Create(*Context,
- Ty.getNonReferenceType(),
- Expr::getValueKindForType(Ty),
- PropOrGetterRefExpr->getLocStart(),
- cast<Expr>(Receiver),
- Sel, SelectorLoc, OMD,
- ArrayRef<Expr*>(),
- PropOrGetterRefExpr->getLocEnd());
- }
-
- Stmt *ReplacingStmt = SynthMessageExpr(MsgExpr, MsgExpr->getLocStart(),
- MsgExpr->getLocEnd());
-
- if (!PropParentMap)
- PropParentMap = new ParentMap(CurrentBody);
- bool NestedPropertyRef = false;
- Stmt *Parent = PropParentMap->getParent(PropOrGetterRefExpr);
- ImplicitCastExpr*ICE=0;
- if (Parent)
- if ((ICE = dyn_cast<ImplicitCastExpr>(Parent))) {
- assert((ICE->getCastKind() == CK_GetObjCProperty)
- && "RewritePropertyOrImplicitGetter");
- Parent = PropParentMap->getParent(Parent);
- NestedPropertyRef = (Parent && isa<ObjCPropertyRefExpr>(Parent));
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base, *RHS;
+ {
+ DisableReplaceStmtScope S(*this);
+
+ // Rebuild the base expression if we have one.
+ Base = 0;
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
}
- if (NestedPropertyRef) {
- // We stash away the ReplacingStmt since actually doing the
- // replacement/rewrite won't work for nested getters (e.g. obj.p.i)
- PropGetters[ICE] = ReplacingStmt;
- // NOTE: We don't want to call MsgExpr->Destroy(), as it holds references
- // to things that stay around.
- Context->Deallocate(MsgExpr);
- return PropOrGetterRefExpr; // return the original...
- } else {
- ReplaceStmt(PropOrGetterRefExpr, ReplacingStmt);
- // delete PropRefExpr; elsewhere...
- // NOTE: We don't want to call MsgExpr->Destroy(), as it holds references
- // to things that stay around.
- Context->Deallocate(MsgExpr);
- return ReplacingStmt;
+
+ // Rebuild the RHS.
+ RHS = cast<BinaryOperator>(PseudoOp->getSyntacticForm())->getRHS();
+ RHS = cast<OpaqueValueExpr>(RHS)->getSourceExpr();
+ RHS = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(RHS));
+ }
+
+ // TODO: avoid this copy.
+ SmallVector<SourceLocation, 1> SelLocs;
+ OldMsg->getSelectorLocs(SelLocs);
+
+ ObjCMessageExpr *NewMsg = 0;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
}
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
}
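// To illustrate the net effect of the setter path above (a sketch only;
// the class, property, and exact casts are hypothetical — the real casts
// come from SynthMessageExpr):
//
//   @interface Foo : NSObject
//   @property int count;
//   @end
//   ...
//   foo.count = 7;
//
// is ultimately rewritten into something of the form:
//
//   ((void (*)(id, SEL, int))(void *)objc_msgSend)
//       ((id)foo, sel_registerName("setCount:"), 7);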
-Stmt *RewriteObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV,
- SourceLocation OrigStart,
- bool &replaced) {
- ObjCIvarDecl *D = IV->getDecl();
- const Expr *BaseExpr = IV->getBase();
- if (CurMethodDef) {
- if (BaseExpr->getType()->isObjCObjectPointerType()) {
- const ObjCInterfaceType *iFaceDecl =
- dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
- assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
- // lookup which class implements the instance variable.
- ObjCInterfaceDecl *clsDeclared = 0;
- iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
- clsDeclared);
- assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+Stmt *RewriteObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
- // Synthesize an explicit cast to gain access to the ivar.
- std::string RecName = clsDeclared->getIdentifier()->getName();
- RecName += "_IMPL";
- IdentifierInfo *II = &Context->Idents.get(RecName);
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- II);
- assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
- CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
- CK_BitCast,
- IV->getBase());
- // Don't forget the parens to enforce the proper binding.
- ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
- IV->getBase()->getLocEnd(),
- castExpr);
- replaced = true;
- if (IV->isFreeIvar() &&
- CurMethodDef->getClassInterface() == iFaceDecl->getDecl()) {
- MemberExpr *ME = new (Context) MemberExpr(PE, true, D,
- IV->getLocation(),
- D->getType(),
- VK_LValue, OK_Ordinary);
- // delete IV; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
- return ME;
- }
- // Get the new text
- // Cannot delete IV->getBase(), since PE points to it.
- // Replace the old base with the cast. This is important when doing
- // embedded rewrites. For example, [newInv->_container addObject:0].
- IV->setBase(PE);
- return IV;
- }
- } else { // we are outside a method.
- assert(!IV->isFreeIvar() && "Cannot have a free standing ivar outside a method");
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getResultExpr()->IgnoreImplicit());
- // Explicit ivar refs need to have a cast inserted.
- // FIXME: consider sharing some of this code with the code above.
- if (BaseExpr->getType()->isObjCObjectPointerType()) {
- const ObjCInterfaceType *iFaceDecl =
- dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
- // lookup which class implements the instance variable.
- ObjCInterfaceDecl *clsDeclared = 0;
- iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
- clsDeclared);
- assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base = 0;
+ {
+ DisableReplaceStmtScope S(*this);
- // Synthesize an explicit cast to gain access to the ivar.
- std::string RecName = clsDeclared->getIdentifier()->getName();
- RecName += "_IMPL";
- IdentifierInfo *II = &Context->Idents.get(RecName);
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- II);
- assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
- QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
- CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
- CK_BitCast,
- IV->getBase());
- // Don't forget the parens to enforce the proper binding.
- ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
- IV->getBase()->getLocEnd(), castExpr);
- replaced = true;
- // Cannot delete IV->getBase(), since PE points to it.
- // Replace the old base with the cast. This is important when doing
- // embedded rewrites. For example, [newInv->_container addObject:0].
- IV->setBase(PE);
- return IV;
+ // Rebuild the base expression if we have one.
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
}
}
- return IV;
-}
-Stmt *RewriteObjC::RewriteObjCNestedIvarRefExpr(Stmt *S, bool &replaced) {
- for (Stmt::child_range CI = S->children(); CI; ++CI) {
- if (*CI) {
- Stmt *newStmt = RewriteObjCNestedIvarRefExpr(*CI, replaced);
- if (newStmt)
- *CI = newStmt;
- }
- }
- if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
- SourceRange OrigStmtRange = S->getSourceRange();
- Stmt *newStmt = RewriteObjCIvarRefExpr(IvarRefExpr, OrigStmtRange.getBegin(),
- replaced);
- return newStmt;
- }
- if (ObjCMessageExpr *MsgRefExpr = dyn_cast<ObjCMessageExpr>(S)) {
- Stmt *newStmt = SynthMessageExpr(MsgRefExpr);
- return newStmt;
+  // Intentionally empty: a getter message carries no arguments.
+ SmallVector<SourceLocation, 1> SelLocs;
+ SmallVector<Expr*, 1> Args;
+
+ ObjCMessageExpr *NewMsg = 0;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
}
- return S;
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
}
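// Correspondingly for the getter path (same caveats; 'foo' and 'count'
// are hypothetical):
//
//   int n = foo.count;
//
// becomes, roughly:
//
//   int n = ((int (*)(id, SEL))(void *)objc_msgSend)
//       ((id)foo, sel_registerName("count"));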
/// SynthCountByEnumWithState - To print:
@@ -1571,7 +1414,7 @@ Stmt *RewriteObjC::RewriteObjCNestedIvarRefExpr(Stmt *S, bool &replaced) {
/// sel_registerName(
/// "countByEnumeratingWithState:objects:count:"),
/// &enumState,
-/// (id *)items, (unsigned int)16)
+/// (id *)__rw_items, (unsigned int)16)
///
void RewriteObjC::SynthCountByEnumWithState(std::string &buf) {
buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, "
@@ -1581,7 +1424,7 @@ void RewriteObjC::SynthCountByEnumWithState(std::string &buf) {
buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
buf += "\n\t\t";
buf += "&enumState, "
- "(id *)items, (unsigned int)16)";
+ "(id *)__rw_items, (unsigned int)16)";
}
/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach
@@ -1626,10 +1469,10 @@ Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) {
/// {
/// type elem;
/// struct __objcFastEnumerationState enumState = { 0 };
-/// id items[16];
+/// id __rw_items[16];
/// id l_collection = (id)collection;
/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
-/// objects:items count:16];
+/// objects:__rw_items count:16];
/// if (limit) {
/// unsigned long startMutations = *enumState.mutationsPtr;
/// do {
@@ -1642,7 +1485,7 @@ Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) {
/// __continue_label: ;
/// } while (counter < limit);
/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
-/// objects:items count:16]);
+/// objects:__rw_items count:16]);
/// elem = nil;
/// __break_label: ;
/// }
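/// For reference, the template above is what a source-level loop such as
/// (type and names hypothetical)
///
///   for (id elem in collection) { /* body */ }
///
/// expands into; 'elem', 'collection', and the loop body are spliced into
/// the corresponding slots.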
@@ -1694,8 +1537,8 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
// struct __objcFastEnumerationState enumState = { 0 };
buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
- // id items[16];
- buf += "id items[16];\n\t";
+ // id __rw_items[16];
+ buf += "id __rw_items[16];\n\t";
// id l_collection = (id)
buf += "id l_collection = (id)";
// Find start location of 'collection' the hard way!
@@ -1720,7 +1563,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
buf = ";\n\t";
// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
- // objects:items count:16];
+ // objects:__rw_items count:16];
// which is synthesized into:
// unsigned int limit =
// ((unsigned int (*)
@@ -1729,7 +1572,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
// sel_registerName(
// "countByEnumeratingWithState:objects:count:"),
// (struct __objcFastEnumerationState *)&state,
- // (id *)items, (unsigned int)16);
+ // (id *)__rw_items, (unsigned int)16);
buf += "unsigned long limit =\n\t\t";
SynthCountByEnumWithState(buf);
buf += ";\n\t";
@@ -1758,7 +1601,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
/// __continue_label: ;
/// } while (counter < limit);
/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
- /// objects:items count:16]);
+ /// objects:__rw_items count:16]);
/// elem = nil;
/// __break_label: ;
/// }
@@ -2200,8 +2043,8 @@ CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
QualType msgSendType = FD->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE =
- new (Context) DeclRefExpr(FD, msgSendType, VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, msgSendType,
+ VK_LValue, SourceLocation());
// Now, we cast the reference to a pointer to the objc_msgSend type.
QualType pToFunc = Context->getPointerType(msgSendType);
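// A note on the mechanical change in this hunk: every DeclRefExpr
// construction in this patch gains a second 'false' argument. Judging from
// the refersToEnclosingLocal() queries introduced further below, a
// plausible sketch of the updated constructor is
//
//   DeclRefExpr(ValueDecl *D, bool RefersToEnclosingLocal, QualType T,
//               ExprValueKind VK, SourceLocation L);
//
// where the parameter name is an assumption, not confirmed by this diff.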
@@ -2667,7 +2510,7 @@ void RewriteObjC::SynthGetSuperClassFunctionDecl() {
false);
}
-// SynthGetMetaClassFunctionDecl - id objc_getClass(const char *name);
+// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name);
void RewriteObjC::SynthGetMetaClassFunctionDecl() {
IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
SmallVector<QualType, 16> ArgTys;
@@ -2714,7 +2557,7 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(S),
strType, 0, SC_Static, SC_None);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, strType, VK_LValue,
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
SourceLocation());
Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
@@ -2842,6 +2685,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
Context->getObjCIdType(),
VK_RValue,
SourceLocation()))
@@ -2862,7 +2706,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// (Class)objc_getClass("CurrentClass")
CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
Context->getObjCClassType(),
- CK_CPointerToObjCPointerCast, Cls);
+ CK_BitCast, Cls);
ClsExprs.clear();
ClsExprs.push_back(ArgExpr);
Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
@@ -2883,7 +2727,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SynthSuperContructorFunctionDecl();
    // Simulate a constructor call...
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
- superType, VK_LValue,
+ false, superType, VK_LValue,
SourceLocation());
SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
InitExprs.size(),
@@ -2953,6 +2797,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
Context->getObjCIdType(),
VK_RValue, SourceLocation()))
); // set the 'receiver'.
@@ -2992,7 +2837,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SynthSuperContructorFunctionDecl();
    // Simulate a constructor call...
DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
- superType, VK_LValue,
+ false, superType, VK_LValue,
SourceLocation());
SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
InitExprs.size(),
@@ -3144,7 +2989,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, msgSendType,
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
VK_LValue, SourceLocation());
// Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid).
@@ -3180,7 +3025,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// method's return type.
// Create a reference to the objc_msgSend_stret() declaration.
- DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor, msgSendType,
+ DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
+ false, msgSendType,
VK_LValue, SourceLocation());
// Need to cast objc_msgSend_stret to "void *" (see above comment).
cast = NoTypeInfoCStyleCastExpr(Context,
@@ -3268,8 +3114,8 @@ Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), ID, getProtocolType(), 0,
SC_Extern, SC_None);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, getProtocolType(), VK_LValue,
- SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
+ VK_LValue, SourceLocation());
Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
VK_RValue, OK_Ordinary, SourceLocation());
@@ -3277,7 +3123,7 @@ Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
CK_BitCast,
DerefExpr);
ReplaceStmt(Exp, castExpr);
- ProtocolExprDecls.insert(Exp->getProtocol());
+ ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
// delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
return castExpr;
@@ -3309,9 +3155,9 @@ bool RewriteObjC::BufferContainsPPDirectives(const char *startBuf,
return false;
}
-/// SynthesizeObjCInternalStruct - Rewrite one internal struct corresponding to
+/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
/// an objective-c class with ivars.
-void RewriteObjC::SynthesizeObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+void RewriteObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
std::string &Result) {
assert(CDecl && "Class missing in SynthesizeObjCInternalStruct");
assert(CDecl->getName() != "" &&
@@ -3322,14 +3168,14 @@ void RewriteObjC::SynthesizeObjCInternalStruct(ObjCInterfaceDecl *CDecl,
ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
int NumIvars = CDecl->ivar_size();
SourceLocation LocStart = CDecl->getLocStart();
- SourceLocation LocEnd = CDecl->getLocEnd();
+ SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
   // If this class has no ivars, and it either has no root or its root,
   // directly or indirectly, has no ivars (thus not synthesized), then there
   // is no need to synthesize this class.
- if ((CDecl->isForwardDecl() || NumIvars == 0) &&
+ if ((!CDecl->isThisDeclarationADefinition() || NumIvars == 0) &&
(!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
ReplaceText(LocStart, endBuf-startBuf, Result);
@@ -3441,667 +3287,10 @@ void RewriteObjC::SynthesizeObjCInternalStruct(ObjCInterfaceDecl *CDecl,
llvm_unreachable("struct already synthesize- SynthesizeObjCInternalStruct");
}
-// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
-/// class methods.
-template<typename MethodIterator>
-void RewriteObjC::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
- MethodIterator MethodEnd,
- bool IsInstanceMethod,
- StringRef prefix,
- StringRef ClassName,
- std::string &Result) {
- if (MethodBegin == MethodEnd) return;
-
- if (!objc_impl_method) {
- /* struct _objc_method {
- SEL _cmd;
- char *method_types;
- void *_imp;
- }
- */
- Result += "\nstruct _objc_method {\n";
- Result += "\tSEL _cmd;\n";
- Result += "\tchar *method_types;\n";
- Result += "\tvoid *_imp;\n";
- Result += "};\n";
-
- objc_impl_method = true;
- }
-
- // Build _objc_method_list for class's methods if needed
-
- /* struct {
- struct _objc_method_list *next_method;
- int method_count;
- struct _objc_method method_list[];
- }
- */
- unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
- Result += "\nstatic struct {\n";
- Result += "\tstruct _objc_method_list *next_method;\n";
- Result += "\tint method_count;\n";
- Result += "\tstruct _objc_method method_list[";
- Result += utostr(NumMethods);
- Result += "];\n} _OBJC_";
- Result += prefix;
- Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
- Result += "_METHODS_";
- Result += ClassName;
- Result += " __attribute__ ((used, section (\"__OBJC, __";
- Result += IsInstanceMethod ? "inst" : "cls";
- Result += "_meth\")))= ";
- Result += "{\n\t0, " + utostr(NumMethods) + "\n";
-
- Result += "\t,{{(SEL)\"";
- Result += (*MethodBegin)->getSelector().getAsString().c_str();
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
- Result += "\", \"";
- Result += MethodTypeString;
- Result += "\", (void *)";
- Result += MethodInternalNames[*MethodBegin];
- Result += "}\n";
- for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
- Result += "\t ,{(SEL)\"";
- Result += (*MethodBegin)->getSelector().getAsString().c_str();
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
- Result += "\", \"";
- Result += MethodTypeString;
- Result += "\", (void *)";
- Result += MethodInternalNames[*MethodBegin];
- Result += "}\n";
- }
- Result += "\t }\n};\n";
-}
-
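// For reference, the removed routine above emitted metadata of this shape
// (the class name 'Foo', selector 'bar', type-encoding string, and internal
// function name are all hypothetical):
//
//   static struct {
//     struct _objc_method_list *next_method;
//     int method_count;
//     struct _objc_method method_list[1];
//   } _OBJC_INSTANCE_METHODS_Foo
//       __attribute__ ((used, section ("__OBJC, __inst_meth")))= {
//     0, 1
//     ,{{(SEL)"bar", "v8@0:4", (void *)_I_Foo_bar}
//     }
//   };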
-/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data.
-void RewriteObjC::
-RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl, StringRef prefix,
- StringRef ClassName, std::string &Result) {
- static bool objc_protocol_methods = false;
-
- // Output struct protocol_methods holder of method selector and type.
- if (!objc_protocol_methods && !PDecl->isForwardDecl()) {
- /* struct protocol_methods {
- SEL _cmd;
- char *method_types;
- }
- */
- Result += "\nstruct _protocol_methods {\n";
- Result += "\tstruct objc_selector *_cmd;\n";
- Result += "\tchar *method_types;\n";
- Result += "};\n";
-
- objc_protocol_methods = true;
- }
- // Do not synthesize the protocol more than once.
- if (ObjCSynthesizedProtocols.count(PDecl))
- return;
-
- if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
- unsigned NumMethods = std::distance(PDecl->instmeth_begin(),
- PDecl->instmeth_end());
- /* struct _objc_protocol_method_list {
- int protocol_method_count;
- struct protocol_methods protocols[];
- }
- */
- Result += "\nstatic struct {\n";
- Result += "\tint protocol_method_count;\n";
- Result += "\tstruct _protocol_methods protocol_methods[";
- Result += utostr(NumMethods);
- Result += "];\n} _OBJC_PROTOCOL_INSTANCE_METHODS_";
- Result += PDecl->getNameAsString();
- Result += " __attribute__ ((used, section (\"__OBJC, __cat_inst_meth\")))= "
- "{\n\t" + utostr(NumMethods) + "\n";
-
- // Output instance methods declared in this protocol.
- for (ObjCProtocolDecl::instmeth_iterator
- I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
- I != E; ++I) {
- if (I == PDecl->instmeth_begin())
- Result += "\t ,{{(struct objc_selector *)\"";
- else
- Result += "\t ,{(struct objc_selector *)\"";
- Result += (*I)->getSelector().getAsString();
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
- Result += "\", \"";
- Result += MethodTypeString;
- Result += "\"}\n";
- }
- Result += "\t }\n};\n";
- }
-
- // Output class methods declared in this protocol.
- unsigned NumMethods = std::distance(PDecl->classmeth_begin(),
- PDecl->classmeth_end());
- if (NumMethods > 0) {
- /* struct _objc_protocol_method_list {
- int protocol_method_count;
- struct protocol_methods protocols[];
- }
- */
- Result += "\nstatic struct {\n";
- Result += "\tint protocol_method_count;\n";
- Result += "\tstruct _protocol_methods protocol_methods[";
- Result += utostr(NumMethods);
- Result += "];\n} _OBJC_PROTOCOL_CLASS_METHODS_";
- Result += PDecl->getNameAsString();
- Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
- "{\n\t";
- Result += utostr(NumMethods);
- Result += "\n";
-
-    // Output class methods declared in this protocol.
- for (ObjCProtocolDecl::classmeth_iterator
- I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
- I != E; ++I) {
- if (I == PDecl->classmeth_begin())
- Result += "\t ,{{(struct objc_selector *)\"";
- else
- Result += "\t ,{(struct objc_selector *)\"";
- Result += (*I)->getSelector().getAsString();
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
- Result += "\", \"";
- Result += MethodTypeString;
- Result += "\"}\n";
- }
- Result += "\t }\n};\n";
- }
-
- // Output:
- /* struct _objc_protocol {
- // Objective-C 1.0 extensions
- struct _objc_protocol_extension *isa;
- char *protocol_name;
- struct _objc_protocol **protocol_list;
- struct _objc_protocol_method_list *instance_methods;
- struct _objc_protocol_method_list *class_methods;
- };
- */
- static bool objc_protocol = false;
- if (!objc_protocol) {
- Result += "\nstruct _objc_protocol {\n";
- Result += "\tstruct _objc_protocol_extension *isa;\n";
- Result += "\tchar *protocol_name;\n";
- Result += "\tstruct _objc_protocol **protocol_list;\n";
- Result += "\tstruct _objc_protocol_method_list *instance_methods;\n";
- Result += "\tstruct _objc_protocol_method_list *class_methods;\n";
- Result += "};\n";
-
- objc_protocol = true;
- }
-
- Result += "\nstatic struct _objc_protocol _OBJC_PROTOCOL_";
- Result += PDecl->getNameAsString();
- Result += " __attribute__ ((used, section (\"__OBJC, __protocol\")))= "
- "{\n\t0, \"";
- Result += PDecl->getNameAsString();
- Result += "\", 0, ";
- if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
- Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
- Result += PDecl->getNameAsString();
- Result += ", ";
- }
- else
- Result += "0, ";
- if (PDecl->classmeth_begin() != PDecl->classmeth_end()) {
- Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_CLASS_METHODS_";
- Result += PDecl->getNameAsString();
- Result += "\n";
- }
- else
- Result += "0\n";
- Result += "};\n";
-
- // Mark this protocol as having been generated.
- if (!ObjCSynthesizedProtocols.insert(PDecl))
- llvm_unreachable("protocol already synthesized");
-
-}
-
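// The emitted protocol record, for a hypothetical protocol P declaring
// only instance methods, came out as:
//
//   static struct _objc_protocol _OBJC_PROTOCOL_P
//       __attribute__ ((used, section ("__OBJC, __protocol")))= {
//     0, "P", 0,
//     (struct _objc_protocol_method_list *)
//         &_OBJC_PROTOCOL_INSTANCE_METHODS_P,
//     0
//   };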
-void RewriteObjC::
-RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Protocols,
- StringRef prefix, StringRef ClassName,
- std::string &Result) {
- if (Protocols.empty()) return;
-
- for (unsigned i = 0; i != Protocols.size(); i++)
- RewriteObjCProtocolMetaData(Protocols[i], prefix, ClassName, Result);
-
-  // Output the top level protocol meta-data for the class.
- /* struct _objc_protocol_list {
- struct _objc_protocol_list *next;
- int protocol_count;
- struct _objc_protocol *class_protocols[];
- }
- */
- Result += "\nstatic struct {\n";
- Result += "\tstruct _objc_protocol_list *next;\n";
- Result += "\tint protocol_count;\n";
- Result += "\tstruct _objc_protocol *class_protocols[";
- Result += utostr(Protocols.size());
- Result += "];\n} _OBJC_";
- Result += prefix;
- Result += "_PROTOCOLS_";
- Result += ClassName;
- Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
- "{\n\t0, ";
- Result += utostr(Protocols.size());
- Result += "\n";
-
- Result += "\t,{&_OBJC_PROTOCOL_";
- Result += Protocols[0]->getNameAsString();
- Result += " \n";
-
- for (unsigned i = 1; i != Protocols.size(); i++) {
- Result += "\t ,&_OBJC_PROTOCOL_";
- Result += Protocols[i]->getNameAsString();
- Result += "\n";
- }
- Result += "\t }\n};\n";
-}
-
-
-/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
-/// implementation.
-void RewriteObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
- std::string &Result) {
- ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
- // Find category declaration for this implementation.
- ObjCCategoryDecl *CDecl;
- for (CDecl = ClassDecl->getCategoryList(); CDecl;
- CDecl = CDecl->getNextClassCategory())
- if (CDecl->getIdentifier() == IDecl->getIdentifier())
- break;
-
- std::string FullCategoryName = ClassDecl->getNameAsString();
- FullCategoryName += '_';
- FullCategoryName += IDecl->getNameAsString();
-
- // Build _objc_method_list for class's instance methods if needed
- SmallVector<ObjCMethodDecl *, 32>
- InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
-
- // If any of our property implementations have associated getters or
- // setters, produce metadata for them as well.
- for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
- PropEnd = IDecl->propimpl_end();
- Prop != PropEnd; ++Prop) {
- if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
- continue;
- if (!(*Prop)->getPropertyIvarDecl())
- continue;
- ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
- if (!PD)
- continue;
- if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
- InstanceMethods.push_back(Getter);
- if (PD->isReadOnly())
- continue;
- if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
- InstanceMethods.push_back(Setter);
- }
- RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
- true, "CATEGORY_", FullCategoryName.c_str(),
- Result);
-
- // Build _objc_method_list for class's class methods if needed
- RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
- false, "CATEGORY_", FullCategoryName.c_str(),
- Result);
-
- // Protocols referenced in class declaration?
-  // A null CDecl indicates a category implementation with no category interface
- if (CDecl)
- RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), "CATEGORY",
- FullCategoryName, Result);
- /* struct _objc_category {
- char *category_name;
- char *class_name;
- struct _objc_method_list *instance_methods;
- struct _objc_method_list *class_methods;
- struct _objc_protocol_list *protocols;
- // Objective-C 1.0 extensions
- uint32_t size; // sizeof (struct _objc_category)
- struct _objc_property_list *instance_properties; // category's own
- // @property decl.
- };
- */
-
- static bool objc_category = false;
- if (!objc_category) {
- Result += "\nstruct _objc_category {\n";
- Result += "\tchar *category_name;\n";
- Result += "\tchar *class_name;\n";
- Result += "\tstruct _objc_method_list *instance_methods;\n";
- Result += "\tstruct _objc_method_list *class_methods;\n";
- Result += "\tstruct _objc_protocol_list *protocols;\n";
- Result += "\tunsigned int size;\n";
- Result += "\tstruct _objc_property_list *instance_properties;\n";
- Result += "};\n";
- objc_category = true;
- }
- Result += "\nstatic struct _objc_category _OBJC_CATEGORY_";
- Result += FullCategoryName;
- Result += " __attribute__ ((used, section (\"__OBJC, __category\")))= {\n\t\"";
- Result += IDecl->getNameAsString();
- Result += "\"\n\t, \"";
- Result += ClassDecl->getNameAsString();
- Result += "\"\n";
-
- if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
- Result += "\t, (struct _objc_method_list *)"
- "&_OBJC_CATEGORY_INSTANCE_METHODS_";
- Result += FullCategoryName;
- Result += "\n";
- }
- else
- Result += "\t, 0\n";
- if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
- Result += "\t, (struct _objc_method_list *)"
- "&_OBJC_CATEGORY_CLASS_METHODS_";
- Result += FullCategoryName;
- Result += "\n";
- }
- else
- Result += "\t, 0\n";
-
- if (CDecl && CDecl->protocol_begin() != CDecl->protocol_end()) {
- Result += "\t, (struct _objc_protocol_list *)&_OBJC_CATEGORY_PROTOCOLS_";
- Result += FullCategoryName;
- Result += "\n";
- }
- else
- Result += "\t, 0\n";
- Result += "\t, sizeof(struct _objc_category), 0\n};\n";
-}
-
-/// SynthesizeIvarOffsetComputation - This routine synthesizes computation of
-/// ivar offset.
-void RewriteObjC::SynthesizeIvarOffsetComputation(ObjCIvarDecl *ivar,
- std::string &Result) {
- if (ivar->isBitField()) {
- // FIXME: The hack below doesn't work for bitfields. For now, we simply
- // place all bitfields at offset 0.
- Result += "0";
- } else {
- Result += "__OFFSETOFIVAR__(struct ";
- Result += ivar->getContainingInterface()->getNameAsString();
- if (LangOpts.MicrosoftExt)
- Result += "_IMPL";
- Result += ", ";
- Result += ivar->getNameAsString();
- Result += ")";
- }
-}
-
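// So for a non-bitfield ivar 'bar' of a hypothetical class Foo this
// produced the text
//
//   __OFFSETOFIVAR__(struct Foo, bar)
//
// (with "_IMPL" appended to the struct name under Microsoft extensions),
// while bitfield ivars were simply given offset 0 per the FIXME above.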
//===----------------------------------------------------------------------===//
// Meta Data Emission
//===----------------------------------------------------------------------===//
-void RewriteObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
- std::string &Result) {
- ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
-
- // Explicitly declared @interface's are already synthesized.
- if (CDecl->isImplicitInterfaceDecl()) {
-    // FIXME: Implementation of a class with no @interface (legacy) does not
- // produce correct synthesis as yet.
- SynthesizeObjCInternalStruct(CDecl, Result);
- }
-
- // Build _objc_ivar_list metadata for classes ivars if needed
- unsigned NumIvars = !IDecl->ivar_empty()
- ? IDecl->ivar_size()
- : (CDecl ? CDecl->ivar_size() : 0);
- if (NumIvars > 0) {
- static bool objc_ivar = false;
- if (!objc_ivar) {
- /* struct _objc_ivar {
- char *ivar_name;
- char *ivar_type;
- int ivar_offset;
- };
- */
- Result += "\nstruct _objc_ivar {\n";
- Result += "\tchar *ivar_name;\n";
- Result += "\tchar *ivar_type;\n";
- Result += "\tint ivar_offset;\n";
- Result += "};\n";
-
- objc_ivar = true;
- }
-
- /* struct {
- int ivar_count;
- struct _objc_ivar ivar_list[nIvars];
- };
- */
- Result += "\nstatic struct {\n";
- Result += "\tint ivar_count;\n";
- Result += "\tstruct _objc_ivar ivar_list[";
- Result += utostr(NumIvars);
- Result += "];\n} _OBJC_INSTANCE_VARIABLES_";
- Result += IDecl->getNameAsString();
- Result += " __attribute__ ((used, section (\"__OBJC, __instance_vars\")))= "
- "{\n\t";
- Result += utostr(NumIvars);
- Result += "\n";
-
- ObjCInterfaceDecl::ivar_iterator IVI, IVE;
- SmallVector<ObjCIvarDecl *, 8> IVars;
- if (!IDecl->ivar_empty()) {
- for (ObjCInterfaceDecl::ivar_iterator
- IV = IDecl->ivar_begin(), IVEnd = IDecl->ivar_end();
- IV != IVEnd; ++IV)
- IVars.push_back(*IV);
- IVI = IDecl->ivar_begin();
- IVE = IDecl->ivar_end();
- } else {
- IVI = CDecl->ivar_begin();
- IVE = CDecl->ivar_end();
- }
- Result += "\t,{{\"";
- Result += (*IVI)->getNameAsString();
- Result += "\", \"";
- std::string TmpString, StrEncoding;
- Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
- QuoteDoublequotes(TmpString, StrEncoding);
- Result += StrEncoding;
- Result += "\", ";
- SynthesizeIvarOffsetComputation(*IVI, Result);
- Result += "}\n";
- for (++IVI; IVI != IVE; ++IVI) {
- Result += "\t ,{\"";
- Result += (*IVI)->getNameAsString();
- Result += "\", \"";
- std::string TmpString, StrEncoding;
- Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
- QuoteDoublequotes(TmpString, StrEncoding);
- Result += StrEncoding;
- Result += "\", ";
- SynthesizeIvarOffsetComputation((*IVI), Result);
- Result += "}\n";
- }
-
- Result += "\t }\n};\n";
- }
-
- // Build _objc_method_list for class's instance methods if needed
- SmallVector<ObjCMethodDecl *, 32>
- InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
-
- // If any of our property implementations have associated getters or
- // setters, produce metadata for them as well.
- for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
- PropEnd = IDecl->propimpl_end();
- Prop != PropEnd; ++Prop) {
- if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
- continue;
- if (!(*Prop)->getPropertyIvarDecl())
- continue;
- ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
- if (!PD)
- continue;
- if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
- if (!Getter->isDefined())
- InstanceMethods.push_back(Getter);
- if (PD->isReadOnly())
- continue;
- if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
- if (!Setter->isDefined())
- InstanceMethods.push_back(Setter);
- }
- RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
- true, "", IDecl->getName(), Result);
-
- // Build _objc_method_list for class's class methods if needed
- RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
- false, "", IDecl->getName(), Result);
-
- // Protocols referenced in class declaration?
- RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(),
- "CLASS", CDecl->getName(), Result);
-
- // Declaration of class/meta-class metadata
- /* struct _objc_class {
- struct _objc_class *isa; // or const char *root_class_name when metadata
- const char *super_class_name;
- char *name;
- long version;
- long info;
- long instance_size;
- struct _objc_ivar_list *ivars;
- struct _objc_method_list *methods;
- struct objc_cache *cache;
- struct objc_protocol_list *protocols;
- const char *ivar_layout;
- struct _objc_class_ext *ext;
- };
- */
- static bool objc_class = false;
- if (!objc_class) {
- Result += "\nstruct _objc_class {\n";
- Result += "\tstruct _objc_class *isa;\n";
- Result += "\tconst char *super_class_name;\n";
- Result += "\tchar *name;\n";
- Result += "\tlong version;\n";
- Result += "\tlong info;\n";
- Result += "\tlong instance_size;\n";
- Result += "\tstruct _objc_ivar_list *ivars;\n";
- Result += "\tstruct _objc_method_list *methods;\n";
- Result += "\tstruct objc_cache *cache;\n";
- Result += "\tstruct _objc_protocol_list *protocols;\n";
- Result += "\tconst char *ivar_layout;\n";
- Result += "\tstruct _objc_class_ext *ext;\n";
- Result += "};\n";
- objc_class = true;
- }
-
- // Meta-class metadata generation.
- ObjCInterfaceDecl *RootClass = 0;
- ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
- while (SuperClass) {
- RootClass = SuperClass;
- SuperClass = SuperClass->getSuperClass();
- }
- SuperClass = CDecl->getSuperClass();
-
- Result += "\nstatic struct _objc_class _OBJC_METACLASS_";
- Result += CDecl->getNameAsString();
- Result += " __attribute__ ((used, section (\"__OBJC, __meta_class\")))= "
- "{\n\t(struct _objc_class *)\"";
- Result += (RootClass ? RootClass->getNameAsString() : CDecl->getNameAsString());
- Result += "\"";
-
- if (SuperClass) {
- Result += ", \"";
- Result += SuperClass->getNameAsString();
- Result += "\", \"";
- Result += CDecl->getNameAsString();
- Result += "\"";
- }
- else {
- Result += ", 0, \"";
- Result += CDecl->getNameAsString();
- Result += "\"";
- }
- // Set 'ivars' field for root class to 0. ObjC1 runtime does not use it.
- // 'info' field is initialized to CLS_META(2) for metaclass
- Result += ", 0,2, sizeof(struct _objc_class), 0";
- if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
- Result += "\n\t, (struct _objc_method_list *)&_OBJC_CLASS_METHODS_";
- Result += IDecl->getNameAsString();
- Result += "\n";
- }
- else
- Result += ", 0\n";
- if (CDecl->protocol_begin() != CDecl->protocol_end()) {
- Result += "\t,0, (struct _objc_protocol_list *)&_OBJC_CLASS_PROTOCOLS_";
- Result += CDecl->getNameAsString();
- Result += ",0,0\n";
- }
- else
- Result += "\t,0,0,0,0\n";
- Result += "};\n";
-
- // class metadata generation.
- Result += "\nstatic struct _objc_class _OBJC_CLASS_";
- Result += CDecl->getNameAsString();
- Result += " __attribute__ ((used, section (\"__OBJC, __class\")))= "
- "{\n\t&_OBJC_METACLASS_";
- Result += CDecl->getNameAsString();
- if (SuperClass) {
- Result += ", \"";
- Result += SuperClass->getNameAsString();
- Result += "\", \"";
- Result += CDecl->getNameAsString();
- Result += "\"";
- }
- else {
- Result += ", 0, \"";
- Result += CDecl->getNameAsString();
- Result += "\"";
- }
- // 'info' field is initialized to CLS_CLASS(1) for class
- Result += ", 0,1";
- if (!ObjCSynthesizedStructs.count(CDecl))
- Result += ",0";
- else {
- // class has size. Must synthesize its size.
- Result += ",sizeof(struct ";
- Result += CDecl->getNameAsString();
- if (LangOpts.MicrosoftExt)
- Result += "_IMPL";
- Result += ")";
- }
- if (NumIvars > 0) {
- Result += ", (struct _objc_ivar_list *)&_OBJC_INSTANCE_VARIABLES_";
- Result += CDecl->getNameAsString();
- Result += "\n\t";
- }
- else
- Result += ",0";
- if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
- Result += ", (struct _objc_method_list *)&_OBJC_INSTANCE_METHODS_";
- Result += CDecl->getNameAsString();
- Result += ", 0\n\t";
- }
- else
- Result += ",0,0";
- if (CDecl->protocol_begin() != CDecl->protocol_end()) {
- Result += ", (struct _objc_protocol_list*)&_OBJC_CLASS_PROTOCOLS_";
- Result += CDecl->getNameAsString();
- Result += ", 0,0\n";
- }
- else
- Result += ",0,0,0\n";
- Result += "};\n";
-}
/// RewriteImplementations - This routine rewrites all method implementations
/// and emits meta-data.
@@ -4118,103 +3307,6 @@ void RewriteObjC::RewriteImplementations() {
RewriteImplementationDecl(CategoryImplementation[i]);
}
-void RewriteObjC::SynthesizeMetaDataIntoBuffer(std::string &Result) {
- int ClsDefCount = ClassImplementation.size();
- int CatDefCount = CategoryImplementation.size();
-
- // For each implemented class, write out all its meta data.
- for (int i = 0; i < ClsDefCount; i++)
- RewriteObjCClassMetaData(ClassImplementation[i], Result);
-
- // For each implemented category, write out all its meta data.
- for (int i = 0; i < CatDefCount; i++)
- RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
-
- // Write objc_symtab metadata
- /*
- struct _objc_symtab
- {
- long sel_ref_cnt;
- SEL *refs;
- short cls_def_cnt;
- short cat_def_cnt;
- void *defs[cls_def_cnt + cat_def_cnt];
- };
- */
-
- Result += "\nstruct _objc_symtab {\n";
- Result += "\tlong sel_ref_cnt;\n";
- Result += "\tSEL *refs;\n";
- Result += "\tshort cls_def_cnt;\n";
- Result += "\tshort cat_def_cnt;\n";
- Result += "\tvoid *defs[" + utostr(ClsDefCount + CatDefCount)+ "];\n";
- Result += "};\n\n";
-
- Result += "static struct _objc_symtab "
- "_OBJC_SYMBOLS __attribute__((used, section (\"__OBJC, __symbols\")))= {\n";
- Result += "\t0, 0, " + utostr(ClsDefCount)
- + ", " + utostr(CatDefCount) + "\n";
- for (int i = 0; i < ClsDefCount; i++) {
- Result += "\t,&_OBJC_CLASS_";
- Result += ClassImplementation[i]->getNameAsString();
- Result += "\n";
- }
-
- for (int i = 0; i < CatDefCount; i++) {
- Result += "\t,&_OBJC_CATEGORY_";
- Result += CategoryImplementation[i]->getClassInterface()->getNameAsString();
- Result += "_";
- Result += CategoryImplementation[i]->getNameAsString();
- Result += "\n";
- }
-
- Result += "};\n\n";
-
- // Write objc_module metadata
-
- /*
- struct _objc_module {
- long version;
- long size;
- const char *name;
- struct _objc_symtab *symtab;
- }
- */
-
- Result += "\nstruct _objc_module {\n";
- Result += "\tlong version;\n";
- Result += "\tlong size;\n";
- Result += "\tconst char *name;\n";
- Result += "\tstruct _objc_symtab *symtab;\n";
- Result += "};\n\n";
- Result += "static struct _objc_module "
- "_OBJC_MODULES __attribute__ ((used, section (\"__OBJC, __module_info\")))= {\n";
- Result += "\t" + utostr(OBJC_ABI_VERSION) +
- ", sizeof(struct _objc_module), \"\", &_OBJC_SYMBOLS\n";
- Result += "};\n\n";
-
- if (LangOpts.MicrosoftExt) {
- if (ProtocolExprDecls.size()) {
- Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n";
- Result += "#pragma data_seg(push, \".objc_protocol$B\")\n";
- for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
- E = ProtocolExprDecls.end(); I != E; ++I) {
- Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_";
- Result += (*I)->getNameAsString();
- Result += " = &_OBJC_PROTOCOL_";
- Result += (*I)->getNameAsString();
- Result += ";\n";
- }
- Result += "#pragma data_seg(pop)\n\n";
- }
- Result += "#pragma section(\".objc_module_info$B\",long,read,write)\n";
- Result += "#pragma data_seg(push, \".objc_module_info$B\")\n";
- Result += "static struct _objc_module *_POINTER_OBJC_MODULES = ";
- Result += "&_OBJC_MODULES;\n";
- Result += "#pragma data_seg(pop)\n\n";
- }
-}
-
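// The removed emitter above closed the translation unit with, in outline
// (counts shown for one hypothetical class 'Foo' and no categories; a
// sketch, not the verbatim output):
//
//   static struct _objc_symtab _OBJC_SYMBOLS
//       __attribute__((used, section ("__OBJC, __symbols")))= {
//     0, 0, 1, 0
//     ,&_OBJC_CLASS_Foo
//   };
//
//   static struct _objc_module _OBJC_MODULES
//       __attribute__ ((used, section ("__OBJC, __module_info")))= {
//     OBJC_ABI_VERSION, sizeof(struct _objc_module), "", &_OBJC_SYMBOLS
//   };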
void RewriteObjC::RewriteByRefString(std::string &ResultStr,
const std::string &Name,
ValueDecl *VD, bool def) {
@@ -4261,10 +3353,8 @@ std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
if (AI != BD->param_begin()) S += ", ";
ParamStr = (*AI)->getNameAsString();
QualType QT = (*AI)->getType();
- if (convertBlockPointerToFunctionPointer(QT))
- QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
- else
- QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
S += ParamStr;
}
if (FT->isVariadic()) {
@@ -4539,20 +3629,20 @@ void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
// Need to copy-in the inner copied-in variables not actually used in this
// block.
for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
- BlockDeclRefExpr *Exp = InnerDeclRefs[count++];
+ DeclRefExpr *Exp = InnerDeclRefs[count++];
ValueDecl *VD = Exp->getDecl();
BlockDeclRefs.push_back(Exp);
- if (!Exp->isByRef() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
BlockByCopyDeclsPtrSet.insert(VD);
BlockByCopyDecls.push_back(VD);
}
- if (Exp->isByRef() && !BlockByRefDeclsPtrSet.count(VD)) {
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
BlockByRefDeclsPtrSet.insert(VD);
BlockByRefDecls.push_back(VD);
}
// imported objects in the inner blocks not used in the outer
// blocks must be copied/disposed in the outer block as well.
- if (Exp->isByRef() ||
+ if (VD->hasAttr<BlocksAttr>() ||
VD->getType()->isObjCObjectPointerType() ||
VD->getType()->isBlockPointerType())
ImportedBlockDecls.insert(VD);
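// The substitution in this hunk relies on the fact that a __block variable
// is modeled as a declaration carrying BlocksAttr, so the old per-reference
// isByRef() bit can be recovered from the declaration itself:
//
//   __block int i = 0;  // VD->hasAttr<BlocksAttr>() -> by-reference
//   int j = 0;          // no BlocksAttr             -> by-copy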
@@ -4641,25 +3731,21 @@ void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
GetBlockDeclRefExprs(*CI);
}
// Handle specific things.
- if (BlockDeclRefExpr *CDRE = dyn_cast<BlockDeclRefExpr>(S)) {
- // FIXME: Handle enums.
- if (!isa<FunctionDecl>(CDRE->getDecl()))
- BlockDeclRefs.push_back(CDRE);
- }
- else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
- if (HasLocalVariableExternalStorage(DRE->getDecl())) {
- BlockDeclRefExpr *BDRE =
- new (Context)BlockDeclRefExpr(cast<VarDecl>(DRE->getDecl()),
- DRE->getType(),
- VK_LValue, DRE->getLocation(), false);
- BlockDeclRefs.push_back(BDRE);
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ if (DRE->refersToEnclosingLocal()) {
+ // FIXME: Handle enums.
+ if (!isa<FunctionDecl>(DRE->getDecl()))
+ BlockDeclRefs.push_back(DRE);
+ if (HasLocalVariableExternalStorage(DRE->getDecl()))
+ BlockDeclRefs.push_back(DRE);
}
+ }
return;
}
void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
- SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs,
+ SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs,
llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts) {
for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
@@ -4676,15 +3762,15 @@ void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
}
// Handle specific things.
- if (BlockDeclRefExpr *CDRE = dyn_cast<BlockDeclRefExpr>(S)) {
- if (!isa<FunctionDecl>(CDRE->getDecl()) &&
- !InnerContexts.count(CDRE->getDecl()->getDeclContext()))
- InnerBlockDeclRefs.push_back(CDRE);
- }
- else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
- if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
- if (Var->isFunctionOrMethodVarDecl())
- ImportedLocalExternalDecls.insert(Var);
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ if (DRE->refersToEnclosingLocal()) {
+ if (!isa<FunctionDecl>(DRE->getDecl()) &&
+ !InnerContexts.count(DRE->getDecl()->getDeclContext()))
+ InnerBlockDeclRefs.push_back(DRE);
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (Var->isFunctionOrMethodVarDecl())
+ ImportedLocalExternalDecls.insert(Var);
+ }
}
return;
@@ -4727,9 +3813,6 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BlockExp)) {
CPT = DRE->getType()->getAs<BlockPointerType>();
- } else if (const BlockDeclRefExpr *CDRE =
- dyn_cast<BlockDeclRefExpr>(BlockExp)) {
- CPT = CDRE->getType()->getAs<BlockPointerType>();
} else if (const MemberExpr *MExpr = dyn_cast<MemberExpr>(BlockExp)) {
CPT = MExpr->getType()->getAs<BlockPointerType>();
}
@@ -4753,6 +3836,9 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
return CondExpr;
} else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
CPT = IRE->getType()->getAs<BlockPointerType>();
+ } else if (const PseudoObjectExpr *POE
+ = dyn_cast<PseudoObjectExpr>(BlockExp)) {
+ CPT = POE->getType()->castAs<BlockPointerType>();
} else {
assert(1 && "RewriteBlockClass: Bad type");
}
@@ -4839,17 +3925,11 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
// i = 77;
// };
//}
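// Concretely (a sketch; 'i' as in the example above): after the byref
// rewrite, an access to the __block variable becomes
//
//   i->__forwarding->i = 77;   // inside the block (pointer, '->')
//   i.__forwarding->i = 77;    // in the enclosing scope (struct, '.')
//
// with the arrow/dot choice now driven by refersToEnclosingLocal(), as the
// rewritten function below shows.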
-Stmt *RewriteObjC::RewriteBlockDeclRefExpr(Expr *DeclRefExp) {
+Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
// Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
// for each DeclRefExp where BYREFVAR is name of the variable.
- ValueDecl *VD;
- bool isArrow = true;
- if (BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(DeclRefExp))
- VD = BDRE->getDecl();
- else {
- VD = cast<DeclRefExpr>(DeclRefExp)->getDecl();
- isArrow = false;
- }
+ ValueDecl *VD = DeclRefExp->getDecl();
+ bool isArrow = DeclRefExp->refersToEnclosingLocal();
FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
SourceLocation(),
@@ -5343,7 +4423,7 @@ void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
if (BlockDeclRefs.size()) {
// Unique all "by copy" declarations.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
- if (!BlockDeclRefs[i]->isByRef()) {
+ if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
@@ -5351,7 +4431,7 @@ void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
}
// Unique all "by ref" declarations.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
- if (BlockDeclRefs[i]->isByRef()) {
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
@@ -5359,7 +4439,7 @@ void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
}
// Find any imported blocks...they will need special attention.
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
- if (BlockDeclRefs[i]->isByRef() ||
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
BlockDeclRefs[i]->getType()->isBlockPointerType())
ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
@@ -5375,7 +4455,7 @@ FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(StringRef name) {
}
Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
- const SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs) {
+ const SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs) {
const BlockDecl *block = Exp->getBlockDecl();
Blocks.push_back(Exp);
@@ -5385,9 +4465,9 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
int countOfInnerDecls = 0;
if (!InnerBlockDeclRefs.empty()) {
for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
- BlockDeclRefExpr *Exp = InnerBlockDeclRefs[i];
+ DeclRefExpr *Exp = InnerBlockDeclRefs[i];
ValueDecl *VD = Exp->getDecl();
- if (!Exp->isByRef() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
// We need to save the copied-in variables in nested
// blocks because it is needed at the end for some of the API generations.
// See SynthesizeBlockLiterals routine.
@@ -5396,7 +4476,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
BlockByCopyDeclsPtrSet.insert(VD);
BlockByCopyDecls.push_back(VD);
}
- if (Exp->isByRef() && !BlockByRefDeclsPtrSet.count(VD)) {
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
BlockDeclRefs.push_back(Exp);
BlockByRefDeclsPtrSet.insert(VD);
@@ -5405,7 +4485,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
}
// Find any imported blocks...they will need special attention.
for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
- if (InnerBlockDeclRefs[i]->isByRef() ||
+ if (InnerBlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
@@ -5435,15 +4515,15 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
   // Simulate a constructor call...
FD = SynthBlockInitFunctionDecl(Tag);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, FType, VK_RValue,
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
SourceLocation());
SmallVector<Expr*, 4> InitExprs;
// Initialize the block function.
FD = SynthBlockInitFunctionDecl(Func);
- DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
- SourceLocation());
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
CK_BitCast, Arg);
InitExprs.push_back(castExpr);
@@ -5457,7 +4537,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
Context->VoidPtrTy, 0,
SC_Static, SC_None);
UnaryOperator *DescRefExpr =
- new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD,
+ new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
Context->VoidPtrTy,
VK_LValue,
SourceLocation()),
@@ -5476,7 +4556,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (isObjCType((*I)->getType())) {
// FIXME: Conform to ABI ([[obj retain] autorelease]).
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
@@ -5486,13 +4566,13 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Arg = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
+ Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
CK_BitCast, Arg);
} else {
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
@@ -5520,7 +4600,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, FD->getType(), VK_LValue,
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
SourceLocation());
bool isNestedCapturedVar = false;
if (block)
@@ -5580,26 +4660,6 @@ bool RewriteObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) {
// Function Body / Expression rewriting
//===----------------------------------------------------------------------===//
-// This is run as a first "pass" prior to RewriteFunctionBodyOrGlobalInitializer().
-// This allows the main rewrite loop to associate all ObjCPropertyRefExprs with
-// their respective BinaryOperator. Without this knowledge, we'd need to rewrite
-// the ObjCPropertyRefExpr twice (once as a getter, and later as a setter).
-// Since the rewriter isn't capable of rewriting rewritten code, it's important
-// we get this right.
-void RewriteObjC::CollectPropertySetters(Stmt *S) {
- // Perform a bottom up traversal of all children.
- for (Stmt::child_range CI = S->children(); CI; ++CI)
- if (*CI)
- CollectPropertySetters(*CI);
-
- if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
- if (BinOp->isAssignmentOp()) {
- if (isa<ObjCPropertyRefExpr>(BinOp->getLHS()))
- PropSetters[BinOp->getLHS()] = BinOp;
- }
- }
-}
-
Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
isa<DoStmt>(S) || isa<ForStmt>(S))
@@ -5609,50 +4669,32 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
ObjCBcLabelNo.push_back(++BcLabelCount);
}
+ // Pseudo-object operations and ivar references need special
+ // treatment because we're going to recursively rewrite them.
+ if (PseudoObjectExpr *PseudoOp = dyn_cast<PseudoObjectExpr>(S)) {
+ if (isa<BinaryOperator>(PseudoOp->getSyntacticForm())) {
+ return RewritePropertyOrImplicitSetter(PseudoOp);
+ } else {
+ return RewritePropertyOrImplicitGetter(PseudoOp);
+ }
+ } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
+ return RewriteObjCIvarRefExpr(IvarRefExpr);
+ }
+
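// In other words: an assignment through a property, such as 'obj.prop = v'
// ('obj' and 'prop' hypothetical), reaches this point as a PseudoObjectExpr
// whose syntactic form is a BinaryOperator and takes the setter path, while
// a bare load like 'obj.prop' takes the getter path.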
SourceRange OrigStmtRange = S->getSourceRange();
// Perform a bottom up rewrite of all children.
for (Stmt::child_range CI = S->children(); CI; ++CI)
if (*CI) {
- Stmt *newStmt;
- Stmt *ChildStmt = (*CI);
- if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(ChildStmt)) {
- Expr *OldBase = IvarRefExpr->getBase();
- bool replaced = false;
- newStmt = RewriteObjCNestedIvarRefExpr(ChildStmt, replaced);
- if (replaced) {
- if (ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(newStmt))
- ReplaceStmt(OldBase, IRE->getBase());
- else
- ReplaceStmt(ChildStmt, newStmt);
- }
- }
- else
- newStmt = RewriteFunctionBodyOrGlobalInitializer(ChildStmt);
+ Stmt *childStmt = (*CI);
+ Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt);
if (newStmt) {
- if (Expr *PropOrImplicitRefExpr = dyn_cast<Expr>(ChildStmt))
- if (PropSetters[PropOrImplicitRefExpr] == S) {
- S = newStmt;
- newStmt = 0;
- }
- if (newStmt)
- *CI = newStmt;
+ *CI = newStmt;
}
- // If dealing with an assignment with LHS being a property reference
- // expression, the entire assignment tree is rewritten into a property
- // setter messaging. This involvs the RHS too. Do not attempt to rewrite
- // RHS again.
- if (Expr *Exp = dyn_cast<Expr>(ChildStmt))
- if (isa<ObjCPropertyRefExpr>(Exp)) {
- if (PropSetters[Exp]) {
- ++CI;
- continue;
- }
- }
}
if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
- SmallVector<BlockDeclRefExpr *, 8> InnerBlockDeclRefs;
+ SmallVector<DeclRefExpr *, 8> InnerBlockDeclRefs;
llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
InnerContexts.insert(BE->getBlockDecl());
ImportedLocalExternalDecls.clear();
@@ -5661,7 +4703,6 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
// Rewrite the block body in place.
Stmt *SaveCurrentBody = CurrentBody;
CurrentBody = BE->getBody();
- CollectPropertySetters(CurrentBody);
PropParentMap = 0;
// block literal on rhs of a property-dot-syntax assignment
// must be replaced by its synthesized ast so getRewrittenText
@@ -5689,67 +4730,6 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
return RewriteAtEncode(AtEncode);
- if (isa<ObjCPropertyRefExpr>(S)) {
- Expr *PropOrImplicitRefExpr = dyn_cast<Expr>(S);
- assert(PropOrImplicitRefExpr && "Property or implicit setter/getter is null");
-
- BinaryOperator *BinOp = PropSetters[PropOrImplicitRefExpr];
- if (BinOp) {
- // Because the rewriter doesn't allow us to rewrite rewritten code,
- // we need to rewrite the right hand side prior to rewriting the setter.
- DisableReplaceStmt = true;
- // Save the source range. Even if we disable the replacement, the
- // rewritten node will have been inserted into the tree. If the synthesized
- // node is at the 'end', the rewriter will fail. Consider this:
- // self.errorHandler = handler ? handler :
- // ^(NSURL *errorURL, NSError *error) { return (BOOL)1; };
- SourceRange SrcRange = BinOp->getSourceRange();
- Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(BinOp->getRHS());
- // Need to rewrite the ivar access expression if need be.
- if (isa<ObjCIvarRefExpr>(newStmt)) {
- bool replaced = false;
- newStmt = RewriteObjCNestedIvarRefExpr(newStmt, replaced);
- }
-
- DisableReplaceStmt = false;
- //
- // Unlike the main iterator, we explicily avoid changing 'BinOp'. If
- // we changed the RHS of BinOp, the rewriter would fail (since it needs
- // to see the original expression). Consider this example:
- //
- // Foo *obj1, *obj2;
- //
- // obj1.i = [obj2 rrrr];
- //
- // 'BinOp' for the previous expression looks like:
- //
- // (BinaryOperator 0x231ccf0 'int' '='
- // (ObjCPropertyRefExpr 0x231cc70 'int' Kind=PropertyRef Property="i"
- // (DeclRefExpr 0x231cc50 'Foo *' Var='obj1' 0x231cbb0))
- // (ObjCMessageExpr 0x231ccb0 'int' selector=rrrr
- // (DeclRefExpr 0x231cc90 'Foo *' Var='obj2' 0x231cbe0)))
- //
- // 'newStmt' represents the rewritten message expression. For example:
- //
- // (CallExpr 0x231d300 'id':'struct objc_object *'
- // (ParenExpr 0x231d2e0 'int (*)(id, SEL)'
- // (CStyleCastExpr 0x231d2c0 'int (*)(id, SEL)'
- // (CStyleCastExpr 0x231d220 'void *'
- // (DeclRefExpr 0x231d200 'id (id, SEL, ...)' FunctionDecl='objc_msgSend' 0x231cdc0))))
- //
- // Note that 'newStmt' is passed to RewritePropertyOrImplicitSetter so that it
- // can be used as the setter argument. ReplaceStmt() will still 'see'
- // the original RHS (since we haven't altered BinOp).
- //
- // This implies the Rewrite* routines can no longer delete the original
- // node. As a result, we now leak the original AST nodes.
- //
- return RewritePropertyOrImplicitSetter(BinOp, dyn_cast<Expr>(newStmt), SrcRange);
- } else {
- return RewritePropertyOrImplicitGetter(PropOrImplicitRefExpr);
- }
- }
-
if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
return RewriteAtSelector(AtSelector);
@@ -5859,10 +4839,6 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
Stmts.pop_back();
}
// Handle blocks rewriting.
- if (BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
- if (BDRE->isByRef())
- return RewriteBlockDeclRefExpr(BDRE);
- }
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
ValueDecl *VD = DRE->getDecl();
if (VD->hasAttr<BlocksAttr>())
@@ -5917,108 +4893,124 @@ void RewriteObjC::RewriteRecordBody(RecordDecl *RD) {
/// HandleDeclInMainFile - This is called for each top-level decl defined in the
/// main file of the input.
void RewriteObjC::HandleDeclInMainFile(Decl *D) {
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- if (FD->isOverloadedOperator())
- return;
+ switch (D->getKind()) {
+ case Decl::Function: {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (FD->isOverloadedOperator())
+ return;
- // Since function prototypes don't have ParmDecl's, we check the function
- // prototype. This enables us to rewrite function declarations and
- // definitions using the same code.
- RewriteBlocksInFunctionProtoType(FD->getType(), FD);
-
- // FIXME: If this should support Obj-C++, support CXXTryStmt
- if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
- CurFunctionDef = FD;
- CurFunctionDeclToDeclareForBlock = FD;
- CollectPropertySetters(Body);
- CurrentBody = Body;
- Body =
- cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
- FD->setBody(Body);
- CurrentBody = 0;
- if (PropParentMap) {
- delete PropParentMap;
- PropParentMap = 0;
+ // Since function prototypes don't have ParmDecl's, we check the function
+ // prototype. This enables us to rewrite function declarations and
+ // definitions using the same code.
+ RewriteBlocksInFunctionProtoType(FD->getType(), FD);
+
+ if (!FD->isThisDeclarationADefinition())
+ break;
+
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
+ CurFunctionDef = FD;
+ CurFunctionDeclToDeclareForBlock = FD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ FD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ // This synthesizes and inserts the block "impl" struct, invoke function,
+ // and any copy/dispose helper functions.
+ InsertBlockLiteralsWithinFunction(FD);
+ CurFunctionDef = 0;
+ CurFunctionDeclToDeclareForBlock = 0;
}
- // This synthesizes and inserts the block "impl" struct, invoke function,
- // and any copy/dispose helper functions.
- InsertBlockLiteralsWithinFunction(FD);
- CurFunctionDef = 0;
- CurFunctionDeclToDeclareForBlock = 0;
+ break;
}
- return;
- }
- if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (CompoundStmt *Body = MD->getCompoundBody()) {
- CurMethodDef = MD;
- CollectPropertySetters(Body);
- CurrentBody = Body;
- Body =
- cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
- MD->setBody(Body);
- CurrentBody = 0;
- if (PropParentMap) {
- delete PropParentMap;
- PropParentMap = 0;
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *MD = cast<ObjCMethodDecl>(D);
+ if (CompoundStmt *Body = MD->getCompoundBody()) {
+ CurMethodDef = MD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ MD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ InsertBlockLiteralsWithinMethod(MD);
+ CurMethodDef = 0;
}
- InsertBlockLiteralsWithinMethod(MD);
- CurMethodDef = 0;
+ break;
}
- }
- if (ObjCImplementationDecl *CI = dyn_cast<ObjCImplementationDecl>(D))
- ClassImplementation.push_back(CI);
- else if (ObjCCategoryImplDecl *CI = dyn_cast<ObjCCategoryImplDecl>(D))
- CategoryImplementation.push_back(CI);
- else if (isa<ObjCClassDecl>(D))
- llvm_unreachable("RewriteObjC::HandleDeclInMainFile - ObjCClassDecl");
- else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
- RewriteObjCQualifiedInterfaceTypes(VD);
- if (isTopLevelBlockPointerType(VD->getType()))
- RewriteBlockPointerDecl(VD);
- else if (VD->getType()->isFunctionPointerType()) {
- CheckFunctionPointerDecl(VD->getType(), VD);
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *CI = cast<ObjCImplementationDecl>(D);
+ ClassImplementation.push_back(CI);
+ break;
+ }
+ case Decl::ObjCCategoryImpl: {
+ ObjCCategoryImplDecl *CI = cast<ObjCCategoryImplDecl>(D);
+ CategoryImplementation.push_back(CI);
+ break;
+ }
+ case Decl::Var: {
+ VarDecl *VD = cast<VarDecl>(D);
+ RewriteObjCQualifiedInterfaceTypes(VD);
+ if (isTopLevelBlockPointerType(VD->getType()))
+ RewriteBlockPointerDecl(VD);
+ else if (VD->getType()->isFunctionPointerType()) {
+ CheckFunctionPointerDecl(VD->getType(), VD);
+ if (VD->getInit()) {
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ } else if (VD->getType()->isRecordType()) {
+ RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ }
if (VD->getInit()) {
+ GlobalVarDecl = VD;
+ CurrentBody = VD->getInit();
+ RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName());
+ GlobalVarDecl = 0;
+
+ // This is needed for blocks.
if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
- RewriteCastExpr(CE);
+ RewriteCastExpr(CE);
}
}
- } else if (VD->getType()->isRecordType()) {
- RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
- if (RD->isCompleteDefinition())
- RewriteRecordBody(RD);
+ break;
}
- if (VD->getInit()) {
- GlobalVarDecl = VD;
- CollectPropertySetters(VD->getInit());
- CurrentBody = VD->getInit();
- RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
- CurrentBody = 0;
- if (PropParentMap) {
- delete PropParentMap;
- PropParentMap = 0;
- }
- SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(),
- VD->getName());
- GlobalVarDecl = 0;
-
- // This is needed for blocks.
- if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
- RewriteCastExpr(CE);
+ case Decl::TypeAlias:
+ case Decl::Typedef: {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
}
+ break;
}
- return;
- }
- if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
- if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
- RewriteBlockPointerDecl(TD);
- else if (TD->getUnderlyingType()->isFunctionPointerType())
- CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
- return;
- }
- if (RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
- if (RD->isCompleteDefinition())
- RewriteRecordBody(RD);
- return;
+ case Decl::CXXRecord:
+ case Decl::Record: {
+ RecordDecl *RD = cast<RecordDecl>(D);
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ break;
+ }
+ default:
+ break;
}
// Nothing yet.
}
@@ -6053,9 +5045,974 @@ void RewriteObjC::HandleTranslationUnit(ASTContext &C) {
ProtocolExprDecls.size()) {
// Rewrite Objective-c meta data*
std::string ResultStr;
- SynthesizeMetaDataIntoBuffer(ResultStr);
+ RewriteMetaDataIntoBuffer(ResultStr);
// Emit metadata.
*OutFile << ResultStr;
}
OutFile->flush();
}
+
+void RewriteObjCFragileABI::Initialize(ASTContext &context) {
+ InitializeCommon(context);
+
+ // declaring objc_selector outside the parameter list removes a silly
+ // scope related warning...
+ if (IsHeader)
+ Preamble = "#pragma once\n";
+ Preamble += "struct objc_selector; struct objc_class;\n";
+ Preamble += "struct __rw_objc_super { struct objc_object *object; ";
+ Preamble += "struct objc_object *superClass; ";
+ if (LangOpts.MicrosoftExt) {
+ // Add a constructor for creating temporary objects.
+ Preamble += "__rw_objc_super(struct objc_object *o, struct objc_object *s) "
+ ": ";
+ Preamble += "object(o), superClass(s) {} ";
+ }
+ Preamble += "};\n";
+ Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
+ Preamble += "typedef struct objc_object Protocol;\n";
+ Preamble += "#define _REWRITER_typedef_Protocol\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
+ Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
+ } else
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper";
+ Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSend_stret";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSendSuper_stret";
+ Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT double objc_msgSend_fpret";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
+ Preamble += "(struct objc_class *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_enter(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_exit(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_exception_extract(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match";
+ Preamble += "(struct objc_class *, struct objc_object *);\n";
+ // @synchronized hooks.
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
+ Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
+ Preamble += "struct __objcFastEnumerationState {\n\t";
+ Preamble += "unsigned long state;\n\t";
+ Preamble += "void **itemsPtr;\n\t";
+ Preamble += "unsigned long *mutationsPtr;\n\t";
+ Preamble += "unsigned long extra[5];\n};\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
+ Preamble += "#define __FASTENUMERATIONSTATE\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "struct __NSConstantStringImpl {\n";
+ Preamble += " int *isa;\n";
+ Preamble += " int flags;\n";
+ Preamble += " char *str;\n";
+ Preamble += " long length;\n";
+ Preamble += "};\n";
+ Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
+ Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "#endif\n";
+ // Blocks preamble.
+ Preamble += "#ifndef BLOCK_IMPL\n";
+ Preamble += "#define BLOCK_IMPL\n";
+ Preamble += "struct __block_impl {\n";
+ Preamble += " void *isa;\n";
+ Preamble += " int Flags;\n";
+ Preamble += " int Reserved;\n";
+ Preamble += " void *FuncPtr;\n";
+ Preamble += "};\n";
+ Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
+ Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
+ Preamble += "extern \"C\" __declspec(dllexport) "
+ "void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#endif\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
+ Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
+ Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
+ Preamble += "#define __attribute__(X)\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __weak\n";
+ }
+ else {
+ Preamble += "#define __block\n";
+ Preamble += "#define __weak\n";
+ }
+ // NOTE: Windows uses LLP64 in 64-bit mode, so cast the pointer to long long;
+ // this avoids warnings in both 64-bit and 32-bit compilation models.
+ Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
+}
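
For orientation, here is a condensed sketch of the C this preamble injects at the top of every rewritten file; only the block header and the ivar-offset macro are reproduced, copied from the strings above:

    struct __block_impl {      /* header embedded in every rewritten block literal */
      void *isa;               /* _NSConcreteStackBlock or _NSConcreteGlobalBlock */
      int Flags;
      int Reserved;
      void *FuncPtr;           /* the synthesized invoke function */
    };
    /* offsetof() substitute; long long avoids warnings under LP64 and LLP64 */
    #define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)
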
+
+/// RewriteIvarOffsetComputation - This routine synthesizes computation of
+/// ivar offset.
+void RewriteObjCFragileABI::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) {
+ if (ivar->isBitField()) {
+ // FIXME: The hack below doesn't work for bitfields. For now, we simply
+ // place all bitfields at offset 0.
+ Result += "0";
+ } else {
+ Result += "__OFFSETOFIVAR__(struct ";
+ Result += ivar->getContainingInterface()->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ", ";
+ Result += ivar->getNameAsString();
+ Result += ")";
+ }
+}
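
As an illustration, for a non-bitfield ivar the routine emits text of the following shape; the interface name Counter and ivar _count are hypothetical, and the _IMPL suffix applies under LangOpts.MicrosoftExt:

    __OFFSETOFIVAR__(struct Counter_IMPL, _count)
    /* which the preamble macro expands to: */
    ((long long) &((struct Counter_IMPL *)0)->_count)
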
+
+/// RewriteObjCProtocolMetaData - Rewrite a protocol's meta-data.
+void RewriteObjCFragileABI::RewriteObjCProtocolMetaData(
+ ObjCProtocolDecl *PDecl, StringRef prefix,
+ StringRef ClassName, std::string &Result) {
+ static bool objc_protocol_methods = false;
+
+ // Output struct protocol_methods holder of method selector and type.
+ if (!objc_protocol_methods && PDecl->hasDefinition()) {
+ /* struct protocol_methods {
+ SEL _cmd;
+ char *method_types;
+ }
+ */
+ Result += "\nstruct _protocol_methods {\n";
+ Result += "\tstruct objc_selector *_cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "};\n";
+
+ objc_protocol_methods = true;
+ }
+ // Do not synthesize the protocol more than once.
+ if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl()))
+ return;
+
+ if (ObjCProtocolDecl *Def = PDecl->getDefinition())
+ PDecl = Def;
+
+ if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
+ unsigned NumMethods = std::distance(PDecl->instmeth_begin(),
+ PDecl->instmeth_end());
+ /* struct _objc_protocol_method_list {
+ int protocol_method_count;
+ struct protocol_methods protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint protocol_method_count;\n";
+ Result += "\tstruct _protocol_methods protocol_methods[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_inst_meth\")))= "
+ "{\n\t" + utostr(NumMethods) + "\n";
+
+ // Output instance methods declared in this protocol.
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
+ I != E; ++I) {
+ if (I == PDecl->instmeth_begin())
+ Result += "\t ,{{(struct objc_selector *)\"";
+ else
+ Result += "\t ,{(struct objc_selector *)\"";
+ Result += (*I)->getSelector().getAsString();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\"}\n";
+ }
+ Result += "\t }\n};\n";
+ }
+
+ // Output class methods declared in this protocol.
+ unsigned NumMethods = std::distance(PDecl->classmeth_begin(),
+ PDecl->classmeth_end());
+ if (NumMethods > 0) {
+ /* struct _objc_protocol_method_list {
+ int protocol_method_count;
+ struct protocol_methods protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint protocol_method_count;\n";
+ Result += "\tstruct _protocol_methods protocol_methods[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t";
+ Result += utostr(NumMethods);
+ Result += "\n";
+
+ // Output class methods declared in this protocol.
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I) {
+ if (I == PDecl->classmeth_begin())
+ Result += "\t ,{{(struct objc_selector *)\"";
+ else
+ Result += "\t ,{(struct objc_selector *)\"";
+ Result += (*I)->getSelector().getAsString();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\"}\n";
+ }
+ Result += "\t }\n};\n";
+ }
+
+ // Output:
+ /* struct _objc_protocol {
+ // Objective-C 1.0 extensions
+ struct _objc_protocol_extension *isa;
+ char *protocol_name;
+ struct _objc_protocol **protocol_list;
+ struct _objc_protocol_method_list *instance_methods;
+ struct _objc_protocol_method_list *class_methods;
+ };
+ */
+ static bool objc_protocol = false;
+ if (!objc_protocol) {
+ Result += "\nstruct _objc_protocol {\n";
+ Result += "\tstruct _objc_protocol_extension *isa;\n";
+ Result += "\tchar *protocol_name;\n";
+ Result += "\tstruct _objc_protocol **protocol_list;\n";
+ Result += "\tstruct _objc_protocol_method_list *instance_methods;\n";
+ Result += "\tstruct _objc_protocol_method_list *class_methods;\n";
+ Result += "};\n";
+
+ objc_protocol = true;
+ }
+
+ Result += "\nstatic struct _objc_protocol _OBJC_PROTOCOL_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __protocol\")))= "
+ "{\n\t0, \"";
+ Result += PDecl->getNameAsString();
+ Result += "\", 0, ";
+ if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
+ Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += ", ";
+ }
+ else
+ Result += "0, ";
+ if (PDecl->classmeth_begin() != PDecl->classmeth_end()) {
+ Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += "\n";
+ }
+ else
+ Result += "0\n";
+ Result += "};\n";
+
+ // Mark this protocol as having been generated.
+ if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()))
+ llvm_unreachable("protocol already synthesized");
+
+}
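
Assembled, the metadata for a hypothetical protocol P declaring one instance method -doIt would come out roughly as follows (the selector, the type encoding string, and all names are illustrative):

    static struct {
      int protocol_method_count;
      struct _protocol_methods protocol_methods[1];
    } _OBJC_PROTOCOL_INSTANCE_METHODS_P
        __attribute__ ((used, section ("__OBJC, __cat_inst_meth"))) = {
      1
      ,{{(struct objc_selector *)"doIt", "v8@0:4"}
       }
    };

    static struct _objc_protocol _OBJC_PROTOCOL_P
        __attribute__ ((used, section ("__OBJC, __protocol"))) = {
      0, "P", 0,
      (struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_P,
      0
    };
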
+
+void RewriteObjCFragileABI::RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Protocols,
+ StringRef prefix, StringRef ClassName,
+ std::string &Result) {
+ if (Protocols.empty()) return;
+
+ for (unsigned i = 0; i != Protocols.size(); i++)
+ RewriteObjCProtocolMetaData(Protocols[i], prefix, ClassName, Result);
+
+ // Output the top level protocol meta-data for the class.
+ /* struct _objc_protocol_list {
+ struct _objc_protocol_list *next;
+ int protocol_count;
+ struct _objc_protocol *class_protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tstruct _objc_protocol_list *next;\n";
+ Result += "\tint protocol_count;\n";
+ Result += "\tstruct _objc_protocol *class_protocols[";
+ Result += utostr(Protocols.size());
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += "_PROTOCOLS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t0, ";
+ Result += utostr(Protocols.size());
+ Result += "\n";
+
+ Result += "\t,{&_OBJC_PROTOCOL_";
+ Result += Protocols[0]->getNameAsString();
+ Result += " \n";
+
+ for (unsigned i = 1; i != Protocols.size(); i++) {
+ Result += "\t ,&_OBJC_PROTOCOL_";
+ Result += Protocols[i]->getNameAsString();
+ Result += "\n";
+ }
+ Result += "\t }\n};\n";
+}
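
For a class Foo adopting two protocols P and Q (all names hypothetical), the emitted wrapper looks approximately like:

    static struct {
      struct _objc_protocol_list *next;
      int protocol_count;
      struct _objc_protocol *class_protocols[2];
    } _OBJC_CLASS_PROTOCOLS_Foo
        __attribute__ ((used, section ("__OBJC, __cat_cls_meth"))) = {
      0, 2
      ,{&_OBJC_PROTOCOL_P
       ,&_OBJC_PROTOCOL_Q
      }
    };
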
+
+void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+
+ // Explicitly declared @interfaces are already synthesized.
+ if (CDecl->isImplicitInterfaceDecl()) {
+ // FIXME: Implementation of a class with no @interface (legacy) does not
+ // produce correct synthesis as yet.
+ RewriteObjCInternalStruct(CDecl, Result);
+ }
+
+ // Build _objc_ivar_list metadata for the class's ivars if needed
+ unsigned NumIvars = !IDecl->ivar_empty()
+ ? IDecl->ivar_size()
+ : (CDecl ? CDecl->ivar_size() : 0);
+ if (NumIvars > 0) {
+ static bool objc_ivar = false;
+ if (!objc_ivar) {
+ /* struct _objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+ */
+ Result += "\nstruct _objc_ivar {\n";
+ Result += "\tchar *ivar_name;\n";
+ Result += "\tchar *ivar_type;\n";
+ Result += "\tint ivar_offset;\n";
+ Result += "};\n";
+
+ objc_ivar = true;
+ }
+
+ /* struct {
+ int ivar_count;
+ struct _objc_ivar ivar_list[nIvars];
+ };
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint ivar_count;\n";
+ Result += "\tstruct _objc_ivar ivar_list[";
+ Result += utostr(NumIvars);
+ Result += "];\n} _OBJC_INSTANCE_VARIABLES_";
+ Result += IDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __instance_vars\")))= "
+ "{\n\t";
+ Result += utostr(NumIvars);
+ Result += "\n";
+
+ ObjCInterfaceDecl::ivar_iterator IVI, IVE;
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+ if (!IDecl->ivar_empty()) {
+ for (ObjCInterfaceDecl::ivar_iterator
+ IV = IDecl->ivar_begin(), IVEnd = IDecl->ivar_end();
+ IV != IVEnd; ++IV)
+ IVars.push_back(*IV);
+ IVI = IDecl->ivar_begin();
+ IVE = IDecl->ivar_end();
+ } else {
+ IVI = CDecl->ivar_begin();
+ IVE = CDecl->ivar_end();
+ }
+ Result += "\t,{{\"";
+ Result += (*IVI)->getNameAsString();
+ Result += "\", \"";
+ std::string TmpString, StrEncoding;
+ Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
+ QuoteDoublequotes(TmpString, StrEncoding);
+ Result += StrEncoding;
+ Result += "\", ";
+ RewriteIvarOffsetComputation(*IVI, Result);
+ Result += "}\n";
+ for (++IVI; IVI != IVE; ++IVI) {
+ Result += "\t ,{\"";
+ Result += (*IVI)->getNameAsString();
+ Result += "\", \"";
+ std::string TmpString, StrEncoding;
+ Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
+ QuoteDoublequotes(TmpString, StrEncoding);
+ Result += StrEncoding;
+ Result += "\", ";
+ RewriteIvarOffsetComputation((*IVI), Result);
+ Result += "}\n";
+ }
+
+ Result += "\t }\n};\n";
+ }
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
+ PropEnd = IDecl->propimpl_end();
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ if (!Getter->isDefined())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ if (!Setter->isDefined())
+ InstanceMethods.push_back(Setter);
+ }
+ RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
+ true, "", IDecl->getName(), Result);
+
+ // Build _objc_method_list for class's class methods if needed
+ RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
+ false, "", IDecl->getName(), Result);
+
+ // Protocols referenced in class declaration?
+ RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(),
+ "CLASS", CDecl->getName(), Result);
+
+ // Declaration of class/meta-class metadata
+ /* struct _objc_class {
+ struct _objc_class *isa; // or const char *root_class_name when metadata
+ const char *super_class_name;
+ char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct objc_cache *cache;
+ struct objc_protocol_list *protocols;
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+ */
+ static bool objc_class = false;
+ if (!objc_class) {
+ Result += "\nstruct _objc_class {\n";
+ Result += "\tstruct _objc_class *isa;\n";
+ Result += "\tconst char *super_class_name;\n";
+ Result += "\tchar *name;\n";
+ Result += "\tlong version;\n";
+ Result += "\tlong info;\n";
+ Result += "\tlong instance_size;\n";
+ Result += "\tstruct _objc_ivar_list *ivars;\n";
+ Result += "\tstruct _objc_method_list *methods;\n";
+ Result += "\tstruct objc_cache *cache;\n";
+ Result += "\tstruct _objc_protocol_list *protocols;\n";
+ Result += "\tconst char *ivar_layout;\n";
+ Result += "\tstruct _objc_class_ext *ext;\n";
+ Result += "};\n";
+ objc_class = true;
+ }
+
+ // Meta-class metadata generation.
+ ObjCInterfaceDecl *RootClass = 0;
+ ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
+ while (SuperClass) {
+ RootClass = SuperClass;
+ SuperClass = SuperClass->getSuperClass();
+ }
+ SuperClass = CDecl->getSuperClass();
+
+ Result += "\nstatic struct _objc_class _OBJC_METACLASS_";
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __meta_class\")))= "
+ "{\n\t(struct _objc_class *)\"";
+ Result += (RootClass ? RootClass->getNameAsString() : CDecl->getNameAsString());
+ Result += "\"";
+
+ if (SuperClass) {
+ Result += ", \"";
+ Result += SuperClass->getNameAsString();
+ Result += "\", \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ else {
+ Result += ", 0, \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ // Set 'ivars' field for root class to 0. ObjC1 runtime does not use it.
+ // 'info' field is initialized to CLS_META(2) for metaclass
+ Result += ", 0,2, sizeof(struct _objc_class), 0";
+ if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
+ Result += "\n\t, (struct _objc_method_list *)&_OBJC_CLASS_METHODS_";
+ Result += IDecl->getNameAsString();
+ Result += "\n";
+ }
+ else
+ Result += ", 0\n";
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += "\t,0, (struct _objc_protocol_list *)&_OBJC_CLASS_PROTOCOLS_";
+ Result += CDecl->getNameAsString();
+ Result += ",0,0\n";
+ }
+ else
+ Result += "\t,0,0,0,0\n";
+ Result += "};\n";
+
+ // class metadata generation.
+ Result += "\nstatic struct _objc_class _OBJC_CLASS_";
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __class\")))= "
+ "{\n\t&_OBJC_METACLASS_";
+ Result += CDecl->getNameAsString();
+ if (SuperClass) {
+ Result += ", \"";
+ Result += SuperClass->getNameAsString();
+ Result += "\", \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ else {
+ Result += ", 0, \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ // 'info' field is initialized to CLS_CLASS(1) for class
+ Result += ", 0,1";
+ if (!ObjCSynthesizedStructs.count(CDecl))
+ Result += ",0";
+ else {
+ // class has size. Must synthesize its size.
+ Result += ",sizeof(struct ";
+ Result += CDecl->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ")";
+ }
+ if (NumIvars > 0) {
+ Result += ", (struct _objc_ivar_list *)&_OBJC_INSTANCE_VARIABLES_";
+ Result += CDecl->getNameAsString();
+ Result += "\n\t";
+ }
+ else
+ Result += ",0";
+ if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
+ Result += ", (struct _objc_method_list *)&_OBJC_INSTANCE_METHODS_";
+ Result += CDecl->getNameAsString();
+ Result += ", 0\n\t";
+ }
+ else
+ Result += ",0,0";
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += ", (struct _objc_protocol_list*)&_OBJC_CLASS_PROTOCOLS_";
+ Result += CDecl->getNameAsString();
+ Result += ", 0,0\n";
+ }
+ else
+ Result += ",0,0,0\n";
+ Result += "};\n";
+}
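
Concretely, for a hypothetical class Foo whose only (and therefore root) superclass is NSObject, with no ivars, methods, or protocols, the metaclass record is emitted roughly as below; the _OBJC_CLASS_Foo record that follows has the same layout, with isa pointing at this metaclass and info set to CLS_CLASS(1):

    static struct _objc_class _OBJC_METACLASS_Foo
        __attribute__ ((used, section ("__OBJC, __meta_class"))) = {
      (struct _objc_class *)"NSObject",  /* isa: root class name, as a string */
      "NSObject", "Foo",                 /* super_class_name, name */
      0, 2, sizeof(struct _objc_class),  /* version, CLS_META, instance_size */
      0, 0,                              /* ivars (unused by ObjC1), methods */
      0, 0, 0, 0                         /* cache, protocols, ivar_layout, ext */
    };
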
+
+void RewriteObjCFragileABI::RewriteMetaDataIntoBuffer(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // For each implemented class, write out all its meta data.
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteObjCClassMetaData(ClassImplementation[i], Result);
+
+ // For each implemented category, write out all its meta data.
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
+
+ // Write objc_symtab metadata
+ /*
+ struct _objc_symtab
+ {
+ long sel_ref_cnt;
+ SEL *refs;
+ short cls_def_cnt;
+ short cat_def_cnt;
+ void *defs[cls_def_cnt + cat_def_cnt];
+ };
+ */
+
+ Result += "\nstruct _objc_symtab {\n";
+ Result += "\tlong sel_ref_cnt;\n";
+ Result += "\tSEL *refs;\n";
+ Result += "\tshort cls_def_cnt;\n";
+ Result += "\tshort cat_def_cnt;\n";
+ Result += "\tvoid *defs[" + utostr(ClsDefCount + CatDefCount)+ "];\n";
+ Result += "};\n\n";
+
+ Result += "static struct _objc_symtab "
+ "_OBJC_SYMBOLS __attribute__((used, section (\"__OBJC, __symbols\")))= {\n";
+ Result += "\t0, 0, " + utostr(ClsDefCount)
+ + ", " + utostr(CatDefCount) + "\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ Result += "\t,&_OBJC_CLASS_";
+ Result += ClassImplementation[i]->getNameAsString();
+ Result += "\n";
+ }
+
+ for (int i = 0; i < CatDefCount; i++) {
+ Result += "\t,&_OBJC_CATEGORY_";
+ Result += CategoryImplementation[i]->getClassInterface()->getNameAsString();
+ Result += "_";
+ Result += CategoryImplementation[i]->getNameAsString();
+ Result += "\n";
+ }
+
+ Result += "};\n\n";
+
+ // Write objc_module metadata
+
+ /*
+ struct _objc_module {
+ long version;
+ long size;
+ const char *name;
+ struct _objc_symtab *symtab;
+ }
+ */
+
+ Result += "\nstruct _objc_module {\n";
+ Result += "\tlong version;\n";
+ Result += "\tlong size;\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tstruct _objc_symtab *symtab;\n";
+ Result += "};\n\n";
+ Result += "static struct _objc_module "
+ "_OBJC_MODULES __attribute__ ((used, section (\"__OBJC, __module_info\")))= {\n";
+ Result += "\t" + utostr(OBJC_ABI_VERSION) +
+ ", sizeof(struct _objc_module), \"\", &_OBJC_SYMBOLS\n";
+ Result += "};\n\n";
+
+ if (LangOpts.MicrosoftExt) {
+ if (ProtocolExprDecls.size()) {
+ Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n";
+ Result += "#pragma data_seg(push, \".objc_protocol$B\")\n";
+ for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
+ E = ProtocolExprDecls.end(); I != E; ++I) {
+ Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_";
+ Result += (*I)->getNameAsString();
+ Result += " = &_OBJC_PROTOCOL_";
+ Result += (*I)->getNameAsString();
+ Result += ";\n";
+ }
+ Result += "#pragma data_seg(pop)\n\n";
+ }
+ Result += "#pragma section(\".objc_module_info$B\",long,read,write)\n";
+ Result += "#pragma data_seg(push, \".objc_module_info$B\")\n";
+ Result += "static struct _objc_module *_POINTER_OBJC_MODULES = ";
+ Result += "&_OBJC_MODULES;\n";
+ Result += "#pragma data_seg(pop)\n\n";
+ }
+}
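
For a translation unit implementing a single hypothetical class Foo and no categories, the tail of the buffer reads approximately as follows; OBJC_ABI_VERSION is the rewriter's own constant, substituted as a literal by utostr():

    static struct _objc_symtab
        _OBJC_SYMBOLS __attribute__((used, section ("__OBJC, __symbols"))) = {
      0, 0, 1, 0        /* sel_ref_cnt, refs, cls_def_cnt, cat_def_cnt */
      ,&_OBJC_CLASS_Foo
    };

    static struct _objc_module
        _OBJC_MODULES __attribute__ ((used, section ("__OBJC, __module_info"))) = {
      OBJC_ABI_VERSION, sizeof(struct _objc_module), "", &_OBJC_SYMBOLS
    };
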
+
+/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
+/// implementation.
+void RewriteObjCFragileABI::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ // Find category declaration for this implementation.
+ ObjCCategoryDecl *CDecl;
+ for (CDecl = ClassDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->getIdentifier() == IDecl->getIdentifier())
+ break;
+
+ std::string FullCategoryName = ClassDecl->getNameAsString();
+ FullCategoryName += '_';
+ FullCategoryName += IDecl->getNameAsString();
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
+ PropEnd = IDecl->propimpl_end();
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ InstanceMethods.push_back(Setter);
+ }
+ RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
+ true, "CATEGORY_", FullCategoryName.c_str(),
+ Result);
+
+ // Build _objc_method_list for class's class methods if needed
+ RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
+ false, "CATEGORY_", FullCategoryName.c_str(),
+ Result);
+
+ // Protocols referenced in class declaration?
+ // A null CDecl is the case of a category implementation with no category interface.
+ if (CDecl)
+ RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), "CATEGORY",
+ FullCategoryName, Result);
+ /* struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ // Objective-C 1.0 extensions
+ uint32_t size; // sizeof (struct _objc_category)
+ struct _objc_property_list *instance_properties; // category's own
+ // @property decl.
+ };
+ */
+
+ static bool objc_category = false;
+ if (!objc_category) {
+ Result += "\nstruct _objc_category {\n";
+ Result += "\tchar *category_name;\n";
+ Result += "\tchar *class_name;\n";
+ Result += "\tstruct _objc_method_list *instance_methods;\n";
+ Result += "\tstruct _objc_method_list *class_methods;\n";
+ Result += "\tstruct _objc_protocol_list *protocols;\n";
+ Result += "\tunsigned int size;\n";
+ Result += "\tstruct _objc_property_list *instance_properties;\n";
+ Result += "};\n";
+ objc_category = true;
+ }
+ Result += "\nstatic struct _objc_category _OBJC_CATEGORY_";
+ Result += FullCategoryName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __category\")))= {\n\t\"";
+ Result += IDecl->getNameAsString();
+ Result += "\"\n\t, \"";
+ Result += ClassDecl->getNameAsString();
+ Result += "\"\n";
+
+ if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
+ Result += "\t, (struct _objc_method_list *)"
+ "&_OBJC_CATEGORY_INSTANCE_METHODS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+ if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
+ Result += "\t, (struct _objc_method_list *)"
+ "&_OBJC_CATEGORY_CLASS_METHODS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+
+ if (CDecl && CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += "\t, (struct _objc_protocol_list *)&_OBJC_CATEGORY_PROTOCOLS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+ Result += "\t, sizeof(struct _objc_category), 0\n};\n";
+}
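
For a hypothetical category Extras on class Foo that adds no methods and adopts no protocols, the emitted record is:

    static struct _objc_category _OBJC_CATEGORY_Foo_Extras
        __attribute__ ((used, section ("__OBJC, __category"))) = {
      "Extras"
      , "Foo"
      , 0                                /* instance_methods */
      , 0                                /* class_methods */
      , 0                                /* protocols */
      , sizeof(struct _objc_category), 0 /* size, instance_properties */
    };
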
+
+/// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
+/// class methods.
+template<typename MethodIterator>
+void RewriteObjCFragileABI::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) {
+ if (MethodBegin == MethodEnd) return;
+
+ if (!objc_impl_method) {
+ /* struct _objc_method {
+ SEL _cmd;
+ char *method_types;
+ void *_imp;
+ }
+ */
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tSEL _cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ objc_impl_method = true;
+ }
+
+ // Build _objc_method_list for class's methods if needed
+
+ /* struct {
+ struct _objc_method_list *next_method;
+ int method_count;
+ struct _objc_method method_list[];
+ }
+ */
+ unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
+ Result += "\nstatic struct {\n";
+ Result += "\tstruct _objc_method_list *next_method;\n";
+ Result += "\tint method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
+ Result += "_METHODS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __";
+ Result += IsInstanceMethod ? "inst" : "cls";
+ Result += "_meth\")))= ";
+ Result += "{\n\t0, " + utostr(NumMethods) + "\n";
+
+ Result += "\t,{{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
+ Result += "\t ,{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ }
+ Result += "\t }\n};\n";
+}
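
For example, one rewritten instance method -doIt on class Foo would yield the list below; the selector, the "v8@0:4" type encoding, and the internal function name _I_Foo_doIt are all illustrative stand-ins:

    static struct {
      struct _objc_method_list *next_method;
      int method_count;
      struct _objc_method method_list[1];
    } _OBJC_INSTANCE_METHODS_Foo
        __attribute__ ((used, section ("__OBJC, __inst_meth"))) = {
      0, 1
      ,{{(SEL)"doIt", "v8@0:4", (void *)_I_Foo_doIt}
       }
    };
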
+
+Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
+ SourceRange OldRange = IV->getSourceRange();
+ Expr *BaseExpr = IV->getBase();
+
+ // Rewrite the base, but without actually doing replaces.
+ {
+ DisableReplaceStmtScope S(*this);
+ BaseExpr = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(BaseExpr));
+ IV->setBase(BaseExpr);
+ }
+
+ ObjCIvarDecl *D = IV->getDecl();
+
+ Expr *Replacement = IV;
+ if (CurMethodDef) {
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
+ // lookup which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = 0;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Synthesize an explicit cast to gain access to the ivar.
+ std::string RecName = clsDeclared->getIdentifier()->getName();
+ RecName += "_IMPL";
+ IdentifierInfo *II = &Context->Idents.get(RecName);
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
+ CK_BitCast,
+ IV->getBase());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(OldRange.getBegin(),
+ OldRange.getEnd(),
+ castExpr);
+ if (IV->isFreeIvar() &&
+ declaresSameEntity(CurMethodDef->getClassInterface(), iFaceDecl->getDecl())) {
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, D,
+ IV->getLocation(),
+ D->getType(),
+ VK_LValue, OK_Ordinary);
+ Replacement = ME;
+ } else {
+ IV->setBase(PE);
+ }
+ }
+ } else { // we are outside a method.
+ assert(!IV->isFreeIvar() && "Cannot have a free standing ivar outside a method");
+
+ // Explicit ivar refs need to have a cast inserted.
+ // FIXME: consider sharing some of this code with the code above.
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ // lookup which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = 0;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Synthesize an explicit cast to gain access to the ivar.
+ std::string RecName = clsDeclared->getIdentifier()->getName();
+ RecName += "_IMPL";
+ IdentifierInfo *II = &Context->Idents.get(RecName);
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
+ CK_BitCast,
+ IV->getBase());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
+ IV->getBase()->getLocEnd(), castExpr);
+ // Cannot delete IV->getBase(), since PE points to it.
+ // Replace the old base with the cast. This is important when doing
+ // embedded rewrites. For example, [newInv->_container addObject:0].
+ IV->setBase(PE);
+ }
+ }
+
+ ReplaceStmtWithRange(IV, Replacement, OldRange);
+ return Replacement;
+}
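
The net effect, sketched for a hypothetical interface Counter with ivar _count, is an explicit cast through the synthesized _IMPL struct, parenthesized so the -> binds correctly:

    Counter *obj;
    /* an explicit ivar reference obj->_count is rewritten to: */
    ((struct Counter_IMPL *)obj)->_count;
    /* inside a Counter method, the free ivar _count becomes: */
    ((struct Counter_IMPL *)self)->_count;
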
+
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp
index c446324..019e5e7 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp
@@ -18,7 +18,7 @@
void clang::DoRewriteTest(Preprocessor &PP, raw_ostream* OS) {
SourceManager &SM = PP.getSourceManager();
- const LangOptions &LangOpts = PP.getLangOptions();
+ const LangOptions &LangOpts = PP.getLangOpts();
TokenRewriter Rewriter(SM.getMainFileID(), SM, LangOpts);
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
index 464b299cc..43fb01b 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/Decl.h"
#include "clang/Lex/Lexer.h"
#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
@@ -235,7 +236,7 @@ bool Rewriter::InsertText(SourceLocation Loc, StringRef Str,
FileID FID;
unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
- llvm::SmallString<128> indentedStr;
+ SmallString<128> indentedStr;
if (indentNewLines && Str.find('\n') != StringRef::npos) {
StringRef MB = SourceMgr->getBufferData(FID);
diff --git a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
index babb8af..a8e6791 100644
--- a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -63,7 +63,7 @@ namespace {
}
/// CheckUnreachable - Check for unreachable code.
-static void CheckUnreachable(Sema &S, AnalysisContext &AC) {
+static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
UnreachableCodeHandler UC(S);
reachable_code::FindUnreachableCode(AC, UC);
}
@@ -89,7 +89,7 @@ enum ControlFlowKind {
/// return. We assume NeverFallThrough iff we never fall off the end of the
/// statement but we may return. We assume that functions not marked noreturn
/// will return.
-static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
+static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
CFG *cfg = AC.getCFG();
if (cfg == 0) return UnknownFallThrough;
@@ -218,7 +218,7 @@ struct CheckFallThroughDiagnostics {
unsigned diag_AlwaysFallThrough_HasNoReturn;
unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
unsigned diag_NeverFallThroughOrReturn;
- bool funMode;
+ enum { Function, Block, Lambda } funMode;
SourceLocation FuncLoc;
static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
@@ -241,19 +241,8 @@ struct CheckFallThroughDiagnostics {
// Don't suggest that template instantiations be marked "noreturn"
bool isTemplateInstantiation = false;
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func)) {
- switch (Function->getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- break;
-
- case TSK_ImplicitInstantiation:
- case TSK_ExplicitInstantiationDeclaration:
- case TSK_ExplicitInstantiationDefinition:
- isTemplateInstantiation = true;
- break;
- }
- }
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
+ isTemplateInstantiation = Function->isTemplateInstantiation();
if (!isVirtualMethod && !isTemplateInstantiation)
D.diag_NeverFallThroughOrReturn =
@@ -261,7 +250,7 @@ struct CheckFallThroughDiagnostics {
else
D.diag_NeverFallThroughOrReturn = 0;
- D.funMode = true;
+ D.funMode = Function;
return D;
}
@@ -277,13 +266,28 @@ struct CheckFallThroughDiagnostics {
diag::err_falloff_nonvoid_block;
D.diag_NeverFallThroughOrReturn =
diag::warn_suggest_noreturn_block;
- D.funMode = false;
+ D.funMode = Block;
+ return D;
+ }
+
+ static CheckFallThroughDiagnostics MakeForLambda() {
+ CheckFallThroughDiagnostics D;
+ D.diag_MaybeFallThrough_HasNoReturn =
+ diag::err_noreturn_lambda_has_return_expr;
+ D.diag_MaybeFallThrough_ReturnsNonVoid =
+ diag::warn_maybe_falloff_nonvoid_lambda;
+ D.diag_AlwaysFallThrough_HasNoReturn =
+ diag::err_noreturn_lambda_has_return_expr;
+ D.diag_AlwaysFallThrough_ReturnsNonVoid =
+ diag::warn_falloff_nonvoid_lambda;
+ D.diag_NeverFallThroughOrReturn = 0;
+ D.funMode = Lambda;
return D;
}
bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
bool HasNoReturn) const {
- if (funMode) {
+ if (funMode == Function) {
return (ReturnsVoid ||
D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
FuncLoc) == DiagnosticsEngine::Ignored)
@@ -295,9 +299,9 @@ struct CheckFallThroughDiagnostics {
== DiagnosticsEngine::Ignored);
}
- // For blocks.
- return ReturnsVoid && !HasNoReturn
- && (!ReturnsVoid ||
+ // For blocks / lambdas.
+ return ReturnsVoid && !HasNoReturn
+ && ((funMode == Lambda) ||
D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
== DiagnosticsEngine::Ignored);
}
@@ -312,7 +316,7 @@ struct CheckFallThroughDiagnostics {
static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
const BlockExpr *blkExpr,
const CheckFallThroughDiagnostics& CD,
- AnalysisContext &AC) {
+ AnalysisDeclContext &AC) {
bool ReturnsVoid = false;
bool HasNoReturn = false;
@@ -421,47 +425,27 @@ public:
}
static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
+ QualType VariableTy = VD->getType().getCanonicalType();
+ if (VariableTy->isBlockPointerType() &&
+ !VD->hasAttr<BlocksAttr>()) {
+ S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization) << VD->getDeclName()
+ << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
+ return true;
+ }
+
// Don't issue a fixit if there is already an initializer.
if (VD->getInit())
return false;
-
+
// Suggest possible initialization (if any).
- const char *initialization = 0;
- QualType VariableTy = VD->getType().getCanonicalType();
-
- if (VariableTy->isObjCObjectPointerType() ||
- VariableTy->isBlockPointerType()) {
- // Check if 'nil' is defined.
- if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("nil")))
- initialization = " = nil";
- else
- initialization = " = 0";
- }
- else if (VariableTy->isRealFloatingType())
- initialization = " = 0.0";
- else if (VariableTy->isBooleanType() && S.Context.getLangOptions().CPlusPlus)
- initialization = " = false";
- else if (VariableTy->isEnumeralType())
+ const char *Init = S.getFixItZeroInitializerForType(VariableTy);
+ if (!Init)
return false;
- else if (VariableTy->isPointerType() || VariableTy->isMemberPointerType()) {
- if (S.Context.getLangOptions().CPlusPlus0x)
- initialization = " = nullptr";
- // Check if 'NULL' is defined.
- else if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("NULL")))
- initialization = " = NULL";
- else
- initialization = " = 0";
- }
- else if (VariableTy->isScalarType())
- initialization = " = 0";
-
- if (initialization) {
- SourceLocation loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
- S.Diag(loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
- << FixItHint::CreateInsertion(loc, initialization);
- return true;
- }
- return false;
+ SourceLocation Loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
+
+ S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
+ << FixItHint::CreateInsertion(Loc, Init);
+ return true;
}
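
Two sketches of the resulting fixits, with hypothetical variable names: an uninitialized block pointer lacking the BlocksAttr now draws the __block suggestion, while other types fall through to whatever zero initializer getFixItZeroInitializerForType produces:

    void (^callback)(void);  /* fixit note suggests: __block void (^callback)(void); */
    int n;                   /* fixit note suggests: int n = 0;                      */
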
/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
@@ -512,10 +496,15 @@ static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
}
} else {
const BlockExpr *BE = cast<BlockExpr>(E);
- S.Diag(BE->getLocStart(),
- isAlwaysUninit ? diag::warn_uninit_var_captured_by_block
- : diag::warn_maybe_uninit_var_captured_by_block)
- << VD->getDeclName();
+ if (VD->getType()->isBlockPointerType() &&
+ !VD->hasAttr<BlocksAttr>())
+ S.Diag(BE->getLocStart(), diag::warn_uninit_byref_blockvar_captured_by_block)
+ << VD->getDeclName();
+ else
+ S.Diag(BE->getLocStart(),
+ isAlwaysUninit ? diag::warn_uninit_var_captured_by_block
+ : diag::warn_maybe_uninit_var_captured_by_block)
+ << VD->getDeclName();
}
// Report where the variable was declared when the use wasn't within
@@ -586,9 +575,10 @@ public:
// Specially handle the case where we have uses of an uninitialized
// variable, but the root cause is an idiomatic self-init. We want
// to report the diagnostic at the self-init since that is the root cause.
- if (!vec->empty() && hasSelfInit)
+ if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
DiagnoseUninitializedUse(S, vd, vd->getInit()->IgnoreParenCasts(),
- true, /* alwaysReportSelfInit */ true);
+ /* isAlwaysUninit */ true,
+ /* alwaysReportSelfInit */ true);
else {
// Sort the uses by their SourceLocations. While not strictly
// guaranteed to produce them in line/column order, this will provide
@@ -610,6 +600,16 @@ public:
}
delete uses;
}
+
+private:
+ static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
+ for (UsesVec::const_iterator i = vec->begin(), e = vec->end(); i != e; ++i) {
+ if (i->second) {
+ return true;
+ }
+ }
+ return false;
+}
};
}
@@ -619,49 +619,60 @@ public:
//===----------------------------------------------------------------------===//
namespace clang {
namespace thread_safety {
-typedef std::pair<SourceLocation, PartialDiagnostic> DelayedDiag;
-typedef llvm::SmallVector<DelayedDiag, 4> DiagList;
+typedef llvm::SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
+typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
+typedef std::list<DelayedDiag> DiagList;
struct SortDiagBySourceLocation {
- Sema &S;
- SortDiagBySourceLocation(Sema &S) : S(S) {}
+ SourceManager &SM;
+ SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}
bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
// Although this call will be slow, this is only called when outputting
// multiple warnings.
- return S.getSourceManager().isBeforeInTranslationUnit(left.first,
- right.first);
+ return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
}
};
+namespace {
class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
Sema &S;
DiagList Warnings;
+ SourceLocation FunLocation, FunEndLocation;
// Helper functions
void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
- PartialDiagnostic Warning = S.PDiag(DiagID) << LockName;
- Warnings.push_back(DelayedDiag(Loc, Warning));
+ // Gracefully handle rare cases when the analysis can't get a more
+ // precise source location.
+ if (!Loc.isValid())
+ Loc = FunLocation;
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << LockName);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
}
public:
- ThreadSafetyReporter(Sema &S) : S(S) {}
+ ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
+ : S(S), FunLocation(FL), FunEndLocation(FEL) {}
/// \brief Emit all buffered diagnostics in order of sourcelocation.
/// We need to output diagnostics produced while iterating through
/// the lockset in deterministic order, so this function orders diagnostics
/// and outputs them.
void emitDiagnostics() {
- SortDiagBySourceLocation SortDiagBySL(S);
- sort(Warnings.begin(), Warnings.end(), SortDiagBySL);
+ Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
- I != E; ++I)
- S.Diag(I->first, I->second);
+ I != E; ++I) {
+ S.Diag(I->first.first, I->first.second);
+ const OptionalNotes &Notes = I->second;
+ for (unsigned NoteI = 0, NoteN = Notes.size(); NoteI != NoteN; ++NoteI)
+ S.Diag(Notes[NoteI].first, Notes[NoteI].second);
+ }
}
void handleInvalidLockExp(SourceLocation Loc) {
- PartialDiagnostic Warning = S.PDiag(diag::warn_cannot_resolve_lock) << Loc;
- Warnings.push_back(DelayedDiag(Loc, Warning));
+ PartialDiagnosticAt Warning(Loc,
+ S.PDiag(diag::warn_cannot_resolve_lock) << Loc);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
}
void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
@@ -671,12 +682,13 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
warnLockMismatch(diag::warn_double_lock, LockName, Loc);
}
- void handleMutexHeldEndOfScope(Name LockName, SourceLocation Loc,
+ void handleMutexHeldEndOfScope(Name LockName, SourceLocation LocLocked,
+ SourceLocation LocEndOfScope,
LockErrorKind LEK){
unsigned DiagID = 0;
switch (LEK) {
case LEK_LockedSomePredecessors:
- DiagID = diag::warn_lock_at_end_of_scope;
+ DiagID = diag::warn_lock_some_predecessors;
break;
case LEK_LockedSomeLoopIterations:
DiagID = diag::warn_expecting_lock_held_on_loop;
@@ -685,18 +697,22 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
DiagID = diag::warn_no_unlock;
break;
}
- warnLockMismatch(DiagID, LockName, Loc);
+ if (LocEndOfScope.isInvalid())
+ LocEndOfScope = FunEndLocation;
+
+ PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << LockName);
+ PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here));
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
}
void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
SourceLocation Loc2) {
- PartialDiagnostic Warning =
- S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName;
- PartialDiagnostic Note =
- S.PDiag(diag::note_lock_exclusive_and_shared) << LockName;
- Warnings.push_back(DelayedDiag(Loc1, Warning));
- Warnings.push_back(DelayedDiag(Loc2, Note));
+ PartialDiagnosticAt Warning(
+ Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName);
+ PartialDiagnosticAt Note(
+ Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) << LockName);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
}
void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
@@ -706,9 +722,9 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
unsigned DiagID = POK == POK_VarAccess?
diag::warn_variable_requires_any_lock:
diag::warn_var_deref_requires_any_lock;
- PartialDiagnostic Warning = S.PDiag(DiagID)
- << D->getName() << getLockKindFromAccessKind(AK);
- Warnings.push_back(DelayedDiag(Loc, Warning));
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
+ << D->getName() << getLockKindFromAccessKind(AK));
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
}
void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
@@ -725,19 +741,20 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
DiagID = diag::warn_fun_requires_lock;
break;
}
- PartialDiagnostic Warning = S.PDiag(DiagID)
- << D->getName() << LockName << LK;
- Warnings.push_back(DelayedDiag(Loc, Warning));
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
+ << D->getName() << LockName << LK);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
}
void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
- PartialDiagnostic Warning =
- S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName;
- Warnings.push_back(DelayedDiag(Loc, Warning));
+ PartialDiagnosticAt Warning(Loc,
+ S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
}
};
}
}
+}
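To illustrate the warning/note pairing buffered above, a sketch of code that reaches handleMutexHeldEndOfScope() with LEK_LockedSomePredecessors (hypothetical user code using Clang's thread-safety annotations):

    Mutex mu;

    void f(bool b) {
      if (b)
        mu.Lock();  // note_locked_here is attached to this acquisition
    }               // lock held on only some predecessors of the exit block:
                    // warn_lock_some_predecessors, reported at FunEndLocation
                    // when the analysis has no better end-of-scope location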
//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
@@ -813,7 +830,7 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
const Stmt *Body = D->getBody();
assert(Body);
- AnalysisContext AC(D, 0);
+ AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0, D, 0);
// Don't generate EH edges for CallExprs as we'd like to avoid the n^2
// explosion for destructors that can result and the compile time hit.
@@ -828,8 +845,8 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
// prototyping, but we need a way for analyses to say what expressions they
// expect to always be CFGElements and then fill in the BuildOptions
// appropriately. This is essentially a layering violation.
- if (P.enableCheckUnreachable) {
- // Unreachable code analysis requires a linearized CFG.
+ if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis) {
+ // Unreachable code analysis and thread safety require a linearized CFG.
AC.getCFGBuildOptions().setAllAlwaysAdd();
}
else {
@@ -868,8 +885,12 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
bool processed = false;
if (const Stmt *stmt = i->stmt) {
const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
- assert(block);
- if (CFGReverseBlockReachabilityAnalysis *cra = AC.getCFGReachablityAnalysis()) {
+ CFGReverseBlockReachabilityAnalysis *cra =
+ AC.getCFGReachablityAnalysis();
+ // FIXME: We should be able to assert that block is non-null, but
+ // the CFG analysis can skip potentially-evaluated expressions in
+ // edge cases; see test/Sema/vla-2.c.
+ if (block && cra) {
// Can this block be reached from the entrance?
if (cra->isReachable(&AC.getCFG()->getEntry(), block))
S.Diag(D.Loc, D.PD);
@@ -892,17 +913,32 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
if (P.enableCheckFallThrough) {
const CheckFallThroughDiagnostics &CD =
(isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
- : CheckFallThroughDiagnostics::MakeForFunction(D));
+ : (isa<CXXMethodDecl>(D) &&
+ cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
+ cast<CXXMethodDecl>(D)->getParent()->isLambda())
+ ? CheckFallThroughDiagnostics::MakeForLambda()
+ : CheckFallThroughDiagnostics::MakeForFunction(D));
CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
}
// Warning: check for unreachable code
- if (P.enableCheckUnreachable)
- CheckUnreachable(S, AC);
+ if (P.enableCheckUnreachable) {
+ // Only check for unreachable code on non-template instantiations.
+ // Different template instantiations can effectively change the control-flow
+ // and it is very difficult to prove that a snippet of code in a template
+ // is unreachable for all instantiations.
+ bool isTemplateInstantiation = false;
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
+ isTemplateInstantiation = Function->isTemplateInstantiation();
+ if (!isTemplateInstantiation)
+ CheckUnreachable(S, AC);
+ }
// Check for thread safety violations
if (P.enableThreadSafetyAnalysis) {
- thread_safety::ThreadSafetyReporter Reporter(S);
+ SourceLocation FL = AC.getDecl()->getLocation();
+ SourceLocation FEL = AC.getDecl()->getLocEnd();
+ thread_safety::ThreadSafetyReporter Reporter(S, FL, FEL);
thread_safety::runThreadSafetyAnalysis(AC, Reporter);
Reporter.emitDiagnostics();
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
index 13a0ede..f142ab4 100644
--- a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
@@ -101,135 +101,26 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
StringRef AttrName = Name->getName();
// Normalize the attribute name, __foo__ becomes foo.
- if (AttrName.startswith("__") && AttrName.endswith("__"))
+ if (AttrName.startswith("__") && AttrName.endswith("__") &&
+ AttrName.size() >= 4)
AttrName = AttrName.substr(2, AttrName.size() - 4);
return llvm::StringSwitch<AttributeList::Kind>(AttrName)
- .Case("weak", AT_weak)
- .Case("weakref", AT_weakref)
- .Case("objc_arc_weak_reference_unavailable", AT_arc_weakref_unavailable)
- .Case("pure", AT_pure)
- .Case("mode", AT_mode)
- .Case("used", AT_used)
- .Case("alias", AT_alias)
- .Case("align", AT_aligned)
- .Case("cdecl", AT_cdecl)
- .Case("const", AT_const)
- .Case("__const", AT_const) // some GCC headers do contain this spelling
- .Case("blocks", AT_blocks)
- .Case("format", AT_format)
- .Case("malloc", AT_malloc)
- .Case("packed", AT_packed)
- .Case("unused", AT_unused)
- .Case("aligned", AT_aligned)
- .Case("cleanup", AT_cleanup)
- .Case("naked", AT_naked)
- .Case("nodebug", AT_nodebug)
- .Case("nonnull", AT_nonnull)
- .Case("nothrow", AT_nothrow)
- .Case("objc_gc", AT_objc_gc)
- .Case("regparm", AT_regparm)
- .Case("section", AT_section)
- .Case("stdcall", AT_stdcall)
- .Case("annotate", AT_annotate)
- .Case("fastcall", AT_fastcall)
- .Case("ibaction", AT_IBAction)
- .Case("iboutlet", AT_IBOutlet)
- .Case("iboutletcollection", AT_IBOutletCollection)
- .Case("noreturn", AT_noreturn)
- .Case("noinline", AT_noinline)
- .Case("sentinel", AT_sentinel)
- .Case("NSObject", AT_nsobject)
- .Case("dllimport", AT_dllimport)
- .Case("dllexport", AT_dllexport)
- .Case("may_alias", AT_may_alias)
- .Case("base_check", AT_base_check)
- .Case("deprecated", AT_deprecated)
- .Case("availability", AT_availability)
- .Case("visibility", AT_visibility)
- .Case("destructor", AT_destructor)
- .Case("format_arg", AT_format_arg)
- .Case("gnu_inline", AT_gnu_inline)
- .Case("weak_import", AT_weak_import)
- .Case("vecreturn", AT_vecreturn)
- .Case("vector_size", AT_vector_size)
- .Case("constructor", AT_constructor)
- .Case("unavailable", AT_unavailable)
- .Case("overloadable", AT_overloadable)
+ #include "clang/Sema/AttrParsedAttrKinds.inc"
.Case("address_space", AT_address_space)
- .Case("opencl_image_access", AT_opencl_image_access)
- .Case("always_inline", AT_always_inline)
- .Case("returns_twice", AT_returns_twice)
+ .Case("align", AT_aligned) // FIXME - should it be "aligned"?
+ .Case("base_check", AT_base_check)
+ .Case("bounded", IgnoredAttribute) // OpenBSD
+ .Case("__const", AT_const) // some GCC headers do contain this spelling
+ .Case("cf_returns_autoreleased", AT_cf_returns_autoreleased)
+ .Case("mode", AT_mode)
.Case("vec_type_hint", IgnoredAttribute)
- .Case("objc_exception", AT_objc_exception)
- .Case("objc_method_family", AT_objc_method_family)
- .Case("objc_returns_inner_pointer", AT_objc_returns_inner_pointer)
.Case("ext_vector_type", AT_ext_vector_type)
.Case("neon_vector_type", AT_neon_vector_type)
.Case("neon_polyvector_type", AT_neon_polyvector_type)
- .Case("transparent_union", AT_transparent_union)
- .Case("analyzer_noreturn", AT_analyzer_noreturn)
- .Case("warn_unused_result", AT_warn_unused_result)
- .Case("carries_dependency", AT_carries_dependency)
- .Case("ns_bridged", AT_ns_bridged)
- .Case("ns_consumed", AT_ns_consumed)
- .Case("ns_consumes_self", AT_ns_consumes_self)
- .Case("ns_returns_autoreleased", AT_ns_returns_autoreleased)
- .Case("ns_returns_not_retained", AT_ns_returns_not_retained)
- .Case("ns_returns_retained", AT_ns_returns_retained)
- .Case("cf_audited_transfer", AT_cf_audited_transfer)
- .Case("cf_consumed", AT_cf_consumed)
- .Case("cf_returns_not_retained", AT_cf_returns_not_retained)
- .Case("cf_returns_retained", AT_cf_returns_retained)
- .Case("cf_returns_autoreleased", AT_cf_returns_autoreleased)
- .Case("cf_unknown_transfer", AT_cf_unknown_transfer)
- .Case("ns_consumes_self", AT_ns_consumes_self)
- .Case("ns_consumed", AT_ns_consumed)
+ .Case("opencl_image_access", AT_opencl_image_access)
+ .Case("objc_gc", AT_objc_gc)
.Case("objc_ownership", AT_objc_ownership)
- .Case("objc_precise_lifetime", AT_objc_precise_lifetime)
- .Case("ownership_returns", AT_ownership_returns)
- .Case("ownership_holds", AT_ownership_holds)
- .Case("ownership_takes", AT_ownership_takes)
- .Case("reqd_work_group_size", AT_reqd_wg_size)
- .Case("init_priority", AT_init_priority)
- .Case("no_instrument_function", AT_no_instrument_function)
- .Case("thiscall", AT_thiscall)
- .Case("bounded", IgnoredAttribute) // OpenBSD
- .Case("pascal", AT_pascal)
- .Case("__cdecl", AT_cdecl)
- .Case("__stdcall", AT_stdcall)
- .Case("__fastcall", AT_fastcall)
- .Case("__thiscall", AT_thiscall)
- .Case("__pascal", AT_pascal)
- .Case("constant", AT_constant)
- .Case("device", AT_device)
- .Case("global", AT_global)
- .Case("host", AT_host)
- .Case("shared", AT_shared)
- .Case("launch_bounds", AT_launch_bounds)
- .Case("common", AT_common)
- .Case("nocommon", AT_nocommon)
- .Case("opencl_kernel_function", AT_opencl_kernel_function)
- .Case("uuid", AT_uuid)
- .Case("pcs", AT_pcs)
- .Case("ms_struct", AT_MsStruct)
- .Case("guarded_var", AT_guarded_var)
- .Case("pt_guarded_var", AT_pt_guarded_var)
- .Case("scoped_lockable", AT_scoped_lockable)
- .Case("lockable", AT_lockable)
- .Case("no_thread_safety_analysis", AT_no_thread_safety_analysis)
- .Case("guarded_by", AT_guarded_by)
- .Case("pt_guarded_by", AT_pt_guarded_by)
- .Case("acquired_after", AT_acquired_after)
- .Case("acquired_before", AT_acquired_before)
- .Case("exclusive_lock_function", AT_exclusive_lock_function)
- .Case("exclusive_locks_required", AT_exclusive_locks_required)
- .Case("exclusive_trylock_function", AT_exclusive_trylock_function)
- .Case("lock_returned", AT_lock_returned)
- .Case("locks_excluded", AT_locks_excluded)
- .Case("shared_lock_function", AT_shared_lock_function)
- .Case("shared_locks_required", AT_shared_locks_required)
- .Case("shared_trylock_function", AT_shared_trylock_function)
- .Case("unlock_function", AT_unlock_function)
+ .Case("vector_size", AT_vector_size)
.Default(UnknownAttribute);
}
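The new size() >= 4 guard matters for degenerate spellings: for "__" or "___" the startswith("__") and endswith("__") tests both pass because the prefix and suffix overlap, and substr(2, size() - 4) would strip the name down to an empty string. A condensed sketch of the normalization:

    StringRef AttrName = Name->getName();
    // "__weak__" -> "weak"; "__" and "___" are left untouched by the guard.
    if (AttrName.startswith("__") && AttrName.endswith("__") &&
        AttrName.size() >= 4)
      AttrName = AttrName.substr(2, AttrName.size() - 4);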
diff --git a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
index da98603..ce9bbb9 100644
--- a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/Lex/Preprocessor.h"
#include "clang-c/Index.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
@@ -71,8 +72,8 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_ObjCCategoryName:
return false;
}
-
- return false;
+
+ llvm_unreachable("Invalid CodeCompletionContext::Kind!");
}
//===----------------------------------------------------------------------===//
@@ -93,7 +94,6 @@ CodeCompletionString::Chunk::Chunk(ChunkKind Kind, const char *Text)
case CK_Optional:
llvm_unreachable("Optional strings cannot be created from text");
- break;
case CK_LeftParen:
this->Text = "(";
@@ -192,9 +192,12 @@ CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
unsigned Priority,
CXAvailabilityKind Availability,
const char **Annotations,
- unsigned NumAnnotations)
- : NumChunks(NumChunks), NumAnnotations(NumAnnotations)
- , Priority(Priority), Availability(Availability)
+ unsigned NumAnnotations,
+ CXCursorKind ParentKind,
+ StringRef ParentName)
+ : NumChunks(NumChunks), NumAnnotations(NumAnnotations),
+ Priority(Priority), Availability(Availability), ParentKind(ParentKind),
+ ParentName(ParentName)
{
assert(NumChunks <= 0xffff);
assert(NumAnnotations <= 0xffff);
@@ -260,23 +263,137 @@ const char *CodeCompletionAllocator::CopyString(Twine String) {
// FIXME: It would be more efficient to teach Twine to tell us its size and
// then add a routine there to fill in an allocated char* with the contents
// of the string.
- llvm::SmallString<128> Data;
+ SmallString<128> Data;
return CopyString(String.toStringRef(Data));
}
+StringRef CodeCompletionTUInfo::getParentName(DeclContext *DC) {
+ NamedDecl *ND = dyn_cast<NamedDecl>(DC);
+ if (!ND)
+ return StringRef();
+
+ // Check whether we've already cached the parent name.
+ StringRef &CachedParentName = ParentNames[DC];
+ if (!CachedParentName.empty())
+ return CachedParentName;
+
+ // If we already processed this DeclContext and assigned empty to it, the
+ // data pointer will be non-null.
+ if (CachedParentName.data() != 0)
+ return StringRef();
+
+ // Find the interesting names.
+ llvm::SmallVector<DeclContext *, 2> Contexts;
+ while (DC && !DC->isFunctionOrMethod()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(DC)) {
+ if (ND->getIdentifier())
+ Contexts.push_back(DC);
+ }
+
+ DC = DC->getParent();
+ }
+
+ {
+ llvm::SmallString<128> S;
+ llvm::raw_svector_ostream OS(S);
+ bool First = true;
+ for (unsigned I = Contexts.size(); I != 0; --I) {
+ if (First)
+ First = false;
+ else {
+ OS << "::";
+ }
+
+ DeclContext *CurDC = Contexts[I-1];
+ if (ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(CurDC))
+ CurDC = CatImpl->getCategoryDecl();
+
+ if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CurDC)) {
+ ObjCInterfaceDecl *Interface = Cat->getClassInterface();
+ if (!Interface) {
+ // Assign an empty StringRef with non-null data, to distinguish this
+ // "no parent name" state from "haven't processed this DeclContext yet".
+ CachedParentName = StringRef((const char *)~0U, 0);
+ return StringRef();
+ }
+
+ OS << Interface->getName() << '(' << Cat->getName() << ')';
+ } else {
+ OS << cast<NamedDecl>(CurDC)->getName();
+ }
+ }
+
+ CachedParentName = AllocatorRef->CopyString(OS.str());
+ }
+
+ return CachedParentName;
+}
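getParentName() packs three cache states into a single StringRef; a condensed sketch of the sentinel used above:

    // data() == 0 and empty()   -> DeclContext not processed yet
    // data() != 0 and empty()   -> processed, no usable parent name
    // data() != 0 and !empty()  -> processed, name is cached
    StringRef NoParent((const char *)~0U, 0);  // non-null data, length 0
    assert(NoParent.empty() && NoParent.data() != 0);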
+
CodeCompletionString *CodeCompletionBuilder::TakeString() {
- void *Mem = Allocator.Allocate(
+ void *Mem = getAllocator().Allocate(
sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size()
+ sizeof(const char *) * Annotations.size(),
llvm::alignOf<CodeCompletionString>());
CodeCompletionString *Result
= new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
Priority, Availability,
- Annotations.data(), Annotations.size());
+ Annotations.data(), Annotations.size(),
+ ParentKind, ParentName);
Chunks.clear();
return Result;
}
+void CodeCompletionBuilder::AddTypedTextChunk(const char *Text) {
+ Chunks.push_back(Chunk(CodeCompletionString::CK_TypedText, Text));
+}
+
+void CodeCompletionBuilder::AddTextChunk(const char *Text) {
+ Chunks.push_back(Chunk::CreateText(Text));
+}
+
+void CodeCompletionBuilder::AddOptionalChunk(CodeCompletionString *Optional) {
+ Chunks.push_back(Chunk::CreateOptional(Optional));
+}
+
+void CodeCompletionBuilder::AddPlaceholderChunk(const char *Placeholder) {
+ Chunks.push_back(Chunk::CreatePlaceholder(Placeholder));
+}
+
+void CodeCompletionBuilder::AddInformativeChunk(const char *Text) {
+ Chunks.push_back(Chunk::CreateInformative(Text));
+}
+
+void CodeCompletionBuilder::AddResultTypeChunk(const char *ResultType) {
+ Chunks.push_back(Chunk::CreateResultType(ResultType));
+}
+
+void
+CodeCompletionBuilder::AddCurrentParameterChunk(const char *CurrentParameter) {
+ Chunks.push_back(Chunk::CreateCurrentParameter(CurrentParameter));
+}
+
+void CodeCompletionBuilder::AddChunk(CodeCompletionString::ChunkKind CK,
+ const char *Text) {
+ Chunks.push_back(Chunk(CK, Text));
+}
+
+void CodeCompletionBuilder::addParentContext(DeclContext *DC) {
+ if (DC->isTranslationUnit()) {
+ ParentKind = CXCursor_TranslationUnit;
+ return;
+ }
+
+ if (DC->isFunctionOrMethod())
+ return;
+
+ NamedDecl *ND = dyn_cast<NamedDecl>(DC);
+ if (!ND)
+ return;
+
+ ParentKind = getCursorKindForDecl(ND);
+ ParentName = getCodeCompletionTUInfo().getParentName(DC);
+}
+
unsigned CodeCompletionResult::getPriorityFromDecl(NamedDecl *ND) {
if (!ND)
return CCP_Unlikely;
@@ -330,8 +447,8 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
case CK_FunctionType:
return Type;
}
-
- return 0;
+
+ llvm_unreachable("Invalid CandidateKind!");
}
//===----------------------------------------------------------------------===//
@@ -356,7 +473,8 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
if (Results[I].Hidden)
OS << " (Hidden)";
if (CodeCompletionString *CCS
- = Results[I].CreateCodeCompletionString(SemaRef, Allocator)) {
+ = Results[I].CreateCodeCompletionString(SemaRef, getAllocator(),
+ CCTUInfo)) {
OS << " : " << CCS->getAsString();
}
@@ -370,7 +488,8 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
case CodeCompletionResult::RK_Macro: {
OS << Results[I].Macro->getName();
if (CodeCompletionString *CCS
- = Results[I].CreateCodeCompletionString(SemaRef, Allocator)) {
+ = Results[I].CreateCodeCompletionString(SemaRef, getAllocator(),
+ CCTUInfo)) {
OS << " : " << CCS->getAsString();
}
OS << '\n';
@@ -394,17 +513,32 @@ PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef,
for (unsigned I = 0; I != NumCandidates; ++I) {
if (CodeCompletionString *CCS
= Candidates[I].CreateSignatureString(CurrentArg, SemaRef,
- Allocator)) {
+ getAllocator(), CCTUInfo)) {
OS << "OVERLOAD: " << CCS->getAsString() << "\n";
}
}
}
+/// \brief Retrieve the effective availability of the given declaration.
+static AvailabilityResult getDeclAvailability(Decl *D) {
+ AvailabilityResult AR = D->getAvailability();
+ if (isa<EnumConstantDecl>(D))
+ AR = std::max(AR, cast<Decl>(D->getDeclContext())->getAvailability());
+ return AR;
+}
+
void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
switch (Kind) {
- case RK_Declaration:
+ case RK_Pattern:
+ if (!Declaration) {
+ // Do nothing: Patterns can come with cursor kinds!
+ break;
+ }
+ // Fall through
+
+ case RK_Declaration: {
// Set the availability based on attributes.
- switch (Declaration->getAvailability()) {
+ switch (getDeclAvailability(Declaration)) {
case AR_Available:
case AR_NotYetIntroduced:
Availability = CXAvailability_Available;
@@ -424,9 +558,19 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
Availability = CXAvailability_NotAvailable;
CursorKind = getCursorKindForDecl(Declaration);
- if (CursorKind == CXCursor_UnexposedDecl)
- CursorKind = CXCursor_NotImplemented;
+ if (CursorKind == CXCursor_UnexposedDecl) {
+ // FIXME: Forward declarations of Objective-C classes and protocols
+ // are not directly exposed, but we want code completion to treat them
+ // like a definition.
+ if (isa<ObjCInterfaceDecl>(Declaration))
+ CursorKind = CXCursor_ObjCInterfaceDecl;
+ else if (isa<ObjCProtocolDecl>(Declaration))
+ CursorKind = CXCursor_ObjCProtocolDecl;
+ else
+ CursorKind = CXCursor_NotImplemented;
+ }
break;
+ }
case RK_Macro:
Availability = CXAvailability_Available;
@@ -436,11 +580,7 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
case RK_Keyword:
Availability = CXAvailability_Available;
CursorKind = CXCursor_NotImplemented;
- break;
-
- case RK_Pattern:
- // Do nothing: Patterns can come with cursor kinds!
- break;
+ break;
}
if (!Accessible)
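The chunk helpers defined earlier in this file are used together to assemble completion strings; a minimal usage sketch (construction details are an assumption — the builder is taken to be set up with an allocator and CodeCompletionTUInfo as introduced by this patch):

    CodeCompletionBuilder Builder(Allocator, CCTUInfo);
    Builder.AddResultTypeChunk("int");      // shown before the completion
    Builder.AddTypedTextChunk("size");      // the text matched while typing
    Builder.AddPlaceholderChunk("index");   // user-editable placeholder
    CodeCompletionString *CCS = Builder.TakeString();  // via getAllocator()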
diff --git a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
index f0a763e..b531acc 100644
--- a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
@@ -15,6 +15,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/Sema.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
@@ -150,6 +151,9 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
unsigned TypeQuals,
bool RefQualifierIsLvalueRef,
SourceLocation RefQualifierLoc,
+ SourceLocation ConstQualifierLoc,
+ SourceLocation
+ VolatileQualifierLoc,
SourceLocation MutableLoc,
ExceptionSpecificationType
ESpecType,
@@ -176,6 +180,8 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
I.Fun.ArgInfo = 0;
I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef;
I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
+ I.Fun.ConstQualifierLoc = ConstQualifierLoc.getRawEncoding();
+ I.Fun.VolatileQualifierLoc = VolatileQualifierLoc.getRawEncoding();
I.Fun.MutableLoc = MutableLoc.getRawEncoding();
I.Fun.ExceptionSpecType = ESpecType;
I.Fun.ExceptionSpecLoc = ESpecLoc.getRawEncoding();
@@ -239,7 +245,6 @@ bool Declarator::isDeclarationOfFunction() const {
return false;
}
llvm_unreachable("Invalid type chunk");
- return false;
}
switch (DS.getTypeSpecType()) {
@@ -259,6 +264,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_float:
case TST_half:
case TST_int:
+ case TST_int128:
case TST_struct:
case TST_union:
case TST_unknown_anytype:
@@ -289,8 +295,8 @@ bool Declarator::isDeclarationOfFunction() const {
return QT->isFunctionType();
}
}
-
- return false;
+
+ llvm_unreachable("Invalid TypeSpecType!");
}
/// getParsedSpecifiers - Return a bitmask of which flavors of specifiers this
@@ -374,6 +380,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T) {
case DeclSpec::TST_char16: return "char16_t";
case DeclSpec::TST_char32: return "char32_t";
case DeclSpec::TST_int: return "int";
+ case DeclSpec::TST_int128: return "__int128";
case DeclSpec::TST_half: return "half";
case DeclSpec::TST_float: return "float";
case DeclSpec::TST_double: return "double";
@@ -416,7 +423,7 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
// It seems sensible to prohibit private_extern too
// The cl_clang_storage_class_specifiers extension enables support for
// these storage-class specifiers.
- if (S.getLangOptions().OpenCL &&
+ if (S.getLangOpts().OpenCL &&
!S.getOpenCLOptions().cl_clang_storage_class_specifiers) {
switch (SC) {
case SCS_extern:
@@ -435,7 +442,7 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
if (StorageClassSpec != SCS_unspecified) {
// Maybe this is an attempt to use C++0x 'auto' outside of C++0x mode.
bool isInvalid = true;
- if (TypeSpecType == TST_unspecified && S.getLangOptions().CPlusPlus) {
+ if (TypeSpecType == TST_unspecified && S.getLangOpts().CPlusPlus) {
if (SC == SCS_auto)
return SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID);
if (StorageClassSpec == SCS_auto) {
@@ -813,7 +820,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP) {
if (TypeSpecSign != TSS_unspecified) {
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
- else if (TypeSpecType != TST_int &&
+ else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
TypeSpecType != TST_char && TypeSpecType != TST_wchar) {
Diag(D, TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType);
@@ -861,7 +868,8 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP) {
TypeSpecType = TST_double; // _Complex -> _Complex double.
} else if (TypeSpecType == TST_int || TypeSpecType == TST_char) {
// Note that this intentionally doesn't include _Complex _Bool.
- Diag(D, TSTLoc, diag::ext_integer_complex);
+ if (!PP.getLangOpts().CPlusPlus)
+ Diag(D, TSTLoc, diag::ext_integer_complex);
} else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
Diag(D, TSCLoc, diag::err_invalid_complex_spec)
<< getSpecifierName((TST)TypeSpecType);
@@ -874,7 +882,7 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP) {
// class specifier, then assume this is an attempt to use C++0x's 'auto'
// type specifier.
// FIXME: Does Microsoft really support implicit int in C++?
- if (PP.getLangOptions().CPlusPlus && !PP.getLangOptions().MicrosoftExt &&
+ if (PP.getLangOpts().CPlusPlus && !PP.getLangOpts().MicrosoftExt &&
TypeSpecType == TST_unspecified && StorageClassSpec == SCS_auto) {
TypeSpecType = TST_auto;
StorageClassSpec = StorageClassSpecAsWritten = SCS_unspecified;
@@ -883,12 +891,17 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP) {
}
// Diagnose if we've recovered from an ill-formed 'auto' storage class
// specifier in a pre-C++0x dialect of C++.
- if (!PP.getLangOptions().CPlusPlus0x && TypeSpecType == TST_auto)
+ if (!PP.getLangOpts().CPlusPlus0x && TypeSpecType == TST_auto)
Diag(D, TSTLoc, diag::ext_auto_type_specifier);
- if (PP.getLangOptions().CPlusPlus && !PP.getLangOptions().CPlusPlus0x &&
+ if (PP.getLangOpts().CPlusPlus && !PP.getLangOpts().CPlusPlus0x &&
StorageClassSpec == SCS_auto)
Diag(D, StorageClassSpecLoc, diag::warn_auto_storage_class)
<< FixItHint::CreateRemoval(StorageClassSpecLoc);
+ if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32)
+ Diag(D, TSTLoc, diag::warn_cxx98_compat_unicode_type)
+ << (TypeSpecType == TST_char16 ? "char16_t" : "char32_t");
+ if (Constexpr_specified)
+ Diag(D, ConstexprLoc, diag::warn_cxx98_compat_constexpr);
// C++ [class.friend]p6:
// No storage-class-specifier shall appear in the decl-specifier-seq
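With TST_int128 threaded through Finish(), the sign-specifier check now accepts the 128-bit type; illustrative declarations:

    __int128 a;            // TST_int128
    unsigned __int128 b;   // previously tripped err_invalid_sign_spec
    signed __int128 c;     // likewise accepted by the TST_int128 case above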
diff --git a/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
index d6c1ad1..876f9d7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
@@ -20,13 +20,15 @@ using namespace clang;
using namespace sema;
DelayedDiagnostic DelayedDiagnostic::makeDeprecation(SourceLocation Loc,
- const NamedDecl *D,
- StringRef Msg) {
+ const NamedDecl *D,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ StringRef Msg) {
DelayedDiagnostic DD;
DD.Kind = Deprecation;
DD.Triggered = false;
DD.Loc = Loc;
DD.DeprecationData.Decl = D;
+ DD.DeprecationData.UnknownObjCClass = UnknownObjCClass;
char *MessageData = 0;
if (Msg.size()) {
MessageData = new char [Msg.size()];
diff --git a/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
index a643267..4d62cab 100644
--- a/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
@@ -15,7 +15,10 @@
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/Scope.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/Preprocessor.h"
using namespace clang;
@@ -93,9 +96,11 @@ IdentifierResolver::IdDeclInfo::ReplaceDecl(NamedDecl *Old, NamedDecl *New) {
// IdentifierResolver Implementation
//===----------------------------------------------------------------------===//
-IdentifierResolver::IdentifierResolver(const LangOptions &langOpt)
- : LangOpt(langOpt), IdDeclInfos(new IdDeclInfoMap) {
+IdentifierResolver::IdentifierResolver(Preprocessor &PP)
+ : LangOpt(PP.getLangOpts()), PP(PP),
+ IdDeclInfos(new IdDeclInfoMap) {
}
+
IdentifierResolver::~IdentifierResolver() {
delete IdDeclInfos;
}
@@ -108,7 +113,7 @@ bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx,
bool ExplicitInstantiationOrSpecialization) const {
Ctx = Ctx->getRedeclContext();
- if (Ctx->isFunctionOrMethod()) {
+ if (Ctx->isFunctionOrMethod() || S->isFunctionPrototypeScope()) {
// Ignore the scopes associated within transparent declaration contexts.
while (S->getEntity() &&
((DeclContext *)S->getEntity())->isTransparentContext())
@@ -146,7 +151,7 @@ bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx,
void IdentifierResolver::AddDecl(NamedDecl *D) {
DeclarationName Name = D->getDeclName();
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
- II->setIsFromAST(false);
+ updatingIdentifier(*II);
void *Ptr = Name.getFETokenInfo<void>();
@@ -170,6 +175,9 @@ void IdentifierResolver::AddDecl(NamedDecl *D) {
void IdentifierResolver::InsertDeclAfter(iterator Pos, NamedDecl *D) {
DeclarationName Name = D->getDeclName();
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ updatingIdentifier(*II);
+
void *Ptr = Name.getFETokenInfo<void>();
if (!Ptr) {
@@ -194,9 +202,6 @@ void IdentifierResolver::InsertDeclAfter(iterator Pos, NamedDecl *D) {
return;
}
- if (IdentifierInfo *II = Name.getAsIdentifierInfo())
- II->setIsFromAST(false);
-
// General case: insert the declaration at the appropriate point in the
// list, which already has at least two elements.
IdDeclInfo *IDI = toIdDeclInfo(Ptr);
@@ -212,7 +217,7 @@ void IdentifierResolver::RemoveDecl(NamedDecl *D) {
assert(D && "null param passed");
DeclarationName Name = D->getDeclName();
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
- II->setIsFromAST(false);
+ updatingIdentifier(*II);
void *Ptr = Name.getFETokenInfo<void>();
@@ -233,7 +238,7 @@ bool IdentifierResolver::ReplaceDecl(NamedDecl *Old, NamedDecl *New) {
DeclarationName Name = Old->getDeclName();
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
- II->setIsFromAST(false);
+ updatingIdentifier(*II);
void *Ptr = Name.getFETokenInfo<void>();
@@ -254,6 +259,9 @@ bool IdentifierResolver::ReplaceDecl(NamedDecl *Old, NamedDecl *New) {
/// begin - Returns an iterator for decls with name 'Name'.
IdentifierResolver::iterator
IdentifierResolver::begin(DeclarationName Name) {
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ readingIdentifier(*II);
+
void *Ptr = Name.getFETokenInfo<void>();
if (!Ptr) return end();
@@ -269,27 +277,133 @@ IdentifierResolver::begin(DeclarationName Name) {
return end();
}
-void IdentifierResolver::AddDeclToIdentifierChain(IdentifierInfo *II,
- NamedDecl *D) {
- II->setIsFromAST(false);
- void *Ptr = II->getFETokenInfo<void>();
+namespace {
+ enum DeclMatchKind {
+ DMK_Different,
+ DMK_Replace,
+ DMK_Ignore
+ };
+}
- if (!Ptr) {
- II->setFETokenInfo(D);
- return;
+/// \brief Compare two declarations to see whether they are different or,
+/// if they are the same, whether the new declaration should replace the
+/// existing declaration.
+static DeclMatchKind compareDeclarations(NamedDecl *Existing, NamedDecl *New) {
+ // If the declarations are identical, ignore the new one.
+ if (Existing == New)
+ return DMK_Ignore;
+
+ // If the declarations have different kinds, they're obviously different.
+ if (Existing->getKind() != New->getKind())
+ return DMK_Different;
+
+ // If the declarations are redeclarations of each other, keep the newest one.
+ if (Existing->getCanonicalDecl() == New->getCanonicalDecl()) {
+ // If the existing declaration is somewhere in the previous declaration
+ // chain of the new declaration, then prefer the new declaration.
+ for (Decl::redecl_iterator RD = New->redecls_begin(),
+ RDEnd = New->redecls_end();
+ RD != RDEnd; ++RD) {
+ if (*RD == Existing)
+ return DMK_Replace;
+
+ if (RD->isCanonicalDecl())
+ break;
+ }
+
+ return DMK_Ignore;
}
+
+ return DMK_Different;
+}
+bool IdentifierResolver::tryAddTopLevelDecl(NamedDecl *D, DeclarationName Name){
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ readingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr) {
+ Name.setFETokenInfo(D);
+ return true;
+ }
+
IdDeclInfo *IDI;
-
+
if (isDeclPtr(Ptr)) {
- II->setFETokenInfo(NULL);
- IDI = &(*IdDeclInfos)[II];
NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
- IDI->AddDecl(PrevD);
- } else
- IDI = toIdDeclInfo(Ptr);
+
+ switch (compareDeclarations(PrevD, D)) {
+ case DMK_Different:
+ break;
+
+ case DMK_Ignore:
+ return false;
+
+ case DMK_Replace:
+ Name.setFETokenInfo(D);
+ return true;
+ }
+
+ Name.setFETokenInfo(NULL);
+ IDI = &(*IdDeclInfos)[Name];
+
+ // If the existing declaration is not visible in translation unit scope,
+ // then add the new top-level declaration first.
+ if (!PrevD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ IDI->AddDecl(D);
+ IDI->AddDecl(PrevD);
+ } else {
+ IDI->AddDecl(PrevD);
+ IDI->AddDecl(D);
+ }
+ return true;
+ }
+
+ IDI = toIdDeclInfo(Ptr);
+ // See whether this declaration is identical to any existing declarations.
+ // If not, find the right place to insert it.
+ for (IdDeclInfo::DeclsTy::iterator I = IDI->decls_begin(),
+ IEnd = IDI->decls_end();
+ I != IEnd; ++I) {
+
+ switch (compareDeclarations(*I, D)) {
+ case DMK_Different:
+ break;
+
+ case DMK_Ignore:
+ return false;
+
+ case DMK_Replace:
+ *I = D;
+ return true;
+ }
+
+ if (!(*I)->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ // We've found a declaration that is not visible from the translation
+ // unit (it's in an inner scope). Insert our declaration here.
+ IDI->InsertDecl(I, D);
+ return true;
+ }
+ }
+
+ // Add the declaration to the end.
IDI->AddDecl(D);
+ return true;
+}
+
+void IdentifierResolver::readingIdentifier(IdentifierInfo &II) {
+ if (II.isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(II);
+}
+
+void IdentifierResolver::updatingIdentifier(IdentifierInfo &II) {
+ if (II.isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(II);
+
+ if (II.isFromAST())
+ II.setChangedSinceDeserialization();
}
//===----------------------------------------------------------------------===//
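compareDeclarations() prefers the newest entry in a redeclaration chain; a compact illustration (hypothetical translation unit):

    void f();     // A: earlier declaration
    void f() {}   // B: redeclaration of A
    // compareDeclarations(/*Existing=*/A, /*New=*/B) walks B's previous-
    // declaration chain, finds A, and returns DMK_Replace, so B supersedes
    // A in the identifier chain; identical pointers yield DMK_Ignore.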
diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
index 3e74dfc..ab786c6 100644
--- a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
@@ -42,10 +42,10 @@ class JumpScopeChecker {
/// the parent scope is the function body.
unsigned ParentScope;
- /// InDiag - The diagnostic to emit if there is a jump into this scope.
+ /// InDiag - The note to emit if there is a jump into this scope.
unsigned InDiag;
- /// OutDiag - The diagnostic to emit if there is an indirect jump out
+ /// OutDiag - The note to emit if there is an indirect jump out
/// of this scope. Direct jumps always clean up their current scope
/// in an orderly way.
unsigned OutDiag;
@@ -74,10 +74,12 @@ private:
void VerifyJumps();
void VerifyIndirectJumps();
+ void NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes);
void DiagnoseIndirectJump(IndirectGotoStmt *IG, unsigned IGScope,
LabelDecl *Target, unsigned TargetScope);
void CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
- unsigned JumpDiag, unsigned JumpDiagWarning);
+ unsigned JumpDiag, unsigned JumpDiagWarning,
+ unsigned JumpDiagCXX98Compat);
unsigned GetDeepestCommonScope(unsigned A, unsigned B);
};
@@ -133,7 +135,7 @@ static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) {
return ScopePair(diag::note_protected_by_cleanup,
diag::note_exits_cleanup);
- if (Context.getLangOptions().ObjCAutoRefCount && VD->hasLocalStorage()) {
+ if (Context.getLangOpts().ObjCAutoRefCount && VD->hasLocalStorage()) {
switch (VD->getType().getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -147,8 +149,8 @@ static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) {
}
}
- if (Context.getLangOptions().CPlusPlus && VD->hasLocalStorage()) {
- // C++0x [stmt.dcl]p3:
+ if (Context.getLangOpts().CPlusPlus && VD->hasLocalStorage()) {
+ // C++11 [stmt.dcl]p3:
// A program that jumps from a point where a variable with automatic
// storage duration is not in scope to a point where it is in scope
// is ill-formed unless the variable has scalar type, class type with
@@ -177,13 +179,12 @@ static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) {
record = ctor->getParent();
if (ctor->isTrivial() && ctor->isDefaultConstructor()) {
- if (Context.getLangOptions().CPlusPlus0x) {
- inDiagToUse = (record->hasTrivialDestructor() ? 0 :
- diag::note_protected_by_variable_nontriv_destructor);
- } else {
- if (record->isPOD())
- inDiagToUse = 0;
- }
+ if (!record->hasTrivialDestructor())
+ inDiagToUse = diag::note_protected_by_variable_nontriv_destructor;
+ else if (!record->isPOD())
+ inDiagToUse = diag::note_protected_by_variable_non_pod;
+ else
+ inDiagToUse = 0;
}
} else if (VD->getType()->isArrayType()) {
record = VD->getType()->getBaseElementTypeUnsafe()
@@ -258,7 +259,7 @@ void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
diag::note_exits_block_captures_weak);
break;
case QualType::DK_none:
- llvm_unreachable("no-liftime captured variable");
+ llvm_unreachable("non-lifetime captured variable");
}
SourceLocation Loc = D->getLocation();
if (Loc.isInvalid())
@@ -477,7 +478,8 @@ void JumpScopeChecker::VerifyJumps() {
if (GotoStmt *GS = dyn_cast<GotoStmt>(Jump)) {
CheckJump(GS, GS->getLabel()->getStmt(), GS->getGotoLoc(),
diag::err_goto_into_protected_scope,
- diag::warn_goto_into_protected_scope);
+ diag::warn_goto_into_protected_scope,
+ diag::warn_cxx98_compat_goto_into_protected_scope);
continue;
}
@@ -486,7 +488,8 @@ void JumpScopeChecker::VerifyJumps() {
LabelDecl *Target = IGS->getConstantTarget();
CheckJump(IGS, Target->getStmt(), IGS->getGotoLoc(),
diag::err_goto_into_protected_scope,
- diag::warn_goto_into_protected_scope);
+ diag::warn_goto_into_protected_scope,
+ diag::warn_cxx98_compat_goto_into_protected_scope);
continue;
}
@@ -495,7 +498,8 @@ void JumpScopeChecker::VerifyJumps() {
SC = SC->getNextSwitchCase()) {
assert(LabelAndGotoScopes.count(SC) && "Case not visited?");
CheckJump(SS, SC, SC->getLocStart(),
- diag::err_switch_into_protected_scope, 0);
+ diag::err_switch_into_protected_scope, 0,
+ diag::warn_cxx98_compat_switch_into_protected_scope);
}
}
}
@@ -639,6 +643,39 @@ void JumpScopeChecker::VerifyIndirectJumps() {
}
}
+/// Return true if a particular error+note combination must be downgraded to a
+/// warning in Microsoft mode.
+static bool IsMicrosoftJumpWarning(unsigned JumpDiag, unsigned InDiagNote) {
+ return (JumpDiag == diag::err_goto_into_protected_scope &&
+ (InDiagNote == diag::note_protected_by_variable_init ||
+ InDiagNote == diag::note_protected_by_variable_nontriv_destructor));
+}
+
+/// Return true if a particular note should be downgraded to a compatibility
+/// warning in C++11 mode.
+static bool IsCXX98CompatWarning(Sema &S, unsigned InDiagNote) {
+ return S.getLangOpts().CPlusPlus0x &&
+ InDiagNote == diag::note_protected_by_variable_non_pod;
+}
+
+/// Produce primary diagnostic for an indirect jump statement.
+static void DiagnoseIndirectJumpStmt(Sema &S, IndirectGotoStmt *Jump,
+ LabelDecl *Target, bool &Diagnosed) {
+ if (Diagnosed)
+ return;
+ S.Diag(Jump->getGotoLoc(), diag::err_indirect_goto_in_protected_scope);
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
+ Diagnosed = true;
+}
+
+/// Produce note diagnostics for a jump into a protected scope.
+void JumpScopeChecker::NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes) {
+ assert(!ToScopes.empty());
+ for (unsigned I = 0, E = ToScopes.size(); I != E; ++I)
+ if (Scopes[ToScopes[I]].InDiag)
+ S.Diag(Scopes[ToScopes[I]].Loc, Scopes[ToScopes[I]].InDiag);
+}
+
/// Diagnose an indirect jump which is known to cross scopes.
void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
unsigned JumpScope,
@@ -646,36 +683,41 @@ void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
unsigned TargetScope) {
assert(JumpScope != TargetScope);
- S.Diag(Jump->getGotoLoc(), diag::err_indirect_goto_in_protected_scope);
- S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
-
unsigned Common = GetDeepestCommonScope(JumpScope, TargetScope);
+ bool Diagnosed = false;
// Walk out the scope chain until we reach the common ancestor.
for (unsigned I = JumpScope; I != Common; I = Scopes[I].ParentScope)
- if (Scopes[I].OutDiag)
+ if (Scopes[I].OutDiag) {
+ DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
S.Diag(Scopes[I].Loc, Scopes[I].OutDiag);
+ }
+
+ SmallVector<unsigned, 10> ToScopesCXX98Compat;
// Now walk into the scopes containing the label whose address was taken.
for (unsigned I = TargetScope; I != Common; I = Scopes[I].ParentScope)
- if (Scopes[I].InDiag)
+ if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
+ ToScopesCXX98Compat.push_back(I);
+ else if (Scopes[I].InDiag) {
+ DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
S.Diag(Scopes[I].Loc, Scopes[I].InDiag);
-}
+ }
-/// Return true if a particular error+note combination must be downgraded
-/// to a warning in Microsoft mode.
-static bool IsMicrosoftJumpWarning(unsigned JumpDiag, unsigned InDiagNote)
-{
- return (JumpDiag == diag::err_goto_into_protected_scope &&
- (InDiagNote == diag::note_protected_by_variable_init ||
- InDiagNote == diag::note_protected_by_variable_nontriv_destructor));
+ // Diagnose this jump if it would be ill-formed in C++98.
+ if (!Diagnosed && !ToScopesCXX98Compat.empty()) {
+ S.Diag(Jump->getGotoLoc(),
+ diag::warn_cxx98_compat_indirect_goto_in_protected_scope);
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
+ NoteJumpIntoScopes(ToScopesCXX98Compat);
+ }
}
-
/// CheckJump - Validate that the specified jump statement is valid: that it is
/// jumping within or out of its current scope, not into a deeper one.
void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
- unsigned JumpDiagError, unsigned JumpDiagWarning) {
+ unsigned JumpDiagError, unsigned JumpDiagWarning,
+ unsigned JumpDiagCXX98Compat) {
assert(LabelAndGotoScopes.count(From) && "Jump didn't get added to scopes?");
unsigned FromScope = LabelAndGotoScopes[From];
@@ -691,12 +733,15 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
if (CommonScope == ToScope) return;
// Pull out (and reverse) any scopes we might need to diagnose skipping.
+ SmallVector<unsigned, 10> ToScopesCXX98Compat;
SmallVector<unsigned, 10> ToScopesError;
SmallVector<unsigned, 10> ToScopesWarning;
for (unsigned I = ToScope; I != CommonScope; I = Scopes[I].ParentScope) {
- if (S.getLangOptions().MicrosoftMode && JumpDiagWarning != 0 &&
+ if (S.getLangOpts().MicrosoftMode && JumpDiagWarning != 0 &&
IsMicrosoftJumpWarning(JumpDiagError, Scopes[I].InDiag))
ToScopesWarning.push_back(I);
+ else if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
+ ToScopesCXX98Compat.push_back(I);
else if (Scopes[I].InDiag)
ToScopesError.push_back(I);
}
@@ -704,16 +749,19 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
// Handle warnings.
if (!ToScopesWarning.empty()) {
S.Diag(DiagLoc, JumpDiagWarning);
- for (unsigned i = 0, e = ToScopesWarning.size(); i != e; ++i)
- S.Diag(Scopes[ToScopesWarning[i]].Loc, Scopes[ToScopesWarning[i]].InDiag);
+ NoteJumpIntoScopes(ToScopesWarning);
}
// Handle errors.
if (!ToScopesError.empty()) {
S.Diag(DiagLoc, JumpDiagError);
- // Emit diagnostics note for whatever is left in ToScopesError.
- for (unsigned i = 0, e = ToScopesError.size(); i != e; ++i)
- S.Diag(Scopes[ToScopesError[i]].Loc, Scopes[ToScopesError[i]].InDiag);
+ NoteJumpIntoScopes(ToScopesError);
+ }
+
+ // Handle -Wc++98-compat warnings if the jump is well-formed.
+ if (ToScopesError.empty() && !ToScopesCXX98Compat.empty()) {
+ S.Diag(DiagLoc, JumpDiagCXX98Compat);
+ NoteJumpIntoScopes(ToScopesCXX98Compat);
}
}
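A sketch of the jump the new C++98-compat path flags: C++11 permits jumping into the scope of a variable whose type is trivially constructible and destructible even when it is not a C++98 POD, so the code below is valid C++11 but ill-formed C++98 (hypothetical example):

    struct S { private: int x; };  // trivial, but not a POD in C++98

    void f(bool b) {
      if (b) goto skip;  // warn_cxx98_compat_goto_into_protected_scope
      S s;               // note_protected_by_variable_non_pod points here
    skip:
      ;
    }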
diff --git a/contrib/llvm/tools/clang/lib/Sema/MultiInitializer.cpp b/contrib/llvm/tools/clang/lib/Sema/MultiInitializer.cpp
deleted file mode 100644
index 8bd2213..0000000
--- a/contrib/llvm/tools/clang/lib/Sema/MultiInitializer.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-//===--- MultiInitializer.cpp - Initializer expression group ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the MultiInitializer class, which can represent a list
-// initializer or a parentheses-wrapped group of expressions in a C++ member
-// initializer.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Sema/MultiInitializer.h"
-#include "clang/Sema/Initialization.h"
-#include "clang/Sema/Sema.h"
-#include "clang/AST/Expr.h"
-
-using namespace clang;
-
-InitListExpr *MultiInitializer::getInitList() const {
- return cast<InitListExpr>(InitListOrExpressions.get<Expr*>());
-}
-
-SourceLocation MultiInitializer::getStartLoc() const {
- return isInitializerList() ? getInitList()->getLBraceLoc() : LParenLoc;
-}
-
-SourceLocation MultiInitializer::getEndLoc() const {
- return isInitializerList() ? getInitList()->getRBraceLoc() : RParenLoc;
-}
-
-MultiInitializer::iterator MultiInitializer::begin() const {
- return isInitializerList() ? getInitList()->getInits() : getExpressions();
-}
-
-MultiInitializer::iterator MultiInitializer::end() const {
- if (isInitializerList()) {
- InitListExpr *ILE = getInitList();
- return ILE->getInits() + ILE->getNumInits();
- }
- return getExpressions() + NumInitializers;
-}
-
-bool MultiInitializer::isTypeDependent() const {
- if (isInitializerList())
- return getInitList()->isTypeDependent();
- for (iterator I = begin(), E = end(); I != E; ++I) {
- if ((*I)->isTypeDependent())
- return true;
- }
- return false;
-}
-
-bool MultiInitializer::DiagnoseUnexpandedParameterPack(Sema &SemaRef) const {
- if (isInitializerList())
- return SemaRef.DiagnoseUnexpandedParameterPack(getInitList());
- for (iterator I = begin(), E = end(); I != E; ++I) {
- if (SemaRef.DiagnoseUnexpandedParameterPack(*I))
- return true;
- }
- return false;
-}
-
-Expr *MultiInitializer::CreateInitExpr(ASTContext &Ctx, QualType T) const {
- if (isInitializerList())
- return InitListOrExpressions.get<Expr*>();
-
- return new (Ctx) ParenListExpr(Ctx, LParenLoc, getExpressions(),
- NumInitializers, RParenLoc, T);
-}
-
-ExprResult MultiInitializer::PerformInit(Sema &SemaRef,
- InitializedEntity Entity,
- InitializationKind Kind) const {
- Expr *Single;
- Expr **Args;
- unsigned NumArgs;
- if (isInitializerList()) {
- Single = InitListOrExpressions.get<Expr*>();
- Args = &Single;
- NumArgs = 1;
- } else {
- Args = getExpressions();
- NumArgs = NumInitializers;
- }
- InitializationSequence InitSeq(SemaRef, Entity, Kind, Args, NumArgs);
- return InitSeq.Perform(SemaRef, Entity, Kind,
- MultiExprArg(SemaRef, Args, NumArgs), 0);
-}
diff --git a/contrib/llvm/tools/clang/lib/Sema/Scope.cpp b/contrib/llvm/tools/clang/lib/Sema/Scope.cpp
index 833a59f..10f12ce 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Scope.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/Scope.cpp
@@ -19,23 +19,28 @@ using namespace clang;
void Scope::Init(Scope *parent, unsigned flags) {
AnyParent = parent;
Flags = flags;
-
+
+ if (parent && !(flags & FnScope)) {
+ BreakParent = parent->BreakParent;
+ ContinueParent = parent->ContinueParent;
+ } else {
+ // Control scopes do not contain the contents of nested function scopes for
+ // control flow purposes.
+ BreakParent = ContinueParent = 0;
+ }
+
if (parent) {
Depth = parent->Depth + 1;
PrototypeDepth = parent->PrototypeDepth;
PrototypeIndex = 0;
FnParent = parent->FnParent;
- BreakParent = parent->BreakParent;
- ContinueParent = parent->ContinueParent;
- ControlParent = parent->ControlParent;
BlockParent = parent->BlockParent;
TemplateParamParent = parent->TemplateParamParent;
} else {
Depth = 0;
PrototypeDepth = 0;
PrototypeIndex = 0;
- FnParent = BreakParent = ContinueParent = BlockParent = 0;
- ControlParent = 0;
+ FnParent = BlockParent = 0;
TemplateParamParent = 0;
}
@@ -43,7 +48,6 @@ void Scope::Init(Scope *parent, unsigned flags) {
if (flags & FnScope) FnParent = this;
if (flags & BreakScope) BreakParent = this;
if (flags & ContinueScope) ContinueParent = this;
- if (flags & ControlScope) ControlParent = this;
if (flags & BlockScope) BlockParent = this;
if (flags & TemplateParamScope) TemplateParamParent = this;
@@ -55,3 +59,13 @@ void Scope::Init(Scope *parent, unsigned flags) {
Entity = 0;
ErrorTrap.reset();
}
+
+bool Scope::containedInPrototypeScope() const {
+ const Scope *S = this;
+ while (S) {
+ if (S->isFunctionPrototypeScope())
+ return true;
+ S = S->getParent();
+ }
+ return false;
+}
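Two behaviors pinned down by this change, sketched in hypothetical user code:

    // 1) Break/continue targets no longer leak into nested function scopes:
    void f() {
      for (;;) {
        auto g = []{ /* a 'break;' here must not bind to the 'for' above */ };
        (void)g;
      }
    }

    // 2) containedInPrototypeScope(): parameter declarations live in a
    // function-prototype scope while the declarator is being parsed:
    void h(int n, int arr[n]);  // C99-style VLA parameter; 'n' is visible
                                // to 'arr' only via the prototype scope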
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
index 533b21c..fcdfcac 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -33,6 +33,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/PartialDiagnostic.h"
@@ -54,10 +55,12 @@ void FunctionScopeInfo::Clear() {
}
BlockScopeInfo::~BlockScopeInfo() { }
+LambdaScopeInfo::~LambdaScopeInfo() { }
-PrintingPolicy Sema::getPrintingPolicy() const {
+PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
+ const Preprocessor &PP) {
PrintingPolicy Policy = Context.getPrintingPolicy();
- Policy.Bool = getLangOptions().Bool;
+ Policy.Bool = Context.getLangOpts().Bool;
if (!Policy.Bool) {
if (MacroInfo *BoolMacro = PP.getMacroInfo(&Context.Idents.get("bool"))) {
Policy.Bool = BoolMacro->isObjectLike() &&
@@ -74,36 +77,26 @@ void Sema::ActOnTranslationUnitScope(Scope *S) {
PushDeclContext(S, Context.getTranslationUnitDecl());
VAListTagName = PP.getIdentifierInfo("__va_list_tag");
-
- if (PP.getLangOptions().ObjC1) {
- // Synthesize "@class Protocol;
- if (Context.getObjCProtoType().isNull()) {
- ObjCInterfaceDecl *ProtocolDecl =
- ObjCInterfaceDecl::Create(Context, CurContext, SourceLocation(),
- &Context.Idents.get("Protocol"),
- SourceLocation(), true);
- Context.setObjCProtoType(Context.getObjCInterfaceType(ProtocolDecl));
- PushOnScopeChains(ProtocolDecl, TUScope, false);
- }
- }
}
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind,
CodeCompleteConsumer *CodeCompleter)
- : TheTargetAttributesSema(0), FPFeatures(pp.getLangOptions()),
- LangOpts(pp.getLangOptions()), PP(pp), Context(ctxt), Consumer(consumer),
+ : TheTargetAttributesSema(0), FPFeatures(pp.getLangOpts()),
+ LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
CollectStats(false), ExternalSource(0), CodeCompleter(CodeCompleter),
CurContext(0), OriginalLexicalContext(0),
PackContext(0), MSStructPragmaOn(false), VisContext(0),
- ExprNeedsCleanups(0), LateTemplateParser(0), OpaqueParser(0),
- IdResolver(pp.getLangOptions()), CXXTypeInfoDecl(0), MSVCGuidDecl(0),
+ ExprNeedsCleanups(false), LateTemplateParser(0), OpaqueParser(0),
+ IdResolver(pp), StdInitializerList(0), CXXTypeInfoDecl(0), MSVCGuidDecl(0),
+ NSNumberDecl(0), NSArrayDecl(0), ArrayWithObjectsMethod(0),
+ NSDictionaryDecl(0), DictionaryWithObjectsMethod(0),
GlobalNewDeleteDeclared(false),
ObjCShouldCallSuperDealloc(false),
ObjCShouldCallSuperFinalize(false),
TUKind(TUKind),
- NumSFINAEErrors(0), SuppressAccessChecking(false),
+ NumSFINAEErrors(0), InFunctionDeclarator(0), SuppressAccessChecking(false),
AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
CurrentInstantiationScope(0), TyposCorrected(0),
@@ -111,8 +104,13 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
{
TUScope = 0;
LoadedExternalKnownNamespaces = false;
-
- if (getLangOptions().CPlusPlus)
+ for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
+ NSNumberLiteralMethods[I] = 0;
+
+ if (getLangOpts().ObjC1)
+ NSAPIObj.reset(new NSAPI(Context));
+
+ if (getLangOpts().CPlusPlus)
FieldCollector.reset(new CXXFieldCollector());
// Tell diagnostics how to render things from the AST library.
@@ -120,7 +118,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
&Context);
ExprEvalContexts.push_back(
- ExpressionEvaluationContextRecord(PotentiallyEvaluated, 0, false));
+ ExpressionEvaluationContextRecord(PotentiallyEvaluated, 0,
+ false, 0, false));
FunctionScopes.push_back(new FunctionScopeInfo(Diags));
}
@@ -143,33 +142,38 @@ void Sema::Initialize() {
// If either of the 128-bit integer types are unavailable to name lookup,
// define them now.
DeclarationName Int128 = &Context.Idents.get("__int128_t");
- if (IdentifierResolver::begin(Int128) == IdentifierResolver::end())
+ if (IdResolver.begin(Int128) == IdResolver.end())
PushOnScopeChains(Context.getInt128Decl(), TUScope);
DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
- if (IdentifierResolver::begin(UInt128) == IdentifierResolver::end())
+ if (IdResolver.begin(UInt128) == IdResolver.end())
PushOnScopeChains(Context.getUInt128Decl(), TUScope);
}
// Initialize predefined Objective-C types:
- if (PP.getLangOptions().ObjC1) {
+ if (PP.getLangOpts().ObjC1) {
// If 'SEL' does not yet refer to any declarations, make it refer to the
// predefined 'SEL'.
DeclarationName SEL = &Context.Idents.get("SEL");
- if (IdentifierResolver::begin(SEL) == IdentifierResolver::end())
+ if (IdResolver.begin(SEL) == IdResolver.end())
PushOnScopeChains(Context.getObjCSelDecl(), TUScope);
// If 'id' does not yet refer to any declarations, make it refer to the
// predefined 'id'.
DeclarationName Id = &Context.Idents.get("id");
- if (IdentifierResolver::begin(Id) == IdentifierResolver::end())
+ if (IdResolver.begin(Id) == IdResolver.end())
PushOnScopeChains(Context.getObjCIdDecl(), TUScope);
// Create the built-in typedef for 'Class'.
DeclarationName Class = &Context.Idents.get("Class");
- if (IdentifierResolver::begin(Class) == IdentifierResolver::end())
+ if (IdResolver.begin(Class) == IdResolver.end())
PushOnScopeChains(Context.getObjCClassDecl(), TUScope);
+
+ // Create the built-in forward declaration for 'Protocol'.
+ DeclarationName Protocol = &Context.Idents.get("Protocol");
+ if (IdResolver.begin(Protocol) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
}
}
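
The lazy injection above makes names like __int128_t visible only when nothing else has declared them. At the user level (a sketch; assumes a target where Clang provides 128-bit integers):

    __int128_t big = (__int128_t)1 << 100;   // builtin typedef, no header needed
    __uint128_t ubig = ~(__uint128_t)0;      // 2^128 - 1
    int main() { return ((big >> 100) == 1 && ubig != 0) ? 0 : 1; }
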
@@ -240,13 +244,28 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
CastKind Kind, ExprValueKind VK,
const CXXCastPath *BasePath,
CheckedConversionKind CCK) {
+#ifndef NDEBUG
+ if (VK == VK_RValue && !E->isRValue()) {
+ switch (Kind) {
+ default:
+ assert(0 && "can't implicitly cast lvalue to rvalue with this cast kind");
+ case CK_LValueToRValue:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_ToVoid:
+ break;
+ }
+ }
+ assert((VK == VK_RValue || !E->isRValue()) && "can't cast rvalue to lvalue");
+#endif
+
QualType ExprTy = Context.getCanonicalType(E->getType());
QualType TypeTy = Context.getCanonicalType(Ty);
if (ExprTy == TypeTy)
return Owned(E);
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
CheckObjCARCConversion(SourceRange(), Ty, E, CCK);
// If this is a derived-to-base cast through a virtual base, we
@@ -303,7 +322,7 @@ static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
// Later redecls may add new information resulting in not having to warn,
// so check again.
- DeclToCheck = FD->getMostRecentDeclaration();
+ DeclToCheck = FD->getMostRecentDecl();
if (DeclToCheck != FD)
return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
}
@@ -317,7 +336,7 @@ static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
// Later redecls may add new information resulting in not having to warn,
// so check again.
- DeclToCheck = VD->getMostRecentDeclaration();
+ DeclToCheck = VD->getMostRecentDecl();
if (DeclToCheck != VD)
return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
}
@@ -407,6 +426,8 @@ void Sema::ActOnEndOfTranslationUnit() {
// Only complete translation units define vtables and perform implicit
// instantiations.
if (TUKind == TU_Complete) {
+ DiagnoseUseOfUnimplementedSelectors();
+
// If any dynamic classes have their key function defined within
// this translation unit, then those vtables are considered "used" and must
// be emitted.
@@ -468,34 +489,32 @@ void Sema::ActOnEndOfTranslationUnit() {
}
if (TUKind == TU_Module) {
- // Mark any macros from system headers (in /usr/include) as exported, along
- // with our own Clang headers.
- // FIXME: This is a gross hack to deal with the fact that system headers
- // are #include'd in many places within module headers, but are not
- // themselves modularized. This doesn't actually work, but it lets us
- // focus on other issues for the moment.
- for (Preprocessor::macro_iterator M = PP.macro_begin(false),
- MEnd = PP.macro_end(false);
- M != MEnd; ++M) {
- if (M->second &&
- !M->second->isExported() &&
- !M->second->isBuiltinMacro()) {
- SourceLocation Loc = M->second->getDefinitionLoc();
- if (SourceMgr.isInSystemHeader(Loc)) {
- const FileEntry *File
- = SourceMgr.getFileEntryForID(SourceMgr.getFileID(Loc));
- if (File &&
- ((StringRef(File->getName()).find("lib/clang")
- != StringRef::npos) ||
- (StringRef(File->getName()).find("usr/include")
- != StringRef::npos) ||
- (StringRef(File->getName()).find("usr/local/include")
- != StringRef::npos)))
- M->second->setExportLocation(Loc);
+ // If we are building a module, resolve all of the exported declarations
+ // now.
+ if (Module *CurrentModule = PP.getCurrentModule()) {
+ ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
+
+ llvm::SmallVector<Module *, 2> Stack;
+ Stack.push_back(CurrentModule);
+ while (!Stack.empty()) {
+ Module *Mod = Stack.back();
+ Stack.pop_back();
+
+ // Resolve the exported declarations.
+ // FIXME: Actually complain, once we figure out how to teach the
+ // diagnostic client to deal with complaints in the module map at this
+ // point.
+ ModMap.resolveExports(Mod, /*Complain=*/false);
+
+ // Queue the submodules, so their exports will also be resolved.
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ Stack.push_back(*Sub);
}
}
}
-
+
// Modules don't need any of the checking below.
TUScope = 0;
return;
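
The export-resolution walk above uses an explicit stack instead of recursion. A minimal sketch of the same worklist pattern, with a hypothetical Node type standing in for Module:

    #include <vector>
    struct Node { std::vector<Node*> children; };
    void visitAll(Node *root) {
      std::vector<Node*> stack;
      stack.push_back(root);
      while (!stack.empty()) {
        Node *n = stack.back();
        stack.pop_back();
        // ... process n here (the diff resolves Mod's exports at this point) ...
        for (unsigned i = 0; i != n->children.size(); ++i)
          stack.push_back(n->children[i]);
      }
    }
    int main() { Node root; visitAll(&root); }
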
@@ -620,8 +639,16 @@ void Sema::ActOnEndOfTranslationUnit() {
DeclContext *Sema::getFunctionLevelDeclContext() {
DeclContext *DC = CurContext;
- while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
- DC = DC->getParent();
+ while (true) {
+ if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC)) {
+ DC = DC->getParent();
+ } else if (isa<CXXMethodDecl>(DC) &&
+ cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
+ cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
+ DC = DC->getParent()->getParent();
+ }
+ else break;
+ }
return DC;
}
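
The new loop also skips a lambda's call operator (and its closure class), so "the current function" is the enclosing member function. At the user level (C++11):

    struct S {
      int n;
      S() : n(41) {}
      int f() {
        // Inside the lambda body, 'this' and 'n' resolve against the
        // enclosing member function f, which is what the loop above returns.
        auto g = [this] { return n + 1; };
        return g();
      }
    };
    int main() { S s; return s.f() - 42; }
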
@@ -646,58 +673,75 @@ NamedDecl *Sema::getCurFunctionOrMethodDecl() {
return 0;
}
-Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
- if (!isActive())
- return;
-
- if (llvm::Optional<TemplateDeductionInfo*> Info = SemaRef.isSFINAEContext()) {
- switch (DiagnosticIDs::getDiagnosticSFINAEResponse(getDiagID())) {
+void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
+ // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
+ // and yet we also use the current diag ID on the DiagnosticsEngine. This has
+ // been made more painfully obvious by the refactor that introduced this
+ // function, but it is possible that the incoming argument can be
+ // eliminated. If it truly cannot be (for example, there is some reentrancy
+ // issue I am not seeing yet), then there should at least be a clarifying
+ // comment somewhere.
+ if (llvm::Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) {
+ switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
+ Diags.getCurrentDiagID())) {
case DiagnosticIDs::SFINAE_Report:
- // Fall through; we'll report the diagnostic below.
+ // We'll report the diagnostic below.
break;
- case DiagnosticIDs::SFINAE_AccessControl:
- // Per C++ Core Issue 1170, access control is part of SFINAE.
- // Additionally, the AccessCheckingSFINAE flag can be used to temporary
- // make access control a part of SFINAE for the purposes of checking
- // type traits.
- if (!SemaRef.AccessCheckingSFINAE &&
- !SemaRef.getLangOptions().CPlusPlus0x)
- break;
-
case DiagnosticIDs::SFINAE_SubstitutionFailure:
// Count this failure so that we know that template argument deduction
// has failed.
- ++SemaRef.NumSFINAEErrors;
- SemaRef.Diags.setLastDiagnosticIgnored();
- SemaRef.Diags.Clear();
- Clear();
+ ++NumSFINAEErrors;
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
return;
+ case DiagnosticIDs::SFINAE_AccessControl: {
+ // Per C++ Core Issue 1170, access control is part of SFINAE.
+ // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
+ // make access control a part of SFINAE for the purposes of checking
+ // type traits.
+ if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus0x)
+ break;
+
+ SourceLocation Loc = Diags.getCurrentDiagLoc();
+
+ // Suppress this diagnostic.
+ ++NumSFINAEErrors;
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
+
+ // Now the diagnostic state is clear, produce a C++98 compatibility
+ // warning.
+ Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
+
+ // The last diagnostic which Sema produced was ignored. Suppress any
+ // notes attached to it.
+ Diags.setLastDiagnosticIgnored();
+ return;
+ }
+
case DiagnosticIDs::SFINAE_Suppress:
// Make a copy of this suppressed diagnostic and store it with the
// template-deduction information.
- FlushCounts();
- Diagnostic DiagInfo(&SemaRef.Diags);
+ Diagnostic DiagInfo(&Diags);
if (*Info)
(*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
- PartialDiagnostic(DiagInfo,
- SemaRef.Context.getDiagAllocator()));
+ PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
// Suppress this diagnostic.
- SemaRef.Diags.setLastDiagnosticIgnored();
- SemaRef.Diags.Clear();
- Clear();
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
return;
}
}
// Set up the context's printing policy based on our current state.
- SemaRef.Context.setPrintingPolicy(SemaRef.getPrintingPolicy());
+ Context.setPrintingPolicy(getPrintingPolicy());
// Emit the diagnostic.
- if (!this->Emit())
+ if (!Diags.EmitCurrentDiagnostic())
return;
// If this is not a note, and we're in a template instantiation
@@ -705,20 +749,14 @@ Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
// we emitted an error, print a template instantiation
// backtrace.
if (!DiagnosticIDs::isBuiltinNote(DiagID) &&
- !SemaRef.ActiveTemplateInstantiations.empty() &&
- SemaRef.ActiveTemplateInstantiations.back()
- != SemaRef.LastTemplateInstantiationErrorContext) {
- SemaRef.PrintInstantiationStack();
- SemaRef.LastTemplateInstantiationErrorContext
- = SemaRef.ActiveTemplateInstantiations.back();
+ !ActiveTemplateInstantiations.empty() &&
+ ActiveTemplateInstantiations.back()
+ != LastTemplateInstantiationErrorContext) {
+ PrintInstantiationStack();
+ LastTemplateInstantiationErrorContext = ActiveTemplateInstantiations.back();
}
}
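
What the SFINAE_SubstitutionFailure path above means in user code: a failed substitution during overload resolution is counted (NumSFINAEErrors) and suppressed rather than reported. A standard example, not from the diff:

    #include <type_traits>
    // Viable only when T is a class type; otherwise substitution fails quietly.
    template <typename T>
    typename std::enable_if<std::is_class<T>::value, int>::type pick(T) {
      return 1;
    }
    int pick(...) { return 0; }      // chosen when the template is SFINAE'd out
    int main() { return pick(42); }  // returns 0; no diagnostic is emitted
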
-Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID) {
- DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
- return SemaDiagnosticBuilder(DB, *this, DiagID);
-}
-
Sema::SemaDiagnosticBuilder
Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
SemaDiagnosticBuilder Builder(Diag(Loc, PD.getDiagID()));
@@ -795,8 +833,14 @@ void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
BlockScope, Block));
}
-void Sema::PopFunctionOrBlockScope(const AnalysisBasedWarnings::Policy *WP,
- const Decl *D, const BlockExpr *blkExpr) {
+void Sema::PushLambdaScope(CXXRecordDecl *Lambda,
+ CXXMethodDecl *CallOperator) {
+ FunctionScopes.push_back(new LambdaScopeInfo(getDiagnostics(), Lambda,
+ CallOperator));
+}
+
+void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
+ const Decl *D, const BlockExpr *blkExpr) {
FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
assert(!FunctionScopes.empty() && "mismatched push/pop!");
@@ -818,6 +862,17 @@ void Sema::PopFunctionOrBlockScope(const AnalysisBasedWarnings::Policy *WP,
}
}
+void Sema::PushCompoundScope() {
+ getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo());
+}
+
+void Sema::PopCompoundScope() {
+ FunctionScopeInfo *CurFunction = getCurFunction();
+ assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
+
+ CurFunction->CompoundScopes.pop_back();
+}
+
/// \brief Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
@@ -831,13 +886,17 @@ BlockScopeInfo *Sema::getCurBlock() {
return dyn_cast<BlockScopeInfo>(FunctionScopes.back());
}
+LambdaScopeInfo *Sema::getCurLambda() {
+ if (FunctionScopes.empty())
+ return 0;
+
+ return dyn_cast<LambdaScopeInfo>(FunctionScopes.back());
+}
+
// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
-std::pair<ObjCMethodList, ObjCMethodList>
-ExternalSemaSource::ReadMethodPool(Selector Sel) {
- return std::pair<ObjCMethodList, ObjCMethodList>();
-}
+void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::ReadKnownNamespaces(
SmallVectorImpl<NamespaceDecl *> &Namespaces) {
@@ -963,7 +1022,7 @@ static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
}
NamedDecl *Fn = (*It)->getUnderlyingDecl();
- S.Diag(Fn->getLocStart(), diag::note_possible_target_of_call);
+ S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
++ShownOverloads;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
index 6cd9230..dea5e76 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/DependentDiagnostic.h"
#include "clang/AST/ExprCXX.h"
@@ -164,6 +165,10 @@ struct AccessTarget : public AccessedEntity {
initialize();
}
+ bool isInstanceMember() const {
+ return (isMemberAccess() && getTargetDecl()->isCXXInstanceMember());
+ }
+
bool hasInstanceContext() const {
return HasInstanceContext;
}
@@ -264,6 +269,9 @@ static AccessResult IsDerivedFromInclusive(const CXXRecordDecl *Derived,
SmallVector<const CXXRecordDecl*, 8> Queue; // actually a stack
while (true) {
+ if (Derived->isDependentContext() && !Derived->hasDefinition())
+ return AR_dependent;
+
for (CXXRecordDecl::base_class_const_iterator
I = Derived->bases_begin(), E = Derived->bases_end(); I != E; ++I) {
@@ -667,18 +675,25 @@ struct ProtectedFriendContext {
}
/// Search for a class P that EC is a friend of, under the constraint
-/// InstanceContext <= P <= NamingClass
+/// InstanceContext <= P
+/// if InstanceContext exists, or else
+/// NamingClass <= P
/// and with the additional restriction that a protected member of
-/// NamingClass would have some natural access in P.
+/// NamingClass would have some natural access in P, which implicitly
+/// imposes the constraint that P <= NamingClass.
///
-/// That second condition isn't actually quite right: the condition in
-/// the standard is whether the target would have some natural access
-/// in P. The difference is that the target might be more accessible
-/// along some path not passing through NamingClass. Allowing that
+/// This isn't quite the condition laid out in the standard.
+/// Instead of saying that a notional protected member of NamingClass
+/// would have to have some natural access in P, it says the actual
+/// target has to have some natural access in P, which opens up the
+/// possibility that the target (which is not necessarily a member
+/// of NamingClass) might be more accessible along some path not
+/// passing through it. That's really a bad idea, though, because it
/// introduces two problems:
-/// - It breaks encapsulation because you can suddenly access a
-/// forbidden base class's members by subclassing it elsewhere.
-/// - It makes access substantially harder to compute because it
+/// - Most importantly, it breaks encapsulation because you can
+/// access a forbidden base class's members by directly subclassing
+/// it elsewhere.
+/// - It also makes access substantially harder to compute because it
/// breaks the hill-climbing algorithm: knowing that the target is
/// accessible in some base class would no longer let you change
/// the question solely to whether the base class is accessible,
@@ -688,9 +703,15 @@ struct ProtectedFriendContext {
static AccessResult GetProtectedFriendKind(Sema &S, const EffectiveContext &EC,
const CXXRecordDecl *InstanceContext,
const CXXRecordDecl *NamingClass) {
- assert(InstanceContext->getCanonicalDecl() == InstanceContext);
+ assert(InstanceContext == 0 ||
+ InstanceContext->getCanonicalDecl() == InstanceContext);
assert(NamingClass->getCanonicalDecl() == NamingClass);
+ // If we don't have an instance context, our constraints give us
+ // that NamingClass <= P <= NamingClass, i.e. P == NamingClass.
+ // This is just the usual friendship check.
+ if (!InstanceContext) return GetFriendKind(S, EC, NamingClass);
+
ProtectedFriendContext PRC(S, EC, InstanceContext, NamingClass);
if (PRC.findFriendship(InstanceContext)) return AR_accessible;
if (PRC.EverDependent) return AR_dependent;
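
The constraints above implement C++ [class.protected]. At the user level, both the member-access form and the no-instance-context (pointer-to-member) form look like this:

    class A { protected: int x; };
    class B : public A {
      void f(A &a, B &b) {
        (void)b.x;            // OK: the object expression is of class B
        // (void)a.x;         // ill-formed: A is neither B nor derived from B
        int A::*p = &B::x;    // OK: the nested-name-specifier names B
        // int A::*q = &A::x; // ill-formed: naming class A, context class B
        (void)p;
      }
    };
    int main() {}
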
@@ -733,15 +754,6 @@ static AccessResult HasAccess(Sema &S,
case AR_dependent: OnFailure = AR_dependent; continue;
}
- if (!Target.hasInstanceContext())
- return AR_accessible;
-
- const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S);
- if (!InstanceContext) {
- OnFailure = AR_dependent;
- continue;
- }
-
// C++ [class.protected]p1:
// An additional access check beyond those described earlier in
// [class.access] is applied when a non-static data member or
@@ -754,8 +766,49 @@ static AccessResult HasAccess(Sema &S,
// expression. In this case, the class of the object expression
// shall be C or a class derived from C.
//
- // We interpret this as a restriction on [M3]. Most of the
- // conditions are encoded by not having any instance context.
+ // We interpret this as a restriction on [M3].
+
+ // In this part of the code, 'C' is just our context class ECRecord.
+
+ // These rules are different if we don't have an instance context.
+ if (!Target.hasInstanceContext()) {
+ // If it's not an instance member, these restrictions don't apply.
+ if (!Target.isInstanceMember()) return AR_accessible;
+
+ // If it's an instance member, use the pointer-to-member rule
+ // that the naming class has to be derived from the effective
+ // context.
+
+ // Despite the standard's confident wording, there is a case
+ // where you can have an instance member that's neither in a
+ // pointer-to-member expression nor in a member access: when
+ // it names a field in an unevaluated context that can't be an
+ // implicit member. Pending clarification, we just apply the
+ // same naming-class restriction here.
+ // FIXME: we're probably not correctly adding the
+ // protected-member restriction when we retroactively convert
+ // an expression to being evaluated.
+
+ // We know that ECRecord derives from NamingClass. The
+ // restriction says to check whether NamingClass derives from
+ // ECRecord, but that's not really necessary: two distinct
+ // classes can't be recursively derived from each other. So
+ // along this path, we just need to check whether the classes
+ // are equal.
+ if (NamingClass == ECRecord) return AR_accessible;
+
+ // Otherwise, this context class tells us nothing; on to the next.
+ continue;
+ }
+
+ assert(Target.isInstanceMember());
+
+ const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S);
+ if (!InstanceContext) {
+ OnFailure = AR_dependent;
+ continue;
+ }
+
switch (IsDerivedFromInclusive(InstanceContext, ECRecord)) {
case AR_accessible: return AR_accessible;
case AR_inaccessible: continue;
@@ -774,9 +827,14 @@ static AccessResult HasAccess(Sema &S,
// *unless* the [class.protected] restriction applies. If it does,
// however, we should ignore whether the naming class is a friend,
// and instead rely on whether any potential P is a friend.
- if (Access == AS_protected && Target.hasInstanceContext()) {
- const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S);
- if (!InstanceContext) return AR_dependent;
+ if (Access == AS_protected && Target.isInstanceMember()) {
+ // Compute the instance context if possible.
+ const CXXRecordDecl *InstanceContext = 0;
+ if (Target.hasInstanceContext()) {
+ InstanceContext = Target.resolveInstanceContext(S);
+ if (!InstanceContext) return AR_dependent;
+ }
+
switch (GetProtectedFriendKind(S, EC, InstanceContext, NamingClass)) {
case AR_accessible: return AR_accessible;
case AR_inaccessible: return OnFailure;
@@ -793,7 +851,6 @@ static AccessResult HasAccess(Sema &S,
// Silence bogus warnings
llvm_unreachable("impossible friendship kind");
- return OnFailure;
}
/// Finds the best path from the naming class to the declaring class,
@@ -947,31 +1004,46 @@ static CXXBasePath *FindBestPath(Sema &S,
static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC,
AccessTarget &Target) {
// Only applies to instance accesses.
- if (!Target.hasInstanceContext())
+ if (!Target.isInstanceMember())
return false;
+
assert(Target.isMemberAccess());
- NamedDecl *D = Target.getTargetDecl();
- const CXXRecordDecl *DeclaringClass = Target.getDeclaringClass();
- DeclaringClass = DeclaringClass->getCanonicalDecl();
+ const CXXRecordDecl *NamingClass = Target.getNamingClass();
+ NamingClass = NamingClass->getCanonicalDecl();
for (EffectiveContext::record_iterator
I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
const CXXRecordDecl *ECRecord = *I;
- switch (IsDerivedFromInclusive(ECRecord, DeclaringClass)) {
+ switch (IsDerivedFromInclusive(ECRecord, NamingClass)) {
case AR_accessible: break;
case AR_inaccessible: continue;
case AR_dependent: continue;
}
// The effective context is a subclass of the declaring class.
- // If that class isn't a superclass of the instance context,
- // then the [class.protected] restriction applies.
+ // Check whether the [class.protected] restriction is limiting
+ // access.
// To get this exactly right, this might need to be checked more
// holistically; it's not necessarily the case that gaining
// access here would grant us access overall.
+ NamedDecl *D = Target.getTargetDecl();
+
+ // If we don't have an instance context, [class.protected] says the
+ // naming class has to equal the context class.
+ if (!Target.hasInstanceContext()) {
+ // If it does, the restriction doesn't apply.
+ if (NamingClass == ECRecord) continue;
+
+ // TODO: it would be great to have a fixit here, since this is
+ // such an obvious error.
+ S.Diag(D->getLocation(), diag::note_access_protected_restricted_noobject)
+ << S.Context.getTypeDeclType(ECRecord);
+ return true;
+ }
+
const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S);
assert(InstanceContext && "diagnosing dependent access");
@@ -979,12 +1051,25 @@ static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC,
case AR_accessible: continue;
case AR_dependent: continue;
case AR_inaccessible:
- S.Diag(D->getLocation(), diag::note_access_protected_restricted)
- << (InstanceContext != Target.getNamingClass()->getCanonicalDecl())
- << S.Context.getTypeDeclType(InstanceContext)
- << S.Context.getTypeDeclType(ECRecord);
+ break;
+ }
+
+ // Okay, the restriction seems to be what's limiting us.
+
+ // Use a special diagnostic for constructors and destructors.
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D) ||
+ (isa<FunctionTemplateDecl>(D) &&
+ isa<CXXConstructorDecl>(
+ cast<FunctionTemplateDecl>(D)->getTemplatedDecl()))) {
+ S.Diag(D->getLocation(), diag::note_access_protected_restricted_ctordtor)
+ << isa<CXXDestructorDecl>(D);
return true;
}
+
+ // Otherwise, use the generic diagnostic.
+ S.Diag(D->getLocation(), diag::note_access_protected_restricted_object)
+ << S.Context.getTypeDeclType(ECRecord);
+ return true;
}
return false;
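
The special-case note added above targets constructors and destructors, where the restriction most commonly bites. A protected destructor, for instance, is usable from a derived class but not for a complete object elsewhere:

    class A { protected: ~A() {} };
    class B : public A {};   // OK: B's destructor may destroy its A subobject
    int main() {
      B b;                   // OK
      // A a;                // ill-formed: ~A() is protected here
    }
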
@@ -996,8 +1081,6 @@ static void DiagnoseAccessPath(Sema &S,
const EffectiveContext &EC,
AccessTarget &Entity) {
AccessSpecifier Access = Entity.getAccess();
- const CXXRecordDecl *NamingClass = Entity.getNamingClass();
- NamingClass = NamingClass->getCanonicalDecl();
NamedDecl *D = (Entity.isMemberAccess() ? Entity.getTargetDecl() : 0);
const CXXRecordDecl *DeclaringClass = Entity.getDeclaringClass();
@@ -1016,15 +1099,15 @@ static void DiagnoseAccessPath(Sema &S,
while (D->isOutOfLine()) {
NamedDecl *PrevDecl = 0;
if (VarDecl *VD = dyn_cast<VarDecl>(D))
- PrevDecl = VD->getPreviousDeclaration();
+ PrevDecl = VD->getPreviousDecl();
else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- PrevDecl = FD->getPreviousDeclaration();
+ PrevDecl = FD->getPreviousDecl();
else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(D))
- PrevDecl = TND->getPreviousDeclaration();
+ PrevDecl = TND->getPreviousDecl();
else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
if (isa<RecordDecl>(D) && cast<RecordDecl>(D)->isInjectedClassName())
break;
- PrevDecl = TD->getPreviousDeclaration();
+ PrevDecl = TD->getPreviousDecl();
}
if (!PrevDecl) break;
D = PrevDecl;
@@ -1064,7 +1147,6 @@ static void DiagnoseAccessPath(Sema &S,
case AR_dependent:
llvm_unreachable("can't diagnose dependent access failures");
- return;
}
}
@@ -1162,7 +1244,7 @@ static bool IsMicrosoftUsingDeclarationAccessBug(Sema& S,
if (Entity.getTargetDecl()->getAccess() == AS_private &&
(OrigDecl->getAccess() == AS_public ||
OrigDecl->getAccess() == AS_protected)) {
- S.Diag(AccessLoc, diag::war_ms_using_declaration_inaccessible)
+ S.Diag(AccessLoc, diag::ext_ms_using_declaration_inaccessible)
<< Shadow->getUsingDecl()->getQualifiedNameAsString()
<< OrigDecl->getQualifiedNameAsString();
return true;
@@ -1274,7 +1356,7 @@ static AccessResult CheckEffectiveAccess(Sema &S,
AccessTarget &Entity) {
assert(Entity.getAccess() != AS_public && "called for public access!");
- if (S.getLangOptions().MicrosoftMode &&
+ if (S.getLangOpts().MicrosoftMode &&
IsMicrosoftUsingDeclarationAccessBug(S, Loc, Entity))
return AR_accessible;
@@ -1294,7 +1376,6 @@ static AccessResult CheckEffectiveAccess(Sema &S,
// silence unnecessary warning
llvm_unreachable("invalid access result");
- return AR_accessible;
}
static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
@@ -1329,7 +1410,6 @@ static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
case AR_dependent: return Sema::AR_dependent;
}
llvm_unreachable("falling off end");
- return Sema::AR_accessible;
}
void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *decl) {
@@ -1396,7 +1476,7 @@ void Sema::HandleDependentAccessCheck(const DependentDiagnostic &DD,
Sema::AccessResult Sema::CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair Found) {
- if (!getLangOptions().AccessControl ||
+ if (!getLangOpts().AccessControl ||
!E->getNamingClass() ||
Found.getAccess() == AS_public)
return AR_accessible;
@@ -1412,7 +1492,7 @@ Sema::AccessResult Sema::CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
/// access which has now been resolved to a member.
Sema::AccessResult Sema::CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair Found) {
- if (!getLangOptions().AccessControl ||
+ if (!getLangOpts().AccessControl ||
Found.getAccess() == AS_public)
return AR_accessible;
@@ -1427,10 +1507,34 @@ Sema::AccessResult Sema::CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
return CheckAccess(*this, E->getMemberLoc(), Entity);
}
+/// Is the given special member function accessible for the purposes of
+/// deciding whether to define a special member function as deleted?
+bool Sema::isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
+ AccessSpecifier access,
+ QualType objectType) {
+ // Fast path.
+ if (access == AS_public || !getLangOpts().AccessControl) return true;
+
+ AccessTarget entity(Context, AccessTarget::Member, decl->getParent(),
+ DeclAccessPair::make(decl, access), objectType);
+
+ // Suppress diagnostics.
+ entity.setDiag(PDiag());
+
+ switch (CheckAccess(*this, SourceLocation(), entity)) {
+ case AR_accessible: return true;
+ case AR_inaccessible: return false;
+ case AR_dependent: llvm_unreachable("dependent for =delete computation");
+ case AR_delayed: llvm_unreachable("cannot delay =delete computation");
+ }
+ llvm_unreachable("bad access result");
+}
+
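
The new query above feeds C++11's deleted-definition rules: when a base's special member is inaccessible, the derived one is implicitly deleted instead of producing an immediate error. At the user level:

    class A {
      A(const A&);           // private copy constructor
    public:
      A() {}
    };
    struct B : A {};         // B's copy constructor is implicitly deleted
    int main() {
      B b;                   // OK: default construction is accessible
      // B b2 = b;           // ill-formed: uses B's deleted copy constructor
    }
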
Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
- const PartialDiagnostic &PDiag) {
- if (!getLangOptions().AccessControl)
+ const PartialDiagnostic &PDiag,
+ QualType ObjectTy) {
+ if (!getLangOpts().AccessControl)
return AR_accessible;
// There's never a path involved when checking implicit destructor access.
@@ -1439,9 +1543,11 @@ Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc,
return AR_accessible;
CXXRecordDecl *NamingClass = Dtor->getParent();
+ if (ObjectTy.isNull()) ObjectTy = Context.getTypeDeclType(NamingClass);
+
AccessTarget Entity(Context, AccessTarget::Member, NamingClass,
DeclAccessPair::make(Dtor, Access),
- QualType());
+ ObjectTy);
Entity.setDiag(PDiag); // TODO: avoid copy
return CheckAccess(*this, Loc, Entity);
@@ -1453,14 +1559,9 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp) {
- if (!getLangOptions().AccessControl ||
- Access == AS_public)
+ if (!getLangOpts().AccessControl || Access == AS_public)
return AR_accessible;
- CXXRecordDecl *NamingClass = Constructor->getParent();
- AccessTarget AccessEntity(Context, AccessTarget::Member, NamingClass,
- DeclAccessPair::make(Constructor, Access),
- QualType());
PartialDiagnostic PD(PDiag());
switch (Entity.getKind()) {
default:
@@ -1483,28 +1584,47 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
break;
}
+ case InitializedEntity::EK_LambdaCapture: {
+ const VarDecl *Var = Entity.getCapturedVar();
+ PD = PDiag(diag::err_access_lambda_capture);
+ PD << Var->getName() << Entity.getType() << getSpecialMember(Constructor);
+ break;
+ }
+
}
- return CheckConstructorAccess(UseLoc, Constructor, Access, PD);
+ return CheckConstructorAccess(UseLoc, Constructor, Entity, Access, PD);
}
/// Checks access to a constructor.
Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
CXXConstructorDecl *Constructor,
+ const InitializedEntity &Entity,
AccessSpecifier Access,
- PartialDiagnostic PD) {
- if (!getLangOptions().AccessControl ||
+ const PartialDiagnostic &PD) {
+ if (!getLangOpts().AccessControl ||
Access == AS_public)
return AR_accessible;
CXXRecordDecl *NamingClass = Constructor->getParent();
+
+ // Initializing a base sub-object is an instance method call on an
+ // object of the derived class. Otherwise, we have an instance method
+ // call on an object of the constructed type.
+ CXXRecordDecl *ObjectClass;
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ ObjectClass = cast<CXXConstructorDecl>(CurContext)->getParent();
+ } else {
+ ObjectClass = NamingClass;
+ }
+
AccessTarget AccessEntity(Context, AccessTarget::Member, NamingClass,
DeclAccessPair::make(Constructor, Access),
- QualType());
+ Context.getTypeDeclType(ObjectClass));
AccessEntity.setDiag(PD);
return CheckAccess(*this, UseLoc, AccessEntity);
-}
+}
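
The EK_Base case above models base-subobject initialization as a call on an object of the derived class, which is exactly why a derived constructor may use a protected base constructor:

    class A { protected: A() {} };
    class B : public A {
    public:
      B() : A() {}           // OK: the object class is B, per the EK_Base case
    };
    int main() { B b; }
    // By contrast, 'A a;' here would be ill-formed: the object class would be
    // A itself, and A::A() is protected.
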
/// Checks direct (i.e. non-inherited) access to an arbitrary class
/// member.
@@ -1512,7 +1632,7 @@ Sema::AccessResult Sema::CheckDirectMemberAccess(SourceLocation UseLoc,
NamedDecl *Target,
const PartialDiagnostic &Diag) {
AccessSpecifier Access = Target->getAccess();
- if (!getLangOptions().AccessControl ||
+ if (!getLangOpts().AccessControl ||
Access == AS_public)
return AR_accessible;
@@ -1531,7 +1651,7 @@ Sema::AccessResult Sema::CheckAllocationAccess(SourceLocation OpLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found,
bool Diagnose) {
- if (!getLangOptions().AccessControl ||
+ if (!getLangOpts().AccessControl ||
!NamingClass ||
Found.getAccess() == AS_public)
return AR_accessible;
@@ -1551,7 +1671,7 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair Found) {
- if (!getLangOptions().AccessControl ||
+ if (!getLangOpts().AccessControl ||
Found.getAccess() == AS_public)
return AR_accessible;
@@ -1569,7 +1689,7 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair Found) {
- if (!getLangOptions().AccessControl ||
+ if (!getLangOpts().AccessControl ||
Found.getAccess() == AS_none ||
Found.getAccess() == AS_public)
return AR_accessible;
@@ -1578,7 +1698,7 @@ Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr,
CXXRecordDecl *NamingClass = Ovl->getNamingClass();
AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
- Context.getTypeDeclType(NamingClass));
+ /*no instance context*/ QualType());
Entity.setDiag(diag::err_access)
<< Ovl->getSourceRange();
@@ -1601,7 +1721,7 @@ Sema::AccessResult Sema::CheckBaseClassAccess(SourceLocation AccessLoc,
unsigned DiagID,
bool ForceCheck,
bool ForceUnprivileged) {
- if (!ForceCheck && !getLangOptions().AccessControl)
+ if (!ForceCheck && !getLangOpts().AccessControl)
return AR_accessible;
if (Path.Access == AS_public)
@@ -1630,7 +1750,7 @@ Sema::AccessResult Sema::CheckBaseClassAccess(SourceLocation AccessLoc,
/// Checks access to all the declarations in the given result set.
void Sema::CheckLookupAccess(const LookupResult &R) {
- assert(getLangOptions().AccessControl
+ assert(getLangOpts().AccessControl
&& "performing access check without access control");
assert(R.getNamingClass() && "performing access check without naming class");
@@ -1651,19 +1771,63 @@ void Sema::CheckLookupAccess(const LookupResult &R) {
/// \param Decl the declaration to check if it can be accessed
/// \param Class the class/context from which to start the search
/// \return true if the Decl is accessible from the Class, false otherwise.
-bool Sema::IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *Class) {
- if (!Class || !Decl->isCXXClassMember())
- return true;
+bool Sema::IsSimplyAccessible(NamedDecl *Decl, DeclContext *Ctx) {
+ if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) {
+ if (!Decl->isCXXClassMember())
+ return true;
- QualType qType = Class->getTypeForDecl()->getCanonicalTypeInternal();
- AccessTarget Entity(Context, AccessedEntity::Member, Class,
- DeclAccessPair::make(Decl, Decl->getAccess()),
- qType);
- if (Entity.getAccess() == AS_public)
- return true;
+ QualType qType = Class->getTypeForDecl()->getCanonicalTypeInternal();
+ AccessTarget Entity(Context, AccessedEntity::Member, Class,
+ DeclAccessPair::make(Decl, Decl->getAccess()),
+ qType);
+ if (Entity.getAccess() == AS_public)
+ return true;
- EffectiveContext EC(CurContext);
- return ::IsAccessible(*this, EC, Entity) != ::AR_inaccessible;
+ EffectiveContext EC(CurContext);
+ return ::IsAccessible(*this, EC, Entity) != ::AR_inaccessible;
+ }
+
+ if (ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(Decl)) {
+ // @public and @package ivars are always accessible.
+ if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Public ||
+ Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Package)
+ return true;
+
+ // If we are inside a class or category implementation, determine the
+ // interface we're in.
+ ObjCInterfaceDecl *ClassOfMethodDecl = 0;
+ if (ObjCMethodDecl *MD = getCurMethodDecl())
+ ClassOfMethodDecl = MD->getClassInterface();
+ else if (FunctionDecl *FD = getCurFunctionDecl()) {
+ if (ObjCImplDecl *Impl
+ = dyn_cast<ObjCImplDecl>(FD->getLexicalDeclContext())) {
+ if (ObjCImplementationDecl *IMPD
+ = dyn_cast<ObjCImplementationDecl>(Impl))
+ ClassOfMethodDecl = IMPD->getClassInterface();
+ else if (ObjCCategoryImplDecl* CatImplClass
+ = dyn_cast<ObjCCategoryImplDecl>(Impl))
+ ClassOfMethodDecl = CatImplClass->getClassInterface();
+ }
+ }
+
+ // If we're not in an interface, this ivar is inaccessible.
+ if (!ClassOfMethodDecl)
+ return false;
+
+ // If we're inside the same interface that owns the ivar, we're fine.
+ if (declaresSameEntity(ClassOfMethodDecl, Ivar->getContainingInterface()))
+ return true;
+
+ // If the ivar is private, it's inaccessible.
+ if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Private)
+ return false;
+
+ return Ivar->getContainingInterface()->isSuperClassOf(ClassOfMethodDecl);
+ }
+
+ return true;
}
void Sema::ActOnStartSuppressingAccessChecks() {
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
index 77410db..e935fc7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
@@ -263,9 +263,6 @@ void Sema::ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
Context->setAlignment(AlignmentVal);
}
break;
-
- default:
- llvm_unreachable("Invalid #pragma pack kind.");
}
}
@@ -348,9 +345,9 @@ static void PushPragmaVisibility(Sema &S, unsigned type, SourceLocation loc) {
Stack->push_back(std::make_pair(type, loc));
}
-void Sema::ActOnPragmaVisibility(bool IsPush, const IdentifierInfo* VisType,
+void Sema::ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc) {
- if (IsPush) {
+ if (VisType) {
// Compute visibility to use.
VisibilityAttr::VisibilityType type;
if (VisType->isStr("default"))
@@ -368,7 +365,7 @@ void Sema::ActOnPragmaVisibility(bool IsPush, const IdentifierInfo* VisType,
}
PushPragmaVisibility(*this, type, PragmaLoc);
} else {
- PopPragmaVisibility();
+ PopPragmaVisibility(false, PragmaLoc);
}
}
@@ -381,28 +378,49 @@ void Sema::ActOnPragmaFPContract(tok::OnOffSwitch OOS) {
FPFeatures.fp_contract = 0;
break;
case tok::OOS_DEFAULT:
- FPFeatures.fp_contract = getLangOptions().DefaultFPContract;
+ FPFeatures.fp_contract = getLangOpts().DefaultFPContract;
break;
}
}
-void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr) {
+void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
+ SourceLocation Loc) {
// Visibility calculations will consider the namespace's visibility.
// Here we just want to note that we're in a visibility context
// which overrides any enclosing #pragma context, but doesn't itself
// contribute visibility.
- PushPragmaVisibility(*this, NoVisibility, SourceLocation());
+ PushPragmaVisibility(*this, NoVisibility, Loc);
}
-void Sema::PopPragmaVisibility() {
- // Pop visibility from stack, if there is one on the stack.
- if (VisContext) {
- VisStack *Stack = static_cast<VisStack*>(VisContext);
+void Sema::PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc) {
+ if (!VisContext) {
+ Diag(EndLoc, diag::err_pragma_pop_visibility_mismatch);
+ return;
+ }
+
+ // Pop visibility from stack
+ VisStack *Stack = static_cast<VisStack*>(VisContext);
- Stack->pop_back();
- // To simplify the implementation, never keep around an empty stack.
- if (Stack->empty())
- FreeVisContext();
+ const std::pair<unsigned, SourceLocation> *Back = &Stack->back();
+ bool StartsWithPragma = Back->first != NoVisibility;
+ if (StartsWithPragma && IsNamespaceEnd) {
+ Diag(Back->second, diag::err_pragma_push_visibility_mismatch);
+ Diag(EndLoc, diag::note_surrounding_namespace_ends_here);
+
+ // For better error recovery, eat all pushes inside the namespace.
+ do {
+ Stack->pop_back();
+ Back = &Stack->back();
+ StartsWithPragma = Back->first != NoVisibility;
+ } while (StartsWithPragma);
+ } else if (!StartsWithPragma && !IsNamespaceEnd) {
+ Diag(EndLoc, diag::err_pragma_pop_visibility_mismatch);
+ Diag(Back->second, diag::note_surrounding_namespace_starts_here);
+ return;
}
- // FIXME: Add diag for pop without push.
+
+ Stack->pop_back();
+ // To simplify the implementation, never keep around an empty stack.
+ if (Stack->empty())
+ FreeVisContext();
}
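
The new bookkeeping above diagnoses visibility pushes and pops that are unbalanced across namespace boundaries. A sketch of the user-level shapes involved:

    // Balanced usage: compiles cleanly.
    #pragma GCC visibility push(hidden)
    int hidden_helper() { return 0; }
    #pragma GCC visibility pop

    int main() { return hidden_helper(); }

    // A push left open when the surrounding namespace ends, or a pop with no
    // matching push, now triggers the push/pop visibility mismatch diagnostics.
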
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 360a040..5a0fcec 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -13,6 +13,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Template.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
@@ -150,14 +151,13 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
const TagType *Tag = NNS->getAsType()->getAs<TagType>();
assert(Tag && "Non-tag type in nested-name-specifier");
return Tag->getDecl();
- } break;
+ }
case NestedNameSpecifier::Global:
return Context.getTranslationUnitDecl();
}
- // Required to silence a GCC warning.
- return 0;
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) {
@@ -187,7 +187,7 @@ bool Sema::isUnknownSpecialization(const CXXScopeSpec &SS) {
///
/// \param NNS a dependent nested name specifier.
CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
- assert(getLangOptions().CPlusPlus && "Only callable in C++");
+ assert(getLangOpts().CPlusPlus && "Only callable in C++");
assert(NNS->isDependent() && "Only dependent nested-name-specifier allowed");
if (!NNS->getAsType())
@@ -210,43 +210,56 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
DeclContext *DC) {
assert(DC != 0 && "given null context");
- if (TagDecl *tag = dyn_cast<TagDecl>(DC)) {
- // If this is a dependent type, then we consider it complete.
- if (tag->isDependentContext())
- return false;
+ TagDecl *tag = dyn_cast<TagDecl>(DC);
- // If we're currently defining this type, then lookup into the
- // type is okay: don't complain that it isn't complete yet.
- QualType type = Context.getTypeDeclType(tag);
- const TagType *tagType = type->getAs<TagType>();
- if (tagType && tagType->isBeingDefined())
- return false;
+ // If this is a dependent type, then we consider it complete.
+ if (!tag || tag->isDependentContext())
+ return false;
- SourceLocation loc = SS.getLastQualifierNameLoc();
- if (loc.isInvalid()) loc = SS.getRange().getBegin();
+ // If we're currently defining this type, then lookup into the
+ // type is okay: don't complain that it isn't complete yet.
+ QualType type = Context.getTypeDeclType(tag);
+ const TagType *tagType = type->getAs<TagType>();
+ if (tagType && tagType->isBeingDefined())
+ return false;
- // The type must be complete.
- if (RequireCompleteType(loc, type,
- PDiag(diag::err_incomplete_nested_name_spec)
- << SS.getRange())) {
- SS.SetInvalid(SS.getRange());
- return true;
- }
+ SourceLocation loc = SS.getLastQualifierNameLoc();
+ if (loc.isInvalid()) loc = SS.getRange().getBegin();
+
+ // The type must be complete.
+ if (RequireCompleteType(loc, type,
+ PDiag(diag::err_incomplete_nested_name_spec)
+ << SS.getRange())) {
+ SS.SetInvalid(SS.getRange());
+ return true;
+ }
- // Fixed enum types are complete, but they aren't valid as scopes
- // until we see a definition, so awkwardly pull out this special
- // case.
- if (const EnumType *enumType = dyn_cast_or_null<EnumType>(tagType)) {
- if (!enumType->getDecl()->isCompleteDefinition()) {
- Diag(loc, diag::err_incomplete_nested_name_spec)
- << type << SS.getRange();
+ // Fixed enum types are complete, but they aren't valid as scopes
+ // until we see a definition, so awkwardly pull out this special
+ // case.
+ const EnumType *enumType = dyn_cast_or_null<EnumType>(tagType);
+ if (!enumType || enumType->getDecl()->isCompleteDefinition())
+ return false;
+
+ // Try to instantiate the definition, if this is a specialization of an
+ // enumeration temploid.
+ EnumDecl *ED = enumType->getDecl();
+ if (EnumDecl *Pattern = ED->getInstantiatedFromMemberEnum()) {
+ MemberSpecializationInfo *MSI = ED->getMemberSpecializationInfo();
+ if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) {
+ if (InstantiateEnum(loc, ED, Pattern, getTemplateInstantiationArgs(ED),
+ TSK_ImplicitInstantiation)) {
SS.SetInvalid(SS.getRange());
return true;
}
+ return false;
}
}
- return false;
+ Diag(loc, diag::err_incomplete_nested_name_spec)
+ << type << SS.getRange();
+ SS.SetInvalid(SS.getRange());
+ return true;
}
bool Sema::ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
@@ -268,18 +281,18 @@ bool Sema::isAcceptableNestedNameSpecifier(NamedDecl *SD) {
if (!isa<TypeDecl>(SD))
return false;
- // Determine whether we have a class (or, in C++0x, an enum) or
+ // Determine whether we have a class (or, in C++11, an enum) or
// a typedef thereof. If so, build the nested-name-specifier.
QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
if (T->isDependentType())
return true;
else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
if (TD->getUnderlyingType()->isRecordType() ||
- (Context.getLangOptions().CPlusPlus0x &&
+ (Context.getLangOpts().CPlusPlus0x &&
TD->getUnderlyingType()->isEnumeralType()))
return true;
} else if (isa<RecordDecl>(SD) ||
- (Context.getLangOptions().CPlusPlus0x && isa<EnumDecl>(SD)))
+ (Context.getLangOpts().CPlusPlus0x && isa<EnumDecl>(SD)))
return true;
return false;
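
What isAcceptableNestedNameSpecifier admits, at the user level: class types, enumerations (C++11), and typedefs of either:

    struct S { static const int k = 3; };
    typedef S T;
    int a = T::k;   // a typedef of a class type is a valid nested-name-specifier
    int main() { return a - 3; }
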
@@ -363,6 +376,25 @@ bool Sema::isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
return false;
}
+namespace {
+
+// Callback to only accept typo corrections that can be a valid C++ member
+// intializer: either a non-static field member or a base class.
+class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ explicit NestedNameSpecifierValidatorCCC(Sema &SRef)
+ : SRef(SRef) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return SRef.isAcceptableNestedNameSpecifier(candidate.getCorrectionDecl());
+ }
+
+ private:
+ Sema &SRef;
+};
+
+}
+
/// \brief Build a new nested-name-specifier for "identifier::", as described
/// by ActOnCXXNestedNameSpecifier.
///
@@ -478,14 +510,14 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
// We haven't found anything, and we're not recovering from a
// different kind of error, so look for typos.
DeclarationName Name = Found.getLookupName();
+ NestedNameSpecifierValidatorCCC Validator(*this);
TypoCorrection Corrected;
Found.clear();
if ((Corrected = CorrectTypo(Found.getLookupNameInfo(),
- Found.getLookupKind(), S, &SS, LookupCtx,
- EnteringContext, CTC_NoKeywords)) &&
- isAcceptableNestedNameSpecifier(Corrected.getCorrectionDecl())) {
- std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
- std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
+ Found.getLookupKind(), S, &SS, Validator,
+ LookupCtx, EnteringContext))) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
if (LookupCtx)
Diag(Found.getNameLoc(), diag::err_no_member_suggest)
<< Name << LookupCtx << CorrectedQuotedStr << SS.getRange()
@@ -596,6 +628,9 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
}
+ if (T->isEnumeralType())
+ Diag(IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
+
SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
CCLoc);
return false;
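
The new warning fires because using an enumeration as a nested-name-specifier is valid C++11 but not C++98; under -Wc++98-compat:

    enum Color { Red, Green };
    int c = Color::Red;   // OK in C++11; warns under -Wc++98-compat
    int main() { return c; }
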
@@ -631,7 +666,7 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
// public:
// void foo() { D::foo2(); }
// };
- if (getLangOptions().MicrosoftExt) {
+ if (getLangOpts().MicrosoftExt) {
DeclContext *DC = LookupCtx ? LookupCtx : CurContext;
if (DC->isDependentContext() && DC->isFunctionOrMethod()) {
SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
@@ -673,6 +708,29 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
/*ScopeLookupResult=*/0, false);
}
+bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
+ const DeclSpec &DS,
+ SourceLocation ColonColonLoc) {
+ if (SS.isInvalid() || DS.getTypeSpecType() == DeclSpec::TST_error)
+ return true;
+
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype);
+
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ if (!T->isDependentType() && !T->getAs<TagType>()) {
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class)
+ << T << getLangOpts().CPlusPlus;
+ return true;
+ }
+
+ TypeLocBuilder TLB;
+ DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
+ DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
+ ColonColonLoc);
+ return false;
+}
+
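
The new ActOnCXXNestedNameSpecifierDecltype accepts 'decltype(expr)::' as a scope qualifier when the denoted type is a class or enumeration (C++11):

    struct S { enum E { e }; };
    S s;
    decltype(s)::E v = decltype(s)::e;   // decltype as a nested-name-specifier
    int main() { return v == S::e ? 0 : 1; }
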
/// IsInvalidUnlessNestedName - This method is used for error recovery
/// purposes to determine whether the specified identifier is only valid as
/// a nested name specifier, for example a namespace name. It is
@@ -695,8 +753,8 @@ bool Sema::IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
}
bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
- SourceLocation TemplateLoc,
- CXXScopeSpec &SS,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
@@ -723,17 +781,18 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
// Create source-location information for this type.
TypeLocBuilder Builder;
- DependentTemplateSpecializationTypeLoc SpecTL
+ DependentTemplateSpecializationTypeLoc SpecTL
= Builder.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(SourceLocation());
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
- SpecTL.setKeywordLoc(SourceLocation());
- SpecTL.setNameLoc(TemplateNameLoc);
- SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
- SS.Extend(Context, TemplateLoc, Builder.getTypeLocInContext(Context, T),
+ SS.Extend(Context, TemplateKWLoc, Builder.getTypeLocInContext(Context, T),
CCLoc);
return false;
}
@@ -766,20 +825,19 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
return true;
}
- // Provide source-location information for the template specialization
- // type.
+ // Provide source-location information for the template specialization type.
TypeLocBuilder Builder;
- TemplateSpecializationTypeLoc SpecTL
+ TemplateSpecializationTypeLoc SpecTL
= Builder.push<TemplateSpecializationTypeLoc>(T);
-
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
- SpecTL.setTemplateNameLoc(TemplateNameLoc);
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
- SS.Extend(Context, TemplateLoc, Builder.getTypeLocInContext(Context, T),
+ SS.Extend(Context, TemplateKWLoc, Builder.getTypeLocInContext(Context, T),
CCLoc);
return false;
}
@@ -854,8 +912,7 @@ bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
return true;
}
- // Silence bogus warning.
- return false;
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
index 8bd9351..54683e1 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
@@ -49,7 +49,7 @@ namespace {
: Self(S), SrcExpr(src), DestType(destType),
ResultType(destType.getNonLValueExprType(S.Context)),
ValueKind(Expr::getValueKindForType(destType)),
- Kind(CK_Dependent) {
+ Kind(CK_Dependent), IsARCUnbridgedCast(false) {
if (const BuiltinType *placeholder =
src.get()->getType()->getAsPlaceholderType()) {
@@ -67,6 +67,7 @@ namespace {
CastKind Kind;
BuiltinType::Kind PlaceholderKind;
CXXCastPath BasePath;
+ bool IsARCUnbridgedCast;
SourceRange OpRange;
SourceRange DestRange;
@@ -76,9 +77,23 @@ namespace {
void CheckReinterpretCast();
void CheckStaticCast();
void CheckDynamicCast();
- void CheckCXXCStyleCast(bool FunctionalCast);
+ void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
void CheckCStyleCast();
+ /// Complete an apparently-successful cast operation that yields
+ /// the given expression.
+ ExprResult complete(CastExpr *castExpr) {
+ // If this is an unbridged cast, wrap the result in an implicit
+ // cast that yields the unbridged-cast placeholder type.
+ if (IsARCUnbridgedCast) {
+ castExpr = ImplicitCastExpr::Create(Self.Context,
+ Self.Context.ARCUnbridgedCastTy,
+ CK_Dependent, castExpr, 0,
+ castExpr->getValueKind());
+ }
+ return Self.Owned(castExpr);
+ }
+
// Internal convenience methods.
/// Try to handle the given placeholder expression kind. Return
@@ -103,8 +118,12 @@ namespace {
}
void checkObjCARCConversion(Sema::CheckedConversionKind CCK) {
+ assert(Self.getLangOpts().ObjCAutoRefCount);
+
Expr *src = SrcExpr.get();
- Self.CheckObjCARCConversion(OpRange, DestType, src, CCK);
+ if (Self.CheckObjCARCConversion(OpRange, DestType, src, CCK) ==
+ Sema::ACR_unbridged)
+ IsARCUnbridgedCast = true;
SrcExpr = src;
}
@@ -171,15 +190,15 @@ static TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType,
Sema::CheckedConversionKind CCK,
const SourceRange &OpRange,
- unsigned &msg,
- CastKind &Kind);
+ unsigned &msg, CastKind &Kind,
+ bool ListInitialization);
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType,
Sema::CheckedConversionKind CCK,
const SourceRange &OpRange,
- unsigned &msg,
- CastKind &Kind,
- CXXCastPath &BasePath);
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath,
+ bool ListInitialization);
static TryCastResult TryConstCast(Sema &Self, Expr *SrcExpr, QualType DestType,
bool CStyle, unsigned &msg);
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
@@ -203,7 +222,7 @@ Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
if (D.isInvalidType())
return ExprError();
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments (C++ only).
CheckExtraCXXDefaultArguments(D);
}
@@ -237,9 +256,9 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
if (Op.SrcExpr.isInvalid())
return ExprError();
}
- return Owned(CXXConstCastExpr::Create(Context, Op.ResultType, Op.ValueKind,
- Op.SrcExpr.take(), DestTInfo, OpLoc,
- Parens.getEnd()));
+ return Op.complete(CXXConstCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.SrcExpr.take(), DestTInfo,
+ OpLoc, Parens.getEnd()));
case tok::kw_dynamic_cast: {
if (!TypeDependent) {
@@ -247,10 +266,10 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
if (Op.SrcExpr.isInvalid())
return ExprError();
}
- return Owned(CXXDynamicCastExpr::Create(Context, Op.ResultType,
- Op.ValueKind, Op.Kind,
- Op.SrcExpr.take(), &Op.BasePath,
- DestTInfo, OpLoc, Parens.getEnd()));
+ return Op.complete(CXXDynamicCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.take(),
+ &Op.BasePath, DestTInfo,
+ OpLoc, Parens.getEnd()));
}
case tok::kw_reinterpret_cast: {
if (!TypeDependent) {
@@ -258,11 +277,10 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
if (Op.SrcExpr.isInvalid())
return ExprError();
}
- return Owned(CXXReinterpretCastExpr::Create(Context, Op.ResultType,
- Op.ValueKind, Op.Kind,
- Op.SrcExpr.take(), 0,
- DestTInfo, OpLoc,
- Parens.getEnd()));
+ return Op.complete(CXXReinterpretCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.take(),
+ 0, DestTInfo, OpLoc,
+ Parens.getEnd()));
}
case tok::kw_static_cast: {
if (!TypeDependent) {
@@ -271,21 +289,20 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
return ExprError();
}
- return Owned(CXXStaticCastExpr::Create(Context, Op.ResultType, Op.ValueKind,
- Op.Kind, Op.SrcExpr.take(),
- &Op.BasePath, DestTInfo, OpLoc,
- Parens.getEnd()));
+ return Op.complete(CXXStaticCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.take(),
+ &Op.BasePath, DestTInfo,
+ OpLoc, Parens.getEnd()));
}
}
-
- return ExprError();
}
/// Try to diagnose a failed overloaded cast. Returns true if
/// diagnostics were emitted.
static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
SourceRange range, Expr *src,
- QualType destType) {
+ QualType destType,
+ bool listInitialization) {
switch (CT) {
// These cast kinds don't consider user-defined conversions.
case CT_Const:
@@ -307,8 +324,9 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
InitializedEntity entity = InitializedEntity::InitializeTemporary(destType);
InitializationKind initKind
= (CT == CT_CStyle)? InitializationKind::CreateCStyleCast(range.getBegin(),
- range)
- : (CT == CT_Functional)? InitializationKind::CreateFunctionalCast(range)
+ range, listInitialization)
+ : (CT == CT_Functional)? InitializationKind::CreateFunctionalCast(range,
+ listInitialization)
: InitializationKind::CreateCast(/*type range?*/ range);
InitializationSequence sequence(S, entity, initKind, &src, 1);
@@ -328,7 +346,6 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
switch (sequence.getFailedOverloadResult()) {
case OR_Success: llvm_unreachable("successful failed overload");
- return false;
case OR_No_Viable_Function:
if (candidates.empty())
msg = diag::err_ovl_no_conversion_in_cast;
@@ -352,21 +369,23 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
<< CT << srcType << destType
<< range << src->getSourceRange();
- candidates.NoteCandidates(S, howManyCandidates, &src, 1);
+ candidates.NoteCandidates(S, howManyCandidates, src);
return true;
}
/// Diagnose a failed cast.
static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType,
- SourceRange opRange, Expr *src, QualType destType) {
+ SourceRange opRange, Expr *src, QualType destType,
+ bool listInitialization) {
if (src->getType() == S.Context.BoundMemberTy) {
(void) S.CheckPlaceholderExpr(src); // will always fail
return;
}
if (msg == diag::err_bad_cxx_cast_generic &&
- tryDiagnoseOverloadedCast(S, castType, opRange, src, destType))
+ tryDiagnoseOverloadedCast(S, castType, opRange, src, destType,
+ listInitialization))
return;
S.Diag(opRange.getBegin(), msg) << castType
@@ -443,7 +462,7 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
// If the only checking we care about is for Objective-C lifetime qualifiers,
// and we're not in ARC mode, there's nothing to check.
if (!CheckCVR && CheckObjCLifetime &&
- !Self.Context.getLangOptions().ObjCAutoRefCount)
+ !Self.Context.getLangOpts().ObjCAutoRefCount)
return false;
// Casting away constness is defined in C++ 5.2.11p8 with reference to
@@ -511,6 +530,13 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
/// Refer to C++ 5.2.7 for details. Dynamic casts are used mostly for runtime-
/// checked downcasts in class hierarchies.
void CastOperation::CheckDynamicCast() {
+ if (ValueKind == VK_RValue)
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
+ else if (isPlaceholder())
+ SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.take());
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+
QualType OrigSrcType = SrcExpr.get()->getType();
QualType DestType = Self.Context.getCanonicalType(this->DestType);
@@ -638,11 +664,12 @@ void CastOperation::CheckDynamicCast() {
/// const char *str = "literal";
/// legacy_function(const_cast\<char*\>(str));
void CastOperation::CheckConstCast() {
- if (ValueKind == VK_RValue && !isPlaceholder(BuiltinType::Overload)) {
+ if (ValueKind == VK_RValue)
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
- if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
- return;
- }
+ else if (isPlaceholder())
+ SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.take());
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
unsigned msg = diag::err_bad_cxx_cast_generic;
if (TryConstCast(Self, SrcExpr.get(), DestType, /*CStyle*/false, msg) != TC_Success
@@ -657,11 +684,12 @@ void CastOperation::CheckConstCast() {
/// like this:
/// char *bytes = reinterpret_cast\<char*\>(int_ptr);
void CastOperation::CheckReinterpretCast() {
- if (ValueKind == VK_RValue && !isPlaceholder(BuiltinType::Overload)) {
+ if (ValueKind == VK_RValue && !isPlaceholder(BuiltinType::Overload))
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
- if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
- return;
- }
+ else
+ checkNonOverloadPlaceholders();
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
unsigned msg = diag::err_bad_cxx_cast_generic;
TryCastResult tcr =
@@ -679,9 +707,10 @@ void CastOperation::CheckReinterpretCast() {
Self.NoteAllOverloadCandidates(SrcExpr.get());
} else {
- diagnoseBadCast(Self, msg, CT_Reinterpret, OpRange, SrcExpr.get(), DestType);
+ diagnoseBadCast(Self, msg, CT_Reinterpret, OpRange, SrcExpr.get(),
+ DestType, /*listInitialization=*/false);
}
- } else if (tcr == TC_Success && Self.getLangOptions().ObjCAutoRefCount) {
+ } else if (tcr == TC_Success && Self.getLangOpts().ObjCAutoRefCount) {
checkObjCARCConversion(Sema::CCK_OtherCast);
}
}
@@ -726,7 +755,7 @@ void CastOperation::CheckStaticCast() {
unsigned msg = diag::err_bad_cxx_cast_generic;
TryCastResult tcr
= TryStaticCast(Self, SrcExpr, DestType, Sema::CCK_OtherCast, OpRange, msg,
- Kind, BasePath);
+ Kind, BasePath, /*ListInitialization=*/false);
if (tcr != TC_Success && msg != 0) {
if (SrcExpr.isInvalid())
return;
@@ -737,12 +766,13 @@ void CastOperation::CheckStaticCast() {
<< oe->getQualifierLoc().getSourceRange();
Self.NoteAllOverloadCandidates(SrcExpr.get());
} else {
- diagnoseBadCast(Self, msg, CT_Static, OpRange, SrcExpr.get(), DestType);
+ diagnoseBadCast(Self, msg, CT_Static, OpRange, SrcExpr.get(), DestType,
+ /*listInitialization=*/false);
}
} else if (tcr == TC_Success) {
if (Kind == CK_BitCast)
checkCastAlign();
- if (Self.getLangOptions().ObjCAutoRefCount)
+ if (Self.getLangOpts().ObjCAutoRefCount)
checkObjCARCConversion(Sema::CCK_OtherCast);
} else if (Kind == CK_BitCast) {
checkCastAlign();
@@ -756,8 +786,8 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType,
Sema::CheckedConversionKind CCK,
const SourceRange &OpRange, unsigned &msg,
- CastKind &Kind,
- CXXCastPath &BasePath) {
+ CastKind &Kind, CXXCastPath &BasePath,
+ bool ListInitialization) {
// Determine whether we have the semantics of a C-style cast.
bool CStyle
= (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
@@ -782,23 +812,23 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// C++ 5.2.9p5, reference downcast.
// See the function for details.
// DR 427 specifies that this is to be applied before paragraph 2.
- tcr = TryStaticReferenceDowncast(Self, SrcExpr.get(), DestType, CStyle, OpRange,
- msg, Kind, BasePath);
+ tcr = TryStaticReferenceDowncast(Self, SrcExpr.get(), DestType, CStyle,
+ OpRange, msg, Kind, BasePath);
if (tcr != TC_NotApplicable)
return tcr;
// C++0x [expr.static.cast]p3:
// A glvalue of type "cv1 T1" can be cast to type "rvalue reference to cv2
// T2" if "cv2 T2" is reference-compatible with "cv1 T1".
- tcr = TryLValueToRValueCast(Self, SrcExpr.get(), DestType, CStyle, Kind, BasePath,
- msg);
+ tcr = TryLValueToRValueCast(Self, SrcExpr.get(), DestType, CStyle, Kind,
+ BasePath, msg);
if (tcr != TC_NotApplicable)
return tcr;
// C++ 5.2.9p2: An expression e can be explicitly converted to a type T
// [...] if the declaration "T t(e);" is well-formed, [...].
tcr = TryStaticImplicitCast(Self, SrcExpr, DestType, CCK, OpRange, msg,
- Kind);
+ Kind, ListInitialization);
if (SrcExpr.isInvalid())
return TC_Failed;
if (tcr != TC_NotApplicable)
@@ -1269,7 +1299,7 @@ TryCastResult
TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
Sema::CheckedConversionKind CCK,
const SourceRange &OpRange, unsigned &msg,
- CastKind &Kind) {
+ CastKind &Kind, bool ListInitialization) {
if (DestType->isRecordType()) {
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
diag::err_bad_dynamic_cast_incomplete)) {
@@ -1277,13 +1307,14 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
return TC_Failed;
}
}
-
+
InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
InitializationKind InitKind
= (CCK == Sema::CCK_CStyleCast)
- ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange)
+ ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange,
+ ListInitialization)
: (CCK == Sema::CCK_FunctionalCast)
- ? InitializationKind::CreateFunctionalCast(OpRange)
+ ? InitializationKind::CreateFunctionalCast(OpRange, ListInitialization)
: InitializationKind::CreateCast(OpRange);
Expr *SrcExprRaw = SrcExpr.get();
InitializationSequence InitSeq(Self, Entity, InitKind, &SrcExprRaw, 1);
@@ -1497,6 +1528,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
case OK_BitField: inappropriate = "bit-field"; break;
case OK_VectorComponent: inappropriate = "vector element"; break;
case OK_ObjCProperty: inappropriate = "property expression"; break;
+ case OK_ObjCSubscript: inappropriate = "container subscripting expression";
+ break;
}
if (inappropriate) {
Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_reference)
@@ -1545,7 +1578,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
}
// A valid member pointer cast.
- Kind = IsLValueCast? CK_LValueBitCast : CK_BitCast;
+ assert(!IsLValueCast);
+ Kind = CK_ReinterpretMemberPointer;
return TC_Success;
}
@@ -1592,7 +1626,27 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_Failed;
}
-
+
+ if (SrcType == DestType) {
+ // C++ 5.2.10p2 has a note that mentions that, subject to all other
+ // restrictions, a cast to the same type is allowed so long as it does not
+ // cast away constness. In C++98, the intent was not entirely clear here,
+ // since all other paragraphs explicitly forbid casts to the same type.
+ // C++11 clarifies this case with p2.
+ //
+ // The only allowed types are: integral, enumeration, pointer, or
+  // pointer-to-member types. We don't restrict Obj-C pointers either.
+ Kind = CK_NoOp;
+ TryCastResult Result = TC_NotApplicable;
+ if (SrcType->isIntegralOrEnumerationType() ||
+ SrcType->isAnyPointerType() ||
+ SrcType->isMemberPointerType() ||
+ SrcType->isBlockPointerType()) {
+ Result = TC_Success;
+ }
+ return Result;
+ }
+
bool destIsPtr = DestType->isAnyPointerType() ||
DestType->isBlockPointerType();
bool srcIsPtr = SrcType->isAnyPointerType() ||
@@ -1603,17 +1657,6 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_NotApplicable;
}
- if (SrcType == DestType) {
- // C++ 5.2.10p2 has a note that mentions that, subject to all other
- // restrictions, a cast to the same type is allowed. The intent is not
- // entirely clear here, since all other paragraphs explicitly forbid casts
- // to the same type. However, the behavior of compilers is pretty consistent
- // on this point: allow same-type conversion if the involved types are
- // pointers, disallow otherwise.
- Kind = CK_NoOp;
- return TC_Success;
- }
-
if (DestType->isIntegralType(Self.Context)) {
assert(srcIsPtr && "One type must be a pointer");
// C++ 5.2.10p4: A pointer can be explicitly converted to any integral
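The same-type check moves ahead of the pointer tests and, following C++11 [expr.reinterpret.cast]p2, now accepts integral, enumeration, pointer, member-pointer, and block-pointer operands as a no-op. A short C++ sample the widened rule accepts (previously only the pointer case passed):

    int main() {
      int i = 42;
      int j = reinterpret_cast<int>(i);    // integral: now TC_Success / CK_NoOp
      int *p = &i;
      int *q = reinterpret_cast<int *>(p); // pointer: accepted before as well
      return j + (p == q);
    }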
@@ -1621,7 +1664,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
// integral type size doesn't matter.
if ((Self.Context.getTypeSize(SrcType) >
Self.Context.getTypeSize(DestType)) &&
- !Self.getLangOptions().MicrosoftExt) {
+ !Self.getLangOpts().MicrosoftExt) {
msg = diag::err_bad_reinterpret_cast_small_int;
return TC_Failed;
}
@@ -1694,15 +1737,19 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
// casting the return value of dlsym() and GetProcAddress().
// FIXME: Conditionally-supported behavior should be configurable in the
// TargetInfo or similar.
- if (!Self.getLangOptions().CPlusPlus0x)
- Self.Diag(OpRange.getBegin(), diag::ext_cast_fn_obj) << OpRange;
+ Self.Diag(OpRange.getBegin(),
+ Self.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
+ << OpRange;
return TC_Success;
}
if (DestType->isFunctionPointerType()) {
// See above.
- if (!Self.getLangOptions().CPlusPlus0x)
- Self.Diag(OpRange.getBegin(), diag::ext_cast_fn_obj) << OpRange;
+ Self.Diag(OpRange.getBegin(),
+ Self.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
+ << OpRange;
return TC_Success;
}
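Both directions of the conditionally-supported function/object pointer conversion now warn in every mode: as an extension (ext_cast_fn_obj) in C++98 and under -Wc++98-compat (warn_cxx98_compat_cast_fn_obj) in C++11, rather than staying silent in C++11. The dlsym()-style pattern the comments allude to, reduced to a self-contained sample:

    void handler() {}

    int main() {
      void *obj = reinterpret_cast<void *>(&handler); // function -> object pointer
      typedef void (*fn_t)();
      fn_t fn = reinterpret_cast<fn_t>(obj);          // object -> function pointer
      fn();                                           // calls handler()
    }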
@@ -1714,7 +1761,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_Success;
}
-void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle) {
+void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
+ bool ListInitialization) {
// Handle placeholders.
if (isPlaceholder()) {
// C-style casts can resolve __unknown_any types.
@@ -1728,7 +1776,7 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle) {
checkNonOverloadPlaceholders();
if (SrcExpr.isInvalid())
return;
- }
+ }
// C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
// This test is outside everything else because it's the only case where
@@ -1797,7 +1845,7 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle) {
if (tcr == TC_NotApplicable) {
// ... or if that is not possible, a static_cast, ignoring const, ...
tcr = TryStaticCast(Self, SrcExpr, DestType, CCK, OpRange,
- msg, Kind, BasePath);
+ msg, Kind, BasePath, ListInitialization);
if (SrcExpr.isInvalid())
return;
@@ -1810,7 +1858,7 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle) {
}
}
- if (Self.getLangOptions().ObjCAutoRefCount && tcr == TC_Success)
+ if (Self.getLangOpts().ObjCAutoRefCount && tcr == TC_Success)
checkObjCARCConversion(CCK);
if (tcr != TC_Success && msg != 0) {
@@ -1826,7 +1874,7 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle) {
} else {
diagnoseBadCast(Self, msg, (FunctionalStyle ? CT_Functional : CT_CStyle),
- OpRange, SrcExpr.get(), DestType);
+ OpRange, SrcExpr.get(), DestType, ListInitialization);
}
} else if (Kind == CK_BitCast) {
checkCastAlign();
@@ -1839,26 +1887,15 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle) {
/// Check the semantics of a C-style cast operation, in C.
void CastOperation::CheckCStyleCast() {
- assert(!Self.getLangOptions().CPlusPlus);
+ assert(!Self.getLangOpts().CPlusPlus);
- // Handle placeholders.
- if (isPlaceholder()) {
- // C-style casts can resolve __unknown_any types.
- if (claimPlaceholder(BuiltinType::UnknownAny)) {
- SrcExpr = Self.checkUnknownAnyCast(DestRange, DestType,
- SrcExpr.get(), Kind,
- ValueKind, BasePath);
- return;
- }
-
- // We allow overloads in C, but we don't allow them to be resolved
- // by anything except calls.
- SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.take());
- if (SrcExpr.isInvalid())
- return;
- }
-
- assert(!isPlaceholder());
+ // C-style casts can resolve __unknown_any types.
+ if (claimPlaceholder(BuiltinType::UnknownAny)) {
+ SrcExpr = Self.checkUnknownAnyCast(DestRange, DestType,
+ SrcExpr.get(), Kind,
+ ValueKind, BasePath);
+ return;
+ }
// C99 6.5.4p2: the cast type needs to be void or scalar and the expression
// type needs to be scalar.
@@ -1878,6 +1915,12 @@ void CastOperation::CheckCStyleCast() {
return;
QualType SrcType = SrcExpr.get()->getType();
+ // You can cast an _Atomic(T) to anything you can cast a T to.
+ if (const AtomicType *AtomicSrcType = SrcType->getAs<AtomicType>())
+ SrcType = AtomicSrcType->getValueType();
+
+ assert(!SrcType->isPlaceholderType());
+
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
diag::err_typecheck_cast_to_incomplete)) {
SrcExpr = ExprError();
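With the added lines, a C-style cast in C is checked against the value type inside _Atomic. Since CheckCStyleCast only runs outside C++ (see the assert above), the illustration is C11 rather than C++:

    /* Compile as C11. */
    _Atomic(int) counter;

    int main(void) {
      double d = (double)counter; /* checked as a cast from plain int */
      return (int)d;
    }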
@@ -1983,14 +2026,14 @@ void CastOperation::CheckCStyleCast() {
DestType->isArithmeticType()) {
Self.Diag(SrcExpr.get()->getLocStart(),
diag::err_cast_pointer_to_non_pointer_int)
- << SrcType << SrcExpr.get()->getSourceRange();
+ << DestType << SrcExpr.get()->getSourceRange();
SrcExpr = ExprError();
return;
}
}
// ARC imposes extra restrictions on casts.
- if (Self.getLangOptions().ObjCAutoRefCount) {
+ if (Self.getLangOpts().ObjCAutoRefCount) {
checkObjCARCConversion(Sema::CCK_CStyleCast);
if (SrcExpr.isInvalid())
return;
@@ -2035,8 +2078,9 @@ ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
Op.OpRange = SourceRange(LPLoc, CastExpr->getLocEnd());
- if (getLangOptions().CPlusPlus) {
- Op.CheckCXXCStyleCast(/*FunctionalStyle=*/ false);
+ if (getLangOpts().CPlusPlus) {
+ Op.CheckCXXCStyleCast(/*FunctionalStyle=*/ false,
+ isa<InitListExpr>(CastExpr));
} else {
Op.CheckCStyleCast();
}
@@ -2044,26 +2088,25 @@ ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
if (Op.SrcExpr.isInvalid())
return ExprError();
- return Owned(CStyleCastExpr::Create(Context, Op.ResultType, Op.ValueKind,
- Op.Kind, Op.SrcExpr.take(), &Op.BasePath,
- CastTypeInfo, LPLoc, RPLoc));
+ return Op.complete(CStyleCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.take(),
+ &Op.BasePath, CastTypeInfo, LPLoc, RPLoc));
}
ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
SourceLocation LPLoc,
Expr *CastExpr,
SourceLocation RPLoc) {
+ assert(LPLoc.isValid() && "List-initialization shouldn't get here.");
CastOperation Op(*this, CastTypeInfo->getType(), CastExpr);
Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
Op.OpRange = SourceRange(Op.DestRange.getBegin(), CastExpr->getLocEnd());
- Op.CheckCXXCStyleCast(/*FunctionalStyle=*/ true);
+ Op.CheckCXXCStyleCast(/*FunctionalStyle=*/true, /*ListInit=*/false);
if (Op.SrcExpr.isInvalid())
return ExprError();
- return Owned(CXXFunctionalCastExpr::Create(Context, Op.ResultType,
- Op.ValueKind, CastTypeInfo,
- Op.DestRange.getBegin(),
- Op.Kind, Op.SrcExpr.take(),
- &Op.BasePath, RPLoc));
+ return Op.complete(CXXFunctionalCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, CastTypeInfo, Op.DestRange.getBegin(),
+ Op.Kind, Op.SrcExpr.take(), &Op.BasePath, RPLoc));
}
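The new assert documents the division of labor: a parenthesized functional cast T(...) arrives at BuildCXXFunctionalCastExpr, while C++11 list-initialization T{...} (which has no left paren, hence no valid LPLoc) takes a separate path. In user code:

    struct S {
      S(int, int) {}
    };

    int main() {
      S a = S(1, 2); // functional cast with parens: handled here
      S b = S{1, 2}; // C++11 list-init: never reaches here, per the assert
      (void)a; (void)b;
    }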
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
index a6c49bb..fdc2349 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -30,6 +30,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "clang/Basic/TargetBuiltins.h"
@@ -42,31 +43,7 @@ using namespace sema;
SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const {
return SL->getLocationOfByte(ByteNo, PP.getSourceManager(),
- PP.getLangOptions(), PP.getTargetInfo());
-}
-
-
-/// CheckablePrintfAttr - does a function call have a "printf" attribute
-/// and arguments that merit checking?
-bool Sema::CheckablePrintfAttr(const FormatAttr *Format, CallExpr *TheCall) {
- if (Format->getType() == "printf") return true;
- if (Format->getType() == "printf0") {
- // printf0 allows null "format" string; if so don't check format/args
- unsigned format_idx = Format->getFormatIdx() - 1;
- // Does the index refer to the implicit object argument?
- if (isa<CXXMemberCallExpr>(TheCall)) {
- if (format_idx == 0)
- return false;
- --format_idx;
- }
- if (format_idx < TheCall->getNumArgs()) {
- Expr *Format = TheCall->getArg(format_idx)->IgnoreParenCasts();
- if (!Format->isNullPointerConstant(Context,
- Expr::NPC_ValueDependentIsNull))
- return true;
- }
- }
- return false;
+ PP.getLangOpts(), PP.getTargetInfo());
}
/// Checks that a call expression's argument count is the desired number.
@@ -183,43 +160,101 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
TheCall->setType(Context.IntTy);
break;
case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16:
case Builtin::BI__sync_swap:
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
return SemaBuiltinAtomicOverloaded(move(TheCallResult));
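All of the explicitly-sized __sync_*_N spellings now funnel into SemaBuiltinAtomicOverloaded alongside the generic forms, so both calls below receive the same semantic checking:

    int main() {
      int x = 0;
      __sync_fetch_and_add(&x, 1);   // generic form, checked before this change
      __sync_fetch_and_add_4(&x, 1); // explicitly-sized form, now checked identically
      return x;                      // 2
    }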
- case Builtin::BI__atomic_load:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Load);
- case Builtin::BI__atomic_store:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Store);
- case Builtin::BI__atomic_exchange:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xchg);
- case Builtin::BI__atomic_compare_exchange_strong:
- return SemaAtomicOpsOverloaded(move(TheCallResult),
- AtomicExpr::CmpXchgStrong);
- case Builtin::BI__atomic_compare_exchange_weak:
- return SemaAtomicOpsOverloaded(move(TheCallResult),
- AtomicExpr::CmpXchgWeak);
- case Builtin::BI__atomic_fetch_add:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Add);
- case Builtin::BI__atomic_fetch_sub:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Sub);
- case Builtin::BI__atomic_fetch_and:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::And);
- case Builtin::BI__atomic_fetch_or:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Or);
- case Builtin::BI__atomic_fetch_xor:
- return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xor);
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case Builtin::BI##ID: \
+ return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::AO##ID);
+#include "clang/Basic/Builtins.def"
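The hand-written list of __atomic_* cases is replaced by expanding ATOMIC_BUILTIN entries straight out of Builtins.def. A self-contained sketch of that X-macro technique, using invented names rather than the real Builtins.def contents:

    #include <cstdio>

    // The operation list lives in one place; each expansion site redefines
    // the per-entry macro to emit whatever it needs.
    #define ATOMIC_OPS(X) X(load) X(store) X(exchange)

    enum AtomicOp {
    #define X(name) AO_##name,
      ATOMIC_OPS(X)
    #undef X
    };

    static const char *opName(AtomicOp Op) {
      switch (Op) {
    #define X(name) case AO_##name: return #name;
      ATOMIC_OPS(X)
    #undef X
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", opName(AO_exchange)); }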
case Builtin::BI__builtin_annotation:
if (CheckBuiltinAnnotationString(*this, TheCall->getArg(1)))
return ExprError();
@@ -245,29 +280,52 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false) {
- bool quad = t & 0x10;
-
- switch (t & 0x7) {
- case 0: // i8
- return shift ? 7 : (8 << (int)quad) - 1;
- case 1: // i16
- return shift ? 15 : (4 << (int)quad) - 1;
- case 2: // i32
- return shift ? 31 : (2 << (int)quad) - 1;
- case 3: // i64
- return shift ? 63 : (1 << (int)quad) - 1;
- case 4: // f32
- assert(!shift && "cannot shift float types!");
- return (2 << (int)quad) - 1;
- case 5: // poly8
- return shift ? 7 : (8 << (int)quad) - 1;
- case 6: // poly16
- return shift ? 15 : (4 << (int)quad) - 1;
- case 7: // float16
- assert(!shift && "cannot shift float types!");
- return (4 << (int)quad) - 1;
- }
- return 0;
+ NeonTypeFlags Type(t);
+ int IsQuad = Type.isQuad();
+ switch (Type.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return shift ? 7 : (8 << IsQuad) - 1;
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ return shift ? 15 : (4 << IsQuad) - 1;
+ case NeonTypeFlags::Int32:
+ return shift ? 31 : (2 << IsQuad) - 1;
+ case NeonTypeFlags::Int64:
+ return shift ? 63 : (1 << IsQuad) - 1;
+ case NeonTypeFlags::Float16:
+ assert(!shift && "cannot shift float types!");
+ return (4 << IsQuad) - 1;
+ case NeonTypeFlags::Float32:
+ assert(!shift && "cannot shift float types!");
+ return (2 << IsQuad) - 1;
+ }
+ llvm_unreachable("Invalid NeonTypeFlag!");
+}
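Worked example: for a quad 32-bit vector (Int32 with IsQuad = 1) the ceiling is (2 << 1) - 1 = 3, the last lane index of an int32x4_t; with shift = true the same element type instead allows shift amounts up to 31.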
+
+/// getNeonEltType - Return the QualType corresponding to the elements of
+/// the vector type specified by the NeonTypeFlags. This is used to check
+/// the pointer arguments for Neon load/store intrinsics.
+static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context) {
+ switch (Flags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
+ case NeonTypeFlags::Int16:
+ return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
+ case NeonTypeFlags::Int32:
+ return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
+ case NeonTypeFlags::Int64:
+ return Flags.isUnsigned() ? Context.UnsignedLongLongTy : Context.LongLongTy;
+ case NeonTypeFlags::Poly8:
+ return Context.SignedCharTy;
+ case NeonTypeFlags::Poly16:
+ return Context.ShortTy;
+ case NeonTypeFlags::Float16:
+ return Context.UnsignedShortTy;
+ case NeonTypeFlags::Float32:
+ return Context.FloatTy;
+ }
+ llvm_unreachable("Invalid NeonTypeFlag!");
}
bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
@@ -275,6 +333,8 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
unsigned mask = 0;
unsigned TV = 0;
+ int PtrArgNum = -1;
+ bool HasConstPtr = false;
switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
@@ -283,15 +343,35 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
// For NEON intrinsics which are overloaded on vector element type, validate
// the immediate which specifies which variant to emit.
+ unsigned ImmArg = TheCall->getNumArgs()-1;
if (mask) {
- unsigned ArgNo = TheCall->getNumArgs()-1;
- if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
+ if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
return true;
- TV = Result.getLimitedValue(32);
- if ((TV > 31) || (mask & (1 << TV)) == 0)
+ TV = Result.getLimitedValue(64);
+ if ((TV > 63) || (mask & (1 << TV)) == 0)
return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
- << TheCall->getArg(ArgNo)->getSourceRange();
+ << TheCall->getArg(ImmArg)->getSourceRange();
+ }
+
+ if (PtrArgNum >= 0) {
+ // Check that pointer arguments have the specified type.
+ Expr *Arg = TheCall->getArg(PtrArgNum);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = ICE->getSubExpr();
+ ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
+ QualType RHSTy = RHS.get()->getType();
+ QualType EltTy = getNeonEltType(NeonTypeFlags(TV), Context);
+ if (HasConstPtr)
+ EltTy = EltTy.withConst();
+ QualType LHSTy = Context.getPointerType(EltTy);
+ AssignConvertType ConvTy;
+ ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
+ if (RHS.isInvalid())
+ return true;
+ if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy,
+ RHS.get(), AA_Assigning))
+ return true;
}
// For NEON intrinsics which take an immediate value as part of the
@@ -341,16 +421,7 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
for (specific_attr_iterator<FormatAttr>
i = FDecl->specific_attr_begin<FormatAttr>(),
e = FDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
-
- const FormatAttr *Format = *i;
- const bool b = Format->getType() == "scanf";
- if (b || CheckablePrintfAttr(Format, TheCall)) {
- bool HasVAListArg = Format->getFirstArg() == 0;
- CheckPrintfScanfArguments(TheCall, HasVAListArg,
- Format->getFormatIdx() - 1,
- HasVAListArg ? 0 : Format->getFirstArg() - 1,
- !b);
- }
+ CheckFormatArguments(*i, TheCall);
}
for (specific_attr_iterator<NonNullAttr>
@@ -360,98 +431,42 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
TheCall->getCallee()->getLocStart());
}
- // Builtin handling
- int CMF = -1;
- switch (FDecl->getBuiltinID()) {
- case Builtin::BI__builtin_memset:
- case Builtin::BI__builtin___memset_chk:
- case Builtin::BImemset:
- CMF = CMF_Memset;
- break;
-
- case Builtin::BI__builtin_memcpy:
- case Builtin::BI__builtin___memcpy_chk:
- case Builtin::BImemcpy:
- CMF = CMF_Memcpy;
- break;
-
- case Builtin::BI__builtin_memmove:
- case Builtin::BI__builtin___memmove_chk:
- case Builtin::BImemmove:
- CMF = CMF_Memmove;
- break;
+ unsigned CMId = FDecl->getMemoryFunctionKind();
+ if (CMId == 0)
+ return false;
- case Builtin::BIstrlcpy:
- case Builtin::BIstrlcat:
+ // Handle memory setting and copying functions.
+ if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat)
CheckStrlcpycatArguments(TheCall, FnInfo);
- break;
-
- case Builtin::BI__builtin_memcmp:
- CMF = CMF_Memcmp;
- break;
-
- case Builtin::BI__builtin_strncpy:
- case Builtin::BI__builtin___strncpy_chk:
- case Builtin::BIstrncpy:
- CMF = CMF_Strncpy;
- break;
-
- case Builtin::BI__builtin_strncmp:
- CMF = CMF_Strncmp;
- break;
+ else if (CMId == Builtin::BIstrncat)
+ CheckStrncatArguments(TheCall, FnInfo);
+ else
+ CheckMemaccessArguments(TheCall, CMId, FnInfo);
- case Builtin::BI__builtin_strncasecmp:
- CMF = CMF_Strncasecmp;
- break;
+ return false;
+}
- case Builtin::BI__builtin_strncat:
- case Builtin::BIstrncat:
- CMF = CMF_Strncat;
- break;
+bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
+ Expr **Args, unsigned NumArgs) {
+ for (specific_attr_iterator<FormatAttr>
+ i = Method->specific_attr_begin<FormatAttr>(),
+ e = Method->specific_attr_end<FormatAttr>(); i != e ; ++i) {
- case Builtin::BI__builtin_strndup:
- case Builtin::BIstrndup:
- CMF = CMF_Strndup;
- break;
+ CheckFormatArguments(*i, Args, NumArgs, false, lbrac,
+ Method->getSourceRange());
+ }
- default:
- if (FDecl->getLinkage() == ExternalLinkage &&
- (!getLangOptions().CPlusPlus || FDecl->isExternC())) {
- if (FnInfo->isStr("memset"))
- CMF = CMF_Memset;
- else if (FnInfo->isStr("memcpy"))
- CMF = CMF_Memcpy;
- else if (FnInfo->isStr("memmove"))
- CMF = CMF_Memmove;
- else if (FnInfo->isStr("memcmp"))
- CMF = CMF_Memcmp;
- else if (FnInfo->isStr("strncpy"))
- CMF = CMF_Strncpy;
- else if (FnInfo->isStr("strncmp"))
- CMF = CMF_Strncmp;
- else if (FnInfo->isStr("strncasecmp"))
- CMF = CMF_Strncasecmp;
- else if (FnInfo->isStr("strncat"))
- CMF = CMF_Strncat;
- else if (FnInfo->isStr("strndup"))
- CMF = CMF_Strndup;
- }
- break;
+ // diagnose nonnull arguments.
+ for (specific_attr_iterator<NonNullAttr>
+ i = Method->specific_attr_begin<NonNullAttr>(),
+ e = Method->specific_attr_end<NonNullAttr>(); i != e; ++i) {
+ CheckNonNullArguments(*i, Args, lbrac);
}
-
- // Memset/memcpy/memmove handling
- if (CMF != -1)
- CheckMemaccessArguments(TheCall, CheckedMemoryFunction(CMF), FnInfo);
return false;
}
bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
- // Printf checking.
- const FormatAttr *Format = NDecl->getAttr<FormatAttr>();
- if (!Format)
- return false;
-
const VarDecl *V = dyn_cast<VarDecl>(NDecl);
if (!V)
return false;
@@ -460,84 +475,187 @@ bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
if (!Ty->isBlockPointerType())
return false;
- const bool b = Format->getType() == "scanf";
- if (!b && !CheckablePrintfAttr(Format, TheCall))
- return false;
-
- bool HasVAListArg = Format->getFirstArg() == 0;
- CheckPrintfScanfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1,
- HasVAListArg ? 0 : Format->getFirstArg() - 1, !b);
+ // format string checking.
+ for (specific_attr_iterator<FormatAttr>
+ i = NDecl->specific_attr_begin<FormatAttr>(),
+ e = NDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+ CheckFormatArguments(*i, TheCall);
+ }
return false;
}
-ExprResult
-Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op) {
+ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
+ AtomicExpr::AtomicOp Op) {
CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
- // All these operations take one of the following four forms:
- // T __atomic_load(_Atomic(T)*, int) (loads)
- // T* __atomic_add(_Atomic(T*)*, ptrdiff_t, int) (pointer add/sub)
- // int __atomic_compare_exchange_strong(_Atomic(T)*, T*, T, int, int)
- // (cmpxchg)
- // T __atomic_exchange(_Atomic(T)*, T, int) (everything else)
-  // where T is an appropriate type, and the int parameters are for orderings.
- unsigned NumVals = 1;
- unsigned NumOrders = 1;
- if (Op == AtomicExpr::Load) {
- NumVals = 0;
- } else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong) {
- NumVals = 2;
- NumOrders = 2;
- }
-
- if (TheCall->getNumArgs() < NumVals+NumOrders+1) {
+ // All these operations take one of the following forms:
+ enum {
+ // C __c11_atomic_init(A *, C)
+ Init,
+ // C __c11_atomic_load(A *, int)
+ Load,
+ // void __atomic_load(A *, CP, int)
+ Copy,
+ // C __c11_atomic_add(A *, M, int)
+ Arithmetic,
+ // C __atomic_exchange_n(A *, CP, int)
+ Xchg,
+ // void __atomic_exchange(A *, C *, CP, int)
+ GNUXchg,
+ // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
+ C11CmpXchg,
+ // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
+ GNUCmpXchg
+ } Form = Init;
+ const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 4, 5, 6 };
+ const unsigned NumVals[] = { 1, 0, 1, 1, 1, 2, 2, 3 };
+ // where:
+ // C is an appropriate type,
+ // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
+ // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
+ // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
+ // the int parameters are for orderings.
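Concrete GNU-spelling calls matching three rows of the table above (valid C++; argument counts follow NumArgs[Form]):

    int main() {
      int x = 0, expected = 0;
      __atomic_store_n(&x, 1, __ATOMIC_SEQ_CST);              // store (Copy row): 3 args
      int old = __atomic_exchange_n(&x, 2, __ATOMIC_SEQ_CST); // Xchg row: 3 args
      bool ok = __atomic_compare_exchange_n(&x, &expected, 3,
                                            /*weak=*/false,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST); // GNUCmpXchg row: 6 args
      return old + ok;
    }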
+
+ assert(AtomicExpr::AO__c11_atomic_init == 0 &&
+ AtomicExpr::AO__c11_atomic_fetch_xor + 1 == AtomicExpr::AO__atomic_load
+ && "need to update code for modified C11 atomics");
+ bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init &&
+ Op <= AtomicExpr::AO__c11_atomic_fetch_xor;
+ bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
+ Op == AtomicExpr::AO__atomic_store_n ||
+ Op == AtomicExpr::AO__atomic_exchange_n ||
+ Op == AtomicExpr::AO__atomic_compare_exchange_n;
+ bool IsAddSub = false;
+
+ switch (Op) {
+ case AtomicExpr::AO__c11_atomic_init:
+ Form = Init;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ Form = Load;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ Form = Copy;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ IsAddSub = true;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ Form = Arithmetic;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ Form = Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
+ Form = GNUXchg;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ Form = C11CmpXchg;
+ break;
+
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ Form = GNUCmpXchg;
+ break;
+ }
+
+ // Check we have the right number of arguments.
+ if (TheCall->getNumArgs() < NumArgs[Form]) {
Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
- << 0 << NumVals+NumOrders+1 << TheCall->getNumArgs()
+ << 0 << NumArgs[Form] << TheCall->getNumArgs()
<< TheCall->getCallee()->getSourceRange();
return ExprError();
- } else if (TheCall->getNumArgs() > NumVals+NumOrders+1) {
- Diag(TheCall->getArg(NumVals+NumOrders+1)->getLocStart(),
+ } else if (TheCall->getNumArgs() > NumArgs[Form]) {
+ Diag(TheCall->getArg(NumArgs[Form])->getLocStart(),
diag::err_typecheck_call_too_many_args)
- << 0 << NumVals+NumOrders+1 << TheCall->getNumArgs()
+ << 0 << NumArgs[Form] << TheCall->getNumArgs()
<< TheCall->getCallee()->getSourceRange();
return ExprError();
}
- // Inspect the first argument of the atomic operation. This should always be
- // a pointer to an _Atomic type.
+ // Inspect the first argument of the atomic operation.
Expr *Ptr = TheCall->getArg(0);
Ptr = DefaultFunctionArrayLvalueConversion(Ptr).get();
const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
if (!pointerType) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
<< Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
- QualType AtomTy = pointerType->getPointeeType();
- if (!AtomTy->isAtomicType()) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
- << Ptr->getType() << Ptr->getSourceRange();
- return ExprError();
+ // For a __c11 builtin, this should be a pointer to an _Atomic type.
+ QualType AtomTy = pointerType->getPointeeType(); // 'A'
+ QualType ValType = AtomTy; // 'C'
+ if (IsC11) {
+ if (!AtomTy->isAtomicType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ ValType = AtomTy->getAs<AtomicType>()->getValueType();
}
- QualType ValType = AtomTy->getAs<AtomicType>()->getValueType();
- if ((Op == AtomicExpr::Add || Op == AtomicExpr::Sub) &&
- !ValType->isIntegerType() && !ValType->isPointerType()) {
+ // For an arithmetic operation, the implied arithmetic must be well-formed.
+ if (Form == Arithmetic) {
+ // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
+ if (IsAddSub && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ if (!IsAddSub && !ValType->isIntegerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ // For __atomic_*_n operations, the value type must be a scalar integral or
+ // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
- << Ptr->getType() << Ptr->getSourceRange();
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
- if (!ValType->isIntegerType() &&
- (Op == AtomicExpr::And || Op == AtomicExpr::Or || Op == AtomicExpr::Xor)){
- Diag(DRE->getLocStart(), diag::err_atomic_op_logical_needs_atomic_int)
+ if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context)) {
+ // For GNU atomics, require a trivially-copyable type. This is not part of
+ // the GNU atomics specification, but we enforce it for sanity.
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy)
<< Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
+ // FIXME: For any builtin other than a load, the ValType must not be
+ // const-qualified.
+
switch (ValType.getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -547,61 +665,107 @@ Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op)
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
+ // FIXME: Can this happen? By this point, ValType should be known
+ // to be trivially copyable.
Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
<< ValType << Ptr->getSourceRange();
return ExprError();
}
QualType ResultType = ValType;
- if (Op == AtomicExpr::Store)
+ if (Form == Copy || Form == GNUXchg || Form == Init)
ResultType = Context.VoidTy;
- else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong)
+ else if (Form == C11CmpXchg || Form == GNUCmpXchg)
ResultType = Context.BoolTy;
+ // The type of a parameter passed 'by value'. In the GNU atomics, such
+ // arguments are actually passed as pointers.
+ QualType ByValType = ValType; // 'CP'
+ if (!IsC11 && !IsN)
+ ByValType = Ptr->getType();
+
// The first argument --- the pointer --- has a fixed type; we
// deduce the types of the rest of the arguments accordingly. Walk
// the remaining arguments, converting them to the deduced value type.
- for (unsigned i = 1; i != NumVals+NumOrders+1; ++i) {
- ExprResult Arg = TheCall->getArg(i);
+ for (unsigned i = 1; i != NumArgs[Form]; ++i) {
QualType Ty;
- if (i < NumVals+1) {
- // The second argument to a cmpxchg is a pointer to the data which will
- // be exchanged. The second argument to a pointer add/subtract is the
- // amount to add/subtract, which must be a ptrdiff_t. The third
- // argument to a cmpxchg and the second argument in all other cases
- // is the type of the value.
- if (i == 1 && (Op == AtomicExpr::CmpXchgWeak ||
- Op == AtomicExpr::CmpXchgStrong))
- Ty = Context.getPointerType(ValType.getUnqualifiedType());
- else if (!ValType->isIntegerType() &&
- (Op == AtomicExpr::Add || Op == AtomicExpr::Sub))
- Ty = Context.getPointerDiffType();
- else
- Ty = ValType;
+ if (i < NumVals[Form] + 1) {
+ switch (i) {
+ case 1:
+ // The second argument is the non-atomic operand. For arithmetic, this
+ // is always passed by value, and for a compare_exchange it is always
+ // passed by address. For the rest, GNU uses by-address and C11 uses
+ // by-value.
+ assert(Form != Load);
+ if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
+ Ty = ValType;
+ else if (Form == Copy || Form == Xchg)
+ Ty = ByValType;
+ else if (Form == Arithmetic)
+ Ty = Context.getPointerDiffType();
+ else
+ Ty = Context.getPointerType(ValType.getUnqualifiedType());
+ break;
+ case 2:
+ // The third argument to compare_exchange / GNU exchange is a
+ // (pointer to a) desired value.
+ Ty = ByValType;
+ break;
+ case 3:
+ // The fourth argument to GNU compare_exchange is a 'weak' flag.
+ Ty = Context.BoolTy;
+ break;
+ }
} else {
// The order(s) are always converted to int.
Ty = Context.IntTy;
}
+
InitializedEntity Entity =
InitializedEntity::InitializeParameter(Context, Ty, false);
+ ExprResult Arg = TheCall->getArg(i);
Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
if (Arg.isInvalid())
return true;
TheCall->setArg(i, Arg.get());
}
+ // Permute the arguments into a 'consistent' order.
SmallVector<Expr*, 5> SubExprs;
SubExprs.push_back(Ptr);
- if (Op == AtomicExpr::Load) {
+ switch (Form) {
+ case Init:
+ // Note, AtomicExpr::getVal1() has a special case for this atomic.
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ break;
+ case Load:
SubExprs.push_back(TheCall->getArg(1)); // Order
- } else if (Op != AtomicExpr::CmpXchgWeak && Op != AtomicExpr::CmpXchgStrong) {
+ break;
+ case Copy:
+ case Arithmetic:
+ case Xchg:
SubExprs.push_back(TheCall->getArg(2)); // Order
SubExprs.push_back(TheCall->getArg(1)); // Val1
- } else {
+ break;
+ case GNUXchg:
+ // Note, AtomicExpr::getVal2() has a special case for this atomic.
SubExprs.push_back(TheCall->getArg(3)); // Order
SubExprs.push_back(TheCall->getArg(1)); // Val1
SubExprs.push_back(TheCall->getArg(2)); // Val2
+ break;
+ case C11CmpXchg:
+ SubExprs.push_back(TheCall->getArg(3)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
SubExprs.push_back(TheCall->getArg(4)); // OrderFail
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ break;
+ case GNUCmpXchg:
+ SubExprs.push_back(TheCall->getArg(4)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(5)); // OrderFail
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ SubExprs.push_back(TheCall->getArg(3)); // Weak
+ break;
}
return Owned(new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
@@ -663,6 +827,12 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// casts here.
// FIXME: We don't allow floating point scalars as input.
Expr *FirstArg = TheCall->getArg(0);
+ ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
+ if (FirstArgResult.isInvalid())
+ return ExprError();
+ FirstArg = FirstArgResult.take();
+ TheCall->setArg(0, FirstArg);
+
const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
if (!pointerType) {
Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
@@ -749,34 +919,145 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
unsigned BuiltinIndex, NumFixed = 1;
switch (BuiltinID) {
default: llvm_unreachable("Unknown overloaded atomic builtin!");
- case Builtin::BI__sync_fetch_and_add: BuiltinIndex = 0; break;
- case Builtin::BI__sync_fetch_and_sub: BuiltinIndex = 1; break;
- case Builtin::BI__sync_fetch_and_or: BuiltinIndex = 2; break;
- case Builtin::BI__sync_fetch_and_and: BuiltinIndex = 3; break;
- case Builtin::BI__sync_fetch_and_xor: BuiltinIndex = 4; break;
-
- case Builtin::BI__sync_add_and_fetch: BuiltinIndex = 5; break;
- case Builtin::BI__sync_sub_and_fetch: BuiltinIndex = 6; break;
- case Builtin::BI__sync_and_and_fetch: BuiltinIndex = 7; break;
- case Builtin::BI__sync_or_and_fetch: BuiltinIndex = 8; break;
- case Builtin::BI__sync_xor_and_fetch: BuiltinIndex = 9; break;
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ BuiltinIndex = 0;
+ break;
+
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ BuiltinIndex = 1;
+ break;
+
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ BuiltinIndex = 2;
+ break;
+
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ BuiltinIndex = 3;
+ break;
+
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ BuiltinIndex = 4;
+ break;
+
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ BuiltinIndex = 5;
+ break;
+
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ BuiltinIndex = 6;
+ break;
+
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ BuiltinIndex = 7;
+ break;
+
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ BuiltinIndex = 8;
+ break;
+
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ BuiltinIndex = 9;
+ break;
case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
BuiltinIndex = 10;
NumFixed = 2;
break;
+
case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
BuiltinIndex = 11;
NumFixed = 2;
ResultType = Context.BoolTy;
break;
- case Builtin::BI__sync_lock_test_and_set: BuiltinIndex = 12; break;
+
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ BuiltinIndex = 12;
+ break;
+
case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16:
BuiltinIndex = 13;
NumFixed = 0;
ResultType = Context.VoidTy;
break;
- case Builtin::BI__sync_swap: BuiltinIndex = 14; break;
+
+ case Builtin::BI__sync_swap:
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ BuiltinIndex = 14;
+ break;
}
// Now that we know how many fixed arguments we expect, first check that we
@@ -827,7 +1108,9 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
DeclRefExpr* NewDRE = DeclRefExpr::Create(
Context,
DRE->getQualifierLoc(),
+ SourceLocation(),
NewBuiltinDecl,
+ /*enclosing*/ false,
DRE->getLocation(),
NewBuiltinDecl->getType(),
DRE->getValueKind());
@@ -1020,7 +1303,6 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
"promotion from float to double is the only expected cast here");
Cast->setSubExpr(0);
TheCall->setArg(NumArgs-1, CastArg);
- OrigArg = CastArg;
}
}
@@ -1204,33 +1486,35 @@ bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
}
// Handle i > 1 ? "x" : "y", recursively.
-bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
- bool HasVAListArg,
+bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
+ unsigned NumArgs, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
- bool isPrintf) {
+ FormatStringType Type, bool inFunctionCall) {
tryAgain:
if (E->isTypeDependent() || E->isValueDependent())
return false;
- E = E->IgnoreParens();
+ E = E->IgnoreParenCasts();
- switch (E->getStmtClass()) {
- case Stmt::BinaryConditionalOperatorClass:
- case Stmt::ConditionalOperatorClass: {
- const AbstractConditionalOperator *C = cast<AbstractConditionalOperator>(E);
- return SemaCheckStringLiteral(C->getTrueExpr(), TheCall, HasVAListArg,
- format_idx, firstDataArg, isPrintf)
- && SemaCheckStringLiteral(C->getFalseExpr(), TheCall, HasVAListArg,
- format_idx, firstDataArg, isPrintf);
- }
-
- case Stmt::IntegerLiteralClass:
+ if (E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull))
// Technically -Wformat-nonliteral does not warn about this case.
// The behavior of printf and friends in this case is implementation
// dependent. Ideally if the format string cannot be null then
// it should have a 'nonnull' attribute in the function prototype.
return true;
+ switch (E->getStmtClass()) {
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: {
+ const AbstractConditionalOperator *C = cast<AbstractConditionalOperator>(E);
+ return SemaCheckStringLiteral(C->getTrueExpr(), Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type,
+ inFunctionCall)
+ && SemaCheckStringLiteral(C->getFalseExpr(), Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type,
+ inFunctionCall);
+ }
+
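Because both arms of a conditional operator are recursed into, a format mismatch in either arm is still diagnosed:

    #include <cstdio>

    int main(int argc, char **argv) {
      (void)argv;
      // The "%s" arm draws a -Wformat warning even though only one
      // arm mismatches the int argument.
      std::printf(argc > 1 ? "%d\n" : "%s\n", argc);
      return 0;
    }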
case Stmt::ImplicitCastExprClass: {
E = cast<ImplicitCastExpr>(E)->getSubExpr();
goto tryAgain;
@@ -1263,13 +1547,17 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
} else if (const PointerType *PT = T->getAs<PointerType>()) {
isConstant = T.isConstant(Context) &&
PT->getPointeeType().isConstant(Context);
+ } else if (T->isObjCObjectPointerType()) {
+ // In ObjC, there is usually no "const ObjectPointer" type,
+ // so don't check if the pointee type is constant.
+ isConstant = T.isConstant(Context);
}
if (isConstant) {
if (const Expr *Init = VD->getAnyInitializer())
- return SemaCheckStringLiteral(Init, TheCall,
+ return SemaCheckStringLiteral(Init, Args, NumArgs,
HasVAListArg, format_idx, firstDataArg,
- isPrintf);
+ Type, /*inFunctionCall*/false);
}
// For vprintf* functions (i.e., HasVAListArg==true), we add a
@@ -1286,32 +1574,46 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
// vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
// ...
//
- //
- // FIXME: We don't have full attribute support yet, so just check to see
- // if the argument is a DeclRefExpr that references a parameter. We'll
- // add proper support for checking the attribute later.
- if (HasVAListArg)
- if (isa<ParmVarDecl>(VD))
- return true;
+ if (HasVAListArg) {
+ if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
+ int PVIndex = PV->getFunctionScopeIndex() + 1;
+ for (specific_attr_iterator<FormatAttr>
+ i = ND->specific_attr_begin<FormatAttr>(),
+ e = ND->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+ FormatAttr *PVFormat = *i;
+ // adjust for implicit parameter
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isInstance())
+ ++PVIndex;
+ // We also check if the formats are compatible.
+ // We can't pass a 'scanf' string to a 'printf' function.
+ if (PVIndex == PVFormat->getFormatIdx() &&
+ Type == GetFormatStringType(PVFormat))
+ return true;
+ }
+ }
+ }
+ }
}
return false;
}
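The replacement code accepts a forwarded format parameter only when the enclosing function carries a format attribute at the matching index and of the matching kind, which is exactly the vprintf-wrapper pattern:

    #include <cstdarg>
    #include <cstdio>

    __attribute__((format(printf, 1, 2)))
    static void log_msg(const char *fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      // 'fmt' is the parameter named by the attribute above (index 1,
      // kind "printf"), so forwarding it does not warn.
      std::vprintf(fmt, ap);
      va_end(ap);
    }

    int main() { log_msg("%d\n", 7); }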
- case Stmt::CallExprClass: {
+ case Stmt::CallExprClass:
+ case Stmt::CXXMemberCallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
- if (const ImplicitCastExpr *ICE
- = dyn_cast<ImplicitCastExpr>(CE->getCallee())) {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
- if (const FormatArgAttr *FA = FD->getAttr<FormatArgAttr>()) {
- unsigned ArgIndex = FA->getFormatIdx();
- const Expr *Arg = CE->getArg(ArgIndex - 1);
-
- return SemaCheckStringLiteral(Arg, TheCall, HasVAListArg,
- format_idx, firstDataArg, isPrintf);
- }
- }
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
+ if (const FormatArgAttr *FA = ND->getAttr<FormatArgAttr>()) {
+ unsigned ArgIndex = FA->getFormatIdx();
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isInstance())
+ --ArgIndex;
+ const Expr *Arg = CE->getArg(ArgIndex - 1);
+
+ return SemaCheckStringLiteral(Arg, Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type,
+ inFunctionCall);
}
}
@@ -1327,8 +1629,8 @@ bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
StrE = cast<StringLiteral>(E);
if (StrE) {
- CheckFormatString(StrE, E, TheCall, HasVAListArg, format_idx,
- firstDataArg, isPrintf);
+ CheckFormatString(StrE, E, Args, NumArgs, HasVAListArg, format_idx,
+ firstDataArg, Type, inFunctionCall);
return true;
}
@@ -1354,39 +1656,58 @@ Sema::CheckNonNullArguments(const NonNullAttr *NonNull,
}
}
+Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
+ return llvm::StringSwitch<FormatStringType>(Format->getType())
+ .Case("scanf", FST_Scanf)
+ .Cases("printf", "printf0", FST_Printf)
+ .Cases("NSString", "CFString", FST_NSString)
+ .Case("strftime", FST_Strftime)
+ .Case("strfmon", FST_Strfmon)
+ .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
+ .Default(FST_Unknown);
+}
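llvm::StringSwitch gives a first-match string-to-enum table without a hand-written if/else chain; the same idiom in miniature (assumes LLVM's ADT headers are available, as they are throughout this tree):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    enum Kind { K_Printf, K_Scanf, K_Unknown };

    // First match wins; Cases() lets several spellings share one result.
    static Kind classify(llvm::StringRef S) {
      return llvm::StringSwitch<Kind>(S)
          .Cases("printf", "printf0", K_Printf)
          .Case("scanf", K_Scanf)
          .Default(K_Unknown);
    }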
+
/// CheckPrintfScanfArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
-void
-Sema::CheckPrintfScanfArguments(const CallExpr *TheCall, bool HasVAListArg,
- unsigned format_idx, unsigned firstDataArg,
- bool isPrintf) {
-
- const Expr *Fn = TheCall->getCallee();
-
+void Sema::CheckFormatArguments(const FormatAttr *Format, CallExpr *TheCall) {
+ bool IsCXXMember = false;
// The way the format attribute works in GCC, the implicit this argument
// of member functions is counted. However, it doesn't appear in our own
// lists, so decrement format_idx in that case.
- if (isa<CXXMemberCallExpr>(TheCall)) {
- const CXXMethodDecl *method_decl =
- dyn_cast<CXXMethodDecl>(TheCall->getCalleeDecl());
- if (method_decl && method_decl->isInstance()) {
- // Catch a format attribute mistakenly referring to the object argument.
- if (format_idx == 0)
- return;
- --format_idx;
- if(firstDataArg != 0)
- --firstDataArg;
- }
+ IsCXXMember = isa<CXXMemberCallExpr>(TheCall);
+ CheckFormatArguments(Format, TheCall->getArgs(), TheCall->getNumArgs(),
+ IsCXXMember, TheCall->getRParenLoc(),
+ TheCall->getCallee()->getSourceRange());
+}
+
+void Sema::CheckFormatArguments(const FormatAttr *Format, Expr **Args,
+ unsigned NumArgs, bool IsCXXMember,
+ SourceLocation Loc, SourceRange Range) {
+ bool HasVAListArg = Format->getFirstArg() == 0;
+ unsigned format_idx = Format->getFormatIdx() - 1;
+ unsigned firstDataArg = HasVAListArg ? 0 : Format->getFirstArg() - 1;
+ if (IsCXXMember) {
+ if (format_idx == 0)
+ return;
+ --format_idx;
+ if (firstDataArg != 0)
+ --firstDataArg;
}
+ CheckFormatArguments(Args, NumArgs, HasVAListArg, format_idx,
+ firstDataArg, GetFormatStringType(Format), Loc, Range);
+}
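A sketch of the C++ member case this adjusts for (hypothetical class): GCC counts the implicit 'this' as argument 1, so the attribute indices are written one higher than the declared parameter positions, and the decrements above undo that shift:

    struct Logger {
      /* 'this' is argument 1, so the format string is argument 2. */
      void log(const char *fmt, ...) __attribute__((format(printf, 2, 3)));
    };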
+void Sema::CheckFormatArguments(Expr **Args, unsigned NumArgs,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ SourceLocation Loc, SourceRange Range) {
// CHECK: printf/scanf-like function is called with no format string.
- if (format_idx >= TheCall->getNumArgs()) {
- Diag(TheCall->getRParenLoc(), diag::warn_missing_format_string)
- << Fn->getSourceRange();
+ if (format_idx >= NumArgs) {
+ Diag(Loc, diag::warn_missing_format_string) << Range;
return;
}
- const Expr *OrigFormatExpr = TheCall->getArg(format_idx)->IgnoreParenCasts();
+ const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();
// CHECK: format string is not a string literal.
//
@@ -1400,18 +1721,30 @@ Sema::CheckPrintfScanfArguments(const CallExpr *TheCall, bool HasVAListArg,
// C string (e.g. "%d")
// ObjC string uses the same format specifiers as C string, so we can use
// the same format string checking logic for both ObjC and C strings.
- if (SemaCheckStringLiteral(OrigFormatExpr, TheCall, HasVAListArg, format_idx,
- firstDataArg, isPrintf))
+ if (SemaCheckStringLiteral(OrigFormatExpr, Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type))
return; // Literal format string found, check done!
+ // Strftime is a special case: it always takes a single 'time' argument,
+ // so passing a non-literal format string is safe.
+ if (Type == FST_Strftime)
+ return;
+
+ // Do not emit a diagnostic when the string parameter is a macro expansion
+ // and the format is either NSString or CFString. This is a hack to avoid
+ // warning on the NSLocalizedString and CFCopyLocalizedString macros, which
+ // are usually used in place of NS and CF string literals.
+ if (Type == FST_NSString && Args[format_idx]->getLocStart().isMacroID())
+ return;
+
// If there are no arguments specified, warn with -Wformat-security, otherwise
// warn only with -Wformat-nonliteral.
- if (TheCall->getNumArgs() == format_idx+1)
- Diag(TheCall->getArg(format_idx)->getLocStart(),
+ if (NumArgs == format_idx+1)
+ Diag(Args[format_idx]->getLocStart(),
diag::warn_format_nonliteral_noargs)
<< OrigFormatExpr->getSourceRange();
else
- Diag(TheCall->getArg(format_idx)->getLocStart(),
+ Diag(Args[format_idx]->getLocStart(),
diag::warn_format_nonliteral)
<< OrigFormatExpr->getSourceRange();
}
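The two diagnostics differ roughly as follows (illustrative calls):

    void f(const char *s) {
      printf(s);     /* no data arguments: -Wformat-security style warning */
      printf(s, 1);  /* data arguments present: -Wformat-nonliteral warning */
    }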
@@ -1427,24 +1760,28 @@ protected:
const bool IsObjCLiteral;
const char *Beg; // Start of format string.
const bool HasVAListArg;
- const CallExpr *TheCall;
+ const Expr * const *Args;
+ const unsigned NumArgs;
unsigned FormatIdx;
llvm::BitVector CoveredArgs;
bool usesPositionalArgs;
bool atFirstArg;
+ bool inFunctionCall;
public:
CheckFormatHandler(Sema &s, const StringLiteral *fexpr,
const Expr *origFormatExpr, unsigned firstDataArg,
unsigned numDataArgs, bool isObjCLiteral,
const char *beg, bool hasVAListArg,
- const CallExpr *theCall, unsigned formatIdx)
+ Expr **args, unsigned numArgs,
+ unsigned formatIdx, bool inFunctionCall)
: S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr),
FirstDataArg(firstDataArg),
NumDataArgs(numDataArgs),
IsObjCLiteral(isObjCLiteral), Beg(beg),
HasVAListArg(hasVAListArg),
- TheCall(theCall), FormatIdx(formatIdx),
- usesPositionalArgs(false), atFirstArg(true) {
+ Args(args), NumArgs(numArgs), FormatIdx(formatIdx),
+ usesPositionalArgs(false), atFirstArg(true),
+ inFunctionCall(inFunctionCall) {
CoveredArgs.resize(numDataArgs);
CoveredArgs.reset();
}
@@ -1453,7 +1790,22 @@ public:
void HandleIncompleteSpecifier(const char *startSpecifier,
unsigned specifierLen);
-
+
+ void HandleNonStandardLengthModifier(
+ const analyze_format_string::LengthModifier &LM,
+ const char *startSpecifier, unsigned specifierLen);
+
+ void HandleNonStandardConversionSpecifier(
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen);
+
+ void HandleNonStandardConversionSpecification(
+ const analyze_format_string::LengthModifier &LM,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen);
+
+ virtual void HandlePosition(const char *startPos, unsigned posLen);
+
virtual void HandleInvalidPosition(const char *startSpecifier,
unsigned specifierLen,
analyze_format_string::PositionContext p);
@@ -1462,11 +1814,23 @@ public:
void HandleNullChar(const char *nullCharacter);
+ template <typename Range>
+ static void EmitFormatDiagnostic(Sema &S, bool inFunctionCall,
+ const Expr *ArgumentExpr,
+ PartialDiagnostic PDiag,
+ SourceLocation StringLoc,
+ bool IsStringLocation, Range StringRange,
+ FixItHint Fixit = FixItHint());
+
protected:
bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
const char *startSpec,
unsigned specifierLen,
const char *csStart, unsigned csLen);
+
+ void HandlePositionalNonpositionalArgs(SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen);
SourceRange getFormatStringRange();
CharSourceRange getSpecifierRange(const char *startSpecifier,
@@ -1479,6 +1843,14 @@ protected:
const analyze_format_string::ConversionSpecifier &CS,
const char *startSpecifier, unsigned specifierLen,
unsigned argIndex);
+
+ template <typename Range>
+ void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
+ bool IsStringLocation, Range StringRange,
+ FixItHint Fixit = FixItHint());
+
+ void CheckPositionalAndNonpositionalArgs(
+ const analyze_format_string::FormatSpecifier *FS);
};
}
@@ -1503,37 +1875,80 @@ SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
unsigned specifierLen){
- SourceLocation Loc = getLocationOfByte(startSpecifier);
- S.Diag(Loc, diag::warn_printf_incomplete_specifier)
- << getSpecifierRange(startSpecifier, specifierLen);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
+ getLocationOfByte(startSpecifier),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandleNonStandardLengthModifier(
+ const analyze_format_string::LengthModifier &LM,
+ const char *startSpecifier, unsigned specifierLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << LM.toString()
+ << 0,
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandleNonStandardConversionSpecifier(
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << CS.toString()
+ << 1,
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandleNonStandardConversionSpecification(
+ const analyze_format_string::LengthModifier &LM,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_conversion_spec)
+ << LM.toString() << CS.toString(),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandlePosition(const char *startPos,
+ unsigned posLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
+ getLocationOfByte(startPos),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
}
void
CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
analyze_format_string::PositionContext p) {
- SourceLocation Loc = getLocationOfByte(startPos);
- S.Diag(Loc, diag::warn_format_invalid_positional_specifier)
- << (unsigned) p << getSpecifierRange(startPos, posLen);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
+ << (unsigned) p,
+ getLocationOfByte(startPos), /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
}
void CheckFormatHandler::HandleZeroPosition(const char *startPos,
unsigned posLen) {
- SourceLocation Loc = getLocationOfByte(startPos);
- S.Diag(Loc, diag::warn_format_zero_positional_specifier)
- << getSpecifierRange(startPos, posLen);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
+ getLocationOfByte(startPos),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
}
void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
if (!IsObjCLiteral) {
// The presence of a null character is likely an error.
- S.Diag(getLocationOfByte(nullCharacter),
- diag::warn_printf_format_string_contains_null_char)
- << getFormatStringRange();
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_format_string_contains_null_char),
+ getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
+ getFormatStringRange());
}
}
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
- return TheCall->getArg(FirstDataArg + i);
+ return Args[FirstDataArg + i];
}
void CheckFormatHandler::DoneProcessing() {
@@ -1545,9 +1960,9 @@ void CheckFormatHandler::DoneProcessing() {
signed notCoveredArg = CoveredArgs.find_first();
if (notCoveredArg >= 0) {
assert((unsigned)notCoveredArg < NumDataArgs);
- S.Diag(getDataArg((unsigned) notCoveredArg)->getLocStart(),
- diag::warn_printf_data_arg_not_used)
- << getFormatStringRange();
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_data_arg_not_used),
+ getDataArg((unsigned) notCoveredArg)->getLocStart(),
+ /*IsStringLocation*/false, getFormatStringRange());
}
}
}
@@ -1575,13 +1990,23 @@ CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
keepGoing = false;
}
- S.Diag(Loc, diag::warn_format_invalid_conversion)
- << StringRef(csStart, csLen)
- << getSpecifierRange(startSpec, specifierLen);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_conversion)
+ << StringRef(csStart, csLen),
+ Loc, /*IsStringLocation*/true,
+ getSpecifierRange(startSpec, specifierLen));
return keepGoing;
}
+void
+CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen) {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
+ Loc, /*IsStringLocation*/true, getSpecifierRange(startSpec, specifierLen));
+}
+
bool
CheckFormatHandler::CheckNumArgs(
const analyze_format_string::FormatSpecifier &FS,
@@ -1589,23 +2014,74 @@ CheckFormatHandler::CheckNumArgs(
const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
if (argIndex >= NumDataArgs) {
- if (FS.usesPositionalArg()) {
- S.Diag(getLocationOfByte(CS.getStart()),
- diag::warn_printf_positional_arg_exceeds_data_args)
- << (argIndex+1) << NumDataArgs
- << getSpecifierRange(startSpecifier, specifierLen);
- }
- else {
- S.Diag(getLocationOfByte(CS.getStart()),
- diag::warn_printf_insufficient_data_args)
- << getSpecifierRange(startSpecifier, specifierLen);
- }
-
+ PartialDiagnostic PDiag = FS.usesPositionalArg()
+ ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
+ << (argIndex+1) << NumDataArgs)
+ : S.PDiag(diag::warn_printf_insufficient_data_args);
+ EmitFormatDiagnostic(
+ PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
return false;
}
return true;
}
+template<typename Range>
+void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
+ SourceLocation Loc,
+ bool IsStringLocation,
+ Range StringRange,
+ FixItHint FixIt) {
+ EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
+ Loc, IsStringLocation, StringRange, FixIt);
+}
+
+/// \brief If the format string is not within the function call, emit a note
+/// so that both the function call and the format string appear in the
+/// diagnostic messages.
+///
+/// \param inFunctionCall if true, the format string is within the function
+/// call and only one diagnostic message will be produced. Otherwise, an
+/// extra note will be emitted pointing to location of the format string.
+///
+/// \param ArgumentExpr the expression that is passed as the format string
+/// argument in the function call. Used for getting locations when two
+/// diagnostics are emitted.
+///
+/// \param PDiag the callee should already have provided any strings for the
+/// diagnostic message. This function only adds locations and fixits
+/// to diagnostics.
+///
+/// \param Loc primary location for diagnostic. If two diagnostics are
+/// required, one will be at Loc and a new SourceLocation will be created for
+/// the other one.
+///
+/// \param IsStringLocation if true, Loc points into the format string and is
+/// used for the note; the primary diagnostic is placed at the argument
+/// expression. Otherwise, Loc points at the argument list and is used with
+/// PDiag.
+///
+/// \param StringRange some or all of the string to highlight. This is
+/// templated so it can accept either a CharSourceRange or a SourceRange.
+///
+/// \param Fixit optional fix it hint for the format string.
+template<typename Range>
+void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall,
+ const Expr *ArgumentExpr,
+ PartialDiagnostic PDiag,
+ SourceLocation Loc,
+ bool IsStringLocation,
+ Range StringRange,
+ FixItHint FixIt) {
+ if (InFunctionCall)
+ S.Diag(Loc, PDiag) << StringRange << FixIt;
+ else {
+ S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
+ << ArgumentExpr->getSourceRange();
+ S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
+ diag::note_format_string_defined)
+ << StringRange << FixIt;
+ }
+}
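A hedged sketch of the inFunctionCall == false case: when the format string reaches the call through a variable, the warning lands on the call and the new note points at the string's definition:

    void f(int x) {
      const char fmt[] = "%s";  /* note: format string is defined here */
      printf(fmt, x);           /* primary warning is emitted at this call */
    }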
+
//===--- CHECK: Printf format string checking ------------------------------===//
namespace {
@@ -1615,10 +2091,11 @@ public:
const Expr *origFormatExpr, unsigned firstDataArg,
unsigned numDataArgs, bool isObjCLiteral,
const char *beg, bool hasVAListArg,
- const CallExpr *theCall, unsigned formatIdx)
+ Expr **Args, unsigned NumArgs,
+ unsigned formatIdx, bool inFunctionCall)
: CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
numDataArgs, isObjCLiteral, beg, hasVAListArg,
- theCall, formatIdx) {}
+ Args, NumArgs, formatIdx, inFunctionCall) {}
bool HandleInvalidPrintfConversionSpecifier(
@@ -1668,9 +2145,11 @@ bool CheckPrintfHandler::HandleAmount(
if (!HasVAListArg) {
unsigned argIndex = Amt.getArgIndex();
if (argIndex >= NumDataArgs) {
- S.Diag(getLocationOfByte(Amt.getStart()),
- diag::warn_printf_asterisk_missing_arg)
- << k << getSpecifierRange(startSpecifier, specifierLen);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
+ << k,
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
// Don't do any more checking. We will just emit
// spurious errors.
return false;
@@ -1688,12 +2167,12 @@ bool CheckPrintfHandler::HandleAmount(
assert(ATR.isValid());
if (!ATR.matchesType(S.Context, T)) {
- S.Diag(getLocationOfByte(Amt.getStart()),
- diag::warn_printf_asterisk_wrong_type)
- << k
- << ATR.getRepresentativeType(S.Context) << T
- << getSpecifierRange(startSpecifier, specifierLen)
- << Arg->getSourceRange();
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
+ << k << ATR.getRepresentativeTypeName(S.Context)
+ << T << Arg->getSourceRange(),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
// Don't do any more checking. We will just emit
// spurious errors.
return false;
@@ -1711,25 +2190,19 @@ void CheckPrintfHandler::HandleInvalidAmount(
unsigned specifierLen) {
const analyze_printf::PrintfConversionSpecifier &CS =
FS.getConversionSpecifier();
- switch (Amt.getHowSpecified()) {
- case analyze_printf::OptionalAmount::Constant:
- S.Diag(getLocationOfByte(Amt.getStart()),
- diag::warn_printf_nonsensical_optional_amount)
- << type
- << CS.toString()
- << getSpecifierRange(startSpecifier, specifierLen)
- << FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
- Amt.getConstantLength()));
- break;
- default:
- S.Diag(getLocationOfByte(Amt.getStart()),
- diag::warn_printf_nonsensical_optional_amount)
- << type
- << CS.toString()
- << getSpecifierRange(startSpecifier, specifierLen);
- break;
- }
+ FixItHint fixit =
+ Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
+ ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
+ Amt.getConstantLength()))
+ : FixItHint();
+
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
+ << type << CS.toString(),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ fixit);
}
void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
@@ -1739,11 +2212,13 @@ void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
// Warn about pointless flag with a fixit removal.
const analyze_printf::PrintfConversionSpecifier &CS =
FS.getConversionSpecifier();
- S.Diag(getLocationOfByte(flag.getPosition()),
- diag::warn_printf_nonsensical_flag)
- << flag.toString() << CS.toString()
- << getSpecifierRange(startSpecifier, specifierLen)
- << FixItHint::CreateRemoval(getSpecifierRange(flag.getPosition(), 1));
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
+ << flag.toString() << CS.toString(),
+ getLocationOfByte(flag.getPosition()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateRemoval(
+ getSpecifierRange(flag.getPosition(), 1)));
}
void CheckPrintfHandler::HandleIgnoredFlag(
@@ -1753,12 +2228,13 @@ void CheckPrintfHandler::HandleIgnoredFlag(
const char *startSpecifier,
unsigned specifierLen) {
// Warn about ignored flag with a fixit removal.
- S.Diag(getLocationOfByte(ignoredFlag.getPosition()),
- diag::warn_printf_ignored_flag)
- << ignoredFlag.toString() << flag.toString()
- << getSpecifierRange(startSpecifier, specifierLen)
- << FixItHint::CreateRemoval(getSpecifierRange(
- ignoredFlag.getPosition(), 1));
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
+ << ignoredFlag.toString() << flag.toString(),
+ getLocationOfByte(ignoredFlag.getPosition()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateRemoval(
+ getSpecifierRange(ignoredFlag.getPosition(), 1)));
}
bool
@@ -1777,10 +2253,8 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
usesPositionalArgs = FS.usesPositionalArg();
}
else if (usesPositionalArgs != FS.usesPositionalArg()) {
- // Cannot mix-and-match positional and non-positional arguments.
- S.Diag(getLocationOfByte(CS.getStart()),
- diag::warn_format_mix_positional_nonpositional_args)
- << getSpecifierRange(startSpecifier, specifierLen);
+ HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen);
return false;
}
}
@@ -1889,18 +2363,29 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
// Check the length modifier is valid with the given conversion specifier.
const LengthModifier &LM = FS.getLengthModifier();
if (!FS.hasValidLengthModifier())
- S.Diag(getLocationOfByte(LM.getStart()),
- diag::warn_format_nonsensical_length)
- << LM.toString() << CS.toString()
- << getSpecifierRange(startSpecifier, specifierLen)
- << FixItHint::CreateRemoval(getSpecifierRange(LM.getStart(),
- LM.getLength()));
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_nonsensical_length)
+ << LM.toString() << CS.toString(),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateRemoval(
+ getSpecifierRange(LM.getStart(),
+ LM.getLength())));
+ if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(LM, startSpecifier, specifierLen);
+ if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
+ HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
+ if (!FS.hasStandardLengthConversionCombination())
+ HandleNonStandardConversionSpecification(LM, CS, startSpecifier,
+ specifierLen);
// Are we using '%n'?
if (CS.getKind() == ConversionSpecifier::nArg) {
// Issue a warning about this being a possible security issue.
- S.Diag(getLocationOfByte(CS.getStart()), diag::warn_printf_write_back)
- << getSpecifierRange(startSpecifier, specifierLen);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_write_back),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
// Continue checking the other format specifiers.
return true;
}
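The '%n' case fires on uses like (illustrative):

    int written;
    printf("%d%n\n", 42, &written);  /* warning: '%n' is potentially insecure */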
@@ -1915,7 +2400,8 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
// Now type check the data expression that matches the
// format specifier.
const Expr *Ex = getDataArg(argIndex);
- const analyze_printf::ArgTypeResult &ATR = FS.getArgType(S.Context);
+ const analyze_printf::ArgTypeResult &ATR = FS.getArgType(S.Context,
+ IsObjCLiteral);
if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType())) {
// Check if we didn't match because of an implicit cast from a 'char'
// or 'short' to an 'int'. This is done because printf is a varargs
@@ -1930,32 +2416,35 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
// We may be able to offer a FixItHint if it is a supported type.
PrintfSpecifier fixedFS = FS;
- bool success = fixedFS.fixType(Ex->getType());
+ bool success = fixedFS.fixType(Ex->getType(), S.getLangOpts(),
+ S.Context, IsObjCLiteral);
if (success) {
// Get the fix string from the fixed format specifier
- llvm::SmallString<128> buf;
+ SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
fixedFS.toString(os);
- // FIXME: getRepresentativeType() perhaps should return a string
- // instead of a QualType to better handle when the representative
- // type is 'wint_t' (which is defined in the system headers).
- S.Diag(getLocationOfByte(CS.getStart()),
- diag::warn_printf_conversion_argument_type_mismatch)
- << ATR.getRepresentativeType(S.Context) << Ex->getType()
- << getSpecifierRange(startSpecifier, specifierLen)
- << Ex->getSourceRange()
- << FixItHint::CreateReplacement(
- getSpecifierRange(startSpecifier, specifierLen),
- os.str());
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << Ex->getSourceRange(),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateReplacement(
+ getSpecifierRange(startSpecifier, specifierLen),
+ os.str()));
}
else {
- S.Diag(getLocationOfByte(CS.getStart()),
- diag::warn_printf_conversion_argument_type_mismatch)
- << ATR.getRepresentativeType(S.Context) << Ex->getType()
- << getSpecifierRange(startSpecifier, specifierLen)
- << Ex->getSourceRange();
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << getSpecifierRange(startSpecifier, specifierLen)
+ << Ex->getSourceRange(),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
}
}
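When fixType succeeds, the fix-it replaces the whole conversion specification; e.g. (illustrative):

    long n = 0;
    printf("%d\n", n);  /* warning plus a fix-it rewriting "%d" as "%ld" */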
@@ -1971,10 +2460,11 @@ public:
const Expr *origFormatExpr, unsigned firstDataArg,
unsigned numDataArgs, bool isObjCLiteral,
const char *beg, bool hasVAListArg,
- const CallExpr *theCall, unsigned formatIdx)
+ Expr **Args, unsigned NumArgs,
+ unsigned formatIdx, bool inFunctionCall)
: CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
numDataArgs, isObjCLiteral, beg, hasVAListArg,
- theCall, formatIdx) {}
+ Args, NumArgs, formatIdx, inFunctionCall) {}
bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
const char *startSpecifier,
@@ -1991,8 +2481,9 @@ public:
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
const char *end) {
- S.Diag(getLocationOfByte(end), diag::warn_scanf_scanlist_incomplete)
- << getSpecifierRange(start, end - start);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
+ getLocationOfByte(end), /*IsStringLocation*/true,
+ getSpecifierRange(start, end - start));
}
bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
@@ -2027,10 +2518,8 @@ bool CheckScanfHandler::HandleScanfSpecifier(
usesPositionalArgs = FS.usesPositionalArg();
}
else if (usesPositionalArgs != FS.usesPositionalArg()) {
- // Cannot mix-and-match positional and non-positional arguments.
- S.Diag(getLocationOfByte(CS.getStart()),
- diag::warn_format_mix_positional_nonpositional_args)
- << getSpecifierRange(startSpecifier, specifierLen);
+ HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen);
return false;
}
}
@@ -2041,9 +2530,10 @@ bool CheckScanfHandler::HandleScanfSpecifier(
if (Amt.getConstantAmount() == 0) {
const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
Amt.getConstantLength());
- S.Diag(getLocationOfByte(Amt.getStart()),
- diag::warn_scanf_nonzero_width)
- << R << FixItHint::CreateRemoval(R);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true, R,
+ FixItHint::CreateRemoval(R));
}
}
@@ -2065,13 +2555,22 @@ bool CheckScanfHandler::HandleScanfSpecifier(
// Check the length modifier is valid with the given conversion specifier.
const LengthModifier &LM = FS.getLengthModifier();
if (!FS.hasValidLengthModifier()) {
- S.Diag(getLocationOfByte(LM.getStart()),
- diag::warn_format_nonsensical_length)
- << LM.toString() << CS.toString()
- << getSpecifierRange(startSpecifier, specifierLen)
- << FixItHint::CreateRemoval(getSpecifierRange(LM.getStart(),
- LM.getLength()));
- }
+ const CharSourceRange &R = getSpecifierRange(LM.getStart(), LM.getLength());
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_nonsensical_length)
+ << LM.toString() << CS.toString()
+ << getSpecifierRange(startSpecifier, specifierLen),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true, R,
+ FixItHint::CreateRemoval(R));
+ }
+
+ if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(LM, startSpecifier, specifierLen);
+ if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
+ HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
+ if (!FS.hasStandardLengthConversionCombination())
+ HandleNonStandardConversionSpecification(LM, CS, startSpecifier,
+ specifierLen);
// The remaining checks depend on the data arguments.
if (HasVAListArg)
@@ -2080,22 +2579,57 @@ bool CheckScanfHandler::HandleScanfSpecifier(
if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
return false;
- // FIXME: Check that the argument type matches the format specifier.
-
+ // Check that the argument type matches the format specifier.
+ const Expr *Ex = getDataArg(argIndex);
+ const analyze_scanf::ScanfArgTypeResult &ATR = FS.getArgType(S.Context);
+ if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType())) {
+ ScanfSpecifier fixedFS = FS;
+ bool success = fixedFS.fixType(Ex->getType(), S.getLangOpts(),
+ S.Context);
+
+ if (success) {
+ // Get the fix string from the fixed format specifier.
+ SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+ fixedFS.toString(os);
+
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << Ex->getSourceRange(),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateReplacement(
+ getSpecifierRange(startSpecifier, specifierLen),
+ os.str()));
+ } else {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << Ex->getSourceRange(),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+ }
+
return true;
}
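This mirrors the printf path; an illustrative call the new scanf type check catches:

    long v;
    scanf("%d", &v);  /* warning plus a fix-it suggesting "%ld" for 'long *' */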
void Sema::CheckFormatString(const StringLiteral *FExpr,
const Expr *OrigFormatExpr,
- const CallExpr *TheCall, bool HasVAListArg,
- unsigned format_idx, unsigned firstDataArg,
- bool isPrintf) {
+ Expr **Args, unsigned NumArgs,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ bool inFunctionCall) {
// CHECK: is the format string a wide literal?
if (!FExpr->isAscii()) {
- Diag(FExpr->getLocStart(),
- diag::warn_format_string_is_wide_literal)
- << OrigFormatExpr->getSourceRange();
+ CheckFormatHandler::EmitFormatDiagnostic(
+ *this, inFunctionCall, Args[format_idx],
+ PDiag(diag::warn_format_string_is_wide_literal), FExpr->getLocStart(),
+ /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
return;
}
@@ -2103,33 +2637,36 @@ void Sema::CheckFormatString(const StringLiteral *FExpr,
StringRef StrRef = FExpr->getString();
const char *Str = StrRef.data();
unsigned StrLen = StrRef.size();
- const unsigned numDataArgs = TheCall->getNumArgs() - firstDataArg;
+ const unsigned numDataArgs = NumArgs - firstDataArg;
// CHECK: empty format string?
if (StrLen == 0 && numDataArgs > 0) {
- Diag(FExpr->getLocStart(), diag::warn_empty_format_string)
- << OrigFormatExpr->getSourceRange();
+ CheckFormatHandler::EmitFormatDiagnostic(
+ *this, inFunctionCall, Args[format_idx],
+ PDiag(diag::warn_empty_format_string), FExpr->getLocStart(),
+ /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
return;
}
- if (isPrintf) {
+ if (Type == FST_Printf || Type == FST_NSString) {
CheckPrintfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg,
numDataArgs, isa<ObjCStringLiteral>(OrigFormatExpr),
- Str, HasVAListArg, TheCall, format_idx);
+ Str, HasVAListArg, Args, NumArgs, format_idx,
+ inFunctionCall);
- bool FormatExtensions = getLangOptions().FormatExtensions;
if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
- FormatExtensions))
+ getLangOpts()))
H.DoneProcessing();
- }
- else {
+ } else if (Type == FST_Scanf) {
CheckScanfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg,
numDataArgs, isa<ObjCStringLiteral>(OrigFormatExpr),
- Str, HasVAListArg, TheCall, format_idx);
+ Str, HasVAListArg, Args, NumArgs, format_idx,
+ inFunctionCall);
- if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen))
+ if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
+ getLangOpts()))
H.DoneProcessing();
- }
+ } // TODO: handle other formats
}
//===--- CHECK: Standard memory functions ---------------------------------===//
@@ -2174,16 +2711,19 @@ static QualType getSizeOfArgType(const Expr* E) {
///
/// \param Call The call expression to diagnose.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
- CheckedMemoryFunction CMF,
+ unsigned BId,
IdentifierInfo *FnName) {
+ assert(BId != 0);
+
// It is possible to have a non-standard definition of memset. Validate
// we have enough arguments, and if not, abort further checking.
- unsigned ExpectedNumArgs = (CMF == CMF_Strndup ? 2 : 3);
+ unsigned ExpectedNumArgs = (BId == Builtin::BIstrndup ? 2 : 3);
if (Call->getNumArgs() < ExpectedNumArgs)
return;
- unsigned LastArg = (CMF == CMF_Memset || CMF == CMF_Strndup ? 1 : 2);
- unsigned LenArg = (CMF == CMF_Strndup ? 1 : 2);
+ unsigned LastArg = (BId == Builtin::BImemset ||
+ BId == Builtin::BIstrndup ? 1 : 2);
+ unsigned LenArg = (BId == Builtin::BIstrndup ? 1 : 2);
const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
// We have special checking when the length is a sizeof expression.
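The sizeof handling that follows targets calls like (illustrative):

    void zero(int *p) {
      memset(p, 0, sizeof(p));  /* warns: 'sizeof(p)' is the size of the pointer */
    }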
@@ -2227,7 +2767,8 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
if (Context.getTypeSize(PointeeTy) == Context.getCharWidth())
ActionIdx = 2; // If the pointee's size is sizeof(char),
// suggest an explicit length.
- unsigned DestSrcSelect = (CMF == CMF_Strndup ? 1 : ArgIdx);
+ unsigned DestSrcSelect =
+ (BId == Builtin::BIstrndup ? 1 : ArgIdx);
DiagRuntimeBehavior(SizeOfArg->getExprLoc(), Dest,
PDiag(diag::warn_sizeof_pointer_expr_memaccess)
<< FnName << DestSrcSelect << ActionIdx
@@ -2253,16 +2794,29 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
}
// Always complain about dynamic classes.
- if (isDynamicClassType(PointeeTy))
+ if (isDynamicClassType(PointeeTy)) {
+
+ unsigned OperationType = 0;
+ // "overwritten" if we're warning about the destination for any call
+ // but memcmp; otherwise a verb appropriate to the call.
+ if (ArgIdx != 0 || BId == Builtin::BImemcmp) {
+ if (BId == Builtin::BImemcpy)
+ OperationType = 1;
+ else if (BId == Builtin::BImemmove)
+ OperationType = 2;
+ else if (BId == Builtin::BImemcmp)
+ OperationType = 3;
+ }
+
DiagRuntimeBehavior(
Dest->getExprLoc(), Dest,
PDiag(diag::warn_dyn_class_memaccess)
- << (CMF == CMF_Memcmp ? ArgIdx + 2 : ArgIdx) << FnName << PointeeTy
- // "overwritten" if we're warning about the destination for any call
- // but memcmp; otherwise a verb appropriate to the call.
- << (ArgIdx == 0 && CMF != CMF_Memcmp ? 0 : (unsigned)CMF)
+ << (BId == Builtin::BImemcmp ? ArgIdx + 2 : ArgIdx)
+ << FnName << PointeeTy
+ << OperationType
<< Call->getCallee()->getSourceRange());
- else if (PointeeTy.hasNonTrivialObjCLifetime() && CMF != CMF_Memset)
+ } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
+ BId != Builtin::BImemset)
DiagRuntimeBehavior(
Dest->getExprLoc(), Dest,
PDiag(diag::warn_arc_object_memaccess)
@@ -2324,7 +2878,7 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
else {
// Look for 'strlcpy(dst, x, strlen(x))'
if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
- if (SizeCall->isBuiltinCall(Context) == Builtin::BIstrlen
+ if (SizeCall->isBuiltinCall() == Builtin::BIstrlen
&& SizeCall->getNumArgs() == 1)
CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
}
@@ -2366,7 +2920,7 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
return;
}
- llvm::SmallString<128> sizeString;
+ SmallString<128> sizeString;
llvm::raw_svector_ostream OS(sizeString);
OS << "sizeof(";
DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
@@ -2377,6 +2931,108 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
OS.str());
}
+/// Check if two expressions refer to the same declaration.
+static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
+ if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
+ if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
+ return D1->getDecl() == D2->getDecl();
+ return false;
+}
+
+static const Expr *getStrlenExprArg(const Expr *E) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
+ return 0;
+ return CE->getArg(0)->IgnoreParenCasts();
+ }
+ return 0;
+}
+
+// Warn on anti-patterns used as the 'size' argument to strncat.
+// The correct size argument should look like the following:
+//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
+void Sema::CheckStrncatArguments(const CallExpr *CE,
+ IdentifierInfo *FnName) {
+ // Don't crash if the user has the wrong number of arguments.
+ if (CE->getNumArgs() < 3)
+ return;
+ const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
+ const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
+ const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
+
+ // Identify common expressions, which are wrongly used as the size argument
+ // to strncat and may lead to buffer overflows.
+ unsigned PatternType = 0;
+ if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
+ // - sizeof(dst)
+ if (referToTheSameDecl(SizeOfArg, DstArg))
+ PatternType = 1;
+ // - sizeof(src)
+ else if (referToTheSameDecl(SizeOfArg, SrcArg))
+ PatternType = 2;
+ } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
+ if (BE->getOpcode() == BO_Sub) {
+ const Expr *L = BE->getLHS()->IgnoreParenCasts();
+ const Expr *R = BE->getRHS()->IgnoreParenCasts();
+ // - sizeof(dst) - strlen(dst)
+ if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
+ referToTheSameDecl(DstArg, getStrlenExprArg(R)))
+ PatternType = 1;
+ // - sizeof(src) - (anything)
+ else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
+ PatternType = 2;
+ }
+ }
+
+ if (PatternType == 0)
+ return;
+
+ // Generate the diagnostic.
+ SourceLocation SL = LenArg->getLocStart();
+ SourceRange SR = LenArg->getSourceRange();
+ SourceManager &SM = PP.getSourceManager();
+
+ // If the function is defined as a builtin macro, do not show the macro expansion.
+ if (SM.isMacroArgExpansion(SL)) {
+ SL = SM.getSpellingLoc(SL);
+ SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
+ SM.getSpellingLoc(SR.getEnd()));
+ }
+
+ if (PatternType == 1)
+ Diag(SL, diag::warn_strncat_large_size) << SR;
+ else
+ Diag(SL, diag::warn_strncat_src_size) << SR;
+
+ // Output a FIXIT hint if the destination is an array (rather than a
+ // pointer to an array). This could be enhanced to handle some
+ // pointers if we know the actual size, like if DstArg is 'array+2'
+ // we could say 'sizeof(array)-2'.
+ QualType DstArgTy = DstArg->getType();
+
+ // Only handle constant-sized arrays and VLAs, but not flexible array members.
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(DstArgTy)) {
+ // Only issue the FIXIT for arrays of size > 1.
+ if (CAT->getSize().getSExtValue() <= 1)
+ return;
+ } else if (!DstArgTy->isVariableArrayType()) {
+ return;
+ }
+
+ SmallString<128> sizeString;
+ llvm::raw_svector_ostream OS(sizeString);
+ OS << "sizeof(";
+ DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
+ OS << ") - ";
+ OS << "strlen(";
+ DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
+ OS << ") - 1";
+
+ Diag(SL, diag::note_strncat_wrong_size)
+ << FixItHint::CreateReplacement(SR, OS.str());
+}
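Putting the strncat patterns together (illustrative buffers):

    char dst[32], src[16];
    strncat(dst, src, sizeof(dst));                    /* pattern 1: warned */
    strncat(dst, src, sizeof(src));                    /* pattern 2: warned */
    strncat(dst, src, sizeof(dst) - strlen(dst) - 1);  /* form the fix-it suggests */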
+
//===--- CHECK: Return Address of Stack Variable --------------------------===//
static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars);
@@ -2394,7 +3050,7 @@ Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
// Perform checking for returned stack addresses, local blocks,
// label addresses or references to temporaries.
if (lhsType->isPointerType() ||
- (!getLangOptions().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
+ (!getLangOpts().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
stackE = EvalAddr(RetValExp, refVars);
} else if (lhsType->isReferenceType()) {
stackE = EvalVal(RetValExp, refVars);
@@ -2561,42 +3217,39 @@ static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
case Stmt::AddrLabelExprClass:
return E; // address of label.
+ case Stmt::ExprWithCleanupsClass:
+ return EvalAddr(cast<ExprWithCleanups>(E)->getSubExpr(), refVars);
+
// For casts, we need to handle conversions from arrays to
// pointer values, and pointer-to-pointer conversions.
case Stmt::ImplicitCastExprClass:
case Stmt::CStyleCastExprClass:
case Stmt::CXXFunctionalCastExprClass:
- case Stmt::ObjCBridgedCastExprClass: {
- Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
- QualType T = SubExpr->getType();
-
- if (SubExpr->getType()->isPointerType() ||
- SubExpr->getType()->isBlockPointerType() ||
- SubExpr->getType()->isObjCQualifiedIdType())
- return EvalAddr(SubExpr, refVars);
- else if (T->isArrayType())
- return EvalVal(SubExpr, refVars);
- else
- return 0;
- }
-
- // C++ casts. For dynamic casts, static casts, and const casts, we
- // are always converting from a pointer-to-pointer, so we just blow
- // through the cast. In the case the dynamic cast doesn't fail (and
- // return NULL), we take the conservative route and report cases
- // where we return the address of a stack variable. For Reinterpre
- // FIXME: The comment about is wrong; we're not always converting
- // from pointer to pointer. I'm guessing that this code should also
- // handle references to objects.
+ case Stmt::ObjCBridgedCastExprClass:
case Stmt::CXXStaticCastExprClass:
case Stmt::CXXDynamicCastExprClass:
case Stmt::CXXConstCastExprClass:
case Stmt::CXXReinterpretCastExprClass: {
- Expr *S = cast<CXXNamedCastExpr>(E)->getSubExpr();
- if (S->getType()->isPointerType() || S->getType()->isBlockPointerType())
- return EvalAddr(S, refVars);
- else
- return NULL;
+ Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
+ switch (cast<CastExpr>(E)->getCastKind()) {
+ case CK_BitCast:
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ return EvalAddr(SubExpr, refVars);
+
+ case CK_ArrayToPointerDecay:
+ return EvalVal(SubExpr, refVars);
+
+ default:
+ return 0;
+ }
}
case Stmt::MaterializeTemporaryExprClass:
@@ -2637,6 +3290,9 @@ do {
return NULL;
}
+ case Stmt::ExprWithCleanupsClass:
+ return EvalVal(cast<ExprWithCleanups>(E)->getSubExpr(), refVars);
+
case Stmt::DeclRefExprClass: {
// When we hit a DeclRefExpr we are looking at code that refers to a
// variable's name. If it's not a reference variable we check if it has
@@ -2766,12 +3422,12 @@ void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
// Check for comparisons with builtin types.
if (EmitWarning)
if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
- if (CL->isBuiltinCall(Context))
+ if (CL->isBuiltinCall())
EmitWarning = false;
if (EmitWarning)
if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
- if (CR->isBuiltinCall(Context))
+ if (CR->isBuiltinCall())
EmitWarning = false;
// Emit the diagnostic.
@@ -2870,7 +3526,8 @@ struct IntRange {
}
};
-IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, unsigned MaxWidth) {
+static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
+ unsigned MaxWidth) {
if (value.isSigned() && value.isNegative())
return IntRange(value.getMinSignedBits(), false);
@@ -2882,8 +3539,8 @@ IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, unsigned MaxWidth) {
return IntRange(value.getActiveBits(), true);
}
-IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
- unsigned MaxWidth) {
+static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
+ unsigned MaxWidth) {
if (result.isInt())
return GetValueRange(C, result.getInt(), MaxWidth);
@@ -2907,7 +3564,7 @@ IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
// FIXME: The only reason we need to pass the type in here is to get
// the sign right on this one case. It would be nice if APValue
// preserved this.
- assert(result.isLValue());
+ assert(result.isLValue() || result.isAddrLabelDiff());
return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}
@@ -2915,19 +3572,19 @@ IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
/// range of values it might take.
///
/// \param MaxWidth - the width to which the value will be truncated
-IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
+static IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
E = E->IgnoreParens();
// Try a full evaluation first.
Expr::EvalResult result;
- if (E->Evaluate(result, C))
+ if (E->EvaluateAsRValue(result, C))
return GetValueRange(C, result.Val, E->getType(), MaxWidth);
// I think we only want to look through implicit casts here; if the
// user has an explicit widening cast, we should treat the value as
// being of the new, wider type.
if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
- if (CE->getCastKind() == CK_NoOp)
+ if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
return GetExprRange(C, CE->getSubExpr(), MaxWidth);
IntRange OutputTypeRange = IntRange::forValueOfType(C, CE->getType());
@@ -3133,16 +3790,16 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
return IntRange::forValueOfType(C, E->getType());
}
-IntRange GetExprRange(ASTContext &C, Expr *E) {
+static IntRange GetExprRange(ASTContext &C, Expr *E) {
return GetExprRange(C, E, C.getIntWidth(E->getType()));
}
/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
-bool IsSameFloatAfterCast(const llvm::APFloat &value,
- const llvm::fltSemantics &Src,
- const llvm::fltSemantics &Tgt) {
+static bool IsSameFloatAfterCast(const llvm::APFloat &value,
+ const llvm::fltSemantics &Src,
+ const llvm::fltSemantics &Tgt) {
llvm::APFloat truncated = value;
bool ignored;
@@ -3157,9 +3814,9 @@ bool IsSameFloatAfterCast(const llvm::APFloat &value,
/// target semantics.
///
/// The value might be a vector of floats (or a complex number).
-bool IsSameFloatAfterCast(const APValue &value,
- const llvm::fltSemantics &Src,
- const llvm::fltSemantics &Tgt) {
+static bool IsSameFloatAfterCast(const APValue &value,
+ const llvm::fltSemantics &Src,
+ const llvm::fltSemantics &Tgt) {
if (value.isFloat())
return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
@@ -3175,7 +3832,7 @@ bool IsSameFloatAfterCast(const APValue &value,
IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}
-void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);
+static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);
static bool IsZero(Sema &S, Expr *E) {
// Suppress cases where we are comparing against an enum constant.
@@ -3204,7 +3861,7 @@ static bool HasEnumType(Expr *E) {
return E->getType()->isEnumeralType();
}
-void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
+static void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
BinaryOperatorKind op = E->getOpcode();
if (E->isValueDependent())
return;
@@ -3230,7 +3887,7 @@ void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
/// Analyze the operands of the given comparison. Implements the
/// fallback case from AnalyzeComparison.
-void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
+static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}
@@ -3238,7 +3895,7 @@ void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
/// \brief Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
-void AnalyzeComparison(Sema &S, BinaryOperator *E) {
+static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
// The type the comparison is being performed in.
QualType T = E->getLHS()->getType();
assert(S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())
@@ -3311,8 +3968,8 @@ void AnalyzeComparison(Sema &S, BinaryOperator *E) {
/// Analyzes an attempt to assign the given value to a bitfield.
///
/// Returns true if there was something fishy about the attempt.
-bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
- SourceLocation InitLoc) {
+static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
+ SourceLocation InitLoc) {
assert(Bitfield->isBitField());
if (Bitfield->isInvalidDecl())
return false;
@@ -3330,31 +3987,30 @@ bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
Expr *OriginalInit = Init->IgnoreParenImpCasts();
- Expr::EvalResult InitValue;
- if (!OriginalInit->Evaluate(InitValue, S.Context) ||
- !InitValue.Val.isInt())
+ llvm::APSInt Value;
+ if (!OriginalInit->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects))
return false;
- const llvm::APSInt &Value = InitValue.Val.getInt();
unsigned OriginalWidth = Value.getBitWidth();
unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
if (OriginalWidth <= FieldWidth)
return false;
+ // Compute the value which the bitfield will contain.
llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
+ TruncatedValue.setIsSigned(Bitfield->getType()->isSignedIntegerType());
- // It's fairly common to write values into signed bitfields
- // that, if sign-extended, would end up becoming a different
- // value. We don't want to warn about that.
- if (Value.isSigned() && Value.isNegative())
- TruncatedValue = TruncatedValue.sext(OriginalWidth);
- else
- TruncatedValue = TruncatedValue.zext(OriginalWidth);
-
+ // Check whether the stored value is equal to the original value.
+ TruncatedValue = TruncatedValue.extend(OriginalWidth);
if (Value == TruncatedValue)
return false;
+ // Special-case bitfields of width 1: booleans are naturally 0/1, and
+ // therefore don't strictly fit into a signed bitfield of width 1.
+ if (FieldWidth == 1 && Value == 1)
+ return false;
+
std::string PrettyValue = Value.toString(10);
std::string PrettyTrunc = TruncatedValue.toString(10);
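Illustrative assignments for the truncation check (diagnostic wording approximate):

    struct S { int b : 3; } s;
    s.b = 8;      /* warns: 8 truncates to 0 in a 3-bit field */
    struct T { int flag : 1; } t;
    t.flag = 1;   /* no warning: the width-1 special case above */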
@@ -3367,7 +4023,7 @@ bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
/// Analyze the given simple or compound assignment for warning-worthy
/// operations.
-void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
+static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
// Just recurse on the LHS.
AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
@@ -3386,16 +4042,25 @@ void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
}
/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
-void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
- SourceLocation CContext, unsigned diag) {
+static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
+ SourceLocation CContext, unsigned diag,
+ bool pruneControlFlow = false) {
+ if (pruneControlFlow) {
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag)
+ << SourceType << T << E->getSourceRange()
+ << SourceRange(CContext));
+ return;
+ }
S.Diag(E->getExprLoc(), diag)
<< SourceType << T << E->getSourceRange() << SourceRange(CContext);
}
/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
-void DiagnoseImpCast(Sema &S, Expr *E, QualType T, SourceLocation CContext,
- unsigned diag) {
- DiagnoseImpCast(S, E, E->getType(), T, CContext, diag);
+static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
+ SourceLocation CContext, unsigned diag,
+ bool pruneControlFlow = false) {
+ DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
}
/// Diagnose an implicit cast from a literal expression. Does not warn when the
@@ -3425,11 +4090,6 @@ std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) {
return ValueInRange.toString(10);
}
-static bool isFromSystemMacro(Sema &S, SourceLocation loc) {
- SourceManager &smgr = S.Context.getSourceManager();
- return loc.isMacroID() && smgr.isInSystemHeader(smgr.getSpellingLoc(loc));
-}
-
void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
SourceLocation CC, bool *ICContext = 0) {
if (E->isTypeDependent() || E->isValueDependent()) return;
@@ -3455,13 +4115,43 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// by a check in AnalyzeImplicitConversions().
return DiagnoseImpCast(S, E, T, CC,
diag::warn_impcast_string_literal_to_bool);
+ if (Source->isFunctionType()) {
+ // Warn on function to bool. Checks free functions and static member
+ // functions. Weakly imported functions are excluded from the check,
+ // since it's common to test their value to check whether the linker
+ // found a definition for them.
+ ValueDecl *D = 0;
+ if (DeclRefExpr* R = dyn_cast<DeclRefExpr>(E)) {
+ D = R->getDecl();
+ } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
+ D = M->getMemberDecl();
+ }
+
+ if (D && !D->isWeak()) {
+ if (FunctionDecl* F = dyn_cast<FunctionDecl>(D)) {
+ S.Diag(E->getExprLoc(), diag::warn_impcast_function_to_bool)
+ << F << E->getSourceRange() << SourceRange(CC);
+ S.Diag(E->getExprLoc(), diag::note_function_to_bool_silence)
+ << FixItHint::CreateInsertion(E->getExprLoc(), "&");
+ QualType ReturnType;
+ UnresolvedSet<4> NonTemplateOverloads;
+ S.isExprCallable(*E, ReturnType, NonTemplateOverloads);
+ if (!ReturnType.isNull()
+ && ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
+ S.Diag(E->getExprLoc(), diag::note_function_to_bool_call)
+ << FixItHint::CreateInsertion(
+ S.getPreprocessor().getLocForEndOfToken(E->getLocEnd()), "()");
+ return;
+ }
+ }
+ }
return; // Other casts to bool are not checked.
}
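The new function-to-bool check targets code like (illustrative):

    bool ready();
    void g() {
      if (ready) {}  /* warns: address of function is always 'true'; notes
                        suggest '&ready' to silence or 'ready()' to call */
    }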
// Strip vector types.
if (isa<VectorType>(Source)) {
if (!isa<VectorType>(Target)) {
- if (isFromSystemMacro(S, CC))
+ if (S.SourceMgr.isInSystemMacro(CC))
return;
return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
}
@@ -3478,7 +4168,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Strip complex types.
if (isa<ComplexType>(Source)) {
if (!isa<ComplexType>(Target)) {
- if (isFromSystemMacro(S, CC))
+ if (S.SourceMgr.isInSystemMacro(CC))
return;
return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_complex_scalar);
@@ -3502,7 +4192,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Don't warn about float constants that are precisely
// representable in the target type.
Expr::EvalResult result;
- if (E->Evaluate(result, S.Context)) {
+ if (E->EvaluateAsRValue(result, S.Context)) {
// Value might be a float, a float vector, or a float complex.
if (IsSameFloatAfterCast(result.Val,
S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
@@ -3510,7 +4200,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
}
- if (isFromSystemMacro(S, CC))
+ if (S.SourceMgr.isInSystemMacro(CC))
return;
DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
@@ -3520,7 +4210,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// If the target is integral, always warn.
if ((TargetBT && TargetBT->isInteger())) {
- if (isFromSystemMacro(S, CC))
+ if (S.SourceMgr.isInSystemMacro(CC))
return;
Expr *InnerE = E->IgnoreParenImpCasts();
@@ -3544,8 +4234,11 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if ((E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)
== Expr::NPCK_GNUNull) && Target->isIntegerType()) {
- S.Diag(E->getExprLoc(), diag::warn_impcast_null_pointer_to_integer)
- << E->getSourceRange() << clang::SourceRange(CC);
+ SourceLocation Loc = E->getSourceRange().getBegin();
+ if (Loc.isMacroID())
+ Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first;
+ S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
+ << T << Loc << clang::SourceRange(CC);
return;
}
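This covers GNU __null flowing into an integer, e.g. (illustrative, C++ where NULL is __null):

    int x = NULL;  /* warning: implicit conversion of NULL constant to 'int' */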
@@ -3557,24 +4250,27 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// TODO: this should happen for bitfield stores, too.
llvm::APSInt Value(32);
if (E->isIntegerConstantExpr(Value, S.Context)) {
- if (isFromSystemMacro(S, CC))
+ if (S.SourceMgr.isInSystemMacro(CC))
return;
std::string PrettySourceValue = Value.toString(10);
std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
- S.Diag(E->getExprLoc(), diag::warn_impcast_integer_precision_constant)
- << PrettySourceValue << PrettyTargetValue
- << E->getType() << T << E->getSourceRange() << clang::SourceRange(CC);
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag::warn_impcast_integer_precision_constant)
+ << PrettySourceValue << PrettyTargetValue
+ << E->getType() << T << E->getSourceRange()
+ << clang::SourceRange(CC));
return;
}
// People want to build with -Wshorten-64-to-32 and not -Wconversion.
- if (isFromSystemMacro(S, CC))
+ if (S.SourceMgr.isInSystemMacro(CC))
return;
- if (SourceRange.Width == 64 && TargetRange.Width == 32)
- return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32);
+ if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
+ /* pruneControlFlow */ true);
return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
}
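The constant and width-based paths split roughly as follows (illustrative):

    char c = 300;       /* constant case: value changes from 300 to 44 */
    long long big = 1;
    int i = big;        /* 64-to-32 case, now routed through
                           DiagRuntimeBehavior to prune unreachable code */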
@@ -3582,7 +4278,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
(!TargetRange.NonNegative && SourceRange.NonNegative &&
SourceRange.Width == TargetRange.Width)) {
- if (isFromSystemMacro(S, CC))
+ if (S.SourceMgr.isInSystemMacro(CC))
return;
unsigned DiagID = diag::warn_impcast_integer_sign;
@@ -3604,7 +4300,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// In C, we pretend that the type of an EnumConstantDecl is its enumeration
// type, to give us better diagnostics.
QualType SourceType = E->getType();
- if (!S.getLangOptions().CPlusPlus) {
+ if (!S.getLangOpts().CPlusPlus) {
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
@@ -3620,7 +4316,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
(TargetEnum->getDecl()->getIdentifier() ||
TargetEnum->getDecl()->getTypedefNameForAnonDecl()) &&
SourceEnum != TargetEnum) {
- if (isFromSystemMacro(S, CC))
+ if (S.SourceMgr.isInSystemMacro(CC))
return;
return DiagnoseImpCast(S, E, SourceType, T, CC,
@@ -3712,8 +4408,8 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
if (BO->isComparisonOp())
return AnalyzeComparison(S, BO);
- // And with assignments and compound assignments.
- if (BO->isAssignmentOp())
+ // And with simple assignments.
+ if (BO->getOpcode() == BO_Assign)
return AnalyzeAssignment(S, BO);
}
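A small sketch (hypothetical names S/touch) of the narrowed routing: only plain assignments now feed AnalyzeAssignment, so the common compound-assignment pattern no longer goes down this path:

    short S = 0;
    void touch() {
      S = S + 1;  // BO_Assign: the int-to-short narrowing is analyzed
      S += 1;     // BO_AddAssign: no longer routed through AnalyzeAssignment
    }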
@@ -3731,7 +4427,10 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
bool IsLogicalOperator = BO && BO->isLogicalOp();
for (Stmt::child_range I = E->children(); I; ++I) {
- Expr *ChildExpr = cast<Expr>(*I);
+ Expr *ChildExpr = dyn_cast_or_null<Expr>(*I);
+ if (!ChildExpr)
+ continue;
+
if (IsLogicalOperator &&
isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
// Ignore checking string literals that are in logical operators.
@@ -3801,7 +4500,7 @@ bool Sema::CheckParmsForFunctionDef(ParmVarDecl **P, ParmVarDecl **PEnd,
if (CheckParameterNames &&
Param->getIdentifier() == 0 &&
!Param->isImplicit() &&
- !getLangOptions().CPlusPlus)
+ !getLangOpts().CPlusPlus)
Diag(Param->getLocation(), diag::err_parameter_name_omitted);
// C99 6.7.5.3p12:
@@ -3897,8 +4596,11 @@ static bool IsTailPaddedMemberArray(Sema &S, llvm::APInt Size,
return false;
const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
- if (!RD || !RD->isStruct())
- return false;
+ if (!RD) return false;
+ if (RD->isUnion()) return false;
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CRD->isStandardLayout()) return false;
+ }
// See if this is the last field decl in the record.
const Decl *D = FD;
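The relaxed check above still targets the classic "struct hack". A hypothetical example of the layout it tolerates:

    struct Packet {    // standard-layout and not a union
      unsigned Length;
      char Data[1];    // trailing size-1 member array; callers allocate extra
                       // storage past the struct, so indexing beyond 1 is
                       // deliberately left unflagged
    };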
@@ -3909,21 +4611,24 @@ static bool IsTailPaddedMemberArray(Sema &S, llvm::APInt Size,
}
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
- bool isSubscript, bool AllowOnePastEnd) {
- const Type* EffectiveType = getElementType(BaseExpr);
- BaseExpr = BaseExpr->IgnoreParenCasts();
- IndexExpr = IndexExpr->IgnoreParenCasts();
+ const ArraySubscriptExpr *ASE,
+ bool AllowOnePastEnd, bool IndexNegated) {
+ IndexExpr = IndexExpr->IgnoreParenImpCasts();
+ if (IndexExpr->isValueDependent())
+ return;
+ const Type *EffectiveType = getElementType(BaseExpr);
+ BaseExpr = BaseExpr->IgnoreParenCasts();
const ConstantArrayType *ArrayTy =
Context.getAsConstantArrayType(BaseExpr->getType());
if (!ArrayTy)
return;
- if (IndexExpr->isValueDependent())
- return;
llvm::APSInt index;
- if (!IndexExpr->isIntegerConstantExpr(index, Context))
+ if (!IndexExpr->EvaluateAsInt(index, Context))
return;
+ if (IndexNegated)
+ index = -index;
const NamedDecl *ND = NULL;
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
@@ -3954,15 +4659,15 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
}
if (size.getBitWidth() > index.getBitWidth())
- index = index.sext(size.getBitWidth());
+ index = index.zext(size.getBitWidth());
else if (size.getBitWidth() < index.getBitWidth())
- size = size.sext(index.getBitWidth());
+ size = size.zext(index.getBitWidth());
// For array subscripting the index must be less than size, but for pointer
// arithmetic also allow the index (offset) to be equal to size since
// computing the next address after the end of the array is legal and
// commonly done e.g. in C++ iterators and range-based for loops.
- if (AllowOnePastEnd ? index.sle(size) : index.slt(size))
+ if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
return;
// Also don't warn for arrays of size 1 which are members of some
@@ -3971,8 +4676,22 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
if (IsTailPaddedMemberArray(*this, size, ND))
return;
+ // Suppress the warning if the subscript expression (as identified by the
+ // ']' location) and the index expression are both from macro expansions
+ // within a system header.
+ if (ASE) {
+ SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
+ ASE->getRBracketLoc());
+ if (SourceMgr.isInSystemHeader(RBracketLoc)) {
+ SourceLocation IndexLoc = SourceMgr.getSpellingLoc(
+ IndexExpr->getLocStart());
+ if (SourceMgr.isFromSameFile(RBracketLoc, IndexLoc))
+ return;
+ }
+ }
+
unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds;
- if (isSubscript)
+ if (ASE)
DiagID = diag::warn_array_index_exceeds_bounds;
DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
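A compact illustration of the bounds rule encoded above (hypothetical array A; the initializer for Oops is undefined behavior and exists only to show the warning):

    int A[4];
    int *OnePastEnd = A + 4;  // offset == size: legal pointer arithmetic,
                              // accepted via AllowOnePastEnd
    int Oops = A[4];          // a subscript must be strictly below the size:
                              // warn_array_index_exceeds_bounds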
@@ -3982,7 +4701,7 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
<< IndexExpr->getSourceRange());
} else {
unsigned DiagID = diag::warn_array_index_precedes_bounds;
- if (!isSubscript) {
+ if (!ASE) {
DiagID = diag::warn_ptr_arith_precedes_bounds;
if (index.isNegative()) index = -index;
}
@@ -3992,6 +4711,17 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
<< IndexExpr->getSourceRange());
}
+ if (!ND) {
+ // Try harder to find a NamedDecl to point at in the note.
+ while (const ArraySubscriptExpr *ASE =
+ dyn_cast<ArraySubscriptExpr>(BaseExpr))
+ BaseExpr = ASE->getBase()->IgnoreParenCasts();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(DRE->getDecl());
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(ME->getMemberDecl());
+ }
+
if (ND)
DiagRuntimeBehavior(ND->getLocStart(), BaseExpr,
PDiag(diag::note_array_index_out_of_bounds)
@@ -4005,7 +4735,7 @@ void Sema::CheckArrayAccess(const Expr *expr) {
switch (expr->getStmtClass()) {
case Stmt::ArraySubscriptExprClass: {
const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
- CheckArrayAccess(ASE->getBase(), ASE->getIdx(), true,
+ CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
AllowOnePastEnd > 0);
return;
}
@@ -4070,7 +4800,7 @@ static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
return true;
}
-static bool findRetainCycleOwner(Expr *e, RetainCycleOwner &owner) {
+static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
while (true) {
e = e->IgnoreParens();
if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
@@ -4082,22 +4812,6 @@ static bool findRetainCycleOwner(Expr *e, RetainCycleOwner &owner) {
e = cast->getSubExpr();
continue;
- case CK_GetObjCProperty: {
- // Bail out if this isn't a strong explicit property.
- const ObjCPropertyRefExpr *pre = cast->getSubExpr()->getObjCProperty();
- if (pre->isImplicitProperty()) return false;
- ObjCPropertyDecl *property = pre->getExplicitProperty();
- if (!property->isRetaining() &&
- !(property->getPropertyIvarDecl() &&
- property->getPropertyIvarDecl()->getType()
- .getObjCLifetime() == Qualifiers::OCL_Strong))
- return false;
-
- owner.Indirect = true;
- e = const_cast<Expr*>(pre->getBase());
- continue;
- }
-
default:
return false;
}
@@ -4109,7 +4823,7 @@ static bool findRetainCycleOwner(Expr *e, RetainCycleOwner &owner) {
return false;
// Try to find a retain cycle in the base.
- if (!findRetainCycleOwner(ref->getBase(), owner))
+ if (!findRetainCycleOwner(S, ref->getBase(), owner))
return false;
if (ref->isFreeIvar()) owner.setLocsFrom(ref);
@@ -4123,12 +4837,6 @@ static bool findRetainCycleOwner(Expr *e, RetainCycleOwner &owner) {
return considerVariable(var, ref, owner);
}
- if (BlockDeclRefExpr *ref = dyn_cast<BlockDeclRefExpr>(e)) {
- owner.Variable = ref->getDecl();
- owner.setLocsFrom(ref);
- return true;
- }
-
if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
if (member->isArrow()) return false;
@@ -4137,6 +4845,34 @@ static bool findRetainCycleOwner(Expr *e, RetainCycleOwner &owner) {
continue;
}
+ if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
+ // Only pay attention to pseudo-objects on property references.
+ ObjCPropertyRefExpr *pre
+ = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
+ ->IgnoreParens());
+ if (!pre) return false;
+ if (pre->isImplicitProperty()) return false;
+ ObjCPropertyDecl *property = pre->getExplicitProperty();
+ if (!property->isRetaining() &&
+ !(property->getPropertyIvarDecl() &&
+ property->getPropertyIvarDecl()->getType()
+ .getObjCLifetime() == Qualifiers::OCL_Strong))
+ return false;
+
+ owner.Indirect = true;
+ if (pre->isSuperReceiver()) {
+ owner.Variable = S.getCurMethodDecl()->getSelfDecl();
+ if (!owner.Variable)
+ return false;
+ owner.Loc = pre->getLocation();
+ owner.Range = pre->getSourceRange();
+ return true;
+ }
+ e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
+ ->getSourceExpr());
+ continue;
+ }
+
// Array ivars?
return false;
@@ -4157,11 +4893,6 @@ namespace {
Capturer = ref;
}
- void VisitBlockDeclRefExpr(BlockDeclRefExpr *ref) {
- if (ref->getDecl() == Variable && !Capturer)
- Capturer = ref;
- }
-
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
if (Capturer) return;
Visit(ref->getBase());
@@ -4210,8 +4941,14 @@ static bool isSetterLikeSelector(Selector sel) {
StringRef str = sel.getNameForSlot(0);
while (!str.empty() && str.front() == '_') str = str.substr(1);
- if (str.startswith("set") || str.startswith("add"))
+ if (str.startswith("set"))
str = str.substr(3);
+ else if (str.startswith("add")) {
+ // Whitelist 'addOperationWithBlock:' specifically.
+ if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
+ return false;
+ str = str.substr(3);
+ }
else
return false;
@@ -4228,7 +4965,7 @@ void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
// Try to find a variable that the receiver is strongly owned by.
RetainCycleOwner owner;
if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
- if (!findRetainCycleOwner(msg->getInstanceReceiver(), owner))
+ if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
return;
} else {
assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
@@ -4246,7 +4983,7 @@ void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
/// Check a property assign to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
RetainCycleOwner owner;
- if (!findRetainCycleOwner(receiver, owner))
+ if (!findRetainCycleOwner(*this, receiver, owner))
return;
if (Expr *capturer = findCapturingExpr(*this, argument, owner))
@@ -4273,7 +5010,19 @@ bool Sema::checkUnsafeAssigns(SourceLocation Loc,
void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
Expr *LHS, Expr *RHS) {
- QualType LHSType = LHS->getType();
+ QualType LHSType;
+ // The type of a PropertyRef on the LHS must be obtained directly from
+ // its declaration, since the expression itself has a pseudo-object type.
+ ObjCPropertyRefExpr *PRE
+ = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
+ if (PRE && !PRE->isImplicitProperty()) {
+ const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
+ if (PD)
+ LHSType = PD->getType();
+ }
+
+ if (LHSType.isNull())
+ LHSType = LHS->getType();
if (checkUnsafeAssigns(Loc, LHSType, RHS))
return;
Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
@@ -4281,7 +5030,7 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
if (LT != Qualifiers::OCL_None)
return;
- if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(LHS)) {
+ if (PRE) {
if (PRE->isImplicitProperty())
return;
const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
@@ -4289,7 +5038,15 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
return;
unsigned Attributes = PD->getPropertyAttributes();
- if (Attributes & ObjCPropertyDecl::OBJC_PR_assign)
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
+ // When the 'assign' attribute was not explicitly specified
+ // by the user, ignore it and rely on the property type itself
+ // for lifetime info.
+ unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
+ if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
+ LHSType->isObjCRetainableType())
+ return;
+
while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
if (cast->getCastKind() == CK_ARCConsumeObject) {
Diag(Loc, diag::warn_arc_retained_property_assign)
@@ -4298,5 +5055,132 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
}
RHS = cast->getSubExpr();
}
+ }
+ }
+}
+
+//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
+
+namespace {
+bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
+ SourceLocation StmtLoc,
+ const NullStmt *Body) {
+ // Do not warn if the body is a macro that expands to nothing, e.g.:
+ //
+ // #define CALL(x)
+ // if (condition)
+ // CALL(0);
+ //
+ if (Body->hasLeadingEmptyMacro())
+ return false;
+
+ // Get line numbers of statement and body.
+ bool StmtLineInvalid;
+ unsigned StmtLine = SourceMgr.getSpellingLineNumber(StmtLoc,
+ &StmtLineInvalid);
+ if (StmtLineInvalid)
+ return false;
+
+ bool BodyLineInvalid;
+ unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
+ &BodyLineInvalid);
+ if (BodyLineInvalid)
+ return false;
+
+ // Warn if null statement and body are on the same line.
+ if (StmtLine != BodyLine)
+ return false;
+
+ return true;
+}
+} // Unnamed namespace
+
+void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
+ const Stmt *Body,
+ unsigned DiagID) {
+ // Since this is a syntactic check, don't emit a diagnostic for template
+ // instantiations; that would just add noise.
+ if (CurrentInstantiationScope)
+ return;
+
+ // The body should be a null statement.
+ const NullStmt *NBody = dyn_cast<NullStmt>(Body);
+ if (!NBody)
+ return;
+
+ // Do the usual checks.
+ if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
+ return;
+
+ Diag(NBody->getSemiLoc(), DiagID);
+ Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
+}
+
+void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
+ const Stmt *PossibleBody) {
+ assert(!CurrentInstantiationScope); // Ensured by caller
+
+ SourceLocation StmtLoc;
+ const Stmt *Body;
+ unsigned DiagID;
+ if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
+ StmtLoc = FS->getRParenLoc();
+ Body = FS->getBody();
+ DiagID = diag::warn_empty_for_body;
+ } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
+ StmtLoc = WS->getCond()->getSourceRange().getEnd();
+ Body = WS->getBody();
+ DiagID = diag::warn_empty_while_body;
+ } else
+ return; // Neither `for' nor `while'.
+
+ // The body should be a null statement.
+ const NullStmt *NBody = dyn_cast<NullStmt>(Body);
+ if (!NBody)
+ return;
+
+ // Skip expensive checks if diagnostic is disabled.
+ if (Diags.getDiagnosticLevel(DiagID, NBody->getSemiLoc()) ==
+ DiagnosticsEngine::Ignored)
+ return;
+
+ // Do the usual checks.
+ if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
+ return;
+
+ // `for(...);' and `while(...);' are popular idioms, so in order to keep
+ // noise level low, emit diagnostics only if for/while is followed by a
+ // CompoundStmt, e.g.:
+ // for (int i = 0; i < n; i++);
+ // {
+ // a(i);
+ // }
+ // or if for/while is followed by a statement with more indentation
+ // than for/while itself:
+ // for (int i = 0; i < n; i++);
+ // a(i);
+ bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
+ if (!ProbableTypo) {
+ bool BodyColInvalid;
+ unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
+ PossibleBody->getLocStart(),
+ &BodyColInvalid);
+ if (BodyColInvalid)
+ return;
+
+ bool StmtColInvalid;
+ unsigned StmtCol = SourceMgr.getPresumedColumnNumber(
+ S->getLocStart(),
+ &StmtColInvalid);
+ if (StmtColInvalid)
+ return;
+
+ if (BodyCol > StmtCol)
+ ProbableTypo = true;
+ }
+
+ if (ProbableTypo) {
+ Diag(NBody->getSemiLoc(), DiagID);
+ Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
}
}
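A hypothetical function spin (with assumed function-pointer parameters Cond/Step) that trips the heuristic: the null statement shares a line with the condition, and the following statement is indented deeper:

    void spin(bool (*Cond)(), void (*Step)()) {
      while (Cond());  // warn_empty_while_body: `;' on the condition line...
        Step();        // ...and this statement is indented past the while,
                       // so ProbableTypo is set
    }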
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
index 405d626..1ee7532 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
@@ -20,10 +20,13 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
@@ -120,6 +123,8 @@ namespace {
/// \brief The allocator used to allocate new code-completion strings.
CodeCompletionAllocator &Allocator;
+
+ CodeCompletionTUInfo &CCTUInfo;
/// \brief If non-NULL, a filter function used to remove any code-completion
/// results that are not desirable.
@@ -163,9 +168,11 @@ namespace {
public:
explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
const CodeCompletionContext &CompletionContext,
LookupFilter Filter = 0)
- : SemaRef(SemaRef), Allocator(Allocator), Filter(Filter),
+ : SemaRef(SemaRef), Allocator(Allocator), CCTUInfo(CCTUInfo),
+ Filter(Filter),
AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
CompletionContext(CompletionContext),
ObjCImplementation(0)
@@ -248,6 +255,8 @@ namespace {
/// \brief Retrieve the allocator used to allocate code completion strings.
CodeCompletionAllocator &getAllocator() const { return Allocator; }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
/// \brief Determine whether the given declaration is at all interesting
/// as a code-completion result.
@@ -271,9 +280,9 @@ namespace {
/// of the shadow maps), or replace an existing result (for, e.g., a
/// redeclaration).
///
- /// \param CurContext the result to add (if it is unique).
+ /// \param R the result to add (if it is unique).
///
- /// \param R the context in which this result will be named.
+ /// \param CurContext the context in which this result will be named.
void MaybeAddResult(Result R, DeclContext *CurContext = 0);
/// \brief Add a new result to this result set, where we already know
@@ -322,6 +331,7 @@ namespace {
bool IsMember(NamedDecl *ND) const;
bool IsObjCIvar(NamedDecl *ND) const;
bool IsObjCMessageReceiver(NamedDecl *ND) const;
+ bool IsObjCMessageReceiverOrLambdaCapture(NamedDecl *ND) const;
bool IsObjCCollection(NamedDecl *ND) const;
bool IsImpossibleToSatisfy(NamedDecl *ND) const;
//@}
@@ -510,14 +520,6 @@ bool ResultBuilder::isInterestingDecl(NamedDecl *ND,
return false;
}
}
-
- // Skip out-of-line declarations and definitions.
- // NOTE: Unless it's an Objective-C property, method, or ivar, where
- // the contexts can be messy.
- if (!ND->getDeclContext()->Equals(ND->getLexicalDeclContext()) &&
- !(isa<ObjCPropertyDecl>(ND) || isa<ObjCIvarDecl>(ND) ||
- isa<ObjCMethodDecl>(ND)))
- return false;
if (Filter == &ResultBuilder::IsNestedNameSpecifier ||
((isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) &&
@@ -529,7 +531,7 @@ bool ResultBuilder::isInterestingDecl(NamedDecl *ND,
// Filter out any unwanted results.
if (Filter && !(this->*Filter)(ND)) {
// Check whether it is interesting as a nested-name-specifier.
- if (AllowNestedNameSpecifiers && SemaRef.getLangOptions().CPlusPlus &&
+ if (AllowNestedNameSpecifiers && SemaRef.getLangOpts().CPlusPlus &&
IsNestedNameSpecifier(ND) &&
(Filter != &ResultBuilder::IsMember ||
(isa<CXXRecordDecl>(ND) &&
@@ -549,7 +551,7 @@ bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext,
// In C, there is no way to refer to a hidden name.
// FIXME: This isn't true; we can find a tag name hidden by an ordinary
// name if we introduce the tag type.
- if (!SemaRef.getLangOptions().CPlusPlus)
+ if (!SemaRef.getLangOpts().CPlusPlus)
return true;
DeclContext *HiddenCtx = R.Declaration->getDeclContext()->getRedeclContext();
@@ -596,8 +598,7 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
default:
return STC_Arithmetic;
}
- return STC_Other;
-
+
case Type::Complex:
return STC_Arithmetic;
@@ -729,7 +730,7 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
}
void ResultBuilder::MaybeAddConstructorResults(Result R) {
- if (!SemaRef.getLangOptions().CPlusPlus || !R.Declaration ||
+ if (!SemaRef.getLangOpts().CPlusPlus || !R.Declaration ||
!CompletionContext.wantConstructorResults())
return;
@@ -901,7 +902,7 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (Hiding && CheckHiddenResult(R, CurContext, Hiding))
return;
-
+
// Make sure that any given declaration only shows up in the result set once.
if (!AllDeclsFound.insert(R.Declaration->getCanonicalDecl()))
return;
@@ -985,9 +986,9 @@ bool ResultBuilder::IsOrdinaryName(NamedDecl *ND) const {
ND = cast<NamedDecl>(ND->getUnderlyingDecl());
unsigned IDNS = Decl::IDNS_Ordinary;
- if (SemaRef.getLangOptions().CPlusPlus)
+ if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
- else if (SemaRef.getLangOptions().ObjC1) {
+ else if (SemaRef.getLangOpts().ObjC1) {
if (isa<ObjCIvarDecl>(ND))
return true;
}
@@ -1003,9 +1004,9 @@ bool ResultBuilder::IsOrdinaryNonTypeName(NamedDecl *ND) const {
return false;
unsigned IDNS = Decl::IDNS_Ordinary;
- if (SemaRef.getLangOptions().CPlusPlus)
+ if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
- else if (SemaRef.getLangOptions().ObjC1) {
+ else if (SemaRef.getLangOpts().ObjC1) {
if (isa<ObjCIvarDecl>(ND))
return true;
}
@@ -1030,7 +1031,7 @@ bool ResultBuilder::IsOrdinaryNonValueName(NamedDecl *ND) const {
ND = cast<NamedDecl>(ND->getUnderlyingDecl());
unsigned IDNS = Decl::IDNS_Ordinary;
- if (SemaRef.getLangOptions().CPlusPlus)
+ if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;
return (ND->getIdentifierNamespace() & IDNS) &&
@@ -1132,7 +1133,7 @@ static bool isObjCReceiverType(ASTContext &C, QualType T) {
break;
}
- if (!C.getLangOptions().CPlusPlus)
+ if (!C.getLangOpts().CPlusPlus)
return false;
// FIXME: We could perform more analysis here to determine whether a
@@ -1150,9 +1151,20 @@ bool ResultBuilder::IsObjCMessageReceiver(NamedDecl *ND) const {
return isObjCReceiverType(SemaRef.Context, T);
}
+bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(NamedDecl *ND) const {
+ if (IsObjCMessageReceiver(ND))
+ return true;
+
+ VarDecl *Var = dyn_cast<VarDecl>(ND);
+ if (!Var)
+ return false;
+
+ return Var->hasLocalStorage() && !Var->hasAttr<BlocksAttr>();
+}
+
bool ResultBuilder::IsObjCCollection(NamedDecl *ND) const {
- if ((SemaRef.getLangOptions().CPlusPlus && !IsOrdinaryName(ND)) ||
- (!SemaRef.getLangOptions().CPlusPlus && !IsOrdinaryNonTypeName(ND)))
+ if ((SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryName(ND)) ||
+ (!SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryNonTypeName(ND)))
return false;
QualType T = getDeclUsageType(SemaRef.Context, ND);
@@ -1162,7 +1174,7 @@ bool ResultBuilder::IsObjCCollection(NamedDecl *ND) const {
T = SemaRef.Context.getBaseElementType(T);
return T->isObjCObjectType() || T->isObjCObjectPointerType() ||
T->isObjCIdType() ||
- (SemaRef.getLangOptions().CPlusPlus && T->isRecordType());
+ (SemaRef.getLangOpts().CPlusPlus && T->isRecordType());
}
bool ResultBuilder::IsImpossibleToSatisfy(NamedDecl *ND) const {
@@ -1189,11 +1201,9 @@ namespace {
virtual void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
bool InBaseClass) {
bool Accessible = true;
- if (Ctx) {
- if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx))
- Accessible = Results.getSema().IsSimplyAccessible(ND, Class);
- // FIXME: ObjC access checks are missing.
- }
+ if (Ctx)
+ Accessible = Results.getSema().IsSimplyAccessible(ND, Ctx);
+
ResultBuilder::Result Result(ND, 0, false, Accessible);
Results.AddResult(Result, CurContext, Hiding, InBaseClass);
}
@@ -1227,7 +1237,8 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.AddResult(Result("restrict", CCP_Type));
}
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
if (LangOpts.CPlusPlus) {
// C++-specific
Results.AddResult(Result("bool", CCP_Type +
@@ -1337,7 +1348,8 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts,
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt);
static void AddTypedefResult(ResultBuilder &Results) {
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk("typedef");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("type");
@@ -1372,8 +1384,72 @@ static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
case Sema::PCC_ForInit:
return LangOpts.CPlusPlus || LangOpts.ObjC1 || LangOpts.C99;
}
+
+ llvm_unreachable("Invalid ParserCompletionContext!");
+}
+
+static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
+ const Preprocessor &PP) {
+ PrintingPolicy Policy = Sema::getPrintingPolicy(Context, PP);
+ Policy.AnonymousTagLocations = false;
+ Policy.SuppressStrongLifetime = true;
+ Policy.SuppressUnwrittenScope = true;
+ return Policy;
+}
+
+/// \brief Retrieve a printing policy suitable for code completion.
+static PrintingPolicy getCompletionPrintingPolicy(Sema &S) {
+ return getCompletionPrintingPolicy(S.Context, S.PP);
+}
+
+/// \brief Retrieve the string representation of the given type as a string
+/// that has the appropriate lifetime for code completion.
+///
+/// This routine provides a fast path where we provide constant strings for
+/// common type names.
+static const char *GetCompletionTypeString(QualType T,
+ ASTContext &Context,
+ const PrintingPolicy &Policy,
+ CodeCompletionAllocator &Allocator) {
+ if (!T.getLocalQualifiers()) {
+ // Built-in type names are constant strings.
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
+ return BT->getName(Policy);
+
+ // Anonymous tag types are constant strings.
+ if (const TagType *TagT = dyn_cast<TagType>(T))
+ if (TagDecl *Tag = TagT->getDecl())
+ if (!Tag->getIdentifier() && !Tag->getTypedefNameForAnonDecl()) {
+ switch (Tag->getTagKind()) {
+ case TTK_Struct: return "struct <anonymous>";
+ case TTK_Class: return "class <anonymous>";
+ case TTK_Union: return "union <anonymous>";
+ case TTK_Enum: return "enum <anonymous>";
+ }
+ }
+ }
- return false;
+ // Slow path: format the type as a string.
+ std::string Result;
+ T.getAsStringInternal(Result, Policy);
+ return Allocator.CopyString(Result);
+}
+
+/// \brief Add a completion for "this", if we're in a member function.
+static void addThisCompletion(Sema &S, ResultBuilder &Results) {
+ QualType ThisTy = S.getCurrentThisType();
+ if (ThisTy.isNull())
+ return;
+
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ PrintingPolicy Policy = getCompletionPrintingPolicy(S);
+ Builder.AddResultTypeChunk(GetCompletionTypeString(ThisTy,
+ S.Context,
+ Policy,
+ Allocator));
+ Builder.AddTypedTextChunk("this");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
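A hypothetical completion site for the new "this" result; completing inside Widget::draw now shows the result type alongside the keyword:

    struct Widget {
      void draw() {
        // code completion here offers:  [Widget *] this
      }
    };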
/// \brief Add language constructs that show up for "ordinary" names.
@@ -1381,12 +1457,14 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Scope *S,
Sema &SemaRef,
ResultBuilder &Results) {
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ PrintingPolicy Policy = getCompletionPrintingPolicy(SemaRef);
typedef CodeCompletionResult Result;
switch (CCC) {
case Sema::PCC_Namespace:
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
if (Results.includeCodePatterns()) {
// namespace <identifier> { declarations }
Builder.AddTypedTextChunk("namespace");
@@ -1431,14 +1509,14 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
}
}
- if (SemaRef.getLangOptions().ObjC1)
+ if (SemaRef.getLangOpts().ObjC1)
AddObjCTopLevelResults(Results, true);
AddTypedefResult(Results);
// Fall through
case Sema::PCC_Class:
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
// Using declaration
Builder.AddTypedTextChunk("using");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -1464,17 +1542,20 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
// public:
Builder.AddTypedTextChunk("public");
- Builder.AddChunk(CodeCompletionString::CK_Colon);
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
// protected:
Builder.AddTypedTextChunk("protected");
- Builder.AddChunk(CodeCompletionString::CK_Colon);
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
// private:
Builder.AddTypedTextChunk("private");
- Builder.AddChunk(CodeCompletionString::CK_Colon);
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
}
}
@@ -1482,7 +1563,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
case Sema::PCC_Template:
case Sema::PCC_MemberTemplate:
- if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) {
+ if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns()) {
// template < parameters >
Builder.AddTypedTextChunk("template");
Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
@@ -1491,32 +1572,32 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Results.AddResult(Result(Builder.TakeString()));
}
- AddStorageSpecifiers(CCC, SemaRef.getLangOptions(), Results);
- AddFunctionSpecifiers(CCC, SemaRef.getLangOptions(), Results);
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
case Sema::PCC_ObjCInterface:
- AddObjCInterfaceResults(SemaRef.getLangOptions(), Results, true);
- AddStorageSpecifiers(CCC, SemaRef.getLangOptions(), Results);
- AddFunctionSpecifiers(CCC, SemaRef.getLangOptions(), Results);
+ AddObjCInterfaceResults(SemaRef.getLangOpts(), Results, true);
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
case Sema::PCC_ObjCImplementation:
- AddObjCImplementationResults(SemaRef.getLangOptions(), Results, true);
- AddStorageSpecifiers(CCC, SemaRef.getLangOptions(), Results);
- AddFunctionSpecifiers(CCC, SemaRef.getLangOptions(), Results);
+ AddObjCImplementationResults(SemaRef.getLangOpts(), Results, true);
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
case Sema::PCC_ObjCInstanceVariableList:
- AddObjCVisibilityResults(SemaRef.getLangOptions(), Results, true);
+ AddObjCVisibilityResults(SemaRef.getLangOpts(), Results, true);
break;
case Sema::PCC_RecoveryInFunction:
case Sema::PCC_Statement: {
AddTypedefResult(Results);
- if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns() &&
- SemaRef.getLangOptions().CXXExceptions) {
+ if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns() &&
+ SemaRef.getLangOpts().CXXExceptions) {
Builder.AddTypedTextChunk("try");
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
@@ -1532,14 +1613,14 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
- if (SemaRef.getLangOptions().ObjC1)
+ if (SemaRef.getLangOpts().ObjC1)
AddObjCStatementResults(Results, true);
if (Results.includeCodePatterns()) {
// if (condition) { statements }
Builder.AddTypedTextChunk("if");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- if (SemaRef.getLangOptions().CPlusPlus)
+ if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
@@ -1553,7 +1634,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
// switch (condition) { }
Builder.AddTypedTextChunk("switch");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- if (SemaRef.getLangOptions().CPlusPlus)
+ if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
@@ -1583,7 +1664,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
/// while (condition) { statements }
Builder.AddTypedTextChunk("while");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- if (SemaRef.getLangOptions().CPlusPlus)
+ if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
@@ -1609,7 +1690,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
// for ( for-init-statement ; condition ; expression ) { statements }
Builder.AddTypedTextChunk("for");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- if (SemaRef.getLangOptions().CPlusPlus || SemaRef.getLangOptions().C99)
+ if (SemaRef.getLangOpts().CPlusPlus || SemaRef.getLangOpts().C99)
Builder.AddPlaceholderChunk("init-statement");
else
Builder.AddPlaceholderChunk("init-expression");
@@ -1674,11 +1755,11 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
// Fall through (for statement expressions).
case Sema::PCC_ForInit:
case Sema::PCC_Condition:
- AddStorageSpecifiers(CCC, SemaRef.getLangOptions(), Results);
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
// Fall through: conditions and statements can have expressions.
case Sema::PCC_ParenthesizedExpression:
- if (SemaRef.getLangOptions().ObjCAutoRefCount &&
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
CCC == Sema::PCC_ParenthesizedExpression) {
// (__bridge <type>)<expression>
Builder.AddTypedTextChunk("__bridge");
@@ -1707,17 +1788,21 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
// Fall through
case Sema::PCC_Expression: {
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
// 'this', if we're in a non-static member function.
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(SemaRef.CurContext))
- if (!Method->isStatic())
- Results.AddResult(Result("this"));
+ addThisCompletion(SemaRef, Results);
- // true, false
- Results.AddResult(Result("true"));
- Results.AddResult(Result("false"));
+ // true
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("true");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // false
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("false");
+ Results.AddResult(Result(Builder.TakeString()));
- if (SemaRef.getLangOptions().RTTI) {
+ if (SemaRef.getLangOpts().RTTI) {
// dynamic_cast < type-id > ( expression )
Builder.AddTypedTextChunk("dynamic_cast");
Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
@@ -1759,8 +1844,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
- if (SemaRef.getLangOptions().RTTI) {
+ if (SemaRef.getLangOpts().RTTI) {
// typeid ( expression-or-type )
+ Builder.AddResultTypeChunk("std::type_info");
Builder.AddTypedTextChunk("typeid");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression-or-type");
@@ -1790,12 +1876,14 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Results.AddResult(Result(Builder.TakeString()));
// delete expression
+ Builder.AddResultTypeChunk("void");
Builder.AddTypedTextChunk("delete");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
// delete [] expression
+ Builder.AddResultTypeChunk("void");
Builder.AddTypedTextChunk("delete");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
@@ -1804,30 +1892,71 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
- if (SemaRef.getLangOptions().CXXExceptions) {
+ if (SemaRef.getLangOpts().CXXExceptions) {
// throw expression
+ Builder.AddResultTypeChunk("void");
Builder.AddTypedTextChunk("throw");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
}
-
+
// FIXME: Rethrow?
+
+ if (SemaRef.getLangOpts().CPlusPlus0x) {
+ // nullptr
+ Builder.AddResultTypeChunk("std::nullptr_t");
+ Builder.AddTypedTextChunk("nullptr");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // alignof
+ Builder.AddResultTypeChunk("size_t");
+ Builder.AddTypedTextChunk("alignof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // noexcept
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("noexcept");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // sizeof... expression
+ Builder.AddResultTypeChunk("size_t");
+ Builder.AddTypedTextChunk("sizeof...");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("parameter-pack");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
}
- if (SemaRef.getLangOptions().ObjC1) {
+ if (SemaRef.getLangOpts().ObjC1) {
// Add "super", if we're in an Objective-C class with a superclass.
if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
// The interface can be NULL.
if (ObjCInterfaceDecl *ID = Method->getClassInterface())
- if (ID->getSuperClass())
- Results.AddResult(Result("super"));
+ if (ID->getSuperClass()) {
+ std::string SuperType;
+ SuperType = ID->getSuperClass()->getNameAsString();
+ if (Method->isInstanceMethod())
+ SuperType += " *";
+
+ Builder.AddResultTypeChunk(Allocator.CopyString(SuperType));
+ Builder.AddTypedTextChunk("super");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
}
AddObjCExpressionResults(Results, true);
}
// sizeof expression
+ Builder.AddResultTypeChunk("size_t");
Builder.AddTypedTextChunk("sizeof");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression-or-type");
@@ -1841,54 +1970,13 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
break;
}
- if (WantTypesInContext(CCC, SemaRef.getLangOptions()))
- AddTypeSpecifierResults(SemaRef.getLangOptions(), Results);
+ if (WantTypesInContext(CCC, SemaRef.getLangOpts()))
+ AddTypeSpecifierResults(SemaRef.getLangOpts(), Results);
- if (SemaRef.getLangOptions().CPlusPlus && CCC != Sema::PCC_Type)
+ if (SemaRef.getLangOpts().CPlusPlus && CCC != Sema::PCC_Type)
Results.AddResult(Result("operator"));
}
-/// \brief Retrieve a printing policy suitable for code completion.
-static PrintingPolicy getCompletionPrintingPolicy(Sema &S) {
- PrintingPolicy Policy = S.getPrintingPolicy();
- Policy.AnonymousTagLocations = false;
- Policy.SuppressStrongLifetime = true;
- return Policy;
-}
-
-/// \brief Retrieve the string representation of the given type as a string
-/// that has the appropriate lifetime for code completion.
-///
-/// This routine provides a fast path where we provide constant strings for
-/// common type names.
-static const char *GetCompletionTypeString(QualType T,
- ASTContext &Context,
- const PrintingPolicy &Policy,
- CodeCompletionAllocator &Allocator) {
- if (!T.getLocalQualifiers()) {
- // Built-in type names are constant strings.
- if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
- return BT->getName(Policy);
-
- // Anonymous tag types are constant strings.
- if (const TagType *TagT = dyn_cast<TagType>(T))
- if (TagDecl *Tag = TagT->getDecl())
- if (!Tag->getIdentifier() && !Tag->getTypedefNameForAnonDecl()) {
- switch (Tag->getTagKind()) {
- case TTK_Struct: return "struct <anonymous>";
- case TTK_Class: return "class <anonymous>";
- case TTK_Union: return "union <anonymous>";
- case TTK_Enum: return "enum <anonymous>";
- }
- }
- }
-
- // Slow path: format the type as a string.
- std::string Result;
- T.getAsStringInternal(Result, Policy);
- return Allocator.CopyString(Result);
-}
-
/// \brief If the given declaration has an associated type, add it as a result
/// type chunk.
static void AddResultTypeChunk(ASTContext &Context,
@@ -1931,7 +2019,7 @@ static void MaybeAddSentinel(ASTContext &Context, NamedDecl *FunctionOrMethod,
CodeCompletionBuilder &Result) {
if (SentinelAttr *Sentinel = FunctionOrMethod->getAttr<SentinelAttr>())
if (Sentinel->getSentinel() == 0) {
- if (Context.getLangOptions().ObjC1 &&
+ if (Context.getLangOpts().ObjC1 &&
Context.Idents.get("nil").hasMacroDefinition())
Result.AddTextChunk(", nil");
else if (Context.Idents.get("NULL").hasMacroDefinition())
@@ -1941,32 +2029,28 @@ static void MaybeAddSentinel(ASTContext &Context, NamedDecl *FunctionOrMethod,
}
}
-static void appendWithSpace(std::string &Result, StringRef Text) {
- if (!Result.empty())
- Result += ' ';
- Result += Text.str();
-}
static std::string formatObjCParamQualifiers(unsigned ObjCQuals) {
std::string Result;
if (ObjCQuals & Decl::OBJC_TQ_In)
- appendWithSpace(Result, "in");
+ Result += "in ";
else if (ObjCQuals & Decl::OBJC_TQ_Inout)
- appendWithSpace(Result, "inout");
+ Result += "inout ";
else if (ObjCQuals & Decl::OBJC_TQ_Out)
- appendWithSpace(Result, "out");
+ Result += "out ";
if (ObjCQuals & Decl::OBJC_TQ_Bycopy)
- appendWithSpace(Result, "bycopy");
+ Result += "bycopy ";
else if (ObjCQuals & Decl::OBJC_TQ_Byref)
- appendWithSpace(Result, "byref");
+ Result += "byref ";
if (ObjCQuals & Decl::OBJC_TQ_Oneway)
- appendWithSpace(Result, "oneway");
+ Result += "oneway ";
return Result;
}
static std::string FormatFunctionParameter(ASTContext &Context,
const PrintingPolicy &Policy,
ParmVarDecl *Param,
- bool SuppressName = false) {
+ bool SuppressName = false,
+ bool SuppressBlock = false) {
bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
if (Param->getType()->isDependentType() ||
!Param->getType()->isBlockPointerType()) {
@@ -1997,20 +2081,22 @@ static std::string FormatFunctionParameter(ASTContext &Context,
TL = TSInfo->getTypeLoc().getUnqualifiedLoc();
while (true) {
// Look through typedefs.
- if (TypedefTypeLoc *TypedefTL = dyn_cast<TypedefTypeLoc>(&TL)) {
- if (TypeSourceInfo *InnerTSInfo
- = TypedefTL->getTypedefNameDecl()->getTypeSourceInfo()) {
- TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
+ if (!SuppressBlock) {
+ if (TypedefTypeLoc *TypedefTL = dyn_cast<TypedefTypeLoc>(&TL)) {
+ if (TypeSourceInfo *InnerTSInfo
+ = TypedefTL->getTypedefNameDecl()->getTypeSourceInfo()) {
+ TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ }
+
+ // Look through qualified types
+ if (QualifiedTypeLoc *QualifiedTL = dyn_cast<QualifiedTypeLoc>(&TL)) {
+ TL = QualifiedTL->getUnqualifiedLoc();
continue;
}
}
- // Look through qualified types
- if (QualifiedTypeLoc *QualifiedTL = dyn_cast<QualifiedTypeLoc>(&TL)) {
- TL = QualifiedTL->getUnqualifiedLoc();
- continue;
- }
-
// Try to get the function prototype behind the block pointer type,
// then we're done.
if (BlockPointerTypeLoc *BlockPtr
@@ -2027,6 +2113,9 @@ static std::string FormatFunctionParameter(ASTContext &Context,
// We were unable to find a FunctionProtoTypeLoc with parameter names
// for the block; just use the parameter type as a placeholder.
std::string Result;
+ if (!ObjCMethodParam && Param->getIdentifier())
+ Result = Param->getIdentifier()->getName();
+
Param->getType().getUnqualifiedType().getAsStringInternal(Result, Policy);
if (ObjCMethodParam) {
@@ -2038,36 +2127,52 @@ static std::string FormatFunctionParameter(ASTContext &Context,
return Result;
}
-
+
// We have the function prototype behind the block pointer type, as it was
// written in the source.
std::string Result;
QualType ResultType = Block->getTypePtr()->getResultType();
- if (!ResultType->isVoidType())
+ if (!ResultType->isVoidType() || SuppressBlock)
ResultType.getAsStringInternal(Result, Policy);
-
- Result = '^' + Result;
+
+ // Format the parameter list.
+ std::string Params;
if (!BlockProto || Block->getNumArgs() == 0) {
if (BlockProto && BlockProto->getTypePtr()->isVariadic())
- Result += "(...)";
+ Params = "(...)";
else
- Result += "(void)";
+ Params = "(void)";
} else {
- Result += "(";
+ Params += "(";
for (unsigned I = 0, N = Block->getNumArgs(); I != N; ++I) {
if (I)
- Result += ", ";
- Result += FormatFunctionParameter(Context, Policy, Block->getArg(I));
+ Params += ", ";
+ Params += FormatFunctionParameter(Context, Policy, Block->getArg(I),
+ /*SuppressName=*/false,
+ /*SuppressBlock=*/true);
if (I == N - 1 && BlockProto->getTypePtr()->isVariadic())
- Result += ", ...";
+ Params += ", ...";
}
+ Params += ")";
+ }
+
+ if (SuppressBlock) {
+ // Format as a parameter.
+ Result = Result + " (^";
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
Result += ")";
+ Result += Params;
+ } else {
+ // Format as a block literal argument.
+ Result = '^' + Result;
+ Result += Params;
+
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
}
- if (Param->getIdentifier())
- Result += Param->getIdentifier()->getName();
-
return Result;
}
@@ -2078,7 +2183,6 @@ static void AddFunctionParameterChunks(ASTContext &Context,
CodeCompletionBuilder &Result,
unsigned Start = 0,
bool InOptional = false) {
- typedef CodeCompletionString::Chunk Chunk;
bool FirstParameter = true;
for (unsigned P = Start, N = Function->getNumParams(); P != N; ++P) {
@@ -2087,9 +2191,10 @@ static void AddFunctionParameterChunks(ASTContext &Context,
if (Param->hasDefaultArg() && !InOptional) {
// When we see an optional default argument, put that argument and
// the remaining default arguments into a new, optional string.
- CodeCompletionBuilder Opt(Result.getAllocator());
+ CodeCompletionBuilder Opt(Result.getAllocator(),
+ Result.getCodeCompletionTUInfo());
if (!FirstParameter)
- Opt.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Opt.AddChunk(CodeCompletionString::CK_Comma);
AddFunctionParameterChunks(Context, Policy, Function, Opt, P, true);
Result.AddOptionalChunk(Opt.TakeString());
break;
@@ -2098,7 +2203,7 @@ static void AddFunctionParameterChunks(ASTContext &Context,
if (FirstParameter)
FirstParameter = false;
else
- Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Result.AddChunk(CodeCompletionString::CK_Comma);
InOptional = false;
@@ -2132,7 +2237,6 @@ static void AddTemplateParameterChunks(ASTContext &Context,
unsigned MaxParameters = 0,
unsigned Start = 0,
bool InDefaultArg = false) {
- typedef CodeCompletionString::Chunk Chunk;
bool FirstParameter = true;
TemplateParameterList *Params = Template->getTemplateParameters();
@@ -2179,9 +2283,10 @@ static void AddTemplateParameterChunks(ASTContext &Context,
if (HasDefaultArg && !InDefaultArg) {
// When we see an optional default argument, put that argument and
// the remaining default arguments into a new, optional string.
- CodeCompletionBuilder Opt(Result.getAllocator());
+ CodeCompletionBuilder Opt(Result.getAllocator(),
+ Result.getCodeCompletionTUInfo());
if (!FirstParameter)
- Opt.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Opt.AddChunk(CodeCompletionString::CK_Comma);
AddTemplateParameterChunks(Context, Policy, Template, Opt, MaxParameters,
P - Params->begin(), true);
Result.AddOptionalChunk(Opt.TakeString());
@@ -2193,7 +2298,7 @@ static void AddTemplateParameterChunks(ASTContext &Context,
if (FirstParameter)
FirstParameter = false;
else
- Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Result.AddChunk(CodeCompletionString::CK_Comma);
// Add the placeholder string.
Result.AddPlaceholderChunk(
@@ -2263,8 +2368,6 @@ AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
/// \brief Add the name of the given declaration
static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
NamedDecl *ND, CodeCompletionBuilder &Result) {
- typedef CodeCompletionString::Chunk Chunk;
-
DeclarationName Name = ND->getDeclName();
if (!Name)
return;
@@ -2326,15 +2429,21 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(Record->getNameAsString()));
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
- Result.AddChunk(Chunk(CodeCompletionString::CK_LeftAngle));
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Context, Policy, Template, Result);
- Result.AddChunk(Chunk(CodeCompletionString::CK_RightAngle));
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
}
break;
}
}
}
+CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(Sema &S,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) {
+ return CreateCodeCompletionString(S.Context, S.PP, Allocator, CCTUInfo);
+}
+
/// \brief If possible, create a new code completion string for the given
/// result.
///
@@ -2342,15 +2451,23 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
/// how to use this result, or NULL to indicate that the string or name of the
/// result is all that is needed.
CodeCompletionString *
-CodeCompletionResult::CreateCodeCompletionString(Sema &S,
- CodeCompletionAllocator &Allocator) {
- typedef CodeCompletionString::Chunk Chunk;
- CodeCompletionBuilder Result(Allocator, Priority, Availability);
+CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
+ Preprocessor &PP,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) {
+ CodeCompletionBuilder Result(Allocator, CCTUInfo, Priority, Availability);
- PrintingPolicy Policy = getCompletionPrintingPolicy(S);
+ PrintingPolicy Policy = getCompletionPrintingPolicy(Ctx, PP);
if (Kind == RK_Pattern) {
Pattern->Priority = Priority;
Pattern->Availability = Availability;
+
+ if (Declaration) {
+ Result.addParentContext(Declaration->getDeclContext());
+ Pattern->ParentKind = Result.getParentKind();
+ Pattern->ParentName = Result.getParentName();
+ }
+
return Pattern;
}
@@ -2360,7 +2477,7 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
}
if (Kind == RK_Macro) {
- MacroInfo *MI = S.PP.getMacroInfo(Macro);
+ MacroInfo *MI = PP.getMacroInfo(Macro);
assert(MI && "Not a macro?");
Result.AddTypedTextChunk(
@@ -2370,54 +2487,44 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
return Result.TakeString();
// Format a function-like macro with placeholders for the arguments.
- Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
- bool CombineVariadicArgument = false;
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
MacroInfo::arg_iterator A = MI->arg_begin(), AEnd = MI->arg_end();
- if (MI->isVariadic() && AEnd - A > 1) {
- AEnd -= 2;
- CombineVariadicArgument = true;
+
+ // C99 variadic macros add __VA_ARGS__ at the end. Skip it.
+ if (MI->isC99Varargs()) {
+ --AEnd;
+
+ if (A == AEnd) {
+ Result.AddPlaceholderChunk("...");
+ }
}
+
for (MacroInfo::arg_iterator A = MI->arg_begin(); A != AEnd; ++A) {
if (A != MI->arg_begin())
- Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
-
- if (!MI->isVariadic() || A + 1 != AEnd) {
- // Non-variadic argument.
- Result.AddPlaceholderChunk(
- Result.getAllocator().CopyString((*A)->getName()));
- continue;
- }
-
- // Variadic argument; cope with the difference between GNU and C99
- // variadic macros, providing a single placeholder for the rest of the
- // arguments.
- if ((*A)->isStr("__VA_ARGS__"))
- Result.AddPlaceholderChunk("...");
- else {
- std::string Arg = (*A)->getName();
- Arg += "...";
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ if (MI->isVariadic() && (A+1) == AEnd) {
+ SmallString<32> Arg = (*A)->getName();
+ if (MI->isC99Varargs())
+ Arg += ", ...";
+ else
+ Arg += "...";
Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
+ break;
}
+
+ // Non-variadic macros are simple.
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString((*A)->getName()));
}
-
- if (CombineVariadicArgument) {
- // Handle the next-to-last argument, combining it with the variadic
- // argument.
- std::string LastArg = (*A)->getName();
- ++A;
- if ((*A)->isStr("__VA_ARGS__"))
- LastArg += ", ...";
- else
- LastArg += ", " + (*A)->getName().str() + "...";
- Result.AddPlaceholderChunk(Result.getAllocator().CopyString(LastArg));
- }
- Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
return Result.TakeString();
}
assert(Kind == RK_Declaration && "Missed a result kind?");
NamedDecl *ND = Declaration;
-
+ Result.addParentContext(ND->getDeclContext());
+
if (StartsNestedNameSpecifier) {
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
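Two hypothetical macros (calling an assumed function log) showing the placeholder strings the rewritten variadic-macro loop above produces for the two variadic spellings:

    #define C99_LOG(Fmt, ...)     log(Fmt, __VA_ARGS__)  // completes as (Fmt, ...)
    #define GNU_LOG(Fmt, Args...) log(Fmt, Args)         // completes as (Fmt, Args...)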
@@ -2431,29 +2538,29 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
}
}
- AddResultTypeChunk(S.Context, Policy, ND, Result);
+ AddResultTypeChunk(Ctx, Policy, ND, Result);
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
- S.Context, Policy);
- AddTypedNameChunk(S.Context, Policy, ND, Result);
- Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
- AddFunctionParameterChunks(S.Context, Policy, Function, Result);
- Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Ctx, Policy);
+ AddTypedNameChunk(Ctx, Policy, ND, Result);
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ AddFunctionParameterChunks(Ctx, Policy, Function, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
AddFunctionTypeQualsToCompletionString(Result, Function);
return Result.TakeString();
}
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
- S.Context, Policy);
+ Ctx, Policy);
FunctionDecl *Function = FunTmpl->getTemplatedDecl();
- AddTypedNameChunk(S.Context, Policy, Function, Result);
+ AddTypedNameChunk(Ctx, Policy, Function, Result);
// Figure out which template parameters are deduced (or have default
// arguments).
- SmallVector<bool, 16> Deduced;
- S.MarkDeducedTemplateParameters(FunTmpl, Deduced);
+ llvm::SmallBitVector Deduced;
+ Sema::MarkDeducedTemplateParameters(Ctx, FunTmpl, Deduced);
unsigned LastDeducibleArgument;
for (LastDeducibleArgument = Deduced.size(); LastDeducibleArgument > 0;
--LastDeducibleArgument) {
@@ -2484,28 +2591,28 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
// Some of the function template arguments cannot be deduced from a
// function call, so we introduce an explicit template argument list
// containing all of the arguments up to the first deducible argument.
- Result.AddChunk(Chunk(CodeCompletionString::CK_LeftAngle));
- AddTemplateParameterChunks(S.Context, Policy, FunTmpl, Result,
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
+ AddTemplateParameterChunks(Ctx, Policy, FunTmpl, Result,
LastDeducibleArgument);
- Result.AddChunk(Chunk(CodeCompletionString::CK_RightAngle));
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
}
// Add the function parameters
- Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
- AddFunctionParameterChunks(S.Context, Policy, Function, Result);
- Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ AddFunctionParameterChunks(Ctx, Policy, Function, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
AddFunctionTypeQualsToCompletionString(Result, Function);
return Result.TakeString();
}
if (TemplateDecl *Template = dyn_cast<TemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
- S.Context, Policy);
+ Ctx, Policy);
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(Template->getNameAsString()));
- Result.AddChunk(Chunk(CodeCompletionString::CK_LeftAngle));
- AddTemplateParameterChunks(S.Context, Policy, Template, Result);
- Result.AddChunk(Chunk(CodeCompletionString::CK_RightAngle));
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
+ AddTemplateParameterChunks(Ctx, Policy, Template, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
return Result.TakeString();
}
@@ -2553,7 +2660,7 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
std::string Arg;
if ((*P)->getType()->isBlockPointerType() && !DeclaringEntity)
- Arg = FormatFunctionParameter(S.Context, Policy, *P, true);
+ Arg = FormatFunctionParameter(Ctx, Policy, *P, true);
else {
(*P)->getType().getAsStringInternal(Arg, Policy);
Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier())
@@ -2584,7 +2691,7 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
Result.AddPlaceholderChunk(", ...");
}
- MaybeAddSentinel(S.Context, Method, Result);
+ MaybeAddSentinel(Ctx, Method, Result);
}
return Result.TakeString();
@@ -2592,7 +2699,7 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
if (Qualifier)
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
- S.Context, Policy);
+ Ctx, Policy);
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
@@ -2603,12 +2710,12 @@ CodeCompletionString *
CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
unsigned CurrentArg,
Sema &S,
- CodeCompletionAllocator &Allocator) const {
- typedef CodeCompletionString::Chunk Chunk;
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) const {
PrintingPolicy Policy = getCompletionPrintingPolicy(S);
// FIXME: Set priority, availability appropriately.
- CodeCompletionBuilder Result(Allocator, 1, CXAvailability_Available);
+  CodeCompletionBuilder Result(Allocator, CCTUInfo, 1, CXAvailability_Available);
FunctionDecl *FDecl = getFunction();
AddResultTypeChunk(S.Context, Policy, FDecl, Result);
const FunctionProtoType *Proto
@@ -2620,9 +2727,9 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
Result.AddTextChunk(GetCompletionTypeString(FT->getResultType(),
S.Context, Policy,
Result.getAllocator()));
- Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
- Result.AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter, "..."));
- Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
return Result.TakeString();
}
@@ -2634,11 +2741,11 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
Result.getAllocator().CopyString(
Proto->getResultType().getAsString(Policy)));
- Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
unsigned NumParams = FDecl? FDecl->getNumParams() : Proto->getNumArgs();
for (unsigned I = 0; I != NumParams; ++I) {
if (I)
- Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Result.AddChunk(CodeCompletionString::CK_Comma);
std::string ArgString;
QualType ArgType;
@@ -2653,20 +2760,20 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
ArgType.getAsStringInternal(ArgString, Policy);
if (I == CurrentArg)
- Result.AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter,
- Result.getAllocator().CopyString(ArgString)));
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter,
+ Result.getAllocator().CopyString(ArgString));
else
Result.AddTextChunk(Result.getAllocator().CopyString(ArgString));
}
if (Proto && Proto->isVariadic()) {
- Result.AddChunk(Chunk(CodeCompletionString::CK_Comma));
+ Result.AddChunk(CodeCompletionString::CK_Comma);
if (CurrentArg < NumParams)
Result.AddTextChunk("...");
else
- Result.AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter, "..."));
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
}
- Result.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
return Result.TakeString();
}
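
Editor's note: the mechanical change running through these hunks is that
CodeCompletionBuilder::AddChunk now takes the chunk kind directly, with an
optional text argument for chunks such as CK_CurrentParameter, so the explicit
Chunk(...) wrapper disappears. A minimal sketch of the resulting builder
pattern, assuming a Builder constructed with an allocator and
CodeCompletionTUInfo as elsewhere in this patch (the function name and chunk
contents are hypothetical, not part of the change):

    static CodeCompletionString *buildCallSketch(CodeCompletionBuilder &Result) {
      Result.AddResultTypeChunk("void");                    // rendered as "(void)"
      Result.AddTypedTextChunk("frobnicate");               // text the user types to match
      Result.AddChunk(CodeCompletionString::CK_LeftParen);  // "("
      Result.AddPlaceholderChunk("int count");              // editable placeholder
      Result.AddChunk(CodeCompletionString::CK_RightParen); // ")"
      return Result.TakeString();                           // finalize the string
    }
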
@@ -2707,13 +2814,8 @@ CXCursorKind clang::getCursorKindForDecl(Decl *D) {
return CXCursor_FunctionDecl;
case Decl::ObjCCategory: return CXCursor_ObjCCategoryDecl;
case Decl::ObjCCategoryImpl: return CXCursor_ObjCCategoryImplDecl;
- case Decl::ObjCClass:
- // FIXME
- return CXCursor_UnexposedDecl;
- case Decl::ObjCForwardProtocol:
- // FIXME
- return CXCursor_UnexposedDecl;
case Decl::ObjCImplementation: return CXCursor_ObjCImplementationDecl;
+
case Decl::ObjCInterface: return CXCursor_ObjCInterfaceDecl;
case Decl::ObjCIvar: return CXCursor_ObjCIvarDecl;
case Decl::ObjCMethod:
@@ -2754,7 +2856,6 @@ CXCursorKind clang::getCursorKindForDecl(Decl *D) {
case ObjCPropertyImplDecl::Synthesize:
return CXCursor_ObjCSynthesizeDecl;
}
- break;
default:
if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
@@ -2781,7 +2882,7 @@ static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
M != MEnd; ++M) {
Results.AddResult(Result(M->first,
getMacroUsagePriority(M->first->getName(),
- PP.getLangOptions(),
+ PP.getLangOpts(),
TargetTypeIsPointer)));
}
@@ -2833,17 +2934,16 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
case Sema::PCC_MemberTemplate:
if (S.CurContext->isFileContext())
return CodeCompletionContext::CCC_TopLevel;
- else if (S.CurContext->isRecord())
+ if (S.CurContext->isRecord())
return CodeCompletionContext::CCC_ClassStructUnion;
- else
- return CodeCompletionContext::CCC_Other;
+ return CodeCompletionContext::CCC_Other;
case Sema::PCC_RecoveryInFunction:
return CodeCompletionContext::CCC_Recovery;
case Sema::PCC_ForInit:
- if (S.getLangOptions().CPlusPlus || S.getLangOptions().C99 ||
- S.getLangOptions().ObjC1)
+ if (S.getLangOpts().CPlusPlus || S.getLangOpts().C99 ||
+ S.getLangOpts().ObjC1)
return CodeCompletionContext::CCC_ParenthesizedExpression;
else
return CodeCompletionContext::CCC_Expression;
@@ -2864,8 +2964,8 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
case Sema::PCC_LocalDeclarationSpecifiers:
return CodeCompletionContext::CCC_Type;
}
-
- return CodeCompletionContext::CCC_Other;
+
+ llvm_unreachable("Invalid ParserCompletionContext!");
}
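
Editor's note: replacing the trailing return with llvm_unreachable is the
idiom for a switch that covers every enumerator. Control cannot fall out of
the switch, and saying so documents that fact and still traps corrupt enum
values at runtime. A generic sketch of the pattern (hypothetical enum, not
from this file):

    #include "llvm/Support/ErrorHandling.h"

    enum ShapeKind { SK_Circle, SK_Square };

    static const char *shapeName(ShapeKind K) {
      switch (K) {              // every enumerator returns...
      case SK_Circle: return "circle";
      case SK_Square: return "square";
      }
      llvm_unreachable("Invalid ShapeKind!");  // ...so this line is dead
    }
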
/// \brief If we're in a C++ virtual member function, add completion results
@@ -2902,7 +3002,8 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
for (CXXMethodDecl::method_iterator M = Method->begin_overridden_methods(),
MEnd = Method->end_overridden_methods();
M != MEnd; ++M) {
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
CXXMethodDecl *Overridden = const_cast<CXXMethodDecl *>(*M);
if (Overridden->getCanonicalDecl() == Method->getCanonicalDecl())
continue;
@@ -2939,15 +3040,70 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(CodeCompletionResult(Builder.TakeString(),
CCP_SuperCompletion,
- CXCursor_CXXMethod));
+ CXCursor_CXXMethod,
+ CXAvailability_Available,
+ Overridden));
Results.Ignore(Overridden);
}
}
+void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
+ ModuleIdPath Path) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ if (Path.empty()) {
+ // Enumerate all top-level modules.
+ llvm::SmallVector<Module *, 8> Modules;
+ PP.getHeaderSearchInfo().collectAllModules(Modules);
+ for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Modules[I]->Name));
+ Results.AddResult(Result(Builder.TakeString(),
+ CCP_Declaration,
+ CXCursor_NotImplemented,
+ Modules[I]->isAvailable()
+ ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
+ }
+ } else {
+ // Load the named module.
+ Module *Mod = PP.getModuleLoader().loadModule(ImportLoc, Path,
+ Module::AllVisible,
+ /*IsInclusionDirective=*/false);
+ // Enumerate submodules.
+ if (Mod) {
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString((*Sub)->Name));
+ Results.AddResult(Result(Builder.TakeString(),
+ CCP_Declaration,
+ CXCursor_NotImplemented,
+ (*Sub)->isAvailable()
+ ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
+ }
+ }
+ }
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+                           Results.data(), Results.size());
+}
+
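
Editor's note: CodeCompleteModuleImport is new in this patch. With an empty
path it offers every top-level module; otherwise it loads the named module and
offers that module's submodules. A reduced sketch of the submodule walk used
above (listSubmodules and the raw_ostream printing are illustrative, not part
of the patch):

    #include "clang/Basic/Module.h"
    #include "llvm/Support/raw_ostream.h"

    static void listSubmodules(clang::Module *Mod) {
      if (!Mod)   // loadModule can fail; the patch guards the same way
        return;
      for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
                                             SubEnd = Mod->submodule_end();
           Sub != SubEnd; ++Sub)
        llvm::errs() << (*Sub)->Name
                     << ((*Sub)->isAvailable() ? "" : " (unavailable)") << "\n";
    }
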
void Sema::CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
mapCodeCompletionContext(*this, CompletionContext));
Results.EnterNewScope();
@@ -2972,12 +3128,12 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
case PCC_Expression:
case PCC_ForInit:
case PCC_Condition:
- if (WantTypesInContext(CompletionContext, getLangOptions()))
+ if (WantTypesInContext(CompletionContext, getLangOpts()))
Results.setFilter(&ResultBuilder::IsOrdinaryName);
else
Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
MaybeAddOverrideCalls(*this, /*InContext=*/0, Results);
break;
@@ -3006,7 +3162,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
case PCC_Statement:
case PCC_RecoveryInFunction:
if (S->getFnParent())
- AddPrettyFunctionResults(PP.getLangOptions(), Results);
+ AddPrettyFunctionResults(PP.getLangOpts(), Results);
break;
case PCC_Namespace:
@@ -3043,6 +3199,7 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNestedNameSpecifiers) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
AllowNestedNameSpecifiers
? CodeCompletionContext::CCC_PotentiallyQualifiedName
: CodeCompletionContext::CCC_Name);
@@ -3051,10 +3208,10 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
// Type qualifiers can come after names.
Results.AddResult(Result("const"));
Results.AddResult(Result("volatile"));
- if (getLangOptions().C99)
+ if (getLangOpts().C99)
Results.AddResult(Result("restrict"));
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (AllowNonIdentifiers) {
Results.AddResult(Result("operator"));
}
@@ -3120,12 +3277,13 @@ void Sema::CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Expression);
if (Data.ObjCCollection)
Results.setFilter(&ResultBuilder::IsObjCCollection);
else if (Data.IntegralConstantExpression)
Results.setFilter(&ResultBuilder::IsIntegralConstantValue);
- else if (WantTypesInContext(PCC_Expression, getLangOptions()))
+ else if (WantTypesInContext(PCC_Expression, getLangOpts()))
Results.setFilter(&ResultBuilder::IsOrdinaryName);
else
Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
@@ -3154,7 +3312,7 @@ void Sema::CodeCompleteExpression(Scope *S,
if (S->getFnParent() &&
!Data.ObjCCollection &&
!Data.IntegralConstantExpression)
- AddPrettyFunctionResults(PP.getLangOptions(), Results);
+ AddPrettyFunctionResults(PP.getLangOpts(), Results);
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results, PreferredTypeIsPointer);
@@ -3167,7 +3325,7 @@ void Sema::CodeCompleteExpression(Scope *S,
void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
if (E.isInvalid())
CodeCompleteOrdinaryName(S, PCC_RecoveryInFunction);
- else if (getLangOptions().ObjC1)
+ else if (getLangOpts().ObjC1)
CodeCompleteObjCInstanceMessage(S, E.take(), 0, 0, false);
}
@@ -3202,33 +3360,14 @@ static void AddObjCProperties(ObjCContainerDecl *Container,
if (M->getSelector().isUnarySelector())
if (IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0))
if (AddedProperties.insert(Name)) {
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
AddResultTypeChunk(Context, Policy, *M, Builder);
Builder.AddTypedTextChunk(
Results.getAllocator().CopyString(Name->getName()));
- CXAvailabilityKind Availability = CXAvailability_Available;
- switch (M->getAvailability()) {
- case AR_Available:
- case AR_NotYetIntroduced:
- Availability = CXAvailability_Available;
- break;
-
- case AR_Deprecated:
- Availability = CXAvailability_Deprecated;
- break;
-
- case AR_Unavailable:
- Availability = CXAvailability_NotAvailable;
- break;
- }
-
- Results.MaybeAddResult(Result(Builder.TakeString(),
- CCP_MemberDeclaration + CCD_MethodAsProperty,
- M->isInstanceMethod()
- ? CXCursor_ObjCInstanceMethodDecl
- : CXCursor_ObjCClassMethodDecl,
- Availability),
+ Results.MaybeAddResult(Result(Builder.TakeString(), *M,
+ CCP_MemberDeclaration + CCD_MethodAsProperty),
CurContext);
}
}
@@ -3274,15 +3413,19 @@ static void AddObjCProperties(ObjCContainerDecl *Container,
}
}
-void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *BaseE,
+void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow) {
- if (!BaseE || !CodeCompleter)
+ if (!Base || !CodeCompleter)
return;
+ ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
+ if (ConvertedBase.isInvalid())
+ return;
+ Base = ConvertedBase.get();
+
typedef CodeCompletionResult Result;
- Expr *Base = static_cast<Expr *>(BaseE);
QualType BaseType = Base->getType();
if (IsArrow) {
@@ -3310,6 +3453,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *BaseE,
}
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext(contextKind,
BaseType),
&ResultBuilder::IsMember);
@@ -3325,7 +3469,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *BaseE,
LookupVisibleDecls(Record->getDecl(), LookupMemberName, Consumer,
CodeCompleter->includeGlobals());
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (!Results.empty()) {
// The "template" keyword can follow "->" or "." in the grammar.
// However, we only want to suggest the template keyword if something
@@ -3419,7 +3563,8 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
llvm_unreachable("Unknown type specifier kind in CodeCompleteTag");
}
- ResultBuilder Results(*this, CodeCompleter->getAllocator(), ContextKind);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(), ContextKind);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
// First pass: look for tags.
@@ -3439,13 +3584,14 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_TypeQualifiers);
Results.EnterNewScope();
if (!(DS.getTypeQualifiers() & DeclSpec::TQ_const))
Results.AddResult("const");
if (!(DS.getTypeQualifiers() & DeclSpec::TQ_volatile))
Results.AddResult("volatile");
- if (getLangOptions().C99 &&
+ if (getLangOpts().C99 &&
!(DS.getTypeQualifiers() & DeclSpec::TQ_restrict))
Results.AddResult("restrict");
Results.ExitScope();
@@ -3510,18 +3656,16 @@ void Sema::CodeCompleteCase(Scope *S) {
}
}
- if (getLangOptions().CPlusPlus && !Qualifier && EnumeratorsSeen.empty()) {
+ if (getLangOpts().CPlusPlus && !Qualifier && EnumeratorsSeen.empty()) {
// If there are no prior enumerators in C++, check whether we have to
// qualify the names of the enumerators that we suggest, because they
// may not be visible in this scope.
- Qualifier = getRequiredQualification(Context, CurContext,
- Enum->getDeclContext());
-
- // FIXME: Scoped enums need to start with "EnumDecl" as the context!
+ Qualifier = getRequiredQualification(Context, CurContext, Enum);
}
// Add any enumerators that have not yet been mentioned.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Expression);
Results.EnterNewScope();
for (EnumDecl::enumerator_iterator E = Enum->enumerator_begin(),
@@ -3544,7 +3688,6 @@ void Sema::CodeCompleteCase(Scope *S) {
kind = CodeCompletionContext::CCC_OtherWithMacros;
}
-
HandleCodeCompleteResults(this, CodeCompleter,
kind,
                            Results.data(), Results.size());
@@ -3566,19 +3709,19 @@ namespace {
};
}
-static bool anyNullArguments(Expr **Args, unsigned NumArgs) {
- if (NumArgs && !Args)
+static bool anyNullArguments(llvm::ArrayRef<Expr*> Args) {
+ if (Args.size() && !Args.data())
return true;
-
- for (unsigned I = 0; I != NumArgs; ++I)
+
+ for (unsigned I = 0; I != Args.size(); ++I)
if (!Args[I])
return true;
-
+
return false;
}
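
Editor's note: the Expr**/NumArgs pairs give way to llvm::ArrayRef<Expr*>
here. An ArrayRef carries the pointer and the length as a single value and
converts implicitly from a pointer/size pair or a SmallVector, which is why
the separate count parameter can be dropped everywhere below. A self-contained
sketch of the style (countNonNull is a hypothetical helper):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"

    namespace clang { class Expr; }

    static unsigned countNonNull(llvm::ArrayRef<clang::Expr *> Args) {
      unsigned N = 0;
      for (unsigned I = 0, E = Args.size(); I != E; ++I)
        if (Args[I])    // an ArrayRef indexes like a plain array
          ++N;
      return N;
    }

    // Call sites no longer pass a separate count:
    //   countNonNull(llvm::makeArrayRef(ArgPtr, NumArgs));
    //   countNonNull(ArgVec);  // SmallVector<Expr *, 8> converts implicitly
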
void Sema::CodeCompleteCall(Scope *S, Expr *FnIn,
- Expr **ArgsIn, unsigned NumArgs) {
+ llvm::ArrayRef<Expr *> Args) {
if (!CodeCompleter)
return;
@@ -3588,11 +3731,10 @@ void Sema::CodeCompleteCall(Scope *S, Expr *FnIn,
// e.g., by merging the two kinds of results.
Expr *Fn = (Expr *)FnIn;
- Expr **Args = (Expr **)ArgsIn;
// Ignore type-dependent call expressions entirely.
- if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args, NumArgs) ||
- Expr::hasAnyTypeDependentArguments(Args, NumArgs)) {
+ if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args) ||
+ Expr::hasAnyTypeDependentArguments(Args)) {
CodeCompleteOrdinaryName(S, PCC_Expression);
return;
}
@@ -3610,19 +3752,18 @@ void Sema::CodeCompleteCall(Scope *S, Expr *FnIn,
Expr *NakedFn = Fn->IgnoreParenCasts();
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn))
- AddOverloadedCallCandidates(ULE, Args, NumArgs, CandidateSet,
+ AddOverloadedCallCandidates(ULE, Args, CandidateSet,
/*PartialOverloading=*/ true);
else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(NakedFn)) {
FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
if (FDecl) {
- if (!getLangOptions().CPlusPlus ||
+ if (!getLangOpts().CPlusPlus ||
!FDecl->getType()->getAs<FunctionProtoType>())
Results.push_back(ResultCandidate(FDecl));
else
// FIXME: access?
- AddOverloadCandidate(FDecl, DeclAccessPair::make(FDecl, AS_none),
- Args, NumArgs, CandidateSet,
- false, /*PartialOverloading*/true);
+ AddOverloadCandidate(FDecl, DeclAccessPair::make(FDecl, AS_none), Args,
+ CandidateSet, false, /*PartialOverloading*/true);
}
}
@@ -3645,12 +3786,12 @@ void Sema::CodeCompleteCall(Scope *S, Expr *FnIn,
for (unsigned I = 0, N = Results.size(); I != N; ++I) {
if (const FunctionType *FType = Results[I].getFunctionType())
if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FType))
- if (NumArgs < Proto->getNumArgs()) {
+ if (Args.size() < Proto->getNumArgs()) {
if (ParamType.isNull())
- ParamType = Proto->getArgType(NumArgs);
+ ParamType = Proto->getArgType(Args.size());
else if (!Context.hasSameUnqualifiedType(
ParamType.getNonReferenceType(),
- Proto->getArgType(NumArgs).getNonReferenceType())) {
+ Proto->getArgType(Args.size()).getNonReferenceType())) {
ParamType = QualType();
break;
}
@@ -3671,8 +3812,8 @@ void Sema::CodeCompleteCall(Scope *S, Expr *FnIn,
if (const FunctionProtoType *Proto
= FunctionType->getAs<FunctionProtoType>()) {
- if (NumArgs < Proto->getNumArgs())
- ParamType = Proto->getArgType(NumArgs);
+ if (Args.size() < Proto->getNumArgs())
+ ParamType = Proto->getArgType(Args.size());
}
}
@@ -3682,7 +3823,7 @@ void Sema::CodeCompleteCall(Scope *S, Expr *FnIn,
CodeCompleteExpression(S, ParamType);
if (!Results.empty())
- CodeCompleter->ProcessOverloadCandidates(*this, NumArgs, Results.data(),
+ CodeCompleter->ProcessOverloadCandidates(*this, Args.size(), Results.data(),
Results.size());
}
@@ -3715,6 +3856,7 @@ void Sema::CodeCompleteReturn(Scope *S) {
void Sema::CodeCompleteAfterIf(Scope *S) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
mapCodeCompletionContext(*this, PCC_Statement));
Results.setFilter(&ResultBuilder::IsOrdinaryName);
Results.EnterNewScope();
@@ -3726,14 +3868,17 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
AddOrdinaryNameResults(PCC_Statement, S, *this, Results);
// "else" block
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk("else");
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddPlaceholderChunk("statements");
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ if (Results.includeCodePatterns()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ }
Results.AddResult(Builder.TakeString());
// "else if" block
@@ -3742,23 +3887,25 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
Builder.AddTextChunk("if");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddPlaceholderChunk("statements");
- Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
- Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ if (Results.includeCodePatterns()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ }
Results.AddResult(Builder.TakeString());
Results.ExitScope();
if (S->getFnParent())
- AddPrettyFunctionResults(PP.getLangOptions(), Results);
+ AddPrettyFunctionResults(PP.getLangOpts(), Results);
if (CodeCompleter->includeMacros())
AddMacroResults(PP, Results);
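
Editor's note: the brace-skeleton halves of the "else" and "else if" patterns
are now gated on Results.includeCodePatterns(), so clients that ask for bare
keywords still get a plain "else". The shape of the gate, factored into a
hypothetical helper purely for illustration:

    static void addKeywordPattern(ResultBuilder &Results,
                                  CodeCompletionBuilder &Builder,
                                  const char *Keyword) {
      Builder.AddTypedTextChunk(Keyword);   // always offer the keyword itself
      if (Results.includeCodePatterns()) {  // braces only for pattern clients
        Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
        Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
        Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
        Builder.AddPlaceholderChunk("statements");
        Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
        Builder.AddChunk(CodeCompletionString::CK_RightBrace);
      }
      Results.AddResult(Builder.TakeString());
    }
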
@@ -3789,6 +3936,7 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
return;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Name);
Results.EnterNewScope();
@@ -3820,6 +3968,7 @@ void Sema::CodeCompleteUsing(Scope *S) {
return;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PotentiallyQualifiedName,
&ResultBuilder::IsNestedNameSpecifier);
Results.EnterNewScope();
@@ -3847,6 +3996,7 @@ void Sema::CodeCompleteUsingDirective(Scope *S) {
// After "using namespace", we expect to see a namespace name or namespace
// alias.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Namespace,
&ResultBuilder::IsNamespaceOrAlias);
Results.EnterNewScope();
@@ -3871,6 +4021,7 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
= Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
SuppressedGlobalResults
? CodeCompletionContext::CCC_Namespace
: CodeCompletionContext::CCC_Other,
@@ -3891,7 +4042,8 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
// namespace to the list of results.
Results.EnterNewScope();
for (std::map<NamespaceDecl *, NamespaceDecl *>::iterator
- NS = OrigToLatest.begin(), NSEnd = OrigToLatest.end();
+ NS = OrigToLatest.begin(),
+ NSEnd = OrigToLatest.end();
NS != NSEnd; ++NS)
Results.AddResult(CodeCompletionResult(NS->second, 0),
CurContext, 0, false);
@@ -3909,6 +4061,7 @@ void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
// After "namespace", we expect to see a namespace or alias.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Namespace,
&ResultBuilder::IsNamespaceOrAlias);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
@@ -3925,6 +4078,7 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Type,
&ResultBuilder::IsType);
Results.EnterNewScope();
@@ -3942,7 +4096,7 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
CodeCompleter->includeGlobals());
// Add any type specifiers
- AddTypeSpecifierResults(getLangOptions(), Results);
+ AddTypeSpecifierResults(getLangOpts(), Results);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
@@ -3960,6 +4114,7 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
return;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PotentiallyQualifiedName);
Results.EnterNewScope();
@@ -3976,7 +4131,8 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
}
// Add completions for base classes.
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
bool SawLastInitializer = (NumInitializers == 0);
CXXRecordDecl *ClassDecl = Constructor->getParent();
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
@@ -4051,7 +4207,9 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
Results.AddResult(CodeCompletionResult(Builder.TakeString(),
SawLastInitializer? CCP_NextInitializer
: CCP_MemberDeclaration,
- CXCursor_MemberRef));
+ CXCursor_MemberRef,
+ CXAvailability_Available,
+ *Field));
SawLastInitializer = false;
}
Results.ExitScope();
@@ -4060,6 +4218,61 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
Results.data(), Results.size());
}
+/// \brief Determine whether this scope denotes a namespace.
+static bool isNamespaceScope(Scope *S) {
+ DeclContext *DC = static_cast<DeclContext *>(S->getEntity());
+ if (!DC)
+ return false;
+
+ return DC->isFileContext();
+}
+
+void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
+ bool AfterAmpersand) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ // Note what has already been captured.
+ llvm::SmallPtrSet<IdentifierInfo *, 4> Known;
+ bool IncludedThis = false;
+ for (SmallVectorImpl<LambdaCapture>::iterator C = Intro.Captures.begin(),
+ CEnd = Intro.Captures.end();
+ C != CEnd; ++C) {
+ if (C->Kind == LCK_This) {
+ IncludedThis = true;
+ continue;
+ }
+
+ Known.insert(C->Id);
+ }
+
+ // Look for other capturable variables.
+ for (; S && !isNamespaceScope(S); S = S->getParent()) {
+ for (Scope::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
+ D != DEnd; ++D) {
+ VarDecl *Var = dyn_cast<VarDecl>(*D);
+ if (!Var ||
+ !Var->hasLocalStorage() ||
+ Var->hasAttr<BlocksAttr>())
+ continue;
+
+ if (Known.insert(Var->getIdentifier()))
+ Results.AddResult(CodeCompletionResult(Var), CurContext, 0, false);
+ }
+ }
+
+ // Add 'this', if it would be valid.
+ if (!IncludedThis && !AfterAmpersand && Intro.Default != LCD_ByCopy)
+ addThisCompletion(*this, Results);
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
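+
Editor's note: the capture walk above relies on the fact that, in this LLVM
snapshot, SmallPtrSet::insert returns true only on first insertion, so a
single set both records what the lambda introducer already captured and
filters repeated variables from the scope walk. The idiom in isolation
(generic names, illustrative only):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallPtrSet.h"

    template <typename T, typename Fn>
    static void forEachFirstOccurrence(llvm::ArrayRef<T *> Items, Fn Emit) {
      llvm::SmallPtrSet<T *, 4> Known;
      for (unsigned I = 0, E = Items.size(); I != E; ++I)
        if (Known.insert(Items[I]))  // false for a repeat, so it is skipped
          Emit(Items[I]);
    }
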
// Macro that expands to @Keyword or Keyword, depending on whether NeedAt is
// true or false.
#define OBJC_AT_KEYWORD_NAME(NeedAt,Keyword) NeedAt? "@" #Keyword : #Keyword
@@ -4070,7 +4283,8 @@ static void AddObjCImplementationResults(const LangOptions &LangOpts,
// Since we have an implementation, we can end it.
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,end)));
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
if (LangOpts.ObjC2) {
// @dynamic
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,dynamic));
@@ -4108,7 +4322,8 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts,
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
// @class name ;
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,class));
@@ -4150,12 +4365,13 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
void Sema::CodeCompleteObjCAtDirective(Scope *S) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
if (isa<ObjCImplDecl>(CurContext))
- AddObjCImplementationResults(getLangOptions(), Results, false);
+ AddObjCImplementationResults(getLangOpts(), Results, false);
else if (CurContext->isObjCContainer())
- AddObjCInterfaceResults(getLangOptions(), Results, false);
+ AddObjCInterfaceResults(getLangOpts(), Results, false);
else
AddObjCTopLevelResults(Results, false);
Results.ExitScope();
@@ -4166,9 +4382,15 @@ void Sema::CodeCompleteObjCAtDirective(Scope *S) {
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
// @encode ( type-name )
+ const char *EncodeType = "char[]";
+ if (Results.getSema().getLangOpts().CPlusPlus ||
+ Results.getSema().getLangOpts().ConstStrings)
+    EncodeType = "const char[]";
+ Builder.AddResultTypeChunk(EncodeType);
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,encode));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("type-name");
@@ -4176,6 +4398,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
Results.AddResult(Result(Builder.TakeString()));
// @protocol ( protocol-name )
+ Builder.AddResultTypeChunk("Protocol *");
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("protocol-name");
@@ -4183,16 +4406,38 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
Results.AddResult(Result(Builder.TakeString()));
// @selector ( selector )
+ Builder.AddResultTypeChunk("SEL");
Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,selector));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("selector");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
+
+ // @[ objects, ... ]
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,[));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("objects, ...");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @{ key : object, ... }
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,{));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("key");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("object, ...");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
}
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
if (Results.includeCodePatterns()) {
// @try { statements } @catch ( declaration ) { statements } @finally
@@ -4248,9 +4493,10 @@ static void AddObjCVisibilityResults(const LangOptions &LangOpts,
void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
- AddObjCVisibilityResults(getLangOptions(), Results, false);
+ AddObjCVisibilityResults(getLangOpts(), Results, false);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
@@ -4259,6 +4505,7 @@ void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
void Sema::CodeCompleteObjCAtStatement(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
AddObjCStatementResults(Results, false);
@@ -4271,6 +4518,7 @@ void Sema::CodeCompleteObjCAtStatement(Scope *S) {
void Sema::CodeCompleteObjCAtExpression(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
AddObjCExpressionResults(Results, false);
@@ -4324,6 +4572,7 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readonly))
@@ -4346,14 +4595,16 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_atomic))
Results.AddResult(CodeCompletionResult("atomic"));
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) {
- CodeCompletionBuilder Setter(Results.getAllocator());
+ CodeCompletionBuilder Setter(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
Setter.AddTypedTextChunk("setter");
Setter.AddTextChunk(" = ");
Setter.AddPlaceholderChunk("method");
Results.AddResult(CodeCompletionResult(Setter.TakeString()));
}
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_getter)) {
- CodeCompletionBuilder Getter(Results.getAllocator());
+ CodeCompletionBuilder Getter(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
Getter.AddTypedTextChunk("getter");
Getter.AddTextChunk(" = ");
Getter.AddPlaceholderChunk("method");
@@ -4467,23 +4718,25 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
// Visit the protocols of protocols.
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Protocol->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
- I != E; ++I)
- AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents, NumSelIdents,
- CurContext, Selectors, AllowSameLength, Results, false);
+ if (Protocol->hasDefinition()) {
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Protocol->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, false);
+ }
}
ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container);
- if (!IFace)
+ if (!IFace || !IFace->hasDefinition())
return;
// Add methods in protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols= IFace->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ for (ObjCInterfaceDecl::protocol_iterator I = IFace->protocol_begin(),
+ E = IFace->protocol_end();
I != E; ++I)
AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents, NumSelIdents,
CurContext, Selectors, AllowSameLength, Results, false);
@@ -4542,6 +4795,7 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
// Find all of the potential getters.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
@@ -4571,6 +4825,7 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
  // Find all of the potential setters.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
@@ -4588,6 +4843,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Type);
Results.EnterNewScope();
@@ -4619,16 +4875,16 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
// IBAction)<#selector#>:(id)sender
if (DS.getObjCDeclQualifier() == 0 && !IsParameter &&
Context.Idents.get("IBAction").hasMacroDefinition()) {
- typedef CodeCompletionString::Chunk Chunk;
- CodeCompletionBuilder Builder(Results.getAllocator(), CCP_CodePattern,
- CXAvailability_Available);
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo(),
+ CCP_CodePattern, CXAvailability_Available);
Builder.AddTypedTextChunk("IBAction");
- Builder.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddPlaceholderChunk("selector");
- Builder.AddChunk(Chunk(CodeCompletionString::CK_Colon));
- Builder.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("id");
- Builder.AddChunk(Chunk(CodeCompletionString::CK_RightParen));
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("sender");
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
@@ -4792,7 +5048,8 @@ static ObjCMethodDecl *AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
}
// We have a superclass method. Now, form the send-to-super completion.
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
// Give this completion a return type.
AddResultTypeChunk(S.Context, getCompletionPrintingPolicy(S), SuperMethod,
@@ -4838,18 +5095,19 @@ static ObjCMethodDecl *AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
}
}
- Results.AddResult(CodeCompletionResult(Builder.TakeString(), CCP_SuperCompletion,
- SuperMethod->isInstanceMethod()
- ? CXCursor_ObjCInstanceMethodDecl
- : CXCursor_ObjCClassMethodDecl));
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(), SuperMethod,
+ CCP_SuperCompletion));
return SuperMethod;
}
void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCMessageReceiver,
- &ResultBuilder::IsObjCMessageReceiver);
+ getLangOpts().CPlusPlus0x
+ ? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
+ : &ResultBuilder::IsObjCMessageReceiver);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
Results.EnterNewScope();
@@ -4866,6 +5124,9 @@ void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
AddSuperSendCompletion(*this, /*NeedSuperKeyword=*/true, 0, 0, Results);
}
+ if (getLangOpts().CPlusPlus0x)
+ addThisCompletion(*this, Results);
+
Results.ExitScope();
if (CodeCompleter->includeMacros())
@@ -4919,9 +5180,11 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
} else {
// Assume that "super" names some kind of value and parse that way.
CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Super, SuperLoc);
- ExprResult SuperExpr = ActOnIdExpression(S, SS, id, false, false);
+ ExprResult SuperExpr = ActOnIdExpression(S, SS, TemplateKWLoc, id,
+ false, false);
return CodeCompleteObjCInstanceMessage(S, (Expr *)SuperExpr.get(),
SelIdents, NumSelIdents,
AtArgumentExpression);
@@ -5061,6 +5324,7 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
QualType T = this->GetTypeFromParser(Receiver);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage,
T, SelIdents, NumSelIdents));
@@ -5126,6 +5390,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
// Build the set of methods we can see.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
ReceiverType, SelIdents, NumSelIdents));
@@ -5277,6 +5542,7 @@ void Sema::CodeCompleteObjCSelector(Scope *S, IdentifierInfo **SelIdents,
}
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_SelectorName);
Results.EnterNewScope();
for (GlobalMethodPool::iterator M = MethodPool.begin(),
@@ -5287,7 +5553,8 @@ void Sema::CodeCompleteObjCSelector(Scope *S, IdentifierInfo **SelIdents,
if (!isAcceptableObjCSelector(Sel, MK_Any, SelIdents, NumSelIdents))
continue;
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
if (Sel.isUnarySelector()) {
Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
Sel.getNameForSlot(0)));
@@ -5330,25 +5597,15 @@ static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
D != DEnd; ++D) {
// Record any protocols we find.
if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*D))
- if (!OnlyForwardDeclarations || Proto->isForwardDecl())
+ if (!OnlyForwardDeclarations || !Proto->hasDefinition())
Results.AddResult(Result(Proto, 0), CurContext, 0, false);
-
- // Record any forward-declared protocols we find.
- if (ObjCForwardProtocolDecl *Forward
- = dyn_cast<ObjCForwardProtocolDecl>(*D)) {
- for (ObjCForwardProtocolDecl::protocol_iterator
- P = Forward->protocol_begin(),
- PEnd = Forward->protocol_end();
- P != PEnd; ++P)
- if (!OnlyForwardDeclarations || (*P)->isForwardDecl())
- Results.AddResult(Result(*P, 0), CurContext, 0, false);
- }
}
}
void Sema::CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
if (CodeCompleter && CodeCompleter->includeGlobals()) {
@@ -5376,6 +5633,7 @@ void Sema::CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
if (CodeCompleter && CodeCompleter->includeGlobals()) {
@@ -5406,23 +5664,15 @@ static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
D != DEnd; ++D) {
// Record any interfaces we find.
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*D))
- if ((!OnlyForwardDeclarations || Class->isForwardDecl()) &&
+ if ((!OnlyForwardDeclarations || !Class->hasDefinition()) &&
(!OnlyUnimplemented || !Class->getImplementation()))
Results.AddResult(Result(Class, 0), CurContext, 0, false);
-
- // Record any forward-declared interfaces we find.
- if (ObjCClassDecl *Forward = dyn_cast<ObjCClassDecl>(*D)) {
- ObjCInterfaceDecl *IDecl = Forward->getForwardInterfaceDecl();
- if ((!OnlyForwardDeclarations || IDecl->isForwardDecl()) &&
- (!OnlyUnimplemented || !IDecl->getImplementation()))
- Results.AddResult(Result(IDecl, 0), CurContext,
- 0, false);
- }
}
}
void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
@@ -5442,6 +5692,7 @@ void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
SourceLocation ClassNameLoc) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCInterfaceName);
Results.EnterNewScope();
@@ -5466,6 +5717,7 @@ void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
@@ -5488,6 +5740,7 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCCategoryName);
// Ignore any categories we find that have already been implemented by this
@@ -5531,6 +5784,7 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCCategoryName);
  // Add all of the categories that have corresponding interface
@@ -5559,6 +5813,7 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
// Figure out where this @synthesize lives.
@@ -5599,6 +5854,7 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
// Figure out where this @synthesize lives.
@@ -5668,7 +5924,8 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
unsigned Priority = CCP_MemberDeclaration + 1;
typedef CodeCompletionResult Result;
CodeCompletionAllocator &Allocator = Results.getAllocator();
- CodeCompletionBuilder Builder(Allocator, Priority,CXAvailability_Available);
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo(),
+                                Priority, CXAvailability_Available);
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
Builder.AddResultTypeChunk(GetCompletionTypeString(PropertyType, Context,
@@ -5702,6 +5959,9 @@ static void FindImplementableMethods(ASTContext &Context,
bool InOriginalClass = true) {
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)) {
// Recurse into protocols.
+ if (!IFace->hasDefinition())
+ return;
+
const ObjCList<ObjCProtocolDecl> &Protocols
= IFace->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
@@ -5742,14 +6002,16 @@ static void FindImplementableMethods(ASTContext &Context,
}
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
- // Recurse into protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Protocol->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
- I != E; ++I)
- FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
- KnownMethods, false);
+ if (Protocol->hasDefinition()) {
+ // Recurse into protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Protocol->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
+ KnownMethods, false);
+ }
}
// Add methods in this container. This operation occurs last because
@@ -5771,10 +6033,14 @@ static void FindImplementableMethods(ASTContext &Context,
/// \brief Add the parenthesized return or parameter type chunk to a code
/// completion string.
static void AddObjCPassingTypeChunk(QualType Type,
+ unsigned ObjCDeclQuals,
ASTContext &Context,
const PrintingPolicy &Policy,
CodeCompletionBuilder &Builder) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ std::string Quals = formatObjCParamQualifiers(ObjCDeclQuals);
+ if (!Quals.empty())
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(Quals));
Builder.AddTextChunk(GetCompletionTypeString(Type, Context, Policy,
Builder.getAllocator()));
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -5810,7 +6076,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Builder that will create each code completion.
typedef CodeCompletionResult Result;
CodeCompletionAllocator &Allocator = Results.getAllocator();
- CodeCompletionBuilder Builder(Allocator);
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
// The selector table.
SelectorTable &Selectors = Context.Selectors;
@@ -5849,7 +6115,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
KnownSelectors.insert(Selectors.getNullarySelector(PropName)) &&
ReturnTypeMatchesProperty && !Property->getGetterMethodDecl()) {
if (ReturnType.isNull())
- AddObjCPassingTypeChunk(Property->getType(), Context, Policy, Builder);
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
+ Context, Policy, Builder);
Builder.AddTypedTextChunk(Key);
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
@@ -5895,7 +6162,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTypedTextChunk(
Allocator.CopyString(SelectorId->getName()));
Builder.AddTypedTextChunk(":");
- AddObjCPassingTypeChunk(Property->getType(), Context, Policy, Builder);
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
+ Context, Policy, Builder);
Builder.AddTextChunk(Key);
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
@@ -6464,6 +6732,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
// Add declarations or definitions for each of the known methods.
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
@@ -6471,13 +6740,16 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
MEnd = KnownMethods.end();
M != MEnd; ++M) {
ObjCMethodDecl *Method = M->second.first;
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
// If the result type was not already provided, add it to the
// pattern as (type).
if (ReturnType.isNull())
- AddObjCPassingTypeChunk(Method->getResultType(), Context, Policy,
- Builder);
+ AddObjCPassingTypeChunk(Method->getResultType(),
+ Method->getObjCDeclQualifier(),
+ Context, Policy,
+ Builder);
Selector Sel = Method->getSelector();
@@ -6501,7 +6773,9 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
break;
// Add the parameter type.
- AddObjCPassingTypeChunk((*P)->getOriginalType(), Context, Policy,
+ AddObjCPassingTypeChunk((*P)->getOriginalType(),
+ (*P)->getObjCDeclQualifier(),
+ Context, Policy,
Builder);
if (IdentifierInfo *Id = (*P)->getIdentifier())
@@ -6536,15 +6810,12 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
if (!M->second.second)
Priority += CCD_InBaseClass;
- Results.AddResult(Result(Builder.TakeString(), Priority,
- Method->isInstanceMethod()
- ? CXCursor_ObjCInstanceMethodDecl
- : CXCursor_ObjCClassMethodDecl));
+ Results.AddResult(Result(Builder.TakeString(), Method, Priority));
}
// Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
// the properties in this class and its categories.
- if (Context.getLangOptions().ObjC2) {
+ if (Context.getLangOpts().ObjC2) {
SmallVector<ObjCContainerDecl *, 4> Containers;
Containers.push_back(SearchDecl);
@@ -6605,6 +6876,7 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
// Build the set of methods we can see.
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
if (ReturnTy)
@@ -6627,7 +6899,8 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
if (NumSelIdents && NumSelIdents <= MethList->Method->param_size()) {
ParmVarDecl *Param = MethList->Method->param_begin()[NumSelIdents-1];
if (Param->getIdentifier()) {
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
Param->getIdentifier()->getName()));
Results.AddResult(Builder.TakeString());
@@ -6653,11 +6926,13 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorDirective);
Results.EnterNewScope();
// #if <condition>
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk("if");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("condition");
@@ -6756,7 +7031,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("arguments");
Results.AddResult(Builder.TakeString());
- if (getLangOptions().ObjC1) {
+ if (getLangOpts().ObjC1) {
// #import "header"
Builder.AddTypedTextChunk("import");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -6816,11 +7091,13 @@ void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
IsDefinition? CodeCompletionContext::CCC_MacroName
: CodeCompletionContext::CCC_MacroNameUse);
if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
// Add just the names of macros, not their arguments.
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
Results.EnterNewScope();
for (Preprocessor::macro_iterator M = PP.macro_begin(),
MEnd = PP.macro_end();
@@ -6840,6 +7117,7 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
void Sema::CodeCompletePreprocessorExpression() {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorExpression);
if (!CodeCompleter || CodeCompleter->includeMacros())
@@ -6847,7 +7125,8 @@ void Sema::CodeCompletePreprocessorExpression() {
// defined (<macro>)
Results.EnterNewScope();
- CodeCompletionBuilder Builder(Results.getAllocator());
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk("defined");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
@@ -6879,8 +7158,10 @@ void Sema::CodeCompleteNaturalLanguage() {
}
void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results) {
- ResultBuilder Builder(*this, Allocator, CodeCompletionContext::CCC_Recovery);
+ ResultBuilder Builder(*this, Allocator, CCTUInfo,
+ CodeCompletionContext::CCC_Recovery);
if (!CodeCompleter || CodeCompleter->includeGlobals()) {
CodeCompletionDeclConsumer Consumer(Builder,
Context.getTranslationUnitDecl());
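The hunks above all make the same mechanical change: every ResultBuilder and CodeCompletionBuilder now receives the consumer's CodeCompletionTUInfo alongside its allocator, so per-translation-unit data (such as cached context name strings) can be shared across completion results. A condensed sketch of the new construction pattern; completeExample() is a hypothetical wrapper, while the builder types and getCodeCompletionTUInfo() are the ones visible in the diff above:

    // Sketch only, not code from the patch.
    void completeExample(Sema &S, CodeCompleteConsumer *CodeCompleter) {
      ResultBuilder Results(S, CodeCompleter->getAllocator(),
                            CodeCompleter->getCodeCompletionTUInfo(), // new arg
                            CodeCompletionContext::CCC_Other);
      CodeCompletionBuilder Builder(Results.getAllocator(),
                                    Results.getCodeCompletionTUInfo()); // new arg
      Builder.AddTypedTextChunk("example");
      Results.AddResult(Builder.TakeString());
    }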
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp
new file mode 100644
index 0000000..d83a13e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp
@@ -0,0 +1,14 @@
+//===-- SemaConsumer.cpp - Abstract interface for AST semantics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaConsumer.h"
+
+using namespace clang;
+
+void SemaConsumer::anchor() { }
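The new file is a common LLVM/Clang idiom rather than functional code: giving SemaConsumer one out-of-line virtual member (the "anchor") pins its vtable to a single translation unit instead of letting every client object file emit a copy. A minimal standalone sketch of the idiom, using a hypothetical Widget class:

    class Widget {
    public:
      virtual ~Widget() {}     // inline virtuals alone give the class no
      virtual void draw() {}   // "key function"...
      virtual void anchor();   // ...so one member is deliberately out of line
    };

    // The vtable (and RTTI) for Widget is now emitted only in the .cpp
    // file that defines the anchor.
    void Widget::anchor() {}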
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
index d90a821..8b314b5 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -18,7 +18,6 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "TypeLocBuilder.h"
-#include "clang/AST/APValue.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
@@ -40,6 +39,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/ModuleLoader.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Triple.h"
#include <algorithm>
#include <cstring>
@@ -56,6 +56,30 @@ Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType) {
return DeclGroupPtrTy::make(DeclGroupRef(Ptr));
}
+namespace {
+
+class TypeNameValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ TypeNameValidatorCCC(bool AllowInvalid) : AllowInvalidDecl(AllowInvalid) {
+ WantExpressionKeywords = false;
+ WantCXXNamedCasts = false;
+ WantRemainingKeywords = false;
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (NamedDecl *ND = candidate.getCorrectionDecl())
+ return (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) &&
+ (AllowInvalidDecl || !ND->isInvalidDecl());
+ else
+ return candidate.isKeyword();
+ }
+
+ private:
+ bool AllowInvalidDecl;
+};
+
+}
+
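TypeNameValidatorCCC is one instance of a change running through this whole diff: the old CorrectTypo flag arguments (CTC_Type, CTC_NoKeywords, ...) are replaced by CorrectionCandidateCallback objects that accept or reject each candidate. A hedged sketch of what another such validator could look like; FunctionValidator itself is hypothetical, while the base class and hooks are the ones used above:

    class FunctionValidator : public CorrectionCandidateCallback {
    public:
      FunctionValidator() {
        WantExpressionKeywords = false;  // keyword sets inherited from the
        WantRemainingKeywords = false;   // base class, tuned as above
      }
      virtual bool ValidateCandidate(const TypoCorrection &candidate) {
        // Keep only corrections that resolve to a function declaration.
        if (NamedDecl *ND = candidate.getCorrectionDecl())
          return isa<FunctionDecl>(ND);
        return false;
      }
    };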
/// \brief If the identifier refers to a type name within this scope,
/// return the declaration of that type.
///
@@ -71,6 +95,7 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS,
bool isClassName, bool HasTrailingDot,
ParsedType ObjectTypePtr,
+ bool IsCtorOrDtorName,
bool WantNontrivialTypeSourceInfo,
IdentifierInfo **CorrectedII) {
// Determine where we will perform name lookup.
@@ -93,7 +118,7 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
//
// We therefore do not perform any name lookup if the result would
// refer to a member of an unknown specialization.
- if (!isClassName)
+ if (!isClassName && !IsCtorOrDtorName)
return ParsedType();
// We know from the grammar that this name refers to a type,
@@ -148,9 +173,9 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
if (CorrectedII) {
+ TypeNameValidatorCCC Validator(true);
TypoCorrection Correction = CorrectTypo(Result.getLookupNameInfo(),
- Kind, S, SS, 0, false,
- Sema::CTC_Type);
+ Kind, S, SS, Validator);
IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo();
TemplateTy Template;
bool MemberOfUnknownSpecialization;
@@ -166,16 +191,17 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
// Ignore a correction to a template type as the to-be-corrected
// identifier is not a template (typo correction for template names
// is handled elsewhere).
- !(getLangOptions().CPlusPlus && NewSSPtr &&
+ !(getLangOpts().CPlusPlus && NewSSPtr &&
isTemplateName(S, *NewSSPtr, false, TemplateName, ParsedType(),
false, Template, MemberOfUnknownSpecialization))) {
ParsedType Ty = getTypeName(*NewII, NameLoc, S, NewSSPtr,
isClassName, HasTrailingDot, ObjectTypePtr,
+ IsCtorOrDtorName,
WantNontrivialTypeSourceInfo);
if (Ty) {
- std::string CorrectedStr(Correction.getAsString(getLangOptions()));
+ std::string CorrectedStr(Correction.getAsString(getLangOpts()));
std::string CorrectedQuotedStr(
- Correction.getQuoted(getLangOptions()));
+ Correction.getQuoted(getLangOpts()));
Diag(NameLoc, diag::err_unknown_typename_suggest)
<< Result.getLookupName() << CorrectedQuotedStr
<< FixItHint::CreateReplacement(SourceRange(NameLoc),
@@ -249,8 +275,11 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
if (T.isNull())
T = Context.getTypeDeclType(TD);
-
- if (SS && SS->isNotEmpty()) {
+
+ // NOTE: avoid constructing an ElaboratedType(Loc) if this is a
+ // constructor or destructor name (in such a case, the scope specifier
+ // will be attached to the enclosing Expr or Decl node).
+ if (SS && SS->isNotEmpty() && !IsCtorOrDtorName) {
if (WantNontrivialTypeSourceInfo) {
// Construct a type with type-source information.
TypeLocBuilder Builder;
@@ -258,7 +287,7 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
T = getElaboratedType(ETK_None, *SS, T);
ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
- ElabTL.setKeywordLoc(SourceLocation());
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
ElabTL.setQualifierLoc(SS->getWithLocInContext(Context));
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
} else {
@@ -292,7 +321,6 @@ DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
if (R.getResultKind() == LookupResult::Found)
if (const TagDecl *TD = R.getAsSingle<TagDecl>()) {
switch (TD->getTagKind()) {
- default: return DeclSpec::TST_unspecified;
case TTK_Struct: return DeclSpec::TST_struct;
case TTK_Union: return DeclSpec::TST_union;
case TTK_Class: return DeclSpec::TST_class;
@@ -341,46 +369,44 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
// There may have been a typo in the name of the type. Look up typo
// results, in case we have something that we can suggest.
+ TypeNameValidatorCCC Validator(false);
if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(&II, IILoc),
- LookupOrdinaryName, S, SS, NULL,
- false, CTC_Type)) {
- std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
- std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
+ LookupOrdinaryName, S, SS,
+ Validator)) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
if (Corrected.isKeyword()) {
// We corrected to a keyword.
// FIXME: Actually recover with the keyword we suggest, and emit a fix-it.
Diag(IILoc, diag::err_unknown_typename_suggest)
<< &II << CorrectedQuotedStr;
- return true;
} else {
NamedDecl *Result = Corrected.getCorrectionDecl();
- if ((isa<TypeDecl>(Result) || isa<ObjCInterfaceDecl>(Result)) &&
- !Result->isInvalidDecl()) {
- // We found a similarly-named type or interface; suggest that.
- if (!SS || !SS->isSet())
- Diag(IILoc, diag::err_unknown_typename_suggest)
- << &II << CorrectedQuotedStr
- << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
- else if (DeclContext *DC = computeDeclContext(*SS, false))
- Diag(IILoc, diag::err_unknown_nested_typename_suggest)
- << &II << DC << CorrectedQuotedStr << SS->getRange()
- << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
- else
- llvm_unreachable("could not have corrected a typo here");
+ // We found a similarly-named type or interface; suggest that.
+ if (!SS || !SS->isSet())
+ Diag(IILoc, diag::err_unknown_typename_suggest)
+ << &II << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
+ else if (DeclContext *DC = computeDeclContext(*SS, false))
+ Diag(IILoc, diag::err_unknown_nested_typename_suggest)
+ << &II << DC << CorrectedQuotedStr << SS->getRange()
+ << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
+ else
+ llvm_unreachable("could not have corrected a typo here");
- Diag(Result->getLocation(), diag::note_previous_decl)
- << CorrectedQuotedStr;
-
- SuggestedType = getTypeName(*Result->getIdentifier(), IILoc, S, SS,
- false, false, ParsedType(),
- /*NonTrivialTypeSourceInfo=*/true);
- return true;
- }
+ Diag(Result->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+
+ SuggestedType = getTypeName(*Result->getIdentifier(), IILoc, S, SS,
+ false, false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*NonTrivialTypeSourceInfo=*/true);
}
+ return true;
}
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// See if II is a class template that the user forgot to pass arguments to.
UnqualifiedId Name;
Name.setIdentifier(&II, IILoc);
@@ -410,14 +436,15 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
<< &II << DC << SS->getRange();
else if (isDependentScopeSpecifier(*SS)) {
unsigned DiagID = diag::err_typename_missing;
- if (getLangOptions().MicrosoftMode && isMicrosoftMissingTypename(SS, S))
+ if (getLangOpts().MicrosoftMode && isMicrosoftMissingTypename(SS, S))
DiagID = diag::warn_typename_missing;
Diag(SS->getRange().getBegin(), DiagID)
<< (NestedNameSpecifier *)SS->getScopeRep() << II.getName()
<< SourceRange(SS->getRange().getBegin(), IILoc)
<< FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
- SuggestedType = ActOnTypenameType(S, SourceLocation(), *SS, II, IILoc).get();
+ SuggestedType = ActOnTypenameType(S, SourceLocation(), *SS, II, IILoc)
+ .get();
} else {
assert(SS && SS->isInvalid() &&
"Invalid scope specifier has already been diagnosed");
@@ -429,7 +456,7 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
/// \brief Determine whether the given result set contains either a type name
/// or (when the next token is '<') a template.
static bool isResultTypeOrTemplate(LookupResult &R, const Token &NextToken) {
- bool CheckTemplate = R.getSema().getLangOptions().CPlusPlus &&
+ bool CheckTemplate = R.getSema().getLangOpts().CPlusPlus &&
NextToken.is(tok::less);
for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I) {
@@ -481,7 +508,7 @@ Corrected:
if (!SS.isSet() && NextToken.is(tok::l_paren)) {
// In C++, this is an ADL-only call.
// FIXME: Reference?
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
return BuildDeclarationNameExpr(SS, Result, /*ADL=*/true);
// C90 6.3.2.2:
@@ -506,7 +533,7 @@ Corrected:
// In C, we first see whether there is a tag type by the same name, in
// which case it's likely that the user just forget to write "enum",
// "struct", or "union".
- if (!getLangOptions().CPlusPlus && !SecondTry) {
+ if (!getLangOpts().CPlusPlus && !SecondTry) {
Result.clear(LookupTagName);
LookupParsedName(Result, S, &SS);
if (TagDecl *Tag = Result.getAsSingle<TagDecl>()) {
@@ -535,7 +562,7 @@ Corrected:
}
Diag(NameLoc, diag::err_use_of_tag_name_without_tag)
- << Name << TagName << getLangOptions().CPlusPlus
+ << Name << TagName << getLangOpts().CPlusPlus
<< FixItHint::CreateInsertion(NameLoc, FixItTagName);
break;
}
@@ -547,17 +574,19 @@ Corrected:
// close to this name.
if (!SecondTry) {
SecondTry = true;
+ CorrectionCandidateCallback DefaultValidator;
if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
- Result.getLookupKind(), S, &SS)) {
+ Result.getLookupKind(), S,
+ &SS, DefaultValidator)) {
unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
unsigned QualifiedDiag = diag::err_no_member_suggest;
- std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
- std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
NamedDecl *FirstDecl = Corrected.getCorrectionDecl();
NamedDecl *UnderlyingFirstDecl
= FirstDecl? FirstDecl->getUnderlyingDecl() : 0;
- if (getLangOptions().CPlusPlus && NextToken.is(tok::less) &&
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
UnderlyingFirstDecl && isa<TemplateDecl>(UnderlyingFirstDecl)) {
UnqualifiedDiag = diag::err_no_template_suggest;
QualifiedDiag = diag::err_no_member_template_suggest;
@@ -582,19 +611,19 @@ Corrected:
// Update the name, so that the caller has the new name.
Name = Corrected.getCorrectionAsIdentifierInfo();
+ // Typo correction corrected to a keyword.
+ if (Corrected.isKeyword())
+ return Corrected.getCorrectionAsIdentifierInfo();
+
// Also update the LookupResult...
// FIXME: This should probably go away at some point
Result.clear();
Result.setLookupName(Corrected.getCorrection());
- if (FirstDecl) Result.addDecl(FirstDecl);
-
- // Typo correction corrected to a keyword.
- if (Corrected.isKeyword())
- return Corrected.getCorrectionAsIdentifierInfo();
-
- if (FirstDecl)
+ if (FirstDecl) {
+ Result.addDecl(FirstDecl);
Diag(FirstDecl->getLocation(), diag::note_previous_decl)
<< CorrectedQuotedStr;
+ }
// If we found an Objective-C instance variable, let
// LookupInObjCMethod build the appropriate expression to
@@ -614,7 +643,7 @@ Corrected:
Result.suppressDiagnostics();
return NameClassification::Unknown();
- case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::NotFoundInCurrentInstantiation: {
// We performed name lookup into the current instantiation, and there were
// dependent bases, so we treat this result the same way as any other
// dependent nested-name-specifier.
@@ -629,7 +658,9 @@ Corrected:
// perform some heroics to see if we actually have a
// template-argument-list, which would indicate a missing 'template'
// keyword here.
- return BuildDependentDeclRefExpr(SS, NameInfo, /*TemplateArgs=*/0);
+ return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, /*TemplateArgs=*/0);
+ }
case LookupResult::Found:
case LookupResult::FoundOverloaded:
@@ -637,7 +668,7 @@ Corrected:
break;
case LookupResult::Ambiguous:
- if (getLangOptions().CPlusPlus && NextToken.is(tok::less) &&
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
hasAnyAcceptableTemplateNames(Result)) {
// C++ [temp.local]p3:
// A lookup that finds an injected-class-name (10.2) can result in an
@@ -661,7 +692,7 @@ Corrected:
return NameClassification::Error();
}
- if (getLangOptions().CPlusPlus && NextToken.is(tok::less) &&
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
(IsFilteredTemplateName || hasAnyAcceptableTemplateNames(Result))) {
// C++ [temp.names]p3:
// After name lookup (3.4) finds that a name is a template-name or that
@@ -735,7 +766,7 @@ Corrected:
}
if (!Result.empty() && (*Result.begin())->isCXXClassMember())
- return BuildPossibleImplicitMemberExpr(SS, Result, 0);
+ return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result, 0);
bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
return BuildDeclarationNameExpr(SS, Result, ADL);
@@ -841,7 +872,13 @@ void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
if (!FD)
return;
- PushDeclContext(S, FD);
+ // Same implementation as PushDeclContext, but enters the context
+ // from the lexical parent, rather than the top-level class.
+ assert(CurContext == FD->getLexicalParent() &&
+ "The next DeclContext should be lexically contained in the current one.");
+ CurContext = FD;
+ S->setEntity(CurContext);
+
for (unsigned P = 0, NumParams = FD->getNumParams(); P < NumParams; ++P) {
ParmVarDecl *Param = FD->getParamDecl(P);
// If the parameter has an identifier, then add it to the scope
@@ -853,6 +890,15 @@ void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
}
+void Sema::ActOnExitFunctionContext() {
+ // Same implementation as PopDeclContext, but returns to the lexical parent,
+ // rather than the top-level class.
+ assert(CurContext && "DeclContext imbalance!");
+ CurContext = CurContext->getLexicalParent();
+ assert(CurContext && "Popped translation unit!");
+}
+
+
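ActOnReenterFunctionContext and the new ActOnExitFunctionContext form a strict pair: the first moves CurContext into a function from its lexical parent, the second undoes exactly that. A sketch of the intended call discipline; the parser helper shown is hypothetical:

    void reenterAndParseDelayedBody(Sema &Actions, Scope *S, Decl *FnD) {
      Actions.ActOnReenterFunctionContext(S, FnD);
      // ... handle delayed pieces (e.g. default arguments) with the
      // function's parameters visible in scope ...
      Actions.ActOnExitFunctionContext();  // back to the lexical parent
    }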
/// \brief Determine whether we allow overloading of the function
/// PrevDecl with another declaration.
///
@@ -864,7 +910,7 @@ void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
/// attribute.
static bool AllowOverloadingOfFunction(LookupResult &Previous,
ASTContext &Context) {
- if (Context.getLangOptions().CPlusPlus)
+ if (Context.getLangOpts().CPlusPlus)
return true;
if (Previous.getResultKind() == LookupResult::FoundOverloaded)
@@ -891,7 +937,7 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
// Out-of-line definitions shouldn't be pushed into scope in C++.
// Out-of-line variable and function definitions shouldn't even be pushed
// into scope in C.
- if ((getLangOptions().CPlusPlus || isa<VarDecl>(D) || isa<FunctionDecl>(D)) &&
+ if ((getLangOpts().CPlusPlus || isa<VarDecl>(D) || isa<FunctionDecl>(D)) &&
D->isOutOfLine() &&
!D->getDeclContext()->getRedeclContext()->Equals(
D->getLexicalDeclContext()->getRedeclContext()))
@@ -936,6 +982,11 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
}
}
+void Sema::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
+ if (IdResolver.tryAddTopLevelDecl(D, Name) && TUScope)
+ TUScope->AddDecl(D);
+}
+
bool Sema::isDeclInScope(NamedDecl *&D, DeclContext *Ctx, Scope *S,
bool ExplicitInstantiationOrSpecialization) {
return IdResolver.isDeclInScope(D, Ctx, Context, S,
@@ -1089,7 +1140,7 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (D->isInvalidDecl())
return false;
- if (D->isUsed() || D->hasAttr<UnusedAttr>())
+ if (D->isReferenced() || D->isUsed() || D->hasAttr<UnusedAttr>())
return false;
if (isa<LabelDecl>(D))
@@ -1101,7 +1152,7 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
return false;
// Types of valid local variables should be complete, so this should succeed.
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// White-list anything with an __attribute__((unused)) type.
QualType Ty = VD->getType();
@@ -1123,11 +1174,18 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
return false;
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) {
- // FIXME: Checking for the presence of a user-declared constructor
- // isn't completely accurate; we'd prefer to check that the initializer
- // has no side effects.
- if (RD->hasUserDeclaredConstructor() || !RD->hasTrivialDestructor())
+ if (!RD->hasTrivialDestructor())
return false;
+
+ if (const Expr *Init = VD->getInit()) {
+ const CXXConstructExpr *Construct =
+ dyn_cast<CXXConstructExpr>(Init);
+ if (Construct && !Construct->isElidable()) {
+ CXXConstructorDecl *CD = Construct->getConstructor();
+ if (!CD->isTrivial())
+ return false;
+ }
+ }
}
}
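The rewritten check replaces the coarse "has a user-declared constructor" test with a look at the actual initializer: the unused-variable warning is now suppressed only when a non-elidable, non-trivial constructor really runs. In source terms (editor's illustration, not from the patch):

    struct Plain { int v; };     // trivial ctor and dtor: eligible to warn
    struct Guard { Guard(); };   // non-trivial ctor, trivial dtor

    void f() {
      Plain p;   // still warns: construction has no observable effect
      Guard g;   // no warning: the non-trivial constructor may have effects
    }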
@@ -1141,7 +1199,7 @@ static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
FixItHint &Hint) {
if (isa<LabelDecl>(D)) {
SourceLocation AfterColon = Lexer::findLocationAfterToken(D->getLocEnd(),
- tok::colon, Ctx.getSourceManager(), Ctx.getLangOptions(), true);
+ tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(), true);
if (AfterColon.isInvalid())
return;
Hint = FixItHint::CreateRemoval(CharSourceRange::
@@ -1206,6 +1264,15 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
}
}
+void Sema::ActOnStartFunctionDeclarator() {
+ ++InFunctionDeclarator;
+}
+
+void Sema::ActOnEndFunctionDeclarator() {
+ assert(InFunctionDeclarator);
+ --InFunctionDeclarator;
+}
+
/// \brief Look for an Objective-C class in the translation unit.
///
/// \param Id The name of the Objective-C class we're looking for. If
@@ -1229,10 +1296,11 @@ ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
if (!IDecl && DoTypoCorrection) {
// Perform typo correction at the given location, but only if we
// find an Objective-C class name.
- TypoCorrection C;
- if ((C = CorrectTypo(DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName,
- TUScope, NULL, NULL, false, CTC_NoKeywords)) &&
- (IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>())) {
+ DeclFilterCCC<ObjCInterfaceDecl> Validator;
+ if (TypoCorrection C = CorrectTypo(DeclarationNameInfo(Id, IdLoc),
+ LookupOrdinaryName, TUScope, NULL,
+ Validator)) {
+ IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
Diag(IdLoc, diag::err_undef_interface_suggest)
<< Id << IDecl->getDeclName()
<< FixItHint::CreateReplacement(IdLoc, IDecl->getNameAsString());
@@ -1242,8 +1310,11 @@ ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
Id = IDecl->getIdentifier();
}
}
-
- return dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
+ ObjCInterfaceDecl *Def = dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
+ // This routine must always return a class definition, if any.
+ if (Def && Def->getDefinition())
+ Def = Def->getDefinition();
+ return Def;
}
/// getNonFieldDeclScope - Retrieves the innermost scope, starting
@@ -1273,7 +1344,7 @@ Scope *Sema::getNonFieldDeclScope(Scope *S) {
while (((S->getFlags() & Scope::DeclScope) == 0) ||
(S->getEntity() &&
((DeclContext *)S->getEntity())->isTransparentContext()) ||
- (S->isClassScope() && !getLangOptions().CPlusPlus))
+ (S->isClassScope() && !getLangOpts().CPlusPlus))
S = S->getParent();
return S;
}
@@ -1362,6 +1433,40 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned bid,
return New;
}
+bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
+ QualType OldType;
+ if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
+ OldType = OldTypedef->getUnderlyingType();
+ else
+ OldType = Context.getTypeDeclType(Old);
+ QualType NewType = New->getUnderlyingType();
+
+ if (NewType->isVariablyModifiedType()) {
+ // Must not redefine a typedef with a variably-modified type.
+ int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
+ Diag(New->getLocation(), diag::err_redefinition_variably_modified_typedef)
+ << Kind << NewType;
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return true;
+ }
+
+ if (OldType != NewType &&
+ !OldType->isDependentType() &&
+ !NewType->isDependentType() &&
+ !Context.hasSameType(OldType, NewType)) {
+ int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
+ Diag(New->getLocation(), diag::err_redefinition_different_typedef)
+ << Kind << NewType << OldType;
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return true;
+ }
+ return false;
+}
+
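isIncompatibleTypedef factors the rejection logic out of MergeTypedefNameDecl (below) so other callers can reuse it. The two cases it diagnoses, as source (editor's illustration):

    typedef int  T;
    typedef long T;   // err_redefinition_different_typedef: 'long' vs 'int'
    typedef int  U;
    typedef int  U;   // identical underlying type: not rejected by the helper

    void g(int n) {
      typedef int VM[n];   // variably-modified type; a redefinition whose new
                           // type looks like this hits
                           // err_redefinition_variably_modified_typedef
    }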
/// MergeTypedefNameDecl - We just parsed a typedef 'New' which has the
/// same name and scope as a previous declaration 'Old'. Figure out
/// how to resolve this situation, merging decls or emitting
@@ -1374,7 +1479,7 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
// Allow multiple definitions for ObjC built-in typedefs.
// FIXME: Verify the underlying types are equivalent!
- if (getLangOptions().ObjC1) {
+ if (getLangOpts().ObjC1) {
const IdentifierInfo *TypeID = New->getIdentifier();
switch (TypeID->getLength()) {
default: break;
@@ -1420,46 +1525,20 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
if (Old->isInvalidDecl())
return New->setInvalidDecl();
- // Determine the "old" type we'll use for checking and diagnostics.
- QualType OldType;
- if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
- OldType = OldTypedef->getUnderlyingType();
- else
- OldType = Context.getTypeDeclType(Old);
-
// If the typedef types are not identical, reject them in all languages and
// with any extensions enabled.
-
- if (OldType != New->getUnderlyingType() &&
- Context.getCanonicalType(OldType) !=
- Context.getCanonicalType(New->getUnderlyingType())) {
- int Kind = 0;
- if (isa<TypeAliasDecl>(Old))
- Kind = 1;
- Diag(New->getLocation(), diag::err_redefinition_different_typedef)
- << Kind << New->getUnderlyingType() << OldType;
- if (Old->getLocation().isValid())
- Diag(Old->getLocation(), diag::note_previous_definition);
- return New->setInvalidDecl();
- }
+ if (isIncompatibleTypedef(Old, New))
+ return;
// The types match. Link up the redeclaration chain if the old
// declaration was a typedef.
- // FIXME: this is a potential source of weirdness if the type
- // spellings don't match exactly.
if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Old))
New->setPreviousDeclaration(Typedef);
- // __module_private__ is propagated to later declarations.
- if (Old->isModulePrivate())
- New->setModulePrivate();
- else if (New->isModulePrivate())
- diagnoseModulePrivateRedeclaration(New, Old);
-
- if (getLangOptions().MicrosoftExt)
+ if (getLangOpts().MicrosoftExt)
return;
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// C++ [dcl.typedef]p2:
// In a given non-class scope, a typedef specifier can be used to
// redefine the name of any type declared in that scope to refer
@@ -1497,6 +1576,10 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
return New->setInvalidDecl();
}
+ // Modules always permit redefinition of typedefs, as does C11.
+ if (getLangOpts().Modules || getLangOpts().C11)
+ return;
+
// If we have a redefinition of a typedef in C, emit a warning. This warning
// is normally mapped to an error, but can be controlled with
// -Wtypedef-redefinition. If either the original or the redefinition is
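The early return above encodes the C11 rule (and the modules extension) that a typedef name may be redeclared with the same type. For example (editor's illustration):

    typedef unsigned long size_type;
    typedef unsigned long size_type;  // OK in C11 and under modules;
                                      // -Wtypedef-redefinition in C90/C99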
@@ -1535,36 +1618,37 @@ DeclHasAttr(const Decl *D, const Attr *A) {
}
/// mergeDeclAttributes - Copy attributes from the Old decl to the New one.
-static void mergeDeclAttributes(Decl *newDecl, const Decl *oldDecl,
- ASTContext &C, bool mergeDeprecation = true) {
- if (!oldDecl->hasAttrs())
+void Sema::mergeDeclAttributes(Decl *New, Decl *Old,
+ bool MergeDeprecation) {
+ if (!Old->hasAttrs())
return;
- bool foundAny = newDecl->hasAttrs();
+ bool foundAny = New->hasAttrs();
// Ensure that any moving of objects within the allocated map is done before
// we process them.
- if (!foundAny) newDecl->setAttrs(AttrVec());
+ if (!foundAny) New->setAttrs(AttrVec());
for (specific_attr_iterator<InheritableAttr>
- i = oldDecl->specific_attr_begin<InheritableAttr>(),
- e = oldDecl->specific_attr_end<InheritableAttr>(); i != e; ++i) {
+ i = Old->specific_attr_begin<InheritableAttr>(),
+ e = Old->specific_attr_end<InheritableAttr>();
+ i != e; ++i) {
// Ignore deprecated/unavailable/availability attributes if requested.
- if (!mergeDeprecation &&
+ if (!MergeDeprecation &&
(isa<DeprecatedAttr>(*i) ||
isa<UnavailableAttr>(*i) ||
isa<AvailabilityAttr>(*i)))
continue;
- if (!DeclHasAttr(newDecl, *i)) {
- InheritableAttr *newAttr = cast<InheritableAttr>((*i)->clone(C));
+ if (!DeclHasAttr(New, *i)) {
+ InheritableAttr *newAttr = cast<InheritableAttr>((*i)->clone(Context));
newAttr->setInherited(true);
- newDecl->addAttr(newAttr);
+ New->addAttr(newAttr);
foundAny = true;
}
}
- if (!foundAny) newDecl->dropAttrs();
+ if (!foundAny) New->dropAttrs();
}
/// mergeParamDeclAttributes - Copy attributes from the old parameter
@@ -1651,7 +1735,7 @@ static bool canRedefineFunction(const FunctionDecl *FD,
/// merged with.
///
/// Returns true if there was an error, false otherwise.
-bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
+bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S) {
// Verify the old decl was also a function.
FunctionDecl *Old = 0;
if (FunctionTemplateDecl *OldFunctionTemplate
@@ -1693,8 +1777,8 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
if (!isa<CXXMethodDecl>(New) && !isa<CXXMethodDecl>(Old) &&
New->getStorageClass() == SC_Static &&
Old->getStorageClass() != SC_Static &&
- !canRedefineFunction(Old, getLangOptions())) {
- if (getLangOptions().MicrosoftExt) {
+ !canRedefineFunction(Old, getLangOpts())) {
+ if (getLangOpts().MicrosoftExt) {
Diag(New->getLocation(), diag::warn_static_non_static) << New;
Diag(Old->getLocation(), PrevDiag);
} else {
@@ -1776,7 +1860,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
NewQType = Context.getCanonicalType(New->getType());
}
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// (C++98 13.1p2):
// Certain function declarations cannot be overloaded:
// -- Function declarations that differ only in the return type
@@ -1881,14 +1965,14 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
}
if (OldQTypeForComparison == NewQType)
- return MergeCompatibleFunctionDecls(New, Old);
+ return MergeCompatibleFunctionDecls(New, Old, S);
// Fall through for conflicting redeclarations and redefinitions.
}
// C: Function types need to be compatible, not identical. This handles
// duplicate function decls like "void f(int); void f(enum X);" properly.
- if (!getLangOptions().CPlusPlus &&
+ if (!getLangOpts().CPlusPlus &&
Context.typesAreCompatible(OldQType, NewQType)) {
const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
@@ -1926,7 +2010,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
New->setParams(Params);
}
- return MergeCompatibleFunctionDecls(New, Old);
+ return MergeCompatibleFunctionDecls(New, Old, S);
}
// GNU C permits a K&R definition to follow a prototype declaration
@@ -1940,7 +2024,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
// the K&R definition becomes variadic. This is sort of an edge case, but
// it's legal per the standard depending on how you read C99 6.7.5.3p15 and
// C99 6.9.1p8.
- if (!getLangOptions().CPlusPlus &&
+ if (!getLangOpts().CPlusPlus &&
Old->hasPrototype() && !New->hasPrototype() &&
New->getType()->getAs<FunctionProtoType>() &&
Old->getNumParams() == New->getNumParams()) {
@@ -1987,7 +2071,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
New->setType(Context.getFunctionType(MergedReturn, &ArgTypes[0],
ArgTypes.size(),
OldProto->getExtProtoInfo()));
- return MergeCompatibleFunctionDecls(New, Old);
+ return MergeCompatibleFunctionDecls(New, Old, S);
}
// Fall through to diagnose conflicting types.
@@ -2028,9 +2112,10 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
/// redeclaration of Old.
///
/// \returns false
-bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old) {
+bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
+ Scope *S) {
// Merge the attributes
- mergeDeclAttributes(New, Old, Context);
+ mergeDeclAttributes(New, Old);
// Merge the storage class.
if (Old->getStorageClass() != SC_Extern &&
@@ -2041,12 +2126,6 @@ bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old) {
if (Old->isPure())
New->setPure();
- // __module_private__ is propagated to later declarations.
- if (Old->isModulePrivate())
- New->setModulePrivate();
- else if (New->isModulePrivate())
- diagnoseModulePrivateRedeclaration(New, Old);
-
// Merge attributes from the parameters. These can mismatch with K&R
// declarations.
if (New->getNumParams() == Old->getNumParams())
@@ -2054,21 +2133,21 @@ bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old) {
mergeParamDeclAttributes(New->getParamDecl(i), Old->getParamDecl(i),
Context);
- if (getLangOptions().CPlusPlus)
- return MergeCXXFunctionDecl(New, Old);
+ if (getLangOpts().CPlusPlus)
+ return MergeCXXFunctionDecl(New, Old, S);
return false;
}
void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
- const ObjCMethodDecl *oldMethod) {
+ ObjCMethodDecl *oldMethod) {
// We don't want to merge unavailable and deprecated attributes
// except from interface to implementation.
bool mergeDeprecation = isa<ObjCImplDecl>(newMethod->getDeclContext());
// Merge the attributes.
- mergeDeclAttributes(newMethod, oldMethod, Context, mergeDeprecation);
+ mergeDeclAttributes(newMethod, oldMethod, mergeDeprecation);
// Merge attributes from the parameters.
ObjCMethodDecl::param_const_iterator oi = oldMethod->param_begin();
@@ -2085,15 +2164,14 @@ void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
/// emitting diagnostics as appropriate.
///
/// Declarations using the auto type specifier (C++ [decl.spec.auto]) call back
-/// to here in AddInitializerToDecl and AddCXXDirectInitializerToDecl. We can't
-/// check them before the initializer is attached.
-///
+/// to here in AddInitializerToDecl. We can't check them before the initializer
+/// is attached.
void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old) {
if (New->isInvalidDecl() || Old->isInvalidDecl())
return;
QualType MergedT;
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
AutoType *AT = New->getType()->getContainedAutoType();
if (AT && !AT->isDeduced()) {
// We don't know what the new type is until the initializer is attached.
@@ -2175,8 +2253,9 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
New->setInvalidDecl();
}
- mergeDeclAttributes(New, Old, Context);
- // Warn if an already-declared variable is made a weak_import in a subsequent declaration
+ mergeDeclAttributes(New, Old);
+ // Warn if an already-declared variable is made a weak_import in a subsequent
+ // declaration
if (New->getAttr<WeakImportAttr>() &&
Old->getStorageClass() == SC_None &&
!Old->getAttr<WeakImportAttr>()) {
@@ -2230,12 +2309,6 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
return New->setInvalidDecl();
}
- // __module_private__ is propagated to later declarations.
- if (Old->isModulePrivate())
- New->setModulePrivate();
- else if (New->isModulePrivate())
- diagnoseModulePrivateRedeclaration(New, Old);
-
// Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
// FIXME: The test for external storage here seems wrong? We still
@@ -2259,7 +2332,7 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
// C++ doesn't have tentative definitions, so go right ahead and check here.
const VarDecl *Def;
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
New->isThisDeclarationADefinition() == VarDecl::Definition &&
(Def = Old->getDefinition())) {
Diag(New->getLocation(), diag::err_redefinition)
@@ -2315,11 +2388,17 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
// Note that the above type specs guarantee that the
// type rep is a Decl, whereas in many of the others
// it's a Type.
- Tag = dyn_cast<TagDecl>(TagD);
+ if (isa<TagDecl>(TagD))
+ Tag = cast<TagDecl>(TagD);
+ else if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(TagD))
+ Tag = CTD->getTemplatedDecl();
}
- if (Tag)
+ if (Tag) {
Tag->setFreeStanding();
+ if (Tag->isInvalidDecl())
+ return Tag;
+ }
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
// Enforce C99 6.7.3p2: "Types other than pointer types derived from object
@@ -2357,22 +2436,20 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
bool emittedWarning = false;
if (RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag)) {
- ProcessDeclAttributeList(S, Record, DS.getAttributes().getList());
-
if (!Record->getDeclName() && Record->isCompleteDefinition() &&
DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
- if (getLangOptions().CPlusPlus ||
+ if (getLangOpts().CPlusPlus ||
Record->getDeclContext()->isRecord())
return BuildAnonymousStructOrUnion(S, DS, AS, Record);
- Diag(DS.getSourceRange().getBegin(), diag::ext_no_declarators)
+ Diag(DS.getLocStart(), diag::ext_no_declarators)
<< DS.getSourceRange();
emittedWarning = true;
}
}
// Check for Microsoft C extension: anonymous struct.
- if (getLangOptions().MicrosoftExt && !getLangOptions().CPlusPlus &&
+ if (getLangOpts().MicrosoftExt && !getLangOpts().CPlusPlus &&
CurContext->isRecord() &&
DS.getStorageClassSpec() == DeclSpec::SCS_unspecified) {
// Handle 2 kinds of anonymous struct:
@@ -2383,13 +2460,13 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
if ((Record && Record->getDeclName() && !Record->isCompleteDefinition()) ||
(DS.getTypeSpecType() == DeclSpec::TST_typename &&
DS.getRepAsType().get()->isStructureType())) {
- Diag(DS.getSourceRange().getBegin(), diag::ext_ms_anonymous_struct)
+ Diag(DS.getLocStart(), diag::ext_ms_anonymous_struct)
<< DS.getSourceRange();
return BuildMicrosoftCAnonymousStruct(S, DS, Record);
}
}
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
DS.getStorageClassSpec() != DeclSpec::SCS_typedef)
if (EnumDecl *Enum = dyn_cast_or_null<EnumDecl>(Tag))
if (Enum->enumerator_begin() == Enum->enumerator_end() &&
@@ -2407,12 +2484,12 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
// extension in both Microsoft and GNU.
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef &&
Tag && isa<EnumDecl>(Tag)) {
- Diag(DS.getSourceRange().getBegin(), diag::ext_typedef_without_a_name)
+ Diag(DS.getLocStart(), diag::ext_typedef_without_a_name)
<< DS.getSourceRange();
return Tag;
}
- Diag(DS.getSourceRange().getBegin(), diag::ext_no_declarators)
+ Diag(DS.getLocStart(), diag::ext_no_declarators)
<< DS.getSourceRange();
emittedWarning = true;
}
@@ -2453,28 +2530,29 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
<< Tag->getTagKind()
<< FixItHint::CreateRemoval(DS.getModulePrivateSpecLoc());
- // FIXME: Warn on useless attributes
-
- return TagD;
-}
-
-/// ActOnVlaStmt - If this routine finds a VLA expression in a decl spec, it
-/// builds a statement for it and returns it so it is evaluated.
-StmtResult Sema::ActOnVlaStmt(const DeclSpec &DS) {
- StmtResult R;
- if (DS.getTypeSpecType() == DeclSpec::TST_typeofExpr) {
- Expr *Exp = DS.getRepAsExpr();
- QualType Ty = Exp->getType();
- if (Ty->isPointerType()) {
- do
- Ty = Ty->getAs<PointerType>()->getPointeeType();
- while (Ty->isPointerType());
- }
- if (Ty->isVariableArrayType()) {
- R = ActOnExprStmt(MakeFullExpr(Exp));
+ // Warn about ignored type attributes, for example:
+ // __attribute__((aligned)) struct A;
+  // Attributes should be placed after the tag to apply to the type
+  // declaration.
+ if (!DS.getAttributes().empty()) {
+ DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
+ if (TypeSpecType == DeclSpec::TST_class ||
+ TypeSpecType == DeclSpec::TST_struct ||
+ TypeSpecType == DeclSpec::TST_union ||
+ TypeSpecType == DeclSpec::TST_enum) {
+ AttributeList* attrs = DS.getAttributes().getList();
+ while (attrs) {
+ Diag(attrs->getScopeLoc(),
+ diag::warn_declspec_attribute_ignored)
+ << attrs->getName()
+ << (TypeSpecType == DeclSpec::TST_class ? 0 :
+ TypeSpecType == DeclSpec::TST_struct ? 1 :
+ TypeSpecType == DeclSpec::TST_union ? 2 : 3);
+ attrs = attrs->getNext();
+ }
}
}
- return R;
+
+ return TagD;
}
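The new loop warns when declspec attributes decorate a bare tag declaration with no declarator, where they otherwise do nothing. In source terms (editor's illustration):

    __attribute__((aligned(16))) struct A;      // warning: attribute ignored
    struct __attribute__((aligned(16))) B { };  // OK: placed after the tag,
                                                // applies to the type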
/// We are trying to inject an anonymous member into the given scope;
@@ -2629,51 +2707,56 @@ StorageClassSpecToFunctionDeclStorageClass(DeclSpec::SCS StorageClassSpec) {
/// BuildAnonymousStructOrUnion - Handle the declaration of an
/// anonymous structure or union. Anonymous unions are a C++ feature
-/// (C++ [class.union]) and a GNU C extension; anonymous structures
-/// are a GNU C and GNU C++ extension.
+/// (C++ [class.union]) and a C11 feature; anonymous structures
+/// are a C11 feature and a GNU C++ extension.
Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record) {
DeclContext *Owner = Record->getDeclContext();
// Diagnose whether this anonymous struct/union is an extension.
- if (Record->isUnion() && !getLangOptions().CPlusPlus)
+ if (Record->isUnion() && !getLangOpts().CPlusPlus && !getLangOpts().C11)
Diag(Record->getLocation(), diag::ext_anonymous_union);
- else if (!Record->isUnion())
- Diag(Record->getLocation(), diag::ext_anonymous_struct);
+ else if (!Record->isUnion() && getLangOpts().CPlusPlus)
+ Diag(Record->getLocation(), diag::ext_gnu_anonymous_struct);
+ else if (!Record->isUnion() && !getLangOpts().C11)
+ Diag(Record->getLocation(), diag::ext_c11_anonymous_struct);
// C and C++ require different kinds of checks for anonymous
// structs/unions.
bool Invalid = false;
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
const char* PrevSpec = 0;
unsigned DiagID;
- // C++ [class.union]p3:
- // Anonymous unions declared in a named namespace or in the
- // global namespace shall be declared static.
- if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
- (isa<TranslationUnitDecl>(Owner) ||
- (isa<NamespaceDecl>(Owner) &&
- cast<NamespaceDecl>(Owner)->getDeclName()))) {
- Diag(Record->getLocation(), diag::err_anonymous_union_not_static);
- Invalid = true;
-
- // Recover by adding 'static'.
- DS.SetStorageClassSpec(*this, DeclSpec::SCS_static, SourceLocation(),
- PrevSpec, DiagID);
- }
- // C++ [class.union]p3:
- // A storage class is not allowed in a declaration of an
- // anonymous union in a class scope.
- else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
- isa<RecordDecl>(Owner)) {
- Diag(DS.getStorageClassSpecLoc(),
- diag::err_anonymous_union_with_storage_spec);
- Invalid = true;
-
- // Recover by removing the storage specifier.
- DS.SetStorageClassSpec(*this, DeclSpec::SCS_unspecified, SourceLocation(),
- PrevSpec, DiagID);
+ if (Record->isUnion()) {
+ // C++ [class.union]p6:
+ // Anonymous unions declared in a named namespace or in the
+ // global namespace shall be declared static.
+ if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
+ (isa<TranslationUnitDecl>(Owner) ||
+ (isa<NamespaceDecl>(Owner) &&
+ cast<NamespaceDecl>(Owner)->getDeclName()))) {
+ Diag(Record->getLocation(), diag::err_anonymous_union_not_static)
+ << FixItHint::CreateInsertion(Record->getLocation(), "static ");
+
+ // Recover by adding 'static'.
+ DS.SetStorageClassSpec(*this, DeclSpec::SCS_static, SourceLocation(),
+ PrevSpec, DiagID);
+ }
+ // C++ [class.union]p6:
+ // A storage class is not allowed in a declaration of an
+ // anonymous union in a class scope.
+ else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
+ isa<RecordDecl>(Owner)) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_anonymous_union_with_storage_spec)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+
+ // Recover by removing the storage specifier.
+ DS.SetStorageClassSpec(*this, DeclSpec::SCS_unspecified,
+ SourceLocation(),
+ PrevSpec, DiagID);
+ }
}
// Ignore const/volatile/restrict qualifiers.
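The reworked block splits the old blanket extension warning three ways and scopes the C++ [class.union]p6 storage-class checks to unions only. Roughly what each path now accepts or flags (editor's illustration):

    union { int i; float f; };     // namespace scope: the fix-it now inserts
                                   // the required 'static'
    struct S {
      union  { int a; long b; };   // anonymous union: standard in C++ and C11
      struct { int x, y; };        // anonymous struct: C11 feature;
    };                             // a GNU extension in C++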
@@ -2683,11 +2766,13 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
<< Record->isUnion() << 0
<< FixItHint::CreateRemoval(DS.getConstSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
- Diag(DS.getVolatileSpecLoc(), diag::ext_anonymous_struct_union_qualified)
+ Diag(DS.getVolatileSpecLoc(),
+ diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << 1
<< FixItHint::CreateRemoval(DS.getVolatileSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
- Diag(DS.getRestrictSpecLoc(), diag::ext_anonymous_struct_union_qualified)
+ Diag(DS.getRestrictSpecLoc(),
+ diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << 2
<< FixItHint::CreateRemoval(DS.getRestrictSpecLoc());
@@ -2717,7 +2802,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// copy constructor, a non-trivial destructor, or a non-trivial copy
// assignment operator cannot be a member of a union, nor can an
// array of such objects.
- if (!getLangOptions().CPlusPlus0x && CheckNontrivialField(FD))
+ if (CheckNontrivialField(FD))
Invalid = true;
} else if ((*Mem)->isImplicit()) {
// Any implicit members are fine.
@@ -2730,7 +2815,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
if (!MemRecord->isAnonymousStructOrUnion() &&
MemRecord->getDeclName()) {
// Visual C++ allows type definition in anonymous struct or union.
- if (getLangOptions().MicrosoftExt)
+ if (getLangOpts().MicrosoftExt)
Diag(MemRecord->getLocation(), diag::ext_anonymous_record_with_type)
<< (int)Record->isUnion();
else {
@@ -2754,7 +2839,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
DK = diag::err_anonymous_record_with_static;
// Visual C++ allows type definition in anonymous struct or union.
- if (getLangOptions().MicrosoftExt &&
+ if (getLangOpts().MicrosoftExt &&
DK == diag::err_anonymous_record_with_type)
Diag((*Mem)->getLocation(), diag::ext_anonymous_record_with_type)
<< (int)Record->isUnion();
@@ -2769,7 +2854,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
if (!Record->isUnion() && !Owner->isRecord()) {
Diag(Record->getLocation(), diag::err_anonymous_struct_not_member)
- << (int)getLangOptions().CPlusPlus;
+ << (int)getLangOpts().CPlusPlus;
Invalid = true;
}
@@ -2782,7 +2867,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
NamedDecl *Anon = 0;
if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
Anon = FieldDecl::Create(Context, OwningClass,
- DS.getSourceRange().getBegin(),
+ DS.getLocStart(),
Record->getLocation(),
/*IdentifierInfo=*/0,
Context.getTypeDeclType(Record),
@@ -2790,7 +2875,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
/*BitWidth=*/0, /*Mutable=*/false,
/*HasInit=*/false);
Anon->setAccess(AS);
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
FieldCollector->Add(cast<FieldDecl>(Anon));
} else {
DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
@@ -2809,7 +2894,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
= StorageClassSpecToVarDeclStorageClass(SCSpec);
Anon = VarDecl::Create(Context, Owner,
- DS.getSourceRange().getBegin(),
+ DS.getLocStart(),
Record->getLocation(), /*IdentifierInfo=*/0,
Context.getTypeDeclType(Record),
TInfo, SC, SCAsWritten);
@@ -2879,8 +2964,8 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
// Create a declaration for this anonymous struct.
NamedDecl* Anon = FieldDecl::Create(Context,
cast<RecordDecl>(CurContext),
- DS.getSourceRange().getBegin(),
- DS.getSourceRange().getBegin(),
+ DS.getLocStart(),
+ DS.getLocStart(),
/*IdentifierInfo=*/0,
Context.getTypeDeclType(Record),
TInfo,
@@ -2897,9 +2982,10 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
SmallVector<NamedDecl*, 2> Chain;
Chain.push_back(Anon);
- if (InjectAnonymousStructOrUnionMembers(*this, S, CurContext,
- Record->getDefinition(),
- AS_none, Chain, true))
+ RecordDecl *RecordDef = Record->getDefinition();
+ if (!RecordDef || InjectAnonymousStructOrUnionMembers(*this, S, CurContext,
+ RecordDef, AS_none,
+ Chain, true))
Anon->setInvalidDecl();
return Anon;
@@ -3135,8 +3221,14 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
}
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
- D.setFunctionDefinition(false);
- return HandleDeclarator(S, D, MultiTemplateParamsArg(*this));
+ D.setFunctionDefinitionKind(FDK_Declaration);
+ Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg(*this));
+
+ if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
+ Dcl->getDeclContext()->isFileContext())
+ Dcl->setTopLevelDeclInObjCContainer();
+
+ return Dcl;
}
/// DiagnoseClassNameShadow - Implement C++ [class.mem]p13:
@@ -3158,7 +3250,100 @@ bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
return false;
}
+
+/// \brief Diagnose a declaration whose declarator-id has the given
+/// nested-name-specifier.
+///
+/// \param SS The nested-name-specifier of the declarator-id.
+///
+/// \param DC The declaration context to which the nested-name-specifier
+/// resolves.
+///
+/// \param Name The name of the entity being declared.
+///
+/// \param Loc The location of the name of the entity being declared.
+///
+/// \returns true if we cannot safely recover from this error, false otherwise.
+bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
+ DeclarationName Name,
+ SourceLocation Loc) {
+ DeclContext *Cur = CurContext;
+ while (isa<LinkageSpecDecl>(Cur))
+ Cur = Cur->getParent();
+ // C++ [dcl.meaning]p1:
+ // A declarator-id shall not be qualified except for the definition
+ // of a member function (9.3) or static data member (9.4) outside of
+ // its class, the definition or explicit instantiation of a function
+ // or variable member of a namespace outside of its namespace, or the
+ // definition of an explicit specialization outside of its namespace,
+ // or the declaration of a friend function that is a member of
+ // another class or namespace (11.3). [...]
+
+ // The user provided a superfluous scope specifier that refers back to the
+ // class or namespaces in which the entity is already declared.
+ //
+ // class X {
+ // void X::f();
+ // };
+ if (Cur->Equals(DC)) {
+ Diag(Loc, diag::warn_member_extra_qualification)
+ << Name << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ return false;
+ }
+
+ // Check whether the qualifying scope encloses the scope of the original
+ // declaration.
+ if (!Cur->Encloses(DC)) {
+ if (Cur->isRecord())
+ Diag(Loc, diag::err_member_qualification)
+ << Name << SS.getRange();
+ else if (isa<TranslationUnitDecl>(DC))
+ Diag(Loc, diag::err_invalid_declarator_global_scope)
+ << Name << SS.getRange();
+ else if (isa<FunctionDecl>(Cur))
+ Diag(Loc, diag::err_invalid_declarator_in_function)
+ << Name << SS.getRange();
+ else
+ Diag(Loc, diag::err_invalid_declarator_scope)
+ << Name << cast<NamedDecl>(Cur) << cast<NamedDecl>(DC) << SS.getRange();
+
+ return true;
+ }
+
+ if (Cur->isRecord()) {
+ // Cannot qualify members within a class.
+ Diag(Loc, diag::err_member_qualification)
+ << Name << SS.getRange();
+ SS.clear();
+
+ // C++ constructors and destructors with incorrect scopes can break
+ // our AST invariants by having the wrong underlying types. If
+ // that's the case, then drop this declaration entirely.
+ if ((Name.getNameKind() == DeclarationName::CXXConstructorName ||
+ Name.getNameKind() == DeclarationName::CXXDestructorName) &&
+ !Context.hasSameType(Name.getCXXNameType(),
+ Context.getTypeDeclType(cast<CXXRecordDecl>(Cur))))
+ return true;
+
+ return false;
+ }
+
+ // C++11 [dcl.meaning]p1:
+ // [...] "The nested-name-specifier of the qualified declarator-id shall
+  //   not begin with a decltype-specifier"
+ NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
+ while (SpecLoc.getPrefix())
+ SpecLoc = SpecLoc.getPrefix();
+ if (dyn_cast_or_null<DecltypeType>(
+ SpecLoc.getNestedNameSpecifier()->getAsType()))
+ Diag(Loc, diag::err_decltype_in_declarator)
+ << SpecLoc.getTypeLoc().getSourceRange();
+
+ return false;
+}
+
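diagnoseQualifiedDeclaration gathers checks that HandleDeclarator previously did inline (see the removals further down). The main outcomes, in source form (editor's illustration):

    namespace N { void f(); void h(); }
    void N::f() { }      // OK: qualified definition of a declared member

    class X {
      void X::g();       // warning: extra qualification 'X::'; the fix-it
    };                   // removes it

    namespace M {
      void N::h() { }    // error: the qualifier's scope 'N' is not enclosed
    }                    // by 'M' (err_invalid_declarator_scope)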
Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists) {
// TODO: consider using NameInfo for diagnostic.
@@ -3169,7 +3354,7 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
// one, the ParsedFreeStandingDeclSpec action should be used.
if (!Name) {
if (!D.isInvalidType()) // Reject this if we think it is valid.
- Diag(D.getDeclSpec().getSourceRange().getBegin(),
+ Diag(D.getDeclSpec().getLocStart(),
diag::err_declarator_need_ident)
<< D.getDeclSpec().getSourceRange() << D.getSourceRange();
return 0;
@@ -3209,29 +3394,18 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
RequireCompleteDeclContext(D.getCXXScopeSpec(), DC))
return 0;
- if (isa<CXXRecordDecl>(DC)) {
- if (!cast<CXXRecordDecl>(DC)->hasDefinition()) {
- Diag(D.getIdentifierLoc(),
- diag::err_member_def_undefined_record)
- << Name << DC << D.getCXXScopeSpec().getRange();
- D.setInvalidType();
- } else if (isa<CXXRecordDecl>(CurContext) &&
- !D.getDeclSpec().isFriendSpecified()) {
- // The user provided a superfluous scope specifier inside a class
- // definition:
- //
- // class X {
- // void X::f();
- // };
- if (CurContext->Equals(DC))
- Diag(D.getIdentifierLoc(), diag::warn_member_extra_qualification)
- << Name << FixItHint::CreateRemoval(D.getCXXScopeSpec().getRange());
- else
- Diag(D.getIdentifierLoc(), diag::err_member_qualification)
- << Name << D.getCXXScopeSpec().getRange();
+ if (isa<CXXRecordDecl>(DC) && !cast<CXXRecordDecl>(DC)->hasDefinition()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_member_def_undefined_record)
+ << Name << DC << D.getCXXScopeSpec().getRange();
+ D.setInvalidType();
+ } else if (!D.getDeclSpec().isFriendSpecified()) {
+ if (diagnoseQualifiedDeclaration(D.getCXXScopeSpec(), DC,
+ Name, D.getIdentifierLoc())) {
+ if (DC->isRecord())
+ return 0;
- // Pretend that this qualifier was not here.
- D.getCXXScopeSpec().clear();
+ D.setInvalidType();
}
}
@@ -3289,21 +3463,16 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
} else { // Something like "int foo::x;"
LookupQualifiedName(Previous, DC);
- // Don't consider using declarations as previous declarations for
- // out-of-line members.
- RemoveUsingDecls(Previous);
-
- // C++ 7.3.1.2p2:
- // Members (including explicit specializations of templates) of a named
- // namespace can also be defined outside that namespace by explicit
- // qualification of the name being defined, provided that the entity being
- // defined was already declared in the namespace and the definition appears
- // after the point of declaration in a namespace that encloses the
- // declarations namespace.
+ // C++ [dcl.meaning]p1:
+ // When the declarator-id is qualified, the declaration shall refer to a
+ // previously declared member of the class or namespace to which the
+ // qualifier refers (or, in the case of a namespace, of an element of the
+ // inline namespace set of that namespace (7.3.1)) or to a specialization
+ // thereof; [...]
//
- // Note that we only check the context at this point. We don't yet
- // have enough information to make sure that PrevDecl is actually
- // the declaration we want to match. For example, given:
+ // Note that we already checked the context above, and that we do not have
+ // enough information to make sure that Previous contains the declaration
+ // we want to match. For example, given:
//
// class X {
// void f();
@@ -3312,40 +3481,23 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
//
// void X::f(int) { } // ill-formed
//
- // In this case, PrevDecl will point to the overload set
+ // In this case, Previous will point to the overload set
// containing the two f's declared in X, but neither of them
// matches.
-
- // First check whether we named the global scope.
- if (isa<TranslationUnitDecl>(DC)) {
- Diag(D.getIdentifierLoc(), diag::err_invalid_declarator_global_scope)
- << Name << D.getCXXScopeSpec().getRange();
- } else {
- DeclContext *Cur = CurContext;
- while (isa<LinkageSpecDecl>(Cur))
- Cur = Cur->getParent();
- if (!Cur->Encloses(DC)) {
- // The qualifying scope doesn't enclose the original declaration.
- // Emit diagnostic based on current scope.
- SourceLocation L = D.getIdentifierLoc();
- SourceRange R = D.getCXXScopeSpec().getRange();
- if (isa<FunctionDecl>(Cur))
- Diag(L, diag::err_invalid_declarator_in_function) << Name << R;
- else
- Diag(L, diag::err_invalid_declarator_scope)
- << Name << cast<NamedDecl>(DC) << R;
- D.setInvalidType();
- }
- }
+
+ // C++ [dcl.meaning]p1:
+ // [...] the member shall not merely have been introduced by a
+ // using-declaration in the scope of the class or namespace nominated by
+ // the nested-name-specifier of the declarator-id.
+ RemoveUsingDecls(Previous);
}
if (Previous.isSingleResult() &&
Previous.getFoundDecl()->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
if (!D.isInvalidType())
- if (DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
- Previous.getFoundDecl()))
- D.setInvalidType();
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
+ Previous.getFoundDecl());
// Just pretend that we didn't see the previous declaration.
Previous.clear();
@@ -3434,14 +3586,12 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T,
if (VLATy->getElementType()->isVariablyModifiedType())
return QualType();
- Expr::EvalResult EvalResult;
+ llvm::APSInt Res;
if (!VLATy->getSizeExpr() ||
- !VLATy->getSizeExpr()->Evaluate(EvalResult, Context) ||
- !EvalResult.Val.isInt())
+ !VLATy->getSizeExpr()->EvaluateAsInt(Res, Context))
return QualType();
// Check whether the array size is negative.
- llvm::APSInt &Res = EvalResult.Val.getInt();
if (Res.isSigned() && Res.isNegative()) {
SizeIsNegative = true;
return QualType();
@@ -3548,7 +3698,7 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
Previous.clear();
}
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments (C++ only).
CheckExtraCXXDefaultArguments(D);
}
@@ -3606,7 +3756,8 @@ Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
else if (T->isVariableArrayType())
Diag(NewTD->getLocation(), diag::err_vla_decl_in_file_scope);
else if (Oversized.getBoolValue())
- Diag(NewTD->getLocation(), diag::err_array_too_large) << Oversized.toString(10);
+ Diag(NewTD->getLocation(), diag::err_array_too_large)
+ << Oversized.toString(10);
else
Diag(NewTD->getLocation(), diag::err_vm_decl_in_file_scope);
NewTD->setInvalidDecl();
@@ -3677,7 +3828,7 @@ isOutOfScopePreviousDeclaration(NamedDecl *PrevDecl, DeclContext *DC,
if (!PrevDecl->hasLinkage())
return false;
- if (Context.getLangOptions().CPlusPlus) {
+ if (Context.getLangOpts().CPlusPlus) {
// C++ [basic.link]p6:
// If there is a visible declaration of an entity with linkage
// having the same name and type, ignoring entities declared
@@ -3766,7 +3917,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
DeclarationName Name = GetNameForDeclarator(D).getName();
// Check that there are no default arguments (C++ only).
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
@@ -3808,7 +3959,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
- if (getLangOptions().OpenCL) {
+ if (getLangOpts().OpenCL) {
// Set up the special work-group-local storage class for variables in the
// OpenCL __local address space.
if (R.getAddressSpace() == LangAS::opencl_local)
@@ -3817,8 +3968,8 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
bool isExplicitSpecialization = false;
VarDecl *NewVD;
- if (!getLangOptions().CPlusPlus) {
- NewVD = VarDecl::Create(Context, DC, D.getSourceRange().getBegin(),
+ if (!getLangOpts().CPlusPlus) {
+ NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
D.getIdentifierLoc(), II,
R, TInfo, SC, SCAsWritten);
@@ -3834,20 +3985,24 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
} else if (SC == SC_None)
SC = SC_Static;
}
- if (SC == SC_Static) {
+ if (SC == SC_Static && CurContext->isRecord()) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
if (RD->isLocalClass())
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_local_class)
<< Name << RD->getDeclName();
- // C++ [class.union]p1: If a union contains a static data member,
- // the program is ill-formed.
- //
- // We also disallow static data members in anonymous structs.
- if (CurContext->isRecord() && (RD->isUnion() || !RD->getDeclName()))
+ // C++98 [class.union]p1: If a union contains a static data member,
+ // the program is ill-formed. C++11 drops this restriction.
+ if (RD->isUnion())
+ Diag(D.getIdentifierLoc(),
+ getLangOpts().CPlusPlus0x
+ ? diag::warn_cxx98_compat_static_data_member_in_union
+ : diag::ext_static_data_member_in_union) << Name;
+ // We conservatively disallow static data members in anonymous structs.
+ else if (!RD->getDeclName())
Diag(D.getIdentifierLoc(),
- diag::err_static_data_member_not_allowed_in_union_or_anon_struct)
+ diag::err_static_data_member_not_allowed_in_anon_struct)
<< Name << RD->isUnion();
}
}
@@ -3858,7 +4013,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
bool Invalid = false;
if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(
- D.getDeclSpec().getSourceRange().getBegin(),
+ D.getDeclSpec().getLocStart(),
D.getIdentifierLoc(),
D.getCXXScopeSpec(),
TemplateParamLists.get(),
@@ -3884,7 +4039,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
- NewVD = VarDecl::Create(Context, DC, D.getSourceRange().getBegin(),
+ NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
D.getIdentifierLoc(), II,
R, TInfo, SC, SCAsWritten);
@@ -3905,38 +4060,8 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TemplateParamLists.release());
}
- if (D.getDeclSpec().isConstexprSpecified()) {
- // FIXME: once we know whether there's an initializer, apply this to
- // static data members too.
- if (!NewVD->isStaticDataMember() &&
- !NewVD->isThisDeclarationADefinition()) {
- // 'constexpr' is redundant and ill-formed on a non-defining declaration
- // of a variable. Suggest replacing it with 'const' if appropriate.
- SourceLocation ConstexprLoc = D.getDeclSpec().getConstexprSpecLoc();
- SourceRange ConstexprRange(ConstexprLoc, ConstexprLoc);
- // If the declarator is complex, we need to move the keyword to the
- // innermost chunk as we switch it from 'constexpr' to 'const'.
- int Kind = DeclaratorChunk::Paren;
- for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
- Kind = D.getTypeObject(I).Kind;
- if (Kind != DeclaratorChunk::Paren)
- break;
- }
- if ((D.getDeclSpec().getTypeQualifiers() & DeclSpec::TQ_const) ||
- Kind == DeclaratorChunk::Reference)
- Diag(ConstexprLoc, diag::err_invalid_constexpr_var_decl)
- << FixItHint::CreateRemoval(ConstexprRange);
- else if (Kind == DeclaratorChunk::Paren)
- Diag(ConstexprLoc, diag::err_invalid_constexpr_var_decl)
- << FixItHint::CreateReplacement(ConstexprRange, "const");
- else
- Diag(ConstexprLoc, diag::err_invalid_constexpr_var_decl)
- << FixItHint::CreateRemoval(ConstexprRange)
- << FixItHint::CreateInsertion(D.getIdentifierLoc(), "const ");
- } else {
- NewVD->setConstexpr(true);
- }
- }
+ if (D.getDeclSpec().isConstexprSpecified())
+ NewVD->setConstexpr(true);
}
// Set the lexical context. If the declarator has a C++ scope specifier, the
@@ -3971,7 +4096,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// In auto-retain/release, infer strong retension for variables of
// retainable type.
- if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
NewVD->setInvalidDecl();
// Handle GNU asm-label extension (encoded as an attribute).
@@ -3999,6 +4124,13 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
Context, Label));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
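+    // An asm label may already be pending for this name (e.g. recorded by
+    // '#pragma redefine_extname' before the declaration was seen); if so,
+    // attach it now and remove it from the pending map.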
+ llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ NewVD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ }
}
// Diagnose shadowed variables before filtering for scope.
@@ -4011,7 +4143,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
FilterLookupForScope(Previous, DC, S, NewVD->hasLinkage(),
isExplicitSpecialization);
- if (!getLangOptions().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
} else {
// Merge the decl with the existing one if appropriate.
@@ -4204,7 +4336,7 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
&& !NewVD->hasAttr<BlocksAttr>()) {
- if (getLangOptions().getGC() != LangOptions::NonGC)
+ if (getLangOpts().getGC() != LangOptions::NonGC)
Diag(NewVD->getLocation(), diag::warn_gc_attribute_weak_on_local);
else
Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local);
@@ -4284,20 +4416,9 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
return false;
}
- // Function pointers and references cannot have qualified function type, only
- // function pointer-to-members can do that.
- QualType Pointee;
- unsigned PtrOrRef = 0;
- if (const PointerType *Ptr = T->getAs<PointerType>())
- Pointee = Ptr->getPointeeType();
- else if (const ReferenceType *Ref = T->getAs<ReferenceType>()) {
- Pointee = Ref->getPointeeType();
- PtrOrRef = 1;
- }
- if (!Pointee.isNull() && Pointee->isFunctionProtoType() &&
- Pointee->getAs<FunctionProtoType>()->getTypeQuals() != 0) {
- Diag(NewVD->getLocation(), diag::err_invalid_qualified_function_pointer)
- << PtrOrRef;
+ if (NewVD->isConstexpr() && !T->isDependentType() &&
+ RequireLiteralType(NewVD->getLocation(), T,
+ PDiag(diag::err_constexpr_var_non_literal))) {
NewVD->setInvalidDecl();
return false;
}
@@ -4387,6 +4508,33 @@ namespace {
};
}
+namespace {
+
+// Callback to only accept typo corrections that have a non-zero edit distance.
+// Also only accept corrections that have the same parent decl.
+class DifferentNameValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ DifferentNameValidatorCCC(CXXRecordDecl *Parent)
+ : ExpectedParent(Parent ? Parent->getCanonicalDecl() : 0) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (candidate.getEditDistance() == 0)
+ return false;
+
+ if (CXXMethodDecl *MD = candidate.getCorrectionDeclAs<CXXMethodDecl>()) {
+ CXXRecordDecl *Parent = MD->getParent();
+ return Parent && Parent->getCanonicalDecl() == ExpectedParent;
+ }
+
+ return !ExpectedParent;
+ }
+
+ private:
+ CXXRecordDecl *ExpectedParent;
+};
+
+}
+
/// \brief Generate diagnostics for an invalid function redeclaration.
///
/// This routine handles generating the diagnostic messages for an invalid
@@ -4407,7 +4555,7 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
llvm::SmallVector<unsigned, 1> MismatchedParams;
llvm::SmallVector<std::pair<FunctionDecl*, unsigned>, 1> NearMatches;
TypoCorrection Correction;
- bool isFriendDecl = (SemaRef.getLangOptions().CPlusPlus &&
+ bool isFriendDecl = (SemaRef.getLangOpts().CPlusPlus &&
ExtraArgs.D.getDeclSpec().isFriendSpecified());
unsigned DiagMsg = isFriendDecl ? diag::err_no_matching_local_friend
: diag::err_member_def_does_not_match;
@@ -4416,6 +4564,8 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
SemaRef.LookupQualifiedName(Prev, NewDC);
assert(!Prev.isAmbiguous() &&
"Cannot have an ambiguity in previous-declaration lookup");
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
+ DifferentNameValidatorCCC Validator(MD ? MD->getParent() : 0);
if (!Prev.empty()) {
for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
Func != FuncEnd; ++Func) {
@@ -4431,8 +4581,8 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
}
// If the qualified name lookup yielded nothing, try typo correction
} else if ((Correction = SemaRef.CorrectTypo(Prev.getLookupNameInfo(),
- Prev.getLookupKind(), 0, 0, NewDC)) &&
- Correction.getCorrection() != Name) {
+ Prev.getLookupKind(), 0, 0,
+ Validator, NewDC))) {
// Trap errors.
Sema::SFINAETrap Trap(SemaRef);
@@ -4454,12 +4604,11 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
// TODO: Refactor ActOnFunctionDeclarator so that we can call only the
  // pieces needed to verify the typo-corrected C++ declaration and hopefully
// eliminate the need for the parameter pack ExtraArgs.
- Result = SemaRef.ActOnFunctionDeclarator(ExtraArgs.S, ExtraArgs.D,
- NewFD->getDeclContext(),
- NewFD->getTypeSourceInfo(),
- Previous,
- ExtraArgs.TemplateParamLists,
- ExtraArgs.AddToScope);
+ Result = SemaRef.ActOnFunctionDeclarator(
+ ExtraArgs.S, ExtraArgs.D,
+ Correction.getCorrectionDecl()->getDeclContext(),
+ NewFD->getTypeSourceInfo(), Previous, ExtraArgs.TemplateParamLists,
+ ExtraArgs.AddToScope);
if (Trap.hasErrorOccurred()) {
// Pretend the typo correction never occurred
ExtraArgs.D.SetIdentifier(Name.getAsIdentifierInfo(),
@@ -4487,10 +4636,10 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
if (Correction)
SemaRef.Diag(NewFD->getLocation(), DiagMsg)
- << Name << NewDC << Correction.getQuoted(SemaRef.getLangOptions())
+ << Name << NewDC << Correction.getQuoted(SemaRef.getLangOpts())
<< FixItHint::CreateReplacement(
NewFD->getLocation(),
- Correction.getAsString(SemaRef.getLangOptions()));
+ Correction.getAsString(SemaRef.getLangOpts()));
else
SemaRef.Diag(NewFD->getLocation(), DiagMsg)
<< Name << NewDC << NewFD->getLocation();
@@ -4509,12 +4658,13 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
if (unsigned Idx = NearMatch->second) {
ParmVarDecl *FDParam = FD->getParamDecl(Idx-1);
- SemaRef.Diag(FDParam->getTypeSpecStartLoc(),
- diag::note_member_def_close_param_match)
+ SourceLocation Loc = FDParam->getTypeSpecStartLoc();
+ if (Loc.isInvalid()) Loc = FD->getLocation();
+ SemaRef.Diag(Loc, diag::note_member_def_close_param_match)
<< Idx << FDParam->getType() << NewFD->getParamDecl(Idx-1)->getType();
} else if (Correction) {
SemaRef.Diag(FD->getLocation(), diag::note_previous_decl)
- << Correction.getQuoted(SemaRef.getLangOptions());
+ << Correction.getQuoted(SemaRef.getLangOpts());
} else if (FDisConst != NewFDisConst) {
SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
<< NewFDisConst << FD->getSourceRange().getEnd();
@@ -4524,7 +4674,8 @@ static NamedDecl* DiagnoseInvalidRedeclaration(
return Result;
}
-static FunctionDecl::StorageClass getFunctionStorageClass(Sema &SemaRef, Declarator &D) {
+static FunctionDecl::StorageClass getFunctionStorageClass(Sema &SemaRef,
+ Declarator &D) {
switch (D.getDeclSpec().getStorageClassSpec()) {
default: llvm_unreachable("Unknown storage class!");
case DeclSpec::SCS_auto:
@@ -4570,7 +4721,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
FunctionDecl::StorageClass SCAsWritten
= StorageClassSpecToFunctionDeclStorageClass(SCSpec);
- if (!SemaRef.getLangOptions().CPlusPlus) {
+ if (!SemaRef.getLangOpts().CPlusPlus) {
// Determine whether the function was written with a
    // prototype. This is true when:
// - there is a prototype in the declarator, or
@@ -4580,8 +4731,9 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
(D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
(!isa<FunctionType>(R.getTypePtr()) && R->isFunctionProtoType());
- NewFD = FunctionDecl::Create(SemaRef.Context, DC, D.getSourceRange().getBegin(),
- NameInfo, R, TInfo, SC, SCAsWritten, isInline,
+ NewFD = FunctionDecl::Create(SemaRef.Context, DC,
+ D.getLocStart(), NameInfo, R,
+ TInfo, SC, SCAsWritten, isInline,
HasPrototype);
if (D.isInvalidType())
NewFD->setInvalidDecl();
@@ -4612,7 +4764,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
R = SemaRef.CheckConstructorDeclarator(D, R, SC);
return CXXConstructorDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
- D.getSourceRange().getBegin(), NameInfo,
+ D.getLocStart(), NameInfo,
R, TInfo, isExplicit, isInline,
/*isImplicitlyDeclared=*/false,
isConstexpr);
@@ -4624,14 +4776,14 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
CXXDestructorDecl *NewDD = CXXDestructorDecl::Create(
SemaRef.Context, Record,
- D.getSourceRange().getBegin(),
+ D.getLocStart(),
NameInfo, R, TInfo, isInline,
/*isImplicitlyDeclared=*/false);
// If the class is complete, then we now create the implicit exception
// specification. If the class is incomplete or dependent, we can't do
// it yet.
- if (SemaRef.getLangOptions().CPlusPlus0x && !Record->isDependentType() &&
+ if (SemaRef.getLangOpts().CPlusPlus0x && !Record->isDependentType() &&
Record->getDefinition() && !Record->isBeingDefined() &&
R->getAs<FunctionProtoType>()->getExceptionSpecType() == EST_None) {
SemaRef.AdjustDestructorExceptionSpec(Record, NewDD);
@@ -4647,7 +4799,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// Create a FunctionDecl to satisfy the function definition parsing
// code path.
return FunctionDecl::Create(SemaRef.Context, DC,
- D.getSourceRange().getBegin(),
+ D.getLocStart(),
D.getIdentifierLoc(), Name, R, TInfo,
SC, SCAsWritten, isInline,
/*hasPrototype=*/true, isConstexpr);
@@ -4663,7 +4815,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
SemaRef.CheckConversionDeclarator(D, R, SC);
IsVirtualOkay = true;
return CXXConversionDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
- D.getSourceRange().getBegin(), NameInfo,
+ D.getLocStart(), NameInfo,
R, TInfo, isInline, isExplicit,
isConstexpr, SourceLocation());
@@ -4699,7 +4851,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// This is a C++ method declaration.
return CXXMethodDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
- D.getSourceRange().getBegin(), NameInfo, R,
+ D.getLocStart(), NameInfo, R,
TInfo, isStatic, SCAsWritten, isInline,
isConstexpr, SourceLocation());
@@ -4708,7 +4860,7 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
  // prototype. This is true when:
// - we're in C++ (where every function has a prototype),
return FunctionDecl::Create(SemaRef.Context, DC,
- D.getSourceRange().getBegin(),
+ D.getLocStart(),
NameInfo, R, TInfo, SC, SCAsWritten, isInline,
true/*HasPrototype*/, isConstexpr);
}
@@ -4760,7 +4912,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
isVirtualOkay);
if (!NewFD) return 0;
- if (getLangOptions().CPlusPlus) {
+ if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer())
+ NewFD->setTopLevelDeclInObjCContainer();
+
+ if (getLangOpts().CPlusPlus) {
bool isInline = D.getDeclSpec().isInlineSpecified();
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
bool isExplicit = D.getDeclSpec().isExplicitSpecified();
@@ -4783,13 +4938,13 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// scope specifier, or is the object of a friend declaration, the
// lexical context will be different from the semantic context.
NewFD->setLexicalDeclContext(CurContext);
-
+
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
bool Invalid = false;
if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(
- D.getDeclSpec().getSourceRange().getBegin(),
+ D.getDeclSpec().getLocStart(),
D.getIdentifierLoc(),
D.getCXXScopeSpec(),
TemplateParamLists.get(),
@@ -4811,7 +4966,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
// If we're adding a template to a dependent context, we may need to
- // rebuilding some of the types used within the template parameter list,
+    // rebuild some of the types used within the template parameter list,
// now that we know what the current instantiation is.
if (DC->isDependentContext()) {
ContextRAII SavedContext(*this, DC);
@@ -4880,6 +5035,56 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
FunctionTemplate->setInvalidDecl();
}
+ // If we see "T var();" at block scope, where T is a class type, it is
+ // probably an attempt to initialize a variable, not a function declaration.
+ // We don't catch this case earlier, since there is no ambiguity here.
+ if (!FunctionTemplate && D.getFunctionDefinitionKind() == FDK_Declaration &&
+ CurContext->isFunctionOrMethod() &&
+ D.getNumTypeObjects() == 1 && D.isFunctionDeclarator() &&
+ D.getDeclSpec().getStorageClassSpecAsWritten()
+ == DeclSpec::SCS_unspecified) {
+ QualType T = R->getAs<FunctionType>()->getResultType();
+ DeclaratorChunk &C = D.getTypeObject(0);
+ if (!T->isVoidType() && C.Fun.NumArgs == 0 && !C.Fun.isVariadic &&
+ !C.Fun.TrailingReturnType &&
+ C.Fun.getExceptionSpecType() == EST_None) {
+ SourceRange ParenRange(C.Loc, C.EndLoc);
+ Diag(C.Loc, diag::warn_empty_parens_are_function_decl) << ParenRange;
+
+ // If the declaration looks like:
+ // T var1,
+ // f();
+ // and name lookup finds a function named 'f', then the ',' was
+ // probably intended to be a ';'.
+ if (!D.isFirstDeclarator() && D.getIdentifier()) {
+ FullSourceLoc Comma(D.getCommaLoc(), SourceMgr);
+ FullSourceLoc Name(D.getIdentifierLoc(), SourceMgr);
+ if (Comma.getFileID() != Name.getFileID() ||
+ Comma.getSpellingLineNumber() != Name.getSpellingLineNumber()) {
+ LookupResult Result(*this, D.getIdentifier(), SourceLocation(),
+ LookupOrdinaryName);
+ if (LookupName(Result, S))
+ Diag(D.getCommaLoc(), diag::note_empty_parens_function_call)
+ << FixItHint::CreateReplacement(D.getCommaLoc(), ";") << NewFD;
+ }
+ }
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ // Empty parens mean value-initialization, and no parens mean default
+ // initialization. These are equivalent if the default constructor is
+ // user-provided, or if zero-initialization is a no-op.
+ if (RD && RD->hasDefinition() &&
+ (RD->isEmpty() || RD->hasUserProvidedDefaultConstructor()))
+ Diag(C.Loc, diag::note_empty_parens_default_ctor)
+ << FixItHint::CreateRemoval(ParenRange);
+ else if (const char *Init = getFixItZeroInitializerForType(T))
+ Diag(C.Loc, diag::note_empty_parens_zero_initialize)
+ << FixItHint::CreateReplacement(ParenRange, Init);
+ else if (LangOpts.CPlusPlus0x)
+ Diag(C.Loc, diag::note_empty_parens_zero_initialize)
+ << FixItHint::CreateReplacement(ParenRange, "{}");
+ }
+ }
+
// C++ [dcl.fct.spec]p5:
// The virtual specifier shall only be used in declarations of
// nonstatic class member functions that appear within a
@@ -4907,7 +5112,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
// C++ [dcl.fct.spec]p3:
- // The inline specifier shall not appear on a block scope function declaration.
+ // The inline specifier shall not appear on a block scope function
+ // declaration.
if (isInline && !NewFD->isInvalidDecl()) {
if (CurContext->isFunctionOrMethod()) {
// 'inline' is not allowed on block scope function declaration.
@@ -4919,8 +5125,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// C++ [dcl.fct.spec]p6:
// The explicit specifier shall be used only in the declaration of a
- // constructor or conversion function within its class definition; see 12.3.1
- // and 12.3.2.
+ // constructor or conversion function within its class definition;
+ // see 12.3.1 and 12.3.2.
if (isExplicit && !NewFD->isInvalidDecl()) {
if (!CurContext->isRecord()) {
// 'explicit' was specified outside of the class.
@@ -4974,10 +5180,26 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setAccess(AS_public);
}
+ // If a function is defined as defaulted or deleted, mark it as such now.
+ switch (D.getFunctionDefinitionKind()) {
+ case FDK_Declaration:
+ case FDK_Definition:
+ break;
+
+ case FDK_Defaulted:
+ NewFD->setDefaulted();
+ break;
+
+ case FDK_Deleted:
+ NewFD->setDeletedAsWritten();
+ break;
+ }
+
if (isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
D.isFunctionDefinition()) {
- // A method is implicitly inline if it's defined in its class
- // definition.
+ // C++ [class.mfct]p2:
+ // A member function may be defined (8.4) in its class definition, in
+ // which case it is an inline member function (7.1.2)
NewFD->setImplicitlyInline();
}
@@ -5007,6 +5229,13 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
StringLiteral *SE = cast<StringLiteral>(E);
NewFD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0), Context,
SE->getString()));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
+ llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewFD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ NewFD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ }
}
// Copy the parameter declarations from the declarator D to the function
@@ -5028,7 +5257,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// In C++, the empty parameter-type-list must be spelled "void"; a
// typedef of void is not permitted.
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
Param->getType().getUnqualifiedType() != Context.VoidTy) {
bool IsTypeAlias = false;
if (const TypedefType *TT = Param->getType()->getAs<TypedefType>())
@@ -5077,25 +5306,35 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Finally, we know we have the right number of parameters, install them.
NewFD->setParams(Params);
+ // Find all anonymous symbols defined during the declaration of this function
+  // and add them to NewFD. This lets us track decls such as 'enum Y' in:
+ //
+ // void f(enum Y {AA} x) {}
+ //
+ // which would otherwise incorrectly end up in the translation unit scope.
+ NewFD->setDeclsInPrototypeScope(DeclsInPrototypeScope);
+ DeclsInPrototypeScope.clear();
+
// Process the non-inheritable attributes on this declaration.
ProcessDeclAttributes(S, NewFD, D,
/*NonInheritable=*/true, /*Inheritable=*/false);
- if (!getLangOptions().CPlusPlus) {
+ // Functions returning a variably modified type violate C99 6.7.5.2p2
+ // because all functions have linkage.
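+  // For instance (illustrative): 'int (*f(int n))[n];' declares a function
+  // whose return type, and hence whose type, is variably modified.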
+ if (!NewFD->isInvalidDecl() &&
+ NewFD->getResultType()->isVariablyModifiedType()) {
+ Diag(NewFD->getLocation(), diag::err_vm_func_decl);
+ NewFD->setInvalidDecl();
+ }
+
+ if (!getLangOpts().CPlusPlus) {
// Perform semantic checking on the function declaration.
bool isExplicitSpecialization=false;
if (!NewFD->isInvalidDecl()) {
- if (NewFD->getResultType()->isVariablyModifiedType()) {
- // Functions returning a variably modified type violate C99 6.7.5.2p2
- // because all functions have linkage.
- Diag(NewFD->getLocation(), diag::err_vm_func_decl);
- NewFD->setInvalidDecl();
- } else {
- if (NewFD->isMain())
- CheckMain(NewFD, D.getDeclSpec());
- D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
- isExplicitSpecialization));
- }
+ if (NewFD->isMain())
+ CheckMain(NewFD, D.getDeclSpec());
+ D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
+ isExplicitSpecialization));
}
assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
@@ -5135,8 +5374,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(D.getIdentifierLoc(), diag::err_template_spec_needs_header)
<< SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc)
<< FixItHint::CreateInsertion(
- D.getDeclSpec().getSourceRange().getBegin(),
- "template<> ");
+ D.getDeclSpec().getLocStart(),
+ "template<> ");
isFunctionTemplateSpecialization = true;
} else {
// "friend void foo<>(int);" is an implicit specialization decl.
@@ -5173,7 +5412,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (CurContext->isDependentContext() && CurContext->isRecord()
&& !isFriend) {
isDependentClassScopeExplicitSpecialization = true;
- Diag(NewFD->getLocation(), getLangOptions().MicrosoftExt ?
+ Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ?
diag::ext_function_specialization_in_class :
diag::err_function_specialization_in_class)
<< NewFD->getDeclName();
@@ -5224,10 +5463,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
"previous declaration set still overloaded");
- if (NewFD->isConstexpr() && !NewFD->isInvalidDecl() &&
- !CheckConstexprFunctionDecl(NewFD, CCK_Declaration))
- NewFD->setInvalidDecl();
-
NamedDecl *PrincipalDecl = (FunctionTemplate
? cast<NamedDecl>(FunctionTemplate)
: NewFD);
@@ -5235,7 +5470,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (isFriend && D.isRedeclaration()) {
AccessSpecifier Access = AS_public;
if (!NewFD->isInvalidDecl())
- Access = NewFD->getPreviousDeclaration()->getAccess();
+ Access = NewFD->getPreviousDecl()->getAccess();
NewFD->setAccess(Access);
if (FunctionTemplate) FunctionTemplate->setAccess(Access);
@@ -5250,9 +5485,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// If we have a function template, check the template parameter
// list. This will check and merge default template arguments.
if (FunctionTemplate) {
- FunctionTemplateDecl *PrevTemplate = FunctionTemplate->getPreviousDeclaration();
+ FunctionTemplateDecl *PrevTemplate =
+ FunctionTemplate->getPreviousDecl();
CheckTemplateParameterList(FunctionTemplate->getTemplateParameters(),
- PrevTemplate? PrevTemplate->getTemplateParameters() : 0,
+ PrevTemplate ? PrevTemplate->getTemplateParameters() : 0,
D.getDeclSpec().isFriendSpecified()
? (D.isFunctionDefinition()
? TPC_FriendFunctionTemplateDefinition
@@ -5331,8 +5567,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// definition (C++ [dcl.meaning]p1).
// Note that this is not the case for explicit specializations of
// function templates or member functions of class templates, per
- // C++ [temp.expl.spec]p2. We also allow these declarations as an extension
- // for compatibility with old SWIG code which likes to generate them.
+ // C++ [temp.expl.spec]p2. We also allow these declarations as an
+ // extension for compatibility with old SWIG code which likes to
+ // generate them.
Diag(NewFD->getLocation(), diag::ext_out_of_line_declaration)
<< D.getCXXScopeSpec().getRange();
}
@@ -5393,7 +5630,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Set this FunctionDecl's range up to the right paren.
NewFD->setRangeEnd(D.getSourceRange().getEnd());
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (FunctionTemplate) {
if (NewFD->isInvalidDecl())
FunctionTemplate->setInvalidDecl();
@@ -5403,7 +5640,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
MarkUnusedFileScopedDecl(NewFD);
- if (getLangOptions().CUDA)
+ if (getLangOpts().CUDA)
if (IdentifierInfo *II = NewFD->getIdentifier())
if (!NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
@@ -5493,7 +5730,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
break;
}
- if (!getLangOptions().CPlusPlus && !NewFD->hasAttr<OverloadableAttr>()) {
+ if (!getLangOpts().CPlusPlus && !NewFD->hasAttr<OverloadableAttr>()) {
// If a function name is overloadable in C, then every function
// with that name must be marked "overloadable".
Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing)
@@ -5514,7 +5751,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (Redeclaration) {
// NewFD and OldDecl represent declarations that need to be
// merged.
- if (MergeFunctionDecl(NewFD, OldDecl)) {
+ if (MergeFunctionDecl(NewFD, OldDecl, S)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
@@ -5524,7 +5761,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (FunctionTemplateDecl *OldTemplateDecl
= dyn_cast<FunctionTemplateDecl>(OldDecl)) {
- NewFD->setPreviousDeclaration(OldTemplateDecl->getTemplatedDecl());
+ NewFD->setPreviousDeclaration(OldTemplateDecl->getTemplatedDecl());
FunctionTemplateDecl *NewTemplateDecl
= NewFD->getDescribedFunctionTemplate();
assert(NewTemplateDecl && "Template/non-template mismatch");
@@ -5542,9 +5779,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
assert(OldTemplateDecl->isMemberSpecialization());
}
- if (OldTemplateDecl->isModulePrivate())
- NewTemplateDecl->setModulePrivate();
-
} else {
if (isa<CXXMethodDecl>(NewFD)) // Set access for out-of-line definitions
NewFD->setAccess(OldDecl->getAccess());
@@ -5554,7 +5788,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
// Semantic checking for this function declaration (in isolation).
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// C++-specific checks.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) {
CheckConstructor(Constructor);
@@ -5633,29 +5867,59 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
Context.BuiltinInfo.ForgetBuiltin(BuiltinID, Context.Idents);
}
}
+
+ // If this function is declared as being extern "C", then check to see if
+ // the function returns a UDT (class, struct, or union type) that is not C
+ // compatible, and if it does, warn the user.
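+  // For example (illustrative, with 'S' being some hypothetical non-POD
+  // class type):
+  //   extern "C" S f();  // warns: 'f' has C-linkage but returns a
+  //                      // user-defined type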
+ if (NewFD->isExternC()) {
+ QualType R = NewFD->getResultType();
+    if (!R.isPODType(Context) && !R->isVoidType())
+      Diag(NewFD->getLocation(), diag::warn_return_value_udt)
+        << NewFD << R;
+ }
}
return Redeclaration;
}
void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
- // C++ [basic.start.main]p3: A program that declares main to be inline
- // or static is ill-formed.
+ // C++11 [basic.start.main]p3: A program that declares main to be inline,
+ // static or constexpr is ill-formed.
// C99 6.7.4p4: In a hosted environment, the inline function specifier
// shall not appear in a declaration of main.
// static main is not an error under C99, but we should warn about it.
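+  // For example (illustrative):
+  //   static int main() {}     // error in C++, warning in C
+  //   inline int main() {}     // error
+  //   constexpr int main() {}  // error in C++11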
if (FD->getStorageClass() == SC_Static)
- Diag(DS.getStorageClassSpecLoc(), getLangOptions().CPlusPlus
+ Diag(DS.getStorageClassSpecLoc(), getLangOpts().CPlusPlus
? diag::err_static_main : diag::warn_static_main)
<< FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
if (FD->isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_inline_main)
<< FixItHint::CreateRemoval(DS.getInlineSpecLoc());
+ if (FD->isConstexpr()) {
+ Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_main)
+ << FixItHint::CreateRemoval(DS.getConstexprSpecLoc());
+ FD->setConstexpr(false);
+ }
QualType T = FD->getType();
assert(T->isFunctionType() && "function decl is not of function type");
- const FunctionType* FT = T->getAs<FunctionType>();
-
- if (!Context.hasSameUnqualifiedType(FT->getResultType(), Context.IntTy)) {
+ const FunctionType* FT = T->castAs<FunctionType>();
+
+  // All the standards say that main() should return 'int'.
+ if (Context.hasSameUnqualifiedType(FT->getResultType(), Context.IntTy)) {
+ // In C and C++, main magically returns 0 if you fall off the end;
+ // set the flag which tells us that.
+ // This is C++ [basic.start.main]p5 and C99 5.1.2.2.3.
+ FD->setHasImplicitReturnZero(true);
+
+ // In C with GNU extensions we allow main() to have non-integer return
+ // type, but we should warn about the extension, and we disable the
+ // implicit-return-zero rule.
+ } else if (getLangOpts().GNUMode && !getLangOpts().CPlusPlus) {
+ Diag(FD->getTypeSpecStartLoc(), diag::ext_main_returns_nonint);
+
+ // Otherwise, this is just a flat-out error.
+ } else {
Diag(FD->getTypeSpecStartLoc(), diag::err_main_returns_nonint);
FD->setInvalidDecl(true);
}
@@ -5786,7 +6050,8 @@ namespace {
void VisitMemberExpr(MemberExpr *E) {
if (E->getType()->canDecayToPointerType()) return;
- if (isa<FieldDecl>(E->getMemberDecl()))
+ ValueDecl *VD = E->getMemberDecl();
+ if (isa<FieldDecl>(VD) || isa<CXXMethodDecl>(VD))
if (DeclRefExpr *DRE
= dyn_cast<DeclRefExpr>(E->getBase()->IgnoreParenImpCasts())) {
HandleDeclRefExpr(DRE);
@@ -5844,17 +6109,6 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
if (RealDecl == 0 || RealDecl->isInvalidDecl())
return;
- // Check for self-references within variable initializers.
- if (VarDecl *vd = dyn_cast<VarDecl>(RealDecl)) {
- // Variables declared within a function/method body are handled
- // by a dataflow analysis.
- if (!vd->hasLocalStorage() && !vd->isStaticLocal())
- CheckSelfReference(RealDecl, Init);
- }
- else {
- CheckSelfReference(RealDecl, Init);
- }
-
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
// With declarators parsed the way they are, the parser cannot
// distinguish between a normal initializer and a pure-specifier.
@@ -5879,48 +6133,88 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
return;
}
- // C++0x [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ // Check for self-references within variable initializers.
+ // Variables declared within a function/method body are handled
+ // by a dataflow analysis.
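+  // e.g. (illustrative): 'int x = x;' at namespace scope is flagged here,
+  // while the same pattern inside a function body is left to the
+  // uninitialized-variable dataflow analysis.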
+ if (!VDecl->hasLocalStorage() && !VDecl->isStaticLocal())
+ CheckSelfReference(RealDecl, Init);
+
+ ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
+
+ // C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
if (TypeMayContainAuto && VDecl->getType()->getContainedAutoType()) {
+ Expr *DeduceInit = Init;
+ // Initializer could be a C++ direct-initializer. Deduction only works if it
+ // contains exactly one expression.
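+    // e.g. (illustrative): 'auto x(42);' deduces 'int'; the zero- and
+    // multiple-expression cases are diagnosed below.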
+ if (CXXDirectInit) {
+ if (CXXDirectInit->getNumExprs() == 0) {
+ // It isn't possible to write this directly, but it is possible to
+ // end up in this situation with "auto x(some_pack...);"
+ Diag(CXXDirectInit->getLocStart(),
+ diag::err_auto_var_init_no_expression)
+ << VDecl->getDeclName() << VDecl->getType()
+ << VDecl->getSourceRange();
+ RealDecl->setInvalidDecl();
+ return;
+ } else if (CXXDirectInit->getNumExprs() > 1) {
+ Diag(CXXDirectInit->getExpr(1)->getLocStart(),
+ diag::err_auto_var_init_multiple_expressions)
+ << VDecl->getDeclName() << VDecl->getType()
+ << VDecl->getSourceRange();
+ RealDecl->setInvalidDecl();
+ return;
+ } else {
+ DeduceInit = CXXDirectInit->getExpr(0);
+ }
+ }
TypeSourceInfo *DeducedType = 0;
- if (!DeduceAutoType(VDecl->getTypeSourceInfo(), Init, DeducedType))
- Diag(VDecl->getLocation(), diag::err_auto_var_deduction_failure)
- << VDecl->getDeclName() << VDecl->getType() << Init->getType()
- << Init->getSourceRange();
+ if (DeduceAutoType(VDecl->getTypeSourceInfo(), DeduceInit, DeducedType) ==
+ DAR_Failed)
+ DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
if (!DeducedType) {
RealDecl->setInvalidDecl();
return;
}
VDecl->setTypeSourceInfo(DeducedType);
VDecl->setType(DeducedType->getType());
-
+ VDecl->ClearLinkageCache();
+
// In ARC, infer lifetime.
- if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
VDecl->setInvalidDecl();
// If this is a redeclaration, check that the type we just deduced matches
// the previously declared type.
- if (VarDecl *Old = VDecl->getPreviousDeclaration())
+ if (VarDecl *Old = VDecl->getPreviousDecl())
MergeVarDeclTypes(VDecl, Old);
}
-
- // A definition must end up with a complete type, which means it must be
- // complete with the restriction that an array type might be completed by the
- // initializer; note that later code assumes this restriction.
- QualType BaseDeclType = VDecl->getType();
- if (const ArrayType *Array = Context.getAsIncompleteArrayType(BaseDeclType))
- BaseDeclType = Array->getElementType();
- if (RequireCompleteType(VDecl->getLocation(), BaseDeclType,
- diag::err_typecheck_decl_incomplete_type)) {
- RealDecl->setInvalidDecl();
+ if (VDecl->isLocalVarDecl() && VDecl->hasExternalStorage()) {
+ // C99 6.7.8p5. C++ has no such restriction, but that is a defect.
+ Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
+ VDecl->setInvalidDecl();
return;
}
- // The variable can not have an abstract class type.
- if (RequireNonAbstractType(VDecl->getLocation(), VDecl->getType(),
- diag::err_abstract_type_in_decl,
- AbstractVariableType))
- VDecl->setInvalidDecl();
+ if (!VDecl->getType()->isDependentType()) {
+ // A definition must end up with a complete type, which means it must be
+ // complete with the restriction that an array type might be completed by
+ // the initializer; note that later code assumes this restriction.
+ QualType BaseDeclType = VDecl->getType();
+ if (const ArrayType *Array = Context.getAsIncompleteArrayType(BaseDeclType))
+ BaseDeclType = Array->getElementType();
+ if (RequireCompleteType(VDecl->getLocation(), BaseDeclType,
+ diag::err_typecheck_decl_incomplete_type)) {
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ // The variable can not have an abstract class type.
+ if (RequireNonAbstractType(VDecl->getLocation(), VDecl->getType(),
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ VDecl->setInvalidDecl();
+ }
const VarDecl *Def;
if ((Def = VDecl->getDefinition()) && Def != VDecl) {
@@ -5932,7 +6226,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
}
const VarDecl* PrevInit = 0;
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// C++ [class.static.data]p4
// If a static data member is of const integral or const
// enumeration type, its declaration in the class definition can
@@ -5946,7 +6240,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
// data members we also need to check whether there was an in-class
// declaration with an initializer.
if (VDecl->isStaticDataMember() && VDecl->getAnyInitializer(PrevInit)) {
- Diag(VDecl->getLocation(), diag::err_redefinition) << VDecl->getDeclName();
+ Diag(VDecl->getLocation(), diag::err_redefinition)
+ << VDecl->getDeclName();
Diag(PrevInit->getLocation(), diag::note_previous_definition);
return;
}
@@ -5968,44 +6263,77 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
return;
}
- // Capture the variable that is being initialized and the style of
- // initialization.
- InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
-
- // FIXME: Poor source location information.
- InitializationKind Kind
- = DirectInit? InitializationKind::CreateDirect(VDecl->getLocation(),
- Init->getLocStart(),
- Init->getLocEnd())
- : InitializationKind::CreateCopy(VDecl->getLocation(),
- Init->getLocStart());
-
// Get the decls type and save a reference for later, since
// CheckInitializerTypes may change it.
QualType DclT = VDecl->getType(), SavT = DclT;
- if (VDecl->isLocalVarDecl()) {
- if (VDecl->hasExternalStorage()) { // C99 6.7.8p5
- Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
- VDecl->setInvalidDecl();
- } else if (!VDecl->isInvalidDecl()) {
- InitializationSequence InitSeq(*this, Entity, Kind, &Init, 1);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &Init, 1),
- &DclT);
+
+ // Top-level message sends default to 'id' when we're in a debugger
+ // and we are assigning it to a variable of 'id' type.
+ if (getLangOpts().DebuggerCastResultToId && DclT->isObjCIdType())
+ if (Init->getType() == Context.UnknownAnyTy && isa<ObjCMessageExpr>(Init)) {
+ ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
}
+ Init = Result.take();
+ }
- Init = Result.takeAs<Expr>();
-
- // C++ 3.6.2p2, allow dynamic initialization of static initializers.
- // Don't check invalid declarations to avoid emitting useless diagnostics.
- if (!getLangOptions().CPlusPlus && !VDecl->isInvalidDecl()) {
- if (VDecl->getStorageClass() == SC_Static) // C99 6.7.8p4.
- CheckForConstantInitializer(Init, DclT);
- }
+ // Perform the initialization.
+ if (!VDecl->isInvalidDecl()) {
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
+ InitializationKind Kind
+ = DirectInit ?
+ CXXDirectInit ? InitializationKind::CreateDirect(VDecl->getLocation(),
+ Init->getLocStart(),
+ Init->getLocEnd())
+ : InitializationKind::CreateDirectList(
+ VDecl->getLocation())
+ : InitializationKind::CreateCopy(VDecl->getLocation(),
+ Init->getLocStart());
+
+ Expr **Args = &Init;
+ unsigned NumArgs = 1;
+ if (CXXDirectInit) {
+ Args = CXXDirectInit->getExprs();
+ NumArgs = CXXDirectInit->getNumExprs();
+ }
+ InitializationSequence InitSeq(*this, Entity, Kind, Args, NumArgs);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
+                                        MultiExprArg(*this, Args, NumArgs),
+ &DclT);
+ if (Result.isInvalid()) {
+ VDecl->setInvalidDecl();
+ return;
}
+
+ Init = Result.takeAs<Expr>();
+ }
+
+ // If the type changed, it means we had an incomplete type that was
+ // completed by the initializer. For example:
+ // int ary[] = { 1, 3, 5 };
+ // "ary" transitions from an IncompleteArrayType to a ConstantArrayType.
+ if (!VDecl->isInvalidDecl() && (DclT != SavT))
+ VDecl->setType(DclT);
+
+ // Check any implicit conversions within the expression.
+ CheckImplicitConversions(Init, VDecl->getLocation());
+
+ if (!VDecl->isInvalidDecl())
+ checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
+
+ Init = MaybeCreateExprWithCleanups(Init);
+ // Attach the initializer to the decl.
+ VDecl->setInit(Init);
+
+ if (VDecl->isLocalVarDecl()) {
+ // C99 6.7.8p4: All the expressions in an initializer for an object that has
+ // static storage duration shall be constant expressions or string literals.
+ // C++ does not have this restriction.
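+    // e.g. (illustrative): in C, 'static int n = f();' inside a function is
+    // rejected here because the initializer is not a constant expression.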
+ if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl() &&
+ VDecl->getStorageClass() == SC_Static)
+ CheckForConstantInitializer(Init, DclT);
} else if (VDecl->isStaticDataMember() &&
VDecl->getLexicalDeclContext()->isRecord()) {
// This is an in-class initialization for a static data member, e.g.,
@@ -6014,26 +6342,12 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
// static const int value = 17;
// };
- // Try to perform the initialization regardless.
- if (!VDecl->isInvalidDecl()) {
- InitializationSequence InitSeq(*this, Entity, Kind, &Init, 1);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &Init, 1),
- &DclT);
- if (Result.isInvalid()) {
- VDecl->setInvalidDecl();
- return;
- }
-
- Init = Result.takeAs<Expr>();
- }
-
// C++ [class.mem]p4:
// A member-declarator can contain a constant-initializer only
// if it declares a static member (9.4) of const integral or
// const enumeration type, see 9.4.2.
//
- // C++0x [class.static.data]p3:
+ // C++11 [class.static.data]p3:
// If a non-volatile const static data member is of integral or
// enumeration type, its declaration in the class definition can
  // specify a brace-or-equal-initializer in which every initializer-clause
@@ -6042,28 +6356,27 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
// with the constexpr specifier; if so, its declaration shall specify a
// brace-or-equal-initializer in which every initializer-clause that is
// an assignment-expression is a constant expression.
- QualType T = VDecl->getType();
// Do nothing on dependent types.
- if (T->isDependentType()) {
+ if (DclT->isDependentType()) {
// Allow any 'static constexpr' members, whether or not they are of literal
- // type. We separately check that the initializer is a constant expression,
- // which implicitly requires the member to be of literal type.
+ // type. We separately check that every constexpr variable is of literal
+ // type.
} else if (VDecl->isConstexpr()) {
// Require constness.
- } else if (!T.isConstQualified()) {
+ } else if (!DclT.isConstQualified()) {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_non_const)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
// We allow integer constant expressions in all cases.
- } else if (T->isIntegralOrEnumerationType()) {
+ } else if (DclT->isIntegralOrEnumerationType()) {
// Check whether the expression is a constant expression.
SourceLocation Loc;
- if (getLangOptions().CPlusPlus0x && T.isVolatileQualified())
- // In C++0x, a non-constexpr const static data member with an
+ if (getLangOpts().CPlusPlus0x && DclT.isVolatileQualified())
+ // In C++11, a non-constexpr const static data member with an
// in-class initializer cannot be volatile.
Diag(VDecl->getLocation(), diag::err_in_class_initializer_volatile);
else if (Init->isValueDependent())
@@ -6083,88 +6396,65 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
VDecl->setInvalidDecl();
}
- // We allow floating-point constants as an extension.
- } else if (T->isFloatingType()) { // also permits complex, which is ok
+ // We allow foldable floating-point constants as an extension.
+ } else if (DclT->isFloatingType()) { // also permits complex, which is ok
Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type)
- << T << Init->getSourceRange();
- if (getLangOptions().CPlusPlus0x)
+ << DclT << Init->getSourceRange();
+ if (getLangOpts().CPlusPlus0x)
Diag(VDecl->getLocation(),
diag::note_in_class_initializer_float_type_constexpr)
<< FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
- if (!Init->isValueDependent() &&
- !Init->isConstantInitializer(Context, false)) {
+ if (!Init->isValueDependent() && !Init->isEvaluatable(Context)) {
Diag(Init->getExprLoc(), diag::err_in_class_initializer_non_constant)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
}
- // Suggest adding 'constexpr' in C++0x for literal types.
- } else if (getLangOptions().CPlusPlus0x && T->isLiteralType()) {
+ // Suggest adding 'constexpr' in C++11 for literal types.
+ } else if (getLangOpts().CPlusPlus0x && DclT->isLiteralType()) {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_literal_type)
- << T << Init->getSourceRange()
+ << DclT << Init->getSourceRange()
<< FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
VDecl->setConstexpr(true);
} else {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_bad_type)
- << T << Init->getSourceRange();
+ << DclT << Init->getSourceRange();
VDecl->setInvalidDecl();
}
} else if (VDecl->isFileVarDecl()) {
- if (VDecl->getStorageClassAsWritten() == SC_Extern &&
- (!getLangOptions().CPlusPlus ||
+ if (VDecl->getStorageClassAsWritten() == SC_Extern &&
+ (!getLangOpts().CPlusPlus ||
!Context.getBaseElementType(VDecl->getType()).isConstQualified()))
Diag(VDecl->getLocation(), diag::warn_extern_init);
- if (!VDecl->isInvalidDecl()) {
- InitializationSequence InitSeq(*this, Entity, Kind, &Init, 1);
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(*this, &Init, 1),
- &DclT);
- if (Result.isInvalid()) {
- VDecl->setInvalidDecl();
- return;
- }
- Init = Result.takeAs<Expr>();
- }
-
- // C++ 3.6.2p2, allow dynamic initialization of static initializers.
- // Don't check invalid declarations to avoid emitting useless diagnostics.
- if (!getLangOptions().CPlusPlus && !VDecl->isInvalidDecl()) {
- // C99 6.7.8p4. All file scoped initializers need to be constant.
+ // C99 6.7.8p4. All file scoped initializers need to be constant.
+ if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl())
CheckForConstantInitializer(Init, DclT);
- }
- }
- // If the type changed, it means we had an incomplete type that was
- // completed by the initializer. For example:
- // int ary[] = { 1, 3, 5 };
- // "ary" transitions from a VariableArrayType to a ConstantArrayType.
- if (!VDecl->isInvalidDecl() && (DclT != SavT)) {
- VDecl->setType(DclT);
- Init->setType(DclT);
}
-
- // Check any implicit conversions within the expression.
- CheckImplicitConversions(Init, VDecl->getLocation());
-
- if (!VDecl->isInvalidDecl())
- checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
- if (VDecl->isConstexpr() && !VDecl->isInvalidDecl() &&
- !VDecl->getType()->isDependentType() &&
- !Init->isTypeDependent() && !Init->isValueDependent() &&
- !Init->isConstantInitializer(Context,
- VDecl->getType()->isReferenceType())) {
- // FIXME: Improve this diagnostic to explain why the initializer is not
- // a constant expression.
- Diag(VDecl->getLocation(), diag::err_constexpr_var_requires_const_init)
- << VDecl << Init->getSourceRange();
+ // We will represent direct-initialization similarly to copy-initialization:
+ // int x(1); -as-> int x = 1;
+ // ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
+ //
+  // Clients that want to distinguish between the two forms can check for
+  // a direct initializer using VarDecl::getInitStyle().
+  // A major benefit is that clients that don't particularly care which exact
+  // form was used (like CodeGen) can handle both cases without special-case
+  // code.
+
+ // C++ 8.5p11:
+ // The form of initialization (using parentheses or '=') is generally
+ // insignificant, but does matter when the entity being initialized has a
+ // class type.
+ if (CXXDirectInit) {
+ assert(DirectInit && "Call-style initializer must be direct init.");
+ VDecl->setInitStyle(VarDecl::CallInit);
+ } else if (DirectInit) {
+    // This must be list-initialization; no other form of
+    // direct-initialization reaches this point.
+ VDecl->setInitStyle(VarDecl::ListInit);
}
-
- Init = MaybeCreateExprWithCleanups(Init);
- // Attach the initializer to the decl.
- VDecl->setInit(Init);
CheckCompleteVariableDeclaration(VDecl);
}
@@ -6218,7 +6508,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
if (VarDecl *Var = dyn_cast<VarDecl>(RealDecl)) {
QualType Type = Var->getType();
- // C++0x [dcl.spec.auto]p3
+ // C++11 [dcl.spec.auto]p3
if (TypeMayContainAuto && Type->getContainedAutoType()) {
Diag(Var->getLocation(), diag::err_auto_var_requires_init)
<< Var->getDeclName() << Type;
@@ -6226,17 +6516,19 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
return;
}
- // C++0x [dcl.constexpr]p9: An object or reference declared constexpr must
- // have an initializer.
- // C++0x [class.static.data]p3: A static data member can be declared with
+ // C++11 [class.static.data]p3: A static data member can be declared with
// the constexpr specifier; if so, its declaration shall specify
// a brace-or-equal-initializer.
- //
- // A static data member's definition may inherit an initializer from an
- // in-class declaration.
- if (Var->isConstexpr() && !Var->getAnyInitializer()) {
- Diag(Var->getLocation(), diag::err_constexpr_var_requires_init)
- << Var->getDeclName();
+ // C++11 [dcl.constexpr]p1: The constexpr specifier shall be applied only to
+ // the definition of a variable [...] or the declaration of a static data
+ // member.
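+    // e.g. (illustrative): 'extern constexpr int x;' is a non-defining
+    // declaration of a variable, so it is rejected here.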
+ if (Var->isConstexpr() && !Var->isThisDeclarationADefinition()) {
+ if (Var->isStaticDataMember())
+ Diag(Var->getLocation(),
+ diag::err_constexpr_static_mem_var_requires_init)
+ << Var->getDeclName();
+ else
+ Diag(Var->getLocation(), diag::err_invalid_constexpr_var_decl);
Var->setInvalidDecl();
return;
}
@@ -6295,7 +6587,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
// is accepted by gcc. Hence here we issue a warning instead of
// an error and we do not invalidate the static declaration.
// NOTE: to avoid multiple warnings, only check the first declaration.
- if (Var->getPreviousDeclaration() == 0)
+ if (Var->getPreviousDecl() == 0)
RequireCompleteType(Var->getLocation(), Type,
diag::ext_typecheck_decl_incomplete_type);
}
@@ -6352,21 +6644,21 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
// Check for jumps past the implicit initializer. C++0x
// clarifies that this applies to a "variable with automatic
// storage duration", not a "local variable".
- // C++0x [stmt.dcl]p3
+ // C++11 [stmt.dcl]p3
// A program that jumps from a point where a variable with automatic
// storage duration is not in scope to a point where it is in scope is
// ill-formed unless the variable has scalar type, class type with a
// trivial default constructor and a trivial destructor, a cv-qualified
// version of one of these types, or an array of one of the preceding
// types and is declared without an initializer.
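+  // For example (illustrative, 'S' being a hypothetical class type):
+  //     goto skip;  // ill-formed if 'S' has a non-trivial default
+  //     S s;        // constructor or a non-trivial destructor
+  //   skip: ;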
- if (getLangOptions().CPlusPlus && Var->hasLocalStorage()) {
+ if (getLangOpts().CPlusPlus && Var->hasLocalStorage()) {
if (const RecordType *Record
= Context.getBaseElementType(Type)->getAs<RecordType>()) {
CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl());
- if ((!getLangOptions().CPlusPlus0x && !CXXRecord->isPOD()) ||
- (getLangOptions().CPlusPlus0x &&
- (!CXXRecord->hasTrivialDefaultConstructor() ||
- !CXXRecord->hasTrivialDestructor())))
+ // Mark the function for further checking even if the looser rules of
+ // C++11 do not require such checks, so that we can diagnose
+ // incompatibilities with C++98.
+ if (!CXXRecord->isPOD())
getCurFunction()->setHasBranchProtectedScope();
}
}
@@ -6394,8 +6686,11 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
MultiExprArg(*this, 0, 0));
if (Init.isInvalid())
Var->setInvalidDecl();
- else if (Init.get())
+ else if (Init.get()) {
Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
+ // This is important for template substitution.
+ Var->setInitStyle(VarDecl::CallInit);
+ }
CheckCompleteVariableDeclaration(Var);
}
@@ -6448,7 +6743,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// In ARC, don't allow jumps past the implicit initialization of a
// local retaining variable.
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
var->hasLocalStorage()) {
switch (var->getType().getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -6464,7 +6759,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
}
// All the following checks are C++ only.
- if (!getLangOptions().CPlusPlus) return;
+ if (!getLangOpts().CPlusPlus) return;
QualType baseType = Context.getBaseElementType(var->getType());
if (baseType->isDependentType()) return;
@@ -6480,7 +6775,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (type->isStructureOrClassType()) {
SourceLocation poi = var->getLocation();
- Expr *varRef = new (Context) DeclRefExpr(var, type, VK_LValue, poi);
+    Expr *varRef = new (Context) DeclRefExpr(var, false, type, VK_LValue,
+                                             poi);
ExprResult result =
PerformCopyInitialization(
InitializedEntity::InitializeBlock(poi, type, false),
@@ -6493,15 +6788,41 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
}
}
- // Check for global constructors.
- if (!var->getDeclContext()->isDependentContext() &&
- var->hasGlobalStorage() &&
- !var->isStaticLocal() &&
- var->getInit() &&
- !var->getInit()->isConstantInitializer(Context,
- baseType->isReferenceType()))
- Diag(var->getLocation(), diag::warn_global_constructor)
- << var->getInit()->getSourceRange();
+ Expr *Init = var->getInit();
+ bool IsGlobal = var->hasGlobalStorage() && !var->isStaticLocal();
+
+ if (!var->getDeclContext()->isDependentContext() && Init) {
+ if (IsGlobal && !var->isConstexpr() &&
+ getDiagnostics().getDiagnosticLevel(diag::warn_global_constructor,
+ var->getLocation())
+ != DiagnosticsEngine::Ignored &&
+ !Init->isConstantInitializer(Context, baseType->isReferenceType()))
+ Diag(var->getLocation(), diag::warn_global_constructor)
+ << Init->getSourceRange();
+
+ if (var->isConstexpr()) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ if (!var->evaluateValue(Notes) || !var->isInitICE()) {
+ SourceLocation DiagLoc = var->getLocation();
+ // If the note doesn't add any useful information other than a source
+ // location, fold it into the primary diagnostic.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+ Diag(DiagLoc, diag::err_constexpr_var_requires_const_init)
+ << var << Init->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ Diag(Notes[I].first, Notes[I].second);
+ }
+ } else if (var->isUsableInConstantExpressions(Context)) {
+ // Check whether the initializer of a const variable of integral or
+ // enumeration type is an ICE now, since we can't tell whether it was
+ // initialized by a constant expression if we check later.
+ var->checkInitIsICE();
+ }
+ }
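[Editor's illustration: hypothetical code exercising the two added paths, the constexpr-initializer error and the global-constructor warning.]

    struct G { G(); };
    G g;                          // -Wglobal-constructors: dynamic
                                  // initialization at program startup

    constexpr int square(int x);  // declared but never defined
    constexpr int n = square(3);  // error: constexpr variable 'n' must be
                                  // initialized by a constant expression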
// Require the destructor.
if (const RecordType *recordType = baseType->getAs<RecordType>())
@@ -6586,11 +6907,16 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
const DeclSpec &DS = D.getDeclSpec();
// Verify C99 6.7.5.3p2: The only SCS allowed is 'register'.
+ // C++03 [dcl.stc]p2 also permits 'auto'.
VarDecl::StorageClass StorageClass = SC_None;
VarDecl::StorageClass StorageClassAsWritten = SC_None;
if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
StorageClass = SC_Register;
StorageClassAsWritten = SC_Register;
+ } else if (getLangOpts().CPlusPlus &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
+ StorageClass = SC_Auto;
+ StorageClassAsWritten = SC_Auto;
} else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
Diag(DS.getStorageClassSpecLoc(),
diag::err_invalid_storage_class_in_func_decl);
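[Editor's illustration: a sketch, with hypothetical names, of the storage classes this now accepts or rejects on parameters.]

    void f(register int r);  // OK: C99 6.7.5.3p2 permits 'register'
    void g(auto int a);      // accepted only as C++03, per [dcl.stc]p2
    void h(static int s);    // error: invalid storage class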
@@ -6608,7 +6934,7 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType parmDeclType = TInfo->getType();
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments inside the type of this
// parameter.
CheckExtraCXXDefaultArguments(D);
@@ -6660,7 +6986,7 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
// the enclosing context. This prevents them from accidentally
// looking like class members in C++.
ParmVarDecl *New = CheckParameter(Context.getTranslationUnitDecl(),
- D.getSourceRange().getBegin(),
+ D.getLocStart(),
D.getIdentifierLoc(), II,
parmDeclType, TInfo,
StorageClass, StorageClassAsWritten);
@@ -6715,7 +7041,7 @@ void Sema::DiagnoseUnusedParameters(ParmVarDecl * const *Param,
return;
for (; Param != ParamEnd; ++Param) {
- if (!(*Param)->isUsed() && (*Param)->getDeclName() &&
+ if (!(*Param)->isReferenced() && (*Param)->getDeclName() &&
!(*Param)->hasAttr<UnusedAttr>()) {
Diag((*Param)->getLocation(), diag::warn_unused_parameter)
<< (*Param)->getDeclName();
@@ -6732,7 +7058,7 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Param,
// Warn if the return value is pass-by-value and larger than the specified
// threshold.
- if (ReturnTy.isPODType(Context)) {
+ if (!ReturnTy->isDependentType() && ReturnTy.isPODType(Context)) {
unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
Diag(D->getLocation(), diag::warn_return_value_size)
@@ -6743,7 +7069,7 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Param,
// threshold.
for (; Param != ParamEnd; ++Param) {
QualType T = (*Param)->getType();
- if (!T.isPODType(Context))
+ if (T->isDependentType() || !T.isPODType(Context))
continue;
unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
@@ -6758,7 +7084,7 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
VarDecl::StorageClass StorageClass,
VarDecl::StorageClass StorageClassAsWritten) {
// In ARC, infer a lifetime qualifier for appropriate parameter types.
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_None &&
T->isObjCLifetimeType()) {
@@ -6826,7 +7152,7 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
for (int i = FTI.NumArgs; i != 0; /* decrement in loop */) {
--i;
if (FTI.ArgInfo[i].Param == 0) {
- llvm::SmallString<256> Code;
+ SmallString<256> Code;
llvm::raw_svector_ostream(Code) << " int "
<< FTI.ArgInfo[i].Ident->getName()
<< ";\n";
@@ -6856,7 +7182,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope,
assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
- D.setFunctionDefinition(true);
+ D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = HandleDeclarator(ParentScope, D,
MultiTemplateParamsArg(*this));
return ActOnStartOfFunctionDef(FnBodyScope, DP);
@@ -6892,8 +7218,8 @@ static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD) {
return false;
bool MissingPrototype = true;
- for (const FunctionDecl *Prev = FD->getPreviousDeclaration();
- Prev; Prev = Prev->getPreviousDeclaration()) {
+ for (const FunctionDecl *Prev = FD->getPreviousDecl();
+ Prev; Prev = Prev->getPreviousDecl()) {
// Ignore any declarations that occur in function or method
// scope, because they aren't visible from the header.
if (Prev->getDeclContext()->isFunctionOrMethod())
@@ -6911,11 +7237,11 @@ void Sema::CheckForFunctionRedefinition(FunctionDecl *FD) {
// was an extern inline function.
const FunctionDecl *Definition;
if (FD->isDefined(Definition) &&
- !canRedefineFunction(Definition, getLangOptions())) {
- if (getLangOptions().GNUMode && Definition->isInlineSpecified() &&
+ !canRedefineFunction(Definition, getLangOpts())) {
+ if (getLangOpts().GNUMode && Definition->isInlineSpecified() &&
Definition->getStorageClass() == SC_Extern)
Diag(FD->getLocation(), diag::err_redefinition_extern_inline)
- << FD->getDeclName() << getLangOptions().CPlusPlus;
+ << FD->getDeclName() << getLangOpts().CPlusPlus;
else
Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
Diag(Definition->getLocation(), diag::note_previous_definition);
@@ -6987,6 +7313,43 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
}
}
+ // If we had any tags defined in the function prototype,
+ // introduce them into the function scope.
+ if (FnBodyScope) {
+ for (llvm::ArrayRef<NamedDecl*>::iterator I = FD->getDeclsInPrototypeScope().begin(),
+ E = FD->getDeclsInPrototypeScope().end(); I != E; ++I) {
+ NamedDecl *D = *I;
+
+ // Some of these decls (like enums) may have been pinned to the translation unit
+ // for lack of a real context earlier. If so, remove it from the translation unit
+ // and reattach it to the current context.
+ if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) {
+ // Is the decl actually in the context?
+ for (DeclContext::decl_iterator DI = Context.getTranslationUnitDecl()->decls_begin(),
+ DE = Context.getTranslationUnitDecl()->decls_end(); DI != DE; ++DI) {
+ if (*DI == D) {
+ Context.getTranslationUnitDecl()->removeDecl(D);
+ break;
+ }
+ }
+ // Either way, reassign the lexical decl context to our FunctionDecl.
+ D->setLexicalDeclContext(CurContext);
+ }
+
+ // If the decl has a non-null name, make it accessible in the current scope.
+ if (!D->getName().empty())
+ PushOnScopeChains(D, FnBodyScope, /*AddToContext=*/false);
+
+ // Similarly, dive into enums and fish their constants out, making them
+ // accessible in this scope.
+ if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ for (EnumDecl::enumerator_iterator EI = ED->enumerator_begin(),
+ EE = ED->enumerator_end(); EI != EE; ++EI)
+ PushOnScopeChains(*EI, FnBodyScope, /*AddToContext=*/false);
+ }
+ }
+ }
+
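[Editor's illustration: a hypothetical C translation unit showing the effect of this hunk. Tags (and enumerators) declared in the prototype of a definition become visible in its body; C only.]

    int first(struct Pair { int a, b; } p) {
      struct Pair q = p;     // 'struct Pair' is now found in the body scope
      return q.a;
    }
    int classify(enum Color { Red, Blue } c) {
      return c == Red;       // enumerators are injected as well
    }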
// Checking attributes of current function definition
// dllimport attribute.
DLLImportAttr *DA = FD->getAttr<DLLImportAttr>();
@@ -7068,20 +7431,15 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (FD) {
FD->setBody(Body);
- if (FD->isMain()) {
- // C and C++ allow for main to automagically return 0.
- // Implements C++ [basic.start.main]p5 and C99 5.1.2.2.3.
- FD->setHasImplicitReturnZero(true);
- WP.disableCheckFallThrough();
- } else if (FD->hasAttr<NakedAttr>()) {
- // If the function is marked 'naked', don't complain about missing return
- // statements.
+
+ // If the function implicitly returns zero (like 'main') or is naked,
+ // don't complain about missing return statements.
+ if (FD->hasImplicitReturnZero() || FD->hasAttr<NakedAttr>())
WP.disableCheckFallThrough();
- }
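[Editor's illustration: a minimal sketch of the behavior the refactoring preserves.]

    int main() {
      // no return statement: main() implicitly returns 0, so the
      // fall-through check stays disabled; the condition is now keyed off
      // hasImplicitReturnZero() instead of isMain()
    }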
// MSVC permits the use of the pure specifier (=0) on a function definition
// at class scope; warn about this non-standard construct.
- if (getLangOptions().MicrosoftExt && FD->isPure())
+ if (getLangOpts().MicrosoftExt && FD->isPure())
Diag(FD->getLocation(), diag::warn_pure_function_definition);
if (!FD->isInvalidDecl()) {
@@ -7096,7 +7454,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
computeNRVO(Body, getCurFunction());
}
- assert(FD == getCurFunctionDecl() && "Function parsing confused");
+ assert((FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
+ "Function parsing confused");
} else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
@@ -7154,33 +7513,34 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// deletion in some later function.
if (PP.getDiagnostics().hasErrorOccurred() ||
PP.getDiagnostics().getSuppressAllDiagnostics()) {
- ExprTemporaries.clear();
- ExprNeedsCleanups = false;
+ DiscardCleanupsInEvaluationContext();
} else if (!isa<FunctionTemplateDecl>(dcl)) {
// Since the body is valid, issue any analysis-based warnings that are
// enabled.
ActivePolicy = &WP;
}
- if (FD && FD->isConstexpr() && !FD->isInvalidDecl() &&
- !CheckConstexprFunctionBody(FD, Body))
+ if (!IsInstantiation && FD && FD->isConstexpr() && !FD->isInvalidDecl() &&
+ (!CheckConstexprFunctionDecl(FD) ||
+ !CheckConstexprFunctionBody(FD, Body)))
FD->setInvalidDecl();
- assert(ExprTemporaries.empty() && "Leftover temporaries in function");
+ assert(ExprCleanupObjects.empty() && "Leftover temporaries in function");
assert(!ExprNeedsCleanups && "Unaccounted cleanups in function");
+ assert(MaybeODRUseExprs.empty() &&
+ "Leftover expressions for odr-use checking");
}
if (!IsInstantiation)
PopDeclContext();
- PopFunctionOrBlockScope(ActivePolicy, dcl);
+ PopFunctionScopeInfo(ActivePolicy, dcl);
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
if (getDiagnostics().hasErrorOccurred()) {
- ExprTemporaries.clear();
- ExprNeedsCleanups = false;
+ DiscardCleanupsInEvaluationContext();
}
return dcl;
@@ -7191,6 +7551,9 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
/// relevant Decl.
void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
ParsedAttributes &Attrs) {
+ // Always attach attributes to the underlying decl.
+ if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
+ D = TD->getTemplatedDecl();
ProcessDeclAttributeList(S, D, Attrs.getList());
}
@@ -7212,12 +7575,35 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
}
// Extension in C99. Legal in C90, but warn about it.
+ unsigned diag_id;
if (II.getName().startswith("__builtin_"))
- Diag(Loc, diag::warn_builtin_unknown) << &II;
- else if (getLangOptions().C99)
- Diag(Loc, diag::ext_implicit_function_decl) << &II;
+ diag_id = diag::warn_builtin_unknown;
+ else if (getLangOpts().C99)
+ diag_id = diag::ext_implicit_function_decl;
else
- Diag(Loc, diag::warn_implicit_function_decl) << &II;
+ diag_id = diag::warn_implicit_function_decl;
+ Diag(Loc, diag_id) << &II;
+
+ // Because typo correction is expensive, only do it if the implicit
+ // function declaration is going to be treated as an error.
+ if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
+ TypoCorrection Corrected;
+ DeclFilterCCC<FunctionDecl> Validator;
+ if (S && (Corrected = CorrectTypo(DeclarationNameInfo(&II, Loc),
+ LookupOrdinaryName, S, 0, Validator))) {
+ std::string CorrectedStr = Corrected.getAsString(getLangOpts());
+ std::string CorrectedQuotedStr = Corrected.getQuoted(getLangOpts());
+ FunctionDecl *Func = Corrected.getCorrectionDeclAs<FunctionDecl>();
+
+ Diag(Loc, diag::note_function_suggestion) << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(Loc, CorrectedStr);
+
+ if (Func->getLocation().isValid()
+ && !II.getName().startswith("__builtin_"))
+ Diag(Func->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+ }
+ }
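[Editor's illustration: hypothetical C code showing when the new typo correction fires, i.e. only once the implicit-declaration diagnostic is an error, e.g. under -Werror=implicit-function-declaration.]

    int main(void) {
      return prinft("hello\n");  // error: implicit declaration of 'prinft';
                                 // note suggests 'printf' with a fix-it
    }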
// Set a Declarator for the implicit definition: int foo();
const char *Dummy;
@@ -7230,6 +7616,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
Declarator D(DS, Declarator::BlockContext);
D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, SourceLocation(), 0,
0, 0, true, SourceLocation(),
+ SourceLocation(), SourceLocation(),
SourceLocation(),
EST_None, SourceLocation(),
0, 0, 0, 0, Loc, Loc, D),
@@ -7272,10 +7659,16 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
unsigned FormatIdx;
bool HasVAListArg;
if (Context.BuiltinInfo.isPrintfLike(BuiltinID, FormatIdx, HasVAListArg)) {
- if (!FD->getAttr<FormatAttr>())
+ if (!FD->getAttr<FormatAttr>()) {
+ const char *fmt = "printf";
+ unsigned int NumParams = FD->getNumParams();
+ if (FormatIdx < NumParams && // NumParams may be 0 (e.g. vfprintf)
+ FD->getParamDecl(FormatIdx)->getType()->isObjCObjectPointerType())
+ fmt = "NSString";
FD->addAttr(::new (Context) FormatAttr(FD->getLocation(), Context,
- "printf", FormatIdx+1,
+ fmt, FormatIdx+1,
HasVAListArg ? 0 : FormatIdx+2));
+ }
}
if (Context.BuiltinInfo.isScanfLike(BuiltinID, FormatIdx,
HasVAListArg)) {
@@ -7288,7 +7681,7 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
// Mark const if we don't care about errno and that is the only
// thing preventing the function from being const. This allows
// IRgen to use LLVM intrinsics for such functions.
- if (!getLangOptions().MathErrno &&
+ if (!getLangOpts().MathErrno &&
Context.BuiltinInfo.isConstWithoutErrno(BuiltinID)) {
if (!FD->getAttr<ConstAttr>())
FD->addAttr(::new (Context) ConstAttr(FD->getLocation(), Context));
@@ -7306,7 +7699,7 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
IdentifierInfo *Name = FD->getIdentifier();
if (!Name)
return;
- if ((!getLangOptions().CPlusPlus &&
+ if ((!getLangOpts().CPlusPlus &&
FD->getDeclContext()->isTranslationUnit()) ||
(isa<LinkageSpecDecl>(FD->getDeclContext()) &&
cast<LinkageSpecDecl>(FD->getDeclContext())->getLanguage() ==
@@ -7316,16 +7709,7 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
} else
return;
- if (Name->isStr("NSLog") || Name->isStr("NSLogv")) {
- // FIXME: NSLog and NSLogv should be target specific
- if (const FormatAttr *Format = FD->getAttr<FormatAttr>()) {
- // FIXME: We known better than our headers.
- const_cast<FormatAttr *>(Format)->setType(Context, "printf");
- } else
- FD->addAttr(::new (Context) FormatAttr(FD->getLocation(), Context,
- "printf", 1,
- Name->isStr("NSLogv") ? 0 : 2));
- } else if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
+ if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
// FIXME: asprintf and vasprintf aren't C99 functions. Should they be
// target-specific builtins, perhaps?
if (!FD->getAttr<FormatAttr>())
@@ -7347,7 +7731,7 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
// Scope manipulation handled by caller.
TypedefDecl *NewTD = TypedefDecl::Create(Context, CurContext,
- D.getSourceRange().getBegin(),
+ D.getLocStart(),
D.getIdentifierLoc(),
D.getIdentifier(),
TInfo);
@@ -7406,6 +7790,52 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
}
+/// \brief Check that this is a valid underlying type for an enum declaration.
+bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ QualType T = TI->getType();
+
+ if (T->isDependentType() || T->isIntegralType(Context))
+ return false;
+
+ Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
+ return true;
+}
+
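[Editor's illustration: hypothetical declarations exercising the factored-out underlying-type check.]

    enum E : float { A };        // error: non-integral underlying type
    enum F : const short { B };  // OK: cv-qualification is ignored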
+/// Check whether this is a valid redeclaration of a previous enumeration.
+/// \return true if the redeclaration was invalid.
+bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
+ QualType EnumUnderlyingTy,
+ const EnumDecl *Prev) {
+ bool IsFixed = !EnumUnderlyingTy.isNull();
+
+ if (IsScoped != Prev->isScoped()) {
+ Diag(EnumLoc, diag::err_enum_redeclare_scoped_mismatch)
+ << Prev->isScoped();
+ Diag(Prev->getLocation(), diag::note_previous_use);
+ return true;
+ }
+
+ if (IsFixed && Prev->isFixed()) {
+ if (!EnumUnderlyingTy->isDependentType() &&
+ !Prev->getIntegerType()->isDependentType() &&
+ !Context.hasSameUnqualifiedType(EnumUnderlyingTy,
+ Prev->getIntegerType())) {
+ Diag(EnumLoc, diag::err_enum_redeclare_type_mismatch)
+ << EnumUnderlyingTy << Prev->getIntegerType();
+ Diag(Prev->getLocation(), diag::note_previous_use);
+ return true;
+ }
+ } else if (IsFixed != Prev->isFixed()) {
+ Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
+ << Prev->isFixed();
+ Diag(Prev->getLocation(), diag::note_previous_use);
+ return true;
+ }
+
+ return false;
+}
+
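[Editor's illustration: hypothetical redeclarations hitting each of the three diagnostics above.]

    enum class S : int;
    enum S;           // error: scoped vs. unscoped mismatch
    enum F1 : int;
    enum F1 : long;   // error: underlying type differs between declarations
    enum F2 : int;
    enum F2;          // error: fixed vs. unfixed mismatch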
/// \brief Determine whether a tag with a given kind is acceptable
/// as a redeclaration of the given tag declaration.
///
@@ -7512,15 +7942,18 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
- bool ScopedEnum, bool ScopedEnumUsesClassTag,
+ SourceLocation ScopedEnumKWLoc,
+ bool ScopedEnumUsesClassTag,
TypeResult UnderlyingType) {
// If this is not a definition, it must have a name.
+ IdentifierInfo *OrigName = Name;
assert((Name != 0 || TUK == TUK_Definition) &&
"Nameless record must be a definition!");
assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
OwnedDecl = false;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ bool ScopedEnum = ScopedEnumKWLoc.isValid();
// FIXME: Check explicit specializations more carefully.
bool isExplicitSpecialization = false;
@@ -7576,23 +8009,18 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// C++0x 7.2p2: The type-specifier-seq of an enum-base shall name an
// integral type; any cv-qualification is ignored.
TypeSourceInfo *TI = 0;
- QualType T = GetTypeFromParser(UnderlyingType.get(), &TI);
+ GetTypeFromParser(UnderlyingType.get(), &TI);
EnumUnderlying = TI;
- SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
-
- if (!T->isDependentType() && !T->isIntegralType(Context)) {
- Diag(UnderlyingLoc, diag::err_enum_invalid_underlying)
- << T;
+ if (CheckEnumUnderlyingType(TI))
// Recover by falling back to int.
EnumUnderlying = Context.IntTy.getTypePtr();
- }
- if (DiagnoseUnexpandedParameterPack(UnderlyingLoc, TI,
+ if (DiagnoseUnexpandedParameterPack(TI->getTypeLoc().getBeginLoc(), TI,
UPPC_FixedUnderlyingType))
EnumUnderlying = Context.IntTy.getTypePtr();
- } else if (getLangOptions().MicrosoftExt)
+ } else if (getLangOpts().MicrosoftMode)
// Microsoft enums are always of int type.
EnumUnderlying = Context.IntTy.getTypePtr();
}
@@ -7686,7 +8114,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (Previous.isAmbiguous())
return 0;
- if (!getLangOptions().CPlusPlus && TUK != TUK_Reference) {
+ if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
// FIXME: This makes sure that we ignore the contexts associated
// with C structs, unions, and enums when looking for a matching
// tag declaration or definition. See the similar lookup tweak
@@ -7697,6 +8125,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
} else if (S->isFunctionPrototypeScope()) {
// If this is an enum declaration in function prototype scope, set its
// initial context to the translation unit.
+ // FIXME: [citation needed]
SearchDC = Context.getTranslationUnitDecl();
}
@@ -7708,7 +8137,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
Previous.clear();
}
- if (getLangOptions().CPlusPlus && Name && DC && StdNamespace &&
+ if (getLangOpts().CPlusPlus && Name && DC && StdNamespace &&
DC->Equals(getStdNamespace()) && Name->isStr("bad_alloc")) {
// This is a declaration of or a reference to "std::bad_alloc".
isStdBadAlloc = true;
@@ -7759,12 +8188,12 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Find the context where we'll be declaring the tag.
// FIXME: We would like to maintain the current DeclContext as the
// lexical context,
- while (SearchDC->isRecord() || SearchDC->isTransparentContext())
+ while (!SearchDC->isFileContext() && !SearchDC->isFunctionOrMethod())
SearchDC = SearchDC->getParent();
// Find the scope where we'll be declaring the tag.
while (S->isClassScope() ||
- (getLangOptions().CPlusPlus &&
+ (getLangOpts().CPlusPlus &&
S->isFunctionPrototypeScope()) ||
((S->getFlags() & Scope::DeclScope) == 0) ||
(S->getEntity() &&
@@ -7781,7 +8210,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// In C++, we need to do a redeclaration lookup to properly
// diagnose some problems.
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
Previous.setRedeclarationKind(ForRedeclaration);
LookupQualifiedName(Previous, SearchDC);
}
@@ -7799,7 +8228,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// technically forbidden by the current standard but which is
// okay according to the likely resolution of an open issue;
// see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#407
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(PrevDecl)) {
if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
TagDecl *Tag = TT->getDecl();
@@ -7851,36 +8280,28 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
- // All conflicts with previous declarations are recovered by
- // returning the previous declaration.
- if (ScopedEnum != PrevEnum->isScoped()) {
- Diag(KWLoc, diag::err_enum_redeclare_scoped_mismatch)
- << PrevEnum->isScoped();
- Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
- return PrevTagDecl;
- }
- else if (EnumUnderlying && PrevEnum->isFixed()) {
- QualType T;
- if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
- T = TI->getType();
- else
- T = QualType(EnumUnderlying.get<const Type*>(), 0);
-
- if (!Context.hasSameUnqualifiedType(T, PrevEnum->getIntegerType())) {
- Diag(NameLoc.isValid() ? NameLoc : KWLoc,
- diag::err_enum_redeclare_type_mismatch)
- << T
- << PrevEnum->getIntegerType();
- Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
- return PrevTagDecl;
- }
- }
- else if (!EnumUnderlying.isNull() != PrevEnum->isFixed()) {
- Diag(KWLoc, diag::err_enum_redeclare_fixed_mismatch)
- << PrevEnum->isFixed();
- Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
+ // If this is an elaborated-type-specifier for a scoped enumeration,
+ // the 'class' keyword is not necessary and not permitted.
+ if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ if (ScopedEnum)
+ Diag(ScopedEnumKWLoc, diag::err_enum_class_reference)
+ << PrevEnum->isScoped()
+ << FixItHint::CreateRemoval(ScopedEnumKWLoc);
return PrevTagDecl;
}
+
+ QualType EnumUnderlyingTy;
+ if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
+ EnumUnderlyingTy = TI->getType();
+ else if (const Type *T = EnumUnderlying.dyn_cast<const Type*>())
+ EnumUnderlyingTy = QualType(T, 0);
+
+ // All conflicts with previous declarations are recovered by
+ // returning the previous declaration, unless this is a definition,
+ // in which case we want the caller to bail out.
+ if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
+ ScopedEnum, EnumUnderlyingTy, PrevEnum))
+ return TUK == TUK_Declaration ? PrevTagDecl : 0;
}
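[Editor's illustration: a hypothetical C++11 example of the new elaborated-type-specifier diagnostic.]

    enum class Color { Red, Green };
    enum class Color c;   // error: 'class' is not permitted here; the
                          // fix-it removes it, leaving 'enum Color c'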
if (!Invalid) {
@@ -7891,7 +8312,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// For our current ASTs this shouldn't be a problem, but will
// need to be changed with DeclGroups.
if ((TUK == TUK_Reference && (!PrevTagDecl->getFriendObjectKind() ||
- getLangOptions().MicrosoftExt)) || TUK == TUK_Friend)
+ getLangOpts().MicrosoftExt)) || TUK == TUK_Friend)
return PrevTagDecl;
// Diagnose attempts to redefine a tag.
@@ -7900,11 +8321,25 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// If we're defining a specialization and the previous definition
// is from an implicit instantiation, don't emit an error
// here; we'll catch this in the general case below.
- if (!isExplicitSpecialization ||
- !isa<CXXRecordDecl>(Def) ||
- cast<CXXRecordDecl>(Def)->getTemplateSpecializationKind()
- == TSK_ExplicitSpecialization) {
- Diag(NameLoc, diag::err_redefinition) << Name;
+ bool IsExplicitSpecializationAfterInstantiation = false;
+ if (isExplicitSpecialization) {
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ RD->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ ED->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ }
+
+ if (!IsExplicitSpecializationAfterInstantiation) {
+ // A redeclaration in function prototype scope in C isn't
+ // visible elsewhere, so merely issue a warning.
+ if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope())
+ Diag(NameLoc, diag::warn_redefinition_in_param_list) << Name;
+ else
+ Diag(NameLoc, diag::err_redefinition) << Name;
Diag(Def->getLocation(), diag::note_previous_definition);
// If this is a redefinition, recover by making this
// struct be anonymous, which will make any later
@@ -7951,8 +8386,6 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// lookup. This is only actually possible in C++, where a few
// things like templates still live in the tag namespace.
} else {
- assert(getLangOptions().CPlusPlus);
-
// Use a better diagnostic if an elaborated-type-specifier
// found the wrong kind of type on the first
// (non-redeclaration) lookup.
@@ -8033,7 +8466,7 @@ CreateNewDecl:
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
- if (getLangOptions().CPlusPlus0x && cast<EnumDecl>(New)->isFixed()) {
+ if (getLangOpts().CPlusPlus0x && cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
// Conflicts are diagnosed above. Do nothing.
}
@@ -8043,9 +8476,9 @@ CreateNewDecl:
Diag(Def->getLocation(), diag::note_previous_definition);
} else {
unsigned DiagID = diag::ext_forward_ref_enum;
- if (getLangOptions().MicrosoftExt)
+ if (getLangOpts().MicrosoftMode)
DiagID = diag::ext_ms_forward_ref_enum;
- else if (getLangOptions().CPlusPlus)
+ else if (getLangOpts().CPlusPlus)
DiagID = diag::err_forward_ref_enum;
Diag(Loc, DiagID);
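[Editor's illustration: hypothetical forward declarations showing how the diagnostic is chosen.]

    enum Fruit;          // C++: error; Microsoft mode: extension warning;
                         // C: GNU extension
    enum Grain : short;  // C++11 opaque-enum-declaration: no diagnostic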
@@ -8071,7 +8504,7 @@ CreateNewDecl:
// FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
// struct X { int A; } D; D should chain to X.
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// FIXME: Look for a way to use RecordDecl for simple structs.
New = CXXRecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
cast_or_null<CXXRecordDecl>(PrevDecl));
@@ -8086,6 +8519,16 @@ CreateNewDecl:
// Maybe add qualifier info.
if (SS.isNotEmpty()) {
if (SS.isSet()) {
+ // If this is either a declaration or a definition, check the
+ // nested-name-specifier against the current context. We don't do this
+ // for explicit specializations, because they have similar checking
+ // (with more specific diagnostics) in the call to
+ // CheckMemberSpecialization, below.
+ if (!isExplicitSpecialization &&
+ (TUK == TUK_Definition || TUK == TUK_Declaration) &&
+ diagnoseQualifiedDeclaration(SS, DC, OrigName, NameLoc))
+ Invalid = true;
+
New->setQualifierInfo(SS.getWithLocInContext(Context));
if (TemplateParameterLists.size() > 0) {
New->setTemplateParameterListsInfo(Context,
@@ -8112,19 +8555,14 @@ CreateNewDecl:
AddMsStructLayoutForRecord(RD);
}
- if (PrevDecl && PrevDecl->isModulePrivate())
- New->setModulePrivate();
- else if (ModulePrivateLoc.isValid()) {
+ if (ModulePrivateLoc.isValid()) {
if (isExplicitSpecialization)
Diag(New->getLocation(), diag::err_module_private_specialization)
<< 2
<< FixItHint::CreateRemoval(ModulePrivateLoc);
- else if (PrevDecl && !PrevDecl->isModulePrivate())
- diagnoseModulePrivateRedeclaration(New, PrevDecl, ModulePrivateLoc);
// __module_private__ does not apply to local classes. However, we only
// diagnose this as an error when the declaration specifiers are
// freestanding. Here, we just ignore the __module_private__.
- // foobar
else if (!SearchDC->isFunctionOrMethod())
New->setModulePrivate();
}
@@ -8133,7 +8571,7 @@ CreateNewDecl:
// check the specialization.
if (isExplicitSpecialization && CheckMemberSpecialization(New, Previous))
Invalid = true;
-
+
if (Invalid)
New->setInvalidDecl();
@@ -8142,7 +8580,7 @@ CreateNewDecl:
// If we're declaring or defining a tag in function prototype scope
// in C, note that this type can only be used within the function.
- if (Name && S->isFunctionPrototypeScope() && !getLangOptions().CPlusPlus)
+ if (Name && S->isFunctionPrototypeScope() && !getLangOpts().CPlusPlus)
Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
// Set the lexical context. If the tag has a C++ scope specifier, the
@@ -8155,7 +8593,7 @@ CreateNewDecl:
// the tag name visible.
if (TUK == TUK_Friend)
New->setObjectOfFriendDecl(/* PreviouslyDeclared = */ !Previous.empty() ||
- getLangOptions().MicrosoftExt);
+ getLangOpts().MicrosoftExt);
// Set the access specifier.
if (!Invalid && SearchDC->isRecord())
@@ -8172,7 +8610,7 @@ CreateNewDecl:
New->setAccess(PrevDecl->getAccess());
DeclContext *DC = New->getDeclContext()->getRedeclContext();
- DC->makeDeclVisibleInContext(New, /* Recoverable = */ false);
+ DC->makeDeclVisibleInContext(New);
if (Name) // can be null along some error paths
if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
PushOnScopeChains(New, EnclosingScope, /* AddToContext = */ false);
@@ -8180,7 +8618,7 @@ CreateNewDecl:
S = getNonFieldDeclScope(S);
PushOnScopeChains(New, S, !IsForwardReference);
if (IsForwardReference)
- SearchDC->makeDeclVisibleInContext(New, /* Recoverable = */ false);
+ SearchDC->makeDeclVisibleInContext(New);
} else {
CurContext->addDecl(New);
@@ -8193,6 +8631,12 @@ CreateNewDecl:
II->isStr("FILE"))
Context.setFILEDecl(New);
+ // If we were in function prototype scope (and not in C++ mode), add this
+ // tag to the list of decls to inject into the function definition scope.
+ if (S->isFunctionPrototypeScope() && !getLangOpts().CPlusPlus &&
+ InFunctionDeclarator && Name)
+ DeclsInPrototypeScope.push_back(New);
+
OwnedDecl = true;
return New;
}
@@ -8256,6 +8700,13 @@ void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
TagDecl *Tag = cast<TagDecl>(TagD);
Tag->setRBraceLoc(RBraceLoc);
+ // Make sure we "complete" the definition even if it is invalid.
+ if (Tag->isBeingDefined()) {
+ assert(Tag->isInvalidDecl() && "We should already have completed it");
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
+ RD->completeDefinition();
+ }
+
if (isa<CXXRecordDecl>(Tag))
FieldCollector->FinishClass();
@@ -8271,13 +8722,14 @@ void Sema::ActOnObjCContainerFinishDefinition() {
PopDeclContext();
}
-void Sema::ActOnObjCTemporaryExitContainerContext() {
- OriginalLexicalContext = CurContext;
+void Sema::ActOnObjCTemporaryExitContainerContext(DeclContext *DC) {
+ assert(DC == CurContext && "Mismatch of container contexts");
+ OriginalLexicalContext = DC;
ActOnObjCContainerFinishDefinition();
}
-void Sema::ActOnObjCReenterContainerContext() {
- ActOnObjCContainerStartDefinition(cast<Decl>(OriginalLexicalContext));
+void Sema::ActOnObjCReenterContainerContext(DeclContext *DC) {
+ ActOnObjCContainerStartDefinition(cast<Decl>(DC));
OriginalLexicalContext = 0;
}
@@ -8286,6 +8738,12 @@ void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
TagDecl *Tag = cast<TagDecl>(TagD);
Tag->setInvalidDecl();
+ // Make sure we "complete" the definition even if it is invalid.
+ if (Tag->isBeingDefined()) {
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
+ RD->completeDefinition();
+ }
+
// We're undoing ActOnTagStartDefinition here, not
// ActOnStartCXXMemberDeclarations, so we don't have to mess with
// the FieldCollector.
@@ -8294,9 +8752,10 @@ void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
}
// Note that FieldName may be null for anonymous bitfields.
-bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
- QualType FieldTy, const Expr *BitWidth,
- bool *ZeroWidth) {
+ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
+ IdentifierInfo *FieldName,
+ QualType FieldTy, Expr *BitWidth,
+ bool *ZeroWidth) {
// Default to true; that shouldn't confuse checks for emptiness
if (ZeroWidth)
*ZeroWidth = true;
@@ -8306,7 +8765,7 @@ bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
// Handle incomplete types with specific error.
if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
- return true;
+ return ExprError();
if (FieldName)
return Diag(FieldLoc, diag::err_not_integral_type_bitfield)
<< FieldName << FieldTy << BitWidth->getSourceRange();
@@ -8314,16 +8773,18 @@ bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
<< FieldTy << BitWidth->getSourceRange();
} else if (DiagnoseUnexpandedParameterPack(const_cast<Expr *>(BitWidth),
UPPC_BitFieldWidth))
- return true;
+ return ExprError();
// If the bit-width is type- or value-dependent, don't try to check
// it now.
if (BitWidth->isValueDependent() || BitWidth->isTypeDependent())
- return false;
+ return Owned(BitWidth);
llvm::APSInt Value;
- if (VerifyIntegerConstantExpression(BitWidth, &Value))
- return true;
+ ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Value);
+ if (ICE.isInvalid())
+ return ICE;
+ BitWidth = ICE.take();
if (Value != 0 && ZeroWidth)
*ZeroWidth = false;
@@ -8343,7 +8804,7 @@ bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
if (!FieldTy->isDependentType()) {
uint64_t TypeSize = Context.getTypeSize(FieldTy);
if (Value.getZExtValue() > TypeSize) {
- if (!getLangOptions().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
if (FieldName)
return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_size)
<< FieldName << (unsigned)Value.getZExtValue()
@@ -8363,7 +8824,7 @@ bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
}
}
- return false;
+ return Owned(BitWidth);
}
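[Editor's illustration: a hypothetical struct exercising the bit-field checks; widths assume a 32-bit int.]

    struct S {
      unsigned a : 4;   // OK
      unsigned   : 0;   // zero-width is only valid for unnamed bit-fields
      unsigned b : 40;  // error in C: width exceeds the type's size;
    };                  // C++ permits it with a warning, as padding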
/// ActOnField - Each field of a C struct/union is passed into this in order
@@ -8388,7 +8849,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType T = TInfo->getType();
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
CheckExtraCXXDefaultArguments(D);
if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
@@ -8408,15 +8869,25 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
<< 2;
// Check to see if this name was declared as a member previously
+ NamedDecl *PrevDecl = 0;
LookupResult Previous(*this, II, Loc, LookupMemberName, ForRedeclaration);
LookupName(Previous, S);
- assert((Previous.empty() || Previous.isOverloadedResult() ||
- Previous.isSingleResult())
- && "Lookup of member name should be either overloaded, single or null");
-
- // If the name is overloaded then get any declaration else get the single result
- NamedDecl *PrevDecl = Previous.isOverloadedResult() ?
- Previous.getRepresentativeDecl() : Previous.getAsSingle<NamedDecl>();
+ switch (Previous.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundUnresolvedValue:
+ PrevDecl = Previous.getAsSingle<NamedDecl>();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ PrevDecl = Previous.getRepresentativeDecl();
+ break;
+
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::Ambiguous:
+ break;
+ }
+ Previous.suppressDiagnostics();
if (PrevDecl && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
@@ -8430,7 +8901,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
bool Mutable
= (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable);
- SourceLocation TSSL = D.getSourceRange().getBegin();
+ SourceLocation TSSL = D.getLocStart();
FieldDecl *NewFD
= CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, HasInit,
TSSL, AS, PrevDecl, &D);
@@ -8481,11 +8952,19 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
}
QualType EltTy = Context.getBaseElementType(T);
- if (!EltTy->isDependentType() &&
- RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) {
- // Fields of incomplete type force their record to be invalid.
- Record->setInvalidDecl();
- InvalidDecl = true;
+ if (!EltTy->isDependentType()) {
+ if (RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) {
+ // Fields of incomplete type force their record to be invalid.
+ Record->setInvalidDecl();
+ InvalidDecl = true;
+ } else {
+ NamedDecl *Def;
+ EltTy->isIncompleteType(&Def);
+ if (Def && Def->isInvalidDecl()) {
+ Record->setInvalidDecl();
+ InvalidDecl = true;
+ }
+ }
}
// C99 6.7.2.1p8: A member of a structure or union may have any type other
@@ -8519,11 +8998,13 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
bool ZeroWidth = false;
// If this is declared as a bit-field, check the bit-field.
- if (!InvalidDecl && BitWidth &&
- VerifyBitField(Loc, II, T, BitWidth, &ZeroWidth)) {
- InvalidDecl = true;
- BitWidth = 0;
- ZeroWidth = false;
+ if (!InvalidDecl && BitWidth) {
+ BitWidth = VerifyBitField(Loc, II, T, BitWidth, &ZeroWidth).take();
+ if (!BitWidth) {
+ InvalidDecl = true;
+ BitWidth = 0;
+ ZeroWidth = false;
+ }
}
// Check that 'mutable' is consistent with the type of the declaration.
@@ -8555,7 +9036,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
NewFD->setInvalidDecl();
}
- if (!InvalidDecl && getLangOptions().CPlusPlus) {
+ if (!InvalidDecl && getLangOpts().CPlusPlus) {
if (Record->isUnion()) {
if (const RecordType *RT = EltTy->getAs<RecordType>()) {
CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
@@ -8565,7 +9046,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// destructor, or a non-trivial copy assignment operator
// cannot be a member of a union, nor can an array of such
// objects.
- if (!getLangOptions().CPlusPlus0x && CheckNontrivialField(NewFD))
+ if (CheckNontrivialField(NewFD))
NewFD->setInvalidDecl();
}
}
@@ -8588,7 +9069,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// In auto-retain/release, infer strong retention for fields of
// retainable type.
- if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
NewFD->setInvalidDecl();
if (T.isObjCGCWeak())
@@ -8600,7 +9081,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
bool Sema::CheckNontrivialField(FieldDecl *FD) {
assert(FD);
- assert(getLangOptions().CPlusPlus && "valid check only for C++");
+ assert(getLangOpts().CPlusPlus && "valid check only for C++");
if (FD->isInvalidDecl())
return true;
@@ -8624,7 +9105,8 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
member = CXXDestructor;
if (member != CXXInvalid) {
- if (getLangOptions().ObjCAutoRefCount && RDecl->hasObjectMember()) {
+ if (!getLangOpts().CPlusPlus0x &&
+ getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) {
// Objective-C++ ARC: it is an error to have a non-trivial field of
// a union. However, system headers in Objective-C programs
// occasionally have Objective-C lifetime objects within unions,
@@ -8638,11 +9120,13 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
return false;
}
}
-
- Diag(FD->getLocation(), diag::err_illegal_union_or_anon_struct_member)
- << (int)FD->getParent()->isUnion() << FD->getDeclName() << member;
+
+ Diag(FD->getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member :
+ diag::err_illegal_union_or_anon_struct_member)
+ << (int)FD->getParent()->isUnion() << FD->getDeclName() << member;
DiagnoseNontrivial(RT, member);
- return true;
+ return !getLangOpts().CPlusPlus0x;
}
}
}
@@ -8650,6 +9134,19 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
return false;
}
+/// If the given constructor is user-provided, produce a diagnostic explaining
+/// that it makes the class non-trivial.
+static bool DiagnoseNontrivialUserProvidedCtor(Sema &S, QualType QT,
+ CXXConstructorDecl *CD,
+ Sema::CXXSpecialMember CSM) {
+ if (!CD->isUserProvided())
+ return false;
+
+ SourceLocation CtorLoc = CD->getLocation();
+ S.Diag(CtorLoc, diag::note_nontrivial_user_defined) << QT << CSM;
+ return true;
+}
+
/// DiagnoseNontrivial - Given that a class has a non-trivial
/// special member, figure out why.
void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
@@ -8664,17 +9161,20 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
case CXXDefaultConstructor:
if (RD->hasUserDeclaredConstructor()) {
typedef CXXRecordDecl::ctor_iterator ctor_iter;
- for (ctor_iter ci = RD->ctor_begin(), ce = RD->ctor_end(); ci != ce;++ci){
- const FunctionDecl *body = 0;
- ci->hasBody(body);
- if (!body || !cast<CXXConstructorDecl>(body)->isImplicitlyDefined()) {
- SourceLocation CtorLoc = ci->getLocation();
- Diag(CtorLoc, diag::note_nontrivial_user_defined) << QT << member;
+ for (ctor_iter CI = RD->ctor_begin(), CE = RD->ctor_end(); CI != CE; ++CI)
+ if (DiagnoseNontrivialUserProvidedCtor(*this, QT, *CI, member))
return;
- }
- }
- llvm_unreachable("found no user-declared constructors");
+ // No user-provided constructors; look for constructor templates.
+ typedef CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl>
+ tmpl_iter;
+ for (tmpl_iter TI(RD->decls_begin()), TE(RD->decls_end());
+ TI != TE; ++TI) {
+ CXXConstructorDecl *CD =
+ dyn_cast<CXXConstructorDecl>(TI->getTemplatedDecl());
+ if (CD && DiagnoseNontrivialUserProvidedCtor(*this, QT, CD, member))
+ return;
+ }
}
break;
@@ -8699,7 +9199,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
if (RD->hasUserDeclaredCopyAssignment()) {
// FIXME: this should use the location of the copy
// assignment, not the type.
- SourceLocation TyLoc = RD->getSourceRange().getBegin();
+ SourceLocation TyLoc = RD->getLocStart();
Diag(TyLoc, diag::note_nontrivial_user_defined) << QT << member;
return;
}
@@ -8731,7 +9231,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
// so we just iterate through the direct bases.
for (base_iter bi = RD->bases_begin(), be = RD->bases_end(); bi != be; ++bi)
if (bi->isVirtual()) {
- SourceLocation BaseLoc = bi->getSourceRange().getBegin();
+ SourceLocation BaseLoc = bi->getLocStart();
Diag(BaseLoc, diag::note_nontrivial_has_virtual) << QT << 1;
return;
}
@@ -8741,7 +9241,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
for (meth_iter mi = RD->method_begin(), me = RD->method_end(); mi != me;
++mi) {
if (mi->isVirtual()) {
- SourceLocation MLoc = mi->getSourceRange().getBegin();
+ SourceLocation MLoc = mi->getLocStart();
Diag(MLoc, diag::note_nontrivial_has_virtual) << QT << 0;
return;
}
@@ -8768,7 +9268,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
assert(BaseRT && "Don't know how to handle dependent bases");
CXXRecordDecl *BaseRecTy = cast<CXXRecordDecl>(BaseRT->getDecl());
if (!(BaseRecTy->*hasTrivial)()) {
- SourceLocation BaseLoc = bi->getSourceRange().getBegin();
+ SourceLocation BaseLoc = bi->getLocStart();
Diag(BaseLoc, diag::note_nontrivial_has_nontrivial) << QT << 1 << member;
DiagnoseNontrivial(BaseRT, member);
return;
@@ -8843,10 +9343,9 @@ Decl *Sema::ActOnIvar(Scope *S,
if (BitWidth) {
// 6.7.2.1p3, 6.7.2.1p4
- if (VerifyBitField(Loc, II, T, BitWidth)) {
+ BitWidth = VerifyBitField(Loc, II, T, BitWidth).take();
+ if (!BitWidth)
D.setInvalidType();
- BitWidth = 0;
- }
} else {
// Not a bitfield.
@@ -8870,6 +9369,8 @@ Decl *Sema::ActOnIvar(Scope *S,
: ObjCIvarDecl::None;
// Must set ivar's DeclContext to its enclosing interface.
ObjCContainerDecl *EnclosingDecl = cast<ObjCContainerDecl>(CurContext);
+ if (!EnclosingDecl || EnclosingDecl->isInvalidDecl())
+ return 0;
ObjCContainerDecl *EnclosingContext;
if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
@@ -8914,7 +9415,7 @@ Decl *Sema::ActOnIvar(Scope *S,
NewID->setInvalidDecl();
// In ARC, infer 'retaining' for ivars of retainable type.
- if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
NewID->setInvalidDecl();
if (D.getDeclSpec().isModulePrivateSpecified())
@@ -8980,11 +9481,23 @@ void Sema::ActOnFields(Scope* S,
if (EnclosingDecl->isInvalidDecl())
return;
- // Verify that all the fields are okay.
+ RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
+
+ // Start counting up the number of named members; make sure to include
+ // members of anonymous structs and unions in the total.
unsigned NumNamedMembers = 0;
+ if (Record) {
+ for (RecordDecl::decl_iterator i = Record->decls_begin(),
+ e = Record->decls_end(); i != e; i++) {
+ if (IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(*i))
+ if (IFD->getDeclName())
+ ++NumNamedMembers;
+ }
+ }
+
+ // Verify that all the fields are okay.
SmallVector<FieldDecl*, 32> RecFields;
- RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
bool ARCErrReported = false;
for (llvm::ArrayRef<Decl *>::iterator i = Fields.begin(), end = Fields.end();
i != end; ++i) {
@@ -9024,27 +9537,34 @@ void Sema::ActOnFields(Scope* S,
continue;
} else if (FDTy->isIncompleteArrayType() && Record &&
((i + 1 == Fields.end() && !Record->isUnion()) ||
- ((getLangOptions().MicrosoftExt ||
- getLangOptions().CPlusPlus) &&
+ ((getLangOpts().MicrosoftExt ||
+ getLangOpts().CPlusPlus) &&
(i + 1 == Fields.end() || Record->isUnion())))) {
// Flexible array member.
// Microsoft and g++ are more permissive regarding flexible arrays:
// they accept a flexible array in a union and also
// as the sole element of a struct/class.
- if (getLangOptions().MicrosoftExt) {
+ if (getLangOpts().MicrosoftExt) {
if (Record->isUnion())
Diag(FD->getLocation(), diag::ext_flexible_array_union_ms)
<< FD->getDeclName();
else if (Fields.size() == 1)
Diag(FD->getLocation(), diag::ext_flexible_array_empty_aggregate_ms)
<< FD->getDeclName() << Record->getTagKind();
- } else if (getLangOptions().CPlusPlus) {
+ } else if (getLangOpts().CPlusPlus) {
if (Record->isUnion())
Diag(FD->getLocation(), diag::ext_flexible_array_union_gnu)
<< FD->getDeclName();
else if (Fields.size() == 1)
Diag(FD->getLocation(), diag::ext_flexible_array_empty_aggregate_gnu)
<< FD->getDeclName() << Record->getTagKind();
+ } else if (!getLangOpts().C99) {
+ if (Record->isUnion())
+ Diag(FD->getLocation(), diag::ext_flexible_array_union_gnu)
+ << FD->getDeclName();
+ else
+ Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
+ << FD->getDeclName() << Record->getTagKind();
} else if (NumNamedMembers < 1) {
Diag(FD->getLocation(), diag::err_flexible_array_empty_struct)
<< FD->getDeclName();
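[Editor's illustration: hypothetical aggregates covering the flexible-array paths above.]

    struct Packet { int len; char data[]; };  // C99 flexible array member
    union U { int tag; char bytes[]; };       // Microsoft/GNU extension
    struct Bad { char data[]; };              // error in C: flexible array
                                              // in an otherwise empty struct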
@@ -9101,8 +9621,8 @@ void Sema::ActOnFields(Scope* S,
QualType T = Context.getObjCObjectPointerType(FD->getType());
FD->setType(T);
}
- else if (!getLangOptions().CPlusPlus) {
- if (getLangOptions().ObjCAutoRefCount && Record && !ARCErrReported) {
+ else if (!getLangOpts().CPlusPlus) {
+ if (getLangOpts().ObjCAutoRefCount && Record && !ARCErrReported) {
// It's an error in ARC if a field has lifetime.
// We don't want to report this in a system header, though,
// so we just make the field unavailable.
@@ -9118,13 +9638,14 @@ void Sema::ActOnFields(Scope* S,
"this system field has retaining ownership"));
}
} else {
- Diag(FD->getLocation(), diag::err_arc_objc_object_in_struct);
+ Diag(FD->getLocation(), diag::err_arc_objc_object_in_struct)
+ << T->isBlockPointerType();
}
ARCErrReported = true;
}
}
- else if (getLangOptions().ObjC1 &&
- getLangOptions().getGC() != LangOptions::NonGC &&
+ else if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC &&
Record && !Record->hasObjectMember()) {
if (FD->getType()->isObjCObjectPointerType() ||
FD->getType().isObjCGCStrong())
@@ -9168,7 +9689,7 @@ void Sema::ActOnFields(Scope* S,
// non-POD because of the presence of an Objective-C pointer member.
// If so, objects of this type cannot be shared between code compiled
// with automatic reference counting and code compiled with manual
// retain/release.
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
CXXRecord->hasObjectMember() &&
CXXRecord->getLinkage() == ExternalLinkage) {
if (CXXRecord->isPOD()) {
@@ -9194,7 +9715,7 @@ void Sema::ActOnFields(Scope* S,
}
// Adjust user-defined destructor exception spec.
- if (getLangOptions().CPlusPlus0x &&
+ if (getLangOpts().CPlusPlus0x &&
CXXRecord->hasUserDeclaredDestructor())
AdjustDestructorExceptionSpec(CXXRecord,CXXRecord->getDestructor());
@@ -9266,7 +9787,7 @@ void Sema::ActOnFields(Scope* S,
ObjCIvarDecl **ClsFields =
reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
- ID->setLocEnd(RBrac);
+ ID->setEndOfDefinitionLoc(RBrac);
// Add ivars to the class's DeclContext.
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
ClsFields[i]->setLexicalDeclContext(ID);
@@ -9284,6 +9805,8 @@ void Sema::ActOnFields(Scope* S,
// It is only in the implementation's lexical context.
ClsFields[I]->setLexicalDeclContext(IMPDecl);
CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
+ IMPDecl->setIvarLBraceLoc(LBrac);
+ IMPDecl->setIvarRBraceLoc(RBrac);
} else if (ObjCCategoryDecl *CDecl =
dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
// case of ivars in class extension; all other cases have been
@@ -9291,10 +9814,34 @@ void Sema::ActOnFields(Scope* S,
// FIXME. Class extension does not have a LocEnd field.
// CDecl->setLocEnd(RBrac);
// Add ivars to the class extension's DeclContext.
+ // Diagnose redeclaration of private ivars.
+ ObjCInterfaceDecl *IDecl = CDecl->getClassInterface();
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ if (IDecl) {
+ if (const ObjCIvarDecl *ClsIvar =
+ IDecl->getIvarDecl(ClsFields[i]->getIdentifier())) {
+ Diag(ClsFields[i]->getLocation(),
+ diag::err_duplicate_ivar_declaration);
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ for (const ObjCCategoryDecl *ClsExtDecl =
+ IDecl->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) {
+ if (const ObjCIvarDecl *ClsExtIvar =
+ ClsExtDecl->getIvarDecl(ClsFields[i]->getIdentifier())) {
+ Diag(ClsFields[i]->getLocation(),
+ diag::err_duplicate_ivar_declaration);
+ Diag(ClsExtIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ }
+ }
ClsFields[i]->setLexicalDeclContext(CDecl);
CDecl->addDecl(ClsFields[i]);
}
+ CDecl->setIvarLBraceLoc(LBrac);
+ CDecl->setIvarRBraceLoc(RBrac);
}
}
@@ -9360,56 +9907,70 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
if (Val && DiagnoseUnexpandedParameterPack(Val, UPPC_EnumeratorValue))
Val = 0;
+ if (Val)
+ Val = DefaultLvalueConversion(Val).take();
+
if (Val) {
if (Enum->isDependentType() || Val->isTypeDependent())
EltTy = Context.DependentTy;
else {
- // C99 6.7.2.2p2: Make sure we have an integer constant expression.
SourceLocation ExpLoc;
- if (!Val->isValueDependent() &&
- VerifyIntegerConstantExpression(Val, &EnumVal)) {
- Val = 0;
- } else {
- if (!getLangOptions().CPlusPlus) {
- // C99 6.7.2.2p2:
- // The expression that defines the value of an enumeration constant
- // shall be an integer constant expression that has a value
- // representable as an int.
-
- // Complain if the value is not representable in an int.
- if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy))
- Diag(IdLoc, diag::ext_enum_value_not_int)
- << EnumVal.toString(10) << Val->getSourceRange()
- << (EnumVal.isUnsigned() || EnumVal.isNonNegative());
- else if (!Context.hasSameType(Val->getType(), Context.IntTy)) {
- // Force the type of the expression to 'int'.
- Val = ImpCastExprToType(Val, Context.IntTy, CK_IntegralCast).take();
- }
- }
-
+ if (getLangOpts().CPlusPlus0x && Enum->isFixed() &&
+ !getLangOpts().MicrosoftMode) {
+ // C++11 [dcl.enum]p5: If the underlying type is fixed, [...] the
+ // constant-expression in the enumerator-definition shall be a converted
+ // constant expression of the underlying type.
+ EltTy = Enum->getIntegerType();
+ ExprResult Converted =
+ CheckConvertedConstantExpression(Val, EltTy, EnumVal,
+ CCEK_Enumerator);
+ if (Converted.isInvalid())
+ Val = 0;
+ else
+ Val = Converted.take();
+ } else if (!Val->isValueDependent() &&
+ !(Val = VerifyIntegerConstantExpression(Val,
+ &EnumVal).take())) {
+ // C99 6.7.2.2p2: Make sure we have an integer constant expression.
+ } else {
if (Enum->isFixed()) {
EltTy = Enum->getIntegerType();
- // C++0x [dcl.enum]p5:
- // ... if the initializing value of an enumerator cannot be
- // represented by the underlying type, the program is ill-formed.
+ // In Obj-C and Microsoft mode, require the enumeration value to be
+ // representable in the underlying type of the enumeration. In C++11,
+ // we perform a non-narrowing conversion as part of converted constant
+ // expression checking.
if (!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
- if (getLangOptions().MicrosoftExt) {
+ if (getLangOpts().MicrosoftMode) {
Diag(IdLoc, diag::ext_enumerator_too_large) << EltTy;
Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).take();
- } else
- Diag(IdLoc, diag::err_enumerator_too_large)
- << EltTy;
+ } else
+ Diag(IdLoc, diag::err_enumerator_too_large) << EltTy;
} else
Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).take();
- }
- else {
- // C++0x [dcl.enum]p5:
+ } else if (getLangOpts().CPlusPlus) {
+ // C++11 [dcl.enum]p5:
// If the underlying type is not fixed, the type of each enumerator
// is the type of its initializing value:
// - If an initializer is specified for an enumerator, the
// initializing value has the same type as the expression.
EltTy = Val->getType();
+ } else {
+ // C99 6.7.2.2p2:
+ // The expression that defines the value of an enumeration constant
+ // shall be an integer constant expression that has a value
+ // representable as an int.
+
+ // Complain if the value is not representable in an int.
+ if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy))
+ Diag(IdLoc, diag::ext_enum_value_not_int)
+ << EnumVal.toString(10) << Val->getSourceRange()
+ << (EnumVal.isUnsigned() || EnumVal.isNonNegative());
+ else if (!Context.hasSameType(Val->getType(), Context.IntTy)) {
+ // Force the type of the expression to 'int'.
+ Val = ImpCastExprToType(Val, Context.IntTy, CK_IntegralCast).take();
+ }
+ EltTy = Val->getType();
}
}
}
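[Editor's illustration: hypothetical enumerators exercising the reworked paths.]

    enum Fixed : char { Big = 300 };   // C++11: error, 300 is not a
                                       // converted constant expression of
                                       // type 'char' (it narrows)
    enum Plain { Huge = 5000000000 };  // C: extension warning, the value
                                       // is not representable as 'int'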
@@ -9483,9 +10044,9 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
// an int (C99 6.7.2.2p2). However, we support GCC's extension that
// permits enumerator values that are representable in some larger
// integral type.
- if (!getLangOptions().CPlusPlus && !T.isNull())
+ if (!getLangOpts().CPlusPlus && !T.isNull())
Diag(IdLoc, diag::warn_enum_value_overflow);
- } else if (!getLangOptions().CPlusPlus &&
+ } else if (!getLangOpts().CPlusPlus &&
!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
// Enforce C99 6.7.2.2p2 even when we compute the next value.
Diag(IdLoc, diag::ext_enum_value_not_int)
@@ -9497,7 +10058,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
if (!EltTy->isDependentType()) {
// Make the enumerator value match the signedness and size of the
// enumerator's type.
- EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
+ EnumVal = EnumVal.extOrTrunc(Context.getIntWidth(EltTy));
EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
}
@@ -9509,11 +10070,10 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attr,
- SourceLocation EqualLoc, Expr *val) {
+ SourceLocation EqualLoc, Expr *Val) {
EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl);
EnumConstantDecl *LastEnumConst =
cast_or_null<EnumConstantDecl>(lastEnumConst);
- Expr *Val = static_cast<Expr*>(val);
// The scope passed in may not be a decl scope. Zip up the scope tree until
// we find one that is.
@@ -9533,7 +10093,7 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
if (PrevDecl) {
// When in C++, we may get a TagDecl with the same name; in this case the
// enum constant will 'hide' the tag.
- assert((getLangOptions().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
+ assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
"Received TagDecl when not in C++!");
if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S)) {
if (isa<EnumConstantDecl>(PrevDecl))
@@ -9651,7 +10211,11 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
Packed = true;
if (Enum->isFixed()) {
- BestType = BestPromotionType = Enum->getIntegerType();
+ BestType = Enum->getIntegerType();
+ if (BestType->isPromotableIntegerType())
+ BestPromotionType = Context.getPromotedIntegerType(BestType);
+ else
+ BestPromotionType = BestType;
// We don't need to set BestWidth, because BestType is going to be the type
// of the enumerators, but we do anyway because otherwise some compilers
// warn that it might be used uninitialized.
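
Computing the promotion from the underlying type, rather than reusing the enum type itself, matches how a plain integer of that type would promote. Hypothetically:

    enum E : short { A };
    int f() { return A + 0; }   // A now promotes to 'int', the promoted
                                // type of 'short', in arithmetic
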
@@ -9701,13 +10265,13 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
BestType = Context.UnsignedIntTy;
BestWidth = IntWidth;
BestPromotionType
- = (NumPositiveBits == BestWidth || !getLangOptions().CPlusPlus)
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedIntTy : Context.IntTy;
} else if (NumPositiveBits <=
(BestWidth = Context.getTargetInfo().getLongWidth())) {
BestType = Context.UnsignedLongTy;
BestPromotionType
- = (NumPositiveBits == BestWidth || !getLangOptions().CPlusPlus)
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedLongTy : Context.LongTy;
} else {
BestWidth = Context.getTargetInfo().getLongLongWidth();
@@ -9715,7 +10279,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
"How could an initializer get larger than ULL?");
BestType = Context.UnsignedLongLongTy;
BestPromotionType
- = (NumPositiveBits == BestWidth || !getLangOptions().CPlusPlus)
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedLongLongTy : Context.LongLongTy;
}
}
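
For enums without a fixed underlying type, the chain above widens BestType until all positive values fit. A hedged example, assuming an LP64 target:

    enum Big { A = 0x1FFFFFFFFULL };   // 33 positive bits: BestType is
                                       // 'unsigned long'; it promotes to
                                       // 'long' in C++, 'unsigned long' in C
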
@@ -9740,14 +10304,15 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
QualType NewTy;
unsigned NewWidth;
bool NewSign;
- if (!getLangOptions().CPlusPlus &&
+ if (!getLangOpts().CPlusPlus &&
+ !Enum->isFixed() &&
isRepresentableIntegerValue(Context, InitVal, Context.IntTy)) {
NewTy = Context.IntTy;
NewWidth = IntWidth;
NewSign = true;
} else if (ECD->getType() == BestType) {
// Already the right type!
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
// C++ [dcl.enum]p4: Following the closing brace of an
// enum-specifier, each enumerator has the type of its
// enumeration.
@@ -9772,7 +10337,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
ECD->getInitExpr(),
/*base paths*/ 0,
VK_RValue));
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
// C++ [dcl.enum]p4: Following the closing brace of an
// enum-specifier, each enumerator has the type of its
// enumeration.
@@ -9783,6 +10348,12 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
Enum->completeDefinition(BestType, BestPromotionType,
NumPositiveBits, NumNegativeBits);
+
+ // If we're declaring a function, ensure this decl isn't forgotten about -
+ // it needs to go into the function scope.
+ if (InFunctionDeclarator)
+ DeclsInPrototypeScope.push_back(Enum);
+
}
Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
@@ -9797,31 +10368,50 @@ Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
return New;
}
-DeclResult Sema::ActOnModuleImport(SourceLocation ImportLoc,
- IdentifierInfo &ModuleName,
- SourceLocation ModuleNameLoc) {
- ModuleKey Module = PP.getModuleLoader().loadModule(ImportLoc,
- ModuleName, ModuleNameLoc);
- if (!Module)
+DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
+ SourceLocation ImportLoc,
+ ModuleIdPath Path) {
+ Module *Mod = PP.getModuleLoader().loadModule(ImportLoc, Path,
+ Module::AllVisible,
+ /*IsIncludeDirective=*/false);
+ if (!Mod)
return true;
- // FIXME: Actually create a declaration to describe the module import.
- (void)Module;
- return DeclResult((Decl *)0);
+ llvm::SmallVector<SourceLocation, 2> IdentifierLocs;
+ Module *ModCheck = Mod;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ // If we've run out of module parents, just drop the remaining identifiers.
+ // We need the length to be consistent.
+ if (!ModCheck)
+ break;
+ ModCheck = ModCheck->Parent;
+
+ IdentifierLocs.push_back(Path[I].second);
+ }
+
+ ImportDecl *Import = ImportDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ AtLoc.isValid()? AtLoc : ImportLoc,
+ Mod, IdentifierLocs);
+ Context.getTranslationUnitDecl()->addDecl(Import);
+ return Import;
}
-void
-Sema::diagnoseModulePrivateRedeclaration(NamedDecl *New, NamedDecl *Old,
- SourceLocation ModulePrivateKeyword) {
- assert(!Old->isModulePrivate() && "Old is module-private!");
-
- Diag(New->getLocation(), diag::err_module_private_follows_public)
- << New->getDeclName() << SourceRange(ModulePrivateKeyword);
- Diag(Old->getLocation(), diag::note_previous_declaration)
- << Old->getDeclName();
-
- // Drop the __module_private__ from the new declaration, since it's invalid.
- New->setModulePrivate(false);
+void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc,
+ SourceLocation AliasNameLoc) {
+ Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc,
+ LookupOrdinaryName);
+ AsmLabelAttr *Attr =
+ ::new (Context) AsmLabelAttr(AliasNameLoc, Context, AliasName->getName());
+
+ if (PrevDecl)
+ PrevDecl->addAttr(Attr);
+ else
+ (void)ExtnameUndeclaredIdentifiers.insert(
+ std::pair<IdentifierInfo*,AsmLabelAttr*>(Name, Attr));
}
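
The handler either attaches the asm-label attribute immediately or parks it in ExtnameUndeclaredIdentifiers for when the name is declared. A hypothetical use of the pragma:

    #pragma redefine_extname open open64
    int open(const char *path, int flags);   // pragma precedes the decl, so
                                              // the attribute is applied once
                                              // 'open' is declared; emitted
                                              // symbol: open64
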
void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
index 69baf79..5c6ddd2 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -35,17 +35,14 @@ enum AttributeDeclKind {
ExpectedVariableOrFunction,
ExpectedFunctionOrMethod,
ExpectedParameter,
- ExpectedParameterOrMethod,
ExpectedFunctionMethodOrBlock,
- ExpectedClassOrVirtualMethod,
ExpectedFunctionMethodOrParameter,
ExpectedClass,
- ExpectedVirtualMethod,
- ExpectedClassMember,
ExpectedVariable,
ExpectedMethod,
ExpectedVariableFunctionOrLabel,
- ExpectedFieldOrGlobalVar
+ ExpectedFieldOrGlobalVar,
+ ExpectedStruct
};
//===----------------------------------------------------------------------===//
@@ -275,21 +272,25 @@ static const RecordType *getRecordType(QualType QT) {
/// \brief Thread Safety Analysis: Checks that the passed-in type
/// resolves to a lockable record. May emit a warning.
-static bool checkForLockableRecord(Sema &S, Decl *D, const AttributeList &Attr,
- const RecordType *RT) {
- // Flag error if could not get record type for this argument.
+static void checkForLockableRecord(Sema &S, Decl *D, const AttributeList &Attr,
+ QualType Ty) {
+ const RecordType *RT = getRecordType(Ty);
+
+ // Warn if could not get record type for this argument.
if (!RT) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_class)
- << Attr.getName();
- return false;
+ S.Diag(Attr.getLoc(), diag::warn_attribute_argument_not_class)
+ << Attr.getName() << Ty.getAsString();
+ return;
}
- // Flag error if the type is not lockable.
+ // Don't check for lockable if the class hasn't been defined yet.
+ if (RT->isIncompleteType())
+ return;
+ // Warn if the type is not lockable.
if (!RT->getDecl()->getAttr<LockableAttr>()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_lockable)
- << Attr.getName();
- return false;
+ S.Diag(Attr.getLoc(), diag::warn_attribute_argument_not_lockable)
+ << Attr.getName() << Ty.getAsString();
+ return;
}
- return true;
}
/// \brief Thread Safety Analysis: Checks that all attribute arguments, starting
@@ -331,12 +332,10 @@ static bool checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
return false;
}
ArgTy = FD->getParamDecl(ParamIdxFromZero)->getType();
- RT = getRecordType(ArgTy);
}
}
- if (!checkForLockableRecord(S, D, Attr, RT))
- return false;
+ checkForLockableRecord(S, D, Attr, ArgTy);
Args.push_back(ArgExp);
}
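
With the relaxed checking above, a non-lockable argument now produces a warning instead of an error, and incomplete types are skipped entirely. A minimal sketch:

    struct __attribute__((lockable)) Mutex {};
    struct Plain {};
    Mutex mu;
    Plain p;
    int a __attribute__((guarded_by(mu)));   // OK: Mutex is lockable
    int b __attribute__((guarded_by(p)));    // now a warning: 'Plain' is
                                             // not lockable
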
@@ -393,13 +392,9 @@ static void handleGuardedByAttr(Sema &S, Decl *D, const AttributeList &Attr,
if (pointer && !checkIsPointer(S, D, Attr))
return;
- if (Arg->isTypeDependent())
- // FIXME: handle attributes with dependent types
- return;
-
- // check that the argument is lockable object
- if (!checkForLockableRecord(S, D, Attr, getRecordType(Arg->getType())))
- return;
+ if (!Arg->isTypeDependent()) {
+ checkForLockableRecord(S, D, Attr, Arg->getType());
+ }
if (pointer)
D->addAttr(::new (S.Context) PtGuardedByAttr(Attr.getRange(),
@@ -446,6 +441,23 @@ static void handleNoThreadSafetyAttr(Sema &S, Decl *D,
S.Context));
}
+static void handleNoAddressSafetyAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NoAddressSafetyAnalysisAttr(Attr.getRange(),
+ S.Context));
+}
+
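
The new handler accepts no arguments and only function (or function template) declarations. Hypothetically:

    __attribute__((no_address_safety_analysis))
    void poker();                                        // accepted

    int g __attribute__((no_address_safety_analysis));  // warning: attribute
                                                         // only applies to
                                                         // functions
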
static void handleAcquireOrderAttr(Sema &S, Decl *D, const AttributeList &Attr,
bool before) {
assert(!Attr.isInvalid());
@@ -466,7 +478,7 @@ static void handleAcquireOrderAttr(Sema &S, Decl *D, const AttributeList &Attr,
if (!QT->isDependentType()) {
const RecordType *RT = getRecordType(QT);
if (!RT || !RT->getDecl()->getAttr<LockableAttr>()) {
- S.Diag(Attr.getLoc(), diag::err_attribute_decl_not_lockable)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_decl_not_lockable)
<< Attr.getName();
return;
}
@@ -636,8 +648,7 @@ static void handleLockReturnedAttr(Sema &S, Decl *D,
return;
  // Check that the argument is a lockable object.
- if (!checkForLockableRecord(S, D, Attr, getRecordType(Arg->getType())))
- return;
+ checkForLockableRecord(S, D, Attr, Arg->getType());
D->addAttr(::new (S.Context) LockReturnedAttr(Attr.getRange(), S.Context, Arg));
}
@@ -684,10 +695,12 @@ static void handleExtVectorTypeAttr(Sema &S, Scope *scope, Decl *D,
// Special case where the argument is a template id.
if (Attr.getParameterName()) {
CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Attr.getParameterName(), Attr.getLoc());
- ExprResult Size = S.ActOnIdExpression(scope, SS, id, false, false);
+ ExprResult Size = S.ActOnIdExpression(scope, SS, TemplateKWLoc, id,
+ false, false);
if (Size.isInvalid())
return;
@@ -760,14 +773,14 @@ static bool checkIBOutletCommon(Sema &S, Decl *D, const AttributeList &Attr) {
// have an object reference type.
if (const ObjCIvarDecl *VD = dyn_cast<ObjCIvarDecl>(D)) {
if (!VD->getType()->getAs<ObjCObjectPointerType>()) {
- S.Diag(Attr.getLoc(), diag::err_iboutlet_object_type)
+ S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type)
<< Attr.getName() << VD->getType() << 0;
return false;
}
}
else if (const ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) {
if (!PD->getType()->getAs<ObjCObjectPointerType>()) {
- S.Diag(Attr.getLoc(), diag::err_iboutlet_object_type)
+ S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type)
<< Attr.getName() << PD->getType() << 1;
return false;
}
@@ -776,7 +789,7 @@ static bool checkIBOutletCommon(Sema &S, Decl *D, const AttributeList &Attr) {
S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName();
return false;
}
-
+
return true;
}
@@ -805,7 +818,7 @@ static void handleIBOutletCollection(Sema &S, Decl *D,
IdentifierInfo *II = Attr.getParameterName();
if (!II)
- II = &S.Context.Idents.get("id");
+ II = &S.Context.Idents.get("NSObject");
ParsedType TypeRep = S.getTypeName(*II, Attr.getLoc(),
S.getScopeForContext(D->getDeclContext()->getParent()));
@@ -818,8 +831,7 @@ static void handleIBOutletCollection(Sema &S, Decl *D,
  // FIXME: GNU attribute extension ignores use of builtin types in
// attributes. So, __attribute__((iboutletcollection(char))) will be
// treated as __attribute__((iboutletcollection())).
- if (!QT->isObjCIdType() && !QT->isObjCClassType() &&
- !QT->isObjCObjectType()) {
+ if (!QT->isObjCIdType() && !QT->isObjCObjectType()) {
S.Diag(Attr.getLoc(), diag::err_iboutletcollection_type) << II;
return;
}
@@ -1056,8 +1068,6 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
}
break;
}
- default:
- llvm_unreachable("Unknown ownership attribute");
} // switch
// Check we don't have a conflict with another ownership attribute.
@@ -1108,7 +1118,6 @@ static bool hasEffectivelyInternalLinkage(NamedDecl *D) {
return false;
}
llvm_unreachable("unknown linkage kind!");
- return false;
}
static void handleWeakRefAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -1580,6 +1589,39 @@ static void handleArcWeakrefUnavailableAttr(Sema &S, Decl *D,
Attr.getRange(), S.Context));
}
+static void handleObjCRootClassAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!isa<ObjCInterfaceDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_requires_objc_interface);
+ return;
+ }
+
+ unsigned NumArgs = Attr.getNumArgs();
+ if (NumArgs > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ObjCRootClassAttr(Attr.getRange(), S.Context));
+}
+
+static void handleObjCRequiresPropertyDefsAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!isa<ObjCInterfaceDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_suppress_autosynthesis);
+ return;
+ }
+
+ unsigned NumArgs = Attr.getNumArgs();
+ if (NumArgs > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ObjCRequiresPropertyDefsAttr(
+ Attr.getRange(), S.Context));
+}
+
static void handleAvailabilityAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
IdentifierInfo *Platform = Attr.getParameterName();
@@ -1625,12 +1667,19 @@ static void handleAvailabilityAttr(Sema &S, Decl *D,
return;
}
+ StringRef Str;
+ const StringLiteral *SE =
+ dyn_cast_or_null<const StringLiteral>(Attr.getMessageExpr());
+ if (SE)
+ Str = SE->getString();
+
D->addAttr(::new (S.Context) AvailabilityAttr(Attr.getRange(), S.Context,
Platform,
Introduced.Version,
Deprecated.Version,
Obsoleted.Version,
- IsUnavailable));
+ IsUnavailable,
+ Str));
}
static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -1657,9 +1706,16 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
type = VisibilityAttr::Hidden;
else if (TypeStr == "internal")
type = VisibilityAttr::Hidden; // FIXME
- else if (TypeStr == "protected")
- type = VisibilityAttr::Protected;
- else {
+ else if (TypeStr == "protected") {
+ // Complain about attempts to use protected visibility on targets
+ // (like Darwin) that don't support it.
+ if (!S.Context.getTargetInfo().hasProtectedVisibility()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_protected_visibility);
+ type = VisibilityAttr::Default;
+ } else {
+ type = VisibilityAttr::Protected;
+ }
+ } else {
S.Diag(Attr.getLoc(), diag::warn_attribute_unknown_visibility) << TypeStr;
return;
}
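
On targets without protected visibility (Darwin, per the comment above) the attribute now degrades to default with a warning rather than being honored. A hypothetical example:

    __attribute__((visibility("protected")))
    int shared_state;   // on Darwin: warning, treated as
                        // visibility("default")
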
@@ -1747,6 +1803,15 @@ static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
return;
}
}
+ else if (!isa<ObjCPropertyDecl>(D)) {
+ // It is okay to include this attribute on properties, e.g.:
+ //
+ // @property (retain, nonatomic) struct Bork *Q __attribute__((NSObject));
+ //
+  // In that case the attribute, following tradition, suppresses the error
+  // that the declaration above would otherwise produce.
+ S.Diag(D->getLocation(), diag::warn_nsobject_attribute);
+ }
D->addAttr(::new (S.Context) ObjCNSObjectAttr(Attr.getRange(), S.Context));
}
@@ -1853,10 +1918,11 @@ static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
return;
}
- } else if (isa<BlockDecl>(D)) {
- // Note! BlockDecl is typeless. Variadic diagnostics will be issued by the
- // caller.
- ;
+ } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (!BD->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 1;
+ return;
+ }
} else if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
QualType Ty = V->getType();
if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
@@ -1915,6 +1981,10 @@ static void handleWeakAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
if (!isa<VarDecl>(D) && !isa<FunctionDecl>(D)) {
+ if (isa<CXXRecordDecl>(D)) {
+ D->addAttr(::new (S.Context) WeakAttr(Attr.getRange(), S.Context));
+ return;
+ }
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariableOrFunction;
return;
@@ -1946,7 +2016,7 @@ static void handleWeakImportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
<< "weak_import" << 2 /*variable and function*/;
else if (isa<ObjCPropertyDecl>(D) || isa<ObjCMethodDecl>(D) ||
(S.Context.getTargetInfo().getTriple().isOSDarwin() &&
- isa<ObjCInterfaceDecl>(D))) {
+ (isa<ObjCInterfaceDecl>(D) || isa<EnumDecl>(D)))) {
// Nothing to warn about here.
} else
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
@@ -2109,7 +2179,7 @@ static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
D->addAttr(::new (S.Context) CleanupAttr(Attr.getRange(), S.Context, FD));
- S.MarkDeclarationReferenced(Attr.getParameterLoc(), FD);
+ S.MarkFunctionReferenced(Attr.getParameterLoc(), FD);
}
/// Handle __attribute__((format_arg((idx)))) attribute based on
@@ -2209,8 +2279,7 @@ static FormatAttrKind getFormatAttrKind(StringRef Format) {
// Otherwise, check for supported formats.
if (Format == "scanf" || Format == "printf" || Format == "printf0" ||
- Format == "strfmon" || Format == "cmn_err" || Format == "strftime" ||
- Format == "NSString" || Format == "CFString" || Format == "vcmn_err" ||
+ Format == "strfmon" || Format == "cmn_err" || Format == "vcmn_err" ||
Format == "zcmn_err" ||
Format == "kprintf") // OpenBSD.
return SupportedFormat;
@@ -2226,7 +2295,7 @@ static FormatAttrKind getFormatAttrKind(StringRef Format) {
/// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html
static void handleInitPriorityAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- if (!S.getLangOptions().CPlusPlus) {
+ if (!S.getLangOpts().CPlusPlus) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
return;
}
@@ -2541,6 +2610,10 @@ static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E) {
+ // FIXME: Handle pack-expansions here.
+ if (DiagnoseUnexpandedParameterPack(E))
+ return;
+
if (E->isTypeDependent() || E->isValueDependent()) {
// Save dependent expressions in the AST to be instantiated.
D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, E));
@@ -2550,18 +2623,19 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E) {
SourceLocation AttrLoc = AttrRange.getBegin();
// FIXME: Cache the number on the Attr object?
llvm::APSInt Alignment(32);
- if (!E->isIntegerConstantExpr(Alignment, Context)) {
- Diag(AttrLoc, diag::err_attribute_argument_not_int)
- << "aligned" << E->getSourceRange();
+ ExprResult ICE =
+ VerifyIntegerConstantExpression(E, &Alignment,
+ PDiag(diag::err_attribute_argument_not_int) << "aligned",
+ /*AllowFold*/ false);
+ if (ICE.isInvalid())
return;
- }
if (!llvm::isPowerOf2_64(Alignment.getZExtValue())) {
Diag(AttrLoc, diag::err_attribute_aligned_not_power_of_two)
<< E->getSourceRange();
return;
}
- D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, E));
+ D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, ICE.take()));
}
void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *TS) {
@@ -2975,7 +3049,6 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
default:
llvm_unreachable("unexpected attribute kind");
- return;
}
}
@@ -3024,7 +3097,7 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
}
// FALLS THROUGH
}
- default: llvm_unreachable("unexpected attribute kind"); return true;
+ default: llvm_unreachable("unexpected attribute kind");
}
return false;
@@ -3195,7 +3268,7 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
returnType = MD->getResultType();
else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
returnType = PD->getType();
- else if (S.getLangOptions().ObjCAutoRefCount && hasDeclarator(D) &&
+ else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
(Attr.getKind() == AttributeList::AT_ns_returns_retained))
return; // ignore: was handled as a type attribute
else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
@@ -3210,7 +3283,7 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
bool typeOK;
bool cf;
switch (Attr.getKind()) {
- default: llvm_unreachable("invalid ownership attribute"); return;
+ default: llvm_unreachable("invalid ownership attribute");
case AttributeList::AT_ns_returns_autoreleased:
case AttributeList::AT_ns_returns_retained:
case AttributeList::AT_ns_returns_not_retained:
@@ -3265,7 +3338,7 @@ static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
if (!isa<ObjCMethodDecl>(method)) {
S.Diag(method->getLocStart(), diag::err_attribute_wrong_decl_type)
- << SourceRange(loc, loc) << attr.getName() << 13 /* methods */;
+ << SourceRange(loc, loc) << attr.getName() << ExpectedMethod;
return;
}
@@ -3290,7 +3363,7 @@ static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
static void handleCFTransferAttr(Sema &S, Decl *D, const AttributeList &A) {
if (!isa<FunctionDecl>(D)) {
S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
- << A.getRange() << A.getName() << 0 /*function*/;
+ << A.getRange() << A.getName() << ExpectedFunction;
return;
}
@@ -3326,7 +3399,7 @@ static void handleNSBridgedAttr(Sema &S, Scope *Sc, Decl *D,
RecordDecl *RD = dyn_cast<RecordDecl>(D);
if (!RD || RD->isUnion()) {
S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
- << Attr.getRange() << Attr.getName() << 14 /*struct */;
+ << Attr.getRange() << Attr.getName() << ExpectedStruct;
}
IdentifierInfo *ParmName = Attr.getParameterName();
@@ -3334,7 +3407,7 @@ static void handleNSBridgedAttr(Sema &S, Scope *Sc, Decl *D,
// In Objective-C, verify that the type names an Objective-C type.
// We don't want to check this outside of ObjC because people sometimes
// do crazy C declarations of Objective-C types.
- if (ParmName && S.getLangOptions().ObjC1) {
+ if (ParmName && S.getLangOpts().ObjC1) {
// Check for an existing type with this name.
LookupResult R(S, DeclarationName(ParmName), Attr.getParameterLoc(),
Sema::LookupOrdinaryName);
@@ -3356,14 +3429,14 @@ static void handleObjCOwnershipAttr(Sema &S, Decl *D,
if (hasDeclarator(D)) return;
S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
- << Attr.getRange() << Attr.getName() << 12 /* variable */;
+ << Attr.getRange() << Attr.getName() << ExpectedVariable;
}
static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
if (!isa<VarDecl>(D) && !isa<FieldDecl>(D)) {
S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
- << Attr.getRange() << Attr.getName() << 12 /* variable */;
+ << Attr.getRange() << Attr.getName() << ExpectedVariable;
return;
}
@@ -3406,9 +3479,19 @@ static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
}
static bool isKnownDeclSpecAttr(const AttributeList &Attr) {
- return Attr.getKind() == AttributeList::AT_dllimport ||
- Attr.getKind() == AttributeList::AT_dllexport ||
- Attr.getKind() == AttributeList::AT_uuid;
+ switch (Attr.getKind()) {
+ default:
+ return false;
+ case AttributeList::AT_dllimport:
+ case AttributeList::AT_dllexport:
+ case AttributeList::AT_uuid:
+ case AttributeList::AT_deprecated:
+ case AttributeList::AT_noreturn:
+ case AttributeList::AT_nothrow:
+ case AttributeList::AT_naked:
+ case AttributeList::AT_noinline:
+ return true;
+ }
}
//===----------------------------------------------------------------------===//
@@ -3433,7 +3516,7 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
bool IsCurly = StrRef.size() > 1 && StrRef.front() == '{' &&
StrRef.back() == '}';
-
+
// Validate GUID length.
if (IsCurly && StrRef.size() != 38) {
S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
@@ -3444,7 +3527,7 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
return;
}
- // GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or
+ // GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or
// "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}"
StringRef::iterator I = StrRef.begin();
if (IsCurly) // Skip the optional '{'
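
Both GUID spellings pass the length checks: 38 characters with braces, 36 without. A hypothetical Microsoft-extensions example:

    struct __declspec(uuid("{6B29FC40-CA47-1067-B31D-00DD010662DA}")) W;
    struct __declspec(uuid("6B29FC40-CA47-1067-B31D-00DD010662DA")) B;
    struct __declspec(uuid("6B29FC40")) Bad;   // error: uuid attribute
                                               // contains a malformed GUID
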
@@ -3487,9 +3570,9 @@ static void ProcessNonInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
const AttributeList &Attr) {
switch (Attr.getKind()) {
- case AttributeList::AT_IBAction: handleIBAction(S, D, Attr); break;
- case AttributeList::AT_IBOutlet: handleIBOutlet(S, D, Attr); break;
- case AttributeList::AT_IBOutletCollection:
+ case AttributeList::AT_ibaction: handleIBAction(S, D, Attr); break;
+ case AttributeList::AT_iboutlet: handleIBOutlet(S, D, Attr); break;
+ case AttributeList::AT_iboutletcollection:
handleIBOutletCollection(S, D, Attr); break;
case AttributeList::AT_address_space:
case AttributeList::AT_opencl_image_access:
@@ -3574,19 +3657,25 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_cf_returns_retained:
handleNSReturnsRetainedAttr(S, D, Attr); break;
- case AttributeList::AT_reqd_wg_size:
+ case AttributeList::AT_reqd_work_group_size:
handleReqdWorkGroupSize(S, D, Attr); break;
case AttributeList::AT_init_priority:
handleInitPriorityAttr(S, D, Attr); break;
case AttributeList::AT_packed: handlePackedAttr (S, D, Attr); break;
- case AttributeList::AT_MsStruct: handleMsStructAttr (S, D, Attr); break;
+ case AttributeList::AT_ms_struct: handleMsStructAttr (S, D, Attr); break;
case AttributeList::AT_section: handleSectionAttr (S, D, Attr); break;
case AttributeList::AT_unavailable: handleUnavailableAttr (S, D, Attr); break;
- case AttributeList::AT_arc_weakref_unavailable:
+ case AttributeList::AT_objc_arc_weak_reference_unavailable:
handleArcWeakrefUnavailableAttr (S, D, Attr);
break;
+ case AttributeList::AT_objc_root_class:
+ handleObjCRootClassAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_objc_requires_property_definitions:
+ handleObjCRequiresPropertyDefsAttr (S, D, Attr);
+ break;
case AttributeList::AT_unused: handleUnusedAttr (S, D, Attr); break;
case AttributeList::AT_returns_twice:
handleReturnsTwiceAttr(S, D, Attr);
@@ -3607,7 +3696,7 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_objc_method_family:
handleObjCMethodFamilyAttr(S, D, Attr);
break;
- case AttributeList::AT_nsobject: handleObjCNSObject (S, D, Attr); break;
+ case AttributeList::AT_NSObject: handleObjCNSObject (S, D, Attr); break;
case AttributeList::AT_blocks: handleBlocksAttr (S, D, Attr); break;
case AttributeList::AT_sentinel: handleSentinelAttr (S, D, Attr); break;
case AttributeList::AT_const: handleConstAttr (S, D, Attr); break;
@@ -3647,6 +3736,9 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_scoped_lockable:
handleLockableAttr(S, D, Attr, /*scoped = */true);
break;
+ case AttributeList::AT_no_address_safety_analysis:
+ handleNoAddressSafetyAttr(S, D, Attr);
+ break;
case AttributeList::AT_no_thread_safety_analysis:
handleNoThreadSafetyAttr(S, D, Attr);
break;
@@ -3924,7 +4016,7 @@ static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
"this system declaration uses an unsupported type"));
return;
}
- if (S.getLangOptions().ObjCAutoRefCount)
+ if (S.getLangOpts().ObjCAutoRefCount)
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(decl)) {
      // FIXME: we may want to suppress diagnostics for all
      // kinds of forbidden-type messages on unavailable functions.
@@ -3982,7 +4074,7 @@ void Sema::DelayedDiagnostics::popParsingDecl(Sema &S, ParsingDeclState state,
// We only want to actually emit delayed diagnostics when we
// successfully parsed a decl.
- if (decl && !decl->isInvalidDecl()) {
+ if (decl) {
// We emit all the active diagnostics, not just those starting
// from the saved state. The idea is this: we get one push for a
// decl spec and another for each declarator; in a decl group like:
@@ -3997,7 +4089,9 @@ void Sema::DelayedDiagnostics::popParsingDecl(Sema &S, ParsingDeclState state,
switch (diag.Kind) {
case DelayedDiagnostic::Deprecation:
- S.HandleDelayedDeprecationCheck(diag, decl);
+ // Don't bother giving deprecation diagnostics if the decl is invalid.
+ if (!decl->isInvalidDecl())
+ S.HandleDelayedDeprecationCheck(diag, decl);
break;
case DelayedDiagnostic::Access:
@@ -4039,6 +4133,11 @@ void Sema::HandleDelayedDeprecationCheck(DelayedDiagnostic &DD,
Diag(DD.Loc, diag::warn_deprecated_message)
<< DD.getDeprecationDecl()->getDeclName()
<< DD.getDeprecationMessage();
+ else if (DD.getUnknownObjCClass()) {
+ Diag(DD.Loc, diag::warn_deprecated_fwdclass_message)
+ << DD.getDeprecationDecl()->getDeclName();
+ Diag(DD.getUnknownObjCClass()->getLocation(), diag::note_forward_class);
+ }
else
Diag(DD.Loc, diag::warn_deprecated)
<< DD.getDeprecationDecl()->getDeclName();
@@ -4049,7 +4148,9 @@ void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message,
const ObjCInterfaceDecl *UnknownObjCClass) {
// Delay if we're currently parsing a declaration.
if (DelayedDiagnostics.shouldDelayDiagnostics()) {
- DelayedDiagnostics.add(DelayedDiagnostic::makeDeprecation(Loc, D, Message));
+ DelayedDiagnostics.add(DelayedDiagnostic::makeDeprecation(Loc, D,
+ UnknownObjCClass,
+ Message));
return;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
index a39584a..847f03c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
@@ -16,6 +16,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -31,7 +32,7 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Lex/Preprocessor.h"
-#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include <map>
#include <set>
@@ -60,6 +61,7 @@ namespace {
bool VisitExpr(Expr *Node);
bool VisitDeclRefExpr(DeclRefExpr *DRE);
bool VisitCXXThisExpr(CXXThisExpr *ThisE);
+ bool VisitLambdaExpr(LambdaExpr *Lambda);
};
/// VisitExpr - Visit all of the children of this expression.
@@ -84,7 +86,7 @@ namespace {
// evaluated. Parameters of a function declared before a default
// argument expression are in scope and can hide namespace and
// class member names.
- return S->Diag(DRE->getSourceRange().getBegin(),
+ return S->Diag(DRE->getLocStart(),
diag::err_param_default_argument_references_param)
<< Param->getDeclName() << DefaultArg->getSourceRange();
} else if (VarDecl *VDecl = dyn_cast<VarDecl>(Decl)) {
@@ -92,7 +94,7 @@ namespace {
// Local variables shall not be used in default argument
// expressions.
if (VDecl->isLocalVarDecl())
- return S->Diag(DRE->getSourceRange().getBegin(),
+ return S->Diag(DRE->getLocStart(),
diag::err_param_default_argument_references_local)
<< VDecl->getDeclName() << DefaultArg->getSourceRange();
}
@@ -105,10 +107,21 @@ namespace {
// C++ [dcl.fct.default]p8:
// The keyword this shall not be used in a default argument of a
// member function.
- return S->Diag(ThisE->getSourceRange().getBegin(),
+ return S->Diag(ThisE->getLocStart(),
diag::err_param_default_argument_references_this)
<< ThisE->getSourceRange();
}
+
+ bool CheckDefaultArgumentVisitor::VisitLambdaExpr(LambdaExpr *Lambda) {
+ // C++11 [expr.lambda.prim]p13:
+ // A lambda-expression appearing in a default argument shall not
+ // implicitly or explicitly capture any entity.
+ if (Lambda->capture_begin() == Lambda->capture_end())
+ return false;
+
+ return S->Diag(Lambda->getLocStart(),
+ diag::err_lambda_capture_default_arg);
+ }
}
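
The new visitor rejects any lambda in a default argument that captures, per C++11 [expr.prim.lambda]p13. A minimal sketch:

    void h() {
      int i = 1;
      void ok(int = ([] { return 1; })());    // fine: captures nothing
      void bad(int = ([i] { return i; })());  // error: lambda in default
                                              // argument captures 'i'
    }
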
void Sema::ImplicitExceptionSpecification::CalledDecl(CXXMethodDecl *Method) {
@@ -267,7 +280,7 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
UnparsedDefaultArgLocs.erase(Param);
// Default arguments are only permitted in C++
- if (!getLangOptions().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
Diag(EqualLoc, diag::err_param_default_argument)
<< DefaultArg->getSourceRange();
Param->setInvalidDecl();
@@ -359,7 +372,8 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
// function, once we already know that they have the same
// type. Subroutine of MergeFunctionDecl. Returns true if there was an
// error, false otherwise.
-bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
+bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
+ Scope *S) {
bool Invalid = false;
// C++ [dcl.fct.default]p4:
@@ -384,7 +398,16 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
ParmVarDecl *OldParam = Old->getParamDecl(p);
ParmVarDecl *NewParam = New->getParamDecl(p);
- if (OldParam->hasDefaultArg() && NewParam->hasDefaultArg()) {
+ bool OldParamHasDfl = OldParam->hasDefaultArg();
+ bool NewParamHasDfl = NewParam->hasDefaultArg();
+
+ NamedDecl *ND = Old;
+ if (S && !isDeclInScope(ND, New->getDeclContext(), S))
+ // Ignore default parameters of old decl if they are not in
+ // the same scope.
+ OldParamHasDfl = false;
+
+ if (OldParamHasDfl && NewParamHasDfl) {
unsigned DiagDefaultParamID =
diag::err_param_default_argument_redefinition;
@@ -392,7 +415,7 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
// MSVC accepts that default parameters be redefined for member functions
// of template class. The new default parameter's value is ignored.
Invalid = true;
- if (getLangOptions().MicrosoftExt) {
+ if (getLangOpts().MicrosoftExt) {
CXXMethodDecl* MD = dyn_cast<CXXMethodDecl>(New);
if (MD && MD->getParent()->getDescribedClassTemplate()) {
// Merge the old default argument into the new parameter.
@@ -420,8 +443,8 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
// Look for the function declaration where the default argument was
// actually written, which may be a declaration prior to Old.
- for (FunctionDecl *Older = Old->getPreviousDeclaration();
- Older; Older = Older->getPreviousDeclaration()) {
+ for (FunctionDecl *Older = Old->getPreviousDecl();
+ Older; Older = Older->getPreviousDecl()) {
if (!Older->getParamDecl(p)->hasDefaultArg())
break;
@@ -430,7 +453,7 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
Diag(OldParam->getLocation(), diag::note_previous_definition)
<< OldParam->getDefaultArgRange();
- } else if (OldParam->hasDefaultArg()) {
+ } else if (OldParamHasDfl) {
// Merge the old default argument into the new parameter.
// It's important to use getInit() here; getDefaultArg()
// strips off any top-level ExprWithCleanups.
@@ -440,7 +463,7 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
OldParam->getUninstantiatedDefaultArg());
else
NewParam->setDefaultArg(OldParam->getInit());
- } else if (NewParam->hasDefaultArg()) {
+ } else if (NewParamHasDfl) {
if (New->getDescribedFunctionTemplate()) {
// Paragraph 4, quoted above, only applies to non-template functions.
Diag(NewParam->getLocation(),
@@ -502,13 +525,9 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
}
}
- // C++0x [dcl.constexpr]p1: If any declaration of a function or function
+ // C++11 [dcl.constexpr]p1: If any declaration of a function or function
// template has a constexpr specifier then all its declarations shall
- // contain the constexpr specifier. [Note: An explicit specialization can
- // differ from the template declaration with respect to the constexpr
- // specifier. -- end note]
- //
- // FIXME: Don't reject changes in constexpr in explicit specializations.
+ // contain the constexpr specifier.
if (New->isConstexpr() != Old->isConstexpr()) {
Diag(New->getLocation(), diag::err_constexpr_redecl_mismatch)
<< New << New->isConstexpr();
@@ -529,7 +548,7 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
/// validates compatibility and merges the specs if necessary.
void Sema::MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old) {
// Shortcut if exceptions are disabled.
- if (!getLangOptions().CXXExceptions)
+ if (!getLangOpts().CXXExceptions)
return;
assert(Context.hasSameType(New->getType(), Old->getType()) &&
@@ -571,11 +590,25 @@ void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
unsigned NumParams = FD->getNumParams();
unsigned p;
+ bool IsLambda = FD->getOverloadedOperator() == OO_Call &&
+ isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->getParent()->isLambda();
+
// Find first parameter with a default argument
for (p = 0; p < NumParams; ++p) {
ParmVarDecl *Param = FD->getParamDecl(p);
- if (Param->hasDefaultArg())
+ if (Param->hasDefaultArg()) {
+ // C++11 [expr.prim.lambda]p5:
+ // [...] Default arguments (8.3.6) shall not be specified in the
+ // parameter-declaration-clause of a lambda-declarator.
+ //
+      // FIXME: Core issue 974 strikes this sentence; we only provide an
+ // extension warning.
+ if (IsLambda)
+ Diag(Param->getLocation(), diag::ext_lambda_default_arguments)
+ << Param->getDefaultArgRange();
break;
+ }
}
// C++ [dcl.fct.default]p4:
@@ -618,9 +651,9 @@ void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
// CheckConstexprParameterTypes - Check whether a function's parameter types
// are all literal types. If so, return true. If not, produce a suitable
-// diagnostic depending on @p CCK and return false.
-static bool CheckConstexprParameterTypes(Sema &SemaRef, const FunctionDecl *FD,
- Sema::CheckConstexprKind CCK) {
+// diagnostic and return false.
+static bool CheckConstexprParameterTypes(Sema &SemaRef,
+ const FunctionDecl *FD) {
unsigned ArgIndex = 0;
const FunctionProtoType *FT = FD->getType()->getAs<FunctionProtoType>();
for (FunctionProtoType::arg_type_iterator i = FT->arg_type_begin(),
@@ -628,107 +661,73 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef, const FunctionDecl *FD,
const ParmVarDecl *PD = FD->getParamDecl(ArgIndex);
SourceLocation ParamLoc = PD->getLocation();
if (!(*i)->isDependentType() &&
- SemaRef.RequireLiteralType(ParamLoc, *i, CCK == Sema::CCK_Declaration ?
+ SemaRef.RequireLiteralType(ParamLoc, *i,
SemaRef.PDiag(diag::err_constexpr_non_literal_param)
<< ArgIndex+1 << PD->getSourceRange()
- << isa<CXXConstructorDecl>(FD) :
- SemaRef.PDiag(),
- /*AllowIncompleteType*/ true)) {
- if (CCK == Sema::CCK_NoteNonConstexprInstantiation)
- SemaRef.Diag(ParamLoc, diag::note_constexpr_tmpl_non_literal_param)
- << ArgIndex+1 << PD->getSourceRange()
- << isa<CXXConstructorDecl>(FD) << *i;
+ << isa<CXXConstructorDecl>(FD)))
return false;
- }
}
return true;
}
// CheckConstexprFunctionDecl - Check whether a function declaration satisfies
-// the requirements of a constexpr function declaration or a constexpr
-// constructor declaration. Return true if it does, false if not.
-//
-// This implements C++0x [dcl.constexpr]p3,4, as amended by N3308.
+// the requirements of a constexpr function definition or a constexpr
+// constructor definition. If so, return true. If not, produce appropriate
+// diagnostics and return false.
//
-// \param CCK Specifies whether to produce diagnostics if the function does not
-// satisfy the requirements.
-bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD,
- CheckConstexprKind CCK) {
- assert((CCK != CCK_NoteNonConstexprInstantiation ||
- (NewFD->getTemplateInstantiationPattern() &&
- NewFD->getTemplateInstantiationPattern()->isConstexpr())) &&
- "only constexpr templates can be instantiated non-constexpr");
-
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(NewFD)) {
- // C++0x [dcl.constexpr]p4:
- // In the definition of a constexpr constructor, each of the parameter
- // types shall be a literal type.
- if (!CheckConstexprParameterTypes(*this, NewFD, CCK))
- return false;
-
- // In addition, either its function-body shall be = delete or = default or
- // it shall satisfy the following constraints:
+// This implements C++11 [dcl.constexpr]p3,4, as amended by DR1360.
+bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) {
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
+ if (MD && MD->isInstance()) {
+ // C++11 [dcl.constexpr]p4:
+ // The definition of a constexpr constructor shall satisfy the following
+ // constraints:
// - the class shall not have any virtual base classes;
- const CXXRecordDecl *RD = CD->getParent();
+ const CXXRecordDecl *RD = MD->getParent();
if (RD->getNumVBases()) {
- // Note, this is still illegal if the body is = default, since the
- // implicit body does not satisfy the requirements of a constexpr
- // constructor. We also reject cases where the body is = delete, as
- // required by N3308.
- if (CCK != CCK_Instantiation) {
- Diag(NewFD->getLocation(),
- CCK == CCK_Declaration ? diag::err_constexpr_virtual_base
- : diag::note_constexpr_tmpl_virtual_base)
- << RD->isStruct() << RD->getNumVBases();
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I)
- Diag(I->getSourceRange().getBegin(),
- diag::note_constexpr_virtual_base_here) << I->getSourceRange();
- }
+ Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base)
+ << isa<CXXConstructorDecl>(NewFD) << RD->isStruct()
+ << RD->getNumVBases();
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I)
+ Diag(I->getLocStart(),
+ diag::note_constexpr_virtual_base_here) << I->getSourceRange();
return false;
}
- } else {
- // C++0x [dcl.constexpr]p3:
+ }
+
+ if (!isa<CXXConstructorDecl>(NewFD)) {
+ // C++11 [dcl.constexpr]p3:
// The definition of a constexpr function shall satisfy the following
// constraints:
// - it shall not be virtual;
const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD);
if (Method && Method->isVirtual()) {
- if (CCK != CCK_Instantiation) {
- Diag(NewFD->getLocation(),
- CCK == CCK_Declaration ? diag::err_constexpr_virtual
- : diag::note_constexpr_tmpl_virtual);
-
- // If it's not obvious why this function is virtual, find an overridden
- // function which uses the 'virtual' keyword.
- const CXXMethodDecl *WrittenVirtual = Method;
- while (!WrittenVirtual->isVirtualAsWritten())
- WrittenVirtual = *WrittenVirtual->begin_overridden_methods();
- if (WrittenVirtual != Method)
- Diag(WrittenVirtual->getLocation(),
- diag::note_overridden_virtual_function);
- }
+ Diag(NewFD->getLocation(), diag::err_constexpr_virtual);
+
+ // If it's not obvious why this function is virtual, find an overridden
+ // function which uses the 'virtual' keyword.
+ const CXXMethodDecl *WrittenVirtual = Method;
+ while (!WrittenVirtual->isVirtualAsWritten())
+ WrittenVirtual = *WrittenVirtual->begin_overridden_methods();
+ if (WrittenVirtual != Method)
+ Diag(WrittenVirtual->getLocation(),
+ diag::note_overridden_virtual_function);
return false;
}
// - its return type shall be a literal type;
QualType RT = NewFD->getResultType();
if (!RT->isDependentType() &&
- RequireLiteralType(NewFD->getLocation(), RT, CCK == CCK_Declaration ?
- PDiag(diag::err_constexpr_non_literal_return) :
- PDiag(),
- /*AllowIncompleteType*/ true)) {
- if (CCK == CCK_NoteNonConstexprInstantiation)
- Diag(NewFD->getLocation(),
- diag::note_constexpr_tmpl_non_literal_return) << RT;
- return false;
- }
-
- // - each of its parameter types shall be a literal type;
- if (!CheckConstexprParameterTypes(*this, NewFD, CCK))
+ RequireLiteralType(NewFD->getLocation(), RT,
+ PDiag(diag::err_constexpr_non_literal_return)))
return false;
}
+ // - each of its parameter types shall be a literal type;
+ if (!CheckConstexprParameterTypes(*this, NewFD))
+ return false;
+
return true;
}
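
With the CheckConstexprKind plumbing gone, the remaining checks diagnose unconditionally. Declarations the rewritten code still rejects (hypothetical):

    struct B {};
    struct V : virtual B {
      constexpr V() {}               // error: constexpr constructor in a
                                     // class with a virtual base
    };
    struct W {
      virtual constexpr int f();     // error: virtual function cannot be
                                     // constexpr
    };
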
@@ -812,7 +811,11 @@ static void CheckConstexprCtorInitializer(Sema &SemaRef,
bool &Diagnosed) {
if (Field->isUnnamedBitfield())
return;
-
+
+ if (Field->isAnonymousStructOrUnion() &&
+ Field->getType()->getAsCXXRecordDecl()->isEmpty())
+ return;
+
if (!Inits.count(Field)) {
if (!Diagnosed) {
SemaRef.Diag(Dcl->getLocation(), diag::err_constexpr_ctor_missing_init);
@@ -836,13 +839,13 @@ static void CheckConstexprCtorInitializer(Sema &SemaRef,
/// \return true if the body is OK, false if we have diagnosed a problem.
bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
if (isa<CXXTryStmt>(Body)) {
- // C++0x [dcl.constexpr]p3:
+ // C++11 [dcl.constexpr]p3:
// The definition of a constexpr function shall satisfy the following
// constraints: [...]
// - its function-body shall be = delete, = default, or a
// compound-statement
//
- // C++0x [dcl.constexpr]p4:
+ // C++11 [dcl.constexpr]p4:
// In the definition of a constexpr constructor, [...]
// - its function-body shall not be a function-try-block;
Diag(Body->getLocStart(), diag::err_constexpr_function_try_block)
@@ -877,12 +880,6 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
break;
ReturnStmts.push_back((*BodyIt)->getLocStart());
- // FIXME
- // - every constructor call and implicit conversion used in initializing
- // the return value shall be one of those allowed in a constant
- // expression.
- // Deal with this as part of a general check that the function can produce
- // a constant expression (for [dcl.constexpr]p5).
continue;
default:
@@ -897,11 +894,14 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
if (const CXXConstructorDecl *Constructor
= dyn_cast<CXXConstructorDecl>(Dcl)) {
const CXXRecordDecl *RD = Constructor->getParent();
- // - every non-static data member and base class sub-object shall be
- // initialized;
+ // DR1359:
+ // - every non-variant non-static data member and base class sub-object
+ // shall be initialized;
+ // - if the class is a non-empty union, or for each non-empty anonymous
+ // union member of a non-union class, exactly one non-static data member
+ // shall be initialized;
if (RD->isUnion()) {
- // DR1359: Exactly one member of a union shall be initialized.
- if (Constructor->getNumCtorInitializers() == 0) {
+ if (Constructor->getNumCtorInitializers() == 0 && !RD->isEmpty()) {
Diag(Dcl->getLocation(), diag::err_constexpr_union_ctor_no_init);
return false;
}
@@ -943,20 +943,6 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
return false;
}
}
-
- // FIXME
- // - every constructor involved in initializing non-static data members
- // and base class sub-objects shall be a constexpr constructor;
- // - every assignment-expression that is an initializer-clause appearing
- // directly or indirectly within a brace-or-equal-initializer for
- // a non-static data member that is not named by a mem-initializer-id
- // shall be a constant expression; and
- // - every implicit conversion used in converting a constructor argument
- // to the corresponding parameter type and converting
- // a full-expression to the corresponding member type shall be one of
- // those allowed in a constant expression.
- // Deal with these as part of a general check that the function can produce
- // a constant expression (for [dcl.constexpr]p5).
} else {
if (ReturnStmts.empty()) {
Diag(Dcl->getLocation(), diag::err_constexpr_body_no_return);
@@ -970,6 +956,25 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
}
}
+ // C++11 [dcl.constexpr]p5:
+ // if no function argument values exist such that the function invocation
+ // substitution would produce a constant expression, the program is
+ // ill-formed; no diagnostic required.
+ // C++11 [dcl.constexpr]p3:
+ // - every constructor call and implicit conversion used in initializing the
+ // return value shall be one of those allowed in a constant expression.
+ // C++11 [dcl.constexpr]p4:
+ // - every constructor involved in initializing non-static data members and
+ // base class sub-objects shall be a constexpr constructor.
+ llvm::SmallVector<PartialDiagnosticAt, 8> Diags;
+ if (!Expr::isPotentialConstantExpr(Dcl, Diags)) {
+ Diag(Dcl->getLocation(), diag::err_constexpr_function_never_constant_expr)
+ << isa<CXXConstructorDecl>(Dcl);
+ for (size_t I = 0, N = Diags.size(); I != N; ++I)
+ Diag(Diags[I].first, Diags[I].second);
+ return false;
+ }
+
return true;
}
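
The isPotentialConstantExpr check turns the "no diagnostic required" clause of p5 into a hard error plus the collected notes. For instance (hypothetical):

    int runtime_only();              // not constexpr
    constexpr int f() { return runtime_only(); }
    // error: constexpr function never produces a constant expression
    // note: non-constexpr function 'runtime_only' cannot be used in a
    // constant expression
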
@@ -979,7 +984,7 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
/// the innermost class.
bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *,
const CXXScopeSpec *SS) {
- assert(getLangOptions().CPlusPlus && "No class names in C!");
+ assert(getLangOpts().CPlusPlus && "No class names in C!");
CXXRecordDecl *CurDecl;
if (SS && SS->isSet() && !SS->isInvalid()) {
@@ -1133,13 +1138,15 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
QualType NewBaseType
= Context.getCanonicalType(Bases[idx]->getType());
NewBaseType = NewBaseType.getLocalUnqualifiedType();
- if (KnownBaseTypes[NewBaseType]) {
+
+ CXXBaseSpecifier *&KnownBase = KnownBaseTypes[NewBaseType];
+ if (KnownBase) {
// C++ [class.mi]p3:
// A class shall not be specified as a direct base class of a
// derived class more than once.
- Diag(Bases[idx]->getSourceRange().getBegin(),
+ Diag(Bases[idx]->getLocStart(),
diag::err_duplicate_base_class)
- << KnownBaseTypes[NewBaseType]->getType()
+ << KnownBase->getType()
<< Bases[idx]->getSourceRange();
// Delete the duplicate base class specifier; we're going to
@@ -1149,8 +1156,12 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
Invalid = true;
} else {
// Okay, add this new base class.
- KnownBaseTypes[NewBaseType] = Bases[idx];
+ KnownBase = Bases[idx];
Bases[NumGoodBases++] = Bases[idx];
+ if (const RecordType *Record = NewBaseType->getAs<RecordType>())
+ if (const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl()))
+ if (RD->hasAttr<WeakAttr>())
+ Class->addAttr(::new (Context) WeakAttr(SourceRange(), Context));
}
}
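
Beyond caching the DenseMap slot, the hunk propagates the weak attribute from a base class to the derived class. Hypothetically:

    struct __attribute__((weak)) Base {};
    struct Derived : Base {};        // Derived implicitly gains the weak
                                     // attribute
    struct Dup : Base, Base {};      // error: base class specified more
                                     // than once
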
@@ -1190,7 +1201,7 @@ static CXXRecordDecl *GetClassForType(QualType T) {
/// \brief Determine whether the type \p Derived is a C++ class that is
/// derived from the type \p Base.
bool Sema::IsDerivedFrom(QualType Derived, QualType Base) {
- if (!getLangOptions().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
return false;
CXXRecordDecl *DerivedRD = GetClassForType(Derived);
@@ -1208,7 +1219,7 @@ bool Sema::IsDerivedFrom(QualType Derived, QualType Base) {
/// \brief Determine whether the type \p Derived is a C++ class that is
/// derived from the type \p Base.
bool Sema::IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths) {
- if (!getLangOptions().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
return false;
CXXRecordDecl *DerivedRD = GetClassForType(Derived);
@@ -1441,7 +1452,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
// For anonymous bitfields, the location should point to the type.
if (Loc.isInvalid())
- Loc = D.getSourceRange().getBegin();
+ Loc = D.getLocStart();
Expr *BitWidth = static_cast<Expr*>(BW);
@@ -1527,14 +1538,12 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
// class X {
// int X::member;
// };
- DeclContext *DC = 0;
- if ((DC = computeDeclContext(SS, false)) && DC->Equals(CurContext))
- Diag(D.getIdentifierLoc(), diag::warn_member_extra_qualification)
- << Name << FixItHint::CreateRemoval(SS.getRange());
+ if (DeclContext *DC = computeDeclContext(SS, false))
+ diagnoseQualifiedDeclaration(SS, DC, Name, D.getIdentifierLoc());
else
Diag(D.getIdentifierLoc(), diag::err_member_qualification)
<< Name << SS.getRange();
-
+
SS.clear();
}
@@ -1631,11 +1640,26 @@ Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation EqualLoc,
return;
}
+ if (DiagnoseUnexpandedParameterPack(InitExpr, UPPC_Initializer)) {
+ FD->setInvalidDecl();
+ FD->removeInClassInitializer();
+ return;
+ }
+
ExprResult Init = InitExpr;
if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent()) {
- // FIXME: if there is no EqualLoc, this is list-initialization.
- Init = PerformCopyInitialization(
- InitializedEntity::InitializeMember(FD), EqualLoc, InitExpr);
+ if (isa<InitListExpr>(InitExpr) && isStdInitializerList(FD->getType(), 0)) {
+ Diag(FD->getLocation(), diag::warn_dangling_std_initializer_list)
+ << /*at end of ctor*/1 << InitExpr->getSourceRange();
+ }
+ Expr **Inits = &InitExpr;
+ unsigned NumInits = 1;
+ InitializedEntity Entity = InitializedEntity::InitializeMember(FD);
+ InitializationKind Kind = EqualLoc.isInvalid()
+ ? InitializationKind::CreateDirectList(InitExpr->getLocStart())
+ : InitializationKind::CreateCopy(InitExpr->getLocStart(), EqualLoc);
+ InitializationSequence Seq(*this, Entity, Kind, Inits, NumInits);
+ Init = Seq.Perform(*this, Entity, Kind, MultiExprArg(Inits, NumInits));
if (Init.isInvalid()) {
FD->setInvalidDecl();
return;
@@ -1710,11 +1734,13 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc) {
return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
- IdLoc, MultiInitializer(InitList), EllipsisLoc);
+ DS, IdLoc, InitList,
+ EllipsisLoc);
}
/// \brief Handle a C++ member initializer using parentheses syntax.
@@ -1724,15 +1750,41 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc) {
+ Expr *List = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
+ RParenLoc);
return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
- IdLoc, MultiInitializer(LParenLoc, Args, NumArgs,
- RParenLoc),
- EllipsisLoc);
+ DS, IdLoc, List, EllipsisLoc);
+}
+
+namespace {
+
+// Callback to only accept typo corrections that can be a valid C++ member
+// initializer: either a non-static data member or a base class.
+class MemInitializerValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ explicit MemInitializerValidatorCCC(CXXRecordDecl *ClassDecl)
+ : ClassDecl(ClassDecl) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (NamedDecl *ND = candidate.getCorrectionDecl()) {
+ if (FieldDecl *Member = dyn_cast<FieldDecl>(ND))
+ return Member->getDeclContext()->getRedeclContext()->Equals(ClassDecl);
+ else
+ return isa<TypeDecl>(ND);
+ }
+ return false;
+ }
+
+ private:
+ CXXRecordDecl *ClassDecl;
+};
+
}
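
The callback keeps typo correction from suggesting names that cannot appear in a mem-initializer: only fields of the class being constructed or type names survive. A hypothetical correction:

    struct S {
      int member;
      S() : membr(0) {}   // error + fix-it: did you mean 'member'?
    };
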
/// \brief Handle a C++ member initializer.
@@ -1742,8 +1794,9 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
SourceLocation IdLoc,
- const MultiInitializer &Args,
+ Expr *Init,
SourceLocation EllipsisLoc) {
if (!ConstructorD)
return true;
@@ -1774,28 +1827,18 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
// using a qualified name. ]
if (!SS.getScopeRep() && !TemplateTypeTy) {
// Look for a member, first.
- FieldDecl *Member = 0;
DeclContext::lookup_result Result
= ClassDecl->lookup(MemberOrBase);
if (Result.first != Result.second) {
- Member = dyn_cast<FieldDecl>(*Result.first);
-
- if (Member) {
- if (EllipsisLoc.isValid())
- Diag(EllipsisLoc, diag::err_pack_expansion_member_init)
- << MemberOrBase << SourceRange(IdLoc, Args.getEndLoc());
-
- return BuildMemberInitializer(Member, Args, IdLoc);
- }
-
- // Handle anonymous union case.
- if (IndirectFieldDecl* IndirectField
- = dyn_cast<IndirectFieldDecl>(*Result.first)) {
+ ValueDecl *Member;
+ if ((Member = dyn_cast<FieldDecl>(*Result.first)) ||
+ (Member = dyn_cast<IndirectFieldDecl>(*Result.first))) {
if (EllipsisLoc.isValid())
Diag(EllipsisLoc, diag::err_pack_expansion_member_init)
- << MemberOrBase << SourceRange(IdLoc, Args.getEndLoc());
+ << MemberOrBase
+ << SourceRange(IdLoc, Init->getSourceRange().getEnd());
- return BuildMemberInitializer(IndirectField, Args, IdLoc);
+ return BuildMemberInitializer(Member, Init, IdLoc);
}
}
}
@@ -1805,6 +1848,8 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (TemplateTypeTy) {
BaseType = GetTypeFromParser(TemplateTypeTy, &TInfo);
+ } else if (DS.getTypeSpecType() == TST_decltype) {
+ BaseType = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
} else {
LookupResult R(*this, MemberOrBase, IdLoc, LookupOrdinaryName);
LookupParsedName(R, S, &SS);
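// Illustrative (hypothetical user code): the TST_decltype path above accepts a
// C++11 base specifier spelled with decltype in a mem-initializer, e.g.
//   struct B {}; B make();
//   struct D : decltype(make()) { D() : decltype(make())() {} };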
@@ -1838,24 +1883,23 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
// If no results were found, try to correct typos.
TypoCorrection Corr;
+ MemInitializerValidatorCCC Validator(ClassDecl);
if (R.empty() && BaseType.isNull() &&
(Corr = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
- ClassDecl, false, CTC_NoKeywords))) {
- std::string CorrectedStr(Corr.getAsString(getLangOptions()));
- std::string CorrectedQuotedStr(Corr.getQuoted(getLangOptions()));
+ Validator, ClassDecl))) {
+ std::string CorrectedStr(Corr.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corr.getQuoted(getLangOpts()));
if (FieldDecl *Member = Corr.getCorrectionDeclAs<FieldDecl>()) {
- if (Member->getDeclContext()->getRedeclContext()->Equals(ClassDecl)) {
- // We have found a non-static data member with a similar
- // name to what was typed; complain and initialize that
- // member.
- Diag(R.getNameLoc(), diag::err_mem_init_not_member_or_class_suggest)
- << MemberOrBase << true << CorrectedQuotedStr
- << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
- Diag(Member->getLocation(), diag::note_previous_decl)
- << CorrectedQuotedStr;
-
- return BuildMemberInitializer(Member, Args, IdLoc);
- }
+ // We have found a non-static data member with a similar
+ // name to what was typed; complain and initialize that
+ // member.
+ Diag(R.getNameLoc(), diag::err_mem_init_not_member_or_class_suggest)
+ << MemberOrBase << true << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ Diag(Member->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+
+ return BuildMemberInitializer(Member, Init, IdLoc);
} else if (TypeDecl *Type = Corr.getCorrectionDeclAs<TypeDecl>()) {
const CXXBaseSpecifier *DirectBaseSpec;
const CXXBaseSpecifier *VirtualBaseSpec;
@@ -1871,7 +1915,7 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
const CXXBaseSpecifier *BaseSpec = DirectBaseSpec? DirectBaseSpec
: VirtualBaseSpec;
- Diag(BaseSpec->getSourceRange().getBegin(),
+ Diag(BaseSpec->getLocStart(),
diag::note_base_class_specified_here)
<< BaseSpec->getType()
<< BaseSpec->getSourceRange();
@@ -1883,7 +1927,7 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (!TyD && BaseType.isNull()) {
Diag(IdLoc, diag::err_mem_init_not_member_or_class)
- << MemberOrBase << SourceRange(IdLoc, Args.getEndLoc());
+ << MemberOrBase << SourceRange(IdLoc,Init->getSourceRange().getEnd());
return true;
}
}
@@ -1903,7 +1947,7 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(BaseType, IdLoc);
- return BuildBaseInitializer(BaseType, TInfo, Args, ClassDecl, EllipsisLoc);
+ return BuildBaseInitializer(BaseType, TInfo, Init, ClassDecl, EllipsisLoc);
}
/// Checks a member initializer expression for cases where reference (or
@@ -2030,14 +2074,16 @@ static bool InitExprContainsUninitializedFields(const Stmt *S,
}
MemInitResult
-Sema::BuildMemberInitializer(ValueDecl *Member,
- const MultiInitializer &Args,
+Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
SourceLocation IdLoc) {
FieldDecl *DirectMember = dyn_cast<FieldDecl>(Member);
IndirectFieldDecl *IndirectMember = dyn_cast<IndirectFieldDecl>(Member);
assert((DirectMember || IndirectMember) &&
"Member must be a FieldDecl or IndirectFieldDecl");
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
+ return true;
+
if (Member->isInvalidDecl())
return true;
@@ -2045,13 +2091,19 @@ Sema::BuildMemberInitializer(ValueDecl *Member,
// foo(foo)
// where foo is not also a parameter to the constructor.
// TODO: implement -Wuninitialized and fold this into that framework.
- for (MultiInitializer::iterator I = Args.begin(), E = Args.end();
- I != E; ++I) {
+ Expr **Args;
+ unsigned NumArgs;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ } else {
+ InitListExpr *InitList = cast<InitListExpr>(Init);
+ Args = InitList->getInits();
+ NumArgs = InitList->getNumInits();
+ }
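+  // Illustrative (hypothetical user code): both mem-initializer spellings
+  // reach this point, e.g.
+  //   struct S { int x; S() : x(1) {} };   // ParenListExpr
+  //   struct T { int x; T() : x{1} {} };   // InitListExpr (C++11)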
+ for (unsigned i = 0; i < NumArgs; ++i) {
SourceLocation L;
- Expr *Arg = *I;
- if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Arg))
- Arg = DIE->getInit();
- if (InitExprContainsUninitializedFields(Arg, Member, &L)) {
+ if (InitExprContainsUninitializedFields(Args[i], Member, &L)) {
// FIXME: Return true in the case when other fields are used before being
// uninitialized. For example, let this field be the i'th field. When
// initializing the i'th field, throw a warning if any of the >= i'th
@@ -2062,29 +2114,43 @@ Sema::BuildMemberInitializer(ValueDecl *Member,
}
}
- bool HasDependentArg = Args.isTypeDependent();
+ SourceRange InitRange = Init->getSourceRange();
- Expr *Init;
- if (Member->getType()->isDependentType() || HasDependentArg) {
+ if (Member->getType()->isDependentType() || Init->isTypeDependent()) {
// Can't check initialization for a member of dependent type or when
// any of the arguments are type-dependent expressions.
- Init = Args.CreateInitExpr(Context,Member->getType().getNonReferenceType());
-
DiscardCleanupsInEvaluationContext();
} else {
+ bool InitList = false;
+ if (isa<InitListExpr>(Init)) {
+ InitList = true;
+ Args = &Init;
+ NumArgs = 1;
+
+ if (isStdInitializerList(Member->getType(), 0)) {
+ Diag(IdLoc, diag::warn_dangling_std_initializer_list)
+ << /*at end of ctor*/1 << InitRange;
+ }
+ }
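+    // Illustrative (hypothetical user code): the dangling-initializer_list
+    // warning above fires for, e.g.
+    //   struct S {
+    //     std::initializer_list<int> il;
+    //     S() : il{1, 2, 3} {}  // backing array dies at the end of the ctor
+    //   };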
+
// Initialize the member.
InitializedEntity MemberEntity =
DirectMember ? InitializedEntity::InitializeMember(DirectMember, 0)
: InitializedEntity::InitializeMember(IndirectMember, 0);
InitializationKind Kind =
- InitializationKind::CreateDirect(IdLoc, Args.getStartLoc(),
- Args.getEndLoc());
-
- ExprResult MemberInit = Args.PerformInit(*this, MemberEntity, Kind);
+ InitList ? InitializationKind::CreateDirectList(IdLoc)
+ : InitializationKind::CreateDirect(IdLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+
+ InitializationSequence InitSeq(*this, MemberEntity, Kind, Args, NumArgs);
+ ExprResult MemberInit = InitSeq.Perform(*this, MemberEntity, Kind,
+ MultiExprArg(*this, Args, NumArgs),
+ 0);
if (MemberInit.isInvalid())
return true;
- CheckImplicitConversions(MemberInit.get(), Args.getStartLoc());
+ CheckImplicitConversions(MemberInit.get(),
+ InitRange.getBegin());
// C++0x [class.base.init]p7:
// The initialization of each base and member constitutes a
@@ -2095,14 +2161,13 @@ Sema::BuildMemberInitializer(ValueDecl *Member,
// If we are in a dependent context, template instantiation will
// perform this type-checking again. Just save the arguments that we
- // received in a ParenListExpr.
+ // received.
// FIXME: This isn't quite ideal, since our ASTs don't capture all
// of the information that we have about the member
// initializer. However, deconstructing the ASTs is a dicey process,
// and this approach is far more likely to get the corner cases right.
if (CurContext->isDependentContext()) {
- Init = Args.CreateInitExpr(Context,
- Member->getType().getNonReferenceType());
+ // The existing Init will do fine.
} else {
Init = MemberInit.get();
CheckForDanglingReferenceOrPointer(*this, Member, Init, IdLoc);
@@ -2110,43 +2175,53 @@ Sema::BuildMemberInitializer(ValueDecl *Member,
}
if (DirectMember) {
- return new (Context) CXXCtorInitializer(Context, DirectMember,
- IdLoc, Args.getStartLoc(),
- Init, Args.getEndLoc());
+ return new (Context) CXXCtorInitializer(Context, DirectMember, IdLoc,
+ InitRange.getBegin(), Init,
+ InitRange.getEnd());
} else {
- return new (Context) CXXCtorInitializer(Context, IndirectMember,
- IdLoc, Args.getStartLoc(),
- Init, Args.getEndLoc());
+ return new (Context) CXXCtorInitializer(Context, IndirectMember, IdLoc,
+ InitRange.getBegin(), Init,
+ InitRange.getEnd());
}
}
MemInitResult
-Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo,
- const MultiInitializer &Args,
- SourceLocation NameLoc,
+Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
CXXRecordDecl *ClassDecl) {
- SourceLocation Loc = TInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ SourceLocation NameLoc = TInfo->getTypeLoc().getLocalSourceRange().getBegin();
if (!LangOpts.CPlusPlus0x)
- return Diag(Loc, diag::err_delegation_0x_only)
+ return Diag(NameLoc, diag::err_delegating_ctor)
<< TInfo->getTypeLoc().getLocalSourceRange();
+ Diag(NameLoc, diag::warn_cxx98_compat_delegating_ctor);
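+  // Illustrative (hypothetical user code): a delegating constructor, valid
+  // only as of C++11, e.g.
+  //   struct S { S() : S(42) {} S(int); };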
+ bool InitList = true;
+ Expr **Args = &Init;
+ unsigned NumArgs = 1;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ InitList = false;
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ }
+
+ SourceRange InitRange = Init->getSourceRange();
// Initialize the object.
InitializedEntity DelegationEntity = InitializedEntity::InitializeDelegation(
QualType(ClassDecl->getTypeForDecl(), 0));
InitializationKind Kind =
- InitializationKind::CreateDirect(NameLoc, Args.getStartLoc(),
- Args.getEndLoc());
-
- ExprResult DelegationInit = Args.PerformInit(*this, DelegationEntity, Kind);
+ InitList ? InitializationKind::CreateDirectList(NameLoc)
+ : InitializationKind::CreateDirect(NameLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+ InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args, NumArgs);
+ ExprResult DelegationInit = InitSeq.Perform(*this, DelegationEntity, Kind,
+ MultiExprArg(*this, Args,NumArgs),
+ 0);
if (DelegationInit.isInvalid())
return true;
- CXXConstructExpr *ConExpr = cast<CXXConstructExpr>(DelegationInit.get());
- CXXConstructorDecl *Constructor
- = ConExpr->getConstructor();
- assert(Constructor && "Delegating constructor with no target?");
+ assert(cast<CXXConstructExpr>(DelegationInit.get())->getConstructor() &&
+ "Delegating constructor with no target?");
- CheckImplicitConversions(DelegationInit.get(), Args.getStartLoc());
+ CheckImplicitConversions(DelegationInit.get(), InitRange.getBegin());
// C++0x [class.base.init]p7:
// The initialization of each base and member constitutes a
@@ -2155,20 +2230,15 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo,
if (DelegationInit.isInvalid())
return true;
- assert(!CurContext->isDependentContext());
- return new (Context) CXXCtorInitializer(Context, Loc, Args.getStartLoc(),
- Constructor,
+ return new (Context) CXXCtorInitializer(Context, TInfo, InitRange.getBegin(),
DelegationInit.takeAs<Expr>(),
- Args.getEndLoc());
+ InitRange.getEnd());
}
MemInitResult
Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
- const MultiInitializer &Args,
- CXXRecordDecl *ClassDecl,
+ Expr *Init, CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc) {
- bool HasDependentArg = Args.isTypeDependent();
-
SourceLocation BaseLoc
= BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin();
@@ -2182,13 +2252,14 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
// of that class, the mem-initializer is ill-formed. A
// mem-initializer-list can initialize a base class using any
// name that denotes that base class type.
- bool Dependent = BaseType->isDependentType() || HasDependentArg;
+ bool Dependent = BaseType->isDependentType() || Init->isTypeDependent();
+ SourceRange InitRange = Init->getSourceRange();
if (EllipsisLoc.isValid()) {
// This is a pack expansion.
if (!BaseType->containsUnexpandedParameterPack()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
- << SourceRange(BaseLoc, Args.getEndLoc());
+ << SourceRange(BaseLoc, InitRange.getEnd());
EllipsisLoc = SourceLocation();
}
@@ -2197,7 +2268,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
if (DiagnoseUnexpandedParameterPack(BaseLoc, BaseTInfo, UPPC_Initializer))
return true;
- if (Args.DiagnoseUnexpandedParameterPack(*this))
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
return true;
}
@@ -2207,7 +2278,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
if (!Dependent) {
if (Context.hasSameUnqualifiedType(QualType(ClassDecl->getTypeForDecl(),0),
BaseType))
- return BuildDelegatingInitializer(BaseTInfo, Args, BaseLoc, ClassDecl);
+ return BuildDelegatingInitializer(BaseTInfo, Init, ClassDecl);
FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
VirtualBaseSpec);
@@ -2232,16 +2303,12 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
}
if (Dependent) {
- // Can't check initialization for a base of dependent type or when
- // any of the arguments are type-dependent expressions.
- Expr *BaseInit = Args.CreateInitExpr(Context, BaseType);
-
DiscardCleanupsInEvaluationContext();
return new (Context) CXXCtorInitializer(Context, BaseTInfo,
/*IsVirtual=*/false,
- Args.getStartLoc(), BaseInit,
- Args.getEndLoc(), EllipsisLoc);
+ InitRange.getBegin(), Init,
+ InitRange.getEnd(), EllipsisLoc);
}
// C++ [base.class.init]p2:
@@ -2252,23 +2319,34 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
return Diag(BaseLoc, diag::err_base_init_direct_and_virtual)
<< BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
- CXXBaseSpecifier *BaseSpec
- = const_cast<CXXBaseSpecifier *>(DirectBaseSpec);
+ CXXBaseSpecifier *BaseSpec = const_cast<CXXBaseSpecifier *>(DirectBaseSpec);
if (!BaseSpec)
BaseSpec = const_cast<CXXBaseSpecifier *>(VirtualBaseSpec);
// Initialize the base.
+ bool InitList = true;
+ Expr **Args = &Init;
+ unsigned NumArgs = 1;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ InitList = false;
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ }
+
InitializedEntity BaseEntity =
InitializedEntity::InitializeBase(Context, BaseSpec, VirtualBaseSpec);
- InitializationKind Kind =
- InitializationKind::CreateDirect(BaseLoc, Args.getStartLoc(),
- Args.getEndLoc());
-
- ExprResult BaseInit = Args.PerformInit(*this, BaseEntity, Kind);
+ InitializationKind Kind =
+ InitList ? InitializationKind::CreateDirectList(BaseLoc)
+ : InitializationKind::CreateDirect(BaseLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+ InitializationSequence InitSeq(*this, BaseEntity, Kind, Args, NumArgs);
+ ExprResult BaseInit = InitSeq.Perform(*this, BaseEntity, Kind,
+ MultiExprArg(*this, Args, NumArgs),
+ 0);
if (BaseInit.isInvalid())
return true;
- CheckImplicitConversions(BaseInit.get(), Args.getStartLoc());
+ CheckImplicitConversions(BaseInit.get(), InitRange.getBegin());
// C++0x [class.base.init]p7:
// The initialization of each base and member constitutes a
@@ -2285,13 +2363,13 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
// initializer. However, deconstructing the ASTs is a dicey process,
// and this approach is far more likely to get the corner cases right.
if (CurContext->isDependentContext())
- BaseInit = Owned(Args.CreateInitExpr(Context, BaseType));
+ BaseInit = Owned(Init);
return new (Context) CXXCtorInitializer(Context, BaseTInfo,
BaseSpec->isVirtual(),
- Args.getStartLoc(),
+ InitRange.getBegin(),
BaseInit.takeAs<Expr>(),
- Args.getEndLoc(), EllipsisLoc);
+ InitRange.getEnd(), EllipsisLoc);
}
// Create a static_cast\<T&&>(expr).
@@ -2342,12 +2420,15 @@ BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
bool Moving = ImplicitInitKind == IIK_Move;
ParmVarDecl *Param = Constructor->getParamDecl(0);
QualType ParamType = Param->getType().getNonReferenceType();
-
+
Expr *CopyCtorArg =
- DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(), Param,
+ DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
+ SourceLocation(), Param, false,
Constructor->getLocation(), ParamType,
VK_LValue, 0);
+ SemaRef.MarkDeclRefReferenced(cast<DeclRefExpr>(CopyCtorArg));
+
// Cast to the base class to avoid ambiguities.
QualType ArgTy =
SemaRef.Context.getQualifiedType(BaseSpec->getType().getUnqualifiedType(),
@@ -2415,11 +2496,14 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
// Suppress copying zero-width bitfields.
if (Field->isBitField() && Field->getBitWidthValue(SemaRef.Context) == 0)
return false;
-
+
Expr *MemberExprBase =
- DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(), Param,
+ DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
+ SourceLocation(), Param, false,
Loc, ParamType, VK_LValue, 0);
+ SemaRef.MarkDeclRefReferenced(cast<DeclRefExpr>(MemberExprBase));
+
if (Moving) {
MemberExprBase = CastForMoving(SemaRef, MemberExprBase);
}
@@ -2436,6 +2520,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
ParamType, Loc,
/*IsArrow=*/false,
SS,
+ /*TemplateKWLoc=*/SourceLocation(),
/*FirstQualifierInScope=*/0,
MemberLookup,
/*TemplateArgs=*/0);
@@ -2463,7 +2548,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
// Create the iteration variable for this array index.
IdentifierInfo *IterationVarName = 0;
{
- llvm::SmallString<8> Str;
+ SmallString<8> Str;
llvm::raw_svector_ostream OS(Str);
OS << "__i" << IndexVariables.size();
IterationVarName = &SemaRef.Context.Idents.get(OS.str());
@@ -2477,9 +2562,12 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
// Create a reference to the iteration variable.
ExprResult IterationVarRef
- = SemaRef.BuildDeclRefExpr(IterationVar, SizeType, VK_RValue, Loc);
+ = SemaRef.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
assert(!IterationVarRef.isInvalid() &&
"Reference to invented variable cannot fail!");
+ IterationVarRef = SemaRef.DefaultLvalueConversion(IterationVarRef.take());
+ assert(!IterationVarRef.isInvalid() &&
+ "Conversion of invented variable cannot fail!");
// Subscript the array with this iteration variable.
CtorArg = SemaRef.CreateBuiltinArraySubscriptExpr(CtorArg.take(), Loc,
@@ -2597,7 +2685,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
}
}
- if (SemaRef.getLangOptions().ObjCAutoRefCount &&
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
FieldBaseElementType->isObjCRetainableType() &&
FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_None &&
FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
@@ -2635,6 +2723,19 @@ struct BaseAndFieldInfo {
else
IIK = IIK_Default;
}
+
+ bool isImplicitCopyOrMove() const {
+ switch (IIK) {
+ case IIK_Copy:
+ case IIK_Move:
+ return true;
+
+ case IIK_Default:
+ return false;
+ }
+
+ llvm_unreachable("Invalid ImplicitInitializerKind!");
+ }
};
}
@@ -2651,6 +2752,22 @@ static bool isWithinAnonymousUnion(IndirectFieldDecl *F) {
return false;
}
+/// \brief Determine whether the given type is an incomplete or zero-length
+/// array type.
+static bool isIncompleteOrZeroLengthArrayType(ASTContext &Context, QualType T) {
+ if (T->isIncompleteArrayType())
+ return true;
+
+ while (const ConstantArrayType *ArrayT = Context.getAsConstantArrayType(T)) {
+ if (!ArrayT->getSize())
+ return true;
+
+ T = ArrayT->getElementType();
+ }
+
+ return false;
+}
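+// Illustrative: this matches both flexible array members and zero-length
+// arrays (a GNU extension), e.g.
+//   struct A { int n; int tail[]; };  // incomplete array type
+//   struct B { int pad[0]; };         // zero-length array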
+
static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
FieldDecl *Field,
IndirectFieldDecl *Indirect = 0) {
@@ -2664,7 +2781,7 @@ static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
// C++0x [class.base.init]p8: if the entity is a non-static data member that
// has a brace-or-equal-initializer, the entity is initialized as specified
// in [dcl.init].
- if (Field->hasInClassInitializer()) {
+ if (Field->hasInClassInitializer() && !Info.isImplicitCopyOrMove()) {
CXXCtorInitializer *Init;
if (Indirect)
Init = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Indirect,
@@ -2686,6 +2803,10 @@ static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
(Indirect && isWithinAnonymousUnion(Indirect)))
return false;
+ // Don't initialize incomplete or zero-length arrays.
+ if (isIncompleteOrZeroLengthArrayType(SemaRef.Context, Field->getType()))
+ return false;
+
// Don't try to build an implicit initializer if there were semantic
// errors in any of the initializers (and therefore we might be
// missing some that the user actually wrote).
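// Illustrative (hypothetical user code): for  struct S { int x = 42; };  the
// implicit default constructor uses the in-class initializer, while an
// implicit copy or move constructor copies the member instead, hence the
// isImplicitCopyOrMove() guard above.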
@@ -2714,7 +2835,7 @@ Sema::SetDelegatingInitializer(CXXConstructorDecl *Constructor,
Constructor->setCtorInitializers(initializer);
if (CXXDestructorDecl *Dtor = LookupDestructor(Constructor->getParent())) {
- MarkDeclarationReferenced(Initializer->getSourceLocation(), Dtor);
+ MarkFunctionReferenced(Initializer->getSourceLocation(), Dtor);
DiagnoseUseOfDecl(Dtor, Initializer->getSourceLocation());
}
@@ -2824,13 +2945,7 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor,
// initialized.
if (F->isUnnamedBitfield())
continue;
-
- if (F->getType()->isIncompleteArrayType()) {
- assert(ClassDecl->hasFlexibleArrayMember() &&
- "Incomplete array type is not valid");
- continue;
- }
-
+
// If we're not generating the implicit copy/move constructor, then we'll
// handle anonymous struct/union fields based on their individual
// indirect fields.
@@ -2997,12 +3112,12 @@ DiagnoseBaseOrMemInitializerOrder(Sema &SemaRef,
if (PrevInit->isAnyMemberInitializer())
D << 0 << PrevInit->getAnyMember()->getDeclName();
else
- D << 1 << PrevInit->getBaseClassInfo()->getType();
+ D << 1 << PrevInit->getTypeSourceInfo()->getType();
if (Init->isAnyMemberInitializer())
D << 0 << Init->getAnyMember()->getDeclName();
else
- D << 1 << Init->getBaseClassInfo()->getType();
+ D << 1 << Init->getTypeSourceInfo()->getType();
// Move back to the initializer's location in the ideal list.
for (IdealIndex = 0; IdealIndex != NumIdealInits; ++IdealIndex)
@@ -3053,11 +3168,9 @@ bool CheckRedundantUnionInit(Sema &S,
RedundantUnionMap &Unions) {
FieldDecl *Field = Init->getAnyMember();
RecordDecl *Parent = Field->getParent();
- if (!Parent->isAnonymousStructOrUnion())
- return false;
-
NamedDecl *Child = Field;
- do {
+
+ while (Parent->isAnonymousStructOrUnion() || Parent->isUnion()) {
if (Parent->isUnion()) {
UnionEntry &En = Unions[Parent];
if (En.first && En.first != Child) {
@@ -3068,15 +3181,18 @@ bool CheckRedundantUnionInit(Sema &S,
S.Diag(En.second->getSourceLocation(), diag::note_previous_initializer)
<< 0 << En.second->getSourceRange();
return true;
- } else if (!En.first) {
+ }
+ if (!En.first) {
En.first = Child;
En.second = Init;
}
+ if (!Parent->isAnonymousStructOrUnion())
+ return false;
}
Child = Parent;
Parent = cast<RecordDecl>(Parent->getDeclContext());
- } while (Parent->isAnonymousStructOrUnion());
+ }
return false;
}
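// Illustrative (hypothetical user code): the walk above diagnoses, e.g.
//   struct S {
//     union { int a; int b; };
//     S() : a(1), b(2) {}  // error: 'b' initializes the same union as 'a'
//   };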
@@ -3171,6 +3287,11 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
FieldDecl *Field = *I;
if (Field->isInvalidDecl())
continue;
+
+ // Don't destroy incomplete or zero-length arrays.
+ if (isIncompleteOrZeroLengthArrayType(Context, Field->getType()))
+ continue;
+
QualType FieldType = Context.getBaseElementType(Field->getType());
const RecordType* RT = FieldType->getAs<RecordType>();
@@ -3180,7 +3301,10 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
if (FieldClassDecl->isInvalidDecl())
continue;
- if (FieldClassDecl->hasTrivialDestructor())
+ if (FieldClassDecl->hasIrrelevantDestructor())
+ continue;
+ // The destructor for an implicit anonymous union member is never invoked.
+ if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
continue;
CXXDestructorDecl *Dtor = LookupDestructor(FieldClassDecl);
@@ -3190,7 +3314,8 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
<< Field->getDeclName()
<< FieldType);
- MarkDeclarationReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ MarkFunctionReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ DiagnoseUseOfDecl(Dtor, Location);
}
llvm::SmallPtrSet<const RecordType *, 8> DirectVirtualBases;
@@ -3209,20 +3334,21 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
// If our base class is invalid, we probably can't get its dtor anyway.
if (BaseClassDecl->isInvalidDecl())
continue;
- // Ignore trivial destructors.
- if (BaseClassDecl->hasTrivialDestructor())
+ if (BaseClassDecl->hasIrrelevantDestructor())
continue;
CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
assert(Dtor && "No dtor found for BaseClassDecl!");
// FIXME: caret should be on the start of the class name
- CheckDestructorAccess(Base->getSourceRange().getBegin(), Dtor,
+ CheckDestructorAccess(Base->getLocStart(), Dtor,
PDiag(diag::err_access_dtor_base)
<< Base->getType()
- << Base->getSourceRange());
+ << Base->getSourceRange(),
+ Context.getTypeDeclType(ClassDecl));
- MarkDeclarationReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ MarkFunctionReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ DiagnoseUseOfDecl(Dtor, Location);
}
// Virtual bases.
@@ -3230,7 +3356,7 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
E = ClassDecl->vbases_end(); VBase != E; ++VBase) {
// Bases are always records in a well-formed non-dependent class.
- const RecordType *RT = VBase->getType()->getAs<RecordType>();
+ const RecordType *RT = VBase->getType()->castAs<RecordType>();
// Ignore direct virtual bases.
if (DirectVirtualBases.count(RT))
@@ -3240,17 +3366,18 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
// If our base class is invalid, we probably can't get its dtor anyway.
if (BaseClassDecl->isInvalidDecl())
continue;
- // Ignore trivial destructors.
- if (BaseClassDecl->hasTrivialDestructor())
+ if (BaseClassDecl->hasIrrelevantDestructor())
continue;
CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
assert(Dtor && "No dtor found for BaseClassDecl!");
CheckDestructorAccess(ClassDecl->getLocation(), Dtor,
PDiag(diag::err_access_dtor_vbase)
- << VBase->getType());
+ << VBase->getType(),
+ Context.getTypeDeclType(ClassDecl));
- MarkDeclarationReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ MarkFunctionReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ DiagnoseUseOfDecl(Dtor, Location);
}
}
@@ -3273,7 +3400,7 @@ bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
const PartialDiagnostic &PD) {
- if (!getLangOptions().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
return false;
if (const ArrayType *AT = Context.getAsArrayType(T))
@@ -3538,7 +3665,8 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
// complain about any non-static data members of reference or const scalar
// type, since they will never get initializers.
if (!Record->isInvalidDecl() && !Record->isDependentType() &&
- !Record->isAggregate() && !Record->hasUserDeclaredConstructor()) {
+ !Record->isAggregate() && !Record->hasUserDeclaredConstructor() &&
+ !Record->isLambda()) {
bool Complained = false;
for (RecordDecl::field_iterator F = Record->field_begin(),
FEnd = Record->field_end();
@@ -3609,9 +3737,6 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
// const. [...] The class of which that function is a member shall be
// a literal type.
//
- // It's fine to diagnose constructors here too: such constructors cannot
- // produce a constant expression, so are ill-formed (no diagnostic required).
- //
// If the class has virtual bases, any constexpr members will already have
// been diagnosed by the checks performed on the member declaration, so
// suppress this (less useful) diagnostic.
@@ -3620,16 +3745,14 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
for (CXXRecordDecl::method_iterator M = Record->method_begin(),
MEnd = Record->method_end();
M != MEnd; ++M) {
- if ((*M)->isConstexpr()) {
+ if (M->isConstexpr() && M->isInstance() && !isa<CXXConstructorDecl>(*M)) {
switch (Record->getTemplateSpecializationKind()) {
case TSK_ImplicitInstantiation:
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
// If a template instantiates to a non-literal type, but its members
// instantiate to constexpr functions, the template is technically
- // ill-formed, but we allow it for sanity. Such members are treated as
- // non-constexpr.
- (*M)->setConstexpr(false);
+ // ill-formed, but we allow it for sanity.
continue;
case TSK_Undeclared:
@@ -3725,6 +3848,21 @@ void Sema::CheckExplicitlyDefaultedDefaultConstructor(CXXConstructorDecl *CD) {
*ExceptionType = Context.getFunctionType(
Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+ // C++11 [dcl.fct.def.default]p2:
+ // An explicitly-defaulted function may be declared constexpr only if it
+ // would have been implicitly declared as constexpr,
+ // Do not apply this rule to templates, since core issue 1358 makes such
+ // functions always instantiate to constexpr functions.
+ if (CD->isConstexpr() &&
+ CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
+ if (!CD->getParent()->defaultedDefaultConstructorIsConstexpr()) {
+ Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
+ << CXXDefaultConstructor;
+ HadError = true;
+ }
+ }
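+  // Illustrative (hypothetical user code): the check above rejects, e.g.
+  //   struct A { int n; constexpr A() = default; };
+  // because the implicitly-declared default constructor would leave 'n'
+  // uninitialized and so would not be constexpr.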
+ // and may have an explicit exception-specification only if it is compatible
+ // with the exception-specification on the implicit declaration.
if (CtorType->hasExceptionSpec()) {
if (CheckEquivalentExceptionSpec(
PDiag(diag::err_incorrect_defaulted_exception_spec)
@@ -3734,11 +3872,24 @@ void Sema::CheckExplicitlyDefaultedDefaultConstructor(CXXConstructorDecl *CD) {
CtorType, CD->getLocation())) {
HadError = true;
}
- } else if (First) {
- // We set the declaration to have the computed exception spec here.
- // We know there are no parameters.
+ }
+
+ // If a function is explicitly defaulted on its first declaration,
+ if (First) {
+ // -- it is implicitly considered to be constexpr if the implicit
+ // definition would be,
+ CD->setConstexpr(CD->getParent()->defaultedDefaultConstructorIsConstexpr());
+
+ // -- it is implicitly considered to have the same
+ // exception-specification as if it had been implicitly declared
+ //
+ // FIXME: a compatible, but different, explicit exception specification
+ // will be silently overridden. We should issue a warning if this happens.
EPI.ExtInfo = CtorType->getExtInfo();
- CD->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ CD->setTrivial(CD->getParent()->hasTrivialDefaultConstructor());
}
if (HadError) {
@@ -3792,6 +3943,21 @@ void Sema::CheckExplicitlyDefaultedCopyConstructor(CXXConstructorDecl *CD) {
HadError = true;
}
+ // C++11 [dcl.fct.def.default]p2:
+ // An explicitly-defaulted function may be declared constexpr only if it
+ // would have been implicitly declared as constexpr,
+ // Do not apply this rule to templates, since core issue 1358 makes such
+ // functions always instantiate to constexpr functions.
+ if (CD->isConstexpr() &&
+ CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
+ if (!CD->getParent()->defaultedCopyConstructorIsConstexpr()) {
+ Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
+ << CXXCopyConstructor;
+ HadError = true;
+ }
+ }
+ // and may have an explicit exception-specification only if it is compatible
+ // with the exception-specification on the implicit declaration.
if (CtorType->hasExceptionSpec()) {
if (CheckEquivalentExceptionSpec(
PDiag(diag::err_incorrect_defaulted_exception_spec)
@@ -3801,11 +3967,28 @@ void Sema::CheckExplicitlyDefaultedCopyConstructor(CXXConstructorDecl *CD) {
CtorType, CD->getLocation())) {
HadError = true;
}
- } else if (First) {
- // We set the declaration to have the computed exception spec here.
- // We duplicate the one parameter type.
+ }
+
+ // If a function is explicitly defaulted on its first declaration,
+ if (First) {
+ // -- it is implicitly considered to be constexpr if the implicit
+ // definition would be,
+ CD->setConstexpr(CD->getParent()->defaultedCopyConstructorIsConstexpr());
+
+ // -- it is implicitly considered to have the same
+ // exception-specification as if it had been implicitly declared, and
+ //
+ // FIXME: a compatible, but different, explicit exception specification
+ // will be silently overridden. We should issue a warning if this happens.
EPI.ExtInfo = CtorType->getExtInfo();
+
+ // -- [...] it shall have the same parameter type as if it had been
+ // implicitly declared.
CD->setType(Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ CD->setTrivial(CD->getParent()->hasTrivialCopyConstructor());
}
if (HadError) {
@@ -3886,12 +4069,17 @@ void Sema::CheckExplicitlyDefaultedCopyAssignment(CXXMethodDecl *MD) {
OperType, MD->getLocation())) {
HadError = true;
}
- } else if (First) {
+ }
+ if (First) {
// We set the declaration to have the computed exception spec here.
// We duplicate the one parameter type.
EPI.RefQualifier = OperType->getRefQualifier();
EPI.ExtInfo = OperType->getExtInfo();
MD->setType(Context.getFunctionType(ReturnType, &ArgType, 1, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ MD->setTrivial(MD->getParent()->hasTrivialCopyAssignment());
}
if (HadError) {
@@ -3899,7 +4087,7 @@ void Sema::CheckExplicitlyDefaultedCopyAssignment(CXXMethodDecl *MD) {
return;
}
- if (ShouldDeleteCopyAssignmentOperator(MD)) {
+ if (ShouldDeleteSpecialMember(MD, CXXCopyAssignment)) {
if (First) {
MD->setDeletedAsWritten();
} else {
@@ -3943,6 +4131,21 @@ void Sema::CheckExplicitlyDefaultedMoveConstructor(CXXConstructorDecl *CD) {
HadError = true;
}
+ // C++11 [dcl.fct.def.default]p2:
+ // An explicitly-defaulted function may be declared constexpr only if it
+ // would have been implicitly declared as constexpr,
+ // Do not apply this rule to templates, since core issue 1358 makes such
+ // functions always instantiate to constexpr functions.
+ if (CD->isConstexpr() &&
+ CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
+ if (!CD->getParent()->defaultedMoveConstructorIsConstexpr()) {
+ Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
+ << CXXMoveConstructor;
+ HadError = true;
+ }
+ }
+ // and may have an explicit exception-specification only if it is compatible
+ // with the exception-specification on the implicit declaration.
if (CtorType->hasExceptionSpec()) {
if (CheckEquivalentExceptionSpec(
PDiag(diag::err_incorrect_defaulted_exception_spec)
@@ -3952,11 +4155,28 @@ void Sema::CheckExplicitlyDefaultedMoveConstructor(CXXConstructorDecl *CD) {
CtorType, CD->getLocation())) {
HadError = true;
}
- } else if (First) {
- // We set the declaration to have the computed exception spec here.
- // We duplicate the one parameter type.
+ }
+
+ // If a function is explicitly defaulted on its first declaration,
+ if (First) {
+ // -- it is implicitly considered to be constexpr if the implicit
+ // definition would be,
+ CD->setConstexpr(CD->getParent()->defaultedMoveConstructorIsConstexpr());
+
+ // -- it is implicitly considered to have the same
+ // exception-specification as if it had been implicitly declared, and
+ //
+ // FIXME: a compatible, but different, explicit exception specification
+ // will be silently overridden. We should issue a warning if this happens.
EPI.ExtInfo = CtorType->getExtInfo();
+
+ // -- [...] it shall have the same parameter type as if it had been
+ // implicitly declared.
CD->setType(Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ CD->setTrivial(CD->getParent()->hasTrivialMoveConstructor());
}
if (HadError) {
@@ -4035,12 +4255,17 @@ void Sema::CheckExplicitlyDefaultedMoveAssignment(CXXMethodDecl *MD) {
OperType, MD->getLocation())) {
HadError = true;
}
- } else if (First) {
+ }
+ if (First) {
// We set the declaration to have the computed exception spec here.
// We duplicate the one parameter type.
EPI.RefQualifier = OperType->getRefQualifier();
EPI.ExtInfo = OperType->getExtInfo();
MD->setType(Context.getFunctionType(ReturnType, &ArgType, 1, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ MD->setTrivial(MD->getParent()->hasTrivialMoveAssignment());
}
if (HadError) {
@@ -4048,7 +4273,7 @@ void Sema::CheckExplicitlyDefaultedMoveAssignment(CXXMethodDecl *MD) {
return;
}
- if (ShouldDeleteMoveAssignmentOperator(MD)) {
+ if (ShouldDeleteSpecialMember(MD, CXXMoveAssignment)) {
if (First) {
MD->setDeletedAsWritten();
} else {
@@ -4082,14 +4307,19 @@ void Sema::CheckExplicitlyDefaultedDestructor(CXXDestructorDecl *DD) {
DD->setInvalidDecl();
return;
}
- } else if (First) {
+ }
+ if (First) {
// We set the declaration to have the computed exception spec here.
// There are no parameters.
EPI.ExtInfo = DtorType->getExtInfo();
DD->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ DD->setTrivial(DD->getParent()->hasTrivialDestructor());
}
- if (ShouldDeleteDestructor(DD)) {
+ if (ShouldDeleteSpecialMember(DD, CXXDestructor)) {
if (First) {
DD->setDeletedAsWritten();
} else {
@@ -4100,652 +4330,412 @@ void Sema::CheckExplicitlyDefaultedDestructor(CXXDestructorDecl *DD) {
}
}
-/// This function implements the following C++0x paragraphs:
-/// - [class.ctor]/5
-/// - [class.copy]/11
-bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM) {
- assert(!MD->isInvalidDecl());
- CXXRecordDecl *RD = MD->getParent();
- assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
- return false;
-
- bool IsUnion = RD->isUnion();
- bool IsConstructor = false;
- bool IsAssignment = false;
- bool IsMove = false;
-
- bool ConstArg = false;
+namespace {
+struct SpecialMemberDeletionInfo {
+ Sema &S;
+ CXXMethodDecl *MD;
+ Sema::CXXSpecialMember CSM;
+ bool Diagnose;
+
+ // Properties of the special member, computed for convenience.
+ bool IsConstructor, IsAssignment, IsMove, ConstArg, VolatileArg;
+ SourceLocation Loc;
+
+ bool AllFieldsAreConst;
+
+ SpecialMemberDeletionInfo(Sema &S, CXXMethodDecl *MD,
+ Sema::CXXSpecialMember CSM, bool Diagnose)
+ : S(S), MD(MD), CSM(CSM), Diagnose(Diagnose),
+ IsConstructor(false), IsAssignment(false), IsMove(false),
+ ConstArg(false), VolatileArg(false), Loc(MD->getLocation()),
+ AllFieldsAreConst(true) {
+ switch (CSM) {
+ case Sema::CXXDefaultConstructor:
+ case Sema::CXXCopyConstructor:
+ IsConstructor = true;
+ break;
+ case Sema::CXXMoveConstructor:
+ IsConstructor = true;
+ IsMove = true;
+ break;
+ case Sema::CXXCopyAssignment:
+ IsAssignment = true;
+ break;
+ case Sema::CXXMoveAssignment:
+ IsAssignment = true;
+ IsMove = true;
+ break;
+ case Sema::CXXDestructor:
+ break;
+ case Sema::CXXInvalid:
+ llvm_unreachable("invalid special member kind");
+ }
- switch (CSM) {
- case CXXDefaultConstructor:
- IsConstructor = true;
- break;
- case CXXCopyConstructor:
- IsConstructor = true;
- ConstArg = MD->getParamDecl(0)->getType().isConstQualified();
- break;
- case CXXMoveConstructor:
- IsConstructor = true;
- IsMove = true;
- break;
- default:
- llvm_unreachable("function only currently implemented for default ctors");
+ if (MD->getNumParams()) {
+ ConstArg = MD->getParamDecl(0)->getType().isConstQualified();
+ VolatileArg = MD->getParamDecl(0)->getType().isVolatileQualified();
+ }
}
- SourceLocation Loc = MD->getLocation();
-
- // Do access control from the special member function
- ContextRAII MethodContext(*this, MD);
-
- bool AllConst = true;
+ bool inUnion() const { return MD->getParent()->isUnion(); }
- // We do this because we should never actually use an anonymous
- // union's constructor.
- if (IsUnion && RD->isAnonymousStructOrUnion())
- return false;
+ /// Look up the corresponding special member in the given class.
+ Sema::SpecialMemberOverloadResult *lookupIn(CXXRecordDecl *Class) {
+ unsigned TQ = MD->getTypeQualifiers();
+ return S.LookupSpecialMember(Class, CSM, ConstArg, VolatileArg,
+ MD->getRefQualifier() == RQ_RValue,
+ TQ & Qualifiers::Const,
+ TQ & Qualifiers::Volatile);
+ }
- // FIXME: We should put some diagnostic logic right into this function.
+ typedef llvm::PointerUnion<CXXBaseSpecifier*, FieldDecl*> Subobject;
- for (CXXRecordDecl::base_class_iterator BI = RD->bases_begin(),
- BE = RD->bases_end();
- BI != BE; ++BI) {
- // We'll handle this one later
- if (BI->isVirtual())
- continue;
+ bool shouldDeleteForBase(CXXBaseSpecifier *Base);
+ bool shouldDeleteForField(FieldDecl *FD);
+ bool shouldDeleteForAllConstMembers();
- CXXRecordDecl *BaseDecl = BI->getType()->getAsCXXRecordDecl();
- assert(BaseDecl && "base isn't a CXXRecordDecl");
+ bool shouldDeleteForClassSubobject(CXXRecordDecl *Class, Subobject Subobj);
+ bool shouldDeleteForSubobjectCall(Subobject Subobj,
+ Sema::SpecialMemberOverloadResult *SMOR,
+ bool IsDtorCallInCtor);
- // Unless we have an assignment operator, the base's destructor must
- // be accessible and not deleted.
- if (!IsAssignment) {
- CXXDestructorDecl *BaseDtor = LookupDestructor(BaseDecl);
- if (BaseDtor->isDeleted())
- return true;
- if (CheckDestructorAccess(Loc, BaseDtor, PDiag()) !=
- AR_accessible)
- return true;
- }
+ bool isAccessible(Subobject Subobj, CXXMethodDecl *D);
+};
+}
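+// Illustrative (hypothetical user code): with Diagnose set, callers can
+// explain deletions such as
+//   struct NonTrivial { ~NonTrivial(); };
+//   union U { NonTrivial m; };  // U's implicit destructor is deleted
+// by pointing at the subobject responsible.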
- // Finding the corresponding member in the base should lead to a
- // unique, accessible, non-deleted function. If we are doing
- // a destructor, we have already checked this case.
- if (CSM != CXXDestructor) {
- SpecialMemberOverloadResult *SMOR =
- LookupSpecialMember(BaseDecl, CSM, ConstArg, false, false, false,
- false);
- if (!SMOR->hasSuccess())
- return true;
- CXXMethodDecl *BaseMember = SMOR->getMethod();
- if (IsConstructor) {
- CXXConstructorDecl *BaseCtor = cast<CXXConstructorDecl>(BaseMember);
- if (CheckConstructorAccess(Loc, BaseCtor, BaseCtor->getAccess(),
- PDiag()) != AR_accessible)
- return true;
+/// Is the given special member inaccessible when used on the given
+/// sub-object.
+bool SpecialMemberDeletionInfo::isAccessible(Subobject Subobj,
+ CXXMethodDecl *target) {
+  /// If we're operating on a base class, the object type is the
+  /// type of the class containing this special member.
+  QualType objectTy;
+  AccessSpecifier access = target->getAccess();
+ if (CXXBaseSpecifier *base = Subobj.dyn_cast<CXXBaseSpecifier*>()) {
+ objectTy = S.Context.getTypeDeclType(MD->getParent());
+ access = CXXRecordDecl::MergeAccess(base->getAccessSpecifier(), access);
- // For a move operation, the corresponding operation must actually
- // be a move operation (and not a copy selected by overload
- // resolution) unless we are working on a trivially copyable class.
- if (IsMove && !BaseCtor->isMoveConstructor() &&
- !BaseDecl->isTriviallyCopyable())
- return true;
- }
- }
+ // If we're operating on a field, the object type is the type of the field.
+ } else {
+ objectTy = S.Context.getTypeDeclType(target->getParent());
}
- for (CXXRecordDecl::base_class_iterator BI = RD->vbases_begin(),
- BE = RD->vbases_end();
- BI != BE; ++BI) {
- CXXRecordDecl *BaseDecl = BI->getType()->getAsCXXRecordDecl();
- assert(BaseDecl && "base isn't a CXXRecordDecl");
-
- // Unless we have an assignment operator, the base's destructor must
- // be accessible and not deleted.
- if (!IsAssignment) {
- CXXDestructorDecl *BaseDtor = LookupDestructor(BaseDecl);
- if (BaseDtor->isDeleted())
- return true;
- if (CheckDestructorAccess(Loc, BaseDtor, PDiag()) !=
- AR_accessible)
- return true;
- }
+ return S.isSpecialMemberAccessibleForDeletion(target, access, objectTy);
+}
- // Finding the corresponding member in the base should lead to a
- // unique, accessible, non-deleted function.
- if (CSM != CXXDestructor) {
- SpecialMemberOverloadResult *SMOR =
- LookupSpecialMember(BaseDecl, CSM, ConstArg, false, false, false,
- false);
- if (!SMOR->hasSuccess())
- return true;
- CXXMethodDecl *BaseMember = SMOR->getMethod();
- if (IsConstructor) {
- CXXConstructorDecl *BaseCtor = cast<CXXConstructorDecl>(BaseMember);
- if (CheckConstructorAccess(Loc, BaseCtor, BaseCtor->getAccess(),
- PDiag()) != AR_accessible)
- return true;
+/// Check whether we should delete a special member due to the implicit
+/// definition containing a call to a special member of a subobject.
+bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
+ Subobject Subobj, Sema::SpecialMemberOverloadResult *SMOR,
+ bool IsDtorCallInCtor) {
+ CXXMethodDecl *Decl = SMOR->getMethod();
+ FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
- // For a move operation, the corresponding operation must actually
- // be a move operation (and not a copy selected by overload
- // resolution) unless we are working on a trivially copyable class.
- if (IsMove && !BaseCtor->isMoveConstructor() &&
- !BaseDecl->isTriviallyCopyable())
- return true;
- }
- }
- }
+ int DiagKind = -1;
- for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
- FE = RD->field_end();
- FI != FE; ++FI) {
- if (FI->isInvalidDecl() || FI->isUnnamedBitfield())
- continue;
-
- QualType FieldType = Context.getBaseElementType(FI->getType());
- CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();
+ if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::NoMemberOrDeleted)
+ DiagKind = !Decl ? 0 : 1;
+ else if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::Ambiguous)
+ DiagKind = 2;
+ else if (!isAccessible(Subobj, Decl))
+ DiagKind = 3;
+ else if (!IsDtorCallInCtor && Field && Field->getParent()->isUnion() &&
+ !Decl->isTrivial()) {
+ // A member of a union must have a trivial corresponding special member.
+ // As a weird special case, a destructor call from a union's constructor
+ // must be accessible and non-deleted, but need not be trivial. Such a
+ // destructor is never actually called, but is semantically checked as
+ // if it were.
+ DiagKind = 4;
+ }
- // For a default constructor, all references must be initialized in-class
- // and, if a union, it must have a non-const member.
- if (CSM == CXXDefaultConstructor) {
- if (FieldType->isReferenceType() && !FI->hasInClassInitializer())
- return true;
+ if (DiagKind == -1)
+ return false;
- if (IsUnion && !FieldType.isConstQualified())
- AllConst = false;
- // For a copy constructor, data members must not be of rvalue reference
- // type.
- } else if (CSM == CXXCopyConstructor) {
- if (FieldType->isRValueReferenceType())
- return true;
+ if (Diagnose) {
+ if (Field) {
+ S.Diag(Field->getLocation(),
+ diag::note_deleted_special_member_class_subobject)
+ << CSM << MD->getParent() << /*IsField*/true
+ << Field << DiagKind << IsDtorCallInCtor;
+ } else {
+ CXXBaseSpecifier *Base = Subobj.get<CXXBaseSpecifier*>();
+ S.Diag(Base->getLocStart(),
+ diag::note_deleted_special_member_class_subobject)
+ << CSM << MD->getParent() << /*IsField*/false
+ << Base->getType() << DiagKind << IsDtorCallInCtor;
}
- if (FieldRecord) {
- // For a default constructor, a const member must have a user-provided
- // default constructor or else be explicitly initialized.
- if (CSM == CXXDefaultConstructor && FieldType.isConstQualified() &&
- !FI->hasInClassInitializer() &&
- !FieldRecord->hasUserProvidedDefaultConstructor())
- return true;
-
- // Some additional restrictions exist on the variant members.
- if (!IsUnion && FieldRecord->isUnion() &&
- FieldRecord->isAnonymousStructOrUnion()) {
- // We're okay to reuse AllConst here since we only care about the
- // value otherwise if we're in a union.
- AllConst = true;
-
- for (CXXRecordDecl::field_iterator UI = FieldRecord->field_begin(),
- UE = FieldRecord->field_end();
- UI != UE; ++UI) {
- QualType UnionFieldType = Context.getBaseElementType(UI->getType());
- CXXRecordDecl *UnionFieldRecord =
- UnionFieldType->getAsCXXRecordDecl();
-
- if (!UnionFieldType.isConstQualified())
- AllConst = false;
-
- if (UnionFieldRecord) {
- // FIXME: Checking for accessibility and validity of this
- // destructor is technically going beyond the
- // standard, but this is believed to be a defect.
- if (!IsAssignment) {
- CXXDestructorDecl *FieldDtor = LookupDestructor(UnionFieldRecord);
- if (FieldDtor->isDeleted())
- return true;
- if (CheckDestructorAccess(Loc, FieldDtor, PDiag()) !=
- AR_accessible)
- return true;
- if (!FieldDtor->isTrivial())
- return true;
- }
-
- if (CSM != CXXDestructor) {
- SpecialMemberOverloadResult *SMOR =
- LookupSpecialMember(UnionFieldRecord, CSM, ConstArg, false,
- false, false, false);
- // FIXME: Checking for accessibility and validity of this
- // corresponding member is technically going beyond the
- // standard, but this is believed to be a defect.
- if (!SMOR->hasSuccess())
- return true;
-
- CXXMethodDecl *FieldMember = SMOR->getMethod();
- // A member of a union must have a trivial corresponding
- // constructor.
- if (!FieldMember->isTrivial())
- return true;
-
- if (IsConstructor) {
- CXXConstructorDecl *FieldCtor = cast<CXXConstructorDecl>(FieldMember);
- if (CheckConstructorAccess(Loc, FieldCtor, FieldCtor->getAccess(),
- PDiag()) != AR_accessible)
- return true;
- }
- }
- }
- }
-
- // At least one member in each anonymous union must be non-const
- if (CSM == CXXDefaultConstructor && AllConst)
- return true;
-
- // Don't try to initialize the anonymous union
- // This is technically non-conformant, but sanity demands it.
- continue;
- }
-
- // Unless we're doing assignment, the field's destructor must be
- // accessible and not deleted.
- if (!IsAssignment) {
- CXXDestructorDecl *FieldDtor = LookupDestructor(FieldRecord);
- if (FieldDtor->isDeleted())
- return true;
- if (CheckDestructorAccess(Loc, FieldDtor, PDiag()) !=
- AR_accessible)
- return true;
- }
-
- // Check that the corresponding member of the field is accessible,
- // unique, and non-deleted. We don't do this if it has an explicit
- // initialization when default-constructing.
- if (CSM != CXXDestructor &&
- (CSM != CXXDefaultConstructor || !FI->hasInClassInitializer())) {
- SpecialMemberOverloadResult *SMOR =
- LookupSpecialMember(FieldRecord, CSM, ConstArg, false, false, false,
- false);
- if (!SMOR->hasSuccess())
- return true;
+ if (DiagKind == 1)
+ S.NoteDeletedFunction(Decl);
+ // FIXME: Explain inaccessibility if DiagKind == 3.
+ }
- CXXMethodDecl *FieldMember = SMOR->getMethod();
- if (IsConstructor) {
- CXXConstructorDecl *FieldCtor = cast<CXXConstructorDecl>(FieldMember);
- if (CheckConstructorAccess(Loc, FieldCtor, FieldCtor->getAccess(),
- PDiag()) != AR_accessible)
- return true;
+ return true;
+}
- // For a move operation, the corresponding operation must actually
- // be a move operation (and not a copy selected by overload
- // resolution) unless we are working on a trivially copyable class.
- if (IsMove && !FieldCtor->isMoveConstructor() &&
- !FieldRecord->isTriviallyCopyable())
- return true;
- }
+/// Check whether we should delete a special member function due to having a
+/// direct or virtual base class or static data member of class type M.
+bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
+ CXXRecordDecl *Class, Subobject Subobj) {
+ FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
+
+ // C++11 [class.ctor]p5:
+ // -- any direct or virtual base class, or non-static data member with no
+ // brace-or-equal-initializer, has class type M (or array thereof) and
+ // either M has no default constructor or overload resolution as applied
+ // to M's default constructor results in an ambiguity or in a function
+ // that is deleted or inaccessible
+ // C++11 [class.copy]p11, C++11 [class.copy]p23:
+ // -- a direct or virtual base class B that cannot be copied/moved because
+ // overload resolution, as applied to B's corresponding special member,
+ // results in an ambiguity or a function that is deleted or inaccessible
+ // from the defaulted special member
+ // C++11 [class.dtor]p5:
+ // -- any direct or virtual base class [...] has a type with a destructor
+ // that is deleted or inaccessible
+ if (!(CSM == Sema::CXXDefaultConstructor &&
+ Field && Field->hasInClassInitializer()) &&
+ shouldDeleteForSubobjectCall(Subobj, lookupIn(Class), false))
+ return true;
- // We need the corresponding member of a union to be trivial so that
- // we can safely copy them all simultaneously.
- // FIXME: Note that performing the check here (where we rely on the lack
- // of an in-class initializer) is technically ill-formed. However, this
- // seems most obviously to be a bug in the standard.
- if (IsUnion && !FieldMember->isTrivial())
- return true;
- }
- } else if (CSM == CXXDefaultConstructor && !IsUnion &&
- FieldType.isConstQualified() && !FI->hasInClassInitializer()) {
- // We can't initialize a const member of non-class type to any value.
+ // C++11 [class.ctor]p5, C++11 [class.copy]p11:
+ // -- any direct or virtual base class or non-static data member has a
+ // type with a destructor that is deleted or inaccessible
+ if (IsConstructor) {
+ Sema::SpecialMemberOverloadResult *SMOR =
+ S.LookupSpecialMember(Class, Sema::CXXDestructor,
+ false, false, false, false, false);
+ if (shouldDeleteForSubobjectCall(Subobj, SMOR, true))
return true;
- }
}
- // We can't have all const members in a union when default-constructing,
- // or else they're all nonsensical garbage values that can't be changed.
- if (CSM == CXXDefaultConstructor && IsUnion && AllConst)
- return true;
-
return false;
}
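// Illustrative (hypothetical user code): the subobject checks above delete,
// for example, the implicit default constructor of
//   struct NoDef { NoDef(int); };
//   struct S { NoDef n; };  // NoDef has no default ctor, so S's is deleted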
-bool Sema::ShouldDeleteCopyAssignmentOperator(CXXMethodDecl *MD) {
- CXXRecordDecl *RD = MD->getParent();
- assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
- return false;
-
- SourceLocation Loc = MD->getLocation();
-
- // Do access control from the constructor
- ContextRAII MethodContext(*this, MD);
-
- bool Union = RD->isUnion();
-
- unsigned ArgQuals =
- MD->getParamDecl(0)->getType()->getPointeeType().isConstQualified() ?
- Qualifiers::Const : 0;
-
- // We do this because we should never actually use an anonymous
- // union's constructor.
- if (Union && RD->isAnonymousStructOrUnion())
- return false;
-
- // FIXME: We should put some diagnostic logic right into this function.
-
- // C++0x [class.copy]/20
- // A defaulted [copy] assignment operator for class X is defined as deleted
- // if X has:
+/// Check whether we should delete a special member function due to the class
+/// having a particular direct or virtual base class.
+bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
+ CXXRecordDecl *BaseClass = Base->getType()->getAsCXXRecordDecl();
+ return shouldDeleteForClassSubobject(BaseClass, Base);
+}
- for (CXXRecordDecl::base_class_iterator BI = RD->bases_begin(),
- BE = RD->bases_end();
- BI != BE; ++BI) {
- // We'll handle this one later
- if (BI->isVirtual())
- continue;
+/// Check whether we should delete a special member function due to the class
+/// having a particular non-static data member.
+bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
+ QualType FieldType = S.Context.getBaseElementType(FD->getType());
+ CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();
- QualType BaseType = BI->getType();
- CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl();
- assert(BaseDecl && "base isn't a CXXRecordDecl");
-
- // -- a [direct base class] B that cannot be [copied] because overload
- // resolution, as applied to B's [copy] assignment operator, results in
- // an ambiguity or a function that is deleted or inaccessible from the
- // assignment operator
- CXXMethodDecl *CopyOper = LookupCopyingAssignment(BaseDecl, ArgQuals, false,
- 0);
- if (!CopyOper || CopyOper->isDeleted())
+ if (CSM == Sema::CXXDefaultConstructor) {
+ // For a default constructor, all references must be initialized in-class
+ // and, if a union, it must have a non-const member.
+ if (FieldType->isReferenceType() && !FD->hasInClassInitializer()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
+ << MD->getParent() << FD << FieldType << /*Reference*/0;
return true;
- if (CheckDirectMemberAccess(Loc, CopyOper, PDiag()) != AR_accessible)
+ }
+ // C++11 [class.ctor]p5: any non-variant non-static data member of
+ // const-qualified type (or array thereof) with no
+ // brace-or-equal-initializer does not have a user-provided default
+ // constructor.
+ if (!inUnion() && FieldType.isConstQualified() &&
+ !FD->hasInClassInitializer() &&
+ (!FieldRecord || !FieldRecord->hasUserProvidedDefaultConstructor())) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
+ << MD->getParent() << FD << FieldType << /*Const*/1;
return true;
- }
+ }
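+    // Illustrative (hypothetical user code): both branches above fire for
+    //   struct R { int &r; };       // reference member, no initializer
+    //   struct C { const int c; };  // const non-class member, no initializer
+    // whose implicit default constructors are therefore deleted.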
- for (CXXRecordDecl::base_class_iterator BI = RD->vbases_begin(),
- BE = RD->vbases_end();
- BI != BE; ++BI) {
- QualType BaseType = BI->getType();
- CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl();
- assert(BaseDecl && "base isn't a CXXRecordDecl");
-
- // -- a [virtual base class] B that cannot be [copied] because overload
- // resolution, as applied to B's [copy] assignment operator, results in
- // an ambiguity or a function that is deleted or inaccessible from the
- // assignment operator
- CXXMethodDecl *CopyOper = LookupCopyingAssignment(BaseDecl, ArgQuals, false,
- 0);
- if (!CopyOper || CopyOper->isDeleted())
+ if (inUnion() && !FieldType.isConstQualified())
+ AllFieldsAreConst = false;
+ } else if (CSM == Sema::CXXCopyConstructor) {
+ // For a copy constructor, data members must not be of rvalue reference
+ // type.
+ if (FieldType->isRValueReferenceType()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_copy_ctor_rvalue_reference)
+ << MD->getParent() << FD << FieldType;
return true;
- if (CheckDirectMemberAccess(Loc, CopyOper, PDiag()) != AR_accessible)
+ }
+ } else if (IsAssignment) {
+ // For an assignment operator, data members must not be of reference type.
+ if (FieldType->isReferenceType()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
+ << IsMove << MD->getParent() << FD << FieldType << /*Reference*/0;
+ return true;
+ }
+ if (!FieldRecord && FieldType.isConstQualified()) {
+ // C++11 [class.copy]p23:
+ // -- a non-static data member of const non-class type (or array thereof)
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
+ << IsMove << MD->getParent() << FD << FieldType << /*Const*/1;
return true;
+ }
}
- for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
- FE = RD->field_end();
- FI != FE; ++FI) {
- if (FI->isUnnamedBitfield())
- continue;
-
- QualType FieldType = Context.getBaseElementType(FI->getType());
-
- // -- a non-static data member of reference type
- if (FieldType->isReferenceType())
- return true;
+ if (FieldRecord) {
+ // Some additional restrictions exist on the variant members.
+ if (!inUnion() && FieldRecord->isUnion() &&
+ FieldRecord->isAnonymousStructOrUnion()) {
+ bool AllVariantFieldsAreConst = true;
- // -- a non-static data member of const non-class type (or array thereof)
- if (FieldType.isConstQualified() && !FieldType->isRecordType())
- return true;
-
- CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();
-
- if (FieldRecord) {
- // This is an anonymous union
- if (FieldRecord->isUnion() && FieldRecord->isAnonymousStructOrUnion()) {
- // Anonymous unions inside unions do not create variant members
- if (!Union) {
- for (CXXRecordDecl::field_iterator UI = FieldRecord->field_begin(),
- UE = FieldRecord->field_end();
- UI != UE; ++UI) {
- QualType UnionFieldType = Context.getBaseElementType(UI->getType());
- CXXRecordDecl *UnionFieldRecord =
- UnionFieldType->getAsCXXRecordDecl();
-
- // -- a variant member with a non-trivial [copy] assignment operator
- // and X is a union-like class
- if (UnionFieldRecord &&
- !UnionFieldRecord->hasTrivialCopyAssignment())
- return true;
- }
- }
+ // FIXME: Handle anonymous unions declared within anonymous unions.
+ for (CXXRecordDecl::field_iterator UI = FieldRecord->field_begin(),
+ UE = FieldRecord->field_end();
+ UI != UE; ++UI) {
+ QualType UnionFieldType = S.Context.getBaseElementType(UI->getType());
- // Don't try to initialize an anonymous union
- continue;
- // -- a variant member with a non-trivial [copy] assignment operator
- // and X is a union-like class
- } else if (Union && !FieldRecord->hasTrivialCopyAssignment()) {
+ if (!UnionFieldType.isConstQualified())
+ AllVariantFieldsAreConst = false;
+
+ CXXRecordDecl *UnionFieldRecord = UnionFieldType->getAsCXXRecordDecl();
+ if (UnionFieldRecord &&
+ shouldDeleteForClassSubobject(UnionFieldRecord, *UI))
return true;
}
- CXXMethodDecl *CopyOper = LookupCopyingAssignment(FieldRecord, ArgQuals,
- false, 0);
- if (!CopyOper || CopyOper->isDeleted())
- return true;
- if (CheckDirectMemberAccess(Loc, CopyOper, PDiag()) != AR_accessible)
+ // At least one member in each anonymous union must be non-const
+ if (CSM == Sema::CXXDefaultConstructor && AllVariantFieldsAreConst &&
+ FieldRecord->field_begin() != FieldRecord->field_end()) {
+ if (Diagnose)
+ S.Diag(FieldRecord->getLocation(),
+ diag::note_deleted_default_ctor_all_const)
+ << MD->getParent() << /*anonymous union*/1;
return true;
+ }
+
+ // Don't check the implicit member of the anonymous union type.
+ // This is technically non-conformant, but sanity demands it.
+ return false;
}
+
+ if (shouldDeleteForClassSubobject(FieldRecord, FD))
+ return true;
}
return false;
}
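// A minimal sketch of the checks above (hypothetical example, not part of
// the patch): in C++11, either member below makes the defaulted default
// constructor deleted.
//
//   struct A {
//     int &r;       // reference member, no brace-or-equal-initializer
//     const int c;  // const non-class member, no initializer
//   };
//   A a;  // error: A::A() is implicitly defined as deleted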
-bool Sema::ShouldDeleteMoveAssignmentOperator(CXXMethodDecl *MD) {
+/// C++11 [class.ctor] p5:
+/// A defaulted default constructor for a class X is defined as deleted if
+/// X is a union and all of its variant members are of const-qualified type.
+bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() {
+ // This is a silly definition, because it gives an empty union a deleted
+ // default constructor. Don't do that.
+ if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst &&
+ (MD->getParent()->field_begin() != MD->getParent()->field_end())) {
+ if (Diagnose)
+ S.Diag(MD->getParent()->getLocation(),
+ diag::note_deleted_default_ctor_all_const)
+ << MD->getParent() << /*not anonymous union*/0;
+ return true;
+ }
+ return false;
+}
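// A short illustration of this rule (hypothetical example): an all-const
// union gets a deleted default constructor, while an empty union is
// deliberately exempted above.
//
//   union U { const int i; };
//   U u;  // error: U::U() is deleted; every variant member is const
//   union E {};
//   E e;  // OK: the empty-union carve-out applies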
+
+/// Determine whether a defaulted special member function should be defined as
+/// deleted, as specified in C++11 [class.ctor]p5, C++11 [class.copy]p11,
+/// C++11 [class.copy]p23, and C++11 [class.dtor]p5.
+bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
+ bool Diagnose) {
+ assert(!MD->isInvalidDecl());
CXXRecordDecl *RD = MD->getParent();
assert(!RD->isDependentType() && "do deletion after instantiation");
if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
return false;
- SourceLocation Loc = MD->getLocation();
-
- // Do access control from the constructor
- ContextRAII MethodContext(*this, MD);
-
- bool Union = RD->isUnion();
-
- // We do this because we should never actually use an anonymous
- // union's constructor.
- if (Union && RD->isAnonymousStructOrUnion())
- return false;
-
- // C++0x [class.copy]/20
- // A defaulted [move] assignment operator for class X is defined as deleted
- // if X has:
-
- // -- for the move constructor, [...] any direct or indirect virtual base
- // class.
- if (RD->getNumVBases() != 0)
+ // C++11 [expr.lambda.prim]p19:
+ // The closure type associated with a lambda-expression has a
+ // deleted (8.4.3) default constructor and a deleted copy
+ // assignment operator.
+ if (RD->isLambda() &&
+ (CSM == CXXDefaultConstructor || CSM == CXXCopyAssignment)) {
+ if (Diagnose)
+ Diag(RD->getLocation(), diag::note_lambda_decl);
return true;
+ }
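// A minimal sketch of the lambda rule just cited (hypothetical example):
//
//   auto l = [] {};
//   decltype(l) l2;  // error: the closure type's default ctor is deleted
//   l = l;           // error: the closure type's copy assignment is deleted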
- for (CXXRecordDecl::base_class_iterator BI = RD->bases_begin(),
- BE = RD->bases_end();
- BI != BE; ++BI) {
-
- QualType BaseType = BI->getType();
- CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl();
- assert(BaseDecl && "base isn't a CXXRecordDecl");
-
- // -- a [direct base class] B that cannot be [moved] because overload
- // resolution, as applied to B's [move] assignment operator, results in
- // an ambiguity or a function that is deleted or inaccessible from the
- // assignment operator
- CXXMethodDecl *MoveOper = LookupMovingAssignment(BaseDecl, false, 0);
- if (!MoveOper || MoveOper->isDeleted())
- return true;
- if (CheckDirectMemberAccess(Loc, MoveOper, PDiag()) != AR_accessible)
- return true;
+ // For an anonymous struct or union, the copy and assignment special members
+ // will never be used, so skip the check. For an anonymous union declared at
+ // namespace scope, the constructor and destructor are used.
+ if (CSM != CXXDefaultConstructor && CSM != CXXDestructor &&
+ RD->isAnonymousStructOrUnion())
+ return false;
- // -- for the move assignment operator, a [direct base class] with a type
- // that does not have a move assignment operator and is not trivially
- // copyable.
- if (!MoveOper->isMoveAssignmentOperator() &&
- !BaseDecl->isTriviallyCopyable())
+ // C++11 [class.copy]p7, p18:
+ // If the class definition declares a move constructor or move assignment
+ // operator, an implicitly declared copy constructor or copy assignment
+ // operator is defined as deleted.
+ if (MD->isImplicit() &&
+ (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment)) {
+ CXXMethodDecl *UserDeclaredMove = 0;
+
+ // In Microsoft mode, a user-declared move only causes the deletion of the
+ // corresponding copy operation, not both copy operations.
+ if (RD->hasUserDeclaredMoveConstructor() &&
+ (!getLangOpts().MicrosoftMode || CSM == CXXCopyConstructor)) {
+ if (!Diagnose) return true;
+ UserDeclaredMove = RD->getMoveConstructor();
+ assert(UserDeclaredMove);
+ } else if (RD->hasUserDeclaredMoveAssignment() &&
+ (!getLangOpts().MicrosoftMode || CSM == CXXCopyAssignment)) {
+ if (!Diagnose) return true;
+ UserDeclaredMove = RD->getMoveAssignmentOperator();
+ assert(UserDeclaredMove);
+ }
+
+ if (UserDeclaredMove) {
+ Diag(UserDeclaredMove->getLocation(),
+ diag::note_deleted_copy_user_declared_move)
+ << (CSM == CXXCopyAssignment) << RD
+ << UserDeclaredMove->isMoveAssignmentOperator();
return true;
+ }
}
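// A small illustration (hypothetical example): a user-declared move
// operation deletes the implicitly declared copy operations; in Microsoft
// mode, per the comment above, only the corresponding copy operation goes.
//
//   struct S {
//     S() = default;
//     S(S &&) = default;  // user-declared move constructor
//   };
//   S a;
//   S b = a;  // error: the implicit copy constructor is defined as deleted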
- for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
- FE = RD->field_end();
- FI != FE; ++FI) {
- if (FI->isUnnamedBitfield())
- continue;
-
- QualType FieldType = Context.getBaseElementType(FI->getType());
-
- // -- a non-static data member of reference type
- if (FieldType->isReferenceType())
- return true;
+ // Do access control from the special member function
+ ContextRAII MethodContext(*this, MD);
- // -- a non-static data member of const non-class type (or array thereof)
- if (FieldType.isConstQualified() && !FieldType->isRecordType())
+ // C++11 [class.dtor]p5:
+ // -- for a virtual destructor, lookup of the non-array deallocation function
+ // results in an ambiguity or in a function that is deleted or inaccessible
+ if (CSM == CXXDestructor && MD->isVirtual()) {
+ FunctionDecl *OperatorDelete = 0;
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete);
+ if (FindDeallocationFunction(MD->getLocation(), MD->getParent(), Name,
+ OperatorDelete, false)) {
+ if (Diagnose)
+ Diag(RD->getLocation(), diag::note_deleted_dtor_no_operator_delete);
return true;
-
- CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();
-
- if (FieldRecord) {
- // This is an anonymous union
- if (FieldRecord->isUnion() && FieldRecord->isAnonymousStructOrUnion()) {
- // Anonymous unions inside unions do not create variant members
- if (!Union) {
- for (CXXRecordDecl::field_iterator UI = FieldRecord->field_begin(),
- UE = FieldRecord->field_end();
- UI != UE; ++UI) {
- QualType UnionFieldType = Context.getBaseElementType(UI->getType());
- CXXRecordDecl *UnionFieldRecord =
- UnionFieldType->getAsCXXRecordDecl();
-
- // -- a variant member with a non-trivial [move] assignment operator
- // and X is a union-like class
- if (UnionFieldRecord &&
- !UnionFieldRecord->hasTrivialMoveAssignment())
- return true;
- }
- }
-
- // Don't try to initialize an anonymous union
- continue;
- // -- a variant member with a non-trivial [move] assignment operator
- // and X is a union-like class
- } else if (Union && !FieldRecord->hasTrivialMoveAssignment()) {
- return true;
- }
-
- CXXMethodDecl *MoveOper = LookupMovingAssignment(FieldRecord, false, 0);
- if (!MoveOper || MoveOper->isDeleted())
- return true;
- if (CheckDirectMemberAccess(Loc, MoveOper, PDiag()) != AR_accessible)
- return true;
-
- // -- for the move assignment operator, a [non-static data member] with a
- // type that does not have a move assignment operator and is not
- // trivially copyable.
- if (!MoveOper->isMoveAssignmentOperator() &&
- !FieldRecord->isTriviallyCopyable())
- return true;
}
}
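// A minimal sketch of the destructor rule (hypothetical example): a
// defaulted virtual destructor is deleted when the selected deallocation
// function is deleted.
//
//   struct D {
//     virtual ~D() = default;
//     void operator delete(void *) = delete;
//   };
//   D d;  // error: ~D() is implicitly defined as deleted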
- return false;
-}
-
-bool Sema::ShouldDeleteDestructor(CXXDestructorDecl *DD) {
- CXXRecordDecl *RD = DD->getParent();
- assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
- return false;
-
- SourceLocation Loc = DD->getLocation();
-
- // Do access control from the destructor
- ContextRAII CtorContext(*this, DD);
-
- bool Union = RD->isUnion();
-
- // We do this because we should never actually use an anonymous
- // union's destructor.
- if (Union && RD->isAnonymousStructOrUnion())
- return false;
+ SpecialMemberDeletionInfo SMI(*this, MD, CSM, Diagnose);
- // C++0x [class.dtor]p5
- // A defaulted destructor for a class X is defined as deleted if:
for (CXXRecordDecl::base_class_iterator BI = RD->bases_begin(),
- BE = RD->bases_end();
- BI != BE; ++BI) {
- // We'll handle this one later
- if (BI->isVirtual())
- continue;
-
- CXXRecordDecl *BaseDecl = BI->getType()->getAsCXXRecordDecl();
- CXXDestructorDecl *BaseDtor = LookupDestructor(BaseDecl);
- assert(BaseDtor && "base has no destructor");
-
- // -- any direct or virtual base class has a deleted destructor or
- // a destructor that is inaccessible from the defaulted destructor
- if (BaseDtor->isDeleted())
+ BE = RD->bases_end(); BI != BE; ++BI)
+ if (!BI->isVirtual() &&
+ SMI.shouldDeleteForBase(BI))
return true;
- if (CheckDestructorAccess(Loc, BaseDtor, PDiag()) !=
- AR_accessible)
- return true;
- }
for (CXXRecordDecl::base_class_iterator BI = RD->vbases_begin(),
- BE = RD->vbases_end();
- BI != BE; ++BI) {
- CXXRecordDecl *BaseDecl = BI->getType()->getAsCXXRecordDecl();
- CXXDestructorDecl *BaseDtor = LookupDestructor(BaseDecl);
- assert(BaseDtor && "base has no destructor");
-
- // -- any direct or virtual base class has a deleted destructor or
- // a destructor that is inaccessible from the defaulted destructor
- if (BaseDtor->isDeleted())
- return true;
- if (CheckDestructorAccess(Loc, BaseDtor, PDiag()) !=
- AR_accessible)
+ BE = RD->vbases_end(); BI != BE; ++BI)
+ if (SMI.shouldDeleteForBase(BI))
return true;
- }
for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
- FE = RD->field_end();
- FI != FE; ++FI) {
- QualType FieldType = Context.getBaseElementType(FI->getType());
- CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();
- if (FieldRecord) {
- if (FieldRecord->isUnion() && FieldRecord->isAnonymousStructOrUnion()) {
- for (CXXRecordDecl::field_iterator UI = FieldRecord->field_begin(),
- UE = FieldRecord->field_end();
- UI != UE; ++UI) {
- QualType UnionFieldType = Context.getBaseElementType(FI->getType());
- CXXRecordDecl *UnionFieldRecord =
- UnionFieldType->getAsCXXRecordDecl();
-
- // -- X is a union-like class that has a variant member with a non-
- // trivial destructor.
- if (UnionFieldRecord && !UnionFieldRecord->hasTrivialDestructor())
- return true;
- }
- // Technically we are supposed to do this next check unconditionally.
- // But that makes absolutely no sense.
- } else {
- CXXDestructorDecl *FieldDtor = LookupDestructor(FieldRecord);
-
- // -- any of the non-static data members has class type M (or array
- // thereof) and M has a deleted destructor or a destructor that is
- // inaccessible from the defaulted destructor
- if (FieldDtor->isDeleted())
- return true;
- if (CheckDestructorAccess(Loc, FieldDtor, PDiag()) !=
- AR_accessible)
- return true;
-
- // -- X is a union-like class that has a variant member with a non-
- // trivial destructor.
- if (Union && !FieldDtor->isTrivial())
- return true;
- }
- }
- }
-
- if (DD->isVirtual()) {
- FunctionDecl *OperatorDelete = 0;
- DeclarationName Name =
- Context.DeclarationNames.getCXXOperatorName(OO_Delete);
- if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete,
- false))
+ FE = RD->field_end(); FI != FE; ++FI)
+ if (!FI->isInvalidDecl() && !FI->isUnnamedBitfield() &&
+ SMI.shouldDeleteForField(*FI))
return true;
- }
+ if (SMI.shouldDeleteForAllConstMembers())
+ return true;
return false;
}
@@ -4876,6 +4866,9 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
if (!ClassDecl->hasUserDeclaredCopyConstructor())
++ASTContext::NumImplicitCopyConstructors;
+ if (getLangOpts().CPlusPlus0x && ClassDecl->needsImplicitMoveConstructor())
+ ++ASTContext::NumImplicitMoveConstructors;
+
if (!ClassDecl->hasUserDeclaredCopyAssignment()) {
++ASTContext::NumImplicitCopyAssignmentOperators;
@@ -4887,6 +4880,14 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
DeclareImplicitCopyAssignment(ClassDecl);
}
+ if (getLangOpts().CPlusPlus0x && ClassDecl->needsImplicitMoveAssignment()) {
+ ++ASTContext::NumImplicitMoveAssignmentOperators;
+
+ // Likewise for the move assignment operator.
+ if (ClassDecl->isDynamicClass())
+ DeclareImplicitMoveAssignment(ClassDecl);
+ }
+
if (!ClassDecl->hasUserDeclaredDestructor()) {
++ASTContext::NumImplicitDestructors;
@@ -5139,7 +5140,7 @@ bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) {
if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete))
return true;
- MarkDeclarationReferenced(Loc, OperatorDelete);
+ MarkFunctionReferenced(Loc, OperatorDelete);
Destructor->setOperatorDelete(OperatorDelete);
}
@@ -5342,9 +5343,11 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
R = Context.getFunctionType(ConvType, 0, 0, Proto->getExtProtoInfo());
// C++0x explicit conversion operators.
- if (D.getDeclSpec().isExplicitSpecified() && !getLangOptions().CPlusPlus0x)
+ if (D.getDeclSpec().isExplicitSpecified())
Diag(D.getDeclSpec().getExplicitSpecLoc(),
- diag::warn_explicit_conversion_functions)
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_explicit_conversion_functions :
+ diag::ext_explicit_conversion_functions)
<< SourceRange(D.getDeclSpec().getExplicitSpecLoc());
}
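// An illustration of the construct being diagnosed (hypothetical example):
// an explicit conversion operator is a C++11 feature, accepted in C++98
// mode as an extension per the diagnostics above.
//
//   struct Ptr {
//     void *p;
//     explicit operator bool() const { return p != 0; }
//   };
//   Ptr q = { 0 };
//   if (q) {}    // OK in C++11: contextual conversion to bool
//   bool b = q;  // error in C++11: the conversion function is explicit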
@@ -5413,17 +5416,13 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
SourceLocation StartLoc = InlineLoc.isValid() ? InlineLoc : NamespaceLoc;
// For anonymous namespace, take the location of the left brace.
SourceLocation Loc = II ? IdentLoc : LBrace;
- NamespaceDecl *Namespc = NamespaceDecl::Create(Context, CurContext,
- StartLoc, Loc, II);
- Namespc->setInline(InlineLoc.isValid());
-
+ bool IsInline = InlineLoc.isValid();
+ bool IsInvalid = false;
+ bool IsStd = false;
+ bool AddToKnown = false;
Scope *DeclRegionScope = NamespcScope->getParent();
- ProcessDeclAttributeList(DeclRegionScope, Namespc, AttrList);
-
- if (const VisibilityAttr *Attr = Namespc->getAttr<VisibilityAttr>())
- PushNamespaceVisibilityAttr(Attr);
-
+ NamespaceDecl *PrevNS = 0;
if (II) {
// C++ [namespace.def]p2:
// The identifier in an original-namespace-definition shall not
@@ -5437,11 +5436,11 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
// look through using directives, just look for any ordinary names.
const unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Member |
- Decl::IDNS_Type | Decl::IDNS_Using | Decl::IDNS_Tag |
- Decl::IDNS_Namespace;
+ Decl::IDNS_Type | Decl::IDNS_Using | Decl::IDNS_Tag |
+ Decl::IDNS_Namespace;
NamedDecl *PrevDecl = 0;
for (DeclContext::lookup_result R
- = CurContext->getRedeclContext()->lookup(II);
+ = CurContext->getRedeclContext()->lookup(II);
R.first != R.second; ++R.first) {
if ((*R.first)->getIdentifierNamespace() & IDNS) {
PrevDecl = *R.first;
@@ -5449,100 +5448,91 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
}
}
- if (NamespaceDecl *OrigNS = dyn_cast_or_null<NamespaceDecl>(PrevDecl)) {
+ PrevNS = dyn_cast_or_null<NamespaceDecl>(PrevDecl);
+
+ if (PrevNS) {
// This is an extended namespace definition.
- if (Namespc->isInline() != OrigNS->isInline()) {
+ if (IsInline != PrevNS->isInline()) {
// inline-ness must match
- if (OrigNS->isInline()) {
+ if (PrevNS->isInline()) {
// The user probably just forgot the 'inline', so suggest that it
// be added back.
- Diag(Namespc->getLocation(),
- diag::warn_inline_namespace_reopened_noninline)
+ Diag(Loc, diag::warn_inline_namespace_reopened_noninline)
<< FixItHint::CreateInsertion(NamespaceLoc, "inline ");
} else {
- Diag(Namespc->getLocation(), diag::err_inline_namespace_mismatch)
- << Namespc->isInline();
+ Diag(Loc, diag::err_inline_namespace_mismatch)
+ << IsInline;
}
- Diag(OrigNS->getLocation(), diag::note_previous_definition);
-
- // Recover by ignoring the new namespace's inline status.
- Namespc->setInline(OrigNS->isInline());
- }
-
- // Attach this namespace decl to the chain of extended namespace
- // definitions.
- OrigNS->setNextNamespace(Namespc);
- Namespc->setOriginalNamespace(OrigNS->getOriginalNamespace());
-
- // Remove the previous declaration from the scope.
- if (DeclRegionScope->isDeclScope(OrigNS)) {
- IdResolver.RemoveDecl(OrigNS);
- DeclRegionScope->RemoveDecl(OrigNS);
- }
+ Diag(PrevNS->getLocation(), diag::note_previous_definition);
+
+ IsInline = PrevNS->isInline();
+ }
} else if (PrevDecl) {
// This is an invalid name redefinition.
- Diag(Namespc->getLocation(), diag::err_redefinition_different_kind)
- << Namespc->getDeclName();
+ Diag(Loc, diag::err_redefinition_different_kind)
+ << II;
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
- Namespc->setInvalidDecl();
+ IsInvalid = true;
// Continue on to push Namespc as current DeclContext and return it.
- } else if (II->isStr("std") &&
+ } else if (II->isStr("std") &&
CurContext->getRedeclContext()->isTranslationUnit()) {
// This is the first "real" definition of the namespace "std", so update
// our cache of the "std" namespace to point at this definition.
- if (NamespaceDecl *StdNS = getStdNamespace()) {
- // We had already defined a dummy namespace "std". Link this new
- // namespace definition to the dummy namespace "std".
- StdNS->setNextNamespace(Namespc);
- StdNS->setLocation(IdentLoc);
- Namespc->setOriginalNamespace(StdNS->getOriginalNamespace());
- }
-
- // Make our StdNamespace cache point at the first real definition of the
- // "std" namespace.
- StdNamespace = Namespc;
-
- // Add this instance of "std" to the set of known namespaces
- KnownNamespaces[Namespc] = false;
- } else if (!Namespc->isInline()) {
- // Since this is an "original" namespace, add it to the known set of
- // namespaces if it is not an inline namespace.
- KnownNamespaces[Namespc] = false;
+ PrevNS = getStdNamespace();
+ IsStd = true;
+ AddToKnown = !IsInline;
+ } else {
+ // We've seen this namespace for the first time.
+ AddToKnown = !IsInline;
}
-
- PushOnScopeChains(Namespc, DeclRegionScope);
} else {
// Anonymous namespaces.
- assert(Namespc->isAnonymousNamespace());
-
- // Link the anonymous namespace into its parent.
- NamespaceDecl *PrevDecl;
+
+ // Determine whether the parent already has an anonymous namespace.
DeclContext *Parent = CurContext->getRedeclContext();
if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(Parent)) {
- PrevDecl = TU->getAnonymousNamespace();
- TU->setAnonymousNamespace(Namespc);
+ PrevNS = TU->getAnonymousNamespace();
} else {
NamespaceDecl *ND = cast<NamespaceDecl>(Parent);
- PrevDecl = ND->getAnonymousNamespace();
- ND->setAnonymousNamespace(Namespc);
+ PrevNS = ND->getAnonymousNamespace();
}
- // Link the anonymous namespace with its previous declaration.
- if (PrevDecl) {
- assert(PrevDecl->isAnonymousNamespace());
- assert(!PrevDecl->getNextNamespace());
- Namespc->setOriginalNamespace(PrevDecl->getOriginalNamespace());
- PrevDecl->setNextNamespace(Namespc);
+ if (PrevNS && IsInline != PrevNS->isInline()) {
+ // inline-ness must match
+ Diag(Loc, diag::err_inline_namespace_mismatch)
+ << IsInline;
+ Diag(PrevNS->getLocation(), diag::note_previous_definition);
+
+ // Recover by ignoring the new namespace's inline status.
+ IsInline = PrevNS->isInline();
+ }
+ }
+
+ NamespaceDecl *Namespc = NamespaceDecl::Create(Context, CurContext, IsInline,
+ StartLoc, Loc, II, PrevNS);
+ if (IsInvalid)
+ Namespc->setInvalidDecl();
+
+ ProcessDeclAttributeList(DeclRegionScope, Namespc, AttrList);
- if (Namespc->isInline() != PrevDecl->isInline()) {
- // inline-ness must match
- Diag(Namespc->getLocation(), diag::err_inline_namespace_mismatch)
- << Namespc->isInline();
- Diag(PrevDecl->getLocation(), diag::note_previous_definition);
- Namespc->setInvalidDecl();
- // Recover by ignoring the new namespace's inline status.
- Namespc->setInline(PrevDecl->isInline());
- }
+ // FIXME: Should we be merging attributes?
+ if (const VisibilityAttr *Attr = Namespc->getAttr<VisibilityAttr>())
+ PushNamespaceVisibilityAttr(Attr, Loc);
+
+ if (IsStd)
+ StdNamespace = Namespc;
+ if (AddToKnown)
+ KnownNamespaces[Namespc] = false;
+
+ if (II) {
+ PushOnScopeChains(Namespc, DeclRegionScope);
+ } else {
+ // Link the anonymous namespace into its parent.
+ DeclContext *Parent = CurContext->getRedeclContext();
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(Parent)) {
+ TU->setAnonymousNamespace(Namespc);
+ } else {
+ cast<NamespaceDecl>(Parent)->setAnonymousNamespace(Namespc);
}
CurContext->addDecl(Namespc);
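// A short illustration of the inline-mismatch recovery above (hypothetical
// example):
//
//   inline namespace N {}
//   namespace N {}  // warning: inline namespace reopened as non-inline;
//                   // the extension is still treated as inline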
@@ -5563,7 +5553,7 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
// declarations semantically contained within an anonymous
// namespace internal linkage.
- if (!PrevDecl) {
+ if (!PrevNS) {
UsingDirectiveDecl* UD
= UsingDirectiveDecl::Create(Context, CurContext,
/* 'using' */ LBrace,
@@ -5602,7 +5592,7 @@ void Sema::ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace) {
Namespc->setRBraceLoc(RBrace);
PopDeclContext();
if (Namespc->hasAttr<VisibilityAttr>())
- PopPragmaVisibility();
+ PopPragmaVisibility(true, RBrace);
}
CXXRecordDecl *Sema::getStdBadAlloc() const {
@@ -5622,14 +5612,142 @@ NamespaceDecl *Sema::getOrCreateStdNamespace() {
// The "std" namespace has not yet been defined, so build one implicitly.
StdNamespace = NamespaceDecl::Create(Context,
Context.getTranslationUnitDecl(),
+ /*Inline=*/false,
SourceLocation(), SourceLocation(),
- &PP.getIdentifierTable().get("std"));
+ &PP.getIdentifierTable().get("std"),
+ /*PrevDecl=*/0);
getStdNamespace()->setImplicit(true);
}
return getStdNamespace();
}
+bool Sema::isStdInitializerList(QualType Ty, QualType *Element) {
+ assert(getLangOpts().CPlusPlus &&
+ "Looking for std::initializer_list outside of C++.");
+
+ // We're looking for implicit instantiations of
+ // template <typename E> class std::initializer_list.
+
+ if (!StdNamespace) // If we haven't seen namespace std yet, this can't be it.
+ return false;
+
+ ClassTemplateDecl *Template = 0;
+ const TemplateArgument *Arguments = 0;
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+
+ ClassTemplateSpecializationDecl *Specialization =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (!Specialization)
+ return false;
+
+ Template = Specialization->getSpecializedTemplate();
+ Arguments = Specialization->getTemplateArgs().data();
+ } else if (const TemplateSpecializationType *TST =
+ Ty->getAs<TemplateSpecializationType>()) {
+ Template = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl());
+ Arguments = TST->getArgs();
+ }
+ if (!Template)
+ return false;
+
+ if (!StdInitializerList) {
+ // Haven't recognized std::initializer_list yet, maybe this is it.
+ CXXRecordDecl *TemplateClass = Template->getTemplatedDecl();
+ if (TemplateClass->getIdentifier() !=
+ &PP.getIdentifierTable().get("initializer_list") ||
+ !getStdNamespace()->InEnclosingNamespaceSetOf(
+ TemplateClass->getDeclContext()))
+ return false;
+ // This is a template called std::initializer_list, but is it the right
+ // template?
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ if (Params->getMinRequiredArguments() != 1)
+ return false;
+ if (!isa<TemplateTypeParmDecl>(Params->getParam(0)))
+ return false;
+
+ // It's the right template.
+ StdInitializerList = Template;
+ }
+
+ if (Template != StdInitializerList)
+ return false;
+
+ // This is an instance of std::initializer_list. Find the argument type.
+ if (Element)
+ *Element = Arguments[0].getAsType();
+ return true;
+}
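// A sketch of a hypothetical caller (names assumed for illustration):
//
//   QualType Element;
//   if (isStdInitializerList(ArgType, &Element)) {
//     // ArgType is an instance of std::initializer_list<E>, and Element
//     // now holds the element type E.
//   }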
+
+static ClassTemplateDecl *LookupStdInitializerList(Sema &S, SourceLocation Loc){
+ NamespaceDecl *Std = S.getStdNamespace();
+ if (!Std) {
+ S.Diag(Loc, diag::err_implied_std_initializer_list_not_found);
+ return 0;
+ }
+
+ LookupResult Result(S, &S.PP.getIdentifierTable().get("initializer_list"),
+ Loc, Sema::LookupOrdinaryName);
+ if (!S.LookupQualifiedName(Result, Std)) {
+ S.Diag(Loc, diag::err_implied_std_initializer_list_not_found);
+ return 0;
+ }
+ ClassTemplateDecl *Template = Result.getAsSingle<ClassTemplateDecl>();
+ if (!Template) {
+ Result.suppressDiagnostics();
+ // We found something weird. Complain about the first thing we found.
+ NamedDecl *Found = *Result.begin();
+ S.Diag(Found->getLocation(), diag::err_malformed_std_initializer_list);
+ return 0;
+ }
+
+ // We found some template called std::initializer_list. Now verify that it's
+ // correct.
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ if (Params->getMinRequiredArguments() != 1 ||
+ !isa<TemplateTypeParmDecl>(Params->getParam(0))) {
+ S.Diag(Template->getLocation(), diag::err_malformed_std_initializer_list);
+ return 0;
+ }
+
+ return Template;
+}
+
+QualType Sema::BuildStdInitializerList(QualType Element, SourceLocation Loc) {
+ if (!StdInitializerList) {
+ StdInitializerList = LookupStdInitializerList(*this, Loc);
+ if (!StdInitializerList)
+ return QualType();
+ }
+
+ TemplateArgumentListInfo Args(Loc, Loc);
+ Args.addArgument(TemplateArgumentLoc(TemplateArgument(Element),
+ Context.getTrivialTypeSourceInfo(Element,
+ Loc)));
+ return Context.getCanonicalType(
+ CheckTemplateIdType(TemplateName(StdInitializerList), Loc, Args));
+}
+
+bool Sema::isInitListConstructor(const CXXConstructorDecl* Ctor) {
+ // C++ [dcl.init.list]p2:
+ // A constructor is an initializer-list constructor if its first parameter
+ // is of type std::initializer_list<E> or reference to possibly cv-qualified
+ // std::initializer_list<E> for some type E, and either there are no other
+ // parameters or else all other parameters have default arguments.
+ if (Ctor->getNumParams() < 1 ||
+ (Ctor->getNumParams() > 1 && !Ctor->getParamDecl(1)->hasDefaultArg()))
+ return false;
+
+ QualType ArgType = Ctor->getParamDecl(0)->getType();
+ if (const ReferenceType *RT = ArgType->getAs<ReferenceType>())
+ ArgType = RT->getPointeeType().getUnqualifiedType();
+
+ return isStdInitializerList(ArgType, 0);
+}
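// The definition just quoted classifies constructors like so (hypothetical
// example):
//
//   struct W {
//     W(std::initializer_list<int>);           // initializer-list ctor
//     W(std::initializer_list<int>, int = 0);  // also one: default arg
//     W(int, std::initializer_list<int>);      // not one: wrong position
//   };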
+
/// \brief Determine whether a using statement is in a context where it will
/// apply in all contexts.
static bool IsUsingDirectiveInToplevelContext(DeclContext *CurContext) {
@@ -5643,35 +5761,46 @@ static bool IsUsingDirectiveInToplevelContext(DeclContext *CurContext) {
}
}
+namespace {
+
+// Callback to only accept typo corrections that are namespaces.
+class NamespaceValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (NamedDecl *ND = candidate.getCorrectionDecl()) {
+ return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
+ }
+ return false;
+ }
+};
+
+}
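// With this callback installed, the typo correction below only suggests
// namespaces (hypothetical example):
//
//   namespace fmt {}
//   using namespace fnt;  // error, with a "did you mean 'fmt'?" fix-it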
+
static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident) {
+ NamespaceValidatorCCC Validator;
R.clear();
if (TypoCorrection Corrected = S.CorrectTypo(R.getLookupNameInfo(),
- R.getLookupKind(), Sc, &SS, NULL,
- false, S.CTC_NoKeywords, NULL)) {
- if (Corrected.getCorrectionDeclAs<NamespaceDecl>() ||
- Corrected.getCorrectionDeclAs<NamespaceAliasDecl>()) {
- std::string CorrectedStr(Corrected.getAsString(S.getLangOptions()));
- std::string CorrectedQuotedStr(Corrected.getQuoted(S.getLangOptions()));
- if (DeclContext *DC = S.computeDeclContext(SS, false))
- S.Diag(IdentLoc, diag::err_using_directive_member_suggest)
- << Ident << DC << CorrectedQuotedStr << SS.getRange()
- << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
- else
- S.Diag(IdentLoc, diag::err_using_directive_suggest)
- << Ident << CorrectedQuotedStr
- << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
+ R.getLookupKind(), Sc, &SS,
+ Validator)) {
+ std::string CorrectedStr(Corrected.getAsString(S.getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(S.getLangOpts()));
+ if (DeclContext *DC = S.computeDeclContext(SS, false))
+ S.Diag(IdentLoc, diag::err_using_directive_member_suggest)
+ << Ident << DC << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
+ else
+ S.Diag(IdentLoc, diag::err_using_directive_suggest)
+ << Ident << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
- S.Diag(Corrected.getCorrectionDecl()->getLocation(),
- diag::note_namespace_defined_here) << CorrectedQuotedStr;
+ S.Diag(Corrected.getCorrectionDecl()->getLocation(),
+ diag::note_namespace_defined_here) << CorrectedQuotedStr;
- Ident = Corrected.getCorrectionAsIdentifierInfo();
- R.addDecl(Corrected.getCorrectionDecl());
- return true;
- }
- R.setLookupName(Ident);
+ R.addDecl(Corrected.getCorrectionDecl());
+ return true;
}
return false;
}
@@ -5757,14 +5886,15 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
}
void Sema::PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir) {
- // If scope has associated entity, then using directive is at namespace
- // or translation unit scope. We add UsingDirectiveDecls, into
- // it's lookup structure.
- if (DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity()))
+ // If the scope has an associated entity and the using directive is at
+ // namespace or translation unit scope, add the UsingDirectiveDecl into
+ // its lookup structure so qualified name lookup can find it.
+ DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity());
+ if (Ctx && !Ctx->isFunctionOrMethod())
Ctx->addDecl(UDir);
else
- // Otherwise it is block-sope. using-directives will affect lookup
- // only to the end of scope.
+ // Otherwise, it is at block scope. The using-directives will affect lookup
+ // only to the end of the scope.
S->PushUsingDirective(UDir);
}
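// A minimal sketch of the block-scope case (hypothetical example):
//
//   namespace N { int x; }
//   void f() {
//     using namespace N;  // recorded on the Scope, not a DeclContext;
//     x = 0;              // affects lookup only until the end of f
//   }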
@@ -5791,19 +5921,23 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S,
case UnqualifiedId::IK_ConstructorName:
case UnqualifiedId::IK_ConstructorTemplateId:
// C++0x inherited constructors.
- if (getLangOptions().CPlusPlus0x) break;
-
- Diag(Name.getSourceRange().getBegin(), diag::err_using_decl_constructor)
+ Diag(Name.getLocStart(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_using_decl_constructor :
+ diag::err_using_decl_constructor)
<< SS.getRange();
+
+ if (getLangOpts().CPlusPlus0x) break;
+
return 0;
case UnqualifiedId::IK_DestructorName:
- Diag(Name.getSourceRange().getBegin(), diag::err_using_decl_destructor)
+ Diag(Name.getLocStart(), diag::err_using_decl_destructor)
<< SS.getRange();
return 0;
case UnqualifiedId::IK_TemplateId:
- Diag(Name.getSourceRange().getBegin(), diag::err_using_decl_template_id)
+ Diag(Name.getLocStart(), diag::err_using_decl_template_id)
<< SourceRange(Name.TemplateId->LAngleLoc, Name.TemplateId->RAngleLoc);
return 0;
}
@@ -5818,7 +5952,7 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S,
// talk about access decls instead of using decls in the
// diagnostics.
if (!HasUsingKeyword) {
- UsingLoc = Name.getSourceRange().getBegin();
+ UsingLoc = Name.getLocStart();
Diag(UsingLoc, diag::warn_access_decl_deprecated)
<< FixItHint::CreateInsertion(SS.getRange().getBegin(), "using ");
@@ -5883,7 +6017,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
// specialization. The UsingShadowDecl in D<T> then points directly
// to A::foo, which will look well-formed when we instantiate.
// The right solution is to not collapse the shadow-decl chain.
- if (!getLangOptions().CPlusPlus0x && CurContext->isRecord()) {
+ if (!getLangOpts().CPlusPlus0x && CurContext->isRecord()) {
DeclContext *OrigDC = Orig->getDeclContext();
// Handle enums and anonymous structs.
@@ -6148,9 +6282,9 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
return UD;
}
- // Constructor inheriting using decls get special treatment.
+ // The normal rules do not apply to inheriting constructor declarations.
if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) {
- if (CheckInheritedConstructorUsingDecl(UD))
+ if (CheckInheritingConstructorUsingDecl(UD))
UD->setInvalidDecl();
return UD;
}
@@ -6166,6 +6300,13 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
if (!IsInstantiation)
R.setHideTags(false);
+ // For the purposes of this lookup, we have a base object type
+ // equal to that of the current context.
+ if (CurContext->isRecord()) {
+ R.setBaseObjectType(
+ Context.getTypeDeclType(cast<CXXRecordDecl>(CurContext)));
+ }
+
LookupQualifiedName(R, LookupContext);
if (R.empty()) {
@@ -6220,11 +6361,8 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
}
/// Additional checks for a using declaration referring to a constructor name.
-bool Sema::CheckInheritedConstructorUsingDecl(UsingDecl *UD) {
- if (UD->isTypeName()) {
- // FIXME: Cannot specify typename when specifying constructor
- return true;
- }
+bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) {
+ assert(!UD->isTypeName() && "expecting a constructor name");
const Type *SourceType = UD->getQualifier()->getAsType();
assert(SourceType &&
@@ -6239,6 +6377,8 @@ bool Sema::CheckInheritedConstructorUsingDecl(UsingDecl *UD) {
CanQualType BaseType = BaseIt->getType()->getCanonicalTypeUnqualified();
if (CanonicalSourceType == BaseType)
break;
+ if (BaseIt->getType()->isDependentType())
+ break;
}
if (BaseIt == BaseE) {
@@ -6250,7 +6390,8 @@ bool Sema::CheckInheritedConstructorUsingDecl(UsingDecl *UD) {
return true;
}
- BaseIt->setInheritConstructors();
+ if (!CurContext->isDependentContext())
+ BaseIt->setInheritConstructors();
return false;
}
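// An illustration of the construct being checked (hypothetical example):
//
//   struct Base { Base(int); };
//   struct Derived : Base {
//     using Base::Base;  // inheriting-constructor using-declaration
//   };
//   Derived d(0);  // OK: uses the constructor inherited from Base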
@@ -6365,7 +6506,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), NamedContext))
return true;
- if (getLangOptions().CPlusPlus0x) {
+ if (getLangOpts().CPlusPlus0x) {
// C++0x [namespace.udecl]p3:
// In a using-declaration used as a member-declaration, the
// nested-name-specifier shall name a base class of the class
@@ -6405,7 +6546,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
// need to be repeated.
struct UserData {
- llvm::DenseSet<const CXXRecordDecl*> Bases;
+ llvm::SmallPtrSet<const CXXRecordDecl*, 4> Bases;
static bool collect(const CXXRecordDecl *Base, void *OpaqueData) {
UserData *Data = reinterpret_cast<UserData*>(OpaqueData);
@@ -6485,9 +6626,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S,
// Warn about shadowing the name of a template parameter.
if (Previous.isSingleResult() &&
Previous.getFoundDecl()->isTemplateParameter()) {
- if (DiagnoseTemplateParameterShadow(Name.StartLocation,
- Previous.getFoundDecl()))
- Invalid = true;
+ DiagnoseTemplateParameterShadow(Name.StartLocation,Previous.getFoundDecl());
Previous.clear();
}
@@ -6633,7 +6772,7 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S,
if (R.empty()) {
if (!TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, Ident)) {
- Diag(NamespaceLoc, diag::err_expected_namespace_name) << SS.getRange();
+ Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
return 0;
}
}
@@ -6664,7 +6803,7 @@ namespace {
~ImplicitlyDefinedFunctionScope() {
S.PopExpressionEvaluationContext();
- S.PopFunctionOrBlockScope();
+ S.PopFunctionScopeInfo();
}
};
}
@@ -6757,16 +6896,12 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
DeclarationName Name
= Context.DeclarationNames.getCXXConstructorName(ClassType);
DeclarationNameInfo NameInfo(Name, ClassLoc);
- CXXConstructorDecl *DefaultCon
- = CXXConstructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
- Context.getFunctionType(Context.VoidTy,
- 0, 0, EPI),
- /*TInfo=*/0,
- /*isExplicit=*/false,
- /*isInline=*/true,
- /*isImplicitlyDeclared=*/true,
- // FIXME: apply the rules for definitions here
- /*isConstexpr=*/false);
+ CXXConstructorDecl *DefaultCon = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo,
+ Context.getFunctionType(Context.VoidTy, 0, 0, EPI), /*TInfo=*/0,
+ /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ /*isConstexpr=*/ClassDecl->defaultedDefaultConstructorIsConstexpr() &&
+ getLangOpts().CPlusPlus0x);
DefaultCon->setAccess(AS_public);
DefaultCon->setDefaulted();
DefaultCon->setImplicit();
@@ -6905,7 +7040,6 @@ void Sema::DeclareInheritedConstructors(CXXRecordDecl *ClassDecl) {
Context.getCanonicalType(CtorIt->getType()).getTypePtr());
}
- Scope *S = getScopeForContext(ClassDecl);
DeclarationName CreatedCtorName =
Context.DeclarationNames.getCXXConstructorName(
ClassDecl->getTypeForDecl()->getCanonicalTypeUnqualified());
@@ -6927,10 +7061,12 @@ void Sema::DeclareInheritedConstructors(CXXRecordDecl *ClassDecl) {
CtorE = BaseDecl->ctor_end();
CtorIt != CtorE; ++CtorIt) {
// Find the using declaration for inheriting this base's constructors.
+ // FIXME: Don't perform name lookup just to obtain a source location!
DeclarationName Name =
Context.DeclarationNames.getCXXConstructorName(CanonicalBase);
- UsingDecl *UD = dyn_cast_or_null<UsingDecl>(
- LookupSingleName(S, Name,SourceLocation(), LookupUsingDeclName));
+ LookupResult Result(*this, Name, SourceLocation(), LookupUsingDeclName);
+ LookupQualifiedName(Result, CurContext);
+ UsingDecl *UD = Result.getAsSingle<UsingDecl>();
SourceLocation UsingLoc = UD ? UD->getLocation() :
ClassDecl->getLocation();
@@ -7041,7 +7177,6 @@ void Sema::DeclareInheritedConstructors(CXXRecordDecl *ClassDecl) {
NewCtor->setParams(ParamDecls);
NewCtor->setInheritedConstructor(BaseCtor);
- PushOnScopeChains(NewCtor, S, false);
ClassDecl->addDecl(NewCtor);
result.first->second.second = NewCtor;
}
@@ -7131,18 +7266,19 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
// This could be uniqued if it ever proves significant.
Destructor->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(Ty));
- if (ShouldDeleteDestructor(Destructor))
- Destructor->setDeletedAsWritten();
-
AddOverriddenMethods(ClassDecl, Destructor);
-
+
+ if (ShouldDeleteSpecialMember(Destructor, CXXDestructor))
+ Destructor->setDeletedAsWritten();
+
return Destructor;
}
void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor) {
assert((Destructor->isDefaulted() &&
- !Destructor->doesThisDeclarationHaveABody()) &&
+ !Destructor->doesThisDeclarationHaveABody() &&
+ !Destructor->isDeleted()) &&
"DefineImplicitDestructor - call it for implicit default dtor");
CXXRecordDecl *ClassDecl = Destructor->getParent();
assert(ClassDecl && "DefineImplicitDestructor - invalid destructor");
@@ -7259,8 +7395,8 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
while (F.hasNext()) {
NamedDecl *D = F.next();
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- if (Copying ? Method->isCopyAssignmentOperator() :
- Method->isMoveAssignmentOperator())
+ if (Method->isCopyAssignmentOperator() ||
+ (!Copying && Method->isMoveAssignmentOperator()))
continue;
F.erase();
@@ -7290,15 +7426,18 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
// reference to operator=; this is required to suppress the virtual
// call mechanism.
CXXScopeSpec SS;
+ const Type *CanonicalT = S.Context.getCanonicalType(T.getTypePtr());
SS.MakeTrivial(S.Context,
NestedNameSpecifier::Create(S.Context, 0, false,
- T.getTypePtr()),
+ CanonicalT),
Loc);
// Create the reference to operator=.
ExprResult OpEqualRef
= S.BuildMemberReferenceExpr(To, T, Loc, /*isArrow=*/false, SS,
- /*FirstQualifierInScope=*/0, OpLookup,
+ /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ OpLookup,
/*TemplateArgs=*/0,
/*SuppressQualifierCheck=*/true);
if (OpEqualRef.isInvalid())
@@ -7339,7 +7478,7 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
// Create the iteration variable.
IdentifierInfo *IterationVarName = 0;
{
- llvm::SmallString<8> Str;
+ SmallString<8> Str;
llvm::raw_svector_ostream OS(Str);
OS << "__i" << Depth;
IterationVarName = &S.Context.Idents.get(OS.str());
@@ -7356,9 +7495,11 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
// Create a reference to the iteration variable; we'll use this several
// times throughout.
Expr *IterationVarRef
- = S.BuildDeclRefExpr(IterationVar, SizeType, VK_RValue, Loc).take();
+ = S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc).take();
assert(IterationVarRef && "Reference to invented variable cannot fail!");
-
+ Expr *IterationVarRefRVal = S.DefaultLvalueConversion(IterationVarRef).take();
+ assert(IterationVarRefRVal && "Conversion of invented variable cannot fail!");
+
// Create the DeclStmt that holds the iteration variable.
Stmt *InitStmt = new (S.Context) DeclStmt(DeclGroupRef(IterationVar),Loc,Loc);
@@ -7366,7 +7507,7 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
llvm::APInt Upper
= ArrayTy->getSize().zextOrTrunc(S.Context.getTypeSize(SizeType));
Expr *Comparison
- = new (S.Context) BinaryOperator(IterationVarRef,
+ = new (S.Context) BinaryOperator(IterationVarRefRVal,
IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
BO_NE, S.Context.BoolTy,
VK_RValue, OK_Ordinary, Loc);
@@ -7378,9 +7519,11 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
// Subscript the "from" and "to" expressions with the iteration variable.
From = AssertSuccess(S.CreateBuiltinArraySubscriptExpr(From, Loc,
- IterationVarRef, Loc));
+ IterationVarRefRVal,
+ Loc));
To = AssertSuccess(S.CreateBuiltinArraySubscriptExpr(To, Loc,
- IterationVarRef, Loc));
+ IterationVarRefRVal,
+ Loc));
if (!Copying) // Cast to rvalue
From = CastForMoving(S, From);
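// Conceptually, the statements built here amount to the following for a
// two-dimensional array member (a sketch; "__i" plus the depth is the
// naming scheme used above):
//
//   for (size_t __i0 = 0; __i0 != 4; ++__i0)
//     for (size_t __i1 = 0; __i1 != 8; ++__i1)
//       this->a[__i0][__i1] = other.a[__i0][__i1];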
@@ -7432,7 +7575,7 @@ Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
&HasConstCopyAssignment);
}
- // In C++0x, the above citation has "or virtual added"
+ // In C++11, the above citation has "or virtual" added
if (LangOpts.CPlusPlus0x) {
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
BaseEnd = ClassDecl->vbases_end();
@@ -7565,15 +7708,14 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
PushOnScopeChains(CopyAssignment, S, false);
ClassDecl->addDecl(CopyAssignment);
- // C++0x [class.copy]p18:
- // ... If the class definition declares a move constructor or move
- // assignment operator, the implicitly declared copy assignment operator is
- // defined as deleted; ...
- if (ClassDecl->hasUserDeclaredMoveConstructor() ||
- ClassDecl->hasUserDeclaredMoveAssignment() ||
- ShouldDeleteCopyAssignmentOperator(CopyAssignment))
+ // C++0x [class.copy]p19:
+ // .... If the class definition does not explicitly declare a copy
+ // assignment operator, there is no user-declared move constructor, and
+ // there is no user-declared move assignment operator, a copy assignment
+ // operator is implicitly declared as defaulted.
+ if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment))
CopyAssignment->setDeletedAsWritten();
-
+
AddOverriddenMethods(ClassDecl, CopyAssignment);
return CopyAssignment;
}
@@ -7583,7 +7725,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
assert((CopyAssignOperator->isDefaulted() &&
CopyAssignOperator->isOverloadedOperator() &&
CopyAssignOperator->getOverloadedOperator() == OO_Equal &&
- !CopyAssignOperator->doesThisDeclarationHaveABody()) &&
+ !CopyAssignOperator->doesThisDeclarationHaveABody() &&
+ !CopyAssignOperator->isDeleted()) &&
"DefineImplicitCopyAssignment called for wrong function");
CXXRecordDecl *ClassDecl = CopyAssignOperator->getParent();
@@ -7734,10 +7877,12 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
MemberLookup.resolveKind();
ExprResult From = BuildMemberReferenceExpr(OtherRef, OtherRefType,
Loc, /*IsArrow=*/false,
- SS, 0, MemberLookup, 0);
+ SS, SourceLocation(), 0,
+ MemberLookup, 0);
ExprResult To = BuildMemberReferenceExpr(This, This->getType(),
Loc, /*IsArrow=*/true,
- SS, 0, MemberLookup, 0);
+ SS, SourceLocation(), 0,
+ MemberLookup, 0);
assert(!From.isInvalid() && "Implicit field reference cannot fail");
assert(!To.isInvalid() && "Implicit field reference cannot fail");
@@ -7869,10 +8014,14 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CopyAssignOperator->setInvalidDecl();
return;
}
-
- StmtResult Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements),
- /*isStmtExpr=*/false);
- assert(!Body.isInvalid() && "Compound statement creation cannot fail");
+
+ StmtResult Body;
+ {
+ CompoundScopeRAII CompoundScope(*this);
+ Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements),
+ /*isStmtExpr=*/false);
+ assert(!Body.isInvalid() && "Compound statement creation cannot fail");
+ }
CopyAssignOperator->setBody(Body.takeAs<Stmt>());
if (ASTMutationListener *L = getASTMutationListener()) {
@@ -7937,7 +8086,115 @@ Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXRecordDecl *ClassDecl) {
return ExceptSpec;
}
+/// Determine whether the class type has any direct or indirect virtual base
+/// classes which have a non-trivial move assignment operator.
+static bool
+hasVirtualBaseWithNonTrivialMoveAssignment(Sema &S, CXXRecordDecl *ClassDecl) {
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd; ++Base) {
+ CXXRecordDecl *BaseClass =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ // Try to declare the move assignment. If it would be deleted, then the
+ // class does not have a non-trivial move assignment.
+ if (BaseClass->needsImplicitMoveAssignment())
+ S.DeclareImplicitMoveAssignment(BaseClass);
+
+ // If the class has both a trivial move assignment and a non-trivial move
+ // assignment, hasTrivialMoveAssignment() is false.
+ if (BaseClass->hasDeclaredMoveAssignment() &&
+ !BaseClass->hasTrivialMoveAssignment())
+ return true;
+ }
+
+ return false;
+}
+
+/// Determine whether the given type either has a move constructor or is
+/// trivially copyable.
+static bool
+hasMoveOrIsTriviallyCopyable(Sema &S, QualType Type, bool IsConstructor) {
+ Type = S.Context.getBaseElementType(Type);
+
+ // FIXME: Technically, non-trivially-copyable non-class types, such as
+ // reference types, are supposed to return false here, but that appears
+ // to be a standard defect.
+ CXXRecordDecl *ClassDecl = Type->getAsCXXRecordDecl();
+ if (!ClassDecl)
+ return true;
+
+ if (Type.isTriviallyCopyableType(S.Context))
+ return true;
+
+ if (IsConstructor) {
+ if (ClassDecl->needsImplicitMoveConstructor())
+ S.DeclareImplicitMoveConstructor(ClassDecl);
+ return ClassDecl->hasDeclaredMoveConstructor();
+ }
+
+ if (ClassDecl->needsImplicitMoveAssignment())
+ S.DeclareImplicitMoveAssignment(ClassDecl);
+ return ClassDecl->hasDeclaredMoveAssignment();
+}
+
+/// Determine whether all non-static data members and direct or virtual bases
+/// of class \p ClassDecl have either a move operation, or are trivially
+/// copyable.
+static bool subobjectsHaveMoveOrTrivialCopy(Sema &S, CXXRecordDecl *ClassDecl,
+ bool IsConstructor) {
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ Base != BaseEnd; ++Base) {
+ if (Base->isVirtual())
+ continue;
+
+ if (!hasMoveOrIsTriviallyCopyable(S, Base->getType(), IsConstructor))
+ return false;
+ }
+
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd; ++Base) {
+ if (!hasMoveOrIsTriviallyCopyable(S, Base->getType(), IsConstructor))
+ return false;
+ }
+
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ if (!hasMoveOrIsTriviallyCopyable(S, (*Field)->getType(), IsConstructor))
+ return false;
+ }
+
+ return true;
+}
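// A small illustration of the DR1402 heuristic (hypothetical example):
//
//   struct NoMove {
//     NoMove(const NoMove &);  // not trivially copyable, no move ops
//   };
//   struct X { NoMove nm; };
//   // X gets no implicitly declared move operations here, so
//   // X x2 = std::move(x1); falls back to the copy constructor.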
+
CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
+ // C++11 [class.copy]p20:
+ // If the definition of a class X does not explicitly declare a move
+ // assignment operator, one will be implicitly declared as defaulted
+ // if and only if:
+ //
+ // - [first 4 bullets]
+ assert(ClassDecl->needsImplicitMoveAssignment());
+
+ // [Checked after we build the declaration]
+ // - the move assignment operator would not be implicitly defined as
+ // deleted,
+
+ // [DR1402]:
+ // - X has no direct or indirect virtual base class with a non-trivial
+ // move assignment operator, and
+ // - each of X's non-static data members and direct or virtual base classes
+ // has a type that either has a move assignment operator or is trivially
+ // copyable.
+ if (hasVirtualBaseWithNonTrivialMoveAssignment(*this, ClassDecl) ||
+ !subobjectsHaveMoveOrTrivialCopy(*this, ClassDecl,/*Constructor*/false)) {
+ ClassDecl->setFailedImplicitMoveAssignment();
+ return 0;
+ }
+
// Note: The following rules are largely analogous to the move
// constructor rules.
@@ -7985,7 +8242,7 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
// [...]
// - the move assignment operator would not be implicitly defined as
// deleted.
- if (ShouldDeleteMoveAssignmentOperator(MoveAssignment)) {
+ if (ShouldDeleteSpecialMember(MoveAssignment, CXXMoveAssignment)) {
// Cache this result so that we don't try to generate this over and over
// on every lookup, leaking memory and wasting time.
ClassDecl->setFailedImplicitMoveAssignment();
@@ -8005,7 +8262,8 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
assert((MoveAssignOperator->isDefaulted() &&
MoveAssignOperator->isOverloadedOperator() &&
MoveAssignOperator->getOverloadedOperator() == OO_Equal &&
- !MoveAssignOperator->doesThisDeclarationHaveABody()) &&
+ !MoveAssignOperator->doesThisDeclarationHaveABody() &&
+ !MoveAssignOperator->isDeleted()) &&
"DefineImplicitMoveAssignment called for wrong function");
CXXRecordDecl *ClassDecl = MoveAssignOperator->getParent();
@@ -8052,7 +8310,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// ASTs.
Expr *This = ActOnCXXThis(Loc).takeAs<Expr>();
assert(This && "Reference to this cannot fail!");
-
+
// Assign base classes.
bool Invalid = false;
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
@@ -8154,10 +8412,12 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
MemberLookup.resolveKind();
ExprResult From = BuildMemberReferenceExpr(OtherRef, OtherRefType,
Loc, /*IsArrow=*/false,
- SS, 0, MemberLookup, 0);
+ SS, SourceLocation(), 0,
+ MemberLookup, 0);
ExprResult To = BuildMemberReferenceExpr(This, This->getType(),
Loc, /*IsArrow=*/true,
- SS, 0, MemberLookup, 0);
+ SS, SourceLocation(), 0,
+ MemberLookup, 0);
assert(!From.isInvalid() && "Implicit field reference cannot fail");
assert(!To.isInvalid() && "Implicit field reference cannot fail");
@@ -8299,10 +8559,14 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
MoveAssignOperator->setInvalidDecl();
return;
}
-
- StmtResult Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements),
- /*isStmtExpr=*/false);
- assert(!Body.isInvalid() && "Compound statement creation cannot fail");
+
+ StmtResult Body;
+ {
+ CompoundScopeRAII CompoundScope(*this);
+ Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements),
+ /*isStmtExpr=*/false);
+ assert(!Body.isInvalid() && "Compound statement creation cannot fail");
+ }
MoveAssignOperator->setBody(Body.takeAs<Stmt>());
if (ASTMutationListener *L = getASTMutationListener()) {
@@ -8441,21 +8705,17 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
DeclarationNameInfo NameInfo(Name, ClassLoc);
// An implicitly-declared copy constructor is an inline public
- // member of its class.
- CXXConstructorDecl *CopyConstructor
- = CXXConstructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
- Context.getFunctionType(Context.VoidTy,
- &ArgType, 1, EPI),
- /*TInfo=*/0,
- /*isExplicit=*/false,
- /*isInline=*/true,
- /*isImplicitlyDeclared=*/true,
- // FIXME: apply the rules for definitions here
- /*isConstexpr=*/false);
+ // member of its class.
+ CXXConstructorDecl *CopyConstructor = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo,
+ Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI), /*TInfo=*/0,
+ /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ /*isConstexpr=*/ClassDecl->defaultedCopyConstructorIsConstexpr() &&
+ getLangOpts().CPlusPlus0x);
CopyConstructor->setAccess(AS_public);
CopyConstructor->setDefaulted();
CopyConstructor->setTrivial(ClassDecl->hasTrivialCopyConstructor());
-
+
// Note that we have declared this constructor.
++ASTContext::NumImplicitCopyConstructorsDeclared;
@@ -8472,15 +8732,14 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
PushOnScopeChains(CopyConstructor, S, false);
ClassDecl->addDecl(CopyConstructor);
- // C++0x [class.copy]p7:
- // ... If the class definition declares a move constructor or move
- // assignment operator, the implicitly declared constructor is defined as
- // deleted; ...
- if (ClassDecl->hasUserDeclaredMoveConstructor() ||
- ClassDecl->hasUserDeclaredMoveAssignment() ||
- ShouldDeleteSpecialMember(CopyConstructor, CXXCopyConstructor))
+ // C++11 [class.copy]p8:
+ // ... If the class definition does not explicitly declare a copy
+ // constructor, there is no user-declared move constructor, and there is no
+ // user-declared move assignment operator, a copy constructor is implicitly
+ // declared as defaulted.
+ if (ShouldDeleteSpecialMember(CopyConstructor, CXXCopyConstructor))
CopyConstructor->setDeletedAsWritten();
-
+
return CopyConstructor;
}
@@ -8488,7 +8747,8 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *CopyConstructor) {
assert((CopyConstructor->isDefaulted() &&
CopyConstructor->isCopyConstructor() &&
- !CopyConstructor->doesThisDeclarationHaveABody()) &&
+ !CopyConstructor->doesThisDeclarationHaveABody() &&
+ !CopyConstructor->isDeleted()) &&
"DefineImplicitCopyConstructor - call it for implicit copy ctor");
CXXRecordDecl *ClassDecl = CopyConstructor->getParent();
@@ -8503,9 +8763,10 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
<< CXXCopyConstructor << Context.getTagDeclType(ClassDecl);
CopyConstructor->setInvalidDecl();
} else {
+ Sema::CompoundScopeRAII CompoundScope(*this);
CopyConstructor->setBody(ActOnCompoundStmt(CopyConstructor->getLocation(),
CopyConstructor->getLocation(),
- MultiStmtArg(*this, 0, 0),
+ MultiStmtArg(*this, 0, 0),
/*isStmtExpr=*/false)
.takeAs<Stmt>());
CopyConstructor->setImplicitlyDefined(true);
@@ -8561,12 +8822,7 @@ Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
FEnd = ClassDecl->field_end();
F != FEnd; ++F) {
- if (F->hasInClassInitializer()) {
- if (Expr *E = F->getInClassInitializer())
- ExceptSpec.CalledExpr(E);
- else if (!F->isInvalidDecl())
- ExceptSpec.SetDelayed();
- } else if (const RecordType *RecordTy
+ if (const RecordType *RecordTy
= Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
CXXConstructorDecl *Constructor = LookupMovingConstructor(FieldRecDecl);
@@ -8585,6 +8841,25 @@ Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
CXXRecordDecl *ClassDecl) {
+ // C++11 [class.copy]p9:
+ // If the definition of a class X does not explicitly declare a move
+ // constructor, one will be implicitly declared as defaulted if and only if:
+ //
+ // - [first 4 bullets]
+ assert(ClassDecl->needsImplicitMoveConstructor());
+
+ // [Checked after we build the declaration]
+ // - the move assignment operator would not be implicitly defined as
+ // deleted,
+
+ // [DR1402]:
+ // - each of X's non-static data members and direct or virtual base classes
+ // has a type that either has a move constructor or is trivially copyable.
+ if (!subobjectsHaveMoveOrTrivialCopy(*this, ClassDecl, /*Constructor*/true)) {
+ ClassDecl->setFailedImplicitMoveConstructor();
+ return 0;
+ }
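// [Editor's note: illustrative sketch, not part of the original patch.] The
// DR1402 check above can be seen with a hypothetical pair of types:
//
//   struct NoMove { NoMove(const NoMove&); };  // not trivially copyable,
//                                              // no move constructor
//   struct X { NoMove m; };
//
// X's member has neither a move constructor nor trivial copyability, so
// subobjectsHaveMoveOrTrivialCopy() fails and X gets no implicit move
// constructor; copies are used instead.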
+
ImplicitExceptionSpecification Spec(
ComputeDefaultedMoveCtorExceptionSpec(ClassDecl));
@@ -8601,21 +8876,17 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
// C++0x [class.copy]p11:
// An implicitly-declared copy/move constructor is an inline public
- // member of its class.
- CXXConstructorDecl *MoveConstructor
- = CXXConstructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
- Context.getFunctionType(Context.VoidTy,
- &ArgType, 1, EPI),
- /*TInfo=*/0,
- /*isExplicit=*/false,
- /*isInline=*/true,
- /*isImplicitlyDeclared=*/true,
- // FIXME: apply the rules for definitions here
- /*isConstexpr=*/false);
+ // member of its class.
+ CXXConstructorDecl *MoveConstructor = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo,
+ Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI), /*TInfo=*/0,
+ /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ /*isConstexpr=*/ClassDecl->defaultedMoveConstructorIsConstexpr() &&
+ getLangOpts().CPlusPlus0x);
MoveConstructor->setAccess(AS_public);
MoveConstructor->setDefaulted();
MoveConstructor->setTrivial(ClassDecl->hasTrivialMoveConstructor());
-
+
// Add the parameter to the constructor.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor,
ClassLoc, ClassLoc,
@@ -8651,7 +8922,8 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *MoveConstructor) {
assert((MoveConstructor->isDefaulted() &&
MoveConstructor->isMoveConstructor() &&
- !MoveConstructor->doesThisDeclarationHaveABody()) &&
+ !MoveConstructor->doesThisDeclarationHaveABody() &&
+ !MoveConstructor->isDeleted()) &&
"DefineImplicitMoveConstructor - call it for implicit move ctor");
CXXRecordDecl *ClassDecl = MoveConstructor->getParent();
@@ -8666,9 +8938,10 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
<< CXXMoveConstructor << Context.getTagDeclType(ClassDecl);
MoveConstructor->setInvalidDecl();
} else {
+ Sema::CompoundScopeRAII CompoundScope(*this);
MoveConstructor->setBody(ActOnCompoundStmt(MoveConstructor->getLocation(),
MoveConstructor->getLocation(),
- MultiStmtArg(*this, 0, 0),
+ MultiStmtArg(*this, 0, 0),
/*isStmtExpr=*/false)
.takeAs<Stmt>());
MoveConstructor->setImplicitlyDefined(true);
@@ -8681,6 +8954,133 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
}
}
+bool Sema::isImplicitlyDeleted(FunctionDecl *FD) {
+ return FD->isDeleted() &&
+ (FD->isDefaulted() || FD->isImplicit()) &&
+ isa<CXXMethodDecl>(FD);
+}
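// [Editor's note: illustrative sketch, not part of the original patch.] A
// member counts as implicitly deleted here when the compiler, not the user,
// both declared and deleted it. For example, under C++11:
//
//   struct MoveOnly { MoveOnly(MoveOnly&&); };
//
// MoveOnly's copy constructor is implicitly declared as deleted (because a
// move constructor is user-declared), so isImplicitlyDeleted() returns true
// for it, whereas a copy constructor the user wrote as "= delete" would not
// be implicit.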
+
+/// \brief Mark the call operator of the given lambda closure type as "used".
+static void markLambdaCallOperatorUsed(Sema &S, CXXRecordDecl *Lambda) {
+ CXXMethodDecl *CallOperator
+ = cast<CXXMethodDecl>(
+ *Lambda->lookup(
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Call)).first);
+ CallOperator->setReferenced();
+ CallOperator->setUsed();
+}
+
+void Sema::DefineImplicitLambdaToFunctionPointerConversion(
+ SourceLocation CurrentLocation,
+ CXXConversionDecl *Conv)
+{
+ CXXRecordDecl *Lambda = Conv->getParent();
+
+ // Make sure that the lambda call operator is marked used.
+ markLambdaCallOperatorUsed(*this, Lambda);
+
+ Conv->setUsed();
+
+ ImplicitlyDefinedFunctionScope Scope(*this, Conv);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // Return the address of the __invoke function.
+ DeclarationName InvokeName = &Context.Idents.get("__invoke");
+ CXXMethodDecl *Invoke
+ = cast<CXXMethodDecl>(*Lambda->lookup(InvokeName).first);
+ Expr *FunctionRef = BuildDeclRefExpr(Invoke, Invoke->getType(),
+ VK_LValue, Conv->getLocation()).take();
+ assert(FunctionRef && "Can't refer to __invoke function?");
+ Stmt *Return = ActOnReturnStmt(Conv->getLocation(), FunctionRef).take();
+ Conv->setBody(new (Context) CompoundStmt(Context, &Return, 1,
+ Conv->getLocation(),
+ Conv->getLocation()));
+
+ // Fill in the __invoke function with a dummy implementation. IR generation
+ // will fill in the actual details.
+ Invoke->setUsed();
+ Invoke->setReferenced();
+ Invoke->setBody(new (Context) CompoundStmt(Context, 0, 0, Conv->getLocation(),
+ Conv->getLocation()));
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Conv);
+ L->CompletedImplicitDefinition(Invoke);
+ }
+}
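// [Editor's note: illustrative sketch, not part of the original patch.] This
// path is taken when a capture-less lambda decays to a function pointer:
//
//   auto l = [](int x) { return x + 1; };
//   int (*fp)(int) = l;   // invokes the conversion defined above
//
// The conversion simply returns the address of the synthesized __invoke
// function; IR generation later fills in __invoke's real body.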
+
+void Sema::DefineImplicitLambdaToBlockPointerConversion(
+ SourceLocation CurrentLocation,
+ CXXConversionDecl *Conv)
+{
+ Conv->setUsed();
+
+ ImplicitlyDefinedFunctionScope Scope(*this, Conv);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // Copy-initialize the lambda object as needed to capture it.
+ Expr *This = ActOnCXXThis(CurrentLocation).take();
+  Expr *DerefThis = CreateBuiltinUnaryOp(CurrentLocation, UO_Deref, This).take();
+
+ ExprResult BuildBlock = BuildBlockForLambdaConversion(CurrentLocation,
+ Conv->getLocation(),
+ Conv, DerefThis);
+
+ // If we're not under ARC, make sure we still get the _Block_copy/autorelease
+ // behavior. Note that only the general conversion function does this
+ // (since it's unusable otherwise); in the case where we inline the
+ // block literal, it has block literal lifetime semantics.
+ if (!BuildBlock.isInvalid() && !getLangOpts().ObjCAutoRefCount)
+ BuildBlock = ImplicitCastExpr::Create(Context, BuildBlock.get()->getType(),
+ CK_CopyAndAutoreleaseBlockObject,
+ BuildBlock.get(), 0, VK_RValue);
+
+ if (BuildBlock.isInvalid()) {
+ Diag(CurrentLocation, diag::note_lambda_to_block_conv);
+ Conv->setInvalidDecl();
+ return;
+ }
+
+ // Create the return statement that returns the block from the conversion
+ // function.
+ StmtResult Return = ActOnReturnStmt(Conv->getLocation(), BuildBlock.get());
+ if (Return.isInvalid()) {
+ Diag(CurrentLocation, diag::note_lambda_to_block_conv);
+ Conv->setInvalidDecl();
+ return;
+ }
+
+ // Set the body of the conversion function.
+ Stmt *ReturnS = Return.take();
+ Conv->setBody(new (Context) CompoundStmt(Context, &ReturnS, 1,
+ Conv->getLocation(),
+ Conv->getLocation()));
+
+ // We're done; notify the mutation listener, if any.
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Conv);
+ }
+}
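// [Editor's note: illustrative sketch, not part of the original patch.] The
// block conversion applies under Objective-C++ with blocks enabled, e.g.:
//
//   auto l = [n]{ use(n); };      // 'use' is a hypothetical function
//   void (^b)(void) = l;          // lambda converted to a block literal
//
// Outside ARC, the CK_CopyAndAutoreleaseBlockObject cast above gives the
// returned block the usual _Block_copy/autorelease lifetime.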
+
+/// \brief Determine whether the given argument list contains exactly one
+/// "real" (non-default) argument.
+static bool hasOneRealArgument(MultiExprArg Args) {
+ switch (Args.size()) {
+ case 0:
+ return false;
+
+ default:
+ if (!Args.get()[1]->isDefaultArgument())
+ return false;
+
+ // fall through
+ case 1:
+ return !Args.get()[0]->isDefaultArgument();
+ }
+
+ return false;
+}
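// [Editor's note: illustrative sketch, not part of the original patch.] The
// helper matters when a copy constructor carries defaulted extra parameters:
//
//   struct S { S(const S &other, int tag = 0); };
//   S b = S(a);   // ExprArgs is {a, <default 0>}
//
// hasOneRealArgument() still reports one "real" argument, so the temporary
// remains eligible for copy elision in BuildCXXConstructExpr below.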
+
ExprResult
Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor,
@@ -8702,7 +9102,7 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
// can be omitted by constructing the temporary object
// directly into the target of the omitted copy/move
if (ConstructKind == CXXConstructExpr::CK_Complete &&
- Constructor->isCopyOrMoveConstructor() && ExprArgs.size() >= 1) {
+ Constructor->isCopyOrMoveConstructor() && hasOneRealArgument(ExprArgs)) {
Expr *SubExpr = ((Expr **)ExprArgs.get())[0];
Elidable = SubExpr->isTemporaryObject(Context, Constructor->getParent());
}
@@ -8732,10 +9132,11 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CheckNonNullArguments(NonNull, ExprArgs.get(), ConstructLoc);
}
- MarkDeclarationReferenced(ConstructLoc, Constructor);
+ MarkFunctionReferenced(ConstructLoc, Constructor);
return Owned(CXXConstructExpr::Create(Context, DeclInitType, ConstructLoc,
Constructor, Elidable, Exprs, NumExprs,
- HadMultipleCandidates, RequiresZeroInit,
+ HadMultipleCandidates, /*FIXME*/false,
+ RequiresZeroInit,
static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
ParenRange));
}
@@ -8754,7 +9155,7 @@ bool Sema::InitializeVarWithConstructor(VarDecl *VD,
Expr *Temp = TempResult.takeAs<Expr>();
CheckImplicitConversions(Temp, VD->getLocation());
- MarkDeclarationReferenced(VD->getLocation(), Constructor);
+ MarkFunctionReferenced(VD->getLocation(), Constructor);
Temp = MaybeCreateExprWithCleanups(Temp);
VD->setInit(Temp);
@@ -8766,15 +9167,16 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
if (ClassDecl->isInvalidDecl()) return;
- if (ClassDecl->hasTrivialDestructor()) return;
+ if (ClassDecl->hasIrrelevantDestructor()) return;
if (ClassDecl->isDependentContext()) return;
CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
- MarkDeclarationReferenced(VD->getLocation(), Destructor);
+ MarkFunctionReferenced(VD->getLocation(), Destructor);
CheckDestructorAccess(VD->getLocation(), Destructor,
PDiag(diag::err_access_dtor_var)
<< VD->getDeclName()
<< VD->getType());
+ DiagnoseUseOfDecl(Destructor, VD->getLocation());
if (!VD->hasGlobalStorage()) return;
@@ -8787,188 +9189,6 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
Diag(VD->getLocation(), diag::warn_global_destructor);
}
-/// AddCXXDirectInitializerToDecl - This action is called immediately after
-/// ActOnDeclarator, when a C++ direct initializer is present.
-/// e.g: "int x(1);"
-void Sema::AddCXXDirectInitializerToDecl(Decl *RealDecl,
- SourceLocation LParenLoc,
- MultiExprArg Exprs,
- SourceLocation RParenLoc,
- bool TypeMayContainAuto) {
- assert(Exprs.size() != 0 && Exprs.get() && "missing expressions");
-
- // If there is no declaration, there was an error parsing it. Just ignore
- // the initializer.
- if (RealDecl == 0)
- return;
-
- VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl);
- if (!VDecl) {
- Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
- RealDecl->setInvalidDecl();
- return;
- }
-
- // C++0x [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
- if (TypeMayContainAuto && VDecl->getType()->getContainedAutoType()) {
- // FIXME: n3225 doesn't actually seem to indicate this is ill-formed
- if (Exprs.size() > 1) {
- Diag(Exprs.get()[1]->getSourceRange().getBegin(),
- diag::err_auto_var_init_multiple_expressions)
- << VDecl->getDeclName() << VDecl->getType()
- << VDecl->getSourceRange();
- RealDecl->setInvalidDecl();
- return;
- }
-
- Expr *Init = Exprs.get()[0];
- TypeSourceInfo *DeducedType = 0;
- if (!DeduceAutoType(VDecl->getTypeSourceInfo(), Init, DeducedType))
- Diag(VDecl->getLocation(), diag::err_auto_var_deduction_failure)
- << VDecl->getDeclName() << VDecl->getType() << Init->getType()
- << Init->getSourceRange();
- if (!DeducedType) {
- RealDecl->setInvalidDecl();
- return;
- }
- VDecl->setTypeSourceInfo(DeducedType);
- VDecl->setType(DeducedType->getType());
-
- // In ARC, infer lifetime.
- if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
- VDecl->setInvalidDecl();
-
- // If this is a redeclaration, check that the type we just deduced matches
- // the previously declared type.
- if (VarDecl *Old = VDecl->getPreviousDeclaration())
- MergeVarDeclTypes(VDecl, Old);
- }
-
- // We will represent direct-initialization similarly to copy-initialization:
- // int x(1); -as-> int x = 1;
- // ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
- //
- // Clients that want to distinguish between the two forms, can check for
- // direct initializer using VarDecl::hasCXXDirectInitializer().
- // A major benefit is that clients that don't particularly care about which
- // exactly form was it (like the CodeGen) can handle both cases without
- // special case code.
-
- // C++ 8.5p11:
- // The form of initialization (using parentheses or '=') is generally
- // insignificant, but does matter when the entity being initialized has a
- // class type.
-
- if (!VDecl->getType()->isDependentType() &&
- !VDecl->getType()->isIncompleteArrayType() &&
- RequireCompleteType(VDecl->getLocation(), VDecl->getType(),
- diag::err_typecheck_decl_incomplete_type)) {
- VDecl->setInvalidDecl();
- return;
- }
-
- // The variable can not have an abstract class type.
- if (RequireNonAbstractType(VDecl->getLocation(), VDecl->getType(),
- diag::err_abstract_type_in_decl,
- AbstractVariableType))
- VDecl->setInvalidDecl();
-
- const VarDecl *Def;
- if ((Def = VDecl->getDefinition()) && Def != VDecl) {
- Diag(VDecl->getLocation(), diag::err_redefinition)
- << VDecl->getDeclName();
- Diag(Def->getLocation(), diag::note_previous_definition);
- VDecl->setInvalidDecl();
- return;
- }
-
- // C++ [class.static.data]p4
- // If a static data member is of const integral or const
- // enumeration type, its declaration in the class definition can
- // specify a constant-initializer which shall be an integral
- // constant expression (5.19). In that case, the member can appear
- // in integral constant expressions. The member shall still be
- // defined in a namespace scope if it is used in the program and the
- // namespace scope definition shall not contain an initializer.
- //
- // We already performed a redefinition check above, but for static
- // data members we also need to check whether there was an in-class
- // declaration with an initializer.
- const VarDecl* PrevInit = 0;
- if (VDecl->isStaticDataMember() && VDecl->getAnyInitializer(PrevInit)) {
- Diag(VDecl->getLocation(), diag::err_redefinition) << VDecl->getDeclName();
- Diag(PrevInit->getLocation(), diag::note_previous_definition);
- return;
- }
-
- bool IsDependent = false;
- for (unsigned I = 0, N = Exprs.size(); I != N; ++I) {
- if (DiagnoseUnexpandedParameterPack(Exprs.get()[I], UPPC_Expression)) {
- VDecl->setInvalidDecl();
- return;
- }
-
- if (Exprs.get()[I]->isTypeDependent())
- IsDependent = true;
- }
-
- // If either the declaration has a dependent type or if any of the
- // expressions is type-dependent, we represent the initialization
- // via a ParenListExpr for later use during template instantiation.
- if (VDecl->getType()->isDependentType() || IsDependent) {
- // Let clients know that initialization was done with a direct initializer.
- VDecl->setCXXDirectInitializer(true);
-
- // Store the initialization expressions as a ParenListExpr.
- unsigned NumExprs = Exprs.size();
- VDecl->setInit(new (Context) ParenListExpr(
- Context, LParenLoc, (Expr **)Exprs.release(), NumExprs, RParenLoc,
- VDecl->getType().getNonReferenceType()));
- return;
- }
-
- // Capture the variable that is being initialized and the style of
- // initialization.
- InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
-
- // FIXME: Poor source location information.
- InitializationKind Kind
- = InitializationKind::CreateDirect(VDecl->getLocation(),
- LParenLoc, RParenLoc);
-
- QualType T = VDecl->getType();
- InitializationSequence InitSeq(*this, Entity, Kind,
- Exprs.get(), Exprs.size());
- ExprResult Result = InitSeq.Perform(*this, Entity, Kind, move(Exprs), &T);
- if (Result.isInvalid()) {
- VDecl->setInvalidDecl();
- return;
- } else if (T != VDecl->getType()) {
- VDecl->setType(T);
- Result.get()->setType(T);
- }
-
-
- Expr *Init = Result.get();
- CheckImplicitConversions(Init, LParenLoc);
-
- if (VDecl->isConstexpr() && !VDecl->isInvalidDecl() &&
- !Init->isValueDependent() &&
- !Init->isConstantInitializer(Context,
- VDecl->getType()->isReferenceType())) {
- // FIXME: Improve this diagnostic to explain why the initializer is not
- // a constant expression.
- Diag(VDecl->getLocation(), diag::err_constexpr_var_requires_const_init)
- << VDecl << Init->getSourceRange();
- }
-
- Init = MaybeCreateExprWithCleanups(Init);
- VDecl->setInit(Init);
- VDecl->setCXXDirectInitializer(true);
-
- CheckCompleteVariableDeclaration(VDecl);
-}
-
/// \brief Given a constructor and the set of arguments provided for the
/// constructor, convert the arguments and add any required default arguments
/// to form a proper call to this constructor.
@@ -8978,7 +9198,8 @@ bool
Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
- ASTOwningVector<Expr*> &ConvertedArgs) {
+ ASTOwningVector<Expr*> &ConvertedArgs,
+ bool AllowExplicit) {
// FIXME: This duplicates a lot of code from Sema::ConvertArgumentsForCall.
unsigned NumArgs = ArgsPtr.size();
Expr **Args = (Expr **)ArgsPtr.get();
@@ -8999,9 +9220,13 @@ Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
SmallVector<Expr *, 8> AllArgs;
bool Invalid = GatherArgumentsForCall(Loc, Constructor,
Proto, 0, Args, NumArgs, AllArgs,
- CallType);
- for (unsigned i =0, size = AllArgs.size(); i < size; i++)
- ConvertedArgs.push_back(AllArgs[i]);
+ CallType, AllowExplicit);
+ ConvertedArgs.append(AllArgs.begin(), AllArgs.end());
+
+ DiagnoseSentinelCalls(Constructor, Loc, AllArgs.data(), AllArgs.size());
+
+ // FIXME: Missing call to CheckFunctionCall or equivalent
+
return Invalid;
}
@@ -9270,21 +9495,29 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
/// of this literal operator function is well-formed. If so, returns
/// false; otherwise, emits appropriate diagnostics and returns true.
bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
- DeclContext *DC = FnDecl->getDeclContext();
- Decl::Kind Kind = DC->getDeclKind();
- if (Kind != Decl::TranslationUnit && Kind != Decl::Namespace &&
- Kind != Decl::LinkageSpec) {
+ if (isa<CXXMethodDecl>(FnDecl)) {
Diag(FnDecl->getLocation(), diag::err_literal_operator_outside_namespace)
<< FnDecl->getDeclName();
return true;
}
+ if (FnDecl->isExternC()) {
+ Diag(FnDecl->getLocation(), diag::err_literal_operator_extern_c);
+ return true;
+ }
+
bool Valid = false;
+ // This might be the definition of a literal operator template.
+ FunctionTemplateDecl *TpDecl = FnDecl->getDescribedFunctionTemplate();
+ // This might be a specialization of a literal operator template.
+ if (!TpDecl)
+ TpDecl = FnDecl->getPrimaryTemplate();
+
// template <char...> type operator "" name() is the only valid template
// signature, and the only valid signature with no parameters.
- if (FnDecl->param_size() == 0) {
- if (FunctionTemplateDecl *TpDecl = FnDecl->getDescribedFunctionTemplate()) {
+ if (TpDecl) {
+ if (FnDecl->param_size() == 0) {
// Must have only one template parameter
TemplateParameterList *Params = TpDecl->getTemplateParameters();
if (Params->size() == 1) {
@@ -9297,11 +9530,11 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
Valid = true;
}
}
- } else {
+ } else if (FnDecl->param_size()) {
// Check the first parameter
FunctionDecl::param_iterator Param = FnDecl->param_begin();
- QualType T = (*Param)->getType();
+ QualType T = (*Param)->getType().getUnqualifiedType();
// unsigned long long int, long double, and any character type are allowed
// as the only parameters.
@@ -9321,7 +9554,7 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
if (!PT)
goto FinishedParams;
T = PT->getPointeeType();
- if (!T.isConstQualified())
+ if (!T.isConstQualified() || T.isVolatileQualified())
goto FinishedParams;
T = T.getUnqualifiedType();
@@ -9358,30 +9591,28 @@ FinishedParams:
return true;
}
- StringRef LiteralName
+ // A parameter-declaration-clause containing a default argument is not
+ // equivalent to any of the permitted forms.
+ for (FunctionDecl::param_iterator Param = FnDecl->param_begin(),
+ ParamEnd = FnDecl->param_end();
+ Param != ParamEnd; ++Param) {
+ if ((*Param)->hasDefaultArg()) {
+ Diag((*Param)->getDefaultArgRange().getBegin(),
+ diag::err_literal_operator_default_argument)
+ << (*Param)->getDefaultArgRange();
+ break;
+ }
+ }
+
+ StringRef LiteralName
= FnDecl->getDeclName().getCXXLiteralIdentifier()->getName();
if (LiteralName[0] != '_') {
- // C++0x [usrlit.suffix]p1:
- // Literal suffix identifiers that do not start with an underscore are
- // reserved for future standardization.
- bool IsHexFloat = true;
- if (LiteralName.size() > 1 &&
- (LiteralName[0] == 'P' || LiteralName[0] == 'p')) {
- for (unsigned I = 1, N = LiteralName.size(); I < N; ++I) {
- if (!isdigit(LiteralName[I])) {
- IsHexFloat = false;
- break;
- }
- }
- }
-
- if (IsHexFloat)
- Diag(FnDecl->getLocation(), diag::warn_user_literal_hexfloat)
- << LiteralName;
- else
- Diag(FnDecl->getLocation(), diag::warn_user_literal_reserved);
+ // C++11 [usrlit.suffix]p1:
+ // Literal suffix identifiers that do not start with an underscore
+ // are reserved for future standardization.
+ Diag(FnDecl->getLocation(), diag::warn_user_literal_reserved);
}
-
+
return false;
}
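// [Editor's note: illustrative sketch, not part of the original patch.]
// Signatures this check accepts or rejects under C++11 [over.literal]:
//
//   long double operator"" _deg(long double);        // OK
//   unsigned operator"" _w(const char *, size_t);    // OK (string form)
//   template <char...> int operator"" _bin();        // OK (template form)
//   int operator"" _bad(int);                        // rejected: bad type
//   int operator"" _def(unsigned long long = 0);     // diagnosed: default arg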
@@ -9458,28 +9689,21 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
Invalid = true;
}
- // GCC allows catching pointers and references to incomplete types
- // as an extension; so do we, but we warn by default.
-
QualType BaseType = ExDeclType;
int Mode = 0; // 0 for direct type, 1 for pointer, 2 for reference
unsigned DK = diag::err_catch_incomplete;
- bool IncompleteCatchIsInvalid = true;
if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
BaseType = Ptr->getPointeeType();
Mode = 1;
- DK = diag::ext_catch_incomplete_ptr;
- IncompleteCatchIsInvalid = false;
+ DK = diag::err_catch_incomplete_ptr;
} else if (const ReferenceType *Ref = BaseType->getAs<ReferenceType>()) {
// For the purpose of error recovery, we treat rvalue refs like lvalue refs.
BaseType = Ref->getPointeeType();
Mode = 2;
- DK = diag::ext_catch_incomplete_ref;
- IncompleteCatchIsInvalid = false;
+ DK = diag::err_catch_incomplete_ref;
}
if (!Invalid && (Mode == 0 || !BaseType->isVoidType()) &&
- !BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK) &&
- IncompleteCatchIsInvalid)
+ !BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK))
Invalid = true;
if (!Invalid && !ExDeclType->isDependentType() &&
@@ -9490,7 +9714,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
// Only the non-fragile NeXT runtime currently supports C++ catches
// of ObjC types, and no runtime supports catching ObjC types by value.
- if (!Invalid && getLangOptions().ObjC1) {
+ if (!Invalid && getLangOpts().ObjC1) {
QualType T = ExDeclType;
if (const ReferenceType *RT = T->getAs<ReferenceType>())
T = RT->getPointeeType();
@@ -9499,7 +9723,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
Diag(Loc, diag::err_objc_object_catch);
Invalid = true;
} else if (T->isObjCObjectPointerType()) {
- if (!getLangOptions().ObjCNonFragileABI)
+ if (!getLangOpts().ObjCNonFragileABI)
Diag(Loc, diag::warn_objc_pointer_cxx_catch_fragile);
}
}
@@ -9508,6 +9732,10 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
ExDeclType, TInfo, SC_None, SC_None);
ExDecl->setExceptionVariable(true);
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(ExDecl))
+ Invalid = true;
+
if (!Invalid && !ExDeclType->isDependentType()) {
if (const RecordType *recordType = ExDeclType->getAs<RecordType>()) {
// C++ [except.handle]p16:
@@ -9578,6 +9806,7 @@ Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
if (PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ PrevDecl = 0;
}
}
@@ -9588,7 +9817,7 @@ Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
}
VarDecl *ExDecl = BuildExceptionDeclaration(S, TInfo,
- D.getSourceRange().getBegin(),
+ D.getLocStart(),
D.getIdentifierLoc(),
D.getIdentifier());
if (Invalid)
@@ -9611,17 +9840,24 @@ Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
StringLiteral *AssertMessage = cast<StringLiteral>(AssertMessageExpr_);
if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent()) {
- llvm::APSInt Value(32);
- if (!AssertExpr->isIntegerConstantExpr(Value, Context)) {
- Diag(StaticAssertLoc,
- diag::err_static_assert_expression_is_not_constant) <<
- AssertExpr->getSourceRange();
+ // In a static_assert-declaration, the constant-expression shall be a
+ // constant expression that can be contextually converted to bool.
+ ExprResult Converted = PerformContextuallyConvertToBool(AssertExpr);
+ if (Converted.isInvalid())
+ return 0;
+
+ llvm::APSInt Cond;
+ if (VerifyIntegerConstantExpression(Converted.get(), &Cond,
+ PDiag(diag::err_static_assert_expression_is_not_constant),
+ /*AllowFold=*/false).isInvalid())
return 0;
- }
- if (Value == 0) {
+ if (!Cond) {
+ llvm::SmallString<256> MsgBuffer;
+ llvm::raw_svector_ostream Msg(MsgBuffer);
+ AssertMessage->printPretty(Msg, Context, 0, getPrintingPolicy());
Diag(StaticAssertLoc, diag::err_static_assert_failed)
- << AssertMessage->getString() << AssertExpr->getSourceRange();
+ << Msg.str() << AssertExpr->getSourceRange();
}
}
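// [Editor's note: illustrative sketch, not part of the original patch.] With
// this hunk, a condition such as
//
//   static_assert(sizeof(void *) >= 8, "64-bit target required");
//
// is first contextually converted to bool, then required to be a constant
// expression with no folding allowed; on failure the message is rendered
// through printPretty() rather than taken as a raw string.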
@@ -9638,46 +9874,54 @@ Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
/// \brief Perform semantic analysis of the given friend type declaration.
///
/// \returns A friend declaration corresponding to the given type.
-FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation FriendLoc,
+FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation Loc,
+ SourceLocation FriendLoc,
TypeSourceInfo *TSInfo) {
assert(TSInfo && "NULL TypeSourceInfo for friend type declaration");
QualType T = TSInfo->getType();
SourceRange TypeRange = TSInfo->getTypeLoc().getLocalSourceRange();
- if (!getLangOptions().CPlusPlus0x) {
- // C++03 [class.friend]p2:
- // An elaborated-type-specifier shall be used in a friend declaration
- // for a class.*
- //
- // * The class-key of the elaborated-type-specifier is required.
- if (!ActiveTemplateInstantiations.empty()) {
- // Do not complain about the form of friend template types during
- // template instantiation; we will already have complained when the
- // template was declared.
- } else if (!T->isElaboratedTypeSpecifier()) {
- // If we evaluated the type to a record type, suggest putting
- // a tag in front.
- if (const RecordType *RT = T->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
-
- std::string InsertionText = std::string(" ") + RD->getKindName();
-
- Diag(TypeRange.getBegin(), diag::ext_unelaborated_friend_type)
- << (unsigned) RD->getTagKind()
- << T
- << FixItHint::CreateInsertion(PP.getLocForEndOfToken(FriendLoc),
- InsertionText);
- } else {
- Diag(FriendLoc, diag::ext_nonclass_type_friend)
- << T
- << SourceRange(FriendLoc, TypeRange.getEnd());
- }
- } else if (T->getAs<EnumType>()) {
- Diag(FriendLoc, diag::ext_enum_friend)
+ // C++03 [class.friend]p2:
+ // An elaborated-type-specifier shall be used in a friend declaration
+ // for a class.*
+ //
+ // * The class-key of the elaborated-type-specifier is required.
+ if (!ActiveTemplateInstantiations.empty()) {
+ // Do not complain about the form of friend template types during
+ // template instantiation; we will already have complained when the
+ // template was declared.
+ } else if (!T->isElaboratedTypeSpecifier()) {
+ // If we evaluated the type to a record type, suggest putting
+ // a tag in front.
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ RecordDecl *RD = RT->getDecl();
+
+ std::string InsertionText = std::string(" ") + RD->getKindName();
+
+ Diag(TypeRange.getBegin(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_unelaborated_friend_type :
+ diag::ext_unelaborated_friend_type)
+ << (unsigned) RD->getTagKind()
+ << T
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(FriendLoc),
+ InsertionText);
+ } else {
+ Diag(FriendLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_nonclass_type_friend :
+ diag::ext_nonclass_type_friend)
<< T
<< SourceRange(FriendLoc, TypeRange.getEnd());
}
+ } else if (T->getAs<EnumType>()) {
+ Diag(FriendLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_enum_friend :
+ diag::ext_enum_friend)
+ << T
+ << SourceRange(FriendLoc, TypeRange.getEnd());
}
// C++0x [class.friend]p3:
@@ -9688,7 +9932,7 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation FriendLoc,
// FIXME: C++0x has some syntactic restrictions on friend type declarations
// in [class.friend]p3 that we do not implement.
- return FriendDecl::Create(Context, CurContext, FriendLoc, TSInfo, FriendLoc);
+ return FriendDecl::Create(Context, CurContext, Loc, TSInfo, FriendLoc);
}
/// Handle a friend tag declaration where the scope specifier was
@@ -9732,8 +9976,6 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
if (Invalid) return 0;
- assert(SS.isNotEmpty() && "valid templated tag with no SS and no direct?");
-
bool isAllExplicitSpecializations = true;
for (unsigned I = TempParamLists.size(); I-- > 0; ) {
if (TempParamLists.get()[I]->size()) {
@@ -9748,6 +9990,18 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
// about the template header and build an appropriate non-templated
// friend. TODO: for source fidelity, remember the headers.
if (isAllExplicitSpecializations) {
+ if (SS.isEmpty()) {
+ bool Owned = false;
+ bool IsDependent = false;
+ return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc,
+ Attr, AS_public,
+ /*ModulePrivateLoc=*/SourceLocation(),
+ MultiTemplateParamsArg(), Owned, IsDependent,
+ /*ScopedEnumKWLoc=*/SourceLocation(),
+ /*ScopedEnumUsesClassTag=*/false,
+ /*UnderlyingType=*/TypeResult());
+ }
+
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
ElaboratedTypeKeyword Keyword
= TypeWithKeyword::getKeywordForTagTypeKind(Kind);
@@ -9759,12 +10013,12 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
if (isa<DependentNameType>(T)) {
DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
- TL.setKeywordLoc(TagLoc);
+ TL.setElaboratedKeywordLoc(TagLoc);
TL.setQualifierLoc(QualifierLoc);
TL.setNameLoc(NameLoc);
} else {
ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc());
- TL.setKeywordLoc(TagLoc);
+ TL.setElaboratedKeywordLoc(TagLoc);
TL.setQualifierLoc(QualifierLoc);
cast<TypeSpecTypeLoc>(TL.getNamedTypeLoc()).setNameLoc(NameLoc);
}
@@ -9775,6 +10029,10 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
CurContext->addDecl(Friend);
return Friend;
}
+
+ assert(SS.isNotEmpty() && "valid templated tag with no SS and no direct?");
+
+
// Handle the case of a templated-scope friend class. e.g.
// template <class T> class A<T>::B;
@@ -9783,7 +10041,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
QualType T = Context.getDependentNameType(ETK, SS.getScopeRep(), Name);
TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
- TL.setKeywordLoc(TagLoc);
+ TL.setElaboratedKeywordLoc(TagLoc);
TL.setQualifierLoc(SS.getWithLocInContext(Context));
TL.setNameLoc(NameLoc);
@@ -9815,7 +10073,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
/// template <> template <class T> friend class A<int>::B;
Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TempParams) {
- SourceLocation Loc = DS.getSourceRange().getBegin();
+ SourceLocation Loc = DS.getLocStart();
assert(DS.isFriendSpecified());
assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
@@ -9871,7 +10129,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
TSI,
DS.getFriendSpecLoc());
else
- D = CheckFriendTypeDecl(DS.getFriendSpecLoc(), TSI);
+ D = CheckFriendTypeDecl(Loc, DS.getFriendSpecLoc(), TSI);
if (!D)
return 0;
@@ -9977,7 +10235,7 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// declarations should stop at the nearest enclosing namespace,
// not that they should only consider the nearest enclosing
// namespace.
- while (DC->isRecord())
+ while (DC->isRecord() || DC->isTransparentContext())
DC = DC->getParent();
LookupQualifiedName(Previous, DC);
@@ -9996,15 +10254,17 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// C++ [class.friend]p1: A friend of a class is a function or
// class that is not a member of the class . . .
- // C++0x changes this for both friend types and functions.
+ // C++11 changes this for both friend types and functions.
// Most C++ 98 compilers do seem to give an error here, so
// we do, too.
- if (!Previous.empty() && DC->Equals(CurContext)
- && !getLangOptions().CPlusPlus0x)
- Diag(DS.getFriendSpecLoc(), diag::err_friend_is_member);
+ if (!Previous.empty() && DC->Equals(CurContext))
+ Diag(DS.getFriendSpecLoc(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_friend_is_member :
+ diag::err_friend_is_member);
DCScope = getScopeForDeclContext(S, DC);
-
+
// C++ [class.friend]p6:
// A function can be defined in a friend declaration of a class if and
// only if the class is a non-local class (9.8), the function name is
@@ -10046,7 +10306,10 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// C++ [class.friend]p1: A friend of a class is a function or
// class that is not a member of the class . . .
if (DC->Equals(CurContext))
- Diag(DS.getFriendSpecLoc(), diag::err_friend_is_member);
+ Diag(DS.getFriendSpecLoc(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_friend_is_member :
+ diag::err_friend_is_member);
if (D.isFunctionDefinition()) {
// C++ [class.friend]p6:
@@ -10093,6 +10356,15 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
}
}
+ // FIXME: This is an egregious hack to cope with cases where the scope stack
+ // does not contain the declaration context, i.e., in an out-of-line
+ // definition of a class.
+ Scope FakeDCScope(S, Scope::DeclScope, Diags);
+ if (!DCScope) {
+ FakeDCScope.setEntity(DC);
+ DCScope = &FakeDCScope;
+ }
+
bool AddToScope = true;
NamedDecl *ND = ActOnFunctionDeclarator(DCScope, D, DC, TInfo, Previous,
move(TemplateParams), AddToScope);
@@ -10109,7 +10381,7 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// lookup context is in lexical scope.
if (!CurContext->isDependentContext()) {
DC = DC->getRedeclContext();
- DC->makeDeclVisibleInContext(ND, /* Recoverable=*/ false);
+ DC->makeDeclVisibleInContext(ND);
if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
PushOnScopeChains(ND, EnclosingScope, /*AddToContext=*/ false);
}
@@ -10145,13 +10417,42 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
Diag(DelLoc, diag::err_deleted_non_function);
return;
}
- if (const FunctionDecl *Prev = Fn->getPreviousDeclaration()) {
+ if (const FunctionDecl *Prev = Fn->getPreviousDecl()) {
Diag(DelLoc, diag::err_deleted_decl_not_first);
Diag(Prev->getLocation(), diag::note_previous_declaration);
// If the declaration wasn't the first, we delete the function anyway for
// recovery.
}
Fn->setDeletedAsWritten();
+
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Dcl);
+ if (!MD)
+ return;
+
+ // A deleted special member function is trivial if the corresponding
+ // implicitly-declared function would have been.
+ switch (getSpecialMember(MD)) {
+ case CXXInvalid:
+ break;
+ case CXXDefaultConstructor:
+ MD->setTrivial(MD->getParent()->hasTrivialDefaultConstructor());
+ break;
+ case CXXCopyConstructor:
+ MD->setTrivial(MD->getParent()->hasTrivialCopyConstructor());
+ break;
+ case CXXMoveConstructor:
+ MD->setTrivial(MD->getParent()->hasTrivialMoveConstructor());
+ break;
+ case CXXCopyAssignment:
+ MD->setTrivial(MD->getParent()->hasTrivialCopyAssignment());
+ break;
+ case CXXMoveAssignment:
+ MD->setTrivial(MD->getParent()->hasTrivialMoveAssignment());
+ break;
+ case CXXDestructor:
+ MD->setTrivial(MD->getParent()->hasTrivialDestructor());
+ break;
+ }
}
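// [Editor's note: illustrative sketch, not part of the original patch.] The
// switch above preserves triviality for deleted special members, e.g.:
//
//   struct T { T(const T &) = delete; };
//
// T's copy constructor is deleted but still marked trivial, because the
// implicitly-declared copy constructor would have been trivial; this keeps
// type-trait queries consistent with the implicit declaration.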
void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
@@ -10245,7 +10546,7 @@ static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
if (!SubStmt)
continue;
if (isa<ReturnStmt>(SubStmt))
- Self.Diag(SubStmt->getSourceRange().getBegin(),
+ Self.Diag(SubStmt->getLocStart(),
diag::err_return_in_constructor_handler);
if (!isa<Expr>(SubStmt))
SearchForReturnInStmt(Self, SubStmt);
@@ -10376,6 +10677,14 @@ bool Sema::CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange) {
return true;
}
+/// \brief Determine whether the given declaration is a static data member.
+static bool isStaticDataMember(Decl *D) {
+ VarDecl *Var = dyn_cast_or_null<VarDecl>(D);
+ if (!Var)
+ return false;
+
+ return Var->isStaticDataMember();
+}
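// [Editor's note: illustrative sketch, not part of the original patch.] The
// helper feeds the two hooks below; for an out-of-line definition such as
//
//   struct A { static const int n; };
//   const int A::n = compute();   // 'compute' is a hypothetical function
//
// isStaticDataMember() is true for A::n, so its initializer is parsed in a
// PotentiallyEvaluated expression context tied to that member and popped
// again on exit.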
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse
/// an initializer for the out-of-line declaration 'Dcl'. The scope
/// is a fresh scope pushed for just this purpose.
@@ -10391,6 +10700,12 @@ void Sema::ActOnCXXEnterDeclInitializer(Scope *S, Decl *D) {
// int foo::bar;
assert(D->isOutOfLine());
EnterDeclaratorContext(S, D->getDeclContext());
+
+ // If we are parsing the initializer for a static data member, push a
+ // new expression evaluation context that is associated with this static
+ // data member.
+ if (isStaticDataMember(D))
+ PushExpressionEvaluationContext(PotentiallyEvaluated, D);
}
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
@@ -10399,6 +10714,9 @@ void Sema::ActOnCXXExitDeclInitializer(Scope *S, Decl *D) {
// If there is no declaration, there was an error parsing it.
if (D == 0 || D->isInvalidDecl()) return;
+ if (isStaticDataMember(D))
+ PopExpressionEvaluationContext();
+
assert(D->isOutOfLine());
ExitDeclaratorContext(S);
}
@@ -10560,7 +10878,10 @@ bool Sema::DefineUsedVTables() {
if (!KeyFunction ||
(KeyFunction->hasBody(KeyFunctionDef) &&
KeyFunctionDef->isInlined()))
- Diag(Class->getLocation(), diag::warn_weak_vtable) << Class;
+ Diag(Class->getLocation(), Class->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition
+ ? diag::warn_weak_template_vtable : diag::warn_weak_vtable)
+ << Class;
}
}
VTableUses.clear();
@@ -10577,7 +10898,7 @@ void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
// C++ [basic.def.odr]p2:
// [...] A virtual member function is used if it is not pure. [...]
if (MD->isVirtual() && !MD->isPure())
- MarkDeclarationReferenced(Loc, MD);
+ MarkFunctionReferenced(Loc, MD);
}
// Only classes that have virtual bases need a VTT.
@@ -10597,7 +10918,7 @@ void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
/// SetIvarInitializers - This routine builds initialization ASTs for the
/// Objective-C implementation whose ivars need be initialized.
void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
- if (!getLangOptions().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
return;
if (ObjCInterfaceDecl *OID = ObjCImplementation->getClassInterface()) {
SmallVector<ObjCIvarDecl*, 8> ivars;
@@ -10637,7 +10958,7 @@ void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
- MarkDeclarationReferenced(Field->getLocation(), Destructor);
+ MarkFunctionReferenced(Field->getLocation(), Destructor);
CheckDestructorAccess(Field->getLocation(), Destructor,
PDiag(diag::err_access_dtor_ivar)
<< Context.getBaseElementType(Field->getType()));
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
index 62b4a7c..a942d49 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -21,8 +21,10 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/DenseSet.h"
using namespace clang;
@@ -59,7 +61,7 @@ bool Sema::checkInitMethod(ObjCMethodDecl *method,
// It's okay for the result type to still be a forward declaration
// if we're checking an interface declaration.
- if (resultClass->isForwardDecl()) {
+ if (!resultClass->hasDefinition()) {
if (receiverTypeIfCall.isNull() &&
!isa<ObjCImplementationDecl>(method->getDeclContext()))
return false;
@@ -156,7 +158,7 @@ void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
Diag(Overridden->getLocation(),
diag::note_related_result_type_overridden);
}
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
if ((NewMethod->hasAttr<NSReturnsRetainedAttr>() !=
Overridden->hasAttr<NSReturnsRetainedAttr>())) {
Diag(NewMethod->getLocation(),
@@ -241,7 +243,8 @@ static void DiagnoseObjCImplementedDeprecations(Sema &S,
if (ND && ND->isDeprecated()) {
S.Diag(ImplLoc, diag::warn_deprecated_def) << select;
if (select == 0)
- S.Diag(ND->getLocation(), diag::note_method_declared_at);
+ S.Diag(ND->getLocation(), diag::note_method_declared_at)
+ << ND->getDeclName();
else
S.Diag(ND->getLocation(), diag::note_previous_decl) << "class";
}
@@ -297,7 +300,7 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
}
// In ARC, disallow definition of retain/release/autorelease/retainCount
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
switch (MDecl->getMethodFamily()) {
case OMF_retain:
case OMF_retainCount:
@@ -336,16 +339,38 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
// Only do this if the current class actually has a superclass.
if (IC->getSuperClass()) {
ObjCShouldCallSuperDealloc =
- !(Context.getLangOptions().ObjCAutoRefCount ||
- Context.getLangOptions().getGC() == LangOptions::GCOnly) &&
+ !(Context.getLangOpts().ObjCAutoRefCount ||
+ Context.getLangOpts().getGC() == LangOptions::GCOnly) &&
MDecl->getMethodFamily() == OMF_dealloc;
ObjCShouldCallSuperFinalize =
- Context.getLangOptions().getGC() != LangOptions::NonGC &&
+ Context.getLangOpts().getGC() != LangOptions::NonGC &&
MDecl->getMethodFamily() == OMF_finalize;
}
}
}
+namespace {
+
+// Callback to only accept typo corrections that are Objective-C classes.
+// If an ObjCInterfaceDecl* is given to the constructor, then the validation
+// function will reject corrections to that class.
+class ObjCInterfaceValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ ObjCInterfaceValidatorCCC() : CurrentIDecl(0) {}
+ explicit ObjCInterfaceValidatorCCC(ObjCInterfaceDecl *IDecl)
+ : CurrentIDecl(IDecl) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ ObjCInterfaceDecl *ID = candidate.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ return ID && !declaresSameEntity(ID, CurrentIDecl);
+ }
+
+ private:
+ ObjCInterfaceDecl *CurrentIDecl;
+};
+
+}
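// [Editor's note: illustrative sketch, not part of the original patch.] The
// callback constrains typo correction when resolving a superclass name:
//
//   @interface MyWidget : MyWidgte   // typo for some existing class
//
// Candidates that are not ObjCInterfaceDecls, or that name MyWidget itself,
// are rejected, so the fix-it can never suggest a class as its own
// superclass.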
+
Decl *Sema::
ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
@@ -364,54 +389,44 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
}
- ObjCInterfaceDecl* IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
- if (IDecl) {
- // Class already seen. Is it a forward declaration?
- if (!IDecl->isForwardDecl()) {
+ // Create a declaration to describe this @interface.
+ ObjCInterfaceDecl* PrevIDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ ObjCInterfaceDecl *IDecl
+ = ObjCInterfaceDecl::Create(Context, CurContext, AtInterfaceLoc, ClassName,
+ PrevIDecl, ClassLoc);
+
+ if (PrevIDecl) {
+ // Class already seen. Was it a definition?
+ if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) {
+ Diag(AtInterfaceLoc, diag::err_duplicate_class_def)
+ << PrevIDecl->getDeclName();
+ Diag(Def->getLocation(), diag::note_previous_definition);
IDecl->setInvalidDecl();
- Diag(AtInterfaceLoc, diag::err_duplicate_class_def)<<IDecl->getDeclName();
- Diag(IDecl->getLocation(), diag::note_previous_definition);
-
- // Return the previous class interface.
- // FIXME: don't leak the objects passed in!
- return ActOnObjCContainerStartDefinition(IDecl);
- } else {
- IDecl->setLocation(ClassLoc);
- IDecl->setForwardDecl(false);
- IDecl->setAtStartLoc(AtInterfaceLoc);
- // If the forward decl was in a PCH, we need to write it again in a
- // dependent AST file.
- IDecl->setChangedSinceDeserialization(true);
-
- // Since this ObjCInterfaceDecl was created by a forward declaration,
- // we now add it to the DeclContext since it wasn't added before
- // (see ActOnForwardClassDeclaration).
- IDecl->setLexicalDeclContext(CurContext);
- CurContext->addDecl(IDecl);
-
- if (AttrList)
- ProcessDeclAttributeList(TUScope, IDecl, AttrList);
}
- } else {
- IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtInterfaceLoc,
- ClassName, ClassLoc);
- if (AttrList)
- ProcessDeclAttributeList(TUScope, IDecl, AttrList);
-
- PushOnScopeChains(IDecl, TUScope);
}
+
+ if (AttrList)
+ ProcessDeclAttributeList(TUScope, IDecl, AttrList);
+ PushOnScopeChains(IDecl, TUScope);
+ // Start the definition of this class. If we're in a redefinition case, there
+ // may already be a definition, so we'll end up adding to it.
+ if (!IDecl->hasDefinition())
+ IDecl->startDefinition();
+
if (SuperName) {
// Check if a different kind of symbol declared in this scope.
PrevDecl = LookupSingleName(TUScope, SuperName, SuperLoc,
LookupOrdinaryName);
if (!PrevDecl) {
- // Try to correct for a typo in the superclass name.
- TypoCorrection Corrected = CorrectTypo(
+ // Try to correct for a typo in the superclass name without correcting
+ // to the class we're defining.
+ ObjCInterfaceValidatorCCC Validator(IDecl);
+ if (TypoCorrection Corrected = CorrectTypo(
DeclarationNameInfo(SuperName, SuperLoc), LookupOrdinaryName, TUScope,
- NULL, NULL, false, CTC_NoKeywords);
- if ((PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>())) {
+ NULL, Validator)) {
+ PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>();
Diag(SuperLoc, diag::err_undef_superclass_suggest)
<< SuperName << ClassName << PrevDecl->getDeclName();
Diag(PrevDecl->getLocation(), diag::note_previous_decl)
@@ -419,10 +434,10 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
}
}
- if (PrevDecl == IDecl) {
+ if (declaresSameEntity(PrevDecl, IDecl)) {
Diag(SuperLoc, diag::err_recursive_superclass)
<< SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
- IDecl->setLocEnd(ClassLoc);
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
} else {
ObjCInterfaceDecl *SuperClassDecl =
dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
@@ -458,27 +473,28 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
if (!SuperClassDecl)
Diag(SuperLoc, diag::err_undef_superclass)
<< SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
- else if (SuperClassDecl->isForwardDecl()) {
- Diag(SuperLoc, diag::err_forward_superclass)
- << SuperClassDecl->getDeclName() << ClassName
- << SourceRange(AtInterfaceLoc, ClassLoc);
- Diag(SuperClassDecl->getLocation(), diag::note_forward_class);
+ else if (RequireCompleteType(SuperLoc,
+ Context.getObjCInterfaceType(SuperClassDecl),
+ PDiag(diag::err_forward_superclass)
+ << SuperClassDecl->getDeclName()
+ << ClassName
+ << SourceRange(AtInterfaceLoc, ClassLoc))) {
SuperClassDecl = 0;
}
}
IDecl->setSuperClass(SuperClassDecl);
IDecl->setSuperClassLoc(SuperLoc);
- IDecl->setLocEnd(SuperLoc);
+ IDecl->setEndOfDefinitionLoc(SuperLoc);
}
} else { // we have a root class.
- IDecl->setLocEnd(ClassLoc);
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
}
// Check then save referenced protocols.
if (NumProtoRefs) {
IDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
ProtoLocs, Context);
- IDecl->setLocEnd(EndProtoLoc);
+ IDecl->setEndOfDefinitionLoc(EndProtoLoc);
}
CheckObjCDeclScope(IDecl);
@@ -550,6 +566,10 @@ bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
Diag(PrevLoc, diag::note_previous_definition);
res = true;
}
+
+ if (!PDecl->hasDefinition())
+ continue;
+
if (CheckForwardProtocolDeclarationForCircularDependency(PName, Ploc,
PDecl->getLocation(), PDecl->getReferencedProtocols()))
res = true;
@@ -570,44 +590,52 @@ Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
bool err = false;
// FIXME: Deal with AttrList.
assert(ProtocolName && "Missing protocol identifier");
- ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolName, ProtocolLoc);
- if (PDecl) {
- // Protocol already seen. Better be a forward protocol declaration
- if (!PDecl->isForwardDecl()) {
- Diag(ProtocolLoc, diag::warn_duplicate_protocol_def) << ProtocolName;
- Diag(PDecl->getLocation(), diag::note_previous_definition);
- // Just return the protocol we already had.
- // FIXME: don't leak the objects passed in!
- return ActOnObjCContainerStartDefinition(PDecl);
- }
- ObjCList<ObjCProtocolDecl> PList;
- PList.set((ObjCProtocolDecl *const*)ProtoRefs, NumProtoRefs, Context);
- err = CheckForwardProtocolDeclarationForCircularDependency(
- ProtocolName, ProtocolLoc, PDecl->getLocation(), PList);
-
- // Make sure the cached decl gets a valid start location.
- PDecl->setAtStartLoc(AtProtoInterfaceLoc);
- PDecl->setLocation(ProtocolLoc);
- PDecl->setForwardDecl(false);
- // Since this ObjCProtocolDecl was created by a forward declaration,
- // we now add it to the DeclContext since it wasn't added before
- PDecl->setLexicalDeclContext(CurContext);
- CurContext->addDecl(PDecl);
- // Repeat in dependent AST files.
- PDecl->setChangedSinceDeserialization(true);
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(ProtocolName, ProtocolLoc,
+ ForRedeclaration);
+ ObjCProtocolDecl *PDecl = 0;
+ if (ObjCProtocolDecl *Def = PrevDecl? PrevDecl->getDefinition() : 0) {
+ // If we already have a definition, complain.
+ Diag(ProtocolLoc, diag::warn_duplicate_protocol_def) << ProtocolName;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+
+ // Create a new protocol that is completely distinct from previous
+ // declarations, and do not make this protocol available for name lookup.
+ // That way, we'll end up completely ignoring the duplicate.
+ // FIXME: Can we turn this into an error?
+ PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
+ ProtocolLoc, AtProtoInterfaceLoc,
+ /*PrevDecl=*/0);
+ PDecl->startDefinition();
} else {
+ if (PrevDecl) {
+ // Check for circular dependencies among protocol declarations. This can
+ // only happen if this protocol was forward-declared.
+ ObjCList<ObjCProtocolDecl> PList;
+ PList.set((ObjCProtocolDecl *const*)ProtoRefs, NumProtoRefs, Context);
+ err = CheckForwardProtocolDeclarationForCircularDependency(
+ ProtocolName, ProtocolLoc, PrevDecl->getLocation(), PList);
+ }
+
+ // Create the new declaration.
PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
- ProtocolLoc, AtProtoInterfaceLoc);
+ ProtocolLoc, AtProtoInterfaceLoc,
+ /*PrevDecl=*/PrevDecl);
+
PushOnScopeChains(PDecl, TUScope);
- PDecl->setForwardDecl(false);
+ PDecl->startDefinition();
}
+
if (AttrList)
ProcessDeclAttributeList(TUScope, PDecl, AttrList);
+
+ // Merge attributes from previous declarations.
+ if (PrevDecl)
+ mergeDeclAttributes(PDecl, PrevDecl);
+
if (!err && NumProtoRefs) {
/// Check then save referenced protocols.
PDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
ProtoLocs, Context);
- PDecl->setLocEnd(EndProtoLoc);
}
CheckObjCDeclScope(PDecl);
@@ -626,9 +654,10 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations,
ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolId[i].first,
ProtocolId[i].second);
if (!PDecl) {
+ DeclFilterCCC<ObjCProtocolDecl> Validator;
TypoCorrection Corrected = CorrectTypo(
DeclarationNameInfo(ProtocolId[i].first, ProtocolId[i].second),
- LookupObjCProtocolName, TUScope, NULL, NULL, false, CTC_NoKeywords);
+ LookupObjCProtocolName, TUScope, NULL, Validator);
if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>())) {
Diag(ProtocolId[i].second, diag::err_undeclared_protocol_suggest)
<< ProtocolId[i].first << Corrected.getCorrection();
@@ -647,7 +676,7 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations,
// If this is a forward declaration and we are supposed to warn in this
// case, do it.
- if (WarnOnDeclarations && PDecl->isForwardDecl())
+ if (WarnOnDeclarations && !PDecl->hasDefinition())
Diag(ProtocolId[i].second, diag::warn_undef_protocolref)
<< ProtocolId[i].first;
Protocols.push_back(PDecl);
@@ -684,40 +713,34 @@ void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
}
/// ActOnForwardProtocolDeclaration - Handle @protocol foo;
-Decl *
+Sema::DeclGroupPtrTy
Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList) {
- SmallVector<ObjCProtocolDecl*, 32> Protocols;
- SmallVector<SourceLocation, 8> ProtoLocs;
-
+ SmallVector<Decl *, 8> DeclsInGroup;
for (unsigned i = 0; i != NumElts; ++i) {
IdentifierInfo *Ident = IdentList[i].first;
- ObjCProtocolDecl *PDecl = LookupProtocol(Ident, IdentList[i].second);
- bool isNew = false;
- if (PDecl == 0) { // Not already seen?
- PDecl = ObjCProtocolDecl::Create(Context, CurContext, Ident,
- IdentList[i].second, AtProtocolLoc);
- PushOnScopeChains(PDecl, TUScope, false);
- isNew = true;
- }
- if (attrList) {
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(Ident, IdentList[i].second,
+ ForRedeclaration);
+ ObjCProtocolDecl *PDecl
+ = ObjCProtocolDecl::Create(Context, CurContext, Ident,
+ IdentList[i].second, AtProtocolLoc,
+ PrevDecl);
+
+ PushOnScopeChains(PDecl, TUScope);
+ CheckObjCDeclScope(PDecl);
+
+ if (attrList)
ProcessDeclAttributeList(TUScope, PDecl, attrList);
- if (!isNew)
- PDecl->setChangedSinceDeserialization(true);
- }
- Protocols.push_back(PDecl);
- ProtoLocs.push_back(IdentList[i].second);
+
+ if (PrevDecl)
+ mergeDeclAttributes(PDecl, PrevDecl);
+
+ DeclsInGroup.push_back(PDecl);
}
- ObjCForwardProtocolDecl *PDecl =
- ObjCForwardProtocolDecl::Create(Context, CurContext, AtProtocolLoc,
- Protocols.data(), Protocols.size(),
- ProtoLocs.data());
- CurContext->addDecl(PDecl);
- CheckObjCDeclScope(PDecl);
- return PDecl;
+ return BuildDeclaratorGroup(DeclsInGroup.data(), DeclsInGroup.size(), false);
}
Decl *Sema::
@@ -733,14 +756,21 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
/// Check that class of this category is already completely declared.
- if (!IDecl || IDecl->isForwardDecl()) {
+
+ if (!IDecl
+ || RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ PDiag(diag::err_category_forward_interface)
+ << (CategoryName == 0))) {
// Create an invalid ObjCCategoryDecl to serve as context for
// the enclosing method declarations. We mark the decl invalid
// to make it clear that this isn't a valid AST.
CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
ClassLoc, CategoryLoc, CategoryName,IDecl);
CDecl->setInvalidDecl();
- Diag(ClassLoc, diag::err_undef_interface) << ClassName;
+ CurContext->addDecl(CDecl);
+
+ if (!IDecl)
+ Diag(ClassLoc, diag::err_undef_interface) << ClassName;
return ActOnObjCContainerStartDefinition(CDecl);
}
@@ -792,24 +822,28 @@ Decl *Sema::ActOnStartCategoryImplementation(
IdentifierInfo *CatName, SourceLocation CatLoc) {
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
ObjCCategoryDecl *CatIDecl = 0;
- if (IDecl) {
+ if (IDecl && IDecl->hasDefinition()) {
CatIDecl = IDecl->FindCategoryDeclaration(CatName);
if (!CatIDecl) {
// Category @implementation with no corresponding @interface.
// Create and install one.
- CatIDecl = ObjCCategoryDecl::Create(Context, CurContext, SourceLocation(),
- SourceLocation(), SourceLocation(),
+ CatIDecl = ObjCCategoryDecl::Create(Context, CurContext, AtCatImplLoc,
+ ClassLoc, CatLoc,
CatName, IDecl);
+ CatIDecl->setImplicit();
}
}
ObjCCategoryImplDecl *CDecl =
ObjCCategoryImplDecl::Create(Context, CurContext, CatName, IDecl,
- ClassLoc, AtCatImplLoc);
+ ClassLoc, AtCatImplLoc, CatLoc);
/// Check that the class of this category is already completely declared.
- if (!IDecl || IDecl->isForwardDecl()) {
+ if (!IDecl) {
Diag(ClassLoc, diag::err_undef_interface) << ClassName;
CDecl->setInvalidDecl();
+ } else if (RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::err_undef_interface)) {
+ CDecl->setInvalidDecl();
}
// FIXME: PushOnScopeChains?
@@ -854,23 +888,21 @@ Decl *Sema::ActOnStartClassImplementation(
Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
} else if ((IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl))) {
- // If this is a forward declaration of an interface, warn.
- if (IDecl->isForwardDecl()) {
- Diag(ClassLoc, diag::warn_undef_interface) << ClassName;
- IDecl = 0;
- }
+ RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::warn_undef_interface);
} else {
// We did not find anything with the name ClassName; try to correct for
// typos in the class name.
- TypoCorrection Corrected = CorrectTypo(
+ ObjCInterfaceValidatorCCC Validator;
+ if (TypoCorrection Corrected = CorrectTypo(
DeclarationNameInfo(ClassName, ClassLoc), LookupOrdinaryName, TUScope,
- NULL, NULL, false, CTC_NoKeywords);
- if ((IDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>())) {
+ NULL, Validator)) {
// Suggest the (potentially) correct interface name. However, put the
// fix-it hint itself in a separate note, since changing the name in
// the warning would make the fix-it change semantics. Also, don't
// provide a code-modification hint or use the typo name for recovery,
// because this is just a warning. The program may actually be correct.
+ IDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>();
DeclarationName CorrectedName = Corrected.getCorrection();
Diag(ClassLoc, diag::warn_undef_interface_suggest)
<< ClassName << CorrectedName;
@@ -894,10 +926,12 @@ Decl *Sema::ActOnStartClassImplementation(
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
} else {
SDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ if (SDecl && !SDecl->hasDefinition())
+ SDecl = 0;
if (!SDecl)
Diag(SuperClassLoc, diag::err_undef_superclass)
<< SuperClassname << ClassName;
- else if (IDecl && IDecl->getSuperClass() != SDecl) {
+ else if (IDecl && !declaresSameEntity(IDecl->getSuperClass(), SDecl)) {
// This implementation and its interface do not have the same
// super class.
Diag(SuperClassLoc, diag::err_conflicting_super_class)
@@ -914,16 +948,24 @@ Decl *Sema::ActOnStartClassImplementation(
// FIXME: Do we support attributes on the @implementation? If so we should
// copy them over.
IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassImplLoc,
- ClassName, ClassLoc, false, true);
- IDecl->setSuperClass(SDecl);
- IDecl->setLocEnd(ClassLoc);
-
+ ClassName, /*PrevDecl=*/0, ClassLoc,
+ true);
+ IDecl->startDefinition();
+ if (SDecl) {
+ IDecl->setSuperClass(SDecl);
+ IDecl->setSuperClassLoc(SuperClassLoc);
+ IDecl->setEndOfDefinitionLoc(SuperClassLoc);
+ } else {
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
+ }
+
PushOnScopeChains(IDecl, TUScope);
} else {
// Mark the interface as being completed, even if it was only
// declared as a forward '@class ...;' declaration; the user cannot
// reopen it.
- IDecl->setForwardDecl(false);
+ if (!IDecl->hasDefinition())
+ IDecl->startDefinition();
}
ObjCImplementationDecl* IMPDecl =
@@ -951,6 +993,25 @@ Decl *Sema::ActOnStartClassImplementation(
return ActOnObjCContainerStartDefinition(IMPDecl);
}
+Sema::DeclGroupPtrTy
+Sema::ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls) {
+ SmallVector<Decl *, 64> DeclsInGroup;
+ DeclsInGroup.reserve(Decls.size() + 1);
+
+ for (unsigned i = 0, e = Decls.size(); i != e; ++i) {
+ Decl *Dcl = Decls[i];
+ if (!Dcl)
+ continue;
+ if (Dcl->getDeclContext()->isFileContext())
+ Dcl->setTopLevelDeclInObjCContainer();
+ DeclsInGroup.push_back(Dcl);
+ }
+
+ DeclsInGroup.push_back(ObjCImpDecl);
+
+ return BuildDeclaratorGroup(DeclsInGroup.data(), DeclsInGroup.size(), false);
+}
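+// For illustration: for '@implementation Foo  int x;  @end', the returned
+// group contains 'x' (marked as a top-level decl in an ObjC container)
+// followed by the ObjCImplementationDecl for 'Foo' itself.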
+
void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **ivars, unsigned numIvars,
SourceLocation RBrace) {
@@ -962,11 +1023,11 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
/// (legacy objective-c @implementation decl without an @interface decl).
/// Add the implementation's ivars to the synthesized class's ivar list.
if (IDecl->isImplicitInterfaceDecl()) {
- IDecl->setLocEnd(RBrace);
+ IDecl->setEndOfDefinitionLoc(RBrace);
// Add ivar's to class's DeclContext.
for (unsigned i = 0, e = numIvars; i != e; ++i) {
ivars[i]->setLexicalDeclContext(ImpDecl);
- IDecl->makeDeclVisibleInContext(ivars[i], false);
+ IDecl->makeDeclVisibleInContext(ivars[i]);
ImpDecl->addDecl(ivars[i]);
}
@@ -990,7 +1051,7 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
}
// Instance ivar to Implementation's DeclContext.
ImplIvar->setLexicalDeclContext(ImpDecl);
- IDecl->makeDeclVisibleInContext(ImplIvar, false);
+ IDecl->makeDeclVisibleInContext(ImplIvar);
ImpDecl->addDecl(ImplIvar);
}
return;
@@ -1329,7 +1390,7 @@ static bool checkMethodFamilyMismatch(Sema &S, ObjCMethodDecl *impl,
void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl) {
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
checkMethodFamilyMismatch(*this, ImpMethodDecl, MethodDecl))
return;
@@ -1410,7 +1471,8 @@ void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
if (match) {
Diag(ImpMethodDecl->getLocation(),
diag::warn_category_method_impl_match);
- Diag(MethodDecl->getLocation(), diag::note_method_declared_at);
+ Diag(MethodDecl->getLocation(), diag::note_method_declared_at)
+ << MethodDecl->getDeclName();
}
}
@@ -1428,16 +1490,14 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
const llvm::DenseSet<Selector> &InsMap,
const llvm::DenseSet<Selector> &ClsMap,
ObjCContainerDecl *CDecl) {
- ObjCInterfaceDecl *IDecl;
- if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl))
- IDecl = C->getClassInterface();
- else
- IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl);
+ ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
+ ObjCInterfaceDecl *IDecl = C ? C->getClassInterface()
+ : dyn_cast<ObjCInterfaceDecl>(CDecl);
assert (IDecl && "CheckProtocolMethodDefs - IDecl is null");
ObjCInterfaceDecl *Super = IDecl->getSuperClass();
ObjCInterfaceDecl *NSIDecl = 0;
- if (getLangOptions().NeXTRuntime) {
+ if (getLangOpts().NeXTRuntime) {
// Check to see if the class implements the forwardInvocation method and
// objects of this class are derived from 'NSProxy', so that requests can
// be forwarded from one object to another.
@@ -1467,20 +1527,28 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
!method->isSynthesized() && !InsMap.count(method->getSelector()) &&
(!Super ||
!Super->lookupInstanceMethod(method->getSelector()))) {
+ // If a method is not implemented in the category implementation but
+ // has been declared in its primary class, superclass,
+ // or in one of their protocols, there is no need to issue the warning.
+ // This is because the method will be implemented in the primary class
+ // or in one of its superclasses' implementations.
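+ // For illustration (hypothetical code), no warning is issued here:
+ //   @protocol P  - (void)m;  @end
+ //   @interface Foo : NSObject  - (void)m;  @end
+ //   @interface Foo (Cat) <P>  @end
+ //   @implementation Foo (Cat)  @end  // 'm' lives in the primary class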
+
// Ugly, but necessary. A method declared in a protocol might have
// been synthesized due to a property declared in the class which
// uses the protocol.
- ObjCMethodDecl *MethodInClass =
- IDecl->lookupInstanceMethod(method->getSelector());
- if (!MethodInClass || !MethodInClass->isSynthesized()) {
- unsigned DIAG = diag::warn_unimplemented_protocol_method;
- if (Diags.getDiagnosticLevel(DIAG, ImpLoc)
- != DiagnosticsEngine::Ignored) {
- WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
- Diag(method->getLocation(), diag::note_method_declared_at);
- Diag(CDecl->getLocation(), diag::note_required_for_protocol_at)
- << PDecl->getDeclName();
- }
+ if (ObjCMethodDecl *MethodInClass =
+ IDecl->lookupInstanceMethod(method->getSelector(),
+ true /*shallowCategoryLookup*/))
+ if (C || MethodInClass->isSynthesized())
+ continue;
+ unsigned DIAG = diag::warn_unimplemented_protocol_method;
+ if (Diags.getDiagnosticLevel(DIAG, ImpLoc)
+ != DiagnosticsEngine::Ignored) {
+ WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
+ Diag(method->getLocation(), diag::note_method_declared_at)
+ << method->getDeclName();
+ Diag(CDecl->getLocation(), diag::note_required_for_protocol_at)
+ << PDecl->getDeclName();
}
}
}
@@ -1492,11 +1560,16 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
!ClsMap.count(method->getSelector()) &&
(!Super || !Super->lookupClassMethod(method->getSelector()))) {
+ // See above comment for instance method lookups.
+ if (C && IDecl->lookupClassMethod(method->getSelector(),
+ true /*shallowCategoryLookup*/))
+ continue;
unsigned DIAG = diag::warn_unimplemented_protocol_method;
if (Diags.getDiagnosticLevel(DIAG, ImpLoc) !=
DiagnosticsEngine::Ignored) {
WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
- Diag(method->getLocation(), diag::note_method_declared_at);
+ Diag(method->getLocation(), diag::note_method_declared_at)
+ << method->getDeclName();
Diag(IDecl->getLocation(), diag::note_required_for_protocol_at) <<
PDecl->getDeclName();
}
@@ -1505,7 +1578,7 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
// Check on this protocols's referenced protocols, recursively.
for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
E = PDecl->protocol_end(); PI != E; ++PI)
- CheckProtocolMethodDefs(ImpLoc, *PI, IncompleteImpl, InsMap, ClsMap, IDecl);
+ CheckProtocolMethodDefs(ImpLoc, *PI, IncompleteImpl, InsMap, ClsMap, CDecl);
}
/// MatchAllMethodDeclarations - Check methods declared in interface
@@ -1519,7 +1592,7 @@ void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
ObjCContainerDecl* CDecl,
bool &IncompleteImpl,
bool ImmediateClass,
- bool WarnExactMatch) {
+ bool WarnCategoryMethodImpl) {
// Check and see if instance methods in class interface have been
// implemented in the implementation class. If so, their types match.
for (ObjCInterfaceDecl::instmeth_iterator I = CDecl->instmeth_begin(),
@@ -1541,12 +1614,12 @@ void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
ObjCMethodDecl *MethodDecl = *I;
// ImpMethodDecl may be null as in a @dynamic property.
if (ImpMethodDecl) {
- if (!WarnExactMatch)
+ if (!WarnCategoryMethodImpl)
WarnConflictingTypedMethods(ImpMethodDecl, MethodDecl,
isa<ObjCProtocolDecl>(CDecl));
else if (!MethodDecl->isSynthesized())
WarnExactTypedMethods(ImpMethodDecl, MethodDecl,
- isa<ObjCProtocolDecl>(CDecl));
+ isa<ObjCProtocolDecl>(CDecl));
}
}
}
@@ -1568,12 +1641,12 @@ void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
assert(CDecl->getClassMethod((*I)->getSelector()) &&
"Expected to find the method through lookup as well");
ObjCMethodDecl *MethodDecl = *I;
- if (!WarnExactMatch)
+ if (!WarnCategoryMethodImpl)
WarnConflictingTypedMethods(ImpMethodDecl, MethodDecl,
isa<ObjCProtocolDecl>(CDecl));
else
WarnExactTypedMethods(ImpMethodDecl, MethodDecl,
- isa<ObjCProtocolDecl>(CDecl));
+ isa<ObjCProtocolDecl>(CDecl));
}
}
@@ -1584,7 +1657,8 @@ void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
IMPDecl,
const_cast<ObjCCategoryDecl *>(ClsExtDecl),
- IncompleteImpl, false, WarnExactMatch);
+ IncompleteImpl, false,
+ WarnCategoryMethodImpl);
// Check for any implementation of a methods declared in protocol.
for (ObjCInterfaceDecl::all_protocol_iterator
@@ -1592,11 +1666,12 @@ void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
E = I->all_referenced_protocol_end(); PI != E; ++PI)
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
IMPDecl,
- (*PI), IncompleteImpl, false, WarnExactMatch);
+ (*PI), IncompleteImpl, false,
+ WarnCategoryMethodImpl);
// FIXME: For now, we are not checking for an exact match of methods
// in a category implementation and its primary class's superclass.
- if (!WarnExactMatch && I->getSuperClass())
+ if (!WarnCategoryMethodImpl && I->getSuperClass())
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
IMPDecl,
I->getSuperClass(), IncompleteImpl, false);
@@ -1633,7 +1708,8 @@ void Sema::CheckCategoryVsClassMethodMatches(
bool IncompleteImpl = false;
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
CatIMPDecl, IDecl,
- IncompleteImpl, false, true /*WarnExactMatch*/);
+ IncompleteImpl, false,
+ true /*WarnCategoryMethodImpl*/);
}
void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
@@ -1649,9 +1725,10 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
// Check and see if properties declared in the interface have either 1)
// an implementation or 2) there is a @synthesize/@dynamic implementation
// of the property in the @implementation.
- if (isa<ObjCInterfaceDecl>(CDecl) &&
- !(LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2))
- DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, InsMap);
+ if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ if (!(LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2) ||
+ IDecl->isObjCRequiresPropertyDefs())
+ DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, InsMap);
llvm::DenseSet<Selector> ClsMap;
for (ObjCImplementationDecl::classmeth_iterator
@@ -1738,37 +1815,36 @@ Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
// typedef NSObject < XCElementTogglerP > XCElementToggler;
// @class XCElementToggler;
//
- // FIXME: Make an extension?
+ // Here we have chosen to ignore the forward class declaration
+ // with a warning, since this is the implied behavior.
TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(PrevDecl);
if (!TDD || !TDD->getUnderlyingType()->isObjCObjectType()) {
Diag(AtClassLoc, diag::err_redefinition_different_kind) << IdentList[i];
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
} else {
// a forward class declaration matching a typedef name of a class refers
- // to the underlying class.
- if (const ObjCObjectType *OI =
- TDD->getUnderlyingType()->getAs<ObjCObjectType>())
- PrevDecl = OI->getInterface();
+ // to the underlying class. Just ignore the forward class with a warning,
+ // as this will force the intended behavior, which is to look up the
+ // typedef name.
+ if (isa<ObjCObjectType>(TDD->getUnderlyingType())) {
+ Diag(AtClassLoc, diag::warn_forward_class_redefinition) << IdentList[i];
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ continue;
+ }
}
}
- ObjCInterfaceDecl *IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
- if (!IDecl) { // Not already seen? Make a forward decl.
- IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassLoc,
- IdentList[i], IdentLocs[i], true);
-
- // Push the ObjCInterfaceDecl on the scope chain but do *not* add it to
- // the current DeclContext. This prevents clients that walk DeclContext
- // from seeing the imaginary ObjCInterfaceDecl until it is actually
- // declared later (if at all). We also take care to explicitly make
- // sure this declaration is visible for name lookup.
- PushOnScopeChains(IDecl, TUScope, false);
- CurContext->makeDeclVisibleInContext(IDecl, true);
- }
- ObjCClassDecl *CDecl = ObjCClassDecl::Create(Context, CurContext, AtClassLoc,
- IDecl, IdentLocs[i]);
- CurContext->addDecl(CDecl);
- CheckObjCDeclScope(CDecl);
- DeclsInGroup.push_back(CDecl);
+
+ // Create a declaration to describe this forward declaration.
+ ObjCInterfaceDecl *PrevIDecl
+ = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ ObjCInterfaceDecl *IDecl
+ = ObjCInterfaceDecl::Create(Context, CurContext, AtClassLoc,
+ IdentList[i], PrevIDecl, IdentLocs[i]);
+ IDecl->setAtEndRange(IdentLocs[i]);
+
+ PushOnScopeChains(IDecl, TUScope);
+ CheckObjCDeclScope(IDecl);
+ DeclsInGroup.push_back(IDecl);
}
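// For illustration: each name in '@class A, B;' now produces its own
// ObjCInterfaceDecl (chained to any previous declaration of that class),
// replacing the old per-group ObjCClassDecl node.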
return BuildDeclaratorGroup(DeclsInGroup.data(), DeclsInGroup.size(), false);
@@ -1870,7 +1946,7 @@ bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
left->getResultType(), right->getResultType()))
return false;
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
(left->hasAttr<NSReturnsRetainedAttr>()
!= right->hasAttr<NSReturnsRetainedAttr>() ||
left->hasAttr<NSConsumesSelfAttr>()
@@ -1887,76 +1963,81 @@ bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
if (!matchTypes(Context, strategy, lparm->getType(), rparm->getType()))
return false;
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
lparm->hasAttr<NSConsumedAttr>() != rparm->hasAttr<NSConsumedAttr>())
return false;
}
return true;
}
+void Sema::addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method) {
+ // If the list is empty, make it a singleton list.
+ if (List->Method == 0) {
+ List->Method = Method;
+ List->Next = 0;
+ return;
+ }
+
+ // We've seen a method with this name; see if we have already seen this
+ // type signature.
+ ObjCMethodList *Previous = List;
+ for (; List; Previous = List, List = List->Next) {
+ if (!MatchTwoMethodDeclarations(Method, List->Method))
+ continue;
+
+ ObjCMethodDecl *PrevObjCMethod = List->Method;
+
+ // Propagate the 'defined' bit.
+ if (Method->isDefined())
+ PrevObjCMethod->setDefined(true);
+
+ // If a method is deprecated, push it into the global pool.
+ // This is used for better diagnostics.
+ if (Method->isDeprecated()) {
+ if (!PrevObjCMethod->isDeprecated())
+ List->Method = Method;
+ }
+ // If the new method is unavailable, push it into the global pool
+ // unless the previous one is deprecated.
+ if (Method->isUnavailable()) {
+ if (PrevObjCMethod->getAvailability() < AR_Deprecated)
+ List->Method = Method;
+ }
+
+ return;
+ }
+
+ // We have a new signature for an existing method - add it.
+ // This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
+ ObjCMethodList *Mem = BumpAlloc.Allocate<ObjCMethodList>();
+ Previous->Next = new (Mem) ObjCMethodList(Method, 0);
+}
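+// For illustration (hypothetical code): these two unrelated declarations
+// share the selector 'value', so the second is appended as a new node:
+//   - (int)value;     // in class A
+//   - (float)value;   // in class B: same selector, different signature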
+
/// \brief Read the contents of the method pool for a given selector from
/// external storage.
-///
-/// This routine should only be called once, when the method pool has no entry
-/// for this selector.
-Sema::GlobalMethodPool::iterator Sema::ReadMethodPool(Selector Sel) {
+void Sema::ReadMethodPool(Selector Sel) {
assert(ExternalSource && "We need an external AST source");
- assert(MethodPool.find(Sel) == MethodPool.end() &&
- "Selector data already loaded into the method pool");
-
- // Read the method list from the external source.
- GlobalMethods Methods = ExternalSource->ReadMethodPool(Sel);
-
- return MethodPool.insert(std::make_pair(Sel, Methods)).first;
+ ExternalSource->ReadMethodPool(Sel);
}
void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
bool instance) {
+ // Ignore methods of invalid containers.
+ if (cast<Decl>(Method->getDeclContext())->isInvalidDecl())
+ return;
+
+ if (ExternalSource)
+ ReadMethodPool(Method->getSelector());
+
GlobalMethodPool::iterator Pos = MethodPool.find(Method->getSelector());
- if (Pos == MethodPool.end()) {
- if (ExternalSource)
- Pos = ReadMethodPool(Method->getSelector());
- else
- Pos = MethodPool.insert(std::make_pair(Method->getSelector(),
- GlobalMethods())).first;
- }
+ if (Pos == MethodPool.end())
+ Pos = MethodPool.insert(std::make_pair(Method->getSelector(),
+ GlobalMethods())).first;
+
Method->setDefined(impl);
+
ObjCMethodList &Entry = instance ? Pos->second.first : Pos->second.second;
- if (Entry.Method == 0) {
- // Haven't seen a method with this selector name yet - add it.
- Entry.Method = Method;
- Entry.Next = 0;
- return;
- }
-
- // We've seen a method with this name, see if we have already seen this type
- // signature.
- for (ObjCMethodList *List = &Entry; List; List = List->Next) {
- bool match = MatchTwoMethodDeclarations(Method, List->Method);
-
- if (match) {
- ObjCMethodDecl *PrevObjCMethod = List->Method;
- PrevObjCMethod->setDefined(impl);
- // If a method is deprecated, push it in the global pool.
- // This is used for better diagnostics.
- if (Method->isDeprecated()) {
- if (!PrevObjCMethod->isDeprecated())
- List->Method = Method;
- }
- // If new method is unavailable, push it into global pool
- // unless previous one is deprecated.
- if (Method->isUnavailable()) {
- if (PrevObjCMethod->getAvailability() < AR_Deprecated)
- List->Method = Method;
- }
- return;
- }
- }
-
- // We have a new signature for an existing method - add it.
- // This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
- ObjCMethodList *Mem = BumpAlloc.Allocate<ObjCMethodList>();
- Entry.Next = new (Mem) ObjCMethodList(Method, Entry.Next);
+ addMethodToGlobalList(&Entry, Method);
}
/// Determines if this is an "acceptable" loose mismatch in the global
@@ -1981,13 +2062,12 @@ static bool isAcceptableMethodMismatch(ObjCMethodDecl *chosen,
ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool warn, bool instance) {
+ if (ExternalSource)
+ ReadMethodPool(Sel);
+
GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
- if (Pos == MethodPool.end()) {
- if (ExternalSource)
- Pos = ReadMethodPool(Sel);
- else
- return 0;
- }
+ if (Pos == MethodPool.end())
+ return 0;
ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
@@ -2014,14 +2094,14 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
// differences. In ARC, however, we also need to check for loose
// mismatches, because most of them are errors.
if (!strictSelectorMatch ||
- (issueDiagnostic && getLangOptions().ObjCAutoRefCount))
+ (issueDiagnostic && getLangOpts().ObjCAutoRefCount))
for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next) {
// This checks if the methods differ in type mismatch.
if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method,
MMS_loose) &&
!isAcceptableMethodMismatch(MethList.Method, Next->Method)) {
issueDiagnostic = true;
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
issueError = true;
break;
}
@@ -2120,15 +2200,39 @@ void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID,
}
}
+Sema::ObjCContainerKind Sema::getObjCContainerKind() const {
+ switch (CurContext->getDeclKind()) {
+ case Decl::ObjCInterface:
+ return Sema::OCK_Interface;
+ case Decl::ObjCProtocol:
+ return Sema::OCK_Protocol;
+ case Decl::ObjCCategory:
+ if (dyn_cast<ObjCCategoryDecl>(CurContext)->IsClassExtension())
+ return Sema::OCK_ClassExtension;
+ else
+ return Sema::OCK_Category;
+ case Decl::ObjCImplementation:
+ return Sema::OCK_Implementation;
+ case Decl::ObjCCategoryImpl:
+ return Sema::OCK_CategoryImplementation;
+
+ default:
+ return Sema::OCK_None;
+ }
+}
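+// For illustration: between '@interface Foo (Bar)' and its '@end' this
+// returns OCK_Category; inside '@implementation Foo' it returns
+// OCK_Implementation; outside any ObjC container it returns OCK_None.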
+
// Note: For class/category implementations, allMethods/allProperties is
// always null.
-void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
- Decl **allMethods, unsigned allNum,
- Decl **allProperties, unsigned pNum,
- DeclGroupPtrTy *allTUVars, unsigned tuvNum) {
+Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
+ Decl **allMethods, unsigned allNum,
+ Decl **allProperties, unsigned pNum,
+ DeclGroupPtrTy *allTUVars, unsigned tuvNum) {
+
+ if (getObjCContainerKind() == Sema::OCK_None)
+ return 0;
+
+ assert(AtEnd.isValid() && "Invalid location for '@end'");
- if (!CurContext->isObjCContainer())
- return;
ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
Decl *ClassDecl = cast<Decl>(OCD);
@@ -2137,15 +2241,6 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
|| isa<ObjCProtocolDecl>(ClassDecl);
bool checkIdenticalMethods = isa<ObjCImplementationDecl>(ClassDecl);
- if (!isInterfaceDeclKind && AtEnd.isInvalid()) {
- // FIXME: This is wrong. We shouldn't be pretending that there is
- // an '@end' in the declaration.
- SourceLocation L = ClassDecl->getLocation();
- AtEnd.setBegin(L);
- AtEnd.setEnd(L);
- Diag(L, diag::err_missing_atend);
- }
-
// FIXME: Remove these and use the ObjCContainerDecl/DeclContext.
llvm::DenseMap<Selector, const ObjCMethodDecl*> InsMap;
llvm::DenseMap<Selector, const ObjCMethodDecl*> ClsMap;
@@ -2167,8 +2262,14 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
Method->setInvalidDecl();
} else {
- if (PrevMethod)
+ if (PrevMethod) {
Method->setAsRedeclaration(PrevMethod);
+ if (!Context.getSourceManager().isInSystemHeader(
+ Method->getLocation()))
+ Diag(Method->getLocation(), diag::warn_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
InsMap[Method->getSelector()] = Method;
/// The following allows us to typecheck messages to "id".
AddInstanceMethodToGlobalPool(Method);
@@ -2188,8 +2289,14 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
Method->setInvalidDecl();
} else {
- if (PrevMethod)
+ if (PrevMethod) {
Method->setAsRedeclaration(PrevMethod);
+ if (!Context.getSourceManager().isInSystemHeader(
+ Method->getLocation()))
+ Diag(Method->getLocation(), diag::warn_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
ClsMap[Method->getSelector()] = Method;
/// The following allows us to typecheck messages to "Class".
AddFactoryMethodToGlobalPool(Method);
@@ -2265,11 +2372,39 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
AtomicPropertySetterGetterRules(IC, IDecl);
DiagnoseOwningPropertyGetterSynthesis(IC);
- if (LangOpts.ObjCNonFragileABI2)
+ bool HasRootClassAttr = IDecl->hasAttr<ObjCRootClassAttr>();
+ if (IDecl->getSuperClass() == NULL) {
+ // This class has no superclass, so check that it has been marked with
+ // __attribute((objc_root_class)).
+ if (!HasRootClassAttr) {
+ SourceLocation DeclLoc(IDecl->getLocation());
+ SourceLocation SuperClassLoc(PP.getLocForEndOfToken(DeclLoc));
+ Diag(DeclLoc, diag::warn_objc_root_class_missing)
+ << IDecl->getIdentifier();
+ // See if NSObject is in the current scope, and if it is, suggest
+ // adding " : NSObject " to the class declaration.
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject),
+ DeclLoc, LookupOrdinaryName);
+ ObjCInterfaceDecl *NSObjectDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (NSObjectDecl && NSObjectDecl->getDefinition()) {
+ Diag(SuperClassLoc, diag::note_objc_needs_superclass)
+ << FixItHint::CreateInsertion(SuperClassLoc, " : NSObject ");
+ } else {
+ Diag(SuperClassLoc, diag::note_objc_needs_superclass);
+ }
+ }
+ } else if (HasRootClassAttr) {
+ // Complain that only root classes may have this attribute.
+ Diag(IDecl->getLocation(), diag::err_objc_root_class_subclass);
+ }
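+ // For illustration (hypothetical code): the warning and fix-it apply to
+ //   @interface Foo        // warning: no superclass; the note suggests
+ //   @end                  // inserting ' : NSObject '
+ // unless Foo is marked with __attribute__((objc_root_class)).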
+
+ if (LangOpts.ObjCNonFragileABI2) {
while (IDecl->getSuperClass()) {
DiagnoseDuplicateIvars(IDecl, IDecl->getSuperClass());
IDecl = IDecl->getSuperClass();
}
+ }
}
SetIvarInitializers(IC);
} else if (ObjCCategoryImplDecl* CatImplClass =
@@ -2300,6 +2435,15 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
}
}
ActOnObjCContainerFinishDefinition();
+
+ for (unsigned i = 0; i != tuvNum; i++) {
+ DeclGroupRef DG = allTUVars[i].getAsVal<DeclGroupRef>();
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ (*I)->setTopLevelDeclInObjCContainer();
+ Consumer.HandleTopLevelDeclInObjCContainer(DG);
+ }
+
+ return ClassDecl;
}
@@ -2311,13 +2455,36 @@ CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) {
}
static inline
-bool containsInvalidMethodImplAttribute(const AttrVec &A) {
- // The 'ibaction' attribute is allowed on method definitions because of
- // how the IBAction macro is used on both method declarations and definitions.
- // If the method definitions contains any other attributes, return true.
- for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i)
- if ((*i)->getKind() != attr::IBAction)
+bool containsInvalidMethodImplAttribute(ObjCMethodDecl *IMD,
+ const AttrVec &A) {
+ // If the method is only declared in the implementation (a private method),
+ // there is no need to issue any diagnostics on a method definition with
+ // attributes.
+ if (!IMD)
+ return false;
+
+ // The method declared in the interface has no attributes, but the
+ // implementation does. This is invalid.
+ if (!IMD->hasAttrs())
+ return true;
+
+ const AttrVec &D = IMD->getAttrs();
+ if (D.size() != A.size())
+ return true;
+
+ // Attributes on the method declaration and definition must match exactly.
+ // Note that we have at most a couple of attributes on methods, so this
+ // n*n search is good enough.
+ for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i) {
+ bool match = false;
+ for (AttrVec::const_iterator i1 = D.begin(), e1 = D.end(); i1 != e1; ++i1) {
+ if ((*i)->getKind() == (*i1)->getKind()) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
return true;
+ }
return false;
}
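// For illustration (hypothetical code): this returns true, so the caller
// warns about the mismatched attribute sets:
//   @interface Foo  - (void)m;  @end
//   @implementation Foo
//   - (void)m __attribute__((deprecated)) {}  // attribute only on definition
//   @end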
@@ -2352,7 +2519,7 @@ CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method,
if (ObjCInterfaceDecl *ResultClass
= ResultObjectType->getInterfaceDecl()) {
// - it is the same as the method's class type, or
- if (CurrentClass == ResultClass)
+ if (declaresSameEntity(CurrentClass, ResultClass))
return RTC_Compatible;
// - it is a superclass of the method's class type
@@ -2373,10 +2540,11 @@ namespace {
/// A helper class for searching for methods which a particular method
/// overrides.
class OverrideSearch {
+public:
Sema &S;
ObjCMethodDecl *Method;
- llvm::SmallPtrSet<ObjCContainerDecl*, 8> Searched;
- llvm::SmallPtrSet<ObjCMethodDecl*, 8> Overridden;
+ llvm::SmallPtrSet<ObjCContainerDecl*, 128> Searched;
+ llvm::SmallPtrSet<ObjCMethodDecl*, 4> Overridden;
bool Recursive;
public:
@@ -2388,7 +2556,11 @@ public:
Sema::GlobalMethodPool::iterator it = S.MethodPool.find(selector);
if (it == S.MethodPool.end()) {
if (!S.ExternalSource) return;
- it = S.ReadMethodPool(selector);
+ S.ReadMethodPool(selector);
+
+ it = S.MethodPool.find(selector);
+ if (it == S.MethodPool.end())
+ return;
}
ObjCMethodList &list =
method->isInstanceMethod() ? it->second.first : it->second.second;
@@ -2404,7 +2576,7 @@ public:
searchFromContainer(container);
}
- typedef llvm::SmallPtrSet<ObjCMethodDecl*,8>::iterator iterator;
+ typedef llvm::SmallPtrSet<ObjCMethodDecl*, 128>::iterator iterator;
iterator begin() const { return Overridden.begin(); }
iterator end() const { return Overridden.end(); }
@@ -2426,6 +2598,9 @@ private:
}
void searchFrom(ObjCProtocolDecl *protocol) {
+ if (!protocol->hasDefinition())
+ return;
+
// A method in a protocol declaration overrides declarations from
// referenced ("parent") protocols.
search(protocol->getReferencedProtocols());
@@ -2453,7 +2628,9 @@ private:
void searchFrom(ObjCInterfaceDecl *iface) {
// A method in a class declaration overrides declarations from
-
+ if (!iface->hasDefinition())
+ return;
+
// - categories,
for (ObjCCategoryDecl *category = iface->getCategoryList();
category; category = category->getNextClassCategory())
@@ -2606,6 +2783,10 @@ Decl *Sema::ActOnMethodDeclaration(
// Apply the attributes to the parameter.
ProcessDeclAttributeList(TUScope, Param, ArgInfo[i].ArgAttrs);
+ if (Param->hasAttr<BlocksAttr>()) {
+ Diag(Param->getLocation(), diag::err_block_on_nonlocal);
+ Param->setInvalidDecl();
+ }
S->AddDecl(Param);
IdResolver.AddDecl(Param);
@@ -2649,9 +2830,19 @@ Decl *Sema::ActOnMethodDeclaration(
ImpDecl->addClassMethod(ObjCMethod);
}
+ ObjCMethodDecl *IMD = 0;
+ if (ObjCInterfaceDecl *IDecl = ImpDecl->getClassInterface())
+ IMD = IDecl->lookupMethod(ObjCMethod->getSelector(),
+ ObjCMethod->isInstanceMethod());
if (ObjCMethod->hasAttrs() &&
- containsInvalidMethodImplAttribute(ObjCMethod->getAttrs()))
- Diag(EndLoc, diag::warn_attribute_method_def);
+ containsInvalidMethodImplAttribute(IMD, ObjCMethod->getAttrs())) {
+ SourceLocation MethodLoc = IMD->getLocation();
+ if (!getSourceManager().isInSystemHeader(MethodLoc)) {
+ Diag(EndLoc, diag::warn_attribute_method_def);
+ Diag(MethodLoc, diag::note_method_declared_at)
+ << ObjCMethod->getDeclName();
+ }
+ }
} else {
cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod);
}
@@ -2701,7 +2892,7 @@ Decl *Sema::ActOnMethodDeclaration(
}
bool ARCError = false;
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
ARCError = CheckARCMethodDecl(*this, ObjCMethod);
// Infer the related result type when possible.
@@ -2741,13 +2932,15 @@ Decl *Sema::ActOnMethodDeclaration(
}
bool Sema::CheckObjCDeclScope(Decl *D) {
- if (isa<TranslationUnitDecl>(CurContext->getRedeclContext()))
- return false;
// Following is also an error. But it is caused by a missing @end
// and diagnostic is issued elsewhere.
- if (isa<ObjCContainerDecl>(CurContext->getRedeclContext())) {
+ if (isa<ObjCContainerDecl>(CurContext->getRedeclContext()))
+ return false;
+
+ // If we switched context to the translation unit while we are still
+ // lexically in an ObjC container, it means the parser missed emitting
+ // an error.
+ if (isa<TranslationUnitDecl>(getCurLexicalContext()->getRedeclContext()))
return false;
- }
Diag(D->getLocation(), diag::err_objc_decls_may_only_appear_in_global_scope);
D->setInvalidDecl();
@@ -2790,7 +2983,7 @@ void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
for (SmallVectorImpl<Decl*>::iterator D = Decls.begin();
D != Decls.end(); ++D) {
FieldDecl *FD = cast<FieldDecl>(*D);
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
PushOnScopeChains(cast<FieldDecl>(FD), S);
else if (RecordDecl *Record = dyn_cast<RecordDecl>(TagD))
Record->addDecl(FD);
@@ -2830,6 +3023,10 @@ VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
T, TInfo, SC_None, SC_None);
New->setExceptionVariable(true);
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(New))
+ Invalid = true;
+
if (Invalid)
New->setInvalidDecl();
return New;
@@ -2855,7 +3052,7 @@ Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
// Check that there are no default arguments inside the type of this
// exception object (C++ only).
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
index 92af2d9..42221f8 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -20,6 +20,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
namespace clang {
@@ -101,7 +102,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
bool MissingExceptionSpecification = false;
bool MissingEmptyExceptionSpecification = false;
unsigned DiagID = diag::err_mismatched_exception_spec;
- if (getLangOptions().MicrosoftExt)
+ if (getLangOpts().MicrosoftExt)
DiagID = diag::warn_mismatched_exception_spec;
if (!CheckEquivalentExceptionSpec(PDiag(DiagID),
@@ -170,7 +171,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
// If exceptions are disabled, suppress the warning about missing
// exception specifications for new and delete operators.
- if (!getLangOptions().CXXExceptions) {
+ if (!getLangOpts().CXXExceptions) {
switch (New->getDeclName().getCXXOverloadedOperator()) {
case OO_New:
case OO_Array_New:
@@ -186,7 +187,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
}
// Warn about the lack of exception specification.
- llvm::SmallString<128> ExceptionSpecString;
+ SmallString<128> ExceptionSpecString;
llvm::raw_svector_ostream OS(ExceptionSpecString);
switch (OldProto->getExceptionSpecType()) {
case EST_DynamicNone:
@@ -264,7 +265,7 @@ bool Sema::CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc) {
unsigned DiagID = diag::err_mismatched_exception_spec;
- if (getLangOptions().MicrosoftExt)
+ if (getLangOpts().MicrosoftExt)
DiagID = diag::warn_mismatched_exception_spec;
return CheckEquivalentExceptionSpec(
PDiag(DiagID),
@@ -285,7 +286,7 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
bool AllowNoexceptAllMatchWithNoSpec,
bool IsOperatorNew) {
// Just completely ignore this under -fno-exceptions.
- if (!getLangOptions().CXXExceptions)
+ if (!getLangOpts().CXXExceptions)
return false;
if (MissingExceptionSpecification)
@@ -379,7 +380,7 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
// As a special compatibility feature, under C++0x we accept no spec and
// throw(std::bad_alloc) as equivalent for operator new and operator new[].
// This is because the implicit declaration changed, but old code would break.
- if (getLangOptions().CPlusPlus0x && IsOperatorNew) {
+ if (getLangOpts().CPlusPlus0x && IsOperatorNew) {
const FunctionProtoType *WithExceptions = 0;
if (OldEST == EST_None && NewEST == EST_Dynamic)
WithExceptions = New;
@@ -473,7 +474,7 @@ bool Sema::CheckExceptionSpecSubset(
const FunctionProtoType *Subset, SourceLocation SubLoc) {
// Just auto-succeed under -fno-exceptions.
- if (!getLangOptions().CXXExceptions)
+ if (!getLangOpts().CXXExceptions)
return false;
// FIXME: As usual, we could be more specific in our error messages, but
@@ -611,10 +612,8 @@ bool Sema::CheckExceptionSpecSubset(
case AR_inaccessible: continue;
case AR_dependent:
llvm_unreachable("access check dependent for unprivileged context");
- break;
case AR_delayed:
llvm_unreachable("access check delayed in non-declaration");
- break;
}
Contained = true;
@@ -703,7 +702,7 @@ bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType)
bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old) {
- if (getLangOptions().CPlusPlus0x && isa<CXXDestructorDecl>(New)) {
+ if (getLangOpts().CPlusPlus0x && isa<CXXDestructorDecl>(New)) {
// Don't check uninstantiated template destructors at all. We can only
// synthesize correct specs after the template is instantiated.
if (New->getParent()->isDependentType())
@@ -717,7 +716,7 @@ bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
}
}
unsigned DiagID = diag::err_override_exception_spec;
- if (getLangOptions().MicrosoftExt)
+ if (getLangOpts().MicrosoftExt)
DiagID = diag::warn_override_exception_spec;
return CheckExceptionSpecSubset(PDiag(DiagID),
PDiag(diag::note_overridden_virtual_function),
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
index 170097c..0d0f2f5 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -12,10 +12,13 @@
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
@@ -38,6 +41,7 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/SemaFixItUtils.h"
#include "clang/Sema/Template.h"
+#include "TreeTransform.h"
using namespace clang;
using namespace sema;
@@ -53,6 +57,12 @@ bool Sema::CanUseDecl(NamedDecl *D) {
if (FD->isDeleted())
return false;
}
+
+ // See if this function is unavailable.
+ if (D->getAvailability() == AR_Unavailable &&
+ cast<Decl>(CurContext)->getAvailability() != AR_Unavailable)
+ return false;
+
return true;
}
@@ -62,6 +72,13 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S,
// See if this declaration is unavailable or deprecated.
std::string Message;
AvailabilityResult Result = D->getAvailability(&Message);
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D))
+ if (Result == AR_Available) {
+ const DeclContext *DC = ECD->getDeclContext();
+ if (const EnumDecl *TheEnumDecl = dyn_cast<EnumDecl>(DC))
+ Result = TheEnumDecl->getAvailability(&Message);
+ }
+
switch (Result) {
case AR_Available:
case AR_NotYetIntroduced:
@@ -91,6 +108,28 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S,
return Result;
}
+/// \brief Emit a note explaining that this function is deleted or unavailable.
+void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Decl);
+
+ if (Method && Method->isDeleted() && !Method->isDeletedAsWritten()) {
+ // If the method was explicitly defaulted, point at that declaration.
+ if (!Method->isImplicit())
+ Diag(Decl->getLocation(), diag::note_implicitly_deleted);
+
+ // Try to diagnose why this special member function was implicitly
+ // deleted. This might fail, if that reason no longer applies.
+ CXXSpecialMember CSM = getSpecialMember(Method);
+ if (CSM != CXXInvalid)
+ ShouldDeleteSpecialMember(Method, CSM, /*Diagnose=*/true);
+
+ return;
+ }
+
+ Diag(Decl->getLocation(), diag::note_unavailable_here)
+ << 1 << Decl->isDeleted();
+}
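+// For illustration (hypothetical C++11 code): copying 'S' below uses an
+// implicitly deleted copy constructor; ShouldDeleteSpecialMember is then
+// re-run with Diagnose=true to explain why it was deleted:
+//   struct S { std::unique_ptr<int> p; };  // move-only member
+//   S a, b(a);                             // error: deleted copy ctor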
+
/// \brief Determine whether the use of this declaration is valid, and
/// emit any corresponding diagnostics.
///
@@ -105,7 +144,7 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S,
///
bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass) {
- if (getLangOptions().CPlusPlus && isa<FunctionDecl>(D)) {
+ if (getLangOpts().CPlusPlus && isa<FunctionDecl>(D)) {
// If there were any diagnostics suppressed by template argument deduction,
// emit them now.
llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >::iterator
@@ -134,26 +173,15 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isDeleted()) {
Diag(Loc, diag::err_deleted_function_use);
- Diag(D->getLocation(), diag::note_unavailable_here) << 1 << true;
+ NoteDeletedFunction(FD);
return true;
}
}
- AvailabilityResult Result =
- DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass);
+ DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass);
// Warn if this is used but marked unused.
if (D->hasAttr<UnusedAttr>())
Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName();
- // For available enumerator, it will become unavailable/deprecated
- // if its enum declaration is as such.
- if (Result == AR_Available)
- if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
- const DeclContext *DC = ECD->getDeclContext();
- if (const EnumDecl *TheEnumDecl = dyn_cast<EnumDecl>(DC))
- DiagnoseAvailabilityOfDecl(*this,
- const_cast< EnumDecl *>(TheEnumDecl),
- Loc, UnknownObjCClass);
- }
return false;
}
@@ -243,17 +271,7 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
Expr *sentinelExpr = args[numArgs - numArgsAfterSentinel - 1];
if (!sentinelExpr) return;
if (sentinelExpr->isValueDependent()) return;
-
- // nullptr_t is always treated as null.
- if (sentinelExpr->getType()->isNullPtrType()) return;
-
- if (sentinelExpr->getType()->isAnyPointerType() &&
- sentinelExpr->IgnoreParenCasts()->isNullPointerConstant(Context,
- Expr::NPC_ValueDependentIsNull))
- return;
-
- // Unfortunately, __null has type 'int'.
- if (isa<GNUNullExpr>(sentinelExpr)) return;
+ if (Context.isSentinelNullExpr(sentinelExpr)) return;
// Pick a reasonable string to insert. Optimistically use 'nil' or
// 'NULL' if those are actually defined in the context. Only use
@@ -314,7 +332,7 @@ ExprResult Sema::DefaultFunctionArrayConversion(Expr *E) {
// An lvalue or rvalue of type "array of N T" or "array of unknown bound of
// T" can be converted to an rvalue of type "pointer to T".
//
- if (getLangOptions().C99 || getLangOptions().CPlusPlus || E->isLValue())
+ if (getLangOpts().C99 || getLangOpts().CPlusPlus || E->isLValue())
E = ImpCastExprToType(E, Context.getArrayDecayedType(Ty),
CK_ArrayToPointerDecay).take();
}
@@ -356,23 +374,9 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
QualType T = E->getType();
assert(!T.isNull() && "r-value conversion on typeless expression?");
- // We can't do lvalue-to-rvalue on atomics yet.
- if (T->getAs<AtomicType>())
- return Owned(E);
-
- // Create a load out of an ObjCProperty l-value, if necessary.
- if (E->getObjectKind() == OK_ObjCProperty) {
- ExprResult Res = ConvertPropertyForRValue(E);
- if (Res.isInvalid())
- return Owned(E);
- E = Res.take();
- if (!E->isGLValue())
- return Owned(E);
- }
-
// We don't want to throw lvalue-to-rvalue casts on top of
// expressions of certain types in C++.
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
(E->getType() == Context.OverloadTy ||
T->isDependentType() ||
T->isRecordType()))
@@ -400,9 +404,20 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
if (T.hasQualifiers())
T = T.getUnqualifiedType();
+ UpdateMarkingForLValueToRValue(E);
+
ExprResult Res = Owned(ImplicitCastExpr::Create(Context, T, CK_LValueToRValue,
E, 0, VK_RValue));
+ // C11 6.3.2.1p2:
+ // ... if the lvalue has atomic type, the value has the non-atomic version
+ // of the type of the lvalue ...
+ if (const AtomicType *Atomic = T->getAs<AtomicType>()) {
+ T = Atomic->getValueType().getUnqualifiedType();
+ Res = Owned(ImplicitCastExpr::Create(Context, T, CK_AtomicToNonAtomic,
+ Res.get(), 0, VK_RValue));
+ }
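+ // For illustration (hypothetical C11 code): given '_Atomic int a;', an
+ // rvalue use such as 'a + 1' loads 'a' and then converts it to plain
+ // 'int' via the CK_AtomicToNonAtomic cast added above.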
+
return Res;
}
@@ -495,7 +510,7 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
// is a prvalue for the temporary.
// FIXME: add some way to gate this entire thing for correctness in
// potentially potentially evaluated contexts.
- if (getLangOptions().CPlusPlus && E->isGLValue() &&
+ if (getLangOpts().CPlusPlus && E->isGLValue() &&
ExprEvalContexts.back().Context != Unevaluated) {
ExprResult Temp = PerformCopyInitialization(
InitializedEntity::InitializeTemporary(E->getType()),
@@ -514,11 +529,23 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
/// interfaces passed by value.
ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl) {
- ExprResult ExprRes = CheckPlaceholderExpr(E);
- if (ExprRes.isInvalid())
- return ExprError();
+ if (const BuiltinType *PlaceholderTy = E->getType()->getAsPlaceholderType()) {
+ // Strip the unbridged-cast placeholder expression off, if applicable.
+ if (PlaceholderTy->getKind() == BuiltinType::ARCUnbridgedCast &&
+ (CT == VariadicMethod ||
+ (FDecl && FDecl->hasAttr<CFAuditedTransferAttr>()))) {
+ E = stripARCUnbridgedCast(E);
+
+ // Otherwise, do normal placeholder checking.
+ } else {
+ ExprResult ExprRes = CheckPlaceholderExpr(E);
+ if (ExprRes.isInvalid())
+ return ExprError();
+ E = ExprRes.take();
+ }
+ }
- ExprRes = DefaultArgumentPromotion(E);
+ ExprResult ExprRes = DefaultArgumentPromotion(E);
if (ExprRes.isInvalid())
return ExprError();
E = ExprRes.take();
@@ -540,17 +567,21 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
// or a non-trivial destructor, with no corresponding parameter,
// is conditionally-supported with implementation-defined semantics.
bool TrivialEnough = false;
- if (getLangOptions().CPlusPlus0x && !E->getType()->isDependentType()) {
+ if (getLangOpts().CPlusPlus0x && !E->getType()->isDependentType()) {
if (CXXRecordDecl *Record = E->getType()->getAsCXXRecordDecl()) {
if (Record->hasTrivialCopyConstructor() &&
Record->hasTrivialMoveConstructor() &&
- Record->hasTrivialDestructor())
+ Record->hasTrivialDestructor()) {
+ DiagRuntimeBehavior(E->getLocStart(), 0,
+ PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
+ << E->getType() << CT);
TrivialEnough = true;
+ }
}
}
if (!TrivialEnough &&
- getLangOptions().ObjCAutoRefCount &&
+ getLangOpts().ObjCAutoRefCount &&
E->getType()->isObjCLifetimeType())
TrivialEnough = true;
@@ -558,14 +589,16 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
// Nothing to diagnose. This is okay.
} else if (DiagRuntimeBehavior(E->getLocStart(), 0,
PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg)
- << getLangOptions().CPlusPlus0x << E->getType()
+ << getLangOpts().CPlusPlus0x << E->getType()
<< CT)) {
// Turn this into a trap.
CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
UnqualifiedId Name;
Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"),
E->getLocStart());
- ExprResult TrapFn = ActOnIdExpression(TUScope, SS, Name, true, false);
+ ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc, Name,
+ true, false);
if (TrapFn.isInvalid())
return ExprError();
@@ -581,6 +614,11 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
E = Comma.get();
}
}
+ // C++ rules are enforced elsewhere.
+ if (!getLangOpts().CPlusPlus &&
+ RequireCompleteType(E->getExprLoc(), E->getType(),
+ diag::err_call_incomplete_argument))
+ return ExprError();
return Owned(E);
}
@@ -811,14 +849,21 @@ static QualType handleComplexIntConversion(Sema &S, ExprResult &LHS,
if (LHSComplexInt) {
// int -> _Complex int
+ // FIXME: This needs to take integer ranks into account
+ RHS = S.ImpCastExprToType(RHS.take(), LHSComplexInt->getElementType(),
+ CK_IntegralCast);
RHS = S.ImpCastExprToType(RHS.take(), LHSType, CK_IntegralRealToComplex);
return LHSType;
}
assert(RHSComplexInt);
// int -> _Complex int
- if (!IsCompAssign)
+ // FIXME: This needs to take integer ranks into account
+ if (!IsCompAssign) {
+ LHS = S.ImpCastExprToType(LHS.take(), RHSComplexInt->getElementType(),
+ CK_IntegralCast);
LHS = S.ImpCastExprToType(LHS.take(), RHSType, CK_IntegralRealToComplex);
+ }
return RHSType;
}
@@ -999,7 +1044,7 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
if (Types[i]->getType()->isDependentType()) {
IsResultDependent = true;
} else {
- // C1X 6.5.1.1p2 "The type name in a generic association shall specify a
+ // C11 6.5.1.1p2 "The type name in a generic association shall specify a
// complete object type other than a variably modified type."
unsigned D = 0;
if (Types[i]->getType()->isIncompleteType())
@@ -1016,7 +1061,7 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
TypeErrorFound = true;
}
- // C1X 6.5.1.1p2 "No two generic associations in the same generic
+ // C11 6.5.1.1p2 "No two generic associations in the same generic
// selection shall specify compatible types."
for (unsigned j = i+1; j < NumAssocs; ++j)
if (Types[j] && !Types[j]->getType()->isDependentType() &&
@@ -1057,7 +1102,7 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
CompatIndices.push_back(i);
}
- // C1X 6.5.1.1p2 "The controlling expression of a generic selection shall have
+ // C11 6.5.1.1p2 "The controlling expression of a generic selection shall have
// type compatible with at most one of the types named in its generic
// association list."
if (CompatIndices.size() > 1) {
@@ -1077,7 +1122,7 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
return ExprError();
}
- // C1X 6.5.1.1p2 "If a generic selection has no default generic association,
+ // C11 6.5.1.1p2 "If a generic selection has no default generic association,
// its controlling expression shall have type compatible with exactly one of
// the types named in its generic association list."
if (DefaultIndex == -1U && CompatIndices.size() == 0) {
@@ -1089,7 +1134,7 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
return ExprError();
}
- // C1X 6.5.1.1p3 "If a generic selection has a generic association with a
+ // C11 6.5.1.1p3 "If a generic selection has a generic association with a
// type name that is compatible with the type of the controlling expression,
// then the result expression of the generic selection is the expression
// in that generic association. Otherwise, the result expression of the
@@ -1104,6 +1149,43 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
ResultIndex));
}
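// For illustration (hypothetical C11 code), the checks above accept:
//   _Generic(x, int: 1, float: 2, default: 0)
// provided the type of 'x' is compatible with at most one association.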
+/// getUDSuffixLoc - Create a SourceLocation for a ud-suffix, given the
+/// location of the token and the offset of the ud-suffix within it.
+static SourceLocation getUDSuffixLoc(Sema &S, SourceLocation TokLoc,
+ unsigned Offset) {
+ return Lexer::AdvanceToTokenCharacter(TokLoc, Offset, S.getSourceManager(),
+ S.getLangOpts());
+}
+
+/// BuildCookedLiteralOperatorCall - A user-defined literal was found. Look up
+/// the corresponding cooked (non-raw) literal operator, and build a call to it.
+static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope,
+ IdentifierInfo *UDSuffix,
+ SourceLocation UDSuffixLoc,
+ ArrayRef<Expr*> Args,
+ SourceLocation LitEndLoc) {
+ assert(Args.size() <= 2 && "too many arguments for literal operator");
+
+ QualType ArgTy[2];
+ for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
+ ArgTy[ArgIdx] = Args[ArgIdx]->getType();
+ if (ArgTy[ArgIdx]->isArrayType())
+ ArgTy[ArgIdx] = S.Context.getArrayDecayedType(ArgTy[ArgIdx]);
+ }
+
+ DeclarationName OpName =
+ S.Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix);
+ DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc);
+ OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc);
+
+ LookupResult R(S, OpName, UDSuffixLoc, Sema::LookupOrdinaryName);
+ if (S.LookupLiteralOperator(Scope, R, llvm::makeArrayRef(ArgTy, Args.size()),
+ /*AllowRawAndTemplate*/false) == Sema::LOLR_Error)
+ return ExprError();
+
+ return S.BuildLiteralOperatorCall(R, OpNameInfo, Args, LitEndLoc);
+}
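+// For illustration (hypothetical C++11 code): for the literal '"abc"_len',
+// this looks up and calls a cooked operator such as
+//   size_t operator"" _len(const char *, size_t);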
+
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string
/// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from
@@ -1111,7 +1193,8 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
/// string.
///
ExprResult
-Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) {
+Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks,
+ Scope *UDLScope) {
assert(NumStringToks && "Must have at least one string!");
StringLiteralParser Literal(StringToks, NumStringToks, PP);
@@ -1129,7 +1212,7 @@ Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) {
StrTy = Context.Char16Ty;
else if (Literal.isUTF32())
StrTy = Context.Char32Ty;
- else if (Literal.Pascal)
+ else if (Literal.isPascal())
StrTy = Context.UnsignedCharTy;
StringLiteral::StringKind Kind = StringLiteral::Ascii;
@@ -1143,7 +1226,7 @@ Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) {
Kind = StringLiteral::UTF32;
// A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
- if (getLangOptions().CPlusPlus || getLangOptions().ConstStrings)
+ if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
StrTy.addConst();
// Get an array type for the string, according to C99 6.4.5. This includes
@@ -1154,243 +1237,32 @@ Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) {
ArrayType::Normal, 0);
// Pass &StringTokLocs[0], StringTokLocs.size() to factory!
- return Owned(StringLiteral::Create(Context, Literal.GetString(),
- Kind, Literal.Pascal, StrTy,
- &StringTokLocs[0],
- StringTokLocs.size()));
-}
-
-enum CaptureResult {
- /// No capture is required.
- CR_NoCapture,
-
- /// A capture is required.
- CR_Capture,
-
- /// A by-ref capture is required.
- CR_CaptureByRef,
-
- /// An error occurred when trying to capture the given variable.
- CR_Error
-};
-
-/// Diagnose an uncapturable value reference.
-///
-/// \param var - the variable referenced
-/// \param DC - the context which we couldn't capture through
-static CaptureResult
-diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
- VarDecl *var, DeclContext *DC) {
- switch (S.ExprEvalContexts.back().Context) {
- case Sema::Unevaluated:
- // The argument will never be evaluated, so don't complain.
- return CR_NoCapture;
-
- case Sema::PotentiallyEvaluated:
- case Sema::PotentiallyEvaluatedIfUsed:
- break;
-
- case Sema::PotentiallyPotentiallyEvaluated:
- // FIXME: delay these!
- break;
- }
-
- // Don't diagnose about capture if we're not actually in code right
- // now; in general, there are more appropriate places that will
- // diagnose this.
- if (!S.CurContext->isFunctionOrMethod()) return CR_NoCapture;
-
- // Certain madnesses can happen with parameter declarations, which
- // we want to ignore.
- if (isa<ParmVarDecl>(var)) {
- // - If the parameter still belongs to the translation unit, then
- // we're actually just using one parameter in the declaration of
- // the next. This is useful in e.g. VLAs.
- if (isa<TranslationUnitDecl>(var->getDeclContext()))
- return CR_NoCapture;
-
- // - This particular madness can happen in ill-formed default
- // arguments; claim it's okay and let downstream code handle it.
- if (S.CurContext == var->getDeclContext()->getParent())
- return CR_NoCapture;
- }
-
- DeclarationName functionName;
- if (FunctionDecl *fn = dyn_cast<FunctionDecl>(var->getDeclContext()))
- functionName = fn->getDeclName();
- // FIXME: variable from enclosing block that we couldn't capture from!
-
- S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_function)
- << var->getIdentifier() << functionName;
- S.Diag(var->getLocation(), diag::note_local_variable_declared_here)
- << var->getIdentifier();
-
- return CR_Error;
-}
-
-/// There is a well-formed capture at a particular scope level;
-/// propagate it through all the nested blocks.
-static CaptureResult propagateCapture(Sema &S, unsigned ValidScopeIndex,
- const BlockDecl::Capture &Capture) {
- VarDecl *var = Capture.getVariable();
-
- // Update all the inner blocks with the capture information.
- for (unsigned i = ValidScopeIndex + 1, e = S.FunctionScopes.size();
- i != e; ++i) {
- BlockScopeInfo *innerBlock = cast<BlockScopeInfo>(S.FunctionScopes[i]);
- innerBlock->Captures.push_back(
- BlockDecl::Capture(Capture.getVariable(), Capture.isByRef(),
- /*nested*/ true, Capture.getCopyExpr()));
- innerBlock->CaptureMap[var] = innerBlock->Captures.size(); // +1
- }
-
- return Capture.isByRef() ? CR_CaptureByRef : CR_Capture;
-}
-
-/// shouldCaptureValueReference - Determine if a reference to the
-/// given value in the current context requires a variable capture.
-///
-/// This also keeps the captures set in the BlockScopeInfo records
-/// up-to-date.
-static CaptureResult shouldCaptureValueReference(Sema &S, SourceLocation loc,
- ValueDecl *Value) {
- // Only variables ever require capture.
- VarDecl *var = dyn_cast<VarDecl>(Value);
- if (!var) return CR_NoCapture;
-
- // Fast path: variables from the current context never require capture.
- DeclContext *DC = S.CurContext;
- if (var->getDeclContext() == DC) return CR_NoCapture;
-
- // Only variables with local storage require capture.
- // FIXME: What about 'const' variables in C++?
- if (!var->hasLocalStorage()) return CR_NoCapture;
-
- // Otherwise, we need to capture.
-
- unsigned functionScopesIndex = S.FunctionScopes.size() - 1;
- do {
- // Only blocks (and eventually C++0x closures) can capture; other
- // scopes don't work.
- if (!isa<BlockDecl>(DC))
- return diagnoseUncapturableValueReference(S, loc, var, DC);
-
- BlockScopeInfo *blockScope =
- cast<BlockScopeInfo>(S.FunctionScopes[functionScopesIndex]);
- assert(blockScope->TheDecl == static_cast<BlockDecl*>(DC));
-
- // Check whether we've already captured it in this block. If so,
- // we're done.
- if (unsigned indexPlus1 = blockScope->CaptureMap[var])
- return propagateCapture(S, functionScopesIndex,
- blockScope->Captures[indexPlus1 - 1]);
-
- functionScopesIndex--;
- DC = cast<BlockDecl>(DC)->getDeclContext();
- } while (var->getDeclContext() != DC);
-
- // Okay, we descended all the way to the block that defines the variable.
- // Actually try to capture it.
- QualType type = var->getType();
-
- // Prohibit variably-modified types.
- if (type->isVariablyModifiedType()) {
- S.Diag(loc, diag::err_ref_vm_type);
- S.Diag(var->getLocation(), diag::note_declared_at);
- return CR_Error;
- }
-
- // Prohibit arrays, even in __block variables, but not references to
- // them.
- if (type->isArrayType()) {
- S.Diag(loc, diag::err_ref_array_type);
- S.Diag(var->getLocation(), diag::note_declared_at);
- return CR_Error;
- }
-
- S.MarkDeclarationReferenced(loc, var);
-
- // The BlocksAttr indicates the variable is bound by-reference.
- bool byRef = var->hasAttr<BlocksAttr>();
-
- // Build a copy expression.
- Expr *copyExpr = 0;
- const RecordType *rtype;
- if (!byRef && S.getLangOptions().CPlusPlus && !type->isDependentType() &&
- (rtype = type->getAs<RecordType>())) {
-
- // The capture logic needs the destructor, so make sure we mark it.
- // Usually this is unnecessary because most local variables have
- // their destructors marked at declaration time, but parameters are
- // an exception because it's technically only the call site that
- // actually requires the destructor.
- if (isa<ParmVarDecl>(var))
- S.FinalizeVarWithDestructor(var, rtype);
-
- // According to the blocks spec, the capture of a variable from
- // the stack requires a const copy constructor. This is not true
- // of the copy/move done to move a __block variable to the heap.
- type.addConst();
-
- Expr *declRef = new (S.Context) DeclRefExpr(var, type, VK_LValue, loc);
- ExprResult result =
- S.PerformCopyInitialization(
- InitializedEntity::InitializeBlock(var->getLocation(),
- type, false),
- loc, S.Owned(declRef));
-
- // Build a full-expression copy expression if initialization
- // succeeded and used a non-trivial constructor. Recover from
- // errors by pretending that the copy isn't necessary.
- if (!result.isInvalid() &&
- !cast<CXXConstructExpr>(result.get())->getConstructor()->isTrivial()) {
- result = S.MaybeCreateExprWithCleanups(result);
- copyExpr = result.take();
- }
- }
-
- // We're currently at the declarer; go back to the closure.
- functionScopesIndex++;
- BlockScopeInfo *blockScope =
- cast<BlockScopeInfo>(S.FunctionScopes[functionScopesIndex]);
-
- // Build a valid capture in this scope.
- blockScope->Captures.push_back(
- BlockDecl::Capture(var, byRef, /*nested*/ false, copyExpr));
- blockScope->CaptureMap[var] = blockScope->Captures.size(); // +1
-
- // Propagate that to inner captures if necessary.
- return propagateCapture(S, functionScopesIndex,
- blockScope->Captures.back());
-}
-
-static ExprResult BuildBlockDeclRefExpr(Sema &S, ValueDecl *VD,
- const DeclarationNameInfo &NameInfo,
- bool ByRef) {
- assert(isa<VarDecl>(VD) && "capturing non-variable");
-
- VarDecl *var = cast<VarDecl>(VD);
- assert(var->hasLocalStorage() && "capturing non-local");
- assert(ByRef == var->hasAttr<BlocksAttr>() && "byref set wrong");
-
- QualType exprType = var->getType().getNonReferenceType();
-
- BlockDeclRefExpr *BDRE;
- if (!ByRef) {
- // The variable will be bound by copy; make it const within the
- // closure, but record that this was done in the expression.
- bool constAdded = !exprType.isConstQualified();
- exprType.addConst();
-
- BDRE = new (S.Context) BlockDeclRefExpr(var, exprType, VK_LValue,
- NameInfo.getLoc(), false,
- constAdded);
- } else {
- BDRE = new (S.Context) BlockDeclRefExpr(var, exprType, VK_LValue,
- NameInfo.getLoc(), true);
- }
-
- return S.Owned(BDRE);
+ StringLiteral *Lit = StringLiteral::Create(Context, Literal.GetString(),
+ Kind, Literal.Pascal, StrTy,
+ &StringTokLocs[0],
+ StringTokLocs.size());
+ if (Literal.getUDSuffix().empty())
+ return Owned(Lit);
+
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, StringTokLocs[Literal.getUDSuffixToken()],
+ Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_string_udl));
+
+ // C++11 [lex.ext]p5: The literal L is treated as a call of the form
+ // operator "" X (str, len)
+ QualType SizeType = Context.getSizeType();
+ llvm::APInt Len(Context.getIntWidth(SizeType), Literal.GetNumStringChars());
+ IntegerLiteral *LenArg = IntegerLiteral::Create(Context, Len, SizeType,
+ StringTokLocs[0]);
+ Expr *Args[] = { Lit, LenArg };
+ return BuildCookedLiteralOperatorCall(*this, UDLScope, UDSuffix, UDSuffixLoc,
+ Args, StringTokLocs.back());
}
ExprResult
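(Illustrative sketch of the user code the new path resolves; the suffix _s is hypothetical. Per the [lex.ext]p5 comment above, the literal becomes a call passing the string data and its length.)

    #include <cstddef>
    #include <string>

    // Cooked string literal operator; the parameters match the (str, len)
    // call form that BuildCookedLiteralOperatorCall looks up.
    std::string operator"" _s(const char *str, std::size_t len) {
      return std::string(str, len);
    }

    // "foo"_s is treated as operator"" _s("foo", 3); the IntegerLiteral
    // LenArg built above supplies the 3.
    std::string greeting = "foo"_s;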
@@ -1407,7 +1279,7 @@ ExprResult
Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS) {
- if (getLangOptions().CUDA)
+ if (getLangOpts().CUDA)
if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
if (const FunctionDecl *Callee = dyn_cast<FunctionDecl>(D)) {
CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller),
@@ -1421,12 +1293,18 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
}
}
- MarkDeclarationReferenced(NameInfo.getLoc(), D);
+ bool refersToEnclosingScope =
+ (CurContext != D->getDeclContext() &&
+ D->getDeclContext()->isFunctionOrMethod());
+
+ DeclRefExpr *E = DeclRefExpr::Create(Context,
+ SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc(),
+ SourceLocation(),
+ D, refersToEnclosingScope,
+ NameInfo, Ty, VK);
- Expr *E = DeclRefExpr::Create(Context,
- SS? SS->getWithLocInContext(Context)
- : NestedNameSpecifierLoc(),
- D, NameInfo, Ty, VK);
+ MarkDeclRefReferenced(E);
// Just in case we're building an illegal pointer-to-member.
FieldDecl *FD = dyn_cast<FieldDecl>(D);
@@ -1474,9 +1352,9 @@ Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id,
///
/// \return false if new lookup candidates were found
bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
- CorrectTypoContext CTC,
+ CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs,
- Expr **Args, unsigned NumArgs) {
+ llvm::ArrayRef<Expr *> Args) {
DeclarationName Name = R.getLookupName();
unsigned diagnostic = diag::err_undeclared_var_use;
@@ -1492,8 +1370,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// unqualified lookup. This is useful when (for example) the
// original lookup would not have found something because it was a
// dependent name.
- for (DeclContext *DC = SS.isEmpty() ? CurContext : 0;
- DC; DC = DC->getParent()) {
+ DeclContext *DC = SS.isEmpty() ? CurContext : 0;
+ while (DC) {
if (isa<CXXRecordDecl>(DC)) {
LookupQualifiedName(R, DC);
@@ -1501,10 +1379,17 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// Don't give errors about ambiguities in this lookup.
R.suppressDiagnostics();
+ // During a default argument instantiation the CurContext points
+ // to a CXXMethodDecl; but we can't apply a this-> fixit inside a
+ // function parameter list, hence add an explicit check.
+ bool isDefaultArgument = !ActiveTemplateInstantiations.empty() &&
+ ActiveTemplateInstantiations.back().Kind ==
+ ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation;
CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
bool isInstance = CurMethod &&
CurMethod->isInstance() &&
- DC == CurMethod->getParent();
+ DC == CurMethod->getParent() && !isDefaultArgument;
+
// Give a code modification hint to insert 'this->'.
// TODO: fixit for inserting 'Base<T>::' in the other cases.
@@ -1515,11 +1400,12 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CXXMethodDecl *DepMethod = cast_or_null<CXXMethodDecl>(
CurMethod->getInstantiatedFromMemberFunction());
if (DepMethod) {
- if (getLangOptions().MicrosoftExt)
+ if (getLangOpts().MicrosoftMode)
diagnostic = diag::warn_found_via_dependent_bases_lookup;
Diag(R.getNameLoc(), diagnostic) << Name
<< FixItHint::CreateInsertion(R.getNameLoc(), "this->");
QualType DepThisType = DepMethod->getThisType(Context);
+ CheckCXXThisCapture(R.getNameLoc());
CXXThisExpr *DepThis = new (Context) CXXThisExpr(
R.getNameLoc(), DepThisType, false);
TemplateArgumentListInfo TList;
@@ -1531,7 +1417,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CXXDependentScopeMemberExpr *DepExpr =
CXXDependentScopeMemberExpr::Create(
Context, DepThis, DepThisType, true, SourceLocation(),
- SS.getWithLocInContext(Context), NULL,
+ SS.getWithLocInContext(Context),
+ ULE->getTemplateKeywordLoc(), 0,
R.getLookupNameInfo(),
ULE->hasExplicitTemplateArgs() ? &TList : 0);
CallsUndergoingInstantiation.back()->setCallee(DepExpr);
@@ -1541,6 +1428,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
Diag(R.getNameLoc(), diagnostic) << Name;
}
} else {
+ if (getLangOpts().MicrosoftMode)
+ diagnostic = diag::warn_found_via_dependent_bases_lookup;
Diag(R.getNameLoc(), diagnostic) << Name;
}
@@ -1548,20 +1437,40 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
Diag((*I)->getLocation(), diag::note_dependent_var_use);
+ // Return true if we are inside a default argument instantiation
+ // and the found name refers to an instance member function, otherwise
+ // the function calling DiagnoseEmptyLookup will try to create an
+ // implicit member call and this is wrong for default arguments.
+ if (isDefaultArgument && ((*R.begin())->isCXXInstanceMember())) {
+ Diag(R.getNameLoc(), diag::err_member_call_without_object);
+ return true;
+ }
+
// Tell the callee to try to recover.
return false;
}
R.clear();
}
+
+ // In Microsoft mode, if we are performing lookup from within a friend
+ // function definition declared at class scope then we must set
+ // DC to the lexical parent to be able to search into the parent
+ // class.
+ if (getLangOpts().MicrosoftMode && isa<FunctionDecl>(DC) &&
+ cast<FunctionDecl>(DC)->getFriendObjectKind() &&
+ DC->getLexicalParent()->isRecord())
+ DC = DC->getLexicalParent();
+ else
+ DC = DC->getParent();
}
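(Illustrative standard C++, not from the patch: the case this record walk and the 'this->' fixit address, where a member of a dependent base is invisible to unqualified lookup at template definition time.)

    template <typename T> struct Base { int member; };

    template <typename T> struct Derived : Base<T> {
      int broken() { return member; }        // error; fixit inserts 'this->'
      int fixed()  { return this->member; }  // OK: lookup deferred to instantiation
    };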
// We didn't find anything, so try to correct for a typo.
TypoCorrection Corrected;
if (S && (Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(),
- S, &SS, NULL, false, CTC))) {
- std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
- std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
+ S, &SS, CCC))) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
R.setLookupName(Corrected.getCorrection());
if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
@@ -1575,11 +1484,11 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
dyn_cast<FunctionTemplateDecl>(*CD))
AddTemplateOverloadCandidate(
FTD, DeclAccessPair::make(FTD, AS_none), ExplicitTemplateArgs,
- Args, NumArgs, OCS);
+ Args, OCS);
else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*CD))
if (!ExplicitTemplateArgs || ExplicitTemplateArgs->size() == 0)
AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none),
- Args, NumArgs, OCS);
+ Args, OCS);
}
switch (OCS.BestViableFunction(*this, R.getNameLoc(), Best)) {
case OR_Success:
@@ -1654,9 +1563,11 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
ExprResult Sema::ActOnIdExpression(Scope *S,
CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool HasTrailingLParen,
- bool IsAddressOfOperand) {
+ bool IsAddressOfOperand,
+ CorrectionCandidateCallback *CCC) {
assert(!(IsAddressOfOperand && HasTrailingLParen) &&
"cannot be direct & operand and have a trailing lparen");
@@ -1699,10 +1610,9 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
}
if (DependentID)
- return ActOnDependentIdExpression(SS, NameInfo, IsAddressOfOperand,
- TemplateArgs);
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
- bool IvarLookupFollowUp = false;
// Perform the required lookup.
LookupResult R(*this, NameInfo,
(Id.getKind() == UnqualifiedId::IK_ImplicitSelfParam)
@@ -1719,18 +1629,18 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
if (MemberOfUnknownSpecialization ||
(R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation))
- return ActOnDependentIdExpression(SS, NameInfo, IsAddressOfOperand,
- TemplateArgs);
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
} else {
- IvarLookupFollowUp = (!SS.isSet() && II && getCurMethodDecl());
+ bool IvarLookupFollowUp = II && !SS.isSet() && getCurMethodDecl();
LookupParsedName(R, S, &SS, !IvarLookupFollowUp);
// If the result might be in a dependent base class, this is a dependent
// id-expression.
if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
- return ActOnDependentIdExpression(SS, NameInfo, IsAddressOfOperand,
- TemplateArgs);
-
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
+
// If this reference is in an Objective-C method, then we need to do
// some special Objective-C lookup, too.
if (IvarLookupFollowUp) {
@@ -1740,9 +1650,6 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
if (Expr *Ex = E.takeAs<Expr>())
return Owned(Ex);
-
- // for further use, this must be set to false if in class method.
- IvarLookupFollowUp = getCurMethodDecl()->isInstanceMethod();
}
}
@@ -1756,7 +1663,7 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
if (R.empty() && !ADL) {
// Otherwise, this could be an implicitly declared function reference (legal
// in C90, extension in C99, forbidden in C++).
- if (HasTrailingLParen && II && !getLangOptions().CPlusPlus) {
+ if (HasTrailingLParen && II && !getLangOpts().CPlusPlus) {
NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *II, S);
if (D) R.addDecl(D);
}
@@ -1769,12 +1676,13 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
// and we can't resolve an identifier then assume the identifier is type
// dependent. The goal is to postpone name lookup to instantiation time
// to be able to search into type dependent base classes.
- if (getLangOptions().MicrosoftMode && CurContext->isDependentContext() &&
+ if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() &&
isa<CXXMethodDecl>(CurContext))
- return ActOnDependentIdExpression(SS, NameInfo, IsAddressOfOperand,
- TemplateArgs);
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
- if (DiagnoseEmptyLookup(S, SS, R, CTC_Unknown))
+ CorrectionCandidateCallback DefaultValidator;
+ if (DiagnoseEmptyLookup(S, SS, R, CCC ? *CCC : DefaultValidator))
return ExprError();
assert(!R.empty() &&
@@ -1837,11 +1745,12 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
isa<IndirectFieldDecl>(R.getFoundDecl());
if (MightBeImplicitMember)
- return BuildPossibleImplicitMemberExpr(SS, R, TemplateArgs);
+ return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc,
+ R, TemplateArgs);
}
- if (TemplateArgs)
- return BuildTemplateIdExpr(SS, R, ADL, *TemplateArgs);
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, ADL, TemplateArgs);
return BuildDeclarationNameExpr(SS, R, ADL);
}
@@ -1855,7 +1764,8 @@ Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo) {
DeclContext *DC;
if (!(DC = computeDeclContext(SS, false)) || DC->isDependentContext())
- return BuildDependentDeclRefExpr(SS, NameInfo, 0);
+ return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, /*TemplateArgs=*/0);
if (RequireCompleteDeclContext(SS, DC))
return ExprError();
@@ -1912,7 +1822,8 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
if (LookForIvars) {
IFace = CurMethod->getClassInterface();
ObjCInterfaceDecl *ClassDeclared;
- if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) {
+ ObjCIvarDecl *IV = 0;
+ if (IFace && (IV = IFace->lookupInstanceVariable(II, ClassDeclared))) {
// Diagnose using an ivar in a class method.
if (IsClassMethod)
return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
@@ -1929,7 +1840,8 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
// Diagnose the use of an ivar outside of the declaring class.
if (IV->getAccessControl() == ObjCIvarDecl::Private &&
- ClassDeclared != IFace)
+ !declaresSameEntity(ClassDeclared, IFace) &&
+ !getLangOpts().DebuggerSupport)
Diag(Loc, diag::error_private_ivar_access) << IV->getDeclName();
// FIXME: This should use a new expr for a direct reference, don't
@@ -1939,7 +1851,8 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
SelfName.setIdentifier(&II, SourceLocation());
SelfName.setKind(UnqualifiedId::IK_ImplicitSelfParam);
CXXScopeSpec SelfScopeSpec;
- ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec,
+ SourceLocation TemplateKWLoc;
+ ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc,
SelfName, false, false);
if (SelfExpr.isInvalid())
return ExprError();
@@ -1948,26 +1861,33 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
if (SelfExpr.isInvalid())
return ExprError();
- MarkDeclarationReferenced(Loc, IV);
+ MarkAnyDeclReferenced(Loc, IV);
return Owned(new (Context)
ObjCIvarRefExpr(IV, IV->getType(), Loc,
SelfExpr.take(), true, true));
}
} else if (CurMethod->isInstanceMethod()) {
// We should warn if a local variable hides an ivar.
- ObjCInterfaceDecl *IFace = CurMethod->getClassInterface();
- ObjCInterfaceDecl *ClassDeclared;
- if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) {
- if (IV->getAccessControl() != ObjCIvarDecl::Private ||
- IFace == ClassDeclared)
- Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName();
+ if (ObjCInterfaceDecl *IFace = CurMethod->getClassInterface()) {
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) {
+ if (IV->getAccessControl() != ObjCIvarDecl::Private ||
+ declaresSameEntity(IFace, ClassDeclared))
+ Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName();
+ }
}
+ } else if (Lookup.isSingleResult() &&
+ Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()) {
+ // If accessing a stand-alone ivar in a class method, this is an error.
+ if (const ObjCIvarDecl *IV = dyn_cast<ObjCIvarDecl>(Lookup.getFoundDecl()))
+ return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
+ << IV->getDeclName());
}
if (Lookup.empty() && II && AllowBuiltinCreation) {
// FIXME. Consolidate this with similar code in LookupName.
if (unsigned BuiltinID = II->getBuiltinID()) {
- if (!(getLangOptions().CPlusPlus &&
+ if (!(getLangOpts().CPlusPlus &&
Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))) {
NamedDecl *D = LazilyCreateBuiltin((IdentifierInfo *)II, BuiltinID,
S, Lookup.isForRedeclaration(),
@@ -2159,7 +2079,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
return false;
// Only in C++ or ObjC++.
- if (!getLangOptions().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
return false;
// Turn off ADL when we find certain kinds of declarations during
@@ -2308,30 +2228,7 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(),
indirectField);
- // If the identifier reference is inside a block, and it refers to a value
- // that is outside the block, create a BlockDeclRefExpr instead of a
- // DeclRefExpr. This ensures the value is treated as a copy-in snapshot when
- // the block is formed.
- //
- // We do not do this for things like enum constants, global variables, etc,
- // as they do not get snapshotted.
- //
- switch (shouldCaptureValueReference(*this, NameInfo.getLoc(), VD)) {
- case CR_Error:
- return ExprError();
-
- case CR_Capture:
- assert(!SS.isSet() && "referenced local variable with scope specifier?");
- return BuildBlockDeclRefExpr(*this, VD, NameInfo, /*byref*/ false);
-
- case CR_CaptureByRef:
- assert(!SS.isSet() && "referenced local variable with scope specifier?");
- return BuildBlockDeclRefExpr(*this, VD, NameInfo, /*byref*/ true);
-
- case CR_NoCapture: {
- // If this reference is not in a block or if the referenced
- // variable is within the block, create a normal DeclRefExpr.
-
+ {
QualType type = VD->getType();
ExprValueKind valueKind = VK_RValue;
@@ -2343,13 +2240,11 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
case Decl::type:
#include "clang/AST/DeclNodes.inc"
llvm_unreachable("invalid value decl kind");
- return ExprError();
// These shouldn't make it here.
case Decl::ObjCAtDefsField:
case Decl::ObjCIvar:
llvm_unreachable("forming non-member reference to ivar?");
- return ExprError();
// Enum constants are always r-values and never references.
// Unresolved using declarations are dependent.
@@ -2364,7 +2259,7 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// exist in the high-level semantics.
case Decl::Field:
case Decl::IndirectField:
- assert(getLangOptions().CPlusPlus &&
+ assert(getLangOpts().CPlusPlus &&
"building reference to field in C?");
// These can't have reference type in well-formed programs, but
@@ -2391,7 +2286,7 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
case Decl::Var:
// In C, "extern void blah;" is valid and is an r-value.
- if (!getLangOptions().CPlusPlus &&
+ if (!getLangOpts().CPlusPlus &&
!type.hasQualifiers() &&
type->isVoidType()) {
valueKind = VK_RValue;
@@ -2400,12 +2295,23 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// fallthrough
case Decl::ImplicitParam:
- case Decl::ParmVar:
+ case Decl::ParmVar: {
// These are always l-values.
valueKind = VK_LValue;
type = type.getNonReferenceType();
- break;
+ // FIXME: Does the addition of const really only apply in
+ // potentially-evaluated contexts? Since the variable isn't actually
+ // captured in an unevaluated context, it seems that the answer is no.
+ if (ExprEvalContexts.back().Context != Sema::Unevaluated) {
+ QualType CapturedType = getCapturedDeclRefType(cast<VarDecl>(VD), Loc);
+ if (!CapturedType.isNull())
+ type = CapturedType;
+ }
+
+ break;
+ }
+
case Decl::Function: {
const FunctionType *fty = type->castAs<FunctionType>();
@@ -2418,7 +2324,7 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
}
// Functions are l-values in C++.
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
valueKind = VK_LValue;
break;
}
@@ -2466,11 +2372,6 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
return BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS);
}
-
- }
-
- llvm_unreachable("unknown capture result");
- return ExprError();
}
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
@@ -2507,8 +2408,8 @@ ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
return Owned(new (Context) PredefinedExpr(Loc, ResTy, IT));
}
-ExprResult Sema::ActOnCharacterConstant(const Token &Tok) {
- llvm::SmallString<16> CharBuffer;
+ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
+ SmallString<16> CharBuffer;
bool Invalid = false;
StringRef ThisTok = PP.getSpelling(Tok, CharBuffer, &Invalid);
if (Invalid)
@@ -2520,16 +2421,14 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok) {
return ExprError();
QualType Ty;
- if (!getLangOptions().CPlusPlus)
- Ty = Context.IntTy; // 'x' and L'x' -> int in C.
- else if (Literal.isWide())
- Ty = Context.WCharTy; // L'x' -> wchar_t in C++.
+ if (Literal.isWide())
+ Ty = Context.WCharTy; // L'x' -> wchar_t in C and C++.
else if (Literal.isUTF16())
- Ty = Context.Char16Ty; // u'x' -> char16_t in C++0x.
+ Ty = Context.Char16Ty; // u'x' -> char16_t in C11 and C++11.
else if (Literal.isUTF32())
- Ty = Context.Char32Ty; // U'x' -> char32_t in C++0x.
- else if (Literal.isMultiChar())
- Ty = Context.IntTy; // 'wxyz' -> int in C++.
+ Ty = Context.Char32Ty; // U'x' -> char32_t in C11 and C++11.
+ else if (!getLangOpts().CPlusPlus || Literal.isMultiChar())
+ Ty = Context.IntTy; // 'x' -> int in C, 'wxyz' -> int in C++.
else
Ty = Context.CharTy; // 'x' -> char in C++
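(For reference, the types the rewritten chain assigns; standard behavior, shown as C++11 declarations.)

    static_assert(sizeof('x') == sizeof(char), "C++: 'x' is char; in C it is int");
    int      multi = 'ab';  // multi-character literal: int in both C and C++
    wchar_t  wide  = L'x';  // wchar_t in both C and C++
    char16_t u16   = u'x';  // char16_t in C11 and C++11
    char32_t u32   = U'x';  // char32_t in C11 and C++11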
@@ -2541,21 +2440,75 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok) {
else if (Literal.isUTF32())
Kind = CharacterLiteral::UTF32;
- return Owned(new (Context) CharacterLiteral(Literal.getValue(), Kind, Ty,
- Tok.getLocation()));
+ Expr *Lit = new (Context) CharacterLiteral(Literal.getValue(), Kind, Ty,
+ Tok.getLocation());
+
+ if (Literal.getUDSuffix().empty())
+ return Owned(Lit);
+
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_character_udl));
+
+ // C++11 [lex.ext]p6: The literal L is treated as a call of the form
+ // operator "" X (ch)
+ return BuildCookedLiteralOperatorCall(*this, UDLScope, UDSuffix, UDSuffixLoc,
+ llvm::makeArrayRef(&Lit, 1),
+ Tok.getLocation());
}
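(A minimal sketch of the one-argument call form cited from [lex.ext]p6; the suffix _up is hypothetical.)

    constexpr char operator"" _up(char c) {
      return (c >= 'a' && c <= 'z') ? char(c - 'a' + 'A') : c;
    }
    static_assert('q'_up == 'Q', "'q'_up is treated as operator\"\" _up('q')");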
-ExprResult Sema::ActOnNumericConstant(const Token &Tok) {
+ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, uint64_t Val) {
+ unsigned IntSize = Context.getTargetInfo().getIntWidth();
+ return Owned(IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val),
+ Context.IntTy, Loc));
+}
+
+static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
+ QualType Ty, SourceLocation Loc) {
+ const llvm::fltSemantics &Format = S.Context.getFloatTypeSemantics(Ty);
+
+ using llvm::APFloat;
+ APFloat Val(Format);
+
+ APFloat::opStatus result = Literal.GetFloatValue(Val);
+
+ // Overflow is always an error, but underflow is only an error if
+ // we underflowed to zero (APFloat reports denormals as underflow).
+ if ((result & APFloat::opOverflow) ||
+ ((result & APFloat::opUnderflow) && Val.isZero())) {
+ unsigned diagnostic;
+ SmallString<20> buffer;
+ if (result & APFloat::opOverflow) {
+ diagnostic = diag::warn_float_overflow;
+ APFloat::getLargest(Format).toString(buffer);
+ } else {
+ diagnostic = diag::warn_float_underflow;
+ APFloat::getSmallest(Format).toString(buffer);
+ }
+
+ S.Diag(Loc, diagnostic)
+ << Ty
+ << StringRef(buffer.data(), buffer.size());
+ }
+
+ bool isExact = (result == APFloat::opOK);
+ return FloatingLiteral::Create(S.Context, Val, isExact, Ty, Loc);
+}
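(Illustrative inputs for the two diagnostics above, assuming IEEE-754 float and double.)

    double big    = 1e999;   // warn_float_overflow: exceeds DBL_MAX (~1.8e308)
    double tiny   = 1e-999;  // warn_float_underflow: underflows to zero
    float  single = 3.5e38f; // overflow for single precision (FLT_MAX ~3.4e38)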
+
+ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// Fast path for a single digit (which is quite common). A single digit
- // cannot have a trigraph, escaped newline, radix prefix, or type suffix.
+ // cannot have a trigraph, escaped newline, radix prefix, or suffix.
if (Tok.getLength() == 1) {
const char Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok);
- unsigned IntSize = Context.getTargetInfo().getIntWidth();
- return Owned(IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val-'0'),
- Context.IntTy, Tok.getLocation()));
+ return ActOnIntegerConstant(Tok.getLocation(), Val-'0');
}
- llvm::SmallString<512> IntegerBuffer;
+ SmallString<512> IntegerBuffer;
// Add padding so that NumericLiteralParser can overread by one character.
IntegerBuffer.resize(Tok.getLength()+1);
const char *ThisTokBegin = &IntegerBuffer[0];
@@ -2571,6 +2524,96 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok) {
if (Literal.hadError)
return ExprError();
+ if (Literal.hasUDSuffix()) {
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_numeric_udl));
+
+ QualType CookedTy;
+ if (Literal.isFloatingLiteral()) {
+ // C++11 [lex.ext]p4: If S contains a literal operator with parameter type
+ // long double, the literal is treated as a call of the form
+ // operator "" X (f L)
+ CookedTy = Context.LongDoubleTy;
+ } else {
+ // C++11 [lex.ext]p3: If S contains a literal operator with parameter type
+ // unsigned long long, the literal is treated as a call of the form
+ // operator "" X (n ULL)
+ CookedTy = Context.UnsignedLongLongTy;
+ }
+
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix);
+ DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc);
+ OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc);
+
+ // Perform literal operator lookup to determine if we're building a raw
+ // literal or a cooked one.
+ LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName);
+ switch (LookupLiteralOperator(UDLScope, R, llvm::makeArrayRef(&CookedTy, 1),
+ /*AllowRawAndTemplate*/true)) {
+ case LOLR_Error:
+ return ExprError();
+
+ case LOLR_Cooked: {
+ Expr *Lit;
+ if (Literal.isFloatingLiteral()) {
+ Lit = BuildFloatingLiteral(*this, Literal, CookedTy, Tok.getLocation());
+ } else {
+ llvm::APInt ResultVal(Context.getTargetInfo().getLongLongWidth(), 0);
+ if (Literal.GetIntegerValue(ResultVal))
+ Diag(Tok.getLocation(), diag::warn_integer_too_large);
+ Lit = IntegerLiteral::Create(Context, ResultVal, CookedTy,
+ Tok.getLocation());
+ }
+ return BuildLiteralOperatorCall(R, OpNameInfo,
+ llvm::makeArrayRef(&Lit, 1),
+ Tok.getLocation());
+ }
+
+ case LOLR_Raw: {
+  // C++11 [lex.ext]p3, p4: If S contains a raw literal operator, the
+ // literal is treated as a call of the form
+ // operator "" X ("n")
+ SourceLocation TokLoc = Tok.getLocation();
+ unsigned Length = Literal.getUDSuffixOffset();
+ QualType StrTy = Context.getConstantArrayType(
+ Context.CharTy, llvm::APInt(32, Length + 1),
+ ArrayType::Normal, 0);
+ Expr *Lit = StringLiteral::Create(
+ Context, StringRef(ThisTokBegin, Length), StringLiteral::Ascii,
+ /*Pascal*/false, StrTy, &TokLoc, 1);
+ return BuildLiteralOperatorCall(R, OpNameInfo,
+ llvm::makeArrayRef(&Lit, 1), TokLoc);
+ }
+
+ case LOLR_Template:
+  // C++11 [lex.ext]p3, p4: Otherwise (S contains a literal operator
+  // template), L is treated as a call of the form
+ // operator "" X <'c1', 'c2', ... 'ck'>()
+ // where n is the source character sequence c1 c2 ... ck.
+ TemplateArgumentListInfo ExplicitArgs;
+ unsigned CharBits = Context.getIntWidth(Context.CharTy);
+ bool CharIsUnsigned = Context.CharTy->isUnsignedIntegerType();
+ llvm::APSInt Value(CharBits, CharIsUnsigned);
+ for (unsigned I = 0, N = Literal.getUDSuffixOffset(); I != N; ++I) {
+ Value = ThisTokBegin[I];
+ TemplateArgument Arg(Value, Context.CharTy);
+ TemplateArgumentLocInfo ArgInfo;
+ ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
+ }
+ return BuildLiteralOperatorCall(R, OpNameInfo, ArrayRef<Expr*>(),
+ Tok.getLocation(), &ExplicitArgs);
+ }
+
+ llvm_unreachable("unexpected literal operator lookup result");
+ }
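(Sketch of the three operator forms behind LOLR_Cooked, LOLR_Raw, and LOLR_Template; the suffixes are hypothetical, C++11.)

    #include <cstddef>

    // LOLR_Cooked: receives the parsed value.
    constexpr unsigned long long operator"" _n(unsigned long long v) { return v; }

    // LOLR_Raw: receives the spelling, like the StringLiteral constructed
    // above from ThisTokBegin.
    constexpr std::size_t rawLen(const char *s, std::size_t n = 0) {
      return s[n] ? rawLen(s, n + 1) : n;
    }
    constexpr std::size_t operator"" _digits(const char *s) { return rawLen(s); }

    // LOLR_Template: receives the spelling as template arguments.
    template <char... Cs>
    constexpr std::size_t operator"" _count() { return sizeof...(Cs); }

    static_assert(42_n == 42, "cooked");
    static_assert(123_digits == 3, "raw: operator\"\" _digits(\"123\")");
    static_assert(9001_count == 4, "template: <'9','0','0','1'>");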
+
Expr *Res;
if (Literal.isFloatingLiteral()) {
@@ -2582,39 +2625,12 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok) {
else
Ty = Context.LongDoubleTy;
- const llvm::fltSemantics &Format = Context.getFloatTypeSemantics(Ty);
-
- using llvm::APFloat;
- APFloat Val(Format);
-
- APFloat::opStatus result = Literal.GetFloatValue(Val);
-
- // Overflow is always an error, but underflow is only an error if
- // we underflowed to zero (APFloat reports denormals as underflow).
- if ((result & APFloat::opOverflow) ||
- ((result & APFloat::opUnderflow) && Val.isZero())) {
- unsigned diagnostic;
- llvm::SmallString<20> buffer;
- if (result & APFloat::opOverflow) {
- diagnostic = diag::warn_float_overflow;
- APFloat::getLargest(Format).toString(buffer);
- } else {
- diagnostic = diag::warn_float_underflow;
- APFloat::getSmallest(Format).toString(buffer);
- }
-
- Diag(Tok.getLocation(), diagnostic)
- << Ty
- << StringRef(buffer.data(), buffer.size());
- }
-
- bool isExact = (result == APFloat::opOK);
- Res = FloatingLiteral::Create(Context, Val, isExact, Ty, Tok.getLocation());
+ Res = BuildFloatingLiteral(*this, Literal, Ty, Tok.getLocation());
if (Ty == Context.DoubleTy) {
- if (getLangOptions().SinglePrecisionConstants) {
+ if (getLangOpts().SinglePrecisionConstants) {
Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).take();
- } else if (getLangOptions().OpenCL && !getOpenCLOptions().cl_khr_fp64) {
+ } else if (getLangOpts().OpenCL && !getOpenCLOptions().cl_khr_fp64) {
Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64);
Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).take();
}
@@ -2625,9 +2641,10 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok) {
QualType Ty;
// long long is a C99 feature.
- if (!getLangOptions().C99 && !getLangOptions().CPlusPlus0x &&
- Literal.isLongLong)
- Diag(Tok.getLocation(), diag::ext_longlong);
+ if (!getLangOpts().C99 && Literal.isLongLong)
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_longlong);
// Get the value in the widest-possible width.
llvm::APInt ResultVal(Context.getTargetInfo().getIntMaxTWidth(), 0);
@@ -2688,7 +2705,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok) {
// To be compatible with MSVC, hex integer literals ending with the
// LL or i64 suffix are always signed in Microsoft mode.
if (!Literal.isUnsigned && (ResultVal[LongLongSize-1] == 0 ||
- (getLangOptions().MicrosoftExt && Literal.isLongLong)))
+ (getLangOpts().MicrosoftExt && Literal.isLongLong)))
Ty = Context.LongLongTy;
else if (AllowUnsigned)
Ty = Context.UnsignedLongLongTy;
@@ -2971,6 +2988,12 @@ Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
if (isInvalid)
return ExprError();
+ if (ExprKind == UETT_SizeOf && E->getType()->isVariableArrayType()) {
+ PE = TransformToPotentiallyEvaluated(E);
+ if (PE.isInvalid()) return ExprError();
+ E = PE.take();
+ }
+
// C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
return Owned(new (Context) UnaryExprOrTypeTraitExpr(
ExprKind, E, Context.getSizeType(), OpLoc,
@@ -3044,6 +3067,11 @@ Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
case tok::minusminus: Opc = UO_PostDec; break;
}
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Input);
+ if (Result.isInvalid()) return ExprError();
+ Input = Result.take();
+
return BuildUnaryOp(S, OpLoc, Opc, Input);
}
@@ -3057,7 +3085,7 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *LHSExp = Base, *RHSExp = Idx;
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
(LHSExp->isTypeDependent() || RHSExp->isTypeDependent())) {
return Owned(new (Context) ArraySubscriptExpr(LHSExp, RHSExp,
Context.DependentTy,
@@ -3065,11 +3093,12 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
RLoc));
}
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
(LHSExp->getType()->isRecordType() ||
LHSExp->getType()->isEnumeralType() ||
RHSExp->getType()->isRecordType() ||
- RHSExp->getType()->isEnumeralType())) {
+ RHSExp->getType()->isEnumeralType()) &&
+ !LHSExp->getType()->isObjCObjectPointerType()) {
return CreateOverloadedArraySubscriptExpr(LLoc, RLoc, Base, Idx);
}
@@ -3113,17 +3142,20 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
BaseExpr = LHSExp;
IndexExpr = RHSExp;
ResultType = PTy->getPointeeType();
+ } else if (const ObjCObjectPointerType *PTy =
+ LHSTy->getAs<ObjCObjectPointerType>()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ Result = BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr, 0, 0);
+ if (!Result.isInvalid())
+ return Owned(Result.take());
+ ResultType = PTy->getPointeeType();
} else if (const PointerType *PTy = RHSTy->getAs<PointerType>()) {
// Handle the uncommon case of "123[Ptr]".
BaseExpr = RHSExp;
IndexExpr = LHSExp;
ResultType = PTy->getPointeeType();
} else if (const ObjCObjectPointerType *PTy =
- LHSTy->getAs<ObjCObjectPointerType>()) {
- BaseExpr = LHSExp;
- IndexExpr = RHSExp;
- ResultType = PTy->getPointeeType();
- } else if (const ObjCObjectPointerType *PTy =
RHSTy->getAs<ObjCObjectPointerType>()) {
// Handle the uncommon case of "123[Ptr]".
BaseExpr = RHSExp;
@@ -3188,7 +3220,7 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
return ExprError();
}
- if (ResultType->isVoidType() && !getLangOptions().CPlusPlus) {
+ if (ResultType->isVoidType() && !getLangOpts().CPlusPlus) {
// GNU extension: subscripting on pointer to void
Diag(LLoc, diag::ext_gnu_subscript_void_type)
<< BaseExpr->getSourceRange();
@@ -3247,6 +3279,7 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
// the semantic constraints are checked, at the point where the
// default argument expression appears.
ContextRAII SavedContext(*this, FD);
+ LocalInstantiationScope Local(*this);
Result = SubstExpr(UninstExpr, ArgList);
}
if (Result.isInvalid())
@@ -3257,7 +3290,7 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
= InitializedEntity::InitializeParameter(Context, Param);
InitializationKind Kind
= InitializationKind::CreateCopy(Param->getLocation(),
- /*FIXME:EqualLoc*/UninstExpr->getSourceRange().getBegin());
+ /*FIXME:EqualLoc*/UninstExpr->getLocStart());
Expr *ResultE = Result.takeAs<Expr>();
InitializationSequence InitSeq(*this, Entity, Kind, &ResultE, 1);
@@ -3276,18 +3309,25 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
// be properly destroyed.
// FIXME: We should really be rebuilding the default argument with new
// bound temporaries; see the comment in PR5810.
- for (unsigned i = 0, e = Param->getNumDefaultArgTemporaries(); i != e; ++i) {
- CXXTemporary *Temporary = Param->getDefaultArgTemporary(i);
- MarkDeclarationReferenced(Param->getDefaultArg()->getLocStart(),
- const_cast<CXXDestructorDecl*>(Temporary->getDestructor()));
- ExprTemporaries.push_back(Temporary);
+ // We don't need to do that with block decls, though, because
+ // blocks in default argument expressions can never capture anything.
+ if (isa<ExprWithCleanups>(Param->getInit())) {
+ // Set the "needs cleanups" bit regardless of whether there are
+ // any explicit objects.
ExprNeedsCleanups = true;
+
+ // Append all the objects to the cleanup list. Right now, this
+ // should always be a no-op, because blocks in default argument
+ // expressions should never be able to capture anything.
+ assert(!cast<ExprWithCleanups>(Param->getInit())->getNumObjects() &&
+ "default argument expression has capturing blocks?");
}
// We already type-checked the argument, so we know it works.
// Just mark all of the declarations in this potentially-evaluated expression
// as being "referenced".
- MarkDeclarationsReferencedInExpr(Param->getDefaultArg());
+ MarkDeclarationsReferencedInExpr(Param->getDefaultArg(),
+ /*SkipLocalVariables=*/true);
return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param));
}
@@ -3371,7 +3411,7 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
CallType = VariadicBlock; // Block
else if (isa<MemberExpr>(Fn))
CallType = VariadicMethod;
- Invalid = GatherArgumentsForCall(Call->getSourceRange().getBegin(), FDecl,
+ Invalid = GatherArgumentsForCall(Call->getLocStart(), FDecl,
Proto, 0, Args, NumArgs, AllArgs, CallType);
if (Invalid)
return true;
@@ -3388,7 +3428,8 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
unsigned FirstProtoArg,
Expr **Args, unsigned NumArgs,
SmallVector<Expr *, 8> &AllArgs,
- VariadicCallType CallType) {
+ VariadicCallType CallType,
+ bool AllowExplicit) {
unsigned NumArgsInProto = Proto->getNumArgs();
unsigned NumArgsToCheck = NumArgs;
bool Invalid = false;
@@ -3401,33 +3442,42 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
QualType ProtoArgType = Proto->getArgType(i);
Expr *Arg;
+ ParmVarDecl *Param;
if (ArgIx < NumArgs) {
Arg = Args[ArgIx++];
- if (RequireCompleteType(Arg->getSourceRange().getBegin(),
+ if (RequireCompleteType(Arg->getLocStart(),
ProtoArgType,
PDiag(diag::err_call_incomplete_argument)
<< Arg->getSourceRange()))
return true;
// Pass the argument
- ParmVarDecl *Param = 0;
+ Param = 0;
if (FDecl && i < FDecl->getNumParams())
Param = FDecl->getParamDecl(i);
+ // Strip the unbridged-cast placeholder expression off, if applicable.
+ if (Arg->getType() == Context.ARCUnbridgedCastTy &&
+ FDecl && FDecl->hasAttr<CFAuditedTransferAttr>() &&
+ (!Param || !Param->hasAttr<CFConsumedAttr>()))
+ Arg = stripARCUnbridgedCast(Arg);
+
InitializedEntity Entity =
Param? InitializedEntity::InitializeParameter(Context, Param)
: InitializedEntity::InitializeParameter(Context, ProtoArgType,
Proto->isArgConsumed(i));
ExprResult ArgE = PerformCopyInitialization(Entity,
SourceLocation(),
- Owned(Arg));
+ Owned(Arg),
+ /*TopLevelOfInitList=*/false,
+ AllowExplicit);
if (ArgE.isInvalid())
return true;
Arg = ArgE.takeAs<Expr>();
} else {
- ParmVarDecl *Param = FDecl->getParamDecl(i);
+ Param = FDecl->getParamDecl(i);
ExprResult ArgExpr =
BuildCXXDefaultArgExpr(CallLoc, FDecl, Param);
@@ -3442,6 +3492,9 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
// with its own checking, such as a BinaryOperator.
CheckArrayAccess(Arg);
+ // Check for violations of C99 static array rules (C99 6.7.5.3p7).
+ CheckStaticArrayArgument(CallLoc, Param, Arg);
+
AllArgs.push_back(Arg);
}
@@ -3479,6 +3532,60 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
return Invalid;
}
+static void DiagnoseCalleeStaticArrayParam(Sema &S, ParmVarDecl *PVD) {
+ TypeLoc TL = PVD->getTypeSourceInfo()->getTypeLoc();
+ if (ArrayTypeLoc *ATL = dyn_cast<ArrayTypeLoc>(&TL))
+ S.Diag(PVD->getLocation(), diag::note_callee_static_array)
+ << ATL->getLocalSourceRange();
+}
+
+/// CheckStaticArrayArgument - If the given argument corresponds to a static
+/// array parameter, check that it is non-null, and that if it is formed by
+/// array-to-pointer decay, the underlying array is sufficiently large.
+///
+/// C99 6.7.5.3p7: If the keyword static also appears within the [ and ] of the
+/// array type derivation, then for each call to the function, the value of the
+/// corresponding actual argument shall provide access to the first element of
+/// an array with at least as many elements as specified by the size expression.
+void
+Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
+ ParmVarDecl *Param,
+ const Expr *ArgExpr) {
+ // Static array parameters are not supported in C++.
+ if (!Param || getLangOpts().CPlusPlus)
+ return;
+
+ QualType OrigTy = Param->getOriginalType();
+
+ const ArrayType *AT = Context.getAsArrayType(OrigTy);
+ if (!AT || AT->getSizeModifier() != ArrayType::Static)
+ return;
+
+ if (ArgExpr->isNullPointerConstant(Context,
+ Expr::NPC_NeverValueDependent)) {
+ Diag(CallLoc, diag::warn_null_arg) << ArgExpr->getSourceRange();
+ DiagnoseCalleeStaticArrayParam(*this, Param);
+ return;
+ }
+
+ const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT);
+ if (!CAT)
+ return;
+
+ const ConstantArrayType *ArgCAT =
+ Context.getAsConstantArrayType(ArgExpr->IgnoreParenImpCasts()->getType());
+ if (!ArgCAT)
+ return;
+
+ if (ArgCAT->getSize().ult(CAT->getSize())) {
+ Diag(CallLoc, diag::warn_static_array_too_small)
+ << ArgExpr->getSourceRange()
+ << (unsigned) ArgCAT->getSize().getZExtValue()
+ << (unsigned) CAT->getSize().getZExtValue();
+ DiagnoseCalleeStaticArrayParam(*this, Param);
+ }
+}
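(What the new check flags, shown in C99 since the check returns early in C++; the diagnostics named are the ones emitted above.)

    void fill(int buf[static 10]);  /* callee may assume 10 accessible elements */

    void caller(void) {
      int small[4];
      fill(small);                  /* warn_static_array_too_small: 4 < 10 */
      fill(0);                      /* warn_null_arg */
    }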
+
/// Given a function expression of unknown-any type, try to rebuild it
/// to have a function type.
static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *fn);
@@ -3499,7 +3606,7 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
Expr **Args = ArgExprs.release();
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// If this is a pseudo-destructor expression, build the call immediately.
if (isa<CXXPseudoDestructorExpr>(Fn)) {
if (NumArgs > 0) {
@@ -3508,8 +3615,6 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
<< FixItHint::CreateRemoval(
SourceRange(Args[0]->getLocStart(),
Args[NumArgs-1]->getLocEnd()));
-
- NumArgs = 0;
}
return Owned(new (Context) CallExpr(Context, Fn, 0, 0, Context.VoidTy,
@@ -3523,7 +3628,8 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
bool Dependent = false;
if (Fn->isTypeDependent())
Dependent = true;
- else if (Expr::hasAnyTypeDependentArguments(Args, NumArgs))
+ else if (Expr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(Args, NumArgs)))
Dependent = true;
if (Dependent) {
@@ -3574,6 +3680,11 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
}
// If we're directly calling a function, get the appropriate declaration.
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult result = rebuildUnknownAnyFunction(*this, Fn);
+ if (result.isInvalid()) return ExprError();
+ Fn = result.take();
+ }
Expr *NakedFn = Fn->IgnoreParens();
@@ -3601,7 +3712,8 @@ Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
QualType ConfigQTy = ConfigDecl->getType();
DeclRefExpr *ConfigDR = new (Context) DeclRefExpr(
- ConfigDecl, ConfigQTy, VK_LValue, LLLLoc);
+ ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
+ MarkFunctionReferenced(LLLLoc, ConfigDecl);
return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, 0,
/*IsExecConfig=*/true);
@@ -3698,7 +3810,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
<< Fn->getType() << Fn->getSourceRange());
}
- if (getLangOptions().CUDA) {
+ if (getLangOpts().CUDA) {
if (Config) {
// CUDA: Kernel calls must be to global functions
if (FDecl && !FDecl->hasAttr<CUDAGlobalAttr>())
@@ -3719,7 +3831,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// Check for a valid return type
if (CheckCallReturnType(FuncT->getResultType(),
- Fn->getSourceRange().getBegin(), TheCall,
+ Fn->getLocStart(), TheCall,
FDecl))
return ExprError();
@@ -3778,7 +3890,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
Arg = ArgE.takeAs<Expr>();
}
- if (RequireCompleteType(Arg->getSourceRange().getBegin(),
+ if (RequireCompleteType(Arg->getLocStart(),
Arg->getType(),
PDiag(diag::err_call_incomplete_argument)
<< Arg->getSourceRange()))
@@ -3852,7 +3964,8 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
= InitializedEntity::InitializeTemporary(literalType);
InitializationKind Kind
= InitializationKind::CreateCStyleCast(LParenLoc,
- SourceRange(LParenLoc, RParenLoc));
+ SourceRange(LParenLoc, RParenLoc),
+ /*InitList=*/true);
InitializationSequence InitSeq(*this, Entity, Kind, &LiteralExpr, 1);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
MultiExprArg(*this, &LiteralExpr, 1),
@@ -3868,7 +3981,7 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
}
// In C, compound literals are l-values for some reason.
- ExprValueKind VK = getLangOptions().CPlusPlus ? VK_RValue : VK_LValue;
+ ExprValueKind VK = getLangOpts().CPlusPlus ? VK_RValue : VK_LValue;
return MaybeBindToTemporary(
new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
@@ -3881,6 +3994,20 @@ Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
unsigned NumInit = InitArgList.size();
Expr **InitList = InitArgList.release();
+ // Immediately handle non-overload placeholders. Overloads can be
+ // resolved contextually, but everything else here can't.
+ for (unsigned I = 0; I != NumInit; ++I) {
+ if (InitList[I]->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(InitList[I]);
+
+ // Ignore failures; dropping the entire initializer list because
+ // of one failure would be terrible for indexing/etc.
+ if (result.isInvalid()) continue;
+
+ InitList[I] = result.take();
+ }
+ }
+
// Semantic analysis for initializers is done by ActOnDeclarator() and
// CheckInitializer() - it requires knowledge of the object being initialized.
@@ -3896,7 +4023,7 @@ static void maybeExtendBlockObject(Sema &S, ExprResult &E) {
assert(E.get()->isRValue());
// Only do this in an r-value context.
- if (!S.getLangOptions().ObjCAutoRefCount) return;
+ if (!S.getLangOpts().ObjCAutoRefCount) return;
E = ImplicitCastExpr::Create(S.Context, E.get()->getType(),
CK_ARCExtendBlockObject, E.get(),
@@ -3927,6 +4054,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
// pointers. Everything else should be possible.
QualType SrcTy = Src.get()->getType();
+ if (const AtomicType *SrcAtomicTy = SrcTy->getAs<AtomicType>())
+ SrcTy = SrcAtomicTy->getValueType();
+ if (const AtomicType *DestAtomicTy = DestTy->getAs<AtomicType>())
+ DestTy = DestAtomicTy->getValueType();
+
if (Context.hasSameUnqualifiedType(SrcTy, DestTy))
return CK_NoOp;
@@ -3946,12 +4078,10 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_ObjCObjectPointer:
if (SrcKind == Type::STK_ObjCObjectPointer)
return CK_BitCast;
- else if (SrcKind == Type::STK_CPointer)
+ if (SrcKind == Type::STK_CPointer)
return CK_CPointerToObjCPointerCast;
- else {
- maybeExtendBlockObject(*this, Src);
- return CK_BlockPointerToObjCPointerCast;
- }
+ maybeExtendBlockObject(*this, Src);
+ return CK_BlockPointerToObjCPointerCast;
case Type::STK_Bool:
return CK_PointerToBoolean;
case Type::STK_Integral:
@@ -3962,7 +4092,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_MemberPointer:
llvm_unreachable("illegal cast from pointer");
}
- break;
+ llvm_unreachable("Should have returned before this");
case Type::STK_Bool: // casting from bool is like casting from an integer
case Type::STK_Integral:
@@ -3993,7 +4123,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
}
- break;
+ llvm_unreachable("Should have returned before this");
case Type::STK_Floating:
switch (DestTy->getScalarTypeKind()) {
@@ -4020,7 +4150,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
}
- break;
+ llvm_unreachable("Should have returned before this");
case Type::STK_FloatingComplex:
switch (DestTy->getScalarTypeKind()) {
@@ -4049,7 +4179,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
}
- break;
+ llvm_unreachable("Should have returned before this");
case Type::STK_IntegralComplex:
switch (DestTy->getScalarTypeKind()) {
@@ -4078,7 +4208,7 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
}
- break;
+ llvm_unreachable("Should have returned before this");
}
llvm_unreachable("Unhandled scalar cast");
@@ -4116,7 +4246,7 @@ ExprResult Sema::CheckExtVectorCast(SourceRange R, QualType DestTy,
// (See OpenCL 6.2).
if (SrcTy->isVectorType()) {
if (Context.getTypeSize(DestTy) != Context.getTypeSize(SrcTy)
- || (getLangOptions().OpenCL &&
+ || (getLangOpts().OpenCL &&
(DestTy.getCanonicalType() != SrcTy.getCanonicalType()))) {
Diag(R.getBegin(),diag::err_invalid_conversion_between_ext_vectors)
<< DestTy << SrcTy << R;
@@ -4156,7 +4286,7 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
if (D.isInvalidType())
return ExprError();
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments (C++ only).
CheckExtraCXXDefaultArguments(D);
}
@@ -4172,7 +4302,7 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
// i.e. all the elements are integer constants.
ParenExpr *PE = dyn_cast<ParenExpr>(CastExpr);
ParenListExpr *PLE = dyn_cast<ParenListExpr>(CastExpr);
- if ((getLangOptions().AltiVec || getLangOptions().OpenCL)
+ if ((getLangOpts().AltiVec || getLangOpts().OpenCL)
&& castType->isVectorType() && (PE || PLE)) {
if (PLE && PLE->getNumExprs() == 0) {
Diag(PLE->getExprLoc(), diag::err_altivec_empty_initializer);
@@ -4239,7 +4369,9 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
// be replicated to all the components of the vector
if (numExprs == 1) {
QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
- ExprResult Literal = Owned(exprs[0]);
+ ExprResult Literal = DefaultLvalueConversion(exprs[0]);
+ if (Literal.isInvalid())
+ return ExprError();
Literal = ImpCastExprToType(Literal.take(), ElemTy,
PrepareScalarCast(Literal, ElemTy));
return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.take());
@@ -4250,24 +4382,24 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
return ExprError();
}
else
- for (unsigned i = 0, e = numExprs; i != e; ++i)
- initExprs.push_back(exprs[i]);
+ initExprs.append(exprs, exprs + numExprs);
}
else {
// For OpenCL, when the number of initializers is a single value,
// it will be replicated to all components of the vector.
- if (getLangOptions().OpenCL &&
+ if (getLangOpts().OpenCL &&
VTy->getVectorKind() == VectorType::GenericVector &&
numExprs == 1) {
QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
- ExprResult Literal = Owned(exprs[0]);
+ ExprResult Literal = DefaultLvalueConversion(exprs[0]);
+ if (Literal.isInvalid())
+ return ExprError();
Literal = ImpCastExprToType(Literal.take(), ElemTy,
PrepareScalarCast(Literal, ElemTy));
return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.take());
}
- for (unsigned i = 0, e = numExprs; i != e; ++i)
- initExprs.push_back(exprs[i]);
+ initExprs.append(exprs, exprs + numExprs);
}
// FIXME: This means that pretty-printing the final AST will produce curly
// braces instead of the original commas.
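A sketch of the source-level behavior the hunks above implement (OpenCL C, where float4 is the built-in vector type): a single initializer in a vector cast is now run through DefaultLvalueConversion before being replicated.

    float x = 2.0f;
    float4 v = (float4)(x);   // x is lvalue-converted, then splatted:
                              // v == (float4)(2.0f, 2.0f, 2.0f, 2.0f)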
@@ -4278,8 +4410,8 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE);
}
-/// This is not an AltiVec-style cast, so turn the ParenListExpr into a sequence
-/// of comma binary operators.
+/// This is not an AltiVec-style cast or C++ direct-initialization, so turn
+/// the ParenListExpr into a sequence of comma binary operators.
ExprResult
Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
ParenListExpr *E = dyn_cast<ParenListExpr>(OrigExpr);
@@ -4297,18 +4429,13 @@ Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
return ActOnParenExpr(E->getLParenLoc(), E->getRParenLoc(), Result.get());
}
-ExprResult Sema::ActOnParenOrParenListExpr(SourceLocation L,
- SourceLocation R,
- MultiExprArg Val) {
+ExprResult Sema::ActOnParenListExpr(SourceLocation L,
+ SourceLocation R,
+ MultiExprArg Val) {
unsigned nexprs = Val.size();
Expr **exprs = reinterpret_cast<Expr**>(Val.release());
assert((exprs != 0) && "ActOnParenListExpr() missing expr list");
- Expr *expr;
- if (nexprs == 1)
- expr = new (Context) ParenExpr(L, R, exprs[0]);
- else
- expr = new (Context) ParenListExpr(Context, L, exprs, nexprs, R,
- exprs[nexprs-1]->getType());
+ Expr *expr = new (Context) ParenListExpr(Context, L, exprs, nexprs, R);
return Owned(expr);
}
@@ -4358,11 +4485,11 @@ static bool checkCondition(Sema &S, Expr *Cond) {
if (CondTy->isScalarType()) return false;
// OpenCL: Sec 6.3.i says the condition is allowed to be a vector or scalar.
- if (S.getLangOptions().OpenCL && CondTy->isVectorType())
+ if (S.getLangOpts().OpenCL && CondTy->isVectorType())
return false;
// Emit the proper error message.
- S.Diag(Cond->getLocStart(), S.getLangOptions().OpenCL ?
+ S.Diag(Cond->getLocStart(), S.getLangOpts().OpenCL ?
diag::err_typecheck_cond_expect_scalar :
diag::err_typecheck_cond_expect_scalar_or_vector)
<< CondTy;
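An illustrative OpenCL C fragment for the check above. The per-component semantics assumed here follow the spec's select() equivalence (a condition component whose most significant bit is set picks the first operand), which is worth verifying against OpenCL s6.3.i:

    int4 c = (int4)(-1, 0, -1, 0);
    int4 a = (int4)(1, 2, 3, 4);
    int4 b = (int4)(5, 6, 7, 8);
    int4 r = c ? a : b;   // r == (int4)(1, 6, 3, 8)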
@@ -4446,8 +4573,28 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
rhptee = RHSTy->castAs<PointerType>()->getPointeeType();
}
- if (!S.Context.typesAreCompatible(lhptee.getUnqualifiedType(),
- rhptee.getUnqualifiedType())) {
+ // C99 6.5.15p6: If both operands are pointers to compatible types or to
+ // differently qualified versions of compatible types, the result type is
+ // a pointer to an appropriately qualified version of the composite
+ // type.
+
+ // Only CVR-qualifiers exist in the standard, and the differently-qualified
+ // clause doesn't make sense for our extensions. E.g. address space 2 should
+ // be incompatible with address space 3: the two may, for example, refer to
+ // memory on entirely different devices.
+ Qualifiers lhQual = lhptee.getQualifiers();
+ Qualifiers rhQual = rhptee.getQualifiers();
+
+ unsigned MergedCVRQual = lhQual.getCVRQualifiers() | rhQual.getCVRQualifiers();
+ lhQual.removeCVRQualifiers();
+ rhQual.removeCVRQualifiers();
+
+ lhptee = S.Context.getQualifiedType(lhptee.getUnqualifiedType(), lhQual);
+ rhptee = S.Context.getQualifiedType(rhptee.getUnqualifiedType(), rhQual);
+
+ QualType CompositeTy = S.Context.mergeTypes(lhptee, rhptee);
+
+ if (CompositeTy.isNull()) {
S.Diag(Loc, diag::warn_typecheck_cond_incompatible_pointers)
<< LHSTy << RHSTy << LHS.get()->getSourceRange()
<< RHS.get()->getSourceRange();
@@ -4461,16 +4608,12 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
}
// The pointer types are compatible.
- // C99 6.5.15p6: If both operands are pointers to compatible types *or* to
- // differently qualified versions of compatible types, the result type is
- // a pointer to an appropriately qualified version of the *composite*
- // type.
- // FIXME: Need to calculate the composite type.
- // FIXME: Need to add qualifiers
+ QualType ResultTy = CompositeTy.withCVRQualifiers(MergedCVRQual);
+ ResultTy = S.Context.getPointerType(ResultTy);
- LHS = S.ImpCastExprToType(LHS.take(), LHSTy, CK_BitCast);
- RHS = S.ImpCastExprToType(RHS.take(), LHSTy, CK_BitCast);
- return LHSTy;
+ LHS = S.ImpCastExprToType(LHS.take(), ResultTy, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.take(), ResultTy, CK_BitCast);
+ return ResultTy;
}
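A minimal C example of what the rewritten checkConditionalPointerCompatibility now computes, per C99 6.5.15p6: the CVR qualifiers of both pointees are merged onto the composite pointee type.

    const int *p;
    volatile int *q;
    int cond;
    // Result type is 'const volatile int *'; previously both arms were
    // simply bitcast to the type of the left-hand side.
    const volatile int *r = cond ? p : q;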
/// \brief Return the resulting type when the operands are both block pointers.
@@ -4574,7 +4717,7 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
RHS = move(RHSResult);
// C++ is sufficiently different to merit its own checker.
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
return CXXCheckConditionalOperands(Cond, LHS, RHS, VK, OK, QuestionLoc);
VK = VK_RValue;
@@ -4605,7 +4748,7 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// OpenCL: If the condition is a vector, and both operands are scalar,
// attempt to implicitly convert them to the vector type to act like the
// built in select.
- if (getLangOptions().OpenCL && CondTy->isVectorType())
+ if (getLangOpts().OpenCL && CondTy->isVectorType())
if (checkConditionalConvertScalarsToVectors(*this, LHS, RHS, CondTy))
return QualType();
@@ -4780,6 +4923,14 @@ QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
}
// Check Objective-C object pointer types and 'void *'
if (LHSTy->isVoidPointerType() && RHSTy->isObjCObjectPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ // ARC forbids the implicit conversion of object pointers to 'void *',
+ // so these types are not compatible.
+ Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ LHS = RHS = true;
+ return QualType();
+ }
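A sketch (Objective-C compiled with -fobjc-arc) of what the new err_cond_voidptr_arc path rejects; the symmetric hunk below handles the operands in the opposite order:

    void *vp;
    id obj;
    int cond;
    void *r = cond ? vp : obj;   // error under ARC: object pointers do not
                                 // implicitly convert to 'void *'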
QualType lhptee = LHSTy->getAs<PointerType>()->getPointeeType();
QualType rhptee = RHSTy->getAs<ObjCObjectPointerType>()->getPointeeType();
QualType destPointee
@@ -4792,6 +4943,14 @@ QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
return destType;
}
if (LHSTy->isObjCObjectPointerType() && RHSTy->isVoidPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ // ARC forbids the implicit conversion of object pointers to 'void *',
+ // so these types are not compatible.
+ Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ LHS = RHS = true;
+ return QualType();
+ }
QualType lhptee = LHSTy->getAs<ObjCObjectPointerType>()->getPointeeType();
QualType rhptee = RHSTy->getAs<PointerType>()->getPointeeType();
QualType destPointee
@@ -4938,7 +5097,7 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
// We usually want to apply unary conversions *before* saving, except
// in the special case of a C++ l-value conditional.
- if (!(getLangOptions().CPlusPlus
+ if (!(getLangOpts().CPlusPlus
&& !commonExpr->isTypeDependent()
&& commonExpr->getValueKind() == RHSExpr->getValueKind()
&& commonExpr->isGLValue()
@@ -4954,7 +5113,8 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
opaqueValue = new (Context) OpaqueValueExpr(commonExpr->getExprLoc(),
commonExpr->getType(),
commonExpr->getValueKind(),
- commonExpr->getObjectKind());
+ commonExpr->getObjectKind(),
+ commonExpr);
LHSExpr = CondExpr = opaqueValue;
}
@@ -5019,9 +5179,9 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
// It's okay to add or remove GC or lifetime qualifiers when converting to
// and from void*.
- else if (lhq.withoutObjCGCAttr().withoutObjCGLifetime()
+ else if (lhq.withoutObjCGCAttr().withoutObjCLifetime()
.compatiblyIncludes(
- rhq.withoutObjCGCAttr().withoutObjCGLifetime())
+ rhq.withoutObjCGCAttr().withoutObjCLifetime())
&& (lhptee->isVoidType() || rhptee->isVoidType()))
; // keep old
@@ -5099,7 +5259,7 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
// General pointer incompatibility takes priority over qualifiers.
return Sema::IncompatiblePointer;
}
- if (!S.getLangOptions().CPlusPlus &&
+ if (!S.getLangOpts().CPlusPlus &&
S.IsNoReturnConversion(ltrans, rtrans, ltrans))
return Sema::IncompatiblePointer;
return ConvTy;
@@ -5122,7 +5282,7 @@ checkBlockPointerTypesForAssignment(Sema &S, QualType LHSType,
rhptee = cast<BlockPointerType>(RHSType)->getPointeeType();
// In C++, the types have to match exactly.
- if (S.getLangOptions().CPlusPlus)
+ if (S.getLangOpts().CPlusPlus)
return Sema::IncompatibleBlockPointer;
Sema::AssignConvertType ConvTy = Sema::Compatible;
@@ -5161,7 +5321,9 @@ checkObjCPointerTypesForAssignment(Sema &S, QualType LHSType,
QualType lhptee = LHSType->getAs<ObjCObjectPointerType>()->getPointeeType();
QualType rhptee = RHSType->getAs<ObjCObjectPointerType>()->getPointeeType();
- if (!lhptee.isAtLeastAsQualifiedAs(rhptee))
+ if (!lhptee.isAtLeastAsQualifiedAs(rhptee) &&
+ // make an exception for id<P>
+ !LHSType->isObjCQualifiedIdType())
return Sema::CompatiblePointerDiscardsQualifiers;
if (S.Context.typesAreCompatible(LHSType, RHSType))
@@ -5213,9 +5375,6 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
LHSType = Context.getCanonicalType(LHSType).getUnqualifiedType();
RHSType = Context.getCanonicalType(RHSType).getUnqualifiedType();
- // We can't do assignment from/to atomics yet.
- if (LHSType->isAtomicType())
- return Incompatible;
// Common case: no conversion required.
if (LHSType == RHSType) {
@@ -5223,6 +5382,21 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Compatible;
}
+ if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
+ if (AtomicTy->getValueType() == RHSType) {
+ Kind = CK_NonAtomicToAtomic;
+ return Compatible;
+ }
+ }
+
+ if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(RHSType)) {
+ if (AtomicTy->getValueType() == LHSType) {
+ Kind = CK_AtomicToNonAtomic;
+ return Compatible;
+ }
+ }
+
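A minimal C11 sketch of the two cast kinds introduced above:

    _Atomic(int) ai;
    int i = 0;
    ai = i;   // Compatible, via CK_NonAtomicToAtomic
    i = ai;   // Compatible, via CK_AtomicToNonAtomic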
// If the left-hand side is a reference type, then we are in a
// (rare!) case where we've allowed the use of references in C,
// e.g., as a parameter type in a built-in function. In this case,
@@ -5269,7 +5443,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// If we are allowing lax vector conversions, and LHS and RHS are both
// vectors, the total size only needs to be the same. This is a bitcast;
// no bits are changed but the result type is different.
- if (getLangOptions().LaxVectorConversions &&
+ if (getLangOpts().LaxVectorConversions &&
(Context.getTypeSize(LHSType) == Context.getTypeSize(RHSType))) {
Kind = CK_BitCast;
return IncompatibleVectors;
@@ -5280,7 +5454,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// Arithmetic conversions.
if (LHSType->isArithmeticType() && RHSType->isArithmeticType() &&
- !(getLangOptions().CPlusPlus && LHSType->isEnumeralType())) {
+ !(getLangOpts().CPlusPlus && LHSType->isEnumeralType())) {
Kind = PrepareScalarCast(RHS, LHSType);
return Compatible;
}
@@ -5346,7 +5520,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
}
// id -> T^
- if (getLangOptions().ObjC1 && RHSType->isObjCIdType()) {
+ if (getLangOpts().ObjC1 && RHSType->isObjCIdType()) {
Kind = CK_AnyPointerToBlockPointerCast;
return Compatible;
}
@@ -5368,7 +5542,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
Kind = CK_BitCast;
Sema::AssignConvertType result =
checkObjCPointerTypesForAssignment(*this, LHSType, RHSType);
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
result == Compatible &&
!CheckObjCARCUnavailableWeakConversion(OrigLHSType, RHSType))
result = IncompatibleObjCWeakRef;
@@ -5535,18 +5709,32 @@ Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType,
Sema::AssignConvertType
Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
bool Diagnose) {
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (!LHSType->isRecordType() && !LHSType->isAtomicType()) {
// C++ 5.17p3: If the left operand is not of class type, the
// expression is implicitly converted (C++ 4) to the
// cv-unqualified type of the left operand.
- ExprResult Res = PerformImplicitConversion(RHS.get(),
- LHSType.getUnqualifiedType(),
- AA_Assigning, Diagnose);
+ ExprResult Res;
+ if (Diagnose) {
+ Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ AA_Assigning);
+ } else {
+ ImplicitConversionSequence ICS =
+ TryImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/false,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+ if (ICS.isFailure())
+ return Incompatible;
+ Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ ICS, AA_Assigning);
+ }
if (Res.isInvalid())
return Incompatible;
Sema::AssignConvertType result = Compatible;
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
!CheckObjCARCUnavailableWeakConversion(LHSType,
RHS.get()->getType()))
result = IncompatibleObjCWeakRef;
@@ -5609,6 +5797,15 @@ QualType Sema::InvalidOperands(SourceLocation Loc, ExprResult &LHS,
QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.take());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.take());
+ if (RHS.isInvalid())
+ return QualType();
+
// For conversion purposes, we ignore any qualifiers.
// For example, "const float" and "float" are equivalent.
QualType LHSType =
@@ -5633,7 +5830,7 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
return RHSType;
}
- if (getLangOptions().LaxVectorConversions &&
+ if (getLangOpts().LaxVectorConversions &&
Context.getTypeSize(LHSType) == Context.getTypeSize(RHSType)) {
// If we are allowing lax vector conversions, and LHS and RHS are both
// vectors, the total size only needs to be the same. This is a
@@ -5738,9 +5935,15 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
+
if (!LHS.get()->getType()->isArithmeticType() ||
- !RHS.get()->getType()->isArithmeticType())
+ !RHS.get()->getType()->isArithmeticType()) {
+ if (IsCompAssign &&
+ LHS.get()->getType()->isAtomicType() &&
+ RHS.get()->getType()->isArithmeticType())
+ return compType;
return InvalidOperands(Loc, LHS, RHS);
+ }
// Check for division by zero.
if (IsDiv &&
@@ -5784,7 +5987,7 @@ QualType Sema::CheckRemainderOperands(
/// \brief Diagnose invalid arithmetic on two void pointers.
static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc,
Expr *LHSExpr, Expr *RHSExpr) {
- S.Diag(Loc, S.getLangOptions().CPlusPlus
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
? diag::err_typecheck_pointer_arith_void_type
: diag::ext_gnu_void_ptr)
<< 1 /* two pointers */ << LHSExpr->getSourceRange()
@@ -5794,7 +5997,7 @@ static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc,
/// \brief Diagnose invalid arithmetic on a void pointer.
static void diagnoseArithmeticOnVoidPointer(Sema &S, SourceLocation Loc,
Expr *Pointer) {
- S.Diag(Loc, S.getLangOptions().CPlusPlus
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
? diag::err_typecheck_pointer_arith_void_type
: diag::ext_gnu_void_ptr)
<< 0 /* one pointer */ << Pointer->getSourceRange();
@@ -5805,7 +6008,7 @@ static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc,
Expr *LHS, Expr *RHS) {
assert(LHS->getType()->isAnyPointerType());
assert(RHS->getType()->isAnyPointerType());
- S.Diag(Loc, S.getLangOptions().CPlusPlus
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
? diag::err_typecheck_pointer_arith_function_type
: diag::ext_gnu_ptr_func_arith)
<< 1 /* two pointers */ << LHS->getType()->getPointeeType()
@@ -5820,7 +6023,7 @@ static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc,
static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc,
Expr *Pointer) {
assert(Pointer->getType()->isAnyPointerType());
- S.Diag(Loc, S.getLangOptions().CPlusPlus
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
? diag::err_typecheck_pointer_arith_function_type
: diag::ext_gnu_ptr_func_arith)
<< 0 /* one pointer */ << Pointer->getType()->getPointeeType()
@@ -5861,11 +6064,11 @@ static bool checkArithmeticOpPointerOperand(Sema &S, SourceLocation Loc,
QualType PointeeTy = Operand->getType()->getPointeeType();
if (PointeeTy->isVoidType()) {
diagnoseArithmeticOnVoidPointer(S, Loc, Operand);
- return !S.getLangOptions().CPlusPlus;
+ return !S.getLangOpts().CPlusPlus;
}
if (PointeeTy->isFunctionType()) {
diagnoseArithmeticOnFunctionPointer(S, Loc, Operand);
- return !S.getLangOptions().CPlusPlus;
+ return !S.getLangOpts().CPlusPlus;
}
if (checkArithmeticIncompletePointerType(S, Loc, Operand)) return false;
@@ -5900,7 +6103,7 @@ static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
else if (!isLHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, RHSExpr);
else diagnoseArithmeticOnTwoVoidPointers(S, Loc, LHSExpr, RHSExpr);
- return !S.getLangOptions().CPlusPlus;
+ return !S.getLangOpts().CPlusPlus;
}
bool isLHSFuncPtr = isLHSPointer && LHSPointeeTy->isFunctionType();
@@ -5911,7 +6114,7 @@ static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
RHSExpr);
else diagnoseArithmeticOnTwoFunctionPointers(S, Loc, LHSExpr, RHSExpr);
- return !S.getLangOptions().CPlusPlus;
+ return !S.getLangOpts().CPlusPlus;
}
if (checkArithmeticIncompletePointerType(S, Loc, LHSExpr)) return false;
@@ -5934,6 +6137,46 @@ static bool checkArithmethicPointerOnNonFragileABI(Sema &S,
return false;
}
+/// diagnoseStringPlusInt - Emit a warning when adding an integer to a string
+/// literal.
+static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ StringLiteral* StrExpr = dyn_cast<StringLiteral>(LHSExpr->IgnoreImpCasts());
+ Expr* IndexExpr = RHSExpr;
+ if (!StrExpr) {
+ StrExpr = dyn_cast<StringLiteral>(RHSExpr->IgnoreImpCasts());
+ IndexExpr = LHSExpr;
+ }
+
+ bool IsStringPlusInt = StrExpr &&
+ IndexExpr->getType()->isIntegralOrUnscopedEnumerationType();
+ if (!IsStringPlusInt)
+ return;
+
+ llvm::APSInt index;
+ if (IndexExpr->EvaluateAsInt(index, Self.getASTContext())) {
+ unsigned StrLenWithNull = StrExpr->getLength() + 1;
+ if (index.isNonNegative() &&
+ index <= llvm::APSInt(llvm::APInt(index.getBitWidth(), StrLenWithNull),
+ index.isUnsigned()))
+ return;
+ }
+
+ SourceRange DiagRange(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ Self.Diag(OpLoc, diag::warn_string_plus_int)
+ << DiagRange << IndexExpr->IgnoreImpCasts()->getType();
+
+ // Only print a fixit for "str" + int, not for int + "str".
+ if (IndexExpr == RHSExpr) {
+ SourceLocation EndLoc = Self.PP.getLocForEndOfToken(RHSExpr->getLocEnd());
+ Self.Diag(OpLoc, diag::note_string_plus_int_silence)
+ << FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&")
+ << FixItHint::CreateReplacement(SourceRange(OpLoc), "[")
+ << FixItHint::CreateInsertion(EndLoc, "]");
+ } else
+ Self.Diag(OpLoc, diag::note_string_plus_int_silence);
+}
+
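Illustrative C for the new warning; the parameter i is a stand-in for any index that EvaluateAsInt cannot fold:

    const char *f(int i) {
      const char *s = "hello" + i;   // warns: '+' does not append to a string
      const char *t = &"hello"[i];   // the fix-it's suggested form; no warning
      const char *ok = "hello" + 2;  // constant within [0, length + 1]:
                                     // suppressed by the bounds check above
      return s;
    }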
/// \brief Emit error when two pointers are incompatible.
static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc,
Expr *LHSExpr, Expr *RHSExpr) {
@@ -5945,7 +6188,8 @@ static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc,
}
QualType Sema::CheckAdditionOperands( // C99 6.5.6
- ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy) {
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
+ QualType* CompLHSTy) {
checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
if (LHS.get()->getType()->isVectorType() ||
@@ -5959,6 +6203,10 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
+ // Diagnose "string literal" '+' int.
+ if (Opc == BO_Add)
+ diagnoseStringPlusInt(*this, Loc, LHS.get(), RHS.get());
+
// handle the common case first (both operands are arithmetic).
if (LHS.get()->getType()->isArithmeticType() &&
RHS.get()->getType()->isArithmeticType()) {
@@ -5966,6 +6214,12 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6
return compType;
}
+ if (LHS.get()->getType()->isAtomicType() &&
+ RHS.get()->getType()->isArithmeticType()) {
+ *CompLHSTy = LHS.get()->getType();
+ return compType;
+ }
+
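A C11 sketch of the compound-assignment exception added here (the same carve-out appears in CheckSubtractionOperands below):

    _Atomic(int) ai;
    ai += 2;   // atomic LHS, arithmetic RHS: now accepted
    ai -= 1;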
// Put any potential pointer into PExp
Expr* PExp = LHS.get(), *IExp = RHS.get();
if (IExp->getType()->isAnyPointerType())
@@ -6026,6 +6280,12 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
return compType;
}
+ if (LHS.get()->getType()->isAtomicType() &&
+ RHS.get()->getType()->isArithmeticType()) {
+ *CompLHSTy = LHS.get()->getType();
+ return compType;
+ }
+
// Either ptr - int or ptr - ptr.
if (LHS.get()->getType()->isAnyPointerType()) {
QualType lpointee = LHS.get()->getType()->getPointeeType();
@@ -6039,11 +6299,9 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
if (!checkArithmeticOpPointerOperand(*this, Loc, LHS.get()))
return QualType();
- Expr *IExpr = RHS.get()->IgnoreParenCasts();
- UnaryOperator negRex(IExpr, UO_Minus, IExpr->getType(), VK_RValue,
- OK_Ordinary, IExpr->getExprLoc());
// Check array bounds for pointer arithmetic
- CheckArrayAccess(LHS.get()->IgnoreParenCasts(), &negRex);
+ CheckArrayAccess(LHS.get(), RHS.get(), /*ArraySubscriptExpr*/0,
+ /*AllowOnePastEnd*/true, /*IndexNegated*/true);
if (CompLHSTy) *CompLHSTy = LHS.get()->getType();
return LHS.get()->getType();
@@ -6054,7 +6312,7 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
= RHS.get()->getType()->getAs<PointerType>()) {
QualType rpointee = RHSPTy->getPointeeType();
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Pointee types must be the same: C++ [expr.add]
if (!Context.hasSameUnqualifiedType(lpointee, rpointee)) {
diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get());
@@ -6131,7 +6389,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
// Print the bit representation of the signed integer as an unsigned
// hexadecimal number.
- llvm::SmallString<40> HexResult;
+ SmallString<40> HexResult;
Result.toString(HexResult, 16, /*Signed =*/false, /*Literal =*/true);
// If we are only missing a sign bit, this is less likely to result in actual
@@ -6459,7 +6717,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
QualType RCanPointeeTy =
RHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
if (LCanPointeeTy == RCanPointeeTy)
return ResultTy;
if (!IsRelational &&
@@ -6515,7 +6773,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
return ResultTy;
}
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Comparison of nullptr_t with itself.
if (LHSType->isNullPtrType() && RHSType->isNullPtrType())
return ResultTy;
@@ -6639,11 +6897,11 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
bool isError = false;
if ((LHSIsNull && LHSType->isIntegerType()) ||
(RHSIsNull && RHSType->isIntegerType())) {
- if (IsRelational && !getLangOptions().CPlusPlus)
+ if (IsRelational && !getLangOpts().CPlusPlus)
DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero;
- } else if (IsRelational && !getLangOptions().CPlusPlus)
+ } else if (IsRelational && !getLangOpts().CPlusPlus)
DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer;
- else if (getLangOptions().CPlusPlus) {
+ else if (getLangOpts().CPlusPlus) {
DiagID = diag::err_typecheck_comparison_of_pointer_integer;
isError = true;
} else
@@ -6681,6 +6939,26 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
return InvalidOperands(Loc, LHS, RHS);
}
+
+// Return a signed integer vector type with the same number of elements and
+// the same element size as V. For floating-point vectors, this returns an
+// integer vector type whose elements have the matching width.
+QualType Sema::GetSignedVectorType(QualType V) {
+ const VectorType *VTy = V->getAs<VectorType>();
+ unsigned TypeSize = Context.getTypeSize(VTy->getElementType());
+ if (TypeSize == Context.getTypeSize(Context.CharTy))
+ return Context.getExtVectorType(Context.CharTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.ShortTy))
+ return Context.getExtVectorType(Context.ShortTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.IntTy))
+ return Context.getExtVectorType(Context.IntTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.LongTy))
+ return Context.getExtVectorType(Context.LongTy, VTy->getNumElements());
+ assert(TypeSize == Context.getTypeSize(Context.LongLongTy) &&
+ "Unhandled vector element size in vector compare");
+ return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements());
+}
+
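Illustrative use of the element-size mapping, assuming a target where int is 32 bits wide:

    typedef float float4 __attribute__((ext_vector_type(4)));
    typedef int   int4   __attribute__((ext_vector_type(4)));
    float4 a, b;
    int4 r = a < b;   // 32-bit float elements compare into an int4,
                      // not a float4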
/// CheckVectorCompareOperands - vector comparisons are a clang extension that
/// operates on extended vector types. Instead of producing an IntTy result,
/// like a scalar comparison, a vector comparison produces a vector of integer
@@ -6695,7 +6973,6 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
return vType;
QualType LHSType = LHS.get()->getType();
- QualType RHSType = RHS.get()->getType();
// If AltiVec, the comparison results in a numeric type, i.e.
// bool for C++, int for C
@@ -6706,8 +6983,10 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
// x == x, x != x, x < x, etc. These always evaluate to a constant, and
// often indicate logic errors in the program.
if (!LHSType->hasFloatingRepresentation()) {
- if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHS.get()->IgnoreParens()))
- if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHS.get()->IgnoreParens()))
+ if (DeclRefExpr* DRL
+ = dyn_cast<DeclRefExpr>(LHS.get()->IgnoreParenImpCasts()))
+ if (DeclRefExpr* DRR
+ = dyn_cast<DeclRefExpr>(RHS.get()->IgnoreParenImpCasts()))
if (DRL->getDecl() == DRR->getDecl())
DiagRuntimeBehavior(Loc, 0,
PDiag(diag::warn_comparison_always)
@@ -6718,26 +6997,23 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
// Check for comparisons of floating point operands using != and ==.
if (!IsRelational && LHSType->hasFloatingRepresentation()) {
- assert (RHSType->hasFloatingRepresentation());
+ assert (RHS.get()->getType()->hasFloatingRepresentation());
CheckFloatComparison(Loc, LHS.get(), RHS.get());
}
+
+ // Return a signed type for the vector.
+ return GetSignedVectorType(LHSType);
+}
- // Return the type for the comparison, which is the same as vector type for
- // integer vectors, or an integer type of identical size and number of
- // elements for floating point vectors.
- if (LHSType->hasIntegerRepresentation())
- return LHSType;
-
- const VectorType *VTy = LHSType->getAs<VectorType>();
- unsigned TypeSize = Context.getTypeSize(VTy->getElementType());
- if (TypeSize == Context.getTypeSize(Context.IntTy))
- return Context.getExtVectorType(Context.IntTy, VTy->getNumElements());
- if (TypeSize == Context.getTypeSize(Context.LongTy))
- return Context.getExtVectorType(Context.LongTy, VTy->getNumElements());
-
- assert(TypeSize == Context.getTypeSize(Context.LongLongTy) &&
- "Unhandled vector element size in vector compare");
- return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements());
+QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc) {
+ // Ensure that either both operands are of the same vector type, or
+ // one operand is of a vector type and the other is of its element type.
+ QualType vType = CheckVectorOperands(LHS, RHS, Loc, false);
+ if (vType.isNull() || vType->isFloatingType())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ return GetSignedVectorType(LHS.get()->getType());
}
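A sketch of the extension this enables; floating-point element types still take the InvalidOperands path above:

    typedef int int4 __attribute__((ext_vector_type(4)));
    int4 a, b;
    int4 r = a && b;   // element-wise logical AND, signed vector result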
inline QualType Sema::CheckBitwiseOperands(
@@ -6770,6 +7046,10 @@ inline QualType Sema::CheckBitwiseOperands(
inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc) {
+ // Check vector operands differently.
+ if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType())
+ return CheckVectorLogicalOperands(LHS, RHS, Loc);
+
// Diagnose cases where the user writes a logical and/or but probably meant a
// bitwise one. We do this when the LHS is a non-bool integer and the RHS
// is a constant.
@@ -6782,10 +7062,10 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
// that isn't 0 or 1 (which indicate a potential logical operation that
// happened to fold to true/false) then warn.
// Parens on the RHS are ignored.
- Expr::EvalResult Result;
- if (RHS.get()->Evaluate(Result, Context) && !Result.HasSideEffects)
- if ((getLangOptions().Bool && !RHS.get()->getType()->isBooleanType()) ||
- (Result.Val.getInt() != 0 && Result.Val.getInt() != 1)) {
+ llvm::APSInt Result;
+ if (RHS.get()->EvaluateAsInt(Result, Context))
+ if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType()) ||
+ (Result != 0 && Result != 1)) {
Diag(Loc, diag::warn_logical_instead_of_bitwise)
<< RHS.get()->getSourceRange()
<< (Opc == BO_LAnd ? "&&" : "||");
@@ -6794,7 +7074,7 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
<< (Opc == BO_LAnd ? "&" : "|")
<< FixItHint::CreateReplacement(SourceRange(
Loc, Lexer::getLocForEndOfToken(Loc, 0, getSourceManager(),
- getLangOptions())),
+ getLangOpts())),
Opc == BO_LAnd ? "&" : "|");
if (Opc == BO_LAnd)
// Suggest replacing "Foo() && kNonZero" with "Foo()"
@@ -6803,12 +7083,12 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
SourceRange(
Lexer::getLocForEndOfToken(LHS.get()->getLocEnd(),
0, getSourceManager(),
- getLangOptions()),
+ getLangOpts()),
RHS.get()->getLocEnd()));
}
}
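Illustrative C for the diagnostic above:

    int flags;
    if (flags && 0x4) { }   // warn_logical_instead_of_bitwise; the fix-its
                            // offer 'flags & 0x4' or dropping the RHS
    if (flags && 1) { }     // RHS folds to 1: treated as intentional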
- if (!Context.getLangOptions().CPlusPlus) {
+ if (!Context.getLangOpts().CPlusPlus) {
LHS = UsualUnaryConversions(LHS.take());
if (LHS.isInvalid())
return QualType();
@@ -6851,63 +7131,68 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
/// depends on various declarations and thus must be treated specially.
///
static bool IsReadonlyProperty(Expr *E, Sema &S) {
- if (E->getStmtClass() == Expr::ObjCPropertyRefExprClass) {
- const ObjCPropertyRefExpr* PropExpr = cast<ObjCPropertyRefExpr>(E);
- if (PropExpr->isImplicitProperty()) return false;
+ const ObjCPropertyRefExpr *PropExpr = dyn_cast<ObjCPropertyRefExpr>(E);
+ if (!PropExpr) return false;
+ if (PropExpr->isImplicitProperty()) return false;
- ObjCPropertyDecl *PDecl = PropExpr->getExplicitProperty();
- QualType BaseType = PropExpr->isSuperReceiver() ?
+ ObjCPropertyDecl *PDecl = PropExpr->getExplicitProperty();
+ QualType BaseType = PropExpr->isSuperReceiver() ?
PropExpr->getSuperReceiverType() :
PropExpr->getBase()->getType();
- if (const ObjCObjectPointerType *OPT =
- BaseType->getAsObjCInterfacePointerType())
- if (ObjCInterfaceDecl *IFace = OPT->getInterfaceDecl())
- if (S.isPropertyReadonly(PDecl, IFace))
- return true;
- }
- return false;
-}
-
-static bool IsConstProperty(Expr *E, Sema &S) {
- if (E->getStmtClass() == Expr::ObjCPropertyRefExprClass) {
- const ObjCPropertyRefExpr* PropExpr = cast<ObjCPropertyRefExpr>(E);
- if (PropExpr->isImplicitProperty()) return false;
-
- ObjCPropertyDecl *PDecl = PropExpr->getExplicitProperty();
- QualType T = PDecl->getType();
- if (T->isReferenceType())
- T = T->getAs<ReferenceType>()->getPointeeType();
- CanQualType CT = S.Context.getCanonicalType(T);
- return CT.isConstQualified();
- }
+ if (const ObjCObjectPointerType *OPT =
+ BaseType->getAsObjCInterfacePointerType())
+ if (ObjCInterfaceDecl *IFace = OPT->getInterfaceDecl())
+ if (S.isPropertyReadonly(PDecl, IFace))
+ return true;
return false;
}
static bool IsReadonlyMessage(Expr *E, Sema &S) {
- if (E->getStmtClass() != Expr::MemberExprClass)
- return false;
- const MemberExpr *ME = cast<MemberExpr>(E);
- NamedDecl *Member = ME->getMemberDecl();
- if (isa<FieldDecl>(Member)) {
- Expr *Base = ME->getBase()->IgnoreParenImpCasts();
- if (Base->getStmtClass() != Expr::ObjCMessageExprClass)
- return false;
- return cast<ObjCMessageExpr>(Base)->getMethodDecl() != 0;
- }
- return false;
+ const MemberExpr *ME = dyn_cast<MemberExpr>(E);
+ if (!ME) return false;
+ if (!isa<FieldDecl>(ME->getMemberDecl())) return false;
+ ObjCMessageExpr *Base =
+ dyn_cast<ObjCMessageExpr>(ME->getBase()->IgnoreParenImpCasts());
+ if (!Base) return false;
+ return Base->getMethodDecl() != 0;
+}
+
+/// Is the given expression (which must be 'const') a reference to a
+/// variable which was originally non-const, but which has become
+/// 'const' due to being captured within a block or lambda?
+enum NonConstCaptureKind { NCCK_None, NCCK_Block, NCCK_Lambda };
+static NonConstCaptureKind isReferenceToNonConstCapture(Sema &S, Expr *E) {
+ assert(E->isLValue() && E->getType().isConstQualified());
+ E = E->IgnoreParens();
+
+ // Must be a reference to a declaration from an enclosing scope.
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE) return NCCK_None;
+ if (!DRE->refersToEnclosingLocal()) return NCCK_None;
+
+ // The declaration must be a variable which is not declared 'const'.
+ VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!var) return NCCK_None;
+ if (var->getType().isConstQualified()) return NCCK_None;
+ assert(var->hasLocalStorage() && "capture added 'const' to non-local?");
+
+ // Decide whether the first capture was for a block or a lambda.
+ DeclContext *DC = S.CurContext;
+ while (DC->getParent() != var->getDeclContext())
+ DC = DC->getParent();
+ return (isa<BlockDecl>(DC) ? NCCK_Block : NCCK_Lambda);
}
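A sketch (assuming a C++11 translation unit built with -fblocks) of the two capture flavors NonConstCaptureKind distinguishes:

    void f() {
      int x = 0;
      ^{ x = 1; }();    // err_block_decl_ref_not_modifiable_lvalue:
                        // captured without __block, so 'x' is const here
      [x] { x = 1; };   // err_lambda_decl_ref_not_modifiable_lvalue:
                        // non-mutable lambdas capture by const copy
    }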
/// CheckForModifiableLvalue - Verify that E is a modifiable lvalue. If not,
/// emit an error and return true. If so, return false.
static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
+ assert(!E->hasPlaceholderType(BuiltinType::PseudoObject));
SourceLocation OrigLoc = Loc;
Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(S.Context,
&Loc);
if (IsLV == Expr::MLV_Valid && IsReadonlyProperty(E, S))
IsLV = Expr::MLV_ReadonlyProperty;
- else if (Expr::MLV_ConstQualified && IsConstProperty(E, S))
- IsLV = Expr::MLV_Valid;
else if (IsLV == Expr::MLV_ClassTemporary && IsReadonlyMessage(E, S))
IsLV = Expr::MLV_InvalidMessageExpression;
if (IsLV == Expr::MLV_Valid)
@@ -6919,9 +7204,19 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
case Expr::MLV_ConstQualified:
Diag = diag::err_typecheck_assign_const;
+ // Use a specialized diagnostic when we're assigning to an object
+ // from an enclosing function or block.
+ if (NonConstCaptureKind NCCK = isReferenceToNonConstCapture(S, E)) {
+ if (NCCK == NCCK_Block)
+ Diag = diag::err_block_decl_ref_not_modifiable_lvalue;
+ else
+ Diag = diag::err_lambda_decl_ref_not_modifiable_lvalue;
+ break;
+ }
+
// In ARC, use some specialized diagnostics for occasions where we
// infer 'const'. These are always pseudo-strong variables.
- if (S.getLangOptions().ObjCAutoRefCount) {
+ if (S.getLangOpts().ObjCAutoRefCount) {
DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
if (declRef && isa<VarDecl>(declRef->getDecl())) {
VarDecl *var = cast<VarDecl>(declRef->getDecl());
@@ -6935,7 +7230,9 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
// - self
ObjCMethodDecl *method = S.getCurMethodDecl();
if (method && var == method->getSelfDecl())
- Diag = diag::err_typecheck_arr_assign_self;
+ Diag = method->isClassMethod()
+ ? diag::err_typecheck_arc_assign_self_class_method
+ : diag::err_typecheck_arc_assign_self;
// - fast enumeration variables
else
@@ -6979,15 +7276,9 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
case Expr::MLV_DuplicateVectorComponents:
Diag = diag::err_typecheck_duplicate_vector_components_not_mlvalue;
break;
- case Expr::MLV_NotBlockQualified:
- Diag = diag::err_block_decl_ref_not_modifiable_lvalue;
- break;
case Expr::MLV_ReadonlyProperty:
- Diag = diag::error_readonly_property_assignment;
- break;
case Expr::MLV_NoSetterProperty:
- Diag = diag::error_nosetter_property_assignment;
- break;
+ llvm_unreachable("readonly properties should be processed differently");
case Expr::MLV_InvalidMessageExpression:
Diag = diag::error_readonly_message_assignment;
break;
@@ -7012,6 +7303,8 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
SourceLocation Loc,
QualType CompoundType) {
+ assert(!LHSExpr->hasPlaceholderType(BuiltinType::PseudoObject));
+
// Verify that LHS is a modifiable lvalue, and emit error if not.
if (CheckForModifiableLvalue(LHSExpr, Loc, *this))
return QualType();
@@ -7022,14 +7315,6 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
AssignConvertType ConvTy;
if (CompoundType.isNull()) {
QualType LHSTy(LHSType);
- // Simple assignment "x = y".
- if (LHSExpr->getObjectKind() == OK_ObjCProperty) {
- ExprResult LHSResult = Owned(LHSExpr);
- ConvertPropertyForLValue(LHSResult, RHS, LHSTy);
- if (LHSResult.isInvalid())
- return QualType();
- LHSExpr = LHSResult.take();
- }
ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
if (RHS.isInvalid())
return QualType();
@@ -7042,10 +7327,9 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
ConvTy = Compatible;
if (ConvTy == Compatible &&
- getLangOptions().ObjCNonFragileABI &&
LHSType->isObjCObjectType())
- Diag(Loc, diag::err_assignment_requires_nonfragile_object)
- << LHSType;
+ Diag(Loc, diag::err_objc_object_assignment)
+ << LHSType;
// If the RHS is a unary plus or minus, check to see if the = and + are
// right next to each other. If so, the user may have typo'd "x =+ 4"
@@ -7072,7 +7356,7 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
if (ConvTy == Compatible) {
if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong)
checkRetainCycles(LHSExpr, RHS.get());
- else if (getLangOptions().ObjCAutoRefCount)
+ else if (getLangOpts().ObjCAutoRefCount)
checkUnsafeExprAssigns(Loc, LHSExpr, RHS.get());
}
} else {
@@ -7093,7 +7377,7 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
// is converted to the type of the assignment expression (above).
// C++ 5.17p1: the type of the assignment expression is that of its left
// operand.
- return (getLangOptions().CPlusPlus
+ return (getLangOpts().CPlusPlus
? LHSType : LHSType.getUnqualifiedType());
}
@@ -7117,7 +7401,7 @@ static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS,
if (LHS.isInvalid())
return QualType();
- if (!S.getLangOptions().CPlusPlus) {
+ if (!S.getLangOpts().CPlusPlus) {
RHS = S.DefaultFunctionArrayLvalueConversion(RHS.take());
if (RHS.isInvalid())
return QualType();
@@ -7139,9 +7423,15 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
return S.Context.DependentTy;
QualType ResType = Op->getType();
+ // Atomic types can be used for increment / decrement where the non-atomic
+ // versions can, so ignore the _Atomic() specifier for the purpose of
+ // checking.
+ if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
+ ResType = ResAtomicType->getValueType();
+
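A C11 sketch of the carve-out above:

    _Atomic(int) ai;
    ++ai;   // checked as if the operand were plain 'int'
    ai--;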
assert(!ResType.isNull() && "no type for increment/decrement expression");
- if (S.getLangOptions().CPlusPlus && ResType->isBooleanType()) {
+ if (S.getLangOpts().CPlusPlus && ResType->isBooleanType()) {
// Decrement of bool is not allowed.
if (!IsInc) {
S.Diag(OpLoc, diag::err_decrement_bool) << Op->getSourceRange();
@@ -7168,7 +7458,7 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
if (PR.isInvalid()) return QualType();
return CheckIncrementDecrementOperand(S, PR.take(), VK, OpLoc,
IsInc, IsPrefix);
- } else if (S.getLangOptions().AltiVec && ResType->isVectorType()) {
+ } else if (S.getLangOpts().AltiVec && ResType->isVectorType()) {
// OK! ( C/C++ Language Extensions for CBEA(Version 2.6) 10.3 )
} else {
S.Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement)
@@ -7182,7 +7472,7 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
// In C++, a prefix increment is the same type as the operand. Otherwise
// (in C or with postfix), the increment is the unqualified type of the
// operand.
- if (IsPrefix && S.getLangOptions().CPlusPlus) {
+ if (IsPrefix && S.getLangOpts().CPlusPlus) {
VK = VK_LValue;
return ResType;
} else {
@@ -7190,102 +7480,6 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
return ResType.getUnqualifiedType();
}
}
-
-ExprResult Sema::ConvertPropertyForRValue(Expr *E) {
- assert(E->getValueKind() == VK_LValue &&
- E->getObjectKind() == OK_ObjCProperty);
- const ObjCPropertyRefExpr *PRE = E->getObjCProperty();
-
- QualType T = E->getType();
- QualType ReceiverType;
- if (PRE->isObjectReceiver())
- ReceiverType = PRE->getBase()->getType();
- else if (PRE->isSuperReceiver())
- ReceiverType = PRE->getSuperReceiverType();
- else
- ReceiverType = Context.getObjCInterfaceType(PRE->getClassReceiver());
-
- ExprValueKind VK = VK_RValue;
- if (PRE->isImplicitProperty()) {
- if (ObjCMethodDecl *GetterMethod =
- PRE->getImplicitPropertyGetter()) {
- T = getMessageSendResultType(ReceiverType, GetterMethod,
- PRE->isClassReceiver(),
- PRE->isSuperReceiver());
- VK = Expr::getValueKindForType(GetterMethod->getResultType());
- }
- else {
- Diag(PRE->getLocation(), diag::err_getter_not_found)
- << PRE->getBase()->getType();
- }
- }
- else {
- // lvalue-ness of an explicit property is determined by
- // getter type.
- QualType ResT = PRE->getGetterResultType();
- VK = Expr::getValueKindForType(ResT);
- }
-
- E = ImplicitCastExpr::Create(Context, T, CK_GetObjCProperty,
- E, 0, VK);
-
- ExprResult Result = MaybeBindToTemporary(E);
- if (!Result.isInvalid())
- E = Result.take();
-
- return Owned(E);
-}
-
-void Sema::ConvertPropertyForLValue(ExprResult &LHS, ExprResult &RHS,
- QualType &LHSTy) {
- assert(LHS.get()->getValueKind() == VK_LValue &&
- LHS.get()->getObjectKind() == OK_ObjCProperty);
- const ObjCPropertyRefExpr *PropRef = LHS.get()->getObjCProperty();
-
- bool Consumed = false;
-
- if (PropRef->isImplicitProperty()) {
- // If using property-dot syntax notation for assignment, and there is a
- // setter, RHS expression is being passed to the setter argument. So,
- // type conversion (and comparison) is RHS to setter's argument type.
- if (const ObjCMethodDecl *SetterMD = PropRef->getImplicitPropertySetter()) {
- ObjCMethodDecl::param_const_iterator P = SetterMD->param_begin();
- LHSTy = (*P)->getType();
- Consumed = (getLangOptions().ObjCAutoRefCount &&
- (*P)->hasAttr<NSConsumedAttr>());
-
- // Otherwise, if the getter returns an l-value, just call that.
- } else {
- QualType Result = PropRef->getImplicitPropertyGetter()->getResultType();
- ExprValueKind VK = Expr::getValueKindForType(Result);
- if (VK == VK_LValue) {
- LHS = ImplicitCastExpr::Create(Context, LHS.get()->getType(),
- CK_GetObjCProperty, LHS.take(), 0, VK);
- return;
- }
- }
- } else if (getLangOptions().ObjCAutoRefCount) {
- const ObjCMethodDecl *setter
- = PropRef->getExplicitProperty()->getSetterMethodDecl();
- if (setter) {
- ObjCMethodDecl::param_const_iterator P = setter->param_begin();
- LHSTy = (*P)->getType();
- Consumed = (*P)->hasAttr<NSConsumedAttr>();
- }
- }
-
- if ((getLangOptions().CPlusPlus && LHSTy->isRecordType()) ||
- getLangOptions().ObjCAutoRefCount) {
- InitializedEntity Entity =
- InitializedEntity::InitializeParameter(Context, LHSTy, Consumed);
- ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(), RHS);
- if (!ArgE.isInvalid()) {
- RHS = ArgE;
- if (getLangOptions().ObjCAutoRefCount && !PropRef->isSuperReceiver())
- checkRetainCycles(const_cast<Expr*>(PropRef->getBase()), RHS.get());
- }
- }
-}
/// getPrimaryDecl - Helper function for CheckAddressOfOperand().
@@ -7369,33 +7563,41 @@ static void diagnoseAddressOfInvalidType(Sema &S, SourceLocation Loc,
/// operator (C99 6.3.2.1p[2-4]), and its result is never an lvalue.
/// In C++, the operand might be an overloaded function name, in which case
/// we allow the '&' but retain the overloaded-function type.
-static QualType CheckAddressOfOperand(Sema &S, Expr *OrigOp,
+static QualType CheckAddressOfOperand(Sema &S, ExprResult &OrigOp,
SourceLocation OpLoc) {
- if (OrigOp->isTypeDependent())
- return S.Context.DependentTy;
- if (OrigOp->getType() == S.Context.OverloadTy) {
- if (!isa<OverloadExpr>(OrigOp->IgnoreParens())) {
- S.Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
- << OrigOp->getSourceRange();
+ if (const BuiltinType *PTy = OrigOp.get()->getType()->getAsPlaceholderType()){
+ if (PTy->getKind() == BuiltinType::Overload) {
+ if (!isa<OverloadExpr>(OrigOp.get()->IgnoreParens())) {
+ S.Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
+ << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+
+ return S.Context.OverloadTy;
+ }
+
+ if (PTy->getKind() == BuiltinType::UnknownAny)
+ return S.Context.UnknownAnyTy;
+
+ if (PTy->getKind() == BuiltinType::BoundMember) {
+ S.Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
+ << OrigOp.get()->getSourceRange();
return QualType();
}
-
- return S.Context.OverloadTy;
- }
- if (OrigOp->getType() == S.Context.UnknownAnyTy)
- return S.Context.UnknownAnyTy;
- if (OrigOp->getType() == S.Context.BoundMemberTy) {
- S.Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
- << OrigOp->getSourceRange();
- return QualType();
+
+ OrigOp = S.CheckPlaceholderExpr(OrigOp.take());
+ if (OrigOp.isInvalid()) return QualType();
}
- assert(!OrigOp->getType()->isPlaceholderType());
+ if (OrigOp.get()->isTypeDependent())
+ return S.Context.DependentTy;
+
+ assert(!OrigOp.get()->getType()->isPlaceholderType());
// Make sure to ignore parentheses in subsequent checks
- Expr *op = OrigOp->IgnoreParens();
+ Expr *op = OrigOp.get()->IgnoreParens();
- if (S.getLangOptions().C99) {
+ if (S.getLangOpts().C99) {
// Implement C99-only parts of addressof rules.
if (UnaryOperator* uOp = dyn_cast<UnaryOperator>(op)) {
if (uOp->getOpcode() == UO_Deref)
@@ -7426,16 +7628,16 @@ static QualType CheckAddressOfOperand(Sema &S, Expr *OrigOp,
// If the underlying expression isn't a decl ref, give up.
if (!isa<DeclRefExpr>(op)) {
S.Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
- << OrigOp->getSourceRange();
+ << OrigOp.get()->getSourceRange();
return QualType();
}
DeclRefExpr *DRE = cast<DeclRefExpr>(op);
CXXMethodDecl *MD = cast<CXXMethodDecl>(DRE->getDecl());
// The id-expression was parenthesized.
- if (OrigOp != DRE) {
+ if (OrigOp.get() != DRE) {
S.Diag(OpLoc, diag::err_parens_pointer_member_function)
- << OrigOp->getSourceRange();
+ << OrigOp.get()->getSourceRange();
// The method was named without a qualifier.
} else if (!DRE->getQualifier()) {
@@ -7449,10 +7651,15 @@ static QualType CheckAddressOfOperand(Sema &S, Expr *OrigOp,
// C99 6.5.3.2p1
// The operand must be either an l-value or a function designator
if (!op->getType()->isFunctionType()) {
- // FIXME: emit more specific diag...
- S.Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
- << op->getSourceRange();
- return QualType();
+ // Use a special diagnostic for loads from property references.
+ if (isa<PseudoObjectExpr>(op)) {
+ AddressOfError = AO_Property_Expansion;
+ } else {
+ // FIXME: emit more specific diag...
+ S.Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
+ << op->getSourceRange();
+ return QualType();
+ }
}
} else if (op->getObjectKind() == OK_BitField) { // C99 6.5.3.2p1
// The operand cannot be a bit-field
@@ -7460,9 +7667,6 @@ static QualType CheckAddressOfOperand(Sema &S, Expr *OrigOp,
} else if (op->getObjectKind() == OK_VectorComponent) {
// The operand cannot be an element of a vector
AddressOfError = AO_Vector_Element;
- } else if (op->getObjectKind() == OK_ObjCProperty) {
- // cannot take address of a property expression.
- AddressOfError = AO_Property_Expansion;
} else if (dcl) { // C99 6.5.3.2p1
// We have an lvalue with a decl. Make sure the decl is not declared
// with the register storage-class specifier.
@@ -7470,7 +7674,7 @@ static QualType CheckAddressOfOperand(Sema &S, Expr *OrigOp,
// in C++ it is not error to take address of a register
// variable (c++03 7.1.1P3)
if (vd->getStorageClass() == SC_Register &&
- !S.getLangOptions().CPlusPlus) {
+ !S.getLangOpts().CPlusPlus) {
AddressOfError = AO_Register_Variable;
}
} else if (isa<FunctionTemplateDecl>(dcl)) {
@@ -7562,7 +7766,7 @@ static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
VK = VK_LValue;
// ...except that certain expressions are never l-values in C.
- if (!S.getLangOptions().CPlusPlus && Result.isCForbiddenLValueType())
+ if (!S.getLangOpts().CPlusPlus && Result.isCForbiddenLValueType())
VK = VK_RValue;
return Result;
@@ -7669,6 +7873,25 @@ static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr) {
+ if (getLangOpts().CPlusPlus0x && isa<InitListExpr>(RHSExpr)) {
+ // The syntax only allows initializer lists on the RHS of assignment,
+ // so we don't need to worry about accepting invalid code for
+ // non-assignment operators.
+ // C++11 5.17p9:
+ // The meaning of x = {v} [...] is that of x = T(v) [...]. The meaning
+ // of x = {} is x = T().
+ InitializationKind Kind =
+ InitializationKind::CreateDirectList(RHSExpr->getLocStart());
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemporary(LHSExpr->getType());
+ InitializationSequence InitSeq(*this, Entity, Kind, &RHSExpr, 1);
+ ExprResult Init = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(&RHSExpr, 1));
+ if (Init.isInvalid())
+ return Init;
+ RHSExpr = Init.take();
+ }
+
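A minimal C++11 sketch; CreateBuiltinBinOp only sees non-overloadable LHS types, so a scalar makes the clearest example:

    int x;
    x = {3};     // as if x = int(3)
    x = {};      // as if x = int()
    x = {3.5};   // ill-formed: narrowing inside a braced list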
ExprResult LHS = Owned(LHSExpr), RHS = Owned(RHSExpr);
QualType ResultTy; // Result type of the binary operator.
// The following two variables are used for compound assignment operators
@@ -7677,27 +7900,10 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
ExprValueKind VK = VK_RValue;
ExprObjectKind OK = OK_Ordinary;
- // Check if a 'foo<int>' involved in a binary op, identifies a single
- // function unambiguously (i.e. an lvalue ala 13.4)
- // But since an assignment can trigger target based overload, exclude it in
- // our blind search. i.e:
- // template<class T> void f(); template<class T, class U> void f(U);
- // f<int> == 0; // resolve f<int> blindly
- // void (*p)(int); p = f<int>; // resolve f<int> using target
- if (Opc != BO_Assign) {
- ExprResult resolvedLHS = CheckPlaceholderExpr(LHS.get());
- if (!resolvedLHS.isUsable()) return ExprError();
- LHS = move(resolvedLHS);
-
- ExprResult resolvedRHS = CheckPlaceholderExpr(RHS.get());
- if (!resolvedRHS.isUsable()) return ExprError();
- RHS = move(resolvedRHS);
- }
-
switch (Opc) {
case BO_Assign:
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
LHS.get()->getObjectKind() != OK_ObjCProperty) {
VK = LHS.get()->getValueKind();
OK = LHS.get()->getObjectKind();
@@ -7719,7 +7925,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
ResultTy = CheckRemainderOperands(LHS, RHS, OpLoc);
break;
case BO_Add:
- ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc);
+ ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc);
break;
case BO_Sub:
ResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc);
@@ -7762,7 +7968,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
break;
case BO_AddAssign:
- CompResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, &CompLHSTy);
+ CompResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy);
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
break;
@@ -7788,7 +7994,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
break;
case BO_Comma:
ResultTy = CheckCommaOperands(*this, LHS, RHS, OpLoc);
- if (getLangOptions().CPlusPlus && !RHS.isInvalid()) {
+ if (getLangOpts().CPlusPlus && !RHS.isInvalid()) {
VK = RHS.get()->getValueKind();
OK = RHS.get()->getObjectKind();
}
@@ -7804,7 +8010,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
if (CompResultTy.isNull())
return Owned(new (Context) BinaryOperator(LHS.take(), RHS.take(), Opc,
ResultTy, VK, OK, OpLoc));
- if (getLangOptions().CPlusPlus && LHS.get()->getObjectKind() !=
+ if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() !=
OK_ObjCProperty) {
VK = VK_LValue;
OK = LHS.get()->getObjectKind();
@@ -7989,38 +8195,98 @@ ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc,
return BuildBinOp(S, TokLoc, Opc, LHSExpr, RHSExpr);
}
+/// Build an overloaded binary operator expression in the given scope.
+static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHS, Expr *RHS) {
+ // Find all of the overloaded operators visible from this
+ // point. We perform both an operator-name lookup from the local
+ // scope and an argument-dependent lookup based on the types of
+ // the arguments.
+ UnresolvedSet<16> Functions;
+ OverloadedOperatorKind OverOp
+ = BinaryOperator::getOverloadedOperator(Opc);
+ if (Sc && OverOp != OO_None)
+ S.LookupOverloadedOperatorName(OverOp, Sc, LHS->getType(),
+ RHS->getType(), Functions);
+
+ // Build the (potentially-overloaded, potentially-dependent)
+ // binary operation.
+ return S.CreateOverloadedBinOp(OpLoc, Opc, Functions, LHS, RHS);
+}
+
ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr) {
- if (getLangOptions().CPlusPlus) {
- bool UseBuiltinOperator;
-
- if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent()) {
- UseBuiltinOperator = false;
- } else if (Opc == BO_Assign &&
- LHSExpr->getObjectKind() == OK_ObjCProperty) {
- UseBuiltinOperator = true;
- } else {
- UseBuiltinOperator = !LHSExpr->getType()->isOverloadableType() &&
- !RHSExpr->getType()->isOverloadableType();
+ // We want to end up calling one of checkPseudoObjectAssignment
+ // (if the LHS is a pseudo-object), BuildOverloadedBinOp (if
+ // both expressions are overloadable or either is type-dependent),
+ // or CreateBuiltinBinOp (in any other case). We also want to get
+ // any placeholder types out of the way.
+
+ // Handle pseudo-objects in the LHS.
+ if (const BuiltinType *pty = LHSExpr->getType()->getAsPlaceholderType()) {
+ // Assignments with a pseudo-object l-value need special analysis.
+ if (pty->getKind() == BuiltinType::PseudoObject &&
+ BinaryOperator::isAssignmentOp(Opc))
+ return checkPseudoObjectAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ // Don't resolve overloads if the other type is overloadable.
+ if (pty->getKind() == BuiltinType::Overload) {
+ // We can't actually test that if we still have a placeholder,
+ // though. Fortunately, none of the exceptions we see in that
+ // code below are valid when the LHS is an overload set. Note
+ // that an overload set can be dependently-typed, but it never
+ // instantiates to having an overloadable type.
+ ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr);
+ if (resolvedRHS.isInvalid()) return ExprError();
+ RHSExpr = resolvedRHS.take();
+
+ if (RHSExpr->isTypeDependent() ||
+ RHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
}
+
+ ExprResult LHS = CheckPlaceholderExpr(LHSExpr);
+ if (LHS.isInvalid()) return ExprError();
+ LHSExpr = LHS.take();
+ }
+
+ // Handle pseudo-objects in the RHS.
+ if (const BuiltinType *pty = RHSExpr->getType()->getAsPlaceholderType()) {
+ // An overload in the RHS can potentially be resolved by the type
+ // being assigned to.
+ if (Opc == BO_Assign && pty->getKind() == BuiltinType::Overload) {
+ if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
- if (!UseBuiltinOperator) {
- // Find all of the overloaded operators visible from this
- // point. We perform both an operator-name lookup from the local
- // scope and an argument-dependent lookup based on the types of
- // the arguments.
- UnresolvedSet<16> Functions;
- OverloadedOperatorKind OverOp
- = BinaryOperator::getOverloadedOperator(Opc);
- if (S && OverOp != OO_None)
- LookupOverloadedOperatorName(OverOp, S, LHSExpr->getType(),
- RHSExpr->getType(), Functions);
+ if (LHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
- // Build the (potentially-overloaded, potentially-dependent)
- // binary operation.
- return CreateOverloadedBinOp(OpLoc, Opc, Functions, LHSExpr, RHSExpr);
+ return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr);
}
+
+ // Don't resolve overloads if the other type is overloadable.
+ if (pty->getKind() == BuiltinType::Overload &&
+ LHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr);
+ if (!resolvedRHS.isUsable()) return ExprError();
+ RHSExpr = resolvedRHS.take();
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // If either expression is type-dependent, always build an
+ // overloaded op.
+ if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ // Otherwise, build an overloaded op if either expression has an
+ // overloadable type.
+ if (LHSExpr->getType()->isOverloadableType() ||
+ RHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
}
// Build a built-in binary operation.
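
A minimal sketch of the dispatch implemented above, assuming a standalone C++11 translation unit (the type `Str` is invented for illustration): class-type operands route through BuildOverloadedBinOp, while scalar operands fall through to the built-in path.

    struct Str { Str operator+(const Str &) const { return *this; } };
    int demo() {
      int i = 1 + 2;          // scalar operands: built-in '+'
      Str s = Str() + Str();  // class-type operand: overloaded-operator path
      (void)s;
      return i;
    }
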
@@ -8046,12 +8312,9 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
Opc == UO_PreDec);
break;
case UO_AddrOf:
- resultType = CheckAddressOfOperand(*this, Input.get(), OpLoc);
+ resultType = CheckAddressOfOperand(*this, Input, OpLoc);
break;
case UO_Deref: {
- ExprResult resolved = CheckPlaceholderExpr(Input.get());
- if (!resolved.isUsable()) return ExprError();
- Input = move(resolved);
Input = DefaultFunctionArrayLvalueConversion(Input.take());
resultType = CheckIndirectionOperand(*this, Input.get(), VK, OpLoc);
break;
@@ -8066,18 +8329,13 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
if (resultType->isArithmeticType() || // C99 6.5.3.3p1
resultType->isVectorType())
break;
- else if (getLangOptions().CPlusPlus && // C++ [expr.unary.op]p6-7
+ else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6-7
resultType->isEnumeralType())
break;
- else if (getLangOptions().CPlusPlus && // C++ [expr.unary.op]p6
+ else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6
Opc == UO_Plus &&
resultType->isPointerType())
break;
- else if (resultType->isPlaceholderType()) {
- Input = CheckPlaceholderExpr(Input.take());
- if (Input.isInvalid()) return ExprError();
- return CreateBuiltinUnaryOp(OpLoc, Opc, Input.take());
- }
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
@@ -8095,11 +8353,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
<< resultType << Input.get()->getSourceRange();
else if (resultType->hasIntegerRepresentation())
break;
- else if (resultType->isPlaceholderType()) {
- Input = CheckPlaceholderExpr(Input.take());
- if (Input.isInvalid()) return ExprError();
- return CreateBuiltinUnaryOp(OpLoc, Opc, Input.take());
- } else {
+ else {
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
}
@@ -8121,16 +8375,16 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
break;
if (resultType->isScalarType()) {
// C99 6.5.3.3p1: ok, fallthrough;
- if (Context.getLangOptions().CPlusPlus) {
+ if (Context.getLangOpts().CPlusPlus) {
// C++03 [expr.unary.op]p8, C++0x [expr.unary.op]p9:
// operand contextually converted to bool.
Input = ImpCastExprToType(Input.take(), Context.BoolTy,
ScalarTypeToBooleanCastKind(resultType));
}
- } else if (resultType->isPlaceholderType()) {
- Input = CheckPlaceholderExpr(Input.take());
- if (Input.isInvalid()) return ExprError();
- return CreateBuiltinUnaryOp(OpLoc, Opc, Input.take());
+ } else if (resultType->isExtVectorType()) {
+ // Vector logical not returns the signed variant of the operand type.
+ resultType = GetSignedVectorType(resultType);
+ break;
} else {
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
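
To illustrate the new extended-vector branch, a sketch relying on Clang's ext_vector_type attribute (the typedef names are invented): logical not applies element-wise and yields the signed integer vector variant of the operand type.

    typedef float float4 __attribute__((ext_vector_type(4)));
    typedef int   int4   __attribute__((ext_vector_type(4)));
    int4 vnot(float4 v) {
      return !v;  // element-wise !, result is the signed variant (int4)
    }
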
@@ -8143,11 +8397,17 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
case UO_Real:
case UO_Imag:
resultType = CheckRealImagOperand(*this, Input, OpLoc, Opc == UO_Real);
- // _Real and _Imag map ordinary l-values into ordinary l-values.
+ // _Real maps ordinary l-values into ordinary l-values. _Imag maps ordinary
+ // complex l-values to ordinary l-values and all other values to r-values.
if (Input.isInvalid()) return ExprError();
- if (Input.get()->getValueKind() != VK_RValue &&
- Input.get()->getObjectKind() == OK_Ordinary)
- VK = Input.get()->getValueKind();
+ if (Opc == UO_Real || Input.get()->getType()->isAnyComplexType()) {
+ if (Input.get()->getValueKind() != VK_RValue &&
+ Input.get()->getObjectKind() == OK_Ordinary)
+ VK = Input.get()->getValueKind();
+ } else if (!getLangOpts().CPlusPlus) {
+ // In C, a volatile scalar is read by __imag. In C++, it is not.
+ Input = DefaultLvalueConversion(Input.take());
+ }
break;
case UO_Extension:
resultType = Input.get()->getType();
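
A short example of the revised value-kind rules, as a sketch using the GNU `__real__`/`__imag__` extensions that Clang supports (the names are illustrative):

    _Complex double z;
    void realimag(volatile double vd) {
      __real__ z = 1.0;       // on a complex l-value, the result is an l-value
      double i = __imag__ z;  // likewise for __imag__ of a complex
      double x = __imag__ vd; // non-complex operand: an r-value; in C this
                              // reads the volatile scalar, in C++ it does not
      (void)i; (void)x;
    }
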
@@ -8169,10 +8429,79 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
VK, OK, OpLoc));
}
+/// \brief Determine whether the given expression is a qualified member
+/// access expression, of a form that could be turned into a pointer to member
+/// with the address-of operator.
+static bool isQualifiedMemberAccess(Expr *E) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (!DRE->getQualifier())
+ return false;
+
+ ValueDecl *VD = DRE->getDecl();
+ if (!VD->isCXXClassMember())
+ return false;
+
+ if (isa<FieldDecl>(VD) || isa<IndirectFieldDecl>(VD))
+ return true;
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(VD))
+ return Method->isInstance();
+
+ return false;
+ }
+
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
+ if (!ULE->getQualifier())
+ return false;
+
+ for (UnresolvedLookupExpr::decls_iterator D = ULE->decls_begin(),
+ DEnd = ULE->decls_end();
+ D != DEnd; ++D) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*D)) {
+ if (Method->isInstance())
+ return true;
+ } else {
+ // Overload set does not contain methods.
+ break;
+ }
+ }
+
+ return false;
+ }
+
+ return false;
+}
+
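
The effect of isQualifiedMemberAccess, sketched for C++11 (the type `S` is invented): taking the address of a qualified member name must form a pointer to member through the built-in '&', bypassing overloaded-operator lookup even when the class overloads operator&.

    struct S {
      int x;
      void f();
      S *operator&();          // would otherwise be an operator& candidate
    };
    int  S::*pmd = &S::x;      // qualified member access: built-in '&'
    void (S::*pmf)() = &S::f;
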
ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input) {
- if (getLangOptions().CPlusPlus && Input->getType()->isOverloadableType() &&
- UnaryOperator::getOverloadedOperator(Opc) != OO_None) {
+ // First things first: handle placeholders so that the
+ // overloaded-operator check considers the right type.
+ if (const BuiltinType *pty = Input->getType()->getAsPlaceholderType()) {
+ // Increment and decrement of pseudo-object references.
+ if (pty->getKind() == BuiltinType::PseudoObject &&
+ UnaryOperator::isIncrementDecrementOp(Opc))
+ return checkPseudoObjectIncDec(S, OpLoc, Opc, Input);
+
+    // __extension__ is always a builtin operator.
+ if (Opc == UO_Extension)
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+
+ // & gets special logic for several kinds of placeholder.
+ // The builtin code knows what to do.
+ if (Opc == UO_AddrOf &&
+ (pty->getKind() == BuiltinType::Overload ||
+ pty->getKind() == BuiltinType::UnknownAny ||
+ pty->getKind() == BuiltinType::BoundMember))
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+
+ // Anything else needs to be handled now.
+ ExprResult Result = CheckPlaceholderExpr(Input);
+ if (Result.isInvalid()) return ExprError();
+ Input = Result.take();
+ }
+
+ if (getLangOpts().CPlusPlus && Input->getType()->isOverloadableType() &&
+ UnaryOperator::getOverloadedOperator(Opc) != OO_None &&
+ !(Opc == UO_AddrOf && isQualifiedMemberAccess(Input))) {
// Find all of the overloaded operators visible from this
// point. We perform both an operator-name lookup from the local
// scope and an argument-dependent lookup based on the types of
@@ -8227,12 +8556,29 @@ static Expr *maybeRebuildARCConsumingStmt(Stmt *Statement) {
return cleanups;
}
+void Sema::ActOnStartStmtExpr() {
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+}
+
+void Sema::ActOnStmtExprError() {
+  // Note that this function is also called by TreeTransform when leaving a
+ // StmtExpr scope without rebuilding anything.
+
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+}
+
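
For reference, the construct these callbacks bracket is the GNU statement expression, sketched below (an extension Clang accepts in both C and C++); the value of the final expression statement becomes the value of the whole ({...}).

    int stmt_expr_demo(void) {
      int n = ({ int tmp = 40; tmp + 2; });  // n == 42
      return n;
    }
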
ExprResult
Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc) { // "({..})"
assert(SubStmt && isa<CompoundStmt>(SubStmt) && "Invalid action invocation!");
CompoundStmt *Compound = cast<CompoundStmt>(SubStmt);
+ if (hasAnyUnrecoverableErrorsInThisFunction())
+ DiscardCleanupsInEvaluationContext();
+ assert(!ExprNeedsCleanups && "cleanups within StmtExpr not correctly bound!");
+ PopExpressionEvaluationContext();
+
bool isFileScope
= (getCurFunctionOrMethodDecl() == 0) && (getCurBlock() == 0);
if (isFileScope)
@@ -8353,15 +8699,19 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
} else
CurrentType = Context.DependentTy;
+ ExprResult IdxRval = DefaultLvalueConversion(static_cast<Expr*>(OC.U.E));
+ if (IdxRval.isInvalid())
+ return ExprError();
+ Expr *Idx = IdxRval.take();
+
// The expression must be an integral expression.
// FIXME: An integral constant expression?
- Expr *Idx = static_cast<Expr*>(OC.U.E);
if (!Idx->isTypeDependent() && !Idx->isValueDependent() &&
!Idx->getType()->isIntegerType())
return ExprError(Diag(Idx->getLocStart(),
diag::err_typecheck_subscript_not_integer)
<< Idx->getSourceRange());
-
+
// Record this array index.
Comps.push_back(OffsetOfNode(OC.LocStart, Exprs.size(), OC.LocEnd));
Exprs.push_back(Idx);
@@ -8500,11 +8850,11 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
} else {
// The conditional expression is required to be a constant expression.
llvm::APSInt condEval(32);
- SourceLocation ExpLoc;
- if (!CondExpr->isIntegerConstantExpr(condEval, Context, &ExpLoc))
- return ExprError(Diag(ExpLoc,
- diag::err_typecheck_choose_expr_requires_constant)
- << CondExpr->getSourceRange());
+ ExprResult CondICE = VerifyIntegerConstantExpression(CondExpr, &condEval,
+ PDiag(diag::err_typecheck_choose_expr_requires_constant), false);
+ if (CondICE.isInvalid())
+ return ExprError();
+ CondExpr = CondICE.take();
// If the condition is > zero, then the AST type is the same as the LSHExpr.
Expr *ActiveExpr = condEval.getZExtValue() ? LHSExpr : RHSExpr;
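
A sketch of the requirement enforced here, written in C-compatible form around Clang's __builtin_choose_expr (the variable names are illustrative): the condition must be an integer constant expression, and only the chosen arm determines the result type.

    int i; double d;
    // '1' is an ICE, so the whole expression has the type of 'i'.
    __typeof__(__builtin_choose_expr(1, i, d)) r;  // r has type int
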
@@ -8534,6 +8884,12 @@ void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope) {
PushDeclContext(CurScope, Block);
else
CurContext = Block;
+
+ getCurBlock()->HasImplicitReturnType = true;
+
+ // Enter a new evaluation context to insulate the block from any
+ // cleanups from the enclosing full-expression.
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
}
void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
@@ -8585,7 +8941,7 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
// Don't allow returning a objc interface by value.
if (RetTy->isObjCObjectType()) {
- Diag(ParamInfo.getSourceRange().getBegin(),
+ Diag(ParamInfo.getLocStart(),
diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy;
return;
}
@@ -8594,8 +8950,11 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
// return type. TODO: what should we do with declarators like:
// ^ * { ... }
// If the answer is "apply template argument deduction"....
- if (RetTy != Context.DependentTy)
+ if (RetTy != Context.DependentTy) {
CurBlock->ReturnType = RetTy;
+ CurBlock->TheDecl->setBlockMissingReturnType(false);
+ CurBlock->HasImplicitReturnType = false;
+ }
// Push block parameters from the declarator if we had them.
SmallVector<ParmVarDecl*, 8> Params;
@@ -8605,7 +8964,7 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
if (Param->getIdentifier() == 0 &&
!Param->isImplicit() &&
!Param->isInvalidDecl() &&
- !getLangOptions().CPlusPlus)
+ !getLangOpts().CPlusPlus)
Diag(Param->getLocation(), diag::err_parameter_name_omitted);
Params.push_back(Param);
}
@@ -8617,7 +8976,7 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
I = Fn->arg_type_begin(), E = Fn->arg_type_end(); I != E; ++I) {
ParmVarDecl *Param =
BuildParmVarDeclForTypedef(CurBlock->TheDecl,
- ParamInfo.getSourceRange().getBegin(),
+ ParamInfo.getLocStart(),
*I);
Params.push_back(Param);
}
@@ -8634,12 +8993,6 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
// Finally we can process decl attributes.
ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo);
- if (!isVariadic && CurBlock->TheDecl->getAttr<SentinelAttr>()) {
- Diag(ParamInfo.getAttributes()->getLoc(),
- diag::warn_attribute_sentinel_not_variadic) << 1;
- // FIXME: remove the attribute.
- }
-
// Put the parameter variables in scope. We can bail out immediately
// if we don't have any.
if (Params.empty())
@@ -8661,9 +9014,13 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) {
+ // Leave the expression-evaluation context.
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+
// Pop off CurBlock, handle nested blocks.
PopDeclContext();
- PopFunctionOrBlockScope();
+ PopFunctionScopeInfo();
}
/// ActOnBlockStmtExpr - This is called when the body of a block statement
@@ -8674,6 +9031,12 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
if (!LangOpts.Blocks)
Diag(CaretLoc, diag::err_blocks_disable);
+ // Leave the expression-evaluation context.
+ if (hasAnyUnrecoverableErrorsInThisFunction())
+ DiscardCleanupsInEvaluationContext();
+ assert(!ExprNeedsCleanups && "cleanups within block not correctly bound!");
+ PopExpressionEvaluationContext();
+
BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back());
PopDeclContext();
@@ -8686,8 +9049,18 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
QualType BlockTy;
// Set the captured variables on the block.
- BSI->TheDecl->setCaptures(Context, BSI->Captures.begin(), BSI->Captures.end(),
- BSI->CapturesCXXThis);
+ // FIXME: Share capture structure between BlockDecl and CapturingScopeInfo!
+ SmallVector<BlockDecl::Capture, 4> Captures;
+ for (unsigned i = 0, e = BSI->Captures.size(); i != e; i++) {
+ CapturingScopeInfo::Capture &Cap = BSI->Captures[i];
+ if (Cap.isThisCapture())
+ continue;
+ BlockDecl::Capture NewCap(Cap.getVariable(), Cap.isBlockCapture(),
+ Cap.isNested(), Cap.getCopyExpr());
+ Captures.push_back(NewCap);
+ }
+ BSI->TheDecl->setCaptures(Context, Captures.begin(), Captures.end(),
+ BSI->CXXThisCaptureIndex != 0);
// If the user wrote a function type in some form, try to use that.
if (!BSI->FunctionType.isNull()) {
@@ -8738,20 +9111,31 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
- for (BlockDecl::capture_const_iterator ci = BSI->TheDecl->capture_begin(),
- ce = BSI->TheDecl->capture_end(); ci != ce; ++ci) {
- const VarDecl *variable = ci->getVariable();
- QualType T = variable->getType();
- QualType::DestructionKind destructKind = T.isDestructedType();
- if (destructKind != QualType::DK_none)
- getCurFunction()->setHasBranchProtectedScope();
- }
-
computeNRVO(Body, getCurBlock());
BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
const AnalysisBasedWarnings::Policy &WP = AnalysisWarnings.getDefaultPolicy();
- PopFunctionOrBlockScope(&WP, Result->getBlockDecl(), Result);
+ PopFunctionScopeInfo(&WP, Result->getBlockDecl(), Result);
+
+ // If the block isn't obviously global, i.e. it captures anything at
+ // all, then we need to do a few things in the surrounding context:
+ if (Result->getBlockDecl()->hasCaptures()) {
+ // First, this expression has a new cleanup object.
+ ExprCleanupObjects.push_back(Result->getBlockDecl());
+ ExprNeedsCleanups = true;
+
+ // It also gets a branch-protected scope if any of the captured
+ // variables needs destruction.
+ for (BlockDecl::capture_const_iterator
+ ci = Result->getBlockDecl()->capture_begin(),
+ ce = Result->getBlockDecl()->capture_end(); ci != ce; ++ci) {
+ const VarDecl *var = ci->getVariable();
+ if (var->getType().isDestructedType() != QualType::DK_none) {
+ getCurFunction()->setHasBranchProtectedScope();
+ break;
+ }
+ }
+ }
return Owned(Result);
}
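
A sketch of the capturing case handled above, assuming -fblocks (the names are illustrative): a block that captures a local introduces a cleanup object in the enclosing full-expression, and a captured C++ object with a destructor marks the scope branch-protected.

    int block_demo(void) {
      int captured = 42;
      int (^blk)(void) = ^{ return captured; };  // capture => cleanup object
      return blk();
    }
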
@@ -8859,7 +9243,7 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
static void MakeObjCStringLiteralFixItHint(Sema& SemaRef, QualType DstType,
Expr *SrcExpr, FixItHint &Hint) {
- if (!SemaRef.getLangOptions().ObjC1)
+ if (!SemaRef.getLangOpts().ObjC1)
return;
const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
@@ -8874,8 +9258,15 @@ static void MakeObjCStringLiteralFixItHint(Sema& SemaRef, QualType DstType,
return;
}
- // Strip off any parens and casts.
- StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr->IgnoreParenCasts());
+ // Ignore any parens, implicit casts (should only be
+ // array-to-pointer decays), and not-so-opaque values. The last is
+ // important for making this trigger for property assignments.
+ SrcExpr = SrcExpr->IgnoreParenImpCasts();
+ if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(SrcExpr))
+ if (OV->getSourceExpr())
+ SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts();
+
+ StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr);
if (!SL || !SL->isAscii())
return;
@@ -8893,13 +9284,13 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
// Decode the result (notice that AST's are still created for extensions).
bool CheckInferredResultType = false;
bool isInvalid = false;
- unsigned DiagKind;
+ unsigned DiagKind = 0;
FixItHint Hint;
ConversionFixItGenerator ConvHints;
bool MayHaveConvFixit = false;
+ bool MayHaveFunctionDiff = false;
switch (ConvTy) {
- default: llvm_unreachable("Unknown conversion type");
case Compatible: return false;
case PointerToInt:
DiagKind = diag::ext_typecheck_convert_pointer_int;
@@ -8956,7 +9347,7 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
// expression, rather than a type), which should be done as part
// of a larger effort to fix checkPointerTypesForAssignment for
// C++ semantics.
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType))
return false;
DiagKind = diag::ext_typecheck_convert_discards_qualifiers;
@@ -8986,6 +9377,7 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
MayHaveConvFixit = true;
isInvalid = true;
+ MayHaveFunctionDiff = true;
break;
}
@@ -9015,17 +9407,23 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
// If we can fix the conversion, suggest the FixIts.
assert(ConvHints.isNull() || Hint.isNull());
if (!ConvHints.isNull()) {
- for (llvm::SmallVector<FixItHint, 1>::iterator
- HI = ConvHints.Hints.begin(), HE = ConvHints.Hints.end();
- HI != HE; ++HI)
+ for (std::vector<FixItHint>::iterator HI = ConvHints.Hints.begin(),
+ HE = ConvHints.Hints.end(); HI != HE; ++HI)
FDiag << *HI;
} else {
FDiag << Hint;
}
if (MayHaveConvFixit) { FDiag << (unsigned) (ConvHints.Kind); }
+ if (MayHaveFunctionDiff)
+ HandleFunctionTypeMismatch(FDiag, SecondType, FirstType);
+
Diag(Loc, FDiag);
+ if (SecondType == Context.OverloadTy)
+ NoteAllOverloadCandidates(OverloadExpr::find(SrcExpr).Expression,
+ FirstType);
+
if (CheckInferredResultType)
EmitRelatedResultTypeNote(SrcExpr);
@@ -9034,77 +9432,213 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
return isInvalid;
}
-bool Sema::VerifyIntegerConstantExpression(const Expr *E, llvm::APSInt *Result){
- llvm::APSInt ICEResult;
- if (E->isIntegerConstantExpr(ICEResult, Context)) {
+ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
+ llvm::APSInt *Result) {
+ return VerifyIntegerConstantExpression(E, Result,
+ PDiag(diag::err_expr_not_ice) << LangOpts.CPlusPlus);
+}
+
+ExprResult Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
+ PartialDiagnostic NotIceDiag,
+ bool AllowFold,
+ PartialDiagnostic FoldDiag) {
+ SourceLocation DiagLoc = E->getLocStart();
+
+ if (getLangOpts().CPlusPlus0x) {
+ // C++11 [expr.const]p5:
+ // If an expression of literal class type is used in a context where an
+ // integral constant expression is required, then that class type shall
+ // have a single non-explicit conversion function to an integral or
+ // unscoped enumeration type
+ ExprResult Converted;
+ if (NotIceDiag.getDiagID()) {
+ Converted = ConvertToIntegralOrEnumerationType(
+ DiagLoc, E,
+ PDiag(diag::err_ice_not_integral),
+ PDiag(diag::err_ice_incomplete_type),
+ PDiag(diag::err_ice_explicit_conversion),
+ PDiag(diag::note_ice_conversion_here),
+ PDiag(diag::err_ice_ambiguous_conversion),
+ PDiag(diag::note_ice_conversion_here),
+ PDiag(0),
+ /*AllowScopedEnumerations*/ false);
+ } else {
+ // The caller wants to silently enquire whether this is an ICE. Don't
+ // produce any diagnostics if it isn't.
+ Converted = ConvertToIntegralOrEnumerationType(
+ DiagLoc, E, PDiag(), PDiag(), PDiag(), PDiag(),
+ PDiag(), PDiag(), PDiag(), false);
+ }
+ if (Converted.isInvalid())
+ return Converted;
+ E = Converted.take();
+ if (!E->getType()->isIntegralOrUnscopedEnumerationType())
+ return ExprError();
+ } else if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
+ // An ICE must be of integral or unscoped enumeration type.
+ if (NotIceDiag.getDiagID())
+ Diag(DiagLoc, NotIceDiag) << E->getSourceRange();
+ return ExprError();
+ }
+
+ // Circumvent ICE checking in C++11 to avoid evaluating the expression twice
+ // in the non-ICE case.
+ if (!getLangOpts().CPlusPlus0x && E->isIntegerConstantExpr(Context)) {
if (Result)
- *Result = ICEResult;
- return false;
+ *Result = E->EvaluateKnownConstInt(Context);
+ return Owned(E);
}
Expr::EvalResult EvalResult;
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ EvalResult.Diag = &Notes;
+
+ // Try to evaluate the expression, and produce diagnostics explaining why it's
+ // not a constant expression as a side-effect.
+ bool Folded = E->EvaluateAsRValue(EvalResult, Context) &&
+ EvalResult.Val.isInt() && !EvalResult.HasSideEffects;
+
+ // In C++11, we can rely on diagnostics being produced for any expression
+ // which is not a constant expression. If no diagnostics were produced, then
+ // this is a constant expression.
+ if (Folded && getLangOpts().CPlusPlus0x && Notes.empty()) {
+ if (Result)
+ *Result = EvalResult.Val.getInt();
+ return Owned(E);
+ }
- if (!E->Evaluate(EvalResult, Context) || !EvalResult.Val.isInt() ||
- EvalResult.HasSideEffects) {
- Diag(E->getExprLoc(), diag::err_expr_not_ice) << E->getSourceRange();
+ // If our only note is the usual "invalid subexpression" note, just point
+ // the caret at its location rather than producing an essentially
+ // redundant note.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
- if (EvalResult.Diag) {
- // We only show the note if it's not the usual "invalid subexpression"
- // or if it's actually in a subexpression.
- if (EvalResult.Diag != diag::note_invalid_subexpr_in_ice ||
- E->IgnoreParens() != EvalResult.DiagExpr->IgnoreParens())
- Diag(EvalResult.DiagLoc, EvalResult.Diag);
+ if (!Folded || !AllowFold) {
+ if (NotIceDiag.getDiagID()) {
+ Diag(DiagLoc, NotIceDiag) << E->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ Diag(Notes[I].first, Notes[I].second);
}
- return true;
+ return ExprError();
}
- Diag(E->getExprLoc(), diag::ext_expr_not_ice) <<
- E->getSourceRange();
-
- if (EvalResult.Diag &&
- Diags.getDiagnosticLevel(diag::ext_expr_not_ice, EvalResult.DiagLoc)
- != DiagnosticsEngine::Ignored)
- Diag(EvalResult.DiagLoc, EvalResult.Diag);
+ if (FoldDiag.getDiagID())
+ Diag(DiagLoc, FoldDiag) << E->getSourceRange();
+ else
+ Diag(DiagLoc, diag::ext_expr_not_ice)
+ << E->getSourceRange() << LangOpts.CPlusPlus;
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ Diag(Notes[I].first, Notes[I].second);
if (Result)
*Result = EvalResult.Val.getInt();
- return false;
+ return Owned(E);
+}
+
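
A sketch of the C++11 [expr.const]p5 path above (the class `Size` is invented): a literal class with a single non-explicit conversion function to an integral type can appear where an integral constant expression is required.

    struct Size {
      constexpr operator int() const { return 4; }  // the single conversion
    };
    int buffer[Size()];  // C++11: converted to 4 via the conversion function
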
+namespace {
+  // Handle the case where we conclude that an expression we speculatively
+  // considered to be unevaluated is actually evaluated.
+ class TransformToPE : public TreeTransform<TransformToPE> {
+ typedef TreeTransform<TransformToPE> BaseTransform;
+
+ public:
+ TransformToPE(Sema &SemaRef) : BaseTransform(SemaRef) { }
+
+ // Make sure we redo semantic analysis
+ bool AlwaysRebuild() { return true; }
+
+ // Make sure we handle LabelStmts correctly.
+ // FIXME: This does the right thing, but maybe we need a more general
+ // fix to TreeTransform?
+ StmtResult TransformLabelStmt(LabelStmt *S) {
+ S->getDecl()->setStmt(0);
+ return BaseTransform::TransformLabelStmt(S);
+ }
+
+ // We need to special-case DeclRefExprs referring to FieldDecls which
+ // are not part of a member pointer formation; normal TreeTransforming
+ // doesn't catch this case because of the way we represent them in the AST.
+ // FIXME: This is a bit ugly; is it really the best way to handle this
+ // case?
+ //
+ // Error on DeclRefExprs referring to FieldDecls.
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E) {
+ if (isa<FieldDecl>(E->getDecl()) &&
+ SemaRef.ExprEvalContexts.back().Context != Sema::Unevaluated)
+ return SemaRef.Diag(E->getLocation(),
+ diag::err_invalid_non_static_member_use)
+ << E->getDecl() << E->getSourceRange();
+
+ return BaseTransform::TransformDeclRefExpr(E);
+ }
+
+ // Exception: filter out member pointer formation
+ ExprResult TransformUnaryOperator(UnaryOperator *E) {
+ if (E->getOpcode() == UO_AddrOf && E->getType()->isMemberPointerType())
+ return E;
+
+ return BaseTransform::TransformUnaryOperator(E);
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
+ };
+}
+
+ExprResult Sema::TranformToPotentiallyEvaluated(Expr *E) {
+ assert(ExprEvalContexts.back().Context == Unevaluated &&
+ "Should only transform unevaluated expressions");
+ ExprEvalContexts.back().Context =
+ ExprEvalContexts[ExprEvalContexts.size()-2].Context;
+ if (ExprEvalContexts.back().Context == Unevaluated)
+ return E;
+ return TransformToPE(*this).TransformExpr(E);
}
void
-Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext) {
+Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl,
+ bool IsDecltype) {
ExprEvalContexts.push_back(
ExpressionEvaluationContextRecord(NewContext,
- ExprTemporaries.size(),
- ExprNeedsCleanups));
+ ExprCleanupObjects.size(),
+ ExprNeedsCleanups,
+ LambdaContextDecl,
+ IsDecltype));
ExprNeedsCleanups = false;
+ if (!MaybeODRUseExprs.empty())
+ std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs);
}
void Sema::PopExpressionEvaluationContext() {
- // Pop the current expression evaluation context off the stack.
- ExpressionEvaluationContextRecord Rec = ExprEvalContexts.back();
- ExprEvalContexts.pop_back();
-
- if (Rec.Context == PotentiallyPotentiallyEvaluated) {
- if (Rec.PotentiallyReferenced) {
- // Mark any remaining declarations in the current position of the stack
- // as "referenced". If they were not meant to be referenced, semantic
- // analysis would have eliminated them (e.g., in ActOnCXXTypeId).
- for (PotentiallyReferencedDecls::iterator
- I = Rec.PotentiallyReferenced->begin(),
- IEnd = Rec.PotentiallyReferenced->end();
- I != IEnd; ++I)
- MarkDeclarationReferenced(I->first, I->second);
- }
-
- if (Rec.PotentiallyDiagnosed) {
- // Emit any pending diagnostics.
- for (PotentiallyEmittedDiagnostics::iterator
- I = Rec.PotentiallyDiagnosed->begin(),
- IEnd = Rec.PotentiallyDiagnosed->end();
- I != IEnd; ++I)
- Diag(I->first, I->second);
+ ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
+
+ if (!Rec.Lambdas.empty()) {
+ if (Rec.Context == Unevaluated) {
+ // C++11 [expr.prim.lambda]p2:
+ // A lambda-expression shall not appear in an unevaluated operand
+ // (Clause 5).
+ for (unsigned I = 0, N = Rec.Lambdas.size(); I != N; ++I)
+ Diag(Rec.Lambdas[I]->getLocStart(),
+ diag::err_lambda_unevaluated_operand);
+ } else {
+ // Mark the capture expressions odr-used. This was deferred
+ // during lambda expression creation.
+ for (unsigned I = 0, N = Rec.Lambdas.size(); I != N; ++I) {
+ LambdaExpr *Lambda = Rec.Lambdas[I];
+ for (LambdaExpr::capture_init_iterator
+ C = Lambda->capture_init_begin(),
+ CEnd = Lambda->capture_init_end();
+ C != CEnd; ++C) {
+ MarkDeclarationsReferencedInExpr(*C);
+ }
+ }
}
}
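
A sketch of the C++11 restriction diagnosed above: lambdas are fine in potentially evaluated contexts but may not appear inside unevaluated operands such as sizeof.

    auto ok = []{ return 1; }();             // potentially evaluated: fine
    // int bad = sizeof([]{ return 1; }());  // ill-formed: lambda-expression
    //                                       // in an unevaluated operand
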
@@ -9112,89 +9646,86 @@ void Sema::PopExpressionEvaluationContext() {
// temporaries that we may have created as part of the evaluation of
// the expression in that context: they aren't relevant because they
// will never be constructed.
- if (Rec.Context == Unevaluated) {
- ExprTemporaries.erase(ExprTemporaries.begin() + Rec.NumTemporaries,
- ExprTemporaries.end());
+ if (Rec.Context == Unevaluated || Rec.Context == ConstantEvaluated) {
+ ExprCleanupObjects.erase(ExprCleanupObjects.begin() + Rec.NumCleanupObjects,
+ ExprCleanupObjects.end());
ExprNeedsCleanups = Rec.ParentNeedsCleanups;
-
+ CleanupVarDeclMarking();
+ std::swap(MaybeODRUseExprs, Rec.SavedMaybeODRUseExprs);
// Otherwise, merge the contexts together.
} else {
ExprNeedsCleanups |= Rec.ParentNeedsCleanups;
+ MaybeODRUseExprs.insert(Rec.SavedMaybeODRUseExprs.begin(),
+ Rec.SavedMaybeODRUseExprs.end());
}
- // Destroy the popped expression evaluation record.
- Rec.Destroy();
+ // Pop the current expression evaluation context off the stack.
+ ExprEvalContexts.pop_back();
}
void Sema::DiscardCleanupsInEvaluationContext() {
- ExprTemporaries.erase(
- ExprTemporaries.begin() + ExprEvalContexts.back().NumTemporaries,
- ExprTemporaries.end());
+ ExprCleanupObjects.erase(
+ ExprCleanupObjects.begin() + ExprEvalContexts.back().NumCleanupObjects,
+ ExprCleanupObjects.end());
ExprNeedsCleanups = false;
+ MaybeODRUseExprs.clear();
}
-/// \brief Note that the given declaration was referenced in the source code.
-///
-/// This routine should be invoked whenever a given declaration is referenced
-/// in the source code, and where that reference occurred. If this declaration
-/// reference means that the declaration is used (C++ [basic.def.odr]p2,
-/// C99 6.9p3), then the declaration will be marked as used.
-///
-/// \param Loc the location where the declaration was referenced.
-///
-/// \param D the declaration that has been referenced by the source code.
-void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
- assert(D && "No declaration?");
-
- D->setReferenced();
-
- if (D->isUsed(false))
- return;
-
- // Mark a parameter or variable declaration "used", regardless of whether
- // we're in a template or not. The reason for this is that unevaluated
- // expressions (e.g. (void)sizeof()) constitute a use for warning purposes
- // (-Wunused-variables and -Wunused-parameters)
- if (isa<ParmVarDecl>(D) ||
- (isa<VarDecl>(D) && D->getDeclContext()->isFunctionOrMethod())) {
- D->setUsed();
- return;
- }
-
- if (!isa<VarDecl>(D) && !isa<FunctionDecl>(D))
- return;
+ExprResult Sema::HandleExprEvaluationContextForTypeof(Expr *E) {
+ if (!E->getType()->isVariablyModifiedType())
+ return E;
+ return TranformToPotentiallyEvaluated(E);
+}
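
A sketch of the rule above, using GNU __typeof__ and a VLA (valid C; Clang also accepts VLAs in C++ as an extension): a typeof operand is normally unevaluated, but one of variably modified type is re-analyzed as potentially evaluated.

    void typeof_demo(int n) {
      int a[n];          // 'a' has variably modified type
      __typeof__(a) b;   // operand transformed to potentially-evaluated
      (void)b;
    }
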
+static bool IsPotentiallyEvaluatedContext(Sema &SemaRef) {
// Do not mark anything as "used" within a dependent context; wait for
// an instantiation.
- if (CurContext->isDependentContext())
- return;
+ if (SemaRef.CurContext->isDependentContext())
+ return false;
- switch (ExprEvalContexts.back().Context) {
- case Unevaluated:
+ switch (SemaRef.ExprEvalContexts.back().Context) {
+ case Sema::Unevaluated:
// We are in an expression that is not potentially evaluated; do nothing.
- return;
+ // (Depending on how you read the standard, we actually do need to do
+ // something here for null pointer constants, but the standard's
+ // definition of a null pointer constant is completely crazy.)
+ return false;
- case PotentiallyEvaluated:
- // We are in a potentially-evaluated expression, so this declaration is
- // "used"; handle this below.
- break;
+ case Sema::ConstantEvaluated:
+ case Sema::PotentiallyEvaluated:
+ // We are in a potentially evaluated expression (or a constant-expression
+ // in C++03); we need to do implicit template instantiation, implicitly
+ // define class members, and mark most declarations as used.
+ return true;
- case PotentiallyPotentiallyEvaluated:
- // We are in an expression that may be potentially evaluated; queue this
- // declaration reference until we know whether the expression is
- // potentially evaluated.
- ExprEvalContexts.back().addReferencedDecl(Loc, D);
- return;
-
- case PotentiallyEvaluatedIfUsed:
+ case Sema::PotentiallyEvaluatedIfUsed:
// Referenced declarations will only be used if the construct in the
// containing expression is used.
- return;
+ return false;
}
+ llvm_unreachable("Invalid context");
+}
+
+/// \brief Mark a function referenced, and check whether it is odr-used
+/// (C++ [basic.def.odr]p2, C99 6.9p3)
+void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func) {
+ assert(Func && "No function?");
+
+ Func->setReferenced();
+
+ // Don't mark this function as used multiple times, unless it's a constexpr
+ // function which we need to instantiate.
+ if (Func->isUsed(false) &&
+ !(Func->isConstexpr() && !Func->getBody() &&
+ Func->isImplicitlyInstantiable()))
+ return;
+
+ if (!IsPotentiallyEvaluatedContext(*this))
+ return;
// Note that this declaration has been used.
- if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
- if (Constructor->isDefaulted()) {
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
+ if (Constructor->isDefaulted() && !Constructor->isDeleted()) {
if (Constructor->isDefaultConstructor()) {
if (Constructor->isTrivial())
return;
@@ -9210,13 +9741,16 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
}
MarkVTableUsed(Loc, Constructor->getParent());
- } else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
- if (Destructor->isDefaulted() && !Destructor->isUsed(false))
+ } else if (CXXDestructorDecl *Destructor =
+ dyn_cast<CXXDestructorDecl>(Func)) {
+ if (Destructor->isDefaulted() && !Destructor->isDeleted() &&
+ !Destructor->isUsed(false))
DefineImplicitDestructor(Loc, Destructor);
if (Destructor->isVirtual())
MarkVTableUsed(Loc, Destructor->getParent());
- } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(D)) {
- if (MethodDecl->isDefaulted() && MethodDecl->isOverloadedOperator() &&
+ } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(Func)) {
+ if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted() &&
+ MethodDecl->isOverloadedOperator() &&
MethodDecl->getOverloadedOperator() == OO_Equal) {
if (!MethodDecl->isUsed(false)) {
if (MethodDecl->isCopyAssignmentOperator())
@@ -9224,90 +9758,703 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
else
DefineImplicitMoveAssignment(Loc, MethodDecl);
}
+ } else if (isa<CXXConversionDecl>(MethodDecl) &&
+ MethodDecl->getParent()->isLambda()) {
+ CXXConversionDecl *Conversion = cast<CXXConversionDecl>(MethodDecl);
+ if (Conversion->isLambdaToBlockPointerConversion())
+ DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion);
+ else
+ DefineImplicitLambdaToFunctionPointerConversion(Loc, Conversion);
} else if (MethodDecl->isVirtual())
MarkVTableUsed(Loc, MethodDecl->getParent());
}
- if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
- // Recursive functions should be marked when used from another function.
- if (CurContext == Function) return;
-
- // Implicit instantiation of function templates and member functions of
- // class templates.
- if (Function->isImplicitlyInstantiable()) {
- bool AlreadyInstantiated = false;
- if (FunctionTemplateSpecializationInfo *SpecInfo
- = Function->getTemplateSpecializationInfo()) {
- if (SpecInfo->getPointOfInstantiation().isInvalid())
- SpecInfo->setPointOfInstantiation(Loc);
- else if (SpecInfo->getTemplateSpecializationKind()
- == TSK_ImplicitInstantiation)
- AlreadyInstantiated = true;
- } else if (MemberSpecializationInfo *MSInfo
- = Function->getMemberSpecializationInfo()) {
- if (MSInfo->getPointOfInstantiation().isInvalid())
- MSInfo->setPointOfInstantiation(Loc);
- else if (MSInfo->getTemplateSpecializationKind()
- == TSK_ImplicitInstantiation)
- AlreadyInstantiated = true;
+
+ // Recursive functions should be marked when used from another function.
+ // FIXME: Is this really right?
+ if (CurContext == Func) return;
+
+ // Implicit instantiation of function templates and member functions of
+ // class templates.
+ if (Func->isImplicitlyInstantiable()) {
+ bool AlreadyInstantiated = false;
+ SourceLocation PointOfInstantiation = Loc;
+ if (FunctionTemplateSpecializationInfo *SpecInfo
+ = Func->getTemplateSpecializationInfo()) {
+ if (SpecInfo->getPointOfInstantiation().isInvalid())
+ SpecInfo->setPointOfInstantiation(Loc);
+ else if (SpecInfo->getTemplateSpecializationKind()
+ == TSK_ImplicitInstantiation) {
+ AlreadyInstantiated = true;
+ PointOfInstantiation = SpecInfo->getPointOfInstantiation();
+ }
+ } else if (MemberSpecializationInfo *MSInfo
+ = Func->getMemberSpecializationInfo()) {
+ if (MSInfo->getPointOfInstantiation().isInvalid())
+ MSInfo->setPointOfInstantiation(Loc);
+ else if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ImplicitInstantiation) {
+ AlreadyInstantiated = true;
+ PointOfInstantiation = MSInfo->getPointOfInstantiation();
}
+ }
- if (!AlreadyInstantiated) {
- if (isa<CXXRecordDecl>(Function->getDeclContext()) &&
- cast<CXXRecordDecl>(Function->getDeclContext())->isLocalClass())
- PendingLocalImplicitInstantiations.push_back(std::make_pair(Function,
- Loc));
+ if (!AlreadyInstantiated || Func->isConstexpr()) {
+ if (isa<CXXRecordDecl>(Func->getDeclContext()) &&
+ cast<CXXRecordDecl>(Func->getDeclContext())->isLocalClass())
+ PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Func, PointOfInstantiation));
+ else if (Func->isConstexpr())
+ // Do not defer instantiations of constexpr functions, to avoid the
+ // expression evaluator needing to call back into Sema if it sees a
+ // call to such a function.
+ InstantiateFunctionDefinition(PointOfInstantiation, Func);
+ else {
+ PendingInstantiations.push_back(std::make_pair(Func,
+ PointOfInstantiation));
+ // Notify the consumer that a function was implicitly instantiated.
+ Consumer.HandleCXXImplicitFunctionInstantiation(Func);
+ }
+ }
+ } else {
+ // Walk redefinitions, as some of them may be instantiable.
+ for (FunctionDecl::redecl_iterator i(Func->redecls_begin()),
+ e(Func->redecls_end()); i != e; ++i) {
+ if (!i->isUsed(false) && i->isImplicitlyInstantiable())
+ MarkFunctionReferenced(Loc, *i);
+ }
+ }
+
+ // Keep track of used but undefined functions.
+ if (!Func->isPure() && !Func->hasBody() &&
+ Func->getLinkage() != ExternalLinkage) {
+ SourceLocation &old = UndefinedInternals[Func->getCanonicalDecl()];
+ if (old.isInvalid()) old = Loc;
+ }
+
+ Func->setUsed(true);
+}
+
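
A sketch of why constexpr function templates are instantiated eagerly above (C++11; `square` is an invented name): the constant evaluator needs the body at the point where the call appears in a constant expression.

    template<typename T> constexpr T square(T t) { return t * t; }
    // Using square(3) in an array bound forces instantiation at the point
    // of use instead of deferring it to the end of the translation unit.
    int grid[square(3)];
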
+static void
+diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
+ VarDecl *var, DeclContext *DC) {
+ DeclContext *VarDC = var->getDeclContext();
+
+ // If the parameter still belongs to the translation unit, then
+ // we're actually just using one parameter in the declaration of
+ // the next.
+ if (isa<ParmVarDecl>(var) &&
+ isa<TranslationUnitDecl>(VarDC))
+ return;
+
+  // For C code, don't diagnose the capture if we're not actually in code
+ // right now; it's impossible to write a non-constant expression outside of
+ // function context, so we'll get other (more useful) diagnostics later.
+ //
+ // For C++, things get a bit more nasty... it would be nice to suppress this
+ // diagnostic for certain cases like using a local variable in an array bound
+ // for a member of a local class, but the correct predicate is not obvious.
+ if (!S.getLangOpts().CPlusPlus && !S.CurContext->isFunctionOrMethod())
+ return;
+
+ if (isa<CXXMethodDecl>(VarDC) &&
+ cast<CXXRecordDecl>(VarDC->getParent())->isLambda()) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_lambda)
+ << var->getIdentifier();
+ } else if (FunctionDecl *fn = dyn_cast<FunctionDecl>(VarDC)) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_function)
+ << var->getIdentifier() << fn->getDeclName();
+ } else if (isa<BlockDecl>(VarDC)) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_block)
+ << var->getIdentifier();
+ } else {
+ // FIXME: Is there any other context where a local variable can be
+ // declared?
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_context)
+ << var->getIdentifier();
+ }
+
+ S.Diag(var->getLocation(), diag::note_local_variable_declared_here)
+ << var->getIdentifier();
+
+ // FIXME: Add additional diagnostic info about class etc. which prevents
+ // capture.
+}
+
+/// \brief Capture the given variable in the given lambda expression.
+static ExprResult captureInLambda(Sema &S, LambdaScopeInfo *LSI,
+ VarDecl *Var, QualType FieldType,
+ QualType DeclRefType,
+ SourceLocation Loc) {
+ CXXRecordDecl *Lambda = LSI->Lambda;
+
+ // Build the non-static data member.
+ FieldDecl *Field
+ = FieldDecl::Create(S.Context, Lambda, Loc, Loc, 0, FieldType,
+ S.Context.getTrivialTypeSourceInfo(FieldType, Loc),
+ 0, false, false);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ Lambda->addDecl(Field);
+
+ // C++11 [expr.prim.lambda]p21:
+ // When the lambda-expression is evaluated, the entities that
+ // are captured by copy are used to direct-initialize each
+ // corresponding non-static data member of the resulting closure
+ // object. (For array members, the array elements are
+ // direct-initialized in increasing subscript order.) These
+ // initializations are performed in the (unspecified) order in
+ // which the non-static data members are declared.
+
+ // Introduce a new evaluation context for the initialization, so
+ // that temporaries introduced as part of the capture are retained
+ // to be re-"exported" from the lambda expression itself.
+ S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+
+  // C++ [expr.prim.lambda]p12:
+ // An entity captured by a lambda-expression is odr-used (3.2) in
+ // the scope containing the lambda-expression.
+ Expr *Ref = new (S.Context) DeclRefExpr(Var, false, DeclRefType,
+ VK_LValue, Loc);
+ Var->setReferenced(true);
+ Var->setUsed(true);
+
+ // When the field has array type, create index variables for each
+ // dimension of the array. We use these index variables to subscript
+ // the source array, and other clients (e.g., CodeGen) will perform
+ // the necessary iteration with these index variables.
+ SmallVector<VarDecl *, 4> IndexVariables;
+ QualType BaseType = FieldType;
+ QualType SizeType = S.Context.getSizeType();
+ LSI->ArrayIndexStarts.push_back(LSI->ArrayIndexVars.size());
+ while (const ConstantArrayType *Array
+ = S.Context.getAsConstantArrayType(BaseType)) {
+ // Create the iteration variable for this array index.
+ IdentifierInfo *IterationVarName = 0;
+ {
+ SmallString<8> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "__i" << IndexVariables.size();
+ IterationVarName = &S.Context.Idents.get(OS.str());
+ }
+ VarDecl *IterationVar
+ = VarDecl::Create(S.Context, S.CurContext, Loc, Loc,
+ IterationVarName, SizeType,
+ S.Context.getTrivialTypeSourceInfo(SizeType, Loc),
+ SC_None, SC_None);
+ IndexVariables.push_back(IterationVar);
+ LSI->ArrayIndexVars.push_back(IterationVar);
+
+ // Create a reference to the iteration variable.
+ ExprResult IterationVarRef
+ = S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
+ assert(!IterationVarRef.isInvalid() &&
+ "Reference to invented variable cannot fail!");
+ IterationVarRef = S.DefaultLvalueConversion(IterationVarRef.take());
+ assert(!IterationVarRef.isInvalid() &&
+ "Conversion of invented variable cannot fail!");
+
+ // Subscript the array with this iteration variable.
+ ExprResult Subscript = S.CreateBuiltinArraySubscriptExpr(
+ Ref, Loc, IterationVarRef.take(), Loc);
+ if (Subscript.isInvalid()) {
+ S.CleanupVarDeclMarking();
+ S.DiscardCleanupsInEvaluationContext();
+ S.PopExpressionEvaluationContext();
+ return ExprError();
+ }
+
+ Ref = Subscript.take();
+ BaseType = Array->getElementType();
+ }
+
+ // Construct the entity that we will be initializing. For an array, this
+ // will be first element in the array, which may require several levels
+ // of array-subscript entities.
+ SmallVector<InitializedEntity, 4> Entities;
+ Entities.reserve(1 + IndexVariables.size());
+ Entities.push_back(
+ InitializedEntity::InitializeLambdaCapture(Var, Field, Loc));
+ for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
+ Entities.push_back(InitializedEntity::InitializeElement(S.Context,
+ 0,
+ Entities.back()));
+
+ InitializationKind InitKind
+ = InitializationKind::CreateDirect(Loc, Loc, Loc);
+ InitializationSequence Init(S, Entities.back(), InitKind, &Ref, 1);
+ ExprResult Result(true);
+ if (!Init.Diagnose(S, Entities.back(), InitKind, &Ref, 1))
+ Result = Init.Perform(S, Entities.back(), InitKind,
+ MultiExprArg(S, &Ref, 1));
+
+ // If this initialization requires any cleanups (e.g., due to a
+ // default argument to a copy constructor), note that for the
+ // lambda.
+ if (S.ExprNeedsCleanups)
+ LSI->ExprNeedsCleanups = true;
+
+ // Exit the expression evaluation context used for the capture.
+ S.CleanupVarDeclMarking();
+ S.DiscardCleanupsInEvaluationContext();
+ S.PopExpressionEvaluationContext();
+ return Result;
+}
+
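
A sketch of the array case captureInLambda handles (C++11): elements of an array captured by copy are direct-initialized in increasing subscript order, driven by the synthesized __i0-style index variables created above.

    void array_capture() {
      int arr[3] = {1, 2, 3};
      auto byCopy = [arr] { return arr[0]; };  // per-element initialization
      byCopy();
    }
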
+bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+ TryCaptureKind Kind, SourceLocation EllipsisLoc,
+ bool BuildAndDiagnose,
+ QualType &CaptureType,
+ QualType &DeclRefType) {
+ bool Nested = false;
+
+ DeclContext *DC = CurContext;
+ if (Var->getDeclContext() == DC) return true;
+ if (!Var->hasLocalStorage()) return true;
+
+ bool HasBlocksAttr = Var->hasAttr<BlocksAttr>();
+
+ // Walk up the stack to determine whether we can capture the variable,
+ // performing the "simple" checks that don't depend on type. We stop when
+ // we've either hit the declared scope of the variable or find an existing
+ // capture of that variable.
+ CaptureType = Var->getType();
+ DeclRefType = CaptureType.getNonReferenceType();
+ bool Explicit = (Kind != TryCapture_Implicit);
+ unsigned FunctionScopesIndex = FunctionScopes.size() - 1;
+ do {
+ // Only block literals and lambda expressions can capture; other
+ // scopes don't work.
+ DeclContext *ParentDC;
+ if (isa<BlockDecl>(DC))
+ ParentDC = DC->getParent();
+ else if (isa<CXXMethodDecl>(DC) &&
+ cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
+ cast<CXXRecordDecl>(DC->getParent())->isLambda())
+ ParentDC = DC->getParent()->getParent();
+ else {
+ if (BuildAndDiagnose)
+ diagnoseUncapturableValueReference(*this, Loc, Var, DC);
+ return true;
+ }
+
+ CapturingScopeInfo *CSI =
+ cast<CapturingScopeInfo>(FunctionScopes[FunctionScopesIndex]);
+
+ // Check whether we've already captured it.
+ if (CSI->CaptureMap.count(Var)) {
+ // If we found a capture, any subcaptures are nested.
+ Nested = true;
+
+ // Retrieve the capture type for this variable.
+ CaptureType = CSI->getCapture(Var).getCaptureType();
+
+ // Compute the type of an expression that refers to this variable.
+ DeclRefType = CaptureType.getNonReferenceType();
+
+ const CapturingScopeInfo::Capture &Cap = CSI->getCapture(Var);
+ if (Cap.isCopyCapture() &&
+ !(isa<LambdaScopeInfo>(CSI) && cast<LambdaScopeInfo>(CSI)->Mutable))
+ DeclRefType.addConst();
+ break;
+ }
+
+ bool IsBlock = isa<BlockScopeInfo>(CSI);
+ bool IsLambda = !IsBlock;
+
+ // Lambdas are not allowed to capture unnamed variables
+ // (e.g. anonymous unions).
+    // FIXME: The C++11 rules don't actually state this explicitly, but I'm
+ // assuming that's the intent.
+ if (IsLambda && !Var->getDeclName()) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_lambda_capture_anonymous_var);
+ Diag(Var->getLocation(), diag::note_declared_at);
+ }
+ return true;
+ }
+
+ // Prohibit variably-modified types; they're difficult to deal with.
+ if (Var->getType()->isVariablyModifiedType()) {
+ if (BuildAndDiagnose) {
+ if (IsBlock)
+ Diag(Loc, diag::err_ref_vm_type);
else
- PendingInstantiations.push_back(std::make_pair(Function, Loc));
+ Diag(Loc, diag::err_lambda_capture_vm_type) << Var->getDeclName();
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
}
+ return true;
+ }
+
+ // Lambdas are not allowed to capture __block variables; they don't
+ // support the expected semantics.
+ if (IsLambda && HasBlocksAttr) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_lambda_capture_block)
+ << Var->getDeclName();
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
+ }
+
+ if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None && !Explicit) {
+ // No capture-default
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_lambda_impcap) << Var->getDeclName();
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ Diag(cast<LambdaScopeInfo>(CSI)->Lambda->getLocStart(),
+ diag::note_lambda_decl);
+ }
+ return true;
+ }
+
+ FunctionScopesIndex--;
+ DC = ParentDC;
+ Explicit = false;
+ } while (!Var->getDeclContext()->Equals(DC));
+
+ // Walk back down the scope stack, computing the type of the capture at
+ // each step, checking type-specific requirements, and adding captures if
+ // requested.
+ for (unsigned I = ++FunctionScopesIndex, N = FunctionScopes.size(); I != N;
+ ++I) {
+ CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[I]);
+
+ // Compute the type of the capture and of a reference to the capture within
+ // this scope.
+ if (isa<BlockScopeInfo>(CSI)) {
+ Expr *CopyExpr = 0;
+ bool ByRef = false;
+
+ // Blocks are not allowed to capture arrays.
+ if (CaptureType->isArrayType()) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_ref_array_type);
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
+ }
+
+ // Forbid the block-capture of autoreleasing variables.
+ if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_arc_autoreleasing_capture)
+ << /*block*/ 0;
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
+ }
+
+ if (HasBlocksAttr || CaptureType->isReferenceType()) {
+ // Block capture by reference does not change the capture or
+ // declaration reference types.
+ ByRef = true;
+ } else {
+ // Block capture by copy introduces 'const'.
+ CaptureType = CaptureType.getNonReferenceType().withConst();
+ DeclRefType = CaptureType;
+
+ if (getLangOpts().CPlusPlus && BuildAndDiagnose) {
+ if (const RecordType *Record = DeclRefType->getAs<RecordType>()) {
+ // The capture logic needs the destructor, so make sure we mark it.
+ // Usually this is unnecessary because most local variables have
+ // their destructors marked at declaration time, but parameters are
+ // an exception because it's technically only the call site that
+ // actually requires the destructor.
+ if (isa<ParmVarDecl>(Var))
+ FinalizeVarWithDestructor(Var, Record);
+
+ // According to the blocks spec, the capture of a variable from
+ // the stack requires a const copy constructor. This is not true
+ // of the copy/move done to move a __block variable to the heap.
+ Expr *DeclRef = new (Context) DeclRefExpr(Var, false,
+ DeclRefType.withConst(),
+ VK_LValue, Loc);
+ ExprResult Result
+ = PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(Var->getLocation(),
+ CaptureType, false),
+ Loc, Owned(DeclRef));
+
+ // Build a full-expression copy expression if initialization
+ // succeeded and used a non-trivial constructor. Recover from
+ // errors by pretending that the copy isn't necessary.
+ if (!Result.isInvalid() &&
+ !cast<CXXConstructExpr>(Result.get())->getConstructor()
+ ->isTrivial()) {
+ Result = MaybeCreateExprWithCleanups(Result);
+ CopyExpr = Result.take();
+ }
+ }
+ }
+ }
+
+ // Actually capture the variable.
+ if (BuildAndDiagnose)
+ CSI->addCapture(Var, HasBlocksAttr, ByRef, Nested, Loc,
+ SourceLocation(), CaptureType, CopyExpr);
+ Nested = true;
+ continue;
+ }
+
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI);
+
+ // Determine whether we are capturing by reference or by value.
+ bool ByRef = false;
+ if (I == N - 1 && Kind != TryCapture_Implicit) {
+ ByRef = (Kind == TryCapture_ExplicitByRef);
+ } else {
+ ByRef = (LSI->ImpCaptureStyle == LambdaScopeInfo::ImpCap_LambdaByref);
+ }
+
+ // Compute the type of the field that will capture this variable.
+ if (ByRef) {
+ // C++11 [expr.prim.lambda]p15:
+ // An entity is captured by reference if it is implicitly or
+ // explicitly captured but not captured by copy. It is
+ // unspecified whether additional unnamed non-static data
+ // members are declared in the closure type for entities
+ // captured by reference.
+ //
+ // FIXME: It is not clear whether we want to build an lvalue reference
+ // to the DeclRefType or to CaptureType.getNonReferenceType(). GCC appears
+ // to do the former, while EDG does the latter. Core issue 1249 will
+ // clarify, but for now we follow GCC because it's a more permissive and
+ // easily defensible position.
+ CaptureType = Context.getLValueReferenceType(DeclRefType);
} else {
- // Walk redefinitions, as some of them may be instantiable.
- for (FunctionDecl::redecl_iterator i(Function->redecls_begin()),
- e(Function->redecls_end()); i != e; ++i) {
- if (!i->isUsed(false) && i->isImplicitlyInstantiable())
- MarkDeclarationReferenced(Loc, *i);
+ // C++11 [expr.prim.lambda]p14:
+ // For each entity captured by copy, an unnamed non-static
+ // data member is declared in the closure type. The
+ // declaration order of these members is unspecified. The type
+ // of such a data member is the type of the corresponding
+ // captured entity if the entity is not a reference to an
+ // object, or the referenced type otherwise. [Note: If the
+ // captured entity is a reference to a function, the
+ // corresponding data member is also a reference to a
+ // function. - end note ]
+ if (const ReferenceType *RefType = CaptureType->getAs<ReferenceType>()){
+ if (!RefType->getPointeeType()->isFunctionType())
+ CaptureType = RefType->getPointeeType();
+ }
+
+ // Forbid the lambda copy-capture of autoreleasing variables.
+ if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_arc_autoreleasing_capture) << /*lambda*/ 1;
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
}
}
- // Keep track of used but undefined functions.
- if (!Function->isPure() && !Function->hasBody() &&
- Function->getLinkage() != ExternalLinkage) {
- SourceLocation &old = UndefinedInternals[Function->getCanonicalDecl()];
- if (old.isInvalid()) old = Loc;
+ // Capture this variable in the lambda.
+ Expr *CopyExpr = 0;
+ if (BuildAndDiagnose) {
+ ExprResult Result = captureInLambda(*this, LSI, Var, CaptureType,
+ DeclRefType, Loc);
+ if (!Result.isInvalid())
+ CopyExpr = Result.take();
+ }
+
+ // Compute the type of a reference to this captured variable.
+ if (ByRef)
+ DeclRefType = CaptureType.getNonReferenceType();
+ else {
+ // C++ [expr.prim.lambda]p5:
+ // The closure type for a lambda-expression has a public inline
+ // function call operator [...]. This function call operator is
+      //   declared const (9.3.1) if and only if the lambda-expression's
+ // parameter-declaration-clause is not followed by mutable.
+ DeclRefType = CaptureType.getNonReferenceType();
+ if (!LSI->Mutable && !CaptureType->isReferenceType())
+ DeclRefType.addConst();
}
+
+ // Add the capture.
+ if (BuildAndDiagnose)
+ CSI->addCapture(Var, /*IsBlock=*/false, ByRef, Nested, Loc,
+ EllipsisLoc, CaptureType, CopyExpr);
+ Nested = true;
+ }
- Function->setUsed(true);
- return;
+ return false;
+}
+
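
A sketch of the const-qualification computed for copy captures above (C++11): inside a non-mutable lambda, a by-copy capture is referred to through a const-qualified type.

    void capture_const() {
      int x = 0;
      auto l1 = [x] { return x + 1; };          // 'x' is const in the body
      auto l2 = [x]() mutable { return ++x; };  // mutable: 'x' is modifiable
      l1(); l2();
    }
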
+bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+ TryCaptureKind Kind, SourceLocation EllipsisLoc) {
+ QualType CaptureType;
+ QualType DeclRefType;
+ return tryCaptureVariable(Var, Loc, Kind, EllipsisLoc,
+ /*BuildAndDiagnose=*/true, CaptureType,
+ DeclRefType);
+}
+
+QualType Sema::getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc) {
+ QualType CaptureType;
+ QualType DeclRefType;
+
+ // Determine whether we can capture this variable.
+ if (tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(),
+ /*BuildAndDiagnose=*/false, CaptureType, DeclRefType))
+ return QualType();
+
+ return DeclRefType;
+}
+
+static void MarkVarDeclODRUsed(Sema &SemaRef, VarDecl *Var,
+ SourceLocation Loc) {
+ // Keep track of used but undefined variables.
+ // FIXME: We shouldn't suppress this warning for static data members.
+ if (Var->hasDefinition(SemaRef.Context) == VarDecl::DeclarationOnly &&
+ Var->getLinkage() != ExternalLinkage &&
+ !(Var->isStaticDataMember() && Var->hasInit())) {
+ SourceLocation &old = SemaRef.UndefinedInternals[Var->getCanonicalDecl()];
+ if (old.isInvalid()) old = Loc;
}
- if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
- // Implicit instantiation of static data members of class templates.
- if (Var->isStaticDataMember() &&
- Var->getInstantiatedFromStaticDataMember()) {
- MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
- assert(MSInfo && "Missing member specialization information?");
- if (MSInfo->getPointOfInstantiation().isInvalid() &&
- MSInfo->getTemplateSpecializationKind()== TSK_ImplicitInstantiation) {
- MSInfo->setPointOfInstantiation(Loc);
+ SemaRef.tryCaptureVariable(Var, Loc);
+
+ Var->setUsed(true);
+}
+
+void Sema::UpdateMarkingForLValueToRValue(Expr *E) {
+ // Per C++11 [basic.def.odr], a variable is odr-used "unless it is
+ // an object that satisfies the requirements for appearing in a
+ // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1)
+ // is immediately applied." This function handles the lvalue-to-rvalue
+ // conversion part.
+ MaybeODRUseExprs.erase(E->IgnoreParens());
+}
+
+ExprResult Sema::ActOnConstantExpression(ExprResult Res) {
+ if (!Res.isUsable())
+ return Res;
+
+ // If a constant-expression is a reference to a variable where we delay
+ // deciding whether it is an odr-use, just assume we will apply the
+ // lvalue-to-rvalue conversion. In the one case where this doesn't happen
+ // (a non-type template argument), we have special handling anyway.
+ UpdateMarkingForLValueToRValue(Res.get());
+ return Res;
+}
+
+void Sema::CleanupVarDeclMarking() {
+ for (llvm::SmallPtrSetIterator<Expr*> i = MaybeODRUseExprs.begin(),
+ e = MaybeODRUseExprs.end();
+ i != e; ++i) {
+ VarDecl *Var;
+ SourceLocation Loc;
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*i)) {
+ Var = cast<VarDecl>(DRE->getDecl());
+ Loc = DRE->getLocation();
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(*i)) {
+ Var = cast<VarDecl>(ME->getMemberDecl());
+ Loc = ME->getMemberLoc();
+ } else {
+      llvm_unreachable("Unexpected expression");
+ }
+
+ MarkVarDeclODRUsed(*this, Var, Loc);
+ }
+
+ MaybeODRUseExprs.clear();
+}
+
+// Mark a VarDecl referenced, and perform the necessary handling to compute
+// odr-uses.
+static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
+ VarDecl *Var, Expr *E) {
+ Var->setReferenced();
+
+ if (!IsPotentiallyEvaluatedContext(SemaRef))
+ return;
+
+ // Implicit instantiation of static data members of class templates.
+ if (Var->isStaticDataMember() && Var->getInstantiatedFromStaticDataMember()) {
+ MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
+ assert(MSInfo && "Missing member specialization information?");
+ bool AlreadyInstantiated = !MSInfo->getPointOfInstantiation().isInvalid();
+ if (MSInfo->getTemplateSpecializationKind() == TSK_ImplicitInstantiation &&
+ (!AlreadyInstantiated ||
+ Var->isUsableInConstantExpressions(SemaRef.Context))) {
+ if (!AlreadyInstantiated) {
// This is a modification of an existing AST node. Notify listeners.
- if (ASTMutationListener *L = getASTMutationListener())
+ if (ASTMutationListener *L = SemaRef.getASTMutationListener())
L->StaticDataMemberInstantiated(Var);
- PendingInstantiations.push_back(std::make_pair(Var, Loc));
+ MSInfo->setPointOfInstantiation(Loc);
}
- }
+ SourceLocation PointOfInstantiation = MSInfo->getPointOfInstantiation();
+ if (Var->isUsableInConstantExpressions(SemaRef.Context))
+ // Do not defer instantiations of variables which could be used in a
+ // constant expression.
+ SemaRef.InstantiateStaticDataMemberDefinition(PointOfInstantiation,Var);
+ else
+ SemaRef.PendingInstantiations.push_back(
+ std::make_pair(Var, PointOfInstantiation));
+ }
+ }
+
+ // Per C++11 [basic.def.odr], a variable is odr-used "unless it is
+ // an object that satisfies the requirements for appearing in a
+ // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1)
+ // is immediately applied." We check the first part here, and
+ // Sema::UpdateMarkingForLValueToRValue deals with the second part.
+ // Note that we use the C++11 definition everywhere because nothing in
+ // C++03 depends on whether we get the C++03 version correct. This does not
+ // apply to references, since they are not objects.
+ const VarDecl *DefVD;
+ if (E && !isa<ParmVarDecl>(Var) && !Var->getType()->isReferenceType() &&
+ Var->isUsableInConstantExpressions(SemaRef.Context) &&
+ Var->getAnyInitializer(DefVD) && DefVD->checkInitIsICE())
+ SemaRef.MaybeODRUseExprs.insert(E);
+ else
+ MarkVarDeclODRUsed(SemaRef, Var, Loc);
+}
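
To make the deferral above concrete, a sketch (illustrative only, names
invented) of when the lvalue-to-rvalue exemption applies:

    struct S { static const int n = 5; };   // declaration with initializer

    int use_value() { return S::n; }     // the lvalue-to-rvalue conversion
                                         // is immediately applied: not an
                                         // odr-use, no definition required

    const int *use_addr() { return &S::n; }  // address taken: odr-used, so
                                             // 'const int S::n;' must be
                                             // defined in some translation
                                             // unit
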
- // Keep track of used but undefined variables. We make a hole in
- // the warning for static const data members with in-line
- // initializers.
- if (Var->hasDefinition() == VarDecl::DeclarationOnly
- && Var->getLinkage() != ExternalLinkage
- && !(Var->isStaticDataMember() && Var->hasInit())) {
- SourceLocation &old = UndefinedInternals[Var->getCanonicalDecl()];
- if (old.isInvalid()) old = Loc;
- }
+/// \brief Mark a variable referenced, and check whether it is odr-used
+/// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be
+/// used directly for normal expressions referring to a VarDecl.
+void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) {
+ DoMarkVarDeclReferenced(*this, Loc, Var, 0);
+}
- D->setUsed(true);
+static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
+ Decl *D, Expr *E) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ DoMarkVarDeclReferenced(SemaRef, Loc, Var, E);
return;
}
+
+ SemaRef.MarkAnyDeclReferenced(Loc, D);
+}
+
+/// \brief Perform reference-marking and odr-use handling for a DeclRefExpr.
+void Sema::MarkDeclRefReferenced(DeclRefExpr *E) {
+ MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E);
+}
+
+/// \brief Perform reference-marking and odr-use handling for a MemberExpr.
+void Sema::MarkMemberReferenced(MemberExpr *E) {
+ MarkExprReferenced(*this, E->getMemberLoc(), E->getMemberDecl(), E);
+}
+
+/// \brief Perform marking for a reference to an arbitrary declaration. It
+/// marks the declaration referenced, and performs odr-use checking for functions
+/// and variables. This method should not be used when building a normal
+/// expression which refers to a variable.
+void Sema::MarkAnyDeclReferenced(SourceLocation Loc, Decl *D) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ MarkVariableReferenced(Loc, VD);
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ MarkFunctionReferenced(Loc, FD);
+ else
+ D->setReferenced();
}
namespace {
@@ -9331,7 +10478,8 @@ namespace {
bool MarkReferencedDecls::TraverseTemplateArgument(
const TemplateArgument &Arg) {
if (Arg.getKind() == TemplateArgument::Declaration) {
- S.MarkDeclarationReferenced(Loc, Arg.getAsDecl());
+ if (Decl *D = Arg.getAsDecl())
+ S.MarkAnyDeclReferenced(Loc, D);
}
return Inherited::TraverseTemplateArgument(Arg);
@@ -9357,38 +10505,51 @@ namespace {
/// potentially-evaluated subexpressions as "referenced".
class EvaluatedExprMarker : public EvaluatedExprVisitor<EvaluatedExprMarker> {
Sema &S;
+ bool SkipLocalVariables;
public:
typedef EvaluatedExprVisitor<EvaluatedExprMarker> Inherited;
- explicit EvaluatedExprMarker(Sema &S) : Inherited(S.Context), S(S) { }
+ EvaluatedExprMarker(Sema &S, bool SkipLocalVariables)
+ : Inherited(S.Context), S(S), SkipLocalVariables(SkipLocalVariables) { }
void VisitDeclRefExpr(DeclRefExpr *E) {
- S.MarkDeclarationReferenced(E->getLocation(), E->getDecl());
+ // If we were asked not to visit local variables, don't.
+ if (SkipLocalVariables) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ if (VD->hasLocalStorage())
+ return;
+ }
+
+ S.MarkDeclRefReferenced(E);
}
void VisitMemberExpr(MemberExpr *E) {
- S.MarkDeclarationReferenced(E->getMemberLoc(), E->getMemberDecl());
+ S.MarkMemberReferenced(E);
Inherited::VisitMemberExpr(E);
}
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ S.MarkFunctionReferenced(E->getLocStart(),
+ const_cast<CXXDestructorDecl*>(E->getTemporary()->getDestructor()));
+ Visit(E->getSubExpr());
+ }
+
void VisitCXXNewExpr(CXXNewExpr *E) {
- if (E->getConstructor())
- S.MarkDeclarationReferenced(E->getLocStart(), E->getConstructor());
if (E->getOperatorNew())
- S.MarkDeclarationReferenced(E->getLocStart(), E->getOperatorNew());
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorNew());
if (E->getOperatorDelete())
- S.MarkDeclarationReferenced(E->getLocStart(), E->getOperatorDelete());
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete());
Inherited::VisitCXXNewExpr(E);
}
-
+
void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
if (E->getOperatorDelete())
- S.MarkDeclarationReferenced(E->getLocStart(), E->getOperatorDelete());
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete());
QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- S.MarkDeclarationReferenced(E->getLocStart(),
+ S.MarkFunctionReferenced(E->getLocStart(),
S.LookupDestructor(Record));
}
@@ -9396,24 +10557,31 @@ namespace {
}
void VisitCXXConstructExpr(CXXConstructExpr *E) {
- S.MarkDeclarationReferenced(E->getLocStart(), E->getConstructor());
+ S.MarkFunctionReferenced(E->getLocStart(), E->getConstructor());
Inherited::VisitCXXConstructExpr(E);
}
- void VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
- S.MarkDeclarationReferenced(E->getLocation(), E->getDecl());
- }
-
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
Visit(E->getExpr());
}
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ Inherited::VisitImplicitCastExpr(E);
+
+ if (E->getCastKind() == CK_LValueToRValue)
+ S.UpdateMarkingForLValueToRValue(E->getSubExpr());
+ }
};
}
/// \brief Mark any declarations that appear within this expression or any
/// potentially-evaluated subexpressions as "referenced".
-void Sema::MarkDeclarationsReferencedInExpr(Expr *E) {
- EvaluatedExprMarker(*this).Visit(E);
+///
+/// \param SkipLocalVariables If true, don't mark local variables as
+/// 'referenced'.
+void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
+ bool SkipLocalVariables) {
+ EvaluatedExprMarker(*this, SkipLocalVariables).Visit(E);
}
/// \brief Emit a diagnostic that describes an effect on the run-time behavior
@@ -9439,6 +10607,10 @@ bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
// The argument will never be evaluated, so don't complain.
break;
+ case ConstantEvaluated:
+ // Relevant diagnostics should be produced by constant evaluation.
+ break;
+
case PotentiallyEvaluated:
case PotentiallyEvaluatedIfUsed:
if (Statement && getCurFunctionOrMethodDecl()) {
@@ -9449,10 +10621,6 @@ bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
Diag(Loc, PD);
return true;
-
- case PotentiallyPotentiallyEvaluated:
- ExprEvalContexts.back().addDiagnostic(Loc, PD);
- break;
}
return false;
@@ -9463,6 +10631,13 @@ bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
if (ReturnType->isVoidType() || !ReturnType->isIncompleteType())
return false;
+ // If we're inside a decltype's expression, don't check for a valid return
+ // type or construct temporaries until we know whether this is the last call.
+ if (ExprEvalContexts.back().IsDecltype) {
+ ExprEvalContexts.back().DelayedDecltypeCalls.push_back(CE);
+ return false;
+ }
+
PartialDiagnostic Note =
FD ? PDiag(diag::note_function_with_incomplete_return_type_declared_here)
<< FD->getDeclName() : PDiag();
@@ -9522,7 +10697,7 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
Diag(Loc, diagnostic) << E->getSourceRange();
- SourceLocation Open = E->getSourceRange().getBegin();
+ SourceLocation Open = E->getLocStart();
SourceLocation Close = PP.getLocForEndOfToken(E->getSourceRange().getEnd());
Diag(Loc, diag::note_condition_assign_silence)
<< FixItHint::CreateInsertion(Open, "(")
@@ -9556,9 +10731,10 @@ void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *ParenE) {
SourceLocation Loc = opE->getOperatorLoc();
Diag(Loc, diag::warn_equality_with_extra_parens) << E->getSourceRange();
+ SourceRange ParenERange = ParenE->getSourceRange();
Diag(Loc, diag::note_equality_comparison_silence)
- << FixItHint::CreateRemoval(ParenE->getSourceRange().getBegin())
- << FixItHint::CreateRemoval(ParenE->getSourceRange().getEnd());
+ << FixItHint::CreateRemoval(ParenERange.getBegin())
+ << FixItHint::CreateRemoval(ParenERange.getEnd());
Diag(Loc, diag::note_equality_comparison_to_assign)
<< FixItHint::CreateReplacement(Loc, "=");
}
@@ -9574,7 +10750,7 @@ ExprResult Sema::CheckBooleanCondition(Expr *E, SourceLocation Loc) {
E = result.take();
if (!E->isTypeDependent()) {
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
return CheckCXXBooleanCondition(E); // C++ 6.4p4
ExprResult ERes = DefaultFunctionArrayLvalueConversion(E);
@@ -9613,7 +10789,6 @@ namespace {
ExprResult VisitStmt(Stmt *S) {
llvm_unreachable("unexpected statement!");
- return ExprError();
}
ExprResult VisitExpr(Expr *E) {
@@ -9662,7 +10837,7 @@ namespace {
E->setType(VD->getType());
assert(E->getValueKind() == VK_RValue);
- if (S.getLangOptions().CPlusPlus &&
+ if (S.getLangOpts().CPlusPlus &&
!(isa<CXXMethodDecl>(VD) &&
cast<CXXMethodDecl>(VD)->isInstance()))
E->setValueKind(VK_LValue);
@@ -9706,7 +10881,6 @@ namespace {
ExprResult VisitStmt(Stmt *S) {
llvm_unreachable("unexpected statement!");
- return ExprError();
}
ExprResult VisitExpr(Expr *E) {
@@ -9870,20 +11044,39 @@ ExprResult RebuildUnknownAnyExpr::VisitObjCMessageExpr(ObjCMessageExpr *E) {
ExprResult RebuildUnknownAnyExpr::VisitImplicitCastExpr(ImplicitCastExpr *E) {
// The only case we should ever see here is a function-to-pointer decay.
- assert(E->getCastKind() == CK_FunctionToPointerDecay);
- assert(E->getValueKind() == VK_RValue);
- assert(E->getObjectKind() == OK_Ordinary);
+ if (E->getCastKind() == CK_FunctionToPointerDecay) {
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+
+ E->setType(DestType);
+
+ // Rebuild the sub-expression as the pointee (function) type.
+ DestType = DestType->castAs<PointerType>()->getPointeeType();
+
+ ExprResult Result = Visit(E->getSubExpr());
+ if (!Result.isUsable()) return ExprError();
+
+ E->setSubExpr(Result.take());
+ return S.Owned(E);
+ } else if (E->getCastKind() == CK_LValueToRValue) {
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
- E->setType(DestType);
+ assert(isa<BlockPointerType>(E->getType()));
- // Rebuild the sub-expression as the pointee (function) type.
- DestType = DestType->castAs<PointerType>()->getPointeeType();
+ E->setType(DestType);
- ExprResult Result = Visit(E->getSubExpr());
- if (!Result.isUsable()) return ExprError();
+  // The sub-expression must be an lvalue reference, so rebuild it as such.
+ DestType = S.Context.getLValueReferenceType(DestType);
- E->setSubExpr(Result.take());
- return S.Owned(E);
+ ExprResult Result = Visit(E->getSubExpr());
+ if (!Result.isUsable()) return ExprError();
+
+ E->setSubExpr(Result.take());
+ return S.Owned(E);
+ } else {
+ llvm_unreachable("Unhandled cast type!");
+ }
}
ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
@@ -9915,7 +11108,7 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
}
// Function references aren't l-values in C.
- if (!S.getLangOptions().CPlusPlus)
+ if (!S.getLangOpts().CPlusPlus)
ValueKind = VK_RValue;
// - variables
@@ -9957,6 +11150,10 @@ ExprResult Sema::checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
return CastExpr;
}
+ExprResult Sema::forceUnknownAnyToType(Expr *E, QualType ToType) {
+ return RebuildUnknownAnyExpr(*this, ToType).Visit(E);
+}
+
static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
Expr *orig = E;
unsigned diagID = diag::err_uncasted_use_of_unknown_any;
@@ -10003,11 +11200,13 @@ static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
/// Check for operands with placeholder types and complain if found.
/// Returns true if there was an error and no recovery was possible.
ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
- // Placeholder types are always *exactly* the appropriate builtin type.
- QualType type = E->getType();
+ const BuiltinType *placeholderType = E->getType()->getAsPlaceholderType();
+ if (!placeholderType) return Owned(E);
+
+ switch (placeholderType->getKind()) {
// Overloaded expressions.
- if (type == Context.OverloadTy) {
+ case BuiltinType::Overload: {
// Try to resolve a single function template specialization.
// This is obligatory.
ExprResult result = Owned(E);
@@ -10023,19 +11222,37 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
}
// Bound member functions.
- if (type == Context.BoundMemberTy) {
+ case BuiltinType::BoundMember: {
ExprResult result = Owned(E);
tryToRecoverWithCall(result, PDiag(diag::err_bound_member_function),
/*complain*/ true);
return result;
- }
+ }
+
+ // ARC unbridged casts.
+ case BuiltinType::ARCUnbridgedCast: {
+ Expr *realCast = stripARCUnbridgedCast(E);
+ diagnoseARCUnbridgedCast(realCast);
+ return Owned(realCast);
+ }
// Expressions of unknown type.
- if (type == Context.UnknownAnyTy)
+ case BuiltinType::UnknownAny:
return diagnoseUnknownAnyExpr(*this, E);
- assert(!type->isPlaceholderType());
- return Owned(E);
+ // Pseudo-objects.
+ case BuiltinType::PseudoObject:
+ return checkPseudoObjectRValue(E);
+
+ // Everything else should be impossible.
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#define PLACEHOLDER_TYPE(Id, SingletonId)
+#include "clang/AST/BuiltinTypes.def"
+ break;
+ }
+
+ llvm_unreachable("invalid placeholder type!");
}
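
At the source level, the two most common placeholder kinds handled above
arise roughly as follows (a sketch, not from the patch; names invented):

    void h(int);
    void h(double);

    struct T { void mf(); };

    void use(T t) {
      auto ok = static_cast<void (*)(int)>(&h);  // overload set resolved
                                                 // by the target type
      // auto bad1 = &h;    // error: unresolved overloaded function name
      // auto bad2 = t.mf;  // error: a bound member function may only be
      //                    // called (err_bound_member_function)
      (void)ok;
    }
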
bool Sema::CheckCaseExpression(Expr *E) {
@@ -10045,3 +11262,28 @@ bool Sema::CheckCaseExpression(Expr *E) {
return E->getType()->isIntegralOrEnumerationType();
return false;
}
+
+/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
+ExprResult
+Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
+ assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) &&
+ "Unknown Objective-C Boolean value!");
+ QualType ObjCBoolLiteralQT = Context.ObjCBuiltinBoolTy;
+  // signed char is the default type for boolean literals. Use 'BOOL'
+  // instead, if the BOOL typedef is visible in this scope.
+ Decl *TD =
+ LookupSingleName(TUScope, &Context.Idents.get("BOOL"),
+ SourceLocation(), LookupOrdinaryName);
+ if (TypedefDecl *BoolTD = dyn_cast_or_null<TypedefDecl>(TD)) {
+ QualType QT = BoolTD->getUnderlyingType();
+ if (!QT->isIntegralOrUnscopedEnumerationType()) {
+ Diag(OpLoc, diag::warn_bool_for_boolean_literal) << QT;
+ Diag(BoolTD->getLocation(), diag::note_previous_declaration);
+ }
+ else
+ ObjCBoolLiteralQT = QT;
+ }
+
+ return Owned(new (Context) ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes,
+ ObjCBoolLiteralQT, OpLoc));
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
index 3300444..31a8115 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
@@ -20,6 +20,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
@@ -28,6 +29,8 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "TypeLocBuilder.h"
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -265,6 +268,22 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
return ParsedType();
}
+ParsedType Sema::getDestructorType(const DeclSpec& DS, ParsedType ObjectType) {
+ if (DS.getTypeSpecType() == DeclSpec::TST_error || !ObjectType)
+ return ParsedType();
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype
+ && "only get destructor types from declspecs");
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ QualType SearchType = GetTypeFromParser(ObjectType);
+ if (SearchType->isDependentType() || Context.hasSameUnqualifiedType(SearchType, T)) {
+ return ParsedType::make(T);
+ }
+
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch)
+ << T << SearchType;
+ return ParsedType();
+}
+
/// \brief Build a C++ typeid expression with a type operand.
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
@@ -293,7 +312,6 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *E,
SourceLocation RParenLoc) {
- bool isUnevaluatedOperand = true;
if (E && !E->isTypeDependent()) {
if (E->getType()->isPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
@@ -315,7 +333,11 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
// polymorphic class type [...] [the] expression is an unevaluated
// operand. [...]
if (RecordD->isPolymorphic() && E->Classify(Context).isGLValue()) {
- isUnevaluatedOperand = false;
+ // The subexpression is potentially evaluated; switch the context
+ // and recheck the subexpression.
+ ExprResult Result = TranformToPotentiallyEvaluated(E);
+ if (Result.isInvalid()) return ExprError();
+ E = Result.take();
// We require a vtable to query the type at run time.
MarkVTableUsed(TypeidLoc, RecordD);
@@ -335,12 +357,6 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
}
}
- // If this is an unevaluated operand, clear out the set of
- // declaration references we have been computing and eliminate any
- // temporaries introduced in its computation.
- if (isUnevaluatedOperand)
- ExprEvalContexts.back().Context = Unevaluated;
-
return Owned(new (Context) CXXTypeidExpr(TypeInfoType.withConst(),
E,
SourceRange(TypeidLoc, RParenLoc)));
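
A small sketch (illustrative only) of the language rule the rewritten
typeid handling above implements:

    #include <typeinfo>

    struct Poly  { virtual ~Poly() {} };
    struct Plain { };

    const char *f(Poly *pp, Plain pl) {
      // Not polymorphic: unevaluated operand, no code runs for 'pl'.
      const std::type_info &a = typeid(pl);

      // Glvalue of polymorphic type: potentially evaluated, so the
      // dereference happens at run time and throws std::bad_typeid if
      // 'pp' is null.
      const std::type_info &b = typeid(*pp);

      return a == b ? "same" : b.name();
    }
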
@@ -525,7 +541,7 @@ Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope) {
// Don't report an error if 'throw' is used in system headers.
- if (!getLangOptions().CXXExceptions &&
+ if (!getLangOpts().CXXExceptions &&
!getSourceManager().isInSystemHeader(OpLoc))
Diag(OpLoc, diag::err_exceptions_disabled) << "throw";
@@ -621,37 +637,23 @@ ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
if (isPointer)
return Owned(E);
- // If the class has a non-trivial destructor, we must be able to call it.
- if (RD->hasTrivialDestructor())
+ // If the class has a destructor, we must be able to call it.
+ if (RD->hasIrrelevantDestructor())
return Owned(E);
- CXXDestructorDecl *Destructor
- = const_cast<CXXDestructorDecl*>(LookupDestructor(RD));
+ CXXDestructorDecl *Destructor = LookupDestructor(RD);
if (!Destructor)
return Owned(E);
- MarkDeclarationReferenced(E->getExprLoc(), Destructor);
+ MarkFunctionReferenced(E->getExprLoc(), Destructor);
CheckDestructorAccess(E->getExprLoc(), Destructor,
PDiag(diag::err_access_dtor_exception) << Ty);
+ DiagnoseUseOfDecl(Destructor, E->getExprLoc());
return Owned(E);
}
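
A sketch (not part of the patch, names invented) of what the destructor
check above rejects:

    class Guarded {
      ~Guarded() {}         // private destructor
    public:
      Guarded() {}
    };

    void f() {
      throw Guarded();      // error: the exception object must be
                            // destroyable at the throw site
    }                       // (err_access_dtor_exception)
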
-QualType Sema::getAndCaptureCurrentThisType() {
- // Ignore block scopes: we can capture through them.
- // Ignore nested enum scopes: we'll diagnose non-constant expressions
- // where they're invalid, and other uses are legitimate.
- // Don't ignore nested class scopes: you can't use 'this' in a local class.
- DeclContext *DC = CurContext;
- unsigned NumBlocks = 0;
- while (true) {
- if (isa<BlockDecl>(DC)) {
- DC = cast<BlockDecl>(DC)->getDeclContext();
- ++NumBlocks;
- } else if (isa<EnumDecl>(DC))
- DC = cast<EnumDecl>(DC)->getDeclContext();
- else break;
- }
-
+QualType Sema::getCurrentThisType() {
+ DeclContext *DC = getFunctionLevelDeclContext();
QualType ThisTy;
if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) {
if (method && method->isInstance())
@@ -666,23 +668,74 @@ QualType Sema::getAndCaptureCurrentThisType() {
ThisTy = Context.getPointerType(Context.getRecordType(RD));
}
- // Mark that we're closing on 'this' in all the block scopes we ignored.
- if (!ThisTy.isNull())
- for (unsigned idx = FunctionScopes.size() - 1;
- NumBlocks; --idx, --NumBlocks)
- cast<BlockScopeInfo>(FunctionScopes[idx])->CapturesCXXThis = true;
-
return ThisTy;
}
+void Sema::CheckCXXThisCapture(SourceLocation Loc, bool Explicit) {
+ // We don't need to capture this in an unevaluated context.
+ if (ExprEvalContexts.back().Context == Unevaluated && !Explicit)
+ return;
+
+ // Otherwise, check that we can capture 'this'.
+ unsigned NumClosures = 0;
+ for (unsigned idx = FunctionScopes.size() - 1; idx != 0; idx--) {
+ if (CapturingScopeInfo *CSI =
+ dyn_cast<CapturingScopeInfo>(FunctionScopes[idx])) {
+ if (CSI->CXXThisCaptureIndex != 0) {
+ // 'this' is already being captured; there isn't anything more to do.
+ break;
+ }
+
+ if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
+ CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
+ CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
+ Explicit) {
+ // This closure can capture 'this'; continue looking upwards.
+ NumClosures++;
+ Explicit = false;
+ continue;
+ }
+ // This context can't implicitly capture 'this'; fail out.
+ Diag(Loc, diag::err_this_capture) << Explicit;
+ return;
+ }
+ break;
+ }
+
+ // Mark that we're implicitly capturing 'this' in all the scopes we skipped.
+ // FIXME: We need to delay this marking in PotentiallyPotentiallyEvaluated
+ // contexts.
+ for (unsigned idx = FunctionScopes.size() - 1;
+ NumClosures; --idx, --NumClosures) {
+ CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
+ Expr *ThisExpr = 0;
+ QualType ThisTy = getCurrentThisType();
+ if (LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
+ // For lambda expressions, build a field and an initializing expression.
+ CXXRecordDecl *Lambda = LSI->Lambda;
+ FieldDecl *Field
+ = FieldDecl::Create(Context, Lambda, Loc, Loc, 0, ThisTy,
+ Context.getTrivialTypeSourceInfo(ThisTy, Loc),
+ 0, false, false);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ Lambda->addDecl(Field);
+ ThisExpr = new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/true);
+ }
+ bool isNested = NumClosures > 1;
+ CSI->addThisCapture(isNested, Loc, ThisTy, ThisExpr);
+ }
+}
+
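
At the source level, the walk above is what lets nested closures pick up
'this' implicitly; a minimal C++11 sketch (not from the patch):

    struct Widget {
      int n;
      int f() {
        // Using the member 'n' captures 'this' (not a copy of 'n') in
        // every enclosing closure on the way up.
        auto outer = [=] {
          auto inner = [=] { return n + 1; };
          return inner();
        };
        n = 41;
        return outer();     // 42: reads 'n' through the captured 'this'
      }
    };
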
ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
/// C++ 9.3.2: In the body of a non-static member function, the keyword this
/// is a non-lvalue expression whose value is the address of the object for
/// which the function is called.
- QualType ThisTy = getAndCaptureCurrentThisType();
+ QualType ThisTy = getCurrentThisType();
if (ThisTy.isNull()) return Diag(Loc, diag::err_invalid_this_use);
+ CheckCXXThisCapture(Loc);
return Owned(new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/false));
}
@@ -715,10 +768,10 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
unsigned NumExprs = exprs.size();
Expr **Exprs = (Expr**)exprs.get();
SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
- SourceRange FullRange = SourceRange(TyBeginLoc, RParenLoc);
if (Ty->isDependentType() ||
- CallExpr::hasAnyTypeDependentArguments(Exprs, NumExprs)) {
+ CallExpr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(Exprs, NumExprs))) {
exprs.release();
return Owned(CXXUnresolvedConstructExpr::Create(Context, TInfo,
@@ -727,39 +780,64 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
RParenLoc));
}
- if (Ty->isArrayType())
- return ExprError(Diag(TyBeginLoc,
- diag::err_value_init_for_array_type) << FullRange);
- if (!Ty->isVoidType() &&
- RequireCompleteType(TyBeginLoc, Ty,
- PDiag(diag::err_invalid_incomplete_type_use)
- << FullRange))
- return ExprError();
-
- if (RequireNonAbstractType(TyBeginLoc, Ty,
- diag::err_allocation_of_abstract_type))
- return ExprError();
-
+ bool ListInitialization = LParenLoc.isInvalid();
+ assert((!ListInitialization || (NumExprs == 1 && isa<InitListExpr>(Exprs[0])))
+ && "List initialization must have initializer list as expression.");
+ SourceRange FullRange = SourceRange(TyBeginLoc,
+ ListInitialization ? Exprs[0]->getSourceRange().getEnd() : RParenLoc);
// C++ [expr.type.conv]p1:
// If the expression list is a single expression, the type conversion
// expression is equivalent (in definedness, and if defined in meaning) to the
// corresponding cast expression.
- if (NumExprs == 1) {
+ if (NumExprs == 1 && !ListInitialization) {
Expr *Arg = Exprs[0];
exprs.release();
return BuildCXXFunctionalCastExpr(TInfo, LParenLoc, Arg, RParenLoc);
}
+ QualType ElemTy = Ty;
+ if (Ty->isArrayType()) {
+ if (!ListInitialization)
+ return ExprError(Diag(TyBeginLoc,
+ diag::err_value_init_for_array_type) << FullRange);
+ ElemTy = Context.getBaseElementType(Ty);
+ }
+
+ if (!Ty->isVoidType() &&
+ RequireCompleteType(TyBeginLoc, ElemTy,
+ PDiag(diag::err_invalid_incomplete_type_use)
+ << FullRange))
+ return ExprError();
+
+ if (RequireNonAbstractType(TyBeginLoc, Ty,
+ diag::err_allocation_of_abstract_type))
+ return ExprError();
+
InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo);
InitializationKind Kind
- = NumExprs ? InitializationKind::CreateDirect(TyBeginLoc,
- LParenLoc, RParenLoc)
+ = NumExprs ? ListInitialization
+ ? InitializationKind::CreateDirectList(TyBeginLoc)
+ : InitializationKind::CreateDirect(TyBeginLoc,
+ LParenLoc, RParenLoc)
: InitializationKind::CreateValue(TyBeginLoc,
LParenLoc, RParenLoc);
InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, move(exprs));
+ if (!Result.isInvalid() && ListInitialization &&
+ isa<InitListExpr>(Result.get())) {
+ // If the list-initialization doesn't involve a constructor call, we'll get
+ // the initializer-list (with corrected type) back, but that's not what we
+ // want, since it will be treated as an initializer list in further
+ // processing. Explicitly insert a cast here.
+ InitListExpr *List = cast<InitListExpr>(Result.take());
+ Result = Owned(CXXFunctionalCastExpr::Create(Context, List->getType(),
+ Expr::getValueKindForType(TInfo->getType()),
+ TInfo, TyBeginLoc, CK_NoOp,
+ List, /*Path=*/0, RParenLoc));
+ }
+
// FIXME: Improve AST representation?
return move(Result);
}
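
The three initialization forms the function above distinguishes, as they
appear in user code (a sketch; the braced form is C++11-only):

    struct Pair { int a, b; };

    int    i = int(3.14);    // single expression: same as the cast (int)3.14
    double d = double();     // empty parens: value-initialization, 0.0
    Pair   p = Pair{1, 2};   // list-initialization of a temporary
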
@@ -820,18 +898,29 @@ static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
return (del->getNumParams() == 2);
}
-/// ActOnCXXNew - Parsed a C++ 'new' expression (C++ 5.3.4), as in e.g.:
+/// \brief Parsed a C++ 'new' expression (C++ 5.3.4).
+///
+/// E.g.:
/// @code new (memory) int[size][4] @endcode
/// or
/// @code ::new Foo(23, "hello") @endcode
-/// For the interpretation of this heap of arguments, consult the base version.
+///
+/// \param StartLoc The first location of the expression.
+/// \param UseGlobal True if 'new' was prefixed with '::'.
+/// \param PlacementLParen Opening paren of the placement arguments.
+/// \param PlacementArgs Placement new arguments.
+/// \param PlacementRParen Closing paren of the placement arguments.
+/// \param TypeIdParens If the type is in parens, the source range.
+/// \param D The type to be allocated, as well as array dimensions.
+/// \param Initializer The initializing expression or initializer-list, or
+///        null if there is none.
ExprResult
Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
SourceLocation PlacementRParen, SourceRange TypeIdParens,
- Declarator &D, SourceLocation ConstructorLParen,
- MultiExprArg ConstructorArgs,
- SourceLocation ConstructorRParen) {
+ Declarator &D, Expr *Initializer) {
bool TypeContainsAuto = D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
Expr *ArraySize = 0;
@@ -861,11 +950,11 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
if (Expr *NumElts = (Expr *)Array.NumElts) {
- if (!NumElts->isTypeDependent() && !NumElts->isValueDependent() &&
- !NumElts->isIntegerConstantExpr(Context)) {
- Diag(D.getTypeObject(I).Loc, diag::err_new_array_nonconst)
- << NumElts->getSourceRange();
- return ExprError();
+ if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
+ Array.NumElts = VerifyIntegerConstantExpression(NumElts, 0,
+ PDiag(diag::err_new_array_nonconst)).take();
+ if (!Array.NumElts)
+ return ExprError();
}
}
}
@@ -876,6 +965,10 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (D.isInvalidType())
return ExprError();
+ SourceRange DirectInitRange;
+ if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
+ DirectInitRange = List->getSourceRange();
+
return BuildCXXNew(StartLoc, UseGlobal,
PlacementLParen,
move(PlacementArgs),
@@ -884,12 +977,30 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
AllocType,
TInfo,
ArraySize,
- ConstructorLParen,
- move(ConstructorArgs),
- ConstructorRParen,
+ DirectInitRange,
+ Initializer,
TypeContainsAuto);
}
+static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style,
+ Expr *Init) {
+ if (!Init)
+ return true;
+ if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init))
+ return PLE->getNumExprs() == 0;
+ if (isa<ImplicitValueInitExpr>(Init))
+ return true;
+ else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init))
+ return !CCE->isListInitialization() &&
+ CCE->getConstructor()->isDefaultConstructor();
+ else if (Style == CXXNewExpr::ListInit) {
+ assert(isa<InitListExpr>(Init) &&
+ "Shouldn't create list CXXConstructExprs for arrays.");
+ return true;
+ }
+ return false;
+}
+
ExprResult
Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
@@ -899,37 +1010,76 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
- SourceLocation ConstructorLParen,
- MultiExprArg ConstructorArgs,
- SourceLocation ConstructorRParen,
+ SourceRange DirectInitRange,
+ Expr *Initializer,
bool TypeMayContainAuto) {
SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
+ CXXNewExpr::InitializationStyle initStyle;
+ if (DirectInitRange.isValid()) {
+ assert(Initializer && "Have parens but no initializer.");
+ initStyle = CXXNewExpr::CallInit;
+ } else if (Initializer && isa<InitListExpr>(Initializer))
+ initStyle = CXXNewExpr::ListInit;
+ else {
+ // In template instantiation, the initializer could be a CXXDefaultArgExpr
+ // unwrapped from a CXXConstructExpr that was implicitly built. There is no
+ // particularly sane way we can handle this (especially since it can even
+ // occur for array new), so we throw the initializer away and have it be
+ // rebuilt.
+ if (Initializer && isa<CXXDefaultArgExpr>(Initializer))
+ Initializer = 0;
+ assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
+ isa<CXXConstructExpr>(Initializer)) &&
+ "Initializer expression that cannot have been implicitly created.");
+ initStyle = CXXNewExpr::NoInit;
+ }
+
+ Expr **Inits = &Initializer;
+ unsigned NumInits = Initializer ? 1 : 0;
+ if (initStyle == CXXNewExpr::CallInit) {
+ if (ParenListExpr *List = dyn_cast<ParenListExpr>(Initializer)) {
+ Inits = List->getExprs();
+ NumInits = List->getNumExprs();
+ } else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Initializer)){
+ if (!isa<CXXTemporaryObjectExpr>(CCE)) {
+ // Can happen in template instantiation. Since this is just an implicit
+ // construction, we just take it apart and rebuild it.
+ Inits = CCE->getArgs();
+ NumInits = CCE->getNumArgs();
+ }
+ }
+ }
+
// C++0x [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
if (TypeMayContainAuto && AllocType->getContainedAutoType()) {
- if (ConstructorArgs.size() == 0)
+ if (initStyle == CXXNewExpr::NoInit || NumInits == 0)
return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
<< AllocType << TypeRange);
- if (ConstructorArgs.size() != 1) {
- Expr *FirstBad = ConstructorArgs.get()[1];
- return ExprError(Diag(FirstBad->getSourceRange().getBegin(),
+ if (initStyle == CXXNewExpr::ListInit)
+ return ExprError(Diag(Inits[0]->getLocStart(),
+ diag::err_auto_new_requires_parens)
+ << AllocType << TypeRange);
+ if (NumInits > 1) {
+ Expr *FirstBad = Inits[1];
+ return ExprError(Diag(FirstBad->getLocStart(),
diag::err_auto_new_ctor_multiple_expressions)
<< AllocType << TypeRange);
}
+ Expr *Deduce = Inits[0];
TypeSourceInfo *DeducedType = 0;
- if (!DeduceAutoType(AllocTypeInfo, ConstructorArgs.get()[0], DeducedType))
+ if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) ==
+ DAR_Failed)
return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
- << AllocType
- << ConstructorArgs.get()[0]->getType()
- << TypeRange
- << ConstructorArgs.get()[0]->getSourceRange());
+ << AllocType << Deduce->getType()
+ << TypeRange << Deduce->getSourceRange());
if (!DeducedType)
return ExprError();
AllocTypeInfo = DeducedType;
AllocType = AllocTypeInfo->getType();
}
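
The deduction block above corresponds to these 'new auto' forms (a hedged
sketch; C++11):

    int f() {
      auto *p = new auto(42);  // OK: deduces 'int'; *p == 42
      // new auto;             // error: needs a constructor argument
      //                       // (err_auto_new_requires_ctor_arg)
      // new auto(1, 2);       // error: multiple expressions
      // new auto{1, 2};       // error: parens required, not a braced list
      //                       // (err_auto_new_requires_parens)
      int v = *p;
      delete p;
      return v;                // 42
    }
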
-
+
// Per C++0x [expr.new]p5, the type being constructed may be a
// typedef of an array type.
if (!ArraySize) {
@@ -945,8 +1095,14 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
return ExprError();
+ if (initStyle == CXXNewExpr::ListInit && isStdInitializerList(AllocType, 0)) {
+ Diag(AllocTypeInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_dangling_std_initializer_list)
+ << /*at end of FE*/0 << Inits[0]->getSourceRange();
+ }
+
// In ARC, infer 'retaining' for the allocated
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
AllocType->isObjCLifetimeType()) {
AllocType = Context.getLifetimeQualifiedType(AllocType,
@@ -955,53 +1111,73 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
QualType ResultType = Context.getPointerType(AllocType);
- // C++ 5.3.4p6: "The expression in a direct-new-declarator shall have integral
- // or enumeration type with a non-negative value."
+ // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
+ // integral or enumeration type with a non-negative value."
+ // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
+ // enumeration type, or a class type for which a single non-explicit
+ // conversion function to integral or unscoped enumeration type exists.
if (ArraySize && !ArraySize->isTypeDependent()) {
-
- QualType SizeType = ArraySize->getType();
-
- ExprResult ConvertedSize
- = ConvertToIntegralOrEnumerationType(StartLoc, ArraySize,
- PDiag(diag::err_array_size_not_integral),
- PDiag(diag::err_array_size_incomplete_type)
- << ArraySize->getSourceRange(),
- PDiag(diag::err_array_size_explicit_conversion),
- PDiag(diag::note_array_size_conversion),
- PDiag(diag::err_array_size_ambiguous_conversion),
- PDiag(diag::note_array_size_conversion),
- PDiag(getLangOptions().CPlusPlus0x? 0
- : diag::ext_array_size_conversion));
+ ExprResult ConvertedSize = ConvertToIntegralOrEnumerationType(
+ StartLoc, ArraySize,
+ PDiag(diag::err_array_size_not_integral) << getLangOpts().CPlusPlus0x,
+ PDiag(diag::err_array_size_incomplete_type)
+ << ArraySize->getSourceRange(),
+ PDiag(diag::err_array_size_explicit_conversion),
+ PDiag(diag::note_array_size_conversion),
+ PDiag(diag::err_array_size_ambiguous_conversion),
+ PDiag(diag::note_array_size_conversion),
+ PDiag(getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_array_size_conversion :
+ diag::ext_array_size_conversion),
+ /*AllowScopedEnumerations*/ false);
if (ConvertedSize.isInvalid())
return ExprError();
ArraySize = ConvertedSize.take();
- SizeType = ArraySize->getType();
+ QualType SizeType = ArraySize->getType();
if (!SizeType->isIntegralOrUnscopedEnumerationType())
return ExprError();
- // Let's see if this is a constant < 0. If so, we reject it out of hand.
- // We don't care about special rules, so we tell the machinery it's not
- // evaluated - it gives us a result in more cases.
+ // C++98 [expr.new]p7:
+ // The expression in a direct-new-declarator shall have integral type
+ // with a non-negative value.
+ //
+ // Let's see if this is a constant < 0. If so, we reject it out of
+ // hand. Otherwise, if it's not a constant, we must have an unparenthesized
+ // array type.
+ //
+ // Note: such a construct has well-defined semantics in C++11: it throws
+ // std::bad_array_new_length.
if (!ArraySize->isValueDependent()) {
llvm::APSInt Value;
- if (ArraySize->isIntegerConstantExpr(Value, Context, 0, false)) {
+ // We've already performed any required implicit conversion to integer or
+ // unscoped enumeration type.
+ if (ArraySize->isIntegerConstantExpr(Value, Context)) {
if (Value < llvm::APSInt(
llvm::APInt::getNullValue(Value.getBitWidth()),
- Value.isUnsigned()))
- return ExprError(Diag(ArraySize->getSourceRange().getBegin(),
- diag::err_typecheck_negative_array_size)
- << ArraySize->getSourceRange());
-
- if (!AllocType->isDependentType()) {
- unsigned ActiveSizeBits
- = ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
- if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
- Diag(ArraySize->getSourceRange().getBegin(),
- diag::err_array_too_large)
- << Value.toString(10)
+ Value.isUnsigned())) {
+ if (getLangOpts().CPlusPlus0x)
+ Diag(ArraySize->getLocStart(),
+ diag::warn_typecheck_negative_array_new_size)
<< ArraySize->getSourceRange();
- return ExprError();
+ else
+ return ExprError(Diag(ArraySize->getLocStart(),
+ diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange());
+ } else if (!AllocType->isDependentType()) {
+ unsigned ActiveSizeBits =
+ ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
+ if (getLangOpts().CPlusPlus0x)
+ Diag(ArraySize->getLocStart(),
+ diag::warn_array_new_too_large)
+ << Value.toString(10)
+ << ArraySize->getSourceRange();
+ else
+ return ExprError(Diag(ArraySize->getLocStart(),
+ diag::err_array_too_large)
+ << Value.toString(10)
+ << ArraySize->getSourceRange());
}
}
} else if (TypeIdParens.isValid()) {
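
In user terms, the C++98/C++11 split in the checks above: a constant
negative size stays a hard error in C++98 mode but only warns in C++11,
where the check moves to run time. A sketch of the C++11 behavior the
comments cite (illustrative only):

    #include <new>

    int f() {
      int n = -1;              // not a constant: no compile-time check
      try {
        int *p = new int[n];   // C++11: throws std::bad_array_new_length
        delete[] p;
      } catch (const std::bad_alloc &) {
        return 1;              // bad_array_new_length derives from bad_alloc
      }
      return 0;
    }
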
@@ -1016,7 +1192,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
// ARC: warn about ABI issues.
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
QualType BaseAllocType = Context.getBaseElementType(AllocType);
if (BaseAllocType.hasStrongOrWeakObjCLifetime())
Diag(StartLoc, diag::warn_err_new_delete_object_array)
@@ -1033,7 +1209,8 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
unsigned NumPlaceArgs = PlacementArgs.size();
if (!AllocType->isDependentType() &&
- !Expr::hasAnyTypeDependentArguments(PlaceArgs, NumPlaceArgs) &&
+ !Expr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(PlaceArgs, NumPlaceArgs)) &&
FindAllocationFunctions(StartLoc,
SourceRange(PlacementLParen, PlacementRParen),
UseGlobal, AllocType, ArraySize, PlaceArgs,
@@ -1063,114 +1240,123 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
NumPlaceArgs = AllPlaceArgs.size();
if (NumPlaceArgs > 0)
PlaceArgs = &AllPlaceArgs[0];
- }
- bool Init = ConstructorLParen.isValid();
- // --- Choosing a constructor ---
- CXXConstructorDecl *Constructor = 0;
- bool HadMultipleCandidates = false;
- Expr **ConsArgs = (Expr**)ConstructorArgs.get();
- unsigned NumConsArgs = ConstructorArgs.size();
- ASTOwningVector<Expr*> ConvertedConstructorArgs(*this);
+ DiagnoseSentinelCalls(OperatorNew, PlacementLParen,
+ PlaceArgs, NumPlaceArgs);
- // Array 'new' can't have any initializers.
- if (NumConsArgs && (ResultType->isArrayType() || ArraySize)) {
- SourceRange InitRange(ConsArgs[0]->getLocStart(),
- ConsArgs[NumConsArgs - 1]->getLocEnd());
+ // FIXME: Missing call to CheckFunctionCall or equivalent
+ }
- Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
- return ExprError();
+ // Warn if the type is over-aligned and is being allocated by global operator
+ // new.
+ if (NumPlaceArgs == 0 && OperatorNew &&
+ (OperatorNew->isImplicit() ||
+ getSourceManager().isInSystemHeader(OperatorNew->getLocStart()))) {
+ if (unsigned Align = Context.getPreferredTypeAlign(AllocType.getTypePtr())){
+ unsigned SuitableAlign = Context.getTargetInfo().getSuitableAlign();
+ if (Align > SuitableAlign)
+ Diag(StartLoc, diag::warn_overaligned_type)
+ << AllocType
+ << unsigned(Align / Context.getCharWidth())
+ << unsigned(SuitableAlign / Context.getCharWidth());
+ }
+ }
+
+ QualType InitType = AllocType;
+  // Array 'new' can't have any initializers except empty parentheses or,
+  // in C++11, an initializer list. Rely on the parser for the
+ // dialect distinction.
+ if (ResultType->isArrayType() || ArraySize) {
+ if (!isLegalArrayNewInitializer(initStyle, Initializer)) {
+ SourceRange InitRange(Inits[0]->getLocStart(),
+ Inits[NumInits - 1]->getLocEnd());
+ Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
+ return ExprError();
+ }
+ if (InitListExpr *ILE = dyn_cast_or_null<InitListExpr>(Initializer)) {
+ // We do the initialization typechecking against the array type
+ // corresponding to the number of initializers + 1 (to also check
+ // default-initialization).
+ unsigned NumElements = ILE->getNumInits() + 1;
+ InitType = Context.getConstantArrayType(AllocType,
+ llvm::APInt(Context.getTypeSize(Context.getSizeType()), NumElements),
+ ArrayType::Normal, 0);
+ }
}
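
The initializer forms the check above accepts and rejects for array new (a
sketch; the braced form is C++11-only):

    int *a = new int[3]();        // OK: empty parens value-initialize
    int *b = new int[3]{1, 2};    // OK in C++11: the trailing element is 0
    // int *c = new int[3](1, 2); // error: parenthesized arguments are not
    //                            // allowed in an array new-initializer
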
if (!AllocType->isDependentType() &&
- !Expr::hasAnyTypeDependentArguments(ConsArgs, NumConsArgs)) {
- // C++0x [expr.new]p15:
+ !Expr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(Inits, NumInits))) {
+ // C++11 [expr.new]p15:
// A new-expression that creates an object of type T initializes that
// object as follows:
InitializationKind Kind
// - If the new-initializer is omitted, the object is default-
// initialized (8.5); if no initialization is performed,
// the object has indeterminate value
- = !Init? InitializationKind::CreateDefault(TypeRange.getBegin())
+ = initStyle == CXXNewExpr::NoInit
+ ? InitializationKind::CreateDefault(TypeRange.getBegin())
// - Otherwise, the new-initializer is interpreted according to the
// initialization rules of 8.5 for direct-initialization.
- : InitializationKind::CreateDirect(TypeRange.getBegin(),
- ConstructorLParen,
- ConstructorRParen);
+ : initStyle == CXXNewExpr::ListInit
+ ? InitializationKind::CreateDirectList(TypeRange.getBegin())
+ : InitializationKind::CreateDirect(TypeRange.getBegin(),
+ DirectInitRange.getBegin(),
+ DirectInitRange.getEnd());
InitializedEntity Entity
- = InitializedEntity::InitializeNew(StartLoc, AllocType);
- InitializationSequence InitSeq(*this, Entity, Kind, ConsArgs, NumConsArgs);
+ = InitializedEntity::InitializeNew(StartLoc, InitType);
+ InitializationSequence InitSeq(*this, Entity, Kind, Inits, NumInits);
ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
- move(ConstructorArgs));
+ MultiExprArg(Inits, NumInits));
if (FullInit.isInvalid())
return ExprError();
- // FullInit is our initializer; walk through it to determine if it's a
- // constructor call, which CXXNewExpr handles directly.
- if (Expr *FullInitExpr = (Expr *)FullInit.get()) {
- if (CXXBindTemporaryExpr *Binder
- = dyn_cast<CXXBindTemporaryExpr>(FullInitExpr))
- FullInitExpr = Binder->getSubExpr();
- if (CXXConstructExpr *Construct
- = dyn_cast<CXXConstructExpr>(FullInitExpr)) {
- Constructor = Construct->getConstructor();
- HadMultipleCandidates = Construct->hadMultipleCandidates();
- for (CXXConstructExpr::arg_iterator A = Construct->arg_begin(),
- AEnd = Construct->arg_end();
- A != AEnd; ++A)
- ConvertedConstructorArgs.push_back(*A);
- } else {
- // Take the converted initializer.
- ConvertedConstructorArgs.push_back(FullInit.release());
- }
- } else {
- // No initialization required.
- }
+ // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
+ // we don't want the initialized object to be destructed.
+ if (CXXBindTemporaryExpr *Binder =
+ dyn_cast_or_null<CXXBindTemporaryExpr>(FullInit.get()))
+ FullInit = Owned(Binder->getSubExpr());
- // Take the converted arguments and use them for the new expression.
- NumConsArgs = ConvertedConstructorArgs.size();
- ConsArgs = (Expr **)ConvertedConstructorArgs.take();
+ Initializer = FullInit.take();
}
// Mark the new and delete operators as referenced.
if (OperatorNew)
- MarkDeclarationReferenced(StartLoc, OperatorNew);
+ MarkFunctionReferenced(StartLoc, OperatorNew);
if (OperatorDelete)
- MarkDeclarationReferenced(StartLoc, OperatorDelete);
+ MarkFunctionReferenced(StartLoc, OperatorDelete);
// C++0x [expr.new]p17:
// If the new expression creates an array of objects of class type,
// access and ambiguity control are done for the destructor.
- if (ArraySize && Constructor) {
- if (CXXDestructorDecl *dtor = LookupDestructor(Constructor->getParent())) {
- MarkDeclarationReferenced(StartLoc, dtor);
- CheckDestructorAccess(StartLoc, dtor,
- PDiag(diag::err_access_dtor)
- << Context.getBaseElementType(AllocType));
+ QualType BaseAllocType = Context.getBaseElementType(AllocType);
+ if (ArraySize && !BaseAllocType->isDependentType()) {
+ if (const RecordType *BaseRecordType = BaseAllocType->getAs<RecordType>()) {
+ if (CXXDestructorDecl *dtor = LookupDestructor(
+ cast<CXXRecordDecl>(BaseRecordType->getDecl()))) {
+ MarkFunctionReferenced(StartLoc, dtor);
+ CheckDestructorAccess(StartLoc, dtor,
+ PDiag(diag::err_access_dtor)
+ << BaseAllocType);
+ DiagnoseUseOfDecl(dtor, StartLoc);
+ }
}
}
PlacementArgs.release();
- ConstructorArgs.release();
return Owned(new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew,
- PlaceArgs, NumPlaceArgs, TypeIdParens,
- ArraySize, Constructor, Init,
- ConsArgs, NumConsArgs,
- HadMultipleCandidates,
OperatorDelete,
UsualArrayDeleteWantsSize,
+ PlaceArgs, NumPlaceArgs, TypeIdParens,
+ ArraySize, initStyle, Initializer,
ResultType, AllocTypeInfo,
- StartLoc,
- Init ? ConstructorRParen :
- TypeRange.getEnd(),
- ConstructorLParen, ConstructorRParen));
+ StartLoc, DirectInitRange));
}
-/// CheckAllocatedType - Checks that a type is suitable as the allocated type
+/// \brief Checks that a type is suitable as the allocated type
/// in a new-expression.
-/// dimension off and stores the size expression in ArraySize.
bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R) {
// C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
@@ -1195,7 +1381,7 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
else if (unsigned AddressSpace = AllocType.getAddressSpace())
return Diag(Loc, diag::err_address_space_qualified_new)
<< AllocType.getUnqualifiedType() << AddressSpace;
- else if (getLangOptions().ObjCAutoRefCount) {
+ else if (getLangOpts().ObjCAutoRefCount) {
if (const ArrayType *AT = Context.getAsArrayType(AllocType)) {
QualType BaseAllocType = Context.getBaseElementType(AT);
if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
@@ -1283,7 +1469,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// We don't need an operator delete if we're running under
// -fno-exceptions.
- if (!getLangOptions().Exceptions) {
+ if (!getLangOpts().Exceptions) {
OperatorDelete = 0;
return false;
}
@@ -1401,7 +1587,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// as a placement deallocation function, would have been
// selected as a match for the allocation function, the program
// is ill-formed.
- if (NumPlaceArgs && getLangOptions().CPlusPlus0x &&
+ if (NumPlaceArgs && getLangOpts().CPlusPlus0x &&
isNonPlacementDeallocationFunction(OperatorDelete)) {
Diag(StartLoc, diag::err_placement_new_non_placement_delete)
<< SourceRange(PlaceArgs[0]->getLocStart(),
@@ -1447,14 +1633,16 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(),
- /*ExplicitTemplateArgs=*/0, Args, NumArgs,
+ /*ExplicitTemplateArgs=*/0,
+ llvm::makeArrayRef(Args, NumArgs),
Candidates,
/*SuppressUserConversions=*/false);
continue;
}
FunctionDecl *Fn = cast<FunctionDecl>(D);
- AddOverloadCandidate(Fn, Alloc.getPair(), Args, NumArgs, Candidates,
+ AddOverloadCandidate(Fn, Alloc.getPair(),
+ llvm::makeArrayRef(Args, NumArgs), Candidates,
/*SuppressUserConversions=*/false);
}
@@ -1464,7 +1652,7 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
case OR_Success: {
// Got one!
FunctionDecl *FnDecl = Best->Function;
- MarkDeclarationReferenced(StartLoc, FnDecl);
+ MarkFunctionReferenced(StartLoc, FnDecl);
// The first argument is size_t, and the first parameter must be size_t,
// too. This is checked on declaration and can be assumed. (It can't be
// asserted on, though, since invalid decls are left in there.)
@@ -1484,9 +1672,13 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
Args[i] = Result.takeAs<Expr>();
}
+
Operator = FnDecl;
- CheckAllocationAccess(StartLoc, Range, R.getNamingClass(), Best->FoundDecl,
- Diagnose);
+
+ if (CheckAllocationAccess(StartLoc, Range, R.getNamingClass(),
+ Best->FoundDecl, Diagnose) == AR_inaccessible)
+ return true;
+
return false;
}
@@ -1494,7 +1686,8 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
if (Diagnose) {
Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
<< Name << Range;
- Candidates.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ Candidates.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
}
return true;
@@ -1502,7 +1695,8 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
if (Diagnose) {
Diag(StartLoc, diag::err_ovl_ambiguous_call)
<< Name << Range;
- Candidates.NoteCandidates(*this, OCD_ViableCandidates, Args, NumArgs);
+ Candidates.NoteCandidates(*this, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
}
return true;
@@ -1513,7 +1707,8 @@ bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
<< Name
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Range;
- Candidates.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ Candidates.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
}
return true;
}
@@ -1568,7 +1763,7 @@ void Sema::DeclareGlobalNewDelete() {
// lookup.
// Note that the C++0x versions of operator delete are deallocation functions,
// and thus are implicitly noexcept.
- if (!StdBadAlloc && !getLangOptions().CPlusPlus0x) {
+ if (!StdBadAlloc && !getLangOpts().CPlusPlus0x) {
// The "std::bad_alloc" class has not yet been declared, so build it
// implicitly.
StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class,
@@ -1583,7 +1778,7 @@ void Sema::DeclareGlobalNewDelete() {
QualType VoidPtr = Context.getPointerType(Context.VoidTy);
QualType SizeT = Context.getSizeType();
- bool AssumeSaneOperatorNew = getLangOptions().AssumeSaneOperatorNew;
+ bool AssumeSaneOperatorNew = getLangOpts().AssumeSaneOperatorNew;
DeclareGlobalAllocationFunction(
Context.DeclarationNames.getCXXOperatorName(OO_New),
@@ -1631,20 +1826,20 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
bool HasBadAllocExceptionSpec
= (Name.getCXXOverloadedOperator() == OO_New ||
Name.getCXXOverloadedOperator() == OO_Array_New);
- if (HasBadAllocExceptionSpec && !getLangOptions().CPlusPlus0x) {
+ if (HasBadAllocExceptionSpec && !getLangOpts().CPlusPlus0x) {
assert(StdBadAlloc && "Must have std::bad_alloc declared");
BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
}
FunctionProtoType::ExtProtoInfo EPI;
if (HasBadAllocExceptionSpec) {
- if (!getLangOptions().CPlusPlus0x) {
+ if (!getLangOpts().CPlusPlus0x) {
EPI.ExceptionSpecType = EST_Dynamic;
EPI.NumExceptions = 1;
EPI.Exceptions = &BadAllocType;
}
} else {
- EPI.ExceptionSpecType = getLangOptions().CPlusPlus0x ?
+ EPI.ExceptionSpecType = getLangOpts().CPlusPlus0x ?
EST_BasicNoexcept : EST_DynamicNone;
}
@@ -1704,13 +1899,15 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
if (Operator->isDeleted()) {
if (Diagnose) {
Diag(StartLoc, diag::err_deleted_function_use);
- Diag(Operator->getLocation(), diag::note_unavailable_here) << true;
+ NoteDeletedFunction(Operator);
}
return true;
}
- CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
- Matches[0], Diagnose);
+ if (CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
+ Matches[0], Diagnose) == AR_inaccessible)
+ return true;
+
return false;
// We found multiple suitable operators; complain about the ambiguity.
@@ -1777,6 +1974,9 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
bool UsualArrayDeleteWantsSize = false;
if (!Ex.get()->isTypeDependent()) {
+ // Perform lvalue-to-rvalue cast, if needed.
+ Ex = DefaultLvalueConversion(Ex.take());
+
QualType Type = Ex.get()->getType();
if (const RecordType *Record = Type->getAs<RecordType>()) {
@@ -1864,8 +2064,8 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// (5.2.11) of the pointer expression before it is used as the operand
// of the delete-expression. ]
if (!Context.hasSameType(Ex.get()->getType(), Context.VoidPtrTy))
- Ex = Owned(ImplicitCastExpr::Create(Context, Context.VoidPtrTy, CK_NoOp,
- Ex.take(), 0, VK_RValue));
+ Ex = Owned(ImplicitCastExpr::Create(Context, Context.VoidPtrTy,
+ CK_BitCast, Ex.take(), 0, VK_RValue));
if (Pointee->isArrayType() && !ArrayForm) {
Diag(StartLoc, diag::warn_delete_array_type)
@@ -1898,9 +2098,9 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
UsualArrayDeleteWantsSize = (OperatorDelete->getNumParams() == 2);
}
- if (!PointeeRD->hasTrivialDestructor())
+ if (!PointeeRD->hasIrrelevantDestructor())
if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
- MarkDeclarationReferenced(StartLoc,
+ MarkFunctionReferenced(StartLoc,
const_cast<CXXDestructorDecl*>(Dtor));
DiagnoseUseOfDecl(Dtor, StartLoc);
}
@@ -1929,7 +2129,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
}
}
- } else if (getLangOptions().ObjCAutoRefCount &&
+ } else if (getLangOpts().ObjCAutoRefCount &&
PointeeElem->isObjCLifetimeType() &&
(PointeeElem.getObjCLifetime() == Qualifiers::OCL_Strong ||
PointeeElem.getObjCLifetime() == Qualifiers::OCL_Weak) &&
@@ -1949,7 +2149,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
return ExprError();
}
- MarkDeclarationReferenced(StartLoc, OperatorDelete);
+ MarkFunctionReferenced(StartLoc, OperatorDelete);
// Check access and ambiguity of operator delete and destructor.
if (PointeeRD) {
@@ -1986,11 +2186,16 @@ ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
<< ConditionVar->getSourceRange());
ExprResult Condition =
- Owned(DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
- ConditionVar,
- ConditionVar->getLocation(),
- ConditionVar->getType().getNonReferenceType(),
+ Owned(DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
+ SourceLocation(),
+ ConditionVar,
+ /*enclosing*/ false,
+ ConditionVar->getLocation(),
+ ConditionVar->getType().getNonReferenceType(),
VK_LValue));
+
+ MarkDeclRefReferenced(cast<DeclRefExpr>(Condition.get()));
+
if (ConvertToBoolean) {
Condition = CheckBooleanCondition(Condition.take(), StmtLoc);
if (Condition.isInvalid())
@@ -2071,8 +2276,9 @@ static ExprResult BuildCXXCastArgument(Sema &S,
CastLoc, ConstructorArgs))
return ExprError();
- S.CheckConstructorAccess(CastLoc, Constructor, Constructor->getAccess(),
- S.PDiag(diag::err_access_ctor));
+ S.CheckConstructorAccess(CastLoc, Constructor,
+ InitializedEntity::InitializeTemporary(Ty),
+ Constructor->getAccess());
ExprResult Result
= S.BuildCXXConstructExpr(CastLoc, Ty, cast<CXXConstructorDecl>(Method),
@@ -2089,10 +2295,17 @@ static ExprResult BuildCXXCastArgument(Sema &S,
assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");
// Create an implicit call expr that calls it.
- ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Method,
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(Method);
+ ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Conv,
HadMultipleCandidates);
if (Result.isInvalid())
return ExprError();
+ // Record usage of conversion in an implicit cast.
+ Result = S.Owned(ImplicitCastExpr::Create(S.Context,
+ Result.get()->getType(),
+ CK_UserDefinedConversion,
+ Result.get(), 0,
+ Result.get()->getValueKind()));
S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ 0, FoundDecl);
@@ -2126,6 +2339,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
FunctionDecl *FD = ICS.UserDefined.ConversionFunction;
CastKind CastKind;
QualType BeforeToType;
+ assert(FD && "FIXME: aggregate initialization from init list");
if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(FD)) {
CastKind = CK_UserDefinedConversion;
@@ -2243,7 +2457,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (!Fn)
return ExprError();
- if (DiagnoseUseOfDecl(Fn, From->getSourceRange().getBegin()))
+ if (DiagnoseUseOfDecl(Fn, From->getLocStart()))
return ExprError();
From = FixOverloadedFunctionReference(From, Found, Fn);
@@ -2256,20 +2470,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Nothing to do.
break;
- case ICK_Lvalue_To_Rvalue:
- // Should this get its own ICK?
- if (From->getObjectKind() == OK_ObjCProperty) {
- ExprResult FromRes = ConvertPropertyForRValue(From);
- if (FromRes.isInvalid())
- return ExprError();
- From = FromRes.take();
- if (!From->isGLValue()) break;
- }
-
+ case ICK_Lvalue_To_Rvalue: {
+ assert(From->getObjectKind() != OK_ObjCProperty);
FromType = FromType.getUnqualifiedType();
- From = ImplicitCastExpr::Create(Context, FromType, CK_LValueToRValue,
- From, 0, VK_RValue);
+ ExprResult FromRes = DefaultLvalueConversion(From);
+ assert(!FromRes.isInvalid() && "Can't perform deduced conversion?!");
+ From = FromRes.take();
break;
+ }
case ICK_Array_To_Pointer:
FromType = Context.getArrayDecayedType(FromType);
@@ -2358,12 +2566,12 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (SCS.IncompatibleObjC && Action != AA_Casting) {
// Diagnose incompatible Objective-C conversions
if (Action == AA_Initializing || Action == AA_Assigning)
- Diag(From->getSourceRange().getBegin(),
+ Diag(From->getLocStart(),
diag::ext_typecheck_convert_incompatible_pointer)
<< ToType << From->getType() << Action
<< From->getSourceRange() << 0;
else
- Diag(From->getSourceRange().getBegin(),
+ Diag(From->getLocStart(),
diag::ext_typecheck_convert_incompatible_pointer)
<< From->getType() << ToType << Action
<< From->getSourceRange() << 0;
@@ -2372,14 +2580,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
ToType->isObjCObjectPointerType())
EmitRelatedResultTypeNote(From);
}
- else if (getLangOptions().ObjCAutoRefCount &&
+ else if (getLangOpts().ObjCAutoRefCount &&
!CheckObjCARCUnavailableWeakConversion(ToType,
From->getType())) {
if (Action == AA_Initializing)
- Diag(From->getSourceRange().getBegin(),
+ Diag(From->getLocStart(),
diag::err_arc_weak_unavailable_assign);
else
- Diag(From->getSourceRange().getBegin(),
+ Diag(From->getLocStart(),
diag::err_arc_convesion_of_weak_unavailable)
<< (Action == AA_Casting) << From->getType() << ToType
<< From->getSourceRange();
@@ -2546,7 +2754,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
CK_NoOp, VK, /*BasePath=*/0, CCK).take();
if (SCS.DeprecatedStringLiteralToCharPtr &&
- !getLangOptions().WritableStrings)
+ !getLangOpts().WritableStrings)
Diag(From->getLocStart(), diag::warn_deprecated_string_literal_conversion)
<< ToType.getNonReferenceType();
@@ -2557,6 +2765,13 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
llvm_unreachable("Improper third standard conversion");
}
+ // If this conversion sequence involved a scalar -> atomic conversion, perform
+ // that conversion now.
+ if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>())
+ if (Context.hasSameType(ToAtomic->getValueType(), From->getType()))
+ From = ImpCastExprToType(From, ToType, CK_NonAtomicToAtomic, VK_RValue, 0,
+ CCK).take();
+
return Owned(From);
}
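A minimal illustration of the conversion appended here, written with the C11 _Atomic spelling that Clang also accepts in C++ (variable names are illustrative):

_Atomic(int) counter = 1;   // int -> _Atomic(int): CK_NonAtomicToAtomic

void bump(void) {
  counter = 2;              // the RHS undergoes the same conversion
}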
@@ -2645,6 +2860,9 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S,
case UTT_IsAbstract:
// Fall-through
+ // These traits require a complete type.
+ case UTT_IsFinal:
+
// These trait expressions are designed to help implement predicates in
// [meta.unary.prop] despite not being named the same. They are specified
// by both GCC and the Embarcadero C++ compiler, and require the complete
@@ -2770,6 +2988,10 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
return RD->isAbstract();
return false;
+ case UTT_IsFinal:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return RD->hasAttr<FinalAttr>();
+ return false;
case UTT_IsSigned:
return T->isSignedIntegerType();
case UTT_IsUnsigned:
@@ -3020,6 +3242,126 @@ ExprResult Sema::ActOnBinaryTypeTrait(BinaryTypeTrait BTT,
return BuildBinaryTypeTrait(BTT, KWLoc, LhsTSInfo, RhsTSInfo, RParen);
}
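The UTT_IsFinal trait added above surfaces as the __is_final intrinsic; a small sketch of what it evaluates (class names are illustrative):

struct Base {};
struct Sealed final {};

static_assert(!__is_final(Base), "");
static_assert( __is_final(Sealed), "");
// Per CheckUnaryTypeTraitTypeCompleteness, the operand must be complete;
// applying __is_final to a forward-declared class is diagnosed.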
+static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ switch (Kind) {
+ case clang::TT_IsTriviallyConstructible: {
+ // C++11 [meta.unary.prop]:
+ // is_trivially_constructible is defined as:
+ //
+ // is_constructible<T, Args...>::value is true and the variable
+ // definition for is_constructible, as defined below, is known to call no
+ // operation that is not trivial.
+ //
+ // The predicate condition for a template specialization
+ // is_constructible<T, Args...> shall be satisfied if and only if the
+ // following variable definition would be well-formed for some invented
+ // variable t:
+ //
+ // T t(create<Args>()...);
+ if (Args.empty()) {
+ S.Diag(KWLoc, diag::err_type_trait_arity)
+ << 1 << 1 << 1 << (int)Args.size();
+ return false;
+ }
+
+ bool SawVoid = false;
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isVoidType()) {
+ SawVoid = true;
+ continue;
+ }
+
+ // The remaining arguments must be complete types, (possibly cv-qualified)
+ // void, or arrays of unknown bound; 'void' was collected above, so only
+ // incomplete arrays are exempt from the completeness requirement.
+ if (!Args[I]->getType()->isIncompleteArrayType() &&
+ S.RequireCompleteType(KWLoc, Args[I]->getType(),
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+ }
+
+ // If any argument was 'void', of course it won't type-check.
+ if (SawVoid)
+ return false;
+
+ llvm::SmallVector<OpaqueValueExpr, 2> OpaqueArgExprs;
+ llvm::SmallVector<Expr *, 2> ArgExprs;
+ // Reserve both vectors up front: ArgExprs stores pointers into
+ // OpaqueArgExprs, so OpaqueArgExprs must never reallocate once the loop
+ // below starts taking addresses of its elements.
+ OpaqueArgExprs.reserve(Args.size() - 1);
+ ArgExprs.reserve(Args.size() - 1);
+ for (unsigned I = 1, N = Args.size(); I != N; ++I) {
+ QualType T = Args[I]->getType();
+ if (T->isObjectType() || T->isFunctionType())
+ T = S.Context.getRValueReferenceType(T);
+ OpaqueArgExprs.push_back(
+ OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(),
+ T.getNonLValueExprType(S.Context),
+ Expr::getValueKindForType(T)));
+ ArgExprs.push_back(&OpaqueArgExprs.back());
+ }
+
+ // Perform the initialization in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
+ InitializedEntity To(InitializedEntity::InitializeTemporary(Args[0]));
+ InitializationKind InitKind(InitializationKind::CreateDirect(KWLoc, KWLoc,
+ RParenLoc));
+ InitializationSequence Init(S, To, InitKind,
+ ArgExprs.begin(), ArgExprs.size());
+ if (Init.Failed())
+ return false;
+
+ ExprResult Result = Init.Perform(S, To, InitKind,
+ MultiExprArg(ArgExprs.data(),
+ ArgExprs.size()));
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return false;
+
+ // The initialization succeeded; now make sure there are no non-trivial
+ // calls.
+ return !Result.get()->hasNonTrivialCall(S.Context);
+ }
+ }
+
+ return false;
+}
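Through the ActOnTypeTrait/BuildTypeTrait entry points that follow, this evaluation backs the variadic __is_trivially_constructible intrinsic; a sketch of the intended results (type names are illustrative):

struct Trivial  { int n; };
struct UserCtor { UserCtor(); };

static_assert( __is_trivially_constructible(Trivial), "");
static_assert( __is_trivially_constructible(Trivial, const Trivial &), "");
static_assert(!__is_trivially_constructible(UserCtor), "");      // non-trivial
static_assert(!__is_trivially_constructible(Trivial, void), ""); // SawVoid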
+
+ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ bool Dependent = false;
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isDependentType()) {
+ Dependent = true;
+ break;
+ }
+ }
+
+ bool Value = false;
+ if (!Dependent)
+ Value = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc);
+
+ return TypeTraitExpr::Create(Context, Context.BoolTy, KWLoc, Kind,
+ Args, RParenLoc, Value);
+}
+
+ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<ParsedType> Args,
+ SourceLocation RParenLoc) {
+ llvm::SmallVector<TypeSourceInfo *, 4> ConvertedArgs;
+ ConvertedArgs.reserve(Args.size());
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ TypeSourceInfo *TInfo;
+ QualType T = GetTypeFromParser(Args[I], &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, KWLoc);
+
+ ConvertedArgs.push_back(TInfo);
+ }
+
+ return BuildTypeTrait(Kind, KWLoc, ConvertedArgs, RParenLoc);
+}
+
static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
QualType LhsT, QualType RhsT,
SourceLocation KeyLoc) {
@@ -3097,8 +3439,9 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc,
SourceLocation()));
- // Perform the initialization within a SFINAE trap at translation unit
- // scope.
+ // Perform the initialization in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated);
Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
InitializationSequence Init(Self, To, Kind, &FromPtr, 1);
@@ -3108,6 +3451,54 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
ExprResult Result = Init.Perform(Self, To, Kind, MultiExprArg(&FromPtr, 1));
return !Result.isInvalid() && !SFINAE.hasErrorOccurred();
}
+
+ case BTT_IsTriviallyAssignable: {
+ // C++11 [meta.unary.prop]p3:
+ // is_trivially_assignable is defined as:
+ // is_assignable<T, U>::value is true and the assignment, as defined by
+ // is_assignable, is known to call no operation that is not trivial
+ //
+ // is_assignable is defined as:
+ // The expression declval<T>() = declval<U>() is well-formed when
+ // treated as an unevaluated operand (Clause 5).
+ //
+ // For both, T and U shall be complete types, (possibly cv-qualified)
+ // void, or arrays of unknown bound.
+ if (!LhsT->isVoidType() && !LhsT->isIncompleteArrayType() &&
+ Self.RequireCompleteType(KeyLoc, LhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+ if (!RhsT->isVoidType() && !RhsT->isIncompleteArrayType() &&
+ Self.RequireCompleteType(KeyLoc, RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+
+ // cv void is never assignable.
+ if (LhsT->isVoidType() || RhsT->isVoidType())
+ return false;
+
+ // Build expressions that emulate the effect of declval<T>() and
+ // declval<U>().
+ if (LhsT->isObjectType() || LhsT->isFunctionType())
+ LhsT = Self.Context.getRValueReferenceType(LhsT);
+ if (RhsT->isObjectType() || RhsT->isFunctionType())
+ RhsT = Self.Context.getRValueReferenceType(RhsT);
+ OpaqueValueExpr Lhs(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(LhsT));
+ OpaqueValueExpr Rhs(KeyLoc, RhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(RhsT));
+
+ // Attempt the assignment in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated);
+ Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
+ ExprResult Result = Self.BuildBinOp(/*S=*/0, KeyLoc, BO_Assign, &Lhs, &Rhs);
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return false;
+
+ return !Result.get()->hasNonTrivialCall(Self.Context);
+ }
}
llvm_unreachable("Unknown type trait or not implemented");
}
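This case implements the __is_trivially_assignable(T, U) intrinsic, whose result type is wired up in the BuildBinaryTypeTrait hunk below; a sketch of the intended results (type names are illustrative):

struct Pod      { int n; };
struct UserCopy { UserCopy &operator=(const UserCopy &); };

static_assert( __is_trivially_assignable(Pod &, const Pod &), "");
static_assert(!__is_trivially_assignable(UserCopy &, const UserCopy &), "");
static_assert(!__is_trivially_assignable(const Pod &, Pod &), ""); // not assignable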
@@ -3121,7 +3512,7 @@ ExprResult Sema::BuildBinaryTypeTrait(BinaryTypeTrait BTT,
QualType RhsT = RhsTSInfo->getType();
if (BTT == BTT_TypeCompatible) {
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
Diag(KWLoc, diag::err_types_compatible_p_in_cplusplus)
<< SourceRange(KWLoc, RParen);
return ExprError();
@@ -3140,6 +3531,7 @@ ExprResult Sema::BuildBinaryTypeTrait(BinaryTypeTrait BTT,
case BTT_IsSame: ResultType = Context.BoolTy; break;
case BTT_TypeCompatible: ResultType = Context.IntTy; break;
case BTT_IsConvertibleTo: ResultType = Context.BoolTy; break;
+ case BTT_IsTriviallyAssignable: ResultType = Context.BoolTy; break;
}
return Owned(new (Context) BinaryTypeTraitExpr(KWLoc, BTT, LhsTSInfo,
@@ -3180,18 +3572,16 @@ static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT,
case ATT_ArrayExtent: {
llvm::APSInt Value;
uint64_t Dim;
- if (DimExpr->isIntegerConstantExpr(Value, Self.Context, 0, false)) {
- if (Value < llvm::APSInt(Value.getBitWidth(), Value.isUnsigned())) {
- Self.Diag(KeyLoc, diag::err_dimension_expr_not_constant_integer) <<
- DimExpr->getSourceRange();
- return false;
- }
- Dim = Value.getLimitedValue();
- } else {
- Self.Diag(KeyLoc, diag::err_dimension_expr_not_constant_integer) <<
- DimExpr->getSourceRange();
- return false;
+ if (Self.VerifyIntegerConstantExpression(DimExpr, &Value,
+ Self.PDiag(diag::err_dimension_expr_not_constant_integer),
+ false).isInvalid())
+ return 0;
+ if (Value.isSigned() && Value.isNegative()) {
+ Self.Diag(KeyLoc, diag::err_dimension_expr_not_constant_integer)
+ << DimExpr->getSourceRange();
+ return 0;
}
+ Dim = Value.getLimitedValue();
if (T->isArrayType()) {
unsigned D = 0;
@@ -3540,7 +3930,7 @@ static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS
break;
RHS = move(RHSRes);
if (Best->Function)
- Self.MarkDeclarationReferenced(QuestionLoc, Best->Function);
+ Self.MarkFunctionReferenced(QuestionLoc, Best->Function);
return false;
}
@@ -3832,7 +4222,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
if (NonStandardCompositeType)
*NonStandardCompositeType = false;
- assert(getLangOptions().CPlusPlus && "This function assumes C++");
+ assert(getLangOpts().CPlusPlus && "This function assumes C++");
QualType T1 = E1->getType(), T2 = E2->getType();
if (!T1->isAnyPointerType() && !T1->isMemberPointerType() &&
@@ -4042,7 +4432,7 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
// In ARC, calls that return a retainable type can return retained,
// in which case we have to insert a consuming cast.
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
E->getType()->isObjCRetainableType()) {
bool ReturnsRetained;
@@ -4077,6 +4467,12 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
} else if (isa<StmtExpr>(E)) {
ReturnsRetained = true;
+ // We hit this case with the lambda conversion-to-block optimization;
+ // we don't want any extra casts here.
+ } else if (isa<CastExpr>(E) &&
+ isa<BlockExpr>(cast<CastExpr>(E)->getSubExpr())) {
+ return Owned(E);
+
// For message sends and property references, we try to find an
// actual method. FIXME: we should infer retention by selector in
// cases where we don't have an actual method.
@@ -4084,11 +4480,13 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
ObjCMethodDecl *D = 0;
if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(E)) {
D = Send->getMethodDecl();
- } else {
- CastExpr *CE = cast<CastExpr>(E);
- assert(CE->getCastKind() == CK_GetObjCProperty);
- const ObjCPropertyRefExpr *PRE = CE->getSubExpr()->getObjCProperty();
- D = (PRE->isImplicitProperty() ? PRE->getImplicitPropertyGetter() : 0);
+ } else if (ObjCNumericLiteral *NumLit = dyn_cast<ObjCNumericLiteral>(E)) {
+ D = NumLit->getObjCNumericLiteralMethod();
+ } else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(E)) {
+ D = ArrayLit->getArrayWithObjectsMethod();
+ } else if (ObjCDictionaryLiteral *DictLit
+ = dyn_cast<ObjCDictionaryLiteral>(E)) {
+ D = DictLit->getDictWithObjectsMethod();
}
ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>());
@@ -4101,6 +4499,10 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
return Owned(E);
}
+ // Don't reclaim an object of Class type.
+ if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType())
+ return Owned(E);
+
ExprNeedsCleanups = true;
CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
@@ -4109,51 +4511,60 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
VK_RValue));
}
- if (!getLangOptions().CPlusPlus)
+ if (!getLangOpts().CPlusPlus)
return Owned(E);
- const RecordType *RT = E->getType()->getAs<RecordType>();
- if (!RT)
- return Owned(E);
+ // Search for the base element type (cf. ASTContext::getBaseElementType) with
+ // a fast path for the common case that the type is directly a RecordType.
+ const Type *T = Context.getCanonicalType(E->getType().getTypePtr());
+ const RecordType *RT = 0;
+ while (!RT) {
+ switch (T->getTypeClass()) {
+ case Type::Record:
+ RT = cast<RecordType>(T);
+ break;
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ T = cast<ArrayType>(T)->getElementType().getTypePtr();
+ break;
+ default:
+ return Owned(E);
+ }
+ }
- // That should be enough to guarantee that this type is complete.
- // If it has a trivial destructor, we can avoid the extra copy.
+ // That should be enough to guarantee that this type is complete, if we're
+ // not processing a decltype expression.
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->isInvalidDecl() || RD->hasTrivialDestructor())
+ if (RD->isInvalidDecl() || RD->isDependentContext())
return Owned(E);
- CXXDestructorDecl *Destructor = LookupDestructor(RD);
+ bool IsDecltype = ExprEvalContexts.back().IsDecltype;
+ CXXDestructorDecl *Destructor = IsDecltype ? 0 : LookupDestructor(RD);
- CXXTemporary *Temp = CXXTemporary::Create(Context, Destructor);
if (Destructor) {
- MarkDeclarationReferenced(E->getExprLoc(), Destructor);
+ MarkFunctionReferenced(E->getExprLoc(), Destructor);
CheckDestructorAccess(E->getExprLoc(), Destructor,
PDiag(diag::err_access_dtor_temp)
<< E->getType());
+ DiagnoseUseOfDecl(Destructor, E->getExprLoc());
+
+ // If destructor is trivial, we can avoid the extra copy.
+ if (Destructor->isTrivial())
+ return Owned(E);
- ExprTemporaries.push_back(Temp);
+ // We need a cleanup, but we don't need to remember the temporary.
ExprNeedsCleanups = true;
}
- return Owned(CXXBindTemporaryExpr::Create(Context, Temp, E));
-}
-Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
- assert(SubExpr && "sub expression can't be null!");
-
- unsigned FirstTemporary = ExprEvalContexts.back().NumTemporaries;
- assert(ExprTemporaries.size() >= FirstTemporary);
- assert(ExprNeedsCleanups || ExprTemporaries.size() == FirstTemporary);
- if (!ExprNeedsCleanups)
- return SubExpr;
+ CXXTemporary *Temp = CXXTemporary::Create(Context, Destructor);
+ CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(Context, Temp, E);
- Expr *E = ExprWithCleanups::Create(Context, SubExpr,
- ExprTemporaries.begin() + FirstTemporary,
- ExprTemporaries.size() - FirstTemporary);
- ExprTemporaries.erase(ExprTemporaries.begin() + FirstTemporary,
- ExprTemporaries.end());
- ExprNeedsCleanups = false;
+ if (IsDecltype)
+ ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Bind);
- return E;
+ return Owned(Bind);
}
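A sketch of the common path through the rewritten function above (names are illustrative): a prvalue of class type with a non-trivial destructor is wrapped in a CXXBindTemporaryExpr and a cleanup is recorded, while trivially destructible and invalid types return early.

struct S { ~S(); };
S make();

void f() {
  make();  // bound as a temporary; since ~S() is non-trivial,
           // ExprNeedsCleanups is set and ~S() runs at the end of
           // the full-expression
}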
ExprResult
@@ -4164,9 +4575,32 @@ Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
return Owned(MaybeCreateExprWithCleanups(SubExpr.take()));
}
+Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
+ assert(SubExpr && "sub expression can't be null!");
+
+ CleanupVarDeclMarking();
+
+ unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects;
+ assert(ExprCleanupObjects.size() >= FirstCleanup);
+ assert(ExprNeedsCleanups || ExprCleanupObjects.size() == FirstCleanup);
+ if (!ExprNeedsCleanups)
+ return SubExpr;
+
+ ArrayRef<ExprWithCleanups::CleanupObject> Cleanups
+ = llvm::makeArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
+ ExprCleanupObjects.size() - FirstCleanup);
+
+ Expr *E = ExprWithCleanups::Create(Context, SubExpr, Cleanups);
+ DiscardCleanupsInEvaluationContext();
+
+ return E;
+}
+
Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
assert(SubStmt && "sub statement can't be null!");
+ CleanupVarDeclMarking();
+
if (!ExprNeedsCleanups)
return SubStmt;
@@ -4182,6 +4616,95 @@ Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
return MaybeCreateExprWithCleanups(E);
}
+/// Process the expression contained within a decltype. For such expressions,
+/// certain semantic checks on temporaries are delayed until this point, and
+/// are omitted for the 'topmost' call in the decltype expression. If the
+/// topmost call bound a temporary, strip that temporary off the expression.
+ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
+ ExpressionEvaluationContextRecord &Rec = ExprEvalContexts.back();
+ assert(Rec.IsDecltype && "not in a decltype expression");
+
+ // C++11 [expr.call]p11:
+ // If a function call is a prvalue of object type,
+ // -- if the function call is either
+ // -- the operand of a decltype-specifier, or
+ // -- the right operand of a comma operator that is the operand of a
+ // decltype-specifier,
+ // a temporary object is not introduced for the prvalue.
+
+ // Recursively rebuild ParenExprs and comma expressions to strip out the
+ // outermost CXXBindTemporaryExpr, if any.
+ if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ ExprResult SubExpr = ActOnDecltypeExpression(PE->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+ if (SubExpr.get() == PE->getSubExpr())
+ return Owned(E);
+ return ActOnParenExpr(PE->getLParen(), PE->getRParen(), SubExpr.take());
+ }
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ ExprResult RHS = ActOnDecltypeExpression(BO->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+ if (RHS.get() == BO->getRHS())
+ return Owned(E);
+ return Owned(new (Context) BinaryOperator(BO->getLHS(), RHS.take(),
+ BO_Comma, BO->getType(),
+ BO->getValueKind(),
+ BO->getObjectKind(),
+ BO->getOperatorLoc()));
+ }
+ }
+
+ CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(E);
+ if (TopBind)
+ E = TopBind->getSubExpr();
+
+ // Disable the special decltype handling now.
+ Rec.IsDecltype = false;
+
+ // Perform the semantic checks we delayed until this point.
+ CallExpr *TopCall = dyn_cast<CallExpr>(E);
+ for (unsigned I = 0, N = Rec.DelayedDecltypeCalls.size(); I != N; ++I) {
+ CallExpr *Call = Rec.DelayedDecltypeCalls[I];
+ if (Call == TopCall)
+ continue;
+
+ if (CheckCallReturnType(Call->getCallReturnType(),
+ Call->getLocStart(),
+ Call, Call->getDirectCallee()))
+ return ExprError();
+ }
+
+ // Now all relevant types are complete, check the destructors are accessible
+ // and non-deleted, and annotate them on the temporaries.
+ for (unsigned I = 0, N = Rec.DelayedDecltypeBinds.size(); I != N; ++I) {
+ CXXBindTemporaryExpr *Bind = Rec.DelayedDecltypeBinds[I];
+ if (Bind == TopBind)
+ continue;
+
+ CXXTemporary *Temp = Bind->getTemporary();
+
+ CXXRecordDecl *RD =
+ Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ CXXDestructorDecl *Destructor = LookupDestructor(RD);
+ Temp->setDestructor(Destructor);
+
+ MarkFunctionReferenced(E->getExprLoc(), Destructor);
+ CheckDestructorAccess(E->getExprLoc(), Destructor,
+ PDiag(diag::err_access_dtor_temp)
+ << E->getType());
+ DiagnoseUseOfDecl(Destructor, E->getExprLoc());
+
+ // We need a cleanup, but we don't need to remember the temporary.
+ ExprNeedsCleanups = true;
+ }
+
+ // Possibly strip off the top CXXBindTemporaryExpr.
+ return Owned(E);
+}
+
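A sketch of the C++11 rule quoted above as it reaches this function (Incomplete and make are illustrative names):

struct Incomplete;    // intentionally never defined
Incomplete make();

// The call is the operand of the decltype-specifier, so no temporary is
// introduced: the return type need not be complete and no destructor is
// required or referenced.
typedef decltype(make()) T1;

// The same applies to the right operand of a comma operator inside the
// decltype operand.
typedef decltype((0, make())) T2;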
ExprResult
Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind, ParsedType &ObjectType,
@@ -4191,6 +4714,10 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
if (Result.isInvalid()) return ExprError();
Base = Result.get();
+ Result = CheckPlaceholderExpr(Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.take();
+
QualType BaseType = Base->getType();
MayBePseudoDestructor = false;
if (BaseType->isDependentType()) {
@@ -4232,21 +4759,26 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
}
}
- if (BaseType->isPointerType())
+ if (BaseType->isPointerType() || BaseType->isObjCObjectPointerType())
BaseType = BaseType->getPointeeType();
}
- // We could end up with various non-record types here, such as extended
- // vector types or Objective-C interfaces. Just return early and let
- // ActOnMemberReferenceExpr do the work.
- if (!BaseType->isRecordType()) {
- // C++ [basic.lookup.classref]p2:
- // [...] If the type of the object expression is of pointer to scalar
- // type, the unqualified-id is looked up in the context of the complete
- // postfix-expression.
- //
- // This also indicates that we should be parsing a
- // pseudo-destructor-name.
+ // Objective-C properties allow "." access on Objective-C pointer types,
+ // so adjust the base type to the object type itself.
+ if (BaseType->isObjCObjectPointerType())
+ BaseType = BaseType->getPointeeType();
+
+ // C++ [basic.lookup.classref]p2:
+ // [...] If the type of the object expression is of pointer to scalar
+ // type, the unqualified-id is looked up in the context of the complete
+ // postfix-expression.
+ //
+ // This also indicates that we could be parsing a pseudo-destructor-name.
+ // Note that Objective-C class and object types can be pseudo-destructor
+ // expressions or normal member (ivar or property) access expressions.
+ if (BaseType->isObjCObjectOrInterfaceType()) {
+ MayBePseudoDestructor = true;
+ } else if (!BaseType->isRecordType()) {
ObjectType = ParsedType();
MayBePseudoDestructor = true;
return Owned(Base);
@@ -4281,40 +4813,60 @@ ExprResult Sema::DiagnoseDtorReference(SourceLocation NameLoc,
/*RPLoc*/ ExpectedLParenLoc);
}
-ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
- SourceLocation OpLoc,
- tok::TokenKind OpKind,
- const CXXScopeSpec &SS,
- TypeSourceInfo *ScopeTypeInfo,
- SourceLocation CCLoc,
- SourceLocation TildeLoc,
- PseudoDestructorTypeStorage Destructed,
- bool HasTrailingLParen) {
- TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();
+static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
+ tok::TokenKind& OpKind, SourceLocation OpLoc) {
+ if (Base->hasPlaceholderType()) {
+ ExprResult result = S.CheckPlaceholderExpr(Base);
+ if (result.isInvalid()) return true;
+ Base = result.take();
+ }
+ ObjectType = Base->getType();
// C++ [expr.pseudo]p2:
// The left-hand side of the dot operator shall be of scalar type. The
// left-hand side of the arrow operator shall be of pointer to scalar type.
// This scalar type is the object type.
- QualType ObjectType = Base->getType();
+ // Note that this is rather different from the normal handling for the
+ // arrow operator.
if (OpKind == tok::arrow) {
if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
ObjectType = Ptr->getPointeeType();
} else if (!Base->isTypeDependent()) {
// The user wrote "p->" when she probably meant "p."; fix it.
- Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
<< ObjectType << true
<< FixItHint::CreateReplacement(OpLoc, ".");
- if (isSFINAEContext())
- return ExprError();
+ if (S.isSFINAEContext())
+ return true;
OpKind = tok::period;
}
}
+ return false;
+}
+
+ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ const CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeTypeInfo,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage Destructed,
+ bool HasTrailingLParen) {
+ TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();
+
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
+
if (!ObjectType->isDependentType() && !ObjectType->isScalarType()) {
- Diag(OpLoc, diag::err_pseudo_dtor_base_not_scalar)
- << ObjectType << Base->getSourceRange();
+ if (getLangOpts().MicrosoftMode && ObjectType->isVoidType())
+ Diag(OpLoc, diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
+ else
+ Diag(OpLoc, diag::err_pseudo_dtor_base_not_scalar)
+ << ObjectType << Base->getSourceRange();
return ExprError();
}
@@ -4410,25 +4962,9 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) &&
"Invalid second type name in pseudo-destructor");
- // C++ [expr.pseudo]p2:
- // The left-hand side of the dot operator shall be of scalar type. The
- // left-hand side of the arrow operator shall be of pointer to scalar type.
- // This scalar type is the object type.
- QualType ObjectType = Base->getType();
- if (OpKind == tok::arrow) {
- if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
- ObjectType = Ptr->getPointeeType();
- } else if (!ObjectType->isDependentType()) {
- // The user wrote "p->" when she probably meant "p."; fix it.
- Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
- << ObjectType << true
- << FixItHint::CreateReplacement(OpLoc, ".");
- if (isSFINAEContext())
- return ExprError();
-
- OpKind = tok::period;
- }
- }
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
// Compute the object type that we should use for name lookup purposes. Only
// record types and dependent types matter.
@@ -4476,6 +5012,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
TypeResult T = ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->TemplateNameLoc,
TemplateId->LAngleLoc,
@@ -4525,6 +5062,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
TypeResult T = ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
TemplateId->Template,
TemplateId->TemplateNameLoc,
TemplateId->LAngleLoc,
@@ -4548,9 +5086,60 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
Destructed, HasTrailingLParen);
}
+ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ SourceLocation TildeLoc,
+ const DeclSpec& DS,
+ bool HasTrailingLParen) {
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
+
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+
+ TypeLocBuilder TLB;
+ DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
+ DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
+ PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
+
+ return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, CXXScopeSpec(),
+ 0, SourceLocation(), TildeLoc,
+ Destructed, HasTrailingLParen);
+}
+
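Together with CheckArrow above, the new overload covers the C++11 decltype spelling of a pseudo-destructor-name; the accepted forms look like this (names are illustrative):

typedef int Int;

void destroy(int *p) {
  p->~Int();          // classic pseudo-destructor call on a scalar; no effect
  int n = 0;
  n.~decltype(n)();   // C++11: the type is named by a decltype-specifier
}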
ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
- CXXMethodDecl *Method,
+ CXXConversionDecl *Method,
bool HadMultipleCandidates) {
+ if (Method->getParent()->isLambda() &&
+ Method->getConversionType()->isBlockPointerType()) {
+ // This is a lambda conversion to block pointer; check if the argument
+ // is a LambdaExpr.
+ Expr *SubE = E;
+ CastExpr *CE = dyn_cast<CastExpr>(SubE);
+ if (CE && CE->getCastKind() == CK_NoOp)
+ SubE = CE->getSubExpr();
+ SubE = SubE->IgnoreParens();
+ if (CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(SubE))
+ SubE = BE->getSubExpr();
+ if (isa<LambdaExpr>(SubE)) {
+ // For the conversion to block pointer on a lambda expression, we
+ // construct a special BlockLiteral instead; this doesn't really make
+ // a difference in ARC, but outside of ARC the resulting block literal
+ // follows the normal lifetime rules for block literals instead of being
+ // autoreleased.
+ DiagnosticErrorTrap Trap(Diags);
+ ExprResult Exp = BuildBlockForLambdaConversion(E->getExprLoc(),
+ E->getExprLoc(),
+ Method, E);
+ if (Exp.isInvalid())
+ Diag(E->getExprLoc(), diag::note_lambda_to_block_conv);
+ return Exp;
+ }
+ }
+
ExprResult Exp = PerformObjectArgumentInitialization(E, /*Qualifier=*/0,
FoundDecl, Method);
if (Exp.isInvalid())
@@ -4558,7 +5147,7 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
MemberExpr *ME =
new (Context) MemberExpr(Exp.take(), /*IsArrow=*/false, Method,
- SourceLocation(), Method->getType(),
+ SourceLocation(), Context.BoundMemberTy,
VK_RValue, OK_Ordinary);
if (HadMultipleCandidates)
ME->setHadMultipleCandidates(true);
@@ -4567,7 +5156,7 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
ExprValueKind VK = Expr::getValueKindForType(ResultType);
ResultType = ResultType.getNonLValueExprType(Context);
- MarkDeclarationReferenced(Exp.get()->getLocStart(), Method);
+ MarkFunctionReferenced(Exp.get()->getLocStart(), Method);
CXXMemberCallExpr *CE =
new (Context) CXXMemberCallExpr(Context, ME, 0, 0, ResultType, VK,
Exp.get()->getLocEnd());
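For orientation, the case special-cased at the top of this function arises in Objective-C++ (or C++ with -fblocks) when a lambda is converted to a block pointer through the closure type's conversion function; a sketch (enqueue is an illustrative name):

void enqueue(void (^work)(void));

void schedule() {
  // Lambda-to-block conversion: BuildBlockForLambdaConversion emits a
  // block literal directly, so outside ARC it follows the usual
  // block-literal lifetime rules instead of being autoreleased.
  enqueue([] { /* work */ });
}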
@@ -4589,6 +5178,12 @@ ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
/// Perform the conversions required for an expression used in a
/// context that ignores the result.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
+ if (E->hasPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return Owned(E);
+ E = result.take();
+ }
+
// C99 6.3.2.1:
// [Except in specific positions,] an lvalue that does not have
// array type is converted to the value stored in the
@@ -4598,22 +5193,14 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
// are r-values, but we still want to do function-to-pointer decay
// on them. This is both technically correct and convenient for
// some clients.
- if (!getLangOptions().CPlusPlus && E->getType()->isFunctionType())
+ if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
return DefaultFunctionArrayConversion(E);
return Owned(E);
}
- // We always want to do this on ObjC property references.
- if (E->getObjectKind() == OK_ObjCProperty) {
- ExprResult Res = ConvertPropertyForRValue(E);
- if (Res.isInvalid()) return Owned(E);
- E = Res.take();
- if (E->isRValue()) return Owned(E);
- }
-
// Otherwise, this rule does not apply in C++, at least not for the moment.
- if (getLangOptions().CPlusPlus) return Owned(E);
+ if (getLangOpts().CPlusPlus) return Owned(E);
// GCC seems to also exclude expressions of incomplete enum type.
if (const EnumType *T = E->getType()->getAs<EnumType>()) {
@@ -4644,6 +5231,15 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE) {
if (DiagnoseUnexpandedParameterPack(FullExpr.get()))
return ExprError();
+ // Top-level message sends default to 'id' when we're in a debugger.
+ if (getLangOpts().DebuggerCastResultToId &&
+ FullExpr.get()->getType() == Context.UnknownAnyTy &&
+ isa<ObjCMessageExpr>(FullExpr.get())) {
+ FullExpr = forceUnknownAnyToType(FullExpr.take(), Context.getObjCIdType());
+ if (FullExpr.isInvalid())
+ return ExprError();
+ }
+
FullExpr = CheckPlaceholderExpr(FullExpr.take());
if (FullExpr.isInvalid())
return ExprError();
@@ -4662,17 +5258,58 @@ StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
return MaybeCreateStmtWithCleanups(FullStmt);
}
-bool Sema::CheckMicrosoftIfExistsSymbol(CXXScopeSpec &SS,
- UnqualifiedId &Name) {
- DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
+Sema::IfExistsResult
+Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
+ CXXScopeSpec &SS,
+ const DeclarationNameInfo &TargetNameInfo) {
DeclarationName TargetName = TargetNameInfo.getName();
if (!TargetName)
- return false;
-
+ return IER_DoesNotExist;
+
+ // If the name itself is dependent, then the result is dependent.
+ if (TargetName.isDependentName())
+ return IER_Dependent;
+
// Do the redeclaration lookup in the current scope.
LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
Sema::NotForRedeclaration);
+ LookupParsedName(R, S, &SS);
R.suppressDiagnostics();
- LookupParsedName(R, getCurScope(), &SS);
- return !R.empty();
+
+ switch (R.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ case LookupResult::Ambiguous:
+ return IER_Exists;
+
+ case LookupResult::NotFound:
+ return IER_DoesNotExist;
+
+ case LookupResult::NotFoundInCurrentInstantiation:
+ return IER_Dependent;
+ }
+
+ llvm_unreachable("Invalid LookupResult Kind!");
+}
+
+Sema::IfExistsResult
+Sema::CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
+ bool IsIfExists, CXXScopeSpec &SS,
+ UnqualifiedId &Name) {
+ DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
+
+ // Check for unexpanded parameter packs.
+ SmallVector<UnexpandedParameterPack, 4> Unexpanded;
+ collectUnexpandedParameterPacks(SS, Unexpanded);
+ collectUnexpandedParameterPacks(TargetNameInfo, Unexpanded);
+ if (!Unexpanded.empty()) {
+ DiagnoseUnexpandedParameterPacks(KeywordLoc,
+ IsIfExists? UPPC_IfExists
+ : UPPC_IfNotExists,
+ Unexpanded);
+ return IER_Error;
+ }
+
+ return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
}
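These result kinds back the Microsoft __if_exists / __if_not_exists extension; a sketch of the outcomes, assuming -fms-extensions (names are illustrative):

struct S { void f(); };

template <typename T> int probe() {
  __if_exists(T::f)     { return 1; }  // IER_Exists for T = S
  __if_not_exists(T::g) { return 2; }  // IER_DoesNotExist for T = S
  return 0;
}
// With a dependent name the result is IER_Dependent and the check is
// repeated at instantiation; probe<S>() returns 1.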
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
index 26867c2..26b88a2 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
@@ -56,7 +56,7 @@ enum IMAKind {
/// The reference may be an implicit instance member access.
IMA_Mixed,
- /// The reference may be to an instance member, but it is invalid if
+ /// The reference may be to an instance member, but it might be invalid if
/// so, because the context is not an instance method.
IMA_Mixed_StaticContext,
@@ -74,6 +74,11 @@ enum IMAKind {
/// context is not an instance method.
IMA_Unresolved_StaticContext,
+ /// The reference refers to a field which is not a member of the containing
+ /// class, which is allowed because we're in C++11 mode and the context is
+ /// unevaluated.
+ IMA_Field_Uneval_Context,
+
/// All possible referents are instance members and the current
/// context is not an instance method.
IMA_Error_StaticContext,
@@ -86,7 +91,7 @@ enum IMAKind {
/// The given lookup names class member(s) and is not being used for
/// an address-of-member expression. Classify the type of access
/// according to whether it's possible that this reference names an
-/// instance member. This is best-effort; it is okay to
+/// instance member. This is best-effort in dependent contexts; it is okay to
/// conservatively answer "yes", in which case some errors will simply
/// not be caught until template-instantiation.
static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
@@ -112,14 +117,14 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
// Collect all the declaring classes of instance members we find.
bool hasNonInstance = false;
- bool hasField = false;
+ bool isField = false;
llvm::SmallPtrSet<CXXRecordDecl*, 4> Classes;
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
NamedDecl *D = *I;
if (D->isCXXInstanceMember()) {
if (dyn_cast<FieldDecl>(D))
- hasField = true;
+ isField = true;
CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
Classes.insert(R->getCanonicalDecl());
@@ -133,27 +138,28 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
if (Classes.empty())
return IMA_Static;
+ bool IsCXX11UnevaluatedField = false;
+ if (SemaRef.getLangOpts().CPlusPlus0x && isField) {
+ // C++11 [expr.prim.general]p12:
+ // An id-expression that denotes a non-static data member or non-static
+ // member function of a class can only be used:
+ // (...)
+ // - if that id-expression denotes a non-static data member and it
+ // appears in an unevaluated operand.
+ const Sema::ExpressionEvaluationContextRecord& record
+ = SemaRef.ExprEvalContexts.back();
+ if (record.Context == Sema::Unevaluated)
+ IsCXX11UnevaluatedField = true;
+ }
+
// If the current context is not an instance method, it can't be
// an implicit member reference.
if (isStaticContext) {
if (hasNonInstance)
- return IMA_Mixed_StaticContext;
-
- if (SemaRef.getLangOptions().CPlusPlus0x && hasField) {
- // C++0x [expr.prim.general]p10:
- // An id-expression that denotes a non-static data member or non-static
- // member function of a class can only be used:
- // (...)
- // - if that id-expression denotes a non-static data member and it
- // appears in an unevaluated operand.
- const Sema::ExpressionEvaluationContextRecord& record
- = SemaRef.ExprEvalContexts.back();
- bool isUnevaluatedExpression = (record.Context == Sema::Unevaluated);
- if (isUnevaluatedExpression)
- return IMA_Mixed_StaticContext;
- }
-
- return IMA_Error_StaticContext;
+ return IMA_Mixed_StaticContext;
+
+ return IsCXX11UnevaluatedField ? IMA_Field_Uneval_Context
+ : IMA_Error_StaticContext;
}
CXXRecordDecl *contextClass;
@@ -169,76 +175,93 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
// ...if C is not X or a base class of X, the class member access expression
// is ill-formed.
if (R.getNamingClass() &&
- contextClass != R.getNamingClass()->getCanonicalDecl() &&
+ contextClass->getCanonicalDecl() !=
+ R.getNamingClass()->getCanonicalDecl() &&
contextClass->isProvablyNotDerivedFrom(R.getNamingClass()))
- return (hasNonInstance ? IMA_Mixed_Unrelated : IMA_Error_Unrelated);
+ return hasNonInstance ? IMA_Mixed_Unrelated :
+ IsCXX11UnevaluatedField ? IMA_Field_Uneval_Context :
+ IMA_Error_Unrelated;
// If we can prove that the current context is unrelated to all the
// declaring classes, it can't be an implicit member reference (in
// which case it's an error if any of those members are selected).
if (IsProvablyNotDerivedFrom(SemaRef, contextClass, Classes))
- return (hasNonInstance ? IMA_Mixed_Unrelated : IMA_Error_Unrelated);
+ return hasNonInstance ? IMA_Mixed_Unrelated :
+ IsCXX11UnevaluatedField ? IMA_Field_Uneval_Context :
+ IMA_Error_Unrelated;
return (hasNonInstance ? IMA_Mixed : IMA_Instance);
}
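The new IMA_Field_Uneval_Context classification implements the C++11 allowance quoted above; a minimal example of code it accepts, which C++98 rejects and which the warn_cxx98_compat diagnostic below flags (names are illustrative):

struct S { int x; };

// No object is needed: S::x appears only in unevaluated operands.
enum { N = sizeof(S::x) };
char buffer[sizeof(S::x)];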
/// Diagnose a reference to a field with no object available.
-static void DiagnoseInstanceReference(Sema &SemaRef,
+static void diagnoseInstanceReference(Sema &SemaRef,
const CXXScopeSpec &SS,
- NamedDecl *rep,
+ NamedDecl *Rep,
const DeclarationNameInfo &nameInfo) {
SourceLocation Loc = nameInfo.getLoc();
SourceRange Range(Loc);
if (SS.isSet()) Range.setBegin(SS.getRange().getBegin());
-
- if (isa<FieldDecl>(rep) || isa<IndirectFieldDecl>(rep)) {
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(SemaRef.CurContext)) {
- if (MD->isStatic()) {
- // "invalid use of member 'x' in static member function"
- SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method)
+
+ DeclContext *FunctionLevelDC = SemaRef.getFunctionLevelDeclContext();
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FunctionLevelDC);
+ CXXRecordDecl *ContextClass = Method ? Method->getParent() : 0;
+ CXXRecordDecl *RepClass = dyn_cast<CXXRecordDecl>(Rep->getDeclContext());
+
+ bool InStaticMethod = Method && Method->isStatic();
+ bool IsField = isa<FieldDecl>(Rep) || isa<IndirectFieldDecl>(Rep);
+
+ if (IsField && InStaticMethod)
+ // "invalid use of member 'x' in static member function"
+ SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method)
<< Range << nameInfo.getName();
- return;
- }
- }
-
+ else if (ContextClass && RepClass && SS.isEmpty() && !InStaticMethod &&
+ !RepClass->Equals(ContextClass) && RepClass->Encloses(ContextClass))
+ // Unqualified lookup in a non-static member function found a member of an
+ // enclosing class.
+ SemaRef.Diag(Loc, diag::err_nested_non_static_member_use)
+ << IsField << RepClass << nameInfo.getName() << ContextClass << Range;
+ else if (IsField)
SemaRef.Diag(Loc, diag::err_invalid_non_static_member_use)
- << nameInfo.getName() << Range;
- return;
- }
-
- SemaRef.Diag(Loc, diag::err_member_call_without_object) << Range;
+ << nameInfo.getName() << Range;
+ else
+ SemaRef.Diag(Loc, diag::err_member_call_without_object)
+ << Range;
}
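The new err_nested_non_static_member_use branch fires when unqualified lookup inside a member function of a nested class finds a non-static member of an enclosing class; a sketch of code that reaches it (names are illustrative):

struct Outer {
  int datum;
  struct Inner {
    void f() {
      datum = 0;  // error: non-static member of the enclosing class
                  // 'Outer' used without an object of that type
    }
  };
};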
/// Builds an expression which might be an implicit member expression.
ExprResult
Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs) {
switch (ClassifyImplicitMemberAccess(*this, CurScope, R)) {
case IMA_Instance:
- return BuildImplicitMemberExpr(SS, R, TemplateArgs, true);
+ return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true);
case IMA_Mixed:
case IMA_Mixed_Unrelated:
case IMA_Unresolved:
- return BuildImplicitMemberExpr(SS, R, TemplateArgs, false);
+ return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, false);
+ case IMA_Field_Uneval_Context:
+ Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use)
+ << R.getLookupNameInfo().getName();
+ // Fall through.
case IMA_Static:
case IMA_Mixed_StaticContext:
case IMA_Unresolved_StaticContext:
- if (TemplateArgs)
- return BuildTemplateIdExpr(SS, R, false, *TemplateArgs);
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, false, TemplateArgs);
return BuildDeclarationNameExpr(SS, R, false);
case IMA_Error_StaticContext:
case IMA_Error_Unrelated:
- DiagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(),
+ diagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(),
R.getLookupNameInfo());
return ExprError();
}
llvm_unreachable("unexpected instance member access kind");
- return ExprError();
}
/// Check an ext-vector component access expression.
@@ -403,6 +426,7 @@ ExprResult
Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
@@ -417,7 +441,7 @@ Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
// allows this, while still reporting an error if T is a struct pointer.
if (!IsArrow) {
const PointerType *PT = BaseType->getAs<PointerType>();
- if (PT && (!getLangOptions().ObjC1 ||
+ if (PT && (!getLangOpts().ObjC1 ||
PT->getPointeeType()->isRecordType())) {
assert(BaseExpr && "cannot happen with implicit member accesses");
Diag(NameInfo.getLoc(), diag::err_typecheck_member_reference_struct_union)
@@ -435,6 +459,7 @@ Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
return Owned(CXXDependentScopeMemberExpr::Create(Context, BaseExpr, BaseType,
IsArrow, OpLoc,
SS.getWithLocInContext(Context),
+ TemplateKWLoc,
FirstQualifierInScope,
NameInfo, TemplateArgs));
}
@@ -451,7 +476,7 @@ static void DiagnoseQualifiedMemberReference(Sema &SemaRef,
// If this is an implicit member access, use a different set of
// diagnostics.
if (!BaseExpr)
- return DiagnoseInstanceReference(SemaRef, SS, rep, nameInfo);
+ return diagnoseInstanceReference(SemaRef, SS, rep, nameInfo);
SemaRef.Diag(nameInfo.getLoc(), diag::err_qualified_member_of_unrelated)
<< SS.getRange() << rep << BaseType;
@@ -509,6 +534,20 @@ bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
return true;
}
+namespace {
+
+// Callback to only accept typo corrections that are either a ValueDecl or a
+// FunctionTemplateDecl.
+class RecordMemberExprValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ return ND && (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND));
+ }
+};
+
+}
+
static bool
LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
SourceRange BaseRange, const RecordType *RTy,
@@ -559,17 +598,16 @@ LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
// We didn't find anything with the given name, so try to correct
// for typos.
DeclarationName Name = R.getLookupName();
+ RecordMemberExprValidatorCCC Validator;
TypoCorrection Corrected = SemaRef.CorrectTypo(R.getLookupNameInfo(),
R.getLookupKind(), NULL,
- &SS, DC, false,
- Sema::CTC_MemberLookup);
- NamedDecl *ND = Corrected.getCorrectionDecl();
+ &SS, Validator, DC);
R.clear();
- if (ND && (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND))) {
+ if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
std::string CorrectedStr(
- Corrected.getAsString(SemaRef.getLangOptions()));
+ Corrected.getAsString(SemaRef.getLangOpts()));
std::string CorrectedQuotedStr(
- Corrected.getQuoted(SemaRef.getLangOptions()));
+ Corrected.getQuoted(SemaRef.getLangOpts()));
R.setLookupName(Corrected.getCorrection());
R.addDecl(ND);
SemaRef.Diag(R.getNameLoc(), diag::err_no_member_suggest)
@@ -586,6 +624,7 @@ ExprResult
Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
@@ -593,7 +632,7 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
(SS.isSet() && isDependentScopeSpecifier(SS)))
return ActOnDependentMemberExpr(Base, BaseType,
IsArrow, OpLoc,
- SS, FirstQualifierInScope,
+ SS, TemplateKWLoc, FirstQualifierInScope,
NameInfo, TemplateArgs);
LookupResult R(*this, NameInfo, LookupMemberName);
@@ -631,8 +670,8 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
}
return BuildMemberReferenceExpr(Base, BaseType,
- OpLoc, IsArrow, SS, FirstQualifierInScope,
- R, TemplateArgs);
+ OpLoc, IsArrow, SS, TemplateKWLoc,
+ FirstQualifierInScope, R, TemplateArgs);
}
static ExprResult
@@ -697,7 +736,7 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
// We've found a member of an anonymous struct/union that is
// inside a non-anonymous struct/union, so in a well-formed
// program our base object expression is "this".
- QualType ThisTy = getAndCaptureCurrentThisType();
+ QualType ThisTy = getCurrentThisType();
if (ThisTy.isNull()) {
Diag(loc, diag::err_invalid_member_use_in_static_method)
<< indirectField->getDeclName();
@@ -705,6 +744,7 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
}
// Our base object expression is "this".
+ CheckCXXThisCapture(loc);
baseObjectExpr
= new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/ true);
baseObjectIsPointer = true;
@@ -754,22 +794,30 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
}
/// \brief Build a MemberExpr AST node.
-static MemberExpr *BuildMemberExpr(ASTContext &C, Expr *Base, bool isArrow,
- const CXXScopeSpec &SS, ValueDecl *Member,
+static MemberExpr *BuildMemberExpr(Sema &SemaRef,
+ ASTContext &C, Expr *Base, bool isArrow,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *Member,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo,
QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = 0) {
- return MemberExpr::Create(C, Base, isArrow, SS.getWithLocInContext(C),
- Member, FoundDecl, MemberNameInfo,
- TemplateArgs, Ty, VK, OK);
+ assert((!isArrow || Base->isRValue()) && "-> base must be a pointer rvalue");
+ MemberExpr *E =
+ MemberExpr::Create(C, Base, isArrow, SS.getWithLocInContext(C),
+ TemplateKWLoc, Member, FoundDecl, MemberNameInfo,
+ TemplateArgs, Ty, VK, OK);
+ SemaRef.MarkMemberReferenced(E);
+ return E;
}
ExprResult
Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
SourceLocation OpLoc, bool IsArrow,
const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
@@ -777,7 +825,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
QualType BaseType = BaseExprType;
if (IsArrow) {
assert(BaseType->isPointerType());
- BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+ BaseType = BaseType->castAs<PointerType>()->getPointeeType();
}
R.setBaseObjectType(BaseType);
@@ -813,7 +861,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
!SuppressQualifierCheck &&
CheckQualifiedMemberReference(BaseExpr, BaseType, SS, R))
return ExprError();
-
+
// Construct an unresolved result if we in fact got an unresolved
// result.
if (R.isOverloadedResult() || R.isUnresolvableResult()) {
@@ -826,7 +874,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
BaseExpr, BaseExprType,
IsArrow, OpLoc,
SS.getWithLocInContext(Context),
- MemberNameInfo,
+ TemplateKWLoc, MemberNameInfo,
TemplateArgs, R.begin(), R.end());
return Owned(MemExpr);
@@ -853,6 +901,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
SourceLocation Loc = R.getNameLoc();
if (SS.getRange().isValid())
Loc = SS.getRange().getBegin();
+ CheckCXXThisCapture(Loc);
BaseExpr = new (Context) CXXThisExpr(Loc, BaseExprType,/*isImplicit=*/true);
}
@@ -870,15 +919,6 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
return ExprError();
}
- // Perform a property load on the base regardless of whether we
- // actually need it for the declaration.
- if (BaseExpr->getObjectKind() == OK_ObjCProperty) {
- ExprResult Result = ConvertPropertyForRValue(BaseExpr);
- if (Result.isInvalid())
- return ExprError();
- BaseExpr = Result.take();
- }
-
if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl))
return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow,
SS, FD, FoundDecl, MemberNameInfo);
@@ -890,9 +930,8 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
BaseExpr, OpLoc);
if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) {
- MarkDeclarationReferenced(MemberLoc, Var);
- return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
- Var, FoundDecl, MemberNameInfo,
+ return Owned(BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS,
+ TemplateKWLoc, Var, FoundDecl, MemberNameInfo,
Var->getType().getNonReferenceType(),
VK_LValue, OK_Ordinary));
}
@@ -908,17 +947,16 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
type = MemberFn->getType();
}
- MarkDeclarationReferenced(MemberLoc, MemberDecl);
- return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
- MemberFn, FoundDecl, MemberNameInfo,
- type, valueKind, OK_Ordinary));
+ return Owned(BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS,
+ TemplateKWLoc, MemberFn, FoundDecl,
+ MemberNameInfo, type, valueKind,
+ OK_Ordinary));
}
assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?");
if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
- MarkDeclarationReferenced(MemberLoc, MemberDecl);
- return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
- Enum, FoundDecl, MemberNameInfo,
+ return Owned(BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS,
+ TemplateKWLoc, Enum, FoundDecl, MemberNameInfo,
Enum->getType(), VK_RValue, OK_Ordinary));
}
@@ -979,6 +1017,15 @@ static bool isPointerToRecordType(QualType T) {
return false;
}
+/// Perform conversions on the LHS of a member access expression.
+ExprResult
+Sema::PerformMemberExprBaseConversion(Expr *Base, bool IsArrow) {
+ if (IsArrow && !Base->getType()->isFunctionType())
+ return DefaultFunctionArrayLvalueConversion(Base);
+
+ return CheckPlaceholderExpr(Base);
+}
+
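PerformMemberExprBaseConversion splits the base conversions by operator: an arrow base is decayed to an rvalue pointer, while a dot base only has placeholders resolved so it remains an lvalue. A plain C++ reminder of why the two cases want different treatment:

    struct S { int m; };

    int main() {
        S s{41};
        S *p = &s;
        p->m += 1;       // the base 'p' is read as an rvalue pointer
        s.m  += 0;       // the base 's' stays an lvalue; no decay wanted
        return s.m - 42; // 0 on success
    }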
/// Look up the given member of the given non-type-dependent
/// expression. This can return in one of two ways:
/// * If it returns a sentinel null-but-valid result, the caller will
@@ -997,16 +1044,10 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
assert(BaseExpr.get() && "no base expression");
// Perform default conversions.
- BaseExpr = DefaultFunctionArrayConversion(BaseExpr.take());
+ BaseExpr = PerformMemberExprBaseConversion(BaseExpr.take(), IsArrow);
if (BaseExpr.isInvalid())
return ExprError();
- if (IsArrow) {
- BaseExpr = DefaultLvalueConversion(BaseExpr.take());
- if (BaseExpr.isInvalid())
- return ExprError();
- }
-
QualType BaseType = BaseExpr.get()->getType();
assert(!BaseType->isDependentType());
@@ -1034,7 +1075,7 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
<< BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
<< FixItHint::CreateReplacement(OpLoc, ".");
IsArrow = false;
- } else if (BaseType == Context.BoundMemberTy) {
+ } else if (BaseType->isFunctionType()) {
goto fail;
} else {
Diag(MemberLoc, diag::err_typecheck_member_reference_arrow)
@@ -1071,15 +1112,17 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
// - an interface
ObjCInterfaceDecl *IDecl = OTy->getInterface();
if (!IDecl) {
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
(OTy->isObjCId() || OTy->isObjCClass()))
goto fail;
// There's an implicit 'isa' ivar on all objects.
// But we only actually find it this way on objects of type 'id',
- // apparently.
- if (OTy->isObjCId() && Member->isStr("isa"))
+ // apparently.
+ if (OTy->isObjCId() && Member->isStr("isa")) {
+ Diag(MemberLoc, diag::warn_objc_isa_use);
return Owned(new (Context) ObjCIsaExpr(BaseExpr.take(), IsArrow, MemberLoc,
Context.getObjCClassType()));
+ }
if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
@@ -1087,17 +1130,22 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
goto fail;
}
- ObjCInterfaceDecl *ClassDeclared;
+ if (RequireCompleteType(OpLoc, BaseType,
+ PDiag(diag::err_typecheck_incomplete_tag)
+ << BaseExpr.get()->getSourceRange()))
+ return ExprError();
+
+ ObjCInterfaceDecl *ClassDeclared = 0;
ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared);
if (!IV) {
// Attempt to correct for typos in ivar names.
- LookupResult Res(*this, R.getLookupName(), R.getNameLoc(),
- LookupMemberName);
- TypoCorrection Corrected = CorrectTypo(
- R.getLookupNameInfo(), LookupMemberName, NULL, NULL, IDecl, false,
- IsArrow ? CTC_ObjCIvarLookup : CTC_ObjCPropertyLookup);
- if ((IV = Corrected.getCorrectionDeclAs<ObjCIvarDecl>())) {
+ DeclFilterCCC<ObjCIvarDecl> Validator;
+ Validator.IsObjCIvarLookup = IsArrow;
+ if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(),
+ LookupMemberName, NULL, NULL,
+ Validator, IDecl)) {
+ IV = Corrected.getCorrectionDeclAs<ObjCIvarDecl>();
Diag(R.getNameLoc(),
diag::err_typecheck_member_reference_ivar_suggest)
<< IDecl->getDeclName() << MemberName << IV->getDeclName()
@@ -1105,6 +1153,13 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
IV->getNameAsString());
Diag(IV->getLocation(), diag::note_previous_decl)
<< IV->getDeclName();
+
+ // Figure out the class that declares the ivar.
+ assert(!ClassDeclared);
+ Decl *D = cast<Decl>(IV->getDeclContext());
+ if (ObjCCategoryDecl *CAT = dyn_cast<ObjCCategoryDecl>(D))
+ D = CAT->getClassInterface();
+ ClassDeclared = cast<ObjCInterfaceDecl>(D);
} else {
if (IsArrow && IDecl->FindPropertyDeclaration(Member)) {
Diag(MemberLoc,
@@ -1113,8 +1168,6 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
<< FixItHint::CreateReplacement(OpLoc, ".");
return ExprError();
}
- Res.clear();
- Res.setLookupName(Member);
Diag(MemberLoc, diag::err_typecheck_member_reference_ivar)
<< IDecl->getDeclName() << MemberName
@@ -1122,6 +1175,8 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
return ExprError();
}
}
+
+ assert(ClassDeclared);
// If the decl being referenced had an error, return an error for this
// sub-expr without emitting another error, in order to avoid cascading
@@ -1151,18 +1206,19 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
dyn_cast<ObjCCategoryImplDecl>(ObjCImpDecl))
ClassOfMethodDecl = CatImplClass->getClassInterface();
}
-
- if (IV->getAccessControl() == ObjCIvarDecl::Private) {
- if (ClassDeclared != IDecl ||
- ClassOfMethodDecl != ClassDeclared)
- Diag(MemberLoc, diag::error_private_ivar_access)
+ if (!getLangOpts().DebuggerSupport) {
+ if (IV->getAccessControl() == ObjCIvarDecl::Private) {
+ if (!declaresSameEntity(ClassDeclared, IDecl) ||
+ !declaresSameEntity(ClassOfMethodDecl, ClassDeclared))
+ Diag(MemberLoc, diag::error_private_ivar_access)
+ << IV->getDeclName();
+ } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
+ // @protected
+ Diag(MemberLoc, diag::error_protected_ivar_access)
<< IV->getDeclName();
- } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
- // @protected
- Diag(MemberLoc, diag::error_protected_ivar_access)
- << IV->getDeclName();
+ }
}
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
Expr *BaseExp = BaseExpr.get()->IgnoreParenImpCasts();
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(BaseExp))
if (UO->getOpcode() == UO_Deref)
@@ -1209,11 +1265,8 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
if (DiagnoseUseOfDecl(PD, MemberLoc))
return ExprError();
- QualType T = PD->getType();
- if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
- T = getMessageSendResultType(BaseType, Getter, false, false);
-
- return Owned(new (Context) ObjCPropertyRefExpr(PD, T,
+ return Owned(new (Context) ObjCPropertyRefExpr(PD,
+ Context.PseudoObjectTy,
VK_LValue,
OK_ObjCProperty,
MemberLoc,
@@ -1231,16 +1284,10 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
if (Decl *SDecl = FindGetterSetterNameDecl(OPT, /*Property id*/0,
SetterSel, Context))
SMD = dyn_cast<ObjCMethodDecl>(SDecl);
- QualType PType = getMessageSendResultType(BaseType, OMD, false,
- false);
- ExprValueKind VK = VK_LValue;
- if (!getLangOptions().CPlusPlus && PType.isCForbiddenLValueType())
- VK = VK_RValue;
- ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
-
- return Owned(new (Context) ObjCPropertyRefExpr(OMD, SMD, PType,
- VK, OK,
+ return Owned(new (Context) ObjCPropertyRefExpr(OMD, SMD,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
MemberLoc, BaseExpr.take()));
}
}
@@ -1295,23 +1342,9 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
return ExprError();
if (Getter || Setter) {
- QualType PType;
-
- ExprValueKind VK = VK_LValue;
- if (Getter) {
- PType = getMessageSendResultType(QualType(OT, 0), Getter, true,
- false);
- if (!getLangOptions().CPlusPlus && PType.isCForbiddenLValueType())
- VK = VK_RValue;
- } else {
- // Get the expression type from Setter's incoming parameter.
- PType = (*(Setter->param_end() -1))->getType();
- }
- ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
-
- // FIXME: we must check that the setter has property type.
return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
- PType, VK, OK,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
MemberLoc, BaseExpr.take()));
}
@@ -1384,7 +1417,7 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
if (tryToRecoverWithCall(BaseExpr,
PDiag(diag::err_member_reference_needs_call),
/*complain*/ false,
- IsArrow ? &isRecordType : &isPointerToRecordType)) {
+ IsArrow ? &isPointerToRecordType : &isRecordType)) {
if (BaseExpr.isInvalid())
return ExprError();
BaseExpr = DefaultFunctionArrayConversion(BaseExpr.take());
@@ -1415,6 +1448,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
Decl *ObjCImpDecl,
bool HasTrailingLParen) {
@@ -1422,7 +1456,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
return ExprError();
// Warn about the explicit constructor calls Microsoft extension.
- if (getLangOptions().MicrosoftExt &&
+ if (getLangOpts().MicrosoftExt &&
Id.getKind() == UnqualifiedId::IK_ConstructorName)
Diag(Id.getSourceRange().getBegin(),
diag::ext_ms_explicit_constructor_call);
@@ -1451,7 +1485,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
isDependentScopeSpecifier(SS)) {
Result = ActOnDependentMemberExpr(Base, Base->getType(),
IsArrow, OpLoc,
- SS, FirstQualifierInScope,
+ SS, TemplateKWLoc, FirstQualifierInScope,
NameInfo, TemplateArgs);
} else {
LookupResult R(*this, NameInfo, LookupMemberName);
@@ -1480,8 +1514,8 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
}
Result = BuildMemberReferenceExpr(Base, Base->getType(),
- OpLoc, IsArrow, SS, FirstQualifierInScope,
- R, TemplateArgs);
+ OpLoc, IsArrow, SS, TemplateKWLoc,
+ FirstQualifierInScope, R, TemplateArgs);
}
return move(Result);
@@ -1536,13 +1570,13 @@ BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
MemberType = S.Context.getQualifiedType(MemberType, Combined);
}
- S.MarkDeclarationReferenced(MemberNameInfo.getLoc(), Field);
ExprResult Base =
S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
FoundDecl, Field);
if (Base.isInvalid())
return ExprError();
- return S.Owned(BuildMemberExpr(S.Context, Base.take(), IsArrow, SS,
+ return S.Owned(BuildMemberExpr(S, S.Context, Base.take(), IsArrow, SS,
+ /*TemplateKWLoc=*/SourceLocation(),
Field, FoundDecl, MemberNameInfo,
MemberType, VK, OK));
}
@@ -1553,6 +1587,7 @@ BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
/// is from an appropriate type.
ExprResult
Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsKnownInstance) {
@@ -1569,7 +1604,7 @@ Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
// If this is known to be an instance access, go ahead and build an
// implicit 'this' expression now.
- QualType ThisTy = getAndCaptureCurrentThisType();
+ QualType ThisTy = getCurrentThisType();
assert(!ThisTy.isNull() && "didn't correctly pre-flight capture of 'this'");
Expr *baseExpr = 0; // null signifies implicit access
@@ -1577,13 +1612,14 @@ Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation Loc = R.getNameLoc();
if (SS.getRange().isValid())
Loc = SS.getRange().getBegin();
+ CheckCXXThisCapture(Loc);
baseExpr = new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/true);
}
return BuildMemberReferenceExpr(baseExpr, ThisTy,
/*OpLoc*/ SourceLocation(),
/*IsArrow*/ true,
- SS,
+ SS, TemplateKWLoc,
/*FirstQualifierInScope*/ 0,
R, TemplateArgs);
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
index 20098b2..b62d56e 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
@@ -17,6 +17,8 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Edit/Rewriters.h"
+#include "clang/Edit/Commit.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprObjC.h"
@@ -43,7 +45,7 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
// If we have a multi-part string, merge it all together.
if (NumStrings != 1) {
// Concatenate objc strings.
- llvm::SmallString<128> StrBuf;
+ SmallString<128> StrBuf;
SmallVector<SourceLocation, 8> StrLocs;
for (unsigned i = 0; i != NumStrings; ++i) {
@@ -70,7 +72,11 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
Context.getPointerType(Context.CharTy),
&StrLocs[0], StrLocs.size());
}
+
+ return BuildObjCStringLiteral(AtLocs[0], S);
+}
+
+ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
// Verify that this composite string is acceptable for ObjC strings.
if (CheckObjCString(S))
return true;
@@ -82,16 +88,16 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
QualType Ty = Context.getObjCConstantStringInterface();
if (!Ty.isNull()) {
Ty = Context.getObjCObjectPointerType(Ty);
- } else if (getLangOptions().NoConstantCFStrings) {
+ } else if (getLangOpts().NoConstantCFStrings) {
IdentifierInfo *NSIdent=0;
- std::string StringClass(getLangOptions().ObjCConstantStringClass);
+ std::string StringClass(getLangOpts().ObjCConstantStringClass);
if (StringClass.empty())
NSIdent = &Context.Idents.get("NSConstantString");
else
NSIdent = &Context.Idents.get(StringClass);
- NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLocs[0],
+ NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
LookupOrdinaryName);
if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
Context.setObjCConstantStringInterface(StrIF);
@@ -106,20 +112,638 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
}
} else {
IdentifierInfo *NSIdent = &Context.Idents.get("NSString");
- NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLocs[0],
+ NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
LookupOrdinaryName);
if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
Context.setObjCConstantStringInterface(StrIF);
Ty = Context.getObjCConstantStringInterface();
Ty = Context.getObjCObjectPointerType(Ty);
} else {
- // If there is no NSString interface defined then treat constant
- // strings as untyped objects and let the runtime figure it out later.
- Ty = Context.getObjCIdType();
+ // If there is no NSString interface defined, implicitly declare
+ // @class NSString; and use that instead. This makes sure the type
+ // of an NSString literal is represented correctly, instead of
+ // being an 'id' type.
+ Ty = Context.getObjCNSStringType();
+ if (Ty.isNull()) {
+ ObjCInterfaceDecl *NSStringIDecl =
+ ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(), NSIdent,
+ 0, SourceLocation());
+ Ty = Context.getObjCInterfaceType(NSStringIDecl);
+ Context.setObjCNSStringType(Ty);
+ }
+ Ty = Context.getObjCObjectPointerType(Ty);
+ }
+ }
+
+ return new (Context) ObjCStringLiteral(S, Ty, AtLoc);
+}
+
+/// \brief Retrieve the NSNumber factory method that should be used to create
+/// an Objective-C literal for the given type.
+static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
+ QualType T, QualType ReturnType,
+ SourceRange Range) {
+ llvm::Optional<NSAPI::NSNumberLiteralMethodKind> Kind
+ = S.NSAPIObj->getNSNumberFactoryMethodKind(T);
+
+ if (!Kind) {
+ S.Diag(Loc, diag::err_invalid_nsnumber_type)
+ << T << Range;
+ return 0;
+ }
+
+ // If we already looked up this method, we're done.
+ if (S.NSNumberLiteralMethods[*Kind])
+ return S.NSNumberLiteralMethods[*Kind];
+
+ Selector Sel = S.NSAPIObj->getNSNumberLiteralSelector(*Kind,
+ /*Instance=*/false);
+
+ // Look for the appropriate method within NSNumber.
+ ObjCMethodDecl *Method = S.NSNumberDecl->lookupClassMethod(Sel);
+ if (!Method && S.getLangOpts().DebuggerObjCLiteral) {
+ TypeSourceInfo *ResultTInfo = 0;
+ Method = ObjCMethodDecl::Create(S.Context, SourceLocation(), SourceLocation(), Sel,
+ ReturnType,
+ ResultTInfo,
+ S.Context.getTranslationUnitDecl(),
+ false /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ ParmVarDecl *value = ParmVarDecl::Create(S.Context, Method,
+ SourceLocation(), SourceLocation(),
+ &S.Context.Idents.get("value"),
+ T, /*TInfo=*/0, SC_None, SC_None, 0);
+ Method->setMethodParams(S.Context, value, ArrayRef<SourceLocation>());
+ }
+
+ if (!Method) {
+ S.Diag(Loc, diag::err_undeclared_nsnumber_method) << Sel;
+ return 0;
+ }
+
+ // Make sure the return type is reasonable.
+ if (!Method->getResultType()->isObjCObjectPointerType()) {
+ S.Diag(Loc, diag::err_objc_literal_method_sig)
+ << Sel;
+ S.Diag(Method->getLocation(), diag::note_objc_literal_method_return)
+ << Method->getResultType();
+ return 0;
+ }
+
+ // Note: if the parameter type is out-of-line, we'll catch it later in the
+ // implicit conversion.
+
+ S.NSNumberLiteralMethods[*Kind] = Method;
+ return Method;
+}
+
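getNSNumberFactoryMethod resolves each factory selector at most once, caching the result per literal kind in NSNumberLiteralMethods. A standalone sketch of that per-kind memoization, under hypothetical NumKind and Factory names rather than Clang's API:

    #include <array>
    #include <cstddef>
    #include <string>

    enum class NumKind { Int, Double, Bool, Count };

    struct Factory { std::string selector; };

    static Factory *lookupFactorySlow(NumKind k) {  // the "real" lookup
        static Factory ints{"numberWithInt:"}, dbls{"numberWithDouble:"},
                       bools{"numberWithBool:"};
        switch (k) {
        case NumKind::Int:    return &ints;
        case NumKind::Double: return &dbls;
        case NumKind::Bool:   return &bools;
        default:              return nullptr;
        }
    }

    static Factory *getFactory(NumKind k) {
        static std::array<Factory *, std::size_t(NumKind::Count)> cache{};
        Factory *&slot = cache[std::size_t(k)];
        if (!slot)        // first request for this kind: resolve it
            slot = lookupFactorySlow(k);
        return slot;      // every later request is served from the cache
    }

    int main() {
        return getFactory(NumKind::Int) == getFactory(NumKind::Int) ? 0 : 1;
    }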
+/// BuildObjCNumericLiteral - builds an ObjCNumericLiteral AST node for the
+/// numeric literal expression. Type of the expression will be "NSNumber *"
+/// or "id" if NSNumber is unavailable.
+ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) {
+ // Look up the NSNumber class, if we haven't done so already.
+ if (!NSNumberDecl) {
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber),
+ AtLoc, LookupOrdinaryName);
+ NSNumberDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+
+ if (!NSNumberDecl && getLangOpts().DebuggerObjCLiteral)
+ NSNumberDecl = ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber),
+ 0, SourceLocation());
+ if (!NSNumberDecl) {
+ Diag(AtLoc, diag::err_undeclared_nsnumber);
+ return ExprError();
+ }
+ }
+
+ // Determine the type of the literal.
+ QualType NumberType = Number->getType();
+ if (CharacterLiteral *Char = dyn_cast<CharacterLiteral>(Number)) {
+ // In C, character literals have type 'int'. That's not the type we want
+ // to use to determine the Objective-C literal kind.
+ switch (Char->getKind()) {
+ case CharacterLiteral::Ascii:
+ NumberType = Context.CharTy;
+ break;
+
+ case CharacterLiteral::Wide:
+ NumberType = Context.getWCharType();
+ break;
+
+ case CharacterLiteral::UTF16:
+ NumberType = Context.Char16Ty;
+ break;
+
+ case CharacterLiteral::UTF32:
+ NumberType = Context.Char32Ty;
+ break;
+ }
+ }
+
+ ObjCMethodDecl *Method = 0;
+ // Look for the appropriate method within NSNumber.
+ // Construct the literal.
+ QualType Ty
+ = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSNumberDecl));
+ Method = getNSNumberFactoryMethod(*this, AtLoc,
+ NumberType, Ty,
+ Number->getSourceRange());
+
+ if (!Method)
+ return ExprError();
+
+ // Convert the number to the type that the parameter expects.
+ QualType ElementT = Method->param_begin()[0]->getType();
+ ExprResult ConvertedNumber = PerformImplicitConversion(Number, ElementT,
+ AA_Sending);
+ if (ConvertedNumber.isInvalid())
+ return ExprError();
+ Number = ConvertedNumber.get();
+
+ return MaybeBindToTemporary(
+ new (Context) ObjCNumericLiteral(Number, Ty, Method, AtLoc));
+}
+
+ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation AtLoc,
+ SourceLocation ValueLoc,
+ bool Value) {
+ ExprResult Inner;
+ if (getLangOpts().CPlusPlus) {
+ Inner = ActOnCXXBoolLiteral(ValueLoc, Value? tok::kw_true : tok::kw_false);
+ } else {
+ // C doesn't actually have a way to represent literal values of type
+ // _Bool. So, we'll use 0/1 and an implicit cast to _Bool.
+ Inner = ActOnIntegerConstant(ValueLoc, Value? 1 : 0);
+ Inner = ImpCastExprToType(Inner.get(), Context.BoolTy,
+ CK_IntegralToBoolean);
+ }
+
+ return BuildObjCNumericLiteral(AtLoc, Inner.get());
+}
+
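ActOnObjCBoolLiteral builds a boolean-typed inner expression the way each language permits (a genuine bool literal in C++, 0/1 converted to _Bool in C) and then reuses the numeric-literal boxing path. A compressed sketch of that branch, with stand-in helpers rather than the real Sema entry points:

    #include <cstdio>

    static int boxBool(bool v) {            // stand-in for the NSNumber path
        return v ? 1 : 0;
    }

    static int buildBoolLiteral(bool isCxx, bool value) {
        bool inner;
        if (isCxx)
            inner = value;                   // C++: a genuine bool literal
        else
            inner = (value ? 1 : 0) != 0;    // C: 0/1 plus a cast to _Bool
        return boxBool(inner);               // shared boxing step
    }

    int main() {
        std::printf("%d %d\n", buildBoolLiteral(true, true),
                               buildBoolLiteral(false, false));
        return 0;
    }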
+/// \brief Check that the given expression is a valid element of an Objective-C
+/// collection literal.
+static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
+ QualType T) {
+ // If the expression is type-dependent, there's nothing for us to do.
+ if (Element->isTypeDependent())
+ return Element;
+
+ ExprResult Result = S.CheckPlaceholderExpr(Element);
+ if (Result.isInvalid())
+ return ExprError();
+ Element = Result.get();
+
+ // In C++, check for an implicit conversion to an Objective-C object pointer
+ // type.
+ if (S.getLangOpts().CPlusPlus && Element->getType()->isRecordType()) {
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(S.Context, T, /*Consumed=*/false);
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Element->getLocStart(), SourceLocation());
+ InitializationSequence Seq(S, Entity, Kind, &Element, 1);
+ if (!Seq.Failed())
+ return Seq.Perform(S, Entity, Kind, MultiExprArg(S, &Element, 1));
+ }
+
+ Expr *OrigElement = Element;
+
+ // Perform lvalue-to-rvalue conversion.
+ Result = S.DefaultLvalueConversion(Element);
+ if (Result.isInvalid())
+ return ExprError();
+ Element = Result.get();
+
+ // Make sure that we have an Objective-C pointer type or block.
+ if (!Element->getType()->isObjCObjectPointerType() &&
+ !Element->getType()->isBlockPointerType()) {
+ bool Recovered = false;
+
+ // If this is potentially an Objective-C numeric literal, add the '@'.
+ if (isa<IntegerLiteral>(OrigElement) ||
+ isa<CharacterLiteral>(OrigElement) ||
+ isa<FloatingLiteral>(OrigElement) ||
+ isa<ObjCBoolLiteralExpr>(OrigElement) ||
+ isa<CXXBoolLiteralExpr>(OrigElement)) {
+ if (S.NSAPIObj->getNSNumberFactoryMethodKind(OrigElement->getType())) {
+ int Which = isa<CharacterLiteral>(OrigElement) ? 1
+ : (isa<CXXBoolLiteralExpr>(OrigElement) ||
+ isa<ObjCBoolLiteralExpr>(OrigElement)) ? 2
+ : 3;
+
+ S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection)
+ << Which << OrigElement->getSourceRange()
+ << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@");
+
+ Result = S.BuildObjCNumericLiteral(OrigElement->getLocStart(),
+ OrigElement);
+ if (Result.isInvalid())
+ return ExprError();
+
+ Element = Result.get();
+ Recovered = true;
+ }
+ }
+ // If this is potentially an Objective-C string literal, add the '@'.
+ else if (StringLiteral *String = dyn_cast<StringLiteral>(OrigElement)) {
+ if (String->isAscii()) {
+ S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection)
+ << 0 << OrigElement->getSourceRange()
+ << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@");
+
+ Result = S.BuildObjCStringLiteral(OrigElement->getLocStart(), String);
+ if (Result.isInvalid())
+ return ExprError();
+
+ Element = Result.get();
+ Recovered = true;
+ }
+ }
+
+ if (!Recovered) {
+ S.Diag(Element->getLocStart(), diag::err_invalid_collection_element)
+ << Element->getType();
+ return ExprError();
}
}
+
+ // Make sure that the element has the type that the container factory
+ // function expects.
+ return S.PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(S.Context, T,
+ /*Consumed=*/false),
+ Element->getLocStart(), Element);
+}
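CheckObjCCollectionLiteralElement recovers from a bare literal element by emitting a fix-it that inserts '@' and then continuing as though it had been written. A self-contained sketch of that diagnose-fix-recover shape (hypothetical Element type):

    #include <cstdio>
    #include <string>

    struct Element { std::string text; bool boxed; };

    static bool checkElement(Element &e) {
        if (!e.boxed) {
            // diagnostic with a fix-it: insert '@' before the literal
            std::printf("error: element must be boxed; apply fix-it '@%s'\n",
                        e.text.c_str());
            e = Element{"@" + e.text, true};   // recover as if '@' was written
        }
        return true;                           // checking continues either way
    }

    int main() {
        Element e{"42", false};
        checkElement(e);
        return e.text == "@42" ? 0 : 1;        // 0 on success
    }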
- return new (Context) ObjCStringLiteral(S, Ty, AtLocs[0]);
+ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
+ Expr *IndexExpr,
+ ObjCMethodDecl *getterMethod,
+ ObjCMethodDecl *setterMethod) {
+ // Feature support is for the modern ABI only.
+ if (!LangOpts.ObjCNonFragileABI)
+ return ExprError();
+ // If the expression is type-dependent, there's nothing for us to do.
+ assert ((!BaseExpr->isTypeDependent() && !IndexExpr->isTypeDependent()) &&
+ "base or index cannot have dependent type here");
+ ExprResult Result = CheckPlaceholderExpr(IndexExpr);
+ if (Result.isInvalid())
+ return ExprError();
+ IndexExpr = Result.get();
+
+ // Perform lvalue-to-rvalue conversion.
+ Result = DefaultLvalueConversion(BaseExpr);
+ if (Result.isInvalid())
+ return ExprError();
+ BaseExpr = Result.get();
+ return Owned(ObjCSubscriptRefExpr::Create(Context,
+ BaseExpr,
+ IndexExpr,
+ Context.PseudoObjectTy,
+ getterMethod,
+ setterMethod, RB));
+}
+
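BuildObjCSubscriptExpression types the expression as PseudoObjectTy and records both accessors; whether the getter or the setter runs is decided later by how the expression is used. A rough standalone analogue of that deferred choice, using a C++ proxy rather than Clang's pseudo-object rewriting:

    #include <cstdio>
    #include <map>

    struct Dict {
        std::map<int, int> storage;
        int  get(int k) const {
            auto i = storage.find(k);
            return i == storage.end() ? 0 : i->second;
        }
        void set(int k, int v) { storage[k] = v; }
    };

    struct SubscriptRef {                    // (base, index, getter, setter)
        Dict &base;
        int index;
        operator int() const { return base.get(index); }   // "getter" use
        SubscriptRef &operator=(int v) {                   // "setter" use
            base.set(index, v);
            return *this;
        }
    };

    int main() {
        Dict d;
        SubscriptRef r{d, 7};
        r = 42;                        // use as a store: setter is chosen
        std::printf("%d\n", int(r));   // use as a load: getter is chosen
        return 0;
    }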
+ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
+ // Look up the NSArray class, if we haven't done so already.
+ if (!NSArrayDecl) {
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSArray),
+ SR.getBegin(),
+ LookupOrdinaryName);
+ NSArrayDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (!NSArrayDecl && getLangOpts().DebuggerObjCLiteral)
+ NSArrayDecl = ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSArray),
+ 0, SourceLocation());
+
+ if (!NSArrayDecl) {
+ Diag(SR.getBegin(), diag::err_undeclared_nsarray);
+ return ExprError();
+ }
+ }
+
+ // Find the arrayWithObjects:count: method, if we haven't done so already.
+ QualType IdT = Context.getObjCIdType();
+ if (!ArrayWithObjectsMethod) {
+ Selector
+ Sel = NSAPIObj->getNSArraySelector(NSAPI::NSArr_arrayWithObjectsCount);
+ ArrayWithObjectsMethod = NSArrayDecl->lookupClassMethod(Sel);
+ if (!ArrayWithObjectsMethod && getLangOpts().DebuggerObjCLiteral) {
+ TypeSourceInfo *ResultTInfo = 0;
+ ArrayWithObjectsMethod =
+ ObjCMethodDecl::Create(Context,
+ SourceLocation(), SourceLocation(), Sel,
+ IdT,
+ ResultTInfo,
+ Context.getTranslationUnitDecl(),
+ false /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ SmallVector<ParmVarDecl *, 2> Params;
+ ParmVarDecl *objects = ParmVarDecl::Create(Context, ArrayWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("objects"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(objects);
+ ParmVarDecl *cnt = ParmVarDecl::Create(Context, ArrayWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("cnt"),
+ Context.UnsignedLongTy,
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(cnt);
+ ArrayWithObjectsMethod->setMethodParams(Context, Params,
+ ArrayRef<SourceLocation>());
+
+ }
+
+ if (!ArrayWithObjectsMethod) {
+ Diag(SR.getBegin(), diag::err_undeclared_arraywithobjects) << Sel;
+ return ExprError();
+ }
+ }
+
+ // Make sure the return type is reasonable.
+ if (!ArrayWithObjectsMethod->getResultType()->isObjCObjectPointerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << ArrayWithObjectsMethod->getSelector();
+ Diag(ArrayWithObjectsMethod->getLocation(),
+ diag::note_objc_literal_method_return)
+ << ArrayWithObjectsMethod->getResultType();
+ return ExprError();
+ }
+
+ // Dig out the type that all elements should be converted to.
+ QualType T = ArrayWithObjectsMethod->param_begin()[0]->getType();
+ const PointerType *PtrT = T->getAs<PointerType>();
+ if (!PtrT ||
+ !Context.hasSameUnqualifiedType(PtrT->getPointeeType(), IdT)) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << ArrayWithObjectsMethod->getSelector();
+ Diag(ArrayWithObjectsMethod->param_begin()[0]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 0 << T
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+ T = PtrT->getPointeeType();
+
+ // Check that the 'count' parameter is integral.
+ if (!ArrayWithObjectsMethod->param_begin()[1]->getType()->isIntegerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << ArrayWithObjectsMethod->getSelector();
+ Diag(ArrayWithObjectsMethod->param_begin()[1]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 1
+ << ArrayWithObjectsMethod->param_begin()[1]->getType()
+ << "integral";
+ return ExprError();
+ }
+
+ // Check that each of the elements provided is valid in a collection literal,
+ // performing conversions as necessary.
+ Expr **ElementsBuffer = Elements.get();
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
+ ExprResult Converted = CheckObjCCollectionLiteralElement(*this,
+ ElementsBuffer[I],
+ T);
+ if (Converted.isInvalid())
+ return ExprError();
+
+ ElementsBuffer[I] = Converted.get();
+ }
+
+ QualType Ty
+ = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSArrayDecl));
+
+ return MaybeBindToTemporary(
+ ObjCArrayLiteral::Create(Context,
+ llvm::makeArrayRef(Elements.get(),
+ Elements.size()),
+ Ty, ArrayWithObjectsMethod, SR));
+}
+
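The arrayWithObjects:count: candidate is validated structurally: the result must be an Objective-C object pointer, the first parameter a pointer to 'id', and the count parameter integral. A toy version of those shape checks over a hypothetical mini type model:

    #include <cstdio>

    enum class Ty { ObjCPtr, PtrToId, ULong, Other };

    struct Method { Ty result; Ty param0; Ty param1; };

    static bool looksLikeArrayWithObjectsCount(const Method &m,
                                               const char **why) {
        if (m.result != Ty::ObjCPtr) { *why = "bad return type"; return false; }
        if (m.param0 != Ty::PtrToId) { *why = "objects param is not id*"; return false; }
        if (m.param1 != Ty::ULong)   { *why = "count param not integral"; return false; }
        *why = "";
        return true;
    }

    int main() {
        const char *why = "";
        Method good{Ty::ObjCPtr, Ty::PtrToId, Ty::ULong};
        Method bad {Ty::ObjCPtr, Ty::Other,   Ty::ULong};
        std::printf("%d\n", looksLikeArrayWithObjectsCount(good, &why));  // 1
        std::printf("%d (%s)\n", looksLikeArrayWithObjectsCount(bad, &why), why);
        return 0;
    }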
+ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
+ ObjCDictionaryElement *Elements,
+ unsigned NumElements) {
+ // Look up the NSDictionary class, if we haven't done so already.
+ if (!NSDictionaryDecl) {
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSDictionary),
+ SR.getBegin(), LookupOrdinaryName);
+ NSDictionaryDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (!NSDictionaryDecl && getLangOpts().DebuggerObjCLiteral)
+ NSDictionaryDecl = ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSDictionary),
+ 0, SourceLocation());
+
+ if (!NSDictionaryDecl) {
+ Diag(SR.getBegin(), diag::err_undeclared_nsdictionary);
+ return ExprError();
+ }
+ }
+
+ // Find the dictionaryWithObjects:forKeys:count: method, if we haven't done
+ // so already.
+ QualType IdT = Context.getObjCIdType();
+ if (!DictionaryWithObjectsMethod) {
+ Selector Sel = NSAPIObj->getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectsForKeysCount);
+ DictionaryWithObjectsMethod = NSDictionaryDecl->lookupClassMethod(Sel);
+ if (!DictionaryWithObjectsMethod && getLangOpts().DebuggerObjCLiteral) {
+ DictionaryWithObjectsMethod =
+ ObjCMethodDecl::Create(Context,
+ SourceLocation(), SourceLocation(), Sel,
+ IdT,
+ 0 /*TypeSourceInfo */,
+ Context.getTranslationUnitDecl(),
+ false /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ SmallVector<ParmVarDecl *, 3> Params;
+ ParmVarDecl *objects = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("objects"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(objects);
+ ParmVarDecl *keys = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("keys"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(keys);
+ ParmVarDecl *cnt = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("cnt"),
+ Context.UnsignedLongTy,
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(cnt);
+ DictionaryWithObjectsMethod->setMethodParams(Context, Params,
+ ArrayRef<SourceLocation>());
+ }
+
+ if (!DictionaryWithObjectsMethod) {
+ Diag(SR.getBegin(), diag::err_undeclared_dictwithobjects) << Sel;
+ return ExprError();
+ }
+ }
+
+ // Make sure the return type is reasonable.
+ if (!DictionaryWithObjectsMethod->getResultType()->isObjCObjectPointerType()){
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << DictionaryWithObjectsMethod->getSelector();
+ Diag(DictionaryWithObjectsMethod->getLocation(),
+ diag::note_objc_literal_method_return)
+ << DictionaryWithObjectsMethod->getResultType();
+ return ExprError();
+ }
+
+ // Dig out the type that all values should be converted to.
+ QualType ValueT = DictionaryWithObjectsMethod->param_begin()[0]->getType();
+ const PointerType *PtrValue = ValueT->getAs<PointerType>();
+ if (!PtrValue ||
+ !Context.hasSameUnqualifiedType(PtrValue->getPointeeType(), IdT)) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << DictionaryWithObjectsMethod->getSelector();
+ Diag(DictionaryWithObjectsMethod->param_begin()[0]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 0 << ValueT
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+ ValueT = PtrValue->getPointeeType();
+
+ // Dig out the type that all keys should be converted to.
+ QualType KeyT = DictionaryWithObjectsMethod->param_begin()[1]->getType();
+ const PointerType *PtrKey = KeyT->getAs<PointerType>();
+ if (!PtrKey ||
+ !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
+ IdT)) {
+ bool err = true;
+ if (PtrKey) {
+ if (QIDNSCopying.isNull()) {
+ // Is the key argument of the selector id<NSCopying>?
+ if (ObjCProtocolDecl *NSCopyingPDecl =
+ LookupProtocol(&Context.Idents.get("NSCopying"), SR.getBegin())) {
+ ObjCProtocolDecl *PQ[] = {NSCopyingPDecl};
+ QIDNSCopying =
+ Context.getObjCObjectType(Context.ObjCBuiltinIdTy,
+ (ObjCProtocolDecl**) PQ,1);
+ QIDNSCopying = Context.getObjCObjectPointerType(QIDNSCopying);
+ }
+ }
+ if (!QIDNSCopying.isNull())
+ err = !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
+ QIDNSCopying);
+ }
+
+ if (err) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << DictionaryWithObjectsMethod->getSelector();
+ Diag(DictionaryWithObjectsMethod->param_begin()[1]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 1 << KeyT
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+ }
+ KeyT = PtrKey->getPointeeType();
+
+ // Check that the 'count' parameter is integral.
+ if (!DictionaryWithObjectsMethod->param_begin()[2]->getType()
+ ->isIntegerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << DictionaryWithObjectsMethod->getSelector();
+ Diag(DictionaryWithObjectsMethod->param_begin()[2]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 2
+ << DictionaryWithObjectsMethod->param_begin()[2]->getType()
+ << "integral";
+ return ExprError();
+ }
+
+ // Check that each of the keys and values provided is valid in a collection
+ // literal, performing conversions as necessary.
+ bool HasPackExpansions = false;
+ for (unsigned I = 0, N = NumElements; I != N; ++I) {
+ // Check the key.
+ ExprResult Key = CheckObjCCollectionLiteralElement(*this, Elements[I].Key,
+ KeyT);
+ if (Key.isInvalid())
+ return ExprError();
+
+ // Check the value.
+ ExprResult Value
+ = CheckObjCCollectionLiteralElement(*this, Elements[I].Value, ValueT);
+ if (Value.isInvalid())
+ return ExprError();
+
+ Elements[I].Key = Key.get();
+ Elements[I].Value = Value.get();
+
+ if (Elements[I].EllipsisLoc.isInvalid())
+ continue;
+
+ if (!Elements[I].Key->containsUnexpandedParameterPack() &&
+ !Elements[I].Value->containsUnexpandedParameterPack()) {
+ Diag(Elements[I].EllipsisLoc,
+ diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(Elements[I].Key->getLocStart(),
+ Elements[I].Value->getLocEnd());
+ return ExprError();
+ }
+
+ HasPackExpansions = true;
+ }
+
+ QualType Ty
+ = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSDictionaryDecl));
+ return MaybeBindToTemporary(
+ ObjCDictionaryLiteral::Create(Context,
+ llvm::makeArrayRef(Elements,
+ NumElements),
+ HasPackExpansions,
+ Ty,
+ DictionaryWithObjectsMethod, SR));
}
ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
@@ -144,7 +768,7 @@ ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
// which is an array type.
StrTy = Context.CharTy;
// A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
- if (getLangOptions().CPlusPlus || getLangOptions().ConstStrings)
+ if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
StrTy.addConst();
StrTy = Context.getConstantArrayType(StrTy, llvm::APInt(32, Str.size()+1),
ArrayType::Normal, 0);
@@ -191,7 +815,7 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
// In ARC, forbid the user from using @selector for
// retain/release/autorelease/dealloc/retainCount.
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
switch (Sel.getMethodFamily()) {
case OMF_retain:
case OMF_release:
@@ -237,14 +861,8 @@ ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
}
/// Try to capture an implicit reference to 'self'.
-ObjCMethodDecl *Sema::tryCaptureObjCSelf() {
- // Ignore block scopes: we can capture through them.
- DeclContext *DC = CurContext;
- while (true) {
- if (isa<BlockDecl>(DC)) DC = cast<BlockDecl>(DC)->getDeclContext();
- else if (isa<EnumDecl>(DC)) DC = cast<EnumDecl>(DC)->getDeclContext();
- else break;
- }
+ObjCMethodDecl *Sema::tryCaptureObjCSelf(SourceLocation Loc) {
+ DeclContext *DC = getFunctionLevelDeclContext();
// If we're not in an ObjC method, error out. Note that, unlike the
// C++ case, we don't require an instance method --- class methods
@@ -253,22 +871,7 @@ ObjCMethodDecl *Sema::tryCaptureObjCSelf() {
if (!method)
return 0;
- ImplicitParamDecl *self = method->getSelfDecl();
- assert(self && "capturing 'self' in non-definition?");
-
- // Mark that we're closing on 'this' in all the block scopes, if applicable.
- for (unsigned idx = FunctionScopes.size() - 1;
- isa<BlockScopeInfo>(FunctionScopes[idx]);
- --idx) {
- BlockScopeInfo *blockScope = cast<BlockScopeInfo>(FunctionScopes[idx]);
- unsigned &captureIndex = blockScope->CaptureMap[self];
- if (captureIndex) break;
-
- bool nested = isa<BlockScopeInfo>(FunctionScopes[idx-1]);
- blockScope->Captures.push_back(
- BlockDecl::Capture(self, /*byref*/ false, nested, /*copy*/ 0));
- captureIndex = blockScope->Captures.size(); // +1
- }
+ tryCaptureVariable(method->getSelfDecl(), Loc);
return method;
}
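tryCaptureObjCSelf now hands 'self' to the generic tryCaptureVariable machinery instead of walking block scopes by hand, so a single code path records captures for 'self' and ordinary variables alike. A standalone sketch of that inner-to-outer capture walk (simplified Scope type, not Clang's FunctionScopes):

    #include <string>
    #include <vector>

    struct Scope { std::vector<std::string> captures; bool isClosure; };

    // Record the variable in every enclosing closure scope, innermost
    // outward, stopping once a scope has already captured it.
    static void tryCaptureVariable(std::vector<Scope> &scopes,
                                   const std::string &name) {
        for (auto it = scopes.rbegin();
             it != scopes.rend() && it->isClosure; ++it) {
            for (const auto &c : it->captures)
                if (c == name)
                    return;               // already handled from here outward
            it->captures.push_back(name);
        }
    }

    int main() {
        // a method scope with two nested block scopes
        std::vector<Scope> scopes{{{}, false}, {{}, true}, {{}, true}};
        tryCaptureVariable(scopes, "self");  // 'self' treated like any variable
        return scopes.back().captures.size() == 1 ? 0 : 1;
    }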
@@ -365,18 +968,18 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
}
unsigned DiagID;
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
DiagID = diag::err_arc_method_not_found;
else
DiagID = isClassMessage ? diag::warn_class_method_not_found
: diag::warn_inst_method_not_found;
- if (!getLangOptions().DebuggerSupport)
+ if (!getLangOpts().DebuggerSupport)
Diag(lbrac, DiagID)
<< Sel << isClassMessage << SourceRange(lbrac, rbrac);
// In debuggers, we want to use __unknown_anytype for these
// results so that clients can cast them.
- if (getLangOptions().DebuggerSupport) {
+ if (getLangOpts().DebuggerSupport) {
ReturnType = Context.UnknownAnyTy;
} else {
ReturnType = Context.getObjCIdType();
@@ -409,17 +1012,23 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
Expr *argExpr = Args[i];
- ParmVarDecl *Param = Method->param_begin()[i];
+ ParmVarDecl *param = Method->param_begin()[i];
assert(argExpr && "CheckMessageArgumentTypes(): missing expression");
+ // Strip the unbridged-cast placeholder expression off unless it's
+ // a consumed argument.
+ if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
+ !param->hasAttr<CFConsumedAttr>())
+ argExpr = stripARCUnbridgedCast(argExpr);
+
if (RequireCompleteType(argExpr->getSourceRange().getBegin(),
- Param->getType(),
+ param->getType(),
PDiag(diag::err_call_incomplete_argument)
<< argExpr->getSourceRange()))
return true;
InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
- Param);
+ param);
ExprResult ArgE = PerformCopyInitialization(Entity, lbrac, Owned(argExpr));
if (ArgE.isInvalid())
IsError = true;
@@ -448,27 +1057,24 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
Args[NumArgs-1]->getLocEnd());
}
}
- // diagnose nonnull arguments.
- for (specific_attr_iterator<NonNullAttr>
- i = Method->specific_attr_begin<NonNullAttr>(),
- e = Method->specific_attr_end<NonNullAttr>(); i != e; ++i) {
- CheckNonNullArguments(*i, Args, lbrac);
- }
DiagnoseSentinelCalls(Method, lbrac, Args, NumArgs);
+
+ // Do additional checks on the method.
+ IsError |= CheckObjCMethodCall(Method, lbrac, Args, NumArgs);
+
return IsError;
}
bool Sema::isSelfExpr(Expr *receiver) {
// 'self' is objc 'self' in an objc method only.
- DeclContext *DC = CurContext;
- while (isa<BlockDecl>(DC))
- DC = DC->getParent();
- if (DC && !isa<ObjCMethodDecl>(DC))
- return false;
+ ObjCMethodDecl *method =
+ dyn_cast<ObjCMethodDecl>(CurContext->getNonClosureAncestor());
+ if (!method) return false;
+
receiver = receiver->IgnoreParenLValueCasts();
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(receiver))
- if (DRE->getDecl()->getIdentifier() == &Context.Idents.get("self"))
+ if (DRE->getDecl() == method->getSelfDecl())
return true;
return false;
}
@@ -507,6 +1113,9 @@ ObjCMethodDecl *Sema::LookupPrivateClassMethod(Selector Sel,
ObjCMethodDecl *Sema::LookupPrivateInstanceMethod(Selector Sel,
ObjCInterfaceDecl *ClassDecl) {
+ if (!ClassDecl->hasDefinition())
+ return 0;
+
ObjCMethodDecl *Method = 0;
while (ClassDecl && !Method) {
// If we have implementations in scope, check "private" methods.
@@ -521,6 +1130,35 @@ ObjCMethodDecl *Sema::LookupPrivateInstanceMethod(Selector Sel,
return Method;
}
+/// LookupMethodInType - Look up a method in an ObjCObjectType.
+ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type,
+ bool isInstance) {
+ const ObjCObjectType *objType = type->castAs<ObjCObjectType>();
+ if (ObjCInterfaceDecl *iface = objType->getInterface()) {
+ // Look it up in the main interface (and categories, etc.)
+ if (ObjCMethodDecl *method = iface->lookupMethod(sel, isInstance))
+ return method;
+
+ // Okay, look for "private" methods declared in any
+ // @implementations we've seen.
+ if (isInstance) {
+ if (ObjCMethodDecl *method = LookupPrivateInstanceMethod(sel, iface))
+ return method;
+ } else {
+ if (ObjCMethodDecl *method = LookupPrivateClassMethod(sel, iface))
+ return method;
+ }
+ }
+
+ // Check qualifiers.
+ for (ObjCObjectType::qual_iterator
+ i = objType->qual_begin(), e = objType->qual_end(); i != e; ++i)
+ if (ObjCMethodDecl *method = (*i)->lookupMethod(sel, isInstance))
+ return method;
+
+ return 0;
+}
+
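LookupMethodInObjectType tries the interface and its categories, then private methods from visible @implementations, then the protocol qualifiers, returning the first hit. The same first-hit chain in miniature, with hypothetical resolver steps:

    #include <functional>
    #include <vector>

    struct Method { int id; };

    // Try each resolver in order; the first non-null result wins.
    static Method *lookupInOrder(
        const std::vector<std::function<Method *()>> &chain) {
        for (const auto &step : chain)
            if (Method *m = step())
                return m;
        return nullptr;   // not found anywhere
    }

    static Method privateImpl{2};

    int main() {
        std::vector<std::function<Method *()>> chain{
            [] { return static_cast<Method *>(nullptr); }, // interface+categories
            [] { return &privateImpl; },                   // private @implementation
            [] { return static_cast<Method *>(nullptr); }, // protocol qualifiers
        };
        return lookupInOrder(chain) == &privateImpl ? 0 : 1;
    }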
/// LookupMethodInQualifiedType - Lookups up a method in protocol qualifier
/// list of a qualified objective pointer type.
ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel,
@@ -557,35 +1195,26 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
}
IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
-
- if (IFace->isForwardDecl()) {
- Diag(MemberLoc, diag::err_property_not_found_forward_class)
- << MemberName << QualType(OPT, 0);
- Diag(IFace->getLocation(), diag::note_forward_class);
+ SourceRange BaseRange = Super? SourceRange(SuperLoc)
+ : BaseExpr->getSourceRange();
+ if (RequireCompleteType(MemberLoc, OPT->getPointeeType(),
+ PDiag(diag::err_property_not_found_forward_class)
+ << MemberName << BaseRange))
return ExprError();
- }
+
// Search for a declared property first.
if (ObjCPropertyDecl *PD = IFace->FindPropertyDeclaration(Member)) {
// Check whether we can reference this property.
if (DiagnoseUseOfDecl(PD, MemberLoc))
return ExprError();
- QualType ResTy = PD->getType();
- ResTy = ResTy.getNonLValueExprType(Context);
- Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
- ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Sel);
- if (Getter &&
- (Getter->hasRelatedResultType()
- || DiagnosePropertyAccessorMismatch(PD, Getter, MemberLoc)))
- ResTy = getMessageSendResultType(QualType(OPT, 0), Getter, false,
- Super);
if (Super)
- return Owned(new (Context) ObjCPropertyRefExpr(PD, ResTy,
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, Context.PseudoObjectTy,
VK_LValue, OK_ObjCProperty,
MemberLoc,
SuperLoc, SuperType));
else
- return Owned(new (Context) ObjCPropertyRefExpr(PD, ResTy,
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, Context.PseudoObjectTy,
VK_LValue, OK_ObjCProperty,
MemberLoc, BaseExpr));
}
@@ -597,17 +1226,16 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (DiagnoseUseOfDecl(PD, MemberLoc))
return ExprError();
- QualType T = PD->getType();
- if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
- T = getMessageSendResultType(QualType(OPT, 0), Getter, false, Super);
if (Super)
- return Owned(new (Context) ObjCPropertyRefExpr(PD, T,
+ return Owned(new (Context) ObjCPropertyRefExpr(PD,
+ Context.PseudoObjectTy,
VK_LValue,
OK_ObjCProperty,
MemberLoc,
SuperLoc, SuperType));
else
- return Owned(new (Context) ObjCPropertyRefExpr(PD, T,
+ return Owned(new (Context) ObjCPropertyRefExpr(PD,
+ Context.PseudoObjectTy,
VK_LValue,
OK_ObjCProperty,
MemberLoc,
@@ -662,38 +1290,27 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
return ExprError();
if (Getter || Setter) {
- QualType PType;
- if (Getter)
- PType = getMessageSendResultType(QualType(OPT, 0), Getter, false, Super);
- else {
- ParmVarDecl *ArgDecl = *Setter->param_begin();
- PType = ArgDecl->getType();
- }
-
- ExprValueKind VK = VK_LValue;
- ExprObjectKind OK = OK_ObjCProperty;
- if (!getLangOptions().CPlusPlus && !PType.hasQualifiers() &&
- PType->isVoidType())
- VK = VK_RValue, OK = OK_Ordinary;
-
if (Super)
return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
- PType, VK, OK,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
MemberLoc,
SuperLoc, SuperType));
else
return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
- PType, VK, OK,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
MemberLoc, BaseExpr));
}
// Attempt to correct for typos in property names.
- TypoCorrection Corrected = CorrectTypo(
+ DeclFilterCCC<ObjCPropertyDecl> Validator;
+ if (TypoCorrection Corrected = CorrectTypo(
DeclarationNameInfo(MemberName, MemberLoc), LookupOrdinaryName, NULL,
- NULL, IFace, false, CTC_NoKeywords, OPT);
- if (ObjCPropertyDecl *Property =
- Corrected.getCorrectionDeclAs<ObjCPropertyDecl>()) {
+ NULL, Validator, IFace, false, OPT)) {
+ ObjCPropertyDecl *Property =
+ Corrected.getCorrectionDeclAs<ObjCPropertyDecl>();
DeclarationName TypoResult = Corrected.getCorrection();
Diag(MemberLoc, diag::err_property_not_found_suggest)
<< MemberName << QualType(OPT, 0) << TypoResult
@@ -710,14 +1327,10 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
QualType T = Ivar->getType();
if (const ObjCObjectPointerType * OBJPT =
T->getAsObjCInterfacePointerType()) {
- const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
- if (ObjCInterfaceDecl *IFace = IFaceT->getDecl())
- if (IFace->isForwardDecl()) {
- Diag(MemberLoc, diag::err_property_not_as_forward_class)
- << MemberName << IFace;
- Diag(IFace->getLocation(), diag::note_forward_class);
- return ExprError();
- }
+ if (RequireCompleteType(MemberLoc, OBJPT->getPointeeType(),
+ PDiag(diag::err_property_not_as_forward_class)
+ << MemberName << BaseExpr->getSourceRange()))
+ return ExprError();
}
Diag(MemberLoc,
diag::err_ivar_access_using_property_syntax_suggest)
@@ -753,7 +1366,7 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
if (receiverNamePtr->isStr("super")) {
IsSuper = true;
- if (ObjCMethodDecl *CurMethod = tryCaptureObjCSelf()) {
+ if (ObjCMethodDecl *CurMethod = tryCaptureObjCSelf(receiverNameLoc)) {
if (CurMethod->isInstanceMethod()) {
QualType T =
Context.getObjCInterfaceType(CurMethod->getClassInterface());
@@ -819,34 +1432,17 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
return ExprError();
if (Getter || Setter) {
- QualType PType;
-
- ExprValueKind VK = VK_LValue;
- if (Getter) {
- PType = getMessageSendResultType(Context.getObjCInterfaceType(IFace),
- Getter, true,
- receiverNamePtr->isStr("super"));
- if (!getLangOptions().CPlusPlus &&
- !PType.hasQualifiers() && PType->isVoidType())
- VK = VK_RValue;
- } else {
- for (ObjCMethodDecl::param_iterator PI = Setter->param_begin(),
- E = Setter->param_end(); PI != E; ++PI)
- PType = (*PI)->getType();
- VK = VK_LValue;
- }
-
- ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
-
if (IsSuper)
return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
- PType, VK, OK,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
propertyNameLoc,
receiverNameLoc,
Context.getObjCInterfaceType(IFace)));
return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
- PType, VK, OK,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
propertyNameLoc,
receiverNameLoc, IFace));
}
@@ -854,6 +1450,24 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
<< &propertyName << Context.getObjCInterfaceType(IFace));
}
+namespace {
+
+class ObjCInterfaceOrSuperCCC : public CorrectionCandidateCallback {
+ public:
+ ObjCInterfaceOrSuperCCC(ObjCMethodDecl *Method) {
+ // Determine whether "super" is acceptable in the current context.
+ if (Method && Method->getClassInterface())
+ WantObjCSuper = Method->getClassInterface()->getSuperClass();
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return candidate.getCorrectionDeclAs<ObjCInterfaceDecl>() ||
+ candidate.isKeyword("super");
+ }
+};
+
+}
+
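Typo correction at these call sites now takes a CorrectionCandidateCallback subclass rather than a CTC_* context enum; this one accepts only interface declarations, plus the keyword "super" when the current method's class has a superclass. A standalone sketch of that validator-driven filtering (hypothetical Candidate type, not Clang's TypoCorrection):

    #include <string>
    #include <vector>

    struct Candidate { std::string text; bool isKeyword; bool isInterface; };

    struct CorrectionCallback {
        bool wantSuper = false;  // set when the method's class has a superclass
        virtual bool validate(const Candidate &c) const {
            return c.isInterface ||
                   (wantSuper && c.isKeyword && c.text == "super");
        }
        virtual ~CorrectionCallback() = default;
    };

    // The engine proposes candidates; the callback filters them.
    static const Candidate *correctTypo(const std::vector<Candidate> &all,
                                        const CorrectionCallback &cb) {
        for (const auto &c : all)
            if (cb.validate(c))
                return &c;            // first acceptable candidate wins
        return nullptr;
    }

    int main() {
        CorrectionCallback cb;
        cb.wantSuper = true;
        std::vector<Candidate> cands{{"supper", false, false},  // rejected
                                     {"super",  true,  false},  // accepted keyword
                                     {"NSString", false, true}};
        const Candidate *hit = correctTypo(cands, cb);
        return hit && hit->text == "super" ? 0 : 1;
    }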
Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
@@ -878,6 +1492,11 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
// FIXME: This is a hack. Ivar lookup should be part of normal
// lookup.
if (ObjCMethodDecl *Method = getCurMethodDecl()) {
+ if (!Method->getClassInterface()) {
+ // Fall back: let the parser try to parse it as an instance message.
+ return ObjCInstanceMessage;
+ }
+
ObjCInterfaceDecl *ClassDeclared;
if (Method->getClassInterface()->lookupInstanceVariable(Name,
ClassDeclared))
@@ -918,39 +1537,32 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
}
}
- // Determine our typo-correction context.
- CorrectTypoContext CTC = CTC_Expression;
- if (ObjCMethodDecl *Method = getCurMethodDecl())
- if (Method->getClassInterface() &&
- Method->getClassInterface()->getSuperClass())
- CTC = CTC_ObjCMessageReceiver;
-
+ ObjCInterfaceOrSuperCCC Validator(getCurMethodDecl());
if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
Result.getLookupKind(), S, NULL,
- NULL, false, CTC)) {
- if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
- // If we found a declaration, correct when it refers to an Objective-C
- // class.
- if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(ND)) {
- Diag(NameLoc, diag::err_unknown_receiver_suggest)
- << Name << Corrected.getCorrection()
- << FixItHint::CreateReplacement(SourceRange(NameLoc),
- ND->getNameAsString());
- Diag(ND->getLocation(), diag::note_previous_decl)
- << Corrected.getCorrection();
-
- QualType T = Context.getObjCInterfaceType(Class);
- TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
- ReceiverType = CreateParsedType(T, TSInfo);
- return ObjCClassMessage;
- }
- } else if (Corrected.isKeyword() &&
- Corrected.getCorrectionAsIdentifierInfo()->isStr("super")) {
- // If we've found the keyword "super", this is a send to super.
+ Validator)) {
+ if (Corrected.isKeyword()) {
+ // If we've found the keyword "super" (the only keyword that would be
+ // returned by CorrectTypo), this is a send to super.
Diag(NameLoc, diag::err_unknown_receiver_suggest)
<< Name << Corrected.getCorrection()
<< FixItHint::CreateReplacement(SourceRange(NameLoc), "super");
return ObjCSuperMessage;
+ } else if (ObjCInterfaceDecl *Class =
+ Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
+ // If we found a declaration, correct when it refers to an Objective-C
+ // class.
+ Diag(NameLoc, diag::err_unknown_receiver_suggest)
+ << Name << Corrected.getCorrection()
+ << FixItHint::CreateReplacement(SourceRange(NameLoc),
+ Class->getNameAsString());
+ Diag(Class->getLocation(), diag::note_previous_decl)
+ << Corrected.getCorrection();
+
+ QualType T = Context.getObjCInterfaceType(Class);
+ TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
+ ReceiverType = CreateParsedType(T, TSInfo);
+ return ObjCClassMessage;
}
}
@@ -966,7 +1578,7 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
SourceLocation RBracLoc,
MultiExprArg Args) {
// Determine whether we are inside a method or not.
- ObjCMethodDecl *Method = tryCaptureObjCSelf();
+ ObjCMethodDecl *Method = tryCaptureObjCSelf(SuperLoc);
if (!Method) {
Diag(SuperLoc, diag::err_invalid_receiver_to_message_super);
return ExprError();
@@ -1012,6 +1624,68 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
LBracLoc, SelectorLocs, RBracLoc, move(Args));
}
+
+ExprResult Sema::BuildClassMessageImplicit(QualType ReceiverType,
+ bool isSuperReceiver,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args) {
+ TypeSourceInfo *receiverTypeInfo = 0;
+ if (!ReceiverType.isNull())
+ receiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType);
+
+ return BuildClassMessage(receiverTypeInfo, ReceiverType,
+ /*SuperLoc=*/isSuperReceiver ? Loc : SourceLocation(),
+ Sel, Method, Loc, Loc, Loc, Args,
+ /*isImplicit=*/true);
+}
+
+static void applyCocoaAPICheck(Sema &S, const ObjCMessageExpr *Msg,
+ unsigned DiagID,
+ bool (*refactor)(const ObjCMessageExpr *,
+ const NSAPI &, edit::Commit &)) {
+ SourceLocation MsgLoc = Msg->getExprLoc();
+ if (S.Diags.getDiagnosticLevel(DiagID, MsgLoc) == DiagnosticsEngine::Ignored)
+ return;
+
+ SourceManager &SM = S.SourceMgr;
+ edit::Commit ECommit(SM, S.LangOpts);
+ if (refactor(Msg,*S.NSAPIObj, ECommit)) {
+ DiagnosticBuilder Builder = S.Diag(MsgLoc, DiagID)
+ << Msg->getSelector() << Msg->getSourceRange();
+ // FIXME: Don't emit diagnostic at all if fixits are non-commitable.
+ if (!ECommit.isCommitable())
+ return;
+ for (edit::Commit::edit_iterator
+ I = ECommit.edit_begin(), E = ECommit.edit_end(); I != E; ++I) {
+ const edit::Commit::Edit &Edit = *I;
+ switch (Edit.Kind) {
+ case edit::Commit::Act_Insert:
+ Builder.AddFixItHint(FixItHint::CreateInsertion(Edit.OrigLoc,
+ Edit.Text,
+ Edit.BeforePrev));
+ break;
+ case edit::Commit::Act_InsertFromRange:
+ Builder.AddFixItHint(
+ FixItHint::CreateInsertionFromRange(Edit.OrigLoc,
+ Edit.getInsertFromRange(SM),
+ Edit.BeforePrev));
+ break;
+ case edit::Commit::Act_Remove:
+ Builder.AddFixItHint(FixItHint::CreateRemoval(Edit.getFileRange(SM)));
+ break;
+ }
+ }
+ }
+}
+
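applyCocoaAPICheck translates each edit recorded in the rewriter's Commit into the matching fix-it kind, and emits nothing when the commit cannot be applied. A simplified standalone mapping of that translation (hypothetical Edit and Hint types):

    #include <string>
    #include <vector>

    enum class EditKind { Insert, InsertFromRange, Remove };
    struct Edit { EditKind kind; std::string text; };
    struct Hint { std::string description; };

    static std::vector<Hint> toHints(const std::vector<Edit> &edits,
                                     bool commitable) {
        std::vector<Hint> hints;
        if (!commitable)            // don't offer a fix that cannot be applied
            return hints;
        for (const auto &e : edits) {
            switch (e.kind) {
            case EditKind::Insert:
                hints.push_back({"insert \"" + e.text + "\""});
                break;
            case EditKind::InsertFromRange:
                hints.push_back({"insert text copied from another range"});
                break;
            case EditKind::Remove:
                hints.push_back({"remove the source span"});
                break;
            }
        }
        return hints;
    }

    int main() {
        std::vector<Edit> edits{{EditKind::Insert, "@"},
                                {EditKind::Remove, ""}};
        return toHints(edits, true).size() == 2 ? 0 : 1;
    }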
+static void checkCocoaAPI(Sema &S, const ObjCMessageExpr *Msg) {
+ applyCocoaAPICheck(S, Msg, diag::warn_objc_redundant_literal_use,
+ edit::rewriteObjCRedundantCallWithLiteral);
+}
+
/// \brief Build an Objective-C class message expression.
///
/// This routine takes care of both normal class messages and
@@ -1048,7 +1722,8 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
- MultiExprArg ArgsIn) {
+ MultiExprArg ArgsIn,
+ bool isImplicit) {
SourceLocation Loc = SuperLoc.isValid()? SuperLoc
: ReceiverTypeInfo->getTypeLoc().getSourceRange().getBegin();
if (LBracLoc.isInvalid()) {
@@ -1066,7 +1741,8 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
return Owned(ObjCMessageExpr::Create(Context, ReceiverType,
VK_RValue, LBracLoc, ReceiverTypeInfo,
Sel, SelectorLocs, /*Method=*/0,
- makeArrayRef(Args, NumArgs),RBracLoc));
+ makeArrayRef(Args, NumArgs),RBracLoc,
+ isImplicit));
}
// Find the class to which we are sending this message.
@@ -1078,20 +1754,23 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
return ExprError();
}
assert(Class && "We don't know which class we're messaging?");
- (void)DiagnoseUseOfDecl(Class, Loc);
+ // objc++ diagnoses during typename annotation.
+ if (!getLangOpts().CPlusPlus)
+ (void)DiagnoseUseOfDecl(Class, Loc);
// Find the method we are messaging.
if (!Method) {
- if (Class->isForwardDecl()) {
- if (getLangOptions().ObjCAutoRefCount) {
- Diag(Loc, diag::err_arc_receiver_forward_class) << ReceiverType;
- } else {
- Diag(Loc, diag::warn_receiver_forward_class) << Class->getDeclName();
- }
-
+ SourceRange TypeRange
+ = SuperLoc.isValid()? SourceRange(SuperLoc)
+ : ReceiverTypeInfo->getTypeLoc().getSourceRange();
+ if (RequireCompleteType(Loc, Context.getObjCInterfaceType(Class),
+ (getLangOpts().ObjCAutoRefCount
+ ? PDiag(diag::err_arc_receiver_forward_class)
+ : PDiag(diag::warn_receiver_forward_class))
+ << TypeRange)) {
// A forward class used in messaging is treated as a 'Class'
Method = LookupFactoryMethodInGlobalPool(Sel,
SourceRange(LBracLoc, RBracLoc));
- if (Method && !getLangOptions().ObjCAutoRefCount)
+ if (Method && !getLangOpts().ObjCAutoRefCount)
Diag(Method->getLocation(), diag::note_method_sent_forward_class)
<< Method->getDeclName();
}
@@ -1123,18 +1802,21 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
return ExprError();
// Construct the appropriate ObjCMessageExpr.
- Expr *Result;
+ ObjCMessageExpr *Result;
if (SuperLoc.isValid())
Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
SuperLoc, /*IsInstanceSuper=*/false,
ReceiverType, Sel, SelectorLocs,
Method, makeArrayRef(Args, NumArgs),
- RBracLoc);
- else
+ RBracLoc, isImplicit);
+ else {
Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
ReceiverTypeInfo, Sel, SelectorLocs,
Method, makeArrayRef(Args, NumArgs),
- RBracLoc);
+ RBracLoc, isImplicit);
+ if (!isImplicit)
+ checkCocoaAPI(*this, Result);
+ }
return MaybeBindToTemporary(Result);
}
@@ -1162,6 +1844,18 @@ ExprResult Sema::ActOnClassMessage(Scope *S,
LBracLoc, SelectorLocs, RBracLoc, move(Args));
}
+ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver,
+ QualType ReceiverType,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args) {
+ return BuildInstanceMessage(Receiver, ReceiverType,
+ /*SuperLoc=*/!Receiver ? Loc : SourceLocation(),
+ Sel, Method, Loc, Loc, Loc, Args,
+ /*isImplicit=*/true);
+}
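The instance-message counterpart mirrors BuildClassMessageImplicit, with one twist visible in the SuperLoc argument: a null Receiver is interpreted as a message to 'super' at Loc. A minimal sketch under that assumption:

    #include "clang/Sema/Sema.h"
    using namespace clang;

    // Sketch: forwarding to the helper above; passing Receiver == 0 makes
    // it treat Loc as the 'super' location (see the SuperLoc computation).
    ExprResult sendImplicitInstanceMsg(Sema &S, Expr *Receiver,
                                       QualType ReceiverTy, SourceLocation Loc,
                                       Selector Sel, ObjCMethodDecl *Method,
                                       MultiExprArg Args) {
      return S.BuildInstanceMessageImplicit(Receiver, ReceiverTy, Loc, Sel,
                                            Method, Args);
    }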
+
/// \brief Build an Objective-C instance message expression.
///
/// This routine takes care of both normal instance messages and
@@ -1198,7 +1892,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
- MultiExprArg ArgsIn) {
+ MultiExprArg ArgsIn,
+ bool isImplicit) {
// The location of the receiver.
SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();
@@ -1211,6 +1906,16 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// If we have a receiver expression, perform appropriate promotions
// and determine receiver type.
if (Receiver) {
+ if (Receiver->hasPlaceholderType()) {
+ ExprResult Result;
+ if (Receiver->getType() == Context.UnknownAnyTy)
+ Result = forceUnknownAnyToType(Receiver, Context.getObjCIdType());
+ else
+ Result = CheckPlaceholderExpr(Receiver);
+ if (Result.isInvalid()) return ExprError();
+ Receiver = Result.take();
+ }
+
if (Receiver->isTypeDependent()) {
// If the receiver is type-dependent, we can't type-check anything
// at this point. Build a dependent expression.
@@ -1221,7 +1926,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
VK_RValue, LBracLoc, Receiver, Sel,
SelectorLocs, /*Method=*/0,
makeArrayRef(Args, NumArgs),
- RBracLoc));
+ RBracLoc, isImplicit));
}
// If necessary, apply function/array conversion to the receiver.
@@ -1260,7 +1965,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (Method) {
Diag(Loc, diag::warn_instance_method_on_class_found)
<< Method->getSelector() << Sel;
- Diag(Method->getLocation(), diag::note_method_declared_at);
+ Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
}
}
} else {
@@ -1314,28 +2020,37 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// We allow sending a message to a pointer to an interface (an object).
ClassDecl = OCIType->getInterfaceDecl();
- if (ClassDecl->isForwardDecl() && getLangOptions().ObjCAutoRefCount) {
- Diag(Loc, diag::err_arc_receiver_forward_instance)
- << OCIType->getPointeeType()
- << (Receiver ? Receiver->getSourceRange() : SourceRange(SuperLoc));
- return ExprError();
+ // Try to complete the type. Under ARC, this is a hard error from which
+ // we don't try to recover.
+ const ObjCInterfaceDecl *forwardClass = 0;
+ if (RequireCompleteType(Loc, OCIType->getPointeeType(),
+ getLangOpts().ObjCAutoRefCount
+ ? PDiag(diag::err_arc_receiver_forward_instance)
+ << (Receiver ? Receiver->getSourceRange()
+ : SourceRange(SuperLoc))
+ : PDiag(diag::warn_receiver_forward_instance)
+ << (Receiver ? Receiver->getSourceRange()
+ : SourceRange(SuperLoc)))) {
+ if (getLangOpts().ObjCAutoRefCount)
+ return ExprError();
+
+ forwardClass = OCIType->getInterfaceDecl();
+ Diag(Receiver ? Receiver->getLocStart()
+ : SuperLoc, diag::note_receiver_is_id);
+ Method = 0;
+ } else {
+ Method = ClassDecl->lookupInstanceMethod(Sel);
}
- // FIXME: consider using LookupInstanceMethodInGlobalPool, since it will be
- // faster than the following method (which can do *many* linear searches).
- // The idea is to add class info to MethodPool.
- Method = ClassDecl->lookupInstanceMethod(Sel);
-
if (!Method)
// Search protocol qualifiers.
Method = LookupMethodInQualifiedType(Sel, OCIType, true);
- const ObjCInterfaceDecl *forwardClass = 0;
if (!Method) {
// If we have implementations in scope, check "private" methods.
Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
- if (!Method && getLangOptions().ObjCAutoRefCount) {
+ if (!Method && getLangOpts().ObjCAutoRefCount) {
Diag(Loc, diag::err_arc_may_not_respond)
<< OCIType->getPointeeType() << Sel;
return ExprError();
@@ -1348,8 +2063,6 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
if (OCIType->qual_empty()) {
Method = LookupInstanceMethodInGlobalPool(Sel,
SourceRange(LBracLoc, RBracLoc));
- if (OCIType->getInterfaceDecl()->isForwardDecl())
- forwardClass = OCIType->getInterfaceDecl();
if (Method && !forwardClass)
Diag(Loc, diag::warn_maynot_respond)
<< OCIType->getInterfaceDecl()->getIdentifier() << Sel;
@@ -1358,7 +2071,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
if (Method && DiagnoseUseOfDecl(Method, Loc, forwardClass))
return ExprError();
- } else if (!getLangOptions().ObjCAutoRefCount &&
+ } else if (!getLangOpts().ObjCAutoRefCount &&
!Context.getObjCIdType().isNull() &&
(ReceiverType->isPointerType() ||
ReceiverType->isIntegerType())) {
@@ -1380,7 +2093,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
ReceiverType = Receiver->getType();
} else {
ExprResult ReceiverRes;
- if (getLangOptions().CPlusPlus)
+ if (getLangOpts().CPlusPlus)
ReceiverRes = PerformContextuallyConvertToObjCPointer(Receiver);
if (ReceiverRes.isUsable()) {
Receiver = ReceiverRes.take();
@@ -1424,7 +2137,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// In ARC, forbid the user from sending messages to
// retain/release/autorelease/dealloc/retainCount explicitly.
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
ObjCMethodFamily family =
(Method ? Method->getMethodFamily() : Sel.getMethodFamily());
switch (family) {
@@ -1475,7 +2188,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// selector names a +1 method
Diag(SelLoc,
diag::err_arc_perform_selector_retains);
- Diag(SelMethod->getLocation(), diag::note_method_declared_at);
+ Diag(SelMethod->getLocation(), diag::note_method_declared_at)
+ << SelMethod->getDeclName();
}
break;
default:
@@ -1484,7 +2198,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// selector names a +1 method
Diag(SelLoc,
diag::err_arc_perform_selector_retains);
- Diag(SelMethod->getLocation(), diag::note_method_declared_at);
+ Diag(SelMethod->getLocation(), diag::note_method_declared_at)
+ << SelMethod->getDeclName();
}
break;
}
@@ -1505,13 +2220,23 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
SuperLoc, /*IsInstanceSuper=*/true,
ReceiverType, Sel, SelectorLocs, Method,
- makeArrayRef(Args, NumArgs), RBracLoc);
- else
+ makeArrayRef(Args, NumArgs), RBracLoc,
+ isImplicit);
+ else {
Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
Receiver, Sel, SelectorLocs, Method,
- makeArrayRef(Args, NumArgs), RBracLoc);
+ makeArrayRef(Args, NumArgs), RBracLoc,
+ isImplicit);
+ if (!isImplicit)
+ checkCocoaAPI(*this, Result);
+ }
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ if (Receiver &&
+ (Receiver->IgnoreParenImpCasts()->getType().getObjCLifetime()
+ == Qualifiers::OCL_Weak))
+ Diag(Receiver->getLocStart(), diag::warn_receiver_is_weak);
+
// In ARC, annotate delegate init calls.
if (Result->getMethodFamily() == OMF_init &&
(SuperLoc.isValid() || isSelfExpr(Receiver))) {
@@ -1693,7 +2418,6 @@ namespace {
case CK_NoOp:
case CK_LValueToRValue:
case CK_BitCast:
- case CK_GetObjCProperty:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
@@ -1721,6 +2445,12 @@ namespace {
return merge(left, Visit(e->getFalseExpr()));
}
+ /// Look through pseudo-objects.
+ ACCResult VisitPseudoObjectExpr(PseudoObjectExpr *e) {
+ // If we're getting here, we should always have a result.
+ return Visit(e->getResultExpr());
+ }
+
/// Statement expressions are okay if their result expression is okay.
ACCResult VisitStmtExpr(StmtExpr *e) {
return Visit(e->getSubStmt()->body_back());
@@ -1834,7 +2564,171 @@ namespace {
};
}
-void
+static bool
+KnownName(Sema &S, const char *name) {
+ LookupResult R(S, &S.Context.Idents.get(name), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ return S.LookupName(R, S.TUScope, false);
+}
+
+static void addFixitForObjCARCConversion(Sema &S,
+ DiagnosticBuilder &DiagB,
+ Sema::CheckedConversionKind CCK,
+ SourceLocation afterLParen,
+ QualType castType,
+ Expr *castExpr,
+ const char *bridgeKeyword,
+ const char *CFBridgeName) {
+ // We handle C-style and implicit casts here.
+ switch (CCK) {
+ case Sema::CCK_ImplicitConversion:
+ case Sema::CCK_CStyleCast:
+ break;
+ case Sema::CCK_FunctionalCast:
+ case Sema::CCK_OtherCast:
+ return;
+ }
+
+ if (CFBridgeName) {
+ Expr *castedE = castExpr;
+ if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(castedE))
+ castedE = CCE->getSubExpr();
+ castedE = castedE->IgnoreImpCasts();
+ SourceRange range = castedE->getSourceRange();
+ if (isa<ParenExpr>(castedE)) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ CFBridgeName));
+ } else {
+ std::string namePlusParen = CFBridgeName;
+ namePlusParen += "(";
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ namePlusParen));
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(
+ S.PP.getLocForEndOfToken(range.getEnd()),
+ ")"));
+ }
+ return;
+ }
+
+ if (CCK == Sema::CCK_CStyleCast) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(afterLParen, bridgeKeyword));
+ } else {
+ std::string castCode = "(";
+ castCode += bridgeKeyword;
+ castCode += castType.getAsString();
+ castCode += ")";
+ Expr *castedE = castExpr->IgnoreImpCasts();
+ SourceRange range = castedE->getSourceRange();
+ if (isa<ParenExpr>(castedE)) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ castCode));
+ } else {
+ castCode += "(";
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ castCode));
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(
+ S.PP.getLocForEndOfToken(range.getEnd()),
+ ")"));
+ }
+ }
+}
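Condensed, the helper above picks between two insertion shapes: wrap the operand in a CF bridging call when a function name was supplied, otherwise splice in a __bridge-style keyword (after the '(' of a C-style cast, or as a freshly written cast elsewhere). A standalone paraphrase of that decision, assuming nothing beyond the logic above:

    #include <string>
    #include <utility>

    // Standalone paraphrase of the insertion shapes chosen above. Returns
    // {textBeforeOperand, textAfterOperand}; an empty second string means a
    // single insertion suffices.
    std::pair<std::string, std::string>
    bridgeFixitText(bool isCStyleCast, bool operandParenthesized,
                    const std::string &castTypeStr,
                    const char *bridgeKeyword, const char *cfBridgeName) {
      if (cfBridgeName) {
        // Wrap the operand: CFBridgingRetain(expr) / CFBridgingRelease(expr).
        if (operandParenthesized)
          return {cfBridgeName, ""};
        return {std::string(cfBridgeName) + "(", ")"};
      }
      if (isCStyleCast)
        // Enough to insert the keyword right after the cast's '('.
        return {bridgeKeyword, ""};
      // No cast was written: synthesize "(__bridge T)", parenthesizing the
      // operand when it is not already a ParenExpr.
      std::string castCode = "(" + std::string(bridgeKeyword) + castTypeStr + ")";
      if (operandParenthesized)
        return {castCode, ""};
      return {castCode + "(", ")"};
    }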
+
+static void
+diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
+ QualType castType, ARCConversionTypeClass castACTC,
+ Expr *castExpr, ARCConversionTypeClass exprACTC,
+ Sema::CheckedConversionKind CCK) {
+ SourceLocation loc =
+ (castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc());
+
+ if (S.makeUnavailableInSystemHeader(loc,
+ "converts between Objective-C and C pointers in -fobjc-arc"))
+ return;
+
+ QualType castExprType = castExpr->getType();
+
+ unsigned srcKind = 0;
+ switch (exprACTC) {
+ case ACTC_none:
+ case ACTC_coreFoundation:
+ case ACTC_voidPtr:
+ srcKind = (castExprType->isPointerType() ? 1 : 0);
+ break;
+ case ACTC_retainable:
+ srcKind = (castExprType->isBlockPointerType() ? 2 : 3);
+ break;
+ case ACTC_indirectRetainable:
+ srcKind = 4;
+ break;
+ }
+
+ // Check whether this could be fixed with a bridge cast.
+ SourceLocation afterLParen = S.PP.getLocForEndOfToken(castRange.getBegin());
+ SourceLocation noteLoc = afterLParen.isValid() ? afterLParen : loc;
+
+ // Bridge from an ARC type to a CF type.
+ if (castACTC == ACTC_retainable && isAnyRetainable(exprACTC)) {
+
+ S.Diag(loc, diag::err_arc_cast_requires_bridge)
+ << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
+ << 2 // of C pointer type
+ << castExprType
+ << unsigned(castType->isBlockPointerType()) // to ObjC|block type
+ << castType
+ << castRange
+ << castExpr->getSourceRange();
+ bool br = KnownName(S, "CFBridgingRelease");
+ {
+ DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge);
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, "__bridge ", 0);
+ }
+ {
+ DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge_transfer)
+ << castExprType << br;
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, "__bridge_transfer ",
+ br ? "CFBridgingRelease" : 0);
+ }
+
+ return;
+ }
+
+ // Bridge from a CF type to an ARC type.
+ if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC)) {
+ bool br = KnownName(S, "CFBridgingRetain");
+ S.Diag(loc, diag::err_arc_cast_requires_bridge)
+ << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
+ << unsigned(castExprType->isBlockPointerType()) // of ObjC|block type
+ << castExprType
+ << 2 // to C pointer type
+ << castType
+ << castRange
+ << castExpr->getSourceRange();
+
+ {
+ DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge);
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, "__bridge ", 0);
+ }
+ {
+ DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge_retained)
+ << castType << br;
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, "__bridge_retained ",
+ br ? "CFBridgingRetain" : 0);
+ }
+
+ return;
+ }
+
+ S.Diag(loc, diag::err_arc_mismatched_cast)
+ << (CCK != Sema::CCK_ImplicitConversion)
+ << srcKind << castExprType << castType
+ << castRange << castExpr->getSourceRange();
+}
+
+Sema::ARCConversionResult
Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
Expr *&castExpr, CheckedConversionKind CCK) {
QualType castExprType = castExpr->getType();
@@ -1847,22 +2741,49 @@ Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExprType);
ARCConversionTypeClass castACTC = classifyTypeForARCConversion(effCastType);
- if (exprACTC == castACTC) return;
- if (isAnyCLike(exprACTC) && isAnyCLike(castACTC)) return;
+ if (exprACTC == castACTC) {
+ // Check viability and report an error when casting an rvalue to a
+ // lifetime-qualified type.
+ if ((castACTC == ACTC_retainable) &&
+ (CCK == CCK_CStyleCast || CCK == CCK_OtherCast) &&
+ (castType != castExprType)) {
+ const Type *DT = castType.getTypePtr();
+ QualType QDT = castType;
+ // We desugar some types but not others: we skip those that cannot
+ // appear in a cast (e.g., auto) and those that should not be
+ // desugared (e.g., typedefs).
+ if (const ParenType *PT = dyn_cast<ParenType>(DT))
+ QDT = PT->desugar();
+ else if (const TypeOfType *TP = dyn_cast<TypeOfType>(DT))
+ QDT = TP->desugar();
+ else if (const AttributedType *AT = dyn_cast<AttributedType>(DT))
+ QDT = AT->desugar();
+ if (QDT != castType &&
+ QDT.getObjCLifetime() != Qualifiers::OCL_None) {
+ SourceLocation loc =
+ (castRange.isValid() ? castRange.getBegin()
+ : castExpr->getExprLoc());
+ Diag(loc, diag::err_arc_nolifetime_behavior);
+ }
+ }
+ return ACR_okay;
+ }
+
+ if (isAnyCLike(exprACTC) && isAnyCLike(castACTC)) return ACR_okay;
// Allow all of these types to be cast to integer types (but not
// vice-versa).
if (castACTC == ACTC_none && castType->isIntegralType(Context))
- return;
+ return ACR_okay;
// Allow casts between pointers to lifetime types (e.g., __strong id*)
// and pointers to void (e.g., cv void *). Casting from void* to lifetime*
// must be explicit.
if (exprACTC == ACTC_indirectRetainable && castACTC == ACTC_voidPtr)
- return;
+ return ACR_okay;
if (castACTC == ACTC_indirectRetainable && exprACTC == ACTC_voidPtr &&
CCK != CCK_ImplicitConversion)
- return;
+ return ACR_okay;
switch (ARCCastChecker(Context, exprACTC, castACTC).Visit(castExpr)) {
// For invalid casts, fall through.
@@ -1872,7 +2793,7 @@ Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
// Do nothing for both bottom and +0.
case ACC_bottom:
case ACC_plusZero:
- return;
+ return ACR_okay;
// If the result is +1, consume it here.
case ACC_plusOne:
@@ -1880,74 +2801,94 @@ Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
CK_ARCConsumeObject, castExpr,
0, VK_RValue);
ExprNeedsCleanups = true;
- return;
+ return ACR_okay;
}
-
- SourceLocation loc =
- (castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc());
-
- if (makeUnavailableInSystemHeader(loc,
- "converts between Objective-C and C pointers in -fobjc-arc"))
- return;
-
- unsigned srcKind = 0;
- switch (exprACTC) {
- case ACTC_none:
- case ACTC_coreFoundation:
- case ACTC_voidPtr:
- srcKind = (castExprType->isPointerType() ? 1 : 0);
- break;
- case ACTC_retainable:
- srcKind = (castExprType->isBlockPointerType() ? 2 : 3);
- break;
- case ACTC_indirectRetainable:
- srcKind = 4;
- break;
+
+ // If this is a non-implicit cast from id or block type to a
+ // CoreFoundation type, delay complaining in case the cast is used
+ // in an acceptable context.
+ if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC) &&
+ CCK != CCK_ImplicitConversion)
+ return ACR_unbridged;
+
+ diagnoseObjCARCConversion(*this, castRange, castType, castACTC,
+ castExpr, exprACTC, CCK);
+ return ACR_okay;
+}
+
+/// Given that we saw an expression with the ARCUnbridgedCastTy
+/// placeholder type, complain bitterly.
+void Sema::diagnoseARCUnbridgedCast(Expr *e) {
+ // We expect the spurious ImplicitCastExpr to already have been stripped.
+ assert(!e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+ CastExpr *realCast = cast<CastExpr>(e->IgnoreParens());
+
+ SourceRange castRange;
+ QualType castType;
+ CheckedConversionKind CCK;
+
+ if (CStyleCastExpr *cast = dyn_cast<CStyleCastExpr>(realCast)) {
+ castRange = SourceRange(cast->getLParenLoc(), cast->getRParenLoc());
+ castType = cast->getTypeAsWritten();
+ CCK = CCK_CStyleCast;
+ } else if (ExplicitCastExpr *cast = dyn_cast<ExplicitCastExpr>(realCast)) {
+ castRange = cast->getTypeInfoAsWritten()->getTypeLoc().getSourceRange();
+ castType = cast->getTypeAsWritten();
+ CCK = CCK_OtherCast;
+ } else {
+ castType = realCast->getType();
+ CCK = CCK_ImplicitConversion;
}
-
- if (CCK == CCK_CStyleCast) {
- // Check whether this could be fixed with a bridge cast.
- SourceLocation AfterLParen = PP.getLocForEndOfToken(castRange.getBegin());
- SourceLocation NoteLoc = AfterLParen.isValid()? AfterLParen : loc;
-
- if (castACTC == ACTC_retainable && isAnyRetainable(exprACTC)) {
- Diag(loc, diag::err_arc_cast_requires_bridge)
- << 2
- << castExprType
- << (castType->isBlockPointerType()? 1 : 0)
- << castType
- << castRange
- << castExpr->getSourceRange();
- Diag(NoteLoc, diag::note_arc_bridge)
- << FixItHint::CreateInsertion(AfterLParen, "__bridge ");
- Diag(NoteLoc, diag::note_arc_bridge_transfer)
- << castExprType
- << FixItHint::CreateInsertion(AfterLParen, "__bridge_transfer ");
-
- return;
- }
-
- if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC)) {
- Diag(loc, diag::err_arc_cast_requires_bridge)
- << (castExprType->isBlockPointerType()? 1 : 0)
- << castExprType
- << 2
- << castType
- << castRange
- << castExpr->getSourceRange();
-
- Diag(NoteLoc, diag::note_arc_bridge)
- << FixItHint::CreateInsertion(AfterLParen, "__bridge ");
- Diag(NoteLoc, diag::note_arc_bridge_retained)
- << castType
- << FixItHint::CreateInsertion(AfterLParen, "__bridge_retained ");
- return;
+
+ ARCConversionTypeClass castACTC =
+ classifyTypeForARCConversion(castType.getNonReferenceType());
+
+ Expr *castExpr = realCast->getSubExpr();
+ assert(classifyTypeForARCConversion(castExpr->getType()) == ACTC_retainable);
+
+ diagnoseObjCARCConversion(*this, castRange, castType, castACTC,
+ castExpr, ACTC_retainable, CCK);
+}
+
+/// stripARCUnbridgedCast - Given an expression of ARCUnbridgedCast
+/// type, remove the placeholder cast.
+Expr *Sema::stripARCUnbridgedCast(Expr *e) {
+ assert(e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+
+ if (ParenExpr *pe = dyn_cast<ParenExpr>(e)) {
+ Expr *sub = stripARCUnbridgedCast(pe->getSubExpr());
+ return new (Context) ParenExpr(pe->getLParen(), pe->getRParen(), sub);
+ } else if (UnaryOperator *uo = dyn_cast<UnaryOperator>(e)) {
+ assert(uo->getOpcode() == UO_Extension);
+ Expr *sub = stripARCUnbridgedCast(uo->getSubExpr());
+ return new (Context) UnaryOperator(sub, UO_Extension, sub->getType(),
+ sub->getValueKind(), sub->getObjectKind(),
+ uo->getOperatorLoc());
+ } else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
+ assert(!gse->isResultDependent());
+
+ unsigned n = gse->getNumAssocs();
+ SmallVector<Expr*, 4> subExprs(n);
+ SmallVector<TypeSourceInfo*, 4> subTypes(n);
+ for (unsigned i = 0; i != n; ++i) {
+ subTypes[i] = gse->getAssocTypeSourceInfo(i);
+ Expr *sub = gse->getAssocExpr(i);
+ if (i == gse->getResultIndex())
+ sub = stripARCUnbridgedCast(sub);
+ subExprs[i] = sub;
}
+
+ return new (Context) GenericSelectionExpr(Context, gse->getGenericLoc(),
+ gse->getControllingExpr(),
+ subTypes.data(), subExprs.data(),
+ n, gse->getDefaultLoc(),
+ gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(),
+ gse->getResultIndex());
+ } else {
+ assert(isa<ImplicitCastExpr>(e) && "bad form of unbridged cast!");
+ return cast<ImplicitCastExpr>(e)->getSubExpr();
}
-
- Diag(loc, diag::err_arc_mismatched_cast)
- << (CCK != CCK_ImplicitConversion) << srcKind << castExprType << castType
- << castRange << castExpr->getSourceRange();
}
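Taken together, the pieces above define a small protocol: CheckObjCARCConversion returns ACR_unbridged to defer the complaint, and the cast travels with the ARCUnbridgedCast placeholder until a consumer either strips it (in a context that legitimizes the cast) or calls diagnoseARCUnbridgedCast. A hypothetical consumer, assuming the expression has already been given the placeholder type by the cast-building code:

    #include "clang/AST/Expr.h"
    #include "clang/Sema/Sema.h"
    using namespace clang;

    // Hypothetical consumer of the ACR_unbridged protocol above. Assumes E
    // already carries BuiltinType::ARCUnbridgedCast when the check deferred.
    Expr *resolveUnbridged(Sema &S, Expr *E, bool castIsAcceptableHere) {
      if (!E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast))
        return E;                           // nothing was deferred
      if (castIsAcceptableHere)
        return S.stripARCUnbridgedCast(E);  // drop the placeholder cast
      S.diagnoseARCUnbridgedCast(E);        // complain now
      return E;
    }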
bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType,
@@ -2007,7 +2948,8 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
case OBC_Bridge:
break;
- case OBC_BridgeRetained:
+ case OBC_BridgeRetained: {
+ bool br = KnownName(*this, "CFBridgingRelease");
Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
<< 2
<< FromType
@@ -2018,12 +2960,14 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
Diag(BridgeKeywordLoc, diag::note_arc_bridge)
<< FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge");
Diag(BridgeKeywordLoc, diag::note_arc_bridge_transfer)
- << FromType
+ << FromType << br
<< FixItHint::CreateReplacement(BridgeKeywordLoc,
- "__bridge_transfer ");
+ br ? "CFBridgingRelease "
+ : "__bridge_transfer ");
Kind = OBC_Bridge;
break;
+ }
case OBC_BridgeTransfer:
// We must consume the Objective-C object produced by the cast.
@@ -2047,7 +2991,8 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
SubExpr, 0, VK_RValue);
break;
- case OBC_BridgeTransfer:
+ case OBC_BridgeTransfer: {
+ bool br = KnownName(*this, "CFBridgingRetain");
Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
<< (FromType->isBlockPointerType()? 1 : 0)
<< FromType
@@ -2059,12 +3004,14 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
Diag(BridgeKeywordLoc, diag::note_arc_bridge)
<< FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge ");
Diag(BridgeKeywordLoc, diag::note_arc_bridge_retained)
- << T
- << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge_retained ");
+ << T << br
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+ br ? "CFBridgingRetain " : "__bridge_retained");
Kind = OBC_Bridge;
break;
}
+ }
} else {
Diag(LParenLoc, diag::err_arc_bridge_cast_incompatible)
<< FromType << T << Kind
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
index 8e8a46d..b78ea7d 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
@@ -158,3 +158,47 @@ bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr,
return false;
}
+
+static bool isMacroDefined(const Sema &S, StringRef Name) {
+ return S.PP.getMacroInfo(&S.getASTContext().Idents.get(Name));
+}
+
+const char *Sema::getFixItZeroInitializerForType(QualType T) const {
+ if (T->isScalarType()) {
+ // Suggest " = 0" for non-enumeration scalar types, unless we can find a
+ // better initializer.
+ if (T->isEnumeralType())
+ return 0;
+ if ((T->isObjCObjectPointerType() || T->isBlockPointerType()) &&
+ isMacroDefined(*this, "nil"))
+ return " = nil";
+ if (T->isRealFloatingType())
+ return " = 0.0";
+ if (T->isBooleanType() && LangOpts.CPlusPlus)
+ return " = false";
+ if (T->isPointerType() || T->isMemberPointerType()) {
+ if (LangOpts.CPlusPlus0x)
+ return " = nullptr";
+ else if (isMacroDefined(*this, "NULL"))
+ return " = NULL";
+ }
+ if (T->isCharType())
+ return " = '\\0'";
+ if (T->isWideCharType())
+ return " = L'\\0'";
+ if (T->isChar16Type())
+ return " = u'\\0'";
+ if (T->isChar32Type())
+ return " = U'\\0'";
+ return " = 0";
+ }
+
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ if (!RD || !RD->hasDefinition())
+ return 0;
+ if (LangOpts.CPlusPlus0x && !RD->hasUserProvidedDefaultConstructor())
+ return "{}";
+ if (RD->isAggregate())
+ return " = {}";
+ return 0;
+}
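A possible caller shape for the new helper: attach the returned text as an insertion fix-it on an uninitialized-variable diagnostic. The diagnostic ID and the caller itself are illustrative; only getFixItZeroInitializerForType comes from this patch:

    #include "clang/AST/Decl.h"
    #include "clang/Sema/Sema.h"
    using namespace clang;

    // Illustrative caller (DiagID is hypothetical): suggests " = 0",
    // " = nullptr", " = nil", " = {}", etc., as computed above.
    void suggestZeroInit(Sema &S, VarDecl *VD, unsigned DiagID) {
      if (const char *Init = S.getFixItZeroInitializerForType(VD->getType()))
        S.Diag(VD->getLocation(), DiagID)
            << FixItHint::CreateInsertion(
                   S.PP.getLocForEndOfToken(VD->getLocEnd()), Init);
    }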
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
index 7ed3fa8..a65b41f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
@@ -21,6 +21,8 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeLoc.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
@@ -104,7 +106,7 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
// We have an array of character type with known size. However,
// the size may be smaller or larger than the string we are initializing.
// FIXME: Avoid truncation for 64-bit length strings.
- if (S.getLangOptions().CPlusPlus) {
+ if (S.getLangOpts().CPlusPlus) {
if (StringLiteral *SL = dyn_cast<StringLiteral>(Str)) {
// For Pascal strings it's OK to strip off the terminating null character,
// so the example below is valid:
@@ -116,13 +118,13 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
// [dcl.init.string]p2
if (StrLength > CAT->getSize().getZExtValue())
- S.Diag(Str->getSourceRange().getBegin(),
+ S.Diag(Str->getLocStart(),
diag::err_initializer_string_for_char_array_too_long)
<< Str->getSourceRange();
} else {
// C99 6.7.8p14.
if (StrLength-1 > CAT->getSize().getZExtValue())
- S.Diag(Str->getSourceRange().getBegin(),
+ S.Diag(Str->getLocStart(),
diag::warn_initializer_string_for_char_array_too_long)
<< Str->getSourceRange();
}
@@ -170,7 +172,8 @@ class InitListChecker {
Sema &SemaRef;
bool hadError;
bool VerifyOnly; // no diagnostics, no structure building
- std::map<InitListExpr *, InitListExpr *> SyntacticToSemantic;
+ bool AllowBraceElision;
+ llvm::DenseMap<InitListExpr *, InitListExpr *> SyntacticToSemantic;
InitListExpr *FullyStructuredList;
void CheckImplicitInitList(const InitializedEntity &Entity,
@@ -256,9 +259,12 @@ class InitListChecker {
bool CheckFlexibleArrayInit(const InitializedEntity &Entity,
Expr *InitExpr, FieldDecl *Field,
bool TopLevelObject);
+ void CheckValueInitializable(const InitializedEntity &Entity);
+
public:
InitListChecker(Sema &S, const InitializedEntity &Entity,
- InitListExpr *IL, QualType &T, bool VerifyOnly);
+ InitListExpr *IL, QualType &T, bool VerifyOnly,
+ bool AllowBraceElision);
bool HadError() { return hadError; }
// @brief Retrieves the fully-structured initializer list used for
@@ -267,11 +273,23 @@ public:
};
} // end anonymous namespace
+void InitListChecker::CheckValueInitializable(const InitializedEntity &Entity) {
+ assert(VerifyOnly &&
+ "CheckValueInitializable is only inteded for verification mode.");
+
+ SourceLocation Loc;
+ InitializationKind Kind = InitializationKind::CreateValue(Loc, Loc, Loc,
+ true);
+ InitializationSequence InitSeq(SemaRef, Entity, Kind, 0, 0);
+ if (InitSeq.Failed())
+ hadError = true;
+}
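The verify-only probe above just forms an InitializationSequence for value-initialization and tests Failed(); no AST is built. The same probe, lifted out of InitListChecker as a free-standing sketch:

    #include "clang/Sema/Initialization.h"
    #include "clang/Sema/Sema.h"
    using namespace clang;

    // Free-standing version of the probe above: would value-initializing
    // Entity succeed? Only the sequence is formed, never materialized.
    bool canValueInitialize(Sema &S, const InitializedEntity &Entity) {
      SourceLocation Loc;
      InitializationKind Kind =
          InitializationKind::CreateValue(Loc, Loc, Loc, /*isImplicit=*/true);
      InitializationSequence Seq(S, Entity, Kind, /*Args=*/0, /*NumArgs=*/0);
      return !Seq.Failed();
    }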
+
void InitListChecker::FillInValueInitForField(unsigned Init, FieldDecl *Field,
const InitializedEntity &ParentEntity,
InitListExpr *ILE,
bool &RequiresSecondPass) {
- SourceLocation Loc = ILE->getSourceRange().getBegin();
+ SourceLocation Loc = ILE->getLocStart();
unsigned NumInits = ILE->getNumInits();
InitializedEntity MemberEntity
= InitializedEntity::InitializeMember(Field, &ParentEntity);
@@ -336,9 +354,9 @@ InitListChecker::FillInValueInitializations(const InitializedEntity &Entity,
bool &RequiresSecondPass) {
assert((ILE->getType() != SemaRef.Context.VoidTy) &&
"Should not have void type");
- SourceLocation Loc = ILE->getSourceRange().getBegin();
+ SourceLocation Loc = ILE->getLocStart();
if (ILE->getSyntacticForm())
- Loc = ILE->getSyntacticForm()->getSourceRange().getBegin();
+ Loc = ILE->getSyntacticForm()->getLocStart();
if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
if (RType->getDecl()->isUnion() &&
@@ -400,7 +418,8 @@ InitListChecker::FillInValueInitializations(const InitializedEntity &Entity,
ElementEntity.getKind() == InitializedEntity::EK_VectorElement)
ElementEntity.setElementIndex(Init);
- if (Init >= NumInits || !ILE->getInit(Init)) {
+ Expr *InitExpr = (Init < NumInits ? ILE->getInit(Init) : 0);
+ if (!InitExpr && !ILE->hasArrayFiller()) {
InitializationKind Kind = InitializationKind::CreateValue(Loc, Loc, Loc,
true);
InitializationSequence InitSeq(SemaRef, ElementEntity, Kind, 0, 0);
@@ -444,7 +463,7 @@ InitListChecker::FillInValueInitializations(const InitializedEntity &Entity,
}
}
} else if (InitListExpr *InnerILE
- = dyn_cast<InitListExpr>(ILE->getInit(Init)))
+ = dyn_cast_or_null<InitListExpr>(InitExpr))
FillInValueInitializations(ElementEntity, InnerILE, RequiresSecondPass);
}
}
@@ -452,8 +471,8 @@ InitListChecker::FillInValueInitializations(const InitializedEntity &Entity,
InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
InitListExpr *IL, QualType &T,
- bool VerifyOnly)
- : SemaRef(S), VerifyOnly(VerifyOnly) {
+ bool VerifyOnly, bool AllowBraceElision)
+ : SemaRef(S), VerifyOnly(VerifyOnly), AllowBraceElision(AllowBraceElision) {
hadError = false;
unsigned newIndex = 0;
@@ -527,7 +546,7 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
InitListExpr *StructuredSubobjectInitList
= getStructuredSubobjectInit(ParentIList, Index, T, StructuredList,
StructuredIndex,
- SourceRange(ParentIList->getInit(Index)->getSourceRange().getBegin(),
+ SourceRange(ParentIList->getInit(Index)->getLocStart(),
ParentIList->getSourceRange().getEnd()));
unsigned StructuredSubobjectInitIndex = 0;
@@ -537,10 +556,14 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
/*SubobjectIsDesignatorContext=*/false, Index,
StructuredSubobjectInitList,
StructuredSubobjectInitIndex);
- unsigned EndIndex = (Index == StartIndex? StartIndex : Index - 1);
- if (!VerifyOnly) {
+
+ if (VerifyOnly) {
+ if (!AllowBraceElision && (T->isArrayType() || T->isRecordType()))
+ hadError = true;
+ } else {
StructuredSubobjectInitList->setType(T);
+ unsigned EndIndex = (Index == StartIndex? StartIndex : Index - 1);
// Update the structured sub-object initializer so that its ending
// range corresponds with the end of the last initializer it used.
if (EndIndex < ParentIList->getNumInits()) {
@@ -549,10 +572,11 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
StructuredSubobjectInitList->setRBraceLoc(EndLoc);
}
- // Warn about missing braces.
+ // Complain about missing braces.
if (T->isArrayType() || T->isRecordType()) {
SemaRef.Diag(StructuredSubobjectInitList->getLocStart(),
- diag::warn_missing_braces)
+ AllowBraceElision ? diag::warn_missing_braces :
+ diag::err_missing_braces)
<< StructuredSubobjectInitList->getSourceRange()
<< FixItHint::CreateInsertion(
StructuredSubobjectInitList->getLocStart(), "{")
@@ -560,6 +584,8 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
SemaRef.PP.getLocForEndOfToken(
StructuredSubobjectInitList->getLocEnd()),
"}");
+ if (!AllowBraceElision)
+ hadError = true;
}
}
}
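The net effect of the AllowBraceElision flag threaded through this hunk: elided braces in an aggregate initializer stay a warning (warn_missing_braces) when elision is allowed, and become err_missing_braces, setting hadError, when it is not. A small C++ aggregate that exercises the implicit-sublist path, for illustration:

    // Aggregate initializer relying on brace elision; the checker above
    // builds an implicit sub-list for each Pair and then either warns or
    // errors depending on AllowBraceElision.
    struct Pair { int x, y; };
    Pair table[2] = { 1, 2, 3, 4 };   // braces around each Pair elided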
@@ -578,7 +604,9 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
CheckListElementTypes(Entity, IList, T, /*SubobjectIsDesignatorContext=*/true,
Index, StructuredList, StructuredIndex, TopLevelObject);
if (!VerifyOnly) {
- QualType ExprTy = T.getNonLValueExprType(SemaRef.Context);
+ QualType ExprTy = T;
+ if (!ExprTy->isArrayType())
+ ExprTy = ExprTy.getNonLValueExprType(SemaRef.Context);
IList->setType(ExprTy);
StructuredList->setType(ExprTy);
}
@@ -588,8 +616,8 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
if (Index < IList->getNumInits()) {
// We have leftover initializers
if (VerifyOnly) {
- if (SemaRef.getLangOptions().CPlusPlus ||
- (SemaRef.getLangOptions().OpenCL &&
+ if (SemaRef.getLangOpts().CPlusPlus ||
+ (SemaRef.getLangOpts().OpenCL &&
IList->getType()->isVectorType())) {
hadError = true;
}
@@ -599,7 +627,7 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
if (StructuredIndex == 1 &&
IsStringInit(StructuredList->getInit(0), T, SemaRef.Context)) {
unsigned DK = diag::warn_excess_initializers_in_char_array_initializer;
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
DK = diag::err_excess_initializers_in_char_array_initializer;
hadError = true;
}
@@ -618,11 +646,11 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
4;
unsigned DK = diag::warn_excess_initializers;
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
DK = diag::err_excess_initializers;
hadError = true;
}
- if (SemaRef.getLangOptions().OpenCL && initKind == 1) {
+ if (SemaRef.getLangOpts().OpenCL && initKind == 1) {
DK = diag::err_excess_initializers;
hadError = true;
}
@@ -754,7 +782,7 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// Fall through for subaggregate initialization.
- } else if (SemaRef.getLangOptions().CPlusPlus) {
+ } else if (SemaRef.getLangOpts().CPlusPlus) {
// C++ [dcl.init.aggr]p12:
// All implicit type conversions (clause 4) are considered when
// initializing the aggregate member with an initializer from
@@ -817,7 +845,7 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// subaggregate, brace elision is assumed and the initializer is
// considered for the initialization of the first member of
// the subaggregate.
- if (!SemaRef.getLangOptions().OpenCL &&
+ if (!SemaRef.getLangOpts().OpenCL &&
(ElemType->isAggregateType() || ElemType->isVectorType())) {
CheckImplicitInitList(Entity, IList, ElemType, Index, StructuredList,
StructuredIndex);
@@ -856,7 +884,7 @@ void InitListChecker::CheckComplexType(const InitializedEntity &Entity,
// This is an extension in C. (The builtin _Complex type does not exist
// in the C++ standard.)
- if (!SemaRef.getLangOptions().CPlusPlus && !VerifyOnly)
+ if (!SemaRef.getLangOpts().CPlusPlus && !VerifyOnly)
SemaRef.Diag(IList->getLocStart(), diag::ext_complex_component_init)
<< IList->getSourceRange();
@@ -879,12 +907,13 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
InitListExpr *StructuredList,
unsigned &StructuredIndex) {
if (Index >= IList->getNumInits()) {
- if (!SemaRef.getLangOptions().CPlusPlus0x) {
- if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(), diag::err_empty_scalar_initializer)
- << IList->getSourceRange();
- hadError = true;
- }
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(),
+ SemaRef.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_empty_scalar_initializer :
+ diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
+ hadError = !SemaRef.getLangOpts().CPlusPlus0x;
++Index;
++StructuredIndex;
return;
@@ -902,7 +931,7 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
return;
} else if (isa<DesignatedInitExpr>(expr)) {
if (!VerifyOnly)
- SemaRef.Diag(expr->getSourceRange().getBegin(),
+ SemaRef.Diag(expr->getLocStart(),
diag::err_designator_for_scalar_init)
<< DeclType << expr->getSourceRange();
hadError = true;
@@ -964,8 +993,7 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
}
Expr *expr = IList->getInit(Index);
- if (isa<InitListExpr>(expr)) {
- // FIXME: Allowed in C++11.
+ if (isa<InitListExpr>(expr) && !SemaRef.getLangOpts().CPlusPlus0x) {
if (!VerifyOnly)
SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list)
<< DeclType << IList->getSourceRange();
@@ -1005,15 +1033,20 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
unsigned &Index,
InitListExpr *StructuredList,
unsigned &StructuredIndex) {
- if (Index >= IList->getNumInits())
- return;
-
const VectorType *VT = DeclType->getAs<VectorType>();
unsigned maxElements = VT->getNumElements();
unsigned numEltsInit = 0;
QualType elementType = VT->getElementType();
- if (!SemaRef.getLangOptions().OpenCL) {
+ if (Index >= IList->getNumInits()) {
+ // Make sure the element type can be value-initialized.
+ if (VerifyOnly)
+ CheckValueInitializable(
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity));
+ return;
+ }
+
+ if (!SemaRef.getLangOpts().OpenCL) {
// If the initializing element is a vector, try to copy-initialize
// instead of breaking it apart (which is doomed to failure anyway).
Expr *Init = IList->getInit(Index);
@@ -1055,8 +1088,11 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
for (unsigned i = 0; i < maxElements; ++i, ++numEltsInit) {
// Don't attempt to go past the end of the init list
- if (Index >= IList->getNumInits())
+ if (Index >= IList->getNumInits()) {
+ if (VerifyOnly)
+ CheckValueInitializable(ElementEntity);
break;
+ }
ElementEntity.setElementIndex(Index);
CheckSubElementType(ElementEntity, IList, elementType, Index,
@@ -1098,11 +1134,13 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
}
// OpenCL requires all elements to be initialized.
- // FIXME: Shouldn't this set hadError to true then?
- if (numEltsInit != maxElements && !VerifyOnly)
- SemaRef.Diag(IList->getSourceRange().getBegin(),
- diag::err_vector_incorrect_num_initializers)
- << (numEltsInit < maxElements) << maxElements << numEltsInit;
+ if (numEltsInit != maxElements) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(),
+ diag::err_vector_incorrect_num_initializers)
+ << (numEltsInit < maxElements) << maxElements << numEltsInit;
+ hadError = true;
+ }
}
void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
@@ -1223,6 +1261,14 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
DeclType = SemaRef.Context.getConstantArrayType(elementType, maxElements,
ArrayType::Normal, 0);
}
+ if (!hadError && VerifyOnly) {
+ // Check if there are any members of the array that get value-initialized.
+ // If so, check if doing that is possible.
+ // FIXME: This needs to detect holes left by designated initializers too.
+ if (maxElementsKnown && elementIndex < maxElements)
+ CheckValueInitializable(InitializedEntity::InitializeElement(
+ SemaRef.Context, 0, Entity));
+ }
}
bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
@@ -1235,7 +1281,7 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
cast<InitListExpr>(InitExpr)->getNumInits() == 0) {
// Empty flexible array init always allowed as an extension
FlexArrayDiag = diag::ext_flexible_array_init;
- } else if (SemaRef.getLangOptions().CPlusPlus) {
+ } else if (SemaRef.getLangOpts().CPlusPlus) {
// Disallow flexible array init in C++; it is not required for gcc
// compatibility, and it needs work to IRGen correctly in general.
FlexArrayDiag = diag::err_flexible_array_init;
@@ -1254,9 +1300,9 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
}
if (!VerifyOnly) {
- SemaRef.Diag(InitExpr->getSourceRange().getBegin(),
+ SemaRef.Diag(InitExpr->getLocStart(),
FlexArrayDiag)
- << InitExpr->getSourceRange().getBegin();
+ << InitExpr->getLocStart();
SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
<< Field;
}
@@ -1283,15 +1329,17 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
}
if (DeclType->isUnionType() && IList->getNumInits() == 0) {
- if (!VerifyOnly) {
- // Value-initialize the first named member of the union.
- RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
- for (RecordDecl::field_iterator FieldEnd = RD->field_end();
- Field != FieldEnd; ++Field) {
- if (Field->getDeclName()) {
+ // Value-initialize the first named member of the union.
+ RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+ for (RecordDecl::field_iterator FieldEnd = RD->field_end();
+ Field != FieldEnd; ++Field) {
+ if (Field->getDeclName()) {
+ if (VerifyOnly)
+ CheckValueInitializable(
+ InitializedEntity::InitializeMember(*Field, &Entity));
+ else
StructuredList->setInitializedFieldInUnion(*Field);
- break;
- }
+ break;
}
}
return;
@@ -1394,6 +1442,17 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
}
}
+ // Check that any remaining fields can be value-initialized.
+ if (VerifyOnly && Field != FieldEnd && !DeclType->isUnionType() &&
+ !Field->getType()->isIncompleteArrayType()) {
+ // FIXME: Should check for holes left by designated initializers too.
+ for (; Field != FieldEnd && !hadError; ++Field) {
+ if (!Field->isUnnamedBitfield())
+ CheckValueInitializable(
+ InitializedEntity::InitializeMember(*Field, &Entity));
+ }
+ }
+
if (Field == FieldEnd || !Field->getType()->isIncompleteArrayType() ||
Index >= IList->getNumInits())
return;
@@ -1454,7 +1513,8 @@ static IndirectFieldDecl *FindIndirectFieldDesignator(FieldDecl *AnonField,
IdentifierInfo *FieldName) {
assert(AnonField->isAnonymousStructOrUnion());
Decl *NextDecl = AnonField->getNextDeclInContext();
- while (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(NextDecl)) {
+ while (IndirectFieldDecl *IF =
+ dyn_cast_or_null<IndirectFieldDecl>(NextDecl)) {
if (FieldName && FieldName == IF->getAnonField()->getIdentifier())
return IF;
NextDecl = NextDecl->getNextDeclInContext();
@@ -1474,6 +1534,26 @@ static DesignatedInitExpr *CloneDesignatedInitExpr(Sema &SemaRef,
DIE->usesGNUSyntax(), DIE->getInit());
}
+namespace {
+
+// Callback to only accept typo corrections that are for field members of
+// the given struct or union.
+class FieldInitializerValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ explicit FieldInitializerValidatorCCC(RecordDecl *RD)
+ : Record(RD) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ FieldDecl *FD = candidate.getCorrectionDeclAs<FieldDecl>();
+ return FD && FD->getDeclContext()->getRedeclContext()->Equals(Record);
+ }
+
+ private:
+ RecordDecl *Record;
+};
+
+}
+
/// @brief Check the well-formedness of a C99 designated initializer.
///
/// Determines whether the designated initializer @p DIE, which
@@ -1552,7 +1632,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Determine the structural initializer list that corresponds to the
// current subobject.
- StructuredList = IsFirstDesignator? SyntacticToSemantic[IList]
+ StructuredList = IsFirstDesignator? SyntacticToSemantic.lookup(IList)
: getStructuredSubobjectInit(IList, Index, CurrentObjectType,
StructuredList, StructuredIndex,
SourceRange(D->getStartLocation(),
@@ -1577,7 +1657,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
Loc = D->getFieldLoc();
if (!VerifyOnly)
SemaRef.Diag(Loc, diag::err_field_designator_non_aggr)
- << SemaRef.getLangOptions().CPlusPlus << CurrentObjectType;
+ << SemaRef.getLangOpts().CPlusPlus << CurrentObjectType;
++Index;
return true;
}
@@ -1632,19 +1712,17 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (Lookup.first == Lookup.second) {
// Name lookup didn't find anything. Determine whether this
// was a typo for another field name.
- LookupResult R(SemaRef, FieldName, D->getFieldLoc(),
- Sema::LookupMemberName);
+ FieldInitializerValidatorCCC Validator(RT->getDecl());
TypoCorrection Corrected = SemaRef.CorrectTypo(
DeclarationNameInfo(FieldName, D->getFieldLoc()),
- Sema::LookupMemberName, /*Scope=*/NULL, /*SS=*/NULL,
- RT->getDecl(), false, Sema::CTC_NoKeywords);
- if ((ReplacementField = Corrected.getCorrectionDeclAs<FieldDecl>()) &&
- ReplacementField->getDeclContext()->getRedeclContext()
- ->Equals(RT->getDecl())) {
+ Sema::LookupMemberName, /*Scope=*/0, /*SS=*/0, Validator,
+ RT->getDecl());
+ if (Corrected) {
std::string CorrectedStr(
- Corrected.getAsString(SemaRef.getLangOptions()));
+ Corrected.getAsString(SemaRef.getLangOpts()));
std::string CorrectedQuotedStr(
- Corrected.getQuoted(SemaRef.getLangOptions()));
+ Corrected.getQuoted(SemaRef.getLangOpts()));
+ ReplacementField = Corrected.getCorrectionDeclAs<FieldDecl>();
SemaRef.Diag(D->getFieldLoc(),
diag::err_field_designator_unknown_suggest)
<< FieldName << CurrentObjectType << CorrectedQuotedStr
@@ -1740,7 +1818,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
!isa<StringLiteral>(DIE->getInit())) {
// The initializer is not an initializer list.
if (!VerifyOnly) {
- SemaRef.Diag(DIE->getInit()->getSourceRange().getBegin(),
+ SemaRef.Diag(DIE->getInit()->getLocStart(),
diag::err_flexible_array_init_needs_braces)
<< DIE->getInit()->getSourceRange();
SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
@@ -1881,7 +1959,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
DesignatedEndIndex.setIsUnsigned(MaxElements.isUnsigned());
if (DesignatedEndIndex >= MaxElements) {
if (!VerifyOnly)
- SemaRef.Diag(IndexExpr->getSourceRange().getBegin(),
+ SemaRef.Diag(IndexExpr->getLocStart(),
diag::err_array_designator_too_large)
<< DesignatedEndIndex.toString(10) << MaxElements.toString(10)
<< IndexExpr->getSourceRange();
@@ -1968,7 +2046,7 @@ InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
return 0; // No structured list in verification-only mode.
Expr *ExistingInit = 0;
if (!StructuredList)
- ExistingInit = SyntacticToSemantic[IList];
+ ExistingInit = SyntacticToSemantic.lookup(IList);
else if (StructuredIndex < StructuredList->getNumInits())
ExistingInit = StructuredList->getInit(StructuredIndex);
@@ -1990,7 +2068,7 @@ InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
SemaRef.Diag(InitRange.getBegin(),
diag::warn_subobject_initializer_overrides)
<< InitRange;
- SemaRef.Diag(ExistingInit->getSourceRange().getBegin(),
+ SemaRef.Diag(ExistingInit->getLocStart(),
diag::note_previous_initializer)
<< /*FIXME:has side effects=*/0
<< ExistingInit->getSourceRange();
@@ -2001,7 +2079,10 @@ InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
InitRange.getBegin(), 0, 0,
InitRange.getEnd());
- Result->setType(CurrentObjectType.getNonLValueExprType(SemaRef.Context));
+ QualType ResultType = CurrentObjectType;
+ if (!ResultType->isArrayType())
+ ResultType = ResultType.getNonLValueExprType(SemaRef.Context);
+ Result->setType(ResultType);
// Pre-allocate storage for the structured initializer list.
unsigned NumElements = 0;
@@ -2063,10 +2144,10 @@ void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
if (Expr *PrevInit = StructuredList->updateInit(SemaRef.Context,
StructuredIndex, expr)) {
// This initializer overwrites a previous initializer. Warn.
- SemaRef.Diag(expr->getSourceRange().getBegin(),
+ SemaRef.Diag(expr->getLocStart(),
diag::warn_initializer_overrides)
<< expr->getSourceRange();
- SemaRef.Diag(PrevInit->getSourceRange().getBegin(),
+ SemaRef.Diag(PrevInit->getLocStart(),
diag::note_previous_initializer)
<< /*FIXME:has side effects=*/0
<< PrevInit->getSourceRange();
@@ -2076,26 +2157,27 @@ void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
}
/// Check that the given Index expression is a valid array designator
-/// value. This is essentailly just a wrapper around
+/// value. This is essentially just a wrapper around
/// VerifyIntegerConstantExpression that also checks for negative values
/// and produces a reasonable diagnostic if there is a
-/// failure. Returns true if there was an error, false otherwise. If
-/// everything went okay, Value will receive the value of the constant
-/// expression.
-static bool
+/// failure. Returns the index expression, possibly with an implicit cast
+/// added, on success. If everything went okay, Value will receive the
+/// value of the constant expression.
+static ExprResult
CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
- SourceLocation Loc = Index->getSourceRange().getBegin();
+ SourceLocation Loc = Index->getLocStart();
// Make sure this is an integer constant expression.
- if (S.VerifyIntegerConstantExpression(Index, &Value))
- return true;
+ ExprResult Result = S.VerifyIntegerConstantExpression(Index, &Value);
+ if (Result.isInvalid())
+ return Result;
if (Value.isSigned() && Value.isNegative())
return S.Diag(Loc, diag::err_array_designator_negative)
<< Value.toString(10) << Index->getSourceRange();
Value.setIsUnsigned(true);
- return false;
+ return Result;
}
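CheckArrayDesignatorExpr now hands back the index expression (possibly wrapped in an implicit cast by VerifyIntegerConstantExpression) instead of a bool; callers take() the result and treat null as failure, as the rewritten ActOnDesignatedInitializer below shows. The same pattern as a helper, for illustration only (it assumes it lives in the same file, where the static function is visible):

    // Illustrative wrapper for the new contract: on failure the result is
    // null and the designator should be marked invalid; on success Index
    // may have gained an implicit cast.
    static bool checkDesignatorIndex(Sema &S, Expr *&Index,
                                     llvm::APSInt &Value) {
      Index = CheckArrayDesignatorExpr(S, Index, Value).take();
      return Index != 0;
    }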
ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
@@ -2120,9 +2202,9 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
case Designator::ArrayDesignator: {
Expr *Index = static_cast<Expr *>(D.getArrayIndex());
llvm::APSInt IndexValue;
- if (!Index->isTypeDependent() &&
- !Index->isValueDependent() &&
- CheckArrayDesignatorExpr(*this, Index, IndexValue))
+ if (!Index->isTypeDependent() && !Index->isValueDependent())
+ Index = CheckArrayDesignatorExpr(*this, Index, IndexValue).take();
+ if (!Index)
Invalid = true;
else {
Designators.push_back(ASTDesignator(InitExpressions.size(),
@@ -2142,10 +2224,13 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
StartIndex->isValueDependent();
bool EndDependent = EndIndex->isTypeDependent() ||
EndIndex->isValueDependent();
- if ((!StartDependent &&
- CheckArrayDesignatorExpr(*this, StartIndex, StartValue)) ||
- (!EndDependent &&
- CheckArrayDesignatorExpr(*this, EndIndex, EndValue)))
+ if (!StartDependent)
+ StartIndex =
+ CheckArrayDesignatorExpr(*this, StartIndex, StartValue).take();
+ if (!EndDependent)
+ EndIndex = CheckArrayDesignatorExpr(*this, EndIndex, EndValue).take();
+
+ if (!StartIndex || !EndIndex)
Invalid = true;
else {
// Make sure we're comparing values with the same bit width.
@@ -2187,10 +2272,7 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
InitExpressions.data(), InitExpressions.size(),
Loc, GNUSyntax, Init.takeAs<Expr>());
- if (getLangOptions().CPlusPlus)
- Diag(DIE->getLocStart(), diag::ext_designated_init_cxx)
- << DIE->getSourceRange();
- else if (!getLangOptions().C99)
+ if (!getLangOpts().C99)
Diag(DIE->getLocStart(), diag::ext_designated_init)
<< DIE->getSourceRange();
@@ -2244,6 +2326,9 @@ DeclarationName InitializedEntity::getName() const {
case EK_Member:
return VariableOrMember->getDeclName();
+ case EK_LambdaCapture:
+ return Capture.Var->getDeclName();
+
case EK_Result:
case EK_Exception:
case EK_New:
@@ -2257,8 +2342,7 @@ DeclarationName InitializedEntity::getName() const {
return DeclarationName();
}
- // Silence GCC warning
- return DeclarationName();
+ llvm_unreachable("Invalid EntityKind!");
}
DeclaratorDecl *InitializedEntity::getDecl() const {
@@ -2280,11 +2364,11 @@ DeclaratorDecl *InitializedEntity::getDecl() const {
case EK_VectorElement:
case EK_ComplexElement:
case EK_BlockElement:
+ case EK_LambdaCapture:
return 0;
}
- // Silence GCC warning
- return 0;
+ llvm_unreachable("Invalid EntityKind!");
}
bool InitializedEntity::allowsNRVO() const {
@@ -2304,6 +2388,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_VectorElement:
case EK_ComplexElement:
case EK_BlockElement:
+ case EK_LambdaCapture:
break;
}
@@ -2329,15 +2414,19 @@ void InitializationSequence::Step::Destroy() {
case SK_QualificationConversionLValue:
case SK_ListInitialization:
case SK_ListConstructorCall:
+ case SK_UnwrapInitList:
+ case SK_RewrapInitList:
case SK_ConstructorInitialization:
case SK_ZeroInitialization:
case SK_CAssignment:
case SK_StringInit:
case SK_ObjCObjectConversion:
case SK_ArrayInit:
+ case SK_ParenthesizedArrayInit:
case SK_PassByIndirectCopyRestore:
case SK_PassByIndirectRestore:
case SK_ProduceObjCObject:
+ case SK_StdInitializerList:
break;
case SK_ConversionSequence:
@@ -2373,186 +2462,35 @@ bool InitializationSequence::isAmbiguous() const {
case FK_ArrayTypeMismatch:
case FK_NonConstantArrayInit:
case FK_ListInitializationFailed:
+ case FK_VariableLengthArrayHasInitializer:
+ case FK_PlaceholderType:
+ case FK_InitListElementCopyFailure:
+ case FK_ExplicitConstructor:
return false;
case FK_ReferenceInitOverloadFailed:
case FK_UserConversionOverloadFailed:
case FK_ConstructorOverloadFailed:
+ case FK_ListConstructorOverloadFailed:
return FailedOverloadResult == OR_Ambiguous;
}
- return false;
+ llvm_unreachable("Invalid EntityKind!");
}
bool InitializationSequence::isConstructorInitialization() const {
return !Steps.empty() && Steps.back().Kind == SK_ConstructorInitialization;
}
-bool InitializationSequence::endsWithNarrowing(ASTContext &Ctx,
- const Expr *Initializer,
- bool *isInitializerConstant,
- APValue *ConstantValue) const {
- if (Steps.empty() || Initializer->isValueDependent())
- return false;
-
- const Step &LastStep = Steps.back();
- if (LastStep.Kind != SK_ConversionSequence)
- return false;
-
- const ImplicitConversionSequence &ICS = *LastStep.ICS;
- const StandardConversionSequence *SCS = NULL;
- switch (ICS.getKind()) {
- case ImplicitConversionSequence::StandardConversion:
- SCS = &ICS.Standard;
- break;
- case ImplicitConversionSequence::UserDefinedConversion:
- SCS = &ICS.UserDefined.After;
- break;
- case ImplicitConversionSequence::AmbiguousConversion:
- case ImplicitConversionSequence::EllipsisConversion:
- case ImplicitConversionSequence::BadConversion:
- return false;
- }
-
- // Check if SCS represents a narrowing conversion, according to C++0x
- // [dcl.init.list]p7:
- //
- // A narrowing conversion is an implicit conversion ...
- ImplicitConversionKind PossibleNarrowing = SCS->Second;
- QualType FromType = SCS->getToType(0);
- QualType ToType = SCS->getToType(1);
- switch (PossibleNarrowing) {
- // * from a floating-point type to an integer type, or
- //
- // * from an integer type or unscoped enumeration type to a floating-point
- // type, except where the source is a constant expression and the actual
- // value after conversion will fit into the target type and will produce
- // the original value when converted back to the original type, or
- case ICK_Floating_Integral:
- if (FromType->isRealFloatingType() && ToType->isIntegralType(Ctx)) {
- *isInitializerConstant = false;
- return true;
- } else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) {
- llvm::APSInt IntConstantValue;
- if (Initializer &&
- Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
- // Convert the integer to the floating type.
- llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
- Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(),
- llvm::APFloat::rmNearestTiesToEven);
- // And back.
- llvm::APSInt ConvertedValue = IntConstantValue;
- bool ignored;
- Result.convertToInteger(ConvertedValue,
- llvm::APFloat::rmTowardZero, &ignored);
- // If the resulting value is different, this was a narrowing conversion.
- if (IntConstantValue != ConvertedValue) {
- *isInitializerConstant = true;
- *ConstantValue = APValue(IntConstantValue);
- return true;
- }
- } else {
- // Variables are always narrowings.
- *isInitializerConstant = false;
- return true;
- }
- }
- return false;
-
- // * from long double to double or float, or from double to float, except
- // where the source is a constant expression and the actual value after
- // conversion is within the range of values that can be represented (even
- // if it cannot be represented exactly), or
- case ICK_Floating_Conversion:
- if (1 == Ctx.getFloatingTypeOrder(FromType, ToType)) {
- // FromType is larger than ToType.
- Expr::EvalResult InitializerValue;
- // FIXME: Check whether Initializer is a constant expression according
- // to C++0x [expr.const], rather than just whether it can be folded.
- if (Initializer->Evaluate(InitializerValue, Ctx) &&
- !InitializerValue.HasSideEffects && InitializerValue.Val.isFloat()) {
- // Constant! (Except for FIXME above.)
- llvm::APFloat FloatVal = InitializerValue.Val.getFloat();
- // Convert the source value into the target type.
- bool ignored;
- llvm::APFloat::opStatus ConvertStatus = FloatVal.convert(
- Ctx.getFloatTypeSemantics(ToType),
- llvm::APFloat::rmNearestTiesToEven, &ignored);
- // If there was no overflow, the source value is within the range of
- // values that can be represented.
- if (ConvertStatus & llvm::APFloat::opOverflow) {
- *isInitializerConstant = true;
- *ConstantValue = InitializerValue.Val;
- return true;
- }
- } else {
- *isInitializerConstant = false;
- return true;
- }
- }
- return false;
-
- // * from an integer type or unscoped enumeration type to an integer type
- // that cannot represent all the values of the original type, except where
- // the source is a constant expression and the actual value after
- // conversion will fit into the target type and will produce the original
- // value when converted back to the original type.
- case ICK_Boolean_Conversion: // Bools are integers too.
- if (!FromType->isIntegralOrUnscopedEnumerationType()) {
- // Boolean conversions can be from pointers and pointers to members
- // [conv.bool], and those aren't considered narrowing conversions.
- return false;
- } // Otherwise, fall through to the integral case.
- case ICK_Integral_Conversion: {
- assert(FromType->isIntegralOrUnscopedEnumerationType());
- assert(ToType->isIntegralOrUnscopedEnumerationType());
- const bool FromSigned = FromType->isSignedIntegerOrEnumerationType();
- const unsigned FromWidth = Ctx.getIntWidth(FromType);
- const bool ToSigned = ToType->isSignedIntegerOrEnumerationType();
- const unsigned ToWidth = Ctx.getIntWidth(ToType);
-
- if (FromWidth > ToWidth ||
- (FromWidth == ToWidth && FromSigned != ToSigned)) {
- // Not all values of FromType can be represented in ToType.
- llvm::APSInt InitializerValue;
- if (Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
- *isInitializerConstant = true;
- *ConstantValue = APValue(InitializerValue);
-
- // Add a bit to the InitializerValue so we don't have to worry about
- // signed vs. unsigned comparisons.
- InitializerValue = InitializerValue.extend(
- InitializerValue.getBitWidth() + 1);
- // Convert the initializer to and from the target width and signed-ness.
- llvm::APSInt ConvertedValue = InitializerValue;
- ConvertedValue = ConvertedValue.trunc(ToWidth);
- ConvertedValue.setIsSigned(ToSigned);
- ConvertedValue = ConvertedValue.extend(InitializerValue.getBitWidth());
- ConvertedValue.setIsSigned(InitializerValue.isSigned());
- // If the result is different, this was a narrowing conversion.
- return ConvertedValue != InitializerValue;
- } else {
- // Variables are always narrowings.
- *isInitializerConstant = false;
- return true;
- }
- }
- return false;
- }
-
- default:
- // Other kinds of conversions are not narrowings.
- return false;
- }
-}
-
-void InitializationSequence::AddAddressOverloadResolutionStep(
- FunctionDecl *Function,
- DeclAccessPair Found) {
+void
+InitializationSequence
+::AddAddressOverloadResolutionStep(FunctionDecl *Function,
+ DeclAccessPair Found,
+ bool HadMultipleCandidates) {
Step S;
S.Kind = SK_ResolveAddressOfOverloadedFunction;
S.Type = Function->getType();
- S.Function.HadMultipleCandidates = false;
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
S.Function.Function = Function;
S.Function.FoundDecl = Found;
Steps.push_back(S);
@@ -2565,7 +2503,6 @@ void InitializationSequence::AddDerivedToBaseCastStep(QualType BaseType,
case VK_RValue: S.Kind = SK_CastDerivedToBaseRValue; break;
case VK_XValue: S.Kind = SK_CastDerivedToBaseXValue; break;
case VK_LValue: S.Kind = SK_CastDerivedToBaseLValue; break;
- default: llvm_unreachable("No such category");
}
S.Type = BaseType;
Steps.push_back(S);
@@ -2586,13 +2523,15 @@ void InitializationSequence::AddExtraneousCopyToTemporary(QualType T) {
Steps.push_back(S);
}
-void InitializationSequence::AddUserConversionStep(FunctionDecl *Function,
- DeclAccessPair FoundDecl,
- QualType T) {
+void
+InitializationSequence::AddUserConversionStep(FunctionDecl *Function,
+ DeclAccessPair FoundDecl,
+ QualType T,
+ bool HadMultipleCandidates) {
Step S;
S.Kind = SK_UserConversion;
S.Type = T;
- S.Function.HadMultipleCandidates = false;
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
S.Function.Function = Function;
S.Function.FoundDecl = FoundDecl;
Steps.push_back(S);
@@ -2635,14 +2574,17 @@ void InitializationSequence::AddListInitializationStep(QualType T) {
}
void
-InitializationSequence::AddConstructorInitializationStep(
- CXXConstructorDecl *Constructor,
- AccessSpecifier Access,
- QualType T) {
+InitializationSequence
+::AddConstructorInitializationStep(CXXConstructorDecl *Constructor,
+ AccessSpecifier Access,
+ QualType T,
+ bool HadMultipleCandidates,
+ bool FromInitList, bool AsInitList) {
Step S;
- S.Kind = SK_ConstructorInitialization;
+ S.Kind = FromInitList && !AsInitList ? SK_ListConstructorCall
+ : SK_ConstructorInitialization;
S.Type = T;
- S.Function.HadMultipleCandidates = false;
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
S.Function.Function = Constructor;
S.Function.FoundDecl = DeclAccessPair::make(Constructor, Access);
Steps.push_back(S);
@@ -2683,6 +2625,13 @@ void InitializationSequence::AddArrayInitStep(QualType T) {
Steps.push_back(S);
}
+void InitializationSequence::AddParenthesizedArrayInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_ParenthesizedArrayInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
void InitializationSequence::AddPassByIndirectCopyRestoreStep(QualType type,
bool shouldCopy) {
Step s;
@@ -2699,6 +2648,28 @@ void InitializationSequence::AddProduceObjCObjectStep(QualType T) {
Steps.push_back(S);
}
+void InitializationSequence::AddStdInitializerListConstructionStep(QualType T) {
+ Step S;
+ S.Kind = SK_StdInitializerList;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::RewrapReferenceInitList(QualType T,
+ InitListExpr *Syntactic) {
+ assert(Syntactic->getNumInits() == 1 &&
+ "Can only rewrap trivial init lists.");
+ Step S;
+ S.Kind = SK_UnwrapInitList;
+ S.Type = Syntactic->getInit(0)->getType();
+ Steps.insert(Steps.begin(), S);
+
+ S.Kind = SK_RewrapInitList;
+ S.Type = T;
+ S.WrappingSyntacticList = Syntactic;
+ Steps.push_back(S);
+}
+
void InitializationSequence::SetOverloadFailure(FailureKind Failure,
OverloadingResult Result) {
setSequenceKind(FailedSequence);
@@ -2713,7 +2684,7 @@ void InitializationSequence::SetOverloadFailure(FailureKind Failure,
static void MaybeProduceObjCObject(Sema &S,
InitializationSequence &Sequence,
const InitializedEntity &Entity) {
- if (!S.getLangOptions().ObjCAutoRefCount) return;
+ if (!S.getLangOpts().ObjCAutoRefCount) return;
/// When initializing a parameter, produce the value if it's marked
/// __attribute__((ns_consumed)).
@@ -2737,6 +2708,386 @@ static void MaybeProduceObjCObject(Sema &S,
}
}
+/// \brief When initializing from an init list via a constructor, deal with the
+/// empty init list and std::initializer_list special cases.
+///
+/// \return True if this was a special case, false otherwise.
+static bool TryListConstructionSpecialCases(Sema &S,
+ InitListExpr *List,
+ CXXRecordDecl *DestRecordDecl,
+ QualType DestType,
+ InitializationSequence &Sequence) {
+ // C++11 [dcl.init.list]p3:
+ // List-initialization of an object or reference of type T is defined as
+ // follows:
+ // - If T is an aggregate, aggregate initialization is performed.
+ if (DestType->isAggregateType())
+ return false;
+
+ // - Otherwise, if the initializer list has no elements and T is a class
+ // type with a default constructor, the object is value-initialized.
+ if (List->getNumInits() == 0) {
+ if (CXXConstructorDecl *DefaultConstructor =
+ S.LookupDefaultConstructor(DestRecordDecl)) {
+ if (DefaultConstructor->isDeleted() ||
+ S.isFunctionConsideredUnavailable(DefaultConstructor)) {
+ // Fake an overload resolution failure.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+ DeclAccessPair FoundDecl = DeclAccessPair::make(DefaultConstructor,
+ DefaultConstructor->getAccess());
+ if (FunctionTemplateDecl *ConstructorTmpl =
+ dyn_cast<FunctionTemplateDecl>(DefaultConstructor))
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ ArrayRef<Expr*>(), CandidateSet,
+ /*SuppressUserConversions*/ false);
+ else
+ S.AddOverloadCandidate(DefaultConstructor, FoundDecl,
+ ArrayRef<Expr*>(), CandidateSet,
+ /*SuppressUserConversions*/ false);
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ListConstructorOverloadFailed,
+ OR_Deleted);
+ } else
+ Sequence.AddConstructorInitializationStep(DefaultConstructor,
+ DefaultConstructor->getAccess(),
+ DestType,
+ /*MultipleCandidates=*/false,
+ /*FromInitList=*/true,
+ /*AsInitList=*/false);
+ return true;
+ }
+ }
+
+ // - Otherwise, if T is a specialization of std::initializer_list, [...]
+ QualType E;
+ if (S.isStdInitializerList(DestType, &E)) {
+ // Check that each individual element can be copy-constructed. But since we
+ // have no place to store further information, we'll recalculate everything
+ // later.
+ InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
+ S.Context.getConstantArrayType(E,
+ llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ List->getNumInits()),
+ ArrayType::Normal, 0));
+ InitializedEntity Element = InitializedEntity::InitializeElement(S.Context,
+ 0, HiddenArray);
+ for (unsigned i = 0, n = List->getNumInits(); i < n; ++i) {
+ Element.setElementIndex(i);
+ if (!S.CanPerformCopyInitialization(Element, List->getInit(i))) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_InitListElementCopyFailure);
+ return true;
+ }
+ }
+ Sequence.AddStdInitializerListConstructionStep(DestType);
+ return true;
+ }
+
+ // Not a special case.
+ return false;
+}
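[Illustrative aside, not part of the patch: a minimal C++11 sketch of the two special cases this helper recognizes; the type names are hypothetical.]

    #include <initializer_list>
    struct S {
      S() {}                           // default constructor
      S(std::initializer_list<int>) {} // initializer-list constructor
    };
    int main() {
      S a{};        // empty braced-init-list: value-initialized through the
                    // default constructor, no list constructor involved
      S b{1, 2, 3}; // a hidden const int[3] is created and wrapped in a
                    // std::initializer_list<int> for the list constructor
    }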
+
+static OverloadingResult
+ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet &CandidateSet,
+ DeclContext::lookup_iterator Con,
+ DeclContext::lookup_iterator ConEnd,
+ OverloadCandidateSet::iterator &Best,
+ bool CopyInitializing, bool AllowExplicit,
+ bool OnlyListConstructors, bool InitListSyntax) {
+ CandidateSet.clear();
+
+ for (; Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+ bool SuppressUserConversions = false;
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = 0;
+ FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ else {
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ // If we're performing copy initialization using a copy constructor, we
+ // suppress user-defined conversions on the arguments. We do the same for
+ // move constructors.
+ if ((CopyInitializing || (InitListSyntax && NumArgs == 1)) &&
+ Constructor->isCopyOrMoveConstructor())
+ SuppressUserConversions = true;
+ }
+
+ if (!Constructor->isInvalidDecl() &&
+ (AllowExplicit || !Constructor->isExplicit()) &&
+ (!OnlyListConstructors || S.isInitListConstructor(Constructor))) {
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
+ else {
+ // C++ [over.match.copy]p1:
+ // - When initializing a temporary to be bound to the first parameter
+ // of a constructor that takes a reference to possibly cv-qualified
+ // T as its first argument, called with a single argument in the
+ // context of direct-initialization, explicit conversion functions
+ // are also considered.
+ bool AllowExplicitConv = AllowExplicit && !CopyInitializing &&
+ NumArgs == 1 &&
+ Constructor->isCopyOrMoveConstructor();
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ llvm::makeArrayRef(Args, NumArgs), CandidateSet,
+ SuppressUserConversions,
+ /*PartialOverloading=*/false,
+ /*AllowExplicit=*/AllowExplicitConv);
+ }
+ }
+ }
+
+ // Perform overload resolution and return the result.
+ return CandidateSet.BestViableFunction(S, DeclLoc, Best);
+}
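[Illustrative aside, not part of the patch: a sketch of the [over.match.copy]p1 rule quoted above; all names are hypothetical.]

    struct B;
    struct A {
      explicit operator B() const; // explicit conversion function
    };
    struct B {
      B() {}
      B(const B&) {}
    };
    A::operator B() const { return B(); }
    int main() {
      A a;
      B b(a);      // direct-initialization: binding the const B& parameter of
                   // B's copy constructor may use the explicit conversion
      // B b2 = a; // ill-formed: copy-initialization does not consider it
    }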
+
+/// \brief Attempt initialization by constructor (C++ [dcl.init]), which
+/// enumerates the constructors of the initialized entity and performs overload
+/// resolution to select the best.
+/// If InitListSyntax is true, this is list-initialization of a non-aggregate
+/// class type.
+static void TryConstructorInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr **Args, unsigned NumArgs,
+ QualType DestType,
+ InitializationSequence &Sequence,
+ bool InitListSyntax = false) {
+ assert((!InitListSyntax || (NumArgs == 1 && isa<InitListExpr>(Args[0]))) &&
+ "InitListSyntax must come with a single initializer list argument.");
+
+ // Check constructor arguments for self reference.
+ if (DeclaratorDecl *DD = Entity.getDecl())
+ // Parameter arguments are occasionally constructed with themselves,
+ // for instance, in recursive functions. Skip them.
+ if (!isa<ParmVarDecl>(DD))
+ for (unsigned i = 0; i < NumArgs; ++i)
+ S.CheckSelfReference(DD, Args[i]);
+
+ // The type we're constructing needs to be complete.
+ if (S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
+ Sequence.setIncompleteTypeFailure(DestType);
+ return;
+ }
+
+ const RecordType *DestRecordType = DestType->getAs<RecordType>();
+ assert(DestRecordType && "Constructor initialization requires record type");
+ CXXRecordDecl *DestRecordDecl
+ = cast<CXXRecordDecl>(DestRecordType->getDecl());
+
+ if (InitListSyntax &&
+ TryListConstructionSpecialCases(S, cast<InitListExpr>(Args[0]),
+ DestRecordDecl, DestType, Sequence))
+ return;
+
+ // Build the candidate set directly in the initialization sequence
+ // structure, so that it will persist if we fail.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+
+ // Determine whether we are allowed to call explicit constructors or
+ // explicit conversion operators.
+ bool AllowExplicit = Kind.AllowExplicit() || InitListSyntax;
+ bool CopyInitialization = Kind.getKind() == InitializationKind::IK_Copy;
+
+ // - Otherwise, if T is a class type, constructors are considered. The
+ // applicable constructors are enumerated, and the best one is chosen
+ // through overload resolution.
+ DeclContext::lookup_iterator ConStart, ConEnd;
+ llvm::tie(ConStart, ConEnd) = S.LookupConstructors(DestRecordDecl);
+
+ OverloadingResult Result = OR_No_Viable_Function;
+ OverloadCandidateSet::iterator Best;
+ bool AsInitializerList = false;
+
+ // C++11 [over.match.list]p1:
+ // When objects of non-aggregate type T are list-initialized, overload
+ // resolution selects the constructor in two phases:
+ // - Initially, the candidate functions are the initializer-list
+ // constructors of the class T and the argument list consists of the
+ // initializer list as a single argument.
+ if (InitListSyntax) {
+ AsInitializerList = true;
+ Result = ResolveConstructorOverload(S, Kind.getLocation(), Args, NumArgs,
+ CandidateSet, ConStart, ConEnd, Best,
+ CopyInitialization, AllowExplicit,
+ /*OnlyListConstructors=*/true,
+ InitListSyntax);
+
+ // Time to unwrap the init list.
+ InitListExpr *ILE = cast<InitListExpr>(Args[0]);
+ Args = ILE->getInits();
+ NumArgs = ILE->getNumInits();
+ }
+
+ // C++11 [over.match.list]p1:
+ // - If no viable initializer-list constructor is found, overload resolution
+ // is performed again, where the candidate functions are all the
+ // constructors of the class T and the argument list consists of the
+ // elements of the initializer list.
+ if (Result == OR_No_Viable_Function) {
+ AsInitializerList = false;
+ Result = ResolveConstructorOverload(S, Kind.getLocation(), Args, NumArgs,
+ CandidateSet, ConStart, ConEnd, Best,
+ CopyInitialization, AllowExplicit,
+ /*OnlyListConstructors=*/false,
+ InitListSyntax);
+ }
+ if (Result) {
+ Sequence.SetOverloadFailure(InitListSyntax ?
+ InitializationSequence::FK_ListConstructorOverloadFailed :
+ InitializationSequence::FK_ConstructorOverloadFailed,
+ Result);
+ return;
+ }
+
+ // C++0x [dcl.init]p6:
+ // If a program calls for the default initialization of an object
+ // of a const-qualified type T, T shall be a class type with a
+ // user-provided default constructor.
+ if (Kind.getKind() == InitializationKind::IK_Default &&
+ Entity.getType().isConstQualified() &&
+ cast<CXXConstructorDecl>(Best->Function)->isImplicit()) {
+ Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
+ return;
+ }
+
+ // C++11 [over.match.list]p1:
+ // In copy-list-initialization, if an explicit constructor is chosen, the
+ // initializer is ill-formed.
+ CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
+ if (InitListSyntax && !Kind.AllowExplicit() && CtorDecl->isExplicit()) {
+ Sequence.SetFailed(InitializationSequence::FK_ExplicitConstructor);
+ return;
+ }
+
+ // Add the constructor initialization step. Any cv-qualification conversion is
+ // subsumed by the initialization.
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+ Sequence.AddConstructorInitializationStep(CtorDecl,
+ Best->FoundDecl.getAccess(),
+ DestType, HadMultipleCandidates,
+ InitListSyntax, AsInitializerList);
+}
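[Illustrative aside, not part of the patch: the two-phase resolution of C++11 [over.match.list]p1 implemented above, with hypothetical types.]

    #include <initializer_list>
    struct T { T(int, int) {} T(std::initializer_list<int>) {} };
    struct U { U(int, int) {} };
    int main() {
      T t{1, 2}; // phase 1 considers only initializer-list constructors and
                 // selects T(std::initializer_list<int>)
      U u{1, 2}; // phase 1 finds no viable list constructor; phase 2 considers
                 // all constructors with the unwrapped elements: U(int, int)
    }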
+
+static bool
+ResolveOverloadedFunctionForReferenceBinding(Sema &S,
+ Expr *Initializer,
+ QualType &SourceType,
+ QualType &UnqualifiedSourceType,
+ QualType UnqualifiedTargetType,
+ InitializationSequence &Sequence) {
+ if (S.Context.getCanonicalType(UnqualifiedSourceType) ==
+ S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ bool HadMultipleCandidates = false;
+ if (FunctionDecl *Fn
+ = S.ResolveAddressOfOverloadedFunction(Initializer,
+ UnqualifiedTargetType,
+ false, Found,
+ &HadMultipleCandidates)) {
+ Sequence.AddAddressOverloadResolutionStep(Fn, Found,
+ HadMultipleCandidates);
+ SourceType = Fn->getType();
+ UnqualifiedSourceType = SourceType.getUnqualifiedType();
+ } else if (!UnqualifiedTargetType->isRecordType()) {
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ return true;
+ }
+ }
+ return false;
+}
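[Illustrative aside, not part of the patch: resolving an overloaded function name against the reference's target type.]

    void f(int) {}
    void f(double) {}
    int main() {
      void (&r)(int) = f; // the target type selects f(int) from the overload
                          // set; r now refers to that function
      r(0);
    }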
+
+static void TryReferenceInitializationCore(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ QualType cv1T1, QualType T1,
+ Qualifiers T1Quals,
+ QualType cv2T2, QualType T2,
+ Qualifiers T2Quals,
+ InitializationSequence &Sequence);
+
+static void TryListInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitListExpr *InitList,
+ InitializationSequence &Sequence);
+
+/// \brief Attempt list initialization of a reference.
+static void TryReferenceListInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitListExpr *InitList,
+ InitializationSequence &Sequence)
+{
+ // First, catch C++03 where this isn't possible.
+ if (!S.getLangOpts().CPlusPlus0x) {
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList);
+ return;
+ }
+
+ QualType DestType = Entity.getType();
+ QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
+ Qualifiers T1Quals;
+ QualType T1 = S.Context.getUnqualifiedArrayType(cv1T1, T1Quals);
+
+ // Reference initialization via an initializer list works thus:
+ // If the initializer list consists of a single element that is
+ // reference-related to the referenced type, bind directly to that element
+ // (possibly creating temporaries).
+ // Otherwise, initialize a temporary with the initializer list and
+ // bind to that.
+ if (InitList->getNumInits() == 1) {
+ Expr *Initializer = InitList->getInit(0);
+ QualType cv2T2 = Initializer->getType();
+ Qualifiers T2Quals;
+ QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
+
+ // If this fails, creating a temporary wouldn't work either.
+ if (ResolveOverloadedFunctionForReferenceBinding(S, Initializer, cv2T2, T2,
+ T1, Sequence))
+ return;
+
+ SourceLocation DeclLoc = Initializer->getLocStart();
+ bool dummy1, dummy2, dummy3;
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, dummy1,
+ dummy2, dummy3);
+ if (RefRelationship >= Sema::Ref_Related) {
+ // Try to bind the reference here.
+ TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
+ T1Quals, cv2T2, T2, T2Quals, Sequence);
+ if (Sequence)
+ Sequence.RewrapReferenceInitList(cv1T1, InitList);
+ return;
+ }
+ }
+
+ // Not reference-related. Create a temporary and bind to that.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
+
+ TryListInitialization(S, TempEntity, Kind, InitList, Sequence);
+ if (Sequence) {
+ if (DestType->isRValueReferenceType() ||
+ (T1Quals.hasConst() && !T1Quals.hasVolatile()))
+ Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
+ else
+ Sequence.SetFailed(
+ InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
+ }
+}
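[Illustrative aside, not part of the patch: the two paths of reference list-initialization handled above.]

    int main() {
      int i = 0;
      int &r{i};          // the single element is reference-related to int,
                          // so the reference binds directly to i
      const double &d{1}; // int is not reference-related to double: a
                          // temporary double is list-initialized from {1}
                          // and the const reference binds to it
      (void)r; (void)d;
    }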
+
/// \brief Attempt list initialization (C++0x [dcl.init.list])
static void TryListInitialization(Sema &S,
const InitializedEntity &Entity,
@@ -2747,23 +3098,40 @@ static void TryListInitialization(Sema &S,
// C++ doesn't allow scalar initialization with more than one argument.
// But C99 complex numbers are scalars and it makes sense there.
- if (S.getLangOptions().CPlusPlus && DestType->isScalarType() &&
+ if (S.getLangOpts().CPlusPlus && DestType->isScalarType() &&
!DestType->isAnyComplexType() && InitList->getNumInits() > 1) {
Sequence.SetFailed(InitializationSequence::FK_TooManyInitsForScalar);
return;
}
- // FIXME: C++0x defines behavior for these two cases.
if (DestType->isReferenceType()) {
- Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList);
+ TryReferenceListInitialization(S, Entity, Kind, InitList, Sequence);
return;
}
- if (DestType->isRecordType() && !DestType->isAggregateType()) {
- Sequence.SetFailed(InitializationSequence::FK_InitListBadDestinationType);
- return;
+ if (DestType->isRecordType()) {
+ if (S.RequireCompleteType(InitList->getLocStart(), DestType, S.PDiag())) {
+ Sequence.setIncompleteTypeFailure(DestType);
+ return;
+ }
+
+ if (!DestType->isAggregateType()) {
+ if (S.getLangOpts().CPlusPlus0x) {
+ Expr *Arg = InitList;
+ // A direct-initializer is not list-syntax, i.e. there's no special
+ // treatment of "A a({1, 2});".
+ TryConstructorInitialization(S, Entity, Kind, &Arg, 1, DestType,
+ Sequence,
+ Kind.getKind() != InitializationKind::IK_Direct);
+ } else
+ Sequence.SetFailed(
+ InitializationSequence::FK_InitListBadDestinationType);
+ return;
+ }
}
InitListChecker CheckInitList(S, Entity, InitList,
- DestType, /*VerifyOnly=*/true);
+ DestType, /*VerifyOnly=*/true,
+ Kind.getKind() != InitializationKind::IK_DirectList ||
+ !S.getLangOpts().CPlusPlus0x);
if (CheckInitList.HadError()) {
Sequence.SetFailed(InitializationSequence::FK_ListInitializationFailed);
return;
@@ -2778,8 +3146,8 @@ static void TryListInitialization(Sema &S,
static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
- Expr *Initializer,
- bool AllowRValues,
+ Expr *Initializer,
+ bool AllowRValues,
InitializationSequence &Sequence) {
QualType DestType = Entity.getType();
QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
@@ -2806,8 +3174,9 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
// Determine whether we are allowed to call explicit constructors or
// explicit conversion operators.
- bool AllowExplicit = Kind.getKind() == InitializationKind::IK_Direct;
-
+ bool AllowExplicit = Kind.AllowExplicit();
+ bool AllowExplicitConvs = Kind.allowExplicitConversionFunctions();
+
const RecordType *T1RecordType = 0;
if (AllowRValues && (T1RecordType = T1->getAs<RecordType>()) &&
!S.RequireCompleteType(Kind.getLocation(), T1, 0)) {
@@ -2835,11 +3204,11 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
if (ConstructorTmpl)
S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
/*ExplicitArgs*/ 0,
- &Initializer, 1, CandidateSet,
+ Initializer, CandidateSet,
/*SuppressUserConversions=*/true);
else
S.AddOverloadCandidate(Constructor, FoundDecl,
- &Initializer, 1, CandidateSet,
+ Initializer, CandidateSet,
/*SuppressUserConversions=*/true);
}
}
@@ -2876,7 +3245,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
// FIXME: Do we need to make sure that we only consider conversion
// candidates with reference-compatible results? That might be needed to
// break recursion.
- if ((AllowExplicit || !Conv->isExplicit()) &&
+ if ((AllowExplicitConvs || !Conv->isExplicit()) &&
(AllowRValues || Conv->getConversionType()->isLValueReferenceType())){
if (ConvTemplate)
S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
@@ -2903,7 +3272,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
// This is the overload that will actually be used for the initialization, so
// mark it as used.
- S.MarkDeclarationReferenced(DeclLoc, Function);
+ S.MarkFunctionReferenced(DeclLoc, Function);
// Compute the returned type of the conversion.
if (isa<CXXConversionDecl>(Function))
@@ -2912,8 +3281,10 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
T2 = cv1T1;
// Add the user-defined conversion step.
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
Sequence.AddUserConversionStep(Function, Best->FoundDecl,
- T2.getNonLValueExprType(S.Context));
+ T2.getNonLValueExprType(S.Context),
+ HadMultipleCandidates);
// Determine whether we need to perform derived-to-base or
// cv-qualification adjustments.
@@ -2958,6 +3329,10 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
return OR_Success;
}
+static void CheckCXX98CompatAccessibleCopy(Sema &S,
+ const InitializedEntity &Entity,
+ Expr *CurInitExpr);
+
/// \brief Attempt reference initialization (C++0x [dcl.init.ref])
static void TryReferenceInitialization(Sema &S,
const InitializedEntity &Entity,
@@ -2971,26 +3346,31 @@ static void TryReferenceInitialization(Sema &S,
QualType cv2T2 = Initializer->getType();
Qualifiers T2Quals;
QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
- SourceLocation DeclLoc = Initializer->getLocStart();
// If the initializer is the address of an overloaded function, try
// to resolve the overloaded function. If all goes well, T2 is the
// type of the resulting function.
- if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
- DeclAccessPair Found;
- if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(Initializer,
- T1,
- false,
- Found)) {
- Sequence.AddAddressOverloadResolutionStep(Fn, Found);
- cv2T2 = Fn->getType();
- T2 = cv2T2.getUnqualifiedType();
- } else if (!T1->isRecordType()) {
- Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
- return;
- }
- }
+ if (ResolveOverloadedFunctionForReferenceBinding(S, Initializer, cv2T2, T2,
+ T1, Sequence))
+ return;
+
+ // Delegate everything else to a subfunction.
+ TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
+ T1Quals, cv2T2, T2, T2Quals, Sequence);
+}
+/// \brief Reference initialization without resolving overloaded functions.
+static void TryReferenceInitializationCore(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ QualType cv1T1, QualType T1,
+ Qualifiers T1Quals,
+ QualType cv2T2, QualType T2,
+ Qualifiers T2Quals,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+ SourceLocation DeclLoc = Initializer->getLocStart();
// Compute some basic properties of the types and the initializer.
bool isLValueRef = DestType->isLValueReferenceType();
bool isRValueRef = !isLValueRef;
@@ -3108,8 +3488,10 @@ static void TryReferenceInitialization(Sema &S,
//
// The constructor that would be used to make the copy shall
// be callable whether or not the copy is actually done.
- if (!S.getLangOptions().CPlusPlus0x && !S.getLangOptions().MicrosoftExt)
+ if (!S.getLangOpts().CPlusPlus0x && !S.getLangOpts().MicrosoftExt)
Sequence.AddExtraneousCopyToTemporary(cv2T2);
+ else if (S.getLangOpts().CPlusPlus0x)
+ CheckCXX98CompatAccessibleCopy(S, Entity, Initializer);
}
if (DerivedToBase)
@@ -3122,7 +3504,7 @@ static void TryReferenceInitialization(Sema &S,
if (T1Quals != T2Quals)
Sequence.AddQualificationConversionStep(cv1T1, ValueKind);
Sequence.AddReferenceBindingStep(cv1T1,
- /*bindingTemporary=*/(InitCategory.isPRValue() && !T2->isArrayType()));
+ /*bindingTemporary=*/InitCategory.isPRValue());
return;
}
@@ -3155,7 +3537,7 @@ static void TryReferenceInitialization(Sema &S,
// Determine whether we are allowed to call explicit constructors or
// explicit conversion operators.
- bool AllowExplicit = (Kind.getKind() == InitializationKind::IK_Direct);
+ bool AllowExplicit = Kind.AllowExplicit();
InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
@@ -3219,146 +3601,47 @@ static void TryStringLiteralInitialization(Sema &S,
Sequence.AddStringInitStep(Entity.getType());
}
-/// \brief Attempt initialization by constructor (C++ [dcl.init]), which
-/// enumerates the constructors of the initialized entity and performs overload
-/// resolution to select the best.
-static void TryConstructorInitialization(Sema &S,
- const InitializedEntity &Entity,
- const InitializationKind &Kind,
- Expr **Args, unsigned NumArgs,
- QualType DestType,
- InitializationSequence &Sequence) {
- // Check constructor arguments for self reference.
- if (DeclaratorDecl *DD = Entity.getDecl())
- // Parameters arguments are occassionially constructed with itself,
- // for instance, in recursive functions. Skip them.
- if (!isa<ParmVarDecl>(DD))
- for (unsigned i = 0; i < NumArgs; ++i)
- S.CheckSelfReference(DD, Args[i]);
-
- // Build the candidate set directly in the initialization sequence
- // structure, so that it will persist if we fail.
- OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
- CandidateSet.clear();
-
- // Determine whether we are allowed to call explicit constructors or
- // explicit conversion operators.
- bool AllowExplicit = (Kind.getKind() == InitializationKind::IK_Direct ||
- Kind.getKind() == InitializationKind::IK_Value ||
- Kind.getKind() == InitializationKind::IK_Default);
-
- // The type we're constructing needs to be complete.
- if (S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
- Sequence.SetFailed(InitializationSequence::FK_Incomplete);
- return;
- }
-
- // The type we're converting to is a class type. Enumerate its constructors
- // to see if one is suitable.
- const RecordType *DestRecordType = DestType->getAs<RecordType>();
- assert(DestRecordType && "Constructor initialization requires record type");
- CXXRecordDecl *DestRecordDecl
- = cast<CXXRecordDecl>(DestRecordType->getDecl());
-
- DeclContext::lookup_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl);
- Con != ConEnd; ++Con) {
- NamedDecl *D = *Con;
- DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
- bool SuppressUserConversions = false;
-
- // Find the constructor (which may be a template).
- CXXConstructorDecl *Constructor = 0;
- FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
- if (ConstructorTmpl)
- Constructor = cast<CXXConstructorDecl>(
- ConstructorTmpl->getTemplatedDecl());
- else {
- Constructor = cast<CXXConstructorDecl>(D);
-
- // If we're performing copy initialization using a copy constructor, we
- // suppress user-defined conversions on the arguments.
- // FIXME: Move constructors?
- if (Kind.getKind() == InitializationKind::IK_Copy &&
- Constructor->isCopyConstructor())
- SuppressUserConversions = true;
- }
-
- if (!Constructor->isInvalidDecl() &&
- (AllowExplicit || !Constructor->isExplicit())) {
- if (ConstructorTmpl)
- S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
- /*ExplicitArgs*/ 0,
- Args, NumArgs, CandidateSet,
- SuppressUserConversions);
- else
- S.AddOverloadCandidate(Constructor, FoundDecl,
- Args, NumArgs, CandidateSet,
- SuppressUserConversions);
- }
- }
-
- SourceLocation DeclLoc = Kind.getLocation();
-
- // Perform overload resolution. If it fails, return the failed result.
- OverloadCandidateSet::iterator Best;
- if (OverloadingResult Result
- = CandidateSet.BestViableFunction(S, DeclLoc, Best)) {
- Sequence.SetOverloadFailure(
- InitializationSequence::FK_ConstructorOverloadFailed,
- Result);
- return;
- }
-
- // C++0x [dcl.init]p6:
- // If a program calls for the default initialization of an object
- // of a const-qualified type T, T shall be a class type with a
- // user-provided default constructor.
- if (Kind.getKind() == InitializationKind::IK_Default &&
- Entity.getType().isConstQualified() &&
- cast<CXXConstructorDecl>(Best->Function)->isImplicit()) {
- Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
- return;
- }
-
- // Add the constructor initialization step. Any cv-qualification conversion is
- // subsumed by the initialization.
- Sequence.AddConstructorInitializationStep(
- cast<CXXConstructorDecl>(Best->Function),
- Best->FoundDecl.getAccess(),
- DestType);
-}
-
/// \brief Attempt value initialization (C++ [dcl.init]p7).
static void TryValueInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
InitializationSequence &Sequence) {
- // C++ [dcl.init]p5:
+ // C++98 [dcl.init]p5, C++11 [dcl.init]p7:
//
// To value-initialize an object of type T means:
QualType T = Entity.getType();
// -- if T is an array type, then each element is value-initialized;
- while (const ArrayType *AT = S.Context.getAsArrayType(T))
- T = AT->getElementType();
+ T = S.Context.getBaseElementType(T);
if (const RecordType *RT = T->getAs<RecordType>()) {
if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++98:
// -- if T is a class type (clause 9) with a user-declared
// constructor (12.1), then the default constructor for T is
// called (and the initialization is ill-formed if T has no
// accessible default constructor);
- //
- // FIXME: we really want to refer to a single subobject of the array,
- // but Entity doesn't have a way to capture that (yet).
- if (ClassDecl->hasUserDeclaredConstructor())
- return TryConstructorInitialization(S, Entity, Kind, 0, 0, T, Sequence);
+ if (!S.getLangOpts().CPlusPlus0x) {
+ if (ClassDecl->hasUserDeclaredConstructor())
+ // FIXME: we really want to refer to a single subobject of the array,
+ // but Entity doesn't have a way to capture that (yet).
+ return TryConstructorInitialization(S, Entity, Kind, 0, 0,
+ T, Sequence);
+ } else {
+ // C++11:
+ // -- if T is a class type (clause 9) with either no default constructor
+ // (12.1 [class.ctor]) or a default constructor that is user-provided
+ // or deleted, then the object is default-initialized;
+ CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
+ if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
+ return TryConstructorInitialization(S, Entity, Kind, 0, 0,
+ T, Sequence);
+ }
- // -- if T is a (possibly cv-qualified) non-union class type
- // without a user-provided constructor, then the object is
- // zero-initialized and, if T's implicitly-declared default
- // constructor is non-trivial, that constructor is called.
+ // -- if T is a (possibly cv-qualified) non-union class type without a
+ // user-provided or deleted default constructor, then the object is
+ // zero-initialized and, if T has a non-trivial default constructor,
+ // default-initialized;
if ((ClassDecl->getTagKind() == TTK_Class ||
ClassDecl->getTagKind() == TTK_Struct)) {
Sequence.AddZeroInitializationStep(Entity.getType());
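[Illustrative aside, not part of the patch: the C++98/C++11 value-initialization split handled above, with hypothetical types.]

    struct Q { Q() {} }; // user-provided default constructor
    struct R { int x; }; // implicit, trivial default constructor
    int main() {
      Q q = Q(); // the user-provided default constructor is called
      R r = R(); // no user-provided constructor: the object is
                 // zero-initialized, so r.x is 0
      (void)q; (void)r;
    }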
@@ -3385,7 +3668,7 @@ static void TryDefaultInitialization(Sema &S,
// - if T is a (possibly cv-qualified) class type (Clause 9), the default
// constructor for T is called (and the initialization is ill-formed if
// T has no accessible default constructor);
- if (DestType->isRecordType() && S.getLangOptions().CPlusPlus) {
+ if (DestType->isRecordType() && S.getLangOpts().CPlusPlus) {
TryConstructorInitialization(S, Entity, Kind, 0, 0, DestType, Sequence);
return;
}
@@ -3395,7 +3678,7 @@ static void TryDefaultInitialization(Sema &S,
// If a program calls for the default initialization of an object of
// a const-qualified type T, T shall be a class type with a user-provided
// default constructor.
- if (DestType.isConstQualified() && S.getLangOptions().CPlusPlus) {
+ if (DestType.isConstQualified() && S.getLangOpts().CPlusPlus) {
Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
return;
}
@@ -3428,7 +3711,7 @@ static void TryUserDefinedConversion(Sema &S,
// Determine whether we are allowed to call explicit constructors or
// explicit conversion operators.
- bool AllowExplicit = Kind.getKind() == InitializationKind::IK_Direct;
+ bool AllowExplicit = Kind.AllowExplicit();
if (const RecordType *DestRecordType = DestType->getAs<RecordType>()) {
// The type we're converting to is a class type. Enumerate its constructors
@@ -3459,11 +3742,11 @@ static void TryUserDefinedConversion(Sema &S,
if (ConstructorTmpl)
S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
/*ExplicitArgs*/ 0,
- &Initializer, 1, CandidateSet,
+ Initializer, CandidateSet,
/*SuppressUserConversions=*/true);
else
S.AddOverloadCandidate(Constructor, FoundDecl,
- &Initializer, 1, CandidateSet,
+ Initializer, CandidateSet,
/*SuppressUserConversions=*/true);
}
}
@@ -3523,29 +3806,35 @@ static void TryUserDefinedConversion(Sema &S,
}
FunctionDecl *Function = Best->Function;
- S.MarkDeclarationReferenced(DeclLoc, Function);
+ S.MarkFunctionReferenced(DeclLoc, Function);
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
if (isa<CXXConstructorDecl>(Function)) {
// Add the user-defined conversion step. Any cv-qualification conversion is
- // subsumed by the initialization.
- Sequence.AddUserConversionStep(Function, Best->FoundDecl, DestType);
+ // subsumed by the initialization. Per DR5, the created temporary is of the
+ // cv-unqualified type of the destination.
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl,
+ DestType.getUnqualifiedType(),
+ HadMultipleCandidates);
return;
}
// Add the user-defined conversion step that calls the conversion function.
QualType ConvType = Function->getCallResultType();
if (ConvType->getAs<RecordType>()) {
- // If we're converting to a class type, there may be an copy if
+ // If we're converting to a class type, there may be a copy of
// the resulting temporary object (possible to create an object of
// a base class type). That copy is not a separate conversion, so
// we just make a note of the actual destination type (possibly a
// base class of the type returned by the conversion function) and
// let the user-defined conversion step handle the conversion.
- Sequence.AddUserConversionStep(Function, Best->FoundDecl, DestType);
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl, DestType,
+ HadMultipleCandidates);
return;
}
- Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType);
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType,
+ HadMultipleCandidates);
// If the conversion following the call to the conversion function
// is interesting, add it as a separate step.
@@ -3592,16 +3881,11 @@ static InvalidICRKind isInvalidICRSource(ASTContext &C, Expr *e,
}
// If we have a declaration reference, it had better be a local variable.
- } else if (isa<DeclRefExpr>(e) || isa<BlockDeclRefExpr>(e)) {
+ } else if (isa<DeclRefExpr>(e)) {
if (!isAddressOf) return IIK_nonlocal;
- VarDecl *var;
- if (isa<DeclRefExpr>(e)) {
- var = dyn_cast<VarDecl>(cast<DeclRefExpr>(e)->getDecl());
- if (!var) return IIK_nonlocal;
- } else {
- var = cast<BlockDeclRefExpr>(e)->getDecl();
- }
+ VarDecl *var = dyn_cast<VarDecl>(cast<DeclRefExpr>(e)->getDecl());
+ if (!var) return IIK_nonlocal;
return (var->hasLocalStorage() ? IIK_okay : IIK_nonlocal);
@@ -3721,7 +4005,7 @@ InitializationSequence::InitializationSequence(Sema &S,
QualType DestType = Entity.getType();
if (DestType->isDependentType() ||
- Expr::hasAnyTypeDependentArguments(Args, NumArgs)) {
+ Expr::hasAnyTypeDependentArguments(llvm::makeArrayRef(Args, NumArgs))) {
SequenceKind = DependentSequence;
return;
}
@@ -3730,15 +4014,17 @@ InitializationSequence::InitializationSequence(Sema &S,
setSequenceKind(NormalSequence);
for (unsigned I = 0; I != NumArgs; ++I)
- if (Args[I]->getObjectKind() == OK_ObjCProperty) {
- ExprResult Result = S.ConvertPropertyForRValue(Args[I]);
- if (Result.isInvalid()) {
- SetFailed(FK_ConversionFromPropertyFailed);
+ if (Args[I]->getType()->isNonOverloadPlaceholderType()) {
+ // FIXME: should we be doing this here?
+ ExprResult result = S.CheckPlaceholderExpr(Args[I]);
+ if (result.isInvalid()) {
+ SetFailed(FK_PlaceholderType);
return;
}
- Args[I] = Result.take();
+ Args[I] = result.take();
}
+
QualType SourceType;
Expr *Initializer = 0;
if (NumArgs == 1) {
@@ -3747,11 +4033,13 @@ InitializationSequence::InitializationSequence(Sema &S,
SourceType = Initializer->getType();
}
- // - If the initializer is a braced-init-list, the object is
- // list-initialized (8.5.4).
- if (InitListExpr *InitList = dyn_cast_or_null<InitListExpr>(Initializer)) {
- TryListInitialization(S, Entity, Kind, InitList, *this);
- return;
+ // - If the initializer is a (non-parenthesized) braced-init-list, the
+ // object is list-initialized (8.5.4).
+ if (Kind.getKind() != InitializationKind::IK_Direct) {
+ if (InitListExpr *InitList = dyn_cast_or_null<InitListExpr>(Initializer)) {
+ TryListInitialization(S, Entity, Kind, InitList, *this);
+ return;
+ }
}
// - If the destination type is a reference type, see 8.5.3.
@@ -3787,6 +4075,11 @@ InitializationSequence::InitializationSequence(Sema &S,
// - Otherwise, if the destination type is an array, the program is
// ill-formed.
if (const ArrayType *DestAT = Context.getAsArrayType(DestType)) {
+ if (Initializer && isa<VariableArrayType>(DestAT)) {
+ SetFailed(FK_VariableLengthArrayHasInitializer);
+ return;
+ }
+
if (Initializer && IsStringInit(Initializer, DestAT, Context)) {
TryStringLiteralInitialization(S, Entity, Kind, Initializer, *this);
return;
@@ -3795,7 +4088,7 @@ InitializationSequence::InitializationSequence(Sema &S,
// Note: as a GNU C extension, we allow initialization of an
// array from a compound literal that creates an array of the same
// type, so long as the initializer has no side effects.
- if (!S.getLangOptions().CPlusPlus && Initializer &&
+ if (!S.getLangOpts().CPlusPlus && Initializer &&
isa<CompoundLiteralExpr>(Initializer->IgnoreParens()) &&
Initializer->getType()->isArrayType()) {
const ArrayType *SourceAT
@@ -3807,6 +4100,15 @@ InitializationSequence::InitializationSequence(Sema &S,
else {
AddArrayInitStep(DestType);
}
+ }
+ // Note: as a GNU C++ extension, we allow initialization of a
+ // class member from a parenthesized initializer list.
+ else if (S.getLangOpts().CPlusPlus &&
+ Entity.getKind() == InitializedEntity::EK_Member &&
+ Initializer && isa<InitListExpr>(Initializer)) {
+ TryListInitialization(S, Entity, Kind, cast<InitListExpr>(Initializer),
+ *this);
+ AddParenthesizedArrayInitStep(DestType);
} else if (DestAT->getElementType()->isAnyCharacterType())
SetFailed(FK_ArrayNeedsInitListOrStringLiteral);
else
@@ -3817,12 +4119,12 @@ InitializationSequence::InitializationSequence(Sema &S,
// Determine whether we should consider writeback conversions for
// Objective-C ARC.
- bool allowObjCWritebackConversion = S.getLangOptions().ObjCAutoRefCount &&
+ bool allowObjCWritebackConversion = S.getLangOpts().ObjCAutoRefCount &&
Entity.getKind() == InitializedEntity::EK_Parameter;
// We're at the end of the line for C: it's either a write-back conversion
// or it's a C assignment. There's no need to check anything else.
- if (!S.getLangOptions().CPlusPlus) {
+ if (!S.getLangOpts().CPlusPlus) {
// If allowed, check whether this is an Objective-C writeback conversion.
if (allowObjCWritebackConversion &&
tryObjCWritebackConversion(S, *this, Entity, Initializer)) {
@@ -3835,7 +4137,7 @@ InitializationSequence::InitializationSequence(Sema &S,
return;
}
- assert(S.getLangOptions().CPlusPlus);
+ assert(S.getLangOpts().CPlusPlus);
// - If the destination type is a (possibly cv-qualified) class type:
if (DestType->isRecordType()) {
@@ -3964,10 +4266,11 @@ getAssignmentAction(const InitializedEntity &Entity) {
case InitializedEntity::EK_VectorElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
return Sema::AA_Initializing;
}
- return Sema::AA_Converting;
+ llvm_unreachable("Invalid EntityKind!");
}
/// \brief Whether we should bind a created object as a temporary when
@@ -3985,6 +4288,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_Exception:
case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
return false;
case InitializedEntity::EK_Parameter:
@@ -4007,6 +4311,7 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_VectorElement:
case InitializedEntity::EK_ComplexElement:
case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
return false;
case InitializedEntity::EK_Variable:
@@ -4020,6 +4325,81 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
llvm_unreachable("missed an InitializedEntity kind?");
}
+/// \brief Look for copy and move constructors and constructor templates, for
+/// copying an object via direct-initialization (per C++11 [dcl.init]p16).
+static void LookupCopyAndMoveConstructors(Sema &S,
+ OverloadCandidateSet &CandidateSet,
+ CXXRecordDecl *Class,
+ Expr *CurInitExpr) {
+ DeclContext::lookup_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(Class);
+ Con != ConEnd; ++Con) {
+ CXXConstructorDecl *Constructor = 0;
+
+ if ((Constructor = dyn_cast<CXXConstructorDecl>(*Con))) {
+ // Handle copy/move constructors only.
+ if (!Constructor || Constructor->isInvalidDecl() ||
+ !Constructor->isCopyOrMoveConstructor() ||
+ !Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
+ continue;
+
+ DeclAccessPair FoundDecl
+ = DeclAccessPair::make(Constructor, Constructor->getAccess());
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ CurInitExpr, CandidateSet);
+ continue;
+ }
+
+ // Handle constructor templates.
+ FunctionTemplateDecl *ConstructorTmpl = cast<FunctionTemplateDecl>(*Con);
+ if (ConstructorTmpl->isInvalidDecl())
+ continue;
+
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ if (!Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
+ continue;
+
+ // FIXME: Do we need to limit this to copy-constructor-like
+ // candidates?
+ DeclAccessPair FoundDecl
+ = DeclAccessPair::make(ConstructorTmpl, ConstructorTmpl->getAccess());
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl, 0,
+ CurInitExpr, CandidateSet, true);
+ }
+}
+
+/// \brief Get the location at which initialization diagnostics should appear.
+static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
+ Expr *Initializer) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_Result:
+ return Entity.getReturnLoc();
+
+ case InitializedEntity::EK_Exception:
+ return Entity.getThrowLoc();
+
+ case InitializedEntity::EK_Variable:
+ return Entity.getDecl()->getLocation();
+
+ case InitializedEntity::EK_LambdaCapture:
+ return Entity.getCaptureLoc();
+
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_BlockElement:
+ return Initializer->getLocStart();
+ }
+ llvm_unreachable("missed an InitializedEntity kind?");
+}
+
/// \brief Make a (potentially elidable) temporary copy of the object
/// provided by the given initializer by calling the appropriate copy
/// constructor.
@@ -4069,79 +4449,18 @@ static ExprResult CopyObject(Sema &S,
// of constructor initialization, while copy elision for exception handlers
// is handled by the run-time.
bool Elidable = CurInitExpr->isTemporaryObject(S.Context, Class);
- SourceLocation Loc;
- switch (Entity.getKind()) {
- case InitializedEntity::EK_Result:
- Loc = Entity.getReturnLoc();
- break;
-
- case InitializedEntity::EK_Exception:
- Loc = Entity.getThrowLoc();
- break;
-
- case InitializedEntity::EK_Variable:
- Loc = Entity.getDecl()->getLocation();
- break;
-
- case InitializedEntity::EK_ArrayElement:
- case InitializedEntity::EK_Member:
- case InitializedEntity::EK_Parameter:
- case InitializedEntity::EK_Temporary:
- case InitializedEntity::EK_New:
- case InitializedEntity::EK_Base:
- case InitializedEntity::EK_Delegating:
- case InitializedEntity::EK_VectorElement:
- case InitializedEntity::EK_ComplexElement:
- case InitializedEntity::EK_BlockElement:
- Loc = CurInitExpr->getLocStart();
- break;
- }
+ SourceLocation Loc = getInitializationLoc(Entity, CurInit.get());
// Make sure that the type we are copying is complete.
if (S.RequireCompleteType(Loc, T, S.PDiag(diag::err_temp_copy_incomplete)))
return move(CurInit);
// Perform overload resolution using the class's copy/move constructors.
- DeclContext::lookup_iterator Con, ConEnd;
+ // Only consider constructors and constructor templates. Per
+ // C++0x [dcl.init]p16, second bullet (for class types), this initialization
+ // is direct-initialization.
OverloadCandidateSet CandidateSet(Loc);
- for (llvm::tie(Con, ConEnd) = S.LookupConstructors(Class);
- Con != ConEnd; ++Con) {
- // Only consider copy/move constructors and constructor templates. Per
- // C++0x [dcl.init]p16, second bullet to class types, this
- // initialization is direct-initialization.
- CXXConstructorDecl *Constructor = 0;
-
- if ((Constructor = dyn_cast<CXXConstructorDecl>(*Con))) {
- // Handle copy/moveconstructors, only.
- if (!Constructor || Constructor->isInvalidDecl() ||
- !Constructor->isCopyOrMoveConstructor() ||
- !Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
- continue;
-
- DeclAccessPair FoundDecl
- = DeclAccessPair::make(Constructor, Constructor->getAccess());
- S.AddOverloadCandidate(Constructor, FoundDecl,
- &CurInitExpr, 1, CandidateSet);
- continue;
- }
-
- // Handle constructor templates.
- FunctionTemplateDecl *ConstructorTmpl = cast<FunctionTemplateDecl>(*Con);
- if (ConstructorTmpl->isInvalidDecl())
- continue;
-
- Constructor = cast<CXXConstructorDecl>(
- ConstructorTmpl->getTemplatedDecl());
- if (!Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
- continue;
-
- // FIXME: Do we need to limit this to copy-constructor-like
- // candidates?
- DeclAccessPair FoundDecl
- = DeclAccessPair::make(ConstructorTmpl, ConstructorTmpl->getAccess());
- S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl, 0,
- &CurInitExpr, 1, CandidateSet, true);
- }
+ LookupCopyAndMoveConstructors(S, CandidateSet, Class, CurInitExpr);
bool HadMultipleCandidates = (CandidateSet.size() > 1);
@@ -4156,7 +4475,7 @@ static ExprResult CopyObject(Sema &S,
: diag::err_temp_copy_no_viable)
<< (int)Entity.getKind() << CurInitExpr->getType()
<< CurInitExpr->getSourceRange();
- CandidateSet.NoteCandidates(S, OCD_AllCandidates, &CurInitExpr, 1);
+ CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
if (!IsExtraneousCopy || S.isSFINAEContext())
return ExprError();
return move(CurInit);
@@ -4165,15 +4484,14 @@ static ExprResult CopyObject(Sema &S,
S.Diag(Loc, diag::err_temp_copy_ambiguous)
<< (int)Entity.getKind() << CurInitExpr->getType()
<< CurInitExpr->getSourceRange();
- CandidateSet.NoteCandidates(S, OCD_ViableCandidates, &CurInitExpr, 1);
+ CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
return ExprError();
case OR_Deleted:
S.Diag(Loc, diag::err_temp_copy_deleted)
<< (int)Entity.getKind() << CurInitExpr->getType()
<< CurInitExpr->getSourceRange();
- S.Diag(Best->Function->getLocation(), diag::note_unavailable_here)
- << 1 << Best->Function->isDeleted();
+ S.NoteDeletedFunction(Best->Function);
return ExprError();
}
@@ -4210,7 +4528,7 @@ static ExprResult CopyObject(Sema &S,
return S.Owned(CurInitExpr);
}
- S.MarkDeclarationReferenced(Loc, Constructor);
+ S.MarkFunctionReferenced(Loc, Constructor);
// Determine the arguments required to actually perform the
// constructor call (we might have derived-to-base conversions, or
@@ -4233,6 +4551,60 @@ static ExprResult CopyObject(Sema &S,
return move(CurInit);
}
+/// \brief Check whether elidable copy construction for binding a reference to
+/// a temporary would have succeeded if we were building in C++98 mode, for
+/// -Wc++98-compat.
+static void CheckCXX98CompatAccessibleCopy(Sema &S,
+ const InitializedEntity &Entity,
+ Expr *CurInitExpr) {
+ assert(S.getLangOpts().CPlusPlus0x);
+
+ const RecordType *Record = CurInitExpr->getType()->getAs<RecordType>();
+ if (!Record)
+ return;
+
+ SourceLocation Loc = getInitializationLoc(Entity, CurInitExpr);
+ if (S.Diags.getDiagnosticLevel(diag::warn_cxx98_compat_temp_copy, Loc)
+ == DiagnosticsEngine::Ignored)
+ return;
+
+ // Find constructors which would have been considered.
+ OverloadCandidateSet CandidateSet(Loc);
+ LookupCopyAndMoveConstructors(
+ S, CandidateSet, cast<CXXRecordDecl>(Record->getDecl()), CurInitExpr);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult OR = CandidateSet.BestViableFunction(S, Loc, Best);
+
+ PartialDiagnostic Diag = S.PDiag(diag::warn_cxx98_compat_temp_copy)
+ << OR << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+
+ switch (OR) {
+ case OR_Success:
+ S.CheckConstructorAccess(Loc, cast<CXXConstructorDecl>(Best->Function),
+ Entity, Best->FoundDecl.getAccess(), Diag);
+ // FIXME: Check default arguments as far as that's possible.
+ break;
+
+ case OR_No_Viable_Function:
+ S.Diag(Loc, Diag);
+ CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
+ break;
+
+ case OR_Ambiguous:
+ S.Diag(Loc, Diag);
+ CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
+ break;
+
+ case OR_Deleted:
+ S.Diag(Loc, Diag);
+ S.NoteDeletedFunction(Best->Function);
+ break;
+ }
+}
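[Illustrative aside, not part of the patch: the situation this -Wc++98-compat check diagnoses, with a hypothetical type.]

    struct W {
      W() {}
    private:
      W(const W&); // copy constructor inaccessible from main()
    };
    int main() {
      const W &r = W(); // OK in C++11: binds directly to the temporary. C++98
                        // required the copy constructor to be callable here,
                        // so -Wc++98-compat warns.
      (void)r;
    }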
+
void InitializationSequence::PrintInitLocationNote(Sema &S,
const InitializedEntity &Entity) {
if (Entity.getKind() == InitializedEntity::EK_Parameter && Entity.getDecl()) {
@@ -4252,6 +4624,130 @@ static bool isReferenceBinding(const InitializationSequence::Step &s) {
s.Kind == InitializationSequence::SK_BindReferenceToTemporary;
}
+static ExprResult
+PerformConstructorInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ const InitializationSequence::Step& Step,
+ bool &ConstructorInitRequiresZeroInit) {
+ unsigned NumArgs = Args.size();
+ CXXConstructorDecl *Constructor
+ = cast<CXXConstructorDecl>(Step.Function.Function);
+ bool HadMultipleCandidates = Step.Function.HadMultipleCandidates;
+
+ // Build a call to the selected constructor.
+ ASTOwningVector<Expr*> ConstructorArgs(S);
+ SourceLocation Loc = (Kind.isCopyInit() && Kind.getEqualLoc().isValid())
+ ? Kind.getEqualLoc()
+ : Kind.getLocation();
+
+ if (Kind.getKind() == InitializationKind::IK_Default) {
+ // Force even a trivial, implicit default constructor to be
+ // semantically checked. We do this explicitly because we don't build
+ // the definition for completely trivial constructors.
+ assert(Constructor->getParent() && "No parent class for constructor.");
+ if (Constructor->isDefaulted() && Constructor->isDefaultConstructor() &&
+ Constructor->isTrivial() && !Constructor->isUsed(false))
+ S.DefineImplicitDefaultConstructor(Loc, Constructor);
+ }
+
+ ExprResult CurInit = S.Owned((Expr *)0);
+
+ // C++ [over.match.copy]p1:
+ // - When initializing a temporary to be bound to the first parameter
+ // of a constructor that takes a reference to possibly cv-qualified
+ // T as its first argument, called with a single argument in the
+ // context of direct-initialization, explicit conversion functions
+ // are also considered.
+ bool AllowExplicitConv = Kind.AllowExplicit() && !Kind.isCopyInit() &&
+ Args.size() == 1 &&
+ Constructor->isCopyOrMoveConstructor();
+
+ // Determine the arguments required to actually perform the constructor
+ // call.
+ if (S.CompleteConstructorCall(Constructor, move(Args),
+ Loc, ConstructorArgs,
+ AllowExplicitConv))
+ return ExprError();
+
+
+ if (Entity.getKind() == InitializedEntity::EK_Temporary &&
+ (Kind.getKind() == InitializationKind::IK_DirectList ||
+ (NumArgs != 1 && // FIXME: Hack to work around cast weirdness
+ (Kind.getKind() == InitializationKind::IK_Direct ||
+ Kind.getKind() == InitializationKind::IK_Value)))) {
+ // An explicitly-constructed temporary, e.g., X(1, 2).
+ unsigned NumExprs = ConstructorArgs.size();
+ Expr **Exprs = (Expr **)ConstructorArgs.take();
+ S.MarkFunctionReferenced(Loc, Constructor);
+ S.DiagnoseUseOfDecl(Constructor, Loc);
+
+ TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
+ if (!TSInfo)
+ TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
+ SourceRange ParenRange;
+ if (Kind.getKind() != InitializationKind::IK_DirectList)
+ ParenRange = Kind.getParenRange();
+
+ CurInit = S.Owned(new (S.Context) CXXTemporaryObjectExpr(S.Context,
+ Constructor,
+ TSInfo,
+ Exprs,
+ NumExprs,
+ ParenRange,
+ HadMultipleCandidates,
+ ConstructorInitRequiresZeroInit));
+ } else {
+ CXXConstructExpr::ConstructionKind ConstructKind =
+ CXXConstructExpr::CK_Complete;
+
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ ConstructKind = Entity.getBaseSpecifier()->isVirtual() ?
+ CXXConstructExpr::CK_VirtualBase :
+ CXXConstructExpr::CK_NonVirtualBase;
+ } else if (Entity.getKind() == InitializedEntity::EK_Delegating) {
+ ConstructKind = CXXConstructExpr::CK_Delegating;
+ }
+
+ // Only get the parenthesis range if it is a direct construction.
+ SourceRange parenRange =
+ Kind.getKind() == InitializationKind::IK_Direct ?
+ Kind.getParenRange() : SourceRange();
+
+ // If the entity allows NRVO, mark the construction as elidable
+ // unconditionally.
+ if (Entity.allowsNRVO())
+ CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
+ Constructor, /*Elidable=*/true,
+ move_arg(ConstructorArgs),
+ HadMultipleCandidates,
+ ConstructorInitRequiresZeroInit,
+ ConstructKind,
+ parenRange);
+ else
+ CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
+ Constructor,
+ move_arg(ConstructorArgs),
+ HadMultipleCandidates,
+ ConstructorInitRequiresZeroInit,
+ ConstructKind,
+ parenRange);
+ }
+ if (CurInit.isInvalid())
+ return ExprError();
+
+ // Only check access if all of that succeeded.
+ S.CheckConstructorAccess(Loc, Constructor, Entity,
+ Step.Function.FoundDecl.getAccess());
+ S.DiagnoseUseOfDecl(Step.Function.FoundDecl, Loc);
+
+ if (shouldBindAsTemporary(Entity))
+ CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
+
+ return move(CurInit);
+}
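
A hedged sketch of the two construction paths the helper above distinguishes (the function names nothing beyond standard C++):

    struct X { X(int, int); };
    X a = X(1, 2);   // explicitly-constructed temporary: represented as
                     // a CXXTemporaryObjectExpr
    X f() {
      X x(1, 2);     // ordinary CXXConstructExpr
      return x;      // NRVO-eligible entity, so the construction of the
    }                // return value is marked elidable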
+
ExprResult
InitializationSequence::Perform(Sema &S,
const InitializedEntity &Entity,
@@ -4303,8 +4799,16 @@ InitializationSequence::Perform(Sema &S,
}
}
+ if (Kind.getKind() == InitializationKind::IK_Direct &&
+ !Kind.isExplicitCast()) {
+ // Rebuild the ParenListExpr.
+ SourceRange ParenRange = Kind.getParenRange();
+ return S.ActOnParenListExpr(ParenRange.getBegin(), ParenRange.getEnd(),
+ move(Args));
+ }
assert(Kind.getKind() == InitializationKind::IK_Copy ||
- Kind.isExplicitCast());
+ Kind.isExplicitCast() ||
+ Kind.getKind() == InitializationKind::IK_DirectList);
return ExprResult(Args.release()[0]);
}
@@ -4340,23 +4844,20 @@ InitializationSequence::Perform(Sema &S,
case SK_ConversionSequence:
case SK_ListConstructorCall:
case SK_ListInitialization:
+ case SK_UnwrapInitList:
+ case SK_RewrapInitList:
case SK_CAssignment:
case SK_StringInit:
case SK_ObjCObjectConversion:
case SK_ArrayInit:
+ case SK_ParenthesizedArrayInit:
case SK_PassByIndirectCopyRestore:
case SK_PassByIndirectRestore:
- case SK_ProduceObjCObject: {
+ case SK_ProduceObjCObject:
+ case SK_StdInitializerList: {
assert(Args.size() == 1);
CurInit = Args.get()[0];
if (!CurInit.get()) return ExprError();
-
- // Read from a property when initializing something with it.
- if (CurInit.get()->getObjectKind() == OK_ObjCProperty) {
- CurInit = S.ConvertPropertyForRValue(CurInit.take());
- if (CurInit.isInvalid())
- return ExprError();
- }
break;
}
@@ -4466,7 +4967,7 @@ InitializationSequence::Perform(Sema &S,
// If we're binding to an Objective-C object that has lifetime, we
// need cleanups.
- if (S.getLangOptions().ObjCAutoRefCount &&
+ if (S.getLangOpts().ObjCAutoRefCount &&
CurInit.get()->getType()->isObjCLifetimeType())
S.ExprNeedsCleanups = true;
@@ -4500,7 +5001,7 @@ InitializationSequence::Perform(Sema &S,
Loc, ConstructorArgs))
return ExprError();
- // Build the an expression that constructs a temporary.
+ // Build an expression that constructs a temporary.
CurInit = S.BuildCXXConstructExpr(Loc, Step->Type, Constructor,
move_arg(ConstructorArgs),
HadMultipleCandidates,
@@ -4550,16 +5051,16 @@ InitializationSequence::Perform(Sema &S,
}
bool RequiresCopy = !IsCopy && !isReferenceBinding(Steps.back());
- if (RequiresCopy || shouldBindAsTemporary(Entity))
- CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
- else if (CreatedObject && shouldDestroyTemporary(Entity)) {
+ bool MaybeBindToTemp = RequiresCopy || shouldBindAsTemporary(Entity);
+
+ if (!MaybeBindToTemp && CreatedObject && shouldDestroyTemporary(Entity)) {
QualType T = CurInit.get()->getType();
if (const RecordType *Record = T->getAs<RecordType>()) {
CXXDestructorDecl *Destructor
= S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl()));
S.CheckDestructorAccess(CurInit.get()->getLocStart(), Destructor,
S.PDiag(diag::err_access_dtor_temp) << T);
- S.MarkDeclarationReferenced(CurInit.get()->getLocStart(), Destructor);
+ S.MarkFunctionReferenced(CurInit.get()->getLocStart(), Destructor);
S.DiagnoseUseOfDecl(Destructor, CurInit.get()->getLocStart());
}
}
@@ -4568,11 +5069,11 @@ InitializationSequence::Perform(Sema &S,
CurInit.get()->getType(),
CastKind, CurInit.get(), 0,
CurInit.get()->getValueKind()));
-
+ if (MaybeBindToTemp)
+ CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
if (RequiresCopy)
CurInit = CopyObject(S, Entity.getType().getNonReferenceType(), Entity,
move(CurInit), /*IsExtraneousCopy=*/false);
-
break;
}
@@ -4607,120 +5108,89 @@ InitializationSequence::Perform(Sema &S,
case SK_ListInitialization: {
InitListExpr *InitList = cast<InitListExpr>(CurInit.get());
- QualType Ty = Step->Type;
- InitListChecker PerformInitList(S, Entity, InitList,
- ResultType ? *ResultType : Ty, /*VerifyOnly=*/false);
+ // Hack: We must pass *ResultType if available in order to set the type
+ // of arrays, e.g. in 'int ar[] = {1, 2, 3};'.
+ // But in 'const X &x = {1, 2, 3};' we're supposed to initialize a
+ // temporary, not a reference, so we should pass Ty.
+ // Worst case: 'const int (&arref)[] = {1, 2, 3};'.
+ // Since this step is never used for a reference directly, we explicitly
+ // unwrap references here and rewrap them afterwards.
+ // We also need to create an InitializeTemporary entity for this.
+ QualType Ty = ResultType ? ResultType->getNonReferenceType() : Step->Type;
+ bool IsTemporary = Entity.getType()->isReferenceType();
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(Ty);
+ InitListChecker PerformInitList(S, IsTemporary ? TempEntity : Entity,
+ InitList, Ty, /*VerifyOnly=*/false,
+ Kind.getKind() != InitializationKind::IK_DirectList ||
+ !S.getLangOpts().CPlusPlus0x);
if (PerformInitList.HadError())
return ExprError();
+ if (ResultType) {
+ if ((*ResultType)->isRValueReferenceType())
+ Ty = S.Context.getRValueReferenceType(Ty);
+ else if ((*ResultType)->isLValueReferenceType())
+ Ty = S.Context.getLValueReferenceType(Ty,
+ (*ResultType)->getAs<LValueReferenceType>()->isSpelledAsLValue());
+ *ResultType = Ty;
+ }
+
+ InitListExpr *StructuredInitList =
+ PerformInitList.getFullyStructuredList();
CurInit.release();
- CurInit = S.Owned(PerformInitList.getFullyStructuredList());
+ CurInit = S.Owned(StructuredInitList);
break;
}
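
The hack documented above is easiest to see with concrete declarations (illustrative only):

    int ar[] = {1, 2, 3};           // *ResultType must be completed to int[3]
    const int (&r)[3] = {1, 2, 3};  // reference: unwrap, initialize a
                                    // temporary array, then rewrap the
                                    // reference type around the result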
- case SK_ListConstructorCall:
- assert(false && "List constructor calls not yet supported.");
-
- case SK_ConstructorInitialization: {
- unsigned NumArgs = Args.size();
- CXXConstructorDecl *Constructor
- = cast<CXXConstructorDecl>(Step->Function.Function);
- bool HadMultipleCandidates = Step->Function.HadMultipleCandidates;
-
- // Build a call to the selected constructor.
- ASTOwningVector<Expr*> ConstructorArgs(S);
- SourceLocation Loc = (Kind.isCopyInit() && Kind.getEqualLoc().isValid())
- ? Kind.getEqualLoc()
- : Kind.getLocation();
-
- if (Kind.getKind() == InitializationKind::IK_Default) {
- // Force even a trivial, implicit default constructor to be
- // semantically checked. We do this explicitly because we don't build
- // the definition for completely trivial constructors.
- CXXRecordDecl *ClassDecl = Constructor->getParent();
- assert(ClassDecl && "No parent class for constructor.");
- if (Constructor->isDefaulted() && Constructor->isDefaultConstructor() &&
- ClassDecl->hasTrivialDefaultConstructor() &&
- !Constructor->isUsed(false))
- S.DefineImplicitDefaultConstructor(Loc, Constructor);
- }
-
- // Determine the arguments required to actually perform the constructor
- // call.
- if (S.CompleteConstructorCall(Constructor, move(Args),
- Loc, ConstructorArgs))
- return ExprError();
-
-
- if (Entity.getKind() == InitializedEntity::EK_Temporary &&
- NumArgs != 1 && // FIXME: Hack to work around cast weirdness
- (Kind.getKind() == InitializationKind::IK_Direct ||
- Kind.getKind() == InitializationKind::IK_Value)) {
- // An explicitly-constructed temporary, e.g., X(1, 2).
- unsigned NumExprs = ConstructorArgs.size();
- Expr **Exprs = (Expr **)ConstructorArgs.take();
- S.MarkDeclarationReferenced(Loc, Constructor);
- S.DiagnoseUseOfDecl(Constructor, Loc);
-
- TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
- if (!TSInfo)
- TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
-
- CurInit = S.Owned(new (S.Context) CXXTemporaryObjectExpr(S.Context,
- Constructor,
- TSInfo,
- Exprs,
- NumExprs,
- Kind.getParenRange(),
- HadMultipleCandidates,
- ConstructorInitRequiresZeroInit));
- } else {
- CXXConstructExpr::ConstructionKind ConstructKind =
- CXXConstructExpr::CK_Complete;
-
- if (Entity.getKind() == InitializedEntity::EK_Base) {
- ConstructKind = Entity.getBaseSpecifier()->isVirtual() ?
- CXXConstructExpr::CK_VirtualBase :
- CXXConstructExpr::CK_NonVirtualBase;
- } else if (Entity.getKind() == InitializedEntity::EK_Delegating) {
- ConstructKind = CXXConstructExpr::CK_Delegating;
- }
-
- // Only get the parenthesis range if it is a direct construction.
- SourceRange parenRange =
- Kind.getKind() == InitializationKind::IK_Direct ?
- Kind.getParenRange() : SourceRange();
-
- // If the entity allows NRVO, mark the construction as elidable
- // unconditionally.
- if (Entity.allowsNRVO())
- CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
- Constructor, /*Elidable=*/true,
- move_arg(ConstructorArgs),
- HadMultipleCandidates,
- ConstructorInitRequiresZeroInit,
- ConstructKind,
- parenRange);
- else
- CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
- Constructor,
- move_arg(ConstructorArgs),
- HadMultipleCandidates,
- ConstructorInitRequiresZeroInit,
- ConstructKind,
- parenRange);
- }
- if (CurInit.isInvalid())
- return ExprError();
+ case SK_ListConstructorCall: {
+ // When an initializer list is passed for a parameter of type "reference
+ // to object", we don't get an EK_Temporary entity, but instead an
+ // EK_Parameter entity with reference type.
+ // FIXME: This is a hack. What we really should do is create a user
+ // conversion step for this case, but this makes it considerably more
+ // complicated. For now, this will do.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(
+ Entity.getType().getNonReferenceType());
+ bool UseTemporary = Entity.getType()->isReferenceType();
+ InitListExpr *InitList = cast<InitListExpr>(CurInit.get());
+ MultiExprArg Arg(InitList->getInits(), InitList->getNumInits());
+ CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity :
+ Entity,
+ Kind, move(Arg), *Step,
+ ConstructorInitRequiresZeroInit);
+ break;
+ }
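
A hedged example of the reference-parameter case the FIXME above describes:

    #include <vector>
    void f(const std::vector<int> &v);
    void g() {
      f({1, 2, 3});  // Entity is an EK_Parameter of reference type; the
    }                // list initializes a temporary vector bound to it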
- // Only check access if all of that succeeded.
- S.CheckConstructorAccess(Loc, Constructor, Entity,
- Step->Function.FoundDecl.getAccess());
- S.DiagnoseUseOfDecl(Step->Function.FoundDecl, Loc);
+ case SK_UnwrapInitList:
+ CurInit = S.Owned(cast<InitListExpr>(CurInit.take())->getInit(0));
+ break;
- if (shouldBindAsTemporary(Entity))
- CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
+ case SK_RewrapInitList: {
+ Expr *E = CurInit.take();
+ InitListExpr *Syntactic = Step->WrappingSyntacticList;
+ InitListExpr *ILE = new (S.Context) InitListExpr(S.Context,
+ Syntactic->getLBraceLoc(), &E, 1, Syntactic->getRBraceLoc());
+ ILE->setSyntacticForm(Syntactic);
+ ILE->setType(E->getType());
+ ILE->setValueKind(E->getValueKind());
+ CurInit = S.Owned(ILE);
+ break;
+ }
+ case SK_ConstructorInitialization: {
+ // When an initializer list is passed for a parameter of type "reference
+ // to object", we don't get an EK_Temporary entity, but instead an
+ // EK_Parameter entity with reference type.
+ // FIXME: This is a hack. What we really should do is create a user
+ // conversion step for this case, but this makes it considerably more
+ // complicated. For now, this will do.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(
+ Entity.getType().getNonReferenceType());
+ bool UseTemporary = Entity.getType()->isReferenceType();
+ CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity
+ : Entity,
+ Kind, move(Args), *Step,
+ ConstructorInitRequiresZeroInit);
break;
}
@@ -4733,7 +5203,7 @@ InitializationSequence::Perform(Sema &S,
// the call to the object's constructor within the next step.
ConstructorInitRequiresZeroInit = true;
} else if (Kind.getKind() == InitializationKind::IK_Value &&
- S.getLangOptions().CPlusPlus &&
+ S.getLangOpts().CPlusPlus &&
!Kind.isImplicitValueInit()) {
TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
if (!TSInfo)
@@ -4819,6 +5289,13 @@ InitializationSequence::Perform(Sema &S,
}
break;
+ case SK_ParenthesizedArrayInit:
+ // Okay: we checked everything before creating this step. Note that
+ // this is a GNU extension.
+ S.Diag(Kind.getLocation(), diag::ext_array_init_parens)
+ << CurInit.get()->getSourceRange();
+ break;
+
case SK_PassByIndirectCopyRestore:
case SK_PassByIndirectRestore:
checkIndirectCopyRestoreSource(S, CurInit.get());
@@ -4832,6 +5309,54 @@ InitializationSequence::Perform(Sema &S,
CK_ARCProduceObject,
CurInit.take(), 0, VK_RValue));
break;
+
+ case SK_StdInitializerList: {
+ QualType Dest = Step->Type;
+ QualType E;
+ bool Success = S.isStdInitializerList(Dest, &E);
+ (void)Success;
+ assert(Success && "Destination type changed?");
+
+ // If the element type has a destructor, check it.
+ if (CXXRecordDecl *RD = E->getAsCXXRecordDecl()) {
+ if (!RD->hasIrrelevantDestructor()) {
+ if (CXXDestructorDecl *Destructor = S.LookupDestructor(RD)) {
+ S.MarkFunctionReferenced(Kind.getLocation(), Destructor);
+ S.CheckDestructorAccess(Kind.getLocation(), Destructor,
+ S.PDiag(diag::err_access_dtor_temp) << E);
+ S.DiagnoseUseOfDecl(Destructor, Kind.getLocation());
+ }
+ }
+ }
+
+ InitListExpr *ILE = cast<InitListExpr>(CurInit.take());
+ unsigned NumInits = ILE->getNumInits();
+ SmallVector<Expr*, 16> Converted(NumInits);
+ InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
+ S.Context.getConstantArrayType(E,
+ llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ NumInits),
+ ArrayType::Normal, 0));
+ InitializedEntity Element = InitializedEntity::InitializeElement(S.Context,
+ 0, HiddenArray);
+ for (unsigned i = 0; i < NumInits; ++i) {
+ Element.setElementIndex(i);
+ ExprResult Init = S.Owned(ILE->getInit(i));
+ ExprResult Res = S.PerformCopyInitialization(Element,
+ Init.get()->getExprLoc(),
+ Init);
+ assert(!Res.isInvalid() && "Result changed since try phase.");
+ Converted[i] = Res.take();
+ }
+ InitListExpr *Semantic = new (S.Context)
+ InitListExpr(S.Context, ILE->getLBraceLoc(),
+ Converted.data(), NumInits, ILE->getRBraceLoc());
+ Semantic->setSyntacticForm(ILE);
+ Semantic->setType(Dest);
+ Semantic->setInitializesStdInitializerList();
+ CurInit = S.Owned(Semantic);
+ break;
+ }
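
A sketch of the desugaring this step performs; `__backing` is a fictional name for the hidden array entity built above:

    #include <initializer_list>
    std::initializer_list<int> il = {1, 2, 3};
    // behaves roughly as if written:
    //   const int __backing[3] = {1, 2, 3};        // the hidden array
    //   std::initializer_list<int> il(__backing, 3);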
}
}
@@ -4884,6 +5409,11 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Args[0]->getSourceRange();
break;
+ case FK_VariableLengthArrayHasInitializer:
+ S.Diag(Kind.getLocation(), diag::err_variable_object_no_init)
+ << Args[0]->getSourceRange();
+ break;
+
case FK_AddressOfOverloadFailed: {
DeclAccessPair Found;
S.ResolveAddressOfOverloadedFunction(Args[0],
@@ -4906,14 +5436,16 @@ bool InitializationSequence::Diagnose(Sema &S,
<< DestType << Args[0]->getType()
<< Args[0]->getSourceRange();
- FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args, NumArgs);
+ FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
case OR_No_Viable_Function:
S.Diag(Kind.getLocation(), diag::err_typecheck_nonviable_condition)
<< Args[0]->getType() << DestType.getNonReferenceType()
<< Args[0]->getSourceRange();
- FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args, NumArgs);
+ FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
case OR_Deleted: {
@@ -4925,8 +5457,7 @@ bool InitializationSequence::Diagnose(Sema &S,
= FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best,
true);
if (Ovl == OR_Deleted) {
- S.Diag(Best->Function->getLocation(), diag::note_unavailable_here)
- << 1 << Best->Function->isDeleted();
+ S.NoteDeletedFunction(Best->Function);
} else {
llvm_unreachable("Inconsistent overload resolution?");
}
@@ -4935,11 +5466,20 @@ bool InitializationSequence::Diagnose(Sema &S,
case OR_Success:
llvm_unreachable("Conversion did not fail!");
- break;
}
break;
case FK_NonConstLValueReferenceBindingToTemporary:
+ if (isa<InitListExpr>(Args[0])) {
+ S.Diag(Kind.getLocation(),
+ diag::err_lvalue_reference_bind_to_initlist)
+ << DestType.getNonReferenceType().isVolatileQualified()
+ << DestType.getNonReferenceType()
+ << Args[0]->getSourceRange();
+ break;
+ }
+ // Intentional fallthrough
+
case FK_NonConstLValueReferenceBindingToUnrelated:
S.Diag(Kind.getLocation(),
Failure == FK_NonConstLValueReferenceBindingToTemporary
@@ -4977,12 +5517,14 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_ConversionFailed: {
QualType FromType = Args[0]->getType();
- S.Diag(Kind.getLocation(), diag::err_init_conversion_failed)
+ PartialDiagnostic PDiag = S.PDiag(diag::err_init_conversion_failed)
<< (int)Entity.getKind()
<< DestType
<< Args[0]->isLValue()
<< FromType
<< Args[0]->getSourceRange();
+ S.HandleFunctionTypeMismatch(PDiag, FromType, DestType);
+ S.Diag(Kind.getLocation(), PDiag);
if (DestType.getNonReferenceType()->isObjCObjectPointerType() &&
Args[0]->getType()->isObjCObjectPointerType())
S.EmitRelatedResultTypeNote(Args[0]);
@@ -5022,12 +5564,20 @@ bool InitializationSequence::Diagnose(Sema &S,
<< (DestType->isRecordType()) << DestType << Args[0]->getSourceRange();
break;
+ case FK_ListConstructorOverloadFailed:
case FK_ConstructorOverloadFailed: {
SourceRange ArgsRange;
if (NumArgs)
ArgsRange = SourceRange(Args[0]->getLocStart(),
Args[NumArgs - 1]->getLocEnd());
+ if (Failure == FK_ListConstructorOverloadFailed) {
+ assert(NumArgs == 1 && "List construction from other than 1 argument.");
+ InitListExpr *InitList = cast<InitListExpr>(Args[0]);
+ Args = InitList->getInits();
+ NumArgs = InitList->getNumInits();
+ }
+
// FIXME: Using "DestType" for the entity we're printing is probably
// bad.
switch (FailedOverloadResult) {
@@ -5035,7 +5585,7 @@ bool InitializationSequence::Diagnose(Sema &S,
S.Diag(Kind.getLocation(), diag::err_ovl_ambiguous_init)
<< DestType << ArgsRange;
FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates,
- Args, NumArgs);
+ llvm::makeArrayRef(Args, NumArgs));
break;
case OR_No_Viable_Function:
@@ -5080,30 +5630,41 @@ bool InitializationSequence::Diagnose(Sema &S,
S.Diag(Kind.getLocation(), diag::err_ovl_no_viable_function_in_init)
<< DestType << ArgsRange;
- FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args, NumArgs);
+ FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
case OR_Deleted: {
- S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
- << true << DestType << ArgsRange;
OverloadCandidateSet::iterator Best;
OverloadingResult Ovl
= FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
- if (Ovl == OR_Deleted) {
- S.Diag(Best->Function->getLocation(), diag::note_unavailable_here)
- << 1 << Best->Function->isDeleted();
- } else {
+ if (Ovl != OR_Deleted) {
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << true << DestType << ArgsRange;
llvm_unreachable("Inconsistent overload resolution?");
+ break;
}
+
+ // If this is a defaulted or implicitly-declared function, then
+ // it was implicitly deleted. Make it clear that the deletion was
+ // implicit.
+ if (S.isImplicitlyDeleted(Best->Function))
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_special_init)
+ << S.getSpecialMember(cast<CXXMethodDecl>(Best->Function))
+ << DestType << ArgsRange;
+ else
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << true << DestType << ArgsRange;
+
+ S.NoteDeletedFunction(Best->Function);
break;
}
case OR_Success:
llvm_unreachable("Conversion did not fail!");
- break;
}
- break;
}
+ break;
case FK_DefaultInitOfConst:
if (Entity.getKind() == InitializedEntity::EK_Member &&
@@ -5126,7 +5687,7 @@ bool InitializationSequence::Diagnose(Sema &S,
break;
case FK_Incomplete:
- S.RequireCompleteType(Kind.getLocation(), DestType,
+ S.RequireCompleteType(Kind.getLocation(), FailedIncompleteType,
diag::err_init_incomplete_type);
break;
@@ -5135,11 +5696,62 @@ bool InitializationSequence::Diagnose(Sema &S,
InitListExpr* InitList = cast<InitListExpr>(Args[0]);
QualType DestType = Entity.getType();
InitListChecker DiagnoseInitList(S, Entity, InitList,
- DestType, /*VerifyOnly=*/false);
+ DestType, /*VerifyOnly=*/false,
+ Kind.getKind() != InitializationKind::IK_DirectList ||
+ !S.getLangOpts().CPlusPlus0x);
assert(DiagnoseInitList.HadError() &&
"Inconsistent init list check result.");
break;
}
+
+ case FK_PlaceholderType: {
+ // FIXME: Already diagnosed!
+ break;
+ }
+
+ case FK_InitListElementCopyFailure: {
+ // Try to perform all copies again.
+ InitListExpr* InitList = cast<InitListExpr>(Args[0]);
+ unsigned NumInits = InitList->getNumInits();
+ QualType DestType = Entity.getType();
+ QualType E;
+ bool Success = S.isStdInitializerList(DestType, &E);
+ (void)Success;
+ assert(Success && "Where did the std::initializer_list go?");
+ InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
+ S.Context.getConstantArrayType(E,
+ llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ NumInits),
+ ArrayType::Normal, 0));
+ InitializedEntity Element = InitializedEntity::InitializeElement(S.Context,
+ 0, HiddenArray);
+ // Show at most 3 errors. Otherwise, you'd get a lot of errors for cases
+ // where the init list's element type is wrong, e.g.
+ // std::initializer_list<void*> list = { 1, 2, 3, 4, 5, 6, 7, 8 };
+ // FIXME: Emit a note if we hit the limit?
+ int ErrorCount = 0;
+ for (unsigned i = 0; i < NumInits && ErrorCount < 3; ++i) {
+ Element.setElementIndex(i);
+ ExprResult Init = S.Owned(InitList->getInit(i));
+ if (S.PerformCopyInitialization(Element, Init.get()->getExprLoc(), Init)
+ .isInvalid())
+ ++ErrorCount;
+ }
+ break;
+ }
+
+ case FK_ExplicitConstructor: {
+ S.Diag(Kind.getLocation(), diag::err_selected_explicit_constructor)
+ << Args[0]->getSourceRange();
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult Ovl
+ = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
+ (void)Ovl;
+ assert(Ovl == OR_Success && "Inconsistent overload resolution");
+ CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
+ S.Diag(CtorDecl->getLocation(), diag::note_constructor_declared_here);
+ break;
+ }
}
PrintInitLocationNote(S, Entity);
@@ -5237,6 +5849,27 @@ void InitializationSequence::dump(raw_ostream &OS) const {
case FK_ListInitializationFailed:
OS << "list initialization checker failure";
+ break;
+
+ case FK_VariableLengthArrayHasInitializer:
+ OS << "variable length array has an initializer";
+ break;
+
+ case FK_PlaceholderType:
+ OS << "initializer expression isn't contextually valid";
+ break;
+
+ case FK_ListConstructorOverloadFailed:
+ OS << "list constructor overloading failed";
+ break;
+
+ case FK_InitListElementCopyFailure:
+ OS << "copy construction of initializer list element failed";
+ break;
+
+ case FK_ExplicitConstructor:
+ OS << "list copy initialization chose explicit constructor";
+ break;
}
OS << '\n';
return;
@@ -5291,9 +5924,11 @@ void InitializationSequence::dump(raw_ostream &OS) const {
case SK_QualificationConversionRValue:
OS << "qualification conversion (rvalue)";
+ break;
case SK_QualificationConversionXValue:
OS << "qualification conversion (xvalue)";
+ break;
case SK_QualificationConversionLValue:
OS << "qualification conversion (lvalue)";
@@ -5313,6 +5948,14 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "list initialization via constructor";
break;
+ case SK_UnwrapInitList:
+ OS << "unwrap reference initializer list";
+ break;
+
+ case SK_RewrapInitList:
+ OS << "rewrap reference initializer list";
+ break;
+
case SK_ConstructorInitialization:
OS << "constructor initialization";
break;
@@ -5337,6 +5980,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "array initialization";
break;
+ case SK_ParenthesizedArrayInit:
+ OS << "parenthesized array initialization";
+ break;
+
case SK_PassByIndirectCopyRestore:
OS << "pass by indirect copy and restore";
break;
@@ -5348,6 +5995,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
case SK_ProduceObjCObject:
OS << "Objective-C object retension";
break;
+
+ case SK_StdInitializerList:
+ OS << "std::initializer_list from initializer list";
+ break;
}
}
}
@@ -5356,27 +6007,93 @@ void InitializationSequence::dump() const {
dump(llvm::errs());
}
-static void DiagnoseNarrowingInInitList(
- Sema& S, QualType EntityType, const Expr *InitE,
- bool Constant, const APValue &ConstantValue) {
- if (Constant) {
- S.Diag(InitE->getLocStart(),
- S.getLangOptions().CPlusPlus0x && !S.getLangOptions().MicrosoftExt
- ? diag::err_init_list_constant_narrowing
- : diag::warn_init_list_constant_narrowing)
- << InitE->getSourceRange()
- << ConstantValue
+static void DiagnoseNarrowingInInitList(Sema &S, InitializationSequence &Seq,
+ QualType EntityType,
+ const Expr *PreInit,
+ const Expr *PostInit) {
+ if (Seq.step_begin() == Seq.step_end() || PreInit->isValueDependent())
+ return;
+
+ // A narrowing conversion can only appear as the final implicit conversion in
+ // an initialization sequence.
+ const InitializationSequence::Step &LastStep = Seq.step_end()[-1];
+ if (LastStep.Kind != InitializationSequence::SK_ConversionSequence)
+ return;
+
+ const ImplicitConversionSequence &ICS = *LastStep.ICS;
+ const StandardConversionSequence *SCS = 0;
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion:
+ SCS = &ICS.Standard;
+ break;
+ case ImplicitConversionSequence::UserDefinedConversion:
+ SCS = &ICS.UserDefined.After;
+ break;
+ case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::EllipsisConversion:
+ case ImplicitConversionSequence::BadConversion:
+ return;
+ }
+
+ // Determine the type prior to the narrowing conversion. If a conversion
+ // operator was used, this may be different from both the type of the entity
+ // and of the pre-initialization expression.
+ QualType PreNarrowingType = PreInit->getType();
+ if (Seq.step_begin() + 1 != Seq.step_end())
+ PreNarrowingType = Seq.step_end()[-2].Type;
+
+ // C++11 [dcl.init.list]p7: Check whether this is a narrowing conversion.
+ APValue ConstantValue;
+ QualType ConstantType;
+ switch (SCS->getNarrowingKind(S.Context, PostInit, ConstantValue,
+ ConstantType)) {
+ case NK_Not_Narrowing:
+ // No narrowing occurred.
+ return;
+
+ case NK_Type_Narrowing:
+ // This was a floating-to-integer conversion, which is always considered a
+ // narrowing conversion even if the value is a constant and can be
+ // represented exactly as an integer.
+ S.Diag(PostInit->getLocStart(),
+ S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus0x?
+ diag::warn_init_list_type_narrowing
+ : S.isSFINAEContext()?
+ diag::err_init_list_type_narrowing_sfinae
+ : diag::err_init_list_type_narrowing)
+ << PostInit->getSourceRange()
+ << PreNarrowingType.getLocalUnqualifiedType()
<< EntityType.getLocalUnqualifiedType();
- } else
- S.Diag(InitE->getLocStart(),
- S.getLangOptions().CPlusPlus0x && !S.getLangOptions().MicrosoftExt
- ? diag::err_init_list_variable_narrowing
- : diag::warn_init_list_variable_narrowing)
- << InitE->getSourceRange()
- << InitE->getType().getLocalUnqualifiedType()
+ break;
+
+ case NK_Constant_Narrowing:
+ // A constant value was narrowed.
+ S.Diag(PostInit->getLocStart(),
+ S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus0x?
+ diag::warn_init_list_constant_narrowing
+ : S.isSFINAEContext()?
+ diag::err_init_list_constant_narrowing_sfinae
+ : diag::err_init_list_constant_narrowing)
+ << PostInit->getSourceRange()
+ << ConstantValue.getAsString(S.getASTContext(), ConstantType)
<< EntityType.getLocalUnqualifiedType();
+ break;
- llvm::SmallString<128> StaticCast;
+ case NK_Variable_Narrowing:
+ // A variable's value may have been narrowed.
+ S.Diag(PostInit->getLocStart(),
+ S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus0x?
+ diag::warn_init_list_variable_narrowing
+ : S.isSFINAEContext()?
+ diag::err_init_list_variable_narrowing_sfinae
+ : diag::err_init_list_variable_narrowing)
+ << PostInit->getSourceRange()
+ << PreNarrowingType.getLocalUnqualifiedType()
+ << EntityType.getLocalUnqualifiedType();
+ break;
+ }
+
+ SmallString<128> StaticCast;
llvm::raw_svector_ostream OS(StaticCast);
OS << "static_cast<";
if (const TypedefType *TT = EntityType->getAs<TypedefType>()) {
@@ -5387,18 +6104,18 @@ static void DiagnoseNarrowingInInitList(
// getQualifiedNameAsString() includes non-machine-parsable components.
OS << *TT->getDecl();
} else if (const BuiltinType *BT = EntityType->getAs<BuiltinType>())
- OS << BT->getName(S.getLangOptions());
+ OS << BT->getName(S.getLangOpts());
else {
// Oops, we didn't find the actual type of the variable. Don't emit a fixit
// with a broken cast.
return;
}
OS << ">(";
- S.Diag(InitE->getLocStart(), diag::note_init_list_narrowing_override)
- << InitE->getSourceRange()
- << FixItHint::CreateInsertion(InitE->getLocStart(), OS.str())
+ S.Diag(PostInit->getLocStart(), diag::note_init_list_narrowing_override)
+ << PostInit->getSourceRange()
+ << FixItHint::CreateInsertion(PostInit->getLocStart(), OS.str())
<< FixItHint::CreateInsertion(
- S.getPreprocessor().getLocForEndOfToken(InitE->getLocEnd()), ")");
+ S.getPreprocessor().getLocForEndOfToken(PostInit->getLocEnd()), ")");
}
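
Illustrative triggers for the three narrowing kinds classified above (each line is deliberately ill-formed in C++11):

    int    i{2.0};   // NK_Type_Narrowing: floating -> integral is always
                     // narrowing, even for exactly representable values
    char   c{300};   // NK_Constant_Narrowing: constant does not fit
    double d = 1.0;
    float  f{d};     // NK_Variable_Narrowing: value not known statically
    // the emitted note suggests: float f{static_cast<float>(d)};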
//===----------------------------------------------------------------------===//
@@ -5423,7 +6140,8 @@ ExprResult
Sema::PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
- bool TopLevelOfInitList) {
+ bool TopLevelOfInitList,
+ bool AllowExplicit) {
if (Init.isInvalid())
return ExprError();
@@ -5434,16 +6152,16 @@ Sema::PerformCopyInitialization(const InitializedEntity &Entity,
EqualLoc = InitE->getLocStart();
InitializationKind Kind = InitializationKind::CreateCopy(InitE->getLocStart(),
- EqualLoc);
+ EqualLoc,
+ AllowExplicit);
InitializationSequence Seq(*this, Entity, Kind, &InitE, 1);
Init.release();
- bool Constant = false;
- APValue Result;
- if (TopLevelOfInitList &&
- Seq.endsWithNarrowing(Context, InitE, &Constant, &Result)) {
- DiagnoseNarrowingInInitList(*this, Entity.getType(), InitE,
- Constant, Result);
- }
- return Seq.Perform(*this, Entity, Kind, MultiExprArg(&InitE, 1));
+ ExprResult Result = Seq.Perform(*this, Entity, Kind, MultiExprArg(&InitE, 1));
+
+ if (!Result.isInvalid() && TopLevelOfInitList)
+ DiagnoseNarrowingInInitList(*this, Seq, Entity.getType(),
+ InitE, Result.get());
+
+ return Result;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
new file mode 100644
index 0000000..6ef8d88
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
@@ -0,0 +1,820 @@
+//===--- SemaLambda.cpp - Semantic Analysis for C++11 Lambdas -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ lambda expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/AST/ExprCXX.h"
+using namespace clang;
+using namespace sema;
+
+CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange,
+ bool KnownDependent) {
+ DeclContext *DC = CurContext;
+ while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
+ DC = DC->getParent();
+
+ // Start constructing the lambda class.
+ CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC,
+ IntroducerRange.getBegin(),
+ KnownDependent);
+ DC->addDecl(Class);
+
+ return Class;
+}
+
+/// \brief Determine whether the given context is or is enclosed in an inline
+/// function.
+static bool isInInlineFunction(const DeclContext *DC) {
+ while (!DC->isFileContext()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DC))
+ if (FD->isInlined())
+ return true;
+
+ DC = DC->getLexicalParent();
+ }
+
+ return false;
+}
+
+CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ TypeSourceInfo *MethodType,
+ SourceLocation EndLoc,
+ llvm::ArrayRef<ParmVarDecl *> Params,
+ llvm::Optional<unsigned> ManglingNumber,
+ Decl *ContextDecl) {
+ // C++11 [expr.prim.lambda]p5:
+ // The closure type for a lambda-expression has a public inline function
+ // call operator (13.5.4) whose parameters and return type are described by
+ // the lambda-expression's parameter-declaration-clause and
+ // trailing-return-type respectively.
+ DeclarationName MethodName
+ = Context.DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclarationNameLoc MethodNameLoc;
+ MethodNameLoc.CXXOperatorName.BeginOpNameLoc
+ = IntroducerRange.getBegin().getRawEncoding();
+ MethodNameLoc.CXXOperatorName.EndOpNameLoc
+ = IntroducerRange.getEnd().getRawEncoding();
+ CXXMethodDecl *Method
+ = CXXMethodDecl::Create(Context, Class, EndLoc,
+ DeclarationNameInfo(MethodName,
+ IntroducerRange.getBegin(),
+ MethodNameLoc),
+ MethodType->getType(), MethodType,
+ /*isStatic=*/false,
+ SC_None,
+ /*isInline=*/true,
+ /*isConstexpr=*/false,
+ EndLoc);
+ Method->setAccess(AS_public);
+
+ // Temporarily set the lexical declaration context to the current
+ // context, so that the Scope stack matches the lexical nesting.
+ Method->setLexicalDeclContext(CurContext);
+
+ // Add parameters.
+ if (!Params.empty()) {
+ Method->setParams(Params);
+ CheckParmsForFunctionDef(const_cast<ParmVarDecl **>(Params.begin()),
+ const_cast<ParmVarDecl **>(Params.end()),
+ /*CheckParameterNames=*/false);
+
+ for (CXXMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; ++P)
+ (*P)->setOwningFunction(Method);
+ }
+
+ // If we don't already have a mangling number for this lambda expression,
+ // allocate one now.
+ if (!ManglingNumber) {
+ ContextDecl = ExprEvalContexts.back().LambdaContextDecl;
+
+ enum ContextKind {
+ Normal,
+ DefaultArgument,
+ DataMember,
+ StaticDataMember
+ } Kind = Normal;
+
+ // Default arguments of member function parameters that appear in a class
+ // definition, as well as the initializers of data members, receive special
+ // treatment. Identify them.
+ if (ContextDecl) {
+ if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(ContextDecl)) {
+ if (const DeclContext *LexicalDC
+ = Param->getDeclContext()->getLexicalParent())
+ if (LexicalDC->isRecord())
+ Kind = DefaultArgument;
+ } else if (VarDecl *Var = dyn_cast<VarDecl>(ContextDecl)) {
+ if (Var->getDeclContext()->isRecord())
+ Kind = StaticDataMember;
+ } else if (isa<FieldDecl>(ContextDecl)) {
+ Kind = DataMember;
+ }
+ }
+
+ switch (Kind) {
+ case Normal:
+ if (CurContext->isDependentContext() || isInInlineFunction(CurContext))
+ ManglingNumber = Context.getLambdaManglingNumber(Method);
+ else
+ ManglingNumber = 0;
+
+ // There is no special context for this lambda.
+ ContextDecl = 0;
+ break;
+
+ case StaticDataMember:
+ if (!CurContext->isDependentContext()) {
+ ManglingNumber = 0;
+ ContextDecl = 0;
+ break;
+ }
+ // Fall through to assign a mangling number.
+
+ case DataMember:
+ case DefaultArgument:
+ ManglingNumber = ExprEvalContexts.back().getLambdaMangleContext()
+ .getManglingNumber(Method);
+ break;
+ }
+ }
+
+ Class->setLambdaMangling(*ManglingNumber, ContextDecl);
+ return Method;
+}
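
A hedged sketch of the closure type this function starts building; the class name is fictional, since closure types are unnamed:

    auto L = [](int x) { return x + 1; };
    // corresponds approximately to:
    class __closure {
    public:
      inline int operator()(int x) const { return x + 1; }
    };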
+
+LambdaScopeInfo *Sema::enterLambdaScope(CXXMethodDecl *CallOperator,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ bool Mutable) {
+ PushLambdaScope(CallOperator->getParent(), CallOperator);
+ LambdaScopeInfo *LSI = getCurLambda();
+ if (CaptureDefault == LCD_ByCopy)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByval;
+ else if (CaptureDefault == LCD_ByRef)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByref;
+ LSI->IntroducerRange = IntroducerRange;
+ LSI->ExplicitParams = ExplicitParams;
+ LSI->Mutable = Mutable;
+
+ if (ExplicitResultType) {
+ LSI->ReturnType = CallOperator->getResultType();
+
+ if (!LSI->ReturnType->isDependentType() &&
+ !LSI->ReturnType->isVoidType()) {
+ if (RequireCompleteType(CallOperator->getLocStart(), LSI->ReturnType,
+ diag::err_lambda_incomplete_result)) {
+ // Do nothing.
+ } else if (LSI->ReturnType->isObjCObjectOrInterfaceType()) {
+ Diag(CallOperator->getLocStart(), diag::err_lambda_objc_object_result)
+ << LSI->ReturnType;
+ }
+ }
+ } else {
+ LSI->HasImplicitReturnType = true;
+ }
+
+ return LSI;
+}
+
+void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
+ LSI->finishedExplicitCaptures();
+}
+
+void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) {
+ // Introduce our parameters into the function scope
+ for (unsigned p = 0, NumParams = CallOperator->getNumParams();
+ p < NumParams; ++p) {
+ ParmVarDecl *Param = CallOperator->getParamDecl(p);
+
+ // If this has an identifier, add it to the scope stack.
+ if (CurScope && Param->getIdentifier()) {
+ CheckShadow(CurScope, Param);
+
+ PushOnScopeChains(Param, CurScope);
+ }
+ }
+}
+
+void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
+ Declarator &ParamInfo,
+ Scope *CurScope) {
+ // Determine if we're within a context where we know that the lambda will
+ // be dependent, because there are template parameters in scope.
+ bool KnownDependent = false;
+ if (Scope *TmplScope = CurScope->getTemplateParamParent())
+ if (!TmplScope->decl_empty())
+ KnownDependent = true;
+
+ CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, KnownDependent);
+
+ // Determine the signature of the call operator.
+ TypeSourceInfo *MethodTyInfo;
+ bool ExplicitParams = true;
+ bool ExplicitResultType = true;
+ SourceLocation EndLoc;
+ llvm::ArrayRef<ParmVarDecl *> Params;
+ if (ParamInfo.getNumTypeObjects() == 0) {
+ // C++11 [expr.prim.lambda]p4:
+ // If a lambda-expression does not include a lambda-declarator, it is as
+ // if the lambda-declarator were ().
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasTrailingReturn = true;
+ EPI.TypeQuals |= DeclSpec::TQ_const;
+ QualType MethodTy = Context.getFunctionType(Context.DependentTy,
+ /*Args=*/0, /*NumArgs=*/0, EPI);
+ MethodTyInfo = Context.getTrivialTypeSourceInfo(MethodTy);
+ ExplicitParams = false;
+ ExplicitResultType = false;
+ EndLoc = Intro.Range.getEnd();
+ } else {
+ assert(ParamInfo.isFunctionDeclarator() &&
+ "lambda-declarator is a function");
+ DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo();
+
+ // C++11 [expr.prim.lambda]p5:
+ // This function call operator is declared const (9.3.1) if and only if
+ // the lambda-expression's parameter-declaration-clause is not followed
+ // by mutable. It is neither virtual nor declared volatile. [...]
+ if (!FTI.hasMutableQualifier())
+ FTI.TypeQuals |= DeclSpec::TQ_const;
+
+ MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope);
+ assert(MethodTyInfo && "no type from lambda-declarator");
+ EndLoc = ParamInfo.getSourceRange().getEnd();
+
+ ExplicitResultType
+ = MethodTyInfo->getType()->getAs<FunctionType>()->getResultType()
+ != Context.DependentTy;
+
+ TypeLoc TL = MethodTyInfo->getTypeLoc();
+ FunctionProtoTypeLoc Proto = cast<FunctionProtoTypeLoc>(TL);
+ Params = llvm::ArrayRef<ParmVarDecl *>(Proto.getParmArray(),
+ Proto.getNumArgs());
+ }
+
+ CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range,
+ MethodTyInfo, EndLoc, Params);
+
+ if (ExplicitParams)
+ CheckCXXDefaultArguments(Method);
+
+ // Attributes on the lambda apply to the method.
+ ProcessDeclAttributes(CurScope, Method, ParamInfo);
+
+ // Introduce the function call operator as the current declaration context.
+ PushDeclContext(CurScope, Method);
+
+ // Introduce the lambda scope.
+ LambdaScopeInfo *LSI
+ = enterLambdaScope(Method, Intro.Range, Intro.Default, ExplicitParams,
+ ExplicitResultType,
+ (Method->getTypeQualifiers() & Qualifiers::Const) == 0);
+
+ // Handle explicit captures.
+ SourceLocation PrevCaptureLoc
+ = Intro.Default == LCD_None? Intro.Range.getBegin() : Intro.DefaultLoc;
+ for (llvm::SmallVector<LambdaCapture, 4>::const_iterator
+ C = Intro.Captures.begin(),
+ E = Intro.Captures.end();
+ C != E;
+ PrevCaptureLoc = C->Loc, ++C) {
+ if (C->Kind == LCK_This) {
+ // C++11 [expr.prim.lambda]p8:
+ // An identifier or this shall not appear more than once in a
+ // lambda-capture.
+ if (LSI->isCXXThisCaptured()) {
+ Diag(C->Loc, diag::err_capture_more_than_once)
+ << "'this'"
+ << SourceRange(LSI->getCXXThisCapture().getLocation())
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p8:
+ // If a lambda-capture includes a capture-default that is =, the
+ // lambda-capture shall not contain this [...].
+ if (Intro.Default == LCD_ByCopy) {
+ Diag(C->Loc, diag::err_this_capture_with_copy_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p12:
+ // If this is captured by a local lambda expression, its nearest
+ // enclosing function shall be a non-static member function.
+ QualType ThisCaptureType = getCurrentThisType();
+ if (ThisCaptureType.isNull()) {
+ Diag(C->Loc, diag::err_this_capture) << true;
+ continue;
+ }
+
+ CheckCXXThisCapture(C->Loc, /*Explicit=*/true);
+ continue;
+ }
+
+ assert(C->Id && "missing identifier for capture");
+
+ // C++11 [expr.prim.lambda]p8:
+ // If a lambda-capture includes a capture-default that is &, the
+ // identifiers in the lambda-capture shall not be preceded by &.
+ // If a lambda-capture includes a capture-default that is =, [...]
+ // each identifier it contains shall be preceded by &.
+ if (C->Kind == LCK_ByRef && Intro.Default == LCD_ByRef) {
+ Diag(C->Loc, diag::err_reference_capture_with_reference_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ } else if (C->Kind == LCK_ByCopy && Intro.Default == LCD_ByCopy) {
+ Diag(C->Loc, diag::err_copy_capture_with_copy_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ DeclarationNameInfo Name(C->Id, C->Loc);
+ LookupResult R(*this, Name, LookupOrdinaryName);
+ LookupName(R, CurScope);
+ if (R.isAmbiguous())
+ continue;
+ if (R.empty()) {
+ // FIXME: Disable corrections that would add qualification?
+ CXXScopeSpec ScopeSpec;
+ DeclFilterCCC<VarDecl> Validator;
+ if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R, Validator))
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p10:
+ // The identifiers in a capture-list are looked up using the usual rules
+ // for unqualified name lookup (3.4.1); each such lookup shall find a
+ // variable with automatic storage duration declared in the reaching
+ // scope of the local lambda expression.
+ //
+ // Note that the 'reaching scope' check happens in tryCaptureVariable().
+ VarDecl *Var = R.getAsSingle<VarDecl>();
+ if (!Var) {
+ Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id;
+ continue;
+ }
+
+ if (!Var->hasLocalStorage()) {
+ Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id;
+ Diag(Var->getLocation(), diag::note_previous_decl) << C->Id;
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p8:
+ // An identifier or this shall not appear more than once in a
+ // lambda-capture.
+ if (LSI->isCaptured(Var)) {
+ Diag(C->Loc, diag::err_capture_more_than_once)
+ << C->Id
+ << SourceRange(LSI->getCapture(Var).getLocation())
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p23:
+ // A capture followed by an ellipsis is a pack expansion (14.5.3).
+ SourceLocation EllipsisLoc;
+ if (C->EllipsisLoc.isValid()) {
+ if (Var->isParameterPack()) {
+ EllipsisLoc = C->EllipsisLoc;
+ } else {
+ Diag(C->EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(C->Loc);
+
+ // Just ignore the ellipsis.
+ }
+ } else if (Var->isParameterPack()) {
+ Diag(C->Loc, diag::err_lambda_unexpanded_pack);
+ continue;
+ }
+
+ TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef :
+ TryCapture_ExplicitByVal;
+ tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc);
+ }
+ finishLambdaExplicitCaptures(LSI);
+
+ // Add lambda parameters into scope.
+ addLambdaParameters(Method, CurScope);
+
+ // Enter a new evaluation context to insulate the lambda from any
+ // cleanups from the enclosing full-expression.
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
+}
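
Deliberately ill-formed examples of the capture diagnostics issued above:

    void h() {
      int i = 0;
      auto a = [i, i] {};   // err_capture_more_than_once
      auto b = [&, &i] {};  // err_reference_capture_with_reference_default
      auto c = [=, i] {};   // err_copy_capture_with_copy_default
    }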
+
+void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
+ bool IsInstantiation) {
+ // Leave the expression-evaluation context.
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+
+ // Leave the context of the lambda.
+ if (!IsInstantiation)
+ PopDeclContext();
+
+ // Finalize the lambda.
+ LambdaScopeInfo *LSI = getCurLambda();
+ CXXRecordDecl *Class = LSI->Lambda;
+ Class->setInvalidDecl();
+ SmallVector<Decl*, 4> Fields(Class->field_begin(), Class->field_end());
+ ActOnFields(0, Class->getLocation(), Class, Fields,
+ SourceLocation(), SourceLocation(), 0);
+ CheckCompletedCXXClass(Class);
+
+ PopFunctionScopeInfo();
+}
+
+/// \brief Add a lambda's conversion to function pointer, as described in
+/// C++11 [expr.prim.lambda]p6.
+static void addFunctionPointerConversion(Sema &S,
+ SourceRange IntroducerRange,
+ CXXRecordDecl *Class,
+ CXXMethodDecl *CallOperator) {
+ // Add the conversion to function pointer.
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType FunctionPtrTy;
+ QualType FunctionTy;
+ {
+ FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
+ ExtInfo.TypeQuals = 0;
+ FunctionTy = S.Context.getFunctionType(Proto->getResultType(),
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ ExtInfo);
+ FunctionPtrTy = S.Context.getPointerType(FunctionTy);
+ }
+
+ FunctionProtoType::ExtProtoInfo ExtInfo;
+ ExtInfo.TypeQuals = Qualifiers::Const;
+ QualType ConvTy = S.Context.getFunctionType(FunctionPtrTy, 0, 0, ExtInfo);
+
+ SourceLocation Loc = IntroducerRange.getBegin();
+ DeclarationName Name
+ = S.Context.DeclarationNames.getCXXConversionFunctionName(
+ S.Context.getCanonicalType(FunctionPtrTy));
+ DeclarationNameLoc NameLoc;
+ NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(FunctionPtrTy,
+ Loc);
+ CXXConversionDecl *Conversion
+ = CXXConversionDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(Name, Loc, NameLoc),
+ ConvTy,
+ S.Context.getTrivialTypeSourceInfo(ConvTy,
+ Loc),
+ /*isInline=*/false, /*isExplicit=*/false,
+ /*isConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ Conversion->setAccess(AS_public);
+ Conversion->setImplicit(true);
+ Class->addDecl(Conversion);
+
+ // Add a static member function "__invoke" that will be the result of
+ // the conversion.
+ Name = &S.Context.Idents.get("__invoke");
+ CXXMethodDecl *Invoke
+ = CXXMethodDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(Name, Loc), FunctionTy,
+ CallOperator->getTypeSourceInfo(),
+ /*IsStatic=*/true, SC_Static, /*IsInline=*/true,
+ /*IsConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ SmallVector<ParmVarDecl *, 4> InvokeParams;
+ for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
+ ParmVarDecl *From = CallOperator->getParamDecl(I);
+ InvokeParams.push_back(ParmVarDecl::Create(S.Context, Invoke,
+ From->getLocStart(),
+ From->getLocation(),
+ From->getIdentifier(),
+ From->getType(),
+ From->getTypeSourceInfo(),
+ From->getStorageClass(),
+ From->getStorageClassAsWritten(),
+ /*DefaultArg=*/0));
+ }
+ Invoke->setParams(InvokeParams);
+ Invoke->setAccess(AS_private);
+ Invoke->setImplicit(true);
+ Class->addDecl(Invoke);
+}
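
A hedged usage sketch for the conversion function and `__invoke` member added above:

    auto L = [](int x) { return 2 * x; };  // captureless lambda
    int (*fp)(int) = L;                    // uses the conversion added here
    int six = fp(3);                       // effectively calls __invoke(3)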
+
+/// \brief Add a lambda's conversion to block pointer.
+static void addBlockPointerConversion(Sema &S,
+ SourceRange IntroducerRange,
+ CXXRecordDecl *Class,
+ CXXMethodDecl *CallOperator) {
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType BlockPtrTy;
+ {
+ FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
+ ExtInfo.TypeQuals = 0;
+ QualType FunctionTy
+ = S.Context.getFunctionType(Proto->getResultType(),
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ ExtInfo);
+ BlockPtrTy = S.Context.getBlockPointerType(FunctionTy);
+ }
+
+ FunctionProtoType::ExtProtoInfo ExtInfo;
+ ExtInfo.TypeQuals = Qualifiers::Const;
+ QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, 0, 0, ExtInfo);
+
+ SourceLocation Loc = IntroducerRange.getBegin();
+ DeclarationName Name
+ = S.Context.DeclarationNames.getCXXConversionFunctionName(
+ S.Context.getCanonicalType(BlockPtrTy));
+ DeclarationNameLoc NameLoc;
+ NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(BlockPtrTy, Loc);
+ CXXConversionDecl *Conversion
+ = CXXConversionDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(Name, Loc, NameLoc),
+ ConvTy,
+ S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
+ /*isInline=*/false, /*isExplicit=*/false,
+ /*isConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ Conversion->setAccess(AS_public);
+ Conversion->setImplicit(true);
+ Class->addDecl(Conversion);
+}
+
+ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
+ Scope *CurScope,
+ bool IsInstantiation) {
+ // Collect information from the lambda scope.
+ llvm::SmallVector<LambdaExpr::Capture, 4> Captures;
+ llvm::SmallVector<Expr *, 4> CaptureInits;
+ LambdaCaptureDefault CaptureDefault;
+ CXXRecordDecl *Class;
+ CXXMethodDecl *CallOperator;
+ SourceRange IntroducerRange;
+ bool ExplicitParams;
+ bool ExplicitResultType;
+ bool LambdaExprNeedsCleanups;
+ llvm::SmallVector<VarDecl *, 4> ArrayIndexVars;
+ llvm::SmallVector<unsigned, 4> ArrayIndexStarts;
+ {
+ LambdaScopeInfo *LSI = getCurLambda();
+ CallOperator = LSI->CallOperator;
+ Class = LSI->Lambda;
+ IntroducerRange = LSI->IntroducerRange;
+ ExplicitParams = LSI->ExplicitParams;
+ ExplicitResultType = !LSI->HasImplicitReturnType;
+ LambdaExprNeedsCleanups = LSI->ExprNeedsCleanups;
+ ArrayIndexVars.swap(LSI->ArrayIndexVars);
+ ArrayIndexStarts.swap(LSI->ArrayIndexStarts);
+
+ // Translate captures.
+ for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I) {
+ LambdaScopeInfo::Capture From = LSI->Captures[I];
+ assert(!From.isBlockCapture() && "Cannot capture __block variables");
+ bool IsImplicit = I >= LSI->NumExplicitCaptures;
+
+ // Handle 'this' capture.
+ if (From.isThisCapture()) {
+ Captures.push_back(LambdaExpr::Capture(From.getLocation(),
+ IsImplicit,
+ LCK_This));
+ CaptureInits.push_back(new (Context) CXXThisExpr(From.getLocation(),
+ getCurrentThisType(),
+ /*isImplicit=*/true));
+ continue;
+ }
+
+ VarDecl *Var = From.getVariable();
+ LambdaCaptureKind Kind = From.isCopyCapture()? LCK_ByCopy : LCK_ByRef;
+ Captures.push_back(LambdaExpr::Capture(From.getLocation(), IsImplicit,
+ Kind, Var, From.getEllipsisLoc()));
+ CaptureInits.push_back(From.getCopyExpr());
+ }
+
+ switch (LSI->ImpCaptureStyle) {
+ case CapturingScopeInfo::ImpCap_None:
+ CaptureDefault = LCD_None;
+ break;
+
+ case CapturingScopeInfo::ImpCap_LambdaByval:
+ CaptureDefault = LCD_ByCopy;
+ break;
+
+ case CapturingScopeInfo::ImpCap_LambdaByref:
+ CaptureDefault = LCD_ByRef;
+ break;
+
+ case CapturingScopeInfo::ImpCap_Block:
+ llvm_unreachable("block capture in lambda");
+ break;
+ }
+
+ // C++11 [expr.prim.lambda]p4:
+ // If a lambda-expression does not include a
+ // trailing-return-type, it is as if the trailing-return-type
+ // denotes the following type:
+ // FIXME: Assumes current resolution to core issue 975.
+ if (LSI->HasImplicitReturnType) {
+ // - if there are no return statements in the
+ // compound-statement, or all return statements return
+ // either an expression of type void or no expression or
+ // braced-init-list, the type void;
+ if (LSI->ReturnType.isNull()) {
+ LSI->ReturnType = Context.VoidTy;
+ } else {
+ // C++11 [expr.prim.lambda]p4:
+ // - if the compound-statement is of the form
+ //
+ // { attribute-specifier-seq[opt] return expression ; }
+ //
+ // the type of the returned expression after
+ // lvalue-to-rvalue conversion (4.1), array-to-pointer
+ // conversion (4.2), and function-to-pointer conversion
+ // (4.3);
+ //
+ // Since we're accepting the resolution to a post-C++11 core
+ // issue with a non-trivial extension, provide a warning (by
+ // default).
+ CompoundStmt *CompoundBody = cast<CompoundStmt>(Body);
+ if (!(CompoundBody->size() == 1 &&
+ isa<ReturnStmt>(*CompoundBody->body_begin())) &&
+ !Context.hasSameType(LSI->ReturnType, Context.VoidTy))
+ Diag(IntroducerRange.getBegin(),
+ diag::ext_lambda_implies_void_return);
+ }
+
+ // Create a function type with the inferred return type.
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType FunctionTy
+ = Context.getFunctionType(LSI->ReturnType,
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ Proto->getExtProtoInfo());
+ CallOperator->setType(FunctionTy);
+ }
+
+ // C++ [expr.prim.lambda]p7:
+ // The lambda-expression's compound-statement yields the
+ // function-body (8.4) of the function call operator [...].
+ ActOnFinishFunctionBody(CallOperator, Body, IsInstantiation);
+ CallOperator->setLexicalDeclContext(Class);
+ Class->addDecl(CallOperator);
+ PopExpressionEvaluationContext();
+
+ // C++11 [expr.prim.lambda]p6:
+ // The closure type for a lambda-expression with no lambda-capture
+ // has a public non-virtual non-explicit const conversion function
+ // to pointer to function having the same parameter and return
+ // types as the closure type's function call operator.
+ if (Captures.empty() && CaptureDefault == LCD_None)
+ addFunctionPointerConversion(*this, IntroducerRange, Class,
+ CallOperator);
+
+ // Objective-C++:
+ // The closure type for a lambda-expression has a public non-virtual
+ // non-explicit const conversion function to a block pointer having the
+ // same parameter and return types as the closure type's function call
+ // operator.
+ if (getLangOpts().Blocks && getLangOpts().ObjC1)
+ addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
+
+ // Finalize the lambda class.
+ SmallVector<Decl*, 4> Fields(Class->field_begin(), Class->field_end());
+ ActOnFields(0, Class->getLocation(), Class, Fields,
+ SourceLocation(), SourceLocation(), 0);
+ CheckCompletedCXXClass(Class);
+ }
+
+ if (LambdaExprNeedsCleanups)
+ ExprNeedsCleanups = true;
+
+ LambdaExpr *Lambda = LambdaExpr::Create(Context, Class, IntroducerRange,
+ CaptureDefault, Captures,
+ ExplicitParams, ExplicitResultType,
+ CaptureInits, ArrayIndexVars,
+ ArrayIndexStarts, Body->getLocEnd());
+
+ // C++11 [expr.prim.lambda]p2:
+ // A lambda-expression shall not appear in an unevaluated operand
+ // (Clause 5).
+ if (!CurContext->isDependentContext()) {
+ switch (ExprEvalContexts.back().Context) {
+ case Unevaluated:
+ // We don't actually diagnose this case immediately, because we
+ // could be within a context where we might find out later that
+ // the expression is potentially evaluated (e.g., for typeid).
+ ExprEvalContexts.back().Lambdas.push_back(Lambda);
+ break;
+
+ case ConstantEvaluated:
+ case PotentiallyEvaluated:
+ case PotentiallyEvaluatedIfUsed:
+ break;
+ }
+ }
+
+ return MaybeBindToTemporary(Lambda);
+}
+
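The deduction logic above follows C++11 [expr.prim.lambda]p4 (as amended by core issue 975), and the capture-less conversion follows p6. A minimal sketch of the resulting user-visible behavior, with hypothetical names:

    // C++11 sketch: return-type deduction and the function-pointer conversion.
    int main() {
      auto v = [] {};                         // no returns -> deduced void
      auto i = [](int x) { return x + 1; };   // { return expr; } -> deduced int

      // [expr.prim.lambda]p6: only a capture-less closure type has the
      // const conversion to a plain function pointer.
      int (*fp)(int) = i;                     // OK: 'i' captures nothing

      int n = 42;
      auto c = [n](int x) { return x + n; };  // captures 'n'...
      // int (*bad)(int) = c;                 // ...so no such conversion exists

      // Per [expr.prim.lambda]p2, a lambda may not appear in an unevaluated
      // operand, e.g. sizeof([]{ return 0; }()) is ill-formed.
      return fp(n) + c(0);
    }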
+ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
+ SourceLocation ConvLocation,
+ CXXConversionDecl *Conv,
+ Expr *Src) {
+ // Make sure that the lambda call operator is marked used.
+ CXXRecordDecl *Lambda = Conv->getParent();
+ CXXMethodDecl *CallOperator
+ = cast<CXXMethodDecl>(
+ *Lambda->lookup(
+ Context.DeclarationNames.getCXXOperatorName(OO_Call)).first);
+ CallOperator->setReferenced();
+ CallOperator->setUsed();
+
+ ExprResult Init = PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(ConvLocation,
+ Src->getType(),
+ /*NRVO=*/false),
+ CurrentLocation, Src);
+ if (!Init.isInvalid())
+ Init = ActOnFinishFullExpr(Init.take());
+
+ if (Init.isInvalid())
+ return ExprError();
+
+ // Create the new block to be returned.
+ BlockDecl *Block = BlockDecl::Create(Context, CurContext, ConvLocation);
+
+ // Set the type information.
+ Block->setSignatureAsWritten(CallOperator->getTypeSourceInfo());
+ Block->setIsVariadic(CallOperator->isVariadic());
+ Block->setBlockMissingReturnType(false);
+
+ // Add parameters.
+ SmallVector<ParmVarDecl *, 4> BlockParams;
+ for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
+ ParmVarDecl *From = CallOperator->getParamDecl(I);
+ BlockParams.push_back(ParmVarDecl::Create(Context, Block,
+ From->getLocStart(),
+ From->getLocation(),
+ From->getIdentifier(),
+ From->getType(),
+ From->getTypeSourceInfo(),
+ From->getStorageClass(),
+ From->getStorageClassAsWritten(),
+ /*DefaultArg=*/0));
+ }
+ Block->setParams(BlockParams);
+
+ Block->setIsConversionFromLambda(true);
+
+ // Add capture. The capture uses a fake variable, which doesn't correspond
+ // to any actual memory location. However, the initializer copy-initializes
+ // the lambda object.
+ TypeSourceInfo *CapVarTSI =
+ Context.getTrivialTypeSourceInfo(Src->getType());
+ VarDecl *CapVar = VarDecl::Create(Context, Block, ConvLocation,
+ ConvLocation, 0,
+ Src->getType(), CapVarTSI,
+ SC_None, SC_None);
+ BlockDecl::Capture Capture(/*Variable=*/CapVar, /*ByRef=*/false,
+ /*Nested=*/false, /*Copy=*/Init.take());
+ Block->setCaptures(Context, &Capture, &Capture + 1,
+ /*CapturesCXXThis=*/false);
+
+ // Add a fake function body to the block. IR generation is responsible
+ // for filling in the actual body, which cannot be expressed as an AST.
+ Block->setBody(new (Context) CompoundStmt(Context, 0, 0,
+ ConvLocation,
+ ConvLocation));
+
+ // Create the block literal expression.
+ Expr *BuildBlock = new (Context) BlockExpr(Block, Conv->getConversionType());
+ ExprCleanupObjects.push_back(Block);
+ ExprNeedsCleanups = true;
+
+ return BuildBlock;
+}
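This routine backs the block-pointer conversion declared earlier for Objective-C++; the fake capture variable it creates is what lets IR generation copy the closure object into the block. A sketch of the code it enables, assuming Clang's blocks extension in Objective-C++ mode:

    // Sketch: a lambda used where a block pointer is expected (-fblocks, ObjC++).
    #include <cstdio>

    static void enumerate(void (^callback)(int)) {
      for (int i = 0; i < 3; ++i)
        callback(i);
    }

    void demo() {
      int base = 10;
      // The closure object is copy-initialized into the block's capture,
      // mirroring the fake capture variable built above.
      enumerate([base](int i) { printf("%d\n", base + i); });
    }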
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
index d5bee1d..966eb90 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
@@ -25,26 +25,28 @@
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclLookups.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LangOptions.h"
-#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/ADT/edit_distance.h"
#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <iterator>
#include <limits>
#include <list>
+#include <map>
#include <set>
-#include <vector>
-#include <iterator>
#include <utility>
-#include <algorithm>
-#include <map>
+#include <vector>
using namespace clang;
using namespace sema;
@@ -105,13 +107,15 @@ namespace {
assert(InnermostFileDC && InnermostFileDC->isFileContext());
for (; S; S = S->getParent()) {
- if (DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity())) {
- DeclContext *EffectiveDC = (Ctx->isFileContext() ? Ctx : InnermostFileDC);
- visit(Ctx, EffectiveDC);
- } else {
+ // C++ [namespace.udir]p1:
+ // A using-directive shall not appear in class scope, but may
+ // appear in namespace scope or in block scope.
+ DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity());
+ if (Ctx && Ctx->isFileContext()) {
+ visit(Ctx, Ctx);
+ } else if (!Ctx || Ctx->isFunctionOrMethod()) {
Scope::udir_iterator I = S->using_directives_begin(),
End = S->using_directives_end();
-
for (; I != End; ++I)
visit(*I, InnermostFileDC);
}
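A short illustration of the rule being implemented, with hypothetical names:

    // C++ [namespace.udir]p1: using-directives may appear at namespace or
    // block scope, never at class scope.
    namespace lib { int value; }

    void f() {
      using namespace lib;   // block scope: handled by the function/method branch
      (void)value;
    }
    // struct S { using namespace lib; };   // ill-formed: class scope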
@@ -280,7 +284,7 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
}
void LookupResult::configure() {
- IDNS = getIDNS(LookupKind, SemaRef.getLangOptions().CPlusPlus,
+ IDNS = getIDNS(LookupKind, SemaRef.getLangOpts().CPlusPlus,
isForRedeclaration());
// If we're looking for one of the allocation or deallocation
@@ -301,7 +305,9 @@ void LookupResult::configure() {
}
}
-void LookupResult::sanity() const {
+void LookupResult::sanityImpl() const {
+ // Note that this function is never called in NDEBUG builds. See
+ // LookupResult::sanity().
assert(ResultKind != NotFound || Decls.size() == 0);
assert(ResultKind != Found || Decls.size() == 1);
assert(ResultKind != FoundOverloaded || Decls.size() > 1 ||
@@ -321,6 +327,12 @@ void LookupResult::deletePaths(CXXBasePaths *Paths) {
delete Paths;
}
+static NamedDecl *getVisibleDecl(NamedDecl *D);
+
+NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
+ return getVisibleDecl(D);
+}
+
/// Resolves the result kind of this lookup.
void LookupResult::resolveKind() {
unsigned N = Decls.size();
@@ -488,7 +500,7 @@ static bool LookupBuiltin(Sema &S, LookupResult &R) {
if (unsigned BuiltinID = II->getBuiltinID()) {
// In C++, we don't have any predefined library functions like
// 'malloc'. Instead, we'll just error.
- if (S.getLangOptions().CPlusPlus &&
+ if (S.getLangOpts().CPlusPlus &&
S.Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return false;
@@ -518,10 +530,6 @@ static bool LookupBuiltin(Sema &S, LookupResult &R) {
/// the class at this point.
static bool CanDeclareSpecialMemberFunction(ASTContext &Context,
const CXXRecordDecl *Class) {
- // Don't do it if the class is invalid.
- if (Class->isInvalidDecl())
- return false;
-
// We need to have a definition for the class.
if (!Class->getDefinition() || Class->isDependentContext())
return false;
@@ -550,7 +558,7 @@ void Sema::ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class) {
if (!Class->hasDeclaredCopyAssignment())
DeclareImplicitCopyAssignment(Class);
- if (getLangOptions().CPlusPlus0x) {
+ if (getLangOpts().CPlusPlus0x) {
// If the move constructor has not yet been declared, do so now.
if (Class->needsImplicitMoveConstructor())
DeclareImplicitMoveConstructor(Class); // might not actually do it
@@ -601,7 +609,7 @@ static void DeclareImplicitMemberFunctionsWithName(Sema &S,
S.DeclareImplicitDefaultConstructor(Class);
if (!Record->hasDeclaredCopyConstructor())
S.DeclareImplicitCopyConstructor(Class);
- if (S.getLangOptions().CPlusPlus0x &&
+ if (S.getLangOpts().CPlusPlus0x &&
Record->needsImplicitMoveConstructor())
S.DeclareImplicitMoveConstructor(Class);
}
@@ -624,7 +632,7 @@ static void DeclareImplicitMemberFunctionsWithName(Sema &S,
CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(Record);
if (!Record->hasDeclaredCopyAssignment())
S.DeclareImplicitCopyAssignment(Class);
- if (S.getLangOptions().CPlusPlus0x &&
+ if (S.getLangOpts().CPlusPlus0x &&
Record->needsImplicitMoveAssignment())
S.DeclareImplicitMoveAssignment(Class);
}
@@ -642,14 +650,14 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
bool Found = false;
// Lazily declare C++ special member functions.
- if (S.getLangOptions().CPlusPlus)
+ if (S.getLangOpts().CPlusPlus)
DeclareImplicitMemberFunctionsWithName(S, R.getLookupName(), DC);
// Perform lookup into this declaration context.
DeclContext::lookup_const_iterator I, E;
for (llvm::tie(I, E) = DC->lookup(R.getLookupName()); I != E; ++I) {
NamedDecl *D = *I;
- if (R.isAcceptableDecl(D)) {
+ if ((D = R.getAcceptableDecl(D))) {
R.addDecl(D);
Found = true;
}
@@ -830,7 +838,7 @@ static std::pair<DeclContext *, bool> findOuterContext(Scope *S) {
}
bool Sema::CppLookupName(LookupResult &R, Scope *S) {
- assert(getLangOptions().CPlusPlus && "Can perform only C++ lookup");
+ assert(getLangOpts().CPlusPlus && "Can perform only C++ lookup");
DeclarationName Name = R.getLookupName();
@@ -875,9 +883,9 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
// Check whether the IdResolver has anything in this scope.
bool Found = false;
for (; I != IEnd && S->isDeclScope(*I); ++I) {
- if (R.isAcceptableDecl(*I)) {
+ if (NamedDecl *ND = R.getAcceptableDecl(*I)) {
Found = true;
- R.addDecl(*I);
+ R.addDecl(ND);
}
}
if (Found) {
@@ -926,8 +934,8 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(
Name.getAsIdentifierInfo(),
ClassDeclared)) {
- if (R.isAcceptableDecl(Ivar)) {
- R.addDecl(Ivar);
+ if (NamedDecl *ND = R.getAcceptableDecl(Ivar)) {
+ R.addDecl(ND);
R.resolveKind();
return true;
}
@@ -977,13 +985,13 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
// Check whether the IdResolver has anything in this scope.
bool Found = false;
for (; I != IEnd && S->isDeclScope(*I); ++I) {
- if (R.isAcceptableDecl(*I)) {
+ if (NamedDecl *ND = R.getAcceptableDecl(*I)) {
// We found something. Look for anything else in our scope
// with this same name and in an acceptable identifier
// namespace, so that we can construct an overload set if we
// need to.
Found = true;
- R.addDecl(*I);
+ R.addDecl(ND);
}
}
@@ -1047,6 +1055,29 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
return !R.empty();
}
+/// \brief Retrieve the visible declaration corresponding to D, if any.
+///
+/// This routine determines whether the declaration D is visible in the current
+/// module, with the current imports. If not, it checks whether any
+/// redeclaration of D is visible, and if so, returns that declaration.
+///
+/// \returns D, or a visible previous declaration of D, whichever is more recent
+/// and visible. If no declaration of D is visible, returns null.
+static NamedDecl *getVisibleDecl(NamedDecl *D) {
+ if (LookupResult::isVisible(D))
+ return D;
+
+ for (Decl::redecl_iterator RD = D->redecls_begin(), RDEnd = D->redecls_end();
+ RD != RDEnd; ++RD) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*RD)) {
+ if (LookupResult::isVisible(ND))
+ return ND;
+ }
+ }
+
+ return 0;
+}
+
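The walk over redeclarations above is the general pattern for recovering a usable declaration when the most recent one is hidden by module visibility. A generic sketch of the same shape, with hypothetical types standing in for the AST classes:

    // Hypothetical sketch: prefer D itself, else any visible redeclaration.
    struct Decl {
      Decl *Previous;   // earlier redeclaration in the chain, or nullptr
      bool Visible;     // stand-in for LookupResult::isVisible()
    };

    Decl *getVisibleDecl(Decl *D) {
      for (Decl *R = D; R; R = R->Previous)
        if (R->Visible)
          return R;     // most recent visible redeclaration
      return nullptr;   // no declaration of D is visible
    }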
/// @brief Perform unqualified name lookup starting from a given
/// scope.
///
@@ -1084,7 +1115,7 @@ bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
LookupNameKind NameKind = R.getLookupKind();
- if (!getLangOptions().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
// Unqualified name lookup in C/Objective-C is purely lexical, so
// search in the declarations attached to the name.
if (NameKind == Sema::LookupRedeclarationWithLinkage) {
@@ -1123,29 +1154,58 @@ bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
!isa<ImplicitParamDecl>(*I))
continue;
- R.addDecl(*I);
-
- if ((*I)->getAttr<OverloadableAttr>()) {
- // If this declaration has the "overloadable" attribute, we
- // might have a set of overloaded functions.
-
- // Figure out what scope the identifier is in.
- while (!(S->getFlags() & Scope::DeclScope) ||
- !S->isDeclScope(*I))
+ // If this declaration is module-private and it came from an AST
+ // file, we can't see it.
+ NamedDecl *D = R.isHiddenDeclarationVisible()? *I : getVisibleDecl(*I);
+ if (!D)
+ continue;
+
+ R.addDecl(D);
+
+ // Check whether there are any other declarations with the same name
+ // and in the same scope.
+ if (I != IEnd) {
+ // Find the scope in which this declaration was declared (if it
+ // actually exists in a Scope).
+ while (S && !S->isDeclScope(D))
S = S->getParent();
-
- // Find the last declaration in this scope (with the same
- // name, naturally).
+
+ // If the scope containing the declaration is the translation unit,
+ // then we'll need to perform our checks based on the matching
+ // DeclContexts rather than matching scopes.
+ if (S && isNamespaceOrTranslationUnitScope(S))
+ S = 0;
+
+ // Compute the DeclContext, if we need it.
+ DeclContext *DC = 0;
+ if (!S)
+ DC = (*I)->getDeclContext()->getRedeclContext();
+
IdentifierResolver::iterator LastI = I;
for (++LastI; LastI != IEnd; ++LastI) {
- if (!S->isDeclScope(*LastI))
- break;
- R.addDecl(*LastI);
+ if (S) {
+ // Match based on scope.
+ if (!S->isDeclScope(*LastI))
+ break;
+ } else {
+ // Match based on DeclContext.
+ DeclContext *LastDC
+ = (*LastI)->getDeclContext()->getRedeclContext();
+ if (!LastDC->Equals(DC))
+ break;
+ }
+
+ // If the declaration isn't in the right namespace, skip it.
+ if (!(*LastI)->isInIdentifierNamespace(IDNS))
+ continue;
+
+ D = R.isHiddenDeclarationVisible()? *LastI : getVisibleDecl(*LastI);
+ if (D)
+ R.addDecl(D);
}
- }
-
- R.resolveKind();
+ R.resolveKind();
+ }
return true;
}
} else {
@@ -1203,7 +1263,7 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
if (I == E) return false;
// We have at least added all these contexts to the queue.
- llvm::DenseSet<DeclContext*> Visited;
+ llvm::SmallPtrSet<DeclContext*, 8> Visited;
Visited.insert(StartDC);
// We have not yet looked into these namespaces, much less added
@@ -1214,7 +1274,7 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
// with its using-children.
for (; I != E; ++I) {
NamespaceDecl *ND = (*I)->getNominatedNamespace()->getOriginalNamespace();
- if (Visited.insert(ND).second)
+ if (Visited.insert(ND))
Queue.push_back(ND);
}
@@ -1263,7 +1323,7 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
for (llvm::tie(I,E) = ND->getUsingDirectives(); I != E; ++I) {
NamespaceDecl *Nom = (*I)->getNominatedNamespace();
- if (Visited.insert(Nom).second)
+ if (Visited.insert(Nom))
Queue.push_back(Nom);
}
}
@@ -1354,8 +1414,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
assert((!isa<TagDecl>(LookupCtx) ||
LookupCtx->isDependentContext() ||
cast<TagDecl>(LookupCtx)->isCompleteDefinition() ||
- Context.getTypeDeclType(cast<TagDecl>(LookupCtx))->getAs<TagType>()
- ->isBeingDefined()) &&
+ cast<TagDecl>(LookupCtx)->isBeingDefined()) &&
"Declaration context must already be complete!");
// Perform qualified name lookup into the LookupCtx.
@@ -1561,13 +1620,14 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
return false;
R.setContextRange(SS->getRange());
-
return LookupQualifiedName(R, DC);
}
// We could not resolve the scope specified to a specific declaration
// context, which means that SS refers to an unknown specialization.
// Name lookup can't find anything in this case.
+ R.setNotFoundInCurrentInstantiation();
+ R.setContextRange(SS->getRange());
return false;
}
@@ -1673,7 +1733,6 @@ bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
}
llvm_unreachable("unknown ambiguity kind");
- return true;
}
namespace {
@@ -2022,7 +2081,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
/// namespaces searched by argument-dependent lookup
/// (C++ [basic.lookup.argdep]) for a given set of arguments.
void
-Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
+Sema::FindAssociatedClassesAndNamespaces(llvm::ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses) {
AssociatedNamespaces.clear();
@@ -2037,7 +2096,7 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
// classes is determined entirely by the types of the function
// arguments (and the namespace of any template template
// argument).
- for (unsigned ArgIdx = 0; ArgIdx != NumArgs; ++ArgIdx) {
+ for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
Expr *Arg = Args[ArgIdx];
if (Arg->getType() != Context.OverloadTy) {
@@ -2124,9 +2183,10 @@ NamedDecl *Sema::LookupSingleName(Scope *S, DeclarationName Name,
/// \brief Find the protocol with the given name, if any.
ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
- SourceLocation IdLoc) {
+ SourceLocation IdLoc,
+ RedeclarationKind Redecl) {
Decl *D = LookupSingleName(TUScope, II, IdLoc,
- LookupObjCProtocolName);
+ LookupObjCProtocolName, Redecl);
return cast_or_null<ObjCProtocolDecl>(D);
}
@@ -2215,8 +2275,9 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
CXXDestructorDecl *DD = RD->getDestructor();
assert(DD && "record without a destructor");
Result->setMethod(DD);
- Result->setSuccess(DD->isDeleted());
- Result->setConstParamMatch(false);
+ Result->setKind(DD->isDeleted() ?
+ SpecialMemberOverloadResult::NoMemberOrDeleted :
+ SpecialMemberOverloadResult::SuccessNonConst);
return Result;
}
@@ -2237,13 +2298,13 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
if (!RD->hasDeclaredCopyConstructor())
DeclareImplicitCopyConstructor(RD);
- if (getLangOptions().CPlusPlus0x && RD->needsImplicitMoveConstructor())
+ if (getLangOpts().CPlusPlus0x && RD->needsImplicitMoveConstructor())
DeclareImplicitMoveConstructor(RD);
} else {
Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
if (!RD->hasDeclaredCopyAssignment())
DeclareImplicitCopyAssignment(RD);
- if (getLangOptions().CPlusPlus0x && RD->needsImplicitMoveAssignment())
+ if (getLangOpts().CPlusPlus0x && RD->needsImplicitMoveAssignment())
DeclareImplicitMoveAssignment(RD);
}
@@ -2286,7 +2347,8 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
// will always be a (possibly implicit) declaration to shadow any others.
OverloadCandidateSet OCS((SourceLocation()));
DeclContext::lookup_iterator I, E;
- Result->setConstParamMatch(false);
+ SpecialMemberOverloadResult::Kind SuccessKind =
+ SpecialMemberOverloadResult::SuccessNonConst;
llvm::tie(I, E) = RD->lookup(Name);
assert((I != E) &&
@@ -2311,10 +2373,11 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Cand)) {
if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
AddMethodCandidate(M, DeclAccessPair::make(M, AS_public), RD, ThisTy,
- Classification, &Arg, NumArgs, OCS, true);
+ Classification, llvm::makeArrayRef(&Arg, NumArgs),
+ OCS, true);
else
- AddOverloadCandidate(M, DeclAccessPair::make(M, AS_public), &Arg,
- NumArgs, OCS, true);
+ AddOverloadCandidate(M, DeclAccessPair::make(M, AS_public),
+ llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
// Here we're looking for a const parameter to speed up creation of
// implicit copy methods.
@@ -2324,17 +2387,19 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
QualType ArgType = M->getType()->getAs<FunctionProtoType>()->getArgType(0);
if (!ArgType->isReferenceType() ||
ArgType->getPointeeType().isConstQualified())
- Result->setConstParamMatch(true);
+ SuccessKind = SpecialMemberOverloadResult::SuccessConst;
}
} else if (FunctionTemplateDecl *Tmpl =
dyn_cast<FunctionTemplateDecl>(Cand)) {
if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
AddMethodTemplateCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
- RD, 0, ThisTy, Classification, &Arg, NumArgs,
+ RD, 0, ThisTy, Classification,
+ llvm::makeArrayRef(&Arg, NumArgs),
OCS, true);
else
AddTemplateOverloadCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
- 0, &Arg, NumArgs, OCS, true);
+ 0, llvm::makeArrayRef(&Arg, NumArgs),
+ OCS, true);
} else {
assert(isa<UsingDecl>(Cand) && "illegal Kind of operator = Decl");
}
@@ -2344,18 +2409,22 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
switch (OCS.BestViableFunction(*this, SourceLocation(), Best)) {
case OR_Success:
Result->setMethod(cast<CXXMethodDecl>(Best->Function));
- Result->setSuccess(true);
+ Result->setKind(SuccessKind);
break;
case OR_Deleted:
Result->setMethod(cast<CXXMethodDecl>(Best->Function));
- Result->setSuccess(false);
+ Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
break;
case OR_Ambiguous:
+ Result->setMethod(0);
+ Result->setKind(SpecialMemberOverloadResult::Ambiguous);
+ break;
+
case OR_No_Viable_Function:
Result->setMethod(0);
- Result->setSuccess(false);
+ Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
break;
}
@@ -2404,7 +2473,7 @@ DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
DeclareImplicitDefaultConstructor(Class);
if (!Class->hasDeclaredCopyConstructor())
DeclareImplicitCopyConstructor(Class);
- if (getLangOptions().CPlusPlus0x && Class->needsImplicitMoveConstructor())
+ if (getLangOpts().CPlusPlus0x && Class->needsImplicitMoveConstructor())
DeclareImplicitMoveConstructor(Class);
}
@@ -2460,6 +2529,105 @@ CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) {
false, false)->getMethod());
}
+/// LookupLiteralOperator - Determine which literal operator should be used for
+/// a user-defined literal, per C++11 [lex.ext].
+///
+/// Normal overload resolution is not used to select which literal operator to
+/// call for a user-defined literal. Look up the provided literal operator name,
+/// and filter the results to the appropriate set for the given argument types.
+Sema::LiteralOperatorLookupResult
+Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
+ ArrayRef<QualType> ArgTys,
+ bool AllowRawAndTemplate) {
+ LookupName(R, S);
+ assert(R.getResultKind() != LookupResult::Ambiguous &&
+ "literal operator lookup can't be ambiguous");
+
+ // Filter the lookup results appropriately.
+ LookupResult::Filter F = R.makeFilter();
+
+ bool FoundTemplate = false;
+ bool FoundRaw = false;
+ bool FoundExactMatch = false;
+
+ while (F.hasNext()) {
+ Decl *D = F.next();
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
+ D = USD->getTargetDecl();
+
+ bool IsTemplate = isa<FunctionTemplateDecl>(D);
+ bool IsRaw = false;
+ bool IsExactMatch = false;
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getNumParams() == 1 &&
+ FD->getParamDecl(0)->getType()->getAs<PointerType>())
+ IsRaw = true;
+ else {
+ IsExactMatch = true;
+ for (unsigned ArgIdx = 0; ArgIdx != ArgTys.size(); ++ArgIdx) {
+ QualType ParamTy = FD->getParamDecl(ArgIdx)->getType();
+ if (!Context.hasSameUnqualifiedType(ArgTys[ArgIdx], ParamTy)) {
+ IsExactMatch = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (IsExactMatch) {
+ FoundExactMatch = true;
+ AllowRawAndTemplate = false;
+ if (FoundRaw || FoundTemplate) {
+ // Go through again and remove the raw and template decls we've
+ // already found.
+ F.restart();
+ FoundRaw = FoundTemplate = false;
+ }
+ } else if (AllowRawAndTemplate && (IsTemplate || IsRaw)) {
+ FoundTemplate |= IsTemplate;
+ FoundRaw |= IsRaw;
+ } else {
+ F.erase();
+ }
+ }
+
+ F.done();
+
+ // C++11 [lex.ext]p3, p4: If S contains a literal operator with a matching
+ // parameter type, that is used in preference to a raw literal operator
+ // or literal operator template.
+ if (FoundExactMatch)
+ return LOLR_Cooked;
+
+ // C++11 [lex.ext]p3, p4: S shall contain a raw literal operator or a literal
+ // operator template, but not both.
+ if (FoundRaw && FoundTemplate) {
+ Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ Decl *D = *I;
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
+ D = USD->getTargetDecl();
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
+ D = FunTmpl->getTemplatedDecl();
+ NoteOverloadCandidate(cast<FunctionDecl>(D));
+ }
+ return LOLR_Error;
+ }
+
+ if (FoundRaw)
+ return LOLR_Raw;
+
+ if (FoundTemplate)
+ return LOLR_Template;
+
+ // Didn't find anything we could use.
+ Diag(R.getNameLoc(), diag::err_ovl_no_viable_literal_operator)
+ << R.getLookupName() << (int)ArgTys.size() << ArgTys[0]
+ << (ArgTys.size() == 2 ? ArgTys[1] : QualType()) << AllowRawAndTemplate;
+ return LOLR_Error;
+}
+
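The preference order this function encodes -- a cooked operator with exactly matching parameter types beats raw and template forms, and raw and template forms may not coexist -- is easiest to see against a concrete set of operators (hypothetical suffixes):

    // C++11 [lex.ext] sketch: cooked vs. raw vs. template literal operators.
    typedef unsigned long long ull;

    constexpr ull operator"" _c(ull v) { return v; }   // cooked: matching type
    ull operator"" _r(const char *spelling);           // raw: sees "123"
    template <char... Cs> ull operator"" _t();         // literal operator template

    // 123_c selects the cooked form (LOLR_Cooked). A suffix with only a raw
    // or only a template operator in scope yields LOLR_Raw / LOLR_Template;
    // finding both at once is diagnosed as ambiguous (LOLR_Error).
    static_assert(123_c == 123, "cooked form preferred");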
void ADLResult::insert(NamedDecl *New) {
NamedDecl *&Old = Decls[cast<NamedDecl>(New->getCanonicalDecl())];
@@ -2482,7 +2650,7 @@ void ADLResult::insert(NamedDecl *New) {
FunctionDecl *Cursor = NewFD;
while (true) {
- Cursor = Cursor->getPreviousDeclaration();
+ Cursor = Cursor->getPreviousDecl();
// If we got to the end without finding OldFD, OldFD is the newer
// declaration; leave things as they are.
@@ -2498,14 +2666,15 @@ void ADLResult::insert(NamedDecl *New) {
}
void Sema::ArgumentDependentLookup(DeclarationName Name, bool Operator,
- Expr **Args, unsigned NumArgs,
+ SourceLocation Loc,
+ llvm::ArrayRef<Expr *> Args,
ADLResult &Result,
bool StdNamespaceIsAssociated) {
// Find all of the associated namespaces and classes based on the
// arguments we have.
AssociatedNamespaceSet AssociatedNamespaces;
AssociatedClassSet AssociatedClasses;
- FindAssociatedClassesAndNamespaces(Args, NumArgs,
+ FindAssociatedClassesAndNamespaces(Args,
AssociatedNamespaces,
AssociatedClasses);
if (StdNamespaceIsAssociated && StdNamespace)
@@ -2514,10 +2683,17 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, bool Operator,
QualType T1, T2;
if (Operator) {
T1 = Args[0]->getType();
- if (NumArgs >= 2)
+ if (Args.size() >= 2)
T2 = Args[1]->getType();
}
+ // Try to complete all associated classes, in case they contain a
+ // declaration of a friend function.
+ for (AssociatedClassSet::iterator C = AssociatedClasses.begin(),
+ CEnd = AssociatedClasses.end();
+ C != CEnd; ++C)
+ RequireCompleteType(Loc, Context.getRecordType(*C), 0);
+
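Completing the associated classes matters because a friend function defined inside a class template only becomes visible once the template is instantiated; a sketch with hypothetical names:

    // Sketch: ADL must complete Wrapper<int> to see its friend.
    template <typename T> struct Wrapper {
      friend void notify(Wrapper *) {}   // findable only through ADL
    };

    void demo(Wrapper<int> *p) {
      notify(p);   // instantiating Wrapper<int> here injects the friend
    }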
// C++ [basic.lookup.argdep]p3:
// Let X be the lookup set produced by unqualified lookup (3.4.1)
// and let Y be the lookup set produced by argument dependent
@@ -2700,42 +2876,15 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
Result.getSema().ForceDeclarationOfImplicitMembers(Class);
// Enumerate all of the results in this context.
- for (DeclContext *CurCtx = Ctx->getPrimaryContext(); CurCtx;
- CurCtx = CurCtx->getNextContext()) {
- for (DeclContext::decl_iterator D = CurCtx->decls_begin(),
- DEnd = CurCtx->decls_end();
- D != DEnd; ++D) {
- if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) {
- if (Result.isAcceptableDecl(ND)) {
+ for (DeclContext::all_lookups_iterator L = Ctx->lookups_begin(),
+ LEnd = Ctx->lookups_end();
+ L != LEnd; ++L) {
+ for (DeclContext::lookup_result R = *L; R.first != R.second; ++R.first) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*R.first)) {
+ if ((ND = Result.getAcceptableDecl(ND))) {
Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
Visited.add(ND);
}
- } else if (ObjCForwardProtocolDecl *ForwardProto
- = dyn_cast<ObjCForwardProtocolDecl>(*D)) {
- for (ObjCForwardProtocolDecl::protocol_iterator
- P = ForwardProto->protocol_begin(),
- PEnd = ForwardProto->protocol_end();
- P != PEnd;
- ++P) {
- if (Result.isAcceptableDecl(*P)) {
- Consumer.FoundDecl(*P, Visited.checkHidden(*P), Ctx, InBaseClass);
- Visited.add(*P);
- }
- }
- } else if (ObjCClassDecl *Class = dyn_cast<ObjCClassDecl>(*D)) {
- ObjCInterfaceDecl *IFace = Class->getForwardInterfaceDecl();
- if (Result.isAcceptableDecl(IFace)) {
- Consumer.FoundDecl(IFace, Visited.checkHidden(IFace), Ctx,
- InBaseClass);
- Visited.add(IFace);
- }
- }
-
- // Visit transparent contexts and inline namespaces inside this context.
- if (DeclContext *InnerCtx = dyn_cast<DeclContext>(*D)) {
- if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
- LookupVisibleDecls(InnerCtx, Result, QualifiedNameLookup, InBaseClass,
- Consumer, Visited);
}
}
}
@@ -2826,7 +2975,7 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
if (IFace->getImplementation()) {
ShadowContextRAII Shadow(Visited);
LookupVisibleDecls(IFace->getImplementation(), Result,
- QualifiedNameLookup, true, Consumer, Visited);
+ QualifiedNameLookup, InBaseClass, Consumer, Visited);
}
} else if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Ctx)) {
for (ObjCProtocolDecl::protocol_iterator I = Protocol->protocol_begin(),
@@ -2867,7 +3016,7 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result,
for (Scope::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
D != DEnd; ++D) {
if (NamedDecl *ND = dyn_cast<NamedDecl>(*D))
- if (Result.isAcceptableDecl(ND)) {
+ if ((ND = Result.getAcceptableDecl(ND))) {
Consumer.FoundDecl(ND, Visited.checkHidden(ND), 0, false);
Visited.add(ND);
}
@@ -2946,7 +3095,7 @@ void Sema::LookupVisibleDecls(Scope *S, LookupNameKind Kind,
// unqualified name lookup.
Scope *Initial = S;
UnqualUsingDirectiveSet UDirs;
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Find the first namespace or translation-unit scope.
while (S && !isNamespaceOrTranslationUnitScope(S))
S = S->getParent();
@@ -3031,15 +3180,11 @@ class TypoCorrectionConsumer : public VisibleDeclConsumer {
/// whether there is a keyword with this name.
TypoEditDistanceMap BestResults;
- /// \brief The worst of the best N edit distances found so far.
- unsigned MaxEditDistance;
-
Sema &SemaRef;
public:
explicit TypoCorrectionConsumer(Sema &SemaRef, IdentifierInfo *Typo)
: Typo(Typo->getName()),
- MaxEditDistance((std::numeric_limits<unsigned>::max)()),
SemaRef(SemaRef) { }
~TypoCorrectionConsumer() {
@@ -3070,12 +3215,12 @@ public:
return (*BestResults.begin()->second)[Name];
}
- unsigned getMaxEditDistance() const {
- return MaxEditDistance;
- }
+ unsigned getBestEditDistance(bool Normalized) {
+ if (BestResults.empty())
+ return (std::numeric_limits<unsigned>::max)();
- unsigned getBestEditDistance() {
- return (BestResults.empty()) ? MaxEditDistance : BestResults.begin()->first;
+ unsigned BestED = BestResults.begin()->first;
+ return Normalized ? TypoCorrection::NormalizeEditDistance(BestED) : BestED;
}
};
@@ -3101,40 +3246,22 @@ void TypoCorrectionConsumer::FoundName(StringRef Name) {
// Use a simple length-based heuristic to determine the minimum possible
// edit distance. If the minimum isn't good enough, bail out early.
unsigned MinED = abs((int)Name.size() - (int)Typo.size());
- if (MinED > MaxEditDistance || (MinED && Typo.size() / MinED < 3))
+ if (MinED && Typo.size() / MinED < 3)
return;
// Compute an upper bound on the allowable edit distance, so that the
// edit-distance algorithm can short-circuit.
- unsigned UpperBound =
- std::min(unsigned((Typo.size() + 2) / 3), MaxEditDistance);
+ unsigned UpperBound = (Typo.size() + 2) / 3;
// Compute the edit distance between the typo and the name of this
- // entity. If this edit distance is not worse than the best edit
- // distance we've seen so far, add it to the list of results.
- unsigned ED = Typo.edit_distance(Name, true, UpperBound);
-
- if (ED > MaxEditDistance) {
- // This result is worse than the best results we've seen so far;
- // ignore it.
- return;
- }
-
- addName(Name, NULL, ED);
+ // entity, and add the identifier to the list of results.
+ addName(Name, NULL, Typo.edit_distance(Name, true, UpperBound));
}
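Both bounds above are cheap character-count arithmetic; a worked instance with assumed values:

    // Worked instance for Typo = "stirng" (6 chars):
    //   Name = "string" (6 chars):  MinED = |6-6| = 0  -> not skipped;
    //     edit_distance("stirng", "string") = 2, within the bound.
    //   Name = "basic_string_operations" (23 chars):  MinED = 17,
    //     6/17 == 0 < 3  -> skipped before any edit-distance work.
    // UpperBound = (6 + 2) / 3 = 2 lets the O(m*n) computation abandon a
    // candidate as soon as its running distance exceeds 2.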
void TypoCorrectionConsumer::addKeywordResult(StringRef Keyword) {
- // Compute the edit distance between the typo and this keyword.
- // If this edit distance is not worse than the best edit
- // distance we've seen so far, add it to the list of results.
- unsigned ED = Typo.edit_distance(Keyword);
- if (ED > MaxEditDistance) {
- // This result is worse than the best results we've seen so far;
- // ignore it.
- return;
- }
-
- addName(Keyword, NULL, ED, NULL, true);
+ // Compute the edit distance between the typo and this keyword,
+ // and add the keyword to the list of results.
+ addName(Keyword, NULL, Typo.edit_distance(Keyword), NULL, true);
}
void TypoCorrectionConsumer::addName(StringRef Name,
@@ -3149,7 +3276,7 @@ void TypoCorrectionConsumer::addName(StringRef Name,
void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
StringRef Name = Correction.getCorrectionAsIdentifierInfo()->getName();
- TypoResultsMap *& Map = BestResults[Correction.getEditDistance()];
+ TypoResultsMap *& Map = BestResults[Correction.getEditDistance(false)];
if (!Map)
Map = new TypoResultsMap;
@@ -3158,8 +3285,8 @@ void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
// FIXME: The following should be rolled up into an operator< on
// TypoCorrection with a more principled definition.
CurrentCorrection.isKeyword() < Correction.isKeyword() ||
- Correction.getAsString(SemaRef.getLangOptions()) <
- CurrentCorrection.getAsString(SemaRef.getLangOptions()))
+ Correction.getAsString(SemaRef.getLangOpts()) <
+ CurrentCorrection.getAsString(SemaRef.getLangOpts()))
CurrentCorrection = Correction;
while (BestResults.size() > MaxTypoDistanceResultSets) {
@@ -3170,6 +3297,47 @@ void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
}
}
+// Fill the supplied vector with the IdentifierInfo pointers for each piece of
+// the given NestedNameSpecifier (e.g. given a NestedNameSpecifier "foo::bar::",
+// fill the vector with the IdentifierInfo pointers for "foo" and "bar").
+static void getNestedNameSpecifierIdentifiers(
+ NestedNameSpecifier *NNS,
+ SmallVectorImpl<const IdentifierInfo*> &Identifiers) {
+ if (NestedNameSpecifier *Prefix = NNS->getPrefix())
+ getNestedNameSpecifierIdentifiers(Prefix, Identifiers);
+ else
+ Identifiers.clear();
+
+ const IdentifierInfo *II = NULL;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ II = NNS->getAsIdentifier();
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ if (NNS->getAsNamespace()->isAnonymousNamespace())
+ return;
+ II = NNS->getAsNamespace()->getIdentifier();
+ break;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ II = NNS->getAsNamespaceAlias()->getIdentifier();
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec:
+ II = QualType(NNS->getAsType(), 0).getBaseTypeIdentifier();
+ break;
+
+ case NestedNameSpecifier::Global:
+ return;
+ }
+
+ if (II)
+ Identifiers.push_back(II);
+}
+
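The flattening makes qualifier comparisons work on identifier sequences; for instance (hypothetical qualifiers):

    // Examples of the mapping computed above:
    //   foo::bar::          -> { "foo", "bar" }
    //   ::std::chrono::     -> { "std", "chrono" }  (Global contributes nothing)
    //   std::vector<int>::  -> { "std", "vector" }  (TypeSpec contributes the
    //                                                base type's identifier)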
namespace {
class SpecifierInfo {
@@ -3188,6 +3356,8 @@ typedef SmallVector<SpecifierInfo, 16> SpecifierInfoList;
class NamespaceSpecifierSet {
ASTContext &Context;
DeclContextList CurContextChain;
+ SmallVector<const IdentifierInfo*, 4> CurContextIdentifiers;
+ SmallVector<const IdentifierInfo*, 4> CurNameSpecifierIdentifiers;
bool isSorted;
SpecifierInfoList Specifiers;
@@ -3201,9 +3371,23 @@ class NamespaceSpecifierSet {
void SortNamespaces();
public:
- explicit NamespaceSpecifierSet(ASTContext &Context, DeclContext *CurContext)
+ NamespaceSpecifierSet(ASTContext &Context, DeclContext *CurContext,
+ CXXScopeSpec *CurScopeSpec)
: Context(Context), CurContextChain(BuildContextChain(CurContext)),
- isSorted(true) {}
+ isSorted(true) {
+ if (CurScopeSpec && CurScopeSpec->getScopeRep())
+ getNestedNameSpecifierIdentifiers(CurScopeSpec->getScopeRep(),
+ CurNameSpecifierIdentifiers);
+ // Build the list of identifiers that would be used for an absolute
+ // (from the global context) NestedNameSpecifier referring to the current
+ // context.
+ for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(),
+ CEnd = CurContextChain.rend();
+ C != CEnd; ++C) {
+ if (NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(*C))
+ CurContextIdentifiers.push_back(ND->getIdentifier());
+ }
+ }
/// \brief Add the namespace to the set, computing the corresponding
/// NestedNameSpecifier and its distance in the process.
@@ -3241,7 +3425,7 @@ void NamespaceSpecifierSet::SortNamespaces() {
Specifiers.clear();
for (SmallVector<unsigned, 4>::iterator DI = sortedDistances.begin(),
- DIEnd = sortedDistances.end();
+ DIEnd = sortedDistances.end();
DI != DIEnd; ++DI) {
SpecifierInfoList &SpecList = DistanceMap[*DI];
Specifiers.append(SpecList.begin(), SpecList.end());
@@ -3255,8 +3439,9 @@ void NamespaceSpecifierSet::AddNamespace(NamespaceDecl *ND) {
NestedNameSpecifier *NNS = NULL;
unsigned NumSpecifiers = 0;
DeclContextList NamespaceDeclChain(BuildContextChain(Ctx));
+ DeclContextList FullNamespaceDeclChain(NamespaceDeclChain);
- // Eliminate common elements from the two DeclContext chains
+ // Eliminate common elements from the two DeclContext chains.
for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(),
CEnd = CurContextChain.rend();
C != CEnd && !NamespaceDeclChain.empty() &&
@@ -3264,6 +3449,21 @@ void NamespaceSpecifierSet::AddNamespace(NamespaceDecl *ND) {
NamespaceDeclChain.pop_back();
}
+ // Add an explicit leading '::' specifier if needed.
+ if (NamespaceDecl *ND =
+ NamespaceDeclChain.empty() ? NULL :
+ dyn_cast_or_null<NamespaceDecl>(NamespaceDeclChain.back())) {
+ IdentifierInfo *Name = ND->getIdentifier();
+ if (std::find(CurContextIdentifiers.begin(), CurContextIdentifiers.end(),
+ Name) != CurContextIdentifiers.end() ||
+ std::find(CurNameSpecifierIdentifiers.begin(),
+ CurNameSpecifierIdentifiers.end(),
+ Name) != CurNameSpecifierIdentifiers.end()) {
+ NamespaceDeclChain = FullNamespaceDeclChain;
+ NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ }
+ }
+
// Build the NestedNameSpecifier from what is left of the NamespaceDeclChain
for (DeclContextList::reverse_iterator C = NamespaceDeclChain.rbegin(),
CEnd = NamespaceDeclChain.rend();
@@ -3275,6 +3475,18 @@ void NamespaceSpecifierSet::AddNamespace(NamespaceDecl *ND) {
}
}
+ // If the built NestedNameSpecifier would be replacing an existing
+ // NestedNameSpecifier, use the number of component identifiers that
+ // would need to be changed as the edit distance instead of the number
+ // of components in the built NestedNameSpecifier.
+ if (NNS && !CurNameSpecifierIdentifiers.empty()) {
+ SmallVector<const IdentifierInfo*, 4> NewNameSpecifierIdentifiers;
+ getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers);
+ NumSpecifiers = llvm::ComputeEditDistance(
+ llvm::ArrayRef<const IdentifierInfo*>(CurNameSpecifierIdentifiers),
+ llvm::ArrayRef<const IdentifierInfo*>(NewNameSpecifierIdentifiers));
+ }
+
isSorted = false;
Distances.insert(NumSpecifiers);
DistanceMap[NumSpecifiers].push_back(SpecifierInfo(Ctx, NNS, NumSpecifiers));
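The qualifier distance is therefore an edit distance over component identifiers rather than over characters, using the list-based ComputeEditDistance from the newly imported edit_distance.h. A sketch with assumed identifier lists:

    // Sketch: distance between a written and a suggested qualifier.
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/edit_distance.h"

    unsigned qualifierDistance() {
      llvm::StringRef Cur[] = { "std", "tr1" };   // written:   std::tr1::
      llvm::StringRef New[] = { "std" };          // suggested: std::
      // One component ("tr1") must be dropped -> distance 1.
      return llvm::ComputeEditDistance(llvm::ArrayRef<llvm::StringRef>(Cur),
                                       llvm::ArrayRef<llvm::StringRef>(New));
    }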
@@ -3287,13 +3499,13 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
Scope *S, CXXScopeSpec *SS,
DeclContext *MemberContext,
bool EnteringContext,
- Sema::CorrectTypoContext CTC) {
+ bool isObjCIvarLookup) {
Res.suppressDiagnostics();
Res.clear();
Res.setLookupName(Name);
if (MemberContext) {
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(MemberContext)) {
- if (CTC == Sema::CTC_ObjCIvarLookup) {
+ if (isObjCIvarLookup) {
if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(Name)) {
Res.addDecl(Ivar);
Res.resolveKind();
@@ -3334,61 +3546,11 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
/// \brief Add keywords to the consumer as possible typo corrections.
static void AddKeywordsToConsumer(Sema &SemaRef,
TypoCorrectionConsumer &Consumer,
- Scope *S, Sema::CorrectTypoContext CTC) {
- // Add context-dependent keywords.
- bool WantTypeSpecifiers = false;
- bool WantExpressionKeywords = false;
- bool WantCXXNamedCasts = false;
- bool WantRemainingKeywords = false;
- switch (CTC) {
- case Sema::CTC_Unknown:
- WantTypeSpecifiers = true;
- WantExpressionKeywords = true;
- WantCXXNamedCasts = true;
- WantRemainingKeywords = true;
-
- if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
- if (Method->getClassInterface() &&
- Method->getClassInterface()->getSuperClass())
- Consumer.addKeywordResult("super");
-
- break;
-
- case Sema::CTC_NoKeywords:
- break;
-
- case Sema::CTC_Type:
- WantTypeSpecifiers = true;
- break;
-
- case Sema::CTC_ObjCMessageReceiver:
- Consumer.addKeywordResult("super");
- // Fall through to handle message receivers like expressions.
-
- case Sema::CTC_Expression:
- if (SemaRef.getLangOptions().CPlusPlus)
- WantTypeSpecifiers = true;
- WantExpressionKeywords = true;
- // Fall through to get C++ named casts.
-
- case Sema::CTC_CXXCasts:
- WantCXXNamedCasts = true;
- break;
-
- case Sema::CTC_ObjCPropertyLookup:
- // FIXME: Add "isa"?
- break;
-
- case Sema::CTC_MemberLookup:
- if (SemaRef.getLangOptions().CPlusPlus)
- Consumer.addKeywordResult("template");
- break;
-
- case Sema::CTC_ObjCIvarLookup:
- break;
- }
+ Scope *S, CorrectionCandidateCallback &CCC) {
+ if (CCC.WantObjCSuper)
+ Consumer.addKeywordResult("super");
- if (WantTypeSpecifiers) {
+ if (CCC.WantTypeSpecifiers) {
// Add type-specifier keywords to the set of results.
const char *CTypeSpecs[] = {
"char", "const", "double", "enum", "float", "int", "long", "short",
@@ -3402,19 +3564,19 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
for (unsigned I = 0; I != NumCTypeSpecs; ++I)
Consumer.addKeywordResult(CTypeSpecs[I]);
- if (SemaRef.getLangOptions().C99)
+ if (SemaRef.getLangOpts().C99)
Consumer.addKeywordResult("restrict");
- if (SemaRef.getLangOptions().Bool || SemaRef.getLangOptions().CPlusPlus)
+ if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus)
Consumer.addKeywordResult("bool");
- else if (SemaRef.getLangOptions().C99)
+ else if (SemaRef.getLangOpts().C99)
Consumer.addKeywordResult("_Bool");
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
Consumer.addKeywordResult("class");
Consumer.addKeywordResult("typename");
Consumer.addKeywordResult("wchar_t");
- if (SemaRef.getLangOptions().CPlusPlus0x) {
+ if (SemaRef.getLangOpts().CPlusPlus0x) {
Consumer.addKeywordResult("char16_t");
Consumer.addKeywordResult("char32_t");
Consumer.addKeywordResult("constexpr");
@@ -3423,25 +3585,25 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
}
}
- if (SemaRef.getLangOptions().GNUMode)
+ if (SemaRef.getLangOpts().GNUMode)
Consumer.addKeywordResult("typeof");
}
- if (WantCXXNamedCasts && SemaRef.getLangOptions().CPlusPlus) {
+ if (CCC.WantCXXNamedCasts && SemaRef.getLangOpts().CPlusPlus) {
Consumer.addKeywordResult("const_cast");
Consumer.addKeywordResult("dynamic_cast");
Consumer.addKeywordResult("reinterpret_cast");
Consumer.addKeywordResult("static_cast");
}
- if (WantExpressionKeywords) {
+ if (CCC.WantExpressionKeywords) {
Consumer.addKeywordResult("sizeof");
- if (SemaRef.getLangOptions().Bool || SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus) {
Consumer.addKeywordResult("false");
Consumer.addKeywordResult("true");
}
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
const char *CXXExprs[] = {
"delete", "new", "operator", "throw", "typeid"
};
@@ -3453,14 +3615,14 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
cast<CXXMethodDecl>(SemaRef.CurContext)->isInstance())
Consumer.addKeywordResult("this");
- if (SemaRef.getLangOptions().CPlusPlus0x) {
+ if (SemaRef.getLangOpts().CPlusPlus0x) {
Consumer.addKeywordResult("alignof");
Consumer.addKeywordResult("nullptr");
}
}
}
- if (WantRemainingKeywords) {
+ if (CCC.WantRemainingKeywords) {
if (SemaRef.getCurFunctionOrMethodDecl() || SemaRef.getCurBlock()) {
// Statements.
const char *CStmts[] = {
@@ -3469,7 +3631,7 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
for (unsigned I = 0; I != NumCStmts; ++I)
Consumer.addKeywordResult(CStmts[I]);
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
Consumer.addKeywordResult("catch");
Consumer.addKeywordResult("try");
}
@@ -3485,7 +3647,7 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
Consumer.addKeywordResult("default");
}
} else {
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
Consumer.addKeywordResult("namespace");
Consumer.addKeywordResult("template");
}
@@ -3501,15 +3663,21 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
}
}
- if (SemaRef.getLangOptions().CPlusPlus) {
+ if (SemaRef.getLangOpts().CPlusPlus) {
Consumer.addKeywordResult("using");
- if (SemaRef.getLangOptions().CPlusPlus0x)
+ if (SemaRef.getLangOpts().CPlusPlus0x)
Consumer.addKeywordResult("static_assert");
}
}
}
+static bool isCandidateViable(CorrectionCandidateCallback &CCC,
+ TypoCorrection &Candidate) {
+ Candidate.setCallbackDistance(CCC.RankCandidate(Candidate));
+ return Candidate.getEditDistance(false) != TypoCorrection::InvalidDistance;
+}
+
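Callers customize both filtering and the keyword set through CorrectionCandidateCallback, whose RankCandidate hook and Want* flags appear throughout this file. A hypothetical callback that accepts only type names might look like this sketch (only the members this file itself uses are assumed):

    // Hypothetical sketch of a validating callback.
    class TypeNameCallback : public CorrectionCandidateCallback {
    public:
      TypeNameCallback() {
        WantTypeSpecifiers = true;        // keep keywords like 'int'
        WantExpressionKeywords = false;
        WantRemainingKeywords = false;
      }

      virtual unsigned RankCandidate(const TypoCorrection &Candidate) {
        NamedDecl *D = Candidate.getCorrectionDecl();
        if (Candidate.isKeyword() || (D && isa<TypeDecl>(D)))
          return 0;                                  // viable
        return TypoCorrection::InvalidDistance;      // filtered out
      }
    };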
/// \brief Try to "correct" a typo in the source code by finding
/// visible declarations whose names are similar to the name that was
/// present in the source code.
@@ -3524,15 +3692,16 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
/// \param SS the nested-name-specifier that precedes the name we're
/// looking for, if present.
///
+/// \param CCC A CorrectionCandidateCallback object that provides further
+/// validation of typo correction candidates. It also provides flags for
+/// determining the set of keywords permitted.
+///
/// \param MemberContext if non-NULL, the context in which to look for
/// a member access expression.
///
/// \param EnteringContext whether we're entering the context described by
/// the nested-name-specifier SS.
///
-/// \param CTC The context in which typo correction occurs, which impacts the
-/// set of keywords permitted.
-///
/// \param OPT when non-NULL, the search for visible declarations will
/// also walk the protocols in the qualified interfaces of \p OPT.
///
@@ -3543,11 +3712,18 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
+ CorrectionCandidateCallback &CCC,
DeclContext *MemberContext,
bool EnteringContext,
- CorrectTypoContext CTC,
const ObjCObjectPointerType *OPT) {
- if (Diags.hasFatalErrorOccurred() || !getLangOptions().SpellChecking)
+ if (Diags.hasFatalErrorOccurred() || !getLangOpts().SpellChecking)
+ return TypoCorrection();
+
+ // In Microsoft mode, don't perform typo correction in a template member
+ // function dependent context because it interferes with the "lookup into
+ // dependent bases of class templates" feature.
+ if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() &&
+ isa<CXXMethodDecl>(CurContext))
return TypoCorrection();
// We only attempt to correct typos for identifiers.
@@ -3565,12 +3741,18 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
if (!ActiveTemplateInstantiations.empty())
return TypoCorrection();
- NamespaceSpecifierSet Namespaces(Context, CurContext);
+ NamespaceSpecifierSet Namespaces(Context, CurContext, SS);
TypoCorrectionConsumer Consumer(*this, Typo);
+ // If a callback object considers an empty typo correction candidate to be
+ // viable, assume it does not do any actual validation of the candidates.
+ TypoCorrection EmptyCorrection;
+ bool ValidatingCallback = !isCandidateViable(CCC, EmptyCorrection);
+
// Perform name lookup to find visible, similarly-named entities.
bool IsUnqualifiedLookup = false;
+ DeclContext *QualifiedDC = MemberContext;
if (MemberContext) {
LookupVisibleDecls(MemberContext, LookupKind, Consumer);
@@ -3582,8 +3764,8 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
LookupVisibleDecls(*I, LookupKind, Consumer);
}
} else if (SS && SS->isSet()) {
- DeclContext *DC = computeDeclContext(*SS, EnteringContext);
- if (!DC)
+ QualifiedDC = computeDeclContext(*SS, EnteringContext);
+ if (!QualifiedDC)
return TypoCorrection();
// Provide a stop gap for files that are just seriously broken. Trying
@@ -3593,49 +3775,65 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
return TypoCorrection();
++TyposCorrected;
- LookupVisibleDecls(DC, LookupKind, Consumer);
+ LookupVisibleDecls(QualifiedDC, LookupKind, Consumer);
} else {
IsUnqualifiedLookup = true;
UnqualifiedTyposCorrectedMap::iterator Cached
= UnqualifiedTyposCorrected.find(Typo);
+ if (Cached != UnqualifiedTyposCorrected.end()) {
+ // Add the cached value, unless it's a keyword or fails validation. In the
+ // keyword case, we'll end up adding the keyword below.
+ if (Cached->second) {
+ if (!Cached->second.isKeyword() &&
+ isCandidateViable(CCC, Cached->second))
+ Consumer.addCorrection(Cached->second);
+ } else {
+ // Only honor no-correction cache hits when a callback that will validate
+ // correction candidates is not being used.
+ if (!ValidatingCallback)
+ return TypoCorrection();
+ }
+ }
if (Cached == UnqualifiedTyposCorrected.end()) {
// Provide a stop gap for files that are just seriously broken. Trying
// to correct all typos can turn into a HUGE performance penalty, causing
// some files to take minutes to get rejected by the parser.
if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
return TypoCorrection();
+ }
+ }
- // For unqualified lookup, look through all of the names that we have
- // seen in this translation unit.
- for (IdentifierTable::iterator I = Context.Idents.begin(),
- IEnd = Context.Idents.end();
- I != IEnd; ++I)
- Consumer.FoundName(I->getKey());
-
- // Walk through identifiers in external identifier sources.
- if (IdentifierInfoLookup *External
- = Context.Idents.getExternalIdentifierLookup()) {
- llvm::OwningPtr<IdentifierIterator> Iter(External->getIdentifiers());
- do {
- StringRef Name = Iter->Next();
- if (Name.empty())
- break;
+ // Determine whether we are going to search in the various namespaces for
+ // corrections.
+ bool SearchNamespaces
+ = getLangOpts().CPlusPlus &&
+ (IsUnqualifiedLookup || (QualifiedDC && QualifiedDC->isNamespace()));
+
+ if (IsUnqualifiedLookup || SearchNamespaces) {
+ // For unqualified lookup, look through all of the names that we have
+ // seen in this translation unit.
+ // FIXME: Re-add the ability to skip very unlikely potential corrections.
+ for (IdentifierTable::iterator I = Context.Idents.begin(),
+ IEnd = Context.Idents.end();
+ I != IEnd; ++I)
+ Consumer.FoundName(I->getKey());
- Consumer.FoundName(Name);
- } while (true);
- }
- } else {
- // Use the cached value, unless it's a keyword. In the keyword case, we'll
- // end up adding the keyword below.
- if (!Cached->second)
- return TypoCorrection();
+ // Walk through identifiers in external identifier sources.
+ // FIXME: Re-add the ability to skip very unlikely potential corrections.
+ if (IdentifierInfoLookup *External
+ = Context.Idents.getExternalIdentifierLookup()) {
+ OwningPtr<IdentifierIterator> Iter(External->getIdentifiers());
+ do {
+ StringRef Name = Iter->Next();
+ if (Name.empty())
+ break;
- if (!Cached->second.isKeyword())
- Consumer.addCorrection(Cached->second);
+ Consumer.FoundName(Name);
+ } while (true);
}
}
- AddKeywordsToConsumer(*this, Consumer, S, CTC);
+ AddKeywordsToConsumer(*this, Consumer, S, CCC);
// If we haven't found anything, we're done.
if (Consumer.empty()) {
@@ -3648,7 +3846,7 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
// Make sure that the user typed at least 3 characters for each correction
// made. Otherwise, we don't even bother looking at the results.
- unsigned ED = Consumer.getBestEditDistance();
+ unsigned ED = Consumer.getBestEditDistance(true);
if (ED > 0 && Typo->getName().size() / ED < 3) {
// If this was an unqualified lookup, note that no correction was found.
if (IsUnqualifiedLookup)
@@ -3657,8 +3855,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
return TypoCorrection();
}
- // Build the NestedNameSpecifiers for the KnownNamespaces
- if (getLangOptions().CPlusPlus) {
+ // Build the NestedNameSpecifiers for the KnownNamespaces, if we're going
+ // to search those namespaces.
+ if (SearchNamespaces) {
// Load any externally-known namespaces.
if (ExternalSource && !LoadedExternalKnownNamespaces) {
SmallVector<NamespaceDecl *, 4> ExternalKnownNamespaces;
@@ -3675,8 +3874,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
Namespaces.AddNamespace(KNI->first);
}
- // Weed out any names that could not be found by name lookup.
- llvm::SmallPtrSet<IdentifierInfo*, 16> QualifiedResults;
+ // Weed out any names that could not be found by name lookup or, if a
+ // CorrectionCandidateCallback object was provided, failed validation.
+ llvm::SmallVector<TypoCorrection, 16> QualifiedResults;
LookupResult TmpRes(*this, TypoName, LookupKind);
TmpRes.suppressDiagnostics();
while (!Consumer.empty()) {
@@ -3685,22 +3885,27 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
for (TypoCorrectionConsumer::result_iterator I = DI->second->begin(),
IEnd = DI->second->end();
I != IEnd; /* Increment in loop. */) {
- // If the item already has been looked up or is a keyword, keep it
+ // If the item has already been looked up or is a keyword, keep it.
+ // If a validator callback object was given, drop the correction
+ // unless it passes validation.
if (I->second.isResolved()) {
+ TypoCorrectionConsumer::result_iterator Prev = I;
++I;
+ if (!isCandidateViable(CCC, Prev->second))
+ DI->second->erase(Prev);
continue;
}
// Perform name lookup on this name.
IdentifierInfo *Name = I->second.getCorrectionAsIdentifierInfo();
LookupPotentialTypoResult(*this, TmpRes, Name, S, SS, MemberContext,
- EnteringContext, CTC);
+ EnteringContext, CCC.IsObjCIvarLookup);
switch (TmpRes.getResultKind()) {
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
case LookupResult::FoundUnresolvedValue:
- QualifiedResults.insert(Name);
+ QualifiedResults.push_back(I->second);
// We didn't find this name in our scope, or didn't like what we found;
// ignore it.
{
@@ -3716,32 +3921,40 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
return TypoCorrection();
case LookupResult::FoundOverloaded: {
+ TypoCorrectionConsumer::result_iterator Prev = I;
// Store all of the Decls for overloaded symbols
for (LookupResult::iterator TRD = TmpRes.begin(),
TRDEnd = TmpRes.end();
TRD != TRDEnd; ++TRD)
I->second.addCorrectionDecl(*TRD);
++I;
+ if (!isCandidateViable(CCC, Prev->second))
+ DI->second->erase(Prev);
break;
}
- case LookupResult::Found:
+ case LookupResult::Found: {
+ TypoCorrectionConsumer::result_iterator Prev = I;
I->second.setCorrectionDecl(TmpRes.getAsSingle<NamedDecl>());
++I;
+ if (!isCandidateViable(CCC, Prev->second))
+ DI->second->erase(Prev);
break;
}
+
+ }
}
if (DI->second->empty())
Consumer.erase(DI);
- else if (!getLangOptions().CPlusPlus || QualifiedResults.empty() || !ED)
+ else if (!getLangOpts().CPlusPlus || QualifiedResults.empty() || !ED)
// If there are results in the closest possible bucket, stop
break;
// Only perform the qualified lookups for C++
- if (getLangOptions().CPlusPlus) {
+ if (SearchNamespaces) {
TmpRes.suppressDiagnostics();
- for (llvm::SmallPtrSet<IdentifierInfo*,
+ for (llvm::SmallVector<TypoCorrection,
16>::iterator QRI = QualifiedResults.begin(),
QRIEnd = QualifiedResults.end();
QRI != QRIEnd; ++QRI) {
@@ -3749,30 +3962,35 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
NIEnd = Namespaces.end();
NI != NIEnd; ++NI) {
DeclContext *Ctx = NI->DeclCtx;
- unsigned QualifiedED = ED + NI->EditDistance;
- // Stop searching once the namespaces are too far away to create
+ // FIXME: Stop searching once the namespaces are too far away to create
// acceptable corrections for this identifier (since the namespaces
- // are sorted in ascending order by edit distance)
- if (QualifiedED > Consumer.getMaxEditDistance()) break;
+ // are sorted in ascending order by edit distance).
TmpRes.clear();
- TmpRes.setLookupName(*QRI);
+ TmpRes.setLookupName(QRI->getCorrectionAsIdentifierInfo());
if (!LookupQualifiedName(TmpRes, Ctx)) continue;
+ // Any corrections added below will be validated in subsequent
+ // iterations of the main while() loop over the Consumer's contents.
switch (TmpRes.getResultKind()) {
- case LookupResult::Found:
- Consumer.addName((*QRI)->getName(), TmpRes.getAsSingle<NamedDecl>(),
- QualifiedED, NI->NameSpecifier);
+ case LookupResult::Found: {
+ TypoCorrection TC(*QRI);
+ TC.setCorrectionDecl(TmpRes.getAsSingle<NamedDecl>());
+ TC.setCorrectionSpecifier(NI->NameSpecifier);
+ TC.setQualifierDistance(NI->EditDistance);
+ Consumer.addCorrection(TC);
break;
+ }
case LookupResult::FoundOverloaded: {
- TypoCorrection corr(&Context.Idents.get((*QRI)->getName()), NULL,
- NI->NameSpecifier, QualifiedED);
+ TypoCorrection TC(*QRI);
+ TC.setCorrectionSpecifier(NI->NameSpecifier);
+ TC.setQualifierDistance(NI->EditDistance);
for (LookupResult::iterator TRD = TmpRes.begin(),
TRDEnd = TmpRes.end();
TRD != TRDEnd; ++TRD)
- corr.addCorrectionDecl(*TRD);
- Consumer.addCorrection(corr);
+ TC.addCorrectionDecl(*TRD);
+ Consumer.addCorrection(TC);
break;
}
case LookupResult::NotFound:
@@ -3792,32 +4010,18 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
if (Consumer.empty()) return TypoCorrection();
TypoResultsMap &BestResults = *Consumer.begin()->second;
- ED = Consumer.begin()->first;
+ ED = TypoCorrection::NormalizeEditDistance(Consumer.begin()->first);
if (ED > 0 && Typo->getName().size() / ED < 3) {
- // If this was an unqualified lookup, note that no correction was found.
- if (IsUnqualifiedLookup)
+ // If this was an unqualified lookup and we believe the callback
+ // object wouldn't have filtered out possible corrections, note
+ // that no correction was found.
+ if (IsUnqualifiedLookup && !ValidatingCallback)
(void)UnqualifiedTyposCorrected[Typo];
return TypoCorrection();
}
- // If we have multiple possible corrections, eliminate the ones where we
- // added namespace qualifiers to try to resolve the ambiguity (and to favor
- // corrections without additional namespace qualifiers)
- if (getLangOptions().CPlusPlus && BestResults.size() > 1) {
- TypoCorrectionConsumer::distance_iterator DI = Consumer.begin();
- for (TypoCorrectionConsumer::result_iterator I = DI->second->begin(),
- IEnd = DI->second->end();
- I != IEnd; /* Increment in loop. */) {
- if (I->second.getCorrectionSpecifier() != NULL) {
- TypoCorrectionConsumer::result_iterator Cur = I;
- ++I;
- DI->second->erase(Cur);
- } else ++I;
- }
- }
-
// If only a single name remains, return that result.
if (BestResults.size() == 1) {
const llvm::StringMapEntry<TypoCorrection> &Correction = *(BestResults.begin());
@@ -3833,7 +4037,12 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
return Result;
}
- else if (BestResults.size() > 1 && CTC == CTC_ObjCMessageReceiver
+ else if (BestResults.size() > 1
+ // Ugly hack equivalent to CTC == CTC_ObjCMessageReceiver;
+ // WantObjCSuper is only true for CTC_ObjCMessageReceiver and for
+ // some instances of CTC_Unknown, while WantRemainingKeywords is true
+ // for CTC_Unknown but not for CTC_ObjCMessageReceiver.
+ && CCC.WantObjCSuper && !CCC.WantRemainingKeywords
&& BestResults["super"].isKeyword()) {
// Prefer 'super' when we're completing in a message-receiver
// context.
@@ -3849,7 +4058,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
return BestResults["super"];
}
- if (IsUnqualifiedLookup)
+ // If this was an unqualified lookup and we believe the callback object did
+ // not filter out possible corrections, note that no correction was found.
+ if (IsUnqualifiedLookup && !ValidatingCallback)
(void)UnqualifiedTyposCorrected[Typo];
return TypoCorrection();
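The rewritten CorrectTypo keeps its candidates in buckets keyed by edit distance and always examines the closest bucket first; corrections that fail validation are erased, and the whole lookup is abandoned when the typo is not at least three times longer than the best edit distance. A minimal, std-only sketch of that shape (hypothetical names, not clang's actual types):

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      // edit distance -> (corrected name -> where it was declared);
      // begin() is always the closest surviving bucket.
      std::map<unsigned, std::map<std::string, std::string>> corrections;
      corrections[1]["tree"] = "decl at foo.h:3";
      corrections[2]["three"] = "decl at bar.h:9";

      // Mirror of the cutoff in CorrectTypo: give up unless the typo is at
      // least three times as long as the best edit distance.
      std::string typo = "tre";
      unsigned ed = corrections.begin()->first;
      if (ed > 0 && typo.size() / ed < 3)
        std::cout << "no correction: best candidate is too far away\n";
      else
        std::cout << "best: " << corrections.begin()->second.begin()->first
                  << "\n";
    }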
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
index 751f553..5ece8f1 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
@@ -17,7 +17,9 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ASTMutationListener.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -99,6 +101,7 @@ static void checkARCPropertyDecl(Sema &S, ObjCPropertyDecl *property) {
}
Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
+ SourceLocation LParenLoc,
FieldDeclarator &FD,
ObjCDeclSpec &ODS,
Selector GetterSel,
@@ -109,9 +112,9 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
unsigned Attributes = ODS.getPropertyAttributes();
TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
QualType T = TSI->getType();
- if ((getLangOptions().getGC() != LangOptions::NonGC &&
+ if ((getLangOpts().getGC() != LangOptions::NonGC &&
T.isObjCGCWeak()) ||
- (getLangOptions().ObjCAutoRefCount &&
+ (getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_Weak))
Attributes |= ObjCDeclSpec::DQ_PR_weak;
@@ -133,43 +136,80 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl))
if (CDecl->IsClassExtension()) {
- Decl *Res = HandlePropertyInClassExtension(S, AtLoc,
+ Decl *Res = HandlePropertyInClassExtension(S, AtLoc, LParenLoc,
FD, GetterSel, SetterSel,
isAssign, isReadWrite,
Attributes,
+ ODS.getPropertyAttributes(),
isOverridingProperty, TSI,
MethodImplKind);
if (Res) {
CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
checkARCPropertyDecl(*this, cast<ObjCPropertyDecl>(Res));
}
return Res;
}
- ObjCPropertyDecl *Res = CreatePropertyDecl(S, ClassDecl, AtLoc, FD,
+ ObjCPropertyDecl *Res = CreatePropertyDecl(S, ClassDecl, AtLoc, LParenLoc, FD,
GetterSel, SetterSel,
isAssign, isReadWrite,
- Attributes, TSI, MethodImplKind);
+ Attributes,
+ ODS.getPropertyAttributes(),
+ TSI, MethodImplKind);
if (lexicalDC)
Res->setLexicalDeclContext(lexicalDC);
// Validate the attributes on the @property.
CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
checkARCPropertyDecl(*this, Res);
return Res;
}
+static ObjCPropertyDecl::PropertyAttributeKind
+makePropertyAttributesAsWritten(unsigned Attributes) {
+ unsigned attributesAsWritten = 0;
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readonly;
+ if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readwrite;
+ if (Attributes & ObjCDeclSpec::DQ_PR_getter)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_getter;
+ if (Attributes & ObjCDeclSpec::DQ_PR_setter)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_setter;
+ if (Attributes & ObjCDeclSpec::DQ_PR_assign)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_assign;
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_retain;
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_strong;
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_weak;
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_copy;
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_nonatomic;
+ if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_atomic;
+
+ return (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten;
+}
+
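makePropertyAttributesAsWritten is a pure one-to-one flag translation, so the if-chain could equally be written table-driven. A compilable sketch of that alternative, with hypothetical bit values standing in for the real ObjCDeclSpec::DQ_PR_* and ObjCPropertyDecl::OBJC_PR_* enumerators:

    #include <utility>

    enum DeclSpecBits { DS_readonly = 1 << 0, DS_readwrite = 1 << 1,
                        DS_copy = 1 << 2 };
    enum PropDeclBits { PD_readonly = 1 << 0, PD_readwrite = 1 << 1,
                        PD_copy = 1 << 2 };

    static unsigned translateAttributes(unsigned Attributes) {
      // One (from, to) pair per flag replaces one 'if' per flag, e.g.
      // translateAttributes(DS_readonly | DS_copy) == (PD_readonly | PD_copy).
      static const std::pair<unsigned, unsigned> Map[] = {
        {DS_readonly, PD_readonly},
        {DS_readwrite, PD_readwrite},
        {DS_copy, PD_copy},
      };
      unsigned Out = 0;
      for (const auto &M : Map)
        if (Attributes & M.first)
          Out |= M.second;
      return Out;
    }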
Decl *
Sema::HandlePropertyInClassExtension(Scope *S,
- SourceLocation AtLoc, FieldDeclarator &FD,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
Selector GetterSel, Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
+ const unsigned AttributesAsWritten,
bool *isOverridingProperty,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind) {
@@ -198,7 +238,9 @@ Sema::HandlePropertyInClassExtension(Scope *S,
// FIXME. We should really be using CreatePropertyDecl for this.
ObjCPropertyDecl *PDecl =
ObjCPropertyDecl::Create(Context, DC, FD.D.getIdentifierLoc(),
- PropertyId, AtLoc, T);
+ PropertyId, AtLoc, LParenLoc, T);
+ PDecl->setPropertyAttributesAsWritten(
+ makePropertyAttributesAsWritten(AttributesAsWritten));
if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
@@ -224,23 +266,40 @@ Sema::HandlePropertyInClassExtension(Scope *S,
if (!PIDecl) {
// No matching property found in the primary class. Just fall thru
// and add property to continuation class's primary class.
- ObjCPropertyDecl *PDecl =
- CreatePropertyDecl(S, CCPrimary, AtLoc,
+ ObjCPropertyDecl *PrimaryPDecl =
+ CreatePropertyDecl(S, CCPrimary, AtLoc, LParenLoc,
FD, GetterSel, SetterSel, isAssign, isReadWrite,
- Attributes, T, MethodImplKind, DC);
+ Attributes, AttributesAsWritten, T, MethodImplKind, DC);
// A case of continuation class adding a new property in the class. This
// is not what it was meant for. However, gcc supports it and so should we.
// Make sure setter/getters are declared here.
- ProcessPropertyDecl(PDecl, CCPrimary, /* redeclaredProperty = */ 0,
+ ProcessPropertyDecl(PrimaryPDecl, CCPrimary, /* redeclaredProperty = */ 0,
/* lexicalDC = */ CDecl);
- return PDecl;
+ PDecl->setGetterMethodDecl(PrimaryPDecl->getGetterMethodDecl());
+ PDecl->setSetterMethodDecl(PrimaryPDecl->getSetterMethodDecl());
+ if (ASTMutationListener *L = Context.getASTMutationListener())
+ L->AddedObjCPropertyInClassExtension(PrimaryPDecl, /*OrigProp=*/0, CDecl);
+ return PrimaryPDecl;
}
- if (PIDecl->getType().getCanonicalType()
- != PDecl->getType().getCanonicalType()) {
- Diag(AtLoc,
- diag::warn_type_mismatch_continuation_class) << PDecl->getType();
- Diag(PIDecl->getLocation(), diag::note_property_declare);
+ if (!Context.hasSameType(PIDecl->getType(), PDecl->getType())) {
+ bool IncompatibleObjC = false;
+ QualType ConvertedType;
+ // Relax the strict type matching for property types in continuation classes.
+ // Allow the property object type of the continuation class to differ as long
+ // as it narrows the object type of its primary class property. Note that
+ // this conversion is safe only because the wider type is for a 'readonly'
+ // property in the primary class and the 'narrowed' type is for a 'readwrite'
+ // property in the continuation class.
+ if (!isa<ObjCObjectPointerType>(PIDecl->getType()) ||
+ !isa<ObjCObjectPointerType>(PDecl->getType()) ||
+ (!isObjCPointerConversion(PDecl->getType(), PIDecl->getType(),
+ ConvertedType, IncompatibleObjC))
+ || IncompatibleObjC) {
+ Diag(AtLoc,
+ diag::err_type_mismatch_continuation_class) << PDecl->getType();
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
}
// The property 'PIDecl's readonly attribute will be over-ridden
@@ -273,7 +332,7 @@ Sema::HandlePropertyInClassExtension(Scope *S,
ContextRAII SavedContext(*this, CCPrimary);
Decl *ProtocolPtrTy =
- ActOnProperty(S, AtLoc, FD, ProtocolPropertyODS,
+ ActOnProperty(S, AtLoc, LParenLoc, FD, ProtocolPropertyODS,
PIDecl->getGetterName(),
PIDecl->getSetterName(),
isOverridingProperty,
@@ -306,18 +365,24 @@ Sema::HandlePropertyInClassExtension(Scope *S,
*isOverridingProperty = true;
// Make sure setter decl is synthesized, and added to primary class's list.
ProcessPropertyDecl(PIDecl, CCPrimary, PDecl, CDecl);
+ PDecl->setGetterMethodDecl(PIDecl->getGetterMethodDecl());
+ PDecl->setSetterMethodDecl(PIDecl->getSetterMethodDecl());
+ if (ASTMutationListener *L = Context.getASTMutationListener())
+ L->AddedObjCPropertyInClassExtension(PDecl, PIDecl, CDecl);
return 0;
}
ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
+ SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
+ const unsigned AttributesAsWritten,
TypeSourceInfo *TInfo,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC){
@@ -326,7 +391,7 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
// Issue a warning if property is 'assign' as default and its object, which is
// gc'able conforms to NSCopying protocol
- if (getLangOptions().getGC() != LangOptions::NonGC &&
+ if (getLangOpts().getGC() != LangOptions::NonGC &&
isAssign && !(Attributes & ObjCDeclSpec::DQ_PR_assign))
if (const ObjCObjectPointerType *ObjPtrTy =
T->getAs<ObjCObjectPointerType>()) {
@@ -343,7 +408,7 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
DeclContext *DC = cast<DeclContext>(CDecl);
ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC,
FD.D.getIdentifierLoc(),
- PropertyId, AtLoc, TInfo);
+ PropertyId, AtLoc, LParenLoc, TInfo);
if (ObjCPropertyDecl *prevDecl =
ObjCPropertyDecl::findPropertyDecl(DC, PropertyId)) {
@@ -368,35 +433,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
// selector names in anticipation of declaration of setter/getter methods.
PDecl->setGetterName(GetterSel);
PDecl->setSetterName(SetterSel);
-
- unsigned attributesAsWritten = 0;
- if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readonly;
- if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readwrite;
- if (Attributes & ObjCDeclSpec::DQ_PR_getter)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_getter;
- if (Attributes & ObjCDeclSpec::DQ_PR_setter)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_setter;
- if (Attributes & ObjCDeclSpec::DQ_PR_assign)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_assign;
- if (Attributes & ObjCDeclSpec::DQ_PR_retain)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_retain;
- if (Attributes & ObjCDeclSpec::DQ_PR_strong)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_strong;
- if (Attributes & ObjCDeclSpec::DQ_PR_weak)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_weak;
- if (Attributes & ObjCDeclSpec::DQ_PR_copy)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_copy;
- if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
- if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_nonatomic;
- if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
- attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_atomic;
-
PDecl->setPropertyAttributesAsWritten(
- (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten);
+ makePropertyAttributesAsWritten(AttributesAsWritten));
if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
@@ -502,6 +540,33 @@ static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
S.Diag(property->getLocation(), diag::note_property_declare);
}
+/// setImpliedPropertyAttributeForReadOnlyProperty -
+/// This routine evaluates lifetime attributes for a 'readonly'
+/// property with no known lifetime of its own, using the backing
+/// ivar's attribute, if any. If there is no backing ivar, the
+/// property's lifetime is assumed to be 'strong'.
+static void setImpliedPropertyAttributeForReadOnlyProperty(
+ ObjCPropertyDecl *property, ObjCIvarDecl *ivar) {
+ Qualifiers::ObjCLifetime propertyLifetime =
+ getImpliedARCOwnership(property->getPropertyAttributes(),
+ property->getType());
+ if (propertyLifetime != Qualifiers::OCL_None)
+ return;
+
+ if (!ivar) {
+ // If there is no backing ivar, make the property 'strong'.
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ return;
+ }
+ // The property assumes ownership of the backing ivar.
+ QualType ivarType = ivar->getType();
+ Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime();
+ if (ivarLifetime == Qualifiers::OCL_Strong)
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ else if (ivarLifetime == Qualifiers::OCL_Weak)
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+ return;
+}
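The routine above boils down to a three-way decision. A sketch of it as a pure function over simplified enums (hypothetical names; the real code derives the "no known lifetime" precondition from getImpliedARCOwnership first):

    enum Lifetime { None, Strong, Weak, Unretained };

    static Lifetime impliedReadOnlyLifetime(bool hasBackingIvar,
                                            Lifetime ivarLifetime) {
      if (!hasBackingIvar)
        return Strong;        // no ivar: the property defaults to strong
      if (ivarLifetime == Strong || ivarLifetime == Weak)
        return ivarLifetime;  // inherit the backing ivar's ownership
      return None;            // anything else leaves the property untouched
    }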
/// ActOnPropertyImplDecl - This routine performs semantic checks and
/// builds the AST node for a property implementation declaration; declared
@@ -521,6 +586,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Diag(AtLoc, diag::error_missing_property_context);
return 0;
}
+ if (PropertyIvarLoc.isInvalid())
+ PropertyIvarLoc = PropertyLoc;
ObjCPropertyDecl *property = 0;
ObjCInterfaceDecl* IDecl = 0;
// Find the class or category class where this property must have
@@ -592,16 +659,26 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// @synthesize
if (!PropertyIvar)
PropertyIvar = PropertyId;
- ObjCPropertyDecl::PropertyAttributeKind kind
- = property->getPropertyAttributes();
+ // Check that this is a previously declared ivar in the 'IDecl' interface.
+ ObjCInterfaceDecl *ClassDeclared;
+ Ivar = IDecl->lookupInstanceVariable(PropertyIvar, ClassDeclared);
QualType PropType = property->getType();
-
QualType PropertyIvarType = PropType.getNonReferenceType();
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ (property->getPropertyAttributesAsWritten() &
+ ObjCPropertyDecl::OBJC_PR_readonly) &&
+ PropertyIvarType->isObjCRetainableType()) {
+ setImpliedPropertyAttributeForReadOnlyProperty(property, Ivar);
+ }
+
+ ObjCPropertyDecl::PropertyAttributeKind kind
+ = property->getPropertyAttributes();
// Add GC __weak to the ivar type if the property is weak.
if ((kind & ObjCPropertyDecl::OBJC_PR_weak) &&
- getLangOptions().getGC() != LangOptions::NonGC) {
- assert(!getLangOptions().ObjCAutoRefCount);
+ getLangOpts().getGC() != LangOptions::NonGC) {
+ assert(!getLangOpts().ObjCAutoRefCount);
if (PropertyIvarType.isObjCGCStrong()) {
Diag(PropertyLoc, diag::err_gc_weak_property_strong_type);
Diag(property->getLocation(), diag::note_property_declare);
@@ -611,13 +688,10 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
}
- // Check that this is a previously declared 'ivar' in 'IDecl' interface
- ObjCInterfaceDecl *ClassDeclared;
- Ivar = IDecl->lookupInstanceVariable(PropertyIvar, ClassDeclared);
if (!Ivar) {
// In ARC, give the ivar a lifetime qualifier based on the
// property attributes.
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
!PropertyIvarType.getObjCLifetime() &&
PropertyIvarType->isObjCRetainableType()) {
@@ -632,13 +706,21 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Qualifiers::ObjCLifetime lifetime =
getImpliedARCOwnership(kind, PropertyIvarType);
assert(lifetime && "no lifetime for property?");
-
- if (lifetime == Qualifiers::OCL_Weak &&
- !getLangOptions().ObjCRuntimeHasWeak) {
- Diag(PropertyLoc, diag::err_arc_weak_no_runtime);
- Diag(property->getLocation(), diag::note_property_declare);
+ if (lifetime == Qualifiers::OCL_Weak) {
+ bool err = false;
+ if (const ObjCObjectPointerType *ObjT =
+ PropertyIvarType->getAs<ObjCObjectPointerType>())
+ if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable()) {
+ Diag(PropertyLoc, diag::err_arc_weak_unavailable_property);
+ Diag(property->getLocation(), diag::note_property_declare);
+ err = true;
+ }
+ if (!err && !getLangOpts().ObjCRuntimeHasWeak) {
+ Diag(PropertyLoc, diag::err_arc_weak_no_runtime);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
}
-
+
Qualifiers qs;
qs.addObjCLifetime(lifetime);
PropertyIvarType = Context.getQualifiedType(PropertyIvarType, qs);
@@ -646,27 +728,27 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
if (kind & ObjCPropertyDecl::OBJC_PR_weak &&
- !getLangOptions().ObjCAutoRefCount &&
- getLangOptions().getGC() == LangOptions::NonGC) {
+ !getLangOpts().ObjCAutoRefCount &&
+ getLangOpts().getGC() == LangOptions::NonGC) {
Diag(PropertyLoc, diag::error_synthesize_weak_non_arc_or_gc);
Diag(property->getLocation(), diag::note_property_declare);
}
Ivar = ObjCIvarDecl::Create(Context, ClassImpDecl,
- PropertyLoc, PropertyLoc, PropertyIvar,
+ PropertyIvarLoc, PropertyIvarLoc, PropertyIvar,
PropertyIvarType, /*Dinfo=*/0,
ObjCIvarDecl::Private,
(Expr *)0, true);
ClassImpDecl->addDecl(Ivar);
- IDecl->makeDeclVisibleInContext(Ivar, false);
+ IDecl->makeDeclVisibleInContext(Ivar);
property->setPropertyIvarDecl(Ivar);
- if (!getLangOptions().ObjCNonFragileABI)
+ if (!getLangOpts().ObjCNonFragileABI)
Diag(PropertyLoc, diag::error_missing_property_ivar_decl) << PropertyId;
// Note! I deliberately want it to fall thru so that we have a
// property implementation and avoid future warnings.
- } else if (getLangOptions().ObjCNonFragileABI &&
- ClassDeclared != IDecl) {
+ } else if (getLangOpts().ObjCNonFragileABI &&
+ !declaresSameEntity(ClassDeclared, IDecl)) {
Diag(PropertyLoc, diag::error_ivar_in_superclass_use)
<< property->getDeclName() << Ivar->getDeclName()
<< ClassDeclared->getDeclName();
@@ -686,10 +768,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PropertyIvarType->getAs<ObjCObjectPointerType>(),
IvarType->getAs<ObjCObjectPointerType>());
else {
- SourceLocation Loc = PropertyIvarLoc;
- if (Loc.isInvalid())
- Loc = PropertyLoc;
- compat = (CheckAssignmentConstraints(Loc, PropertyIvarType, IvarType)
+ compat = (CheckAssignmentConstraints(PropertyIvarLoc, PropertyIvarType,
+ IvarType)
== Compatible);
}
if (!compat) {
@@ -716,7 +796,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
// __weak is explicit. So it works on Canonical type.
if ((PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() &&
- getLangOptions().getGC() != LangOptions::NonGC)) {
+ getLangOpts().getGC() != LangOptions::NonGC)) {
Diag(PropertyLoc, diag::error_weak_property)
<< property->getDeclName() << Ivar->getDeclName();
Diag(Ivar->getLocation(), diag::note_ivar_decl);
@@ -725,13 +805,13 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// Fall thru - see previous comment
if ((property->getType()->isObjCObjectPointerType() ||
PropType.isObjCGCStrong()) && IvarType.isObjCGCWeak() &&
- getLangOptions().getGC() != LangOptions::NonGC) {
+ getLangOpts().getGC() != LangOptions::NonGC) {
Diag(PropertyLoc, diag::error_strong_property)
<< property->getDeclName() << Ivar->getDeclName();
// Fall thru - see previous comment
}
}
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
checkARCPropertyImpl(*this, PropertyLoc, property, Ivar);
} else if (PropertyIvar)
// @dynamic
@@ -747,14 +827,14 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Ivar, PropertyIvarLoc);
if (ObjCMethodDecl *getterMethod = property->getGetterMethodDecl()) {
getterMethod->createImplicitParams(Context, IDecl);
- if (getLangOptions().CPlusPlus && Synthesize &&
+ if (getLangOpts().CPlusPlus && Synthesize &&
Ivar->getType()->isRecordType()) {
// For Objective-C++, we need to synthesize the AST for the ivar object to be
// returned by the getter, as it must conform to C++'s copy-return rules.
// FIXME. Eventually we want to do this for Objective-C as well.
ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl();
DeclRefExpr *SelfExpr =
- new (Context) DeclRefExpr(SelfDecl, SelfDecl->getType(),
+ new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
VK_RValue, SourceLocation());
Expr *IvarRefExpr =
new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc,
@@ -782,22 +862,20 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
if (ObjCMethodDecl *setterMethod = property->getSetterMethodDecl()) {
setterMethod->createImplicitParams(Context, IDecl);
- if (getLangOptions().CPlusPlus && Synthesize
+ if (getLangOpts().CPlusPlus && Synthesize
&& Ivar->getType()->isRecordType()) {
// FIXME. Eventually we want to do this for Objective-C as well.
ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
DeclRefExpr *SelfExpr =
- new (Context) DeclRefExpr(SelfDecl, SelfDecl->getType(),
+ new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
VK_RValue, SourceLocation());
Expr *lhs =
new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc,
SelfExpr, true, true);
ObjCMethodDecl::param_iterator P = setterMethod->param_begin();
ParmVarDecl *Param = (*P);
- QualType T = Param->getType();
- if (T->isReferenceType())
- T = T->getAs<ReferenceType>()->getPointeeType();
- Expr *rhs = new (Context) DeclRefExpr(Param, T,
+ QualType T = Param->getType().getNonReferenceType();
+ Expr *rhs = new (Context) DeclRefExpr(Param, false, T,
VK_LValue, SourceLocation());
ExprResult Res = BuildBinOp(S, lhs->getLocEnd(),
BO_Assign, lhs, rhs);
@@ -808,9 +886,13 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
dyn_cast_or_null<CXXOperatorCallExpr>(callExpr))
if (const FunctionDecl *FuncDecl = CXXCE->getDirectCallee())
if (!FuncDecl->isTrivial())
- Diag(PropertyLoc,
- diag::warn_atomic_property_nontrivial_assign_op)
+ if (property->getType()->isReferenceType()) {
+ Diag(PropertyLoc,
+ diag::err_atomic_property_nontrivial_assign_op)
<< property->getType();
+ Diag(FuncDecl->getLocStart(),
+ diag::note_callee_decl) << FuncDecl;
+ }
}
PIDecl->setSetterCXXAssignment(Res.takeAs<Expr>());
}
@@ -833,8 +915,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
return 0;
}
IC->addPropertyImplementation(PIDecl);
- if (getLangOptions().ObjCDefaultSynthProperties &&
- getLangOptions().ObjCNonFragileABI2) {
+ if (getLangOpts().ObjCDefaultSynthProperties &&
+ getLangOpts().ObjCNonFragileABI2 &&
+ !IDecl->isObjCRequiresPropertyDefs()) {
// Diagnose if an ivar was lazily synthesized due to a previous
// use and if 1) property is @dynamic or 2) property is synthesized
// but it requires an ivar of different name.
@@ -848,7 +931,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
}
// Issue diagnostics only if Ivar belongs to current class.
if (Ivar && Ivar->getSynthesize() &&
- IC->getClassInterface() == ClassDeclared) {
+ declaresSameEntity(IC->getClassInterface(), ClassDeclared)) {
Diag(Ivar->getLocation(), diag::err_undeclared_var_use)
<< PropertyId;
Ivar->setInvalidDecl();
@@ -948,7 +1031,8 @@ bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
ObjCMethodDecl *GetterMethod,
SourceLocation Loc) {
if (GetterMethod &&
- GetterMethod->getResultType() != property->getType()) {
+ !Context.hasSameType(GetterMethod->getResultType().getNonReferenceType(),
+ property->getType().getNonReferenceType())) {
AssignConvertType result = Incompatible;
if (property->getType()->isObjCObjectPointerType())
result = CheckAssignmentConstraints(Loc, GetterMethod->getResultType(),
@@ -1201,7 +1285,8 @@ static void CollectClassPropertyImplementations(ObjCContainerDecl *CDecl,
for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
E = PDecl->prop_end(); P != E; ++P) {
ObjCPropertyDecl *Prop = (*P);
- PropMap[Prop->getIdentifier()] = Prop;
+ if (!PropMap.count(Prop->getIdentifier()))
+ PropMap[Prop->getIdentifier()] = Prop;
}
// scan through protocol's protocols.
for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
@@ -1265,7 +1350,7 @@ ObjCPropertyDecl *Sema::LookupPropertyDecl(const ObjCContainerDecl *CDecl,
static IdentifierInfo * getDefaultSynthIvarName(ObjCPropertyDecl *Prop,
ASTContext &Ctx) {
- llvm::SmallString<128> ivarName;
+ SmallString<128> ivarName;
{
llvm::raw_svector_ostream os(ivarName);
os << '_' << Prop->getIdentifier()->getName();
@@ -1305,7 +1390,13 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
if (IMPDecl->getInstanceMethod(Prop->getSetterName()))
continue;
}
-
+ if (isa<ObjCProtocolDecl>(Prop->getDeclContext())) {
+ // We won't auto-synthesize properties declared in protocols.
+ Diag(IMPDecl->getLocation(),
+ diag::warn_auto_synthesizing_protocol_property);
+ Diag(Prop->getLocation(), diag::note_property_declare);
+ continue;
+ }
// We use invalid SourceLocations for the synthesized ivars since they
// aren't really synthesized at a particular location; they just exist.
@@ -1326,7 +1417,8 @@ void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) {
if (!IC)
return;
if (ObjCInterfaceDecl* IDecl = IC->getClassInterface())
- DefaultSynthesizeProperties(S, IC, IDecl);
+ if (!IDecl->isObjCRequiresPropertyDefs())
+ DefaultSynthesizeProperties(S, IC, IDecl);
}
void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
@@ -1363,6 +1455,11 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
<< Prop->getDeclName() << Prop->getGetterName();
Diag(Prop->getLocation(),
diag::note_property_declare);
+ if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2)
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
+ Diag(RID->getLocation(), diag::note_suppressed_class_declare);
+
}
if (!Prop->isReadOnly() && !InsMap.count(Prop->getSetterName())) {
@@ -1373,6 +1470,10 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
<< Prop->getDeclName() << Prop->getSetterName();
Diag(Prop->getLocation(),
diag::note_property_declare);
+ if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2)
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
+ Diag(RID->getLocation(), diag::note_suppressed_class_declare);
}
}
}
@@ -1381,7 +1482,7 @@ void
Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl) {
// Rules apply in non-GC mode only
- if (getLangOptions().getGC() != LangOptions::NonGC)
+ if (getLangOpts().getGC() != LangOptions::NonGC)
return;
for (ObjCContainerDecl::prop_iterator I = IDecl->prop_begin(),
E = IDecl->prop_end();
@@ -1433,7 +1534,34 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
Diag(MethodLoc, diag::warn_atomic_property_rule)
<< Property->getIdentifier() << (GetterMethod != 0)
<< (SetterMethod != 0);
- Diag(MethodLoc, diag::note_atomic_property_fixup_suggest);
+ // Emit fix-it hints suggesting the 'nonatomic' attribute.
+ if (!AttributesAsWritten) {
+ if (Property->getLParenLoc().isValid()) {
+ // @property () ... case.
+ SourceRange PropSourceRange(Property->getAtLoc(),
+ Property->getLParenLoc());
+ Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
+ FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic");
+ }
+ else {
+ //@property id etc.
+ SourceLocation endLoc =
+ Property->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+ endLoc = endLoc.getLocWithOffset(-1);
+ SourceRange PropSourceRange(Property->getAtLoc(), endLoc);
+ Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
+ FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic) ");
+ }
+ }
+ else if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic)) {
+ // @property () ... case.
+ SourceLocation endLoc = Property->getLParenLoc();
+ SourceRange PropSourceRange(Property->getAtLoc(), endLoc);
+ Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
+ FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic, ");
+ }
+ else
+ Diag(MethodLoc, diag::note_atomic_property_fixup_suggest);
Diag(Property->getLocation(), diag::note_property_declare);
}
}
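Concretely (hypothetical source), the three fix-it shapes produced above are: '@property id x;' and '@property () id x;' both become '@property (nonatomic) id x;', while '@property (copy) id x;' becomes '@property (nonatomic, copy) id x;'. If 'atomic' was written explicitly, only the plain note without a fix-it is emitted.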
@@ -1441,7 +1569,7 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
}
void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D) {
- if (getLangOptions().getGC() == LangOptions::GCOnly)
+ if (getLangOpts().getGC() == LangOptions::GCOnly)
return;
for (ObjCImplementationDecl::propimpl_iterator
@@ -1459,7 +1587,7 @@ void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D
ObjCMethodFamily family = method->getMethodFamily();
if (family == OMF_alloc || family == OMF_copy ||
family == OMF_mutableCopy || family == OMF_new) {
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
Diag(PID->getLocation(), diag::err_ownin_getter_rule);
else
Diag(PID->getLocation(), diag::warn_owning_getter_rule);
@@ -1510,7 +1638,8 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
Diag(SetterMethod->getLocation(), diag::err_setter_type_void);
if (SetterMethod->param_size() != 1 ||
!Context.hasSameUnqualifiedType(
- (*SetterMethod->param_begin())->getType(), property->getType())) {
+ (*SetterMethod->param_begin())->getType().getNonReferenceType(),
+ property->getType().getNonReferenceType())) {
Diag(property->getLocation(),
diag::warn_accessor_property_type_mismatch)
<< property->getDeclName()
@@ -1636,6 +1765,21 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
ObjCPropertyDecl *PropertyDecl = cast<ObjCPropertyDecl>(PDecl);
QualType PropertyTy = PropertyDecl->getType();
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ PropertyTy->isObjCRetainableType()) {
+ // 'readonly' property with no obvious lifetime.
+ // Its lifetime will be determined by its backing ivar.
+ unsigned rel = (ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong |
+ ObjCDeclSpec::DQ_PR_weak |
+ ObjCDeclSpec::DQ_PR_assign);
+ if ((Attributes & rel) == 0)
+ return;
+ }
+
// readonly and readwrite/assign/retain/copy conflict.
if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
(Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
@@ -1669,6 +1813,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain (or strong)");
Attributes &= ~(ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong);
+ PropertyDecl->setInvalidDecl();
}
// Check for more than one of { assign, copy, retain }.
@@ -1688,7 +1833,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
<< "assign" << "strong";
Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
}
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
(Attributes & ObjCDeclSpec::DQ_PR_weak)) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "assign" << "weak";
@@ -1710,7 +1855,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
<< "unsafe_unretained" << "strong";
Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
}
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
(Attributes & ObjCDeclSpec::DQ_PR_weak)) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "unsafe_unretained" << "weak";
@@ -1759,20 +1904,28 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
ObjCDeclSpec::DQ_PR_unsafe_unretained |
ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong |
ObjCDeclSpec::DQ_PR_weak)) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
PropertyTy->isObjCObjectPointerType()) {
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
// With arc, @property definitions should default to (strong) when
- // not specified
+ // not specified, including when the property is 'readonly'.
PropertyDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
- else {
+ else if (!(Attributes & ObjCDeclSpec::DQ_PR_readonly)) {
+ bool isAnyClassTy =
+ (PropertyTy->isObjCClassType() ||
+ PropertyTy->isObjCQualifiedClassType());
+ // In non-GC, non-ARC mode, 'Class' is treated as a 'void *', so there is
+ // no need to issue any warning.
+ if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC)
+ ;
+ else {
// Skip this warning in gc-only mode.
- if (getLangOptions().getGC() != LangOptions::GCOnly)
+ if (getLangOpts().getGC() != LangOptions::GCOnly)
Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
// If non-gc code warn that this is likely inappropriate.
- if (getLangOptions().getGC() == LangOptions::NonGC)
+ if (getLangOpts().getGC() == LangOptions::NonGC)
Diag(Loc, diag::warn_objc_property_default_assign_on_object);
+ }
}
// FIXME: Implement warning dependent on NSCopying being
@@ -1783,13 +1936,18 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
if (!(Attributes & ObjCDeclSpec::DQ_PR_copy)
&&!(Attributes & ObjCDeclSpec::DQ_PR_readonly)
- && getLangOptions().getGC() == LangOptions::GCOnly
+ && getLangOpts().getGC() == LangOptions::GCOnly
&& PropertyTy->isBlockPointerType())
Diag(Loc, diag::warn_objc_property_copy_missing_on_block);
- else if (getLangOptions().ObjCAutoRefCount &&
+ else if (getLangOpts().ObjCAutoRefCount &&
(Attributes & ObjCDeclSpec::DQ_PR_retain) &&
!(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
!(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
PropertyTy->isBlockPointerType())
Diag(Loc, diag::warn_objc_property_retain_of_block);
+
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_setter))
+ Diag(Loc, diag::warn_objc_readonly_property_has_setter);
+
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
index b0dd5e2..284c8de 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
@@ -40,7 +40,7 @@ static ExprResult
CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, bool HadMultipleCandidates,
SourceLocation Loc = SourceLocation(),
const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
- DeclRefExpr *DRE = new (S.Context) DeclRefExpr(Fn, Fn->getType(),
+ DeclRefExpr *DRE = new (S.Context) DeclRefExpr(Fn, false, Fn->getType(),
VK_LValue, Loc, LocInfo);
if (HadMultipleCandidates)
DRE->setHadMultipleCandidates(true);
@@ -258,6 +258,172 @@ isPointerConversionToVoidPointer(ASTContext& Context) const {
return false;
}
+/// Skip any implicit casts which could be either part of a narrowing conversion
+/// or after one in an implicit conversion.
+static const Expr *IgnoreNarrowingConversion(const Expr *Converted) {
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Converted)) {
+ switch (ICE->getCastKind()) {
+ case CK_NoOp:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ Converted = ICE->getSubExpr();
+ continue;
+
+ default:
+ return Converted;
+ }
+ }
+
+ return Converted;
+}
+
+/// Check if this standard conversion sequence represents a narrowing
+/// conversion, according to C++11 [dcl.init.list]p7.
+///
+/// \param Ctx The AST context.
+/// \param Converted The result of applying this standard conversion sequence.
+/// \param ConstantValue If this is an NK_Constant_Narrowing conversion, the
+/// value of the expression prior to the narrowing conversion.
+/// \param ConstantType If this is an NK_Constant_Narrowing conversion, the
+/// type of the expression prior to the narrowing conversion.
+NarrowingKind
+StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
+ const Expr *Converted,
+ APValue &ConstantValue,
+ QualType &ConstantType) const {
+ assert(Ctx.getLangOpts().CPlusPlus && "narrowing check outside C++");
+
+ // C++11 [dcl.init.list]p7:
+ // A narrowing conversion is an implicit conversion ...
+ QualType FromType = getToType(0);
+ QualType ToType = getToType(1);
+ switch (Second) {
+ // -- from a floating-point type to an integer type, or
+ //
+ // -- from an integer type or unscoped enumeration type to a floating-point
+ // type, except where the source is a constant expression and the actual
+ // value after conversion will fit into the target type and will produce
+ // the original value when converted back to the original type, or
+ case ICK_Floating_Integral:
+ if (FromType->isRealFloatingType() && ToType->isIntegralType(Ctx)) {
+ return NK_Type_Narrowing;
+ } else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) {
+ llvm::APSInt IntConstantValue;
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (Initializer &&
+ Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
+ // Convert the integer to the floating type.
+ llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
+ Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(),
+ llvm::APFloat::rmNearestTiesToEven);
+ // And back.
+ llvm::APSInt ConvertedValue = IntConstantValue;
+ bool ignored;
+ Result.convertToInteger(ConvertedValue,
+ llvm::APFloat::rmTowardZero, &ignored);
+ // If the resulting value is different, this was a narrowing conversion.
+ if (IntConstantValue != ConvertedValue) {
+ ConstantValue = APValue(IntConstantValue);
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ // Variables are always narrowings.
+ return NK_Variable_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+
+ // -- from long double to double or float, or from double to float, except
+ // where the source is a constant expression and the actual value after
+ // conversion is within the range of values that can be represented (even
+ // if it cannot be represented exactly), or
+ case ICK_Floating_Conversion:
+ if (FromType->isRealFloatingType() && ToType->isRealFloatingType() &&
+ Ctx.getFloatingTypeOrder(FromType, ToType) == 1) {
+ // FromType is larger than ToType.
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (Initializer->isCXX11ConstantExpr(Ctx, &ConstantValue)) {
+ // Constant!
+ assert(ConstantValue.isFloat());
+ llvm::APFloat FloatVal = ConstantValue.getFloat();
+ // Convert the source value into the target type.
+ bool ignored;
+ llvm::APFloat::opStatus ConvertStatus = FloatVal.convert(
+ Ctx.getFloatTypeSemantics(ToType),
+ llvm::APFloat::rmNearestTiesToEven, &ignored);
+ // If there was no overflow, the source value is within the range of
+ // values that can be represented.
+ if (ConvertStatus & llvm::APFloat::opOverflow) {
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ return NK_Variable_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+
+ // -- from an integer type or unscoped enumeration type to an integer type
+ // that cannot represent all the values of the original type, except where
+ // the source is a constant expression and the actual value after
+ // conversion will fit into the target type and will produce the original
+ // value when converted back to the original type.
+ case ICK_Boolean_Conversion: // Bools are integers too.
+ if (!FromType->isIntegralOrUnscopedEnumerationType()) {
+ // Boolean conversions can be from pointers and pointers to members
+ // [conv.bool], and those aren't considered narrowing conversions.
+ return NK_Not_Narrowing;
+ } // Otherwise, fall through to the integral case.
+ case ICK_Integral_Conversion: {
+ assert(FromType->isIntegralOrUnscopedEnumerationType());
+ assert(ToType->isIntegralOrUnscopedEnumerationType());
+ const bool FromSigned = FromType->isSignedIntegerOrEnumerationType();
+ const unsigned FromWidth = Ctx.getIntWidth(FromType);
+ const bool ToSigned = ToType->isSignedIntegerOrEnumerationType();
+ const unsigned ToWidth = Ctx.getIntWidth(ToType);
+
+ if (FromWidth > ToWidth ||
+ (FromWidth == ToWidth && FromSigned != ToSigned)) {
+ // Not all values of FromType can be represented in ToType.
+ llvm::APSInt InitializerValue;
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
+ ConstantValue = APValue(InitializerValue);
+
+ // Add a bit to the InitializerValue so we don't have to worry about
+ // signed vs. unsigned comparisons.
+ InitializerValue = InitializerValue.extend(
+ InitializerValue.getBitWidth() + 1);
+ // Convert the initializer to and from the target width and signed-ness.
+ llvm::APSInt ConvertedValue = InitializerValue;
+ ConvertedValue = ConvertedValue.trunc(ToWidth);
+ ConvertedValue.setIsSigned(ToSigned);
+ ConvertedValue = ConvertedValue.extend(InitializerValue.getBitWidth());
+ ConvertedValue.setIsSigned(InitializerValue.isSigned());
+ // If the result is different, this was a narrowing conversion.
+ if (ConvertedValue != InitializerValue) {
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ // Variables are always narrowings.
+ return NK_Variable_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+ }
+
+ default:
+ // Other kinds of conversions are not narrowings.
+ return NK_Not_Narrowing;
+ }
+}
+
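For orientation, a few C++11 brace-initializations and the NarrowingKind the routine above would assign them (the ill-formed ones are commented out so the snippet still compiles; classifications assume an 8-bit char):

    int main() {
      char ok{100};    // NK_Not_Narrowing: the constant fits in char
      double d{42};    // NK_Not_Narrowing: 42 round-trips exactly
      // char c1{300};            // NK_Constant_Narrowing: 300 overflows char
      // int n = 0; char c2{n};   // NK_Variable_Narrowing: non-constant source
      // int i{1.0};              // NK_Type_Narrowing: floating -> integral
      (void)ok;
      (void)d;
    }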
/// DebugPrint - Print this standard conversion sequence to standard
/// error. Useful for debugging overloading issues.
void StandardConversionSequence::DebugPrint() const {
@@ -305,7 +471,10 @@ void UserDefinedConversionSequence::DebugPrint() const {
Before.DebugPrint();
OS << " -> ";
}
- OS << '\'' << *ConversionFunction << '\'';
+ if (ConversionFunction)
+ OS << '\'' << *ConversionFunction << '\'';
+ else
+ OS << "aggregate initialization";
if (After.First || After.Second || After.Third) {
OS << " -> ";
After.DebugPrint();
@@ -538,10 +707,85 @@ OverloadCandidate::DeductionFailureInfo::getSecondArg() {
}
void OverloadCandidateSet::clear() {
- inherited::clear();
+ for (iterator i = begin(), e = end(); i != e; ++i)
+ for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
+ i->Conversions[ii].~ImplicitConversionSequence();
+ NumInlineSequences = 0;
+ Candidates.clear();
Functions.clear();
}
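The new clear() must invoke ~ImplicitConversionSequence() explicitly because the sequences are constructed into inline storage that no container owns. The pattern in miniature (self-contained sketch with a hypothetical Obj type):

    #include <new>

    struct Obj { ~Obj() {} };

    struct InlinePool {
      alignas(Obj) unsigned char Buffer[4 * sizeof(Obj)];
      unsigned Count = 0;

      Obj *add() {
        // Placement-new into the raw buffer: no automatic destruction.
        return new (Buffer + Count++ * sizeof(Obj)) Obj();
      }

      void clear() {
        // Run each destructor by hand, as the diff does for Conversions[ii],
        // then forget the objects.
        for (unsigned i = 0; i != Count; ++i)
          reinterpret_cast<Obj *>(Buffer + i * sizeof(Obj))->~Obj();
        Count = 0;
      }
    };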
+namespace {
+ class UnbridgedCastsSet {
+ struct Entry {
+ Expr **Addr;
+ Expr *Saved;
+ };
+ SmallVector<Entry, 2> Entries;
+
+ public:
+ void save(Sema &S, Expr *&E) {
+ assert(E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+ Entry entry = { &E, E };
+ Entries.push_back(entry);
+ E = S.stripARCUnbridgedCast(E);
+ }
+
+ void restore() {
+ for (SmallVectorImpl<Entry>::iterator
+ i = Entries.begin(), e = Entries.end(); i != e; ++i)
+ *i->Addr = i->Saved;
+ }
+ };
+}
+
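UnbridgedCastsSet is a plain save/mutate/restore list: it remembers where each expression pointer lives and what it held, so the stripped casts can be put back if overload resolution does not end up consuming the rewritten operands. The idiom in miniature (std-only sketch, hypothetical types):

    #include <vector>

    struct Expr { int kind; };

    class SaveList {
      struct Entry { Expr **Addr; Expr *Saved; };
      std::vector<Entry> Entries;

    public:
      void save(Expr *&E, Expr *Replacement) {
        Entries.push_back({&E, E}); // remember the slot and its old value
        E = Replacement;            // mutate the caller's slot in place
      }
      void restore() {
        for (auto &En : Entries)
          *En.Addr = En.Saved;      // undo every recorded mutation
      }
    };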
+/// checkPlaceholderForOverload - Do any interesting placeholder-like
+/// preprocessing on the given expression.
+///
+/// \param unbridgedCasts a collection to which to add unbridged casts;
+/// without this, they will be immediately diagnosed as errors
+///
+/// Return true on unrecoverable error.
+static bool checkPlaceholderForOverload(Sema &S, Expr *&E,
+ UnbridgedCastsSet *unbridgedCasts = 0) {
+ if (const BuiltinType *placeholder = E->getType()->getAsPlaceholderType()) {
+ // We can't handle overloaded expressions here because overload
+ // resolution might reasonably tweak them.
+ if (placeholder->getKind() == BuiltinType::Overload) return false;
+
+ // If the context potentially accepts unbridged ARC casts, strip
+ // the unbridged cast and add it to the collection for later restoration.
+ if (placeholder->getKind() == BuiltinType::ARCUnbridgedCast &&
+ unbridgedCasts) {
+ unbridgedCasts->save(S, E);
+ return false;
+ }
+
+ // Go ahead and check everything else.
+ ExprResult result = S.CheckPlaceholderExpr(E);
+ if (result.isInvalid())
+ return true;
+
+ E = result.take();
+ return false;
+ }
+
+ // Nothing to do.
+ return false;
+}
+
+/// checkArgPlaceholdersForOverload - Check a set of call operands for
+/// placeholders.
+static bool checkArgPlaceholdersForOverload(Sema &S, Expr **args,
+ unsigned numArgs,
+ UnbridgedCastsSet &unbridged) {
+ for (unsigned i = 0; i != numArgs; ++i)
+ if (checkPlaceholderForOverload(S, args[i], &unbridged))
+ return true;
+
+ return false;
+}
+
// IsOverload - Determine whether the given New declaration is an
// overload of the declarations in Old. This routine returns false if
// New and Old cannot be overloaded, e.g., if New has the same
@@ -750,6 +994,86 @@ bool Sema::isFunctionConsideredUnavailable(FunctionDecl *FD) {
return FD->isUnavailable() && !cast<Decl>(CurContext)->isUnavailable();
}
+/// \brief Tries a user-defined conversion from From to ToType.
+///
+/// Produces an implicit conversion sequence for when a standard conversion
+/// is not an option. See TryImplicitConversion for more information.
+static ImplicitConversionSequence
+TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
+ ImplicitConversionSequence ICS;
+
+ if (SuppressUserConversions) {
+ // We're not in the case above, so there is no conversion that
+ // we can perform.
+ ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
+ return ICS;
+ }
+
+ // Attempt user-defined conversion.
+ OverloadCandidateSet Conversions(From->getExprLoc());
+ OverloadingResult UserDefResult
+ = IsUserDefinedConversion(S, From, ToType, ICS.UserDefined, Conversions,
+ AllowExplicit);
+
+ if (UserDefResult == OR_Success) {
+ ICS.setUserDefined();
+ // C++ [over.ics.user]p4:
+ // A conversion of an expression of class type to the same class
+ // type is given Exact Match rank, and a conversion of an
+ // expression of class type to a base class of that type is
+ // given Conversion rank, in spite of the fact that a copy
+ // constructor (i.e., a user-defined conversion function) is
+ // called for those cases.
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ICS.UserDefined.ConversionFunction)) {
+ QualType FromCanon
+ = S.Context.getCanonicalType(From->getType().getUnqualifiedType());
+ QualType ToCanon
+ = S.Context.getCanonicalType(ToType).getUnqualifiedType();
+ if (Constructor->isCopyConstructor() &&
+ (FromCanon == ToCanon || S.IsDerivedFrom(FromCanon, ToCanon))) {
+ // Turn this into a "standard" conversion sequence, so that it
+ // gets ranked with standard conversion sequences.
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.setFromType(From->getType());
+ ICS.Standard.setAllToTypes(ToType);
+ ICS.Standard.CopyConstructor = Constructor;
+ if (ToCanon != FromCanon)
+ ICS.Standard.Second = ICK_Derived_To_Base;
+ }
+ }
+
+ // C++ [over.best.ics]p4:
+ // However, when considering the argument of a user-defined
+ // conversion function that is a candidate by 13.3.1.3 when
+ // invoked for the copying of the temporary in the second step
+ // of a class copy-initialization, or by 13.3.1.4, 13.3.1.5, or
+ // 13.3.1.6 in all cases, only standard conversion sequences and
+ // ellipsis conversion sequences are allowed.
+ if (SuppressUserConversions && ICS.isUserDefined()) {
+ ICS.setBad(BadConversionSequence::suppressed_user, From, ToType);
+ }
+ } else if (UserDefResult == OR_Ambiguous && !SuppressUserConversions) {
+ ICS.setAmbiguous();
+ ICS.Ambiguous.setFromType(From->getType());
+ ICS.Ambiguous.setToType(ToType);
+ for (OverloadCandidateSet::iterator Cand = Conversions.begin();
+ Cand != Conversions.end(); ++Cand)
+ if (Cand->Viable)
+ ICS.Ambiguous.addConversion(Cand->Function);
+ } else {
+ ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
+ }
+
+ return ICS;
+}
+
/// TryImplicitConversion - Attempt to perform an implicit conversion
/// from the given expression (Expr) to the given type (ToType). This
/// function returns an implicit conversion sequence that can be used
@@ -791,7 +1115,7 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
return ICS;
}
- if (!S.getLangOptions().CPlusPlus) {
+ if (!S.getLangOpts().CPlusPlus) {
ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
return ICS;
}
@@ -825,71 +1149,9 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
return ICS;
}
- if (SuppressUserConversions) {
- // We're not in the case above, so there is no conversion that
- // we can perform.
- ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
- return ICS;
- }
-
- // Attempt user-defined conversion.
- OverloadCandidateSet Conversions(From->getExprLoc());
- OverloadingResult UserDefResult
- = IsUserDefinedConversion(S, From, ToType, ICS.UserDefined, Conversions,
- AllowExplicit);
-
- if (UserDefResult == OR_Success) {
- ICS.setUserDefined();
- // C++ [over.ics.user]p4:
- // A conversion of an expression of class type to the same class
- // type is given Exact Match rank, and a conversion of an
- // expression of class type to a base class of that type is
- // given Conversion rank, in spite of the fact that a copy
- // constructor (i.e., a user-defined conversion function) is
- // called for those cases.
- if (CXXConstructorDecl *Constructor
- = dyn_cast<CXXConstructorDecl>(ICS.UserDefined.ConversionFunction)) {
- QualType FromCanon
- = S.Context.getCanonicalType(From->getType().getUnqualifiedType());
- QualType ToCanon
- = S.Context.getCanonicalType(ToType).getUnqualifiedType();
- if (Constructor->isCopyConstructor() &&
- (FromCanon == ToCanon || S.IsDerivedFrom(FromCanon, ToCanon))) {
- // Turn this into a "standard" conversion sequence, so that it
- // gets ranked with standard conversion sequences.
- ICS.setStandard();
- ICS.Standard.setAsIdentityConversion();
- ICS.Standard.setFromType(From->getType());
- ICS.Standard.setAllToTypes(ToType);
- ICS.Standard.CopyConstructor = Constructor;
- if (ToCanon != FromCanon)
- ICS.Standard.Second = ICK_Derived_To_Base;
- }
- }
-
- // C++ [over.best.ics]p4:
- // However, when considering the argument of a user-defined
- // conversion function that is a candidate by 13.3.1.3 when
- // invoked for the copying of the temporary in the second step
- // of a class copy-initialization, or by 13.3.1.4, 13.3.1.5, or
- // 13.3.1.6 in all cases, only standard conversion sequences and
- // ellipsis conversion sequences are allowed.
- if (SuppressUserConversions && ICS.isUserDefined()) {
- ICS.setBad(BadConversionSequence::suppressed_user, From, ToType);
- }
- } else if (UserDefResult == OR_Ambiguous && !SuppressUserConversions) {
- ICS.setAmbiguous();
- ICS.Ambiguous.setFromType(From->getType());
- ICS.Ambiguous.setToType(ToType);
- for (OverloadCandidateSet::iterator Cand = Conversions.begin();
- Cand != Conversions.end(); ++Cand)
- if (Cand->Viable)
- ICS.Ambiguous.addConversion(Cand->Function);
- } else {
- ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
- }
-
- return ICS;
+ return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
+ AllowExplicit, InOverloadResolution, CStyle,
+ AllowObjCWritebackConversion);
}
ImplicitConversionSequence
@@ -912,21 +1174,21 @@ Sema::TryImplicitConversion(Expr *From, QualType ToType,
/// explicit user-defined conversions are permitted.
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
- AssignmentAction Action, bool AllowExplicit,
- bool Diagnose) {
+ AssignmentAction Action, bool AllowExplicit) {
ImplicitConversionSequence ICS;
- return PerformImplicitConversion(From, ToType, Action, AllowExplicit, ICS,
- Diagnose);
+ return PerformImplicitConversion(From, ToType, Action, AllowExplicit, ICS);
}
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action, bool AllowExplicit,
- ImplicitConversionSequence& ICS,
- bool Diagnose) {
+ ImplicitConversionSequence& ICS) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
// Objective-C ARC: Determine whether we will allow the writeback conversion.
bool AllowObjCWritebackConversion
- = getLangOptions().ObjCAutoRefCount &&
+ = getLangOpts().ObjCAutoRefCount &&
(Action == AA_Passing || Action == AA_Sending);
ICS = clang::TryImplicitConversion(*this, From, ToType,
@@ -935,8 +1197,6 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
AllowObjCWritebackConversion);
- if (!Diagnose && ICS.isFailure())
- return ExprError();
return PerformImplicitConversion(From, ToType, ICS, Action);
}
@@ -1024,7 +1284,7 @@ static bool IsVectorConversion(ASTContext &Context, QualType FromType,
// same size
if (ToType->isVectorType() && FromType->isVectorType()) {
if (Context.areCompatibleVectorTypes(FromType, ToType) ||
- (Context.getLangOptions().LaxVectorConversions &&
+ (Context.getLangOpts().LaxVectorConversions &&
(Context.getTypeSize(FromType) == Context.getTypeSize(ToType)))) {
ICK = ICK_Vector_Conversion;
return true;
@@ -1034,6 +1294,11 @@ static bool IsVectorConversion(ASTContext &Context, QualType FromType,
return false;
}
+static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle);
+
/// IsStandardConversion - Determines whether there is a standard
/// conversion sequence (C++ [conv], C++ [over.ics.scs]) from the
/// expression From to the type ToType. Standard conversion sequences
@@ -1059,7 +1324,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// There are no standard conversions for class types in C++, so
// abort early. When overloading in C, however, we do permit
if (FromType->isRecordType() || ToType->isRecordType()) {
- if (S.getLangOptions().CPlusPlus)
+ if (S.getLangOpts().CPlusPlus)
return false;
// When we're overloading in C, we allow, as standard conversions,
@@ -1129,6 +1394,12 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
S.Context.getCanonicalType(FromType) != S.Context.OverloadTy) {
SCS.First = ICK_Lvalue_To_Rvalue;
+ // C11 6.3.2.1p2:
+ // ... if the lvalue has atomic type, the value has the non-atomic version
+ // of the type of the lvalue ...
+ if (const AtomicType *Atomic = FromType->getAs<AtomicType>())
+ FromType = Atomic->getValueType();
+
// If T is a non-class type, the type of the rvalue is the
// cv-unqualified version of T. Otherwise, the type of the rvalue
// is T (C++ 4.1p1). C++ can't get here with class types; in C, we
@@ -1247,7 +1518,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
} else if (IsVectorConversion(S.Context, FromType, ToType, SecondICK)) {
SCS.Second = SecondICK;
FromType = ToType.getUnqualifiedType();
- } else if (!S.getLangOptions().CPlusPlus &&
+ } else if (!S.getLangOpts().CPlusPlus &&
S.Context.typesAreCompatible(ToType, FromType)) {
// Compatible conversions (Clang extension for C function overloading)
SCS.Second = ICK_Compatible_Conversion;
@@ -1260,6 +1531,11 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS, CStyle)) {
SCS.Second = ICK_TransparentUnionConversion;
FromType = ToType;
+ } else if (tryAtomicConversion(S, From, ToType, InOverloadResolution, SCS,
+ CStyle)) {
+ // tryAtomicConversion has updated the standard conversion sequence
+ // appropriately.
+ return true;
} else {
// No second conversion required.
SCS.Second = ICK_Identity;
@@ -1480,7 +1756,7 @@ bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
// C99 6.3.1.5p1:
// When a float is promoted to double or long double, or a
// double is promoted to long double [...].
- if (!getLangOptions().CPlusPlus &&
+ if (!getLangOpts().CPlusPlus &&
(FromBuiltin->getKind() == BuiltinType::Float ||
FromBuiltin->getKind() == BuiltinType::Double) &&
(ToBuiltin->getKind() == BuiltinType::LongDouble))
@@ -1646,7 +1922,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
// , including objective-c pointers.
QualType ToPointeeType = ToTypePtr->getPointeeType();
if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType() &&
- !getLangOptions().ObjCAutoRefCount) {
+ !getLangOpts().ObjCAutoRefCount) {
ConvertedType = BuildSimilarlyQualifiedPointerType(
FromType->getAs<ObjCObjectPointerType>(),
ToPointeeType,
@@ -1677,7 +1953,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
}
// MSVC allows implicit function to void* type conversion.
- if (getLangOptions().MicrosoftExt && FromPointeeType->isFunctionType() &&
+ if (getLangOpts().MicrosoftExt && FromPointeeType->isFunctionType() &&
ToPointeeType->isVoidType()) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
@@ -1687,7 +1963,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
// When we're overloading in C, we allow a special kind of pointer
// conversion for compatible-but-not-identical pointee types.
- if (!getLangOptions().CPlusPlus &&
+ if (!getLangOpts().CPlusPlus &&
Context.typesAreCompatible(FromPointeeType, ToPointeeType)) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
@@ -1708,7 +1984,7 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
//
// Note that we do not check for ambiguity or inaccessibility
// here. That is handled by CheckPointerConversion.
- if (getLangOptions().CPlusPlus &&
+ if (getLangOpts().CPlusPlus &&
FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
!Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType) &&
!RequireCompleteType(From->getLocStart(), FromPointeeType, PDiag()) &&
@@ -1750,7 +2026,7 @@ static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType,
bool &IncompatibleObjC) {
- if (!getLangOptions().ObjC1)
+ if (!getLangOpts().ObjC1)
return false;
// The set of qualifiers on the type we're converting from.
@@ -1789,7 +2065,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
if (Context.canAssignObjCInterfaces(ToObjCPtr, FromObjCPtr)) {
const ObjCInterfaceType* LHS = ToObjCPtr->getInterfaceType();
const ObjCInterfaceType* RHS = FromObjCPtr->getInterfaceType();
- if (getLangOptions().CPlusPlus && LHS && RHS &&
+ if (getLangOpts().CPlusPlus && LHS && RHS &&
!ToObjCPtr->getPointeeType().isAtLeastAsQualifiedAs(
FromObjCPtr->getPointeeType()))
return false;
@@ -1945,7 +2221,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
/// this conversion.
bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType) {
- if (!getLangOptions().ObjCAutoRefCount ||
+ if (!getLangOpts().ObjCAutoRefCount ||
Context.hasSameUnqualifiedType(FromType, ToType))
return false;
@@ -1959,7 +2235,7 @@ bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
Qualifiers ToQuals = ToPointee.getQualifiers();
if (!ToPointee->isObjCLifetimeType() ||
ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
- !ToQuals.withoutObjCGLifetime().empty())
+ !ToQuals.withoutObjCLifetime().empty())
return false;
// Argument must be a pointer to __strong to __weak.
@@ -2049,7 +2325,7 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
} else {
QualType RHS = FromFunctionType->getResultType();
QualType LHS = ToFunctionType->getResultType();
- if ((!getLangOptions().CPlusPlus || !RHS->isRecordType()) &&
+ if ((!getLangOpts().CPlusPlus || !RHS->isRecordType()) &&
!RHS.hasQualifiers() && LHS.hasQualifiers())
LHS = LHS.getUnqualifiedType();
@@ -2091,22 +2367,131 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
return true;
}
+enum {
+ ft_default,
+ ft_different_class,
+ ft_parameter_arity,
+ ft_parameter_mismatch,
+ ft_return_type,
+ ft_qualifer_mismatch
+};
+
+/// HandleFunctionTypeMismatch - Gives diagnostic information for differing
+/// function types. Catches a different number of parameters, mismatches in
+/// parameter types, and different return types.
+void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
+ QualType FromType, QualType ToType) {
+ // If either type is not valid, include no extra info.
+ if (FromType.isNull() || ToType.isNull()) {
+ PDiag << ft_default;
+ return;
+ }
+
+ // Get the function type from the pointers.
+ if (FromType->isMemberPointerType() && ToType->isMemberPointerType()) {
+ const MemberPointerType *FromMember = FromType->getAs<MemberPointerType>(),
+ *ToMember = ToType->getAs<MemberPointerType>();
+ if (FromMember->getClass() != ToMember->getClass()) {
+ PDiag << ft_different_class << QualType(ToMember->getClass(), 0)
+ << QualType(FromMember->getClass(), 0);
+ return;
+ }
+ FromType = FromMember->getPointeeType();
+ ToType = ToMember->getPointeeType();
+ }
+
+ if (FromType->isPointerType())
+ FromType = FromType->getPointeeType();
+ if (ToType->isPointerType())
+ ToType = ToType->getPointeeType();
+
+ // Remove references.
+ FromType = FromType.getNonReferenceType();
+ ToType = ToType.getNonReferenceType();
+
+ // Don't print extra info for non-specialized template functions.
+ if (FromType->isInstantiationDependentType() &&
+ !FromType->getAs<TemplateSpecializationType>()) {
+ PDiag << ft_default;
+ return;
+ }
+
+ // No extra info for same types.
+ if (Context.hasSameType(FromType, ToType)) {
+ PDiag << ft_default;
+ return;
+ }
+
+ const FunctionProtoType *FromFunction = FromType->getAs<FunctionProtoType>(),
+ *ToFunction = ToType->getAs<FunctionProtoType>();
+
+ // Both types need to be function types.
+ if (!FromFunction || !ToFunction) {
+ PDiag << ft_default;
+ return;
+ }
+
+ if (FromFunction->getNumArgs() != ToFunction->getNumArgs()) {
+ PDiag << ft_parameter_arity << ToFunction->getNumArgs()
+ << FromFunction->getNumArgs();
+ return;
+ }
+
+ // Handle different parameter types.
+ unsigned ArgPos;
+ if (!FunctionArgTypesAreEqual(FromFunction, ToFunction, &ArgPos)) {
+ PDiag << ft_parameter_mismatch << ArgPos + 1
+ << ToFunction->getArgType(ArgPos)
+ << FromFunction->getArgType(ArgPos);
+ return;
+ }
+
+ // Handle different return type.
+ if (!Context.hasSameType(FromFunction->getResultType(),
+ ToFunction->getResultType())) {
+ PDiag << ft_return_type << ToFunction->getResultType()
+ << FromFunction->getResultType();
+ return;
+ }
+
+ unsigned FromQuals = FromFunction->getTypeQuals(),
+ ToQuals = ToFunction->getTypeQuals();
+ if (FromQuals != ToQuals) {
+ PDiag << ft_qualifer_mismatch << ToQuals << FromQuals;
+ return;
+ }
+
+ // Unable to find a difference, so add no extra info.
+ PDiag << ft_default;
+}
+
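To make the ft_* categories above concrete, here is a hedged sketch of user code that would exercise each payload; f, g, h, k, and demo are hypothetical names, and the commented-out assignments are intentionally ill-formed:

    // f is defined so its address can be taken; the rest are declarations.
    void f(int) {}
    void g(int, int);
    long h(int);
    void k(double);

    void demo() {
      void (*p)(int) = f;  // OK: exact match
      // p = g;            // would carry ft_parameter_arity (2 vs. 1)
      // p = k;            // would carry ft_parameter_mismatch (parameter 1)
      // p = h;            // would carry ft_return_type (long vs. void)
      (void)p;
    }
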
/// FunctionArgTypesAreEqual - This routine checks two function proto types
-/// for equlity of their argument types. Caller has already checked that
+/// for equality of their argument types. Caller has already checked that
/// they have same number of arguments. This routine assumes that Objective-C
/// pointer types which only differ in their protocol qualifiers are equal.
+/// If the parameters are different, ArgPos will have the parameter index
+/// of the first different parameter.
bool Sema::FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
- const FunctionProtoType *NewType) {
- if (!getLangOptions().ObjC1)
- return std::equal(OldType->arg_type_begin(), OldType->arg_type_end(),
- NewType->arg_type_begin());
+ const FunctionProtoType *NewType,
+ unsigned *ArgPos) {
+ if (!getLangOpts().ObjC1) {
+ for (FunctionProtoType::arg_type_iterator O = OldType->arg_type_begin(),
+ N = NewType->arg_type_begin(),
+ E = OldType->arg_type_end(); O && (O != E); ++O, ++N) {
+ if (!Context.hasSameType(*O, *N)) {
+ if (ArgPos) *ArgPos = O - OldType->arg_type_begin();
+ return false;
+ }
+ }
+ return true;
+ }
for (FunctionProtoType::arg_type_iterator O = OldType->arg_type_begin(),
N = NewType->arg_type_begin(),
E = OldType->arg_type_end(); O && (O != E); ++O, ++N) {
QualType ToType = (*O);
QualType FromType = (*N);
- if (ToType != FromType) {
+ if (!Context.hasSameType(ToType, FromType)) {
if (const PointerType *PTTo = ToType->getAs<PointerType>()) {
if (const PointerType *PTFr = FromType->getAs<PointerType>())
if ((PTTo->getPointeeType()->isObjCQualifiedIdType() &&
@@ -2119,9 +2504,12 @@ bool Sema::FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
ToType->getAs<ObjCObjectPointerType>()) {
if (const ObjCObjectPointerType *PTFr =
FromType->getAs<ObjCObjectPointerType>())
- if (PTTo->getInterfaceDecl() == PTFr->getInterfaceDecl())
+ if (Context.hasSameUnqualifiedType(
+ PTTo->getObjectType()->getBaseType(),
+ PTFr->getObjectType()->getBaseType()))
continue;
}
+ if (ArgPos) *ArgPos = O - OldType->arg_type_begin();
return false;
}
}
@@ -2386,6 +2774,121 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
return UnwrappedAnyPointer && Context.hasSameUnqualifiedType(FromType,ToType);
}
+/// \brief - Determine whether this is a conversion from a scalar type to an
+/// atomic type.
+///
+/// If successful, updates \c SCS's second and third steps in the conversion
+/// sequence to finish the conversion.
+static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle) {
+ const AtomicType *ToAtomic = ToType->getAs<AtomicType>();
+ if (!ToAtomic)
+ return false;
+
+ StandardConversionSequence InnerSCS;
+ if (!IsStandardConversion(S, From, ToAtomic->getValueType(),
+ InOverloadResolution, InnerSCS,
+ CStyle, /*AllowObjCWritebackConversion=*/false))
+ return false;
+
+ SCS.Second = InnerSCS.Second;
+ SCS.setToType(1, InnerSCS.getToType(1));
+ SCS.Third = InnerSCS.Third;
+ SCS.QualificationIncludesObjCLifetime
+ = InnerSCS.QualificationIncludesObjCLifetime;
+ SCS.setToType(2, InnerSCS.getToType(2));
+ return true;
+}
+
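A minimal sketch of the conversions this models, assuming C11 _Atomic semantics (Clang also accepts _Atomic in C++ as an extension); counter and total are hypothetical names:

    _Atomic int counter = 0;  // inner conversion int -> int is the identity
    _Atomic long total = 42;  // inner integral conversion int -> long,
                              // recorded in SCS.Second as described above
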
+static bool isFirstArgumentCompatibleWithType(ASTContext &Context,
+ CXXConstructorDecl *Constructor,
+ QualType Type) {
+ const FunctionProtoType *CtorType =
+ Constructor->getType()->getAs<FunctionProtoType>();
+ if (CtorType->getNumArgs() > 0) {
+ QualType FirstArg = CtorType->getArgType(0);
+ if (Context.hasSameUnqualifiedType(Type, FirstArg.getNonReferenceType()))
+ return true;
+ }
+ return false;
+}
+
+static OverloadingResult
+IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
+ CXXRecordDecl *To,
+ UserDefinedConversionSequence &User,
+ OverloadCandidateSet &CandidateSet,
+ bool AllowExplicit) {
+ DeclContext::lookup_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(To);
+ Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = 0;
+ FunctionTemplateDecl *ConstructorTmpl
+ = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor
+ = cast<CXXConstructorDecl>(ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ bool Usable = !Constructor->isInvalidDecl() &&
+ S.isInitListConstructor(Constructor) &&
+ (AllowExplicit || !Constructor->isExplicit());
+ if (Usable) {
+ // If the first argument is (a reference to) the target type,
+ // suppress conversions.
+ bool SuppressUserConversions =
+ isFirstArgumentCompatibleWithType(S.Context, Constructor, ToType);
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ From, CandidateSet,
+ SuppressUserConversions);
+ else
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ From, CandidateSet,
+ SuppressUserConversions);
+ }
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(S, From->getLocStart(), Best, true)) {
+ case OR_Success: {
+ // Record the standard conversion we used and the conversion function.
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
+ S.MarkFunctionReferenced(From->getLocStart(), Constructor);
+
+ QualType ThisType = Constructor->getThisType(S.Context);
+ // Initializer lists don't have conversions as such.
+ User.Before.setAsIdentityConversion();
+ User.HadMultipleCandidates = HadMultipleCandidates;
+ User.ConversionFunction = Constructor;
+ User.FoundConversionFunction = Best->FoundDecl;
+ User.After.setAsIdentityConversion();
+ User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
+ User.After.setAllToTypes(ToType);
+ return OR_Success;
+ }
+
+ case OR_No_Viable_Function:
+ return OR_No_Viable_Function;
+ case OR_Deleted:
+ return OR_Deleted;
+ case OR_Ambiguous:
+ return OR_Ambiguous;
+ }
+
+ llvm_unreachable("Invalid OverloadResult!");
+}
+
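For illustration, a hedged C++11 sketch of the initializer-list constructor path above; IntSet, take, and demo are hypothetical names:

    #include <initializer_list>

    struct IntSet {
      IntSet(std::initializer_list<int>) {}  // initializer-list constructor
      IntSet(int) {}                         // ordinary converting constructor
    };

    void take(IntSet) {}

    void demo() {
      take({1, 2, 3});  // only the initializer-list constructor is viable
      take({4});        // the initializer-list constructor is still tried
                        // first and wins, per C++11 [over.match.list]
    }
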
/// Determines whether there is a user-defined conversion sequence
/// (C++ [over.ics.user]) that converts expression From to the type
/// ToType. If such a conversion exists, User will contain the
@@ -2398,8 +2901,8 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
/// functions (C++0x [class.conv.fct]p2).
static OverloadingResult
IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
- UserDefinedConversionSequence& User,
- OverloadCandidateSet& CandidateSet,
+ UserDefinedConversionSequence &User,
+ OverloadCandidateSet &CandidateSet,
bool AllowExplicit) {
// Whether we will only visit constructors.
bool ConstructorsOnly = false;
@@ -2428,6 +2931,26 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// We're not going to find any constructors.
} else if (CXXRecordDecl *ToRecordDecl
= dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
+
+ Expr **Args = &From;
+ unsigned NumArgs = 1;
+ bool ListInitializing = false;
+ if (InitListExpr *InitList = dyn_cast<InitListExpr>(From)) {
+ // But first, see if there is an init-list-constructor that will work.
+ OverloadingResult Result = IsInitializerListConstructorConversion(
+ S, From, ToType, ToRecordDecl, User, CandidateSet, AllowExplicit);
+ if (Result != OR_No_Viable_Function)
+ return Result;
+ // Never mind.
+ CandidateSet.clear();
+
+ // If we're list-initializing, we pass the individual elements as
+ // arguments, not the entire list.
+ Args = InitList->getInits();
+ NumArgs = InitList->getNumInits();
+ ListInitializing = true;
+ }
+
DeclContext::lookup_iterator Con, ConEnd;
for (llvm::tie(Con, ConEnd) = S.LookupConstructors(ToRecordDecl);
Con != ConEnd; ++Con) {
@@ -2444,28 +2967,40 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
else
Constructor = cast<CXXConstructorDecl>(D);
- if (!Constructor->isInvalidDecl() &&
- Constructor->isConvertingConstructor(AllowExplicit)) {
+ bool Usable = !Constructor->isInvalidDecl();
+ if (ListInitializing)
+ Usable = Usable && (AllowExplicit || !Constructor->isExplicit());
+ else
+ Usable = Usable && Constructor->isConvertingConstructor(AllowExplicit);
+ if (Usable) {
+ bool SuppressUserConversions = !ConstructorsOnly;
+ if (SuppressUserConversions && ListInitializing) {
+ SuppressUserConversions = false;
+ if (NumArgs == 1) {
+ // If the first argument is (a reference to) the target type,
+ // suppress conversions.
+ SuppressUserConversions = isFirstArgumentCompatibleWithType(
+ S.Context, Constructor, ToType);
+ }
+ }
if (ConstructorTmpl)
S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
/*ExplicitArgs*/ 0,
- &From, 1, CandidateSet,
- /*SuppressUserConversions=*/
- !ConstructorsOnly);
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
else
// Allow one user-defined conversion when user specifies a
// From->ToType conversion via a static cast (c-style, etc).
S.AddOverloadCandidate(Constructor, FoundDecl,
- &From, 1, CandidateSet,
- /*SuppressUserConversions=*/
- !ConstructorsOnly);
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
}
}
}
}
// Enumerate conversion functions, if we're allowed to.
- if (ConstructorsOnly) {
+ if (ConstructorsOnly || isa<InitListExpr>(From)) {
} else if (S.RequireCompleteType(From->getLocStart(), From->getType(),
S.PDiag(0) << From->getSourceRange())) {
// No conversion functions from incomplete types.
@@ -2512,7 +3047,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// Record the standard conversion we used and the conversion function.
if (CXXConstructorDecl *Constructor
= dyn_cast<CXXConstructorDecl>(Best->Function)) {
- S.MarkDeclarationReferenced(From->getLocStart(), Constructor);
+ S.MarkFunctionReferenced(From->getLocStart(), Constructor);
// C++ [over.ics.user]p1:
// If the user-defined conversion is specified by a
@@ -2521,11 +3056,16 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// the argument of the constructor.
//
QualType ThisType = Constructor->getThisType(S.Context);
- if (Best->Conversions[0].isEllipsis())
- User.EllipsisConversion = true;
- else {
- User.Before = Best->Conversions[0].Standard;
- User.EllipsisConversion = false;
+ if (isa<InitListExpr>(From)) {
+ // Initializer lists don't have conversions as such.
+ User.Before.setAsIdentityConversion();
+ } else {
+ if (Best->Conversions[0].isEllipsis())
+ User.EllipsisConversion = true;
+ else {
+ User.Before = Best->Conversions[0].Standard;
+ User.EllipsisConversion = false;
+ }
}
User.HadMultipleCandidates = HadMultipleCandidates;
User.ConversionFunction = Constructor;
@@ -2534,9 +3074,10 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
User.After.setAllToTypes(ToType);
return OR_Success;
- } else if (CXXConversionDecl *Conversion
+ }
+ if (CXXConversionDecl *Conversion
= dyn_cast<CXXConversionDecl>(Best->Function)) {
- S.MarkDeclarationReferenced(From->getLocStart(), Conversion);
+ S.MarkFunctionReferenced(From->getLocStart(), Conversion);
// C++ [over.ics.user]p1:
//
@@ -2561,10 +3102,8 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// 13.3.3.1).
User.After = Best->FinalConversion;
return OR_Success;
- } else {
- llvm_unreachable("Not a constructor or conversion function?");
- return OR_No_Viable_Function;
}
+ llvm_unreachable("Not a constructor or conversion function?");
case OR_No_Viable_Function:
return OR_No_Viable_Function;
@@ -2576,7 +3115,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
return OR_Ambiguous;
}
- return OR_No_Viable_Function;
+ llvm_unreachable("Invalid OverloadResult!");
}
bool
@@ -2587,19 +3126,54 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
CandidateSet, false);
if (OvResult == OR_Ambiguous)
- Diag(From->getSourceRange().getBegin(),
+ Diag(From->getLocStart(),
diag::err_typecheck_ambiguous_condition)
<< From->getType() << ToType << From->getSourceRange();
else if (OvResult == OR_No_Viable_Function && !CandidateSet.empty())
- Diag(From->getSourceRange().getBegin(),
+ Diag(From->getLocStart(),
diag::err_typecheck_nonviable_condition)
<< From->getType() << ToType << From->getSourceRange();
else
return false;
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, &From, 1);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, From);
return true;
}
+/// \brief Compare the user-defined conversion functions or constructors
+/// of two user-defined conversion sequences to determine whether any ordering
+/// is possible.
+static ImplicitConversionSequence::CompareKind
+compareConversionFunctions(Sema &S,
+ FunctionDecl *Function1,
+ FunctionDecl *Function2) {
+ if (!S.getLangOpts().ObjC1 || !S.getLangOpts().CPlusPlus0x)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ // Objective-C++:
+ // If both conversion functions are implicitly-declared conversions from
+ // a lambda closure type to a function pointer and a block pointer,
+ // respectively, always prefer the conversion to a function pointer,
+ // because the function pointer is more lightweight and is more likely
+ // to keep code working.
+ CXXConversionDecl *Conv1 = dyn_cast<CXXConversionDecl>(Function1);
+ if (!Conv1)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ CXXConversionDecl *Conv2 = dyn_cast<CXXConversionDecl>(Function2);
+ if (!Conv2)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ if (Conv1->getParent()->isLambda() && Conv2->getParent()->isLambda()) {
+ bool Block1 = Conv1->getConversionType()->isBlockPointerType();
+ bool Block2 = Conv2->getConversionType()->isBlockPointerType();
+ if (Block1 != Block2)
+ return Block1? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
+
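A hedged sketch of the Objective-C++ rule above; it only applies when compiling Objective-C++11 with blocks enabled (-fblocks), and run and demo are hypothetical names:

    void run(void (*fp)()) {}  // #1: function pointer overload
    void run(void (^bp)()) {}  // #2: block pointer overload

    void demo() {
      run([] {});  // both implicitly-declared lambda conversions are viable;
                   // the conversion to a function pointer (#1) is preferred
    }
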
/// CompareImplicitConversionSequences - Compare two implicit
/// conversion sequences to determine whether one is better than the
/// other or if they are indistinguishable (C++ 13.3.3.2).
@@ -2624,7 +3198,7 @@ CompareImplicitConversionSequences(Sema &S,
// from any other user-defined conversion sequence.
if (ICS1.getKindRank() < ICS2.getKindRank())
return ImplicitConversionSequence::Better;
- else if (ICS2.getKindRank() < ICS1.getKindRank())
+ if (ICS2.getKindRank() < ICS1.getKindRank())
return ImplicitConversionSequence::Worse;
// The following checks require both conversion sequences to be of
@@ -2632,11 +3206,15 @@ CompareImplicitConversionSequences(Sema &S,
if (ICS1.getKind() != ICS2.getKind())
return ImplicitConversionSequence::Indistinguishable;
+ ImplicitConversionSequence::CompareKind Result =
+ ImplicitConversionSequence::Indistinguishable;
+
// Two implicit conversion sequences of the same form are
// indistinguishable conversion sequences unless one of the
// following rules apply: (C++ 13.3.3.2p3):
if (ICS1.isStandard())
- return CompareStandardConversionSequences(S, ICS1.Standard, ICS2.Standard);
+ Result = CompareStandardConversionSequences(S,
+ ICS1.Standard, ICS2.Standard);
else if (ICS1.isUserDefined()) {
// User-defined conversion sequence U1 is a better conversion
// sequence than another user-defined conversion sequence U2 if
@@ -2646,12 +3224,31 @@ CompareImplicitConversionSequences(Sema &S,
// U2 (C++ 13.3.3.2p3).
if (ICS1.UserDefined.ConversionFunction ==
ICS2.UserDefined.ConversionFunction)
- return CompareStandardConversionSequences(S,
- ICS1.UserDefined.After,
- ICS2.UserDefined.After);
+ Result = CompareStandardConversionSequences(S,
+ ICS1.UserDefined.After,
+ ICS2.UserDefined.After);
+ else
+ Result = compareConversionFunctions(S,
+ ICS1.UserDefined.ConversionFunction,
+ ICS2.UserDefined.ConversionFunction);
+ }
+
+ // List-initialization sequence L1 is a better conversion sequence than
+ // list-initialization sequence L2 if L1 converts to std::initializer_list<X>
+ // for some X and L2 does not.
+ if (Result == ImplicitConversionSequence::Indistinguishable &&
+ !ICS1.isBad() &&
+ ICS1.isListInitializationSequence() &&
+ ICS2.isListInitializationSequence()) {
+ if (ICS1.isStdInitializerListElement() &&
+ !ICS2.isStdInitializerListElement())
+ return ImplicitConversionSequence::Better;
+ if (!ICS1.isStdInitializerListElement() &&
+ ICS2.isStdInitializerListElement())
+ return ImplicitConversionSequence::Worse;
}
- return ImplicitConversionSequence::Indistinguishable;
+ return Result;
}
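A hedged C++11 sketch of the new list-initialization tie-breaker above; f and demo are hypothetical names:

    #include <initializer_list>

    void f(std::initializer_list<int>) {}  // converts to initializer_list<X>
    void f(long) {}                        // converts the lone element instead

    void demo() {
      f({1});  // both list-initialization sequences are viable; the
               // initializer_list overload wins under the rule above
    }
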
static bool hasSimilarType(ASTContext &Context, QualType T1, QualType T2) {
@@ -2899,7 +3496,7 @@ CompareStandardConversionSequences(Sema &S,
// }
// Here, MSVC will call f(int) instead of generating a compile error
// as clang will do in standard mode.
- if (S.getLangOptions().MicrosoftMode &&
+ if (S.getLangOpts().MicrosoftMode &&
SCS1.Second == ICK_Integral_Conversion &&
SCS2.Second == ICK_Floating_Integral &&
S.Context.getTypeSize(SCS1.getFromType()) ==
@@ -3395,7 +3992,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
return false;
if (Best->Function)
- S.MarkDeclarationReferenced(DeclLoc, Best->Function);
+ S.MarkFunctionReferenced(DeclLoc, Best->Function);
ICS.setUserDefined();
ICS.UserDefined.Before = Best->Conversions[0].Standard;
ICS.UserDefined.After = Best->FinalConversion;
@@ -3423,13 +4020,13 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
return false;
}
- return false;
+ llvm_unreachable("Invalid OverloadResult!");
}
/// \brief Compute an implicit conversion sequence for reference
/// initialization.
static ImplicitConversionSequence
-TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
+TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
SourceLocation DeclLoc,
bool SuppressUserConversions,
bool AllowExplicit) {
@@ -3566,7 +4163,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// allow the use of rvalue references in C++98/03 for the benefit of
// standard library implementors; therefore, we need the xvalue check here.
ICS.Standard.DirectBinding =
- S.getLangOptions().CPlusPlus0x ||
+ S.getLangOpts().CPlusPlus0x ||
(InitCategory.isPRValue() && !T2->isRecordType());
ICS.Standard.IsLvalueReference = !isRValRef;
ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
@@ -3692,6 +4289,216 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
return ICS;
}
+static ImplicitConversionSequence
+TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion,
+ bool AllowExplicit = false);
+
+/// TryListConversion - Try to copy-initialize a value of type ToType from the
+/// initializer list From.
+static ImplicitConversionSequence
+TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion) {
+ // C++11 [over.ics.list]p1:
+ // When an argument is an initializer list, it is not an expression and
+ // special rules apply for converting it to a parameter type.
+
+ ImplicitConversionSequence Result;
+ Result.setBad(BadConversionSequence::no_conversion, From, ToType);
+ Result.setListInitializationSequence();
+
+ // We need a complete type for what follows. Incomplete types can never be
+ // initialized from init lists.
+ if (S.RequireCompleteType(From->getLocStart(), ToType, S.PDiag()))
+ return Result;
+
+ // C++11 [over.ics.list]p2:
+ // If the parameter type is std::initializer_list<X> or "array of X" and
+ // all the elements can be implicitly converted to X, the implicit
+ // conversion sequence is the worst conversion necessary to convert an
+ // element of the list to X.
+ bool toStdInitializerList = false;
+ QualType X;
+ if (ToType->isArrayType())
+ X = S.Context.getBaseElementType(ToType);
+ else
+ toStdInitializerList = S.isStdInitializerList(ToType, &X);
+ if (!X.isNull()) {
+ for (unsigned i = 0, e = From->getNumInits(); i < e; ++i) {
+ Expr *Init = From->getInit(i);
+ ImplicitConversionSequence ICS =
+ TryCopyInitialization(S, Init, X, SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ // If a single element isn't convertible, fail.
+ if (ICS.isBad()) {
+ Result = ICS;
+ break;
+ }
+ // Otherwise, look for the worst conversion.
+ if (Result.isBad() ||
+ CompareImplicitConversionSequences(S, ICS, Result) ==
+ ImplicitConversionSequence::Worse)
+ Result = ICS;
+ }
+
+ // For an empty list, we won't have computed any conversion sequence.
+ // Introduce the identity conversion sequence.
+ if (From->getNumInits() == 0) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ }
+
+ Result.setListInitializationSequence();
+ Result.setStdInitializerListElement(toStdInitializerList);
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p3:
+ // Otherwise, if the parameter is a non-aggregate class X and overload
+ // resolution chooses a single best constructor [...] the implicit
+ // conversion sequence is a user-defined conversion sequence. If multiple
+ // constructors are viable but none is better than the others, the
+ // implicit conversion sequence is a user-defined conversion sequence.
+ if (ToType->isRecordType() && !ToType->isAggregateType()) {
+ // This function can deal with initializer lists.
+ Result = TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
+ /*AllowExplicit=*/false,
+ InOverloadResolution, /*CStyle=*/false,
+ AllowObjCWritebackConversion);
+ Result.setListInitializationSequence();
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p4:
+ // Otherwise, if the parameter has an aggregate type which can be
+ // initialized from the initializer list [...] the implicit conversion
+ // sequence is a user-defined conversion sequence.
+ if (ToType->isAggregateType()) {
+ // Type is an aggregate, argument is an init list. At this point it comes
+ // down to checking whether the initialization works.
+ // FIXME: Find out whether this parameter is consumed or not.
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, ToType,
+ /*Consumed=*/false);
+ if (S.CanPerformCopyInitialization(Entity, S.Owned(From))) {
+ Result.setUserDefined();
+ Result.UserDefined.Before.setAsIdentityConversion();
+ // Initializer lists don't have a type.
+ Result.UserDefined.Before.setFromType(QualType());
+ Result.UserDefined.Before.setAllToTypes(QualType());
+
+ Result.UserDefined.After.setAsIdentityConversion();
+ Result.UserDefined.After.setFromType(ToType);
+ Result.UserDefined.After.setAllToTypes(ToType);
+ Result.UserDefined.ConversionFunction = 0;
+ }
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p5:
+ // Otherwise, if the parameter is a reference, see 13.3.3.1.4.
+ if (ToType->isReferenceType()) {
+ // The standard is notoriously unclear here, since 13.3.3.1.4 doesn't
+ // mention initializer lists in any way. So we go by what list-
+ // initialization would do and try to extrapolate from that.
+
+ QualType T1 = ToType->getAs<ReferenceType>()->getPointeeType();
+
+ // If the initializer list has a single element that is reference-related
+ // to the parameter type, we initialize the reference from that.
+ if (From->getNumInits() == 1) {
+ Expr *Init = From->getInit(0);
+
+ QualType T2 = Init->getType();
+
+ // If the initializer is the address of an overloaded function, try
+ // to resolve the overloaded function. If all goes well, T2 is the
+ // type of the resulting function.
+ if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(
+ Init, ToType, false, Found))
+ T2 = Fn->getType();
+ }
+
+ // Compute some basic properties of the types and the initializer.
+ bool dummy1 = false;
+ bool dummy2 = false;
+ bool dummy3 = false;
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(From->getLocStart(), T1, T2, dummy1,
+ dummy2, dummy3);
+
+ if (RefRelationship >= Sema::Ref_Related)
+ return TryReferenceInit(S, Init, ToType,
+ /*FIXME:*/From->getLocStart(),
+ SuppressUserConversions,
+ /*AllowExplicit=*/false);
+ }
+
+ // Otherwise, we bind the reference to a temporary created from the
+ // initializer list.
+ Result = TryListConversion(S, From, T1, SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ if (Result.isFailure())
+ return Result;
+ assert(!Result.isEllipsis() &&
+ "Sub-initialization cannot result in ellipsis conversion.");
+
+ // Can we even bind to a temporary?
+ if (ToType->isRValueReferenceType() ||
+ (T1.isConstQualified() && !T1.isVolatileQualified())) {
+ StandardConversionSequence &SCS = Result.isStandard() ? Result.Standard :
+ Result.UserDefined.After;
+ SCS.ReferenceBinding = true;
+ SCS.IsLvalueReference = ToType->isLValueReferenceType();
+ SCS.BindsToRvalue = true;
+ SCS.BindsToFunctionLvalue = false;
+ SCS.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ SCS.ObjCLifetimeConversionBinding = false;
+ } else
+ Result.setBad(BadConversionSequence::lvalue_ref_to_rvalue,
+ From, ToType);
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p6:
+ // Otherwise, if the parameter type is not a class:
+ if (!ToType->isRecordType()) {
+ // - if the initializer list has one element, the implicit conversion
+ // sequence is the one required to convert the element to the
+ // parameter type.
+ unsigned NumInits = From->getNumInits();
+ if (NumInits == 1)
+ Result = TryCopyInitialization(S, From->getInit(0), ToType,
+ SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ // - if the initializer list has no elements, the implicit conversion
+ // sequence is the identity conversion.
+ else if (NumInits == 0) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ }
+ Result.setListInitializationSequence();
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p7:
+ // In all cases other than those enumerated above, no conversion is possible
+ return Result;
+}
+
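A hedged C++11 sketch covering the [over.ics.list] cases handled above (p2, p4, and p6); Aggregate, a, b, d, and demo are hypothetical names:

    #include <initializer_list>

    struct Aggregate { int x, y; };

    void a(std::initializer_list<int>) {}  // p2: worst element conversion
    void b(Aggregate) {}                   // p4: user-defined conversion sequence
    void d(int) {}                         // p6: single-element conversion

    void demo() {
      a({1, 2, 3});
      b({1, 2});
      d({7});
    }
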
/// TryCopyInitialization - Try to copy-initialize a value of type
/// ToType from the expression From. Return the implicit conversion
/// sequence required to pass this argument, which may be a bad
@@ -3702,12 +4509,17 @@ static ImplicitConversionSequence
TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
bool InOverloadResolution,
- bool AllowObjCWritebackConversion) {
+ bool AllowObjCWritebackConversion,
+ bool AllowExplicit) {
+ if (InitListExpr *FromInitList = dyn_cast<InitListExpr>(From))
+ return TryListConversion(S, FromInitList, ToType, SuppressUserConversions,
+ InOverloadResolution, AllowObjCWritebackConversion);
+
if (ToType->isReferenceType())
return TryReferenceInit(S, From, ToType,
/*FIXME:*/From->getLocStart(),
SuppressUserConversions,
- /*AllowExplicit=*/false);
+ AllowExplicit);
return TryImplicitConversion(S, From, ToType,
SuppressUserConversions,
@@ -3877,7 +4689,7 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
Qualifiers ToQs = DestType.getQualifiers();
unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
if (CVR) {
- Diag(From->getSourceRange().getBegin(),
+ Diag(From->getLocStart(),
diag::err_member_function_call_bad_cvr)
<< Method->getDeclName() << FromRecordType << (CVR - 1)
<< From->getSourceRange();
@@ -3887,7 +4699,7 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
}
}
- return Diag(From->getSourceRange().getBegin(),
+ return Diag(From->getLocStart(),
diag::err_implicit_object_parameter_init)
<< ImplicitParamRecordType << FromRecordType << From->getSourceRange();
}
@@ -3902,7 +4714,7 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
if (!Context.hasSameType(From->getType(), DestType))
From = ImpCastExprToType(From, DestType, CK_NoOp,
- From->getType()->isPointerType() ? VK_RValue : VK_LValue).take();
+ From->getValueKind()).take();
return Owned(From);
}
@@ -3923,17 +4735,190 @@ TryContextuallyConvertToBool(Sema &S, Expr *From) {
/// PerformContextuallyConvertToBool - Perform a contextual conversion
/// of the expression From to bool (C++0x [conv]p3).
ExprResult Sema::PerformContextuallyConvertToBool(Expr *From) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
ImplicitConversionSequence ICS = TryContextuallyConvertToBool(*this, From);
if (!ICS.isBad())
return PerformImplicitConversion(From, Context.BoolTy, ICS, AA_Converting);
if (!DiagnoseMultipleUserDefinedConversion(From, Context.BoolTy))
- return Diag(From->getSourceRange().getBegin(),
+ return Diag(From->getLocStart(),
diag::err_typecheck_bool_condition)
<< From->getType() << From->getSourceRange();
return ExprError();
}
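For context, a hedged C++11 sketch of a contextual conversion to bool; Handle and demo are hypothetical names:

    struct Handle {
      explicit operator bool() const { return ptr != nullptr; }
      void *ptr = nullptr;
    };

    void demo(Handle h) {
      if (h) {}       // contextual conversion to bool: the explicit
                      // operator bool participates here
      // bool b = h;  // ill-formed: ordinary copy-initialization is not
                      // a contextual conversion
    }
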
+/// Check that the specified conversion is permitted in a converted constant
+/// expression, according to C++11 [expr.const]p3. Return true if the conversion
+/// is acceptable.
+static bool CheckConvertedConstantConversions(Sema &S,
+ StandardConversionSequence &SCS) {
+ // Since we know that the target type is an integral or unscoped enumeration
+ // type, most conversion kinds are impossible. All possible First and Third
+ // conversions are fine.
+ switch (SCS.Second) {
+ case ICK_Identity:
+ case ICK_Integral_Promotion:
+ case ICK_Integral_Conversion:
+ return true;
+
+ case ICK_Boolean_Conversion:
+ // Conversion from an integral or unscoped enumeration type to bool is
+ // classified as ICK_Boolean_Conversion, but it's also an integral
+ // conversion, so it's permitted in a converted constant expression.
+ return SCS.getFromType()->isIntegralOrUnscopedEnumerationType() &&
+ SCS.getToType(2)->isBooleanType();
+
+ case ICK_Floating_Integral:
+ case ICK_Complex_Real:
+ return false;
+
+ case ICK_Lvalue_To_Rvalue:
+ case ICK_Array_To_Pointer:
+ case ICK_Function_To_Pointer:
+ case ICK_NoReturn_Adjustment:
+ case ICK_Qualification:
+ case ICK_Compatible_Conversion:
+ case ICK_Vector_Conversion:
+ case ICK_Vector_Splat:
+ case ICK_Derived_To_Base:
+ case ICK_Pointer_Conversion:
+ case ICK_Pointer_Member:
+ case ICK_Block_Pointer_Conversion:
+ case ICK_Writeback_Conversion:
+ case ICK_Floating_Promotion:
+ case ICK_Complex_Promotion:
+ case ICK_Complex_Conversion:
+ case ICK_Floating_Conversion:
+ case ICK_TransparentUnionConversion:
+ llvm_unreachable("unexpected second conversion kind");
+
+ case ICK_Num_Conversion_Kinds:
+ break;
+ }
+
+ llvm_unreachable("unknown conversion kind");
+}
+
+/// CheckConvertedConstantExpression - Check that the expression From is a
+/// converted constant expression of type T, perform the conversion and produce
+/// the converted expression, per C++11 [expr.const]p3.
+ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
+ llvm::APSInt &Value,
+ CCEKind CCE) {
+ assert(LangOpts.CPlusPlus0x && "converted constant expression outside C++11");
+ assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
+
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
+ // C++11 [expr.const]p3 with proposed wording fixes:
+ // A converted constant expression of type T is a core constant expression,
+ // implicitly converted to a prvalue of type T, where the converted
+ // expression is a literal constant expression and the implicit conversion
+ // sequence contains only user-defined conversions, lvalue-to-rvalue
+ // conversions, integral promotions, and integral conversions other than
+ // narrowing conversions.
+ ImplicitConversionSequence ICS =
+ TryImplicitConversion(From, T,
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/false,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjcWritebackConversion=*/false);
+ StandardConversionSequence *SCS = 0;
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion:
+ if (!CheckConvertedConstantConversions(*this, ICS.Standard))
+ return Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression_disallowed)
+ << From->getType() << From->getSourceRange() << T;
+ SCS = &ICS.Standard;
+ break;
+ case ImplicitConversionSequence::UserDefinedConversion:
+ // We are converting from class type to an integral or enumeration type, so
+ // the Before sequence must be trivial.
+ if (!CheckConvertedConstantConversions(*this, ICS.UserDefined.After))
+ return Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression_disallowed)
+ << From->getType() << From->getSourceRange() << T;
+ SCS = &ICS.UserDefined.After;
+ break;
+ case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::BadConversion:
+ if (!DiagnoseMultipleUserDefinedConversion(From, T))
+ return Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression)
+ << From->getType() << From->getSourceRange() << T;
+ return ExprError();
+
+ case ImplicitConversionSequence::EllipsisConversion:
+ llvm_unreachable("ellipsis conversion in converted constant expression");
+ }
+
+ ExprResult Result = PerformImplicitConversion(From, T, ICS, AA_Converting);
+ if (Result.isInvalid())
+ return Result;
+
+ // Check for a narrowing implicit conversion.
+ APValue PreNarrowingValue;
+ QualType PreNarrowingType;
+ switch (SCS->getNarrowingKind(Context, Result.get(), PreNarrowingValue,
+ PreNarrowingType)) {
+ case NK_Variable_Narrowing:
+ // Implicit conversion to a narrower type, and the value is not a constant
+ // expression. We'll diagnose this in a moment.
+ case NK_Not_Narrowing:
+ break;
+
+ case NK_Constant_Narrowing:
+ Diag(From->getLocStart(),
+ isSFINAEContext() ? diag::err_cce_narrowing_sfinae :
+ diag::err_cce_narrowing)
+ << CCE << /*Constant*/1
+ << PreNarrowingValue.getAsString(Context, PreNarrowingType) << T;
+ break;
+
+ case NK_Type_Narrowing:
+ Diag(From->getLocStart(),
+ isSFINAEContext() ? diag::err_cce_narrowing_sfinae :
+ diag::err_cce_narrowing)
+ << CCE << /*Constant*/0 << From->getType() << T;
+ break;
+ }
+
+ // Check the expression is a constant expression.
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+
+ if (!Result.get()->EvaluateAsRValue(Eval, Context)) {
+ // The expression can't be folded, so we can't keep it at this position in
+ // the AST.
+ Result = ExprError();
+ } else {
+ Value = Eval.Val.getInt();
+
+ if (Notes.empty()) {
+ // It's a constant expression.
+ return Result;
+ }
+ }
+
+ // It's not a constant expression. Produce an appropriate diagnostic.
+ if (Notes.size() == 1 &&
+ Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr)
+ Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
+ else {
+ Diag(From->getLocStart(), diag::err_expr_not_cce)
+ << CCE << From->getSourceRange();
+ for (unsigned I = 0; I < Notes.size(); ++I)
+ Diag(Notes[I].first, Notes[I].second);
+ }
+ return Result;
+}
+
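Hedged C++11 examples of converted constant expressions, which this routine checks; enumerators with a fixed underlying type take this path, and Small, Bad, half, and E are hypothetical names:

    enum class Small : char { A = 100 };   // 100 converts to char: OK
    // enum class Bad : char { B = 1000 }; // ill-formed: constant narrowing

    constexpr int half(int n) { return n / 2; }
    enum class E : short { X = half(30) }; // a core constant expression,
                                           // then integral conversion to short
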
/// dropPointerConversions - If the given standard conversion sequence
/// involves any pointer conversions, remove them. This may change
/// the result type of the conversion sequence.
@@ -3982,6 +4967,9 @@ TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
/// PerformContextuallyConvertToObjCPointer - Perform a contextual
/// conversion of the expression From to an Objective-C pointer type.
ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
QualType Ty = Context.getObjCIdType();
ImplicitConversionSequence ICS =
TryContextuallyConvertToObjCPointer(*this, From);
@@ -3990,6 +4978,13 @@ ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
return ExprError();
}
+/// Determine whether the provided type is an integral type, or an enumeration
+/// type of a permitted flavor.
+static bool isIntegralOrEnumerationType(QualType T, bool AllowScopedEnum) {
+ return AllowScopedEnum ? T->isIntegralOrEnumerationType()
+ : T->isIntegralOrUnscopedEnumerationType();
+}
+
/// \brief Attempt to convert the given expression to an integral or
/// enumeration type.
///
@@ -4024,6 +5019,9 @@ ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
/// \param ConvDiag The diagnostic to be emitted if we are calling a conversion
/// function, which may be an extension in this case.
///
+/// \param AllowScopedEnumerations Specifies whether conversions to scoped
+/// enumerations should be considered.
+///
/// \returns The expression, converted to an integral or enumeration type if
/// successful.
ExprResult
@@ -4034,24 +5032,32 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
const PartialDiagnostic &ExplicitConvNote,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &AmbigNote,
- const PartialDiagnostic &ConvDiag) {
+ const PartialDiagnostic &ConvDiag,
+ bool AllowScopedEnumerations) {
// We can't perform any more checking for type-dependent expressions.
if (From->isTypeDependent())
return Owned(From);
+ // Process placeholders immediately.
+ if (From->hasPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(From);
+ if (result.isInvalid()) return result;
+ From = result.take();
+ }
+
// If the expression already has integral or enumeration type, we're golden.
QualType T = From->getType();
- if (T->isIntegralOrEnumerationType())
- return Owned(From);
+ if (isIntegralOrEnumerationType(T, AllowScopedEnumerations))
+ return DefaultLvalueConversion(From);
// FIXME: Check for missing '()' if T is a function type?
// If we don't have a class type in C++, there's no way we can get an
// expression of integral or enumeration type.
const RecordType *RecordTy = T->getAs<RecordType>();
- if (!RecordTy || !getLangOptions().CPlusPlus) {
- Diag(Loc, NotIntDiag)
- << T << From->getSourceRange();
+ if (!RecordTy || !getLangOpts().CPlusPlus) {
+ if (NotIntDiag.getDiagID())
+ Diag(Loc, NotIntDiag) << T << From->getSourceRange();
return Owned(From);
}
@@ -4072,19 +5078,21 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
I != E;
++I) {
if (CXXConversionDecl *Conversion
- = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl()))
- if (Conversion->getConversionType().getNonReferenceType()
- ->isIntegralOrEnumerationType()) {
+ = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl())) {
+ if (isIntegralOrEnumerationType(
+ Conversion->getConversionType().getNonReferenceType(),
+ AllowScopedEnumerations)) {
if (Conversion->isExplicit())
ExplicitConversions.addDecl(I.getDecl(), I.getAccess());
else
ViableConversions.addDecl(I.getDecl(), I.getAccess());
}
+ }
}
switch (ViableConversions.size()) {
case 0:
- if (ExplicitConversions.size() == 1) {
+ if (ExplicitConversions.size() == 1 && ExplicitConvDiag.getDiagID()) {
DeclAccessPair Found = ExplicitConversions[0];
CXXConversionDecl *Conversion
= cast<CXXConversionDecl>(Found->getUnderlyingDecl());
@@ -4115,8 +5123,11 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
HadMultipleCandidates);
if (Result.isInvalid())
return ExprError();
-
- From = Result.get();
+ // Record usage of conversion in an implicit cast.
+ From = ImplicitCastExpr::Create(Context, Result.get()->getType(),
+ CK_UserDefinedConversion,
+ Result.get(), 0,
+ Result.get()->getValueKind());
}
// We'll complain below about a non-integral condition type.
@@ -4143,12 +5154,18 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
HadMultipleCandidates);
if (Result.isInvalid())
return ExprError();
-
- From = Result.get();
+ // Record usage of conversion in an implicit cast.
+ From = ImplicitCastExpr::Create(Context, Result.get()->getType(),
+ CK_UserDefinedConversion,
+ Result.get(), 0,
+ Result.get()->getValueKind());
break;
}
default:
+ if (!AmbigDiag.getDiagID())
+ return Owned(From);
+
Diag(Loc, AmbigDiag)
<< T << From->getSourceRange();
for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
@@ -4161,11 +5178,11 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
return Owned(From);
}
- if (!From->getType()->isIntegralOrEnumerationType())
- Diag(Loc, NotIntDiag)
- << From->getType() << From->getSourceRange();
+ if (!isIntegralOrEnumerationType(From->getType(), AllowScopedEnumerations) &&
+ NotIntDiag.getDiagID())
+ Diag(Loc, NotIntDiag) << From->getType() << From->getSourceRange();
- return Owned(From);
+ return DefaultLvalueConversion(From);
}
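A hedged C++ sketch of where this conversion is used, for example a switch condition of class type; Code and demo are hypothetical names:

    struct Code {
      operator int() const { return value; }  // the single viable conversion
      int value;
    };

    void demo(Code c) {
      switch (c) {     // the condition is converted through operator int
      case 0: break;
      default: break;
      }
    }
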
/// AddOverloadCandidate - Adds the given function to the set of
@@ -4179,10 +5196,11 @@ Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
void
Sema::AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions,
- bool PartialOverloading) {
+ bool PartialOverloading,
+ bool AllowExplicit) {
const FunctionProtoType* Proto
= dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
assert(Proto && "Functions without a prototype cannot be overloaded");
@@ -4200,8 +5218,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// is irrelevant.
AddMethodCandidate(Method, FoundDecl, Method->getParent(),
QualType(), Expr::Classification::makeSimpleLValue(),
- Args, NumArgs, CandidateSet,
- SuppressUserConversions);
+ Args, CandidateSet, SuppressUserConversions);
return;
}
// We treat a constructor like a non-member function, since its object
@@ -4219,7 +5236,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// A member function template is never instantiated to perform the copy
// of a class object to an object of its class type.
QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
- if (NumArgs == 1 &&
+ if (Args.size() == 1 &&
Constructor->isSpecializationCopyingObject() &&
(Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
IsDerivedFrom(Args[0]->getType(), ClassType)))
@@ -4227,21 +5244,20 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
}
// Add this candidate
- CandidateSet.push_back(OverloadCandidate());
- OverloadCandidate& Candidate = CandidateSet.back();
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size());
Candidate.FoundDecl = FoundDecl;
Candidate.Function = Function;
Candidate.Viable = true;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
- Candidate.ExplicitCallArguments = NumArgs;
+ Candidate.ExplicitCallArguments = Args.size();
unsigned NumArgsInProto = Proto->getNumArgs();
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
- if ((NumArgs + (PartialOverloading && NumArgs)) > NumArgsInProto &&
+ if ((Args.size() + (PartialOverloading && Args.size())) > NumArgsInProto &&
!Proto->isVariadic()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
@@ -4254,7 +5270,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// parameter list is truncated on the right, so that there are
// exactly m parameters.
unsigned MinRequiredArgs = Function->getMinRequiredArguments();
- if (NumArgs < MinRequiredArgs && !PartialOverloading) {
+ if (Args.size() < MinRequiredArgs && !PartialOverloading) {
// Not enough arguments.
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_few_arguments;
@@ -4262,7 +5278,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
}
// (CUDA B.1): Check for invalid calls between targets.
- if (getLangOptions().CUDA)
+ if (getLangOpts().CUDA)
if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
if (CheckCUDATarget(Caller, Function)) {
Candidate.Viable = false;
@@ -4272,8 +5288,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// Determine the implicit conversion sequences for each of the
// arguments.
- Candidate.Conversions.resize(NumArgs);
- for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
if (ArgIdx < NumArgsInProto) {
// (C++ 13.3.2p3): for F to be a viable function, there shall
// exist for each argument an implicit conversion sequence
@@ -4285,7 +5300,8 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
SuppressUserConversions,
/*InOverloadResolution=*/true,
/*AllowObjCWritebackConversion=*/
- getLangOptions().ObjCAutoRefCount);
+ getLangOpts().ObjCAutoRefCount,
+ AllowExplicit);
if (Candidate.Conversions[ArgIdx].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -4303,9 +5319,10 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
/// \brief Add all of the function declarations in the given function set to
/// the overload candidate set.
void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
- bool SuppressUserConversions) {
+ bool SuppressUserConversions,
+ TemplateArgumentListInfo *ExplicitTemplateArgs) {
for (UnresolvedSetIterator F = Fns.begin(), E = Fns.end(); F != E; ++F) {
NamedDecl *D = F.getDecl()->getUnderlyingDecl();
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
@@ -4313,10 +5330,10 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
AddMethodCandidate(cast<CXXMethodDecl>(FD), F.getPair(),
cast<CXXMethodDecl>(FD)->getParent(),
Args[0]->getType(), Args[0]->Classify(Context),
- Args + 1, NumArgs - 1,
- CandidateSet, SuppressUserConversions);
+ Args.slice(1), CandidateSet,
+ SuppressUserConversions);
else
- AddOverloadCandidate(FD, F.getPair(), Args, NumArgs, CandidateSet,
+ AddOverloadCandidate(FD, F.getPair(), Args, CandidateSet,
SuppressUserConversions);
} else {
FunctionTemplateDecl *FunTmpl = cast<FunctionTemplateDecl>(D);
@@ -4324,17 +5341,14 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
!cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl())->isStatic())
AddMethodTemplateCandidate(FunTmpl, F.getPair(),
cast<CXXRecordDecl>(FunTmpl->getDeclContext()),
- /*FIXME: explicit args */ 0,
+ ExplicitTemplateArgs,
Args[0]->getType(),
- Args[0]->Classify(Context),
- Args + 1, NumArgs - 1,
- CandidateSet,
- SuppressUserConversions);
+ Args[0]->Classify(Context), Args.slice(1),
+ CandidateSet, SuppressUserConversions);
else
AddTemplateOverloadCandidate(FunTmpl, F.getPair(),
- /*FIXME: explicit args */ 0,
- Args, NumArgs, CandidateSet,
- SuppressUserConversions);
+ ExplicitTemplateArgs, Args,
+ CandidateSet, SuppressUserConversions);
}
}
}
@@ -4358,12 +5372,13 @@ void Sema::AddMethodCandidate(DeclAccessPair FoundDecl,
"Expected a member function template");
AddMethodTemplateCandidate(TD, FoundDecl, ActingContext,
/*ExplicitArgs*/ 0,
- ObjectType, ObjectClassification, Args, NumArgs,
- CandidateSet,
+ ObjectType, ObjectClassification,
+ llvm::makeArrayRef(Args, NumArgs), CandidateSet,
SuppressUserConversions);
} else {
AddMethodCandidate(cast<CXXMethodDecl>(Decl), FoundDecl, ActingContext,
- ObjectType, ObjectClassification, Args, NumArgs,
+ ObjectType, ObjectClassification,
+ llvm::makeArrayRef(Args, NumArgs),
CandidateSet, SuppressUserConversions);
}
}
@@ -4379,7 +5394,7 @@ void
Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions) {
const FunctionProtoType* Proto
@@ -4395,20 +5410,19 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
// Add this candidate
- CandidateSet.push_back(OverloadCandidate());
- OverloadCandidate& Candidate = CandidateSet.back();
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = Method;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
- Candidate.ExplicitCallArguments = NumArgs;
+ Candidate.ExplicitCallArguments = Args.size();
unsigned NumArgsInProto = Proto->getNumArgs();
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
- if (NumArgs > NumArgsInProto && !Proto->isVariadic()) {
+ if (Args.size() > NumArgsInProto && !Proto->isVariadic()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
return;
@@ -4420,7 +5434,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// parameter list is truncated on the right, so that there are
// exactly m parameters.
unsigned MinRequiredArgs = Method->getMinRequiredArguments();
- if (NumArgs < MinRequiredArgs) {
+ if (Args.size() < MinRequiredArgs) {
// Not enough arguments.
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_few_arguments;
@@ -4428,7 +5442,6 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
}
Candidate.Viable = true;
- Candidate.Conversions.resize(NumArgs + 1);
if (Method->isStatic() || ObjectType.isNull())
// The implicit object argument is ignored.
@@ -4448,7 +5461,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// Determine the implicit conversion sequences for each of the
// arguments.
- for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
if (ArgIdx < NumArgsInProto) {
// (C++ 13.3.2p3): for F to be a viable function, there shall
// exist for each argument an implicit conversion sequence
@@ -4460,7 +5473,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
SuppressUserConversions,
/*InOverloadResolution=*/true,
/*AllowObjCWritebackConversion=*/
- getLangOptions().ObjCAutoRefCount);
+ getLangOpts().ObjCAutoRefCount);
if (Candidate.Conversions[ArgIdx + 1].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -4485,7 +5498,7 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions) {
if (!CandidateSet.isNewCandidate(MethodTmpl))
@@ -4503,17 +5516,16 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
TemplateDeductionInfo Info(Context, CandidateSet.getLocation());
FunctionDecl *Specialization = 0;
if (TemplateDeductionResult Result
- = DeduceTemplateArguments(MethodTmpl, ExplicitTemplateArgs,
- Args, NumArgs, Specialization, Info)) {
- CandidateSet.push_back(OverloadCandidate());
- OverloadCandidate &Candidate = CandidateSet.back();
+ = DeduceTemplateArguments(MethodTmpl, ExplicitTemplateArgs, Args,
+ Specialization, Info)) {
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
Candidate.FoundDecl = FoundDecl;
Candidate.Function = MethodTmpl->getTemplatedDecl();
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_deduction;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
- Candidate.ExplicitCallArguments = NumArgs;
+ Candidate.ExplicitCallArguments = Args.size();
Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
return;
@@ -4525,8 +5537,8 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
assert(isa<CXXMethodDecl>(Specialization) &&
"Specialization is not a member function?");
AddMethodCandidate(cast<CXXMethodDecl>(Specialization), FoundDecl,
- ActingContext, ObjectType, ObjectClassification,
- Args, NumArgs, CandidateSet, SuppressUserConversions);
+ ActingContext, ObjectType, ObjectClassification, Args,
+ CandidateSet, SuppressUserConversions);
}
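The push_back(OverloadCandidate()) / back() pairs above collapse into CandidateSet.addCandidate(N). The helper's body is not part of this diff; a plausible shape, inferred from the call sites and from the switch to a NumConversions field later in the patch:

class OverloadCandidateSet {
  llvm::SmallVector<OverloadCandidate, 16> Candidates; // storage choice is an assumption
public:
  OverloadCandidate &addCandidate(unsigned NumConversions = 0) {
    Candidates.push_back(OverloadCandidate());
    OverloadCandidate &C = Candidates.back();
    // Conversion slots are sized once here, replacing the per-call-site
    // Candidate.Conversions.resize(NumArgs + 1) lines removed above.
    C.NumConversions = NumConversions;
    return C;
  }
};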
/// \brief Add a C++ function template specialization as a candidate
@@ -4536,7 +5548,7 @@ void
Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions) {
if (!CandidateSet.isNewCandidate(FunctionTemplate))
@@ -4554,17 +5566,16 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
TemplateDeductionInfo Info(Context, CandidateSet.getLocation());
FunctionDecl *Specialization = 0;
if (TemplateDeductionResult Result
- = DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs,
- Args, NumArgs, Specialization, Info)) {
- CandidateSet.push_back(OverloadCandidate());
- OverloadCandidate &Candidate = CandidateSet.back();
+ = DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, Args,
+ Specialization, Info)) {
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
Candidate.FoundDecl = FoundDecl;
Candidate.Function = FunctionTemplate->getTemplatedDecl();
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_deduction;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
- Candidate.ExplicitCallArguments = NumArgs;
+ Candidate.ExplicitCallArguments = Args.size();
Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
return;
@@ -4573,7 +5584,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
// Add the function template specialization produced by template argument
// deduction as a candidate.
assert(Specialization && "Missing function template specialization?");
- AddOverloadCandidate(Specialization, FoundDecl, Args, NumArgs, CandidateSet,
+ AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet,
SuppressUserConversions);
}
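A user-level view of the ovl_fail_bad_deduction path recorded above (hypothetical example):

template <typename T> void pick(T, T);

void call() {
  pick(1, 2.0); // deduction infers T = int from the first argument and
                // T = double from the second; the conflict makes the
                // template non-viable, but it stays in the candidate set
                // so diagnostics can explain the failure.
}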
@@ -4599,8 +5610,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
// Add this candidate
- CandidateSet.push_back(OverloadCandidate());
- OverloadCandidate& Candidate = CandidateSet.back();
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(1);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = Conversion;
Candidate.IsSurrogate = false;
@@ -4609,7 +5619,6 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
Candidate.FinalConversion.setFromType(ConvType);
Candidate.FinalConversion.setAllToTypes(ToType);
Candidate.Viable = true;
- Candidate.Conversions.resize(1);
Candidate.ExplicitCallArguments = 1;
// C++ [over.match.funcs]p4:
@@ -4656,7 +5665,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// lvalues/rvalues and the type. Fortunately, we can allocate this
// call on the stack and we don't need its arguments to be
// well-formed.
- DeclRefExpr ConversionRef(Conversion, Conversion->getType(),
+ DeclRefExpr ConversionRef(Conversion, false, Conversion->getType(),
VK_LValue, From->getLocStart());
ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack,
Context.getPointerType(Conversion->getType()),
@@ -4743,8 +5752,7 @@ Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, ToType,
Specialization, Info)) {
- CandidateSet.push_back(OverloadCandidate());
- OverloadCandidate &Candidate = CandidateSet.back();
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
Candidate.FoundDecl = FoundDecl;
Candidate.Function = FunctionTemplate->getTemplatedDecl();
Candidate.Viable = false;
@@ -4774,7 +5782,7 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet) {
if (!CandidateSet.isNewCandidate(Conversion))
return;
@@ -4782,16 +5790,14 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
// Overload resolution is always an unevaluated context.
EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
- CandidateSet.push_back(OverloadCandidate());
- OverloadCandidate& Candidate = CandidateSet.back();
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = 0;
Candidate.Surrogate = Conversion;
Candidate.Viable = true;
Candidate.IsSurrogate = true;
Candidate.IgnoreObjectArgument = false;
- Candidate.Conversions.resize(NumArgs + 1);
- Candidate.ExplicitCallArguments = NumArgs;
+ Candidate.ExplicitCallArguments = Args.size();
// Determine the implicit conversion sequence for the implicit
// object parameter.
@@ -4825,7 +5831,7 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
- if (NumArgs > NumArgsInProto && !Proto->isVariadic()) {
+ if (Args.size() > NumArgsInProto && !Proto->isVariadic()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
return;
@@ -4833,7 +5839,7 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
// Function types don't have any default arguments, so just check if
// we have enough arguments.
- if (NumArgs < NumArgsInProto) {
+ if (Args.size() < NumArgsInProto) {
// Not enough arguments.
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_few_arguments;
@@ -4842,7 +5848,7 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
// Determine the implicit conversion sequences for each of the
// arguments.
- for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
if (ArgIdx < NumArgsInProto) {
// (C++ 13.3.2p3): for F to be a viable function, there shall
// exist for each argument an implicit conversion sequence
@@ -4854,7 +5860,7 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
/*SuppressUserConversions=*/false,
/*InOverloadResolution=*/false,
/*AllowObjCWritebackConversion=*/
- getLangOptions().ObjCAutoRefCount);
+ getLangOpts().ObjCAutoRefCount);
if (Candidate.Conversions[ArgIdx + 1].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -4935,8 +5941,7 @@ void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
// Add this candidate
- CandidateSet.push_back(OverloadCandidate());
- OverloadCandidate& Candidate = CandidateSet.back();
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(NumArgs);
Candidate.FoundDecl = DeclAccessPair::make(0, AS_none);
Candidate.Function = 0;
Candidate.IsSurrogate = false;
@@ -4948,7 +5953,6 @@ void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
// Determine the implicit conversion sequences for each of the
// arguments.
Candidate.Viable = true;
- Candidate.Conversions.resize(NumArgs);
Candidate.ExplicitCallArguments = NumArgs;
for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
// C++ [over.match.oper]p4:
@@ -4974,7 +5978,7 @@ void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArgIdx == 0 && IsAssignmentOperator,
/*InOverloadResolution=*/false,
/*AllowObjCWritebackConversion=*/
- getLangOptions().ObjCAutoRefCount);
+ getLangOpts().ObjCAutoRefCount);
}
if (Candidate.Conversions[ArgIdx].isBad()) {
Candidate.Viable = false;
@@ -6319,7 +7323,7 @@ public:
S.AddBuiltinCandidate(*MemPtr, ParamTypes, Args, 2, CandidateSet);
}
- if (S.getLangOptions().CPlusPlus0x) {
+ if (S.getLangOpts().CPlusPlus0x) {
for (BuiltinCandidateTypeSet::iterator
Enum = CandidateTypes[ArgIdx].enumeration_begin(),
EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
@@ -6536,8 +7540,8 @@ Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
/// candidate set (C++ [basic.lookup.argdep]).
void
Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
- bool Operator,
- Expr **Args, unsigned NumArgs,
+ bool Operator, SourceLocation Loc,
+ llvm::ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading,
@@ -6552,7 +7556,7 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
// we supposed to consider on ADL candidates, anyway?
// FIXME: Pass in the explicit template arguments?
- ArgumentDependentLookup(Name, Operator, Args, NumArgs, Fns,
+ ArgumentDependentLookup(Name, Operator, Loc, Args, Fns,
StdNamespaceIsAssociated);
// Erase all of the candidates we already knew about.
@@ -6573,12 +7577,12 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
if (ExplicitTemplateArgs)
continue;
- AddOverloadCandidate(FD, FoundDecl, Args, NumArgs, CandidateSet,
- false, PartialOverloading);
+ AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet, false,
+ PartialOverloading);
} else
AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I),
FoundDecl, ExplicitTemplateArgs,
- Args, NumArgs, CandidateSet);
+ Args, CandidateSet);
}
}
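For reference, the kind of candidate this routine collects, in plain C++ unrelated to the patch:

namespace N {
  struct X {};
  void f(X); // reachable from unqualified calls via ADL
}

void g() {
  N::X x;
  f(x); // N is an associated namespace of the argument's type, so N::f
        // becomes a candidate even though it was never named explicitly.
}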
@@ -6611,8 +7615,8 @@ isBetterOverloadCandidate(Sema &S,
// A viable function F1 is defined to be a better function than another
// viable function F2 if for all arguments i, ICSi(F1) is not a worse
// conversion sequence than ICSi(F2), and then...
- unsigned NumArgs = Cand1.Conversions.size();
- assert(Cand2.Conversions.size() == NumArgs && "Overload candidate mismatch");
+ unsigned NumArgs = Cand1.NumConversions;
+ assert(Cand2.NumConversions == NumArgs && "Overload candidate mismatch");
bool HasBetterConversion = false;
for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
switch (CompareImplicitConversionSequences(S,
@@ -6669,6 +7673,15 @@ isBetterOverloadCandidate(Sema &S,
if (UserDefinedConversion && Cand1.Function && Cand2.Function &&
isa<CXXConversionDecl>(Cand1.Function) &&
isa<CXXConversionDecl>(Cand2.Function)) {
+ // First check whether we prefer one of the conversion functions over the
+ // other. This only distinguishes the results in non-standard, extension
+ // cases such as the conversion from a lambda closure type to a function
+ // pointer or block.
+ ImplicitConversionSequence::CompareKind FuncResult
+ = compareConversionFunctions(S, Cand1.Function, Cand2.Function);
+ if (FuncResult != ImplicitConversionSequence::Indistinguishable)
+ return FuncResult;
+
switch (CompareStandardConversionSequences(S,
Cand1.FinalConversion,
Cand2.FinalConversion)) {
@@ -6794,9 +7807,11 @@ OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
if (Meth->isMoveAssignmentOperator())
return oc_implicit_move_assignment;
- assert(Meth->isCopyAssignmentOperator()
- && "implicit method is not copy assignment operator?");
- return oc_implicit_copy_assignment;
+ if (Meth->isCopyAssignmentOperator())
+ return oc_implicit_copy_assignment;
+
+ assert(isa<CXXConversionDecl>(Meth) && "expected conversion");
+ return oc_method;
}
return isTemplate ? oc_function_template : oc_function;
@@ -6815,17 +7830,19 @@ void MaybeEmitInheritedConstructorNote(Sema &S, FunctionDecl *Fn) {
} // end anonymous namespace
// Notes the location of an overload candidate.
-void Sema::NoteOverloadCandidate(FunctionDecl *Fn) {
+void Sema::NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType) {
std::string FnDesc;
OverloadCandidateKind K = ClassifyOverloadCandidate(*this, Fn, FnDesc);
- Diag(Fn->getLocation(), diag::note_ovl_candidate)
- << (unsigned) K << FnDesc;
+ PartialDiagnostic PD = PDiag(diag::note_ovl_candidate)
+ << (unsigned) K << FnDesc;
+ HandleFunctionTypeMismatch(PD, Fn->getType(), DestType);
+ Diag(Fn->getLocation(), PD);
MaybeEmitInheritedConstructorNote(*this, Fn);
}
// Notes the location of all overload candidates designated through
// OverloadedExpr
-void Sema::NoteAllOverloadCandidates(Expr* OverloadedExpr) {
+void Sema::NoteAllOverloadCandidates(Expr* OverloadedExpr, QualType DestType) {
assert(OverloadedExpr->getType() == Context.OverloadTy);
OverloadExpr::FindResult Ovl = OverloadExpr::find(OverloadedExpr);
@@ -6836,10 +7853,10 @@ void Sema::NoteAllOverloadCandidates(Expr* OverloadedExpr) {
I != IEnd; ++I) {
if (FunctionTemplateDecl *FunTmpl =
dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
- NoteOverloadCandidate(FunTmpl->getTemplatedDecl());
+ NoteOverloadCandidate(FunTmpl->getTemplatedDecl(), DestType);
} else if (FunctionDecl *Fun
= dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
- NoteOverloadCandidate(Fun);
+ NoteOverloadCandidate(Fun, DestType);
}
}
}
@@ -6915,12 +7932,6 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
if (CToTy.getUnqualifiedType() == CFromTy.getUnqualifiedType() &&
!CToTy.isAtLeastAsQualifiedAs(CFromTy)) {
- // It is dumb that we have to do this here.
- while (isa<ArrayType>(CFromTy))
- CFromTy = CFromTy->getAs<ArrayType>()->getElementType();
- while (isa<ArrayType>(CToTy))
- CToTy = CFromTy->getAs<ArrayType>()->getElementType();
-
Qualifiers FromQs = CFromTy.getQualifiers();
Qualifiers ToQs = CToTy.getQualifiers();
@@ -7064,9 +8075,8 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
<< (unsigned) (Cand->Fix.Kind);
// If we can fix the conversion, suggest the FixIts.
- for (SmallVector<FixItHint, 1>::iterator
- HI = Cand->Fix.Hints.begin(), HE = Cand->Fix.Hints.end();
- HI != HE; ++HI)
+ for (std::vector<FixItHint>::iterator HI = Cand->Fix.Hints.begin(),
+ HE = Cand->Fix.Hints.end(); HI != HE; ++HI)
FDiag << *HI;
S.Diag(Fn->getLocation(), FDiag);
@@ -7125,7 +8135,7 @@ void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
/// Diagnose a failed template-argument deduction.
void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
- Expr **Args, unsigned NumArgs) {
+ unsigned NumArgs) {
FunctionDecl *Fn = Cand->Function; // pattern
TemplateParameter Param = Cand->DeductionFailure.getTemplateParameter();
@@ -7273,7 +8283,7 @@ void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
/// more richly for those diagnostic clients that cared, but we'd
/// still have to be just as careful with the default diagnostics.
void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
- Expr **Args, unsigned NumArgs) {
+ unsigned NumArgs) {
FunctionDecl *Fn = Cand->Function;
// Note deleted candidates, but only if they're viable.
@@ -7283,7 +8293,8 @@ void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, FnDesc);
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_deleted)
- << FnKind << FnDesc << Fn->isDeleted();
+ << FnKind << FnDesc
+ << (Fn->isDeleted() ? (Fn->isDeletedAsWritten() ? 1 : 2) : 0);
MaybeEmitInheritedConstructorNote(S, Fn);
return;
}
@@ -7300,7 +8311,7 @@ void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
return DiagnoseArityMismatch(S, Cand, NumArgs);
case ovl_fail_bad_deduction:
- return DiagnoseBadDeduction(S, Cand, Args, NumArgs);
+ return DiagnoseBadDeduction(S, Cand, NumArgs);
case ovl_fail_trivial_conversion:
case ovl_fail_bad_final_conversion:
@@ -7309,7 +8320,7 @@ void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
case ovl_fail_bad_conversion: {
unsigned I = (Cand->IgnoreObjectArgument ? 1 : 0);
- for (unsigned N = Cand->Conversions.size(); I != N; ++I)
+ for (unsigned N = Cand->NumConversions; I != N; ++I)
if (Cand->Conversions[I].isBad())
return DiagnoseBadConversion(S, Cand, I);
@@ -7361,12 +8372,12 @@ void NoteBuiltinOperatorCandidate(Sema &S,
const char *Opc,
SourceLocation OpLoc,
OverloadCandidate *Cand) {
- assert(Cand->Conversions.size() <= 2 && "builtin operator is not binary");
+ assert(Cand->NumConversions <= 2 && "builtin operator is not binary");
std::string TypeStr("operator");
TypeStr += Opc;
TypeStr += "(";
TypeStr += Cand->BuiltinTypes.ParamTypes[0].getAsString();
- if (Cand->Conversions.size() == 1) {
+ if (Cand->NumConversions == 1) {
TypeStr += ")";
S.Diag(OpLoc, diag::note_ovl_builtin_unary_candidate) << TypeStr;
} else {
@@ -7379,7 +8390,7 @@ void NoteBuiltinOperatorCandidate(Sema &S,
void NoteAmbiguousUserConversions(Sema &S, SourceLocation OpLoc,
OverloadCandidate *Cand) {
- unsigned NoOperands = Cand->Conversions.size();
+ unsigned NoOperands = Cand->NumConversions;
for (unsigned ArgIdx = 0; ArgIdx < NoOperands; ++ArgIdx) {
const ImplicitConversionSequence &ICS = Cand->Conversions[ArgIdx];
if (ICS.isBad()) break; // all meaningless after first invalid
@@ -7483,11 +8494,11 @@ struct CompareOverloadCandidatesForDisplay {
// If there's any ordering between the defined conversions...
// FIXME: this might not be transitive.
- assert(L->Conversions.size() == R->Conversions.size());
+ assert(L->NumConversions == R->NumConversions);
int leftBetter = 0;
unsigned I = (L->IgnoreObjectArgument || R->IgnoreObjectArgument);
- for (unsigned E = L->Conversions.size(); I != E; ++I) {
+ for (unsigned E = L->NumConversions; I != E; ++I) {
switch (CompareImplicitConversionSequences(S,
L->Conversions[I],
R->Conversions[I])) {
@@ -7537,7 +8548,7 @@ struct CompareOverloadCandidatesForDisplay {
/// CompleteNonViableCandidate - Normally, overload resolution only
/// computes up to the first bad conversion. Produces the FixIt set if possible.
void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
- Expr **Args, unsigned NumArgs) {
+ llvm::ArrayRef<Expr *> Args) {
assert(!Cand->Viable);
// Don't do anything on failures other than bad conversion.
@@ -7550,7 +8561,7 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
// Skip forward to the first bad conversion.
unsigned ConvIdx = (Cand->IgnoreObjectArgument ? 1 : 0);
- unsigned ConvCount = Cand->Conversions.size();
+ unsigned ConvCount = Cand->NumConversions;
while (true) {
assert(ConvIdx != ConvCount && "no bad conversion in candidate");
ConvIdx++;
@@ -7595,7 +8606,7 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
SuppressUserConversions,
/*InOverloadResolution*/ true,
/*AllowObjCWritebackConversion=*/
- S.getLangOptions().ObjCAutoRefCount);
+ S.getLangOpts().ObjCAutoRefCount);
return;
}
@@ -7608,7 +8619,7 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
SuppressUserConversions,
/*InOverloadResolution=*/true,
/*AllowObjCWritebackConversion=*/
- S.getLangOptions().ObjCAutoRefCount);
+ S.getLangOpts().ObjCAutoRefCount);
// Store the FixIt in the candidate if it exists.
if (!Unfixable && Cand->Conversions[ConvIdx].isBad())
Unfixable = !Cand->TryToFixBadConversion(ConvIdx, S);
@@ -7625,7 +8636,7 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
/// set.
void OverloadCandidateSet::NoteCandidates(Sema &S,
OverloadCandidateDisplayKind OCD,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
const char *Opc,
SourceLocation OpLoc) {
// Sort the candidates by viability and position. Sorting directly would
@@ -7636,7 +8647,7 @@ void OverloadCandidateSet::NoteCandidates(Sema &S,
if (Cand->Viable)
Cands.push_back(Cand);
else if (OCD == OCD_AllCandidates) {
- CompleteNonViableCandidate(S, Cand, Args, NumArgs);
+ CompleteNonViableCandidate(S, Cand, Args);
if (Cand->Function || Cand->IsSurrogate)
Cands.push_back(Cand);
      // Otherwise, this is a non-viable builtin candidate. We do not, in general,
@@ -7665,7 +8676,7 @@ void OverloadCandidateSet::NoteCandidates(Sema &S,
++CandsShown;
if (Cand->Function)
- NoteFunctionCandidate(S, Cand, Args, NumArgs);
+ NoteFunctionCandidate(S, Cand, Args.size());
else if (Cand->IsSurrogate)
NoteSurrogateCandidate(S, Cand);
else {
@@ -7854,7 +8865,7 @@ private:
return false;
if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
- if (S.getLangOptions().CUDA)
+ if (S.getLangOpts().CUDA)
if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
if (S.CheckCUDATarget(Caller, FunDecl))
return false;
@@ -7932,7 +8943,7 @@ private:
<< Matches[0].second->getDeclName(),
S.PDiag(diag::note_ovl_candidate)
<< (unsigned) oc_function_template,
- Complain);
+ Complain, TargetFunctionType);
if (Result != MatchesCopy.end()) {
// Make it the first and only element
@@ -7961,7 +8972,7 @@ public:
S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_no_viable)
<< OvlExpr->getName() << TargetFunctionType
<< OvlExpr->getSourceRange();
- S.NoteAllOverloadCandidates(OvlExpr);
+ S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType);
}
bool IsInvalidFormOfPointerToMemberFunction() const {
@@ -7987,9 +8998,11 @@ public:
S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_ambiguous)
<< OvlExpr->getName()
<< OvlExpr->getSourceRange();
- S.NoteAllOverloadCandidates(OvlExpr);
+ S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType);
}
-
+
+ bool hadMultipleCandidates() const { return (OvlExpr->getNumDecls() > 1); }
+
int getNumMatches() const { return Matches.size(); }
FunctionDecl* getMatchingFunctionDecl() const {
@@ -8019,16 +9032,18 @@ public:
/// resolved, and NULL otherwise. When @p Complain is true, this
/// routine will emit diagnostics if there is an error.
FunctionDecl *
-Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
- bool Complain,
- DeclAccessPair &FoundResult) {
-
+Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
+ QualType TargetType,
+ bool Complain,
+ DeclAccessPair &FoundResult,
+ bool *pHadMultipleCandidates) {
assert(AddressOfExpr->getType() == Context.OverloadTy);
-
- AddressOfFunctionResolver Resolver(*this, AddressOfExpr, TargetType, Complain);
+
+ AddressOfFunctionResolver Resolver(*this, AddressOfExpr, TargetType,
+ Complain);
int NumMatches = Resolver.getNumMatches();
FunctionDecl* Fn = 0;
- if ( NumMatches == 0 && Complain) {
+ if (NumMatches == 0 && Complain) {
if (Resolver.IsInvalidFormOfPointerToMemberFunction())
Resolver.ComplainIsInvalidFormOfPointerToMemberFunction();
else
@@ -8040,11 +9055,13 @@ Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetTyp
Fn = Resolver.getMatchingFunctionDecl();
assert(Fn);
FoundResult = *Resolver.getMatchingFunctionAccessPair();
- MarkDeclarationReferenced(AddressOfExpr->getLocStart(), Fn);
+ MarkFunctionReferenced(AddressOfExpr->getLocStart(), Fn);
if (Complain)
CheckAddressOfMemberAccess(AddressOfExpr, FoundResult);
}
-
+
+ if (pHadMultipleCandidates)
+ *pHadMultipleCandidates = Resolver.hadMultipleCandidates();
return Fn;
}
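A hypothetical call site for the widened signature; S, AddrOfExpr, and TargetTy are illustrative names, not from the patch:

bool HadMultipleCandidates = false;
DeclAccessPair Found;
if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(
        AddrOfExpr, TargetTy, /*Complain=*/true, Found,
        &HadMultipleCandidates)) {
  // HadMultipleCandidates reports whether more than one declaration was
  // considered; callers that do not care pass 0 and the write is skipped.
  (void)Fn;
}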
@@ -8146,7 +9163,7 @@ bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult SingleFunctionExpression;
if (FunctionDecl *fn = ResolveSingleFunctionTemplateSpecialization(
ovl.Expression, /*complain*/ false, &found)) {
- if (DiagnoseUseOfDecl(fn, SrcExpr.get()->getSourceRange().getBegin())) {
+ if (DiagnoseUseOfDecl(fn, SrcExpr.get()->getLocStart())) {
SrcExpr = ExprError();
return true;
}
@@ -8212,7 +9229,7 @@ bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
static void AddOverloadedCallCandidate(Sema &S,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading,
bool KnownValid) {
@@ -8225,16 +9242,15 @@ static void AddOverloadedCallCandidate(Sema &S,
assert(!KnownValid && "Explicit template arguments?");
return;
}
- S.AddOverloadCandidate(Func, FoundDecl, Args, NumArgs, CandidateSet,
- false, PartialOverloading);
+ S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet, false,
+ PartialOverloading);
return;
}
if (FunctionTemplateDecl *FuncTemplate
= dyn_cast<FunctionTemplateDecl>(Callee)) {
S.AddTemplateOverloadCandidate(FuncTemplate, FoundDecl,
- ExplicitTemplateArgs,
- Args, NumArgs, CandidateSet);
+ ExplicitTemplateArgs, Args, CandidateSet);
return;
}
@@ -8244,7 +9260,7 @@ static void AddOverloadedCallCandidate(Sema &S,
/// \brief Add the overload candidates named by callee and/or found by argument
/// dependent lookup to the given overload set.
void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading) {
@@ -8287,16 +9303,15 @@ void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
E = ULE->decls_end(); I != E; ++I)
- AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs,
- Args, NumArgs, CandidateSet,
- PartialOverloading, /*KnownValid*/ true);
+ AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs, Args,
+ CandidateSet, PartialOverloading,
+ /*KnownValid*/ true);
if (ULE->requiresADL())
AddArgumentDependentLookupCandidates(ULE->getName(), /*Operator*/ false,
- Args, NumArgs,
- ExplicitTemplateArgs,
- CandidateSet,
- PartialOverloading,
+ ULE->getExprLoc(),
+ Args, ExplicitTemplateArgs,
+ CandidateSet, PartialOverloading,
ULE->isStdAssociatedNamespace());
}
@@ -8310,11 +9325,14 @@ static bool
DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
const CXXScopeSpec &SS, LookupResult &R,
TemplateArgumentListInfo *ExplicitTemplateArgs,
- Expr **Args, unsigned NumArgs) {
+ llvm::ArrayRef<Expr *> Args) {
if (SemaRef.ActiveTemplateInstantiations.empty() || !SS.isEmpty())
return false;
for (DeclContext *DC = SemaRef.CurContext; DC; DC = DC->getParent()) {
+ if (DC->isTransparentContext())
+ continue;
+
SemaRef.LookupQualifiedName(R, DC);
if (!R.empty()) {
@@ -8330,7 +9348,7 @@ DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
OverloadCandidateSet Candidates(FnLoc);
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
AddOverloadedCallCandidate(SemaRef, I.getPair(),
- ExplicitTemplateArgs, Args, NumArgs,
+ ExplicitTemplateArgs, Args,
Candidates, false, /*KnownValid*/ false);
OverloadCandidateSet::iterator Best;
@@ -8345,7 +9363,7 @@ DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
// declaring the function there instead.
Sema::AssociatedNamespaceSet AssociatedNamespaces;
Sema::AssociatedClassSet AssociatedClasses;
- SemaRef.FindAssociatedClassesAndNamespaces(Args, NumArgs,
+ SemaRef.FindAssociatedClassesAndNamespaces(Args,
AssociatedNamespaces,
AssociatedClasses);
// Never suggest declaring a function within namespace 'std'.
@@ -8399,12 +9417,75 @@ DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
static bool
DiagnoseTwoPhaseOperatorLookup(Sema &SemaRef, OverloadedOperatorKind Op,
SourceLocation OpLoc,
- Expr **Args, unsigned NumArgs) {
+ llvm::ArrayRef<Expr *> Args) {
DeclarationName OpName =
SemaRef.Context.DeclarationNames.getCXXOperatorName(Op);
LookupResult R(SemaRef, OpName, OpLoc, Sema::LookupOperatorName);
return DiagnoseTwoPhaseLookup(SemaRef, OpLoc, CXXScopeSpec(), R,
- /*ExplicitTemplateArgs=*/0, Args, NumArgs);
+ /*ExplicitTemplateArgs=*/0, Args);
+}
+
+namespace {
+// Callback to limit the allowed keywords and to only accept typo corrections
+// that are keywords or whose decls refer to functions (or template functions)
+// that accept the given number of arguments.
+class RecoveryCallCCC : public CorrectionCandidateCallback {
+ public:
+ RecoveryCallCCC(Sema &SemaRef, unsigned NumArgs, bool HasExplicitTemplateArgs)
+ : NumArgs(NumArgs), HasExplicitTemplateArgs(HasExplicitTemplateArgs) {
+ WantTypeSpecifiers = SemaRef.getLangOpts().CPlusPlus;
+ WantRemainingKeywords = false;
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (!candidate.getCorrectionDecl())
+ return candidate.isKeyword();
+
+ for (TypoCorrection::const_decl_iterator DI = candidate.begin(),
+ DIEnd = candidate.end(); DI != DIEnd; ++DI) {
+ FunctionDecl *FD = 0;
+ NamedDecl *ND = (*DI)->getUnderlyingDecl();
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
+ FD = FTD->getTemplatedDecl();
+ if (!HasExplicitTemplateArgs && !FD) {
+ if (!(FD = dyn_cast<FunctionDecl>(ND)) && isa<ValueDecl>(ND)) {
+ // If the Decl is neither a function nor a template function,
+ // determine if it is a pointer or reference to a function. If so,
+ // check against the number of arguments expected for the pointee.
+ QualType ValType = cast<ValueDecl>(ND)->getType();
+ if (ValType->isAnyPointerType() || ValType->isReferenceType())
+ ValType = ValType->getPointeeType();
+ if (const FunctionProtoType *FPT = ValType->getAs<FunctionProtoType>())
+ if (FPT->getNumArgs() == NumArgs)
+ return true;
+ }
+ }
+ if (FD && FD->getNumParams() >= NumArgs &&
+ FD->getMinRequiredArguments() <= NumArgs)
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ unsigned NumArgs;
+ bool HasExplicitTemplateArgs;
+};
+
+// Callback that effectively disables typo correction
+class NoTypoCorrectionCCC : public CorrectionCandidateCallback {
+ public:
+ NoTypoCorrectionCCC() {
+ WantTypeSpecifiers = false;
+ WantExpressionKeywords = false;
+ WantCXXNamedCasts = false;
+ WantRemainingKeywords = false;
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return false;
+ }
+};
}
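A hedged sketch of how these callbacks are consumed; Sema::CorrectTypo's exact parameter list varies across revisions, so treat the call below as illustrative:

static TypoCorrection tryRecoveryCorrection(Sema &SemaRef, LookupResult &R,
                                            Scope *S, CXXScopeSpec &SS,
                                            unsigned NumArgs) {
  RecoveryCallCCC Validator(SemaRef, NumArgs,
                            /*HasExplicitTemplateArgs=*/false);
  // Every proposed correction is filtered through ValidateCandidate();
  // NoTypoCorrectionCCC rejects all of them, which disables correction
  // while keeping a single code path in the caller.
  return SemaRef.CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S,
                             &SS, Validator);
}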
/// Attempts to recover from a call where no functions were found.
@@ -8414,12 +9495,13 @@ static ExprResult
BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
- Expr **Args, unsigned NumArgs,
+ llvm::MutableArrayRef<Expr *> Args,
SourceLocation RParenLoc,
- bool EmptyLookup) {
+ bool EmptyLookup, bool AllowTypoCorrection) {
CXXScopeSpec SS;
SS.Adopt(ULE->getQualifierLoc());
+ SourceLocation TemplateKWLoc = ULE->getTemplateKeywordLoc();
TemplateArgumentListInfo TABuffer;
TemplateArgumentListInfo *ExplicitTemplateArgs = 0;
@@ -8430,11 +9512,16 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
Sema::LookupOrdinaryName);
+ RecoveryCallCCC Validator(SemaRef, Args.size(), ExplicitTemplateArgs != 0);
+ NoTypoCorrectionCCC RejectAll;
+ CorrectionCandidateCallback *CCC = AllowTypoCorrection ?
+ (CorrectionCandidateCallback*)&Validator :
+ (CorrectionCandidateCallback*)&RejectAll;
if (!DiagnoseTwoPhaseLookup(SemaRef, Fn->getExprLoc(), SS, R,
- ExplicitTemplateArgs, Args, NumArgs) &&
+ ExplicitTemplateArgs, Args) &&
(!EmptyLookup ||
- SemaRef.DiagnoseEmptyLookup(S, SS, R, Sema::CTC_Expression,
- ExplicitTemplateArgs, Args, NumArgs)))
+ SemaRef.DiagnoseEmptyLookup(S, SS, R, *CCC,
+ ExplicitTemplateArgs, Args)))
return ExprError();
assert(!R.empty() && "lookup results empty despite recovery");
@@ -8443,10 +9530,11 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
// casts and such from the call, we don't really care.
ExprResult NewFn = ExprError();
if ((*R.begin())->isCXXClassMember())
- NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, R,
- ExplicitTemplateArgs);
- else if (ExplicitTemplateArgs)
- NewFn = SemaRef.BuildTemplateIdExpr(SS, R, false, *ExplicitTemplateArgs);
+ NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc,
+ R, ExplicitTemplateArgs);
+ else if (ExplicitTemplateArgs || TemplateKWLoc.isValid())
+ NewFn = SemaRef.BuildTemplateIdExpr(SS, TemplateKWLoc, R, false,
+ ExplicitTemplateArgs);
else
NewFn = SemaRef.BuildDeclarationNameExpr(SS, R, false);
@@ -8457,7 +9545,8 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
// an expression with viable lookup results, which should never
// end up here.
return SemaRef.ActOnCallExpr(/*Scope*/ 0, NewFn.take(), LParenLoc,
- MultiExprArg(Args, NumArgs), RParenLoc);
+ MultiExprArg(Args.data(), Args.size()),
+ RParenLoc);
}
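The signature above takes llvm::MutableArrayRef rather than ArrayRef because recovery may rewrite argument expressions in place; the mutable view permits that. A minimal contrast (illustrative):

void fixup(llvm::MutableArrayRef<Expr *> Args) {
  if (!Args.empty())
    Args[0] = 0; // elements are writable through the view (null only for illustration)
}

void inspect(llvm::ArrayRef<Expr *> Args) {
  // Args[0] = 0; // would not compile: ArrayRef exposes read-only elements
  (void)Args;
}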
/// ResolveOverloadedCallFn - Given the call expression that calls Fn
@@ -8472,7 +9561,8 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
- Expr *ExecConfig) {
+ Expr *ExecConfig,
+ bool AllowTypoCorrection) {
#ifndef NDEBUG
if (ULE->requiresADL()) {
// To do ADL, we must have found an unqualified name.
@@ -8487,17 +9577,22 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
llvm_unreachable("performing ADL for builtin");
// We don't perform ADL in C.
- assert(getLangOptions().CPlusPlus && "ADL enabled in C");
+ assert(getLangOpts().CPlusPlus && "ADL enabled in C");
} else
assert(!ULE->isStdAssociatedNamespace() &&
"std is associated namespace but not doing ADL");
#endif
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts))
+ return ExprError();
+
OverloadCandidateSet CandidateSet(Fn->getExprLoc());
// Add the functions denoted by the callee to the set of candidate
// functions, including those from argument-dependent lookup.
- AddOverloadedCallCandidates(ULE, Args, NumArgs, CandidateSet);
+ AddOverloadedCallCandidates(ULE, llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet);
// If we found nothing, try to recover.
// BuildRecoveryCallExpr diagnoses the error itself, so we just bail
@@ -8507,26 +9602,29 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
// create a type dependent CallExpr. The goal is to postpone name lookup
// to instantiation time to be able to search into type dependent base
// classes.
- if (getLangOptions().MicrosoftExt && CurContext->isDependentContext() &&
- isa<CXXMethodDecl>(CurContext)) {
+ if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() &&
+ (isa<FunctionDecl>(CurContext) || isa<CXXRecordDecl>(CurContext))) {
CallExpr *CE = new (Context) CallExpr(Context, Fn, Args, NumArgs,
Context.DependentTy, VK_RValue,
RParenLoc);
CE->setTypeDependent(true);
return Owned(CE);
}
- return BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc, Args, NumArgs,
- RParenLoc, /*EmptyLookup=*/true);
+ return BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc,
+ llvm::MutableArrayRef<Expr *>(Args, NumArgs),
+ RParenLoc, /*EmptyLookup=*/true,
+ AllowTypoCorrection);
}
+ UnbridgedCasts.restore();
+
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best)) {
case OR_Success: {
FunctionDecl *FDecl = Best->Function;
- MarkDeclarationReferenced(Fn->getExprLoc(), FDecl);
+ MarkFunctionReferenced(Fn->getExprLoc(), FDecl);
CheckUnresolvedLookupAccess(ULE, Best->FoundDecl);
- DiagnoseUseOfDecl(FDecl? FDecl : Best->FoundDecl.getDecl(),
- ULE->getNameLoc());
+ DiagnoseUseOfDecl(FDecl, ULE->getNameLoc());
Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl);
return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, RParenLoc,
ExecConfig);
@@ -8536,34 +9634,45 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
// Try to recover by looking for viable functions which the user might
// have meant to call.
ExprResult Recovery = BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc,
- Args, NumArgs, RParenLoc,
- /*EmptyLookup=*/false);
+ llvm::MutableArrayRef<Expr *>(Args, NumArgs),
+ RParenLoc,
+ /*EmptyLookup=*/false,
+ AllowTypoCorrection);
if (!Recovery.isInvalid())
return Recovery;
- Diag(Fn->getSourceRange().getBegin(),
+ Diag(Fn->getLocStart(),
diag::err_ovl_no_viable_function_in_call)
<< ULE->getName() << Fn->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
}
case OR_Ambiguous:
- Diag(Fn->getSourceRange().getBegin(), diag::err_ovl_ambiguous_call)
+ Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call)
<< ULE->getName() << Fn->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
case OR_Deleted:
{
- Diag(Fn->getSourceRange().getBegin(), diag::err_ovl_deleted_call)
+ Diag(Fn->getLocStart(), diag::err_ovl_deleted_call)
<< Best->Function->isDeleted()
<< ULE->getName()
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Fn->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+
+      // We emitted an error for the unavailable/deleted function call but keep
+ // the call in the AST.
+ FunctionDecl *FDecl = Best->Function;
+ Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl);
+ return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs,
+ RParenLoc, ExecConfig);
}
- break;
}
// Overload resolution failed.
@@ -8603,12 +9712,8 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
// TODO: provide better source location info.
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
- if (Input->getObjectKind() == OK_ObjCProperty) {
- ExprResult Result = ConvertPropertyForRValue(Input);
- if (Result.isInvalid())
- return ExprError();
- Input = Result.take();
- }
+ if (checkPlaceholderForOverload(*this, Input))
+ return ExprError();
Expr *Args[2] = { Input, 0 };
unsigned NumArgs = 1;
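checkPlaceholderForOverload is not defined in this diff; from its call sites it plausibly folds placeholder-typed operands (subsuming the ObjC property handling deleted above) before overload resolution, roughly:

static bool checkPlaceholderForOverload(Sema &S, Expr *&E) {
  if (E->hasPlaceholderType()) {   // assumed gate; see Expr::hasPlaceholderType
    ExprResult R = S.CheckPlaceholderExpr(E);
    if (R.isInvalid())
      return true;                 // error was already diagnosed
    E = R.take();
  }
  return false;                    // false means no error occurred
}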
@@ -8648,14 +9753,15 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
OverloadCandidateSet CandidateSet(OpLoc);
// Add the candidates from the given function set.
- AddFunctionCandidates(Fns, &Args[0], NumArgs, CandidateSet, false);
+ AddFunctionCandidates(Fns, llvm::makeArrayRef(Args, NumArgs), CandidateSet,
+ false);
// Add operator candidates that are member functions.
AddMemberOperatorCandidates(Op, OpLoc, &Args[0], NumArgs, CandidateSet);
// Add candidates from ADL.
AddArgumentDependentLookupCandidates(OpName, /*Operator*/ true,
- Args, NumArgs,
+ OpLoc, llvm::makeArrayRef(Args, NumArgs),
/*ExplicitTemplateArgs*/ 0,
CandidateSet);
@@ -8675,7 +9781,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
// We matched an overloaded operator. Build a call to that
// operator.
- MarkDeclarationReferenced(OpLoc, FnDecl);
+ MarkFunctionReferenced(OpLoc, FnDecl);
// Convert the arguments.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
@@ -8709,7 +9815,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
// Build the actual expression node.
ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
- HadMultipleCandidates);
+ HadMultipleCandidates, OpLoc);
if (FnExpr.isInvalid())
return ExprError();
@@ -8741,7 +9847,8 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
// This is an erroneous use of an operator which can be overloaded by
// a non-member function. Check for non-member operators which were
// defined too late to be candidates.
- if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, Args, NumArgs))
+ if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc,
+ llvm::makeArrayRef(Args, NumArgs)))
// FIXME: Recover by calling the found function.
return ExprError();
@@ -8754,7 +9861,8 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
<< UnaryOperator::getOpcodeStr(Opc)
<< Input->getType()
<< Input->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, NumArgs,
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs),
UnaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
@@ -8764,7 +9872,8 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
<< UnaryOperator::getOpcodeStr(Opc)
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Input->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs,
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs),
UnaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
}
@@ -8841,45 +9950,15 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
OpLoc));
}
- // Always do property rvalue conversions on the RHS.
- if (Args[1]->getObjectKind() == OK_ObjCProperty) {
- ExprResult Result = ConvertPropertyForRValue(Args[1]);
- if (Result.isInvalid())
- return ExprError();
- Args[1] = Result.take();
- }
-
- // The LHS is more complicated.
- if (Args[0]->getObjectKind() == OK_ObjCProperty) {
-
- // There's a tension for assignment operators between primitive
- // property assignment and the overloaded operators.
- if (BinaryOperator::isAssignmentOp(Opc)) {
- const ObjCPropertyRefExpr *PRE = LHS->getObjCProperty();
-
- // Is the property "logically" settable?
- bool Settable = (PRE->isExplicitProperty() ||
- PRE->getImplicitPropertySetter());
-
- // To avoid gratuitously inventing semantics, use the primitive
- // unless it isn't. Thoughts in case we ever really care:
- // - If the property isn't logically settable, we have to
- // load and hope.
- // - If the property is settable and this is simple assignment,
- // we really should use the primitive.
- // - If the property is settable, then we could try overloading
- // on a generic lvalue of the appropriate type; if it works
- // out to a builtin candidate, we would do that same operation
- // on the property, and otherwise just error.
- if (Settable)
- return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
- }
+ // Always do placeholder-like conversions on the RHS.
+ if (checkPlaceholderForOverload(*this, Args[1]))
+ return ExprError();
- ExprResult Result = ConvertPropertyForRValue(Args[0]);
- if (Result.isInvalid())
- return ExprError();
- Args[0] = Result.take();
- }
+ // Do placeholder-like conversion on the LHS; note that we should
+ // not get here with a PseudoObject LHS.
+ assert(Args[0]->getObjectKind() != OK_ObjCProperty);
+ if (checkPlaceholderForOverload(*this, Args[0]))
+ return ExprError();
// If this is the assignment operator, we only perform overload resolution
// if the left-hand side is a class or enumeration type. This is actually
@@ -8899,14 +9978,14 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
OverloadCandidateSet CandidateSet(OpLoc);
// Add the candidates from the given function set.
- AddFunctionCandidates(Fns, Args, 2, CandidateSet, false);
+ AddFunctionCandidates(Fns, Args, CandidateSet, false);
// Add operator candidates that are member functions.
AddMemberOperatorCandidates(Op, OpLoc, Args, 2, CandidateSet);
// Add candidates from ADL.
AddArgumentDependentLookupCandidates(OpName, /*Operator*/ true,
- Args, 2,
+ OpLoc, Args,
/*ExplicitTemplateArgs*/ 0,
CandidateSet);
@@ -8926,7 +10005,7 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// We matched an overloaded operator. Build a call to that
// operator.
- MarkDeclarationReferenced(OpLoc, FnDecl);
+ MarkFunctionReferenced(OpLoc, FnDecl);
// Convert the arguments.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
@@ -9032,7 +10111,7 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// This is an erroneous use of an operator which can be overloaded by
// a non-member function. Check for non-member operators which were
// defined too late to be candidates.
- if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, Args, 2))
+ if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, Args))
// FIXME: Recover by calling the found function.
return ExprError();
@@ -9043,7 +10122,7 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
assert(Result.isInvalid() &&
"C++ binary operator overloading is missing candidates!");
if (Result.isInvalid())
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, 2,
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
BinaryOperator::getOpcodeStr(Opc), OpLoc);
return move(Result);
}
@@ -9053,17 +10132,32 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
<< BinaryOperator::getOpcodeStr(Opc)
<< Args[0]->getType() << Args[1]->getType()
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, 2,
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
BinaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
case OR_Deleted:
- Diag(OpLoc, diag::err_ovl_deleted_oper)
- << Best->Function->isDeleted()
- << BinaryOperator::getOpcodeStr(Opc)
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, 2,
+ if (isImplicitlyDeleted(Best->Function)) {
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+ Diag(OpLoc, diag::err_ovl_deleted_special_oper)
+ << getSpecialMember(Method)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << getDeletedOrUnavailableSuffix(Best->Function);
+
+ if (getSpecialMember(Method) != CXXInvalid) {
+ // The user probably meant to call this special member. Just
+ // explain why it's deleted.
+ NoteDeletedFunction(Method);
+ return ExprError();
+ }
+ } else {
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << BinaryOperator::getOpcodeStr(Opc)
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ }
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
BinaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
}
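The situation the new isImplicitlyDeleted() branch targets, expressed as user code:

struct S {
  const int K; // a const member makes the implicit copy assignment deleted
};

void assign(S &A, const S &B) {
  A = B; // overload resolution selects the implicitly deleted operator=;
         // the code above now names the special member and calls
         // NoteDeletedFunction to explain why it is deleted.
}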
@@ -9103,18 +10197,11 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
RLoc));
}
- if (Args[0]->getObjectKind() == OK_ObjCProperty) {
- ExprResult Result = ConvertPropertyForRValue(Args[0]);
- if (Result.isInvalid())
- return ExprError();
- Args[0] = Result.take();
- }
- if (Args[1]->getObjectKind() == OK_ObjCProperty) {
- ExprResult Result = ConvertPropertyForRValue(Args[1]);
- if (Result.isInvalid())
- return ExprError();
- Args[1] = Result.take();
- }
+ // Handle placeholders on both operands.
+ if (checkPlaceholderForOverload(*this, Args[0]))
+ return ExprError();
+ if (checkPlaceholderForOverload(*this, Args[1]))
+ return ExprError();
// Build an empty overload set.
OverloadCandidateSet CandidateSet(LLoc);
@@ -9140,7 +10227,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// We matched an overloaded operator. Build a call to that
// operator.
- MarkDeclarationReferenced(LLoc, FnDecl);
+ MarkFunctionReferenced(LLoc, FnDecl);
CheckMemberOperatorAccess(LLoc, Args[0], Args[1], Best->FoundDecl);
DiagnoseUseOfDecl(Best->FoundDecl, LLoc);
@@ -9172,12 +10259,12 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ResultTy = ResultTy.getNonLValueExprType(Context);
// Build the actual expression node.
- DeclarationNameLoc LocInfo;
- LocInfo.CXXOperatorName.BeginOpNameLoc = LLoc.getRawEncoding();
- LocInfo.CXXOperatorName.EndOpNameLoc = RLoc.getRawEncoding();
+ DeclarationNameInfo OpLocInfo(OpName, LLoc);
+ OpLocInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
HadMultipleCandidates,
- LLoc, LocInfo);
+ OpLocInfo.getLoc(),
+ OpLocInfo.getInfo());
if (FnExpr.isInvalid())
return ExprError();
@@ -9222,7 +10309,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
Diag(LLoc, diag::err_ovl_no_viable_subscript)
<< Args[0]->getType()
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, 2,
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
"[]", LLoc);
return ExprError();
}
@@ -9232,7 +10319,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
<< "[]"
<< Args[0]->getType() << Args[1]->getType()
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, 2,
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
"[]", LLoc);
return ExprError();
@@ -9241,7 +10328,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
<< Best->Function->isDeleted() << "[]"
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, 2,
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
"[]", LLoc);
return ExprError();
}
@@ -9305,7 +10392,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
resultType, valueKind, RParenLoc);
if (CheckCallReturnType(proto->getResultType(),
- op->getRHS()->getSourceRange().getBegin(),
+ op->getRHS()->getLocStart(),
call, 0))
return ExprError();
@@ -9315,6 +10402,10 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
return MaybeBindToTemporary(call);
}
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts))
+ return ExprError();
+
MemberExpr *MemExpr;
CXXMethodDecl *Method = 0;
DeclAccessPair FoundDecl = DeclAccessPair::make(0, AS_public);
@@ -9324,6 +10415,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
Method = cast<CXXMethodDecl>(MemExpr->getMemberDecl());
FoundDecl = MemExpr->getFoundDecl();
Qualifier = MemExpr->getQualifier();
+ UnbridgedCasts.restore();
} else {
UnresolvedMemberExpr *UnresExpr = cast<UnresolvedMemberExpr>(NakedMemExpr);
Qualifier = UnresExpr->getQualifier();
@@ -9353,9 +10445,9 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// Microsoft supports direct constructor calls.
- if (getLangOptions().MicrosoftExt && isa<CXXConstructorDecl>(Func)) {
- AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(), Args, NumArgs,
- CandidateSet);
+ if (getLangOpts().MicrosoftExt && isa<CXXConstructorDecl>(Func)) {
+ AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(),
+ llvm::makeArrayRef(Args, NumArgs), CandidateSet);
} else if ((Method = dyn_cast<CXXMethodDecl>(Func))) {
// If explicit template arguments were provided, we can't call a
// non-template member function.
@@ -9364,25 +10456,28 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
AddMethodCandidate(Method, I.getPair(), ActingDC, ObjectType,
ObjectClassification,
- Args, NumArgs, CandidateSet,
+ llvm::makeArrayRef(Args, NumArgs), CandidateSet,
/*SuppressUserConversions=*/false);
} else {
AddMethodTemplateCandidate(cast<FunctionTemplateDecl>(Func),
I.getPair(), ActingDC, TemplateArgs,
ObjectType, ObjectClassification,
- Args, NumArgs, CandidateSet,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet,
                                 /*SuppressUserConversions=*/false);
}
}
DeclarationName DeclName = UnresExpr->getMemberName();
+ UnbridgedCasts.restore();
+
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, UnresExpr->getLocStart(),
Best)) {
case OR_Success:
Method = cast<CXXMethodDecl>(Best->Function);
- MarkDeclarationReferenced(UnresExpr->getMemberLoc(), Method);
+ MarkFunctionReferenced(UnresExpr->getMemberLoc(), Method);
FoundDecl = Best->FoundDecl;
CheckUnresolvedMemberAccess(UnresExpr, Best->FoundDecl);
DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc());
@@ -9392,14 +10487,16 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
Diag(UnresExpr->getMemberLoc(),
diag::err_ovl_no_viable_member_function_in_call)
<< DeclName << MemExprE->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
// FIXME: Leaking incoming expressions!
return ExprError();
case OR_Ambiguous:
Diag(UnresExpr->getMemberLoc(), diag::err_ovl_ambiguous_member_call)
<< DeclName << MemExprE->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
// FIXME: Leaking incoming expressions!
return ExprError();
@@ -9409,7 +10506,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
<< DeclName
<< getDeletedOrUnavailableSuffix(Best->Function)
<< MemExprE->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
// FIXME: Leaking incoming expressions!
return ExprError();
}
@@ -9459,6 +10557,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
RParenLoc))
return ExprError();
+ DiagnoseSentinelCalls(Method, LParenLoc, Args, NumArgs);
+
if (CheckFunctionCall(Method, TheCall))
return ExprError();
@@ -9488,12 +10588,13 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc) {
+ if (checkPlaceholderForOverload(*this, Obj))
+ return ExprError();
ExprResult Object = Owned(Obj);
- if (Object.get()->getObjectKind() == OK_ObjCProperty) {
- Object = ConvertPropertyForRValue(Object.take());
- if (Object.isInvalid())
- return ExprError();
- }
+
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts))
+ return ExprError();
assert(Object.get()->getType()->isRecordType() && "Requires object type argument");
const RecordType *Record = Object.get()->getType()->getAs<RecordType>();
@@ -9566,7 +10667,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (const FunctionProtoType *Proto = ConvType->getAs<FunctionProtoType>())
{
AddSurrogateCandidate(Conv, I.getPair(), ActingContext, Proto,
- Object.get(), Args, NumArgs, CandidateSet);
+ Object.get(), llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet);
}
}
}
@@ -9584,37 +10686,42 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
case OR_No_Viable_Function:
if (CandidateSet.empty())
- Diag(Object.get()->getSourceRange().getBegin(), diag::err_ovl_no_oper)
+ Diag(Object.get()->getLocStart(), diag::err_ovl_no_oper)
<< Object.get()->getType() << /*call*/ 1
<< Object.get()->getSourceRange();
else
- Diag(Object.get()->getSourceRange().getBegin(),
+ Diag(Object.get()->getLocStart(),
diag::err_ovl_no_viable_object_call)
<< Object.get()->getType() << Object.get()->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
case OR_Ambiguous:
- Diag(Object.get()->getSourceRange().getBegin(),
+ Diag(Object.get()->getLocStart(),
diag::err_ovl_ambiguous_object_call)
<< Object.get()->getType() << Object.get()->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
case OR_Deleted:
- Diag(Object.get()->getSourceRange().getBegin(),
+ Diag(Object.get()->getLocStart(),
diag::err_ovl_deleted_object_call)
<< Best->Function->isDeleted()
<< Object.get()->getType()
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Object.get()->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, NumArgs);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
break;
}
if (Best == CandidateSet.end())
return true;
+ UnbridgedCasts.restore();
+
if (Best->Function == 0) {
// Since there is no function declaration, this is one of the
// surrogate candidates. Dig out the conversion function.
@@ -9635,12 +10742,16 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
Conv, HadMultipleCandidates);
if (Call.isInvalid())
return ExprError();
+ // Record usage of conversion in an implicit cast.
+ Call = Owned(ImplicitCastExpr::Create(Context, Call.get()->getType(),
+ CK_UserDefinedConversion,
+ Call.get(), 0, VK_RValue));
return ActOnCallExpr(S, Call.get(), LParenLoc, MultiExprArg(Args, NumArgs),
RParenLoc);
}
- MarkDeclarationReferenced(LParenLoc, Best->Function);
+ MarkFunctionReferenced(LParenLoc, Best->Function);
CheckMemberOperatorAccess(LParenLoc, Object.get(), 0, Best->FoundDecl);
DiagnoseUseOfDecl(Best->FoundDecl, LParenLoc);
@@ -9668,8 +10779,13 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
MethodArgs[ArgIdx + 1] = Args[ArgIdx];
+ DeclarationNameInfo OpLocInfo(
+ Context.DeclarationNames.getCXXOperatorName(OO_Call), LParenLoc);
+ OpLocInfo.setCXXOperatorNameRange(SourceRange(LParenLoc, RParenLoc));
ExprResult NewFn = CreateFunctionRefExpr(*this, Method,
- HadMultipleCandidates);
+ HadMultipleCandidates,
+ OpLocInfo.getLoc(),
+ OpLocInfo.getInfo());
if (NewFn.isInvalid())
return true;
@@ -9750,6 +10866,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (IsError) return true;
+ DiagnoseSentinelCalls(Method, LParenLoc, Args, NumArgs);
+
if (CheckFunctionCall(Method, TheCall))
return true;
@@ -9764,12 +10882,8 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
assert(Base->getType()->isRecordType() &&
"left-hand side must have class type");
- if (Base->getObjectKind() == OK_ObjCProperty) {
- ExprResult Result = ConvertPropertyForRValue(Base);
- if (Result.isInvalid())
- return ExprError();
- Base = Result.take();
- }
+ if (checkPlaceholderForOverload(*this, Base))
+ return ExprError();
SourceLocation Loc = Base->getExprLoc();
@@ -9815,13 +10929,13 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
else
Diag(OpLoc, diag::err_ovl_no_viable_oper)
<< "operator->" << Base->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, &Base, 1);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
return ExprError();
case OR_Ambiguous:
Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
<< "->" << Base->getType() << Base->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, &Base, 1);
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Base);
return ExprError();
case OR_Deleted:
@@ -9830,11 +10944,11 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
<< "->"
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Base->getSourceRange();
- CandidateSet.NoteCandidates(*this, OCD_AllCandidates, &Base, 1);
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
return ExprError();
}
- MarkDeclarationReferenced(OpLoc, Best->Function);
+ MarkFunctionReferenced(OpLoc, Best->Function);
CheckMemberOperatorAccess(OpLoc, Base, 0, Best->FoundDecl);
DiagnoseUseOfDecl(Best->FoundDecl, OpLoc);
@@ -9849,7 +10963,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
// Build the operator call.
ExprResult FnExpr = CreateFunctionRefExpr(*this, Method,
- HadMultipleCandidates);
+ HadMultipleCandidates, OpLoc);
if (FnExpr.isInvalid())
return ExprError();
@@ -9867,6 +10981,80 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
return MaybeBindToTemporary(TheCall);
}
+/// BuildLiteralOperatorCall - Build a UserDefinedLiteral by creating a call to
+/// a literal operator described by the provided lookup results.
+ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
+ DeclarationNameInfo &SuffixInfo,
+ ArrayRef<Expr*> Args,
+ SourceLocation LitEndLoc,
+ TemplateArgumentListInfo *TemplateArgs) {
+ SourceLocation UDSuffixLoc = SuffixInfo.getCXXLiteralOperatorNameLoc();
+
+ OverloadCandidateSet CandidateSet(UDSuffixLoc);
+ AddFunctionCandidates(R.asUnresolvedSet(), Args, CandidateSet, true,
+ TemplateArgs);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution. This will usually be trivial, but might need
+ // to perform substitutions for a literal operator template.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, UDSuffixLoc, Best)) {
+ case OR_Success:
+ case OR_Deleted:
+ break;
+
+ case OR_No_Viable_Function:
+ Diag(UDSuffixLoc, diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ return ExprError();
+
+ case OR_Ambiguous:
+ Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
+ return ExprError();
+ }
+
+ FunctionDecl *FD = Best->Function;
+ MarkFunctionReferenced(UDSuffixLoc, FD);
+ DiagnoseUseOfDecl(Best->FoundDecl, UDSuffixLoc);
+
+ ExprResult Fn = CreateFunctionRefExpr(*this, FD, HadMultipleCandidates,
+ SuffixInfo.getLoc(),
+ SuffixInfo.getInfo());
+ if (Fn.isInvalid())
+ return true;
+
+ // Check the argument types. This should almost always be a no-op, except
+ // that array-to-pointer decay is applied to string literals.
+ Expr *ConvArgs[2];
+ for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
+ ExprResult InputInit = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context, FD->getParamDecl(ArgIdx)),
+ SourceLocation(), Args[ArgIdx]);
+ if (InputInit.isInvalid())
+ return true;
+ ConvArgs[ArgIdx] = InputInit.take();
+ }
+
+ QualType ResultTy = FD->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ UserDefinedLiteral *UDL =
+ new (Context) UserDefinedLiteral(Context, Fn.take(), ConvArgs, Args.size(),
+ ResultTy, VK, LitEndLoc, UDSuffixLoc);
+
+ if (CheckCallReturnType(FD->getResultType(), UDSuffixLoc, UDL, FD))
+ return ExprError();
+
+ if (CheckFunctionCall(FD, UDL))
+ return ExprError();
+
+ return MaybeBindToTemporary(UDL);
+}
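+
+// A sketch of what the routine above resolves (illustrative; the suffix
+// '_deg' is hypothetical): given
+//   constexpr long double operator"" _deg(long double d) {
+//     return d * 3.141592653589793L / 180;
+//   }
+// the literal 90.0_deg looks up operator"" _deg, overload resolution
+// selects it as above, and the call is wrapped in a UserDefinedLiteral.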
+
/// FixOverloadedFunctionReference - E is an expression that refers to
/// a C++ overloaded function (possibly with some parentheses and
/// perhaps a '&' around it). We have resolved the overloaded function
@@ -9954,7 +11142,9 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
DeclRefExpr *DRE = DeclRefExpr::Create(Context,
ULE->getQualifierLoc(),
+ ULE->getTemplateKeywordLoc(),
Fn,
+ /*enclosing*/ false, // FIXME?
ULE->getNameLoc(),
Fn->getType(),
VK_LValue,
@@ -9980,7 +11170,9 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (cast<CXXMethodDecl>(Fn)->isStatic()) {
DeclRefExpr *DRE = DeclRefExpr::Create(Context,
MemExpr->getQualifierLoc(),
+ MemExpr->getTemplateKeywordLoc(),
Fn,
+ /*enclosing*/ false,
MemExpr->getMemberLoc(),
Fn->getType(),
VK_LValue,
@@ -9992,6 +11184,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
SourceLocation Loc = MemExpr->getMemberLoc();
if (MemExpr->getQualifier())
Loc = MemExpr->getQualifierLoc().getBeginLoc();
+ CheckCXXThisCapture(Loc);
Base = new (Context) CXXThisExpr(Loc,
MemExpr->getBaseType(),
/*isImplicit=*/true);
@@ -10012,6 +11205,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
MemberExpr *ME = MemberExpr::Create(Context, Base,
MemExpr->isArrow(),
MemExpr->getQualifierLoc(),
+ MemExpr->getTemplateKeywordLoc(),
Fn,
Found,
MemExpr->getMemberNameInfo(),
@@ -10022,7 +11216,6 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
}
llvm_unreachable("Invalid reference to overloaded function");
- return E;
}
ExprResult Sema::FixOverloadedFunctionReference(ExprResult E,
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
new file mode 100644
index 0000000..d52c912
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
@@ -0,0 +1,1351 @@
+//===--- SemaPseudoObject.cpp - Semantic Analysis for Pseudo-Objects ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for expressions involving
+// pseudo-object references. Pseudo-objects are conceptual objects
+// whose storage is entirely abstract and all accesses to which are
+// translated through some sort of abstraction barrier.
+//
+// For example, Objective-C objects can have "properties", either
+// declared or undeclared. A property may be accessed by writing
+// expr.prop
+// where 'expr' is an r-value of Objective-C pointer type and 'prop'
+// is the name of the property. If this expression is used in a context
+// needing an r-value, it is treated as if it were a message-send
+// of the associated 'getter' selector, typically:
+// [expr prop]
+// If it is used as the LHS of a simple assignment, it is treated
+// as a message-send of the associated 'setter' selector, typically:
+// [expr setProp: RHS]
+// If it is used as the LHS of a compound assignment, or the operand
+// of a unary increment or decrement, both are required; for example,
+// 'expr.prop *= 100' would be translated to:
+// [expr setProp: [expr prop] * 100]
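+// Similarly, a postfix increment 'expr.prop++' requires both accessors:
+// the getter's value is saved so it can serve as the value of the whole
+// expression, and (roughly) the store becomes:
+//   [expr setProp: [expr prop] + 1]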
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/Preprocessor.h"
+
+using namespace clang;
+using namespace sema;
+
+namespace {
+ // Basically just a very focused copy of TreeTransform.
+ template <class T> struct Rebuilder {
+ Sema &S;
+ Rebuilder(Sema &S) : S(S) {}
+
+ T &getDerived() { return static_cast<T&>(*this); }
+
+ Expr *rebuild(Expr *e) {
+ // Fast path: nothing to look through.
+ if (typename T::specific_type *specific
+ = dyn_cast<typename T::specific_type>(e))
+ return getDerived().rebuildSpecific(specific);
+
+ // Otherwise, we should look through and rebuild anything that
+ // IgnoreParens would.
+
+ if (ParenExpr *parens = dyn_cast<ParenExpr>(e)) {
+ e = rebuild(parens->getSubExpr());
+ return new (S.Context) ParenExpr(parens->getLParen(),
+ parens->getRParen(),
+ e);
+ }
+
+ if (UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
+ assert(uop->getOpcode() == UO_Extension);
+ e = rebuild(uop->getSubExpr());
+ return new (S.Context) UnaryOperator(e, uop->getOpcode(),
+ uop->getType(),
+ uop->getValueKind(),
+ uop->getObjectKind(),
+ uop->getOperatorLoc());
+ }
+
+ if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
+ assert(!gse->isResultDependent());
+ unsigned resultIndex = gse->getResultIndex();
+ unsigned numAssocs = gse->getNumAssocs();
+
+ SmallVector<Expr*, 8> assocs(numAssocs);
+ SmallVector<TypeSourceInfo*, 8> assocTypes(numAssocs);
+
+ for (unsigned i = 0; i != numAssocs; ++i) {
+ Expr *assoc = gse->getAssocExpr(i);
+ if (i == resultIndex) assoc = rebuild(assoc);
+ assocs[i] = assoc;
+ assocTypes[i] = gse->getAssocTypeSourceInfo(i);
+ }
+
+ return new (S.Context) GenericSelectionExpr(S.Context,
+ gse->getGenericLoc(),
+ gse->getControllingExpr(),
+ assocTypes.data(),
+ assocs.data(),
+ numAssocs,
+ gse->getDefaultLoc(),
+ gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(),
+ resultIndex);
+ }
+
+ llvm_unreachable("bad expression to rebuild!");
+ }
+ };
+
+ struct ObjCPropertyRefRebuilder : Rebuilder<ObjCPropertyRefRebuilder> {
+ Expr *NewBase;
+ ObjCPropertyRefRebuilder(Sema &S, Expr *newBase)
+ : Rebuilder<ObjCPropertyRefRebuilder>(S), NewBase(newBase) {}
+
+ typedef ObjCPropertyRefExpr specific_type;
+ Expr *rebuildSpecific(ObjCPropertyRefExpr *refExpr) {
+ // Fortunately, the constraint that we're rebuilding something
+ // with a base limits the number of cases here.
+ assert(refExpr->getBase());
+
+ if (refExpr->isExplicitProperty()) {
+ return new (S.Context)
+ ObjCPropertyRefExpr(refExpr->getExplicitProperty(),
+ refExpr->getType(), refExpr->getValueKind(),
+ refExpr->getObjectKind(), refExpr->getLocation(),
+ NewBase);
+ }
+ return new (S.Context)
+ ObjCPropertyRefExpr(refExpr->getImplicitPropertyGetter(),
+ refExpr->getImplicitPropertySetter(),
+ refExpr->getType(), refExpr->getValueKind(),
+ refExpr->getObjectKind(), refExpr->getLocation(),
+ NewBase);
+ }
+ };
+
+ struct ObjCSubscriptRefRebuilder : Rebuilder<ObjCSubscriptRefRebuilder> {
+ Expr *NewBase;
+ Expr *NewKeyExpr;
+ ObjCSubscriptRefRebuilder(Sema &S, Expr *newBase, Expr *newKeyExpr)
+ : Rebuilder<ObjCSubscriptRefRebuilder>(S),
+ NewBase(newBase), NewKeyExpr(newKeyExpr) {}
+
+ typedef ObjCSubscriptRefExpr specific_type;
+ Expr *rebuildSpecific(ObjCSubscriptRefExpr *refExpr) {
+ assert(refExpr->getBaseExpr());
+ assert(refExpr->getKeyExpr());
+
+ return new (S.Context)
+ ObjCSubscriptRefExpr(NewBase,
+ NewKeyExpr,
+ refExpr->getType(), refExpr->getValueKind(),
+ refExpr->getObjectKind(), refExpr->getAtIndexMethodDecl(),
+ refExpr->setAtIndexMethodDecl(),
+ refExpr->getRBracket());
+ }
+ };
+
+ class PseudoOpBuilder {
+ public:
+ Sema &S;
+ unsigned ResultIndex;
+ SourceLocation GenericLoc;
+ SmallVector<Expr *, 4> Semantics;
+
+ PseudoOpBuilder(Sema &S, SourceLocation genericLoc)
+ : S(S), ResultIndex(PseudoObjectExpr::NoResult),
+ GenericLoc(genericLoc) {}
+
+ virtual ~PseudoOpBuilder() {}
+
+ /// Add a normal semantic expression.
+ void addSemanticExpr(Expr *semantic) {
+ Semantics.push_back(semantic);
+ }
+
+ /// Add the 'result' semantic expression.
+ void addResultSemanticExpr(Expr *resultExpr) {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+ ResultIndex = Semantics.size();
+ Semantics.push_back(resultExpr);
+ }
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ ExprResult buildIncDecOperation(Scope *Sc, SourceLocation opLoc,
+ UnaryOperatorKind opcode,
+ Expr *op);
+
+ ExprResult complete(Expr *syntacticForm);
+
+ OpaqueValueExpr *capture(Expr *op);
+ OpaqueValueExpr *captureValueAsResult(Expr *op);
+
+ void setResultToLastSemantic() {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+ ResultIndex = Semantics.size() - 1;
+ }
+
+ /// Return true if assignments have a non-void result.
+ virtual bool assignmentsHaveResult() { return true; }
+
+ virtual Expr *rebuildAndCaptureObject(Expr *) = 0;
+ virtual ExprResult buildGet() = 0;
+ virtual ExprResult buildSet(Expr *, SourceLocation,
+ bool captureSetValueAsResult) = 0;
+ };
+
+ /// A PseudoOpBuilder for Objective-C @properties.
+ class ObjCPropertyOpBuilder : public PseudoOpBuilder {
+ ObjCPropertyRefExpr *RefExpr;
+ ObjCPropertyRefExpr *SyntacticRefExpr;
+ OpaqueValueExpr *InstanceReceiver;
+ ObjCMethodDecl *Getter;
+
+ ObjCMethodDecl *Setter;
+ Selector SetterSelector;
+
+ public:
+ ObjCPropertyOpBuilder(Sema &S, ObjCPropertyRefExpr *refExpr) :
+ PseudoOpBuilder(S, refExpr->getLocation()), RefExpr(refExpr),
+ SyntacticRefExpr(0), InstanceReceiver(0), Getter(0), Setter(0) {
+ }
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ ExprResult buildIncDecOperation(Scope *Sc, SourceLocation opLoc,
+ UnaryOperatorKind opcode,
+ Expr *op);
+
+ bool tryBuildGetOfReference(Expr *op, ExprResult &result);
+ bool findSetter();
+ bool findGetter();
+
+ Expr *rebuildAndCaptureObject(Expr *syntacticBase);
+ ExprResult buildGet();
+ ExprResult buildSet(Expr *op, SourceLocation, bool);
+ };
+
+ /// A PseudoOpBuilder for Objective-C array/dictionary indexing.
+ class ObjCSubscriptOpBuilder : public PseudoOpBuilder {
+ ObjCSubscriptRefExpr *RefExpr;
+ OpaqueValueExpr *InstanceBase;
+ OpaqueValueExpr *InstanceKey;
+ ObjCMethodDecl *AtIndexGetter;
+ Selector AtIndexGetterSelector;
+
+ ObjCMethodDecl *AtIndexSetter;
+ Selector AtIndexSetterSelector;
+
+ public:
+ ObjCSubscriptOpBuilder(Sema &S, ObjCSubscriptRefExpr *refExpr) :
+ PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
+ RefExpr(refExpr),
+ InstanceBase(0), InstanceKey(0),
+ AtIndexGetter(0), AtIndexSetter(0) { }
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ Expr *rebuildAndCaptureObject(Expr *syntacticBase);
+
+ bool findAtIndexGetter();
+ bool findAtIndexSetter();
+
+ ExprResult buildGet();
+ ExprResult buildSet(Expr *op, SourceLocation, bool);
+ };
+
+}
+
+/// Capture the given expression in an OpaqueValueExpr.
+OpaqueValueExpr *PseudoOpBuilder::capture(Expr *e) {
+ // Make a new OVE whose source is the given expression.
+ OpaqueValueExpr *captured =
+ new (S.Context) OpaqueValueExpr(GenericLoc, e->getType(),
+ e->getValueKind(), e->getObjectKind(),
+ e);
+
+ // Make sure we bind that in the semantics.
+ addSemanticExpr(captured);
+ return captured;
+}
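+
+// For instance (a sketch with a hypothetical property 'prop'), in
+// 'obj.prop += 2' the base 'obj' is captured in an OVE so the semantic
+// form can message the same receiver twice -- once for the getter and
+// once for the setter -- while evaluating 'obj' only once.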
+
+/// Capture the given expression as the result of this pseudo-object
+/// operation. This routine is safe against expressions which may
+/// already be captured.
+///
+/// \returns The captured expression, which will be the
+/// same as the input if the input was already captured.
+OpaqueValueExpr *PseudoOpBuilder::captureValueAsResult(Expr *e) {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+
+ // If the expression hasn't already been captured, just capture it
+ // and set it as the new result semantic expression.
+ if (!isa<OpaqueValueExpr>(e)) {
+ OpaqueValueExpr *cap = capture(e);
+ setResultToLastSemantic();
+ return cap;
+ }
+
+ // Otherwise, it must already be one of our semantic expressions;
+ // set ResultIndex to its index.
+ unsigned index = 0;
+ for (;; ++index) {
+ assert(index < Semantics.size() &&
+ "captured expression not found in semantics!");
+ if (e == Semantics[index]) break;
+ }
+ ResultIndex = index;
+ return cast<OpaqueValueExpr>(e);
+}
+
+/// The routine which creates the final PseudoObjectExpr.
+ExprResult PseudoOpBuilder::complete(Expr *syntactic) {
+ return PseudoObjectExpr::Create(S.Context, syntactic,
+ Semantics, ResultIndex);
+}
+
+/// The main skeleton for building an r-value operation.
+ExprResult PseudoOpBuilder::buildRValueOperation(Expr *op) {
+ Expr *syntacticBase = rebuildAndCaptureObject(op);
+
+ ExprResult getExpr = buildGet();
+ if (getExpr.isInvalid()) return ExprError();
+ addResultSemanticExpr(getExpr.take());
+
+ return complete(syntacticBase);
+}
+
+/// The basic skeleton for building a simple or compound
+/// assignment operation.
+ExprResult
+PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+
+ Expr *syntacticLHS = rebuildAndCaptureObject(LHS);
+ OpaqueValueExpr *capturedRHS = capture(RHS);
+
+ Expr *syntactic;
+
+ ExprResult result;
+ if (opcode == BO_Assign) {
+ result = capturedRHS;
+ syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS,
+ opcode, capturedRHS->getType(),
+ capturedRHS->getValueKind(),
+ OK_Ordinary, opcLoc);
+ } else {
+ ExprResult opLHS = buildGet();
+ if (opLHS.isInvalid()) return ExprError();
+
+ // Build an ordinary, non-compound operation.
+ BinaryOperatorKind nonCompound =
+ BinaryOperator::getOpForCompoundAssignment(opcode);
+ result = S.BuildBinOp(Sc, opcLoc, nonCompound,
+ opLHS.take(), capturedRHS);
+ if (result.isInvalid()) return ExprError();
+
+ syntactic =
+ new (S.Context) CompoundAssignOperator(syntacticLHS, capturedRHS, opcode,
+ result.get()->getType(),
+ result.get()->getValueKind(),
+ OK_Ordinary,
+ opLHS.get()->getType(),
+ result.get()->getType(),
+ opcLoc);
+ }
+
+ // The result of the assignment, if not void, is the value set into
+ // the l-value.
+ result = buildSet(result.take(), opcLoc, assignmentsHaveResult());
+ if (result.isInvalid()) return ExprError();
+ addSemanticExpr(result.take());
+
+ return complete(syntactic);
+}
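+
+// As a sketch (hypothetical property 'prop'), for 'obj.prop += rhs' the
+// semantic expressions built above are roughly, in order:
+//   OVE(obj)   - the captured base
+//   OVE(rhs)   - the captured RHS
+//   [OVE(obj) setProp: [OVE(obj) prop] + OVE(rhs)]
+// with the setter's argument captured as the overall result when
+// assignments have one.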
+
+/// The basic skeleton for building an increment or decrement
+/// operation.
+ExprResult
+PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode,
+ Expr *op) {
+ assert(UnaryOperator::isIncrementDecrementOp(opcode));
+
+ Expr *syntacticOp = rebuildAndCaptureObject(op);
+
+ // Load the value.
+ ExprResult result = buildGet();
+ if (result.isInvalid()) return ExprError();
+
+ QualType resultType = result.get()->getType();
+
+ // That's the postfix result.
+ if (UnaryOperator::isPostfix(opcode) && assignmentsHaveResult()) {
+ result = capture(result.take());
+ setResultToLastSemantic();
+ }
+
+ // Add or subtract a literal 1.
+ llvm::APInt oneV(S.Context.getTypeSize(S.Context.IntTy), 1);
+ Expr *one = IntegerLiteral::Create(S.Context, oneV, S.Context.IntTy,
+ GenericLoc);
+
+ if (UnaryOperator::isIncrementOp(opcode)) {
+ result = S.BuildBinOp(Sc, opcLoc, BO_Add, result.take(), one);
+ } else {
+ result = S.BuildBinOp(Sc, opcLoc, BO_Sub, result.take(), one);
+ }
+ if (result.isInvalid()) return ExprError();
+
+ // Store that back into the result. The value stored is the result
+ // of a prefix operation.
+ result = buildSet(result.take(), opcLoc,
+ UnaryOperator::isPrefix(opcode) && assignmentsHaveResult());
+ if (result.isInvalid()) return ExprError();
+ addSemanticExpr(result.take());
+
+ UnaryOperator *syntactic =
+ new (S.Context) UnaryOperator(syntacticOp, opcode, resultType,
+ VK_LValue, OK_Ordinary, opcLoc);
+ return complete(syntactic);
+}
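+
+// For a postfix 'obj.prop++' (a sketch), the getter's value is itself
+// captured as the "postfix result" above, so the semantic form is
+// roughly:
+//   OVE(obj), OVE([OVE(obj) prop]),
+//   [OVE(obj) setProp: OVE(getter value) + 1]
+// and the second OVE is selected as the value of the whole expression.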
+
+
+//===----------------------------------------------------------------------===//
+// Objective-C @property and implicit property references
+//===----------------------------------------------------------------------===//
+
+/// Look up a method in the receiver type of an Objective-C property
+/// reference.
+static ObjCMethodDecl *LookupMethodInReceiverType(Sema &S, Selector sel,
+ const ObjCPropertyRefExpr *PRE) {
+ if (PRE->isObjectReceiver()) {
+ const ObjCObjectPointerType *PT =
+ PRE->getBase()->getType()->castAs<ObjCObjectPointerType>();
+
+ // Special case for 'self' in class method implementations.
+ if (PT->isObjCClassType() &&
+ S.isSelfExpr(const_cast<Expr*>(PRE->getBase()))) {
+ // This cast is safe because isSelfExpr is only true within
+ // methods.
+ ObjCMethodDecl *method =
+ cast<ObjCMethodDecl>(S.CurContext->getNonClosureAncestor());
+ return S.LookupMethodInObjectType(sel,
+ S.Context.getObjCInterfaceType(method->getClassInterface()),
+ /*instance*/ false);
+ }
+
+ return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true);
+ }
+
+ if (PRE->isSuperReceiver()) {
+ if (const ObjCObjectPointerType *PT =
+ PRE->getSuperReceiverType()->getAs<ObjCObjectPointerType>())
+ return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true);
+
+ return S.LookupMethodInObjectType(sel, PRE->getSuperReceiverType(), false);
+ }
+
+ assert(PRE->isClassReceiver() && "Invalid expression");
+ QualType IT = S.Context.getObjCInterfaceType(PRE->getClassReceiver());
+ return S.LookupMethodInObjectType(sel, IT, false);
+}
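+
+// Note the 'self' special case above: in a class method, 'self.prop'
+// looks the accessor up on the current class's interface rather than on
+// the bare Class type of 'self'.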
+
+bool ObjCPropertyOpBuilder::findGetter() {
+ if (Getter) return true;
+
+ // For implicit properties, just trust the lookup we already did.
+ if (RefExpr->isImplicitProperty()) {
+ Getter = RefExpr->getImplicitPropertyGetter();
+ return (Getter != 0);
+ }
+
+ ObjCPropertyDecl *prop = RefExpr->getExplicitProperty();
+ Getter = LookupMethodInReceiverType(S, prop->getGetterName(), RefExpr);
+ return (Getter != 0);
+}
+
+/// Try to find the most accurate setter declaration for the property
+/// reference.
+///
+/// \return true if a setter was found, in which case Setter is set to
+/// the method and SetterSelector to its selector; false otherwise.
+bool ObjCPropertyOpBuilder::findSetter() {
+ // For implicit properties, just trust the lookup we already did.
+ if (RefExpr->isImplicitProperty()) {
+ if (ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter()) {
+ Setter = setter;
+ SetterSelector = setter->getSelector();
+ return true;
+ } else {
+ IdentifierInfo *getterName =
+ RefExpr->getImplicitPropertyGetter()->getSelector()
+ .getIdentifierInfoForSlot(0);
+ SetterSelector =
+ SelectorTable::constructSetterName(S.PP.getIdentifierTable(),
+ S.PP.getSelectorTable(),
+ getterName);
+ return false;
+ }
+ }
+
+ // For explicit properties, this is more involved.
+ ObjCPropertyDecl *prop = RefExpr->getExplicitProperty();
+ SetterSelector = prop->getSetterName();
+
+ // Do a normal method lookup first.
+ if (ObjCMethodDecl *setter =
+ LookupMethodInReceiverType(S, SetterSelector, RefExpr)) {
+ Setter = setter;
+ return true;
+ }
+
+ // That can fail in the somewhat crazy situation that we're
+ // type-checking a message send within the @interface declaration
+ // that declared the @property. But it's not clear that that's
+ // valuable to support.
+
+ return false;
+}
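+
+// E.g. for an implicit property whose getter is 'value', the selector
+// constructed above is 'setValue:', following the usual setter naming
+// convention.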
+
+/// Capture the base object of an Objective-C property expression.
+Expr *ObjCPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
+ assert(InstanceReceiver == 0);
+
+ // If we have a base, capture it in an OVE and rebuild the syntactic
+ // form to use the OVE as its base.
+ if (RefExpr->isObjectReceiver()) {
+ InstanceReceiver = capture(RefExpr->getBase());
+
+ syntacticBase =
+ ObjCPropertyRefRebuilder(S, InstanceReceiver).rebuild(syntacticBase);
+ }
+
+ if (ObjCPropertyRefExpr *
+ refE = dyn_cast<ObjCPropertyRefExpr>(syntacticBase->IgnoreParens()))
+ SyntacticRefExpr = refE;
+
+ return syntacticBase;
+}
+
+/// Load from an Objective-C property reference.
+ExprResult ObjCPropertyOpBuilder::buildGet() {
+ findGetter();
+ assert(Getter);
+
+ if (SyntacticRefExpr)
+ SyntacticRefExpr->setIsMessagingGetter();
+
+ QualType receiverType;
+ if (RefExpr->isClassReceiver()) {
+ receiverType = S.Context.getObjCInterfaceType(RefExpr->getClassReceiver());
+ } else if (RefExpr->isSuperReceiver()) {
+ receiverType = RefExpr->getSuperReceiverType();
+ } else {
+ assert(InstanceReceiver);
+ receiverType = InstanceReceiver->getType();
+ }
+
+ // Build a message-send.
+ ExprResult msg;
+ if (Getter->isInstanceMethod() || RefExpr->isObjectReceiver()) {
+ assert(InstanceReceiver || RefExpr->isSuperReceiver());
+ msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
+ GenericLoc, Getter->getSelector(),
+ Getter, MultiExprArg());
+ } else {
+ msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
+ GenericLoc,
+ Getter->getSelector(), Getter,
+ MultiExprArg());
+ }
+ return msg;
+}
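+
+// E.g. (a sketch) reading 'obj.count' builds the implicit message send
+// [obj count]; class and super receivers take the class-message path.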
+
+/// Store to an Objective-C property reference.
+///
+/// \param captureSetValueAsResult - If true, capture the actual
+/// value being set as the value of the property operation.
+ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
+ bool captureSetValueAsResult) {
+ bool hasSetter = findSetter();
+ assert(hasSetter); (void) hasSetter;
+
+ if (SyntacticRefExpr)
+ SyntacticRefExpr->setIsMessagingSetter();
+
+ QualType receiverType;
+ if (RefExpr->isClassReceiver()) {
+ receiverType = S.Context.getObjCInterfaceType(RefExpr->getClassReceiver());
+ } else if (RefExpr->isSuperReceiver()) {
+ receiverType = RefExpr->getSuperReceiverType();
+ } else {
+ assert(InstanceReceiver);
+ receiverType = InstanceReceiver->getType();
+ }
+
+ // Use assignment constraints when possible; they give us better
+ // diagnostics. "When possible" basically means anything except a
+ // C++ class type.
+ if (!S.getLangOpts().CPlusPlus || !op->getType()->isRecordType()) {
+ QualType paramType = (*Setter->param_begin())->getType();
+ if (!S.getLangOpts().CPlusPlus || !paramType->isRecordType()) {
+ ExprResult opResult = op;
+ Sema::AssignConvertType assignResult
+ = S.CheckSingleAssignmentConstraints(paramType, opResult);
+ if (S.DiagnoseAssignmentResult(assignResult, opcLoc, paramType,
+ op->getType(), opResult.get(),
+ Sema::AA_Assigning))
+ return ExprError();
+
+ op = opResult.take();
+ assert(op && "successful assignment left argument invalid?");
+ }
+ }
+
+ // Arguments.
+ Expr *args[] = { op };
+
+ // Build a message-send.
+ ExprResult msg;
+ if (Setter->isInstanceMethod() || RefExpr->isObjectReceiver()) {
+ msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
+ GenericLoc, SetterSelector, Setter,
+ MultiExprArg(args, 1));
+ } else {
+ msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
+ GenericLoc,
+ SetterSelector, Setter,
+ MultiExprArg(args, 1));
+ }
+
+ if (!msg.isInvalid() && captureSetValueAsResult) {
+ ObjCMessageExpr *msgExpr =
+ cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
+ Expr *arg = msgExpr->getArg(0);
+ msgExpr->setArg(0, captureValueAsResult(arg));
+ }
+
+ return msg;
+}
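+
+// E.g. (a sketch) 'obj.name = s' is emitted as [obj setName: s]; with
+// captureSetValueAsResult, the argument 's' is replaced by its OVE so
+// the assignment expression can yield the stored value.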
+
+/// @property-specific behavior for doing lvalue-to-rvalue conversion.
+ExprResult ObjCPropertyOpBuilder::buildRValueOperation(Expr *op) {
+ // Explicit properties always have getters, but implicit ones don't.
+ // Check that before proceeding.
+ if (RefExpr->isImplicitProperty() &&
+ !RefExpr->getImplicitPropertyGetter()) {
+ S.Diag(RefExpr->getLocation(), diag::err_getter_not_found)
+ << RefExpr->getBase()->getType();
+ return ExprError();
+ }
+
+ ExprResult result = PseudoOpBuilder::buildRValueOperation(op);
+ if (result.isInvalid()) return ExprError();
+
+ if (RefExpr->isExplicitProperty() && !Getter->hasRelatedResultType())
+ S.DiagnosePropertyAccessorMismatch(RefExpr->getExplicitProperty(),
+ Getter, RefExpr->getLocation());
+
+ // As a special case, if the method returns 'id', try to get
+ // a better type from the property.
+ if (RefExpr->isExplicitProperty() && result.get()->isRValue() &&
+ result.get()->getType()->isObjCIdType()) {
+ QualType propType = RefExpr->getExplicitProperty()->getType();
+ if (const ObjCObjectPointerType *ptr
+ = propType->getAs<ObjCObjectPointerType>()) {
+ if (!ptr->isObjCIdType())
+ result = S.ImpCastExprToType(result.get(), propType, CK_BitCast);
+ }
+ }
+
+ return result;
+}
+
+/// Try to build this as a call to a getter that returns a reference.
+///
+/// \return true if it was possible, whether or not it actually
+/// succeeded.
+bool ObjCPropertyOpBuilder::tryBuildGetOfReference(Expr *op,
+ ExprResult &result) {
+ if (!S.getLangOpts().CPlusPlus) return false;
+
+ findGetter();
+ assert(Getter && "property has no setter and no getter!");
+
+ // Only do this if the getter returns an l-value reference type.
+ QualType resultType = Getter->getResultType();
+ if (!resultType->isLValueReferenceType()) return false;
+
+ result = buildRValueOperation(op);
+ return true;
+}
+
+/// @property-specific behavior for doing assignments.
+ExprResult
+ObjCPropertyOpBuilder::buildAssignmentOperation(Scope *Sc,
+ SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+
+ // If there's no setter, we have no choice but to try to assign to
+ // the result of the getter.
+ if (!findSetter()) {
+ ExprResult result;
+ if (tryBuildGetOfReference(LHS, result)) {
+ if (result.isInvalid()) return ExprError();
+ return S.BuildBinOp(Sc, opcLoc, opcode, result.take(), RHS);
+ }
+
+ // Otherwise, it's an error.
+ S.Diag(opcLoc, diag::err_nosetter_property_assignment)
+ << unsigned(RefExpr->isImplicitProperty())
+ << SetterSelector
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return ExprError();
+ }
+
+ // If there is a setter, we definitely want to use it.
+
+ // Verify that we can do a compound assignment.
+ if (opcode != BO_Assign && !findGetter()) {
+ S.Diag(opcLoc, diag::err_nogetter_property_compound_assignment)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return ExprError();
+ }
+
+ ExprResult result =
+ PseudoOpBuilder::buildAssignmentOperation(Sc, opcLoc, opcode, LHS, RHS);
+ if (result.isInvalid()) return ExprError();
+
+ // Various warnings about property assignments in ARC.
+ if (S.getLangOpts().ObjCAutoRefCount && InstanceReceiver) {
+ S.checkRetainCycles(InstanceReceiver->getSourceExpr(), RHS);
+ S.checkUnsafeExprAssigns(opcLoc, LHS, RHS);
+ }
+
+ return result;
+}
+
+/// @property-specific behavior for doing increments and decrements.
+ExprResult
+ObjCPropertyOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode,
+ Expr *op) {
+ // If there's no setter, we have no choice but to try to assign to
+ // the result of the getter.
+ if (!findSetter()) {
+ ExprResult result;
+ if (tryBuildGetOfReference(op, result)) {
+ if (result.isInvalid()) return ExprError();
+ return S.BuildUnaryOp(Sc, opcLoc, opcode, result.take());
+ }
+
+ // Otherwise, it's an error.
+ S.Diag(opcLoc, diag::err_nosetter_property_incdec)
+ << unsigned(RefExpr->isImplicitProperty())
+ << unsigned(UnaryOperator::isDecrementOp(opcode))
+ << SetterSelector
+ << op->getSourceRange();
+ return ExprError();
+ }
+
+ // If there is a setter, we definitely want to use it.
+
+ // We also need a getter.
+ if (!findGetter()) {
+ assert(RefExpr->isImplicitProperty());
+ S.Diag(opcLoc, diag::err_nogetter_property_incdec)
+ << unsigned(UnaryOperator::isDecrementOp(opcode))
+ << RefExpr->getImplicitPropertyGetter()->getSelector() // FIXME!
+ << op->getSourceRange();
+ return ExprError();
+ }
+
+ return PseudoOpBuilder::buildIncDecOperation(Sc, opcLoc, opcode, op);
+}
+
+// ObjCSubscript build stuff.
+//
+
+/// Objective-C subscripting-specific behavior for doing lvalue-to-rvalue
+/// conversion.
+/// FIXME: Remove this routine if it is proven that no additional
+/// specificity is needed.
+ExprResult ObjCSubscriptOpBuilder::buildRValueOperation(Expr *op) {
+ ExprResult result = PseudoOpBuilder::buildRValueOperation(op);
+ if (result.isInvalid()) return ExprError();
+ return result;
+}
+
+/// Objective-C subscripting-specific behavior for doing assignments.
+ExprResult
+ObjCSubscriptOpBuilder::buildAssignmentOperation(Scope *Sc,
+ SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+ // There must be a method to do the indexed assignment.
+ if (!findAtIndexSetter())
+ return ExprError();
+
+ // Verify that we can do a compound assignment.
+ if (opcode != BO_Assign && !findAtIndexGetter())
+ return ExprError();
+
+ ExprResult result =
+ PseudoOpBuilder::buildAssignmentOperation(Sc, opcLoc, opcode, LHS, RHS);
+ if (result.isInvalid()) return ExprError();
+
+ // Various warnings about Objective-C indexed assignments in ARC.
+ if (S.getLangOpts().ObjCAutoRefCount && InstanceBase) {
+ S.checkRetainCycles(InstanceBase->getSourceExpr(), RHS);
+ S.checkUnsafeExprAssigns(opcLoc, LHS, RHS);
+ }
+
+ return result;
+}
+
+/// Capture the base object of an Objective-C indexed expression.
+Expr *ObjCSubscriptOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
+ assert(InstanceBase == 0);
+
+ // Capture base expression in an OVE and rebuild the syntactic
+ // form to use the OVE as its base expression.
+ InstanceBase = capture(RefExpr->getBaseExpr());
+ InstanceKey = capture(RefExpr->getKeyExpr());
+
+ syntacticBase =
+ ObjCSubscriptRefRebuilder(S, InstanceBase,
+ InstanceKey).rebuild(syntacticBase);
+
+ return syntacticBase;
+}
+
+/// CheckSubscriptingKind - This routine decides what kind of indexing
+/// is being done by "FromE".
+Sema::ObjCSubscriptKind
+Sema::CheckSubscriptingKind(Expr *FromE) {
+ // If the expression already has integral or enumeration type, we're golden.
+ QualType T = FromE->getType();
+ if (T->isIntegralOrEnumerationType())
+ return OS_Array;
+
+ // If we don't have a class type in C++, there's no way we can get an
+ // expression of integral or enumeration type.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy && T->isObjCObjectPointerType())
+ // All other scalar cases are assumed to be dictionary indexing, which
+ // the caller handles, issuing diagnostics as needed.
+ return OS_Dictionary;
+ if (!getLangOpts().CPlusPlus ||
+ !RecordTy || RecordTy->isIncompleteType()) {
+ // No indexing can be done. Issue diagnostics and quit.
+ const Expr *IndexExpr = FromE->IgnoreParenImpCasts();
+ if (isa<StringLiteral>(IndexExpr))
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_pointer)
+ << T << FixItHint::CreateInsertion(FromE->getExprLoc(), "@");
+ else
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
+ << T;
+ return OS_Error;
+ }
+
+ // We must have a complete class type.
+ if (RequireCompleteType(FromE->getExprLoc(), T,
+ PDiag(diag::err_objc_index_incomplete_class_type)
+ << FromE->getSourceRange()))
+ return OS_Error;
+
+ // Look for a conversion to an integral, enumeration type, or
+ // objective-C pointer type.
+ UnresolvedSet<4> ViableConversions;
+ UnresolvedSet<4> ExplicitConversions;
+ const UnresolvedSetImpl *Conversions
+ = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
+
+ int NoIntegrals = 0, NoObjCIdPointers = 0;
+ SmallVector<CXXConversionDecl *, 4> ConversionDecls;
+
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end();
+ I != E;
+ ++I) {
+ if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl())) {
+ QualType CT = Conversion->getConversionType().getNonReferenceType();
+ if (CT->isIntegralOrEnumerationType()) {
+ ++NoIntegrals;
+ ConversionDecls.push_back(Conversion);
+ }
+ else if (CT->isObjCIdType() || CT->isBlockPointerType()) {
+ ++NoObjCIdPointers;
+ ConversionDecls.push_back(Conversion);
+ }
+ }
+ }
+ if (NoIntegrals == 1 && NoObjCIdPointers == 0)
+ return OS_Array;
+ if (NoIntegrals == 0 && NoObjCIdPointers == 1)
+ return OS_Dictionary;
+ if (NoIntegrals == 0 && NoObjCIdPointers == 0) {
+ // No conversion function was found. Issue diagnostic and return.
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
+ << FromE->getType();
+ return OS_Error;
+ }
+ Diag(FromE->getExprLoc(), diag::err_objc_multiple_subscript_type_conversion)
+ << FromE->getType();
+ for (unsigned int i = 0; i < ConversionDecls.size(); i++)
+ Diag(ConversionDecls[i]->getLocation(), diag::not_conv_function_declared_at);
+
+ return OS_Error;
+}
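+
+// Examples of the classification above (a sketch):
+//   array[idx]    - integral or enumeration index       -> OS_Array
+//   dict[key]     - Objective-C object pointer key      -> OS_Dictionary
+//   dict["key"]   - C string literal: diagnosed, with a fix-it
+//                   suggesting the '@' prefix           -> OS_Error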
+
+bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
+ if (AtIndexGetter)
+ return true;
+
+ Expr *BaseExpr = RefExpr->getBaseExpr();
+ QualType BaseT = BaseExpr->getType();
+
+ QualType ResultType;
+ if (const ObjCObjectPointerType *PTy =
+ BaseT->getAs<ObjCObjectPointerType>()) {
+ ResultType = PTy->getPointeeType();
+ if (const ObjCObjectType *iQFaceTy =
+ ResultType->getAsObjCQualifiedInterfaceType())
+ ResultType = iQFaceTy->getBaseType();
+ }
+ Sema::ObjCSubscriptKind Res =
+ S.CheckSubscriptingKind(RefExpr->getKeyExpr());
+ if (Res == Sema::OS_Error)
+ return false;
+ bool arrayRef = (Res == Sema::OS_Array);
+
+ if (ResultType.isNull()) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type)
+ << BaseExpr->getType() << arrayRef;
+ return false;
+ }
+ if (!arrayRef) {
+ // dictionary subscripting.
+ // - (id)objectForKeyedSubscript:(id)key;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectForKeyedSubscript")
+ };
+ AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
+ }
+ else {
+ // - (id)objectAtIndexedSubscript:(size_t)index;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectAtIndexedSubscript")
+ };
+
+ AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
+ }
+
+ AtIndexGetter = S.LookupMethodInObjectType(AtIndexGetterSelector, ResultType,
+ true /*instance*/);
+ bool receiverIdType = (BaseT->isObjCIdType() ||
+ BaseT->isObjCQualifiedIdType());
+
+ if (!AtIndexGetter && S.getLangOpts().DebuggerObjCLiteral) {
+ AtIndexGetter = ObjCMethodDecl::Create(S.Context, SourceLocation(),
+ SourceLocation(), AtIndexGetterSelector,
+ S.Context.getObjCIdType() /*ReturnType*/,
+ 0 /*TypeSourceInfo */,
+ S.Context.getTranslationUnitDecl(),
+ true /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ ParmVarDecl *Argument = ParmVarDecl::Create(S.Context, AtIndexGetter,
+ SourceLocation(), SourceLocation(),
+ arrayRef ? &S.Context.Idents.get("index")
+ : &S.Context.Idents.get("key"),
+ arrayRef ? S.Context.UnsignedLongTy
+ : S.Context.getObjCIdType(),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ AtIndexGetter->setMethodParams(S.Context, Argument,
+ ArrayRef<SourceLocation>());
+ }
+
+ if (!AtIndexGetter) {
+ if (!receiverIdType) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_method_not_found)
+ << BaseExpr->getType() << 0 << arrayRef;
+ return false;
+ }
+ AtIndexGetter =
+ S.LookupInstanceMethodInGlobalPool(AtIndexGetterSelector,
+ RefExpr->getSourceRange(),
+ true, false);
+ }
+
+ if (AtIndexGetter) {
+ QualType T = AtIndexGetter->param_begin()[0]->getType();
+ if ((arrayRef && !T->isIntegralOrEnumerationType()) ||
+ (!arrayRef && !T->isObjCObjectPointerType())) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ arrayRef ? diag::err_objc_subscript_index_type
+ : diag::err_objc_subscript_key_type) << T;
+ S.Diag(AtIndexGetter->param_begin()[0]->getLocation(),
+ diag::note_parameter_type) << T;
+ return false;
+ }
+ QualType R = AtIndexGetter->getResultType();
+ if (!R->isObjCObjectPointerType()) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_indexing_method_result_type) << R << arrayRef;
+ S.Diag(AtIndexGetter->getLocation(), diag::note_method_declared_at) <<
+ AtIndexGetter->getDeclName();
+ }
+ }
+ return true;
+}
+
+bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
+ if (AtIndexSetter)
+ return true;
+
+ Expr *BaseExpr = RefExpr->getBaseExpr();
+ QualType BaseT = BaseExpr->getType();
+
+ QualType ResultType;
+ if (const ObjCObjectPointerType *PTy =
+ BaseT->getAs<ObjCObjectPointerType>()) {
+ ResultType = PTy->getPointeeType();
+ if (const ObjCObjectType *iQFaceTy =
+ ResultType->getAsObjCQualifiedInterfaceType())
+ ResultType = iQFaceTy->getBaseType();
+ }
+
+ Sema::ObjCSubscriptKind Res =
+ S.CheckSubscriptingKind(RefExpr->getKeyExpr());
+ if (Res == Sema::OS_Error)
+ return false;
+ bool arrayRef = (Res == Sema::OS_Array);
+
+ if (ResultType.isNull()) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type)
+ << BaseExpr->getType() << arrayRef;
+ return false;
+ }
+
+ if (!arrayRef) {
+ // dictionary subscripting.
+ // - (void)setObject:(id)object forKeyedSubscript:(id)key;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("setObject"),
+ &S.Context.Idents.get("forKeyedSubscript")
+ };
+ AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents);
+ }
+ else {
+ // - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("setObject"),
+ &S.Context.Idents.get("atIndexedSubscript")
+ };
+ AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents);
+ }
+ AtIndexSetter = S.LookupMethodInObjectType(AtIndexSetterSelector, ResultType,
+ true /*instance*/);
+
+ bool receiverIdType = (BaseT->isObjCIdType() ||
+ BaseT->isObjCQualifiedIdType());
+
+ if (!AtIndexSetter && S.getLangOpts().DebuggerObjCLiteral) {
+ TypeSourceInfo *ResultTInfo = 0;
+ QualType ReturnType = S.Context.VoidTy;
+ AtIndexSetter = ObjCMethodDecl::Create(S.Context, SourceLocation(),
+ SourceLocation(), AtIndexSetterSelector,
+ ReturnType,
+ ResultTInfo,
+ S.Context.getTranslationUnitDecl(),
+ true /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ SmallVector<ParmVarDecl *, 2> Params;
+ ParmVarDecl *object = ParmVarDecl::Create(S.Context, AtIndexSetter,
+ SourceLocation(), SourceLocation(),
+ &S.Context.Idents.get("object"),
+ S.Context.getObjCIdType(),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(object);
+ ParmVarDecl *key = ParmVarDecl::Create(S.Context, AtIndexSetter,
+ SourceLocation(), SourceLocation(),
+ arrayRef ? &S.Context.Idents.get("index")
+ : &S.Context.Idents.get("key"),
+ arrayRef ? S.Context.UnsignedLongTy
+ : S.Context.getObjCIdType(),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(key);
+ AtIndexSetter->setMethodParams(S.Context, Params, ArrayRef<SourceLocation>());
+ }
+
+ if (!AtIndexSetter) {
+ if (!receiverIdType) {
+ S.Diag(BaseExpr->getExprLoc(),
+ diag::err_objc_subscript_method_not_found)
+ << BaseExpr->getType() << 1 << arrayRef;
+ return false;
+ }
+ AtIndexSetter =
+ S.LookupInstanceMethodInGlobalPool(AtIndexSetterSelector,
+ RefExpr->getSourceRange(),
+ true, false);
+ }
+
+ bool err = false;
+ if (AtIndexSetter && arrayRef) {
+ QualType T = AtIndexSetter->param_begin()[1]->getType();
+ if (!T->isIntegralOrEnumerationType()) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_subscript_index_type) << T;
+ S.Diag(AtIndexSetter->param_begin()[1]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ T = AtIndexSetter->param_begin()[0]->getType();
+ if (!T->isObjCObjectPointerType()) {
+ S.Diag(RefExpr->getBaseExpr()->getExprLoc(),
+ diag::err_objc_subscript_object_type) << T << arrayRef;
+ S.Diag(AtIndexSetter->param_begin()[0]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ }
+ else if (AtIndexSetter && !arrayRef)
+ for (unsigned i = 0; i < 2; i++) {
+ QualType T = AtIndexSetter->param_begin()[i]->getType();
+ if (!T->isObjCObjectPointerType()) {
+ if (i == 1)
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_subscript_key_type) << T;
+ else
+ S.Diag(RefExpr->getBaseExpr()->getExprLoc(),
+ diag::err_objc_subscript_dic_object_type) << T;
+ S.Diag(AtIndexSetter->param_begin()[i]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ }
+
+ return !err;
+}
+
+// Get the object at "Index" position in the container.
+// [BaseExpr objectAtIndexedSubscript : IndexExpr];
+ExprResult ObjCSubscriptOpBuilder::buildGet() {
+ if (!findAtIndexGetter())
+ return ExprError();
+
+ QualType receiverType = InstanceBase->getType();
+
+ // Build a message-send.
+ ExprResult msg;
+ Expr *Index = InstanceKey;
+
+ // Arguments.
+ Expr *args[] = { Index };
+ assert(InstanceBase);
+ msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType,
+ GenericLoc,
+ AtIndexGetterSelector, AtIndexGetter,
+ MultiExprArg(args, 1));
+ return msg;
+}
+
+/// Store the "op" object into the container at the indexed location
+/// by building this message send:
+/// - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index;
+/// \param captureSetValueAsResult - If true, capture the actual
+/// value being set as the value of the subscripting operation.
+ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
+ bool captureSetValueAsResult) {
+ if (!findAtIndexSetter())
+ return ExprError();
+
+ QualType receiverType = InstanceBase->getType();
+ Expr *Index = InstanceKey;
+
+ // Arguments.
+ Expr *args[] = { op, Index };
+
+ // Build a message-send.
+ ExprResult msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType,
+ GenericLoc,
+ AtIndexSetterSelector,
+ AtIndexSetter,
+ MultiExprArg(args, 2));
+
+ if (!msg.isInvalid() && captureSetValueAsResult) {
+ ObjCMessageExpr *msgExpr =
+ cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
+ Expr *arg = msgExpr->getArg(0);
+ msgExpr->setArg(0, captureValueAsResult(arg));
+ }
+
+ return msg;
+}
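+
+// E.g. (a sketch) 'a[i] = x' becomes [a setObject:x atIndexedSubscript:i]
+// and 'd[key] = x' becomes [d setObject:x forKeyedSubscript:key], with
+// the stored object captured as the result when one is required.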
+
+//===----------------------------------------------------------------------===//
+// General Sema routines.
+//===----------------------------------------------------------------------===//
+
+ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
+ Expr *opaqueRef = E->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildRValueOperation(E);
+ }
+ else if (ObjCSubscriptRefExpr *refExpr
+ = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
+ ObjCSubscriptOpBuilder builder(*this, refExpr);
+ return builder.buildRValueOperation(E);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+/// Check an increment or decrement of a pseudo-object expression.
+ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode, Expr *op) {
+ // Do nothing if the operand is dependent.
+ if (op->isTypeDependent())
+ return new (Context) UnaryOperator(op, opcode, Context.DependentTy,
+ VK_RValue, OK_Ordinary, opcLoc);
+
+ assert(UnaryOperator::isIncrementDecrementOp(opcode));
+ Expr *opaqueRef = op->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
+ } else if (isa<ObjCSubscriptRefExpr>(opaqueRef)) {
+ Diag(opcLoc, diag::err_illegal_container_subscripting_op);
+ return ExprError();
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ // Do nothing if either argument is dependent.
+ if (LHS->isTypeDependent() || RHS->isTypeDependent())
+ return new (Context) BinaryOperator(LHS, RHS, opcode, Context.DependentTy,
+ VK_RValue, OK_Ordinary, opcLoc);
+
+ // Filter out non-overload placeholder types in the RHS.
+ if (RHS->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(RHS);
+ if (result.isInvalid()) return ExprError();
+ RHS = result.take();
+ }
+
+ Expr *opaqueRef = LHS->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else if (ObjCSubscriptRefExpr *refExpr
+ = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
+ ObjCSubscriptOpBuilder builder(*this, refExpr);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
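+
+// So (a sketch) 'obj.prop = x' and 'obj[i] = x' both funnel through the
+// routine above; only the builder that decomposes the store differs.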
+
+/// Given a pseudo-object reference, rebuild it without the opaque
+/// values. Basically, undo the behavior of rebuildAndCaptureObject.
+/// This should never operate in-place.
+static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) {
+ Expr *opaqueRef = E->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBase());
+ return ObjCPropertyRefRebuilder(S, baseOVE->getSourceExpr()).rebuild(E);
+ } else if (ObjCSubscriptRefExpr *refExpr
+ = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
+ OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBaseExpr());
+ OpaqueValueExpr *keyOVE = cast<OpaqueValueExpr>(refExpr->getKeyExpr());
+ return ObjCSubscriptRefRebuilder(S, baseOVE->getSourceExpr(),
+ keyOVE->getSourceExpr()).rebuild(E);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+/// Given a pseudo-object expression, recreate what it looks like
+/// syntactically without the attendant OpaqueValueExprs.
+///
+/// This is a hack which should be removed when TreeTransform is
+/// capable of rebuilding a tree without stripping implicit
+/// operations.
+Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
+ Expr *syntax = E->getSyntacticForm();
+ if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
+ Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
+ return new (Context) UnaryOperator(op, uop->getOpcode(), uop->getType(),
+ uop->getValueKind(), uop->getObjectKind(),
+ uop->getOperatorLoc());
+ } else if (CompoundAssignOperator *cop
+ = dyn_cast<CompoundAssignOperator>(syntax)) {
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
+ Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
+ return new (Context) CompoundAssignOperator(lhs, rhs, cop->getOpcode(),
+ cop->getType(),
+ cop->getValueKind(),
+ cop->getObjectKind(),
+ cop->getComputationLHSType(),
+ cop->getComputationResultType(),
+ cop->getOperatorLoc());
+ } else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
+ Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
+ return new (Context) BinaryOperator(lhs, rhs, bop->getOpcode(),
+ bop->getType(), bop->getValueKind(),
+ bop->getObjectKind(),
+ bop->getOperatorLoc());
+ } else {
+ assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
+ return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
index 5351896..97c8eb0 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -16,8 +16,8 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
-#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -78,7 +78,7 @@ void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
// In ARC, we don't need to retain the iteration variable of a fast
// enumeration loop. Rather than actually trying to catch that
// during declaration processing, we remove the consequences here.
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
QualType type = var->getType();
// Only do this if we inferred the lifetime. Inferred lifetime
@@ -152,7 +152,8 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
SourceLocation Loc;
SourceRange R1, R2;
- if (!E->isUnusedResultAWarning(Loc, R1, R2, Context))
+ if (SourceMgr.isInSystemMacro(E->getExprLoc()) ||
+ !E->isUnusedResultAWarning(Loc, R1, R2, Context))
return;
// Okay, we have an unused result. Depending on what the base expression is,
@@ -189,7 +190,7 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
}
}
} else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
- if (getLangOptions().ObjCAutoRefCount && ME->isDelegateInitCall()) {
+ if (getLangOpts().ObjCAutoRefCount && ME->isDelegateInitCall()) {
Diag(Loc, diag::err_arc_unused_init_message) << R1;
return;
}
@@ -198,8 +199,12 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
Diag(Loc, diag::warn_unused_result) << R1 << R2;
return;
}
- } else if (isa<ObjCPropertyRefExpr>(E)) {
- DiagID = diag::warn_unused_property_expr;
+ } else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ const Expr *Source = POE->getSyntacticForm();
+ if (isa<ObjCSubscriptRefExpr>(Source))
+ DiagID = diag::warn_unused_container_subscript_expr;
+ else
+ DiagID = diag::warn_unused_property_expr;
} else if (const CXXFunctionalCastExpr *FC
= dyn_cast<CXXFunctionalCastExpr>(E)) {
if (isa<CXXConstructExpr>(FC->getSubExpr()) ||
@@ -224,6 +229,18 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
DiagRuntimeBehavior(Loc, 0, PDiag(DiagID) << R1 << R2);
}
+void Sema::ActOnStartOfCompoundStmt() {
+ PushCompoundScope();
+}
+
+void Sema::ActOnFinishOfCompoundStmt() {
+ PopCompoundScope();
+}
+
+sema::CompoundScopeInfo &Sema::getCurCompoundScope() const {
+ return getCurFunction()->CompoundScopes.back();
+}
+
StmtResult
Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
MultiStmtArg elts, bool isStmtExpr) {
@@ -231,7 +248,7 @@ Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
Stmt **Elts = reinterpret_cast<Stmt**>(elts.release());
// If we're in C89 mode, check that we don't have any decls after stmts. If
// so, emit an extension diagnostic.
- if (!getLangOptions().C99 && !getLangOptions().CPlusPlus) {
+ if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
// Note that __extension__ can be around a decl.
unsigned i = 0;
// Skip over all declarations.
@@ -256,6 +273,15 @@ Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
DiagnoseUnusedExprResult(Elts[i]);
}
+ // Check for suspicious empty body (null statement) in `for' and `while'
+ // statements. Don't do anything for template instantiations, this just adds
+ // noise.
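+ // For example (the kind of code this catches):
+ //   while (count < limit);
+ //     ++count;   // the stray ';' leaves the loop body empty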
+ if (NumElts != 0 && !CurrentInstantiationScope &&
+ getCurCompoundScope().HasEmptyLoopBodies) {
+ for (unsigned i = 0; i != NumElts - 1; ++i)
+ DiagnoseEmptyLoopBody(Elts[i], Elts[i + 1]);
+ }
+
return Owned(new (Context) CompoundStmt(Context, Elts, NumElts, L, R));
}
@@ -265,22 +291,26 @@ Sema::ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation ColonLoc) {
assert((LHSVal != 0) && "missing expression in case statement");
- // C99 6.8.4.2p3: The expression shall be an integer constant.
- // However, GCC allows any evaluatable integer expression.
- if (!LHSVal->isTypeDependent() && !LHSVal->isValueDependent() &&
- VerifyIntegerConstantExpression(LHSVal))
+ if (getCurFunction()->SwitchStack.empty()) {
+ Diag(CaseLoc, diag::err_case_not_in_switch);
return StmtError();
+ }
- // GCC extension: The expression shall be an integer constant.
+ if (!getLangOpts().CPlusPlus0x) {
+ // C99 6.8.4.2p3: The expression shall be an integer constant.
+ // However, GCC allows any evaluatable integer expression.
+ if (!LHSVal->isTypeDependent() && !LHSVal->isValueDependent()) {
+ LHSVal = VerifyIntegerConstantExpression(LHSVal).take();
+ if (!LHSVal)
+ return StmtError();
+ }
- if (RHSVal && !RHSVal->isTypeDependent() && !RHSVal->isValueDependent() &&
- VerifyIntegerConstantExpression(RHSVal)) {
- RHSVal = 0; // Recover by just forgetting about it.
- }
+ // GCC extension: The expression shall be an integer constant.
- if (getCurFunction()->SwitchStack.empty()) {
- Diag(CaseLoc, diag::err_case_not_in_switch);
- return StmtError();
+ if (RHSVal && !RHSVal->isTypeDependent() && !RHSVal->isValueDependent()) {
+ RHSVal = VerifyIntegerConstantExpression(RHSVal).take();
+ // Recover from an error by just forgetting about it.
+ }
}
CaseStmt *CS = new (Context) CaseStmt(LHSVal, RHSVal, CaseLoc, DotDotDotLoc,
@@ -350,21 +380,9 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
DiagnoseUnusedExprResult(thenStmt);
- // Warn if the if block has a null body without an else value.
- // this helps prevent bugs due to typos, such as
- // if (condition);
- // do_stuff();
- //
if (!elseStmt) {
- if (NullStmt* stmt = dyn_cast<NullStmt>(thenStmt))
- // But do not warn if the body is a macro that expands to nothing, e.g:
- //
- // #define CALL(x)
- // if (condition)
- // CALL(0);
- //
- if (!stmt->hasLeadingEmptyMacro())
- Diag(stmt->getSemiLoc(), diag::warn_empty_if_body);
+ DiagnoseEmptyStmtBody(ConditionExpr->getLocEnd(), thenStmt,
+ diag::warn_empty_if_body);
}
DiagnoseUnusedExprResult(elseStmt);
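
The old inline null-body check moves into DiagnoseEmptyStmtBody; a minimal sketch of the code it targets (hypothetical user code):

    void log_error() {}
    void check(bool failed) {
      if (failed);    // warning: if statement has empty body
        log_error();  // runs unconditionally because of the stray semicolon
    }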
@@ -501,7 +519,8 @@ Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
PDiag(diag::note_switch_conversion),
PDiag(diag::err_switch_multiple_conversions),
PDiag(diag::note_switch_conversion),
- PDiag(0));
+ PDiag(0),
+ /*AllowScopedEnumerations*/ true);
if (CondResult.isInvalid()) return StmtError();
Cond = CondResult.take();
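
With AllowScopedEnumerations now passed as true, a C++11 scoped enumeration works directly as a switch condition; a minimal sketch (assuming -std=c++11):

    enum class Color { Red, Green, Blue };
    int channel(Color c) {
      switch (c) {                       // OK: no conversion to int is required
        case Color::Red:   return 0;
        case Color::Green: return 1;
        case Color::Blue:  return 2;
      }
      return -1;
    }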
@@ -617,8 +636,6 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
} else {
CaseStmt *CS = cast<CaseStmt>(SC);
- // We already verified that the expression has a i-c-e value (C99
- // 6.8.4.2p3) - get that value now.
Expr *Lo = CS->getLHS();
if (Lo->isTypeDependent() || Lo->isValueDependent()) {
@@ -626,16 +643,39 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
break;
}
- llvm::APSInt LoVal = Lo->EvaluateKnownConstInt(Context);
+ llvm::APSInt LoVal;
+
+ if (getLangOpts().CPlusPlus0x) {
+ // C++11 [stmt.switch]p2: the constant-expression shall be a converted
+ // constant expression of the promoted type of the switch condition.
+ ExprResult ConvLo =
+ CheckConvertedConstantExpression(Lo, CondType, LoVal, CCEK_CaseValue);
+ if (ConvLo.isInvalid()) {
+ CaseListIsErroneous = true;
+ continue;
+ }
+ Lo = ConvLo.take();
+ } else {
+ // We already verified that the expression has an i-c-e value (C99
+ // 6.8.4.2p3) - get that value now.
+ LoVal = Lo->EvaluateKnownConstInt(Context);
+
+ // If the LHS is not the same type as the condition, insert an implicit
+ // cast.
+ Lo = DefaultLvalueConversion(Lo).take();
+ Lo = ImpCastExprToType(Lo, CondType, CK_IntegralCast).take();
+ }
- // Convert the value to the same width/sign as the condition.
+ // Convert the value to the same width/sign as the condition had prior to
+ // integral promotions.
+ //
+ // FIXME: This causes us to reject valid code:
+ // switch ((char)c) { case 256: case 0: return 0; }
+ // Here we claim there is a duplicated condition value, but there is not.
ConvertIntegerToTypeWarnOnOverflow(LoVal, CondWidth, CondIsSigned,
Lo->getLocStart(),
diag::warn_case_value_overflow);
- // If the LHS is not the same type as the condition, insert an implicit
- // cast.
- Lo = ImpCastExprToType(Lo, CondType, CK_IntegralCast).take();
CS->setLHS(Lo);
// If this is a case range, remember it in CaseRanges, otherwise CaseVals.
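
In C++11 mode the case value now goes through CheckConvertedConstantExpression rather than plain ICE evaluation, so a constexpr call is accepted while a value that does not fit the promoted condition type is rejected; a sketch (assuming -std=c++11, diagnostic wording approximate):

    constexpr int flag() { return 3; }
    int classify(int n) {
      switch (n) {
        case flag():           return 1;  // OK: a converted constant expression
        // case 5000000000LL:  return 2;  // ill-formed: not representable in 'int'
        default:               return 0;
      }
    }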
@@ -656,19 +696,15 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// condition is constant.
llvm::APSInt ConstantCondValue;
bool HasConstantCond = false;
- bool ShouldCheckConstantCond = false;
if (!HasDependentValue && !TheDefaultStmt) {
- Expr::EvalResult Result;
- HasConstantCond = CondExprBeforePromotion->Evaluate(Result, Context);
- if (HasConstantCond) {
- assert(Result.Val.isInt() && "switch condition evaluated to non-int");
- ConstantCondValue = Result.Val.getInt();
- ShouldCheckConstantCond = true;
-
- assert(ConstantCondValue.getBitWidth() == CondWidth &&
- ConstantCondValue.isSigned() == CondIsSigned);
- }
+ HasConstantCond
+ = CondExprBeforePromotion->EvaluateAsInt(ConstantCondValue, Context,
+ Expr::SE_AllowSideEffects);
+ assert(!HasConstantCond ||
+ (ConstantCondValue.getBitWidth() == CondWidth &&
+ ConstantCondValue.isSigned() == CondIsSigned));
}
+ bool ShouldCheckConstantCond = HasConstantCond;
// Sort all the scalar case values so we can easily detect duplicates.
std::stable_sort(CaseVals.begin(), CaseVals.end(), CmpCaseVals);
@@ -705,16 +741,33 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
llvm::APSInt &LoVal = CaseRanges[i].first;
CaseStmt *CR = CaseRanges[i].second;
Expr *Hi = CR->getRHS();
- llvm::APSInt HiVal = Hi->EvaluateKnownConstInt(Context);
+ llvm::APSInt HiVal;
+
+ if (getLangOpts().CPlusPlus0x) {
+ // C++11 [stmt.switch]p2: the constant-expression shall be a converted
+ // constant expression of the promoted type of the switch condition.
+ ExprResult ConvHi =
+ CheckConvertedConstantExpression(Hi, CondType, HiVal,
+ CCEK_CaseValue);
+ if (ConvHi.isInvalid()) {
+ CaseListIsErroneous = true;
+ continue;
+ }
+ Hi = ConvHi.take();
+ } else {
+ HiVal = Hi->EvaluateKnownConstInt(Context);
+
+ // If the RHS is not the same type as the condition, insert an
+ // implicit cast.
+ Hi = DefaultLvalueConversion(Hi).take();
+ Hi = ImpCastExprToType(Hi, CondType, CK_IntegralCast).take();
+ }
// Convert the value to the same width/sign as the condition.
ConvertIntegerToTypeWarnOnOverflow(HiVal, CondWidth, CondIsSigned,
Hi->getLocStart(),
diag::warn_case_value_overflow);
- // If the LHS is not the same type as the condition, insert an implicit
- // cast.
- Hi = ImpCastExprToType(Hi, CondType, CK_IntegralCast).take();
CR->setRHS(Hi);
// If the low value is bigger than the high value, the case is empty.
@@ -821,39 +874,35 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
// See which case values aren't in enum.
- // TODO: we might want to check whether case values are out of the
- // enum even if we don't want to check whether all cases are handled.
- if (!TheDefaultStmt) {
- EnumValsTy::const_iterator EI = EnumVals.begin();
- for (CaseValsTy::const_iterator CI = CaseVals.begin();
- CI != CaseVals.end(); CI++) {
- while (EI != EIend && EI->first < CI->first)
- EI++;
- if (EI == EIend || EI->first > CI->first)
- Diag(CI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
- << ED->getDeclName();
+ EnumValsTy::const_iterator EI = EnumVals.begin();
+ for (CaseValsTy::const_iterator CI = CaseVals.begin();
+ CI != CaseVals.end(); CI++) {
+ while (EI != EIend && EI->first < CI->first)
+ EI++;
+ if (EI == EIend || EI->first > CI->first)
+ Diag(CI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
+ }
+ // See which of case ranges aren't in enum
+ EI = EnumVals.begin();
+ for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
+ RI != CaseRanges.end() && EI != EIend; RI++) {
+ while (EI != EIend && EI->first < RI->first)
+ EI++;
+
+ if (EI == EIend || EI->first != RI->first) {
+ Diag(RI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
}
- // See which of case ranges aren't in enum
- EI = EnumVals.begin();
- for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
- RI != CaseRanges.end() && EI != EIend; RI++) {
- while (EI != EIend && EI->first < RI->first)
- EI++;
-
- if (EI == EIend || EI->first != RI->first) {
- Diag(RI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
- << ED->getDeclName();
- }
- llvm::APSInt Hi =
- RI->second->getRHS()->EvaluateKnownConstInt(Context);
- AdjustAPSInt(Hi, CondWidth, CondIsSigned);
- while (EI != EIend && EI->first < Hi)
- EI++;
- if (EI == EIend || EI->first != Hi)
- Diag(RI->second->getRHS()->getExprLoc(), diag::warn_not_in_enum)
- << ED->getDeclName();
- }
+ llvm::APSInt Hi =
+ RI->second->getRHS()->EvaluateKnownConstInt(Context);
+ AdjustAPSInt(Hi, CondWidth, CondIsSigned);
+ while (EI != EIend && EI->first < Hi)
+ EI++;
+ if (EI == EIend || EI->first != Hi)
+ Diag(RI->second->getRHS()->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
}
// Check which enum vals aren't in switch
@@ -863,7 +912,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
SmallVector<DeclarationName,8> UnhandledNames;
- for (EnumValsTy::const_iterator EI = EnumVals.begin(); EI != EIend; EI++){
+ for (EI = EnumVals.begin(); EI != EIend; EI++){
// Drop unneeded case values
llvm::APSInt CIVal;
while (CI != CaseVals.end() && CI->first < EI->first)
@@ -883,28 +932,34 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (RI == CaseRanges.end() || EI->first < RI->first) {
hasCasesNotInSwitch = true;
- if (!TheDefaultStmt)
- UnhandledNames.push_back(EI->second->getDeclName());
+ UnhandledNames.push_back(EI->second->getDeclName());
}
}
+ if (TheDefaultStmt && UnhandledNames.empty())
+ Diag(TheDefaultStmt->getDefaultLoc(), diag::warn_unreachable_default);
+
// Produce a nice diagnostic if multiple values aren't handled.
switch (UnhandledNames.size()) {
case 0: break;
case 1:
- Diag(CondExpr->getExprLoc(), diag::warn_missing_case1)
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_case1 : diag::warn_missing_case1)
<< UnhandledNames[0];
break;
case 2:
- Diag(CondExpr->getExprLoc(), diag::warn_missing_case2)
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_case2 : diag::warn_missing_case2)
<< UnhandledNames[0] << UnhandledNames[1];
break;
case 3:
- Diag(CondExpr->getExprLoc(), diag::warn_missing_case3)
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_case3 : diag::warn_missing_case3)
<< UnhandledNames[0] << UnhandledNames[1] << UnhandledNames[2];
break;
default:
- Diag(CondExpr->getExprLoc(), diag::warn_missing_cases)
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_cases : diag::warn_missing_cases)
<< (unsigned)UnhandledNames.size()
<< UnhandledNames[0] << UnhandledNames[1] << UnhandledNames[2];
break;
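
The new warn_def_missing_case* variants let the wording reflect whether a default label is present. A sketch of both sides (hypothetical enum; diagnostic wording approximate):

    enum Fruit { Apple, Pear, Plum };

    int price(Fruit f) {
      switch (f) {            // warning: enumeration value 'Plum' not handled in switch
        case Apple: return 1;
        case Pear:  return 2;
      }
      return 0;
    }

    int cost(Fruit f) {
      switch (f) {
        case Apple: return 1;
        case Pear:  return 2;
        case Plum:  return 3;
        default:    return 0; // warning: default label in a switch that covers all
      }                       // enumeration values (warn_unreachable_default)
    }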
@@ -915,6 +970,9 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
}
}
+ DiagnoseEmptyStmtBody(CondExpr->getLocEnd(), BodyStmt,
+ diag::warn_empty_switch_body);
+
// FIXME: If the case list was broken in some way, we don't have a good system
// to patch it up. Instead, just return the whole substmt as broken.
if (CaseListIsErroneous)
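
And the companion empty-body check for the switch itself; a one-line sketch:

    void dispatch(int op) {
      switch (op);   // warning: switch statement has empty body
    }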
@@ -941,6 +999,9 @@ Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
DiagnoseUnusedExprResult(Body);
+ if (isa<NullStmt>(Body))
+ getCurCompoundScope().setHasEmptyLoopBodies();
+
return Owned(new (Context) WhileStmt(Context, ConditionVar, ConditionExpr,
Body, WhileLoc));
}
@@ -972,7 +1033,7 @@ Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
Stmt *First, FullExprArg second, Decl *secondVar,
FullExprArg third,
SourceLocation RParenLoc, Stmt *Body) {
- if (!getLangOptions().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus) {
if (DeclStmt *DS = dyn_cast_or_null<DeclStmt>(First)) {
// C99 6.8.5p3: The declaration part of a 'for' statement shall only
// declare identifiers for objects having storage class 'auto' or
@@ -1004,6 +1065,9 @@ Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
DiagnoseUnusedExprResult(Third);
DiagnoseUnusedExprResult(Body);
+ if (isa<NullStmt>(Body))
+ getCurCompoundScope().setHasEmptyLoopBodies();
+
return Owned(new (Context) ForStmt(Context, First,
SecondResult.take(), ConditionVar,
Third, Body, ForLoc, LParenLoc,
@@ -1015,10 +1079,18 @@ Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
/// x can be an arbitrary l-value expression. Bind it up as a
/// full-expression.
StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
+ // Reduce placeholder expressions here. Note that this rejects the
+ // use of pseudo-object l-values in this position.
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return StmtError();
+ E = result.take();
+
CheckImplicitConversions(E);
- ExprResult Result = MaybeCreateExprWithCleanups(E);
- if (Result.isInvalid()) return StmtError();
- return Owned(static_cast<Stmt*>(Result.get()));
+
+ result = MaybeCreateExprWithCleanups(E);
+ if (result.isInvalid()) return StmtError();
+
+ return Owned(static_cast<Stmt*>(result.take()));
}
ExprResult
@@ -1048,13 +1120,13 @@ Sema::ActOnObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
ObjCInterfaceDecl *iface = objectType->getInterface();
// If we have a forward-declared type, we can't do this check.
- if (iface && iface->isForwardDecl()) {
- // This is ill-formed under ARC.
- if (getLangOptions().ObjCAutoRefCount) {
- Diag(forLoc, diag::err_arc_collection_forward)
- << pointerType->getPointeeType() << collection->getSourceRange();
- }
-
+ // Under ARC, it is an error for the collection's class to be only forward-declared.
+ if (iface &&
+ RequireCompleteType(forLoc, QualType(objectType, 0),
+ getLangOpts().ObjCAutoRefCount
+ ? PDiag(diag::err_arc_collection_forward)
+ << collection->getSourceRange()
+ : PDiag(0))) {
// Otherwise, if we have any useful type information, check that
// the type declares the appropriate method.
} else if (iface || !objectType->qual_empty()) {
@@ -1157,8 +1229,9 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
// Deduce the type for the iterator variable now rather than leaving it to
// AddInitializerToDecl, so we can produce a more suitable diagnostic.
TypeSourceInfo *InitTSI = 0;
- if (Init->getType()->isVoidType() ||
- !SemaRef.DeduceAutoType(Decl->getTypeSourceInfo(), Init, InitTSI))
+ if ((!isa<InitListExpr>(Init) && Init->getType()->isVoidType()) ||
+ SemaRef.DeduceAutoType(Decl->getTypeSourceInfo(), Init, InitTSI) ==
+ Sema::DAR_Failed)
SemaRef.Diag(Loc, diag) << Init->getType();
if (!InitTSI) {
Decl->setInvalidDecl();
@@ -1170,7 +1243,7 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
// In ARC, infer lifetime.
// FIXME: ARC may want to turn this into 'const __unsafe_unretained' if
// we're doing the equivalent of fast iteration.
- if (SemaRef.getLangOptions().ObjCAutoRefCount &&
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
SemaRef.inferObjCARCLifetime(Decl))
Decl->setInvalidDecl();
@@ -1223,7 +1296,9 @@ static ExprResult BuildForRangeBeginEndCall(Sema &SemaRef, Scope *S,
ExprResult MemberRef =
SemaRef.BuildMemberReferenceExpr(Range, Range->getType(), Loc,
/*IsPtr=*/false, CXXScopeSpec(),
- /*Qualifier=*/0, MemberLookup,
+ /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ MemberLookup,
/*TemplateArgs=*/0);
if (MemberRef.isInvalid())
return ExprError();
@@ -1242,7 +1317,7 @@ static ExprResult BuildForRangeBeginEndCall(Sema &SemaRef, Scope *S,
FoundNames.begin(), FoundNames.end(),
/*LookInStdNamespace=*/true);
CallExpr = SemaRef.BuildOverloadedCallExpr(S, Fn, Fn, Loc, &Range, 1, Loc,
- 0);
+ 0, /*AllowTypoCorrection=*/false);
if (CallExpr.isInvalid()) {
SemaRef.Diag(Range->getLocStart(), diag::note_for_range_type)
<< Range->getType();
@@ -1542,7 +1617,12 @@ StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
if (!S || !B)
return StmtError();
- cast<CXXForRangeStmt>(S)->setBody(B);
+ CXXForRangeStmt *ForStmt = cast<CXXForRangeStmt>(S);
+ ForStmt->setBody(B);
+
+ DiagnoseEmptyStmtBody(ForStmt->getRParenLoc(), B,
+ diag::warn_empty_range_based_for_body);
+
return S;
}
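
The same empty-body diagnostic now covers range-based for; a sketch (assuming -std=c++11, and that the usual heuristics against macro-expanded null statements apply):

    #include <vector>
    int sum(const std::vector<int> &v) {
      int total = 0;
      for (int x : v);   // stray semicolon: warning, the loop does nothing
      return total;      // always 0
    }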
@@ -1569,6 +1649,7 @@ Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
E = ExprRes.take();
if (DiagnoseAssignmentResult(ConvTy, StarLoc, DestTy, ETy, E, AA_Passing))
return StmtError();
+ E = MaybeCreateExprWithCleanups(E);
}
getCurFunction()->setHasIndirectGoto();
@@ -1639,14 +1720,29 @@ const VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType,
if (!VD)
return 0;
- if (VD->hasLocalStorage() && !VD->isExceptionVariable() &&
- !VD->getType()->isReferenceType() && !VD->hasAttr<BlocksAttr>() &&
- !VD->getType().isVolatileQualified() &&
- ((VD->getKind() == Decl::Var) ||
- (AllowFunctionParameter && VD->getKind() == Decl::ParmVar)))
- return VD;
+ // ...object (other than a function or catch-clause parameter)...
+ if (VD->getKind() != Decl::Var &&
+ !(AllowFunctionParameter && VD->getKind() == Decl::ParmVar))
+ return 0;
+ if (VD->isExceptionVariable()) return 0;
+
+ // ...automatic...
+ if (!VD->hasLocalStorage()) return 0;
- return 0;
+ // ...non-volatile...
+ if (VD->getType().isVolatileQualified()) return 0;
+ if (VD->getType()->isReferenceType()) return 0;
+
+ // __block variables can't be allocated in a way that permits NRVO.
+ if (VD->hasAttr<BlocksAttr>()) return 0;
+
+ // Variables with higher required alignment than their type's ABI
+ // alignment cannot use NRVO.
+ if (VD->hasAttr<AlignedAttr>() &&
+ Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VD->getType()))
+ return 0;
+
+ return VD;
}
/// \brief Perform the initialization of a potentially-movable value, which
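
Restating the rewritten NRVO candidate test as user-level code (a sketch; 'Big' is a hypothetical type, and the alignas case assumes C++11):

    struct Big { int data[64]; };

    Big good() {
      Big b = {};               // automatic, non-volatile, non-reference local: candidate
      return b;                 // the copy may be elided
    }

    Big notAutomatic() {
      static Big s = {};        // not automatic storage: rejected
      return s;
    }

    Big overAligned() {
      alignas(128) Big b = {};  // required alignment exceeds the type's ABI alignment:
      return b;                 // rejected by the new AlignedAttr check
    }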
@@ -1726,42 +1822,64 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
return Res;
}
-/// ActOnBlockReturnStmt - Utility routine to figure out block's return type.
+/// ActOnCapScopeReturnStmt - Utility routine to type-check return statements
+/// for capturing scopes.
///
StmtResult
-Sema::ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
- // If this is the first return we've seen in the block, infer the type of
- // the block from it.
- BlockScopeInfo *CurBlock = getCurBlock();
- if (CurBlock->ReturnType.isNull()) {
- if (RetValExp) {
- // Don't call UsualUnaryConversions(), since we don't want to do
- // integer promotions here.
+Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+ // If this is the first return we've seen, infer the return type.
+ // [expr.prim.lambda]p4 in C++11; block literals follow a superset of those
+ // rules which allows multiple return statements.
+ CapturingScopeInfo *CurCap = cast<CapturingScopeInfo>(getCurFunction());
+ if (CurCap->HasImplicitReturnType) {
+ QualType ReturnT;
+ if (RetValExp && !isa<InitListExpr>(RetValExp)) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(RetValExp);
if (Result.isInvalid())
return StmtError();
RetValExp = Result.take();
- if (!RetValExp->isTypeDependent()) {
- CurBlock->ReturnType = RetValExp->getType();
- if (BlockDeclRefExpr *CDRE = dyn_cast<BlockDeclRefExpr>(RetValExp)) {
- // We have to remove a 'const' added to copied-in variable which was
- // part of the implementation spec. and not the actual qualifier for
- // the variable.
- if (CDRE->isConstQualAdded())
- CurBlock->ReturnType.removeLocalConst(); // FIXME: local???
- }
- } else
- CurBlock->ReturnType = Context.DependentTy;
- } else
- CurBlock->ReturnType = Context.VoidTy;
+ if (!RetValExp->isTypeDependent())
+ ReturnT = RetValExp->getType();
+ else
+ ReturnT = Context.DependentTy;
+ } else {
+ if (RetValExp) {
+ // C++11 [expr.prim.lambda]p4 bans inferring the result from an
+ // initializer list, because it is not an expression (even
+ // though we represent it as one). We still deduce 'void'.
+ Diag(ReturnLoc, diag::err_lambda_return_init_list)
+ << RetValExp->getSourceRange();
+ }
+
+ ReturnT = Context.VoidTy;
+ }
+ // We require the return types to strictly match here.
+ if (!CurCap->ReturnType.isNull() &&
+ !CurCap->ReturnType->isDependentType() &&
+ !ReturnT->isDependentType() &&
+ !Context.hasSameType(ReturnT, CurCap->ReturnType)) {
+ Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible)
+ << ReturnT << CurCap->ReturnType
+ << (getCurLambda() != 0);
+ return StmtError();
+ }
+ CurCap->ReturnType = ReturnT;
}
- QualType FnRetType = CurBlock->ReturnType;
+ QualType FnRetType = CurCap->ReturnType;
+ assert(!FnRetType.isNull());
- if (CurBlock->FunctionType->getAs<FunctionType>()->getNoReturnAttr()) {
- Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr)
- << getCurFunctionOrMethodDecl()->getDeclName();
- return StmtError();
+ if (BlockScopeInfo *CurBlock = dyn_cast<BlockScopeInfo>(CurCap)) {
+ if (CurBlock->FunctionType->getAs<FunctionType>()->getNoReturnAttr()) {
+ Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr);
+ return StmtError();
+ }
+ } else {
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CurCap);
+ if (LSI->CallOperator->getType()->getAs<FunctionType>()->getNoReturnAttr()){
+ Diag(ReturnLoc, diag::err_noreturn_lambda_has_return_expr);
+ return StmtError();
+ }
}
// Otherwise, verify that this result type matches the previous one. We are
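
A sketch of the strict-match rule for inferred return types in a capturing scope (C++11 lambdas; Clang deduces across multiple return statements here):

    auto ok = [](bool b) {
      if (b) return 1;
      return 2;            // OK: both returns deduce 'int'
    };

    // auto bad = [](bool b) {
    //   if (b) return 1;
    //   return 2.0;       // error: 'double' does not match previously deduced 'int'
    // };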
@@ -1772,12 +1890,17 @@ Sema::ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// Delay processing for now. TODO: there are lots of dependent
// types we can conclusively prove aren't void.
} else if (FnRetType->isVoidType()) {
- if (RetValExp &&
- !(getLangOptions().CPlusPlus &&
+ if (RetValExp && !isa<InitListExpr>(RetValExp) &&
+ !(getLangOpts().CPlusPlus &&
(RetValExp->isTypeDependent() ||
RetValExp->getType()->isVoidType()))) {
- Diag(ReturnLoc, diag::err_return_block_has_expr);
- RetValExp = 0;
+ if (!getLangOpts().CPlusPlus &&
+ RetValExp->getType()->isVoidType())
+ Diag(ReturnLoc, diag::ext_return_has_void_expr) << "literal" << 2;
+ else {
+ Diag(ReturnLoc, diag::err_return_block_has_expr);
+ RetValExp = 0;
+ }
}
} else if (!RetValExp) {
return StmtError(Diag(ReturnLoc, diag::err_block_return_missing_expr));
@@ -1793,7 +1916,7 @@ Sema::ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
FnRetType,
- NRVOCandidate != 0);
+ NRVOCandidate != 0);
ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
FnRetType, RetValExp);
if (Res.isInvalid()) {
@@ -1813,7 +1936,7 @@ Sema::ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// If we need to check for the named return value optimization, save the
// return statement in our scope for later processing.
- if (getLangOptions().CPlusPlus && FnRetType->isRecordType() &&
+ if (getLangOpts().CPlusPlus && FnRetType->isRecordType() &&
!CurContext->isDependentContext())
FunctionScopes.back()->Returns.push_back(Result);
@@ -1826,28 +1949,25 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
return StmtError();
- if (getCurBlock())
- return ActOnBlockReturnStmt(ReturnLoc, RetValExp);
+ if (isa<CapturingScopeInfo>(getCurFunction()))
+ return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp);
QualType FnRetType;
- QualType DeclaredRetType;
+ QualType RelatedRetType;
if (const FunctionDecl *FD = getCurFunctionDecl()) {
FnRetType = FD->getResultType();
- DeclaredRetType = FnRetType;
if (FD->hasAttr<NoReturnAttr>() ||
FD->getType()->getAs<FunctionType>()->getNoReturnAttr())
Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
- << getCurFunctionOrMethodDecl()->getDeclName();
+ << FD->getDeclName();
} else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
- DeclaredRetType = MD->getResultType();
+ FnRetType = MD->getResultType();
if (MD->hasRelatedResultType() && MD->getClassInterface()) {
// In the implementation of a method with a related return type, the
// type used to type-check the validity of return statements within the
// method body is a pointer to the type of the class being implemented.
- FnRetType = Context.getObjCInterfaceType(MD->getClassInterface());
- FnRetType = Context.getObjCObjectPointerType(FnRetType);
- } else {
- FnRetType = DeclaredRetType;
+ RelatedRetType = Context.getObjCInterfaceType(MD->getClassInterface());
+ RelatedRetType = Context.getObjCObjectPointerType(RelatedRetType);
}
} else // If we don't have a function/method context, bail.
return StmtError();
@@ -1855,7 +1975,26 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
ReturnStmt *Result = 0;
if (FnRetType->isVoidType()) {
if (RetValExp) {
- if (!RetValExp->isTypeDependent()) {
+ if (isa<InitListExpr>(RetValExp)) {
+ // We simply never allow init lists as the return value of void
+ // functions. This is compatible because this was never allowed before,
+ // so there's no legacy code to deal with.
+ NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+ int FunctionKind = 0;
+ if (isa<ObjCMethodDecl>(CurDecl))
+ FunctionKind = 1;
+ else if (isa<CXXConstructorDecl>(CurDecl))
+ FunctionKind = 2;
+ else if (isa<CXXDestructorDecl>(CurDecl))
+ FunctionKind = 3;
+
+ Diag(ReturnLoc, diag::err_return_init_list)
+ << CurDecl->getDeclName() << FunctionKind
+ << RetValExp->getSourceRange();
+
+ // Drop the expression.
+ RetValExp = 0;
+ } else if (!RetValExp->isTypeDependent()) {
// C99 6.8.6.4p1 (ext_ since GCC warns)
unsigned D = diag::ext_return_has_expr;
if (RetValExp->getType()->isVoidType())
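
The new branch rejects a braced-init-list returned from a void function outright, since no prior dialect accepted it; a sketch (diagnostic wording approximate):

    void f() {
      return {};   // error: void function 'f' must not return a value
    }              // (the err_return_init_list path; the expression is then dropped)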
@@ -1872,7 +2011,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// return (some void expression); is legal in C++.
if (D != diag::ext_return_has_void_expr ||
- !getLangOptions().CPlusPlus) {
+ !getLangOpts().CPlusPlus) {
NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
int FunctionKind = 0;
@@ -1889,15 +2028,17 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
}
- CheckImplicitConversions(RetValExp, ReturnLoc);
- RetValExp = MaybeCreateExprWithCleanups(RetValExp);
+ if (RetValExp) {
+ CheckImplicitConversions(RetValExp, ReturnLoc);
+ RetValExp = MaybeCreateExprWithCleanups(RetValExp);
+ }
}
Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, 0);
} else if (!RetValExp && !FnRetType->isDependentType()) {
unsigned DiagID = diag::warn_return_missing_expr; // C90 6.6.6.4p4
// C99 6.8.6.4p1 (ext_ since GCC warns)
- if (getLangOptions().C99) DiagID = diag::ext_return_missing_expr;
+ if (getLangOpts().C99) DiagID = diag::ext_return_missing_expr;
if (FunctionDecl *FD = getCurFunctionDecl())
Diag(ReturnLoc, DiagID) << FD->getIdentifier() << 0/*fn*/;
@@ -1909,6 +2050,21 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (!FnRetType->isDependentType() && !RetValExp->isTypeDependent()) {
// we have a non-void function with an expression, continue checking
+ if (!RelatedRetType.isNull()) {
+ // If we have a related result type, perform an extra conversion here.
+ // FIXME: The diagnostics here don't really describe what is happening.
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemporary(RelatedRetType);
+
+ ExprResult Res = PerformCopyInitialization(Entity, SourceLocation(),
+ RetValExp);
+ if (Res.isInvalid()) {
+ // FIXME: Cleanup temporaries here, anyway?
+ return StmtError();
+ }
+ RetValExp = Res.takeAs<Expr>();
+ }
+
// C99 6.8.6.4p3(136): The return statement is not an assignment. The
// overlap restriction of subclause 6.5.16.1 does not apply to the case of
// function return.
@@ -1932,17 +2088,6 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
if (RetValExp) {
- // If we type-checked an Objective-C method's return type based
- // on a related return type, we may need to adjust the return
- // type again. Do so now.
- if (DeclaredRetType != FnRetType) {
- ExprResult result = PerformImplicitConversion(RetValExp,
- DeclaredRetType,
- AA_Returning);
- if (result.isInvalid()) return StmtError();
- RetValExp = result.take();
- }
-
CheckImplicitConversions(RetValExp, ReturnLoc);
RetValExp = MaybeCreateExprWithCleanups(RetValExp);
}
@@ -1951,7 +2096,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// If we need to check for the named return value optimization, save the
// return statement in our scope for later processing.
- if (getLangOptions().CPlusPlus && FnRetType->isRecordType() &&
+ if (getLangOpts().CPlusPlus && FnRetType->isRecordType() &&
!CurContext->isDependentContext())
FunctionScopes.back()->Returns.push_back(Result);
@@ -1977,7 +2122,7 @@ static bool CheckAsmLValue(const Expr *E, Sema &S) {
// are supposed to allow.
const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
if (E != E2 && E2->isLValue()) {
- if (!S.getLangOptions().HeinousExtensions)
+ if (!S.getLangOpts().HeinousExtensions)
S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
<< E->getSourceRange();
else
@@ -2255,7 +2400,7 @@ Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) {
StmtResult
Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg CatchStmts, Stmt *Finally) {
- if (!getLangOptions().ObjCExceptions)
+ if (!getLangOpts().ObjCExceptions)
Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
getCurFunction()->setHasBranchProtectedScope();
@@ -2292,7 +2437,7 @@ StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc,
StmtResult
Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope) {
- if (!getLangOptions().ObjCExceptions)
+ if (!getLangOpts().ObjCExceptions)
Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw";
if (!Throw) {
@@ -2392,7 +2537,7 @@ StmtResult
Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
MultiStmtArg RawHandlers) {
// Don't report an error if 'try' is used in system headers.
- if (!getLangOptions().CXXExceptions &&
+ if (!getLangOpts().CXXExceptions &&
!getSourceManager().isInSystemHeader(TryLoc))
Diag(TryLoc, diag::err_exceptions_disabled) << "try";
@@ -2484,3 +2629,26 @@ Sema::ActOnSEHFinallyBlock(SourceLocation Loc,
assert(Block);
return Owned(SEHFinallyStmt::Create(Context,Loc,Block));
}
+
+StmtResult Sema::BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ Stmt *Nested)
+{
+ return new (Context) MSDependentExistsStmt(KeywordLoc, IsIfExists,
+ QualifierLoc, NameInfo,
+ cast<CompoundStmt>(Nested));
+}
+
+
+StmtResult Sema::ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ Stmt *Nested) {
+ return BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
+ SS.getWithLocInContext(Context),
+ GetNameFromUnqualifiedId(Name),
+ Nested);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
index 8dda34c..ff8c4da 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
@@ -26,6 +26,8 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace sema;
@@ -42,11 +44,16 @@ clang::getTemplateParamsRange(TemplateParameterList const * const *Ps,
/// of a template and, if so, return that template declaration. Otherwise,
/// returns NULL.
static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
- NamedDecl *Orig) {
+ NamedDecl *Orig,
+ bool AllowFunctionTemplates) {
NamedDecl *D = Orig->getUnderlyingDecl();
- if (isa<TemplateDecl>(D))
+ if (isa<TemplateDecl>(D)) {
+ if (!AllowFunctionTemplates && isa<FunctionTemplateDecl>(D))
+ return 0;
+
return Orig;
+ }
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
// C++ [temp.local]p1:
@@ -76,13 +83,15 @@ static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
return 0;
}
-void Sema::FilterAcceptableTemplateNames(LookupResult &R) {
+void Sema::FilterAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates) {
// The set of class templates we've already seen.
llvm::SmallPtrSet<ClassTemplateDecl *, 8> ClassTemplates;
LookupResult::Filter filter = R.makeFilter();
while (filter.hasNext()) {
NamedDecl *Orig = filter.next();
- NamedDecl *Repl = isAcceptableTemplateName(Context, Orig);
+ NamedDecl *Repl = isAcceptableTemplateName(Context, Orig,
+ AllowFunctionTemplates);
if (!Repl)
filter.erase();
else if (Repl != Orig) {
@@ -112,9 +121,10 @@ void Sema::FilterAcceptableTemplateNames(LookupResult &R) {
filter.done();
}
-bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R) {
+bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates) {
for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I)
- if (isAcceptableTemplateName(Context, *I))
+ if (isAcceptableTemplateName(Context, *I, AllowFunctionTemplates))
return true;
return false;
@@ -128,7 +138,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
bool EnteringContext,
TemplateTy &TemplateResult,
bool &MemberOfUnknownSpecialization) {
- assert(getLangOptions().CPlusPlus && "No template names in C!");
+ assert(getLangOpts().CPlusPlus && "No template names in C!");
DeclarationName TName;
MemberOfUnknownSpecialization = false;
@@ -153,8 +163,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
QualType ObjectType = ObjectTypePtr.get();
- LookupResult R(*this, TName, Name.getSourceRange().getBegin(),
- LookupOrdinaryName);
+ LookupResult R(*this, TName, Name.getLocStart(), LookupOrdinaryName);
LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
MemberOfUnknownSpecialization);
if (R.empty()) return TNK_Non_template;
@@ -249,6 +258,12 @@ void Sema::LookupTemplateName(LookupResult &Found,
isDependent = ObjectType->isDependentType();
assert((isDependent || !ObjectType->isIncompleteType()) &&
"Caller should have completed object type");
+
+ // Template names cannot appear inside an Objective-C class or object type.
+ if (ObjectType->isObjCObjectOrInterfaceType()) {
+ Found.clear();
+ return;
+ }
} else if (SS.isSet()) {
// This nested-name-specifier occurs after another nested-name-specifier,
// so look into the context associated with the prior nested-name-specifier.
@@ -261,13 +276,13 @@ void Sema::LookupTemplateName(LookupResult &Found,
}
bool ObjectTypeSearchedInScope = false;
+ bool AllowFunctionTemplatesInLookup = true;
if (LookupCtx) {
// Perform "qualified" name lookup into the declaration context we
// computed, which is either the type of the base of a member access
// expression or the declaration context associated with a prior
// nested-name-specifier.
LookupQualifiedName(Found, LookupCtx);
-
if (!ObjectType.isNull() && Found.empty()) {
// C++ [basic.lookup.classref]p1:
// In a class member access expression (5.2.5), if the . or -> token is
@@ -280,6 +295,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
// or function template.
if (S) LookupName(Found, S);
ObjectTypeSearchedInScope = true;
+ AllowFunctionTemplatesInLookup = false;
}
} else if (isDependent && (!S || ObjectType.isNull())) {
// We cannot look into a dependent object type or nested name
@@ -289,23 +305,31 @@ void Sema::LookupTemplateName(LookupResult &Found,
} else {
// Perform unqualified name lookup in the current scope.
LookupName(Found, S);
+
+ if (!ObjectType.isNull())
+ AllowFunctionTemplatesInLookup = false;
}
if (Found.empty() && !isDependent) {
// If we did not find any names, attempt to correct any typos.
DeclarationName Name = Found.getLookupName();
Found.clear();
+ // Simple filter callback that, for keywords, only accepts the C++ *_cast
+ CorrectionCandidateCallback FilterCCC;
+ FilterCCC.WantTypeSpecifiers = false;
+ FilterCCC.WantExpressionKeywords = false;
+ FilterCCC.WantRemainingKeywords = false;
+ FilterCCC.WantCXXNamedCasts = true;
if (TypoCorrection Corrected = CorrectTypo(Found.getLookupNameInfo(),
Found.getLookupKind(), S, &SS,
- LookupCtx, false,
- CTC_CXXCasts)) {
+ FilterCCC, LookupCtx)) {
Found.setLookupName(Corrected.getCorrection());
if (Corrected.getCorrectionDecl())
Found.addDecl(Corrected.getCorrectionDecl());
FilterAcceptableTemplateNames(Found);
if (!Found.empty()) {
- std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
- std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
if (LookupCtx)
Diag(Found.getNameLoc(), diag::err_no_member_template_suggest)
<< Name << LookupCtx << CorrectedQuotedStr << SS.getRange()
@@ -323,7 +347,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
}
}
- FilterAcceptableTemplateNames(Found);
+ FilterAcceptableTemplateNames(Found, AllowFunctionTemplatesInLookup);
if (Found.empty()) {
if (isDependent)
MemberOfUnknownSpecialization = true;
@@ -339,7 +363,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
LookupResult FoundOuter(*this, Found.getLookupName(), Found.getNameLoc(),
LookupOrdinaryName);
LookupName(FoundOuter, S);
- FilterAcceptableTemplateNames(FoundOuter);
+ FilterAcceptableTemplateNames(FoundOuter, /*AllowFunctionTemplates=*/false);
if (FoundOuter.empty()) {
// - if the name is not found, the name found in the class of the
@@ -379,6 +403,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
/// specifier naming a dependent type.
ExprResult
Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs) {
@@ -398,20 +423,23 @@ Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
/*IsArrow*/ true,
/*Op*/ SourceLocation(),
SS.getWithLocInContext(Context),
+ TemplateKWLoc,
FirstQualifierInScope,
NameInfo,
TemplateArgs));
}
- return BuildDependentDeclRefExpr(SS, NameInfo, TemplateArgs);
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
}
ExprResult
Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
return Owned(DependentScopeDeclRefExpr::Create(Context,
SS.getWithLocInContext(Context),
+ TemplateKWLoc,
NameInfo,
TemplateArgs));
}
@@ -420,12 +448,12 @@ Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
/// that the template parameter 'PrevDecl' is being shadowed by a new
/// declaration at location Loc. Returns true to indicate that this is
/// an error, and false otherwise.
-bool Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
+void Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
assert(PrevDecl->isTemplateParameter() && "Not a template parameter");
// Microsoft Visual C++ permits template parameters to be shadowed.
- if (getLangOptions().MicrosoftExt)
- return false;
+ if (getLangOpts().MicrosoftExt)
+ return;
// C++ [temp.local]p4:
// A template-parameter shall not be redeclared within its
@@ -433,7 +461,7 @@ bool Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
Diag(Loc, diag::err_template_param_shadow)
<< cast<NamedDecl>(PrevDecl)->getDeclName();
Diag(PrevDecl->getLocation(), diag::note_template_param_here);
- return true;
+ return;
}
/// AdjustDeclIfTemplate - If the given decl happens to be a template, reset
@@ -491,7 +519,6 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
}
llvm_unreachable("Unhandled parsed template argument");
- return TemplateArgumentLoc();
}
/// \brief Translates template arguments as provided by the parser
@@ -528,9 +555,10 @@ Decl *Sema::ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
NamedDecl *PrevDecl = LookupSingleName(S, ParamName, ParamNameLoc,
LookupOrdinaryName,
ForRedeclaration);
- if (PrevDecl && PrevDecl->isTemplateParameter())
- Invalid = Invalid || DiagnoseTemplateParameterShadow(ParamNameLoc,
- PrevDecl);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(ParamNameLoc, PrevDecl);
+ PrevDecl = 0;
+ }
}
SourceLocation Loc = ParamNameLoc;
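
The shadowing diagnostic is now unconditional (and PrevDecl is cleared so recovery proceeds); a sketch of what it rejects:

    template <typename T>
    void f(int T);   // error: declaration of 'T' shadows template parameter
                     // (still accepted under -fms-extensions, per the early return)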
@@ -615,8 +643,12 @@ Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
T->isNullPtrType() ||
// If T is a dependent type, we can't do the check now, so we
// assume that it is well-formed.
- T->isDependentType())
- return T;
+ T->isDependentType()) {
+ // C++ [temp.param]p5: The top-level cv-qualifiers on the template-parameter
+ // are ignored when determining its type.
+ return T.getUnqualifiedType();
+ }
+
// C++ [temp.param]p8:
//
// A non-type template-parameter of type "array of T" or
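
A sketch of the cv-qualifier rule being implemented here (C++ [temp.param]p5):

    template <const int N>    // top-level 'const' is ignored when determining the
    int get() { return N; }   // parameter's type, which is plain 'int'

    int five = get<5>();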
@@ -652,9 +684,10 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
NamedDecl *PrevDecl = LookupSingleName(S, ParamName, D.getIdentifierLoc(),
LookupOrdinaryName,
ForRedeclaration);
- if (PrevDecl && PrevDecl->isTemplateParameter())
- Invalid = Invalid || DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
- PrevDecl);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ PrevDecl = 0;
+ }
}
T = CheckNonTypeTemplateParameterType(T, D.getIdentifierLoc());
@@ -666,7 +699,7 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
bool IsParameterPack = D.hasEllipsis();
NonTypeTemplateParmDecl *Param
= NonTypeTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
- D.getSourceRange().getBegin(),
+ D.getLocStart(),
D.getIdentifierLoc(),
Depth, Position, ParamName, T,
IsParameterPack, TInfo);
@@ -839,7 +872,12 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (SS.isNotEmpty() && !SS.isInvalid()) {
SemanticContext = computeDeclContext(SS, true);
if (!SemanticContext) {
- // FIXME: Produce a reasonable diagnostic here
+ // FIXME: Horrible, horrible hack! We can't currently represent this
+ // in the AST, and historically we have just ignored such friend
+ // class templates, so don't complain here.
+ if (TUK != TUK_Friend)
+ Diag(NameLoc, diag::err_template_qualified_declarator_no_match)
+ << SS.getScopeRep() << SS.getRange();
return true;
}
@@ -853,7 +891,8 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
ContextRAII SavedContext(*this, SemanticContext);
if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
Invalid = true;
- }
+ } else if (TUK != TUK_Friend && TUK != TUK_Reference)
+ diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc);
LookupQualifiedName(Previous, SemanticContext);
} else {
@@ -987,9 +1026,11 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
// If the name of the template was qualified, we must be defining the
// template out-of-line.
if (!SS.isInvalid() && !Invalid && !PrevClassTemplate &&
- !(TUK == TUK_Friend && CurContext->isDependentContext()))
+ !(TUK == TUK_Friend && CurContext->isDependentContext())) {
Diag(NameLoc, diag::err_member_def_does_not_match)
<< Name << SemanticContext << SS.getRange();
+ Invalid = true;
+ }
}
CXXRecordDecl *NewClass =
@@ -1003,21 +1044,19 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
NumOuterTemplateParamLists,
OuterTemplateParamLists);
+ // Add alignment attributes if necessary; these attributes are checked when
+ // the ASTContext lays out the structure.
+ AddAlignmentAttributesForRecord(NewClass);
+ AddMsStructLayoutForRecord(NewClass);
+
ClassTemplateDecl *NewTemplate
= ClassTemplateDecl::Create(Context, SemanticContext, NameLoc,
DeclarationName(Name), TemplateParams,
NewClass, PrevClassTemplate);
NewClass->setDescribedClassTemplate(NewTemplate);
- if (PrevClassTemplate && PrevClassTemplate->isModulePrivate()) {
+ if (ModulePrivateLoc.isValid())
NewTemplate->setModulePrivate();
- } else if (ModulePrivateLoc.isValid()) {
- if (PrevClassTemplate && !PrevClassTemplate->isModulePrivate())
- diagnoseModulePrivateRedeclaration(NewTemplate, PrevClassTemplate,
- ModulePrivateLoc);
- else
- NewTemplate->setModulePrivate();
- }
// Build the type for the class template declaration now.
QualType T = NewTemplate->getInjectedClassNameSpecialization();
@@ -1032,7 +1071,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
PrevClassTemplate->setMemberSpecialization();
// Set the access specifier.
- if (!Invalid && TUK != TUK_Friend)
+ if (!Invalid && TUK != TUK_Friend && NewTemplate->getDeclContext()->isRecord())
SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
// Set the lexical context of these templates
@@ -1059,7 +1098,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Friend templates are visible in fairly strange ways.
if (!CurContext->isDependentContext()) {
DeclContext *DC = SemanticContext->getRedeclContext();
- DC->makeDeclVisibleInContext(NewTemplate, /* Recoverable = */ false);
+ DC->makeDeclVisibleInContext(NewTemplate);
if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
PushOnScopeChains(NewTemplate, EnclosingScope,
/* AddToContext = */ false);
@@ -1103,10 +1142,10 @@ static bool DiagnoseDefaultTemplateArgument(Sema &S,
// template-argument, that declaration shall be a definition and shall be
// the only declaration of the function template in the translation unit.
// (C++98/03 doesn't have this wording; see DR226).
- if (!S.getLangOptions().CPlusPlus0x)
- S.Diag(ParamLoc,
- diag::ext_template_parameter_default_in_function_template)
- << DefArgRange;
+ S.Diag(ParamLoc, S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_parameter_default_in_function_template
+ : diag::ext_template_parameter_default_in_function_template)
+ << DefArgRange;
return false;
case Sema::TPC_ClassTemplateMember:
@@ -1131,7 +1170,7 @@ static bool DiagnoseDefaultTemplateArgument(Sema &S,
// declaration (and it is a definition). Strange!
}
- return false;
+ llvm_unreachable("Invalid TemplateParamListContext!");
}
/// \brief Check for unexpanded parameter packs within the template parameters
@@ -1195,9 +1234,6 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
bool SawDefaultArgument = false;
SourceLocation PreviousDefaultArgLoc;
- bool SawParameterPack = false;
- SourceLocation ParameterPackLoc;
-
// Dummy initialization to avoid warnings.
TemplateParameterList::iterator OldParam = NewParams->end();
if (OldParams)
@@ -1212,18 +1248,11 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
SourceLocation OldDefaultLoc;
SourceLocation NewDefaultLoc;
- // Variables used to diagnose missing default arguments
+ // Variable used to diagnose missing default arguments
bool MissingDefaultArg = false;
- // C++0x [temp.param]p11:
- // If a template parameter of a primary class template or alias template
- // is a template parameter pack, it shall be the last template parameter.
- if (SawParameterPack &&
- (TPC == TPC_ClassTemplate || TPC == TPC_TypeAliasTemplate)) {
- Diag(ParameterPackLoc,
- diag::err_template_param_pack_must_be_last_template_parameter);
- Invalid = true;
- }
+ // Variable used to diagnose non-final parameter packs
+ bool SawParameterPack = false;
if (TemplateTypeParmDecl *NewTypeParm
= dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
@@ -1243,7 +1272,6 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
assert(!NewTypeParm->hasDefaultArgument() &&
"Parameter packs can't have a default argument!");
SawParameterPack = true;
- ParameterPackLoc = NewTypeParm->getLocation();
} else if (OldTypeParm && OldTypeParm->hasDefaultArgument() &&
NewTypeParm->hasDefaultArgument()) {
OldDefaultLoc = OldTypeParm->getDefaultArgumentLoc();
@@ -1288,7 +1316,6 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
assert(!NewNonTypeParm->hasDefaultArgument() &&
"Parameter packs can't have a default argument!");
SawParameterPack = true;
- ParameterPackLoc = NewNonTypeParm->getLocation();
} else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument() &&
NewNonTypeParm->hasDefaultArgument()) {
OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
@@ -1313,16 +1340,16 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
} else if (SawDefaultArgument)
MissingDefaultArg = true;
} else {
- // Check the presence of a default argument here.
TemplateTemplateParmDecl *NewTemplateParm
= cast<TemplateTemplateParmDecl>(*NewParam);
// Check for unexpanded parameter packs, recursively.
- if (DiagnoseUnexpandedParameterPacks(*this, NewTemplateParm)) {
+ if (::DiagnoseUnexpandedParameterPacks(*this, NewTemplateParm)) {
Invalid = true;
continue;
}
+ // Check the presence of a default argument here.
if (NewTemplateParm->hasDefaultArgument() &&
DiagnoseDefaultTemplateArgument(*this, TPC,
NewTemplateParm->getLocation(),
@@ -1336,7 +1363,6 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
assert(!NewTemplateParm->hasDefaultArgument() &&
"Parameter packs can't have a default argument!");
SawParameterPack = true;
- ParameterPackLoc = NewTemplateParm->getLocation();
} else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument() &&
NewTemplateParm->hasDefaultArgument()) {
OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
@@ -1363,6 +1389,16 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
MissingDefaultArg = true;
}
+ // C++0x [temp.param]p11:
+ // If a template parameter of a primary class template or alias template
+ // is a template parameter pack, it shall be the last template parameter.
+ if (SawParameterPack && (NewParam + 1) != NewParamEnd &&
+ (TPC == TPC_ClassTemplate || TPC == TPC_TypeAliasTemplate)) {
+ Diag((*NewParam)->getLocation(),
+ diag::err_template_param_pack_must_be_last_template_parameter);
+ Invalid = true;
+ }
+
if (RedundantDefaultArg) {
// C++ [temp.param]p12:
// A template-parameter shall not be given default arguments
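
Moving the pack check inside the loop lets it compare each pack against the following parameter; a sketch of what it accepts and rejects for class templates:

    template <typename... Ts, typename U>   // error: parameter pack must be the last
    struct Bad;                             // template parameter of a class template

    template <typename U, typename... Ts>   // OK
    struct Good;

Function templates are deliberately exempt, since parameters after the pack may still be deduced.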
@@ -1878,7 +1914,6 @@ void Sema::NoteAllFoundTemplates(TemplateName Name) {
}
}
-
QualType Sema::CheckTemplateIdType(TemplateName Name,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
@@ -1910,18 +1945,17 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Check that the template argument list is well-formed for this
// template.
SmallVector<TemplateArgument, 4> Converted;
+ bool ExpansionIntoFixedList = false;
if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
- false, Converted))
+ false, Converted, &ExpansionIntoFixedList))
return QualType();
- assert((Converted.size() == Template->getTemplateParameters()->size()) &&
- "Converted template argument list is too short!");
-
QualType CanonType;
bool InstantiationDependent = false;
- if (TypeAliasTemplateDecl *AliasTemplate
- = dyn_cast<TypeAliasTemplateDecl>(Template)) {
+ TypeAliasTemplateDecl *AliasTemplate = 0;
+ if (!ExpansionIntoFixedList &&
+ (AliasTemplate = dyn_cast<TypeAliasTemplateDecl>(Template))) {
// Find the canonical type for this type alias template specialization.
TypeAliasDecl *Pattern = AliasTemplate->getTemplatedDecl();
if (Pattern->isInvalidDecl())
@@ -2037,11 +2071,12 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
}
TypeResult
-Sema::ActOnTemplateIdType(CXXScopeSpec &SS,
+Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy TemplateD, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
- SourceLocation RAngleLoc) {
+ SourceLocation RAngleLoc,
+ bool IsCtorOrDtorName) {
if (SS.isInvalid())
return true;
@@ -2052,20 +2087,21 @@ Sema::ActOnTemplateIdType(CXXScopeSpec &SS,
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
- QualType T = Context.getDependentTemplateSpecializationType(ETK_None,
- DTN->getQualifier(),
- DTN->getIdentifier(),
- TemplateArgs);
-
- // Build type-source information.
+ QualType T
+ = Context.getDependentTemplateSpecializationType(ETK_None,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+ // Build type-source information.
TypeLocBuilder TLB;
DependentTemplateSpecializationTypeLoc SpecTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(T);
- SpecTL.setKeywordLoc(SourceLocation());
- SpecTL.setNameLoc(TemplateLoc);
+ SpecTL.setElaboratedKeywordLoc(SourceLocation());
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
- SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
@@ -2078,20 +2114,24 @@ Sema::ActOnTemplateIdType(CXXScopeSpec &SS,
return true;
// Build type-source information.
- TypeLocBuilder TLB;
+ TypeLocBuilder TLB;
TemplateSpecializationTypeLoc SpecTL
= TLB.push<TemplateSpecializationTypeLoc>(Result);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
- if (SS.isNotEmpty()) {
+ // NOTE: avoid constructing an ElaboratedTypeLoc if this is a
+ // constructor or destructor name (in such a case, the scope specifier
+ // will be attached to the enclosing Decl or Expr node).
+ if (SS.isNotEmpty() && !IsCtorOrDtorName) {
// Create an elaborated-type-specifier containing the nested-name-specifier.
Result = Context.getElaboratedType(ETK_None, SS.getScopeRep(), Result);
ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
- ElabTL.setKeywordLoc(SourceLocation());
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
}
@@ -2102,7 +2142,8 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
- TemplateTy TemplateD,
+ SourceLocation TemplateKWLoc,
+ TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
@@ -2127,12 +2168,13 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
// Build type-source information.
TypeLocBuilder TLB;
DependentTemplateSpecializationTypeLoc SpecTL
- = TLB.push<DependentTemplateSpecializationTypeLoc>(T);
- SpecTL.setKeywordLoc(TagLoc);
- SpecTL.setNameLoc(TemplateLoc);
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(TagLoc);
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
- SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
@@ -2167,11 +2209,12 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
Diag(D->getLocation(), diag::note_previous_use);
}
}
-
+
// Provide source-location information for the template specialization.
TypeLocBuilder TLB;
TemplateSpecializationTypeLoc SpecTL
= TLB.push<TemplateSpecializationTypeLoc>(Result);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
@@ -2179,18 +2222,19 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
// Construct an elaborated type containing the nested-name-specifier (if any)
- // and keyword.
+ // and tag keyword.
Result = Context.getElaboratedType(Keyword, SS.getScopeRep(), Result);
ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
- ElabTL.setKeywordLoc(TagLoc);
+ ElabTL.setElaboratedKeywordLoc(TagLoc);
ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
- const TemplateArgumentListInfo &TemplateArgs) {
+ const TemplateArgumentListInfo *TemplateArgs) {
// FIXME: Can we do any checking at this point? I guess we could check the
// template arguments that we have against the template name, if the template
// name refers to a single template. That's not a terribly common case,
@@ -2211,6 +2255,7 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
UnresolvedLookupExpr *ULE
= UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
SS.getWithLocInContext(Context),
+ TemplateKWLoc,
R.getLookupNameInfo(),
RequiresADL, TemplateArgs,
R.begin(), R.end());
@@ -2221,13 +2266,15 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
// We actually only call this from template instantiation.
ExprResult
Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo &TemplateArgs) {
+ const TemplateArgumentListInfo *TemplateArgs) {
+ assert(TemplateArgs || TemplateKWLoc.isValid());
DeclContext *DC;
if (!(DC = computeDeclContext(SS, false)) ||
DC->isDependentContext() ||
RequireCompleteDeclContext(SS, DC))
- return BuildDependentDeclRefExpr(SS, NameInfo, &TemplateArgs);
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
bool MemberOfUnknownSpecialization;
LookupResult R(*this, NameInfo, LookupOrdinaryName);
@@ -2251,7 +2298,7 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
return ExprError();
}
- return BuildTemplateIdExpr(SS, R, /* ADL */ false, TemplateArgs);
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
}
/// \brief Form a dependent template name.
@@ -2262,15 +2309,17 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
/// of the "template" keyword, and "apply" is the \p Name.
TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
- SourceLocation TemplateKWLoc,
CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Result) {
- if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent() &&
- !getLangOptions().CPlusPlus0x)
- Diag(TemplateKWLoc, diag::ext_template_outside_of_template)
+ if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TemplateKWLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_outside_of_template :
+ diag::ext_template_outside_of_template)
<< FixItHint::CreateRemoval(TemplateKWLoc);
DeclContext *LookupCtx = 0;
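For context, a hedged sketch of what this hunk now diagnoses in both modes (all names illustrative): the 'template' disambiguator used outside of any template.

  struct MetaFun {
    template <typename T> struct apply { typedef T type; };
  };
  // C++98: extension warning; C++11: -Wc++98-compat warning.
  // The attached fix-it removes the 'template' keyword.
  MetaFun::template apply<int>::type x;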
@@ -2305,7 +2354,7 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases())) {
// This is a dependent template. Handle it below.
} else if (TNK == TNK_Non_template) {
- Diag(Name.getSourceRange().getBegin(),
+ Diag(Name.getLocStart(),
diag::err_template_kw_refers_to_non_template)
<< GetNameFromUnqualifiedId(Name).getName()
<< Name.getSourceRange()
@@ -2339,7 +2388,7 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
break;
}
- Diag(Name.getSourceRange().getBegin(),
+ Diag(Name.getLocStart(),
diag::err_template_kw_refers_to_non_template)
<< GetNameFromUnqualifiedId(Name).getName()
<< Name.getSourceRange()
@@ -2391,7 +2440,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
// Objective-C ARC:
// If an explicitly-specified template argument type is a lifetime type
// with no lifetime qualifier, the __strong lifetime qualifier is inferred.
- if (getLangOptions().ObjCAutoRefCount &&
+ if (getLangOpts().ObjCAutoRefCount &&
ArgType->isObjCLifetimeType() &&
!ArgType.getObjCLifetime()) {
Qualifiers Qs;
@@ -2729,9 +2778,14 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
CXXScopeSpec SS;
SS.Adopt(Arg.getTemplateQualifierLoc());
+ // FIXME: the template-template arg was a DependentTemplateName,
+ // so it was provided with a template keyword. However, its source
+ // location is not stored in the template argument structure.
+ SourceLocation TemplateKWLoc;
ExprResult E = Owned(DependentScopeDeclRefExpr::Create(Context,
SS.getWithLocInContext(Context),
- NameInfo));
+ TemplateKWLoc,
+ NameInfo, 0));
// If we parsed the template argument as a pack expansion, create a
// pack expansion expression.
@@ -2782,7 +2836,6 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
case TemplateArgument::Pack:
llvm_unreachable("Caller must expand template argument packs");
- break;
}
return false;
@@ -2828,33 +2881,55 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
// We have a template template parameter but the template
// argument does not refer to a template.
Diag(Arg.getLocation(), diag::err_template_arg_must_be_template)
- << getLangOptions().CPlusPlus0x;
+ << getLangOpts().CPlusPlus0x;
return true;
case TemplateArgument::Declaration:
- llvm_unreachable(
- "Declaration argument with template template parameter");
- break;
+ llvm_unreachable("Declaration argument with template template parameter");
case TemplateArgument::Integral:
- llvm_unreachable(
- "Integral argument with template template parameter");
- break;
+ llvm_unreachable("Integral argument with template template parameter");
case TemplateArgument::Pack:
llvm_unreachable("Caller must expand template argument packs");
- break;
}
return false;
}
+/// \brief Diagnose an arity mismatch in the number of template arguments.
+static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ unsigned NumParams = Params->size();
+ unsigned NumArgs = TemplateArgs.size();
+
+ SourceRange Range;
+ if (NumArgs > NumParams)
+ Range = SourceRange(TemplateArgs[NumParams].getLocation(),
+ TemplateArgs.getRAngleLoc());
+ S.Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << (NumArgs > NumParams)
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template << Range;
+ S.Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
+}
+
/// \brief Check that the given template argument list is well-formed
/// for specializing the given template.
bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
- SmallVectorImpl<TemplateArgument> &Converted) {
+ SmallVectorImpl<TemplateArgument> &Converted,
+ bool *ExpansionIntoFixedList) {
+ if (ExpansionIntoFixedList)
+ *ExpansionIntoFixedList = false;
+
TemplateParameterList *Params = Template->getTemplateParameters();
unsigned NumParams = Params->size();
unsigned NumArgs = TemplateArgs.size();
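An illustrative trigger for the new diagnoseArityMismatch helper (names are assumptions; the erroneous lines are commented out so the sketch otherwise compiles):

  template <typename T, typename U> struct Pair {};
  // Pair<int> p1;            // error: too few template arguments
  // Pair<int, int, int> p2;  // error: too many; Range highlights the
  //                          // first extra argument up to the '>'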
@@ -2864,27 +2939,7 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
bool HasParameterPack =
NumParams > 0 && Params->getParam(NumParams - 1)->isTemplateParameterPack();
-
- if ((NumArgs > NumParams && !HasParameterPack) ||
- (NumArgs < Params->getMinRequiredArguments() &&
- !PartialTemplateArgs)) {
- // FIXME: point at either the first arg beyond what we can handle,
- // or the '>', depending on whether we have too many or too few
- // arguments.
- SourceRange Range;
- if (NumArgs > NumParams)
- Range = SourceRange(TemplateArgs[NumParams].getLocation(), RAngleLoc);
- Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
- << (NumArgs > NumParams)
- << (isa<ClassTemplateDecl>(Template)? 0 :
- isa<FunctionTemplateDecl>(Template)? 1 :
- isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
- << Template << Range;
- Diag(Template->getLocation(), diag::note_template_decl_here)
- << Params->getSourceRange();
- Invalid = true;
- }
-
+
// C++ [temp.arg]p1:
// [...] The type and form of each template-argument specified in
// a template-id shall match the type and form specified for the
@@ -2896,10 +2951,12 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
ParamEnd = Params->end();
unsigned ArgIdx = 0;
LocalInstantiationScope InstScope(*this, true);
+ bool SawPackExpansion = false;
while (Param != ParamEnd) {
if (ArgIdx < NumArgs) {
// If we have an expanded parameter pack, make sure we don't have too
// many arguments.
+ // FIXME: This really should fall out from the normal arity checking.
if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
if (NTTP->isExpandedParameterPack() &&
@@ -2933,6 +2990,15 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// Move to the next template parameter.
++Param;
}
+
+ // If this template argument is a pack expansion, record that fact
+ // and break out; we can't actually check any more.
+ if (TemplateArgs[ArgIdx].getArgument().isPackExpansion()) {
+ SawPackExpansion = true;
+ ++ArgIdx;
+ break;
+ }
+
++ArgIdx;
continue;
}
@@ -2952,7 +3018,7 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
if ((*Param)->isTemplateParameterPack())
break;
- // We have a default template argument that we will use.
+ // Check whether we have a default argument.
TemplateArgumentLoc Arg;
// Retrieve the default template argument from the template
@@ -2961,10 +3027,9 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// (when the template parameter was part of a nested template) into
// the default argument.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
- if (!TTP->hasDefaultArgument()) {
- assert(Invalid && "Missing default argument");
- break;
- }
+ if (!TTP->hasDefaultArgument())
+ return diagnoseArityMismatch(*this, Template, TemplateLoc,
+ TemplateArgs);
TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(*this,
Template,
@@ -2979,10 +3044,9 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
ArgType);
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
- if (!NTTP->hasDefaultArgument()) {
- assert(Invalid && "Missing default argument");
- break;
- }
+ if (!NTTP->hasDefaultArgument())
+ return diagnoseArityMismatch(*this, Template, TemplateLoc,
+ TemplateArgs);
ExprResult E = SubstDefaultTemplateArgument(*this, Template,
TemplateLoc,
@@ -2998,10 +3062,9 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
TemplateTemplateParmDecl *TempParm
= cast<TemplateTemplateParmDecl>(*Param);
- if (!TempParm->hasDefaultArgument()) {
- assert(Invalid && "Missing default argument");
- break;
- }
+ if (!TempParm->hasDefaultArgument())
+ return diagnoseArityMismatch(*this, Template, TemplateLoc,
+ TemplateArgs);
NestedNameSpecifierLoc QualifierLoc;
TemplateName Name = SubstDefaultTemplateArgument(*this, Template,
@@ -3039,12 +3102,71 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
++ArgIdx;
}
+ // If we saw a pack expansion, then directly convert the remaining arguments,
+ // because we don't know what parameters they'll match up with.
+ if (SawPackExpansion) {
+ bool AddToArgumentPack
+ = Param != ParamEnd && (*Param)->isTemplateParameterPack();
+ while (ArgIdx < NumArgs) {
+ if (AddToArgumentPack)
+ ArgumentPack.push_back(TemplateArgs[ArgIdx].getArgument());
+ else
+ Converted.push_back(TemplateArgs[ArgIdx].getArgument());
+ ++ArgIdx;
+ }
+
+ // Push the argument pack onto the list of converted arguments.
+ if (AddToArgumentPack) {
+ if (ArgumentPack.empty())
+ Converted.push_back(TemplateArgument(0, 0));
+ else {
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context,
+ ArgumentPack.data(),
+ ArgumentPack.size()));
+ ArgumentPack.clear();
+ }
+ } else if (ExpansionIntoFixedList) {
+ // We have expanded a pack into a fixed list.
+ *ExpansionIntoFixedList = true;
+ }
+
+ return Invalid;
+ }
+
+ // If we have any leftover arguments, then there were too many arguments.
+ // Complain and fail.
+ if (ArgIdx < NumArgs)
+ return diagnoseArityMismatch(*this, Template, TemplateLoc, TemplateArgs);
+
+ // If we have an expanded parameter pack, make sure we don't have too
+ // many arguments.
+ // FIXME: This really should fall out from the normal arity checking.
+ if (Param != ParamEnd) {
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ if (NTTP->isExpandedParameterPack() &&
+ ArgumentPack.size() < NTTP->getNumExpansionTypes()) {
+ Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << false
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template;
+ Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
+ }
+ }
+ }
+
// Form argument packs for each of the parameter packs remaining.
while (Param != ParamEnd) {
// If we're checking a partial list of template arguments, don't fill
// in arguments for non-template parameter packs.
-
if ((*Param)->isTemplateParameterPack()) {
+ if (!HasParameterPack)
+ return true;
if (ArgumentPack.empty())
Converted.push_back(TemplateArgument(0, 0));
else {
@@ -3053,7 +3175,8 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
ArgumentPack.size()));
ArgumentPack.clear();
}
- }
+ } else if (!PartialTemplateArgs)
+ return diagnoseArityMismatch(*this, Template, TemplateLoc, TemplateArgs);
++Param;
}
@@ -3261,13 +3384,19 @@ bool UnnamedLocalNoLinkageFinder::VisitAtomicType(const AtomicType* T) {
bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
if (Tag->getDeclContext()->isFunctionOrMethod()) {
- S.Diag(SR.getBegin(), diag::ext_template_arg_local_type)
+ S.Diag(SR.getBegin(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_local_type :
+ diag::ext_template_arg_local_type)
<< S.Context.getTypeDeclType(Tag) << SR;
return true;
}
if (!Tag->getDeclName() && !Tag->getTypedefNameForAnonDecl()) {
- S.Diag(SR.getBegin(), diag::ext_template_arg_unnamed_type) << SR;
+ S.Diag(SR.getBegin(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_unnamed_type :
+ diag::ext_template_arg_unnamed_type) << SR;
S.Diag(Tag->getLocation(), diag::note_template_unnamed_type_here);
return true;
}
@@ -3291,7 +3420,7 @@ bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
case NestedNameSpecifier::TypeSpecWithTemplate:
return Visit(QualType(NNS->getAsType(), 0));
}
- return false;
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
@@ -3317,9 +3446,14 @@ bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
// compounded from any of these types shall not be used as a
// template-argument for a template type-parameter.
//
- // C++0x allows these, and even in C++03 we allow them as an extension with
+ // C++11 allows these, and even in C++03 we allow them as an extension with
// a warning.
- if (!LangOpts.CPlusPlus0x && Arg->hasUnnamedOrLocalType()) {
+ if (LangOpts.CPlusPlus0x ?
+ Diags.getDiagnosticLevel(diag::warn_cxx98_compat_template_arg_unnamed_type,
+ SR.getBegin()) != DiagnosticsEngine::Ignored ||
+ Diags.getDiagnosticLevel(diag::warn_cxx98_compat_template_arg_local_type,
+ SR.getBegin()) != DiagnosticsEngine::Ignored :
+ Arg->hasUnnamedOrLocalType()) {
UnnamedLocalNoLinkageFinder Finder(*this, SR);
(void)Finder.Visit(Context.getCanonicalType(Arg));
}
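An illustrative trigger for the local-type path above (assumed minimal usage):

  template <typename T> struct Holder {};
  void g() {
    struct Local {};
    Holder<Local> h;  // C++98: ext_template_arg_local_type;
                      // C++11: accepted, -Wc++98-compat warning only
  }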
@@ -3327,6 +3461,99 @@ bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
return false;
}
+enum NullPointerValueKind {
+ NPV_NotNullPointer,
+ NPV_NullPointer,
+ NPV_Error
+};
+
+/// \brief Determine whether the given template argument is a null pointer
+/// value of the appropriate type.
+static NullPointerValueKind
+isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
+ QualType ParamType, Expr *Arg) {
+ if (Arg->isValueDependent() || Arg->isTypeDependent())
+ return NPV_NotNullPointer;
+
+ if (!S.getLangOpts().CPlusPlus0x)
+ return NPV_NotNullPointer;
+
+ // Determine whether we have a constant expression.
+ ExprResult ArgRV = S.DefaultFunctionArrayConversion(Arg);
+ if (ArgRV.isInvalid())
+ return NPV_Error;
+ Arg = ArgRV.take();
+
+ Expr::EvalResult EvalResult;
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ EvalResult.Diag = &Notes;
+ if (!Arg->EvaluateAsRValue(EvalResult, S.Context) ||
+ EvalResult.HasSideEffects) {
+ SourceLocation DiagLoc = Arg->getExprLoc();
+
+ // If our only note is the usual "invalid subexpression" note, just point
+ // the caret at its location rather than producing an essentially
+ // redundant note.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+
+ S.Diag(DiagLoc, diag::err_template_arg_not_address_constant)
+ << Arg->getType() << Arg->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ S.Diag(Notes[I].first, Notes[I].second);
+
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_Error;
+ }
+
+ // C++11 [temp.arg.nontype]p1:
+ // - an address constant expression of type std::nullptr_t
+ if (Arg->getType()->isNullPtrType())
+ return NPV_NullPointer;
+
+ // - a constant expression that evaluates to a null pointer value (4.10); or
+ // - a constant expression that evaluates to a null member pointer value
+ // (4.11); or
+ if ((EvalResult.Val.isLValue() && !EvalResult.Val.getLValueBase()) ||
+ (EvalResult.Val.isMemberPointer() &&
+ !EvalResult.Val.getMemberPointerDecl())) {
+ // If our expression has an appropriate type, we've succeeded.
+ bool ObjCLifetimeConversion;
+ if (S.Context.hasSameUnqualifiedType(Arg->getType(), ParamType) ||
+ S.IsQualificationConversion(Arg->getType(), ParamType, false,
+ ObjCLifetimeConversion))
+ return NPV_NullPointer;
+
+ // The types didn't match, but we know we got a null pointer; complain,
+ // then recover as if the types were correct.
+ S.Diag(Arg->getExprLoc(), diag::err_template_arg_wrongtype_null_constant)
+ << Arg->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_NullPointer;
+ }
+
+ // If we don't have a null pointer value, but we do have a NULL pointer
+ // constant, suggest a cast to the appropriate type.
+ if (Arg->isNullPointerConstant(S.Context, Expr::NPC_NeverValueDependent)) {
+ std::string Code = "static_cast<" + ParamType.getAsString() + ">(";
+ S.Diag(Arg->getExprLoc(), diag::err_template_arg_untyped_null_constant)
+ << ParamType
+ << FixItHint::CreateInsertion(Arg->getLocStart(), Code)
+ << FixItHint::CreateInsertion(S.PP.getLocForEndOfToken(Arg->getLocEnd()),
+ ")");
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_NullPointer;
+ }
+
+ // FIXME: If we ever want to support general, address-constant expressions
+ // as non-type template arguments, we should return the ExprResult here to
+ // be interpreted by the caller.
+ return NPV_NotNullPointer;
+}
+
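A hedged sketch of the null-pointer arguments the new helper classifies in C++11 mode (illustrative names; the rejected line is commented out):

  template <int *P> struct Ptr {};
  Ptr<nullptr> p1;               // address constant of type std::nullptr_t
  constexpr int *Null = nullptr;
  Ptr<Null> p2;                  // constant expression evaluating to null
  // Ptr<0> p3;                  // error: untyped null constant; the fix-it
  //                             // suggests static_cast<int *>(0)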
/// \brief Checks whether the given template argument is the address
/// of an object or function according to C++ [temp.arg.nontype]p1.
static bool
@@ -3339,6 +3566,21 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
Expr *Arg = ArgIn;
QualType ArgType = Arg->getType();
+ // If our parameter has pointer type, check for a null template value.
+ if (ParamType->isPointerType() || ParamType->isNullPtrType()) {
+ switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) {
+ case NPV_NullPointer:
+ Converted = TemplateArgument((Decl *)0);
+ return false;
+
+ case NPV_Error:
+ return true;
+
+ case NPV_NotNullPointer:
+ break;
+ }
+ }
+
// See through any implicit casts we added to fix the type.
Arg = Arg->IgnoreImpCasts();
@@ -3358,9 +3600,11 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
bool ExtraParens = false;
while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
- if (!Invalid && !ExtraParens && !S.getLangOptions().CPlusPlus0x) {
- S.Diag(Arg->getSourceRange().getBegin(),
- diag::ext_template_arg_extra_parens)
+ if (!Invalid && !ExtraParens) {
+ S.Diag(Arg->getLocStart(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_extra_parens :
+ diag::ext_template_arg_extra_parens)
<< Arg->getSourceRange();
ExtraParens = true;
}
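Illustrative code for the extra-parentheses diagnostic (names are assumptions):

  template <int *P> struct Addr {};
  int n;
  Addr<&n> a1;    // fine
  Addr<(&n)> a2;  // C++98: extension warning; C++11: -Wc++98-compat warning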
@@ -3382,7 +3626,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
}
}
- if (S.getLangOptions().MicrosoftExt && isa<CXXUuidofExpr>(Arg)) {
+ if (S.getLangOpts().MicrosoftExt && isa<CXXUuidofExpr>(Arg)) {
Converted = TemplateArgument(ArgIn);
return false;
}
@@ -3391,62 +3635,78 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
Arg = subst->getReplacement()->IgnoreImpCasts();
- DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
- if (!DRE) {
- S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
- << Arg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
- return true;
- }
-
// Stop checking the precise nature of the argument if it is value dependent,
// it should be checked when instantiated.
if (Arg->isValueDependent()) {
Converted = TemplateArgument(ArgIn);
return false;
}
+
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
+ if (!DRE) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
if (!isa<ValueDecl>(DRE->getDecl())) {
- S.Diag(Arg->getSourceRange().getBegin(),
+ S.Diag(Arg->getLocStart(),
diag::err_template_arg_not_object_or_func_form)
<< Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
- NamedDecl *Entity = 0;
+ NamedDecl *Entity = DRE->getDecl();
// Cannot refer to non-static data members
- if (FieldDecl *Field = dyn_cast<FieldDecl>(DRE->getDecl())) {
- S.Diag(Arg->getSourceRange().getBegin(), diag::err_template_arg_field)
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(Entity)) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_field)
<< Field << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
// Cannot refer to non-static member functions
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(DRE->getDecl()))
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Entity)) {
if (!Method->isStatic()) {
- S.Diag(Arg->getSourceRange().getBegin(), diag::err_template_arg_method)
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_method)
<< Method << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
+ }
- // Functions must have external linkage.
- if (FunctionDecl *Func = dyn_cast<FunctionDecl>(DRE->getDecl())) {
- if (!isExternalLinkage(Func->getLinkage())) {
- S.Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_function_not_extern)
- << Func << Arg->getSourceRange();
- S.Diag(Func->getLocation(), diag::note_template_arg_internal_object)
- << true;
- return true;
- }
+ FunctionDecl *Func = dyn_cast<FunctionDecl>(Entity);
+ VarDecl *Var = dyn_cast<VarDecl>(Entity);
+
+ // A non-type template argument must refer to an object or function.
+ if (!Func && !Var) {
+ // We found something, but we don't know specifically what it is.
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_object_or_func)
+ << Arg->getSourceRange();
+ S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
+ return true;
+ }
- // Okay: we've named a function with external linkage.
- Entity = Func;
+ // Address / reference template args must have external linkage in C++98.
+ if (Entity->getLinkage() == InternalLinkage) {
+ S.Diag(Arg->getLocStart(), S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_object_internal :
+ diag::ext_template_arg_object_internal)
+ << !Func << Entity << Arg->getSourceRange();
+ S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
+ << !Func;
+ } else if (Entity->getLinkage() == NoLinkage) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_object_no_linkage)
+ << !Func << Entity << Arg->getSourceRange();
+ S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
+ << !Func;
+ return true;
+ }
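A hedged sketch of the new linkage rules (illustrative names): internal linkage is now only an extension warning in C++98 and a compat warning in C++11, while no linkage remains an error in both modes.

  template <int *P> struct Obj {};
  static int internal_obj;        // internal linkage
  Obj<&internal_obj> o1;          // C++98: extension; C++11: accepted
  void h() {
    static int no_linkage_obj;    // no linkage
    // Obj<&no_linkage_obj> o2;   // error in both modes
  }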
+ if (Func) {
// If the template parameter has pointer type, the function decays.
if (ParamType->isPointerType() && !AddressTaken)
ArgType = S.Context.getPointerType(Func->getType());
@@ -3469,27 +3729,24 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
ArgType = Func->getType();
}
- } else if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
- if (!isExternalLinkage(Var->getLinkage())) {
- S.Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_object_not_extern)
- << Var << Arg->getSourceRange();
- S.Diag(Var->getLocation(), diag::note_template_arg_internal_object)
- << true;
- return true;
- }
-
+ } else {
// A value of reference type is not an object.
if (Var->getType()->isReferenceType()) {
- S.Diag(Arg->getSourceRange().getBegin(),
+ S.Diag(Arg->getLocStart(),
diag::err_template_arg_reference_var)
<< Var->getType() << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
- // Okay: we've named an object with external linkage
- Entity = Var;
+ // A template argument must have static storage duration.
+ // FIXME: Ensure this works for thread_local as well as __thread.
+ if (Var->isThreadSpecified()) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_thread_local)
+ << Arg->getSourceRange();
+ S.Diag(Var->getLocation(), diag::note_template_arg_refers_here);
+ return true;
+ }
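Illustrative triggers for the reference-type and thread-local checks (assuming the GNU __thread extension; rejected lines commented out):

  template <int &R> struct Ref {};
  int i;
  int &ri = i;
  Ref<i> r1;      // OK: names the object directly
  // Ref<ri> r2;  // error: a value of reference type is not an object
  template <int *P> struct TP {};
  __thread int t;
  // TP<&t> tp;   // error: thread-local objects are not valid arguments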
// If the template parameter has pointer type, we must have taken
// the address of this object.
@@ -3536,13 +3793,6 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
S.Diag(Param->getLocation(), diag::note_template_param_here);
}
}
- } else {
- // We found something else, but we don't know specifically what it is.
- S.Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_not_object_or_func)
- << Arg->getSourceRange();
- S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
- return true;
}
bool ObjCLifetimeConversion;
@@ -3568,7 +3818,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
unsigned ArgQuals = ArgType.getCVRQualifiers();
if ((ParamQuals | ArgQuals) != ParamQuals) {
- S.Diag(Arg->getSourceRange().getBegin(),
+ S.Diag(Arg->getLocStart(),
diag::err_template_arg_ref_bind_ignores_quals)
<< ParamType << Arg->getType()
<< Arg->getSourceRange();
@@ -3597,16 +3847,47 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// Create the template argument.
Converted = TemplateArgument(Entity->getCanonicalDecl());
- S.MarkDeclarationReferenced(Arg->getLocStart(), Entity);
+ S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity);
return false;
}
/// \brief Checks whether the given template argument is a pointer to
/// member constant according to C++ [temp.arg.nontype]p1.
-bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
- TemplateArgument &Converted) {
+static bool CheckTemplateArgumentPointerToMember(Sema &S,
+ NonTypeTemplateParmDecl *Param,
+ QualType ParamType,
+ Expr *&ResultArg,
+ TemplateArgument &Converted) {
bool Invalid = false;
+ // Check for a null pointer value.
+ Expr *Arg = ResultArg;
+ switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) {
+ case NPV_Error:
+ return true;
+ case NPV_NullPointer:
+ Converted = TemplateArgument((Decl *)0);
+ return false;
+ case NPV_NotNullPointer:
+ break;
+ }
+
+ bool ObjCLifetimeConversion;
+ if (S.IsQualificationConversion(Arg->getType(),
+ ParamType.getNonReferenceType(),
+ false, ObjCLifetimeConversion)) {
+ Arg = S.ImpCastExprToType(Arg, ParamType, CK_NoOp,
+ Arg->getValueKind()).take();
+ ResultArg = Arg;
+ } else if (!S.Context.hasSameUnqualifiedType(Arg->getType(),
+ ParamType.getNonReferenceType())) {
+ // We can't perform this conversion.
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
+ << Arg->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
// See through any implicit casts we added to fix the type.
while (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Arg))
Arg = Cast->getSubExpr();
@@ -3623,9 +3904,11 @@ bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
// See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
bool ExtraParens = false;
while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
- if (!Invalid && !ExtraParens && !getLangOptions().CPlusPlus0x) {
- Diag(Arg->getSourceRange().getBegin(),
- diag::ext_template_arg_extra_parens)
+ if (!Invalid && !ExtraParens) {
+ S.Diag(Arg->getLocStart(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_extra_parens :
+ diag::ext_template_arg_extra_parens)
<< Arg->getSourceRange();
ExtraParens = true;
}
@@ -3651,7 +3934,7 @@ bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
if (VD->getType()->isMemberPointerType()) {
if (isa<NonTypeTemplateParmDecl>(VD) ||
(isa<VarDecl>(VD) &&
- Context.getCanonicalType(VD->getType()).isConstQualified())) {
+ S.Context.getCanonicalType(VD->getType()).isConstQualified())) {
if (Arg->isTypeDependent() || Arg->isValueDependent())
Converted = TemplateArgument(Arg);
else
@@ -3665,8 +3948,8 @@ bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
}
if (!DRE)
- return Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_not_pointer_to_member_form)
+ return S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_pointer_to_member_form)
<< Arg->getSourceRange();
if (isa<FieldDecl>(DRE->getDecl()) || isa<CXXMethodDecl>(DRE->getDecl())) {
@@ -3684,11 +3967,10 @@ bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
}
// We found something else, but we don't know specifically what it is.
- Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_not_pointer_to_member_form)
- << Arg->getSourceRange();
- Diag(DRE->getDecl()->getLocation(),
- diag::note_template_arg_refers_here);
+ S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_pointer_to_member_form)
+ << Arg->getSourceRange();
+ S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
return true;
}
@@ -3704,7 +3986,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK) {
- SourceLocation StartLoc = Arg->getSourceRange().getBegin();
+ SourceLocation StartLoc = Arg->getLocStart();
// If either the parameter has a dependent type or the argument is
// type-dependent, there's nothing we can check now.
@@ -3720,13 +4002,77 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// template-argument cannot be converted to the type of the
// corresponding template-parameter then the program is
// ill-formed.
- //
- // -- for a non-type template-parameter of integral or
- // enumeration type, integral promotions (4.5) and integral
- // conversions (4.7) are applied.
QualType ParamType = InstantiatedParamType;
- QualType ArgType = Arg->getType();
if (ParamType->isIntegralOrEnumerationType()) {
+ // C++11:
+ // -- for a non-type template-parameter of integral or
+ // enumeration type, conversions permitted in a converted
+ // constant expression are applied.
+ //
+ // C++98:
+ // -- for a non-type template-parameter of integral or
+ // enumeration type, integral promotions (4.5) and integral
+ // conversions (4.7) are applied.
+
+ if (CTAK == CTAK_Deduced &&
+ !Context.hasSameUnqualifiedType(ParamType, Arg->getType())) {
+ // C++ [temp.deduct.type]p17:
+ // If, in the declaration of a function template with a non-type
+ // template-parameter, the non-type template-parameter is used
+ // in an expression in the function parameter-list and, if the
+ // corresponding template-argument is deduced, the
+ // template-argument type shall match the type of the
+ // template-parameter exactly, except that a template-argument
+ // deduced from an array bound may be of any integral type.
+ Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
+ << Arg->getType().getUnqualifiedType()
+ << ParamType.getUnqualifiedType();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ }
+
+ if (getLangOpts().CPlusPlus0x) {
+ // We can't check arbitrary value-dependent arguments.
+ // FIXME: If there's no viable conversion to the template parameter type,
+ // we should be able to diagnose that prior to instantiation.
+ if (Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ return Owned(Arg);
+ }
+
+ // C++ [temp.arg.nontype]p1:
+ // A template-argument for a non-type, non-template template-parameter
+ // shall be one of:
+ //
+ // -- for a non-type template-parameter of integral or enumeration
+ // type, a converted constant expression of the type of the
+ // template-parameter; or
+ llvm::APSInt Value;
+ ExprResult ArgResult =
+ CheckConvertedConstantExpression(Arg, ParamType, Value,
+ CCEK_TemplateArg);
+ if (ArgResult.isInvalid())
+ return ExprError();
+
+ // Widen the argument value to sizeof(parameter type). This is almost
+ // always a no-op, except when the parameter type is bool. In
+ // that case, this may extend the argument from 1 bit to 8 bits.
+ QualType IntegerType = ParamType;
+ if (const EnumType *Enum = IntegerType->getAs<EnumType>())
+ IntegerType = Enum->getDecl()->getIntegerType();
+ Value = Value.extOrTrunc(Context.getTypeSize(IntegerType));
+
+ Converted = TemplateArgument(Value, Context.getCanonicalType(ParamType));
+ return ArgResult;
+ }
+
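A hedged sketch of the C++11 converted-constant-expression path (illustrative names; the rejected line is commented out):

  template <char C> struct Ch {};
  Ch<'x'> c1;     // exact type: no conversion needed
  Ch<120> c2;     // OK: the constant fits in char, so no narrowing
  // Ch<999> c3;  // C++11 error: narrowing in a converted constant expression
  template <bool B> struct Flag {};
  Flag<true> f;   // the evaluated value is widened via the extOrTrunc
                  // call above (from 1 bit for bool)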
+ ExprResult ArgResult = DefaultLvalueConversion(Arg);
+ if (ArgResult.isInvalid())
+ return ExprError();
+ Arg = ArgResult.take();
+
+ QualType ArgType = Arg->getType();
+
// C++ [temp.arg.nontype]p1:
// A template-argument for a non-type, non-template
// template-parameter shall be one of:
@@ -3737,16 +4083,16 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
SourceLocation NonConstantLoc;
llvm::APSInt Value;
if (!ArgType->isIntegralOrEnumerationType()) {
- Diag(Arg->getSourceRange().getBegin(),
+ Diag(Arg->getLocStart(),
diag::err_template_arg_not_integral_or_enumeral)
<< ArgType << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
- } else if (!Arg->isValueDependent() &&
- !Arg->isIntegerConstantExpr(Value, Context, &NonConstantLoc)) {
- Diag(NonConstantLoc, diag::err_template_arg_not_ice)
- << ArgType << Arg->getSourceRange();
- return ExprError();
+ } else if (!Arg->isValueDependent()) {
+ Arg = VerifyIntegerConstantExpression(Arg, &Value,
+ PDiag(diag::err_template_arg_not_ice) << ArgType, false).take();
+ if (!Arg)
+ return ExprError();
}
// From here on out, all we care about are the unqualified forms
@@ -3757,19 +4103,6 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Try to convert the argument to the parameter's type.
if (Context.hasSameType(ParamType, ArgType)) {
// Okay: no conversion necessary
- } else if (CTAK == CTAK_Deduced) {
- // C++ [temp.deduct.type]p17:
- // If, in the declaration of a function template with a non-type
- // template-parameter, the non-type template- parameter is used
- // in an expression in the function parameter-list and, if the
- // corresponding template-argument is deduced, the
- // template-argument type shall match the type of the
- // template-parameter exactly, except that a template-argument
- // deduced from an array bound may be of any integral type.
- Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
- << ArgType << ParamType;
- Diag(Param->getLocation(), diag::note_template_param_here);
- return ExprError();
} else if (ParamType->isBooleanType()) {
// This is an integral-to-boolean conversion.
Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralToBoolean).take();
@@ -3779,7 +4112,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralCast).take();
} else {
// We can't perform this conversion.
- Diag(Arg->getSourceRange().getBegin(),
+ Diag(Arg->getLocStart(),
diag::err_template_arg_not_convertible)
<< Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
@@ -3820,7 +4153,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Complain if an unsigned parameter received a negative value.
if (IntegerType->isUnsignedIntegerOrEnumerationType()
&& (OldValue.isSigned() && OldValue.isNegative())) {
- Diag(Arg->getSourceRange().getBegin(), diag::warn_template_arg_negative)
+ Diag(Arg->getLocStart(), diag::warn_template_arg_negative)
<< OldValue.toString(10) << Value.toString(10) << Param->getType()
<< Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
@@ -3835,7 +4168,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
else
RequiredBits = OldValue.getMinSignedBits();
if (RequiredBits > AllowedBits) {
- Diag(Arg->getSourceRange().getBegin(),
+ Diag(Arg->getLocStart(),
diag::warn_template_arg_too_large)
<< OldValue.toString(10) << Value.toString(10) << Param->getType()
<< Arg->getSourceRange();
@@ -3850,25 +4183,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return Owned(Arg);
}
+ QualType ArgType = Arg->getType();
DeclAccessPair FoundResult; // temporary for ResolveOverloadedFunction
- // C++0x [temp.arg.nontype]p5 bullets 2, 4 and 6 permit conversion
- // from a template argument of type std::nullptr_t to a non-type
- // template parameter of type pointer to object, pointer to
- // function, or pointer-to-member, respectively.
- if (ArgType->isNullPtrType()) {
- if (ParamType->isPointerType() || ParamType->isMemberPointerType()) {
- Converted = TemplateArgument((NamedDecl *)0);
- return Owned(Arg);
- }
-
- if (ParamType->isNullPtrType()) {
- llvm::APSInt Zero(Context.getTypeSize(Context.NullPtrTy), true);
- Converted = TemplateArgument(Zero, Context.NullPtrTy);
- return Owned(Arg);
- }
- }
-
// Handle pointer-to-function, reference-to-function, and
// pointer-to-member-function all in (roughly) the same way.
if (// -- For a non-type template-parameter of type pointer to
@@ -3897,7 +4214,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg, ParamType,
true,
FoundResult)) {
- if (DiagnoseUseOfDecl(Fn, Arg->getSourceRange().getBegin()))
+ if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
return ExprError();
Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
@@ -3914,22 +4231,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return Owned(Arg);
}
- bool ObjCLifetimeConversion;
- if (IsQualificationConversion(ArgType, ParamType.getNonReferenceType(),
- false, ObjCLifetimeConversion)) {
- Arg = ImpCastExprToType(Arg, ParamType, CK_NoOp,
- Arg->getValueKind()).take();
- } else if (!Context.hasSameUnqualifiedType(ArgType,
- ParamType.getNonReferenceType())) {
- // We can't perform this conversion.
- Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_not_convertible)
- << Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
- Diag(Param->getLocation(), diag::note_template_param_here);
- return ExprError();
- }
-
- if (CheckTemplateArgumentPointerToMember(Arg, Converted))
+ if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
+ Converted))
return ExprError();
return Owned(Arg);
}
@@ -3964,7 +4267,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
ParamRefType->getPointeeType(),
true,
FoundResult)) {
- if (DiagnoseUseOfDecl(Fn, Arg->getSourceRange().getBegin()))
+ if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
return ExprError();
Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
@@ -3980,27 +4283,35 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return Owned(Arg);
}
+ // Deal with parameters of type std::nullptr_t.
+ if (ParamType->isNullPtrType()) {
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ return Owned(Arg);
+ }
+
+ switch (isNullPointerValueTemplateArgument(*this, Param, ParamType, Arg)) {
+ case NPV_NotNullPointer:
+ Diag(Arg->getExprLoc(), diag::err_template_arg_not_convertible)
+ << Arg->getType() << ParamType;
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+
+ case NPV_Error:
+ return ExprError();
+
+ case NPV_NullPointer:
+ Converted = TemplateArgument((Decl *)0);
+ return Owned(Arg);
+ }
+ }
+
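An illustrative use of the new std::nullptr_t handling (the rejected line is commented out):

  #include <cstddef>
  template <std::nullptr_t N> struct NP {};
  NP<nullptr> n1;  // OK in C++11
  // NP<0> n2;     // error: untyped null constant; the fix-it suggests a
  //               // static_cast to std::nullptr_t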
// -- For a non-type template-parameter of type pointer to data
// member, qualification conversions (4.4) are applied.
assert(ParamType->isMemberPointerType() && "Only pointers to members remain");
- bool ObjCLifetimeConversion;
- if (Context.hasSameUnqualifiedType(ParamType, ArgType)) {
- // Types match exactly: nothing more to do here.
- } else if (IsQualificationConversion(ArgType, ParamType, false,
- ObjCLifetimeConversion)) {
- Arg = ImpCastExprToType(Arg, ParamType, CK_NoOp,
- Arg->getValueKind()).take();
- } else {
- // We can't perform this conversion.
- Diag(Arg->getSourceRange().getBegin(),
- diag::err_template_arg_not_convertible)
- << Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
- Diag(Param->getLocation(), diag::note_template_param_here);
- return ExprError();
- }
-
- if (CheckTemplateArgumentPointerToMember(Arg, Converted))
+ if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
+ Converted))
return ExprError();
return Owned(Arg);
}
@@ -4059,6 +4370,18 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc) {
assert(Arg.getKind() == TemplateArgument::Declaration &&
"Only declaration template arguments permitted here");
+
+ // For a NULL non-type template argument, return nullptr casted to the
+ // parameter's type.
+ if (!Arg.getAsDecl()) {
+ return ImpCastExprToType(
+ new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc),
+ ParamType,
+ ParamType->getAs<MemberPointerType>()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer);
+ }
+
ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
if (VD->getDeclContext()->isRecord() &&
@@ -4426,6 +4749,9 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
/// false. Otherwise, issues a diagnostic and returns true.
bool
Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
+ if (!S)
+ return false;
+
// Find the nearest enclosing declaration scope.
while ((S->getFlags() & Scope::DeclScope) == 0 ||
(S->getFlags() & Scope::TemplateParamScope) != 0)
@@ -4453,7 +4779,7 @@ Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
/// \brief Determine what kind of template specialization the given declaration
/// is.
-static TemplateSpecializationKind getTemplateSpecializationKind(NamedDecl *D) {
+static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D) {
if (!D)
return TSK_Undeclared;
@@ -4509,8 +4835,11 @@ static bool CheckTemplateSpecializationScope(Sema &S,
EntityKind = 4;
else if (isa<RecordDecl>(Specialized))
EntityKind = 5;
+ else if (isa<EnumDecl>(Specialized) && S.getLangOpts().CPlusPlus0x)
+ EntityKind = 6;
else {
- S.Diag(Loc, diag::err_template_spec_unknown_kind);
+ S.Diag(Loc, diag::err_template_spec_unknown_kind)
+ << S.getLangOpts().CPlusPlus0x;
S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
return true;
}
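A hedged sketch of the C++11-only entity kind added here, assuming scoped-enum forward declarations:

  template <typename T> struct S {
    enum class E;                          // member enumeration
  };
  template <> enum class S<int>::E { X };  // EntityKind == 6; C++11 only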
@@ -4535,7 +4864,7 @@ static bool CheckTemplateSpecializationScope(Sema &S,
}
if (S.CurContext->isRecord() && !IsPartialSpecialization) {
- if (S.getLangOptions().MicrosoftExt) {
+ if (S.getLangOpts().MicrosoftExt) {
// Do not warn for class scope explicit specialization during
// instantiation, warning was already emitted during pattern
// semantic analysis.
@@ -4549,12 +4878,21 @@ static bool CheckTemplateSpecializationScope(Sema &S,
}
}
+ if (S.CurContext->isRecord() &&
+ !S.CurContext->Equals(Specialized->getDeclContext())) {
+ // Make sure that we're specializing in the right record context.
+ // Otherwise, things can go horribly wrong.
+ S.Diag(Loc, diag::err_template_spec_decl_class_scope)
+ << Specialized;
+ return true;
+ }
+
// C++ [temp.class.spec]p6:
// A class template partial specialization may be declared or redeclared
// in any namespace scope in which its definition may be defined (14.5.1
// and 14.5.2).
bool ComplainedAboutScope = false;
- DeclContext *SpecializedContext
+ DeclContext *SpecializedContext
= Specialized->getDeclContext()->getEnclosingNamespaceContext();
DeclContext *DC = S.CurContext->getEnclosingNamespaceContext();
if ((!PrevDecl ||
@@ -4571,24 +4909,28 @@ static bool CheckTemplateSpecializationScope(Sema &S,
// C++0x [temp.expl.spec]p2:
// An explicit specialization shall be declared in a namespace enclosing
// the specialized template.
- if (!DC->InEnclosingNamespaceSetOf(SpecializedContext) &&
- !(S.getLangOptions().CPlusPlus0x && DC->Encloses(SpecializedContext))) {
- bool IsCPlusPlus0xExtension
- = !S.getLangOptions().CPlusPlus0x && DC->Encloses(SpecializedContext);
- if (isa<TranslationUnitDecl>(SpecializedContext))
- S.Diag(Loc, IsCPlusPlus0xExtension
- ? diag::ext_template_spec_decl_out_of_scope_global
- : diag::err_template_spec_decl_out_of_scope_global)
+ if (!DC->InEnclosingNamespaceSetOf(SpecializedContext)) {
+ bool IsCPlusPlus0xExtension = DC->Encloses(SpecializedContext);
+ if (isa<TranslationUnitDecl>(SpecializedContext)) {
+ assert(!IsCPlusPlus0xExtension &&
+ "DC encloses TU but isn't in enclosing namespace set");
+ S.Diag(Loc, diag::err_template_spec_decl_out_of_scope_global)
<< EntityKind << Specialized;
- else if (isa<NamespaceDecl>(SpecializedContext))
- S.Diag(Loc, IsCPlusPlus0xExtension
- ? diag::ext_template_spec_decl_out_of_scope
- : diag::err_template_spec_decl_out_of_scope)
- << EntityKind << Specialized
- << cast<NamedDecl>(SpecializedContext);
+ } else if (isa<NamespaceDecl>(SpecializedContext)) {
+ int Diag;
+ if (!IsCPlusPlus0xExtension)
+ Diag = diag::err_template_spec_decl_out_of_scope;
+ else if (!S.getLangOpts().CPlusPlus0x)
+ Diag = diag::ext_template_spec_decl_out_of_scope;
+ else
+ Diag = diag::warn_cxx98_compat_template_spec_decl_out_of_scope;
+ S.Diag(Loc, Diag)
+ << EntityKind << Specialized << cast<NamedDecl>(SpecializedContext);
+ }
S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
- ComplainedAboutScope = true;
+ ComplainedAboutScope =
+ !(IsCPlusPlus0xExtension && S.getLangOpts().CPlusPlus0x);
}
}
@@ -4718,23 +5060,6 @@ static bool CheckClassTemplatePartialSpecializationArgs(Sema &S,
return false;
}
-/// \brief Retrieve the previous declaration of the given declaration.
-static NamedDecl *getPreviousDecl(NamedDecl *ND) {
- if (VarDecl *VD = dyn_cast<VarDecl>(ND))
- return VD->getPreviousDeclaration();
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
- return FD->getPreviousDeclaration();
- if (TagDecl *TD = dyn_cast<TagDecl>(ND))
- return TD->getPreviousDeclaration();
- if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
- return TD->getPreviousDeclaration();
- if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
- return FTD->getPreviousDeclaration();
- if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(ND))
- return CTD->getPreviousDeclaration();
- return 0;
-}
-
DeclResult
Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
TagUseKind TUK,
@@ -4875,9 +5200,6 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
TemplateArgs, false, Converted))
return true;
- assert((Converted.size() == ClassTemplate->getTemplateParameters()->size()) &&
- "Converted template argument list is too short!");
-
// Find the class template (partial) specialization declaration that
// corresponds to these arguments.
if (isPartialSpecialization) {
@@ -5005,17 +5327,13 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
// partial specialization are deducible from the template
// arguments. If not, this class template partial specialization
// will never be used.
- SmallVector<bool, 8> DeducibleParams;
- DeducibleParams.resize(TemplateParams->size());
+ llvm::SmallBitVector DeducibleParams(TemplateParams->size());
MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
TemplateParams->getDepth(),
DeducibleParams);
- unsigned NumNonDeducible = 0;
- for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I)
- if (!DeducibleParams[I])
- ++NumNonDeducible;
- if (NumNonDeducible) {
+ if (!DeducibleParams.all()) {
+ unsigned NumNonDeducible = DeducibleParams.size()-DeducibleParams.count();
Diag(TemplateNameLoc, diag::warn_partial_specs_not_deducible)
<< (NumNonDeducible > 1)
<< SourceRange(TemplateNameLoc, RAngleLoc);
@@ -5065,7 +5383,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
// use occurs; no diagnostic is required.
if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) {
bool Okay = false;
- for (NamedDecl *Prev = PrevDecl; Prev; Prev = getPreviousDecl(Prev)) {
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
// Is there any previous explicit specialization declaration?
if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
Okay = true;
@@ -5174,7 +5492,7 @@ Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
Scope *ParentScope = FnBodyScope->getParent();
- D.setFunctionDefinition(true);
+ D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = HandleDeclarator(ParentScope, D,
move(TemplateParameterLists));
if (FunctionTemplateDecl *FunctionTemplate
@@ -5189,6 +5507,7 @@ Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
/// \brief Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
static void StripImplicitInstantiation(NamedDecl *D) {
+ // FIXME: "make check" is clean if the call to dropAttrs() is commented out.
D->dropAttrs();
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
@@ -5196,6 +5515,23 @@ static void StripImplicitInstantiation(NamedDecl *D) {
}
}
+/// \brief Compute the diagnostic location for an explicit instantiation
+// declaration or definition.
+static SourceLocation DiagLocForExplicitInstantiation(
+ NamedDecl* D, SourceLocation PointOfInstantiation) {
+ // Explicit instantiations following a specialization have no effect and
+ // hence no PointOfInstantiation. In that case, walk decl backwards
+ // until a valid name loc is found.
+ SourceLocation PrevDiagLoc = PointOfInstantiation;
+ for (Decl *Prev = D; Prev && !PrevDiagLoc.isValid();
+ Prev = Prev->getPreviousDecl()) {
+ PrevDiagLoc = Prev->getLocation();
+ }
+ assert(PrevDiagLoc.isValid() &&
+ "Explicit instantiation without point of instantiation?");
+ return PrevDiagLoc;
+}
+
/// \brief Diagnose cases where we have an explicit template specialization
/// before/after an explicit template instantiation, producing diagnostics
/// for those cases where they are required and determining whether the
@@ -5262,7 +5598,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// before the first use of that specialization that would cause an
// implicit instantiation to take place, in every translation unit in
// which such a use occurs; no diagnostic is required.
- for (NamedDecl *Prev = PrevDecl; Prev; Prev = getPreviousDecl(Prev)) {
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
// Is there any previous explicit specialization declaration?
if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization)
return false;
@@ -5275,7 +5611,6 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
return true;
}
- break;
case TSK_ExplicitInstantiationDeclaration:
switch (PrevTSK) {
@@ -5306,14 +5641,15 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// translation unit, the definition shall follow the declaration.
Diag(NewLoc,
diag::err_explicit_instantiation_declaration_after_definition);
- Diag(PrevPointOfInstantiation,
+
+ // Explicit instantiations following a specialization have no effect and
+ // hence no PrevPointOfInstantiation. In that case, walk decl backwards
+ // until a valid name loc is found.
+ Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
diag::note_explicit_instantiation_definition_here);
- assert(PrevPointOfInstantiation.isValid() &&
- "Explicit instantiation without point of instantiation?");
HasNoEffect = true;
return false;
}
- break;
case TSK_ExplicitInstantiationDefinition:
switch (PrevTSK) {
@@ -5333,18 +5669,32 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// In C++98/03 mode, we only give an extension warning here, because it
// is not harmful to try to explicitly instantiate something that
// has been explicitly specialized.
- if (!getLangOptions().CPlusPlus0x) {
- Diag(NewLoc, diag::ext_explicit_instantiation_after_specialization)
- << PrevDecl;
- Diag(PrevDecl->getLocation(),
- diag::note_previous_template_specialization);
- }
+ Diag(NewLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_explicit_instantiation_after_specialization :
+ diag::ext_explicit_instantiation_after_specialization)
+ << PrevDecl;
+ Diag(PrevDecl->getLocation(),
+ diag::note_previous_template_specialization);
HasNoEffect = true;
return false;
case TSK_ExplicitInstantiationDeclaration:
// We're explicity instantiating a definition for something for which we
// were previously asked to suppress instantiations. That's fine.
+
+ // C++0x [temp.explicit]p4:
+ // For a given set of template parameters, if an explicit instantiation
+ // of a template appears after a declaration of an explicit
+ // specialization for that template, the explicit instantiation has no
+ // effect.
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
+ // Is there any previous explicit specialization declaration?
+ if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
+ HasNoEffect = true;
+ break;
+ }
+ }
+
return false;
case TSK_ExplicitInstantiationDefinition:
@@ -5354,12 +5704,11 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// in a program,
Diag(NewLoc, diag::err_explicit_instantiation_duplicate)
<< PrevDecl;
- Diag(PrevPointOfInstantiation,
+ Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
diag::note_previous_explicit_instantiation);
HasNoEffect = true;
return false;
}
- break;
}
llvm_unreachable("Missing specialization/instantiation case?");
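Illustrative source for the specialization-then-instantiation cases handled above (names are assumptions):

  template <typename T> struct Q { void f() {} };
  template <> struct Q<int> {};  // explicit specialization comes first
  template struct Q<int>;        // no effect: C++98 warns as an extension,
                                 // C++11 warns only under -Wc++98-compat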
@@ -5486,8 +5835,13 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
// Note: do not overwrite location info if previous template
// specialization kind was explicit.
TemplateSpecializationKind TSK = SpecInfo->getTemplateSpecializationKind();
- if (TSK == TSK_Undeclared || TSK == TSK_ImplicitInstantiation)
+ if (TSK == TSK_Undeclared || TSK == TSK_ImplicitInstantiation) {
Specialization->setLocation(FD->getLocation());
+ // C++11 [dcl.constexpr]p1: An explicit specialization of a constexpr
+ // function can differ from the template declaration with respect to
+ // the constexpr specifier.
+ Specialization->setConstexpr(FD->isConstexpr());
+ }
// FIXME: Check if the prior specialization has a point of instantiation.
 // If so, we have run afoul of [temp.expl.spec]p6.
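A minimal sketch of the constexpr rule quoted above (illustrative names):

  template <typename T> T twice(T t) { return t + t; }  // not constexpr
  template <> constexpr int twice(int t) { return t + t; }
  // C++11 allows the specialization to differ in constexpr-ness; the code
  // above copies FD->isConstexpr() onto the specialization.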
@@ -5601,6 +5955,14 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
InstantiatedFrom = PrevRecord->getInstantiatedFromMemberClass();
MSInfo = PrevRecord->getMemberSpecializationInfo();
}
+ } else if (isa<EnumDecl>(Member)) {
+ EnumDecl *PrevEnum;
+ if (Previous.isSingleResult() &&
+ (PrevEnum = dyn_cast<EnumDecl>(Previous.getFoundDecl()))) {
+ Instantiation = PrevEnum;
+ InstantiatedFrom = PrevEnum->getInstantiatedFromMemberEnum();
+ MSInfo = PrevEnum->getMemberSpecializationInfo();
+ }
}
if (!Instantiation) {
@@ -5639,7 +6001,7 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template is
- // explicitly specialized then that spe- cialization shall be declared
+ // explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an implicit
// instantiation to take place, in every translation unit in which such a
// use occurs; no diagnostic is required.
@@ -5691,8 +6053,7 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
cast<VarDecl>(InstantiatedFrom),
TSK_ExplicitSpecialization);
MarkUnusedFileScopedDecl(InstantiationVar);
- } else {
- assert(isa<CXXRecordDecl>(Member) && "Only member classes remain");
+ } else if (isa<CXXRecordDecl>(Member)) {
CXXRecordDecl *InstantiationClass = cast<CXXRecordDecl>(Instantiation);
if (InstantiationClass->getTemplateSpecializationKind() ==
TSK_ImplicitInstantiation) {
@@ -5704,6 +6065,18 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass(
cast<CXXRecordDecl>(InstantiatedFrom),
TSK_ExplicitSpecialization);
+ } else {
+ assert(isa<EnumDecl>(Member) && "Only member enums remain");
+ EnumDecl *InstantiationEnum = cast<EnumDecl>(Instantiation);
+ if (InstantiationEnum->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationEnum->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationEnum->setLocation(Member->getLocation());
+ }
+
+ cast<EnumDecl>(Member)->setInstantiationOfMemberEnum(
+ cast<EnumDecl>(InstantiatedFrom), TSK_ExplicitSpecialization);
}
// Save the caller the trouble of having to figure out which declaration
@@ -5728,45 +6101,41 @@ static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
return true;
}
- // C++0x [temp.explicit]p2:
+ // C++11 [temp.explicit]p3:
// An explicit instantiation shall appear in an enclosing namespace of its
- // template.
+ // template. If the name declared in the explicit instantiation is an
+ // unqualified name, the explicit instantiation shall appear in the
+ // namespace where its template is declared or, if that namespace is inline
+ // (7.3.1), any namespace from its enclosing namespace set.
//
// This is DR275, which we do not retroactively apply to C++98/03.
- if (S.getLangOptions().CPlusPlus0x &&
- !CurContext->Encloses(OrigContext)) {
- if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(OrigContext))
+ if (WasQualifiedName) {
+ if (CurContext->Encloses(OrigContext))
+ return false;
+ } else {
+ if (CurContext->InEnclosingNamespaceSetOf(OrigContext))
+ return false;
+ }
+
+ if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(OrigContext)) {
+ if (WasQualifiedName)
S.Diag(InstLoc,
- S.getLangOptions().CPlusPlus0x?
- diag::err_explicit_instantiation_out_of_scope
- : diag::warn_explicit_instantiation_out_of_scope_0x)
+ S.getLangOpts().CPlusPlus0x?
+ diag::err_explicit_instantiation_out_of_scope :
+ diag::warn_explicit_instantiation_out_of_scope_0x)
<< D << NS;
else
S.Diag(InstLoc,
- S.getLangOptions().CPlusPlus0x?
- diag::err_explicit_instantiation_must_be_global
- : diag::warn_explicit_instantiation_out_of_scope_0x)
- << D;
- S.Diag(D->getLocation(), diag::note_explicit_instantiation_here);
- return false;
- }
-
- // C++0x [temp.explicit]p2:
- // If the name declared in the explicit instantiation is an unqualified
- // name, the explicit instantiation shall appear in the namespace where
- // its template is declared or, if that namespace is inline (7.3.1), any
- // namespace from its enclosing namespace set.
- if (WasQualifiedName)
- return false;
-
- if (CurContext->InEnclosingNamespaceSetOf(OrigContext))
- return false;
-
- S.Diag(InstLoc,
- S.getLangOptions().CPlusPlus0x?
- diag::err_explicit_instantiation_unqualified_wrong_namespace
- : diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x)
- << D << OrigContext;
+ S.getLangOpts().CPlusPlus0x?
+ diag::err_explicit_instantiation_unqualified_wrong_namespace :
+ diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x)
+ << D << NS;
+ } else
+ S.Diag(InstLoc,
+ S.getLangOpts().CPlusPlus0x?
+ diag::err_explicit_instantiation_must_be_global :
+ diag::warn_explicit_instantiation_must_be_global_0x)
+ << D;
S.Diag(D->getLocation(), diag::note_explicit_instantiation_here);
return false;
}
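// Illustration (not part of the patch): the restructured checks above
// implement C++11 [temp.explicit]p3; a minimal sketch of the two accepted
// forms. An unqualified explicit instantiation outside the enclosing
// namespace set draws err_explicit_instantiation_unqualified_wrong_namespace.
namespace N { template<typename T> struct S {}; }
template struct N::S<int>;                  // OK: qualified, enclosing namespace
namespace N { template struct S<long>; }    // OK: unqualified, template's namespace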
@@ -5776,7 +6145,7 @@ static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
if (!SS.isSet())
return false;
- // C++0x [temp.explicit]p2:
+ // C++11 [temp.explicit]p3:
// If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
// the class template specialization in the qualified-id for the member
@@ -5847,9 +6216,6 @@ Sema::ActOnExplicitInstantiation(Scope *S,
TemplateArgs, false, Converted))
return true;
- assert((Converted.size() == ClassTemplate->getTemplateParameters()->size()) &&
- "Converted template argument list is too short!");
-
// Find the class template specialization declaration that
// corresponds to these arguments.
void *InsertPos = 0;
@@ -5932,6 +6298,9 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Specialization->setExternLoc(ExternLoc);
Specialization->setTemplateKeywordLoc(TemplateLoc);
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
+
// Add the explicit instantiation into its lexical context. However,
// since explicit instantiations are never found by name lookup, we
// just put it into the declaration context directly.
@@ -6000,7 +6369,7 @@ Sema::ActOnExplicitInstantiation(Scope *S,
KWLoc, SS, Name, NameLoc, Attr, AS_none,
/*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(*this, 0, 0),
- Owned, IsDependent, false, false,
+ Owned, IsDependent, SourceLocation(), false,
TypeResult());
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
@@ -6008,11 +6377,7 @@ Sema::ActOnExplicitInstantiation(Scope *S,
return true;
TagDecl *Tag = cast<TagDecl>(TagD);
- if (Tag->isEnum()) {
- Diag(TemplateLoc, diag::err_explicit_instantiation_enum)
- << Context.getTypeDeclType(Tag);
- return true;
- }
+ assert(!Tag->isEnum() && "shouldn't see enumerations here");
if (Tag->isInvalidDecl())
return true;
@@ -6053,7 +6418,7 @@ Sema::ActOnExplicitInstantiation(Scope *S,
// Verify that it is okay to explicitly instantiate here.
CXXRecordDecl *PrevDecl
- = cast_or_null<CXXRecordDecl>(Record->getPreviousDeclaration());
+ = cast_or_null<CXXRecordDecl>(Record->getPreviousDecl());
if (!PrevDecl && Record->getDefinition())
PrevDecl = Record;
if (PrevDecl) {
@@ -6120,7 +6485,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
DeclarationName Name = NameInfo.getName();
if (!Name) {
if (!D.isInvalidType())
- Diag(D.getDeclSpec().getSourceRange().getBegin(),
+ Diag(D.getDeclSpec().getLocStart(),
diag::err_explicit_instantiation_requires_name)
<< D.getDeclSpec().getSourceRange()
<< D.getSourceRange();
@@ -6161,9 +6526,11 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// inline or constexpr specifiers.
// Presumably, this also applies to member functions of class templates as
// well.
- if (D.getDeclSpec().isInlineSpecified() && getLangOptions().CPlusPlus0x)
+ if (D.getDeclSpec().isInlineSpecified())
Diag(D.getDeclSpec().getInlineSpecLoc(),
- diag::err_explicit_instantiation_inline)
+ getLangOpts().CPlusPlus0x ?
+ diag::err_explicit_instantiation_inline :
+ diag::warn_explicit_instantiation_inline_0x)
<< FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
if (D.getDeclSpec().isConstexprSpecified())
// FIXME: Add a fix-it to remove the 'constexpr' and add a 'const' if one is
@@ -6326,7 +6693,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
return true;
}
- FunctionDecl *PrevDecl = Specialization->getPreviousDeclaration();
+ FunctionDecl *PrevDecl = Specialization->getPreviousDecl();
if (!PrevDecl && Specialization->isThisDeclarationADefinition())
PrevDecl = Specialization;
@@ -6346,6 +6713,9 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
}
Specialization->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
+ AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateFunctionDefinition(D.getIdentifierLoc(), Specialization);
@@ -6402,7 +6772,7 @@ Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Create type-source location information for this type.
TypeLocBuilder TLB;
DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(Result);
- TL.setKeywordLoc(TagLoc);
+ TL.setElaboratedKeywordLoc(TagLoc);
TL.setQualifierLoc(SS.getWithLocInContext(Context));
TL.setNameLoc(NameLoc);
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
@@ -6415,9 +6785,11 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
if (SS.isInvalid())
return true;
- if (TypenameLoc.isValid() && S && !S->getTemplateParamParent() &&
- !getLangOptions().CPlusPlus0x)
- Diag(TypenameLoc, diag::ext_typename_outside_of_template)
+ if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TypenameLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_typename_outside_of_template :
+ diag::ext_typename_outside_of_template)
<< FixItHint::CreateRemoval(TypenameLoc);
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
@@ -6429,12 +6801,12 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
if (isa<DependentNameType>(T)) {
DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
- TL.setKeywordLoc(TypenameLoc);
+ TL.setElaboratedKeywordLoc(TypenameLoc);
TL.setQualifierLoc(QualifierLoc);
TL.setNameLoc(IdLoc);
} else {
ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc());
- TL.setKeywordLoc(TypenameLoc);
+ TL.setElaboratedKeywordLoc(TypenameLoc);
TL.setQualifierLoc(QualifierLoc);
cast<TypeSpecTypeLoc>(TL.getNamedTypeLoc()).setNameLoc(IdLoc);
}
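// Illustration (not part of the patch): the diagnostic swap above reflects
// that C++11 allows 'typename' outside templates; a minimal sketch.
struct A { typedef int type; };
typename A::type x = 0;   // C++98: ext_typename_outside_of_template;
                          // C++11: warn_cxx98_compat_typename_outside_of_template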
@@ -6443,18 +6815,21 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
}
TypeResult
-Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
- const CXXScopeSpec &SS,
- SourceLocation TemplateLoc,
+Sema::ActOnTypenameType(Scope *S,
+ SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
TemplateTy TemplateIn,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc) {
- if (TypenameLoc.isValid() && S && !S->getTemplateParamParent() &&
- !getLangOptions().CPlusPlus0x)
- Diag(TypenameLoc, diag::ext_typename_outside_of_template)
- << FixItHint::CreateRemoval(TypenameLoc);
+ if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TypenameLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_typename_outside_of_template :
+ diag::ext_typename_outside_of_template)
+ << FixItHint::CreateRemoval(TypenameLoc);
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
@@ -6475,11 +6850,12 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
TypeLocBuilder Builder;
DependentTemplateSpecializationTypeLoc SpecTL
= Builder.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(TypenameLoc);
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
- SpecTL.setKeywordLoc(TypenameLoc);
- SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
- SpecTL.setNameLoc(TemplateNameLoc);
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
@@ -6489,22 +6865,20 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
if (T.isNull())
return true;
- // Provide source-location information for the template specialization
- // type.
+ // Provide source-location information for the template specialization type.
TypeLocBuilder Builder;
- TemplateSpecializationTypeLoc SpecTL
+ TemplateSpecializationTypeLoc SpecTL
= Builder.push<TemplateSpecializationTypeLoc>(T);
-
- // FIXME: No place to set the location of the 'template' keyword!
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
- SpecTL.setTemplateNameLoc(TemplateNameLoc);
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
T = Context.getElaboratedType(ETK_Typename, SS.getScopeRep(), T);
ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
- TL.setKeywordLoc(TypenameLoc);
+ TL.setElaboratedKeywordLoc(TypenameLoc);
TL.setQualifierLoc(SS.getWithLocInContext(Context));
TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T);
@@ -6588,10 +6962,6 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
Referenced = Result.getFoundDecl();
break;
-
- llvm_unreachable("unresolved using decl in non-dependent context");
- return QualType();
-
case LookupResult::FoundOverloaded:
DiagID = diag::err_typename_nested_not_type;
Referenced = *Result.begin();
@@ -6650,6 +7020,11 @@ namespace {
this->Loc = Loc;
this->Entity = Entity;
}
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
};
}
@@ -6762,7 +7137,7 @@ std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs) {
- llvm::SmallString<128> Str;
+ SmallString<128> Str;
llvm::raw_svector_ostream Out(Str);
if (!Params || Params->size() == 0 || NumArgs == 0)
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
index 93ea89d..2ea1e6f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "TreeTransform.h"
#include <algorithm>
@@ -112,15 +112,16 @@ struct RefParamPartialOrderingComparison {
static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S,
- TemplateParameterList *TemplateParams,
- QualType Param,
- QualType Arg,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- unsigned TDF,
- bool PartialOrdering = false,
- SmallVectorImpl<RefParamPartialOrderingComparison> *
+DeduceTemplateArgumentsByTypeMatch(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType Param,
+ QualType Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &
+ Deduced,
+ unsigned TDF,
+ bool PartialOrdering = false,
+ SmallVectorImpl<RefParamPartialOrderingComparison> *
RefParamComparisons = 0);
static Sema::TemplateDeductionResult
@@ -277,7 +278,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
return X;
}
- return DeducedTemplateArgument();
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
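// Illustration (not part of the patch): replacing the dead return with
// llvm_unreachable keeps the switch exhaustive; a minimal sketch of the
// idiom, assuming llvm/Support/ErrorHandling.h is included.
enum class Kind { A, B };
int classify(Kind K) {
  switch (K) {
  case Kind::A: return 0;
  case Kind::B: return 1;
  }
  llvm_unreachable("Invalid Kind!");   // no default: -Wswitch flags new kinds
}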
/// \brief Deduce the value of the given non-type template parameter
@@ -547,7 +548,7 @@ static TemplateParameter makeTemplateParameter(Decl *D) {
/// arguments in a set of argument packs.
static void PrepareArgumentPackDeduction(Sema &S,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- const SmallVectorImpl<unsigned> &PackIndices,
+ ArrayRef<unsigned> PackIndices,
SmallVectorImpl<DeducedTemplateArgument> &SavedPacks,
SmallVectorImpl<
SmallVector<DeducedTemplateArgument, 4> > &NewlyDeducedPacks) {
@@ -582,7 +583,7 @@ FinishArgumentPackDeduction(Sema &S,
TemplateParameterList *TemplateParams,
bool HasAnyArguments,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- const SmallVectorImpl<unsigned> &PackIndices,
+ ArrayRef<unsigned> PackIndices,
SmallVectorImpl<DeducedTemplateArgument> &SavedPacks,
SmallVectorImpl<
SmallVector<DeducedTemplateArgument, 4> > &NewlyDeducedPacks,
@@ -705,12 +706,11 @@ DeduceTemplateArguments(Sema &S,
}
if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- Params[ParamIdx],
- Args[ArgIdx],
- Info, Deduced, TDF,
- PartialOrdering,
- RefParamComparisons))
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ Params[ParamIdx], Args[ArgIdx],
+ Info, Deduced, TDF,
+ PartialOrdering,
+ RefParamComparisons))
return Result;
++ArgIdx;
@@ -736,7 +736,7 @@ DeduceTemplateArguments(Sema &S,
SmallVector<unsigned, 2> PackIndices;
QualType Pattern = Expansion->getPattern();
{
- llvm::BitVector SawIndices(TemplateParams->size());
+ llvm::SmallBitVector SawIndices(TemplateParams->size());
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
@@ -766,9 +766,10 @@ DeduceTemplateArguments(Sema &S,
// Deduce template arguments from the pattern.
if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx],
- Info, Deduced, TDF, PartialOrdering,
- RefParamComparisons))
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern,
+ Args[ArgIdx], Info, Deduced,
+ TDF, PartialOrdering,
+ RefParamComparisons))
return Result;
// Capture the deduced template arguments for each parameter pack expanded
@@ -858,14 +859,15 @@ static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
/// "success" result means that template argument deduction has not yet failed,
/// but it may still fail, later, for other reasons.
static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S,
- TemplateParameterList *TemplateParams,
- QualType ParamIn, QualType ArgIn,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- unsigned TDF,
- bool PartialOrdering,
- SmallVectorImpl<RefParamPartialOrderingComparison> *RefParamComparisons) {
+DeduceTemplateArgumentsByTypeMatch(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType ParamIn, QualType ArgIn,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF,
+ bool PartialOrdering,
+ SmallVectorImpl<RefParamPartialOrderingComparison> *
+ RefParamComparisons) {
// We only want to look at the canonical types, since typedefs and
// sugar are not part of template argument deduction.
QualType Param = S.Context.getCanonicalType(ParamIn);
@@ -960,14 +962,6 @@ DeduceTemplateArguments(Sema &S,
}
}
- // If the parameter type is not dependent, there is nothing to deduce.
- if (!Param->isDependentType()) {
- if (!(TDF & TDF_SkipNonDependent) && Param != Arg)
- return Sema::TDK_NonDeducedMismatch;
-
- return Sema::TDK_Success;
- }
-
// C++ [temp.deduct.type]p9:
// A template type argument T, a template template argument TT or a
// template non-type argument i can be deduced if P and A have one of
@@ -1035,7 +1029,7 @@ DeduceTemplateArguments(Sema &S,
// Objective-C ARC:
// If template deduction would produce an argument type with lifetime type
// but no lifetime qualifier, the __strong lifetime qualifier is inferred.
- if (S.getLangOptions().ObjCAutoRefCount &&
+ if (S.getLangOpts().ObjCAutoRefCount &&
DeducedType->isObjCLifetimeType() &&
!DeducedQs.hasObjCLifetime())
DeducedQs.setObjCLifetime(Qualifiers::OCL_Strong);
@@ -1081,6 +1075,17 @@ DeduceTemplateArguments(Sema &S,
if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
return Sema::TDK_NonDeducedMismatch;
}
+
+ // If the parameter type is not dependent, there is nothing to deduce.
+ if (!Param->isDependentType()) {
+ if (!(TDF & TDF_SkipNonDependent) && Param != Arg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return Sema::TDK_Success;
+ }
+ } else if (!Param->isDependentType() &&
+ Param.getUnqualifiedType() == Arg.getUnqualifiedType()) {
+ return Sema::TDK_Success;
}
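// Illustration (not part of the patch): after moving the non-dependent check
// below the qualifier handling, deduction on a non-dependent parameter type
// reduces to a type-identity test (unless TDF_SkipNonDependent applies).
template<typename T> void f(T, int);   // 'int' is non-dependent
// f(3.14, 42);                        // deduces T = double; 'int' needs no deduction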
switch (Param->getTypeClass()) {
@@ -1093,9 +1098,9 @@ DeduceTemplateArguments(Sema &S,
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
llvm_unreachable("Type nodes handled above");
-
- // These types cannot be used in templates or cannot be dependent, so
- // deduction always fails.
+
+ // These types cannot be dependent, so simply check whether the types are
+ // the same.
case Type::Builtin:
case Type::VariableArray:
case Type::Vector:
@@ -1104,23 +1109,32 @@ DeduceTemplateArguments(Sema &S,
case Type::Enum:
case Type::ObjCObject:
case Type::ObjCInterface:
- case Type::ObjCObjectPointer:
- return Sema::TDK_NonDeducedMismatch;
-
+ case Type::ObjCObjectPointer: {
+ if (TDF & TDF_SkipNonDependent)
+ return Sema::TDK_Success;
+
+ if (TDF & TDF_IgnoreQualifiers) {
+ Param = Param.getUnqualifiedType();
+ Arg = Arg.getUnqualifiedType();
+ }
+
+ return Param == Arg? Sema::TDK_Success : Sema::TDK_NonDeducedMismatch;
+ }
+
// _Complex T [placeholder extension]
case Type::Complex:
if (const ComplexType *ComplexArg = Arg->getAs<ComplexType>())
- return DeduceTemplateArguments(S, TemplateParams,
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
cast<ComplexType>(Param)->getElementType(),
- ComplexArg->getElementType(),
- Info, Deduced, TDF);
+ ComplexArg->getElementType(),
+ Info, Deduced, TDF);
return Sema::TDK_NonDeducedMismatch;
// _Atomic T [extension]
case Type::Atomic:
if (const AtomicType *AtomicArg = Arg->getAs<AtomicType>())
- return DeduceTemplateArguments(S, TemplateParams,
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
cast<AtomicType>(Param)->getValueType(),
AtomicArg->getValueType(),
Info, Deduced, TDF);
@@ -1140,8 +1154,8 @@ DeduceTemplateArguments(Sema &S,
}
unsigned SubTDF = TDF & (TDF_IgnoreQualifiers | TDF_DerivedClass);
- return DeduceTemplateArguments(S, TemplateParams,
- cast<PointerType>(Param)->getPointeeType(),
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<PointerType>(Param)->getPointeeType(),
PointeeType,
Info, Deduced, SubTDF);
}
@@ -1152,10 +1166,9 @@ DeduceTemplateArguments(Sema &S,
if (!ReferenceArg)
return Sema::TDK_NonDeducedMismatch;
- return DeduceTemplateArguments(S, TemplateParams,
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
cast<LValueReferenceType>(Param)->getPointeeType(),
- ReferenceArg->getPointeeType(),
- Info, Deduced, 0);
+ ReferenceArg->getPointeeType(), Info, Deduced, 0);
}
// T && [C++0x]
@@ -1164,10 +1177,10 @@ DeduceTemplateArguments(Sema &S,
if (!ReferenceArg)
return Sema::TDK_NonDeducedMismatch;
- return DeduceTemplateArguments(S, TemplateParams,
- cast<RValueReferenceType>(Param)->getPointeeType(),
- ReferenceArg->getPointeeType(),
- Info, Deduced, 0);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<RValueReferenceType>(Param)->getPointeeType(),
+ ReferenceArg->getPointeeType(),
+ Info, Deduced, 0);
}
// T [] (implied, but not stated explicitly)
@@ -1178,10 +1191,10 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_NonDeducedMismatch;
unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
- return DeduceTemplateArguments(S, TemplateParams,
- S.Context.getAsIncompleteArrayType(Param)->getElementType(),
- IncompleteArrayArg->getElementType(),
- Info, Deduced, SubTDF);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ S.Context.getAsIncompleteArrayType(Param)->getElementType(),
+ IncompleteArrayArg->getElementType(),
+ Info, Deduced, SubTDF);
}
// T [integer-constant]
@@ -1197,10 +1210,10 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_NonDeducedMismatch;
unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
- return DeduceTemplateArguments(S, TemplateParams,
- ConstantArrayParm->getElementType(),
- ConstantArrayArg->getElementType(),
- Info, Deduced, SubTDF);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ ConstantArrayParm->getElementType(),
+ ConstantArrayArg->getElementType(),
+ Info, Deduced, SubTDF);
}
// type [i]
@@ -1215,10 +1228,10 @@ DeduceTemplateArguments(Sema &S,
const DependentSizedArrayType *DependentArrayParm
= S.Context.getAsDependentSizedArrayType(Param);
if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- DependentArrayParm->getElementType(),
- ArrayArg->getElementType(),
- Info, Deduced, SubTDF))
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ DependentArrayParm->getElementType(),
+ ArrayArg->getElementType(),
+ Info, Deduced, SubTDF))
return Result;
    // Determine whether the array bound is something we can deduce.
@@ -1272,10 +1285,10 @@ DeduceTemplateArguments(Sema &S,
// Check return types.
if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- FunctionProtoParam->getResultType(),
- FunctionProtoArg->getResultType(),
- Info, Deduced, 0))
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ FunctionProtoParam->getResultType(),
+ FunctionProtoArg->getResultType(),
+ Info, Deduced, 0))
return Result;
return DeduceTemplateArguments(S, TemplateParams,
@@ -1334,8 +1347,8 @@ DeduceTemplateArguments(Sema &S,
SmallVector<const RecordType *, 8> ToVisit;
ToVisit.push_back(RecordT);
bool Successful = false;
- SmallVectorImpl<DeducedTemplateArgument> DeducedOrig(0);
- DeducedOrig = Deduced;
+ SmallVector<DeducedTemplateArgument, 8> DeducedOrig(Deduced.begin(),
+ Deduced.end());
while (!ToVisit.empty()) {
// Retrieve the next class in the inheritance hierarchy.
const RecordType *NextT = ToVisit.back();
@@ -1357,7 +1370,8 @@ DeduceTemplateArguments(Sema &S,
// from this base class.
if (BaseResult == Sema::TDK_Success) {
Successful = true;
- DeducedOrig = Deduced;
+ DeducedOrig.clear();
+ DeducedOrig.append(Deduced.begin(), Deduced.end());
}
else
Deduced = DeducedOrig;
@@ -1399,17 +1413,18 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_NonDeducedMismatch;
if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- MemPtrParam->getPointeeType(),
- MemPtrArg->getPointeeType(),
- Info, Deduced,
- TDF & TDF_IgnoreQualifiers))
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ MemPtrParam->getPointeeType(),
+ MemPtrArg->getPointeeType(),
+ Info, Deduced,
+ TDF & TDF_IgnoreQualifiers))
return Result;
- return DeduceTemplateArguments(S, TemplateParams,
- QualType(MemPtrParam->getClass(), 0),
- QualType(MemPtrArg->getClass(), 0),
- Info, Deduced, 0);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ QualType(MemPtrParam->getClass(), 0),
+ QualType(MemPtrArg->getClass(), 0),
+ Info, Deduced,
+ TDF & TDF_IgnoreQualifiers);
}
// (clang extension)
@@ -1424,10 +1439,10 @@ DeduceTemplateArguments(Sema &S,
if (!BlockPtrArg)
return Sema::TDK_NonDeducedMismatch;
- return DeduceTemplateArguments(S, TemplateParams,
- BlockPtrParam->getPointeeType(),
- BlockPtrArg->getPointeeType(), Info,
- Deduced, 0);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ BlockPtrParam->getPointeeType(),
+ BlockPtrArg->getPointeeType(),
+ Info, Deduced, 0);
}
// (clang extension)
@@ -1441,11 +1456,10 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_NonDeducedMismatch;
// Perform deduction on the element types.
- return DeduceTemplateArguments(S, TemplateParams,
- VectorParam->getElementType(),
- VectorArg->getElementType(),
- Info, Deduced,
- TDF);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF);
}
if (const DependentSizedExtVectorType *VectorArg
@@ -1455,11 +1469,10 @@ DeduceTemplateArguments(Sema &S,
// ordering.
// Perform deduction on the element types.
- return DeduceTemplateArguments(S, TemplateParams,
- VectorParam->getElementType(),
- VectorArg->getElementType(),
- Info, Deduced,
- TDF);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF);
}
return Sema::TDK_NonDeducedMismatch;
@@ -1475,11 +1488,10 @@ DeduceTemplateArguments(Sema &S,
if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
// Perform deduction on the element types.
if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- VectorParam->getElementType(),
- VectorArg->getElementType(),
- Info, Deduced,
- TDF))
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF))
return Result;
// Perform deduction on the vector size, if we can.
@@ -1498,11 +1510,10 @@ DeduceTemplateArguments(Sema &S,
= dyn_cast<DependentSizedExtVectorType>(Arg)) {
// Perform deduction on the element types.
if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- VectorParam->getElementType(),
- VectorArg->getElementType(),
- Info, Deduced,
- TDF))
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF))
return Result;
// Perform deduction on the vector size, if we can.
@@ -1531,7 +1542,7 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_Success;
}
- return Sema::TDK_Success;
+ llvm_unreachable("Invalid Type Class!");
}
static Sema::TemplateDeductionResult
@@ -1553,8 +1564,10 @@ DeduceTemplateArguments(Sema &S,
case TemplateArgument::Type:
if (Arg.getKind() == TemplateArgument::Type)
- return DeduceTemplateArguments(S, TemplateParams, Param.getAsType(),
- Arg.getAsType(), Info, Deduced, 0);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ Param.getAsType(),
+ Arg.getAsType(),
+ Info, Deduced, 0);
Info.FirstArg = Param;
Info.SecondArg = Arg;
return Sema::TDK_NonDeducedMismatch;
@@ -1570,12 +1583,10 @@ DeduceTemplateArguments(Sema &S,
case TemplateArgument::TemplateExpansion:
llvm_unreachable("caller should handle pack expansions");
- break;
case TemplateArgument::Declaration:
if (Arg.getKind() == TemplateArgument::Declaration &&
- Param.getAsDecl()->getCanonicalDecl() ==
- Arg.getAsDecl()->getCanonicalDecl())
+ isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()))
return Sema::TDK_Success;
Info.FirstArg = Param;
@@ -1630,7 +1641,7 @@ DeduceTemplateArguments(Sema &S,
llvm_unreachable("Argument packs should be expanded by the caller!");
}
- return Sema::TDK_Success;
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
/// \brief Determine whether there is a template argument to be used for
@@ -1746,7 +1757,7 @@ DeduceTemplateArguments(Sema &S,
// parameter packs expanded by the pack expansion.
SmallVector<unsigned, 2> PackIndices;
{
- llvm::BitVector SawIndices(TemplateParams->size());
+ llvm::SmallBitVector SawIndices(TemplateParams->size());
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
@@ -1846,8 +1857,7 @@ static bool isSameTemplateArg(ASTContext &Context,
Context.getCanonicalType(Y.getAsType());
case TemplateArgument::Declaration:
- return X.getAsDecl()->getCanonicalDecl() ==
- Y.getAsDecl()->getCanonicalDecl();
+ return isSameDeclaration(X.getAsDecl(), Y.getAsDecl());
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
@@ -1880,7 +1890,7 @@ static bool isSameTemplateArg(ASTContext &Context,
return true;
}
- return false;
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
/// \brief Allocate a TemplateArgumentLoc where all locations have
@@ -1905,7 +1915,6 @@ getTrivialTemplateArgumentLoc(Sema &S,
switch (Arg.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Can't get a NULL template argument here");
- break;
case TemplateArgument::Type:
return TemplateArgumentLoc(Arg,
@@ -1914,7 +1923,7 @@ getTrivialTemplateArgumentLoc(Sema &S,
case TemplateArgument::Declaration: {
Expr *E
= S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
- .takeAs<Expr>();
+ .takeAs<Expr>();
return TemplateArgumentLoc(TemplateArgument(E), E);
}
@@ -1950,7 +1959,7 @@ getTrivialTemplateArgumentLoc(Sema &S,
return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
}
- return TemplateArgumentLoc();
+ llvm_unreachable("Invalid TemplateArgument Kind!");
}
@@ -2021,7 +2030,8 @@ FinishTemplateArgumentDeduction(Sema &S,
const TemplateArgumentList &TemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
TemplateDeductionInfo &Info) {
- // Trap errors.
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
Sema::SFINAETrap Trap(S);
Sema::ContextRAII SavedContext(S, Partial);
@@ -2147,7 +2157,11 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
// argument list if the template arguments of the partial
// specialization can be deduced from the actual template argument
// list (14.8.2).
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
SFINAETrap Trap(*this);
+
SmallVector<DeducedTemplateArgument, 4> Deduced;
Deduced.resize(Partial->getTemplateParameters()->size());
if (TemplateDeductionResult Result
@@ -2228,8 +2242,8 @@ Sema::SubstituteExplicitTemplateArguments(
return TDK_Success;
}
- // Substitution of the explicit template arguments into a function template
- /// is a SFINAE context. Trap any errors that might occur.
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
SFINAETrap Trap(*this);
// C++ [temp.arg.explicit]p3:
@@ -2288,33 +2302,45 @@ Sema::SubstituteExplicitTemplateArguments(
}
}
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "Function template does not have a prototype?");
+
// Instantiate the types of each of the function parameters given the
- // explicitly-specified template arguments.
- if (SubstParmTypes(Function->getLocation(),
+ // explicitly-specified template arguments. If the function has a trailing
+ // return type, substitute it after the arguments to ensure we substitute
+ // in lexical order.
+ if (Proto->hasTrailingReturn() &&
+ SubstParmTypes(Function->getLocation(),
Function->param_begin(), Function->getNumParams(),
MultiLevelTemplateArgumentList(*ExplicitArgumentList),
ParamTypes))
return TDK_SubstitutionFailure;
- // If the caller wants a full function type back, instantiate the return
- // type and form that function type.
- if (FunctionType) {
- // FIXME: exception-specifications?
- const FunctionProtoType *Proto
- = Function->getType()->getAs<FunctionProtoType>();
- assert(Proto && "Function template does not have a prototype?");
-
- QualType ResultType
- = SubstType(Proto->getResultType(),
- MultiLevelTemplateArgumentList(*ExplicitArgumentList),
- Function->getTypeSpecStartLoc(),
- Function->getDeclName());
- if (ResultType.isNull() || Trap.hasErrorOccurred())
- return TDK_SubstitutionFailure;
+ // Instantiate the return type.
+ // FIXME: exception-specifications?
+ QualType ResultType
+ = SubstType(Proto->getResultType(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ Function->getTypeSpecStartLoc(),
+ Function->getDeclName());
+ if (ResultType.isNull() || Trap.hasErrorOccurred())
+ return TDK_SubstitutionFailure;
+ // Instantiate the types of each of the function parameters given the
+ // explicitly-specified template arguments if we didn't do so earlier.
+ if (!Proto->hasTrailingReturn() &&
+ SubstParmTypes(Function->getLocation(),
+ Function->param_begin(), Function->getNumParams(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ ParamTypes))
+ return TDK_SubstitutionFailure;
+
+ if (FunctionType) {
*FunctionType = BuildFunctionType(ResultType,
ParamTypes.data(), ParamTypes.size(),
Proto->isVariadic(),
+ Proto->hasTrailingReturn(),
Proto->getTypeQuals(),
Proto->getRefQualifier(),
Function->getLocation(),
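// Illustration (not part of the patch): substituting parameters before a
// trailing return type preserves lexical order, so the return type can refer
// to the substituted parameters; a minimal sketch.
template<typename T, typename U>
auto add(T t, U u) -> decltype(t + u) { return t + u; }
// add<int, double> substitutes the parameters first, then evaluates
// decltype(t + u) as double.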
@@ -2451,8 +2477,8 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
- // Template argument deduction for function templates in a SFINAE context.
- // Trap any errors that might occur.
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
SFINAETrap Trap(*this);
// Enter a new template instantiation context while we instantiate the
@@ -2681,36 +2707,48 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
if (R.IsAddressOfOperand)
TDF |= TDF_IgnoreQualifiers;
- // If there were explicit template arguments, we can only find
- // something via C++ [temp.arg.explicit]p3, i.e. if the arguments
- // unambiguously name a full specialization.
- if (Ovl->hasExplicitTemplateArgs()) {
- // But we can still look for an explicit specialization.
- if (FunctionDecl *ExplicitSpec
- = S.ResolveSingleFunctionTemplateSpecialization(Ovl))
- return GetTypeOfFunction(S.Context, R, ExplicitSpec);
- return QualType();
- }
-
// C++0x [temp.deduct.call]p6:
// When P is a function type, pointer to function type, or pointer
// to member function type:
if (!ParamType->isFunctionType() &&
!ParamType->isFunctionPointerType() &&
- !ParamType->isMemberFunctionPointerType())
- return QualType();
+ !ParamType->isMemberFunctionPointerType()) {
+ if (Ovl->hasExplicitTemplateArgs()) {
+ // But we can still look for an explicit specialization.
+ if (FunctionDecl *ExplicitSpec
+ = S.ResolveSingleFunctionTemplateSpecialization(Ovl))
+ return GetTypeOfFunction(S.Context, R, ExplicitSpec);
+ }
+ return QualType();
+ }
+
+ // Gather the explicit template arguments, if any.
+ TemplateArgumentListInfo ExplicitTemplateArgs;
+ if (Ovl->hasExplicitTemplateArgs())
+ Ovl->getExplicitTemplateArgs().copyInto(ExplicitTemplateArgs);
QualType Match;
for (UnresolvedSetIterator I = Ovl->decls_begin(),
E = Ovl->decls_end(); I != E; ++I) {
NamedDecl *D = (*I)->getUnderlyingDecl();
- // - If the argument is an overload set containing one or more
- // function templates, the parameter is treated as a
- // non-deduced context.
- if (isa<FunctionTemplateDecl>(D))
- return QualType();
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D)) {
+ // - If the argument is an overload set containing one or more
+ // function templates, the parameter is treated as a
+ // non-deduced context.
+ if (!Ovl->hasExplicitTemplateArgs())
+ return QualType();
+
+      // Otherwise, see if we can resolve a function type from the explicit
+      // template arguments.
+ FunctionDecl *Specialization = 0;
+ TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc());
+ if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs,
+ Specialization, Info))
+ continue;
+
+ D = Specialization;
+ }
FunctionDecl *Fn = cast<FunctionDecl>(D);
QualType ArgType = GetTypeOfFunction(S.Context, R, Fn);
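// Illustration (not part of the patch): the FunctionTemplateDecl branch above
// lets an overload set carrying explicit template arguments participate in
// deduction against a function-pointer parameter; a minimal sketch.
template<typename T> void callee(T);
template<typename U> void caller(void (*)(U));
// caller(callee<int>);   // callee<int> is resolved via DeduceTemplateArguments,
//                        // then U is deduced as int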
@@ -2737,9 +2775,8 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
Deduced(TemplateParams->size());
TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc());
Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- ParamType, ArgType,
- Info, Deduced, TDF);
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
+ ArgType, Info, Deduced, TDF);
if (Result) continue;
if (!Match.isNull()) return QualType();
Match = ArgType;
@@ -2866,6 +2903,44 @@ static bool hasDeducibleTemplateParameters(Sema &S,
FunctionTemplateDecl *FunctionTemplate,
QualType T);
+/// \brief Perform template argument deduction by matching a parameter type
+/// against a single expression, where the expression is an element of
+/// an initializer list that was originally matched against the argument
+/// type.
+static Sema::TemplateDeductionResult
+DeduceTemplateArgumentByListElement(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType ParamType, Expr *Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF) {
+ // Handle the case where an init list contains another init list as the
+ // element.
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+ QualType X;
+ if (!S.isStdInitializerList(ParamType.getNonReferenceType(), &X))
+ return Sema::TDK_Success; // Just ignore this expression.
+
+ // Recurse down into the init list.
+ for (unsigned i = 0, e = ILE->getNumInits(); i < e; ++i) {
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentByListElement(S, TemplateParams, X,
+ ILE->getInit(i),
+ Info, Deduced, TDF))
+ return Result;
+ }
+ return Sema::TDK_Success;
+ }
+
+ // For all other cases, just match by type.
+ QualType ArgType = Arg->getType();
+ if (AdjustFunctionParmAndArgTypesForDeduction(S, TemplateParams, ParamType,
+ ArgType, Arg, TDF))
+ return Sema::TDK_FailedOverloadResolution;
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
+ ArgType, Info, Deduced, TDF);
+}
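// Illustration (not part of the patch): the helper above recurses into nested
// initializer lists; a minimal sketch, assuming <initializer_list> is included.
template<typename T> void f(std::initializer_list<T>);
template<typename T> void g(std::initializer_list<std::initializer_list<T>>);
// f({1, 2, 3});       // T = int, deduced element by element
// g({{1}, {2, 3}});   // nested lists recurse through the helper; T = int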
+
/// \brief Perform template argument deduction from a function call
/// (C++ [temp.deduct.call]).
///
@@ -2895,7 +2970,7 @@ static bool hasDeducibleTemplateParameters(Sema &S,
Sema::TemplateDeductionResult
Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
- Expr **Args, unsigned NumArgs,
+ llvm::ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
TemplateDeductionInfo &Info) {
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
@@ -2904,10 +2979,10 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
// Template argument deduction is done by comparing each function template
// parameter type (call it P) with the type of the corresponding argument
// of the call (call it A) as described below.
- unsigned CheckArgs = NumArgs;
- if (NumArgs < Function->getMinRequiredArguments())
+ unsigned CheckArgs = Args.size();
+ if (Args.size() < Function->getMinRequiredArguments())
return TDK_TooFewArguments;
- else if (NumArgs > Function->getNumParams()) {
+ else if (Args.size() > Function->getNumParams()) {
const FunctionProtoType *Proto
= Function->getType()->getAs<FunctionProtoType>();
if (Proto->isTemplateVariadic())
@@ -2973,15 +3048,37 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
continue;
+ // If the argument is an initializer list ...
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+      // ... then the parameter is a non-deduced context, unless the parameter
+ // type is (reference to cv) std::initializer_list<P'>, in which case
+ // deduction is done for each element of the initializer list, and the
+ // result is the deduced type if it's the same for all elements.
+ QualType X;
+ // Removing references was already done.
+ if (!isStdInitializerList(ParamType, &X))
+ continue;
+
+ for (unsigned i = 0, e = ILE->getNumInits(); i < e; ++i) {
+ if (TemplateDeductionResult Result =
+ DeduceTemplateArgumentByListElement(*this, TemplateParams, X,
+ ILE->getInit(i),
+ Info, Deduced, TDF))
+ return Result;
+ }
+ // Don't track the argument type, since an initializer list has none.
+ continue;
+ }
+
// Keep track of the argument type and corresponding parameter index,
// so we can check for compatibility between the deduced A and A.
OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx-1,
ArgType));
if (TemplateDeductionResult Result
- = ::DeduceTemplateArguments(*this, TemplateParams,
- ParamType, ArgType, Info, Deduced,
- TDF))
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ ParamType, ArgType,
+ Info, Deduced, TDF))
return Result;
continue;
@@ -3002,7 +3099,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ParamPattern = ParamExpansion->getPattern();
SmallVector<unsigned, 2> PackIndices;
{
- llvm::BitVector SawIndices(TemplateParams->size());
+ llvm::SmallBitVector SawIndices(TemplateParams->size());
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
collectUnexpandedParameterPacks(ParamPattern, Unexpanded);
for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
@@ -3026,7 +3123,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
PrepareArgumentPackDeduction(*this, Deduced, PackIndices, SavedPacks,
NewlyDeducedPacks);
bool HasAnyArguments = false;
- for (; ArgIdx < NumArgs; ++ArgIdx) {
+ for (; ArgIdx < Args.size(); ++ArgIdx) {
HasAnyArguments = true;
QualType OrigParamType = ParamPattern;
@@ -3044,17 +3141,35 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
break;
}
- // Keep track of the argument type and corresponding argument index,
- // so we can check for compatibility between the deduced A and A.
- if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
- OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
- ArgType));
+ // As above, initializer lists need special handling.
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+ QualType X;
+ if (!isStdInitializerList(ParamType, &X)) {
+ ++ArgIdx;
+ break;
+ }
- if (TemplateDeductionResult Result
- = ::DeduceTemplateArguments(*this, TemplateParams,
- ParamType, ArgType, Info, Deduced,
- TDF))
- return Result;
+ for (unsigned i = 0, e = ILE->getNumInits(); i < e; ++i) {
+ if (TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams, X,
+ ILE->getInit(i)->getType(),
+ Info, Deduced, TDF))
+ return Result;
+ }
+ } else {
+
+ // Keep track of the argument type and corresponding argument index,
+ // so we can check for compatibility between the deduced A and A.
+ if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
+ ArgType));
+
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ ParamType, ArgType, Info,
+ Deduced, TDF))
+ return Result;
+ }
// Capture the deduced template arguments for each parameter pack expanded
// by this pack expansion, add them to the list of arguments we've deduced
@@ -3135,8 +3250,8 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
NumExplicitlySpecified = Deduced.size();
}
- // Template argument deduction for function templates in a SFINAE context.
- // Trap any errors that might occur.
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
SFINAETrap Trap(*this);
Deduced.resize(TemplateParams->size());
@@ -3144,7 +3259,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
if (!ArgFunctionType.isNull()) {
// Deduce template arguments from the function type.
if (TemplateDeductionResult Result
- = ::DeduceTemplateArguments(*this, TemplateParams,
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
FunctionType, ArgFunctionType, Info,
Deduced, TDF_TopLevelParameterTypeList))
return Result;
@@ -3220,8 +3335,8 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
A = A.getUnqualifiedType();
}
- // Template argument deduction for function templates in a SFINAE context.
- // Trap any errors that might occur.
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
SFINAETrap Trap(*this);
// C++ [temp.deduct.conv]p1:
@@ -3255,8 +3370,8 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
(P->isMemberPointerType() && A->isMemberPointerType()))
TDF |= TDF_IgnoreQualifiers;
if (TemplateDeductionResult Result
- = ::DeduceTemplateArguments(*this, TemplateParams,
- P, A, Info, Deduced, TDF))
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ P, A, Info, Deduced, TDF))
return Result;
// Finish template argument deduction.
@@ -3325,6 +3440,11 @@ namespace {
return Result;
}
}
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
};
}
@@ -3339,14 +3459,18 @@ namespace {
/// dependent. This will be set to null if deduction succeeded, but auto
/// substitution failed; the appropriate diagnostic will already have been
/// produced in that case.
-///
-/// \returns true if deduction succeeded, false if it failed.
-bool
-Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *Init,
+Sema::DeduceAutoResult
+Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init,
TypeSourceInfo *&Result) {
+ if (Init->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(Init);
+ if (result.isInvalid()) return DAR_FailedAlreadyDiagnosed;
+ Init = result.take();
+ }
+
if (Init->isTypeDependent()) {
Result = Type;
- return true;
+ return DAR_Succeeded;
}
SourceLocation Loc = Init->getExprLoc();
@@ -3372,41 +3496,70 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *Init,
Deduced.resize(1);
QualType InitType = Init->getType();
unsigned TDF = 0;
- if (AdjustFunctionParmAndArgTypesForDeduction(*this, &TemplateParams,
- FuncParam, InitType, Init,
- TDF))
- return false;
TemplateDeductionInfo Info(Context, Loc);
- if (::DeduceTemplateArguments(*this, &TemplateParams,
- FuncParam, InitType, Info, Deduced,
- TDF))
- return false;
+
+  InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
+ if (InitList) {
+ for (unsigned i = 0, e = InitList->getNumInits(); i < e; ++i) {
+ if (DeduceTemplateArgumentByListElement(*this, &TemplateParams,
+ TemplArg,
+ InitList->getInit(i),
+ Info, Deduced, TDF))
+ return DAR_Failed;
+ }
+ } else {
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, &TemplateParams,
+ FuncParam, InitType, Init,
+ TDF))
+ return DAR_Failed;
+
+ if (DeduceTemplateArgumentsByTypeMatch(*this, &TemplateParams, FuncParam,
+ InitType, Info, Deduced, TDF))
+ return DAR_Failed;
+ }
QualType DeducedType = Deduced[0].getAsType();
if (DeducedType.isNull())
- return false;
-
+ return DAR_Failed;
+
+ if (InitList) {
+ DeducedType = BuildStdInitializerList(DeducedType, Loc);
+ if (DeducedType.isNull())
+ return DAR_FailedAlreadyDiagnosed;
+ }
+
Result = SubstituteAutoTransform(*this, DeducedType).TransformType(Type);
-
+
// Check that the deduced argument type is compatible with the original
// argument type per C++ [temp.deduct.call]p4.
- if (Result &&
+ if (!InitList && Result &&
CheckOriginalCallArgDeduction(*this,
Sema::OriginalCallArg(FuncParam,0,InitType),
Result->getType())) {
Result = 0;
- return false;
+ return DAR_Failed;
}
- return true;
+ return DAR_Succeeded;
+}
+
+void Sema::DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init) {
+ if (isa<InitListExpr>(Init))
+ Diag(VDecl->getLocation(),
+ diag::err_auto_var_deduction_failure_from_init_list)
+ << VDecl->getDeclName() << VDecl->getType() << Init->getSourceRange();
+ else
+ Diag(VDecl->getLocation(), diag::err_auto_var_deduction_failure)
+ << VDecl->getDeclName() << VDecl->getType() << Init->getType()
+ << Init->getSourceRange();
}
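// Illustration (not part of the patch): DeduceAutoType now handles braced
// initializers and reports failure through DeduceAutoResult; a minimal sketch.
// auto x = {1, 2, 3};    // deduced as std::initializer_list<int>
//                        // via BuildStdInitializerList
// auto y = {1, "two"};   // DAR_Failed; DiagnoseAutoDeductionFailure emits
//                        // err_auto_var_deduction_failure_from_init_list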
static void
-MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
+MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
bool OnlyDeduced,
unsigned Level,
- SmallVectorImpl<bool> &Deduced);
+ llvm::SmallBitVector &Deduced);
/// \brief If this is a non-static member function, add the type of the
/// implicit object parameter to the given list of argument types.
static void MaybeAddImplicitObjectParameterType(ASTContext &Context,
@@ -3481,17 +3634,17 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
// first argument of the free function or static member, which
// seems to match existing practice.
SmallVector<QualType, 4> Args1;
- unsigned Skip1 = !S.getLangOptions().CPlusPlus0x &&
+ unsigned Skip1 = !S.getLangOpts().CPlusPlus0x &&
IsNonStatic2 && !IsNonStatic1;
- if (S.getLangOptions().CPlusPlus0x && IsNonStatic1 && !IsNonStatic2)
+ if (S.getLangOpts().CPlusPlus0x && IsNonStatic1 && !IsNonStatic2)
MaybeAddImplicitObjectParameterType(S.Context, Method1, Args1);
Args1.insert(Args1.end(),
Proto1->arg_type_begin() + Skip1, Proto1->arg_type_end());
SmallVector<QualType, 4> Args2;
- Skip2 = !S.getLangOptions().CPlusPlus0x &&
+ Skip2 = !S.getLangOpts().CPlusPlus0x &&
IsNonStatic1 && !IsNonStatic2;
- if (S.getLangOptions().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
+ if (S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
MaybeAddImplicitObjectParameterType(S.Context, Method2, Args2);
Args2.insert(Args2.end(),
Proto2->arg_type_begin() + Skip2, Proto2->arg_type_end());
@@ -3515,19 +3668,23 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
case TPOC_Conversion:
// - In the context of a call to a conversion operator, the return types
// of the conversion function templates are used.
- if (DeduceTemplateArguments(S, TemplateParams, Proto2->getResultType(),
- Proto1->getResultType(), Info, Deduced,
- TDF_None, /*PartialOrdering=*/true,
- RefParamComparisons))
+ if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ Proto2->getResultType(),
+ Proto1->getResultType(),
+ Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true,
+ RefParamComparisons))
return false;
break;
case TPOC_Other:
// - In other contexts (14.6.6.2) the function template's function type
// is used.
- if (DeduceTemplateArguments(S, TemplateParams, FD2->getType(),
- FD1->getType(), Info, Deduced, TDF_None,
- /*PartialOrdering=*/true, RefParamComparisons))
+ if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ FD2->getType(), FD1->getType(),
+ Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true,
+ RefParamComparisons))
return false;
break;
}
@@ -3550,31 +3707,31 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
}
// Figure out which template parameters were used.
- SmallVector<bool, 4> UsedParameters;
- UsedParameters.resize(TemplateParams->size());
+ llvm::SmallBitVector UsedParameters(TemplateParams->size());
switch (TPOC) {
case TPOC_Call: {
unsigned NumParams = std::min(NumCallArguments,
std::min(Proto1->getNumArgs(),
Proto2->getNumArgs()));
- if (S.getLangOptions().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
- ::MarkUsedTemplateParameters(S, Method2->getThisType(S.Context), false,
+ if (S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
+ ::MarkUsedTemplateParameters(S.Context, Method2->getThisType(S.Context),
+ false,
TemplateParams->getDepth(), UsedParameters);
for (unsigned I = Skip2; I < NumParams; ++I)
- ::MarkUsedTemplateParameters(S, Proto2->getArgType(I), false,
+ ::MarkUsedTemplateParameters(S.Context, Proto2->getArgType(I), false,
TemplateParams->getDepth(),
UsedParameters);
break;
}
case TPOC_Conversion:
- ::MarkUsedTemplateParameters(S, Proto2->getResultType(), false,
+ ::MarkUsedTemplateParameters(S.Context, Proto2->getResultType(), false,
TemplateParams->getDepth(),
UsedParameters);
break;
case TPOC_Other:
- ::MarkUsedTemplateParameters(S, FD2->getType(), false,
+ ::MarkUsedTemplateParameters(S.Context, FD2->getType(), false,
TemplateParams->getDepth(),
UsedParameters);
break;
@@ -3777,7 +3934,8 @@ Sema::getMostSpecialized(UnresolvedSetIterator SpecBegin,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
- bool Complain) {
+ bool Complain,
+ QualType TargetType) {
if (SpecBegin == SpecEnd) {
if (Complain)
Diag(Loc, NoneDiag);
@@ -3831,11 +3989,16 @@ Sema::getMostSpecialized(UnresolvedSetIterator SpecBegin,
if (Complain)
// FIXME: Can we order the candidates in some sane way?
- for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I)
- Diag((*I)->getLocation(), CandidateDiag)
- << getTemplateArgumentBindingsText(
+ for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) {
+ PartialDiagnostic PD = CandidateDiag;
+ PD << getTemplateArgumentBindingsText(
cast<FunctionDecl>(*I)->getPrimaryTemplate()->getTemplateParameters(),
*cast<FunctionDecl>(*I)->getTemplateSpecializationArgs());
+ if (!TargetType.isNull())
+ HandleFunctionTypeMismatch(PD, cast<FunctionDecl>(*I)->getType(),
+ TargetType);
+ Diag((*I)->getLocation(), PD);
+ }
return SpecEnd;
}
@@ -3887,7 +4050,8 @@ Sema::getMoreSpecializedPartialSpecialization(
// Determine whether PS1 is at least as specialized as PS2
Deduced.resize(PS2->getTemplateParameters()->size());
- bool Better1 = !::DeduceTemplateArguments(*this, PS2->getTemplateParameters(),
+ bool Better1 = !DeduceTemplateArgumentsByTypeMatch(*this,
+ PS2->getTemplateParameters(),
PT2, PT1, Info, Deduced, TDF_None,
/*PartialOrdering=*/true,
/*RefParamComparisons=*/0);
@@ -3902,7 +4066,8 @@ Sema::getMoreSpecializedPartialSpecialization(
// Determine whether PS2 is at least as specialized as PS1
Deduced.clear();
Deduced.resize(PS1->getTemplateParameters()->size());
- bool Better2 = !::DeduceTemplateArguments(*this, PS1->getTemplateParameters(),
+ bool Better2 = !DeduceTemplateArgumentsByTypeMatch(*this,
+ PS1->getTemplateParameters(),
PT1, PT2, Info, Deduced, TDF_None,
/*PartialOrdering=*/true,
/*RefParamComparisons=*/0);
@@ -3921,20 +4086,20 @@ Sema::getMoreSpecializedPartialSpecialization(
}
static void
-MarkUsedTemplateParameters(Sema &SemaRef,
+MarkUsedTemplateParameters(ASTContext &Ctx,
const TemplateArgument &TemplateArg,
bool OnlyDeduced,
unsigned Depth,
- SmallVectorImpl<bool> &Used);
+ llvm::SmallBitVector &Used);
/// \brief Mark the template parameters that are used by the given
/// expression.
static void
-MarkUsedTemplateParameters(Sema &SemaRef,
+MarkUsedTemplateParameters(ASTContext &Ctx,
const Expr *E,
bool OnlyDeduced,
unsigned Depth,
- SmallVectorImpl<bool> &Used) {
+ llvm::SmallBitVector &Used) {
// We can deduce from a pack expansion.
if (const PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(E))
E = Expansion->getPattern();
@@ -3961,28 +4126,28 @@ MarkUsedTemplateParameters(Sema &SemaRef,
/// \brief Mark the template parameters that are used by the given
/// nested name specifier.
static void
-MarkUsedTemplateParameters(Sema &SemaRef,
+MarkUsedTemplateParameters(ASTContext &Ctx,
NestedNameSpecifier *NNS,
bool OnlyDeduced,
unsigned Depth,
- SmallVectorImpl<bool> &Used) {
+ llvm::SmallBitVector &Used) {
if (!NNS)
return;
- MarkUsedTemplateParameters(SemaRef, NNS->getPrefix(), OnlyDeduced, Depth,
+ MarkUsedTemplateParameters(Ctx, NNS->getPrefix(), OnlyDeduced, Depth,
Used);
- MarkUsedTemplateParameters(SemaRef, QualType(NNS->getAsType(), 0),
+ MarkUsedTemplateParameters(Ctx, QualType(NNS->getAsType(), 0),
OnlyDeduced, Depth, Used);
}
/// \brief Mark the template parameters that are used by the given
/// template name.
static void
-MarkUsedTemplateParameters(Sema &SemaRef,
+MarkUsedTemplateParameters(ASTContext &Ctx,
TemplateName Name,
bool OnlyDeduced,
unsigned Depth,
- SmallVectorImpl<bool> &Used) {
+ llvm::SmallBitVector &Used) {
if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(Template)) {
@@ -3993,20 +4158,20 @@ MarkUsedTemplateParameters(Sema &SemaRef,
}
if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName())
- MarkUsedTemplateParameters(SemaRef, QTN->getQualifier(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, QTN->getQualifier(), OnlyDeduced,
Depth, Used);
if (DependentTemplateName *DTN = Name.getAsDependentTemplateName())
- MarkUsedTemplateParameters(SemaRef, DTN->getQualifier(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, DTN->getQualifier(), OnlyDeduced,
Depth, Used);
}
/// \brief Mark the template parameters that are used by the given
/// type.
static void
-MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
+MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
bool OnlyDeduced,
unsigned Depth,
- SmallVectorImpl<bool> &Used) {
+ llvm::SmallBitVector &Used) {
if (T.isNull())
return;
@@ -4014,10 +4179,10 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
if (!T->isDependentType())
return;
- T = SemaRef.Context.getCanonicalType(T);
+ T = Ctx.getCanonicalType(T);
switch (T->getTypeClass()) {
case Type::Pointer:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<PointerType>(T)->getPointeeType(),
OnlyDeduced,
Depth,
@@ -4025,7 +4190,7 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
break;
case Type::BlockPointer:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<BlockPointerType>(T)->getPointeeType(),
OnlyDeduced,
Depth,
@@ -4034,7 +4199,7 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
case Type::LValueReference:
case Type::RValueReference:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<ReferenceType>(T)->getPointeeType(),
OnlyDeduced,
Depth,
@@ -4043,29 +4208,29 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
case Type::MemberPointer: {
const MemberPointerType *MemPtr = cast<MemberPointerType>(T.getTypePtr());
- MarkUsedTemplateParameters(SemaRef, MemPtr->getPointeeType(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, MemPtr->getPointeeType(), OnlyDeduced,
Depth, Used);
- MarkUsedTemplateParameters(SemaRef, QualType(MemPtr->getClass(), 0),
+ MarkUsedTemplateParameters(Ctx, QualType(MemPtr->getClass(), 0),
OnlyDeduced, Depth, Used);
break;
}
case Type::DependentSizedArray:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<DependentSizedArrayType>(T)->getSizeExpr(),
OnlyDeduced, Depth, Used);
// Fall through to check the element type
case Type::ConstantArray:
case Type::IncompleteArray:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<ArrayType>(T)->getElementType(),
OnlyDeduced, Depth, Used);
break;
case Type::Vector:
case Type::ExtVector:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<VectorType>(T)->getElementType(),
OnlyDeduced, Depth, Used);
break;
@@ -4073,19 +4238,19 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
case Type::DependentSizedExtVector: {
const DependentSizedExtVectorType *VecType
= cast<DependentSizedExtVectorType>(T);
- MarkUsedTemplateParameters(SemaRef, VecType->getElementType(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, VecType->getElementType(), OnlyDeduced,
Depth, Used);
- MarkUsedTemplateParameters(SemaRef, VecType->getSizeExpr(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, VecType->getSizeExpr(), OnlyDeduced,
Depth, Used);
break;
}
case Type::FunctionProto: {
const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
- MarkUsedTemplateParameters(SemaRef, Proto->getResultType(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, Proto->getResultType(), OnlyDeduced,
Depth, Used);
for (unsigned I = 0, N = Proto->getNumArgs(); I != N; ++I)
- MarkUsedTemplateParameters(SemaRef, Proto->getArgType(I), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, Proto->getArgType(I), OnlyDeduced,
Depth, Used);
break;
}
@@ -4100,10 +4265,10 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
case Type::SubstTemplateTypeParmPack: {
const SubstTemplateTypeParmPackType *Subst
= cast<SubstTemplateTypeParmPackType>(T);
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
QualType(Subst->getReplacedParameter(), 0),
OnlyDeduced, Depth, Used);
- MarkUsedTemplateParameters(SemaRef, Subst->getArgumentPack(),
+ MarkUsedTemplateParameters(Ctx, Subst->getArgumentPack(),
OnlyDeduced, Depth, Used);
break;
}
@@ -4115,7 +4280,7 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
case Type::TemplateSpecialization: {
const TemplateSpecializationType *Spec
= cast<TemplateSpecializationType>(T);
- MarkUsedTemplateParameters(SemaRef, Spec->getTemplateName(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, Spec->getTemplateName(), OnlyDeduced,
Depth, Used);
// C++0x [temp.deduct.type]p9:
@@ -4127,28 +4292,28 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
break;
for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
- MarkUsedTemplateParameters(SemaRef, Spec->getArg(I), OnlyDeduced, Depth,
+ MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
Used);
break;
}
case Type::Complex:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<ComplexType>(T)->getElementType(),
OnlyDeduced, Depth, Used);
break;
case Type::Atomic:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<AtomicType>(T)->getValueType(),
OnlyDeduced, Depth, Used);
break;
case Type::DependentName:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<DependentNameType>(T)->getQualifier(),
OnlyDeduced, Depth, Used);
break;
@@ -4157,7 +4322,7 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
const DependentTemplateSpecializationType *Spec
= cast<DependentTemplateSpecializationType>(T);
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef, Spec->getQualifier(),
+ MarkUsedTemplateParameters(Ctx, Spec->getQualifier(),
OnlyDeduced, Depth, Used);
// C++0x [temp.deduct.type]p9:
@@ -4169,47 +4334,47 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
break;
for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
- MarkUsedTemplateParameters(SemaRef, Spec->getArg(I), OnlyDeduced, Depth,
+ MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
Used);
break;
}
case Type::TypeOf:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<TypeOfType>(T)->getUnderlyingType(),
OnlyDeduced, Depth, Used);
break;
case Type::TypeOfExpr:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<TypeOfExprType>(T)->getUnderlyingExpr(),
OnlyDeduced, Depth, Used);
break;
case Type::Decltype:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<DecltypeType>(T)->getUnderlyingExpr(),
OnlyDeduced, Depth, Used);
break;
case Type::UnaryTransform:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<UnaryTransformType>(T)->getUnderlyingType(),
OnlyDeduced, Depth, Used);
break;
case Type::PackExpansion:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<PackExpansionType>(T)->getPattern(),
OnlyDeduced, Depth, Used);
break;
case Type::Auto:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
cast<AutoType>(T)->getDeducedType(),
OnlyDeduced, Depth, Used);
@@ -4235,31 +4400,31 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
/// \brief Mark the template parameters that are used by this
/// template argument.
static void
-MarkUsedTemplateParameters(Sema &SemaRef,
+MarkUsedTemplateParameters(ASTContext &Ctx,
const TemplateArgument &TemplateArg,
bool OnlyDeduced,
unsigned Depth,
- SmallVectorImpl<bool> &Used) {
+ llvm::SmallBitVector &Used) {
switch (TemplateArg.getKind()) {
case TemplateArgument::Null:
case TemplateArgument::Integral:
- case TemplateArgument::Declaration:
+ case TemplateArgument::Declaration:
break;
case TemplateArgument::Type:
- MarkUsedTemplateParameters(SemaRef, TemplateArg.getAsType(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, TemplateArg.getAsType(), OnlyDeduced,
Depth, Used);
break;
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
- MarkUsedTemplateParameters(SemaRef,
+ MarkUsedTemplateParameters(Ctx,
TemplateArg.getAsTemplateOrTemplatePattern(),
OnlyDeduced, Depth, Used);
break;
case TemplateArgument::Expression:
- MarkUsedTemplateParameters(SemaRef, TemplateArg.getAsExpr(), OnlyDeduced,
+ MarkUsedTemplateParameters(Ctx, TemplateArg.getAsExpr(), OnlyDeduced,
Depth, Used);
break;
@@ -4267,7 +4432,7 @@ MarkUsedTemplateParameters(Sema &SemaRef,
for (TemplateArgument::pack_iterator P = TemplateArg.pack_begin(),
PEnd = TemplateArg.pack_end();
P != PEnd; ++P)
- MarkUsedTemplateParameters(SemaRef, *P, OnlyDeduced, Depth, Used);
+ MarkUsedTemplateParameters(Ctx, *P, OnlyDeduced, Depth, Used);
break;
}
}
@@ -4284,7 +4449,7 @@ MarkUsedTemplateParameters(Sema &SemaRef,
void
Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced, unsigned Depth,
- SmallVectorImpl<bool> &Used) {
+ llvm::SmallBitVector &Used) {
// C++0x [temp.deduct.type]p9:
// If the template argument list of P contains a pack expansion that is not
// the last template argument, the entire template argument list is a
@@ -4294,15 +4459,16 @@ Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
return;
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
- ::MarkUsedTemplateParameters(*this, TemplateArgs[I], OnlyDeduced,
+ ::MarkUsedTemplateParameters(Context, TemplateArgs[I], OnlyDeduced,
Depth, Used);
}
/// \brief Marks all of the template parameters that will be deduced by a
/// call to the given function template.
void
-Sema::MarkDeducedTemplateParameters(FunctionTemplateDecl *FunctionTemplate,
- SmallVectorImpl<bool> &Deduced) {
+Sema::MarkDeducedTemplateParameters(ASTContext &Ctx,
+ FunctionTemplateDecl *FunctionTemplate,
+ llvm::SmallBitVector &Deduced) {
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
Deduced.clear();
@@ -4310,7 +4476,7 @@ Sema::MarkDeducedTemplateParameters(FunctionTemplateDecl *FunctionTemplate,
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I)
- ::MarkUsedTemplateParameters(*this, Function->getParamDecl(I)->getType(),
+ ::MarkUsedTemplateParameters(Ctx, Function->getParamDecl(I)->getType(),
true, TemplateParams->getDepth(), Deduced);
}
@@ -4322,14 +4488,9 @@ bool hasDeducibleTemplateParameters(Sema &S,
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
- SmallVector<bool, 4> Deduced;
- Deduced.resize(TemplateParams->size());
- ::MarkUsedTemplateParameters(S, T, true, TemplateParams->getDepth(),
+ llvm::SmallBitVector Deduced(TemplateParams->size());
+ ::MarkUsedTemplateParameters(S.Context, T, true, TemplateParams->getDepth(),
Deduced);
- for (unsigned I = 0, N = Deduced.size(); I != N; ++I)
- if (Deduced[I])
- return true;
-
- return false;
+ return Deduced.any();
}
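
[Editor's note] The long run of hunks above makes two mechanical changes to the MarkUsedTemplateParameters family: it narrows the first parameter from Sema& to ASTContext& (only getCanonicalType is needed), and it replaces SmallVectorImpl<bool> with llvm::SmallBitVector, which lets the final "was anything deduced?" scan collapse into a single Deduced.any() call. A minimal standalone sketch of the bit-vector idiom, with illustrative names that are not from the patch:

#include "llvm/ADT/SmallBitVector.h"

// Returns true if any of the first N template parameters was marked used.
static bool anyParameterUsed(unsigned N) {
  llvm::SmallBitVector Used(N);   // N bits, all initially false
  if (N > 2)
    Used.set(2);                  // mark parameter #2 as used
  return Used.any();              // one call replaces a hand-written loop
}
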
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 301bf6a..4740145 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -163,8 +163,8 @@ bool Sema::ActiveTemplateInstantiation::isInstantiationRecord() const {
case DefaultTemplateArgumentChecking:
return false;
}
-
- return true;
+
+ llvm_unreachable("Invalid InstantiationKind!");
}
Sema::InstantiatingTemplate::
@@ -404,15 +404,15 @@ bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
SemaRef.ActiveTemplateInstantiations.size());
if ((SemaRef.ActiveTemplateInstantiations.size() -
SemaRef.NonInstantiationEntries)
- <= SemaRef.getLangOptions().InstantiationDepth)
+ <= SemaRef.getLangOpts().InstantiationDepth)
return false;
SemaRef.Diag(PointOfInstantiation,
diag::err_template_recursion_depth_exceeded)
- << SemaRef.getLangOptions().InstantiationDepth
+ << SemaRef.getLangOpts().InstantiationDepth
<< InstantiationRange;
SemaRef.Diag(PointOfInstantiation, diag::note_template_recursion_depth)
- << SemaRef.getLangOptions().InstantiationDepth;
+ << SemaRef.getLangOpts().InstantiationDepth;
return true;
}
@@ -469,6 +469,11 @@ void Sema::PrintInstantiationStack() {
diag::note_template_static_data_member_def_here)
<< VD
<< Active->InstantiationRange;
+ } else if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_enum_def_here)
+ << ED
+ << Active->InstantiationRange;
} else {
Diags.Report(Active->PointOfInstantiation,
diag::note_template_type_alias_instantiation_here)
@@ -732,6 +737,14 @@ namespace {
/// this declaration.
Decl *TransformDecl(SourceLocation Loc, Decl *D);
+ void transformAttrs(Decl *Old, Decl *New) {
+ SemaRef.InstantiateAttrs(TemplateArgs, Old, New);
+ }
+
+ void transformedLocalDecl(Decl *Old, Decl *New) {
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(Old, New);
+ }
+
/// \brief Transform the definition of the given declaration by
/// instantiating it.
Decl *TransformDefinition(SourceLocation Loc, Decl *D);
@@ -778,7 +791,8 @@ namespace {
FunctionProtoTypeLoc TL);
ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
int indexAdjustment,
- llvm::Optional<unsigned> NumExpansions);
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
/// \brief Transforms a template type parameter type by performing
/// substitution of the corresponding template type argument.
@@ -1099,15 +1113,21 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
type = argExpr->getType();
} else if (arg.getKind() == TemplateArgument::Declaration) {
- ValueDecl *VD = cast<ValueDecl>(arg.getAsDecl());
-
- // Find the instantiation of the template argument. This is
- // required for nested templates.
- VD = cast_or_null<ValueDecl>(
- getSema().FindInstantiatedDecl(loc, VD, TemplateArgs));
- if (!VD)
- return ExprError();
-
+ ValueDecl *VD;
+ if (Decl *D = arg.getAsDecl()) {
+ VD = cast<ValueDecl>(D);
+
+ // Find the instantiation of the template argument. This is
+ // required for nested templates.
+ VD = cast_or_null<ValueDecl>(
+ getSema().FindInstantiatedDecl(loc, VD, TemplateArgs));
+ if (!VD)
+ return ExprError();
+ } else {
+ // Propagate NULL template argument.
+ VD = 0;
+ }
+
// Derive the type we want the substituted decl to have. This had
// better be non-dependent, or these checks will have serious problems.
if (parm->isExpandedParameterPack()) {
@@ -1194,9 +1214,10 @@ QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
ParmVarDecl *
TemplateInstantiator::TransformFunctionTypeParam(ParmVarDecl *OldParm,
int indexAdjustment,
- llvm::Optional<unsigned> NumExpansions) {
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
return SemaRef.SubstParmVarDecl(OldParm, TemplateArgs, indexAdjustment,
- NumExpansions);
+ NumExpansions, ExpectParameterPack);
}
QualType
@@ -1450,7 +1471,8 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
- llvm::Optional<unsigned> NumExpansions) {
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
TypeSourceInfo *NewDI = 0;
@@ -1471,7 +1493,16 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
// Therefore, make its type a pack expansion type.
NewDI = CheckPackExpansion(NewDI, ExpansionTL.getEllipsisLoc(),
NumExpansions);
- }
+ } else if (ExpectParameterPack) {
+ // We expected to get a parameter pack but didn't (because the type
+ // itself is not a pack expansion type), so complain. This can occur when
+ // the substitution goes through an alias template that "loses" the
+ // pack expansion.
+ Diag(OldParm->getLocation(),
+ diag::err_function_parameter_pack_without_parameter_packs)
+ << NewDI->getType();
+ return 0;
+ }
} else {
NewDI = SubstType(OldDI, TemplateArgs, OldParm->getLocation(),
OldParm->getDeclName());
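
[Editor's note] The ExpectParameterPack path added above diagnoses a corner case in which a declared function parameter pack stops being one after substitution, because an alias template discards the pack. A hedged illustration of the kind of source the new diagnostic targets (my own example, not taken from the commit):

template<typename T> using AlwaysInt = int;  // alias ignores its argument

// 'AlwaysInt<Ts>...' syntactically mentions the pack Ts, so parsing accepts
// it; substitution through the alias yields plain 'int', "losing" the pack
// expansion and firing err_function_parameter_pack_without_parameter_packs.
template<typename ...Ts> void f(AlwaysInt<Ts> ...args);
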
@@ -1506,11 +1537,9 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
NewParm->setUninstantiatedDefaultArg(Arg);
NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg());
-
- // FIXME: When OldParm is a parameter pack and NewParm is not a parameter
- // pack, we actually have a set of instantiated locations. Maintain this set!
+
if (OldParm->isParameterPack() && !NewParm->isParameterPack()) {
- // Add the new parameter to
+ // Add the new parameter to the instantiated parameter pack.
CurrentInstantiationScope->InstantiatedLocalPackArg(OldParm, NewParm);
} else {
// Introduce an Old -> New mapping
@@ -1654,6 +1683,59 @@ Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
return Invalid;
}
+// Defined via #include from SemaTemplateInstantiateDecl.cpp
+namespace clang {
+ namespace sema {
+ Attr *instantiateTemplateAttribute(const Attr *At, ASTContext &C, Sema &S,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+ }
+}
+
+/// Determine whether we would be unable to instantiate this template (because
+/// it either has no definition, or is in the process of being instantiated).
+static bool DiagnoseUninstantiableTemplate(Sema &S,
+ SourceLocation PointOfInstantiation,
+ TagDecl *Instantiation,
+ bool InstantiatedFromMember,
+ TagDecl *Pattern,
+ TagDecl *PatternDef,
+ TemplateSpecializationKind TSK,
+ bool Complain = true) {
+ if (PatternDef && !PatternDef->isBeingDefined())
+ return false;
+
+ if (!Complain || (PatternDef && PatternDef->isInvalidDecl())) {
+ // Say nothing
+ } else if (PatternDef) {
+ assert(PatternDef->isBeingDefined());
+ S.Diag(PointOfInstantiation,
+ diag::err_template_instantiate_within_definition)
+ << (TSK != TSK_ImplicitInstantiation)
+ << S.Context.getTypeDeclType(Instantiation);
+ // Not much point in noting the template declaration here, since
+ // we're lexically inside it.
+ Instantiation->setInvalidDecl();
+ } else if (InstantiatedFromMember) {
+ S.Diag(PointOfInstantiation,
+ diag::err_implicit_instantiate_member_undefined)
+ << S.Context.getTypeDeclType(Instantiation);
+ S.Diag(Pattern->getLocation(), diag::note_member_of_template_here);
+ } else {
+ S.Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
+ << (TSK != TSK_ImplicitInstantiation)
+ << S.Context.getTypeDeclType(Instantiation);
+ S.Diag(Pattern->getLocation(), diag::note_template_decl_here);
+ }
+
+ // In general, Instantiation isn't marked invalid to get more than one
+ // error for multiple undefined instantiations. But the code that does
+ // explicit declaration -> explicit definition conversion can't handle
+ // invalid declarations, so mark as invalid in that case.
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ Instantiation->setInvalidDecl();
+ return true;
+}
+
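
[Editor's note] The factored-out DiagnoseUninstantiableTemplate helper above covers the classic failure modes: instantiating a template that has no definition, instantiating a member whose pattern is undefined, and instantiating a template from inside its own definition. The simplest case, as illustrative code rather than anything from the patch:

template<typename T> struct Undefined;  // declared, never defined

Undefined<int> u;  // error: implicit instantiation of undefined template
                   // note: template is declared here
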
/// \brief Instantiate the definition of a class from a given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
@@ -1686,31 +1768,10 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *PatternDef
= cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
- if (!PatternDef || PatternDef->isBeingDefined()) {
- if (!Complain || (PatternDef && PatternDef->isInvalidDecl())) {
- // Say nothing
- } else if (PatternDef) {
- assert(PatternDef->isBeingDefined());
- Diag(PointOfInstantiation,
- diag::err_template_instantiate_within_definition)
- << (TSK != TSK_ImplicitInstantiation)
- << Context.getTypeDeclType(Instantiation);
- // Not much point in noting the template declaration here, since
- // we're lexically inside it.
- Instantiation->setInvalidDecl();
- } else if (Pattern == Instantiation->getInstantiatedFromMemberClass()) {
- Diag(PointOfInstantiation,
- diag::err_implicit_instantiate_member_undefined)
- << Context.getTypeDeclType(Instantiation);
- Diag(Pattern->getLocation(), diag::note_member_of_template_here);
- } else {
- Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
- << (TSK != TSK_ImplicitInstantiation)
- << Context.getTypeDeclType(Instantiation);
- Diag(Pattern->getLocation(), diag::note_template_decl_here);
- }
+ if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
+ Instantiation->getInstantiatedFromMemberClass(),
+ Pattern, PatternDef, TSK, Complain))
return true;
- }
Pattern = PatternDef;
// \brief Record the point of instantiation.
@@ -1719,7 +1780,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
MSInfo->setTemplateSpecializationKind(TSK);
MSInfo->setPointOfInstantiation(PointOfInstantiation);
} else if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Instantiation)) {
+ = dyn_cast<ClassTemplateSpecializationDecl>(Instantiation)) {
Spec->setTemplateSpecializationKind(TSK);
Spec->setPointOfInstantiation(PointOfInstantiation);
}
@@ -1756,6 +1817,10 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
SmallVector<Decl*, 4> Fields;
SmallVector<std::pair<FieldDecl*, FieldDecl*>, 4>
FieldsWithMemberInitializers;
+ // Delay instantiation of late parsed attributes.
+ LateInstantiatedAttrVec LateAttrs;
+ Instantiator.enableLateAttributeInstantiation(&LateAttrs);
+
for (RecordDecl::decl_iterator Member = Pattern->decls_begin(),
MemberEnd = Pattern->decls_end();
Member != MemberEnd; ++Member) {
@@ -1784,7 +1849,21 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
if (OldField->getInClassInitializer())
FieldsWithMemberInitializers.push_back(std::make_pair(OldField,
Field));
- } else if (NewMember->isInvalidDecl())
+ } else if (EnumDecl *Enum = dyn_cast<EnumDecl>(NewMember)) {
+ // C++11 [temp.inst]p1: The implicit instantiation of a class template
+ // specialization causes the implicit instantiation of the definitions
+ // of unscoped member enumerations.
+ // Record a point of instantiation for this implicit instantiation.
+ if (TSK == TSK_ImplicitInstantiation && !Enum->isScoped() &&
+ Enum->isCompleteDefinition()) {
+ MemberSpecializationInfo *MSInfo = Enum->getMemberSpecializationInfo();
+ assert(MSInfo && "no spec info for member enum specialization");
+ MSInfo->setTemplateSpecializationKind(TSK_ImplicitInstantiation);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+ }
+
+ if (NewMember->isInvalidDecl())
Invalid = true;
} else {
// FIXME: Eventually, a NULL return will mean that one of the
@@ -1804,22 +1883,42 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
FieldDecl *NewField = FieldsWithMemberInitializers[I].second;
Expr *OldInit = OldField->getInClassInitializer();
- SourceLocation LParenLoc, RParenLoc;
- ASTOwningVector<Expr*> NewArgs(*this);
- if (InstantiateInitializer(OldInit, TemplateArgs, LParenLoc, NewArgs,
- RParenLoc))
+ ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
+ /*CXXDirectInit=*/false);
+ if (NewInit.isInvalid())
NewField->setInvalidDecl();
else {
- assert(NewArgs.size() == 1 && "wrong number of in-class initializers");
- ActOnCXXInClassMemberInitializer(NewField, LParenLoc, NewArgs[0]);
+ Expr *Init = NewInit.take();
+ assert(Init && "no-argument initializer in class");
+ assert(!isa<ParenListExpr>(Init) && "call-style init in class");
+ ActOnCXXInClassMemberInitializer(NewField,
+ Init->getSourceRange().getBegin(), Init);
}
}
+ // Instantiate late parsed attributes, and attach them to their decls.
+ // See Sema::InstantiateAttrs
+ for (LateInstantiatedAttrVec::iterator I = LateAttrs.begin(),
+ E = LateAttrs.end(); I != E; ++I) {
+ assert(CurrentInstantiationScope == Instantiator.getStartingScope());
+ CurrentInstantiationScope = I->Scope;
+ Attr *NewAttr =
+ instantiateTemplateAttribute(I->TmplAttr, Context, *this, TemplateArgs);
+ I->NewDecl->addAttr(NewAttr);
+ LocalInstantiationScope::deleteScopes(I->Scope,
+ Instantiator.getStartingScope());
+ }
+ Instantiator.disableLateAttributeInstantiation();
+ LateAttrs.clear();
+
if (!FieldsWithMemberInitializers.empty())
ActOnFinishDelayedMemberInitializers(Instantiation);
- if (TSK == TSK_ImplicitInstantiation)
+ if (TSK == TSK_ImplicitInstantiation) {
+ Instantiation->setLocation(Pattern->getLocation());
+ Instantiation->setLocStart(Pattern->getInnerLocStart());
Instantiation->setRBraceLoc(Pattern->getRBraceLoc());
+ }
if (Instantiation->isInvalidDecl())
Invalid = true;
@@ -1854,6 +1953,63 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
return Invalid;
}
+/// \brief Instantiate the definition of an enum from a given pattern.
+///
+/// \param PointOfInstantiation The point of instantiation within the
+/// source code.
+/// \param Instantiation is the declaration whose definition is being
+/// instantiated. This will be a member enumeration of a class
+/// temploid specialization, or a local enumeration within a
+/// function temploid specialization.
+/// \param Pattern The templated declaration from which the instantiation
+/// occurs.
+/// \param TemplateArgs The template arguments to be substituted into
+/// the pattern.
+/// \param TSK The kind of implicit or explicit instantiation to perform.
+///
+/// \return \c true if an error occurred, \c false otherwise.
+bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
+ EnumDecl *Instantiation, EnumDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK) {
+ EnumDecl *PatternDef = Pattern->getDefinition();
+ if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
+ Instantiation->getInstantiatedFromMemberEnum(),
+ Pattern, PatternDef, TSK, /*Complain*/true))
+ return true;
+ Pattern = PatternDef;
+
+ // Record the point of instantiation.
+ if (MemberSpecializationInfo *MSInfo
+ = Instantiation->getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
+ if (Inst)
+ return true;
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ ContextRAII SavedContext(*this, Instantiation);
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+
+ LocalInstantiationScope Scope(*this, /*MergeWithParentScope*/true);
+
+ // Pull attributes from the pattern onto the instantiation.
+ InstantiateAttrs(TemplateArgs, Pattern, Instantiation);
+
+ TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
+ Instantiator.InstantiateEnumDefinition(Instantiation, Pattern);
+
+ // Exit the scope of this instantiation.
+ SavedContext.pop();
+
+ return Instantiation->isInvalidDecl();
+}
+
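
[Editor's note] InstantiateEnum backs the C++11 rule quoted in the InstantiateClass hunk above: implicitly instantiating a class template specialization instantiates the definitions of its unscoped member enumerations, while scoped member enumerations only get their declarations instantiated until the definition is actually required. Roughly, as my own example:

template<typename T> struct S {
  enum Unscoped { A = sizeof(T) };      // definition instantiated with S<T>
  enum class Scoped { B = sizeof(T) };  // declaration only; the enumerators
};                                      // wait until the definition is needed

S<int> s;  // instantiates the definition of S<int>::Unscoped, but not the
           // enumerators of S<int>::Scoped
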
namespace {
/// \brief A partial specialization whose template arguments have matched
/// a given template-id.
@@ -1886,7 +2042,8 @@ Sema::InstantiateClassTemplateSpecialization(
// If this is an explicit instantiation definition, mark the
// vtable as used.
- if (TSK == TSK_ExplicitInstantiationDefinition)
+ if (TSK == TSK_ExplicitInstantiationDefinition &&
+ !ClassTemplateSpec->isInvalidDecl())
MarkVTableUsed(PointOfInstantiation, ClassTemplateSpec, true);
return false;
@@ -2120,7 +2277,7 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
// Always skip the injected-class-name, along with any
// redeclarations of nested classes, since both would cause us
// to try to instantiate the members of a class twice.
- if (Record->isInjectedClassName() || Record->getPreviousDeclaration())
+ if (Record->isInjectedClassName() || Record->getPreviousDecl())
continue;
MemberSpecializationInfo *MSInfo = Record->getMemberSpecializationInfo();
@@ -2173,6 +2330,36 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
if (Pattern)
InstantiateClassMembers(PointOfInstantiation, Pattern, TemplateArgs,
TSK);
+ } else if (EnumDecl *Enum = dyn_cast<EnumDecl>(*D)) {
+ MemberSpecializationInfo *MSInfo = Enum->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(
+ PointOfInstantiation, TSK, Enum,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(), SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ if (Enum->getDefinition())
+ continue;
+
+ EnumDecl *Pattern = Enum->getInstantiatedFromMemberEnum();
+ assert(Pattern && "Missing instantiated-from-template information");
+
+ if (TSK == TSK_ExplicitInstantiationDefinition) {
+ if (!Pattern->getDefinition())
+ continue;
+
+ InstantiateEnum(PointOfInstantiation, Enum, Pattern, TemplateArgs, TSK);
+ } else {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
}
}
}
@@ -2287,7 +2474,7 @@ LocalInstantiationScope::findInstantiationOf(const Decl *D) {
// If this is a tag declaration, it's possible that we need to look for
// a previous declaration.
if (const TagDecl *Tag = dyn_cast<TagDecl>(CheckD))
- CheckD = Tag->getPreviousDeclaration();
+ CheckD = Tag->getPreviousDecl();
else
CheckD = 0;
} while (CheckD);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 02a05d5..8afe7ac 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -57,26 +57,30 @@ bool TemplateDeclInstantiator::SubstQualifier(const TagDecl *OldDecl,
return false;
}
-// FIXME: Is this still too simple?
+// Include attribute instantiation code.
+#include "clang/Sema/AttrTemplateInstantiate.inc"
+
void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
- const Decl *Tmpl, Decl *New) {
+ const Decl *Tmpl, Decl *New,
+ LateInstantiatedAttrVec *LateAttrs,
+ LocalInstantiationScope *OuterMostScope) {
for (AttrVec::const_iterator i = Tmpl->attr_begin(), e = Tmpl->attr_end();
i != e; ++i) {
const Attr *TmplAttr = *i;
+
// FIXME: This should be generalized to more than just the AlignedAttr.
if (const AlignedAttr *Aligned = dyn_cast<AlignedAttr>(TmplAttr)) {
if (Aligned->isAlignmentDependent()) {
- // The alignment expression is not potentially evaluated.
- EnterExpressionEvaluationContext Unevaluated(*this,
- Sema::Unevaluated);
-
if (Aligned->isAlignmentExpr()) {
+ // The alignment expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(*this,
+ Sema::ConstantEvaluated);
+
ExprResult Result = SubstExpr(Aligned->getAlignmentExpr(),
TemplateArgs);
if (!Result.isInvalid())
AddAlignedAttr(Aligned->getLocation(), New, Result.takeAs<Expr>());
- }
- else {
+ } else {
TypeSourceInfo *Result = SubstType(Aligned->getAlignmentType(),
TemplateArgs,
Aligned->getLocation(),
@@ -88,9 +92,18 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
}
}
- // FIXME: Is cloning correct for all attributes?
- Attr *NewAttr = TmplAttr->clone(Context);
- New->addAttr(NewAttr);
+ if (TmplAttr->isLateParsed() && LateAttrs) {
+ // Late parsed attributes must be instantiated and attached after the
+ // enclosing class has been instantiated. See Sema::InstantiateClass.
+ LocalInstantiationScope *Saved = 0;
+ if (CurrentInstantiationScope)
+ Saved = CurrentInstantiationScope->cloneScopes(OuterMostScope);
+ LateAttrs->push_back(LateInstantiatedAttribute(TmplAttr, Saved, New));
+ } else {
+ Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context,
+ *this, TemplateArgs);
+ New->addAttr(NewAttr);
+ }
}
}
@@ -164,13 +177,18 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
}
}
- if (TypedefNameDecl *Prev = D->getPreviousDeclaration()) {
+ if (TypedefNameDecl *Prev = D->getPreviousDecl()) {
NamedDecl *InstPrev = SemaRef.FindInstantiatedDecl(D->getLocation(), Prev,
TemplateArgs);
if (!InstPrev)
return 0;
- Typedef->setPreviousDeclaration(cast<TypedefNameDecl>(InstPrev));
+ TypedefNameDecl *InstPrevTypedef = cast<TypedefNameDecl>(InstPrev);
+
+ // If the typedef types are not identical, reject them.
+ SemaRef.isIncompatibleTypedef(InstPrevTypedef, Typedef);
+
+ Typedef->setPreviousDeclaration(InstPrevTypedef);
}
SemaRef.InstantiateAttrs(TemplateArgs, D, Typedef);
@@ -206,7 +224,7 @@ TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
TypeAliasDecl *Pattern = D->getTemplatedDecl();
TypeAliasTemplateDecl *PrevAliasTemplate = 0;
- if (Pattern->getPreviousDeclaration()) {
+ if (Pattern->getPreviousDecl()) {
DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
if (Found.first != Found.second) {
PrevAliasTemplate = dyn_cast<TypeAliasTemplateDecl>(*Found.first);
@@ -234,66 +252,6 @@ TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
return Inst;
}
-/// \brief Instantiate an initializer, breaking it into separate
-/// initialization arguments.
-///
-/// \param Init The initializer to instantiate.
-///
-/// \param TemplateArgs Template arguments to be substituted into the
-/// initializer.
-///
-/// \param NewArgs Will be filled in with the instantiation arguments.
-///
-/// \returns true if an error occurred, false otherwise
-bool Sema::InstantiateInitializer(Expr *Init,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- SourceLocation &LParenLoc,
- ASTOwningVector<Expr*> &NewArgs,
- SourceLocation &RParenLoc) {
- NewArgs.clear();
- LParenLoc = SourceLocation();
- RParenLoc = SourceLocation();
-
- if (!Init)
- return false;
-
- if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init))
- Init = ExprTemp->getSubExpr();
-
- while (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(Init))
- Init = Binder->getSubExpr();
-
- if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Init))
- Init = ICE->getSubExprAsWritten();
-
- if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
- LParenLoc = ParenList->getLParenLoc();
- RParenLoc = ParenList->getRParenLoc();
- return SubstExprs(ParenList->getExprs(), ParenList->getNumExprs(),
- true, TemplateArgs, NewArgs);
- }
-
- if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init)) {
- if (!isa<CXXTemporaryObjectExpr>(Construct)) {
- if (SubstExprs(Construct->getArgs(), Construct->getNumArgs(), true,
- TemplateArgs, NewArgs))
- return true;
-
- // FIXME: Fake locations!
- LParenLoc = PP.getLocForEndOfToken(Init->getLocStart());
- RParenLoc = LParenLoc;
- return false;
- }
- }
-
- ExprResult Result = SubstExpr(Init, TemplateArgs);
- if (Result.isInvalid())
- return true;
-
- NewArgs.push_back(Result.takeAs<Expr>());
- return false;
-}
-
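
[Editor's note] The removed Sema::InstantiateInitializer manually peeled initializers apart into paren locations and argument lists; its callers now use SubstInitializer, which substitutes the initializer as a single expression and carries the direct-vs-copy distinction in one flag. The distinction it must preserve, illustratively:

template<typename T> struct M {
  T member = T();  // copy-initialization, substituted as one expression
};

template<typename T> T make() {
  T direct(0);     // direct-initialization; the CXXDirectInit flag survives
  return direct;
}
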
Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
// If this is the variable for an anonymous struct or union,
// instantiate the anonymous struct/union type first.
@@ -324,8 +282,9 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
D->getStorageClass(),
D->getStorageClassAsWritten());
Var->setThreadSpecified(D->isThreadSpecified());
- Var->setCXXDirectInitializer(D->hasCXXDirectInitializer());
+ Var->setInitStyle(D->getInitStyle());
Var->setCXXForRangeDecl(D->isCXXForRangeDecl());
+ Var->setConstexpr(D->isConstexpr());
// Substitute the nested name specifier, if any.
if (SubstQualifier(D, Var))
@@ -351,18 +310,23 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
Sema::LookupOrdinaryName, Sema::ForRedeclaration);
if (D->isStaticDataMember())
SemaRef.LookupQualifiedName(Previous, Owner, false);
+
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ SemaRef.inferObjCARCLifetime(Var))
+ Var->setInvalidDecl();
+
SemaRef.CheckVariableDeclaration(Var, Previous);
if (D->isOutOfLine()) {
- if (!D->isStaticDataMember())
- D->getLexicalDeclContext()->addDecl(Var);
+ D->getLexicalDeclContext()->addDecl(Var);
Owner->makeDeclVisibleInContext(Var);
} else {
Owner->addDecl(Var);
if (Owner->isFunctionOrMethod())
SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Var);
}
- SemaRef.InstantiateAttrs(TemplateArgs, D, Var);
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Var, LateAttrs, StartingScope);
// Link instantiations of static data members back to the template from
// which they were instantiated.
@@ -374,31 +338,21 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
// We already have an initializer in the class.
} else if (D->getInit()) {
if (Var->isStaticDataMember() && !D->isOutOfLine())
- SemaRef.PushExpressionEvaluationContext(Sema::Unevaluated);
+ SemaRef.PushExpressionEvaluationContext(Sema::ConstantEvaluated);
else
SemaRef.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
// Instantiate the initializer.
- SourceLocation LParenLoc, RParenLoc;
- ASTOwningVector<Expr*> InitArgs(SemaRef);
- if (!SemaRef.InstantiateInitializer(D->getInit(), TemplateArgs, LParenLoc,
- InitArgs, RParenLoc)) {
+ ExprResult Init = SemaRef.SubstInitializer(D->getInit(), TemplateArgs,
+ D->getInitStyle() == VarDecl::CallInit);
+ if (!Init.isInvalid()) {
bool TypeMayContainAuto = true;
- // Attach the initializer to the declaration, if we have one.
- if (InitArgs.size() == 0)
+ if (Init.get()) {
+ bool DirectInit = D->isDirectInit();
+ SemaRef.AddInitializerToDecl(Var, Init.take(), DirectInit,
+ TypeMayContainAuto);
+ } else
SemaRef.ActOnUninitializedDecl(Var, TypeMayContainAuto);
- else if (D->hasCXXDirectInitializer()) {
- // Add the direct initializer to the declaration.
- SemaRef.AddCXXDirectInitializerToDecl(Var,
- LParenLoc,
- move_arg(InitArgs),
- RParenLoc,
- TypeMayContainAuto);
- } else {
- assert(InitArgs.size() == 1);
- Expr *Init = InitArgs.take()[0];
- SemaRef.AddInitializerToDecl(Var, Init, false, TypeMayContainAuto);
- }
} else {
// FIXME: Not too happy about invalidating the declaration
// because of a bogus initializer.
@@ -456,8 +410,9 @@ Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
if (Invalid)
BitWidth = 0;
else if (BitWidth) {
- // The bit-width expression is not potentially evaluated.
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ // The bit-width expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
ExprResult InstantiatedBitWidth
= SemaRef.SubstExpr(BitWidth, TemplateArgs);
@@ -483,7 +438,7 @@ Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
return 0;
}
- SemaRef.InstantiateAttrs(TemplateArgs, D, Field);
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Field, LateAttrs, StartingScope);
if (Invalid)
Field->setInvalidDecl();
@@ -552,7 +507,8 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
if (!InstTy)
return 0;
- FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getFriendLoc(), InstTy);
+ FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocation(),
+ D->getFriendLoc(), InstTy);
if (!FD)
return 0;
@@ -584,8 +540,9 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
Expr *AssertExpr = D->getAssertExpr();
- // The expression in a static assertion is not potentially evaluated.
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ // The expression in a static assertion is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
ExprResult InstantiatedAssertExpr
= SemaRef.SubstExpr(AssertExpr, TemplateArgs);
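
[Editor's note] Several hunks in this file swap Sema::Unevaluated for Sema::ConstantEvaluated when substituting into alignment expressions, bit-field widths, static_assert conditions, and enumerator values: these contexts must yield constant expressions, and C++11 constexpr evaluation treats them differently from genuinely unevaluated operands like the operand of sizeof. For instance, illustratively:

template<int N> struct Packed {
  static_assert(N > 0, "N must be positive");  // constant-evaluated on
  unsigned Bits : N;                           // substitution, as is the
};                                             // bit-field width

Packed<3> p;   // fine; Packed<0> would trigger the static_assert
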
@@ -601,25 +558,32 @@ Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
}
Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
+ EnumDecl *PrevDecl = 0;
+ if (D->getPreviousDecl()) {
+ NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
+ D->getPreviousDecl(),
+ TemplateArgs);
+ if (!Prev) return 0;
+ PrevDecl = cast<EnumDecl>(Prev);
+ }
+
EnumDecl *Enum = EnumDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
D->getLocation(), D->getIdentifier(),
- /*PrevDecl=*/0, D->isScoped(),
+ PrevDecl, D->isScoped(),
D->isScopedUsingClassTag(), D->isFixed());
if (D->isFixed()) {
- if (TypeSourceInfo* TI = D->getIntegerTypeSourceInfo()) {
+ if (TypeSourceInfo *TI = D->getIntegerTypeSourceInfo()) {
// If we have type source information for the underlying type, it means it
// has been explicitly set by the user. Perform substitution on it before
// moving on.
SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
- Enum->setIntegerTypeSourceInfo(SemaRef.SubstType(TI,
- TemplateArgs,
- UnderlyingLoc,
- DeclarationName()));
-
- if (!Enum->getIntegerTypeSourceInfo())
+ TypeSourceInfo *NewTI = SemaRef.SubstType(TI, TemplateArgs, UnderlyingLoc,
+ DeclarationName());
+ if (!NewTI || SemaRef.CheckEnumUnderlyingType(NewTI))
Enum->setIntegerType(SemaRef.Context.IntTy);
- }
- else {
+ else
+ Enum->setIntegerTypeSourceInfo(NewTI);
+ } else {
assert(!D->getIntegerType()->isDependentType()
&& "Dependent type without type source info");
Enum->setIntegerType(D->getIntegerType());
@@ -628,27 +592,62 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
SemaRef.InstantiateAttrs(TemplateArgs, D, Enum);
- Enum->setInstantiationOfMemberEnum(D);
+ Enum->setInstantiationOfMemberEnum(D, TSK_ImplicitInstantiation);
Enum->setAccess(D->getAccess());
if (SubstQualifier(D, Enum)) return 0;
Owner->addDecl(Enum);
- Enum->startDefinition();
+
+ EnumDecl *Def = D->getDefinition();
+ if (Def && Def != D) {
+ // If this is an out-of-line definition of an enum member template, check
+ // that the underlying types match in the instantiation of both
+ // declarations.
+ if (TypeSourceInfo *TI = Def->getIntegerTypeSourceInfo()) {
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ QualType DefnUnderlying =
+ SemaRef.SubstType(TI->getType(), TemplateArgs,
+ UnderlyingLoc, DeclarationName());
+ SemaRef.CheckEnumRedeclaration(Def->getLocation(), Def->isScoped(),
+ DefnUnderlying, Enum);
+ }
+ }
if (D->getDeclContext()->isFunctionOrMethod())
SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Enum);
+ // C++11 [temp.inst]p1: The implicit instantiation of a class template
+ // specialization causes the implicit instantiation of the declarations, but
+ // not the definitions of scoped member enumerations.
+ // FIXME: There appears to be no wording for what happens for an enum defined
+ // within a block scope, but we treat that much like a member template. Only
+ // instantiate the definition when visiting the definition in that case, since
+ // we will visit all redeclarations.
+ if (!Enum->isScoped() && Def &&
+ (!D->getDeclContext()->isFunctionOrMethod() || D->isCompleteDefinition()))
+ InstantiateEnumDefinition(Enum, Def);
+
+ return Enum;
+}
+
+void TemplateDeclInstantiator::InstantiateEnumDefinition(
+ EnumDecl *Enum, EnumDecl *Pattern) {
+ Enum->startDefinition();
+
+ // Update the location to refer to the definition.
+ Enum->setLocation(Pattern->getLocation());
+
SmallVector<Decl*, 4> Enumerators;
EnumConstantDecl *LastEnumConst = 0;
- for (EnumDecl::enumerator_iterator EC = D->enumerator_begin(),
- ECEnd = D->enumerator_end();
+ for (EnumDecl::enumerator_iterator EC = Pattern->enumerator_begin(),
+ ECEnd = Pattern->enumerator_end();
EC != ECEnd; ++EC) {
// The specified value for the enumerator.
ExprResult Value = SemaRef.Owned((Expr *)0);
if (Expr *UninstValue = EC->getInitExpr()) {
- // The enumerator's value expression is not potentially evaluated.
+ // The enumerator's value expression is a constant expression.
EnterExpressionEvaluationContext Unevaluated(SemaRef,
- Sema::Unevaluated);
+ Sema::ConstantEvaluated);
Value = SemaRef.SubstExpr(UninstValue, TemplateArgs);
}
@@ -679,7 +678,8 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
Enumerators.push_back(EnumConst);
LastEnumConst = EnumConst;
- if (D->getDeclContext()->isFunctionOrMethod()) {
+ if (Pattern->getDeclContext()->isFunctionOrMethod() &&
+ !Enum->isScoped()) {
// If the enumeration is within a function or method, record the enum
// constant as a local.
SemaRef.CurrentInstantiationScope->InstantiatedLocal(*EC, EnumConst);
@@ -687,14 +687,11 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
}
}
- // FIXME: Fixup LBraceLoc and RBraceLoc
- // FIXME: Empty Scope and AttributeList (required to handle attribute packed).
- SemaRef.ActOnEnumBody(Enum->getLocation(), SourceLocation(), SourceLocation(),
- Enum,
+ // FIXME: Fixup LBraceLoc
+ SemaRef.ActOnEnumBody(Enum->getLocation(), SourceLocation(),
+ Enum->getRBraceLoc(), Enum,
Enumerators.data(), Enumerators.size(),
0, 0);
-
- return Enum;
}
Decl *TemplateDeclInstantiator::VisitEnumConstantDecl(EnumConstantDecl *D) {
@@ -728,7 +725,7 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
CXXRecordDecl *PrevDecl = 0;
ClassTemplateDecl *PrevClassTemplate = 0;
- if (!isFriend && Pattern->getPreviousDeclaration()) {
+ if (!isFriend && Pattern->getPreviousDecl()) {
DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
if (Found.first != Found.second) {
PrevClassTemplate = dyn_cast<ClassTemplateDecl>(*Found.first);
@@ -860,10 +857,17 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
// Finish handling of friends.
if (isFriend) {
- DC->makeDeclVisibleInContext(Inst, /*Recoverable*/ false);
+ DC->makeDeclVisibleInContext(Inst);
+ Inst->setLexicalDeclContext(Owner);
+ RecordInst->setLexicalDeclContext(Owner);
return Inst;
}
+ if (D->isOutOfLine()) {
+ Inst->setLexicalDeclContext(D->getLexicalDeclContext());
+ RecordInst->setLexicalDeclContext(D->getLexicalDeclContext());
+ }
+
Owner->addDecl(Inst);
if (!PrevClassTemplate) {
@@ -958,9 +962,9 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
CXXRecordDecl *PrevDecl = 0;
if (D->isInjectedClassName())
PrevDecl = cast<CXXRecordDecl>(Owner);
- else if (D->getPreviousDeclaration()) {
+ else if (D->getPreviousDecl()) {
NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
- D->getPreviousDeclaration(),
+ D->getPreviousDecl(),
TemplateArgs);
if (!Prev) return 0;
PrevDecl = cast<CXXRecordDecl>(Prev);
@@ -1010,11 +1014,11 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
// Check whether there is already a function template specialization for
// this declaration.
FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
- void *InsertPos = 0;
if (FunctionTemplate && !TemplateParams) {
std::pair<const TemplateArgument *, unsigned> Innermost
= TemplateArgs.getInnermost();
+ void *InsertPos = 0;
FunctionDecl *SpecFunc
= FunctionTemplate->findSpecialization(Innermost.first, Innermost.second,
InsertPos);
@@ -1037,8 +1041,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);
SmallVector<ParmVarDecl *, 4> Params;
- TypeSourceInfo *TInfo = D->getTypeSourceInfo();
- TInfo = SubstFunctionType(D, Params);
+ TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
if (!TInfo)
return 0;
QualType T = TInfo->getType();
@@ -1071,7 +1074,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
D->getLocation(), D->getDeclName(), T, TInfo,
D->getStorageClass(), D->getStorageClassAsWritten(),
D->isInlineSpecified(), D->hasWrittenPrototype(),
- /*isConstexpr*/ false);
+ D->isConstexpr());
if (QualifierLoc)
Function->setQualifierInfo(QualifierLoc);
@@ -1145,7 +1148,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
TemplateArgumentList::CreateCopy(SemaRef.Context,
Innermost.first,
Innermost.second),
- InsertPos);
+ /*InsertPos=*/0);
} else if (isFriend) {
// Note, we need this connection even if the friend doesn't have a body.
// Its body may exist but not have been attached yet due to deferred
@@ -1221,25 +1224,42 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
if (isFriend) {
NamedDecl *PrevDecl;
if (TemplateParams)
- PrevDecl = FunctionTemplate->getPreviousDeclaration();
+ PrevDecl = FunctionTemplate->getPreviousDecl();
else
- PrevDecl = Function->getPreviousDeclaration();
+ PrevDecl = Function->getPreviousDecl();
PrincipalDecl->setObjectOfFriendDecl(PrevDecl != 0);
- DC->makeDeclVisibleInContext(PrincipalDecl, /*Recoverable=*/ false);
+ DC->makeDeclVisibleInContext(PrincipalDecl);
bool queuedInstantiation = false;
- if (!SemaRef.getLangOptions().CPlusPlus0x &&
+ // C++98 [temp.friend]p5: When a function is defined in a friend function
+ // declaration in a class template, the function is defined at each
+ // instantiation of the class template. The function is defined even if it
+ // is never used.
+ // C++11 [temp.friend]p4: When a function is defined in a friend function
+ // declaration in a class template, the function is instantiated when the
+ // function is odr-used.
+ //
+ // If -Wc++98-compat is enabled, we go through the motions of checking for a
+ // redefinition, but don't instantiate the function.
+ if ((!SemaRef.getLangOpts().CPlusPlus0x ||
+ SemaRef.Diags.getDiagnosticLevel(
+ diag::warn_cxx98_compat_friend_redefinition,
+ Function->getLocation())
+ != DiagnosticsEngine::Ignored) &&
D->isThisDeclarationADefinition()) {
// Check for a function body.
const FunctionDecl *Definition = 0;
if (Function->isDefined(Definition) &&
Definition->getTemplateSpecializationKind() == TSK_Undeclared) {
- SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
- << Function->getDeclName();
+ SemaRef.Diag(Function->getLocation(),
+ SemaRef.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_friend_redefinition :
+ diag::err_redefinition) << Function->getDeclName();
SemaRef.Diag(Definition->getLocation(), diag::note_previous_definition);
- Function->setInvalidDecl();
+ if (!SemaRef.getLangOpts().CPlusPlus0x)
+ Function->setInvalidDecl();
}
// Check for redefinitions due to other instantiations of this or
// a similar friend function.
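
[Editor's note] The reworked friend handling above implements the language split spelled out in the new comment: under C++98 every instantiation defines the friend, so two instantiations collide; under C++11 the friend is instantiated only when odr-used, and -Wc++98-compat merely warns about the old behavior. A sketch of the affected pattern (mine, not from the patch):

template<typename T> struct Wrapper {
  friend void hook() {}   // function defined inside a friend declaration
};

Wrapper<int>  a;
Wrapper<char> b;  // C++98: error, redefinition of 'hook'
                  // C++11: OK (warns under -Wc++98-compat); 'hook' is only
                  // instantiated if it is odr-used
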
@@ -1250,7 +1270,8 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
continue;
switch (R->getFriendObjectKind()) {
case Decl::FOK_None:
- if (!queuedInstantiation && R->isUsed(false)) {
+ if (!SemaRef.getLangOpts().CPlusPlus0x &&
+ !queuedInstantiation && R->isUsed(false)) {
if (MemberSpecializationInfo *MSInfo
= Function->getMemberSpecializationInfo()) {
if (MSInfo->getPointOfInstantiation().isInvalid()) {
@@ -1267,10 +1288,14 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
if (const FunctionDecl *RPattern
= R->getTemplateInstantiationPattern())
if (RPattern->isDefined(RPattern)) {
- SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
+ SemaRef.Diag(Function->getLocation(),
+ SemaRef.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_friend_redefinition :
+ diag::err_redefinition)
<< Function->getDeclName();
SemaRef.Diag(R->getLocation(), diag::note_previous_definition);
- Function->setInvalidDecl();
+ if (!SemaRef.getLangOpts().CPlusPlus0x)
+ Function->setInvalidDecl();
break;
}
}
@@ -1291,7 +1316,6 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
TemplateParameterList *TemplateParams,
bool IsClassScopeSpecialization) {
FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
- void *InsertPos = 0;
if (FunctionTemplate && !TemplateParams) {
// We are creating a function template specialization from a function
// template. Check whether there is already a function template
@@ -1299,6 +1323,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
std::pair<const TemplateArgument *, unsigned> Innermost
= TemplateArgs.getInnermost();
+ void *InsertPos = 0;
FunctionDecl *SpecFunc
= FunctionTemplate->findSpecialization(Innermost.first, Innermost.second,
InsertPos);
@@ -1334,8 +1359,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
}
SmallVector<ParmVarDecl *, 4> Params;
- TypeSourceInfo *TInfo = D->getTypeSourceInfo();
- TInfo = SubstFunctionType(D, Params);
+ TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
if (!TInfo)
return 0;
QualType T = TInfo->getType();
@@ -1395,7 +1419,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
StartLoc, NameInfo, T, TInfo,
Constructor->isExplicit(),
Constructor->isInlineSpecified(),
- false, /*isConstexpr*/ false);
+ false, Constructor->isConstexpr());
} else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
Method = CXXDestructorDecl::Create(SemaRef.Context, Record,
StartLoc, NameInfo, T, TInfo,
@@ -1406,7 +1430,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
StartLoc, NameInfo, T, TInfo,
Conversion->isInlineSpecified(),
Conversion->isExplicit(),
- /*isConstexpr*/ false,
+ Conversion->isConstexpr(),
Conversion->getLocEnd());
} else {
Method = CXXMethodDecl::Create(SemaRef.Context, Record,
@@ -1414,7 +1438,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
D->isStatic(),
D->getStorageClassAsWritten(),
D->isInlineSpecified(),
- /*isConstexpr*/ false, D->getLocEnd());
+ D->isConstexpr(), D->getLocEnd());
}
if (QualifierLoc)
@@ -1452,7 +1476,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
TemplateArgumentList::CreateCopy(SemaRef.Context,
Innermost.first,
Innermost.second),
- InsertPos);
+ /*InsertPos=*/0);
} else if (!isFriend) {
// Record that this is an instantiation of a member function.
Method->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
@@ -1504,6 +1528,12 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
SemaRef.CheckOverrideControl(Method);
+ // If a function is defined as defaulted or deleted, mark it as such now.
+ if (D->isDefaulted())
+ Method->setDefaulted();
+ if (D->isDeletedAsWritten())
+ Method->setDeletedAsWritten();
+
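
[Editor's note] The lines added above propagate '= default' and '= delete' from the pattern onto each instantiated method, e.g. (illustrative only):

template<typename T> struct Pinned {
  Pinned() = default;              // instantiations stay defaulted
  Pinned(const Pinned&) = delete;  // and stay deleted
};

Pinned<int> ok;
// Pinned<int> copy(ok);  // rejected: the copy constructor is deleted
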
if (FunctionTemplate) {
// If there's a function template, let our caller handle it.
} else if (Method->isInvalidDecl() && !Previous.empty()) {
@@ -1542,7 +1572,8 @@ Decl *TemplateDeclInstantiator::VisitCXXConversionDecl(CXXConversionDecl *D) {
ParmVarDecl *TemplateDeclInstantiator::VisitParmVarDecl(ParmVarDecl *D) {
return SemaRef.SubstParmVarDecl(D, TemplateArgs, /*indexAdjustment*/ 0,
- llvm::Optional<unsigned>());
+ llvm::Optional<unsigned>(),
+ /*ExpectParameterPack=*/false);
}
Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
@@ -1825,6 +1856,12 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
if (NewUD->isInvalidDecl())
return NewUD;
+ if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) {
+ if (SemaRef.CheckInheritingConstructorUsingDecl(NewUD))
+ NewUD->setInvalidDecl();
+ return NewUD;
+ }
+
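
[Editor's note] The new early return above handles using-declarations that name a constructor, i.e. C++11 inheriting constructors, by running CheckInheritingConstructorUsingDecl on the instantiated declaration instead of processing shadow decls. The shape of the construct, as a sketch; whether it compiles end-to-end depends on how much inheriting-constructor support the rest of the compiler has:

struct Base { Base(int); };

template<typename T> struct Derived : T {
  using T::T;  // names an inherited constructor once T = Base
};
// Derived<Base> d(42);  // valid C++11 given full inheriting-constructor
                         // support; the using-decl is checked here regardless
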
bool isFunctionScope = Owner->isFunctionOrMethod();
// Process the shadow decls.
@@ -2094,7 +2131,7 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
// Add this partial specialization to the set of class template partial
// specializations.
- ClassTemplate->AddPartialSpecialization(InstPartialSpec, InsertPos);
+ ClassTemplate->AddPartialSpecialization(InstPartialSpec, /*InsertPos=*/0);
return InstPartialSpec;
}
@@ -2124,6 +2161,8 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
OldIdx != NumOldParams; ++OldIdx) {
ParmVarDecl *OldParam = OldProtoLoc->getArg(OldIdx);
if (!OldParam->isParameterPack() ||
+ // FIXME: Is this right? OldParam could expand to an empty parameter
+ // pack and the next parameter could be an unexpanded parameter pack
(NewIdx < NumNewParams &&
NewProtoLoc->getArg(NewIdx)->isParameterPack())) {
// Simple case: normal parameter, or a parameter pack that's
@@ -2280,23 +2319,19 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
}
Expr *NoexceptExpr = 0;
if (Expr *OldNoexceptExpr = Proto->getNoexceptExpr()) {
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
ExprResult E = SemaRef.SubstExpr(OldNoexceptExpr, TemplateArgs);
if (E.isUsable())
E = SemaRef.CheckBooleanCondition(E.get(), E.get()->getLocStart());
-
+
if (E.isUsable()) {
- SourceLocation ErrLoc;
- llvm::APSInt NoexceptVal;
NoexceptExpr = E.take();
if (!NoexceptExpr->isTypeDependent() &&
- !NoexceptExpr->isValueDependent() &&
- !NoexceptExpr->isIntegerConstantExpr(NoexceptVal, SemaRef.Context,
- &ErrLoc, /*evaluated=*/false)){
- SemaRef.Diag(ErrLoc, diag::err_noexcept_needs_constant_expression)
- << NoexceptExpr->getSourceRange();
- NoexceptExpr = 0;
- }
+ !NoexceptExpr->isValueDependent())
+ NoexceptExpr = SemaRef.VerifyIntegerConstantExpression(NoexceptExpr,
+ 0, SemaRef.PDiag(diag::err_noexcept_needs_constant_expression),
+ /*AllowFold*/ false).take();
}
}
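
[Editor's note] The noexcept hunk above replaces a hand-rolled isIntegerConstantExpr check with VerifyIntegerConstantExpression, which folds and diagnoses through one consistent path. What it validates, roughly:

template<typename T> void g() noexcept(sizeof(T) < 8);  // OK: constant

int runtime();
// template<typename T> void h() noexcept(runtime());   // rejected: the
// noexcept argument must be a constant expression after substitution
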
@@ -2318,19 +2353,13 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
EPI));
}
- // C++0x [dcl.constexpr]p6: If the instantiated template specialization of
- // a constexpr function template satisfies the requirements for a constexpr
- // function, then it is a constexpr function.
- if (Tmpl->isConstexpr() &&
- SemaRef.CheckConstexprFunctionDecl(New, Sema::CCK_Instantiation))
- New->setConstexpr(true);
-
const FunctionDecl* Definition = Tmpl;
// Get the definition. Leaves the variable unchanged if undefined.
Tmpl->isDefined(Definition);
- SemaRef.InstantiateAttrs(TemplateArgs, Definition, New);
+ SemaRef.InstantiateAttrs(TemplateArgs, Definition, New,
+ LateAttrs, StartingScope);
return false;
}
@@ -2450,6 +2479,9 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
if (Inst)
return;
+ // Copy the inner loc start from the pattern.
+ Function->setInnerLocStart(PatternDecl->getInnerLocStart());
+
// If we're performing recursive template instantiation, create our own
// queue of pending implicit instantiations that we will instantiate later,
// while we're still within our own instantiation context.
@@ -2474,6 +2506,13 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
LocalInstantiationScope Scope(*this, MergeWithParentScope);
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ Sema::ContextRAII savedContext(*this, Function);
+
+ MultiLevelTemplateArgumentList TemplateArgs =
+ getTemplateInstantiationArgs(Function, 0, false, PatternDecl);
+
// Introduce the instantiated function parameters into the local
// instantiation scope, and set the parameter names to those used
// in the template.
@@ -2483,7 +2522,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
if (!PatternParam->isParameterPack()) {
// Simple case: not a parameter pack.
assert(FParamIdx < Function->getNumParams());
- ParmVarDecl *FunctionParam = Function->getParamDecl(I);
+ ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
FunctionParam->setDeclName(PatternParam->getDeclName());
Scope.InstantiatedLocal(PatternParam, FunctionParam);
++FParamIdx;
@@ -2492,22 +2531,16 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// Expand the parameter pack.
Scope.MakeInstantiatedLocalArgPack(PatternParam);
- for (unsigned NumFParams = Function->getNumParams();
- FParamIdx < NumFParams;
- ++FParamIdx) {
+ unsigned NumArgumentsInExpansion
+ = getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
+ for (unsigned Arg = 0; Arg < NumArgumentsInExpansion; ++Arg) {
ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
FunctionParam->setDeclName(PatternParam->getDeclName());
Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
+ ++FParamIdx;
}
}
- // Enter the scope of this instantiation. We don't use
- // PushDeclContext because we don't have a scope.
- Sema::ContextRAII savedContext(*this, Function);
-
- MultiLevelTemplateArgumentList TemplateArgs =
- getTemplateInstantiationArgs(Function, 0, false, PatternDecl);
-
if (PatternDecl->isDefaulted()) {
ActOnFinishFunctionBody(Function, 0, /*IsInstantiation=*/true);
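
Using getNumArgumentsInExpansion instead of running FParamIdx up to Function->getNumParams() keeps the pack from swallowing parameters it does not own, in particular when the pack expands to nothing. A hypothetical case:

    template<typename ...Ts>
    struct S {
      void mem(Ts ...ts);
    };

    // For S<>::mem(), the pack expands to zero parameters, so the loop
    // above runs zero times rather than consuming every remaining
    // parameter of the instantiated function.
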
@@ -2612,21 +2645,29 @@ void Sema::InstantiateStaticDataMemberDefinition(
return;
}
+ TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
+
// Never instantiate an explicit specialization.
- if (Var->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ if (TSK == TSK_ExplicitSpecialization)
return;
// C++0x [temp.explicit]p9:
// Except for inline functions, other explicit instantiation declarations
// have the effect of suppressing the implicit instantiation of the entity
// to which they refer.
- if (Var->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration)
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
return;
+ Consumer.HandleCXXStaticMemberVarInstantiation(Var);
+
// If we already have a definition, we're done.
- if (Var->getDefinition())
+ if (VarDecl *Def = Var->getDefinition()) {
+ // We may be explicitly instantiating something we've already implicitly
+ // instantiated.
+ Def->setTemplateSpecializationKind(Var->getTemplateSpecializationKind(),
+ PointOfInstantiation);
return;
+ }
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
if (Inst)
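
Hoisting the specialization kind into TSK, and re-stamping an already-instantiated definition, covers explicit instantiation after implicit use. An assumed illustration:

    template<typename T>
    struct A {
      static int value;
    };
    template<typename T> int A<T>::value = sizeof(T);

    int x = A<int>::value;   // implicit instantiation defines A<int>::value
    template struct A<int>;  // explicit instantiation: the existing
                             // definition is re-marked, not re-instantiated
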
@@ -2645,7 +2686,8 @@ void Sema::InstantiateStaticDataMemberDefinition(
// Enter the scope of this instantiation. We don't use
// PushDeclContext because we don't have a scope.
ContextRAII previousContext(*this, Var->getDeclContext());
-
+ LocalInstantiationScope Local(*this);
+
VarDecl *OldVar = Var;
Var = cast_or_null<VarDecl>(SubstDecl(Def, Var->getDeclContext(),
getTemplateInstantiationArgs(Var)));
@@ -2660,7 +2702,8 @@ void Sema::InstantiateStaticDataMemberDefinition(
DeclGroupRef DG(Var);
Consumer.HandleTopLevelDecl(DG);
}
-
+ Local.Exit();
+
if (Recursive) {
// Define any newly required vtables.
DefineUsedVTables();
@@ -2683,18 +2726,6 @@ void Sema::InstantiateStaticDataMemberDefinition(
}
}
-static MultiInitializer CreateMultiInitializer(SmallVectorImpl<Expr*> &Args,
- const CXXCtorInitializer *Init) {
- // FIXME: This is a hack that will do slightly the wrong thing for an
- // initializer of the form foo({...}).
- // The right thing to do would be to modify InstantiateInitializer to create
- // the MultiInitializer.
- if (Args.size() == 1 && isa<InitListExpr>(Args[0]))
- return MultiInitializer(Args[0]);
- return MultiInitializer(Init->getLParenLoc(), Args.data(),
- Args.size(), Init->getRParenLoc());
-}
-
void
Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
@@ -2714,14 +2745,11 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
if (!Init->isWritten())
continue;
- SourceLocation LParenLoc, RParenLoc;
- ASTOwningVector<Expr*> NewArgs(*this);
-
SourceLocation EllipsisLoc;
if (Init->isPackExpansion()) {
// This is a pack expansion. We should expand it now.
- TypeLoc BaseTL = Init->getBaseClassInfo()->getTypeLoc();
+ TypeLoc BaseTL = Init->getTypeSourceInfo()->getTypeLoc();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
collectUnexpandedParameterPacks(BaseTL, Unexpanded);
bool ShouldExpand = false;
@@ -2744,14 +2772,15 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
// Instantiate the initializer.
- if (InstantiateInitializer(Init->getInit(), TemplateArgs,
- LParenLoc, NewArgs, RParenLoc)) {
+ ExprResult TempInit = SubstInitializer(Init->getInit(), TemplateArgs,
+ /*CXXDirectInit=*/true);
+ if (TempInit.isInvalid()) {
AnyErrors = true;
break;
}
// Instantiate the base type.
- TypeSourceInfo *BaseTInfo = SubstType(Init->getBaseClassInfo(),
+ TypeSourceInfo *BaseTInfo = SubstType(Init->getTypeSourceInfo(),
TemplateArgs,
Init->getSourceLocation(),
New->getDeclName());
@@ -2761,9 +2790,8 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
}
// Build the initializer.
- MultiInitializer MultiInit(CreateMultiInitializer(NewArgs, Init));
MemInitResult NewInit = BuildBaseInitializer(BaseTInfo->getType(),
- BaseTInfo, MultiInit,
+ BaseTInfo, TempInit.take(),
New->getParent(),
SourceLocation());
if (NewInit.isInvalid()) {
@@ -2772,34 +2800,37 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
}
NewInits.push_back(NewInit.get());
- NewArgs.clear();
}
continue;
}
// Instantiate the initializer.
- if (InstantiateInitializer(Init->getInit(), TemplateArgs,
- LParenLoc, NewArgs, RParenLoc)) {
+ ExprResult TempInit = SubstInitializer(Init->getInit(), TemplateArgs,
+ /*CXXDirectInit=*/true);
+ if (TempInit.isInvalid()) {
AnyErrors = true;
continue;
}
MemInitResult NewInit;
- if (Init->isBaseInitializer()) {
- TypeSourceInfo *BaseTInfo = SubstType(Init->getBaseClassInfo(),
- TemplateArgs,
- Init->getSourceLocation(),
- New->getDeclName());
- if (!BaseTInfo) {
+ if (Init->isDelegatingInitializer() || Init->isBaseInitializer()) {
+ TypeSourceInfo *TInfo = SubstType(Init->getTypeSourceInfo(),
+ TemplateArgs,
+ Init->getSourceLocation(),
+ New->getDeclName());
+ if (!TInfo) {
AnyErrors = true;
New->setInvalidDecl();
continue;
}
- MultiInitializer MultiInit(CreateMultiInitializer(NewArgs, Init));
- NewInit = BuildBaseInitializer(BaseTInfo->getType(), BaseTInfo, MultiInit,
- New->getParent(), EllipsisLoc);
+ if (Init->isBaseInitializer())
+ NewInit = BuildBaseInitializer(TInfo->getType(), TInfo, TempInit.take(),
+ New->getParent(), EllipsisLoc);
+ else
+ NewInit = BuildDelegatingInitializer(TInfo, TempInit.take(),
+ cast<CXXRecordDecl>(CurContext->getParent()));
} else if (Init->isMemberInitializer()) {
FieldDecl *Member = cast_or_null<FieldDecl>(FindInstantiatedDecl(
Init->getMemberLocation(),
@@ -2811,8 +2842,7 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
continue;
}
- MultiInitializer MultiInit(CreateMultiInitializer(NewArgs, Init));
- NewInit = BuildMemberInitializer(Member, MultiInit,
+ NewInit = BuildMemberInitializer(Member, TempInit.take(),
Init->getSourceLocation());
} else if (Init->isIndirectMemberInitializer()) {
IndirectFieldDecl *IndirectMember =
@@ -2826,8 +2856,7 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
continue;
}
- MultiInitializer MultiInit(CreateMultiInitializer(NewArgs, Init));
- NewInit = BuildMemberInitializer(IndirectMember, MultiInit,
+ NewInit = BuildMemberInitializer(IndirectMember, TempInit.take(),
Init->getSourceLocation());
}
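
With the isDelegatingInitializer branch above, instantiating a constructor can now rebuild a delegating mem-initializer via BuildDelegatingInitializer. A small sketch (illustrative names):

    template<typename T>
    struct W {
      W(T t) : value(t) {}
      W() : W(T()) {}        // delegating initializer: type substituted,
                             // then rebuilt by BuildDelegatingInitializer
      T value;
    };

    W<int> w;                // instantiates the delegating constructor
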
@@ -2835,9 +2864,6 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
AnyErrors = true;
New->setInvalidDecl();
} else {
- // FIXME: It would be nice if ASTOwningVector had a release function.
- NewArgs.take();
-
NewInits.push_back(NewInit.get());
}
}
@@ -2850,6 +2876,45 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
AnyErrors);
}
+ExprResult Sema::SubstInitializer(Expr *Init,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool CXXDirectInit) {
+ // Initializers are instantiated like expressions, except that various outer
+ // layers are stripped.
+ if (!Init)
+ return Owned(Init);
+
+ if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init))
+ Init = ExprTemp->getSubExpr();
+
+ while (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(Init))
+ Init = Binder->getSubExpr();
+
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Init))
+ Init = ICE->getSubExprAsWritten();
+
+ // If this is a direct-initializer, we take apart CXXConstructExprs.
+ // Everything else is passed through.
+ CXXConstructExpr *Construct;
+ if (!CXXDirectInit || !(Construct = dyn_cast<CXXConstructExpr>(Init)) ||
+ isa<CXXTemporaryObjectExpr>(Construct))
+ return SubstExpr(Init, TemplateArgs);
+
+ ASTOwningVector<Expr*> NewArgs(*this);
+ if (SubstExprs(Construct->getArgs(), Construct->getNumArgs(), true,
+ TemplateArgs, NewArgs))
+ return ExprError();
+
+ // Treat an empty initializer like none.
+ if (NewArgs.empty())
+ return Owned((Expr*)0);
+
+ // Build a ParenListExpr to represent anything else.
+ // FIXME: Fake locations!
+ SourceLocation Loc = PP.getLocForEndOfToken(Init->getLocStart());
+ return ActOnParenListExpr(Loc, Loc, move_arg(NewArgs));
+}
+
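
SubstInitializer, added above, peels off the wrappers the parser and Sema layered onto a written initializer so the original arguments can be re-substituted. A hypothetical walk-through:

    template<typename T>
    struct Holder {
      Holder() : member(1, 2) {}   // written as a two-argument direct-init
      T member;
    };

    // After the template definition is parsed, the initializer is a
    // CXXConstructExpr, possibly under ExprWithCleanups,
    // CXXBindTemporaryExpr, and implicit casts. SubstInitializer strips
    // those layers, substitutes the two written arguments, and returns
    // them as a ParenListExpr for re-analysis in Holder<T>'s instantiation.
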
// TODO: this could be templated if the various decl types used the
// same method name.
static bool isInstantiationOf(ClassTemplateDecl *Pattern,
@@ -3090,7 +3155,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
DeclContext *ParentDC = D->getDeclContext();
if (isa<ParmVarDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D) ||
- (ParentDC->isFunctionOrMethod() && ParentDC->isDependentContext())) {
+ (ParentDC->isFunctionOrMethod() && ParentDC->isDependentContext()) ||
+ (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
// D is a local of some kind. Look into the map of local
// declarations to their instantiations.
typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
@@ -3120,75 +3186,49 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
if (!Record->isDependentContext())
return D;
- // If the RecordDecl is actually the injected-class-name or a
- // "templated" declaration for a class template, class template
- // partial specialization, or a member class of a class template,
- // substitute into the injected-class-name of the class template
- // or partial specialization to find the new DeclContext.
- QualType T;
+ // Determine whether this record is the "templated" declaration describing
+ // a class template or class template partial specialization.
ClassTemplateDecl *ClassTemplate = Record->getDescribedClassTemplate();
-
- if (ClassTemplate) {
- T = ClassTemplate->getInjectedClassNameSpecialization();
- } else if (ClassTemplatePartialSpecializationDecl *PartialSpec
- = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record)) {
- ClassTemplate = PartialSpec->getSpecializedTemplate();
-
- // If we call SubstType with an InjectedClassNameType here we
- // can end up in an infinite loop.
- T = Context.getTypeDeclType(Record);
- assert(isa<InjectedClassNameType>(T) &&
- "type of partial specialization is not an InjectedClassNameType");
- T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
- }
-
- if (!T.isNull()) {
- // Substitute into the injected-class-name to get the type
- // corresponding to the instantiation we want, which may also be
- // the current instantiation (if we're in a template
- // definition). This substitution should never fail, since we
- // know we can instantiate the injected-class-name or we
- // wouldn't have gotten to the injected-class-name!
-
- // FIXME: Can we use the CurrentInstantiationScope to avoid this
- // extra instantiation in the common case?
- T = SubstType(T, TemplateArgs, Loc, DeclarationName());
- assert(!T.isNull() && "Instantiation of injected-class-name cannot fail.");
-
- if (!T->isDependentType()) {
- assert(T->isRecordType() && "Instantiation must produce a record type");
- return T->getAs<RecordType>()->getDecl();
+ if (ClassTemplate)
+ ClassTemplate = ClassTemplate->getCanonicalDecl();
+ else if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record))
+ ClassTemplate = PartialSpec->getSpecializedTemplate()->getCanonicalDecl();
+
+ // Walk the current context to find either the record or an instantiation of
+ // it.
+ DeclContext *DC = CurContext;
+ while (!DC->isFileContext()) {
+ // If we're performing substitution while we're inside the template
+ // definition, we'll find our own context. We're done.
+ if (DC->Equals(Record))
+ return Record;
+
+ if (CXXRecordDecl *InstRecord = dyn_cast<CXXRecordDecl>(DC)) {
+ // Check whether we're in the process of instantiating a class template
+ // specialization of the template we're mapping.
+ if (ClassTemplateSpecializationDecl *InstSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(InstRecord)){
+ ClassTemplateDecl *SpecTemplate = InstSpec->getSpecializedTemplate();
+ if (ClassTemplate && isInstantiationOf(ClassTemplate, SpecTemplate))
+ return InstRecord;
+ }
+
+ // Check whether we're in the process of instantiating a member class.
+ if (isInstantiationOf(Record, InstRecord))
+ return InstRecord;
}
-
- // We are performing "partial" template instantiation to create
- // the member declarations for the members of a class template
- // specialization. Therefore, D is actually referring to something
- // in the current instantiation. Look through the current
- // context, which contains actual instantiations, to find the
- // instantiation of the "current instantiation" that D refers
- // to.
- bool SawNonDependentContext = false;
- for (DeclContext *DC = CurContext; !DC->isFileContext();
- DC = DC->getParent()) {
- if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(DC))
- if (isInstantiationOf(ClassTemplate,
- Spec->getSpecializedTemplate()))
- return Spec;
-
- if (!DC->isDependentContext())
- SawNonDependentContext = true;
+
+
+ // Move to the outer template scope.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) {
+ if (FD->getFriendObjectKind() && FD->getDeclContext()->isFileContext()){
+ DC = FD->getLexicalDeclContext();
+ continue;
+ }
}
-
- // We're performing "instantiation" of a member of the current
- // instantiation while we are type-checking the
- // definition. Compute the declaration context and return that.
- assert(!SawNonDependentContext &&
- "No dependent context while instantiating record");
- DeclContext *DC = computeDeclContext(T);
- assert(DC &&
- "Unable to find declaration for the current instantiation");
- return cast<CXXRecordDecl>(DC);
+
+ DC = DC->getParent();
}
// Fall through to deal with other dependent record types (e.g.,
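
The rewritten mapping above walks outward from CurContext looking for the record or an instantiation of it (or of its described class template), instead of substituting into the injected-class-name. The situation it resolves, sketched:

    template<typename T>
    struct Outer {
      struct Inner { void f(); };
      Inner make();   // 'Inner' names a member of the current instantiation
    };

    // While instantiating Outer<int>::make, FindInstantiatedDecl maps the
    // templated Outer<T>::Inner to Outer<int>::Inner by scanning the
    // enclosing contexts; the new FunctionDecl hop additionally handles
    // friends that are lexically nested in a class but declared at file
    // scope.
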
@@ -3260,6 +3300,20 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
<< D->getDeclName()
<< Context.getTypeDeclType(cast<CXXRecordDecl>(ParentDC));
Diag(D->getLocation(), diag::note_non_instantiated_member_here);
+ } else if (EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
+ // This enumeration constant was found when the template was defined,
+ // but can't be found in the instantiation. This can happen if an
+ // unscoped enumeration member is explicitly specialized.
+ EnumDecl *Enum = cast<EnumDecl>(ED->getLexicalDeclContext());
+ EnumDecl *Spec = cast<EnumDecl>(FindInstantiatedDecl(Loc, Enum,
+ TemplateArgs));
+ assert(Spec->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization);
+ Diag(Loc, diag::err_enumerator_does_not_exist)
+ << D->getDeclName()
+ << Context.getTypeDeclType(cast<TypeDecl>(Spec->getDeclContext()));
+ Diag(Spec->getLocation(), diag::note_enum_specialized_here)
+ << Context.getTypeDeclType(Spec);
} else {
// We should have found something, but didn't.
llvm_unreachable("Unable to find instantiation of declaration!");
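
The EnumConstantDecl branch above fires when an enumerator that was visible at template definition time is missing from the instantiation because the member enumeration was explicitly specialized. Roughly (exact specialization syntax hedged, names assumed):

    template<typename T>
    struct X {
      enum E : int;   // opaque member enumeration, fixed underlying type
      E get();
    };
    template<typename T> enum X<T>::E : int { big };
    template<typename T> typename X<T>::E X<T>::get() { return big; }

    template<> enum X<long>::E : int { small };   // 'big' is gone here

    X<long>::E e = X<long>().get();   // error: enumerator 'big' does not
                                      // exist; note at the specialization
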
@@ -3312,12 +3366,12 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
// Don't try to instantiate declarations if the most recent redeclaration
// is invalid.
- if (Var->getMostRecentDeclaration()->isInvalidDecl())
+ if (Var->getMostRecentDecl()->isInvalidDecl())
continue;
// Check if the most recent declaration has changed the specialization kind
// and removed the need for implicit instantiation.
- switch (Var->getMostRecentDeclaration()->getTemplateSpecializationKind()) {
+ switch (Var->getMostRecentDecl()->getTemplateSpecializationKind()) {
case TSK_Undeclared:
llvm_unreachable("Cannot instantiate an undeclared specialization.");
case TSK_ExplicitInstantiationDeclaration:
@@ -3326,7 +3380,7 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
case TSK_ExplicitInstantiationDefinition:
// We only need an instantiation if the pending instantiation *is* the
// explicit instantiation.
- if (Var != Var->getMostRecentDeclaration()) continue;
+ if (Var != Var->getMostRecentDecl()) continue;
case TSK_ImplicitInstantiation:
break;
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
index e383db9..a40100c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -73,15 +73,6 @@ namespace {
return true;
}
- // \brief Record occurrences of function and non-type template parameter
- // packs in a block-captured expression.
- bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
- if (E->getDecl()->isParameterPack())
- Unexpanded.push_back(std::make_pair(E->getDecl(), E->getLocation()));
-
- return true;
- }
-
/// \brief Record occurrences of template template parameter packs.
bool TraverseTemplateName(TemplateName Template) {
if (TemplateTemplateParmDecl *TTP
@@ -93,6 +84,22 @@ namespace {
return inherited::TraverseTemplateName(Template);
}
+ /// \brief Suppress traversal into Objective-C container literal
+ /// elements that are pack expansions.
+ bool TraverseObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ if (!E->containsUnexpandedParameterPack())
+ return true;
+
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ ObjCDictionaryElement Element = E->getKeyValueElement(I);
+ if (Element.isPackExpansion())
+ continue;
+
+ TraverseStmt(Element.Key);
+ TraverseStmt(Element.Value);
+ }
+ return true;
+ }
//------------------------------------------------------------------------
// Pruning the search for unexpanded parameter packs.
//------------------------------------------------------------------------
@@ -155,10 +162,13 @@ namespace {
/// \brief Diagnose all of the unexpanded parameter packs in the given
/// vector.
-static void
-DiagnoseUnexpandedParameterPacks(Sema &S, SourceLocation Loc,
- Sema::UnexpandedParameterPackContext UPPC,
- const SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+void
+Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
+ UnexpandedParameterPackContext UPPC,
+ ArrayRef<UnexpandedParameterPack> Unexpanded) {
+ if (Unexpanded.empty())
+ return;
+
SmallVector<SourceLocation, 4> Locations;
SmallVector<IdentifierInfo *, 4> Names;
llvm::SmallPtrSet<IdentifierInfo *, 4> NamesKnown;
@@ -179,13 +189,13 @@ DiagnoseUnexpandedParameterPacks(Sema &S, SourceLocation Loc,
}
DiagnosticBuilder DB
- = Names.size() == 0? S.Diag(Loc, diag::err_unexpanded_parameter_pack_0)
+ = Names.size() == 0? Diag(Loc, diag::err_unexpanded_parameter_pack_0)
<< (int)UPPC
- : Names.size() == 1? S.Diag(Loc, diag::err_unexpanded_parameter_pack_1)
+ : Names.size() == 1? Diag(Loc, diag::err_unexpanded_parameter_pack_1)
<< (int)UPPC << Names[0]
- : Names.size() == 2? S.Diag(Loc, diag::err_unexpanded_parameter_pack_2)
+ : Names.size() == 2? Diag(Loc, diag::err_unexpanded_parameter_pack_2)
<< (int)UPPC << Names[0] << Names[1]
- : S.Diag(Loc, diag::err_unexpanded_parameter_pack_3_or_more)
+ : Diag(Loc, diag::err_unexpanded_parameter_pack_3_or_more)
<< (int)UPPC << Names[0] << Names[1];
for (unsigned I = 0, N = Locations.size(); I != N; ++I)
@@ -205,7 +215,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(
T->getTypeLoc());
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(*this, Loc, UPPC, Unexpanded);
+ DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
return true;
}
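
All of these call sites now feed the public Sema::DiagnoseUnexpandedParameterPacks, which collects the distinct pack names and selects one of the 0/1/2/3-or-more diagnostics. The class of error produced, as a minimal example:

    template<typename ...Ts>
    struct List {
      Ts value;   // error: declaration type contains unexpanded
                  // parameter pack 'Ts'
    };
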
@@ -220,7 +230,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E);
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(*this, E->getLocStart(), UPPC, Unexpanded);
+ DiagnoseUnexpandedParameterPacks(E->getLocStart(), UPPC, Unexpanded);
return true;
}
@@ -237,7 +247,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseNestedNameSpecifier(SS.getScopeRep());
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(*this, SS.getRange().getBegin(),
+ DiagnoseUnexpandedParameterPacks(SS.getRange().getBegin(),
UPPC, Unexpanded);
return true;
}
@@ -274,7 +284,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseType(NameInfo.getName().getCXXNameType());
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(*this, NameInfo.getLoc(), UPPC, Unexpanded);
+ DiagnoseUnexpandedParameterPacks(NameInfo.getLoc(), UPPC, Unexpanded);
return true;
}
@@ -289,7 +299,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseTemplateName(Template);
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(*this, Loc, UPPC, Unexpanded);
+ DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
return true;
}
@@ -303,7 +313,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseTemplateArgumentLoc(Arg);
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- DiagnoseUnexpandedParameterPacks(*this, Arg.getLocation(), UPPC, Unexpanded);
+ DiagnoseUnexpandedParameterPacks(Arg.getLocation(), UPPC, Unexpanded);
return true;
}
@@ -329,6 +339,24 @@ void Sema::collectUnexpandedParameterPacks(TypeLoc TL,
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(TL);
}
+void Sema::collectUnexpandedParameterPacks(CXXScopeSpec &SS,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ NestedNameSpecifier *Qualifier = SS.getScopeRep();
+ if (!Qualifier)
+ return;
+
+ NestedNameSpecifierLoc QualifierLoc(Qualifier, SS.location_data());
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseNestedNameSpecifierLoc(QualifierLoc);
+}
+
+void Sema::collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseDeclarationNameInfo(NameInfo);
+}
+
+
ParsedTemplateArgument
Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc) {
@@ -367,7 +395,6 @@ Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
return Arg.getTemplatePackExpansion(EllipsisLoc);
}
llvm_unreachable("Unhandled template argument kind?");
- return ParsedTemplateArgument();
}
TypeResult Sema::ActOnPackExpansion(ParsedType Type,
@@ -611,7 +638,6 @@ unsigned Sema::getNumArgumentsInExpansion(QualType T,
}
llvm_unreachable("No unexpanded parameter packs in type expansion.");
- return 0;
}
bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
@@ -641,6 +667,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_char16:
case TST_char32:
case TST_int:
+ case TST_int128:
case TST_half:
case TST_float:
case TST_double:
@@ -674,7 +701,6 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
// declarator-id (conceptually), so the parser should not invoke this
// routine at this time.
llvm_unreachable("Could not have seen this kind of declarator chunk");
- break;
case DeclaratorChunk::MemberPointer:
if (Chunk.Mem.Scope().getScopeRep() &&
@@ -687,6 +713,19 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
return false;
}
+namespace {
+
+// Callback to only accept typo corrections that refer to parameter packs.
+class ParameterPackValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ return ND && ND->isParameterPack();
+ }
+};
+
+}
+
/// \brief Called when an expression computing the size of a parameter pack
/// is parsed.
///
@@ -712,6 +751,7 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
LookupName(R, S);
NamedDecl *ParameterPack = 0;
+ ParameterPackValidatorCCC Validator;
switch (R.getResultKind()) {
case LookupResult::Found:
ParameterPack = R.getFoundDecl();
@@ -720,19 +760,16 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(),
- R.getLookupKind(), S, 0, 0,
- false, CTC_NoKeywords)) {
- if (NamedDecl *CorrectedResult = Corrected.getCorrectionDecl())
- if (CorrectedResult->isParameterPack()) {
- std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
- ParameterPack = CorrectedResult;
- Diag(NameLoc, diag::err_sizeof_pack_no_pack_name_suggest)
- << &Name << CorrectedQuotedStr
- << FixItHint::CreateReplacement(
- NameLoc, Corrected.getAsString(getLangOptions()));
- Diag(ParameterPack->getLocation(), diag::note_parameter_pack_here)
- << CorrectedQuotedStr;
- }
+ R.getLookupKind(), S, 0,
+ Validator)) {
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
+ ParameterPack = Corrected.getCorrectionDecl();
+ Diag(NameLoc, diag::err_sizeof_pack_no_pack_name_suggest)
+ << &Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(
+ NameLoc, Corrected.getAsString(getLangOpts()));
+ Diag(ParameterPack->getLocation(), diag::note_parameter_pack_here)
+ << CorrectedQuotedStr;
}
case LookupResult::FoundOverloaded:
@@ -750,6 +787,8 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
return ExprError();
}
+ MarkAnyDeclReferenced(OpLoc, ParameterPack);
+
return new (Context) SizeOfPackExpr(Context.getSizeType(), OpLoc,
ParameterPack, NameLoc, RParenLoc);
}
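
With ParameterPackValidatorCCC plugged into CorrectTypo, recovery for sizeof... only ever suggests parameter packs, and the resulting pack is marked referenced. An assumed example of the recovery:

    template<typename ...Types>
    struct Count {
      static const unsigned value = sizeof...(Type);
      // error: 'Type' does not refer to the name of a parameter pack;
      // fix-it suggests 'Types', the only candidate the callback accepts
    };
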
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
index 2b563a5..c41df82 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Basic/OpenCL.h"
@@ -25,8 +26,10 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Lookup.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -503,7 +506,7 @@ static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
break;
case AttributeList::AT_ns_returns_retained:
- if (!state.getSema().getLangOptions().ObjCAutoRefCount)
+ if (!state.getSema().getLangOpts().ObjCAutoRefCount)
break;
// fallthrough
@@ -546,7 +549,7 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
// faking up the function chunk is still the right thing to do.
// Otherwise, we need to fake up a function declarator.
- SourceLocation loc = declarator.getSourceRange().getBegin();
+ SourceLocation loc = declarator.getLocStart();
// ...and *prepend* it to the declarator.
declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction(
@@ -555,6 +558,8 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/*args*/ 0, 0,
/*type quals*/ 0,
/*ref-qualifier*/true, SourceLocation(),
+ /*const qualifier*/SourceLocation(),
+ /*volatile qualifier*/SourceLocation(),
/*mutable qualifier*/SourceLocation(),
/*EH*/ EST_None, SourceLocation(), 0, 0, 0, 0,
/*parens*/ loc, loc,
@@ -580,7 +585,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
const DeclSpec &DS = declarator.getDeclSpec();
SourceLocation DeclLoc = declarator.getIdentifierLoc();
if (DeclLoc.isInvalid())
- DeclLoc = DS.getSourceRange().getBegin();
+ DeclLoc = DS.getLocStart();
ASTContext &Context = S.Context;
@@ -637,7 +642,10 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// If this is a missing declspec in a block literal return context, then it
// is inferred from the return statements inside the block.
- if (isOmittedBlockReturnType(declarator)) {
+ // The declspec is always missing in a lambda expr context; it is either
+ // specified with a trailing return type or inferred.
+ if (declarator.getContext() == Declarator::LambdaExprContext ||
+ isOmittedBlockReturnType(declarator)) {
Result = Context.DependentTy;
break;
}
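
Treating LambdaExprContext like an omitted block return type gives lambdas a DependentTy placeholder result, to be replaced by a trailing return type or by return-statement inference. A quick illustration:

    auto add = [](int x, int y) { return x + y; };   // result inferred
    auto widen = [](int x) -> long { return x; };    // trailing return type
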
@@ -649,13 +657,13 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// allowed to be completely missing a declspec. This is handled in the
// parser already though by it pretending to have seen an 'int' in this
// case.
- if (S.getLangOptions().ImplicitInt) {
+ if (S.getLangOpts().ImplicitInt) {
// In C89 mode, we only warn if there is a completely missing declspec
// when one is not allowed.
if (DS.isEmpty()) {
S.Diag(DeclLoc, diag::ext_missing_declspec)
<< DS.getSourceRange()
- << FixItHint::CreateInsertion(DS.getSourceRange().getBegin(), "int");
+ << FixItHint::CreateInsertion(DS.getLocStart(), "int");
}
} else if (!DS.hasTypeSpecifier()) {
// C99 and C++ require a type specifier. For example, C99 6.7.2p2 says:
@@ -663,8 +671,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// specifiers in each declaration, and in the specifier-qualifier list in
// each struct declaration and type name."
// FIXME: Does Microsoft really have the implicit int extension in C++?
- if (S.getLangOptions().CPlusPlus &&
- !S.getLangOptions().MicrosoftExt) {
+ if (S.getLangOpts().CPlusPlus &&
+ !S.getLangOpts().MicrosoftExt) {
S.Diag(DeclLoc, diag::err_missing_type_specifier)
<< DS.getSourceRange();
@@ -689,9 +697,10 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.LongLongTy;
// long long is a C99 feature.
- if (!S.getLangOptions().C99 &&
- !S.getLangOptions().CPlusPlus0x)
- S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_longlong);
+ if (!S.getLangOpts().C99)
+ S.Diag(DS.getTypeSpecWidthLoc(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_longlong);
break;
}
} else {
@@ -703,14 +712,21 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.UnsignedLongLongTy;
// long long is a C99 feature.
- if (!S.getLangOptions().C99 &&
- !S.getLangOptions().CPlusPlus0x)
- S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_longlong);
+ if (!S.getLangOpts().C99)
+ S.Diag(DS.getTypeSpecWidthLoc(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_longlong);
break;
}
}
break;
}
+ case DeclSpec::TST_int128:
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
+ Result = Context.UnsignedInt128Ty;
+ else
+ Result = Context.Int128Ty;
+ break;
case DeclSpec::TST_half: Result = Context.HalfTy; break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
case DeclSpec::TST_double:
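
The added TST_int128 case maps the __int128 type specifier onto the built-in 128-bit integer types, honoring the sign specifier. Surface syntax, for illustration:

    __int128 big = 1;             // Context.Int128Ty
    unsigned __int128 ubig = 2;   // TSS_unsigned selects UnsignedInt128Ty
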
@@ -719,7 +735,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
else
Result = Context.DoubleTy;
- if (S.getLangOptions().OpenCL && !S.getOpenCLOptions().cl_khr_fp64) {
+ if (S.getLangOpts().OpenCL && !S.getOpenCLOptions().cl_khr_fp64) {
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_double_requires_fp64);
declarator.setInvalidType(true);
}
@@ -757,9 +773,6 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
ElaboratedTypeKeyword Keyword
= ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result);
-
- if (D->isInvalidDecl())
- declarator.setInvalidType(true);
break;
}
case DeclSpec::TST_typename: {
@@ -875,7 +888,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// Handle complex types.
if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
- if (S.getLangOptions().Freestanding)
+ if (S.getLangOpts().Freestanding)
S.Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex);
Result = Context.getComplexType(Result);
} else if (DS.isTypeAltiVecVector()) {
@@ -967,6 +980,25 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
TypeQuals &= ~DeclSpec::TQ_volatile;
}
+ // C90 6.5.3 constraints: "The same type qualifier shall not appear more
+ // than once in the same specifier-list or qualifier-list, either directly
+ // or via one or more typedefs."
+ if (!S.getLangOpts().C99 && !S.getLangOpts().CPlusPlus
+ && TypeQuals & Result.getCVRQualifiers()) {
+ if (TypeQuals & DeclSpec::TQ_const && Result.isConstQualified()) {
+ S.Diag(DS.getConstSpecLoc(), diag::ext_duplicate_declspec)
+ << "const";
+ }
+
+ if (TypeQuals & DeclSpec::TQ_volatile && Result.isVolatileQualified()) {
+ S.Diag(DS.getVolatileSpecLoc(), diag::ext_duplicate_declspec)
+ << "volatile";
+ }
+
+ // C90 doesn't have restrict, so it doesn't force us to produce a warning
+ // in this case.
+ }
+
Qualifiers Quals = Qualifiers::fromCVRMask(TypeQuals);
Result = Context.getQualifiedType(Result, Quals);
}
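
In C90 mode (neither C99 nor C++), the new block warns when a cv-qualifier in the specifier list repeats one already carried by the type, which in practice means duplication through a typedef. A sketch, valid as C90:

    typedef const int CInt;
    const CInt x = 0;   // ext_duplicate_declspec: 'const' appears both
                        // directly and via the typedef
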
@@ -1049,12 +1081,14 @@ static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
} else if (type->isObjCARCImplicitlyUnretainedType()) {
implicitLifetime = Qualifiers::OCL_ExplicitNone;
- // If we are in an unevaluated context, like sizeof, assume ExplicitNone and
- // don't give error.
+ // If we are in an unevaluated context, like sizeof, skip adding a
+ // qualification.
} else if (S.ExprEvalContexts.back().Context == Sema::Unevaluated) {
- implicitLifetime = Qualifiers::OCL_ExplicitNone;
+ return type;
- // If that failed, give an error and recover using __autoreleasing.
+ // If that failed, give an error and recover using __strong. __strong
+ // is the option most likely to prevent spurious second-order diagnostics,
+ // like when binding a reference to a field.
} else {
// These types can show up in private ivars in system headers, so
// we need this to not be an error in those cases. Instead we
@@ -1066,7 +1100,7 @@ static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
} else {
S.Diag(loc, diag::err_arc_indirect_no_ownership) << type << isReference;
}
- implicitLifetime = Qualifiers::OCL_Autoreleasing;
+ implicitLifetime = Qualifiers::OCL_Strong;
}
assert(implicitLifetime && "didn't infer any lifetime!");
@@ -1100,7 +1134,7 @@ QualType Sema::BuildPointerType(QualType T,
assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType");
// In ARC, it is forbidden to build pointers to unqualified pointers.
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ false);
// Build the pointer type.
@@ -1157,7 +1191,7 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
}
// In ARC, it is forbidden to build references to unqualified pointers.
- if (getLangOptions().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount)
T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ true);
// Handle restrict on references.
@@ -1168,23 +1202,12 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
/// Check whether the specified array size makes the array type a VLA. If so,
/// return true, if not, return the size of the array in SizeVal.
-static bool isArraySizeVLA(Expr *ArraySize, llvm::APSInt &SizeVal, Sema &S) {
- // If the size is an ICE, it certainly isn't a VLA.
- if (ArraySize->isIntegerConstantExpr(SizeVal, S.Context))
- return false;
-
- // If we're in a GNU mode (like gnu99, but not c99) accept any evaluatable
- // value as an extension.
- Expr::EvalResult Result;
- if (S.LangOpts.GNUMode && ArraySize->Evaluate(Result, S.Context)) {
- if (!Result.hasSideEffects() && Result.Val.isInt()) {
- SizeVal = Result.Val.getInt();
- S.Diag(ArraySize->getLocStart(), diag::ext_vla_folded_to_constant);
- return false;
- }
- }
-
- return true;
+static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
+ // If the size is an ICE, it certainly isn't a VLA. If we're in a GNU mode
+ // (like gnu99, but not c99) accept any evaluatable value as an extension.
+ return S.VerifyIntegerConstantExpression(
+ ArraySize, &SizeVal, S.PDiag(), S.LangOpts.GNUMode,
+ S.PDiag(diag::ext_vla_folded_to_constant)).isInvalid();
}
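
The rewritten helper defers to VerifyIntegerConstantExpression: a strict ICE is never a VLA, and in GNU modes any foldable size is accepted with the folded-to-constant extension warning. An assumed gnu89-mode example:

    static const int N = 8;   // not an ICE in C
    void f(void) {
      int a[N];   // gnu89: folds to 8, warns ext_vla_folded_to_constant;
                  // strict modes treat this as a VLA instead
    }
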
@@ -1210,7 +1233,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
SourceRange Brackets, DeclarationName Entity) {
SourceLocation Loc = Brackets.getBegin();
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// C++ [dcl.array]p1:
// T is called the array element type; this type shall not be a reference
// type, the (possibly cv-qualified) type void, a function type or an
@@ -1262,6 +1285,13 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
return QualType();
}
+ // Do placeholder conversions on the array size expression.
+ if (ArraySize && ArraySize->hasPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(ArraySize);
+ if (Result.isInvalid()) return QualType();
+ ArraySize = Result.take();
+ }
+
// Do lvalue-to-rvalue conversions on the array size expression.
if (ArraySize && !ArraySize->isRValue()) {
ExprResult Result = DefaultLvalueConversion(ArraySize);
@@ -1272,14 +1302,15 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
}
// C99 6.7.5.2p1: The size expression shall have integer type.
- // TODO: in theory, if we were insane, we could allow contextual
- // conversions to integer type here.
- if (ArraySize && !ArraySize->isTypeDependent() &&
+ // C++11 allows contextual conversions to such types.
+ if (!getLangOpts().CPlusPlus0x &&
+ ArraySize && !ArraySize->isTypeDependent() &&
!ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
<< ArraySize->getType() << ArraySize->getSourceRange();
return QualType();
}
+
llvm::APSInt ConstVal(Context.getTypeSize(Context.getSizeType()));
if (!ArraySize) {
if (ASM == ArrayType::Star)
@@ -1288,11 +1319,19 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
T = Context.getIncompleteArrayType(T, ASM, Quals);
} else if (ArraySize->isTypeDependent() || ArraySize->isValueDependent()) {
T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals, Brackets);
- } else if (!T->isDependentType() && !T->isIncompleteType() &&
- !T->isConstantSizeType()) {
+ } else if ((!T->isDependentType() && !T->isIncompleteType() &&
+ !T->isConstantSizeType()) ||
+ isArraySizeVLA(*this, ArraySize, ConstVal)) {
+ // Even in C++11, don't allow contextual conversions in the array bound
+ // of a VLA.
+ if (getLangOpts().CPlusPlus0x &&
+ !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
+ Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
+ << ArraySize->getType() << ArraySize->getSourceRange();
+ return QualType();
+ }
+
// C99: an array with an element type that has a non-constant-size is a VLA.
- T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
- } else if (isArraySizeVLA(ArraySize, ConstVal, *this)) {
// C99: an array with a non-ICE size is a VLA. We accept any expression
// that we can fold to a non-zero positive value as an extension.
T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
@@ -1315,6 +1354,13 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
isSFINAEContext()? diag::err_typecheck_zero_array_size
: diag::ext_typecheck_zero_array_size)
<< ArraySize->getSourceRange();
+
+ if (ASM == ArrayType::Static) {
+ Diag(ArraySize->getLocStart(),
+ diag::warn_typecheck_zero_static_array_size)
+ << ArraySize->getSourceRange();
+ ASM = ArrayType::Normal;
+ }
} else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
!T->isIncompleteType()) {
// Is the array too large?
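
Besides the existing zero-size warning, an array parameter declared with C99 'static' and size zero now draws its own warning and the modifier is dropped, since 'static' asserts a minimum element count. For instance (hypothetical):

    void g(int a[static 0]);   // warn_typecheck_zero_static_array_size:
                               // 'static' is meaningless here; the array
                               // is demoted to ArrayType::Normal
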
@@ -1329,7 +1375,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
T = Context.getConstantArrayType(T, ConstVal, ASM, Quals);
}
// If this is not C99, extwarn about VLA's and C99 array size modifiers.
- if (!getLangOptions().C99) {
+ if (!getLangOpts().C99) {
if (T->isVariableArrayType()) {
// Prohibit the use of non-POD types in VLAs.
QualType BaseT = Context.getBaseElementType(T);
@@ -1349,9 +1395,9 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
else
Diag(Loc, diag::ext_vla);
} else if (ASM != ArrayType::Normal || Quals != 0)
- Diag(Loc,
- getLangOptions().CPlusPlus? diag::err_c99_array_usage_cxx
- : diag::ext_c99_array_usage);
+ Diag(Loc,
+ getLangOpts().CPlusPlus? diag::err_c99_array_usage_cxx
+ : diag::ext_c99_array_usage) << ASM;
}
return T;
@@ -1412,6 +1458,8 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
///
/// \param Variadic Whether this is a variadic function type.
///
+/// \param HasTrailingReturn Whether this function has a trailing return type.
+///
/// \param Quals The cvr-qualifiers to be applied to the function type.
///
/// \param Loc The location of the entity whose type involves this
@@ -1426,7 +1474,8 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
QualType Sema::BuildFunctionType(QualType T,
QualType *ParamTypes,
unsigned NumParamTypes,
- bool Variadic, unsigned Quals,
+ bool Variadic, bool HasTrailingReturn,
+ unsigned Quals,
RefQualifierKind RefQualifier,
SourceLocation Loc, DeclarationName Entity,
FunctionType::ExtInfo Info) {
@@ -1465,6 +1514,7 @@ QualType Sema::BuildFunctionType(QualType T,
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = Variadic;
+ EPI.HasTrailingReturn = HasTrailingReturn;
EPI.TypeQuals = Quals;
EPI.RefQualifier = RefQualifier;
EPI.ExtInfo = Info;
@@ -1476,7 +1526,6 @@ QualType Sema::BuildFunctionType(QualType T,
///
/// \param T the type to which the member pointer refers.
/// \param Class the class type into which the member pointer points.
-/// \param CVR Qualifiers applied to the member pointer type
/// \param Loc the location where this type begins
/// \param Entity the name of the entity that will have this member pointer type
///
@@ -1764,18 +1813,19 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
if (D.getAttributes())
distributeTypeAttrsFromDeclarator(state, T);
- // C++0x [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context.
- // In C++0x, a function declarator using 'auto' must have a trailing return
+ // C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context.
+ // In C++11, a function declarator using 'auto' must have a trailing return
// type (this is checked later) and we can skip this. In other languages
// using auto, we need to check regardless.
if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
- (!SemaRef.getLangOptions().CPlusPlus0x || !D.isFunctionDeclarator())) {
+ (!SemaRef.getLangOpts().CPlusPlus0x || !D.isFunctionDeclarator())) {
int Error = -1;
switch (D.getContext()) {
case Declarator::KNRTypeListContext:
llvm_unreachable("K&R type lists aren't allowed in C++");
- break;
+ case Declarator::LambdaExprContext:
+ llvm_unreachable("Can't specify a type specifier in lambda grammar");
case Declarator::ObjCParameterContext:
case Declarator::ObjCResultContext:
case Declarator::PrototypeContext:
@@ -1808,6 +1858,9 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case Declarator::AliasTemplateContext:
Error = 9; // Type alias
break;
+ case Declarator::TrailingReturnContext:
+ Error = 10; // Function return type
+ break;
case Declarator::TypeNameContext:
Error = 11; // Generic
break;
@@ -1826,11 +1879,11 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
if (D.isFunctionDeclarator())
Error = 10;
- // C++0x [dcl.spec.auto]p2: 'auto' is always fine if the declarator
+ // C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator
// contains a trailing return type. That is only legal at the outermost
// level. Check all declarator chunks (outermost first) anyway, to give
// better diagnostics.
- if (SemaRef.getLangOptions().CPlusPlus0x && Error != -1) {
+ if (SemaRef.getLangOpts().CPlusPlus0x && Error != -1) {
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
unsigned chunkIndex = e - i - 1;
state.setCurrentChunkIndex(chunkIndex);
@@ -1851,20 +1904,28 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
<< Error;
T = SemaRef.Context.IntTy;
D.setInvalidType(true);
- }
+ } else
+ SemaRef.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::warn_cxx98_compat_auto_type_specifier);
}
- if (SemaRef.getLangOptions().CPlusPlus &&
+ if (SemaRef.getLangOpts().CPlusPlus &&
OwnedTagDecl && OwnedTagDecl->isCompleteDefinition()) {
// Check the contexts where C++ forbids the declaration of a new class
// or enumeration in a type-specifier-seq.
switch (D.getContext()) {
+ case Declarator::TrailingReturnContext:
+ // Class and enumeration definitions are syntactically not allowed in
+ // trailing return types.
+ llvm_unreachable("parser should not have allowed this");
+ break;
case Declarator::FileContext:
case Declarator::MemberContext:
case Declarator::BlockContext:
case Declarator::ForContext:
case Declarator::BlockLiteralContext:
- // C++0x [dcl.type]p3:
+ case Declarator::LambdaExprContext:
+ // C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration unless
// it appears in the type-id of an alias-declaration (7.1.3) that is not
// the declaration of a template-declaration.
@@ -1908,6 +1969,66 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
return T;
}
+static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
+ std::string Quals =
+ Qualifiers::fromCVRMask(FnTy->getTypeQuals()).getAsString();
+
+ switch (FnTy->getRefQualifier()) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ if (!Quals.empty())
+ Quals += ' ';
+ Quals += '&';
+ break;
+
+ case RQ_RValue:
+ if (!Quals.empty())
+ Quals += ' ';
+ Quals += "&&";
+ break;
+ }
+
+ return Quals;
+}
+
+/// Check that the function type T, which has a cv-qualifier or a ref-qualifier,
+/// can be contained within the declarator chunk DeclType, and produce an
+/// appropriate diagnostic if not.
+static void checkQualifiedFunction(Sema &S, QualType T,
+ DeclaratorChunk &DeclType) {
+ // C++98 [dcl.fct]p4 / C++11 [dcl.fct]p6: a function type with a
+ // cv-qualifier or a ref-qualifier can only appear at the topmost level
+ // of a type.
+ int DiagKind = -1;
+ switch (DeclType.Kind) {
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::MemberPointer:
+ // These cases are permitted.
+ return;
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Function:
+ // These cases don't allow function types at all; no need to diagnose the
+ // qualifiers separately.
+ return;
+ case DeclaratorChunk::BlockPointer:
+ DiagKind = 0;
+ break;
+ case DeclaratorChunk::Pointer:
+ DiagKind = 1;
+ break;
+ case DeclaratorChunk::Reference:
+ DiagKind = 2;
+ break;
+ }
+
+ assert(DiagKind != -1);
+ S.Diag(DeclType.Loc, diag::err_compound_qualified_function_type)
+ << DiagKind << isa<FunctionType>(T.IgnoreParens()) << T
+ << getFunctionQualifiersAsString(T->castAs<FunctionProtoType>());
+}
+
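
checkQualifiedFunction enforces C++11 [dcl.fct]p6: a cv- or ref-qualified function type may sit behind parentheses or a pointer to member, but not behind an ordinary pointer, reference, or block pointer. The split, in an illustrative sketch:

    typedef void F() const;
    struct S { };

    F S::*pmf;      // OK: qualified function type behind a pointer to member
    F *pf;          // error: pointer to qualified function type (DiagKind 1)
    void h(F &rf);  // error: reference to qualified function type (DiagKind 2)
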
static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
QualType declSpecType,
TypeSourceInfo *TInfo) {
@@ -1916,7 +2037,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
Declarator &D = state.getDeclarator();
Sema &S = state.getSema();
ASTContext &Context = S.Context;
- const LangOptions &LangOpts = S.getLangOptions();
+ const LangOptions &LangOpts = S.getLangOpts();
bool ImplicitlyNoexcept = false;
if (D.getName().getKind() == UnqualifiedId::IK_OperatorFunctionId &&
@@ -1939,6 +2060,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
D.getContext() == Declarator::AliasDeclContext ||
D.getContext() == Declarator::AliasTemplateContext;
+ // Does T refer to a function type with a cv-qualifier or a ref-qualifier?
+ bool IsQualifiedFunction = T->isFunctionProtoType() &&
+ (T->castAs<FunctionProtoType>()->getTypeQuals() != 0 ||
+ T->castAs<FunctionProtoType>()->getRefQualifier() != RQ_None);
+
// Walk the DeclTypeInfo, building the recursive type as we go.
// DeclTypeInfos are ordered from the identifier out, which is
// opposite of what we want :).
@@ -1946,8 +2072,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
unsigned chunkIndex = e - i - 1;
state.setCurrentChunkIndex(chunkIndex);
DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
+ if (IsQualifiedFunction) {
+ checkQualifiedFunction(S, T, DeclType);
+ IsQualifiedFunction = DeclType.Kind == DeclaratorChunk::Paren;
+ }
switch (DeclType.Kind) {
- default: llvm_unreachable("Unknown decltype!");
case DeclaratorChunk::Paren:
T = S.BuildParenType(T);
break;
@@ -2019,8 +2148,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
ASM = ArrayType::Normal;
D.setInvalidType(true);
}
- T = S.BuildArrayType(T, ASM, ArraySize,
- Qualifiers::fromCVRMask(ATI.TypeQuals),
+ T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals,
SourceRange(DeclType.Loc, DeclType.EndLoc), Name);
break;
}
@@ -2029,6 +2157,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// does not have a K&R-style identifier list), then the arguments are part
// of the type, otherwise the argument list is ().
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+ IsQualifiedFunction = FTI.TypeQuals || FTI.hasRefQualifier();
// Check for auto functions and trailing return type and adjust the
// return type accordingly.
@@ -2048,7 +2177,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
diag::err_trailing_return_in_parens)
<< T << D.getDeclSpec().getSourceRange();
D.setInvalidType(true);
- } else if (T.hasQualifiers() || !isa<AutoType>(T)) {
+ } else if (D.getContext() != Declarator::LambdaExprContext &&
+ (T.hasQualifiers() || !isa<AutoType>(T))) {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::err_trailing_return_without_auto)
<< T << D.getDeclSpec().getSourceRange();
@@ -2160,6 +2290,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = FTI.isVariadic;
+ EPI.HasTrailingReturn = FTI.TrailingReturnType;
EPI.TypeQuals = FTI.TypeQuals;
EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None
: FTI.RefQualifierIsLValueRef? RQ_LValue
@@ -2260,15 +2391,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
NoexceptExpr->getType()->getCanonicalTypeUnqualified() ==
Context.BoolTy) &&
"Parser should have made sure that the expression is boolean");
- SourceLocation ErrLoc;
- llvm::APSInt Dummy;
- if (!NoexceptExpr->isValueDependent() &&
- !NoexceptExpr->isIntegerConstantExpr(Dummy, Context, &ErrLoc,
- /*evaluated*/false))
- S.Diag(ErrLoc, diag::err_noexcept_needs_constant_expression)
- << NoexceptExpr->getSourceRange();
- else
- EPI.NoexceptExpr = NoexceptExpr;
+ if (!NoexceptExpr->isValueDependent())
+ NoexceptExpr = S.VerifyIntegerConstantExpression(NoexceptExpr, 0,
+ S.PDiag(diag::err_noexcept_needs_constant_expression),
+ /*AllowFold*/ false).take();
+ EPI.NoexceptExpr = NoexceptExpr;
}
} else if (FTI.getExceptionSpecType() == EST_None &&
ImplicitlyNoexcept && chunkIndex == 0) {
@@ -2303,7 +2430,6 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
llvm_unreachable("Nested-name-specifier must name a type");
- break;
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
@@ -2360,7 +2486,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// top-level template type arguments.
bool FreeFunction;
if (!D.getCXXScopeSpec().isSet()) {
- FreeFunction = (D.getContext() != Declarator::MemberContext ||
+ FreeFunction = ((D.getContext() != Declarator::MemberContext &&
+ D.getContext() != Declarator::LambdaExprContext) ||
D.getDeclSpec().isFriendSpecified());
} else {
DeclContext *DC = S.computeDeclContext(D.getCXXScopeSpec());
@@ -2370,6 +2497,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C++0x [dcl.constexpr]p8: A constexpr specifier for a non-static member
// function that is not a constructor declares that function to be const.
if (D.getDeclSpec().isConstexprSpecified() && !FreeFunction &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static &&
D.getName().getKind() != UnqualifiedId::IK_ConstructorName &&
D.getName().getKind() != UnqualifiedId::IK_ConstructorTemplateId &&
!(FnTy->getTypeQuals() & DeclSpec::TQ_const)) {
@@ -2381,86 +2509,57 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
FnTy->getNumArgs(), EPI);
}
- // C++0x [dcl.fct]p6:
- // A ref-qualifier shall only be part of the function type for a
- // non-static member function, the function type to which a pointer to
- // member refers, or the top-level function type of a function typedef
- // declaration.
- if ((FnTy->getTypeQuals() != 0 || FnTy->getRefQualifier()) &&
- !(D.getContext() == Declarator::TemplateTypeArgContext &&
- !D.isFunctionDeclarator()) && !IsTypedefName &&
- (FreeFunction ||
- D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)) {
- if (D.getContext() == Declarator::TemplateTypeArgContext) {
- // Accept qualified function types as template type arguments as a GNU
- // extension. This is also the subject of C++ core issue 547.
- std::string Quals;
- if (FnTy->getTypeQuals() != 0)
- Quals = Qualifiers::fromCVRMask(FnTy->getTypeQuals()).getAsString();
-
- switch (FnTy->getRefQualifier()) {
- case RQ_None:
- break;
-
- case RQ_LValue:
- if (!Quals.empty())
- Quals += ' ';
- Quals += '&';
- break;
-
- case RQ_RValue:
- if (!Quals.empty())
- Quals += ' ';
- Quals += "&&";
- break;
+ // C++11 [dcl.fct]p6 (w/DR1417):
+ // An attempt to specify a function type with a cv-qualifier-seq or a
+ // ref-qualifier (including by typedef-name) is ill-formed unless it is:
+ // - the function type for a non-static member function,
+ // - the function type to which a pointer to member refers,
+ // - the top-level function type of a function typedef declaration or
+ // alias-declaration,
+ // - the type-id in the default argument of a type-parameter, or
+ // - the type-id of a template-argument for a type-parameter
+ if (IsQualifiedFunction &&
+ !(!FreeFunction &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) &&
+ !IsTypedefName &&
+ D.getContext() != Declarator::TemplateTypeArgContext) {
+ SourceLocation Loc = D.getLocStart();
+ SourceRange RemovalRange;
+ unsigned I;
+ if (D.isFunctionDeclarator(I)) {
+ SmallVector<SourceLocation, 4> RemovalLocs;
+ const DeclaratorChunk &Chunk = D.getTypeObject(I);
+ assert(Chunk.Kind == DeclaratorChunk::Function);
+ if (Chunk.Fun.hasRefQualifier())
+ RemovalLocs.push_back(Chunk.Fun.getRefQualifierLoc());
+ if (Chunk.Fun.TypeQuals & Qualifiers::Const)
+ RemovalLocs.push_back(Chunk.Fun.getConstQualifierLoc());
+ if (Chunk.Fun.TypeQuals & Qualifiers::Volatile)
+ RemovalLocs.push_back(Chunk.Fun.getVolatileQualifierLoc());
+ // FIXME: We do not track the location of the __restrict qualifier.
+ //if (Chunk.Fun.TypeQuals & Qualifiers::Restrict)
+ // RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc());
+ if (!RemovalLocs.empty()) {
+ std::sort(RemovalLocs.begin(), RemovalLocs.end(),
+ SourceManager::LocBeforeThanCompare(S.getSourceManager()));
+ RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back());
+ Loc = RemovalLocs.front();
}
-
- S.Diag(D.getIdentifierLoc(),
- diag::ext_qualified_function_type_template_arg)
- << Quals;
- } else {
- if (FnTy->getTypeQuals() != 0) {
- if (D.isFunctionDeclarator())
- S.Diag(D.getIdentifierLoc(),
- diag::err_invalid_qualified_function_type);
- else
- S.Diag(D.getIdentifierLoc(),
- diag::err_invalid_qualified_typedef_function_type_use)
- << FreeFunction;
- }
-
- if (FnTy->getRefQualifier()) {
- if (D.isFunctionDeclarator()) {
- SourceLocation Loc = D.getIdentifierLoc();
- for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) {
- const DeclaratorChunk &Chunk = D.getTypeObject(N-I-1);
- if (Chunk.Kind == DeclaratorChunk::Function &&
- Chunk.Fun.hasRefQualifier()) {
- Loc = Chunk.Fun.getRefQualifierLoc();
- break;
- }
- }
-
- S.Diag(Loc, diag::err_invalid_ref_qualifier_function_type)
- << (FnTy->getRefQualifier() == RQ_LValue)
- << FixItHint::CreateRemoval(Loc);
- } else {
- S.Diag(D.getIdentifierLoc(),
- diag::err_invalid_ref_qualifier_typedef_function_type_use)
- << FreeFunction
- << (FnTy->getRefQualifier() == RQ_LValue);
- }
- }
-
- // Strip the cv-qualifiers and ref-qualifiers from the type.
- FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
- EPI.TypeQuals = 0;
- EPI.RefQualifier = RQ_None;
-
- T = Context.getFunctionType(FnTy->getResultType(),
- FnTy->arg_type_begin(),
- FnTy->getNumArgs(), EPI);
}
+
+ S.Diag(Loc, diag::err_invalid_qualified_function_type)
+ << FreeFunction << D.isFunctionDeclarator() << T
+ << getFunctionQualifiersAsString(FnTy)
+ << FixItHint::CreateRemoval(RemovalRange);
+
+ // Strip the cv-qualifiers and ref-qualifiers from the type.
+ FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+
+ T = Context.getFunctionType(FnTy->getResultType(),
+ FnTy->arg_type_begin(),
+ FnTy->getNumArgs(), EPI);
}
}
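// Illustrative only, not part of this patch: what C++11 [dcl.fct]p6 (with
// DR1417) permits and rejects for cv-/ref-qualified function types, per the
// comment block above.
using ConstFn = void() const;   // OK: top-level type of an alias-declaration
struct Holder { ConstFn f; };   // OK: declares a const member function
// ConstFn *bad;                // ill-formed: pointer to a qualified function type
// void free_fn() const;        // ill-formed: const on a free function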
@@ -2540,6 +2639,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case Declarator::CXXCatchContext:
case Declarator::ObjCCatchContext:
case Declarator::BlockLiteralContext:
+ case Declarator::LambdaExprContext:
+ case Declarator::TrailingReturnContext:
case Declarator::TemplateTypeArgContext:
// FIXME: We may want to allow parameter packs in block-literal contexts
// in the future.
@@ -2573,7 +2674,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) {
if (T.isNull())
return Context.getNullTypeSourceInfo();
- if (D.isPrototypeContext() && getLangOptions().ObjCAutoRefCount)
+ if (D.isPrototypeContext() && getLangOpts().ObjCAutoRefCount)
inferARCWriteback(state, T);
return GetFullTypeForDeclarator(state, T, ReturnTypeInfo);
@@ -2605,7 +2706,7 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
const char *attrStr = 0;
switch (ownership) {
- case Qualifiers::OCL_None: llvm_unreachable("no ownership!"); break;
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
case Qualifiers::OCL_ExplicitNone: attrStr = "none"; break;
case Qualifiers::OCL_Strong: attrStr = "strong"; break;
case Qualifiers::OCL_Weak: attrStr = "weak"; break;
@@ -2625,6 +2726,7 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
// TODO: mark whether we did this inference?
}
+/// \brief Used for transfering ownership in casts resulting in l-values.
static void transferARCOwnership(TypeProcessingState &state,
QualType &declSpecTy,
Qualifiers::ObjCLifetime ownership) {
@@ -2632,6 +2734,7 @@ static void transferARCOwnership(TypeProcessingState &state,
Declarator &D = state.getDeclarator();
int inner = -1;
+ bool hasIndirection = false;
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
DeclaratorChunk &chunk = D.getTypeObject(i);
switch (chunk.Kind) {
@@ -2642,11 +2745,15 @@ static void transferARCOwnership(TypeProcessingState &state,
case DeclaratorChunk::Array:
case DeclaratorChunk::Reference:
case DeclaratorChunk::Pointer:
+ if (inner != -1)
+ hasIndirection = true;
inner = i;
break;
case DeclaratorChunk::BlockPointer:
- return transferARCOwnershipToDeclaratorChunk(state, ownership, i);
+ if (inner != -1)
+ transferARCOwnershipToDeclaratorChunk(state, ownership, i);
+ return;
case DeclaratorChunk::Function:
case DeclaratorChunk::MemberPointer:
@@ -2655,13 +2762,13 @@ static void transferARCOwnership(TypeProcessingState &state,
}
if (inner == -1)
- return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
+ return;
DeclaratorChunk &chunk = D.getTypeObject(inner);
if (chunk.Kind == DeclaratorChunk::Pointer) {
if (declSpecTy->isObjCRetainableType())
return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
- if (declSpecTy->isObjCObjectType())
+ if (declSpecTy->isObjCObjectType() && hasIndirection)
return transferARCOwnershipToDeclaratorChunk(state, ownership, inner);
} else {
assert(chunk.Kind == DeclaratorChunk::Array ||
@@ -2678,7 +2785,7 @@ TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
if (declSpecTy.isNull())
return Context.getNullTypeSourceInfo();
- if (getLangOptions().ObjCAutoRefCount) {
+ if (getLangOpts().ObjCAutoRefCount) {
Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
if (ownership != Qualifiers::OCL_None)
transferARCOwnership(state, declSpecTy, ownership);
@@ -2720,7 +2827,6 @@ static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
return AttributeList::AT_pcs;
}
llvm_unreachable("unexpected attribute kind!");
- return AttributeList::Kind();
}
static void fillAttributedTypeLoc(AttributedTypeLoc TL,
@@ -2866,51 +2972,28 @@ namespace {
return;
}
}
- TL.setKeywordLoc(Keyword != ETK_None
- ? DS.getTypeSpecTypeLoc()
- : SourceLocation());
+ TL.setElaboratedKeywordLoc(Keyword != ETK_None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation());
const CXXScopeSpec& SS = DS.getTypeSpecScope();
TL.setQualifierLoc(SS.getWithLocInContext(Context));
Visit(TL.getNextTypeLoc().getUnqualifiedLoc());
}
void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
- ElaboratedTypeKeyword Keyword
- = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
- if (DS.getTypeSpecType() == TST_typename) {
- TypeSourceInfo *TInfo = 0;
- Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
- if (TInfo) {
- TL.copy(cast<DependentNameTypeLoc>(TInfo->getTypeLoc()));
- return;
- }
- }
- TL.setKeywordLoc(Keyword != ETK_None
- ? DS.getTypeSpecTypeLoc()
- : SourceLocation());
- const CXXScopeSpec& SS = DS.getTypeSpecScope();
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
- TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
+ assert(DS.getTypeSpecType() == TST_typename);
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.copy(cast<DependentNameTypeLoc>(TInfo->getTypeLoc()));
}
void VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- ElaboratedTypeKeyword Keyword
- = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
- if (Keyword == ETK_Typename) {
- TypeSourceInfo *TInfo = 0;
- Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
- if (TInfo) {
- TL.copy(cast<DependentTemplateSpecializationTypeLoc>(
- TInfo->getTypeLoc()));
- return;
- }
- }
- TL.initializeLocal(Context, SourceLocation());
- TL.setKeywordLoc(Keyword != ETK_None
- ? DS.getTypeSpecTypeLoc()
- : SourceLocation());
- const CXXScopeSpec& SS = DS.getTypeSpecScope();
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
- TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
+ assert(DS.getTypeSpecType() == TST_typename);
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.copy(cast<DependentTemplateSpecializationTypeLoc>(
+ TInfo->getTypeLoc()));
}
void VisitTagTypeLoc(TagTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
@@ -2972,7 +3055,7 @@ namespace {
assert(isa<DependentNameType>(ClsTy) && "Unexpected TypeLoc");
{
DependentNameTypeLoc DNTLoc = cast<DependentNameTypeLoc>(ClsTL);
- DNTLoc.setKeywordLoc(SourceLocation());
+ DNTLoc.setElaboratedKeywordLoc(SourceLocation());
DNTLoc.setQualifierLoc(NNSLoc.getPrefix());
DNTLoc.setNameLoc(NNSLoc.getLocalBeginLoc());
}
@@ -2982,7 +3065,7 @@ namespace {
case NestedNameSpecifier::TypeSpecWithTemplate:
if (isa<ElaboratedType>(ClsTy)) {
ElaboratedTypeLoc ETLoc = *cast<ElaboratedTypeLoc>(&ClsTL);
- ETLoc.setKeywordLoc(SourceLocation());
+ ETLoc.setElaboratedKeywordLoc(SourceLocation());
ETLoc.setQualifierLoc(NNSLoc.getPrefix());
TypeLoc NamedTL = ETLoc.getNamedTypeLoc();
NamedTL.initializeFullCopy(NNSLoc.getTypeLoc());
@@ -2995,7 +3078,6 @@ namespace {
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
llvm_unreachable("Nested-name-specifier must name a type");
- break;
}
// Finally fill in MemberPointerLocInfo fields.
@@ -3124,7 +3206,7 @@ TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
if (D.getContext() != Declarator::ObjCParameterContext)
checkUnusedDeclAttributes(D);
- if (getLangOptions().CPlusPlus) {
+ if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments (C++ only).
CheckExtraCXXDefaultArguments(D);
}
@@ -3205,6 +3287,36 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
}
+/// Does this type have a "direct" ownership qualifier? That is,
+/// is it written like "__strong id", as opposed to something like
+/// "typeof(foo)", where that happens to be strong?
+static bool hasDirectOwnershipQualifier(QualType type) {
+ // The caller guarantees there is an ObjC lifetime qualifier somewhere on
+ // this type; we only decide whether it was written directly.
+ assert(type.getQualifiers().hasObjCLifetime());
+
+ while (true) {
+ // __strong id
+ if (const AttributedType *attr = dyn_cast<AttributedType>(type)) {
+ if (attr->getAttrKind() == AttributedType::attr_objc_ownership)
+ return true;
+
+ type = attr->getModifiedType();
+
+ // X *__strong (...)
+ } else if (const ParenType *paren = dyn_cast<ParenType>(type)) {
+ type = paren->getInnerType();
+
+ // That's it for things we want to complain about. In particular,
+ // we do not want to look through typedefs, typeof(expr),
+ // typeof(type), or any other way that the type is somehow
+ // abstracted.
+ } else {
+ return false;
+ }
+ }
+}
+
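// Minimal sketch (toy types, not Clang's AST) of the sugar-peeling loop
// above: look through only paren and attribute wrappers, and report whether
// an ownership attribute was written directly on this type. Typedefs and
// typeof(...) are deliberately not peeled.
struct Node { enum Kind { Plain, Paren, OwnershipAttr } kind; const Node *inner; };

static bool hasDirectOwnership(const Node *n) {
  while (true) {
    if (n->kind == Node::OwnershipAttr) return true;            // written right here
    if (n->kind == Node::Paren) { n = n->inner; continue; }     // transparent sugar
    return false;                                               // anything else hides it
  }
}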
/// handleObjCOwnershipTypeAttr - Process an objc_ownership
/// attribute on the specified type.
///
@@ -3212,20 +3324,27 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
AttributeList &attr,
QualType &type) {
- if (!type->isObjCRetainableType() && !type->isDependentType())
- return false;
+ bool NonObjCPointer = false;
+
+ if (!type->isDependentType()) {
+ if (const PointerType *ptr = type->getAs<PointerType>()) {
+ QualType pointee = ptr->getPointeeType();
+ if (pointee->isObjCRetainableType() || pointee->isPointerType())
+ return false;
+ // It is important not to lose the source info that there was an attribute
+ // applied to a non-ObjC pointer. We will create an attributed type, but
+ // its type will be the same as the original type.
+ NonObjCPointer = true;
+ } else if (!type->isObjCRetainableType()) {
+ return false;
+ }
+ }
Sema &S = state.getSema();
SourceLocation AttrLoc = attr.getLoc();
if (AttrLoc.isMacroID())
AttrLoc = S.getSourceManager().getImmediateExpansionRange(AttrLoc).first;
- if (type.getQualifiers().getObjCLifetime()) {
- S.Diag(AttrLoc, diag::err_attr_objc_ownership_redundant)
- << type;
- return true;
- }
-
if (!attr.getParameterName()) {
S.Diag(AttrLoc, diag::err_attribute_argument_n_not_string)
<< "objc_ownership" << 1;
@@ -3233,6 +3352,11 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
return true;
}
+ // Consume lifetime attributes without further comment outside of
+ // ARC mode.
+ if (!S.getLangOpts().ObjCAutoRefCount)
+ return true;
+
Qualifiers::ObjCLifetime lifetime;
if (attr.getParameterName()->isStr("none"))
lifetime = Qualifiers::OCL_ExplicitNone;
@@ -3249,15 +3373,49 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
return true;
}
- // Consume lifetime attributes without further comment outside of
- // ARC mode.
- if (!S.getLangOptions().ObjCAutoRefCount)
- return true;
+ SplitQualType underlyingType = type.split();
+
+ // Check for redundant/conflicting ownership qualifiers.
+ if (Qualifiers::ObjCLifetime previousLifetime
+ = type.getQualifiers().getObjCLifetime()) {
+ // If it's written directly, that's an error.
+ if (hasDirectOwnershipQualifier(type)) {
+ S.Diag(AttrLoc, diag::err_attr_objc_ownership_redundant)
+ << type;
+ return true;
+ }
+
+ // Otherwise, if the qualifiers actually conflict, pull sugar off
+ // until we reach a type that is directly qualified.
+ if (previousLifetime != lifetime) {
+ // This should always terminate: the canonical type is
+ // qualified, so some bit of sugar must be hiding it.
+ while (!underlyingType.Quals.hasObjCLifetime()) {
+ underlyingType = underlyingType.getSingleStepDesugaredType();
+ }
+ underlyingType.Quals.removeObjCLifetime();
+ }
+ }
+
+ underlyingType.Quals.addObjCLifetime(lifetime);
+
+ if (NonObjCPointer) {
+ StringRef name = attr.getName()->getName();
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ break;
+ case Qualifiers::OCL_Strong: name = "__strong"; break;
+ case Qualifiers::OCL_Weak: name = "__weak"; break;
+ case Qualifiers::OCL_Autoreleasing: name = "__autoreleasing"; break;
+ }
+ S.Diag(AttrLoc, diag::warn_objc_object_attribute_wrong_type)
+ << name << type;
+ }
- Qualifiers qs;
- qs.setObjCLifetime(lifetime);
QualType origType = type;
- type = S.Context.getQualifiedType(type, qs);
+ if (!NonObjCPointer)
+ type = S.Context.getQualifiedType(underlyingType);
// If we have a valid source location for the attribute, use an
// AttributedType instead.
@@ -3267,7 +3425,7 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
// Forbid __weak if the runtime doesn't support it.
if (lifetime == Qualifiers::OCL_Weak &&
- !S.getLangOptions().ObjCRuntimeHasWeak) {
+ !S.getLangOpts().ObjCRuntimeHasWeak && !NonObjCPointer) {
// Actually, delay this until we know what we're parsing.
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
@@ -3438,9 +3596,9 @@ namespace {
SplitQualType SplitOld = Old.split();
// As a special case, tail-recurse if there are no qualifiers.
- if (SplitOld.second.empty())
- return wrap(C, SplitOld.first, I);
- return C.getQualifiedType(wrap(C, SplitOld.first, I), SplitOld.second);
+ if (SplitOld.Quals.empty())
+ return wrap(C, SplitOld.Ty, I);
+ return C.getQualifiedType(wrap(C, SplitOld.Ty, I), SplitOld.Quals);
}
QualType wrap(ASTContext &C, const Type *Old, unsigned I) {
@@ -3484,7 +3642,6 @@ namespace {
}
llvm_unreachable("unknown wrapping kind");
- return QualType();
}
};
}
@@ -3515,7 +3672,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
// ns_returns_retained is not always a type attribute, but if we got
// here, we're treating it as one right now.
if (attr.getKind() == AttributeList::AT_ns_returns_retained) {
- assert(S.getLangOptions().ObjCAutoRefCount &&
+ assert(S.getLangOpts().ObjCAutoRefCount &&
"ns_returns_retained treated as type attribute in non-ARC");
if (attr.getNumArgs()) return true;
@@ -3712,11 +3869,12 @@ static void HandleExtVectorTypeAttr(QualType &CurType,
// Special case where the argument is a template id.
if (Attr.getParameterName()) {
CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Attr.getParameterName(), Attr.getLoc());
-
- ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, id, false,
- false);
+
+ ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
+ id, false, false);
if (Size.isInvalid())
return;
@@ -3857,7 +4015,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
break;
case AttributeList::AT_ns_returns_retained:
- if (!state.getSema().getLangOptions().ObjCAutoRefCount)
+ if (!state.getSema().getLangOpts().ObjCAutoRefCount)
break;
// fallthrough into the function attrs
@@ -3988,9 +4146,52 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
// "Can't ask whether a dependent type is complete");
// If we have a complete type, we're done.
- if (!T->isIncompleteType())
+ NamedDecl *Def = 0;
+ if (!T->isIncompleteType(&Def)) {
+ // If we know about the definition but it is not visible, complain.
+ if (diag != 0 && Def && !LookupResult::isVisible(Def)) {
+ // Suppress this error outside of a SFINAE context if we've already
+ // emitted the error once for this type; there is no point in
+ // repeating the diagnostic.
+ // FIXME: Add a Fix-It that imports the corresponding module or includes
+ // the header.
+ if (isSFINAEContext() || HiddenDefinitions.insert(Def)) {
+ Diag(Loc, diag::err_module_private_definition) << T;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ }
+ }
+
return false;
+ }
+ const TagType *Tag = T->getAs<TagType>();
+ const ObjCInterfaceType *IFace = 0;
+
+ if (Tag) {
+ // Avoid diagnosing invalid decls as incomplete.
+ if (Tag->getDecl()->isInvalidDecl())
+ return true;
+
+ // Give the external AST source a chance to complete the type.
+ if (Tag->getDecl()->hasExternalLexicalStorage()) {
+ Context.getExternalSource()->CompleteType(Tag->getDecl());
+ if (!Tag->isIncompleteType())
+ return false;
+ }
+ }
+ else if ((IFace = T->getAs<ObjCInterfaceType>())) {
+ // Avoid diagnosing invalid decls as incomplete.
+ if (IFace->getDecl()->isInvalidDecl())
+ return true;
+
+ // Give the external AST source a chance to complete the type.
+ if (IFace->getDecl()->hasExternalLexicalStorage()) {
+ Context.getExternalSource()->CompleteType(IFace->getDecl());
+ if (!IFace->isIncompleteType())
+ return false;
+ }
+ }
+
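// Minimal sketch, assuming a Clang-like external source interface: the
// "complete on demand, then re-check" pattern used above for both Tag and
// ObjC interface types. All names here are illustrative stand-ins.
struct ToyDecl { bool complete = false; bool external = true; };
struct ToyExternalSource {
  void CompleteType(ToyDecl *d) { d->complete = true; }  // e.g. deserialize the body
};
static bool requireComplete(ToyDecl *d, ToyExternalSource *src) {
  if (d->complete) return false;          // already complete: no error
  if (d->external && src) {
    src->CompleteType(d);                 // give the source one chance
    if (d->complete) return false;
  }
  return true;                            // still incomplete: diagnose
}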
// If we have a class template specialization or a class member of a
// class template specialization, or an array with known size of such,
// try to instantiate it.
@@ -4006,12 +4207,12 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
/*Complain=*/diag != 0);
} else if (CXXRecordDecl *Rec
= dyn_cast<CXXRecordDecl>(Record->getDecl())) {
- if (CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass()) {
- MemberSpecializationInfo *MSInfo = Rec->getMemberSpecializationInfo();
- assert(MSInfo && "Missing member specialization information?");
+ CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass();
+ if (!Rec->isBeingDefined() && Pattern) {
+ MemberSpecializationInfo *MSI = Rec->getMemberSpecializationInfo();
+ assert(MSI && "Missing member specialization information?");
// This record was instantiated from a class within a template.
- if (MSInfo->getTemplateSpecializationKind()
- != TSK_ExplicitSpecialization)
+ if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
return InstantiateClass(Loc, Rec, Pattern,
getTemplateInstantiationArgs(Rec),
TSK_ImplicitInstantiation,
@@ -4022,23 +4223,10 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
if (diag == 0)
return true;
-
- const TagType *Tag = T->getAs<TagType>();
-
- // Avoid diagnosing invalid decls as incomplete.
- if (Tag && Tag->getDecl()->isInvalidDecl())
- return true;
-
- // Give the external AST source a chance to complete the type.
- if (Tag && Tag->getDecl()->hasExternalLexicalStorage()) {
- Context.getExternalSource()->CompleteType(Tag->getDecl());
- if (!Tag->isIncompleteType())
- return false;
- }
-
+
// We have an incomplete type. Produce a diagnostic.
Diag(Loc, PD) << T;
-
+
// If we have a note, produce it.
if (!Note.first.isInvalid())
Diag(Note.first, Note.second);
@@ -4049,7 +4237,11 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
Diag(Tag->getDecl()->getLocation(),
Tag->isBeingDefined() ? diag::note_type_being_defined
: diag::note_forward_declaration)
- << QualType(Tag, 0);
+ << QualType(Tag, 0);
+
+ // If the Objective-C class was a forward declaration, produce a note.
+ if (IFace && !IFace->getDecl()->isInvalidDecl())
+ Diag(IFace->getDecl()->getLocation(), diag::note_forward_class);
return true;
}
@@ -4083,18 +4275,16 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
/// @param PD The partial diagnostic that will be printed out if T is not a
/// literal type.
///
-/// @param AllowIncompleteType If true, an incomplete type will be considered
-/// acceptable.
-///
/// @returns @c true if @p T is not a literal type and a diagnostic was emitted,
/// @c false otherwise.
bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
- const PartialDiagnostic &PD,
- bool AllowIncompleteType) {
+ const PartialDiagnostic &PD) {
assert(!T->isDependentType() && "type should not be dependent");
- bool Incomplete = RequireCompleteType(Loc, T, 0);
- if (T->isLiteralType() || (AllowIncompleteType && Incomplete))
+ QualType ElemType = Context.getBaseElementType(T);
+ RequireCompleteType(Loc, ElemType, 0);
+
+ if (T->isLiteralType())
return false;
if (PD.getDiagID() == 0)
@@ -4105,50 +4295,35 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
if (T->isVariableArrayType())
return true;
- const RecordType *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>();
+ const RecordType *RT = ElemType->getAs<RecordType>();
if (!RT)
return true;
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ // FIXME: Better diagnostic for incomplete class?
+ if (!RD->isCompleteDefinition())
+ return true;
+
// If the class has virtual base classes, then it's not an aggregate, and
- // cannot have any constexpr constructors, so is non-literal. This is better
- // to diagnose than the resulting absence of constexpr constructors.
+ // cannot have any constexpr constructors or a trivial default constructor,
+ // so is non-literal. This is better to diagnose than the resulting absence
+ // of constexpr constructors.
if (RD->getNumVBases()) {
Diag(RD->getLocation(), diag::note_non_literal_virtual_base)
<< RD->isStruct() << RD->getNumVBases();
for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
E = RD->vbases_end(); I != E; ++I)
- Diag(I->getSourceRange().getBegin(),
+ Diag(I->getLocStart(),
diag::note_constexpr_virtual_base_here) << I->getSourceRange();
- } else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor()) {
+ } else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor() &&
+ !RD->hasTrivialDefaultConstructor()) {
Diag(RD->getLocation(), diag::note_non_literal_no_constexpr_ctors) << RD;
-
- switch (RD->getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- break;
-
- case TSK_ImplicitInstantiation:
- case TSK_ExplicitInstantiationDeclaration:
- case TSK_ExplicitInstantiationDefinition:
- // If the base template had constexpr constructors which were
- // instantiated as non-constexpr constructors, explain why.
- for (CXXRecordDecl::ctor_iterator I = RD->ctor_begin(),
- E = RD->ctor_end(); I != E; ++I) {
- if ((*I)->isCopyConstructor() || (*I)->isMoveConstructor())
- continue;
-
- FunctionDecl *Base = (*I)->getInstantiatedFromMemberFunction();
- if (Base && Base->isConstexpr())
- CheckConstexprFunctionDecl(*I, CCK_NoteNonConstexprInstantiation);
- }
- }
} else if (RD->hasNonLiteralTypeFieldsOrBases()) {
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
if (!I->getType()->isLiteralType()) {
- Diag(I->getSourceRange().getBegin(),
+ Diag(I->getLocStart(),
diag::note_non_literal_base_class)
<< RD << I->getType() << I->getSourceRange();
return true;
@@ -4156,12 +4331,11 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
}
for (CXXRecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I) {
- if (!(*I)->getType()->isLiteralType()) {
+ if (!(*I)->getType()->isLiteralType() ||
+ (*I)->getType().isVolatileQualified()) {
Diag((*I)->getLocation(), diag::note_non_literal_field)
- << RD << (*I) << (*I)->getType();
- return true;
- } else if ((*I)->isMutable()) {
- Diag((*I)->getLocation(), diag::note_non_literal_mutable_field) << RD;
+ << RD << (*I) << (*I)->getType()
+ << (*I)->getType().isVolatileQualified();
return true;
}
}
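// Illustrative only, not part of this patch: the literal-type rules these
// notes diagnose. A virtual base rules out constexpr constructors entirely,
// so such a class can never be a literal type.
#include <type_traits>
struct LitEx    { constexpr LitEx() : v(0) {} int v; };
struct NonLitEx : virtual LitEx { };
static_assert(std::is_literal_type<LitEx>::value,     "constexpr ctor => literal");
static_assert(!std::is_literal_type<NonLitEx>::value, "virtual base => non-literal");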
@@ -4211,12 +4385,73 @@ QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
return Context.getTypeOfExprType(E);
}
+/// getDecltypeForExpr - Given an expression, compute the type that
+/// decltype would yield for it, according to the rules in C++11
+/// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18.
+static QualType getDecltypeForExpr(Sema &S, Expr *E) {
+ if (E->isTypeDependent())
+ return S.Context.DependentTy;
+
+ // C++11 [dcl.type.simple]p4:
+ // The type denoted by decltype(e) is defined as follows:
+ //
+ // - if e is an unparenthesized id-expression or an unparenthesized class
+ // member access (5.2.5), decltype(e) is the type of the entity named
+ // by e. If there is no such entity, or if e names a set of overloaded
+ // functions, the program is ill-formed;
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
+ return VD->getType();
+ }
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ return FD->getType();
+ }
+
+ // C++11 [expr.lambda.prim]p18:
+ // Every occurrence of decltype((x)) where x is a possibly
+ // parenthesized id-expression that names an entity of automatic
+ // storage duration is treated as if x were transformed into an
+ // access to a corresponding data member of the closure type that
+ // would have been declared if x were an odr-use of the denoted
+ // entity.
+ using namespace sema;
+ if (S.getCurLambda()) {
+ if (isa<ParenExpr>(E)) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ QualType T = S.getCapturedDeclRefType(Var, DRE->getLocation());
+ if (!T.isNull())
+ return S.Context.getLValueReferenceType(T);
+ }
+ }
+ }
+ }
+
+ // C++11 [dcl.type.simple]p4:
+ // [...]
+ QualType T = E->getType();
+ switch (E->getValueKind()) {
+ // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
+ // type of e;
+ case VK_XValue: T = S.Context.getRValueReferenceType(T); break;
+ // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
+ // type of e;
+ case VK_LValue: T = S.Context.getLValueReferenceType(T); break;
+ // - otherwise, decltype(e) is the type of e.
+ case VK_RValue: break;
+ }
+
+ return T;
+}
+
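// Illustrative only, not part of this patch: the C++11 value-category rules
// implemented by getDecltypeForExpr above, checked with static_asserts.
#include <type_traits>
namespace decltype_demo {
  int n = 0;
  int &&xv();  // declared only; decltype is unevaluated
  static_assert(std::is_same<decltype(n),    int>::value,   "id-expression: declared type");
  static_assert(std::is_same<decltype((n)),  int&>::value,  "parenthesized lvalue: T&");
  static_assert(std::is_same<decltype(xv()), int&&>::value, "xvalue: T&&");
  static_assert(std::is_same<decltype(0),    int>::value,   "prvalue: T");
}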
QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc) {
ExprResult ER = CheckPlaceholderExpr(E);
if (ER.isInvalid()) return QualType();
E = ER.take();
- return Context.getDecltypeType(E);
+ return Context.getDecltypeType(E, getDecltypeForExpr(*this, E));
}
QualType Sema::BuildUnaryTransformType(QualType BaseType,
@@ -4245,12 +4480,14 @@ QualType Sema::BuildUnaryTransformType(QualType BaseType,
QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
if (!T->isDependentType()) {
+ // FIXME: It isn't entirely clear whether incomplete atomic types
+ // are allowed or not; for simplicity, ban them for the moment.
+ if (RequireCompleteType(Loc, T,
+ PDiag(diag::err_atomic_specifier_bad_type) << 0))
+ return QualType();
+
int DisallowedKind = -1;
- if (T->isIncompleteType())
- // FIXME: It isn't entirely clear whether incomplete atomic types
- // are allowed or not; for simplicity, ban them for the moment.
- DisallowedKind = 0;
- else if (T->isArrayType())
+ if (T->isArrayType())
DisallowedKind = 1;
else if (T->isFunctionType())
DisallowedKind = 2;
diff --git a/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
index aa0bc08..8b19be7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
@@ -169,7 +169,7 @@ static void HandleDLLImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
// Apparently Visual C++ thinks it is okay to not emit a warning
// in this case, so only emit a warning when -fms-extensions is not
// specified.
- if (!S.getLangOptions().MicrosoftExt)
+ if (!S.getLangOpts().MicrosoftExt)
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << 2 /*variable and function*/;
return;
diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
index bb49eee..fdb861e 100644
--- a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
@@ -112,6 +112,11 @@ class TreeTransform {
protected:
Sema &SemaRef;
+ /// \brief The set of local declarations that have been transformed, for
+ /// cases where we are forced to build new declarations within the transformer
+ /// rather than in the subclass (e.g., lambda closure types).
+ llvm::DenseMap<Decl *, Decl *> TransformedLocalDecls;
+
public:
/// \brief Initializes a new tree transformer.
TreeTransform(Sema &SemaRef) : SemaRef(SemaRef) { }
@@ -351,10 +356,36 @@ public:
/// \brief Transform the given declaration, which is referenced from a type
/// or expression.
///
- /// By default, acts as the identity function on declarations. Subclasses
+ /// By default, acts as the identity function on declarations, unless the
+ /// transformer has had to transform the declaration itself. Subclasses
/// may override this function to provide alternate behavior.
- Decl *TransformDecl(SourceLocation Loc, Decl *D) { return D; }
+ Decl *TransformDecl(SourceLocation Loc, Decl *D) {
+ llvm::DenseMap<Decl *, Decl *>::iterator Known
+ = TransformedLocalDecls.find(D);
+ if (Known != TransformedLocalDecls.end())
+ return Known->second;
+
+ return D;
+ }
+ /// \brief Transform the attributes associated with the given declaration and
+ /// place them on the new declaration.
+ ///
+ /// By default, this operation does nothing. Subclasses may override this
+ /// behavior to transform attributes.
+ void transformAttrs(Decl *Old, Decl *New) { }
+
+ /// \brief Note that a local declaration has been transformed by this
+ /// transformer.
+ ///
+ /// Local declarations are typically transformed via a call to
+ /// TransformDefinition. However, in some cases (e.g., lambda expressions),
+ /// the transformer itself has to transform the declarations. This routine
+ /// can be overridden by a subclass that keeps track of such mappings.
+ void transformedLocalDecl(Decl *Old, Decl *New) {
+ TransformedLocalDecls[Old] = New;
+ }
+
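// Minimal sketch, assuming nothing beyond the comments above: the identity-
// with-override lookup that TransformDecl/transformedLocalDecl implement.
// 'DeclStub' and 'MiniTransform' are stand-ins, not Clang classes.
#include <map>
struct DeclStub {};
struct MiniTransform {
  std::map<DeclStub *, DeclStub *> transformed;
  DeclStub *transformDecl(DeclStub *d) {
    auto known = transformed.find(d);
    return known != transformed.end() ? known->second : d;  // default: identity
  }
  void noteTransformed(DeclStub *oldD, DeclStub *newD) { transformed[oldD] = newD; }
};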
/// \brief Transform the definition of the given declaration.
///
/// By default, invokes TransformDecl() to transform the declaration.
@@ -418,7 +449,7 @@ public:
/// Subclasses may override this function to provide alternate behavior.
TemplateName TransformTemplateName(CXXScopeSpec &SS,
TemplateName Name,
- SourceLocation NameLoc,
+ SourceLocation NameLoc,
QualType ObjectType = QualType(),
NamedDecl *FirstQualifierInScope = 0);
@@ -530,7 +561,8 @@ public:
/// scope index; can be negative
ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
int indexAdjustment,
- llvm::Optional<unsigned> NumExpansions);
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
QualType TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL);
@@ -665,7 +697,8 @@ public:
QualType RebuildFunctionProtoType(QualType T,
QualType *ParamTypes,
unsigned NumParamTypes,
- bool Variadic, unsigned Quals,
+ bool Variadic, bool HasTrailingReturn,
+ unsigned Quals,
RefQualifierKind RefQualifier,
const FunctionType::ExtInfo &Info);
@@ -845,7 +878,6 @@ public:
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
llvm_unreachable("Tag lookup cannot find non-tags");
- return QualType();
case LookupResult::Ambiguous:
// Let the LookupResult structure handle ambiguities.
@@ -1291,7 +1323,20 @@ public:
return getSema().BuildCXXForRangeStmt(ForLoc, ColonLoc, Range, BeginEnd,
Cond, Inc, LoopVar, RParenLoc);
}
-
+
+ /// \brief Build a new Microsoft __if_exists or __if_not_exists statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ Stmt *Nested) {
+ return getSema().BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
+ QualifierLoc, NameInfo, Nested);
+ }
+
/// \brief Attach body to a C++0x range-based for statement.
///
/// By default, performs semantic analysis to finish the new statement.
@@ -1450,11 +1495,14 @@ public:
ExprResult RebuildMemberExpr(Expr *Base, SourceLocation OpLoc,
bool isArrow,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &MemberNameInfo,
ValueDecl *Member,
NamedDecl *FoundDecl,
const TemplateArgumentListInfo *ExplicitTemplateArgs,
NamedDecl *FirstQualifierInScope) {
+ ExprResult BaseResult = getSema().PerformMemberExprBaseConversion(Base,
+ isArrow);
if (!Member->getDeclName()) {
// We have a reference to an unnamed field. This is always the
// base of an anonymous struct/union member access, i.e. the
@@ -1463,8 +1511,8 @@ public:
assert(Member->getType()->isRecordType() &&
"unnamed member not of record type?");
- ExprResult BaseResult =
- getSema().PerformObjectMemberConversion(Base,
+ BaseResult =
+ getSema().PerformObjectMemberConversion(BaseResult.take(),
QualifierLoc.getNestedNameSpecifier(),
FoundDecl, Member);
if (BaseResult.isInvalid())
@@ -1482,9 +1530,6 @@ public:
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
- ExprResult BaseResult = getSema().DefaultFunctionArrayConversion(Base);
- if (BaseResult.isInvalid())
- return ExprError();
Base = BaseResult.take();
QualType BaseType = Base->getType();
@@ -1495,7 +1540,8 @@ public:
R.resolveKind();
return getSema().BuildMemberReferenceExpr(Base, BaseType, OpLoc, isArrow,
- SS, FirstQualifierInScope,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
R, ExplicitTemplateArgs);
}
@@ -1559,7 +1605,8 @@ public:
DeclarationNameInfo NameInfo(&Accessor, AccessorLoc);
return getSema().BuildMemberReferenceExpr(Base, Base->getType(),
OpLoc, /*IsArrow*/ false,
- SS, /*FirstQualifierInScope*/ 0,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope*/ 0,
NameInfo,
/* TemplateArgs */ 0);
}
@@ -1629,10 +1676,9 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildParenListExpr(SourceLocation LParenLoc,
- MultiExprArg SubExprs,
- SourceLocation RParenLoc) {
- return getSema().ActOnParenOrParenListExpr(LParenLoc, RParenLoc,
- move(SubExprs));
+ MultiExprArg SubExprs,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, move(SubExprs));
}
/// \brief Build a new address-of-label expression.
@@ -1736,8 +1782,6 @@ public:
default:
llvm_unreachable("Invalid C++ named cast");
}
-
- return ExprError();
}
/// \brief Build a new C++ static_cast expression.
@@ -1878,6 +1922,7 @@ public:
ExprResult RebuildCXXThisExpr(SourceLocation ThisLoc,
QualType ThisType,
bool isImplicit) {
+ getSema().CheckCXXThisCapture(ThisLoc);
return getSema().Owned(
new (getSema().Context) CXXThisExpr(ThisLoc, ThisType,
isImplicit));
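// Illustrative only, not part of this patch: why rebuilding CXXThisExpr must
// call CheckCXXThisCapture. When get<T>() is instantiated, 'this' inside the
// lambda is rebuilt, and the capture has to be (re)recorded for the closure.
struct ThisCaptureDemo {
  int m = 0;
  template <typename T> int get() {
    return [this] { return m; }();  // 'this' capture noted during rebuild
  }
};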
@@ -1928,9 +1973,8 @@ public:
QualType AllocatedType,
TypeSourceInfo *AllocatedTypeInfo,
Expr *ArraySize,
- SourceLocation ConstructorLParen,
- MultiExprArg ConstructorArgs,
- SourceLocation ConstructorRParen) {
+ SourceRange DirectInitRange,
+ Expr *Initializer) {
return getSema().BuildCXXNew(StartLoc, UseGlobal,
PlacementLParen,
move(PlacementArgs),
@@ -1939,9 +1983,8 @@ public:
AllocatedType,
AllocatedTypeInfo,
ArraySize,
- ConstructorLParen,
- move(ConstructorArgs),
- ConstructorRParen);
+ DirectInitRange,
+ Initializer);
}
/// \brief Build a new C++ "delete" expression.
@@ -1979,6 +2022,17 @@ public:
return getSema().BuildBinaryTypeTrait(Trait, StartLoc, LhsT, RhsT, RParenLoc);
}
+ /// \brief Build a new type trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildTypeTrait(TypeTrait Trait,
+ SourceLocation StartLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildTypeTrait(Trait, StartLoc, Args, RParenLoc);
+ }
+
/// \brief Build a new array type trait expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2009,14 +2063,15 @@ public:
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildDependentScopeDeclRefExpr(
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
- if (TemplateArgs)
- return getSema().BuildQualifiedTemplateIdExpr(SS, NameInfo,
- *TemplateArgs);
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc,
+ NameInfo, TemplateArgs);
return getSema().BuildQualifiedDeclarationNameExpr(SS, NameInfo);
}
@@ -2026,10 +2081,12 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildTemplateIdExpr(const CXXScopeSpec &SS,
- LookupResult &R,
- bool RequiresADL,
- const TemplateArgumentListInfo &TemplateArgs) {
- return getSema().BuildTemplateIdExpr(SS, R, RequiresADL, TemplateArgs);
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ bool RequiresADL,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ return getSema().BuildTemplateIdExpr(SS, TemplateKWLoc, R, RequiresADL,
+ TemplateArgs);
}
/// \brief Build a new object-construction expression.
@@ -2094,6 +2151,7 @@ public:
bool IsArrow,
SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
@@ -2102,7 +2160,8 @@ public:
return SemaRef.BuildMemberReferenceExpr(BaseE, BaseType,
OperatorLoc, IsArrow,
- SS, FirstQualifierInScope,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
MemberNameInfo,
TemplateArgs);
}
@@ -2111,20 +2170,21 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildUnresolvedMemberExpr(Expr *BaseE,
- QualType BaseType,
- SourceLocation OperatorLoc,
- bool IsArrow,
- NestedNameSpecifierLoc QualifierLoc,
- NamedDecl *FirstQualifierInScope,
- LookupResult &R,
+ ExprResult RebuildUnresolvedMemberExpr(Expr *BaseE, QualType BaseType,
+ SourceLocation OperatorLoc,
+ bool IsArrow,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs) {
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
return SemaRef.BuildMemberReferenceExpr(BaseE, BaseType,
OperatorLoc, IsArrow,
- SS, FirstQualifierInScope,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
R, TemplateArgs);
}
@@ -2150,7 +2210,35 @@ public:
OperatorLoc, Pack, PackLoc,
RParenLoc);
}
-
+
+ /// \brief Build a new Objective-C array literal.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCArrayLiteral(SourceRange Range,
+ Expr **Elements, unsigned NumElements) {
+ return getSema().BuildObjCArrayLiteral(Range,
+ MultiExprArg(Elements, NumElements));
+ }
+
+ ExprResult RebuildObjCSubscriptRefExpr(SourceLocation RB,
+ Expr *Base, Expr *Key,
+ ObjCMethodDecl *getterMethod,
+ ObjCMethodDecl *setterMethod) {
+ return getSema().BuildObjCSubscriptExpression(RB, Base, Key,
+ getterMethod, setterMethod);
+ }
+
+ /// \brief Build a new Objective-C dictionary literal.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCDictionaryLiteral(SourceRange Range,
+ ObjCDictionaryElement *Elements,
+ unsigned NumElements) {
+ return getSema().BuildObjCDictionaryLiteral(Range, Elements, NumElements);
+ }
+
/// \brief Build a new Objective-C @encode expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2215,7 +2303,8 @@ public:
return move(Result);
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
- /*FIXME:*/IvarLoc, IsArrow, SS,
+ /*FIXME:*/IvarLoc, IsArrow,
+ SS, SourceLocation(),
/*FirstQualifierInScope=*/0,
R,
/*TemplateArgs=*/0);
@@ -2226,8 +2315,8 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildObjCPropertyRefExpr(Expr *BaseArg,
- ObjCPropertyDecl *Property,
- SourceLocation PropertyLoc) {
+ ObjCPropertyDecl *Property,
+ SourceLocation PropertyLoc) {
CXXScopeSpec SS;
ExprResult Base = getSema().Owned(BaseArg);
LookupResult R(getSema(), Property->getDeclName(), PropertyLoc,
@@ -2244,7 +2333,7 @@ public:
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
/*FIXME:*/PropertyLoc, IsArrow,
- SS,
+ SS, SourceLocation(),
/*FirstQualifierInScope=*/0,
R,
/*TemplateArgs=*/0);
@@ -2286,7 +2375,8 @@ public:
return move(Result);
return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
- /*FIXME:*/IsaLoc, IsArrow, SS,
+ /*FIXME:*/IsaLoc, IsArrow,
+ SS, SourceLocation(),
/*FirstQualifierInScope=*/0,
R,
/*TemplateArgs=*/0);
@@ -2309,7 +2399,8 @@ public:
// Build a reference to the __builtin_shufflevector builtin
FunctionDecl *Builtin = cast<FunctionDecl>(*Lookup.first);
ExprResult Callee
- = SemaRef.Owned(new (SemaRef.Context) DeclRefExpr(Builtin, Builtin->getType(),
+ = SemaRef.Owned(new (SemaRef.Context) DeclRefExpr(Builtin, false,
+ Builtin->getType(),
VK_LValue, BuiltinLoc));
Callee = SemaRef.UsualUnaryConversions(Callee.take());
if (Callee.isInvalid())
@@ -2625,10 +2716,13 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
return NestedNameSpecifierLoc();
if (TL.getType()->isDependentType() || TL.getType()->isRecordType() ||
- (SemaRef.getLangOptions().CPlusPlus0x &&
+ (SemaRef.getLangOpts().CPlusPlus0x &&
TL.getType()->isEnumeralType())) {
assert(!TL.getType().hasLocalQualifiers() &&
"Can't get cv-qualifiers here");
+ if (TL.getType()->isEnumeralType())
+ SemaRef.Diag(TL.getBeginLoc(),
+ diag::warn_cxx98_compat_enum_nested_name_spec);
SS.Extend(SemaRef.Context, /*FIXME:*/SourceLocation(), TL,
Q.getLocalEndLoc());
break;
@@ -2797,7 +2891,6 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
// These should be getting filtered out before they reach the AST.
llvm_unreachable("overloaded function decl survived to here");
- return TemplateName();
}
template<typename Derived>
@@ -2884,8 +2977,9 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
Expr *SourceExpr = Input.getSourceDeclExpression();
if (SourceExpr) {
EnterExpressionEvaluationContext Unevaluated(getSema(),
- Sema::Unevaluated);
+ Sema::ConstantEvaluated);
ExprResult E = getDerived().TransformExpr(SourceExpr);
+ E = SemaRef.ActOnConstantExpression(E);
SourceExpr = (E.isInvalid() ? 0 : E.take());
}
@@ -2918,14 +3012,15 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
llvm_unreachable("Caller should expand pack expansions");
case TemplateArgument::Expression: {
- // Template argument expressions are not potentially evaluated.
+ // Template argument expressions are constant expressions.
EnterExpressionEvaluationContext Unevaluated(getSema(),
- Sema::Unevaluated);
+ Sema::ConstantEvaluated);
Expr *InputExpr = Input.getSourceExpression();
if (!InputExpr) InputExpr = Input.getArgument().getAsExpr();
ExprResult E = getDerived().TransformExpr(InputExpr);
+ E = SemaRef.ActOnConstantExpression(E);
if (E.isInvalid()) return true;
Output = TemplateArgumentLoc(TemplateArgument(E.take()), E.take());
return false;
@@ -3168,6 +3263,9 @@ QualType TreeTransform<Derived>::TransformType(QualType T) {
template<typename Derived>
TypeSourceInfo *TreeTransform<Derived>::TransformType(TypeSourceInfo *DI) {
+ // Refine the base location to the type's location.
+ TemporaryBase Rebase(*this, DI->getTypeLoc().getBeginLoc(),
+ getDerived().getBaseEntity());
if (getDerived().AlreadyTransformed(DI->getType()))
return DI;
@@ -3195,7 +3293,6 @@ TreeTransform<Derived>::TransformType(TypeLocBuilder &TLB, TypeLoc T) {
}
llvm_unreachable("unhandled type loc!");
- return QualType();
}
/// FIXME: By default, this routine adds type qualifiers only to types
@@ -3294,7 +3391,7 @@ TreeTransform<Derived>::TransformTypeInObjectScope(TypeLoc TL,
TemplateName Template
= getDerived().RebuildTemplateName(SS,
*SpecTL.getTypePtr()->getIdentifier(),
- SpecTL.getNameLoc(),
+ SpecTL.getTemplateNameLoc(),
ObjectType, UnqualLookup);
if (Template.isNull())
return TypeLoc();
@@ -3351,7 +3448,7 @@ TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
TemplateName Template
= getDerived().RebuildTemplateName(SS,
*SpecTL.getTypePtr()->getIdentifier(),
- SpecTL.getNameLoc(),
+ SpecTL.getTemplateNameLoc(),
ObjectType, UnqualLookup);
if (Template.isNull())
return 0;
@@ -3574,15 +3671,21 @@ TreeTransform<Derived>::TransformConstantArrayType(TypeLocBuilder &TLB,
if (Result.isNull())
return QualType();
}
-
- ConstantArrayTypeLoc NewTL = TLB.push<ConstantArrayTypeLoc>(Result);
+
+ // We might have either a ConstantArrayType or a VariableArrayType now:
+ // a ConstantArrayType is allowed to have an element type which is a
+ // VariableArrayType if the type is dependent. Fortunately, all array
+ // types have the same location layout.
+ ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result);
NewTL.setLBracketLoc(TL.getLBracketLoc());
NewTL.setRBracketLoc(TL.getRBracketLoc());
Expr *Size = TL.getSizeExpr();
if (Size) {
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
Size = getDerived().TransformExpr(Size).template takeAs<Expr>();
+ Size = SemaRef.ActOnConstantExpression(Size).take();
}
NewTL.setSizeExpr(Size);
@@ -3626,9 +3729,6 @@ TreeTransform<Derived>::TransformVariableArrayType(TypeLocBuilder &TLB,
if (ElementType.isNull())
return QualType();
- // Array bounds are not potentially evaluated contexts
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
-
ExprResult SizeResult
= getDerived().TransformExpr(T->getSizeExpr());
if (SizeResult.isInvalid())
@@ -3666,8 +3766,9 @@ TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
if (ElementType.isNull())
return QualType();
- // Array bounds are not potentially evaluated contexts
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ // Array bounds are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
// Prefer the expression from the TypeLoc; the other may have been uniqued.
Expr *origSize = TL.getSizeExpr();
@@ -3675,6 +3776,7 @@ TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
ExprResult sizeResult
= getDerived().TransformExpr(origSize);
+ sizeResult = SemaRef.ActOnConstantExpression(sizeResult);
if (sizeResult.isInvalid())
return QualType();
@@ -3714,10 +3816,12 @@ QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
if (ElementType.isNull())
return QualType();
- // Vector sizes are not potentially evaluated contexts
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ // Vector sizes are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
ExprResult Size = getDerived().TransformExpr(T->getSizeExpr());
+ Size = SemaRef.ActOnConstantExpression(Size);
if (Size.isInvalid())
return QualType();
@@ -3796,12 +3900,14 @@ template<typename Derived>
ParmVarDecl *
TreeTransform<Derived>::TransformFunctionTypeParam(ParmVarDecl *OldParm,
int indexAdjustment,
- llvm::Optional<unsigned> NumExpansions) {
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
TypeSourceInfo *NewDI = 0;
if (NumExpansions && isa<PackExpansionType>(OldDI->getType())) {
// If we're substituting into a pack expansion type and we know the
+ // length we want to expand to, just substitute for the pattern.
TypeLoc OldTL = OldDI->getTypeLoc();
PackExpansionTypeLoc OldExpansionTL = cast<PackExpansionTypeLoc>(OldTL);
@@ -3898,7 +4004,8 @@ bool TreeTransform<Derived>::
ParmVarDecl *NewParm
= getDerived().TransformFunctionTypeParam(OldParm,
indexAdjustment++,
- OrigNumExpansions);
+ OrigNumExpansions,
+ /*ExpectParameterPack=*/false);
if (!NewParm)
return true;
@@ -3914,7 +4021,8 @@ bool TreeTransform<Derived>::
ParmVarDecl *NewParm
= getDerived().TransformFunctionTypeParam(OldParm,
indexAdjustment++,
- OrigNumExpansions);
+ OrigNumExpansions,
+ /*ExpectParameterPack=*/false);
if (!NewParm)
return true;
@@ -3938,11 +4046,13 @@ bool TreeTransform<Derived>::
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
NewParm = getDerived().TransformFunctionTypeParam(OldParm,
indexAdjustment,
- NumExpansions);
+ NumExpansions,
+ /*ExpectParameterPack=*/true);
} else {
NewParm = getDerived().TransformFunctionTypeParam(OldParm,
indexAdjustment,
- llvm::Optional<unsigned>());
+ llvm::Optional<unsigned>(),
+ /*ExpectParameterPack=*/false);
}
if (!NewParm)
@@ -4096,6 +4206,7 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
ParamTypes.data(),
ParamTypes.size(),
T->isVariadic(),
+ T->hasTrailingReturn(),
T->getTypeQuals(),
T->getRefQualifier(),
T->getExtInfo());
@@ -4192,6 +4303,10 @@ QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
if (E.isInvalid())
return QualType();
+ E = SemaRef.HandleExprEvaluationContextForTypeof(E.get());
+ if (E.isInvalid())
+ return QualType();
+
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
E.get() != TL.getUnderlyingExpr()) {
@@ -4239,12 +4354,17 @@ QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
const DecltypeType *T = TL.getTypePtr();
// decltype expressions are not potentially evaluated contexts
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, 0,
+ /*IsDecltype=*/ true);
ExprResult E = getDerived().TransformExpr(T->getUnderlyingExpr());
if (E.isInvalid())
return QualType();
+ E = getSema().ActOnDecltypeExpression(E.take());
+ if (E.isInvalid())
+ return QualType();
+
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
E.get() != T->getUnderlyingExpr()) {
@@ -4549,9 +4669,10 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
if (isa<DependentTemplateSpecializationType>(Result)) {
DependentTemplateSpecializationTypeLoc NewTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
- NewTL.setKeywordLoc(TL.getTemplateNameLoc());
+ NewTL.setElaboratedKeywordLoc(SourceLocation());
NewTL.setQualifierLoc(NestedNameSpecifierLoc());
- NewTL.setNameLoc(TL.getTemplateNameLoc());
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
NewTL.setLAngleLoc(TL.getLAngleLoc());
NewTL.setRAngleLoc(TL.getRAngleLoc());
for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
@@ -4561,6 +4682,7 @@ QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
TemplateSpecializationTypeLoc NewTL
= TLB.push<TemplateSpecializationTypeLoc>(Result);
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
NewTL.setLAngleLoc(TL.getLAngleLoc());
NewTL.setRAngleLoc(TL.getRAngleLoc());
@@ -4599,10 +4721,10 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
DependentTemplateSpecializationTypeLoc NewTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
- NewTL.setKeywordLoc(TL.getKeywordLoc());
-
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setQualifierLoc(SS.getWithLocInContext(SemaRef.Context));
- NewTL.setNameLoc(TL.getNameLoc());
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
NewTL.setLAngleLoc(TL.getLAngleLoc());
NewTL.setRAngleLoc(TL.getRAngleLoc());
for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
@@ -4612,14 +4734,15 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
QualType Result
= getDerived().RebuildTemplateSpecializationType(Template,
- TL.getNameLoc(),
+ TL.getTemplateNameLoc(),
NewTemplateArgs);
if (!Result.isNull()) {
/// FIXME: Wrap this in an elaborated-type-specifier?
TemplateSpecializationTypeLoc NewTL
= TLB.push<TemplateSpecializationTypeLoc>(Result);
- NewTL.setTemplateNameLoc(TL.getNameLoc());
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
NewTL.setLAngleLoc(TL.getLAngleLoc());
NewTL.setRAngleLoc(TL.getRAngleLoc());
for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
@@ -4669,7 +4792,7 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
if (getDerived().AlwaysRebuild() ||
QualifierLoc != TL.getQualifierLoc() ||
NamedT != T->getNamedType()) {
- Result = getDerived().RebuildElaboratedType(TL.getKeywordLoc(),
+ Result = getDerived().RebuildElaboratedType(TL.getElaboratedKeywordLoc(),
T->getKeyword(),
QualifierLoc, NamedT);
if (Result.isNull())
@@ -4677,7 +4800,7 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
}
ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
- NewTL.setKeywordLoc(TL.getKeywordLoc());
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setQualifierLoc(QualifierLoc);
return Result;
}
@@ -4753,7 +4876,7 @@ QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB,
QualType Result
= getDerived().RebuildDependentNameType(T->getKeyword(),
- TL.getKeywordLoc(),
+ TL.getElaboratedKeywordLoc(),
QualifierLoc,
T->getIdentifier(),
TL.getNameLoc());
@@ -4765,11 +4888,11 @@ QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB,
TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc());
ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
- NewTL.setKeywordLoc(TL.getKeywordLoc());
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setQualifierLoc(QualifierLoc);
} else {
DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result);
- NewTL.setKeywordLoc(TL.getKeywordLoc());
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setQualifierLoc(QualifierLoc);
NewTL.setNameLoc(TL.getNameLoc());
}
@@ -4814,7 +4937,7 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
= getDerived().RebuildDependentTemplateSpecializationType(T->getKeyword(),
QualifierLoc,
T->getIdentifier(),
- TL.getNameLoc(),
+ TL.getTemplateNameLoc(),
NewTemplateArgs);
if (Result.isNull())
return QualType();
@@ -4825,7 +4948,8 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
// Copy information relevant to the template specialization.
TemplateSpecializationTypeLoc NamedTL
= TLB.push<TemplateSpecializationTypeLoc>(NamedT);
- NamedTL.setTemplateNameLoc(TL.getNameLoc());
+ NamedTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NamedTL.setTemplateNameLoc(TL.getTemplateNameLoc());
NamedTL.setLAngleLoc(TL.getLAngleLoc());
NamedTL.setRAngleLoc(TL.getRAngleLoc());
for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
@@ -4833,14 +4957,15 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
// Copy information relevant to the elaborated type.
ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
- NewTL.setKeywordLoc(TL.getKeywordLoc());
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
NewTL.setQualifierLoc(QualifierLoc);
} else if (isa<DependentTemplateSpecializationType>(Result)) {
DependentTemplateSpecializationTypeLoc SpecTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
- SpecTL.setKeywordLoc(TL.getKeywordLoc());
+ SpecTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
SpecTL.setQualifierLoc(QualifierLoc);
- SpecTL.setNameLoc(TL.getNameLoc());
+ SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ SpecTL.setTemplateNameLoc(TL.getTemplateNameLoc());
SpecTL.setLAngleLoc(TL.getLAngleLoc());
SpecTL.setRAngleLoc(TL.getRAngleLoc());
for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
@@ -4848,7 +4973,8 @@ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
} else {
TemplateSpecializationTypeLoc SpecTL
= TLB.push<TemplateSpecializationTypeLoc>(Result);
- SpecTL.setTemplateNameLoc(TL.getNameLoc());
+ SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ SpecTL.setTemplateNameLoc(TL.getTemplateNameLoc());
SpecTL.setLAngleLoc(TL.getLAngleLoc());
SpecTL.setRAngleLoc(TL.getRAngleLoc());
for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
@@ -4927,6 +5053,8 @@ template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
bool IsStmtExpr) {
+ Sema::CompoundScopeRAII CompoundScope(getSema());
+
bool SubStmtInvalid = false;
bool SubStmtChanged = false;
ASTOwningVector<Stmt*> Statements(getSema());
@@ -4966,16 +5094,18 @@ StmtResult
TreeTransform<Derived>::TransformCaseStmt(CaseStmt *S) {
ExprResult LHS, RHS;
{
- // The case value expressions are not potentially evaluated.
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
// Transform the left-hand case value.
LHS = getDerived().TransformExpr(S->getLHS());
+ LHS = SemaRef.ActOnConstantExpression(LHS);
if (LHS.isInvalid())
return StmtError();
// Transform the right-hand case value (for the GNU case-range extension).
RHS = getDerived().TransformExpr(S->getRHS());
+ RHS = SemaRef.ActOnConstantExpression(RHS);
if (RHS.isInvalid())
return StmtError();
}
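The hunk above moves case-value transformation from a purely unevaluated context into Sema's ConstantEvaluated context and funnels the results through ActOnConstantExpression. A minimal sketch of the kind of code this affects (illustrative only; the names are not from the patch):

    constexpr int tag(int n) { return 2 * n; }

    template <int N>
    int dispatch(int v) {
      switch (v) {
      case tag(N):   // a constant expression: must be transformed in a
        return 1;    // constant-evaluated context during instantiation
      default:
        return 0;
      }
    }

    int use = dispatch<3>(6); // instantiates the switch with tag(3) == 6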
@@ -5284,6 +5414,7 @@ TreeTransform<Derived>::TransformIndirectGotoStmt(IndirectGotoStmt *S) {
ExprResult Target = getDerived().TransformExpr(S->getTarget());
if (Target.isInvalid())
return StmtError();
+ Target = SemaRef.MaybeCreateExprWithCleanups(Target.take());
if (!getDerived().AlwaysRebuild() &&
Target.get() == S->getTarget())
@@ -5696,10 +5827,18 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
ExprResult Cond = getDerived().TransformExpr(S->getCond());
if (Cond.isInvalid())
return StmtError();
+ if (Cond.get())
+ Cond = SemaRef.CheckBooleanCondition(Cond.take(), S->getColonLoc());
+ if (Cond.isInvalid())
+ return StmtError();
+ if (Cond.get())
+ Cond = SemaRef.MaybeCreateExprWithCleanups(Cond.take());
ExprResult Inc = getDerived().TransformExpr(S->getInc());
if (Inc.isInvalid())
return StmtError();
+ if (Inc.get())
+ Inc = SemaRef.MaybeCreateExprWithCleanups(Inc.take());
StmtResult LoopVar = getDerived().TransformStmt(S->getLoopVarStmt());
if (LoopVar.isInvalid())
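Hedged illustration of why the range-for hunk above re-runs CheckBooleanCondition and wraps the implicit condition and increment in cleanup expressions: both are synthesized expressions (__begin != __end, ++__begin) that can create temporaries. This is a sketch, not code from the patch:

    #include <vector>

    std::vector<int> make() { return {1, 2, 3}; }

    template <typename T>
    int sum(T t) {
      int s = 0;
      for (int v : t)  // implicit cond/inc are rebuilt during instantiation
        s += v;
      return s;
    }

    int total = sum(make());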
@@ -5739,6 +5878,75 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
template<typename Derived>
StmtResult
+TreeTransform<Derived>::TransformMSDependentExistsStmt(
+ MSDependentExistsStmt *S) {
+ // Transform the nested-name-specifier, if any.
+ NestedNameSpecifierLoc QualifierLoc;
+ if (S->getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(S->getQualifierLoc());
+ if (!QualifierLoc)
+ return StmtError();
+ }
+
+ // Transform the declaration name.
+ DeclarationNameInfo NameInfo = S->getNameInfo();
+ if (NameInfo.getName()) {
+ NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo);
+ if (!NameInfo.getName())
+ return StmtError();
+ }
+
+ // Check whether anything changed.
+ if (!getDerived().AlwaysRebuild() &&
+ QualifierLoc == S->getQualifierLoc() &&
+ NameInfo.getName() == S->getNameInfo().getName())
+ return S;
+
+ // Determine whether this name exists, if we can.
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ bool Dependent = false;
+ switch (getSema().CheckMicrosoftIfExistsSymbol(/*S=*/0, SS, NameInfo)) {
+ case Sema::IER_Exists:
+ if (S->isIfExists())
+ break;
+
+ return new (getSema().Context) NullStmt(S->getKeywordLoc());
+
+ case Sema::IER_DoesNotExist:
+ if (S->isIfNotExists())
+ break;
+
+ return new (getSema().Context) NullStmt(S->getKeywordLoc());
+
+ case Sema::IER_Dependent:
+ Dependent = true;
+ break;
+
+ case Sema::IER_Error:
+ return StmtError();
+ }
+
+ // We need to continue with the instantiation, so do so now.
+ StmtResult SubStmt = getDerived().TransformCompoundStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ // If we have resolved the name, just transform to the substatement.
+ if (!Dependent)
+ return SubStmt;
+
+ // The name is still dependent, so build a dependent expression again.
+ return getDerived().RebuildMSDependentExistsStmt(S->getKeywordLoc(),
+ S->isIfExists(),
+ QualifierLoc,
+ NameInfo,
+ SubStmt.get());
+}
+
+template<typename Derived>
+StmtResult
TreeTransform<Derived>::TransformSEHTryStmt(SEHTryStmt *S) {
StmtResult TryBlock; // = getDerived().TransformCompoundStmt(S->getTryBlock());
if(TryBlock.isInvalid()) return StmtError();
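A short sketch of the Microsoft extension that the new TransformMSDependentExistsStmt above handles, assuming a compiler invocation with -fms-extensions; the Widget/draw names are invented for the example:

    struct Widget { void render(); };

    template <typename T>
    void draw(T &t) {
      __if_exists (T::render) {
        t.render();        // kept once T::render is known to exist
      }
      __if_not_exists (T::render) {
        // dropped for Widget; stays dependent until T is known
      }
    }

    void test(Widget &w) { draw(w); } // instantiation resolves both branches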
@@ -5829,7 +6037,7 @@ TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) {
// Mark it referenced in the new context regardless.
// FIXME: this is a bit instantiation-specific.
- SemaRef.MarkDeclarationReferenced(E->getLocation(), ND);
+ SemaRef.MarkDeclRefReferenced(E);
return SemaRef.Owned(E);
}
@@ -5881,6 +6089,12 @@ TreeTransform<Derived>::TransformCharacterLiteral(CharacterLiteral *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformUserDefinedLiteral(UserDefinedLiteral *E) {
+ return SemaRef.MaybeBindToTemporary(E);
+}
+
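The one-line TransformUserDefinedLiteral above only needs to re-bind a possible temporary, since the literal itself cannot be dependent. A minimal illustration (the Mass type and the _kg suffix are made up for the example):

    struct Mass { long double kg; };

    Mass operator"" _kg(long double v) { return Mass{v}; }

    Mass payload = 3.5_kg; // sugar for operator"" _kg(3.5L); may bind a temporary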
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
ExprResult ControllingExpr =
getDerived().TransformExpr(E->getControllingExpr());
@@ -6020,6 +6234,28 @@ TreeTransform<Derived>::TransformOpaqueValueExpr(OpaqueValueExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
+ // Rebuild the syntactic form. The original syntactic form has
+ // opaque-value expressions in it, so strip those away and rebuild
+ // the result. This is a really awful way of doing this, but the
+ // better solution (rebuilding the semantic expressions and
+ // rebinding OVEs as necessary) doesn't work; we'd need
+ // TreeTransform to not strip away implicit conversions.
+ Expr *newSyntacticForm = SemaRef.recreateSyntacticForm(E);
+ ExprResult result = getDerived().TransformExpr(newSyntacticForm);
+ if (result.isInvalid()) return ExprError();
+
+ // If that gives us a pseudo-object result back, the pseudo-object
+ // expression must have been an lvalue-to-rvalue conversion which we
+ // should reapply.
+ if (result.get()->hasPlaceholderType(BuiltinType::PseudoObject))
+ result = SemaRef.checkPseudoObjectRValue(result.take());
+
+ return result;
+}
+
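For a sense of what "rebuilding the syntactic form" above means: a pseudo-object expression pairs a written (syntactic) form with compiler-generated getter/setter calls. The sketch below uses clang's Microsoft property support (a later, -fms-extensions feature) purely as an illustration of that split; at the time of this patch the main clients are Objective-C property and subscript accesses:

    struct Counter {
      int  get() const { return n; }
      void set(int v)  { n = v; }
      __declspec(property(get = get, put = set)) int value;
      int n;
    };

    int demo() {
      Counter c;
      c.n = 0;
      c.value = 4;        // syntactic form; the semantic form calls c.set(4)
      return c.value + 1; // rvalue use reapplied through c.get()
    }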
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformUnaryExprOrTypeTraitExpr(
UnaryExprOrTypeTraitExpr *E) {
if (E->isArgumentType()) {
@@ -6037,20 +6273,17 @@ TreeTransform<Derived>::TransformUnaryExprOrTypeTraitExpr(
E->getSourceRange());
}
- ExprResult SubExpr;
- {
- // C++0x [expr.sizeof]p1:
- // The operand is either an expression, which is an unevaluated operand
- // [...]
- EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ // C++0x [expr.sizeof]p1:
+ // The operand is either an expression, which is an unevaluated operand
+ // [...]
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
- SubExpr = getDerived().TransformExpr(E->getArgumentExpr());
- if (SubExpr.isInvalid())
- return ExprError();
+ ExprResult SubExpr = getDerived().TransformExpr(E->getArgumentExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
- if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getArgumentExpr())
- return SemaRef.Owned(E);
- }
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getArgumentExpr())
+ return SemaRef.Owned(E);
return getDerived().RebuildUnaryExprOrTypeTrait(SubExpr.get(),
E->getOperatorLoc(),
@@ -6099,7 +6332,7 @@ TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
if (!getDerived().AlwaysRebuild() &&
Callee.get() == E->getCallee() &&
!ArgChanged)
- return SemaRef.Owned(E);
+ return SemaRef.MaybeBindToTemporary(E);
// FIXME: Wrong source location information for the '('.
SourceLocation FakeLParenLoc
@@ -6124,6 +6357,7 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
if (!QualifierLoc)
return ExprError();
}
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
ValueDecl *Member
= cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getMemberLoc(),
@@ -6150,7 +6384,8 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
// Mark it referenced in the new context regardless.
// FIXME: this is a bit instantiation-specific.
- SemaRef.MarkDeclarationReferenced(E->getMemberLoc(), Member);
+ SemaRef.MarkMemberReferenced(E);
+
return SemaRef.Owned(E);
}
@@ -6177,6 +6412,7 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
return getDerived().RebuildMemberExpr(Base.get(), FakeOperatorLoc,
E->isArrow(),
QualifierLoc,
+ TemplateKWLoc,
E->getMemberNameInfo(),
Member,
FoundDecl,
@@ -6312,7 +6548,7 @@ TreeTransform<Derived>::TransformCompoundLiteralExpr(CompoundLiteralExpr *E) {
if (!getDerived().AlwaysRebuild() &&
OldT == NewT &&
Init.get() == E->getInitializer())
- return SemaRef.Owned(E);
+ return SemaRef.MaybeBindToTemporary(E);
// Note: the expression type doesn't necessarily match the
// type-as-written, but that's okay, because it should always be
@@ -6500,14 +6736,20 @@ TreeTransform<Derived>::TransformAddrLabelExpr(AddrLabelExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformStmtExpr(StmtExpr *E) {
+ SemaRef.ActOnStartStmtExpr();
StmtResult SubStmt
= getDerived().TransformCompoundStmt(E->getSubStmt(), true);
- if (SubStmt.isInvalid())
+ if (SubStmt.isInvalid()) {
+ SemaRef.ActOnStmtExprError();
return ExprError();
+ }
if (!getDerived().AlwaysRebuild() &&
- SubStmt.get() == E->getSubStmt())
- return SemaRef.Owned(E);
+ SubStmt.get() == E->getSubStmt()) {
+ // Calling this an 'error' is unintuitive, but it does the right thing.
+ SemaRef.ActOnStmtExprError();
+ return SemaRef.MaybeBindToTemporary(E);
+ }
return getDerived().RebuildStmtExpr(E->getLParenLoc(),
SubStmt.get(),
@@ -6555,7 +6797,6 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
case OO_Array_New:
case OO_Array_Delete:
llvm_unreachable("new and delete operators cannot use CXXOperatorCallExpr");
- return ExprError();
case OO_Call: {
// This is a call to an object's operator().
@@ -6592,12 +6833,10 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
case OO_Conditional:
llvm_unreachable("conditional operator is not actually overloadable");
- return ExprError();
case OO_None:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("not an overloaded operator?");
- return ExprError();
}
ExprResult Callee = getDerived().TransformExpr(E->getCallee());
@@ -6619,7 +6858,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
Callee.get() == E->getCallee() &&
First.get() == E->getArg(0) &&
(E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
- return SemaRef.Owned(E);
+ return SemaRef.MaybeBindToTemporary(E);
return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
E->getOperatorLoc(),
@@ -6657,7 +6896,7 @@ TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
if (!getDerived().AlwaysRebuild() &&
Callee.get() == E->getCallee() &&
!ArgChanged)
- return SemaRef.Owned(E);
+ return SemaRef.MaybeBindToTemporary(E);
// FIXME: Wrong source location information for the '('.
SourceLocation FakeLParenLoc
@@ -6769,11 +7008,11 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
E->getLocEnd());
}
- // We don't know whether the expression is potentially evaluated until
- // after we perform semantic analysis, so the expression is potentially
+ // We don't know whether the subexpression is potentially evaluated until
+ // after we perform semantic analysis. We speculatively assume it is
+ // unevaluated; it will get fixed later if the subexpression is in fact
// potentially evaluated.
- EnterExpressionEvaluationContext Unevaluated(SemaRef,
- Sema::PotentiallyPotentiallyEvaluated);
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
if (SubExpr.isInvalid())
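The typeid hunk above replaces the old PotentiallyPotentiallyEvaluated context with a plain unevaluated one that gets fixed up later. The distinction it papers over, as a sketch:

    #include <typeinfo>

    struct Base { virtual ~Base() {} };
    struct Derived : Base {};

    const std::type_info &poly(Base *p) {
      return typeid(*p);  // polymorphic glvalue operand: evaluated at runtime
    }

    const std::type_info &mono() {
      return typeid(int); // type operand: nothing is evaluated
    }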
@@ -6808,9 +7047,6 @@ TreeTransform<Derived>::TransformCXXUuidofExpr(CXXUuidofExpr *E) {
E->getLocEnd());
}
- // We don't know whether the expression is potentially evaluated until
- // after we perform semantic analysis, so the expression is potentially
- // potentially evaluated.
EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
@@ -6851,9 +7087,12 @@ TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
T = getSema().Context.getPointerType(
getSema().Context.getRecordType(cast<CXXRecordDecl>(DC)));
- if (!getDerived().AlwaysRebuild() && T == E->getType())
+ if (!getDerived().AlwaysRebuild() && T == E->getType()) {
+ // Make sure that we capture 'this'.
+ getSema().CheckCXXThisCapture(E->getLocStart());
return SemaRef.Owned(E);
-
+ }
+
return getDerived().RebuildCXXThisExpr(E->getLocStart(), T, E->isImplicit());
}
@@ -6925,24 +7164,17 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
if (getDerived().TransformExprs(E->getPlacementArgs(),
E->getNumPlacementArgs(), true,
PlacementArgs, &ArgumentChanged))
- return ExprError();
-
- // transform the constructor arguments (if any).
- ASTOwningVector<Expr*> ConstructorArgs(SemaRef);
- if (TransformExprs(E->getConstructorArgs(), E->getNumConstructorArgs(), true,
- ConstructorArgs, &ArgumentChanged))
- return ExprError();
-
- // Transform constructor, new operator, and delete operator.
- CXXConstructorDecl *Constructor = 0;
- if (E->getConstructor()) {
- Constructor = cast_or_null<CXXConstructorDecl>(
- getDerived().TransformDecl(E->getLocStart(),
- E->getConstructor()));
- if (!Constructor)
- return ExprError();
- }
+ return ExprError();
+ // Transform the initializer (if any).
+ Expr *OldInit = E->getInitializer();
+ ExprResult NewInit;
+ if (OldInit)
+ NewInit = getDerived().TransformExpr(OldInit);
+ if (NewInit.isInvalid())
+ return ExprError();
+
+ // Transform new operator and delete operator.
FunctionDecl *OperatorNew = 0;
if (E->getOperatorNew()) {
OperatorNew = cast_or_null<FunctionDecl>(
@@ -6964,31 +7196,28 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
if (!getDerived().AlwaysRebuild() &&
AllocTypeInfo == E->getAllocatedTypeSourceInfo() &&
ArraySize.get() == E->getArraySize() &&
- Constructor == E->getConstructor() &&
+ NewInit.get() == OldInit &&
OperatorNew == E->getOperatorNew() &&
OperatorDelete == E->getOperatorDelete() &&
!ArgumentChanged) {
// Mark any declarations we need as referenced.
// FIXME: instantiation-specific.
- if (Constructor)
- SemaRef.MarkDeclarationReferenced(E->getLocStart(), Constructor);
if (OperatorNew)
- SemaRef.MarkDeclarationReferenced(E->getLocStart(), OperatorNew);
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorNew);
if (OperatorDelete)
- SemaRef.MarkDeclarationReferenced(E->getLocStart(), OperatorDelete);
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
- if (E->isArray() && Constructor &&
- !E->getAllocatedType()->isDependentType()) {
+ if (E->isArray() && !E->getAllocatedType()->isDependentType()) {
QualType ElementType
= SemaRef.Context.getBaseElementType(E->getAllocatedType());
if (const RecordType *RecordT = ElementType->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordT->getDecl());
if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(Record)) {
- SemaRef.MarkDeclarationReferenced(E->getLocStart(), Destructor);
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Destructor);
}
}
}
-
+
return SemaRef.Owned(E);
}
@@ -7018,7 +7247,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
}
}
}
-
+
return getDerived().RebuildCXXNewExpr(E->getLocStart(),
E->isGlobalNew(),
/*FIXME:*/E->getLocStart(),
@@ -7028,13 +7257,8 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
AllocType,
AllocTypeInfo,
ArraySize.get(),
- /*FIXME:*/E->hasInitializer()
- ? E->getLocStart()
- : SourceLocation(),
- move_arg(ConstructorArgs),
- /*FIXME:*/E->hasInitializer()
- ? E->getLocEnd()
- : SourceLocation());
+ E->getDirectInitRange(),
+ NewInit.take());
}
template<typename Derived>
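The CXXNewExpr hunks above replace the old constructor-plus-argument-list representation with a single initializer expression and a direct-init range. A sketch of two forms that now flow through the same path (names are illustrative):

    struct Point {
      int x, y;
      Point(int a, int b) : x(a), y(b) {}
    };

    template <typename T>
    T *build() {
      return new T(1, 2);      // direct-init; rebuilt from E->getInitializer()
    }

    Point *p1 = build<Point>();
    int   *p2 = new int[3]();  // array + value-init: no constructor arguments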
@@ -7060,15 +7284,15 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
// Mark any declarations we need as referenced.
// FIXME: instantiation-specific.
if (OperatorDelete)
- SemaRef.MarkDeclarationReferenced(E->getLocStart(), OperatorDelete);
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
if (!E->getArgument()->isTypeDependent()) {
QualType Destroyed = SemaRef.Context.getBaseElementType(
E->getDestroyedType());
if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- SemaRef.MarkDeclarationReferenced(E->getLocStart(),
- SemaRef.LookupDestructor(Record));
+ SemaRef.MarkFunctionReferenced(E->getLocStart(),
+ SemaRef.LookupDestructor(Record));
}
}
@@ -7118,7 +7342,7 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
if (!DestroyedTypeInfo)
return ExprError();
Destroyed = DestroyedTypeInfo;
- } else if (ObjectType->isDependentType()) {
+ } else if (!ObjectType.isNull() && ObjectType->isDependentType()) {
// We aren't likely to be able to resolve the identifier down to a type
// now anyway, so just retain the identifier.
Destroyed = PseudoDestructorTypeStorage(E->getDestroyedTypeIdentifier(),
@@ -7216,8 +7440,11 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
R.setNamingClass(NamingClass);
}
- // If we have no template arguments, it's a normal declaration name.
- if (!Old->hasExplicitTemplateArgs())
+ SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
+
+ // If we have neither explicit template arguments, nor the template keyword,
+ // it's a normal declaration name.
+ if (!Old->hasExplicitTemplateArgs() && !TemplateKWLoc.isValid())
return getDerived().RebuildDeclarationNameExpr(SS, R, Old->requiresADL());
// If we have template arguments, rebuild them, then rebuild the
@@ -7228,8 +7455,8 @@ TreeTransform<Derived>::TransformUnresolvedLookupExpr(
TransArgs))
return ExprError();
- return getDerived().RebuildTemplateIdExpr(SS, R, Old->requiresADL(),
- TransArgs);
+ return getDerived().RebuildTemplateIdExpr(SS, TemplateKWLoc, R,
+ Old->requiresADL(), &TransArgs);
}
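Sketch of the case the unresolved-lookup hunk above now preserves: a template keyword or explicit template arguments force the template-id path even when the arguments are dependent. The names are invented, and the sizeof value assumes a typical 64-bit target:

    template <int N> int pick() { return N; }

    template <typename T>
    int caller() {
      return pick<sizeof(T)>(); // explicit args: RebuildTemplateIdExpr path
    }

    int got = caller<double>(); // instantiation rebuilds pick<8>()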
template<typename Derived>
@@ -7272,6 +7499,128 @@ TreeTransform<Derived>::TransformBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
+ bool ArgChanged = false;
+ llvm::SmallVector<TypeSourceInfo *, 4> Args;
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
+ TypeSourceInfo *From = E->getArg(I);
+ TypeLoc FromTL = From->getTypeLoc();
+ if (!isa<PackExpansionTypeLoc>(FromTL)) {
+ TypeLocBuilder TLB;
+ TLB.reserve(FromTL.getFullDataSize());
+ QualType To = getDerived().TransformType(TLB, FromTL);
+ if (To.isNull())
+ return ExprError();
+
+ if (To == From->getType())
+ Args.push_back(From);
+ else {
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ ArgChanged = true;
+ }
+ continue;
+ }
+
+ ArgChanged = true;
+
+ // We have a pack expansion. Instantiate it.
+ PackExpansionTypeLoc ExpansionTL = cast<PackExpansionTypeLoc>(FromTL);
+ TypeLoc PatternTL = ExpansionTL.getPatternLoc();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(PatternTL, Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions
+ = ExpansionTL.getTypePtr()->getNumExpansions();
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
+ PatternTL.getSourceRange(),
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+
+ TypeLocBuilder TLB;
+ TLB.reserve(From->getTypeLoc().getFullDataSize());
+
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ To = getDerived().RebuildPackExpansionType(To,
+ PatternTL.getSourceRange(),
+ ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (To.isNull())
+ return ExprError();
+
+ PackExpansionTypeLoc ToExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(To);
+ ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ continue;
+ }
+
+ // Expand the pack expansion by substituting for each argument in the
+ // pack(s).
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ TypeLocBuilder TLB;
+ TLB.reserve(PatternTL.getFullDataSize());
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ }
+
+ if (!RetainExpansion)
+ continue;
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ TypeLocBuilder TLB;
+ TLB.reserve(From->getTypeLoc().getFullDataSize());
+
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ To = getDerived().RebuildPackExpansionType(To,
+ PatternTL.getSourceRange(),
+ ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (To.isNull())
+ return ExprError();
+
+ PackExpansionTypeLoc ToExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(To);
+ ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ }
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildTypeTrait(E->getTrait(),
+ E->getLocStart(),
+ Args,
+ E->getLocEnd());
+}
+
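The new TransformTypeTraitExpr above exists for variadic traits whose argument lists can contain pack expansions. A minimal sketch using a clang builtin trait of that shape:

    template <typename T, typename... Args>
    struct can_trivially_construct {
      static const bool value = __is_trivially_constructible(T, Args...);
    };                        // Args... is the pack-expansion case above

    static_assert(can_trivially_construct<int, int>::value,
                  "int is trivially constructible from int");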
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
TypeSourceInfo *T = getDerived().TransformType(E->getQueriedTypeSourceInfo());
if (!T)
@@ -7325,6 +7674,7 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
= getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
if (!QualifierLoc)
return ExprError();
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
// TODO: If this is a conversion-function-id, verify that the
// destination type name (if present) resolves the same way after
@@ -7344,6 +7694,7 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
return SemaRef.Owned(E);
return getDerived().RebuildDependentScopeDeclRefExpr(QualifierLoc,
+ TemplateKWLoc,
NameInfo,
/*TemplateArgs*/ 0);
}
@@ -7355,6 +7706,7 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
return ExprError();
return getDerived().RebuildDependentScopeDeclRefExpr(QualifierLoc,
+ TemplateKWLoc,
NameInfo,
&TransArgs);
}
@@ -7393,7 +7745,7 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
!ArgumentChanged) {
// Mark the constructor as referenced.
// FIXME: Instantiation-specific
- SemaRef.MarkDeclarationReferenced(E->getLocStart(), Constructor);
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
return SemaRef.Owned(E);
}
@@ -7454,7 +7806,7 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
Constructor == E->getConstructor() &&
!ArgumentChanged) {
// FIXME: Instantiation-specific
- SemaRef.MarkDeclarationReferenced(E->getLocStart(), Constructor);
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
return SemaRef.MaybeBindToTemporary(E);
}
@@ -7466,6 +7818,158 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
+ // Create the local class that will describe the lambda.
+ CXXRecordDecl *Class
+ = getSema().createLambdaClosureType(E->getIntroducerRange(),
+ /*KnownDependent=*/false);
+ getDerived().transformedLocalDecl(E->getLambdaClass(), Class);
+
+ // Transform the type of the lambda parameters and start the definition of
+ // the lambda itself.
+ TypeSourceInfo *MethodTy
+ = TransformType(E->getCallOperator()->getTypeSourceInfo());
+ if (!MethodTy)
+ return ExprError();
+
+ // Transform lambda parameters.
+ bool Invalid = false;
+ llvm::SmallVector<QualType, 4> ParamTypes;
+ llvm::SmallVector<ParmVarDecl *, 4> Params;
+ if (getDerived().TransformFunctionTypeParams(E->getLocStart(),
+ E->getCallOperator()->param_begin(),
+ E->getCallOperator()->param_size(),
+ 0, ParamTypes, &Params))
+ Invalid = true;
+
+ // Build the call operator.
+ // Note: Once a lambda mangling number and context declaration have been
+ // assigned, they never change.
+ unsigned ManglingNumber = E->getLambdaClass()->getLambdaManglingNumber();
+ Decl *ContextDecl = E->getLambdaClass()->getLambdaContextDecl();
+ CXXMethodDecl *CallOperator
+ = getSema().startLambdaDefinition(Class, E->getIntroducerRange(),
+ MethodTy,
+ E->getCallOperator()->getLocEnd(),
+ Params, ManglingNumber, ContextDecl);
+ getDerived().transformAttrs(E->getCallOperator(), CallOperator);
+
+ // FIXME: Instantiation-specific.
+ CallOperator->setInstantiationOfMemberFunction(E->getCallOperator(),
+ TSK_ImplicitInstantiation);
+
+ // Introduce the context of the call operator.
+ Sema::ContextRAII SavedContext(getSema(), CallOperator);
+
+ // Enter the scope of the lambda.
+ sema::LambdaScopeInfo *LSI
+ = getSema().enterLambdaScope(CallOperator, E->getIntroducerRange(),
+ E->getCaptureDefault(),
+ E->hasExplicitParameters(),
+ E->hasExplicitResultType(),
+ E->isMutable());
+
+ // Transform captures.
+ bool FinishedExplicitCaptures = false;
+ for (LambdaExpr::capture_iterator C = E->capture_begin(),
+ CEnd = E->capture_end();
+ C != CEnd; ++C) {
+ // When we hit the first implicit capture, tell Sema that we've finished
+ // the list of explicit captures.
+ if (!FinishedExplicitCaptures && C->isImplicit()) {
+ getSema().finishLambdaExplicitCaptures(LSI);
+ FinishedExplicitCaptures = true;
+ }
+
+ // Capturing 'this' is trivial.
+ if (C->capturesThis()) {
+ getSema().CheckCXXThisCapture(C->getLocation(), C->isExplicit());
+ continue;
+ }
+
+ // Determine the capture kind for Sema.
+ Sema::TryCaptureKind Kind
+ = C->isImplicit()? Sema::TryCapture_Implicit
+ : C->getCaptureKind() == LCK_ByCopy
+ ? Sema::TryCapture_ExplicitByVal
+ : Sema::TryCapture_ExplicitByRef;
+ SourceLocation EllipsisLoc;
+ if (C->isPackExpansion()) {
+ UnexpandedParameterPack Unexpanded(C->getCapturedVar(), C->getLocation());
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(C->getEllipsisLoc(),
+ C->getLocation(),
+ Unexpanded,
+ ShouldExpand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (ShouldExpand) {
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern: transform and capture each argument in
+ // the pack.
+ VarDecl *Pack = C->getCapturedVar();
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ VarDecl *CapturedVar
+ = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
+ Pack));
+ if (!CapturedVar) {
+ Invalid = true;
+ continue;
+ }
+
+ // Capture the transformed variable.
+ getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind);
+ }
+ continue;
+ }
+
+ EllipsisLoc = C->getEllipsisLoc();
+ }
+
+ // Transform the captured variable.
+ VarDecl *CapturedVar
+ = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
+ C->getCapturedVar()));
+ if (!CapturedVar) {
+ Invalid = true;
+ continue;
+ }
+
+ // Capture the transformed variable.
+ getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind);
+ }
+ if (!FinishedExplicitCaptures)
+ getSema().finishLambdaExplicitCaptures(LSI);
+
+
+ // Enter a new evaluation context to insulate the lambda from any
+ // cleanups from the enclosing full-expression.
+ getSema().PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+
+ if (Invalid) {
+ getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/0,
+ /*IsInstantiation=*/true);
+ return ExprError();
+ }
+
+ // Instantiate the body of the lambda expression.
+ StmtResult Body = getDerived().TransformStmt(E->getBody());
+ if (Body.isInvalid()) {
+ getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/0,
+ /*IsInstantiation=*/true);
+ return ExprError();
+ }
+
+ return getSema().ActOnLambdaExpr(E->getLocStart(), Body.take(),
+ /*CurScope=*/0, /*IsInstantiation=*/true);
+}
+
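A compact sketch covering the capture kinds the lambda transform above walks: 'this', an ordinary capture, and a pack-expansion capture. Host and make are invented for the example:

    #include <functional>

    struct Host {
      int base;

      template <typename... Ts>
      std::function<int()> make(Ts... vs) {
        return [this, vs...]() -> int {  // 'this' + pack-expansion capture
          int extras[] = {0, vs...};
          int sum = base;
          for (int v : extras) sum += v;
          return sum;
        };
      }
    };

    int demo() { Host h = {1}; return h.make(2, 3)(); } // 1 + 2 + 3 == 6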
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
CXXUnresolvedConstructExpr *E) {
TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
@@ -7542,6 +8046,8 @@ TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
return ExprError();
}
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
+
// TODO: If this is a conversion-function-id, verify that the
// destination type name (if present) resolves the same way after
// instantiation as it did in the local scope.
@@ -7567,6 +8073,7 @@ TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
E->isArrow(),
E->getOperatorLoc(),
QualifierLoc,
+ TemplateKWLoc,
FirstQualifierInScope,
NameInfo,
/*TemplateArgs*/ 0);
@@ -7583,6 +8090,7 @@ TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
E->isArrow(),
E->getOperatorLoc(),
QualifierLoc,
+ TemplateKWLoc,
FirstQualifierInScope,
NameInfo,
&TransArgs);
@@ -7598,7 +8106,11 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
Base = getDerived().TransformExpr(Old->getBase());
if (Base.isInvalid())
return ExprError();
- BaseType = ((Expr*) Base.get())->getType();
+ Base = getSema().PerformMemberExprBaseConversion(Base.take(),
+ Old->isArrow());
+ if (Base.isInvalid())
+ return ExprError();
+ BaseType = Base.get()->getType();
} else {
BaseType = getDerived().TransformType(Old->getBaseType());
}
@@ -7611,6 +8123,8 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
return ExprError();
}
+ SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
+
LookupResult R(SemaRef, Old->getMemberNameInfo(),
Sema::LookupOrdinaryName);
@@ -7678,6 +8192,7 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
Old->getOperatorLoc(),
Old->isArrow(),
QualifierLoc,
+ TemplateKWLoc,
FirstQualifierInScope,
R,
(Old->hasExplicitTemplateArgs()
@@ -7778,11 +8293,163 @@ TreeTransform<Derived>::TransformMaterializeTemporaryExpr(
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformObjCStringLiteral(ObjCStringLiteral *E) {
+ return SemaRef.MaybeBindToTemporary(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
return SemaRef.Owned(E);
}
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformObjCNumericLiteral(ObjCNumericLiteral *E) {
+ return SemaRef.MaybeBindToTemporary(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCArrayLiteral(ObjCArrayLiteral *E) {
+ // Transform each of the elements.
+ llvm::SmallVector<Expr *, 8> Elements;
+ bool ArgChanged = false;
+ if (getDerived().TransformExprs(E->getElements(), E->getNumElements(),
+ /*IsCall=*/false, Elements, &ArgChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ return getDerived().RebuildObjCArrayLiteral(E->getSourceRange(),
+ Elements.data(),
+ Elements.size());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCDictionaryLiteral(
+ ObjCDictionaryLiteral *E) {
+ // Transform each of the elements.
+ llvm::SmallVector<ObjCDictionaryElement, 8> Elements;
+ bool ArgChanged = false;
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ ObjCDictionaryElement OrigElement = E->getKeyValueElement(I);
+
+ if (OrigElement.isPackExpansion()) {
+ // This key/value element is a pack expansion.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(OrigElement.Key, Unexpanded);
+ getSema().collectUnexpandedParameterPacks(OrigElement.Value, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can
+ // and should be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions = OrigElement.NumExpansions;
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ SourceRange PatternRange(OrigElement.Key->getLocStart(),
+ OrigElement.Value->getLocEnd());
+ if (getDerived().TryExpandParameterPacks(OrigElement.EllipsisLoc,
+ PatternRange,
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ if (Key.get() != OrigElement.Key)
+ ArgChanged = true;
+
+ ExprResult Value = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ if (Value.get() != OrigElement.Value)
+ ArgChanged = true;
+
+ ObjCDictionaryElement Expansion = {
+ Key.get(), Value.get(), OrigElement.EllipsisLoc, NumExpansions
+ };
+ Elements.push_back(Expansion);
+ continue;
+ }
+
+ // Record right away that the argument was changed. This needs
+ // to happen even if the array expands to nothing.
+ ArgChanged = true;
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ ExprResult Value = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ ObjCDictionaryElement Element = {
+ Key.get(), Value.get(), SourceLocation(), NumExpansions
+ };
+
+ // If any unexpanded parameter packs remain, we still have a
+ // pack expansion.
+ if (Key.get()->containsUnexpandedParameterPack() ||
+ Value.get()->containsUnexpandedParameterPack())
+ Element.EllipsisLoc = OrigElement.EllipsisLoc;
+
+ Elements.push_back(Element);
+ }
+
+ // We've finished with this pack expansion.
+ continue;
+ }
+
+ // Transform and check key.
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ if (Key.get() != OrigElement.Key)
+ ArgChanged = true;
+
+ // Transform and check value.
+ ExprResult Value
+ = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ if (Value.get() != OrigElement.Value)
+ ArgChanged = true;
+
+ ObjCDictionaryElement Element = {
+ Key.get(), Value.get(), SourceLocation(), llvm::Optional<unsigned>()
+ };
+ Elements.push_back(Element);
+ }
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ return getDerived().RebuildObjCDictionaryLiteral(E->getSourceRange(),
+ Elements.data(),
+ Elements.size());
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformObjCEncodeExpr(ObjCEncodeExpr *E) {
TypeSourceInfo *EncodedTypeInfo
= getDerived().TransformType(E->getEncodedTypeSourceInfo());
@@ -7856,7 +8523,7 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
// If nothing changed, just retain the existing message send.
if (!getDerived().AlwaysRebuild() &&
ReceiverTypeInfo == E->getClassReceiverTypeInfo() && !ArgChanged)
- return SemaRef.Owned(E);
+ return SemaRef.MaybeBindToTemporary(E);
// Build a new class message send.
SmallVector<SourceLocation, 16> SelLocs;
@@ -7881,7 +8548,7 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
// If nothing changed, just retain the existing message send.
if (!getDerived().AlwaysRebuild() &&
Receiver.get() == E->getInstanceReceiver() && !ArgChanged)
- return SemaRef.Owned(E);
+ return SemaRef.MaybeBindToTemporary(E);
// Build a new instance message send.
SmallVector<SourceLocation, 16> SelLocs;
@@ -7953,7 +8620,7 @@ TreeTransform<Derived>::TransformObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
E->getLocation());
return getDerived().RebuildObjCPropertyRefExpr(Base.get(),
- E->getType(),
+ SemaRef.Context.PseudoObjectTy,
E->getImplicitPropertyGetter(),
E->getImplicitPropertySetter(),
E->getLocation());
@@ -7961,6 +8628,30 @@ TreeTransform<Derived>::TransformObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBaseExpr());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // Transform the key expression.
+ ExprResult Key = getDerived().TransformExpr(E->getKeyExpr());
+ if (Key.isInvalid())
+ return ExprError();
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Key.get() == E->getKeyExpr() && Base.get() == E->getBaseExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildObjCSubscriptRefExpr(E->getRBracket(),
+ Base.get(), Key.get(),
+ E->getAtIndexMethodDecl(),
+ E->setAtIndexMethodDecl());
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformObjCIsaExpr(ObjCIsaExpr *E) {
// Transform the base expression.
ExprResult Base = getDerived().TransformExpr(E->getBase());
@@ -8004,10 +8695,8 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
BlockScopeInfo *blockScope = SemaRef.getCurBlock();
blockScope->TheDecl->setIsVariadic(oldBlock->isVariadic());
- // We built a new blockScopeInfo in call to ActOnBlockStart
- // in above, CapturesCXXThis need be set here from the block
- // expression.
- blockScope->CapturesCXXThis = oldBlock->capturesCXXThis();
+ blockScope->TheDecl->setBlockMissingReturnType(
+ oldBlock->blockMissingReturnType());
SmallVector<ParmVarDecl*, 4> params;
SmallVector<QualType, 4> paramTypes;
@@ -8016,54 +8705,48 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
if (getDerived().TransformFunctionTypeParams(E->getCaretLocation(),
oldBlock->param_begin(),
oldBlock->param_size(),
- 0, paramTypes, &params))
- return true;
+ 0, paramTypes, &params)) {
+ getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/0);
+ return ExprError();
+ }
const FunctionType *exprFunctionType = E->getFunctionType();
- QualType exprResultType = exprFunctionType->getResultType();
- if (!exprResultType.isNull()) {
- if (!exprResultType->isDependentType())
- blockScope->ReturnType = exprResultType;
- else if (exprResultType != getSema().Context.DependentTy)
- blockScope->ReturnType = getDerived().TransformType(exprResultType);
- }
-
- // If the return type has not been determined yet, leave it as a dependent
- // type; it'll get set when we process the body.
- if (blockScope->ReturnType.isNull())
- blockScope->ReturnType = getSema().Context.DependentTy;
+ QualType exprResultType =
+ getDerived().TransformType(exprFunctionType->getResultType());
// Don't allow returning a objc interface by value.
- if (blockScope->ReturnType->isObjCObjectType()) {
+ if (exprResultType->isObjCObjectType()) {
getSema().Diag(E->getCaretLocation(),
diag::err_object_cannot_be_passed_returned_by_value)
- << 0 << blockScope->ReturnType;
+ << 0 << exprResultType;
+ getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/0);
return ExprError();
}
QualType functionType = getDerived().RebuildFunctionProtoType(
- blockScope->ReturnType,
+ exprResultType,
paramTypes.data(),
paramTypes.size(),
oldBlock->isVariadic(),
- 0, RQ_None,
+ false, 0, RQ_None,
exprFunctionType->getExtInfo());
blockScope->FunctionType = functionType;
// Set the parameters on the block decl.
if (!params.empty())
blockScope->TheDecl->setParams(params);
-
- // If the return type wasn't explicitly set, it will have been marked as a
- // dependent type (DependentTy); clear out the return type setting so
- // we will deduce the return type when type-checking the block's body.
- if (blockScope->ReturnType == getSema().Context.DependentTy)
- blockScope->ReturnType = QualType();
+
+ if (!oldBlock->blockMissingReturnType()) {
+ blockScope->HasImplicitReturnType = false;
+ blockScope->ReturnType = exprResultType;
+ }
// Transform the body
StmtResult body = getDerived().TransformStmt(E->getBody());
- if (body.isInvalid())
+ if (body.isInvalid()) {
+ getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/0);
return ExprError();
+ }
#ifndef NDEBUG
// In builds with assertions, make sure that we captured everything we
@@ -8083,6 +8766,7 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
oldCapture));
assert(blockScope->CaptureMap.count(newCapture));
}
+ assert(oldBlock->capturesCXXThis() == blockScope->isCXXThisCaptured());
}
#endif
@@ -8092,29 +8776,6 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
template<typename Derived>
ExprResult
-TreeTransform<Derived>::TransformBlockDeclRefExpr(BlockDeclRefExpr *E) {
- ValueDecl *ND
- = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getLocation(),
- E->getDecl()));
- if (!ND)
- return ExprError();
-
- if (!getDerived().AlwaysRebuild() &&
- ND == E->getDecl()) {
- // Mark it referenced in the new context regardless.
- // FIXME: this is a bit instantiation-specific.
- SemaRef.MarkDeclarationReferenced(E->getLocation(), ND);
-
- return SemaRef.Owned(E);
- }
-
- DeclarationNameInfo NameInfo(E->getDecl()->getDeclName(), E->getLocation());
- return getDerived().RebuildDeclRefExpr(NestedNameSpecifierLoc(),
- ND, NameInfo, 0);
-}
-
-template<typename Derived>
-ExprResult
TreeTransform<Derived>::TransformAsTypeExpr(AsTypeExpr *E) {
llvm_unreachable("Cannot transform asType expressions yet");
}
@@ -8200,9 +8861,12 @@ TreeTransform<Derived>::RebuildArrayType(QualType ElementType,
break;
}
- IntegerLiteral ArraySize(SemaRef.Context, *Size, SizeType,
- /*FIXME*/BracketsRange.getBegin());
- return SemaRef.BuildArrayType(ElementType, SizeMod, &ArraySize,
+ // Note that we can return a VariableArrayType here in the case where
+ // the element type was a dependent VariableArrayType.
+ IntegerLiteral *ArraySize
+ = IntegerLiteral::Create(SemaRef.Context, *Size, SizeType,
+ /*FIXME*/BracketsRange.getBegin());
+ return SemaRef.BuildArrayType(ElementType, SizeMod, ArraySize,
IndexTypeQuals, BracketsRange,
getDerived().getBaseEntity());
}
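Why the array-size literal above moves from the stack to IntegerLiteral::Create: BuildArrayType may stash the size expression in the resulting type (e.g. a VariableArrayType, per the new comment), so the node must live in the ASTContext. A hedged sketch against the clang API of this era:

    #include "clang/AST/Expr.h"

    // Returns a context-allocated literal that the type system may safely
    // retain; a stack-allocated IntegerLiteral would dangle.
    static clang::IntegerLiteral *makeArraySize(clang::ASTContext &Ctx,
                                                const llvm::APInt &Size,
                                                clang::QualType SizeTy,
                                                clang::SourceLocation Loc) {
      return clang::IntegerLiteral::Create(Ctx, Size, SizeTy, Loc);
    }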
@@ -8285,11 +8949,12 @@ QualType TreeTransform<Derived>::RebuildFunctionProtoType(QualType T,
QualType *ParamTypes,
unsigned NumParamTypes,
bool Variadic,
+ bool HasTrailingReturn,
unsigned Quals,
RefQualifierKind RefQualifier,
const FunctionType::ExtInfo &Info) {
return SemaRef.BuildFunctionType(T, ParamTypes, NumParamTypes, Variadic,
- Quals, RefQualifier,
+ HasTrailingReturn, Quals, RefQualifier,
getDerived().getBaseLocation(),
getDerived().getBaseEntity(),
Info);
@@ -8382,10 +9047,9 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
UnqualifiedId TemplateName;
TemplateName.setIdentifier(&Name, NameLoc);
Sema::TemplateTy Template;
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
getSema().ActOnDependentTemplateName(/*Scope=*/0,
- /*FIXME:*/SourceLocation(),
- SS,
- TemplateName,
+ SS, TemplateKWLoc, TemplateName,
ParsedType::make(ObjectType),
/*EnteringContext=*/false,
Template);
@@ -8400,13 +9064,12 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
QualType ObjectType) {
UnqualifiedId Name;
// FIXME: Bogus location information.
- SourceLocation SymbolLocations[3] = { NameLoc, NameLoc, NameLoc };
+ SourceLocation SymbolLocations[3] = { NameLoc, NameLoc, NameLoc };
Name.setOperatorFunctionId(NameLoc, Operator, SymbolLocations);
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
Sema::TemplateTy Template;
getSema().ActOnDependentTemplateName(/*Scope=*/0,
- /*FIXME:*/SourceLocation(),
- SS,
- Name,
+ SS, TemplateKWLoc, Name,
ParsedType::make(ObjectType),
/*EnteringContext=*/false,
Template);
@@ -8543,9 +9206,11 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
// FIXME: the ScopeType should be tacked onto SS.
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
return getSema().BuildMemberReferenceExpr(Base, BaseType,
OperatorLoc, isArrow,
- SS, /*FIXME: FirstQualifier*/ 0,
+ SS, TemplateKWLoc,
+ /*FIXME: FirstQualifier*/ 0,
NameInfo,
/*TemplateArgs*/ 0);
}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
index 445e750..67f74f7 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
@@ -52,8 +52,11 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::Char32: ID = PREDEF_TYPE_CHAR32_ID; break;
case BuiltinType::Overload: ID = PREDEF_TYPE_OVERLOAD_ID; break;
case BuiltinType::BoundMember:ID = PREDEF_TYPE_BOUND_MEMBER; break;
+ case BuiltinType::PseudoObject:ID = PREDEF_TYPE_PSEUDO_OBJECT;break;
case BuiltinType::Dependent: ID = PREDEF_TYPE_DEPENDENT_ID; break;
case BuiltinType::UnknownAny: ID = PREDEF_TYPE_UNKNOWN_ANY; break;
+ case BuiltinType::ARCUnbridgedCast:
+ ID = PREDEF_TYPE_ARC_UNBRIDGED_CAST; break;
case BuiltinType::ObjCId: ID = PREDEF_TYPE_OBJC_ID; break;
case BuiltinType::ObjCClass: ID = PREDEF_TYPE_OBJC_CLASS; break;
case BuiltinType::ObjCSel: ID = PREDEF_TYPE_OBJC_SEL; break;
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
index 367f57f..16db8e3 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
@@ -22,7 +22,6 @@ namespace clang {
namespace serialization {
enum DeclUpdateKind {
- UPD_CXX_SET_DEFINITIONDATA,
UPD_CXX_ADDED_IMPLICIT_MEMBER,
UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
UPD_CXX_ADDED_ANONYMOUS_NAMESPACE,
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
index de19d85..f91b66c 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
@@ -14,10 +14,9 @@
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/ModuleManager.h"
+#include "clang/Serialization/SerializationDiagnostic.h"
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
-#include "clang/Frontend/Utils.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/Scope.h"
#include "clang/AST/ASTConsumer.h"
@@ -28,6 +27,7 @@
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLocVisitor.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/Preprocessor.h"
@@ -64,7 +64,7 @@ ASTReaderListener::~ASTReaderListener() {}
bool
PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
- const LangOptions &PPLangOpts = PP.getLangOptions();
+ const LangOptions &PPLangOpts = PP.getLangOpts();
#define LANGOPT(Name, Bits, Default, Description) \
if (PPLangOpts.Name != LangOpts.Name) { \
@@ -193,9 +193,10 @@ bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
// have a #include entry for the PCH file itself (as normalized by the
// preprocessor initialization). Find it and skip over it in the checking
// below.
- llvm::SmallString<256> PCHInclude;
+ SmallString<256> PCHInclude;
PCHInclude += "#include \"";
- PCHInclude += NormalizeDashIncludePath(OriginalFileName, FileMgr);
+ PCHInclude += HeaderSearch::NormalizeDashIncludePath(OriginalFileName,
+ FileMgr);
PCHInclude += "\"\n";
std::pair<StringRef,StringRef> Split =
StringRef(PP.getPredefines()).split(PCHInclude.str());
@@ -510,10 +511,13 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
// For uninteresting identifiers, just build the IdentifierInfo
// and associate it with the persistent ID.
IdentifierInfo *II = KnownII;
- if (!II)
+ if (!II) {
II = &Reader.getIdentifierTable().getOwn(StringRef(k.first, k.second));
+ KnownII = II;
+ }
Reader.SetIdentifierInfo(ID, II);
II->setIsFromAST();
+ Reader.markIdentifierUpToDate(II);
return II;
}
@@ -528,8 +532,8 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
Bits >>= 1;
bool hasMacroDefinition = Bits & 0x01;
Bits >>= 1;
- unsigned ObjCOrBuiltinID = Bits & 0x3FF;
- Bits >>= 10;
+ unsigned ObjCOrBuiltinID = Bits & 0x7FF;
+ Bits >>= 11;
assert(Bits == 0 && "Extra bits in the identifier?");
DataLen -= 6;
@@ -537,9 +541,12 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
// Build the IdentifierInfo itself and link the identifier ID with
// the new IdentifierInfo.
IdentifierInfo *II = KnownII;
- if (!II)
+ if (!II) {
II = &Reader.getIdentifierTable().getOwn(StringRef(k.first, k.second));
- Reader.SetIdentifierInfo(ID, II);
+ KnownII = II;
+ }
+ Reader.markIdentifierUpToDate(II);
+ II->setIsFromAST();
// Set or check the various bits in the IdentifierInfo structure.
// Token IDs are read-only.
@@ -560,10 +567,32 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
if (hasMacroDefinition) {
// FIXME: Check for conflicts?
uint32_t Offset = ReadUnalignedLE32(d);
- Reader.SetIdentifierIsMacro(II, F, Offset);
- DataLen -= 4;
+ unsigned LocalSubmoduleID = ReadUnalignedLE32(d);
+
+ // Determine whether this macro definition should be visible now, or
+ // whether it is in a hidden submodule.
+ bool Visible = true;
+ if (SubmoduleID GlobalSubmoduleID
+ = Reader.getGlobalSubmoduleID(F, LocalSubmoduleID)) {
+ if (Module *Owner = Reader.getSubmodule(GlobalSubmoduleID)) {
+ if (Owner->NameVisibility == Module::Hidden) {
+ // The owning module is not visible, and this macro definition should
+ // not be, either.
+ Visible = false;
+
+ // Note that this macro definition was hidden because its owning
+ // module is not yet visible.
+ Reader.HiddenNamesMap[Owner].push_back(II);
+ }
+ }
+ }
+
+ Reader.setIdentifierIsMacro(II, F, Offset, Visible);
+ DataLen -= 8;
}
+ Reader.SetIdentifierInfo(ID, II);
+
// Read all of the declarations visible at global scope with this
// name.
if (DataLen > 0) {
@@ -573,7 +602,6 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
Reader.SetGloballyVisibleDecls(II, DeclIDs);
}
- II->setIsFromAST();
return II;
}
@@ -724,11 +752,11 @@ ASTDeclContextNameLookupTrait::ReadData(internal_key_type,
unsigned DataLen) {
using namespace clang::io;
unsigned NumDecls = ReadUnalignedLE16(d);
- DeclID *Start = (DeclID *)d;
+ LE32DeclID *Start = (LE32DeclID *)d;
return std::make_pair(Start, Start + NumDecls);
}
-bool ASTReader::ReadDeclContextStorage(Module &M,
+bool ASTReader::ReadDeclContextStorage(ModuleFile &M,
llvm::BitstreamCursor &Cursor,
const std::pair<uint64_t, uint64_t> &Offsets,
DeclContextInfo &Info) {
@@ -802,7 +830,7 @@ bool ASTReader::CheckPredefinesBuffers() {
/// \brief Read the line table in the source manager block.
/// \returns true if there was an error.
-bool ASTReader::ParseLineTable(Module &F,
+bool ASTReader::ParseLineTable(ModuleFile &F,
SmallVectorImpl<uint64_t> &Record) {
unsigned Idx = 0;
LineTableInfo &LineTable = SourceMgr.getLineTable();
@@ -946,7 +974,7 @@ public:
/// \brief Read a source manager block
-ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(Module &F) {
+ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
using namespace SrcMgr;
llvm::BitstreamCursor &SLocEntryCursor = F.SLocEntryCursor;
@@ -1023,10 +1051,10 @@ resolveFileRelativeToOriginalDir(const std::string &Filename,
assert(OriginalDir != CurrDir &&
"No point trying to resolve the file if the PCH dir didn't change");
using namespace llvm::sys;
- llvm::SmallString<128> filePath(Filename);
+ SmallString<128> filePath(Filename);
fs::make_absolute(filePath);
assert(path::is_absolute(OriginalDir));
- llvm::SmallString<128> currPCHPath(CurrDir);
+ SmallString<128> currPCHPath(CurrDir);
path::const_iterator fileDirI = path::begin(path::parent_path(filePath)),
fileDirE = path::end(path::parent_path(filePath));
@@ -1055,7 +1083,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
return Failure;
}
- Module *F = GlobalSLocEntryMap.find(-ID)->second;
+ ModuleFile *F = GlobalSLocEntryMap.find(-ID)->second;
F->SLocEntryCursor.JumpToBit(F->SLocEntryOffsets[ID - F->SLocEntryBaseID]);
llvm::BitstreamCursor &SLocEntryCursor = F->SLocEntryCursor;
unsigned BaseOffset = F->SLocEntryBaseOffset;
@@ -1078,9 +1106,24 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
return Failure;
case SM_SLOC_FILE_ENTRY: {
- std::string Filename(BlobStart, BlobStart + BlobLen);
+ if (Record.size() < 7) {
+ Error("source location entry is incorrect");
+ return Failure;
+ }
+
+ // We will detect whether a file changed and return 'Failure' for it, but
+ // we will also try to fail gracefully by setting up the SLocEntry.
+ ASTReader::ASTReadResult Result = Success;
+
+ bool OverriddenBuffer = Record[6];
+
+ std::string OrigFilename(BlobStart, BlobStart + BlobLen);
+ std::string Filename = OrigFilename;
MaybeAddSystemRootToFilename(Filename);
- const FileEntry *File = FileMgr.getFile(Filename);
+ const FileEntry *File =
+ OverriddenBuffer? FileMgr.getVirtualFile(Filename, (off_t)Record[4],
+ (time_t)Record[5])
+ : FileMgr.getFile(Filename, /*OpenFile=*/false);
if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() &&
OriginalDir != CurrentDir) {
std::string resolved = resolveFileRelativeToOriginalDir(Filename,
@@ -1100,11 +1143,6 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
return Failure;
}
- if (Record.size() < 6) {
- Error("source location entry is incorrect");
- return Failure;
- }
-
if (!DisableValidation &&
((off_t)Record[4] != File->getSize()
#if !defined(LLVM_ON_WIN32)
@@ -1115,7 +1153,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
#endif
)) {
Error(diag::err_fe_pch_file_modified, Filename);
- return Failure;
+ Result = Failure;
}
SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
@@ -1128,10 +1166,40 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
ID, BaseOffset + Record[0]);
SrcMgr::FileInfo &FileInfo =
const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile());
- FileInfo.NumCreatedFIDs = Record[6];
+ FileInfo.NumCreatedFIDs = Record[7];
if (Record[3])
FileInfo.setHasLineDirectives();
+
+ const DeclID *FirstDecl = F->FileSortedDecls + Record[8];
+ unsigned NumFileDecls = Record[9];
+ if (NumFileDecls) {
+ assert(F->FileSortedDecls && "FILE_SORTED_DECLS not encountered yet ?");
+ FileDeclIDs[FID] = FileDeclsInfo(F, llvm::makeArrayRef(FirstDecl,
+ NumFileDecls));
+ }
+ const SrcMgr::ContentCache *ContentCache
+ = SourceMgr.getOrCreateContentCache(File);
+ if (OverriddenBuffer && !ContentCache->BufferOverridden &&
+ ContentCache->ContentsEntry == ContentCache->OrigEntry) {
+ unsigned Code = SLocEntryCursor.ReadCode();
+ Record.clear();
+ unsigned RecCode
+ = SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen);
+
+ if (RecCode != SM_SLOC_BUFFER_BLOB) {
+ Error("AST record has invalid code");
+ return Failure;
+ }
+
+ llvm::MemoryBuffer *Buffer
+ = llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1),
+ Filename);
+ SourceMgr.overrideFileContents(File, Buffer);
+ }
+
+ if (Result == Failure)
+ return Failure;
break;
}
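
A minimal sketch of the fail-gracefully pattern the hunk above introduces: note the failure, keep building enough state to stay consistent, and only return once the entry exists. The types and names below are illustrative stand-ins, not clang's.

#include <string>
#include <vector>

enum ReadResult { Success, Failure };

struct Entry { std::string File; bool Valid; };

ReadResult readEntry(const std::string &File, bool Modified,
                     std::vector<Entry> &Entries) {
  ReadResult Result = Success;
  if (Modified)                  // the on-disk file changed: report it...
    Result = Failure;
  Entry E = { File, Result == Success };
  Entries.push_back(E);          // ...but still register the entry so later
  return Result;                 // IDs referring to it keep resolving
}
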
@@ -1149,12 +1217,12 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
}
llvm::MemoryBuffer *Buffer
- = llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1),
- Name);
+ = llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1),
+ Name);
FileID BufferID = SourceMgr.createFileIDForMemBuffer(Buffer, ID,
BaseOffset + Offset);
- if (strcmp(Name, "<built-in>") == 0) {
+ if (strcmp(Name, "<built-in>") == 0 && F->Kind == MK_PCH) {
PCHPredefinesBlock Block = {
BufferID,
StringRef(BlobStart, BlobLen - 1)
@@ -1181,7 +1249,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
}
/// \brief Find the location where the module F is imported.
-SourceLocation ASTReader::getImportLocation(Module *F) {
+SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
if (F->ImportLoc.isValid())
return F->ImportLoc;
@@ -1222,7 +1290,7 @@ bool ASTReader::ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor,
}
}
-void ASTReader::ReadMacroRecord(Module &F, uint64_t Offset) {
+void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
llvm::BitstreamCursor &Stream = F.MacroCursor;
// Keep track of where we are in the stream, then jump back there
@@ -1276,6 +1344,7 @@ void ASTReader::ReadMacroRecord(Module &F, uint64_t Offset) {
Error("macro must have a name in AST file");
return;
}
+
SourceLocation Loc = ReadSourceLocation(F, Record[1]);
bool isUsed = Record[2];
@@ -1283,8 +1352,9 @@ void ASTReader::ReadMacroRecord(Module &F, uint64_t Offset) {
MI->setIsUsed(isUsed);
MI->setIsFromAST();
- unsigned NextIndex = 3;
- MI->setExportLocation(ReadSourceLocation(F, Record, NextIndex));
+ bool IsPublic = Record[3];
+ unsigned NextIndex = 4;
+ MI->setVisibility(IsPublic, ReadSourceLocation(F, Record, NextIndex));
if (RecType == PP_MACRO_FUNCTION_LIKE) {
// Decode function-like macro info.
@@ -1304,7 +1374,7 @@ void ASTReader::ReadMacroRecord(Module &F, uint64_t Offset) {
}
// Finally, install the macro.
- PP.setMacroInfo(II, MI);
+ PP.setMacroInfo(II, MI, /*LoadedFromAST=*/true);
// Remember that we saw this macro last so that we add the tokens that
// form its body to it.
@@ -1340,14 +1410,12 @@ void ASTReader::ReadMacroRecord(Module &F, uint64_t Offset) {
Macro->AddTokenToBody(Tok);
break;
}
+ }
}
- }
-
- return;
}
PreprocessedEntityID
-ASTReader::getGlobalPreprocessedEntityID(Module &M, unsigned LocalID) const {
+ASTReader::getGlobalPreprocessedEntityID(ModuleFile &M, unsigned LocalID) const {
ContinuousRangeMap<uint32_t, int, 2>::const_iterator
I = M.PreprocessedEntityRemap.find(LocalID - NUM_PREDEF_PP_ENTITY_IDS);
assert(I != M.PreprocessedEntityRemap.end()
@@ -1369,14 +1437,13 @@ bool HeaderFileInfoTrait::EqualKey(internal_key_type a, internal_key_type b) {
if (llvm::sys::path::filename(a) != llvm::sys::path::filename(b))
return false;
-
- // The file names match, but the path names don't. stat() the files to
- // see if they are the same.
- struct stat StatBufA, StatBufB;
- if (StatSimpleCache(a, &StatBufA) || StatSimpleCache(b, &StatBufB))
+
+ // Determine whether the actual files are equivalent.
+ bool Result = false;
+ if (llvm::sys::fs::equivalent(a, b, Result))
return false;
- return StatBufA.st_ino == StatBufB.st_ino;
+ return Result;
}
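
For reference, a self-contained sketch of the equivalence check the hunk above switches to; it relies on the same llvm::sys::fs::equivalent overload (bool out-parameter, error_code return) that the new code uses.

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"

static bool sameFile(llvm::StringRef A, llvm::StringRef B) {
  bool Result = false;
  if (llvm::sys::fs::equivalent(A, B, Result))
    return false;  // an error means we could not stat; treat as distinct
  return Result;
}
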
std::pair<unsigned, unsigned>
@@ -1399,7 +1466,8 @@ HeaderFileInfoTrait::ReadData(const internal_key_type, const unsigned char *d,
HFI.Resolved = (Flags >> 1) & 0x01;
HFI.IndexHeaderMapHeader = Flags & 0x01;
HFI.NumIncludes = ReadUnalignedLE16(d);
- HFI.ControllingMacroID = Reader.getGlobalDeclID(M, ReadUnalignedLE32(d));
+ HFI.ControllingMacroID = Reader.getGlobalIdentifierID(M,
+ ReadUnalignedLE32(d));
if (unsigned FrameworkOffset = ReadUnalignedLE32(d)) {
// The framework offset is 1 greater than the actual offset,
// since 0 is used as an indicator for "no framework name".
@@ -1415,10 +1483,12 @@ HeaderFileInfoTrait::ReadData(const internal_key_type, const unsigned char *d,
return HFI;
}
-void ASTReader::SetIdentifierIsMacro(IdentifierInfo *II, Module &F,
- uint64_t LocalOffset) {
- // Note that this identifier has a macro definition.
- II->setHasMacroDefinition(true);
+void ASTReader::setIdentifierIsMacro(IdentifierInfo *II, ModuleFile &F,
+ uint64_t LocalOffset, bool Visible) {
+ if (Visible) {
+ // Note that this identifier has a macro definition.
+ II->setHasMacroDefinition(true);
+ }
// Adjust the offset to a global offset.
UnreadMacroRecordOffsets[II] = F.GlobalBitOffset + LocalOffset;
@@ -1483,7 +1553,7 @@ void ASTReader::ReadDefinedMacros() {
}
void ASTReader::LoadMacroDefinition(
- llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos) {
+ llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos) {
assert(Pos != UnreadMacroRecordOffsets.end() && "Unknown macro definition");
uint64_t Offset = Pos->second;
UnreadMacroRecordOffsets.erase(Pos);
@@ -1498,6 +1568,72 @@ void ASTReader::LoadMacroDefinition(IdentifierInfo *II) {
LoadMacroDefinition(Pos);
}
+namespace {
+ /// \brief Visitor class used to look up identifiers in an AST file.
+ class IdentifierLookupVisitor {
+ StringRef Name;
+ unsigned PriorGeneration;
+ IdentifierInfo *Found;
+ public:
+ IdentifierLookupVisitor(StringRef Name, unsigned PriorGeneration)
+ : Name(Name), PriorGeneration(PriorGeneration), Found() { }
+
+ static bool visit(ModuleFile &M, void *UserData) {
+ IdentifierLookupVisitor *This
+ = static_cast<IdentifierLookupVisitor *>(UserData);
+
+ // If we've already searched this module file, skip it now.
+ if (M.Generation <= This->PriorGeneration)
+ return true;
+
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)M.IdentifierLookupTable;
+ if (!IdTable)
+ return false;
+
+ ASTIdentifierLookupTrait Trait(IdTable->getInfoObj().getReader(),
+ M, This->Found);
+
+ std::pair<const char*, unsigned> Key(This->Name.begin(),
+ This->Name.size());
+ ASTIdentifierLookupTable::iterator Pos = IdTable->find(Key, &Trait);
+ if (Pos == IdTable->end())
+ return false;
+
+ // Dereferencing the iterator has the effect of building the
+ // IdentifierInfo node and populating it with the various
+ // declarations it needs.
+ This->Found = *Pos;
+ return true;
+ }
+
+ // \brief Retrieve the identifier info found within the module
+ // files.
+ IdentifierInfo *getIdentifierInfo() const { return Found; }
+ };
+}
+
+void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
+ unsigned PriorGeneration = 0;
+ if (getContext().getLangOpts().Modules)
+ PriorGeneration = IdentifierGeneration[&II];
+
+ IdentifierLookupVisitor Visitor(II.getName(), PriorGeneration);
+ ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor);
+ markIdentifierUpToDate(&II);
+}
+
+void ASTReader::markIdentifierUpToDate(IdentifierInfo *II) {
+ if (!II)
+ return;
+
+ II->setOutOfDate(false);
+
+ // Update the generation for this identifier.
+ if (getContext().getLangOpts().Modules)
+ IdentifierGeneration[II] = CurrentGeneration;
+}
+
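
A hedged sketch of the generation scheme introduced above: each module file records the generation at which it was loaded, and a lookup revisits only files newer than the generation last recorded for the identifier. The types are illustrative stand-ins, not clang's.

#include <vector>

struct ModFile {
  unsigned Generation;           // set when the file is loaded
  // ... identifier lookup table ...
};

void lookupSince(const std::vector<ModFile*> &Files,
                 unsigned PriorGeneration) {
  for (unsigned I = 0, N = Files.size(); I != N; ++I) {
    if (Files[I]->Generation <= PriorGeneration)
      continue;                  // already searched in an earlier pass
    // ... consult Files[I]'s identifier table here ...
  }
}
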
const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) {
std::string Filename = filenameStrRef;
MaybeAddSystemRootToFilename(Filename);
@@ -1539,7 +1675,7 @@ void ASTReader::MaybeAddSystemRootToFilename(std::string &Filename) {
}
ASTReader::ASTReadResult
-ASTReader::ReadASTBlock(Module &F) {
+ASTReader::ReadASTBlock(ModuleFile &F) {
llvm::BitstreamCursor &Stream = F.Stream;
if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
@@ -1608,7 +1744,7 @@ ASTReader::ReadASTBlock(Module &F) {
= F.PreprocessorDetailCursor.GetCurrentBitNo();
if (!PP.getPreprocessingRecord())
- PP.createPreprocessingRecord(true);
+ PP.createPreprocessingRecord(/*RecordConditionalDirectives=*/false);
if (!PP.getPreprocessingRecord()->getExternalSource())
PP.getPreprocessingRecord()->SetExternalSource(*this);
break;
@@ -1626,6 +1762,26 @@ ASTReader::ReadASTBlock(Module &F) {
return IgnorePCH;
}
break;
+
+ case SUBMODULE_BLOCK_ID:
+ switch (ReadSubmoduleBlock(F)) {
+ case Success:
+ break;
+
+ case Failure:
+ Error("malformed submodule block in AST file");
+ return Failure;
+
+ case IgnorePCH:
+ return IgnorePCH;
+ }
+ break;
+
+ default:
+ if (!Stream.SkipBlock())
+ break;
+ Error("malformed block record in AST file");
+ return Failure;
}
continue;
}
@@ -1651,6 +1807,12 @@ ASTReader::ReadASTBlock(Module &F) {
return IgnorePCH;
}
+ bool hasErrors = Record[5];
+ if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
+ Diag(diag::err_pch_with_compiler_errors);
+ return IgnorePCH;
+ }
+
RelocatablePCH = Record[4];
if (Listener) {
std::string TargetTriple(BlobStart, BlobLen);
@@ -1667,7 +1829,7 @@ ASTReader::ReadASTBlock(Module &F) {
// Read information about the AST file.
ModuleKind ImportedKind = (ModuleKind)Record[Idx++];
unsigned Length = Record[Idx++];
- llvm::SmallString<128> ImportedFile(Record.begin() + Idx,
+ SmallString<128> ImportedFile(Record.begin() + Idx,
Record.begin() + Idx + Length);
Idx += Length;
@@ -1697,8 +1859,9 @@ ASTReader::ReadASTBlock(Module &F) {
GlobalTypeMap.insert(std::make_pair(getTotalNumTypes(), &F));
// Introduce the local -> global mapping for types within this module.
- F.TypeRemap.insert(std::make_pair(LocalBaseTypeIndex,
- F.BaseTypeIndex - LocalBaseTypeIndex));
+ F.TypeRemap.insertOrReplace(
+ std::make_pair(LocalBaseTypeIndex,
+ F.BaseTypeIndex - LocalBaseTypeIndex));
TypesLoaded.resize(TypesLoaded.size() + F.LocalNumTypes);
}
@@ -1710,7 +1873,7 @@ ASTReader::ReadASTBlock(Module &F) {
Error("duplicate DECL_OFFSET record in AST file");
return Failure;
}
- F.DeclOffsets = (const uint32_t *)BlobStart;
+ F.DeclOffsets = (const DeclOffset *)BlobStart;
F.LocalNumDecls = Record[0];
unsigned LocalBaseDeclID = Record[1];
F.BaseDeclID = getTotalNumDecls();
@@ -1723,8 +1886,12 @@ ASTReader::ReadASTBlock(Module &F) {
// Introduce the local -> global mapping for declarations within this
// module.
- F.DeclRemap.insert(std::make_pair(LocalBaseDeclID,
- F.BaseDeclID - LocalBaseDeclID));
+ F.DeclRemap.insertOrReplace(
+ std::make_pair(LocalBaseDeclID, F.BaseDeclID - LocalBaseDeclID));
+
+ // Introduce the global -> local mapping for declarations within this
+ // module.
+ F.GlobalToLocalDeclIDs[&F] = LocalBaseDeclID;
DeclsLoaded.resize(DeclsLoaded.size() + F.LocalNumDecls);
}
@@ -1757,16 +1924,6 @@ ASTReader::ReadASTBlock(Module &F) {
break;
}
- case REDECLS_UPDATE_LATEST: {
- assert(Record.size() % 2 == 0 && "Expected pairs of DeclIDs");
- for (unsigned i = 0, e = Record.size(); i < e; /* in loop */) {
- DeclID First = ReadDeclID(F, Record, i);
- DeclID Latest = ReadDeclID(F, Record, i);
- FirstLatestDeclIDs[First] = Latest;
- }
- break;
- }
-
case LANGUAGE_OPTIONS:
if (ParseLanguageOptions(Record) && !DisableValidation)
return IgnorePCH;
@@ -1803,9 +1960,9 @@ ASTReader::ReadASTBlock(Module &F) {
// Introduce the local -> global mapping for identifiers within this
// module.
- F.IdentifierRemap.insert(
- std::make_pair(LocalBaseIdentifierID,
- F.BaseIdentifierID - LocalBaseIdentifierID));
+ F.IdentifierRemap.insertOrReplace(
+ std::make_pair(LocalBaseIdentifierID,
+ F.BaseIdentifierID - LocalBaseIdentifierID));
IdentifiersLoaded.resize(IdentifiersLoaded.size()
+ F.LocalNumIdentifiers);
@@ -1880,8 +2037,9 @@ ASTReader::ReadASTBlock(Module &F) {
// Introduce the local -> global mapping for selectors within this
// module.
- F.SelectorRemap.insert(std::make_pair(LocalBaseSelectorID,
- F.BaseSelectorID - LocalBaseSelectorID));
+ F.SelectorRemap.insertOrReplace(
+ std::make_pair(LocalBaseSelectorID,
+ F.BaseSelectorID - LocalBaseSelectorID));
SelectorsLoaded.resize(SelectorsLoaded.size() + F.LocalNumSelectors);
}
@@ -1914,6 +2072,10 @@ ASTReader::ReadASTBlock(Module &F) {
if (!Record.empty() && Listener)
Listener->ReadCounter(Record[0]);
break;
+
+ case FILE_SORTED_DECLS:
+ F.FileSortedDecls = (const DeclID *)BlobStart;
+ break;
case SOURCE_LOCATION_OFFSETS: {
F.SLocEntryOffsets = (const uint32_t *)BlobStart;
@@ -1959,6 +2121,8 @@ ASTReader::ReadASTBlock(Module &F) {
ContinuousRangeMap<uint32_t, int, 2>::Builder
PreprocessedEntityRemap(F.PreprocessedEntityRemap);
ContinuousRangeMap<uint32_t, int, 2>::Builder
+ SubmoduleRemap(F.SubmoduleRemap);
+ ContinuousRangeMap<uint32_t, int, 2>::Builder
SelectorRemap(F.SelectorRemap);
ContinuousRangeMap<uint32_t, int, 2>::Builder DeclRemap(F.DeclRemap);
ContinuousRangeMap<uint32_t, int, 2>::Builder TypeRemap(F.TypeRemap);
@@ -1967,7 +2131,7 @@ ASTReader::ReadASTBlock(Module &F) {
uint16_t Len = io::ReadUnalignedLE16(Data);
StringRef Name = StringRef((const char*)Data, Len);
Data += Len;
- Module *OM = ModuleMgr.lookup(Name);
+ ModuleFile *OM = ModuleMgr.lookup(Name);
if (!OM) {
Error("SourceLocation remap refers to unknown module");
return Failure;
@@ -1976,6 +2140,7 @@ ASTReader::ReadASTBlock(Module &F) {
uint32_t SLocOffset = io::ReadUnalignedLE32(Data);
uint32_t IdentifierIDOffset = io::ReadUnalignedLE32(Data);
uint32_t PreprocessedEntityIDOffset = io::ReadUnalignedLE32(Data);
+ uint32_t SubmoduleIDOffset = io::ReadUnalignedLE32(Data);
uint32_t SelectorIDOffset = io::ReadUnalignedLE32(Data);
uint32_t DeclIDOffset = io::ReadUnalignedLE32(Data);
uint32_t TypeIndexOffset = io::ReadUnalignedLE32(Data);
@@ -1989,6 +2154,8 @@ ASTReader::ReadASTBlock(Module &F) {
PreprocessedEntityRemap.insert(
std::make_pair(PreprocessedEntityIDOffset,
OM->BasePreprocessedEntityID - PreprocessedEntityIDOffset));
+ SubmoduleRemap.insert(std::make_pair(SubmoduleIDOffset,
+ OM->BaseSubmoduleID - SubmoduleIDOffset));
SelectorRemap.insert(std::make_pair(SelectorIDOffset,
OM->BaseSelectorID - SelectorIDOffset));
DeclRemap.insert(std::make_pair(DeclIDOffset,
@@ -1996,6 +2163,9 @@ ASTReader::ReadASTBlock(Module &F) {
TypeRemap.insert(std::make_pair(TypeIndexOffset,
OM->BaseTypeIndex - TypeIndexOffset));
+
+ // Global -> local mappings.
+ F.GlobalToLocalDeclIDs[OM] = DeclIDOffset;
}
break;
}
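
The insertOrReplace calls above maintain local-to-global remap tables keyed by the first local ID of each imported range. A sketch of the underlying idea, with std::map standing in for ContinuousRangeMap (an assumption made for brevity):

#include <map>

// local base ID -> (global base - local base)
typedef std::map<unsigned, int> RemapTable;

unsigned toGlobal(const RemapTable &Remap, unsigned LocalID) {
  // Find the range owning LocalID: the greatest base that is <= LocalID.
  // Assumes a base entry at key 0 exists, mirroring the remap's invariant.
  RemapTable::const_iterator I = Remap.upper_bound(LocalID);
  --I;
  return LocalID + I->second;
}
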
@@ -2124,7 +2294,7 @@ ASTReader::ReadASTBlock(Module &F) {
unsigned StartingID;
if (!PP.getPreprocessingRecord())
- PP.createPreprocessingRecord(true);
+ PP.createPreprocessingRecord(/*RecordConditionalDirectives=*/false);
if (!PP.getPreprocessingRecord()->getExternalSource())
PP.getPreprocessingRecord()->SetExternalSource(*this);
StartingID
@@ -2139,7 +2309,7 @@ ASTReader::ReadASTBlock(Module &F) {
// Introduce the local -> global mapping for preprocessed entities in
// this module.
- F.PreprocessedEntityRemap.insert(
+ F.PreprocessedEntityRemap.insertOrReplace(
std::make_pair(LocalBasePreprocessedEntityID,
F.BasePreprocessedEntityID - LocalBasePreprocessedEntityID));
}
@@ -2159,30 +2329,31 @@ ASTReader::ReadASTBlock(Module &F) {
}
case DECL_REPLACEMENTS: {
- if (Record.size() % 2 != 0) {
+ if (Record.size() % 3 != 0) {
Error("invalid DECL_REPLACEMENTS block in AST file");
return Failure;
}
- for (unsigned I = 0, N = Record.size(); I != N; I += 2)
+ for (unsigned I = 0, N = Record.size(); I != N; I += 3)
ReplacedDecls[getGlobalDeclID(F, Record[I])]
- = std::make_pair(&F, Record[I+1]);
+ = ReplacedDeclInfo(&F, Record[I+1], Record[I+2]);
break;
}
- case OBJC_CHAINED_CATEGORIES: {
- if (Record.size() % 3 != 0) {
- Error("invalid OBJC_CHAINED_CATEGORIES block in AST file");
+ case OBJC_CATEGORIES_MAP: {
+ if (F.LocalNumObjCCategoriesInMap != 0) {
+ Error("duplicate OBJC_CATEGORIES_MAP record in AST file");
return Failure;
}
- for (unsigned I = 0, N = Record.size(); I != N; I += 3) {
- serialization::GlobalDeclID GlobID = getGlobalDeclID(F, Record[I]);
- F.ChainedObjCCategories[GlobID] = std::make_pair(Record[I+1],
- Record[I+2]);
- ObjCChainedCategoriesInterfaces.insert(GlobID);
- }
+
+ F.LocalNumObjCCategoriesInMap = Record[0];
+ F.ObjCCategoriesMap = (const ObjCCategoriesInfo *)BlobStart;
break;
}
+ case OBJC_CATEGORIES:
+ F.ObjCCategories.swap(Record);
+ break;
+
case CXX_BASE_SPECIFIER_OFFSETS: {
if (F.LocalNumCXXBaseSpecifiers != 0) {
Error("duplicate CXX_BASE_SPECIFIER_OFFSETS record in AST file");
@@ -2255,13 +2426,52 @@ ASTReader::ReadASTBlock(Module &F) {
for (unsigned I = 0, N = Record.size(); I != N; ++I)
KnownNamespaces.push_back(getGlobalDeclID(F, Record[I]));
break;
+
+ case IMPORTED_MODULES: {
+ if (F.Kind != MK_Module) {
+ // If we aren't loading a module (which has its own exports), make
+ // all of the imported modules visible.
+ // FIXME: Deal with macros-only imports.
+ for (unsigned I = 0, N = Record.size(); I != N; ++I) {
+ if (unsigned GlobalID = getGlobalSubmoduleID(F, Record[I]))
+ ImportedModules.push_back(GlobalID);
+ }
+ }
+ break;
+ }
+
+ case LOCAL_REDECLARATIONS: {
+ F.RedeclarationChains.swap(Record);
+ break;
+ }
+
+ case LOCAL_REDECLARATIONS_MAP: {
+ if (F.LocalNumRedeclarationsInMap != 0) {
+ Error("duplicate LOCAL_REDECLARATIONS_MAP record in AST file");
+ return Failure;
+ }
+
+ F.LocalNumRedeclarationsInMap = Record[0];
+ F.RedeclarationsMap = (const LocalRedeclarationsInfo *)BlobStart;
+ break;
+ }
+
+ case MERGED_DECLARATIONS: {
+ for (unsigned Idx = 0; Idx < Record.size(); /* increment in loop */) {
+ GlobalDeclID CanonID = getGlobalDeclID(F, Record[Idx++]);
+ SmallVectorImpl<GlobalDeclID> &Decls = StoredMergedDecls[CanonID];
+ for (unsigned N = Record[Idx++]; N > 0; --N)
+ Decls.push_back(getGlobalDeclID(F, Record[Idx++]));
+ }
+ break;
+ }
}
}
Error("premature end of bitstream in AST file");
return Failure;
}
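
The MERGED_DECLARATIONS case above consumes a flat (canonical ID, count, IDs...) stream with a single cursor. A minimal sketch of that decoding, using plain containers in place of the reader's types:

#include <map>
#include <vector>

typedef std::map<unsigned, std::vector<unsigned> > MergedMap;

void readMerged(const std::vector<unsigned> &Record, MergedMap &Out) {
  for (unsigned Idx = 0; Idx < Record.size(); /* advanced in loop */) {
    unsigned CanonID = Record[Idx++];
    std::vector<unsigned> &Decls = Out[CanonID];
    for (unsigned N = Record[Idx++]; N > 0; --N)
      Decls.push_back(Record[Idx++]);
  }
}
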
-ASTReader::ASTReadResult ASTReader::validateFileEntries(Module &M) {
+ASTReader::ASTReadResult ASTReader::validateFileEntries(ModuleFile &M) {
llvm::BitstreamCursor &SLocEntryCursor = M.SLocEntryCursor;
for (unsigned i = 0, e = M.LocalNumSLocFileEntries; i != e; ++i) {
@@ -2283,6 +2493,10 @@ ASTReader::ASTReadResult ASTReader::validateFileEntries(Module &M) {
return Failure;
case SM_SLOC_FILE_ENTRY: {
+ // If the buffer was overridden, the file need not exist.
+ if (Record[6])
+ break;
+
StringRef Filename(BlobStart, BlobLen);
const FileEntry *File = getFileEntry(Filename);
@@ -2294,7 +2508,7 @@ ASTReader::ASTReadResult ASTReader::validateFileEntries(Module &M) {
return IgnorePCH;
}
- if (Record.size() < 6) {
+ if (Record.size() < 7) {
Error("source location entry is incorrect");
return Failure;
}
@@ -2327,45 +2541,125 @@ ASTReader::ASTReadResult ASTReader::validateFileEntries(Module &M) {
return Success;
}
-namespace {
- /// \brief Visitor class used to look up identifirs in an AST file.
- class IdentifierLookupVisitor {
- StringRef Name;
- IdentifierInfo *Found;
- public:
- explicit IdentifierLookupVisitor(StringRef Name) : Name(Name), Found() { }
+void ASTReader::makeNamesVisible(const HiddenNames &Names) {
+ for (unsigned I = 0, N = Names.size(); I != N; ++I) {
+ if (Decl *D = Names[I].dyn_cast<Decl *>())
+ D->Hidden = false;
+ else {
+ IdentifierInfo *II = Names[I].get<IdentifierInfo *>();
+ if (!II->hasMacroDefinition()) {
+ II->setHasMacroDefinition(true);
+ if (DeserializationListener)
+ DeserializationListener->MacroVisible(II);
+ }
+ }
+ }
+}
+
+void ASTReader::makeModuleVisible(Module *Mod,
+ Module::NameVisibilityKind NameVisibility) {
+ llvm::SmallPtrSet<Module *, 4> Visited;
+ llvm::SmallVector<Module *, 4> Stack;
+ Stack.push_back(Mod);
+ while (!Stack.empty()) {
+ Mod = Stack.back();
+ Stack.pop_back();
+
+ if (NameVisibility <= Mod->NameVisibility) {
+ // This module already has this level of visibility (or greater), so
+ // there is nothing more to do.
+ continue;
+ }
- static bool visit(Module &M, void *UserData) {
- IdentifierLookupVisitor *This
- = static_cast<IdentifierLookupVisitor *>(UserData);
+ if (!Mod->isAvailable()) {
+ // Modules that aren't available cannot be made visible.
+ continue;
+ }
+
+ // Update the module's name visibility.
+ Mod->NameVisibility = NameVisibility;
+
+ // If we've already deserialized any names from this module,
+ // mark them as visible.
+ HiddenNamesMapType::iterator Hidden = HiddenNamesMap.find(Mod);
+ if (Hidden != HiddenNamesMap.end()) {
+ makeNamesVisible(Hidden->second);
+ HiddenNamesMap.erase(Hidden);
+ }
+
+ // Push any non-explicit submodules onto the stack to be marked as
+ // visible.
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ if (!(*Sub)->IsExplicit && Visited.insert(*Sub))
+ Stack.push_back(*Sub);
+ }
+
+ // Push any exported modules onto the stack to be marked as visible.
+ bool AnyWildcard = false;
+ bool UnrestrictedWildcard = false;
+ llvm::SmallVector<Module *, 4> WildcardRestrictions;
+ for (unsigned I = 0, N = Mod->Exports.size(); I != N; ++I) {
+ Module *Exported = Mod->Exports[I].getPointer();
+ if (!Mod->Exports[I].getInt()) {
+ // Export a named module directly; no wildcards involved.
+ if (Visited.insert(Exported))
+ Stack.push_back(Exported);
+
+ continue;
+ }
- ASTIdentifierLookupTable *IdTable
- = (ASTIdentifierLookupTable *)M.IdentifierLookupTable;
- if (!IdTable)
- return false;
+ // Wildcard export: export all of the imported modules that match
+ // the given pattern.
+ AnyWildcard = true;
+ if (UnrestrictedWildcard)
+ continue;
+
+ if (Module *Restriction = Mod->Exports[I].getPointer())
+ WildcardRestrictions.push_back(Restriction);
+ else {
+ WildcardRestrictions.clear();
+ UnrestrictedWildcard = true;
+ }
+ }
+
+ // If there were any wildcards, push any imported modules that were
+ // re-exported by the wildcard restriction.
+ if (!AnyWildcard)
+ continue;
+
+ for (unsigned I = 0, N = Mod->Imports.size(); I != N; ++I) {
+ Module *Imported = Mod->Imports[I];
+ if (Visited.count(Imported))
+ continue;
- std::pair<const char*, unsigned> Key(This->Name.begin(),
- This->Name.size());
- ASTIdentifierLookupTable::iterator Pos = IdTable->find(Key);
- if (Pos == IdTable->end())
- return false;
+ bool Acceptable = UnrestrictedWildcard;
+ if (!Acceptable) {
+ // Check whether this module meets one of the restrictions.
+ for (unsigned R = 0, NR = WildcardRestrictions.size(); R != NR; ++R) {
+ Module *Restriction = WildcardRestrictions[R];
+ if (Imported == Restriction || Imported->isSubModuleOf(Restriction)) {
+ Acceptable = true;
+ break;
+ }
+ }
+ }
- // Dereferencing the iterator has the effect of building the
- // IdentifierInfo node and populating it with the various
- // declarations it needs.
- This->Found = *Pos;
- return true;
+ if (!Acceptable)
+ continue;
+
+ Visited.insert(Imported);
+ Stack.push_back(Imported);
}
-
- // \brief Retrieve the identifier info found within the module
- // files.
- IdentifierInfo *getIdentifierInfo() const { return Found; }
- };
+ }
}
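
makeModuleVisible above is a classic worklist traversal; stripped of the wildcard-export handling, its shape is roughly the following (illustrative types, not clang's Module):

#include <set>
#include <vector>

struct Mod {
  int Visibility;
  std::vector<Mod*> Exports;
};

void makeVisible(Mod *Root, int Level) {
  std::set<Mod*> Visited;
  std::vector<Mod*> Stack(1, Root);
  while (!Stack.empty()) {
    Mod *M = Stack.back();
    Stack.pop_back();
    if (Level <= M->Visibility)
      continue;                       // already at least this visible
    M->Visibility = Level;
    for (unsigned I = 0, N = M->Exports.size(); I != N; ++I)
      if (Visited.insert(M->Exports[I]).second)
        Stack.push_back(M->Exports[I]);
  }
}
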
-
ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
ModuleKind Type) {
+ // Bump the generation number.
+ unsigned PreviousGeneration = CurrentGeneration++;
+
switch(ReadASTCore(FileName, Type, /*ImportedBy=*/0)) {
case Failure: return Failure;
case IgnorePCH: return IgnorePCH;
@@ -2373,70 +2667,79 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
}
// Here comes stuff that we only do once the entire chain is loaded.
-
+
// Check the predefines buffers.
- if (!DisableValidation && Type != MK_Module && Type != MK_Preamble &&
+ if (!DisableValidation && Type == MK_PCH &&
// FIXME: CheckPredefinesBuffers also sets the SuggestedPredefines;
// if DisableValidation is true, defines that were set on command-line
// but not in the PCH file will not be added to SuggestedPredefines.
CheckPredefinesBuffers())
return IgnorePCH;
- // Initialization of keywords and pragmas occurs before the
- // AST file is read, so there may be some identifiers that were
- // loaded into the IdentifierTable before we intercepted the
- // creation of identifiers. Iterate through the list of known
- // identifiers and determine whether we have to establish
- // preprocessor definitions or top-level identifier declaration
- // chains for those identifiers.
- //
- // We copy the IdentifierInfo pointers to a small vector first,
- // since de-serializing declarations or macro definitions can add
- // new entries into the identifier table, invalidating the
- // iterators.
- //
- // FIXME: We need a lazier way to load this information, e.g., by marking
- // the identifier data as 'dirty', so that it will be looked up in the
- // AST file(s) if it is uttered in the source. This could save us some
- // module load time.
- SmallVector<IdentifierInfo *, 128> Identifiers;
+ // Mark all of the identifiers in the identifier table as being out of date,
+ // so that various accessors know to check the loaded modules when the
+ // identifier is used.
for (IdentifierTable::iterator Id = PP.getIdentifierTable().begin(),
IdEnd = PP.getIdentifierTable().end();
Id != IdEnd; ++Id)
- Identifiers.push_back(Id->second);
+ Id->second->setOutOfDate(true);
- for (unsigned I = 0, N = Identifiers.size(); I != N; ++I) {
- IdentifierLookupVisitor Visitor(Identifiers[I]->getName());
- ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor);
- }
+ // Resolve any unresolved module exports.
+ for (unsigned I = 0, N = UnresolvedModuleImportExports.size(); I != N; ++I) {
+ UnresolvedModuleImportExport &Unresolved = UnresolvedModuleImportExports[I];
+ SubmoduleID GlobalID = getGlobalSubmoduleID(*Unresolved.File,Unresolved.ID);
+ Module *ResolvedMod = getSubmodule(GlobalID);
+
+ if (Unresolved.IsImport) {
+ if (ResolvedMod)
+ Unresolved.Mod->Imports.push_back(ResolvedMod);
+ continue;
+ }
+ if (ResolvedMod || Unresolved.IsWildcard)
+ Unresolved.Mod->Exports.push_back(
+ Module::ExportDecl(ResolvedMod, Unresolved.IsWildcard));
+ }
+ UnresolvedModuleImportExports.clear();
+
InitializeContext();
if (DeserializationListener)
DeserializationListener->ReaderInitialized(this);
- // If this AST file is a precompiled preamble, then set the preamble file ID
- // of the source manager to the file source file from which the preamble was
- // built.
- if (Type == MK_Preamble) {
- if (!OriginalFileID.isInvalid()) {
- OriginalFileID = FileID::get(ModuleMgr.getPrimaryModule().SLocEntryBaseID
- + OriginalFileID.getOpaqueValue() - 1);
+ if (!OriginalFileID.isInvalid()) {
+ OriginalFileID = FileID::get(ModuleMgr.getPrimaryModule().SLocEntryBaseID
+ + OriginalFileID.getOpaqueValue() - 1);
+
+ // If this AST file is a precompiled preamble, then set the preamble file ID
+ // of the source manager to the source file from which the preamble was
+ // built.
+ if (Type == MK_Preamble) {
SourceMgr.setPreambleFileID(OriginalFileID);
+ } else if (Type == MK_MainFile) {
+ SourceMgr.setMainFileID(OriginalFileID);
}
}
+ // For any Objective-C class definitions we have already loaded, make sure
+ // that we load any additional categories.
+ for (unsigned I = 0, N = ObjCClassesLoaded.size(); I != N; ++I) {
+ loadObjCCategories(ObjCClassesLoaded[I]->getGlobalID(),
+ ObjCClassesLoaded[I],
+ PreviousGeneration);
+ }
+
return Success;
}
ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
ModuleKind Type,
- Module *ImportedBy) {
- Module *M;
+ ModuleFile *ImportedBy) {
+ ModuleFile *M;
bool NewModule;
std::string ErrorStr;
llvm::tie(M, NewModule) = ModuleMgr.addModule(FileName, Type, ImportedBy,
- ErrorStr);
+ CurrentGeneration, ErrorStr);
if (!M) {
// We couldn't load the module.
@@ -2458,7 +2761,7 @@ ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
if (CurrentDir.empty()) CurrentDir = ".";
}
- Module &F = *M;
+ ModuleFile &F = *M;
llvm::BitstreamCursor &Stream = F.Stream;
Stream.init(F.StreamFile);
F.SizeInBits = F.Buffer->getBufferSize() * 8;
@@ -2522,7 +2825,7 @@ ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
}
}
- // Once read, set the Module bit base offset and update the size in
+ // Once read, set the ModuleFile bit base offset and update the size in
// bits of all files we've seen.
F.GlobalBitOffset = TotalModulesSizeInBits;
TotalModulesSizeInBits += F.SizeInBits;
@@ -2566,17 +2869,12 @@ void ASTReader::InitializeContext() {
// built-in types. Right now, we just ignore the problem.
// Load the special types.
- if (SpecialTypes.size() > NumSpecialTypeIDs) {
+ if (SpecialTypes.size() >= NumSpecialTypeIDs) {
if (Context.getBuiltinVaListType().isNull()) {
Context.setBuiltinVaListType(
GetType(SpecialTypes[SPECIAL_TYPE_BUILTIN_VA_LIST]));
}
- if (unsigned Proto = SpecialTypes[SPECIAL_TYPE_OBJC_PROTOCOL]) {
- if (Context.ObjCProtoType.isNull())
- Context.ObjCProtoType = GetType(Proto);
- }
-
if (unsigned String = SpecialTypes[SPECIAL_TYPE_CF_CONSTANT_STRING]) {
if (!Context.CFConstantStringTypeDecl)
Context.setCFConstantStringType(GetType(String));
@@ -2603,7 +2901,7 @@ void ASTReader::InitializeContext() {
}
}
- if (unsigned Jmp_buf = SpecialTypes[SPECIAL_TYPE_jmp_buf]) {
+ if (unsigned Jmp_buf = SpecialTypes[SPECIAL_TYPE_JMP_BUF]) {
QualType Jmp_bufType = GetType(Jmp_buf);
if (Jmp_bufType.isNull()) {
Error("jmp_buf type is NULL");
@@ -2624,7 +2922,7 @@ void ASTReader::InitializeContext() {
}
}
- if (unsigned Sigjmp_buf = SpecialTypes[SPECIAL_TYPE_sigjmp_buf]) {
+ if (unsigned Sigjmp_buf = SpecialTypes[SPECIAL_TYPE_SIGJMP_BUF]) {
QualType Sigjmp_bufType = GetType(Sigjmp_buf);
if (Sigjmp_bufType.isNull()) {
Error("sigjmp_buf type is NULL");
@@ -2687,6 +2985,22 @@ void ASTReader::InitializeContext() {
Context.setcudaConfigureCallDecl(
cast<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[0])));
}
+
+ // Re-export any modules that were imported by a non-module AST file.
+ for (unsigned I = 0, N = ImportedModules.size(); I != N; ++I) {
+ if (Module *Imported = getSubmodule(ImportedModules[I]))
+ makeModuleVisible(Imported, Module::AllVisible);
+ }
+ ImportedModules.clear();
+}
+
+void ASTReader::finalizeForWriting() {
+ for (HiddenNamesMapType::iterator Hidden = HiddenNamesMap.begin(),
+ HiddenEnd = HiddenNamesMap.end();
+ Hidden != HiddenEnd; ++Hidden) {
+ makeNamesVisible(Hidden->second);
+ }
+ HiddenNamesMap.clear();
}
/// \brief Retrieve the name of the original source file name
@@ -2697,7 +3011,7 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
DiagnosticsEngine &Diags) {
// Open the AST file.
std::string ErrStr;
- llvm::OwningPtr<llvm::MemoryBuffer> Buffer;
+ OwningPtr<llvm::MemoryBuffer> Buffer;
Buffer.reset(FileMgr.getBufferForFile(ASTFileName, &ErrStr));
if (!Buffer) {
Diags.Report(diag::err_fe_unable_to_read_pch_file) << ErrStr;
@@ -2770,6 +3084,251 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
return std::string();
}
+ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
+ // Enter the submodule block.
+ if (F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID)) {
+ Error("malformed submodule block record in AST file");
+ return Failure;
+ }
+
+ ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
+ bool First = true;
+ Module *CurrentModule = 0;
+ RecordData Record;
+ while (true) {
+ unsigned Code = F.Stream.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (F.Stream.ReadBlockEnd()) {
+ Error("error at end of submodule block in AST file");
+ return Failure;
+ }
+ return Success;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ F.Stream.ReadSubBlockID();
+ if (F.Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ F.Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read a record.
+ const char *BlobStart;
+ unsigned BlobLen;
+ Record.clear();
+ switch (F.Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case SUBMODULE_DEFINITION: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (Record.size() < 8) {
+ Error("malformed module definition");
+ return Failure;
+ }
+
+ StringRef Name(BlobStart, BlobLen);
+ SubmoduleID GlobalID = getGlobalSubmoduleID(F, Record[0]);
+ SubmoduleID Parent = getGlobalSubmoduleID(F, Record[1]);
+ bool IsFramework = Record[2];
+ bool IsExplicit = Record[3];
+ bool IsSystem = Record[4];
+ bool InferSubmodules = Record[5];
+ bool InferExplicitSubmodules = Record[6];
+ bool InferExportWildcard = Record[7];
+
+ Module *ParentModule = 0;
+ if (Parent)
+ ParentModule = getSubmodule(Parent);
+
+ // Retrieve this (sub)module from the module map, creating it if
+ // necessary.
+ CurrentModule = ModMap.findOrCreateModule(Name, ParentModule,
+ IsFramework,
+ IsExplicit).first;
+ SubmoduleID GlobalIndex = GlobalID - NUM_PREDEF_SUBMODULE_IDS;
+ if (GlobalIndex >= SubmodulesLoaded.size() ||
+ SubmodulesLoaded[GlobalIndex]) {
+ Error("too many submodules");
+ return Failure;
+ }
+
+ CurrentModule->IsFromModuleFile = true;
+ CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem;
+ CurrentModule->InferSubmodules = InferSubmodules;
+ CurrentModule->InferExplicitSubmodules = InferExplicitSubmodules;
+ CurrentModule->InferExportWildcard = InferExportWildcard;
+ if (DeserializationListener)
+ DeserializationListener->ModuleRead(GlobalID, CurrentModule);
+
+ SubmodulesLoaded[GlobalIndex] = CurrentModule;
+ break;
+ }
+
+ case SUBMODULE_UMBRELLA_HEADER: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ StringRef FileName(BlobStart, BlobLen);
+ if (const FileEntry *Umbrella = PP.getFileManager().getFile(FileName)) {
+ if (!CurrentModule->getUmbrellaHeader())
+ ModMap.setUmbrellaHeader(CurrentModule, Umbrella);
+ else if (CurrentModule->getUmbrellaHeader() != Umbrella) {
+ Error("mismatched umbrella headers in submodule");
+ return Failure;
+ }
+ }
+ break;
+ }
+
+ case SUBMODULE_HEADER: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ // FIXME: Be more lazy about this!
+ StringRef FileName(BlobStart, BlobLen);
+ if (const FileEntry *File = PP.getFileManager().getFile(FileName)) {
+ if (std::find(CurrentModule->Headers.begin(),
+ CurrentModule->Headers.end(),
+ File) == CurrentModule->Headers.end())
+ ModMap.addHeader(CurrentModule, File);
+ }
+ break;
+ }
+
+ case SUBMODULE_UMBRELLA_DIR: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ StringRef DirName(BlobStart, BlobLen);
+ if (const DirectoryEntry *Umbrella
+ = PP.getFileManager().getDirectory(DirName)) {
+ if (!CurrentModule->getUmbrellaDir())
+ ModMap.setUmbrellaDir(CurrentModule, Umbrella);
+ else if (CurrentModule->getUmbrellaDir() != Umbrella) {
+ Error("mismatched umbrella directories in submodule");
+ return Failure;
+ }
+ }
+ break;
+ }
+
+ case SUBMODULE_METADATA: {
+ if (!First) {
+ Error("submodule metadata record not at beginning of block");
+ return Failure;
+ }
+ First = false;
+
+ F.BaseSubmoduleID = getTotalNumSubmodules();
+ F.LocalNumSubmodules = Record[0];
+ unsigned LocalBaseSubmoduleID = Record[1];
+ if (F.LocalNumSubmodules > 0) {
+ // Introduce the global -> local mapping for submodules within this
+ // module.
+ GlobalSubmoduleMap.insert(std::make_pair(getTotalNumSubmodules()+1,&F));
+
+ // Introduce the local -> global mapping for submodules within this
+ // module.
+ F.SubmoduleRemap.insertOrReplace(
+ std::make_pair(LocalBaseSubmoduleID,
+ F.BaseSubmoduleID - LocalBaseSubmoduleID));
+
+ SubmodulesLoaded.resize(SubmodulesLoaded.size() + F.LocalNumSubmodules);
+ }
+ break;
+ }
+
+ case SUBMODULE_IMPORTS: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ for (unsigned Idx = 0; Idx != Record.size(); ++Idx) {
+ UnresolvedModuleImportExport Unresolved;
+ Unresolved.File = &F;
+ Unresolved.Mod = CurrentModule;
+ Unresolved.ID = Record[Idx];
+ Unresolved.IsImport = true;
+ Unresolved.IsWildcard = false;
+ UnresolvedModuleImportExports.push_back(Unresolved);
+ }
+ break;
+ }
+
+ case SUBMODULE_EXPORTS: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ for (unsigned Idx = 0; Idx + 1 < Record.size(); Idx += 2) {
+ UnresolvedModuleImportExport Unresolved;
+ Unresolved.File = &F;
+ Unresolved.Mod = CurrentModule;
+ Unresolved.ID = Record[Idx];
+ Unresolved.IsImport = false;
+ Unresolved.IsWildcard = Record[Idx + 1];
+ UnresolvedModuleImportExports.push_back(Unresolved);
+ }
+
+ // Once we've loaded the set of exports, there's no reason to keep
+ // the parsed, unresolved exports around.
+ CurrentModule->UnresolvedExports.clear();
+ break;
+ }
+ case SUBMODULE_REQUIRES: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ CurrentModule->addRequirement(StringRef(BlobStart, BlobLen),
+ Context.getLangOpts(),
+ Context.getTargetInfo());
+ break;
+ }
+ }
+ }
+}
+
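
ReadSubmoduleBlock above follows the standard bitstream-reading loop. A skeleton of that control flow, written against a hypothetical cursor with the same method names (the numeric codes match llvm::bitc's END_BLOCK/ENTER_SUBBLOCK/DEFINE_ABBREV):

template <typename Cursor>
bool readBlock(Cursor &Stream) {
  while (true) {
    unsigned Code = Stream.ReadCode();
    if (Code == 0 /* END_BLOCK */)
      return !Stream.ReadBlockEnd();  // false if the block end is malformed
    if (Code == 1 /* ENTER_SUBBLOCK */) {
      Stream.ReadSubBlockID();
      if (Stream.SkipBlock())         // no known subblocks: skip them all
        return false;
      continue;
    }
    if (Code == 2 /* DEFINE_ABBREV */) {
      Stream.ReadAbbrevRecord();
      continue;
    }
    Stream.HandleRecord(Code);        // read the record, dispatch on kind
  }
}
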
/// \brief Parse the record that corresponds to a LangOptions data
/// structure.
///
@@ -2788,20 +3347,31 @@ bool ASTReader::ParseLanguageOptions(
LangOpts.set##Name(static_cast<LangOptions::Type>(Record[Idx++]));
#include "clang/Basic/LangOptions.def"
+ unsigned Length = Record[Idx++];
+ LangOpts.CurrentModule.assign(Record.begin() + Idx,
+ Record.begin() + Idx + Length);
return Listener->ReadLanguageOptions(LangOpts);
}
return false;
}
-PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
- PreprocessedEntityID PPID = Index+1;
+std::pair<ModuleFile *, unsigned>
+ASTReader::getModulePreprocessedEntity(unsigned GlobalIndex) {
GlobalPreprocessedEntityMapType::iterator
- I = GlobalPreprocessedEntityMap.find(Index);
+ I = GlobalPreprocessedEntityMap.find(GlobalIndex);
assert(I != GlobalPreprocessedEntityMap.end() &&
"Corrupted global preprocessed entity map");
- Module &M = *I->second;
- unsigned LocalIndex = Index - M.BasePreprocessedEntityID;
+ ModuleFile *M = I->second;
+ unsigned LocalIndex = GlobalIndex - M->BasePreprocessedEntityID;
+ return std::make_pair(M, LocalIndex);
+}
+
+PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
+ PreprocessedEntityID PPID = Index+1;
+ std::pair<ModuleFile *, unsigned> PPInfo = getModulePreprocessedEntity(Index);
+ ModuleFile &M = *PPInfo.first;
+ unsigned LocalIndex = PPInfo.second;
const PPEntityOffset &PPOffs = M.PreprocessedEntityOffsets[LocalIndex];
SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
@@ -2876,9 +3446,10 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
case PPD_INCLUSION_DIRECTIVE: {
const char *FullFileNameStart = BlobStart + Record[0];
- const FileEntry *File
- = PP.getFileManager().getFile(StringRef(FullFileNameStart,
- BlobLen - Record[0]));
+ StringRef FullFileName(FullFileNameStart, BlobLen - Record[0]);
+ const FileEntry *File = 0;
+ if (!FullFileName.empty())
+ File = PP.getFileManager().getFile(FullFileName);
// FIXME: Stable encoding
InclusionDirective::InclusionKind Kind
@@ -2892,9 +3463,8 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
return ID;
}
}
-
- Error("invalid offset in preprocessor detail block");
- return 0;
+
+ llvm_unreachable("Invalid PreprocessorDetailRecordTypes");
}
/// \brief \arg SLocMapI points at a chunk of a module that contains no
@@ -2906,7 +3476,7 @@ PreprocessedEntityID ASTReader::findNextPreprocessedEntity(
++SLocMapI;
for (GlobalSLocOffsetMapType::const_iterator
EndI = GlobalSLocOffsetMap.end(); SLocMapI != EndI; ++SLocMapI) {
- Module &M = *SLocMapI->second;
+ ModuleFile &M = *SLocMapI->second;
if (M.NumPreprocessedEntities)
return getGlobalPreprocessedEntityID(M, M.BasePreprocessedEntityID);
}
@@ -2919,9 +3489,9 @@ namespace {
template <unsigned PPEntityOffset::*PPLoc>
struct PPEntityComp {
const ASTReader &Reader;
- Module &M;
+ ModuleFile &M;
- PPEntityComp(const ASTReader &Reader, Module &M) : Reader(Reader), M(M) { }
+ PPEntityComp(const ASTReader &Reader, ModuleFile &M) : Reader(Reader), M(M) { }
bool operator()(const PPEntityOffset &L, const PPEntityOffset &R) const {
SourceLocation LHS = getLoc(L);
@@ -2961,7 +3531,7 @@ ASTReader::findBeginPreprocessedEntity(SourceLocation BLoc) const {
if (SLocMapI->second->NumPreprocessedEntities == 0)
return findNextPreprocessedEntity(SLocMapI);
- Module &M = *SLocMapI->second;
+ ModuleFile &M = *SLocMapI->second;
typedef const PPEntityOffset *pp_iterator;
pp_iterator pp_begin = M.PreprocessedEntityOffsets;
pp_iterator pp_end = pp_begin + M.NumPreprocessedEntities;
@@ -3010,7 +3580,7 @@ ASTReader::findEndPreprocessedEntity(SourceLocation ELoc) const {
if (SLocMapI->second->NumPreprocessedEntities == 0)
return findNextPreprocessedEntity(SLocMapI);
- Module &M = *SLocMapI->second;
+ ModuleFile &M = *SLocMapI->second;
typedef const PPEntityOffset *pp_iterator;
pp_iterator pp_begin = M.PreprocessedEntityOffsets;
pp_iterator pp_end = pp_begin + M.NumPreprocessedEntities;
@@ -3038,6 +3608,28 @@ std::pair<unsigned, unsigned>
return std::make_pair(BeginID, EndID);
}
+/// \brief Determines, when possible, whether the preallocated preprocessed
+/// entity with index \arg Index came from file \arg FID.
+llvm::Optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID) {
+ if (FID.isInvalid())
+ return false;
+
+ std::pair<ModuleFile *, unsigned> PPInfo = getModulePreprocessedEntity(Index);
+ ModuleFile &M = *PPInfo.first;
+ unsigned LocalIndex = PPInfo.second;
+ const PPEntityOffset &PPOffs = M.PreprocessedEntityOffsets[LocalIndex];
+
+ SourceLocation Loc = ReadSourceLocation(M, PPOffs.Begin);
+ if (Loc.isInvalid())
+ return false;
+
+ return SourceMgr.isInFileID(SourceMgr.getFileLoc(Loc), FID);
+}
+
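
The llvm::Optional<bool> result above is effectively a tri-state. A small sketch of how a caller can treat "unknown" conservatively (illustrative enum, not the clang API):

enum Tristate { No, Yes, Unknown };

bool keepAsCandidate(Tristate InFile) {
  // Unknown means the cheap location check could not decide, so the
  // entity must stay a candidate until it is actually deserialized.
  return InFile != No;
}
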
namespace {
/// \brief Visitor used to search for information about a header file.
class HeaderFileInfoVisitor {
@@ -3050,7 +3642,7 @@ namespace {
HeaderFileInfoVisitor(ASTReader &Reader, const FileEntry *FE)
: Reader(Reader), FE(FE) { }
- static bool visit(Module &M, void *UserData) {
+ static bool visit(ModuleFile &M, void *UserData) {
HeaderFileInfoVisitor *This
= static_cast<HeaderFileInfoVisitor *>(UserData);
@@ -3092,10 +3684,14 @@ HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
- Module &F = *(*I);
+ ModuleFile &F = *(*I);
unsigned Idx = 0;
while (Idx < F.PragmaDiagMappings.size()) {
SourceLocation Loc = ReadSourceLocation(F, F.PragmaDiagMappings[Idx++]);
+ Diag.DiagStates.push_back(*Diag.GetCurDiagState());
+ Diag.DiagStatePoints.push_back(
+ DiagnosticsEngine::DiagStatePoint(&Diag.DiagStates.back(),
+ FullSourceLoc(Loc, SourceMgr)));
while (1) {
assert(Idx < F.PragmaDiagMappings.size() &&
"Invalid data, didn't find '-1' marking end of diag/map pairs");
@@ -3108,8 +3704,8 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
break; // no more diag/map pairs for this location.
}
diag::Mapping Map = (diag::Mapping)F.PragmaDiagMappings[Idx++];
- // The user bit gets set by WritePragmaDiagnosticMappings.
- Diag.setDiagnosticMapping(DiagID, Map, Loc);
+ DiagnosticMappingInfo MappingInfo = Diag.makeMappingInfo(Map, Loc);
+ Diag.GetCurDiagState()->setMappingInfo(DiagID, MappingInfo);
}
}
}
@@ -3119,7 +3715,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
GlobalTypeMapType::iterator I = GlobalTypeMap.find(Index);
assert(I != GlobalTypeMap.end() && "Corrupted global type map");
- Module *M = I->second;
+ ModuleFile *M = I->second;
return RecordLocation(M, M->TypeOffsets[Index - M->BaseTypeIndex]);
}
@@ -3295,14 +3891,15 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
ParamTypes.push_back(readType(*Loc.F, Record, Idx));
EPI.Variadic = Record[Idx++];
+ EPI.HasTrailingReturn = Record[Idx++];
EPI.TypeQuals = Record[Idx++];
EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
ExceptionSpecificationType EST =
static_cast<ExceptionSpecificationType>(Record[Idx++]);
EPI.ExceptionSpecType = EST;
+ SmallVector<QualType, 2> Exceptions;
if (EST == EST_Dynamic) {
EPI.NumExceptions = Record[Idx++];
- SmallVector<QualType, 2> Exceptions;
for (unsigned I = 0; I != EPI.NumExceptions; ++I)
Exceptions.push_back(readType(*Loc.F, Record, Idx));
EPI.Exceptions = Exceptions.data();
@@ -3344,8 +3941,10 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
return Context.getTypeOfType(UnderlyingType);
}
- case TYPE_DECLTYPE:
- return Context.getDecltypeType(ReadExpr(*Loc.F));
+ case TYPE_DECLTYPE: {
+ QualType UnderlyingType = readType(*Loc.F, Record, Idx);
+ return Context.getDecltypeType(ReadExpr(*Loc.F), UnderlyingType);
+ }
case TYPE_UNARY_TRANSFORM: {
QualType BaseType = readType(*Loc.F, Record, Idx);
@@ -3364,8 +3963,9 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
}
unsigned Idx = 0;
bool IsDependent = Record[Idx++];
- QualType T
- = Context.getRecordType(ReadDeclAs<RecordDecl>(*Loc.F, Record, Idx));
+ RecordDecl *RD = ReadDeclAs<RecordDecl>(*Loc.F, Record, Idx);
+ RD = cast_or_null<RecordDecl>(RD->getCanonicalDecl());
+ QualType T = Context.getRecordType(RD);
const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
return T;
}
@@ -3429,7 +4029,7 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
unsigned Idx = 0;
ObjCInterfaceDecl *ItfD
= ReadDeclAs<ObjCInterfaceDecl>(*Loc.F, Record, Idx);
- return Context.getObjCInterfaceType(ItfD);
+ return Context.getObjCInterfaceType(ItfD->getCanonicalDecl());
}
case TYPE_OBJC_OBJECT: {
@@ -3554,13 +4154,12 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
return Context.getAtomicType(ValueType);
}
}
- // Suppress a GCC warning
- return QualType();
+ llvm_unreachable("Invalid TypeCode!");
}
class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
ASTReader &Reader;
- Module &F;
+ ModuleFile &F;
llvm::BitstreamCursor &DeclsCursor;
const ASTReader::RecordData &Record;
unsigned &Idx;
@@ -3576,7 +4175,7 @@ class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
}
public:
- TypeLocReader(ASTReader &Reader, Module &F,
+ TypeLocReader(ASTReader &Reader, ModuleFile &F,
const ASTReader::RecordData &Record, unsigned &Idx)
: Reader(Reader), F(F), DeclsCursor(F.DeclsCursor), Record(Record), Idx(Idx)
{ }
@@ -3733,6 +4332,7 @@ void TypeLocReader::VisitSubstTemplateTypeParmPackTypeLoc(
}
void TypeLocReader::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
+ TL.setTemplateKeywordLoc(ReadSourceLocation(Record, Idx));
TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
@@ -3747,22 +4347,23 @@ void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
TL.setRParenLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
}
void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
- TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
TL.setNameLoc(ReadSourceLocation(Record, Idx));
}
void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- TL.setKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setTemplateKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
@@ -3793,7 +4394,7 @@ void TypeLocReader::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
TL.setRParenLoc(ReadSourceLocation(Record, Idx));
}
-TypeSourceInfo *ASTReader::GetTypeSourceInfo(Module &F,
+TypeSourceInfo *ASTReader::GetTypeSourceInfo(ModuleFile &F,
const RecordData &Record,
unsigned &Idx) {
QualType InfoTy = readType(F, Record, Idx);
@@ -3843,6 +4444,7 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_LONGDOUBLE_ID: T = Context.LongDoubleTy; break;
case PREDEF_TYPE_OVERLOAD_ID: T = Context.OverloadTy; break;
case PREDEF_TYPE_BOUND_MEMBER: T = Context.BoundMemberTy; break;
+ case PREDEF_TYPE_PSEUDO_OBJECT: T = Context.PseudoObjectTy; break;
case PREDEF_TYPE_DEPENDENT_ID: T = Context.DependentTy; break;
case PREDEF_TYPE_UNKNOWN_ANY: T = Context.UnknownAnyTy; break;
case PREDEF_TYPE_NULLPTR_ID: T = Context.NullPtrTy; break;
@@ -3856,6 +4458,11 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_AUTO_RREF_DEDUCT:
T = Context.getAutoRRefDeductType();
break;
+
+ case PREDEF_TYPE_ARC_UNBRIDGED_CAST:
+ T = Context.ARCUnbridgedCastTy;
+ break;
+
}
assert(!T.isNull() && "Unknown predefined type");
@@ -3878,12 +4485,12 @@ QualType ASTReader::GetType(TypeID ID) {
return TypesLoaded[Index].withFastQualifiers(FastQuals);
}
-QualType ASTReader::getLocalType(Module &F, unsigned LocalID) {
+QualType ASTReader::getLocalType(ModuleFile &F, unsigned LocalID) {
return GetType(getGlobalTypeID(F, LocalID));
}
serialization::TypeID
-ASTReader::getGlobalTypeID(Module &F, unsigned LocalID) const {
+ASTReader::getGlobalTypeID(ModuleFile &F, unsigned LocalID) const {
unsigned FastQuals = LocalID & Qualifiers::FastMask;
unsigned LocalIndex = LocalID >> Qualifiers::FastWidth;
@@ -3899,7 +4506,7 @@ ASTReader::getGlobalTypeID(Module &F, unsigned LocalID) const {
}
TemplateArgumentLocInfo
-ASTReader::GetTemplateArgumentLocInfo(Module &F,
+ASTReader::GetTemplateArgumentLocInfo(ModuleFile &F,
TemplateArgument::ArgKind Kind,
const RecordData &Record,
unsigned &Index) {
@@ -3930,11 +4537,10 @@ ASTReader::GetTemplateArgumentLocInfo(Module &F,
return TemplateArgumentLocInfo();
}
llvm_unreachable("unexpected template argument loc");
- return TemplateArgumentLocInfo();
}
TemplateArgumentLoc
-ASTReader::ReadTemplateArgumentLoc(Module &F,
+ASTReader::ReadTemplateArgumentLoc(ModuleFile &F,
const RecordData &Record, unsigned &Index) {
TemplateArgument Arg = ReadTemplateArgument(F, Record, Index);
@@ -3950,7 +4556,7 @@ Decl *ASTReader::GetExternalDecl(uint32_t ID) {
return GetDecl(ID);
}
-uint64_t ASTReader::readCXXBaseSpecifiers(Module &M, const RecordData &Record,
+uint64_t ASTReader::readCXXBaseSpecifiers(ModuleFile &M, const RecordData &Record,
unsigned &Idx){
if (Idx >= Record.size())
return 0;
@@ -3983,7 +4589,7 @@ CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
}
serialization::DeclID
-ASTReader::getGlobalDeclID(Module &F, unsigned LocalID) const {
+ASTReader::getGlobalDeclID(ModuleFile &F, unsigned LocalID) const {
if (LocalID < NUM_PREDEF_DECL_IDS)
return LocalID;
@@ -3995,12 +4601,39 @@ ASTReader::getGlobalDeclID(Module &F, unsigned LocalID) const {
}
bool ASTReader::isDeclIDFromModule(serialization::GlobalDeclID ID,
- Module &M) const {
+ ModuleFile &M) const {
GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(ID);
assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
return &M == I->second;
}
+ModuleFile *ASTReader::getOwningModuleFile(Decl *D) {
+ if (!D->isFromASTFile())
+ return 0;
+ GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(D->getGlobalID());
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ return I->second;
+}
+
+SourceLocation ASTReader::getSourceLocationForDeclID(GlobalDeclID ID) {
+ if (ID < NUM_PREDEF_DECL_IDS)
+ return SourceLocation();
+
+ unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+
+ if (Index >= DeclsLoaded.size()) {
+ Error("declaration ID out-of-range for AST file");
+ return SourceLocation();
+ }
+
+ if (Decl *D = DeclsLoaded[Index])
+ return D->getLocation();
+
+ unsigned RawLocation = 0;
+ RecordLocation Rec = DeclCursorForID(ID, RawLocation);
+ return ReadSourceLocation(*Rec.F, RawLocation);
+}
+
Decl *ASTReader::GetDecl(DeclID ID) {
if (ID < NUM_PREDEF_DECL_IDS) {
switch ((PredefinedDeclIDs)ID) {
@@ -4019,6 +4652,9 @@ Decl *ASTReader::GetDecl(DeclID ID) {
case PREDEF_DECL_OBJC_CLASS_ID:
return Context.getObjCClassDecl();
+ case PREDEF_DECL_OBJC_PROTOCOL_ID:
+ return Context.getObjCProtocolDecl();
+
case PREDEF_DECL_INT_128_ID:
return Context.getInt128Decl();
@@ -4028,18 +4664,15 @@ Decl *ASTReader::GetDecl(DeclID ID) {
case PREDEF_DECL_OBJC_INSTANCETYPE_ID:
return Context.getObjCInstanceTypeDecl();
}
-
- return 0;
}
unsigned Index = ID - NUM_PREDEF_DECL_IDS;
- if (Index > DeclsLoaded.size()) {
+ if (Index >= DeclsLoaded.size()) {
Error("declaration ID out-of-range for AST file");
- return 0;
}
-if (!DeclsLoaded[Index]) {
+ if (!DeclsLoaded[Index]) {
ReadDeclRecord(ID);
if (DeserializationListener)
DeserializationListener->DeclRead(ID, DeclsLoaded[Index]);
@@ -4048,7 +4681,24 @@ if (!DeclsLoaded[Index]) {
return DeclsLoaded[Index];
}
-serialization::DeclID ASTReader::ReadDeclID(Module &F,
+DeclID ASTReader::mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
+ DeclID GlobalID) {
+ if (GlobalID < NUM_PREDEF_DECL_IDS)
+ return GlobalID;
+
+ GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(GlobalID);
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ ModuleFile *Owner = I->second;
+
+ llvm::DenseMap<ModuleFile *, serialization::DeclID>::iterator Pos
+ = M.GlobalToLocalDeclIDs.find(Owner);
+ if (Pos == M.GlobalToLocalDeclIDs.end())
+ return 0;
+
+ return GlobalID - Owner->BaseDeclID + Pos->second;
+}
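
The rebasing arithmetic above is the heart of the global-to-local mapping; in isolation (illustrative names):

struct OwnerIDs { unsigned BaseDeclID; };

unsigned rebase(unsigned GlobalID, const OwnerIDs &Owner,
                unsigned LocalBaseInConsumer) {
  // GlobalID was formed as Owner.BaseDeclID + local index; strip that base
  // and re-apply the consumer's local numbering for the same range.
  return GlobalID - Owner.BaseDeclID + LocalBaseInConsumer;
}
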
+
+serialization::DeclID ASTReader::ReadDeclID(ModuleFile &F,
const RecordData &Record,
unsigned &Idx) {
if (Idx >= Record.size()) {
@@ -4093,14 +4743,14 @@ namespace {
PredefsVisited[I] = false;
}
- static bool visit(Module &M, bool Preorder, void *UserData) {
+ static bool visit(ModuleFile &M, bool Preorder, void *UserData) {
if (Preorder)
return false;
FindExternalLexicalDeclsVisitor *This
= static_cast<FindExternalLexicalDeclsVisitor *>(UserData);
- Module::DeclContextInfosMap::iterator Info
+ ModuleFile::DeclContextInfosMap::iterator Info
= M.DeclContextInfos.find(This->DC);
if (Info == M.DeclContextInfos.end() || !Info->second.LexicalDecls)
return false;
@@ -4144,31 +4794,118 @@ ExternalLoadResult ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
}
namespace {
- /// \brief Module visitor used to perform name lookup into a
+
+class DeclIDComp {
+ ASTReader &Reader;
+ ModuleFile &Mod;
+
+public:
+ DeclIDComp(ASTReader &Reader, ModuleFile &M) : Reader(Reader), Mod(M) {}
+
+ bool operator()(LocalDeclID L, LocalDeclID R) const {
+ SourceLocation LHS = getLocation(L);
+ SourceLocation RHS = getLocation(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(SourceLocation LHS, LocalDeclID R) const {
+ SourceLocation RHS = getLocation(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(LocalDeclID L, SourceLocation RHS) const {
+ SourceLocation LHS = getLocation(L);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ SourceLocation getLocation(LocalDeclID ID) const {
+ return Reader.getSourceManager().getFileLoc(
+ Reader.getSourceLocationForDeclID(Reader.getGlobalDeclID(Mod, ID)));
+ }
+};
+
+}
+
+void ASTReader::FindFileRegionDecls(FileID File,
+ unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls) {
+ SourceManager &SM = getSourceManager();
+
+ llvm::DenseMap<FileID, FileDeclsInfo>::iterator I = FileDeclIDs.find(File);
+ if (I == FileDeclIDs.end())
+ return;
+
+ FileDeclsInfo &DInfo = I->second;
+ if (DInfo.Decls.empty())
+ return;
+
+ SourceLocation
+ BeginLoc = SM.getLocForStartOfFile(File).getLocWithOffset(Offset);
+ SourceLocation EndLoc = BeginLoc.getLocWithOffset(Length);
+
+ DeclIDComp DIDComp(*this, *DInfo.Mod);
+ ArrayRef<serialization::LocalDeclID>::iterator
+ BeginIt = std::lower_bound(DInfo.Decls.begin(), DInfo.Decls.end(),
+ BeginLoc, DIDComp);
+ if (BeginIt != DInfo.Decls.begin())
+ --BeginIt;
+
+ // If we are pointing at a top-level decl inside an objc container, we need
+ // to backtrack until we find it; otherwise we will fail to report that the
+ // region overlaps with an objc container.
+ while (BeginIt != DInfo.Decls.begin() &&
+ GetDecl(getGlobalDeclID(*DInfo.Mod, *BeginIt))
+ ->isTopLevelDeclInObjCContainer())
+ --BeginIt;
+
+ ArrayRef<serialization::LocalDeclID>::iterator
+ EndIt = std::upper_bound(DInfo.Decls.begin(), DInfo.Decls.end(),
+ EndLoc, DIDComp);
+ if (EndIt != DInfo.Decls.end())
+ ++EndIt;
+
+ for (ArrayRef<serialization::LocalDeclID>::iterator
+ DIt = BeginIt; DIt != EndIt; ++DIt)
+ Decls.push_back(GetDecl(getGlobalDeclID(*DInfo.Mod, *DIt)));
+}
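
FindFileRegionDecls narrows the per-file, source-order-sorted decl array with std::lower_bound/std::upper_bound, relying on a comparator that can compare an element against a key of a different type (a local decl ID against a SourceLocation). A sketch of that mixed-type comparator idiom, with toy Rec/OffsetComp types standing in for the real ones:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Rec { unsigned Offset; const char *Name; };

    // Mirrors DeclIDComp: each overload reduces its arguments to the ordering
    // key (here, a file offset) before comparing.
    struct OffsetComp {
      bool operator()(const Rec &L, const Rec &R) const { return L.Offset < R.Offset; }
      bool operator()(const Rec &L, unsigned R) const { return L.Offset < R; }
      bool operator()(unsigned L, const Rec &R) const { return L < R.Offset; }
    };

    int main() {
      std::vector<Rec> Decls;
      Rec A = { 10, "a" }, B = { 40, "b" }, C = { 90, "c" };
      Decls.push_back(A); Decls.push_back(B); Decls.push_back(C);

      unsigned Begin = 30, End = 60; // region [30, 60)
      std::vector<Rec>::iterator BeginIt =
          std::lower_bound(Decls.begin(), Decls.end(), Begin, OffsetComp());
      std::vector<Rec>::iterator EndIt =
          std::upper_bound(BeginIt, Decls.end(), End, OffsetComp());
      for (; BeginIt != EndIt; ++BeginIt)
        std::printf("%s\n", BeginIt->Name); // prints "b"
      return 0;
    }
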
+
+namespace {
+ /// \brief ModuleFile visitor used to perform name lookup into a
/// declaration context.
class DeclContextNameLookupVisitor {
ASTReader &Reader;
+ llvm::SmallVectorImpl<const DeclContext *> &Contexts;
const DeclContext *DC;
DeclarationName Name;
SmallVectorImpl<NamedDecl *> &Decls;
public:
DeclContextNameLookupVisitor(ASTReader &Reader,
- const DeclContext *DC, DeclarationName Name,
+ SmallVectorImpl<const DeclContext *> &Contexts,
+ DeclarationName Name,
SmallVectorImpl<NamedDecl *> &Decls)
- : Reader(Reader), DC(DC), Name(Name), Decls(Decls) { }
+ : Reader(Reader), Contexts(Contexts), Name(Name), Decls(Decls) { }
- static bool visit(Module &M, void *UserData) {
+ static bool visit(ModuleFile &M, void *UserData) {
DeclContextNameLookupVisitor *This
= static_cast<DeclContextNameLookupVisitor *>(UserData);
// Check whether we have any visible declaration information for
// this context in this module.
- Module::DeclContextInfosMap::iterator Info
- = M.DeclContextInfos.find(This->DC);
- if (Info == M.DeclContextInfos.end() || !Info->second.NameLookupTableData)
- return false;
+ ModuleFile::DeclContextInfosMap::iterator Info;
+ bool FoundInfo = false;
+ for (unsigned I = 0, N = This->Contexts.size(); I != N; ++I) {
+ Info = M.DeclContextInfos.find(This->Contexts[I]);
+ if (Info != M.DeclContextInfos.end() &&
+ Info->second.NameLookupTableData) {
+ FoundInfo = true;
+ break;
+ }
+ }
+ if (!FoundInfo)
+ return false;
+
// Look for this name within this module.
ASTDeclContextNameLookupTable *LookupTable =
(ASTDeclContextNameLookupTable*)Info->second.NameLookupTableData;
@@ -4210,13 +4947,75 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
DeclContext::lookup_iterator(0));
SmallVector<NamedDecl *, 64> Decls;
- DeclContextNameLookupVisitor Visitor(*this, DC, Name, Decls);
+
+ // Compute the declaration contexts we need to look into. Multiple such
+ // declaration contexts occur when two declaration contexts from disjoint
+ // modules get merged, e.g., when two namespaces with the same name are
+ // independently defined in separate modules.
+ SmallVector<const DeclContext *, 2> Contexts;
+ Contexts.push_back(DC);
+
+ if (DC->isNamespace()) {
+ MergedDeclsMap::iterator Merged
+ = MergedDecls.find(const_cast<Decl *>(cast<Decl>(DC)));
+ if (Merged != MergedDecls.end()) {
+ for (unsigned I = 0, N = Merged->second.size(); I != N; ++I)
+ Contexts.push_back(cast<DeclContext>(GetDecl(Merged->second[I])));
+ }
+ }
+
+ DeclContextNameLookupVisitor Visitor(*this, Contexts, Name, Decls);
ModuleMgr.visit(&DeclContextNameLookupVisitor::visit, &Visitor);
++NumVisibleDeclContextsRead;
SetExternalVisibleDeclsForName(DC, Name, Decls);
return const_cast<DeclContext*>(DC)->lookup(Name);
}
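
The lookup now fans out over every declaration context that was merged into DC, so a name introduced in either module's copy of a namespace is found through the single user-visible context. A toy sketch of the fan-out, with std::set standing in for the on-disk name-lookup table:

    #include <set>
    #include <string>
    #include <vector>

    // Toy model of merged contexts: namespace N defined in two modules yields
    // one primary context plus the merged ones recorded against it.
    typedef std::set<std::string> Context;

    bool lookup(const std::vector<const Context *> &Contexts,
                const std::string &Name) {
      for (size_t I = 0, N = Contexts.size(); I != N; ++I)
        if (Contexts[I]->count(Name))
          return true; // found in one of the merged definitions
      return false;
    }

    int main() {
      Context FromModuleA, FromModuleB;
      FromModuleA.insert("foo");
      FromModuleB.insert("bar");

      std::vector<const Context *> Contexts; // primary first, merged after
      Contexts.push_back(&FromModuleA);
      Contexts.push_back(&FromModuleB);

      return lookup(Contexts, "bar") ? 0 : 1; // "bar" lives in the merged context
    }
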
+namespace {
+ /// \brief ModuleFile visitor used to complete the visible decls map of a
+ /// declaration context.
+ class DeclContextVisibleDeclMapVisitor {
+ ASTReader &Reader;
+ DeclContext *DC;
+
+ public:
+ DeclContextVisibleDeclMapVisitor(ASTReader &Reader, DeclContext *DC)
+ : Reader(Reader), DC(DC) { }
+
+ static bool visit(ModuleFile &M, void *UserData) {
+ return static_cast<DeclContextVisibleDeclMapVisitor*>(UserData)->visit(M);
+ }
+
+ bool visit(ModuleFile &M) {
+ // Check whether we have any visible declaration information for
+ // this context in this module.
+ ModuleFile::DeclContextInfosMap::iterator
+ Info = M.DeclContextInfos.find(DC);
+ if (Info == M.DeclContextInfos.end() ||
+ !Info->second.NameLookupTableData)
+ return false;
+
+ // Look for this name within this module.
+ ASTDeclContextNameLookupTable *LookupTable =
+ (ASTDeclContextNameLookupTable*)Info->second.NameLookupTableData;
+ for (ASTDeclContextNameLookupTable::key_iterator
+ I = LookupTable->key_begin(),
+ E = LookupTable->key_end(); I != E; ++I) {
+ DC->lookup(*I); // Force loading of the visible decls for the decl name.
+ }
+
+ return false;
+ }
+ };
+}
+
+void ASTReader::completeVisibleDeclsMap(DeclContext *DC) {
+ if (!DC->hasExternalVisibleStorage())
+ return;
+ DeclContextVisibleDeclMapVisitor Visitor(*this, DC);
+ ModuleMgr.visit(&DeclContextVisibleDeclMapVisitor::visit, &Visitor);
+}
+
/// \brief Under non-PCH compilation the consumer receives the objc methods
/// before receiving the implementation, and codegen depends on this.
/// We simulate this by deserializing and passing to consumer the methods of the
@@ -4238,13 +5037,17 @@ void ASTReader::PassInterestingDeclsToConsumer() {
Decl *D = InterestingDecls.front();
InterestingDecls.pop_front();
- if (ObjCImplDecl *ImplD = dyn_cast<ObjCImplDecl>(D))
- PassObjCImplDeclToConsumer(ImplD, Consumer);
- else
- Consumer->HandleInterestingDecl(DeclGroupRef(D));
+ PassInterestingDeclToConsumer(D);
}
}
+void ASTReader::PassInterestingDeclToConsumer(Decl *D) {
+ if (ObjCImplDecl *ImplD = dyn_cast<ObjCImplDecl>(D))
+ PassObjCImplDeclToConsumer(ImplD, Consumer);
+ else
+ Consumer->HandleInterestingDecl(DeclGroupRef(D));
+}
+
void ASTReader::StartTranslationUnit(ASTConsumer *Consumer) {
this->Consumer = Consumer;
@@ -4331,15 +5134,15 @@ void ASTReader::PrintStats() {
std::fprintf(stderr, "\n");
}
-template<typename Key, typename Module, unsigned InitialCapacity>
+template<typename Key, typename ModuleFile, unsigned InitialCapacity>
static void
dumpModuleIDMap(StringRef Name,
- const ContinuousRangeMap<Key, Module *,
+ const ContinuousRangeMap<Key, ModuleFile *,
InitialCapacity> &Map) {
if (Map.begin() == Map.end())
return;
- typedef ContinuousRangeMap<Key, Module *, InitialCapacity> MapType;
+ typedef ContinuousRangeMap<Key, ModuleFile *, InitialCapacity> MapType;
llvm::errs() << Name << ":\n";
for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
I != IEnd; ++I) {
@@ -4349,12 +5152,13 @@ dumpModuleIDMap(StringRef Name,
}
void ASTReader::dump() {
- llvm::errs() << "*** PCH/Module Remappings:\n";
+ llvm::errs() << "*** PCH/ModuleFile Remappings:\n";
dumpModuleIDMap("Global bit offset map", GlobalBitOffsetsMap);
dumpModuleIDMap("Global source location entry map", GlobalSLocEntryMap);
dumpModuleIDMap("Global type map", GlobalTypeMap);
dumpModuleIDMap("Global declaration map", GlobalDeclMap);
dumpModuleIDMap("Global identifier map", GlobalIdentifierMap);
+ dumpModuleIDMap("Global submodule map", GlobalSubmoduleMap);
dumpModuleIDMap("Global selector map", GlobalSelectorMap);
dumpModuleIDMap("Global preprocessed entity map",
GlobalPreprocessedEntityMap);
@@ -4392,10 +5196,8 @@ void ASTReader::InitializeSema(Sema &S) {
// Makes sure any declarations that were deserialized "too early"
// still get added to the identifier's declaration chains.
for (unsigned I = 0, N = PreloadedDecls.size(); I != N; ++I) {
- if (SemaObj->TUScope)
- SemaObj->TUScope->AddDecl(PreloadedDecls[I]);
-
- SemaObj->IdResolver.AddDecl(PreloadedDecls[I]);
+ SemaObj->pushExternalDeclIntoScope(PreloadedDecls[I],
+ PreloadedDecls[I]->getDeclName());
}
PreloadedDecls.clear();
@@ -4424,9 +5226,12 @@ void ASTReader::InitializeSema(Sema &S) {
}
IdentifierInfo* ASTReader::get(const char *NameStart, const char *NameEnd) {
- IdentifierLookupVisitor Visitor(StringRef(NameStart, NameEnd - NameStart));
+ IdentifierLookupVisitor Visitor(StringRef(NameStart, NameEnd - NameStart),
+ /*PriorGeneration=*/0);
ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor);
- return Visitor.getIdentifierInfo();
+ IdentifierInfo *II = Visitor.getIdentifierInfo();
+ markIdentifierUpToDate(II);
+ return II;
}
namespace clang {
@@ -4491,45 +5296,27 @@ IdentifierIterator *ASTReader::getIdentifiers() const {
namespace clang { namespace serialization {
class ReadMethodPoolVisitor {
ASTReader &Reader;
- Selector Sel;
+ Selector Sel;
+ unsigned PriorGeneration;
llvm::SmallVector<ObjCMethodDecl *, 4> InstanceMethods;
llvm::SmallVector<ObjCMethodDecl *, 4> FactoryMethods;
- /// \brief Build an ObjCMethodList from a vector of Objective-C method
- /// declarations.
- ObjCMethodList
- buildObjCMethodList(const SmallVectorImpl<ObjCMethodDecl *> &Vec) const
- {
- ObjCMethodList List;
- ObjCMethodList *Prev = 0;
- for (unsigned I = 0, N = Vec.size(); I != N; ++I) {
- if (!List.Method) {
- // This is the first method, which is the easy case.
- List.Method = Vec[I];
- Prev = &List;
- continue;
- }
-
- ObjCMethodList *Mem =
- Reader.getSema()->BumpAlloc.Allocate<ObjCMethodList>();
- Prev->Next = new (Mem) ObjCMethodList(Vec[I], 0);
- Prev = Prev->Next;
- }
-
- return List;
- }
-
public:
- ReadMethodPoolVisitor(ASTReader &Reader, Selector Sel)
- : Reader(Reader), Sel(Sel) { }
+ ReadMethodPoolVisitor(ASTReader &Reader, Selector Sel,
+ unsigned PriorGeneration)
+ : Reader(Reader), Sel(Sel), PriorGeneration(PriorGeneration) { }
- static bool visit(Module &M, void *UserData) {
+ static bool visit(ModuleFile &M, void *UserData) {
ReadMethodPoolVisitor *This
= static_cast<ReadMethodPoolVisitor *>(UserData);
if (!M.SelectorLookupTable)
return false;
+ // If we've already searched this module file, skip it now.
+ if (M.Generation <= This->PriorGeneration)
+ return true;
+
ASTSelectorLookupTable *PoolTable
= (ASTSelectorLookupTable*)M.SelectorLookupTable;
ASTSelectorLookupTable::iterator Pos = PoolTable->find(This->Sel);
@@ -4552,28 +5339,50 @@ namespace clang { namespace serialization {
}
/// \brief Retrieve the instance methods found by this visitor.
- ObjCMethodList getInstanceMethods() const {
- return buildObjCMethodList(InstanceMethods);
+ ArrayRef<ObjCMethodDecl *> getInstanceMethods() const {
+ return InstanceMethods;
}
/// \brief Retrieve the factory methods found by this visitor.
- ObjCMethodList getFactoryMethods() const {
- return buildObjCMethodList(FactoryMethods);
+ ArrayRef<ObjCMethodDecl *> getFactoryMethods() const {
+ return FactoryMethods;
}
};
} } // end namespace clang::serialization
-std::pair<ObjCMethodList, ObjCMethodList>
-ASTReader::ReadMethodPool(Selector Sel) {
- ReadMethodPoolVisitor Visitor(*this, Sel);
+/// \brief Add the given set of methods to the method list.
+static void addMethodsToPool(Sema &S, ArrayRef<ObjCMethodDecl *> Methods,
+ ObjCMethodList &List) {
+ for (unsigned I = 0, N = Methods.size(); I != N; ++I) {
+ S.addMethodToGlobalList(&List, Methods[I]);
+ }
+}
+
+void ASTReader::ReadMethodPool(Selector Sel) {
+ // Get the selector generation and update it to the current generation.
+ unsigned &Generation = SelectorGeneration[Sel];
+ unsigned PriorGeneration = Generation;
+ Generation = CurrentGeneration;
+
+ // Search for methods defined with this selector.
+ ReadMethodPoolVisitor Visitor(*this, Sel, PriorGeneration);
ModuleMgr.visit(&ReadMethodPoolVisitor::visit, &Visitor);
- std::pair<ObjCMethodList, ObjCMethodList> Result;
- Result.first = Visitor.getInstanceMethods();
- Result.second = Visitor.getFactoryMethods();
- if (!Result.first.Method && !Result.second.Method)
+ if (Visitor.getInstanceMethods().empty() &&
+ Visitor.getFactoryMethods().empty()) {
++NumMethodPoolMisses;
- return Result;
+ return;
+ }
+
+ if (!getSema())
+ return;
+
+ Sema &S = *getSema();
+ Sema::GlobalMethodPool::iterator Pos
+ = S.MethodPool.insert(std::make_pair(Sel, Sema::GlobalMethods())).first;
+
+ addMethodsToPool(S, Visitor.getInstanceMethods(), Pos->second.first);
+ addMethodsToPool(S, Visitor.getFactoryMethods(), Pos->second.second);
}
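
The generation counters above implement incremental re-querying: each module file records the generation at which it was loaded, and a selector query only searches files newer than the generation stamped at the previous query for that selector (the real visitor can cut the walk short because files are visited in generation order; the sketch below simply skips). A self-contained sketch of the pattern, with hypothetical Pool/Source names:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct Source { unsigned Generation; std::string Payload; };

    struct Pool {
      unsigned CurrentGeneration;
      std::vector<Source> Sources;
      std::map<std::string, unsigned> KeyGeneration;

      Pool() : CurrentGeneration(1) {}

      void addSource(const std::string &Payload) {
        ++CurrentGeneration;
        Source S = { CurrentGeneration, Payload };
        Sources.push_back(S);
      }

      void query(const std::string &Key) {
        unsigned &Gen = KeyGeneration[Key]; // 0 on the first query
        unsigned Prior = Gen;
        Gen = CurrentGeneration;
        for (size_t I = 0, N = Sources.size(); I != N; ++I) {
          if (Sources[I].Generation <= Prior)
            continue; // already searched on an earlier query
          std::printf("searching %s for %s\n",
                      Sources[I].Payload.c_str(), Key.c_str());
        }
      }
    };

    int main() {
      Pool P;
      P.addSource("module1");
      P.query("sel");          // searches module1
      P.addSource("module2");
      P.query("sel");          // searches only module2
      return 0;
    }
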
void ASTReader::ReadKnownNamespaces(
@@ -4755,13 +5564,10 @@ ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
for (unsigned I = 0, N = DeclIDs.size(); I != N; ++I) {
NamedDecl *D = cast<NamedDecl>(GetDecl(DeclIDs[I]));
if (SemaObj) {
- if (SemaObj->TUScope) {
- // Introduce this declaration into the translation-unit scope
- // and add it to the declaration chain for this identifier, so
- // that (unqualified) name lookup will find it.
- SemaObj->TUScope->AddDecl(D);
- }
- SemaObj->IdResolver.AddDeclToIdentifierChain(II, D);
+ // Introduce this declaration into the translation-unit scope
+ // and add it to the declaration chain for this identifier, so
+ // that (unqualified) name lookup will find it.
+ SemaObj->pushExternalDeclIntoScope(D, II);
} else {
// Queue this declaration so that it will be added to the
// translation unit scope and identifier's declaration chain
@@ -4784,7 +5590,7 @@ IdentifierInfo *ASTReader::DecodeIdentifierInfo(IdentifierID ID) {
if (!IdentifiersLoaded[ID]) {
GlobalIdentifierMapType::iterator I = GlobalIdentifierMap.find(ID + 1);
assert(I != GlobalIdentifierMap.end() && "Corrupted global identifier map");
- Module *M = I->second;
+ ModuleFile *M = I->second;
unsigned Index = ID - M->BaseIdentifierID;
const char *Str = M->IdentifierTableData + M->IdentifierOffsets[Index];
@@ -4805,11 +5611,11 @@ IdentifierInfo *ASTReader::DecodeIdentifierInfo(IdentifierID ID) {
return IdentifiersLoaded[ID];
}
-IdentifierInfo *ASTReader::getLocalIdentifier(Module &M, unsigned LocalID) {
+IdentifierInfo *ASTReader::getLocalIdentifier(ModuleFile &M, unsigned LocalID) {
return DecodeIdentifierInfo(getGlobalIdentifierID(M, LocalID));
}
-IdentifierID ASTReader::getGlobalIdentifierID(Module &M, unsigned LocalID) {
+IdentifierID ASTReader::getGlobalIdentifierID(ModuleFile &M, unsigned LocalID) {
if (LocalID < NUM_PREDEF_IDENT_IDS)
return LocalID;
@@ -4825,7 +5631,34 @@ bool ASTReader::ReadSLocEntry(int ID) {
return ReadSLocEntryRecord(ID) != Success;
}
-Selector ASTReader::getLocalSelector(Module &M, unsigned LocalID) {
+serialization::SubmoduleID
+ASTReader::getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID) {
+ if (LocalID < NUM_PREDEF_SUBMODULE_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.SubmoduleRemap.find(LocalID - NUM_PREDEF_SUBMODULE_IDS);
+ assert(I != M.SubmoduleRemap.end()
+ && "Invalid index into submodule index remap");
+
+ return LocalID + I->second;
+}
+
+Module *ASTReader::getSubmodule(SubmoduleID GlobalID) {
+ if (GlobalID < NUM_PREDEF_SUBMODULE_IDS) {
+ assert(GlobalID == 0 && "Unhandled global submodule ID");
+ return 0;
+ }
+
+ if (GlobalID > SubmodulesLoaded.size()) {
+ Error("submodule ID out of range in AST file");
+ return 0;
+ }
+
+ return SubmodulesLoaded[GlobalID - NUM_PREDEF_SUBMODULE_IDS];
+}
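
getGlobalSubmoduleID follows the same local-to-global scheme as the other ID kinds: predefined IDs pass through unchanged, and everything else is shifted by the offset found in a range map keyed by the first local ID of each block. A rough sketch of such a range-map lookup (a sorted vector approximates ContinuousRangeMap's behavior; this is an assumption for illustration, not its implementation):

    #include <algorithm>
    #include <cassert>
    #include <utility>
    #include <vector>

    enum { NUM_PREDEF_IDS = 1 }; // ID 0 is reserved ("no submodule")

    // Sorted (first-local-ID, offset) pairs; the entry with the largest key
    // not exceeding the adjusted local ID supplies the local-to-global offset.
    typedef std::vector<std::pair<unsigned, int> > RangeMap;

    struct FirstLess {
      bool operator()(const std::pair<unsigned, int> &L,
                      const std::pair<unsigned, int> &R) const {
        return L.first < R.first;
      }
    };

    unsigned getGlobalID(const RangeMap &Remap, unsigned LocalID) {
      if (LocalID < NUM_PREDEF_IDS)
        return LocalID; // predefined IDs are identical in every file
      unsigned Key = LocalID - NUM_PREDEF_IDS;
      RangeMap::const_iterator I = std::upper_bound(
          Remap.begin(), Remap.end(), std::make_pair(Key, 0), FirstLess());
      assert(I != Remap.begin() && "no remap entry covers this ID");
      --I; // last entry whose start is <= Key
      return LocalID + I->second;
    }

    int main() {
      RangeMap Remap;
      Remap.push_back(std::make_pair(0u, 100)); // this file's IDs shift up by 100
      assert(getGlobalID(Remap, 0) == 0);       // predefined ID passes through
      assert(getGlobalID(Remap, 1) == 101);
      return 0;
    }
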
+
+Selector ASTReader::getLocalSelector(ModuleFile &M, unsigned LocalID) {
return DecodeSelector(getGlobalSelectorID(M, LocalID));
}
@@ -4842,7 +5675,7 @@ Selector ASTReader::DecodeSelector(serialization::SelectorID ID) {
// Load this selector from the selector table.
GlobalSelectorMapType::iterator I = GlobalSelectorMap.find(ID);
assert(I != GlobalSelectorMap.end() && "Corrupted global selector map");
- Module &M = *I->second;
+ ModuleFile &M = *I->second;
ASTSelectorLookupTrait Trait(*this, M);
unsigned Idx = ID - M.BaseSelectorID - NUM_PREDEF_SELECTOR_IDS;
SelectorsLoaded[ID - 1] =
@@ -4864,7 +5697,7 @@ uint32_t ASTReader::GetNumExternalSelectors() {
}
serialization::SelectorID
-ASTReader::getGlobalSelectorID(Module &M, unsigned LocalID) const {
+ASTReader::getGlobalSelectorID(ModuleFile &M, unsigned LocalID) const {
if (LocalID < NUM_PREDEF_SELECTOR_IDS)
return LocalID;
@@ -4877,7 +5710,7 @@ ASTReader::getGlobalSelectorID(Module &M, unsigned LocalID) const {
}
DeclarationName
-ASTReader::ReadDeclarationName(Module &F,
+ASTReader::ReadDeclarationName(ModuleFile &F,
const RecordData &Record, unsigned &Idx) {
DeclarationName::NameKind Kind = (DeclarationName::NameKind)Record[Idx++];
switch (Kind) {
@@ -4913,11 +5746,10 @@ ASTReader::ReadDeclarationName(Module &F,
return DeclarationName::getUsingDirectiveName();
}
- // Required to silence GCC warning
- return DeclarationName();
+ llvm_unreachable("Invalid NameKind!");
}
-void ASTReader::ReadDeclarationNameLoc(Module &F,
+void ASTReader::ReadDeclarationNameLoc(ModuleFile &F,
DeclarationNameLoc &DNLoc,
DeclarationName Name,
const RecordData &Record, unsigned &Idx) {
@@ -4949,7 +5781,7 @@ void ASTReader::ReadDeclarationNameLoc(Module &F,
}
}
-void ASTReader::ReadDeclarationNameInfo(Module &F,
+void ASTReader::ReadDeclarationNameInfo(ModuleFile &F,
DeclarationNameInfo &NameInfo,
const RecordData &Record, unsigned &Idx) {
NameInfo.setName(ReadDeclarationName(F, Record, Idx));
@@ -4959,7 +5791,7 @@ void ASTReader::ReadDeclarationNameInfo(Module &F,
NameInfo.setInfo(DNLoc);
}
-void ASTReader::ReadQualifierInfo(Module &F, QualifierInfo &Info,
+void ASTReader::ReadQualifierInfo(ModuleFile &F, QualifierInfo &Info,
const RecordData &Record, unsigned &Idx) {
Info.QualifierLoc = ReadNestedNameSpecifierLoc(F, Record, Idx);
unsigned NumTPLists = Record[Idx++];
@@ -4972,7 +5804,7 @@ void ASTReader::ReadQualifierInfo(Module &F, QualifierInfo &Info,
}
TemplateName
-ASTReader::ReadTemplateName(Module &F, const RecordData &Record,
+ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
unsigned &Idx) {
TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++];
switch (Kind) {
@@ -5031,7 +5863,7 @@ ASTReader::ReadTemplateName(Module &F, const RecordData &Record,
}
TemplateArgument
-ASTReader::ReadTemplateArgument(Module &F,
+ASTReader::ReadTemplateArgument(ModuleFile &F,
const RecordData &Record, unsigned &Idx) {
TemplateArgument::ArgKind Kind = (TemplateArgument::ArgKind)Record[Idx++];
switch (Kind) {
@@ -5070,7 +5902,7 @@ ASTReader::ReadTemplateArgument(Module &F,
}
TemplateParameterList *
-ASTReader::ReadTemplateParameterList(Module &F,
+ASTReader::ReadTemplateParameterList(ModuleFile &F,
const RecordData &Record, unsigned &Idx) {
SourceLocation TemplateLoc = ReadSourceLocation(F, Record, Idx);
SourceLocation LAngleLoc = ReadSourceLocation(F, Record, Idx);
@@ -5091,7 +5923,7 @@ ASTReader::ReadTemplateParameterList(Module &F,
void
ASTReader::
ReadTemplateArgumentList(SmallVector<TemplateArgument, 8> &TemplArgs,
- Module &F, const RecordData &Record,
+ ModuleFile &F, const RecordData &Record,
unsigned &Idx) {
unsigned NumTemplateArgs = Record[Idx++];
TemplArgs.reserve(NumTemplateArgs);
@@ -5100,7 +5932,7 @@ ReadTemplateArgumentList(SmallVector<TemplateArgument, 8> &TemplArgs,
}
/// \brief Read a UnresolvedSet structure.
-void ASTReader::ReadUnresolvedSet(Module &F, UnresolvedSetImpl &Set,
+void ASTReader::ReadUnresolvedSet(ModuleFile &F, UnresolvedSetImpl &Set,
const RecordData &Record, unsigned &Idx) {
unsigned NumDecls = Record[Idx++];
while (NumDecls--) {
@@ -5111,7 +5943,7 @@ void ASTReader::ReadUnresolvedSet(Module &F, UnresolvedSetImpl &Set,
}
CXXBaseSpecifier
-ASTReader::ReadCXXBaseSpecifier(Module &F,
+ASTReader::ReadCXXBaseSpecifier(ModuleFile &F,
const RecordData &Record, unsigned &Idx) {
bool isVirtual = static_cast<bool>(Record[Idx++]);
bool isBaseOfClass = static_cast<bool>(Record[Idx++]);
@@ -5127,7 +5959,7 @@ ASTReader::ReadCXXBaseSpecifier(Module &F,
}
std::pair<CXXCtorInitializer **, unsigned>
-ASTReader::ReadCXXCtorInitializers(Module &F, const RecordData &Record,
+ASTReader::ReadCXXCtorInitializers(ModuleFile &F, const RecordData &Record,
unsigned &Idx) {
CXXCtorInitializer **CtorInitializers = 0;
unsigned NumInitializers = Record[Idx++];
@@ -5135,21 +5967,20 @@ ASTReader::ReadCXXCtorInitializers(Module &F, const RecordData &Record,
CtorInitializers
= new (Context) CXXCtorInitializer*[NumInitializers];
for (unsigned i=0; i != NumInitializers; ++i) {
- TypeSourceInfo *BaseClassInfo = 0;
+ TypeSourceInfo *TInfo = 0;
bool IsBaseVirtual = false;
FieldDecl *Member = 0;
IndirectFieldDecl *IndirectMember = 0;
- CXXConstructorDecl *Target = 0;
CtorInitializerType Type = (CtorInitializerType)Record[Idx++];
switch (Type) {
- case CTOR_INITIALIZER_BASE:
- BaseClassInfo = GetTypeSourceInfo(F, Record, Idx);
+ case CTOR_INITIALIZER_BASE:
+ TInfo = GetTypeSourceInfo(F, Record, Idx);
IsBaseVirtual = Record[Idx++];
break;
-
- case CTOR_INITIALIZER_DELEGATING:
- Target = ReadDeclAs<CXXConstructorDecl>(F, Record, Idx);
+
+ case CTOR_INITIALIZER_DELEGATING:
+ TInfo = GetTypeSourceInfo(F, Record, Idx);
break;
case CTOR_INITIALIZER_MEMBER:
@@ -5179,12 +6010,12 @@ ASTReader::ReadCXXCtorInitializers(Module &F, const RecordData &Record,
CXXCtorInitializer *BOMInit;
if (Type == CTOR_INITIALIZER_BASE) {
- BOMInit = new (Context) CXXCtorInitializer(Context, BaseClassInfo, IsBaseVirtual,
+ BOMInit = new (Context) CXXCtorInitializer(Context, TInfo, IsBaseVirtual,
LParenLoc, Init, RParenLoc,
MemberOrEllipsisLoc);
} else if (Type == CTOR_INITIALIZER_DELEGATING) {
- BOMInit = new (Context) CXXCtorInitializer(Context, MemberOrEllipsisLoc, LParenLoc,
- Target, Init, RParenLoc);
+ BOMInit = new (Context) CXXCtorInitializer(Context, TInfo, LParenLoc,
+ Init, RParenLoc);
} else if (IsWritten) {
if (Member)
BOMInit = new (Context) CXXCtorInitializer(Context, Member, MemberOrEllipsisLoc,
@@ -5209,7 +6040,7 @@ ASTReader::ReadCXXCtorInitializers(Module &F, const RecordData &Record,
}
NestedNameSpecifier *
-ASTReader::ReadNestedNameSpecifier(Module &F,
+ASTReader::ReadNestedNameSpecifier(ModuleFile &F,
const RecordData &Record, unsigned &Idx) {
unsigned N = Record[Idx++];
NestedNameSpecifier *NNS = 0, *Prev = 0;
@@ -5258,7 +6089,7 @@ ASTReader::ReadNestedNameSpecifier(Module &F,
}
NestedNameSpecifierLoc
-ASTReader::ReadNestedNameSpecifierLoc(Module &F, const RecordData &Record,
+ASTReader::ReadNestedNameSpecifierLoc(ModuleFile &F, const RecordData &Record,
unsigned &Idx) {
unsigned N = Record[Idx++];
NestedNameSpecifierLocBuilder Builder;
@@ -5314,7 +6145,7 @@ ASTReader::ReadNestedNameSpecifierLoc(Module &F, const RecordData &Record,
}
SourceRange
-ASTReader::ReadSourceRange(Module &F, const RecordData &Record,
+ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
unsigned &Idx) {
SourceLocation beg = ReadSourceLocation(F, Record, Idx);
SourceLocation end = ReadSourceLocation(F, Record, Idx);
@@ -5361,7 +6192,7 @@ VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
return VersionTuple(Major, Minor - 1, Subminor - 1);
}
-CXXTemporary *ASTReader::ReadCXXTemporary(Module &F,
+CXXTemporary *ASTReader::ReadCXXTemporary(ModuleFile &F,
const RecordData &Record,
unsigned &Idx) {
CXXDestructorDecl *Decl = ReadDeclAs<CXXDestructorDecl>(F, Record, Idx);
@@ -5399,10 +6230,8 @@ void ASTReader::ClearSwitchCaseIDs() {
SwitchCaseStmts.clear();
}
-void ASTReader::FinishedDeserializing() {
- assert(NumCurrentElementsDeserializing &&
- "FinishedDeserializing not paired with StartedDeserializing");
- if (NumCurrentElementsDeserializing == 1) {
+void ASTReader::finishPendingActions() {
+ while (!PendingIdentifierInfos.empty() || !PendingDeclChains.empty()) {
// If any identifiers with corresponding top-level declarations have
// been loaded, load those declarations now.
while (!PendingIdentifierInfos.empty()) {
@@ -5410,35 +6239,110 @@ void ASTReader::FinishedDeserializing() {
PendingIdentifierInfos.front().DeclIDs, true);
PendingIdentifierInfos.pop_front();
}
-
- // Ready to load previous declarations of Decls that were delayed.
- while (!PendingPreviousDecls.empty()) {
- loadAndAttachPreviousDecl(PendingPreviousDecls.front().first,
- PendingPreviousDecls.front().second);
- PendingPreviousDecls.pop_front();
+
+ // Load pending declaration chains.
+ for (unsigned I = 0; I != PendingDeclChains.size(); ++I) {
+ loadPendingDeclChain(PendingDeclChains[I]);
+ PendingDeclChainsKnown.erase(PendingDeclChains[I]);
}
+ PendingDeclChains.clear();
+ }
+
+ // If we deserialized any C++ or Objective-C class definitions, any
+ // Objective-C protocol definitions, or any redeclarable templates, make sure
+ // that all redeclarations point to the definitions. Note that this can only
+ // happen now, after the redeclaration chains have been fully wired.
+ for (llvm::SmallPtrSet<Decl *, 4>::iterator D = PendingDefinitions.begin(),
+ DEnd = PendingDefinitions.end();
+ D != DEnd; ++D) {
+ if (TagDecl *TD = dyn_cast<TagDecl>(*D)) {
+ if (const TagType *TagT = dyn_cast<TagType>(TD->TypeForDecl)) {
+ // Make sure that the TagType points at the definition.
+ const_cast<TagType*>(TagT)->decl = TD;
+ }
+
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(*D)) {
+ for (CXXRecordDecl::redecl_iterator R = RD->redecls_begin(),
+ REnd = RD->redecls_end();
+ R != REnd; ++R)
+ cast<CXXRecordDecl>(*R)->DefinitionData = RD->DefinitionData;
+
+ }
- // We are not in recursive loading, so it's safe to pass the "interesting"
- // decls to the consumer.
- if (Consumer)
- PassInterestingDeclsToConsumer();
+ continue;
+ }
+
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(*D)) {
+ // Make sure that the ObjCInterfaceType points at the definition.
+ const_cast<ObjCInterfaceType *>(cast<ObjCInterfaceType>(ID->TypeForDecl))
+ ->Decl = ID;
+
+ for (ObjCInterfaceDecl::redecl_iterator R = ID->redecls_begin(),
+ REnd = ID->redecls_end();
+ R != REnd; ++R)
+ R->Data = ID->Data;
+
+ continue;
+ }
+
+ if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(*D)) {
+ for (ObjCProtocolDecl::redecl_iterator R = PD->redecls_begin(),
+ REnd = PD->redecls_end();
+ R != REnd; ++R)
+ R->Data = PD->Data;
+
+ continue;
+ }
+
+ RedeclarableTemplateDecl *RTD
+ = cast<RedeclarableTemplateDecl>(*D)->getCanonicalDecl();
+ for (RedeclarableTemplateDecl::redecl_iterator R = RTD->redecls_begin(),
+ REnd = RTD->redecls_end();
+ R != REnd; ++R)
+ R->Common = RTD->Common;
+ }
+ PendingDefinitions.clear();
+}
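
The PendingDefinitions loop relies on redeclarations forming a traversable chain: once one declaration in the chain has definition data, every other declaration is pointed at that same data. A toy sketch of the propagation over a circular redeclaration list:

    #include <cassert>

    struct Def { int Members; };

    struct Decl {
      Def *Data;
      Decl *Next; // circular redeclaration list
      Decl() : Data(0), Next(this) {}
    };

    // Point every other declaration of the entity at the definition's data,
    // as the PendingDefinitions loop above does for each decl kind.
    void propagateDefinition(Decl *Definition) {
      for (Decl *D = Definition->Next; D != Definition; D = D->Next)
        D->Data = Definition->Data;
    }

    int main() {
      Decl A, B, C;                 // three redeclarations of one entity
      A.Next = &B; B.Next = &C; C.Next = &A;

      Def TheDef = { 3 };
      B.Data = &TheDef;             // B was deserialized as the definition
      propagateDefinition(&B);

      assert(A.Data == &TheDef && C.Data == &TheDef);
      return 0;
    }
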
- assert(PendingForwardRefs.size() == 0 &&
- "Some forward refs did not get linked to the definition!");
+void ASTReader::FinishedDeserializing() {
+ assert(NumCurrentElementsDeserializing &&
+ "FinishedDeserializing not paired with StartedDeserializing");
+ if (NumCurrentElementsDeserializing == 1) {
+ // We decrease NumCurrentElementsDeserializing only after pending actions
+ // are finished, to avoid recursively re-calling finishPendingActions().
+ finishPendingActions();
}
--NumCurrentElementsDeserializing;
+
+ if (NumCurrentElementsDeserializing == 0 &&
+ Consumer && !PassingDeclsToConsumer) {
+ // Guard variable to avoid recursively redoing the process of passing
+ // decls to consumer.
+ SaveAndRestore<bool> GuardPassingDeclsToConsumer(PassingDeclsToConsumer,
+ true);
+
+ while (!InterestingDecls.empty()) {
+ // We are not in recursive loading, so it's safe to pass the "interesting"
+ // decls to the consumer.
+ Decl *D = InterestingDecls.front();
+ InterestingDecls.pop_front();
+ PassInterestingDeclToConsumer(D);
+ }
+ }
}
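
The PassingDeclsToConsumer flag is a reentrancy guard: handing a decl to the consumer may trigger more deserialization, which would otherwise re-enter the drain loop. A compact sketch of the SaveAndRestore-style guard (simplified names and signatures, not the LLVM utility itself):

    #include <cstdio>
    #include <deque>

    // Records the old value and restores it on scope exit, so a recursive
    // call observes the flag and returns early.
    template <typename T>
    struct SaveAndRestore {
      T &Ref;
      T Old;
      SaveAndRestore(T &R, T NewValue) : Ref(R), Old(R) { R = NewValue; }
      ~SaveAndRestore() { Ref = Old; }
    };

    static bool Passing = false;
    static std::deque<int> Interesting;

    void passToConsumer(int D) {
      std::printf("consumed %d\n", D);
      // Consuming a decl may deserialize more and re-enter drainInteresting().
    }

    void drainInteresting() {
      if (Passing)
        return; // re-entered from the consumer; the outer loop drains the queue
      SaveAndRestore<bool> Guard(Passing, true);
      while (!Interesting.empty()) {
        int D = Interesting.front();
        Interesting.pop_front();
        passToConsumer(D);
      }
    }

    int main() {
      Interesting.push_back(1);
      Interesting.push_back(2);
      drainInteresting();
      return 0;
    }
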
ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
StringRef isysroot, bool DisableValidation,
- bool DisableStatCache)
+ bool DisableStatCache, bool AllowASTWithCompilerErrors)
: Listener(new PCHValidator(PP, *this)), DeserializationListener(0),
SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
Diags(PP.getDiagnostics()), SemaObj(0), PP(PP), Context(Context),
Consumer(0), ModuleMgr(FileMgr.getFileSystemOptions()),
RelocatablePCH(false), isysroot(isysroot),
DisableValidation(DisableValidation),
- DisableStatCache(DisableStatCache), NumStatHits(0), NumStatMisses(0),
+ DisableStatCache(DisableStatCache),
+ AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
+ CurrentGeneration(0), NumStatHits(0), NumStatMisses(0),
NumSLocEntriesRead(0), TotalNumSLocEntries(0),
NumStatementsRead(0), TotalNumStatements(0), NumMacrosRead(0),
TotalNumMacros(0), NumSelectorsRead(0), NumMethodPoolEntriesRead(0),
@@ -5446,6 +6350,7 @@ ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
NumLexicalDeclContextsRead(0), TotalLexicalDeclContexts(0),
NumVisibleDeclContextsRead(0), TotalVisibleDeclContexts(0),
TotalModulesSizeInBits(0), NumCurrentElementsDeserializing(0),
+ PassingDeclsToConsumer(false),
NumCXXBaseSpecifiersLoaded(0)
{
SourceMgr.setExternalSLocEntrySource(this);
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
index 6cc3f0a..5db5f92 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -14,6 +14,8 @@
#include "ASTCommon.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Sema/IdentifierResolver.h"
+#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
@@ -32,9 +34,10 @@ using namespace clang::serialization;
namespace clang {
class ASTDeclReader : public DeclVisitor<ASTDeclReader, void> {
ASTReader &Reader;
- Module &F;
+ ModuleFile &F;
llvm::BitstreamCursor &Cursor;
const DeclID ThisDeclID;
+ const unsigned RawLocation;
typedef ASTReader::RecordData RecordData;
const RecordData &Record;
unsigned &Idx;
@@ -85,24 +88,123 @@ namespace clang {
Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
}
+ serialization::SubmoduleID readSubmoduleID(const RecordData &R,
+ unsigned &I) {
+ if (I >= R.size())
+ return 0;
+
+ return Reader.getGlobalSubmoduleID(F, R[I++]);
+ }
+
+ Module *readModule(const RecordData &R, unsigned &I) {
+ return Reader.getSubmodule(readSubmoduleID(R, I));
+ }
+
void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data,
const RecordData &R, unsigned &I);
- void InitializeCXXDefinitionData(CXXRecordDecl *D,
- CXXRecordDecl *DefinitionDecl,
- const RecordData &Record, unsigned &Idx);
+ /// \brief RAII class used to capture the first ID within a redeclaration
+ /// chain and to introduce it into the list of pending redeclaration chains
+ /// on destruction.
+ ///
+ /// The caller can choose not to introduce this ID into the redeclaration
+ /// chain by calling \c suppress().
+ class RedeclarableResult {
+ ASTReader &Reader;
+ GlobalDeclID FirstID;
+ mutable bool Owning;
+
+ RedeclarableResult &operator=(RedeclarableResult&); // DO NOT IMPLEMENT
+
+ public:
+ RedeclarableResult(ASTReader &Reader, GlobalDeclID FirstID)
+ : Reader(Reader), FirstID(FirstID), Owning(true) { }
+
+ RedeclarableResult(const RedeclarableResult &Other)
+ : Reader(Other.Reader), FirstID(Other.FirstID), Owning(Other.Owning)
+ {
+ Other.Owning = false;
+ }
+
+ ~RedeclarableResult() {
+ // FIXME: We want to suppress this when the declaration is local to
+ // a function, since there's no reason to search other AST files
+ // for redeclarations (they can't exist). However, this is hard to
+ // do locally because the declaration hasn't necessarily loaded its
+ // declaration context yet. Also, local externs still have the function
+ // as their (semantic) declaration context, which is wrong and would
+ // break this optimization.
+
+ if (FirstID && Owning && Reader.PendingDeclChainsKnown.insert(FirstID))
+ Reader.PendingDeclChains.push_back(FirstID);
+ }
+
+ /// \brief Retrieve the first ID.
+ GlobalDeclID getFirstID() const { return FirstID; }
+
+ /// \brief Do not introduce this declaration ID into the set of pending
+ /// declaration chains.
+ void suppress() {
+ Owning = false;
+ }
+ };
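
RedeclarableResult is a small RAII device: the destructor queues the chain exactly once, copies steal ownership via the mutable flag, and suppress() cancels the deferred work. A generic sketch of the same transfer-on-copy pattern, with a hypothetical DeferredAction class:

    #include <cstdio>

    class DeferredAction {
      int ID;
      mutable bool Owning;

    public:
      explicit DeferredAction(int ID) : ID(ID), Owning(true) {}
      DeferredAction(const DeferredAction &Other)
          : ID(Other.ID), Owning(Other.Owning) {
        Other.Owning = false; // transfer ownership; only one copy fires
      }
      ~DeferredAction() {
        if (Owning)
          std::printf("queue chain %d\n", ID); // the deferred action
      }
      void suppress() { Owning = false; }

    private:
      DeferredAction &operator=(const DeferredAction &); // not implemented
    };

    DeferredAction make() { return DeferredAction(42); }

    int main() {
      DeferredAction A = make(); // the temporary's ownership moved into A
      DeferredAction B(7);
      B.suppress();              // B's action never fires
      return 0;                  // prints "queue chain 42" exactly once
    }
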
+
+ /// \brief Class used to capture the result of searching for an existing
+ /// declaration of a specific kind and name, along with the ability
+ /// to update the place where this result was found (the declaration
+ /// chain hanging off an identifier or the DeclContext we searched in)
+ /// if requested.
+ class FindExistingResult {
+ ASTReader &Reader;
+ NamedDecl *New;
+ NamedDecl *Existing;
+ mutable bool AddResult;
+
+ FindExistingResult &operator=(FindExistingResult&); // DO NOT IMPLEMENT
+
+ public:
+ FindExistingResult(ASTReader &Reader)
+ : Reader(Reader), New(0), Existing(0), AddResult(false) { }
+
+ FindExistingResult(ASTReader &Reader, NamedDecl *New, NamedDecl *Existing)
+ : Reader(Reader), New(New), Existing(Existing), AddResult(true) { }
+
+ FindExistingResult(const FindExistingResult &Other)
+ : Reader(Other.Reader), New(Other.New), Existing(Other.Existing),
+ AddResult(Other.AddResult)
+ {
+ Other.AddResult = false;
+ }
+
+ ~FindExistingResult();
+
+ /// \brief Suppress the addition of this result into the known set of
+ /// names.
+ void suppress() { AddResult = false; }
+
+ operator NamedDecl*() const { return Existing; }
+
+ template<typename T>
+ operator T*() const { return dyn_cast_or_null<T>(Existing); }
+ };
+
+ FindExistingResult findExisting(NamedDecl *D);
+
public:
- ASTDeclReader(ASTReader &Reader, Module &F,
+ ASTDeclReader(ASTReader &Reader, ModuleFile &F,
llvm::BitstreamCursor &Cursor, DeclID thisDeclID,
+ unsigned RawLocation,
const RecordData &Record, unsigned &Idx)
: Reader(Reader), F(F), Cursor(Cursor), ThisDeclID(thisDeclID),
- Record(Record), Idx(Idx), TypeIDForTypeDecl(0) { }
+ RawLocation(RawLocation), Record(Record), Idx(Idx),
+ TypeIDForTypeDecl(0) { }
static void attachPreviousDecl(Decl *D, Decl *previous);
+ static void attachLatestDecl(Decl *D, Decl *latest);
void Visit(Decl *D);
- void UpdateDecl(Decl *D, Module &Module,
+ void UpdateDecl(Decl *D, ModuleFile &ModuleFile,
const RecordData &Record);
static void setNextObjCCategory(ObjCCategoryDecl *Cat,
@@ -118,6 +220,7 @@ namespace clang {
void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
void VisitTypeDecl(TypeDecl *TD);
+ void VisitTypedefNameDecl(TypedefNameDecl *TD);
void VisitTypedefDecl(TypedefDecl *TD);
void VisitTypeAliasDecl(TypeAliasDecl *TD);
void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
@@ -148,7 +251,7 @@ namespace clang {
void VisitParmVarDecl(ParmVarDecl *PD);
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
void VisitTemplateDecl(TemplateDecl *D);
- void VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
+ RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
@@ -157,6 +260,7 @@ namespace clang {
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitLinkageSpecDecl(LinkageSpecDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD);
+ void VisitImportDecl(ImportDecl *D);
void VisitAccessSpecDecl(AccessSpecDecl *D);
void VisitFriendDecl(FriendDecl *D);
void VisitFriendTemplateDecl(FriendTemplateDecl *D);
@@ -164,8 +268,13 @@ namespace clang {
void VisitBlockDecl(BlockDecl *BD);
std::pair<uint64_t, uint64_t> VisitDeclContext(DeclContext *DC);
- template <typename T> void VisitRedeclarable(Redeclarable<T> *D);
+
+ template<typename T>
+ RedeclarableResult VisitRedeclarable(Redeclarable<T> *D);
+ template<typename T>
+ void mergeRedeclarable(Redeclarable<T> *D, RedeclarableResult &Redecl);
+
// FIXME: Reorder according to DeclNodes.td?
void VisitObjCMethodDecl(ObjCMethodDecl *D);
void VisitObjCContainerDecl(ObjCContainerDecl *D);
@@ -173,8 +282,6 @@ namespace clang {
void VisitObjCIvarDecl(ObjCIvarDecl *D);
void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D);
- void VisitObjCClassDecl(ObjCClassDecl *D);
- void VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
void VisitObjCImplDecl(ObjCImplDecl *D);
void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
@@ -207,6 +314,9 @@ void ASTDeclReader::Visit(Decl *D) {
if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
// if we have a fully initialized TypeDecl, we can safely read its type now.
TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull());
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ // if we have a fully initialized ObjCInterfaceDecl, we can safely read its type now.
+ ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull();
} else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// FunctionDecl's body was written last after all other Stmts/Exprs.
if (Record[Idx++])
@@ -214,12 +324,11 @@ void ASTDeclReader::Visit(Decl *D) {
} else if (D->isTemplateParameter()) {
// If we have a fully initialized template parameter, we can now
// set its DeclContext.
- D->setDeclContext(
- cast_or_null<DeclContext>(
- Reader.GetDecl(DeclContextIDForTemplateParmDecl)));
- D->setLexicalDeclContext(
- cast_or_null<DeclContext>(
- Reader.GetDecl(LexicalDeclContextIDForTemplateParmDecl)));
+ DeclContext *SemaDC = cast<DeclContext>(
+ Reader.GetDecl(DeclContextIDForTemplateParmDecl));
+ DeclContext *LexicalDC = cast<DeclContext>(
+ Reader.GetDecl(LexicalDeclContextIDForTemplateParmDecl));
+ D->setDeclContextsImpl(SemaDC, LexicalDC, Reader.getContext());
}
}
@@ -233,22 +342,50 @@ void ASTDeclReader::VisitDecl(Decl *D) {
LexicalDeclContextIDForTemplateParmDecl = ReadDeclID(Record, Idx);
D->setDeclContext(Reader.getContext().getTranslationUnitDecl());
} else {
- D->setDeclContext(ReadDeclAs<DeclContext>(Record, Idx));
- D->setLexicalDeclContext(ReadDeclAs<DeclContext>(Record, Idx));
+ DeclContext *SemaDC = ReadDeclAs<DeclContext>(Record, Idx);
+ DeclContext *LexicalDC = ReadDeclAs<DeclContext>(Record, Idx);
+ // Avoid calling setLexicalDeclContext() directly because it uses
+ // Decl::getASTContext() internally, which is unsafe during deserialization.
+ D->setDeclContextsImpl(SemaDC, LexicalDC, Reader.getContext());
}
- D->setLocation(ReadSourceLocation(Record, Idx));
+ D->setLocation(Reader.ReadSourceLocation(F, RawLocation));
D->setInvalidDecl(Record[Idx++]);
if (Record[Idx++]) { // hasAttrs
AttrVec Attrs;
Reader.ReadAttributes(F, Attrs, Record, Idx);
- D->setAttrs(Attrs);
+ // Avoid calling setAttrs() directly because it uses Decl::getASTContext()
+ // internally, which is unsafe during deserialization.
+ D->setAttrsImpl(Attrs, Reader.getContext());
}
D->setImplicit(Record[Idx++]);
D->setUsed(Record[Idx++]);
D->setReferenced(Record[Idx++]);
+ D->setTopLevelDeclInObjCContainer(Record[Idx++]);
D->setAccess((AccessSpecifier)Record[Idx++]);
D->FromASTFile = true;
- D->ModulePrivate = Record[Idx++];
+ D->setModulePrivate(Record[Idx++]);
+ D->Hidden = D->isModulePrivate();
+
+ // Determine whether this declaration is part of a (sub)module. If so, it
+ // may not yet be visible.
+ if (unsigned SubmoduleID = readSubmoduleID(Record, Idx)) {
+ // Store the owning submodule ID in the declaration.
+ D->setOwningModuleID(SubmoduleID);
+
+ // Module-private declarations are never visible, so there is no work to do.
+ if (!D->isModulePrivate()) {
+ if (Module *Owner = Reader.getSubmodule(SubmoduleID)) {
+ if (Owner->NameVisibility != Module::AllVisible) {
+ // The owning module is not visible. Mark this declaration as hidden.
+ D->Hidden = true;
+
+ // Note that this declaration was hidden because its owning module is
+ // not yet visible.
+ Reader.HiddenNamesMap[Owner].push_back(D);
+ }
+ }
+ }
+ }
}
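
The HiddenNamesMap bookkeeping above defers visibility: a declaration owned by a module that has not been made visible is parked against that module and only surfaces once the module is imported. A toy model of that flow (Mod, addDecl, and makeVisible are illustrative names only):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct Mod { bool Visible; };

    static std::map<Mod *, std::vector<std::string> > HiddenNames;

    void addDecl(Mod *Owner, const std::string &Name) {
      if (Owner && !Owner->Visible) {
        HiddenNames[Owner].push_back(Name); // hidden until the module is imported
        return;
      }
      std::printf("visible now: %s\n", Name.c_str());
    }

    void makeVisible(Mod *M) {
      M->Visible = true;
      std::vector<std::string> &Names = HiddenNames[M];
      for (size_t I = 0, N = Names.size(); I != N; ++I)
        std::printf("visible now: %s\n", Names[I].c_str());
      Names.clear();
    }

    int main() {
      Mod M = { false };
      addDecl(&M, "foo");  // parked until M is visible
      addDecl(0, "bar");   // no owning module: visible immediately
      makeVisible(&M);     // "foo" surfaces here
      return 0;
    }
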
void ASTDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) {
@@ -267,31 +404,41 @@ void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
}
-void ASTDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
+void ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
+ RedeclarableResult Redecl = VisitRedeclarable(TD);
VisitTypeDecl(TD);
+
TD->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ mergeRedeclarable(TD, Redecl);
+}
+
+void ASTDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
+ VisitTypedefNameDecl(TD);
}
void ASTDeclReader::VisitTypeAliasDecl(TypeAliasDecl *TD) {
- VisitTypeDecl(TD);
- TD->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ VisitTypedefNameDecl(TD);
}
void ASTDeclReader::VisitTagDecl(TagDecl *TD) {
+ RedeclarableResult Redecl = VisitRedeclarable(TD);
VisitTypeDecl(TD);
- VisitRedeclarable(TD);
+
TD->IdentifierNamespace = Record[Idx++];
TD->setTagKind((TagDecl::TagKind)Record[Idx++]);
TD->setCompleteDefinition(Record[Idx++]);
TD->setEmbeddedInDeclarator(Record[Idx++]);
TD->setFreeStanding(Record[Idx++]);
TD->setRBraceLoc(ReadSourceLocation(Record, Idx));
+
if (Record[Idx++]) { // hasExtInfo
TagDecl::ExtInfo *Info = new (Reader.getContext()) TagDecl::ExtInfo();
ReadQualifierInfo(*Info, Record, Idx);
TD->TypedefNameDeclOrQualifier = Info;
} else
TD->setTypedefNameForAnonDecl(ReadDeclAs<TypedefNameDecl>(Record, Idx));
+
+ mergeRedeclarable(TD, Redecl);
}
void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
@@ -306,7 +453,13 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
ED->IsScoped = Record[Idx++];
ED->IsScopedUsingClassTag = Record[Idx++];
ED->IsFixed = Record[Idx++];
- ED->setInstantiationOfMemberEnum(ReadDeclAs<EnumDecl>(Record, Idx));
+
+ if (EnumDecl *InstED = ReadDeclAs<EnumDecl>(Record, Idx)) {
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ ED->setInstantiationOfMemberEnum(Reader.getContext(), InstED, TSK);
+ ED->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ }
}
void ASTDeclReader::VisitRecordDecl(RecordDecl *RD) {
@@ -340,14 +493,34 @@ void ASTDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) {
}
void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
+ RedeclarableResult Redecl = VisitRedeclarable(FD);
VisitDeclaratorDecl(FD);
- VisitRedeclarable(FD);
ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName(), Record, Idx);
FD->IdentifierNamespace = Record[Idx++];
+
+ // FunctionDecl's body is handled last at ASTDeclReader::Visit,
+ // after everything else is read.
+
+ FD->SClass = (StorageClass)Record[Idx++];
+ FD->SClassAsWritten = (StorageClass)Record[Idx++];
+ FD->IsInline = Record[Idx++];
+ FD->IsInlineSpecified = Record[Idx++];
+ FD->IsVirtualAsWritten = Record[Idx++];
+ FD->IsPure = Record[Idx++];
+ FD->HasInheritedPrototype = Record[Idx++];
+ FD->HasWrittenPrototype = Record[Idx++];
+ FD->IsDeleted = Record[Idx++];
+ FD->IsTrivial = Record[Idx++];
+ FD->IsDefaulted = Record[Idx++];
+ FD->IsExplicitlyDefaulted = Record[Idx++];
+ FD->HasImplicitReturnZero = Record[Idx++];
+ FD->IsConstexpr = Record[Idx++];
+ FD->EndRangeLoc = ReadSourceLocation(Record, Idx);
+
switch ((FunctionDecl::TemplatedKind)Record[Idx++]) {
- default: llvm_unreachable("Unhandled TemplatedKind!");
case FunctionDecl::TK_NonTemplate:
+ mergeRedeclarable(FD, Redecl);
break;
case FunctionDecl::TK_FunctionTemplate:
FD->setDescribedFunctionTemplate(ReadDeclAs<FunctionTemplateDecl>(Record,
@@ -441,25 +614,6 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
}
}
- // FunctionDecl's body is handled last at ASTDeclReader::Visit,
- // after everything else is read.
-
- FD->SClass = (StorageClass)Record[Idx++];
- FD->SClassAsWritten = (StorageClass)Record[Idx++];
- FD->IsInline = Record[Idx++];
- FD->IsInlineSpecified = Record[Idx++];
- FD->IsVirtualAsWritten = Record[Idx++];
- FD->IsPure = Record[Idx++];
- FD->HasInheritedPrototype = Record[Idx++];
- FD->HasWrittenPrototype = Record[Idx++];
- FD->IsDeleted = Record[Idx++];
- FD->IsTrivial = Record[Idx++];
- FD->IsDefaulted = Record[Idx++];
- FD->IsExplicitlyDefaulted = Record[Idx++];
- FD->HasImplicitReturnZero = Record[Idx++];
- FD->IsConstexpr = Record[Idx++];
- FD->EndRangeLoc = ReadSourceLocation(Record, Idx);
-
// Read in the parameters.
unsigned NumParams = Record[Idx++];
SmallVector<ParmVarDecl *, 16> Params;
@@ -518,46 +672,60 @@ void ASTDeclReader::VisitObjCContainerDecl(ObjCContainerDecl *CD) {
}
void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
+ RedeclarableResult Redecl = VisitRedeclarable(ID);
VisitObjCContainerDecl(ID);
- ID->setTypeForDecl(Reader.readType(F, Record, Idx).getTypePtrOrNull());
- ID->setSuperClass(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
-
- // Read the directly referenced protocols and their SourceLocations.
- unsigned NumProtocols = Record[Idx++];
- SmallVector<ObjCProtocolDecl *, 16> Protocols;
- Protocols.reserve(NumProtocols);
- for (unsigned I = 0; I != NumProtocols; ++I)
- Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
- SmallVector<SourceLocation, 16> ProtoLocs;
- ProtoLocs.reserve(NumProtocols);
- for (unsigned I = 0; I != NumProtocols; ++I)
- ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
- ID->setProtocolList(Protocols.data(), NumProtocols, ProtoLocs.data(),
- Reader.getContext());
+ TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
+ mergeRedeclarable(ID, Redecl);
- // Read the transitive closure of protocols referenced by this class.
- NumProtocols = Record[Idx++];
- Protocols.clear();
- Protocols.reserve(NumProtocols);
- for (unsigned I = 0; I != NumProtocols; ++I)
- Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
- ID->AllReferencedProtocols.set(Protocols.data(), NumProtocols,
- Reader.getContext());
+ if (Record[Idx++]) {
+ // Read the definition.
+ ID->allocateDefinitionData();
+
+ // Set the definition data of the canonical declaration, so other
+ // redeclarations will see it.
+ ID->getCanonicalDecl()->Data = ID->Data;
+
+ ObjCInterfaceDecl::DefinitionData &Data = ID->data();
+
+ // Read the superclass.
+ Data.SuperClass = ReadDeclAs<ObjCInterfaceDecl>(Record, Idx);
+ Data.SuperClassLoc = ReadSourceLocation(Record, Idx);
+
+ Data.EndLoc = ReadSourceLocation(Record, Idx);
+
+ // Read the directly referenced protocols and their SourceLocations.
+ unsigned NumProtocols = Record[Idx++];
+ SmallVector<ObjCProtocolDecl *, 16> Protocols;
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ ID->setProtocolList(Protocols.data(), NumProtocols, ProtoLocs.data(),
+ Reader.getContext());
- // Read the ivars.
- unsigned NumIvars = Record[Idx++];
- SmallVector<ObjCIvarDecl *, 16> IVars;
- IVars.reserve(NumIvars);
- for (unsigned I = 0; I != NumIvars; ++I)
- IVars.push_back(ReadDeclAs<ObjCIvarDecl>(Record, Idx));
- ID->setCategoryList(ReadDeclAs<ObjCCategoryDecl>(Record, Idx));
+ // Read the transitive closure of protocols referenced by this class.
+ NumProtocols = Record[Idx++];
+ Protocols.clear();
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ ID->data().AllReferencedProtocols.set(Protocols.data(), NumProtocols,
+ Reader.getContext());
- // We will rebuild this list lazily.
- ID->setIvarList(0);
- ID->setForwardDecl(Record[Idx++]);
- ID->setImplicitInterfaceDecl(Record[Idx++]);
- ID->setSuperClassLoc(ReadSourceLocation(Record, Idx));
- ID->setLocEnd(ReadSourceLocation(Record, Idx));
+ // We will rebuild this list lazily.
+ ID->setIvarList(0);
+
+ // Note that we have deserialized a definition.
+ Reader.PendingDefinitions.insert(ID);
+
+ // Note that we've loaded this Objective-C class.
+ Reader.ObjCClassesLoaded.push_back(ID);
+ } else {
+ ID->Data = ID->getCanonicalDecl()->Data;
+ }
}
void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
@@ -570,50 +738,52 @@ void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
}
void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
+ RedeclarableResult Redecl = VisitRedeclarable(PD);
VisitObjCContainerDecl(PD);
- PD->setForwardDecl(Record[Idx++]);
- PD->setLocEnd(ReadSourceLocation(Record, Idx));
- unsigned NumProtoRefs = Record[Idx++];
- SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
- ProtoRefs.reserve(NumProtoRefs);
- for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
- SmallVector<SourceLocation, 16> ProtoLocs;
- ProtoLocs.reserve(NumProtoRefs);
- for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
- PD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
- Reader.getContext());
+ mergeRedeclarable(PD, Redecl);
+
+ if (Record[Idx++]) {
+ // Read the definition.
+ PD->allocateDefinitionData();
+
+ // Set the definition data of the canonical declaration, so other
+ // redeclarations will see it.
+ PD->getCanonicalDecl()->Data = PD->Data;
+
+ unsigned NumProtoRefs = Record[Idx++];
+ SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
+ ProtoRefs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ PD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
+ Reader.getContext());
+
+ // Note that we have deserialized a definition.
+ Reader.PendingDefinitions.insert(PD);
+ } else {
+ PD->Data = PD->getCanonicalDecl()->Data;
+ }
}
void ASTDeclReader::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *FD) {
VisitFieldDecl(FD);
}
-void ASTDeclReader::VisitObjCClassDecl(ObjCClassDecl *CD) {
- VisitDecl(CD);
- ObjCInterfaceDecl *ClassRef = ReadDeclAs<ObjCInterfaceDecl>(Record, Idx);
- SourceLocation SLoc = ReadSourceLocation(Record, Idx);
- CD->setClass(Reader.getContext(), ClassRef, SLoc);
-}
-
-void ASTDeclReader::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *FPD) {
- VisitDecl(FPD);
- unsigned NumProtoRefs = Record[Idx++];
- SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
- ProtoRefs.reserve(NumProtoRefs);
- for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
- SmallVector<SourceLocation, 16> ProtoLocs;
- ProtoLocs.reserve(NumProtoRefs);
- for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
- FPD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
- Reader.getContext());
-}
-
void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
VisitObjCContainerDecl(CD);
+ CD->setCategoryNameLoc(ReadSourceLocation(Record, Idx));
+ CD->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
+ CD->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
+
+ // Note that this category has been deserialized. We do this before
+ // deserializing the interface declaration, so that it will consider this
+ // category.
+ Reader.CategoriesDeserialized.insert(CD);
+
CD->ClassInterface = ReadDeclAs<ObjCInterfaceDecl>(Record, Idx);
unsigned NumProtoRefs = Record[Idx++];
SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
@@ -626,9 +796,7 @@ void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
Reader.getContext());
- CD->NextClassCategory = ReadDeclAs<ObjCCategoryDecl>(Record, Idx);
CD->setHasSynthBitfield(Record[Idx++]);
- CD->setCategoryNameLoc(ReadSourceLocation(Record, Idx));
}
void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
@@ -639,6 +807,7 @@ void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
void ASTDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
VisitNamedDecl(D);
D->setAtLoc(ReadSourceLocation(Record, Idx));
+ D->setLParenLoc(ReadSourceLocation(Record, Idx));
D->setType(GetTypeSourceInfo(Record, Idx));
// FIXME: stable encoding
D->setPropertyAttributes(
@@ -663,11 +832,14 @@ void ASTDeclReader::VisitObjCImplDecl(ObjCImplDecl *D) {
void ASTDeclReader::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
VisitObjCImplDecl(D);
D->setIdentifier(Reader.GetIdentifierInfo(F, Record, Idx));
+ D->CategoryNameLoc = ReadSourceLocation(Record, Idx);
}
void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
VisitObjCImplDecl(D);
D->setSuperClass(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+ D->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
+ D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
llvm::tie(D->IvarInitializers, D->NumIvarInitializers)
= Reader.ReadCXXCtorInitializers(F, Record, Idx);
D->setHasSynthBitfield(Record[Idx++]);
@@ -710,18 +882,30 @@ void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
}
void ASTDeclReader::VisitVarDecl(VarDecl *VD) {
+ RedeclarableResult Redecl = VisitRedeclarable(VD);
VisitDeclaratorDecl(VD);
- VisitRedeclarable(VD);
+
VD->VarDeclBits.SClass = (StorageClass)Record[Idx++];
VD->VarDeclBits.SClassAsWritten = (StorageClass)Record[Idx++];
VD->VarDeclBits.ThreadSpecified = Record[Idx++];
- VD->VarDeclBits.HasCXXDirectInit = Record[Idx++];
+ VD->VarDeclBits.InitStyle = Record[Idx++];
VD->VarDeclBits.ExceptionVar = Record[Idx++];
VD->VarDeclBits.NRVOVariable = Record[Idx++];
VD->VarDeclBits.CXXForRangeDecl = Record[Idx++];
VD->VarDeclBits.ARCPseudoStrong = Record[Idx++];
- if (Record[Idx++])
+
+ // Only true variables (not parameters or implicit parameters) can be merged.
+ if (VD->getKind() == Decl::Var)
+ mergeRedeclarable(VD, Redecl);
+
+ if (uint64_t Val = Record[Idx++]) {
VD->setInit(Reader.ReadExpr(F));
+ if (Val > 1) {
+ EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
+ Eval->CheckedICE = true;
+ Eval->IsICE = Val == 3;
+ }
+ }
if (Record[Idx++]) { // HasMemberSpecializationInfo.
VarDecl *Tmpl = ReadDeclAs<VarDecl>(Record, Idx);
@@ -802,17 +986,25 @@ void ASTDeclReader::VisitLabelDecl(LabelDecl *D) {
void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
- D->IsInline = Record[Idx++];
+ D->setInline(Record[Idx++]);
D->LocStart = ReadSourceLocation(Record, Idx);
D->RBraceLoc = ReadSourceLocation(Record, Idx);
- D->NextNamespace = Record[Idx++];
-
- bool IsOriginal = Record[Idx++];
- // FIXME: Modules will likely have trouble with pointing directly at
- // the original namespace.
- D->OrigOrAnonNamespace.setInt(IsOriginal);
- D->OrigOrAnonNamespace.setPointer(ReadDeclAs<NamespaceDecl>(Record, Idx));
+ mergeRedeclarable(D, Redecl);
+
+ if (Redecl.getFirstID() == ThisDeclID) {
+ // Each module has its own anonymous namespace, which is disjoint from
+ // any other module's anonymous namespaces, so don't attach the anonymous
+ // namespace at all.
+ NamespaceDecl *Anon = ReadDeclAs<NamespaceDecl>(Record, Idx);
+ if (F.Kind != MK_Module)
+ D->setAnonymousNamespace(Anon);
+ } else {
+ // Link this namespace back to the first declaration, which has already
+ // been deserialized.
+ D->AnonOrFirstNamespaceAndInline.setPointer(D->getFirstDeclaration());
+ }
}
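
Later namespace redeclarations no longer carry their own anonymous-namespace pointer; they link back to the first declaration, which is the only one that stores it. A minimal sketch of that first-declaration indirection, using toy NS fields:

    #include <cassert>

    struct NS {
      NS *First;  // first declaration of this namespace
      NS *Anon;   // anonymous namespace, meaningful on First only
      NS() : First(this), Anon(0) {}
    };

    // Queries chase the link to the first declaration.
    NS *getAnon(NS *D) { return D->First->Anon; }

    int main() {
      NS First, Later, Anon;
      First.Anon = &Anon;   // attached to the first declaration only
      Later.First = &First; // later redeclaration links back

      assert(getAnon(&Later) == &Anon);
      return 0;
    }
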
void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
@@ -828,7 +1020,7 @@ void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
D->setUsingLocation(ReadSourceLocation(Record, Idx));
D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
- D->FirstUsingShadow = ReadDeclAs<UsingShadowDecl>(Record, Idx);
+ D->FirstUsingShadow.setPointer(ReadDeclAs<UsingShadowDecl>(Record, Idx));
D->setTypeName(Record[Idx++]);
if (NamedDecl *Pattern = ReadDeclAs<NamedDecl>(Record, Idx))
Reader.getContext().setInstantiatedFromUsingDecl(D, Pattern);
@@ -869,6 +1061,7 @@ void ASTDeclReader::VisitUnresolvedUsingTypenameDecl(
void ASTDeclReader::ReadCXXDefinitionData(
struct CXXRecordDecl::DefinitionData &Data,
const RecordData &Record, unsigned &Idx) {
+ // Note: the caller has deserialized the IsLambda bit already.
Data.UserDeclaredConstructor = Record[Idx++];
Data.UserDeclaredCopyConstructor = Record[Idx++];
Data.UserDeclaredMoveConstructor = Record[Idx++];
@@ -886,13 +1079,21 @@ void ASTDeclReader::ReadCXXDefinitionData(
Data.HasProtectedFields = Record[Idx++];
Data.HasPublicFields = Record[Idx++];
Data.HasMutableFields = Record[Idx++];
+ Data.HasOnlyCMembers = Record[Idx++];
Data.HasTrivialDefaultConstructor = Record[Idx++];
Data.HasConstexprNonCopyMoveConstructor = Record[Idx++];
+ Data.DefaultedDefaultConstructorIsConstexpr = Record[Idx++];
+ Data.DefaultedCopyConstructorIsConstexpr = Record[Idx++];
+ Data.DefaultedMoveConstructorIsConstexpr = Record[Idx++];
+ Data.HasConstexprDefaultConstructor = Record[Idx++];
+ Data.HasConstexprCopyConstructor = Record[Idx++];
+ Data.HasConstexprMoveConstructor = Record[Idx++];
Data.HasTrivialCopyConstructor = Record[Idx++];
Data.HasTrivialMoveConstructor = Record[Idx++];
Data.HasTrivialCopyAssignment = Record[Idx++];
Data.HasTrivialMoveAssignment = Record[Idx++];
Data.HasTrivialDestructor = Record[Idx++];
+ Data.HasIrrelevantDestructor = Record[Idx++];
Data.HasNonLiteralTypeFieldsOrBases = Record[Idx++];
Data.ComputedVisibleConversions = Record[Idx++];
Data.UserProvidedDefaultConstructor = Record[Idx++];
@@ -916,38 +1117,26 @@ void ASTDeclReader::ReadCXXDefinitionData(
Reader.ReadUnresolvedSet(F, Data.VisibleConversions, Record, Idx);
assert(Data.Definition && "Data.Definition should be already set!");
Data.FirstFriend = ReadDeclAs<FriendDecl>(Record, Idx);
-}
-
-void ASTDeclReader::InitializeCXXDefinitionData(CXXRecordDecl *D,
- CXXRecordDecl *DefinitionDecl,
- const RecordData &Record,
- unsigned &Idx) {
- ASTContext &C = Reader.getContext();
-
- if (D == DefinitionDecl) {
- D->DefinitionData = new (C) struct CXXRecordDecl::DefinitionData(D);
- ReadCXXDefinitionData(*D->DefinitionData, Record, Idx);
- // We read the definition info. Check if there are pending forward
- // references that need to point to this DefinitionData pointer.
- ASTReader::PendingForwardRefsMap::iterator
- FindI = Reader.PendingForwardRefs.find(D);
- if (FindI != Reader.PendingForwardRefs.end()) {
- ASTReader::ForwardRefs &Refs = FindI->second;
- for (ASTReader::ForwardRefs::iterator
- I = Refs.begin(), E = Refs.end(); I != E; ++I)
- (*I)->DefinitionData = D->DefinitionData;
-#ifndef NDEBUG
- // We later check whether PendingForwardRefs is empty to make sure all
- // pending references were linked.
- Reader.PendingForwardRefs.erase(D);
-#endif
- }
- } else if (DefinitionDecl) {
- if (DefinitionDecl->DefinitionData) {
- D->DefinitionData = DefinitionDecl->DefinitionData;
- } else {
- // The definition is still initializing.
- Reader.PendingForwardRefs[DefinitionDecl].push_back(D);
+
+ if (Data.IsLambda) {
+ typedef LambdaExpr::Capture Capture;
+ CXXRecordDecl::LambdaDefinitionData &Lambda
+ = static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
+ Lambda.Dependent = Record[Idx++];
+ Lambda.NumCaptures = Record[Idx++];
+ Lambda.NumExplicitCaptures = Record[Idx++];
+ Lambda.ManglingNumber = Record[Idx++];
+ Lambda.ContextDecl = ReadDecl(Record, Idx);
+ Lambda.Captures
+ = (Capture*)Reader.Context.Allocate(sizeof(Capture)*Lambda.NumCaptures);
+ Capture *ToCapture = Lambda.Captures;
+ for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
+ SourceLocation Loc = ReadSourceLocation(Record, Idx);
+ bool IsImplicit = Record[Idx++];
+ LambdaCaptureKind Kind = static_cast<LambdaCaptureKind>(Record[Idx++]);
+ VarDecl *Var = ReadDeclAs<VarDecl>(Record, Idx);
+ SourceLocation EllipsisLoc = ReadSourceLocation(Record, Idx);
+ *ToCapture++ = Capture(Loc, IsImplicit, Kind, Var, EllipsisLoc);
}
}
}
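
Each lambda capture is stored in the record as a flat five-field tuple, consumed in a fixed order. A rough standalone model of that layout, with illustrative stand-ins for RecordData, SourceLocation, and the capture kinds:

#include <cstdint>
#include <vector>

// Illustrative stand-ins: a record is a flat array of integers and a source
// location is a raw encoding; the kind names only mimic Clang's.
typedef std::vector<uint64_t> RecordData;
struct SourceLocation { uint64_t Raw; };
enum LambdaCaptureKind { LCK_This, LCK_ByCopy, LCK_ByRef };

struct Capture {
  SourceLocation Loc;
  bool IsImplicit;
  LambdaCaptureKind Kind;
  uint64_t VarID; // stands in for the deserialized VarDecl*
  SourceLocation EllipsisLoc;
};

// Each capture is serialized as five fields (location, implicit flag, kind,
// variable reference, ellipsis location) and must be read back in that order.
std::vector<Capture> readCaptures(const RecordData &Record, unsigned &Idx,
                                  unsigned NumCaptures) {
  std::vector<Capture> Captures;
  Captures.reserve(NumCaptures);
  for (unsigned I = 0; I != NumCaptures; ++I) {
    Capture C;
    C.Loc.Raw = Record[Idx++];
    C.IsImplicit = Record[Idx++] != 0;
    C.Kind = static_cast<LambdaCaptureKind>(Record[Idx++]);
    C.VarID = Record[Idx++];
    C.EllipsisLoc.Raw = Record[Idx++];
    Captures.push_back(C);
  }
  return Captures;
}
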
@@ -955,17 +1144,36 @@ void ASTDeclReader::InitializeCXXDefinitionData(CXXRecordDecl *D,
void ASTDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) {
VisitRecordDecl(D);
- CXXRecordDecl *DefinitionDecl = ReadDeclAs<CXXRecordDecl>(Record, Idx);
- InitializeCXXDefinitionData(D, DefinitionDecl, Record, Idx);
-
ASTContext &C = Reader.getContext();
+ if (Record[Idx++]) {
+ // Determine whether this is a lambda closure type, so that we can
+ // allocate the appropriate DefinitionData structure.
+ bool IsLambda = Record[Idx++];
+ if (IsLambda)
+ D->DefinitionData = new (C) CXXRecordDecl::LambdaDefinitionData(D, false);
+ else
+ D->DefinitionData = new (C) struct CXXRecordDecl::DefinitionData(D);
+
+ // Propagate the DefinitionData pointer to the canonical declaration, so
+ // that all other deserialized declarations will see it.
+ // FIXME: Complain if there already is a DefinitionData!
+ D->getCanonicalDecl()->DefinitionData = D->DefinitionData;
+
+ ReadCXXDefinitionData(*D->DefinitionData, Record, Idx);
+
+ // Note that we have deserialized a definition. Any declarations
+ // deserialized before this one will be given the DefinitionData pointer
+ // at the end.
+ Reader.PendingDefinitions.insert(D);
+ } else {
+ // Propagate DefinitionData pointer from the canonical declaration.
+ D->DefinitionData = D->getCanonicalDecl()->DefinitionData;
+ }
enum CXXRecKind {
CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
};
switch ((CXXRecKind)Record[Idx++]) {
- default:
- llvm_unreachable("Out of sync with ASTDeclWriter::VisitCXXRecordDecl?");
case CXXRecNotTemplate:
break;
case CXXRecTemplate:
@@ -1022,6 +1230,16 @@ void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
D->IsExplicitSpecified = Record[Idx++];
}
+void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
+ VisitDecl(D);
+ D->ImportedAndComplete.setPointer(readModule(Record, Idx));
+ D->ImportedAndComplete.setInt(Record[Idx++]);
+ SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(D + 1);
+ for (unsigned I = 0, N = Record.back(); I != N; ++I)
+ StoredLocs[I] = ReadSourceLocation(Record, Idx);
+ ++Idx;
+}
+
void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) {
VisitDecl(D);
D->setColonLoc(ReadSourceLocation(Record, Idx));
@@ -1061,64 +1279,42 @@ void ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
D->init(TemplatedDecl, TemplateParams);
}
-void ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
- // Initialize CommonOrPrev before VisitTemplateDecl so that getCommonPtr()
- // can be used while this is still initializing.
-
- assert(D->CommonOrPrev.isNull() && "getCommonPtr was called earlier on this");
- DeclID PreviousDeclID = ReadDeclID(Record, Idx);
- DeclID FirstDeclID = PreviousDeclID ? ReadDeclID(Record, Idx) : 0;
- // We delay loading of the redeclaration chain to avoid deeply nested calls.
- // We temporarily set the first (canonical) declaration as the previous one
- // which is the one that matters and mark the real previous DeclID to be
- // loaded & attached later on.
- RedeclarableTemplateDecl *FirstDecl =
- cast_or_null<RedeclarableTemplateDecl>(Reader.GetDecl(FirstDeclID));
- assert((FirstDecl == 0 || FirstDecl->getKind() == D->getKind()) &&
- "FirstDecl kind mismatch");
- if (FirstDecl) {
- D->CommonOrPrev = FirstDecl;
- // Mark the real previous DeclID to be loaded & attached later on.
- if (PreviousDeclID != FirstDeclID)
- Reader.PendingPreviousDecls.push_back(std::make_pair(D, PreviousDeclID));
- } else {
- D->CommonOrPrev = D->newCommon(Reader.getContext());
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
+
+ // Make sure we've allocated the Common pointer first. We do this before
+ // VisitTemplateDecl so that getCommonPtr() can be used during initialization.
+ RedeclarableTemplateDecl *CanonD = D->getCanonicalDecl();
+ if (!CanonD->Common) {
+ CanonD->Common = CanonD->newCommon(Reader.getContext());
+ Reader.PendingDefinitions.insert(CanonD);
+ }
+ D->Common = CanonD->Common;
+
+ // If this is the first declaration of the template, fill in the information
+ // for the 'common' pointer.
+ if (ThisDeclID == Redecl.getFirstID()) {
if (RedeclarableTemplateDecl *RTD
= ReadDeclAs<RedeclarableTemplateDecl>(Record, Idx)) {
assert(RTD->getKind() == D->getKind() &&
"InstantiatedFromMemberTemplate kind mismatch");
- D->setInstantiatedFromMemberTemplateImpl(RTD);
+ D->setInstantiatedFromMemberTemplate(RTD);
if (Record[Idx++])
D->setMemberSpecialization();
}
-
- RedeclarableTemplateDecl *LatestDecl
- = ReadDeclAs<RedeclarableTemplateDecl>(Record, Idx);
-
- // This decl is a first one and the latest declaration that it points to is
- // in the same AST file. However, if this actually needs to point to a
- // redeclaration in another AST file, we need to update it by checking
- // the FirstLatestDeclIDs map which tracks this kind of decls.
- assert(Reader.GetDecl(ThisDeclID) == D && "Invalid ThisDeclID ?");
- ASTReader::FirstLatestDeclIDMap::iterator I
- = Reader.FirstLatestDeclIDs.find(ThisDeclID);
- if (I != Reader.FirstLatestDeclIDs.end()) {
- if (Decl *NewLatest = Reader.GetDecl(I->second))
- LatestDecl = cast<RedeclarableTemplateDecl>(NewLatest);
- }
-
- assert(LatestDecl->getKind() == D->getKind() && "Latest kind mismatch");
- D->getCommonPtr()->Latest = LatestDecl;
}
-
+
VisitTemplateDecl(D);
D->IdentifierNamespace = Record[Idx++];
+
+ return Redecl;
}
void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
- VisitRedeclarableTemplateDecl(D);
+ RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
- if (D->getPreviousDeclaration() == 0) {
+ if (ThisDeclID == Redecl.getFirstID()) {
// This ClassTemplateDecl owns a CommonPtr; read it to keep track of all of
// the specializations.
SmallVector<serialization::DeclID, 2> SpecIDs;
@@ -1140,6 +1336,7 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
typedef serialization::DeclID DeclID;
ClassTemplateDecl::Common *CommonPtr = D->getCommonPtr();
+ // FIXME: Append specializations!
CommonPtr->LazySpecializations
= new (Reader.getContext()) DeclID [SpecIDs.size()];
memcpy(CommonPtr->LazySpecializations, SpecIDs.data(),
@@ -1220,7 +1417,7 @@ void ASTDeclReader::VisitClassTemplatePartialSpecializationDecl(
D->SequenceNumber = Record[Idx++];
// These are read/set from/to the first declaration.
- if (D->getPreviousDeclaration() == 0) {
+ if (D->getPreviousDecl() == 0) {
D->InstantiatedFromMember.setPointer(
ReadDeclAs<ClassTemplatePartialSpecializationDecl>(Record, Idx));
D->InstantiatedFromMember.setInt(Record[Idx++]);
@@ -1234,9 +1431,9 @@ void ASTDeclReader::VisitClassScopeFunctionSpecializationDecl(
}
void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
- VisitRedeclarableTemplateDecl(D);
+ RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
- if (D->getPreviousDeclaration() == 0) {
+ if (ThisDeclID == Redecl.getFirstID()) {
// This FunctionTemplateDecl owns a CommonPtr; read it.
// Read the function specialization declarations.
@@ -1311,54 +1508,90 @@ ASTDeclReader::VisitDeclContext(DeclContext *DC) {
}
template <typename T>
-void ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
- enum RedeclKind { NoRedeclaration = 0, PointsToPrevious, PointsToLatest };
- RedeclKind Kind = (RedeclKind)Record[Idx++];
- switch (Kind) {
- default:
- llvm_unreachable("Out of sync with ASTDeclWriter::VisitRedeclarable or"
- " messed up reading");
- case NoRedeclaration:
- break;
- case PointsToPrevious: {
- DeclID PreviousDeclID = ReadDeclID(Record, Idx);
- DeclID FirstDeclID = ReadDeclID(Record, Idx);
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
+ DeclID FirstDeclID = ReadDeclID(Record, Idx);
+
+ // 0 indicates that this declaration was the only declaration of its entity,
+ // and is used for space optimization.
+ if (FirstDeclID == 0)
+ FirstDeclID = ThisDeclID;
+
+ T *FirstDecl = cast_or_null<T>(Reader.GetDecl(FirstDeclID));
+ if (FirstDecl != D) {
// We delay loading of the redeclaration chain to avoid deeply nested calls.
 // We temporarily set the first (canonical) declaration as the previous one,
 // which is the one that matters, and mark the real previous DeclID to be
 // loaded & attached later on.
- D->RedeclLink = typename Redeclarable<T>::PreviousDeclLink(
- cast_or_null<T>(Reader.GetDecl(FirstDeclID)));
- if (PreviousDeclID != FirstDeclID)
- Reader.PendingPreviousDecls.push_back(std::make_pair(static_cast<T*>(D),
- PreviousDeclID));
- break;
- }
- case PointsToLatest:
- D->RedeclLink = typename Redeclarable<T>::LatestDeclLink(
- ReadDeclAs<T>(Record, Idx));
- break;
- }
-
- assert(!(Kind == PointsToPrevious &&
- Reader.FirstLatestDeclIDs.find(ThisDeclID) !=
- Reader.FirstLatestDeclIDs.end()) &&
- "This decl is not first, it should not be in the map");
- if (Kind == PointsToPrevious)
+ D->RedeclLink = typename Redeclarable<T>::PreviousDeclLink(FirstDecl);
+ }
+
+ // Note that this declaration has been deserialized.
+ Reader.RedeclsDeserialized.insert(static_cast<T *>(D));
+
+ // The result structure takes care to note that we need to load the
+ // other declaration chains for this ID.
+ return RedeclarableResult(Reader, FirstDeclID);
+}
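
The FirstDeclID == 0 case is a small wire-format optimization: the writer emits 0 whenever a declaration is the first of its own chain, so the common single-declaration case costs no cross-reference. A hedged sketch of both sides of that convention (writeFirstDeclRef is a hypothetical writer counterpart, not the actual ASTWriter code):

#include <cassert>
#include <vector>

typedef unsigned DeclID;
typedef std::vector<DeclID> RecordData;

// Hypothetical writer counterpart: emit 0 instead of a self-reference when
// the declaration is the first of its redeclaration chain.
void writeFirstDeclRef(RecordData &Record, DeclID ThisID, DeclID FirstID) {
  Record.push_back(FirstID == ThisID ? 0 : FirstID);
}

// Reader side, as in VisitRedeclarable above: decode 0 back into "this
// declaration is its own first declaration".
DeclID readFirstDeclRef(const RecordData &Record, unsigned &Idx,
                        DeclID ThisID) {
  DeclID FirstID = Record[Idx++];
  return FirstID == 0 ? ThisID : FirstID;
}

int main() {
  RecordData Record;
  writeFirstDeclRef(Record, 42, 42); // sole declaration of its entity
  unsigned Idx = 0;
  assert(readFirstDeclRef(Record, Idx, 42) == 42);
  return 0;
}
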
+
+/// \brief Attempts to merge the given declaration (D) with another declaration
+/// of the same entity.
+template<typename T>
+void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *D,
+ RedeclarableResult &Redecl) {
+ // If modules are not available, there is no reason to perform this merge.
+ if (!Reader.getContext().getLangOpts().Modules)
return;
-
- // This decl is a first one and the latest declaration that it points to is in
- // the same AST file. However, if this actually needs to point to a
- // redeclaration in another AST file, we need to update it by checking the
- // FirstLatestDeclIDs map which tracks this kind of decls.
- assert(Reader.GetDecl(ThisDeclID) == static_cast<T*>(D) &&
- "Invalid ThisDeclID ?");
- ASTReader::FirstLatestDeclIDMap::iterator I
- = Reader.FirstLatestDeclIDs.find(ThisDeclID);
- if (I != Reader.FirstLatestDeclIDs.end()) {
- Decl *NewLatest = Reader.GetDecl(I->second);
- D->RedeclLink
- = typename Redeclarable<T>::LatestDeclLink(cast_or_null<T>(NewLatest));
+
+ if (FindExistingResult ExistingRes = findExisting(static_cast<T*>(D))) {
+ if (T *Existing = ExistingRes) {
+ T *ExistingCanon = Existing->getCanonicalDecl();
+ T *DCanon = static_cast<T*>(D)->getCanonicalDecl();
+ if (ExistingCanon != DCanon) {
+ // Have our redeclaration link point back at the canonical declaration
+ // of the existing declaration, so that this declaration has the
+ // appropriate canonical declaration.
+ D->RedeclLink
+ = typename Redeclarable<T>::PreviousDeclLink(ExistingCanon);
+
+ // When we merge a namespace, update its pointer to the first namespace.
+ if (NamespaceDecl *Namespace
+ = dyn_cast<NamespaceDecl>(static_cast<T*>(D))) {
+ Namespace->AnonOrFirstNamespaceAndInline.setPointer(
+ static_cast<NamespaceDecl *>(static_cast<void*>(ExistingCanon)));
+ }
+
+ // Don't introduce DCanon into the set of pending declaration chains.
+ Redecl.suppress();
+
+ // Introduce ExistingCanon into the set of pending declaration chains,
+ // if in fact it came from a module file.
+ if (ExistingCanon->isFromASTFile()) {
+ GlobalDeclID ExistingCanonID = ExistingCanon->getGlobalID();
+ assert(ExistingCanonID && "Unrecorded canonical declaration ID?");
+ if (Reader.PendingDeclChainsKnown.insert(ExistingCanonID))
+ Reader.PendingDeclChains.push_back(ExistingCanonID);
+ }
+
+ // If this declaration was the canonical declaration, make a note of
+ // that. We accept the linear algorithm here because the number of
+ // unique canonical declarations of an entity should always be tiny.
+ if (DCanon == static_cast<T*>(D)) {
+ SmallVectorImpl<DeclID> &Merged = Reader.MergedDecls[ExistingCanon];
+ if (std::find(Merged.begin(), Merged.end(), Redecl.getFirstID())
+ == Merged.end())
+ Merged.push_back(Redecl.getFirstID());
+
+ // If ExistingCanon did not come from a module file, introduce the
+ // first declaration that *does* come from a module file to the
+ // set of pending declaration chains, so that we merge this
+ // declaration.
+ if (!ExistingCanon->isFromASTFile() &&
+ Reader.PendingDeclChainsKnown.insert(Redecl.getFirstID()))
+ Reader.PendingDeclChains.push_back(Merged[0]);
+ }
+ }
+ }
}
}
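
The linear std::find guarding the push_back above is a deliberate trade-off: the set of canonical declarations per entity is expected to stay tiny, so a vector scan beats a hashed container. The insert-if-absent idiom in isolation:

#include <algorithm>
#include <vector>

typedef unsigned DeclID;

// Insert ID only if absent; for the handful of entries expected here, the
// linear scan is cheaper than maintaining a hashed set.
void addMerged(std::vector<DeclID> &Merged, DeclID ID) {
  if (std::find(Merged.begin(), Merged.end(), ID) == Merged.end())
    Merged.push_back(ID);
}
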
@@ -1367,7 +1600,7 @@ void ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
//===----------------------------------------------------------------------===//
/// \brief Reads attributes from the current stream position.
-void ASTReader::ReadAttributes(Module &F, AttrVec &Attrs,
+void ASTReader::ReadAttributes(ModuleFile &F, AttrVec &Attrs,
const RecordData &Record, unsigned &Idx) {
for (unsigned i = 0, e = Record[Idx++]; i != e; ++i) {
Attr *New = 0;
@@ -1422,31 +1655,142 @@ static bool isConsumerInterestedIn(Decl *D) {
/// \brief Get the correct cursor and offset for loading a declaration.
ASTReader::RecordLocation
-ASTReader::DeclCursorForID(DeclID ID) {
+ASTReader::DeclCursorForID(DeclID ID, unsigned &RawLocation) {
// See if there's an override.
DeclReplacementMap::iterator It = ReplacedDecls.find(ID);
- if (It != ReplacedDecls.end())
- return RecordLocation(It->second.first, It->second.second);
+ if (It != ReplacedDecls.end()) {
+ RawLocation = It->second.RawLoc;
+ return RecordLocation(It->second.Mod, It->second.Offset);
+ }
GlobalDeclMapType::iterator I = GlobalDeclMap.find(ID);
assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
- Module *M = I->second;
- return RecordLocation(M,
- M->DeclOffsets[ID - M->BaseDeclID - NUM_PREDEF_DECL_IDS]);
+ ModuleFile *M = I->second;
+ const DeclOffset &
+ DOffs = M->DeclOffsets[ID - M->BaseDeclID - NUM_PREDEF_DECL_IDS];
+ RawLocation = DOffs.Loc;
+ return RecordLocation(M, DOffs.BitOffset);
}
ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
- ContinuousRangeMap<uint64_t, Module*, 4>::iterator I
+ ContinuousRangeMap<uint64_t, ModuleFile*, 4>::iterator I
= GlobalBitOffsetsMap.find(GlobalOffset);
assert(I != GlobalBitOffsetsMap.end() && "Corrupted global bit offsets map");
return RecordLocation(I->second, GlobalOffset - I->second->GlobalBitOffset);
}
-uint64_t ASTReader::getGlobalBitOffset(Module &M, uint32_t LocalOffset) {
+uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset) {
return LocalOffset + M.GlobalBitOffset;
}
+/// \brief Determine whether the two declarations refer to the same entity.
+static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
+ assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
+
+ if (X == Y)
+ return true;
+
+ // Must be in the same context.
+ if (!X->getDeclContext()->getRedeclContext()->Equals(
+ Y->getDeclContext()->getRedeclContext()))
+ return false;
+
+ // Two typedefs refer to the same entity if they have the same underlying
+ // type.
+ if (TypedefNameDecl *TypedefX = dyn_cast<TypedefNameDecl>(X))
+ if (TypedefNameDecl *TypedefY = dyn_cast<TypedefNameDecl>(Y))
+ return X->getASTContext().hasSameType(TypedefX->getUnderlyingType(),
+ TypedefY->getUnderlyingType());
+
+ // Must have the same kind.
+ if (X->getKind() != Y->getKind())
+ return false;
+
+ // Objective-C classes and protocols with the same name always match.
+ if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
+ return true;
+
+ // Compatible tags match.
+ if (TagDecl *TagX = dyn_cast<TagDecl>(X)) {
+ TagDecl *TagY = cast<TagDecl>(Y);
+ return (TagX->getTagKind() == TagY->getTagKind()) ||
+ ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class) &&
+ (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class));
+ }
+
+ // Functions with the same type and linkage match.
+ // FIXME: This needs to cope with function templates, merging of
+ // prototyped/non-prototyped functions, etc.
+ if (FunctionDecl *FuncX = dyn_cast<FunctionDecl>(X)) {
+ FunctionDecl *FuncY = cast<FunctionDecl>(Y);
+ return (FuncX->getLinkage() == FuncY->getLinkage()) &&
+ FuncX->getASTContext().hasSameType(FuncX->getType(), FuncY->getType());
+ }
+
+ // Variables with the same type and linkage match.
+ if (VarDecl *VarX = dyn_cast<VarDecl>(X)) {
+ VarDecl *VarY = cast<VarDecl>(Y);
+ return (VarX->getLinkage() == VarY->getLinkage()) &&
+ VarX->getASTContext().hasSameType(VarX->getType(), VarY->getType());
+ }
+
+ // Namespaces with the same name and inlinedness match.
+ if (NamespaceDecl *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
+ NamespaceDecl *NamespaceY = cast<NamespaceDecl>(Y);
+ return NamespaceX->isInline() == NamespaceY->isInline();
+ }
+
+ // FIXME: Many other cases to implement.
+ return false;
+}
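
One concrete consequence of the tag rule above: a 'struct Foo' from one module and a 'class Foo' from another are treated as the same entity, while any other tag-kind mismatch is not. A toy restatement of just that rule:

// Illustrative restatement; TagKind mimics Clang's TagTypeKind values.
enum TagKind { TTK_Struct, TTK_Class, TTK_Union, TTK_Enum };

// Identical kinds match, and 'struct' vs. 'class' are accepted as two
// spellings of the same thing; everything else is a mismatch.
bool tagsMatch(TagKind X, TagKind Y) {
  if (X == Y)
    return true;
  return (X == TTK_Struct || X == TTK_Class) &&
         (Y == TTK_Struct || Y == TTK_Class);
}
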
+
+ASTDeclReader::FindExistingResult::~FindExistingResult() {
+ if (!AddResult || Existing)
+ return;
+
+ DeclContext *DC = New->getDeclContext()->getRedeclContext();
+ if (DC->isTranslationUnit() && Reader.SemaObj) {
+ Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, New->getDeclName());
+ } else if (DC->isNamespace()) {
+ DC->addDecl(New);
+ }
+}
+
+ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
+ DeclarationName Name = D->getDeclName();
+ if (!Name) {
+ // Don't bother trying to find unnamed declarations.
+ FindExistingResult Result(Reader, D, /*Existing=*/0);
+ Result.suppress();
+ return Result;
+ }
+
+ DeclContext *DC = D->getDeclContext()->getRedeclContext();
+ if (!DC->isFileContext())
+ return FindExistingResult(Reader);
+
+ if (DC->isTranslationUnit() && Reader.SemaObj) {
+ IdentifierResolver &IdResolver = Reader.SemaObj->IdResolver;
+ for (IdentifierResolver::iterator I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+ I != IEnd; ++I) {
+ if (isSameEntity(*I, D))
+ return FindExistingResult(Reader, D, *I);
+ }
+ }
+
+ if (DC->isNamespace()) {
+ for (DeclContext::lookup_result R = DC->lookup(Name);
+ R.first != R.second; ++R.first) {
+ if (isSameEntity(*R.first, D))
+ return FindExistingResult(Reader, D, *R.first);
+ }
+ }
+
+ return FindExistingResult(Reader, D, /*Existing=*/0);
+}
+
void ASTDeclReader::attachPreviousDecl(Decl *D, Decl *previous) {
assert(D && previous);
if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
@@ -1455,12 +1799,78 @@ void ASTDeclReader::attachPreviousDecl(Decl *D, Decl *previous) {
FD->RedeclLink.setPointer(cast<FunctionDecl>(previous));
} else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
VD->RedeclLink.setPointer(cast<VarDecl>(previous));
+ } else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ TD->RedeclLink.setPointer(cast<TypedefNameDecl>(previous));
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ ID->RedeclLink.setPointer(cast<ObjCInterfaceDecl>(previous));
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ PD->RedeclLink.setPointer(cast<ObjCProtocolDecl>(previous));
+ } else if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D)) {
+ ND->RedeclLink.setPointer(cast<NamespaceDecl>(previous));
} else {
RedeclarableTemplateDecl *TD = cast<RedeclarableTemplateDecl>(D);
- TD->CommonOrPrev = cast<RedeclarableTemplateDecl>(previous);
+ TD->RedeclLink.setPointer(cast<RedeclarableTemplateDecl>(previous));
}
}
+void ASTDeclReader::attachLatestDecl(Decl *D, Decl *Latest) {
+ assert(D && Latest);
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ TD->RedeclLink
+ = Redeclarable<TagDecl>::LatestDeclLink(cast<TagDecl>(Latest));
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ FD->RedeclLink
+ = Redeclarable<FunctionDecl>::LatestDeclLink(cast<FunctionDecl>(Latest));
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ VD->RedeclLink
+ = Redeclarable<VarDecl>::LatestDeclLink(cast<VarDecl>(Latest));
+ } else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ TD->RedeclLink
+ = Redeclarable<TypedefNameDecl>::LatestDeclLink(
+ cast<TypedefNameDecl>(Latest));
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ ID->RedeclLink
+ = Redeclarable<ObjCInterfaceDecl>::LatestDeclLink(
+ cast<ObjCInterfaceDecl>(Latest));
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ PD->RedeclLink
+ = Redeclarable<ObjCProtocolDecl>::LatestDeclLink(
+ cast<ObjCProtocolDecl>(Latest));
+ } else if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D)) {
+ ND->RedeclLink
+ = Redeclarable<NamespaceDecl>::LatestDeclLink(
+ cast<NamespaceDecl>(Latest));
+ } else {
+ RedeclarableTemplateDecl *TD = cast<RedeclarableTemplateDecl>(D);
+ TD->RedeclLink
+ = Redeclarable<RedeclarableTemplateDecl>::LatestDeclLink(
+ cast<RedeclarableTemplateDecl>(Latest));
+ }
+}
+
+ASTReader::MergedDeclsMap::iterator
+ASTReader::combineStoredMergedDecls(Decl *Canon, GlobalDeclID CanonID) {
+ // If we don't have any stored merged declarations, just look in the
+ // merged declarations set.
+ StoredMergedDeclsMap::iterator StoredPos = StoredMergedDecls.find(CanonID);
+ if (StoredPos == StoredMergedDecls.end())
+ return MergedDecls.find(Canon);
+
+ // Append the stored merged declarations to the merged declarations set.
+ MergedDeclsMap::iterator Pos = MergedDecls.find(Canon);
+ if (Pos == MergedDecls.end())
+ Pos = MergedDecls.insert(std::make_pair(Canon,
+ SmallVector<DeclID, 2>())).first;
+ Pos->second.append(StoredPos->second.begin(), StoredPos->second.end());
+ StoredMergedDecls.erase(StoredPos);
+
+ // Sort and uniquify the set of merged declarations.
+ llvm::array_pod_sort(Pos->second.begin(), Pos->second.end());
+ Pos->second.erase(std::unique(Pos->second.begin(), Pos->second.end()),
+ Pos->second.end());
+ return Pos;
+}
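
combineStoredMergedDecls ends with the classic sort-then-unique idiom for deduplicating a small vector. The same two steps in self-contained form, using std::sort in place of llvm::array_pod_sort:

#include <algorithm>
#include <cassert>
#include <vector>

// Sort first so duplicates become adjacent, then erase the tail that
// std::unique leaves behind after compacting the unique prefix.
void sortAndUniquify(std::vector<unsigned> &IDs) {
  std::sort(IDs.begin(), IDs.end());
  IDs.erase(std::unique(IDs.begin(), IDs.end()), IDs.end());
}

int main() {
  std::vector<unsigned> IDs;
  IDs.push_back(7); IDs.push_back(3); IDs.push_back(7); IDs.push_back(1);
  sortAndUniquify(IDs);
  assert(IDs.size() == 3 && IDs[0] == 1 && IDs[1] == 3 && IDs[2] == 7);
  return 0;
}
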
+
void ASTReader::loadAndAttachPreviousDecl(Decl *D, serialization::DeclID ID) {
Decl *previous = GetDecl(ID);
ASTDeclReader::attachPreviousDecl(D, previous);
@@ -1469,7 +1879,8 @@ void ASTReader::loadAndAttachPreviousDecl(Decl *D, serialization::DeclID ID) {
/// \brief Read the declaration at the given offset from the AST file.
Decl *ASTReader::ReadDeclRecord(DeclID ID) {
unsigned Index = ID - NUM_PREDEF_DECL_IDS;
- RecordLocation Loc = DeclCursorForID(ID);
+ unsigned RawLocation = 0;
+ RecordLocation Loc = DeclCursorForID(ID, RawLocation);
llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
// Keep track of where we are in the stream, then jump back there
// after reading this declaration.
@@ -1484,7 +1895,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
RecordData Record;
unsigned Code = DeclsCursor.ReadCode();
unsigned Idx = 0;
- ASTDeclReader Reader(*this, *Loc.F, DeclsCursor, ID, Record, Idx);
+ ASTDeclReader Reader(*this, *Loc.F, DeclsCursor, ID, RawLocation, Record,Idx);
Decl *D = 0;
switch ((DeclCode)DeclsCursor.ReadRecord(Code, Record)) {
@@ -1492,222 +1903,177 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_CONTEXT_VISIBLE:
llvm_unreachable("Record cannot be de-serialized with ReadDeclRecord");
case DECL_TYPEDEF:
- D = TypedefDecl::Create(Context, 0, SourceLocation(), SourceLocation(),
- 0, 0);
+ D = TypedefDecl::CreateDeserialized(Context, ID);
break;
case DECL_TYPEALIAS:
- D = TypeAliasDecl::Create(Context, 0, SourceLocation(), SourceLocation(),
- 0, 0);
+ D = TypeAliasDecl::CreateDeserialized(Context, ID);
break;
case DECL_ENUM:
- D = EnumDecl::Create(Context, Decl::EmptyShell());
+ D = EnumDecl::CreateDeserialized(Context, ID);
break;
case DECL_RECORD:
- D = RecordDecl::Create(Context, Decl::EmptyShell());
+ D = RecordDecl::CreateDeserialized(Context, ID);
break;
case DECL_ENUM_CONSTANT:
- D = EnumConstantDecl::Create(Context, 0, SourceLocation(), 0, QualType(),
- 0, llvm::APSInt());
+ D = EnumConstantDecl::CreateDeserialized(Context, ID);
break;
case DECL_FUNCTION:
- D = FunctionDecl::Create(Context, 0, SourceLocation(), SourceLocation(),
- DeclarationName(), QualType(), 0);
+ D = FunctionDecl::CreateDeserialized(Context, ID);
break;
case DECL_LINKAGE_SPEC:
- D = LinkageSpecDecl::Create(Context, 0, SourceLocation(), SourceLocation(),
- (LinkageSpecDecl::LanguageIDs)0,
- SourceLocation());
+ D = LinkageSpecDecl::CreateDeserialized(Context, ID);
break;
case DECL_LABEL:
- D = LabelDecl::Create(Context, 0, SourceLocation(), 0);
+ D = LabelDecl::CreateDeserialized(Context, ID);
break;
case DECL_NAMESPACE:
- D = NamespaceDecl::Create(Context, 0, SourceLocation(),
- SourceLocation(), 0);
+ D = NamespaceDecl::CreateDeserialized(Context, ID);
break;
case DECL_NAMESPACE_ALIAS:
- D = NamespaceAliasDecl::Create(Context, 0, SourceLocation(),
- SourceLocation(), 0,
- NestedNameSpecifierLoc(),
- SourceLocation(), 0);
+ D = NamespaceAliasDecl::CreateDeserialized(Context, ID);
break;
case DECL_USING:
- D = UsingDecl::Create(Context, 0, SourceLocation(),
- NestedNameSpecifierLoc(), DeclarationNameInfo(),
- false);
+ D = UsingDecl::CreateDeserialized(Context, ID);
break;
case DECL_USING_SHADOW:
- D = UsingShadowDecl::Create(Context, 0, SourceLocation(), 0, 0);
+ D = UsingShadowDecl::CreateDeserialized(Context, ID);
break;
case DECL_USING_DIRECTIVE:
- D = UsingDirectiveDecl::Create(Context, 0, SourceLocation(),
- SourceLocation(), NestedNameSpecifierLoc(),
- SourceLocation(), 0, 0);
+ D = UsingDirectiveDecl::CreateDeserialized(Context, ID);
break;
case DECL_UNRESOLVED_USING_VALUE:
- D = UnresolvedUsingValueDecl::Create(Context, 0, SourceLocation(),
- NestedNameSpecifierLoc(),
- DeclarationNameInfo());
+ D = UnresolvedUsingValueDecl::CreateDeserialized(Context, ID);
break;
case DECL_UNRESOLVED_USING_TYPENAME:
- D = UnresolvedUsingTypenameDecl::Create(Context, 0, SourceLocation(),
- SourceLocation(),
- NestedNameSpecifierLoc(),
- SourceLocation(),
- DeclarationName());
+ D = UnresolvedUsingTypenameDecl::CreateDeserialized(Context, ID);
break;
case DECL_CXX_RECORD:
- D = CXXRecordDecl::Create(Context, Decl::EmptyShell());
+ D = CXXRecordDecl::CreateDeserialized(Context, ID);
break;
case DECL_CXX_METHOD:
- D = CXXMethodDecl::Create(Context, 0, SourceLocation(),
- DeclarationNameInfo(), QualType(), 0,
- false, SC_None, false, false, SourceLocation());
+ D = CXXMethodDecl::CreateDeserialized(Context, ID);
break;
case DECL_CXX_CONSTRUCTOR:
- D = CXXConstructorDecl::Create(Context, Decl::EmptyShell());
+ D = CXXConstructorDecl::CreateDeserialized(Context, ID);
break;
case DECL_CXX_DESTRUCTOR:
- D = CXXDestructorDecl::Create(Context, Decl::EmptyShell());
+ D = CXXDestructorDecl::CreateDeserialized(Context, ID);
break;
case DECL_CXX_CONVERSION:
- D = CXXConversionDecl::Create(Context, Decl::EmptyShell());
+ D = CXXConversionDecl::CreateDeserialized(Context, ID);
break;
case DECL_ACCESS_SPEC:
- D = AccessSpecDecl::Create(Context, Decl::EmptyShell());
+ D = AccessSpecDecl::CreateDeserialized(Context, ID);
break;
case DECL_FRIEND:
- D = FriendDecl::Create(Context, Decl::EmptyShell());
+ D = FriendDecl::CreateDeserialized(Context, ID);
break;
case DECL_FRIEND_TEMPLATE:
- D = FriendTemplateDecl::Create(Context, Decl::EmptyShell());
+ D = FriendTemplateDecl::CreateDeserialized(Context, ID);
break;
case DECL_CLASS_TEMPLATE:
- D = ClassTemplateDecl::Create(Context, Decl::EmptyShell());
+ D = ClassTemplateDecl::CreateDeserialized(Context, ID);
break;
case DECL_CLASS_TEMPLATE_SPECIALIZATION:
- D = ClassTemplateSpecializationDecl::Create(Context, Decl::EmptyShell());
+ D = ClassTemplateSpecializationDecl::CreateDeserialized(Context, ID);
break;
case DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION:
- D = ClassTemplatePartialSpecializationDecl::Create(Context,
- Decl::EmptyShell());
+ D = ClassTemplatePartialSpecializationDecl::CreateDeserialized(Context, ID);
break;
case DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION:
- D = ClassScopeFunctionSpecializationDecl::Create(Context,
- Decl::EmptyShell());
+ D = ClassScopeFunctionSpecializationDecl::CreateDeserialized(Context, ID);
break;
case DECL_FUNCTION_TEMPLATE:
- D = FunctionTemplateDecl::Create(Context, Decl::EmptyShell());
+ D = FunctionTemplateDecl::CreateDeserialized(Context, ID);
break;
case DECL_TEMPLATE_TYPE_PARM:
- D = TemplateTypeParmDecl::Create(Context, Decl::EmptyShell());
+ D = TemplateTypeParmDecl::CreateDeserialized(Context, ID);
break;
case DECL_NON_TYPE_TEMPLATE_PARM:
- D = NonTypeTemplateParmDecl::Create(Context, 0, SourceLocation(),
- SourceLocation(), 0, 0, 0, QualType(),
- false, 0);
+ D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID);
break;
case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK:
- D = NonTypeTemplateParmDecl::Create(Context, 0, SourceLocation(),
- SourceLocation(), 0, 0, 0, QualType(),
- 0, 0, Record[Idx++], 0);
+ D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID, Record[Idx++]);
break;
case DECL_TEMPLATE_TEMPLATE_PARM:
- D = TemplateTemplateParmDecl::Create(Context, 0, SourceLocation(), 0, 0,
- false, 0, 0);
+ D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID);
break;
case DECL_TYPE_ALIAS_TEMPLATE:
- D = TypeAliasTemplateDecl::Create(Context, Decl::EmptyShell());
+ D = TypeAliasTemplateDecl::CreateDeserialized(Context, ID);
break;
case DECL_STATIC_ASSERT:
- D = StaticAssertDecl::Create(Context, 0, SourceLocation(), 0, 0,
- SourceLocation());
+ D = StaticAssertDecl::CreateDeserialized(Context, ID);
break;
-
case DECL_OBJC_METHOD:
- D = ObjCMethodDecl::Create(Context, SourceLocation(), SourceLocation(),
- Selector(), QualType(), 0, 0);
+ D = ObjCMethodDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_INTERFACE:
- D = ObjCInterfaceDecl::Create(Context, 0, SourceLocation(), 0);
+ D = ObjCInterfaceDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_IVAR:
- D = ObjCIvarDecl::Create(Context, 0, SourceLocation(), SourceLocation(),
- 0, QualType(), 0, ObjCIvarDecl::None);
+ D = ObjCIvarDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_PROTOCOL:
- D = ObjCProtocolDecl::Create(Context, 0, 0, SourceLocation(),
- SourceLocation());
+ D = ObjCProtocolDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_AT_DEFS_FIELD:
- D = ObjCAtDefsFieldDecl::Create(Context, 0, SourceLocation(),
- SourceLocation(), 0, QualType(), 0);
- break;
- case DECL_OBJC_CLASS:
- D = ObjCClassDecl::Create(Context, 0, SourceLocation());
- break;
- case DECL_OBJC_FORWARD_PROTOCOL:
- D = ObjCForwardProtocolDecl::Create(Context, 0, SourceLocation());
+ D = ObjCAtDefsFieldDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_CATEGORY:
- D = ObjCCategoryDecl::Create(Context, Decl::EmptyShell());
+ D = ObjCCategoryDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_CATEGORY_IMPL:
- D = ObjCCategoryImplDecl::Create(Context, 0, 0, 0, SourceLocation(),
- SourceLocation());
+ D = ObjCCategoryImplDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_IMPLEMENTATION:
- D = ObjCImplementationDecl::Create(Context, 0, 0, 0, SourceLocation(),
- SourceLocation());
+ D = ObjCImplementationDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_COMPATIBLE_ALIAS:
- D = ObjCCompatibleAliasDecl::Create(Context, 0, SourceLocation(), 0, 0);
+ D = ObjCCompatibleAliasDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_PROPERTY:
- D = ObjCPropertyDecl::Create(Context, 0, SourceLocation(), 0, SourceLocation(),
- 0);
+ D = ObjCPropertyDecl::CreateDeserialized(Context, ID);
break;
case DECL_OBJC_PROPERTY_IMPL:
- D = ObjCPropertyImplDecl::Create(Context, 0, SourceLocation(),
- SourceLocation(), 0,
- ObjCPropertyImplDecl::Dynamic, 0,
- SourceLocation());
+ D = ObjCPropertyImplDecl::CreateDeserialized(Context, ID);
break;
case DECL_FIELD:
- D = FieldDecl::Create(Context, 0, SourceLocation(), SourceLocation(), 0,
- QualType(), 0, 0, false, false);
+ D = FieldDecl::CreateDeserialized(Context, ID);
break;
case DECL_INDIRECTFIELD:
- D = IndirectFieldDecl::Create(Context, 0, SourceLocation(), 0, QualType(),
- 0, 0);
+ D = IndirectFieldDecl::CreateDeserialized(Context, ID);
break;
case DECL_VAR:
- D = VarDecl::Create(Context, 0, SourceLocation(), SourceLocation(), 0,
- QualType(), 0, SC_None, SC_None);
+ D = VarDecl::CreateDeserialized(Context, ID);
break;
-
case DECL_IMPLICIT_PARAM:
- D = ImplicitParamDecl::Create(Context, 0, SourceLocation(), 0, QualType());
+ D = ImplicitParamDecl::CreateDeserialized(Context, ID);
break;
-
case DECL_PARM_VAR:
- D = ParmVarDecl::Create(Context, 0, SourceLocation(), SourceLocation(), 0,
- QualType(), 0, SC_None, SC_None, 0);
+ D = ParmVarDecl::CreateDeserialized(Context, ID);
break;
case DECL_FILE_SCOPE_ASM:
- D = FileScopeAsmDecl::Create(Context, 0, 0, SourceLocation(),
- SourceLocation());
+ D = FileScopeAsmDecl::CreateDeserialized(Context, ID);
break;
case DECL_BLOCK:
- D = BlockDecl::Create(Context, 0, SourceLocation());
+ D = BlockDecl::CreateDeserialized(Context, ID);
break;
case DECL_CXX_BASE_SPECIFIERS:
Error("attempt to read a C++ base-specifier record as a declaration");
return 0;
+ case DECL_IMPORT:
+ // Note: last entry of the ImportDecl record is the number of stored source
+ // locations.
+ D = ImportDecl::CreateDeserialized(Context, ID, Record.back());
+ break;
}
assert(D && "Unknown declaration reading AST file");
LoadedDecl(Index, D);
+ // Set the DeclContext before doing any deserialization, to make sure internal
+ // calls to Decl::getASTContext() by Decl's methods will find the
+ // TranslationUnitDecl without crashing.
+ D->setDeclContext(Context.getTranslationUnitDecl());
Reader.Visit(D);
// If this declaration is also a declaration context, get the
@@ -1743,16 +2109,18 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
// Load any relevant update records.
loadDeclUpdateRecords(ID, D);
-
- if (ObjCChainedCategoriesInterfaces.count(ID))
- loadObjCChainedCategories(ID, cast<ObjCInterfaceDecl>(D));
+
+ // Load the categories after recursive loading is finished.
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(D))
+ if (Class->isThisDeclarationADefinition())
+ loadObjCCategories(ID, Class);
// If we have deserialized a declaration that has a definition the
// AST consumer might need to know about, queue it.
// We don't pass it to the consumer immediately because we may be in recursive
// loading, and some declarations may still be initializing.
if (isConsumerInterestedIn(D))
- InterestingDecls.push_back(D);
+ InterestingDecls.push_back(D);
return D;
}
@@ -1766,7 +2134,7 @@ void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
FileOffsetsTy &UpdateOffsets = UpdI->second;
for (FileOffsetsTy::iterator
I = UpdateOffsets.begin(), E = UpdateOffsets.end(); I != E; ++I) {
- Module *F = I->first;
+ ModuleFile *F = I->first;
uint64_t Offset = I->second;
llvm::BitstreamCursor &Cursor = F->DeclsCursor;
SavedStreamPosition SavedPosition(Cursor);
@@ -1778,179 +2146,328 @@ void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");
unsigned Idx = 0;
- ASTDeclReader Reader(*this, *F, Cursor, ID, Record, Idx);
+ ASTDeclReader Reader(*this, *F, Cursor, ID, 0, Record, Idx);
Reader.UpdateDecl(D, *F, Record);
}
}
}
namespace {
- /// \brief Given an ObjC interface, goes through the modules and links to the
- /// interface all the categories for it.
- class ObjCChainedCategoriesVisitor {
- ASTReader &Reader;
- serialization::GlobalDeclID InterfaceID;
- ObjCInterfaceDecl *Interface;
- ObjCCategoryDecl *GlobHeadCat, *GlobTailCat;
- llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
-
- public:
- ObjCChainedCategoriesVisitor(ASTReader &Reader,
- serialization::GlobalDeclID InterfaceID,
- ObjCInterfaceDecl *Interface)
- : Reader(Reader), InterfaceID(InterfaceID), Interface(Interface),
- GlobHeadCat(0), GlobTailCat(0) { }
-
- static bool visit(Module &M, void *UserData) {
- return static_cast<ObjCChainedCategoriesVisitor *>(UserData)->visit(M);
+ struct CompareLocalRedeclarationsInfoToID {
+ bool operator()(const LocalRedeclarationsInfo &X, DeclID Y) {
+ return X.FirstID < Y;
}
- bool visit(Module &M) {
- if (Reader.isDeclIDFromModule(InterfaceID, M))
- return true; // We reached the module where the interface originated
- // from. Stop traversing the imported modules.
+ bool operator()(DeclID X, const LocalRedeclarationsInfo &Y) {
+ return X < Y.FirstID;
+ }
- Module::ChainedObjCCategoriesMap::iterator
- I = M.ChainedObjCCategories.find(InterfaceID);
- if (I == M.ChainedObjCCategories.end())
+ bool operator()(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID < Y.FirstID;
+ }
+ bool operator()(DeclID X, DeclID Y) {
+ return X < Y;
+ }
+ };
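
The comparator above supplies mixed-type orderings so std::lower_bound can binary-search the sorted LocalRedeclarationsInfo array directly with a bare DeclID key, with no temporary key struct. A standalone sketch of the pattern, with Info standing in for the real map entry:

#include <algorithm>
#include <cassert>

typedef unsigned DeclID;

// Illustrative stand-in for LocalRedeclarationsInfo; the array is sorted by
// FirstID.
struct Info {
  DeclID FirstID;
  unsigned Offset;
};

// Mixed-type orderings allow probing the sorted array with a bare DeclID.
struct CompareInfoToID {
  bool operator()(const Info &X, DeclID Y) const { return X.FirstID < Y; }
  bool operator()(DeclID X, const Info &Y) const { return X < Y.FirstID; }
};

int main() {
  const Info Map[] = { {2, 0}, {5, 1}, {9, 2} };
  const Info *Result = std::lower_bound(Map, Map + 3, DeclID(5),
                                        CompareInfoToID());
  assert(Result != Map + 3 && Result->FirstID == 5);
  return 0;
}
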
+
+ /// \brief Module visitor class that finds all of the redeclarations of a
+ /// declaration.
+ class RedeclChainVisitor {
+ ASTReader &Reader;
+ SmallVectorImpl<DeclID> &SearchDecls;
+ llvm::SmallPtrSet<Decl *, 16> &Deserialized;
+ GlobalDeclID CanonID;
+ llvm::SmallVector<Decl *, 4> Chain;
+
+ public:
+ RedeclChainVisitor(ASTReader &Reader, SmallVectorImpl<DeclID> &SearchDecls,
+ llvm::SmallPtrSet<Decl *, 16> &Deserialized,
+ GlobalDeclID CanonID)
+ : Reader(Reader), SearchDecls(SearchDecls), Deserialized(Deserialized),
+ CanonID(CanonID) {
+ for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I)
+ addToChain(Reader.GetDecl(SearchDecls[I]));
+ }
+
+ static bool visit(ModuleFile &M, bool Preorder, void *UserData) {
+ if (Preorder)
return false;
-
- ObjCCategoryDecl *
- HeadCat = Reader.GetLocalDeclAs<ObjCCategoryDecl>(M, I->second.first);
- ObjCCategoryDecl *
- TailCat = Reader.GetLocalDeclAs<ObjCCategoryDecl>(M, I->second.second);
-
- addCategories(HeadCat, TailCat);
- return false;
+
+ return static_cast<RedeclChainVisitor *>(UserData)->visit(M);
}
-
- void addCategories(ObjCCategoryDecl *HeadCat,
- ObjCCategoryDecl *TailCat = 0) {
- if (!HeadCat) {
- assert(!TailCat);
+
+ void addToChain(Decl *D) {
+ if (!D)
return;
+
+ if (Deserialized.count(D)) {
+ Deserialized.erase(D);
+ Chain.push_back(D);
}
-
- if (!TailCat) {
- TailCat = HeadCat;
- while (TailCat->getNextClassCategory())
- TailCat = TailCat->getNextClassCategory();
+ }
+
+ void searchForID(ModuleFile &M, GlobalDeclID GlobalID) {
+ // Map global ID of the first declaration down to the local ID
+ // used in this module file.
+ DeclID ID = Reader.mapGlobalIDToModuleFileGlobalID(M, GlobalID);
+ if (!ID)
+ return;
+
+ // Perform a binary search to find the local redeclarations for this
+ // declaration (if any).
+ const LocalRedeclarationsInfo *Result
+ = std::lower_bound(M.RedeclarationsMap,
+ M.RedeclarationsMap + M.LocalNumRedeclarationsInMap,
+ ID, CompareLocalRedeclarationsInfoToID());
+ if (Result == M.RedeclarationsMap + M.LocalNumRedeclarationsInMap ||
+ Result->FirstID != ID) {
+ // If we have a previously-canonical singleton declaration that was
+ // merged into another redeclaration chain, create a trivial chain
+ // for this single declaration so that it will get wired into the
+ // complete redeclaration chain.
+ if (GlobalID != CanonID &&
+ GlobalID - NUM_PREDEF_DECL_IDS >= M.BaseDeclID &&
+ GlobalID - NUM_PREDEF_DECL_IDS < M.BaseDeclID + M.LocalNumDecls) {
+ addToChain(Reader.GetDecl(GlobalID));
+ }
+
+ return;
}
+
+ // Dig out all of the redeclarations.
+ unsigned Offset = Result->Offset;
+ unsigned N = M.RedeclarationChains[Offset];
+ M.RedeclarationChains[Offset++] = 0; // Don't try to deserialize again
+ for (unsigned I = 0; I != N; ++I)
+ addToChain(Reader.GetLocalDecl(M, M.RedeclarationChains[Offset++]));
+ }
+
+ bool visit(ModuleFile &M) {
+ // Visit each of the declarations.
+ for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I)
+ searchForID(M, SearchDecls[I]);
+ return false;
+ }
+
+ ArrayRef<Decl *> getChain() const {
+ return Chain;
+ }
+ };
+}
- if (!GlobHeadCat) {
- GlobHeadCat = HeadCat;
- GlobTailCat = TailCat;
- } else {
- ASTDeclReader::setNextObjCCategory(GlobTailCat, HeadCat);
- GlobTailCat = TailCat;
- }
+void ASTReader::loadPendingDeclChain(serialization::GlobalDeclID ID) {
+ Decl *D = GetDecl(ID);
+ Decl *CanonDecl = D->getCanonicalDecl();
+
+ // Determine the set of declaration IDs we'll be searching for.
+ llvm::SmallVector<DeclID, 1> SearchDecls;
+ GlobalDeclID CanonID = 0;
+ if (D == CanonDecl) {
+ SearchDecls.push_back(ID); // Always first.
+ CanonID = ID;
+ }
+ MergedDeclsMap::iterator MergedPos = combineStoredMergedDecls(CanonDecl, ID);
+ if (MergedPos != MergedDecls.end())
+ SearchDecls.append(MergedPos->second.begin(), MergedPos->second.end());
+
+ // Build up the list of redeclarations.
+ RedeclChainVisitor Visitor(*this, SearchDecls, RedeclsDeserialized, CanonID);
+ ModuleMgr.visitDepthFirst(&RedeclChainVisitor::visit, &Visitor);
+
+ // Retrieve the chains.
+ ArrayRef<Decl *> Chain = Visitor.getChain();
+ if (Chain.empty())
+ return;
+
+ // Hook up the chains.
+ Decl *MostRecent = CanonDecl->getMostRecentDecl();
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (Chain[I] == CanonDecl)
+ continue;
+
+ ASTDeclReader::attachPreviousDecl(Chain[I], MostRecent);
+ MostRecent = Chain[I];
+ }
+
+ ASTDeclReader::attachLatestDecl(CanonDecl, MostRecent);
+}
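
loadPendingDeclChain finishes by threading the recovered declarations onto the existing chain: each entry's previous link points at the running tail, and the canonical declaration finally records the new most-recent declaration. A simplified model of that stitching, with a bare Decl stand-in:

#include <cstddef>
#include <vector>

// Bare stand-in for a redeclarable declaration: a 'previous' link toward the
// canonical declaration, plus the chain's latest declaration recorded on the
// canonical one.
struct Decl {
  Decl *Previous;
  Decl *Latest;
  Decl() : Previous(0), Latest(0) {}
};

// Mirrors the hookup above: wire each recovered declaration's previous link
// to the running tail, then point the canonical declaration at the new tail.
void hookUpChain(Decl *Canon, const std::vector<Decl *> &Chain) {
  Decl *MostRecent = Canon->Latest ? Canon->Latest : Canon;
  for (std::size_t I = 0, N = Chain.size(); I != N; ++I) {
    if (Chain[I] == Canon)
      continue;
    Chain[I]->Previous = MostRecent;
    MostRecent = Chain[I];
  }
  Canon->Latest = MostRecent;
}
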
- llvm::DenseSet<DeclarationName> Checked;
- for (ObjCCategoryDecl *Cat = HeadCat,
- *CatEnd = TailCat->getNextClassCategory();
- Cat != CatEnd; Cat = Cat->getNextClassCategory()) {
- if (Checked.count(Cat->getDeclName()))
- continue;
- Checked.insert(Cat->getDeclName());
- checkForDuplicate(Cat);
- }
+namespace {
+ struct CompareObjCCategoriesInfo {
+ bool operator()(const ObjCCategoriesInfo &X, DeclID Y) {
+ return X.DefinitionID < Y;
}
+
+ bool operator()(DeclID X, const ObjCCategoriesInfo &Y) {
+ return X < Y.DefinitionID;
+ }
+
+ bool operator()(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID < Y.DefinitionID;
+ }
+ bool operator()(DeclID X, DeclID Y) {
+ return X < Y;
+ }
+ };
- /// \brief Warns for duplicate categories that come from different modules.
- void checkForDuplicate(ObjCCategoryDecl *Cat) {
- DeclarationName Name = Cat->getDeclName();
- // Find the top category with the same name. We do not want to warn for
- // duplicates along the established chain because there were already
- // warnings for them when the module was created. We only want to warn for
- // duplicates between non-dependent modules:
- //
- // MT //
- // / \ //
- // ML MR //
- //
- // We want to warn for duplicates between ML and MR,not between ML and MT.
- //
- // FIXME: We should not warn for duplicates in diamond:
- //
- // MT //
- // / \ //
- // ML MR //
- // \ / //
- // MB //
- //
- // If there are duplicates in ML/MR, there will be warning when creating
- // MB *and* when importing MB. We should not warn when importing.
- for (ObjCCategoryDecl *Next = Cat->getNextClassCategory(); Next;
- Next = Next->getNextClassCategory()) {
- if (Next->getDeclName() == Name)
- Cat = Next;
+ /// \brief Given an ObjC interface, goes through the modules and links to the
+ /// interface all the categories for it.
+ class ObjCCategoriesVisitor {
+ ASTReader &Reader;
+ serialization::GlobalDeclID InterfaceID;
+ ObjCInterfaceDecl *Interface;
+ llvm::SmallPtrSet<ObjCCategoryDecl *, 16> &Deserialized;
+ unsigned PreviousGeneration;
+ ObjCCategoryDecl *Tail;
+ llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
+
+ void add(ObjCCategoryDecl *Cat) {
+ // Only process each category once.
+ if (!Deserialized.count(Cat))
+ return;
+ Deserialized.erase(Cat);
+
+ // Check for duplicate categories.
+ if (Cat->getDeclName()) {
+ ObjCCategoryDecl *&Existing = NameCategoryMap[Cat->getDeclName()];
+ if (Existing &&
+ Reader.getOwningModuleFile(Existing)
+ != Reader.getOwningModuleFile(Cat)) {
+ // FIXME: We should not warn for duplicates in diamond:
+ //
+ // MT //
+ // / \ //
+ // ML MR //
+ // \ / //
+ // MB //
+ //
+ // If there are duplicates in ML/MR, there will be warning when
+ // creating MB *and* when importing MB. We should not warn when
+ // importing.
+ Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
+ << Interface->getDeclName() << Cat->getDeclName();
+ Reader.Diag(Existing->getLocation(), diag::note_previous_definition);
+ } else if (!Existing) {
+ // Record this category.
+ Existing = Cat;
+ }
+ }
+
+ // Add this category to the end of the chain.
+ if (Tail)
+ ASTDeclReader::setNextObjCCategory(Tail, Cat);
+ else
+ Interface->setCategoryList(Cat);
+ Tail = Cat;
+ }
+
+ public:
+ ObjCCategoriesVisitor(ASTReader &Reader,
+ serialization::GlobalDeclID InterfaceID,
+ ObjCInterfaceDecl *Interface,
+ llvm::SmallPtrSet<ObjCCategoryDecl *, 16> &Deserialized,
+ unsigned PreviousGeneration)
+ : Reader(Reader), InterfaceID(InterfaceID), Interface(Interface),
+ Deserialized(Deserialized), PreviousGeneration(PreviousGeneration),
+ Tail(0)
+ {
+ // Populate the name -> category map with the set of known categories.
+ for (ObjCCategoryDecl *Cat = Interface->getCategoryList(); Cat;
+ Cat = Cat->getNextClassCategory()) {
+ if (Cat->getDeclName())
+ NameCategoryMap[Cat->getDeclName()] = Cat;
+
+ // Keep track of the tail of the category list.
+ Tail = Cat;
}
+ }
- ObjCCategoryDecl *&PrevCat = NameCategoryMap[Name];
- if (!PrevCat)
- PrevCat = Cat;
+ static bool visit(ModuleFile &M, void *UserData) {
+ return static_cast<ObjCCategoriesVisitor *>(UserData)->visit(M);
+ }
- if (PrevCat != Cat) {
- Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
- << Interface->getDeclName() << Name;
- Reader.Diag(PrevCat->getLocation(), diag::note_previous_definition);
+ bool visit(ModuleFile &M) {
+ // If we've loaded all of the category information we care about from
+ // this module file, we're done.
+ if (M.Generation <= PreviousGeneration)
+ return true;
+
+ // Map global ID of the definition down to the local ID used in this
+ // module file. If there is no such mapping, we'll find nothing here
+ // (or in any module it imports).
+ DeclID LocalID = Reader.mapGlobalIDToModuleFileGlobalID(M, InterfaceID);
+ if (!LocalID)
+ return true;
+
+ // Perform a binary search to find the local redeclarations for this
+ // declaration (if any).
+ const ObjCCategoriesInfo *Result
+ = std::lower_bound(M.ObjCCategoriesMap,
+ M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap,
+ LocalID, CompareObjCCategoriesInfo());
+ if (Result == M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap ||
+ Result->DefinitionID != LocalID) {
+ // We didn't find anything. If the class definition is in this module
+ // file, then the module files it depends on cannot have any categories,
+ // so suppress further lookup.
+ return Reader.isDeclIDFromModule(InterfaceID, M);
}
+
+ // We found something. Dig out all of the categories.
+ unsigned Offset = Result->Offset;
+ unsigned N = M.ObjCCategories[Offset];
+ M.ObjCCategories[Offset++] = 0; // Don't try to deserialize again
+ for (unsigned I = 0; I != N; ++I)
+ add(cast_or_null<ObjCCategoryDecl>(
+ Reader.GetLocalDecl(M, M.ObjCCategories[Offset++])));
+ return true;
}
-
- ObjCCategoryDecl *getHeadCategory() const { return GlobHeadCat; }
};
}
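
The visitor's add() above combines two pieces of bookkeeping: an explicit Tail pointer for O(1) appends to the category list, and a name map so a duplicate category arriving from a different module file is flagged exactly once. A compact standalone model (Category and CategoryChain are illustrative, not Clang types):

#include <map>
#include <string>

// Illustrative stand-ins; in Clang the chain lives on the ObjCInterfaceDecl
// and duplicates produce warn_dup_category_def diagnostics.
struct Category {
  std::string Name;
  int OwningModuleFile;
  Category *Next;
};

struct CategoryChain {
  Category *Head;
  Category *Tail;
  std::map<std::string, Category *> ByName;
  CategoryChain() : Head(0), Tail(0) {}

  // Append Cat in O(1) via the tail pointer; report false when a same-named
  // category arrives from a different module file than the recorded one.
  bool add(Category *Cat) {
    bool Unique = true;
    if (!Cat->Name.empty()) {
      Category *&Existing = ByName[Cat->Name];
      if (Existing && Existing->OwningModuleFile != Cat->OwningModuleFile)
        Unique = false; // duplicate definition across module files
      else if (!Existing)
        Existing = Cat;
    }
    if (Tail)
      Tail->Next = Cat;
    else
      Head = Cat;
    Tail = Cat;
    return Unique;
  }
};
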
-void ASTReader::loadObjCChainedCategories(serialization::GlobalDeclID ID,
- ObjCInterfaceDecl *D) {
- ObjCChainedCategoriesVisitor Visitor(*this, ID, D);
- ModuleMgr.visit(ObjCChainedCategoriesVisitor::visit, &Visitor);
- // Also add the categories that the interface already links to.
- Visitor.addCategories(D->getCategoryList());
- D->setCategoryList(Visitor.getHeadCategory());
+void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
+ ObjCInterfaceDecl *D,
+ unsigned PreviousGeneration) {
+ ObjCCategoriesVisitor Visitor(*this, ID, D, CategoriesDeserialized,
+ PreviousGeneration);
+ ModuleMgr.visit(ObjCCategoriesVisitor::visit, &Visitor);
}
-void ASTDeclReader::UpdateDecl(Decl *D, Module &Module,
+void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
const RecordData &Record) {
unsigned Idx = 0;
while (Idx < Record.size()) {
switch ((DeclUpdateKind)Record[Idx++]) {
- case UPD_CXX_SET_DEFINITIONDATA: {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
- CXXRecordDecl *DefinitionDecl
- = Reader.ReadDeclAs<CXXRecordDecl>(Module, Record, Idx);
- assert(!RD->DefinitionData && "DefinitionData is already set!");
- InitializeCXXDefinitionData(RD, DefinitionDecl, Record, Idx);
- break;
- }
-
case UPD_CXX_ADDED_IMPLICIT_MEMBER:
- cast<CXXRecordDecl>(D)->addedMember(Reader.ReadDecl(Module, Record, Idx));
+ cast<CXXRecordDecl>(D)->addedMember(Reader.ReadDecl(ModuleFile, Record, Idx));
break;
case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
// It will be added to the template's specializations set when loaded.
- (void)Reader.ReadDecl(Module, Record, Idx);
+ (void)Reader.ReadDecl(ModuleFile, Record, Idx);
break;
case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE: {
NamespaceDecl *Anon
- = Reader.ReadDeclAs<NamespaceDecl>(Module, Record, Idx);
- // Guard against these being loaded out of original order. Don't use
- // getNextNamespace(), since it tries to access the context and can't in
- // the middle of deserialization.
- if (!Anon->NextNamespace) {
+ = Reader.ReadDeclAs<NamespaceDecl>(ModuleFile, Record, Idx);
+
+ // Each module has its own anonymous namespace, which is disjoint from
+ // any other module's anonymous namespaces, so don't attach the anonymous
+ // namespace at all.
+ if (ModuleFile.Kind != MK_Module) {
if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(D))
TU->setAnonymousNamespace(Anon);
else
- cast<NamespaceDecl>(D)->OrigOrAnonNamespace.setPointer(Anon);
+ cast<NamespaceDecl>(D)->setAnonymousNamespace(Anon);
}
break;
}
case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
cast<VarDecl>(D)->getMemberSpecializationInfo()->setPointOfInstantiation(
- Reader.ReadSourceLocation(Module, Record, Idx));
+ Reader.ReadSourceLocation(ModuleFile, Record, Idx));
break;
}
}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h
index 99e8be5..3a1dfcf 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h
@@ -15,6 +15,7 @@
#include "clang/Basic/OnDiskHashTable.h"
#include "clang/AST/DeclarationName.h"
+#include "llvm/Support/Endian.h"
#include <utility>
#include <sys/stat.h>
@@ -26,7 +27,7 @@ struct HeaderFileInfo;
namespace serialization {
-class Module;
+class ModuleFile;
namespace reader {
@@ -34,14 +35,15 @@ namespace reader {
/// in an AST file.
class ASTDeclContextNameLookupTrait {
ASTReader &Reader;
- Module &F;
+ ModuleFile &F;
public:
/// \brief Pair of begin/end iterators for DeclIDs.
///
/// Note that these declaration IDs are local to the module that contains this
 /// particular lookup table.
- typedef std::pair<DeclID *, DeclID *> data_type;
+ typedef llvm::support::ulittle32_t LE32DeclID;
+ typedef std::pair<LE32DeclID *, LE32DeclID *> data_type;
/// \brief Special internal key for declaration names.
/// The hash table creates keys for comparison; we do not create
@@ -56,7 +58,7 @@ public:
typedef DeclNameKey internal_key_type;
explicit ASTDeclContextNameLookupTrait(ASTReader &Reader,
- Module &F)
+ ModuleFile &F)
: Reader(Reader), F(F) { }
static bool EqualKey(const internal_key_type& a,
@@ -84,7 +86,7 @@ typedef OnDiskChainedHashTable<ASTDeclContextNameLookupTrait>
/// \brief Class that performs lookup for an identifier stored in an AST file.
class ASTIdentifierLookupTrait {
ASTReader &Reader;
- Module &F;
+ ModuleFile &F;
// If we know the IdentifierInfo in advance, it is here and we will
// not build a new one. Used when deserializing information about an
@@ -98,7 +100,7 @@ public:
typedef external_key_type internal_key_type;
- ASTIdentifierLookupTrait(ASTReader &Reader, Module &F,
+ ASTIdentifierLookupTrait(ASTReader &Reader, ModuleFile &F,
IdentifierInfo *II = 0)
: Reader(Reader), F(F), KnownII(II) { }
@@ -127,6 +129,9 @@ public:
IdentifierInfo *ReadData(const internal_key_type& k,
const unsigned char* d,
unsigned DataLen);
+
+ ASTReader &getReader() const { return Reader; }
+
};
/// \brief The on-disk hash table used to contain information about
@@ -138,7 +143,7 @@ typedef OnDiskChainedHashTable<ASTIdentifierLookupTrait>
/// method pool stored in an AST file.
class ASTSelectorLookupTrait {
ASTReader &Reader;
- Module &F;
+ ModuleFile &F;
public:
struct data_type {
@@ -150,7 +155,7 @@ public:
typedef Selector external_key_type;
typedef external_key_type internal_key_type;
- ASTSelectorLookupTrait(ASTReader &Reader, Module &F)
+ ASTSelectorLookupTrait(ASTReader &Reader, ModuleFile &F)
: Reader(Reader), F(F) { }
static bool EqualKey(const internal_key_type& a,
@@ -185,7 +190,7 @@ typedef OnDiskChainedHashTable<ASTSelectorLookupTrait>
/// and symlinks.
class HeaderFileInfoTrait {
ASTReader &Reader;
- Module &M;
+ ModuleFile &M;
HeaderSearch *HS;
const char *FrameworkStrings;
const char *SearchPath;
@@ -210,7 +215,7 @@ public:
typedef HeaderFileInfo data_type;
- HeaderFileInfoTrait(ASTReader &Reader, Module &M, HeaderSearch *HS,
+ HeaderFileInfoTrait(ASTReader &Reader, ModuleFile &M, HeaderSearch *HS,
const char *FrameworkStrings,
const char *SearchPath = 0)
: Reader(Reader), M(M), HS(HS), FrameworkStrings(FrameworkStrings),
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
index ab07b85..2eeb090 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace clang::serialization;
@@ -25,7 +26,7 @@ namespace clang {
typedef ASTReader::RecordData RecordData;
ASTReader &Reader;
- Module &F;
+ ModuleFile &F;
llvm::BitstreamCursor &DeclsCursor;
const ASTReader::RecordData &Record;
unsigned &Idx;
@@ -66,7 +67,7 @@ namespace clang {
}
public:
- ASTStmtReader(ASTReader &Reader, Module &F,
+ ASTStmtReader(ASTReader &Reader, ModuleFile &F,
llvm::BitstreamCursor &Cursor,
const ASTReader::RecordData &Record, unsigned &Idx)
: Reader(Reader), F(F), DeclsCursor(Cursor), Record(Record), Idx(Idx) { }
@@ -78,7 +79,10 @@ namespace clang {
/// \brief The number of record fields required for the Expr class
/// itself.
static const unsigned NumExprFields = NumStmtFields + 7;
-
+
+ /// \brief Read and initialize an ASTTemplateKWAndArgsInfo structure.
+ void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
+ unsigned NumTemplateArgs);
 /// \brief Read and initialize an ExplicitTemplateArgumentList structure.
void ReadExplicitTemplateArgumentList(ASTTemplateArgumentListInfo &ArgList,
unsigned NumTemplateArgs);
@@ -91,15 +95,16 @@ namespace clang {
}
void ASTStmtReader::
-ReadExplicitTemplateArgumentList(ASTTemplateArgumentListInfo &ArgList,
- unsigned NumTemplateArgs) {
+ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
+ unsigned NumTemplateArgs) {
+ SourceLocation TemplateKWLoc = ReadSourceLocation(Record, Idx);
TemplateArgumentListInfo ArgInfo;
ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx));
ArgInfo.setRAngleLoc(ReadSourceLocation(Record, Idx));
for (unsigned i = 0; i != NumTemplateArgs; ++i)
ArgInfo.addArgument(
Reader.ReadTemplateArgumentLoc(F, Record, Idx));
- ArgList.initializeFrom(ArgInfo);
+ Args.initializeFrom(TemplateKWLoc, ArgInfo);
}
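
Compared with the old ReadExplicitTemplateArgumentList, the new reader consumes one extra leading field, the source location of the 'template' keyword, before the angle-bracket locations and the arguments. A rough sketch of the record layout it expects, with simplified stand-in types:

#include <cstdint>
#include <vector>

// Simplified stand-ins for the serialized record and source locations.
typedef std::vector<uint64_t> RecordData;
struct SourceLocation { uint64_t Raw; };

struct TemplateArgsInfo {
  SourceLocation TemplateKWLoc; // the field added in this revision
  SourceLocation LAngleLoc;
  SourceLocation RAngleLoc;
  std::vector<uint64_t> Args;   // stands in for TemplateArgumentLoc entries
};

// The template-keyword location is read first, then the angle brackets,
// then each argument in order.
TemplateArgsInfo readTemplateKWAndArgs(const RecordData &Record,
                                       unsigned &Idx, unsigned NumArgs) {
  TemplateArgsInfo Info;
  Info.TemplateKWLoc.Raw = Record[Idx++];
  Info.LAngleLoc.Raw = Record[Idx++];
  Info.RAngleLoc.Raw = Record[Idx++];
  for (unsigned I = 0; I != NumArgs; ++I)
    Info.Args.push_back(Record[Idx++]);
  return Info;
}
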
void ASTStmtReader::VisitStmt(Stmt *S) {
@@ -326,10 +331,11 @@ void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
E->DeclRefExprBits.HasQualifier = Record[Idx++];
E->DeclRefExprBits.HasFoundDecl = Record[Idx++];
- E->DeclRefExprBits.HasExplicitTemplateArgs = Record[Idx++];
+ E->DeclRefExprBits.HasTemplateKWAndArgsInfo = Record[Idx++];
E->DeclRefExprBits.HadMultipleCandidates = Record[Idx++];
+ E->DeclRefExprBits.RefersToEnclosingLocal = Record[Idx++];
unsigned NumTemplateArgs = 0;
- if (E->hasExplicitTemplateArgs())
+ if (E->hasTemplateKWAndArgsInfo())
NumTemplateArgs = Record[Idx++];
if (E->hasQualifier())
@@ -339,9 +345,9 @@ void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
if (E->hasFoundDecl())
E->getInternalFoundDecl() = ReadDeclAs<NamedDecl>(Record, Idx);
- if (E->hasExplicitTemplateArgs())
- ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
- NumTemplateArgs);
+ if (E->hasTemplateKWAndArgsInfo())
+ ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ NumTemplateArgs);
E->setDecl(ReadDeclAs<ValueDecl>(Record, Idx));
E->setLocation(ReadSourceLocation(Record, Idx));
@@ -372,12 +378,13 @@ void ASTStmtReader::VisitStringLiteral(StringLiteral *E) {
assert(Record[Idx] == E->getNumConcatenated() &&
"Wrong number of concatenated tokens!");
++Idx;
- E->Kind = static_cast<StringLiteral::StringKind>(Record[Idx++]);
- E->IsPascal = Record[Idx++];
+ StringLiteral::StringKind kind =
+ static_cast<StringLiteral::StringKind>(Record[Idx++]);
+ bool isPascal = Record[Idx++];
// Read string data
- llvm::SmallString<16> Str(&Record[Idx], &Record[Idx] + Len);
- E->setString(Reader.getContext(), Str.str());
+ SmallString<16> Str(&Record[Idx], &Record[Idx] + Len);
+ E->setString(Reader.getContext(), Str.str(), kind, isPascal);
Idx += Len;
// Read source locations
@@ -567,8 +574,6 @@ ASTStmtReader::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
E->SubExprs[BinaryConditionalOperator::RHS] = Reader.ReadSubExpr();
E->QuestionLoc = ReadSourceLocation(Record, Idx);
E->ColonLoc = ReadSourceLocation(Record, Idx);
-
- E->getOpaqueValue()->setSourceExpr(E->getCommon());
}
void ASTStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
@@ -614,6 +619,7 @@ void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
} else
E->ArrayFillerOrUnionFieldInit = ReadDeclAs<FieldDecl>(Record, Idx);
E->sawArrayRangeDesignator(Record[Idx++]);
+ E->setInitializesStdInitializerList(Record[Idx++]);
unsigned NumInits = Record[Idx++];
E->reserveInits(Reader.getContext(), NumInits);
if (isArrayFiller) {
@@ -747,14 +753,6 @@ void ASTStmtReader::VisitBlockExpr(BlockExpr *E) {
E->setBlockDecl(ReadDeclAs<BlockDecl>(Record, Idx));
}
-void ASTStmtReader::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
- VisitExpr(E);
- E->setDecl(ReadDeclAs<VarDecl>(Record, Idx));
- E->setLocation(ReadSourceLocation(Record, Idx));
- E->setByRef(Record[Idx++]);
- E->setConstQualAdded(Record[Idx++]);
-}
-
void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
VisitExpr(E);
E->NumAssocs = Record[Idx++];
@@ -774,23 +772,30 @@ void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
E->RParenLoc = ReadSourceLocation(Record, Idx);
}
-void ASTStmtReader::VisitAtomicExpr(AtomicExpr *E) {
+void ASTStmtReader::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
VisitExpr(E);
- E->setOp(AtomicExpr::AtomicOp(Record[Idx++]));
- E->setPtr(Reader.ReadSubExpr());
- E->setOrder(Reader.ReadSubExpr());
- E->setNumSubExprs(2);
- if (E->getOp() != AtomicExpr::Load) {
- E->setVal1(Reader.ReadSubExpr());
- E->setNumSubExprs(3);
- }
- if (E->isCmpXChg()) {
- E->setOrderFail(Reader.ReadSubExpr());
- E->setVal2(Reader.ReadSubExpr());
- E->setNumSubExprs(5);
+ unsigned numSemanticExprs = Record[Idx++];
+ assert(numSemanticExprs + 1 == E->PseudoObjectExprBits.NumSubExprs);
+ E->PseudoObjectExprBits.ResultIndex = Record[Idx++];
+
+ // Read the syntactic expression.
+ E->getSubExprsBuffer()[0] = Reader.ReadSubExpr();
+
+ // Read all the semantic expressions.
+ for (unsigned i = 0; i != numSemanticExprs; ++i) {
+ Expr *subExpr = Reader.ReadSubExpr();
+ E->getSubExprsBuffer()[i+1] = subExpr;
}
- E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitAtomicExpr(AtomicExpr *E) {
+ VisitExpr(E);
+ E->Op = AtomicExpr::AtomicOp(Record[Idx++]);
+ E->NumSubExprs = AtomicExpr::getNumSubExprs(E->Op);
+ for (unsigned I = 0; I != E->NumSubExprs; ++I)
+ E->SubExprs[I] = Reader.ReadSubExpr();
+ E->BuiltinLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
}
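The rewritten `VisitAtomicExpr` drops the per-opcode special cases: the operand count is now a pure function of the opcode (`AtomicExpr::getNumSubExprs`), so a single loop reads every operand and the reader cannot drift out of sync with the writer. A reduced sketch of the pattern; the specific counts below are illustrative:

```cpp
#include <vector>

enum class AtomicOp { Load, Store, Exchange, CmpXchg };

// One source of truth for the operand count, shared (in spirit) by the
// serializer, the deserializer, and Sema.
static unsigned numSubExprs(AtomicOp Op) {
  switch (Op) {
  case AtomicOp::Load:     return 2; // ptr, order
  case AtomicOp::Store:
  case AtomicOp::Exchange: return 3; // ptr, order, val
  case AtomicOp::CmpXchg:  return 5; // ptr, order, val1, orderfail, val2
  }
  return 0;
}

struct Expr;
struct AtomicNode {
  AtomicOp Op;
  std::vector<Expr *> SubExprs;
};

static AtomicNode readAtomic(AtomicOp Op, Expr *(*ReadSubExpr)()) {
  AtomicNode N{Op, {}};
  for (unsigned I = 0, E = numSubExprs(Op); I != E; ++I)
    N.SubExprs.push_back(ReadSubExpr()); // uniform loop, no special cases
  return N;
}

static Expr *stubSubExpr() { return nullptr; }

int main() {
  return readAtomic(AtomicOp::CmpXchg, stubSubExpr).SubExprs.size() == 5 ? 0
                                                                         : 1;
}
```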
//===----------------------------------------------------------------------===//
@@ -802,6 +807,45 @@ void ASTStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
E->setAtLoc(ReadSourceLocation(Record, Idx));
}
+void ASTStmtReader::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ VisitExpr(E);
+ // The number can be any of several literal kinds: IntegerLiteral,
+ // FloatingLiteral, etc.
+ E->Number = Reader.ReadSubStmt();
+ E->ObjCNumericLiteralMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->AtLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ VisitExpr(E);
+ unsigned NumElements = Record[Idx++];
+ assert(NumElements == E->getNumElements() && "Wrong number of elements");
+ Expr **Elements = E->getElements();
+ for (unsigned I = 0, N = NumElements; I != N; ++I)
+ Elements[I] = Reader.ReadSubExpr();
+ E->ArrayWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+ unsigned NumElements = Record[Idx++];
+ assert(NumElements == E->getNumElements() && "Wrong number of elements");
+ bool HasPackExpansions = Record[Idx++];
+ assert(HasPackExpansions == E->HasPackExpansions &&
+        "Pack expansion mismatch");
+ ObjCDictionaryLiteral::KeyValuePair *KeyValues = E->getKeyValues();
+ ObjCDictionaryLiteral::ExpansionData *Expansions = E->getExpansionData();
+ for (unsigned I = 0; I != NumElements; ++I) {
+ KeyValues[I].Key = Reader.ReadSubExpr();
+ KeyValues[I].Value = Reader.ReadSubExpr();
+ if (HasPackExpansions) {
+ Expansions[I].EllipsisLoc = ReadSourceLocation(Record, Idx);
+ Expansions[I].NumExpansionsPlusOne = Record[Idx++];
+ }
+ }
+ E->DictWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
+}
+
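`VisitObjCDictionaryLiteral` shows the contract for nodes with optional trailing storage: key/value pairs are always present, while the per-element pack-expansion records exist only when `HasPackExpansions` was set at allocation time, and reader and writer must agree on both counts. A simplified sketch using plain vectors in place of trailing allocations:

```cpp
#include <cstdint>
#include <vector>

struct Expr;
struct KeyValue { Expr *Key, *Value; };
struct Expansion { uint64_t EllipsisLocRaw, NumExpansionsPlusOne; };

struct DictLiteral {
  std::vector<KeyValue> KeyValues;
  std::vector<Expansion> Expansions; // empty unless HasPackExpansions
};

// Keys and values come from the sub-expression stream; the expansion side
// table comes from the integer record, one pair per element.
static DictLiteral readDict(const std::vector<uint64_t> &Record, unsigned &Idx,
                            Expr *(*ReadSubExpr)()) {
  unsigned NumElements = Record[Idx++];
  bool HasPackExpansions = Record[Idx++] != 0;
  DictLiteral D;
  for (unsigned I = 0; I != NumElements; ++I) {
    Expr *Key = ReadSubExpr();
    Expr *Value = ReadSubExpr();
    D.KeyValues.push_back(KeyValue{Key, Value});
    if (HasPackExpansions) {
      D.Expansions.push_back(Expansion{Record[Idx], Record[Idx + 1]});
      Idx += 2;
    }
  }
  return D;
}

static Expr *stubSubExpr() { return nullptr; }

int main() {
  // NumElements, HasPackExpansions, then (loc, count) per element.
  std::vector<uint64_t> Record = {2, 1, 30, 0, 31, 2};
  unsigned Idx = 0;
  return readDict(Record, Idx, stubSubExpr).Expansions.size() == 2 ? 0 : 1;
}
```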
void ASTStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
VisitExpr(E);
E->setEncodedTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
@@ -834,13 +878,15 @@ void ASTStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
void ASTStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
VisitExpr(E);
+ unsigned MethodRefFlags = Record[Idx++];
bool Implicit = Record[Idx++] != 0;
if (Implicit) {
ObjCMethodDecl *Getter = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
ObjCMethodDecl *Setter = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
- E->setImplicitProperty(Getter, Setter);
+ E->setImplicitProperty(Getter, Setter, MethodRefFlags);
} else {
- E->setExplicitProperty(ReadDeclAs<ObjCPropertyDecl>(Record, Idx));
+ E->setExplicitProperty(ReadDeclAs<ObjCPropertyDecl>(Record, Idx),
+ MethodRefFlags);
}
E->setLocation(ReadSourceLocation(Record, Idx));
E->setReceiverLocation(ReadSourceLocation(Record, Idx));
@@ -857,6 +903,15 @@ void ASTStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
}
}
+void ASTStmtReader::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ VisitExpr(E);
+ E->setRBracket(ReadSourceLocation(Record, Idx));
+ E->setBaseExpr(Reader.ReadSubExpr());
+ E->setKeyExpr(Reader.ReadSubExpr());
+ E->GetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->SetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+}
+
void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
VisitExpr(E);
assert(Record[Idx] == E->getNumArgs());
@@ -864,6 +919,7 @@ void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
unsigned NumStoredSelLocs = Record[Idx++];
E->SelLocsKind = Record[Idx++];
E->setDelegateInitCall(Record[Idx++]);
+ E->IsImplicit = Record[Idx++];
ObjCMessageExpr::ReceiverKind Kind
= static_cast<ObjCMessageExpr::ReceiverKind>(Record[Idx++]);
switch (Kind) {
@@ -958,6 +1014,12 @@ void ASTStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
S->setThrowLoc(ReadSourceLocation(Record, Idx));
}
+void ASTStmtReader::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
+ VisitExpr(E);
+ E->setValue(Record[Idx++]);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
//===----------------------------------------------------------------------===//
// C++ Expressions and Statements
//===----------------------------------------------------------------------===//
@@ -992,6 +1054,15 @@ void ASTStmtReader::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
S->setBody(Reader.ReadSubStmt());
}
+void ASTStmtReader::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ S->KeywordLoc = ReadSourceLocation(Record, Idx);
+ S->IsIfExists = Record[Idx++];
+ S->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameInfo(S->NameInfo, Record, Idx);
+ S->SubStmt = Reader.ReadSubStmt();
+}
+
void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
E->setOperator((OverloadedOperatorKind)Record[Idx++]);
@@ -1018,6 +1089,35 @@ void ASTStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
E->Type = GetTypeSourceInfo(Record, Idx);
}
+void ASTStmtReader::VisitLambdaExpr(LambdaExpr *E) {
+ VisitExpr(E);
+ unsigned NumCaptures = Record[Idx++];
+ assert(NumCaptures == E->NumCaptures);
+ (void)NumCaptures;
+ unsigned NumArrayIndexVars = Record[Idx++];
+ E->IntroducerRange = ReadSourceRange(Record, Idx);
+ E->CaptureDefault = static_cast<LambdaCaptureDefault>(Record[Idx++]);
+ E->ExplicitParams = Record[Idx++];
+ E->ExplicitResultType = Record[Idx++];
+ E->ClosingBrace = ReadSourceLocation(Record, Idx);
+
+ // Read capture initializers.
+ for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
+ CEnd = E->capture_init_end();
+ C != CEnd; ++C)
+ *C = Reader.ReadSubExpr();
+
+ // Read array capture index variables.
+ if (NumArrayIndexVars > 0) {
+ unsigned *ArrayIndexStarts = E->getArrayIndexStarts();
+ for (unsigned I = 0; I != NumCaptures + 1; ++I)
+ ArrayIndexStarts[I] = Record[Idx++];
+
+ VarDecl **ArrayIndexVars = E->getArrayIndexVars();
+ for (unsigned I = 0; I != NumArrayIndexVars; ++I)
+ ArrayIndexVars[I] = ReadDeclAs<VarDecl>(Record, Idx);
+ }
+}
+
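In `VisitLambdaExpr`, `ArrayIndexStarts` is an offsets table with `NumCaptures + 1` entries: entries `I` and `I + 1` bound the half-open range of `ArrayIndexVars` owned by capture `I` (the index variables of a by-copy array capture). A minimal sketch of that CSR-style layout, assuming that interpretation:

```cpp
#include <cassert>
#include <utility>
#include <vector>

struct VarDecl;

struct LambdaArrayIndices {
  std::vector<unsigned> Starts;  // NumCaptures + 1 entries
  std::vector<VarDecl *> Vars;   // NumArrayIndexVars entries
};

// Half-open range [first, second) of index variables for one capture.
static std::pair<unsigned, unsigned>
indexRange(const LambdaArrayIndices &L, unsigned Capture) {
  assert(Capture + 1 < L.Starts.size() && "capture out of range");
  return {L.Starts[Capture], L.Starts[Capture + 1]};
}

int main() {
  // Three captures; only the second captures an array (two index vars).
  LambdaArrayIndices L{{0, 0, 2, 2}, {nullptr, nullptr}};
  auto R = indexRange(L, 1);
  return (R.first == 0 && R.second == 2) ? 0 : 1;
}
```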
void ASTStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
VisitExplicitCastExpr(E);
SourceRange R = ReadSourceRange(Record, Idx);
@@ -1047,6 +1147,11 @@ void ASTStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
E->setRParenLoc(ReadSourceLocation(Record, Idx));
}
+void ASTStmtReader::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
+ VisitCallExpr(E);
+ E->UDSuffixLoc = ReadSourceLocation(Record, Idx);
+}
+
void ASTStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
VisitExpr(E);
E->setValue(Record[Idx++]);
@@ -1108,27 +1213,19 @@ void ASTStmtReader::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
E->GlobalNew = Record[Idx++];
- E->Initializer = Record[Idx++];
- E->UsualArrayDeleteWantsSize = Record[Idx++];
bool isArray = Record[Idx++];
- E->setHadMultipleCandidates(Record[Idx++]);
+ E->UsualArrayDeleteWantsSize = Record[Idx++];
unsigned NumPlacementArgs = Record[Idx++];
- unsigned NumCtorArgs = Record[Idx++];
+ E->StoredInitializationStyle = Record[Idx++];
E->setOperatorNew(ReadDeclAs<FunctionDecl>(Record, Idx));
E->setOperatorDelete(ReadDeclAs<FunctionDecl>(Record, Idx));
- E->setConstructor(ReadDeclAs<CXXConstructorDecl>(Record, Idx));
E->AllocatedTypeInfo = GetTypeSourceInfo(Record, Idx);
- SourceRange TypeIdParens;
- TypeIdParens.setBegin(ReadSourceLocation(Record, Idx));
- TypeIdParens.setEnd(ReadSourceLocation(Record, Idx));
- E->TypeIdParens = TypeIdParens;
+ E->TypeIdParens = ReadSourceRange(Record, Idx);
E->StartLoc = ReadSourceLocation(Record, Idx);
- E->EndLoc = ReadSourceLocation(Record, Idx);
- E->ConstructorLParen = ReadSourceLocation(Record, Idx);
- E->ConstructorRParen = ReadSourceLocation(Record, Idx);
+ E->DirectInitRange = ReadSourceRange(Record, Idx);
E->AllocateArgsArray(Reader.getContext(), isArray, NumPlacementArgs,
- NumCtorArgs);
+ E->StoredInitializationStyle != 0);
// Install all the subexpressions.
for (CXXNewExpr::raw_arg_iterator I = E->raw_arg_begin(),e = E->raw_arg_end();
@@ -1167,22 +1264,22 @@ void ASTStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
VisitExpr(E);
- unsigned NumTemps = Record[Idx++];
- if (NumTemps) {
- E->setNumTemporaries(Reader.getContext(), NumTemps);
- for (unsigned i = 0; i != NumTemps; ++i)
- E->setTemporary(i, Reader.ReadCXXTemporary(F, Record, Idx));
- }
- E->setSubExpr(Reader.ReadSubExpr());
+
+ unsigned NumObjects = Record[Idx++];
+ assert(NumObjects == E->getNumObjects());
+ for (unsigned i = 0; i != NumObjects; ++i)
+ E->getObjectsBuffer()[i] = ReadDeclAs<BlockDecl>(Record, Idx);
+
+ E->SubExpr = Reader.ReadSubExpr();
}
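`ExprWithCleanups` switches from a default-constructed node whose temporary list was grown afterwards to a `Create(Context, Empty, N)` factory that allocates the node and its trailing object array in one block. That is why the count must sit at a fixed offset in the record: the `EXPR_EXPR_WITH_CLEANUPS` case later in this file has to know `N` before it can construct the empty node. A generic sketch of the trailing-array idiom, with `malloc` standing in for `ASTContext` allocation:

```cpp
#include <cstdlib>
#include <new>

struct BlockDecl;

class alignas(alignof(BlockDecl *)) CleanupsNode {
  unsigned NumObjects;
  explicit CleanupsNode(unsigned N) : NumObjects(N) {}

public:
  // Node and trailing BlockDecl* array are one allocation, sized up front.
  static CleanupsNode *Create(unsigned NumObjects) {
    void *Mem = std::malloc(sizeof(CleanupsNode) +
                            NumObjects * sizeof(BlockDecl *));
    return new (Mem) CleanupsNode(NumObjects);
  }
  BlockDecl **getObjectsBuffer() {
    return reinterpret_cast<BlockDecl **>(this + 1);
  }
  unsigned getNumObjects() const { return NumObjects; }
};

int main() {
  CleanupsNode *N = CleanupsNode::Create(2);
  N->getObjectsBuffer()[0] = nullptr; // the reader fills these via ReadDeclAs
  N->getObjectsBuffer()[1] = nullptr;
  std::free(N);
}
```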
void
ASTStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
VisitExpr(E);
-
- if (Record[Idx++])
- ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
- Record[Idx++]);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ /*NumTemplateArgs=*/Record[Idx++]);
E->Base = Reader.ReadSubExpr();
E->BaseType = Reader.readType(F, Record, Idx);
@@ -1196,10 +1293,10 @@ ASTStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
void
ASTStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
VisitExpr(E);
-
- if (Record[Idx++])
- ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
- Record[Idx++]);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ /*NumTemplateArgs=*/Record[Idx++]);
E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
@@ -1219,11 +1316,10 @@ ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
-
- // Read the explicit template argument list, if available.
- if (Record[Idx++])
- ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
- Record[Idx++]);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ /*NumTemplateArgs=*/Record[Idx++]);
unsigned NumDecls = Record[Idx++];
UnresolvedSet<8> Decls;
@@ -1277,6 +1373,17 @@ void ASTStmtReader::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
E->RhsType = GetTypeSourceInfo(Record, Idx);
}
+void ASTStmtReader::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ VisitExpr(E);
+ E->TypeTraitExprBits.NumArgs = Record[Idx++];
+ E->TypeTraitExprBits.Kind = Record[Idx++];
+ E->TypeTraitExprBits.Value = Record[Idx++];
+
+ TypeSourceInfo **Args = E->getTypeSourceInfos();
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Args[I] = GetTypeSourceInfo(Record, Idx);
+}
+
void ASTStmtReader::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
VisitExpr(E);
E->ATT = (ArrayTypeTrait)Record[Idx++];
@@ -1348,7 +1455,7 @@ void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
- Idx++; // skip ID
+ E->SourceExpr = Reader.ReadSubExpr();
E->Loc = ReadSourceLocation(Record, Idx);
}
@@ -1412,7 +1519,7 @@ void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
// ASTReader Implementation
//===----------------------------------------------------------------------===//
-Stmt *ASTReader::ReadStmt(Module &F) {
+Stmt *ASTReader::ReadStmt(ModuleFile &F) {
switch (ReadingKind) {
case Read_Decl:
case Read_Type:
@@ -1422,10 +1529,9 @@ Stmt *ASTReader::ReadStmt(Module &F) {
}
llvm_unreachable("ReadingKind not set ?");
- return 0;
}
-Expr *ASTReader::ReadExpr(Module &F) {
+Expr *ASTReader::ReadExpr(ModuleFile &F) {
return cast_or_null<Expr>(ReadStmt(F));
}
@@ -1440,10 +1546,14 @@ Expr *ASTReader::ReadSubExpr() {
// the stack, with expressions having operands removing those operands from the
// stack. Evaluation terminates when we see a STMT_STOP record, and
// the single remaining expression on the stack is our result.
-Stmt *ASTReader::ReadStmtFromStream(Module &F) {
+Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
ReadingKindTracker ReadingKind(Read_Stmt, *this);
llvm::BitstreamCursor &Cursor = F.DeclsCursor;
+
+ // Map of offset to previously deserialized stmt. The offset points
+ // just after the stmt record.
+ llvm::DenseMap<uint64_t, Stmt *> StmtEntries;
#ifndef NDEBUG
unsigned PrevNumStmts = StmtStack.size();
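As the comment above the function says, `ReadStmtFromStream` is a small postfix stack machine: every record pushes one statement, operand-consuming nodes pop their children (via `ReadSubExpr`/`ReadSubStmt`), and `STMT_STOP` must leave exactly one value on the stack. A reduced model of the evaluation loop:

```cpp
#include <cassert>
#include <vector>

// Postfix deserialization: operands are emitted before the node that
// consumes them, so one stack rebuilds the tree. (Nodes leak; demo only.)
enum Op { PUSH_LEAF, MAKE_BINARY, STOP };

struct Node {
  Node *LHS = nullptr, *RHS = nullptr;
};

static Node *readStream(const std::vector<Op> &Stream) {
  std::vector<Node *> Stack;
  for (Op O : Stream) {
    if (O == STOP)
      break;
    Node *N = new Node;
    if (O == MAKE_BINARY) {
      N->RHS = Stack.back(); Stack.pop_back();
      N->LHS = Stack.back(); Stack.pop_back();
    }
    Stack.push_back(N);
  }
  assert(Stack.size() == 1 && "stream must leave exactly one result");
  return Stack.back();
}

int main() {
  // Encodes "leaf op leaf": both operands first, then their consumer.
  Node *Root = readStream({PUSH_LEAF, PUSH_LEAF, MAKE_BINARY, STOP});
  return (Root->LHS && Root->RHS) ? 0 : 1;
}
```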
@@ -1483,11 +1593,19 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
Idx = 0;
Record.clear();
bool Finished = false;
+ bool IsStmtReference = false;
switch ((StmtCode)Cursor.ReadRecord(Code, Record)) {
case STMT_STOP:
Finished = true;
break;
+ case STMT_REF_PTR:
+ IsStmtReference = true;
+ assert(StmtEntries.find(Record[0]) != StmtEntries.end() &&
+ "No stmt was recorded for this offset reference!");
+ S = StmtEntries[Record[Idx++]];
+ break;
+
case STMT_NULL_PTR:
S = 0;
break;
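The new `STMT_REF_PTR` record plus the `StmtEntries` map generalizes what the deleted `EXPR_OPAQUE_VALUE` special case did by hand (compare the hunk near the end of this file): once a statement is materialized, it is remembered under the bit offset just past its record, and any later record that needs to share the node stores only that offset. A sketch of the cache, assuming these simplified types:

```cpp
#include <cassert>
#include <cstdint>
#include <unordered_map>

struct Stmt {};

// Share deserialized nodes by stream offset instead of per-kind caches.
struct StmtCache {
  std::unordered_map<uint64_t, Stmt *> Entries;

  void remember(uint64_t OffsetAfterRecord, Stmt *S) {
    Entries[OffsetAfterRecord] = S;
  }
  Stmt *lookup(uint64_t Offset) const {
    auto It = Entries.find(Offset);
    assert(It != Entries.end() &&
           "No stmt was recorded for this offset reference!");
    return It->second;
  }
};

int main() {
  StmtCache C;
  Stmt S;
  C.remember(128, &S);        // done right after Reader.Visit(S)
  return C.lookup(128) == &S ? 0 : 1;
}
```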
@@ -1569,9 +1687,9 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
Context,
/*HasQualifier=*/Record[ASTStmtReader::NumExprFields],
/*HasFoundDecl=*/Record[ASTStmtReader::NumExprFields + 1],
- /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 2],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 2],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 2] ?
- Record[ASTStmtReader::NumExprFields + 4] : 0);
+ Record[ASTStmtReader::NumExprFields + 5] : 0);
break;
case EXPR_INTEGER_LITERAL:
@@ -1636,9 +1754,11 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
QualifierLoc = ReadNestedNameSpecifierLoc(F, Record, Idx);
}
+ SourceLocation TemplateKWLoc;
TemplateArgumentListInfo ArgInfo;
- bool HasExplicitTemplateArgs = Record[Idx++];
- if (HasExplicitTemplateArgs) {
+ bool HasTemplateKWAndArgsInfo = Record[Idx++];
+ if (HasTemplateKWAndArgsInfo) {
+ TemplateKWLoc = ReadSourceLocation(F, Record, Idx);
unsigned NumTemplateArgs = Record[Idx++];
ArgInfo.setLAngleLoc(ReadSourceLocation(F, Record, Idx));
ArgInfo.setRAngleLoc(ReadSourceLocation(F, Record, Idx));
@@ -1662,8 +1782,9 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
bool IsArrow = Record[Idx++];
S = MemberExpr::Create(Context, Base, IsArrow, QualifierLoc,
- MemberD, FoundDecl, MemberNameInfo,
- HasExplicitTemplateArgs ? &ArgInfo : 0, T, VK, OK);
+ TemplateKWLoc, MemberD, FoundDecl, MemberNameInfo,
+ HasTemplateKWAndArgsInfo ? &ArgInfo : 0,
+ T, VK, OK);
ReadDeclarationNameLoc(F, cast<MemberExpr>(S)->MemberDNLoc,
MemberD->getDeclName(), Record, Idx);
if (HadMultipleCandidates)
@@ -1747,10 +1868,6 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
S = new (Context) BlockExpr(Empty);
break;
- case EXPR_BLOCK_DECL_REF:
- S = new (Context) BlockDeclRefExpr(Empty);
- break;
-
case EXPR_GENERIC_SELECTION:
S = new (Context) GenericSelectionExpr(Empty);
break;
@@ -1758,6 +1875,18 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
case EXPR_OBJC_STRING_LITERAL:
S = new (Context) ObjCStringLiteral(Empty);
break;
+ case EXPR_OBJC_NUMERIC_LITERAL:
+ S = new (Context) ObjCNumericLiteral(Empty);
+ break;
+ case EXPR_OBJC_ARRAY_LITERAL:
+ S = ObjCArrayLiteral::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+ case EXPR_OBJC_DICTIONARY_LITERAL:
+ S = ObjCDictionaryLiteral::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields],
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
case EXPR_OBJC_ENCODE:
S = new (Context) ObjCEncodeExpr(Empty);
break;
@@ -1773,9 +1902,11 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
case EXPR_OBJC_PROPERTY_REF_EXPR:
S = new (Context) ObjCPropertyRefExpr(Empty);
break;
+ case EXPR_OBJC_SUBSCRIPT_REF_EXPR:
+ S = new (Context) ObjCSubscriptRefExpr(Empty);
+ break;
case EXPR_OBJC_KVC_REF_EXPR:
llvm_unreachable("mismatching AST file");
- break;
case EXPR_OBJC_MESSAGE_EXPR:
S = ObjCMessageExpr::CreateEmpty(Context,
Record[ASTStmtReader::NumExprFields],
@@ -1813,6 +1944,9 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
case STMT_OBJC_AUTORELEASE_POOL:
S = new (Context) ObjCAutoreleasePoolStmt(Empty);
break;
+ case EXPR_OBJC_BOOL_LITERAL:
+ S = new (Context) ObjCBoolLiteralExpr(Empty);
+ break;
case STMT_SEH_EXCEPT:
S = new (Context) SEHExceptStmt(Empty);
break;
@@ -1835,6 +1969,13 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
S = new (Context) CXXForRangeStmt(Empty);
break;
+ case STMT_MS_DEPENDENT_EXISTS:
+ S = new (Context) MSDependentExistsStmt(SourceLocation(), true,
+ NestedNameSpecifierLoc(),
+ DeclarationNameInfo(),
+ 0);
+ break;
+
case EXPR_CXX_OPERATOR_CALL:
S = new (Context) CXXOperatorCallExpr(Context, Empty);
break;
@@ -1875,6 +2016,10 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
/*PathSize*/ Record[ASTStmtReader::NumExprFields]);
break;
+ case EXPR_USER_DEFINED_LITERAL:
+ S = new (Context) UserDefinedLiteral(Context, Empty);
+ break;
+
case EXPR_CXX_BOOL_LITERAL:
S = new (Context) CXXBoolLiteralExpr(Empty);
break;
@@ -1927,12 +2072,13 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
break;
case EXPR_EXPR_WITH_CLEANUPS:
- S = new (Context) ExprWithCleanups(Empty);
+ S = ExprWithCleanups::Create(Context, Empty,
+ Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_CXX_DEPENDENT_SCOPE_MEMBER:
S = CXXDependentScopeMemberExpr::CreateEmpty(Context,
- /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
? Record[ASTStmtReader::NumExprFields + 1]
: 0);
@@ -1940,7 +2086,7 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
case EXPR_CXX_DEPENDENT_SCOPE_DECL_REF:
S = DependentScopeDeclRefExpr::CreateEmpty(Context,
- /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
? Record[ASTStmtReader::NumExprFields + 1]
: 0);
@@ -1953,7 +2099,7 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
case EXPR_CXX_UNRESOLVED_MEMBER:
S = UnresolvedMemberExpr::CreateEmpty(Context,
- /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
? Record[ASTStmtReader::NumExprFields + 1]
: 0);
@@ -1961,7 +2107,7 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
case EXPR_CXX_UNRESOLVED_LOOKUP:
S = UnresolvedLookupExpr::CreateEmpty(Context,
- /*HasExplicitTemplateArgs=*/Record[ASTStmtReader::NumExprFields],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
? Record[ASTStmtReader::NumExprFields + 1]
: 0);
@@ -1975,6 +2121,11 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
S = new (Context) BinaryTypeTraitExpr(Empty);
break;
+ case EXPR_TYPE_TRAIT:
+ S = TypeTraitExpr::CreateDeserialized(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+
case EXPR_ARRAY_TYPE_TRAIT:
S = new (Context) ArrayTypeTraitExpr(Empty);
break;
@@ -2007,20 +2158,9 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
S = new (Context) MaterializeTemporaryExpr(Empty);
break;
- case EXPR_OPAQUE_VALUE: {
- unsigned key = Record[ASTStmtReader::NumExprFields];
- OpaqueValueExpr *&expr = OpaqueValueExprs[key];
-
- // If we already have an entry for this opaque value expression,
- // don't bother reading it again.
- if (expr) {
- StmtStack.push_back(expr);
- continue;
- }
-
- S = expr = new (Context) OpaqueValueExpr(Empty);
+ case EXPR_OPAQUE_VALUE:
+ S = new (Context) OpaqueValueExpr(Empty);
break;
- }
case EXPR_CUDA_KERNEL_CALL:
S = new (Context) CUDAKernelCallExpr(Context, Empty);
@@ -2030,9 +2170,23 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
S = new (Context) AsTypeExpr(Empty);
break;
+ case EXPR_PSEUDO_OBJECT: {
+ unsigned numSemanticExprs = Record[ASTStmtReader::NumExprFields];
+ S = PseudoObjectExpr::Create(Context, Empty, numSemanticExprs);
+ break;
+ }
+
case EXPR_ATOMIC:
S = new (Context) AtomicExpr(Empty);
break;
+
+ case EXPR_LAMBDA: {
+ unsigned NumCaptures = Record[ASTStmtReader::NumExprFields];
+ unsigned NumArrayIndexVars = Record[ASTStmtReader::NumExprFields + 1];
+ S = LambdaExpr::CreateDeserialized(Context, NumCaptures,
+ NumArrayIndexVars);
+ break;
+ }
}
// We hit a STMT_STOP, so we're done with this expression.
@@ -2041,8 +2195,11 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
++NumStatementsRead;
- if (S)
+ if (S && !IsStmtReference) {
Reader.Visit(S);
+ StmtEntries[Cursor.GetCurrentBitNo()] = S;
+ }
+
assert(Idx == Record.size() && "Invalid deserialization of statement");
StmtStack.push_back(S);
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
index a1645d7..a4301b5 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
@@ -185,6 +185,7 @@ void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
for (unsigned I = 0, N = T->getNumArgs(); I != N; ++I)
Writer.AddTypeRef(T->getArgType(I), Record);
Record.push_back(T->isVariadic());
+ Record.push_back(T->hasTrailingReturn());
Record.push_back(T->getTypeQuals());
Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
Record.push_back(T->getExceptionSpecType());
@@ -221,6 +222,7 @@ void ASTTypeWriter::VisitTypeOfType(const TypeOfType *T) {
}
void ASTTypeWriter::VisitDecltypeType(const DecltypeType *T) {
+ Writer.AddTypeRef(T->getUnderlyingType(), Record);
Writer.AddStmt(T->getUnderlyingExpr());
Code = TYPE_DECLTYPE;
}
@@ -239,7 +241,7 @@ void ASTTypeWriter::VisitAutoType(const AutoType *T) {
void ASTTypeWriter::VisitTagType(const TagType *T) {
Record.push_back(T->isDependentType());
- Writer.AddDeclRef(T->getDecl(), Record);
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
assert(!T->isBeingDefined() &&
"Cannot serialize in the middle of a type definition");
}
@@ -363,13 +365,13 @@ void ASTTypeWriter::VisitElaboratedType(const ElaboratedType *T) {
}
void ASTTypeWriter::VisitInjectedClassNameType(const InjectedClassNameType *T) {
- Writer.AddDeclRef(T->getDecl(), Record);
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
Writer.AddTypeRef(T->getInjectedSpecializationType(), Record);
Code = TYPE_INJECTED_CLASS_NAME;
}
void ASTTypeWriter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
- Writer.AddDeclRef(T->getDecl(), Record);
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
Code = TYPE_OBJC_INTERFACE;
}
@@ -552,6 +554,7 @@ void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
}
void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getTemplateKeywordLoc(), Record);
Writer.AddSourceLocation(TL.getTemplateNameLoc(), Record);
Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
@@ -564,22 +567,23 @@ void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
Writer.AddSourceLocation(TL.getRParenLoc(), Record);
}
void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- Writer.AddSourceLocation(TL.getKeywordLoc(), Record);
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
}
void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
- Writer.AddSourceLocation(TL.getKeywordLoc(), Record);
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- Writer.AddSourceLocation(TL.getKeywordLoc(), Record);
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
- Writer.AddSourceLocation(TL.getNameLoc(), Record);
+ Writer.AddSourceLocation(TL.getTemplateKeywordLoc(), Record);
+ Writer.AddSourceLocation(TL.getTemplateNameLoc(), Record);
Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
@@ -689,9 +693,11 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_GNU_NULL);
RECORD(EXPR_SHUFFLE_VECTOR);
RECORD(EXPR_BLOCK);
- RECORD(EXPR_BLOCK_DECL_REF);
RECORD(EXPR_GENERIC_SELECTION);
RECORD(EXPR_OBJC_STRING_LITERAL);
+ RECORD(EXPR_OBJC_NUMERIC_LITERAL);
+ RECORD(EXPR_OBJC_ARRAY_LITERAL);
+ RECORD(EXPR_OBJC_DICTIONARY_LITERAL);
RECORD(EXPR_OBJC_ENCODE);
RECORD(EXPR_OBJC_SELECTOR_EXPR);
RECORD(EXPR_OBJC_PROTOCOL_EXPR);
@@ -705,6 +711,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(STMT_OBJC_AT_TRY);
RECORD(STMT_OBJC_AT_SYNCHRONIZED);
RECORD(STMT_OBJC_AT_THROW);
+ RECORD(EXPR_OBJC_BOOL_LITERAL);
RECORD(EXPR_CXX_OPERATOR_CALL);
RECORD(EXPR_CXX_CONSTRUCT);
RECORD(EXPR_CXX_STATIC_CAST);
@@ -712,6 +719,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_CXX_REINTERPRET_CAST);
RECORD(EXPR_CXX_CONST_CAST);
RECORD(EXPR_CXX_FUNCTIONAL_CAST);
+ RECORD(EXPR_USER_DEFINED_LITERAL);
RECORD(EXPR_CXX_BOOL_LITERAL);
RECORD(EXPR_CXX_NULL_PTR_LITERAL);
RECORD(EXPR_CXX_TYPEID_EXPR);
@@ -778,7 +786,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(IMPORTS);
RECORD(REFERENCED_SELECTOR_POOL);
RECORD(TU_UPDATE_LEXICAL);
- RECORD(REDECLS_UPDATE_LATEST);
+ RECORD(LOCAL_REDECLARATIONS_MAP);
RECORD(SEMA_DECL_REFS);
RECORD(WEAK_UNDECLARED_IDENTIFIERS);
RECORD(PENDING_IMPLICIT_INSTANTIATIONS);
@@ -798,7 +806,13 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(KNOWN_NAMESPACES);
RECORD(MODULE_OFFSET_MAP);
RECORD(SOURCE_MANAGER_LINE_TABLE);
-
+ RECORD(OBJC_CATEGORIES_MAP);
+ RECORD(FILE_SORTED_DECLS);
+ RECORD(IMPORTED_MODULES);
+ RECORD(MERGED_DECLARATIONS);
+ RECORD(LOCAL_REDECLARATIONS);
+ RECORD(OBJC_CATEGORIES);
+
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
RECORD(SM_SLOC_FILE_ENTRY);
@@ -862,8 +876,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_OBJC_PROTOCOL);
RECORD(DECL_OBJC_IVAR);
RECORD(DECL_OBJC_AT_DEFS_FIELD);
- RECORD(DECL_OBJC_CLASS);
- RECORD(DECL_OBJC_FORWARD_PROTOCOL);
RECORD(DECL_OBJC_CATEGORY);
RECORD(DECL_OBJC_CATEGORY_IMPL);
RECORD(DECL_OBJC_IMPLEMENTATION);
@@ -969,6 +981,7 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang major
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang minor
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Has errors
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Target triple
unsigned MetaAbbrevCode = Stream.EmitAbbrev(MetaAbbrev);
@@ -979,6 +992,7 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
Record.push_back(CLANG_VERSION_MAJOR);
Record.push_back(CLANG_VERSION_MINOR);
Record.push_back(!isysroot.empty());
+ Record.push_back(ASTHasCompilerErrors);
const std::string &Triple = Target.getTriple().getTriple();
Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, Triple);
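The "Has errors" change shows the discipline a bitstream abbreviation imposes: the field is declared once in the abbreviation (`Fixed, 1`) and a matching `Record.push_back(ASTHasCompilerErrors)` must then be emitted at the same position; any drift in order or count malforms the block. A schematic of keeping the two in lockstep (this is the idea, not the `llvm::BitstreamWriter` API):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

enum class Encoding { Fixed1, Fixed16, VBR6 };

struct Abbrev {
  std::vector<Encoding> Operands; // declared shape of the record
};

static void emitRecord(const Abbrev &A, const std::vector<uint64_t> &Record) {
  assert(Record.size() == A.Operands.size() && "abbrev/record mismatch");
  // ... bit-pack each value with its declared encoding ...
}

int main() {
  // major, minor, relocatable, has-errors: declare once, emit in step.
  Abbrev Meta{{Encoding::Fixed16, Encoding::Fixed16, Encoding::Fixed1,
               Encoding::Fixed1}};
  emitRecord(Meta, {3, 1, 0, /*HasErrors=*/0});
}
```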
@@ -1011,7 +1025,7 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
unsigned FileAbbrevCode = Stream.EmitAbbrev(FileAbbrev);
- llvm::SmallString<128> MainFilePath(MainFile->getName());
+ SmallString<128> MainFilePath(MainFile->getName());
llvm::sys::fs::make_absolute(MainFilePath);
@@ -1034,7 +1048,7 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
- llvm::SmallString<128> OutputPath(OutputFile);
+ SmallString<128> OutputPath(OutputFile);
llvm::sys::fs::make_absolute(OutputPath);
StringRef origDir = llvm::sys::path::parent_path(OutputPath);
@@ -1063,6 +1077,9 @@ void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
Record.push_back(static_cast<unsigned>(LangOpts.get##Name()));
#include "clang/Basic/LangOptions.def"
+
+ Record.push_back(LangOpts.CurrentModule.size());
+ Record.append(LangOpts.CurrentModule.begin(), LangOpts.CurrentModule.end());
Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
}
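`CurrentModule` is stored with the usual trick for strings inside an all-integer record: push the length, then one element per byte; the reader pops the length and reassembles the string. A self-contained sketch of the round trip:

```cpp
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

static void writeString(std::vector<uint64_t> &Record, const std::string &S) {
  Record.push_back(S.size());                      // length prefix...
  Record.insert(Record.end(), S.begin(), S.end()); // ...then the bytes
}

static std::string readString(const std::vector<uint64_t> &Record,
                              unsigned &Idx) {
  unsigned Len = Record[Idx++];
  std::string S;
  for (unsigned I = 0; I != Len; ++I)
    S += static_cast<char>(Record[Idx++]);
  return S;
}

int main() {
  std::vector<uint64_t> Record;
  writeString(Record, "MyModule");
  unsigned Idx = 0;
  assert(readString(Record, Idx) == "MyModule");
}
```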
@@ -1128,7 +1145,7 @@ void ASTWriter::WriteStatCache(MemorizeStatCalls &StatCalls) {
}
// Create the on-disk hash table in a buffer.
- llvm::SmallString<4096> StatCacheData;
+ SmallString<4096> StatCacheData;
uint32_t BucketOffset;
{
llvm::raw_svector_ostream Out(StatCacheData);
@@ -1171,7 +1188,10 @@ static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
// FileEntry fields.
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // BufferOverridden
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumCreatedFIDs
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 24)); // FirstDeclIndex
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumDecls
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
return Stream.EmitAbbrev(Abbrev);
}
@@ -1218,14 +1238,14 @@ namespace {
// Trait used for the on-disk hash table of header search information.
class HeaderFileInfoTrait {
ASTWriter &Writer;
- HeaderSearch &HS;
+ const HeaderSearch &HS;
// Keep track of the framework names we've used during serialization.
SmallVector<char, 128> FrameworkStringData;
llvm::StringMap<unsigned> FrameworkNameOffset;
public:
- HeaderFileInfoTrait(ASTWriter &Writer, HeaderSearch &HS)
+ HeaderFileInfoTrait(ASTWriter &Writer, const HeaderSearch &HS)
: Writer(Writer), HS(HS) { }
typedef const char *key_type;
@@ -1304,7 +1324,7 @@ namespace {
/// \param HS The header search structure to save.
///
/// \param Chain Whether we're creating a chained AST file.
-void ASTWriter::WriteHeaderSearch(HeaderSearch &HS, StringRef isysroot) {
+void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot) {
SmallVector<const FileEntry *, 16> FilesByUID;
HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
@@ -1320,7 +1340,9 @@ void ASTWriter::WriteHeaderSearch(HeaderSearch &HS, StringRef isysroot) {
if (!File)
continue;
- const HeaderFileInfo &HFI = HS.header_file_begin()[UID];
+ // Use HeaderSearch's getFileInfo to make sure we get the HeaderFileInfo
+ // from the external source if it was not provided already.
+ const HeaderFileInfo &HFI = HS.getFileInfo(File);
if (HFI.External && Chain)
continue;
@@ -1340,7 +1362,7 @@ void ASTWriter::WriteHeaderSearch(HeaderSearch &HS, StringRef isysroot) {
}
// Create the on-disk hash table in a buffer.
- llvm::SmallString<4096> TableData;
+ SmallString<4096> TableData;
uint32_t BucketOffset;
{
llvm::raw_svector_ostream Out(TableData);
@@ -1414,7 +1436,8 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Figure out which record code to use.
unsigned Code;
if (SLoc->isFile()) {
- if (SLoc->getFile().getContentCache()->OrigEntry) {
+ const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache();
+ if (Cache->OrigEntry) {
Code = SM_SLOC_FILE_ENTRY;
SLocFileEntryOffsets.push_back(Stream.GetCurrentBitNo());
} else
@@ -1435,7 +1458,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
const SrcMgr::ContentCache *Content = File.getContentCache();
if (Content->OrigEntry) {
assert(Content->OrigEntry == Content->ContentsEntry &&
- "Writing to AST an overriden file is not supported");
+ "Writing to AST an overridden file is not supported");
// The source location entry is a file. The blob associated
// with this entry is the file name.
@@ -1443,12 +1466,21 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Emit size/modification time for this file.
Record.push_back(Content->OrigEntry->getSize());
Record.push_back(Content->OrigEntry->getModificationTime());
-
+ Record.push_back(Content->BufferOverridden);
Record.push_back(File.NumCreatedFIDs);
-
+
+ FileDeclIDsTy::iterator FDI = FileDeclIDs.find(SLoc);
+ if (FDI != FileDeclIDs.end()) {
+ Record.push_back(FDI->second->FirstDeclIndex);
+ Record.push_back(FDI->second->DeclIDs.size());
+ } else {
+ Record.push_back(0);
+ Record.push_back(0);
+ }
+
// Turn the file name into an absolute path, if it isn't already.
const char *Filename = Content->OrigEntry->getName();
- llvm::SmallString<128> FilePath(Filename);
+ SmallString<128> FilePath(Filename);
// Ask the file manager to fixup the relative path for us. This will
// honor the working directory.
@@ -1461,6 +1493,16 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
Stream.EmitRecordWithBlob(SLocFileAbbrv, Record, Filename);
+
+ if (Content->BufferOverridden) {
+ Record.clear();
+ Record.push_back(SM_SLOC_BUFFER_BLOB);
+ const llvm::MemoryBuffer *Buffer
+ = Content->getBuffer(PP.getDiagnostics(), PP.getSourceManager());
+ Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record,
+ StringRef(Buffer->getBufferStart(),
+ Buffer->getBufferSize() + 1));
+ }
} else {
// The source location entry is a buffer. The blob associated
// with this entry contains the contents of the buffer.
@@ -1629,8 +1671,9 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
for (Preprocessor::macro_iterator I = PP.macro_begin(Chain == 0),
E = PP.macro_end(Chain == 0);
I != E; ++I) {
- if (!IsModule || I->second->isExported()) {
- MacroDefinitionsSeen.insert(I->first);
+ const IdentifierInfo *Name = I->first;
+ if (!IsModule || I->second->isPublic()) {
+ MacroDefinitionsSeen.insert(Name);
MacrosToEmit.push_back(std::make_pair(I->first, I->second));
}
}
@@ -1664,15 +1707,17 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
// chained PCH, by storing the offset into the original PCH rather than
// writing the macro definition a second time.
if (MI->isBuiltinMacro() ||
- (Chain && Name->isFromAST() && MI->isFromAST() &&
- !MI->hasChangedAfterLoad()))
+ (Chain &&
+ Name->isFromAST() && !Name->hasChangedSinceDeserialization() &&
+ MI->isFromAST() && !MI->hasChangedAfterLoad()))
continue;
AddIdentifierRef(Name, Record);
MacroOffsets[Name] = Stream.GetCurrentBitNo();
Record.push_back(MI->getDefinitionLoc().getRawEncoding());
Record.push_back(MI->isUsed());
- AddSourceLocation(MI->getExportLocation(), Record);
+ Record.push_back(MI->isPublic());
+ AddSourceLocation(MI->getVisibilityLocation(), Record);
unsigned Code;
if (MI->isObjectLike()) {
Code = PP_MACRO_OBJECT_LIKE;
@@ -1784,9 +1829,12 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
Record.push_back(ID->getFileName().size());
Record.push_back(ID->wasInQuotes());
Record.push_back(static_cast<unsigned>(ID->getKind()));
- llvm::SmallString<64> Buffer;
+ SmallString<64> Buffer;
Buffer += ID->getFileName();
- Buffer += ID->getFile()->getName();
+ // The FileEntry may be null if the include was not resolved; this can
+ // happen because we now create a PCH even in the presence of compiler
+ // errors.
+ if (ID->getFile())
+ Buffer += ID->getFile()->getName();
Stream.EmitRecordWithBlob(InclusionAbbrev, Record, Buffer);
continue;
}
@@ -1815,6 +1863,202 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
}
}
+unsigned ASTWriter::getSubmoduleID(Module *Mod) {
+ llvm::DenseMap<Module *, unsigned>::iterator Known = SubmoduleIDs.find(Mod);
+ if (Known != SubmoduleIDs.end())
+ return Known->second;
+
+ return SubmoduleIDs[Mod] = NextSubmoduleID++;
+}
+
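`getSubmoduleID` is a lazily assigned, memoized ID allocator: the first request for a module fixes its ID, so a submodule can be referenced (from imports, exports, or macro-visibility data) before its definition record is written. A minimal sketch of the same pattern:

```cpp
#include <unordered_map>

struct Module;

class IDAllocator {
  std::unordered_map<const Module *, unsigned> IDs;
  unsigned Next = 1; // 0 stays reserved to mean "no module"

public:
  unsigned get(const Module *M) {
    auto It = IDs.find(M);
    if (It != IDs.end())
      return It->second;
    return IDs[M] = Next++; // first use fixes the ID permanently
  }
};

int main() {
  IDAllocator A;
  const Module *M = nullptr; // any pointer identity works as a key
  return A.get(M) == A.get(M) ? 0 : 1;
}
```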
+/// \brief Compute the number of modules within the given tree (including the
+/// given module).
+static unsigned getNumberOfModules(Module *Mod) {
+ unsigned ChildModules = 0;
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub)
+ ChildModules += getNumberOfModules(*Sub);
+
+ return ChildModules + 1;
+}
+
+void ASTWriter::WriteSubmodules(Module *WritingModule) {
+ // Determine the dependencies of our module and each of its submodules.
+ // FIXME: This feels like it belongs somewhere else, but there are no
+ // other consumers of this information.
+ SourceManager &SrcMgr = PP->getSourceManager();
+ ModuleMap &ModMap = PP->getHeaderSearchInfo().getModuleMap();
+ for (ASTContext::import_iterator I = Context->local_import_begin(),
+ IEnd = Context->local_import_end();
+ I != IEnd; ++I) {
+ if (Module *ImportedFrom
+ = ModMap.inferModuleFromLocation(FullSourceLoc(I->getLocation(),
+ SrcMgr))) {
+ ImportedFrom->Imports.push_back(I->getImportedModule());
+ }
+ }
+
+ // Enter the submodule description block.
+ Stream.EnterSubblock(SUBMODULE_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
+
+ // Write the abbreviations needed for the submodules block.
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_DEFINITION));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Parent
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExplicit
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsSystem
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferSubmodules...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExplicit...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExportWild...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned DefinitionAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_UMBRELLA_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned UmbrellaAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned HeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_UMBRELLA_DIR));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned UmbrellaDirAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_REQUIRES));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Feature
+ unsigned RequiresAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the submodule metadata block.
+ RecordData Record;
+ Record.push_back(getNumberOfModules(WritingModule));
+ Record.push_back(FirstSubmoduleID - NUM_PREDEF_SUBMODULE_IDS);
+ Stream.EmitRecord(SUBMODULE_METADATA, Record);
+
+ // Write all of the submodules.
+ std::queue<Module *> Q;
+ Q.push(WritingModule);
+ while (!Q.empty()) {
+ Module *Mod = Q.front();
+ Q.pop();
+ unsigned ID = getSubmoduleID(Mod);
+
+ // Emit the definition of the block.
+ Record.clear();
+ Record.push_back(SUBMODULE_DEFINITION);
+ Record.push_back(ID);
+ if (Mod->Parent) {
+ assert(SubmoduleIDs[Mod->Parent] && "Submodule parent not written?");
+ Record.push_back(SubmoduleIDs[Mod->Parent]);
+ } else {
+ Record.push_back(0);
+ }
+ Record.push_back(Mod->IsFramework);
+ Record.push_back(Mod->IsExplicit);
+ Record.push_back(Mod->IsSystem);
+ Record.push_back(Mod->InferSubmodules);
+ Record.push_back(Mod->InferExplicitSubmodules);
+ Record.push_back(Mod->InferExportWildcard);
+ Stream.EmitRecordWithBlob(DefinitionAbbrev, Record, Mod->Name);
+
+ // Emit the requirements.
+ for (unsigned I = 0, N = Mod->Requires.size(); I != N; ++I) {
+ Record.clear();
+ Record.push_back(SUBMODULE_REQUIRES);
+ Stream.EmitRecordWithBlob(RequiresAbbrev, Record,
+ Mod->Requires[I].data(),
+ Mod->Requires[I].size());
+ }
+
+ // Emit the umbrella header, if there is one.
+ if (const FileEntry *UmbrellaHeader = Mod->getUmbrellaHeader()) {
+ Record.clear();
+ Record.push_back(SUBMODULE_UMBRELLA_HEADER);
+ Stream.EmitRecordWithBlob(UmbrellaAbbrev, Record,
+ UmbrellaHeader->getName());
+ } else if (const DirectoryEntry *UmbrellaDir = Mod->getUmbrellaDir()) {
+ Record.clear();
+ Record.push_back(SUBMODULE_UMBRELLA_DIR);
+ Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record,
+ UmbrellaDir->getName());
+ }
+
+ // Emit the headers.
+ for (unsigned I = 0, N = Mod->Headers.size(); I != N; ++I) {
+ Record.clear();
+ Record.push_back(SUBMODULE_HEADER);
+ Stream.EmitRecordWithBlob(HeaderAbbrev, Record,
+ Mod->Headers[I]->getName());
+ }
+
+ // Emit the imports.
+ if (!Mod->Imports.empty()) {
+ Record.clear();
+ for (unsigned I = 0, N = Mod->Imports.size(); I != N; ++I) {
+ unsigned ImportedID = getSubmoduleID(Mod->Imports[I]);
+ assert(ImportedID && "Unknown submodule!");
+ Record.push_back(ImportedID);
+ }
+ Stream.EmitRecord(SUBMODULE_IMPORTS, Record);
+ }
+
+ // Emit the exports.
+ if (!Mod->Exports.empty()) {
+ Record.clear();
+ for (unsigned I = 0, N = Mod->Exports.size(); I != N; ++I) {
+ if (Module *Exported = Mod->Exports[I].getPointer()) {
+ unsigned ExportedID = SubmoduleIDs[Exported];
+ assert(ExportedID > 0 && "Unknown submodule ID?");
+ Record.push_back(ExportedID);
+ } else {
+ Record.push_back(0);
+ }
+
+ Record.push_back(Mod->Exports[I].getInt());
+ }
+ Stream.EmitRecord(SUBMODULE_EXPORTS, Record);
+ }
+
+ // Queue up the submodules of this module.
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub)
+ Q.push(*Sub);
+ }
+
+ Stream.ExitBlock();
+
+ assert((NextSubmoduleID - FirstSubmoduleID
+ == getNumberOfModules(WritingModule)) && "Wrong # of submodules");
+}
+
+serialization::SubmoduleID
+ASTWriter::inferSubmoduleIDFromLocation(SourceLocation Loc) {
+ if (Loc.isInvalid() || !WritingModule)
+ return 0; // No submodule
+
+ // Find the module that owns this location.
+ ModuleMap &ModMap = PP->getHeaderSearchInfo().getModuleMap();
+ Module *OwningMod
+ = ModMap.inferModuleFromLocation(FullSourceLoc(Loc,PP->getSourceManager()));
+ if (!OwningMod)
+ return 0;
+
+ // Check whether this submodule is part of our own module.
+ if (WritingModule != OwningMod && !OwningMod->isSubModuleOf(WritingModule))
+ return 0;
+
+ return getSubmoduleID(OwningMod);
+}
+
void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag) {
RecordData Record;
for (DiagnosticsEngine::DiagStatePointsTy::const_iterator
@@ -1970,6 +2214,29 @@ void ASTWriter::WriteTypeDeclOffsets() {
Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record, data(DeclOffsets));
}
+void ASTWriter::WriteFileDeclIDsMap() {
+ using namespace llvm;
+ RecordData Record;
+
+ // Join the vectors of DeclIDs from all files.
+ SmallVector<DeclID, 256> FileSortedIDs;
+ for (FileDeclIDsTy::iterator
+ FI = FileDeclIDs.begin(), FE = FileDeclIDs.end(); FI != FE; ++FI) {
+ DeclIDInFileInfo &Info = *FI->second;
+ Info.FirstDeclIndex = FileSortedIDs.size();
+ for (LocDeclIDsTy::iterator
+ DI = Info.DeclIDs.begin(), DE = Info.DeclIDs.end(); DI != DE; ++DI)
+ FileSortedIDs.push_back(DI->second);
+ }
+
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(FILE_SORTED_DECLS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+ Record.push_back(FILE_SORTED_DECLS);
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, data(FileSortedIDs));
+}
+
//===----------------------------------------------------------------------===//
// Global Method Pool and Selector Serialization
//===----------------------------------------------------------------------===//
@@ -2117,7 +2384,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
}
// Create the on-disk hash table in a buffer.
- llvm::SmallString<4096> MethodPool;
+ SmallString<4096> MethodPool;
uint32_t BucketOffset;
{
ASTMethodPoolTrait Trait(*this);
@@ -2190,6 +2457,7 @@ namespace {
class ASTIdentifierTableTrait {
ASTWriter &Writer;
Preprocessor &PP;
+ IdentifierResolver &IdResolver;
bool IsModule;
/// \brief Determines whether this is an "interesting" identifier
@@ -2199,6 +2467,7 @@ class ASTIdentifierTableTrait {
if (II->isPoisoned() ||
II->isExtensionToken() ||
II->getObjCOrBuiltinID() ||
+ II->hasRevertedTokenIDToIdentifier() ||
II->getFETokenInfo<void>())
return true;
@@ -2210,7 +2479,7 @@ class ASTIdentifierTableTrait {
return false;
if (Macro || (Macro = PP.getMacroInfo(II)))
- return !Macro->isBuiltinMacro() && (!IsModule || Macro->isExported());
+ return !Macro->isBuiltinMacro() && (!IsModule || Macro->isPublic());
return false;
}
@@ -2222,24 +2491,26 @@ public:
typedef IdentID data_type;
typedef data_type data_type_ref;
- ASTIdentifierTableTrait(ASTWriter &Writer, Preprocessor &PP, bool IsModule)
- : Writer(Writer), PP(PP), IsModule(IsModule) { }
+ ASTIdentifierTableTrait(ASTWriter &Writer, Preprocessor &PP,
+ IdentifierResolver &IdResolver, bool IsModule)
+ : Writer(Writer), PP(PP), IdResolver(IdResolver), IsModule(IsModule) { }
static unsigned ComputeHash(const IdentifierInfo* II) {
return llvm::HashString(II->getName());
}
std::pair<unsigned,unsigned>
- EmitKeyDataLength(raw_ostream& Out, IdentifierInfo* II, IdentID ID) {
+ EmitKeyDataLength(raw_ostream& Out, IdentifierInfo* II, IdentID ID) {
unsigned KeyLen = II->getLength() + 1;
unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
MacroInfo *Macro = 0;
if (isInterestingIdentifier(II, Macro)) {
DataLen += 2; // 2 bytes for builtin ID, flags
if (hasMacroDefinition(II, Macro))
- DataLen += 4;
- for (IdentifierResolver::iterator D = IdentifierResolver::begin(II),
- DEnd = IdentifierResolver::end();
+ DataLen += 8;
+
+ for (IdentifierResolver::iterator D = IdResolver.begin(II),
+ DEnd = IdResolver.end();
D != DEnd; ++D)
DataLen += sizeof(DeclID);
}
@@ -2271,6 +2542,7 @@ public:
uint32_t Bits = 0;
bool HasMacroDefinition = hasMacroDefinition(II, Macro);
Bits = (uint32_t)II->getObjCOrBuiltinID();
+ assert((Bits & 0x7ff) == Bits && "ObjCOrBuiltinID too big for ASTReader.");
Bits = (Bits << 1) | unsigned(HasMacroDefinition);
Bits = (Bits << 1) | unsigned(II->isExtensionToken());
Bits = (Bits << 1) | unsigned(II->isPoisoned());
@@ -2278,20 +2550,22 @@ public:
Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword());
clang::io::Emit16(Out, Bits);
- if (HasMacroDefinition)
+ if (HasMacroDefinition) {
clang::io::Emit32(Out, Writer.getMacroOffset(II));
-
+ clang::io::Emit32(Out,
+ Writer.inferSubmoduleIDFromLocation(Macro->getDefinitionLoc()));
+ }
+
// Emit the declaration IDs in reverse order, because the
// IdentifierResolver provides the declarations as they would be
// visible (e.g., the function "stat" would come before the struct
- // "stat"), but IdentifierResolver::AddDeclToIdentifierChain()
- // adds declarations to the end of the list (so we need to see the
- // struct "status" before the function "status").
+ // "stat"), but the ASTReader adds declarations to the end of the list
+ // (so we need to see the struct "status" before the function "status").
// Only emit declarations that aren't from a chained PCH, though.
- SmallVector<Decl *, 16> Decls(IdentifierResolver::begin(II),
- IdentifierResolver::end());
+ SmallVector<Decl *, 16> Decls(IdResolver.begin(II),
+ IdResolver.end());
for (SmallVector<Decl *, 16>::reverse_iterator D = Decls.rbegin(),
- DEnd = Decls.rend();
+ DEnd = Decls.rend();
D != DEnd; ++D)
clang::io::Emit32(Out, Writer.getDeclID(*D));
}
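The rewritten comment documents a subtle invariant: the writer emits an identifier's declarations in reverse visibility order because the reader appends each one it reads to the end of the chain, so the two reversals cancel. A toy demonstration of why reversing at write time restores the original order:

```cpp
#include <cassert>
#include <string>
#include <vector>

int main() {
  // Visibility order: the function "stat" shadows the struct "stat".
  std::vector<std::string> Visible = {"function stat", "struct stat"};

  // Writer: emit least-visible first (reverse iteration over Decls).
  std::vector<std::string> Stream(Visible.rbegin(), Visible.rend());

  // Reader: each decl it reads becomes the new most-visible entry.
  std::vector<std::string> Rebuilt;
  for (const std::string &D : Stream)
    Rebuilt.insert(Rebuilt.begin(), D);

  assert(Rebuilt == Visible && "double reversal restores the order");
}
```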
@@ -2303,14 +2577,16 @@ public:
/// The identifier table consists of a blob containing string data
/// (the actual identifiers themselves) and a separate "offsets" index
/// that maps identifier IDs to locations within the blob.
-void ASTWriter::WriteIdentifierTable(Preprocessor &PP, bool IsModule) {
+void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
+ IdentifierResolver &IdResolver,
+ bool IsModule) {
using namespace llvm;
// Create and write out the blob that contains the identifier
// strings.
{
OnDiskChainedHashTableGenerator<ASTIdentifierTableTrait> Generator;
- ASTIdentifierTableTrait Trait(*this, PP, IsModule);
+ ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule);
// Look for any identifiers that were named while processing the
// headers, but are otherwise not needed. We add these to the hash
@@ -2329,16 +2605,17 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP, bool IsModule) {
ID = IdentifierIDs.begin(), IDEnd = IdentifierIDs.end();
ID != IDEnd; ++ID) {
assert(ID->first && "NULL identifier in identifier table");
- if (!Chain || !ID->first->isFromAST())
+ if (!Chain || !ID->first->isFromAST() ||
+ ID->first->hasChangedSinceDeserialization())
Generator.insert(const_cast<IdentifierInfo *>(ID->first), ID->second,
Trait);
}
// Create the on-disk hash table in a buffer.
- llvm::SmallString<4096> IdentifierTable;
+ SmallString<4096> IdentifierTable;
uint32_t BucketOffset;
{
- ASTIdentifierTableTrait Trait(*this, PP, IsModule);
+ ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule);
llvm::raw_svector_ostream Out(IdentifierTable);
// Make sure that no bucket is at offset 0
clang::io::Emit32(Out, 0);
@@ -2512,20 +2789,16 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
// IdentifierInfo chains, don't bother to build a visible-declarations table.
// FIXME: In C++ we need the visible declarations in order to "see" the
// friend declarations, is there a way to do this without writing the table ?
- if (DC->isTranslationUnit() && !Context.getLangOptions().CPlusPlus)
+ if (DC->isTranslationUnit() && !Context.getLangOpts().CPlusPlus)
return 0;
- // Force the DeclContext to build a its name-lookup table.
- if (!DC->hasExternalVisibleStorage())
- DC->lookup(DeclarationName());
-
// Serialize the contents of the mapping used for lookup. Note that,
// although we have two very different code paths, the serialized
// representation is the same for both cases: a declaration name,
// followed by a size, followed by references to the visible
// declarations that have that name.
uint64_t Offset = Stream.GetCurrentBitNo();
- StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(DC->getLookupPtr());
+ StoredDeclsMap *Map = DC->buildLookup();
if (!Map || Map->empty())
return 0;
@@ -2565,7 +2838,7 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
}
// Create the on-disk hash table in a buffer.
- llvm::SmallString<4096> LookupTable;
+ SmallString<4096> LookupTable;
uint32_t BucketOffset;
{
llvm::raw_svector_ostream Out(LookupTable);
@@ -2590,7 +2863,8 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
///
/// UPDATE_VISIBLE blocks contain the declarations that are added to an existing
/// DeclContext in a dependent AST file. As such, they only exist for the TU
-/// (in C++) and for namespaces.
+/// (in C++), for namespaces, and for classes with forward-declared unscoped
+/// enumeration members (in C++11).
void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(DC->getLookupPtr());
if (!Map || Map->empty())
@@ -2611,7 +2885,7 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
}
// Create the on-disk hash table in a buffer.
- llvm::SmallString<4096> LookupTable;
+ SmallString<4096> LookupTable;
uint32_t BucketOffset;
{
llvm::raw_svector_ostream Out(LookupTable);
@@ -2637,7 +2911,7 @@ void ASTWriter::WriteFPPragmaOptions(const FPOptions &Opts) {
/// \brief Write an OPENCL_EXTENSIONS block for the given OpenCLOptions.
void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
- if (!SemaRef.Context.getLangOptions().OpenCL)
+ if (!SemaRef.Context.getLangOpts().OpenCL)
return;
const OpenCLOptions &Opts = SemaRef.getOpenCLOptions();
@@ -2647,6 +2921,143 @@ void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
}
+void ASTWriter::WriteRedeclarations() {
+ RecordData LocalRedeclChains;
+ SmallVector<serialization::LocalRedeclarationsInfo, 2> LocalRedeclsMap;
+
+ for (unsigned I = 0, N = Redeclarations.size(); I != N; ++I) {
+ Decl *First = Redeclarations[I];
+ assert(First->getPreviousDecl() == 0 && "Not the first declaration?");
+
+ Decl *MostRecent = First->getMostRecentDecl();
+
+ // If we only have a single declaration, there is no point in storing
+ // a redeclaration chain.
+ if (First == MostRecent)
+ continue;
+
+ unsigned Offset = LocalRedeclChains.size();
+ unsigned Size = 0;
+ LocalRedeclChains.push_back(0); // Placeholder for the size.
+
+ // Collect the set of local redeclarations of this declaration.
+ for (Decl *Prev = MostRecent; Prev != First;
+ Prev = Prev->getPreviousDecl()) {
+ if (!Prev->isFromASTFile()) {
+ AddDeclRef(Prev, LocalRedeclChains);
+ ++Size;
+ }
+ }
+ LocalRedeclChains[Offset] = Size;
+
+ // Reverse the set of local redeclarations, so that we store them in
+ // order (since we found them in reverse order).
+ std::reverse(LocalRedeclChains.end() - Size, LocalRedeclChains.end());
+
+ // Add the mapping from the first ID to the set of local declarations.
+ LocalRedeclarationsInfo Info = { getDeclID(First), Offset };
+ LocalRedeclsMap.push_back(Info);
+
+ assert(N == Redeclarations.size() &&
+ "Deserialized a declaration we shouldn't have");
+ }
+
+ if (LocalRedeclChains.empty())
+ return;
+
+ // Sort the local redeclarations map by the first declaration ID,
+ // since the reader will be performing binary searches on this information.
+ llvm::array_pod_sort(LocalRedeclsMap.begin(), LocalRedeclsMap.end());
+
+ // Emit the local redeclarations map.
+ using namespace llvm;
+ llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(LOCAL_REDECLARATIONS_MAP));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of entries
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned AbbrevID = Stream.EmitAbbrev(Abbrev);
+
+ RecordData Record;
+ Record.push_back(LOCAL_REDECLARATIONS_MAP);
+ Record.push_back(LocalRedeclsMap.size());
+ Stream.EmitRecordWithBlob(AbbrevID, Record,
+ reinterpret_cast<char*>(LocalRedeclsMap.data()),
+ LocalRedeclsMap.size() * sizeof(LocalRedeclarationsInfo));
+
+ // Emit the redeclaration chains.
+ Stream.EmitRecord(LOCAL_REDECLARATIONS, LocalRedeclChains);
+}
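
The LOCAL_REDECLARATIONS_MAP blob emitted above is an array of
LocalRedeclarationsInfo entries sorted by the first declaration's ID,
precisely so the reader can binary-search it. A sketch of that lookup,
assuming a mirror struct whose two fields match what the writer stores (a
first-declaration ID and an offset into the LOCAL_REDECLARATIONS record;
the names here are invented):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct LocalRedeclInfo {
      uint32_t FirstID; // ID of the chain's first declaration (sort key)
      uint32_t Offset;  // index of the size-prefixed chain in the record
    };

    // Binary search the sorted map for a given first-declaration ID.
    const LocalRedeclInfo *findChain(const std::vector<LocalRedeclInfo> &Map,
                                     uint32_t FirstID) {
      auto It = std::lower_bound(Map.begin(), Map.end(), FirstID,
                                 [](const LocalRedeclInfo &E, uint32_t ID) {
                                   return E.FirstID < ID;
                                 });
      return (It != Map.end() && It->FirstID == FirstID) ? &*It : nullptr;
    }
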
+
+void ASTWriter::WriteObjCCategories() {
+ llvm::SmallVector<ObjCCategoriesInfo, 2> CategoriesMap;
+ RecordData Categories;
+
+ for (unsigned I = 0, N = ObjCClassesWithCategories.size(); I != N; ++I) {
+ unsigned Size = 0;
+ unsigned StartIndex = Categories.size();
+
+ ObjCInterfaceDecl *Class = ObjCClassesWithCategories[I];
+
+ // Allocate space for the size.
+ Categories.push_back(0);
+
+ // Add the categories.
+ for (ObjCCategoryDecl *Cat = Class->getCategoryList();
+ Cat; Cat = Cat->getNextClassCategory(), ++Size) {
+ assert(getDeclID(Cat) != 0 && "Bogus category");
+ AddDeclRef(Cat, Categories);
+ }
+
+ // Update the size.
+ Categories[StartIndex] = Size;
+
+ // Record this interface -> category map.
+ ObjCCategoriesInfo CatInfo = { getDeclID(Class), StartIndex };
+ CategoriesMap.push_back(CatInfo);
+ }
+
+ // Sort the categories map by the definition ID, since the reader will be
+ // performing binary searches on this information.
+ llvm::array_pod_sort(CategoriesMap.begin(), CategoriesMap.end());
+
+ // Emit the categories map.
+ using namespace llvm;
+ llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(OBJC_CATEGORIES_MAP));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of entries
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned AbbrevID = Stream.EmitAbbrev(Abbrev);
+
+ RecordData Record;
+ Record.push_back(OBJC_CATEGORIES_MAP);
+ Record.push_back(CategoriesMap.size());
+ Stream.EmitRecordWithBlob(AbbrevID, Record,
+ reinterpret_cast<char*>(CategoriesMap.data()),
+ CategoriesMap.size() * sizeof(ObjCCategoriesInfo));
+
+ // Emit the category lists.
+ Stream.EmitRecord(OBJC_CATEGORIES, Categories);
+}
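
WriteObjCCategories follows the same pattern: a sorted map blob for binary
search plus a flat record of size-prefixed runs. Decoding one run from the
OBJC_CATEGORIES record is then just a slice, as in this sketch (the real
reader works on the deserialized record, not a plain vector):

    #include <cstdint>
    #include <vector>

    // StartIndex is the offset stored in the categories map; the writer
    // placed the run's length there, followed by that many category IDs.
    std::vector<uint64_t>
    readCategoryRun(const std::vector<uint64_t> &Categories,
                    size_t StartIndex) {
      size_t Size = static_cast<size_t>(Categories[StartIndex]);
      return std::vector<uint64_t>(
          Categories.begin() + StartIndex + 1,
          Categories.begin() + StartIndex + 1 + Size);
    }
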
+
+void ASTWriter::WriteMergedDecls() {
+ if (!Chain || Chain->MergedDecls.empty())
+ return;
+
+ RecordData Record;
+ for (ASTReader::MergedDeclsMap::iterator I = Chain->MergedDecls.begin(),
+ IEnd = Chain->MergedDecls.end();
+ I != IEnd; ++I) {
+ DeclID CanonID = I->first->isFromASTFile()? I->first->getGlobalID()
+ : getDeclID(I->first);
+ assert(CanonID && "Merged declaration not known?");
+
+ Record.push_back(CanonID);
+ Record.push_back(I->second.size());
+ Record.append(I->second.begin(), I->second.end());
+ }
+ Stream.EmitRecord(MERGED_DECLARATIONS, Record);
+}
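
The MERGED_DECLARATIONS record built above is a flat sequence of groups:
a canonical declaration ID, a count, then that many merged IDs. A decode
sketch inferred from the writer (the function and container choices are
this sketch's, not the reader's):

    #include <cstdint>
    #include <map>
    #include <vector>

    std::map<uint64_t, std::vector<uint64_t>>
    decodeMergedDecls(const std::vector<uint64_t> &Record) {
      std::map<uint64_t, std::vector<uint64_t>> Result;
      for (size_t I = 0; I < Record.size();) {
        uint64_t CanonID = Record[I++];
        size_t Count = static_cast<size_t>(Record[I++]);
        Result[CanonID].assign(Record.begin() + I,
                               Record.begin() + I + Count);
        I += Count;
      }
      return Result;
    }
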
+
//===----------------------------------------------------------------------===//
// General Serialization Routines
//===----------------------------------------------------------------------===//
@@ -2705,10 +3116,13 @@ void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
}
ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream)
- : Stream(Stream), Context(0), Chain(0), WritingAST(false),
+ : Stream(Stream), Context(0), PP(0), Chain(0), WritingModule(0),
+ WritingAST(false), ASTHasCompilerErrors(false),
FirstDeclID(NUM_PREDEF_DECL_IDS), NextDeclID(FirstDeclID),
FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID),
FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID),
+ FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS),
+ NextSubmoduleID(FirstSubmoduleID),
FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID),
CollectedStmts(&StmtsToEmit),
NumStatements(0), NumMacros(0), NumLexicalDeclContexts(0),
@@ -2724,11 +3138,20 @@ ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream)
{
}
+ASTWriter::~ASTWriter() {
+ for (FileDeclIDsTy::iterator
+ I = FileDeclIDs.begin(), E = FileDeclIDs.end(); I != E; ++I)
+ delete I->second;
+}
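
The new destructor exists because FileDeclIDs owns its DeclIDInFileInfo
values through raw pointers. The same ownership could be expressed without
a manual delete loop, as in this sketch of an alternative (not the code
the writer uses; DeclIDInFileInfo is reduced to its essence here):

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <utility>
    #include <vector>

    struct DeclIDInFileInfo {
      std::vector<std::pair<unsigned, uint32_t>> DeclIDs; // (offset, ID)
    };

    // unique_ptr values make the map the owner; no destructor loop needed.
    using FileDeclIDsTy =
        std::map<const void *, std::unique_ptr<DeclIDInFileInfo>>;
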
+
void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
const std::string &OutputFile,
- bool IsModule, StringRef isysroot) {
+ Module *WritingModule, StringRef isysroot,
+ bool hasErrors) {
WritingAST = true;
+ ASTHasCompilerErrors = hasErrors;
+
// Emit the file header.
Stream.Emit((unsigned)'C', 8);
Stream.Emit((unsigned)'P', 8);
@@ -2738,8 +3161,12 @@ void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
WriteBlockInfoBlock();
Context = &SemaRef.Context;
- WriteASTCore(SemaRef, StatCalls, isysroot, OutputFile, IsModule);
+ PP = &SemaRef.PP;
+ this->WritingModule = WritingModule;
+ WriteASTCore(SemaRef, StatCalls, isysroot, OutputFile, WritingModule);
Context = 0;
+ PP = 0;
+ this->WritingModule = 0;
WritingAST = false;
}
@@ -2755,9 +3182,14 @@ static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec,
void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
StringRef isysroot,
- const std::string &OutputFile, bool IsModule) {
+ const std::string &OutputFile,
+ Module *WritingModule) {
using namespace llvm;
+ // Make sure that the AST reader knows to finalize itself.
+ if (Chain)
+ Chain->finalizeForWriting();
+
ASTContext &Context = SemaRef.Context;
Preprocessor &PP = SemaRef.PP;
@@ -2769,6 +3201,8 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
DeclIDs[Context.ObjCSelDecl] = PREDEF_DECL_OBJC_SEL_ID;
if (Context.ObjCClassDecl)
DeclIDs[Context.ObjCClassDecl] = PREDEF_DECL_OBJC_CLASS_ID;
+ if (Context.ObjCProtocolClassDecl)
+ DeclIDs[Context.ObjCProtocolClassDecl] = PREDEF_DECL_OBJC_PROTOCOL_ID;
if (Context.Int128Decl)
DeclIDs[Context.Int128Decl] = PREDEF_DECL_INT_128_ID;
if (Context.UInt128Decl)
@@ -2785,11 +3219,20 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
IdentifierTable &Table = PP.getIdentifierTable();
SmallVector<const char *, 32> BuiltinNames;
Context.BuiltinInfo.GetBuiltinNames(BuiltinNames,
- Context.getLangOptions().NoBuiltin);
+ Context.getLangOpts().NoBuiltin);
for (unsigned I = 0, N = BuiltinNames.size(); I != N; ++I)
getIdentifierRef(&Table.get(BuiltinNames[I]));
}
+ // If there are any out-of-date identifiers, bring them up to date.
+ if (ExternalPreprocessorSource *ExtSource = PP.getExternalSource()) {
+ for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(),
+ IDEnd = PP.getIdentifierTable().end();
+ ID != IDEnd; ++ID)
+ if (ID->second->isOutOfDate())
+ ExtSource->updateOutOfDateIdentifier(*ID->second);
+ }
+
// Build a record containing all of the tentative definitions in this file, in
// TentativeDefinitions order. Generally, this record will be empty for
// headers.
@@ -2890,52 +3333,9 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
RecordData Record;
Stream.EnterSubblock(AST_BLOCK_ID, 5);
WriteMetadata(Context, isysroot, OutputFile);
- WriteLanguageOptions(Context.getLangOptions());
+ WriteLanguageOptions(Context.getLangOpts());
if (StatCalls && isysroot.empty())
WriteStatCache(*StatCalls);
- WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot);
-
- if (Chain) {
- // Write the mapping information describing our module dependencies and how
- // each of those modules were mapped into our own offset/ID space, so that
- // the reader can build the appropriate mapping to its own offset/ID space.
- // The map consists solely of a blob with the following format:
- // *(module-name-len:i16 module-name:len*i8
- // source-location-offset:i32
- // identifier-id:i32
- // preprocessed-entity-id:i32
- // macro-definition-id:i32
- // selector-id:i32
- // declaration-id:i32
- // c++-base-specifiers-id:i32
- // type-id:i32)
- //
- llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
- Abbrev->Add(BitCodeAbbrevOp(MODULE_OFFSET_MAP));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
- unsigned ModuleOffsetMapAbbrev = Stream.EmitAbbrev(Abbrev);
- llvm::SmallString<2048> Buffer;
- {
- llvm::raw_svector_ostream Out(Buffer);
- for (ModuleManager::ModuleConstIterator M = Chain->ModuleMgr.begin(),
- MEnd = Chain->ModuleMgr.end();
- M != MEnd; ++M) {
- StringRef FileName = (*M)->FileName;
- io::Emit16(Out, FileName.size());
- Out.write(FileName.data(), FileName.size());
- io::Emit32(Out, (*M)->SLocEntryBaseOffset);
- io::Emit32(Out, (*M)->BaseIdentifierID);
- io::Emit32(Out, (*M)->BasePreprocessedEntityID);
- io::Emit32(Out, (*M)->BaseSelectorID);
- io::Emit32(Out, (*M)->BaseDeclID);
- io::Emit32(Out, (*M)->BaseTypeIndex);
- }
- }
- Record.clear();
- Record.push_back(MODULE_OFFSET_MAP);
- Stream.EmitRecordWithBlob(ModuleOffsetMapAbbrev, Record,
- Buffer.data(), Buffer.size());
- }
// Create a lexical update block containing all of the declarations in the
// translation unit that do not come from other AST files.
@@ -2946,8 +3346,6 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
I != E; ++I) {
if (!(*I)->isFromASTFile())
NewGlobalDecls.push_back(std::make_pair((*I)->getKind(), GetDeclRef(*I)));
- else if ((*I)->isChangedSinceDeserialization())
- (void)GetDeclRef(*I); // Make sure it's written, but don't record it.
}
llvm::BitCodeAbbrev *Abv = new llvm::BitCodeAbbrev();
@@ -2978,15 +3376,12 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
}
}
- // Resolve any declaration pointers within the declaration updates block and
- // chained Objective-C categories block to declaration IDs.
+ // Resolve any declaration pointers within the declaration updates block.
ResolveDeclUpdatesBlocks();
- ResolveChainedObjCCategories();
// Form the record of special types.
RecordData SpecialTypes;
AddTypeRef(Context.getBuiltinVaListType(), SpecialTypes);
- AddTypeRef(Context.ObjCProtoType, SpecialTypes);
AddTypeRef(Context.getRawCFConstantStringType(), SpecialTypes);
AddTypeRef(Context.getFILEType(), SpecialTypes);
AddTypeRef(Context.getjmp_bufType(), SpecialTypes);
@@ -2995,7 +3390,7 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
AddTypeRef(Context.ObjCClassRedefinitionType, SpecialTypes);
AddTypeRef(Context.ObjCSelRedefinitionType, SpecialTypes);
AddTypeRef(Context.getucontext_tType(), SpecialTypes);
-
+
// Keep writing types and declarations until all types and
// declarations have been written.
Stream.EnterSubblock(DECLTYPES_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
@@ -3014,11 +3409,57 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
}
Stream.ExitBlock();
- WritePreprocessor(PP, IsModule);
+ WriteFileDeclIDsMap();
+ WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot);
+
+ if (Chain) {
+ // Write the mapping information describing our module dependencies and how
+ // each of those modules were mapped into our own offset/ID space, so that
+ // the reader can build the appropriate mapping to its own offset/ID space.
+ // The map consists solely of a blob with the following format:
+ // *(module-name-len:i16 module-name:len*i8
+ // source-location-offset:i32
+ // identifier-id:i32
+ // preprocessed-entity-id:i32
+ // macro-definition-id:i32
+ // submodule-id:i32
+ // selector-id:i32
+ // declaration-id:i32
+ // c++-base-specifiers-id:i32
+ // type-id:i32)
+ //
+ llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(MODULE_OFFSET_MAP));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned ModuleOffsetMapAbbrev = Stream.EmitAbbrev(Abbrev);
+ SmallString<2048> Buffer;
+ {
+ llvm::raw_svector_ostream Out(Buffer);
+ for (ModuleManager::ModuleConstIterator M = Chain->ModuleMgr.begin(),
+ MEnd = Chain->ModuleMgr.end();
+ M != MEnd; ++M) {
+ StringRef FileName = (*M)->FileName;
+ io::Emit16(Out, FileName.size());
+ Out.write(FileName.data(), FileName.size());
+ io::Emit32(Out, (*M)->SLocEntryBaseOffset);
+ io::Emit32(Out, (*M)->BaseIdentifierID);
+ io::Emit32(Out, (*M)->BasePreprocessedEntityID);
+ io::Emit32(Out, (*M)->BaseSubmoduleID);
+ io::Emit32(Out, (*M)->BaseSelectorID);
+ io::Emit32(Out, (*M)->BaseDeclID);
+ io::Emit32(Out, (*M)->BaseTypeIndex);
+ }
+ }
+ Record.clear();
+ Record.push_back(MODULE_OFFSET_MAP);
+ Stream.EmitRecordWithBlob(ModuleOffsetMapAbbrev, Record,
+ Buffer.data(), Buffer.size());
+ }
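
The block comment above documents the MODULE_OFFSET_MAP blob; note that it
lists macro-definition-id and c++-base-specifiers-id fields that the Emit
calls do not actually write. Following the code rather than the comment,
one entry lays out conceptually like this (a mirror for illustration only;
the real blob is a packed byte stream, and EntryLayout is an invented
name):

    #include <cstdint>
    #include <string>

    struct EntryLayout {
      std::string FileName;              // len:i16, then len bytes
      uint32_t SLocEntryBaseOffset;      // source-location-offset
      uint32_t BaseIdentifierID;         // identifier-id
      uint32_t BasePreprocessedEntityID; // preprocessed-entity-id
      uint32_t BaseSubmoduleID;          // submodule-id (new here)
      uint32_t BaseSelectorID;           // selector-id
      uint32_t BaseDeclID;               // declaration-id
      uint32_t BaseTypeIndex;            // type-id
    };
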
+ WritePreprocessor(PP, WritingModule != 0);
WriteHeaderSearch(PP.getHeaderSearchInfo(), isysroot);
WriteSelectors(SemaRef);
WriteReferencedSelectorsPool(SemaRef);
- WriteIdentifierTable(PP, IsModule);
+ WriteIdentifierTable(PP, SemaRef.IdResolver, WritingModule != 0);
WriteFPPragmaOptions(SemaRef.getFPOptions());
WriteOpenCLExtensions(SemaRef);
@@ -3027,20 +3468,11 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
WriteCXXBaseSpecifiersOffsets();
- Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
+ // If we're emitting a module, write out the submodule information.
+ if (WritingModule)
+ WriteSubmodules(WritingModule);
- /// Build a record containing first declarations from a chained PCH and the
- /// most recent declarations in this AST that they point to.
- RecordData FirstLatestDeclIDs;
- for (FirstLatestDeclMap::iterator I = FirstLatestDecls.begin(),
- E = FirstLatestDecls.end();
- I != E; ++I) {
- AddDeclRef(I->first, FirstLatestDeclIDs);
- AddDeclRef(I->second, FirstLatestDeclIDs);
- }
-
- if (!FirstLatestDeclIDs.empty())
- Stream.EmitRecord(REDECLS_UPDATE_LATEST, FirstLatestDeclIDs);
+ Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
// Write the record containing external, unnamed definitions.
if (!ExternalDefinitions.empty())
@@ -3103,10 +3535,34 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
I != E; ++I)
WriteDeclContextVisibleUpdate(*I);
+ if (!WritingModule) {
+ // Write the submodules that were imported, if any.
+ RecordData ImportedModules;
+ for (ASTContext::import_iterator I = Context.local_import_begin(),
+ IEnd = Context.local_import_end();
+ I != IEnd; ++I) {
+ assert(SubmoduleIDs.find(I->getImportedModule()) != SubmoduleIDs.end());
+ ImportedModules.push_back(SubmoduleIDs[I->getImportedModule()]);
+ }
+ if (!ImportedModules.empty()) {
+ // Sort module IDs.
+ llvm::array_pod_sort(ImportedModules.begin(), ImportedModules.end());
+
+ // Unique module IDs.
+ ImportedModules.erase(std::unique(ImportedModules.begin(),
+ ImportedModules.end()),
+ ImportedModules.end());
+
+ Stream.EmitRecord(IMPORTED_MODULES, ImportedModules);
+ }
+ }
+
WriteDeclUpdatesBlocks();
WriteDeclReplacementsBlock();
- WriteChainedObjCCategories();
-
+ WriteMergedDecls();
+ WriteRedeclarations();
+ WriteObjCCategories();
+
// Some simple statistics
Record.clear();
Record.push_back(NumStatements);
@@ -3125,13 +3581,12 @@ void ASTWriter::ResolveDeclUpdatesBlocks() {
const Decl *D = I->first;
UpdateRecord &URec = I->second;
- if (DeclsToRewrite.count(D))
+ if (isRewritten(D))
continue; // The decl will be written completely
unsigned Idx = 0, N = URec.size();
while (Idx < N) {
switch ((DeclUpdateKind)URec[Idx++]) {
- case UPD_CXX_SET_DEFINITIONDATA:
case UPD_CXX_ADDED_IMPLICIT_MEMBER:
case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE:
@@ -3158,7 +3613,7 @@ void ASTWriter::WriteDeclUpdatesBlocks() {
const Decl *D = I->first;
UpdateRecord &URec = I->second;
- if (DeclsToRewrite.count(D))
+ if (isRewritten(D))
continue; // The decl will be written completely, no need to store updates.
uint64_t Offset = Stream.GetCurrentBitNo();
@@ -3176,45 +3631,15 @@ void ASTWriter::WriteDeclReplacementsBlock() {
return;
RecordData Record;
- for (SmallVector<std::pair<DeclID, uint64_t>, 16>::iterator
+ for (SmallVector<ReplacedDeclInfo, 16>::iterator
I = ReplacedDecls.begin(), E = ReplacedDecls.end(); I != E; ++I) {
- Record.push_back(I->first);
- Record.push_back(I->second);
+ Record.push_back(I->ID);
+ Record.push_back(I->Offset);
+ Record.push_back(I->Loc);
}
Stream.EmitRecord(DECL_REPLACEMENTS, Record);
}
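
With the switch to ReplacedDeclInfo, each DECL_REPLACEMENTS entry is now a
triple: the replaced declaration's ID, the bit offset of its replacement,
and (new in this revision) a raw source location. A reader-side decode
sketch (ReplacedInfo is an invented mirror of those three fields):

    #include <cstdint>
    #include <vector>

    struct ReplacedInfo {
      uint64_t ID;     // declaration being replaced
      uint64_t Offset; // bit offset of the replacement in the stream
      uint64_t Loc;    // raw source location
    };

    std::vector<ReplacedInfo>
    decodeReplacements(const std::vector<uint64_t> &R) {
      std::vector<ReplacedInfo> Out;
      for (size_t I = 0; I + 3 <= R.size(); I += 3)
        Out.push_back({R[I], R[I + 1], R[I + 2]});
      return Out;
    }
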
-void ASTWriter::ResolveChainedObjCCategories() {
- for (SmallVector<ChainedObjCCategoriesData, 16>::iterator
- I = LocalChainedObjCCategories.begin(),
- E = LocalChainedObjCCategories.end(); I != E; ++I) {
- ChainedObjCCategoriesData &Data = *I;
- Data.InterfaceID = GetDeclRef(Data.Interface);
- Data.TailCategoryID = GetDeclRef(Data.TailCategory);
- }
-
-}
-
-void ASTWriter::WriteChainedObjCCategories() {
- if (LocalChainedObjCCategories.empty())
- return;
-
- RecordData Record;
- for (SmallVector<ChainedObjCCategoriesData, 16>::iterator
- I = LocalChainedObjCCategories.begin(),
- E = LocalChainedObjCCategories.end(); I != E; ++I) {
- ChainedObjCCategoriesData &Data = *I;
- serialization::DeclID
- HeadCatID = getDeclID(Data.Interface->getCategoryList());
- assert(HeadCatID != 0 && "Category not written ?");
-
- Record.push_back(Data.InterfaceID);
- Record.push_back(HeadCatID);
- Record.push_back(Data.TailCategoryID);
- }
- Stream.EmitRecord(OBJC_CHAINED_CATEGORIES, Record);
-}
-
void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record) {
Record.push_back(Loc.getRawEncoding());
}
@@ -3397,6 +3822,12 @@ DeclID ASTWriter::GetDeclRef(const Decl *D) {
if (D == 0) {
return 0;
}
+
+ // If D comes from an AST file, its declaration ID is already known and
+ // fixed.
+ if (D->isFromASTFile())
+ return D->getGlobalID();
+
assert(!(reinterpret_cast<uintptr_t>(D) & 0x01) && "Invalid decl pointer");
DeclID &ID = DeclIDs[D];
if (ID == 0) {
@@ -3404,12 +3835,6 @@ DeclID ASTWriter::GetDeclRef(const Decl *D) {
// enqueue it in the list of declarations to emit.
ID = NextDeclID++;
DeclTypesToEmit.push(const_cast<Decl *>(D));
- } else if (ID < FirstDeclID && D->isChangedSinceDeserialization()) {
- // We don't add it to the replacement collection here, because we don't
- // have the offset yet.
- DeclTypesToEmit.push(const_cast<Decl *>(D));
- // Reset the flag, so that we don't add this decl multiple times.
- const_cast<Decl *>(D)->setChangedSinceDeserialization(false);
}
return ID;
@@ -3419,10 +3844,65 @@ DeclID ASTWriter::getDeclID(const Decl *D) {
if (D == 0)
return 0;
+ // If D comes from an AST file, its declaration ID is already known and
+ // fixed.
+ if (D->isFromASTFile())
+ return D->getGlobalID();
+
assert(DeclIDs.find(D) != DeclIDs.end() && "Declaration not emitted!");
return DeclIDs[D];
}
+static inline bool compLocDecl(std::pair<unsigned, serialization::DeclID> L,
+ std::pair<unsigned, serialization::DeclID> R) {
+ return L.first < R.first;
+}
+
+void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
+ assert(ID);
+ assert(D);
+
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid())
+ return;
+
+ // We only keep track of the file-level declarations of each file.
+ if (!D->getLexicalDeclContext()->isFileContext())
+ return;
+ // FIXME: ParmVarDecls that are part of a function type of a parameter of
+ // a function/objc method should not have the TU as their lexical context.
+ if (isa<ParmVarDecl>(D))
+ return;
+
+ SourceManager &SM = Context->getSourceManager();
+ SourceLocation FileLoc = SM.getFileLoc(Loc);
+ assert(SM.isLocalSourceLocation(FileLoc));
+ FileID FID;
+ unsigned Offset;
+ llvm::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
+ if (FID.isInvalid())
+ return;
+ const SrcMgr::SLocEntry *Entry = &SM.getSLocEntry(FID);
+ assert(Entry->isFile());
+
+ DeclIDInFileInfo *&Info = FileDeclIDs[Entry];
+ if (!Info)
+ Info = new DeclIDInFileInfo();
+
+ std::pair<unsigned, serialization::DeclID> LocDecl(Offset, ID);
+ LocDeclIDsTy &Decls = Info->DeclIDs;
+
+ if (Decls.empty() || Decls.back().first <= Offset) {
+ Decls.push_back(LocDecl);
+ return;
+ }
+
+ LocDeclIDsTy::iterator
+ I = std::upper_bound(Decls.begin(), Decls.end(), LocDecl, compLocDecl);
+
+ Decls.insert(I, LocDecl);
+}
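
associateDeclWithFile keeps each file's (offset, ID) list sorted, appending
in the common in-order case and falling back to an upper_bound insertion
otherwise. That invariant makes "which declarations fall in this source
range?" a pair of binary searches, as in this sketch (illustrative only;
the real reader has its own range query):

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using LocDecl = std::pair<unsigned, uint32_t>; // (file offset, decl ID)

    // Decls must be sorted by offset, which the writer guarantees.
    std::vector<uint32_t> declsInRange(const std::vector<LocDecl> &Decls,
                                       unsigned Begin, unsigned End) {
      auto Lo = std::lower_bound(Decls.begin(), Decls.end(),
                                 LocDecl(Begin, 0));
      auto Hi = std::lower_bound(Lo, Decls.end(), LocDecl(End, 0));
      std::vector<uint32_t> IDs;
      for (auto It = Lo; It != Hi; ++It)
        IDs.push_back(It->second);
      return IDs;
    }
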
+
void ASTWriter::AddDeclarationName(DeclarationName Name, RecordDataImpl &Record) {
// FIXME: Emit a stable enum for NameKind. 0 = Identifier etc.
Record.push_back(Name.getNameKind());
@@ -3776,11 +4256,11 @@ void ASTWriter::AddCXXCtorInitializers(
if (Init->isBaseInitializer()) {
Record.push_back(CTOR_INITIALIZER_BASE);
- AddTypeSourceInfo(Init->getBaseClassInfo(), Record);
+ AddTypeSourceInfo(Init->getTypeSourceInfo(), Record);
Record.push_back(Init->isBaseVirtual());
} else if (Init->isDelegatingInitializer()) {
Record.push_back(CTOR_INITIALIZER_DELEGATING);
- AddDeclRef(Init->getTargetConstructor(), Record);
+ AddTypeSourceInfo(Init->getTypeSourceInfo(), Record);
} else if (Init->isMemberInitializer()){
Record.push_back(CTOR_INITIALIZER_MEMBER);
AddDeclRef(Init->getMember(), Record);
@@ -3807,6 +4287,7 @@ void ASTWriter::AddCXXCtorInitializers(
void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Record) {
assert(D->DefinitionData);
struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData;
+ Record.push_back(Data.IsLambda);
Record.push_back(Data.UserDeclaredConstructor);
Record.push_back(Data.UserDeclaredCopyConstructor);
Record.push_back(Data.UserDeclaredMoveConstructor);
@@ -3824,13 +4305,21 @@ void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Rec
Record.push_back(Data.HasProtectedFields);
Record.push_back(Data.HasPublicFields);
Record.push_back(Data.HasMutableFields);
+ Record.push_back(Data.HasOnlyCMembers);
Record.push_back(Data.HasTrivialDefaultConstructor);
Record.push_back(Data.HasConstexprNonCopyMoveConstructor);
+ Record.push_back(Data.DefaultedDefaultConstructorIsConstexpr);
+ Record.push_back(Data.DefaultedCopyConstructorIsConstexpr);
+ Record.push_back(Data.DefaultedMoveConstructorIsConstexpr);
+ Record.push_back(Data.HasConstexprDefaultConstructor);
+ Record.push_back(Data.HasConstexprCopyConstructor);
+ Record.push_back(Data.HasConstexprMoveConstructor);
Record.push_back(Data.HasTrivialCopyConstructor);
Record.push_back(Data.HasTrivialMoveConstructor);
Record.push_back(Data.HasTrivialCopyAssignment);
Record.push_back(Data.HasTrivialMoveAssignment);
Record.push_back(Data.HasTrivialDestructor);
+ Record.push_back(Data.HasIrrelevantDestructor);
Record.push_back(Data.HasNonLiteralTypeFieldsOrBases);
Record.push_back(Data.ComputedVisibleConversions);
Record.push_back(Data.UserProvidedDefaultConstructor);
@@ -3842,6 +4331,7 @@ void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Rec
Record.push_back(Data.DeclaredDestructor);
Record.push_back(Data.FailedImplicitMoveConstructor);
Record.push_back(Data.FailedImplicitMoveAssignment);
+ // IsLambda bit is already saved.
Record.push_back(Data.NumBases);
if (Data.NumBases > 0)
@@ -3858,6 +4348,27 @@ void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Rec
AddUnresolvedSet(Data.VisibleConversions, Record);
// Data.Definition is the owning decl, no need to write it.
AddDeclRef(Data.FirstFriend, Record);
+
+ // Add lambda-specific data.
+ if (Data.IsLambda) {
+ CXXRecordDecl::LambdaDefinitionData &Lambda = D->getLambdaData();
+ Record.push_back(Lambda.Dependent);
+ Record.push_back(Lambda.NumCaptures);
+ Record.push_back(Lambda.NumExplicitCaptures);
+ Record.push_back(Lambda.ManglingNumber);
+ AddDeclRef(Lambda.ContextDecl, Record);
+ for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
+ LambdaExpr::Capture &Capture = Lambda.Captures[I];
+ AddSourceLocation(Capture.getLocation(), Record);
+ Record.push_back(Capture.isImplicit());
+ Record.push_back(Capture.getCaptureKind()); // FIXME: stable!
+ VarDecl *Var = Capture.capturesVariable()? Capture.getCapturedVar() : 0;
+ AddDeclRef(Var, Record);
+ AddSourceLocation(Capture.isPackExpansion()? Capture.getEllipsisLoc()
+ : SourceLocation(),
+ Record);
+ }
+ }
}
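
The "FIXME: stable!" on the capture kind above flags that pushing a raw
enum value bakes the in-memory enumerator order into the file format. The
usual fix is an explicit switch that pins each enumerator to a fixed
on-disk code, sketched here with invented enumerators (not clang's actual
LambdaCaptureKind values):

    #include <cstdint>

    enum class CaptureKind { This, ByCopy, ByRef };

    uint8_t encodeCaptureKind(CaptureKind K) {
      switch (K) {
      case CaptureKind::This:   return 0; // fixed on-disk codes, immune to
      case CaptureKind::ByCopy: return 1; // enumerator reordering in the
      case CaptureKind::ByRef:  return 2; // in-memory enum
      }
      return 0; // unreachable; keeps the sketch warning-free
    }
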
void ASTWriter::ReaderInitialized(ASTReader *Reader) {
@@ -3866,6 +4377,7 @@ void ASTWriter::ReaderInitialized(ASTReader *Reader) {
assert(FirstDeclID == NextDeclID &&
FirstTypeID == NextTypeID &&
FirstIdentID == NextIdentID &&
+ FirstSubmoduleID == NextSubmoduleID &&
FirstSelectorID == NextSelectorID &&
"Setting chain after writing has started.");
@@ -3874,11 +4386,13 @@ void ASTWriter::ReaderInitialized(ASTReader *Reader) {
FirstDeclID = NUM_PREDEF_DECL_IDS + Chain->getTotalNumDecls();
FirstTypeID = NUM_PREDEF_TYPE_IDS + Chain->getTotalNumTypes();
FirstIdentID = NUM_PREDEF_IDENT_IDS + Chain->getTotalNumIdentifiers();
+ FirstSubmoduleID = NUM_PREDEF_SUBMODULE_IDS + Chain->getTotalNumSubmodules();
FirstSelectorID = NUM_PREDEF_SELECTOR_IDS + Chain->getTotalNumSelectors();
NextDeclID = FirstDeclID;
NextTypeID = FirstTypeID;
NextIdentID = FirstIdentID;
NextSelectorID = FirstSelectorID;
+ NextSubmoduleID = FirstSubmoduleID;
}
void ASTWriter::IdentifierRead(IdentID ID, IdentifierInfo *II) {
@@ -3898,10 +4412,6 @@ void ASTWriter::TypeRead(TypeIdx Idx, QualType T) {
StoredIdx = Idx;
}
-void ASTWriter::DeclRead(DeclID ID, const Decl *D) {
- DeclIDs[D] = ID;
-}
-
void ASTWriter::SelectorRead(SelectorID ID, Selector S) {
SelectorIDs[S] = ID;
}
@@ -3912,6 +4422,15 @@ void ASTWriter::MacroDefinitionRead(serialization::PreprocessedEntityID ID,
MacroDefinitions[MD] = ID;
}
+void ASTWriter::MacroVisible(IdentifierInfo *II) {
+ DeserializedMacroNames.push_back(II);
+}
+
+void ASTWriter::ModuleRead(serialization::SubmoduleID ID, Module *Mod) {
+ assert(SubmoduleIDs.find(Mod) == SubmoduleIDs.end());
+ SubmoduleIDs[Mod] = ID;
+}
+
void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
assert(D->isCompleteDefinition());
assert(!WritingAST && "Already writing the AST!");
@@ -3923,22 +4442,6 @@ void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
// have created a new definition decl instead ?
RewriteDecl(RD);
}
-
- for (CXXRecordDecl::redecl_iterator
- I = RD->redecls_begin(), E = RD->redecls_end(); I != E; ++I) {
- CXXRecordDecl *Redecl = cast<CXXRecordDecl>(*I);
- if (Redecl == RD)
- continue;
-
- // We are interested when a PCH decl is modified.
- if (Redecl->isFromASTFile()) {
- UpdateRecord &Record = DeclUpdates[Redecl];
- Record.push_back(UPD_CXX_SET_DEFINITIONDATA);
- assert(Redecl->DefinitionData);
- assert(Redecl->DefinitionData->Definition == D);
- Record.push_back(reinterpret_cast<uint64_t>(D)); // the DefinitionDecl
- }
- }
}
}
void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
@@ -4023,11 +4526,23 @@ void ASTWriter::AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
assert(!WritingAST && "Already writing the AST!");
if (!IFD->isFromASTFile())
return; // Declaration not imported from PCH.
- if (CatD->getNextClassCategory() &&
- !CatD->getNextClassCategory()->isFromASTFile())
- return; // We already recorded that the tail of a category chain should be
- // attached to an interface.
+
+ assert(IFD->getDefinition() && "Category on a class without a definition?");
+ ObjCClassesWithCategories.insert(
+ const_cast<ObjCInterfaceDecl *>(IFD->getDefinition()));
+}
+
+
+void ASTWriter::AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt) {
+ const ObjCInterfaceDecl *D = ClassExt->getClassInterface();
+ if (!D)
+ return;
+
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return; // Declaration not imported from PCH.
- ChainedObjCCategoriesData Data = { IFD, CatD, 0, 0 };
- LocalChainedObjCCategories.push_back(Data);
+ RewriteDecl(D);
}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
index a8243e5..7a4ef63 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -12,12 +12,14 @@
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/ASTReader.h"
#include "ASTCommon.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/DeclContextInternals.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include "llvm/Support/ErrorHandling.h"
@@ -54,6 +56,7 @@ namespace clang {
void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
void VisitTypeDecl(TypeDecl *D);
+ void VisitTypedefNameDecl(TypedefNameDecl *D);
void VisitTypedefDecl(TypedefDecl *D);
void VisitTypeAliasDecl(TypeAliasDecl *D);
void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
@@ -93,6 +96,7 @@ namespace clang {
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitLinkageSpecDecl(LinkageSpecDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitImportDecl(ImportDecl *D);
void VisitAccessSpecDecl(AccessSpecDecl *D);
void VisitFriendDecl(FriendDecl *D);
void VisitFriendTemplateDecl(FriendTemplateDecl *D);
@@ -111,8 +115,6 @@ namespace clang {
void VisitObjCIvarDecl(ObjCIvarDecl *D);
void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D);
- void VisitObjCClassDecl(ObjCClassDecl *D);
- void VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
void VisitObjCImplDecl(ObjCImplDecl *D);
void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
@@ -146,7 +148,6 @@ void ASTDeclWriter::Visit(Decl *D) {
void ASTDeclWriter::VisitDecl(Decl *D) {
Writer.AddDeclRef(cast_or_null<Decl>(D->getDeclContext()), Record);
Writer.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()), Record);
- Writer.AddSourceLocation(D->getLocation(), Record);
Record.push_back(D->isInvalidDecl());
Record.push_back(D->hasAttrs());
if (D->hasAttrs())
@@ -154,8 +155,10 @@ void ASTDeclWriter::VisitDecl(Decl *D) {
Record.push_back(D->isImplicit());
Record.push_back(D->isUsed(false));
Record.push_back(D->isReferenced());
+ Record.push_back(D->isTopLevelDeclInObjCContainer());
Record.push_back(D->getAccess());
- Record.push_back(D->ModulePrivate);
+ Record.push_back(D->isModulePrivate());
+ Record.push_back(Writer.inferSubmoduleIDFromLocation(D->getLocation()));
}
void ASTDeclWriter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
@@ -173,16 +176,21 @@ void ASTDeclWriter::VisitTypeDecl(TypeDecl *D) {
Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
}
-void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
+void ASTDeclWriter::VisitTypedefNameDecl(TypedefNameDecl *D) {
+ VisitRedeclarable(D);
VisitTypeDecl(D);
- Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
+ Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
+}
+void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
+ VisitTypedefNameDecl(D);
if (!D->hasAttrs() &&
!D->isImplicit() &&
!D->isUsed(false) &&
- D->RedeclLink.getNext() == D &&
+ D->getFirstDeclaration() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
D->getAccess() == AS_none &&
!D->isModulePrivate() &&
D->getDeclName().getNameKind() == DeclarationName::Identifier)
@@ -192,14 +200,13 @@ void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
}
void ASTDeclWriter::VisitTypeAliasDecl(TypeAliasDecl *D) {
- VisitTypeDecl(D);
- Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
+ VisitTypedefNameDecl(D);
Code = serialization::DECL_TYPEALIAS;
}
void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
- VisitTypeDecl(D);
VisitRedeclarable(D);
+ VisitTypeDecl(D);
Record.push_back(D->getIdentifierNamespace());
Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding
Record.push_back(D->isCompleteDefinition());
@@ -224,15 +231,22 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
Record.push_back(D->isScoped());
Record.push_back(D->isScopedUsingClassTag());
Record.push_back(D->isFixed());
- Writer.AddDeclRef(D->getInstantiatedFromMemberEnum(), Record);
+ if (MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo()) {
+ Writer.AddDeclRef(MemberInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MemberInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MemberInfo->getPointOfInstantiation(), Record);
+ } else {
+ Writer.AddDeclRef(0, Record);
+ }
if (!D->hasAttrs() &&
!D->isImplicit() &&
!D->isUsed(false) &&
!D->hasExtInfo() &&
- D->RedeclLink.getNext() == D &&
+ D->getFirstDeclaration() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
D->getAccess() == AS_none &&
!D->isModulePrivate() &&
!CXXRecordDecl::classofKind(D->getKind()) &&
@@ -253,9 +267,10 @@ void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
!D->isImplicit() &&
!D->isUsed(false) &&
!D->hasExtInfo() &&
- D->RedeclLink.getNext() == D &&
+ D->getFirstDeclaration() == D->getMostRecentDecl() &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
D->getAccess() == AS_none &&
!D->isModulePrivate() &&
!CXXRecordDecl::classofKind(D->getKind()) &&
@@ -289,14 +304,33 @@ void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
}
void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
- VisitDeclaratorDecl(D);
VisitRedeclarable(D);
+ VisitDeclaratorDecl(D);
Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
Record.push_back(D->getIdentifierNamespace());
+
+ // FunctionDecl's body is handled last at ASTWriterDecl::Visit,
+ // after everything else is written.
+
+ Record.push_back(D->getStorageClass()); // FIXME: stable encoding
+ Record.push_back(D->getStorageClassAsWritten());
+ Record.push_back(D->IsInline);
+ Record.push_back(D->isInlineSpecified());
+ Record.push_back(D->isVirtualAsWritten());
+ Record.push_back(D->isPure());
+ Record.push_back(D->hasInheritedPrototype());
+ Record.push_back(D->hasWrittenPrototype());
+ Record.push_back(D->isDeletedAsWritten());
+ Record.push_back(D->isTrivial());
+ Record.push_back(D->isDefaulted());
+ Record.push_back(D->isExplicitlyDefaulted());
+ Record.push_back(D->hasImplicitReturnZero());
+ Record.push_back(D->isConstexpr());
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+
Record.push_back(D->getTemplatedKind());
switch (D->getTemplatedKind()) {
- default: llvm_unreachable("Unhandled TemplatedKind!");
case FunctionDecl::TK_NonTemplate:
break;
case FunctionDecl::TK_FunctionTemplate:
@@ -360,25 +394,6 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
}
}
- // FunctionDecl's body is handled last at ASTWriterDecl::Visit,
- // after everything else is written.
-
- Record.push_back(D->getStorageClass()); // FIXME: stable encoding
- Record.push_back(D->getStorageClassAsWritten());
- Record.push_back(D->IsInline);
- Record.push_back(D->isInlineSpecified());
- Record.push_back(D->isVirtualAsWritten());
- Record.push_back(D->isPure());
- Record.push_back(D->hasInheritedPrototype());
- Record.push_back(D->hasWrittenPrototype());
- Record.push_back(D->isDeletedAsWritten());
- Record.push_back(D->isTrivial());
- Record.push_back(D->isDefaulted());
- Record.push_back(D->isExplicitlyDefaulted());
- Record.push_back(D->hasImplicitReturnZero());
- Record.push_back(D->isConstexpr());
- Writer.AddSourceLocation(D->getLocEnd(), Record);
-
Record.push_back(D->param_size());
for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
P != PEnd; ++P)
@@ -441,39 +456,48 @@ void ASTDeclWriter::VisitObjCContainerDecl(ObjCContainerDecl *D) {
}
void ASTDeclWriter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ VisitRedeclarable(D);
VisitObjCContainerDecl(D);
Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
- Writer.AddDeclRef(D->getSuperClass(), Record);
- // Write out the protocols that are directly referenced by the @interface.
- Record.push_back(D->ReferencedProtocols.size());
- for (ObjCInterfaceDecl::protocol_iterator P = D->protocol_begin(),
- PEnd = D->protocol_end();
- P != PEnd; ++P)
- Writer.AddDeclRef(*P, Record);
- for (ObjCInterfaceDecl::protocol_loc_iterator PL = D->protocol_loc_begin(),
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition()) {
+ // Write the DefinitionData
+ ObjCInterfaceDecl::DefinitionData &Data = D->data();
+
+ Writer.AddDeclRef(D->getSuperClass(), Record);
+ Writer.AddSourceLocation(D->getSuperClassLoc(), Record);
+ Writer.AddSourceLocation(D->getEndOfDefinitionLoc(), Record);
+
+ // Write out the protocols that are directly referenced by the @interface.
+ Record.push_back(Data.ReferencedProtocols.size());
+ for (ObjCInterfaceDecl::protocol_iterator P = D->protocol_begin(),
+ PEnd = D->protocol_end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ for (ObjCInterfaceDecl::protocol_loc_iterator PL = D->protocol_loc_begin(),
PLEnd = D->protocol_loc_end();
- PL != PLEnd; ++PL)
- Writer.AddSourceLocation(*PL, Record);
-
- // Write out the protocols that are transitively referenced.
- Record.push_back(D->AllReferencedProtocols.size());
- for (ObjCList<ObjCProtocolDecl>::iterator
- P = D->AllReferencedProtocols.begin(),
- PEnd = D->AllReferencedProtocols.end();
- P != PEnd; ++P)
- Writer.AddDeclRef(*P, Record);
+ PL != PLEnd; ++PL)
+ Writer.AddSourceLocation(*PL, Record);
+
+ // Write out the protocols that are transitively referenced.
+ Record.push_back(Data.AllReferencedProtocols.size());
+ for (ObjCList<ObjCProtocolDecl>::iterator
+ P = Data.AllReferencedProtocols.begin(),
+ PEnd = Data.AllReferencedProtocols.end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+
+ if (ObjCCategoryDecl *Cat = D->getCategoryList()) {
+ // Ensure that we write out the set of categories for this class.
+ Writer.ObjCClassesWithCategories.insert(D);
+
+ // Make sure that the categories get serialized.
+ for (; Cat; Cat = Cat->getNextClassCategory())
+ (void)Writer.GetDeclRef(Cat);
+ }
+ }
- // Write out the ivars.
- Record.push_back(D->ivar_size());
- for (ObjCInterfaceDecl::ivar_iterator I = D->ivar_begin(),
- IEnd = D->ivar_end(); I != IEnd; ++I)
- Writer.AddDeclRef(*I, Record);
- Writer.AddDeclRef(D->getCategoryList(), Record);
- Record.push_back(D->isForwardDecl());
- Record.push_back(D->isImplicitInterfaceDecl());
- Writer.AddSourceLocation(D->getSuperClassLoc(), Record);
- Writer.AddSourceLocation(D->getLocEnd(), Record);
Code = serialization::DECL_OBJC_INTERFACE;
}
@@ -498,17 +522,21 @@ void ASTDeclWriter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
}
void ASTDeclWriter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ VisitRedeclarable(D);
VisitObjCContainerDecl(D);
- Record.push_back(D->isForwardDecl());
- Writer.AddSourceLocation(D->getLocEnd(), Record);
- Record.push_back(D->protocol_size());
- for (ObjCProtocolDecl::protocol_iterator
- I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
- Writer.AddDeclRef(*I, Record);
- for (ObjCProtocolDecl::protocol_loc_iterator PL = D->protocol_loc_begin(),
- PLEnd = D->protocol_loc_end();
- PL != PLEnd; ++PL)
- Writer.AddSourceLocation(*PL, Record);
+
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition()) {
+ Record.push_back(D->protocol_size());
+ for (ObjCProtocolDecl::protocol_iterator
+ I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
+ Writer.AddDeclRef(*I, Record);
+ for (ObjCProtocolDecl::protocol_loc_iterator PL = D->protocol_loc_begin(),
+ PLEnd = D->protocol_loc_end();
+ PL != PLEnd; ++PL)
+ Writer.AddSourceLocation(*PL, Record);
+ }
+
Code = serialization::DECL_OBJC_PROTOCOL;
}
@@ -517,28 +545,11 @@ void ASTDeclWriter::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D) {
Code = serialization::DECL_OBJC_AT_DEFS_FIELD;
}
-void ASTDeclWriter::VisitObjCClassDecl(ObjCClassDecl *D) {
- VisitDecl(D);
- Writer.AddDeclRef(D->getForwardInterfaceDecl(), Record);
- Writer.AddSourceLocation(D->getForwardDecl()->getLocation(), Record);
- Code = serialization::DECL_OBJC_CLASS;
-}
-
-void ASTDeclWriter::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D) {
- VisitDecl(D);
- Record.push_back(D->protocol_size());
- for (ObjCForwardProtocolDecl::protocol_iterator
- I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
- Writer.AddDeclRef(*I, Record);
- for (ObjCForwardProtocolDecl::protocol_loc_iterator
- PL = D->protocol_loc_begin(), PLEnd = D->protocol_loc_end();
- PL != PLEnd; ++PL)
- Writer.AddSourceLocation(*PL, Record);
- Code = serialization::DECL_OBJC_FORWARD_PROTOCOL;
-}
-
void ASTDeclWriter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
VisitObjCContainerDecl(D);
+ Writer.AddSourceLocation(D->getCategoryNameLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarLBraceLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
Writer.AddDeclRef(D->getClassInterface(), Record);
Record.push_back(D->protocol_size());
for (ObjCCategoryDecl::protocol_iterator
@@ -548,9 +559,7 @@ void ASTDeclWriter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
PL = D->protocol_loc_begin(), PLEnd = D->protocol_loc_end();
PL != PLEnd; ++PL)
Writer.AddSourceLocation(*PL, Record);
- Writer.AddDeclRef(D->getNextClassCategory(), Record);
Record.push_back(D->hasSynthBitfield());
- Writer.AddSourceLocation(D->getCategoryNameLoc(), Record);
Code = serialization::DECL_OBJC_CATEGORY;
}
@@ -563,6 +572,7 @@ void ASTDeclWriter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D) {
void ASTDeclWriter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
VisitNamedDecl(D);
Writer.AddSourceLocation(D->getAtLoc(), Record);
+ Writer.AddSourceLocation(D->getLParenLoc(), Record);
Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
// FIXME: stable encoding
Record.push_back((unsigned)D->getPropertyAttributes());
@@ -586,12 +596,15 @@ void ASTDeclWriter::VisitObjCImplDecl(ObjCImplDecl *D) {
void ASTDeclWriter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
VisitObjCImplDecl(D);
Writer.AddIdentifierRef(D->getIdentifier(), Record);
+ Writer.AddSourceLocation(D->getCategoryNameLoc(), Record);
Code = serialization::DECL_OBJC_CATEGORY_IMPL;
}
void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
VisitObjCImplDecl(D);
Writer.AddDeclRef(D->getSuperClass(), Record);
+ Writer.AddSourceLocation(D->getIvarLBraceLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
Writer.AddCXXCtorInitializers(D->IvarInitializers, D->NumIvarInitializers,
Record);
Record.push_back(D->hasSynthBitfield());
@@ -625,6 +638,7 @@ void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
!D->isUsed(false) &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
!D->isModulePrivate() &&
!D->getBitWidth() &&
!D->hasInClassInitializer() &&
@@ -649,19 +663,22 @@ void ASTDeclWriter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
}
void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
- VisitDeclaratorDecl(D);
VisitRedeclarable(D);
+ VisitDeclaratorDecl(D);
Record.push_back(D->getStorageClass()); // FIXME: stable encoding
Record.push_back(D->getStorageClassAsWritten());
Record.push_back(D->isThreadSpecified());
- Record.push_back(D->hasCXXDirectInitializer());
+ Record.push_back(D->getInitStyle());
Record.push_back(D->isExceptionVariable());
Record.push_back(D->isNRVOVariable());
Record.push_back(D->isCXXForRangeDecl());
Record.push_back(D->isARCPseudoStrong());
- Record.push_back(D->getInit() ? 1 : 0);
- if (D->getInit())
+ if (D->getInit()) {
+ Record.push_back(!D->isInitKnownICE() ? 1 : (D->isInitICE() ? 3 : 2));
Writer.AddStmt(D->getInit());
+ } else {
+ Record.push_back(0);
+ }
MemberSpecializationInfo *SpecInfo
= D->isStaticDataMember() ? D->getMemberSpecializationInfo() : 0;
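
The initializer field written above packs four states into one value:
0 means no initializer, 1 an initializer whose ICE-ness is not yet known,
2 a known non-ICE, 3 a known ICE, with the initializer statement following
whenever the value is nonzero. A decode sketch (the enum and function
names are invented):

    #include <cstdint>

    enum class InitState { None, Unknown, NotICE, ICE };

    InitState decodeInitField(uint64_t V) {
      switch (V) {
      case 0: return InitState::None;    // no initializer follows
      case 1: return InitState::Unknown; // ICE-ness not yet computed
      case 2: return InitState::NotICE;  // known not to be an ICE
      case 3: return InitState::ICE;     // known to be an ICE
      }
      return InitState::None; // malformed input; a real reader would error
    }
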
@@ -677,12 +694,13 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
!D->isUsed(false) &&
!D->isInvalidDecl() &&
!D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
D->getAccess() == AS_none &&
!D->isModulePrivate() &&
D->getDeclName().getNameKind() == DeclarationName::Identifier &&
!D->hasExtInfo() &&
- D->RedeclLink.getNext() == D &&
- !D->hasCXXDirectInitializer() &&
+ D->getFirstDeclaration() == D->getMostRecentDecl() &&
+ D->getInitStyle() == VarDecl::CInit &&
D->getInit() == 0 &&
!isa<ParmVarDecl>(D) &&
!SpecInfo)
@@ -718,10 +736,12 @@ void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
!D->hasExtInfo() &&
!D->isImplicit() &&
!D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
D->getAccess() == AS_none &&
!D->isModulePrivate() &&
D->getStorageClass() == 0 &&
- !D->hasCXXDirectInitializer() && // Can params have this ever?
+ D->getInitStyle() == VarDecl::CInit && // Can params have anything else?
D->getFunctionScopeDepth() == 0 &&
D->getObjCDeclQualifier() == 0 &&
!D->isKNRPromoted() &&
@@ -732,11 +752,10 @@ void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
// Check things we know are true of *every* PARM_VAR_DECL, which is more than
// just us assuming it.
- assert(!D->isInvalidDecl() && "Shouldn't emit invalid decls");
assert(!D->isThreadSpecified() && "PARM_VAR_DECL can't be __thread");
assert(D->getAccess() == AS_none && "PARM_VAR_DECL can't be public/private");
assert(!D->isExceptionVariable() && "PARM_VAR_DECL can't be exception var");
- assert(D->getPreviousDeclaration() == 0 && "PARM_VAR_DECL can't be redecl");
+ assert(D->getPreviousDecl() == 0 && "PARM_VAR_DECL can't be redecl");
assert(!D->isStaticDataMember() &&
"PARM_VAR_DECL can't be static data member");
}
@@ -791,18 +810,14 @@ void ASTDeclWriter::VisitLabelDecl(LabelDecl *D) {
void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
+ VisitRedeclarable(D);
VisitNamedDecl(D);
Record.push_back(D->isInline());
Writer.AddSourceLocation(D->getLocStart(), Record);
Writer.AddSourceLocation(D->getRBraceLoc(), Record);
- Writer.AddDeclRef(D->getNextNamespace(), Record);
- // Only write one reference--original or anonymous
- Record.push_back(D->isOriginalNamespace());
if (D->isOriginalNamespace())
Writer.AddDeclRef(D->getAnonymousNamespace(), Record);
- else
- Writer.AddDeclRef(D->getOriginalNamespace(), Record);
Code = serialization::DECL_NAMESPACE;
if (Writer.hasChain() && !D->isOriginalNamespace() &&
@@ -811,9 +826,7 @@ void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
Writer.AddUpdatedDeclContext(NS);
// Make sure all visible decls are written. They will be recorded later.
- NS->lookup(DeclarationName());
- StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(NS->getLookupPtr());
- if (Map) {
+ if (StoredDeclsMap *Map = NS->buildLookup()) {
for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end();
D != DEnd; ++D) {
DeclContext::lookup_result Result = D->second.getLookupResult();
@@ -825,7 +838,8 @@ void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
}
}
- if (Writer.hasChain() && D->isAnonymousNamespace() && !D->getNextNamespace()){
+ if (Writer.hasChain() && D->isAnonymousNamespace() &&
+ D == D->getMostRecentDecl()) {
// This is a most recent reopening of the anonymous namespace. If its parent
// is in a previous PCH (or is the TU), mark that parent for update, because
// the original namespace always points to the latest re-opening of its
@@ -854,7 +868,7 @@ void ASTDeclWriter::VisitUsingDecl(UsingDecl *D) {
Writer.AddSourceLocation(D->getUsingLocation(), Record);
Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
- Writer.AddDeclRef(D->FirstUsingShadow, Record);
+ Writer.AddDeclRef(D->FirstUsingShadow.getPointer(), Record);
Record.push_back(D->isTypeName());
Writer.AddDeclRef(Context.getInstantiatedFromUsingDecl(D), Record);
Code = serialization::DECL_USING;
@@ -896,12 +910,8 @@ void ASTDeclWriter::VisitUnresolvedUsingTypenameDecl(
void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
VisitRecordDecl(D);
-
- CXXRecordDecl *DefinitionDecl = 0;
- if (D->DefinitionData)
- DefinitionDecl = D->DefinitionData->Definition;
- Writer.AddDeclRef(DefinitionDecl, Record);
- if (D == DefinitionDecl)
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition())
Writer.AddCXXDefinitionData(D, Record);
enum {
@@ -964,6 +974,23 @@ void ASTDeclWriter::VisitCXXConversionDecl(CXXConversionDecl *D) {
Code = serialization::DECL_CXX_CONVERSION;
}
+void ASTDeclWriter::VisitImportDecl(ImportDecl *D) {
+ VisitDecl(D);
+ ArrayRef<SourceLocation> IdentifierLocs = D->getIdentifierLocs();
+ Record.push_back(!IdentifierLocs.empty());
+ if (IdentifierLocs.empty()) {
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Record.push_back(1);
+ } else {
+ for (unsigned I = 0, N = IdentifierLocs.size(); I != N; ++I)
+ Writer.AddSourceLocation(IdentifierLocs[I], Record);
+ Record.push_back(IdentifierLocs.size());
+ }
+ // Note: the number of source locations must always be the last element in
+ // the record.
+ Code = serialization::DECL_IMPORT;
+}
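
Writing the location count last, as the comment notes, lets a reader size
the trailing run of source locations by peeking at the record's final
element, without tracking where the run began. A sketch of that
back-to-front decode (Record stands in for the deserialized DECL_IMPORT
payload):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint64_t> tailLocations(const std::vector<uint64_t> &Record) {
      auto N = static_cast<std::ptrdiff_t>(Record.back()); // count is last
      return std::vector<uint64_t>(Record.end() - 1 - N, Record.end() - 1);
    }
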
+
void ASTDeclWriter::VisitAccessSpecDecl(AccessSpecDecl *D) {
VisitDecl(D);
Writer.AddSourceLocation(D->getColonLoc(), Record);
@@ -1005,36 +1032,17 @@ void ASTDeclWriter::VisitTemplateDecl(TemplateDecl *D) {
}
void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
+ VisitRedeclarable(D);
+
// Emit data to initialize CommonOrPrev before VisitTemplateDecl so that
// getCommonPtr() can be used while this is still initializing.
-
- Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
- if (D->getPreviousDeclaration())
- Writer.AddDeclRef(D->getFirstDeclaration(), Record);
-
- if (D->getPreviousDeclaration() == 0) {
- // This TemplateDecl owns the CommonPtr; write it.
- assert(D->isCanonicalDecl());
-
+ if (D->isFirstDeclaration()) {
+ // This declaration owns the 'common' pointer, so serialize that data now.
Writer.AddDeclRef(D->getInstantiatedFromMemberTemplate(), Record);
if (D->getInstantiatedFromMemberTemplate())
Record.push_back(D->isMemberSpecialization());
-
- Writer.AddDeclRef(D->getCommonPtr()->Latest, Record);
- } else {
- RedeclarableTemplateDecl *First = D->getFirstDeclaration();
- assert(First != D);
- // If this is a most recent redeclaration that is pointed to by a first decl
- // in a chained PCH, keep track of the association with the map so we can
- // update the first decl during AST reading.
- if (First->getMostRecentDeclaration() == D &&
- First->isFromASTFile() && !D->isFromASTFile()) {
- assert(Writer.FirstLatestDecls.find(First)==Writer.FirstLatestDecls.end()
- && "The latest is already set");
- Writer.FirstLatestDecls[First] = D;
- }
}
-
+
VisitTemplateDecl(D);
Record.push_back(D->getIdentifierNamespace());
}
@@ -1042,7 +1050,7 @@ void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
VisitRedeclarableTemplateDecl(D);
- if (D->getPreviousDeclaration() == 0) {
+ if (D->isFirstDeclaration()) {
typedef llvm::FoldingSet<ClassTemplateSpecializationDecl> CTSDSetTy;
CTSDSetTy &CTSDSet = D->getSpecializations();
Record.push_back(CTSDSet.size());
@@ -1111,7 +1119,7 @@ void ASTDeclWriter::VisitClassTemplatePartialSpecializationDecl(
Record.push_back(D->getSequenceNumber());
// These are read/set from/to the first declaration.
- if (D->getPreviousDeclaration() == 0) {
+ if (D->getPreviousDecl() == 0) {
Writer.AddDeclRef(D->getInstantiatedFromMember(), Record);
Record.push_back(D->isMemberSpecialization());
}
@@ -1130,7 +1138,7 @@ void ASTDeclWriter::VisitClassScopeFunctionSpecializationDecl(
void ASTDeclWriter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
VisitRedeclarableTemplateDecl(D);
- if (D->getPreviousDeclaration() == 0) {
+ if (D->isFirstDeclaration()) {
// This FunctionTemplateDecl owns the CommonPtr; write it.
// Write the function specialization declarations.
@@ -1230,31 +1238,23 @@ void ASTDeclWriter::VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset,
template <typename T>
void ASTDeclWriter::VisitRedeclarable(Redeclarable<T> *D) {
- enum { NoRedeclaration = 0, PointsToPrevious, PointsToLatest };
- if (D->RedeclLink.getNext() == D) {
- Record.push_back(NoRedeclaration);
- } else {
- if (D->RedeclLink.NextIsPrevious()) {
- Record.push_back(PointsToPrevious);
- Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
- Writer.AddDeclRef(D->getFirstDeclaration(), Record);
- } else {
- Record.push_back(PointsToLatest);
- Writer.AddDeclRef(D->RedeclLink.getPointer(), Record);
- }
- }
-
T *First = D->getFirstDeclaration();
- T *ThisDecl = static_cast<T*>(D);
- // If this is a most recent redeclaration that is pointed to by a first decl
- // in a chained PCH, keep track of the association with the map so we can
- // update the first decl during AST reading.
- if (ThisDecl != First && First->getMostRecentDeclaration() == ThisDecl &&
- First->isFromASTFile() && !ThisDecl->isFromASTFile()) {
- assert(Writer.FirstLatestDecls.find(First) == Writer.FirstLatestDecls.end()
- && "The latest is already set");
- Writer.FirstLatestDecls[First] = ThisDecl;
+ if (First->getMostRecentDecl() != First) {
+ // There is more than one declaration of this entity, so we will need to
+ // write a redeclaration chain.
+ Writer.AddDeclRef(First, Record);
+ Writer.Redeclarations.insert(First);
+
+ // Make sure that we serialize both the previous and the most-recent
+ // declarations, which (transitively) ensures that all declarations in the
+ // chain get serialized.
+ (void)Writer.GetDeclRef(D->getPreviousDecl());
+ (void)Writer.GetDeclRef(First->getMostRecentDecl());
+ } else {
+ // We use the sentinel value 0 to indicate an only declaration.
+ Record.push_back(0);
}
+
}
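
So after this change a redeclarable declaration serializes either the
sentinel 0 (it is the only declaration) or a reference to the chain's
first declaration, and the writer additionally pulls in the previous and
most-recent declarations so the whole chain stays reachable. The
reader-side branch reduces to a null check on that leading field, as in
this sketch (the field name is invented):

    #include <cstdint>

    // 0: sole declaration, nothing more to do.
    // nonzero: a DeclID naming the chain's first declaration.
    bool hasRedeclChain(uint64_t FirstDeclField) {
      return FirstDeclField != 0;
    }
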
//===----------------------------------------------------------------------===//
@@ -1272,14 +1272,15 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl (!?)
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
Abv->Add(BitCodeAbbrevOp(0)); // isUsed
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
@@ -1303,14 +1304,15 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl (!?)
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
Abv->Add(BitCodeAbbrevOp(0)); // isUsed
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
@@ -1334,25 +1336,26 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
// Abbreviation for DECL_ENUM
Abv = new BitCodeAbbrev();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_ENUM));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl (!?)
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
Abv->Add(BitCodeAbbrevOp(0)); // isUsed
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
// TypeDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
- // Redeclarable
- Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// TagDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getTagKind
@@ -1380,25 +1383,26 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
// Abbreviation for DECL_RECORD
Abv = new BitCodeAbbrev();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_RECORD));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl (!?)
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
Abv->Add(BitCodeAbbrevOp(0)); // isUsed
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
// TypeDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
- // Redeclarable
- Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// TagDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getTagKind
@@ -1420,17 +1424,20 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
// Abbreviation for DECL_PARM_VAR
Abv = new BitCodeAbbrev();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_PARM_VAR));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl (!?)
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
Abv->Add(BitCodeAbbrevOp(0)); // isUsed
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
@@ -1440,7 +1447,6 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
// VarDecl
- Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
Abv->Add(BitCodeAbbrevOp(0)); // StorageClass
Abv->Add(BitCodeAbbrevOp(0)); // StorageClassAsWritten
Abv->Add(BitCodeAbbrevOp(0)); // isThreadSpecified
@@ -1468,17 +1474,20 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
// Abbreviation for DECL_TYPEDEF
Abv = new BitCodeAbbrev();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_TYPEDEF));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl (!?)
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
Abv->Add(BitCodeAbbrevOp(0)); // isUsed
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
@@ -1493,17 +1502,20 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
// Abbreviation for DECL_VAR
Abv = new BitCodeAbbrev();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_VAR));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl (!?)
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
Abv->Add(BitCodeAbbrevOp(0)); // isUsed
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
@@ -1513,7 +1525,6 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
// VarDecl
- Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // StorageClass
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // StorageClassAsWritten
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isThreadSpecified
@@ -1547,6 +1558,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //GetDeclFound
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ExplicitTemplateArgs
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //HadMultipleCandidates
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //RefersToEnclosingLocal
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclRef
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
DeclRefExprAbbrev = Stream.EmitAbbrev(Abv);
@@ -1629,6 +1641,20 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
RecordData Record;
ASTDeclWriter W(*this, Context, Record);
+ // Determine the ID for this declaration.
+ serialization::DeclID ID;
+ if (D->isFromASTFile())
+ ID = getDeclID(D);
+ else {
+ serialization::DeclID &IDR = DeclIDs[D];
+ if (IDR == 0)
+ IDR = NextDeclID++;
+
+    ID = IDR;
+ }
+
+ bool isReplacingADecl = ID < FirstDeclID;
+
// If this declaration is also a DeclContext, write blocks for the
 // declarations that are lexically stored inside its context and those
// declarations that are visible from its context. These blocks
@@ -1638,29 +1664,38 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
uint64_t VisibleOffset = 0;
DeclContext *DC = dyn_cast<DeclContext>(D);
if (DC) {
+ if (isReplacingADecl) {
+ // It is replacing a decl from a chained PCH; make sure that the
+ // DeclContext is fully loaded.
+ if (DC->hasExternalLexicalStorage())
+ DC->LoadLexicalDeclsFromExternalStorage();
+ if (DC->hasExternalVisibleStorage())
+ Chain->completeVisibleDeclsMap(DC);
+ }
LexicalOffset = WriteDeclContextLexicalBlock(Context, DC);
VisibleOffset = WriteDeclContextVisibleBlock(Context, DC);
}
-
- // Determine the ID for this declaration
- serialization::DeclID &IDR = DeclIDs[D];
- if (IDR == 0)
- IDR = NextDeclID++;
- serialization::DeclID ID = IDR;
-
- if (ID < FirstDeclID) {
+
+ if (isReplacingADecl) {
// We're replacing a decl in a previous file.
- ReplacedDecls.push_back(std::make_pair(ID, Stream.GetCurrentBitNo()));
+ ReplacedDecls.push_back(ReplacedDeclInfo(ID, Stream.GetCurrentBitNo(),
+ D->getLocation()));
} else {
unsigned Index = ID - FirstDeclID;
// Record the offset for this declaration
+ SourceLocation Loc = D->getLocation();
if (DeclOffsets.size() == Index)
- DeclOffsets.push_back(Stream.GetCurrentBitNo());
+ DeclOffsets.push_back(DeclOffset(Loc, Stream.GetCurrentBitNo()));
else if (DeclOffsets.size() < Index) {
DeclOffsets.resize(Index+1);
- DeclOffsets[Index] = Stream.GetCurrentBitNo();
+ DeclOffsets[Index].setLocation(Loc);
+ DeclOffsets[Index].BitOffset = Stream.GetCurrentBitNo();
}
+
+ SourceManager &SM = Context.getSourceManager();
+ if (Loc.isValid() && SM.isLocalSourceLocation(Loc))
+ associateDeclWithFile(D, ID);
}
// Build and emit a record for this declaration
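A minimal standalone sketch of the ID-assignment pattern above, using the
standard library rather than the Clang API (names and the threshold are
illustrative): the first lookup of a decl creates its ID, and any ID below
the first-local threshold identifies an entity owned by an earlier file in
the chain, i.e. a replacement.

    #include <cstdint>
    #include <unordered_map>

    using DeclID = std::uint32_t;

    class DeclIDTable {
      std::unordered_map<const void *, DeclID> IDs; // decl -> assigned ID
      DeclID NextID;       // next fresh ID to hand out
      DeclID FirstLocalID; // IDs below this came from previously loaded files

    public:
      explicit DeclIDTable(DeclID FirstLocal)
          : NextID(FirstLocal), FirstLocalID(FirstLocal) {}

      DeclID getOrCreate(const void *D) {
        DeclID &Slot = IDs[D]; // value-initialized to 0 on first access
        if (Slot == 0)
          Slot = NextID++;     // assigned exactly once per decl
        return Slot;
      }

      // An ID below FirstLocalID means this write replaces an entity that an
      // earlier file in the chain already serialized.
      bool isReplacing(DeclID ID) const { return ID < FirstLocalID; }
    };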
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
index 7e2d45c..827caa0 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -34,9 +34,8 @@ namespace clang {
ASTStmtWriter(ASTWriter &Writer, ASTWriter::RecordData &Record)
: Writer(Writer), Record(Record) { }
-
- void
- AddExplicitTemplateArgumentList(const ASTTemplateArgumentListInfo &Args);
+
+ void AddTemplateKWAndArgsInfo(const ASTTemplateKWAndArgsInfo &Args);
void VisitStmt(Stmt *S);
#define STMT(Type, Base) \
@@ -46,7 +45,8 @@ namespace clang {
}
void ASTStmtWriter::
-AddExplicitTemplateArgumentList(const ASTTemplateArgumentListInfo &Args) {
+AddTemplateKWAndArgsInfo(const ASTTemplateKWAndArgsInfo &Args) {
+ Writer.AddSourceLocation(Args.getTemplateKeywordLoc(), Record);
Writer.AddSourceLocation(Args.LAngleLoc, Record);
Writer.AddSourceLocation(Args.RAngleLoc, Record);
for (unsigned i=0; i != Args.NumTemplateArgs; ++i)
@@ -264,17 +264,18 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
Record.push_back(E->hasQualifier());
Record.push_back(E->getDecl() != E->getFoundDecl());
- Record.push_back(E->hasExplicitTemplateArgs());
+ Record.push_back(E->hasTemplateKWAndArgsInfo());
Record.push_back(E->hadMultipleCandidates());
+ Record.push_back(E->refersToEnclosingLocal());
- if (E->hasExplicitTemplateArgs()) {
+ if (E->hasTemplateKWAndArgsInfo()) {
unsigned NumTemplateArgs = E->getNumTemplateArgs();
Record.push_back(NumTemplateArgs);
}
DeclarationName::NameKind nk = (E->getDecl()->getDeclName().getNameKind());
- if ((!E->hasExplicitTemplateArgs()) && (!E->hasQualifier()) &&
+ if ((!E->hasTemplateKWAndArgsInfo()) && (!E->hasQualifier()) &&
(E->getDecl() == E->getFoundDecl()) &&
nk == DeclarationName::Identifier) {
AbbrevToUse = Writer.getDeclRefExprAbbrev();
@@ -286,8 +287,8 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
if (E->getDecl() != E->getFoundDecl())
Writer.AddDeclRef(E->getFoundDecl(), Record);
- if (E->hasExplicitTemplateArgs())
- AddExplicitTemplateArgumentList(E->getExplicitTemplateArgs());
+ if (E->hasTemplateKWAndArgsInfo())
+ AddTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo());
Writer.AddDeclRef(E->getDecl(), Record);
Writer.AddSourceLocation(E->getLocation(), Record);
@@ -331,7 +332,7 @@ void ASTStmtWriter::VisitStringLiteral(StringLiteral *E) {
// StringLiteral. However, we can't do so now because we have no
// provision for coping with abbreviations when we're jumping around
// the AST file during deserialization.
- Record.append(E->getString().begin(), E->getString().end());
+ Record.append(E->getBytes().begin(), E->getBytes().end());
for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
Writer.AddSourceLocation(E->getStrTokenLoc(I), Record);
Code = serialization::EXPR_STRING_LITERAL;
@@ -449,8 +450,9 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
if (E->hasQualifier())
Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
- Record.push_back(E->hasExplicitTemplateArgs());
- if (E->hasExplicitTemplateArgs()) {
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ Writer.AddSourceLocation(E->getTemplateKeywordLoc(), Record);
unsigned NumTemplateArgs = E->getNumTemplateArgs();
Record.push_back(NumTemplateArgs);
Writer.AddSourceLocation(E->getLAngleLoc(), Record);
@@ -597,6 +599,7 @@ void ASTStmtWriter::VisitInitListExpr(InitListExpr *E) {
else
Writer.AddDeclRef(E->getInitializedFieldInUnion(), Record);
Record.push_back(E->hadArrayRangeDesignator());
+ Record.push_back(E->initializesStdInitializerList());
Record.push_back(E->getNumInits());
if (isArrayFiller) {
// ArrayFiller may have filled "holes" due to designated initializer.
@@ -710,15 +713,6 @@ void ASTStmtWriter::VisitBlockExpr(BlockExpr *E) {
Code = serialization::EXPR_BLOCK;
}
-void ASTStmtWriter::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
- VisitExpr(E);
- Writer.AddDeclRef(E->getDecl(), Record);
- Writer.AddSourceLocation(E->getLocation(), Record);
- Record.push_back(E->isByRef());
- Record.push_back(E->isConstQualAdded());
- Code = serialization::EXPR_BLOCK_DECL_REF;
-}
-
void ASTStmtWriter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumAssocs());
@@ -736,19 +730,32 @@ void ASTStmtWriter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
Code = serialization::EXPR_GENERIC_SELECTION;
}
+void ASTStmtWriter::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSemanticExprs());
+
+ // Push the result index. Currently, this needs to exactly match
+ // the encoding used internally for ResultIndex.
+ unsigned result = E->getResultExprIndex();
+ result = (result == PseudoObjectExpr::NoResult ? 0 : result + 1);
+ Record.push_back(result);
+
+ Writer.AddStmt(E->getSyntacticForm());
+ for (PseudoObjectExpr::semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ Writer.AddStmt(*i);
+ }
+ Code = serialization::EXPR_PSEUDO_OBJECT;
+}
+
void ASTStmtWriter::VisitAtomicExpr(AtomicExpr *E) {
VisitExpr(E);
Record.push_back(E->getOp());
- Writer.AddStmt(E->getPtr());
- Writer.AddStmt(E->getOrder());
- if (E->getOp() != AtomicExpr::Load)
- Writer.AddStmt(E->getVal1());
- if (E->isCmpXChg()) {
- Writer.AddStmt(E->getOrderFail());
- Writer.AddStmt(E->getVal2());
- }
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.AddStmt(E->getSubExprs()[I]);
Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_ATOMIC;
}
//===----------------------------------------------------------------------===//
@@ -762,6 +769,46 @@ void ASTStmtWriter::VisitObjCStringLiteral(ObjCStringLiteral *E) {
Code = serialization::EXPR_OBJC_STRING_LITERAL;
}
+void ASTStmtWriter::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getNumber());
+ Writer.AddDeclRef(E->getObjCNumericLiteralMethod(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Code = serialization::EXPR_OBJC_NUMERIC_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumElements());
+ for (unsigned i = 0; i < E->getNumElements(); i++)
+ Writer.AddStmt(E->getElement(i));
+ Writer.AddDeclRef(E->getArrayWithObjectsMethod(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Code = serialization::EXPR_OBJC_ARRAY_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumElements());
+ Record.push_back(E->HasPackExpansions);
+ for (unsigned i = 0; i < E->getNumElements(); i++) {
+ ObjCDictionaryElement Element = E->getKeyValueElement(i);
+ Writer.AddStmt(Element.Key);
+ Writer.AddStmt(Element.Value);
+ if (E->HasPackExpansions) {
+ Writer.AddSourceLocation(Element.EllipsisLoc, Record);
+ unsigned NumExpansions = 0;
+ if (Element.NumExpansions)
+ NumExpansions = *Element.NumExpansions + 1;
+ Record.push_back(NumExpansions);
+ }
+ }
+
+ Writer.AddDeclRef(E->getDictWithObjectsMethod(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Code = serialization::EXPR_OBJC_DICTIONARY_LITERAL;
+}
+
void ASTStmtWriter::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
VisitExpr(E);
Writer.AddTypeSourceInfo(E->getEncodedTypeSourceInfo(), Record);
@@ -798,6 +845,7 @@ void ASTStmtWriter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
void ASTStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
VisitExpr(E);
+ Record.push_back(E->SetterAndMethodRefFlags.getInt());
Record.push_back(E->isImplicitProperty());
if (E->isImplicitProperty()) {
Writer.AddDeclRef(E->getImplicitPropertyGetter(), Record);
@@ -821,12 +869,24 @@ void ASTStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
Code = serialization::EXPR_OBJC_PROPERTY_REF_EXPR;
}
+void ASTStmtWriter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getRBracket(), Record);
+ Writer.AddStmt(E->getBaseExpr());
+ Writer.AddStmt(E->getKeyExpr());
+ Writer.AddDeclRef(E->getAtIndexMethodDecl(), Record);
+ Writer.AddDeclRef(E->setAtIndexMethodDecl(), Record);
+
+ Code = serialization::EXPR_OBJC_SUBSCRIPT_REF_EXPR;
+}
+
void ASTStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumArgs());
Record.push_back(E->getNumStoredSelLocs());
Record.push_back(E->SelLocsKind);
Record.push_back(E->isDelegateInitCall());
+ Record.push_back(E->IsImplicit);
Record.push_back((unsigned)E->getReceiverKind()); // FIXME: stable encoding
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
@@ -921,6 +981,13 @@ void ASTStmtWriter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
Code = serialization::STMT_OBJC_AT_THROW;
}
+void ASTStmtWriter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_OBJC_BOOL_LITERAL;
+}
+
//===----------------------------------------------------------------------===//
// C++ Expressions and Statements.
//===----------------------------------------------------------------------===//
@@ -957,6 +1024,16 @@ void ASTStmtWriter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
Code = serialization::STMT_CXX_FOR_RANGE;
}
+void ASTStmtWriter::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getKeywordLoc(), Record);
+ Record.push_back(S->isIfExists());
+ Writer.AddNestedNameSpecifierLoc(S->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameInfo(S->getNameInfo(), Record);
+ Writer.AddStmt(S->getSubStmt());
+ Code = serialization::STMT_MS_DEPENDENT_EXISTS;
+}
+
void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
Record.push_back(E->getOperator());
@@ -989,6 +1066,38 @@ void ASTStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
Code = serialization::EXPR_CXX_TEMPORARY_OBJECT;
}
+void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->NumCaptures);
+ unsigned NumArrayIndexVars = 0;
+ if (E->HasArrayIndexVars)
+ NumArrayIndexVars = E->getArrayIndexStarts()[E->NumCaptures];
+ Record.push_back(NumArrayIndexVars);
+ Writer.AddSourceRange(E->IntroducerRange, Record);
+ Record.push_back(E->CaptureDefault); // FIXME: stable encoding
+ Record.push_back(E->ExplicitParams);
+ Record.push_back(E->ExplicitResultType);
+ Writer.AddSourceLocation(E->ClosingBrace, Record);
+
+ // Add capture initializers.
+ for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
+ CEnd = E->capture_init_end();
+ C != CEnd; ++C) {
+ Writer.AddStmt(*C);
+ }
+
+ // Add array index variables, if any.
+ if (NumArrayIndexVars) {
+ Record.append(E->getArrayIndexStarts(),
+ E->getArrayIndexStarts() + E->NumCaptures + 1);
+ VarDecl **ArrayIndexVars = E->getArrayIndexVars();
+ for (unsigned I = 0; I != NumArrayIndexVars; ++I)
+ Writer.AddDeclRef(ArrayIndexVars[I], Record);
+ }
+
+ Code = serialization::EXPR_LAMBDA;
+}
+
void ASTStmtWriter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
VisitExplicitCastExpr(E);
Writer.AddSourceRange(SourceRange(E->getOperatorLoc(), E->getRParenLoc()),
@@ -1022,6 +1131,12 @@ void ASTStmtWriter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
Code = serialization::EXPR_CXX_FUNCTIONAL_CAST;
}
+void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
+ VisitCallExpr(E);
+ Writer.AddSourceLocation(E->UDSuffixLoc, Record);
+ Code = serialization::EXPR_USER_DEFINED_LITERAL;
+}
+
void ASTStmtWriter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
VisitExpr(E);
Record.push_back(E->getValue());
@@ -1093,25 +1208,20 @@ void ASTStmtWriter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
Record.push_back(E->isGlobalNew());
- Record.push_back(E->hasInitializer());
- Record.push_back(E->doesUsualArrayDeleteWantSize());
Record.push_back(E->isArray());
- Record.push_back(E->hadMultipleCandidates());
+ Record.push_back(E->doesUsualArrayDeleteWantSize());
Record.push_back(E->getNumPlacementArgs());
- Record.push_back(E->getNumConstructorArgs());
+ Record.push_back(E->StoredInitializationStyle);
Writer.AddDeclRef(E->getOperatorNew(), Record);
Writer.AddDeclRef(E->getOperatorDelete(), Record);
- Writer.AddDeclRef(E->getConstructor(), Record);
Writer.AddTypeSourceInfo(E->getAllocatedTypeSourceInfo(), Record);
Writer.AddSourceRange(E->getTypeIdParens(), Record);
Writer.AddSourceLocation(E->getStartLoc(), Record);
- Writer.AddSourceLocation(E->getEndLoc(), Record);
- Writer.AddSourceLocation(E->getConstructorLParen(), Record);
- Writer.AddSourceLocation(E->getConstructorRParen(), Record);
+ Writer.AddSourceRange(E->getDirectInitRange(), Record);
for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), e = E->raw_arg_end();
I != e; ++I)
Writer.AddStmt(*I);
-
+
Code = serialization::EXPR_CXX_NEW;
}
@@ -1151,9 +1261,9 @@ void ASTStmtWriter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
void ASTStmtWriter::VisitExprWithCleanups(ExprWithCleanups *E) {
VisitExpr(E);
- Record.push_back(E->getNumTemporaries());
- for (unsigned i = 0, e = E->getNumTemporaries(); i != e; ++i)
- Writer.AddCXXTemporary(E->getTemporary(i), Record);
+ Record.push_back(E->getNumObjects());
+ for (unsigned i = 0, e = E->getNumObjects(); i != e; ++i)
+ Writer.AddDeclRef(E->getObject(i), Record);
Writer.AddStmt(E->getSubExpr());
Code = serialization::EXPR_EXPR_WITH_CLEANUPS;
@@ -1162,17 +1272,17 @@ void ASTStmtWriter::VisitExprWithCleanups(ExprWithCleanups *E) {
void
ASTStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
VisitExpr(E);
-
- // Don't emit anything here, hasExplicitTemplateArgs() must be
+
+ // Don't emit anything here, HasTemplateKWAndArgsInfo must be
// emitted first.
- Record.push_back(E->hasExplicitTemplateArgs());
- if (E->hasExplicitTemplateArgs()) {
- const ASTTemplateArgumentListInfo &Args = E->getExplicitTemplateArgs();
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
Record.push_back(Args.NumTemplateArgs);
- AddExplicitTemplateArgumentList(Args);
+ AddTemplateKWAndArgsInfo(Args);
}
-
+
if (!E->isImplicitAccess())
Writer.AddStmt(E->getBase());
else
@@ -1189,14 +1299,15 @@ ASTStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
void
ASTStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
VisitExpr(E);
-
- // Don't emit anything here, hasExplicitTemplateArgs() must be
+
+ // Don't emit anything here, HasTemplateKWAndArgsInfo must be
// emitted first.
- Record.push_back(E->hasExplicitTemplateArgs());
- if (E->hasExplicitTemplateArgs()) {
- const ASTTemplateArgumentListInfo &Args = E->getExplicitTemplateArgs();
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
Record.push_back(Args.NumTemplateArgs);
- AddExplicitTemplateArgumentList(Args);
+ AddTemplateKWAndArgsInfo(Args);
}
Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
@@ -1219,13 +1330,15 @@ ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
-
- // Don't emit anything here, hasExplicitTemplateArgs() must be emitted first.
- Record.push_back(E->hasExplicitTemplateArgs());
- if (E->hasExplicitTemplateArgs()) {
- const ASTTemplateArgumentListInfo &Args = E->getExplicitTemplateArgs();
+
+ // Don't emit anything here, HasTemplateKWAndArgsInfo must be
+ // emitted first.
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
Record.push_back(Args.NumTemplateArgs);
- AddExplicitTemplateArgumentList(Args);
+ AddTemplateKWAndArgsInfo(Args);
}
Record.push_back(E->getNumDecls());
@@ -1278,6 +1391,16 @@ void ASTStmtWriter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
Code = serialization::EXPR_BINARY_TYPE_TRAIT;
}
+void ASTStmtWriter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->TypeTraitExprBits.NumArgs);
+ Record.push_back(E->TypeTraitExprBits.Kind); // FIXME: Stable encoding
+ Record.push_back(E->TypeTraitExprBits.Value);
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Writer.AddTypeSourceInfo(E->getArg(I), Record);
+ Code = serialization::EXPR_TYPE_TRAIT;
+}
+
void ASTStmtWriter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
VisitExpr(E);
Record.push_back(E->getTrait());
@@ -1348,7 +1471,7 @@ void ASTStmtWriter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
- Record.push_back(Writer.getOpaqueValueID(E));
+ Writer.AddStmt(E->getSourceExpr());
Writer.AddSourceLocation(E->getLocation(), Record);
Code = serialization::EXPR_OPAQUE_VALUE;
}
@@ -1435,15 +1558,11 @@ void ASTWriter::ClearSwitchCaseIDs() {
SwitchCaseIDs.clear();
}
-unsigned ASTWriter::getOpaqueValueID(OpaqueValueExpr *e) {
- unsigned &entry = OpaqueValues[e];
- if (!entry) entry = OpaqueValues.size();
- return entry;
-}
-
/// \brief Write the given substatement or subexpression to the
/// bitstream.
-void ASTWriter::WriteSubStmt(Stmt *S) {
+void ASTWriter::WriteSubStmt(Stmt *S,
+ llvm::DenseMap<Stmt *, uint64_t> &SubStmtEntries,
+ llvm::DenseSet<Stmt *> &ParentStmts) {
RecordData Record;
ASTStmtWriter Writer(*this, Record);
++NumStatements;
@@ -1453,6 +1572,32 @@ void ASTWriter::WriteSubStmt(Stmt *S) {
return;
}
+ llvm::DenseMap<Stmt *, uint64_t>::iterator I = SubStmtEntries.find(S);
+ if (I != SubStmtEntries.end()) {
+ Record.push_back(I->second);
+ Stream.EmitRecord(serialization::STMT_REF_PTR, Record);
+ return;
+ }
+
+#ifndef NDEBUG
+ assert(!ParentStmts.count(S) && "There is a Stmt cycle!");
+
+ struct ParentStmtInserterRAII {
+ Stmt *S;
+ llvm::DenseSet<Stmt *> &ParentStmts;
+
+ ParentStmtInserterRAII(Stmt *S, llvm::DenseSet<Stmt *> &ParentStmts)
+ : S(S), ParentStmts(ParentStmts) {
+ ParentStmts.insert(S);
+ }
+ ~ParentStmtInserterRAII() {
+ ParentStmts.erase(S);
+ }
+ };
+
+ ParentStmtInserterRAII ParentStmtInserter(S, ParentStmts);
+#endif
+
// Redirect ASTWriter::AddStmt to collect sub stmts.
SmallVector<Stmt *, 16> SubStmts;
CollectedStmts = &SubStmts;
@@ -1478,9 +1623,11 @@ void ASTWriter::WriteSubStmt(Stmt *S) {
 // This simplifies reading and allows storing a variable number of sub stmts
// without knowing it in advance.
while (!SubStmts.empty())
- WriteSubStmt(SubStmts.pop_back_val());
+ WriteSubStmt(SubStmts.pop_back_val(), SubStmtEntries, ParentStmts);
Stream.EmitRecord(Writer.Code, Record, Writer.AbbrevToUse);
+
+ SubStmtEntries[S] = Stream.GetCurrentBitNo();
}
/// \brief Flush all of the statements that have been added to the
@@ -1488,8 +1635,13 @@ void ASTWriter::WriteSubStmt(Stmt *S) {
void ASTWriter::FlushStmts() {
RecordData Record;
+  // We expect to be the only consumer of the two temporary statement maps,
+  // so assert that they are empty.
+ assert(SubStmtEntries.empty() && "unexpected entries in sub stmt map");
+ assert(ParentStmts.empty() && "unexpected entries in parent stmt map");
+
for (unsigned I = 0, N = StmtsToEmit.size(); I != N; ++I) {
- WriteSubStmt(StmtsToEmit[I]);
+ WriteSubStmt(StmtsToEmit[I], SubStmtEntries, ParentStmts);
assert(N == StmtsToEmit.size() &&
"Substatement written via AddStmt rather than WriteSubStmt!");
@@ -1498,6 +1650,9 @@ void ASTWriter::FlushStmts() {
// expression records that follow this one are part of a different
// expression.
Stream.EmitRecord(serialization::STMT_STOP, Record);
+
+ SubStmtEntries.clear();
+ ParentStmts.clear();
}
StmtsToEmit.clear();
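The WriteSubStmt changes above combine two small techniques: a map from
already-written statements to their bit offsets, so that a statement reached
twice is emitted once and referenced thereafter (STMT_REF_PTR), and a
debug-only RAII guard over a "currently being written" set that asserts the
statement graph is acyclic. A standalone sketch with standard containers
(an assumed simplification, not the ASTWriter interface):

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    struct Node { std::vector<Node *> Children; };

    class Emitter {
      std::unordered_map<Node *, std::uint64_t> Emitted; // node -> bit offset
      std::unordered_set<Node *> InProgress;             // cycle detection
      std::uint64_t BitNo = 0; // stand-in for the stream position

      struct InProgressGuard {
        Emitter &E;
        Node *N;
        InProgressGuard(Emitter &E, Node *N) : E(E), N(N) {
          E.InProgress.insert(N);
        }
        ~InProgressGuard() { E.InProgress.erase(N); }
      };

    public:
      void write(Node *N) {
        if (Emitted.count(N)) // already written: emit a back-reference instead
          return;
        assert(!InProgress.count(N) && "statement graph contains a cycle!");
        InProgressGuard Guard(*this, N);
        for (Node *C : N->Children) // children are written before their parent
          write(C);
        Emitted[N] = BitNo++; // record where this node's record landed
      }
    };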
diff --git a/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
index a2534db..02aed10 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
@@ -1,4 +1,4 @@
-//===--- GeneratePCH.cpp - AST Consumer for PCH Generation ------*- C++ -*-===//
+//===--- GeneratePCH.cpp - Sema Consumer for PCH Generation -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,12 +7,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines the CreatePCHGenerate function, which creates an
-// ASTConsumer that generates a PCH file.
+// This file defines the PCHGenerator, which is a SemaConsumer that generates
+// a PCH file.
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/ASTConsumers.h"
#include "clang/Serialization/ASTWriter.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/AST/ASTContext.h"
@@ -28,10 +27,10 @@ using namespace clang;
PCHGenerator::PCHGenerator(const Preprocessor &PP,
StringRef OutputFile,
- bool IsModule,
+ clang::Module *Module,
StringRef isysroot,
raw_ostream *OS)
- : PP(PP), OutputFile(OutputFile), IsModule(IsModule),
+ : PP(PP), OutputFile(OutputFile), Module(Module),
isysroot(isysroot.str()), Out(OS),
SemaPtr(0), StatCalls(0), Stream(Buffer), Writer(Stream) {
// Install a stat() listener to keep track of all of the stat()
@@ -49,7 +48,7 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
// Emit the PCH file
assert(SemaPtr && "No Sema?");
- Writer.WriteAST(*SemaPtr, StatCalls, OutputFile, IsModule, isysroot);
+ Writer.WriteAST(*SemaPtr, StatCalls, OutputFile, Module, isysroot);
// Write the generated bitstream to "Out".
Out->write((char *)&Buffer.front(), Buffer.size());
diff --git a/contrib/llvm/tools/clang/lib/Serialization/Module.cpp b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
index 0a721c4..16b95e2 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
@@ -20,8 +20,8 @@ using namespace clang;
using namespace serialization;
using namespace reader;
-Module::Module(ModuleKind Kind)
- : Kind(Kind), DirectlyImported(false), SizeInBits(0),
+ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation)
+ : Kind(Kind), DirectlyImported(false), Generation(Generation), SizeInBits(0),
LocalNumSLocEntries(0), SLocEntryBaseID(0),
SLocEntryBaseOffset(0), SLocEntryOffsets(0),
SLocFileOffsets(0), LocalNumIdentifiers(0),
@@ -30,15 +30,17 @@ Module::Module(ModuleKind Kind)
PreprocessedEntityOffsets(0), NumPreprocessedEntities(0),
LocalNumHeaderFileInfos(0),
HeaderFileInfoTableData(0), HeaderFileInfoTable(0),
- HeaderFileFrameworkStrings(0),
+ HeaderFileFrameworkStrings(0), LocalNumSubmodules(0), BaseSubmoduleID(0),
LocalNumSelectors(0), SelectorOffsets(0), BaseSelectorID(0),
SelectorLookupTableData(0), SelectorLookupTable(0), LocalNumDecls(0),
DeclOffsets(0), BaseDeclID(0),
LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(0),
+ FileSortedDecls(0), RedeclarationsMap(0), LocalNumRedeclarationsInMap(0),
+ ObjCCategoriesMap(0), LocalNumObjCCategoriesInMap(0),
LocalNumTypes(0), TypeOffsets(0), BaseTypeIndex(0), StatCache(0)
{}
-Module::~Module() {
+ModuleFile::~ModuleFile() {
for (DeclContextInfosMap::iterator I = DeclContextInfos.begin(),
E = DeclContextInfos.end();
I != E; ++I) {
@@ -67,7 +69,7 @@ dumpLocalRemap(StringRef Name,
}
}
-void Module::dump() {
+void ModuleFile::dump() {
llvm::errs() << "\nModule: " << FileName << "\n";
if (!Imports.empty()) {
llvm::errs() << " Imports: ";
@@ -87,7 +89,11 @@ void Module::dump() {
llvm::errs() << " Base identifier ID: " << BaseIdentifierID << '\n'
<< " Number of identifiers: " << LocalNumIdentifiers << '\n';
dumpLocalRemap("Identifier ID local -> global map", IdentifierRemap);
-
+
+ llvm::errs() << " Base submodule ID: " << BaseSubmoduleID << '\n'
+ << " Number of submodules: " << LocalNumSubmodules << '\n';
+ dumpLocalRemap("Submodule ID local -> global map", SubmoduleRemap);
+
llvm::errs() << " Base selector ID: " << BaseSelectorID << '\n'
<< " Number of selectors: " << LocalNumSelectors << '\n';
dumpLocalRemap("Selector ID local -> global map", SelectorRemap);
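Each per-entity section of the dump pairs a per-file base ID with a local
count: a loaded file numbers its own entities from zero, and the base offset
turns those small local IDs into process-global ones. A toy version of the
scheme (field names are illustrative, not the real ModuleFile layout):

    #include <cstdint>
    #include <iostream>

    struct LoadedFile {
      std::uint32_t BaseSelectorID;    // first global ID owned by this file
      std::uint32_t LocalNumSelectors; // how many local IDs the file defines

      std::uint32_t getGlobalSelectorID(std::uint32_t LocalID) const {
        return BaseSelectorID + LocalID; // local -> global remap
      }
    };

    int main() {
      LoadedFile F{/*BaseSelectorID=*/100, /*LocalNumSelectors=*/8};
      std::cout << F.getGlobalSelectorID(3) << "\n"; // prints 103
    }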
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
index c4b1f71..ab364b7 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
@@ -23,7 +23,7 @@
using namespace clang;
using namespace serialization;
-Module *ModuleManager::lookup(StringRef Name) {
+ModuleFile *ModuleManager::lookup(StringRef Name) {
const FileEntry *Entry = FileMgr.getFile(Name);
return Modules[Entry];
}
@@ -33,21 +33,22 @@ llvm::MemoryBuffer *ModuleManager::lookupBuffer(StringRef Name) {
return InMemoryBuffers[Entry];
}
-std::pair<Module *, bool>
+std::pair<ModuleFile *, bool>
ModuleManager::addModule(StringRef FileName, ModuleKind Type,
- Module *ImportedBy, std::string &ErrorStr) {
+ ModuleFile *ImportedBy, unsigned Generation,
+ std::string &ErrorStr) {
const FileEntry *Entry = FileMgr.getFile(FileName);
if (!Entry && FileName != "-") {
ErrorStr = "file not found";
- return std::make_pair(static_cast<Module*>(0), false);
+ return std::make_pair(static_cast<ModuleFile*>(0), false);
}
// Check whether we already loaded this module, before
- Module *&ModuleEntry = Modules[Entry];
+ ModuleFile *&ModuleEntry = Modules[Entry];
bool NewModule = false;
if (!ModuleEntry) {
// Allocate a new module.
- Module *New = new Module(Type);
+ ModuleFile *New = new ModuleFile(Type, Generation);
New->FileName = FileName.str();
Chain.push_back(New);
NewModule = true;
@@ -69,7 +70,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
New->Buffer.reset(FileMgr.getBufferForFile(FileName, &ErrorStr));
if (!New->Buffer)
- return std::make_pair(static_cast<Module*>(0), false);
+ return std::make_pair(static_cast<ModuleFile*>(0), false);
}
// Initialize the stream
@@ -101,16 +102,16 @@ ModuleManager::~ModuleManager() {
delete Chain[e - i - 1];
}
-void ModuleManager::visit(bool (*Visitor)(Module &M, void *UserData),
+void ModuleManager::visit(bool (*Visitor)(ModuleFile &M, void *UserData),
void *UserData) {
unsigned N = size();
// Record the number of incoming edges for each module. When we
// encounter a module with no incoming edges, push it into the queue
// to seed the queue.
- SmallVector<Module *, 4> Queue;
+ SmallVector<ModuleFile *, 4> Queue;
Queue.reserve(N);
- llvm::DenseMap<Module *, unsigned> UnusedIncomingEdges;
+ llvm::DenseMap<ModuleFile *, unsigned> UnusedIncomingEdges;
for (ModuleIterator M = begin(), MEnd = end(); M != MEnd; ++M) {
if (unsigned Size = (*M)->ImportedBy.size())
UnusedIncomingEdges[*M] = Size;
@@ -118,10 +119,10 @@ void ModuleManager::visit(bool (*Visitor)(Module &M, void *UserData),
Queue.push_back(*M);
}
- llvm::SmallPtrSet<Module *, 4> Skipped;
+ llvm::SmallPtrSet<ModuleFile *, 4> Skipped;
unsigned QueueStart = 0;
while (QueueStart < Queue.size()) {
- Module *CurrentModule = Queue[QueueStart++];
+ ModuleFile *CurrentModule = Queue[QueueStart++];
// Check whether this module should be skipped.
if (Skipped.count(CurrentModule))
@@ -132,16 +133,16 @@ void ModuleManager::visit(bool (*Visitor)(Module &M, void *UserData),
// module that the current module depends on. To indicate this
// behavior, we mark all of the reachable modules as having N
// incoming edges (which is impossible otherwise).
- SmallVector<Module *, 4> Stack;
+ SmallVector<ModuleFile *, 4> Stack;
Stack.push_back(CurrentModule);
Skipped.insert(CurrentModule);
while (!Stack.empty()) {
- Module *NextModule = Stack.back();
+ ModuleFile *NextModule = Stack.back();
Stack.pop_back();
// For any module that this module depends on, push it on the
// stack (if it hasn't already been marked as visited).
- for (llvm::SetVector<Module *>::iterator
+ for (llvm::SetVector<ModuleFile *>::iterator
M = NextModule->Imports.begin(),
MEnd = NextModule->Imports.end();
M != MEnd; ++M) {
@@ -154,7 +155,7 @@ void ModuleManager::visit(bool (*Visitor)(Module &M, void *UserData),
// For any module that this module depends on, push it on the
// stack (if it hasn't already been marked as visited).
- for (llvm::SetVector<Module *>::iterator M = CurrentModule->Imports.begin(),
+ for (llvm::SetVector<ModuleFile *>::iterator M = CurrentModule->Imports.begin(),
MEnd = CurrentModule->Imports.end();
M != MEnd; ++M) {
@@ -170,17 +171,17 @@ void ModuleManager::visit(bool (*Visitor)(Module &M, void *UserData),
}
/// \brief Perform a depth-first visit of the current module.
-static bool visitDepthFirst(Module &M,
- bool (*Visitor)(Module &M, bool Preorder,
+static bool visitDepthFirst(ModuleFile &M,
+ bool (*Visitor)(ModuleFile &M, bool Preorder,
void *UserData),
void *UserData,
- llvm::SmallPtrSet<Module *, 4> &Visited) {
+ llvm::SmallPtrSet<ModuleFile *, 4> &Visited) {
// Preorder visitation
if (Visitor(M, /*Preorder=*/true, UserData))
return true;
// Visit children
- for (llvm::SetVector<Module *>::iterator IM = M.Imports.begin(),
+ for (llvm::SetVector<ModuleFile *>::iterator IM = M.Imports.begin(),
IMEnd = M.Imports.end();
IM != IMEnd; ++IM) {
if (!Visited.insert(*IM))
@@ -194,10 +195,10 @@ static bool visitDepthFirst(Module &M,
return Visitor(M, /*Preorder=*/false, UserData);
}
-void ModuleManager::visitDepthFirst(bool (*Visitor)(Module &M, bool Preorder,
+void ModuleManager::visitDepthFirst(bool (*Visitor)(ModuleFile &M, bool Preorder,
void *UserData),
void *UserData) {
- llvm::SmallPtrSet<Module *, 4> Visited;
+ llvm::SmallPtrSet<ModuleFile *, 4> Visited;
for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
if (!Visited.insert(Chain[I]))
continue;
@@ -211,8 +212,8 @@ void ModuleManager::visitDepthFirst(bool (*Visitor)(Module &M, bool Preorder,
namespace llvm {
template<>
struct GraphTraits<ModuleManager> {
- typedef Module NodeType;
- typedef llvm::SetVector<Module *>::const_iterator ChildIteratorType;
+ typedef ModuleFile NodeType;
+ typedef llvm::SetVector<ModuleFile *>::const_iterator ChildIteratorType;
typedef ModuleManager::ModuleConstIterator nodes_iterator;
static ChildIteratorType child_begin(NodeType *Node) {
@@ -241,7 +242,7 @@ namespace llvm {
return true;
}
- std::string getNodeLabel(Module *M, const ModuleManager&) {
+ std::string getNodeLabel(ModuleFile *M, const ModuleManager&) {
return llvm::sys::path::stem(M->FileName);
}
};
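Stripped of the skip machinery, the visitation strategy in
ModuleManager::visit is Kahn's algorithm over the import graph: seed the
queue with files that nothing imports, and release a file only once every
file importing it has been visited. A plain-C++ sketch under that reading:

    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    struct File {
      std::vector<File *> ImportedBy; // files that import this one
      std::vector<File *> Imports;    // files this one imports
    };

    template <typename Visitor>
    void visitTopological(const std::vector<File *> &Chain, Visitor V) {
      std::vector<File *> Queue;
      std::unordered_map<File *, std::size_t> UnvisitedImporters;
      for (File *F : Chain) {
        if (std::size_t N = F->ImportedBy.size())
          UnvisitedImporters[F] = N;
        else
          Queue.push_back(F); // nothing imports F: safe to visit first
      }
      for (std::size_t I = 0; I < Queue.size(); ++I) {
        File *Cur = Queue[I];
        V(*Cur);
        for (File *Imp : Cur->Imports)
          if (--UnvisitedImporters[Imp] == 0) // last importer just finished
            Queue.push_back(Imp);
      }
    }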
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
index dc524ba..84ea8c7 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
@@ -37,20 +37,21 @@ void AdjustedReturnValueChecker::checkPostStmt(const CallExpr *CE,
QualType expectedResultTy = CE->getType();
// Fetch the signature of the called function.
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
- SVal V = state->getSVal(CE);
+ SVal V = state->getSVal(CE, LCtx);
if (V.isUnknown())
return;
// Casting to void? Discard the value.
if (expectedResultTy->isVoidType()) {
- C.generateNode(state->BindExpr(CE, UnknownVal()));
+ C.addTransition(state->BindExpr(CE, LCtx, UnknownVal()));
return;
}
- const MemRegion *callee = state->getSVal(CE->getCallee()).getAsRegion();
+ const MemRegion *callee = state->getSVal(CE->getCallee(), LCtx).getAsRegion();
if (!callee)
return;
@@ -82,7 +83,7 @@ void AdjustedReturnValueChecker::checkPostStmt(const CallExpr *CE,
// the cast avoids some assertion failures elsewhere.
SValBuilder &svalBuilder = C.getSValBuilder();
V = svalBuilder.evalCast(V, expectedResultTy, actualResultTy);
- C.generateNode(state->BindExpr(CE, V));
+ C.addTransition(state->BindExpr(CE, LCtx, V));
}
}
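The recurring substitutions in the checker diffs that follow (const
ProgramState * becoming ProgramStateRef, generateNode becoming addTransition)
reflect one idea: analyzer states are immutable and reference-counted, so a
checker derives a new state and registers it as a transition instead of
mutating anything in place. A toy model of that style, with std::shared_ptr
standing in for the analyzer's intrusive reference counting:

    #include <map>
    #include <memory>
    #include <string>

    struct State;
    using StateRef = std::shared_ptr<const State>;

    struct State {
      std::map<std::string, int> Bindings;

      // "Modifying" a binding derives a fresh immutable state.
      StateRef bindExpr(const std::string &Expr, int Val) const {
        auto Next = std::make_shared<State>(*this);
        Next->Bindings[Expr] = Val;
        return Next;
      }
    };

    // A checker never mutates its input state; it hands derived states back
    // to the engine (the analogue of addTransition(state->BindExpr(...))).
    StateRef checkPostCall(const StateRef &In) {
      return In->bindExpr("call-result", 0);
    }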
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index cd977bf..aa6f97b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
// This file reports various statistics about analyzer visitation.
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "StatsChecker"
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -16,12 +17,20 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
using namespace clang;
using namespace ento;
+STATISTIC(NumBlocks,
+ "The # of blocks in top level functions");
+STATISTIC(NumBlocksUnreachable,
+ "The # of unreachable blocks in analyzing top level functions");
+
namespace {
class AnalyzerStatsChecker : public Checker<check::EndAnalysis> {
public:
@@ -33,18 +42,23 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
BugReporter &B,
ExprEngine &Eng) const {
const CFG *C = 0;
- const Decl *D = 0;
- const LocationContext *LC = 0;
const SourceManager &SM = B.getSourceManager();
llvm::SmallPtrSet<const CFGBlock*, 256> reachable;
- // Iterate over explodedgraph
+  // Root node should have the location context of the topmost function.
+ const ExplodedNode *GraphRoot = *G.roots_begin();
+ const LocationContext *LC = GraphRoot->getLocation().getLocationContext();
+
+ const Decl *D = LC->getDecl();
+
+ // Iterate over the exploded graph.
for (ExplodedGraph::node_iterator I = G.nodes_begin();
I != G.nodes_end(); ++I) {
const ProgramPoint &P = I->getLocation();
- // Save the LocationContext if we don't have it already
- if (!LC)
- LC = P.getLocationContext();
+
+ // Only check the coverage in the top level function (optimization).
+ if (D != P.getLocationContext()->getDecl())
+ continue;
if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
const CFGBlock *CB = BE->getBlock();
@@ -52,9 +66,8 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
}
}
- // Get the CFG and the Decl of this block
+ // Get the CFG and the Decl of this block.
C = LC->getCFG();
- D = LC->getAnalysisContext()->getDecl();
unsigned total = 0, unreachable = 0;
@@ -70,33 +83,39 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
// We never 'reach' the entry block, so correct the unreachable count
unreachable--;
+  // There is likewise no BlockEntrance corresponding to the exit block, so
+  // assume it is reached as well.
+ unreachable--;
// Generate the warning string
- llvm::SmallString<128> buf;
+ SmallString<128> buf;
llvm::raw_svector_ostream output(buf);
PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
- if (Loc.isValid()) {
- output << Loc.getFilename() << " : ";
+ if (!Loc.isValid())
+ return;
- if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
- const NamedDecl *ND = cast<NamedDecl>(D);
- output << *ND;
- }
- else if (isa<BlockDecl>(D)) {
- output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn();
- }
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ output << *ND;
+ }
+ else if (isa<BlockDecl>(D)) {
+ output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn();
}
+ NumBlocksUnreachable += unreachable;
+ NumBlocks += total;
+ std::string NameOfRootFunction = output.str();
+
output << " -> Total CFGBlocks: " << total << " | Unreachable CFGBlocks: "
<< unreachable << " | Exhausted Block: "
<< (Eng.wasBlocksExhausted() ? "yes" : "no")
<< " | Empty WorkList: "
<< (Eng.hasEmptyWorkList() ? "yes" : "no");
- B.EmitBasicReport("Analyzer Statistics", "Internal Statistics", output.str(),
- PathDiagnosticLocation(D, SM));
+ B.EmitBasicReport(D, "Analyzer Statistics", "Internal Statistics",
+ output.str(), PathDiagnosticLocation(D, SM));
- // Emit warning for each block we bailed out on
+ // Emit warning for each block we bailed out on.
typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
const CoreEngine &CE = Eng.getCoreEngine();
for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
@@ -104,10 +123,15 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
const BlockEdge &BE = I->first;
const CFGBlock *Exit = BE.getDst();
const CFGElement &CE = Exit->front();
- if (const CFGStmt *CS = dyn_cast<CFGStmt>(&CE))
- B.EmitBasicReport("Bailout Point", "Internal Statistics", "The analyzer "
- "stopped analyzing at this point",
- PathDiagnosticLocation::createBegin(CS->getStmt(), SM, LC));
+ if (const CFGStmt *CS = dyn_cast<CFGStmt>(&CE)) {
+ SmallString<128> bufI;
+ llvm::raw_svector_ostream outputI(bufI);
+ outputI << "(" << NameOfRootFunction << ")" <<
+ ": The analyzer generated a sink at this point";
+ B.EmitBasicReport(D, "Sink Point", "Internal Statistics", outputI.str(),
+ PathDiagnosticLocation::createBegin(CS->getStmt(),
+ SM, LC));
+ }
}
}
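The coverage computation above collects every basic block that produced a
BlockEntrance in the exploded graph, then subtracts two from the unreachable
count because the CFG's entry and exit blocks never produce one yet are
trivially reached. In isolation, assuming block IDs 0..NumBlocks-1 and that
the entry/exit IDs are never inserted into Reached:

    #include <cstddef>
    #include <unordered_set>

    std::size_t countUnreachable(std::size_t NumBlocks,
                                 const std::unordered_set<std::size_t> &Reached) {
      std::size_t Unreachable = 0;
      for (std::size_t B = 0; B != NumBlocks; ++B)
        if (!Reached.count(B))
          ++Unreachable;
      // Entry and exit never get a BlockEntrance but are trivially reached.
      return Unreachable >= 2 ? Unreachable - 2 : 0;
    }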
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 6935c5f..b2ad184 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -25,7 +25,7 @@ using namespace ento;
namespace {
class ArrayBoundChecker :
public Checker<check::Location> {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
public:
void checkLocation(SVal l, bool isLoad, const Stmt* S,
CheckerContext &C) const;
@@ -51,15 +51,15 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
if (Idx.isZeroConstant())
return;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
// Get the size of the array.
DefinedOrUnknownSVal NumElements
= C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
ER->getValueType());
- const ProgramState *StInBound = state->assumeInBound(Idx, NumElements, true);
- const ProgramState *StOutBound = state->assumeInBound(Idx, NumElements, false);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateSink(StOutBound);
if (!N)
@@ -84,7 +84,6 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
// Array bound check succeeded. From this point forward the array bound
// should always succeed.
- assert(StInBound);
C.addTransition(StInBound);
}
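assumeInBound above asks the constraint manager one question and receives two
states: one where the index fits and one where it does not, each present only
if feasible. Modeling the symbolic index as a plain interval makes the split
concrete (a sketch, not the analyzer's constraint machinery):

    #include <algorithm>
    #include <optional>
    #include <utility>

    struct Range { long Min, Max; }; // inclusive bounds on a symbolic index

    // Split "Idx in [Min, Max]" against "Idx < Size"; an absent optional
    // means that branch is infeasible.
    std::pair<std::optional<Range>, std::optional<Range>>
    assumeInBound(Range Idx, long Size) {
      std::optional<Range> In, Out;
      if (Idx.Min < Size) // part of the interval is in bounds
        In = Range{Idx.Min, std::min(Idx.Max, Size - 1)};
      if (Idx.Max >= Size) // part of the interval is out of bounds
        Out = Range{std::max(Idx.Min, Size), Idx.Max};
      return {In, Out};
    }

When only the out-of-bounds state comes back feasible, this checker reports;
when both are feasible the index is under-constrained, which is exactly the
case the V2 checker below refines with taint information.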
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 6175028..c6efe94 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -19,6 +19,8 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
using namespace clang;
using namespace ento;
@@ -26,11 +28,11 @@ using namespace ento;
namespace {
class ArrayBoundCheckerV2 :
public Checker<check::Location> {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
- enum OOB_Kind { OOB_Precedes, OOB_Excedes };
+ enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
- void reportOOB(CheckerContext &C, const ProgramState *errorState,
+ void reportOOB(CheckerContext &C, ProgramStateRef errorState,
OOB_Kind kind) const;
public:
@@ -54,7 +56,7 @@ public:
NonLoc getByteOffset() const { return cast<NonLoc>(byteOffset); }
const SubRegion *getRegion() const { return baseRegion; }
- static RegionRawOffsetV2 computeOffset(const ProgramState *state,
+ static RegionRawOffsetV2 computeOffset(ProgramStateRef state,
SValBuilder &svalBuilder,
SVal location);
@@ -92,8 +94,8 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// memory access is within the extent of the base region. Since we
// have some flexibility in defining the base region, we can achieve
// various levels of conservatism in our buffer overflow checking.
- const ProgramState *state = checkerContext.getState();
- const ProgramState *originalState = state;
+ ProgramStateRef state = checkerContext.getState();
+ ProgramStateRef originalState = state;
SValBuilder &svalBuilder = checkerContext.getSValBuilder();
const RegionRawOffsetV2 &rawOffset =
@@ -118,7 +120,7 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
if (!lowerBoundToCheck)
return;
- const ProgramState *state_precedesLowerBound, *state_withinLowerBound;
+ ProgramStateRef state_precedesLowerBound, state_withinLowerBound;
llvm::tie(state_precedesLowerBound, state_withinLowerBound) =
state->assume(*lowerBoundToCheck);
@@ -150,12 +152,20 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
if (!upperboundToCheck)
break;
- const ProgramState *state_exceedsUpperBound, *state_withinUpperBound;
+ ProgramStateRef state_exceedsUpperBound, state_withinUpperBound;
llvm::tie(state_exceedsUpperBound, state_withinUpperBound) =
state->assume(*upperboundToCheck);
+
+  // If we are under-constrained and the index variables are tainted, report.
+ if (state_exceedsUpperBound && state_withinUpperBound) {
+ if (state->isTainted(rawOffset.getByteOffset()))
+ reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted);
+ return;
+ }
- // Are we constrained enough to definitely exceed the upper bound?
- if (state_exceedsUpperBound && !state_withinUpperBound) {
+ // If we are constrained enough to definitely exceed the upper bound, report.
+ if (state_exceedsUpperBound) {
+ assert(!state_withinUpperBound);
reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
return;
}
@@ -166,11 +176,11 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
while (false);
if (state != originalState)
- checkerContext.generateNode(state);
+ checkerContext.addTransition(state);
}
void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
- const ProgramState *errorState,
+ ProgramStateRef errorState,
OOB_Kind kind) const {
ExplodedNode *errorNode = checkerContext.generateSink(errorState);
@@ -183,11 +193,20 @@ void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
 // FIXME: These diagnostics are preliminary. We should get far better
// diagnostics for explaining buffer overruns.
- llvm::SmallString<256> buf;
+ SmallString<256> buf;
llvm::raw_svector_ostream os(buf);
- os << "Out of bound memory access "
- << (kind == OOB_Precedes ? "(accessed memory precedes memory block)"
- : "(access exceeds upper limit of memory block)");
+ os << "Out of bound memory access ";
+ switch (kind) {
+ case OOB_Precedes:
+ os << "(accessed memory precedes memory block)";
+ break;
+ case OOB_Excedes:
+ os << "(access exceeds upper limit of memory block)";
+ break;
+ case OOB_Tainted:
+ os << "(index is tainted)";
+ break;
+ }
checkerContext.EmitReport(new BugReport(*BT, os.str(), errorNode));
}
@@ -221,7 +240,7 @@ static inline SVal getValue(SVal val, SValBuilder &svalBuilder) {
// Scale a base value by a scaling factor, and return the scaled
// value as an SVal. Used by 'computeOffset'.
-static inline SVal scaleValue(const ProgramState *state,
+static inline SVal scaleValue(ProgramStateRef state,
NonLoc baseVal, CharUnits scaling,
SValBuilder &sb) {
return sb.evalBinOpNN(state, BO_Mul, baseVal,
@@ -231,7 +250,7 @@ static inline SVal scaleValue(const ProgramState *state,
// Add an SVal to another, treating unknown and undefined values as
// summing to UnknownVal. Used by 'computeOffset'.
-static SVal addValue(const ProgramState *state, SVal x, SVal y,
+static SVal addValue(ProgramStateRef state, SVal x, SVal y,
SValBuilder &svalBuilder) {
// We treat UnknownVals and UndefinedVals the same here because we
// only care about computing offsets.
@@ -245,7 +264,7 @@ static SVal addValue(const ProgramState *state, SVal x, SVal y,
/// Compute a raw byte offset from a base region. Used for array bounds
/// checking.
-RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(const ProgramState *state,
+RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
SValBuilder &svalBuilder,
SVal location)
{
@@ -277,9 +296,9 @@ RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(const ProgramState *state,
offset = addValue(state,
getValue(offset, svalBuilder),
scaleValue(state,
- cast<NonLoc>(index),
- astContext.getTypeSizeInChars(elemType),
- svalBuilder),
+ cast<NonLoc>(index),
+ astContext.getTypeSizeInChars(elemType),
+ svalBuilder),
svalBuilder);
if (offset.isUnknownOrUndef())
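The reworked upper-bound logic above distinguishes two report paths: if both
the exceeds and within states are feasible the index is under-constrained,
and a report is justified only when the offset is tainted; if only the
exceeds state is feasible the overflow is definite. The decision skeleton,
with stub types standing in for the analyzer's (illustrative only):

    #include <optional>

    struct State {};
    enum OOBKind { Definite, Tainted };
    void report(const State &, OOBKind) {} // stub: a real checker emits a bug

    void checkUpperBound(const std::optional<State> &Exceeds,
                         const std::optional<State> &Within,
                         bool OffsetIsTainted) {
      if (Exceeds && Within) {       // under-constrained index
        if (OffsetIsTainted)
          report(*Exceeds, Tainted); // only taint justifies a report here
        return;
      }
      if (Exceeds)                   // Within infeasible: definite overflow
        report(*Exceeds, Definite);
    }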
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
index 8296eb9..ab66e98 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
@@ -24,7 +24,7 @@ using namespace ento;
namespace {
class AttrNonNullChecker
: public Checker< check::PreStmt<CallExpr> > {
- mutable llvm::OwningPtr<BugType> BT;
+ mutable OwningPtr<BugType> BT;
public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
@@ -33,10 +33,11 @@ public:
void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
// Check if the callee has a 'nonnull' attribute.
- SVal X = state->getSVal(CE->getCallee());
+ SVal X = state->getSVal(CE->getCallee(), LCtx);
const FunctionDecl *FD = X.getAsFunctionDecl();
if (!FD)
@@ -55,7 +56,7 @@ void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
if (!Att->isNonNull(idx))
continue;
- SVal V = state->getSVal(*I);
+ SVal V = state->getSVal(*I, LCtx);
DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
// If the value is unknown or undefined, we can't perform this check.
@@ -85,7 +86,7 @@ void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
}
ConstraintManager &CM = C.getConstraintManager();
- const ProgramState *stateNotNull, *stateNull;
+ ProgramStateRef stateNotNull, stateNull;
llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
if (stateNull && !stateNotNull) {
@@ -108,7 +109,7 @@ void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
const Expr *arg = *I;
R->addRange(arg->getSourceRange());
R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(errorNode,
- arg));
+ arg, R));
// Emit the bug report.
C.EmitReport(R);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 08cff0f..6dd0a8c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -28,6 +28,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ASTContext.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
@@ -70,7 +71,7 @@ static inline bool isNil(SVal X) {
namespace {
class NilArgChecker : public Checker<check::PreObjCMessage> {
- mutable llvm::OwningPtr<APIMisuse> BT;
+ mutable OwningPtr<APIMisuse> BT;
void WarnNilArg(CheckerContext &C,
const ObjCMessage &msg, unsigned Arg) const;
@@ -88,7 +89,7 @@ void NilArgChecker::WarnNilArg(CheckerContext &C,
BT.reset(new APIMisuse("nil argument"));
if (ExplodedNode *N = C.generateSink()) {
- llvm::SmallString<128> sbuf;
+ SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << "Argument to '" << GetReceiverNameType(msg) << "' method '"
<< msg.getSelector().getAsString() << "' cannot be nil";
@@ -129,7 +130,7 @@ void NilArgChecker::checkPreObjCMessage(ObjCMessage msg,
Name == "compare:options:range:locale:" ||
Name == "componentsSeparatedByCharactersInSet:" ||
Name == "initWithFormat:") {
- if (isNil(msg.getArgSVal(0, C.getState())))
+ if (isNil(msg.getArgSVal(0, C.getLocationContext(), C.getState())))
WarnNilArg(C, msg, 0);
}
}
@@ -141,7 +142,7 @@ void NilArgChecker::checkPreObjCMessage(ObjCMessage msg,
namespace {
class CFNumberCreateChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable llvm::OwningPtr<APIMisuse> BT;
+ mutable OwningPtr<APIMisuse> BT;
mutable IdentifierInfo* II;
public:
CFNumberCreateChecker() : II(0) {}
@@ -249,11 +250,8 @@ static const char* GetCFNumberTypeStr(uint64_t i) {
void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
- const Expr *Callee = CE->getCallee();
- const ProgramState *state = C.getState();
- SVal CallV = state->getSVal(Callee);
- const FunctionDecl *FD = CallV.getAsFunctionDecl();
-
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
@@ -265,7 +263,8 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
return;
// Get the value of the "theType" argument.
- SVal TheTypeVal = state->getSVal(CE->getArg(1));
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);
// FIXME: We really should allow ranges of valid theType values, and
// bifurcate the state appropriately.
@@ -283,7 +282,7 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
// Look at the value of the integer being passed by reference. Essentially
// we want to catch cases where the value passed in is not equal to the
// size of the type being created.
- SVal TheValueExpr = state->getSVal(CE->getArg(2));
+ SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);
// FIXME: Eventually we should handle arbitrary locations. We can do this
// by having an enhanced memory model that does low-level typing.
@@ -316,8 +315,8 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
// the bits initialized to the provided values.
//
if (ExplodedNode *N = SourceSize < TargetSize ? C.generateSink()
- : C.generateNode()) {
- llvm::SmallString<128> sbuf;
+ : C.addTransition()) {
+ SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << (SourceSize == 8 ? "An " : "A ")
@@ -348,7 +347,7 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
namespace {
class CFRetainReleaseChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable llvm::OwningPtr<APIMisuse> BT;
+ mutable OwningPtr<APIMisuse> BT;
mutable IdentifierInfo *Retain, *Release;
public:
CFRetainReleaseChecker(): Retain(0), Release(0) {}
@@ -363,11 +362,8 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
if (CE->getNumArgs() != 1)
return;
- // Get the function declaration of the callee.
- const ProgramState *state = C.getState();
- SVal X = state->getSVal(CE->getCallee());
- const FunctionDecl *FD = X.getAsFunctionDecl();
-
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
@@ -388,7 +384,7 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
// Get the argument's value.
const Expr *Arg = CE->getArg(0);
- SVal ArgVal = state->getSVal(Arg);
+ SVal ArgVal = state->getSVal(Arg, C.getLocationContext());
DefinedSVal *DefArgVal = dyn_cast<DefinedSVal>(&ArgVal);
if (!DefArgVal)
return;
@@ -401,7 +397,7 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
DefinedOrUnknownSVal ArgIsNull = svalBuilder.evalEQ(state, zero, *DefArgVal);
// Are they equal?
- const ProgramState *stateTrue, *stateFalse;
+ ProgramStateRef stateTrue, stateFalse;
llvm::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
if (stateTrue && !stateFalse) {
@@ -415,7 +411,8 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
BugReport *report = new BugReport(*BT, description, N);
report->addRange(Arg->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Arg));
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Arg,
+ report));
C.EmitReport(report);
return;
}
@@ -434,7 +431,7 @@ class ClassReleaseChecker : public Checker<check::PreObjCMessage> {
mutable Selector retainS;
mutable Selector autoreleaseS;
mutable Selector drainS;
- mutable llvm::OwningPtr<BugType> BT;
+ mutable OwningPtr<BugType> BT;
public:
void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
@@ -464,8 +461,8 @@ void ClassReleaseChecker::checkPreObjCMessage(ObjCMessage msg,
if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
return;
- if (ExplodedNode *N = C.generateNode()) {
- llvm::SmallString<200> buf;
+ if (ExplodedNode *N = C.addTransition()) {
+ SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
os << "The '" << S.getAsString() << "' message should be sent to instances "
@@ -488,9 +485,10 @@ class VariadicMethodTypeChecker : public Checker<check::PreObjCMessage> {
mutable Selector arrayWithObjectsS;
mutable Selector dictionaryWithObjectsAndKeysS;
mutable Selector setWithObjectsS;
+ mutable Selector orderedSetWithObjectsS;
mutable Selector initWithObjectsS;
mutable Selector initWithObjectsAndKeysS;
- mutable llvm::OwningPtr<BugType> BT;
+ mutable OwningPtr<BugType> BT;
bool isVariadicMessage(const ObjCMessage &msg) const;
@@ -533,6 +531,11 @@ VariadicMethodTypeChecker::isVariadicMessage(const ObjCMessage &msg) const {
if (isReceiverClassOrSuperclass(Class, "NSSet") &&
S == initWithObjectsS)
return true;
+
+ // -[NSOrderedSet initWithObjects:]
+ if (isReceiverClassOrSuperclass(Class, "NSOrderedSet") &&
+ S == initWithObjectsS)
+ return true;
} else {
const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
@@ -550,6 +553,11 @@ VariadicMethodTypeChecker::isVariadicMessage(const ObjCMessage &msg) const {
if (isReceiverClassOrSuperclass(Class, "NSSet") &&
S == setWithObjectsS)
return true;
+
+ // -[NSOrderedSet orderedSetWithObjects:]
+ if (isReceiverClassOrSuperclass(Class, "NSOrderedSet") &&
+ S == orderedSetWithObjectsS)
+ return true;
}
return false;
@@ -566,6 +574,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
dictionaryWithObjectsAndKeysS =
GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
+ orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);
initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
@@ -587,7 +596,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
// Verify that all arguments have Objective-C types.
llvm::Optional<ExplodedNode*> errorNode;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
QualType ArgTy = msg.getArgType(I);
@@ -599,7 +608,8 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
continue;
// Ignore pointer constants.
- if (isa<loc::ConcreteInt>(msg.getArgSVal(I, state)))
+ if (isa<loc::ConcreteInt>(msg.getArgSVal(I, C.getLocationContext(),
+ state)))
continue;
// Ignore pointer types annotated with 'NSObject' attribute.
@@ -612,13 +622,13 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
// Generate only one error node to use for all bug reports.
if (!errorNode.hasValue()) {
- errorNode = C.generateNode();
+ errorNode = C.addTransition();
}
if (!errorNode.getValue())
continue;
- llvm::SmallString<128> sbuf;
+ SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
if (const char *TypeName = GetReceiverNameType(msg))
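
Two mechanical renames dominate this file: SmallString is now spelled without the llvm:: prefix (hence the new llvm/ADT/SmallString.h include), and C.generateNode() becomes C.addTransition(). A minimal sketch of the message-building idiom both touch; the function and parameter names are illustrative only.

#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;

static void reportNilArg(CheckerContext &C, StringRef selector,
                         BugType &BT) {
  if (ExplodedNode *N = C.generateSink()) {
    SmallString<128> sbuf;               // stack buffer; grows if needed
    llvm::raw_svector_ostream os(sbuf);  // stream straight into it
    os << "Argument to '" << selector << "' cannot be nil";
    C.EmitReport(new BugReport(BT, os.str(), N));
  }
}
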
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
new file mode 100644
index 0000000..a4fc396
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -0,0 +1,157 @@
+//== BoolAssignmentChecker.cpp - Boolean assignment checker -----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines BoolAssignmentChecker, a builtin check in ExprEngine that
+// performs checks for assignment of non-Boolean values to Boolean variables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+ class BoolAssignmentChecker : public Checker< check::Bind > {
+ mutable llvm::OwningPtr<BuiltinBug> BT;
+ void emitReport(ProgramStateRef state, CheckerContext &C) const;
+ public:
+ void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ };
+} // end anonymous namespace
+
+void BoolAssignmentChecker::emitReport(ProgramStateRef state,
+ CheckerContext &C) const {
+ if (ExplodedNode *N = C.addTransition(state)) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Assignment of a non-Boolean value"));
+ C.EmitReport(new BugReport(*BT, BT->getDescription(), N));
+ }
+}
+
+static bool isBooleanType(QualType Ty) {
+ if (Ty->isBooleanType()) // C++ or C99
+ return true;
+
+ if (const TypedefType *TT = Ty->getAs<TypedefType>())
+ return TT->getDecl()->getName() == "BOOL" || // Objective-C
+ TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
+ TT->getDecl()->getName() == "Boolean"; // MacTypes.h
+
+ return false;
+}
+
+void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+
+ // We are only interested in stores into Booleans.
+ const TypedValueRegion *TR =
+ dyn_cast_or_null<TypedValueRegion>(loc.getAsRegion());
+
+ if (!TR)
+ return;
+
+ QualType valTy = TR->getValueType();
+
+ if (!isBooleanType(valTy))
+ return;
+
+ // Get the value of the right-hand side. We only care about values
+ // that are defined (UnknownVals and UndefinedVals are handled by other
+ // checkers).
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&val);
+ if (!DV)
+ return;
+
+ // Check if the assigned value meets our criteria for correctness. It must
+ // be a value that is either 0 or 1. One way to check this is to see if
+ // the value is possibly < 0 (for a negative value) or greater than 1.
+ ProgramStateRef state = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ConstraintManager &CM = C.getConstraintManager();
+
+ // First, ensure that the value is >= 0.
+ DefinedSVal zeroVal = svalBuilder.makeIntVal(0, valTy);
+ SVal greaterThanOrEqualToZeroVal =
+ svalBuilder.evalBinOp(state, BO_GE, *DV, zeroVal,
+ svalBuilder.getConditionType());
+
+ DefinedSVal *greaterThanEqualToZero =
+ dyn_cast<DefinedSVal>(&greaterThanOrEqualToZeroVal);
+
+ if (!greaterThanEqualToZero) {
+ // The SValBuilder cannot construct a valid SVal for this condition.
+ // This means we cannot properly reason about it.
+ return;
+ }
+
+ ProgramStateRef stateLT, stateGE;
+ llvm::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
+
+ // Is it possible for the value to be less than zero?
+ if (stateLT) {
+ // It is possible for the value to be less than zero. We only
+ // want to emit a warning, however, if that value is fully constrained.
+ // If it is possible for the value to be >= 0, then essentially the
+ // value is underconstrained and there is nothing left to be done.
+ if (!stateGE)
+ emitReport(stateLT, C);
+
+ // In either case, we are done.
+ return;
+ }
+
+ // If we reach here, it must be the case that the value is constrained
+ // to only be >= 0.
+ assert(stateGE == state);
+
+ // At this point we know that the value is >= 0.
+ // Now check to ensure that the value is <= 1.
+ DefinedSVal OneVal = svalBuilder.makeIntVal(1, valTy);
+ SVal lessThanEqToOneVal =
+ svalBuilder.evalBinOp(state, BO_LE, *DV, OneVal,
+ svalBuilder.getConditionType());
+
+ DefinedSVal *lessThanEqToOne =
+ dyn_cast<DefinedSVal>(&lessThanEqToOneVal);
+
+ if (!lessThanEqToOne) {
+ // The SValBuilder cannot construct a valid SVal for this condition.
+ // This means we cannot properly reason about it.
+ return;
+ }
+
+ ProgramStateRef stateGT, stateLE;
+ llvm::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
+
+ // Is it possible for the value to be greater than one?
+ if (stateGT) {
+ // It is possible for the value to be greater than one. We only
+ // want to emit a warning, however, if that value is fully constrained.
+ // If it is possible for the value to be <= 1, then essentially the
+ // value is underconstrained and there is nothing left to be done.
+ if (!stateLE)
+ emitReport(stateGT, C);
+
+ // In either case, we are done.
+ return;
+ }
+
+ // If we reach here, it must be the case that the value is constrained
+ // to only be <= 1.
+ assert(stateLE == state);
+}
+
+void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
+ mgr.registerChecker<BoolAssignmentChecker>();
+}
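
For orientation, a few hypothetical inputs for this new checker. It binds on every store into a Boolean-typed region and warns only when the stored value is fully constrained outside [0, 1]; underconstrained values stay silent. Whether a given line actually warns depends on the constraint manager, so treat these annotations as expectations, not guarantees.

typedef signed char BOOL;   // one of the typedef names isBooleanType matches

void f(int x) {
  BOOL a = 2;               // expected warning: constrained to 2, i.e. > 1
  BOOL b = x;               // no warning: x is underconstrained here
  if (x > 1)
    b = x;                  // expected warning on this path: x known > 1
  bool c = (x != 0);        // no warning: comparison yields exactly 0 or 1
}
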
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index a57d031..509bc79 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -30,12 +30,10 @@ public:
}
bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
- CheckerContext &C) const{
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
- const FunctionDecl *FD = L.getAsFunctionDecl();
-
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ const LocationContext *LCtx = C.getLocationContext();
if (!FD)
return false;
@@ -48,8 +46,8 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
case Builtin::BI__builtin_expect: {
// For __builtin_expect, just return the value of the subexpression.
assert (CE->arg_begin() != CE->arg_end());
- SVal X = state->getSVal(*(CE->arg_begin()));
- C.generateNode(state->BindExpr(CE, X));
+ SVal X = state->getSVal(*(CE->arg_begin()), LCtx);
+ C.addTransition(state->BindExpr(CE, LCtx, X));
return true;
}
@@ -57,14 +55,13 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
// FIXME: Refactor into StoreManager itself?
MemRegionManager& RM = C.getStoreManager().getRegionManager();
const AllocaRegion* R =
- RM.getAllocaRegion(CE, C.getCurrentBlockCount(),
- C.getPredecessor()->getLocationContext());
+ RM.getAllocaRegion(CE, C.getCurrentBlockCount(), C.getLocationContext());
// Set the extent of the region in bytes. This enables us to use the
// SVal of the argument directly. If we save the extent in bits, we
// cannot represent values like symbol*8.
DefinedOrUnknownSVal Size =
- cast<DefinedOrUnknownSVal>(state->getSVal(*(CE->arg_begin())));
+ cast<DefinedOrUnknownSVal>(state->getSVal(*(CE->arg_begin()), LCtx));
SValBuilder& svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
@@ -72,7 +69,7 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
svalBuilder.evalEQ(state, Extent, Size);
state = state->assume(extentMatchesSizeArg, true);
- C.generateNode(state->BindExpr(CE, loc::MemRegionVal(R)));
+ C.addTransition(state->BindExpr(CE, LCtx, loc::MemRegionVal(R)));
return true;
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 1625219..9eb7edf 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -13,11 +13,14 @@
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
+#include "InterCheckerAPI.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -30,25 +33,40 @@ class CStringChecker : public Checker< eval::Call,
check::DeadSymbols,
check::RegionChanges
> {
- mutable llvm::OwningPtr<BugType> BT_Null, BT_Bounds,
- BT_Overlap, BT_NotCString,
- BT_AdditionOverflow;
+ mutable OwningPtr<BugType> BT_Null,
+ BT_Bounds,
+ BT_Overlap,
+ BT_NotCString,
+ BT_AdditionOverflow;
+
mutable const char *CurrentFunctionDescription;
public:
+ /// The filter is used to filter out the diagnostics which are not enabled by
+ /// the user.
+ struct CStringChecksFilter {
+ DefaultBool CheckCStringNullArg;
+ DefaultBool CheckCStringOutOfBounds;
+ DefaultBool CheckCStringBufferOverlap;
+ DefaultBool CheckCStringNotNullTerm;
+ };
+
+ CStringChecksFilter Filter;
+
static void *getTag() { static int tag; return &tag; }
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
- void checkLiveSymbols(const ProgramState *state, SymbolReaper &SR) const;
+ void checkLiveSymbols(ProgramStateRef state, SymbolReaper &SR) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
- bool wantsRegionChangeUpdate(const ProgramState *state) const;
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const;
- const ProgramState *
- checkRegionChanges(const ProgramState *state,
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions) const;
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const;
typedef void (CStringChecker::*FnCheck)(CheckerContext &,
const CallExpr *) const;
@@ -58,7 +76,7 @@ public:
void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
- const ProgramState *state,
+ ProgramStateRef state,
const Expr *Size,
const Expr *Source,
const Expr *Dest,
@@ -95,48 +113,48 @@ public:
bool ignoreCase = false) const;
// Utility methods
- std::pair<const ProgramState*, const ProgramState*>
+ std::pair<ProgramStateRef , ProgramStateRef >
static assumeZero(CheckerContext &C,
- const ProgramState *state, SVal V, QualType Ty);
+ ProgramStateRef state, SVal V, QualType Ty);
- static const ProgramState *setCStringLength(const ProgramState *state,
+ static ProgramStateRef setCStringLength(ProgramStateRef state,
const MemRegion *MR,
SVal strLength);
static SVal getCStringLengthForRegion(CheckerContext &C,
- const ProgramState *&state,
+ ProgramStateRef &state,
const Expr *Ex,
const MemRegion *MR,
bool hypothetical);
SVal getCStringLength(CheckerContext &C,
- const ProgramState *&state,
+ ProgramStateRef &state,
const Expr *Ex,
SVal Buf,
bool hypothetical = false) const;
const StringLiteral *getCStringLiteral(CheckerContext &C,
- const ProgramState *&state,
+ ProgramStateRef &state,
const Expr *expr,
SVal val) const;
- static const ProgramState *InvalidateBuffer(CheckerContext &C,
- const ProgramState *state,
+ static ProgramStateRef InvalidateBuffer(CheckerContext &C,
+ ProgramStateRef state,
const Expr *Ex, SVal V);
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
// Re-usable checks
- const ProgramState *checkNonNull(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef checkNonNull(CheckerContext &C,
+ ProgramStateRef state,
const Expr *S,
SVal l) const;
- const ProgramState *CheckLocation(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef CheckLocation(CheckerContext &C,
+ ProgramStateRef state,
const Expr *S,
SVal l,
const char *message = NULL) const;
- const ProgramState *CheckBufferAccess(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
const Expr *Size,
const Expr *FirstBuf,
const Expr *SecondBuf,
@@ -144,8 +162,8 @@ public:
const char *secondMessage = NULL,
bool WarnAboutSize = false) const;
- const ProgramState *CheckBufferAccess(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
const Expr *Size,
const Expr *Buf,
const char *message = NULL,
@@ -154,18 +172,18 @@ public:
return CheckBufferAccess(C, state, Size, Buf, NULL, message, NULL,
WarnAboutSize);
}
- const ProgramState *CheckOverlap(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef CheckOverlap(CheckerContext &C,
+ ProgramStateRef state,
const Expr *Size,
const Expr *First,
const Expr *Second) const;
void emitOverlapBug(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef state,
const Stmt *First,
const Stmt *Second) const;
- const ProgramState *checkAdditionOverflow(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef checkAdditionOverflow(CheckerContext &C,
+ ProgramStateRef state,
NonLoc left,
NonLoc right) const;
};
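
The new CStringChecksFilter follows the analyzer's usual filtered-checker pattern: one checker instance, several registration entry points, each flipping a DefaultBool. A sketch of how such registrations are typically wired up elsewhere in the analyzer; the macro and entry-point names here are illustrative, not taken from this patch.

#define REGISTER_CSTRING_CHECK(name)                                  \
  void ento::register##name(CheckerManager &mgr) {                    \
    CStringChecker *checker = mgr.registerChecker<CStringChecker>();  \
    checker->Filter.Check##name = true;                               \
  }

REGISTER_CSTRING_CHECK(CStringNullArg)
REGISTER_CSTRING_CHECK(CStringOutOfBounds)
REGISTER_CSTRING_CHECK(CStringBufferOverlap)
REGISTER_CSTRING_CHECK(CStringNotNullTerm)
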
@@ -190,38 +208,41 @@ namespace ento {
// Individual checks and utility methods.
//===----------------------------------------------------------------------===//
-std::pair<const ProgramState*, const ProgramState*>
-CStringChecker::assumeZero(CheckerContext &C, const ProgramState *state, SVal V,
+std::pair<ProgramStateRef , ProgramStateRef >
+CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
QualType Ty) {
DefinedSVal *val = dyn_cast<DefinedSVal>(&V);
if (!val)
- return std::pair<const ProgramState*, const ProgramState *>(state, state);
+ return std::pair<ProgramStateRef , ProgramStateRef >(state, state);
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal zero = svalBuilder.makeZeroVal(Ty);
return state->assume(svalBuilder.evalEQ(state, *val, zero));
}
-const ProgramState *CStringChecker::checkNonNull(CheckerContext &C,
- const ProgramState *state,
+ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
+ ProgramStateRef state,
const Expr *S, SVal l) const {
// If a previous check has failed, propagate the failure.
if (!state)
return NULL;
- const ProgramState *stateNull, *stateNonNull;
+ ProgramStateRef stateNull, stateNonNull;
llvm::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
if (stateNull && !stateNonNull) {
+ if (!Filter.CheckCStringNullArg)
+ return NULL;
+
ExplodedNode *N = C.generateSink(stateNull);
if (!N)
return NULL;
if (!BT_Null)
- BT_Null.reset(new BuiltinBug("API",
+ BT_Null.reset(new BuiltinBug("Unix API",
"Null pointer argument in call to byte string function"));
- llvm::SmallString<80> buf;
+ SmallString<80> buf;
llvm::raw_svector_ostream os(buf);
assert(CurrentFunctionDescription);
os << "Null pointer argument in call to " << CurrentFunctionDescription;
@@ -231,7 +252,8 @@ const ProgramState *CStringChecker::checkNonNull(CheckerContext &C,
BugReport *report = new BugReport(*BT, os.str(), N);
report->addRange(S->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, S));
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, S,
+ report));
C.EmitReport(report);
return NULL;
}
@@ -242,8 +264,8 @@ const ProgramState *CStringChecker::checkNonNull(CheckerContext &C,
}
// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
-const ProgramState *CStringChecker::CheckLocation(CheckerContext &C,
- const ProgramState *state,
+ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
+ ProgramStateRef state,
const Expr *S, SVal l,
const char *warningMsg) const {
// If a previous check has failed, propagate the failure.
@@ -272,8 +294,8 @@ const ProgramState *CStringChecker::CheckLocation(CheckerContext &C,
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
- const ProgramState *StInBound = state->assumeInBound(Idx, Size, true);
- const ProgramState *StOutBound = state->assumeInBound(Idx, Size, false);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateSink(StOutBound);
if (!N)
@@ -293,7 +315,7 @@ const ProgramState *CStringChecker::CheckLocation(CheckerContext &C,
assert(CurrentFunctionDescription);
assert(CurrentFunctionDescription[0] != '\0');
- llvm::SmallString<80> buf;
+ SmallString<80> buf;
llvm::raw_svector_ostream os(buf);
os << (char)toupper(CurrentFunctionDescription[0])
<< &CurrentFunctionDescription[1]
@@ -315,8 +337,8 @@ const ProgramState *CStringChecker::CheckLocation(CheckerContext &C,
return StInBound;
}
-const ProgramState *CStringChecker::CheckBufferAccess(CheckerContext &C,
- const ProgramState *state,
+ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
const Expr *Size,
const Expr *FirstBuf,
const Expr *SecondBuf,
@@ -329,20 +351,25 @@ const ProgramState *CStringChecker::CheckBufferAccess(CheckerContext &C,
SValBuilder &svalBuilder = C.getSValBuilder();
ASTContext &Ctx = svalBuilder.getContext();
+ const LocationContext *LCtx = C.getLocationContext();
QualType sizeTy = Size->getType();
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
// Check that the first buffer is non-null.
- SVal BufVal = state->getSVal(FirstBuf);
+ SVal BufVal = state->getSVal(FirstBuf, LCtx);
state = checkNonNull(C, state, FirstBuf, BufVal);
if (!state)
return NULL;
+ // If out-of-bounds checking is turned off, skip the rest.
+ if (!Filter.CheckCStringOutOfBounds)
+ return state;
+
// Get the access length and make sure it is known.
// FIXME: This assumes the caller has already checked that the access length
// is positive. And that it's unsigned.
- SVal LengthVal = state->getSVal(Size);
+ SVal LengthVal = state->getSVal(Size, LCtx);
NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
if (!Length)
return state;
@@ -368,7 +395,7 @@ const ProgramState *CStringChecker::CheckBufferAccess(CheckerContext &C,
// If there's a second buffer, check it as well.
if (SecondBuf) {
- BufVal = state->getSVal(SecondBuf);
+ BufVal = state->getSVal(SecondBuf, LCtx);
state = checkNonNull(C, state, SecondBuf, BufVal);
if (!state)
return NULL;
@@ -387,11 +414,14 @@ const ProgramState *CStringChecker::CheckBufferAccess(CheckerContext &C,
return state;
}
-const ProgramState *CStringChecker::CheckOverlap(CheckerContext &C,
- const ProgramState *state,
+ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
+ ProgramStateRef state,
const Expr *Size,
const Expr *First,
const Expr *Second) const {
+ if (!Filter.CheckCStringBufferOverlap)
+ return state;
+
// Do a simple check for overlap: if the two arguments are from the same
// buffer, see if the end of the first is greater than the start of the second
// or vice versa.
@@ -400,11 +430,12 @@ const ProgramState *CStringChecker::CheckOverlap(CheckerContext &C,
if (!state)
return NULL;
- const ProgramState *stateTrue, *stateFalse;
+ ProgramStateRef stateTrue, stateFalse;
// Get the buffer values and make sure they're known locations.
- SVal firstVal = state->getSVal(First);
- SVal secondVal = state->getSVal(Second);
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal firstVal = state->getSVal(First, LCtx);
+ SVal secondVal = state->getSVal(Second, LCtx);
Loc *firstLoc = dyn_cast<Loc>(&firstVal);
if (!firstLoc)
@@ -456,7 +487,7 @@ const ProgramState *CStringChecker::CheckOverlap(CheckerContext &C,
}
// Get the length, and make sure it too is known.
- SVal LengthVal = state->getSVal(Size);
+ SVal LengthVal = state->getSVal(Size, LCtx);
NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
if (!Length)
return state;
@@ -498,7 +529,7 @@ const ProgramState *CStringChecker::CheckOverlap(CheckerContext &C,
return stateFalse;
}
-void CStringChecker::emitOverlapBug(CheckerContext &C, const ProgramState *state,
+void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
const Stmt *First, const Stmt *Second) const {
ExplodedNode *N = C.generateSink(state);
if (!N)
@@ -517,10 +548,14 @@ void CStringChecker::emitOverlapBug(CheckerContext &C, const ProgramState *state
C.EmitReport(report);
}
-const ProgramState *CStringChecker::checkAdditionOverflow(CheckerContext &C,
- const ProgramState *state,
+ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
+ ProgramStateRef state,
NonLoc left,
NonLoc right) const {
+ // If out-of-bounds checking is turned off, skip the rest.
+ if (!Filter.CheckCStringOutOfBounds)
+ return state;
+
// If a previous check has failed, propagate the failure.
if (!state)
return NULL;
@@ -532,10 +567,11 @@ const ProgramState *CStringChecker::checkAdditionOverflow(CheckerContext &C,
const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
NonLoc maxVal = svalBuilder.makeIntVal(maxValInt);
- SVal maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
- sizeTy);
-
- if (maxMinusRight.isUnknownOrUndef()) {
+ SVal maxMinusRight;
+ if (isa<nonloc::ConcreteInt>(right)) {
+ maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
+ sizeTy);
+ } else {
// Try switching the operands. (The order of these two assignments is
// important!)
maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, left,
@@ -549,7 +585,7 @@ const ProgramState *CStringChecker::checkAdditionOverflow(CheckerContext &C,
SVal willOverflow = svalBuilder.evalBinOpNN(state, BO_GT, left,
*maxMinusRightNL, cmpTy);
- const ProgramState *stateOverflow, *stateOkay;
+ ProgramStateRef stateOverflow, stateOkay;
llvm::tie(stateOverflow, stateOkay) =
state->assume(cast<DefinedOrUnknownSVal>(willOverflow));
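
These two hunks encode the standard unsigned-overflow predicate: left + right wraps exactly when left > MAX - right. The rewrite chooses which operand to subtract from MAX so that, when one side is a concrete integer, the subtraction folds to a constant and the symbolic operand stands alone in the comparison. A standalone illustration of the predicate itself:

#include <cstdint>
#include <cstdio>

static bool addWouldWrap(uint64_t left, uint64_t right) {
  // Safe to evaluate: MAX - right never underflows for unsigned right.
  return left > UINT64_MAX - right;
}

int main() {
  std::printf("%d\n", addWouldWrap(UINT64_MAX, 1));      // 1: wraps
  std::printf("%d\n", addWouldWrap(UINT64_MAX - 1, 1));  // 0: fits exactly
  return 0;
}
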
@@ -585,7 +621,7 @@ const ProgramState *CStringChecker::checkAdditionOverflow(CheckerContext &C,
return state;
}
-const ProgramState *CStringChecker::setCStringLength(const ProgramState *state,
+ProgramStateRef CStringChecker::setCStringLength(ProgramStateRef state,
const MemRegion *MR,
SVal strLength) {
assert(!strLength.isUndef() && "Attempt to set an undefined string length");
@@ -626,7 +662,7 @@ const ProgramState *CStringChecker::setCStringLength(const ProgramState *state,
}
SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
- const ProgramState *&state,
+ ProgramStateRef &state,
const Expr *Ex,
const MemRegion *MR,
bool hypothetical) {
@@ -650,7 +686,7 @@ SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
return strLength;
}
-SVal CStringChecker::getCStringLength(CheckerContext &C, const ProgramState *&state,
+SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
const Expr *Ex, SVal Buf,
bool hypothetical) const {
const MemRegion *MR = Buf.getAsRegion();
@@ -659,12 +695,15 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, const ProgramState *&st
// C string. In the context of locations, the only time we can issue such
// a warning is for labels.
if (loc::GotoLabel *Label = dyn_cast<loc::GotoLabel>(&Buf)) {
- if (ExplodedNode *N = C.generateNode(state)) {
+ if (!Filter.CheckCStringNotNullTerm)
+ return UndefinedVal();
+
+ if (ExplodedNode *N = C.addTransition(state)) {
if (!BT_NotCString)
- BT_NotCString.reset(new BuiltinBug("API",
+ BT_NotCString.reset(new BuiltinBug("Unix API",
"Argument is not a null-terminated string."));
- llvm::SmallString<120> buf;
+ SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
assert(CurrentFunctionDescription);
os << "Argument to " << CurrentFunctionDescription
@@ -678,8 +717,8 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, const ProgramState *&st
report->addRange(Ex->getSourceRange());
C.EmitReport(report);
}
-
return UndefinedVal();
+
}
// If it's not a region and not a label, give up.
@@ -716,12 +755,15 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, const ProgramState *&st
// Other regions (mostly non-data) can't have a reliable C string length.
// In this case, an error is emitted and UndefinedVal is returned.
// The caller should always be prepared to handle this case.
- if (ExplodedNode *N = C.generateNode(state)) {
+ if (!Filter.CheckCStringNotNullTerm)
+ return UndefinedVal();
+
+ if (ExplodedNode *N = C.addTransition(state)) {
if (!BT_NotCString)
- BT_NotCString.reset(new BuiltinBug("API",
+ BT_NotCString.reset(new BuiltinBug("Unix API",
"Argument is not a null-terminated string."));
- llvm::SmallString<120> buf;
+ SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
assert(CurrentFunctionDescription);
@@ -745,7 +787,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, const ProgramState *&st
}
const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
- const ProgramState *&state, const Expr *expr, SVal val) const {
+ ProgramStateRef &state, const Expr *expr, SVal val) const {
// Get the memory region pointed to by the val.
const MemRegion *bufRegion = val.getAsRegion();
@@ -764,8 +806,8 @@ const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
return strRegion->getStringLiteral();
}
-const ProgramState *CStringChecker::InvalidateBuffer(CheckerContext &C,
- const ProgramState *state,
+ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
+ ProgramStateRef state,
const Expr *E, SVal V) {
Loc *L = dyn_cast<Loc>(&V);
if (!L)
@@ -786,7 +828,8 @@ const ProgramState *CStringChecker::InvalidateBuffer(CheckerContext &C,
// Invalidate this region.
unsigned Count = C.getCurrentBlockCount();
- return state->invalidateRegions(R, E, Count);
+ const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+ return state->invalidateRegions(R, E, Count, LCtx);
}
// If we have a non-region value by chance, just remove the binding.
@@ -838,27 +881,28 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
void CStringChecker::evalCopyCommon(CheckerContext &C,
const CallExpr *CE,
- const ProgramState *state,
+ ProgramStateRef state,
const Expr *Size, const Expr *Dest,
const Expr *Source, bool Restricted,
bool IsMempcpy) const {
CurrentFunctionDescription = "memory copy function";
// See if the size argument is zero.
- SVal sizeVal = state->getSVal(Size);
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal sizeVal = state->getSVal(Size, LCtx);
QualType sizeTy = Size->getType();
- const ProgramState *stateZeroSize, *stateNonZeroSize;
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
llvm::tie(stateZeroSize, stateNonZeroSize) =
assumeZero(C, state, sizeVal, sizeTy);
// Get the value of the Dest.
- SVal destVal = state->getSVal(Dest);
+ SVal destVal = state->getSVal(Dest, LCtx);
// If the size is zero, there won't be any actual memory access, so
// just bind the return value to the destination buffer and return.
if (stateZeroSize) {
- stateZeroSize = stateZeroSize->BindExpr(CE, destVal);
+ stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, destVal);
C.addTransition(stateZeroSize);
}
@@ -873,7 +917,7 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
return;
// Get the value of the Src.
- SVal srcVal = state->getSVal(Source);
+ SVal srcVal = state->getSVal(Source, LCtx);
// Ensure the source is not null. If it is NULL there will be a
// NULL pointer dereference.
@@ -909,20 +953,20 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
Dest->getType());
// The byte after the last byte copied is the return value.
- state = state->BindExpr(CE, lastElement);
+ state = state->BindExpr(CE, LCtx, lastElement);
} else {
// If we don't know how much we copied, we can at least
// conjure a return value for later.
unsigned Count = C.getCurrentBlockCount();
SVal result =
- C.getSValBuilder().getConjuredSymbolVal(NULL, CE, Count);
- state = state->BindExpr(CE, result);
+ C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ state = state->BindExpr(CE, LCtx, result);
}
} else {
// All other copies return the destination buffer.
// (Well, bcopy() has a void return type, but this won't hurt.)
- state = state->BindExpr(CE, destVal);
+ state = state->BindExpr(CE, LCtx, destVal);
}
// Invalidate the destination.
@@ -930,46 +974,62 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// copied region, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dest, state->getSVal(Dest));
+ state = InvalidateBuffer(C, state, Dest,
+ state->getSVal(Dest, C.getLocationContext()));
C.addTransition(state);
}
}
void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
const Expr *Dest = CE->getArg(0);
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true);
}
void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
// void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is a pointer to the byte following the last written byte.
const Expr *Dest = CE->getArg(0);
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true, true);
}
void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
// void *memmove(void *dst, const void *src, size_t n);
// The return value is the address of the destination buffer.
const Expr *Dest = CE->getArg(0);
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1));
}
void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
// void bcopy(const void *src, void *dst, size_t n);
evalCopyCommon(C, CE, C.getState(),
CE->getArg(2), CE->getArg(1), CE->getArg(0));
}
void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
// int memcmp(const void *s1, const void *s2, size_t n);
CurrentFunctionDescription = "memory comparison function";
@@ -977,14 +1037,15 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
const Expr *Right = CE->getArg(1);
const Expr *Size = CE->getArg(2);
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
SValBuilder &svalBuilder = C.getSValBuilder();
// See if the size argument is zero.
- SVal sizeVal = state->getSVal(Size);
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal sizeVal = state->getSVal(Size, LCtx);
QualType sizeTy = Size->getType();
- const ProgramState *stateZeroSize, *stateNonZeroSize;
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
llvm::tie(stateZeroSize, stateNonZeroSize) =
assumeZero(C, state, sizeVal, sizeTy);
@@ -992,7 +1053,8 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
// have to check either of the buffers.
if (stateZeroSize) {
state = stateZeroSize;
- state = state->BindExpr(CE, svalBuilder.makeZeroVal(CE->getType()));
+ state = state->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
C.addTransition(state);
}
@@ -1002,12 +1064,14 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
// If we know the two buffers are the same, we know the result is 0.
// First, get the two buffers' addresses. Another checker will have already
// made sure they're not undefined.
- DefinedOrUnknownSVal LV = cast<DefinedOrUnknownSVal>(state->getSVal(Left));
- DefinedOrUnknownSVal RV = cast<DefinedOrUnknownSVal>(state->getSVal(Right));
+ DefinedOrUnknownSVal LV =
+ cast<DefinedOrUnknownSVal>(state->getSVal(Left, LCtx));
+ DefinedOrUnknownSVal RV =
+ cast<DefinedOrUnknownSVal>(state->getSVal(Right, LCtx));
// See if they are the same.
DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
- const ProgramState *StSameBuf, *StNotSameBuf;
+ ProgramStateRef StSameBuf, StNotSameBuf;
llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
// If the two arguments might be the same buffer, we know the result is 0,
@@ -1016,8 +1080,9 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
state = StSameBuf;
state = CheckBufferAccess(C, state, Size, Left);
if (state) {
- state = StSameBuf->BindExpr(CE, svalBuilder.makeZeroVal(CE->getType()));
- C.addTransition(state);
+ state = StSameBuf->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
+ C.addTransition(state);
}
}
@@ -1029,8 +1094,8 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
if (state) {
// The return value is the comparison result, which we don't know.
unsigned Count = C.getCurrentBlockCount();
- SVal CmpV = svalBuilder.getConjuredSymbolVal(NULL, CE, Count);
- state = state->BindExpr(CE, CmpV);
+ SVal CmpV = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ state = state->BindExpr(CE, LCtx, CmpV);
C.addTransition(state);
}
}
@@ -1039,12 +1104,18 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
void CStringChecker::evalstrLength(CheckerContext &C,
const CallExpr *CE) const {
+ if (CE->getNumArgs() < 1)
+ return;
+
// size_t strlen(const char *s);
evalstrLengthCommon(C, CE, /* IsStrnlen = */ false);
}
void CStringChecker::evalstrnLength(CheckerContext &C,
const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
// size_t strnlen(const char *s, size_t maxlen);
evalstrLengthCommon(C, CE, /* IsStrnlen = */ true);
}
@@ -1052,13 +1123,14 @@ void CStringChecker::evalstrnLength(CheckerContext &C,
void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
bool IsStrnlen) const {
CurrentFunctionDescription = "string length function";
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
if (IsStrnlen) {
const Expr *maxlenExpr = CE->getArg(1);
- SVal maxlenVal = state->getSVal(maxlenExpr);
+ SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
- const ProgramState *stateZeroSize, *stateNonZeroSize;
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
llvm::tie(stateZeroSize, stateNonZeroSize) =
assumeZero(C, state, maxlenVal, maxlenExpr->getType());
@@ -1066,7 +1138,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// have to check the string itself.
if (stateZeroSize) {
SVal zero = C.getSValBuilder().makeZeroVal(CE->getType());
- stateZeroSize = stateZeroSize->BindExpr(CE, zero);
+ stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, zero);
C.addTransition(stateZeroSize);
}
@@ -1080,7 +1152,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// Check that the string argument is non-null.
const Expr *Arg = CE->getArg(0);
- SVal ArgVal = state->getSVal(Arg);
+ SVal ArgVal = state->getSVal(Arg, LCtx);
state = checkNonNull(C, state, Arg, ArgVal);
@@ -1104,13 +1176,13 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// It's a little unfortunate to be getting this again,
// but it's not that expensive...
const Expr *maxlenExpr = CE->getArg(1);
- SVal maxlenVal = state->getSVal(maxlenExpr);
+ SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
NonLoc *strLengthNL = dyn_cast<NonLoc>(&strLength);
NonLoc *maxlenValNL = dyn_cast<NonLoc>(&maxlenVal);
if (strLengthNL && maxlenValNL) {
- const ProgramState *stateStringTooLong, *stateStringNotTooLong;
+ ProgramStateRef stateStringTooLong, stateStringNotTooLong;
// Check if the strLength is greater than the maxlen.
llvm::tie(stateStringTooLong, stateStringNotTooLong) =
@@ -1135,7 +1207,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// All we know is the return value is the min of the string length
// and the limit. This is better than nothing.
unsigned Count = C.getCurrentBlockCount();
- result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, Count);
+ result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
NonLoc *resultNL = cast<NonLoc>(&result);
if (strLengthNL) {
@@ -1163,17 +1235,20 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// value, so it can be used in constraints, at least.
if (result.isUnknown()) {
unsigned Count = C.getCurrentBlockCount();
- result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, Count);
+ result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
}
}
// Bind the return value.
assert(!result.isUnknown() && "Should have conjured a value by now");
- state = state->BindExpr(CE, result);
+ state = state->BindExpr(CE, LCtx, result);
C.addTransition(state);
}
void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
// char *strcpy(char *restrict dst, const char *restrict src);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
@@ -1182,6 +1257,9 @@ void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
// char *strncpy(char *restrict dst, const char *restrict src, size_t n);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
@@ -1190,6 +1268,9 @@ void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
// char *stpcpy(char *restrict dst, const char *restrict src);
evalStrcpyCommon(C, CE,
/* returnEnd = */ true,
@@ -1198,6 +1279,9 @@ void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
//char *strcat(char *restrict s1, const char *restrict s2);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
@@ -1206,6 +1290,9 @@ void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
}
void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
//char *strncat(char *restrict s1, const char *restrict s2, size_t n);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
@@ -1217,11 +1304,12 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
bool returnEnd, bool isBounded,
bool isAppending) const {
CurrentFunctionDescription = "string copy function";
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
// Check that the destination is non-null.
const Expr *Dst = CE->getArg(0);
- SVal DstVal = state->getSVal(Dst);
+ SVal DstVal = state->getSVal(Dst, LCtx);
state = checkNonNull(C, state, Dst, DstVal);
if (!state)
@@ -1229,7 +1317,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// Check that the source is non-null.
const Expr *srcExpr = CE->getArg(1);
- SVal srcVal = state->getSVal(srcExpr);
+ SVal srcVal = state->getSVal(srcExpr, LCtx);
state = checkNonNull(C, state, srcExpr, srcVal);
if (!state)
return;
@@ -1256,7 +1344,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
if (isBounded) {
// Get the max number of characters to copy.
const Expr *lenExpr = CE->getArg(2);
- SVal lenVal = state->getSVal(lenExpr);
+ SVal lenVal = state->getSVal(lenExpr, LCtx);
// Protect against misdeclared strncpy().
lenVal = svalBuilder.evalCast(lenVal, sizeTy, lenExpr->getType());
@@ -1267,7 +1355,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If we know both values, we might be able to figure out how much
// we're copying.
if (strLengthNL && lenValNL) {
- const ProgramState *stateSourceTooLong, *stateSourceNotTooLong;
+ ProgramStateRef stateSourceTooLong, stateSourceNotTooLong;
// Check if the max number to copy is less than the length of the src.
// If the bound is equal to the source length, strncpy won't null-
@@ -1507,32 +1595,44 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// overflow, we still need a result. Conjure a return value.
if (returnEnd && Result.isUnknown()) {
unsigned Count = C.getCurrentBlockCount();
- Result = svalBuilder.getConjuredSymbolVal(NULL, CE, Count);
+ Result = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
}
// Set the return value.
- state = state->BindExpr(CE, Result);
+ state = state->BindExpr(CE, LCtx, Result);
C.addTransition(state);
}
void CStringChecker::evalStrcmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
//int strcmp(const char *s1, const char *s2);
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ false);
}
void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
//int strncmp(const char *s1, const char *s2, size_t n);
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ false);
}
void CStringChecker::evalStrcasecmp(CheckerContext &C,
const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
//int strcasecmp(const char *s1, const char *s2);
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ true);
}
void CStringChecker::evalStrncasecmp(CheckerContext &C,
const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
//int strncasecmp(const char *s1, const char *s2, size_t n);
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ true);
}
@@ -1540,18 +1640,19 @@ void CStringChecker::evalStrncasecmp(CheckerContext &C,
void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
bool isBounded, bool ignoreCase) const {
CurrentFunctionDescription = "string comparison function";
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
// Check that the first string is non-null
const Expr *s1 = CE->getArg(0);
- SVal s1Val = state->getSVal(s1);
+ SVal s1Val = state->getSVal(s1, LCtx);
state = checkNonNull(C, state, s1, s1Val);
if (!state)
return;
// Check that the second string is non-null.
const Expr *s2 = CE->getArg(1);
- SVal s2Val = state->getSVal(s2);
+ SVal s2Val = state->getSVal(s2, LCtx);
state = checkNonNull(C, state, s2, s2Val);
if (!state)
return;
@@ -1575,13 +1676,14 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// See if they are the same.
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
- const ProgramState *StSameBuf, *StNotSameBuf;
+ ProgramStateRef StSameBuf, StNotSameBuf;
llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
// If the two arguments might be the same buffer, we know the result is 0,
// and we only need to check one size.
if (StSameBuf) {
- StSameBuf = StSameBuf->BindExpr(CE, svalBuilder.makeZeroVal(CE->getType()));
+ StSameBuf = StSameBuf->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
C.addTransition(StSameBuf);
// If the two arguments are GUARANTEED to be the same, we're done!
@@ -1607,7 +1709,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
if (isBounded) {
// Get the max number of characters to compare.
const Expr *lenExpr = CE->getArg(2);
- SVal lenVal = state->getSVal(lenExpr);
+ SVal lenVal = state->getSVal(lenExpr, LCtx);
// If the length is known, we can get the right substrings.
if (const llvm::APSInt *len = svalBuilder.getKnownValue(state, lenVal)) {
@@ -1644,15 +1746,15 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// Build the SVal of the comparison and bind the return value.
SVal resultVal = svalBuilder.makeIntVal(result, CE->getType());
- state = state->BindExpr(CE, resultVal);
+ state = state->BindExpr(CE, LCtx, resultVal);
}
}
if (!canComputeResult) {
// Conjure a symbolic value. It's the best we can do.
unsigned Count = C.getCurrentBlockCount();
- SVal resultVal = svalBuilder.getConjuredSymbolVal(NULL, CE, Count);
- state = state->BindExpr(CE, resultVal);
+ SVal resultVal = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ state = state->BindExpr(CE, LCtx, resultVal);
}
// Record this as a possible path.
@@ -1664,42 +1766,47 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
//===----------------------------------------------------------------------===//
bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
- // Get the callee. All the functions we care about are C functions
- // with simple identifiers.
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- const FunctionDecl *FD = state->getSVal(Callee).getAsFunctionDecl();
-
- if (!FD)
- return false;
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
- // Get the name of the callee. If it's a builtin, strip off the prefix.
- IdentifierInfo *II = FD->getIdentifier();
- if (!II) // if no identifier, not a simple C function
+ if (!FDecl)
return false;
- StringRef Name = II->getName();
- if (Name.startswith("__builtin_"))
- Name = Name.substr(10);
-
- FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
- .Cases("memcpy", "__memcpy_chk", &CStringChecker::evalMemcpy)
- .Cases("mempcpy", "__mempcpy_chk", &CStringChecker::evalMempcpy)
- .Cases("memcmp", "bcmp", &CStringChecker::evalMemcmp)
- .Cases("memmove", "__memmove_chk", &CStringChecker::evalMemmove)
- .Cases("strcpy", "__strcpy_chk", &CStringChecker::evalStrcpy)
- .Cases("strncpy", "__strncpy_chk", &CStringChecker::evalStrncpy)
- .Cases("stpcpy", "__stpcpy_chk", &CStringChecker::evalStpcpy)
- .Cases("strcat", "__strcat_chk", &CStringChecker::evalStrcat)
- .Cases("strncat", "__strncat_chk", &CStringChecker::evalStrncat)
- .Case("strlen", &CStringChecker::evalstrLength)
- .Case("strnlen", &CStringChecker::evalstrnLength)
- .Case("strcmp", &CStringChecker::evalStrcmp)
- .Case("strncmp", &CStringChecker::evalStrncmp)
- .Case("strcasecmp", &CStringChecker::evalStrcasecmp)
- .Case("strncasecmp", &CStringChecker::evalStrncasecmp)
- .Case("bcopy", &CStringChecker::evalBcopy)
- .Default(NULL);
+ FnCheck evalFunction = 0;
+ if (C.isCLibraryFunction(FDecl, "memcpy"))
+ evalFunction = &CStringChecker::evalMemcpy;
+ else if (C.isCLibraryFunction(FDecl, "mempcpy"))
+ evalFunction = &CStringChecker::evalMempcpy;
+ else if (C.isCLibraryFunction(FDecl, "memcmp"))
+ evalFunction = &CStringChecker::evalMemcmp;
+ else if (C.isCLibraryFunction(FDecl, "memmove"))
+ evalFunction = &CStringChecker::evalMemmove;
+ else if (C.isCLibraryFunction(FDecl, "strcpy"))
+ evalFunction = &CStringChecker::evalStrcpy;
+ else if (C.isCLibraryFunction(FDecl, "strncpy"))
+ evalFunction = &CStringChecker::evalStrncpy;
+ else if (C.isCLibraryFunction(FDecl, "stpcpy"))
+ evalFunction = &CStringChecker::evalStpcpy;
+ else if (C.isCLibraryFunction(FDecl, "strcat"))
+ evalFunction = &CStringChecker::evalStrcat;
+ else if (C.isCLibraryFunction(FDecl, "strncat"))
+ evalFunction = &CStringChecker::evalStrncat;
+ else if (C.isCLibraryFunction(FDecl, "strlen"))
+ evalFunction = &CStringChecker::evalstrLength;
+ else if (C.isCLibraryFunction(FDecl, "strnlen"))
+ evalFunction = &CStringChecker::evalstrnLength;
+ else if (C.isCLibraryFunction(FDecl, "strcmp"))
+ evalFunction = &CStringChecker::evalStrcmp;
+ else if (C.isCLibraryFunction(FDecl, "strncmp"))
+ evalFunction = &CStringChecker::evalStrncmp;
+ else if (C.isCLibraryFunction(FDecl, "strcasecmp"))
+ evalFunction = &CStringChecker::evalStrcasecmp;
+ else if (C.isCLibraryFunction(FDecl, "strncasecmp"))
+ evalFunction = &CStringChecker::evalStrncasecmp;
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ evalFunction = &CStringChecker::evalBcopy;
+ else if (C.isCLibraryFunction(FDecl, "bcmp"))
+ evalFunction = &CStringChecker::evalMemcmp;
+
// If the callee isn't a string function, let another checker handle it.
if (!evalFunction)
return false;
@@ -1710,12 +1817,22 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Check and evaluate the call.
(this->*evalFunction)(C, CE);
+
+ // If the evaluate call resulted in no change, chain to the next eval call
+ // handler.
+ // Note that the custom CString evaluation calls assume that basic safety
+ // properties hold. However, if the user chooses to turn off some of these
+ // checks, we ignore the issues and leave the call evaluation to a generic
+ // handler.
+ if (!C.isDifferent())
+ return false;
+
return true;
}
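
The pattern running through these hunks is that expression values are now keyed by a LocationContext, so every getSVal/BindExpr call has to thread C.getLocationContext() through. A minimal sketch of the updated idiom inside a callback like the one above (the checker body is hypothetical; CE is the visited CallExpr, and only interfaces that appear in this diff are used):

    ProgramStateRef state = C.getState();
    const LocationContext *LCtx = C.getLocationContext();
    // Read an argument's value in this stack frame's context.
    SVal ArgVal = state->getSVal(CE->getArg(0), LCtx);
    // Bind a result to the call expression, again keyed by LCtx.
    SVal Zero = C.getSValBuilder().makeZeroVal(CE->getType());
    C.addTransition(state->BindExpr(CE, LCtx, Zero));
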
void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
// Record string length for char a[] = "abc";
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
for (DeclStmt::const_decl_iterator I = DS->decl_begin(), E = DS->decl_end();
I != E; ++I) {
@@ -1733,12 +1850,12 @@ void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (!isa<StringLiteral>(Init))
continue;
- Loc VarLoc = state->getLValue(D, C.getPredecessor()->getLocationContext());
+ Loc VarLoc = state->getLValue(D, C.getLocationContext());
const MemRegion *MR = VarLoc.getAsRegion();
if (!MR)
continue;
- SVal StrVal = state->getSVal(Init);
+ SVal StrVal = state->getSVal(Init, C.getLocationContext());
assert(StrVal.isValid() && "Initializer string is unknown or undefined");
DefinedOrUnknownSVal strLength
= cast<DefinedOrUnknownSVal>(getCStringLength(C, state, Init, StrVal));
@@ -1749,16 +1866,17 @@ void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
C.addTransition(state);
}
-bool CStringChecker::wantsRegionChangeUpdate(const ProgramState *state) const {
+bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const {
CStringLength::EntryMap Entries = state->get<CStringLength>();
return !Entries.isEmpty();
}
-const ProgramState *
-CStringChecker::checkRegionChanges(const ProgramState *state,
+ProgramStateRef
+CStringChecker::checkRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions) const {
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const {
CStringLength::EntryMap Entries = state->get<CStringLength>();
if (Entries.isEmpty())
return state;
@@ -1806,7 +1924,7 @@ CStringChecker::checkRegionChanges(const ProgramState *state,
return state->set<CStringLength>(Entries);
}
-void CStringChecker::checkLiveSymbols(const ProgramState *state,
+void CStringChecker::checkLiveSymbols(ProgramStateRef state,
SymbolReaper &SR) const {
// Mark all symbols in our string length map as valid.
CStringLength::EntryMap Entries = state->get<CStringLength>();
@@ -1815,8 +1933,8 @@ void CStringChecker::checkLiveSymbols(const ProgramState *state,
I != E; ++I) {
SVal Len = I.getData();
- for (SVal::symbol_iterator si = Len.symbol_begin(), se = Len.symbol_end();
- si != se; ++si)
+ for (SymExpr::symbol_iterator si = Len.symbol_begin(),
+ se = Len.symbol_end(); si != se; ++si)
SR.markInUse(*si);
}
}
@@ -1826,7 +1944,7 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
if (!SR.hasDeadSymbols())
return;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
CStringLength::EntryMap Entries = state->get<CStringLength>();
if (Entries.isEmpty())
return;
@@ -1842,9 +1960,22 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
}
state = state->set<CStringLength>(Entries);
- C.generateNode(state);
+ C.addTransition(state);
}
-void ento::registerCStringChecker(CheckerManager &mgr) {
- mgr.registerChecker<CStringChecker>();
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+ static CStringChecker *TheChecker = 0; \
+ if (TheChecker == 0) \
+ TheChecker = mgr.registerChecker<CStringChecker>(); \
+ TheChecker->Filter.Check##name = true; \
+}
+
+REGISTER_CHECKER(CStringNullArg)
+REGISTER_CHECKER(CStringOutOfBounds)
+REGISTER_CHECKER(CStringBufferOverlap)
+REGISTER_CHECKER(CStringNotNullTerm)
+
+void ento::registerCStringCheckerBasic(CheckerManager &Mgr) {
+ registerCStringNullArg(Mgr);
}
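
For reference, REGISTER_CHECKER(CStringNullArg) above expands to:

    void ento::registerCStringNullArg(CheckerManager &mgr) {
      static CStringChecker *TheChecker = 0;
      if (TheChecker == 0)
        TheChecker = mgr.registerChecker<CStringChecker>();
      TheChecker->Filter.CheckCStringNullArg = true;
    }

The function-local static caches the instance, so repeated invocations of the same registration function reuse it instead of registering twice; each generated function then flips only its own Filter bit.
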
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
new file mode 100644
index 0000000..befc935
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -0,0 +1,191 @@
+//== CStringSyntaxChecker.cpp - Syntax-based C string API checker -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An AST checker that looks for common pitfalls when using C string APIs.
+// - Identifies erroneous patterns in the last argument to strncat - the number
+// of bytes to copy.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TypeTraits.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST: public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext* AC;
+ ASTContext &ASTC;
+
+ /// Check if two expressions refer to the same declaration.
+ inline bool sameDecl(const Expr *A1, const Expr *A2) {
+ if (const DeclRefExpr *D1 = dyn_cast<DeclRefExpr>(A1->IgnoreParenCasts()))
+ if (const DeclRefExpr *D2 = dyn_cast<DeclRefExpr>(A2->IgnoreParenCasts()))
+ return D1->getDecl() == D2->getDecl();
+ return false;
+ }
+
+ /// Check if the expression E is a sizeof(WithArg).
+ inline bool isSizeof(const Expr *E, const Expr *WithArg) {
+ if (const UnaryExprOrTypeTraitExpr *UE =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (UE->getKind() == UETT_SizeOf)
+ return sameDecl(UE->getArgumentExpr(), WithArg);
+ return false;
+ }
+
+ /// Check if the expression E is a strlen(WithArg).
+ inline bool isStrlen(const Expr *E, const Expr *WithArg) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return false;
+ return (CheckerContext::isCLibraryFunction(FD, "strlen", ASTC)
+ && sameDecl(CE->getArg(0), WithArg));
+ }
+ return false;
+ }
+
+ /// Check if the expression is an integer literal with value 0 or 1
+ /// (i.e. one that fits in a single bit, which is what isIntN(1) tests).
+ inline bool isOne(const Expr *E) {
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+ return (IL->getValue().isIntN(1));
+ return false;
+ }
+
+ inline StringRef getPrintableName(const Expr *E) {
+ if (const DeclRefExpr *D = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ return D->getDecl()->getName();
+ return StringRef();
+ }
+
+ /// Identify erroneous patterns in the last argument to strncat - the number
+ /// of bytes to copy.
+ bool containsBadStrncatPattern(const CallExpr *CE);
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac) :
+ BR(br), AC(ac), ASTC(AC->getASTContext()) {
+ }
+
+ // Statement visitor methods.
+ void VisitChildren(Stmt *S);
+ void VisitStmt(Stmt *S) {
+ VisitChildren(S);
+ }
+ void VisitCallExpr(CallExpr *CE);
+};
+} // end anonymous namespace
+
+// The correct size argument should look like following:
+// strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
+// We look for the following anti-patterns:
+// - strncat(dst, src, sizeof(dst) - strlen(dst));
+// - strncat(dst, src, sizeof(dst) - 1);
+// - strncat(dst, src, sizeof(dst));
+bool WalkAST::containsBadStrncatPattern(const CallExpr *CE) {
+ const Expr *DstArg = CE->getArg(0);
+ const Expr *SrcArg = CE->getArg(1);
+ const Expr *LenArg = CE->getArg(2);
+
+ // Identify wrong size expressions, which are commonly used instead.
+ if (const BinaryOperator *BE =
+ dyn_cast<BinaryOperator>(LenArg->IgnoreParenCasts())) {
+ // - sizeof(dst) - strlen(dst)
+ if (BE->getOpcode() == BO_Sub) {
+ const Expr *L = BE->getLHS();
+ const Expr *R = BE->getRHS();
+ if (isSizeof(L, DstArg) && isStrlen(R, DstArg))
+ return true;
+
+ // - sizeof(dst) - 1
+ if (isSizeof(L, DstArg) && isOne(R->IgnoreParenCasts()))
+ return true;
+ }
+ }
+ // - sizeof(dst)
+ if (isSizeof(LenArg, DstArg))
+ return true;
+
+ // - sizeof(src)
+ if (isSizeof(LenArg, SrcArg))
+ return true;
+ return false;
+}
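
Concretely, in a caller like the following (hypothetical, for illustration), the first three calls match the anti-patterns and the last one is the accepted form:

    #include <string.h>

    void append(const char *src) {
      char dst[64] = "";
      strncat(dst, src, sizeof(dst) - strlen(dst));     // flagged: no room left for '\0'
      strncat(dst, src, sizeof(dst) - 1);               // flagged: ignores what is already in dst
      strncat(dst, src, sizeof(dst));                   // flagged: can overflow dst
      strncat(dst, src, sizeof(dst) - strlen(dst) - 1); // correct bound
    }
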
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return;
+
+ if (CheckerContext::isCLibraryFunction(FD, "strncat", ASTC)) {
+ if (containsBadStrncatPattern(CE)) {
+ const Expr *DstArg = CE->getArg(0);
+ const Expr *LenArg = CE->getArg(2);
+ SourceRange R = LenArg->getSourceRange();
+ PathDiagnosticLocation Loc =
+ PathDiagnosticLocation::createBegin(LenArg, BR.getSourceManager(), AC);
+
+ StringRef DstName = getPrintableName(DstArg);
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Potential buffer overflow. ";
+ if (!DstName.empty()) {
+ os << "Replace with 'sizeof(" << DstName << ") "
+ "- strlen(" << DstName <<") - 1'";
+ os << " or u";
+ } else
+ os << "U";
+ os << "se a safer 'strlcat' API";
+
+ BR.EmitBasicReport(FD, "Anti-pattern in the argument", "C String API",
+ os.str(), Loc, &R, 1);
+ }
+ }
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I != E;
+ ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+namespace {
+class CStringSyntaxChecker: public Checker<check::ASTCodeBody> {
+public:
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, Mgr.getAnalysisDeclContext(D));
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerCStringSyntaxChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CStringSyntaxChecker>();
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 4db6ac0..f601431 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -20,6 +20,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
@@ -27,12 +28,13 @@ using namespace ento;
namespace {
class CallAndMessageChecker
: public Checker< check::PreStmt<CallExpr>, check::PreObjCMessage > {
- mutable llvm::OwningPtr<BugType> BT_call_null;
- mutable llvm::OwningPtr<BugType> BT_call_undef;
- mutable llvm::OwningPtr<BugType> BT_call_arg;
- mutable llvm::OwningPtr<BugType> BT_msg_undef;
- mutable llvm::OwningPtr<BugType> BT_msg_arg;
- mutable llvm::OwningPtr<BugType> BT_msg_ret;
+ mutable OwningPtr<BugType> BT_call_null;
+ mutable OwningPtr<BugType> BT_call_undef;
+ mutable OwningPtr<BugType> BT_call_arg;
+ mutable OwningPtr<BugType> BT_msg_undef;
+ mutable OwningPtr<BugType> BT_objc_prop_undef;
+ mutable OwningPtr<BugType> BT_msg_arg;
+ mutable OwningPtr<BugType> BT_msg_ret;
public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
@@ -40,19 +42,22 @@ public:
private:
static void PreVisitProcessArgs(CheckerContext &C,CallOrObjCMessage callOrMsg,
- const char *BT_desc, llvm::OwningPtr<BugType> &BT);
+ const char *BT_desc, OwningPtr<BugType> &BT);
static bool PreVisitProcessArg(CheckerContext &C, SVal V,SourceRange argRange,
- const Expr *argEx, const char *BT_desc, llvm::OwningPtr<BugType> &BT);
+ const Expr *argEx,
+ const bool checkUninitFields,
+ const char *BT_desc,
+ OwningPtr<BugType> &BT);
static void EmitBadCall(BugType *BT, CheckerContext &C, const CallExpr *CE);
void emitNilReceiverBug(CheckerContext &C, const ObjCMessage &msg,
ExplodedNode *N) const;
void HandleNilReceiver(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef state,
ObjCMessage msg) const;
- static void LazyInit_BT(const char *desc, llvm::OwningPtr<BugType> &BT) {
+ static void LazyInit_BT(const char *desc, OwningPtr<BugType> &BT) {
if (!BT)
BT.reset(new BuiltinBug(desc));
}
@@ -67,17 +72,27 @@ void CallAndMessageChecker::EmitBadCall(BugType *BT, CheckerContext &C,
BugReport *R = new BugReport(*BT, BT->getName(), N);
R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- bugreporter::GetCalleeExpr(N)));
+ bugreporter::GetCalleeExpr(N), R));
C.EmitReport(R);
}
void CallAndMessageChecker::PreVisitProcessArgs(CheckerContext &C,
CallOrObjCMessage callOrMsg,
const char *BT_desc,
- llvm::OwningPtr<BugType> &BT) {
+ OwningPtr<BugType> &BT) {
+ // Don't check for uninitialized field values in arguments if the
+ // caller has a body that is available and we have the chance to inline it.
+ // This is a hack, but it is a reasonable compromise between sometimes warning
+ // and sometimes not, depending on whether we decide to inline a function.
+ const Decl *D = callOrMsg.getDecl();
+ const bool checkUninitFields =
+ !(C.getAnalysisManager().shouldInlineCall() &&
+ (D && D->getBody()));
+
for (unsigned i = 0, e = callOrMsg.getNumArgs(); i != e; ++i)
if (PreVisitProcessArg(C, callOrMsg.getArgSVal(i),
callOrMsg.getArgSourceRange(i), callOrMsg.getArg(i),
+ checkUninitFields,
BT_desc, BT))
return;
}
@@ -85,9 +100,9 @@ void CallAndMessageChecker::PreVisitProcessArgs(CheckerContext &C,
bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
SVal V, SourceRange argRange,
const Expr *argEx,
+ const bool checkUninitFields,
const char *BT_desc,
- llvm::OwningPtr<BugType> &BT) {
-
+ OwningPtr<BugType> &BT) {
if (V.isUndef()) {
if (ExplodedNode *N = C.generateSink()) {
LazyInit_BT(BT_desc, BT);
@@ -96,12 +111,16 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
BugReport *R = new BugReport(*BT, BT->getName(), N);
R->addRange(argRange);
if (argEx)
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, argEx));
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, argEx,
+ R));
C.EmitReport(R);
}
return true;
}
+ if (!checkUninitFields)
+ return false;
+
if (const nonloc::LazyCompoundVal *LV =
dyn_cast<nonloc::LazyCompoundVal>(&V)) {
@@ -133,7 +152,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
return true;
}
else {
- const SVal &V = StoreMgr.Retrieve(store, loc::MemRegionVal(FR));
+ const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
if (V.isUndef())
return true;
}
@@ -154,7 +173,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
if (F.Find(D->getRegion())) {
if (ExplodedNode *N = C.generateSink()) {
LazyInit_BT(BT_desc, BT);
- llvm::SmallString<512> Str;
+ SmallString<512> Str;
llvm::raw_svector_ostream os(Str);
os << "Passed-by-value struct argument contains uninitialized data";
@@ -193,7 +212,8 @@ void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const{
const Expr *Callee = CE->getCallee()->IgnoreParens();
- SVal L = C.getState()->getSVal(Callee);
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal L = C.getState()->getSVal(Callee, LCtx);
if (L.isUndef()) {
if (!BT_call_undef)
@@ -210,7 +230,7 @@ void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
EmitBadCall(BT_call_null.get(), C, CE);
}
- PreVisitProcessArgs(C, CallOrObjCMessage(CE, C.getState()),
+ PreVisitProcessArgs(C, CallOrObjCMessage(CE, C.getState(), LCtx),
"Function call argument is an uninitialized value",
BT_call_arg);
}
@@ -218,21 +238,33 @@ void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
void CallAndMessageChecker::checkPreObjCMessage(ObjCMessage msg,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
// FIXME: Handle 'super'?
if (const Expr *receiver = msg.getInstanceReceiver()) {
- SVal recVal = state->getSVal(receiver);
+ SVal recVal = state->getSVal(receiver, LCtx);
if (recVal.isUndef()) {
if (ExplodedNode *N = C.generateSink()) {
- if (!BT_msg_undef)
- BT_msg_undef.reset(new BuiltinBug("Receiver in message expression is "
- "an uninitialized value"));
+ BugType *BT = 0;
+ if (msg.isPureMessageExpr()) {
+ if (!BT_msg_undef)
+ BT_msg_undef.reset(new BuiltinBug("Receiver in message expression "
+ "is an uninitialized value"));
+ BT = BT_msg_undef.get();
+ }
+ else {
+ if (!BT_objc_prop_undef)
+ BT_objc_prop_undef.reset(new BuiltinBug("Property access on an "
+ "uninitialized object pointer"));
+ BT = BT_objc_prop_undef.get();
+ }
BugReport *R =
- new BugReport(*BT_msg_undef, BT_msg_undef->getName(), N);
+ new BugReport(*BT, BT->getName(), N);
R->addRange(receiver->getSourceRange());
R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- receiver));
+ receiver,
+ R));
C.EmitReport(R);
}
return;
@@ -240,7 +272,7 @@ void CallAndMessageChecker::checkPreObjCMessage(ObjCMessage msg,
// Bifurcate the state into nil and non-nil ones.
DefinedOrUnknownSVal receiverVal = cast<DefinedOrUnknownSVal>(recVal);
- const ProgramState *notNilState, *nilState;
+ ProgramStateRef notNilState, nilState;
llvm::tie(notNilState, nilState) = state->assume(receiverVal);
// Handle receiver must be nil.
@@ -255,7 +287,8 @@ void CallAndMessageChecker::checkPreObjCMessage(ObjCMessage msg,
"Argument for property setter is an uninitialized value"
: "Argument in message expression is an uninitialized value";
// Check for any arguments that are uninitialized/undefined.
- PreVisitProcessArgs(C, CallOrObjCMessage(msg, state), bugDesc, BT_msg_arg);
+ PreVisitProcessArgs(C, CallOrObjCMessage(msg, state, LCtx),
+ bugDesc, BT_msg_arg);
}
void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
@@ -267,7 +300,7 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
new BuiltinBug("Receiver in message expression is "
"'nil' and returns a garbage value"));
- llvm::SmallString<200> buf;
+ SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
os << "The receiver of message '" << msg.getSelector().getAsString()
<< "' is nil and returns a value of type '"
@@ -277,48 +310,39 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
if (const Expr *receiver = msg.getInstanceReceiver()) {
report->addRange(receiver->getSourceRange());
report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- receiver));
+ receiver,
+ report));
}
C.EmitReport(report);
}
static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
- return triple.getVendor() == llvm::Triple::Apple &&
- (!triple.isMacOSXVersionLT(10,5) ||
- triple.getArch() == llvm::Triple::arm ||
- triple.getArch() == llvm::Triple::thumb);
+ return (triple.getVendor() == llvm::Triple::Apple &&
+ (triple.getOS() == llvm::Triple::IOS ||
+ !triple.isMacOSXVersionLT(10,5)));
}
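
A quick sanity check of the revised predicate (the triple strings are illustrative):

    llvm::Triple Mac("x86_64-apple-macosx10.6");
    llvm::Triple IOS("armv7-apple-ios5.0");
    llvm::Triple Linux("x86_64-unknown-linux-gnu");
    supportsNilWithFloatRet(Mac);   // true: Apple vendor, Mac OS X >= 10.5
    supportsNilWithFloatRet(IOS);   // true: Apple vendor, OS is iOS
    supportsNilWithFloatRet(Linux); // false: vendor is not Apple
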
void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
- const ProgramState *state,
+ ProgramStateRef state,
ObjCMessage msg) const {
ASTContext &Ctx = C.getASTContext();
// Check the return type of the message expression. A message to nil will
// return different values depending on the return type and the architecture.
QualType RetTy = msg.getType(Ctx);
-
CanQualType CanRetTy = Ctx.getCanonicalType(RetTy);
+ const LocationContext *LCtx = C.getLocationContext();
if (CanRetTy->isStructureOrClassType()) {
- // FIXME: At some point we shouldn't rely on isConsumedExpr(), but instead
- // have the "use of undefined value" be smarter about where the
- // undefined value came from.
- if (C.getPredecessor()->getParentMap().isConsumedExpr(msg.getOriginExpr())){
- if (ExplodedNode *N = C.generateSink(state))
- emitNilReceiverBug(C, msg, N);
- return;
- }
-
- // The result is not consumed by a surrounding expression. Just propagate
- // the current state.
- C.addTransition(state);
+ // Structure returns are safe since the compiler zeroes them out.
+ SVal V = C.getSValBuilder().makeZeroVal(msg.getType(Ctx));
+ C.addTransition(state->BindExpr(msg.getMessageExpr(), LCtx, V));
return;
}
- // Other cases: check if the return type is smaller than void*.
- if (CanRetTy != Ctx.VoidTy &&
- C.getPredecessor()->getParentMap().isConsumedExpr(msg.getOriginExpr())) {
+ // Other cases: check if sizeof(return type) > sizeof(void*)
+ if (CanRetTy != Ctx.VoidTy && C.getLocationContext()->getParentMap()
+ .isConsumedExpr(msg.getMessageExpr())) {
// Compute: sizeof(void *) and sizeof(return type)
const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
@@ -349,7 +373,7 @@ void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
// of this case unless we have *a lot* more knowledge.
//
SVal V = C.getSValBuilder().makeZeroVal(msg.getType(Ctx));
- C.generateNode(state->BindExpr(msg.getOriginExpr(), V));
+ C.addTransition(state->BindExpr(msg.getMessageExpr(), LCtx, V));
return;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 84a9e6b..2e184fb 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -23,7 +23,7 @@ using namespace ento;
namespace {
class CastSizeChecker : public Checker< check::PreStmt<CastExpr> > {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
public:
void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
};
@@ -44,8 +44,8 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
if (ToPointeeTy->isIncompleteType())
return;
- const ProgramState *state = C.getState();
- const MemRegion *R = state->getSVal(E).getAsRegion();
+ ProgramStateRef state = C.getState();
+ const MemRegion *R = state->getSVal(E, C.getLocationContext()).getAsRegion();
if (R == 0)
return;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index c855210..1407638 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -24,7 +24,7 @@ using namespace ento;
namespace {
class CastToStructChecker : public Checker< check::PreStmt<CastExpr> > {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
public:
void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
@@ -56,7 +56,7 @@ void CastToStructChecker::checkPreStmt(const CastExpr *CE,
// Now the cast-to-type is struct pointer, the original type is not void*.
if (!OrigPointeeTy->isRecordType()) {
- if (ExplodedNode *N = C.generateNode()) {
+ if (ExplodedNode *N = C.addTransition()) {
if (!BT)
BT.reset(new BuiltinBug("Cast from non-struct type to struct type",
"Casting a non-structure type to a structure type "
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index c325bb1..133204a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -177,9 +177,10 @@ static void checkObjCDealloc(const ObjCImplementationDecl *D,
std::string buf;
llvm::raw_string_ostream os(buf);
- os << "Objective-C class '" << D << "' lacks a 'dealloc' instance method";
+ os << "Objective-C class '" << *D << "' lacks a 'dealloc' instance method";
- BR.EmitBasicReport(name, os.str(), DLoc);
+ BR.EmitBasicReport(D, name, categories::CoreFoundationObjectiveC,
+ os.str(), DLoc);
return;
}
@@ -192,11 +193,12 @@ static void checkObjCDealloc(const ObjCImplementationDecl *D,
std::string buf;
llvm::raw_string_ostream os(buf);
- os << "The 'dealloc' instance method in Objective-C class '" << D
+ os << "The 'dealloc' instance method in Objective-C class '" << *D
<< "' does not send a 'dealloc' message to its super class"
" (missing [super dealloc])";
- BR.EmitBasicReport(name, os.str(), DLoc);
+ BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
+ os.str(), DLoc);
return;
}
@@ -236,9 +238,7 @@ static void checkObjCDealloc(const ObjCImplementationDecl *D,
bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
if (scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
!= requiresRelease) {
- const char *name;
- const char* category = "Memory (Core Foundation/Objective-C)";
-
+ const char *name = 0;
std::string buf;
llvm::raw_string_ostream os(buf);
@@ -263,7 +263,8 @@ static void checkObjCDealloc(const ObjCImplementationDecl *D,
PathDiagnosticLocation SDLoc =
PathDiagnosticLocation::createBegin((*I), BR.getSourceManager());
- BR.EmitBasicReport(name, category, os.str(), SDLoc);
+ BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
+ os.str(), SDLoc);
}
}
}
@@ -278,9 +279,9 @@ class ObjCDeallocChecker : public Checker<
public:
void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
BugReporter &BR) const {
- if (mgr.getLangOptions().getGC() == LangOptions::GCOnly)
+ if (mgr.getLangOpts().getGC() == LangOptions::GCOnly)
return;
- checkObjCDealloc(cast<ObjCImplementationDecl>(D), mgr.getLangOptions(), BR);
+ checkObjCDealloc(cast<ObjCImplementationDecl>(D), mgr.getLangOpts(), BR);
}
};
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index c076c1e3..6df47b1 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -70,7 +70,9 @@ static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
PathDiagnosticLocation::createBegin(MethDerived,
BR.getSourceManager());
- BR.EmitBasicReport("Incompatible instance method return type",
+ BR.EmitBasicReport(MethDerived,
+ "Incompatible instance method return type",
+ categories::CoreFoundationObjectiveC,
os.str(), MethDLoc);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index bf7ba18..dde9071 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -18,6 +18,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
@@ -34,18 +35,40 @@ static bool isArc4RandomAvailable(const ASTContext &Ctx) {
}
namespace {
+struct DefaultBool {
+ bool val;
+ DefaultBool() : val(false) {}
+ operator bool() const { return val; }
+ DefaultBool &operator=(bool b) { val = b; return *this; }
+};
+
+struct ChecksFilter {
+ DefaultBool check_gets;
+ DefaultBool check_getpw;
+ DefaultBool check_mktemp;
+ DefaultBool check_mkstemp;
+ DefaultBool check_strcpy;
+ DefaultBool check_rand;
+ DefaultBool check_vfork;
+ DefaultBool check_FloatLoopCounter;
+ DefaultBool check_UncheckedReturn;
+};
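
DefaultBool exists so that a default-constructed ChecksFilter starts with every check disabled, while each member still reads like a plain bool; a registration function then just flips the member it owns. A small sketch of the intent:

    ChecksFilter f;        // every check_* member starts out false
    f.check_gets = true;   // what registering the 'gets' checker effectively does
    if (f.check_gets) { /* run the 'gets' check */ }
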
+
class WalkAST : public StmtVisitor<WalkAST> {
BugReporter &BR;
- AnalysisContext* AC;
+ AnalysisDeclContext* AC;
enum { num_setids = 6 };
IdentifierInfo *II_setid[num_setids];
const bool CheckRand;
+ const ChecksFilter &filter;
public:
- WalkAST(BugReporter &br, AnalysisContext* ac)
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac,
+ const ChecksFilter &f)
: BR(br), AC(ac), II_setid(),
- CheckRand(isArc4RandomAvailable(BR.getContext())) {}
+ CheckRand(isArc4RandomAvailable(BR.getContext())),
+ filter(f) {}
// Statement visitor methods.
void VisitCallExpr(CallExpr *CE);
@@ -66,6 +89,7 @@ public:
void checkCall_gets(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD);
void checkCall_rand(const CallExpr *CE, const FunctionDecl *FD);
@@ -105,6 +129,9 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
.Case("gets", &WalkAST::checkCall_gets)
.Case("getpw", &WalkAST::checkCall_getpw)
.Case("mktemp", &WalkAST::checkCall_mktemp)
+ .Case("mkstemp", &WalkAST::checkCall_mkstemp)
+ .Case("mkdtemp", &WalkAST::checkCall_mkstemp)
+ .Case("mkstemps", &WalkAST::checkCall_mkstemp)
.Cases("strcpy", "__strcpy_chk", &WalkAST::checkCall_strcpy)
.Cases("strcat", "__strcat_chk", &WalkAST::checkCall_strcat)
.Case("drand48", &WalkAST::checkCall_rand)
@@ -186,6 +213,9 @@ getIncrementedVar(const Expr *expr, const VarDecl *x, const VarDecl *y) {
/// CERT: FLP30-C, FLP30-CPP.
///
void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
+ if (!filter.check_FloatLoopCounter)
+ return;
+
// Does the loop have a condition?
const Expr *condition = FS->getCond();
@@ -242,7 +272,7 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
SmallVector<SourceRange, 2> ranges;
- llvm::SmallString<256> sbuf;
+ SmallString<256> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << "Variable '" << drCond->getDecl()->getName()
@@ -256,7 +286,8 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
PathDiagnosticLocation FSLoc =
PathDiagnosticLocation::createBegin(FS, BR.getSourceManager(), AC);
- BR.EmitBasicReport(bugType, "Security", os.str(),
+ BR.EmitBasicReport(AC->getDecl(),
+ bugType, "Security", os.str(),
FSLoc, ranges.data(), ranges.size());
}
@@ -268,6 +299,9 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_gets)
+ return;
+
const FunctionProtoType *FPT
= dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if (!FPT)
@@ -289,7 +323,8 @@ void WalkAST::checkCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport("Potential buffer overflow in call to 'gets'",
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential buffer overflow in call to 'gets'",
"Security",
"Call to function 'gets' is extremely insecure as it can "
"always result in a buffer overflow",
@@ -302,6 +337,9 @@ void WalkAST::checkCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_getpw)
+ return;
+
const FunctionProtoType *FPT
= dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if (!FPT)
@@ -327,7 +365,8 @@ void WalkAST::checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport("Potential buffer overflow in call to 'getpw'",
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential buffer overflow in call to 'getpw'",
"Security",
"The getpw() function is dangerous as it may overflow the "
"provided buffer. It is obsoleted by getpwuid().",
@@ -335,11 +374,18 @@ void WalkAST::checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
}
//===----------------------------------------------------------------------===//
-// Check: Any use of 'mktemp' is insecure.It is obsoleted by mkstemp().
+// Check: Any use of 'mktemp' is insecure. It is obsoleted by mkstemp().
// CWE-377: Insecure Temporary File
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_mktemp) {
+ // Fall back to the security check of looking for enough 'X's in the
+ // format string, since that is a less severe warning.
+ checkCall_mkstemp(CE, FD);
+ return;
+ }
+
const FunctionProtoType *FPT
= dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
if(!FPT)
@@ -362,13 +408,98 @@ void WalkAST::checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport("Potential insecure temporary file in call 'mktemp'",
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential insecure temporary file in call 'mktemp'",
"Security",
"Call to function 'mktemp' is insecure as it always "
- "creates or uses insecure temporary file. Use 'mkstemp' instead",
+ "creates or uses insecure temporary file. Use 'mkstemp' "
+ "instead",
CELoc, &R, 1);
}
+
+//===----------------------------------------------------------------------===//
+// Check: Use of 'mkstemp', 'mktemp', 'mkdtemp' should contain at least 6 X's.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_mkstemp)
+ return;
+
+ StringRef Name = FD->getIdentifier()->getName();
+ std::pair<signed, signed> ArgSuffix =
+ llvm::StringSwitch<std::pair<signed, signed> >(Name)
+ .Case("mktemp", std::make_pair(0,-1))
+ .Case("mkstemp", std::make_pair(0,-1))
+ .Case("mkdtemp", std::make_pair(0,-1))
+ .Case("mkstemps", std::make_pair(0,1))
+ .Default(std::make_pair(-1, -1));
+
+ assert(ArgSuffix.first >= 0 && "Unsupported function");
+
+ // Check if the number of arguments is consistent with our expectations.
+ unsigned numArgs = CE->getNumArgs();
+ if ((signed) numArgs <= ArgSuffix.first)
+ return;
+
+ const StringLiteral *strArg =
+ dyn_cast<StringLiteral>(CE->getArg((unsigned)ArgSuffix.first)
+ ->IgnoreParenImpCasts());
+
+ // Currently we only handle string literals. It is possible to do better,
+ // either by looking at references to const variables, or by doing real
+ // flow analysis.
+ if (!strArg || strArg->getCharByteWidth() != 1)
+ return;
+
+ // Count the number of X's, taking into account a possible cutoff suffix.
+ StringRef str = strArg->getString();
+ unsigned numX = 0;
+ unsigned n = str.size();
+
+ // Take into account the suffix.
+ unsigned suffix = 0;
+ if (ArgSuffix.second >= 0) {
+ const Expr *suffixEx = CE->getArg((unsigned)ArgSuffix.second);
+ llvm::APSInt Result;
+ if (!suffixEx->EvaluateAsInt(Result, BR.getContext()))
+ return;
+ // FIXME: Issue a warning.
+ if (Result.isNegative())
+ return;
+ suffix = (unsigned) Result.getZExtValue();
+ n = (n > suffix) ? n - suffix : 0;
+ }
+
+ for (unsigned i = 0; i < n; ++i)
+ if (str[i] == 'X') ++numX;
+
+ if (numX >= 6)
+ return;
+
+ // Issue a warning.
+ SourceRange R = strArg->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ SmallString<512> buf;
+ llvm::raw_svector_ostream out(buf);
+ out << "Call to '" << Name << "' should have at least 6 'X's in the"
+ " format string to be secure (" << numX << " 'X'";
+ if (numX != 1)
+ out << 's';
+ out << " seen";
+ if (suffix) {
+ out << ", " << suffix << " character";
+ if (suffix > 1)
+ out << 's';
+ out << " used as a suffix";
+ }
+ out << ')';
+ BR.EmitBasicReport(AC->getDecl(),
+ "Insecure temporary file creation", "Security",
+ out.str(), CELoc, &R, 1);
+}
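
Given the counting rule above, and that only string-literal arguments are inspected, a sketch of what is and is not diagnosed (mkstemp/mkstemps as on BSD/glibc; the casts exist only to expose the syntactic pattern, since real code must pass a writable buffer):

    #include <stdlib.h>

    void makeTemp(void) {
      mkstemp((char *)"/tmp/fooXXXX");           // diagnosed: only 4 'X's seen
      mkstemps((char *)"/tmp/fooXXXXXX.txt", 4); // ok: 4-char suffix excluded, 6 'X's counted
      char buf[] = "/tmp/fooXXXX";
      mkstemp(buf);                              // not diagnosed: argument is not a literal
    }
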
+
//===----------------------------------------------------------------------===//
// Check: Any use of 'strcpy' is insecure.
//
@@ -376,6 +507,9 @@ void WalkAST::checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
// the Bounds of a Memory Buffer
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_strcpy)
+ return;
+
if (!checkCall_strCommon(CE, FD))
return;
@@ -383,13 +517,14 @@ void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport("Potential insecure memory buffer bounds restriction in "
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential insecure memory buffer bounds restriction in "
"call 'strcpy'",
"Security",
"Call to function 'strcpy' is insecure as it does not "
- "provide bounding of the memory buffer. Replace "
- "unbounded copy functions with analogous functions that "
- "support length arguments such as 'strncpy'. CWE-119.",
+ "provide bounding of the memory buffer. Replace "
+ "unbounded copy functions with analogous functions that "
+ "support length arguments such as 'strlcpy'. CWE-119.",
CELoc, &R, 1);
}
@@ -400,6 +535,9 @@ void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
// the Bounds of a Memory Buffer
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_strcpy)
+ return;
+
if (!checkCall_strCommon(CE, FD))
return;
@@ -407,13 +545,14 @@ void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport("Potential insecure memory buffer bounds restriction in "
- "call 'strcat'",
- "Security",
- "Call to function 'strcat' is insecure as it does not "
- "provide bounding of the memory buffer. Replace "
- "unbounded copy functions with analogous functions that "
- "support length arguments such as 'strncat'. CWE-119.",
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential insecure memory buffer bounds restriction in "
+ "call 'strcat'",
+ "Security",
+ "Call to function 'strcat' is insecure as it does not "
+ "provide bounding of the memory buffer. Replace "
+ "unbounded copy functions with analogous functions that "
+ "support length arguments such as 'strlcat'. CWE-119.",
CELoc, &R, 1);
}
@@ -453,7 +592,7 @@ bool WalkAST::checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD) {
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
- if (!CheckRand)
+ if (!filter.check_rand || !CheckRand)
return;
const FunctionProtoType *FTP
@@ -475,11 +614,11 @@ void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
return;
// Issue a warning.
- llvm::SmallString<256> buf1;
+ SmallString<256> buf1;
llvm::raw_svector_ostream os1(buf1);
os1 << '\'' << *FD << "' is a poor random number generator";
- llvm::SmallString<256> buf2;
+ SmallString<256> buf2;
llvm::raw_svector_ostream os2(buf2);
os2 << "Function '" << *FD
<< "' is obsolete because it implements a poor random number generator."
@@ -488,7 +627,8 @@ void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport(os1.str(), "Security", os2.str(), CELoc, &R, 1);
+ BR.EmitBasicReport(AC->getDecl(), os1.str(), "Security", os2.str(),
+ CELoc, &R, 1);
}
//===----------------------------------------------------------------------===//
@@ -497,7 +637,7 @@ void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_random(const CallExpr *CE, const FunctionDecl *FD) {
- if (!CheckRand)
+ if (!CheckRand || !filter.check_rand)
return;
const FunctionProtoType *FTP
@@ -513,7 +653,8 @@ void WalkAST::checkCall_random(const CallExpr *CE, const FunctionDecl *FD) {
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport("'random' is not a secure random number generator",
+ BR.EmitBasicReport(AC->getDecl(),
+ "'random' is not a secure random number generator",
"Security",
"The 'random' function produces a sequence of values that "
"an adversary may be able to predict. Use 'arc4random' "
@@ -526,11 +667,15 @@ void WalkAST::checkCall_random(const CallExpr *CE, const FunctionDecl *FD) {
//===----------------------------------------------------------------------===//
void WalkAST::checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_vfork)
+ return;
+
// All calls to vfork() are insecure, issue a warning.
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport("Potential insecure implementation-specific behavior in "
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential insecure implementation-specific behavior in "
"call 'vfork'",
"Security",
"Call to function 'vfork' is insecure as it can lead to "
@@ -546,6 +691,9 @@ void WalkAST::checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD) {
//===----------------------------------------------------------------------===//
void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
+ if (!filter.check_UncheckedReturn)
+ return;
+
const FunctionDecl *FD = CE->getDirectCallee();
if (!FD)
return;
@@ -586,11 +734,11 @@ void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
return;
// Issue a warning.
- llvm::SmallString<256> buf1;
+ SmallString<256> buf1;
llvm::raw_svector_ostream os1(buf1);
os1 << "Return value is not checked in call to '" << *FD << '\'';
- llvm::SmallString<256> buf2;
+ SmallString<256> buf2;
llvm::raw_svector_ostream os2(buf2);
os2 << "The return value from the call to '" << *FD
<< "' is not checked. If an error occurs in '" << *FD
@@ -599,7 +747,8 @@ void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
SourceRange R = CE->getCallee()->getSourceRange();
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
- BR.EmitBasicReport(os1.str(), "Security", os2.str(), CELoc, &R, 1);
+ BR.EmitBasicReport(AC->getDecl(), os1.str(), "Security", os2.str(),
+ CELoc, &R, 1);
}
//===----------------------------------------------------------------------===//
@@ -609,14 +758,29 @@ void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
namespace {
class SecuritySyntaxChecker : public Checker<check::ASTCodeBody> {
public:
+ ChecksFilter filter;
+
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
- WalkAST walker(BR, mgr.getAnalysisContext(D));
+ WalkAST walker(BR, mgr.getAnalysisDeclContext(D), filter);
walker.Visit(D->getBody());
}
};
}
-void ento::registerSecuritySyntaxChecker(CheckerManager &mgr) {
- mgr.registerChecker<SecuritySyntaxChecker>();
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+ mgr.registerChecker<SecuritySyntaxChecker>()->filter.check_##name = true;\
}
+
+REGISTER_CHECKER(gets)
+REGISTER_CHECKER(getpw)
+REGISTER_CHECKER(mkstemp)
+REGISTER_CHECKER(mktemp)
+REGISTER_CHECKER(strcpy)
+REGISTER_CHECKER(rand)
+REGISTER_CHECKER(vfork)
+REGISTER_CHECKER(FloatLoopCounter)
+REGISTER_CHECKER(UncheckedReturn)
+
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
index 469be05..cc7fd37 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -24,10 +24,10 @@ using namespace ento;
namespace {
class WalkAST : public StmtVisitor<WalkAST> {
BugReporter &BR;
- AnalysisContext* AC;
+ AnalysisDeclContext* AC;
public:
- WalkAST(BugReporter &br, AnalysisContext* ac) : BR(br), AC(ac) {}
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac) : BR(br), AC(ac) {}
void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
void VisitStmt(Stmt *S) { VisitChildren(S); }
void VisitChildren(Stmt *S);
@@ -63,7 +63,8 @@ void WalkAST::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
SourceRange R = ArgEx->getSourceRange();
PathDiagnosticLocation ELoc =
PathDiagnosticLocation::createBegin(E, BR.getSourceManager(), AC);
- BR.EmitBasicReport("Potential unintended use of sizeof() on pointer type",
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential unintended use of sizeof() on pointer type",
"Logic",
"The code calls sizeof() on a pointer type. "
"This can produce an unexpected result.",
@@ -80,7 +81,7 @@ class SizeofPointerChecker : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
- WalkAST walker(BR, mgr.getAnalysisContext(D));
+ WalkAST walker(BR, mgr.getAnalysisDeclContext(D));
walker.Visit(D->getBody());
}
};
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
new file mode 100644
index 0000000..843502f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -0,0 +1,233 @@
+//= CheckerDocumentation.cpp - Documentation checker ---------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker lists all the checker callbacks and provides documentation for
+// checker writers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+// All checkers should be placed into anonymous namespace.
+// We place the CheckerDocumentation inside the ento namespace to make it
+// visible in doxygen.
+namespace ento {
+
+/// This checker documents the callback functions checkers can use to implement
+/// custom handling of specific events during path exploration, as well as
+/// bug reporting. Most of the callbacks are targeted at path-sensitive
+/// checking.
+///
+/// \sa CheckerContext
+class CheckerDocumentation : public Checker< check::PreStmt<DeclStmt>,
+ check::PostStmt<CallExpr>,
+ check::PreObjCMessage,
+ check::PostObjCMessage,
+ check::BranchCondition,
+ check::Location,
+ check::Bind,
+ check::DeadSymbols,
+ check::EndPath,
+ check::EndAnalysis,
+ check::EndOfTranslationUnit,
+ eval::Call,
+ eval::Assume,
+ check::LiveSymbols,
+ check::RegionChanges,
+ check::Event<ImplicitNullDerefEvent>,
+ check::ASTDecl<FunctionDecl> > {
+public:
+
+ /// \brief Pre-visit the Statement.
+ ///
+ /// The method will be called before the analyzer core processes the
+ /// statement. The notification is performed for every explored CFGElement,
+ /// which does not include the control flow statements such as IfStmt. The
+ /// callback can be specialized to be called with any subclass of Stmt.
+ ///
+ /// See checkBranchCondition() callback for performing custom processing of
+ /// the branching statements.
+ ///
+ /// check::PreStmt<DeclStmt>
+ void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {}
+
+ /// \brief Post-visit the Statement.
+ ///
+ /// The method will be called after the analyzer core processes the
+ /// statement. The notification is performed for every explored CFGElement,
+ /// which does not include the control flow statements such as IfStmt. The
+ /// callback can be specialized to be called with any subclass of Stmt.
+ ///
+ /// check::PostStmt<CallExpr>
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Pre-visit the Objective C messages.
+ void checkPreObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const {}
+
+ /// \brief Post-visit the Objective C messages.
+ void checkPostObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const {}
+
+ /// \brief Pre-visit of the condition statement of a branch (such as IfStmt).
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const {}
+
+ /// \brief Called on a load from and a store to a location.
+ ///
+ /// The method will be called each time a location (pointer) value is
+ /// accessed.
+ /// \param Loc The value of the location (pointer).
+ /// \param IsLoad The flag specifying if the location is a store or a load.
+ /// \param S The statement being processed when the load is performed.
+ ///
+ /// check::Location
+ void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
+ CheckerContext &C) const {}
+
+ /// \brief Called on binding of a value to a location.
+ ///
+ /// \param Loc The value of the location (pointer).
+ /// \param Val The value which will be stored at the location Loc.
+ /// \param S The bind is performed while processing the statement S.
+ ///
+ /// check::Bind
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {}
+
+
+ /// \brief Called whenever a symbol becomes dead.
+ ///
+ /// This callback should be used by the checkers to aggressively clean
+ /// up/reduce the checker state, which is important for reducing the overall
+ /// memory usage. Specifically, if a checker keeps symbol specific information
+ /// in the sate, it can and should be dropped after the symbol becomes dead.
+ /// In addition, reporting a bug as soon as the checker becomes dead leads to
+ /// more precise diagnostics. (For example, one should report that a malloced
+ /// variable is not freed right after it goes out of scope.)
+ ///
+ /// \param SR The SymbolReaper object can be queried to determine which
+ /// symbols are dead.
+ ///
+ /// check::DeadSymbols
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {}
+
+ /// \brief Called when an end of path is reached in the ExplodedGraph.
+ ///
+ /// This callback should be used to check whether all allocated resources are freed.
+ ///
+ /// check::EndPath
+ void checkEndPath(CheckerContext &Ctx) const {}
+
+ /// \brief Called after all the paths in the ExplodedGraph reach end of path
+ /// - the symbolic execution graph is fully explored.
+ ///
+ /// This callback should be used when a checker needs a global view of the
+ /// information generated on all paths. For example, to compare the execution
+ /// summaries/results across several paths.
+ /// See IdempotentOperationChecker for a usage example.
+ ///
+ /// check::EndAnalysis
+ void checkEndAnalysis(ExplodedGraph &G,
+ BugReporter &BR,
+ ExprEngine &Eng) const {}
+
+ /// \brief Called after analysis of a TranslationUnit is complete.
+ ///
+ /// check::EndOfTranslationUnit
+ void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {}
+
+
+ /// \brief Evaluates function call.
+ ///
+ /// The analysis core treats all function calls in the same way. However, some
+ /// functions have special meaning, which should be reflected in the program
+ /// state. This callback allows a checker to provide domain-specific knowledge
+ /// about the particular functions it knows about.
+ ///
+ /// \returns true if the call has been successfully evaluated
+ /// and false otherwise. Note that only one checker can evaluate a call. If
+ /// more than one checker claims it can evaluate the same call, the first
+ /// one wins.
+ ///
+ /// eval::Call
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const { return true; }
+
+ /// \brief Handles assumptions on symbolic values.
+ ///
+ /// This method is called when a symbolic expression is assumed to be true or
+ /// false. For example, the assumptions are performed when evaluating a
+ /// condition at a branch. The callback allows checkers to track the assumptions
+ /// performed on the symbols of interest and change the state accordingly.
+ ///
+ /// eval::Assume
+ ProgramStateRef evalAssume(ProgramStateRef State,
+ SVal Cond,
+ bool Assumption) const { return State; }
+
+ /// Allows modifying SymbolReaper object. For example, checkers can explicitly
+ /// register symbols of interest as live. These symbols will not be marked
+ /// dead and removed.
+ ///
+ /// check::LiveSymbols
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const {}
+
+
+ bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }
+
+ /// check::RegionChanges
+ /// Allows tracking regions which get invalidated.
+ /// \param State The current program state.
+ /// \param invalidated A set of all symbols potentially touched by the change.
+ /// \param ExplicitRegions The regions explicitly requested for invalidation.
+ /// For example, in the case of a function call, these would be arguments.
+ /// \param Regions The transitive closure of accessible regions,
+ /// i.e. all regions that may have been touched by this change.
+ /// \param Call The call expression wrapper if the regions are invalidated by a
+ /// call, 0 otherwise.
+ /// Note, in order to be notified, the checker should also implement
+ /// wantsRegionChangeUpdate callback.
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State,
+ const StoreManager::InvalidatedSymbols *,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const {
+ return State;
+ }
+
+ /// check::Event<ImplicitNullDerefEvent>
+ void checkEvent(ImplicitNullDerefEvent Event) const {}
+
+ /// \brief Check every declaration in the AST.
+ ///
+ /// An AST traversal callback, which should only be used when the checker is
+ /// not path sensitive. It will be called for every Declaration in the AST and
+ /// can be specialized to only be called on subclasses of Decl, for example,
+ /// FunctionDecl.
+ ///
+ /// check::ASTDecl<FunctionDecl>
+ void checkASTDecl(const FunctionDecl *D,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {}
+
+};
+
+void CheckerDocumentation::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ return;
+}
+
+} // end namespace
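
Pulling the pieces together, a minimal hypothetical checker built from two of the callbacks documented above (all names here are illustrative, not part of the tree):

    namespace {
    class SimpleChecker : public Checker< check::PreStmt<DeclStmt>,
                                          check::DeadSymbols > {
    public:
      void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
        // Inspect the declaration; call C.addTransition() only on a state change.
      }
      void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {
        // Drop per-symbol state for any symbol SR reports as dead.
      }
    };
    } // end anonymous namespace

    void ento::registerSimpleChecker(CheckerManager &mgr) { // hypothetical
      mgr.registerChecker<SimpleChecker>();
    }
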
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
index d53e0b8..96a8d26 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -27,16 +27,21 @@ def DeadCode : Package<"deadcode">;
def DeadCodeExperimental : Package<"deadcode">, InPackage<Experimental>, Hidden;
def Security : Package <"security">;
+def InsecureAPI : Package<"insecureAPI">, InPackage<Security>;
def SecurityExperimental : Package<"security">, InPackage<Experimental>, Hidden;
+def Taint : Package<"taint">, InPackage<SecurityExperimental>, Hidden;
def Unix : Package<"unix">;
def UnixExperimental : Package<"unix">, InPackage<Experimental>, Hidden;
+def CString : Package<"cstring">, InPackage<Unix>, Hidden;
+def CStringExperimental : Package<"cstring">, InPackage<UnixExperimental>, Hidden;
def OSX : Package<"osx">;
def OSXExperimental : Package<"osx">, InPackage<Experimental>, Hidden;
def Cocoa : Package<"cocoa">, InPackage<OSX>;
def CocoaExperimental : Package<"cocoa">, InPackage<OSXExperimental>, Hidden;
def CoreFoundation : Package<"coreFoundation">, InPackage<OSX>;
+def Containers : Package<"containers">, InPackage<CoreFoundation>;
def LLVM : Package<"llvm">;
def Debug : Package<"debug">;
@@ -83,6 +88,10 @@ def StackAddrEscapeChecker : Checker<"StackAddressEscape">,
let ParentPackage = CoreExperimental in {
+def BoolAssignmentChecker : Checker<"BoolAssignment">,
+ HelpText<"Warn about assigning non-{0,1} values to Boolean variables">,
+ DescFile<"BoolAssignmentChecker.cpp">;
+
def CastSizeChecker : Checker<"CastSize">,
HelpText<"Check when casting a malloc'ed type T, whether the size is a multiple of the size of T">,
DescFile<"CastSizeChecker.cpp">;
@@ -163,6 +172,10 @@ def IteratorsChecker : Checker<"Iterators">,
HelpText<"Check improper uses of STL vector iterators">,
DescFile<"IteratorsChecker.cpp">;
+def VirtualCallChecker : Checker<"VirtualCall">,
+ HelpText<"Check virtual function calls during construction or destruction">,
+ DescFile<"VirtualCallChecker.cpp">;
+
} // end: "cplusplus.experimental"
//===----------------------------------------------------------------------===//
@@ -174,15 +187,14 @@ let ParentPackage = DeadCode in {
def DeadStoresChecker : Checker<"DeadStores">,
HelpText<"Check for values stored to variables that are never read afterwards">,
DescFile<"DeadStoresChecker.cpp">;
+} // end DeadCode
+
+let ParentPackage = DeadCodeExperimental in {
def IdempotentOperationChecker : Checker<"IdempotentOperations">,
HelpText<"Warn about idempotent operations">,
DescFile<"IdempotentOperationChecker.cpp">;
-} // end DeadCode
-
-let ParentPackage = DeadCodeExperimental in {
-
def UnreachableCodeChecker : Checker<"UnreachableCode">,
HelpText<"Check unreachable code">,
DescFile<"UnreachableCodeChecker.cpp">;
@@ -193,11 +205,39 @@ def UnreachableCodeChecker : Checker<"UnreachableCode">,
// Security checkers.
//===----------------------------------------------------------------------===//
-let ParentPackage = SecurityExperimental in {
+let ParentPackage = InsecureAPI in {
+ def gets : Checker<"gets">,
+ HelpText<"Warn on uses of the 'gets' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def getpw : Checker<"getpw">,
+ HelpText<"Warn on uses of the 'getpw' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def mktemp : Checker<"mktemp">,
+ HelpText<"Warn on uses of the 'mktemp' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def mkstemp : Checker<"mkstemp">,
+ HelpText<"Warn when 'mkstemp' is passed fewer than 6 X's in the format string">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def rand : Checker<"rand">,
+ HelpText<"Warn on uses of the 'rand', 'random', and related functions">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def strcpy : Checker<"strcpy">,
+ HelpText<"Warn on uses of the 'strcpy' and 'strcat' functions">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def vfork : Checker<"vfork">,
+ HelpText<"Warn on uses of the 'vfork' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def UncheckedReturn : Checker<"UncheckedReturn">,
+ HelpText<"Warn on uses of functions whose return values must be always checked">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+}
+let ParentPackage = Security in {
+ def FloatLoopCounter : Checker<"FloatLoopCounter">,
+ HelpText<"Warn on using a floating point value as a loop counter (CERT: FLP30-C, FLP30-CPP)">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+}
-def SecuritySyntaxChecker : Checker<"SecuritySyntactic">,
- HelpText<"Perform quick security API checks that require no data flow">,
- DescFile<"CheckSecuritySyntaxOnly.cpp">;
+let ParentPackage = SecurityExperimental in {
def ArrayBoundChecker : Checker<"ArrayBound">,
HelpText<"Warn about buffer overflows (older checker)">,
@@ -218,6 +258,18 @@ def MallocOverflowSecurityChecker : Checker<"MallocOverflow">,
} // end "security.experimental"
//===----------------------------------------------------------------------===//
+// Taint checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Taint in {
+
+def GenericTaintChecker : Checker<"TaintPropagation">,
+ HelpText<"Generate taint information used by other checkers">,
+ DescFile<"GenericTaintChecker.cpp">;
+
+} // end "experimental.security.taint"
+
+//===----------------------------------------------------------------------===//
// Unix API checkers.
//===----------------------------------------------------------------------===//
@@ -226,6 +278,10 @@ let ParentPackage = Unix in {
def UnixAPIChecker : Checker<"API">,
HelpText<"Check calls to various UNIX/Posix functions">,
DescFile<"UnixAPIChecker.cpp">;
+
+def MallocPessimistic : Checker<"Malloc">,
+ HelpText<"Check for memory leaks, double free, and use-after-free problems.">,
+ DescFile<"MallocChecker.cpp">;
} // end "unix"
@@ -235,14 +291,14 @@ def ChrootChecker : Checker<"Chroot">,
HelpText<"Check improper use of chroot">,
DescFile<"ChrootChecker.cpp">;
-def CStringChecker : Checker<"CString">,
- HelpText<"Check calls to functions in <string.h>">,
- DescFile<"CStringChecker.cpp">;
-
-def MallocChecker : Checker<"Malloc">,
- HelpText<"Check for potential memory leaks, double free, and use-after-free problems">,
+def MallocOptimistic : Checker<"MallocWithAnnotations">,
+ HelpText<"Check for memory leaks, double free, and use-after-free problems. Assumes that all user-defined functions which might free a pointer are annotated.">,
DescFile<"MallocChecker.cpp">;
+def MallocSizeofChecker : Checker<"MallocSizeof">,
+ HelpText<"Check for dubious malloc arguments involving sizeof">,
+ DescFile<"MallocSizeofChecker.cpp">;
+
def PthreadLockChecker : Checker<"PthreadLock">,
HelpText<"Simple lock -> unlock checker">,
DescFile<"PthreadLockChecker.cpp">;
@@ -253,6 +309,32 @@ def StreamChecker : Checker<"Stream">,
} // end "unix.experimental"
+let ParentPackage = CString in {
+
+def CStringNullArg : Checker<"NullArg">,
+ HelpText<"Check for null pointers being passed as arguments to C string functions">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringSyntaxChecker : Checker<"BadSizeArg">,
+ HelpText<"Check the size argument passed into C string functions for common erroneous patterns">,
+ DescFile<"CStringSyntaxChecker.cpp">;
+}
+
+let ParentPackage = CStringExperimental in {
+
+def CStringOutOfBounds : Checker<"OutOfBounds">,
+ HelpText<"Check for out-of-bounds access in string functions">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringBufferOverlap : Checker<"BufferOverlap">,
+ HelpText<"Checks for overlap in two buffer arguments">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringNotNullTerm : Checker<"NotNullTerminated">,
+ HelpText<"Check for arguments which are not null-terminating strings">,
+ DescFile<"CStringChecker.cpp">;
+}
+
//===----------------------------------------------------------------------===//
// Mac OS X, Cocoa, and Core Foundation checkers.
//===----------------------------------------------------------------------===//
@@ -291,7 +373,7 @@ def ClassReleaseChecker : Checker<"ClassRelease">,
DescFile<"BasicObjCFoundationChecks.cpp">;
def VariadicMethodTypeChecker : Checker<"VariadicMethodTypes">,
- HelpText<"Check for passing non-Objective-C types to variadic methods that expect"
+ HelpText<"Check for passing non-Objective-C types to variadic methods that expect "
"only Objective-C types">,
DescFile<"BasicObjCFoundationChecks.cpp">;
@@ -306,7 +388,11 @@ def ObjCMethSigsChecker : Checker<"IncompatibleMethodTypes">,
def ObjCUnusedIvarsChecker : Checker<"UnusedIvars">,
HelpText<"Warn about private ivars that are never used">,
DescFile<"ObjCUnusedIVarsChecker.cpp">;
-
+
+def ObjCSelfInitChecker : Checker<"SelfInit">,
+ HelpText<"Check that 'self' is properly initialized inside an initializer method">,
+ DescFile<"ObjCSelfInitChecker.cpp">;
+
def NSErrorChecker : Checker<"NSError">,
HelpText<"Check usage of NSError** parameters">,
DescFile<"NSErrorChecker.cpp">;
@@ -319,10 +405,6 @@ def RetainCountChecker : Checker<"RetainCount">,
let ParentPackage = CocoaExperimental in {
-def ObjCSelfInitChecker : Checker<"SelfInit">,
- HelpText<"Check that 'self' is properly initialized inside an initializer method">,
- DescFile<"ObjCSelfInitChecker.cpp">;
-
def ObjCDeallocChecker : Checker<"Dealloc">,
HelpText<"Warn about Objective-C classes that lack a correct implementation of -dealloc">,
DescFile<"CheckObjCDealloc.cpp">;
@@ -344,6 +426,16 @@ def CFErrorChecker : Checker<"CFError">,
DescFile<"NSErrorChecker.cpp">;
}
+let ParentPackage = Containers in {
+def ObjCContainersASTChecker : Checker<"PointerSizedValues">,
+ HelpText<"Warns if 'CFArray', 'CFDictionary', 'CFSet' are created with non-pointer-size values">,
+ DescFile<"ObjCContainersASTChecker.cpp">;
+
+def ObjCContainersChecker : Checker<"OutOfBounds">,
+ HelpText<"Checks for index out-of-bounds when using 'CFArray' API">,
+ DescFile<"ObjCContainersChecker.cpp">;
+
+}
//===----------------------------------------------------------------------===//
// Checkers for LLVM development.
//===----------------------------------------------------------------------===//
@@ -359,6 +451,10 @@ def LLVMConventionsChecker : Checker<"Conventions">,
let ParentPackage = Debug in {
+def DominatorsTreeDumper : Checker<"DumpDominators">,
+ HelpText<"Print the dominance tree for a given CFG">,
+ DescFile<"DebugCheckers.cpp">;
+
def LiveVariablesDumper : Checker<"DumpLiveVars">,
HelpText<"Print results of live variable analysis">,
DescFile<"DebugCheckers.cpp">;
@@ -371,9 +467,21 @@ def CFGDumper : Checker<"DumpCFG">,
HelpText<"Display Control-Flow Graphs">,
DescFile<"DebugCheckers.cpp">;
+def CallGraphViewer : Checker<"ViewCallGraph">,
+ HelpText<"View Call Graph using GraphViz">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CallGraphDumper : Checker<"DumpCallGraph">,
+ HelpText<"Display Call Graph">,
+ DescFile<"DebugCheckers.cpp">;
+
def AnalyzerStatsChecker : Checker<"Stats">,
HelpText<"Emit warnings with analyzer statistics">,
DescFile<"AnalyzerStatsChecker.cpp">;
+def TaintTesterChecker : Checker<"TaintTest">,
+ HelpText<"Mark tainted symbols as such.">,
+ DescFile<"TaintTesterChecker.cpp">;
+
} // end "debug"
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 3c92381..30d0609 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -41,7 +41,7 @@ bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
class ChrootChecker : public Checker<eval::Call, check::PreStmt<CallExpr> > {
mutable IdentifierInfo *II_chroot, *II_chdir;
// This bug refers to possibly breaking out of a chroot() jail.
- mutable llvm::OwningPtr<BuiltinBug> BT_BreakJail;
+ mutable OwningPtr<BuiltinBug> BT_BreakJail;
public:
ChrootChecker() : II_chroot(0), II_chdir(0) {}
@@ -62,10 +62,7 @@ private:
} // end anonymous namespace
bool ChrootChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
- const FunctionDecl *FD = L.getAsFunctionDecl();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return false;
@@ -88,7 +85,7 @@ bool ChrootChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
}
void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
ProgramStateManager &Mgr = state->getStateManager();
// Once we encounter a chroot(), set the enum value ROOT_CHANGED directly in
@@ -98,7 +95,7 @@ void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
}
void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
ProgramStateManager &Mgr = state->getStateManager();
// If there is no jail state in the GDM, just return.
@@ -108,7 +105,7 @@ void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
// After chdir("/"), enter the jail, set the enum value JAIL_ENTERED.
const Expr *ArgExpr = CE->getArg(0);
- SVal ArgVal = state->getSVal(ArgExpr);
+ SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
if (const MemRegion *R = ArgVal.getAsRegion()) {
R = R->StripCasts();
@@ -125,10 +122,7 @@ void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
// Check the jail state before any function call except chroot and chdir().
void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
- const FunctionDecl *FD = L.getAsFunctionDecl();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
@@ -143,10 +137,10 @@ void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
return;
// If jail state is ROOT_CHANGED, generate BugReport.
- void *const* k = state->FindGDM(ChrootChecker::getTag());
+ void *const* k = C.getState()->FindGDM(ChrootChecker::getTag());
if (k)
if (isRootChanged((intptr_t) *k))
- if (ExplodedNode *N = C.generateNode()) {
+ if (ExplodedNode *N = C.addTransition()) {
if (!BT_BreakJail)
BT_BreakJail.reset(new BuiltinBug("Break out of jail",
"No call of chdir(\"/\") immediately "
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
index 289ce8d..230baa7 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
@@ -12,6 +12,8 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Checkers/CommonBugCategories.h"
+
#ifndef LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
#define LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp
new file mode 100644
index 0000000..e2a8ea6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp
@@ -0,0 +1,18 @@
+//=--- CommonBugCategories.cpp - Provides common issue categories -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Common strings used for the "category" of many static analyzer issues.
+namespace clang { namespace ento { namespace categories {
+
+const char *CoreFoundationObjectiveC = "Core Foundation/Objective-C";
+const char *MemoryCoreFoundationObjectiveC =
+ "Memory (Core Foundation/Objective-C)";
+const char *UnixAPI = "Unix API";
+}}}
+
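A sketch of the intended use of these constants (assumed usage, not shown in this patch; the bug name is made up):

    #include "clang/StaticAnalyzer/Checkers/CommonBugCategories.h"

    // Inside some checker, instead of repeating the category string literal:
    BT.reset(new BugType("Bad CF container value",
                         categories::CoreFoundationObjectiveC));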
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 901af43..510e8cd 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ParentMap.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
@@ -67,22 +68,37 @@ void ReachableCode::computeReachableBlocks() {
}
}
+static const Expr *LookThroughTransitiveAssignments(const Expr *Ex) {
+ while (Ex) {
+ const BinaryOperator *BO =
+ dyn_cast<BinaryOperator>(Ex->IgnoreParenCasts());
+ if (!BO)
+ break;
+ if (BO->getOpcode() == BO_Assign) {
+ Ex = BO->getRHS();
+ continue;
+ }
+ break;
+ }
+ return Ex;
+}
+
namespace {
class DeadStoreObs : public LiveVariables::Observer {
const CFG &cfg;
ASTContext &Ctx;
BugReporter& BR;
- AnalysisContext* AC;
+ AnalysisDeclContext* AC;
ParentMap& Parents;
llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
- llvm::OwningPtr<ReachableCode> reachableCode;
+ OwningPtr<ReachableCode> reachableCode;
const CFGBlock *currentBlock;
enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
public:
DeadStoreObs(const CFG &cfg, ASTContext &ctx,
- BugReporter& br, AnalysisContext* ac, ParentMap& parents,
+ BugReporter& br, AnalysisDeclContext* ac, ParentMap& parents,
llvm::SmallPtrSet<const VarDecl*, 20> &escaped)
: cfg(cfg), Ctx(ctx), BR(br), AC(ac), Parents(parents),
Escaped(escaped), currentBlock(0) {}
@@ -104,14 +120,11 @@ public:
if (!reachableCode->isReachable(currentBlock))
return;
- llvm::SmallString<64> buf;
+ SmallString<64> buf;
llvm::raw_svector_ostream os(buf);
const char *BugType = 0;
switch (dsk) {
- default:
- llvm_unreachable("Impossible dead store type.");
-
case DeadInit:
BugType = "Dead initialization";
os << "Value stored to '" << *V
@@ -132,7 +145,7 @@ public:
return;
}
- BR.EmitBasicReport(BugType, "Dead store", os.str(), L, R);
+ BR.EmitBasicReport(AC->getDecl(), BugType, "Dead store", os.str(), L, R);
}
void CheckVarDecl(const VarDecl *VD, const Expr *Ex, const Expr *Val,
@@ -202,17 +215,18 @@ public:
if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
// Special case: check for assigning null to a pointer.
// This is a common form of defensive programming.
+ const Expr *RHS = LookThroughTransitiveAssignments(B->getRHS());
+
QualType T = VD->getType();
if (T->isPointerType() || T->isObjCObjectPointerType()) {
- if (B->getRHS()->isNullPointerConstant(Ctx,
- Expr::NPC_ValueDependentIsNull))
+ if (RHS->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNull))
return;
}
- Expr *RHS = B->getRHS()->IgnoreParenCasts();
+ RHS = RHS->IgnoreParenCasts();
// Special case: self-assignments. These are often used to shut up
// "unused variable" compiler warnings.
- if (DeclRefExpr *RhsDR = dyn_cast<DeclRefExpr>(RHS))
+ if (const DeclRefExpr *RhsDR = dyn_cast<DeclRefExpr>(RHS))
if (VD == dyn_cast<VarDecl>(RhsDR->getDecl()))
return;
@@ -254,10 +268,15 @@ public:
if (V->getType()->getAs<ReferenceType>())
return;
- if (Expr *E = V->getInit()) {
- while (ExprWithCleanups *exprClean = dyn_cast<ExprWithCleanups>(E))
+ if (const Expr *E = V->getInit()) {
+ while (const ExprWithCleanups *exprClean =
+ dyn_cast<ExprWithCleanups>(E))
E = exprClean->getSubExpr();
+ // Look through transitive assignments, e.g.:
+ // int x = y = 0;
+ E = LookThroughTransitiveAssignments(E);
+
// Don't warn on C++ objects (yet) until we can show that their
// constructors/destructors don't have side effects.
if (isa<CXXConstructExpr>(E))
@@ -274,11 +293,12 @@ public:
// If x is EVER assigned a new value later, don't issue
// a warning. This is because such initialization can be
// due to defensive programming.
- if (E->isConstantInitializer(Ctx, false))
+ if (E->isEvaluatable(Ctx))
return;
- if (DeclRefExpr *DRE=dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
- if (VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
// Special case: check for initialization from constant
// variables.
//
@@ -350,7 +370,7 @@ public:
BugReporter &BR) const {
if (LiveVariables *L = mgr.getAnalysis<LiveVariables>(D)) {
CFG &cfg = *mgr.getCFG(D);
- AnalysisContext *AC = mgr.getAnalysisContext(D);
+ AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D);
ParentMap &pmap = mgr.getParentMap(D);
FindEscaped FS(&cfg);
FS.getCFG().VisitBlockStmts(FS);
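Illustrative input for the new LookThroughTransitiveAssignments logic (get() and use() are placeholders):

    char *p, *q = get();
    p = q = 0;        // the RHS is looked through to the null constant, so
                      // the defensive null-store exemption now covers 'p'
    int y = 1;
    int x = y = 0;    // a declaration's initializer is looked through the
                      // same way before the dead-init special cases apply
    use(x, y);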
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index d9d5694..34053cd 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -15,11 +15,36 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Analyses/Dominators.h"
+#include "clang/Analysis/CallGraph.h"
+#include "llvm/Support/Process.h"
using namespace clang;
using namespace ento;
//===----------------------------------------------------------------------===//
+// DominatorsTreeDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DominatorsTreeDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D)) {
+ DominatorTree dom;
+ dom.buildDominatorTree(*AC);
+ dom.dump();
+ }
+ }
+};
+}
+
+void ento::registerDominatorsTreeDumper(CheckerManager &mgr) {
+ mgr.registerChecker<DominatorsTreeDumper>();
+}
+
+//===----------------------------------------------------------------------===//
// LiveVariablesDumper
//===----------------------------------------------------------------------===//
@@ -49,7 +74,7 @@ public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
if (CFG *cfg = mgr.getCFG(D)) {
- cfg->viewCFG(mgr.getLangOptions());
+ cfg->viewCFG(mgr.getLangOpts());
}
}
};
@@ -69,7 +94,8 @@ public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
if (CFG *cfg = mgr.getCFG(D)) {
- cfg->dump(mgr.getLangOptions());
+ cfg->dump(mgr.getLangOpts(),
+ llvm::sys::Process::StandardErrHasColors());
}
}
};
@@ -78,3 +104,43 @@ public:
void ento::registerCFGDumper(CheckerManager &mgr) {
mgr.registerChecker<CFGDumper>();
}
+
+//===----------------------------------------------------------------------===//
+// CallGraphViewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CallGraphViewer : public Checker< check::ASTDecl<TranslationUnitDecl> > {
+public:
+ void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CallGraph CG;
+ CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
+ CG.viewGraph();
+ }
+};
+}
+
+void ento::registerCallGraphViewer(CheckerManager &mgr) {
+ mgr.registerChecker<CallGraphViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// CallGraphDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CallGraphDumper : public Checker< check::ASTDecl<TranslationUnitDecl> > {
+public:
+ void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CallGraph CG;
+ CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
+ CG.dump();
+ }
+};
+}
+
+void ento::registerCallGraphDumper(CheckerManager &mgr) {
+ mgr.registerChecker<CallGraphDumper>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index eeda734..81a2745 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -13,10 +13,12 @@
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
@@ -25,35 +27,42 @@ namespace {
class DereferenceChecker
: public Checker< check::Location,
EventDispatcher<ImplicitNullDerefEvent> > {
- mutable llvm::OwningPtr<BuiltinBug> BT_null;
- mutable llvm::OwningPtr<BuiltinBug> BT_undef;
+ mutable OwningPtr<BuiltinBug> BT_null;
+ mutable OwningPtr<BuiltinBug> BT_undef;
public:
void checkLocation(SVal location, bool isLoad, const Stmt* S,
CheckerContext &C) const;
- static void AddDerefSource(raw_ostream &os,
+ static const MemRegion *AddDerefSource(raw_ostream &os,
SmallVectorImpl<SourceRange> &Ranges,
- const Expr *Ex, bool loadedFrom = false);
+ const Expr *Ex, const ProgramState *state,
+ const LocationContext *LCtx,
+ bool loadedFrom = false);
};
} // end anonymous namespace
-void DereferenceChecker::AddDerefSource(raw_ostream &os,
- SmallVectorImpl<SourceRange> &Ranges,
- const Expr *Ex,
- bool loadedFrom) {
+const MemRegion *
+DereferenceChecker::AddDerefSource(raw_ostream &os,
+ SmallVectorImpl<SourceRange> &Ranges,
+ const Expr *Ex,
+ const ProgramState *state,
+ const LocationContext *LCtx,
+ bool loadedFrom) {
Ex = Ex->IgnoreParenLValueCasts();
+ const MemRegion *sourceR = 0;
switch (Ex->getStmtClass()) {
default:
- return;
+ break;
case Stmt::DeclRefExprClass: {
const DeclRefExpr *DR = cast<DeclRefExpr>(Ex);
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
os << " (" << (loadedFrom ? "loaded from" : "from")
<< " variable '" << VD->getName() << "')";
Ranges.push_back(DR->getSourceRange());
+ sourceR = state->getLValue(VD, LCtx).getAsRegion();
}
- return;
+ break;
}
case Stmt::MemberExprClass: {
const MemberExpr *ME = cast<MemberExpr>(Ex);
@@ -64,6 +73,7 @@ void DereferenceChecker::AddDerefSource(raw_ostream &os,
break;
}
}
+ return sourceR;
}
void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
@@ -77,7 +87,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
BugReport *report =
new BugReport(*BT_undef, BT_undef->getDescription(), N);
report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- bugreporter::GetDerefExpr(N)));
+ bugreporter::GetDerefExpr(N), report));
C.EmitReport(report);
}
return;
@@ -89,8 +99,9 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
if (!isa<Loc>(location))
return;
- const ProgramState *state = C.getState();
- const ProgramState *notNullState, *nullState;
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ ProgramStateRef notNullState, nullState;
llvm::tie(notNullState, nullState) = state->assume(location);
// The explicit NULL case.
@@ -106,20 +117,24 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
if (!BT_null)
BT_null.reset(new BuiltinBug("Dereference of null pointer"));
- llvm::SmallString<100> buf;
+ SmallString<100> buf;
SmallVector<SourceRange, 2> Ranges;
// Walk through lvalue casts to get the original expression
// that syntactically caused the load.
if (const Expr *expr = dyn_cast<Expr>(S))
S = expr->IgnoreParenLValueCasts();
+
+ const MemRegion *sourceR = 0;
switch (S->getStmtClass()) {
case Stmt::ArraySubscriptExprClass: {
llvm::raw_svector_ostream os(buf);
os << "Array access";
const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
- AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts());
+ sourceR =
+ AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
+ state.getPtr(), LCtx);
os << " results in a null pointer dereference";
break;
}
@@ -127,7 +142,9 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
llvm::raw_svector_ostream os(buf);
os << "Dereference of null pointer";
const UnaryOperator *U = cast<UnaryOperator>(S);
- AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(), true);
+ sourceR =
+ AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
+ state.getPtr(), LCtx, true);
break;
}
case Stmt::MemberExprClass: {
@@ -136,7 +153,9 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
llvm::raw_svector_ostream os(buf);
os << "Access to field '" << M->getMemberNameInfo()
<< "' results in a dereference of a null pointer";
- AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(), true);
+ sourceR =
+ AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
+ state.getPtr(), LCtx, true);
}
break;
}
@@ -163,12 +182,17 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
N);
report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- bugreporter::GetDerefExpr(N)));
+ bugreporter::GetDerefExpr(N), report));
for (SmallVectorImpl<SourceRange>::iterator
I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
report->addRange(*I);
+ if (sourceR) {
+ report->markInteresting(sourceR);
+ report->markInteresting(state->getRawSVal(loc::MemRegionVal(sourceR)));
+ }
+
C.EmitReport(report);
return;
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 75b7cc4..2627f0c 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -23,12 +23,31 @@ using namespace ento;
namespace {
class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
+ void reportBug(const char *Msg,
+ ProgramStateRef StateZero,
+ CheckerContext &C) const;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
};
} // end anonymous namespace
+void DivZeroChecker::reportBug(const char *Msg,
+ ProgramStateRef StateZero,
+ CheckerContext &C) const {
+ if (ExplodedNode *N = C.generateSink(StateZero)) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Division by zero"));
+
+ BugReport *R =
+ new BugReport(*BT, Msg, N);
+
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
+ bugreporter::GetDenomExpr(N), R));
+ C.EmitReport(R);
+ }
+}
+
void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
CheckerContext &C) const {
BinaryOperator::Opcode Op = B->getOpcode();
@@ -42,7 +61,7 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
!B->getRHS()->getType()->isScalarType())
return;
- SVal Denom = C.getState()->getSVal(B->getRHS());
+ SVal Denom = C.getState()->getSVal(B->getRHS(), C.getLocationContext());
const DefinedSVal *DV = dyn_cast<DefinedSVal>(&Denom);
// Divide-by-undefined handled in the generic checking for uses of
@@ -52,22 +71,18 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
// Check for divide by zero.
ConstraintManager &CM = C.getConstraintManager();
- const ProgramState *stateNotZero, *stateZero;
+ ProgramStateRef stateNotZero, stateZero;
llvm::tie(stateNotZero, stateZero) = CM.assumeDual(C.getState(), *DV);
- if (stateZero && !stateNotZero) {
- if (ExplodedNode *N = C.generateSink(stateZero)) {
- if (!BT)
- BT.reset(new BuiltinBug("Division by zero"));
-
- BugReport *R =
- new BugReport(*BT, BT->getDescription(), N);
-
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- bugreporter::GetDenomExpr(N)));
+ if (!stateNotZero) {
+ assert(stateZero);
+ reportBug("Division by zero", stateZero, C);
+ return;
+ }
- C.EmitReport(R);
- }
+ bool TaintedD = C.getState()->isTainted(*DV);
+ if ((stateNotZero && stateZero && TaintedD)) {
+ reportBug("Division by a tainted value, possibly zero", stateZero, C);
return;
}
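With the taint propagation added later in this patch enabled, the new branch fires on code like this (illustrative snippet):

    int n;
    scanf("%d", &n);   // 'n' becomes tainted via GenericTaintChecker
    return 100 / n;    // "Division by a tainted value, possibly zero"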
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index 531d87e..a1f2f3b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -25,7 +25,7 @@ using namespace ento;
namespace {
class FixedAddressChecker
: public Checker< check::PreStmt<BinaryOperator> > {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -44,14 +44,13 @@ void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
if (!T->isPointerType())
return;
- const ProgramState *state = C.getState();
-
- SVal RV = state->getSVal(B->getRHS());
+ ProgramStateRef state = C.getState();
+ SVal RV = state->getSVal(B->getRHS(), C.getLocationContext());
if (!RV.isConstant() || RV.isZeroConstant())
return;
- if (ExplodedNode *N = C.generateNode()) {
+ if (ExplodedNode *N = C.addTransition()) {
if (!BT)
BT.reset(new BuiltinBug("Use fixed address",
"Using a fixed address is not portable because that "
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
new file mode 100644
index 0000000..135b81d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -0,0 +1,740 @@
+//== GenericTaintChecker.cpp ----------------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker defines the attack surface for generic taint propagation.
+//
+// The taint information produced by it might be useful to other checkers. For
+// example, checkers should report errors which involve tainted data more
+// aggressively, even if the involved symbols are underconstrained.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/Basic/Builtins.h"
+#include <climits>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class GenericTaintChecker : public Checker< check::PostStmt<CallExpr>,
+ check::PreStmt<CallExpr> > {
+public:
+ static void *getTag() { static int Tag; return &Tag; }
+
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const DeclRefExpr *DRE, CheckerContext &C) const;
+
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ static const unsigned InvalidArgIndex = UINT_MAX;
+ /// Denotes the return value.
+ static const unsigned ReturnValueIndex = UINT_MAX - 1;
+
+ mutable OwningPtr<BugType> BT;
+ inline void initBugType() const {
+ if (!BT)
+ BT.reset(new BugType("Use of Untrusted Data", "Untrusted Data"));
+ }
+
+ /// \brief Catch taint related bugs. Check if tainted data is passed to a
+ /// system call etc.
+ bool checkPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Add taint sources on a pre-visit.
+ void addSourcesPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Propagate taint generated at pre-visit.
+ bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Add taint sources on a post-visit.
+ void addSourcesPost(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Check if the region the expression evaluates to is the standard input,
+ /// and thus is tainted.
+ static bool isStdin(const Expr *E, CheckerContext &C);
+
+ /// \brief Given a pointer argument, get the symbol of the value it contains
+ /// (points to).
+ static SymbolRef getPointedToSymbol(CheckerContext &C, const Expr *Arg);
+
+ /// Functions defining the attack surface.
+ typedef ProgramStateRef (GenericTaintChecker::*FnCheck)(const CallExpr *,
+ CheckerContext &C) const;
+ ProgramStateRef postScanf(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef postSocket(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef postRetTaint(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Taint the scanned input if the file is tainted.
+ ProgramStateRef preFscanf(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Check for CWE-134: Uncontrolled Format String.
+ static const char MsgUncontrolledFormatString[];
+ bool checkUncontrolledFormatString(const CallExpr *CE,
+ CheckerContext &C) const;
+
+ /// Check for:
+ /// CERT/STR02-C. "Sanitize data passed to complex subsystems"
+ /// CWE-78, "Failure to Sanitize Data into an OS Command"
+ static const char MsgSanitizeSystemArgs[];
+ bool checkSystemCall(const CallExpr *CE, StringRef Name,
+ CheckerContext &C) const;
+
+ /// Check if tainted data is used as a buffer size in strn.. functions
+ /// and allocators.
+ static const char MsgTaintedBufferSize[];
+ bool checkTaintedBufferSize(const CallExpr *CE, const FunctionDecl *FDecl,
+ CheckerContext &C) const;
+
+ /// Generate a report if the expression is tainted or points to tainted data.
+ bool generateReportIfTainted(const Expr *E, const char Msg[],
+ CheckerContext &C) const;
+
+
+ typedef llvm::SmallVector<unsigned, 2> ArgVector;
+
+ /// \brief A struct used to specify taint propagation rules for a function.
+ ///
+ /// If any of the possible taint source arguments is tainted, all of the
+ /// destination arguments should also be tainted. Use InvalidArgIndex in the
+ /// src list to specify that all of the arguments can introduce taint. Use
+ /// InvalidArgIndex in the dst arguments to signify that all the non-const
+ /// pointer and reference arguments might be tainted on return. If
+ /// ReturnValueIndex is added to the dst list, the return value will be
+ /// tainted.
+ struct TaintPropagationRule {
+ /// List of arguments which can be taint sources and should be checked.
+ ArgVector SrcArgs;
+ /// List of arguments which should be tainted on function return.
+ ArgVector DstArgs;
+ // TODO: Check if using other data structures would be more efficient.
+
+ TaintPropagationRule() {}
+
+ TaintPropagationRule(unsigned SArg,
+ unsigned DArg, bool TaintRet = false) {
+ SrcArgs.push_back(SArg);
+ DstArgs.push_back(DArg);
+ if (TaintRet)
+ DstArgs.push_back(ReturnValueIndex);
+ }
+
+ TaintPropagationRule(unsigned SArg1, unsigned SArg2,
+ unsigned DArg, bool TaintRet = false) {
+ SrcArgs.push_back(SArg1);
+ SrcArgs.push_back(SArg2);
+ DstArgs.push_back(DArg);
+ if (TaintRet)
+ DstArgs.push_back(ReturnValueIndex);
+ }
+
+ /// Get the propagation rule for a given function.
+ static TaintPropagationRule
+ getTaintPropagationRule(const FunctionDecl *FDecl,
+ StringRef Name,
+ CheckerContext &C);
+
+ inline void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
+ inline void addDstArg(unsigned A) { DstArgs.push_back(A); }
+
+ inline bool isNull() const { return SrcArgs.empty(); }
+
+ inline bool isDestinationArgument(unsigned ArgNum) const {
+ return (std::find(DstArgs.begin(),
+ DstArgs.end(), ArgNum) != DstArgs.end());
+ }
+
+ static inline bool isTaintedOrPointsToTainted(const Expr *E,
+ ProgramStateRef State,
+ CheckerContext &C) {
+ return (State->isTainted(E, C.getLocationContext()) || isStdin(E, C) ||
+ (E->getType().getTypePtr()->isPointerType() &&
+ State->isTainted(getPointedToSymbol(C, E))));
+ }
+
+ /// \brief Pre-process a function which propagates taint according to the
+ /// taint rule.
+ ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
+
+ };
+};
+
+const unsigned GenericTaintChecker::ReturnValueIndex;
+const unsigned GenericTaintChecker::InvalidArgIndex;
+
+const char GenericTaintChecker::MsgUncontrolledFormatString[] =
+ "Untrusted data is used as a format string "
+ "(CWE-134: Uncontrolled Format String)";
+
+const char GenericTaintChecker::MsgSanitizeSystemArgs[] =
+ "Untrusted data is passed to a system call "
+ "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
+
+const char GenericTaintChecker::MsgTaintedBufferSize[] =
+ "Untrusted data is used to specify the buffer size "
+ "(CERT/STR31-C. Guarantee that storage for strings has sufficient space for "
+ "character data and the null terminator)";
+
+} // end of anonymous namespace
+
+/// A set used to pass information from the call pre-visit to the call
+/// post-visit. The values are unsigned integers, which are either
+/// ReturnValueIndex, or indices of pointer/reference arguments that point
+/// to data which should be tainted on return.
+namespace { struct TaintArgsOnPostVisit{}; }
+namespace clang { namespace ento {
+template<> struct ProgramStateTrait<TaintArgsOnPostVisit>
+ : public ProgramStatePartialTrait<llvm::ImmutableSet<unsigned> > {
+ static void *GDMIndex() { return GenericTaintChecker::getTag(); }
+};
+}}
+
+GenericTaintChecker::TaintPropagationRule
+GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
+ const FunctionDecl *FDecl,
+ StringRef Name,
+ CheckerContext &C) {
+ // TODO: Currently, we might lose precision here: we always mark a return
+ // value as tainted even if it's just a pointer pointing to tainted data.
+
+ // Check for exact name match for functions without builtin substitutes.
+ TaintPropagationRule Rule = llvm::StringSwitch<TaintPropagationRule>(Name)
+ .Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("atol", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("atoll", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getc", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("fgetc", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getc_unlocked", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getw", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("toupper", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("tolower", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("strchr", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("strrchr", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("read", TaintPropagationRule(0, 2, 1, true))
+ .Case("pread", TaintPropagationRule(InvalidArgIndex, 1, true))
+ .Case("gets", TaintPropagationRule(InvalidArgIndex, 0, true))
+ .Case("fgets", TaintPropagationRule(2, 0, true))
+ .Case("getline", TaintPropagationRule(2, 0))
+ .Case("getdelim", TaintPropagationRule(3, 0))
+ .Case("fgetln", TaintPropagationRule(0, ReturnValueIndex))
+ .Default(TaintPropagationRule());
+
+ if (!Rule.isNull())
+ return Rule;
+
+ // Check if it's one of the memory setting/copying functions.
+ // This check is specialized but faster than calling isCLibraryFunction.
+ unsigned BId = 0;
+ if ( (BId = FDecl->getMemoryFunctionKind()) )
+ switch(BId) {
+ case Builtin::BImemcpy:
+ case Builtin::BImemmove:
+ case Builtin::BIstrncpy:
+ case Builtin::BIstrncat:
+ return TaintPropagationRule(1, 2, 0, true);
+ case Builtin::BIstrlcpy:
+ case Builtin::BIstrlcat:
+ return TaintPropagationRule(1, 2, 0, false);
+ case Builtin::BIstrndup:
+ return TaintPropagationRule(0, 1, ReturnValueIndex);
+
+ default:
+ break;
+ };
+
+ // Process all other functions which could be defined as builtins.
+ if (Rule.isNull()) {
+ if (C.isCLibraryFunction(FDecl, "snprintf") ||
+ C.isCLibraryFunction(FDecl, "sprintf"))
+ return TaintPropagationRule(InvalidArgIndex, 0, true);
+ else if (C.isCLibraryFunction(FDecl, "strcpy") ||
+ C.isCLibraryFunction(FDecl, "stpcpy") ||
+ C.isCLibraryFunction(FDecl, "strcat"))
+ return TaintPropagationRule(1, 0, true);
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ return TaintPropagationRule(0, 2, 1, false);
+ else if (C.isCLibraryFunction(FDecl, "strdup") ||
+ C.isCLibraryFunction(FDecl, "strdupa"))
+ return TaintPropagationRule(0, ReturnValueIndex);
+ else if (C.isCLibraryFunction(FDecl, "wcsdup"))
+ return TaintPropagationRule(0, ReturnValueIndex);
+ }
+
+ // Skipping the following functions, since they might be used for cleansing
+ // or smart memory copy:
+ // - memccpy - copying until hitting a special character.
+
+ return TaintPropagationRule();
+}
+
+void GenericTaintChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ // Check for errors first.
+ if (checkPre(CE, C))
+ return;
+
+ // Add taint second.
+ addSourcesPre(CE, C);
+}
+
+void GenericTaintChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (propagateFromPre(CE, C))
+ return;
+ addSourcesPost(CE, C);
+}
+
+void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = 0;
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ StringRef Name = C.getCalleeName(FDecl);
+ if (Name.empty())
+ return;
+
+ // First, try generating a propagation rule for this function.
+ TaintPropagationRule Rule =
+ TaintPropagationRule::getTaintPropagationRule(FDecl, Name, C);
+ if (!Rule.isNull()) {
+ State = Rule.process(CE, C);
+ if (!State)
+ return;
+ C.addTransition(State);
+ return;
+ }
+
+ // Otherwise, check if we have custom pre-processing implemented.
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("fscanf", &GenericTaintChecker::preFscanf)
+ .Default(0);
+ // Check and evaluate the call.
+ if (evalFunction)
+ State = (this->*evalFunction)(CE, C);
+ if (!State)
+ return;
+ C.addTransition(State);
+
+}
+
+bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Depending on what was tainted at pre-visit, we determined a set of
+ // arguments which should be tainted after the function returns. These are
+ // stored in the state as TaintArgsOnPostVisit set.
+ llvm::ImmutableSet<unsigned> TaintArgs = State->get<TaintArgsOnPostVisit>();
+ if (TaintArgs.isEmpty())
+ return false;
+
+ for (llvm::ImmutableSet<unsigned>::iterator
+ I = TaintArgs.begin(), E = TaintArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ // Special handling for the tainted return value.
+ if (ArgNum == ReturnValueIndex) {
+ State = State->addTaint(CE, C.getLocationContext());
+ continue;
+ }
+
+ // The arguments are pointer arguments. The data they are pointing at is
+ // tainted after the call.
+ if (CE->getNumArgs() < (ArgNum + 1))
+ return false;
+ const Expr* Arg = CE->getArg(ArgNum);
+ SymbolRef Sym = getPointedToSymbol(C, Arg);
+ if (Sym)
+ State = State->addTaint(Sym);
+ }
+
+ // Clear up the taint info from the state.
+ State = State->remove<TaintArgsOnPostVisit>();
+
+ if (State != C.getState()) {
+ C.addTransition(State);
+ return true;
+ }
+ return false;
+}
+
+void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
+ CheckerContext &C) const {
+ // Define the attack surface.
+ // Set the evaluation function by switching on the callee name.
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty())
+ return;
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("scanf", &GenericTaintChecker::postScanf)
+ // TODO: Add support for vfscanf & family.
+ .Case("getchar", &GenericTaintChecker::postRetTaint)
+ .Case("getchar_unlocked", &GenericTaintChecker::postRetTaint)
+ .Case("getenv", &GenericTaintChecker::postRetTaint)
+ .Case("fopen", &GenericTaintChecker::postRetTaint)
+ .Case("fdopen", &GenericTaintChecker::postRetTaint)
+ .Case("freopen", &GenericTaintChecker::postRetTaint)
+ .Case("getch", &GenericTaintChecker::postRetTaint)
+ .Case("wgetch", &GenericTaintChecker::postRetTaint)
+ .Case("socket", &GenericTaintChecker::postSocket)
+ .Default(0);
+
+ // If the callee isn't defined, it is not of security concern.
+ // Check and evaluate the call.
+ ProgramStateRef State = 0;
+ if (evalFunction)
+ State = (this->*evalFunction)(CE, C);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+bool GenericTaintChecker::checkPre(const CallExpr *CE, CheckerContext &C) const{
+
+ if (checkUncontrolledFormatString(CE, C))
+ return true;
+
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ StringRef Name = C.getCalleeName(FDecl);
+ if (Name.empty())
+ return false;
+
+ if (checkSystemCall(CE, Name, C))
+ return true;
+
+ if (checkTaintedBufferSize(CE, FDecl, C))
+ return true;
+
+ return false;
+}
+
+SymbolRef GenericTaintChecker::getPointedToSymbol(CheckerContext &C,
+ const Expr* Arg) {
+ ProgramStateRef State = C.getState();
+ SVal AddrVal = State->getSVal(Arg->IgnoreParens(), C.getLocationContext());
+ if (AddrVal.isUnknownOrUndef())
+ return 0;
+
+ Loc *AddrLoc = dyn_cast<Loc>(&AddrVal);
+ if (!AddrLoc)
+ return 0;
+
+ const PointerType *ArgTy =
+ dyn_cast<PointerType>(Arg->getType().getCanonicalType().getTypePtr());
+ SVal Val = State->getSVal(*AddrLoc,
+ ArgTy ? ArgTy->getPointeeType(): QualType());
+ return Val.getAsSymbol();
+}
+
+ProgramStateRef
+GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Check for taint in arguments.
+ bool IsTainted = false;
+ for (ArgVector::const_iterator I = SrcArgs.begin(),
+ E = SrcArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ if (ArgNum == InvalidArgIndex) {
+ // Check if any of the arguments is tainted, but skip the
+ // destination arguments.
+ for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
+ if (isDestinationArgument(i))
+ continue;
+ if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
+ break;
+ }
+ break;
+ }
+
+ if (CE->getNumArgs() < (ArgNum + 1))
+ return State;
+ if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C)))
+ break;
+ }
+ if (!IsTainted)
+ return State;
+
+ // Mark the arguments which should be tainted after the function returns.
+ for (ArgVector::const_iterator I = DstArgs.begin(),
+ E = DstArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ // Should we mark all arguments as tainted?
+ if (ArgNum == InvalidArgIndex) {
+ // For all pointer and references that were passed in:
+ // If they are not pointing to const data, mark data as tainted.
+ // TODO: So far we are just going one level down; ideally we'd need to
+ // recurse here.
+ for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
+ const Expr *Arg = CE->getArg(i);
+ // Process pointer argument.
+ const Type *ArgTy = Arg->getType().getTypePtr();
+ QualType PType = ArgTy->getPointeeType();
+ if ((!PType.isNull() && !PType.isConstQualified())
+ || (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
+ State = State->add<TaintArgsOnPostVisit>(i);
+ }
+ continue;
+ }
+
+ // Should mark the return value?
+ if (ArgNum == ReturnValueIndex) {
+ State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex);
+ continue;
+ }
+
+ // Mark the given argument.
+ assert(ArgNum < CE->getNumArgs());
+ State = State->add<TaintArgsOnPostVisit>(ArgNum);
+ }
+
+ return State;
+}
+
+
+// If argument 0 (file descriptor) is tainted, all arguments except for arg 0
+// and arg 1 should get taint.
+ProgramStateRef GenericTaintChecker::preFscanf(const CallExpr *CE,
+ CheckerContext &C) const {
+ assert(CE->getNumArgs() >= 2);
+ ProgramStateRef State = C.getState();
+
+ // Check if the file descriptor is tainted.
+ if (State->isTainted(CE->getArg(0), C.getLocationContext()) ||
+ isStdin(CE->getArg(0), C)) {
+ // All arguments except for the first two should get taint.
+ for (unsigned int i = 2; i < CE->getNumArgs(); ++i)
+ State = State->add<TaintArgsOnPostVisit>(i);
+ return State;
+ }
+
+ return 0;
+}
+
+
+// If argument 0 (the protocol domain) is a network domain, the return value
+// should get taint.
+ProgramStateRef GenericTaintChecker::postSocket(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (CE->getNumArgs() < 3)
+ return State;
+
+ SourceLocation DomLoc = CE->getArg(0)->getExprLoc();
+ StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
+ // Whitelist the internal communication protocols.
+ if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
+ DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
+ return State;
+ State = State->addTaint(CE, C.getLocationContext());
+ return State;
+}
+
+ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (CE->getNumArgs() < 2)
+ return State;
+
+ // All arguments except for the very first one should get taint.
+ for (unsigned int i = 1; i < CE->getNumArgs(); ++i) {
+ // The arguments are pointer arguments. The data they are pointing at is
+ // tainted after the call.
+ const Expr* Arg = CE->getArg(i);
+ SymbolRef Sym = getPointedToSymbol(C, Arg);
+ if (Sym)
+ State = State->addTaint(Sym);
+ }
+ return State;
+}
+
+ProgramStateRef GenericTaintChecker::postRetTaint(const CallExpr *CE,
+ CheckerContext &C) const {
+ return C.getState()->addTaint(CE, C.getLocationContext());
+}
+
+bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
+ ProgramStateRef State = C.getState();
+ SVal Val = State->getSVal(E, C.getLocationContext());
+
+ // stdin is a pointer, so it would be a region.
+ const MemRegion *MemReg = Val.getAsRegion();
+
+ // The region should be symbolic; we do not know its value.
+ const SymbolicRegion *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
+ if (!SymReg)
+ return false;
+
+ // Get its symbol and find the declaration region it's pointing to.
+ const SymbolRegionValue *Sm =dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
+ if (!Sm)
+ return false;
+ const DeclRegion *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
+ if (!DeclReg)
+ return false;
+
+ // This region corresponds to a declaration; find out if it's a global/extern
+ // variable named stdin with the proper type.
+ if (const VarDecl *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
+ D = D->getCanonicalDecl();
+ if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC())
+ if (const PointerType * PtrTy =
+ dyn_cast<PointerType>(D->getType().getTypePtr()))
+ if (PtrTy->getPointeeType() == C.getASTContext().getFILEType())
+ return true;
+ }
+ return false;
+}
+
+static bool getPrintfFormatArgumentNum(const CallExpr *CE,
+ const CheckerContext &C,
+ unsigned int &ArgNum) {
+ // Find if the function contains a format string argument.
+ // Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
+ // vsnprintf, syslog, custom annotated functions.
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl)
+ return false;
+ for (specific_attr_iterator<FormatAttr>
+ i = FDecl->specific_attr_begin<FormatAttr>(),
+ e = FDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+
+ const FormatAttr *Format = *i;
+ ArgNum = Format->getFormatIdx() - 1;
+ if ((Format->getType() == "printf") && CE->getNumArgs() > ArgNum)
+ return true;
+ }
+
+ // Or if a function is named setproctitle (this is a heuristic).
+ if (C.getCalleeName(CE).find("setproctitle") != StringRef::npos) {
+ ArgNum = 0;
+ return true;
+ }
+
+ return false;
+}
+
+bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
+ const char Msg[],
+ CheckerContext &C) const {
+ assert(E);
+
+ // Check for taint.
+ ProgramStateRef State = C.getState();
+ if (!State->isTainted(getPointedToSymbol(C, E)) &&
+ !State->isTainted(E, C.getLocationContext()))
+ return false;
+
+ // Generate diagnostic.
+ if (ExplodedNode *N = C.addTransition()) {
+ initBugType();
+ BugReport *report = new BugReport(*BT, Msg, N);
+ report->addRange(E->getSourceRange());
+ C.EmitReport(report);
+ return true;
+ }
+ return false;
+}
+
+bool GenericTaintChecker::checkUncontrolledFormatString(const CallExpr *CE,
+ CheckerContext &C) const{
+ // Check if the function contains a format string argument.
+ unsigned int ArgNum = 0;
+ if (!getPrintfFormatArgumentNum(CE, C, ArgNum))
+ return false;
+
+ // If either the format string content or the pointer itself is tainted, warn.
+ if (generateReportIfTainted(CE->getArg(ArgNum),
+ MsgUncontrolledFormatString, C))
+ return true;
+ return false;
+}
+
+bool GenericTaintChecker::checkSystemCall(const CallExpr *CE,
+ StringRef Name,
+ CheckerContext &C) const {
+ // TODO: It might make sense to run this check on demand. In some cases,
+ // we should check if the environment has been cleansed here. We also might
+ // need to know if the user was reset before these calls (seteuid).
+ unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
+ .Case("system", 0)
+ .Case("popen", 0)
+ .Case("execl", 0)
+ .Case("execle", 0)
+ .Case("execlp", 0)
+ .Case("execv", 0)
+ .Case("execvp", 0)
+ .Case("execvP", 0)
+ .Case("execve", 0)
+ .Case("dlopen", 0)
+ .Default(UINT_MAX);
+
+ if (ArgNum == UINT_MAX || CE->getNumArgs() < (ArgNum + 1))
+ return false;
+
+ if (generateReportIfTainted(CE->getArg(ArgNum),
+ MsgSanitizeSystemArgs, C))
+ return true;
+
+ return false;
+}
+
+// TODO: Should this check be a part of the CString checker?
+// If yes, should taint be a global setting?
+bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
+ const FunctionDecl *FDecl,
+ CheckerContext &C) const {
+ // If the function has a buffer size argument, set ArgNum.
+ unsigned ArgNum = InvalidArgIndex;
+ unsigned BId = 0;
+ if ((BId = FDecl->getMemoryFunctionKind()))
+ switch (BId) {
+ case Builtin::BImemcpy:
+ case Builtin::BImemmove:
+ case Builtin::BIstrncpy:
+ ArgNum = 2;
+ break;
+ case Builtin::BIstrndup:
+ ArgNum = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (ArgNum == InvalidArgIndex) {
+ if (C.isCLibraryFunction(FDecl, "malloc") ||
+ C.isCLibraryFunction(FDecl, "calloc") ||
+ C.isCLibraryFunction(FDecl, "alloca"))
+ ArgNum = 0;
+ else if (C.isCLibraryFunction(FDecl, "memccpy"))
+ ArgNum = 3;
+ else if (C.isCLibraryFunction(FDecl, "realloc"))
+ ArgNum = 1;
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ ArgNum = 2;
+ }
+
+ if (ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
+ generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C))
+ return true;
+
+ return false;
+}
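Both branches above reduce to the same report; a sketch of code that would hit them (size arguments are 0-based, so memcpy's count is argument 2):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    void copy(const char *src) {
      size_t n;
      fscanf(stdin, "%zu", &n);        // n is tainted
      char *dst = (char *)malloc(n);   // tainted allocation size (ArgNum 0)
      memcpy(dst, src, n);             // tainted copy size (ArgNum 2)
    }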
+
+void ento::registerGenericTaintChecker(CheckerManager &mgr) {
+ mgr.registerChecker<GenericTaintChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
index 5c257e5..c08f163 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
@@ -57,6 +57,7 @@
#include "clang/AST/Stmt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Support/ErrorHandling.h"
@@ -82,16 +83,16 @@ private:
// False positive reduction methods
static bool isSelfAssign(const Expr *LHS, const Expr *RHS);
- static bool isUnused(const Expr *E, AnalysisContext *AC);
+ static bool isUnused(const Expr *E, AnalysisDeclContext *AC);
static bool isTruncationExtensionAssignment(const Expr *LHS,
const Expr *RHS);
- static bool pathWasCompletelyAnalyzed(AnalysisContext *AC,
+ static bool pathWasCompletelyAnalyzed(AnalysisDeclContext *AC,
const CFGBlock *CB,
const CoreEngine &CE);
static bool CanVary(const Expr *Ex,
- AnalysisContext *AC);
+ AnalysisDeclContext *AC);
static bool isConstantOrPseudoConstant(const DeclRefExpr *DR,
- AnalysisContext *AC);
+ AnalysisDeclContext *AC);
static bool containsNonLocalVarDecl(const Stmt *S);
// Hash table and related data structures
@@ -116,7 +117,7 @@ void IdempotentOperationChecker::checkPreStmt(const BinaryOperator *B,
// been created yet.
BinaryOperatorData &Data = hash[B];
Assumption &A = Data.assumption;
- AnalysisContext *AC = C.getCurrentAnalysisContext();
+ AnalysisDeclContext *AC = C.getCurrentAnalysisDeclContext();
// If we already have visited this node on a path that does not contain an
// idempotent operation, return immediately.
@@ -141,10 +142,10 @@ void IdempotentOperationChecker::checkPreStmt(const BinaryOperator *B,
|| containsNonLocalVarDecl(RHS);
}
- const ProgramState *state = C.getState();
-
- SVal LHSVal = state->getSVal(LHS);
- SVal RHSVal = state->getSVal(RHS);
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal LHSVal = state->getSVal(LHS, LCtx);
+ SVal RHSVal = state->getSVal(RHS, LCtx);
// If either value is unknown, we can't be 100% sure of all paths.
if (LHSVal.isUnknownOrUndef() || RHSVal.isUnknownOrUndef()) {
@@ -366,8 +367,8 @@ void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
// warning
if (Eng.hasWorkRemaining()) {
// If we can trace back
- AnalysisContext *AC = (*ES.begin())->getLocationContext()
- ->getAnalysisContext();
+ AnalysisDeclContext *AC = (*ES.begin())->getLocationContext()
+ ->getAnalysisDeclContext();
if (!pathWasCompletelyAnalyzed(AC,
AC->getCFGStmtMap()->getBlock(B),
Eng.getCoreEngine()))
@@ -375,7 +376,7 @@ void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
}
// Select the error message and SourceRanges to report.
- llvm::SmallString<128> buf;
+ SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
bool LHSRelevant = false, RHSRelevant = false;
switch (A) {
@@ -487,7 +488,7 @@ bool IdempotentOperationChecker::isSelfAssign(const Expr *LHS, const Expr *RHS)
// Returns true if the Expr points to a VarDecl that is not read anywhere
// outside of self-assignments.
bool IdempotentOperationChecker::isUnused(const Expr *E,
- AnalysisContext *AC) {
+ AnalysisDeclContext *AC) {
if (!E)
return false;
@@ -531,7 +532,7 @@ bool IdempotentOperationChecker::isTruncationExtensionAssignment(
// Returns false if a path to this block was not completely analyzed, or true
// otherwise.
bool
-IdempotentOperationChecker::pathWasCompletelyAnalyzed(AnalysisContext *AC,
+IdempotentOperationChecker::pathWasCompletelyAnalyzed(AnalysisDeclContext *AC,
const CFGBlock *CB,
const CoreEngine &CE) {
@@ -615,7 +616,7 @@ IdempotentOperationChecker::pathWasCompletelyAnalyzed(AnalysisContext *AC,
// expression may also involve a variable that behaves like a constant. The
// function returns true if the expression varies, and false otherwise.
bool IdempotentOperationChecker::CanVary(const Expr *Ex,
- AnalysisContext *AC) {
+ AnalysisDeclContext *AC) {
// Parentheses and casts are irrelevant here
Ex = Ex->IgnoreParenCasts();
@@ -649,7 +650,6 @@ bool IdempotentOperationChecker::CanVary(const Expr *Ex,
case Stmt::InitListExprClass:
case Stmt::DesignatedInitExprClass:
case Stmt::BlockExprClass:
- case Stmt::BlockDeclRefExprClass:
return false;
// Cases requiring custom logic
@@ -699,7 +699,7 @@ bool IdempotentOperationChecker::CanVary(const Expr *Ex,
// Returns true if a DeclRefExpr is or behaves like a constant.
bool IdempotentOperationChecker::isConstantOrPseudoConstant(
const DeclRefExpr *DR,
- AnalysisContext *AC) {
+ AnalysisDeclContext *AC) {
// Check if the type of the Decl is const-qualified
if (DR->getType().isConstQualified())
return true;
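For reference, the shape of source this checker warns about, as a minimal sketch:

    int f(int x) {
      x = x;          // self-assignment is always idempotent
      int y = x * 1;  // the right operand never changes the result
      return y | y;   // both operands always evaluate to the same value
    }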
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
new file mode 100644
index 0000000..e35557f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -0,0 +1,22 @@
+//==--- InterCheckerAPI.h ---------------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file allows introduction of checker dependencies. It contains APIs for
+// inter-checker communications.
+//===----------------------------------------------------------------------===//
+
+#ifndef INTERCHECKERAPI_H_
+#define INTERCHECKERAPI_H_
+namespace clang {
+namespace ento {
+
+/// Register the checker which evaluates CString API calls.
+void registerCStringCheckerBasic(CheckerManager &Mgr);
+
+}}
+#endif /* INTERCHECKERAPI_H_ */
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
index fbc57d3..b0bac33 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
@@ -22,7 +22,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/Decl.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/Type.h"
#include "clang/AST/PrettyPrinter.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -117,17 +117,17 @@ public:
CheckerContext &C) const;
private:
- const ProgramState *handleAssign(const ProgramState *state,
+ ProgramStateRef handleAssign(ProgramStateRef state,
const Expr *lexp,
const Expr *rexp,
const LocationContext *LC) const;
- const ProgramState *handleAssign(const ProgramState *state,
+ ProgramStateRef handleAssign(ProgramStateRef state,
const MemRegion *MR,
const Expr *rexp,
const LocationContext *LC) const;
- const ProgramState *invalidateIterators(const ProgramState *state,
+ ProgramStateRef invalidateIterators(ProgramStateRef state,
const MemRegion *MR,
const MemberExpr *ME) const;
@@ -135,7 +135,7 @@ private:
void checkArgs(CheckerContext &C, const CallExpr *CE) const;
- const MemRegion *getRegion(const ProgramState *state,
+ const MemRegion *getRegion(ProgramStateRef state,
const Expr *E,
const LocationContext *LC) const;
@@ -227,7 +227,7 @@ static RefKind getTemplateKind(QualType T) {
// Iterate through our map and invalidate any iterators that were
// initialized from the specified instance MemRegion.
-const ProgramState *IteratorsChecker::invalidateIterators(const ProgramState *state,
+ProgramStateRef IteratorsChecker::invalidateIterators(ProgramStateRef state,
const MemRegion *MR, const MemberExpr *ME) const {
IteratorState::EntryMap Map = state->get<IteratorState>();
if (Map.isEmpty())
@@ -246,7 +246,7 @@ const ProgramState *IteratorsChecker::invalidateIterators(const ProgramState *st
}
// Handle assigning to an iterator where we don't have the LValue MemRegion.
-const ProgramState *IteratorsChecker::handleAssign(const ProgramState *state,
+ProgramStateRef IteratorsChecker::handleAssign(ProgramStateRef state,
const Expr *lexp, const Expr *rexp, const LocationContext *LC) const {
// Skip the cast if present.
if (const MaterializeTemporaryExpr *M
@@ -254,7 +254,7 @@ const ProgramState *IteratorsChecker::handleAssign(const ProgramState *state,
lexp = M->GetTemporaryExpr();
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(lexp))
lexp = ICE->getSubExpr();
- SVal sv = state->getSVal(lexp);
+ SVal sv = state->getSVal(lexp, LC);
const MemRegion *MR = sv.getAsRegion();
if (!MR)
return state;
@@ -271,7 +271,7 @@ const ProgramState *IteratorsChecker::handleAssign(const ProgramState *state,
}
// Handle assigning to an iterator.
-const ProgramState *IteratorsChecker::handleAssign(const ProgramState *state,
+ProgramStateRef IteratorsChecker::handleAssign(ProgramStateRef state,
const MemRegion *MR, const Expr *rexp, const LocationContext *LC) const {
// Assume unknown until we find something definite.
state = state->set<IteratorState>(MR, RefState::getUnknown());
@@ -376,7 +376,7 @@ const DeclRefExpr *IteratorsChecker::getDeclRefExpr(const Expr *E) const {
}
// Get the MemRegion associated with the expression.
-const MemRegion *IteratorsChecker::getRegion(const ProgramState *state,
+const MemRegion *IteratorsChecker::getRegion(ProgramStateRef state,
const Expr *E, const LocationContext *LC) const {
const DeclRefExpr *DRE = getDeclRefExpr(E);
if (!DRE)
@@ -394,9 +394,8 @@ const MemRegion *IteratorsChecker::getRegion(const ProgramState *state,
// use those nodes. We also cannot create multiple nodes at one ProgramPoint
// with the same tag.
void IteratorsChecker::checkExpr(CheckerContext &C, const Expr *E) const {
- const ProgramState *state = C.getState();
- const MemRegion *MR = getRegion(state, E,
- C.getPredecessor()->getLocationContext());
+ ProgramStateRef state = C.getState();
+ const MemRegion *MR = getRegion(state, E, C.getLocationContext());
if (!MR)
return;
@@ -405,7 +404,7 @@ void IteratorsChecker::checkExpr(CheckerContext &C, const Expr *E) const {
if (!RS)
return;
if (RS->isInvalid()) {
- if (ExplodedNode *N = C.generateNode()) {
+ if (ExplodedNode *N = C.addTransition()) {
if (!BT_Invalid)
// FIXME: We are eluding constness here.
const_cast<IteratorsChecker*>(this)->BT_Invalid = new BuiltinBug("");
@@ -428,7 +427,7 @@ void IteratorsChecker::checkExpr(CheckerContext &C, const Expr *E) const {
}
}
else if (RS->isUndefined()) {
- if (ExplodedNode *N = C.generateNode()) {
+ if (ExplodedNode *N = C.addTransition()) {
if (!BT_Undefined)
// FIXME: We are eluding constness here.
const_cast<IteratorsChecker*>(this)->BT_Undefined =
@@ -466,8 +465,8 @@ void IteratorsChecker::checkPreStmt(const CallExpr *CE,
void IteratorsChecker::checkPreStmt(const CXXOperatorCallExpr *OCE,
CheckerContext &C) const
{
- const LocationContext *LC = C.getPredecessor()->getLocationContext();
- const ProgramState *state = C.getState();
+ const LocationContext *LC = C.getLocationContext();
+ ProgramStateRef state = C.getState();
OverloadedOperatorKind Kind = OCE->getOperator();
if (Kind == OO_Equal) {
checkExpr(C, OCE->getArg(1));
@@ -497,7 +496,7 @@ void IteratorsChecker::checkPreStmt(const CXXOperatorCallExpr *OCE,
if (!RS1)
return;
if (RS0->getMemRegion() != RS1->getMemRegion()) {
- if (ExplodedNode *N = C.generateNode()) {
+ if (ExplodedNode *N = C.addTransition()) {
if (!BT_Incompatible)
const_cast<IteratorsChecker*>(this)->BT_Incompatible =
new BuiltinBug(
@@ -524,8 +523,8 @@ void IteratorsChecker::checkPreStmt(const DeclStmt *DS,
return;
// Get the MemRegion associated with the iterator and mark it as Undefined.
- const ProgramState *state = C.getState();
- Loc VarLoc = state->getLValue(VD, C.getPredecessor()->getLocationContext());
+ ProgramStateRef state = C.getState();
+ Loc VarLoc = state->getLValue(VD, C.getLocationContext());
const MemRegion *MR = VarLoc.getAsRegion();
if (!MR)
return;
@@ -545,8 +544,7 @@ void IteratorsChecker::checkPreStmt(const DeclStmt *DS,
E = M->GetTemporaryExpr();
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
InitEx = ICE->getSubExpr();
- state = handleAssign(state, MR, InitEx,
- C.getPredecessor()->getLocationContext());
+ state = handleAssign(state, MR, InitEx, C.getLocationContext());
}
}
}
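A minimal sketch of the invalidation pattern these methods model, assuming standard std::vector semantics:

    #include <vector>

    void g(void) {
      std::vector<int> v(1);
      std::vector<int>::iterator it = v.begin();
      v.push_back(2);  // may reallocate; 'it' is marked invalid
      *it = 0;         // use of an invalidated iterator -- reported
    }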
@@ -576,14 +574,14 @@ void IteratorsChecker::checkPreStmt(const CXXMemberCallExpr *MCE,
const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ME->getBase());
if (!DRE || getTemplateKind(DRE->getType()) != VectorKind)
return;
- SVal tsv = C.getState()->getSVal(DRE);
+ SVal tsv = C.getState()->getSVal(DRE, C.getLocationContext());
// Get the MemRegion associated with the container instance.
const MemRegion *MR = tsv.getAsRegion();
if (!MR)
return;
// If we are calling a function that invalidates iterators, mark them
// appropriately by finding matching instances.
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
StringRef mName = ME->getMemberDecl()->getName();
if (llvm::StringSwitch<bool>(mName)
.Cases("insert", "reserve", "push_back", true)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index e398fcb..757a4ce 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -17,8 +17,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
-#include <string>
-#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
@@ -116,8 +115,10 @@ static bool IsSmallVector(QualType T) {
namespace {
class StringRefCheckerVisitor : public StmtVisitor<StringRefCheckerVisitor> {
BugReporter &BR;
+ const Decl *DeclWithIssue;
public:
- StringRefCheckerVisitor(BugReporter &br) : BR(br) {}
+ StringRefCheckerVisitor(const Decl *declWithIssue, BugReporter &br)
+ : BR(br), DeclWithIssue(declWithIssue) {}
void VisitChildren(Stmt *S) {
for (Stmt::child_iterator I = S->child_begin(), E = S->child_end() ;
I != E; ++I)
@@ -132,7 +133,7 @@ private:
} // end anonymous namespace
static void CheckStringRefAssignedTemporary(const Decl *D, BugReporter &BR) {
- StringRefCheckerVisitor walker(BR);
+ StringRefCheckerVisitor walker(D, BR);
walker.Visit(D->getBody());
}
@@ -177,7 +178,7 @@ void StringRefCheckerVisitor::VisitVarDecl(VarDecl *VD) {
"std::string that it outlives";
PathDiagnosticLocation VDLoc =
PathDiagnosticLocation::createBegin(VD, BR.getSourceManager());
- BR.EmitBasicReport(desc, "LLVM Conventions", desc,
+ BR.EmitBasicReport(DeclWithIssue, desc, "LLVM Conventions", desc,
VDLoc, Init->getSourceRange());
}
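The pattern reported here, as a short sketch: the temporary std::string dies at the end of the full expression, leaving the StringRef dangling.

    std::string makeName();               // returns by value

    void h(void) {
      llvm::StringRef Name = makeName();  // binds to a temporary -- reported
      // ... any later use of Name reads freed storage.
    }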
@@ -253,7 +254,7 @@ void ASTFieldVisitor::Visit(FieldDecl *D) {
}
void ASTFieldVisitor::ReportError(QualType T) {
- llvm::SmallString<1024> buf;
+ SmallString<1024> buf;
llvm::raw_svector_ostream os(buf);
os << "AST class '" << Root->getName() << "' has a field '"
@@ -282,7 +283,7 @@ void ASTFieldVisitor::ReportError(QualType T) {
// the class may be in the header file, for example).
PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(
FieldChain.front(), BR.getSourceManager());
- BR.EmitBasicReport("AST node allocates heap memory", "LLVM Conventions",
+ BR.EmitBasicReport(Root, "AST node allocates heap memory", "LLVM Conventions",
os.str(), L);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index 2607db8..cb976e0 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -19,6 +19,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
@@ -29,7 +30,7 @@ class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
check::PostStmt<CallExpr>,
check::EndPath,
check::DeadSymbols> {
- mutable llvm::OwningPtr<BugType> BT;
+ mutable OwningPtr<BugType> BT;
public:
/// AllocationState is a part of the checker specific state together with the
@@ -58,7 +59,7 @@ public:
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
void checkPostStmt(const CallExpr *S, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
- void checkEndPath(EndOfFunctionNodeBuilder &B, ExprEngine &Eng) const;
+ void checkEndPath(CheckerContext &C) const;
private:
typedef std::pair<SymbolRef, const AllocationState*> AllocationPair;
@@ -100,26 +101,38 @@ private:
const Expr *ArgExpr,
CheckerContext &C) const;
+ /// Find the allocation site for Sym on the path leading to the node N.
+ const Stmt *getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const;
+
BugReport *generateAllocatedDataNotReleasedReport(const AllocationPair &AP,
- ExplodedNode *N) const;
+ ExplodedNode *N,
+ CheckerContext &C) const;
/// Check if RetSym evaluates to an error value in the current state.
bool definitelyReturnedError(SymbolRef RetSym,
- const ProgramState *State,
+ ProgramStateRef State,
SValBuilder &Builder,
bool noError = false) const;
/// Check if RetSym evaluates to a NoErr value in the current state.
bool definitelyDidnotReturnError(SymbolRef RetSym,
- const ProgramState *State,
+ ProgramStateRef State,
SValBuilder &Builder) const {
return definitelyReturnedError(RetSym, State, Builder, true);
}
+
+ /// Mark an AllocationPair interesting for diagnostic reporting.
+ void markInteresting(BugReport *R, const AllocationPair &AP) const {
+ R->markInteresting(AP.first);
+ R->markInteresting(AP.second->Region);
+ }
/// The bug visitor which allows us to print extra diagnostics along the
/// BugReport path. For example, showing the allocation site of the leaked
/// region.
- class SecKeychainBugVisitor : public BugReporterVisitor {
+ class SecKeychainBugVisitor
+ : public BugReporterVisitorImpl<SecKeychainBugVisitor> {
protected:
// The allocated region symbol tracked by the main analysis.
SymbolRef Sym;
@@ -196,18 +209,9 @@ unsigned MacOSKeychainAPIChecker::getTrackedFunctionIndex(StringRef Name,
return InvalidIdx;
}
-static SymbolRef getSymbolForRegion(CheckerContext &C,
- const MemRegion *R) {
- // Implicit casts (ex: void* -> char*) can turn Symbolic region into element
- // region, if that is the case, get the underlining region.
- R = R->StripCasts();
- if (!isa<SymbolicRegion>(R)) {
- return 0;
- }
- return cast<SymbolicRegion>(R)->getSymbol();
-}
-
static bool isBadDeallocationArgument(const MemRegion *Arg) {
+ if (!Arg)
+ return false;
if (isa<AllocaRegion>(Arg) ||
isa<BlockDataRegion>(Arg) ||
isa<TypedRegion>(Arg)) {
@@ -215,18 +219,19 @@ static bool isBadDeallocationArgument(const MemRegion *Arg) {
}
return false;
}
+
/// Given the address expression, retrieve the value it's pointing to. Assume
/// that value is itself an address, and return the corresponding symbol.
static SymbolRef getAsPointeeSymbol(const Expr *Expr,
CheckerContext &C) {
- const ProgramState *State = C.getState();
- SVal ArgV = State->getSVal(Expr);
+ ProgramStateRef State = C.getState();
+ SVal ArgV = State->getSVal(Expr, C.getLocationContext());
if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&ArgV)) {
StoreManager& SM = C.getStoreManager();
- const MemRegion *V = SM.Retrieve(State->getStore(), *X).getAsRegion();
- if (V)
- return getSymbolForRegion(C, V);
+ SymbolRef sym = SM.getBinding(State->getStore(), *X).getAsLocSymbol();
+ if (sym)
+ return sym;
}
return 0;
}
@@ -238,14 +243,14 @@ static SymbolRef getAsPointeeSymbol(const Expr *Expr,
// If noError, returns true iff (1).
// If !noError, returns true iff (2).
bool MacOSKeychainAPIChecker::definitelyReturnedError(SymbolRef RetSym,
- const ProgramState *State,
+ ProgramStateRef State,
SValBuilder &Builder,
bool noError) const {
DefinedOrUnknownSVal NoErrVal = Builder.makeIntVal(NoErr,
Builder.getSymbolManager().getType(RetSym));
DefinedOrUnknownSVal NoErr = Builder.evalEQ(State, NoErrVal,
nonloc::SymbolVal(RetSym));
- const ProgramState *ErrState = State->assume(NoErr, noError);
+ ProgramStateRef ErrState = State->assume(NoErr, noError);
if (ErrState == State) {
return true;
}
@@ -259,14 +264,14 @@ void MacOSKeychainAPIChecker::
generateDeallocatorMismatchReport(const AllocationPair &AP,
const Expr *ArgExpr,
CheckerContext &C) const {
- const ProgramState *State = C.getState();
+ ProgramStateRef State = C.getState();
State = State->remove<AllocatedData>(AP.first);
- ExplodedNode *N = C.generateNode(State);
+ ExplodedNode *N = C.addTransition(State);
if (!N)
return;
initBugType();
- llvm::SmallString<80> sbuf;
+ SmallString<80> sbuf;
llvm::raw_svector_ostream os(sbuf);
unsigned int PDeallocIdx =
FunctionsToTrack[AP.second->AllocatorIdx].DeallocatorIdx;
@@ -276,23 +281,18 @@ void MacOSKeychainAPIChecker::
BugReport *Report = new BugReport(*BT, os.str(), N);
Report->addVisitor(new SecKeychainBugVisitor(AP.first));
Report->addRange(ArgExpr->getSourceRange());
+ markInteresting(Report, AP);
C.EmitReport(Report);
}
void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
- const ProgramState *State = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = State->getSVal(Callee);
unsigned idx = InvalidIdx;
+ ProgramStateRef State = C.getState();
- const FunctionDecl *funDecl = L.getAsFunctionDecl();
- if (!funDecl)
- return;
- IdentifierInfo *funI = funDecl->getIdentifier();
- if (!funI)
+ StringRef funName = C.getCalleeName(CE);
+ if (funName.empty())
return;
- StringRef funName = funI->getName();
// If it is a call to an allocator function, it could be a double allocation.
idx = getTrackedFunctionIndex(funName, true);
@@ -304,11 +304,11 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
// Remove the value from the state. The new symbol will be added for
// tracking when the second allocator is processed in checkPostStmt().
State = State->remove<AllocatedData>(V);
- ExplodedNode *N = C.generateNode(State);
+ ExplodedNode *N = C.addTransition(State);
if (!N)
return;
initBugType();
- llvm::SmallString<128> sbuf;
+ SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
unsigned int DIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
os << "Allocated data should be released before another call to "
@@ -318,6 +318,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
BugReport *Report = new BugReport(*BT, os.str(), N);
Report->addVisitor(new SecKeychainBugVisitor(V));
Report->addRange(ArgExpr->getSourceRange());
+ Report->markInteresting(AS->Region);
C.EmitReport(Report);
}
}
@@ -331,22 +332,22 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
// Check the argument to the deallocator.
const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
- SVal ArgSVal = State->getSVal(ArgExpr);
+ SVal ArgSVal = State->getSVal(ArgExpr, C.getLocationContext());
// Undef is reported by another checker.
if (ArgSVal.isUndef())
return;
- const MemRegion *Arg = ArgSVal.getAsRegion();
- if (!Arg)
- return;
+ SymbolRef ArgSM = ArgSVal.getAsLocSymbol();
- SymbolRef ArgSM = getSymbolForRegion(C, Arg);
- bool RegionArgIsBad = ArgSM ? false : isBadDeallocationArgument(Arg);
// If the argument is coming from the heap, globals, or unknown, do not
// report it.
- if (!ArgSM && !RegionArgIsBad)
- return;
+ bool RegionArgIsBad = false;
+ if (!ArgSM) {
+ if (!isBadDeallocationArgument(ArgSVal.getAsRegion()))
+ return;
+ RegionArgIsBad = true;
+ }
// Is the argument to the call being tracked?
const AllocationState *AS = State->get<AllocatedData>(ArgSM);
@@ -362,13 +363,15 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
if (isEnclosingFunctionParam(ArgExpr))
return;
- ExplodedNode *N = C.generateNode(State);
+ ExplodedNode *N = C.addTransition(State);
if (!N)
return;
initBugType();
BugReport *Report = new BugReport(*BT,
"Trying to free data which has not been allocated.", N);
Report->addRange(ArgExpr->getSourceRange());
+ if (AS)
+ Report->markInteresting(AS->Region);
C.EmitReport(Report);
return;
}
@@ -420,16 +423,19 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
return;
}
- // If the return status is undefined or is error, report a bad call to free.
- if (!definitelyDidnotReturnError(AS->Region, State, C.getSValBuilder())) {
- ExplodedNode *N = C.generateNode(State);
+ // If the buffer can be null and the return status can be an error,
+ // report a bad call to free.
+ if (State->assume(cast<DefinedSVal>(ArgSVal), false) &&
+ !definitelyDidnotReturnError(AS->Region, State, C.getSValBuilder())) {
+ ExplodedNode *N = C.addTransition(State);
if (!N)
return;
initBugType();
BugReport *Report = new BugReport(*BT,
- "Call to free data when error was returned during allocation.", N);
+ "Only call free if a valid (non-NULL) buffer was returned.", N);
Report->addVisitor(new SecKeychainBugVisitor(ArgSM));
Report->addRange(ArgExpr->getSourceRange());
+ Report->markInteresting(AS->Region);
C.EmitReport(Report);
return;
}
@@ -439,17 +445,8 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
- const ProgramState *State = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = State->getSVal(Callee);
-
- const FunctionDecl *funDecl = L.getAsFunctionDecl();
- if (!funDecl)
- return;
- IdentifierInfo *funI = funDecl->getIdentifier();
- if (!funI)
- return;
- StringRef funName = funI->getName();
+ ProgramStateRef State = C.getState();
+ StringRef funName = C.getCalleeName(CE);
// If a value has been allocated, add it to the set for tracking.
unsigned idx = getTrackedFunctionIndex(funName, true);
@@ -459,7 +456,8 @@ void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
// If the argument entered as an enclosing function parameter, skip it to
// avoid false positives.
- if (isEnclosingFunctionParam(ArgExpr))
+ if (isEnclosingFunctionParam(ArgExpr) &&
+ C.getLocationContext()->getParent() == 0)
return;
if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C)) {
@@ -475,7 +473,8 @@ void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
// allocated value symbol, since our diagnostics depend on the value
// returned by the call. Ex: Data should only be freed if noErr was
// returned during allocation.)
- SymbolRef RetStatusSymbol = State->getSVal(CE).getAsSymbol();
+ SymbolRef RetStatusSymbol =
+ State->getSVal(CE, C.getLocationContext()).getAsSymbol();
C.getSymbolManager().addSymbolDependency(V, RetStatusSymbol);
// Track the allocated value in the checker state.
@@ -492,36 +491,76 @@ void MacOSKeychainAPIChecker::checkPreStmt(const ReturnStmt *S,
if (!retExpr)
return;
+ // If inside an inlined call, skip it.
+ const LocationContext *LC = C.getLocationContext();
+ if (LC->getParent() != 0)
+ return;
+
// Check if the value is escaping through the return.
- const ProgramState *state = C.getState();
- const MemRegion *V = state->getSVal(retExpr).getAsRegion();
- if (!V)
+ ProgramStateRef state = C.getState();
+ SymbolRef sym = state->getSVal(retExpr, LC).getAsLocSymbol();
+ if (!sym)
return;
- state = state->remove<AllocatedData>(getSymbolForRegion(C, V));
+ state = state->remove<AllocatedData>(sym);
// Proceed from the new state.
C.addTransition(state);
}
+// TODO: This logic is the same as in Malloc checker.
+const Stmt *
+MacOSKeychainAPIChecker::getAllocationSite(const ExplodedNode *N,
+ SymbolRef Sym,
+ CheckerContext &C) const {
+ const LocationContext *LeakContext = N->getLocationContext();
+ // Walk the ExplodedGraph backwards and find the first node that referred to
+ // the tracked symbol.
+ const ExplodedNode *AllocNode = N;
+
+ while (N) {
+ if (!N->getState()->get<AllocatedData>(Sym))
+ break;
+ // The allocation node is the last node in the current context in which
+ // the symbol was tracked.
+ if (N->getLocationContext() == LeakContext)
+ AllocNode = N;
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+ }
+
+ ProgramPoint P = AllocNode->getLocation();
+ if (!isa<StmtPoint>(P))
+ return 0;
+ return cast<clang::PostStmt>(P).getStmt();
+}
+
BugReport *MacOSKeychainAPIChecker::
generateAllocatedDataNotReleasedReport(const AllocationPair &AP,
- ExplodedNode *N) const {
+ ExplodedNode *N,
+ CheckerContext &C) const {
const ADFunctionInfo &FI = FunctionsToTrack[AP.second->AllocatorIdx];
initBugType();
- llvm::SmallString<70> sbuf;
+ SmallString<70> sbuf;
llvm::raw_svector_ostream os(sbuf);
-
os << "Allocated data is not released: missing a call to '"
<< FunctionsToTrack[FI.DeallocatorIdx].Name << "'.";
- BugReport *Report = new BugReport(*BT, os.str(), N);
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path.
+ PathDiagnosticLocation LocUsedForUniqueing;
+ if (const Stmt *AllocStmt = getAllocationSite(N, AP.first, C))
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocStmt,
+ C.getSourceManager(), N->getLocationContext());
+
+ BugReport *Report = new BugReport(*BT, os.str(), N, LocUsedForUniqueing);
Report->addVisitor(new SecKeychainBugVisitor(AP.first));
- Report->addRange(SourceRange());
+ markInteresting(Report, AP);
return Report;
}
void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
- const ProgramState *State = C.getState();
+ ProgramStateRef State = C.getState();
AllocatedSetTy ASet = State->get<AllocatedData>();
if (ASet.isEmpty())
return;
@@ -541,25 +580,33 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
continue;
Errors.push_back(std::make_pair(I->first, &I->second));
}
- if (!Changed)
+ if (!Changed) {
+ // Generate the new, cleaned up state.
+ C.addTransition(State);
return;
+ }
- // Generate the new, cleaned up state.
- ExplodedNode *N = C.generateNode(State);
- if (!N)
- return;
+ static SimpleProgramPointTag Tag("MacOSKeychainAPIChecker : DeadSymbolsLeak");
+ ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
// Generate the error reports.
for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end();
I != E; ++I) {
- C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N));
+ C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
}
+
+ // Generate the new, cleaned up state.
+ C.addTransition(State, N);
}
// TODO: Remove this after we ensure that checkDeadSymbols are always called.
-void MacOSKeychainAPIChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
- ExprEngine &Eng) const {
- const ProgramState *state = B.getState();
+void MacOSKeychainAPIChecker::checkEndPath(CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ // If inside an inlined call, skip it.
+ if (C.getLocationContext()->getParent() != 0)
+ return;
+
AllocatedSetTy AS = state->get<AllocatedData>();
if (AS.isEmpty())
return;
@@ -575,26 +622,28 @@ void MacOSKeychainAPIChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
// allocation, do not report.
if (state->getSymVal(I.getKey()) ||
definitelyReturnedError(I->second.Region, state,
- Eng.getSValBuilder())) {
+ C.getSValBuilder())) {
continue;
}
Errors.push_back(std::make_pair(I->first, &I->second));
}
// If no change, do not generate a new state.
- if (!Changed)
+ if (!Changed) {
+ C.addTransition(state);
return;
+ }
- ExplodedNode *N = B.generateNode(state);
- if (!N)
- return;
+ static SimpleProgramPointTag Tag("MacOSKeychainAPIChecker : EndPathLeak");
+ ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
// Generate the error reports.
for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end();
I != E; ++I) {
- Eng.getBugReporter().EmitReport(
- generateAllocatedDataNotReleasedReport(*I, N));
+ C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
}
+
+ C.addTransition(state, N);
}
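For orientation, a sketch of the leak this checker reports, assuming the Security framework headers and that SecKeychainItemCopyContent/SecKeychainItemFreeContent is one of the tracked allocator/deallocator pairs:

    void getPassword(SecKeychainItemRef item) {
      UInt32 length;
      void *data;
      OSStatus st = SecKeychainItemCopyContent(item, NULL, NULL,
                                               &length, &data);
      if (st != noErr)
        return;
      // ... use data ...
      // Missing SecKeychainItemFreeContent(NULL, data) -- reported as
      // "Allocated data is not released".
    }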
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index 88d492e..cfdb55d 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -31,17 +31,17 @@ using namespace ento;
namespace {
class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable llvm::OwningPtr<BugType> BT_dispatchOnce;
+ mutable OwningPtr<BugType> BT_dispatchOnce;
public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
- const IdentifierInfo *FI) const;
+ StringRef FName) const;
typedef void (MacOSXAPIChecker::*SubChecker)(CheckerContext &,
const CallExpr *,
- const IdentifierInfo *) const;
+ StringRef FName) const;
};
} //end anonymous namespace
@@ -50,14 +50,15 @@ public:
//===----------------------------------------------------------------------===//
void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
- const IdentifierInfo *FI) const {
+ StringRef FName) const {
if (CE->getNumArgs() < 1)
return;
// Check if the first argument is stack allocated. If so, issue a warning
// because that's likely to be bad news.
- const ProgramState *state = C.getState();
- const MemRegion *R = state->getSVal(CE->getArg(0)).getAsRegion();
+ ProgramStateRef state = C.getState();
+ const MemRegion *R =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
@@ -69,9 +70,9 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
BT_dispatchOnce.reset(new BugType("Improper use of 'dispatch_once'",
"Mac OS X API"));
- llvm::SmallString<256> S;
+ SmallString<256> S;
llvm::raw_svector_ostream os(S);
- os << "Call to '" << FI->getName() << "' uses";
+ os << "Call to '" << FName << "' uses";
if (const VarRegion *VR = dyn_cast<VarRegion>(R))
os << " the local variable '" << VR->getDecl()->getName() << '\'';
else
@@ -92,27 +93,18 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
- // FIXME: This sort of logic is common to several checkers, including
- // UnixAPIChecker, PthreadLockChecker, and CStringChecker. Should refactor.
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- const FunctionDecl *Fn = state->getSVal(Callee).getAsFunctionDecl();
-
- if (!Fn)
- return;
-
- const IdentifierInfo *FI = Fn->getIdentifier();
- if (!FI)
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty())
return;
SubChecker SC =
- llvm::StringSwitch<SubChecker>(FI->getName())
+ llvm::StringSwitch<SubChecker>(Name)
.Cases("dispatch_once", "dispatch_once_f",
&MacOSXAPIChecker::CheckDispatchOnce)
.Default(NULL);
if (SC)
- (this->*SC)(C, CE, FI);
+ (this->*SC)(C, CE, Name);
}
//===----------------------------------------------------------------------===//
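The misuse CheckDispatchOnce detects, as a short C sketch using the function-pointer variant (the predicate must not live on the stack, or the once-semantics are not guaranteed across calls):

    static void doInit(void *ctx) { /* one-time initialization */ }

    void initOnce(void) {
      dispatch_once_t pred = 0;           // stack storage -- reported
      dispatch_once_f(&pred, NULL, doInit);
    }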
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 5631802..8bce88a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -13,14 +13,21 @@
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
+#include "InterCheckerAPI.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include <climits>
+
using namespace clang;
using namespace ento;
@@ -35,10 +42,9 @@ public:
RefState(Kind k, const Stmt *s) : K(k), S(s) {}
bool isAllocated() const { return K == AllocateUnchecked; }
- //bool isFailed() const { return K == AllocateFailed; }
bool isReleased() const { return K == Released; }
- //bool isEscaped() const { return K == Escaped; }
- //bool isRelinquished() const { return K == Relinquished; }
+
+ const Stmt *getStmt() const { return S; }
bool operator==(const RefState &X) const {
return K == X.K && S == X.S;
@@ -62,61 +68,222 @@ public:
}
};
-class RegionState {};
+struct ReallocPair {
+ SymbolRef ReallocatedSym;
+ bool IsFreeOnFailure;
+ ReallocPair(SymbolRef S, bool F) : ReallocatedSym(S), IsFreeOnFailure(F) {}
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(IsFreeOnFailure);
+ ID.AddPointer(ReallocatedSym);
+ }
+ bool operator==(const ReallocPair &X) const {
+ return ReallocatedSym == X.ReallocatedSym &&
+ IsFreeOnFailure == X.IsFreeOnFailure;
+ }
+};
-class MallocChecker : public Checker<eval::Call, check::DeadSymbols, check::EndPath, check::PreStmt<ReturnStmt>, check::Location,
- check::Bind, eval::Assume> {
- mutable llvm::OwningPtr<BuiltinBug> BT_DoubleFree;
- mutable llvm::OwningPtr<BuiltinBug> BT_Leak;
- mutable llvm::OwningPtr<BuiltinBug> BT_UseFree;
- mutable llvm::OwningPtr<BuiltinBug> BT_UseRelinquished;
- mutable llvm::OwningPtr<BuiltinBug> BT_BadFree;
- mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc;
+typedef std::pair<const Stmt*, const MemRegion*> LeakInfo;
+
+class MallocChecker : public Checker<check::DeadSymbols,
+ check::EndPath,
+ check::PreStmt<ReturnStmt>,
+ check::PreStmt<CallExpr>,
+ check::PostStmt<CallExpr>,
+ check::PostStmt<BlockExpr>,
+ check::Location,
+ check::Bind,
+ eval::Assume,
+ check::RegionChanges>
+{
+ mutable OwningPtr<BugType> BT_DoubleFree;
+ mutable OwningPtr<BugType> BT_Leak;
+ mutable OwningPtr<BugType> BT_UseFree;
+ mutable OwningPtr<BugType> BT_BadFree;
+ mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc,
+ *II_valloc, *II_reallocf, *II_strndup, *II_strdup;
public:
- MallocChecker() : II_malloc(0), II_free(0), II_realloc(0), II_calloc(0) {}
+ MallocChecker() : II_malloc(0), II_free(0), II_realloc(0), II_calloc(0),
+ II_valloc(0), II_reallocf(0), II_strndup(0), II_strdup(0) {}
+
+ /// In pessimistic mode, the checker assumes that it does not know which
+ /// functions might free the memory.
+ struct ChecksFilter {
+ DefaultBool CMallocPessimistic;
+ DefaultBool CMallocOptimistic;
+ };
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ ChecksFilter Filter;
+
+ void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
- void checkEndPath(EndOfFunctionNodeBuilder &B, ExprEngine &Eng) const;
+ void checkEndPath(CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
- const ProgramState *evalAssume(const ProgramState *state, SVal Cond,
+ ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
void checkLocation(SVal l, bool isLoad, const Stmt *S,
CheckerContext &C) const;
void checkBind(SVal location, SVal val, const Stmt*S,
CheckerContext &C) const;
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const;
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const {
+ return true;
+ }
private:
- static void MallocMem(CheckerContext &C, const CallExpr *CE);
- static void MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
- const OwnershipAttr* Att);
- static const ProgramState *MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ void initIdentifierInfo(ASTContext &C) const;
+
+ /// Check if this is one of the functions which can allocate/reallocate memory
+ /// pointed to by one of its arguments.
+ bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
+
+ static ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
+ const CallExpr *CE,
+ const OwnershipAttr* Att);
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
const Expr *SizeEx, SVal Init,
- const ProgramState *state) {
- return MallocMemAux(C, CE, state->getSVal(SizeEx), Init, state);
+ ProgramStateRef state) {
+ return MallocMemAux(C, CE,
+ state->getSVal(SizeEx, C.getLocationContext()),
+ Init, state);
}
- static const ProgramState *MallocMemAux(CheckerContext &C, const CallExpr *CE,
+
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
SVal SizeEx, SVal Init,
- const ProgramState *state);
+ ProgramStateRef state);
+
+ /// Update the RefState to reflect the new memory allocation.
+ static ProgramStateRef MallocUpdateRefState(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef state);
+
+ ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
+ const OwnershipAttr* Att) const;
+ ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef state, unsigned Num,
+ bool Hold) const;
+
+ ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
+ bool FreesMemOnFailure) const;
+ static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE);
+
+ bool checkEscape(SymbolRef Sym, const Stmt *S, CheckerContext &C) const;
+ bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
+ const Stmt *S = 0) const;
- void FreeMem(CheckerContext &C, const CallExpr *CE) const;
- void FreeMemAttr(CheckerContext &C, const CallExpr *CE,
- const OwnershipAttr* Att) const;
- const ProgramState *FreeMemAux(CheckerContext &C, const CallExpr *CE,
- const ProgramState *state, unsigned Num, bool Hold) const;
+ /// Check if the function is not known to us; if so, conservatively assume
+ /// it can free/reallocate its pointer arguments.
+ bool doesNotFreeMemory(const CallOrObjCMessage *Call,
+ ProgramStateRef State) const;
- void ReallocMem(CheckerContext &C, const CallExpr *CE) const;
- static void CallocMem(CheckerContext &C, const CallExpr *CE);
-
static bool SummarizeValue(raw_ostream &os, SVal V);
static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange range) const;
+
+ /// Find the location of the allocation for Sym on the path leading to the
+ /// exploded node N.
+ LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const;
+
+ void reportLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
+
+ /// The bug visitor which allows us to print extra diagnostics along the
+ /// BugReport path. For example, showing the allocation site of the leaked
+ /// region.
+ class MallocBugVisitor : public BugReporterVisitorImpl<MallocBugVisitor> {
+ protected:
+ enum NotificationMode {
+ Normal,
+ ReallocationFailed
+ };
+
+ // The allocated region symbol tracked by the main analysis.
+ SymbolRef Sym;
+
+ // The mode we are in, i.e. what kind of diagnostics will be emitted.
+ NotificationMode Mode;
+
+ // The symbol from the point at which the primary region should have
+ // been reallocated.
+ SymbolRef FailedReallocSymbol;
+
+ public:
+ MallocBugVisitor(SymbolRef S)
+ : Sym(S), Mode(Normal), FailedReallocSymbol(0) {}
+
+ virtual ~MallocBugVisitor() {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Sym);
+ }
+
+ inline bool isAllocated(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // Did not track -> allocated. Other state (released) -> allocated.
+ return (Stmt && isa<CallExpr>(Stmt) &&
+ (S && S->isAllocated()) && (!SPrev || !SPrev->isAllocated()));
+ }
+
+ inline bool isReleased(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // Did not track -> released. Other state (allocated) -> released.
+ return (Stmt && isa<CallExpr>(Stmt) &&
+ (S && S->isReleased()) && (!SPrev || !SPrev->isReleased()));
+ }
+
+ inline bool isReallocFailedCheck(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // If the expression is not a call, and the state change is
+ // released -> allocated, it must be the realloc return value
+ // check. If we have to handle more cases here, it might be cleaner just
+ // to track this extra bit in the state itself.
+ return ((!Stmt || !isa<CallExpr>(Stmt)) &&
+ (S && S->isAllocated()) && (SPrev && !SPrev->isAllocated()));
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR);
+ private:
+ class StackHintGeneratorForReallocationFailed
+ : public StackHintGeneratorForSymbol {
+ public:
+ StackHintGeneratorForReallocationFailed(SymbolRef S, StringRef M)
+ : StackHintGeneratorForSymbol(S, M) {}
+
+ virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex) {
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Reallocation of ";
+ // Printed parameters start at 1, not 0.
+ printOrdinal(++ArgIndex, os);
+ os << " parameter failed";
+
+ return os.str();
+ }
+
+ virtual std::string getMessageForReturn(const CallExpr *CallExpr) {
+ return "Reallocation of returned value failed";
+ }
+ };
+ };
};
} // end anonymous namespace
typedef llvm::ImmutableMap<SymbolRef, RefState> RegionStateTy;
-
+typedef llvm::ImmutableMap<SymbolRef, ReallocPair > ReallocMap;
+class RegionState {};
+class ReallocPairs {};
namespace clang {
namespace ento {
template <>
@@ -124,178 +291,230 @@ namespace ento {
: public ProgramStatePartialTrait<RegionStateTy> {
static void *GDMIndex() { static int x; return &x; }
};
+
+ template <>
+ struct ProgramStateTrait<ReallocPairs>
+ : public ProgramStatePartialTrait<ReallocMap> {
+ static void *GDMIndex() { static int x; return &x; }
+ };
}
}
-bool MallocChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
+namespace {
+class StopTrackingCallback : public SymbolVisitor {
+ ProgramStateRef state;
+public:
+ StopTrackingCallback(ProgramStateRef st) : state(st) {}
+ ProgramStateRef getState() const { return state; }
- const FunctionDecl *FD = L.getAsFunctionDecl();
- if (!FD)
- return false;
+ bool VisitSymbol(SymbolRef sym) {
+ state = state->remove<RegionState>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
- ASTContext &Ctx = C.getASTContext();
+void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
if (!II_malloc)
II_malloc = &Ctx.Idents.get("malloc");
if (!II_free)
II_free = &Ctx.Idents.get("free");
if (!II_realloc)
II_realloc = &Ctx.Idents.get("realloc");
+ if (!II_reallocf)
+ II_reallocf = &Ctx.Idents.get("reallocf");
if (!II_calloc)
II_calloc = &Ctx.Idents.get("calloc");
+ if (!II_valloc)
+ II_valloc = &Ctx.Idents.get("valloc");
+ if (!II_strdup)
+ II_strdup = &Ctx.Idents.get("strdup");
+ if (!II_strndup)
+ II_strndup = &Ctx.Idents.get("strndup");
+}
- if (FD->getIdentifier() == II_malloc) {
- MallocMem(C, CE);
- return true;
- }
+bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
+ if (!FD)
+ return false;
+ IdentifierInfo *FunI = FD->getIdentifier();
+ if (!FunI)
+ return false;
- if (FD->getIdentifier() == II_free) {
- FreeMem(C, CE);
- return true;
- }
+ initIdentifierInfo(C);
- if (FD->getIdentifier() == II_realloc) {
- ReallocMem(C, CE);
+ if (FunI == II_malloc || FunI == II_free || FunI == II_realloc ||
+ FunI == II_reallocf || FunI == II_calloc || FunI == II_valloc ||
+ FunI == II_strdup || FunI == II_strndup)
return true;
- }
- if (FD->getIdentifier() == II_calloc) {
- CallocMem(C, CE);
+ if (Filter.CMallocOptimistic && FD->hasAttrs() &&
+ FD->specific_attr_begin<OwnershipAttr>() !=
+ FD->specific_attr_end<OwnershipAttr>())
return true;
- }
- // Check all the attributes, if there are any.
- // There can be multiple of these attributes.
- bool rv = false;
- if (FD->hasAttrs()) {
- for (specific_attr_iterator<OwnershipAttr>
- i = FD->specific_attr_begin<OwnershipAttr>(),
- e = FD->specific_attr_end<OwnershipAttr>();
- i != e; ++i) {
- switch ((*i)->getOwnKind()) {
- case OwnershipAttr::Returns: {
- MallocMemReturnsAttr(C, CE, *i);
- rv = true;
- break;
- }
- case OwnershipAttr::Takes:
- case OwnershipAttr::Holds: {
- FreeMemAttr(C, CE, *i);
- rv = true;
- break;
- }
- default:
- break;
- }
- }
- }
- return rv;
+
+ return false;
}
-void MallocChecker::MallocMem(CheckerContext &C, const CallExpr *CE) {
- const ProgramState *state = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(),
- C.getState());
- C.addTransition(state);
+void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ initIdentifierInfo(C.getASTContext());
+ IdentifierInfo *FunI = FD->getIdentifier();
+ if (!FunI)
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (FunI == II_malloc || FunI == II_valloc) {
+ if (CE->getNumArgs() < 1)
+ return;
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+ } else if (FunI == II_realloc) {
+ State = ReallocMem(C, CE, false);
+ } else if (FunI == II_reallocf) {
+ State = ReallocMem(C, CE, true);
+ } else if (FunI == II_calloc) {
+ State = CallocMem(C, CE);
+ } else if (FunI == II_free) {
+ State = FreeMemAux(C, CE, C.getState(), 0, false);
+ } else if (FunI == II_strdup) {
+ State = MallocUpdateRefState(C, CE, State);
+ } else if (FunI == II_strndup) {
+ State = MallocUpdateRefState(C, CE, State);
+ } else if (Filter.CMallocOptimistic) {
+ // Check all the attributes, if there are any.
+ // There can be multiple of these attributes.
+ if (FD->hasAttrs())
+ for (specific_attr_iterator<OwnershipAttr>
+ i = FD->specific_attr_begin<OwnershipAttr>(),
+ e = FD->specific_attr_end<OwnershipAttr>();
+ i != e; ++i) {
+ switch ((*i)->getOwnKind()) {
+ case OwnershipAttr::Returns:
+ State = MallocMemReturnsAttr(C, CE, *i);
+ break;
+ case OwnershipAttr::Takes:
+ case OwnershipAttr::Holds:
+ State = FreeMemAttr(C, CE, *i);
+ break;
+ }
+ }
+ }
+ C.addTransition(State);
}
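Taken together, the post-call transfer functions above track patterns like the following sketch (standard C library, illustrative only):

    #include <stdlib.h>
    #include <string.h>

    void use(void) {
      char *p = (char *)malloc(10);
      free(p);
      free(p);            // double free: "Attempt to free released memory"

      char *q = strdup("x");
      (void)q;            // no free(q): leak reported when q's symbol dies
    }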
-void MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
- const OwnershipAttr* Att) {
+ProgramStateRef MallocChecker::MallocMemReturnsAttr(CheckerContext &C,
+ const CallExpr *CE,
+ const OwnershipAttr* Att) {
if (Att->getModule() != "malloc")
- return;
+ return 0;
OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
if (I != E) {
- const ProgramState *state =
- MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), C.getState());
- C.addTransition(state);
- return;
+ return MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), C.getState());
}
- const ProgramState *state = MallocMemAux(C, CE, UnknownVal(), UndefinedVal(),
- C.getState());
- C.addTransition(state);
+ return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), C.getState());
}
-const ProgramState *MallocChecker::MallocMemAux(CheckerContext &C,
+ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
const CallExpr *CE,
SVal Size, SVal Init,
- const ProgramState *state) {
- unsigned Count = C.getCurrentBlockCount();
- SValBuilder &svalBuilder = C.getSValBuilder();
+ ProgramStateRef state) {
+ // Get the return value.
+ SVal retVal = state->getSVal(CE, C.getLocationContext());
- // Set the return value.
- SVal retVal = svalBuilder.getConjuredSymbolVal(NULL, CE, CE->getType(), Count);
- state = state->BindExpr(CE, retVal);
+ // We expect the malloc functions to return a pointer.
+ if (!isa<Loc>(retVal))
+ return 0;
// Fill the region with the initialization value.
state = state->bindDefault(retVal, Init);
// Set the region's extent equal to the Size parameter.
- const SymbolicRegion *R = cast<SymbolicRegion>(retVal.getAsRegion());
- DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
- DefinedOrUnknownSVal DefinedSize = cast<DefinedOrUnknownSVal>(Size);
- DefinedOrUnknownSVal extentMatchesSize =
- svalBuilder.evalEQ(state, Extent, DefinedSize);
-
- state = state->assume(extentMatchesSize, true);
- assert(state);
+ const SymbolicRegion *R =
+ dyn_cast_or_null<SymbolicRegion>(retVal.getAsRegion());
+ if (!R)
+ return 0;
+ if (isa<DefinedOrUnknownSVal>(Size)) {
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
+ DefinedOrUnknownSVal DefinedSize = cast<DefinedOrUnknownSVal>(Size);
+ DefinedOrUnknownSVal extentMatchesSize =
+ svalBuilder.evalEQ(state, Extent, DefinedSize);
+
+ state = state->assume(extentMatchesSize, true);
+ assert(state);
+ }
+ return MallocUpdateRefState(C, CE, state);
+}
+
+ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef state) {
+ // Get the return value.
+ SVal retVal = state->getSVal(CE, C.getLocationContext());
+
+ // We expect the malloc functions to return a pointer.
+ if (!isa<Loc>(retVal))
+ return 0;
+
SymbolRef Sym = retVal.getAsLocSymbol();
assert(Sym);
// Set the symbol's state to Allocated.
return state->set<RegionState>(Sym, RefState::getAllocateUnchecked(CE));
-}
-void MallocChecker::FreeMem(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = FreeMemAux(C, CE, C.getState(), 0, false);
-
- if (state)
- C.addTransition(state);
}
-void MallocChecker::FreeMemAttr(CheckerContext &C, const CallExpr *CE,
- const OwnershipAttr* Att) const {
+ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
+ const CallExpr *CE,
+ const OwnershipAttr* Att) const {
if (Att->getModule() != "malloc")
- return;
+ return 0;
+
+ ProgramStateRef State = C.getState();
for (OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
I != E; ++I) {
- const ProgramState *state = FreeMemAux(C, CE, C.getState(), *I,
- Att->getOwnKind() == OwnershipAttr::Holds);
- if (state)
- C.addTransition(state);
+ ProgramStateRef StateI = FreeMemAux(C, CE, State, *I,
+ Att->getOwnKind() == OwnershipAttr::Holds);
+ if (StateI)
+ State = StateI;
}
+ return State;
}
-const ProgramState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
- const ProgramState *state, unsigned Num,
- bool Hold) const {
- const Expr *ArgExpr = CE->getArg(Num);
- SVal ArgVal = state->getSVal(ArgExpr);
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef state,
+ unsigned Num,
+ bool Hold) const {
+ if (CE->getNumArgs() < (Num + 1))
+ return 0;
+ const Expr *ArgExpr = CE->getArg(Num);
+ SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
+ if (!isa<DefinedOrUnknownSVal>(ArgVal))
+ return 0;
DefinedOrUnknownSVal location = cast<DefinedOrUnknownSVal>(ArgVal);
// Check for null dereferences.
if (!isa<Loc>(location))
- return state;
-
- // FIXME: Technically using 'Assume' here can result in a path
- // bifurcation. In such cases we need to return two states, not just one.
- const ProgramState *notNullState, *nullState;
- llvm::tie(notNullState, nullState) = state->assume(location);
+ return 0;
// In the explicit NULL case, no operation is performed.
+ ProgramStateRef notNullState, nullState;
+ llvm::tie(notNullState, nullState) = state->assume(location);
if (nullState && !notNullState)
- return nullState;
-
- assert(notNullState);
+ return 0;
// Unknown values could easily be okay; undefined values are handled
// elsewhere.
if (ArgVal.isUnknownOrUndef())
- return notNullState;
+ return 0;
const MemRegion *R = ArgVal.getAsRegion();
@@ -303,7 +522,7 @@ const ProgramState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr
// Non-region locations (labels and fixed addresses) also shouldn't be freed.
if (!R) {
ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
- return NULL;
+ return 0;
}
R = R->StripCasts();
@@ -311,7 +530,7 @@ const ProgramState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr
// Blocks might show up as heap data, but should not be free()d
if (isa<BlockDataRegion>(R)) {
ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
- return NULL;
+ return 0;
}
const MemSpaceRegion *MS = R->getMemorySpace();
@@ -327,14 +546,14 @@ const ProgramState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr
// False negatives are better than false positives.
ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
- return NULL;
+ return 0;
}
const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
// Various cases could lead to non-symbol values here.
// For now, ignore them.
if (!SR)
- return notNullState;
+ return 0;
SymbolRef Sym = SR->getSymbol();
const RefState *RS = state->get<RegionState>(Sym);
@@ -343,27 +562,28 @@ const ProgramState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr
// called on a pointer that does not get its pointee directly from malloc().
// Full support of this requires inter-procedural analysis.
if (!RS)
- return notNullState;
+ return 0;
// Check double free.
if (RS->isReleased()) {
if (ExplodedNode *N = C.generateSink()) {
if (!BT_DoubleFree)
BT_DoubleFree.reset(
- new BuiltinBug("Double free",
- "Try to free a memory block that has been released"));
- // FIXME: should find where it's freed last time.
+ new BugType("Double free", "Memory Error"));
BugReport *R = new BugReport(*BT_DoubleFree,
- BT_DoubleFree->getDescription(), N);
+ "Attempt to free released memory", N);
+ R->addRange(ArgExpr->getSourceRange());
+ R->markInteresting(Sym);
+ R->addVisitor(new MallocBugVisitor(Sym));
C.EmitReport(R);
}
- return NULL;
+ return 0;
}
// Normal free.
if (Hold)
- return notNullState->set<RegionState>(Sym, RefState::getRelinquished(CE));
- return notNullState->set<RegionState>(Sym, RefState::getReleased(CE));
+ return state->set<RegionState>(Sym, RefState::getRelinquished(CE));
+ return state->set<RegionState>(Sym, RefState::getReleased(CE));
}
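
// Illustration (not part of the diff): a minimal C snippet, under the
// modeling above, that drives FreeMemAux() into its double-free branch.
// At the second free() the symbol's RefState isReleased(), so the checker
// emits a sink with "Attempt to free released memory".
#include <stdlib.h>
void double_free_example(void) {
  char *p = malloc(10);   // symbol enters state Allocated
  free(p);                // state becomes Released
  free(p);                // double free: reported here
}
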
bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
@@ -400,8 +620,7 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
default: {
const MemSpaceRegion *MS = MR->getMemorySpace();
- switch (MS->getKind()) {
- case MemRegion::StackLocalsSpaceRegionKind: {
+ if (isa<StackLocalsSpaceRegion>(MS)) {
const VarRegion *VR = dyn_cast<VarRegion>(MR);
const VarDecl *VD;
if (VR)
@@ -415,7 +634,8 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
os << "the address of a local stack variable";
return true;
}
- case MemRegion::StackArgumentsSpaceRegionKind: {
+
+ if (isa<StackArgumentsSpaceRegion>(MS)) {
const VarRegion *VR = dyn_cast<VarRegion>(MR);
const VarDecl *VD;
if (VR)
@@ -429,8 +649,8 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
os << "the address of a parameter";
return true;
}
- case MemRegion::NonStaticGlobalSpaceRegionKind:
- case MemRegion::StaticGlobalSpaceRegionKind: {
+
+ if (isa<GlobalsSpaceRegion>(MS)) {
const VarRegion *VR = dyn_cast<VarRegion>(MR);
const VarDecl *VD;
if (VR)
@@ -447,9 +667,8 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os,
os << "the address of a global variable";
return true;
}
- default:
- return false;
- }
+
+ return false;
}
}
}
@@ -458,9 +677,9 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
SourceRange range) const {
if (ExplodedNode *N = C.generateSink()) {
if (!BT_BadFree)
- BT_BadFree.reset(new BuiltinBug("Bad free"));
+ BT_BadFree.reset(new BugType("Bad free", "Memory Error"));
- llvm::SmallString<100> buf;
+ SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
const MemRegion *MR = ArgVal.getAsRegion();
@@ -487,16 +706,25 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
}
BugReport *R = new BugReport(*BT_BadFree, os.str(), N);
+ R->markInteresting(MR);
R->addRange(range);
C.EmitReport(R);
}
}
-void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
+ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
+ const CallExpr *CE,
+ bool FreesOnFail) const {
+ if (CE->getNumArgs() < 2)
+ return 0;
+
+ ProgramStateRef state = C.getState();
const Expr *arg0Expr = CE->getArg(0);
- DefinedOrUnknownSVal arg0Val
- = cast<DefinedOrUnknownSVal>(state->getSVal(arg0Expr));
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal Arg0Val = state->getSVal(arg0Expr, LCtx);
+ if (!isa<DefinedOrUnknownSVal>(Arg0Val))
+ return 0;
+ DefinedOrUnknownSVal arg0Val = cast<DefinedOrUnknownSVal>(Arg0Val);
SValBuilder &svalBuilder = C.getSValBuilder();
@@ -506,65 +734,166 @@ void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) const {
// Get the size argument. If there is no size arg then give up.
const Expr *Arg1 = CE->getArg(1);
if (!Arg1)
- return;
+ return 0;
// Get the value of the size argument.
- DefinedOrUnknownSVal Arg1Val =
- cast<DefinedOrUnknownSVal>(state->getSVal(Arg1));
+ SVal Arg1ValG = state->getSVal(Arg1, LCtx);
+ if (!isa<DefinedOrUnknownSVal>(Arg1ValG))
+ return 0;
+ DefinedOrUnknownSVal Arg1Val = cast<DefinedOrUnknownSVal>(Arg1ValG);
// Compare the size argument to 0.
DefinedOrUnknownSVal SizeZero =
svalBuilder.evalEQ(state, Arg1Val,
svalBuilder.makeIntValWithPtrWidth(0, false));
+ ProgramStateRef StatePtrIsNull, StatePtrNotNull;
+ llvm::tie(StatePtrIsNull, StatePtrNotNull) = state->assume(PtrEQ);
+ ProgramStateRef StateSizeIsZero, StateSizeNotZero;
+ llvm::tie(StateSizeIsZero, StateSizeNotZero) = state->assume(SizeZero);
+ // We only assume exceptional states if they are definitely true; if the
+ // state is under-constrained, assume regular realloc behavior.
+ bool PtrIsNull = StatePtrIsNull && !StatePtrNotNull;
+ bool SizeIsZero = StateSizeIsZero && !StateSizeNotZero;
+
// If the ptr is NULL and the size is not 0, the call is equivalent to
// malloc(size).
- const ProgramState *stateEqual = state->assume(PtrEQ, true);
- if (stateEqual && state->assume(SizeZero, false)) {
- // Hack: set the NULL symbolic region to released to suppress false warning.
- // In the future we should add more states for allocated regions, e.g.,
- // CheckedNull, CheckedNonNull.
-
- SymbolRef Sym = arg0Val.getAsLocSymbol();
- if (Sym)
- stateEqual = stateEqual->set<RegionState>(Sym, RefState::getReleased(CE));
-
- const ProgramState *stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
- UndefinedVal(), stateEqual);
- C.addTransition(stateMalloc);
+ if (PtrIsNull && !SizeIsZero) {
+ ProgramStateRef stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
+ UndefinedVal(), StatePtrIsNull);
+ return stateMalloc;
}
- if (const ProgramState *stateNotEqual = state->assume(PtrEQ, false)) {
- // If the size is 0, free the memory.
- if (const ProgramState *stateSizeZero = stateNotEqual->assume(SizeZero, true))
- if (const ProgramState *stateFree =
- FreeMemAux(C, CE, stateSizeZero, 0, false)) {
+ if (PtrIsNull && SizeIsZero)
+ return 0;
+
+ // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
+ assert(!PtrIsNull);
+ SymbolRef FromPtr = arg0Val.getAsSymbol();
+ SVal RetVal = state->getSVal(CE, LCtx);
+ SymbolRef ToPtr = RetVal.getAsSymbol();
+ if (!FromPtr || !ToPtr)
+ return 0;
+
+ // If the size is 0, free the memory.
+ if (SizeIsZero)
+ if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero, 0, false)) {
+ // The semantics of the return value are:
+ // If size was equal to 0, either NULL or a pointer suitable to be passed
+ // to free() is returned.
+ stateFree = stateFree->set<ReallocPairs>(ToPtr,
+ ReallocPair(FromPtr, FreesOnFail));
+ C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
+ return stateFree;
+ }
- // Bind the return value to NULL because it is now free.
- C.addTransition(stateFree->BindExpr(CE, svalBuilder.makeNull(), true));
- }
- if (const ProgramState *stateSizeNotZero = stateNotEqual->assume(SizeZero,false))
- if (const ProgramState *stateFree = FreeMemAux(C, CE, stateSizeNotZero,
- 0, false)) {
- // FIXME: We should copy the content of the original buffer.
- const ProgramState *stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
- UnknownVal(), stateFree);
- C.addTransition(stateRealloc);
- }
+ // Default behavior.
+ if (ProgramStateRef stateFree = FreeMemAux(C, CE, state, 0, false)) {
+ // FIXME: We should copy the content of the original buffer.
+ ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
+ UnknownVal(), stateFree);
+ if (!stateRealloc)
+ return 0;
+ stateRealloc = stateRealloc->set<ReallocPairs>(ToPtr,
+ ReallocPair(FromPtr, FreesOnFail));
+ C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
+ return stateRealloc;
}
+ return 0;
}
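
// Illustration (hypothetical user code, error handling omitted): the three
// realloc() shapes modeled above. A failed realloc in the middle would leak
// the previous buffer; this sketch only labels the cases.
#include <stdlib.h>
void realloc_cases(size_t n) {
  char *a = realloc(NULL, n);  // ptr NULL, size != 0: modeled as malloc(n)
  char *b = realloc(a, 0);     // size == 0: modeled as free(a); 'b' tracked
  b = realloc(b, n);           // default case: free old block, allocate new
  free(b);
}
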
-void MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE) {
- const ProgramState *state = C.getState();
- SValBuilder &svalBuilder = C.getSValBuilder();
+ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE){
+ if (CE->getNumArgs() < 2)
+ return 0;
- SVal count = state->getSVal(CE->getArg(0));
- SVal elementSize = state->getSVal(CE->getArg(1));
+ ProgramStateRef state = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal count = state->getSVal(CE->getArg(0), LCtx);
+ SVal elementSize = state->getSVal(CE->getArg(1), LCtx);
SVal TotalSize = svalBuilder.evalBinOp(state, BO_Mul, count, elementSize,
svalBuilder.getContext().getSizeType());
SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
- C.addTransition(MallocMemAux(C, CE, TotalSize, zeroVal, state));
+ return MallocMemAux(C, CE, TotalSize, zeroVal, state);
+}
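
// Worked example of the calloc modeling above (assuming a 4-byte int):
// TotalSize evaluates to 5 * 4 = 20 and bindDefault() fills the region
// with the zero value, so the block is tracked as 20 zeroed bytes.
#include <stdlib.h>
int *calloc_example(void) {
  return calloc(5, sizeof(int));  // tracked as a zero-initialized allocation
}
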
+
+LeakInfo
+MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const {
+ const LocationContext *LeakContext = N->getLocationContext();
+ // Walk the ExplodedGraph backwards and find the first node that referred to
+ // the tracked symbol.
+ const ExplodedNode *AllocNode = N;
+ const MemRegion *ReferenceRegion = 0;
+
+ while (N) {
+ ProgramStateRef State = N->getState();
+ if (!State->get<RegionState>(Sym))
+ break;
+
+ // Find the most recent expression bound to the symbol in the current
+ // context.
+ if (!ReferenceRegion) {
+ if (const MemRegion *MR = C.getLocationRegionIfPostStore(N)) {
+ SVal Val = State->getSVal(MR);
+ if (Val.getAsLocSymbol() == Sym)
+ ReferenceRegion = MR;
+ }
+ }
+
+ // The allocation node is the last node in the current context in which
+ // the symbol was tracked.
+ if (N->getLocationContext() == LeakContext)
+ AllocNode = N;
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+ }
+
+ ProgramPoint P = AllocNode->getLocation();
+ const Stmt *AllocationStmt = 0;
+ if (isa<StmtPoint>(P))
+ AllocationStmt = cast<StmtPoint>(P).getStmt();
+
+ return LeakInfo(AllocationStmt, ReferenceRegion);
+}
+
+void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
+ CheckerContext &C) const {
+ assert(N);
+ if (!BT_Leak) {
+ BT_Leak.reset(new BugType("Memory leak", "Memory Error"));
+ // Leaks should not be reported if they are post-dominated by a sink:
+ // (1) Sinks are higher importance bugs.
+ // (2) NoReturnFunctionChecker uses sink nodes to represent paths ending
+ // with __noreturn functions such as assert() or exit(). We choose not
+ // to report leaks on such paths.
+ BT_Leak->setSuppressOnSink(true);
+ }
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path.
+ PathDiagnosticLocation LocUsedForUniqueing;
+ const Stmt *AllocStmt = 0;
+ const MemRegion *Region = 0;
+ llvm::tie(AllocStmt, Region) = getAllocationSite(N, Sym, C);
+ if (AllocStmt)
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocStmt,
+ C.getSourceManager(), N->getLocationContext());
+
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Memory is never released; potential leak";
+ if (Region) {
+ os << " of memory pointed to by '";
+ Region->dumpPretty(os);
+ os << '\'';
+ }
+
+ BugReport *R = new BugReport(*BT_Leak, os.str(), N, LocUsedForUniqueing);
+ R->markInteresting(Sym);
+ R->addVisitor(new MallocBugVisitor(Sym));
+ C.EmitReport(R);
}
void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
@@ -573,174 +902,562 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
if (!SymReaper.hasDeadSymbols())
return;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
RegionStateTy RS = state->get<RegionState>();
RegionStateTy::Factory &F = state->get_context<RegionState>();
bool generateReport = false;
-
+ llvm::SmallVector<SymbolRef, 2> Errors;
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
if (SymReaper.isDead(I->first)) {
- if (I->second.isAllocated())
+ if (I->second.isAllocated()) {
generateReport = true;
-
+ Errors.push_back(I->first);
+ }
// Remove the dead symbol from the map.
RS = F.remove(RS, I->first);
}
}
- ExplodedNode *N = C.generateNode(state->set<RegionState>(RS));
-
- // FIXME: This does not handle when we have multiple leaks at a single
- // place.
- if (N && generateReport) {
- if (!BT_Leak)
- BT_Leak.reset(new BuiltinBug("Memory leak",
- "Allocated memory never released. Potential memory leak."));
- // FIXME: where it is allocated.
- BugReport *R = new BugReport(*BT_Leak, BT_Leak->getDescription(), N);
- C.EmitReport(R);
+ // Cleanup the Realloc Pairs Map.
+ ReallocMap RP = state->get<ReallocPairs>();
+ for (ReallocMap::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ if (SymReaper.isDead(I->first) ||
+ SymReaper.isDead(I->second.ReallocatedSym)) {
+ state = state->remove<ReallocPairs>(I->first);
+ }
+ }
+
+ // Generate leak node.
+ static SimpleProgramPointTag Tag("MallocChecker : DeadSymbolsLeak");
+ ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+
+ if (generateReport) {
+ for (llvm::SmallVector<SymbolRef, 2>::iterator
+ I = Errors.begin(), E = Errors.end(); I != E; ++I) {
+ reportLeak(*I, N, C);
+ }
}
+ C.addTransition(state->set<RegionState>(RS), N);
}
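
// Illustration (hypothetical user code): 'p' becomes dead while still in
// state Allocated, so checkDeadSymbols() collects it in Errors and
// reportLeak() emits "Memory is never released; potential leak".
#include <stdlib.h>
void leak_example(int cond) {
  char *p = malloc(32);
  if (cond)
    return;    // 'p' dies here without a matching free(): leak reported
  free(p);
}
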
-void MallocChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
- ExprEngine &Eng) const {
- const ProgramState *state = B.getState();
+void MallocChecker::checkEndPath(CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
RegionStateTy M = state->get<RegionState>();
+ // If we are inside an inlined call, skip it.
+ if (C.getLocationContext()->getParent() != 0)
+ return;
+
for (RegionStateTy::iterator I = M.begin(), E = M.end(); I != E; ++I) {
RefState RS = I->second;
if (RS.isAllocated()) {
- ExplodedNode *N = B.generateNode(state);
- if (N) {
- if (!BT_Leak)
- BT_Leak.reset(new BuiltinBug("Memory leak",
- "Allocated memory never released. Potential memory leak."));
- BugReport *R = new BugReport(*BT_Leak, BT_Leak->getDescription(), N);
- Eng.getBugReporter().EmitReport(R);
- }
+ ExplodedNode *N = C.addTransition(state);
+ if (N)
+ reportLeak(I->first, N, C);
}
}
}
-void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
- const Expr *retExpr = S->getRetValue();
- if (!retExpr)
+bool MallocChecker::checkEscape(SymbolRef Sym, const Stmt *S,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const RefState *RS = state->get<RegionState>(Sym);
+ if (!RS)
+ return false;
+
+ if (RS->isAllocated()) {
+ state = state->set<RegionState>(Sym, RefState::getEscaped(S));
+ C.addTransition(state);
+ return true;
+ }
+ return false;
+}
+
+void MallocChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
+ if (isMemFunction(C.getCalleeDecl(CE), C.getASTContext()))
return;
- const ProgramState *state = C.getState();
+ // Check for use-after-free when a freed pointer is passed to a call.
+ ProgramStateRef State = C.getState();
+ for (CallExpr::const_arg_iterator I = CE->arg_begin(),
+ E = CE->arg_end(); I != E; ++I) {
+ const Expr *A = *I;
+ if (A->getType().getTypePtr()->isAnyPointerType()) {
+ SymbolRef Sym = State->getSVal(A, C.getLocationContext()).getAsSymbol();
+ if (!Sym)
+ continue;
+ if (checkUseAfterFree(Sym, C, A))
+ return;
+ }
+ }
+}
- SymbolRef Sym = state->getSVal(retExpr).getAsSymbol();
- if (!Sym)
+void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
+ const Expr *E = S->getRetValue();
+ if (!E)
return;
- const RefState *RS = state->get<RegionState>(Sym);
- if (!RS)
+ // Check if we are returning a symbol.
+ SVal RetVal = C.getState()->getSVal(E, C.getLocationContext());
+ SymbolRef Sym = RetVal.getAsSymbol();
+ if (!Sym)
+ // If we are returning a field of the allocated struct or an array element,
+ // the callee could still free the memory.
+ // TODO: This logic should be a part of generic symbol escape callback.
+ if (const MemRegion *MR = RetVal.getAsRegion())
+ if (isa<FieldRegion>(MR) || isa<ElementRegion>(MR))
+ if (const SymbolicRegion *BMR =
+ dyn_cast<SymbolicRegion>(MR->getBaseRegion()))
+ Sym = BMR->getSymbol();
+ if (!Sym)
return;
- // FIXME: check other cases.
- if (RS->isAllocated())
- state = state->set<RegionState>(Sym, RefState::getEscaped(S));
+ // Check if we are returning freed memory.
+ if (checkUseAfterFree(Sym, C, E))
+ return;
- C.addTransition(state);
+ // If this function body is not inlined, check if the symbol is escaping.
+ if (C.getLocationContext()->getParent() == 0)
+ checkEscape(Sym, E, C);
}
-const ProgramState *MallocChecker::evalAssume(const ProgramState *state, SVal Cond,
- bool Assumption) const {
- // If a symblic region is assumed to NULL, set its state to AllocateFailed.
- // FIXME: should also check symbols assumed to non-null.
+// TODO: Blocks should be either inlined or should call invalidate regions
+// upon invocation. After that's in place, special casing here will not be
+// needed.
+void MallocChecker::checkPostStmt(const BlockExpr *BE,
+ CheckerContext &C) const {
- RegionStateTy RS = state->get<RegionState>();
+ // Scan the BlockDeclRefExprs for any object this checker may be
+ // tracking.
+ if (!BE->getBlockDecl()->hasCaptures())
+ return;
- for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
- // If the symbol is assumed to NULL, this will return an APSInt*.
- if (state->getSymVal(I.getKey()))
- state = state->set<RegionState>(I.getKey(),RefState::getAllocateFailed());
+ ProgramStateRef state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ if (I == E)
+ return;
+
+ SmallVector<const MemRegion*, 10> Regions;
+ const LocationContext *LC = C.getLocationContext();
+ MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
+
+ for ( ; I != E; ++I) {
+ const VarRegion *VR = *I;
+ if (VR->getSuperRegion() == R) {
+ VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+ }
+ Regions.push_back(VR);
}
- return state;
+ state =
+ state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
+ Regions.data() + Regions.size()).getState();
+ C.addTransition(state);
+}
+
+bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
+ const Stmt *S) const {
+ assert(Sym);
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ if (RS && RS->isReleased()) {
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_UseFree)
+ BT_UseFree.reset(new BugType("Use-after-free", "Memory Error"));
+
+ BugReport *R = new BugReport(*BT_UseFree,
+ "Use of memory after it is freed",N);
+ if (S)
+ R->addRange(S->getSourceRange());
+ R->markInteresting(Sym);
+ R->addVisitor(new MallocBugVisitor(Sym));
+ C.EmitReport(R);
+ return true;
+ }
+ }
+ return false;
}
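
// Illustration (hypothetical user code): a load through a Released symbol,
// which checkLocation() below forwards to checkUseAfterFree().
#include <stdlib.h>
int use_after_free_example(void) {
  int *p = malloc(sizeof(int));
  if (!p)
    return 0;
  free(p);
  return *p;   // "Use of memory after it is freed"
}
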
// Check if the location is a freed symbolic region.
void MallocChecker::checkLocation(SVal l, bool isLoad, const Stmt *S,
CheckerContext &C) const {
SymbolRef Sym = l.getLocSymbolInBase();
- if (Sym) {
- const RefState *RS = C.getState()->get<RegionState>(Sym);
- if (RS && RS->isReleased()) {
- if (ExplodedNode *N = C.generateNode()) {
- if (!BT_UseFree)
- BT_UseFree.reset(new BuiltinBug("Use dynamically allocated memory "
- "after it is freed."));
-
- BugReport *R = new BugReport(*BT_UseFree, BT_UseFree->getDescription(),
- N);
- C.EmitReport(R);
+ if (Sym)
+ checkUseAfterFree(Sym, C);
+}
+
+//===----------------------------------------------------------------------===//
+// Check various ways a symbol can be invalidated.
+// TODO: This logic (the next 3 functions) is copied/similar to the
+// RetainRelease checker. We might want to factor this out.
+//===----------------------------------------------------------------------===//
+
+// Stop tracking symbols when a value escapes as a result of checkBind.
+// A value escapes in four possible cases:
+// (1) we are binding to something that is not a memory region.
+// (2) we are binding to a memregion that does not have stack storage.
+// (3) we are binding to a memregion with stack storage that the store
+//     does not understand.
+// (4) we are binding to a stack memregion that is not a simple variable
+//     (e.g. a struct field), which we do not currently model.
+void MallocChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = true;
+ ProgramStateRef state = C.getState();
+
+ if (loc::MemRegionVal *regionLoc = dyn_cast<loc::MemRegionVal>(&loc)) {
+ escapes = !regionLoc->getRegion()->hasStackStorage();
+
+ if (!escapes) {
+ // To test (3), generate a new state with the binding added. If it is
+ // the same state, then it escapes (since the store cannot represent
+ // the binding).
+ escapes = (state == (state->bindLoc(*regionLoc, val)));
+ }
+ if (!escapes) {
+ // Case 4: We do not currently model what happens when a symbol is
+ // assigned to a struct field, so be conservative here and let the symbol
+ // go. TODO: This could definitely be improved upon.
+ escapes = !isa<VarRegion>(regionLoc->getRegion());
+ }
+ }
+
+ // If our store can represent the binding and we aren't storing to something
+ // that doesn't have local storage then just return and have the simulation
+ // state continue as is.
+ if (!escapes)
+ return;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
+ C.addTransition(state);
+}
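
// Illustration (hypothetical user code) of escape case (2) above: the
// pointer is bound to a global, i.e. non-stack storage, so the symbol is
// scanned by StopTrackingCallback and no leak is reported for 'p'.
#include <stdlib.h>
static char *g_buf;
void bind_escape_example(void) {
  char *p = malloc(8);
  g_buf = p;   // binding escapes; tracking stops
}
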
+
+// If a symbolic region is assumed to NULL (or another constant), stop tracking
+// it - assuming that allocation failed on this path.
+ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
+ SVal Cond,
+ bool Assumption) const {
+ RegionStateTy RS = state->get<RegionState>();
+ for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ // If the symbol is assumed to NULL or another constant, this will
+ // return an APSInt*.
+ if (state->getSymVal(I.getKey()))
+ state = state->remove<RegionState>(I.getKey());
+ }
+
+ // Realloc returns 0 when reallocation fails, which means that we should
+ // restore the state of the pointer being reallocated.
+ ReallocMap RP = state->get<ReallocPairs>();
+ for (ReallocMap::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ // If the symbol is assumed to NULL or another constant, this will
+ // return an APSInt*.
+ if (state->getSymVal(I.getKey())) {
+ SymbolRef ReallocSym = I.getData().ReallocatedSym;
+ const RefState *RS = state->get<RegionState>(ReallocSym);
+ if (RS) {
+ if (RS->isReleased() && !I.getData().IsFreeOnFailure)
+ state = state->set<RegionState>(ReallocSym,
+ RefState::getAllocateUnchecked(RS->getStmt()));
}
+ state = state->remove<ReallocPairs>(I.getKey());
}
}
+
+ return state;
}
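
// Illustration (hypothetical user code) of the failed-realloc handling
// above: assuming 'tmp == NULL' restores '*bufp' to AllocateUnchecked, so
// this common grow pattern does not yield a false leak report on failure.
#include <stdlib.h>
int grow(char **bufp, size_t n) {
  char *tmp = realloc(*bufp, n);
  if (!tmp)
    return -1;   // '*bufp' is still Allocated here, matching realloc()
  *bufp = tmp;
  return 0;
}
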
-void MallocChecker::checkBind(SVal location, SVal val,
- const Stmt *BindS, CheckerContext &C) const {
- // The PreVisitBind implements the same algorithm as already used by the
- // Objective C ownership checker: if the pointer escaped from this scope by
- // assignment, let it go. However, assigning to fields of a stack-storage
- // structure does not transfer ownership.
+// Check if the function is known to us; if it is not, conservatively
+// assume it can free/reallocate its pointer arguments.
+// (We assume that the pointers cannot escape through calls to system
+// functions not handled by this checker.)
+bool MallocChecker::doesNotFreeMemory(const CallOrObjCMessage *Call,
+ ProgramStateRef State) const {
+ if (!Call)
+ return false;
- const ProgramState *state = C.getState();
- DefinedOrUnknownSVal l = cast<DefinedOrUnknownSVal>(location);
+ // For now, assume that any C++ call can free memory.
+ // TODO: If we want to be more optimistic here, we'll need to make sure that
+ // regions escape to C++ containers. They seem to do that even now, but for
+ // mysterious reasons.
+ if (Call->isCXXCall())
+ return false;
- // Check for null dereferences.
- if (!isa<Loc>(l))
- return;
+ const Decl *D = Call->getDecl();
+ if (!D)
+ return false;
- // Before checking if the state is null, check if 'val' has a RefState.
- // Only then should we check for null and bifurcate the state.
- SymbolRef Sym = val.getLocSymbolInBase();
- if (Sym) {
- if (const RefState *RS = state->get<RegionState>(Sym)) {
- // If ptr is NULL, no operation is performed.
- const ProgramState *notNullState, *nullState;
- llvm::tie(notNullState, nullState) = state->assume(l);
-
- // Generate a transition for 'nullState' to record the assumption
- // that the state was null.
- if (nullState)
- C.addTransition(nullState);
-
- if (!notNullState)
- return;
+ ASTContext &ASTC = State->getStateManager().getContext();
+
+ // If it's one of the allocation functions we can reason about, we model
+ // its behavior explicitly.
+ if (isa<FunctionDecl>(D) && isMemFunction(cast<FunctionDecl>(D), ASTC)) {
+ return true;
+ }
+
+ // If it's not a system call, assume it frees memory.
+ SourceManager &SM = ASTC.getSourceManager();
+ if (!SM.isInSystemHeader(D->getLocation()))
+ return false;
+
+ // Process C/ObjC functions.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // White list the system functions whose arguments escape.
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return true;
+ StringRef FName = II->getName();
+
+ // White list thread local storage.
+ if (FName.equals("pthread_setspecific"))
+ return false;
- if (RS->isAllocated()) {
- // Something we presently own is being assigned somewhere.
- const MemRegion *AR = location.getAsRegion();
- if (!AR)
- return;
- AR = AR->StripCasts()->getBaseRegion();
- do {
- // If it is on the stack, we still own it.
- if (AR->hasStackNonParametersStorage())
- break;
-
- // If the state can't represent this binding, we still own it.
- if (notNullState == (notNullState->bindLoc(cast<Loc>(location),
- UnknownVal())))
- break;
-
- // We no longer own this pointer.
- notNullState =
- notNullState->set<RegionState>(Sym,
- RefState::getRelinquished(BindS));
+ // White list the 'XXXNoCopy' ObjC functions.
+ if (FName.endswith("NoCopy")) {
+ // Look for the deallocator argument. We know that the memory ownership
+ // is not transferred only if the deallocator argument is
+ // 'kCFAllocatorNull'.
+ for (unsigned i = 1; i < Call->getNumArgs(); ++i) {
+ const Expr *ArgE = Call->getArg(i)->IgnoreParenCasts();
+ if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(ArgE)) {
+ StringRef DeallocatorName = DE->getFoundDecl()->getName();
+ if (DeallocatorName == "kCFAllocatorNull")
+ return true;
}
- while (false);
}
- C.addTransition(notNullState);
+ return false;
+ }
+
+ // PR12101
+ // Many CoreFoundation and CoreGraphics functions might allow a tracked
+ // object to escape.
+ if (Call->isCFCGAllowingEscape(FName))
+ return false;
+
+ // Associating streams with malloced buffers. The pointer can escape if
+ // 'closefn' is specified (and if that function does free memory).
+ // Currently, we do not inspect the 'closefn' function (PR12101).
+ if (FName == "funopen")
+ if (Call->getNumArgs() >= 5 && !Call->getArgSVal(4).isConstant(0))
+ return false;
+
+ // Do not warn on pointers passed to 'setbuf' when used with std streams,
+ // these leaks might be intentional when setting the buffer for stdio.
+ // http://stackoverflow.com/questions/2671151/who-frees-setvbuf-buffer
+ if (FName == "setbuf" || FName =="setbuffer" ||
+ FName == "setlinebuf" || FName == "setvbuf") {
+ if (Call->getNumArgs() >= 1)
+ if (const DeclRefExpr *Arg =
+ dyn_cast<DeclRefExpr>(Call->getArg(0)->IgnoreParenCasts()))
+ if (const VarDecl *D = dyn_cast<VarDecl>(Arg->getDecl()))
+ if (D->getCanonicalDecl()->getName().find("std")
+ != StringRef::npos)
+ return false;
+ }
+
+ // A bunch of other functions which take ownership of a pointer (see the
+ // retain release checker). Not all of the parameters here are invalidated,
+ // but the Malloc checker cannot differentiate between them; the right way
+ // of doing this would be to implement a pointer-escapes callback.
+ if (FName == "CVPixelBufferCreateWithBytes" ||
+ FName == "CGBitmapContextCreateWithData" ||
+ FName == "CVPixelBufferCreateWithPlanarBytes" ||
+ FName == "OSAtomicEnqueue") {
+ return false;
+ }
+
+ // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove.
+ if (FName.startswith("NS") && (FName.find("Insert") != StringRef::npos))
+ return false;
+
+ // Otherwise, assume that the function does not free memory.
+ // Most system calls do not free the memory.
+ return true;
+
+ // Process ObjC functions.
+ } else if (const ObjCMethodDecl * ObjCD = dyn_cast<ObjCMethodDecl>(D)) {
+ Selector S = ObjCD->getSelector();
+
+ // White list the ObjC functions which do free memory.
+ // - Anything containing 'freeWhenDone' param set to 1.
+ // Ex: dataWithBytesNoCopy:length:freeWhenDone.
+ for (unsigned i = 1; i < S.getNumArgs(); ++i) {
+ if (S.getNameForSlot(i).equals("freeWhenDone")) {
+ if (Call->getArgSVal(i).isConstant(1))
+ return false;
+ else
+ return true;
+ }
}
+
+ // If the first selector ends with NoCopy, assume that the ownership is
+ // transferred as well.
+ // Ex: [NSData dataWithBytesNoCopy:bytes length:10];
+ if (S.getNameForSlot(0).endswith("NoCopy")) {
+ return false;
+ }
+
+ // Otherwise, assume that the function does not free memory.
+ // Most system calls do not free the memory.
+ return true;
}
+
+ // Otherwise, assume that the function can free memory.
+ return false;
+
}
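
// Illustration (hypothetical user code) of the thread-local-storage entry
// in the whitelist above: pthread_setspecific() may keep 'p' reachable
// beyond this frame, so doesNotFreeMemory() returns false and the symbol
// is treated as escaped rather than leaked.
#include <pthread.h>
#include <stdlib.h>
void tls_example(pthread_key_t key) {
  char *p = malloc(16);
  pthread_setspecific(key, p);   // escape, not a leak
}
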
-void ento::registerMallocChecker(CheckerManager &mgr) {
- mgr.registerChecker<MallocChecker>();
+// If the symbol we are tracking is invalidated, but not explicitly (e.g. &p
+// escapes while we are tracking p), stop tracking the symbol, as we can no
+// longer reason about it.
+ProgramStateRef
+MallocChecker::checkRegionChanges(ProgramStateRef State,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const {
+ if (!invalidated || invalidated->empty())
+ return State;
+ llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
+
+ // If it's a call which might free or reallocate memory, we assume that all
+ // regions (explicit and implicit) escaped.
+
+ // Otherwise, whitelist explicit pointers; we can still track them.
+ if (!Call || doesNotFreeMemory(Call, State)) {
+ for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+ E = ExplicitRegions.end(); I != E; ++I) {
+ if (const SymbolicRegion *R = (*I)->StripCasts()->getAs<SymbolicRegion>())
+ WhitelistedSymbols.insert(R->getSymbol());
+ }
+ }
+
+ for (StoreManager::InvalidatedSymbols::const_iterator I=invalidated->begin(),
+ E = invalidated->end(); I!=E; ++I) {
+ SymbolRef sym = *I;
+ if (WhitelistedSymbols.count(sym))
+ continue;
+ // The symbol escaped.
+ if (const RefState *RS = State->get<RegionState>(sym))
+ State = State->set<RegionState>(sym, RefState::getEscaped(RS->getStmt()));
+ }
+ return State;
+}
+
+static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
+ ProgramStateRef prevState) {
+ ReallocMap currMap = currState->get<ReallocPairs>();
+ ReallocMap prevMap = prevState->get<ReallocPairs>();
+
+ for (ReallocMap::iterator I = prevMap.begin(), E = prevMap.end();
+ I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (!currMap.lookup(sym))
+ return sym;
+ }
+
+ return NULL;
}
+
+PathDiagnosticPiece *
+MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef state = N->getState();
+ ProgramStateRef statePrev = PrevN->getState();
+
+ const RefState *RS = state->get<RegionState>(Sym);
+ const RefState *RSPrev = statePrev->get<RegionState>(Sym);
+ if (!RS && !RSPrev)
+ return 0;
+
+ const Stmt *S = 0;
+ const char *Msg = 0;
+ StackHintGeneratorForSymbol *StackHint = 0;
+
+ // Retrieve the associated statement.
+ ProgramPoint ProgLoc = N->getLocation();
+ if (isa<StmtPoint>(ProgLoc))
+ S = cast<StmtPoint>(ProgLoc).getStmt();
+ // If an assumption was made on a branch, it should be caught
+ // here by looking at the state transition.
+ if (isa<BlockEdge>(ProgLoc)) {
+ const CFGBlock *srcBlk = cast<BlockEdge>(ProgLoc).getSrc();
+ S = srcBlk->getTerminator();
+ }
+ if (!S)
+ return 0;
+
+ // Find out if this is an interesting point and what is the kind.
+ if (Mode == Normal) {
+ if (isAllocated(RS, RSPrev, S)) {
+ Msg = "Memory is allocated";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returned allocated memory");
+ } else if (isReleased(RS, RSPrev, S)) {
+ Msg = "Memory is released";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returned released memory");
+ } else if (isReallocFailedCheck(RS, RSPrev, S)) {
+ Mode = ReallocationFailed;
+ Msg = "Reallocation failed";
+ StackHint = new StackHintGeneratorForReallocationFailed(Sym,
+ "Reallocation failed");
+
+ if (SymbolRef sym = findFailedReallocSymbol(state, statePrev)) {
+ // Is it possible to fail two reallocs WITHOUT testing in between?
+ assert((!FailedReallocSymbol || FailedReallocSymbol == sym) &&
+ "We only support one failed realloc at a time.");
+ BR.markInteresting(sym);
+ FailedReallocSymbol = sym;
+ }
+ }
+
+ // We are in a special mode if a reallocation failed later in the path.
+ } else if (Mode == ReallocationFailed) {
+ assert(FailedReallocSymbol && "No symbol to look for.");
+
+ // Is this the first appearance of the reallocated symbol?
+ if (!statePrev->get<RegionState>(FailedReallocSymbol)) {
+ // If we ever hit this assert, that means BugReporter has decided to skip
+ // node pairs or visit them out of order.
+ assert(state->get<RegionState>(FailedReallocSymbol) &&
+ "Missed the reallocation point");
+
+ // We're at the reallocation point.
+ Msg = "Attempt to reallocate memory";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returned reallocated memory");
+ FailedReallocSymbol = NULL;
+ Mode = Normal;
+ }
+ }
+
+ if (!Msg)
+ return 0;
+ assert(StackHint);
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, Msg, true, StackHint);
+}
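
// Illustration (hypothetical user code) of a path the visitor annotates:
// the realloc() call gets "Attempt to reallocate memory", and the branch
// where the result is assumed NULL gets "Reallocation failed".
#include <stdlib.h>
void visitor_path_example(char *p, size_t n) {
  char *q = realloc(p, n);  // "Attempt to reallocate memory"
  if (!q)                   // "Reallocation failed"; 'p' restored to Allocated
    return;                 // leak of 'p' reported on this path
  free(q);
}
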
+
+
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+ registerCStringCheckerBasic(mgr); \
+ mgr.registerChecker<MallocChecker>()->Filter.C##name = true;\
+}
+
+REGISTER_CHECKER(MallocPessimistic)
+REGISTER_CHECKER(MallocOptimistic)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index cf5501a..daec418 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -205,7 +205,7 @@ void MallocOverflowSecurityChecker::OutputPossibleOverflows(
// Delete any possible overflows which have a comparison.
CheckOverflowOps c(PossibleMallocOverflows, BR.getContext());
- c.Visit(mgr.getAnalysisContext(D)->getBody());
+ c.Visit(mgr.getAnalysisDeclContext(D)->getBody());
// Output warnings for all overflows that are left.
for (CheckOverflowOps::theVecType::iterator
@@ -214,11 +214,10 @@ void MallocOverflowSecurityChecker::OutputPossibleOverflows(
i != e;
++i) {
SourceRange R = i->mulop->getSourceRange();
- BR.EmitBasicReport("MallocOverflowSecurityChecker",
+ BR.EmitBasicReport(D, "malloc() size overflow", categories::UnixAPI,
"the computation of the size of the memory allocation may overflow",
PathDiagnosticLocation::createOperatorLoc(i->mulop,
- BR.getSourceManager()),
- &R, 1);
+ BR.getSourceManager()), &R, 1);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
new file mode 100644
index 0000000..08a9da1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -0,0 +1,211 @@
+// MallocSizeofChecker.cpp - Check for dubious malloc arguments ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Reports inconsistencies between the casted type of the return value of a
+// malloc/calloc/realloc call and the operand of any sizeof expressions
+// contained within its argument(s).
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+typedef std::pair<const TypeSourceInfo *, const CallExpr *> TypeCallPair;
+typedef llvm::PointerUnion<const Stmt *, const VarDecl *> ExprParent;
+
+class CastedAllocFinder
+ : public ConstStmtVisitor<CastedAllocFinder, TypeCallPair> {
+ IdentifierInfo *II_malloc, *II_calloc, *II_realloc;
+
+public:
+ struct CallRecord {
+ ExprParent CastedExprParent;
+ const Expr *CastedExpr;
+ const TypeSourceInfo *ExplicitCastType;
+ const CallExpr *AllocCall;
+
+ CallRecord(ExprParent CastedExprParent, const Expr *CastedExpr,
+ const TypeSourceInfo *ExplicitCastType,
+ const CallExpr *AllocCall)
+ : CastedExprParent(CastedExprParent), CastedExpr(CastedExpr),
+ ExplicitCastType(ExplicitCastType), AllocCall(AllocCall) {}
+ };
+
+ typedef std::vector<CallRecord> CallVec;
+ CallVec Calls;
+
+ CastedAllocFinder(ASTContext *Ctx) :
+ II_malloc(&Ctx->Idents.get("malloc")),
+ II_calloc(&Ctx->Idents.get("calloc")),
+ II_realloc(&Ctx->Idents.get("realloc")) {}
+
+ void VisitChild(ExprParent Parent, const Stmt *S) {
+ TypeCallPair AllocCall = Visit(S);
+ if (AllocCall.second && AllocCall.second != S)
+ Calls.push_back(CallRecord(Parent, cast<Expr>(S), AllocCall.first,
+ AllocCall.second));
+ }
+
+ void VisitChildren(const Stmt *S) {
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I!=E; ++I)
+ if (const Stmt *child = *I)
+ VisitChild(S, child);
+ }
+
+ TypeCallPair VisitCastExpr(const CastExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ TypeCallPair VisitExplicitCastExpr(const ExplicitCastExpr *E) {
+ return TypeCallPair(E->getTypeInfoAsWritten(),
+ Visit(E->getSubExpr()).second);
+ }
+
+ TypeCallPair VisitParenExpr(const ParenExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ TypeCallPair VisitStmt(const Stmt *S) {
+ VisitChildren(S);
+ return TypeCallPair();
+ }
+
+ TypeCallPair VisitCallExpr(const CallExpr *E) {
+ VisitChildren(E);
+ const FunctionDecl *FD = E->getDirectCallee();
+ if (FD) {
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II == II_malloc || II == II_calloc || II == II_realloc)
+ return TypeCallPair((const TypeSourceInfo *)0, E);
+ }
+ return TypeCallPair();
+ }
+
+ TypeCallPair VisitDeclStmt(const DeclStmt *S) {
+ for (DeclStmt::const_decl_iterator I = S->decl_begin(), E = S->decl_end();
+ I!=E; ++I)
+ if (const VarDecl *VD = dyn_cast<VarDecl>(*I))
+ if (const Expr *Init = VD->getInit())
+ VisitChild(VD, Init);
+ return TypeCallPair();
+ }
+};
+
+class SizeofFinder : public ConstStmtVisitor<SizeofFinder> {
+public:
+ std::vector<const UnaryExprOrTypeTraitExpr *> Sizeofs;
+
+ void VisitBinMul(const BinaryOperator *E) {
+ Visit(E->getLHS());
+ Visit(E->getRHS());
+ }
+
+ void VisitBinAdd(const BinaryOperator *E) {
+ Visit(E->getLHS());
+ Visit(E->getRHS());
+ }
+
+ void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ void VisitParenExpr(const ParenExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E) {
+ if (E->getKind() != UETT_SizeOf)
+ return;
+
+ Sizeofs.push_back(E);
+ }
+};
+
+class MallocSizeofChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ AnalysisDeclContext *ADC = mgr.getAnalysisDeclContext(D);
+ CastedAllocFinder Finder(&BR.getContext());
+ Finder.Visit(D->getBody());
+ for (CastedAllocFinder::CallVec::iterator i = Finder.Calls.begin(),
+ e = Finder.Calls.end(); i != e; ++i) {
+ QualType CastedType = i->CastedExpr->getType();
+ if (!CastedType->isPointerType())
+ continue;
+ QualType PointeeType = CastedType->getAs<PointerType>()->getPointeeType();
+ if (PointeeType->isVoidType())
+ continue;
+
+ for (CallExpr::const_arg_iterator ai = i->AllocCall->arg_begin(),
+ ae = i->AllocCall->arg_end(); ai != ae; ++ai) {
+ if (!(*ai)->getType()->isIntegerType())
+ continue;
+
+ SizeofFinder SFinder;
+ SFinder.Visit(*ai);
+ if (SFinder.Sizeofs.size() != 1)
+ continue;
+
+ QualType SizeofType = SFinder.Sizeofs[0]->getTypeOfArgument();
+ if (!BR.getContext().hasSameUnqualifiedType(PointeeType, SizeofType)) {
+ const TypeSourceInfo *TSI = 0;
+ if (i->CastedExprParent.is<const VarDecl *>()) {
+ TSI =
+ i->CastedExprParent.get<const VarDecl *>()->getTypeSourceInfo();
+ } else {
+ TSI = i->ExplicitCastType;
+ }
+
+ SmallString<64> buf;
+ llvm::raw_svector_ostream OS(buf);
+
+ OS << "Result of '"
+ << i->AllocCall->getDirectCallee()->getIdentifier()->getName()
+ << "' is converted to type '"
+ << CastedType.getAsString() << "', whose pointee type '"
+ << PointeeType.getAsString() << "' is incompatible with "
+ << "sizeof operand type '" << SizeofType.getAsString() << "'";
+ llvm::SmallVector<SourceRange, 4> Ranges;
+ Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
+ Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
+ if (TSI)
+ Ranges.push_back(TSI->getTypeLoc().getSourceRange());
+
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
+ BR.getSourceManager(), ADC);
+
+ BR.EmitBasicReport(D, "allocator sizeof operand mismatch",
+ categories::UnixAPI,
+ OS.str(),
+ L, Ranges.data(), Ranges.size());
+ }
+ }
+ }
+ }
+};
+
+}
+
+void ento::registerMallocSizeofChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MallocSizeofChecker>();
+}
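
// Illustration (hypothetical user code) of the report this new checker
// emits: the pointee of the cast type disagrees with the sizeof operand.
#include <stdlib.h>
struct A { int x; };
struct B { double y; };
void sizeof_mismatch_example(void) {
  struct B *p = (struct B *)malloc(sizeof(struct A));  // mismatch reported
  free(p);
}
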
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index 7f74a7d..4989ba8 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -19,6 +19,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
@@ -31,7 +32,7 @@ using namespace ento;
namespace {
class NSAutoreleasePoolChecker
: public Checker<check::PreObjCMessage> {
-
+ mutable OwningPtr<BugType> BT;
mutable Selector releaseS;
public:
@@ -65,20 +66,24 @@ void NSAutoreleasePoolChecker::checkPreObjCMessage(ObjCMessage msg,
// Sending 'release' message?
if (msg.getSelector() != releaseS)
return;
-
- SourceRange R = msg.getSourceRange();
- BugReporter &BR = C.getBugReporter();
- const LocationContext *LC = C.getPredecessor()->getLocationContext();
- const SourceManager &SM = BR.getSourceManager();
- const Expr *E = msg.getMsgOrPropExpr();
- PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(E, SM, LC);
- C.getBugReporter().EmitBasicReport("Use -drain instead of -release",
- "API Upgrade (Apple)",
- "Use -drain instead of -release when using NSAutoreleasePool "
- "and garbage collection", L, &R, 1);
+
+ if (!BT)
+ BT.reset(new BugType("Use -drain instead of -release",
+ "API Upgrade (Apple)"));
+
+ ExplodedNode *N = C.addTransition();
+ if (!N) {
+ assert(0);
+ return;
+ }
+
+ BugReport *Report = new BugReport(*BT, "Use -drain instead of -release when "
+ "using NSAutoreleasePool and garbage collection", N);
+ Report->addRange(msg.getSourceRange());
+ C.EmitReport(Report);
}
void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
- if (mgr.getLangOptions().getGC() != LangOptions::NonGC)
+ if (mgr.getLangOpts().getGC() != LangOptions::NonGC)
mgr.registerChecker<NSAutoreleasePoolChecker>();
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 5678998..f826573 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -74,7 +74,7 @@ void NSErrorMethodChecker::checkASTDecl(const ObjCMethodDecl *D,
"error occurred";
PathDiagnosticLocation L =
PathDiagnosticLocation::create(D, BR.getSourceManager());
- BR.EmitBasicReport("Bad return type when passing NSError**",
+ BR.EmitBasicReport(D, "Bad return type when passing NSError**",
"Coding conventions (Apple)", err, L);
}
}
@@ -122,7 +122,7 @@ void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
"error occurred";
PathDiagnosticLocation L =
PathDiagnosticLocation::create(D, BR.getSourceManager());
- BR.EmitBasicReport("Bad return type when passing CFErrorRef*",
+ BR.EmitBasicReport(D, "Bad return type when passing CFErrorRef*",
"Coding conventions (Apple)", err, L);
}
}
@@ -182,7 +182,7 @@ namespace ento {
}
template <typename T>
-static bool hasFlag(SVal val, const ProgramState *state) {
+static bool hasFlag(SVal val, ProgramStateRef state) {
if (SymbolRef sym = val.getAsSymbol())
if (const unsigned *attachedFlags = state->get<T>(sym))
return *attachedFlags;
@@ -190,7 +190,7 @@ static bool hasFlag(SVal val, const ProgramState *state) {
}
template <typename T>
-static void setFlag(const ProgramState *state, SVal val, CheckerContext &C) {
+static void setFlag(ProgramStateRef state, SVal val, CheckerContext &C) {
// We tag the symbol that the SVal wraps.
if (SymbolRef sym = val.getAsSymbol())
C.addTransition(state->set<T>(sym, true));
@@ -198,7 +198,7 @@ static void setFlag(const ProgramState *state, SVal val, CheckerContext &C) {
static QualType parameterTypeFromSVal(SVal val, CheckerContext &C) {
const StackFrameContext *
- SFC = C.getPredecessor()->getLocationContext()->getCurrentStackFrame();
+ SFC = C.getLocationContext()->getCurrentStackFrame();
if (const loc::MemRegionVal* X = dyn_cast<loc::MemRegionVal>(&val)) {
const MemRegion* R = X->getRegion();
if (const VarRegion *VR = R->getAs<VarRegion>())
@@ -220,7 +220,7 @@ void NSOrCFErrorDerefChecker::checkLocation(SVal loc, bool isLoad,
return;
ASTContext &Ctx = C.getASTContext();
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
// If we are loading from NSError**/CFErrorRef* parameter, mark the resulting
// SVal so that we can later check it when handling the
@@ -253,7 +253,7 @@ void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
return;
SVal loc = event.Location;
- const ProgramState *state = event.SinkNode->getState();
+ ProgramStateRef state = event.SinkNode->getState();
BugReporter &BR = *event.BR;
bool isNSError = hasFlag<NSErrorOut>(loc, state);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index 81f1924..c2d7c09 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -36,13 +36,13 @@ public:
void NoReturnFunctionChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
const Expr *Callee = CE->getCallee();
bool BuildSinks = getFunctionExtInfo(Callee->getType()).getNoReturn();
if (!BuildSinks) {
- SVal L = state->getSVal(Callee);
+ SVal L = state->getSVal(Callee, C.getLocationContext());
const FunctionDecl *FD = L.getAsFunctionDecl();
if (!FD)
return;
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
index f426265..7b724d2 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
@@ -32,31 +32,32 @@ private:
ExprEngine &Eng,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) const;
-
- ExplodedNode *generateNode(const ProgramState *State,
- ExplodedNode *Pred, const CallExpr *Statement,
- StmtNodeBuilder &B, ExplodedNodeSet &Dst) const;
};
}
+static StringRef getCalleeName(ProgramStateRef State,
+ const CallExpr *CE,
+ const LocationContext *LCtx) {
+ const Expr *Callee = CE->getCallee();
+ SVal L = State->getSVal(Callee, LCtx);
+ const FunctionDecl *funDecl = L.getAsFunctionDecl();
+ if (!funDecl)
+ return StringRef();
+ IdentifierInfo *funI = funDecl->getIdentifier();
+ if (!funI)
+ return StringRef();
+ return funI->getName();
+}
+
bool OSAtomicChecker::inlineCall(const CallExpr *CE,
ExprEngine &Eng,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) const {
- const ProgramState *state = Pred->getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
-
- const FunctionDecl *FD = L.getAsFunctionDecl();
- if (!FD)
+ StringRef FName = getCalleeName(Pred->getState(),
+ CE, Pred->getLocationContext());
+ if (FName.empty())
return false;
- const IdentifierInfo *II = FD->getIdentifier();
- if (!II)
- return false;
-
- StringRef FName(II->getName());
-
// Check for compare and swap.
if (FName.startswith("OSAtomicCompareAndSwap") ||
FName.startswith("objc_atomicCompareAndSwap"))
@@ -66,17 +67,6 @@ bool OSAtomicChecker::inlineCall(const CallExpr *CE,
return false;
}
-ExplodedNode *OSAtomicChecker::generateNode(const ProgramState *State,
- ExplodedNode *Pred,
- const CallExpr *Statement,
- StmtNodeBuilder &B,
- ExplodedNodeSet &Dst) const {
- ExplodedNode *N = B.generateNode(Statement, State, Pred, this);
- if (N)
- Dst.Add(N);
- return N;
-}
-
bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
ExprEngine &Eng,
ExplodedNode *Pred,
@@ -85,7 +75,6 @@ bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
if (CE->getNumArgs() != 3)
return false;
- StmtNodeBuilder &Builder = Eng.getBuilder();
ASTContext &Ctx = Eng.getContext();
const Expr *oldValueExpr = CE->getArg(0);
QualType oldValueType = Ctx.getCanonicalType(oldValueExpr->getType());
@@ -115,9 +104,10 @@ bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
static SimpleProgramPointTag OSAtomicStoreTag("OSAtomicChecker : Store");
// Load 'theValue'.
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
ExplodedNodeSet Tmp;
- SVal location = state->getSVal(theValueExpr);
+ SVal location = state->getSVal(theValueExpr, LCtx);
// Here we should use the value type of the region as the load type, because
// we are simulating the semantics of the function, not the semantics of
// passing an argument. So the type of theValue expr is not what we load.
@@ -130,15 +120,12 @@ bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
LoadTy = TR->getValueType();
}
- Eng.evalLoad(Tmp, theValueExpr, Pred,
- state, location, &OSAtomicLoadTag, LoadTy);
+ Eng.evalLoad(Tmp, CE, theValueExpr, Pred,
+ state, location, &OSAtomicLoadTag, LoadTy);
if (Tmp.empty()) {
- // If no nodes were generated, other checkers must generated sinks. But
- // since the builder state was restored, we set it manually to prevent
- // auto transition.
- // FIXME: there should be a better approach.
- Builder.BuildSinks = true;
+ // If no nodes were generated, other checkers must have generated sinks.
+ // We return an empty Dst.
return true;
}
@@ -146,14 +133,14 @@ bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
I != E; ++I) {
ExplodedNode *N = *I;
- const ProgramState *stateLoad = N->getState();
+ ProgramStateRef stateLoad = N->getState();
// Use direct bindings from the environment since we are forcing a load
// from a location that the Environment would typically not be used
// to bind a value.
- SVal theValueVal_untested = stateLoad->getSVal(theValueExpr, true);
+ SVal theValueVal_untested = stateLoad->getSVal(theValueExpr, LCtx, true);
- SVal oldValueVal_untested = stateLoad->getSVal(oldValueExpr);
+ SVal oldValueVal_untested = stateLoad->getSVal(oldValueExpr, LCtx);
// FIXME: Issue an error.
if (theValueVal_untested.isUndef() || oldValueVal_untested.isUndef()) {
@@ -171,13 +158,13 @@ bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
DefinedOrUnknownSVal Cmp =
svalBuilder.evalEQ(stateLoad,theValueVal,oldValueVal);
- const ProgramState *stateEqual = stateLoad->assume(Cmp, true);
+ ProgramStateRef stateEqual = stateLoad->assume(Cmp, true);
// Were they equal?
if (stateEqual) {
// Perform the store.
ExplodedNodeSet TmpStore;
- SVal val = stateEqual->getSVal(newValueExpr);
+ SVal val = stateEqual->getSVal(newValueExpr, LCtx);
// Handle implicit value casts.
if (const TypedValueRegion *R =
@@ -185,40 +172,41 @@ bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
val = svalBuilder.evalCast(val,R->getValueType(), newValueExpr->getType());
}
- Eng.evalStore(TmpStore, NULL, theValueExpr, N,
- stateEqual, location, val, &OSAtomicStoreTag);
+ Eng.evalStore(TmpStore, CE, theValueExpr, N,
+ stateEqual, location, val, &OSAtomicStoreTag);
if (TmpStore.empty()) {
- // If no nodes were generated, other checkers must generated sinks. But
- // since the builder state was restored, we set it manually to prevent
- // auto transition.
- // FIXME: there should be a better approach.
- Builder.BuildSinks = true;
+ // If no nodes were generated, other checkers must have generated sinks.
+ // We return an empty Dst.
return true;
}
-
+
+ StmtNodeBuilder B(TmpStore, Dst, Eng.getBuilderContext());
// Now bind the result of the comparison.
for (ExplodedNodeSet::iterator I2 = TmpStore.begin(),
E2 = TmpStore.end(); I2 != E2; ++I2) {
ExplodedNode *predNew = *I2;
- const ProgramState *stateNew = predNew->getState();
+ ProgramStateRef stateNew = predNew->getState();
// Check for 'void' return type if we have a bogus function prototype.
SVal Res = UnknownVal();
QualType T = CE->getType();
if (!T->isVoidType())
Res = Eng.getSValBuilder().makeTruthVal(true, T);
- generateNode(stateNew->BindExpr(CE, Res), predNew, CE, Builder, Dst);
+ B.generateNode(CE, predNew, stateNew->BindExpr(CE, LCtx, Res),
+ false, this);
}
}
// Were they not equal?
- if (const ProgramState *stateNotEqual = stateLoad->assume(Cmp, false)) {
+ if (ProgramStateRef stateNotEqual = stateLoad->assume(Cmp, false)) {
// Check for 'void' return type if we have a bogus function prototype.
SVal Res = UnknownVal();
QualType T = CE->getType();
if (!T->isVoidType())
Res = Eng.getSValBuilder().makeTruthVal(false, CE->getType());
- generateNode(stateNotEqual->BindExpr(CE, Res), N, CE, Builder, Dst);
+ StmtNodeBuilder B(N, Dst, Eng.getBuilderContext());
+ B.generateNode(CE, N, stateNotEqual->BindExpr(CE, LCtx, Res),
+ false, this);
}
}
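For readers unfamiliar with the modeled builtin: the checker bifurcates the program state on the comparison (stateEqual/stateNotEqual), mirroring what compare-and-swap does at runtime. A minimal sketch of those semantics, using std::atomic rather than the OSAtomic API (not part of the patch):

#include <atomic>
#include <cassert>

// The analyzer models the same split compare_exchange_strong performs at
// runtime: one state where *theValue == oldValue (store happens, call
// returns true) and one where it does not (no store, call returns false).
static bool compareAndSwap(std::atomic<int> &theValue, int oldValue,
                           int newValue) {
  return theValue.compare_exchange_strong(oldValue, newValue);
}

int main() {
  std::atomic<int> v(1);
  assert(compareAndSwap(v, 1, 2));   // "equal" path: store performed
  assert(v.load() == 2);
  assert(!compareAndSwap(v, 1, 3));  // "not equal" path: value unchanged
  assert(v.load() == 2);
}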
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 3e4e07b..777e9ea 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
+#include "clang/AST/StmtObjC.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -26,8 +27,8 @@ using namespace ento;
namespace {
class ObjCAtSyncChecker
: public Checker< check::PreStmt<ObjCAtSynchronizedStmt> > {
- mutable llvm::OwningPtr<BuiltinBug> BT_null;
- mutable llvm::OwningPtr<BuiltinBug> BT_undef;
+ mutable OwningPtr<BuiltinBug> BT_null;
+ mutable OwningPtr<BuiltinBug> BT_undef;
public:
void checkPreStmt(const ObjCAtSynchronizedStmt *S, CheckerContext &C) const;
@@ -38,8 +39,8 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
CheckerContext &C) const {
const Expr *Ex = S->getSynchExpr();
- const ProgramState *state = C.getState();
- SVal V = state->getSVal(Ex);
+ ProgramStateRef state = C.getState();
+ SVal V = state->getSVal(Ex, C.getLocationContext());
// Uninitialized value used for the mutex?
if (isa<UndefinedVal>(V)) {
@@ -49,7 +50,8 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"for @synchronized"));
BugReport *report =
new BugReport(*BT_undef, BT_undef->getDescription(), N);
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex));
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
+ report));
C.EmitReport(report);
}
return;
@@ -59,20 +61,21 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
return;
// Check for null mutexes.
- const ProgramState *notNullState, *nullState;
+ ProgramStateRef notNullState, nullState;
llvm::tie(notNullState, nullState) = state->assume(cast<DefinedSVal>(V));
if (nullState) {
if (!notNullState) {
// Generate an error node. This isn't a sink since
// a null mutex just means no synchronization occurs.
- if (ExplodedNode *N = C.generateNode(nullState)) {
+ if (ExplodedNode *N = C.addTransition(nullState)) {
if (!BT_null)
BT_null.reset(new BuiltinBug("Nil value used as mutex for @synchronized() "
"(no synchronization will occur)"));
BugReport *report =
new BugReport(*BT_null, BT_null->getDescription(), N);
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex));
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
+ report));
C.EmitReport(report);
return;
@@ -88,6 +91,6 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
}
void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
- if (mgr.getLangOptions().ObjC2)
+ if (mgr.getLangOpts().ObjC2)
mgr.registerChecker<ObjCAtSyncChecker>();
}
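A rough C++ analogue (not from the patch) of the hazard ObjCAtSyncChecker reports: with @synchronized(nil) no lock is taken and the body runs unsynchronized, much like guarding a critical section with a mutex pointer that may be null:

#include <mutex>

static void increment(int &counter, std::mutex *m) {
  if (m)
    m->lock();   // if m is null, no synchronization occurs
  ++counter;     // racy on the null path
  if (m)
    m->unlock();
}

int main() {
  int counter = 0;
  std::mutex mu;
  increment(counter, &mu);  // synchronized
  increment(counter, 0);    // the analogue of @synchronized(nil)
  return counter == 2 ? 0 : 1;
}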
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
new file mode 100644
index 0000000..f2929c0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -0,0 +1,174 @@
+//== ObjCContainersASTChecker.cpp - CoreFoundation containers API *- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An AST checker that looks for common pitfalls when using 'CFArray',
+// 'CFDictionary', 'CFSet' APIs.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext* AC;
+ ASTContext &ASTC;
+ uint64_t PtrWidth;
+
+ static const unsigned InvalidArgIndex = UINT_MAX;
+
+ /// Check if the type has pointer size (very conservative).
+ inline bool isPointerSize(const Type *T) {
+ if (!T)
+ return true;
+ if (T->isIncompleteType())
+ return true;
+ return (ASTC.getTypeSize(T) == PtrWidth);
+ }
+
+ /// Check if the type is a pointer/array to pointer sized values.
+ inline bool hasPointerToPointerSizedType(const Expr *E) {
+ QualType T = E->getType();
+
+ // The type could be either a pointer or array.
+ const Type *TP = T.getTypePtr();
+ QualType PointeeT = TP->getPointeeType();
+ if (!PointeeT.isNull()) {
+ // If the type is a pointer to an array, check the size of the array
+ // elements, to avoid false positives coming from the assumption that
+ // the values x and &x are equal when x is an array.
+ if (const Type *TElem = PointeeT->getArrayElementTypeNoTypeQual())
+ if (isPointerSize(TElem))
+ return true;
+
+ // Else, check the pointee size.
+ return isPointerSize(PointeeT.getTypePtr());
+ }
+
+ if (const Type *TElem = TP->getArrayElementTypeNoTypeQual())
+ return isPointerSize(TElem);
+
+ // At this point the type is neither an array nor a pointer type.
+
+ // This could be a null constant, which is allowed.
+ if (E->isNullPointerConstant(ASTC, Expr::NPC_ValueDependentIsNull))
+ return true;
+ return false;
+ }
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac)
+ : BR(br), AC(ac), ASTC(AC->getASTContext()),
+ PtrWidth(ASTC.getTargetInfo().getPointerWidth(0)) {}
+
+ // Statement visitor methods.
+ void VisitChildren(Stmt *S);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitCallExpr(CallExpr *CE);
+};
+} // end anonymous namespace
+
+static StringRef getCalleeName(CallExpr *CE) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return StringRef();
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II) // if no identifier, not a simple C function
+ return StringRef();
+
+ return II->getName();
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ StringRef Name = getCalleeName(CE);
+ if (Name.empty())
+ return;
+
+ const Expr *Arg = 0;
+ unsigned ArgNum = InvalidArgIndex;
+
+ if (Name.equals("CFArrayCreate") || Name.equals("CFSetCreate")) {
+ ArgNum = 1;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg))
+ return;
+ }
+
+ if (Arg == 0 && Name.equals("CFDictionaryCreate")) {
+ // Check first argument.
+ ArgNum = 1;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg)) {
+ // Check second argument.
+ ArgNum = 2;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg))
+ // Both are good, return.
+ return;
+ }
+ }
+
+ if (ArgNum != InvalidArgIndex) {
+ assert(ArgNum == 1 || ArgNum == 2);
+
+ SmallString<256> BufName;
+ llvm::raw_svector_ostream OsName(BufName);
+ OsName << "Invalid use of '" << Name << "'";
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+ Os << " The "<< ((ArgNum == 1) ? "first" : "second") << " argument to '"
+ << Name << "' must be a C array of pointer-sized values, not '"
+ << Arg->getType().getAsString() << "'";
+
+ SourceRange R = Arg->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ OsName.str(), categories::CoreFoundationObjectiveC,
+ Os.str(), CELoc, &R, 1);
+ }
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+namespace {
+class ObjCContainersASTChecker : public Checker<check::ASTCodeBody> {
+public:
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, Mgr.getAnalysisDeclContext(D));
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerObjCContainersASTChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCContainersASTChecker>();
+}
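A small sketch (hypothetical stand-ins, not CF declarations) of the property the walker verifies: the values buffer handed to CFArrayCreate, CFSetCreate, or CFDictionaryCreate must hold pointer-sized elements:

#include <cstddef>

static bool isPointerSizedElement(std::size_t elemSize) {
  return elemSize == sizeof(void *);
}

int main() {
  int ints[3] = {1, 2, 3};
  const void *ptrs[3] = {&ints[0], &ints[1], &ints[2]};
  // The checker would flag passing 'ints' as the values buffer: on LP64,
  // 'int' is narrower than a pointer, so the API would misread the memory.
  // 'ptrs' has the layout the API expects.
  bool bad = isPointerSizedElement(sizeof(ints[0]));   // false on LP64
  bool good = isPointerSizedElement(sizeof(ptrs[0]));  // always true
  return (!bad && good) ? 0 : 1;
}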
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
new file mode 100644
index 0000000..f4655b6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -0,0 +1,159 @@
+//== ObjCContainersChecker.cpp - Path sensitive checker for CFArray *- C++ -*=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Performs path sensitive checks of Core Foundation static containers like
+// CFArray.
+// 1) Check for buffer overflows:
+// In CFArrayGetValueAtIndex(myArray, index), if the index is outside the
+// index space of myArray (0 to N-1 inclusive, where N is the count of
+// myArray), the behavior is undefined.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/AST/ParentMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
+ check::PostStmt<CallExpr> > {
+ mutable OwningPtr<BugType> BT;
+ inline void initBugType() const {
+ if (!BT)
+ BT.reset(new BugType("CFArray API",
+ categories::CoreFoundationObjectiveC));
+ }
+
+ inline SymbolRef getArraySym(const Expr *E, CheckerContext &C) const {
+ SVal ArrayRef = C.getState()->getSVal(E, C.getLocationContext());
+ SymbolRef ArraySym = ArrayRef.getAsSymbol();
+ return ArraySym;
+ }
+
+ void addSizeInfo(const Expr *Array, const Expr *Size,
+ CheckerContext &C) const;
+
+public:
+ /// A tag used to identify this checker.
+ static void *getTag() { static int Tag; return &Tag; }
+
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+// ProgramState trait - a map from an array symbol to its size.
+typedef llvm::ImmutableMap<SymbolRef, DefinedSVal> ArraySizeM;
+
+namespace { struct ArraySizeMap {}; }
+namespace clang { namespace ento {
+template<> struct ProgramStateTrait<ArraySizeMap>
+ : public ProgramStatePartialTrait<ArraySizeM > {
+ static void *GDMIndex() { return ObjCContainersChecker::getTag(); }
+};
+}}
+
+void ObjCContainersChecker::addSizeInfo(const Expr *Array, const Expr *Size,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal SizeV = State->getSVal(Size, C.getLocationContext());
+ // Undefined is reported by another checker.
+ if (SizeV.isUnknownOrUndef())
+ return;
+
+ // Get the ArrayRef symbol.
+ SVal ArrayRef = State->getSVal(Array, C.getLocationContext());
+ SymbolRef ArraySym = ArrayRef.getAsSymbol();
+ if (!ArraySym)
+ return;
+
+ C.addTransition(State->set<ArraySizeMap>(ArraySym, cast<DefinedSVal>(SizeV)));
+ return;
+}
+
+void ObjCContainersChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty() || CE->getNumArgs() < 1)
+ return;
+
+ // Add array size information to the state.
+ if (Name.equals("CFArrayCreate")) {
+ if (CE->getNumArgs() < 3)
+ return;
+ // Note, we can visit the Create method in the post-visit because
+ // the CFIndex parameter is passed in by value and will not be invalidated
+ // by the call.
+ addSizeInfo(CE, CE->getArg(2), C);
+ return;
+ }
+
+ if (Name.equals("CFArrayGetCount")) {
+ addSizeInfo(CE->getArg(0), CE, C);
+ return;
+ }
+}
+
+void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty() || CE->getNumArgs() < 2)
+ return;
+
+ // Check the array access.
+ if (Name.equals("CFArrayGetValueAtIndex")) {
+ ProgramStateRef State = C.getState();
+ // Retrieve the size.
+ // Find out if we saw this array symbol before and have information about it.
+ const Expr *ArrayExpr = CE->getArg(0);
+ SymbolRef ArraySym = getArraySym(ArrayExpr, C);
+ if (!ArraySym)
+ return;
+
+ const DefinedSVal *Size = State->get<ArraySizeMap>(ArraySym);
+
+ if (!Size)
+ return;
+
+ // Get the index.
+ const Expr *IdxExpr = CE->getArg(1);
+ SVal IdxVal = State->getSVal(IdxExpr, C.getLocationContext());
+ if (IdxVal.isUnknownOrUndef())
+ return;
+ DefinedSVal Idx = cast<DefinedSVal>(IdxVal);
+
+ // Now, check if 'Idx in [0, Size-1]'.
+ const QualType T = IdxExpr->getType();
+ ProgramStateRef StInBound = State->assumeInBound(Idx, *Size, true, T);
+ ProgramStateRef StOutBound = State->assumeInBound(Idx, *Size, false, T);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateSink(StOutBound);
+ if (!N)
+ return;
+ initBugType();
+ BugReport *R = new BugReport(*BT, "Index is out of bounds", N);
+ R->addRange(IdxExpr->getSourceRange());
+ C.EmitReport(R);
+ return;
+ }
+ }
+}
+
+/// Register checker.
+void ento::registerObjCContainersChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCContainersChecker>();
+}
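A minimal sketch (CF call shapes elided) of the invariant the checker tracks: after CFArrayCreate(..., count) or CFArrayGetCount(array), the recorded size bounds every later CFArrayGetValueAtIndex(array, idx):

#include <cassert>
#include <cstddef>

static const void *getValueAtIndex(const void *const *values,
                                   std::size_t count, std::size_t idx) {
  assert(idx < count && "Index is out of bounds"); // the checker's message
  return values[idx];
}

int main() {
  int a = 1, b = 2;
  const void *values[2] = {&a, &b};
  getValueAtIndex(values, 2, 1);    // in bounds: no report
  // getValueAtIndex(values, 2, 2); // out of bounds: flagged by the checker
}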
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 2fb9944..d15c8ba 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -34,18 +34,8 @@
// receives a reference to 'self', the checker keeps track and passes the flags
// for 1) and 2) to the new object that 'self' points to after the call.
//
-// FIXME (rdar://7937506): In the case of:
-// [super init];
-// return self;
-// Have an extra PathDiagnosticPiece in the path that says "called [super init],
-// but didn't assign the result to self."
-
//===----------------------------------------------------------------------===//
-// FIXME: Somehow stick the link to Apple's documentation about initializing
-// objects in the diagnostics.
-// http://developer.apple.com/library/mac/#documentation/Cocoa/Conceptual/ObjectiveC/Articles/ocAllocInit.html
-
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -64,7 +54,7 @@ static bool isInitMessage(const ObjCMessage &msg);
static bool isSelfVar(SVal location, CheckerContext &C);
namespace {
-class ObjCSelfInitChecker : public Checker<
+class ObjCSelfInitChecker : public Checker< check::PreObjCMessage,
check::PostObjCMessage,
check::PostStmt<ObjCIvarRefExpr>,
check::PreStmt<ReturnStmt>,
@@ -72,6 +62,7 @@ class ObjCSelfInitChecker : public Checker<
check::PostStmt<CallExpr>,
check::Location > {
public:
+ void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
void checkPostObjCMessage(ObjCMessage msg, CheckerContext &C) const;
void checkPostStmt(const ObjCIvarRefExpr *E, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
@@ -79,6 +70,10 @@ public:
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
void checkLocation(SVal location, bool isLoad, const Stmt *S,
CheckerContext &C) const;
+
+ void checkPreStmt(const CallOrObjCMessage &CE, CheckerContext &C) const;
+ void checkPostStmt(const CallOrObjCMessage &CE, CheckerContext &C) const;
+
};
} // end anonymous namespace
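A simplified sketch (hypothetical names, not the real framework machinery) of the registration idiom used above: Checker<...> takes one tag per callback the checker subscribes to, and the framework wires up exactly those hooks, so adding check::PreObjCMessage to the list is what makes the new checkPreObjCMessage callback reachable:

#include <cstdio>

struct PreObjCMessage { static const char *name() { return "PreObjCMessage"; } };
struct PostObjCMessage { static const char *name() { return "PostObjCMessage"; } };

template <class... Events> struct Checker {
  static void registerAll() {
    const char *names[] = {Events::name()...};
    for (const char *n : names)
      std::printf("registered %s\n", n);  // one hook per subscribed event
  }
};

struct MyChecker : Checker<PreObjCMessage, PostObjCMessage> {};

int main() { MyChecker::registerAll(); }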
@@ -87,8 +82,8 @@ namespace {
class InitSelfBug : public BugType {
const std::string desc;
public:
- InitSelfBug() : BugType("missing \"self = [(super or self) init...]\"",
- "missing \"self = [(super or self) init...]\"") {}
+ InitSelfBug() : BugType("Missing \"self = [(super or self) init...]\"",
+ categories::CoreFoundationObjectiveC) {}
};
} // end anonymous namespace
@@ -130,7 +125,7 @@ namespace ento {
}
}
-static SelfFlagEnum getSelfFlags(SVal val, const ProgramState *state) {
+static SelfFlagEnum getSelfFlags(SVal val, ProgramStateRef state) {
if (SymbolRef sym = val.getAsSymbol())
if (const unsigned *attachedFlags = state->get<SelfFlag>(sym))
return (SelfFlagEnum)*attachedFlags;
@@ -141,7 +136,7 @@ static SelfFlagEnum getSelfFlags(SVal val, CheckerContext &C) {
return getSelfFlags(val, C.getState());
}
-static void addSelfFlag(const ProgramState *state, SVal val,
+static void addSelfFlag(ProgramStateRef state, SVal val,
SelfFlagEnum flag, CheckerContext &C) {
// We tag the symbol that the SVal wraps.
if (SymbolRef sym = val.getAsSymbol())
@@ -156,7 +151,7 @@ static bool hasSelfFlag(SVal val, SelfFlagEnum flag, CheckerContext &C) {
/// points to and is an object that did not come from the result of calling
/// an initializer.
static bool isInvalidSelf(const Expr *E, CheckerContext &C) {
- SVal exprVal = C.getState()->getSVal(E);
+ SVal exprVal = C.getState()->getSVal(E, C.getLocationContext());
if (!hasSelfFlag(exprVal, SelfFlag_Self, C))
return false; // value did not come from 'self'.
if (hasSelfFlag(exprVal, SelfFlag_InitRes, C))
@@ -188,25 +183,28 @@ static void checkForInvalidSelf(const Expr *E, CheckerContext &C,
void ObjCSelfInitChecker::checkPostObjCMessage(ObjCMessage msg,
CheckerContext &C) const {
+ CallOrObjCMessage MsgWrapper(msg, C.getState(), C.getLocationContext());
+ checkPostStmt(MsgWrapper, C);
+
// When encountering a message that does initialization (init rule),
// tag the return value so that we know later on that if self has this value
// then it is properly initialized.
// FIXME: A callback should disable checkers at the start of functions.
if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
- C.getCurrentAnalysisContext()->getDecl())))
+ C.getCurrentAnalysisDeclContext()->getDecl())))
return;
if (isInitMessage(msg)) {
// Tag the return value as the result of an initializer.
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
// FIXME: this really should be context-sensitive, where we record
// the current stack frame (for IPA). Also, we need to clean this
// value out when we return from this method.
state = state->set<CalledInit>(true);
- SVal V = state->getSVal(msg.getOriginExpr());
+ SVal V = state->getSVal(msg.getMessageExpr(), C.getLocationContext());
addSelfFlag(state, V, SelfFlag_InitRes, C);
return;
}
@@ -221,7 +219,7 @@ void ObjCSelfInitChecker::checkPostStmt(const ObjCIvarRefExpr *E,
CheckerContext &C) const {
// FIXME: A callback should disable checkers at the start of functions.
if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
- C.getCurrentAnalysisContext()->getDecl())))
+ C.getCurrentAnalysisDeclContext()->getDecl())))
return;
checkForInvalidSelf(E->getBase(), C,
@@ -233,7 +231,7 @@ void ObjCSelfInitChecker::checkPreStmt(const ReturnStmt *S,
CheckerContext &C) const {
// FIXME: A callback should disable checkers at the start of functions.
if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
- C.getCurrentAnalysisContext()->getDecl())))
+ C.getCurrentAnalysisDeclContext()->getDecl())))
return;
checkForInvalidSelf(S->getRetValue(), C,
@@ -259,10 +257,28 @@ void ObjCSelfInitChecker::checkPreStmt(const ReturnStmt *S,
void ObjCSelfInitChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
- for (CallExpr::const_arg_iterator
- I = CE->arg_begin(), E = CE->arg_end(); I != E; ++I) {
- SVal argV = state->getSVal(*I);
+ CallOrObjCMessage CEWrapper(CE, C.getState(), C.getLocationContext());
+ checkPreStmt(CEWrapper, C);
+}
+
+void ObjCSelfInitChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ CallOrObjCMessage CEWrapper(CE, C.getState(), C.getLocationContext());
+ checkPostStmt(CEWrapper, C);
+}
+
+void ObjCSelfInitChecker::checkPreObjCMessage(ObjCMessage Msg,
+ CheckerContext &C) const {
+ CallOrObjCMessage MsgWrapper(Msg, C.getState(), C.getLocationContext());
+ checkPreStmt(MsgWrapper, C);
+}
+
+void ObjCSelfInitChecker::checkPreStmt(const CallOrObjCMessage &CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ unsigned NumArgs = CE.getNumArgs();
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ SVal argV = CE.getArgSVal(i);
if (isSelfVar(argV, C)) {
unsigned selfFlags = getSelfFlags(state->getSVal(cast<Loc>(argV)), C);
C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
@@ -275,12 +291,12 @@ void ObjCSelfInitChecker::checkPreStmt(const CallExpr *CE,
}
}
-void ObjCSelfInitChecker::checkPostStmt(const CallExpr *CE,
+void ObjCSelfInitChecker::checkPostStmt(const CallOrObjCMessage &CE,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
- for (CallExpr::const_arg_iterator
- I = CE->arg_begin(), E = CE->arg_end(); I != E; ++I) {
- SVal argV = state->getSVal(*I);
+ ProgramStateRef state = C.getState();
+ unsigned NumArgs = CE.getNumArgs();
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ SVal argV = CE.getArgSVal(i);
if (isSelfVar(argV, C)) {
SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
state = state->remove<PreCallSelfFlags>();
@@ -289,7 +305,7 @@ void ObjCSelfInitChecker::checkPostStmt(const CallExpr *CE,
} else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
state = state->remove<PreCallSelfFlags>();
- addSelfFlag(state, state->getSVal(CE), prevFlags, C);
+ addSelfFlag(state, state->getSVal(cast<Loc>(argV)), prevFlags, C);
return;
}
}
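A sketch (hypothetical types) of the refactoring pattern adopted above: instead of duplicating the argument walk for C calls and Objective-C messages, each callback wraps its event in a common view and defers to one shared implementation, as checkPreStmt/checkPreObjCMessage now do with CallOrObjCMessage:

#include <cstdio>

struct CallView {
  unsigned NumArgs;
  int getArgSVal(unsigned i) const { return static_cast<int>(i); } // stand-in
};

static void checkPreCommon(const CallView &Call) {
  for (unsigned i = 0; i < Call.NumArgs; ++i)
    std::printf("inspecting argument %d\n", Call.getArgSVal(i)); // shared walk
}

static void checkPreCall(unsigned numArgs) { checkPreCommon(CallView{numArgs}); }
static void checkPreMessage(unsigned numArgs) { checkPreCommon(CallView{numArgs}); }

int main() {
  checkPreCall(2);
  checkPreMessage(1);
}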
@@ -300,7 +316,7 @@ void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad,
CheckerContext &C) const {
// Tag the result of a load from 'self' so that we can easily know that the
// value is the object that 'self' points to.
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
if (isSelfVar(location, C))
addSelfFlag(state, state->getSVal(cast<Loc>(location)), SelfFlag_Self, C);
}
@@ -335,7 +351,7 @@ static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
/// \brief Returns true if the location is 'self'.
static bool isSelfVar(SVal location, CheckerContext &C) {
- AnalysisContext *analCtx = C.getCurrentAnalysisContext();
+ AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext();
if (!analCtx->getSelfDecl())
return false;
if (!isa<loc::MemRegionVal>(location))
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index bbc262f..4718dc7 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -161,7 +161,7 @@ static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
PathDiagnosticLocation L =
PathDiagnosticLocation::create(I->first, BR.getSourceManager());
- BR.EmitBasicReport("Unused instance variable", "Optimization",
+ BR.EmitBasicReport(D, "Unused instance variable", "Optimization",
os.str(), L);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index 202522b..fe4845b 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -24,7 +24,7 @@ using namespace ento;
namespace {
class PointerArithChecker
: public Checker< check::PreStmt<BinaryOperator> > {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -36,9 +36,10 @@ void PointerArithChecker::checkPreStmt(const BinaryOperator *B,
if (B->getOpcode() != BO_Sub && B->getOpcode() != BO_Add)
return;
- const ProgramState *state = C.getState();
- SVal LV = state->getSVal(B->getLHS());
- SVal RV = state->getSVal(B->getRHS());
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal LV = state->getSVal(B->getLHS(), LCtx);
+ SVal RV = state->getSVal(B->getRHS(), LCtx);
const MemRegion *LR = LV.getAsRegion();
@@ -50,7 +51,7 @@ void PointerArithChecker::checkPreStmt(const BinaryOperator *B,
if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) ||
isa<CompoundLiteralRegion>(LR)) {
- if (ExplodedNode *N = C.generateNode()) {
+ if (ExplodedNode *N = C.addTransition()) {
if (!BT)
BT.reset(new BuiltinBug("Dangerous pointer arithmetic",
"Pointer arithmetic done on non-array variables "
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 924c7f2..fa5c6a3 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -25,7 +25,7 @@ using namespace ento;
namespace {
class PointerSubChecker
: public Checker< check::PreStmt<BinaryOperator> > {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -39,9 +39,10 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
if (B->getOpcode() != BO_Sub)
return;
- const ProgramState *state = C.getState();
- SVal LV = state->getSVal(B->getLHS());
- SVal RV = state->getSVal(B->getRHS());
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal LV = state->getSVal(B->getLHS(), LCtx);
+ SVal RV = state->getSVal(B->getRHS(), LCtx);
const MemRegion *LR = LV.getAsRegion();
const MemRegion *RR = RV.getAsRegion();
@@ -59,7 +60,7 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
return;
- if (ExplodedNode *N = C.generateNode()) {
+ if (ExplodedNode *N = C.addTransition()) {
if (!BT)
BT.reset(new BuiltinBug("Pointer subtraction",
"Subtraction of two pointers that do not point to "
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index c02b5b1..2d018ef 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -25,8 +25,8 @@ using namespace ento;
namespace {
class PthreadLockChecker : public Checker< check::PostStmt<CallExpr> > {
- mutable llvm::OwningPtr<BugType> BT_doublelock;
- mutable llvm::OwningPtr<BugType> BT_lor;
+ mutable OwningPtr<BugType> BT_doublelock;
+ mutable OwningPtr<BugType> BT_lor;
enum LockingSemantics {
NotApplicable = 0,
PthreadSemantics,
@@ -56,18 +56,11 @@ template <> struct ProgramStateTrait<LockSet> :
void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- const FunctionDecl *FD = state->getSVal(Callee).getAsFunctionDecl();
-
- if (!FD)
- return;
-
- // Get the name of the callee.
- IdentifierInfo *II = FD->getIdentifier();
- if (!II) // if no identifier, not a simple C function
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ StringRef FName = C.getCalleeName(CE);
+ if (FName.empty())
return;
- StringRef FName = II->getName();
if (CE->getNumArgs() != 1)
return;
@@ -75,24 +68,28 @@ void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
if (FName == "pthread_mutex_lock" ||
FName == "pthread_rwlock_rdlock" ||
FName == "pthread_rwlock_wrlock")
- AcquireLock(C, CE, state->getSVal(CE->getArg(0)), false, PthreadSemantics);
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ false, PthreadSemantics);
else if (FName == "lck_mtx_lock" ||
FName == "lck_rw_lock_exclusive" ||
FName == "lck_rw_lock_shared")
- AcquireLock(C, CE, state->getSVal(CE->getArg(0)), false, XNUSemantics);
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ false, XNUSemantics);
else if (FName == "pthread_mutex_trylock" ||
FName == "pthread_rwlock_tryrdlock" ||
FName == "pthread_rwlock_tryrwlock")
- AcquireLock(C, CE, state->getSVal(CE->getArg(0)), true, PthreadSemantics);
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ true, PthreadSemantics);
else if (FName == "lck_mtx_try_lock" ||
FName == "lck_rw_try_lock_exclusive" ||
FName == "lck_rw_try_lock_shared")
- AcquireLock(C, CE, state->getSVal(CE->getArg(0)), true, XNUSemantics);
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ true, XNUSemantics);
else if (FName == "pthread_mutex_unlock" ||
FName == "pthread_rwlock_unlock" ||
FName == "lck_mtx_unlock" ||
FName == "lck_rw_done")
- ReleaseLock(C, CE, state->getSVal(CE->getArg(0)));
+ ReleaseLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
}
void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
@@ -103,9 +100,9 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
if (!lockR)
return;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
- SVal X = state->getSVal(CE);
+ SVal X = state->getSVal(CE, C.getLocationContext());
if (X.isUnknownOrUndef())
return;
@@ -125,10 +122,10 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
return;
}
- const ProgramState *lockSucc = state;
+ ProgramStateRef lockSucc = state;
if (isTryLock) {
// Bifurcate the state, and allow a mode where the lock acquisition fails.
- const ProgramState *lockFail;
+ ProgramStateRef lockFail;
switch (semantics) {
case PthreadSemantics:
llvm::tie(lockFail, lockSucc) = state->assume(retVal);
@@ -138,7 +135,6 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
break;
default:
llvm_unreachable("Unknown tryLock locking semantics");
- break;
}
assert(lockFail && lockSucc);
C.addTransition(lockFail);
@@ -166,7 +162,7 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
if (!lockR)
return;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
llvm::ImmutableList<const MemRegion*> LS = state->get<LockSet>();
// FIXME: Better analysis requires IPA for wrappers.
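A sketch of why AcquireLock needs two try-lock semantics (POSIX assumed): pthread_mutex_trylock returns 0 on success, so the "lock held" state is the retVal == 0 assumption, while the XNU lck_mtx_try_lock family returns nonzero on success — the opposite branch of the same bifurcation:

#include <cstdio>
#include <pthread.h>

int main() {
  pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  if (pthread_mutex_trylock(&m) == 0) {  // success == 0: PthreadSemantics
    std::puts("acquired");
    pthread_mutex_unlock(&m);
  } else {
    std::puts("contended");              // failure path: lock not held
  }
  pthread_mutex_destroy(&m);
  return 0;
}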
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index 93e0fe5..b569e41 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -18,12 +18,12 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/AST/ParentMap.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
@@ -31,6 +31,7 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include <cstdarg>
@@ -45,21 +46,14 @@ namespace {
class GenericNodeBuilderRefCount {
CheckerContext *C;
const ProgramPointTag *tag;
- EndOfFunctionNodeBuilder *ENB;
public:
GenericNodeBuilderRefCount(CheckerContext &c,
- const ProgramPointTag *t)
- : C(&c), tag(t), ENB(0) {}
+ const ProgramPointTag *t = 0)
+ : C(&c), tag(t){}
- GenericNodeBuilderRefCount(EndOfFunctionNodeBuilder &enb)
- : C(0), tag(0), ENB(&enb) {}
-
- ExplodedNode *MakeNode(const ProgramState *state, ExplodedNode *Pred) {
- if (C)
- return C->generateNode(state, Pred, tag, false);
-
- assert(ENB);
- return ENB->generateNode(state, Pred);
+ ExplodedNode *MakeNode(ProgramStateRef state, ExplodedNode *Pred,
+ bool MarkAsSink = false) {
+ return C->addTransition(state, Pred, tag, MarkAsSink);
}
};
} // end anonymous namespace
@@ -138,6 +132,11 @@ public:
static RetEffect MakeNoRet() {
return RetEffect(NoRet);
}
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned) K);
+ ID.AddInteger((unsigned) O);
+ }
};
//===----------------------------------------------------------------------===//
@@ -358,22 +357,22 @@ struct ProgramStateTrait<RefBindings>
namespace {
class RetainSummary {
- /// Args - an ordered vector of (index, ArgEffect) pairs, where index
+ /// Args - a map of (index, ArgEffect) pairs, where index
/// specifies the argument (starting from 0). This can be sparsely
/// populated; arguments with no entry in Args use 'DefaultArgEffect'.
ArgEffects Args;
/// DefaultArgEffect - The default ArgEffect to apply to arguments that
/// do not have an entry in Args.
- ArgEffect DefaultArgEffect;
+ ArgEffect DefaultArgEffect;
/// Receiver - If this summary applies to an Objective-C message expression,
/// this is the effect applied to the state of the receiver.
- ArgEffect Receiver;
+ ArgEffect Receiver;
/// Ret - The effect on the return value. Used to indicate if the
/// function/method call returns a new tracked symbol.
- RetEffect Ret;
+ RetEffect Ret;
public:
RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
@@ -419,6 +418,19 @@ public:
return Args == Other.Args && DefaultArgEffect == Other.DefaultArgEffect &&
Receiver == Other.Receiver && Ret == Other.Ret;
}
+
+ /// Profile this summary for inclusion in a FoldingSet.
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.Add(Args);
+ ID.Add(DefaultArgEffect);
+ ID.Add(Receiver);
+ ID.Add(Ret);
+ }
+
+ /// A retain summary is simple if it has no ArgEffects other than the default.
+ bool isSimple() const {
+ return Args.isEmpty();
+ }
};
} // end anonymous namespace
@@ -443,7 +455,7 @@ public:
ObjCSummaryKey(Selector s)
: II(0), S(s) {}
- IdentifierInfo* getIdentifier() const { return II; }
+ IdentifierInfo *getIdentifier() const { return II; }
Selector getSelector() const { return S; }
};
}
@@ -523,7 +535,7 @@ public:
return Summ;
}
- const RetainSummary * find(IdentifierInfo* II, Selector S) {
+ const RetainSummary *find(IdentifierInfo* II, Selector S) {
// FIXME: Class method lookup. Right now we don't have a good way
// of going between IdentifierInfo* and the class hierarchy.
MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
@@ -560,6 +572,8 @@ class RetainSummaryManager {
typedef ObjCSummaryCache ObjCMethodSummariesTy;
+ typedef llvm::FoldingSetNodeWrapper<RetainSummary> CachedSummaryNode;
+
//==-----------------------------------------------------------------==//
// Data.
//==-----------------------------------------------------------------==//
@@ -591,7 +605,7 @@ class RetainSummaryManager {
ArgEffects::Factory AF;
/// ScratchArgs - A holding buffer for constructing ArgEffects.
- ArgEffects ScratchArgs;
+ ArgEffects ScratchArgs;
/// ObjCAllocRetE - Default return effect for methods returning Objective-C
/// objects.
@@ -601,8 +615,9 @@ class RetainSummaryManager {
/// Objective-C objects.
RetEffect ObjCInitRetE;
- RetainSummary DefaultSummary;
- const RetainSummary *StopSummary;
+ /// SimpleSummaries - Used for uniquing summaries that don't have special
+ /// effects.
+ llvm::FoldingSet<CachedSummaryNode> SimpleSummaries;
//==-----------------------------------------------------------------==//
// Methods.
@@ -616,39 +631,32 @@ class RetainSummaryManager {
public:
RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
-
- const RetainSummary *getDefaultSummary() {
- return &DefaultSummary;
- }
- const RetainSummary * getUnarySummary(const FunctionType* FT,
+ const RetainSummary *getUnarySummary(const FunctionType* FT,
UnaryFuncKind func);
- const RetainSummary * getCFSummaryCreateRule(const FunctionDecl *FD);
- const RetainSummary * getCFSummaryGetRule(const FunctionDecl *FD);
- const RetainSummary * getCFCreateGetRuleSummary(const FunctionDecl *FD);
+ const RetainSummary *getCFSummaryCreateRule(const FunctionDecl *FD);
+ const RetainSummary *getCFSummaryGetRule(const FunctionDecl *FD);
+ const RetainSummary *getCFCreateGetRuleSummary(const FunctionDecl *FD);
- const RetainSummary * getPersistentSummary(ArgEffects AE, RetEffect RetEff,
- ArgEffect ReceiverEff = DoNothing,
- ArgEffect DefaultEff = MayEscape);
+ const RetainSummary *getPersistentSummary(const RetainSummary &OldSumm);
- const RetainSummary * getPersistentSummary(RetEffect RE,
+ const RetainSummary *getPersistentSummary(RetEffect RetEff,
ArgEffect ReceiverEff = DoNothing,
ArgEffect DefaultEff = MayEscape) {
- return getPersistentSummary(getArgEffects(), RE, ReceiverEff, DefaultEff);
+ RetainSummary Summ(getArgEffects(), RetEff, DefaultEff, ReceiverEff);
+ return getPersistentSummary(Summ);
}
- const RetainSummary *getPersistentStopSummary() {
- if (StopSummary)
- return StopSummary;
-
- StopSummary = getPersistentSummary(RetEffect::MakeNoRet(),
- StopTracking, StopTracking);
-
- return StopSummary;
+ const RetainSummary *getDefaultSummary() {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, MayEscape);
}
- const RetainSummary *getInitMethodSummary(QualType RetTy);
+ const RetainSummary *getPersistentStopSummary() {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking, StopTracking);
+ }
void InitializeClassMethodSummaries();
void InitializeMethodSummaries();
@@ -661,10 +669,11 @@ private:
ObjCMethodSummaries[S] = Summ;
}
- void addClassMethSummary(const char* Cls, const char* nullaryName,
- const RetainSummary *Summ) {
+ void addClassMethSummary(const char* Cls, const char* name,
+ const RetainSummary *Summ, bool isNullary = true) {
IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
- Selector S = GetNullarySelector(nullaryName, Ctx);
+ Selector S = isNullary ? GetNullarySelector(name, Ctx)
+ : GetUnarySelector(name, Ctx);
ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
@@ -725,50 +734,37 @@ public:
ObjCInitRetE(gcenabled
? RetEffect::MakeGCNotOwned()
: (usesARC ? RetEffect::MakeARCNotOwned()
- : RetEffect::MakeOwnedWhenTrackedReceiver())),
- DefaultSummary(AF.getEmptyMap() /* per-argument effects (none) */,
- RetEffect::MakeNoRet() /* return effect */,
- MayEscape, /* default argument effect */
- DoNothing /* receiver effect */),
- StopSummary(0) {
-
+ : RetEffect::MakeOwnedWhenTrackedReceiver())) {
InitializeClassMethodSummaries();
InitializeMethodSummaries();
}
- const RetainSummary * getSummary(const FunctionDecl *FD);
+ const RetainSummary *getSummary(const FunctionDecl *FD);
+
+ const RetainSummary *getMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy,
+ ObjCMethodSummariesTy &CachedSummaries);
const RetainSummary *getInstanceMethodSummary(const ObjCMessage &msg,
- const ProgramState *state,
+ ProgramStateRef state,
const LocationContext *LC);
- const RetainSummary * getInstanceMethodSummary(const ObjCMessage &msg,
+ const RetainSummary *getInstanceMethodSummary(const ObjCMessage &msg,
const ObjCInterfaceDecl *ID) {
- return getInstanceMethodSummary(msg.getSelector(), 0,
- ID, msg.getMethodDecl(), msg.getType(Ctx));
+ return getMethodSummary(msg.getSelector(), 0, ID, msg.getMethodDecl(),
+ msg.getType(Ctx), ObjCMethodSummaries);
}
- const RetainSummary * getInstanceMethodSummary(Selector S,
- IdentifierInfo *ClsName,
- const ObjCInterfaceDecl *ID,
- const ObjCMethodDecl *MD,
- QualType RetTy);
-
- const RetainSummary *getClassMethodSummary(Selector S,
- IdentifierInfo *ClsName,
- const ObjCInterfaceDecl *ID,
- const ObjCMethodDecl *MD,
- QualType RetTy);
-
const RetainSummary *getClassMethodSummary(const ObjCMessage &msg) {
const ObjCInterfaceDecl *Class = 0;
if (!msg.isInstanceMessage())
Class = msg.getReceiverInterface();
- return getClassMethodSummary(msg.getSelector(),
- Class? Class->getIdentifier() : 0,
- Class,
- msg.getMethodDecl(), msg.getType(Ctx));
+ return getMethodSummary(msg.getSelector(), Class->getIdentifier(),
+ Class, msg.getMethodDecl(), msg.getType(Ctx),
+ ObjCClassMethodSummaries);
}
/// getMethodSummary - This version of getMethodSummary is used to query
@@ -780,13 +776,16 @@ public:
IdentifierInfo *ClsName = ID->getIdentifier();
QualType ResultTy = MD->getResultType();
+ ObjCMethodSummariesTy *CachedSummaries;
if (MD->isInstanceMethod())
- return getInstanceMethodSummary(S, ClsName, ID, MD, ResultTy);
+ CachedSummaries = &ObjCMethodSummaries;
else
- return getClassMethodSummary(S, ClsName, ID, MD, ResultTy);
+ CachedSummaries = &ObjCClassMethodSummaries;
+
+ return getMethodSummary(S, ClsName, ID, MD, ResultTy, *CachedSummaries);
}
- const RetainSummary * getCommonMethodSummary(const ObjCMethodDecl *MD,
+ const RetainSummary *getStandardMethodSummary(const ObjCMethodDecl *MD,
Selector S, QualType RetTy);
void updateSummaryFromAnnotations(const RetainSummary *&Summ,
@@ -800,12 +799,6 @@ public:
bool isARCEnabled() const { return ARCEnabled; }
bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
-
- const RetainSummary *copySummary(const RetainSummary *OldSumm) {
- RetainSummary *Summ = (RetainSummary *) BPAlloc.Allocate<RetainSummary>();
- new (Summ) RetainSummary(*OldSumm);
- return Summ;
- }
};
// Used to avoid allocating long-term (BPAlloc'd) memory for default retain
@@ -815,23 +808,17 @@ public:
class RetainSummaryTemplate {
RetainSummaryManager &Manager;
const RetainSummary *&RealSummary;
- const RetainSummary *BaseSummary;
RetainSummary ScratchSummary;
bool Accessed;
public:
- RetainSummaryTemplate(const RetainSummary *&real, const RetainSummary &base,
- RetainSummaryManager &manager)
- : Manager(manager),
- RealSummary(real),
- BaseSummary(&base),
- ScratchSummary(base),
+ RetainSummaryTemplate(const RetainSummary *&real, const RetainSummary &base,
+ RetainSummaryManager &mgr)
+ : Manager(mgr), RealSummary(real), ScratchSummary(real ? *real : base),
Accessed(false) {}
~RetainSummaryTemplate() {
if (Accessed)
- RealSummary = Manager.copySummary(&ScratchSummary);
- else if (!RealSummary)
- RealSummary = BaseSummary;
+ RealSummary = Manager.getPersistentSummary(ScratchSummary);
}
RetainSummary &operator*() {
@@ -858,12 +845,26 @@ ArgEffects RetainSummaryManager::getArgEffects() {
}
const RetainSummary *
-RetainSummaryManager::getPersistentSummary(ArgEffects AE, RetEffect RetEff,
- ArgEffect ReceiverEff,
- ArgEffect DefaultEff) {
- // Create the summary and return it.
+RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
+ // Unique "simple" summaries -- those without ArgEffects.
+ if (OldSumm.isSimple()) {
+ llvm::FoldingSetNodeID ID;
+ OldSumm.Profile(ID);
+
+ void *Pos;
+ CachedSummaryNode *N = SimpleSummaries.FindNodeOrInsertPos(ID, Pos);
+
+ if (!N) {
+ N = (CachedSummaryNode *) BPAlloc.Allocate<CachedSummaryNode>();
+ new (N) CachedSummaryNode(OldSumm);
+ SimpleSummaries.InsertNode(N, Pos);
+ }
+
+ return &N->getValue();
+ }
+
RetainSummary *Summ = (RetainSummary *) BPAlloc.Allocate<RetainSummary>();
- new (Summ) RetainSummary(AE, RetEff, DefaultEff, ReceiverEff);
+ new (Summ) RetainSummary(OldSumm);
return Summ;
}
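A condensed sketch (assuming LLVM's FoldingSet API, as used above) of the uniquing scheme: Profile() feeds a summary's identity into a FoldingSetNodeID, and FindNodeOrInsertPos either returns the cached node or an insertion hint for a new one. The patch does the same for "simple" summaries, bump-allocating nodes from BPAlloc instead of using 'new':

#include "llvm/ADT/FoldingSet.h"

struct Summary : llvm::FoldingSetNode {
  unsigned RetKind;
  explicit Summary(unsigned K) : RetKind(K) {}
  void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(RetKind); }
};

static Summary *getPersistent(llvm::FoldingSet<Summary> &Cache, unsigned K) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(K);
  void *Pos;
  if (Summary *S = Cache.FindNodeOrInsertPos(ID, Pos))
    return S;                    // cache hit: reuse the uniqued summary
  Summary *S = new Summary(K);   // cache miss: create and remember it
  Cache.InsertNode(S, Pos);
  return S;
}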
@@ -984,6 +985,21 @@ const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
// correctly.
ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "dispatch_set_context") {
+ // <rdar://problem/11059275> - The analyzer currently doesn't have
+ // a good way to reason about the finalizer function for libdispatch.
+ // If we pass a context object that is memory managed, stop tracking it.
+ // FIXME: this hack should possibly go away once we can handle
+ // libdispatch finalizers.
+ ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName.startswith("NS") &&
+ (FName.find("Insert") != StringRef::npos)) {
+ // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove. (radar://11152419)
+ ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
+ ScratchArgs = AF.add(ScratchArgs, 2, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
// Did we get a summary?
@@ -1106,7 +1122,6 @@ RetainSummaryManager::getUnarySummary(const FunctionType* FT,
case cfretain: Effect = IncRef; break;
case cfrelease: Effect = DecRef; break;
case cfmakecollectable: Effect = MakeCollectable; break;
- default: llvm_unreachable("Not a supported unary function.");
}
ScratchArgs = AF.add(ScratchArgs, 0, Effect);
@@ -1131,25 +1146,13 @@ RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
// Summary creation for Selectors.
//===----------------------------------------------------------------------===//
-const RetainSummary *
-RetainSummaryManager::getInitMethodSummary(QualType RetTy) {
- assert(ScratchArgs.isEmpty());
- // 'init' methods conceptually return a newly allocated object and claim
- // the receiver.
- if (cocoa::isCocoaObjectRef(RetTy) ||
- coreFoundation::isCFObjectRef(RetTy))
- return getPersistentSummary(ObjCInitRetE, DecRefMsg);
-
- return getDefaultSummary();
-}
-
void
RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
const FunctionDecl *FD) {
if (!FD)
return;
- RetainSummaryTemplate Template(Summ, DefaultSummary, *this);
+ RetainSummaryTemplate Template(Summ, *getDefaultSummary(), *this);
// Effects on the parameters.
unsigned parm_idx = 0;
@@ -1197,8 +1200,7 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
if (!MD)
return;
- RetainSummaryTemplate Template(Summ, DefaultSummary, *this);
-
+ RetainSummaryTemplate Template(Summ, *getDefaultSummary(), *this);
bool isTrackedLoc = false;
// Effects on the receiver.
@@ -1247,8 +1249,8 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
}
const RetainSummary *
-RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl *MD,
- Selector S, QualType RetTy) {
+RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
+ Selector S, QualType RetTy) {
if (MD) {
// Scan the method decl for 'void*' arguments. These should be treated
@@ -1265,8 +1267,74 @@ RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl *MD,
}
}
- // Any special effect for the receiver?
+ // Any special effects?
ArgEffect ReceiverEff = DoNothing;
+ RetEffect ResultEff = RetEffect::MakeNoRet();
+
+ // Check the method family, and apply any default annotations.
+ switch (MD ? MD->getMethodFamily() : S.getMethodFamily()) {
+ case OMF_None:
+ case OMF_performSelector:
+ // Assume all Objective-C methods follow Cocoa Memory Management rules.
+ // FIXME: Does the non-threaded performSelector family really belong here?
+ // The selector could be, say, @selector(copy).
+ if (cocoa::isCocoaObjectRef(RetTy))
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::ObjC);
+ else if (coreFoundation::isCFObjectRef(RetTy)) {
+ // ObjCMethodDecl currently doesn't consider CF objects as valid return
+ // values for alloc, new, copy, or mutableCopy, so we have to
+ // double-check with the selector. This is ugly, but there aren't that
+ // many Objective-C methods that return CF objects, right?
+ if (MD) {
+ switch (S.getMethodFamily()) {
+ case OMF_alloc:
+ case OMF_new:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+ break;
+ default:
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
+ break;
+ }
+ } else {
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
+ }
+ }
+ break;
+ case OMF_init:
+ ResultEff = ObjCInitRetE;
+ ReceiverEff = DecRefMsg;
+ break;
+ case OMF_alloc:
+ case OMF_new:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ if (cocoa::isCocoaObjectRef(RetTy))
+ ResultEff = ObjCAllocRetE;
+ else if (coreFoundation::isCFObjectRef(RetTy))
+ ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+ break;
+ case OMF_autorelease:
+ ReceiverEff = Autorelease;
+ break;
+ case OMF_retain:
+ ReceiverEff = IncRefMsg;
+ break;
+ case OMF_release:
+ ReceiverEff = DecRefMsg;
+ break;
+ case OMF_dealloc:
+ ReceiverEff = Dealloc;
+ break;
+ case OMF_self:
+ // -self is handled specially by the ExprEngine to propagate the receiver.
+ break;
+ case OMF_retainCount:
+ case OMF_finalize:
+ // These methods don't return objects.
+ break;
+ }
// If one of the arguments in the selector has the keyword 'delegate' we
// should stop tracking the reference count for the receiver. This is
@@ -1279,34 +1347,16 @@ RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl *MD,
ReceiverEff = StopTracking;
}
- // Look for methods that return an owned object.
- if (cocoa::isCocoaObjectRef(RetTy)) {
- // EXPERIMENTAL: assume the Cocoa conventions for all objects returned
- // by instance methods.
- RetEffect E = cocoa::followsFundamentalRule(S, MD)
- ? ObjCAllocRetE : RetEffect::MakeNotOwned(RetEffect::ObjC);
-
- return getPersistentSummary(E, ReceiverEff, MayEscape);
- }
-
- // Look for methods that return an owned core foundation object.
- if (coreFoundation::isCFObjectRef(RetTy)) {
- RetEffect E = cocoa::followsFundamentalRule(S, MD)
- ? RetEffect::MakeOwned(RetEffect::CF, true)
- : RetEffect::MakeNotOwned(RetEffect::CF);
-
- return getPersistentSummary(E, ReceiverEff, MayEscape);
- }
-
- if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing)
+ if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing &&
+ ResultEff.getKind() == RetEffect::NoRet)
return getDefaultSummary();
- return getPersistentSummary(RetEffect::MakeNoRet(), ReceiverEff, MayEscape);
+ return getPersistentSummary(ResultEff, ReceiverEff, MayEscape);
}
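A reduced sketch (hypothetical enum) of the ownership table the switch above encodes: under the Cocoa conventions, alloc/new/copy/mutableCopy return a +1 ("owned") object, init consumes its receiver and returns an owned result, and ordinary methods return +0 ("not owned"):

#include <cstdio>

enum Family { Alloc, New, Copy, MutableCopy, Init, Other };

static bool returnsOwned(Family F) {
  switch (F) {
  case Alloc: case New: case Copy: case MutableCopy: case Init:
    return true;   // +1: caller is responsible for releasing
  case Other:
    return false;  // +0: caller does not own the result
  }
  return false;
}

int main() {
  std::printf("alloc: +%d, plain getter: +%d\n",
              returnsOwned(Alloc) ? 1 : 0, returnsOwned(Other) ? 1 : 0);
}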
const RetainSummary *
RetainSummaryManager::getInstanceMethodSummary(const ObjCMessage &msg,
- const ProgramState *state,
+ ProgramStateRef state,
const LocationContext *LC) {
// We need the type-information of the tracked receiver object
@@ -1319,7 +1369,7 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessage &msg,
SVal receiverV;
if (Receiver) {
- receiverV = state->getSValAsScalarOrLoc(Receiver);
+ receiverV = state->getSValAsScalarOrLoc(Receiver, LC);
// FIXME: Eventually replace the use of state->get<RefBindings> with
// a generic API for reasoning about the Objective-C types of symbolic
@@ -1348,51 +1398,22 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessage &msg,
}
const RetainSummary *
-RetainSummaryManager::getInstanceMethodSummary(Selector S,
- IdentifierInfo *ClsName,
- const ObjCInterfaceDecl *ID,
- const ObjCMethodDecl *MD,
- QualType RetTy) {
+RetainSummaryManager::getMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD, QualType RetTy,
+ ObjCMethodSummariesTy &CachedSummaries) {
// Look up a summary in our summary cache.
- const RetainSummary *Summ = ObjCMethodSummaries.find(ID, ClsName, S);
+ const RetainSummary *Summ = CachedSummaries.find(ID, ClsName, S);
if (!Summ) {
- assert(ScratchArgs.isEmpty());
-
- // "initXXX": pass-through for receiver.
- if (cocoa::deriveNamingConvention(S, MD) == cocoa::InitRule)
- Summ = getInitMethodSummary(RetTy);
- else
- Summ = getCommonMethodSummary(MD, S, RetTy);
+ Summ = getStandardMethodSummary(MD, S, RetTy);
// Annotations override defaults.
updateSummaryFromAnnotations(Summ, MD);
// Memoize the summary.
- ObjCMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
- }
-
- return Summ;
-}
-
-const RetainSummary *
-RetainSummaryManager::getClassMethodSummary(Selector S, IdentifierInfo *ClsName,
- const ObjCInterfaceDecl *ID,
- const ObjCMethodDecl *MD,
- QualType RetTy) {
-
- assert(ClsName && "Class name must be specified.");
- const RetainSummary *Summ = ObjCClassMethodSummaries.find(ID, ClsName, S);
-
- if (!Summ) {
- Summ = getCommonMethodSummary(MD, S, RetTy);
-
- // Annotations override defaults.
- updateSummaryFromAnnotations(Summ, MD);
-
- // Memoize the summary.
- ObjCClassMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ CachedSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
}
return Summ;
@@ -1515,6 +1536,7 @@ void RetainSummaryManager::InitializeMethodSummaries() {
// Don't track allocated autorelease pools yet, as it is okay to prematurely
// exit a method.
addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
+ addClassMethSummary("NSAutoreleasePool", "allocWithZone", NoTrackYet, false);
// Create summaries QCRenderer/QCView -createSnapShotImageOfType:
addInstMethSummary("QCRenderer", AllocSumm,
@@ -1561,13 +1583,13 @@ template<> struct ProgramStateTrait<AutoreleasePoolContents>
} // end GR namespace
} // end clang namespace
-static SymbolRef GetCurrentAutoreleasePool(const ProgramState *state) {
+static SymbolRef GetCurrentAutoreleasePool(ProgramStateRef state) {
ARStack stack = state->get<AutoreleaseStack>();
return stack.isEmpty() ? SymbolRef() : stack.getHead();
}
-static const ProgramState *
-SendAutorelease(const ProgramState *state,
+static ProgramStateRef
+SendAutorelease(ProgramStateRef state,
ARCounts::Factory &F,
SymbolRef sym) {
SymbolRef pool = GetCurrentAutoreleasePool(state);
@@ -1598,7 +1620,7 @@ namespace {
class CFRefBug : public BugType {
protected:
CFRefBug(StringRef name)
- : BugType(name, "Memory (Core Foundation/Objective-C)") {}
+ : BugType(name, categories::MemoryCoreFoundationObjectiveC) {}
public:
// FIXME: Eventually remove.
@@ -1698,7 +1720,7 @@ namespace {
// Bug Reports. //
//===---------===//
- class CFRefReportVisitor : public BugReporterVisitor {
+ class CFRefReportVisitor : public BugReporterVisitorImpl<CFRefReportVisitor> {
protected:
SymbolRef Sym;
const SummaryLogTy &SummaryLog;
@@ -1733,6 +1755,15 @@ namespace {
PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
BugReport &BR);
+
+ virtual BugReporterVisitor *clone() const {
+ // The curiously-recurring template pattern only works for one level of
+ // subclassing. Rather than make a new template base for
+ // CFRefReportVisitor, we simply override clone() to do the right thing.
+ // This could be trouble someday if BugReporterVisitorImpl is ever
+ // used for something else besides a convenient implementation of clone().
+ return new CFRefLeakReportVisitor(*this);
+ }
};
class CFRefReport : public BugReport {
@@ -1771,7 +1802,7 @@ namespace {
public:
CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
- ExprEngine &Eng);
+ CheckerContext &Ctx);
PathDiagnosticLocation getLocation(const SourceManager &SM) const {
assert(Location.isValid());
@@ -1823,6 +1854,20 @@ static inline bool contains(const SmallVectorImpl<ArgEffect>& V,
return false;
}
+static bool isPropertyAccess(const Stmt *S, ParentMap &PM) {
+ unsigned maxDepth = 4;
+ while (S && maxDepth) {
+ if (const PseudoObjectExpr *PO = dyn_cast<PseudoObjectExpr>(S)) {
+ if (!isa<ObjCMessageExpr>(PO->getSyntacticForm()))
+ return true;
+ return false;
+ }
+ S = PM.getParent(S);
+ --maxDepth;
+ }
+ return false;
+}
+
PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
@@ -1832,8 +1877,9 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
return NULL;
// Check if the type state has changed.
- const ProgramState *PrevSt = PrevN->getState();
- const ProgramState *CurrSt = N->getState();
+ ProgramStateRef PrevSt = PrevN->getState();
+ ProgramStateRef CurrSt = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
if (!CurrT) return NULL;
@@ -1851,40 +1897,49 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
if (!PrevT) {
const Stmt *S = cast<StmtPoint>(N->getLocation()).getStmt();
- if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
- // Get the name of the callee (if it is available).
- SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee());
- if (const FunctionDecl *FD = X.getAsFunctionDecl())
- os << "Call to function '" << *FD << '\'';
- else
- os << "function call";
+ if (isa<ObjCArrayLiteral>(S)) {
+ os << "NSArray literal is an object with a +0 retain count";
}
- else if (isa<ObjCMessageExpr>(S)) {
- os << "Method";
- } else {
- os << "Property";
+ else if (isa<ObjCDictionaryLiteral>(S)) {
+ os << "NSDictionary literal is an object with a +0 retain count";
}
+ else {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Get the name of the callee (if it is available).
+ SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
+ if (const FunctionDecl *FD = X.getAsFunctionDecl())
+ os << "Call to function '" << *FD << '\'';
+ else
+ os << "function call";
+ }
+ else {
+ assert(isa<ObjCMessageExpr>(S));
+        // The message expression may have been written directly or as
+ // a property access. Lazily determine which case we are looking at.
+ os << (isPropertyAccess(S, N->getParentMap()) ? "Property" : "Method");
+ }
- if (CurrV.getObjKind() == RetEffect::CF) {
- os << " returns a Core Foundation object with a ";
- }
- else {
- assert (CurrV.getObjKind() == RetEffect::ObjC);
- os << " returns an Objective-C object with a ";
- }
+ if (CurrV.getObjKind() == RetEffect::CF) {
+ os << " returns a Core Foundation object with a ";
+ }
+ else {
+ assert (CurrV.getObjKind() == RetEffect::ObjC);
+ os << " returns an Objective-C object with a ";
+ }
- if (CurrV.isOwned()) {
- os << "+1 retain count";
+ if (CurrV.isOwned()) {
+ os << "+1 retain count";
- if (GCEnabled) {
- assert(CurrV.getObjKind() == RetEffect::CF);
- os << ". "
- "Core Foundation objects are not automatically garbage collected.";
+ if (GCEnabled) {
+ assert(CurrV.getObjKind() == RetEffect::CF);
+ os << ". "
+ "Core Foundation objects are not automatically garbage collected.";
+ }
+ }
+ else {
+ assert (CurrV.isNotOwned());
+ os << "+0 retain count";
}
- }
- else {
- assert (CurrV.isNotOwned());
- os << "+0 retain count";
}
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
@@ -1912,7 +1967,7 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
// Retrieve the value of the argument. Is it the symbol
// we are interested in?
- if (CurrSt->getSValAsScalarOrLoc(*AI).getAsLocSymbol() != Sym)
+ if (CurrSt->getSValAsScalarOrLoc(*AI, LCtx).getAsLocSymbol() != Sym)
continue;
// We have an argument. Get the effect!
@@ -1921,7 +1976,8 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
}
else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
if (const Expr *receiver = ME->getInstanceReceiver())
- if (CurrSt->getSValAsScalarOrLoc(receiver).getAsLocSymbol() == Sym) {
+ if (CurrSt->getSValAsScalarOrLoc(receiver, LCtx)
+ .getAsLocSymbol() == Sym) {
// The symbol we are tracking is the receiver.
AEffects.push_back(Summ->getReceiverEffect());
}
@@ -1949,7 +2005,8 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
if (contains(AEffects, MakeCollectable)) {
// Get the name of the function.
const Stmt *S = cast<StmtPoint>(N->getLocation()).getStmt();
- SVal X = CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee());
+ SVal X =
+ CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee(), LCtx);
const FunctionDecl *FD = X.getAsFunctionDecl();
if (GCEnabled) {
@@ -2004,8 +2061,8 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
if (PrevV.getKind() == RefVal::Released) {
assert(GCEnabled && CurrV.getCount() > 0);
- os << " The object is not eligible for garbage collection until the "
- "retain count reaches 0 again.";
+ os << " The object is not eligible for garbage collection until "
+ "the retain count reaches 0 again.";
}
break;
@@ -2015,8 +2072,12 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
break;
case RefVal::ReturnedOwned:
- os << "Object returned to caller as an owning reference (single retain "
- "count transferred to caller)";
+ // Autoreleases can be applied after marking a node ReturnedOwned.
+ if (CurrV.getAutoreleaseCount())
+ return NULL;
+
+ os << "Object returned to caller as an owning reference (single "
+ "retain count transferred to caller)";
break;
case RefVal::ReturnedNotOwned:
@@ -2061,7 +2122,7 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
I!=E; ++I)
if (const Expr *Exp = dyn_cast_or_null<Expr>(*I))
- if (CurrSt->getSValAsScalarOrLoc(Exp).getAsLocSymbol() == Sym) {
+ if (CurrSt->getSValAsScalarOrLoc(Exp, LCtx).getAsLocSymbol() == Sym) {
P->addRange(Exp->getSourceRange());
break;
}
@@ -2069,62 +2130,42 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
return P;
}
-namespace {
- class FindUniqueBinding :
- public StoreManager::BindingsHandler {
- SymbolRef Sym;
- const MemRegion* Binding;
- bool First;
-
- public:
- FindUniqueBinding(SymbolRef sym) : Sym(sym), Binding(0), First(true) {}
-
- bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
- SVal val) {
-
- SymbolRef SymV = val.getAsSymbol();
- if (!SymV || SymV != Sym)
- return true;
-
- if (Binding) {
- First = false;
- return false;
- }
- else
- Binding = R;
-
- return true;
- }
-
- operator bool() { return First && Binding; }
- const MemRegion* getRegion() { return Binding; }
- };
-}
-
+// Find the first node in the current function context that referred to the
+// tracked symbol and the memory location the value was stored to. Note that
+// the binding is only reported if the allocation occurred in the same
+// function as the leak.
static std::pair<const ExplodedNode*,const MemRegion*>
GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
SymbolRef Sym) {
-
- // Find both first node that referred to the tracked symbol and the
- // memory location that value was store to.
const ExplodedNode *Last = N;
const MemRegion* FirstBinding = 0;
+ const LocationContext *LeakContext = N->getLocationContext();
while (N) {
- const ProgramState *St = N->getState();
+ ProgramStateRef St = N->getState();
RefBindings B = St->get<RefBindings>();
if (!B.lookup(Sym))
break;
- FindUniqueBinding FB(Sym);
+ StoreManager::FindUniqueBinding FB(Sym);
StateMgr.iterBindings(St, FB);
if (FB) FirstBinding = FB.getRegion();
- Last = N;
+    // The allocation node is the last node in the current context in which
+    // the symbol was tracked.
+ if (N->getLocationContext() == LeakContext)
+ Last = N;
+
N = N->pred_empty() ? NULL : *(N->pred_begin());
}
+  // If the allocation happened in a function other than the leak node's
+  // context (or we walked off the top of the graph), do not report the
+  // binding.
+  if (!N || N->getLocationContext() != LeakContext) {
+ FirstBinding = 0;
+ }
+
return std::make_pair(Last, FirstBinding);
}
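
Stripped of the analyzer types, GetAllocationSite walks a first-predecessor chain and remembers the last node still in the leak's own stack frame; it additionally drops the memory binding when tracking began in a different frame. A standalone sketch of the walk, with hypothetical Node/Context types:

struct Context {};
struct Node {
  Node *Pred;              // first predecessor, or 0 at the root
  const Context *Ctx;      // location context of this node
  bool TracksSym;          // does this node's state still bind the symbol?
};

static const Node *lastNodeInLeakFrame(const Node *N) {
  const Context *LeakCtx = N->Ctx;
  const Node *Last = N;
  while (N && N->TracksSym) {
    if (N->Ctx == LeakCtx)  // only remember nodes in the leak's own frame
      Last = N;
    N = N->Pred;
  }
  return Last;
}

int main() {
  Context Caller, Leaf;
  Node A = { 0, &Caller, true };    // allocation in the caller
  Node B = { &A, &Leaf, true };
  Node C = { &B, &Leaf, true };     // leak reported here
  return lastNodeInLeakFrame(&C) == &B ? 0 : 1;
}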
@@ -2132,9 +2173,7 @@ PathDiagnosticPiece*
CFRefReportVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *EndN,
BugReport &BR) {
- // Tell the BugReporterContext to report cases when the tracked symbol is
- // assigned to different variables, etc.
- BRC.addNotableSymbol(Sym);
+ BR.markInteresting(Sym);
return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
}
@@ -2145,7 +2184,7 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
// Tell the BugReporterContext to report cases when the tracked symbol is
// assigned to different variables, etc.
- BRC.addNotableSymbol(Sym);
+ BR.markInteresting(Sym);
// We are reporting a leak. Walk up the graph to get to the first node where
// the symbol appeared, and also get the first VarDecl that tracked object
@@ -2193,10 +2232,10 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
}
else {
const FunctionDecl *FD = cast<FunctionDecl>(D);
- os << " is return from a function whose name ('"
- << FD->getNameAsString()
+ os << " is returned from a function whose name ('"
+ << *FD
<< "') does not contain 'Copy' or 'Create'. This violates the naming"
- " convention rules given the Memory Management Guide for Core"
+ " convention rules given in the Memory Management Guide for Core"
" Foundation";
}
}
@@ -2218,7 +2257,7 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
bool GCEnabled, const SummaryLogTy &Log,
ExplodedNode *n, SymbolRef sym,
- ExprEngine &Eng)
+ CheckerContext &Ctx)
: CFRefReport(D, LOpts, GCEnabled, Log, n, sym, false) {
// Most bug reports are cached at the location where they occurred.
@@ -2231,10 +2270,10 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
// same SourceLocation.
const ExplodedNode *AllocNode = 0;
- const SourceManager& SMgr = Eng.getContext().getSourceManager();
+ const SourceManager& SMgr = Ctx.getSourceManager();
llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
- GetAllocationSite(Eng.getStateManager(), getErrorNode(), sym);
+ GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
// Get the SourceLocation for the allocation site.
ProgramPoint P = AllocNode->getLocation();
@@ -2244,15 +2283,14 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
// Fill in the description of the bug.
Description.clear();
llvm::raw_string_ostream os(Description);
- unsigned AllocLine = SMgr.getExpansionLineNumber(AllocStmt->getLocStart());
os << "Potential leak ";
if (GCEnabled)
os << "(when using garbage collection) ";
- os << "of an object allocated on line " << AllocLine;
+ os << "of an object";
// FIXME: AllocBinding doesn't get populated for RegionStore yet.
if (AllocBinding)
- os << " and stored into '" << AllocBinding->getString() << '\'';
+ os << " stored into '" << AllocBinding->getString() << '\'';
addVisitor(new CFRefLeakReportVisitor(sym, GCEnabled, Log));
}
@@ -2271,24 +2309,26 @@ class RetainCountChecker
check::PostStmt<CastExpr>,
check::PostStmt<CallExpr>,
check::PostStmt<CXXConstructExpr>,
+ check::PostStmt<ObjCArrayLiteral>,
+ check::PostStmt<ObjCDictionaryLiteral>,
check::PostObjCMessage,
check::PreStmt<ReturnStmt>,
check::RegionChanges,
eval::Assume,
eval::Call > {
- mutable llvm::OwningPtr<CFRefBug> useAfterRelease, releaseNotOwned;
- mutable llvm::OwningPtr<CFRefBug> deallocGC, deallocNotOwned;
- mutable llvm::OwningPtr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
- mutable llvm::OwningPtr<CFRefBug> leakWithinFunction, leakAtReturn;
- mutable llvm::OwningPtr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
+ mutable OwningPtr<CFRefBug> useAfterRelease, releaseNotOwned;
+ mutable OwningPtr<CFRefBug> deallocGC, deallocNotOwned;
+ mutable OwningPtr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
+ mutable OwningPtr<CFRefBug> leakWithinFunction, leakAtReturn;
+ mutable OwningPtr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
typedef llvm::DenseMap<SymbolRef, const SimpleProgramPointTag *> SymbolTagMap;
// This map is only used to ensure proper deletion of any allocated tags.
mutable SymbolTagMap DeadSymbolTags;
- mutable llvm::OwningPtr<RetainSummaryManager> Summaries;
- mutable llvm::OwningPtr<RetainSummaryManager> SummariesGC;
+ mutable OwningPtr<RetainSummaryManager> Summaries;
+ mutable OwningPtr<RetainSummaryManager> SummariesGC;
mutable ARCounts::Factory ARCountFactory;
@@ -2386,7 +2426,7 @@ public:
bool GCEnabled) const {
// FIXME: We don't support ARC being turned on and off during one analysis.
// (nor, for that matter, do we support changing ASTContexts)
- bool ARCEnabled = (bool)Ctx.getLangOptions().ObjCAutoRefCount;
+ bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
if (GCEnabled) {
if (!SummariesGC)
SummariesGC.reset(new RetainSummaryManager(Ctx, true, ARCEnabled));
@@ -2406,7 +2446,7 @@ public:
return getSummaryManager(C.getASTContext(), C.isObjCGCEnabled());
}
- void printState(raw_ostream &Out, const ProgramState *State,
+ void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const;
void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
@@ -2415,66 +2455,72 @@ public:
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPostStmt(const CXXConstructExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
+ void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const;
+
void checkSummary(const RetainSummary &Summ, const CallOrObjCMessage &Call,
CheckerContext &C) const;
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
- const ProgramState *evalAssume(const ProgramState *state, SVal Cond,
+ ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
- const ProgramState *
- checkRegionChanges(const ProgramState *state,
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions) const;
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const;
- bool wantsRegionChangeUpdate(const ProgramState *state) const {
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const {
return true;
}
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
void checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
ExplodedNode *Pred, RetEffect RE, RefVal X,
- SymbolRef Sym, const ProgramState *state) const;
+ SymbolRef Sym, ProgramStateRef state) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
- void checkEndPath(EndOfFunctionNodeBuilder &Builder, ExprEngine &Eng) const;
+ void checkEndPath(CheckerContext &C) const;
- const ProgramState *updateSymbol(const ProgramState *state, SymbolRef sym,
+ ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
RefVal V, ArgEffect E, RefVal::Kind &hasErr,
CheckerContext &C) const;
- void processNonLeakError(const ProgramState *St, SourceRange ErrorRange,
+ void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
RefVal::Kind ErrorKind, SymbolRef Sym,
CheckerContext &C) const;
+
+ void processObjCLiterals(CheckerContext &C, const Expr *Ex) const;
const ProgramPointTag *getDeadSymbolTag(SymbolRef sym) const;
- const ProgramState *handleSymbolDeath(const ProgramState *state,
+ ProgramStateRef handleSymbolDeath(ProgramStateRef state,
SymbolRef sid, RefVal V,
SmallVectorImpl<SymbolRef> &Leaked) const;
- std::pair<ExplodedNode *, const ProgramState *>
- handleAutoreleaseCounts(const ProgramState *state,
+ std::pair<ExplodedNode *, ProgramStateRef >
+ handleAutoreleaseCounts(ProgramStateRef state,
GenericNodeBuilderRefCount Bd, ExplodedNode *Pred,
- ExprEngine &Eng, SymbolRef Sym, RefVal V) const;
+ CheckerContext &Ctx, SymbolRef Sym, RefVal V) const;
- ExplodedNode *processLeaks(const ProgramState *state,
+ ExplodedNode *processLeaks(ProgramStateRef state,
SmallVectorImpl<SymbolRef> &Leaked,
GenericNodeBuilderRefCount &Builder,
- ExprEngine &Eng,
+ CheckerContext &Ctx,
ExplodedNode *Pred = 0) const;
};
} // end anonymous namespace
namespace {
class StopTrackingCallback : public SymbolVisitor {
- const ProgramState *state;
+ ProgramStateRef state;
public:
- StopTrackingCallback(const ProgramState *st) : state(st) {}
- const ProgramState *getState() const { return state; }
+ StopTrackingCallback(ProgramStateRef st) : state(st) {}
+ ProgramStateRef getState() const { return state; }
bool VisitSymbol(SymbolRef sym) {
state = state->remove<RefBindings>(sym);
@@ -2495,9 +2541,10 @@ void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
if (!BE->getBlockDecl()->hasCaptures())
return;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
const BlockDataRegion *R =
- cast<BlockDataRegion>(state->getSVal(BE).getAsRegion());
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
@@ -2509,7 +2556,7 @@ void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
// via captured variables, even though captured variables result in a copy
// and in implicit increment/decrement of a retain count.
SmallVector<const MemRegion*, 10> Regions;
- const LocationContext *LC = C.getPredecessor()->getLocationContext();
+ const LocationContext *LC = C.getLocationContext();
MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
for ( ; I != E; ++I) {
@@ -2546,8 +2593,8 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
break;
}
- const ProgramState *state = C.getState();
- SymbolRef Sym = state->getSVal(CE).getAsLocSymbol();
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym = state->getSVal(CE, C.getLocationContext()).getAsLocSymbol();
if (!Sym)
return;
const RefVal* T = state->get<RefBindings>(Sym);
@@ -2563,15 +2610,18 @@ void RetainCountChecker::checkPostStmt(const CastExpr *CE,
return;
}
- C.generateNode(state);
+ C.addTransition(state);
}
void RetainCountChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
+ if (C.wasInlined)
+ return;
+
// Get the callee.
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
+ SVal L = state->getSVal(Callee, C.getLocationContext());
RetainSummaryManager &Summaries = getSummaryManager(C);
const RetainSummary *Summ = 0;
@@ -2591,7 +2641,7 @@ void RetainCountChecker::checkPostStmt(const CallExpr *CE,
if (!Summ)
Summ = Summaries.getDefaultSummary();
- checkSummary(*Summ, CallOrObjCMessage(CE, state), C);
+ checkSummary(*Summ, CallOrObjCMessage(CE, state, C.getLocationContext()), C);
}
void RetainCountChecker::checkPostStmt(const CXXConstructExpr *CE,
@@ -2607,20 +2657,62 @@ void RetainCountChecker::checkPostStmt(const CXXConstructExpr *CE,
if (!Summ)
return;
- const ProgramState *state = C.getState();
- checkSummary(*Summ, CallOrObjCMessage(CE, state), C);
+ ProgramStateRef state = C.getState();
+ checkSummary(*Summ, CallOrObjCMessage(CE, state, C.getLocationContext()), C);
+}
+
+void RetainCountChecker::processObjCLiterals(CheckerContext &C,
+ const Expr *Ex) const {
+ ProgramStateRef state = C.getState();
+ const ExplodedNode *pred = C.getPredecessor();
+ for (Stmt::const_child_iterator it = Ex->child_begin(), et = Ex->child_end() ;
+ it != et ; ++it) {
+ const Stmt *child = *it;
+ SVal V = state->getSVal(child, pred->getLocationContext());
+ if (SymbolRef sym = V.getAsSymbol())
+ if (const RefVal* T = state->get<RefBindings>(sym)) {
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ state = updateSymbol(state, sym, *T, MayEscape, hasErr, C);
+ if (hasErr) {
+ processNonLeakError(state, child->getSourceRange(), hasErr, sym, C);
+ return;
+ }
+ }
+ }
+
+ // Return the object as autoreleased.
+ if (SymbolRef sym =
+ state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
+ QualType ResultTy = Ex->getType();
+ state = state->set<RefBindings>(sym, RefVal::makeNotOwned(RetEffect::ObjC,
+ ResultTy));
+ }
+
+ C.addTransition(state);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCArrayLiteral *AL,
+ CheckerContext &C) const {
+  // Apply the 'MayEscape' effect to all values.
+ processObjCLiterals(C, AL);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
+ CheckerContext &C) const {
+  // Apply the 'MayEscape' effect to all keys and values.
+ processObjCLiterals(C, DL);
}
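
In outline, processObjCLiterals applies one effect to every tracked child and then rebinds the literal itself at +0. A toy, self-contained version of that shape (hypothetical RefKind/Binding types; the real checker threads immutable analyzer state instead of mutating a vector):

#include <vector>

enum RefKind { Owned, NotOwned, MayHaveEscaped };
struct Binding { int Sym; RefKind Kind; };

static void processLiteral(std::vector<Binding> &State,
                           const std::vector<int> &ChildSyms,
                           int ResultSym) {
  // Each tracked element of the literal may escape into the collection.
  for (size_t i = 0; i != ChildSyms.size(); ++i)
    for (size_t j = 0; j != State.size(); ++j)
      if (State[j].Sym == ChildSyms[i])
        State[j].Kind = MayHaveEscaped;
  // The literal itself comes back at +0 (not owned by the caller).
  Binding Result = { ResultSym, NotOwned };
  State.push_back(Result);
}

int main() {
  std::vector<Binding> State;
  Binding Obj = { 1, Owned };
  State.push_back(Obj);
  std::vector<int> Children(1, 1);
  processLiteral(State, Children, 2);
  return (State[0].Kind == MayHaveEscaped &&
          State[1].Kind == NotOwned) ? 0 : 1;
}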
void RetainCountChecker::checkPostObjCMessage(const ObjCMessage &Msg,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
- ExplodedNode *Pred = C.getPredecessor();
+ ProgramStateRef state = C.getState();
RetainSummaryManager &Summaries = getSummaryManager(C);
const RetainSummary *Summ;
if (Msg.isInstanceMessage()) {
- const LocationContext *LC = Pred->getLocationContext();
+ const LocationContext *LC = C.getLocationContext();
Summ = Summaries.getInstanceMethodSummary(Msg, state, LC);
} else {
Summ = Summaries.getClassMethodSummary(Msg);
@@ -2630,7 +2722,7 @@ void RetainCountChecker::checkPostObjCMessage(const ObjCMessage &Msg,
if (!Summ)
return;
- checkSummary(*Summ, CallOrObjCMessage(Msg, state), C);
+ checkSummary(*Summ, CallOrObjCMessage(Msg, state, C.getLocationContext()), C);
}
/// GetReturnType - Used to get the return type of a message expression or
@@ -2664,7 +2756,7 @@ static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
void RetainCountChecker::checkSummary(const RetainSummary &Summ,
const CallOrObjCMessage &CallOrMsg,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
// Evaluate the effect of the arguments.
RefVal::Kind hasErr = (RefVal::Kind) 0;
@@ -2689,7 +2781,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
// Evaluate the effect on the message receiver.
bool ReceiverIsTracked = false;
if (!hasErr && CallOrMsg.isObjCMessage()) {
- const LocationContext *LC = C.getPredecessor()->getLocationContext();
+ const LocationContext *LC = C.getLocationContext();
SVal Receiver = CallOrMsg.getInstanceMessageReceiver(LC);
if (SymbolRef Sym = Receiver.getAsLocSymbol()) {
if (const RefVal *T = state->get<RefBindings>(Sym)) {
@@ -2722,7 +2814,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
switch (RE.getKind()) {
default:
- llvm_unreachable("Unhandled RetEffect."); break;
+ llvm_unreachable("Unhandled RetEffect.");
case RetEffect::NoRet:
// No work necessary.
@@ -2730,7 +2822,8 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
case RetEffect::OwnedAllocatedSymbol:
case RetEffect::OwnedSymbol: {
- SymbolRef Sym = state->getSVal(CallOrMsg.getOriginExpr()).getAsSymbol();
+ SymbolRef Sym = state->getSVal(CallOrMsg.getOriginExpr(),
+ C.getLocationContext()).getAsSymbol();
if (!Sym)
break;
@@ -2757,7 +2850,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
case RetEffect::ARCNotOwnedSymbol:
case RetEffect::NotOwnedSymbol: {
const Expr *Ex = CallOrMsg.getOriginExpr();
- SymbolRef Sym = state->getSVal(Ex).getAsSymbol();
+ SymbolRef Sym = state->getSVal(Ex, C.getLocationContext()).getAsSymbol();
if (!Sym)
break;
@@ -2776,7 +2869,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
if (state == C.getState()) {
NewNode = C.getPredecessor();
} else {
- NewNode = C.generateNode(state);
+ NewNode = C.addTransition(state);
}
   // Annotate the node with the summary we used.
@@ -2791,15 +2884,15 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
}
-const ProgramState *
-RetainCountChecker::updateSymbol(const ProgramState *state, SymbolRef sym,
+ProgramStateRef
+RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
RefVal V, ArgEffect E, RefVal::Kind &hasErr,
CheckerContext &C) const {
// In GC mode [... release] and [... retain] do nothing.
// In ARC mode they shouldn't exist at all, but we just ignore them.
bool IgnoreRetainMsg = C.isObjCGCEnabled();
if (!IgnoreRetainMsg)
- IgnoreRetainMsg = (bool)C.getASTContext().getLangOptions().ObjCAutoRefCount;
+ IgnoreRetainMsg = (bool)C.getASTContext().getLangOpts().ObjCAutoRefCount;
switch (E) {
default: break;
@@ -2822,7 +2915,6 @@ RetainCountChecker::updateSymbol(const ProgramState *state, SymbolRef sym,
case IncRefMsg:
case MakeCollectable:
llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted");
- return state;
case Dealloc:
// Any use of -dealloc in GC is *bad*.
@@ -2835,7 +2927,6 @@ RetainCountChecker::updateSymbol(const ProgramState *state, SymbolRef sym,
switch (V.getKind()) {
default:
llvm_unreachable("Invalid RefVal state for an explicit dealloc.");
- break;
case RefVal::Owned:
// The object immediately transitions to the released state.
V = V ^ RefVal::Released;
@@ -2879,7 +2970,6 @@ RetainCountChecker::updateSymbol(const ProgramState *state, SymbolRef sym,
switch (V.getKind()) {
default:
llvm_unreachable("Invalid RefVal state for a retain.");
- break;
case RefVal::Owned:
case RefVal::NotOwned:
V = V + 1;
@@ -2901,7 +2991,6 @@ RetainCountChecker::updateSymbol(const ProgramState *state, SymbolRef sym,
default:
// case 'RefVal::Released' handled above.
llvm_unreachable("Invalid RefVal state for a release.");
- break;
case RefVal::Owned:
assert(V.getCount() > 0);
@@ -2932,7 +3021,7 @@ RetainCountChecker::updateSymbol(const ProgramState *state, SymbolRef sym,
return state->set<RefBindings>(sym, V);
}
-void RetainCountChecker::processNonLeakError(const ProgramState *St,
+void RetainCountChecker::processNonLeakError(ProgramStateRef St,
SourceRange ErrorRange,
RefVal::Kind ErrorKind,
SymbolRef Sym,
@@ -2945,7 +3034,6 @@ void RetainCountChecker::processNonLeakError(const ProgramState *St,
switch (ErrorKind) {
default:
llvm_unreachable("Unhandled error.");
- return;
case RefVal::ErrorUseAfterRelease:
if (!useAfterRelease)
useAfterRelease.reset(new UseAfterRelease());
@@ -2969,7 +3057,7 @@ void RetainCountChecker::processNonLeakError(const ProgramState *St,
}
assert(BT);
- CFRefReport *report = new CFRefReport(*BT, C.getASTContext().getLangOptions(),
+ CFRefReport *report = new CFRefReport(*BT, C.getASTContext().getLangOpts(),
C.isObjCGCEnabled(), SummaryLog,
N, Sym);
report->addRange(ErrorRange);
@@ -2982,11 +3070,8 @@ void RetainCountChecker::processNonLeakError(const ProgramState *St,
bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Get the callee. We're only interested in simple C functions.
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
-
- const FunctionDecl *FD = L.getAsFunctionDecl();
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return false;
@@ -3008,7 +3093,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// See if it's one of the specific functions we know how to eval.
bool canEval = false;
- QualType ResultTy = FD->getResultType();
+ QualType ResultTy = CE->getCallReturnType();
if (ResultTy->isObjCIdType()) {
// Handle: id NSMakeCollectable(CFTypeRef)
canEval = II->isStr("NSMakeCollectable");
@@ -3026,14 +3111,15 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
return false;
// Bind the return value.
- SVal RetVal = state->getSVal(CE->getArg(0));
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
if (RetVal.isUnknown()) {
// If the receiver is unknown, conjure a return value.
SValBuilder &SVB = C.getSValBuilder();
unsigned Count = C.getCurrentBlockCount();
- SVal RetVal = SVB.getConjuredSymbolVal(0, CE, ResultTy, Count);
+    // Assign the outer RetVal; declaring a new local here would shadow it
+    // and discard the conjured value.
+    RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count);
}
- state = state->BindExpr(CE, RetVal, false);
+ state = state->BindExpr(CE, LCtx, RetVal, false);
// FIXME: This should not be necessary, but otherwise the argument seems to be
// considered alive during the next statement.
@@ -3046,7 +3132,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Invalidate the argument region.
unsigned Count = C.getCurrentBlockCount();
- state = state->invalidateRegions(ArgRegion, CE, Count);
+ state = state->invalidateRegions(ArgRegion, CE, Count, LCtx);
// Restore the refcount status of the argument.
if (Binding)
@@ -3061,14 +3147,30 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Handle return statements.
//===----------------------------------------------------------------------===//
+// Return true if the current LocationContext has no caller context.
+static bool inTopFrame(CheckerContext &C) {
+ const LocationContext *LC = C.getLocationContext();
+ return LC->getParent() == 0;
+}
+
void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
CheckerContext &C) const {
+
+ // Only adjust the reference count if this is the top-level call frame,
+ // and not the result of inlining. In the future, we should do
+ // better checking even for inlined calls, and see if they match
+ // with their expected semantics (e.g., the method should return a retained
+ // object, etc.).
+ if (!inTopFrame(C))
+ return;
+
const Expr *RetE = S->getRetValue();
if (!RetE)
return;
- const ProgramState *state = C.getState();
- SymbolRef Sym = state->getSValAsScalarOrLoc(RetE).getAsLocSymbol();
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym =
+ state->getSValAsScalarOrLoc(RetE, C.getLocationContext()).getAsLocSymbol();
if (!Sym)
return;
@@ -3107,7 +3209,7 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
// Update the binding.
state = state->set<RefBindings>(Sym, X);
- ExplodedNode *Pred = C.generateNode(state);
+ ExplodedNode *Pred = C.addTransition(state);
// At this point we have updated the state properly.
// Everything after this is merely checking to see if the return value has
@@ -3121,8 +3223,7 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
static SimpleProgramPointTag
AutoreleaseTag("RetainCountChecker : Autorelease");
GenericNodeBuilderRefCount Bd(C, &AutoreleaseTag);
- llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred,
- C.getEngine(), Sym, X);
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, C, Sym, X);
// Did we cache out?
if (!Pred)
@@ -3159,7 +3260,7 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
ExplodedNode *Pred,
RetEffect RE, RefVal X,
SymbolRef Sym,
- const ProgramState *state) const {
+ ProgramStateRef state) const {
// Any leaks or other errors?
if (X.isReturnedOwned() && X.getCount() == 0) {
if (RE.getKind() != RetEffect::NoRet) {
@@ -3186,14 +3287,14 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
static SimpleProgramPointTag
ReturnOwnLeakTag("RetainCountChecker : ReturnsOwnLeak");
- ExplodedNode *N = C.generateNode(state, Pred, &ReturnOwnLeakTag);
+ ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
if (N) {
- const LangOptions &LOpts = C.getASTContext().getLangOptions();
+ const LangOptions &LOpts = C.getASTContext().getLangOpts();
bool GCEnabled = C.isObjCGCEnabled();
CFRefReport *report =
new CFRefLeakReport(*getLeakAtReturnBug(LOpts, GCEnabled),
LOpts, GCEnabled, SummaryLog,
- N, Sym, C.getEngine());
+ N, Sym, C);
C.EmitReport(report);
}
}
@@ -3206,14 +3307,14 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
static SimpleProgramPointTag
ReturnNotOwnedTag("RetainCountChecker : ReturnNotOwnedForOwned");
- ExplodedNode *N = C.generateNode(state, Pred, &ReturnNotOwnedTag);
+ ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
if (N) {
if (!returnNotOwnedForOwned)
returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned());
CFRefReport *report =
new CFRefReport(*returnNotOwnedForOwned,
- C.getASTContext().getLangOptions(),
+ C.getASTContext().getLangOpts(),
C.isObjCGCEnabled(), SummaryLog, N, Sym);
C.EmitReport(report);
}
@@ -3236,7 +3337,7 @@ void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// (2) we are binding to a memregion that does not have stack storage
// (3) we are binding to a memregion with stack storage that the store
// does not understand.
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
if (loc::MemRegionVal *regionLoc = dyn_cast<loc::MemRegionVal>(&loc)) {
escapes = !regionLoc->getRegion()->hasStackStorage();
@@ -3247,6 +3348,12 @@ void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// the binding).
escapes = (state == (state->bindLoc(*regionLoc, val)));
}
+ if (!escapes) {
+ // Case 4: We do not currently model what happens when a symbol is
+ // assigned to a struct field, so be conservative here and let the symbol
+ // go. TODO: This could definitely be improved upon.
+ escapes = !isa<VarRegion>(regionLoc->getRegion());
+ }
}
// If our store can represent the binding and we aren't storing to something
@@ -3261,7 +3368,7 @@ void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
C.addTransition(state);
}
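
Flattened into a single predicate, the escape cases in checkBind, including the new struct-field case (4), read as below; the booleans are hypothetical stand-ins for the analyzer's region queries:

struct BindingInfo {
  bool BoundToRegion;    // (1) did the value bind to a memory region at all?
  bool HasStackStorage;  // (2) stack vs. global/heap storage
  bool StoreUnderstood;  // (3) could the store actually record the binding?
  bool IsVarRegion;      // (4) plain variable, or e.g. a struct field?
};

static bool bindingEscapes(const BindingInfo &B) {
  if (!B.BoundToRegion)   return true;  // (1) can't see it: stop tracking
  if (!B.HasStackStorage) return true;  // (2) non-stack memory escapes
  if (!B.StoreUnderstood) return true;  // (3) unmodeled binding escapes
  return !B.IsVarRegion;                // (4) struct fields: be conservative
}

int main() {
  BindingInfo Field = { true, true, true, false };
  return bindingEscapes(Field) ? 0 : 1;   // a struct field counts as escaped
}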
-const ProgramState *RetainCountChecker::evalAssume(const ProgramState *state,
+ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
SVal Cond,
bool Assumption) const {
@@ -3294,11 +3401,12 @@ const ProgramState *RetainCountChecker::evalAssume(const ProgramState *state,
return state;
}
-const ProgramState *
-RetainCountChecker::checkRegionChanges(const ProgramState *state,
+ProgramStateRef
+RetainCountChecker::checkRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions) const {
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const {
if (!invalidated)
return state;
@@ -3324,10 +3432,11 @@ RetainCountChecker::checkRegionChanges(const ProgramState *state,
// Handle dead symbols and end-of-path.
//===----------------------------------------------------------------------===//
-std::pair<ExplodedNode *, const ProgramState *>
-RetainCountChecker::handleAutoreleaseCounts(const ProgramState *state,
+std::pair<ExplodedNode *, ProgramStateRef >
+RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
GenericNodeBuilderRefCount Bd,
- ExplodedNode *Pred, ExprEngine &Eng,
+ ExplodedNode *Pred,
+ CheckerContext &Ctx,
SymbolRef Sym, RefVal V) const {
unsigned ACnt = V.getAutoreleaseCount();
@@ -3335,7 +3444,7 @@ RetainCountChecker::handleAutoreleaseCounts(const ProgramState *state,
if (!ACnt)
return std::make_pair(Pred, state);
- assert(!Eng.isObjCGCEnabled() && "Autorelease counts in GC mode?");
+ assert(!Ctx.isObjCGCEnabled() && "Autorelease counts in GC mode?");
unsigned Cnt = V.getCount();
// FIXME: Handle sending 'autorelease' to already released object.
@@ -3366,10 +3475,8 @@ RetainCountChecker::handleAutoreleaseCounts(const ProgramState *state,
V = V ^ RefVal::ErrorOverAutorelease;
state = state->set<RefBindings>(Sym, V);
- if (ExplodedNode *N = Bd.MakeNode(state, Pred)) {
- N->markAsSink();
-
- llvm::SmallString<128> sbuf;
+ if (ExplodedNode *N = Bd.MakeNode(state, Pred, true)) {
+ SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << "Object over-autoreleased: object was sent -autorelease ";
if (V.getAutoreleaseCount() > 1)
@@ -3379,18 +3486,18 @@ RetainCountChecker::handleAutoreleaseCounts(const ProgramState *state,
if (!overAutorelease)
overAutorelease.reset(new OverAutorelease());
- const LangOptions &LOpts = Eng.getContext().getLangOptions();
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
CFRefReport *report =
new CFRefReport(*overAutorelease, LOpts, /* GCEnabled = */ false,
SummaryLog, N, Sym, os.str());
- Eng.getBugReporter().EmitReport(report);
+ Ctx.EmitReport(report);
}
- return std::make_pair((ExplodedNode *)0, (const ProgramState *)0);
+ return std::make_pair((ExplodedNode *)0, (ProgramStateRef )0);
}
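
The over-autorelease diagnostic above is assembled with LLVM's stack-backed string stream, which the patch now spells without the llvm:: prefix. A minimal standalone use of that pairing (real llvm::SmallString / raw_svector_ostream APIs; message text shortened):

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Formats into 128 bytes of inline storage, spilling to the heap only if
  // the message outgrows the buffer.
  llvm::SmallString<128> sbuf;
  llvm::raw_svector_ostream os(sbuf);
  unsigned Count = 2;
  os << "Object over-autoreleased: object was sent -autorelease ";
  if (Count > 1)
    os << Count << " times";
  llvm::outs() << os.str() << '\n';
  return 0;
}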
-const ProgramState *
-RetainCountChecker::handleSymbolDeath(const ProgramState *state,
+ProgramStateRef
+RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
SymbolRef sid, RefVal V,
SmallVectorImpl<SymbolRef> &Leaked) const {
bool hasLeak = false;
@@ -3407,10 +3514,11 @@ RetainCountChecker::handleSymbolDeath(const ProgramState *state,
}
ExplodedNode *
-RetainCountChecker::processLeaks(const ProgramState *state,
+RetainCountChecker::processLeaks(ProgramStateRef state,
SmallVectorImpl<SymbolRef> &Leaked,
GenericNodeBuilderRefCount &Builder,
- ExprEngine &Eng, ExplodedNode *Pred) const {
+ CheckerContext &Ctx,
+ ExplodedNode *Pred) const {
if (Leaked.empty())
return Pred;
@@ -3421,51 +3529,58 @@ RetainCountChecker::processLeaks(const ProgramState *state,
for (SmallVectorImpl<SymbolRef>::iterator
I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
- const LangOptions &LOpts = Eng.getContext().getLangOptions();
- bool GCEnabled = Eng.isObjCGCEnabled();
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+ bool GCEnabled = Ctx.isObjCGCEnabled();
CFRefBug *BT = Pred ? getLeakWithinFunctionBug(LOpts, GCEnabled)
: getLeakAtReturnBug(LOpts, GCEnabled);
assert(BT && "BugType not initialized.");
CFRefLeakReport *report = new CFRefLeakReport(*BT, LOpts, GCEnabled,
- SummaryLog, N, *I, Eng);
- Eng.getBugReporter().EmitReport(report);
+ SummaryLog, N, *I, Ctx);
+ Ctx.EmitReport(report);
}
}
return N;
}
-void RetainCountChecker::checkEndPath(EndOfFunctionNodeBuilder &Builder,
- ExprEngine &Eng) const {
- const ProgramState *state = Builder.getState();
- GenericNodeBuilderRefCount Bd(Builder);
+void RetainCountChecker::checkEndPath(CheckerContext &Ctx) const {
+ ProgramStateRef state = Ctx.getState();
+ GenericNodeBuilderRefCount Bd(Ctx);
RefBindings B = state->get<RefBindings>();
- ExplodedNode *Pred = Builder.getPredecessor();
+ ExplodedNode *Pred = Ctx.getPredecessor();
for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, Eng,
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, Ctx,
I->first, I->second);
if (!state)
return;
}
+ // If the current LocationContext has a parent, don't check for leaks.
+ // We will do that later.
+  // FIXME: we should instead check for imbalances in the retains/releases,
+ // and suggest annotations.
+ if (Ctx.getLocationContext()->getParent())
+ return;
+
B = state->get<RefBindings>();
SmallVector<SymbolRef, 10> Leaked;
for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
state = handleSymbolDeath(state, I->first, I->second, Leaked);
- processLeaks(state, Leaked, Bd, Eng, Pred);
+ processLeaks(state, Leaked, Bd, Ctx, Pred);
}
const ProgramPointTag *
RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
const SimpleProgramPointTag *&tag = DeadSymbolTags[sym];
if (!tag) {
- llvm::SmallString<64> buf;
+ SmallString<64> buf;
llvm::raw_svector_ostream out(buf);
- out << "RetainCountChecker : Dead Symbol : " << sym->getSymbolID();
+ out << "RetainCountChecker : Dead Symbol : ";
+ sym->dumpToStream(out);
tag = new SimpleProgramPointTag(out.str());
}
return tag;
@@ -3473,10 +3588,9 @@ RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
- ExprEngine &Eng = C.getEngine();
ExplodedNode *Pred = C.getPredecessor();
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
RefBindings B = state->get<RefBindings>();
// Update counts from autorelease pools
@@ -3487,7 +3601,7 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// Use the symbol as the tag.
// FIXME: This might not be as unique as we would like.
GenericNodeBuilderRefCount Bd(C, getDeadSymbolTag(Sym));
- llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, Eng,
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, C,
Sym, *T);
if (!state)
return;
@@ -3505,7 +3619,7 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
{
GenericNodeBuilderRefCount Bd(C, this);
- Pred = processLeaks(state, Leaked, Bd, Eng, Pred);
+ Pred = processLeaks(state, Leaked, Bd, C, Pred);
}
// Did we cache out?
@@ -3520,7 +3634,7 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
B = F.remove(B, *I);
state = state->set<RefBindings>(B);
- C.generateNode(state, Pred);
+ C.addTransition(state, Pred);
}
//===----------------------------------------------------------------------===//
@@ -3528,10 +3642,10 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
//===----------------------------------------------------------------------===//
static void PrintPool(raw_ostream &Out, SymbolRef Sym,
- const ProgramState *State) {
+ ProgramStateRef State) {
Out << ' ';
if (Sym)
- Out << Sym->getSymbolID();
+ Sym->dumpToStream(Out);
else
Out << "<pool>";
Out << ":{";
@@ -3544,14 +3658,14 @@ static void PrintPool(raw_ostream &Out, SymbolRef Sym,
Out << '}';
}
-static bool UsesAutorelease(const ProgramState *state) {
+static bool UsesAutorelease(ProgramStateRef state) {
// A state uses autorelease if it allocated an autorelease pool or if it has
// objects in the caller's autorelease pool.
return !state->get<AutoreleaseStack>().isEmpty() ||
state->get<AutoreleasePoolContents>(SymbolRef());
}
-void RetainCountChecker::printState(raw_ostream &Out, const ProgramState *State,
+void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
RefBindings B = State->get<RefBindings>();
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index e761bff..6e56593 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -25,7 +25,7 @@ using namespace ento;
namespace {
class ReturnPointerRangeChecker :
public Checker< check::PreStmt<ReturnStmt> > {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
public:
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
};
@@ -33,13 +33,13 @@ public:
void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
const Expr *RetE = RS->getRetValue();
if (!RetE)
return;
- SVal V = state->getSVal(RetE);
+ SVal V = state->getSVal(RetE, C.getLocationContext());
const MemRegion *R = V.getAsRegion();
const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(R);
@@ -58,8 +58,8 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
= C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
ER->getValueType());
- const ProgramState *StInBound = state->assumeInBound(Idx, NumElements, true);
- const ProgramState *StOutBound = state->assumeInBound(Idx, NumElements, false);
+ ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateSink(StOutBound);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index e8c8d90..7b1f0b1 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -25,7 +25,7 @@ using namespace ento;
namespace {
class ReturnUndefChecker :
public Checker< check::PreStmt<ReturnStmt> > {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
public:
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
};
@@ -38,7 +38,7 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
if (!RetE)
return;
- if (!C.getState()->getSVal(RetE).isUndef())
+ if (!C.getState()->getSVal(RetE, C.getLocationContext()).isUndef())
return;
ExplodedNode *N = C.generateSink();
@@ -54,7 +54,8 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
new BugReport(*BT, BT->getDescription(), N);
report->addRange(RetE->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, RetE));
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, RetE,
+ report));
C.EmitReport(report);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 91c4b96..54cf569 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -26,12 +26,12 @@ using namespace ento;
namespace {
class StackAddrEscapeChecker : public Checker< check::PreStmt<ReturnStmt>,
check::EndPath > {
- mutable llvm::OwningPtr<BuiltinBug> BT_stackleak;
- mutable llvm::OwningPtr<BuiltinBug> BT_returnstack;
+ mutable OwningPtr<BuiltinBug> BT_stackleak;
+ mutable OwningPtr<BuiltinBug> BT_returnstack;
public:
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
- void checkEndPath(EndOfFunctionNodeBuilder &B, ExprEngine &Eng) const;
+ void checkEndPath(CheckerContext &Ctx) const;
private:
void EmitStackError(CheckerContext &C, const MemRegion *R,
const Expr *RetE) const;
@@ -100,7 +100,7 @@ void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *
new BuiltinBug("Return of address to stack-allocated memory"));
// Generate a report for this bug.
- llvm::SmallString<512> buf;
+ SmallString<512> buf;
llvm::raw_svector_ostream os(buf);
SourceRange range = GenName(os, R, C.getSourceManager());
os << " returned to caller";
@@ -113,45 +113,53 @@ void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *
}
void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
- CheckerContext &C) const {
+ CheckerContext &C) const {
const Expr *RetE = RS->getRetValue();
if (!RetE)
return;
- SVal V = C.getState()->getSVal(RetE);
+ SVal V = C.getState()->getSVal(RetE, C.getLocationContext());
const MemRegion *R = V.getAsRegion();
- if (!R || !R->hasStackStorage())
- return;
+ if (!R)
+ return;
- if (R->hasStackStorage()) {
- // Automatic reference counting automatically copies blocks.
- if (C.getASTContext().getLangOptions().ObjCAutoRefCount &&
- isa<BlockDataRegion>(R))
- return;
+ const StackSpaceRegion *SS =
+ dyn_cast_or_null<StackSpaceRegion>(R->getMemorySpace());
+
+ if (!SS)
+ return;
- EmitStackError(C, R, RetE);
+ // Return stack memory in an ancestor stack frame is fine.
+ const StackFrameContext *SFC = SS->getStackFrame();
+ if (SFC != C.getLocationContext()->getCurrentStackFrame())
+ return;
+
+  // Automatic reference counting (ARC) implicitly copies blocks to the heap.
+ if (C.getASTContext().getLangOpts().ObjCAutoRefCount &&
+ isa<BlockDataRegion>(R))
return;
- }
-}
-void StackAddrEscapeChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
- ExprEngine &Eng) const {
+ EmitStackError(C, R, RetE);
+}
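
The rewritten check reduces to: warn only when the returned region lives in the current frame's stack space, with an ARC carve-out for blocks. A standalone sketch with hypothetical Region/Frame types standing in for MemRegion, StackSpaceRegion, and StackFrameContext:

struct Frame {};
struct MemSpace { bool IsStack; const Frame *Owner; };
struct Region   { const MemSpace *Space; bool IsBlockData; };

static bool shouldWarnStackAddrEscape(const Region *R, const Frame *Current,
                                      bool ARCEnabled) {
  if (!R || !R->Space || !R->Space->IsStack)
    return false;                     // not stack memory at all
  if (R->Space->Owner != Current)
    return false;                     // an ancestor frame's stack is fine
  if (ARCEnabled && R->IsBlockData)
    return false;                     // ARC copies blocks automatically
  return true;
}

int main() {
  Frame F;
  MemSpace S = { true, &F };
  Region R = { &S, false };
  return shouldWarnStackAddrEscape(&R, &F, false) ? 0 : 1;
}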
- const ProgramState *state = B.getState();
+void StackAddrEscapeChecker::checkEndPath(CheckerContext &Ctx) const {
+ ProgramStateRef state = Ctx.getState();
// Iterate over all bindings to global variables and see if it contains
// a memory region in the stack space.
class CallBack : public StoreManager::BindingsHandler {
private:
- ExprEngine &Eng;
+ CheckerContext &Ctx;
const StackFrameContext *CurSFC;
public:
SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;
- CallBack(ExprEngine &Eng, const LocationContext *LCtx)
- : Eng(Eng), CurSFC(LCtx->getCurrentStackFrame()) {}
+ CallBack(CheckerContext &CC) :
+ Ctx(CC),
+ CurSFC(CC.getLocationContext()->getCurrentStackFrame())
+ {}
bool HandleBinding(StoreManager &SMgr, Store store,
const MemRegion *region, SVal val) {
@@ -165,7 +173,7 @@ void StackAddrEscapeChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
    // Under automatic reference counting (ARC), it is okay to assign a block
// directly to a global variable.
- if (Eng.getContext().getLangOptions().ObjCAutoRefCount &&
+ if (Ctx.getASTContext().getLangOpts().ObjCAutoRefCount &&
isa<BlockDataRegion>(vR))
return true;
@@ -181,14 +189,14 @@ void StackAddrEscapeChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
}
};
- CallBack cb(Eng, B.getPredecessor()->getLocationContext());
+ CallBack cb(Ctx);
state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb);
if (cb.V.empty())
return;
// Generate an error node.
- ExplodedNode *N = B.generateNode(state);
+ ExplodedNode *N = Ctx.addTransition(state);
if (!N)
return;
@@ -201,10 +209,10 @@ void StackAddrEscapeChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
// Generate a report for this bug.
- llvm::SmallString<512> buf;
+ SmallString<512> buf;
llvm::raw_svector_ostream os(buf);
SourceRange range = GenName(os, cb.V[i].second,
- Eng.getContext().getSourceManager());
+ Ctx.getSourceManager());
os << " is still referred to by the global variable '";
const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion());
os << *VR->getDecl()
@@ -213,7 +221,7 @@ void StackAddrEscapeChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
if (range.isValid())
report->addRange(range);
- Eng.getBugReporter().EmitReport(report);
+ Ctx.EmitReport(report);
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 1d14e9e..3745d4a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -64,7 +64,7 @@ class StreamChecker : public Checker<eval::Call,
*II_fwrite,
*II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,
*II_clearerr, *II_feof, *II_ferror, *II_fileno;
- mutable llvm::OwningPtr<BuiltinBug> BT_nullfp, BT_illegalwhence,
+ mutable OwningPtr<BuiltinBug> BT_nullfp, BT_illegalwhence,
BT_doubleclose, BT_ResourceLeak;
public:
@@ -75,7 +75,7 @@ public:
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
- void checkEndPath(EndOfFunctionNodeBuilder &B, ExprEngine &Eng) const;
+ void checkEndPath(CheckerContext &Ctx) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
private:
@@ -96,9 +96,9 @@ private:
void OpenFileAux(CheckerContext &C, const CallExpr *CE) const;
- const ProgramState *CheckNullStream(SVal SV, const ProgramState *state,
+ ProgramStateRef CheckNullStream(SVal SV, ProgramStateRef state,
CheckerContext &C) const;
- const ProgramState *CheckDoubleClose(const CallExpr *CE, const ProgramState *state,
+ ProgramStateRef CheckDoubleClose(const CallExpr *CE, ProgramStateRef state,
CheckerContext &C) const;
};
@@ -115,10 +115,7 @@ namespace ento {
}
bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
- const FunctionDecl *FD = L.getAsFunctionDecl();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return false;
@@ -221,17 +218,18 @@ void StreamChecker::Tmpfile(CheckerContext &C, const CallExpr *CE) const {
}
void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
unsigned Count = C.getCurrentBlockCount();
SValBuilder &svalBuilder = C.getSValBuilder();
+  const LocationContext *LCtx = C.getLocationContext();
DefinedSVal RetVal =
- cast<DefinedSVal>(svalBuilder.getConjuredSymbolVal(0, CE, Count));
- state = state->BindExpr(CE, RetVal);
+ cast<DefinedSVal>(svalBuilder.getConjuredSymbolVal(0, CE, LCtx, Count));
+  state = state->BindExpr(CE, LCtx, RetVal);
ConstraintManager &CM = C.getConstraintManager();
// Bifurcate the state into two: one with a valid FILE* pointer, the other
// with a NULL.
- const ProgramState *stateNotNull, *stateNull;
+ ProgramStateRef stateNotNull, stateNull;
llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
if (SymbolRef Sym = RetVal.getAsSymbol()) {
@@ -247,29 +245,32 @@ void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
}
void StreamChecker::Fclose(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = CheckDoubleClose(CE, C.getState(), C);
+ ProgramStateRef state = CheckDoubleClose(CE, C.getState(), C);
if (state)
C.addTransition(state);
}
void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(3)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(3)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!(state = CheckNullStream(state->getSVal(CE->getArg(0)), state, C)))
+ ProgramStateRef state = C.getState();
+ if (!(state = CheckNullStream(state->getSVal(CE->getArg(0),
+ C.getLocationContext()), state, C)))
return;
// Check the legality of the 'whence' argument of 'fseek'.
- SVal Whence = state->getSVal(CE->getArg(2));
+ SVal Whence = state->getSVal(CE->getArg(2), C.getLocationContext());
const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Whence);
if (!CI)
@@ -279,7 +280,7 @@ void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
if (x >= 0 && x <= 2)
return;
- if (ExplodedNode *N = C.generateNode(state)) {
+ if (ExplodedNode *N = C.addTransition(state)) {
if (!BT_illegalwhence)
BT_illegalwhence.reset(new BuiltinBug("Illegal whence argument",
"The whence argument to fseek() should be "
@@ -291,61 +292,69 @@ void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
}
void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
return;
}
void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) const {
- const ProgramState *state = C.getState();
- if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
return;
}
-const ProgramState *StreamChecker::CheckNullStream(SVal SV, const ProgramState *state,
+ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state,
CheckerContext &C) const {
const DefinedSVal *DV = dyn_cast<DefinedSVal>(&SV);
if (!DV)
return 0;
ConstraintManager &CM = C.getConstraintManager();
- const ProgramState *stateNotNull, *stateNull;
+ ProgramStateRef stateNotNull, stateNull;
llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
if (!stateNotNull && stateNull) {
@@ -361,10 +370,11 @@ const ProgramState *StreamChecker::CheckNullStream(SVal SV, const ProgramState *
return stateNotNull;
}
-const ProgramState *StreamChecker::CheckDoubleClose(const CallExpr *CE,
- const ProgramState *state,
+ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
+ ProgramStateRef state,
CheckerContext &C) const {
- SymbolRef Sym = state->getSVal(CE->getArg(0)).getAsSymbol();
+ SymbolRef Sym =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsSymbol();
if (!Sym)
return state;
@@ -399,7 +409,7 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
SymbolRef Sym = *I;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
const StreamState *SS = state->get<StreamState>(Sym);
if (!SS)
return;
@@ -418,23 +428,22 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
}
}
-void StreamChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
- ExprEngine &Eng) const {
- const ProgramState *state = B.getState();
+void StreamChecker::checkEndPath(CheckerContext &Ctx) const {
+ ProgramStateRef state = Ctx.getState();
typedef llvm::ImmutableMap<SymbolRef, StreamState> SymMap;
SymMap M = state->get<StreamState>();
for (SymMap::iterator I = M.begin(), E = M.end(); I != E; ++I) {
StreamState SS = I->second;
if (SS.isOpened()) {
- ExplodedNode *N = B.generateNode(state);
+ ExplodedNode *N = Ctx.addTransition(state);
if (N) {
if (!BT_ResourceLeak)
BT_ResourceLeak.reset(new BuiltinBug("Resource Leak",
"Opened File never closed. Potential Resource leak."));
BugReport *R = new BugReport(*BT_ResourceLeak,
BT_ResourceLeak->getDescription(), N);
- Eng.getBugReporter().EmitReport(R);
+ Ctx.EmitReport(R);
}
}
}
@@ -445,8 +454,8 @@ void StreamChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
if (!RetE)
return;
- const ProgramState *state = C.getState();
- SymbolRef Sym = state->getSVal(RetE).getAsSymbol();
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym = state->getSVal(RetE, C.getLocationContext()).getAsSymbol();
if (!Sym)
return;
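
Illustrative sketch (not from the commit): a minimal C function showing the two StreamChecker diagnostics touched above, the illegal 'whence' argument and the end-of-path resource leak; the file name and function are invented for the example:

    #include <stdio.h>

    void stream_demo(void) {
      FILE *fp = fopen("data.txt", "r");
      if (!fp)
        return;
      fseek(fp, 0L, 3);  /* warning: illegal whence argument (use SEEK_SET,
                            SEEK_CUR, or SEEK_END) */
    }                    /* warning at end of path: opened file never closed */
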
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
new file mode 100644
index 0000000..1133682
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -0,0 +1,62 @@
+//== TaintTesterChecker.cpp ----------------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker can be used for testing how taint data is propagated.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class TaintTesterChecker : public Checker< check::PostStmt<Expr> > {
+
+ mutable OwningPtr<BugType> BT;
+ void initBugType() const;
+
+ /// Given a pointer argument, get the symbol of the value it contains
+ /// (points to).
+ SymbolRef getPointedToSymbol(CheckerContext &C,
+ const Expr* Arg,
+ bool IssueWarning = true) const;
+
+public:
+ void checkPostStmt(const Expr *E, CheckerContext &C) const;
+};
+}
+
+inline void TaintTesterChecker::initBugType() const {
+ if (!BT)
+ BT.reset(new BugType("Tainted data", "General"));
+}
+
+void TaintTesterChecker::checkPostStmt(const Expr *E,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (!State)
+ return;
+
+ if (State->isTainted(E, C.getLocationContext())) {
+ if (ExplodedNode *N = C.addTransition()) {
+ initBugType();
+ BugReport *report = new BugReport(*BT, "tainted", N);
+ report->addRange(E->getSourceRange());
+ C.EmitReport(report);
+ }
+ }
+}
+
+void ento::registerTaintTesterChecker(CheckerManager &mgr) {
+ mgr.registerChecker<TaintTesterChecker>();
+}
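
Illustrative sketch (not from the commit): the kind of test input this debug checker is for, assuming a taint source such as scanf is modeled by the taint-propagation rules; every expression carrying taint gets a "tainted" report:

    #include <stdio.h>

    void taint_demo(void) {
      int n;
      scanf("%d", &n);   /* n becomes tainted */
      int m = n + 1;     /* 'n' and 'n + 1' are each reported as "tainted" */
      (void)m;
    }
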
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index b860b34..a30f6d5 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -24,12 +24,14 @@ using namespace ento;
namespace {
class UndefBranchChecker : public Checker<check::BranchCondition> {
- mutable llvm::OwningPtr<BuiltinBug> BT;
+ mutable OwningPtr<BuiltinBug> BT;
struct FindUndefExpr {
- const ProgramState *St;
+ ProgramStateRef St;
+ const LocationContext *LCtx;
- FindUndefExpr(const ProgramState *S) : St(S) {}
+ FindUndefExpr(ProgramStateRef S, const LocationContext *L)
+ : St(S), LCtx(L) {}
const Expr *FindExpr(const Expr *Ex) {
if (!MatchesCriteria(Ex))
@@ -45,25 +47,25 @@ class UndefBranchChecker : public Checker<check::BranchCondition> {
return Ex;
}
- bool MatchesCriteria(const Expr *Ex) { return St->getSVal(Ex).isUndef(); }
+ bool MatchesCriteria(const Expr *Ex) {
+ return St->getSVal(Ex, LCtx).isUndef();
+ }
};
public:
- void checkBranchCondition(const Stmt *Condition, BranchNodeBuilder &Builder,
- ExprEngine &Eng) const;
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const;
};
}
void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
- BranchNodeBuilder &Builder,
- ExprEngine &Eng) const {
- const ProgramState *state = Builder.getState();
- SVal X = state->getSVal(Condition);
+ CheckerContext &Ctx) const {
+ SVal X = Ctx.getState()->getSVal(Condition, Ctx.getLocationContext());
if (X.isUndef()) {
- ExplodedNode *N = Builder.generateNode(Condition, state);
+ // Generate a sink node, which implicitly marks both outgoing branches as
+ // infeasible.
+ ExplodedNode *N = Ctx.generateSink();
if (N) {
- N->markAsSink();
if (!BT)
BT.reset(
new BuiltinBug("Branch condition evaluates to a garbage value"));
@@ -86,25 +88,22 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
const Expr *Ex = cast<Expr>(Condition);
ExplodedNode *PrevN = *N->pred_begin();
ProgramPoint P = PrevN->getLocation();
- const ProgramState *St = N->getState();
+ ProgramStateRef St = N->getState();
if (PostStmt *PS = dyn_cast<PostStmt>(&P))
if (PS->getStmt() == Ex)
St = PrevN->getState();
- FindUndefExpr FindIt(St);
+ FindUndefExpr FindIt(St, Ctx.getLocationContext());
Ex = FindIt.FindExpr(Ex);
// Emit the bug report.
BugReport *R = new BugReport(*BT, BT->getDescription(), N);
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex));
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex, R));
R->addRange(Ex->getSourceRange());
- Eng.getBugReporter().EmitReport(R);
+ Ctx.EmitReport(R);
}
-
- Builder.markInfeasible(true);
- Builder.markInfeasible(false);
}
}
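
Illustrative sketch (not from the commit): a garbage branch condition; with this change the report is emitted from a sink node, which by itself marks both outgoing branches infeasible:

    int branch_demo(void) {
      int x;     /* never initialized */
      if (x)     /* warning: branch condition evaluates to a garbage value */
        return 1;
      return 0;
    }
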
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 2aebed9..d57767e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -25,23 +26,23 @@ using namespace ento;
namespace {
class UndefCapturedBlockVarChecker
: public Checker< check::PostStmt<BlockExpr> > {
- mutable llvm::OwningPtr<BugType> BT;
+ mutable OwningPtr<BugType> BT;
public:
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
};
} // end anonymous namespace
-static const BlockDeclRefExpr *FindBlockDeclRefExpr(const Stmt *S,
- const VarDecl *VD){
- if (const BlockDeclRefExpr *BR = dyn_cast<BlockDeclRefExpr>(S))
+static const DeclRefExpr *FindBlockDeclRefExpr(const Stmt *S,
+ const VarDecl *VD) {
+ if (const DeclRefExpr *BR = dyn_cast<DeclRefExpr>(S))
if (BR->getDecl() == VD)
return BR;
for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
I!=E; ++I)
if (const Stmt *child = *I) {
- const BlockDeclRefExpr *BR = FindBlockDeclRefExpr(child, VD);
+ const DeclRefExpr *BR = FindBlockDeclRefExpr(child, VD);
if (BR)
return BR;
}
@@ -55,9 +56,10 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
if (!BE->getBlockDecl()->hasCaptures())
return;
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
const BlockDataRegion *R =
- cast<BlockDataRegion>(state->getSVal(BE).getAsRegion());
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
@@ -72,7 +74,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
continue;
// Get the VarRegion associated with VD in the local stack frame.
- const LocationContext *LC = C.getPredecessor()->getLocationContext();
+ const LocationContext *LC = C.getLocationContext();
VR = C.getSValBuilder().getRegionManager().getVarRegion(VD, LC);
SVal VRVal = state->getSVal(VR);
@@ -82,7 +84,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
BT.reset(new BuiltinBug("uninitialized variable captured by block"));
// Generate a bug report.
- llvm::SmallString<128> buf;
+ SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
os << "Variable '" << VD->getName()
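
Illustrative sketch (not from the commit): a block capturing an uninitialized variable (compile with -fblocks); the diagnostic text is assembled in the SmallString buffer above:

    void block_demo(void) {
      int x;                     /* uninitialized */
      void (^blk)(void) = ^{     /* warning: variable 'x' is uninitialized
                                    when captured by block */
        int y = x;
        (void)y;
      };
      blk();
    }
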
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 7ae9668..c3c9ed7 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -18,6 +18,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
@@ -26,7 +27,7 @@ namespace {
class UndefResultChecker
: public Checker< check::PostStmt<BinaryOperator> > {
- mutable llvm::OwningPtr<BugType> BT;
+ mutable OwningPtr<BugType> BT;
public:
void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -35,8 +36,9 @@ public:
void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
CheckerContext &C) const {
- const ProgramState *state = C.getState();
- if (state->getSVal(B).isUndef()) {
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ if (state->getSVal(B, LCtx).isUndef()) {
// Generate an error node.
ExplodedNode *N = C.generateSink();
if (!N)
@@ -45,16 +47,16 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
if (!BT)
BT.reset(new BuiltinBug("Result of operation is garbage or undefined"));
- llvm::SmallString<256> sbuf;
+ SmallString<256> sbuf;
llvm::raw_svector_ostream OS(sbuf);
const Expr *Ex = NULL;
bool isLeft = true;
- if (state->getSVal(B->getLHS()).isUndef()) {
+ if (state->getSVal(B->getLHS(), LCtx).isUndef()) {
Ex = B->getLHS()->IgnoreParenCasts();
isLeft = true;
}
- else if (state->getSVal(B->getRHS()).isUndef()) {
+ else if (state->getSVal(B->getRHS(), LCtx).isUndef()) {
Ex = B->getRHS()->IgnoreParenCasts();
isLeft = false;
}
@@ -74,10 +76,12 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
BugReport *report = new BugReport(*BT, OS.str(), N);
if (Ex) {
report->addRange(Ex->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex));
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
+ report));
}
else
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, B));
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, B,
+ report));
C.EmitReport(report);
}
}
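
Illustrative sketch (not from the commit): a binary operation with a garbage operand; the checker now evaluates both operands through the location context to name the undefined side:

    int result_demo(void) {
      int a = 1;
      int b;          /* uninitialized */
      return a + b;   /* warning: the right operand of '+' is a garbage value */
    }
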
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index bb6831b..0297c4e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -24,7 +24,7 @@ using namespace ento;
namespace {
class UndefinedArraySubscriptChecker
: public Checker< check::PreStmt<ArraySubscriptExpr> > {
- mutable llvm::OwningPtr<BugType> BT;
+ mutable OwningPtr<BugType> BT;
public:
void checkPreStmt(const ArraySubscriptExpr *A, CheckerContext &C) const;
@@ -34,7 +34,7 @@ public:
void
UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
CheckerContext &C) const {
- if (C.getState()->getSVal(A->getIdx()).isUndef()) {
+ if (C.getState()->getSVal(A->getIdx(), C.getLocationContext()).isUndef()) {
if (ExplodedNode *N = C.generateSink()) {
if (!BT)
BT.reset(new BuiltinBug("Array subscript is undefined"));
@@ -43,7 +43,8 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
BugReport *R = new BugReport(*BT, BT->getName(), N);
R->addRange(A->getIdx()->getSourceRange());
R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- A->getIdx()));
+ A->getIdx(),
+ R));
C.EmitReport(R);
}
}
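
Illustrative sketch (not from the commit): an undefined index flagged by the checker above:

    int subscript_demo(int *buf) {
      int i;          /* uninitialized */
      return buf[i];  /* warning: array subscript is undefined */
    }
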
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 5ca4a9f..78f7fa6 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -24,7 +24,7 @@ using namespace ento;
namespace {
class UndefinedAssignmentChecker
: public Checker<check::Bind> {
- mutable llvm::OwningPtr<BugType> BT;
+ mutable OwningPtr<BugType> BT;
public:
void checkBind(SVal location, SVal val, const Stmt *S,
@@ -54,8 +54,8 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
while (StoreE) {
if (const BinaryOperator *B = dyn_cast<BinaryOperator>(StoreE)) {
if (B->isCompoundAssignmentOp()) {
- const ProgramState *state = C.getState();
- if (state->getSVal(B->getLHS()).isUndef()) {
+ ProgramStateRef state = C.getState();
+ if (state->getSVal(B->getLHS(), C.getLocationContext()).isUndef()) {
str = "The left expression of the compound assignment is an "
"uninitialized value. The computed value will also be garbage";
ex = B->getLHS();
@@ -78,7 +78,7 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
BugReport *R = new BugReport(*BT, str, N);
if (ex) {
R->addRange(ex->getSourceRange());
- R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, ex));
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, ex, R));
}
C.EmitReport(R);
}
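
Illustrative sketch (not from the commit), covering both the plain-bind and the compound-assignment diagnostics handled above:

    void assign_demo(void) {
      int x;       /* uninitialized */
      int y = x;   /* warning: assigned value is garbage or undefined */
      int z;
      z += 1;      /* warning: the left expression of the compound
                      assignment is an uninitialized value */
      (void)y; (void)z;
    }
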
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index cec286d..60e665fe 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -19,6 +19,8 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include <fcntl.h>
@@ -28,7 +30,7 @@ using llvm::Optional;
namespace {
class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable llvm::OwningPtr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
+ mutable OwningPtr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
mutable Optional<uint64_t> Val_O_CREAT;
public:
@@ -36,10 +38,24 @@ public:
void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
+ void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const;
typedef void (UnixAPIChecker::*SubChecker)(CheckerContext &,
const CallExpr *) const;
+private:
+ bool ReportZeroByteAllocation(CheckerContext &C,
+ ProgramStateRef falseState,
+ const Expr *arg,
+ const char *fn_name) const;
+ void BasicAllocationCheck(CheckerContext &C,
+ const CallExpr *CE,
+ const unsigned numArgs,
+ const unsigned sizeArg,
+ const char *fn) const;
};
} //end anonymous namespace
@@ -47,11 +63,11 @@ public:
// Utility functions.
//===----------------------------------------------------------------------===//
-static inline void LazyInitialize(llvm::OwningPtr<BugType> &BT,
+static inline void LazyInitialize(OwningPtr<BugType> &BT,
const char *name) {
if (BT)
return;
- BT.reset(new BugType(name, "Unix API"));
+ BT.reset(new BugType(name, categories::UnixAPI));
}
//===----------------------------------------------------------------------===//
@@ -74,7 +90,7 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
}
// Look at the 'oflags' argument for the O_CREAT flag.
- const ProgramState *state = C.getState();
+ ProgramStateRef state = C.getState();
if (CE->getNumArgs() < 2) {
// The frontend should issue a warning for this case, so this is a sanity
@@ -84,7 +100,7 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
// Now check if oflags has O_CREAT set.
const Expr *oflagsEx = CE->getArg(1);
- const SVal V = state->getSVal(oflagsEx);
+ const SVal V = state->getSVal(oflagsEx, C.getLocationContext());
if (!isa<NonLoc>(V)) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
@@ -102,7 +118,7 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
DefinedSVal maskedFlags = cast<DefinedSVal>(maskedFlagsUC);
// Check if maskedFlags is non-zero.
- const ProgramState *trueState, *falseState;
+ ProgramStateRef trueState, falseState;
llvm::tie(trueState, falseState) = state->assume(maskedFlags);
// Only emit an error if the value of 'maskedFlags' is properly
@@ -141,8 +157,9 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
// Check if the first argument is stack allocated. If so, issue a warning
// because that's likely to be bad news.
- const ProgramState *state = C.getState();
- const MemRegion *R = state->getSVal(CE->getArg(0)).getAsRegion();
+ ProgramStateRef state = C.getState();
+ const MemRegion *R =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
@@ -150,7 +167,7 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
if (!N)
return;
- llvm::SmallString<256> S;
+ SmallString<256> S;
llvm::raw_svector_ostream os(S);
os << "Call to 'pthread_once' uses";
if (const VarRegion *VR = dyn_cast<VarRegion>(R))
@@ -170,78 +187,157 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
}
//===----------------------------------------------------------------------===//
-// "malloc" with allocation size 0
+// "calloc", "malloc", "realloc", "alloca" and "valloc" with allocation size 0
//===----------------------------------------------------------------------===//
+// FIXME: Eventually these should be rolled into the MallocChecker, but right now
+// they're more basic and valuable for widespread use.
+
+// Returns true if the argument is constrained to be a zero-byte allocation,
+// false otherwise. Fills in trueState and falseState.
+static bool IsZeroByteAllocation(ProgramStateRef state,
+ const SVal argVal,
+ ProgramStateRef *trueState,
+ ProgramStateRef *falseState) {
+ llvm::tie(*trueState, *falseState) =
+ state->assume(cast<DefinedSVal>(argVal));
+
+ return (*falseState && !*trueState);
+}
-// FIXME: Eventually this should be rolled into the MallocChecker, but this
-// check is more basic and is valuable for widespread use.
-void UnixAPIChecker::CheckMallocZero(CheckerContext &C,
- const CallExpr *CE) const {
+// Generates an error report, indicating that the function whose name is given
+// will perform a zero byte allocation.
+// Returns false if an error occurred, true otherwise.
+bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
+ ProgramStateRef falseState,
+ const Expr *arg,
+ const char *fn_name) const {
+ ExplodedNode *N = C.generateSink(falseState);
+ if (!N)
+ return false;
+
+ LazyInitialize(BT_mallocZero,
+ "Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to '" << fn_name << "' has an allocation size of 0 bytes";
+ BugReport *report = new BugReport(*BT_mallocZero, os.str(), N);
- // Sanity check that malloc takes one argument.
- if (CE->getNumArgs() != 1)
+ report->addRange(arg->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, arg,
+ report));
+ C.EmitReport(report);
+
+ return true;
+}
+
+// Performs a basic check for 0-sized allocations suitable for most of the
+// functions below (modulo "calloc").
+void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
+ const CallExpr *CE,
+ const unsigned numArgs,
+ const unsigned sizeArg,
+ const char *fn) const {
+ // Sanity check for the correct number of arguments
+ if (CE->getNumArgs() != numArgs)
return;
// Check if the allocation size is 0.
- const ProgramState *state = C.getState();
- SVal argVal = state->getSVal(CE->getArg(0));
+ ProgramStateRef state = C.getState();
+ ProgramStateRef trueState = NULL, falseState = NULL;
+ const Expr *arg = CE->getArg(sizeArg);
+ SVal argVal = state->getSVal(arg, C.getLocationContext());
if (argVal.isUnknownOrUndef())
return;
-
- const ProgramState *trueState, *falseState;
- llvm::tie(trueState, falseState) = state->assume(cast<DefinedSVal>(argVal));
-
+
// Is the value perfectly constrained to zero?
- if (falseState && !trueState) {
- ExplodedNode *N = C.generateSink(falseState);
- if (!N)
- return;
-
- // FIXME: Add reference to CERT advisory, and/or C99 standard in bug
- // output.
+ if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
+ (void) ReportZeroByteAllocation(C, falseState, arg, fn);
+ return;
+ }
+ // Assume that the value is non-zero going forward.
+ assert(trueState);
+ if (trueState != state)
+ C.addTransition(trueState);
+}
- LazyInitialize(BT_mallocZero, "Undefined allocation of 0 bytes");
-
- BugReport *report =
- new BugReport(*BT_mallocZero, "Call to 'malloc' has an allocation"
- " size of 0 bytes", N);
- report->addRange(CE->getArg(0)->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
- CE->getArg(0)));
- C.EmitReport(report);
+void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ unsigned int nArgs = CE->getNumArgs();
+ if (nArgs != 2)
return;
+
+ ProgramStateRef state = C.getState();
+ ProgramStateRef trueState = NULL, falseState = NULL;
+
+ unsigned int i;
+ for (i = 0; i < nArgs; i++) {
+ const Expr *arg = CE->getArg(i);
+ SVal argVal = state->getSVal(arg, C.getLocationContext());
+ if (argVal.isUnknownOrUndef()) {
+ if (i == 0)
+ continue;
+ else
+ return;
+ }
+
+ if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
+ if (ReportZeroByteAllocation(C, falseState, arg, "calloc"))
+ return;
+ else if (i == 0)
+ continue;
+ else
+ return;
+ }
}
+
// Assume that the value is non-zero going forward.
assert(trueState);
- if (trueState != state) {
+ if (trueState != state)
C.addTransition(trueState);
- }
}
-
+
+void UnixAPIChecker::CheckMallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "malloc");
+}
+
+void UnixAPIChecker::CheckReallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 2, 1, "realloc");
+}
+
+void UnixAPIChecker::CheckAllocaZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "alloca");
+}
+
+void UnixAPIChecker::CheckVallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "valloc");
+}
+
+
//===----------------------------------------------------------------------===//
// Central dispatch function.
//===----------------------------------------------------------------------===//
-void UnixAPIChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
- // Get the callee. All the functions we care about are C functions
- // with simple identifiers.
- const ProgramState *state = C.getState();
- const Expr *Callee = CE->getCallee();
- const FunctionDecl *Fn = state->getSVal(Callee).getAsFunctionDecl();
-
- if (!Fn)
- return;
-
- const IdentifierInfo *FI = Fn->getIdentifier();
- if (!FI)
+void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef FName = C.getCalleeName(CE);
+ if (FName.empty())
return;
SubChecker SC =
- llvm::StringSwitch<SubChecker>(FI->getName())
+ llvm::StringSwitch<SubChecker>(FName)
.Case("open", &UnixAPIChecker::CheckOpen)
.Case("pthread_once", &UnixAPIChecker::CheckPthreadOnce)
+ .Case("calloc", &UnixAPIChecker::CheckCallocZero)
.Case("malloc", &UnixAPIChecker::CheckMallocZero)
+ .Case("realloc", &UnixAPIChecker::CheckReallocZero)
+ .Cases("alloca", "__builtin_alloca", &UnixAPIChecker::CheckAllocaZero)
+ .Case("valloc", &UnixAPIChecker::CheckVallocZero)
.Default(NULL);
if (SC)
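
Illustrative sketch (not from the commit): calls the extended dispatch above now routes to the zero-byte-allocation checks; each function below would be flagged:

    #include <stdlib.h>

    void *m(void)    { return malloc(0); }      /* 0-byte malloc */
    void *c(void)    { return calloc(0, 4); }   /* 0-element calloc */
    void *r(void *p) { return realloc(p, 0); }  /* 0-byte realloc */
    void *v(void)    { return valloc(0); }      /* 0-byte valloc */
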
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 459ee65..5a13ed0 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -24,7 +24,7 @@
#include "clang/AST/ParentMap.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
// The number of CFGBlock pointers we want to reserve memory for. This is used
// once for each function we analyze.
@@ -54,10 +54,11 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
BugReporter &B,
ExprEngine &Eng) const {
CFGBlocksSet reachable, visited;
-
+
if (Eng.hasWorkRemaining())
return;
+ const Decl *D = 0;
CFG *C = 0;
ParentMap *PM = 0;
const LocationContext *LC = 0;
@@ -67,9 +68,11 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
const ProgramPoint &P = I->getLocation();
LC = P.getLocationContext();
+ if (!D)
+ D = LC->getAnalysisDeclContext()->getDecl();
// Save the CFG if we don't have it already
if (!C)
- C = LC->getAnalysisContext()->getUnoptimizedCFG();
+ C = LC->getAnalysisDeclContext()->getUnoptimizedCFG();
if (!PM)
PM = &LC->getParentMap();
@@ -80,10 +83,15 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
}
// Bail out if we didn't get the CFG or the ParentMap.
- if (!C || !PM)
+ if (!D || !C || !PM)
return;
-
- ASTContext &Ctx = B.getContext();
+
+ // Don't do anything for template instantiations. Proving that code
+ // in a template instantiation is unreachable means proving that it is
+ // unreachable in all instantiations.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isTemplateInstantiation())
+ return;
// Find CFGBlocks that were not covered by any node
for (CFG::const_iterator I = C->begin(), E = C->end(); I != E; ++I) {
@@ -108,6 +116,14 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
if (CB->size() > 0 && isInvalidPath(CB, *PM))
continue;
+ // It is good practice to always have a "default" label in a "switch", even
+ // if we should never get there. It can be used to detect errors, for
+ // instance. Unreachable code directly under a "default" label is therefore
+ // likely to be a false positive.
+ if (const Stmt *label = CB->getLabel())
+ if (label->getStmtClass() == Stmt::DefaultStmtClass)
+ continue;
+
// Special case for __builtin_unreachable.
// FIXME: This should be extended to include other unreachable markers,
// such as llvm_unreachable.
@@ -117,7 +133,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
ci != ce; ++ci) {
if (const CFGStmt *S = (*ci).getAs<CFGStmt>())
if (const CallExpr *CE = dyn_cast<CallExpr>(S->getStmt())) {
- if (CE->isBuiltinCall(Ctx) == Builtin::BI__builtin_unreachable) {
+ if (CE->isBuiltinCall() == Builtin::BI__builtin_unreachable) {
foundUnreachable = true;
break;
}
@@ -146,8 +162,8 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
if (SM.isInSystemHeader(SL) || SM.isInExternCSystemHeader(SL))
continue;
- B.EmitBasicReport("Unreachable code", "Dead code", "This statement is never"
- " executed", DL, SR);
+ B.EmitBasicReport(D, "Unreachable code", "Dead code",
+ "This statement is never executed", DL, SR);
}
}
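
Illustrative sketch (not from the commit): the basic diagnostic this checker emits; with the changes above, defensive code directly under a 'default' label and code inside template instantiations are no longer reported:

    int unreachable_demo(int x) {
      if (x > 0)
        return 1;
      if (x > 0)    /* already known false on this path */
        return 2;   /* warning: this statement is never executed */
      return 0;
    }
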
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index b34b97c..38c9cc1 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -20,20 +20,61 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
using namespace clang;
using namespace ento;
namespace {
class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
- mutable llvm::OwningPtr<BugType> BT_zero;
- mutable llvm::OwningPtr<BugType> BT_undef;
-
+ mutable OwningPtr<BugType> BT;
+ enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted };
+
+ void reportBug(VLASize_Kind Kind,
+ const Expr *SizeE,
+ ProgramStateRef State,
+ CheckerContext &C) const;
public:
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
};
} // end anonymous namespace
+void VLASizeChecker::reportBug(VLASize_Kind Kind,
+ const Expr *SizeE,
+ ProgramStateRef State,
+ CheckerContext &C) const {
+ // Generate an error node.
+ ExplodedNode *N = C.generateSink(State);
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug("Dangerous variable-length array (VLA) declaration"));
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Declared variable-length array (VLA) ";
+ switch (Kind) {
+ case VLA_Garbage:
+ os << "uses a garbage value as its size";
+ break;
+ case VLA_Zero:
+ os << "has zero size";
+ break;
+ case VLA_Tainted:
+ os << "has tainted size";
+ break;
+ }
+
+ BugReport *report = new BugReport(*BT, os.str(), N);
+ report->addRange(SizeE->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, SizeE,
+ report));
+ C.EmitReport(report);
+ return;
+}
+
void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (!DS->isSingleDecl())
return;
@@ -49,24 +90,11 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
// FIXME: Handle multi-dimensional VLAs.
const Expr *SE = VLA->getSizeExpr();
- const ProgramState *state = C.getState();
- SVal sizeV = state->getSVal(SE);
+ ProgramStateRef state = C.getState();
+ SVal sizeV = state->getSVal(SE, C.getLocationContext());
if (sizeV.isUndef()) {
- // Generate an error node.
- ExplodedNode *N = C.generateSink();
- if (!N)
- return;
-
- if (!BT_undef)
- BT_undef.reset(new BuiltinBug("Declared variable-length array (VLA) "
- "uses a garbage value as its size"));
-
- BugReport *report =
- new BugReport(*BT_undef, BT_undef->getName(), N);
- report->addRange(SE->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, SE));
- C.EmitReport(report);
+ reportBug(VLA_Garbage, SE, state, C);
return;
}
@@ -75,23 +103,20 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (sizeV.isUnknown())
return;
+ // Check if the size is tainted.
+ if (state->isTainted(sizeV)) {
+ reportBug(VLA_Tainted, SE, 0, C);
+ return;
+ }
+
// Check if the size is zero.
DefinedSVal sizeD = cast<DefinedSVal>(sizeV);
- const ProgramState *stateNotZero, *stateZero;
+ ProgramStateRef stateNotZero, stateZero;
llvm::tie(stateNotZero, stateZero) = state->assume(sizeD);
if (stateZero && !stateNotZero) {
- ExplodedNode *N = C.generateSink(stateZero);
- if (!BT_zero)
- BT_zero.reset(new BuiltinBug("Declared variable-length array (VLA) has "
- "zero size"));
-
- BugReport *report =
- new BugReport(*BT_zero, BT_zero->getName(), N);
- report->addRange(SE->getSourceRange());
- report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, SE));
- C.EmitReport(report);
+ reportBug(VLA_Zero, SE, stateZero, C);
return;
}
@@ -117,7 +142,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
cast<NonLoc>(EleSizeVal), SizeTy);
// Finally, assume that the array's extent matches the given size.
- const LocationContext *LC = C.getPredecessor()->getLocationContext();
+ const LocationContext *LC = C.getLocationContext();
DefinedOrUnknownSVal Extent =
state->getRegion(VD, LC)->getExtent(svalBuilder);
DefinedOrUnknownSVal ArraySize = cast<DefinedOrUnknownSVal>(ArraySizeVal);
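
Illustrative sketch (not from the commit) of the three report kinds unified in reportBug above; the tainted case additionally assumes a modeled taint source such as scanf:

    #include <stdio.h>

    void vla_garbage(void) {
      int n;       /* uninitialized */
      int a[n];    /* warning: declared VLA uses a garbage value as its size */
      (void)a;
    }

    void vla_zero(void) {
      int z = 0;
      int b[z];    /* warning: declared VLA has zero size */
      (void)b;
    }

    void vla_tainted(void) {
      int t;
      scanf("%d", &t);
      int c[t];    /* warning: declared VLA has tainted size */
      (void)c;
    }
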
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
new file mode 100644
index 0000000..f7c7c0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -0,0 +1,241 @@
+//=======- VirtualCallChecker.cpp --------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that checks virtual function calls during
+// construction or destruction of C++ objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext *AC;
+
+ typedef const CallExpr * WorkListUnit;
+ typedef SmallVector<WorkListUnit, 20> DFSWorkList;
+
+ /// A vector representing the worklist which has a chain of CallExprs.
+ DFSWorkList WList;
+
+ // PreVisited : A CallExpr to this FunctionDecl is in the worklist, but the
+ // body has not been visited yet.
+ // PostVisited : A CallExpr to this FunctionDecl is in the worklist, and the
+ // body has been visited.
+ enum Kind { NotVisited,
+ PreVisited, /**< A CallExpr to this FunctionDecl is in the
+ worklist, but the body has not yet been
+ visited. */
+ PostVisited /**< A CallExpr to this FunctionDecl is in the
+ worklist, and the body has been visited. */
+ } K;
+
+ /// A DenseMap that records visited states of FunctionDecls.
+ llvm::DenseMap<const FunctionDecl *, Kind> VisitedFunctions;
+
+ /// The CallExpr whose body is currently being visited. This is used for
+ /// generating bug reports. This is null while visiting the body of a
+ /// constructor or destructor.
+ const CallExpr *visitingCallExpr;
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext *ac)
+ : BR(br),
+ AC(ac),
+ visitingCallExpr(0) {}
+
+ bool hasWork() const { return !WList.empty(); }
+
+ /// This method adds a CallExpr to the worklist and marks the callee as
+ /// being PreVisited.
+ void Enqueue(WorkListUnit WLUnit) {
+ const FunctionDecl *FD = WLUnit->getDirectCallee();
+ if (!FD || !FD->getBody())
+ return;
+ Kind &K = VisitedFunctions[FD];
+ if (K != NotVisited)
+ return;
+ K = PreVisited;
+ WList.push_back(WLUnit);
+ }
+
+ /// This method returns an item from the worklist without removing it.
+ WorkListUnit Dequeue() {
+ assert(!WList.empty());
+ return WList.back();
+ }
+
+ void Execute() {
+ while (hasWork()) {
+ WorkListUnit WLUnit = Dequeue();
+ const FunctionDecl *FD = WLUnit->getDirectCallee();
+ assert(FD && FD->getBody());
+
+ if (VisitedFunctions[FD] == PreVisited) {
+ // The callee is PreVisited; walk its body now.
+ SaveAndRestore<const CallExpr *> SaveCall(visitingCallExpr, WLUnit);
+ Visit(FD->getBody());
+
+ // Mark the function as being PostVisited to indicate we have
+ // scanned the body.
+ VisitedFunctions[FD] = PostVisited;
+ continue;
+ }
+
+ // Otherwise, the callee is PostVisited.
+ // Remove it from the worklist.
+ assert(VisitedFunctions[FD] == PostVisited);
+ WList.pop_back();
+ }
+ }
+
+ // Stmt visitor methods.
+ void VisitCallExpr(CallExpr *CE);
+ void VisitCXXMemberCallExpr(CallExpr *CE);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitChildren(Stmt *S);
+
+ void ReportVirtualCall(const CallExpr *CE, bool isPure);
+
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ VisitChildren(CE);
+ Enqueue(CE);
+}
+
+void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
+ VisitChildren(CE);
+ bool callIsNonVirtual = false;
+
+ // Several situations in which we skip the check.
+ if (MemberExpr *CME = dyn_cast<MemberExpr>(CE->getCallee())) {
+ // If the member access is fully qualified (i.e., X::F), then treat
+ // this as a non-virtual call and do not warn.
+ if (CME->getQualifier())
+ callIsNonVirtual = true;
+
+ // Elide analyzing the call entirely if the base pointer is not 'this'.
+ if (Expr *base = CME->getBase()->IgnoreImpCasts())
+ if (!isa<CXXThisExpr>(base))
+ return;
+ }
+
+ // Get the callee.
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CE->getDirectCallee());
+ if (MD && MD->isVirtual() && !callIsNonVirtual)
+ ReportVirtualCall(CE, MD->isPure());
+
+ Enqueue(CE);
+}
+
+void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Call Path : ";
+ // Name of the CallExpr currently being visited.
+ os << *CE->getDirectCallee();
+
+ // Name of the CallExpr whose body we are currently walking.
+ if (visitingCallExpr)
+ os << " <-- " << *visitingCallExpr->getDirectCallee();
+ // Names of FunctionDecls in worklist with state PostVisited.
+ for (SmallVectorImpl<const CallExpr *>::iterator I = WList.end(),
+ E = WList.begin(); I != E; --I) {
+ const FunctionDecl *FD = (*(I-1))->getDirectCallee();
+ assert(FD);
+ if (VisitedFunctions[FD] == PostVisited)
+ os << " <-- " << *FD;
+ }
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ SourceRange R = CE->getCallee()->getSourceRange();
+
+ if (isPure) {
+ os << "\n" << "Calling a pure virtual function during construction or "
+ << "destruction may lead to undefined behaviour";
+ BR.EmitBasicReport(AC->getDecl(),
+ "Call pure virtual function during construction or "
+ "destruction",
+ "Cplusplus",
+ os.str(), CELoc, &R, 1);
+ return;
+ }
+ else {
+ os << "\n" << "Calling virtual functions during construction or "
+ << "destruction will never dispatch to a more derived class";
+ BR.EmitBasicReport(AC->getDecl(),
+ "Call virtual function during construction or "
+ "destruction",
+ "Cplusplus",
+ os.str(), CELoc, &R, 1);
+ return;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// VirtualCallChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VirtualCallChecker : public Checker<check::ASTDecl<CXXRecordDecl> > {
+public:
+ void checkASTDecl(const CXXRecordDecl *RD, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, mgr.getAnalysisDeclContext(RD));
+
+ // Check the constructors.
+ for (CXXRecordDecl::ctor_iterator I = RD->ctor_begin(), E = RD->ctor_end();
+ I != E; ++I) {
+ if (!I->isCopyOrMoveConstructor())
+ if (Stmt *Body = I->getBody()) {
+ walker.Visit(Body);
+ walker.Execute();
+ }
+ }
+
+ // Check the destructor.
+ if (CXXDestructorDecl *DD = RD->getDestructor())
+ if (Stmt *Body = DD->getBody()) {
+ walker.Visit(Body);
+ walker.Execute();
+ }
+ }
+};
+}
+
+void ento::registerVirtualCallChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VirtualCallChecker>();
+}
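
Illustrative sketch (not from the commit); the class and method names are invented:

    class Widget {
    public:
      Widget() { init(); }           // warning: a virtual call in a constructor
                                     // never dispatches to a derived class
      virtual ~Widget() { reset(); } // warning: reset() is pure virtual here,
                                     // so the call is undefined behaviour
      virtual void init();
      virtual void reset() = 0;
    };
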
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AggExprVisitor.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AggExprVisitor.cpp
deleted file mode 100644
index 0936d61..0000000
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AggExprVisitor.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-//=-- AggExprVisitor.cpp - evaluating expressions of C++ class type -*- C++ -*-=
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines AggExprVisitor class, which contains lots of boiler
-// plate code for evaluating expressions of C++ class type.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/AST/StmtVisitor.h"
-
-using namespace clang;
-using namespace ento;
-
-namespace {
-/// AggExprVisitor is designed after AggExprEmitter of the CodeGen module. It
-/// is used for evaluating exprs of C++ object type. Evaluating such exprs
-/// requires a destination pointer pointing to the object being evaluated
-/// into. Passing such a pointer around would pollute the Visit* interface of
-/// ExprEngine. AggExprVisitor encapsulates code that goes through various
-/// cast and construct exprs (and others), and at the final point, dispatches
-/// back to the ExprEngine to let the real evaluation logic happen.
-class AggExprVisitor : public StmtVisitor<AggExprVisitor> {
- const MemRegion *Dest;
- ExplodedNode *Pred;
- ExplodedNodeSet &DstSet;
- ExprEngine &Eng;
-
-public:
- AggExprVisitor(const MemRegion *dest, ExplodedNode *N, ExplodedNodeSet &dst,
- ExprEngine &eng)
- : Dest(dest), Pred(N), DstSet(dst), Eng(eng) {}
-
- void VisitCastExpr(CastExpr *E);
- void VisitCXXConstructExpr(CXXConstructExpr *E);
- void VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
-};
-}
-
-void AggExprVisitor::VisitCastExpr(CastExpr *E) {
- switch (E->getCastKind()) {
- default:
- llvm_unreachable("Unhandled cast kind");
- case CK_NoOp:
- case CK_ConstructorConversion:
- case CK_UserDefinedConversion:
- Visit(E->getSubExpr());
- break;
- }
-}
-
-void AggExprVisitor::VisitCXXConstructExpr(CXXConstructExpr *E) {
- Eng.VisitCXXConstructExpr(E, Dest, Pred, DstSet);
-}
-
-void AggExprVisitor::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
- Eng.Visit(E, Pred, DstSet);
-}
-
-void ExprEngine::VisitAggExpr(const Expr *E, const MemRegion *Dest,
- ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- AggExprVisitor(Dest, Pred, Dst, *this).Visit(const_cast<Expr *>(E));
-}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index 17ec70d..82ac8bd 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -14,6 +14,8 @@
using namespace clang;
using namespace ento;
+void AnalysisManager::anchor() { }
+
AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
const LangOptions &lang,
PathDiagnosticConsumer *pd,
@@ -25,17 +27,27 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
bool vizdot, bool vizubi,
AnalysisPurgeMode purge,
bool eager, bool trim,
- bool inlinecall, bool useUnoptimizedCFG,
+ bool useUnoptimizedCFG,
bool addImplicitDtors, bool addInitializers,
- bool eagerlyTrimEGraph)
+ bool eagerlyTrimEGraph,
+ AnalysisIPAMode ipa,
+ unsigned inlineMaxStack,
+ unsigned inlineMaxFunctionSize,
+ AnalysisInliningMode IMode,
+ bool NoRetry)
: AnaCtxMgr(useUnoptimizedCFG, addImplicitDtors, addInitializers),
- Ctx(ctx), Diags(diags), LangInfo(lang), PD(pd),
+ Ctx(ctx), Diags(diags), LangOpts(lang), PD(pd),
CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
CheckerMgr(checkerMgr), Idxer(idxer),
AScope(ScopeDecl), MaxNodes(maxnodes), MaxVisit(maxvisit),
VisualizeEGDot(vizdot), VisualizeEGUbi(vizubi), PurgeDead(purge),
- EagerlyAssume(eager), TrimGraph(trim), InlineCall(inlinecall),
- EagerlyTrimEGraph(eagerlyTrimEGraph)
+ EagerlyAssume(eager), TrimGraph(trim),
+ EagerlyTrimEGraph(eagerlyTrimEGraph),
+ IPAMode(ipa),
+ InlineMaxStackDepth(inlineMaxStack),
+ InlineMaxFunctionSize(inlineMaxFunctionSize),
+ InliningMode(IMode),
+ NoRetryExhausted(NoRetry)
{
AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
}
@@ -46,7 +58,7 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
ParentAM.AnaCtxMgr.getCFGBuildOptions().AddImplicitDtors,
ParentAM.AnaCtxMgr.getCFGBuildOptions().AddInitializers),
Ctx(ctx), Diags(diags),
- LangInfo(ParentAM.LangInfo), PD(ParentAM.getPathDiagnosticConsumer()),
+ LangOpts(ParentAM.LangOpts), PD(ParentAM.getPathDiagnosticConsumer()),
CreateStoreMgr(ParentAM.CreateStoreMgr),
CreateConstraintMgr(ParentAM.CreateConstraintMgr),
CheckerMgr(ParentAM.CheckerMgr),
@@ -59,15 +71,19 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
PurgeDead(ParentAM.PurgeDead),
EagerlyAssume(ParentAM.EagerlyAssume),
TrimGraph(ParentAM.TrimGraph),
- InlineCall(ParentAM.InlineCall),
- EagerlyTrimEGraph(ParentAM.EagerlyTrimEGraph)
+ EagerlyTrimEGraph(ParentAM.EagerlyTrimEGraph),
+ IPAMode(ParentAM.IPAMode),
+ InlineMaxStackDepth(ParentAM.InlineMaxStackDepth),
+ InlineMaxFunctionSize(ParentAM.InlineMaxFunctionSize),
+ InliningMode(ParentAM.InliningMode),
+ NoRetryExhausted(ParentAM.NoRetryExhausted)
{
AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
}
-AnalysisContext *
-AnalysisManager::getAnalysisContextInAnotherTU(const Decl *D) {
+AnalysisDeclContext *
+AnalysisManager::getAnalysisDeclContextInAnotherTU(const Decl *D) {
idx::Entity Ent = idx::Entity::get(const_cast<Decl *>(D),
Idxer->getProgram());
FunctionDecl *FuncDef;
@@ -77,7 +93,7 @@ AnalysisManager::getAnalysisContextInAnotherTU(const Decl *D) {
if (FuncDef == 0)
return 0;
- // This AnalysisContext wraps function definition in another translation unit.
+ // This AnalysisDeclContext wraps a function definition in another translation unit.
// But it is still owned by the AnalysisManager associated with the current
// translation unit.
return AnaCtxMgr.getContext(FuncDef, TU);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
index 6c748b6..2d9addd 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
@@ -56,59 +56,59 @@ public:
: SimpleConstraintManager(subengine),
ISetFactory(statemgr.getAllocator()) {}
- const ProgramState *assumeSymNE(const ProgramState *state,
+ ProgramStateRef assumeSymNE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymEQ(const ProgramState *state,
+ ProgramStateRef assumeSymEQ(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymLT(const ProgramState *state,
+ ProgramStateRef assumeSymLT(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymGT(const ProgramState *state,
+ ProgramStateRef assumeSymGT(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymGE(const ProgramState *state,
+ ProgramStateRef assumeSymGE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymLE(const ProgramState *state,
+ ProgramStateRef assumeSymLE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment);
- const ProgramState *AddEQ(const ProgramState *state,
+ ProgramStateRef AddEQ(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V);
- const ProgramState *AddNE(const ProgramState *state,
+ ProgramStateRef AddNE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V);
- const llvm::APSInt* getSymVal(const ProgramState *state,
+ const llvm::APSInt* getSymVal(ProgramStateRef state,
SymbolRef sym) const;
- bool isNotEqual(const ProgramState *state,
+ bool isNotEqual(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V) const;
- bool isEqual(const ProgramState *state,
+ bool isEqual(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V) const;
- const ProgramState *removeDeadBindings(const ProgramState *state,
+ ProgramStateRef removeDeadBindings(ProgramStateRef state,
SymbolReaper& SymReaper);
- void print(const ProgramState *state,
+ void print(ProgramStateRef state,
raw_ostream &Out,
const char* nl,
const char *sep);
@@ -122,8 +122,8 @@ ento::CreateBasicConstraintManager(ProgramStateManager& statemgr,
return new BasicConstraintManager(statemgr, subengine);
}
-const ProgramState*
-BasicConstraintManager::assumeSymNE(const ProgramState *state,
+ProgramStateRef
+BasicConstraintManager::assumeSymNE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
@@ -143,8 +143,8 @@ BasicConstraintManager::assumeSymNE(const ProgramState *state,
return AddNE(state, sym, Adjusted);
}
-const ProgramState*
-BasicConstraintManager::assumeSymEQ(const ProgramState *state,
+ProgramStateRef
+BasicConstraintManager::assumeSymEQ(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
@@ -165,8 +165,8 @@ BasicConstraintManager::assumeSymEQ(const ProgramState *state,
}
// The logic for these will be handled in another ConstraintManager.
-const ProgramState*
-BasicConstraintManager::assumeSymLT(const ProgramState *state,
+ProgramStateRef
+BasicConstraintManager::assumeSymLT(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
@@ -180,8 +180,8 @@ BasicConstraintManager::assumeSymLT(const ProgramState *state,
return assumeSymNE(state, sym, V, Adjustment);
}
-const ProgramState*
-BasicConstraintManager::assumeSymGT(const ProgramState *state,
+ProgramStateRef
+BasicConstraintManager::assumeSymGT(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
@@ -195,8 +195,8 @@ BasicConstraintManager::assumeSymGT(const ProgramState *state,
return assumeSymNE(state, sym, V, Adjustment);
}
-const ProgramState*
-BasicConstraintManager::assumeSymGE(const ProgramState *state,
+ProgramStateRef
+BasicConstraintManager::assumeSymGE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
@@ -224,8 +224,8 @@ BasicConstraintManager::assumeSymGE(const ProgramState *state,
return state;
}
-const ProgramState*
-BasicConstraintManager::assumeSymLE(const ProgramState *state,
+ProgramStateRef
+BasicConstraintManager::assumeSymLE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt &V,
const llvm::APSInt &Adjustment) {
@@ -253,14 +253,14 @@ BasicConstraintManager::assumeSymLE(const ProgramState *state,
return state;
}
-const ProgramState *BasicConstraintManager::AddEQ(const ProgramState *state,
+ProgramStateRef BasicConstraintManager::AddEQ(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V) {
// Create a new state with the old binding replaced.
return state->set<ConstEq>(sym, &state->getBasicVals().getValue(V));
}
-const ProgramState *BasicConstraintManager::AddNE(const ProgramState *state,
+ProgramStateRef BasicConstraintManager::AddNE(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V) {
@@ -275,13 +275,13 @@ const ProgramState *BasicConstraintManager::AddNE(const ProgramState *state,
return state->set<ConstNotEq>(sym, S);
}
-const llvm::APSInt* BasicConstraintManager::getSymVal(const ProgramState *state,
+const llvm::APSInt* BasicConstraintManager::getSymVal(ProgramStateRef state,
SymbolRef sym) const {
const ConstEqTy::data_type* T = state->get<ConstEq>(sym);
return T ? *T : NULL;
}
-bool BasicConstraintManager::isNotEqual(const ProgramState *state,
+bool BasicConstraintManager::isNotEqual(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V) const {
@@ -292,7 +292,7 @@ bool BasicConstraintManager::isNotEqual(const ProgramState *state,
return T ? T->contains(&state->getBasicVals().getValue(V)) : false;
}
-bool BasicConstraintManager::isEqual(const ProgramState *state,
+bool BasicConstraintManager::isEqual(ProgramStateRef state,
SymbolRef sym,
const llvm::APSInt& V) const {
// Retrieve the EQ-set associated with the given symbol.
@@ -303,8 +303,8 @@ bool BasicConstraintManager::isEqual(const ProgramState *state,
/// Scan all symbols referenced by the constraints. If the symbol is not alive
/// as marked in LSymbols, mark it as dead in DSymbols.
-const ProgramState*
-BasicConstraintManager::removeDeadBindings(const ProgramState *state,
+ProgramStateRef
+BasicConstraintManager::removeDeadBindings(ProgramStateRef state,
SymbolReaper& SymReaper) {
ConstEqTy CE = state->get<ConstEq>();
@@ -329,7 +329,7 @@ BasicConstraintManager::removeDeadBindings(const ProgramState *state,
return state->set<ConstNotEq>(CNE);
}
-void BasicConstraintManager::print(const ProgramState *state,
+void BasicConstraintManager::print(ProgramStateRef state,
raw_ostream &Out,
const char* nl, const char *sep) {
// Print equality constraints.
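
Illustrative sketch (not from the commit): how checker code consumes this interface after the change, trafficking in ProgramStateRef rather than raw const ProgramState pointers; 'C' is assumed to be a CheckerContext and 'Cond' a DefinedSVal already in hand:

    ProgramStateRef state = C.getState();
    ProgramStateRef stateTrue, stateFalse;
    llvm::tie(stateTrue, stateFalse) = state->assume(Cond);
    if (stateFalse && !stateTrue) {
      // 'Cond' is provably false on this path; report or transition here.
    }
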
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index fbbdb04..a264212 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/AST/ASTContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtObjC.h"
@@ -25,8 +26,10 @@
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include <queue>
using namespace clang;
@@ -34,6 +37,8 @@ using namespace ento;
BugReporterVisitor::~BugReporterVisitor() {}
+void BugReporterContext::anchor() {}
+
//===----------------------------------------------------------------------===//
// Helper routines for walking the ExplodedGraph and fetching statements.
//===----------------------------------------------------------------------===//
@@ -106,6 +111,59 @@ GetCurrentOrNextStmt(const ExplodedNode *N) {
}
//===----------------------------------------------------------------------===//
+// Diagnostic cleanup.
+//===----------------------------------------------------------------------===//
+
+/// Recursively scan through a path and prune out call and macro pieces
+/// that aren't needed. Return true if afterwards the path contains
+/// "interesting stuff", which means it shouldn't be pruned from the parent
+/// path.
+static bool RemoveUnneededCalls(PathPieces &pieces) {
+ bool containsSomethingInteresting = false;
+ const unsigned N = pieces.size();
+
+ for (unsigned i = 0 ; i < N ; ++i) {
+ // Remove the front piece from the path. If it is still something we
+ // want to keep once we are done, we will push it back on the end.
+ IntrusiveRefCntPtr<PathDiagnosticPiece> piece(pieces.front());
+ pieces.pop_front();
+
+ switch (piece->getKind()) {
+ case PathDiagnosticPiece::Call: {
+ PathDiagnosticCallPiece *call = cast<PathDiagnosticCallPiece>(piece);
+ // Recursively clean out the subclass. Keep this call around if
+ // it contains any informative diagnostics.
+ if (!RemoveUnneededCalls(call->path))
+ continue;
+ containsSomethingInteresting = true;
+ break;
+ }
+ case PathDiagnosticPiece::Macro: {
+ PathDiagnosticMacroPiece *macro = cast<PathDiagnosticMacroPiece>(piece);
+ if (!RemoveUnneededCalls(macro->subPieces))
+ continue;
+ containsSomethingInteresting = true;
+ break;
+ }
+ case PathDiagnosticPiece::Event: {
+ PathDiagnosticEventPiece *event = cast<PathDiagnosticEventPiece>(piece);
+ // We only throw away an event here if it is marked prunable; otherwise
+ // an event is discarded wholesale only when its entire path is discarded.
+ if (event->isPrunable())
+ continue;
+ containsSomethingInteresting = true;
+ break;
+ }
+ case PathDiagnosticPiece::ControlFlow:
+ break;
+ }
+
+ pieces.push_back(piece);
+ }
+
+ return containsSomethingInteresting;
+}
+
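
The pruning routine above filters the PathPieces deque in place by rotation: each piece is popped from the front and, if it survives, pushed on the back, so after exactly N iterations the deque holds the kept pieces in their original order with no second container. A self-contained sketch of the same rotation over integers (a hypothetical stand-in, not analyzer code):

#include <deque>

// Keep only "interesting" elements (here: odd numbers), preserving order.
// After N = d.size() rotations the deque contains exactly the survivors.
static bool pruneUninteresting(std::deque<int> &d) {
  bool containsSomethingInteresting = false;
  const unsigned N = d.size();
  for (unsigned i = 0; i < N; ++i) {
    int front = d.front();
    d.pop_front();
    if (front % 2 == 0)       // not interesting: drop it
      continue;
    containsSomethingInteresting = true;
    d.push_back(front);       // interesting: rotate to the back
  }
  return containsSomethingInteresting;
}
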
+//===----------------------------------------------------------------------===//
// PathDiagnosticBuilder and its associated routines and helper objects.
//===----------------------------------------------------------------------===//
@@ -128,14 +186,17 @@ public:
class PathDiagnosticBuilder : public BugReporterContext {
BugReport *R;
PathDiagnosticConsumer *PDC;
- llvm::OwningPtr<ParentMap> PM;
+ OwningPtr<ParentMap> PM;
NodeMapClosure NMC;
public:
+ const LocationContext *LC;
+
PathDiagnosticBuilder(GRBugReporter &br,
BugReport *r, NodeBackMap *Backmap,
PathDiagnosticConsumer *pdc)
: BugReporterContext(br),
- R(r), PDC(pdc), NMC(Backmap) {}
+ R(r), PDC(pdc), NMC(Backmap), LC(r->getErrorNode()->getLocationContext())
+ {}
PathDiagnosticLocation ExecutionContinues(const ExplodedNode *N);
@@ -145,12 +206,8 @@ public:
BugReport *getBugReport() { return R; }
Decl const &getCodeDecl() { return R->getErrorNode()->getCodeDecl(); }
-
- const LocationContext* getLocationContext() {
- return R->getErrorNode()->getLocationContext();
- }
-
- ParentMap& getParentMap() { return R->getErrorNode()->getParentMap(); }
+
+ ParentMap& getParentMap() { return LC->getParentMap(); }
const Stmt *getParent(const Stmt *S) {
return getParentMap().getParent(S);
@@ -173,7 +230,7 @@ public:
PathDiagnosticLocation
PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode *N) {
if (const Stmt *S = GetNextStmt(N))
- return PathDiagnosticLocation(S, getSourceManager(), getLocationContext());
+ return PathDiagnosticLocation(S, getSourceManager(), LC);
return PathDiagnosticLocation::createDeclEnd(N->getLocationContext(),
getSourceManager());
@@ -234,7 +291,6 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
assert(S && "Null Stmt *passed to getEnclosingStmtLocation");
ParentMap &P = getParentMap();
SourceManager &SMgr = getSourceManager();
- const LocationContext *LC = getLocationContext();
while (IsNested(S, P)) {
const Stmt *Parent = P.getParentIgnoreParens(S);
@@ -322,208 +378,87 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
}
//===----------------------------------------------------------------------===//
-// ScanNotableSymbols: closure-like callback for scanning Store bindings.
+// "Minimal" path diagnostic generation algorithm.
//===----------------------------------------------------------------------===//
-
-static const VarDecl* GetMostRecentVarDeclBinding(const ExplodedNode *N,
- ProgramStateManager& VMgr,
- SVal X) {
-
- for ( ; N ; N = N->pred_empty() ? 0 : *N->pred_begin()) {
-
- ProgramPoint P = N->getLocation();
-
- if (!isa<PostStmt>(P))
- continue;
-
- const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(cast<PostStmt>(P).getStmt());
-
- if (!DR)
- continue;
-
- SVal Y = N->getState()->getSVal(DR);
-
- if (X != Y)
- continue;
-
- const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
-
- if (!VD)
- continue;
-
- return VD;
- }
-
- return 0;
-}
-
-namespace {
-class NotableSymbolHandler
-: public StoreManager::BindingsHandler {
-
- SymbolRef Sym;
- const ProgramState *PrevSt;
- const Stmt *S;
- ProgramStateManager& VMgr;
- const ExplodedNode *Pred;
- PathDiagnostic& PD;
- BugReporter& BR;
-
-public:
-
- NotableSymbolHandler(SymbolRef sym,
- const ProgramState *prevst,
- const Stmt *s,
- ProgramStateManager& vmgr,
- const ExplodedNode *pred,
- PathDiagnostic& pd,
- BugReporter& br)
- : Sym(sym),
- PrevSt(prevst),
- S(s),
- VMgr(vmgr),
- Pred(pred),
- PD(pd),
- BR(br) {}
-
- bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
- SVal V) {
-
- SymbolRef ScanSym = V.getAsSymbol();
-
- if (ScanSym != Sym)
- return true;
-
- // Check if the previous state has this binding.
- SVal X = PrevSt->getSVal(loc::MemRegionVal(R));
-
- if (X == V) // Same binding?
- return true;
-
- // Different binding. Only handle assignments for now. We don't pull
- // this check out of the loop because we will eventually handle other
- // cases.
-
- VarDecl *VD = 0;
-
- if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
- if (!B->isAssignmentOp())
- return true;
-
- // What variable did we assign to?
- DeclRefExpr *DR = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenCasts());
-
- if (!DR)
- return true;
-
- VD = dyn_cast<VarDecl>(DR->getDecl());
- }
- else if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
- // FIXME: Eventually CFGs won't have DeclStmts. Right now we
- // assume that each DeclStmt has a single Decl. This invariant
- // holds by construction in the CFG.
- VD = dyn_cast<VarDecl>(*DS->decl_begin());
- }
-
- if (!VD)
- return true;
-
- // What is the most recently referenced variable with this binding?
- const VarDecl *MostRecent = GetMostRecentVarDeclBinding(Pred, VMgr, V);
-
- if (!MostRecent)
- return true;
-
- // Create the diagnostic.
- if (Loc::isLocType(VD->getType())) {
- llvm::SmallString<64> buf;
- llvm::raw_svector_ostream os(buf);
- os << '\'' << *VD << "' now aliases '" << *MostRecent << '\'';
- PathDiagnosticLocation L =
- PathDiagnosticLocation::createBegin(S, BR.getSourceManager(),
- Pred->getLocationContext());
- PD.push_front(new PathDiagnosticEventPiece(L, os.str()));
- }
-
- return true;
+typedef std::pair<PathDiagnosticCallPiece*, const ExplodedNode*> StackDiagPair;
+typedef SmallVector<StackDiagPair, 6> StackDiagVector;
+
+static void updateStackPiecesWithMessage(PathDiagnosticPiece *P,
+ StackDiagVector &CallStack) {
+ // If the piece contains a special message, add it to all the call
+ // pieces on the active stack.
+ if (PathDiagnosticEventPiece *ep =
+ dyn_cast<PathDiagnosticEventPiece>(P)) {
+
+ if (ep->hasCallStackHint())
+ for (StackDiagVector::iterator I = CallStack.begin(),
+ E = CallStack.end(); I != E; ++I) {
+ PathDiagnosticCallPiece *CP = I->first;
+ const ExplodedNode *N = I->second;
+ std::string stackMsg = ep->getCallStackMessage(N);
+
+ // The last message on the path to the final bug is the most important
+ // one. Since we traverse the path backwards, do not add the message
+ // if one has been previously added.
+ if (!CP->hasCallStackMessage())
+ CP->setCallStackMessage(stackMsg);
+ }
}
-};
-}
-
-static void HandleNotableSymbol(const ExplodedNode *N,
- const Stmt *S,
- SymbolRef Sym, BugReporter& BR,
- PathDiagnostic& PD) {
-
- const ExplodedNode *Pred = N->pred_empty() ? 0 : *N->pred_begin();
- const ProgramState *PrevSt = Pred ? Pred->getState() : 0;
-
- if (!PrevSt)
- return;
-
- // Look at the region bindings of the current state that map to the
- // specified symbol. Are any of them not in the previous state?
- ProgramStateManager& VMgr = cast<GRBugReporter>(BR).getStateManager();
- NotableSymbolHandler H(Sym, PrevSt, S, VMgr, Pred, PD, BR);
- cast<GRBugReporter>(BR).getStateManager().iterBindings(N->getState(), H);
}
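
updateStackPiecesWithMessage above copies an event's call-stack hint onto every call piece on the active stack, and because generation walks the path backwards, only the first message encountered — the one closest to the final bug — sticks to each call piece. A condensed sketch of that rule with stand-in types:

#include <string>
#include <vector>

struct CallPiece {
  std::string stackMsg;                    // empty means "no message yet"
};

// The path is traversed backwards (from the bug toward its origin), so
// the first hint seen is the one closest to the final bug; once a call
// piece has a message, later-seen (earlier-on-path) hints are ignored.
static void updateStack(std::vector<CallPiece*> &stack,
                        const std::string &msg) {
  for (std::vector<CallPiece*>::iterator I = stack.begin(),
                                         E = stack.end(); I != E; ++I)
    if ((*I)->stackMsg.empty())
      (*I)->stackMsg = msg;
}
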
-namespace {
-class ScanNotableSymbols
-: public StoreManager::BindingsHandler {
-
- llvm::SmallSet<SymbolRef, 10> AlreadyProcessed;
- const ExplodedNode *N;
- const Stmt *S;
- GRBugReporter& BR;
- PathDiagnostic& PD;
-
-public:
- ScanNotableSymbols(const ExplodedNode *n, const Stmt *s,
- GRBugReporter& br, PathDiagnostic& pd)
- : N(n), S(s), BR(br), PD(pd) {}
-
- bool HandleBinding(StoreManager& SMgr, Store store,
- const MemRegion* R, SVal V) {
-
- SymbolRef ScanSym = V.getAsSymbol();
-
- if (!ScanSym)
- return true;
-
- if (!BR.isNotable(ScanSym))
- return true;
-
- if (AlreadyProcessed.count(ScanSym))
- return true;
-
- AlreadyProcessed.insert(ScanSym);
-
- HandleNotableSymbol(N, S, ScanSym, BR, PD);
- return true;
- }
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// "Minimal" path diagnostic generation algorithm.
-//===----------------------------------------------------------------------===//
-
-static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM);
+static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
- const ExplodedNode *N) {
+ const ExplodedNode *N,
+ ArrayRef<BugReporterVisitor *> visitors) {
SourceManager& SMgr = PDB.getSourceManager();
- const LocationContext *LC = PDB.getLocationContext();
+ const LocationContext *LC = PDB.LC;
const ExplodedNode *NextNode = N->pred_empty()
? NULL : *(N->pred_begin());
+
+ StackDiagVector CallStack;
+
while (NextNode) {
N = NextNode;
+ PDB.LC = N->getLocationContext();
NextNode = GetPredecessorNode(N);
ProgramPoint P = N->getLocation();
+
+ if (const CallExit *CE = dyn_cast<CallExit>(&P)) {
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SMgr);
+ PD.getActivePath().push_front(C);
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ continue;
+ }
+
+ if (const CallEnter *CE = dyn_cast<CallEnter>(&P)) {
+ PD.popActivePath();
+ // The current active path should never be empty. Either we
+ // just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExit. If the front of the active
+ // path is not a PathDiagnosticCallPiece, it means that the
+ // path terminated within a function call. We must then take the
+ // current contents of the active path and place it within
+ // a new PathDiagnosticCallPiece.
+ assert(!PD.getActivePath().empty());
+ PathDiagnosticCallPiece *C =
+ dyn_cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ if (!C) {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ }
+ C->setCallee(*CE, SMgr);
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ continue;
+ }
if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
const CFGBlock *Src = BE->getSrc();
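
The CallExit/CallEnter handling above nests callee diagnostics by switching the "active path": since the walk is backwards, CallExit opens a call piece and makes its subpath active, and the matching CallEnter pops back to the caller. A minimal sketch of such a path stack — types and names are illustrative, not the PathDiagnostic API:

#include <deque>
#include <string>
#include <vector>

typedef std::deque<std::string> Path;

// Pieces are always appended to whatever path is on top of the stack,
// which nests callee diagnostics inside the call piece that owns them,
// in the spirit of pushActivePath/popActivePath.
class PathStack {
  Path TopLevel;
  std::vector<Path*> Active;
public:
  PathStack() { Active.push_back(&TopLevel); }
  Path &active() { return *Active.back(); }
  void push(Path &p) { Active.push_back(&p); }  // entering a callee's path
  void pop() { Active.pop_back(); }             // back to the caller
};
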
@@ -554,8 +489,8 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
os << "Control jumps to line "
<< End.asLocation().getExpansionLineNumber();
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
- os.str()));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
break;
}
@@ -606,13 +541,13 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
break;
}
}
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
else {
os << "'Default' branch taken. ";
const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
@@ -624,7 +559,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
break;
}
@@ -646,7 +581,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
break;
}
@@ -669,14 +604,14 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
PathDiagnosticLocation Start =
PathDiagnosticLocation::createOperatorLoc(B, SMgr);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
else {
os << "true";
PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
}
@@ -688,7 +623,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
os << "false";
PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
PathDiagnosticLocation End = PDB.ExecutionContinues(N);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
else {
@@ -696,7 +631,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
PathDiagnosticLocation Start =
PathDiagnosticLocation::createOperatorLoc(B, SMgr);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
}
@@ -715,7 +650,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
else {
@@ -724,7 +659,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Loop condition is false. Exiting loop"));
}
@@ -742,7 +677,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
os.str()));
}
else {
@@ -750,7 +685,7 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (const Stmt *S = End.asStmt())
End = PDB.getEnclosingStmtLocation(S);
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Loop condition is true. Entering loop body"));
}
@@ -764,10 +699,10 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
End = PDB.getEnclosingStmtLocation(S);
if (*(Src->succ_begin()+1) == Dst)
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Taking false branch"));
else
- PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
"Taking true branch"));
break;
@@ -778,24 +713,20 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
if (NextNode) {
// Add diagnostic pieces from custom visitors.
BugReport *R = PDB.getBugReport();
- for (BugReport::visitor_iterator I = R->visitor_begin(),
- E = R->visitor_end(); I!=E; ++I) {
- if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R))
- PD.push_front(p);
+ for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+ E = visitors.end();
+ I != E; ++I) {
+ if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R)) {
+ PD.getActivePath().push_front(p);
+ updateStackPiecesWithMessage(p, CallStack);
+ }
}
}
-
- if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
- // Scan the region bindings, and see if a "notable" symbol has a new
- // lval binding.
- ScanNotableSymbols SNS(N, PS->getStmt(), PDB.getBugReporter(), PD);
- PDB.getStateManager().iterBindings(N->getState(), SNS);
- }
}
// After constructing the full PathDiagnostic, do a pass over it to compact
// PathDiagnosticPieces that occur within a macro.
- CompactPathDiagnostic(PD, PDB.getSourceManager());
+ CompactPathDiagnostic(PD.getMutablePieces(), PDB.getSourceManager());
}
//===----------------------------------------------------------------------===//
@@ -879,7 +810,7 @@ class EdgeBuilder {
}
if (S != Original)
- L = PathDiagnosticLocation(S, L.getManager(), PDB.getLocationContext());
+ L = PathDiagnosticLocation(S, L.getManager(), PDB.LC);
}
if (firstCharOnly)
@@ -902,8 +833,8 @@ public:
// If the PathDiagnostic already has pieces, add the enclosing statement
// of the first piece as a context as well.
- if (!PD.empty()) {
- PrevLoc = PD.begin()->getLocation();
+ if (!PD.path.empty()) {
+ PrevLoc = (*PD.path.begin())->getLocation();
if (const Stmt *S = PrevLoc.asStmt())
addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
@@ -916,12 +847,18 @@ public:
// Finally, add an initial edge from the start location of the first
// statement (if it doesn't already exist).
PathDiagnosticLocation L = PathDiagnosticLocation::createDeclBegin(
- PDB.getLocationContext(),
+ PDB.LC,
PDB.getSourceManager());
if (L.isValid())
rawAddEdge(L);
}
+ void flushLocations() {
+ while (!CLocs.empty())
+ popLocation();
+ PrevLoc = PathDiagnosticLocation();
+ }
+
void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false);
void rawAddEdge(PathDiagnosticLocation NewLoc);
@@ -988,7 +925,7 @@ bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
SM.getExpansionColumnNumber(ContaineeRBeg)) &&
(ContainerEndLine != ContaineeEndLine ||
SM.getExpansionColumnNumber(ContainerREnd) >=
- SM.getExpansionColumnNumber(ContainerREnd)));
+ SM.getExpansionColumnNumber(ContaineeREnd)));
}
void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
@@ -1008,7 +945,7 @@ void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
PrevLocClean.asLocation().getExpansionLoc())
return;
- PD.push_front(new PathDiagnosticControlFlowPiece(NewLocClean, PrevLocClean));
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(NewLocClean, PrevLocClean));
PrevLoc = NewLoc;
}
@@ -1093,7 +1030,7 @@ void EdgeBuilder::addContext(const Stmt *S) {
if (!S)
return;
- PathDiagnosticLocation L(S, PDB.getSourceManager(), PDB.getLocationContext());
+ PathDiagnosticLocation L(S, PDB.getSourceManager(), PDB.LC);
while (!CLocs.empty()) {
const PathDiagnosticLocation &TopContextLoc = CLocs.back();
@@ -1116,9 +1053,11 @@ void EdgeBuilder::addContext(const Stmt *S) {
static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
- const ExplodedNode *N) {
+ const ExplodedNode *N,
+ ArrayRef<BugReporterVisitor *> visitors) {
EdgeBuilder EB(PD, PDB);
const SourceManager& SM = PDB.getSourceManager();
+ StackDiagVector CallStack;
const ExplodedNode *NextNode = N->pred_empty() ? NULL : *(N->pred_begin());
while (NextNode) {
@@ -1127,14 +1066,74 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
ProgramPoint P = N->getLocation();
do {
+ if (const CallExit *CE = dyn_cast<CallExit>(&P)) {
+ const StackFrameContext *LCtx =
+ CE->getLocationContext()->getCurrentStackFrame();
+ PathDiagnosticLocation Loc(LCtx->getCallSite(),
+ PDB.getSourceManager(),
+ LCtx);
+ EB.addEdge(Loc, true);
+ EB.flushLocations();
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SM);
+ PD.getActivePath().push_front(C);
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ break;
+ }
+
+ // Pop the call hierarchy if we are done walking the contents
+ // of a function call.
+ if (const CallEnter *CE = dyn_cast<CallEnter>(&P)) {
+ // Add an edge to the start of the function.
+ const Decl *D = CE->getCalleeContext()->getDecl();
+ PathDiagnosticLocation pos =
+ PathDiagnosticLocation::createBegin(D, SM);
+ EB.addEdge(pos);
+
+ // Flush all locations, and pop the active path.
+ EB.flushLocations();
+ PD.popActivePath();
+ assert(!PD.getActivePath().empty());
+ PDB.LC = N->getLocationContext();
+
+ // The current active path should never be empty. Either we
+ // just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExit. If the front of the active
+ // path is not a PathDiagnosticCallPiece, it means that the
+ // path terminated within a function call. We must then take the
+ // current contents of the active path and place it within
+ // a new PathDiagnosticCallPiece.
+ PathDiagnosticCallPiece *C =
+ dyn_cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ if (!C) {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ }
+ C->setCallee(*CE, SM);
+ EB.addContext(CE->getCallExpr());
+
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ break;
+ }
+
+ // Note that it is important that we update the LocationContext
+ // after looking at CallExits. CallExit basically adds an
+ // edge in the *caller*, so we don't want to update the LocationContext
+ // too soon.
+ PDB.LC = N->getLocationContext();
+
// Block edges.
- if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
const CFGBlock &Blk = *BE->getSrc();
const Stmt *Term = Blk.getTerminator();
// Are we jumping to the head of a loop? Add a special diagnostic.
if (const Stmt *Loop = BE->getDst()->getLoopTarget()) {
- PathDiagnosticLocation L(Loop, SM, PDB.getLocationContext());
+ PathDiagnosticLocation L(Loop, SM, PDB.LC);
const CompoundStmt *CS = NULL;
if (!Term) {
@@ -1147,9 +1146,10 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticEventPiece *p =
new PathDiagnosticEventPiece(L,
"Looping back to the head of the loop");
+ p->setPrunable(true);
EB.addEdge(p->getLocation(), true);
- PD.push_front(p);
+ PD.getActivePath().push_front(p);
if (CS) {
PathDiagnosticLocation BL =
@@ -1177,6 +1177,8 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
break;
}
+
+
} while (0);
if (!NextNode)
@@ -1184,12 +1186,15 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
// Add pieces from custom visitors.
BugReport *R = PDB.getBugReport();
- for (BugReport::visitor_iterator I = R->visitor_begin(),
- E = R->visitor_end(); I!=E; ++I) {
+ for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+ E = visitors.end();
+ I != E; ++I) {
if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R)) {
const PathDiagnosticLocation &Loc = p->getLocation();
EB.addEdge(Loc, true);
- PD.push_front(p);
+ PD.getActivePath().push_front(p);
+ updateStackPiecesWithMessage(p, CallStack);
+
if (const Stmt *S = Loc.asStmt())
EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
}
@@ -1204,10 +1209,14 @@ BugType::~BugType() { }
void BugType::FlushReports(BugReporter &BR) {}
+void BuiltinBug::anchor() {}
+
//===----------------------------------------------------------------------===//
// Methods for BugReport and subclasses.
//===----------------------------------------------------------------------===//
+void BugReport::NodeResolver::anchor() {}
+
void BugReport::addVisitor(BugReporterVisitor* visitor) {
if (!visitor)
return;
@@ -1222,7 +1231,8 @@ void BugReport::addVisitor(BugReporterVisitor* visitor) {
}
CallbacksSet.InsertNode(visitor, InsertPos);
- Callbacks = F.add(visitor, Callbacks);
+ Callbacks.push_back(visitor);
+ ++ConfigurationChangeToken;
}
BugReport::~BugReport() {
@@ -1231,10 +1241,24 @@ BugReport::~BugReport() {
}
}
+const Decl *BugReport::getDeclWithIssue() const {
+ if (DeclWithIssue)
+ return DeclWithIssue;
+
+ const ExplodedNode *N = getErrorNode();
+ if (!N)
+ return 0;
+
+ const LocationContext *LC = N->getLocationContext();
+ return LC->getCurrentStackFrame()->getDecl();
+}
+
void BugReport::Profile(llvm::FoldingSetNodeID& hash) const {
hash.AddPointer(&BT);
hash.AddString(Description);
- if (Location.isValid()) {
+ if (UniqueingLocation.isValid()) {
+ UniqueingLocation.Profile(hash);
+ } else if (Location.isValid()) {
Location.Profile(hash);
} else {
assert(ErrorNode);
@@ -1251,6 +1275,61 @@ void BugReport::Profile(llvm::FoldingSetNodeID& hash) const {
}
}
+void BugReport::markInteresting(SymbolRef sym) {
+ if (!sym)
+ return;
+
+ // If the symbol wasn't already in our set, note a configuration change.
+ if (interestingSymbols.insert(sym).second)
+ ++ConfigurationChangeToken;
+
+ if (const SymbolMetadata *meta = dyn_cast<SymbolMetadata>(sym))
+ interestingRegions.insert(meta->getRegion());
+}
+
+void BugReport::markInteresting(const MemRegion *R) {
+ if (!R)
+ return;
+
+ // If the base region wasn't already in our set, note a configuration change.
+ R = R->getBaseRegion();
+ if (interestingRegions.insert(R).second)
+ ++ConfigurationChangeToken;
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ interestingSymbols.insert(SR->getSymbol());
+}
+
+void BugReport::markInteresting(SVal V) {
+ markInteresting(V.getAsRegion());
+ markInteresting(V.getAsSymbol());
+}
+
+bool BugReport::isInteresting(SVal V) const {
+ return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol());
+}
+
+bool BugReport::isInteresting(SymbolRef sym) const {
+ if (!sym)
+ return false;
+ // We don't currently consider metadata symbols to be interesting
+ // even if we know their region is interesting. Is that correct behavior?
+ return interestingSymbols.count(sym);
+}
+
+bool BugReport::isInteresting(const MemRegion *R) const {
+ if (!R)
+ return false;
+ R = R->getBaseRegion();
+ bool b = interestingRegions.count(R);
+ if (b)
+ return true;
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ return interestingSymbols.count(SR->getSymbol());
+ return false;
+}
+
+
const Stmt *BugReport::getStmt() const {
if (!ErrorNode)
return 0;
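
The interestingness tracking above canonicalizes every region to its base region and cross-links regions with symbols, so a symbolic region counts as interesting whenever its underlying symbol does. A tiny sketch of the two-set scheme with stand-in types:

#include <set>

struct Region { const Region *base; const int *sym; /* null if none */ };

struct InterestSets {
  std::set<const Region*> regions;
  std::set<const int*>    symbols;

  void markInteresting(const Region *R) {
    if (!R) return;
    regions.insert(R->base);             // always canonicalize to the base
    if (R->sym) symbols.insert(R->sym);  // cross-link symbolic regions
  }
  bool isInteresting(const Region *R) const {
    if (!R) return false;
    if (regions.count(R->base)) return true;
    return R->sym && symbols.count(R->sym);  // fall back to the symbol
  }
};
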
@@ -1316,10 +1395,7 @@ PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const {
// Methods for BugReporter and subclasses.
//===----------------------------------------------------------------------===//
-BugReportEquivClass::~BugReportEquivClass() {
- for (iterator I=begin(), E=end(); I!=E; ++I) delete *I;
-}
-
+BugReportEquivClass::~BugReportEquivClass() { }
GRBugReporter::~GRBugReporter() { }
BugReporterData::~BugReporterData() {}
@@ -1394,8 +1470,8 @@ MakeReportGraph(const ExplodedGraph* G,
// Create owning pointers for GTrim and NMap just to ensure that they are
// released when this function exits.
- llvm::OwningPtr<ExplodedGraph> AutoReleaseGTrim(GTrim);
- llvm::OwningPtr<InterExplodedGraphMap> AutoReleaseNMap(NMap);
+ OwningPtr<ExplodedGraph> AutoReleaseGTrim(GTrim);
+ OwningPtr<InterExplodedGraphMap> AutoReleaseNMap(NMap);
// Find the (first) error node in the trimmed graph. We just need to consult
// the node map (NMap) which maps from nodes in the original graph to nodes
@@ -1513,19 +1589,28 @@ MakeReportGraph(const ExplodedGraph* G,
/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
/// and collapses PathDiagnosticPieces that are expanded by macros.
-static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
- typedef std::vector<std::pair<PathDiagnosticMacroPiece*, SourceLocation> >
- MacroStackTy;
+static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
+ typedef std::vector<std::pair<IntrusiveRefCntPtr<PathDiagnosticMacroPiece>,
+ SourceLocation> > MacroStackTy;
- typedef std::vector<PathDiagnosticPiece*>
+ typedef std::vector<IntrusiveRefCntPtr<PathDiagnosticPiece> >
PiecesTy;
MacroStackTy MacroStack;
PiecesTy Pieces;
- for (PathDiagnostic::iterator I = PD.begin(), E = PD.end(); I!=E; ++I) {
+ for (PathPieces::const_iterator I = path.begin(), E = path.end();
+ I!=E; ++I) {
+
+ PathDiagnosticPiece *piece = I->getPtr();
+
+ // Recursively compact calls.
+ if (PathDiagnosticCallPiece *call=dyn_cast<PathDiagnosticCallPiece>(piece)){
+ CompactPathDiagnostic(call->path, SM);
+ }
+
// Get the location of the PathDiagnosticPiece.
- const FullSourceLoc Loc = I->getLocation().asLocation();
+ const FullSourceLoc Loc = piece->getLocation().asLocation();
// Determine the instantiation location, which is the location we group
// related PathDiagnosticPieces.
@@ -1535,7 +1620,7 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
if (Loc.isFileID()) {
MacroStack.clear();
- Pieces.push_back(&*I);
+ Pieces.push_back(piece);
continue;
}
@@ -1543,13 +1628,13 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
// Is the PathDiagnosticPiece within the same macro group?
if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
- MacroStack.back().first->push_back(&*I);
+ MacroStack.back().first->subPieces.push_back(piece);
continue;
}
// We aren't in the same group. Are we descending into a new macro
// or are part of an old one?
- PathDiagnosticMacroPiece *MacroGroup = 0;
+ IntrusiveRefCntPtr<PathDiagnosticMacroPiece> MacroGroup;
SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
SM.getExpansionLoc(Loc) :
@@ -1574,10 +1659,10 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
// Create a new macro group and add it to the stack.
PathDiagnosticMacroPiece *NewGroup =
new PathDiagnosticMacroPiece(
- PathDiagnosticLocation::createSingleLocation(I->getLocation()));
+ PathDiagnosticLocation::createSingleLocation(piece->getLocation()));
if (MacroGroup)
- MacroGroup->push_back(NewGroup);
+ MacroGroup->subPieces.push_back(NewGroup);
else {
assert(InstantiationLoc.isFileID());
Pieces.push_back(NewGroup);
@@ -1588,21 +1673,14 @@ static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
}
// Finally, add the PathDiagnosticPiece to the group.
- MacroGroup->push_back(&*I);
+ MacroGroup->subPieces.push_back(piece);
}
// Now take the pieces and construct a new PathDiagnostic.
- PD.resetPath(false);
+ path.clear();
- for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I) {
- if (PathDiagnosticMacroPiece *MP=dyn_cast<PathDiagnosticMacroPiece>(*I))
- if (!MP->containsEvent()) {
- delete MP;
- continue;
- }
-
- PD.push_back(*I);
- }
+ for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I)
+ path.push_back(*I);
}
void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
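
CompactPathDiagnostic groups pieces that share a macro expansion location by keeping a stack of open macro groups keyed by instantiation location. A reduced sketch of the grouping pass, with locations shrunk to plain ints and only one level of macro nesting (the real pass also nests macros within macros, and keeps file-level pieces, which this sketch drops):

#include <vector>

struct Piece { int fileLoc; int macroLoc; };  // macroLoc == 0: not in a macro
struct MacroGroup { int loc; std::vector<Piece> subPieces; };

// Single pass over the flat piece list: consecutive pieces that share a
// macro expansion location collapse into the group on top of the stack;
// a file-level piece closes every open group.
static void compact(const std::vector<Piece> &in,
                    std::vector<MacroGroup> &groups) {
  std::vector<size_t> stack;                 // indices into 'groups'
  for (size_t i = 0; i != in.size(); ++i) {
    const Piece &p = in[i];
    if (p.macroLoc == 0) { stack.clear(); continue; }
    if (!stack.empty() && groups[stack.back()].loc == p.macroLoc) {
      groups[stack.back()].subPieces.push_back(p);   // same macro group
      continue;
    }
    MacroGroup g;                                    // open a new group
    g.loc = p.macroLoc;
    g.subPieces.push_back(p);
    groups.push_back(g);
    stack.push_back(groups.size() - 1);
  }
}
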
@@ -1626,8 +1704,8 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
BugReport *R = bugReports[GPair.second.second];
assert(R && "No original report found for sliced graph.");
- llvm::OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
- llvm::OwningPtr<NodeBackMap> BackMap(GPair.first.second);
+ OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
+ OwningPtr<NodeBackMap> BackMap(GPair.first.second);
const ExplodedNode *N = GPair.second.first;
// Start building the path diagnostic...
@@ -1638,32 +1716,61 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
R->addVisitor(new NilReceiverBRVisitor());
R->addVisitor(new ConditionBRVisitor());
- // Generate the very last diagnostic piece - the piece is visible before
- // the trace is expanded.
- PathDiagnosticPiece *LastPiece = 0;
- for (BugReport::visitor_iterator I = R->visitor_begin(),
- E = R->visitor_end(); I!=E; ++I) {
- if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
- assert (!LastPiece &&
- "There can only be one final piece in a diagnostic.");
- LastPiece = Piece;
+ BugReport::VisitorList visitors;
+ unsigned originalReportConfigToken, finalReportConfigToken;
+
+ // While generating diagnostics, it's possible the visitors will decide
+ // new symbols and regions are interesting, or add other visitors based on
+ // the information they find. If they do, we need to regenerate the path
+ // based on our new report configuration.
+ do {
+ // Get a clean copy of all the visitors.
+ for (BugReport::visitor_iterator I = R->visitor_begin(),
+ E = R->visitor_end(); I != E; ++I)
+ visitors.push_back((*I)->clone());
+
+ // Clear out the active path from any previous work.
+ PD.getActivePath().clear();
+ originalReportConfigToken = R->getConfigurationChangeToken();
+
+ // Generate the very last diagnostic piece - the piece is visible before
+ // the trace is expanded.
+ PathDiagnosticPiece *LastPiece = 0;
+ for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
+ I != E; ++I) {
+ if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
+ assert (!LastPiece &&
+ "There can only be one final piece in a diagnostic.");
+ LastPiece = Piece;
+ }
}
- }
- if (!LastPiece)
- LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
- if (LastPiece)
- PD.push_back(LastPiece);
- else
- return;
+ if (!LastPiece)
+ LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
+ if (LastPiece)
+ PD.getActivePath().push_back(LastPiece);
+ else
+ return;
- switch (PDB.getGenerationScheme()) {
+ switch (PDB.getGenerationScheme()) {
case PathDiagnosticConsumer::Extensive:
- GenerateExtensivePathDiagnostic(PD, PDB, N);
+ GenerateExtensivePathDiagnostic(PD, PDB, N, visitors);
break;
case PathDiagnosticConsumer::Minimal:
- GenerateMinimalPathDiagnostic(PD, PDB, N);
+ GenerateMinimalPathDiagnostic(PD, PDB, N, visitors);
break;
- }
+ }
+
+ // Clean up the visitors we used.
+ llvm::DeleteContainerPointers(visitors);
+
+ // Did anything change while generating this path?
+ finalReportConfigToken = R->getConfigurationChangeToken();
+ } while(finalReportConfigToken != originalReportConfigToken);
+
+ // Finally, prune the diagnostic path of uninteresting stuff.
+ bool hasSomethingInteresting = RemoveUnneededCalls(PD.getMutablePieces());
+ assert(hasSomethingInteresting);
+ (void) hasSomethingInteresting;
}
void BugReporter::Register(BugType *BT) {
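
The do/while loop added to GeneratePathDiagnostic above is a fixed-point iteration: visitors may mark new symbols or regions interesting, or add new visitors, while a path is being generated, and each such change bumps the report's configuration token. If the token moved during a pass, the stale path is discarded and generated again under the new configuration. The skeleton of that pattern, with stand-in types:

#include <vector>

struct Report { unsigned ConfigToken; };
typedef std::vector<int> Path;

// Stand-in for one generation pass; anything that would change the
// report's configuration (new interesting symbol, new visitor) bumps
// ConfigToken and thereby forces a retry.
static void generateOnce(Path &path, Report &r) {
  path.push_back(1);
  // e.g. if (foundNewInterestingSymbol) ++r.ConfigToken;
}

static void generateToFixedPoint(Path &path, Report &r) {
  unsigned before, after;
  do {
    before = r.ConfigToken;
    path.clear();                 // throw away the stale path
    generateOnce(path, r);
    after = r.ConfigToken;
  } while (after != before);      // retry if the config changed mid-pass
}
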
@@ -1711,17 +1818,17 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
assert(I != E);
- BugReport *R = *I;
- BugType& BT = R->getBugType();
+ BugType& BT = I->getBugType();
// If we don't need to suppress any of the nodes because they are
// post-dominated by a sink, simply add all the nodes in the equivalence class
// to 'Nodes'. Any of the reports will serve as a "representative" report.
if (!BT.isSuppressOnSink()) {
+ BugReport *R = I;
for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
const ExplodedNode *N = I->getErrorNode();
if (N) {
- R = *I;
+ R = I;
bugReports.push_back(R);
}
}
@@ -1737,8 +1844,7 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
BugReport *exampleReport = 0;
for (; I != E; ++I) {
- R = *I;
- const ExplodedNode *errorNode = R->getErrorNode();
+ const ExplodedNode *errorNode = I->getErrorNode();
if (!errorNode)
continue;
@@ -1748,9 +1854,9 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
}
// No successors? By definition this node isn't post-dominated by a sink.
if (errorNode->succ_empty()) {
- bugReports.push_back(R);
+ bugReports.push_back(I);
if (!exampleReport)
- exampleReport = R;
+ exampleReport = I;
continue;
}
@@ -1774,9 +1880,9 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
if (Succ->succ_empty()) {
// If we found an end-of-path node that is not a sink.
if (!Succ->isSink()) {
- bugReports.push_back(R);
+ bugReports.push_back(I);
if (!exampleReport)
- exampleReport = R;
+ exampleReport = I;
WL.clear();
break;
}
@@ -1857,8 +1963,9 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
// Probably doesn't make a difference in practice.
BugType& BT = exampleReport->getBugType();
- llvm::OwningPtr<PathDiagnostic>
- D(new PathDiagnostic(exampleReport->getBugType().getName(),
+ OwningPtr<PathDiagnostic>
+ D(new PathDiagnostic(exampleReport->getDeclWithIssue(),
+ exampleReport->getBugType().getName(),
!PD || PD->useVerboseDescription()
? exampleReport->getDescription()
: exampleReport->getShortDescription(),
@@ -1866,9 +1973,6 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
if (!bugReports.empty())
GeneratePathDiagnostic(*D.get(), bugReports);
-
- if (IsCachedDiagnostic(exampleReport, D.get()))
- return;
// Get the meta data.
const BugReport::ExtraTextList &Meta =
@@ -1883,24 +1987,23 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
llvm::tie(Beg, End) = exampleReport->getRanges();
DiagnosticsEngine &Diag = getDiagnostic();
- // Search the description for '%', as that will be interpretted as a
- // format character by FormatDiagnostics.
- StringRef desc = exampleReport->getShortDescription();
- unsigned ErrorDiag;
- {
- llvm::SmallString<512> TmpStr;
+ if (!IsCachedDiagnostic(exampleReport, D.get())) {
+ // Search the description for '%', as that will be interpreted as a
+ // format character by FormatDiagnostics.
+ StringRef desc = exampleReport->getShortDescription();
+
+ SmallString<512> TmpStr;
llvm::raw_svector_ostream Out(TmpStr);
- for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I)
+ for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) {
if (*I == '%')
Out << "%%";
else
Out << *I;
+ }
Out.flush();
- ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning, TmpStr);
- }
+ unsigned ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning, TmpStr);
- {
DiagnosticBuilder diagBuilder = Diag.Report(
exampleReport->getLocation(getSourceManager()).asLocation(), ErrorDiag);
for (BugReport::ranges_iterator I = Beg; I != End; ++I)
@@ -1911,25 +2014,21 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
if (!PD)
return;
- if (D->empty()) {
+ if (D->path.empty()) {
PathDiagnosticPiece *piece = new PathDiagnosticEventPiece(
exampleReport->getLocation(getSourceManager()),
exampleReport->getDescription());
+ for ( ; Beg != End; ++Beg)
+ piece->addRange(*Beg);
- for ( ; Beg != End; ++Beg) piece->addRange(*Beg);
- D->push_back(piece);
+ D->getActivePath().push_back(piece);
}
PD->HandlePathDiagnostic(D.take());
}
-void BugReporter::EmitBasicReport(StringRef name, StringRef str,
- PathDiagnosticLocation Loc,
- SourceRange* RBeg, unsigned NumRanges) {
- EmitBasicReport(name, "", str, Loc, RBeg, NumRanges);
-}
-
-void BugReporter::EmitBasicReport(StringRef name,
+void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
+ StringRef name,
StringRef category,
StringRef str, PathDiagnosticLocation Loc,
SourceRange* RBeg, unsigned NumRanges) {
@@ -1937,13 +2036,14 @@ void BugReporter::EmitBasicReport(StringRef name,
// 'BT' is owned by BugReporter.
BugType *BT = getBugTypeForName(name, category);
BugReport *R = new BugReport(*BT, str, Loc);
+ R->setDeclWithIssue(DeclWithIssue);
for ( ; NumRanges > 0 ; --NumRanges, ++RBeg) R->addRange(*RBeg);
EmitReport(R);
}
BugType *BugReporter::getBugTypeForName(StringRef name,
StringRef category) {
- llvm::SmallString<136> fullDesc;
+ SmallString<136> fullDesc;
llvm::raw_svector_ostream(fullDesc) << name << ":" << category;
llvm::StringMapEntry<BugType *> &
entry = StrBugTypes.GetOrCreateValue(fullDesc);
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 1abd8ba..6532486 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -20,6 +20,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
@@ -84,26 +85,8 @@ PathDiagnosticPiece*
BugReporterVisitor::getDefaultEndPath(BugReporterContext &BRC,
const ExplodedNode *EndPathNode,
BugReport &BR) {
- const ProgramPoint &PP = EndPathNode->getLocation();
- PathDiagnosticLocation L;
-
- if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&PP)) {
- const CFGBlock *block = BE->getBlock();
- if (block->getBlockID() == 0) {
- L = PathDiagnosticLocation::createDeclEnd(PP.getLocationContext(),
- BRC.getSourceManager());
- }
- }
-
- if (!L.isValid()) {
- const Stmt *S = BR.getStmt();
-
- if (!S)
- return NULL;
-
- L = PathDiagnosticLocation(S, BRC.getSourceManager(),
- PP.getLocationContext());
- }
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager());
BugReport::ranges_iterator Beg, End;
llvm::tie(Beg, End) = BR.getRanges();
@@ -138,17 +121,20 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
if (!StoreSite) {
const ExplodedNode *Node = N, *Last = NULL;
- for ( ; Node ; Last = Node, Node = Node->getFirstPred()) {
+ for ( ; Node ; Node = Node->getFirstPred()) {
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
if (const PostStmt *P = Node->getLocationAs<PostStmt>())
if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
if (DS->getSingleDecl() == VR->getDecl()) {
+ // Record the last seen initialization point.
Last = Node;
break;
}
}
+ // Does the region still bind to value V? If not, we are done
+ // looking for store sites.
if (Node->getState()->getSVal(R) != V)
break;
}
@@ -165,7 +151,7 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
return NULL;
satisfied = true;
- llvm::SmallString<256> sbuf;
+ SmallString<256> sbuf;
llvm::raw_svector_ostream os(sbuf);
if (const PostStmt *PS = N->getLocationAs<PostStmt>()) {
@@ -301,7 +287,8 @@ TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
BugReporterVisitor *
bugreporter::getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
- const Stmt *S) {
+ const Stmt *S,
+ BugReport *report) {
if (!S || !N)
return 0;
@@ -321,25 +308,27 @@ bugreporter::getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
if (!N)
return 0;
- const ProgramState *state = N->getState();
-
- // Walk through lvalue-to-rvalue conversions.
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- const VarRegion *R =
- StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+ ProgramStateRef state = N->getState();
- // What did we load?
- SVal V = state->getSVal(loc::MemRegionVal(R));
+ // Walk through lvalue-to-rvalue conversions.
+ const Expr *Ex = dyn_cast<Expr>(S);
+ if (Ex) {
+ Ex = Ex->IgnoreParenLValueCasts();
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const VarRegion *R =
+ StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
- if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)
- || V.isUndef()) {
+ // What did we load?
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ report->markInteresting(R);
+ report->markInteresting(V);
return new FindLastStoreBRVisitor(V, R);
}
}
}
- SVal V = state->getSValAsScalarOrLoc(S);
+ SVal V = state->getSValAsScalarOrLoc(S, N->getLocationContext());
// Uncomment this to find cases where we aren't properly getting the
// base value that was dereferenced.
@@ -353,7 +342,7 @@ bugreporter::getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
}
if (R) {
- assert(isa<SymbolicRegion>(R));
+ report->markInteresting(R);
return new TrackConstraintBRVisitor(loc::MemRegionVal(R), false);
}
}
@@ -366,7 +355,7 @@ FindLastStoreBRVisitor::createVisitorObject(const ExplodedNode *N,
const MemRegion *R) {
assert(R && "The memory region is null.");
- const ProgramState *state = N->getState();
+ ProgramStateRef state = N->getState();
SVal V = state->getSVal(R);
if (V.isUnknown())
return 0;
@@ -388,8 +377,8 @@ PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
const Expr *Receiver = ME->getInstanceReceiver();
if (!Receiver)
return 0;
- const ProgramState *state = N->getState();
- const SVal &V = state->getSVal(Receiver);
+ ProgramStateRef state = N->getState();
+ const SVal &V = state->getSVal(Receiver, N->getLocationContext());
const DefinedOrUnknownSVal *DV = dyn_cast<DefinedOrUnknownSVal>(&V);
if (!DV)
return 0;
@@ -400,11 +389,11 @@ PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
// The receiver was nil, and hence the method was skipped.
// Register a BugReporterVisitor to issue a message telling us how
// the receiver was null.
- BR.addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Receiver));
+ BR.addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Receiver, &BR));
// Issue a message saying that the method was skipped.
PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
N->getLocationContext());
- return new PathDiagnosticEventPiece(L, "No method actually called "
+ return new PathDiagnosticEventPiece(L, "No method is called "
"because the receiver is nil");
}
@@ -419,7 +408,7 @@ void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
const Stmt *Head = WorkList.front();
WorkList.pop_front();
- const ProgramState *state = N->getState();
+ ProgramStateRef state = N->getState();
ProgramStateManager &StateMgr = state->getStateManager();
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Head)) {
@@ -428,7 +417,7 @@ void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
// What did we load?
- SVal V = state->getSVal(S);
+ SVal V = state->getSVal(S, N->getLocationContext());
if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)) {
// Register a new visitor with the BugReport.
@@ -450,11 +439,22 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *Prev,
BugReporterContext &BRC,
BugReport &BR) {
+ PathDiagnosticPiece *piece = VisitNodeImpl(N, Prev, BRC, BR);
+ if (PathDiagnosticEventPiece *ev =
+ dyn_cast_or_null<PathDiagnosticEventPiece>(piece))
+ ev->setPrunable(true, /* override */ false);
+ return piece;
+}
+
+PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR) {
const ProgramPoint &progPoint = N->getLocation();
- const ProgramState *CurrentState = N->getState();
- const ProgramState *PrevState = Prev->getState();
+ ProgramStateRef CurrentState = N->getState();
+ ProgramStateRef PrevState = Prev->getState();
// Compare the GDMs of the state, because that is where constraints
// are managed. Note that we ensure that we only look at nodes that
@@ -468,7 +468,7 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNode(const ExplodedNode *N,
if (const BlockEdge *BE = dyn_cast<BlockEdge>(&progPoint)) {
const CFGBlock *srcBlk = BE->getSrc();
if (const Stmt *term = srcBlk->getTerminator())
- return VisitTerminator(term, N, srcBlk, BE->getDst(), BRC);
+ return VisitTerminator(term, N, srcBlk, BE->getDst(), BR, BRC);
return 0;
}
@@ -482,10 +482,10 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNode(const ExplodedNode *N,
const ProgramPointTag *tag = PS->getTag();
if (tag == tags.first)
return VisitTrueTest(cast<Expr>(PS->getStmt()), true,
- BRC, N->getLocationContext());
+ BRC, BR, N);
if (tag == tags.second)
return VisitTrueTest(cast<Expr>(PS->getStmt()), false,
- BRC, N->getLocationContext());
+ BRC, BR, N);
return 0;
}
@@ -498,6 +498,7 @@ ConditionBRVisitor::VisitTerminator(const Stmt *Term,
const ExplodedNode *N,
const CFGBlock *srcBlk,
const CFGBlock *dstBlk,
+ BugReport &R,
BugReporterContext &BRC) {
const Expr *Cond = 0;
@@ -516,14 +517,15 @@ ConditionBRVisitor::VisitTerminator(const Stmt *Term,
assert(srcBlk->succ_size() == 2);
const bool tookTrue = *(srcBlk->succ_begin()) == dstBlk;
return VisitTrueTest(Cond->IgnoreParenNoopCasts(BRC.getASTContext()),
- tookTrue, BRC, N->getLocationContext());
+ tookTrue, BRC, R, N);
}
PathDiagnosticPiece *
ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
bool tookTrue,
BugReporterContext &BRC,
- const LocationContext *LC) {
+ BugReport &R,
+ const ExplodedNode *N) {
const Expr *Ex = Cond;
@@ -533,9 +535,11 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
default:
return 0;
case Stmt::BinaryOperatorClass:
- return VisitTrueTest(Cond, cast<BinaryOperator>(Ex), tookTrue, BRC, LC);
+ return VisitTrueTest(Cond, cast<BinaryOperator>(Ex), tookTrue, BRC,
+ R, N);
case Stmt::DeclRefExprClass:
- return VisitTrueTest(Cond, cast<DeclRefExpr>(Ex), tookTrue, BRC, LC);
+ return VisitTrueTest(Cond, cast<DeclRefExpr>(Ex), tookTrue, BRC,
+ R, N);
case Stmt::UnaryOperatorClass: {
const UnaryOperator *UO = cast<UnaryOperator>(Ex);
if (UO->getOpcode() == UO_LNot) {
@@ -550,14 +554,31 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
}
bool ConditionBRVisitor::patternMatch(const Expr *Ex, llvm::raw_ostream &Out,
- BugReporterContext &BRC) {
+ BugReporterContext &BRC,
+ BugReport &report,
+ const ExplodedNode *N,
+ llvm::Optional<bool> &prunable) {
const Expr *OriginalExpr = Ex;
Ex = Ex->IgnoreParenCasts();
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
const bool quotes = isa<VarDecl>(DR->getDecl());
- if (quotes)
+ if (quotes) {
Out << '\'';
+ const LocationContext *LCtx = N->getLocationContext();
+ const ProgramState *state = N->getState().getPtr();
+ if (const MemRegion *R = state->getLValue(cast<VarDecl>(DR->getDecl()),
+ LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ prunable = false;
+ else {
+ const ProgramState *state = N->getState().getPtr();
+ SVal V = state->getSVal(R);
+ if (report.isInteresting(V))
+ prunable = false;
+ }
+ }
+ }
Out << DR->getDecl()->getDeclName().getAsString();
if (quotes)
Out << '\'';
@@ -591,31 +612,43 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
const BinaryOperator *BExpr,
const bool tookTrue,
BugReporterContext &BRC,
- const LocationContext *LC) {
+ BugReport &R,
+ const ExplodedNode *N) {
bool shouldInvert = false;
+ llvm::Optional<bool> shouldPrune;
- llvm::SmallString<128> LhsString, RhsString;
+ SmallString<128> LhsString, RhsString;
{
- llvm::raw_svector_ostream OutLHS(LhsString), OutRHS(RhsString);
- const bool isVarLHS = patternMatch(BExpr->getLHS(), OutLHS, BRC);
- const bool isVarRHS = patternMatch(BExpr->getRHS(), OutRHS, BRC);
+ llvm::raw_svector_ostream OutLHS(LhsString), OutRHS(RhsString);
+ const bool isVarLHS = patternMatch(BExpr->getLHS(), OutLHS, BRC, R, N,
+ shouldPrune);
+ const bool isVarRHS = patternMatch(BExpr->getRHS(), OutRHS, BRC, R, N,
+ shouldPrune);
shouldInvert = !isVarLHS && isVarRHS;
}
+ BinaryOperator::Opcode Op = BExpr->getOpcode();
+
+ if (BinaryOperator::isAssignmentOp(Op)) {
+ // For assignment operators, all that we care about is that the LHS
+ // evaluates to "true" or "false".
+ return VisitConditionVariable(LhsString, BExpr->getLHS(), tookTrue,
+ BRC, R, N);
+ }
+
+ // For non-assignment operations, we require that we can understand
+ // both the LHS and RHS.
if (LhsString.empty() || RhsString.empty())
return 0;
-
- // Should we invert the strings if the LHS is not a variable name?
- llvm::SmallString<256> buf;
+ // Should we invert the strings if the LHS is not a variable name?
+ SmallString<256> buf;
llvm::raw_svector_ostream Out(buf);
Out << "Assuming " << (shouldInvert ? RhsString : LhsString) << " is ";
// Do we need to invert the opcode?
- BinaryOperator::Opcode Op = BExpr->getOpcode();
-
if (shouldInvert)
switch (Op) {
default: break;
@@ -637,7 +670,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
return 0;
}
- switch (BExpr->getOpcode()) {
+ switch (Op) {
case BO_EQ:
Out << "equal to ";
break;
@@ -650,9 +683,55 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
}
Out << (shouldInvert ? LhsString : RhsString);
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+ if (shouldPrune.hasValue())
+ event->setPrunable(shouldPrune.getValue());
+ return event;
+}
- PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LC);
- return new PathDiagnosticEventPiece(Loc, Out.str());
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitConditionVariable(StringRef LhsString,
+ const Expr *CondVarExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &report,
+ const ExplodedNode *N) {
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ Out << "Assuming " << LhsString << " is ";
+
+ QualType Ty = CondVarExpr->getType();
+
+ if (Ty->isPointerType())
+ Out << (tookTrue ? "not null" : "null");
+ else if (Ty->isObjCObjectPointerType())
+ Out << (tookTrue ? "not nil" : "nil");
+ else if (Ty->isBooleanType())
+ Out << (tookTrue ? "true" : "false");
+ else if (Ty->isIntegerType())
+ Out << (tookTrue ? "non-zero" : "zero");
+ else
+ return 0;
+
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(CondVarExpr, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(CondVarExpr)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const ProgramState *state = N->getState().getPtr();
+ if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ event->setPrunable(false);
+ }
+ }
+ }
+
+ return event;
}
PathDiagnosticPiece *
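
VisitConditionVariable above derives its wording from the condition's type: pointers become null/not null, Objective-C object pointers nil/not nil, booleans true/false, other integers zero/non-zero, and anything else yields no event. The decision table, reduced to a sketch in which the enum stands in for the QualType queries:

#include <string>

enum TypeKind { Pointer, ObjCObjectPointer, Boolean, Integer, Other };

// Returns the empty string when no diagnostic should be emitted,
// mirroring the "return 0" fall-through in VisitConditionVariable.
static std::string valueWording(TypeKind k, bool tookTrue) {
  switch (k) {
  case Pointer:           return tookTrue ? "not null" : "null";
  case ObjCObjectPointer: return tookTrue ? "not nil"  : "nil";
  case Boolean:           return tookTrue ? "true"     : "false";
  case Integer:           return tookTrue ? "non-zero" : "zero";
  case Other:             break;
  }
  return std::string();
}
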
@@ -660,13 +739,14 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
const DeclRefExpr *DR,
const bool tookTrue,
BugReporterContext &BRC,
- const LocationContext *LC) {
+ BugReport &report,
+ const ExplodedNode *N) {
const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
if (!VD)
return 0;
- llvm::SmallString<256> Buf;
+ SmallString<256> Buf;
llvm::raw_svector_ostream Out(Buf);
Out << "Assuming '";
@@ -684,6 +764,21 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
else
return 0;
- PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LC);
- return new PathDiagnosticEventPiece(Loc, Out.str());
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+
+ const ProgramState *state = N->getState().getPtr();
+ if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ event->setPrunable(false);
+ else {
+ SVal V = state->getSVal(R);
+ if (report.isInteresting(V))
+ event->setPrunable(false);
+ }
+ }
+ return event;
}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
index a3bf2c2..07e0aac 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
using namespace clang;
@@ -20,3 +21,11 @@ StringRef CheckerBase::getTagDescription() const {
// FIXME: We want to return the package + name of the checker here.
return "A Checker";
}
+
+void Checker<check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck
+ >::anchor() { }
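
The out-of-line anchor() definitions added throughout this change (BugReporterContext::anchor, BuiltinBug::anchor, BugReport::NodeResolver::anchor, and the fully specialized Checker above) follow the LLVM "key function" idiom: giving a class one non-inline virtual member pins its vtable and RTTI to a single object file instead of having them emitted weakly in every translation unit. A minimal sketch:

// Widget.h
class Widget {
public:
  virtual ~Widget() {}
  virtual void anchor();   // declared, deliberately never defined inline
};

// Widget.cpp -- the one and only definition; the vtable for Widget is
// emitted in this object file and nowhere else.
void Widget::anchor() {}
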
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 5356edc..0a047d9 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -13,21 +13,71 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Lex/Lexer.h"
+
using namespace clang;
using namespace ento;
-CheckerContext::~CheckerContext() {
- // Do we need to autotransition? 'Dst' can get populated in a variety of
- // ways, including 'addTransition()' adding the predecessor node to Dst
- // without actually generated a new node. We also shouldn't autotransition
- // if we are building sinks or we generated a node and decided to not
- // add it as a transition.
- if (Dst.size() == size && !B.BuildSinks && !B.hasGeneratedNode) {
- if (ST && ST != Pred->getState()) {
- static SimpleProgramPointTag autoTransitionTag("CheckerContext : auto");
- addTransition(ST, &autoTransitionTag);
- }
- else
- Dst.Add(Pred);
+const FunctionDecl *CheckerContext::getCalleeDecl(const CallExpr *CE) const {
+ ProgramStateRef State = getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = State->getSVal(Callee, Pred->getLocationContext());
+ return L.getAsFunctionDecl();
+}
+
+StringRef CheckerContext::getCalleeName(const FunctionDecl *FunDecl) const {
+ if (!FunDecl)
+ return StringRef();
+ IdentifierInfo *funI = FunDecl->getIdentifier();
+ if (!funI)
+ return StringRef();
+ return funI->getName();
+}
+
+
+bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
+ StringRef Name) {
+ return isCLibraryFunction(FD, Name, getASTContext());
+}
+
+bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
+ StringRef Name, ASTContext &Context) {
+  // To avoid false positives (e.g. finding user-defined functions with
+  // similar names), only perform fuzzy name matching when it's a builtin.
+  // Using a string compare is slow; we might want to switch on BuiltinID here.
+ unsigned BId = FD->getBuiltinID();
+ if (BId != 0) {
+ StringRef BName = Context.BuiltinInfo.GetName(BId);
+ if (BName.find(Name) != StringRef::npos)
+ return true;
}
+
+ const IdentifierInfo *II = FD->getIdentifier();
+ // If this is a special C++ name without IdentifierInfo, it can't be a
+ // C library function.
+ if (!II)
+ return false;
+
+ StringRef FName = II->getName();
+ if (FName.equals(Name))
+ return true;
+
+ if (FName.startswith("__inline") && (FName.find(Name) != StringRef::npos))
+ return true;
+
+ if (FName.startswith("__") && FName.endswith("_chk") &&
+ FName.find(Name) != StringRef::npos)
+ return true;
+
+ return false;
+}
+
+StringRef CheckerContext::getMacroNameOrSpelling(SourceLocation &Loc) {
+ if (Loc.isMacroID())
+ return Lexer::getImmediateMacroName(Loc, getSourceManager(),
+ getLangOpts());
+ SmallVector<char, 16> buf;
+ return Lexer::getSpelling(Loc, buf, getSourceManager(), getLangOpts());
}
+
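The new isCLibraryFunction applies three name heuristics beyond the exact match: builtins whose builtin name contains the query, "__inline"-prefixed inlined forms, and "__*_chk" fortified variants. A standalone sketch of just the string rules, in plain C++17 with no Clang types (looksLikeCLibraryFunction is an illustrative name):

#include <iostream>
#include <string_view>

static bool startsWith(std::string_view s, std::string_view p) {
  return s.size() >= p.size() && s.compare(0, p.size(), p) == 0;
}
static bool endsWith(std::string_view s, std::string_view p) {
  return s.size() >= p.size() &&
         s.compare(s.size() - p.size(), p.size(), p) == 0;
}

// Mirrors the non-builtin matching rules in the hunk above.
bool looksLikeCLibraryFunction(std::string_view fname, std::string_view name) {
  if (fname == name)
    return true;  // exact match
  if (startsWith(fname, "__inline") &&
      fname.find(name) != std::string_view::npos)
    return true;  // e.g. __inline_memcpy
  if (startsWith(fname, "__") && endsWith(fname, "_chk") &&
      fname.find(name) != std::string_view::npos)
    return true;  // fortified variant, e.g. __memcpy_chk
  return false;
}

int main() {
  std::cout << looksLikeCLibraryFunction("memcpy", "memcpy")
            << looksLikeCLibraryFunction("__inline_memcpy", "memcpy")
            << looksLikeCLibraryFunction("__memcpy_chk", "memcpy")
            << looksLikeCLibraryFunction("my_memcpy", "memcpy") << '\n';
  // prints 1110: the last name is a user function and is rejected.
}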
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index acacfb0..0bcc343 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -93,6 +93,9 @@ template <typename CHECK_CTX>
static void expandGraphWithCheckers(CHECK_CTX checkCtx,
ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src) {
+ const NodeBuilderContext &BldrCtx = checkCtx.Eng.getBuilderContext();
+ if (Src.empty())
+ return;
typename CHECK_CTX::CheckersTy::const_iterator
I = checkCtx.checkers_begin(), E = checkCtx.checkers_end();
@@ -113,9 +116,15 @@ static void expandGraphWithCheckers(CHECK_CTX checkCtx,
CurrSet->clear();
}
+ NodeBuilder B(*PrevSet, *CurrSet, BldrCtx);
for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
- NI != NE; ++NI)
- checkCtx.runChecker(*I, *CurrSet, *NI);
+ NI != NE; ++NI) {
+ checkCtx.runChecker(*I, B, *NI);
+ }
+
+ // If all the produced transitions are sinks, stop.
+ if (CurrSet->empty())
+ return;
// Update which NodeSet is the current one.
PrevSet = CurrSet;
@@ -129,23 +138,24 @@ namespace {
const CheckersTy &Checkers;
const Stmt *S;
ExprEngine &Eng;
+ bool wasInlined;
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
CheckStmtContext(bool isPreVisit, const CheckersTy &checkers,
- const Stmt *s, ExprEngine &eng)
- : IsPreVisit(isPreVisit), Checkers(checkers), S(s), Eng(eng) { }
+ const Stmt *s, ExprEngine &eng, bool wasInlined = false)
+ : IsPreVisit(isPreVisit), Checkers(checkers), S(s), Eng(eng),
+ wasInlined(wasInlined) {}
void runChecker(CheckerManager::CheckStmtFunc checkFn,
- ExplodedNodeSet &Dst, ExplodedNode *Pred) {
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
// FIXME: Remove respondsToCallback from CheckerContext;
ProgramPoint::Kind K = IsPreVisit ? ProgramPoint::PreStmtKind :
ProgramPoint::PostStmtKind;
const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
Pred->getLocationContext(), checkFn.Checker);
- CheckerContext C(Dst, Eng.getBuilder(), Eng, Pred, L, 0);
-
+ CheckerContext C(Bldr, Eng, Pred, L, wasInlined);
checkFn(S, C);
}
};
@@ -156,9 +166,10 @@ void CheckerManager::runCheckersForStmt(bool isPreVisit,
ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
const Stmt *S,
- ExprEngine &Eng) {
+ ExprEngine &Eng,
+ bool wasInlined) {
CheckStmtContext C(isPreVisit, *getCachedStmtCheckersFor(S, isPreVisit),
- S, Eng);
+ S, Eng, wasInlined);
expandGraphWithCheckers(C, Dst, Src);
}
@@ -178,12 +189,14 @@ namespace {
: IsPreVisit(isPreVisit), Checkers(checkers), Msg(msg), Eng(eng) { }
void runChecker(CheckerManager::CheckObjCMessageFunc checkFn,
- ExplodedNodeSet &Dst, ExplodedNode *Pred) {
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
ProgramPoint::Kind K = IsPreVisit ? ProgramPoint::PreStmtKind :
ProgramPoint::PostStmtKind;
- const ProgramPoint &L = ProgramPoint::getProgramPoint(Msg.getOriginExpr(),
- K, Pred->getLocationContext(), checkFn.Checker);
- CheckerContext C(Dst, Eng.getBuilder(), Eng, Pred, L, 0);
+ const ProgramPoint &L =
+ ProgramPoint::getProgramPoint(Msg.getMessageExpr(),
+ K, Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
checkFn(Msg, C);
}
@@ -209,35 +222,44 @@ namespace {
const CheckersTy &Checkers;
SVal Loc;
bool IsLoad;
- const Stmt *S;
+ const Stmt *NodeEx; /* Will become a CFGStmt */
+ const Stmt *BoundEx;
ExprEngine &Eng;
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
CheckLocationContext(const CheckersTy &checkers,
- SVal loc, bool isLoad, const Stmt *s, ExprEngine &eng)
- : Checkers(checkers), Loc(loc), IsLoad(isLoad), S(s), Eng(eng) { }
+ SVal loc, bool isLoad, const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExprEngine &eng)
+ : Checkers(checkers), Loc(loc), IsLoad(isLoad), NodeEx(NodeEx),
+ BoundEx(BoundEx), Eng(eng) {}
void runChecker(CheckerManager::CheckLocationFunc checkFn,
- ExplodedNodeSet &Dst, ExplodedNode *Pred) {
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
ProgramPoint::Kind K = IsLoad ? ProgramPoint::PreLoadKind :
ProgramPoint::PreStoreKind;
- const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
- Pred->getLocationContext(), checkFn.Checker);
- CheckerContext C(Dst, Eng.getBuilder(), Eng, Pred, L, 0);
-
- checkFn(Loc, IsLoad, S, C);
+ const ProgramPoint &L =
+ ProgramPoint::getProgramPoint(NodeEx, K,
+ Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(Loc, IsLoad, BoundEx, C);
}
};
}
/// \brief Run checkers for load/store of a location.
+
void CheckerManager::runCheckersForLocation(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
SVal location, bool isLoad,
- const Stmt *S, ExprEngine &Eng) {
- CheckLocationContext C(LocationCheckers, location, isLoad, S, Eng);
+ const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExprEngine &Eng) {
+ CheckLocationContext C(LocationCheckers, location, isLoad, NodeEx,
+ BoundEx, Eng);
expandGraphWithCheckers(C, Dst, Src);
}
@@ -249,20 +271,21 @@ namespace {
SVal Val;
const Stmt *S;
ExprEngine &Eng;
+ ProgramPoint::Kind PointKind;
CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
CheckBindContext(const CheckersTy &checkers,
- SVal loc, SVal val, const Stmt *s, ExprEngine &eng)
- : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng) { }
+ SVal loc, SVal val, const Stmt *s, ExprEngine &eng,
+ ProgramPoint::Kind PK)
+ : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PointKind(PK) {}
void runChecker(CheckerManager::CheckBindFunc checkFn,
- ExplodedNodeSet &Dst, ExplodedNode *Pred) {
- ProgramPoint::Kind K = ProgramPoint::PreStmtKind;
- const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, PointKind,
Pred->getLocationContext(), checkFn.Checker);
- CheckerContext C(Dst, Eng.getBuilder(), Eng, Pred, L, 0);
+ CheckerContext C(Bldr, Eng, Pred, L);
checkFn(Loc, Val, S, C);
}
@@ -273,8 +296,9 @@ namespace {
void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
const ExplodedNodeSet &Src,
SVal location, SVal val,
- const Stmt *S, ExprEngine &Eng) {
- CheckBindContext C(BindCheckers, location, val, S, Eng);
+ const Stmt *S, ExprEngine &Eng,
+ ProgramPoint::Kind PointKind) {
+ CheckBindContext C(BindCheckers, location, val, S, Eng, PointKind);
expandGraphWithCheckers(C, Dst, Src);
}
@@ -286,27 +310,65 @@ void CheckerManager::runCheckersForEndAnalysis(ExplodedGraph &G,
}
/// \brief Run checkers for end of path.
-void CheckerManager::runCheckersForEndPath(EndOfFunctionNodeBuilder &B,
+// Note: we do not chain the checker output (as in expandGraphWithCheckers)
+// for this callback, since end-of-path nodes are expected to be final.
+void CheckerManager::runCheckersForEndPath(NodeBuilderContext &BC,
+ ExplodedNodeSet &Dst,
ExprEngine &Eng) {
+ ExplodedNode *Pred = BC.Pred;
+
+  // We define the builder outside of the loop because if at least one checker
+  // creates a successor for Pred, we do not need to generate an
+  // autotransition for it.
+ NodeBuilder Bldr(Pred, Dst, BC);
for (unsigned i = 0, e = EndPathCheckers.size(); i != e; ++i) {
- CheckEndPathFunc fn = EndPathCheckers[i];
- EndOfFunctionNodeBuilder specialB = B.withCheckerTag(fn.Checker);
- fn(specialB, Eng);
+ CheckEndPathFunc checkFn = EndPathCheckers[i];
+
+ const ProgramPoint &L = BlockEntrance(BC.Block,
+ Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(C);
}
}
+namespace {
+ struct CheckBranchConditionContext {
+ typedef std::vector<CheckerManager::CheckBranchConditionFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ const Stmt *Condition;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckBranchConditionContext(const CheckersTy &checkers,
+ const Stmt *Cond, ExprEngine &eng)
+ : Checkers(checkers), Condition(Cond), Eng(eng) {}
+
+ void runChecker(CheckerManager::CheckBranchConditionFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ ProgramPoint L = PostCondition(Condition, Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(Condition, C);
+ }
+ };
+}
+
/// \brief Run checkers for branch condition.
-void CheckerManager::runCheckersForBranchCondition(const Stmt *condition,
- BranchNodeBuilder &B,
+void CheckerManager::runCheckersForBranchCondition(const Stmt *Condition,
+ ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
ExprEngine &Eng) {
- for (unsigned i = 0, e = BranchConditionCheckers.size(); i != e; ++i) {
- CheckBranchConditionFunc fn = BranchConditionCheckers[i];
- fn(condition, B, Eng);
- }
+ ExplodedNodeSet Src;
+ Src.insert(Pred);
+ CheckBranchConditionContext C(BranchConditionCheckers, Condition, Eng);
+ expandGraphWithCheckers(C, Dst, Src);
}
/// \brief Run checkers for live symbols.
-void CheckerManager::runCheckersForLiveSymbols(const ProgramState *state,
+void CheckerManager::runCheckersForLiveSymbols(ProgramStateRef state,
SymbolReaper &SymReaper) {
for (unsigned i = 0, e = LiveSymbolsCheckers.size(); i != e; ++i)
LiveSymbolsCheckers[i](state, SymReaper);
@@ -328,11 +390,11 @@ namespace {
: Checkers(checkers), SR(sr), S(s), Eng(eng) { }
void runChecker(CheckerManager::CheckDeadSymbolsFunc checkFn,
- ExplodedNodeSet &Dst, ExplodedNode *Pred) {
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
ProgramPoint::Kind K = ProgramPoint::PostPurgeDeadSymbolsKind;
const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
Pred->getLocationContext(), checkFn.Checker);
- CheckerContext C(Dst, Eng.getBuilder(), Eng, Pred, L, 0);
+ CheckerContext C(Bldr, Eng, Pred, L);
checkFn(SR, C);
}
@@ -350,7 +412,7 @@ void CheckerManager::runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
}
/// \brief True if at least one checker wants to check region changes.
-bool CheckerManager::wantsRegionChangeUpdate(const ProgramState *state) {
+bool CheckerManager::wantsRegionChangeUpdate(ProgramStateRef state) {
for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i)
if (RegionChangesCheckers[i].WantUpdateFn(state))
return true;
@@ -359,25 +421,26 @@ bool CheckerManager::wantsRegionChangeUpdate(const ProgramState *state) {
}
/// \brief Run checkers for region changes.
-const ProgramState *
-CheckerManager::runCheckersForRegionChanges(const ProgramState *state,
+ProgramStateRef
+CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions) {
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) {
for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i) {
// If any checker declares the state infeasible (or if it starts that way),
// bail out.
if (!state)
return NULL;
state = RegionChangesCheckers[i].CheckFn(state, invalidated,
- ExplicitRegions, Regions);
+ ExplicitRegions, Regions, Call);
}
return state;
}
/// \brief Run checkers for handling assumptions on symbolic values.
-const ProgramState *
-CheckerManager::runCheckersForEvalAssume(const ProgramState *state,
+ProgramStateRef
+CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
SVal Cond, bool Assumption) {
for (unsigned i = 0, e = EvalAssumeCheckers.size(); i != e; ++i) {
// If any checker declares the state infeasible (or if it starts that way),
@@ -437,11 +500,12 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
}
#endif
+ ExplodedNodeSet checkDst;
+ NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
// Next, check if any of the EvalCall callbacks can evaluate the call.
for (std::vector<EvalCallFunc>::iterator
EI = EvalCallCheckers.begin(), EE = EvalCallCheckers.end();
EI != EE; ++EI) {
- ExplodedNodeSet checkDst;
ProgramPoint::Kind K = ProgramPoint::PostStmtKind;
const ProgramPoint &L = ProgramPoint::getProgramPoint(CE, K,
Pred->getLocationContext(), EI->Checker);
@@ -449,7 +513,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
    { // CheckerContext generates transitions (populates checkDst) on
// destruction, so introduce the scope to make sure it gets properly
// populated.
- CheckerContext C(checkDst, Eng.getBuilder(), Eng, Pred, L, 0);
+ CheckerContext C(B, Eng, Pred, L);
evaluated = (*EI)(CE, C);
}
assert(!(evaluated && anyEvaluated)
@@ -483,7 +547,7 @@ void CheckerManager::runCheckersOnEndOfTranslationUnit(
}
void CheckerManager::runCheckersForPrintState(raw_ostream &Out,
- const ProgramState *State,
+ ProgramStateRef State,
const char *NL, const char *Sep) {
for (llvm::DenseMap<CheckerTag, CheckerRef>::iterator
I = CheckerTags.begin(), E = CheckerTags.end(); I != E; ++I)
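Throughout this file, expandGraphWithCheckers threads the node set through each checker in turn: the output frontier of checker i becomes the input of checker i+1, and the walk stops early when a stage sinks every node (the new "if (CurrSet->empty()) return" above). The control flow in miniature, with std containers standing in for ExplodedNodeSet:

#include <functional>
#include <iostream>
#include <vector>

using NodeSet = std::vector<int>;
using Checker = std::function<void(int node, NodeSet &out)>;

// Each checker consumes the previous frontier and emits the next one; an
// empty output means every path was sunk, so exploration stops.
NodeSet expandWithCheckers(const std::vector<Checker> &checkers, NodeSet src) {
  NodeSet cur = std::move(src);
  for (const Checker &check : checkers) {
    NodeSet next;
    for (int node : cur)
      check(node, next);
    if (next.empty())
      return next;            // all transitions were sinks: stop
    cur = std::move(next);    // this frontier feeds the next checker
  }
  return cur;
}

int main() {
  std::vector<Checker> checkers = {
      [](int n, NodeSet &out) { out.push_back(n + 1); },         // transform
      [](int n, NodeSet &out) { if (n % 2) out.push_back(n); },  // sink evens
  };
  for (int n : expandWithCheckers(checkers, {0, 2, 4}))
    std::cout << n << ' ';
  std::cout << '\n';  // prints: 1 3 5
}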
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
index 13401ac..9791e2ec 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -9,12 +9,13 @@
#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
+#include "llvm/ADT/SetVector.h"
using namespace clang;
using namespace ento;
static const char PackageSeparator = '.';
-typedef llvm::DenseSet<const CheckerRegistry::CheckerInfo *> CheckerInfoSet;
+typedef llvm::SetVector<const CheckerRegistry::CheckerInfo *> CheckerInfoSet;
static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
@@ -72,7 +73,7 @@ static void collectCheckers(const CheckerRegistry::CheckerInfoList &checkers,
if (opt.isEnabled())
collected.insert(&*i);
else
- collected.erase(&*i);
+ collected.remove(&*i);
}
}
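Swapping llvm::DenseSet for llvm::SetVector keeps the collected checkers in registration order, so the set of enabled checkers is initialized deterministically rather than in hash order; remove() replaces erase() above because SetVector must also drop the element from its backing vector. The same structure in portable C++ (InsertionOrderedSet is an illustrative stand-in):

#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// A minimal "SetVector": set semantics, insertion-ordered iteration.
struct InsertionOrderedSet {
  std::vector<std::string> order;
  std::unordered_set<std::string> seen;

  void insert(const std::string &s) {
    if (seen.insert(s).second)
      order.push_back(s);
  }
  void remove(const std::string &s) {
    if (seen.erase(s))
      order.erase(std::find(order.begin(), order.end(), s));
  }
};

int main() {
  InsertionOrderedSet collected;
  collected.insert("core.NullDeref");
  collected.insert("unix.Malloc");
  collected.insert("core.NullDeref");  // duplicate: ignored
  collected.remove("unix.Malloc");     // later disabled by an option
  for (const std::string &name : collected.order)
    std::cout << name << '\n';         // deterministic: core.NullDeref
}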
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 5252198..eb986af 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -12,6 +12,8 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "CoreEngine"
+
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
@@ -20,9 +22,16 @@
#include "clang/AST/StmtCXX.h"
#include "llvm/Support/Casting.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+
using namespace clang;
using namespace ento;
+STATISTIC(NumReachedMaxSteps,
+ "The # of times we reached the max number of steps.");
+STATISTIC(NumPathsExplored,
+ "The # of paths explored by the analyzer.");
+
//===----------------------------------------------------------------------===//
// Worklist classes for exploration of reachable states.
//===----------------------------------------------------------------------===//
@@ -152,7 +161,7 @@ WorkList* WorkList::makeBFSBlockDFSContents() {
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
- const ProgramState *InitState) {
+ ProgramStateRef InitState) {
if (G->num_roots() == 0) { // Initialize the analysis by constructing
// the root if none exists.
@@ -165,6 +174,11 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
assert (Entry->succ_size() == 1 &&
"Entry block must have 1 successor.");
+ // Mark the entry block as visited.
+ FunctionSummaries->markVisitedBasicBlock(Entry->getBlockID(),
+ L->getDecl(),
+ L->getCFG()->getNumBlockIDs());
+
// Get the solitary successor.
const CFGBlock *Succ = *(Entry->succ_begin());
@@ -187,8 +201,10 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
while (WList->hasWork()) {
if (!UnlimitedSteps) {
- if (Steps == 0)
+ if (Steps == 0) {
+ NumReachedMaxSteps++;
break;
+ }
--Steps;
}
@@ -200,67 +216,80 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
// Retrieve the node.
ExplodedNode *Node = WU.getNode();
- // Dispatch on the location type.
- switch (Node->getLocation().getKind()) {
- case ProgramPoint::BlockEdgeKind:
- HandleBlockEdge(cast<BlockEdge>(Node->getLocation()), Node);
- break;
-
- case ProgramPoint::BlockEntranceKind:
- HandleBlockEntrance(cast<BlockEntrance>(Node->getLocation()), Node);
- break;
-
- case ProgramPoint::BlockExitKind:
- assert (false && "BlockExit location never occur in forward analysis.");
- break;
+ dispatchWorkItem(Node, Node->getLocation(), WU);
+ }
+ SubEng.processEndWorklist(hasWorkRemaining());
+ return WList->hasWork();
+}
- case ProgramPoint::CallEnterKind:
- HandleCallEnter(cast<CallEnter>(Node->getLocation()), WU.getBlock(),
- WU.getIndex(), Node);
- break;
+void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
+ const WorkListUnit& WU) {
+ // Dispatch on the location type.
+ switch (Loc.getKind()) {
+ case ProgramPoint::BlockEdgeKind:
+ HandleBlockEdge(cast<BlockEdge>(Loc), Pred);
+ break;
+
+ case ProgramPoint::BlockEntranceKind:
+ HandleBlockEntrance(cast<BlockEntrance>(Loc), Pred);
+ break;
+
+ case ProgramPoint::BlockExitKind:
+      assert (false && "BlockExit location never occurs in forward analysis.");
+ break;
+
+ case ProgramPoint::CallEnterKind: {
+ CallEnter CEnter = cast<CallEnter>(Loc);
+ if (AnalyzedCallees)
+ if (const CallExpr* CE =
+ dyn_cast_or_null<CallExpr>(CEnter.getCallExpr()))
+ if (const Decl *CD = CE->getCalleeDecl())
+ AnalyzedCallees->insert(CD);
+ SubEng.processCallEnter(CEnter, Pred);
+ break;
+ }
- case ProgramPoint::CallExitKind:
- HandleCallExit(cast<CallExit>(Node->getLocation()), Node);
- break;
+ case ProgramPoint::CallExitKind:
+ SubEng.processCallExit(Pred);
+ break;
- default:
- assert(isa<PostStmt>(Node->getLocation()) ||
- isa<PostInitializer>(Node->getLocation()));
- HandlePostStmt(WU.getBlock(), WU.getIndex(), Node);
- break;
+ case ProgramPoint::EpsilonKind: {
+ assert(Pred->hasSinglePred() &&
+ "Assume epsilon has exactly one predecessor by construction");
+ ExplodedNode *PNode = Pred->getFirstPred();
+ dispatchWorkItem(Pred, PNode->getLocation(), WU);
+ break;
}
+ default:
+ assert(isa<PostStmt>(Loc) ||
+ isa<PostInitializer>(Loc));
+ HandlePostStmt(WU.getBlock(), WU.getIndex(), Pred);
+ break;
}
-
- SubEng.processEndWorklist(hasWorkRemaining());
- return WList->hasWork();
}
-void CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
+bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
unsigned Steps,
- const ProgramState *InitState,
+ ProgramStateRef InitState,
ExplodedNodeSet &Dst) {
- ExecuteWorkList(L, Steps, InitState);
- for (SmallVectorImpl<ExplodedNode*>::iterator I = G->EndNodes.begin(),
- E = G->EndNodes.end(); I != E; ++I) {
+ bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
+ for (ExplodedGraph::eop_iterator I = G->eop_begin(),
+ E = G->eop_end(); I != E; ++I) {
Dst.Add(*I);
}
-}
-
-void CoreEngine::HandleCallEnter(const CallEnter &L, const CFGBlock *Block,
- unsigned Index, ExplodedNode *Pred) {
- CallEnterNodeBuilder Builder(*this, Pred, L.getCallExpr(),
- L.getCalleeContext(), Block, Index);
- SubEng.processCallEnter(Builder);
-}
-
-void CoreEngine::HandleCallExit(const CallExit &L, ExplodedNode *Pred) {
- CallExitNodeBuilder Builder(*this, Pred);
- SubEng.processCallExit(Builder);
+ return DidNotFinish;
}
void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
const CFGBlock *Blk = L.getDst();
+ NodeBuilderContext BuilderCtx(*this, Blk, Pred);
+
+ // Mark this block as visited.
+ const LocationContext *LC = Pred->getLocationContext();
+ FunctionSummaries->markVisitedBasicBlock(Blk->getBlockID(),
+ LC->getDecl(),
+ LC->getCFG()->getNumBlockIDs());
// Check if we are entering the EXIT block.
if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {
@@ -269,53 +298,42 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
&& "EXIT block cannot contain Stmts.");
// Process the final state transition.
- EndOfFunctionNodeBuilder Builder(Blk, Pred, this);
- SubEng.processEndOfFunction(Builder);
+ SubEng.processEndOfFunction(BuilderCtx);
// This path is done. Don't enqueue any more nodes.
return;
}
- // Call into the subengine to process entering the CFGBlock.
+ // Call into the SubEngine to process entering the CFGBlock.
ExplodedNodeSet dstNodes;
BlockEntrance BE(Blk, Pred->getLocationContext());
- GenericNodeBuilder<BlockEntrance> nodeBuilder(*this, Pred, BE);
- SubEng.processCFGBlockEntrance(dstNodes, nodeBuilder);
+ NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
+ SubEng.processCFGBlockEntrance(L, nodeBuilder);
- if (dstNodes.empty()) {
- if (!nodeBuilder.hasGeneratedNode) {
- // Auto-generate a node and enqueue it to the worklist.
- generateNode(BE, Pred->State, Pred);
- }
- }
- else {
- for (ExplodedNodeSet::iterator I = dstNodes.begin(), E = dstNodes.end();
- I != E; ++I) {
- WList->enqueue(*I);
- }
+ // Auto-generate a node.
+ if (!nodeBuilder.hasGeneratedNodes()) {
+ nodeBuilder.generateNode(Pred->State, Pred);
}
- for (SmallVectorImpl<ExplodedNode*>::const_iterator
- I = nodeBuilder.sinks().begin(), E = nodeBuilder.sinks().end();
- I != E; ++I) {
- blocksExhausted.push_back(std::make_pair(L, *I));
- }
+ // Enqueue nodes onto the worklist.
+ enqueue(dstNodes);
}
void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
ExplodedNode *Pred) {
// Increment the block counter.
+ const LocationContext *LC = Pred->getLocationContext();
+ unsigned BlockId = L.getBlock()->getBlockID();
BlockCounter Counter = WList->getBlockCounter();
- Counter = BCounterFactory.IncrementCount(Counter,
- Pred->getLocationContext()->getCurrentStackFrame(),
- L.getBlock()->getBlockID());
+ Counter = BCounterFactory.IncrementCount(Counter, LC->getCurrentStackFrame(),
+ BlockId);
WList->setBlockCounter(Counter);
// Process the entrance of the block.
if (CFGElement E = L.getFirstElement()) {
- StmtNodeBuilder Builder(L.getBlock(), 0, Pred, this);
- SubEng.processCFGElement(E, Builder);
+ NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
+ SubEng.processCFGElement(E, Pred, 0, &Ctx);
}
else
HandleBlockExit(L.getBlock(), Pred);
@@ -345,6 +363,19 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
return;
+ case Stmt::CXXTryStmtClass: {
+ // Generate a node for each of the successors.
+ // Our logic for EH analysis can certainly be improved.
+ for (CFGBlock::const_succ_iterator it = B->succ_begin(),
+ et = B->succ_end(); it != et; ++it) {
+ if (const CFGBlock *succ = *it) {
+ generateNode(BlockEdge(B, succ, Pred->getLocationContext()),
+ Pred->State, Pred);
+ }
+ }
+ return;
+ }
+
case Stmt::DoStmtClass:
HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
return;
@@ -417,31 +448,35 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
const CFGBlock * B, ExplodedNode *Pred) {
assert(B->succ_size() == 2);
- BranchNodeBuilder Builder(B, *(B->succ_begin()), *(B->succ_begin()+1),
- Pred, this);
- SubEng.processBranch(Cond, Term, Builder);
+ NodeBuilderContext Ctx(*this, B, Pred);
+ ExplodedNodeSet Dst;
+ SubEng.processBranch(Cond, Term, Ctx, Pred, Dst,
+ *(B->succ_begin()), *(B->succ_begin()+1));
+ // Enqueue the new frontier onto the worklist.
+ enqueue(Dst);
}
void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
ExplodedNode *Pred) {
- assert (!B->empty());
+ assert(B);
+ assert(!B->empty());
if (StmtIdx == B->size())
HandleBlockExit(B, Pred);
else {
- StmtNodeBuilder Builder(B, StmtIdx, Pred, this);
- SubEng.processCFGElement((*B)[StmtIdx], Builder);
+ NodeBuilderContext Ctx(*this, B, Pred);
+ SubEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
}
}
/// generateNode - Utility method to generate nodes, hook up successors,
/// and add nodes to the worklist.
void CoreEngine::generateNode(const ProgramPoint &Loc,
- const ProgramState *State,
+ ProgramStateRef State,
ExplodedNode *Pred) {
bool IsNew;
- ExplodedNode *Node = G->getNode(Loc, State, &IsNew);
+ ExplodedNode *Node = G->getNode(Loc, State, false, &IsNew);
if (Pred)
Node->addPredecessor(Pred, *G); // Link 'Node' with its predecessor.
@@ -454,225 +489,181 @@ void CoreEngine::generateNode(const ProgramPoint &Loc,
if (IsNew) WList->enqueue(Node);
}
-ExplodedNode *
-GenericNodeBuilderImpl::generateNodeImpl(const ProgramState *state,
- ExplodedNode *pred,
- ProgramPoint programPoint,
- bool asSink) {
-
- hasGeneratedNode = true;
- bool isNew;
- ExplodedNode *node = engine.getGraph().getNode(programPoint, state, &isNew);
- if (pred)
- node->addPredecessor(pred, engine.getGraph());
- if (isNew) {
- if (asSink) {
- node->markAsSink();
- sinksGenerated.push_back(node);
- }
- return node;
- }
- return 0;
-}
-
-StmtNodeBuilder::StmtNodeBuilder(const CFGBlock *b,
- unsigned idx,
- ExplodedNode *N,
- CoreEngine* e)
- : Eng(*e), B(*b), Idx(idx), Pred(N),
- PurgingDeadSymbols(false), BuildSinks(false), hasGeneratedNode(false),
- PointKind(ProgramPoint::PostStmtKind), Tag(0) {
- Deferred.insert(N);
-}
-
-StmtNodeBuilder::~StmtNodeBuilder() {
- for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
- if (!(*I)->isSink())
- GenerateAutoTransition(*I);
-}
-
-void StmtNodeBuilder::GenerateAutoTransition(ExplodedNode *N) {
+void CoreEngine::enqueueStmtNode(ExplodedNode *N,
+ const CFGBlock *Block, unsigned Idx) {
+ assert(Block);
assert (!N->isSink());
// Check if this node entered a callee.
if (isa<CallEnter>(N->getLocation())) {
// Still use the index of the CallExpr. It's needed to create the callee
// StackFrameContext.
- Eng.WList->enqueue(N, &B, Idx);
+ WList->enqueue(N, Block, Idx);
return;
}
// Do not create extra nodes. Move to the next CFG element.
if (isa<PostInitializer>(N->getLocation())) {
- Eng.WList->enqueue(N, &B, Idx+1);
+ WList->enqueue(N, Block, Idx+1);
return;
}
- PostStmt Loc(getStmt(), N->getLocationContext());
+ if (isa<EpsilonPoint>(N->getLocation())) {
+ WList->enqueue(N, Block, Idx);
+ return;
+ }
+
+ const CFGStmt *CS = (*Block)[Idx].getAs<CFGStmt>();
+ const Stmt *St = CS ? CS->getStmt() : 0;
+ PostStmt Loc(St, N->getLocationContext());
if (Loc == N->getLocation()) {
// Note: 'N' should be a fresh node because otherwise it shouldn't be
// a member of Deferred.
- Eng.WList->enqueue(N, &B, Idx+1);
+ WList->enqueue(N, Block, Idx+1);
return;
}
bool IsNew;
- ExplodedNode *Succ = Eng.G->getNode(Loc, N->State, &IsNew);
- Succ->addPredecessor(N, *Eng.G);
+ ExplodedNode *Succ = G->getNode(Loc, N->getState(), false, &IsNew);
+ Succ->addPredecessor(N, *G);
if (IsNew)
- Eng.WList->enqueue(Succ, &B, Idx+1);
+ WList->enqueue(Succ, Block, Idx+1);
}
-ExplodedNode *StmtNodeBuilder::MakeNode(ExplodedNodeSet &Dst,
- const Stmt *S,
- ExplodedNode *Pred,
- const ProgramState *St,
- ProgramPoint::Kind K) {
-
- ExplodedNode *N = generateNode(S, St, Pred, K);
+ExplodedNode *CoreEngine::generateCallExitNode(ExplodedNode *N) {
+ // Create a CallExit node and enqueue it.
+ const StackFrameContext *LocCtx
+ = cast<StackFrameContext>(N->getLocationContext());
+ const Stmt *CE = LocCtx->getCallSite();
- if (N) {
- if (BuildSinks)
- N->markAsSink();
- else
- Dst.Add(N);
- }
-
- return N;
-}
+  // Use the callee location context.
+ CallExit Loc(CE, LocCtx);
-ExplodedNode*
-StmtNodeBuilder::generateNodeInternal(const Stmt *S,
- const ProgramState *state,
- ExplodedNode *Pred,
- ProgramPoint::Kind K,
- const ProgramPointTag *tag) {
-
- const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
- Pred->getLocationContext(), tag);
- return generateNodeInternal(L, state, Pred);
+ bool isNew;
+ ExplodedNode *Node = G->getNode(Loc, N->getState(), false, &isNew);
+ Node->addPredecessor(N, *G);
+ return isNew ? Node : 0;
}
-ExplodedNode*
-StmtNodeBuilder::generateNodeInternal(const ProgramPoint &Loc,
- const ProgramState *State,
- ExplodedNode *Pred) {
- bool IsNew;
- ExplodedNode *N = Eng.G->getNode(Loc, State, &IsNew);
- N->addPredecessor(Pred, *Eng.G);
- Deferred.erase(Pred);
- if (IsNew) {
- Deferred.insert(N);
- return N;
+void CoreEngine::enqueue(ExplodedNodeSet &Set) {
+ for (ExplodedNodeSet::iterator I = Set.begin(),
+ E = Set.end(); I != E; ++I) {
+ WList->enqueue(*I);
}
+}
- return NULL;
+void CoreEngine::enqueue(ExplodedNodeSet &Set,
+ const CFGBlock *Block, unsigned Idx) {
+ for (ExplodedNodeSet::iterator I = Set.begin(),
+ E = Set.end(); I != E; ++I) {
+ enqueueStmtNode(*I, Block, Idx);
+ }
}
-// This function generate a new ExplodedNode but not a new branch(block edge).
-ExplodedNode *BranchNodeBuilder::generateNode(const Stmt *Condition,
- const ProgramState *State) {
- bool IsNew;
-
- ExplodedNode *Succ
- = Eng.G->getNode(PostCondition(Condition, Pred->getLocationContext()), State,
- &IsNew);
-
- Succ->addPredecessor(Pred, *Eng.G);
-
- Pred = Succ;
-
- if (IsNew)
- return Succ;
-
- return NULL;
+void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set) {
+ for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
+ ExplodedNode *N = *I;
+ // If we are in an inlined call, generate CallExit node.
+ if (N->getLocationContext()->getParent()) {
+ N = generateCallExitNode(N);
+ if (N)
+ WList->enqueue(N);
+ } else {
+ G->addEndOfPath(N);
+ NumPathsExplored++;
+ }
+ }
}
-ExplodedNode *BranchNodeBuilder::generateNode(const ProgramState *State,
- bool branch) {
- // If the branch has been marked infeasible we should not generate a node.
- if (!isFeasible(branch))
- return NULL;
+void NodeBuilder::anchor() { }
+ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
+ ProgramStateRef State,
+ ExplodedNode *FromN,
+ bool MarkAsSink) {
+ HasGeneratedNodes = true;
bool IsNew;
+ ExplodedNode *N = C.Eng.G->getNode(Loc, State, MarkAsSink, &IsNew);
+ N->addPredecessor(FromN, *C.Eng.G);
+ Frontier.erase(FromN);
- ExplodedNode *Succ =
- Eng.G->getNode(BlockEdge(Src,branch ? DstT:DstF,Pred->getLocationContext()),
- State, &IsNew);
+ if (!IsNew)
+ return 0;
- Succ->addPredecessor(Pred, *Eng.G);
+ if (!MarkAsSink)
+ Frontier.Add(N);
- if (branch)
- GeneratedTrue = true;
- else
- GeneratedFalse = true;
+ return N;
+}
- if (IsNew) {
- Deferred.push_back(Succ);
- return Succ;
- }
+void NodeBuilderWithSinks::anchor() { }
- return NULL;
+StmtNodeBuilder::~StmtNodeBuilder() {
+ if (EnclosingBldr)
+ for (ExplodedNodeSet::iterator I = Frontier.begin(),
+ E = Frontier.end(); I != E; ++I )
+ EnclosingBldr->addNodes(*I);
}
-BranchNodeBuilder::~BranchNodeBuilder() {
- if (!GeneratedTrue) generateNode(Pred->State, true);
- if (!GeneratedFalse) generateNode(Pred->State, false);
+void BranchNodeBuilder::anchor() { }
- for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
- if (!(*I)->isSink()) Eng.WList->enqueue(*I);
-}
+ExplodedNode *BranchNodeBuilder::generateNode(ProgramStateRef State,
+ bool branch,
+ ExplodedNode *NodePred) {
+ // If the branch has been marked infeasible we should not generate a node.
+ if (!isFeasible(branch))
+ return NULL;
+ ProgramPoint Loc = BlockEdge(C.Block, branch ? DstT:DstF,
+ NodePred->getLocationContext());
+ ExplodedNode *Succ = generateNodeImpl(Loc, State, NodePred);
+ return Succ;
+}
ExplodedNode*
IndirectGotoNodeBuilder::generateNode(const iterator &I,
- const ProgramState *St,
- bool isSink) {
+ ProgramStateRef St,
+ bool IsSink) {
bool IsNew;
-
ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
- Pred->getLocationContext()), St, &IsNew);
-
+ Pred->getLocationContext()), St,
+ IsSink, &IsNew);
Succ->addPredecessor(Pred, *Eng.G);
- if (IsNew) {
+ if (!IsNew)
+ return 0;
- if (isSink)
- Succ->markAsSink();
- else
- Eng.WList->enqueue(Succ);
-
- return Succ;
- }
+ if (!IsSink)
+ Eng.WList->enqueue(Succ);
- return NULL;
+ return Succ;
}
ExplodedNode*
SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
- const ProgramState *St) {
+ ProgramStateRef St) {
bool IsNew;
ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
- Pred->getLocationContext()),
- St, &IsNew);
+ Pred->getLocationContext()), St,
+ false, &IsNew);
Succ->addPredecessor(Pred, *Eng.G);
- if (IsNew) {
- Eng.WList->enqueue(Succ);
- return Succ;
- }
- return NULL;
+ if (!IsNew)
+ return 0;
+
+ Eng.WList->enqueue(Succ);
+ return Succ;
}
ExplodedNode*
-SwitchNodeBuilder::generateDefaultCaseNode(const ProgramState *St,
- bool isSink) {
+SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
+ bool IsSink) {
// Get the block for the default case.
assert(Src->succ_rbegin() != Src->succ_rend());
CFGBlock *DefaultBlock = *Src->succ_rbegin();
@@ -683,145 +674,16 @@ SwitchNodeBuilder::generateDefaultCaseNode(const ProgramState *St,
return NULL;
bool IsNew;
-
ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, DefaultBlock,
- Pred->getLocationContext()), St, &IsNew);
+ Pred->getLocationContext()), St,
+ IsSink, &IsNew);
Succ->addPredecessor(Pred, *Eng.G);
- if (IsNew) {
- if (isSink)
- Succ->markAsSink();
- else
- Eng.WList->enqueue(Succ);
-
- return Succ;
- }
-
- return NULL;
-}
-
-EndOfFunctionNodeBuilder::~EndOfFunctionNodeBuilder() {
- // Auto-generate an EOP node if one has not been generated.
- if (!hasGeneratedNode) {
- // If we are in an inlined call, generate CallExit node.
- if (Pred->getLocationContext()->getParent())
- GenerateCallExitNode(Pred->State);
- else
- generateNode(Pred->State);
- }
-}
-
-ExplodedNode*
-EndOfFunctionNodeBuilder::generateNode(const ProgramState *State,
- ExplodedNode *P,
- const ProgramPointTag *tag) {
- hasGeneratedNode = true;
- bool IsNew;
-
- ExplodedNode *Node = Eng.G->getNode(BlockEntrance(&B,
- Pred->getLocationContext(), tag ? tag : Tag),
- State, &IsNew);
-
- Node->addPredecessor(P ? P : Pred, *Eng.G);
-
- if (IsNew) {
- Eng.G->addEndOfPath(Node);
- return Node;
- }
-
- return NULL;
-}
-
-void EndOfFunctionNodeBuilder::GenerateCallExitNode(const ProgramState *state) {
- hasGeneratedNode = true;
- // Create a CallExit node and enqueue it.
- const StackFrameContext *LocCtx
- = cast<StackFrameContext>(Pred->getLocationContext());
- const Stmt *CE = LocCtx->getCallSite();
+ if (!IsNew)
+ return 0;
- // Use the the callee location context.
- CallExit Loc(CE, LocCtx);
-
- bool isNew;
- ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
- Node->addPredecessor(Pred, *Eng.G);
-
- if (isNew)
- Eng.WList->enqueue(Node);
-}
-
-
-void CallEnterNodeBuilder::generateNode(const ProgramState *state) {
- // Check if the callee is in the same translation unit.
- if (CalleeCtx->getTranslationUnit() !=
- Pred->getLocationContext()->getTranslationUnit()) {
- // Create a new engine. We must be careful that the new engine should not
- // reference data structures owned by the old engine.
-
- AnalysisManager &OldMgr = Eng.SubEng.getAnalysisManager();
-
- // Get the callee's translation unit.
- idx::TranslationUnit *TU = CalleeCtx->getTranslationUnit();
-
- // Create a new AnalysisManager with components of the callee's
- // TranslationUnit.
- // The Diagnostic is actually shared when we create ASTUnits from AST files.
- AnalysisManager AMgr(TU->getASTContext(), TU->getDiagnostic(), OldMgr);
-
- // Create the new engine.
- // FIXME: This cast isn't really safe.
- bool GCEnabled = static_cast<ExprEngine&>(Eng.SubEng).isObjCGCEnabled();
- ExprEngine NewEng(AMgr, GCEnabled);
-
- // Create the new LocationContext.
- AnalysisContext *NewAnaCtx = AMgr.getAnalysisContext(CalleeCtx->getDecl(),
- CalleeCtx->getTranslationUnit());
- const StackFrameContext *OldLocCtx = CalleeCtx;
- const StackFrameContext *NewLocCtx = AMgr.getStackFrame(NewAnaCtx,
- OldLocCtx->getParent(),
- OldLocCtx->getCallSite(),
- OldLocCtx->getCallSiteBlock(),
- OldLocCtx->getIndex());
-
- // Now create an initial state for the new engine.
- const ProgramState *NewState =
- NewEng.getStateManager().MarshalState(state, NewLocCtx);
- ExplodedNodeSet ReturnNodes;
- NewEng.ExecuteWorkListWithInitialState(NewLocCtx, AMgr.getMaxNodes(),
- NewState, ReturnNodes);
- return;
- }
-
- // Get the callee entry block.
- const CFGBlock *Entry = &(CalleeCtx->getCFG()->getEntry());
- assert(Entry->empty());
- assert(Entry->succ_size() == 1);
-
- // Get the solitary successor.
- const CFGBlock *SuccB = *(Entry->succ_begin());
-
- // Construct an edge representing the starting location in the callee.
- BlockEdge Loc(Entry, SuccB, CalleeCtx);
-
- bool isNew;
- ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
- Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G);
-
- if (isNew)
- Eng.WList->enqueue(Node);
-}
+ if (!IsSink)
+ Eng.WList->enqueue(Succ);
-void CallExitNodeBuilder::generateNode(const ProgramState *state) {
- // Get the callee's location context.
- const StackFrameContext *LocCtx
- = cast<StackFrameContext>(Pred->getLocationContext());
- // When exiting an implicit automatic obj dtor call, the callsite is the Stmt
- // that triggers the dtor.
- PostStmt Loc(LocCtx->getCallSite(), LocCtx->getParent());
- bool isNew;
- ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
- Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G);
- if (isNew)
- Eng.WList->enqueue(Node, LocCtx->getCallSiteBlock(),
- LocCtx->getIndex() + 1);
+ return Succ;
}
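The restructured ExecuteWorkList above separates budget accounting from dispatch: the loop spends one unit of the step budget per work item, records when the budget runs out (NumReachedMaxSteps), and hands the location to dispatchWorkItem, which may recurse once for Epsilon points. The loop skeleton, abstracted away from the engine types:

#include <cstdio>
#include <deque>

struct WorkItem { int id; };

static unsigned NumReachedMaxSteps = 0;  // stand-in for the STATISTIC counter

void dispatch(const WorkItem &w, std::deque<WorkItem> &wl) {
  // A real engine switches on the program-point kind here; we just fan out.
  if (w.id < 3)
    wl.push_back({w.id + 1});
}

bool executeWorkList(std::deque<WorkItem> &wl, unsigned steps,
                     bool unlimited) {
  while (!wl.empty()) {
    if (!unlimited) {
      if (steps == 0) {        // budget exhausted: record it and bail out,
        ++NumReachedMaxSteps;  // leaving the remaining work unprocessed
        break;
      }
      --steps;
    }
    WorkItem w = wl.front();
    wl.pop_front();
    dispatch(w, wl);
  }
  return !wl.empty();  // true means we stopped with work remaining
}

int main() {
  std::deque<WorkItem> wl{{0}};
  bool remaining = executeWorkList(wl, 2, /*unlimited=*/false);
  std::printf("remaining=%d maxed=%u\n", remaining, NumReachedMaxSteps);
}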
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
index e1b982c..b5ea3db 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
@@ -19,7 +20,7 @@
using namespace clang;
using namespace ento;
-SVal Environment::lookupExpr(const Stmt *E) const {
+SVal Environment::lookupExpr(const EnvironmentEntry &E) const {
const SVal* X = ExprBindings.lookup(E);
if (X) {
SVal V = *X;
@@ -28,17 +29,21 @@ SVal Environment::lookupExpr(const Stmt *E) const {
return UnknownVal();
}
-SVal Environment::getSVal(const Stmt *E, SValBuilder& svalBuilder,
- bool useOnlyDirectBindings) const {
+SVal Environment::getSVal(const EnvironmentEntry &Entry,
+ SValBuilder& svalBuilder,
+ bool useOnlyDirectBindings) const {
if (useOnlyDirectBindings) {
// This branch is rarely taken, but can be exercised by
// checkers that explicitly bind values to arbitrary
// expressions. It is crucial that we do not ignore any
// expression here, and do a direct lookup.
- return lookupExpr(E);
+ return lookupExpr(Entry);
}
+ const Stmt *E = Entry.getStmt();
+ const LocationContext *LCtx = Entry.getLocationContext();
+
for (;;) {
if (const Expr *Ex = dyn_cast<Expr>(E))
E = Ex->IgnoreParens();
@@ -55,13 +60,12 @@ SVal Environment::getSVal(const Stmt *E, SValBuilder& svalBuilder,
case Stmt::GenericSelectionExprClass:
llvm_unreachable("ParenExprs and GenericSelectionExprs should "
"have been handled by IgnoreParens()");
- return UnknownVal();
case Stmt::CharacterLiteralClass: {
const CharacterLiteral* C = cast<CharacterLiteral>(E);
return svalBuilder.makeIntVal(C->getValue(), C->getType());
}
case Stmt::CXXBoolLiteralExprClass: {
- const SVal *X = ExprBindings.lookup(E);
+ const SVal *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx));
if (X)
return *X;
else
@@ -69,12 +73,15 @@ SVal Environment::getSVal(const Stmt *E, SValBuilder& svalBuilder,
}
case Stmt::IntegerLiteralClass: {
// In C++, this expression may have been bound to a temporary object.
- SVal const *X = ExprBindings.lookup(E);
+ SVal const *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx));
if (X)
return *X;
else
return svalBuilder.makeIntVal(cast<IntegerLiteral>(E));
}
+ case Stmt::ObjCBoolLiteralExprClass:
+ return svalBuilder.makeBoolVal(cast<ObjCBoolLiteralExpr>(E));
+
    // For the special C++0x nullptr case, make a null pointer SVal.
case Stmt::CXXNullPtrLiteralExprClass:
return svalBuilder.makeNull();
@@ -86,6 +93,24 @@ SVal Environment::getSVal(const Stmt *E, SValBuilder& svalBuilder,
continue;
case Stmt::ObjCPropertyRefExprClass:
return loc::ObjCPropRef(cast<ObjCPropertyRefExpr>(E));
+ case Stmt::ObjCStringLiteralClass: {
+ MemRegionManager &MRMgr = svalBuilder.getRegionManager();
+ const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(E);
+ return svalBuilder.makeLoc(MRMgr.getObjCStringRegion(SL));
+ }
+ case Stmt::StringLiteralClass: {
+ MemRegionManager &MRMgr = svalBuilder.getRegionManager();
+ const StringLiteral *SL = cast<StringLiteral>(E);
+ return svalBuilder.makeLoc(MRMgr.getStringRegion(SL));
+ }
+ case Stmt::ReturnStmtClass: {
+ const ReturnStmt *RS = cast<ReturnStmt>(E);
+ if (const Expr *RE = RS->getRetValue()) {
+ E = RE;
+ continue;
+ }
+ return UndefinedVal();
+ }
// Handle all other Stmt* using a lookup.
default:
@@ -93,32 +118,33 @@ SVal Environment::getSVal(const Stmt *E, SValBuilder& svalBuilder,
};
break;
}
- return lookupExpr(E);
+ return lookupExpr(EnvironmentEntry(E, LCtx));
}
-Environment EnvironmentManager::bindExpr(Environment Env, const Stmt *S,
- SVal V, bool Invalidate) {
- assert(S);
-
+Environment EnvironmentManager::bindExpr(Environment Env,
+ const EnvironmentEntry &E,
+ SVal V,
+ bool Invalidate) {
if (V.isUnknown()) {
if (Invalidate)
- return Environment(F.remove(Env.ExprBindings, S));
+ return Environment(F.remove(Env.ExprBindings, E));
else
return Env;
}
-
- return Environment(F.add(Env.ExprBindings, S, V));
+ return Environment(F.add(Env.ExprBindings, E, V));
}
-static inline const Stmt *MakeLocation(const Stmt *S) {
- return (const Stmt*) (((uintptr_t) S) | 0x1);
+static inline EnvironmentEntry MakeLocation(const EnvironmentEntry &E) {
+ const Stmt *S = E.getStmt();
+ S = (const Stmt*) (((uintptr_t) S) | 0x1);
+ return EnvironmentEntry(S, E.getLocationContext());
}
Environment EnvironmentManager::bindExprAndLocation(Environment Env,
- const Stmt *S,
+ const EnvironmentEntry &E,
SVal location, SVal V) {
- return Environment(F.add(F.add(Env.ExprBindings, MakeLocation(S), location),
- S, V));
+ return Environment(F.add(F.add(Env.ExprBindings, MakeLocation(E), location),
+ E, V));
}
namespace {
@@ -126,14 +152,22 @@ class MarkLiveCallback : public SymbolVisitor {
SymbolReaper &SymReaper;
public:
MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
- bool VisitSymbol(SymbolRef sym) { SymReaper.markLive(sym); return true; }
+ bool VisitSymbol(SymbolRef sym) {
+ SymReaper.markLive(sym);
+ return true;
+ }
+ bool VisitMemRegion(const MemRegion *R) {
+ SymReaper.markLive(R);
+ return true;
+ }
};
} // end anonymous namespace
-// In addition to mapping from Stmt * - > SVals in the Environment, we also
-// maintain a mapping from Stmt * -> SVals (locations) that were used during
-// a load and store.
-static inline bool IsLocation(const Stmt *S) {
+// In addition to mapping from EnvironmentEntry -> SVals in the Environment,
+// we also maintain a mapping from EnvironmentEntry -> SVals (locations)
+// that were used during a load and store.
+static inline bool IsLocation(const EnvironmentEntry &E) {
+ const Stmt *S = E.getStmt();
return (bool) (((uintptr_t) S) & 0x1);
}
@@ -147,19 +181,19 @@ static inline bool IsLocation(const Stmt *S) {
Environment
EnvironmentManager::removeDeadBindings(Environment Env,
SymbolReaper &SymReaper,
- const ProgramState *ST) {
+ ProgramStateRef ST) {
// We construct a new Environment object entirely, as this is cheaper than
// individually removing all the subexpression bindings (which will greatly
// outnumber block-level expression bindings).
Environment NewEnv = getInitialEnvironment();
- SmallVector<std::pair<const Stmt*, SVal>, 10> deferredLocations;
+ SmallVector<std::pair<EnvironmentEntry, SVal>, 10> deferredLocations;
MarkLiveCallback CB(SymReaper);
ScanReachableSymbols RSScaner(ST, CB);
- llvm::ImmutableMapRef<const Stmt*,SVal>
+ llvm::ImmutableMapRef<EnvironmentEntry,SVal>
EBMapRef(NewEnv.ExprBindings.getRootWithoutRetain(),
F.getTreeFactory());
@@ -167,7 +201,7 @@ EnvironmentManager::removeDeadBindings(Environment Env,
for (Environment::iterator I = Env.begin(), E = Env.end();
I != E; ++I) {
- const Stmt *BlkExpr = I.getKey();
+ const EnvironmentEntry &BlkExpr = I.getKey();
// For recorded locations (used when evaluating loads and stores), we
// consider them live only when their associated normal expression is
// also live.
@@ -179,7 +213,7 @@ EnvironmentManager::removeDeadBindings(Environment Env,
}
const SVal &X = I.getData();
- if (SymReaper.isLive(BlkExpr)) {
+ if (SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext())) {
// Copy the binding to the new map.
EBMapRef = EBMapRef.add(BlkExpr, X);
@@ -204,13 +238,58 @@ EnvironmentManager::removeDeadBindings(Environment Env,
  // Go through the deferred locations and add them to the new environment if
  // the corresponding Stmt* is in the map as well.
- for (SmallVectorImpl<std::pair<const Stmt*, SVal> >::iterator
+ for (SmallVectorImpl<std::pair<EnvironmentEntry, SVal> >::iterator
I = deferredLocations.begin(), E = deferredLocations.end(); I != E; ++I) {
- const Stmt *S = (Stmt*) (((uintptr_t) I->first) & (uintptr_t) ~0x1);
- if (EBMapRef.lookup(S))
- EBMapRef = EBMapRef.add(I->first, I->second);
+ const EnvironmentEntry &En = I->first;
+ const Stmt *S = (Stmt*) (((uintptr_t) En.getStmt()) & (uintptr_t) ~0x1);
+ if (EBMapRef.lookup(EnvironmentEntry(S, En.getLocationContext())))
+ EBMapRef = EBMapRef.add(En, I->second);
}
NewEnv.ExprBindings = EBMapRef.asImmutableMap();
return NewEnv;
}
+
+void Environment::print(raw_ostream &Out, const char *NL,
+ const char *Sep) const {
+ printAux(Out, false, NL, Sep);
+ printAux(Out, true, NL, Sep);
+}
+
+void Environment::printAux(raw_ostream &Out, bool printLocations,
+ const char *NL,
+                           const char *Sep) const {
+
+ bool isFirst = true;
+
+ for (Environment::iterator I = begin(), E = end(); I != E; ++I) {
+ const EnvironmentEntry &En = I.getKey();
+ if (IsLocation(En)) {
+ if (!printLocations)
+ continue;
+ }
+ else {
+ if (printLocations)
+ continue;
+ }
+
+ if (isFirst) {
+ Out << NL << NL
+ << (printLocations ? "Load/Store locations:" : "Expressions:")
+ << NL;
+ isFirst = false;
+ } else {
+ Out << NL;
+ }
+
+ const Stmt *S = En.getStmt();
+ if (printLocations) {
+ S = (Stmt*) (((uintptr_t) S) & ((uintptr_t) ~0x1));
+ }
+
+ Out << " (" << (void*) En.getLocationContext() << ',' << (void*) S << ") ";
+ LangOptions LO; // FIXME.
+ S->printPretty(Out, 0, PrintingPolicy(LO));
+ Out << " : " << I.getData();
+ }
+}
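MakeLocation and IsLocation above keep one map for both kinds of bindings by smuggling a flag into bit 0 of the Stmt pointer, which is safe because AST nodes are more than 1-byte aligned. The trick in isolation, as a sketch that assumes the pointee's alignment leaves bit 0 spare (as the analyzer does):

#include <cassert>
#include <cstdint>
#include <iostream>

// Tag bit 0 of a pointer to encode "this binding is a load/store location".
// Only valid when alignof(T) > 1, so bit 0 of a real pointer is always 0.
template <typename T> T *tagLocation(T *p) {
  static_assert(alignof(T) > 1, "no spare low bit to tag");
  return reinterpret_cast<T *>(reinterpret_cast<std::uintptr_t>(p) | 0x1);
}
template <typename T> bool isLocation(T *p) {
  return reinterpret_cast<std::uintptr_t>(p) & 0x1;
}
template <typename T> T *stripTag(T *p) {
  return reinterpret_cast<T *>(reinterpret_cast<std::uintptr_t>(p) &
                               ~std::uintptr_t(0x1));
}

int main() {
  int value = 7;  // int: alignment >= 4 on common targets
  int *tagged = tagLocation(&value);
  assert(isLocation(tagged) && !isLocation(&value));
  std::cout << *stripTag(tagged) << '\n';  // 7: original pointer recovered
}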
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 5762a21..0dcbe1f 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -15,6 +15,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/AST/Stmt.h"
+#include "clang/AST/ParentMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -44,25 +45,18 @@ void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
// Cleanup.
//===----------------------------------------------------------------------===//
-typedef std::vector<ExplodedNode*> NodeList;
-static inline NodeList*& getNodeList(void *&p) { return (NodeList*&) p; }
+static const unsigned CounterTop = 1000;
-ExplodedGraph::~ExplodedGraph() {
- if (reclaimNodes) {
- delete getNodeList(recentlyAllocatedNodes);
- delete getNodeList(freeNodes);
- }
-}
+ExplodedGraph::ExplodedGraph()
+ : NumNodes(0), reclaimNodes(false), reclaimCounter(CounterTop) {}
+
+ExplodedGraph::~ExplodedGraph() {}
//===----------------------------------------------------------------------===//
// Node reclamation.
//===----------------------------------------------------------------------===//
-void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
- if (!recentlyAllocatedNodes)
- return;
- NodeList &nl = *getNodeList(recentlyAllocatedNodes);
-
+bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
  // Reclaim all nodes that match *all* the following criteria:
//
// (1) 1 predecessor (that has one successor)
@@ -72,61 +66,86 @@ void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
// (5) The 'store' is the same as the predecessor.
// (6) The 'GDM' is the same as the predecessor.
// (7) The LocationContext is the same as the predecessor.
- // (8) The PostStmt is for a non-CFGElement expression.
-
- for (NodeList::iterator i = nl.begin(), e = nl.end() ; i != e; ++i) {
- ExplodedNode *node = *i;
-
- // Conditions 1 and 2.
- if (node->pred_size() != 1 || node->succ_size() != 1)
- continue;
+ // (8) The PostStmt is for a non-consumed Stmt or Expr.
- ExplodedNode *pred = *(node->pred_begin());
- if (pred->succ_size() != 1)
- continue;
+ // Conditions 1 and 2.
+ if (node->pred_size() != 1 || node->succ_size() != 1)
+ return false;
- ExplodedNode *succ = *(node->succ_begin());
- if (succ->pred_size() != 1)
- continue;
+ const ExplodedNode *pred = *(node->pred_begin());
+ if (pred->succ_size() != 1)
+ return false;
+
+ const ExplodedNode *succ = *(node->succ_begin());
+ if (succ->pred_size() != 1)
+ return false;
+
+ // Condition 3.
+ ProgramPoint progPoint = node->getLocation();
+ if (!isa<PostStmt>(progPoint) ||
+ (isa<CallEnter>(progPoint) || isa<CallExit>(progPoint)))
+ return false;
+
+ // Condition 4.
+ PostStmt ps = cast<PostStmt>(progPoint);
+ if (ps.getTag())
+ return false;
+
+ if (isa<BinaryOperator>(ps.getStmt()))
+ return false;
+
+ // Conditions 5, 6, and 7.
+ ProgramStateRef state = node->getState();
+ ProgramStateRef pred_state = pred->getState();
+ if (state->store != pred_state->store || state->GDM != pred_state->GDM ||
+ progPoint.getLocationContext() != pred->getLocationContext())
+ return false;
+
+ // Condition 8.
+ if (const Expr *Ex = dyn_cast<Expr>(ps.getStmt())) {
+ ParentMap &PM = progPoint.getLocationContext()->getParentMap();
+ if (!PM.isConsumedExpr(Ex))
+ return false;
+ }
+
+ return true;
+}
- // Condition 3.
- ProgramPoint progPoint = node->getLocation();
- if (!isa<PostStmt>(progPoint))
- continue;
- // Condition 4.
- PostStmt ps = cast<PostStmt>(progPoint);
- if (ps.getTag())
- continue;
+void ExplodedGraph::collectNode(ExplodedNode *node) {
+  // Removing a node means:
+  // (a) changing the predecessor's successor to the successor of this node,
+  // (b) changing the successor's predecessor to the predecessor of this node,
+  // (c) putting 'node' onto FreeNodes.
+ assert(node->pred_size() == 1 || node->succ_size() == 1);
+ ExplodedNode *pred = *(node->pred_begin());
+ ExplodedNode *succ = *(node->succ_begin());
+ pred->replaceSuccessor(succ);
+ succ->replacePredecessor(pred);
+ FreeNodes.push_back(node);
+ Nodes.RemoveNode(node);
+ --NumNodes;
+ node->~ExplodedNode();
+}
- if (isa<BinaryOperator>(ps.getStmt()))
- continue;
+void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
+ if (ChangedNodes.empty())
+ return;
- // Conditions 5, 6, and 7.
- const ProgramState *state = node->getState();
- const ProgramState *pred_state = pred->getState();
- if (state->store != pred_state->store || state->GDM != pred_state->GDM ||
- progPoint.getLocationContext() != pred->getLocationContext())
- continue;
+  // Only periodically reclaim nodes so that we can build up a set of
+  // nodes that meet the reclamation criteria. Freshly created nodes
+  // by definition have no successor, and thus cannot be reclaimed (see below).
+ assert(reclaimCounter > 0);
+ if (--reclaimCounter != 0)
+ return;
+ reclaimCounter = CounterTop;
- // Condition 8.
- if (node->getCFG().isBlkExpr(ps.getStmt()))
- continue;
-
- // If we reach here, we can remove the node. This means:
- // (a) changing the predecessors successor to the successor of this node
- // (b) changing the successors predecessor to the predecessor of this node
- // (c) Putting 'node' onto freeNodes.
- pred->replaceSuccessor(succ);
- succ->replacePredecessor(pred);
- if (!freeNodes)
- freeNodes = new NodeList();
- getNodeList(freeNodes)->push_back(node);
- Nodes.RemoveNode(node);
- --NumNodes;
- node->~ExplodedNode();
+ for (NodeVector::iterator it = ChangedNodes.begin(), et = ChangedNodes.end();
+ it != et; ++it) {
+ ExplodedNode *node = *it;
+ if (shouldCollect(node))
+ collectNode(node);
}
-
- nl.clear();
+ ChangedNodes.clear();
}
//===----------------------------------------------------------------------===//
@@ -215,36 +234,33 @@ ExplodedNode** ExplodedNode::NodeGroup::end() const {
}
ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
- const ProgramState *State, bool* IsNew) {
+ ProgramStateRef State,
+ bool IsSink,
+ bool* IsNew) {
// Profile 'State' to determine if we already have an existing node.
llvm::FoldingSetNodeID profile;
void *InsertPos = 0;
- NodeTy::Profile(profile, L, State);
+ NodeTy::Profile(profile, L, State, IsSink);
NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos);
if (!V) {
- if (freeNodes && !getNodeList(freeNodes)->empty()) {
- NodeList *nl = getNodeList(freeNodes);
- V = nl->back();
- nl->pop_back();
+ if (!FreeNodes.empty()) {
+ V = FreeNodes.back();
+ FreeNodes.pop_back();
}
else {
// Allocate a new node.
V = (NodeTy*) getAllocator().Allocate<NodeTy>();
}
- new (V) NodeTy(L, State);
+ new (V) NodeTy(L, State, IsSink);
- if (reclaimNodes) {
- if (!recentlyAllocatedNodes)
- recentlyAllocatedNodes = new NodeList();
- getNodeList(recentlyAllocatedNodes)->push_back(V);
- }
+ if (reclaimNodes)
+ ChangedNodes.push_back(V);
// Insert the node into the node set and return it.
Nodes.InsertNode(V, InsertPos);
-
++NumNodes;
if (IsNew) *IsNew = true;
@@ -265,7 +281,7 @@ ExplodedGraph::Trim(const NodeTy* const* NBeg, const NodeTy* const* NEnd,
assert (NBeg < NEnd);
- llvm::OwningPtr<InterExplodedGraphMap> M(new InterExplodedGraphMap());
+ OwningPtr<InterExplodedGraphMap> M(new InterExplodedGraphMap());
ExplodedGraph* G = TrimInternal(NBeg, NEnd, M.get(), InverseMap);
@@ -334,7 +350,7 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
// Create the corresponding node in the new graph and record the mapping
// from the old node to the new node.
- ExplodedNode *NewN = G->getNode(N->getLocation(), N->State, NULL);
+ ExplodedNode *NewN = G->getNode(N->getLocation(), N->State, N->isSink(), 0);
Pass2[N] = NewN;
// Also record the reverse mapping from the new node to the old node.
@@ -372,15 +388,13 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
if (Pass1.count(*I))
WL2.push_back(*I);
}
-
- // Finally, explicitly mark all nodes without any successors as sinks.
- if (N->isSink())
- NewN->markAsSink();
}
return G;
}
+void InterExplodedGraphMap::anchor() { }
+
ExplodedNode*
InterExplodedGraphMap::getMappedNode(const ExplodedNode *N) const {
llvm::DenseMap<const ExplodedNode*, ExplodedNode*>::const_iterator I =
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index ac9cf0b..d2da9aa 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -13,22 +13,24 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "ExprEngine"
+
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngineBuilders.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtCXX.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/SourceManager.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/Statistic.h"
#ifndef NDEBUG
#include "llvm/Support/GraphWriter.h"
@@ -38,6 +40,19 @@ using namespace clang;
using namespace ento;
using llvm::APSInt;
+STATISTIC(NumRemoveDeadBindings,
+ "The # of times RemoveDeadBindings is called");
+STATISTIC(NumRemoveDeadBindingsSkipped,
+ "The # of times RemoveDeadBindings is skipped");
+STATISTIC(NumMaxBlockCountReached,
+ "The # of aborted paths due to reaching the maximum block count in "
+ "a top level function");
+STATISTIC(NumMaxBlockCountReachedInInlined,
+ "The # of aborted paths due to reaching the maximum block count in "
+ "an inlined function");
+STATISTIC(NumTimesRetriedWithoutInlining,
+ "The # of times we re-evaluated a call without inlining");
+
//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
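These counters use LLVM's STATISTIC facility; the macro expands to a static llvm::Statistic tagged with DEBUG_TYPE, which is why the #define was added at the top of the file before any STATISTIC use. A minimal self-contained usage sketch with a hypothetical counter:

#define DEBUG_TYPE "example"   // grouping label shown in -stats output
#include "llvm/ADT/Statistic.h"

STATISTIC(NumThingsProcessed, "The # of things processed");

static void processThing() {
  ++NumThingsProcessed;        // reported when -stats is enabled
}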
@@ -51,17 +66,20 @@ static inline Selector GetNullarySelector(const char* name, ASTContext &Ctx) {
// Engine construction and deletion.
//===----------------------------------------------------------------------===//
-ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled)
+ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled,
+ SetOfConstDecls *VisitedCallees,
+ FunctionSummariesTy *FS)
: AMgr(mgr),
- Engine(*this),
+ AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
+ Engine(*this, VisitedCallees, FS),
G(Engine.getGraph()),
- Builder(NULL),
StateMgr(getContext(), mgr.getStoreManagerCreator(),
mgr.getConstraintManagerCreator(), G.getAllocator(),
*this),
SymMgr(StateMgr.getSymbolManager()),
svalBuilder(StateMgr.getSValBuilder()),
- EntryNode(NULL), currentStmt(NULL),
+ EntryNode(NULL),
+ currentStmt(NULL), currentStmtIdx(0), currentBuilderContext(0),
NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
RaiseSel(GetNullarySelector("raise", getContext())),
ObjCGCEnabled(gcEnabled), BR(mgr, *this) {
@@ -81,15 +99,15 @@ ExprEngine::~ExprEngine() {
// Utility methods.
//===----------------------------------------------------------------------===//
-const ProgramState *ExprEngine::getInitialState(const LocationContext *InitLoc) {
- const ProgramState *state = StateMgr.getInitialState(InitLoc);
+ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
+ ProgramStateRef state = StateMgr.getInitialState(InitLoc);
+ const Decl *D = InitLoc->getDecl();
// Preconditions.
-
// FIXME: It would be nice if we had a more general mechanism to add
// such preconditions. Some day.
do {
- const Decl *D = InitLoc->getDecl();
+
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// Precondition: the first argument of 'main' is an integer guaranteed
// to be > 0.
@@ -117,49 +135,45 @@ const ProgramState *ExprEngine::getInitialState(const LocationContext *InitLoc)
if (!Constraint)
break;
- if (const ProgramState *newState = state->assume(*Constraint, true))
+ if (ProgramStateRef newState = state->assume(*Constraint, true))
state = newState;
-
- break;
}
-
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- // Precondition: 'self' is always non-null upon entry to an Objective-C
- // method.
- const ImplicitParamDecl *SelfD = MD->getSelfDecl();
- const MemRegion *R = state->getRegion(SelfD, InitLoc);
- SVal V = state->getSVal(loc::MemRegionVal(R));
-
- if (const Loc *LV = dyn_cast<Loc>(&V)) {
- // Assume that the pointer value in 'self' is non-null.
- state = state->assume(*LV, true);
- assert(state && "'self' cannot be null");
- }
+ break;
+ }
+ while (0);
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ // Precondition: 'self' is always non-null upon entry to an Objective-C
+ // method.
+ const ImplicitParamDecl *SelfD = MD->getSelfDecl();
+ const MemRegion *R = state->getRegion(SelfD, InitLoc);
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+
+ if (const Loc *LV = dyn_cast<Loc>(&V)) {
+ // Assume that the pointer value in 'self' is non-null.
+ state = state->assume(*LV, true);
+ assert(state && "'self' cannot be null");
}
- } while (0);
-
- return state;
-}
+ }
-bool
-ExprEngine::doesInvalidateGlobals(const CallOrObjCMessage &callOrMessage) const
-{
- if (callOrMessage.isFunctionCall() && !callOrMessage.isCXXCall()) {
- SVal calleeV = callOrMessage.getFunctionCallee();
- if (const FunctionTextRegion *codeR =
- dyn_cast_or_null<FunctionTextRegion>(calleeV.getAsRegion())) {
-
- const FunctionDecl *fd = codeR->getDecl();
- if (const IdentifierInfo *ii = fd->getIdentifier()) {
- StringRef fname = ii->getName();
- if (fname == "strlen")
- return false;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (!MD->isStatic()) {
+ // Precondition: 'this' is always non-null upon entry to the
+ // top-level function. This is our starting assumption for
+ // analyzing an "open" program.
+ const StackFrameContext *SFC = InitLoc->getCurrentStackFrame();
+ if (SFC->getParent() == 0) {
+ loc::MemRegionVal L(getCXXThisRegion(MD, SFC));
+ SVal V = state->getSVal(L);
+ if (const Loc *LV = dyn_cast<Loc>(&V)) {
+ state = state->assume(*LV, true);
+ assert(state && "'this' cannot be null");
+ }
}
}
}
-
- // The conservative answer: invalidates globals.
- return true;
+
+ return state;
}
//===----------------------------------------------------------------------===//
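The rewritten getInitialState() above applies one pattern three times, for main's argc, Objective-C self, and C++ this: read the entry value, assume the precondition holds, and keep the constrained state. A fragment sketch of the core step, assuming V is an SVal already read from the relevant region:

// Fragment (analyzer types assumed): constrain an entry pointer to be
// non-null; assume() returns a null state if the constraint is infeasible.
if (const Loc *LV = dyn_cast<Loc>(&V)) {
  ProgramStateRef Constrained = state->assume(*LV, true);
  assert(Constrained && "entry pointer cannot be null");
  state = Constrained;
}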
@@ -168,25 +182,26 @@ ExprEngine::doesInvalidateGlobals(const CallOrObjCMessage &callOrMessage) const
/// evalAssume - Called by ConstraintManager. Used to call checker-specific
/// logic for handling assumptions on symbolic values.
-const ProgramState *ExprEngine::processAssume(const ProgramState *state,
+ProgramStateRef ExprEngine::processAssume(ProgramStateRef state,
SVal cond, bool assumption) {
return getCheckerManager().runCheckersForEvalAssume(state, cond, assumption);
}
-bool ExprEngine::wantsRegionChangeUpdate(const ProgramState *state) {
+bool ExprEngine::wantsRegionChangeUpdate(ProgramStateRef state) {
return getCheckerManager().wantsRegionChangeUpdate(state);
}
-const ProgramState *
-ExprEngine::processRegionChanges(const ProgramState *state,
+ProgramStateRef
+ExprEngine::processRegionChanges(ProgramStateRef state,
const StoreManager::InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> Explicits,
- ArrayRef<const MemRegion *> Regions) {
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) {
return getCheckerManager().runCheckersForRegionChanges(state, invalidated,
- Explicits, Regions);
+ Explicits, Regions, Call);
}
-void ExprEngine::printState(raw_ostream &Out, const ProgramState *State,
+void ExprEngine::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) {
getCheckerManager().runCheckersForPrintState(Out, State, NL, Sep);
}
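Nearly every signature in this hunk and below migrates from 'const ProgramState *' to ProgramStateRef, a reference-counted handle. That change is what lets the manual recycleUnusedStates() pass disappear later in this diff, since state lifetime now follows outstanding references. Roughly, the handle amounts to:

// Approximate declaration behind the migration (an intrusive
// reference-counted smart pointer over the immutable state object).
typedef llvm::IntrusiveRefCntPtr<const ProgramState> ProgramStateRef;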
@@ -195,54 +210,77 @@ void ExprEngine::processEndWorklist(bool hasWorkRemaining) {
getCheckerManager().runCheckersForEndAnalysis(G, BR, *this);
}
-void ExprEngine::processCFGElement(const CFGElement E,
- StmtNodeBuilder& builder) {
+void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
+ unsigned StmtIdx, NodeBuilderContext *Ctx) {
+ currentStmtIdx = StmtIdx;
+ currentBuilderContext = Ctx;
+
switch (E.getKind()) {
case CFGElement::Invalid:
llvm_unreachable("Unexpected CFGElement kind.");
case CFGElement::Statement:
- ProcessStmt(const_cast<Stmt*>(E.getAs<CFGStmt>()->getStmt()), builder);
+ ProcessStmt(const_cast<Stmt*>(E.getAs<CFGStmt>()->getStmt()), Pred);
return;
case CFGElement::Initializer:
- ProcessInitializer(E.getAs<CFGInitializer>()->getInitializer(), builder);
+ ProcessInitializer(E.getAs<CFGInitializer>()->getInitializer(), Pred);
return;
case CFGElement::AutomaticObjectDtor:
case CFGElement::BaseDtor:
case CFGElement::MemberDtor:
case CFGElement::TemporaryDtor:
- ProcessImplicitDtor(*E.getAs<CFGImplicitDtor>(), builder);
+ ProcessImplicitDtor(*E.getAs<CFGImplicitDtor>(), Pred);
return;
}
}
-void ExprEngine::ProcessStmt(const CFGStmt S, StmtNodeBuilder& builder) {
- // TODO: Use RAII to remove the unnecessary, tagged nodes.
- //RegisterCreatedNodes registerCreatedNodes(getGraph());
+static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
+ const CFGStmt S,
+ const ExplodedNode *Pred,
+ const LocationContext *LC) {
+
+ // Are we never purging state values?
+ if (AMgr.getPurgeMode() == PurgeNone)
+ return false;
+
+ // Is this the beginning of a basic block?
+ if (isa<BlockEntrance>(Pred->getLocation()))
+ return true;
+  // Is this a non-expression?
+ if (!isa<Expr>(S.getStmt()))
+ return true;
+
+ // Run before processing a call.
+ if (isa<CallExpr>(S.getStmt()))
+ return true;
+
+ // Is this an expression that is consumed by another expression? If so,
+ // postpone cleaning out the state.
+ ParentMap &PM = LC->getAnalysisDeclContext()->getParentMap();
+ return !PM.isConsumedExpr(cast<Expr>(S.getStmt()));
+}
+
+void ExprEngine::ProcessStmt(const CFGStmt S,
+ ExplodedNode *Pred) {
// Reclaim any unnecessary nodes in the ExplodedGraph.
G.reclaimRecentlyAllocatedNodes();
- // Recycle any unused states in the ProgramStateManager.
- StateMgr.recycleUnusedStates();
currentStmt = S.getStmt();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
currentStmt->getLocStart(),
"Error evaluating statement");
- // A tag to track convenience transitions, which can be removed at cleanup.
- static SimpleProgramPointTag cleanupTag("ExprEngine : Clean Node");
- Builder = &builder;
- EntryNode = builder.getPredecessor();
+ EntryNode = Pred;
- const ProgramState *EntryState = EntryNode->getState();
+ ProgramStateRef EntryState = EntryNode->getState();
CleanedState = EntryState;
- ExplodedNode *CleanedNode = 0;
// Create the cleaned state.
const LocationContext *LC = EntryNode->getLocationContext();
SymbolReaper SymReaper(LC, currentStmt, SymMgr, getStoreManager());
- if (AMgr.getPurgeMode() != PurgeNone) {
+ if (shouldRemoveDeadBindings(AMgr, S, Pred, LC)) {
+ NumRemoveDeadBindings++;
getCheckerManager().runCheckersForLiveSymbols(CleanedState, SymReaper);
const StackFrameContext *SFC = LC->getCurrentStackFrame();
@@ -251,25 +289,23 @@ void ExprEngine::ProcessStmt(const CFGStmt S, StmtNodeBuilder& builder) {
// and the store. TODO: The function should just return new env and store,
// not a new state.
CleanedState = StateMgr.removeDeadBindings(CleanedState, SFC, SymReaper);
+ } else {
+ NumRemoveDeadBindingsSkipped++;
}
// Process any special transfer function for dead symbols.
ExplodedNodeSet Tmp;
+ // A tag to track convenience transitions, which can be removed at cleanup.
+ static SimpleProgramPointTag cleanupTag("ExprEngine : Clean Node");
+
if (!SymReaper.hasDeadSymbols()) {
// Generate a CleanedNode that has the environment and store cleaned
// up. Since no symbols are dead, we can optimize and not clean out
// the constraint manager.
- CleanedNode =
- Builder->generateNode(currentStmt, CleanedState, EntryNode, &cleanupTag);
- Tmp.Add(CleanedNode);
+ StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
+ Bldr.generateNode(currentStmt, EntryNode, CleanedState, false, &cleanupTag);
} else {
- SaveAndRestore<bool> OldSink(Builder->BuildSinks);
- SaveOr OldHasGen(Builder->hasGeneratedNode);
-
- SaveAndRestore<bool> OldPurgeDeadSymbols(Builder->PurgingDeadSymbols);
- Builder->PurgingDeadSymbols = true;
-
// Call checkers with the non-cleaned state so that they could query the
// values of the soon to be dead symbols.
ExplodedNodeSet CheckedSet;
@@ -279,9 +315,10 @@ void ExprEngine::ProcessStmt(const CFGStmt S, StmtNodeBuilder& builder) {
// For each node in CheckedSet, generate CleanedNodes that have the
// environment, the store, and the constraints cleaned up but have the
// user-supplied states as the predecessors.
+ StmtNodeBuilder Bldr(CheckedSet, Tmp, *currentBuilderContext);
for (ExplodedNodeSet::const_iterator
I = CheckedSet.begin(), E = CheckedSet.end(); I != E; ++I) {
- const ProgramState *CheckerState = (*I)->getState();
+ ProgramStateRef CheckerState = (*I)->getState();
// The constraint manager has not been cleaned up yet, so clean up now.
CheckerState = getConstraintManager().removeDeadBindings(CheckerState,
@@ -296,109 +333,109 @@ void ExprEngine::ProcessStmt(const CFGStmt S, StmtNodeBuilder& builder) {
// Create a state based on CleanedState with CheckerState GDM and
// generate a transition to that state.
- const ProgramState *CleanedCheckerSt =
+ ProgramStateRef CleanedCheckerSt =
StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
- ExplodedNode *CleanedNode = Builder->generateNode(currentStmt,
- CleanedCheckerSt, *I,
- &cleanupTag);
- Tmp.Add(CleanedNode);
+ Bldr.generateNode(currentStmt, *I, CleanedCheckerSt, false, &cleanupTag,
+ ProgramPoint::PostPurgeDeadSymbolsKind);
}
}
+ ExplodedNodeSet Dst;
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
- // TODO: Remove Dest set, it's no longer needed.
- ExplodedNodeSet Dst;
+ ExplodedNodeSet DstI;
// Visit the statement.
- Visit(currentStmt, *I, Dst);
+ Visit(currentStmt, *I, DstI);
+ Dst.insert(DstI);
}
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
+
// NULL out these variables to cleanup.
CleanedState = NULL;
EntryNode = NULL;
currentStmt = 0;
- Builder = NULL;
}
void ExprEngine::ProcessInitializer(const CFGInitializer Init,
- StmtNodeBuilder &builder) {
+ ExplodedNode *Pred) {
+ ExplodedNodeSet Dst;
+
// We don't set EntryNode and currentStmt. And we don't clean up state.
const CXXCtorInitializer *BMI = Init.getInitializer();
-
- ExplodedNode *pred = builder.getPredecessor();
-
- const StackFrameContext *stackFrame = cast<StackFrameContext>(pred->getLocationContext());
- const CXXConstructorDecl *decl = cast<CXXConstructorDecl>(stackFrame->getDecl());
+ const StackFrameContext *stackFrame =
+ cast<StackFrameContext>(Pred->getLocationContext());
+ const CXXConstructorDecl *decl =
+ cast<CXXConstructorDecl>(stackFrame->getDecl());
const CXXThisRegion *thisReg = getCXXThisRegion(decl, stackFrame);
- SVal thisVal = pred->getState()->getSVal(thisReg);
+ SVal thisVal = Pred->getState()->getSVal(thisReg);
if (BMI->isAnyMemberInitializer()) {
- ExplodedNodeSet Dst;
-
// Evaluate the initializer.
- Visit(BMI->getInit(), pred, Dst);
- for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end(); I != E; ++I){
- ExplodedNode *Pred = *I;
- const ProgramState *state = Pred->getState();
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ ProgramStateRef state = Pred->getState();
- const FieldDecl *FD = BMI->getAnyMember();
+ const FieldDecl *FD = BMI->getAnyMember();
- SVal FieldLoc = state->getLValue(FD, thisVal);
- SVal InitVal = state->getSVal(BMI->getInit());
- state = state->bindLoc(FieldLoc, InitVal);
+ SVal FieldLoc = state->getLValue(FD, thisVal);
+ SVal InitVal = state->getSVal(BMI->getInit(), Pred->getLocationContext());
+ state = state->bindLoc(FieldLoc, InitVal);
- // Use a custom node building process.
- PostInitializer PP(BMI, stackFrame);
- // Builder automatically add the generated node to the deferred set,
- // which are processed in the builder's dtor.
- builder.generateNode(PP, state, Pred);
- }
- return;
- }
+ // Use a custom node building process.
+ PostInitializer PP(BMI, stackFrame);
+    // The builder automatically adds the generated node to the deferred set,
+    // which is processed in the builder's dtor.
+ Bldr.generateNode(PP, Pred, state);
+ } else {
+ assert(BMI->isBaseInitializer());
- assert(BMI->isBaseInitializer());
+ // Get the base class declaration.
+ const CXXConstructExpr *ctorExpr = cast<CXXConstructExpr>(BMI->getInit());
- // Get the base class declaration.
- const CXXConstructExpr *ctorExpr = cast<CXXConstructExpr>(BMI->getInit());
+ // Create the base object region.
+ SVal baseVal =
+ getStoreManager().evalDerivedToBase(thisVal, ctorExpr->getType());
+ const MemRegion *baseReg = baseVal.getAsRegion();
+ assert(baseReg);
+
+ VisitCXXConstructExpr(ctorExpr, baseReg, Pred, Dst);
+ }
- // Create the base object region.
- SVal baseVal =
- getStoreManager().evalDerivedToBase(thisVal, ctorExpr->getType());
- const MemRegion *baseReg = baseVal.getAsRegion();
- assert(baseReg);
- Builder = &builder;
- ExplodedNodeSet dst;
- VisitCXXConstructExpr(ctorExpr, baseReg, pred, dst);
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
}
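ProcessStmt and ProcessInitializer now end identically: visitor logic fills a local ExplodedNodeSet, and the caller explicitly tells the engine where those nodes resume, replacing the old scheme in which a builder destructor enqueued deferred nodes. A fragment of the pattern, with ProcessElement as a hypothetical stand-in:

// Fragment: the caller, not a builder destructor, drives the work list.
ExplodedNodeSet Dst;
ProcessElement(E, Pred, Dst);   // hypothetical per-element visitor
Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);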
void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
- StmtNodeBuilder &builder) {
- Builder = &builder;
-
+ ExplodedNode *Pred) {
+ ExplodedNodeSet Dst;
switch (D.getKind()) {
case CFGElement::AutomaticObjectDtor:
- ProcessAutomaticObjDtor(cast<CFGAutomaticObjDtor>(D), builder);
+ ProcessAutomaticObjDtor(cast<CFGAutomaticObjDtor>(D), Pred, Dst);
break;
case CFGElement::BaseDtor:
- ProcessBaseDtor(cast<CFGBaseDtor>(D), builder);
+ ProcessBaseDtor(cast<CFGBaseDtor>(D), Pred, Dst);
break;
case CFGElement::MemberDtor:
- ProcessMemberDtor(cast<CFGMemberDtor>(D), builder);
+ ProcessMemberDtor(cast<CFGMemberDtor>(D), Pred, Dst);
break;
case CFGElement::TemporaryDtor:
- ProcessTemporaryDtor(cast<CFGTemporaryDtor>(D), builder);
+ ProcessTemporaryDtor(cast<CFGTemporaryDtor>(D), Pred, Dst);
break;
default:
llvm_unreachable("Unexpected dtor kind.");
}
+
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
}
-void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor dtor,
- StmtNodeBuilder &builder) {
- ExplodedNode *pred = builder.getPredecessor();
- const ProgramState *state = pred->getState();
- const VarDecl *varDecl = dtor.getVarDecl();
+void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ProgramStateRef state = Pred->getState();
+ const VarDecl *varDecl = Dtor.getVarDecl();
QualType varType = varDecl->getType();
@@ -409,30 +446,29 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor dtor,
assert(recordDecl && "get CXXRecordDecl fail");
const CXXDestructorDecl *dtorDecl = recordDecl->getDestructor();
- Loc dest = state->getLValue(varDecl, pred->getLocationContext());
+ Loc dest = state->getLValue(varDecl, Pred->getLocationContext());
- ExplodedNodeSet dstSet;
VisitCXXDestructor(dtorDecl, cast<loc::MemRegionVal>(dest).getRegion(),
- dtor.getTriggerStmt(), pred, dstSet);
+ Dtor.getTriggerStmt(), Pred, Dst);
}
void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
- StmtNodeBuilder &builder) {
-}
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {}
void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
- StmtNodeBuilder &builder) {
-}
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {}
void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
- StmtNodeBuilder &builder) {
-}
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {}
void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
+ ExplodedNodeSet &DstTop) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
S->getLocStart(),
"Error evaluating statement");
+ ExplodedNodeSet Dst;
+ StmtNodeBuilder Bldr(Pred, DstTop, *currentBuilderContext);
// Expressions to ignore.
if (const Expr *Ex = dyn_cast<Expr>(S))
@@ -442,19 +478,14 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// this check when we KNOW that there is no block-level subexpression.
// The motivation is that this check requires a hashtable lookup.
- if (S != currentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S)) {
- Dst.Add(Pred);
+ if (S != currentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S))
return;
- }
switch (S->getStmtClass()) {
// C++ and ARC stuff we don't support yet.
case Expr::ObjCIndirectCopyRestoreExprClass:
- case Stmt::CXXBindTemporaryExprClass:
- case Stmt::CXXCatchStmtClass:
case Stmt::CXXDependentScopeMemberExprClass:
case Stmt::CXXPseudoDestructorExprClass:
- case Stmt::CXXThrowExprClass:
case Stmt::CXXTryStmtClass:
case Stmt::CXXTypeidExprClass:
case Stmt::CXXUuidofExprClass:
@@ -463,6 +494,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::DependentScopeDeclRefExprClass:
case Stmt::UnaryTypeTraitExprClass:
case Stmt::BinaryTypeTraitExprClass:
+ case Stmt::TypeTraitExprClass:
case Stmt::ArrayTypeTraitExprClass:
case Stmt::ExpressionTraitExprClass:
case Stmt::UnresolvedLookupExprClass:
@@ -472,22 +504,19 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::SubstNonTypeTemplateParmPackExprClass:
case Stmt::SEHTryStmtClass:
case Stmt::SEHExceptStmtClass:
- case Stmt::SEHFinallyStmtClass:
- {
- SaveAndRestore<bool> OldSink(Builder->BuildSinks);
- Builder->BuildSinks = true;
- const ExplodedNode *node = MakeNode(Dst, S, Pred, Pred->getState());
- Engine.addAbortedBlock(node, Builder->getBlock());
+ case Stmt::LambdaExprClass:
+ case Stmt::SEHFinallyStmtClass: {
+ const ExplodedNode *node = Bldr.generateNode(S, Pred, Pred->getState(),
+ /* sink */ true);
+ Engine.addAbortedBlock(node, currentBuilderContext->getBlock());
break;
}
// We don't handle default arguments either yet, but we can fake it
// for now by just skipping them.
case Stmt::SubstNonTypeTemplateParmExprClass:
- case Stmt::CXXDefaultArgExprClass: {
- Dst.Add(Pred);
+ case Stmt::CXXDefaultArgExprClass:
break;
- }
case Stmt::ParenExprClass:
llvm_unreachable("ParenExprs already handled.");
@@ -511,38 +540,44 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::NullStmtClass:
case Stmt::SwitchStmtClass:
case Stmt::WhileStmtClass:
+ case Expr::MSDependentExistsStmtClass:
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
- break;
case Stmt::GNUNullExprClass: {
// GNU __null is a pointer-width integer, not an actual pointer.
- const ProgramState *state = Pred->getState();
- state = state->BindExpr(S, svalBuilder.makeIntValWithPtrWidth(0, false));
- MakeNode(Dst, S, Pred, state);
+ ProgramStateRef state = Pred->getState();
+ state = state->BindExpr(S, Pred->getLocationContext(),
+ svalBuilder.makeIntValWithPtrWidth(0, false));
+ Bldr.generateNode(S, Pred, state);
break;
}
case Stmt::ObjCAtSynchronizedStmtClass:
+ Bldr.takeNodes(Pred);
VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
+ // FIXME.
+ case Stmt::ObjCSubscriptRefExprClass:
+ break;
+
case Stmt::ObjCPropertyRefExprClass:
// Implicitly handled by Environment::getSVal().
- Dst.Add(Pred);
break;
case Stmt::ImplicitValueInitExprClass: {
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
QualType ty = cast<ImplicitValueInitExpr>(S)->getType();
SVal val = svalBuilder.makeZeroVal(ty);
- MakeNode(Dst, S, Pred, state->BindExpr(S, val));
+ Bldr.generateNode(S, Pred, state->BindExpr(S, Pred->getLocationContext(),
+ val));
break;
}
- case Stmt::ExprWithCleanupsClass: {
- Visit(cast<ExprWithCleanups>(S)->getSubExpr(), Pred, Dst);
+ case Stmt::ExprWithCleanupsClass:
+ // Handled due to fully linearised CFG.
break;
- }
  // Cases not handled yet, but that we will handle some day.
case Stmt::DesignatedInitExprClass:
@@ -556,7 +591,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::ObjCIsaExprClass:
case Stmt::ObjCProtocolExprClass:
case Stmt::ObjCSelectorExprClass:
- case Stmt::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
case Stmt::ParenListExprClass:
case Stmt::PredefinedExprClass:
case Stmt::ShuffleVectorExprClass:
@@ -565,50 +600,101 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OpaqueValueExprClass:
case Stmt::AsTypeExprClass:
case Stmt::AtomicExprClass:
- // Fall through.
+ // Fall through.
+ // Currently all handling of 'throw' just falls to the CFG. We
+ // can consider doing more if necessary.
+ case Stmt::CXXThrowExprClass:
+ // Fall through.
+
// Cases we intentionally don't evaluate, since they don't need
// to be explicitly evaluated.
case Stmt::AddrLabelExprClass:
case Stmt::IntegerLiteralClass:
case Stmt::CharacterLiteralClass:
case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::ObjCBoolLiteralExprClass:
case Stmt::FloatingLiteralClass:
case Stmt::SizeOfPackExprClass:
- case Stmt::CXXNullPtrLiteralExprClass:
- Dst.Add(Pred); // No-op. Simply propagate the current state unchanged.
+ case Stmt::StringLiteralClass:
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::CXXBindTemporaryExprClass:
+ case Stmt::CXXNullPtrLiteralExprClass: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet preVisit;
+ getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
+ getCheckerManager().runCheckersForPostStmt(Dst, preVisit, S, *this);
+ Bldr.addNodes(Dst);
break;
+ }
+
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass: {
+ Bldr.takeNodes(Pred);
+
+ ExplodedNodeSet preVisit;
+ getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
+
+ // FIXME: explicitly model with a region and the actual contents
+ // of the container. For now, conjure a symbol.
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr2(preVisit, Tmp, *currentBuilderContext);
+
+ for (ExplodedNodeSet::iterator it = preVisit.begin(), et = preVisit.end();
+ it != et; ++it) {
+ ExplodedNode *N = *it;
+ const Expr *Ex = cast<Expr>(S);
+ QualType resultType = Ex->getType();
+ const LocationContext *LCtx = N->getLocationContext();
+ SVal result =
+ svalBuilder.getConjuredSymbolVal(0, Ex, LCtx, resultType,
+ currentBuilderContext->getCurrentBlockCount());
+ ProgramStateRef state = N->getState()->BindExpr(Ex, LCtx, result);
+ Bldr2.generateNode(S, N, state);
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
case Stmt::ArraySubscriptExprClass:
+ Bldr.takeNodes(Pred);
VisitLvalArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::AsmStmtClass:
+ Bldr.takeNodes(Pred);
VisitAsmStmt(cast<AsmStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
- case Stmt::BlockDeclRefExprClass: {
- const BlockDeclRefExpr *BE = cast<BlockDeclRefExpr>(S);
- VisitCommonDeclRefExpr(BE, BE->getDecl(), Pred, Dst);
- break;
- }
-
case Stmt::BlockExprClass:
+ Bldr.takeNodes(Pred);
VisitBlockExpr(cast<BlockExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::BinaryOperatorClass: {
const BinaryOperator* B = cast<BinaryOperator>(S);
if (B->isLogicalOp()) {
+ Bldr.takeNodes(Pred);
VisitLogicalExpr(B, Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
else if (B->getOpcode() == BO_Comma) {
- const ProgramState *state = Pred->getState();
- MakeNode(Dst, B, Pred, state->BindExpr(B, state->getSVal(B->getRHS())));
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(B, Pred,
+ state->BindExpr(B, Pred->getLocationContext(),
+ state->getSVal(B->getRHS(),
+ Pred->getLocationContext())));
break;
}
+ Bldr.takeNodes(Pred);
+
if (AMgr.shouldEagerlyAssume() &&
(B->isRelationalOp() || B->isEqualityOp())) {
ExplodedNodeSet Tmp;
@@ -618,13 +704,24 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
else
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
case Stmt::CallExprClass:
case Stmt::CXXOperatorCallExprClass:
- case Stmt::CXXMemberCallExprClass: {
+ case Stmt::CXXMemberCallExprClass:
+ case Stmt::UserDefinedLiteralClass: {
+ Bldr.takeNodes(Pred);
VisitCallExpr(cast<CallExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXCatchStmtClass: {
+ Bldr.takeNodes(Pred);
+ VisitCXXCatchStmt(cast<CXXCatchStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
@@ -633,58 +730,78 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
const CXXConstructExpr *C = cast<CXXConstructExpr>(S);
// For block-level CXXConstructExpr, we don't have a destination region.
// Let VisitCXXConstructExpr() create one.
+ Bldr.takeNodes(Pred);
VisitCXXConstructExpr(C, 0, Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
case Stmt::CXXNewExprClass: {
+ Bldr.takeNodes(Pred);
const CXXNewExpr *NE = cast<CXXNewExpr>(S);
VisitCXXNewExpr(NE, Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
case Stmt::CXXDeleteExprClass: {
+ Bldr.takeNodes(Pred);
const CXXDeleteExpr *CDE = cast<CXXDeleteExpr>(S);
VisitCXXDeleteExpr(CDE, Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
// FIXME: ChooseExpr is really a constant. We need to fix
  // the CFG so that it does not model them as explicit control-flow.
case Stmt::ChooseExprClass: { // __builtin_choose_expr
+ Bldr.takeNodes(Pred);
const ChooseExpr *C = cast<ChooseExpr>(S);
VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
case Stmt::CompoundAssignOperatorClass:
+ Bldr.takeNodes(Pred);
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::CompoundLiteralExprClass:
+ Bldr.takeNodes(Pred);
VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: { // '?' operator
+ Bldr.takeNodes(Pred);
const AbstractConditionalOperator *C
= cast<AbstractConditionalOperator>(S);
VisitGuardedExpr(C, C->getTrueExpr(), C->getFalseExpr(), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
case Stmt::CXXThisExprClass:
+ Bldr.takeNodes(Pred);
VisitCXXThisExpr(cast<CXXThisExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::DeclRefExprClass: {
+ Bldr.takeNodes(Pred);
const DeclRefExpr *DE = cast<DeclRefExpr>(S);
VisitCommonDeclRefExpr(DE, DE->getDecl(), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
case Stmt::DeclStmtClass:
+ Bldr.takeNodes(Pred);
VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::ImplicitCastExprClass:
@@ -695,6 +812,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::CXXConstCastExprClass:
case Stmt::CXXFunctionalCastExprClass:
case Stmt::ObjCBridgedCastExprClass: {
+ Bldr.takeNodes(Pred);
const CastExpr *C = cast<CastExpr>(S);
// Handle the previsit checks.
ExplodedNodeSet dstPrevisit;
@@ -709,58 +827,98 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// Handle the postvisit checks.
getCheckerManager().runCheckersForPostStmt(Dst, dstExpr, C, *this);
+ Bldr.addNodes(Dst);
break;
}
case Expr::MaterializeTemporaryExprClass: {
+ Bldr.takeNodes(Pred);
const MaterializeTemporaryExpr *Materialize
= cast<MaterializeTemporaryExpr>(S);
- if (!Materialize->getType()->isRecordType())
- CreateCXXTemporaryObject(Materialize, Pred, Dst);
+ if (Materialize->getType()->isRecordType())
+ Dst.Add(Pred);
else
- Visit(Materialize->GetTemporaryExpr(), Pred, Dst);
+ CreateCXXTemporaryObject(Materialize, Pred, Dst);
+ Bldr.addNodes(Dst);
break;
}
case Stmt::InitListExprClass:
+ Bldr.takeNodes(Pred);
VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::MemberExprClass:
+ Bldr.takeNodes(Pred);
VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
+
case Stmt::ObjCIvarRefExprClass:
+ Bldr.takeNodes(Pred);
VisitLvalObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::ObjCForCollectionStmtClass:
+ Bldr.takeNodes(Pred);
VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
- case Stmt::ObjCMessageExprClass:
- VisitObjCMessage(cast<ObjCMessageExpr>(S), Pred, Dst);
+ case Stmt::ObjCMessageExprClass: {
+ Bldr.takeNodes(Pred);
+ // Is this a property access?
+ const ParentMap &PM = Pred->getLocationContext()->getParentMap();
+ const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(S);
+ bool evaluated = false;
+
+ if (const PseudoObjectExpr *PO =
+ dyn_cast_or_null<PseudoObjectExpr>(PM.getParent(S))) {
+ const Expr *syntactic = PO->getSyntacticForm();
+ if (const ObjCPropertyRefExpr *PR =
+ dyn_cast<ObjCPropertyRefExpr>(syntactic)) {
+ bool isSetter = ME->getNumArgs() > 0;
+ VisitObjCMessage(ObjCMessage(ME, PR, isSetter), Pred, Dst);
+ evaluated = true;
+ }
+ else if (isa<BinaryOperator>(syntactic)) {
+ VisitObjCMessage(ObjCMessage(ME, 0, true), Pred, Dst);
+ }
+ }
+
+ if (!evaluated)
+ VisitObjCMessage(ME, Pred, Dst);
+
+ Bldr.addNodes(Dst);
break;
+ }
case Stmt::ObjCAtThrowStmtClass: {
// FIXME: This is not complete. We basically treat @throw as
// an abort.
- SaveAndRestore<bool> OldSink(Builder->BuildSinks);
- Builder->BuildSinks = true;
- MakeNode(Dst, S, Pred, Pred->getState());
+ Bldr.generateNode(S, Pred, Pred->getState());
break;
}
case Stmt::ReturnStmtClass:
+ Bldr.takeNodes(Pred);
VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::OffsetOfExprClass:
+ Bldr.takeNodes(Pred);
VisitOffsetOfExpr(cast<OffsetOfExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::UnaryExprOrTypeTraitExprClass:
+ Bldr.takeNodes(Pred);
VisitUnaryExprOrTypeTraitExpr(cast<UnaryExprOrTypeTraitExpr>(S),
Pred, Dst);
+ Bldr.addNodes(Dst);
break;
case Stmt::StmtExprClass: {
@@ -770,81 +928,154 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// Empty statement expression.
assert(SE->getType() == getContext().VoidTy
&& "Empty statement expression must have void type.");
- Dst.Add(Pred);
break;
}
if (Expr *LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
- const ProgramState *state = Pred->getState();
- MakeNode(Dst, SE, Pred, state->BindExpr(SE, state->getSVal(LastExpr)));
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(SE, Pred,
+ state->BindExpr(SE, Pred->getLocationContext(),
+ state->getSVal(LastExpr,
+ Pred->getLocationContext())));
}
- else
- Dst.Add(Pred);
-
break;
}
- case Stmt::StringLiteralClass: {
- const ProgramState *state = Pred->getState();
- SVal V = state->getLValue(cast<StringLiteral>(S));
- MakeNode(Dst, S, Pred, state->BindExpr(S, V));
- return;
- }
-
case Stmt::UnaryOperatorClass: {
+ Bldr.takeNodes(Pred);
const UnaryOperator *U = cast<UnaryOperator>(S);
- if (AMgr.shouldEagerlyAssume()&&(U->getOpcode() == UO_LNot)) {
+ if (AMgr.shouldEagerlyAssume() && (U->getOpcode() == UO_LNot)) {
ExplodedNodeSet Tmp;
VisitUnaryOperator(U, Pred, Tmp);
evalEagerlyAssume(Dst, Tmp, U);
}
else
VisitUnaryOperator(U, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::PseudoObjectExprClass: {
+ Bldr.takeNodes(Pred);
+ ProgramStateRef state = Pred->getState();
+ const PseudoObjectExpr *PE = cast<PseudoObjectExpr>(S);
+ if (const Expr *Result = PE->getResultExpr()) {
+ SVal V = state->getSVal(Result, Pred->getLocationContext());
+ Bldr.generateNode(S, Pred,
+ state->BindExpr(S, Pred->getLocationContext(), V));
+ }
+ else
+ Bldr.generateNode(S, Pred,
+ state->BindExpr(S, Pred->getLocationContext(),
+ UnknownVal()));
+
+ Bldr.addNodes(Dst);
break;
}
}
}
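Almost every case in the switch above repeats one idiom: the enclosing StmtNodeBuilder auto-propagates any predecessor it still owns, so a case that delegates to a Visit* helper must first reclaim the node, let the helper populate Dst, and then hand the results back. Condensed fragment (VisitSomeExpr is a hypothetical helper):

// Fragment of the recurring per-case idiom in Visit():
Bldr.takeNodes(Pred);                         // Pred won't auto-propagate
VisitSomeExpr(cast<SomeExpr>(S), Pred, Dst);  // helper fills Dst itself
Bldr.addNodes(Dst);                           // builder resumes ownership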
-//===----------------------------------------------------------------------===//
-// Block entrance. (Update counters).
-//===----------------------------------------------------------------------===//
+bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
+ const LocationContext *CalleeLC) {
+ const StackFrameContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+ const StackFrameContext *CallerSF = CalleeSF->getParent()->getCurrentStackFrame();
+ assert(CalleeSF && CallerSF);
+ ExplodedNode *BeforeProcessingCall = 0;
+
+ // Find the first node before we started processing the call expression.
+ while (N) {
+ ProgramPoint L = N->getLocation();
+ BeforeProcessingCall = N;
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+
+ // Skip the nodes corresponding to the inlined code.
+ if (L.getLocationContext()->getCurrentStackFrame() != CallerSF)
+ continue;
+ // We reached the caller. Find the node right before we started
+ // processing the CallExpr.
+ if (isa<PostPurgeDeadSymbols>(L))
+ continue;
+ if (const StmtPoint *SP = dyn_cast<StmtPoint>(&L))
+ if (SP->getStmt() == CalleeSF->getCallSite())
+ continue;
+ break;
+ }
+
+ if (!BeforeProcessingCall)
+ return false;
+
+ // TODO: Clean up the unneeded nodes.
+
+  // Build an Epsilon node from which we will restart the analysis.
+ const Stmt *CE = CalleeSF->getCallSite();
+ ProgramPoint NewNodeLoc =
+ EpsilonPoint(BeforeProcessingCall->getLocationContext(), CE);
+ // Add the special flag to GDM to signal retrying with no inlining.
+ // Note, changing the state ensures that we are not going to cache out.
+ ProgramStateRef NewNodeState = BeforeProcessingCall->getState();
+ NewNodeState = NewNodeState->set<ReplayWithoutInlining>((void*)CE);
+
+ // Make the new node a successor of BeforeProcessingCall.
+ bool IsNew = false;
+ ExplodedNode *NewNode = G.getNode(NewNodeLoc, NewNodeState, false, &IsNew);
+  // We cached out at this point. Caching out is common because we backtrack
+  // from the inlined function, which might spawn several paths.
+ if (!IsNew)
+ return true;
+
+ NewNode->addPredecessor(BeforeProcessingCall, G);
-void ExprEngine::processCFGBlockEntrance(ExplodedNodeSet &dstNodes,
- GenericNodeBuilder<BlockEntrance> &nodeBuilder){
+ // Add the new node to the work list.
+ Engine.enqueueStmtNode(NewNode, CalleeSF->getCallSiteBlock(),
+ CalleeSF->getIndex());
+ NumTimesRetriedWithoutInlining++;
+ return true;
+}
+
+/// Block entrance. (Update counters).
+void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
+ NodeBuilderWithSinks &nodeBuilder) {
// FIXME: Refactor this into a checker.
- const CFGBlock *block = nodeBuilder.getProgramPoint().getBlock();
- ExplodedNode *pred = nodeBuilder.getPredecessor();
+ ExplodedNode *pred = nodeBuilder.getContext().getPred();
- if (nodeBuilder.getBlockCounter().getNumVisited(
- pred->getLocationContext()->getCurrentStackFrame(),
- block->getBlockID()) >= AMgr.getMaxVisit()) {
+ if (nodeBuilder.getContext().getCurrentBlockCount() >= AMgr.getMaxVisit()) {
static SimpleProgramPointTag tag("ExprEngine : Block count exceeded");
- nodeBuilder.generateNode(pred->getState(), pred, &tag, true);
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Generic node creation.
-//===----------------------------------------------------------------------===//
+ const ExplodedNode *Sink =
+ nodeBuilder.generateNode(pred->getState(), pred, &tag, true);
+
+    // Check whether we stopped at the top-level function or not.
+    // The root node should have the location context of the topmost function.
+ const LocationContext *CalleeLC = pred->getLocation().getLocationContext();
+ const LocationContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+ const LocationContext *RootLC =
+ (*G.roots_begin())->getLocation().getLocationContext();
+ if (RootLC->getCurrentStackFrame() != CalleeSF) {
+ Engine.FunctionSummaries->markReachedMaxBlockCount(CalleeSF->getDecl());
+
+ // Re-run the call evaluation without inlining it, by storing the
+ // no-inlining policy in the state and enqueuing the new work item on
+ // the list. Replay should almost never fail. Use the stats to catch it
+ // if it does.
+ if ((!AMgr.NoRetryExhausted && replayWithoutInlining(pred, CalleeLC)))
+ return;
+ NumMaxBlockCountReachedInInlined++;
+ } else
+ NumMaxBlockCountReached++;
-ExplodedNode *ExprEngine::MakeNode(ExplodedNodeSet &Dst, const Stmt *S,
- ExplodedNode *Pred, const ProgramState *St,
- ProgramPoint::Kind K,
- const ProgramPointTag *tag) {
- assert (Builder && "StmtNodeBuilder not present.");
- SaveAndRestore<const ProgramPointTag*> OldTag(Builder->Tag);
- Builder->Tag = tag;
- return Builder->MakeNode(Dst, S, Pred, St, K);
+    // Mark sink nodes as exhausted (for stats) only if the retry failed.
+ Engine.blocksExhausted.push_back(std::make_pair(L, Sink));
+ }
}
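Condensing the control flow above: hitting the visit limit always generates a sink, but if the limit was reached inside an inlined callee the engine first marks the callee exhausted and tries to replay the call without inlining before recording the block as aborted. A hedged pseudo-C++ condensation, with hypothetical helper names:

// Hypothetical condensation of the cutoff policy:
if (blockCount >= MaxVisit) {
  ExplodedNode *Sink = generateSinkNode(pred);       // tagged sink node
  if (stackFrameOf(pred) != topLevelStackFrame()) {  // inside a callee
    markCalleeExhausted(pred);
    if (!NoRetryExhausted && replayWithoutInlining(pred, calleeLC))
      return;                // replay enqueued; the path is not abandoned
  }
  recordExhaustedBlock(L, Sink);                     // stats/diagnostics
}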
//===----------------------------------------------------------------------===//
// Branch processing.
//===----------------------------------------------------------------------===//
-const ProgramState *ExprEngine::MarkBranch(const ProgramState *state,
- const Stmt *Terminator,
- bool branchTaken) {
+ProgramStateRef ExprEngine::MarkBranch(ProgramStateRef state,
+ const Stmt *Terminator,
+ const LocationContext *LCtx,
+ bool branchTaken) {
switch (Terminator->getStmtClass()) {
default:
@@ -867,7 +1098,7 @@ const ProgramState *ExprEngine::MarkBranch(const ProgramState *state,
(Op == BO_LOr && !branchTaken)
? B->getRHS() : B->getLHS();
- return state->BindExpr(B, UndefinedVal(Ex));
+ return state->BindExpr(B, LCtx, UndefinedVal(Ex));
}
case Stmt::BinaryConditionalOperatorClass:
@@ -885,7 +1116,7 @@ const ProgramState *ExprEngine::MarkBranch(const ProgramState *state,
else
Ex = C->getFalseExpr();
- return state->BindExpr(C, UndefinedVal(Ex));
+ return state->BindExpr(C, LCtx, UndefinedVal(Ex));
}
case Stmt::ChooseExprClass: { // ?:
@@ -893,7 +1124,7 @@ const ProgramState *ExprEngine::MarkBranch(const ProgramState *state,
const ChooseExpr *C = cast<ChooseExpr>(Terminator);
const Expr *Ex = branchTaken ? C->getLHS() : C->getRHS();
- return state->BindExpr(C, UndefinedVal(Ex));
+ return state->BindExpr(C, LCtx, UndefinedVal(Ex));
}
}
}
@@ -904,8 +1135,9 @@ const ProgramState *ExprEngine::MarkBranch(const ProgramState *state,
/// This function returns the SVal bound to Condition->IgnoreCasts if all the
// cast(s) did was sign-extend the original value.
static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
- const ProgramState *state,
+ ProgramStateRef state,
const Stmt *Condition,
+ const LocationContext *LCtx,
ASTContext &Ctx) {
const Expr *Ex = dyn_cast<Expr>(Condition);
@@ -936,15 +1168,22 @@ static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
if (!bitsInit || !T->isIntegerType() || Ctx.getTypeSize(T) > bits)
return UnknownVal();
- return state->getSVal(Ex);
+ return state->getSVal(Ex, LCtx);
}
void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
- BranchNodeBuilder& builder) {
+ NodeBuilderContext& BldCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) {
+ currentBuilderContext = &BldCtx;
// Check for NULL conditions; e.g. "for(;;)"
if (!Condition) {
- builder.markInfeasible(false);
+ BranchNodeBuilder NullCondBldr(Pred, Dst, BldCtx, DstT, DstF);
+ NullCondBldr.markInfeasible(false);
+ NullCondBldr.generateNode(Pred->getState(), true, Pred);
return;
}
@@ -952,65 +1191,84 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
Condition->getLocStart(),
"Error evaluating branch");
- getCheckerManager().runCheckersForBranchCondition(Condition, builder, *this);
-
- // If the branch condition is undefined, return;
- if (!builder.isFeasible(true) && !builder.isFeasible(false))
+ ExplodedNodeSet CheckersOutSet;
+ getCheckerManager().runCheckersForBranchCondition(Condition, CheckersOutSet,
+ Pred, *this);
+ // We generated only sinks.
+ if (CheckersOutSet.empty())
return;
- const ProgramState *PrevState = builder.getState();
- SVal X = PrevState->getSVal(Condition);
-
- if (X.isUnknownOrUndef()) {
- // Give it a chance to recover from unknown.
- if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
- if (Ex->getType()->isIntegerType()) {
- // Try to recover some path-sensitivity. Right now casts of symbolic
- // integers that promote their values are currently not tracked well.
- // If 'Condition' is such an expression, try and recover the
- // underlying value and use that instead.
- SVal recovered = RecoverCastedSymbol(getStateManager(),
- builder.getState(), Condition,
- getContext());
-
- if (!recovered.isUnknown()) {
- X = recovered;
+ BranchNodeBuilder builder(CheckersOutSet, Dst, BldCtx, DstT, DstF);
+ for (NodeBuilder::iterator I = CheckersOutSet.begin(),
+ E = CheckersOutSet.end(); E != I; ++I) {
+ ExplodedNode *PredI = *I;
+
+ if (PredI->isSink())
+ continue;
+
+ ProgramStateRef PrevState = Pred->getState();
+ SVal X = PrevState->getSVal(Condition, Pred->getLocationContext());
+
+ if (X.isUnknownOrUndef()) {
+ // Give it a chance to recover from unknown.
+ if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
+ if (Ex->getType()->isIntegerType()) {
+ // Try to recover some path-sensitivity. Right now casts of symbolic
+ // integers that promote their values are currently not tracked well.
+ // If 'Condition' is such an expression, try and recover the
+ // underlying value and use that instead.
+ SVal recovered = RecoverCastedSymbol(getStateManager(),
+ PrevState, Condition,
+ Pred->getLocationContext(),
+ getContext());
+
+ if (!recovered.isUnknown()) {
+ X = recovered;
+ }
}
}
}
+
+ const LocationContext *LCtx = PredI->getLocationContext();
+
// If the condition is still unknown, give up.
if (X.isUnknownOrUndef()) {
- builder.generateNode(MarkBranch(PrevState, Term, true), true);
- builder.generateNode(MarkBranch(PrevState, Term, false), false);
- return;
+ builder.generateNode(MarkBranch(PrevState, Term, LCtx, true),
+ true, PredI);
+ builder.generateNode(MarkBranch(PrevState, Term, LCtx, false),
+ false, PredI);
+ continue;
}
- }
- DefinedSVal V = cast<DefinedSVal>(X);
+ DefinedSVal V = cast<DefinedSVal>(X);
- // Process the true branch.
- if (builder.isFeasible(true)) {
- if (const ProgramState *state = PrevState->assume(V, true))
- builder.generateNode(MarkBranch(state, Term, true), true);
- else
- builder.markInfeasible(true);
- }
+ // Process the true branch.
+ if (builder.isFeasible(true)) {
+ if (ProgramStateRef state = PrevState->assume(V, true))
+ builder.generateNode(MarkBranch(state, Term, LCtx, true),
+ true, PredI);
+ else
+ builder.markInfeasible(true);
+ }
- // Process the false branch.
- if (builder.isFeasible(false)) {
- if (const ProgramState *state = PrevState->assume(V, false))
- builder.generateNode(MarkBranch(state, Term, false), false);
- else
- builder.markInfeasible(false);
+ // Process the false branch.
+ if (builder.isFeasible(false)) {
+ if (ProgramStateRef state = PrevState->assume(V, false))
+ builder.generateNode(MarkBranch(state, Term, LCtx, false),
+ false, PredI);
+ else
+ builder.markInfeasible(false);
+ }
}
+ currentBuilderContext = 0;
}
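The per-predecessor body above is the canonical two-sided split: constrain the condition each way and emit a successor only for the feasible sides. On a consistent state, a DefinedSVal admits at least one of the two assumptions, so no non-sink predecessor is silently dropped. Condensed fragment (analyzer types assumed):

// Fragment: assume() yields a null state when the constraint contradicts
// what is already known along this path.
if (ProgramStateRef StTrue = PrevState->assume(V, true))
  builder.generateNode(MarkBranch(StTrue, Term, LCtx, true), true, PredI);
else
  builder.markInfeasible(true);
if (ProgramStateRef StFalse = PrevState->assume(V, false))
  builder.generateNode(MarkBranch(StFalse, Term, LCtx, false), false, PredI);
else
  builder.markInfeasible(false);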
/// processIndirectGoto - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a computed goto jump.
void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
- const ProgramState *state = builder.getState();
- SVal V = state->getSVal(builder.getTarget());
+ ProgramStateRef state = builder.getState();
+ SVal V = state->getSVal(builder.getTarget(), builder.getLocationContext());
// Three possibilities:
//
@@ -1051,18 +1309,20 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
/// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
-void ExprEngine::processEndOfFunction(EndOfFunctionNodeBuilder& builder) {
- StateMgr.EndPath(builder.getState());
- getCheckerManager().runCheckersForEndPath(builder, *this);
+void ExprEngine::processEndOfFunction(NodeBuilderContext& BC) {
+ StateMgr.EndPath(BC.Pred->getState());
+ ExplodedNodeSet Dst;
+ getCheckerManager().runCheckersForEndPath(BC, Dst, *this);
+ Engine.enqueueEndOfFunction(Dst);
}
/// ProcessSwitch - Called by CoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a switch statement.
void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
typedef SwitchNodeBuilder::iterator iterator;
- const ProgramState *state = builder.getState();
+ ProgramStateRef state = builder.getState();
const Expr *CondE = builder.getCondition();
- SVal CondV_untested = state->getSVal(CondE);
+ SVal CondV_untested = state->getSVal(CondE, builder.getLocationContext());
if (CondV_untested.isUndef()) {
//ExplodedNode* N = builder.generateDefaultCaseNode(state, true);
@@ -1073,7 +1333,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
}
DefinedOrUnknownSVal CondV = cast<DefinedOrUnknownSVal>(CondV_untested);
- const ProgramState *DefaultSt = state;
+ ProgramStateRef DefaultSt = state;
iterator I = builder.begin(), EI = builder.end();
bool defaultIsFeasible = I == EI;
@@ -1106,7 +1366,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
CondV, CaseVal);
// Now "assume" that the case matches.
- if (const ProgramState *stateNew = state->assume(Res, true)) {
+ if (ProgramStateRef stateNew = state->assume(Res, true)) {
builder.generateCaseStmtNode(I, stateNew);
// If CondV evaluates to a constant, then we know that this
@@ -1119,7 +1379,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
// Now "assume" that the case doesn't match. Add this state
// to the default state (if it is feasible).
if (DefaultSt) {
- if (const ProgramState *stateNew = DefaultSt->assume(Res, false)) {
+ if (ProgramStateRef stateNew = DefaultSt->assume(Res, false)) {
defaultIsFeasible = true;
DefaultSt = stateNew;
}
@@ -1166,7 +1426,10 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- const ProgramState *state = Pred->getState();
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
assert(Ex->isLValue());
@@ -1181,22 +1444,29 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
V = UnknownVal();
}
- MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
- ProgramPoint::PostLValueKind);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), false, 0,
+ ProgramPoint::PostLValueKind);
return;
}
if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
assert(!Ex->isLValue());
SVal V = svalBuilder.makeIntVal(ED->getInitVal());
- MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V));
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V));
return;
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
SVal V = svalBuilder.getFunctionPointer(FD);
- MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, V),
- ProgramPoint::PostLValueKind);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), false, 0,
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+ if (isa<FieldDecl>(D)) {
+ // FIXME: Compute lvalue of fields.
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, UnknownVal()),
+ false, 0, ProgramPoint::PostLValueKind);
return;
}
+
assert (false &&
"ValueDecl support for this ValueDecl not implemented.");
}
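A second pervasive theme, visible in VisitCommonDeclRefExpr above and throughout the Visit* bodies: environment bindings are now keyed by (expression, LocationContext), so getSVal and BindExpr take the context explicitly, and the same expression evaluated in two stack frames no longer collides. Fragment sketch (analyzer types assumed):

// Fragment: context-qualified read and rebind of an expression value.
const LocationContext *LCtx = Pred->getLocationContext();
SVal V = state->getSVal(Ex, LCtx);              // value under this frame
ProgramStateRef NewSt = state->BindExpr(Ex, LCtx, V);
Bldr.generateNode(Ex, Pred, NewSt, false, 0,
                  ProgramPoint::PostLValueKind);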
@@ -1213,24 +1483,33 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
ExplodedNodeSet checkerPreStmt;
getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this);
+ StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currentBuilderContext);
+
for (ExplodedNodeSet::iterator it = checkerPreStmt.begin(),
ei = checkerPreStmt.end(); it != ei; ++it) {
- const ProgramState *state = (*it)->getState();
- SVal V = state->getLValue(A->getType(), state->getSVal(Idx),
- state->getSVal(Base));
+ const LocationContext *LCtx = (*it)->getLocationContext();
+ ProgramStateRef state = (*it)->getState();
+ SVal V = state->getLValue(A->getType(),
+ state->getSVal(Idx, LCtx),
+ state->getSVal(Base, LCtx));
assert(A->isLValue());
- MakeNode(Dst, A, *it, state->BindExpr(A, V), ProgramPoint::PostLValueKind);
+ Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V),
+ false, 0, ProgramPoint::PostLValueKind);
}
}
/// VisitMemberExpr - Transfer function for member expressions.
void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
+ ExplodedNodeSet &TopDst) {
+ StmtNodeBuilder Bldr(Pred, TopDst, *currentBuilderContext);
+ ExplodedNodeSet Dst;
Decl *member = M->getMemberDecl();
if (VarDecl *VD = dyn_cast<VarDecl>(member)) {
assert(M->isLValue());
+ Bldr.takeNodes(Pred);
VisitCommonDeclRefExpr(M, VD, Pred, Dst);
+ Bldr.addNodes(Dst);
return;
}
@@ -1239,15 +1518,16 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
return;
Expr *baseExpr = M->getBase()->IgnoreParens();
- const ProgramState *state = Pred->getState();
- SVal baseExprVal = state->getSVal(baseExpr);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal baseExprVal = state->getSVal(baseExpr, Pred->getLocationContext());
if (isa<nonloc::LazyCompoundVal>(baseExprVal) ||
isa<nonloc::CompoundVal>(baseExprVal) ||
// FIXME: This can originate by conjuring a symbol for an unknown
// temporary struct object, see test/Analysis/fields.c:
// (p = getit()).x
isa<nonloc::SymbolVal>(baseExprVal)) {
- MakeNode(Dst, M, Pred, state->BindExpr(M, UnknownVal()));
+ Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, UnknownVal()));
return;
}
@@ -1258,9 +1538,13 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
// For all other cases, compute an lvalue.
SVal L = state->getLValue(field, baseExprVal);
if (M->isLValue())
- MakeNode(Dst, M, Pred, state->BindExpr(M, L), ProgramPoint::PostLValueKind);
- else
- evalLoad(Dst, M, Pred, state, L);
+ Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), false, 0,
+ ProgramPoint::PostLValueKind);
+ else {
+ Bldr.takeNodes(Pred);
+ evalLoad(Dst, M, M, Pred, state, L);
+ Bldr.addNodes(Dst);
+ }
}
/// evalBind - Handle the semantics of binding a value to a specific location.
@@ -1272,12 +1556,17 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
// Do a previsit of the bind.
ExplodedNodeSet CheckedSet;
getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val,
- StoreE, *this);
+ StoreE, *this,
+ ProgramPoint::PostStmtKind);
+ ExplodedNodeSet TmpDst;
+ StmtNodeBuilder Bldr(CheckedSet, TmpDst, *currentBuilderContext);
+
+ const LocationContext *LC = Pred->getLocationContext();
for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
I!=E; ++I) {
-
- const ProgramState *state = (*I)->getState();
+ ExplodedNode *PredI = *I;
+ ProgramStateRef state = PredI->getState();
if (atDeclInit) {
const VarRegion *VR =
@@ -1288,8 +1577,15 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
state = state->bindLoc(location, Val);
}
- MakeNode(Dst, StoreE, *I, state);
+ const MemRegion *LocReg = 0;
+ if (loc::MemRegionVal *LocRegVal = dyn_cast<loc::MemRegionVal>(&location))
+ LocReg = LocRegVal->getRegion();
+
+ const ProgramPoint L = PostStore(StoreE, LC, LocReg, 0);
+ Bldr.generateNode(L, PredI, state, false);
}
+
+ Dst.insert(TmpDst);
}
/// evalStore - Handle the semantics of a store via an assignment.
@@ -1303,24 +1599,19 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
void ExprEngine::evalStore(ExplodedNodeSet &Dst, const Expr *AssignE,
const Expr *LocationE,
ExplodedNode *Pred,
- const ProgramState *state, SVal location, SVal Val,
+ ProgramStateRef state, SVal location, SVal Val,
const ProgramPointTag *tag) {
-
- assert(Builder && "StmtNodeBuilder must be defined.");
-
// Proceed with the store. We use AssignE as the anchor for the PostStore
// ProgramPoint if it is non-NULL, and LocationE otherwise.
const Expr *StoreE = AssignE ? AssignE : LocationE;
if (isa<loc::ObjCPropRef>(location)) {
- loc::ObjCPropRef prop = cast<loc::ObjCPropRef>(location);
- return VisitObjCMessage(ObjCPropertySetter(prop.getPropRefExpr(),
- StoreE, Val), Pred, Dst);
+ assert(false);
}
// Evaluate the location (checks for bad dereferences).
ExplodedNodeSet Tmp;
- evalLocation(Tmp, LocationE, Pred, state, location, tag, false);
+ evalLocation(Tmp, AssignE, LocationE, Pred, state, location, tag, false);
if (Tmp.empty())
return;
@@ -1328,24 +1619,21 @@ void ExprEngine::evalStore(ExplodedNodeSet &Dst, const Expr *AssignE,
if (location.isUndef())
return;
- SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind,
- ProgramPoint::PostStoreKind);
-
for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
- evalBind(Dst, StoreE, *NI, location, Val);
+ evalBind(Dst, StoreE, *NI, location, Val, false);
}
-void ExprEngine::evalLoad(ExplodedNodeSet &Dst, const Expr *Ex,
- ExplodedNode *Pred,
- const ProgramState *state, SVal location,
- const ProgramPointTag *tag, QualType LoadTy) {
+void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
+ const Expr *NodeEx,
+ const Expr *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ QualType LoadTy)
+{
assert(!isa<NonLoc>(location) && "location cannot be a NonLoc.");
-
- if (isa<loc::ObjCPropRef>(location)) {
- loc::ObjCPropRef prop = cast<loc::ObjCPropRef>(location);
- return VisitObjCMessage(ObjCPropertyGetter(prop.getPropRefExpr(), Ex),
- Pred, Dst);
- }
+ assert(!isa<loc::ObjCPropRef>(location));
// Are we loading from a region? This actually results in two loads; one
// to fetch the address of the referenced value and one to fetch the
@@ -1358,72 +1646,84 @@ void ExprEngine::evalLoad(ExplodedNodeSet &Dst, const Expr *Ex,
static SimpleProgramPointTag
loadReferenceTag("ExprEngine : Load Reference");
ExplodedNodeSet Tmp;
- evalLoadCommon(Tmp, Ex, Pred, state, location, &loadReferenceTag,
+ evalLoadCommon(Tmp, NodeEx, BoundEx, Pred, state,
+ location, &loadReferenceTag,
getContext().getPointerType(RT->getPointeeType()));
// Perform the load from the referenced value.
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end() ; I!=E; ++I) {
state = (*I)->getState();
- location = state->getSVal(Ex);
- evalLoadCommon(Dst, Ex, *I, state, location, tag, LoadTy);
+ location = state->getSVal(BoundEx, (*I)->getLocationContext());
+ evalLoadCommon(Dst, NodeEx, BoundEx, *I, state, location, tag, LoadTy);
}
return;
}
}
- evalLoadCommon(Dst, Ex, Pred, state, location, tag, LoadTy);
+ evalLoadCommon(Dst, NodeEx, BoundEx, Pred, state, location, tag, LoadTy);
}
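
For reference-typed results the code above issues two loads: the first (tagged "ExprEngine : Load Reference") fetches the address the reference is bound to, and the second fetches the value stored there. In source terms (standalone example, nothing analyzer-specific):

#include <cassert>

int main() {
  int x = 7;
  int &r = x;   // r is modeled as holding the address of x
  int y = r;    // load 1: fetch &x through r ("Load Reference")
                // load 2: fetch the int stored at &x
  assert(y == 7);
  return 0;
}
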
-void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst, const Expr *Ex,
- ExplodedNode *Pred,
- const ProgramState *state, SVal location,
- const ProgramPointTag *tag, QualType LoadTy) {
-
+void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
+ const Expr *NodeEx,
+ const Expr *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ QualType LoadTy) {
+ assert(NodeEx);
+ assert(BoundEx);
// Evaluate the location (checks for bad dereferences).
ExplodedNodeSet Tmp;
- evalLocation(Tmp, Ex, Pred, state, location, tag, true);
-
+ evalLocation(Tmp, NodeEx, BoundEx, Pred, state, location, tag, true);
if (Tmp.empty())
return;
+ StmtNodeBuilder Bldr(Tmp, Dst, *currentBuilderContext);
if (location.isUndef())
return;
- SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind);
-
// Proceed with the load.
for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
state = (*NI)->getState();
+ const LocationContext *LCtx = (*NI)->getLocationContext();
if (location.isUnknown()) {
// This is important. We must nuke the old binding.
- MakeNode(Dst, Ex, *NI, state->BindExpr(Ex, UnknownVal()),
- ProgramPoint::PostLoadKind, tag);
+ Bldr.generateNode(NodeEx, *NI,
+ state->BindExpr(BoundEx, LCtx, UnknownVal()),
+ false, tag,
+ ProgramPoint::PostLoadKind);
}
else {
if (LoadTy.isNull())
- LoadTy = Ex->getType();
+ LoadTy = BoundEx->getType();
SVal V = state->getSVal(cast<Loc>(location), LoadTy);
- MakeNode(Dst, Ex, *NI, state->bindExprAndLocation(Ex, location, V),
- ProgramPoint::PostLoadKind, tag);
+ Bldr.generateNode(NodeEx, *NI,
+ state->bindExprAndLocation(BoundEx, LCtx, location, V),
+ false, tag, ProgramPoint::PostLoadKind);
}
}
}
-void ExprEngine::evalLocation(ExplodedNodeSet &Dst, const Stmt *S,
- ExplodedNode *Pred,
- const ProgramState *state, SVal location,
- const ProgramPointTag *tag, bool isLoad) {
+void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
+ const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ bool isLoad) {
+ StmtNodeBuilder BldrTop(Pred, Dst, *currentBuilderContext);
// Early checks for performance reason.
if (location.isUnknown()) {
- Dst.Add(Pred);
return;
}
ExplodedNodeSet Src;
- if (Pred->getState() == state) {
- Src.Add(Pred);
- } else {
+ BldrTop.takeNodes(Pred);
+ StmtNodeBuilder Bldr(Pred, Src, *currentBuilderContext);
+ if (Pred->getState() != state) {
// Associate this new state with an ExplodedNode.
// FIXME: If I pass a null tag, the graph is incorrect, e.g. for
// int *p;
@@ -1435,88 +1735,12 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst, const Stmt *S,
// FIXME: why is 'tag' not used instead of etag?
static SimpleProgramPointTag etag("ExprEngine: Location");
-
- ExplodedNode *N = Builder->generateNode(S, state, Pred, &etag);
- Src.Add(N ? N : Pred);
+ Bldr.generateNode(NodeEx, Pred, state, false, &etag);
}
- getCheckerManager().runCheckersForLocation(Dst, Src, location, isLoad, S,
- *this);
-}
-
-bool ExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
- ExplodedNode *Pred) {
- return false;
-
- // Inlining isn't correct right now because we:
- // (a) don't generate CallExit nodes.
- // (b) we need a way to postpone doing post-visits of CallExprs until
- // the CallExit. This means we need CallExits for the non-inline
- // cases as well.
-
-#if 0
- const ProgramState *state = Pred->getState();
- const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
-
- const FunctionDecl *FD = L.getAsFunctionDecl();
- if (!FD)
- return false;
-
- // Specially handle CXXMethods.
- const CXXMethodDecl *methodDecl = 0;
-
- switch (CE->getStmtClass()) {
- default: break;
- case Stmt::CXXOperatorCallExprClass: {
- const CXXOperatorCallExpr *opCall = cast<CXXOperatorCallExpr>(CE);
- methodDecl =
- dyn_cast_or_null<CXXMethodDecl>(opCall->getCalleeDecl());
- break;
- }
- case Stmt::CXXMemberCallExprClass: {
- const CXXMemberCallExpr *memberCall = cast<CXXMemberCallExpr>(CE);
- const MemberExpr *memberExpr =
- cast<MemberExpr>(memberCall->getCallee()->IgnoreParens());
- methodDecl = cast<CXXMethodDecl>(memberExpr->getMemberDecl());
- break;
- }
- }
-
-
-
-
- // Check if the function definition is in the same translation unit.
- if (FD->hasBody(FD)) {
- const StackFrameContext *stackFrame =
- AMgr.getStackFrame(AMgr.getAnalysisContext(FD),
- Pred->getLocationContext(),
- CE, Builder->getBlock(), Builder->getIndex());
- // Now we have the definition of the callee, create a CallEnter node.
- CallEnter Loc(CE, stackFrame, Pred->getLocationContext());
-
- ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
- Dst.Add(N);
- return true;
- }
-
- // Check if we can find the function definition in other translation units.
- if (AMgr.hasIndexer()) {
- AnalysisContext *C = AMgr.getAnalysisContextInAnotherTU(FD);
- if (C == 0)
- return false;
- const StackFrameContext *stackFrame =
- AMgr.getStackFrame(C, Pred->getLocationContext(),
- CE, Builder->getBlock(), Builder->getIndex());
- CallEnter Loc(CE, stackFrame, Pred->getLocationContext());
- ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
- Dst.Add(N);
- return true;
- }
-
- // Generate the CallExit node.
-
- return false;
-#endif
+ ExplodedNodeSet Tmp;
+ getCheckerManager().runCheckersForLocation(Tmp, Src, location, isLoad,
+ NodeEx, BoundEx, *this);
+ BldrTop.addNodes(Tmp);
}
std::pair<const ProgramPointTag *, const ProgramPointTag*>
@@ -1528,108 +1752,67 @@ ExprEngine::getEagerlyAssumeTags() {
}
void ExprEngine::evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
- const Expr *Ex) {
-
+ const Expr *Ex) {
+ StmtNodeBuilder Bldr(Src, Dst, *currentBuilderContext);
for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
ExplodedNode *Pred = *I;
-
// Test if the previous node was the same expression. This can happen
// when the expression fails to evaluate to anything meaningful and
// (as an optimization) we don't generate a node.
ProgramPoint P = Pred->getLocation();
if (!isa<PostStmt>(P) || cast<PostStmt>(P).getStmt() != Ex) {
- Dst.Add(Pred);
continue;
}
- const ProgramState *state = Pred->getState();
- SVal V = state->getSVal(Ex);
- if (nonloc::SymExprVal *SEV = dyn_cast<nonloc::SymExprVal>(&V)) {
+ ProgramStateRef state = Pred->getState();
+ SVal V = state->getSVal(Ex, Pred->getLocationContext());
+ nonloc::SymbolVal *SEV = dyn_cast<nonloc::SymbolVal>(&V);
+ if (SEV && SEV->isExpression()) {
const std::pair<const ProgramPointTag *, const ProgramPointTag*> &tags =
getEagerlyAssumeTags();
// First assume that the condition is true.
- if (const ProgramState *StateTrue = state->assume(*SEV, true)) {
+ if (ProgramStateRef StateTrue = state->assume(*SEV, true)) {
SVal Val = svalBuilder.makeIntVal(1U, Ex->getType());
- StateTrue = StateTrue->BindExpr(Ex, Val);
- Dst.Add(Builder->generateNode(Ex, StateTrue, Pred, tags.first));
+ StateTrue = StateTrue->BindExpr(Ex, Pred->getLocationContext(), Val);
+ Bldr.generateNode(Ex, Pred, StateTrue, false, tags.first);
}
// Next, assume that the condition is false.
- if (const ProgramState *StateFalse = state->assume(*SEV, false)) {
+ if (ProgramStateRef StateFalse = state->assume(*SEV, false)) {
SVal Val = svalBuilder.makeIntVal(0U, Ex->getType());
- StateFalse = StateFalse->BindExpr(Ex, Val);
- Dst.Add(Builder->generateNode(Ex, StateFalse, Pred, tags.second));
+ StateFalse = StateFalse->BindExpr(Ex, Pred->getLocationContext(), Val);
+ Bldr.generateNode(Ex, Pred, StateFalse, false, tags.second);
}
}
- else
- Dst.Add(Pred);
}
}
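
evalEagerlyAssume splits the path on a comparison result instead of carrying an opaque symbolic boolean: one successor assumes the condition true and binds the expression to 1, the other assumes it false and binds 0. From the analyzed program's point of view (standalone example; the function name is illustrative):

// After this statement the analyzer explores two states rather than one
// state holding a symbolic "(x > 0)" value.
int sign_bit(int x) {
  int b = (x > 0);  // successor 1: assume x > 0,  bind b = 1 (tags.first)
                    // successor 2: assume x <= 0, bind b = 0 (tags.second)
  return b;
}
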
void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- VisitAsmStmtHelperOutputs(A, A->begin_outputs(), A->end_outputs(), Pred, Dst);
-}
-
-void ExprEngine::VisitAsmStmtHelperOutputs(const AsmStmt *A,
- AsmStmt::const_outputs_iterator I,
- AsmStmt::const_outputs_iterator E,
- ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- if (I == E) {
- VisitAsmStmtHelperInputs(A, A->begin_inputs(), A->end_inputs(), Pred, Dst);
- return;
- }
-
- ExplodedNodeSet Tmp;
- Visit(*I, Pred, Tmp);
- ++I;
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ // We have processed both the inputs and the outputs. All of the outputs
+ // should evaluate to Locs. Nuke all of their values.
- for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end();NI != NE;++NI)
- VisitAsmStmtHelperOutputs(A, I, E, *NI, Dst);
-}
-
-void ExprEngine::VisitAsmStmtHelperInputs(const AsmStmt *A,
- AsmStmt::const_inputs_iterator I,
- AsmStmt::const_inputs_iterator E,
- ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
- if (I == E) {
-
- // We have processed both the inputs and the outputs. All of the outputs
- // should evaluate to Locs. Nuke all of their values.
-
- // FIXME: Some day in the future it would be nice to allow a "plug-in"
- // which interprets the inline asm and stores proper results in the
- // outputs.
+ // FIXME: Some day in the future it would be nice to allow a "plug-in"
+ // which interprets the inline asm and stores proper results in the
+ // outputs.
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
- for (AsmStmt::const_outputs_iterator OI = A->begin_outputs(),
- OE = A->end_outputs(); OI != OE; ++OI) {
-
- SVal X = state->getSVal(*OI);
- assert (!isa<NonLoc>(X)); // Should be an Lval, or unknown, undef.
-
- if (isa<Loc>(X))
- state = state->bindLoc(cast<Loc>(X), UnknownVal());
- }
+ for (AsmStmt::const_outputs_iterator OI = A->begin_outputs(),
+ OE = A->end_outputs(); OI != OE; ++OI) {
+ SVal X = state->getSVal(*OI, Pred->getLocationContext());
+ assert (!isa<NonLoc>(X)); // Should be an Lval, unknown, or undef.
- MakeNode(Dst, A, Pred, state);
- return;
+ if (isa<Loc>(X))
+ state = state->bindLoc(cast<Loc>(X), UnknownVal());
}
- ExplodedNodeSet Tmp;
- Visit(*I, Pred, Tmp);
-
- ++I;
-
- for (ExplodedNodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI!=NE; ++NI)
- VisitAsmStmtHelperInputs(A, I, E, *NI, Dst);
+ Bldr.generateNode(A, Pred, state);
}
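
The rewritten asm visitor no longer walks inputs and outputs itself (the CFG now schedules those subexpressions); it only invalidates every output lvalue by binding it to UnknownVal. Effect on analyzed code (standalone example; GCC/Clang extended-asm syntax, a compiler extension):

int main() {
  int out = 1;
  // 'out' appears in the output list, so the analyzer nukes its binding
  // (UnknownVal) instead of trying to interpret the assembly template.
  asm("" : "+r"(out));
  return out;  // the analyzer no longer knows that out == 1 here
}
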
-
//===----------------------------------------------------------------------===//
// Visualization.
//===----------------------------------------------------------------------===//
@@ -1694,6 +1877,10 @@ struct DOTGraphTraits<ExplodedNode*> :
Out << "CallExit";
break;
+ case ProgramPoint::EpsilonKind:
+ Out << "Epsilon Point";
+ break;
+
default: {
if (StmtPoint *L = dyn_cast<StmtPoint>(&Loc)) {
const Stmt *S = L->getStmt();
@@ -1811,10 +1998,10 @@ struct DOTGraphTraits<ExplodedNode*> :
}
}
- const ProgramState *state = N->getState();
- Out << "\\|StateID: " << (void*) state
+ ProgramStateRef state = N->getState();
+ Out << "\\|StateID: " << (void*) state.getPtr()
<< " NodeID: " << (void*) N << "\\|";
- state->printDOT(Out, *N->getLocationContext()->getCFG());
+ state->printDOT(Out);
Out << "\\l";
@@ -1852,9 +2039,7 @@ void ExprEngine::ViewGraph(bool trim) {
// Iterate through the reports and get their nodes.
for (BugReporter::EQClasses_iterator
EI = BR.EQClasses_begin(), EE = BR.EQClasses_end(); EI != EE; ++EI) {
- BugReportEquivClass& EQ = *EI;
- const BugReport &R = **EQ.begin();
- ExplodedNode *N = const_cast<ExplodedNode*>(R.getErrorNode());
+ ExplodedNode *N = const_cast<ExplodedNode*>(EI->begin()->getErrorNode());
if (N) Src.push_back(N);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 68ccc59..93e598a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -13,7 +13,6 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/Analysis/Support/SaveAndRestore.h"
using namespace clang;
using namespace ento;
@@ -35,38 +34,40 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
for (ExplodedNodeSet::iterator it=CheckedSet.begin(), ei=CheckedSet.end();
it != ei; ++it) {
- const ProgramState *state = (*it)->getState();
- SVal LeftV = state->getSVal(LHS);
- SVal RightV = state->getSVal(RHS);
+ ProgramStateRef state = (*it)->getState();
+ const LocationContext *LCtx = (*it)->getLocationContext();
+ SVal LeftV = state->getSVal(LHS, LCtx);
+ SVal RightV = state->getSVal(RHS, LCtx);
BinaryOperator::Opcode Op = B->getOpcode();
if (Op == BO_Assign) {
// EXPERIMENTAL: "Conjured" symbols.
// FIXME: Handle structs.
- if (RightV.isUnknown() ||
- !getConstraintManager().canReasonAbout(RightV)) {
- unsigned Count = Builder->getCurrentBlockCount();
- RightV = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), Count);
+ if (RightV.isUnknown()) {
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ RightV = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LCtx, Count);
}
// Simulate the effects of a "store": bind the value of the RHS
// to the L-Value represented by the LHS.
SVal ExprVal = B->isLValue() ? LeftV : RightV;
- evalStore(Tmp2, B, LHS, *it, state->BindExpr(B, ExprVal), LeftV, RightV);
+ evalStore(Tmp2, B, LHS, *it, state->BindExpr(B, LCtx, ExprVal),
+ LeftV, RightV);
continue;
}
if (!B->isAssignmentOp()) {
+ StmtNodeBuilder Bldr(*it, Tmp2, *currentBuilderContext);
// Process non-assignments except commas or short-circuited
// logical expressions (LAnd and LOr).
SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());
if (Result.isUnknown()) {
- MakeNode(Tmp2, B, *it, state);
+ Bldr.generateNode(B, *it, state);
continue;
}
- state = state->BindExpr(B, Result);
- MakeNode(Tmp2, B, *it, state);
+ state = state->BindExpr(B, LCtx, Result);
+ Bldr.generateNode(B, *it, state);
continue;
}
@@ -91,13 +92,14 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// null dereferences, and so on.
ExplodedNodeSet Tmp;
SVal location = LeftV;
- evalLoad(Tmp, LHS, *it, state, location);
+ evalLoad(Tmp, B, LHS, *it, state, location);
for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E;
++I) {
state = (*I)->getState();
- SVal V = state->getSVal(LHS);
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ SVal V = state->getSVal(LHS, LCtx);
// Get the computation type.
QualType CTy =
@@ -122,16 +124,15 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
SVal LHSVal;
- if (Result.isUnknown() ||
- !getConstraintManager().canReasonAbout(Result)) {
+ if (Result.isUnknown()) {
- unsigned Count = Builder->getCurrentBlockCount();
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
// The symbolic value is actually for the type of the left-hand side
// expression, not the computation type, as this is the value the
// LValue on the LHS will bind to.
- LHSVal = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LTy,
- Count);
+ LHSVal = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LCtx,
+ LTy, Count);
// However, we need to convert the symbol to the computation type.
Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
@@ -145,9 +146,9 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
// In C++, assignment and compound assignment operators return an
// lvalue.
if (B->isLValue())
- state = state->BindExpr(B, location);
+ state = state->BindExpr(B, LCtx, location);
else
- state = state->BindExpr(B, Result);
+ state = state->BindExpr(B, LCtx, Result);
evalStore(Tmp2, B, LHS, *I, state, location, LHSVal);
}
@@ -165,8 +166,12 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
Pred->getLocationContext());
ExplodedNodeSet Tmp;
- MakeNode(Tmp, BE, Pred, Pred->getState()->BindExpr(BE, V),
- ProgramPoint::PostLValueKind);
+ StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
+ Bldr.generateNode(BE, Pred,
+ Pred->getState()->BindExpr(BE, Pred->getLocationContext(),
+ V),
+ false, 0,
+ ProgramPoint::PostLValueKind);
// FIXME: Move all post/pre visits to ::Visit().
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, BE, *this);
@@ -178,13 +183,13 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
ExplodedNodeSet dstPreStmt;
getCheckerManager().runCheckersForPreStmt(dstPreStmt, Pred, CastE, *this);
- if (CastE->getCastKind() == CK_LValueToRValue ||
- CastE->getCastKind() == CK_GetObjCProperty) {
+ if (CastE->getCastKind() == CK_LValueToRValue) {
for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
I!=E; ++I) {
ExplodedNode *subExprNode = *I;
- const ProgramState *state = subExprNode->getState();
- evalLoad(Dst, CastE, subExprNode, state, state->getSVal(Ex));
+ ProgramStateRef state = subExprNode->getState();
+ const LocationContext *LCtx = subExprNode->getLocationContext();
+ evalLoad(Dst, CastE, CastE, subExprNode, state, state->getSVal(Ex, LCtx));
}
return;
}
@@ -196,6 +201,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
T = ExCast->getTypeAsWritten();
+ StmtNodeBuilder Bldr(dstPreStmt, Dst, *currentBuilderContext);
for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
I != E; ++I) {
@@ -204,10 +210,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
switch (CastE->getCastKind()) {
case CK_LValueToRValue:
llvm_unreachable("LValueToRValue casts handled earlier.");
- case CK_GetObjCProperty:
- llvm_unreachable("GetObjCProperty casts handled earlier.");
case CK_ToVoid:
- Dst.Add(Pred);
continue;
// The analyzer doesn't do anything special with these casts,
// since it understands retain/release semantics already.
@@ -215,14 +218,21 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject: // Fall-through.
+ case CK_CopyAndAutoreleaseBlockObject:
+ // The analyzer can ignore atomic casts for now, although some future
+ // checkers may want to make certain that you're not modifying the same
+ // value through atomic and nonatomic pointers.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
// True no-ops.
case CK_NoOp:
case CK_FunctionToPointerDecay: {
// Copy the SVal of Ex to CastE.
- const ProgramState *state = Pred->getState();
- SVal V = state->getSVal(Ex);
- state = state->BindExpr(CastE, V);
- MakeNode(Dst, CastE, Pred, state);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = state->getSVal(Ex, LCtx);
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
continue;
}
case CK_Dependent:
@@ -254,30 +264,76 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast: {
// Delegate to SValBuilder to process.
- const ProgramState *state = Pred->getState();
- SVal V = state->getSVal(Ex);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = state->getSVal(Ex, LCtx);
V = svalBuilder.evalCast(V, T, ExTy);
- state = state->BindExpr(CastE, V);
- MakeNode(Dst, CastE, Pred, state);
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
continue;
}
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase: {
// For DerivedToBase cast, delegate to the store manager.
- const ProgramState *state = Pred->getState();
- SVal val = state->getSVal(Ex);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal val = state->getSVal(Ex, LCtx);
val = getStoreManager().evalDerivedToBase(val, T);
- state = state->BindExpr(CastE, val);
- MakeNode(Dst, CastE, Pred, state);
+ state = state->BindExpr(CastE, LCtx, val);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ // Handle C++ dyn_cast.
+ case CK_Dynamic: {
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal val = state->getSVal(Ex, LCtx);
+
+ // Compute the type of the result.
+ QualType resultType = CastE->getType();
+ if (CastE->isLValue())
+ resultType = getContext().getPointerType(resultType);
+
+ bool Failed = false;
+
+ // Check if the value being cast evaluates to 0.
+ if (val.isZeroConstant())
+ Failed = true;
+ // Else, evaluate the cast.
+ else
+ val = getStoreManager().evalDynamicCast(val, T, Failed);
+
+ if (Failed) {
+ if (T->isReferenceType()) {
+ // A bad_cast exception is thrown when a cast to a reference type fails.
+ // Currently we model this by generating a sink.
+ Bldr.generateNode(CastE, Pred, state, true);
+ continue;
+ } else {
+ // If the cast fails on a pointer, bind to 0.
+ state = state->BindExpr(CastE, LCtx, svalBuilder.makeNull());
+ }
+ } else {
+ // If we don't know if the cast succeeded, conjure a new symbol.
+ if (val.isUnknown()) {
+ DefinedOrUnknownSVal NewSym = svalBuilder.getConjuredSymbolVal(NULL,
+ CastE, LCtx, resultType,
+ currentBuilderContext->getCurrentBlockCount());
+ state = state->BindExpr(CastE, LCtx, NewSym);
+ } else
+ // Else, bind to the derived region value.
+ state = state->BindExpr(CastE, LCtx, val);
+ }
+ Bldr.generateNode(CastE, Pred, state);
continue;
}
- // Various C++ casts that are not handled yet.
- case CK_Dynamic:
+ // Various C++ casts that are not handled yet.
case CK_ToUnion:
case CK_BaseToDerived:
case CK_NullToMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
+ case CK_ReinterpretMemberPointer:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
case CK_VectorSplat:
@@ -286,13 +342,12 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
QualType resultType = CastE->getType();
if (CastE->isLValue())
resultType = getContext().getPointerType(resultType);
-
- SVal result =
- svalBuilder.getConjuredSymbolVal(NULL, CastE, resultType,
- Builder->getCurrentBlockCount());
-
- const ProgramState *state = Pred->getState()->BindExpr(CastE, result);
- MakeNode(Dst, CastE, Pred, state);
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal result = svalBuilder.getConjuredSymbolVal(NULL, CastE, LCtx,
+ resultType, currentBuilderContext->getCurrentBlockCount());
+ ProgramStateRef state = Pred->getState()->BindExpr(CastE, LCtx,
+ result);
+ Bldr.generateNode(CastE, Pred, state);
continue;
}
}
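
The new CK_Dynamic case mirrors C++ dynamic_cast semantics: a failed pointer cast binds a null value, while a failed reference cast would throw std::bad_cast, which the analyzer models as a sink node rather than simulating the exception. Standalone example:

#include <cstdio>
#include <typeinfo>

struct Base { virtual ~Base() {} };
struct Derived : Base {};

int main() {
  Base b;
  // Pointer form: failure yields a null pointer (svalBuilder.makeNull()).
  if (dynamic_cast<Derived*>(&b) == 0)
    std::puts("pointer cast failed -> null");
  // Reference form: failure throws std::bad_cast (modeled as a sink).
  try {
    Derived &d = dynamic_cast<Derived&>(b);
    (void)d;
  } catch (const std::bad_cast &) {
    std::puts("reference cast failed -> bad_cast");
  }
  return 0;
}
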
@@ -302,18 +357,20 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+
const InitListExpr *ILE
= cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
- const ProgramState *state = Pred->getState();
- SVal ILV = state->getSVal(ILE);
+ ProgramStateRef state = Pred->getState();
+ SVal ILV = state->getSVal(ILE, Pred->getLocationContext());
const LocationContext *LC = Pred->getLocationContext();
state = state->bindCompoundLiteral(CL, LC, ILV);
if (CL->isLValue())
- MakeNode(Dst, CL, Pred, state->BindExpr(CL, state->getLValue(CL, LC)));
+ B.generateNode(CL, Pred, state->BindExpr(CL, LC, state->getLValue(CL, LC)));
else
- MakeNode(Dst, CL, Pred, state->BindExpr(CL, ILV));
+ B.generateNode(CL, Pred, state->BindExpr(CL, LC, ILV));
}
void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
@@ -326,29 +383,32 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
// Assumption: The CFG has one DeclStmt per Decl.
const Decl *D = *DS->decl_begin();
- if (!D || !isa<VarDecl>(D))
+ if (!D || !isa<VarDecl>(D)) {
+ // TODO:AZ: remove explicit insertion after refactoring is done.
+ Dst.insert(Pred);
return;
+ }
// FIXME: all pre/post visits should eventually be handled by ::Visit().
ExplodedNodeSet dstPreVisit;
getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, DS, *this);
+ StmtNodeBuilder B(dstPreVisit, Dst, *currentBuilderContext);
const VarDecl *VD = dyn_cast<VarDecl>(D);
-
for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
I!=E; ++I) {
ExplodedNode *N = *I;
- const ProgramState *state = N->getState();
+ ProgramStateRef state = N->getState();
// Decls without InitExpr are not initialized explicitly.
const LocationContext *LC = N->getLocationContext();
if (const Expr *InitEx = VD->getInit()) {
- SVal InitVal = state->getSVal(InitEx);
+ SVal InitVal = state->getSVal(InitEx, Pred->getLocationContext());
// We bound the temp obj region to the CXXConstructExpr. Now recover
// the lazy compound value when the variable is not a reference.
- if (AMgr.getLangOptions().CPlusPlus && VD->getType()->isRecordType() &&
+ if (AMgr.getLangOpts().CPlusPlus && VD->getType()->isRecordType() &&
!VD->getType()->isReferenceType() && isa<loc::MemRegionVal>(InitVal)){
InitVal = state->getSVal(cast<loc::MemRegionVal>(InitVal).getRegion());
assert(isa<nonloc::LazyCompoundVal>(InitVal));
@@ -356,40 +416,46 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
// Recover some path-sensitivity if a scalar value evaluated to
// UnknownVal.
- if ((InitVal.isUnknown() ||
- !getConstraintManager().canReasonAbout(InitVal)) &&
- !VD->getType()->isReferenceType()) {
- InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx,
- Builder->getCurrentBlockCount());
+ if (InitVal.isUnknown()) {
+ QualType Ty = InitEx->getType();
+ if (InitEx->isLValue()) {
+ Ty = getContext().getPointerType(Ty);
+ }
+
+ InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx, LC, Ty,
+ currentBuilderContext->getCurrentBlockCount());
}
-
- evalBind(Dst, DS, N, state->getLValue(VD, LC), InitVal, true);
+ B.takeNodes(N);
+ ExplodedNodeSet Dst2;
+ evalBind(Dst2, DS, N, state->getLValue(VD, LC), InitVal, true);
+ B.addNodes(Dst2);
}
else {
- MakeNode(Dst, DS, N, state->bindDeclWithNoInit(state->getRegion(VD, LC)));
+ B.generateNode(DS, N,state->bindDeclWithNoInit(state->getRegion(VD, LC)));
}
}
}
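
When an initializer evaluates to UnknownVal, VisitDeclStmt now conjures a fresh symbol of the initializer's type (pointer-adjusted for lvalues) so that later branches can still constrain the variable. Compile-only sketch of the analyzed source ('opaque' is a hypothetical external function whose body the analyzer cannot see):

int opaque();  // defined elsewhere; no body visible to the analyzer

int classify() {
  int v = opaque();     // v is bound to a conjured symbol $v, not Unknown
  if (v > 0) return 1;  // feasible path, with constraint $v > 0
  return 0;             // feasible path, with constraint $v <= 0
}
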
void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
-
assert(B->getOpcode() == BO_LAnd ||
B->getOpcode() == BO_LOr);
-
- const ProgramState *state = Pred->getState();
- SVal X = state->getSVal(B);
+
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal X = state->getSVal(B, LCtx);
assert(X.isUndef());
const Expr *Ex = (const Expr*) cast<UndefinedVal>(X).getData();
assert(Ex);
if (Ex == B->getRHS()) {
- X = state->getSVal(Ex);
+ X = state->getSVal(Ex, LCtx);
// Handle undefined values.
if (X.isUndef()) {
- MakeNode(Dst, B, Pred, state->BindExpr(B, X));
+ Bldr.generateNode(B, Pred, state->BindExpr(B, LCtx, X));
return;
}
@@ -401,13 +467,15 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
// value later when necessary. We don't have the machinery in place for
// this right now, and since most logical expressions are used for branches,
// the payoff is not likely to be large. Instead, we do eager evaluation.
- if (const ProgramState *newState = state->assume(XD, true))
- MakeNode(Dst, B, Pred,
- newState->BindExpr(B, svalBuilder.makeIntVal(1U, B->getType())));
+ if (ProgramStateRef newState = state->assume(XD, true))
+ Bldr.generateNode(B, Pred,
+ newState->BindExpr(B, LCtx,
+ svalBuilder.makeIntVal(1U, B->getType())));
- if (const ProgramState *newState = state->assume(XD, false))
- MakeNode(Dst, B, Pred,
- newState->BindExpr(B, svalBuilder.makeIntVal(0U, B->getType())));
+ if (ProgramStateRef newState = state->assume(XD, false))
+ Bldr.generateNode(B, Pred,
+ newState->BindExpr(B, LCtx,
+ svalBuilder.makeIntVal(0U, B->getType())));
}
else {
// We took the LHS expression. Depending on whether we are '&&' or
@@ -415,15 +483,17 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
// the short-circuiting.
X = svalBuilder.makeIntVal(B->getOpcode() == BO_LAnd ? 0U : 1U,
B->getType());
- MakeNode(Dst, B, Pred, state->BindExpr(B, X));
+ Bldr.generateNode(B, Pred, state->BindExpr(B, LCtx, X));
}
}
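
By the time VisitLogicalExpr runs, the CFG has already short-circuited to either the LHS or the RHS; the visitor merely folds the surviving value into a 0 or 1 of the expression's type, emitting every feasible outcome. Standalone example:

int main() {
  int a = 1, b = 0;
  int r = (a && b);  // bound to 0 or 1 of type int, never to b's raw value
  return r;          // on this path the RHS was evaluated, so r == 0
}
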
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
QualType T = getContext().getCanonicalType(IE->getType());
unsigned NumInitElements = IE->getNumInits();
@@ -434,24 +504,27 @@ void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
// e.g: static int* myArray[] = {};
if (NumInitElements == 0) {
SVal V = svalBuilder.makeCompoundVal(T, vals);
- MakeNode(Dst, IE, Pred, state->BindExpr(IE, V));
+ B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
return;
}
for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
ei = IE->rend(); it != ei; ++it) {
- vals = getBasicVals().consVals(state->getSVal(cast<Expr>(*it)), vals);
+ vals = getBasicVals().consVals(state->getSVal(cast<Expr>(*it), LCtx),
+ vals);
}
- MakeNode(Dst, IE, Pred,
- state->BindExpr(IE, svalBuilder.makeCompoundVal(T, vals)));
+ B.generateNode(IE, Pred,
+ state->BindExpr(IE, LCtx,
+ svalBuilder.makeCompoundVal(T, vals)));
return;
}
if (Loc::isLocType(T) || T->isIntegerType()) {
assert(IE->getNumInits() == 1);
const Expr *initEx = IE->getInit(0);
- MakeNode(Dst, IE, Pred, state->BindExpr(IE, state->getSVal(initEx)));
+ B.generateNode(IE, Pred, state->BindExpr(IE, LCtx,
+ state->getSVal(initEx, LCtx)));
return;
}
@@ -463,33 +536,35 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
const Expr *R,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
- const ProgramState *state = Pred->getState();
- SVal X = state->getSVal(Ex);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal X = state->getSVal(Ex, LCtx);
assert (X.isUndef());
const Expr *SE = (Expr*) cast<UndefinedVal>(X).getData();
assert(SE);
- X = state->getSVal(SE);
+ X = state->getSVal(SE, LCtx);
// Make sure that we invalidate the previous binding.
- MakeNode(Dst, Ex, Pred, state->BindExpr(Ex, X, true));
+ B.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, X, true));
}
void ExprEngine::
VisitOffsetOfExpr(const OffsetOfExpr *OOE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
- Expr::EvalResult Res;
- if (OOE->Evaluate(Res, getContext()) && Res.Val.isInt()) {
- const APSInt &IV = Res.Val.getInt();
+ StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+ APSInt IV;
+ if (OOE->EvaluateAsInt(IV, getContext())) {
assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
assert(OOE->getType()->isIntegerType());
assert(IV.isSigned() == OOE->getType()->isSignedIntegerOrEnumerationType());
SVal X = svalBuilder.makeIntVal(IV);
- MakeNode(Dst, OOE, Pred, Pred->getState()->BindExpr(OOE, X));
- return;
+ B.generateNode(OOE, Pred,
+ Pred->getState()->BindExpr(OOE, Pred->getLocationContext(),
+ X));
}
// FIXME: Handle the case where __builtin_offsetof is not a constant.
- Dst.Add(Pred);
}
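
The new code lets Expr::EvaluateAsInt produce the integral constant and asserts that its width and signedness match the expression's type; the asserted invariants mirror the standard offsetof macro. Standalone example:

#include <cstddef>
#include <cstdio>

struct S { char c; int i; };

int main() {
  size_t off = offsetof(S, i);  // an integer constant, which is what
                                // VisitOffsetOfExpr binds via makeIntVal(IV)
  std::printf("offsetof(S, i) = %zu\n", off);  // commonly 4 with 4-byte int
  return 0;
}
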
@@ -497,6 +572,7 @@ void ExprEngine::
VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
QualType T = Ex->getTypeOfArgument();
@@ -506,78 +582,69 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
// FIXME: Add support for VLA type arguments and VLA expressions.
// When that happens, we should probably refactor VLASizeChecker's code.
- Dst.Add(Pred);
return;
}
else if (T->getAs<ObjCObjectType>()) {
// Some code tries to take the sizeof an ObjCObjectType, relying on
// the compiler having laid out its representation. Just report Unknown
// for these.
- Dst.Add(Pred);
return;
}
}
- Expr::EvalResult Result;
- Ex->Evaluate(Result, getContext());
- CharUnits amt = CharUnits::fromQuantity(Result.Val.getInt().getZExtValue());
+ APSInt Value = Ex->EvaluateKnownConstInt(getContext());
+ CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
- const ProgramState *state = Pred->getState();
- state = state->BindExpr(Ex, svalBuilder.makeIntVal(amt.getQuantity(),
+ ProgramStateRef state = Pred->getState();
+ state = state->BindExpr(Ex, Pred->getLocationContext(),
+ svalBuilder.makeIntVal(amt.getQuantity(),
Ex->getType()));
- MakeNode(Dst, Ex, Pred, state);
+ Bldr.generateNode(Ex, Pred, state);
}
void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
ExplodedNode *Pred,
- ExplodedNodeSet &Dst) {
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
switch (U->getOpcode()) {
- default:
+ default: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet Tmp;
+ VisitIncrementDecrementOperator(U, Pred, Tmp);
+ Bldr.addNodes(Tmp);
+ }
break;
case UO_Real: {
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ExplodedNodeSet Tmp;
- Visit(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
-
- // FIXME: We don't have complex SValues yet.
- if (Ex->getType()->isAnyComplexType()) {
- // Just report "Unknown."
- Dst.Add(*I);
- continue;
- }
- // For all other types, UO_Real is an identity operation.
- assert (U->getType() == Ex->getType());
- const ProgramState *state = (*I)->getState();
- MakeNode(Dst, U, *I, state->BindExpr(U, state->getSVal(Ex)));
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ break;
}
-
- return;
+
+ // For all other types, UO_Real is an identity operation.
+ assert (U->getType() == Ex->getType());
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx,
+ state->getSVal(Ex, LCtx)));
+ break;
}
- case UO_Imag: {
-
+ case UO_Imag: {
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ExplodedNodeSet Tmp;
- Visit(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
- // FIXME: We don't have complex SValues yet.
- if (Ex->getType()->isAnyComplexType()) {
- // Just report "Unknown."
- Dst.Add(*I);
- continue;
- }
-
- // For all other types, UO_Imag returns 0.
- const ProgramState *state = (*I)->getState();
- SVal X = svalBuilder.makeZeroVal(Ex->getType());
- MakeNode(Dst, U, *I, state->BindExpr(U, X));
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ break;
}
-
- return;
+ // For all other types, UO_Imag returns 0.
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal X = svalBuilder.makeZeroVal(Ex->getType());
+ Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx, X));
+ break;
}
case UO_Plus:
@@ -586,22 +653,19 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
case UO_Deref:
case UO_AddrOf:
case UO_Extension: {
-
+ // FIXME: We can probably just have some magic in Environment::getSVal()
+ // that propagates values, instead of creating a new node here.
+ //
// Unary "+" is a no-op, similar to parentheses. We still have places
// where it may be a block-level expression, so we need to
// generate an extra node that just propagates the value of the
- // subexpression.
-
+ // subexpression.
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ExplodedNodeSet Tmp;
- Visit(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
- const ProgramState *state = (*I)->getState();
- MakeNode(Dst, U, *I, state->BindExpr(U, state->getSVal(Ex)));
- }
-
- return;
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx,
+ state->getSVal(Ex, LCtx)));
+ break;
}
case UO_LNot:
@@ -609,144 +673,139 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
case UO_Not: {
assert (!U->isLValue());
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ExplodedNodeSet Tmp;
- Visit(Ex, Pred, Tmp);
-
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
- const ProgramState *state = (*I)->getState();
-
- // Get the value of the subexpression.
- SVal V = state->getSVal(Ex);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
- if (V.isUnknownOrUndef()) {
- MakeNode(Dst, U, *I, state->BindExpr(U, V));
- continue;
- }
+ // Get the value of the subexpression.
+ SVal V = state->getSVal(Ex, LCtx);
- switch (U->getOpcode()) {
- default:
- llvm_unreachable("Invalid Opcode.");
-
- case UO_Not:
- // FIXME: Do we need to handle promotions?
- state = state->BindExpr(U, evalComplement(cast<NonLoc>(V)));
- break;
-
- case UO_Minus:
- // FIXME: Do we need to handle promotions?
- state = state->BindExpr(U, evalMinus(cast<NonLoc>(V)));
- break;
-
- case UO_LNot:
-
- // C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
- //
- // Note: technically we do "E == 0", but this is the same in the
- // transfer functions as "0 == E".
- SVal Result;
-
- if (isa<Loc>(V)) {
- Loc X = svalBuilder.makeNull();
- Result = evalBinOp(state, BO_EQ, cast<Loc>(V), X,
- U->getType());
- }
- else {
- nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
- Result = evalBinOp(state, BO_EQ, cast<NonLoc>(V), X,
- U->getType());
- }
-
- state = state->BindExpr(U, Result);
-
- break;
- }
+ if (V.isUnknownOrUndef()) {
+ Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx, V));
+ break;
+ }
- MakeNode(Dst, U, *I, state);
+ switch (U->getOpcode()) {
+ default:
+ llvm_unreachable("Invalid Opcode.");
+ case UO_Not:
+ // FIXME: Do we need to handle promotions?
+ state = state->BindExpr(U, LCtx, evalComplement(cast<NonLoc>(V)));
+ break;
+ case UO_Minus:
+ // FIXME: Do we need to handle promotions?
+ state = state->BindExpr(U, LCtx, evalMinus(cast<NonLoc>(V)));
+ break;
+ case UO_LNot:
+ // C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
+ //
+ // Note: technically we do "E == 0", but this is the same in the
+ // transfer functions as "0 == E".
+ SVal Result;
+ if (isa<Loc>(V)) {
+ Loc X = svalBuilder.makeNull();
+ Result = evalBinOp(state, BO_EQ, cast<Loc>(V), X,
+ U->getType());
+ }
+ else {
+ nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
+ Result = evalBinOp(state, BO_EQ, cast<NonLoc>(V), X,
+ U->getType());
+ }
+
+ state = state->BindExpr(U, LCtx, Result);
+ break;
}
-
- return;
+ Bldr.generateNode(U, Pred, state);
+ break;
}
}
-
+
+}
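
The UO_LNot case reuses the '==' transfer function, per the C99 6.5.3.3 note above: !E is computed as E compared against a zero of the matching kind (a null pointer for Locs, integer 0 for NonLocs). Standalone example:

#include <cassert>

int main() {
  int *p = 0;
  int e = 5;
  assert(!p == (p == 0));  // pointer operand: compared against a null Loc
  assert(!e == (e == 0));  // integer operand: compared against int 0
  return 0;
}
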
+
+void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
// Handle ++ and -- (both pre- and post-increment).
assert (U->isIncrementDecrementOp());
- ExplodedNodeSet Tmp;
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- Visit(Ex, Pred, Tmp);
- for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef state = Pred->getState();
+ SVal loc = state->getSVal(Ex, LCtx);
+
+ // Perform a load.
+ ExplodedNodeSet Tmp;
+ evalLoad(Tmp, U, Ex, Pred, state, loc);
+
+ ExplodedNodeSet Dst2;
+ StmtNodeBuilder Bldr(Tmp, Dst2, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end();I!=E;++I) {
- const ProgramState *state = (*I)->getState();
- SVal loc = state->getSVal(Ex);
+ state = (*I)->getState();
+ assert(LCtx == (*I)->getLocationContext());
+ SVal V2_untested = state->getSVal(Ex, LCtx);
- // Perform a load.
- ExplodedNodeSet Tmp2;
- evalLoad(Tmp2, Ex, *I, state, loc);
+ // Propagate unknown and undefined values.
+ if (V2_untested.isUnknownOrUndef()) {
+ Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V2_untested));
+ continue;
+ }
+ DefinedSVal V2 = cast<DefinedSVal>(V2_untested);
- for (ExplodedNodeSet::iterator I2=Tmp2.begin(), E2=Tmp2.end();I2!=E2;++I2) {
-
- state = (*I2)->getState();
- SVal V2_untested = state->getSVal(Ex);
-
- // Propagate unknown and undefined values.
- if (V2_untested.isUnknownOrUndef()) {
- MakeNode(Dst, U, *I2, state->BindExpr(U, V2_untested));
- continue;
- }
- DefinedSVal V2 = cast<DefinedSVal>(V2_untested);
-
- // Handle all other values.
- BinaryOperator::Opcode Op = U->isIncrementOp() ? BO_Add
- : BO_Sub;
-
- // If the UnaryOperator has non-location type, use its type to create the
- // constant value. If the UnaryOperator has location type, create the
- // constant with int type and pointer width.
- SVal RHS;
-
- if (U->getType()->isAnyPointerType())
- RHS = svalBuilder.makeArrayIndex(1);
- else
- RHS = svalBuilder.makeIntVal(1, U->getType());
-
- SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
-
- // Conjure a new symbol if necessary to recover precision.
- if (Result.isUnknown() || !getConstraintManager().canReasonAbout(Result)){
- DefinedOrUnknownSVal SymVal =
- svalBuilder.getConjuredSymbolVal(NULL, Ex,
- Builder->getCurrentBlockCount());
- Result = SymVal;
+ // Handle all other values.
+ BinaryOperator::Opcode Op = U->isIncrementOp() ? BO_Add : BO_Sub;
+
+ // If the UnaryOperator has non-location type, use its type to create the
+ // constant value. If the UnaryOperator has location type, create the
+ // constant with int type and pointer width.
+ SVal RHS;
+
+ if (U->getType()->isAnyPointerType())
+ RHS = svalBuilder.makeArrayIndex(1);
+ else
+ RHS = svalBuilder.makeIntVal(1, U->getType());
+
+ SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
+
+ // Conjure a new symbol if necessary to recover precision.
+ if (Result.isUnknown()){
+ DefinedOrUnknownSVal SymVal =
+ svalBuilder.getConjuredSymbolVal(NULL, Ex, LCtx,
+ currentBuilderContext->getCurrentBlockCount());
+ Result = SymVal;
+
+ // If the value is a location, ++/-- should always preserve
+ // non-nullness. Check if the original value was non-null, and if so
+ // propagate that constraint.
+ if (Loc::isLocType(U->getType())) {
+ DefinedOrUnknownSVal Constraint =
+ svalBuilder.evalEQ(state, V2,svalBuilder.makeZeroVal(U->getType()));
- // If the value is a location, ++/-- should always preserve
- // non-nullness. Check if the original value was non-null, and if so
- // propagate that constraint.
- if (Loc::isLocType(U->getType())) {
- DefinedOrUnknownSVal Constraint =
- svalBuilder.evalEQ(state, V2,svalBuilder.makeZeroVal(U->getType()));
+ if (!state->assume(Constraint, true)) {
+ // It isn't feasible for the original value to be null.
+ // Propagate this constraint.
+ Constraint = svalBuilder.evalEQ(state, SymVal,
+ svalBuilder.makeZeroVal(U->getType()));
- if (!state->assume(Constraint, true)) {
- // It isn't feasible for the original value to be null.
- // Propagate this constraint.
- Constraint = svalBuilder.evalEQ(state, SymVal,
- svalBuilder.makeZeroVal(U->getType()));
-
-
- state = state->assume(Constraint, false);
- assert(state);
- }
+
+ state = state->assume(Constraint, false);
+ assert(state);
}
}
-
- // Since the lvalue-to-rvalue conversion is explicit in the AST,
- // we bind an l-value if the operator is prefix and an lvalue (in C++).
- if (U->isLValue())
- state = state->BindExpr(U, loc);
- else
- state = state->BindExpr(U, U->isPostfix() ? V2 : Result);
-
- // Perform the store.
- evalStore(Dst, NULL, U, *I2, state, loc, Result);
}
+
+ // Since the lvalue-to-rvalue conversion is explicit in the AST,
+ // we bind an l-value if the operator is prefix and an lvalue (in C++).
+ if (U->isLValue())
+ state = state->BindExpr(U, LCtx, loc);
+ else
+ state = state->BindExpr(U, LCtx, U->isPostfix() ? V2 : Result);
+
+ // Perform the store.
+ Bldr.takeNodes(*I);
+ ExplodedNodeSet Dst3;
+ evalStore(Dst3, U, U, *I, state, loc, Result);
+ Bldr.addNodes(Dst3);
}
+ Dst.insert(Dst2);
}
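
Note the constraint propagation above: when ++/-- on a pointer must conjure a fresh symbol, the new symbol inherits the original value's non-nullness, because pointer arithmetic on a non-null pointer cannot yield null. Standalone example:

#include <cassert>

int main() {
  int buf[4] = {0, 1, 2, 3};
  int *p = buf;  // known non-null
  ++p;           // the conjured result keeps the non-null constraint,
                 // so a later null check on p can be folded away
  assert(p != 0);
  return 0;
}
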
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index acb0074..a14a491 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -16,81 +16,11 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
using namespace clang;
using namespace ento;
-namespace {
-class CallExprWLItem {
-public:
- CallExpr::const_arg_iterator I;
- ExplodedNode *N;
-
- CallExprWLItem(const CallExpr::const_arg_iterator &i, ExplodedNode *n)
- : I(i), N(n) {}
-};
-}
-
-void ExprEngine::evalArguments(ConstExprIterator AI, ConstExprIterator AE,
- const FunctionProtoType *FnType,
- ExplodedNode *Pred, ExplodedNodeSet &Dst,
- bool FstArgAsLValue) {
-
-
- SmallVector<CallExprWLItem, 20> WorkList;
- WorkList.reserve(AE - AI);
- WorkList.push_back(CallExprWLItem(AI, Pred));
-
- while (!WorkList.empty()) {
- CallExprWLItem Item = WorkList.back();
- WorkList.pop_back();
-
- if (Item.I == AE) {
- Dst.insert(Item.N);
- continue;
- }
-
- // Evaluate the argument.
- ExplodedNodeSet Tmp;
- if (FstArgAsLValue) {
- FstArgAsLValue = false;
- }
-
- Visit(*Item.I, Item.N, Tmp);
- ++(Item.I);
- for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI != NE; ++NI)
- WorkList.push_back(CallExprWLItem(Item.I, *NI));
- }
-}
-
-void ExprEngine::evalCallee(const CallExpr *callExpr,
- const ExplodedNodeSet &src,
- ExplodedNodeSet &dest) {
-
- const Expr *callee = 0;
-
- switch (callExpr->getStmtClass()) {
- case Stmt::CXXMemberCallExprClass: {
- // Evaluate the implicit object argument that is the recipient of the
- // call.
- callee = cast<CXXMemberCallExpr>(callExpr)->getImplicitObjectArgument();
-
- // FIXME: handle member pointers.
- if (!callee)
- return;
-
- break;
- }
- default: {
- callee = callExpr->getCallee()->IgnoreParens();
- break;
- }
- }
-
- for (ExplodedNodeSet::iterator i = src.begin(), e = src.end(); i != e; ++i)
- Visit(callee, *i, dest);
-}
-
const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXRecordDecl *D,
const StackFrameContext *SFC) {
const Type *T = D->getTypeForDecl();
@@ -107,19 +37,26 @@ const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXMethodDecl *decl,
void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
const Expr *tempExpr = ME->GetTemporaryExpr()->IgnoreParens();
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
// Bind the temporary object to the value of the expression. Then bind
// the expression to the location of the object.
- SVal V = state->getSVal(tempExpr);
+ SVal V = state->getSVal(tempExpr, Pred->getLocationContext());
const MemRegion *R =
- svalBuilder.getRegionManager().getCXXTempObjectRegion(ME,
- Pred->getLocationContext());
+ svalBuilder.getRegionManager().getCXXTempObjectRegion(ME, LCtx);
state = state->bindLoc(loc::MemRegionVal(R), V);
- MakeNode(Dst, ME, Pred, state->BindExpr(ME, loc::MemRegionVal(R)));
+ Bldr.generateNode(ME, Pred, state->BindExpr(ME, LCtx, loc::MemRegionVal(R)));
+}
+
+void ExprEngine::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *expr,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ VisitCXXConstructExpr(expr, 0, Pred, Dst);
}
void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E,
@@ -127,8 +64,10 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E,
ExplodedNode *Pred,
ExplodedNodeSet &destNodes) {
+#if 0
const CXXConstructorDecl *CD = E->getConstructor();
assert(CD);
+#endif
#if 0
if (!(CD->doesThisDeclarationHaveABody() && AMgr.shouldInlineCall()))
@@ -136,26 +75,19 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E,
return;
#endif
- // Evaluate other arguments.
- ExplodedNodeSet argsEvaluated;
- const FunctionProtoType *FnType = CD->getType()->getAs<FunctionProtoType>();
- evalArguments(E->arg_begin(), E->arg_end(), FnType, Pred, argsEvaluated);
-
#if 0
// Is the constructor elidable?
if (E->isElidable()) {
- VisitAggExpr(E->getArg(0), destNodes, Pred, Dst);
- // FIXME: this is here to force propagation if VisitAggExpr doesn't
- if (destNodes.empty())
- destNodes.Add(Pred);
+ destNodes.Add(Pred);
return;
}
#endif
// Perform the previsit of the constructor.
- ExplodedNodeSet destPreVisit;
- getCheckerManager().runCheckersForPreStmt(destPreVisit, argsEvaluated, E,
- *this);
+ ExplodedNodeSet SrcNodes;
+ SrcNodes.Add(Pred);
+ ExplodedNodeSet TmpNodes;
+ getCheckerManager().runCheckersForPreStmt(TmpNodes, SrcNodes, E, *this);
// Evaluate the constructor. Currently we do not allow checker-specific
// implementations of specific constructors (as we do with ordinary
@@ -174,7 +106,8 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E,
// parameter region.
const StackFrameContext *SFC =
AMgr.getStackFrame(CD, Pred->getLocationContext(),
- E, Builder->getBlock(), Builder->getIndex());
+ E, currentBuilderContext->getBlock(),
+ currentStmtIdx);
// Create the 'this' region.
const CXXThisRegion *ThisR =
@@ -182,34 +115,33 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E,
CallEnter Loc(E, SFC, Pred->getLocationContext());
-
- for (ExplodedNodeSet::iterator NI = argsEvaluated.begin(),
- NE = argsEvaluated.end(); NI != NE; ++NI) {
- const ProgramState *state = (*NI)->getState();
+ StmtNodeBuilder Bldr(SrcNodes, TmpNodes, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator NI = SrcNodes.begin(),
+ NE = SrcNodes.end(); NI != NE; ++NI) {
+ ProgramStateRef state = (*NI)->getState();
// Set up the 'this' region, so that the ctor is evaluated on the object
// pointed to by 'Dest'.
state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
- if (ExplodedNode *N = Builder->generateNode(Loc, state, *NI))
- destNodes.Add(N);
+ Bldr.generateNode(Loc, *NI, state);
}
}
#endif
// Default semantics: invalidate all regions passed as arguments.
ExplodedNodeSet destCall;
-
- for (ExplodedNodeSet::iterator
- i = destPreVisit.begin(), e = destPreVisit.end();
- i != e; ++i)
{
- ExplodedNode *Pred = *i;
- const LocationContext *LC = Pred->getLocationContext();
- const ProgramState *state = Pred->getState();
+ StmtNodeBuilder Bldr(TmpNodes, destCall, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator i = TmpNodes.begin(), e = TmpNodes.end();
+ i != e; ++i)
+ {
+ ExplodedNode *Pred = *i;
+ const LocationContext *LC = Pred->getLocationContext();
+ ProgramStateRef state = Pred->getState();
- state = invalidateArguments(state, CallOrObjCMessage(E, state), LC);
- Builder->MakeNode(destCall, E, Pred, state);
+ state = invalidateArguments(state, CallOrObjCMessage(E, state, LC), LC);
+ Bldr.generateNode(E, Pred, state);
+ }
}
-
// Do the post visit.
getCheckerManager().runCheckersForPostStmt(destNodes, destCall, E, *this);
}
@@ -219,31 +151,33 @@ void ExprEngine::VisitCXXDestructor(const CXXDestructorDecl *DD,
const Stmt *S,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
if (!(DD->doesThisDeclarationHaveABody() && AMgr.shouldInlineCall()))
return;
+
// Create the context for 'this' region.
- const StackFrameContext *SFC = AMgr.getStackFrame(DD,
- Pred->getLocationContext(),
- S, Builder->getBlock(),
- Builder->getIndex());
+ const StackFrameContext *SFC =
+ AnalysisDeclContexts.getContext(DD)->
+ getStackFrame(Pred->getLocationContext(), S,
+ currentBuilderContext->getBlock(), currentStmtIdx);
const CXXThisRegion *ThisR = getCXXThisRegion(DD->getParent(), SFC);
CallEnter PP(S, SFC, Pred->getLocationContext());
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
- ExplodedNode *N = Builder->generateNode(PP, state, Pred);
- if (N)
- Dst.Add(N);
+ Bldr.generateNode(PP, Pred, state);
}
void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
- unsigned blockCount = Builder->getCurrentBlockCount();
+ unsigned blockCount = currentBuilderContext->getCurrentBlockCount();
+ const LocationContext *LCtx = Pred->getLocationContext();
DefinedOrUnknownSVal symVal =
- svalBuilder.getConjuredSymbolVal(NULL, CNE, CNE->getType(), blockCount);
+ svalBuilder.getConjuredSymbolVal(NULL, CNE, LCtx, CNE->getType(), blockCount);
const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion();
QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
const ElementRegion *EleReg =
@@ -252,26 +186,31 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
if (CNE->isArray()) {
// FIXME: allocating an array requires simulating the constructors.
// For now, just return a symbolicated region.
- const ProgramState *state = Pred->getState();
- state = state->BindExpr(CNE, loc::MemRegionVal(EleReg));
- MakeNode(Dst, CNE, Pred, state);
+ ProgramStateRef state = Pred->getState();
+ state = state->BindExpr(CNE, Pred->getLocationContext(),
+ loc::MemRegionVal(EleReg));
+ Bldr.generateNode(CNE, Pred, state);
return;
}
+ // FIXME: Update for AST changes.
+#if 0
// Evaluate constructor arguments.
const FunctionProtoType *FnType = NULL;
const CXXConstructorDecl *CD = CNE->getConstructor();
if (CD)
FnType = CD->getType()->getAs<FunctionProtoType>();
ExplodedNodeSet argsEvaluated;
+ Bldr.takeNodes(Pred);
evalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(),
FnType, Pred, argsEvaluated);
+ Bldr.addNodes(argsEvaluated);
// Initialize the object region and bind the 'new' expression.
for (ExplodedNodeSet::iterator I = argsEvaluated.begin(),
E = argsEvaluated.end(); I != E; ++I) {
- const ProgramState *state = (*I)->getState();
+ ProgramStateRef state = (*I)->getState();
// Accumulate list of regions that are invalidated.
// FIXME: Eventually we should unify the logic for constructor
@@ -281,7 +220,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
ai = CNE->constructor_arg_begin(), ae = CNE->constructor_arg_end();
ai != ae; ++ai)
{
- SVal val = state->getSVal(*ai);
+ SVal val = state->getSVal(*ai, (*I)->getLocationContext());
if (const MemRegion *region = val.getAsRegion())
regionsToInvalidate.push_back(region);
}
@@ -289,18 +228,21 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
if (ObjTy->isRecordType()) {
regionsToInvalidate.push_back(EleReg);
// Invalidate the regions.
+ // TODO: Pass information about the call to 'new' as the last argument,
+ // to limit which globals get invalidated.
state = state->invalidateRegions(regionsToInvalidate,
- CNE, blockCount, 0,
- /* invalidateGlobals = */ true);
+ CNE, blockCount, 0, 0);
} else {
// Invalidate the regions.
+ // TODO: Pass the call-to-new information as the last argument, to limit
+ // which globals get invalidated.
state = state->invalidateRegions(regionsToInvalidate,
- CNE, blockCount, 0,
- /* invalidateGlobals = */ true);
+ CNE, blockCount, 0, 0);
if (CNE->hasInitializer()) {
- SVal V = state->getSVal(*CNE->constructor_arg_begin());
+ SVal V = state->getSVal(*CNE->constructor_arg_begin(),
+ (*I)->getLocationContext());
state = state->bindLoc(loc::MemRegionVal(EleReg), V);
} else {
// Explicitly set to undefined, because currently we retrieve symbolic
@@ -308,32 +250,51 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal());
}
}
- state = state->BindExpr(CNE, loc::MemRegionVal(EleReg));
- MakeNode(Dst, CNE, *I, state);
+ state = state->BindExpr(CNE, (*I)->getLocationContext(),
+ loc::MemRegionVal(EleReg));
+ Bldr.generateNode(CNE, *I, state);
}
+#endif
}
void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
- ExplodedNode *Pred,ExplodedNodeSet &Dst) {
- // Should do more checking.
- ExplodedNodeSet Argevaluated;
- Visit(CDE->getArgument(), Pred, Argevaluated);
- for (ExplodedNodeSet::iterator I = Argevaluated.begin(),
- E = Argevaluated.end(); I != E; ++I) {
- const ProgramState *state = (*I)->getState();
- MakeNode(Dst, CDE, *I, state);
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(CDE, Pred, state);
+}
+
+void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const VarDecl *VD = CS->getExceptionDecl();
+ if (!VD) {
+ Dst.Add(Pred);
+ return;
}
+
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = svalBuilder.getConjuredSymbolVal(CS, LCtx, VD->getType(),
+ currentBuilderContext->getCurrentBlockCount());
+ ProgramStateRef state = Pred->getState();
+ state = state->bindLoc(state->getLValue(VD, LCtx), V);
+
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ Bldr.generateNode(CS, Pred, state);
}
void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+
// Get the this object region from StoreManager.
+ const LocationContext *LCtx = Pred->getLocationContext();
const MemRegion *R =
svalBuilder.getRegionManager().getCXXThisRegion(
getContext().getCanonicalType(TE->getType()),
- Pred->getLocationContext());
+ LCtx);
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
SVal V = state->getSVal(loc::MemRegionVal(R));
- MakeNode(Dst, TE, Pred, state->BindExpr(TE, V));
+ Bldr.generateNode(TE, Pred, state->BindExpr(TE, LCtx, V));
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 6d377b9..b99bd54 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -15,40 +15,72 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/Analysis/Support/SaveAndRestore.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace ento;
-namespace {
- // Trait class for recording returned expression in the state.
- struct ReturnExpr {
- static int TagInt;
- typedef const Stmt *data_type;
- };
- int ReturnExpr::TagInt;
+void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
+ // Get the entry block in the CFG of the callee.
+ const StackFrameContext *calleeCtx = CE.getCalleeContext();
+ const CFG *CalleeCFG = calleeCtx->getCFG();
+ const CFGBlock *Entry = &(CalleeCFG->getEntry());
+
+ // Validate the CFG.
+ assert(Entry->empty());
+ assert(Entry->succ_size() == 1);
+
+ // Get the solitary successor.
+ const CFGBlock *Succ = *(Entry->succ_begin());
+
+ // Construct an edge representing the starting location in the callee.
+ BlockEdge Loc(Entry, Succ, calleeCtx);
+
+ // Construct a new state which contains the mapping from actual to
+ // formal arguments.
+ const LocationContext *callerCtx = Pred->getLocationContext();
+ ProgramStateRef state = Pred->getState()->enterStackFrame(callerCtx,
+ calleeCtx);
+
+ // Construct a new node and add it to the worklist.
+ bool isNew;
+ ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
+ Node->addPredecessor(Pred, G);
+ if (isNew)
+ Engine.getWorkList()->enqueue(Node);
}
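A detail worth calling out in the new processCallEnter: exploded-graph nodes are unique per (program point, state) pair, and only a freshly created node is enqueued, which is what keeps the exploration finite. A minimal standalone sketch of that discipline (hypothetical types only, no Clang APIs):

#include <deque>
#include <map>
#include <set>
#include <utility>

// Stand-in for a (ProgramPoint, ProgramState) pair.
typedef std::pair<int, int> NodeKey;

struct Node {
  NodeKey Key;
  std::set<const Node *> Preds;
};

struct ToyExplodedGraph {
  std::map<NodeKey, Node> Nodes;
  std::deque<Node *> WorkList;

  // Mirrors G.getNode(...) + addPredecessor + conditional enqueue:
  // a node exists at most once per key, and only a newly created
  // node is scheduled for further exploration.
  void transition(const Node *Pred, NodeKey Key) {
    bool IsNew = Nodes.find(Key) == Nodes.end();
    Node &N = Nodes[Key];
    N.Key = Key;
    N.Preds.insert(Pred);
    if (IsNew)
      WorkList.push_back(&N);
  }
};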
-void ExprEngine::processCallEnter(CallEnterNodeBuilder &B) {
- const ProgramState *state =
- B.getState()->enterStackFrame(B.getCalleeContext());
- B.generateNode(state);
+static const ReturnStmt *getReturnStmt(const ExplodedNode *Node) {
+ while (Node) {
+ const ProgramPoint &PP = Node->getLocation();
+ // Skip any BlockEdges and CallExit points.
+ if (isa<BlockEdge>(PP) || isa<CallExit>(PP)) {
+ assert(Node->pred_size() == 1);
+ Node = *Node->pred_begin();
+ continue;
+ }
+ if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP)) {
+ const Stmt *S = SP->getStmt();
+ return dyn_cast<ReturnStmt>(S);
+ }
+ break;
+ }
+ return 0;
}
-void ExprEngine::processCallExit(CallExitNodeBuilder &B) {
- const ProgramState *state = B.getState();
- const ExplodedNode *Pred = B.getPredecessor();
+void ExprEngine::processCallExit(ExplodedNode *Pred) {
+ ProgramStateRef state = Pred->getState();
const StackFrameContext *calleeCtx =
- cast<StackFrameContext>(Pred->getLocationContext());
+ Pred->getLocationContext()->getCurrentStackFrame();
+ const LocationContext *callerCtx = calleeCtx->getParent();
const Stmt *CE = calleeCtx->getCallSite();
// If the callee returns an expression, bind its value to CallExpr.
- const Stmt *ReturnedExpr = state->get<ReturnExpr>();
- if (ReturnedExpr) {
- SVal RetVal = state->getSVal(ReturnedExpr);
- state = state->BindExpr(CE, RetVal);
- // Clear the return expr GDM.
- state = state->remove<ReturnExpr>();
+ if (const ReturnStmt *RS = getReturnStmt(Pred)) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = state->getSVal(RS, LCtx);
+ state = state->BindExpr(CE, callerCtx, V);
}
// Bind the constructed object value to CXXConstructExpr.
@@ -58,14 +90,197 @@ void ExprEngine::processCallExit(CallExitNodeBuilder &B) {
SVal ThisV = state->getSVal(ThisR);
// Always bind the region to the CXXConstructExpr.
- state = state->BindExpr(CCE, ThisV);
+ state = state->BindExpr(CCE, Pred->getLocationContext(), ThisV);
}
- B.generateNode(state);
+ static SimpleProgramPointTag returnTag("ExprEngine : Call Return");
+ PostStmt Loc(CE, callerCtx, &returnTag);
+ bool isNew;
+ ExplodedNode *N = G.getNode(Loc, state, false, &isNew);
+ N->addPredecessor(Pred, G);
+ if (!isNew)
+ return;
+
+ // Perform the post-condition check of the CallExpr.
+ ExplodedNodeSet Dst;
+ NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), N);
+ SaveAndRestore<const NodeBuilderContext*> NBCSave(currentBuilderContext,
+ &Ctx);
+ SaveAndRestore<unsigned> CBISave(currentStmtIdx, calleeCtx->getIndex());
+
+ getCheckerManager().runCheckersForPostStmt(Dst, N, CE, *this,
+ /* wasInlined */ true);
+
+ // Enqueue the next element in the block.
+ for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end(); I != E; ++I) {
+ Engine.getWorkList()->enqueue(*I,
+ calleeCtx->getCallSiteBlock(),
+ calleeCtx->getIndex()+1);
+ }
}
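After the post-call checks, every surviving node is rescheduled at the element just past the call site in the caller's block. In outline, with hypothetical worklist types (not the Clang API):

#include <cstddef>
#include <vector>

struct Frame {
  int CallSiteBlock;          // caller block containing the CallExpr
  std::size_t CallSiteIndex;  // index of the call within that block
};

struct Task {
  int Block;
  std::size_t Idx;
};

// Mirrors the enqueue loop at the end of processCallExit: resume the
// caller one element past the call site for each checked node.
void resumeCaller(const Frame &F, std::size_t NumCheckedNodes,
                  std::vector<Task> &WorkList) {
  for (std::size_t I = 0; I != NumCheckedNodes; ++I) {
    Task T = { F.CallSiteBlock, F.CallSiteIndex + 1 };
    WorkList.push_back(T);
  }
}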
-const ProgramState *
-ExprEngine::invalidateArguments(const ProgramState *State,
+static unsigned getNumberStackFrames(const LocationContext *LCtx) {
+ unsigned count = 0;
+ while (LCtx) {
+ if (isa<StackFrameContext>(LCtx))
+ ++count;
+ LCtx = LCtx->getParent();
+ }
+ return count;
+}
+
+// Determine if we should inline the call.
+bool ExprEngine::shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred) {
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
+ const CFG *CalleeCFG = CalleeADC->getCFG();
+
+ if (getNumberStackFrames(Pred->getLocationContext())
+ == AMgr.InlineMaxStackDepth)
+ return false;
+
+ if (Engine.FunctionSummaries->hasReachedMaxBlockCount(FD))
+ return false;
+
+ if (CalleeCFG->getNumBlockIDs() > AMgr.InlineMaxFunctionSize)
+ return false;
+
+ return true;
+}
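The predicate above is three independent budget checks. Reduced to plain values it looks like the following sketch; the thresholds are made up here, while the real ones come from AnalysisManager (InlineMaxStackDepth / InlineMaxFunctionSize):

// All-or-nothing inlining decision: depth cap, per-callee block
// budget, and callee size. Thresholds are illustrative only.
bool shouldInlineSketch(unsigned StackDepth, unsigned CalleeBlocks,
                        bool CalleeExhaustedBlockBudget,
                        unsigned MaxDepth = 5, unsigned MaxSize = 200) {
  if (StackDepth == MaxDepth)
    return false; // too deep in the call chain
  if (CalleeExhaustedBlockBudget)
    return false; // callee already hit its analysis budget
  if (CalleeBlocks > MaxSize)
    return false; // callee too large to inline profitably
  return true;
}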
+
+// For now, skip inlining variadic functions.
+// We also don't inline blocks.
+static bool shouldInlineCallExpr(const CallExpr *CE, ExprEngine *E) {
+ if (!E->getAnalysisManager().shouldInlineCall())
+ return false;
+ QualType callee = CE->getCallee()->getType();
+ const FunctionProtoType *FT = 0;
+ if (const PointerType *PT = callee->getAs<PointerType>())
+ FT = dyn_cast<FunctionProtoType>(PT->getPointeeType());
+ else if (const BlockPointerType *BT = callee->getAs<BlockPointerType>()) {
+ // FIXME: inline blocks.
+ // FT = dyn_cast<FunctionProtoType>(BT->getPointeeType());
+ (void) BT;
+ return false;
+ }
+ // If we have no prototype, assume the function is okay.
+ if (!FT)
+ return true;
+
+ // Skip inlining of variadic functions.
+ return !FT->isVariadic();
+}
+
+bool ExprEngine::InlineCall(ExplodedNodeSet &Dst,
+ const CallExpr *CE,
+ ExplodedNode *Pred) {
+ if (!shouldInlineCallExpr(CE, this))
+ return false;
+
+ ProgramStateRef state = Pred->getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionDecl *FD =
+ state->getSVal(Callee, Pred->getLocationContext()).getAsFunctionDecl();
+ if (!FD || !FD->hasBody(FD))
+ return false;
+
+ switch (CE->getStmtClass()) {
+ default:
+ // FIXME: Handle C++.
+ break;
+ case Stmt::CallExprClass: {
+ if (!shouldInlineDecl(FD, Pred))
+ return false;
+
+ // Construct a new stack frame for the callee.
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
+ const StackFrameContext *CallerSFC =
+ Pred->getLocationContext()->getCurrentStackFrame();
+ const StackFrameContext *CalleeSFC =
+ CalleeADC->getStackFrame(CallerSFC, CE,
+ currentBuilderContext->getBlock(),
+ currentStmtIdx);
+
+ CallEnter Loc(CE, CalleeSFC, Pred->getLocationContext());
+ bool isNew;
+ if (ExplodedNode *N = G.getNode(Loc, state, false, &isNew)) {
+ N->addPredecessor(Pred, G);
+ if (isNew)
+ Engine.getWorkList()->enqueue(N);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool isPointerToConst(const ParmVarDecl *ParamDecl) {
+ QualType PointeeTy = ParamDecl->getOriginalType()->getPointeeType();
+ if (PointeeTy != QualType() && PointeeTy.isConstQualified() &&
+ !PointeeTy->isAnyPointerType() && !PointeeTy->isReferenceType()) {
+ return true;
+ }
+ return false;
+}
+
+// Try to retrieve the function declaration and find the parameters whose
+// types are pointers/references to a non-pointer const type.
+// We do not invalidate the corresponding argument regions.
+static void findPtrToConstParams(llvm::SmallSet<unsigned, 1> &PreserveArgs,
+ const CallOrObjCMessage &Call) {
+ const Decl *CallDecl = Call.getDecl();
+ if (!CallDecl)
+ return;
+
+ if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(CallDecl)) {
+ const IdentifierInfo *II = FDecl->getIdentifier();
+
+ // List the cases where the region should be invalidated even if the
+ // argument is const.
+ if (II) {
+ StringRef FName = II->getName();
+ // - 'int pthread_setspecific(pthread_key k, const void *)' stores a
+ // value into thread-local storage. The value can later be retrieved with
+ // 'void *pthread_getspecific(pthread_key)'. So even though the
+ // parameter is 'const void *', the region escapes through the call.
+ // - funopen - sets a buffer for future IO calls.
+ // - ObjC functions that end with "NoCopy" can free the memory of the
+ // passed-in buffer.
+ // - Many CF containers allow objects to escape through custom
+ // allocators/deallocators upon container construction.
+ // - NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove.
+ if (FName == "pthread_setspecific" ||
+ FName == "funopen" ||
+ FName.endswith("NoCopy") ||
+ (FName.startswith("NS") &&
+ (FName.find("Insert") != StringRef::npos)) ||
+ Call.isCFCGAllowingEscape(FName))
+ return;
+ }
+
+ for (unsigned Idx = 0, E = Call.getNumArgs(); Idx != E; ++Idx) {
+ if (FDecl && Idx < FDecl->getNumParams()) {
+ if (isPointerToConst(FDecl->getParamDecl(Idx)))
+ PreserveArgs.insert(Idx);
+ }
+ }
+ return;
+ }
+
+ if (const ObjCMethodDecl *MDecl = dyn_cast<ObjCMethodDecl>(CallDecl)) {
+ assert(MDecl->param_size() <= Call.getNumArgs());
+ unsigned Idx = 0;
+ for (clang::ObjCMethodDecl::param_const_iterator
+ I = MDecl->param_begin(), E = MDecl->param_end(); I != E; ++I, ++Idx) {
+ if (isPointerToConst(*I))
+ PreserveArgs.insert(Idx);
+ }
+ return;
+ }
+}
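In effect the two loops above compute the same set: indices of parameters typed as pointer or reference to a non-pointer const. A self-contained approximation over a toy type model (deliberately not the Clang type system):

#include <cstddef>
#include <set>
#include <vector>

// Toy model of the parts of a parameter type isPointerToConst checks.
struct ToyParamType {
  bool HasPointee;         // pointer or reference type?
  bool PointeeConst;       // pointee const-qualified?
  bool PointeeIsPtrOrRef;  // pointee itself a pointer/reference?
};

// 'const char *' -> preserved; 'char *' and 'const char **' -> not.
static bool pointsToNonPointerConst(const ToyParamType &T) {
  return T.HasPointee && T.PointeeConst && !T.PointeeIsPtrOrRef;
}

static void collectPreservedArgs(const std::vector<ToyParamType> &Params,
                                 std::set<std::size_t> &Preserve) {
  for (std::size_t I = 0; I != Params.size(); ++I)
    if (pointsToNonPointerConst(Params[I]))
      Preserve.insert(I);
}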
+
+ProgramStateRef
+ExprEngine::invalidateArguments(ProgramStateRef State,
const CallOrObjCMessage &Call,
const LocationContext *LC) {
SmallVector<const MemRegion *, 8> RegionsToInvalidate;
@@ -85,13 +300,21 @@ ExprEngine::invalidateArguments(const ProgramState *State,
} else if (Call.isFunctionCall()) {
// Block calls invalidate all captured-by-reference values.
- if (const MemRegion *Callee = Call.getFunctionCallee().getAsRegion()) {
+ SVal CalleeVal = Call.getFunctionCallee();
+ if (const MemRegion *Callee = CalleeVal.getAsRegion()) {
if (isa<BlockDataRegion>(Callee))
RegionsToInvalidate.push_back(Callee);
}
}
+ // Indexes of arguments whose values will be preserved by the call.
+ llvm::SmallSet<unsigned, 1> PreserveArgs;
+ findPtrToConstParams(PreserveArgs, Call);
+
for (unsigned idx = 0, e = Call.getNumArgs(); idx != e; ++idx) {
+ if (PreserveArgs.count(idx))
+ continue;
+
SVal V = Call.getArgSVal(idx);
// If we are passing a location wrapped as an integer, unwrap it and
@@ -105,7 +328,7 @@ ExprEngine::invalidateArguments(const ProgramState *State,
// Invalidate the value of the variable passed by reference.
// Are we dealing with an ElementRegion? If the element type is
- // a basic integer type (e.g., char, int) and the underying region
+ // a basic integer type (e.g., char, int) and the underlying region
// is a variable region then strip off the ElementRegion.
// FIXME: We really need to think about this for the general case
// as sometimes we are reasoning about arrays and other times
@@ -116,7 +339,7 @@ ExprEngine::invalidateArguments(const ProgramState *State,
// we'll leave it in for now until we have a systematic way of
// handling all of these cases. Eventually we need to come up
// with an interface to StoreManager so that this logic can be
- // approriately delegated to the respective StoreManagers while
+ // appropriately delegated to the respective StoreManagers while
// still allowing us to do checker-specific logic (e.g.,
// invalidating reference counts), probably via callbacks.
if (ER->getElementType()->isIntegralOrEnumerationType()) {
@@ -146,18 +369,29 @@ ExprEngine::invalidateArguments(const ProgramState *State,
// to identify conjured symbols by an expression pair: the enclosing
// expression (the context) and the expression itself. This should
// disambiguate conjured symbols.
- assert(Builder && "Invalidating arguments outside of a statement context");
- unsigned Count = Builder->getCurrentBlockCount();
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
StoreManager::InvalidatedSymbols IS;
// NOTE: Even if RegionsToInvalidate is empty, we may still invalidate
// global variables.
return State->invalidateRegions(RegionsToInvalidate,
- Call.getOriginExpr(), Count,
- &IS, doesInvalidateGlobals(Call));
+ Call.getOriginExpr(), Count, LC,
+ &IS, &Call);
}
+static ProgramStateRef getReplayWithoutInliningState(ExplodedNode *&N,
+ const CallExpr *CE) {
+ void *ReplayState = N->getState()->get<ReplayWithoutInlining>();
+ if (!ReplayState)
+ return 0;
+ const CallExpr *ReplayCE = reinterpret_cast<const CallExpr*>(ReplayState);
+ if (CE == ReplayCE) {
+ return N->getState()->remove<ReplayWithoutInlining>();
+ }
+ return 0;
+}
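The marker consumed here is a one-shot instruction left in the state by a previous visit: "this exact call failed under inlining, evaluate it conservatively this time." The consume-once pattern in isolation, on a hypothetical immutable state:

#include <utility>

// Hypothetical immutable state: just the one-shot replay marker.
struct ToyState {
  const void *ReplayMarker; // call being replayed, or null
};

// If the marker names this call, return (true, state with the marker
// cleared); otherwise (false, unchanged) and the caller tries
// inlining first -- mirroring getReplayWithoutInliningState.
std::pair<bool, ToyState> consumeReplay(ToyState S, const void *Call) {
  if (!S.ReplayMarker || S.ReplayMarker != Call)
    return std::make_pair(false, S);
  S.ReplayMarker = 0;
  return std::make_pair(true, S);
}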
+
void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
ExplodedNodeSet &dst) {
// Perform the previsit of the CallExpr.
@@ -173,20 +407,21 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
DefaultEval(ExprEngine &eng, const CallExpr *ce)
: Eng(eng), CE(ce) {}
virtual void expandGraph(ExplodedNodeSet &Dst, ExplodedNode *Pred) {
- // Should we inline the call?
- if (Eng.getAnalysisManager().shouldInlineCall() &&
- Eng.InlineCall(Dst, CE, Pred)) {
+
+ ProgramStateRef state = getReplayWithoutInliningState(Pred, CE);
+
+ // First, try to inline the call.
+ if (state == 0 && Eng.InlineCall(Dst, CE, Pred))
return;
- }
// First handle the return value.
- StmtNodeBuilder &Builder = Eng.getBuilder();
- assert(&Builder && "StmtNodeBuilder must be defined.");
+ StmtNodeBuilder Bldr(Pred, Dst, *Eng.currentBuilderContext);
// Get the callee.
const Expr *Callee = CE->getCallee()->IgnoreParens();
- const ProgramState *state = Pred->getState();
- SVal L = state->getSVal(Callee);
+ if (state == 0)
+ state = Pred->getState();
+ SVal L = state->getSVal(Callee, Pred->getLocationContext());
// Figure out the result type. We do this dance to handle references.
QualType ResultTy;
@@ -200,18 +435,19 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
// Conjure a symbol value to use as the result.
SValBuilder &SVB = Eng.getSValBuilder();
- unsigned Count = Builder.getCurrentBlockCount();
- SVal RetVal = SVB.getConjuredSymbolVal(0, CE, ResultTy, Count);
+ unsigned Count = Eng.currentBuilderContext->getCurrentBlockCount();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count);
// Generate a new state with the return value set.
- state = state->BindExpr(CE, RetVal);
+ state = state->BindExpr(CE, LCtx, RetVal);
// Invalidate the arguments.
- const LocationContext *LC = Pred->getLocationContext();
- state = Eng.invalidateArguments(state, CallOrObjCMessage(CE, state), LC);
+ state = Eng.invalidateArguments(state, CallOrObjCMessage(CE, state, LCtx),
+ LCtx);
// And make the result node.
- Eng.MakeNode(Dst, CE, Pred, state);
+ Bldr.generateNode(CE, Pred, state);
}
};
@@ -231,23 +467,16 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
- ExplodedNodeSet Src;
- if (const Expr *RetE = RS->getRetValue()) {
- // Record the returned expression in the state. It will be used in
- // processCallExit to bind the return value to the call expr.
- {
- static SimpleProgramPointTag tag("ExprEngine: ReturnStmt");
- const ProgramState *state = Pred->getState();
- state = state->set<ReturnExpr>(RetE);
- Pred = Builder->generateNode(RetE, state, Pred, &tag);
+
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
+
+ StmtNodeBuilder B(dstPreVisit, Dst, *currentBuilderContext);
+
+ if (RS->getRetValue()) {
+ for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
+ ei = dstPreVisit.end(); it != ei; ++it) {
+ B.generateNode(RS, *it, (*it)->getState());
}
- // We may get a NULL Pred because we generated a cached node.
- if (Pred)
- Visit(RetE, Pred, Src);
}
- else {
- Src.Add(Pred);
- }
-
- getCheckerManager().runCheckersForPreStmt(Dst, Src, RS, *this);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index e0560fd..c8ad70a 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -11,10 +11,10 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/StmtObjC.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
-#include "clang/Analysis/Support/SaveAndRestore.h"
using namespace clang;
using namespace ento;
@@ -22,13 +22,14 @@ using namespace ento;
void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *Ex,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
-
- const ProgramState *state = Pred->getState();
- SVal baseVal = state->getSVal(Ex->getBase());
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal baseVal = state->getSVal(Ex->getBase(), LCtx);
SVal location = state->getLValue(Ex->getDecl(), baseVal);
ExplodedNodeSet dstIvar;
- MakeNode(dstIvar, Ex, Pred, state->BindExpr(Ex, location));
+ StmtNodeBuilder Bldr(Pred, dstIvar, *currentBuilderContext);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, location));
// Perform the post-condition check of the ObjCIvarRefExpr and store
// the created nodes in 'Dst'.
@@ -69,10 +70,11 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
// For now: simulate (1) by assigning either a symbol or nil if the
// container is empty. Thus this transfer function will by default
// result in state splitting.
-
+
const Stmt *elem = S->getElement();
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
SVal elementV;
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
if (const DeclStmt *DS = dyn_cast<DeclStmt>(elem)) {
const VarDecl *elemD = cast<VarDecl>(DS->getSingleDecl());
@@ -80,27 +82,27 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
elementV = state->getLValue(elemD, Pred->getLocationContext());
}
else {
- elementV = state->getSVal(elem);
+ elementV = state->getSVal(elem, Pred->getLocationContext());
}
ExplodedNodeSet dstLocation;
- evalLocation(dstLocation, elem, Pred, state, elementV, NULL, false);
-
- if (dstLocation.empty())
- return;
+ Bldr.takeNodes(Pred);
+ evalLocation(dstLocation, S, elem, Pred, state, elementV, NULL, false);
+ Bldr.addNodes(dstLocation);
for (ExplodedNodeSet::iterator NI = dstLocation.begin(),
NE = dstLocation.end(); NI!=NE; ++NI) {
Pred = *NI;
- const ProgramState *state = Pred->getState();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
// Handle the case where the container still has elements.
SVal TrueV = svalBuilder.makeTruthVal(1);
- const ProgramState *hasElems = state->BindExpr(S, TrueV);
+ ProgramStateRef hasElems = state->BindExpr(S, LCtx, TrueV);
// Handle the case where the container has no elements.
SVal FalseV = svalBuilder.makeTruthVal(0);
- const ProgramState *noElems = state->BindExpr(S, FalseV);
+ ProgramStateRef noElems = state->BindExpr(S, LCtx, FalseV);
if (loc::MemRegionVal *MV = dyn_cast<loc::MemRegionVal>(&elementV))
if (const TypedValueRegion *R =
@@ -110,8 +112,8 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
// For now, just 'conjure' up a symbolic value.
QualType T = R->getValueType();
assert(Loc::isLocType(T));
- unsigned Count = Builder->getCurrentBlockCount();
- SymbolRef Sym = SymMgr.getConjuredSymbol(elem, T, Count);
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ SymbolRef Sym = SymMgr.getConjuredSymbol(elem, LCtx, T, Count);
SVal V = svalBuilder.makeLoc(Sym);
hasElems = hasElems->bindLoc(elementV, V);
@@ -121,8 +123,8 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
}
// Create the new nodes.
- MakeNode(Dst, S, Pred, hasElems);
- MakeNode(Dst, S, Pred, noElems);
+ Bldr.generateNode(S, Pred, hasElems);
+ Bldr.generateNode(S, Pred, noElems);
}
}
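The hasElems/noElems pair above is the analyzer's standard state bifurcation: one predecessor state forks into two successors that differ only in the value bound to the loop condition. Schematically, with a toy binding map in place of ProgramState:

#include <map>
#include <string>
#include <utility>

typedef std::map<std::string, int> ToyState; // expr name -> bound value

// Fork one state into "container has elements" / "container is empty",
// mirroring the BindExpr(S, TrueV) / BindExpr(S, FalseV) pair above.
std::pair<ToyState, ToyState> bifurcate(const ToyState &In,
                                        const std::string &CondExpr) {
  ToyState HasElems = In, NoElems = In;
  HasElems[CondExpr] = 1;
  NoElems[CondExpr] = 0;
  return std::make_pair(HasElems, NoElems);
}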
@@ -137,29 +139,27 @@ void ExprEngine::VisitObjCMessage(const ObjCMessage &msg,
// Proceed with evaluating the message expression.
ExplodedNodeSet dstEval;
-
+ StmtNodeBuilder Bldr(dstPrevisit, dstEval, *currentBuilderContext);
+
for (ExplodedNodeSet::iterator DI = dstPrevisit.begin(),
DE = dstPrevisit.end(); DI != DE; ++DI) {
ExplodedNode *Pred = *DI;
bool RaisesException = false;
- SaveAndRestore<bool> OldSink(Builder->BuildSinks);
- SaveOr OldHasGen(Builder->hasGeneratedNode);
if (const Expr *Receiver = msg.getInstanceReceiver()) {
- const ProgramState *state = Pred->getState();
- SVal recVal = state->getSVal(Receiver);
+ ProgramStateRef state = Pred->getState();
+ SVal recVal = state->getSVal(Receiver, Pred->getLocationContext());
if (!recVal.isUndef()) {
// Bifurcate the state into nil and non-nil ones.
DefinedOrUnknownSVal receiverVal = cast<DefinedOrUnknownSVal>(recVal);
- const ProgramState *notNilState, *nilState;
+ ProgramStateRef notNilState, nilState;
llvm::tie(notNilState, nilState) = state->assume(receiverVal);
// There are three cases: can be nil or non-nil, must be nil, must be
// non-nil. We ignore must be nil, and merge the rest two into non-nil.
if (nilState && !notNilState) {
- dstEval.insert(Pred);
continue;
}
@@ -168,13 +168,10 @@ void ExprEngine::VisitObjCMessage(const ObjCMessage &msg,
if (msg.getSelector() == RaiseSel)
RaisesException = true;
- // Check if we raise an exception. For now treat these as sinks.
+ // If we raise an exception, for now treat it as a sink.
// Eventually we will want to handle exceptions properly.
- if (RaisesException)
- Builder->BuildSinks = true;
-
// Dispatch to plug-in transfer function.
- evalObjCMessage(dstEval, msg, Pred, notNilState);
+ evalObjCMessage(Bldr, msg, Pred, notNilState, RaisesException);
}
}
else if (const ObjCInterfaceDecl *Iface = msg.getReceiverInterface()) {
@@ -217,16 +214,11 @@ void ExprEngine::VisitObjCMessage(const ObjCMessage &msg,
}
}
- // Check if we raise an exception. For now treat these as sinks.
+ // If we raise an exception, for now treat it as a sink.
// Eventually we will want to handle exceptions properly.
- if (RaisesException)
- Builder->BuildSinks = true;
-
// Dispatch to plug-in transfer function.
- evalObjCMessage(dstEval, msg, Pred, Pred->getState());
+ evalObjCMessage(Bldr, msg, Pred, Pred->getState(), RaisesException);
}
-
- assert(Builder->BuildSinks || Builder->hasGeneratedNode);
}
// Finally, perform the post-condition check of the ObjCMessageExpr and store
@@ -234,11 +226,11 @@ void ExprEngine::VisitObjCMessage(const ObjCMessage &msg,
getCheckerManager().runCheckersForPostObjCMessage(Dst, dstEval, msg, *this);
}
-void ExprEngine::evalObjCMessage(ExplodedNodeSet &Dst, const ObjCMessage &msg,
+void ExprEngine::evalObjCMessage(StmtNodeBuilder &Bldr,
+ const ObjCMessage &msg,
ExplodedNode *Pred,
- const ProgramState *state) {
- assert (Builder && "StmtNodeBuilder must be defined.");
-
+ ProgramStateRef state,
+ bool GenSink) {
// First handle the return value.
SVal ReturnValue = UnknownVal();
@@ -252,7 +244,7 @@ void ExprEngine::evalObjCMessage(ExplodedNodeSet &Dst, const ObjCMessage &msg,
// These methods return their receivers.
const Expr *ReceiverE = msg.getInstanceReceiver();
if (ReceiverE)
- ReturnValue = state->getSVal(ReceiverE);
+ ReturnValue = state->getSVal(ReceiverE, Pred->getLocationContext());
break;
}
}
@@ -261,19 +253,21 @@ void ExprEngine::evalObjCMessage(ExplodedNodeSet &Dst, const ObjCMessage &msg,
if (ReturnValue.isUnknown()) {
SValBuilder &SVB = getSValBuilder();
QualType ResultTy = msg.getResultType(getContext());
- unsigned Count = Builder->getCurrentBlockCount();
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
const Expr *CurrentE = cast<Expr>(currentStmt);
- ReturnValue = SVB.getConjuredSymbolVal(NULL, CurrentE, ResultTy, Count);
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ReturnValue = SVB.getConjuredSymbolVal(NULL, CurrentE, LCtx, ResultTy, Count);
}
// Bind the return value.
- state = state->BindExpr(currentStmt, ReturnValue);
+ const LocationContext *LCtx = Pred->getLocationContext();
+ state = state->BindExpr(currentStmt, LCtx, ReturnValue);
// Invalidate the arguments (and the receiver)
- const LocationContext *LC = Pred->getLocationContext();
- state = invalidateArguments(state, CallOrObjCMessage(msg, state), LC);
+ state = invalidateArguments(state, CallOrObjCMessage(msg, state, LCtx), LCtx);
// And create the new node.
- MakeNode(Dst, msg.getOriginExpr(), Pred, state);
+ Bldr.generateNode(msg.getMessageExpr(), Pred, state, GenSink);
+ assert(Bldr.hasGeneratedNodes());
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp
new file mode 100644
index 0000000..c227aac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp
@@ -0,0 +1,38 @@
+//== FunctionSummary.cpp - Stores summaries of functions. ----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a summary of a function gathered/used by the static analyzer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
+using namespace clang;
+using namespace ento;
+
+FunctionSummariesTy::~FunctionSummariesTy() {
+ for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+ delete(I->second);
+ }
+}
+
+unsigned FunctionSummariesTy::getTotalNumBasicBlocks() {
+ unsigned Total = 0;
+ for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+ Total += I->second->TotalBasicBlocks;
+ }
+ return Total;
+}
+
+unsigned FunctionSummariesTy::getTotalNumVisitedBasicBlocks() {
+ unsigned Total = 0;
+ for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+ Total += I->second->VisitedBasicBlocks.count();
+ }
+ return Total;
+}
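These totals are typically combined into a coverage figure; a trivial helper of that sort (not part of the patch) might be:

// Percentage of basic blocks the analyzer actually visited.
double basicBlockCoverage(unsigned Visited, unsigned Total) {
  return Total ? (100.0 * Visited) / Total : 0.0;
}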
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 0c4e427..629f1ea 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -39,15 +39,13 @@ class HTMLDiagnostics : public PathDiagnosticConsumer {
llvm::sys::Path Directory, FilePrefix;
bool createdDir, noDir;
const Preprocessor &PP;
- std::vector<const PathDiagnostic*> BatchedDiags;
public:
HTMLDiagnostics(const std::string& prefix, const Preprocessor &pp);
virtual ~HTMLDiagnostics() { FlushDiagnostics(NULL); }
- virtual void FlushDiagnostics(SmallVectorImpl<std::string> *FilesMade);
-
- virtual void HandlePathDiagnosticImpl(const PathDiagnostic* D);
+ virtual void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade);
virtual StringRef getName() const {
return "HTMLDiagnostics";
@@ -88,34 +86,49 @@ ento::createHTMLDiagnosticConsumer(const std::string& prefix,
// Report processing.
//===----------------------------------------------------------------------===//
-void HTMLDiagnostics::HandlePathDiagnosticImpl(const PathDiagnostic* D) {
- if (!D)
- return;
-
- if (D->empty()) {
- delete D;
- return;
+void HTMLDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade) {
+ for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
+ et = Diags.end(); it != et; ++it) {
+ ReportDiag(**it, FilesMade);
}
-
- const_cast<PathDiagnostic*>(D)->flattenLocations();
- BatchedDiags.push_back(D);
}
-void
-HTMLDiagnostics::FlushDiagnostics(SmallVectorImpl<std::string> *FilesMade)
-{
- while (!BatchedDiags.empty()) {
- const PathDiagnostic* D = BatchedDiags.back();
- BatchedDiags.pop_back();
- ReportDiag(*D, FilesMade);
- delete D;
+static void flattenPath(PathPieces &primaryPath, PathPieces &currentPath,
+ const PathPieces &oldPath) {
+ for (PathPieces::const_iterator it = oldPath.begin(), et = oldPath.end();
+ it != et; ++it ) {
+ PathDiagnosticPiece *piece = it->getPtr();
+ if (const PathDiagnosticCallPiece *call =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnter =
+ call->getCallEnterEvent();
+ if (callEnter)
+ currentPath.push_back(callEnter);
+ flattenPath(primaryPath, primaryPath, call->path);
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+ call->getCallExitEvent();
+ if (callExit)
+ currentPath.push_back(callExit);
+ continue;
+ }
+ if (PathDiagnosticMacroPiece *macro =
+ dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ currentPath.push_back(piece);
+ PathPieces newPath;
+ flattenPath(primaryPath, newPath, macro->subPieces);
+ macro->subPieces = newPath;
+ continue;
+ }
+
+ currentPath.push_back(piece);
}
-
- BatchedDiags.clear();
}
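Note the asymmetry in flattenPath: call pieces are spliced inline into the primary path (so a report reads as one linear trace), while macro pieces keep their nesting and only have their sub-pieces flattened in place. The same shape on a toy piece type, with the call-enter/exit event bracketing omitted:

#include <cstddef>
#include <memory>
#include <vector>

struct ToyPiece {
  enum Kind { Event, Call, Macro } K;
  std::vector<std::shared_ptr<ToyPiece> > Children; // call path / subpieces
};
typedef std::vector<std::shared_ptr<ToyPiece> > ToyPath;

// Calls are spliced into the primary path; macros stay nested but get
// their children flattened in place.
void flattenSketch(ToyPath &Primary, ToyPath &Current, const ToyPath &Old) {
  for (std::size_t I = 0; I != Old.size(); ++I) {
    const std::shared_ptr<ToyPiece> &P = Old[I];
    if (P->K == ToyPiece::Call) {
      flattenSketch(Primary, Primary, P->Children);
      continue;
    }
    if (P->K == ToyPiece::Macro) {
      Current.push_back(P);
      ToyPath NewSub;
      flattenSketch(Primary, NewSub, P->Children);
      P->Children = NewSub;
      continue;
    }
    Current.push_back(P);
  }
}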
void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
- SmallVectorImpl<std::string> *FilesMade){
+ SmallVectorImpl<std::string> *FilesMade) {
+
// Create the HTML directory if it is missing.
if (!createdDir) {
createdDir = true;
@@ -138,47 +151,29 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
if (noDir)
return;
- const SourceManager &SMgr = D.begin()->getLocation().getManager();
- FileID FID;
-
- // Verify that the entire path is from the same FileID.
- for (PathDiagnostic::const_iterator I = D.begin(), E = D.end(); I != E; ++I) {
- FullSourceLoc L = I->getLocation().asLocation().getExpansionLoc();
-
- if (FID.isInvalid()) {
- FID = SMgr.getFileID(L);
- } else if (SMgr.getFileID(L) != FID)
- return; // FIXME: Emit a warning?
-
- // Check the source ranges.
- for (PathDiagnosticPiece::range_iterator RI=I->ranges_begin(),
- RE=I->ranges_end(); RI!=RE; ++RI) {
-
- SourceLocation L = SMgr.getExpansionLoc(RI->getBegin());
-
- if (!L.isFileID() || SMgr.getFileID(L) != FID)
- return; // FIXME: Emit a warning?
-
- L = SMgr.getExpansionLoc(RI->getEnd());
-
- if (!L.isFileID() || SMgr.getFileID(L) != FID)
- return; // FIXME: Emit a warning?
- }
- }
+ // First flatten out the entire path to make it easier to use.
+ PathPieces path;
+ flattenPath(path, path, D.path);
- if (FID.isInvalid())
- return; // FIXME: Emit a warning?
+ // The path has already been prechecked: all pieces are from the same
+ // file and the path is non-empty.
+ const SourceManager &SMgr = (*path.begin())->getLocation().getManager();
+ assert(!path.empty());
+ FileID FID =
+ (*path.begin())->getLocation().asLocation().getExpansionLoc().getFileID();
+ assert(!FID.isInvalid());
// Create a new rewriter to generate HTML.
- Rewriter R(const_cast<SourceManager&>(SMgr), PP.getLangOptions());
+ Rewriter R(const_cast<SourceManager&>(SMgr), PP.getLangOpts());
// Process the path.
- unsigned n = D.size();
+ unsigned n = path.size();
unsigned max = n;
- for (PathDiagnostic::const_reverse_iterator I=D.rbegin(), E=D.rend();
- I!=E; ++I, --n)
- HandlePiece(R, FID, *I, n, max);
+ for (PathPieces::const_reverse_iterator I = path.rbegin(),
+ E = path.rend();
+ I != E; ++I, --n)
+ HandlePiece(R, FID, **I, n, max);
// Add line numbers, header, footer, etc.
@@ -221,9 +216,9 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
<< html::EscapeText(Entry->getName())
<< "</td></tr>\n<tr><td class=\"rowname\">Location:</td><td>"
"<a href=\"#EndPath\">line "
- << (*D.rbegin()).getLocation().asLocation().getExpansionLineNumber()
+ << (*path.rbegin())->getLocation().asLocation().getExpansionLineNumber()
<< ", column "
- << (*D.rbegin()).getLocation().asLocation().getExpansionColumnNumber()
+ << (*path.rbegin())->getLocation().asLocation().getExpansionColumnNumber()
<< "</a></td></tr>\n"
"<tr><td class=\"rowname\">Description:</td><td>"
<< D.getDescription() << "</td></tr>\n";
@@ -261,10 +256,10 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
os << "\n<!-- BUGFILE " << DirName << Entry->getName() << " -->\n";
os << "\n<!-- BUGLINE "
- << D.back()->getLocation().asLocation().getExpansionLineNumber()
+ << path.back()->getLocation().asLocation().getExpansionLineNumber()
<< " -->\n";
- os << "\n<!-- BUGPATHLENGTH " << D.size() << " -->\n";
+ os << "\n<!-- BUGPATHLENGTH " << path.size() << " -->\n";
// Mark the end of the tags.
os << "\n<!-- BUGMETAEND -->\n";
@@ -353,6 +348,8 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
const char *Kind = 0;
switch (P.getKind()) {
+ case PathDiagnosticPiece::Call:
+ llvm_unreachable("Calls should already be handled");
case PathDiagnosticPiece::Event: Kind = "Event"; break;
case PathDiagnosticPiece::ControlFlow: Kind = "Control"; break;
// Setting Kind to "Control" is intentional.
@@ -445,7 +442,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
assert(L.isFileID());
StringRef BufferInfo = L.getBufferData();
const char* MacroName = L.getDecomposedLoc().second + BufferInfo.data();
- Lexer rawLexer(L, PP.getLangOptions(), BufferInfo.begin(),
+ Lexer rawLexer(L, PP.getLangOpts(), BufferInfo.begin(),
MacroName, BufferInfo.end());
Token TheTok;
@@ -518,7 +515,7 @@ unsigned HTMLDiagnostics::ProcessMacroPiece(raw_ostream &os,
const PathDiagnosticMacroPiece& P,
unsigned num) {
- for (PathDiagnosticMacroPiece::const_iterator I=P.begin(), E=P.end();
+ for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
I!=E; ++I) {
if (const PathDiagnosticMacroPiece *MP =
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 6f92da8..ed94c79 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -18,7 +18,9 @@
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/Support/BumpVector.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -219,6 +221,17 @@ DefinedOrUnknownSVal StringRegion::getExtent(SValBuilder &svalBuilder) const {
svalBuilder.getArrayIndexType());
}
+ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const MemRegion* sReg)
+ : DeclRegion(ivd, sReg, ObjCIvarRegionKind) {}
+
+const ObjCIvarDecl *ObjCIvarRegion::getDecl() const {
+ return cast<ObjCIvarDecl>(D);
+}
+
+QualType ObjCIvarRegion::getValueType() const {
+ return getDecl()->getType();
+}
+
QualType CXXBaseObjectRegion::getValueType() const {
return QualType(decl->getTypeForDecl(), 0);
}
@@ -249,6 +262,14 @@ void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
ID.AddPointer(superRegion);
}
+void ObjCStringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const ObjCStringLiteral* Str,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) ObjCStringRegionKind);
+ ID.AddPointer(Str);
+ ID.AddPointer(superRegion);
+}
+
void AllocaRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
const Expr *Ex, unsigned cnt,
const MemRegion *) {
@@ -285,6 +306,12 @@ void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
}
+void ObjCIvarRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const ObjCIvarDecl *ivd,
+ const MemRegion* superRegion) {
+ DeclRegion::ProfileRegion(ID, ivd, superRegion, ObjCIvarRegionKind);
+}
+
void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl *D,
const MemRegion* superRegion, Kind k) {
ID.AddInteger((unsigned) k);
@@ -337,7 +364,7 @@ void FunctionTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
void BlockTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
const BlockDecl *BD, CanQualType,
- const AnalysisContext *AC,
+ const AnalysisDeclContext *AC,
const MemRegion*) {
ID.AddInteger(MemRegion::BlockTextRegionKind);
ID.AddPointer(BD);
@@ -384,6 +411,20 @@ void CXXBaseObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
}
//===----------------------------------------------------------------------===//
+// Region anchors.
+//===----------------------------------------------------------------------===//
+
+void GlobalsSpaceRegion::anchor() { }
+void HeapSpaceRegion::anchor() { }
+void UnknownSpaceRegion::anchor() { }
+void StackLocalsSpaceRegion::anchor() { }
+void StackArgumentsSpaceRegion::anchor() { }
+void TypedRegion::anchor() { }
+void TypedValueRegion::anchor() { }
+void CodeTextRegion::anchor() { }
+void SubRegion::anchor() { }
+
+//===----------------------------------------------------------------------===//
// Region pretty-printing.
//===----------------------------------------------------------------------===//
@@ -445,16 +486,16 @@ void FieldRegion::dumpToStream(raw_ostream &os) const {
os << superRegion << "->" << *getDecl();
}
-void NonStaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
- os << "NonStaticGlobalSpaceRegion";
-}
-
void ObjCIvarRegion::dumpToStream(raw_ostream &os) const {
os << "ivar{" << superRegion << ',' << *getDecl() << '}';
}
void StringRegion::dumpToStream(raw_ostream &os) const {
- Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOptions()));
+ Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOpts()));
+}
+
+void ObjCStringRegion::dumpToStream(raw_ostream &os) const {
+ Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOpts()));
}
void SymbolicRegion::dumpToStream(raw_ostream &os) const {
@@ -477,6 +518,35 @@ void StaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
os << "StaticGlobalsMemSpace{" << CR << '}';
}
+void NonStaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "NonStaticGlobalSpaceRegion";
+}
+
+void GlobalInternalSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalInternalSpaceRegion";
+}
+
+void GlobalSystemSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalSystemSpaceRegion";
+}
+
+void GlobalImmutableSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalImmutableSpaceRegion";
+}
+
+void MemRegion::dumpPretty(raw_ostream &os) const {
+ return;
+}
+
+void VarRegion::dumpPretty(raw_ostream &os) const {
+ os << getDecl()->getName();
+}
+
+void FieldRegion::dumpPretty(raw_ostream &os) const {
+ superRegion->dumpPretty(os);
+ os << "->" << getDecl();
+}
+
//===----------------------------------------------------------------------===//
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
@@ -528,10 +598,18 @@ MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
}
const GlobalsSpaceRegion
-*MemRegionManager::getGlobalsRegion(const CodeTextRegion *CR) {
- if (!CR)
- return LazyAllocate(globals);
+*MemRegionManager::getGlobalsRegion(MemRegion::Kind K,
+ const CodeTextRegion *CR) {
+ if (!CR) {
+ if (K == MemRegion::GlobalSystemSpaceRegionKind)
+ return LazyAllocate(SystemGlobals);
+ if (K == MemRegion::GlobalImmutableSpaceRegionKind)
+ return LazyAllocate(ImmutableGlobals);
+ assert(K == MemRegion::GlobalInternalSpaceRegionKind);
+ return LazyAllocate(InternalGlobals);
+ }
+ assert(K == MemRegion::StaticGlobalSpaceRegionKind);
StaticGlobalSpaceRegion *&R = StaticsGlobalSpaceRegions[CR];
if (R)
return R;
@@ -556,18 +634,44 @@ const MemSpaceRegion *MemRegionManager::getCodeRegion() {
//===----------------------------------------------------------------------===//
// Constructing regions.
//===----------------------------------------------------------------------===//
-
const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){
return getSubRegion<StringRegion>(Str, getGlobalsRegion());
}
+const ObjCStringRegion *
+MemRegionManager::getObjCStringRegion(const ObjCStringLiteral* Str){
+ return getSubRegion<ObjCStringRegion>(Str, getGlobalsRegion());
+}
+
const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
const LocationContext *LC) {
const MemRegion *sReg = 0;
- if (D->hasGlobalStorage() && !D->isStaticLocal())
- sReg = getGlobalsRegion();
- else {
+ if (D->hasGlobalStorage() && !D->isStaticLocal()) {
+
+ // First handle the globals defined in system headers.
+ if (C.getSourceManager().isInSystemHeader(D->getLocation())) {
+ // Whitelist the system globals which often DO GET modified; assume the
+ // rest are immutable.
+ if (D->getName().find("errno") != StringRef::npos)
+ sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
+ else
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+
+ // Treat other globals as GlobalInternal unless they are constants.
+ } else {
+ QualType GQT = D->getType();
+ const Type *GT = GQT.getTypePtrOrNull();
+ // TODO: We could walk the complex types here and see if everything is
+ // constified.
+ if (GT && GQT.isConstQualified() && GT->isArithmeticType())
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+ else
+ sReg = getGlobalsRegion();
+ }
+
+ // Finally handle static locals.
+ } else {
// FIXME: Once we implement scope handling, we will need to properly lookup
// 'D' to the proper LocationContext.
const DeclContext *DC = D->getDeclContext();
@@ -585,13 +689,15 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
assert(D->isStaticLocal());
const Decl *D = STC->getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- sReg = getGlobalsRegion(getFunctionTextRegion(FD));
+ sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
+ getFunctionTextRegion(FD));
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
const BlockTextRegion *BTR =
getBlockTextRegion(BD,
C.getCanonicalType(BD->getSignatureAsWritten()->getType()),
- STC->getAnalysisContext());
- sReg = getGlobalsRegion(BTR);
+ STC->getAnalysisDeclContext());
+ sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
+ BTR);
}
else {
// FIXME: For ObjC-methods, we need a new CodeTextRegion. For now
@@ -614,18 +720,24 @@ const BlockDataRegion *
MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
const LocationContext *LC) {
const MemRegion *sReg = 0;
-
- if (LC) {
- // FIXME: Once we implement scope handling, we want the parent region
- // to be the scope.
- const StackFrameContext *STC = LC->getCurrentStackFrame();
- assert(STC);
- sReg = getStackLocalsRegion(STC);
+ const BlockDecl *BD = BC->getDecl();
+ if (!BD->hasCaptures()) {
+ // This handles 'static' blocks.
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
}
else {
- // We allow 'LC' to be NULL for cases where want BlockDataRegions
- // without context-sensitivity.
- sReg = getUnknownRegion();
+ if (LC) {
+ // FIXME: Once we implement scope handling, we want the parent region
+ // to be the scope.
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ sReg = getStackLocalsRegion(STC);
+ }
+ else {
+ // We allow 'LC' to be NULL for cases where we want BlockDataRegions
+ // without context-sensitivity.
+ sReg = getUnknownRegion();
+ }
}
return getSubRegion<BlockDataRegion>(BC, LC, sReg);
@@ -678,7 +790,7 @@ MemRegionManager::getFunctionTextRegion(const FunctionDecl *FD) {
const BlockTextRegion *
MemRegionManager::getBlockTextRegion(const BlockDecl *BD, CanQualType locTy,
- AnalysisContext *AC) {
+ AnalysisDeclContext *AC) {
return getSubRegion<BlockTextRegion>(BD, locTy, AC, getCodeRegion());
}
@@ -928,8 +1040,8 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
if (ReferencedVars)
return;
- AnalysisContext *AC = getCodeRegion()->getAnalysisContext();
- AnalysisContext::referenced_decls_iterator I, E;
+ AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext();
+ AnalysisDeclContext::referenced_decls_iterator I, E;
llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());
if (I == E) {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
index 0974fe8..65cdcd9 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
@@ -13,108 +13,16 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/AST/DeclCXX.h"
using namespace clang;
using namespace ento;
-QualType ObjCMessage::getType(ASTContext &ctx) const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getType();
- const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
- if (isPropertySetter())
- return ctx.VoidTy;
- return propE->getType();
-}
-
-Selector ObjCMessage::getSelector() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getSelector();
- const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
- if (isPropertySetter())
- return propE->getSetterSelector();
- return propE->getGetterSelector();
-}
-
-ObjCMethodFamily ObjCMessage::getMethodFamily() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- // Case 1. Explicit message send.
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getMethodFamily();
-
- const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
-
- // Case 2. Reference to implicit property.
- if (propE->isImplicitProperty()) {
- if (isPropertySetter())
- return propE->getImplicitPropertySetter()->getMethodFamily();
- else
- return propE->getImplicitPropertyGetter()->getMethodFamily();
- }
-
- // Case 3. Reference to explicit property.
- const ObjCPropertyDecl *prop = propE->getExplicitProperty();
- if (isPropertySetter()) {
- if (prop->getSetterMethodDecl())
- return prop->getSetterMethodDecl()->getMethodFamily();
- return prop->getSetterName().getMethodFamily();
- } else {
- if (prop->getGetterMethodDecl())
- return prop->getGetterMethodDecl()->getMethodFamily();
- return prop->getGetterName().getMethodFamily();
- }
-}
-
-const ObjCMethodDecl *ObjCMessage::getMethodDecl() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getMethodDecl();
- const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
- if (propE->isImplicitProperty())
- return isPropertySetter() ? propE->getImplicitPropertySetter()
- : propE->getImplicitPropertyGetter();
- return 0;
-}
-
-const ObjCInterfaceDecl *ObjCMessage::getReceiverInterface() const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getReceiverInterface();
- const ObjCPropertyRefExpr *propE = cast<ObjCPropertyRefExpr>(MsgOrPropE);
- if (propE->isClassReceiver())
- return propE->getClassReceiver();
- QualType recT;
- if (const Expr *recE = getInstanceReceiver())
- recT = recE->getType();
- else {
- assert(propE->isSuperReceiver());
- recT = propE->getSuperReceiverType();
- }
- if (const ObjCObjectPointerType *Ptr = recT->getAs<ObjCObjectPointerType>())
- return Ptr->getInterfaceDecl();
- return 0;
-}
-
-const Expr *ObjCMessage::getArgExpr(unsigned i) const {
- assert(isValid() && "This ObjCMessage is uninitialized!");
- assert(i < getNumArgs() && "Invalid index for argument");
- if (const ObjCMessageExpr *msgE = dyn_cast<ObjCMessageExpr>(MsgOrPropE))
- return msgE->getArg(i);
- assert(isPropertySetter());
- if (const BinaryOperator *bop = dyn_cast<BinaryOperator>(OriginE))
- if (bop->isAssignmentOp())
- return bop->getRHS();
- return 0;
-}
-
QualType CallOrObjCMessage::getResultType(ASTContext &ctx) const {
QualType resultTy;
bool isLVal = false;
if (isObjCMessage()) {
- isLVal = isa<ObjCMessageExpr>(Msg.getOriginExpr()) &&
- Msg.getOriginExpr()->isLValue();
resultTy = Msg.getResultType(ctx);
} else if (const CXXConstructExpr *Ctor =
CallE.dyn_cast<const CXXConstructExpr *>()) {
@@ -124,7 +32,7 @@ QualType CallOrObjCMessage::getResultType(ASTContext &ctx) const {
isLVal = FunctionCall->isLValue();
const Expr *Callee = FunctionCall->getCallee();
- if (const FunctionDecl *FD = State->getSVal(Callee).getAsFunctionDecl())
+ if (const FunctionDecl *FD = State->getSVal(Callee, LCtx).getAsFunctionDecl())
resultTy = FD->getResultType();
else
resultTy = FunctionCall->getType();
@@ -140,7 +48,7 @@ SVal CallOrObjCMessage::getFunctionCallee() const {
assert(isFunctionCall());
assert(!isCXXCall());
const Expr *Fun = CallE.get<const CallExpr *>()->getCallee()->IgnoreParens();
- return State->getSVal(Fun);
+ return State->getSVal(Fun, LCtx);
}
SVal CallOrObjCMessage::getCXXCallee() const {
@@ -154,7 +62,7 @@ SVal CallOrObjCMessage::getCXXCallee() const {
if (!callee)
return UnknownVal();
- return State->getSVal(callee);
+ return State->getSVal(callee, LCtx);
}
SVal
@@ -162,3 +70,21 @@ CallOrObjCMessage::getInstanceMessageReceiver(const LocationContext *LC) const {
assert(isObjCMessage());
return Msg.getInstanceReceiverSVal(State, LC);
}
+
+const Decl *CallOrObjCMessage::getDecl() const {
+ if (isCXXCall()) {
+ const CXXMemberCallExpr *CE =
+ cast<CXXMemberCallExpr>(CallE.dyn_cast<const CallExpr *>());
+ assert(CE);
+ return CE->getMethodDecl();
+ } else if (isObjCMessage()) {
+ return Msg.getMethodDecl();
+ } else if (isFunctionCall()) {
+ // For a C-style call, use the path-sensitive information to find the
+ // function declaration.
+ SVal CalleeVal = getFunctionCallee();
+ return CalleeVal.getAsFunctionDecl();
+ }
+ return 0;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index 3a87903..01dd965 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -13,6 +13,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -24,15 +25,14 @@ using namespace clang;
using namespace ento;
bool PathDiagnosticMacroPiece::containsEvent() const {
- for (const_iterator I = begin(), E = end(); I!=E; ++I) {
+ for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
+ I!=E; ++I) {
if (isa<PathDiagnosticEventPiece>(*I))
return true;
-
if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I))
if (MP->containsEvent())
return true;
}
-
return false;
}
@@ -52,41 +52,189 @@ PathDiagnosticPiece::PathDiagnosticPiece(Kind k, DisplayHint hint)
PathDiagnosticPiece::~PathDiagnosticPiece() {}
PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {}
+PathDiagnosticCallPiece::~PathDiagnosticCallPiece() {}
PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
+PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {}
-PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {
- for (iterator I = begin(), E = end(); I != E; ++I) delete *I;
-}
-PathDiagnostic::PathDiagnostic() : Size(0) {}
+PathPieces::~PathPieces() {}
+PathDiagnostic::~PathDiagnostic() {}
+
+PathDiagnostic::PathDiagnostic(const Decl *declWithIssue,
+ StringRef bugtype, StringRef desc,
+ StringRef category)
+ : DeclWithIssue(declWithIssue),
+ BugType(StripTrailingDots(bugtype)),
+ Desc(StripTrailingDots(desc)),
+ Category(StripTrailingDots(category)),
+ path(pathImpl) {}
+
+void PathDiagnosticConsumer::anchor() { }
-PathDiagnostic::~PathDiagnostic() {
- for (iterator I = begin(), E = end(); I != E; ++I) delete &*I;
+PathDiagnosticConsumer::~PathDiagnosticConsumer() {
+ // Delete the contents of the FoldingSet if it isn't empty already.
+ for (llvm::FoldingSet<PathDiagnostic>::iterator it =
+ Diags.begin(), et = Diags.end() ; it != et ; ++it) {
+ delete &*it;
+ }
}
-void PathDiagnostic::resetPath(bool deletePieces) {
- Size = 0;
+void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
+ llvm::OwningPtr<PathDiagnostic> OwningD(D);
+
+ if (!D || D->path.empty())
+ return;
+
+ // We need to flatten the locations (convert Stmt* to locations) because
+ // the referenced statements may be freed by the time the diagnostics
+ // are emitted.
+ D->flattenLocations();
+
+ // If the PathDiagnosticConsumer does not support diagnostics that
+ // cross file boundaries, prune out such diagnostics now.
+ if (!supportsCrossFileDiagnostics()) {
+ // Verify that the entire path is from the same FileID.
+ FileID FID;
+ const SourceManager &SMgr = (*D->path.begin())->getLocation().getManager();
+ llvm::SmallVector<const PathPieces *, 5> WorkList;
+ WorkList.push_back(&D->path);
+
+ while (!WorkList.empty()) {
+ const PathPieces &path = *WorkList.back();
+ WorkList.pop_back();
+
+ for (PathPieces::const_iterator I = path.begin(), E = path.end();
+ I != E; ++I) {
+ const PathDiagnosticPiece *piece = I->getPtr();
+ FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();
+
+ if (FID.isInvalid()) {
+ FID = SMgr.getFileID(L);
+ } else if (SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+
+ // Check the source ranges.
+ for (PathDiagnosticPiece::range_iterator RI = piece->ranges_begin(),
+ RE = piece->ranges_end();
+ RI != RE; ++RI) {
+ SourceLocation L = SMgr.getExpansionLoc(RI->getBegin());
+ if (!L.isFileID() || SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+ L = SMgr.getExpansionLoc(RI->getEnd());
+ if (!L.isFileID() || SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+ }
+
+ if (const PathDiagnosticCallPiece *call =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ WorkList.push_back(&call->path);
+ }
+ else if (const PathDiagnosticMacroPiece *macro =
+ dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ WorkList.push_back(&macro->subPieces);
+ }
+ }
+ }
+
+ if (FID.isInvalid())
+ return; // FIXME: Emit a warning?
+ }
- if (deletePieces)
- for (iterator I=begin(), E=end(); I!=E; ++I)
- delete &*I;
+ // Profile the node to see if we already have something matching it.
+ llvm::FoldingSetNodeID profile;
+ D->Profile(profile);
+ void *InsertPos = 0;
+
+ if (PathDiagnostic *orig = Diags.FindNodeOrInsertPos(profile, InsertPos)) {
+ // Keep the PathDiagnostic with the shorter path.
+ const unsigned orig_size = orig->full_size();
+ const unsigned new_size = D->full_size();
+
+ if (orig_size <= new_size) {
+ bool shouldKeepOriginal = true;
+ if (orig_size == new_size) {
+ // Here we break ties in a fairly arbitrary, but deterministic, way.
+ llvm::FoldingSetNodeID fullProfile, fullProfileOrig;
+ D->FullProfile(fullProfile);
+ orig->FullProfile(fullProfileOrig);
+ if (fullProfile.ComputeHash() < fullProfileOrig.ComputeHash())
+ shouldKeepOriginal = false;
+ }
- path.clear();
+ if (shouldKeepOriginal)
+ return;
+ }
+ Diags.RemoveNode(orig);
+ delete orig;
+ }
+
+ Diags.InsertNode(OwningD.take());
}
-PathDiagnostic::PathDiagnostic(StringRef bugtype, StringRef desc,
- StringRef category)
- : Size(0),
- BugType(StripTrailingDots(bugtype)),
- Desc(StripTrailingDots(desc)),
- Category(StripTrailingDots(category)) {}
+namespace {
+struct CompareDiagnostics {
+ // Return true if 'X' should be ordered before 'Y'.
+ bool operator()(const PathDiagnostic *X, const PathDiagnostic *Y) const {
+ // First compare by location
+ const FullSourceLoc &XLoc = X->getLocation().asLocation();
+ const FullSourceLoc &YLoc = Y->getLocation().asLocation();
+ if (XLoc < YLoc)
+ return true;
+ if (XLoc != YLoc)
+ return false;
+
+ // Next, compare by bug type.
+ StringRef XBugType = X->getBugType();
+ StringRef YBugType = Y->getBugType();
+ if (XBugType < YBugType)
+ return true;
+ if (XBugType != YBugType)
+ return false;
+
+ // Next, compare by bug description.
+ StringRef XDesc = X->getDescription();
+ StringRef YDesc = Y->getDescription();
+ if (XDesc < YDesc)
+ return true;
+ if (XDesc != YDesc)
+ return false;
+
+ // FIXME: Further refine by comparing PathDiagnosticPieces?
+ return false;
+ }
+};
+}
-void PathDiagnosticConsumer::HandlePathDiagnostic(const PathDiagnostic *D) {
- // For now this simply forwards to HandlePathDiagnosticImpl. In the future
- // we can use this indirection to control for multi-threaded access to
- // the PathDiagnosticConsumer from multiple bug reporters.
- HandlePathDiagnosticImpl(D);
+void
+PathDiagnosticConsumer::FlushDiagnostics(SmallVectorImpl<std::string> *Files) {
+ if (flushed)
+ return;
+
+ flushed = true;
+
+ std::vector<const PathDiagnostic *> BatchDiags;
+ for (llvm::FoldingSet<PathDiagnostic>::iterator it = Diags.begin(),
+ et = Diags.end(); it != et; ++it) {
+ BatchDiags.push_back(&*it);
+ }
+
+ // Clear out the FoldingSet.
+ Diags.clear();
+
+ // Sort the diagnostics so that they are always emitted in a deterministic
+ // order.
+ if (!BatchDiags.empty())
+ std::sort(BatchDiags.begin(), BatchDiags.end(), CompareDiagnostics());
+
+ FlushDiagnosticsImpl(BatchDiags, Files);
+
+ // Delete the flushed diagnostics.
+ for (std::vector<const PathDiagnostic *>::iterator it = BatchDiags.begin(),
+ et = BatchDiags.end(); it != et; ++it) {
+ const PathDiagnostic *D = *it;
+ delete D;
+ }
}
//===----------------------------------------------------------------------===//
@@ -94,9 +242,9 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(const PathDiagnostic *D) {
//===----------------------------------------------------------------------===//
static SourceLocation getValidSourceLocation(const Stmt* S,
- LocationOrAnalysisContext LAC) {
+ LocationOrAnalysisDeclContext LAC) {
SourceLocation L = S->getLocStart();
- assert(!LAC.isNull() && "A valid LocationContext or AnalysisContext should "
+ assert(!LAC.isNull() && "A valid LocationContext or AnalysisDeclContext should "
"be passed to PathDiagnosticLocation upon creation.");
// S might be a temporary statement that does not have a location in the
@@ -107,7 +255,7 @@ static SourceLocation getValidSourceLocation(const Stmt* S,
if (LAC.is<const LocationContext*>())
PM = &LAC.get<const LocationContext*>()->getParentMap();
else
- PM = &LAC.get<AnalysisContext*>()->getParentMap();
+ PM = &LAC.get<AnalysisDeclContext*>()->getParentMap();
while (!L.isValid()) {
S = PM->getParent(S);
@@ -127,7 +275,7 @@ PathDiagnosticLocation
PathDiagnosticLocation
PathDiagnosticLocation::createBegin(const Stmt *S,
const SourceManager &SM,
- LocationOrAnalysisContext LAC) {
+ LocationOrAnalysisDeclContext LAC) {
return PathDiagnosticLocation(getValidSourceLocation(S, LAC),
SM, SingleLocK);
}
@@ -193,9 +341,6 @@ PathDiagnosticLocation
}
return PathDiagnosticLocation(S, SMng, P.getLocationContext());
-
- if (!S)
- return PathDiagnosticLocation();
}
PathDiagnosticLocation
@@ -212,8 +357,9 @@ PathDiagnosticLocation
return PathDiagnosticLocation(PS->getStmt(), SM, LC);
else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
const Stmt *Term = BE->getSrc()->getTerminator();
- assert(Term);
- return PathDiagnosticLocation(Term, SM, LC);
+ if (Term) {
+ return PathDiagnosticLocation(Term, SM, LC);
+ }
}
NI = NI->succ_empty() ? 0 : *(NI->succ_begin());
}
@@ -229,7 +375,7 @@ PathDiagnosticLocation PathDiagnosticLocation::createSingleLocation(
FullSourceLoc
PathDiagnosticLocation::genLocation(SourceLocation L,
- LocationOrAnalysisContext LAC) const {
+ LocationOrAnalysisDeclContext LAC) const {
assert(isValid());
// Note that we want a 'switch' here so that the compiler can warn us in
// case we add more cases.
@@ -238,9 +384,15 @@ FullSourceLoc
case RangeK:
break;
case StmtK:
+ // Defensive checking.
+ if (!S)
+ break;
return FullSourceLoc(getValidSourceLocation(S, LAC),
const_cast<SourceManager&>(*SM));
case DeclK:
+ // Defensive checking.
+ if (!D)
+ break;
return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM));
}
@@ -248,7 +400,7 @@ FullSourceLoc
}
PathDiagnosticRange
- PathDiagnosticLocation::genRange(LocationOrAnalysisContext LAC) const {
+ PathDiagnosticLocation::genRange(LocationOrAnalysisDeclContext LAC) const {
assert(isValid());
// Note that we want a 'switch' here so that the compiler can warn us in
// case we add more cases.
@@ -321,6 +473,132 @@ void PathDiagnosticLocation::flatten() {
}
}
+PathDiagnosticLocation PathDiagnostic::getLocation() const {
+ assert(path.size() > 0 &&
+ "getLocation() requires a non-empty PathDiagnostic.");
+
+ PathDiagnosticPiece *p = path.rbegin()->getPtr();
+
+ while (true) {
+ if (PathDiagnosticCallPiece *cp = dyn_cast<PathDiagnosticCallPiece>(p)) {
+ assert(!cp->path.empty());
+ p = cp->path.rbegin()->getPtr();
+ continue;
+ }
+ break;
+ }
+
+ return p->getLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// Manipulation of PathDiagnosticCallPieces.
+//===----------------------------------------------------------------------===//
+
+static PathDiagnosticLocation getLastStmtLoc(const ExplodedNode *N,
+ const SourceManager &SM) {
+ while (N) {
+ ProgramPoint PP = N->getLocation();
+ if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP))
+ return PathDiagnosticLocation(SP->getStmt(), SM, PP.getLocationContext());
+ if (N->pred_empty())
+ break;
+ N = *N->pred_begin();
+ }
+ return PathDiagnosticLocation();
+}
+
+PathDiagnosticCallPiece *
+PathDiagnosticCallPiece::construct(const ExplodedNode *N,
+ const CallExit &CE,
+ const SourceManager &SM) {
+ const Decl *caller = CE.getLocationContext()->getParent()->getDecl();
+ PathDiagnosticLocation pos = getLastStmtLoc(N, SM);
+ return new PathDiagnosticCallPiece(caller, pos);
+}
+
+PathDiagnosticCallPiece *
+PathDiagnosticCallPiece::construct(PathPieces &path,
+ const Decl *caller) {
+ PathDiagnosticCallPiece *C = new PathDiagnosticCallPiece(path, caller);
+ path.clear();
+ path.push_front(C);
+ return C;
+}
+
+void PathDiagnosticCallPiece::setCallee(const CallEnter &CE,
+ const SourceManager &SM) {
+ const Decl *D = CE.getCalleeContext()->getDecl();
+ Callee = D;
+ callEnter = PathDiagnosticLocation(CE.getCallExpr(), SM,
+ CE.getLocationContext());
+ callEnterWithin = PathDiagnosticLocation::createBegin(D, SM);
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallEnterEvent() const {
+ if (!Callee)
+ return 0;
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ if (isa<BlockDecl>(Callee))
+ Out << "Calling anonymous block";
+ else if (const NamedDecl *ND = dyn_cast<NamedDecl>(Callee))
+ Out << "Calling '" << *ND << "'";
+ StringRef msg = Out.str();
+ if (msg.empty())
+ return 0;
+ return new PathDiagnosticEventPiece(callEnter, msg);
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallEnterWithinCallerEvent() const {
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Caller))
+ Out << "Entered call from '" << *ND << "'";
+ else
+ Out << "Entered call";
+ StringRef msg = Out.str();
+ if (msg.empty())
+ return 0;
+ return new PathDiagnosticEventPiece(callEnterWithin, msg);
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallExitEvent() const {
+ if (NoExit)
+ return 0;
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ if (!CallStackMessage.empty())
+ Out << CallStackMessage;
+ else if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Callee))
+ Out << "Returning from '" << *ND << "'";
+ else
+ Out << "Returning to caller";
+ return new PathDiagnosticEventPiece(callReturn, Out.str());
+}
+
+static void compute_path_size(const PathPieces &pieces, unsigned &size) {
+ for (PathPieces::const_iterator it = pieces.begin(),
+ et = pieces.end(); it != et; ++it) {
+ const PathDiagnosticPiece *piece = it->getPtr();
+ if (const PathDiagnosticCallPiece *cp =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ compute_path_size(cp->path, size);
+ }
+ else
+ ++size;
+ }
+}
+
+unsigned PathDiagnostic::full_size() {
+ unsigned size = 0;
+ compute_path_size(path, size);
+ return size;
+}
+
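A reduced form of compute_path_size()/full_size() above, where a piece with a non-empty sub-path stands in for a call piece (hypothetical Piece type):

  #include <vector>

  struct Piece {
    std::vector<Piece> path; // sub-path; non-empty only for call pieces
  };

  // Call pieces contribute the pieces of their sub-path, not themselves,
  // so the result is the number of leaf pieces in the tree.
  static unsigned fullSize(const std::vector<Piece> &pieces) {
    unsigned n = 0;
    for (const Piece &p : pieces)
      n += p.path.empty() ? 1 : fullSize(p.path);
    return n;
  }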
//===----------------------------------------------------------------------===//
// FoldingSet profiling methods.
//===----------------------------------------------------------------------===//
@@ -343,6 +621,14 @@ void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
}
}
+void PathDiagnosticCallPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ for (PathPieces::const_iterator it = path.begin(),
+ et = path.end(); it != et; ++it) {
+ ID.Add(**it);
+ }
+}
+
void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const {
PathDiagnosticPiece::Profile(ID);
ID.Add(Pos);
@@ -356,18 +642,114 @@ void PathDiagnosticControlFlowPiece::Profile(llvm::FoldingSetNodeID &ID) const {
void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
PathDiagnosticSpotPiece::Profile(ID);
- for (const_iterator I = begin(), E = end(); I != E; ++I)
+ for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
+ I != E; ++I)
ID.Add(**I);
}
void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger(Size);
+ if (!path.empty())
+ getLocation().Profile(ID);
ID.AddString(BugType);
ID.AddString(Desc);
ID.AddString(Category);
- for (const_iterator I = begin(), E = end(); I != E; ++I)
- ID.Add(*I);
-
+}
+
+void PathDiagnostic::FullProfile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID);
+ for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E; ++I)
+ ID.Add(**I);
for (meta_iterator I = meta_begin(), E = meta_end(); I != E; ++I)
ID.AddString(*I);
}
+
+StackHintGenerator::~StackHintGenerator() {}
+
+std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
+ ProgramPoint P = N->getLocation();
+ const CallExit *CExit = dyn_cast<CallExit>(&P);
+ assert(CExit && "Stack Hints should be constructed at CallExit points.");
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(CExit->getStmt());
+ if (!CE)
+ return "";
+
+ // Get the successor node to make sure the return statement is evaluated and
+ // CE is set to the result value.
+ if (N->succ_empty())
+ return getMessageForSymbolNotFound();
+ N = *N->succ_begin();
+
+ // Check if one of the parameters is set to the interesting symbol.
+ ProgramStateRef State = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+ unsigned ArgIndex = 0;
+ for (CallExpr::const_arg_iterator I = CE->arg_begin(),
+ E = CE->arg_end(); I != E; ++I, ++ArgIndex){
+ SVal SV = State->getSVal(*I, LCtx);
+
+ // Check if the variable corresponding to the symbol is passed by value.
+ SymbolRef AS = SV.getAsLocSymbol();
+ if (AS == Sym) {
+ return getMessageForArg(*I, ArgIndex);
+ }
+
+ // Check if the parameter is a pointer to the symbol.
+ if (const loc::MemRegionVal *Reg = dyn_cast<loc::MemRegionVal>(&SV)) {
+ SVal PSV = State->getSVal(Reg->getRegion());
+ SymbolRef AS = PSV.getAsLocSymbol();
+ if (AS == Sym) {
+ return getMessageForArg(*I, ArgIndex);
+ }
+ }
+ }
+
+ // Check if we are returning the interesting symbol.
+ SVal SV = State->getSVal(CE, LCtx);
+ SymbolRef RetSym = SV.getAsLocSymbol();
+ if (RetSym == Sym) {
+ return getMessageForReturn(CE);
+ }
+
+ return getMessageForSymbolNotFound();
+}
+
+/// TODO: This is copied from clang diagnostics. Maybe we could just move it to
+/// some common place. (Same as HandleOrdinalModifier.)
+void StackHintGeneratorForSymbol::printOrdinal(unsigned ValNo,
+ llvm::raw_svector_ostream &Out) {
+ assert(ValNo != 0 && "ValNo must be strictly positive!");
+
+ // We could use text forms for the first N ordinals, but the numeric
+ // forms are actually nicer in diagnostics because they stand out.
+ Out << ValNo;
+
+ // It is critically important that we do this perfectly for
+ // user-written sequences with over 100 elements.
+ switch (ValNo % 100) {
+ case 11:
+ case 12:
+ case 13:
+ Out << "th"; return;
+ default:
+ switch (ValNo % 10) {
+ case 1: Out << "st"; return;
+ case 2: Out << "nd"; return;
+ case 3: Out << "rd"; return;
+ default: Out << "th"; return;
+ }
+ }
+}
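The suffix rules above, exercised on the cases the %100 check exists for; a standalone re-implementation for illustration only:

  #include <cstdio>

  static const char *suffix(unsigned n) {
    unsigned rem = n % 100;
    if (rem == 11 || rem == 12 || rem == 13)
      return "th"; // 11th, 12th, 13th, 111th, 112th, ...
    switch (n % 10) {
    case 1: return "st";
    case 2: return "nd";
    case 3: return "rd";
    default: return "th";
    }
  }

  int main() {
    const unsigned cases[] = {1, 2, 3, 4, 11, 12, 13, 21, 111, 112, 123};
    for (unsigned n : cases)
      std::printf("%u%s ", n, suffix(n));
    // Prints: 1st 2nd 3rd 4th 11th 12th 13th 21st 111th 112th 123rd
    return 0;
  }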
+
+std::string StackHintGeneratorForSymbol::getMessageForArg(const Expr *ArgE,
+ unsigned ArgIndex) {
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << Msg << " via ";
+ // Printed parameters start at 1, not 0.
+ printOrdinal(++ArgIndex, os);
+ os << " parameter";
+
+ return os.str();
+}
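A self-contained analogue of getMessageForArg() (hypothetical messageForArg; illustration only), showing the message it assembles:

  #include <sstream>
  #include <string>

  static std::string messageForArg(const std::string &msg, unsigned argIndex) {
    unsigned n = argIndex + 1; // printed parameters start at 1, not 0
    const char *suf = "th";
    if (n % 100 < 11 || n % 100 > 13) {
      switch (n % 10) {
      case 1: suf = "st"; break;
      case 2: suf = "nd"; break;
      case 3: suf = "rd"; break;
      }
    }
    std::ostringstream os;
    os << msg << " via " << n << suf << " parameter";
    // e.g. ("Returning allocated memory", 1) ->
    //      "Returning allocated memory via 2nd parameter"
    return os.str();
  }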
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 5ae95c6..323cede 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -25,56 +25,23 @@ using namespace ento;
typedef llvm::DenseMap<FileID, unsigned> FIDMap;
-namespace {
-struct CompareDiagnostics {
- // Compare if 'X' is "<" than 'Y'.
- bool operator()(const PathDiagnostic *X, const PathDiagnostic *Y) const {
- // First compare by location
- const FullSourceLoc &XLoc = X->getLocation().asLocation();
- const FullSourceLoc &YLoc = Y->getLocation().asLocation();
- if (XLoc < YLoc)
- return true;
- if (XLoc != YLoc)
- return false;
-
- // Next, compare by bug type.
- StringRef XBugType = X->getBugType();
- StringRef YBugType = Y->getBugType();
- if (XBugType < YBugType)
- return true;
- if (XBugType != YBugType)
- return false;
-
- // Next, compare by bug description.
- StringRef XDesc = X->getDescription();
- StringRef YDesc = Y->getDescription();
- if (XDesc < YDesc)
- return true;
- if (XDesc != YDesc)
- return false;
-
- // FIXME: Further refine by comparing PathDiagnosticPieces?
- return false;
- }
-};
-}
namespace {
class PlistDiagnostics : public PathDiagnosticConsumer {
- std::vector<const PathDiagnostic*> BatchedDiags;
const std::string OutputFile;
const LangOptions &LangOpts;
- llvm::OwningPtr<PathDiagnosticConsumer> SubPD;
+ OwningPtr<PathDiagnosticConsumer> SubPD;
bool flushed;
+ const bool SupportsCrossFileDiagnostics;
public:
PlistDiagnostics(const std::string& prefix, const LangOptions &LangOpts,
+ bool supportsMultipleFiles,
PathDiagnosticConsumer *subPD);
- ~PlistDiagnostics() { FlushDiagnostics(NULL); }
+ virtual ~PlistDiagnostics() {}
- void FlushDiagnostics(SmallVectorImpl<std::string> *FilesMade);
-
- void HandlePathDiagnosticImpl(const PathDiagnostic* D);
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade);
virtual StringRef getName() const {
return "PlistDiagnostics";
@@ -84,18 +51,29 @@ namespace {
bool supportsLogicalOpControlFlow() const { return true; }
bool supportsAllBlockEdges() const { return true; }
virtual bool useVerboseDescription() const { return false; }
+ virtual bool supportsCrossFileDiagnostics() const {
+ return SupportsCrossFileDiagnostics;
+ }
};
} // end anonymous namespace
PlistDiagnostics::PlistDiagnostics(const std::string& output,
const LangOptions &LO,
+ bool supportsMultipleFiles,
PathDiagnosticConsumer *subPD)
- : OutputFile(output), LangOpts(LO), SubPD(subPD), flushed(false) {}
+ : OutputFile(output), LangOpts(LO), SubPD(subPD), flushed(false),
+ SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
PathDiagnosticConsumer*
ento::createPlistDiagnosticConsumer(const std::string& s, const Preprocessor &PP,
PathDiagnosticConsumer *subPD) {
- return new PlistDiagnostics(s, PP.getLangOptions(), subPD);
+ return new PlistDiagnostics(s, PP.getLangOpts(), false, subPD);
+}
+
+PathDiagnosticConsumer*
+ento::createPlistMultiFileDiagnosticConsumer(const std::string &s,
+ const Preprocessor &PP) {
+ return new PlistDiagnostics(s, PP.getLangOpts(), true, 0);
}
PathDiagnosticConsumer::PathGenerationScheme
@@ -167,10 +145,9 @@ static void EmitRange(raw_ostream &o, const SourceManager &SM,
Indent(o, indent) << "</array>\n";
}
-static raw_ostream &EmitString(raw_ostream &o,
- const std::string& s) {
+static raw_ostream &EmitString(raw_ostream &o, StringRef s) {
o << "<string>";
- for (std::string::const_iterator I=s.begin(), E=s.end(); I!=E; ++I) {
+ for (StringRef::const_iterator I = s.begin(), E = s.end(); I != E; ++I) {
char c = *I;
switch (c) {
default: o << c; break;
@@ -232,7 +209,8 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
const FIDMap& FM,
const SourceManager &SM,
const LangOptions &LangOpts,
- unsigned indent) {
+ unsigned indent,
+ unsigned depth) {
Indent(o, indent) << "<dict>\n";
++indent;
@@ -258,6 +236,10 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
--indent;
Indent(o, indent) << "</array>\n";
}
+
+ // Output the call depth.
+ Indent(o, indent) << "<key>depth</key>"
+ << "<integer>" << depth << "</integer>\n";
// Output the text.
assert(!P.getString().empty());
@@ -269,108 +251,143 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
// FIXME: Really use a short string.
Indent(o, indent) << "<key>message</key>\n";
EmitString(o, P.getString()) << '\n';
-
+
// Finish up.
--indent;
Indent(o, indent); o << "</dict>\n";
}
+static void ReportPiece(raw_ostream &o,
+ const PathDiagnosticPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth,
+ bool includeControlFlow);
+
+static void ReportCall(raw_ostream &o,
+ const PathDiagnosticCallPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth) {
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnter =
+ P.getCallEnterEvent();
+
+ if (callEnter)
+ ReportPiece(o, *callEnter, FM, SM, LangOpts, indent, depth, true);
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnterWithinCaller =
+ P.getCallEnterWithinCallerEvent();
+
+ ++depth;
+
+ if (callEnterWithinCaller)
+ ReportPiece(o, *callEnterWithinCaller, FM, SM, LangOpts,
+ indent, depth, true);
+
+ for (PathPieces::const_iterator I = P.path.begin(), E = P.path.end();I!=E;++I)
+ ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, true);
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+ P.getCallExitEvent();
+
+ if (callExit)
+ ReportPiece(o, *callExit, FM, SM, LangOpts, indent, depth, true);
+}
+
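How the depth bookkeeping in ReportCall() above plays out: the call-enter event is emitted at the caller's depth, and everything inside the call, including the call-exit event, at depth + 1. A sketch with a hypothetical Piece type:

  #include <cstdio>
  #include <vector>

  struct Piece {
    const char *text;
    std::vector<Piece> callBody; // non-empty marks this piece as a call
  };

  static void report(const Piece &p, int depth) {
    if (p.callBody.empty()) {
      std::printf("%*s%s (depth %d)\n", depth * 2, "", p.text, depth);
      return;
    }
    std::printf("%*sCalling %s (depth %d)\n", depth * 2, "", p.text, depth);
    for (const Piece &sub : p.callBody)
      report(sub, depth + 1); // body pieces sit one level deeper
    std::printf("%*sReturning from %s (depth %d)\n", depth * 2, "",
                p.text, depth + 1); // the exit event keeps the callee depth
  }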
static void ReportMacro(raw_ostream &o,
const PathDiagnosticMacroPiece& P,
const FIDMap& FM, const SourceManager &SM,
const LangOptions &LangOpts,
- unsigned indent) {
+ unsigned indent,
+ unsigned depth) {
- for (PathDiagnosticMacroPiece::const_iterator I=P.begin(), E=P.end();
+ for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
I!=E; ++I) {
-
- switch ((*I)->getKind()) {
- default:
- break;
- case PathDiagnosticPiece::Event:
- ReportEvent(o, cast<PathDiagnosticEventPiece>(**I), FM, SM, LangOpts,
- indent);
- break;
- case PathDiagnosticPiece::Macro:
- ReportMacro(o, cast<PathDiagnosticMacroPiece>(**I), FM, SM, LangOpts,
- indent);
- break;
- }
+ ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, false);
}
}
static void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P,
const FIDMap& FM, const SourceManager &SM,
const LangOptions &LangOpts) {
-
- unsigned indent = 4;
-
- switch (P.getKind()) {
- case PathDiagnosticPiece::ControlFlow:
- ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
- LangOpts, indent);
- break;
- case PathDiagnosticPiece::Event:
- ReportEvent(o, cast<PathDiagnosticEventPiece>(P), FM, SM, LangOpts,
- indent);
- break;
- case PathDiagnosticPiece::Macro:
- ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
- indent);
- break;
- }
+ ReportPiece(o, P, FM, SM, LangOpts, 4, 0, true);
}
-void PlistDiagnostics::HandlePathDiagnosticImpl(const PathDiagnostic* D) {
- if (!D)
- return;
-
- if (D->empty()) {
- delete D;
- return;
+static void ReportPiece(raw_ostream &o,
+ const PathDiagnosticPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth,
+ bool includeControlFlow) {
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::ControlFlow:
+ if (includeControlFlow)
+ ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
+ LangOpts, indent);
+ break;
+ case PathDiagnosticPiece::Call:
+ ReportCall(o, cast<PathDiagnosticCallPiece>(P), FM, SM, LangOpts,
+ indent, depth);
+ break;
+ case PathDiagnosticPiece::Event:
+ ReportEvent(o, cast<PathDiagnosticSpotPiece>(P), FM, SM, LangOpts,
+ indent, depth);
+ break;
+ case PathDiagnosticPiece::Macro:
+ ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
+ indent, depth);
+ break;
}
-
- // We need to flatten the locations (convert Stmt* to locations) because
- // the referenced statements may be freed by the time the diagnostics
- // are emitted.
- const_cast<PathDiagnostic*>(D)->flattenLocations();
- BatchedDiags.push_back(D);
}
-void PlistDiagnostics::FlushDiagnostics(SmallVectorImpl<std::string>
- *FilesMade) {
-
- if (flushed)
- return;
-
- flushed = true;
-
- // Sort the diagnostics so that they are always emitted in a deterministic
- // order.
- if (!BatchedDiags.empty())
- std::sort(BatchedDiags.begin(), BatchedDiags.end(), CompareDiagnostics());
-
+void PlistDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade) {
// Build up a set of FIDs that we use by scanning the locations and
// ranges of the diagnostics.
FIDMap FM;
SmallVector<FileID, 10> Fids;
const SourceManager* SM = 0;
- if (!BatchedDiags.empty())
- SM = &(*BatchedDiags.begin())->begin()->getLocation().getManager();
+ if (!Diags.empty())
+ SM = &(*(*Diags.begin())->path.begin())->getLocation().getManager();
- for (std::vector<const PathDiagnostic*>::iterator DI = BatchedDiags.begin(),
- DE = BatchedDiags.end(); DI != DE; ++DI) {
+
+ for (std::vector<const PathDiagnostic*>::iterator DI = Diags.begin(),
+ DE = Diags.end(); DI != DE; ++DI) {
const PathDiagnostic *D = *DI;
- for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I!=E; ++I) {
- AddFID(FM, Fids, SM, I->getLocation().asLocation());
+ llvm::SmallVector<const PathPieces *, 5> WorkList;
+ WorkList.push_back(&D->path);
- for (PathDiagnosticPiece::range_iterator RI=I->ranges_begin(),
- RE=I->ranges_end(); RI!=RE; ++RI) {
- AddFID(FM, Fids, SM, RI->getBegin());
- AddFID(FM, Fids, SM, RI->getEnd());
+ while (!WorkList.empty()) {
+ const PathPieces &path = *WorkList.back();
+ WorkList.pop_back();
+
+ for (PathPieces::const_iterator I = path.begin(), E = path.end();
+ I!=E; ++I) {
+ const PathDiagnosticPiece *piece = I->getPtr();
+ AddFID(FM, Fids, SM, piece->getLocation().asLocation());
+
+ for (PathDiagnosticPiece::range_iterator RI = piece->ranges_begin(),
+ RE= piece->ranges_end(); RI != RE; ++RI) {
+ AddFID(FM, Fids, SM, RI->getBegin());
+ AddFID(FM, Fids, SM, RI->getEnd());
+ }
+
+ if (const PathDiagnosticCallPiece *call =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ WorkList.push_back(&call->path);
+ }
+ else if (const PathDiagnosticMacroPiece *macro =
+ dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ WorkList.push_back(&macro->subPieces);
+ }
}
}
}
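Both this loop and the one added to HandlePathDiagnostic() use the same iterative traversal: an explicit worklist of paths, pushing the sub-paths of call and macro pieces instead of recursing. In isolation, with a hypothetical Piece type:

  #include <vector>

  struct Piece {
    std::vector<Piece> nested; // sub-path of a call or macro piece, if any
  };

  static void forEachPiece(const std::vector<Piece> &root,
                           void (*visit)(const Piece &)) {
    std::vector<const std::vector<Piece> *> workList;
    workList.push_back(&root);
    while (!workList.empty()) {
      const std::vector<Piece> *path = workList.back();
      workList.pop_back();
      for (const Piece &p : *path) {
        visit(p); // every piece is visited exactly once
        if (!p.nested.empty())
          workList.push_back(&p.nested); // defer nested paths, no recursion
      }
    }
  }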
@@ -379,7 +396,7 @@ void PlistDiagnostics::FlushDiagnostics(SmallVectorImpl<std::string>
std::string ErrMsg;
llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg);
if (!ErrMsg.empty()) {
- llvm::errs() << "warning: could not creat file: " << OutputFile << '\n';
+ llvm::errs() << "warning: could not create file: " << OutputFile << '\n';
return;
}
@@ -406,21 +423,19 @@ void PlistDiagnostics::FlushDiagnostics(SmallVectorImpl<std::string>
" <key>diagnostics</key>\n"
" <array>\n";
- for (std::vector<const PathDiagnostic*>::iterator DI=BatchedDiags.begin(),
- DE = BatchedDiags.end(); DI!=DE; ++DI) {
+ for (std::vector<const PathDiagnostic*>::iterator DI=Diags.begin(),
+ DE = Diags.end(); DI!=DE; ++DI) {
o << " <dict>\n"
" <key>path</key>\n";
const PathDiagnostic *D = *DI;
- // Create an owning smart pointer for 'D' just so that we auto-free it
- // when we exit this method.
- llvm::OwningPtr<PathDiagnostic> OwnedD(const_cast<PathDiagnostic*>(D));
o << " <array>\n";
- for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I != E; ++I)
- ReportDiag(o, *I, FM, *SM, LangOpts);
+ for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
+ I != E; ++I)
+ ReportDiag(o, **I, FM, *SM, LangOpts);
o << " </array>\n";
@@ -431,6 +446,38 @@ void PlistDiagnostics::FlushDiagnostics(SmallVectorImpl<std::string>
EmitString(o, D->getCategory()) << '\n';
o << " <key>type</key>";
EmitString(o, D->getBugType()) << '\n';
+
+ // Output information about the semantic context where
+ // the issue occurred.
+ if (const Decl *DeclWithIssue = D->getDeclWithIssue()) {
+ // FIXME: handle blocks, which have no name.
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DeclWithIssue)) {
+ StringRef declKind;
+ switch (ND->getKind()) {
+ case Decl::CXXRecord:
+ declKind = "C++ class";
+ break;
+ case Decl::CXXMethod:
+ declKind = "C++ method";
+ break;
+ case Decl::ObjCMethod:
+ declKind = "Objective-C method";
+ break;
+ case Decl::Function:
+ declKind = "function";
+ break;
+ default:
+ break;
+ }
+ if (!declKind.empty()) {
+ const std::string &declName = ND->getDeclName().getAsString();
+ o << " <key>issue_context_kind</key>";
+ EmitString(o, declKind) << '\n';
+ o << " <key>issue_context</key>";
+ EmitString(o, declName) << '\n';
+ }
+ }
+ }
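As a worked example, for a FunctionDecl named "foo" the block above appends the following two entries to the plist (modulo indentation and line breaks):

  // <key>issue_context_kind</key><string>function</string>
  // <key>issue_context</key><string>foo</string>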
// Output the location of the bug.
o << " <key>location</key>\n";
@@ -438,9 +485,10 @@ void PlistDiagnostics::FlushDiagnostics(SmallVectorImpl<std::string>
// Output the diagnostic to the sub-diagnostic client, if any.
if (SubPD) {
- SubPD->HandlePathDiagnostic(OwnedD.take());
+ std::vector<const PathDiagnostic *> SubDiags;
+ SubDiags.push_back(D);
SmallVector<std::string, 1> SubFilesMade;
- SubPD->FlushDiagnostics(SubFilesMade);
+ SubPD->FlushDiagnosticsImpl(SubDiags, &SubFilesMade);
if (!SubFilesMade.empty()) {
o << " <key>" << SubPD->getName() << "_files</key>\n";
@@ -462,6 +510,4 @@ void PlistDiagnostics::FlushDiagnostics(SmallVectorImpl<std::string>
if (FilesMade)
FilesMade->push_back(OutputFile);
-
- BatchedDiags.clear();
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 73788cc..b9cfa27 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -15,6 +15,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -24,6 +25,26 @@ using namespace ento;
// FIXME: Move this elsewhere.
ConstraintManager::~ConstraintManager() {}
+namespace clang { namespace ento {
+/// Increments the number of times this state is referenced.
+void ProgramStateRetain(const ProgramState *state) {
+ ++const_cast<ProgramState*>(state)->refCount;
+}
+
+/// Decrements the number of times this state is referenced.
+void ProgramStateRelease(const ProgramState *state) {
+ assert(state->refCount > 0);
+ ProgramState *s = const_cast<ProgramState*>(state);
+ if (--s->refCount == 0) {
+ ProgramStateManager &Mgr = s->getStateManager();
+ Mgr.StateSet.RemoveNode(s);
+ s->~ProgramState();
+ Mgr.freeStates.push_back(s);
+ }
+}
+}}
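The retain/release pair above is intrusive reference counting with in-place destruction: the last release runs the destructor but parks the allocation on a free list for reuse by placement new in getPersistentState(). A minimal sketch with hypothetical names:

  #include <vector>

  struct State {
    unsigned refCount;
    State() : refCount(0) {}
  };

  static std::vector<State *> freeStates; // recycled allocations

  static void retain(State *s) { ++s->refCount; }

  static void release(State *s) {
    if (--s->refCount == 0) {
      s->~State();             // destroy in place
      freeStates.push_back(s); // keep the raw memory for the next State
    }
  }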
+
ProgramState::ProgramState(ProgramStateManager *mgr, const Environment& env,
StoreRef st, GenericDataMap gdm)
: stateMgr(mgr),
@@ -55,8 +76,8 @@ ProgramStateManager::~ProgramStateManager() {
I->second.second(I->second.first);
}
-const ProgramState*
-ProgramStateManager::removeDeadBindings(const ProgramState *state,
+ProgramStateRef
+ProgramStateManager::removeDeadBindings(ProgramStateRef state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper) {
@@ -79,7 +100,7 @@ ProgramStateManager::removeDeadBindings(const ProgramState *state,
return getPersistentState(NewState);
}
-const ProgramState *ProgramStateManager::MarshalState(const ProgramState *state,
+ProgramStateRef ProgramStateManager::MarshalState(ProgramStateRef state,
const StackFrameContext *InitLoc) {
// make up an empty state for now.
ProgramState State(this,
@@ -90,7 +111,7 @@ const ProgramState *ProgramStateManager::MarshalState(const ProgramState *state,
return getPersistentState(State);
}
-const ProgramState *ProgramState::bindCompoundLiteral(const CompoundLiteralExpr *CL,
+ProgramStateRef ProgramState::bindCompoundLiteral(const CompoundLiteralExpr *CL,
const LocationContext *LC,
SVal V) const {
const StoreRef &newStore =
@@ -98,21 +119,21 @@ const ProgramState *ProgramState::bindCompoundLiteral(const CompoundLiteralExpr
return makeWithStore(newStore);
}
-const ProgramState *ProgramState::bindDecl(const VarRegion* VR, SVal IVal) const {
+ProgramStateRef ProgramState::bindDecl(const VarRegion* VR, SVal IVal) const {
const StoreRef &newStore =
getStateManager().StoreMgr->BindDecl(getStore(), VR, IVal);
return makeWithStore(newStore);
}
-const ProgramState *ProgramState::bindDeclWithNoInit(const VarRegion* VR) const {
+ProgramStateRef ProgramState::bindDeclWithNoInit(const VarRegion* VR) const {
const StoreRef &newStore =
getStateManager().StoreMgr->BindDeclWithNoInit(getStore(), VR);
return makeWithStore(newStore);
}
-const ProgramState *ProgramState::bindLoc(Loc LV, SVal V) const {
+ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V) const {
ProgramStateManager &Mgr = getStateManager();
- const ProgramState *newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
+ ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
LV, V));
const MemRegion *MR = LV.getAsRegion();
if (MR && Mgr.getOwningEngine())
@@ -121,53 +142,55 @@ const ProgramState *ProgramState::bindLoc(Loc LV, SVal V) const {
return newState;
}
-const ProgramState *ProgramState::bindDefault(SVal loc, SVal V) const {
+ProgramStateRef ProgramState::bindDefault(SVal loc, SVal V) const {
ProgramStateManager &Mgr = getStateManager();
const MemRegion *R = cast<loc::MemRegionVal>(loc).getRegion();
const StoreRef &newStore = Mgr.StoreMgr->BindDefault(getStore(), R, V);
- const ProgramState *new_state = makeWithStore(newStore);
+ ProgramStateRef new_state = makeWithStore(newStore);
return Mgr.getOwningEngine() ?
Mgr.getOwningEngine()->processRegionChange(new_state, R) :
new_state;
}
-const ProgramState *
+ProgramStateRef
ProgramState::invalidateRegions(ArrayRef<const MemRegion *> Regions,
const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
StoreManager::InvalidatedSymbols *IS,
- bool invalidateGlobals) const {
+ const CallOrObjCMessage *Call) const {
if (!IS) {
StoreManager::InvalidatedSymbols invalidated;
- return invalidateRegionsImpl(Regions, E, Count,
- invalidated, invalidateGlobals);
+ return invalidateRegionsImpl(Regions, E, Count, LCtx,
+ invalidated, Call);
}
- return invalidateRegionsImpl(Regions, E, Count, *IS, invalidateGlobals);
+ return invalidateRegionsImpl(Regions, E, Count, LCtx, *IS, Call);
}
-const ProgramState *
+ProgramStateRef
ProgramState::invalidateRegionsImpl(ArrayRef<const MemRegion *> Regions,
const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
StoreManager::InvalidatedSymbols &IS,
- bool invalidateGlobals) const {
+ const CallOrObjCMessage *Call) const {
ProgramStateManager &Mgr = getStateManager();
SubEngine* Eng = Mgr.getOwningEngine();
if (Eng && Eng->wantsRegionChangeUpdate(this)) {
StoreManager::InvalidatedRegions Invalidated;
const StoreRef &newStore
- = Mgr.StoreMgr->invalidateRegions(getStore(), Regions, E, Count, IS,
- invalidateGlobals, &Invalidated);
- const ProgramState *newState = makeWithStore(newStore);
- return Eng->processRegionChanges(newState, &IS, Regions, Invalidated);
+ = Mgr.StoreMgr->invalidateRegions(getStore(), Regions, E, Count, LCtx, IS,
+ Call, &Invalidated);
+ ProgramStateRef newState = makeWithStore(newStore);
+ return Eng->processRegionChanges(newState, &IS, Regions, Invalidated, Call);
}
const StoreRef &newStore =
- Mgr.StoreMgr->invalidateRegions(getStore(), Regions, E, Count, IS,
- invalidateGlobals, NULL);
+ Mgr.StoreMgr->invalidateRegions(getStore(), Regions, E, Count, LCtx, IS,
+ Call, NULL);
return makeWithStore(newStore);
}
-const ProgramState *ProgramState::unbindLoc(Loc LV) const {
+ProgramStateRef ProgramState::unbindLoc(Loc LV) const {
assert(!isa<loc::MemRegionVal>(LV) && "Use invalidateRegion instead.");
Store OldStore = getStore();
@@ -179,9 +202,11 @@ const ProgramState *ProgramState::unbindLoc(Loc LV) const {
return makeWithStore(newStore);
}
-const ProgramState *ProgramState::enterStackFrame(const StackFrameContext *frame) const {
+ProgramStateRef
+ProgramState::enterStackFrame(const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx) const {
const StoreRef &new_store =
- getStateManager().StoreMgr->enterStackFrame(this, frame);
+ getStateManager().StoreMgr->enterStackFrame(this, callerCtx, calleeCtx);
return makeWithStore(new_store);
}
@@ -238,9 +263,12 @@ SVal ProgramState::getSVal(Loc location, QualType T) const {
return V;
}
-const ProgramState *ProgramState::BindExpr(const Stmt *S, SVal V, bool Invalidate) const{
- Environment NewEnv = getStateManager().EnvMgr.bindExpr(Env, S, V,
- Invalidate);
+ProgramStateRef ProgramState::BindExpr(const Stmt *S,
+ const LocationContext *LCtx,
+ SVal V, bool Invalidate) const{
+ Environment NewEnv =
+ getStateManager().EnvMgr.bindExpr(Env, EnvironmentEntry(S, LCtx), V,
+ Invalidate);
if (NewEnv == Env)
return this;
@@ -249,10 +277,14 @@ const ProgramState *ProgramState::BindExpr(const Stmt *S, SVal V, bool Invalidat
return getStateManager().getPersistentState(NewSt);
}
-const ProgramState *ProgramState::bindExprAndLocation(const Stmt *S, SVal location,
- SVal V) const {
+ProgramStateRef
+ProgramState::bindExprAndLocation(const Stmt *S, const LocationContext *LCtx,
+ SVal location,
+ SVal V) const {
Environment NewEnv =
- getStateManager().EnvMgr.bindExprAndLocation(Env, S, location, V);
+ getStateManager().EnvMgr.bindExprAndLocation(Env,
+ EnvironmentEntry(S, LCtx),
+ location, V);
if (NewEnv == Env)
return this;
@@ -262,9 +294,10 @@ const ProgramState *ProgramState::bindExprAndLocation(const Stmt *S, SVal locati
return getStateManager().getPersistentState(NewSt);
}
-const ProgramState *ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
+ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
DefinedOrUnknownSVal UpperBound,
- bool Assumption) const {
+ bool Assumption,
+ QualType indexTy) const {
if (Idx.isUnknown() || UpperBound.isUnknown())
return this;
@@ -278,7 +311,8 @@ const ProgramState *ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
// Get the offset: the minimum value of the array index type.
BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
// FIXME: This should be using ValueManager::ArrayindexTy...somehow.
- QualType indexTy = Ctx.IntTy;
+ if (indexTy.isNull())
+ indexTy = Ctx.IntTy;
nonloc::ConcreteInt Min(BVF.getMinValue(indexTy));
// Adjust the index.
@@ -307,7 +341,7 @@ const ProgramState *ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
return CM.assume(this, cast<DefinedSVal>(inBound), Assumption);
}
-const ProgramState *ProgramStateManager::getInitialState(const LocationContext *InitLoc) {
+ProgramStateRef ProgramStateManager::getInitialState(const LocationContext *InitLoc) {
ProgramState State(this,
EnvMgr.getInitialEnvironment(),
StoreMgr->getInitialStore(InitLoc),
@@ -316,28 +350,15 @@ const ProgramState *ProgramStateManager::getInitialState(const LocationContext *
return getPersistentState(State);
}
-void ProgramStateManager::recycleUnusedStates() {
- for (std::vector<ProgramState*>::iterator i = recentlyAllocatedStates.begin(),
- e = recentlyAllocatedStates.end(); i != e; ++i) {
- ProgramState *state = *i;
- if (state->referencedByExplodedNode())
- continue;
- StateSet.RemoveNode(state);
- freeStates.push_back(state);
- state->~ProgramState();
- }
- recentlyAllocatedStates.clear();
-}
-
-const ProgramState *ProgramStateManager::getPersistentStateWithGDM(
- const ProgramState *FromState,
- const ProgramState *GDMState) {
- ProgramState NewState = *FromState;
+ProgramStateRef ProgramStateManager::getPersistentStateWithGDM(
+ ProgramStateRef FromState,
+ ProgramStateRef GDMState) {
+ ProgramState NewState(*FromState);
NewState.GDM = GDMState->GDM;
return getPersistentState(NewState);
}
-const ProgramState *ProgramStateManager::getPersistentState(ProgramState &State) {
+ProgramStateRef ProgramStateManager::getPersistentState(ProgramState &State) {
llvm::FoldingSetNodeID ID;
State.Profile(ID);
@@ -356,12 +377,11 @@ const ProgramState *ProgramStateManager::getPersistentState(ProgramState &State)
}
new (newState) ProgramState(State);
StateSet.InsertNode(newState, InsertPos);
- recentlyAllocatedStates.push_back(newState);
return newState;
}
-const ProgramState *ProgramState::makeWithStore(const StoreRef &store) const {
- ProgramState NewSt = *this;
+ProgramStateRef ProgramState::makeWithStore(const StoreRef &store) const {
+ ProgramState NewSt(*this);
NewSt.setStore(store);
return getStateManager().getPersistentState(NewSt);
}
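makeWithStore() and getPersistentState() implement copy-modify-intern: each "mutation" copies the state, applies one change, and uniques the result so structurally identical states share a single node. A sketch with std::set standing in for the FoldingSet and int fields for the real components (hypothetical names):

  #include <set>
  #include <tuple>

  struct State {
    int store, env, gdm;
    bool operator<(const State &o) const {
      return std::tie(store, env, gdm) < std::tie(o.store, o.env, o.gdm);
    }
  };

  static std::set<State> interned; // plays the role of StateSet

  static const State *getPersistent(const State &s) {
    return &*interned.insert(s).first; // existing node, or a new interned copy
  }

  static const State *makeWithStore(const State &base, int newStore) {
    State copy = base;     // copy the immutable state
    copy.store = newStore; // apply the single change
    return getPersistent(copy);
  }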
@@ -379,92 +399,44 @@ void ProgramState::setStore(const StoreRef &newStore) {
// State pretty-printing.
//===----------------------------------------------------------------------===//
-static bool IsEnvLoc(const Stmt *S) {
- // FIXME: This is a layering violation. Should be in environment.
- return (bool) (((uintptr_t) S) & 0x1);
-}
-
-void ProgramState::print(raw_ostream &Out, CFG &C,
+void ProgramState::print(raw_ostream &Out,
const char *NL, const char *Sep) const {
// Print the store.
ProgramStateManager &Mgr = getStateManager();
Mgr.getStoreManager().print(getStore(), Out, NL, Sep);
- // Print Subexpression bindings.
- bool isFirst = true;
+ // Print out the environment.
+ Env.print(Out, NL, Sep);
- // FIXME: All environment printing should be moved inside Environment.
- for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
- if (C.isBlkExpr(I.getKey()) || IsEnvLoc(I.getKey()))
- continue;
-
- if (isFirst) {
- Out << NL << NL << "Sub-Expressions:" << NL;
- isFirst = false;
- } else {
- Out << NL;
- }
+ // Print out the constraints.
+ Mgr.getConstraintManager().print(this, Out, NL, Sep);
- Out << " (" << (void*) I.getKey() << ") ";
- LangOptions LO; // FIXME.
- I.getKey()->printPretty(Out, 0, PrintingPolicy(LO));
- Out << " : " << I.getData();
- }
+ // Print checker-specific data.
+ Mgr.getOwningEngine()->printState(Out, this, NL, Sep);
+}
- // Print block-expression bindings.
- isFirst = true;
+void ProgramState::printDOT(raw_ostream &Out) const {
+ print(Out, "\\l", "\\|");
+}
- for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
- if (!C.isBlkExpr(I.getKey()))
- continue;
+void ProgramState::dump() const {
+ print(llvm::errs());
+}
- if (isFirst) {
- Out << NL << NL << "Block-level Expressions:" << NL;
- isFirst = false;
- } else {
- Out << NL;
- }
+void ProgramState::printTaint(raw_ostream &Out,
+ const char *NL, const char *Sep) const {
+ TaintMapImpl TM = get<TaintMap>();
- Out << " (" << (void*) I.getKey() << ") ";
- LangOptions LO; // FIXME.
- I.getKey()->printPretty(Out, 0, PrintingPolicy(LO));
- Out << " : " << I.getData();
- }
-
- // Print locations.
- isFirst = true;
-
- for (Environment::iterator I = Env.begin(), E = Env.end(); I != E; ++I) {
- if (!IsEnvLoc(I.getKey()))
- continue;
-
- if (isFirst) {
- Out << NL << NL << "Load/store locations:" << NL;
- isFirst = false;
- } else {
- Out << NL;
- }
+ if (!TM.isEmpty())
+ Out << "Tainted Symbols:" << NL;
- const Stmt *S = (Stmt*) (((uintptr_t) I.getKey()) & ((uintptr_t) ~0x1));
-
- Out << " (" << (void*) S << ") ";
- LangOptions LO; // FIXME.
- S->printPretty(Out, 0, PrintingPolicy(LO));
- Out << " : " << I.getData();
+ for (TaintMapImpl::iterator I = TM.begin(), E = TM.end(); I != E; ++I) {
+ Out << I->first << " : " << I->second << NL;
}
-
- Mgr.getConstraintManager().print(this, Out, NL, Sep);
-
- // Print checker-specific data.
- Mgr.getOwningEngine()->printState(Out, this, NL, Sep);
-}
-
-void ProgramState::printDOT(raw_ostream &Out, CFG &C) const {
- print(Out, C, "\\l", "\\|");
}
-void ProgramState::printStdErr(CFG &C) const {
- print(llvm::errs(), C);
+void ProgramState::dumpTaint() const {
+ printTaint(llvm::errs());
}
//===----------------------------------------------------------------------===//
@@ -489,7 +461,7 @@ ProgramStateManager::FindGDMContext(void *K,
return p.first;
}
-const ProgramState *ProgramStateManager::addGDM(const ProgramState *St, void *Key, void *Data){
+ProgramStateRef ProgramStateManager::addGDM(ProgramStateRef St, void *Key, void *Data){
ProgramState::GenericDataMap M1 = St->getGDM();
ProgramState::GenericDataMap M2 = GDMFactory.add(M1, Key, Data);
@@ -501,7 +473,7 @@ const ProgramState *ProgramStateManager::addGDM(const ProgramState *St, void *Ke
return getPersistentState(NewSt);
}
-const ProgramState *ProgramStateManager::removeGDM(const ProgramState *state, void *Key) {
+ProgramStateRef ProgramStateManager::removeGDM(ProgramStateRef state, void *Key) {
ProgramState::GenericDataMap OldM = state->getGDM();
ProgramState::GenericDataMap NewM = GDMFactory.remove(OldM, Key);
@@ -513,6 +485,8 @@ const ProgramState *ProgramStateManager::removeGDM(const ProgramState *state, vo
return getPersistentState(NewState);
}
+void ScanReachableSymbols::anchor() { }
+
bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I)
if (!scan(*I))
@@ -527,10 +501,10 @@ bool ScanReachableSymbols::scan(const SymExpr *sym) {
return true;
isVisited = 1;
- if (const SymbolData *sData = dyn_cast<SymbolData>(sym))
- if (!visitor.VisitSymbol(sData))
- return false;
+ if (!visitor.VisitSymbol(sym))
+ return false;
+ // TODO: should be rewritten using SymExpr::symbol_iterator.
switch (sym->getKind()) {
case SymExpr::RegionValueKind:
case SymExpr::ConjuredKind:
@@ -538,8 +512,12 @@ bool ScanReachableSymbols::scan(const SymExpr *sym) {
case SymExpr::ExtentKind:
case SymExpr::MetadataKind:
break;
+ case SymExpr::CastSymbolKind:
+ return scan(cast<SymbolCast>(sym)->getOperand());
case SymExpr::SymIntKind:
return scan(cast<SymIntExpr>(sym)->getLHS());
+ case SymExpr::IntSymKind:
+ return scan(cast<IntSymExpr>(sym)->getRHS());
case SymExpr::SymSymKind: {
const SymSymExpr *x = cast<SymSymExpr>(sym);
return scan(x->getLHS()) && scan(x->getRHS());
@@ -575,6 +553,10 @@ bool ScanReachableSymbols::scan(const MemRegion *R) {
if (isVisited)
return true;
isVisited = 1;
+
+ if (!visitor.VisitMemRegion(R))
+ return false;
// If this is a symbolic region, visit the symbol for the region.
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
@@ -623,3 +605,105 @@ bool ProgramState::scanReachableSymbols(const MemRegion * const *I,
}
return true;
}
+
+ProgramStateRef ProgramState::addTaint(const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind) const {
+ if (const Expr *E = dyn_cast_or_null<Expr>(S))
+ S = E->IgnoreParens();
+
+ SymbolRef Sym = getSVal(S, LCtx).getAsSymbol();
+ if (Sym)
+ return addTaint(Sym, Kind);
+
+ const MemRegion *R = getSVal(S, LCtx).getAsRegion();
+
+ // addTaint(R, Kind) returns the original state when no taint can be added.
+ return addTaint(R, Kind);
+}
+
+ProgramStateRef ProgramState::addTaint(const MemRegion *R,
+ TaintTagType Kind) const {
+ if (const SymbolicRegion *SR = dyn_cast_or_null<SymbolicRegion>(R))
+ return addTaint(SR->getSymbol(), Kind);
+ return this;
+}
+
+ProgramStateRef ProgramState::addTaint(SymbolRef Sym,
+ TaintTagType Kind) const {
+ // If this is a symbol cast, remove the cast before adding the taint. Taint
+ // is cast agnostic.
+ while (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
+ Sym = SC->getOperand();
+
+ ProgramStateRef NewState = set<TaintMap>(Sym, Kind);
+ assert(NewState);
+ return NewState;
+}
+
+bool ProgramState::isTainted(const Stmt *S, const LocationContext *LCtx,
+ TaintTagType Kind) const {
+ if (const Expr *E = dyn_cast_or_null<Expr>(S))
+ S = E->IgnoreParens();
+
+ SVal val = getSVal(S, LCtx);
+ return isTainted(val, Kind);
+}
+
+bool ProgramState::isTainted(SVal V, TaintTagType Kind) const {
+ if (const SymExpr *Sym = V.getAsSymExpr())
+ return isTainted(Sym, Kind);
+ if (const MemRegion *Reg = V.getAsRegion())
+ return isTainted(Reg, Kind);
+ return false;
+}
+
+bool ProgramState::isTainted(const MemRegion *Reg, TaintTagType K) const {
+ if (!Reg)
+ return false;
+
+ // Element region (array element) is tainted if either the base or the offset
+ // is tainted.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg))
+ return isTainted(ER->getSuperRegion(), K) || isTainted(ER->getIndex(), K);
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg))
+ return isTainted(SR->getSymbol(), K);
+
+ if (const SubRegion *ER = dyn_cast<SubRegion>(Reg))
+ return isTainted(ER->getSuperRegion(), K);
+
+ return false;
+}
+
+bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
+ if (!Sym)
+ return false;
+
+ // Traverse all the symbols this symbol depends on to see if any are tainted.
+ bool Tainted = false;
+ for (SymExpr::symbol_iterator SI = Sym->symbol_begin(), SE =Sym->symbol_end();
+ SI != SE; ++SI) {
+ assert(isa<SymbolData>(*SI));
+ const TaintTagType *Tag = get<TaintMap>(*SI);
+ Tainted = (Tag && *Tag == Kind);
+
+ // If this is a SymbolDerived with a tainted parent, it's also tainted.
+ if (const SymbolDerived *SD = dyn_cast<SymbolDerived>(*SI))
+ Tainted = Tainted || isTainted(SD->getParentSymbol(), Kind);
+
+ // If memory region is tainted, data is also tainted.
+ if (const SymbolRegionValue *SRV = dyn_cast<SymbolRegionValue>(*SI))
+ Tainted = Tainted || isTainted(SRV->getRegion(), Kind);
+
+ // If this is a SymbolCast from a tainted value, it's also tainted.
+ if (const SymbolCast *SC = dyn_cast<SymbolCast>(*SI))
+ Tainted = Tainted || isTainted(SC->getOperand(), Kind);
+
+ if (Tainted)
+ return true;
+ }
+
+ return Tainted;
+}
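isTainted(SymbolRef) above is a reachability query: a symbol is tainted if any symbol it depends on, through derivation, region membership, or casts, carries the taint tag. Reduced to a chain walk over hypothetical Sym nodes:

  #include <set>

  struct Sym {
    int id;
    const Sym *dependsOn; // parent symbol, region symbol, or cast operand
  };

  static bool isTainted(const Sym *s, const std::set<int> &taintedIds) {
    for (; s; s = s->dependsOn)
      if (taintedIds.count(s->id))
        return true; // taint propagates along the dependency chain
    return false;
  }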
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 9337788..98eb958 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -204,46 +204,46 @@ struct ProgramStateTrait<ConstraintRange>
namespace {
class RangeConstraintManager : public SimpleConstraintManager{
- RangeSet GetRange(const ProgramState *state, SymbolRef sym);
+ RangeSet GetRange(ProgramStateRef state, SymbolRef sym);
public:
RangeConstraintManager(SubEngine &subengine)
: SimpleConstraintManager(subengine) {}
- const ProgramState *assumeSymNE(const ProgramState *state, SymbolRef sym,
+ ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymEQ(const ProgramState *state, SymbolRef sym,
+ ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymLT(const ProgramState *state, SymbolRef sym,
+ ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymGT(const ProgramState *state, SymbolRef sym,
+ ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymGE(const ProgramState *state, SymbolRef sym,
+ ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const ProgramState *assumeSymLE(const ProgramState *state, SymbolRef sym,
+ ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment);
- const llvm::APSInt* getSymVal(const ProgramState *St, SymbolRef sym) const;
+ const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) const;
// FIXME: Refactor into SimpleConstraintManager?
- bool isEqual(const ProgramState *St, SymbolRef sym, const llvm::APSInt& V) const {
+ bool isEqual(ProgramStateRef St, SymbolRef sym, const llvm::APSInt& V) const {
const llvm::APSInt *i = getSymVal(St, sym);
return i ? *i == V : false;
}
- const ProgramState *removeDeadBindings(const ProgramState *St, SymbolReaper& SymReaper);
+ ProgramStateRef removeDeadBindings(ProgramStateRef St, SymbolReaper& SymReaper);
- void print(const ProgramState *St, raw_ostream &Out,
+ void print(ProgramStateRef St, raw_ostream &Out,
const char* nl, const char *sep);
private:
@@ -257,7 +257,7 @@ ConstraintManager* ento::CreateRangeConstraintManager(ProgramStateManager&,
return new RangeConstraintManager(subeng);
}
-const llvm::APSInt* RangeConstraintManager::getSymVal(const ProgramState *St,
+const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
SymbolRef sym) const {
const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(sym);
return T ? T->getConcreteValue() : NULL;
@@ -265,8 +265,8 @@ const llvm::APSInt* RangeConstraintManager::getSymVal(const ProgramState *St,
/// Scan all symbols referenced by the constraints. If the symbol is not alive
/// as marked in LSymbols, mark it as dead in DSymbols.
-const ProgramState*
-RangeConstraintManager::removeDeadBindings(const ProgramState *state,
+ProgramStateRef
+RangeConstraintManager::removeDeadBindings(ProgramStateRef state,
SymbolReaper& SymReaper) {
ConstraintRangeTy CR = state->get<ConstraintRange>();
@@ -282,7 +282,7 @@ RangeConstraintManager::removeDeadBindings(const ProgramState *state,
}
RangeSet
-RangeConstraintManager::GetRange(const ProgramState *state, SymbolRef sym) {
+RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
return *V;
@@ -305,8 +305,8 @@ RangeConstraintManager::GetRange(const ProgramState *state, SymbolRef sym) {
// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1,
// UINT_MAX, 0, 1, and 2.
-const ProgramState*
-RangeConstraintManager::assumeSymNE(const ProgramState *state, SymbolRef sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymNE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -322,8 +322,8 @@ RangeConstraintManager::assumeSymNE(const ProgramState *state, SymbolRef sym,
return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
}
-const ProgramState*
-RangeConstraintManager::assumeSymEQ(const ProgramState *state, SymbolRef sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymEQ(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
// [Int-Adjustment, Int-Adjustment]
@@ -333,8 +333,8 @@ RangeConstraintManager::assumeSymEQ(const ProgramState *state, SymbolRef sym,
return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
}
-const ProgramState*
-RangeConstraintManager::assumeSymLT(const ProgramState *state, SymbolRef sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymLT(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -354,8 +354,8 @@ RangeConstraintManager::assumeSymLT(const ProgramState *state, SymbolRef sym,
return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
}
-const ProgramState*
-RangeConstraintManager::assumeSymGT(const ProgramState *state, SymbolRef sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymGT(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -375,8 +375,8 @@ RangeConstraintManager::assumeSymGT(const ProgramState *state, SymbolRef sym,
return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
}
-const ProgramState*
-RangeConstraintManager::assumeSymGE(const ProgramState *state, SymbolRef sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymGE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -397,8 +397,8 @@ RangeConstraintManager::assumeSymGE(const ProgramState *state, SymbolRef sym,
return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
}
-const ProgramState*
-RangeConstraintManager::assumeSymLE(const ProgramState *state, SymbolRef sym,
+ProgramStateRef
+RangeConstraintManager::assumeSymLE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& Int,
const llvm::APSInt& Adjustment) {
BasicValueFactory &BV = state->getBasicVals();
@@ -423,18 +423,20 @@ RangeConstraintManager::assumeSymLE(const ProgramState *state, SymbolRef sym,
// Pretty-printing.
//===------------------------------------------------------------------------===/
-void RangeConstraintManager::print(const ProgramState *St, raw_ostream &Out,
+void RangeConstraintManager::print(ProgramStateRef St, raw_ostream &Out,
const char* nl, const char *sep) {
ConstraintRangeTy Ranges = St->get<ConstraintRange>();
- if (Ranges.isEmpty())
+ if (Ranges.isEmpty()) {
+ Out << nl << sep << "Ranges are empty." << nl;
return;
+ }
- Out << nl << sep << "ranges of symbol values:";
-
+ Out << nl << sep << "Ranges of symbol values:";
for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
Out << nl << ' ' << I.getKey() << " : ";
I.getData().print(Out);
}
+ Out << nl;
}
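The range sets used by this constraint manager may wrap around the type's maximum value (the comment earlier in this file gives [UINT_MAX-1, 3) as a five-element example). A membership test for a possibly wrapping closed range, as a standalone sketch:

  #include <cstdint>

  static bool inWrappedRange(uint32_t v, uint32_t from, uint32_t to) {
    if (from <= to)
      return from <= v && v <= to; // ordinary range
    return v >= from || v <= to;   // wrapped: [from, MAX] plus [0, to]
  }

  // inWrappedRange(x, UINT32_MAX - 1, 2) accepts exactly UINT32_MAX - 1,
  // UINT32_MAX, 0, 1, and 2.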
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 4b76cf1..cc3ea8c3 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -20,6 +20,7 @@
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
@@ -228,6 +229,16 @@ public:
/// For DerivedToBase casts, create a CXXBaseObjectRegion and return it.
virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType);
+ /// \brief Evaluates a C++ dynamic_cast.
+ /// The callback may result in one of the following three scenarios:
+ /// - Successful cast (e.g., derived is a subclass of base).
+ /// - Failed cast (e.g., derived is definitely not a subclass of base).
+ /// - We don't know (base is a symbolic region and we don't have
+ /// enough info to determine if the cast will succeed at run time).
+ /// The function returns an SVal representing the derived class; it is
+ /// valid only if the Failed flag is set to false.
+ virtual SVal evalDynamicCast(SVal base, QualType derivedPtrType,
+ bool &Failed);
+
StoreRef getInitialStore(const LocationContext *InitLoc) {
return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this);
}
@@ -235,11 +246,18 @@ public:
//===-------------------------------------------------------------------===//
// Binding values to regions.
//===-------------------------------------------------------------------===//
+ RegionBindings invalidateGlobalRegion(MemRegion::Kind K,
+ const Expr *Ex,
+ unsigned Count,
+ const LocationContext *LCtx,
+ RegionBindings B,
+ InvalidatedRegions *Invalidated);
StoreRef invalidateRegions(Store store, ArrayRef<const MemRegion *> Regions,
const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
InvalidatedSymbols &IS,
- bool invalidateGlobals,
+ const CallOrObjCMessage *Call,
InvalidatedRegions *Invalidated);
public: // Made public for helper classes.
@@ -273,7 +291,8 @@ public: // Part of public interface to class.
RegionBindings B = GetRegionBindings(store);
assert(!lookup(B, R, BindingKey::Default));
assert(!lookup(B, R, BindingKey::Direct));
- return StoreRef(addBinding(B, R, BindingKey::Default, V).getRootWithoutRetain(), *this);
+ return StoreRef(addBinding(B, R, BindingKey::Default, V)
+ .getRootWithoutRetain(), *this);
}
StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr *CL,
@@ -308,12 +327,10 @@ public: // Part of public interface to class.
bool includedInBindings(Store store, const MemRegion *region) const;
- //===------------------------------------------------------------------===//
- // Loading values from regions.
- //===------------------------------------------------------------------===//
-
+ /// \brief Return the value bound to specified location in a given state.
+ ///
/// The high level logic for this method is this:
- /// Retrieve (L)
+ /// getBinding (L)
/// if L has binding
/// return L's binding
/// else if L is in killset
@@ -323,39 +340,39 @@ public: // Part of public interface to class.
/// return undefined
/// else
/// return symbolic
- SVal Retrieve(Store store, Loc L, QualType T = QualType());
+ SVal getBinding(Store store, Loc L, QualType T = QualType());
- SVal RetrieveElement(Store store, const ElementRegion *R);
+ SVal getBindingForElement(Store store, const ElementRegion *R);
- SVal RetrieveField(Store store, const FieldRegion *R);
+ SVal getBindingForField(Store store, const FieldRegion *R);
- SVal RetrieveObjCIvar(Store store, const ObjCIvarRegion *R);
+ SVal getBindingForObjCIvar(Store store, const ObjCIvarRegion *R);
- SVal RetrieveVar(Store store, const VarRegion *R);
+ SVal getBindingForVar(Store store, const VarRegion *R);
- SVal RetrieveLazySymbol(const TypedValueRegion *R);
+ SVal getBindingForLazySymbol(const TypedValueRegion *R);
- SVal RetrieveFieldOrElementCommon(Store store, const TypedValueRegion *R,
- QualType Ty, const MemRegion *superR);
+ SVal getBindingForFieldOrElementCommon(Store store, const TypedValueRegion *R,
+ QualType Ty, const MemRegion *superR);
- SVal RetrieveLazyBinding(const MemRegion *lazyBindingRegion,
- Store lazyBindingStore);
+ SVal getLazyBinding(const MemRegion *lazyBindingRegion,
+ Store lazyBindingStore);
- /// Retrieve the values in a struct and return a CompoundVal, used when doing
- /// struct copy:
+ /// Get bindings for the values in a struct and return a CompoundVal, used
+ /// when doing struct copy:
/// struct s x, y;
/// x = y;
/// y's value is retrieved by this method.
- SVal RetrieveStruct(Store store, const TypedValueRegion* R);
+ SVal getBindingForStruct(Store store, const TypedValueRegion* R);
- SVal RetrieveArray(Store store, const TypedValueRegion* R);
+ SVal getBindingForArray(Store store, const TypedValueRegion* R);
/// Used to lazily generate derived symbols for bindings that are defined
/// implicitly by default bindings in a super region.
- Optional<SVal> RetrieveDerivedDefaultValue(RegionBindings B,
- const MemRegion *superR,
- const TypedValueRegion *R,
- QualType Ty);
+ Optional<SVal> getBindingForDerivedDefaultValue(RegionBindings B,
+ const MemRegion *superR,
+ const TypedValueRegion *R,
+ QualType Ty);
/// Get the state and region whose binding this region R corresponds to.
std::pair<Store, const MemRegion*>
@@ -374,15 +391,16 @@ public: // Part of public interface to class.
StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
SymbolReaper& SymReaper);
- StoreRef enterStackFrame(const ProgramState *state,
- const StackFrameContext *frame);
+ StoreRef enterStackFrame(ProgramStateRef state,
+ const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx);
//===------------------------------------------------------------------===//
// Region "extents".
//===------------------------------------------------------------------===//
// FIXME: This method will soon be eliminated; see the note in Store.h.
- DefinedOrUnknownSVal getSizeInElements(const ProgramState *state,
+ DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
const MemRegion* R, QualType EleTy);
//===------------------------------------------------------------------===//
@@ -422,7 +440,8 @@ StoreManager *ento::CreateRegionStoreManager(ProgramStateManager& StMgr) {
return new RegionStoreManager(StMgr, F);
}
-StoreManager *ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
+StoreManager *
+ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
RegionStoreFeatures F = minimal_features_tag();
F.enableFields(true);
return new RegionStoreManager(StMgr, F);
@@ -587,6 +606,7 @@ class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
{
const Expr *Ex;
unsigned Count;
+ const LocationContext *LCtx;
StoreManager::InvalidatedSymbols &IS;
StoreManager::InvalidatedRegions *Regions;
public:
@@ -594,11 +614,12 @@ public:
ProgramStateManager &stateMgr,
RegionBindings b,
const Expr *ex, unsigned count,
+ const LocationContext *lctx,
StoreManager::InvalidatedSymbols &is,
StoreManager::InvalidatedRegions *r,
bool includeGlobals)
: ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, includeGlobals),
- Ex(ex), Count(count), IS(is), Regions(r) {}
+ Ex(ex), Count(count), LCtx(lctx), IS(is), Regions(r) {}
void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
void VisitBaseRegion(const MemRegion *baseR);
@@ -674,7 +695,7 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
// Invalidate the region by setting its default value to a
// conjured symbol. The type of the symbol is irrelevant.
DefinedOrUnknownSVal V =
- svalBuilder.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy, Count);
+ svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
}
@@ -690,7 +711,7 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
// Invalidate the region by setting its default value to a
// conjured symbol. The type of the symbol is irrelevant.
DefinedOrUnknownSVal V =
- svalBuilder.getConjuredSymbolVal(baseR, Ex, Ctx.IntTy, Count);
+ svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
}
@@ -698,7 +719,8 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
// Set the default value of the array to conjured symbol.
DefinedOrUnknownSVal V =
- svalBuilder.getConjuredSymbolVal(baseR, Ex, AT->getElementType(), Count);
+ svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx,
+ AT->getElementType(), Count);
B = RM.addBinding(B, baseR, BindingKey::Default, V);
return;
}
@@ -713,20 +735,47 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
}
- DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, T, Count);
+ DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx,
+ T,Count);
assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
B = RM.addBinding(B, baseR, BindingKey::Direct, V);
}
+RegionBindings RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
+ const Expr *Ex,
+ unsigned Count,
+ const LocationContext *LCtx,
+ RegionBindings B,
+ InvalidatedRegions *Invalidated) {
+ // Bind the globals memory space to a new symbol that we will use to derive
+ // the bindings for all globals.
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K);
+ SVal V =
+ svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex, LCtx,
+ /* symbol type, doesn't matter */ Ctx.IntTy,
+ Count);
+
+ B = removeBinding(B, GS);
+ B = addBinding(B, BindingKey::Make(GS, BindingKey::Default), V);
+
+ // Even if there are no bindings in the global scope, we still need to
+ // record that we touched it.
+ if (Invalidated)
+ Invalidated->push_back(GS);
+
+ return B;
+}
+
StoreRef RegionStoreManager::invalidateRegions(Store store,
ArrayRef<const MemRegion *> Regions,
const Expr *Ex, unsigned Count,
+ const LocationContext *LCtx,
InvalidatedSymbols &IS,
- bool invalidateGlobals,
+ const CallOrObjCMessage *Call,
InvalidatedRegions *Invalidated) {
invalidateRegionsWorker W(*this, StateMgr,
RegionStoreManager::GetRegionBindings(store),
- Ex, Count, IS, Invalidated, invalidateGlobals);
+ Ex, Count, LCtx, IS, Invalidated, false);
// Scan the bindings and generate the clusters.
W.GenerateClusters();
@@ -741,20 +790,20 @@ StoreRef RegionStoreManager::invalidateRegions(Store store,
// Return the new bindings.
RegionBindings B = W.getRegionBindings();
- if (invalidateGlobals) {
- // Bind the non-static globals memory space to a new symbol that we will
- // use to derive the bindings for all non-static globals.
- const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion();
- SVal V =
- svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex,
- /* symbol type, doesn't matter */ Ctx.IntTy,
- Count);
- B = addBinding(B, BindingKey::Make(GS, BindingKey::Default), V);
-
- // Even if there are no bindings in the global scope, we still need to
- // record that we touched it.
- if (Invalidated)
- Invalidated->push_back(GS);
+ // For all globals that are neither static nor immutable: determine which
+ // global regions should be invalidated and invalidate them.
+ // TODO: This could possibly be more precise with modules.
+ //
+ // System calls invalidate only system globals.
+ if (Call && Call->isInSystemHeader()) {
+ B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
+ Ex, Count, LCtx, B, Invalidated);
+ // Internal calls might invalidate both system and internal globals.
+ } else {
+ B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
+ Ex, Count, LCtx, B, Invalidated);
+ B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
+ Ex, Count, LCtx, B, Invalidated);
}
return StoreRef(B.getRootWithoutRetain(), *this);
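
As an illustrative sketch (hypothetical code, names invented for this note), the split between the two global memory spaces plays out like this:

    #include <stdlib.h>          // system header; rand() counts as a system call
    extern int appCounter;       // internal global (GlobalInternalSpaceRegionKind)

    void internalHelper(void);   // defined elsewhere in the same project

    void test(void) {
      int r = rand();            // call from a system header: only the system
                                 // globals space is invalidated, so constraints
                                 // on appCounter survive the call
      internalHelper();          // internal call: both the system and internal
                                 // global spaces are conservatively invalidated
      (void)r;
    }
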
@@ -764,9 +813,10 @@ StoreRef RegionStoreManager::invalidateRegions(Store store,
// Extents for regions.
//===----------------------------------------------------------------------===//
-DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const ProgramState *state,
- const MemRegion *R,
- QualType EleTy) {
+DefinedOrUnknownSVal
+RegionStoreManager::getSizeInElements(ProgramStateRef state,
+ const MemRegion *R,
+ QualType EleTy) {
SVal Size = cast<SubRegion>(R)->getExtent(svalBuilder);
const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
if (!SizeInt)
@@ -837,6 +887,75 @@ SVal RegionStoreManager::evalDerivedToBase(SVal derived, QualType baseType) {
return loc::MemRegionVal(baseReg);
}
+SVal RegionStoreManager::evalDynamicCast(SVal base, QualType derivedType,
+ bool &Failed) {
+ Failed = false;
+
+ loc::MemRegionVal *baseRegVal = dyn_cast<loc::MemRegionVal>(&base);
+ if (!baseRegVal)
+ return UnknownVal();
+ const MemRegion *BaseRegion = baseRegVal->stripCasts();
+
+ // Assume the derived type is a pointer or a reference to a C++ record.
+ derivedType = derivedType->getPointeeType();
+ assert(!derivedType.isNull());
+ const CXXRecordDecl *DerivedDecl = derivedType->getAsCXXRecordDecl();
+ if (!DerivedDecl && !derivedType->isVoidType())
+ return UnknownVal();
+
+ // Drill down the CXXBaseObject chains, which represent upcasts (casts from
+ // derived to base).
+ const MemRegion *SR = BaseRegion;
+ while (const TypedRegion *TSR = dyn_cast_or_null<TypedRegion>(SR)) {
+ QualType BaseType = TSR->getLocationType()->getPointeeType();
+ assert(!BaseType.isNull());
+ const CXXRecordDecl *SRDecl = BaseType->getAsCXXRecordDecl();
+ if (!SRDecl)
+ return UnknownVal();
+
+ // If we found the derived class, the cast succeeds.
+ if (SRDecl == DerivedDecl)
+ return loc::MemRegionVal(TSR);
+
+ // If the region type is a subclass of the derived type.
+ if (!derivedType->isVoidType() && SRDecl->isDerivedFrom(DerivedDecl)) {
+ // This occurs in two cases.
+ // 1) We are processing an upcast.
+ // 2) We are processing a downcast but we jumped directly from the
+ // ancestor to a child of the cast value, so conjure the
+ // appropriate region to represent the value (the intermediate node).
+ return loc::MemRegionVal(MRMgr.getCXXBaseObjectRegion(DerivedDecl,
+ BaseRegion));
+ }
+
+ // If the super region is not a parent of the derived class, the cast
+ // definitely fails.
+ if (!derivedType->isVoidType() &&
+ DerivedDecl->isProvablyNotDerivedFrom(SRDecl)) {
+ Failed = true;
+ return UnknownVal();
+ }
+
+ if (const CXXBaseObjectRegion *R = dyn_cast<CXXBaseObjectRegion>(TSR))
+ // Drill down the chain to get the derived classes.
+ SR = R->getSuperRegion();
+ else {
+ // We reached the bottom of the hierarchy.
+
+ // If this is a cast to void*, return the region.
+ if (derivedType->isVoidType())
+ return loc::MemRegionVal(TSR);
+
+ // We did not find the derived class. We must be casting the base to
+ // derived, so the cast should fail.
+ Failed = true;
+ return UnknownVal();
+ }
+ }
+
+ return UnknownVal();
+}
+
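
A hypothetical input (illustrative only, not part of the patch) covering the three outcomes the drill-down above can produce:

    struct Base { virtual ~Base() {} };
    struct Derived : Base {};
    struct Other : Base {};

    void test(Base *sym) {        // 'sym' is backed by a symbolic region
      Derived d;
      Base *b = &d;               // upcast: wraps 'd' in a CXXBaseObjectRegion
      Derived *ok  = dynamic_cast<Derived*>(b);   // drill-down reaches 'd':
                                                  // the cast succeeds
      Other   *no  = dynamic_cast<Other*>(b);     // 'd' is provably not an
                                                  // Other: Failed is set
      Derived *unk = dynamic_cast<Derived*>(sym); // not enough info: the
                                                  // result is UnknownVal
      (void)ok; (void)no; (void)unk;
    }
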
//===----------------------------------------------------------------------===//
// Loading values from regions.
//===----------------------------------------------------------------------===//
@@ -863,7 +982,7 @@ Optional<SVal> RegionStoreManager::getDefaultBinding(RegionBindings B,
return Optional<SVal>();
}
-SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
+SVal RegionStoreManager::getBinding(Store store, Loc L, QualType T) {
assert(!isa<UnknownVal>(L) && "location unknown");
assert(!isa<UndefinedVal>(L) && "location undefined");
@@ -882,18 +1001,20 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion();
- if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR)) {
+ if (isa<AllocaRegion>(MR) ||
+ isa<SymbolicRegion>(MR) ||
+ isa<CodeTextRegion>(MR)) {
if (T.isNull()) {
- const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
- T = SR->getSymbol()->getType(Ctx);
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(MR))
+ T = TR->getLocationType();
+ else {
+ const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
+ T = SR->getSymbol()->getType(Ctx);
+ }
}
MR = GetElementZeroRegion(MR, T);
}
- if (isa<CodeTextRegion>(MR)) {
- llvm_unreachable("Why load from a code text region?");
- }
-
// FIXME: Perhaps this method should just take a 'const MemRegion*' argument
// instead of 'Loc', and have the other Loc cases handled at a higher level.
const TypedValueRegion *R = cast<TypedValueRegion>(MR);
@@ -909,21 +1030,21 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
// Such funny addressing will occur due to layering of regions.
if (RTy->isStructureOrClassType())
- return RetrieveStruct(store, R);
+ return getBindingForStruct(store, R);
// FIXME: Handle unions.
if (RTy->isUnionType())
return UnknownVal();
if (RTy->isArrayType())
- return RetrieveArray(store, R);
+ return getBindingForArray(store, R);
// FIXME: handle Vector types.
if (RTy->isVectorType())
return UnknownVal();
if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
- return CastRetrievedVal(RetrieveField(store, FR), FR, T, false);
+ return CastRetrievedVal(getBindingForField(store, FR), FR, T, false);
if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
// FIXME: Here we actually perform an implicit conversion from the loaded
@@ -931,7 +1052,7 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
// more intelligently. For example, an 'element' can encompass multiple
// bound regions (e.g., several bound bytes), or could be a subset of
// a larger value.
- return CastRetrievedVal(RetrieveElement(store, ER), ER, T, false);
+ return CastRetrievedVal(getBindingForElement(store, ER), ER, T, false);
}
if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
@@ -941,7 +1062,7 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
// reinterpretted, it is possible we stored a different value that could
// fit within the ivar. Either we need to cast these when storing them
// or reinterpret them lazily (as we do here).
- return CastRetrievedVal(RetrieveObjCIvar(store, IVR), IVR, T, false);
+ return CastRetrievedVal(getBindingForObjCIvar(store, IVR), IVR, T, false);
}
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
@@ -951,7 +1072,7 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
// variable is reinterpretted, it is possible we stored a different value
// that could fit within the variable. Either we need to cast these when
// storing them or reinterpret them lazily (as we do here).
- return CastRetrievedVal(RetrieveVar(store, VR), VR, T, false);
+ return CastRetrievedVal(getBindingForVar(store, VR), VR, T, false);
}
RegionBindings B = GetRegionBindings(store);
@@ -1021,7 +1142,7 @@ RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R,
return std::make_pair((Store) 0, (const MemRegion *) 0);
}
-SVal RegionStoreManager::RetrieveElement(Store store,
+SVal RegionStoreManager::getBindingForElement(Store store,
const ElementRegion* R) {
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(store);
@@ -1043,15 +1164,15 @@ SVal RegionStoreManager::RetrieveElement(Store store,
if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Idx)) {
int64_t i = CI->getValue().getSExtValue();
// Abort on string underrun. This can be caused by arbitrary
- // clients of RetrieveElement().
+ // clients of getBindingForElement().
if (i < 0)
return UndefinedVal();
- int64_t byteLength = Str->getByteLength();
- // Technically, only i == byteLength is guaranteed to be null.
+ int64_t length = Str->getLength();
+ // Technically, only i == length is guaranteed to be null.
// However, such overflows should be caught before reaching this point;
// the only time such an access would be made is if a string literal was
// used to initialize a larger array.
- char c = (i >= byteLength) ? '\0' : Str->getString()[i];
+ char c = (i >= length) ? '\0' : Str->getCodeUnit(i);
return svalBuilder.makeIntVal(c, T);
}
}
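
For illustration (hypothetical code), the zero-fill case the comment above describes:

    char buf[10] = "abc";   // a string literal initializing a larger array

    char test(void) {
      return buf[7];        // i >= length: the remaining elements are
                            // zero-filled, so the analyzer returns '\0'
    }
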
@@ -1093,10 +1214,11 @@ SVal RegionStoreManager::RetrieveElement(Store store,
}
}
}
- return RetrieveFieldOrElementCommon(store, R, R->getElementType(), superR);
+ return getBindingForFieldOrElementCommon(store, R, R->getElementType(),
+ superR);
}
-SVal RegionStoreManager::RetrieveField(Store store,
+SVal RegionStoreManager::getBindingForField(Store store,
const FieldRegion* R) {
// Check if the region has a binding.
@@ -1105,14 +1227,14 @@ SVal RegionStoreManager::RetrieveField(Store store,
return *V;
QualType Ty = R->getValueType();
- return RetrieveFieldOrElementCommon(store, R, Ty, R->getSuperRegion());
+ return getBindingForFieldOrElementCommon(store, R, Ty, R->getSuperRegion());
}
Optional<SVal>
-RegionStoreManager::RetrieveDerivedDefaultValue(RegionBindings B,
- const MemRegion *superR,
- const TypedValueRegion *R,
- QualType Ty) {
+RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindings B,
+ const MemRegion *superR,
+ const TypedValueRegion *R,
+ QualType Ty) {
if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
const SVal &val = D.getValue();
@@ -1135,30 +1257,39 @@ RegionStoreManager::RetrieveDerivedDefaultValue(RegionBindings B,
return Optional<SVal>();
}
-SVal RegionStoreManager::RetrieveLazyBinding(const MemRegion *lazyBindingRegion,
+SVal RegionStoreManager::getLazyBinding(const MemRegion *lazyBindingRegion,
Store lazyBindingStore) {
if (const ElementRegion *ER = dyn_cast<ElementRegion>(lazyBindingRegion))
- return RetrieveElement(lazyBindingStore, ER);
+ return getBindingForElement(lazyBindingStore, ER);
- return RetrieveField(lazyBindingStore,
- cast<FieldRegion>(lazyBindingRegion));
+ return getBindingForField(lazyBindingStore,
+ cast<FieldRegion>(lazyBindingRegion));
}
-SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
+SVal RegionStoreManager::getBindingForFieldOrElementCommon(Store store,
const TypedValueRegion *R,
QualType Ty,
const MemRegion *superR) {
- // At this point we have already checked in either RetrieveElement or
- // RetrieveField if 'R' has a direct binding.
-
+ // At this point we have already checked in either getBindingForElement or
+ // getBindingForField if 'R' has a direct binding.
RegionBindings B = GetRegionBindings(store);
+
+ // Record whether or not we see a symbolic index. Such an index can fall
+ // completely outside the scope of our lookup.
+ bool hasSymbolicIndex = false;
while (superR) {
if (const Optional<SVal> &D =
- RetrieveDerivedDefaultValue(B, superR, R, Ty))
+ getBindingForDerivedDefaultValue(B, superR, R, Ty))
return *D;
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(superR)) {
+ NonLoc index = ER->getIndex();
+ if (!index.isConstant())
+ hasSymbolicIndex = true;
+ }
+
// If our super region is a field or element itself, walk up the region
// hierarchy to see if there is a default value installed in an ancestor.
if (const SubRegion *SR = dyn_cast<SubRegion>(superR)) {
@@ -1174,10 +1305,10 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
llvm::tie(lazyBindingStore, lazyBindingRegion) = GetLazyBinding(B, R, R);
if (lazyBindingRegion)
- return RetrieveLazyBinding(lazyBindingRegion, lazyBindingStore);
+ return getLazyBinding(lazyBindingRegion, lazyBindingStore);
if (R->hasStackNonParametersStorage()) {
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ if (isa<ElementRegion>(R)) {
// Currently we don't reason specially about Clang-style vectors. Check
// if superR is a vector and if so return Unknown.
if (const TypedValueRegion *typedSuperR =
@@ -1185,13 +1316,15 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
if (typedSuperR->getValueType()->isVectorType())
return UnknownVal();
}
-
- // FIXME: We also need to take ElementRegions with symbolic indexes into
- // account.
- if (!ER->getIndex().isConstant())
- return UnknownVal();
}
+ // FIXME: We also need to take ElementRegions with symbolic indexes into
+ // account. This case covers both directly accessing an ElementRegion
+ // with a symbolic offset and accessing fields within an element that
+ // has a symbolic offset.
+ if (hasSymbolicIndex)
+ return UnknownVal();
+
return UndefinedVal();
}
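
A hypothetical example (names invented) of the symbolic-index case handled above:

    struct Point { int x, y; };

    int test(int i) {         // 'i' is symbolic
      struct Point pts[4];
      pts[0].x = 1;
      return pts[i].x;        // the lookup walks up through ElementRegion{pts, i};
                              // hasSymbolicIndex becomes true, so the result is
                              // UnknownVal rather than a spurious UndefinedVal
    }
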
@@ -1199,7 +1332,8 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
return svalBuilder.getRegionValueSymbolVal(R);
}
-SVal RegionStoreManager::RetrieveObjCIvar(Store store, const ObjCIvarRegion* R){
+SVal RegionStoreManager::getBindingForObjCIvar(Store store,
+ const ObjCIvarRegion* R) {
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(store);
@@ -1218,10 +1352,10 @@ SVal RegionStoreManager::RetrieveObjCIvar(Store store, const ObjCIvarRegion* R){
return UnknownVal();
}
- return RetrieveLazySymbol(R);
+ return getBindingForLazySymbol(R);
}
-SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
+SVal RegionStoreManager::getBindingForVar(Store store, const VarRegion *R) {
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(store);
@@ -1253,7 +1387,8 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
}
}
- if (const Optional<SVal> &V = RetrieveDerivedDefaultValue(B, MS, R, CT))
+ if (const Optional<SVal> &V
+ = getBindingForDerivedDefaultValue(B, MS, R, CT))
return V.getValue();
return svalBuilder.getRegionValueSymbolVal(R);
@@ -1270,19 +1405,18 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
return UndefinedVal();
}
-SVal RegionStoreManager::RetrieveLazySymbol(const TypedValueRegion *R) {
+SVal RegionStoreManager::getBindingForLazySymbol(const TypedValueRegion *R) {
// All other values are symbolic.
return svalBuilder.getRegionValueSymbolVal(R);
}
-SVal RegionStoreManager::RetrieveStruct(Store store,
+SVal RegionStoreManager::getBindingForStruct(Store store,
const TypedValueRegion* R) {
- QualType T = R->getValueType();
- assert(T->isStructureOrClassType());
+ assert(R->getValueType()->isStructureOrClassType());
return svalBuilder.makeLazyCompoundVal(StoreRef(store, *this), R);
}
-SVal RegionStoreManager::RetrieveArray(Store store,
+SVal RegionStoreManager::getBindingForArray(Store store,
const TypedValueRegion * R) {
assert(Ctx.getAsConstantArrayType(R->getValueType()));
return svalBuilder.makeLazyCompoundVal(StoreRef(store, *this), R);
@@ -1506,11 +1640,15 @@ StoreRef RegionStoreManager::BindStruct(Store store, const TypedValueRegion* R,
RecordDecl::field_iterator FI, FE;
StoreRef newStore(store, *this);
- for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI, ++VI) {
+ for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI) {
if (VI == VE)
break;
+ // Skip any unnamed bitfields to stay in sync with the initializers.
+ if ((*FI)->isUnnamedBitfield())
+ continue;
+
QualType FTy = (*FI)->getType();
const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
@@ -1520,6 +1658,7 @@ StoreRef RegionStoreManager::BindStruct(Store store, const TypedValueRegion* R,
newStore = BindStruct(newStore.getStore(), FR, *VI);
else
newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(FR), *VI);
+ ++VI;
}
// There may be fewer values in the initializer list than fields in the struct.
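
As a small illustration (hypothetical code) of why unnamed bit-fields must be skipped here:

    struct S {
      int a;
      int : 16;               // unnamed bit-field: consumes no initializer
      int b;
    };

    struct S s = { 1, 2 };    // '1' binds to 'a'; the bit-field is skipped, so
                              // '2' binds to 'b' instead of the bit-field
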
@@ -1556,7 +1695,7 @@ StoreRef RegionStoreManager::KillStruct(Store store, const TypedRegion* R,
// Remove the old bindings, using 'subReg' as the root of all regions
// we will invalidate.
RegionBindings B = GetRegionBindings(store);
- llvm::OwningPtr<RegionStoreSubRegionMap>
+ OwningPtr<RegionStoreSubRegionMap>
SubRegions(getRegionStoreSubRegionMap(store));
RemoveSubRegionBindings(B, subReg, *SubRegions);
@@ -1574,7 +1713,7 @@ StoreRef RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
// Nuke the old bindings stemming from R.
RegionBindings B = GetRegionBindings(store);
- llvm::OwningPtr<RegionStoreSubRegionMap>
+ OwningPtr<RegionStoreSubRegionMap>
SubRegions(getRegionStoreSubRegionMap(store));
// B and DVM are updated after the call to RemoveSubRegionBindings.
@@ -1641,7 +1780,8 @@ class removeDeadBindingsWorker :
const StackFrameContext *CurrentLCtx;
public:
- removeDeadBindingsWorker(RegionStoreManager &rm, ProgramStateManager &stateMgr,
+ removeDeadBindingsWorker(RegionStoreManager &rm,
+ ProgramStateManager &stateMgr,
RegionBindings b, SymbolReaper &symReaper,
const StackFrameContext *LCtx)
: ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b,
@@ -1717,9 +1857,9 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) {
if (const MemRegion *R = V.getAsRegion())
AddToWorkList(R);
- // Update the set of live symbols.
- for (SVal::symbol_iterator SI=V.symbol_begin(), SE=V.symbol_end();
- SI!=SE;++SI)
+ // Update the set of live symbols.
+ for (SymExpr::symbol_iterator SI = V.symbol_begin(), SE = V.symbol_end();
+ SI!=SE; ++SI)
SymReaper.markLive(*SI);
}
@@ -1807,7 +1947,7 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
SymReaper.maybeDead(SymR->getSymbol());
SVal X = I.getData();
- SVal::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+ SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
for (; SI != SE; ++SI)
SymReaper.maybeDead(*SI);
}
@@ -1816,37 +1956,41 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
}
-StoreRef RegionStoreManager::enterStackFrame(const ProgramState *state,
- const StackFrameContext *frame) {
- FunctionDecl const *FD = cast<FunctionDecl>(frame->getDecl());
+StoreRef RegionStoreManager::enterStackFrame(ProgramStateRef state,
+ const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx)
+{
+ FunctionDecl const *FD = cast<FunctionDecl>(calleeCtx->getDecl());
FunctionDecl::param_const_iterator PI = FD->param_begin(),
PE = FD->param_end();
StoreRef store = StoreRef(state->getStore(), *this);
- if (CallExpr const *CE = dyn_cast<CallExpr>(frame->getCallSite())) {
+ if (CallExpr const *CE = dyn_cast<CallExpr>(calleeCtx->getCallSite())) {
CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
// Copy the arg expression value to the arg variables. We check that
// PI != PE because the actual number of arguments may be different than
// the function declaration.
for (; AI != AE && PI != PE; ++AI, ++PI) {
- SVal ArgVal = state->getSVal(*AI);
+ SVal ArgVal = state->getSVal(*AI, callerCtx);
store = Bind(store.getStore(),
- svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, frame)), ArgVal);
+ svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, calleeCtx)),
+ ArgVal);
}
} else if (const CXXConstructExpr *CE =
- dyn_cast<CXXConstructExpr>(frame->getCallSite())) {
+ dyn_cast<CXXConstructExpr>(calleeCtx->getCallSite())) {
CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
AE = CE->arg_end();
// Copy the arg expression value to the arg variables.
for (; AI != AE; ++AI, ++PI) {
- SVal ArgVal = state->getSVal(*AI);
+ SVal ArgVal = state->getSVal(*AI, callerCtx);
store = Bind(store.getStore(),
- svalBuilder.makeLoc(MRMgr.getVarRegion(*PI,frame)), ArgVal);
+ svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, calleeCtx)),
+ ArgVal);
}
} else
- assert(isa<CXXDestructorDecl>(frame->getDecl()));
+ assert(isa<CXXDestructorDecl>(calleeCtx->getDecl()));
return store;
}
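
A hypothetical inlined call (illustrative only) showing what enterStackFrame models:

    int callee(int p) {       // 'p' gets a VarRegion in the callee's stack frame
      return p + 1;
    }

    int caller(void) {
      int x = 41;
      return callee(x);       // the argument is evaluated with
                              // getSVal(*AI, callerCtx), yielding 41, which is
                              // then bound to the region for 'p' in calleeCtx
    }
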
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index ebf7ae2..9e97f5e 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ExprCXX.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
@@ -25,6 +26,8 @@ using namespace ento;
// Basic SVal creation.
//===----------------------------------------------------------------------===//
+void SValBuilder::anchor() { }
+
DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
if (Loc::isLocType(type))
return makeNull();
@@ -37,23 +40,38 @@ DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
return UnknownVal();
}
-
NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
const llvm::APSInt& rhs, QualType type) {
// The Environment ensures we always get a persistent APSInt in
// BasicValueFactory, so we don't need to get the APSInt from
// BasicValueFactory again.
+ assert(lhs);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getSymIntExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op, const SymExpr *rhs,
+ QualType type) {
+ assert(rhs);
assert(!Loc::isLocType(type));
- return nonloc::SymExprVal(SymMgr.getSymIntExpr(lhs, op, rhs, type));
+ return nonloc::SymbolVal(SymMgr.getIntSymExpr(lhs, op, rhs, type));
}
NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
const SymExpr *rhs, QualType type) {
- assert(SymMgr.getType(lhs) == SymMgr.getType(rhs));
+ assert(lhs && rhs);
+ assert(haveSameType(lhs->getType(Context), rhs->getType(Context)) == true);
assert(!Loc::isLocType(type));
- return nonloc::SymExprVal(SymMgr.getSymSymExpr(lhs, op, rhs, type));
+ return nonloc::SymbolVal(SymMgr.getSymSymExpr(lhs, op, rhs, type));
}
+NonLoc SValBuilder::makeNonLoc(const SymExpr *operand,
+ QualType fromTy, QualType toTy) {
+ assert(operand);
+ assert(!Loc::isLocType(toTy));
+ return nonloc::SymbolVal(SymMgr.getCastSymbol(operand, fromTy, toTy));
+}
SVal SValBuilder::convertToArrayIndex(SVal val) {
if (val.isUnknownOrUndef())
@@ -69,6 +87,10 @@ SVal SValBuilder::convertToArrayIndex(SVal val) {
return evalCastFromNonLoc(cast<NonLoc>(val), ArrayIndexTy);
}
+nonloc::ConcreteInt SValBuilder::makeBoolVal(const CXXBoolLiteralExpr *boolean){
+ return makeTruthVal(boolean->getValue());
+}
+
DefinedOrUnknownSVal
SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
QualType T = region->getValueType();
@@ -84,35 +106,46 @@ SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
return nonloc::SymbolVal(sym);
}
-DefinedOrUnknownSVal SValBuilder::getConjuredSymbolVal(const void *symbolTag,
- const Expr *expr,
- unsigned count) {
+DefinedOrUnknownSVal
+SValBuilder::getConjuredSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned count) {
QualType T = expr->getType();
+ return getConjuredSymbolVal(symbolTag, expr, LCtx, T, count);
+}
- if (!SymbolManager::canSymbolicate(T))
+DefinedOrUnknownSVal
+SValBuilder::getConjuredSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned count) {
+ if (!SymbolManager::canSymbolicate(type))
return UnknownVal();
- SymbolRef sym = SymMgr.getConjuredSymbol(expr, count, symbolTag);
+ SymbolRef sym = SymMgr.getConjuredSymbol(expr, LCtx, type, count, symbolTag);
- if (Loc::isLocType(T))
+ if (Loc::isLocType(type))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
return nonloc::SymbolVal(sym);
}
-DefinedOrUnknownSVal SValBuilder::getConjuredSymbolVal(const void *symbolTag,
- const Expr *expr,
- QualType type,
- unsigned count) {
-
+
+DefinedOrUnknownSVal
+SValBuilder::getConjuredSymbolVal(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount) {
if (!SymbolManager::canSymbolicate(type))
return UnknownVal();
- SymbolRef sym = SymMgr.getConjuredSymbol(expr, type, count, symbolTag);
-
+ SymbolRef sym = SymMgr.getConjuredSymbol(stmt, LCtx, type, visitCount);
+
if (Loc::isLocType(type))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
-
+
return nonloc::SymbolVal(sym);
}
@@ -155,14 +188,41 @@ DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *block,
CanQualType locTy,
const LocationContext *locContext) {
const BlockTextRegion *BC =
- MemMgr.getBlockTextRegion(block, locTy, locContext->getAnalysisContext());
+ MemMgr.getBlockTextRegion(block, locTy, locContext->getAnalysisDeclContext());
const BlockDataRegion *BD = MemMgr.getBlockDataRegion(BC, locContext);
return loc::MemRegionVal(BD);
}
//===----------------------------------------------------------------------===//
-SVal SValBuilder::evalBinOp(const ProgramState *state, BinaryOperator::Opcode op,
+SVal SValBuilder::makeGenericVal(ProgramStateRef State,
+ BinaryOperator::Opcode Op,
+ NonLoc LHS, NonLoc RHS,
+ QualType ResultTy) {
+ // If operands are tainted, create a symbol to ensure that we propagate taint.
+ if (State->isTainted(RHS) || State->isTainted(LHS)) {
+ const SymExpr *symLHS;
+ const SymExpr *symRHS;
+
+ if (const nonloc::ConcreteInt *rInt = dyn_cast<nonloc::ConcreteInt>(&RHS)) {
+ symLHS = LHS.getAsSymExpr();
+ return makeNonLoc(symLHS, Op, rInt->getValue(), ResultTy);
+ }
+
+ if (const nonloc::ConcreteInt *lInt = dyn_cast<nonloc::ConcreteInt>(&LHS)) {
+ symRHS = RHS.getAsSymExpr();
+ return makeNonLoc(lInt->getValue(), Op, symRHS, ResultTy);
+ }
+
+ symLHS = LHS.getAsSymExpr();
+ symRHS = RHS.getAsSymExpr();
+ return makeNonLoc(symLHS, Op, symRHS, ResultTy);
+ }
+ return UnknownVal();
+}
+
+
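
As a hedged sketch of the taint-propagation path above (hypothetical code; assume a checker has marked 'n' as tainted):

    int test(int n) {
      int masked = n & 0xff;  // instead of returning UnknownVal (which would
                              // drop the taint), a symbolic (sym & 255) is
                              // built, so State->isTainted(masked) still holds
      return masked;
    }
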
+SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
SVal lhs, SVal rhs, QualType type) {
if (lhs.isUndef() || rhs.isUndef())
@@ -190,37 +250,50 @@ SVal SValBuilder::evalBinOp(const ProgramState *state, BinaryOperator::Opcode op
return evalBinOpNN(state, op, cast<NonLoc>(lhs), cast<NonLoc>(rhs), type);
}
-DefinedOrUnknownSVal SValBuilder::evalEQ(const ProgramState *state,
+DefinedOrUnknownSVal SValBuilder::evalEQ(ProgramStateRef state,
DefinedOrUnknownSVal lhs,
DefinedOrUnknownSVal rhs) {
return cast<DefinedOrUnknownSVal>(evalBinOp(state, BO_EQ, lhs, rhs,
Context.IntTy));
}
+/// Recursively check if the pointer types are equal modulo const, volatile,
+/// and restrict qualifiers. Assumes the input types are canonical.
+/// TODO: This is based on code in SemaCast; can we reuse it?
+static bool haveSimilarTypes(ASTContext &Context, QualType T1,
+ QualType T2) {
+ while (Context.UnwrapSimilarPointerTypes(T1, T2)) {
+ Qualifiers Quals1, Quals2;
+ T1 = Context.getUnqualifiedArrayType(T1, Quals1);
+ T2 = Context.getUnqualifiedArrayType(T2, Quals2);
+
+ // Make sure that the non-CVR qualifiers (e.g., address spaces) are
+ // identical.
+ Quals1.removeCVRQualifiers();
+ Quals2.removeCVRQualifiers();
+ if (Quals1 != Quals2)
+ return false;
+ }
+
+ if (T1 != T2)
+ return false;
+
+ return true;
+}
+
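
Illustrative casts (hypothetical code) showing which pointee types count as similar:

    void test(char *p) {
      const char *c = p;      // pointees differ only in 'const': similar,
                              // so the value propagates unchanged
      volatile char *v = p;   // likewise for 'volatile'
      int *i = (int *)p;      // different pointee type: not similar, so the
                              // cast falls through to the regular handling
      (void)c; (void)v; (void)i;
    }
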
// FIXME: should rewrite according to the cast kind.
SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
+ castTy = Context.getCanonicalType(castTy);
+ originalTy = Context.getCanonicalType(originalTy);
if (val.isUnknownOrUndef() || castTy == originalTy)
return val;
// For const casts, just propagate the value.
if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
- if (Context.hasSameUnqualifiedType(castTy, originalTy))
+ if (haveSimilarTypes(Context, Context.getPointerType(castTy),
+ Context.getPointerType(originalTy)))
return val;
-
- // Check for casts to real or complex numbers. We don't handle these at all
- // right now.
- if (castTy->isFloatingType() || castTy->isAnyComplexType())
- return UnknownVal();
- // Check for casts from integers to integers.
- if (castTy->isIntegerType() && originalTy->isIntegerType()) {
- if (isa<Loc>(val))
- // This can be a cast to ObjC property of type int.
- return evalCastFromLoc(cast<Loc>(val), castTy);
- else
- return evalCastFromNonLoc(cast<NonLoc>(val), castTy);
- }
-
// Check for casts from pointers to integers.
if (castTy->isIntegerType() && Loc::isLocType(originalTy))
return evalCastFromLoc(cast<Loc>(val), castTy);
@@ -235,7 +308,7 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
}
return LV->getLoc();
}
- goto DispatchCast;
+ return dispatchCast(val, castTy);
}
// Just pass through function and block pointers.
@@ -309,8 +382,5 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
}
-DispatchCast:
- // All other cases.
- return isa<Loc>(val) ? evalCastFromLoc(cast<Loc>(val), castTy)
- : evalCastFromNonLoc(cast<NonLoc>(val), castTy);
+ return dispatchCast(val, castTy);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
index b5980b9..b94aff4 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -54,13 +54,16 @@ const FunctionDecl *SVal::getAsFunctionDecl() const {
return CTR->getDecl();
}
- return NULL;
+ return 0;
}
-/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
-/// wraps a symbol, return that SymbolRef. Otherwise return 0.
-// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+/// \brief If this SVal is a location (subclasses Loc) and wraps a symbol,
+/// return that SymbolRef. Otherwise return 0.
+///
+/// Implicit casts (e.g., void* -> char*) can turn a symbolic region into an
+/// element region; if that is the case, get the underlying region.
SymbolRef SVal::getAsLocSymbol() const {
+ // FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
if (const nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(this))
return X->getLoc().getAsLocSymbol();
@@ -69,7 +72,7 @@ SymbolRef SVal::getAsLocSymbol() const {
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
return SymR->getSymbol();
}
- return NULL;
+ return 0;
}
/// Get the symbol in the SVal or its base region.
@@ -91,29 +94,34 @@ SymbolRef SVal::getLocSymbolInBase() const {
return 0;
}
-/// getAsSymbol - If this Sval wraps a symbol return that SymbolRef.
+// TODO: The next 3 functions have to be simplified.
+
+/// \brief If this SVal wraps a symbol return that SymbolRef.
/// Otherwise return 0.
-// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
SymbolRef SVal::getAsSymbol() const {
+ // FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
return X->getSymbol();
- if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
- if (SymbolRef Y = dyn_cast<SymbolData>(X->getSymbolicExpression()))
- return Y;
-
return getAsLocSymbol();
}
/// getAsSymbolicExpression - If this Sval wraps a symbolic expression then
/// return that expression. Otherwise return NULL.
const SymExpr *SVal::getAsSymbolicExpression() const {
- if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
- return X->getSymbolicExpression();
+ if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
+ return X->getSymbol();
return getAsSymbol();
}
+const SymExpr* SVal::getAsSymExpr() const {
+ const SymExpr* Sym = getAsSymbol();
+ if (!Sym)
+ Sym = getAsSymbolicExpression();
+ return Sym;
+}
+
const MemRegion *SVal::getAsRegion() const {
if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this))
return X->getRegion();
@@ -130,50 +138,6 @@ const MemRegion *loc::MemRegionVal::stripCasts() const {
return R ? R->StripCasts() : NULL;
}
-bool SVal::symbol_iterator::operator==(const symbol_iterator &X) const {
- return itr == X.itr;
-}
-
-bool SVal::symbol_iterator::operator!=(const symbol_iterator &X) const {
- return itr != X.itr;
-}
-
-SVal::symbol_iterator::symbol_iterator(const SymExpr *SE) {
- itr.push_back(SE);
- while (!isa<SymbolData>(itr.back())) expand();
-}
-
-SVal::symbol_iterator &SVal::symbol_iterator::operator++() {
- assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
- assert(isa<SymbolData>(itr.back()));
- itr.pop_back();
- if (!itr.empty())
- while (!isa<SymbolData>(itr.back())) expand();
- return *this;
-}
-
-SymbolRef SVal::symbol_iterator::operator*() {
- assert(!itr.empty() && "attempting to dereference an 'end' iterator");
- return cast<SymbolData>(itr.back());
-}
-
-void SVal::symbol_iterator::expand() {
- const SymExpr *SE = itr.back();
- itr.pop_back();
-
- if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
- itr.push_back(SIE->getLHS());
- return;
- }
- else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) {
- itr.push_back(SSE->getLHS());
- itr.push_back(SSE->getRHS());
- return;
- }
-
- llvm_unreachable("unhandled expansion case");
-}
-
const void *nonloc::LazyCompoundVal::getStore() const {
return static_cast<const LazyCompoundValData*>(Data)->getStore();
}
@@ -281,8 +245,6 @@ void SVal::dumpToStream(raw_ostream &os) const {
case UndefinedKind:
os << "Undefined";
break;
- default:
- assert (false && "Invalid SVal.");
}
}
@@ -298,13 +260,8 @@ void NonLoc::dumpToStream(raw_ostream &os) const {
<< C.getValue().getBitWidth() << 'b';
break;
}
- case nonloc::SymbolValKind:
- os << '$' << cast<nonloc::SymbolVal>(this)->getSymbol();
- break;
- case nonloc::SymExprValKind: {
- const nonloc::SymExprVal& C = *cast<nonloc::SymExprVal>(this);
- const SymExpr *SE = C.getSymbolicExpression();
- os << SE;
+ case nonloc::SymbolValKind: {
+ os << cast<nonloc::SymbolVal>(this)->getSymbol();
break;
}
case nonloc::LocAsIntegerKind: {
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index 79d8b8b..a76a2da 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -23,11 +23,9 @@ namespace ento {
SimpleConstraintManager::~SimpleConstraintManager() {}
bool SimpleConstraintManager::canReasonAbout(SVal X) const {
- if (nonloc::SymExprVal *SymVal = dyn_cast<nonloc::SymExprVal>(&X)) {
- const SymExpr *SE = SymVal->getSymbolicExpression();
-
- if (isa<SymbolData>(SE))
- return true;
+ nonloc::SymbolVal *SymVal = dyn_cast<nonloc::SymbolVal>(&X);
+ if (SymVal && SymVal->isExpression()) {
+ const SymExpr *SE = SymVal->getSymbol();
if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
switch (SIE->getOpcode()) {
@@ -56,7 +54,7 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
return true;
}
-const ProgramState *SimpleConstraintManager::assume(const ProgramState *state,
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
DefinedSVal Cond,
bool Assumption) {
if (isa<NonLoc>(Cond))
@@ -65,13 +63,13 @@ const ProgramState *SimpleConstraintManager::assume(const ProgramState *state,
return assume(state, cast<Loc>(Cond), Assumption);
}
-const ProgramState *SimpleConstraintManager::assume(const ProgramState *state, Loc cond,
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state, Loc cond,
bool assumption) {
state = assumeAux(state, cond, assumption);
return SU.processAssume(state, cond, assumption);
}
-const ProgramState *SimpleConstraintManager::assumeAux(const ProgramState *state,
+ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
Loc Cond, bool Assumption) {
BasicValueFactory &BasicVals = state->getBasicVals();
@@ -113,7 +111,7 @@ const ProgramState *SimpleConstraintManager::assumeAux(const ProgramState *state
} // end switch
}
-const ProgramState *SimpleConstraintManager::assume(const ProgramState *state,
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
NonLoc cond,
bool assumption) {
state = assumeAux(state, cond, assumption);
@@ -135,16 +133,29 @@ static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
}
}
-const ProgramState *SimpleConstraintManager::assumeAux(const ProgramState *state,
+
+ProgramStateRef SimpleConstraintManager::assumeAuxForSymbol(
+ ProgramStateRef State,
+ SymbolRef Sym,
+ bool Assumption) {
+ QualType T = State->getSymbolManager().getType(Sym);
+ const llvm::APSInt &zero = State->getBasicVals().getValue(0, T);
+ if (Assumption)
+ return assumeSymNE(State, Sym, zero, zero);
+ else
+ return assumeSymEQ(State, Sym, zero, zero);
+}
+
+ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
NonLoc Cond,
bool Assumption) {
- // We cannot reason about SymSymExprs,
- // and can only reason about some SymIntExprs.
+ // We cannot reason about SymSymExprs, and can only reason about some
+ // SymIntExprs.
if (!canReasonAbout(Cond)) {
- // Just return the current state indicating that the path is feasible.
- // This may be an over-approximation of what is possible.
- return state;
+ // Just add the constraint to the expression without trying to simplify.
+ SymbolRef sym = Cond.getAsSymExpr();
+ return assumeAuxForSymbol(state, sym, Assumption);
}
BasicValueFactory &BasicVals = state->getBasicVals();
@@ -157,37 +168,33 @@ const ProgramState *SimpleConstraintManager::assumeAux(const ProgramState *state
case nonloc::SymbolValKind: {
nonloc::SymbolVal& SV = cast<nonloc::SymbolVal>(Cond);
SymbolRef sym = SV.getSymbol();
- QualType T = SymMgr.getType(sym);
- const llvm::APSInt &zero = BasicVals.getValue(0, T);
- if (Assumption)
- return assumeSymNE(state, sym, zero, zero);
- else
- return assumeSymEQ(state, sym, zero, zero);
- }
+ assert(sym);
+
+ // Handle SymbolData.
+ if (!SV.isExpression()) {
+ return assumeAuxForSymbol(state, sym, Assumption);
+
+ // Handle symbolic expression.
+ } else {
+ // We can only simplify expressions whose RHS is an integer.
+ const SymIntExpr *SE = dyn_cast<SymIntExpr>(sym);
+ if (!SE)
+ return assumeAuxForSymbol(state, sym, Assumption);
+
+ BinaryOperator::Opcode op = SE->getOpcode();
+ // Implicitly compare non-comparison expressions to 0.
+ if (!BinaryOperator::isComparisonOp(op)) {
+ QualType T = SymMgr.getType(SE);
+ const llvm::APSInt &zero = BasicVals.getValue(0, T);
+ op = (Assumption ? BO_NE : BO_EQ);
+ return assumeSymRel(state, SE, op, zero);
+ }
+ // From here on out, op is the real comparison we'll be testing.
+ if (!Assumption)
+ op = NegateComparison(op);
- case nonloc::SymExprValKind: {
- nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond);
-
- // For now, we only handle expressions whose RHS is an integer.
- // All other expressions are assumed to be feasible.
- const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression());
- if (!SE)
- return state;
-
- BinaryOperator::Opcode op = SE->getOpcode();
- // Implicitly compare non-comparison expressions to 0.
- if (!BinaryOperator::isComparisonOp(op)) {
- QualType T = SymMgr.getType(SE);
- const llvm::APSInt &zero = BasicVals.getValue(0, T);
- op = (Assumption ? BO_NE : BO_EQ);
- return assumeSymRel(state, SE, op, zero);
+ return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
}
-
- // From here on out, op is the real comparison we'll be testing.
- if (!Assumption)
- op = NegateComparison(op);
-
- return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
}
case nonloc::ConcreteIntKind: {
@@ -202,55 +209,52 @@ const ProgramState *SimpleConstraintManager::assumeAux(const ProgramState *state
} // end switch
}
-const ProgramState *SimpleConstraintManager::assumeSymRel(const ProgramState *state,
+static llvm::APSInt computeAdjustment(const SymExpr *LHS,
+ SymbolRef &Sym) {
+ llvm::APSInt DefaultAdjustment;
+ DefaultAdjustment = 0;
+
+ // First check if the LHS is a simple symbol reference.
+ if (isa<SymbolData>(LHS))
+ return DefaultAdjustment;
+
+ // Next, see if it's a "($sym+constant1)" expression.
+ const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS);
+
+ // We cannot simplify "($sym1+$sym2)".
+ if (!SE)
+ return DefaultAdjustment;
+
+ // Get the constant out of the expression "($sym+constant1)" or
+ // "<expr>+constant1".
+ Sym = SE->getLHS();
+ switch (SE->getOpcode()) {
+ case BO_Add:
+ return SE->getRHS();
+ case BO_Sub:
+ return -SE->getRHS();
+ default:
+ // We cannot simplify non-additive operators.
+ return DefaultAdjustment;
+ }
+}
+
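
A worked example (hypothetical code) of the adjustment that computeAdjustment extracts, following the modular-arithmetic note in the comment below:

    void test(unsigned x) {
      if (x + 2 < 4) {
        // The LHS is ($x + 2), so computeAdjustment yields +2. The solution
        // range [0, 3] for ($x + 2) is slid to [0-2, 3-2] for $x itself,
        // which in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX].
      }
    }
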
+ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
const SymExpr *LHS,
BinaryOperator::Opcode op,
const llvm::APSInt& Int) {
assert(BinaryOperator::isComparisonOp(op) &&
"Non-comparison ops should be rewritten as comparisons to zero.");
- // We only handle simple comparisons of the form "$sym == constant"
- // or "($sym+constant1) == constant2".
- // The adjustment is "constant1" in the above expression. It's used to
- // "slide" the solution range around for modular arithmetic. For example,
- // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
- // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
- // the subclasses of SimpleConstraintManager to handle the adjustment.
- llvm::APSInt Adjustment;
-
- // First check if the LHS is a simple symbol reference.
- SymbolRef Sym = dyn_cast<SymbolData>(LHS);
- if (Sym) {
- Adjustment = 0;
- } else {
- // Next, see if it's a "($sym+constant1)" expression.
- const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS);
-
- // We don't handle "($sym1+$sym2)".
- // Give up and assume the constraint is feasible.
- if (!SE)
- return state;
-
- // We don't handle "(<expr>+constant1)".
- // Give up and assume the constraint is feasible.
- Sym = dyn_cast<SymbolData>(SE->getLHS());
- if (!Sym)
- return state;
-
- // Get the constant out of the expression "($sym+constant1)".
- switch (SE->getOpcode()) {
- case BO_Add:
- Adjustment = SE->getRHS();
- break;
- case BO_Sub:
- Adjustment = -SE->getRHS();
- break;
- default:
- // We don't handle non-additive operators.
- // Give up and assume the constraint is feasible.
- return state;
- }
- }
+ // We only handle simple comparisons of the form "$sym == constant"
+ // or "($sym+constant1) == constant2".
+ // The adjustment is "constant1" in the above expression. It's used to
+ // "slide" the solution range around for modular arithmetic. For example,
+ // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
+ // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
+ // the subclasses of SimpleConstraintManager to handle the adjustment.
+ SymbolRef Sym = LHS;
+ llvm::APSInt Adjustment = computeAdjustment(LHS, Sym);
// FIXME: This next section is a hack. It silently converts the integers to
// be of the same type as the symbol, which is not always correct. Really the
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index d4295d4..e082d9d 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -31,16 +31,14 @@ public:
// Common implementation for the interface provided by ConstraintManager.
//===------------------------------------------------------------------===//
- bool canReasonAbout(SVal X) const;
-
- const ProgramState *assume(const ProgramState *state, DefinedSVal Cond,
+ ProgramStateRef assume(ProgramStateRef state, DefinedSVal Cond,
bool Assumption);
- const ProgramState *assume(const ProgramState *state, Loc Cond, bool Assumption);
+ ProgramStateRef assume(ProgramStateRef state, Loc Cond, bool Assumption);
- const ProgramState *assume(const ProgramState *state, NonLoc Cond, bool Assumption);
+ ProgramStateRef assume(ProgramStateRef state, NonLoc Cond, bool Assumption);
- const ProgramState *assumeSymRel(const ProgramState *state,
+ ProgramStateRef assumeSymRel(ProgramStateRef state,
const SymExpr *LHS,
BinaryOperator::Opcode op,
const llvm::APSInt& Int);
@@ -53,27 +51,27 @@ protected:
// Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
// operation for the method being invoked.
- virtual const ProgramState *assumeSymNE(const ProgramState *state, SymbolRef sym,
+ virtual ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const ProgramState *assumeSymEQ(const ProgramState *state, SymbolRef sym,
+ virtual ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const ProgramState *assumeSymLT(const ProgramState *state, SymbolRef sym,
+ virtual ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const ProgramState *assumeSymGT(const ProgramState *state, SymbolRef sym,
+ virtual ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const ProgramState *assumeSymLE(const ProgramState *state, SymbolRef sym,
+ virtual ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
- virtual const ProgramState *assumeSymGE(const ProgramState *state, SymbolRef sym,
+ virtual ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
const llvm::APSInt& V,
const llvm::APSInt& Adjustment) = 0;
@@ -81,9 +79,19 @@ protected:
// Internal implementation.
//===------------------------------------------------------------------===//
- const ProgramState *assumeAux(const ProgramState *state, Loc Cond,bool Assumption);
+ bool canReasonAbout(SVal X) const;
+
+ ProgramStateRef assumeAux(ProgramStateRef state,
+ Loc Cond,
+ bool Assumption);
+
+ ProgramStateRef assumeAux(ProgramStateRef state,
+ NonLoc Cond,
+ bool Assumption);
- const ProgramState *assumeAux(const ProgramState *state, NonLoc Cond, bool Assumption);
+ ProgramStateRef assumeAuxForSymbol(ProgramStateRef State,
+ SymbolRef Sym,
+ bool Assumption);
};
} // end GR namespace
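The Adjustment parameter in the assumeSym* callbacks above is what lets subclasses solve "$sym+Adj <> V" in modular arithmetic. Here is a brute-force sketch (illustrative, not analyzer code) of the solution set a range-based subclass must produce for "$sym + 3 < 10" over 8-bit values:

#include <cstdio>

int main() {
  const unsigned Adj = 3, V = 10;
  // Enumerate all 8-bit symbol values and keep those satisfying
  // "sym + Adj < V" under wrapping (modular) arithmetic.
  std::printf("solutions of sym + %u < %u over 8-bit values:", Adj, V);
  for (unsigned sym = 0; sym <= 0xFF; ++sym)
    if (static_cast<unsigned char>(sym + Adj) < V)
      std::printf(" %u", sym);
  std::printf("\n");
  // Prints 0..6 followed by 253..255: the plain interval [0, V-Adj-1]
  // plus the wrapped tail [256-Adj, 255], i.e. the "slid" range.
  return 0;
}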
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index bd63ecf..d0558f1 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -20,6 +20,7 @@ using namespace ento;
namespace {
class SimpleSValBuilder : public SValBuilder {
protected:
+ virtual SVal dispatchCast(SVal val, QualType castTy);
virtual SVal evalCastFromNonLoc(NonLoc val, QualType castTy);
virtual SVal evalCastFromLoc(Loc val, QualType castTy);
@@ -31,16 +32,16 @@ public:
virtual SVal evalMinus(NonLoc val);
virtual SVal evalComplement(NonLoc val);
- virtual SVal evalBinOpNN(const ProgramState *state, BinaryOperator::Opcode op,
+ virtual SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs, QualType resultTy);
- virtual SVal evalBinOpLL(const ProgramState *state, BinaryOperator::Opcode op,
+ virtual SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
Loc lhs, Loc rhs, QualType resultTy);
- virtual SVal evalBinOpLN(const ProgramState *state, BinaryOperator::Opcode op,
+ virtual SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy);
/// getKnownValue - evaluates a given SVal. If the SVal has only one possible
/// (integer) value, that value is returned. Otherwise, returns NULL.
- virtual const llvm::APSInt *getKnownValue(const ProgramState *state, SVal V);
+ virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V);
SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
const llvm::APSInt &RHS, QualType resultTy);
@@ -57,6 +58,12 @@ SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
// Transfer function for Casts.
//===----------------------------------------------------------------------===//
+SVal SimpleSValBuilder::dispatchCast(SVal Val, QualType CastTy) {
+ assert(isa<Loc>(&Val) || isa<NonLoc>(&Val));
+ return isa<Loc>(Val) ? evalCastFromLoc(cast<Loc>(Val), CastTy)
+ : evalCastFromNonLoc(cast<NonLoc>(Val), CastTy);
+}
+
SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
bool isLocType = Loc::isLocType(castTy);
@@ -74,25 +81,27 @@ SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
if (const SymExpr *se = val.getAsSymbolicExpression()) {
QualType T = Context.getCanonicalType(se->getType(Context));
- if (T == Context.getCanonicalType(castTy))
- return val;
-
+ // If types are the same or both are integers, ignore the cast.
// FIXME: Remove this hack when we support symbolic truncation/extension.
// HACK: If both castTy and T are integers, ignore the cast. This is
// not a permanent solution. Eventually we want to precisely handle
// extension/truncation of symbolic integers. This prevents us from losing
// precision when we assign 'x = y' and 'y' is symbolic and x and y are
// different integer types.
- if (T->isIntegerType() && castTy->isIntegerType())
+ if (haveSameType(T, castTy))
return val;
+ if (!isLocType)
+ return makeNonLoc(se, T, castTy);
return UnknownVal();
}
+  // If the value is a non-integer constant, produce unknown.
if (!isa<nonloc::ConcreteInt>(val))
return UnknownVal();
- // Only handle casts from integers to integers.
+  // Only handle casts from integers to integers: if val is an integer
+  // constant being cast to a non-integer type, produce unknown.
if (!isLocType && !castTy->isIntegerType())
return UnknownVal();
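Illustrative effect of this hunk on analyzed user code (hypothetical example, not part of the patch):

void g(int x) {        // the analyzer binds 'x' to a symbol, say $x
  long y = x;          // before: the int-to-long cast was silently dropped
                       // and 'y' kept plain $x; now evalCastFromNonLoc
                       // returns a symbolic cast value, "(long) $x"
  if (y > 0) { /* constraints on 'y' can now mention the cast */ }
}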
@@ -259,18 +268,15 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
// Idempotent ops (like a*1) can still change the type of an expression.
// Wrap the LHS up in a NonLoc again and let evalCastFromNonLoc do the
// dirty work.
- if (isIdempotent) {
- if (SymbolRef LHSSym = dyn_cast<SymbolData>(LHS))
- return evalCastFromNonLoc(nonloc::SymbolVal(LHSSym), resultTy);
- return evalCastFromNonLoc(nonloc::SymExprVal(LHS), resultTy);
- }
+ if (isIdempotent)
+ return evalCastFromNonLoc(nonloc::SymbolVal(LHS), resultTy);
// If we reach this point, the expression cannot be simplified.
- // Make a SymExprVal for the entire thing.
+ // Make a SymbolVal for the entire expression.
return makeNonLoc(LHS, op, RHS, resultTy);
}
-SVal SimpleSValBuilder::evalBinOpNN(const ProgramState *state,
+SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs,
QualType resultTy) {
@@ -298,7 +304,7 @@ SVal SimpleSValBuilder::evalBinOpNN(const ProgramState *state,
while (1) {
switch (lhs.getSubKind()) {
default:
- return UnknownVal();
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
case nonloc::LocAsIntegerKind: {
Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
switch (rhs.getSubKind()) {
@@ -321,94 +327,10 @@ SVal SimpleSValBuilder::evalBinOpNN(const ProgramState *state,
return makeTruthVal(true, resultTy);
default:
// This case also handles pointer arithmetic.
- return UnknownVal();
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
}
}
}
- case nonloc::SymExprValKind: {
- nonloc::SymExprVal *selhs = cast<nonloc::SymExprVal>(&lhs);
-
- // Only handle LHS of the form "$sym op constant", at least for now.
- const SymIntExpr *symIntExpr =
- dyn_cast<SymIntExpr>(selhs->getSymbolicExpression());
-
- if (!symIntExpr)
- return UnknownVal();
-
- // Is this a logical not? (!x is represented as x == 0.)
- if (op == BO_EQ && rhs.isZeroConstant()) {
- // We know how to negate certain expressions. Simplify them here.
-
- BinaryOperator::Opcode opc = symIntExpr->getOpcode();
- switch (opc) {
- default:
- // We don't know how to negate this operation.
- // Just handle it as if it were a normal comparison to 0.
- break;
- case BO_LAnd:
- case BO_LOr:
- llvm_unreachable("Logical operators handled by branching logic.");
- case BO_Assign:
- case BO_MulAssign:
- case BO_DivAssign:
- case BO_RemAssign:
- case BO_AddAssign:
- case BO_SubAssign:
- case BO_ShlAssign:
- case BO_ShrAssign:
- case BO_AndAssign:
- case BO_XorAssign:
- case BO_OrAssign:
- case BO_Comma:
- llvm_unreachable("'=' and ',' operators handled by ExprEngine.");
- case BO_PtrMemD:
- case BO_PtrMemI:
- llvm_unreachable("Pointer arithmetic not handled here.");
- case BO_LT:
- case BO_GT:
- case BO_LE:
- case BO_GE:
- case BO_EQ:
- case BO_NE:
- // Negate the comparison and make a value.
- opc = NegateComparison(opc);
- assert(symIntExpr->getType(Context) == resultTy);
- return makeNonLoc(symIntExpr->getLHS(), opc,
- symIntExpr->getRHS(), resultTy);
- }
- }
-
- // For now, only handle expressions whose RHS is a constant.
- const nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs);
- if (!rhsInt)
- return UnknownVal();
-
- // If both the LHS and the current expression are additive,
- // fold their constants.
- if (BinaryOperator::isAdditiveOp(op)) {
- BinaryOperator::Opcode lop = symIntExpr->getOpcode();
- if (BinaryOperator::isAdditiveOp(lop)) {
- // resultTy may not be the best type to convert to, but it's
- // probably the best choice in expressions with mixed type
- // (such as x+1U+2LL). The rules for implicit conversions should
- // choose a reasonable type to preserve the expression, and will
- // at least match how the value is going to be used.
- const llvm::APSInt &first =
- BasicVals.Convert(resultTy, symIntExpr->getRHS());
- const llvm::APSInt &second =
- BasicVals.Convert(resultTy, rhsInt->getValue());
- const llvm::APSInt *newRHS;
- if (lop == op)
- newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
- else
- newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
- return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy);
- }
- }
-
- // Otherwise, make a SymExprVal out of the expression.
- return MakeSymIntVal(symIntExpr, op, rhsInt->getValue(), resultTy);
- }
case nonloc::ConcreteIntKind: {
const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
@@ -467,76 +389,165 @@ SVal SimpleSValBuilder::evalBinOpNN(const ProgramState *state,
if (lhsValue == 0)
// At this point lhs and rhs have been swapped.
return rhs;
- return UnknownVal();
+ return makeGenericVal(state, op, rhs, lhs, resultTy);
default:
- return UnknownVal();
+ return makeGenericVal(state, op, rhs, lhs, resultTy);
}
}
}
case nonloc::SymbolValKind: {
- nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs);
- SymbolRef Sym = slhs->getSymbol();
- QualType lhsType = Sym->getType(Context);
-
- // The conversion type is usually the result type, but not in the case
- // of relational expressions.
- QualType conversionType = resultTy;
- if (BinaryOperator::isRelationalOp(op))
- conversionType = lhsType;
-
- // Does the symbol simplify to a constant? If so, "fold" the constant
- // by setting 'lhs' to a ConcreteInt and try again.
- if (lhsType->isIntegerType())
- if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
- // The symbol evaluates to a constant. If necessary, promote the
- // folded constant (LHS) to the result type.
- const llvm::APSInt &lhs_I = BasicVals.Convert(conversionType,
- *Constant);
- lhs = nonloc::ConcreteInt(lhs_I);
-
- // Also promote the RHS (if necessary).
-
- // For shifts, it is not necessary to promote the RHS.
- if (BinaryOperator::isShiftOp(op))
- continue;
-
- // Other operators: do an implicit conversion. This shouldn't be
- // necessary once we support truncation/extension of symbolic values.
- if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){
- rhs = nonloc::ConcreteInt(BasicVals.Convert(conversionType,
- rhs_I->getValue()));
+ nonloc::SymbolVal *selhs = cast<nonloc::SymbolVal>(&lhs);
+
+ // LHS is a symbolic expression.
+ if (selhs->isExpression()) {
+
+ // Only handle LHS of the form "$sym op constant", at least for now.
+ const SymIntExpr *symIntExpr =
+ dyn_cast<SymIntExpr>(selhs->getSymbol());
+
+ if (!symIntExpr)
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
+
+ // Is this a logical not? (!x is represented as x == 0.)
+ if (op == BO_EQ && rhs.isZeroConstant()) {
+ // We know how to negate certain expressions. Simplify them here.
+
+ BinaryOperator::Opcode opc = symIntExpr->getOpcode();
+ switch (opc) {
+ default:
+ // We don't know how to negate this operation.
+ // Just handle it as if it were a normal comparison to 0.
+ break;
+ case BO_LAnd:
+ case BO_LOr:
+ llvm_unreachable("Logical operators handled by branching logic.");
+ case BO_Assign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ case BO_Comma:
+ llvm_unreachable("'=' and ',' operators handled by ExprEngine.");
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ llvm_unreachable("Pointer arithmetic not handled here.");
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ // Negate the comparison and make a value.
+ opc = NegateComparison(opc);
+ assert(symIntExpr->getType(Context) == resultTy);
+ return makeNonLoc(symIntExpr->getLHS(), opc,
+ symIntExpr->getRHS(), resultTy);
}
-
- continue;
}
- // Is the RHS a symbol we can simplify?
- if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) {
- SymbolRef RSym = srhs->getSymbol();
- if (RSym->getType(Context)->isIntegerType()) {
- if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
- // The symbol evaluates to a constant.
- const llvm::APSInt &rhs_I = BasicVals.Convert(conversionType,
- *Constant);
- rhs = nonloc::ConcreteInt(rhs_I);
+ // For now, only handle expressions whose RHS is a constant.
+ const nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs);
+ if (!rhsInt)
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
+
+ // If both the LHS and the current expression are additive,
+ // fold their constants.
+ if (BinaryOperator::isAdditiveOp(op)) {
+ BinaryOperator::Opcode lop = symIntExpr->getOpcode();
+ if (BinaryOperator::isAdditiveOp(lop)) {
+ // resultTy may not be the best type to convert to, but it's
+ // probably the best choice in expressions with mixed type
+ // (such as x+1U+2LL). The rules for implicit conversions should
+ // choose a reasonable type to preserve the expression, and will
+ // at least match how the value is going to be used.
+ const llvm::APSInt &first =
+ BasicVals.Convert(resultTy, symIntExpr->getRHS());
+ const llvm::APSInt &second =
+ BasicVals.Convert(resultTy, rhsInt->getValue());
+ const llvm::APSInt *newRHS;
+ if (lop == op)
+ newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
+ else
+ newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
+ return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy);
}
}
- }
- if (isa<nonloc::ConcreteInt>(rhs)) {
- return MakeSymIntVal(slhs->getSymbol(), op,
- cast<nonloc::ConcreteInt>(rhs).getValue(),
- resultTy);
- }
+ // Otherwise, make a SymbolVal out of the expression.
+ return MakeSymIntVal(symIntExpr, op, rhsInt->getValue(), resultTy);
- return UnknownVal();
+ // LHS is a simple symbol (not a symbolic expression).
+ } else {
+ nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs);
+ SymbolRef Sym = slhs->getSymbol();
+ QualType lhsType = Sym->getType(Context);
+
+ // The conversion type is usually the result type, but not in the case
+ // of relational expressions.
+ QualType conversionType = resultTy;
+ if (BinaryOperator::isRelationalOp(op))
+ conversionType = lhsType;
+
+ // Does the symbol simplify to a constant? If so, "fold" the constant
+ // by setting 'lhs' to a ConcreteInt and try again.
+ if (lhsType->isIntegerType())
+ if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
+ // The symbol evaluates to a constant. If necessary, promote the
+ // folded constant (LHS) to the result type.
+ const llvm::APSInt &lhs_I = BasicVals.Convert(conversionType,
+ *Constant);
+ lhs = nonloc::ConcreteInt(lhs_I);
+
+ // Also promote the RHS (if necessary).
+
+ // For shifts, it is not necessary to promote the RHS.
+ if (BinaryOperator::isShiftOp(op))
+ continue;
+
+ // Other operators: do an implicit conversion. This shouldn't be
+ // necessary once we support truncation/extension of symbolic values.
+ if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){
+ rhs = nonloc::ConcreteInt(BasicVals.Convert(conversionType,
+ rhs_I->getValue()));
+ }
+
+ continue;
+ }
+
+ // Is the RHS a symbol we can simplify?
+ if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) {
+ SymbolRef RSym = srhs->getSymbol();
+ if (RSym->getType(Context)->isIntegerType()) {
+ if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
+ // The symbol evaluates to a constant.
+ const llvm::APSInt &rhs_I = BasicVals.Convert(conversionType,
+ *Constant);
+ rhs = nonloc::ConcreteInt(rhs_I);
+ }
+ }
+ }
+
+ if (isa<nonloc::ConcreteInt>(rhs)) {
+ return MakeSymIntVal(slhs->getSymbol(), op,
+ cast<nonloc::ConcreteInt>(rhs).getValue(),
+ resultTy);
+ }
+
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
+ }
}
}
}
}
// FIXME: all this logic will change if/when we have MemRegion::getLocation().
-SVal SimpleSValBuilder::evalBinOpLL(const ProgramState *state,
+SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
BinaryOperator::Opcode op,
Loc lhs, Loc rhs,
QualType resultTy) {
@@ -703,6 +714,24 @@ SVal SimpleSValBuilder::evalBinOpLL(const ProgramState *state,
// The two regions are from the same base region. See if they're both a
// type of region we know how to compare.
+ const MemSpaceRegion *LeftMS = LeftBase->getMemorySpace();
+ const MemSpaceRegion *RightMS = RightBase->getMemorySpace();
+
+ // Heuristic: assume that no symbolic region (whose memory space is
+ // unknown) is on the stack.
+ // FIXME: we should be able to be more precise once we can do better
+ // aliasing constraints for symbolic regions, but this is a reasonable,
+ // albeit unsound, assumption that holds most of the time.
+ if (isa<StackSpaceRegion>(LeftMS) ^ isa<StackSpaceRegion>(RightMS)) {
+ switch (op) {
+ default:
+ break;
+ case BO_EQ:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
// FIXME: If/when there is a getAsRawOffset() for FieldRegions, this
// ElementRegion path and the FieldRegion path below should be unified.
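A hypothetical piece of user code showing what the heuristic above resolves (illustrative, not part of the patch):

// Under the heuristic, 'p' points to a symbolic region of unknown memory
// space while '&local' is in the stack space, so the analyzer assumes the
// two regions are distinct and folds the comparison eagerly.
void f(int *p) {
  int local = 0;
  if (p == &local) {
    // Treated as infeasible: BO_EQ across stack/non-stack yields false.
  }
}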
@@ -831,7 +860,7 @@ SVal SimpleSValBuilder::evalBinOpLL(const ProgramState *state,
}
}
-SVal SimpleSValBuilder::evalBinOpLN(const ProgramState *state,
+SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy) {
@@ -925,7 +954,7 @@ SVal SimpleSValBuilder::evalBinOpLN(const ProgramState *state,
return UnknownVal();
}
-const llvm::APSInt *SimpleSValBuilder::getKnownValue(const ProgramState *state,
+const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
SVal V) {
if (V.isUnknownOrUndef())
return NULL;
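Two of the simplifications preserved above, as worked examples: additive constants fold, so ($x + 1) + 2 becomes $x + 3 and ($x - 5) + 2 becomes $x - 3; and an expression compared against zero has its comparison negated. A minimal stand-alone sketch of that negation step (illustrative; the real code uses BinaryOperator::Opcode and the NegateComparison helper):

#include <cassert>

enum Opcode { LT, GT, LE, GE, EQ, NE };

// "(sym <op> C) == 0" is rewritten as "sym <negate(op)> C".
Opcode negateComparison(Opcode Op) {
  switch (Op) {
  case LT: return GE;  // !(a < b)  ==>  a >= b
  case GT: return LE;  // !(a > b)  ==>  a <= b
  case LE: return GT;
  case GE: return LT;
  case EQ: return NE;
  case NE: return EQ;
  }
  return Op; // unreachable for the opcodes above
}

int main() {
  // ($x < 3) == 0 simplifies to $x >= 3, and so on.
  assert(negateComparison(LT) == GE);
  assert(negateComparison(EQ) == NE);
  return 0;
}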
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
index 48a6f4f..11748ae 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
using namespace clang;
using namespace ento;
@@ -22,8 +23,9 @@ StoreManager::StoreManager(ProgramStateManager &stateMgr)
: svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
-StoreRef StoreManager::enterStackFrame(const ProgramState *state,
- const StackFrameContext *frame) {
+StoreRef StoreManager::enterStackFrame(ProgramStateRef state,
+ const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx) {
return StoreRef(state->getStore(), *this);
}
@@ -101,8 +103,10 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
case MemRegion::StackArgumentsSpaceRegionKind:
case MemRegion::HeapSpaceRegionKind:
case MemRegion::UnknownSpaceRegionKind:
- case MemRegion::NonStaticGlobalSpaceRegionKind:
- case MemRegion::StaticGlobalSpaceRegionKind: {
+ case MemRegion::StaticGlobalSpaceRegionKind:
+ case MemRegion::GlobalInternalSpaceRegionKind:
+ case MemRegion::GlobalSystemSpaceRegionKind:
+ case MemRegion::GlobalImmutableSpaceRegionKind: {
llvm_unreachable("Invalid region cast");
}
@@ -116,6 +120,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
case MemRegion::CompoundLiteralRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::ObjCStringRegionKind:
case MemRegion::VarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
case MemRegion::CXXBaseObjectRegionKind:
@@ -212,7 +217,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
QualType castTy, bool performTestOnly) {
- if (castTy.isNull())
+ if (castTy.isNull() || V.isUnknownOrUndef())
return V;
ASTContext &Ctx = svalBuilder.getContext();
@@ -227,12 +232,7 @@ SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
return V;
}
- if (const Loc *L = dyn_cast<Loc>(&V))
- return svalBuilder.evalCastFromLoc(*L, castTy);
- else if (const NonLoc *NL = dyn_cast<NonLoc>(&V))
- return svalBuilder.evalCastFromNonLoc(*NL, castTy);
-
- return V;
+ return svalBuilder.dispatchCast(V, castTy);
}
SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
@@ -270,6 +270,10 @@ SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}
+SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
+ return getLValueFieldOrIvar(decl, base);
+}
+
SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
SVal Base) {
@@ -336,3 +340,23 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
StoreManager::BindingsHandler::~BindingsHandler() {}
+bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager& SMgr,
+ Store store,
+ const MemRegion* R,
+ SVal val) {
+ SymbolRef SymV = val.getAsLocSymbol();
+ if (!SymV || SymV != Sym)
+ return true;
+
+  if (Binding) {
+    First = false;
+    return false;
+  } else {
+    Binding = R;
+  }
+
+ return true;
+}
+
+void SubRegionMap::anchor() { }
+void SubRegionMap::Visitor::anchor() { }
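These anchor() definitions (and the ones added to SymbolManager.cpp and SubEngine.cpp below) follow the usual LLVM vtable-anchoring idiom; a minimal sketch of the pattern, with hypothetical names:

// In a header: a class whose virtual methods are otherwise all inline
// gets a weak vtable emitted in every translation unit that uses it.
struct Example {
  virtual ~Example() {}
  virtual void anchor(); // declared but never called
};

// In exactly one .cpp file: the out-of-line definition pins the vtable
// (and type info) to a single object file.
void Example::anchor() {}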
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
new file mode 100644
index 0000000..350f4b8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
@@ -0,0 +1,14 @@
+//== SubEngine.cpp - Interface of the subengine of CoreEngine ------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+
+using namespace clang::ento;
+
+void SubEngine::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index b843ab1..adefb58 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -21,6 +21,8 @@
using namespace clang;
using namespace ento;
+void SymExpr::anchor() { }
+
void SymExpr::dump() const {
dumpToStream(llvm::errs());
}
@@ -57,6 +59,15 @@ void SymIntExpr::dumpToStream(raw_ostream &os) const {
if (getRHS().isUnsigned()) os << 'U';
}
+void IntSymExpr::dumpToStream(raw_ostream &os) const {
+ os << ' ' << getLHS().getZExtValue();
+ if (getLHS().isUnsigned()) os << 'U';
+ print(os, getOpcode());
+ os << '(';
+ getRHS()->dumpToStream(os);
+ os << ") ";
+}
+
void SymSymExpr::dumpToStream(raw_ostream &os) const {
os << '(';
getLHS()->dumpToStream(os);
@@ -66,6 +77,12 @@ void SymSymExpr::dumpToStream(raw_ostream &os) const {
os << ')';
}
+void SymbolCast::dumpToStream(raw_ostream &os) const {
+ os << '(' << ToTy.getAsString() << ") (";
+ Operand->dumpToStream(os);
+ os << ')';
+}
+
void SymbolConjured::dumpToStream(raw_ostream &os) const {
os << "conj_$" << getSymbolID() << '{' << T.getAsString() << '}';
}
@@ -84,10 +101,69 @@ void SymbolMetadata::dumpToStream(raw_ostream &os) const {
<< getRegion() << ',' << T.getAsString() << '}';
}
+void SymbolData::anchor() { }
+
void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
os << "reg_$" << getSymbolID() << "<" << R << ">";
}
+bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
+ return itr == X.itr;
+}
+
+bool SymExpr::symbol_iterator::operator!=(const symbol_iterator &X) const {
+ return itr != X.itr;
+}
+
+SymExpr::symbol_iterator::symbol_iterator(const SymExpr *SE) {
+ itr.push_back(SE);
+ while (!isa<SymbolData>(itr.back())) expand();
+}
+
+SymExpr::symbol_iterator &SymExpr::symbol_iterator::operator++() {
+ assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
+ assert(isa<SymbolData>(itr.back()));
+ itr.pop_back();
+ if (!itr.empty())
+ while (!isa<SymbolData>(itr.back())) expand();
+ return *this;
+}
+
+SymbolRef SymExpr::symbol_iterator::operator*() {
+ assert(!itr.empty() && "attempting to dereference an 'end' iterator");
+ return cast<SymbolData>(itr.back());
+}
+
+void SymExpr::symbol_iterator::expand() {
+ const SymExpr *SE = itr.back();
+ itr.pop_back();
+
+ switch (SE->getKind()) {
+ case SymExpr::RegionValueKind:
+ case SymExpr::ConjuredKind:
+ case SymExpr::DerivedKind:
+ case SymExpr::ExtentKind:
+ case SymExpr::MetadataKind:
+ return;
+ case SymExpr::CastSymbolKind:
+ itr.push_back(cast<SymbolCast>(SE)->getOperand());
+ return;
+ case SymExpr::SymIntKind:
+ itr.push_back(cast<SymIntExpr>(SE)->getLHS());
+ return;
+ case SymExpr::IntSymKind:
+ itr.push_back(cast<IntSymExpr>(SE)->getRHS());
+ return;
+ case SymExpr::SymSymKind: {
+ const SymSymExpr *x = cast<SymSymExpr>(SE);
+ itr.push_back(x->getLHS());
+ itr.push_back(x->getRHS());
+ return;
+ }
+ }
+ llvm_unreachable("unhandled expansion case");
+}
+
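A hypothetical usage sketch of the new iterator (assumed helper, not in the patch, and assuming SymExpr exposes the usual symbol_begin()/symbol_end() accessors): it walks the expression tree and yields only the SymbolData leaves, so for ($a + 1) == $b it produces $b and then $a (the right subtree comes first because expansion is stack-based), skipping the interior nodes.

static void collectAtomicSymbols(const SymExpr *E,
                                 llvm::SmallVectorImpl<SymbolRef> &Out) {
  // Collect every atomic symbol referenced by a (possibly compound) SymExpr.
  for (SymExpr::symbol_iterator I = E->symbol_begin(), End = E->symbol_end();
       I != End; ++I)
    Out.push_back(*I);
}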
const SymbolRegionValue*
SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
llvm::FoldingSetNodeID profile;
@@ -105,16 +181,17 @@ SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
}
const SymbolConjured*
-SymbolManager::getConjuredSymbol(const Stmt *E, QualType T, unsigned Count,
+SymbolManager::getConjuredSymbol(const Stmt *E, const LocationContext *LCtx,
+ QualType T, unsigned Count,
const void *SymbolTag) {
llvm::FoldingSetNodeID profile;
- SymbolConjured::Profile(profile, E, T, Count, SymbolTag);
+ SymbolConjured::Profile(profile, E, T, Count, LCtx, SymbolTag);
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
- new (SD) SymbolConjured(SymbolCounter, E, T, Count, SymbolTag);
+ new (SD) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -174,6 +251,22 @@ SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
return cast<SymbolMetadata>(SD);
}
+const SymbolCast*
+SymbolManager::getCastSymbol(const SymExpr *Op,
+ QualType From, QualType To) {
+ llvm::FoldingSetNodeID ID;
+ SymbolCast::Profile(ID, Op, From, To);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+ if (!data) {
+ data = (SymbolCast*) BPAlloc.Allocate<SymbolCast>();
+ new (data) SymbolCast(Op, From, To);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymbolCast>(data);
+}
+
const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
BinaryOperator::Opcode op,
const llvm::APSInt& v,
@@ -192,6 +285,24 @@ const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
return cast<SymIntExpr>(data);
}
+const IntSymExpr *SymbolManager::getIntSymExpr(const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ IntSymExpr::Profile(ID, lhs, op, rhs, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (IntSymExpr*) BPAlloc.Allocate<IntSymExpr>();
+ new (data) IntSymExpr(lhs, op, rhs, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<IntSymExpr>(data);
+}
+
const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
BinaryOperator::Opcode op,
const SymExpr *rhs,
@@ -381,7 +492,16 @@ bool SymbolReaper::isLive(SymbolRef sym) {
return isa<SymbolRegionValue>(sym);
}
-bool SymbolReaper::isLive(const Stmt *ExprVal) const {
+bool
+SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
+ if (LCtx != ELCtx) {
+ // If the reaper's location context is a parent of the expression's
+ // location context, then the expression value is now "out of scope".
+ if (LCtx->isParentOf(ELCtx))
+ return false;
+ return true;
+ }
+
return LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, ExprVal);
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
index 3543f7f..fe912df 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
@@ -31,9 +31,8 @@ public:
TextPathDiagnostics(const std::string& output, DiagnosticsEngine &diag)
: OutputFile(output), Diag(diag) {}
- void HandlePathDiagnosticImpl(const PathDiagnostic* D);
-
- void FlushDiagnostics(SmallVectorImpl<std::string> *FilesMade) { }
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade);
virtual StringRef getName() const {
return "TextPathDiagnostics";
@@ -53,18 +52,18 @@ ento::createTextPathDiagnosticConsumer(const std::string& out,
return new TextPathDiagnostics(out, PP.getDiagnostics());
}
-void TextPathDiagnostics::HandlePathDiagnosticImpl(const PathDiagnostic* D) {
- if (!D)
- return;
-
- if (D->empty()) {
- delete D;
- return;
- }
-
- for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I != E; ++I) {
- unsigned diagID = Diag.getDiagnosticIDs()->getCustomDiagID(
- DiagnosticIDs::Note, I->getString());
- Diag.Report(I->getLocation().asLocation(), diagID);
+void TextPathDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade) {
+ for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
+ et = Diags.end(); it != et; ++it) {
+ const PathDiagnostic *D = *it;
+ for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
+ I != E; ++I) {
+ unsigned diagID =
+ Diag.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Note,
+ (*I)->getString());
+ Diag.Report((*I)->getLocation().asLocation(), diagID);
+ }
}
}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 34a358f..c19ebcb 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -11,13 +11,17 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "AnalysisConsumer"
+
#include "AnalysisConsumer.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CallGraph.h"
#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
@@ -34,13 +38,26 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+
+#include <queue>
using namespace clang;
using namespace ento;
+using llvm::SmallPtrSet;
static ExplodedNode::Auditor* CreateUbiViz();
+STATISTIC(NumFunctionTopLevel, "The # of functions at top level.");
+STATISTIC(NumFunctionsAnalyzed, "The # of functions analyzed (as top level).");
+STATISTIC(NumBlocksInAnalyzedFunctions,
+ "The # of basic blocks in the analyzed functions.");
+STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
+
//===----------------------------------------------------------------------===//
// Special PathDiagnosticConsumers.
//===----------------------------------------------------------------------===//
@@ -59,7 +76,19 @@ createPlistHTMLDiagnosticConsumer(const std::string& prefix,
namespace {
-class AnalysisConsumer : public ASTConsumer {
+class AnalysisConsumer : public ASTConsumer,
+ public RecursiveASTVisitor<AnalysisConsumer> {
+ enum AnalysisMode {
+ ANALYSIS_SYNTAX,
+ ANALYSIS_PATH,
+ ANALYSIS_ALL
+ };
+
+  /// Mode of the analysis while recursively visiting Decls.
+ AnalysisMode RecVisitorMode;
+ /// Bug Reporter to use while recursively visiting Decls.
+ BugReporter *RecVisitorBR;
+
public:
ASTContext *Ctx;
const Preprocessor &PP;
@@ -67,21 +96,45 @@ public:
AnalyzerOptions Opts;
ArrayRef<std::string> Plugins;
+ /// \brief Stores the declarations from the local translation unit.
+ /// Note, we pre-compute the local declarations at parse time as an
+ /// optimization to make sure we do not deserialize everything from disk.
+  /// The ratio of local declarations to all declarations can be very small
+  /// when working with a PCH file.
+ SetOfDecls LocalTUDecls;
+
// PD is owned by AnalysisManager.
PathDiagnosticConsumer *PD;
StoreManagerCreator CreateStoreMgr;
ConstraintManagerCreator CreateConstraintMgr;
- llvm::OwningPtr<CheckerManager> checkerMgr;
- llvm::OwningPtr<AnalysisManager> Mgr;
+ OwningPtr<CheckerManager> checkerMgr;
+ OwningPtr<AnalysisManager> Mgr;
+
+  /// Times the analysis of each translation unit.
+ static llvm::Timer* TUTotalTimer;
+
+ /// The information about analyzed functions shared throughout the
+ /// translation unit.
+ FunctionSummariesTy FunctionSummaries;
AnalysisConsumer(const Preprocessor& pp,
const std::string& outdir,
const AnalyzerOptions& opts,
ArrayRef<std::string> plugins)
- : Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins), PD(0) {
+ : RecVisitorMode(ANALYSIS_ALL), RecVisitorBR(0),
+ Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins), PD(0) {
DigestAnalyzerOptions();
+ if (Opts.PrintStats) {
+ llvm::EnableStatistics();
+ TUTotalTimer = new llvm::Timer("Analyzer Total Time");
+ }
+ }
+
+ ~AnalysisConsumer() {
+ if (Opts.PrintStats)
+ delete TUTotalTimer;
}
void DigestAnalyzerOptions() {
@@ -117,15 +170,20 @@ public:
}
}
- void DisplayFunction(const Decl *D) {
+ void DisplayFunction(const Decl *D, AnalysisMode Mode) {
if (!Opts.AnalyzerDisplayProgress)
return;
SourceManager &SM = Mgr->getASTContext().getSourceManager();
PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
if (Loc.isValid()) {
- llvm::errs() << "ANALYZE: " << Loc.getFilename();
-
+ llvm::errs() << "ANALYZE";
+ switch (Mode) {
+ case ANALYSIS_SYNTAX: llvm::errs() << "(Syntax)"; break;
+ case ANALYSIS_PATH: llvm::errs() << "(Path Sensitive)"; break;
+ case ANALYSIS_ALL: break;
+      }
+ llvm::errs() << ": " << Loc.getFilename();
if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
const NamedDecl *ND = cast<NamedDecl>(D);
llvm::errs() << ' ' << *ND << '\n';
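With -analyzer-display-progress, the progress output now includes the analysis mode; for a function foo in test.c it would look like this (illustrative file and function names):

  ANALYZE(Syntax): test.c foo
  ANALYZE(Path Sensitive): test.c foo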
@@ -143,112 +201,236 @@ public:
virtual void Initialize(ASTContext &Context) {
Ctx = &Context;
- checkerMgr.reset(createCheckerManager(Opts, PP.getLangOptions(), Plugins,
+ checkerMgr.reset(createCheckerManager(Opts, PP.getLangOpts(), Plugins,
PP.getDiagnostics()));
Mgr.reset(new AnalysisManager(*Ctx, PP.getDiagnostics(),
- PP.getLangOptions(), PD,
+ PP.getLangOpts(), PD,
CreateStoreMgr, CreateConstraintMgr,
checkerMgr.get(),
/* Indexer */ 0,
Opts.MaxNodes, Opts.MaxLoop,
Opts.VisualizeEGDot, Opts.VisualizeEGUbi,
Opts.AnalysisPurgeOpt, Opts.EagerlyAssume,
- Opts.TrimGraph, Opts.InlineCall,
+ Opts.TrimGraph,
Opts.UnoptimizedCFG, Opts.CFGAddImplicitDtors,
Opts.CFGAddInitializers,
- Opts.EagerlyTrimEGraph));
+ Opts.EagerlyTrimEGraph,
+ Opts.IPAMode,
+ Opts.InlineMaxStackDepth,
+ Opts.InlineMaxFunctionSize,
+ Opts.InliningMode,
+ Opts.NoRetryExhausted));
}
+ /// \brief Store the top level decls in the set to be processed later on.
+ /// (Doing this pre-processing avoids deserialization of data from PCH.)
+ virtual bool HandleTopLevelDecl(DeclGroupRef D);
+ virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
+
virtual void HandleTranslationUnit(ASTContext &C);
- void HandleDeclContext(ASTContext &C, DeclContext *dc);
- void HandleDeclContextDecl(ASTContext &C, Decl *D);
- void HandleCode(Decl *D);
+ /// \brief Build the call graph for all the top level decls of this TU and
+ /// use it to define the order in which the functions should be visited.
+  void HandleDeclsCallGraph();
+
+  /// \brief Run analysis (syntax or path-sensitive) on the given function.
+ /// \param Mode - determines if we are requesting syntax only or path
+ /// sensitive only analysis.
+ /// \param VisitedCallees - The output parameter, which is populated with the
+ /// set of functions which should be considered analyzed after analyzing the
+ /// given root function.
+ void HandleCode(Decl *D, AnalysisMode Mode,
+ SetOfConstDecls *VisitedCallees = 0);
+
+ void RunPathSensitiveChecks(Decl *D, SetOfConstDecls *VisitedCallees);
+ void ActionExprEngine(Decl *D, bool ObjCGCEnabled,
+ SetOfConstDecls *VisitedCallees);
+
+ /// Visitors for the RecursiveASTVisitor.
+
+ /// Handle callbacks for arbitrary Decls.
+ bool VisitDecl(Decl *D) {
+ checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
+ return true;
+ }
+
+ bool VisitFunctionDecl(FunctionDecl *FD) {
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II && II->getName().startswith("__inline"))
+ return true;
+
+ // We skip function template definitions, as their semantics is
+ // only determined when they are instantiated.
+ if (FD->isThisDeclarationADefinition() &&
+ !FD->isDependentContext()) {
+ HandleCode(FD, RecVisitorMode);
+ }
+ return true;
+ }
+
+ bool VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+ checkerMgr->runCheckersOnASTDecl(MD, *Mgr, *RecVisitorBR);
+ if (MD->isThisDeclarationADefinition())
+ HandleCode(MD, RecVisitorMode);
+ return true;
+ }
+
+private:
+ void storeTopLevelDecls(DeclGroupRef DG);
+
+ /// \brief Check if we should skip (not analyze) the given function.
+ bool skipFunction(Decl *D);
+
};
} // end anonymous namespace
+
//===----------------------------------------------------------------------===//
// AnalysisConsumer implementation.
//===----------------------------------------------------------------------===//
+llvm::Timer* AnalysisConsumer::TUTotalTimer = 0;
+
+bool AnalysisConsumer::HandleTopLevelDecl(DeclGroupRef DG) {
+ storeTopLevelDecls(DG);
+ return true;
+}
-void AnalysisConsumer::HandleDeclContext(ASTContext &C, DeclContext *dc) {
- for (DeclContext::decl_iterator I = dc->decls_begin(), E = dc->decls_end();
- I != E; ++I) {
- HandleDeclContextDecl(C, *I);
+void AnalysisConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
+ storeTopLevelDecls(DG);
+}
+
+void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) {
+
+    // Skip ObjCMethodDecls here; they are handled when we visit their
+    // ObjC container, which avoids analyzing them twice.
+ if (isa<ObjCMethodDecl>(*I))
+ continue;
+
+ LocalTUDecls.insert(*I);
}
}
-void AnalysisConsumer::HandleDeclContextDecl(ASTContext &C, Decl *D) {
- { // Handle callbacks for arbitrary decls.
- BugReporter BR(*Mgr);
- checkerMgr->runCheckersOnASTDecl(D, *Mgr, BR);
+void AnalysisConsumer::HandleDeclsCallGraph() {
+  // Use the call graph to derive the order in which functions are analyzed.
+  // First, build the call graph.
+ CallGraph CG;
+ // Add all the top level declarations to the graph.
+ for (SetOfDecls::iterator I = LocalTUDecls.begin(),
+ E = LocalTUDecls.end(); I != E; ++I)
+ CG.addToCallGraph(*I);
+
+ // Find the top level nodes - children of root + the unreachable (parentless)
+ // nodes.
+ llvm::SmallVector<CallGraphNode*, 24> TopLevelFunctions;
+ for (CallGraph::nodes_iterator TI = CG.parentless_begin(),
+ TE = CG.parentless_end(); TI != TE; ++TI) {
+ TopLevelFunctions.push_back(*TI);
+ NumFunctionTopLevel++;
+ }
+ CallGraphNode *Entry = CG.getRoot();
+ for (CallGraphNode::iterator I = Entry->begin(),
+ E = Entry->end(); I != E; ++I) {
+ TopLevelFunctions.push_back(*I);
+ NumFunctionTopLevel++;
}
- switch (D->getKind()) {
- case Decl::Namespace: {
- HandleDeclContext(C, cast<NamespaceDecl>(D));
- break;
- }
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- case Decl::CXXConversion:
- case Decl::CXXMethod:
- case Decl::Function: {
- FunctionDecl *FD = cast<FunctionDecl>(D);
- // We skip function template definitions, as their semantics is
- // only determined when they are instantiated.
- if (FD->isThisDeclarationADefinition() &&
- !FD->isDependentContext()) {
- if (!Opts.AnalyzeSpecificFunction.empty() &&
- FD->getDeclName().getAsString() != Opts.AnalyzeSpecificFunction)
- break;
- DisplayFunction(FD);
- HandleCode(FD);
- }
- break;
+  // Make sure the nodes are processed in the reverse of their definition
+  // order in the translation unit. This step is very important for
+  // performance: it ensures that we analyze the root functions before the
+  // externally available subroutines.
+ std::queue<CallGraphNode*> BFSQueue;
+ for (llvm::SmallVector<CallGraphNode*, 24>::reverse_iterator
+ TI = TopLevelFunctions.rbegin(), TE = TopLevelFunctions.rend();
+ TI != TE; ++TI)
+ BFSQueue.push(*TI);
+
+ // BFS over all of the functions, while skipping the ones inlined into
+ // the previously processed functions. Use external Visited set, which is
+ // also modified when we inline a function.
+ SmallPtrSet<CallGraphNode*,24> Visited;
+  while (!BFSQueue.empty()) {
+ CallGraphNode *N = BFSQueue.front();
+ BFSQueue.pop();
+
+ // Skip the functions which have been processed already or previously
+ // inlined.
+ if (Visited.count(N))
+ continue;
+
+ // Analyze the function.
+ SetOfConstDecls VisitedCallees;
+ Decl *D = N->getDecl();
+ assert(D);
+ HandleCode(D, ANALYSIS_PATH,
+ (Mgr->InliningMode == All ? 0 : &VisitedCallees));
+
+ // Add the visited callees to the global visited set.
+ for (SetOfConstDecls::const_iterator I = VisitedCallees.begin(),
+ E = VisitedCallees.end(); I != E; ++I){
+ CallGraphNode *VN = CG.getNode(*I);
+ if (VN)
+ Visited.insert(VN);
}
-
- case Decl::ObjCCategoryImpl:
- case Decl::ObjCImplementation: {
- ObjCImplDecl *ID = cast<ObjCImplDecl>(D);
- HandleCode(ID);
-
- for (ObjCContainerDecl::method_iterator MI = ID->meth_begin(),
- ME = ID->meth_end(); MI != ME; ++MI) {
- BugReporter BR(*Mgr);
- checkerMgr->runCheckersOnASTDecl(*MI, *Mgr, BR);
-
- if ((*MI)->isThisDeclarationADefinition()) {
- if (!Opts.AnalyzeSpecificFunction.empty() &&
- Opts.AnalyzeSpecificFunction !=
- (*MI)->getSelector().getAsString())
- continue;
- DisplayFunction(*MI);
- HandleCode(*MI);
- }
- }
- break;
+ Visited.insert(N);
+
+ // Push the children into the queue.
+ for (CallGraphNode::const_iterator CI = N->begin(),
+ CE = N->end(); CI != CE; ++CI) {
+ BFSQueue.push(*CI);
}
-
- default:
- break;
}
}
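The traversal above follows a standard pattern: BFS over the call graph with an externally shared Visited set, so that functions already inlined into an earlier root are skipped. A stand-alone sketch of the pattern (illustrative types, not analyzer code):

#include <queue>
#include <set>
#include <vector>

struct Node { std::vector<Node*> Callees; };

static void analyzeRoots(const std::vector<Node*> &Roots,
                         std::set<Node*> &Visited) {
  std::queue<Node*> Work;
  for (size_t i = 0; i != Roots.size(); ++i)
    Work.push(Roots[i]);
  while (!Work.empty()) {
    Node *N = Work.front();
    Work.pop();
    // Skip nodes already analyzed, or already inlined into a caller.
    if (!Visited.insert(N).second)
      continue;
    // ... analyze N here; the analysis may add inlined callees to Visited ...
    for (size_t i = 0; i != N->Callees.size(); ++i)
      Work.push(N->Callees[i]);
  }
}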
void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
- BugReporter BR(*Mgr);
- TranslationUnitDecl *TU = C.getTranslationUnitDecl();
- checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
- HandleDeclContext(C, TU);
+ // Don't run the actions if an error has occurred with parsing the file.
+ DiagnosticsEngine &Diags = PP.getDiagnostics();
+ if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
+ return;
- // After all decls handled, run checkers on the entire TranslationUnit.
- checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
+ {
+ if (TUTotalTimer) TUTotalTimer->startTimer();
+
+ // Introduce a scope to destroy BR before Mgr.
+ BugReporter BR(*Mgr);
+ TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
+
+ // Run the AST-only checks using the order in which functions are defined.
+    // If inlining is not turned on, use the simplest function order for
+    // path-sensitive analysis as well.
+ RecVisitorMode = (Mgr->shouldInlineCall() ? ANALYSIS_SYNTAX : ANALYSIS_ALL);
+ RecVisitorBR = &BR;
+
+ // Process all the top level declarations.
+ for (SetOfDecls::iterator I = LocalTUDecls.begin(),
+ E = LocalTUDecls.end(); I != E; ++I)
+ TraverseDecl(*I);
+
+ if (Mgr->shouldInlineCall())
+      HandleDeclsCallGraph();
+
+ // After all decls handled, run checkers on the entire TranslationUnit.
+ checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
+
+ RecVisitorBR = 0;
+ }
// Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
// FIXME: This should be replaced with something that doesn't rely on
// side-effects in PathDiagnosticConsumer's destructor. This is required when
// used with option -disable-free.
Mgr.reset(NULL);
+
+ if (TUTotalTimer) TUTotalTimer->stopTimer();
+
+ // Count how many basic blocks we have not covered.
+ NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
+ if (NumBlocksInAnalyzedFunctions > 0)
+ PercentReachableBlocks =
+ (FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
+ NumBlocksInAnalyzedFunctions;
+
}
static void FindBlocks(DeclContext *D, SmallVectorImpl<Decl*> &WL) {
@@ -261,24 +443,41 @@ static void FindBlocks(DeclContext *D, SmallVectorImpl<Decl*> &WL) {
FindBlocks(DC, WL);
}
-static void RunPathSensitiveChecks(AnalysisConsumer &C, AnalysisManager &mgr,
- Decl *D);
-
-void AnalysisConsumer::HandleCode(Decl *D) {
+static std::string getFunctionName(const Decl *D) {
+ if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
+ return ID->getSelector().getAsString();
+ }
+ if (const FunctionDecl *ND = dyn_cast<FunctionDecl>(D)) {
+ IdentifierInfo *II = ND->getIdentifier();
+ if (II)
+ return II->getName();
+ }
+ return "";
+}
- // Don't run the actions if an error has occurred with parsing the file.
- DiagnosticsEngine &Diags = PP.getDiagnostics();
- if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
- return;
+bool AnalysisConsumer::skipFunction(Decl *D) {
+ if (!Opts.AnalyzeSpecificFunction.empty() &&
+ getFunctionName(D) != Opts.AnalyzeSpecificFunction)
+ return true;
// Don't run the actions on declarations in header files unless
// otherwise specified.
SourceManager &SM = Ctx->getSourceManager();
SourceLocation SL = SM.getExpansionLoc(D->getLocation());
if (!Opts.AnalyzeAll && !SM.isFromMainFile(SL))
+ return true;
+
+ return false;
+}
+
+void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
+ SetOfConstDecls *VisitedCallees) {
+ if (skipFunction(D))
return;
- // Clear the AnalysisManager of old AnalysisContexts.
+ DisplayFunction(D, Mode);
+
+ // Clear the AnalysisManager of old AnalysisDeclContexts.
Mgr->ClearContexts();
// Dispatch on the actions.
@@ -292,9 +491,12 @@ void AnalysisConsumer::HandleCode(Decl *D) {
for (SmallVectorImpl<Decl*>::iterator WI=WL.begin(), WE=WL.end();
WI != WE; ++WI)
if ((*WI)->hasBody()) {
- checkerMgr->runCheckersOnASTBody(*WI, *Mgr, BR);
- if (checkerMgr->hasPathSensitiveCheckers())
- RunPathSensitiveChecks(*this, *Mgr, *WI);
+ if (Mode != ANALYSIS_PATH)
+ checkerMgr->runCheckersOnASTBody(*WI, *Mgr, BR);
+ if (Mode != ANALYSIS_SYNTAX && checkerMgr->hasPathSensitiveCheckers()) {
+ RunPathSensitiveChecks(*WI, VisitedCallees);
+ NumFunctionsAnalyzed++;
+ }
}
}
@@ -302,53 +504,53 @@ void AnalysisConsumer::HandleCode(Decl *D) {
// Path-sensitive checking.
//===----------------------------------------------------------------------===//
-static void ActionExprEngine(AnalysisConsumer &C, AnalysisManager &mgr,
- Decl *D, bool ObjCGCEnabled) {
+void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
+ SetOfConstDecls *VisitedCallees) {
// Construct the analysis engine. First check if the CFG is valid.
// FIXME: Inter-procedural analysis will need to handle invalid CFGs.
- if (!mgr.getCFG(D))
+ if (!Mgr->getCFG(D))
return;
- ExprEngine Eng(mgr, ObjCGCEnabled);
+
+ ExprEngine Eng(*Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries);
// Set the graph auditor.
- llvm::OwningPtr<ExplodedNode::Auditor> Auditor;
- if (mgr.shouldVisualizeUbigraph()) {
+ OwningPtr<ExplodedNode::Auditor> Auditor;
+ if (Mgr->shouldVisualizeUbigraph()) {
Auditor.reset(CreateUbiViz());
ExplodedNode::SetAuditor(Auditor.get());
}
// Execute the worklist algorithm.
- Eng.ExecuteWorkList(mgr.getStackFrame(D, 0), mgr.getMaxNodes());
+ Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D, 0),
+ Mgr->getMaxNodes());
// Release the auditor (if any) so that it doesn't monitor the graph
// created BugReporter.
ExplodedNode::SetAuditor(0);
// Visualize the exploded graph.
- if (mgr.shouldVisualizeGraphviz())
- Eng.ViewGraph(mgr.shouldTrimGraph());
+ if (Mgr->shouldVisualizeGraphviz())
+ Eng.ViewGraph(Mgr->shouldTrimGraph());
// Display warnings.
Eng.getBugReporter().FlushReports();
}
-static void RunPathSensitiveChecks(AnalysisConsumer &C, AnalysisManager &mgr,
- Decl *D) {
+void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
+ SetOfConstDecls *Visited) {
- switch (mgr.getLangOptions().getGC()) {
- default:
- llvm_unreachable("Invalid GC mode.");
+ switch (Mgr->getLangOpts().getGC()) {
case LangOptions::NonGC:
- ActionExprEngine(C, mgr, D, false);
+ ActionExprEngine(D, false, Visited);
break;
case LangOptions::GCOnly:
- ActionExprEngine(C, mgr, D, true);
+ ActionExprEngine(D, true, Visited);
break;
case LangOptions::HybridGC:
- ActionExprEngine(C, mgr, D, false);
- ActionExprEngine(C, mgr, D, true);
+ ActionExprEngine(D, false, Visited);
+ ActionExprEngine(D, true, Visited);
break;
}
}
@@ -374,7 +576,7 @@ ASTConsumer* ento::CreateAnalysisConsumer(const Preprocessor& pp,
namespace {
class UbigraphViz : public ExplodedNode::Auditor {
- llvm::OwningPtr<raw_ostream> Out;
+ OwningPtr<raw_ostream> Out;
llvm::sys::Path Dir, Filename;
unsigned Cntr;
@@ -408,7 +610,7 @@ static ExplodedNode::Auditor* CreateUbiViz() {
llvm::errs() << "Writing '" << Filename.str() << "'.\n";
- llvm::OwningPtr<llvm::raw_fd_ostream> Stream;
+ OwningPtr<llvm::raw_fd_ostream> Stream;
Stream.reset(new llvm::raw_fd_ostream(Filename.c_str(), ErrMsg));
if (!ErrMsg.empty())
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index a59fcad..c06da0d 100644
--- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -104,7 +104,7 @@ CheckerManager *ento::createCheckerManager(const AnalyzerOptions &opts,
const LangOptions &langOpts,
ArrayRef<std::string> plugins,
DiagnosticsEngine &diags) {
- llvm::OwningPtr<CheckerManager> checkerMgr(new CheckerManager(langOpts));
+ OwningPtr<CheckerManager> checkerMgr(new CheckerManager(langOpts));
SmallVector<CheckerOptInfo, 8> checkerOpts;
for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
new file mode 100644
index 0000000..eea1055
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
@@ -0,0 +1,230 @@
+//===--- CompilationDatabase.cpp - ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains multiple implementations for CompilationDatabases.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/CompilationDatabase.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/JSONParser.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
+
+namespace clang {
+namespace tooling {
+
+namespace {
+
+/// \brief A parser for JSON escaped strings of command line arguments.
+///
+/// Assumes \-escaping for quoted arguments (see the documentation of
+/// unescapeJSONCommandLine(...)).
+class CommandLineArgumentParser {
+ public:
+ CommandLineArgumentParser(StringRef CommandLine)
+ : Input(CommandLine), Position(Input.begin()-1) {}
+
+ std::vector<std::string> parse() {
+ bool HasMoreInput = true;
+ while (HasMoreInput && nextNonWhitespace()) {
+ std::string Argument;
+ HasMoreInput = parseStringInto(Argument);
+ CommandLine.push_back(Argument);
+ }
+ return CommandLine;
+ }
+
+ private:
+ // All private methods return true if there is more input available.
+
+ bool parseStringInto(std::string &String) {
+ do {
+ if (*Position == '"') {
+ if (!parseQuotedStringInto(String)) return false;
+ } else {
+ if (!parseFreeStringInto(String)) return false;
+ }
+ } while (*Position != ' ');
+ return true;
+ }
+
+ bool parseQuotedStringInto(std::string &String) {
+ if (!next()) return false;
+ while (*Position != '"') {
+ if (!skipEscapeCharacter()) return false;
+ String.push_back(*Position);
+ if (!next()) return false;
+ }
+ return next();
+ }
+
+ bool parseFreeStringInto(std::string &String) {
+ do {
+ if (!skipEscapeCharacter()) return false;
+ String.push_back(*Position);
+ if (!next()) return false;
+ } while (*Position != ' ' && *Position != '"');
+ return true;
+ }
+
+ bool skipEscapeCharacter() {
+ if (*Position == '\\') {
+ return next();
+ }
+ return true;
+ }
+
+ bool nextNonWhitespace() {
+ do {
+ if (!next()) return false;
+ } while (*Position == ' ');
+ return true;
+ }
+
+ bool next() {
+ ++Position;
+ if (Position == Input.end()) return false;
+ // Remove the JSON escaping first. This is done unconditionally.
+ if (*Position == '\\') ++Position;
+ return Position != Input.end();
+ }
+
+ const StringRef Input;
+ StringRef::iterator Position;
+ std::vector<std::string> CommandLine;
+};
+
+std::vector<std::string> unescapeJSONCommandLine(
+ StringRef JSONEscapedCommandLine) {
+ CommandLineArgumentParser parser(JSONEscapedCommandLine);
+ return parser.parse();
+}
+
+} // end namespace
+
+CompilationDatabase::~CompilationDatabase() {}
+
+CompilationDatabase *
+CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
+ std::string &ErrorMessage) {
+ llvm::SmallString<1024> JSONDatabasePath(BuildDirectory);
+ llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
+ llvm::OwningPtr<CompilationDatabase> Database(
+ JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage));
+ if (!Database) {
+ return NULL;
+ }
+ return Database.take();
+}
+
+JSONCompilationDatabase *
+JSONCompilationDatabase::loadFromFile(StringRef FilePath,
+ std::string &ErrorMessage) {
+ llvm::OwningPtr<llvm::MemoryBuffer> DatabaseBuffer;
+ llvm::error_code Result =
+ llvm::MemoryBuffer::getFile(FilePath, DatabaseBuffer);
+ if (Result != 0) {
+ ErrorMessage = "Error while opening JSON database: " + Result.message();
+ return NULL;
+ }
+ llvm::OwningPtr<JSONCompilationDatabase> Database(
+ new JSONCompilationDatabase(DatabaseBuffer.take()));
+ if (!Database->parse(ErrorMessage))
+ return NULL;
+ return Database.take();
+}
+
+JSONCompilationDatabase *
+JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
+ std::string &ErrorMessage) {
+ llvm::OwningPtr<llvm::MemoryBuffer> DatabaseBuffer(
+ llvm::MemoryBuffer::getMemBuffer(DatabaseString));
+ llvm::OwningPtr<JSONCompilationDatabase> Database(
+ new JSONCompilationDatabase(DatabaseBuffer.take()));
+ if (!Database->parse(ErrorMessage))
+ return NULL;
+ return Database.take();
+}
+
+std::vector<CompileCommand>
+JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
+ llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefI = IndexByFile.find(FilePath);
+ if (CommandsRefI == IndexByFile.end())
+ return std::vector<CompileCommand>();
+ const std::vector<CompileCommandRef> &CommandsRef = CommandsRefI->getValue();
+ std::vector<CompileCommand> Commands;
+ for (int I = 0, E = CommandsRef.size(); I != E; ++I) {
+ Commands.push_back(CompileCommand(
+ // FIXME: Escape correctly:
+ CommandsRef[I].first,
+ unescapeJSONCommandLine(CommandsRef[I].second)));
+ }
+ return Commands;
+}
+
+bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
+ llvm::SourceMgr SM;
+ llvm::JSONParser Parser(Database->getBuffer(), &SM);
+ llvm::JSONValue *Root = Parser.parseRoot();
+ if (Root == NULL) {
+ ErrorMessage = "Error while parsing JSON.";
+ return false;
+ }
+ llvm::JSONArray *Array = dyn_cast<llvm::JSONArray>(Root);
+ if (Array == NULL) {
+ ErrorMessage = "Expected array.";
+ return false;
+ }
+ for (llvm::JSONArray::const_iterator AI = Array->begin(), AE = Array->end();
+ AI != AE; ++AI) {
+ const llvm::JSONObject *Object = dyn_cast<llvm::JSONObject>(*AI);
+ if (Object == NULL) {
+ ErrorMessage = "Expected object.";
+ return false;
+ }
+ StringRef EntryDirectory;
+ StringRef EntryFile;
+ StringRef EntryCommand;
+ for (llvm::JSONObject::const_iterator KVI = Object->begin(),
+ KVE = Object->end();
+ KVI != KVE; ++KVI) {
+ const llvm::JSONValue *Value = (*KVI)->Value;
+ if (Value == NULL) {
+ ErrorMessage = "Expected value.";
+ return false;
+ }
+ const llvm::JSONString *ValueString =
+ dyn_cast<llvm::JSONString>(Value);
+ if (ValueString == NULL) {
+ ErrorMessage = "Expected string as value.";
+ return false;
+ }
+ if ((*KVI)->Key->getRawText() == "directory") {
+ EntryDirectory = ValueString->getRawText();
+ } else if ((*KVI)->Key->getRawText() == "file") {
+ EntryFile = ValueString->getRawText();
+ } else if ((*KVI)->Key->getRawText() == "command") {
+ EntryCommand = ValueString->getRawText();
+ } else {
+ ErrorMessage = (Twine("Unknown key: \"") +
+ (*KVI)->Key->getRawText() + "\"").str();
+ return false;
+ }
+ }
+ IndexByFile[EntryFile].push_back(
+ CompileCommandRef(EntryDirectory, EntryCommand));
+ }
+ return true;
+}
+
+} // end namespace tooling
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
new file mode 100644
index 0000000..fa2374f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
@@ -0,0 +1,296 @@
+//===--- Tooling.cpp - Running clang standalone tools ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements functions to run clang tools standalone instead
+// of running them as a plugin.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Tooling.h"
+#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace tooling {
+
+FrontendActionFactory::~FrontendActionFactory() {}
+
+// FIXME: This file contains structural duplication with other parts of the
+// code that sets up a compiler to run tools on it, and we should refactor
+// it to be based on the same framework.
+
+/// \brief Builds a clang driver initialized for running clang tools.
+static clang::driver::Driver *newDriver(clang::DiagnosticsEngine *Diagnostics,
+ const char *BinaryName) {
+ const std::string DefaultOutputName = "a.out";
+ clang::driver::Driver *CompilerDriver = new clang::driver::Driver(
+ BinaryName, llvm::sys::getDefaultTargetTriple(),
+ DefaultOutputName, false, *Diagnostics);
+ CompilerDriver->setTitle("clang_based_tool");
+ return CompilerDriver;
+}
+
+/// \brief Retrieves the clang CC1 specific flags out of the compilation's jobs.
+///
+/// Returns NULL on error.
+static const clang::driver::ArgStringList *getCC1Arguments(
+ clang::DiagnosticsEngine *Diagnostics,
+ clang::driver::Compilation *Compilation) {
+ // We expect to get back exactly one Command job; if we didn't, something
+ // failed. Extract that job from the Compilation.
+ const clang::driver::JobList &Jobs = Compilation->getJobs();
+ if (Jobs.size() != 1 || !isa<clang::driver::Command>(*Jobs.begin())) {
+ llvm::SmallString<256> error_msg;
+ llvm::raw_svector_ostream error_stream(error_msg);
+ Compilation->PrintJob(error_stream, Compilation->getJobs(), "; ", true);
+ Diagnostics->Report(clang::diag::err_fe_expected_compiler_job)
+ << error_stream.str();
+ return NULL;
+ }
+
+ // The one job we find should be to invoke clang again.
+ const clang::driver::Command *Cmd =
+ cast<clang::driver::Command>(*Jobs.begin());
+ if (StringRef(Cmd->getCreator().getName()) != "clang") {
+ Diagnostics->Report(clang::diag::err_fe_expected_clang_command);
+ return NULL;
+ }
+
+ return &Cmd->getArguments();
+}
+
+/// \brief Returns a clang build invocation initialized from the CC1 flags.
+static clang::CompilerInvocation *newInvocation(
+ clang::DiagnosticsEngine *Diagnostics,
+ const clang::driver::ArgStringList &CC1Args) {
+ assert(!CC1Args.empty() && "Must at least contain the program name!");
+ clang::CompilerInvocation *Invocation = new clang::CompilerInvocation;
+ clang::CompilerInvocation::CreateFromArgs(
+ *Invocation, CC1Args.data() + 1, CC1Args.data() + CC1Args.size(),
+ *Diagnostics);
+ Invocation->getFrontendOpts().DisableFree = false;
+ return Invocation;
+}
+
+bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
+ const Twine &FileName) {
+ SmallString<16> FileNameStorage;
+ StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
+ const char *const CommandLine[] = {
+ "clang-tool", "-fsyntax-only", FileNameRef.data()
+ };
+ FileManager Files((FileSystemOptions()));
+ ToolInvocation Invocation(
+ std::vector<std::string>(
+ CommandLine,
+ CommandLine + llvm::array_lengthof(CommandLine)),
+ ToolAction, &Files);
+
+ SmallString<1024> CodeStorage;
+ Invocation.mapVirtualFile(FileNameRef,
+ Code.toNullTerminatedStringRef(CodeStorage));
+ return Invocation.run();
+}
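+
+// For example (a sketch; clang::SyntaxOnlyAction comes from
+// clang/Frontend/FrontendActions.h, which is not included here):
+//
+//   bool OK = runToolOnCode(new clang::SyntaxOnlyAction, "int x = 0;",
+//                           "input.cc");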
+
+/// \brief Returns the absolute path of 'File', prepending 'BaseDirectory'
+/// if 'File' is not absolute.
+///
+/// Otherwise returns 'File'.
+/// If 'File' starts with "./", the returned path will not contain the "./".
+/// Otherwise, the returned path will contain the literal path-concatenation of
+/// 'BaseDirectory' and 'File'.
+///
+/// \param File Either an absolute or relative path.
+/// \param BaseDirectory An absolute path.
+static std::string getAbsolutePath(
+ StringRef File, StringRef BaseDirectory) {
+ assert(llvm::sys::path::is_absolute(BaseDirectory));
+ if (llvm::sys::path::is_absolute(File)) {
+ return File;
+ }
+ StringRef RelativePath(File);
+ if (RelativePath.startswith("./")) {
+ RelativePath = RelativePath.substr(strlen("./"));
+ }
+ llvm::SmallString<1024> AbsolutePath(BaseDirectory);
+ llvm::sys::path::append(AbsolutePath, RelativePath);
+ return AbsolutePath.str();
+}
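+
+// For instance (paths illustrative):
+//   getAbsolutePath("/a/b.cc", "/base") == "/a/b.cc"
+//   getAbsolutePath("./b.cc", "/base")  == "/base/b.cc"
+//   getAbsolutePath("b.cc", "/base")    == "/base/b.cc"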
+
+ToolInvocation::ToolInvocation(
+ ArrayRef<std::string> CommandLine, FrontendAction *ToolAction,
+ FileManager *Files)
+ : CommandLine(CommandLine.vec()), ToolAction(ToolAction), Files(Files) {
+}
+
+void ToolInvocation::mapVirtualFile(StringRef FilePath, StringRef Content) {
+ MappedFileContents[FilePath] = Content;
+}
+
+bool ToolInvocation::run() {
+ std::vector<const char*> Argv;
+ for (int I = 0, E = CommandLine.size(); I != E; ++I)
+ Argv.push_back(CommandLine[I].c_str());
+ const char *const BinaryName = Argv[0];
+ DiagnosticOptions DefaultDiagnosticOptions;
+ TextDiagnosticPrinter DiagnosticPrinter(
+ llvm::errs(), DefaultDiagnosticOptions);
+ DiagnosticsEngine Diagnostics(llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs>(
+ new DiagnosticIDs()), &DiagnosticPrinter, false);
+
+ const llvm::OwningPtr<clang::driver::Driver> Driver(
+ newDriver(&Diagnostics, BinaryName));
+ // Since the input might only be virtual, don't check whether it exists.
+ Driver->setCheckInputsExist(false);
+ const llvm::OwningPtr<clang::driver::Compilation> Compilation(
+ Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
+ const clang::driver::ArgStringList *const CC1Args = getCC1Arguments(
+ &Diagnostics, Compilation.get());
+ if (CC1Args == NULL) {
+ return false;
+ }
+ llvm::OwningPtr<clang::CompilerInvocation> Invocation(
+ newInvocation(&Diagnostics, *CC1Args));
+ return runInvocation(BinaryName, Compilation.get(),
+ Invocation.take(), *CC1Args, ToolAction.take());
+}
+
+// Exists solely so its address can be used to look up the resource path.
+static int StaticSymbol;
+
+bool ToolInvocation::runInvocation(
+ const char *BinaryName,
+ clang::driver::Compilation *Compilation,
+ clang::CompilerInvocation *Invocation,
+ const clang::driver::ArgStringList &CC1Args,
+ clang::FrontendAction *ToolAction) {
+ llvm::OwningPtr<clang::FrontendAction> ScopedToolAction(ToolAction);
+ // Show the invocation, with -v.
+ if (Invocation->getHeaderSearchOpts().Verbose) {
+ llvm::errs() << "clang Invocation:\n";
+ Compilation->PrintJob(llvm::errs(), Compilation->getJobs(), "\n", true);
+ llvm::errs() << "\n";
+ }
+
+ // Create a compiler instance to handle the actual work.
+ clang::CompilerInstance Compiler;
+ Compiler.setInvocation(Invocation);
+ Compiler.setFileManager(Files);
+ // FIXME: What about LangOpts?
+
+ // Create the compiler's actual diagnostics engine.
+ Compiler.createDiagnostics(CC1Args.size(),
+ const_cast<char**>(CC1Args.data()));
+ if (!Compiler.hasDiagnostics())
+ return false;
+
+ Compiler.createSourceManager(*Files);
+ addFileMappingsTo(Compiler.getSourceManager());
+
+ // Infer the builtin include path if unspecified.
+ if (Compiler.getHeaderSearchOpts().UseBuiltinIncludes &&
+ Compiler.getHeaderSearchOpts().ResourceDir.empty()) {
+ // This just needs to be some symbol in the binary.
+ void *const SymbolAddr = &StaticSymbol;
+ Compiler.getHeaderSearchOpts().ResourceDir =
+ clang::CompilerInvocation::GetResourcesPath(BinaryName, SymbolAddr);
+ }
+
+ const bool Success = Compiler.ExecuteAction(*ToolAction);
+
+ Compiler.resetAndLeakFileManager();
+ return Success;
+}
+
+void ToolInvocation::addFileMappingsTo(SourceManager &Sources) {
+ for (llvm::StringMap<StringRef>::const_iterator
+ It = MappedFileContents.begin(), End = MappedFileContents.end();
+ It != End; ++It) {
+ // Inject the given code as the contents of the named virtual file.
+ const llvm::MemoryBuffer *Input =
+ llvm::MemoryBuffer::getMemBuffer(It->getValue());
+ // FIXME: figure out what '0' stands for.
+ const FileEntry *FromFile = Files->getVirtualFile(
+ It->getKey(), Input->getBufferSize(), 0);
+ // FIXME: figure out memory management ('true').
+ Sources.overrideFileContents(FromFile, Input, true);
+ }
+}
+
+ClangTool::ClangTool(const CompilationDatabase &Compilations,
+ ArrayRef<std::string> SourcePaths)
+ : Files((FileSystemOptions())) {
+ llvm::SmallString<1024> BaseDirectory;
+ if (const char *PWD = ::getenv("PWD"))
+ BaseDirectory = PWD;
+ else
+ llvm::sys::fs::current_path(BaseDirectory);
+ for (unsigned I = 0, E = SourcePaths.size(); I != E; ++I) {
+ llvm::SmallString<1024> File(getAbsolutePath(
+ SourcePaths[I], BaseDirectory));
+
+ std::vector<CompileCommand> CompileCommands =
+ Compilations.getCompileCommands(File.str());
+ if (!CompileCommands.empty()) {
+ for (int I = 0, E = CompileCommands.size(); I != E; ++I) {
+ CompileCommand &Command = CompileCommands[I];
+ if (!Command.Directory.empty()) {
+ // FIXME: What should happen if CommandLine includes -working-directory
+ // as well?
+ Command.CommandLine.push_back(
+ "-working-directory=" + Command.Directory);
+ }
+ CommandLines.push_back(std::make_pair(File.str(), Command.CommandLine));
+ }
+ } else {
+ // FIXME: There are two use cases here: doing a fuzzy
+ // "find . -name '*.cc' | xargs tool" match, where, as a user, I don't
+ // care about the .cc files that were not found, and the use case where
+ // I specify all files I want to run over explicitly, where this should
+ // be an error. We'll want to add an option for this.
+ llvm::outs() << "Skipping " << File << ". Command line not found.\n";
+ }
+ }
+}
+
+void ClangTool::mapVirtualFile(StringRef FilePath, StringRef Content) {
+ MappedFileContents.push_back(std::make_pair(FilePath, Content));
+}
+
+int ClangTool::run(FrontendActionFactory *ActionFactory) {
+ bool ProcessingFailed = false;
+ for (unsigned I = 0; I < CommandLines.size(); ++I) {
+ std::string File = CommandLines[I].first;
+ std::vector<std::string> &CommandLine = CommandLines[I].second;
+ llvm::outs() << "Processing: " << File << ".\n";
+ ToolInvocation Invocation(CommandLine, ActionFactory->create(), &Files);
+ for (int I = 0, E = MappedFileContents.size(); I != E; ++I) {
+ Invocation.mapVirtualFile(MappedFileContents[I].first,
+ MappedFileContents[I].second);
+ }
+ if (!Invocation.run()) {
+ llvm::outs() << "Error while processing " << File << ".\n";
+ ProcessingFailed = true;
+ }
+ }
+ return ProcessingFailed ? 1 : 0;
+}
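+
+// A typical tool built on these pieces looks roughly like the sketch below;
+// BuildPath, SourcePaths and SomeActionFactory are hypothetical:
+//
+//   std::string Error;
+//   llvm::OwningPtr<CompilationDatabase> DB(
+//       CompilationDatabase::loadFromDirectory(BuildPath, Error));
+//   ClangTool Tool(*DB, SourcePaths);
+//   return Tool.run(new SomeActionFactory);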
+
+} // end namespace tooling
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
index 27f79b7..a211090 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
@@ -30,6 +30,7 @@
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/LinkAllPasses.h"
#include <cstdio>
using namespace clang;
@@ -76,7 +77,8 @@ static int cc1_test(DiagnosticsEngine &Diags,
// Create a compiler invocation.
llvm::errs() << "cc1 creating invocation.\n";
CompilerInvocation Invocation;
- CompilerInvocation::CreateFromArgs(Invocation, ArgBegin, ArgEnd, Diags);
+ if (!CompilerInvocation::CreateFromArgs(Invocation, ArgBegin, ArgEnd, Diags))
+ return 1;
// Convert the invocation back to argument strings.
std::vector<std::string> InvocationArgs;
@@ -94,8 +96,9 @@ static int cc1_test(DiagnosticsEngine &Diags,
// Convert those arguments to another invocation, and check that we got the
// same thing.
CompilerInvocation Invocation2;
- CompilerInvocation::CreateFromArgs(Invocation2, Invocation2Args.begin(),
- Invocation2Args.end(), Diags);
+ if (!CompilerInvocation::CreateFromArgs(Invocation2, Invocation2Args.begin(),
+ Invocation2Args.end(), Diags))
+ return 1;
// FIXME: Implement CompilerInvocation comparison.
if (true) {
@@ -114,8 +117,8 @@ static int cc1_test(DiagnosticsEngine &Diags,
int cc1_main(const char **ArgBegin, const char **ArgEnd,
const char *Argv0, void *MainAddr) {
- llvm::OwningPtr<CompilerInstance> Clang(new CompilerInstance());
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
// Run clang -cc1 test.
if (ArgBegin != ArgEnd && StringRef(ArgBegin[0]) == "-cc1test") {
@@ -134,8 +137,9 @@ int cc1_main(const char **ArgBegin, const char **ArgEnd,
// well formed diagnostic object.
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
DiagnosticsEngine Diags(DiagID, DiagsBuffer);
- CompilerInvocation::CreateFromArgs(Clang->getInvocation(), ArgBegin, ArgEnd,
- Diags);
+ bool Success;
+ Success = CompilerInvocation::CreateFromArgs(Clang->getInvocation(),
+ ArgBegin, ArgEnd, Diags);
// Infer the builtin include path if unspecified.
if (Clang->getHeaderSearchOpts().UseBuiltinIncludes &&
@@ -154,9 +158,11 @@ int cc1_main(const char **ArgBegin, const char **ArgEnd,
static_cast<void*>(&Clang->getDiagnostics()));
DiagsBuffer->FlushDiagnostics(Clang->getDiagnostics());
+ if (!Success)
+ return 1;
// Execute the frontend actions.
- bool Success = ExecuteCompilerInvocation(Clang.get());
+ Success = ExecuteCompilerInvocation(Clang.get());
// If any timers were active but haven't been destroyed yet, print their
// results now. This happens in -disable-free mode.
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
index 7cc42aa..508d6da 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
@@ -63,8 +63,17 @@ struct AssemblerInvocation {
/// @name Target Options
/// @{
+ /// The name of the target triple to assemble for.
std::string Triple;
+ /// If given, the name of the target CPU to determine which instructions
+ /// are legal.
+ std::string CPU;
+
+ /// The list of target specific features to enable or disable -- this should
+ /// be a list of strings starting with '+' or '-'.
+ std::vector<std::string> Features;
+
/// @}
/// @name Language Options
/// @{
@@ -72,6 +81,8 @@ struct AssemblerInvocation {
std::vector<std::string> IncludePaths;
unsigned NoInitialTextSection : 1;
unsigned SaveTemporaryLabels : 1;
+ unsigned GenDwarfForAssembly : 1;
+ std::string DwarfDebugFlags;
/// @}
/// @name Frontend Options
@@ -120,17 +131,19 @@ public:
NoExecStack = 0;
}
- static void CreateFromArgs(AssemblerInvocation &Res, const char **ArgBegin,
+ static bool CreateFromArgs(AssemblerInvocation &Res, const char **ArgBegin,
const char **ArgEnd, DiagnosticsEngine &Diags);
};
}
-void AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
+bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
const char **ArgBegin,
const char **ArgEnd,
DiagnosticsEngine &Diags) {
using namespace clang::driver::cc1asoptions;
+ bool Success = true;
+
// Parse the arguments.
OwningPtr<OptTable> OptTbl(createCC1AsOptTable());
unsigned MissingArgIndex, MissingArgCount;
@@ -138,26 +151,36 @@ void AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
OptTbl->ParseArgs(ArgBegin, ArgEnd,MissingArgIndex, MissingArgCount));
// Check for missing argument error.
- if (MissingArgCount)
+ if (MissingArgCount) {
Diags.Report(diag::err_drv_missing_argument)
<< Args->getArgString(MissingArgIndex) << MissingArgCount;
+ Success = false;
+ }
// Issue errors on unknown arguments.
for (arg_iterator it = Args->filtered_begin(cc1asoptions::OPT_UNKNOWN),
- ie = Args->filtered_end(); it != ie; ++it)
+ ie = Args->filtered_end(); it != ie; ++it) {
Diags.Report(diag::err_drv_unknown_argument) << (*it) ->getAsString(*Args);
+ Success = false;
+ }
// Construct the invocation.
// Target Options
- Opts.Triple = Triple::normalize(Args->getLastArgValue(OPT_triple));
- if (Opts.Triple.empty()) // Use the host triple if unspecified.
- Opts.Triple = sys::getHostTriple();
+ Opts.Triple = llvm::Triple::normalize(Args->getLastArgValue(OPT_triple));
+ Opts.CPU = Args->getLastArgValue(OPT_target_cpu);
+ Opts.Features = Args->getAllArgValues(OPT_target_feature);
+
+ // Use the default target triple if unspecified.
+ if (Opts.Triple.empty())
+ Opts.Triple = llvm::sys::getDefaultTargetTriple();
// Language Options
Opts.IncludePaths = Args->getAllArgValues(OPT_I);
Opts.NoInitialTextSection = Args->hasArg(OPT_n);
Opts.SaveTemporaryLabels = Args->hasArg(OPT_L);
+ Opts.GenDwarfForAssembly = Args->hasArg(OPT_g);
+ Opts.DwarfDebugFlags = Args->getLastArgValue(OPT_dwarf_debug_flags);
// Frontend Options
if (Args->hasArg(OPT_INPUT)) {
@@ -167,8 +190,10 @@ void AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
const Arg *A = it;
if (First)
Opts.InputFile = A->getValue(*Args);
- else
+ else {
Diags.Report(diag::err_drv_unknown_argument) << A->getAsString(*Args);
+ Success = false;
+ }
}
}
Opts.LLVMArgs = Args->getAllArgValues(OPT_mllvm);
@@ -182,10 +207,11 @@ void AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
.Case("null", FT_Null)
.Case("obj", FT_Obj)
.Default(~0U);
- if (OutputType == ~0U)
+ if (OutputType == ~0U) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(*Args) << Name;
- else
+ Success = false;
+ } else
Opts.OutputType = FileType(OutputType);
}
Opts.ShowHelp = Args->hasArg(OPT_help);
@@ -200,6 +226,8 @@ void AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
// Assemble Options
Opts.RelaxAll = Args->hasArg(OPT_relax_all);
Opts.NoExecStack = Args->hasArg(OPT_no_exec_stack);
+
+ return Success;
}
static formatted_raw_ostream *GetOutputStream(AssemblerInvocation &Opts,
@@ -267,23 +295,36 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
// FIXME: This is not pretty. MCContext has a ptr to MCObjectFileInfo and
// MCObjectFileInfo needs an MCContext reference in order to initialize itself.
OwningPtr<MCObjectFileInfo> MOFI(new MCObjectFileInfo());
- MCContext Ctx(*MAI, *MRI, MOFI.get());
+ MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr);
// FIXME: Assembler behavior can change with -static.
MOFI->InitMCObjectFileInfo(Opts.Triple,
Reloc::Default, CodeModel::Default, Ctx);
if (Opts.SaveTemporaryLabels)
Ctx.setAllowTemporaryLabels(false);
+ if (Opts.GenDwarfForAssembly)
+ Ctx.setGenDwarfForAssembly(true);
+ if (!Opts.DwarfDebugFlags.empty())
+ Ctx.setDwarfDebugFlags(StringRef(Opts.DwarfDebugFlags));
+
+ // Build up the feature string from the target feature list.
+ std::string FS;
+ if (!Opts.Features.empty()) {
+ FS = Opts.Features[0];
+ for (unsigned i = 1, e = Opts.Features.size(); i != e; ++i)
+ FS += "," + Opts.Features[i];
+ }
OwningPtr<MCStreamer> Str;
OwningPtr<MCInstrInfo> MCII(TheTarget->createMCInstrInfo());
OwningPtr<MCSubtargetInfo>
- STI(TheTarget->createMCSubtargetInfo(Opts.Triple, "", ""));
+ STI(TheTarget->createMCSubtargetInfo(Opts.Triple, Opts.CPU, FS));
// FIXME: There is a bit of code duplication with addPassesToEmitFile.
if (Opts.OutputType == AssemblerInvocation::FT_Asm) {
MCInstPrinter *IP =
- TheTarget->createMCInstPrinter(Opts.OutputAsmVariant, *MAI, *STI);
+ TheTarget->createMCInstPrinter(Opts.OutputAsmVariant, *MAI, *MCII, *MRI,
+ *STI);
MCCodeEmitter *CE = 0;
MCAsmBackend *MAB = 0;
if (Opts.ShowEncoding) {
@@ -292,7 +333,9 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
}
Str.reset(TheTarget->createAsmStreamer(Ctx, *Out, /*asmverbose*/true,
/*useLoc*/ true,
- /*useCFI*/ true, IP, CE, MAB,
+ /*useCFI*/ true,
+ /*useDwarfDirectory*/ true,
+ IP, CE, MAB,
Opts.ShowInst));
} else if (Opts.OutputType == AssemblerInvocation::FT_Null) {
Str.reset(createNullStreamer(Ctx));
@@ -354,7 +397,7 @@ int cc1as_main(const char **ArgBegin, const char **ArgEnd,
TextDiagnosticPrinter *DiagClient
= new TextDiagnosticPrinter(errs(), DiagnosticOptions());
DiagClient->setPrefix("clang -cc1as");
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
DiagnosticsEngine Diags(DiagID, DiagClient);
// Set an error handler, so that any LLVM backend diagnostics go through our
@@ -364,11 +407,12 @@ int cc1as_main(const char **ArgBegin, const char **ArgEnd,
// Parse the arguments.
AssemblerInvocation Asm;
- AssemblerInvocation::CreateFromArgs(Asm, ArgBegin, ArgEnd, Diags);
+ if (!AssemblerInvocation::CreateFromArgs(Asm, ArgBegin, ArgEnd, Diags))
+ return 1;
// Honor -help.
if (Asm.ShowHelp) {
- llvm::OwningPtr<driver::OptTable> Opts(driver::createCC1AsOptTable());
+ OwningPtr<driver::OptTable> Opts(driver::createCC1AsOptTable());
Opts->PrintHelp(llvm::outs(), "clang -cc1as", "Clang Integrated Assembler");
return 0;
}
@@ -391,7 +435,7 @@ int cc1as_main(const char **ArgBegin, const char **ArgEnd,
for (unsigned i = 0; i != NumArgs; ++i)
Args[i + 1] = Asm.LLVMArgs[i].c_str();
Args[NumArgs + 1] = 0;
- llvm::cl::ParseCommandLineOptions(NumArgs + 1, const_cast<char **>(Args));
+ llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args);
}
// Execute the invocation, unless there were parsing errors.
diff --git a/contrib/llvm/tools/clang/tools/driver/driver.cpp b/contrib/llvm/tools/clang/tools/driver/driver.cpp
index bd1d2a2..8c05fff 100644
--- a/contrib/llvm/tools/clang/tools/driver/driver.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/driver.cpp
@@ -12,17 +12,21 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/CC1Options.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Option.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Frontend/Utils.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Config/config.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/ManagedStatic.h"
@@ -187,7 +191,7 @@ static void ExpandArgsFromBuf(const char *Arg,
SmallVectorImpl<const char*> &ArgVector,
std::set<std::string> &SavedStrings) {
const char *FName = Arg + 1;
- llvm::OwningPtr<llvm::MemoryBuffer> MemBuf;
+ OwningPtr<llvm::MemoryBuffer> MemBuf;
if (llvm::MemoryBuffer::getFile(FName, MemBuf)) {
ArgVector.push_back(SaveStringInSet(SavedStrings, Arg));
return;
@@ -272,7 +276,7 @@ static void ParseProgName(SmallVectorImpl<const char *> &ArgVector,
// the function tries to identify a target as prefix. E.g.
// "x86_64-linux-clang" as interpreted as suffix "clang" with
// target prefix "x86_64-linux". If such a target prefix is found,
- // is gets added via -ccc-host-triple as implicit first argument.
+ // it gets added via -target as an implicit first argument.
static const struct {
const char *Suffix;
bool IsCXX;
@@ -332,7 +336,7 @@ static void ParseProgName(SmallVectorImpl<const char *> &ArgVector,
++it;
ArgVector.insert(it, SaveStringInSet(SavedStrings, Prefix));
ArgVector.insert(it,
- SaveStringInSet(SavedStrings, std::string("-ccc-host-triple")));
+ SaveStringInSet(SavedStrings, std::string("-target")));
}
}
@@ -371,25 +375,41 @@ int main(int argc_, const char **argv_) {
llvm::sys::Path Path = GetExecutablePath(argv[0], CanonicalPrefixes);
+ DiagnosticOptions DiagOpts;
+ {
+ // Note that ParseDiagnosticArgs() uses the cc1 option table.
+ OwningPtr<OptTable> CC1Opts(createCC1OptTable());
+ unsigned MissingArgIndex, MissingArgCount;
+ OwningPtr<InputArgList> Args(CC1Opts->ParseArgs(argv.begin()+1, argv.end(),
+ MissingArgIndex, MissingArgCount));
+ // We ignore MissingArgCount and the return value of ParseDiagnosticArgs.
+ // Any errors that would be diagnosed here will also be diagnosed later,
+ // when the DiagnosticsEngine actually exists.
+ (void) ParseDiagnosticArgs(DiagOpts, *Args);
+ }
+ // Now we can create the DiagnosticsEngine with a properly-filled-out
+ // DiagnosticOptions instance.
TextDiagnosticPrinter *DiagClient
- = new TextDiagnosticPrinter(llvm::errs(), DiagnosticOptions());
+ = new TextDiagnosticPrinter(llvm::errs(), DiagOpts);
DiagClient->setPrefix(llvm::sys::path::stem(Path.str()));
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+
DiagnosticsEngine Diags(DiagID, DiagClient);
+ ProcessWarningOptions(Diags, DiagOpts);
#ifdef CLANG_IS_PRODUCTION
const bool IsProduction = true;
#else
const bool IsProduction = false;
#endif
- Driver TheDriver(Path.str(), llvm::sys::getHostTriple(),
+ Driver TheDriver(Path.str(), llvm::sys::getDefaultTargetTriple(),
"a.out", IsProduction, Diags);
// Attempt to find the original path used to invoke the driver, to determine
// the installed path. We do this manually, because we want to support that
// path being a symlink.
{
- llvm::SmallString<128> InstalledPath(argv[0]);
+ SmallString<128> InstalledPath(argv[0]);
// Do a PATH lookup, if there are no directory components.
if (llvm::sys::path::filename(InstalledPath) == InstalledPath) {
@@ -449,7 +469,7 @@ int main(int argc_, const char **argv_) {
argv.insert(&argv[1], ExtraArgs.begin(), ExtraArgs.end());
}
- llvm::OwningPtr<Compilation> C(TheDriver.BuildCompilation(argv));
+ OwningPtr<Compilation> C(TheDriver.BuildCompilation(argv));
int Res = 0;
const Command *FailingCommand = 0;
if (C.get())
diff --git a/contrib/llvm/tools/clang/utils/TableGen/CMakeLists.txt b/contrib/llvm/tools/clang/utils/TableGen/CMakeLists.txt
deleted file mode 100644
index 75a6167..0000000
--- a/contrib/llvm/tools/clang/utils/TableGen/CMakeLists.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-set(LLVM_REQUIRES_EH 1)
-set(LLVM_REQUIRES_RTTI 1)
-
-add_tablegen(clang-tblgen CLANG
- ClangASTNodesEmitter.cpp
- ClangAttrEmitter.cpp
- ClangDiagnosticsEmitter.cpp
- ClangSACheckersEmitter.cpp
- NeonEmitter.cpp
- OptParserEmitter.cpp
- TableGen.cpp
- )
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
index 5c236be..7951fc4 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -16,6 +16,7 @@
#include "llvm/TableGen/Record.h"
#include <algorithm>
#include <cctype>
+#include <set>
using namespace llvm;
@@ -33,8 +34,6 @@ getValueAsListOfStrings(Record &R, StringRef FieldName) {
assert(*i && "Got a null element in a ListInit");
if (StringInit *S = dynamic_cast<StringInit *>(*i))
Strings.push_back(S->getValue());
- else if (CodeInit *C = dynamic_cast<CodeInit *>(*i))
- Strings.push_back(C->getValue());
else
assert(false && "Got a non-string, non-code element in a ListInit");
}
@@ -67,6 +66,30 @@ static std::string WritePCHRecord(StringRef type, StringRef name) {
.Default("Record.push_back(" + std::string(name) + ");\n");
}
+// Normalize attribute name by removing leading and trailing
+// underscores. For example, __foo, foo__, __foo__ would
+// become foo.
+static StringRef NormalizeAttrName(StringRef AttrName) {
+ if (AttrName.startswith("__"))
+ AttrName = AttrName.substr(2, AttrName.size());
+
+ if (AttrName.endswith("__"))
+ AttrName = AttrName.substr(0, AttrName.size() - 2);
+
+ return AttrName;
+}
+
+// Normalize attribute spelling only if the spelling has both leading
+// and trailing underscores. For example, __ms_struct__ will be
+// normalized to "ms_struct"; __cdecl will remain intact.
+static StringRef NormalizeAttrSpelling(StringRef AttrSpelling) {
+ if (AttrSpelling.startswith("__") && AttrSpelling.endswith("__")) {
+ AttrSpelling = AttrSpelling.substr(2, AttrSpelling.size() - 4);
+ }
+
+ return AttrSpelling;
+}
+
namespace {
class Argument {
std::string lowerName, upperName;
@@ -91,6 +114,8 @@ namespace {
virtual void writeAccessors(raw_ostream &OS) const = 0;
virtual void writeAccessorDefinitions(raw_ostream &OS) const {}
virtual void writeCloneArgs(raw_ostream &OS) const = 0;
+ virtual void writeTemplateInstantiationArgs(raw_ostream &OS) const = 0;
+ virtual void writeTemplateInstantiation(raw_ostream &OS) const {}
virtual void writeCtorBody(raw_ostream &OS) const {}
virtual void writeCtorInitializers(raw_ostream &OS) const = 0;
virtual void writeCtorParameters(raw_ostream &OS) const = 0;
@@ -98,6 +123,7 @@ namespace {
virtual void writePCHReadArgs(raw_ostream &OS) const = 0;
virtual void writePCHReadDecls(raw_ostream &OS) const = 0;
virtual void writePCHWrite(raw_ostream &OS) const = 0;
+ virtual void writeValue(raw_ostream &OS) const = 0;
};
class SimpleArgument : public Argument {
@@ -108,6 +134,8 @@ namespace {
: Argument(Arg, Attr), type(T)
{}
+ std::string getType() const { return type; }
+
void writeAccessors(raw_ostream &OS) const {
OS << " " << type << " get" << getUpperName() << "() const {\n";
OS << " return " << getLowerName() << ";\n";
@@ -116,6 +144,9 @@ namespace {
void writeCloneArgs(raw_ostream &OS) const {
OS << getLowerName();
}
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
void writeCtorInitializers(raw_ostream &OS) const {
OS << getLowerName() << "(" << getUpperName() << ")";
}
@@ -136,6 +167,19 @@ namespace {
OS << " " << WritePCHRecord(type, "SA->get" +
std::string(getUpperName()) + "()");
}
+ void writeValue(raw_ostream &OS) const {
+ if (type == "FunctionDecl *") {
+ OS << "\" << get" << getUpperName() << "()->getNameInfo().getAsString() << \"";
+ } else if (type == "IdentifierInfo *") {
+ OS << "\" << get" << getUpperName() << "()->getName() << \"";
+ } else if (type == "QualType") {
+ OS << "\" << get" << getUpperName() << "().getAsString() << \"";
+ } else if (type == "SourceLocation") {
+ OS << "\" << get" << getUpperName() << "().getRawEncoding() << \"";
+ } else {
+ OS << "\" << get" << getUpperName() << "() << \"";
+ }
+ }
};
class StringArgument : public Argument {
@@ -164,6 +208,9 @@ namespace {
void writeCloneArgs(raw_ostream &OS) const {
OS << "get" << getUpperName() << "()";
}
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
void writeCtorBody(raw_ostream &OS) const {
OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
<< ".data(), " << getLowerName() << "Length);";
@@ -190,6 +237,9 @@ namespace {
void writePCHWrite(raw_ostream &OS) const {
OS << " AddString(SA->get" << getUpperName() << "(), Record);\n";
}
+ void writeValue(raw_ostream &OS) const {
+ OS << "\\\"\" << get" << getUpperName() << "() << \"\\\"";
+ }
};
class AlignedArgument : public Argument {
@@ -251,6 +301,10 @@ namespace {
<< "Expr) : " << getLowerName()
<< "Type";
}
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ // FIXME: move the definition from Sema::InstantiateAttrs to here.
+ // In the meantime, aligned attributes are cloned.
+ }
void writeCtorBody(raw_ostream &OS) const {
OS << " if (is" << getLowerName() << "Expr)\n";
OS << " " << getLowerName() << "Expr = reinterpret_cast<Expr *>("
@@ -293,6 +347,9 @@ namespace {
OS << " AddTypeSourceInfo(SA->get" << getUpperName()
<< "Type(), Record);\n";
}
+ void writeValue(raw_ostream &OS) const {
+ OS << "\" << get" << getUpperName() << "(Ctx) << \"";
+ }
};
class VariadicArgument : public Argument {
@@ -317,12 +374,17 @@ namespace {
<< "Size;\n";
OS << " }\n";
OS << " unsigned " << getLowerName() << "_size() const {\n"
- << " return " << getLowerName() << "Size;\n;";
+ << " return " << getLowerName() << "Size;\n";
OS << " }";
}
void writeCloneArgs(raw_ostream &OS) const {
OS << getLowerName() << ", " << getLowerName() << "Size";
}
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ // This isn't elegant, but we have to go through public methods...
+ OS << "A->" << getLowerName() << "_begin(), "
+ << "A->" << getLowerName() << "_size()";
+ }
void writeCtorBody(raw_ostream &OS) const {
// FIXME: memcpy is not safe on non-trivial types.
OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
@@ -362,6 +424,18 @@ namespace {
<< getLowerName() << "_end(); i != e; ++i)\n";
OS << " " << WritePCHRecord(type, "(*i)");
}
+ void writeValue(raw_ostream &OS) const {
+ OS << "\";\n";
+ OS << " bool isFirst = true;\n"
+ << " for (" << getAttrName() << "Attr::" << getLowerName()
+ << "_iterator i = " << getLowerName() << "_begin(), e = "
+ << getLowerName() << "_end(); i != e; ++i) {\n"
+ << " if (isFirst) isFirst = false;\n"
+ << " else OS << \", \";\n"
+ << " OS << *i;\n"
+ << " }\n";
+ OS << " OS << \"";
+ }
};
class EnumArgument : public Argument {
@@ -382,6 +456,9 @@ namespace {
void writeCloneArgs(raw_ostream &OS) const {
OS << getLowerName();
}
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
void writeCtorInitializers(raw_ostream &OS) const {
OS << getLowerName() << "(" << getUpperName() << ")";
}
@@ -422,6 +499,9 @@ namespace {
void writePCHWrite(raw_ostream &OS) const {
OS << "Record.push_back(SA->get" << getUpperName() << "());\n";
}
+ void writeValue(raw_ostream &OS) const {
+ OS << "\" << get" << getUpperName() << "() << \"";
+ }
};
class VersionArgument : public Argument {
@@ -442,6 +522,9 @@ namespace {
void writeCloneArgs(raw_ostream &OS) const {
OS << "get" << getUpperName() << "()";
}
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
void writeCtorBody(raw_ostream &OS) const {
}
void writeCtorInitializers(raw_ostream &OS) const {
@@ -463,6 +546,64 @@ namespace {
void writePCHWrite(raw_ostream &OS) const {
OS << " AddVersionTuple(SA->get" << getUpperName() << "(), Record);\n";
}
+ void writeValue(raw_ostream &OS) const {
+ OS << getLowerName() << "=\" << get" << getUpperName() << "() << \"";
+ }
+ };
+
+ class ExprArgument : public SimpleArgument {
+ public:
+ ExprArgument(Record &Arg, StringRef Attr)
+ : SimpleArgument(Arg, Attr, "Expr *")
+ {}
+
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "tempInst" << getUpperName();
+ }
+
+ void writeTemplateInstantiation(raw_ostream &OS) const {
+ OS << " " << getType() << " tempInst" << getUpperName() << ";\n";
+ OS << " {\n";
+ OS << " EnterExpressionEvaluationContext "
+ << "Unevaluated(S, Sema::Unevaluated);\n";
+ OS << " ExprResult " << "Result = S.SubstExpr("
+ << "A->get" << getUpperName() << "(), TemplateArgs);\n";
+ OS << " tempInst" << getUpperName() << " = "
+ << "Result.takeAs<Expr>();\n";
+ OS << " }\n";
+ }
+ };
+
+ class VariadicExprArgument : public VariadicArgument {
+ public:
+ VariadicExprArgument(Record &Arg, StringRef Attr)
+ : VariadicArgument(Arg, Attr, "Expr *")
+ {}
+
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "tempInst" << getUpperName() << ", "
+ << "A->" << getLowerName() << "_size()";
+ }
+
+ void writeTemplateInstantiation(raw_ostream &OS) const {
+ OS << " " << getType() << " *tempInst" << getUpperName()
+ << " = new (C, 16) " << getType()
+ << "[A->" << getLowerName() << "_size()];\n";
+ OS << " {\n";
+ OS << " EnterExpressionEvaluationContext "
+ << "Unevaluated(S, Sema::Unevaluated);\n";
+ OS << " " << getType() << " *TI = tempInst" << getUpperName()
+ << ";\n";
+ OS << " " << getType() << " *I = A->" << getLowerName()
+ << "_begin();\n";
+ OS << " " << getType() << " *E = A->" << getLowerName()
+ << "_end();\n";
+ OS << " for (; I != E; ++I, ++TI) {\n";
+ OS << " ExprResult Result = S.SubstExpr(*I, TemplateArgs);\n";
+ OS << " *TI = Result.takeAs<Expr>();\n";
+ OS << " }\n";
+ OS << " }\n";
+ }
};
}
@@ -476,8 +617,7 @@ static Argument *createArgument(Record &Arg, StringRef Attr,
if (ArgName == "AlignedArgument") Ptr = new AlignedArgument(Arg, Attr);
else if (ArgName == "EnumArgument") Ptr = new EnumArgument(Arg, Attr);
- else if (ArgName == "ExprArgument") Ptr = new SimpleArgument(Arg, Attr,
- "Expr *");
+ else if (ArgName == "ExprArgument") Ptr = new ExprArgument(Arg, Attr);
else if (ArgName == "FunctionArgument")
Ptr = new SimpleArgument(Arg, Attr, "FunctionDecl *");
else if (ArgName == "IdentifierArgument")
@@ -495,7 +635,7 @@ static Argument *createArgument(Record &Arg, StringRef Attr,
else if (ArgName == "VariadicUnsignedArgument")
Ptr = new VariadicArgument(Arg, Attr, "unsigned");
else if (ArgName == "VariadicExprArgument")
- Ptr = new VariadicArgument(Arg, Attr, "Expr *");
+ Ptr = new VariadicExprArgument(Arg, Attr);
else if (ArgName == "VersionArgument")
Ptr = new VersionArgument(Arg, Attr);
@@ -511,6 +651,15 @@ static Argument *createArgument(Record &Arg, StringRef Attr,
return Ptr;
}
+static void writeAvailabilityValue(raw_ostream &OS) {
+ OS << "\" << getPlatform()->getName();\n"
+ << " if (!getIntroduced().empty()) OS << \", introduced=\" << getIntroduced();\n"
+ << " if (!getDeprecated().empty()) OS << \", deprecated=\" << getDeprecated();\n"
+ << " if (!getObsoleted().empty()) OS << \", obsoleted=\" << getObsoleted();\n"
+ << " if (getUnavailable()) OS << \", unavailable\";\n"
+ << " OS << \"";
+}
+
void ClangAttrClassEmitter::run(raw_ostream &OS) {
OS << "// This file is generated by TableGen. Do not edit.\n\n";
OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
@@ -571,19 +720,25 @@ void ClangAttrClassEmitter::run(raw_ostream &OS) {
OS << " }\n\n";
OS << " virtual " << R.getName() << "Attr *clone (ASTContext &C) const;\n";
+ OS << " virtual void printPretty(llvm::raw_ostream &OS, ASTContext &Ctx) const;\n";
for (ai = Args.begin(); ai != ae; ++ai) {
(*ai)->writeAccessors(OS);
OS << "\n\n";
}
- OS << R.getValueAsCode("AdditionalMembers");
+ OS << R.getValueAsString("AdditionalMembers");
OS << "\n\n";
OS << " static bool classof(const Attr *A) { return A->getKind() == "
<< "attr::" << R.getName() << "; }\n";
OS << " static bool classof(const " << R.getName()
<< "Attr *) { return true; }\n";
+
+ bool LateParsed = R.getValueAsBit("LateParsed");
+ OS << " virtual bool isLateParsed() const { return "
+ << LateParsed << "; }\n";
+
OS << "};\n\n";
}
@@ -600,6 +755,7 @@ void ClangAttrImplEmitter::run(raw_ostream &OS) {
for (; i != e; ++i) {
Record &R = **i;
std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<StringRef> Spellings = getValueAsListOfStrings(R, "Spellings");
std::vector<Argument*> Args;
for (ri = ArgRecords.begin(), re = ArgRecords.end(); ri != re; ++ri)
Args.push_back(createArgument(**ri, R.getName()));
@@ -615,6 +771,24 @@ void ClangAttrImplEmitter::run(raw_ostream &OS) {
(*ai)->writeCloneArgs(OS);
}
OS << ");\n}\n\n";
+
+ OS << "void " << R.getName() << "Attr::printPretty("
+ << "llvm::raw_ostream &OS, ASTContext &Ctx) const {\n";
+ if (Spellings.begin() != Spellings.end()) {
+ OS << " OS << \" __attribute__((" << *Spellings.begin();
+ if (Args.size()) OS << "(";
+ if (*Spellings.begin()=="availability") {
+ writeAvailabilityValue(OS);
+ } else {
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ if (ai!=Args.begin()) OS <<", ";
+ (*ai)->writeValue(OS);
+ }
+ }
+ if (Args.size()) OS << ")";
+ OS << "))\";\n";
+ }
+ OS << "}\n\n";
}
}
@@ -786,3 +960,133 @@ void ClangAttrLateParsedListEmitter::run(raw_ostream &OS) {
}
}
}
+
+
+void ClangAttrTemplateInstantiateEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ OS << "namespace clang {\n"
+ << "namespace sema {\n\n"
+ << "Attr *instantiateTemplateAttribute(const Attr *At, ASTContext &C, "
+ << "Sema &S,\n"
+ << " const MultiLevelTemplateArgumentList &TemplateArgs) {\n"
+ << " switch (At->getKind()) {\n"
+ << " default:\n"
+ << " break;\n";
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &R = **I;
+
+ OS << " case attr::" << R.getName() << ": {\n";
+ OS << " const " << R.getName() << "Attr *A = cast<"
+ << R.getName() << "Attr>(At);\n";
+ bool TDependent = R.getValueAsBit("TemplateDependent");
+
+ if (!TDependent) {
+ OS << " return A->clone(C);\n";
+ OS << " }\n";
+ continue;
+ }
+
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ai, ae;
+ Args.reserve(ArgRecords.size());
+
+ for (std::vector<Record*>::iterator ri = ArgRecords.begin(),
+ re = ArgRecords.end();
+ ri != re; ++ri) {
+ Record &ArgRecord = **ri;
+ Argument *Arg = createArgument(ArgRecord, R.getName());
+ assert(Arg);
+ Args.push_back(Arg);
+ }
+ ae = Args.end();
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeTemplateInstantiation(OS);
+ }
+ OS << " return new (C) " << R.getName() << "Attr(A->getLocation(), C";
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << ", ";
+ (*ai)->writeTemplateInstantiationArgs(OS);
+ }
+ OS << ");\n }\n";
+ }
+ OS << " } // end switch\n"
+ << " llvm_unreachable(\"Unknown attribute!\");\n"
+ << " return 0;\n"
+ << "}\n\n"
+ << "} // end namespace sema\n"
+ << "} // end namespace clang\n";
+}
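+
+// The generated function is a switch over attribute kinds. For a
+// hypothetical attribute "Foo" that is not template-dependent, the emitted
+// case is roughly:
+//
+//   case attr::Foo: {
+//     const FooAttr *A = cast<FooAttr>(At);
+//     return A->clone(C);
+//   }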
+
+void ClangAttrParsedAttrListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "#ifndef PARSED_ATTR\n";
+ OS << "#define PARSED_ATTR(NAME) NAME\n";
+ OS << "#endif\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::set<StringRef> ProcessedAttrs;
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &Attr = **I;
+
+ bool SemaHandler = Attr.getValueAsBit("SemaHandler");
+
+ if (SemaHandler) {
+ std::vector<StringRef> Spellings =
+ getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ StringRef AttrName = *I;
+
+ AttrName = NormalizeAttrName(AttrName);
+ // Skip if a normalized version has already been processed.
+ if (ProcessedAttrs.find(AttrName) != ProcessedAttrs.end())
+ continue;
+ else
+ ProcessedAttrs.insert(AttrName);
+
+ OS << "PARSED_ATTR(" << AttrName << ")\n";
+ }
+ }
+ }
+}
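+
+// For a hypothetical attribute with spellings {"__foo__", "foo"}, both
+// spellings normalize to "foo", so this emits a single "PARSED_ATTR(foo)".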
+
+void ClangAttrParsedAttrKindsEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &Attr = **I;
+
+ bool SemaHandler = Attr.getValueAsBit("SemaHandler");
+
+ if (SemaHandler) {
+ std::vector<StringRef> Spellings =
+ getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ StringRef AttrName = *I, Spelling = *I;
+
+ AttrName = NormalizeAttrName(AttrName);
+ Spelling = NormalizeAttrSpelling(Spelling);
+
+ OS << ".Case(\"" << Spelling << "\", " << "AT_" << AttrName << ")\n";
+ }
+ }
+ }
+}
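+
+// Each emitted line is a StringSwitch case. For example, the spelling
+// "__ms_struct__" normalizes to "ms_struct" for both the name and the
+// spelling, yielding:
+//
+//   .Case("ms_struct", AT_ms_struct)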
+
+
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h
index 5acca56..d119a09 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h
@@ -109,6 +109,45 @@ class ClangAttrLateParsedListEmitter : public TableGenBackend {
void run(raw_ostream &OS);
};
+/// ClangAttrTemplateInstantiateEmitter emits code to instantiate dependent
+/// attributes on templates.
+class ClangAttrTemplateInstantiateEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrTemplateInstantiateEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrParsedAttrListEmitter emits the list of parsed attributes
+/// for clang.
+class ClangAttrParsedAttrListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrParsedAttrListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrParsedAttrKindsEmitter emits the kind list of parsed attributes
+/// for clang.
+class ClangAttrParsedAttrKindsEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrParsedAttrKindsEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
}
#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index da2fb70..8a49619 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -16,13 +16,12 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/VectorExtras.h"
#include <map>
#include <algorithm>
#include <functional>
+#include <set>
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -51,7 +50,6 @@ public:
};
} // end anonymous namespace.
-
static std::string
getCategoryFromDiagGroup(const Record *Group,
DiagGroupParentMap &DiagGroupParents) {
@@ -120,8 +118,44 @@ namespace {
iterator begin() { return CategoryStrings.begin(); }
iterator end() { return CategoryStrings.end(); }
};
+
+ struct GroupInfo {
+ std::vector<const Record*> DiagsInGroup;
+ std::vector<std::string> SubGroups;
+ unsigned IDNo;
+ };
} // end anonymous namespace.
+/// \brief Invert the 1-[0/1] mapping of diags to groups into a one-to-many
+/// mapping of groups to the diags in each group.
+static void groupDiagnostics(const std::vector<Record*> &Diags,
+ const std::vector<Record*> &DiagGroups,
+ std::map<std::string, GroupInfo> &DiagsInGroup) {
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ const Record *R = Diags[i];
+ DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group"));
+ if (DI == 0) continue;
+ std::string GroupName = DI->getDef()->getValueAsString("GroupName");
+ DiagsInGroup[GroupName].DiagsInGroup.push_back(R);
+ }
+
+ // Add all DiagGroups to the DiagsInGroup list to make sure we pick up empty
+ // groups (these are warnings that GCC supports that clang never produces).
+ for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
+ Record *Group = DiagGroups[i];
+ GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
+
+ std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
+ for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
+ GI.SubGroups.push_back(SubGroups[j]->getValueAsString("GroupName"));
+ }
+
+ // Assign unique ID numbers to the groups.
+ unsigned IDNo = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I, ++IDNo)
+ I->second.IDNo = IDNo;
+}
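+
+// For example (record names hypothetical): two Diagnostic records in group
+// "unused" plus a DiagGroup "unused" with SubGroups ["unused-value"] yield
+//   DiagsInGroup["unused"].DiagsInGroup == {diag1, diag2}
+//   DiagsInGroup["unused"].SubGroups   == {"unused-value"}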
//===----------------------------------------------------------------------===//
// Warning Tables (.inc file) generation.
@@ -130,7 +164,7 @@ namespace {
void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
// Write the #if guard
if (!Component.empty()) {
- std::string ComponentName = UppercaseString(Component);
+ std::string ComponentName = StringRef(Component).upper();
OS << "#ifdef " << ComponentName << "START\n";
OS << "__" << ComponentName << "START = DIAG_START_" << ComponentName
<< ",\n";
@@ -140,7 +174,13 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
const std::vector<Record*> &Diags =
Records.getAllDerivedDefinitions("Diagnostic");
-
+
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+
+ std::map<std::string, GroupInfo> DiagsInGroup;
+ groupDiagnostics(Diags, DiagGroups, DiagsInGroup);
+
DiagCategoryIDMap CategoryIDs(Records);
DiagGroupParentMap DGParentMap(Records);
@@ -158,12 +198,15 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
OS << ", \"";
OS.write_escaped(R.getValueAsString("Text")) << '"';
- // Warning associated with the diagnostic.
+ // Warning associated with the diagnostic. This is stored as an index into
+ // the alphabetically sorted warning table.
if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group"))) {
- OS << ", \"";
- OS.write_escaped(DI->getDef()->getValueAsString("GroupName")) << '"';
+ std::map<std::string, GroupInfo>::iterator I =
+ DiagsInGroup.find(DI->getDef()->getValueAsString("GroupName"));
+ assert(I != DiagsInGroup.end());
+ OS << ", " << I->second.IDNo;
} else {
- OS << ", \"\"";
+ OS << ", 0";
}
// SFINAE bit
@@ -196,14 +239,6 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
// Category number.
OS << ", " << CategoryIDs.getID(getDiagnosticCategory(&R, DGParentMap));
-
- // Brief
- OS << ", \"";
- OS.write_escaped(R.getValueAsString("Brief")) << '"';
-
- // Explanation
- OS << ", \"";
- OS.write_escaped(R.getValueAsString("Explanation")) << '"';
OS << ")\n";
}
}
@@ -215,56 +250,24 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
static std::string getDiagCategoryEnum(llvm::StringRef name) {
if (name.empty())
return "DiagCat_None";
- llvm::SmallString<256> enumName = llvm::StringRef("DiagCat_");
+ SmallString<256> enumName = llvm::StringRef("DiagCat_");
for (llvm::StringRef::iterator I = name.begin(), E = name.end(); I != E; ++I)
enumName += isalnum(*I) ? *I : '_';
return enumName.str();
}
-namespace {
-struct GroupInfo {
- std::vector<const Record*> DiagsInGroup;
- std::vector<std::string> SubGroups;
- unsigned IDNo;
-};
-} // end anonymous namespace.
-
void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
// Compute a mapping from a DiagGroup to all of its parents.
DiagGroupParentMap DGParentMap(Records);
- // Invert the 1-[0/1] mapping of diags to group into a one to many mapping of
- // groups to diags in the group.
- std::map<std::string, GroupInfo> DiagsInGroup;
-
std::vector<Record*> Diags =
Records.getAllDerivedDefinitions("Diagnostic");
- for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
- const Record *R = Diags[i];
- DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group"));
- if (DI == 0) continue;
- std::string GroupName = DI->getDef()->getValueAsString("GroupName");
- DiagsInGroup[GroupName].DiagsInGroup.push_back(R);
- }
- // Add all DiagGroup's to the DiagsInGroup list to make sure we pick up empty
- // groups (these are warnings that GCC supports that clang never produces).
std::vector<Record*> DiagGroups
= Records.getAllDerivedDefinitions("DiagGroup");
- for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
- Record *Group = DiagGroups[i];
- GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
-
- std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
- for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
- GI.SubGroups.push_back(SubGroups[j]->getValueAsString("GroupName"));
- }
-
- // Assign unique ID numbers to the groups.
- unsigned IDNo = 0;
- for (std::map<std::string, GroupInfo>::iterator
- I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I, ++IDNo)
- I->second.IDNo = IDNo;
+
+ std::map<std::string, GroupInfo> DiagsInGroup;
+ groupDiagnostics(Diags, DiagGroups, DiagsInGroup);
// Walk through the groups emitting an array for each diagnostic of the diags
// that are mapped to.
@@ -304,6 +307,10 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
OS << " { ";
OS << I->first.size() << ", ";
OS << "\"";
+ if (I->first.find_first_not_of("abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789!@#$%^*-+=:?")!=std::string::npos)
+ throw "Invalid character in diagnostic group '" + I->first + "'";
OS.write_escaped(I->first) << "\","
<< std::string(MaxLen-I->first.size()+1, ' ');
diff --git a/contrib/llvm/tools/clang/utils/TableGen/Makefile b/contrib/llvm/tools/clang/utils/TableGen/Makefile
deleted file mode 100644
index 9790efc..0000000
--- a/contrib/llvm/tools/clang/utils/TableGen/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-##===- utils/TableGen/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-TOOLNAME = clang-tblgen
-USEDLIBS = LLVMTableGen.a LLVMSupport.a
-REQUIRES_EH := 1
-REQUIRES_RTTI := 1
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
index 66845cc..e6f2e53 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
@@ -28,6 +28,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
#include <string>
using namespace llvm;
@@ -56,7 +57,6 @@ static void ParseTypes(Record *r, std::string &s,
default:
throw TGError(r->getLoc(),
"Unexpected letter: " + std::string(data + len, 1));
- break;
}
TV.push_back(StringRef(data, len + 1));
data += len + 1;
@@ -78,7 +78,6 @@ static char Widen(const char t) {
return 'f';
default: throw "unhandled type in widen!";
}
- return '\0';
}
/// Narrow - Convert a type code into the next smaller type. short -> char,
@@ -95,7 +94,6 @@ static char Narrow(const char t) {
return 'h';
default: throw "unhandled type in narrow!";
}
- return '\0';
}
/// For a particular StringRef, return the base type code, and whether it has
@@ -266,7 +264,6 @@ static std::string TypeString(const char mod, StringRef typestr) {
break;
default:
throw "unhandled type!";
- break;
}
if (mod == '2')
@@ -449,7 +446,6 @@ static std::string MangleName(const std::string &name, StringRef typestr,
break;
default:
throw "unhandled type!";
- break;
}
if (ck == ClassB)
s += "_v";
@@ -526,12 +522,6 @@ static std::string GenMacroLocals(const std::string &proto, StringRef typestr) {
for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
// Do not create a temporary for an immediate argument.
// That would defeat the whole point of using a macro!
- // FIXME: For other (non-immediate) arguments that are used directly, a
- // local temporary (or some other method) is still needed to get the
- // correct type checking, even if that temporary is not used for anything.
- // This is omitted for now because it turns out the the use of
- // "__extension__" in the macro disables any warnings from the pointer
- // assignment.
if (MacroArgUsedDirectly(proto, i))
continue;
generatedLocal = true;
@@ -594,7 +584,6 @@ static unsigned GetNumElements(StringRef typestr, bool &quad) {
case 'f': nElts = 2; break;
default:
throw "unhandled type!";
- break;
}
if (quad) nElts <<= 1;
return nElts;
@@ -826,14 +815,12 @@ static std::string GenOpString(OpKind op, const std::string &proto,
}
default:
throw "unknown OpKind!";
- break;
}
return s;
}
static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
unsigned mod = proto[0];
- unsigned ret = 0;
if (mod == 'v' || mod == 'f')
mod = proto[1];
@@ -851,35 +838,31 @@ static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
// Based on the modifying character, change the type and width if necessary.
type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
- if (usgn)
- ret |= 0x08;
- if (quad && proto[1] != 'g')
- ret |= 0x10;
-
+ NeonTypeFlags::EltType ET;
switch (type) {
case 'c':
- ret |= poly ? 5 : 0;
+ ET = poly ? NeonTypeFlags::Poly8 : NeonTypeFlags::Int8;
break;
case 's':
- ret |= poly ? 6 : 1;
+ ET = poly ? NeonTypeFlags::Poly16 : NeonTypeFlags::Int16;
break;
case 'i':
- ret |= 2;
+ ET = NeonTypeFlags::Int32;
break;
case 'l':
- ret |= 3;
+ ET = NeonTypeFlags::Int64;
break;
case 'h':
- ret |= 7;
+ ET = NeonTypeFlags::Float16;
break;
case 'f':
- ret |= 4;
+ ET = NeonTypeFlags::Float32;
break;
default:
throw "unhandled type!";
- break;
}
- return ret;
+ NeonTypeFlags Flags(ET, usgn, quad && proto[1] != 'g');
+ return Flags.getFlags();
}
// Generate the definition for this intrinsic, e.g. __builtin_neon_cls(a)
@@ -1249,10 +1232,7 @@ static unsigned RangeFromType(const char mod, StringRef typestr) {
return (1 << (int)quad) - 1;
default:
throw "unhandled type!";
- break;
}
- assert(0 && "unreachable");
- return 0;
}
/// runHeader - Emit a file with sections defining:
@@ -1346,14 +1326,57 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
mask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
}
}
- if (mask)
+
+ // Check if the builtin function has a pointer or const pointer argument.
+ int PtrArgNum = -1;
+ bool HasConstPtr = false;
+ for (unsigned arg = 1, arge = Proto.size(); arg != arge; ++arg) {
+ char ArgType = Proto[arg];
+ if (ArgType == 'c') {
+ HasConstPtr = true;
+ PtrArgNum = arg - 1;
+ break;
+ }
+ if (ArgType == 'p') {
+ PtrArgNum = arg - 1;
+ break;
+ }
+ }
+ // For sret builtins, adjust the pointer argument index.
+ if (PtrArgNum >= 0 && (Proto[0] >= '2' && Proto[0] <= '4'))
+ PtrArgNum += 1;
+
+ // Omit type checking for the pointer arguments of vld1_lane, vld1_dup,
+ // and vst1_lane intrinsics. Using a pointer to the vector element
+ // type with one of those operations causes codegen to select an aligned
+ // load/store instruction. If you want an unaligned operation,
+ // the pointer argument needs to have less alignment than the element type,
+ // so just accept any pointer type.
+ if (name == "vld1_lane" || name == "vld1_dup" || name == "vst1_lane") {
+ PtrArgNum = -1;
+ HasConstPtr = false;
+ }
+
+ if (mask) {
OS << "case ARM::BI__builtin_neon_"
<< MangleName(name, TypeVec[si], ClassB)
- << ": mask = " << "0x" << utohexstr(mask) << "; break;\n";
- if (qmask)
+ << ": mask = " << "0x" << utohexstr(mask);
+ if (PtrArgNum >= 0)
+ OS << "; PtrArgNum = " << PtrArgNum;
+ if (HasConstPtr)
+ OS << "; HasConstPtr = true";
+ OS << "; break;\n";
+ }
+ if (qmask) {
OS << "case ARM::BI__builtin_neon_"
<< MangleName(name, TypeVec[qi], ClassB)
- << ": mask = " << "0x" << utohexstr(qmask) << "; break;\n";
+ << ": mask = " << "0x" << utohexstr(qmask);
+ if (PtrArgNum >= 0)
+ OS << "; PtrArgNum = " << PtrArgNum;
+ if (HasConstPtr)
+ OS << "; HasConstPtr = true";
+ OS << "; break;\n";
+ }
}
OS << "#endif\n\n";
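For reference, the overload-checking lines emitted by the loop above now take this shape (builtin name and mask value below are hypothetical; PtrArgNum and HasConstPtr appear only when a pointer argument was found):

    case ARM::BI__builtin_neon_vld2_lane_v: mask = 0x7; PtrArgNum = 2; HasConstPtr = true; break;
    case ARM::BI__builtin_neon_vabs_v: mask = 0x7; break;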
diff --git a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h
index 708ad3c..dec7451 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h
+++ b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h
@@ -86,6 +86,40 @@ enum ClassKind {
ClassB // bitcast arguments with enum argument to specify type
};
+/// NeonTypeFlags - Flags to identify the types for overloaded Neon
+/// builtins. These must be kept in sync with the flags in
+/// include/clang/Basic/TargetBuiltins.h.
+class NeonTypeFlags {
+ enum {
+ EltTypeMask = 0xf,
+ UnsignedFlag = 0x10,
+ QuadFlag = 0x20
+ };
+ uint32_t Flags;
+
+public:
+ enum EltType {
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ Poly8,
+ Poly16,
+ Float16,
+ Float32
+ };
+
+ NeonTypeFlags(unsigned F) : Flags(F) {}
+ NeonTypeFlags(EltType ET, bool IsUnsigned, bool IsQuad) : Flags(ET) {
+ if (IsUnsigned)
+ Flags |= UnsignedFlag;
+ if (IsQuad)
+ Flags |= QuadFlag;
+ }
+
+ uint32_t getFlags() const { return Flags; }
+};
+
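A minimal sketch (not part of the change) of how the bit layout above composes: an unsigned quad 32-bit integer element is Int32 (2) | UnsignedFlag (0x10) | QuadFlag (0x20):

    NeonTypeFlags F(NeonTypeFlags::Int32, /*IsUnsigned=*/true, /*IsQuad=*/true);
    assert(F.getFlags() == 0x32); // 0x02 | 0x10 | 0x20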
namespace llvm {
class NeonEmitter : public TableGenBackend {
diff --git a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
index 5c016d3..5ff88db 100644
--- a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
@@ -36,6 +36,9 @@ enum ActionType {
GenClangAttrPCHWrite,
GenClangAttrSpellingList,
GenClangAttrLateParsedList,
+ GenClangAttrTemplateInstantiate,
+ GenClangAttrParsedAttrList,
+ GenClangAttrParsedAttrKinds,
GenClangDiagsDefs,
GenClangDiagGroups,
GenClangDiagsIndexName,
@@ -71,6 +74,15 @@ namespace {
clEnumValN(GenClangAttrLateParsedList,
"gen-clang-attr-late-parsed-list",
"Generate a clang attribute LateParsed list"),
+ clEnumValN(GenClangAttrTemplateInstantiate,
+ "gen-clang-attr-template-instantiate",
+ "Generate a clang template instantiate code"),
+ clEnumValN(GenClangAttrParsedAttrList,
+ "gen-clang-attr-parsed-attr-list",
+ "Generate a clang parsed attribute list"),
+ clEnumValN(GenClangAttrParsedAttrKinds,
+ "gen-clang-attr-parsed-attr-kinds",
+ "Generate a clang parsed attribute kinds"),
clEnumValN(GenClangDiagsDefs, "gen-clang-diags-defs",
"Generate Clang diagnostics definitions"),
clEnumValN(GenClangDiagGroups, "gen-clang-diag-groups",
@@ -96,7 +108,6 @@ namespace {
ClangComponent("clang-component",
cl::desc("Only use warnings from specified component"),
cl::value_desc("component"), cl::Hidden);
-}
class ClangTableGenAction : public TableGenAction {
public:
@@ -123,6 +134,15 @@ public:
case GenClangAttrLateParsedList:
ClangAttrLateParsedListEmitter(Records).run(OS);
break;
+ case GenClangAttrTemplateInstantiate:
+ ClangAttrTemplateInstantiateEmitter(Records).run(OS);
+ break;
+ case GenClangAttrParsedAttrList:
+ ClangAttrParsedAttrListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrParsedAttrKinds:
+ ClangAttrParsedAttrKindsEmitter(Records).run(OS);
+ break;
case GenClangDiagsDefs:
ClangDiagsDefsEmitter(Records, ClangComponent).run(OS);
break;
@@ -157,14 +177,12 @@ public:
case GenArmNeonTest:
NeonEmitter(Records).runTests(OS);
break;
- default:
- assert(1 && "Invalid Action");
- return true;
}
return false;
}
};
+}
int main(int argc, char **argv) {
sys::PrintStackTraceOnErrorSignal();
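The three new actions are selected via the flags registered above; assuming the usual clang-tblgen driver and input paths (names here are illustrative), an invocation would look like:

    clang-tblgen -gen-clang-attr-parsed-attr-kinds Attr.td -o AttrParsedAttrKinds.inc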
diff --git a/contrib/llvm/tools/llc/CMakeLists.txt b/contrib/llvm/tools/llc/CMakeLists.txt
deleted file mode 100644
index 683f298..0000000
--- a/contrib/llvm/tools/llc/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS ${LLVM_TARGETS_TO_BUILD} bitreader asmparser)
-
-add_llvm_tool(llc
- llc.cpp
- )
diff --git a/contrib/llvm/tools/llc/Makefile b/contrib/llvm/tools/llc/Makefile
deleted file mode 100644
index 7319aad..0000000
--- a/contrib/llvm/tools/llc/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-#===- tools/llc/Makefile -----------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llc
-
-# Include this here so we can get the configuration of the targets
-# that have been configured for construction. We have to do this
-# early so we can set up LINK_COMPONENTS before including Makefile.rules
-include $(LEVEL)/Makefile.config
-
-LINK_COMPONENTS := $(TARGETS_TO_BUILD) bitreader asmparser
-
-include $(LLVM_SRC_ROOT)/Makefile.rules
-
diff --git a/contrib/llvm/tools/llc/llc.cpp b/contrib/llvm/tools/llc/llc.cpp
index d29bd9b..9e30ac1 100644
--- a/contrib/llvm/tools/llc/llc.cpp
+++ b/contrib/llvm/tools/llc/llc.cpp
@@ -21,7 +21,6 @@
#include "llvm/Support/IRReader.h"
#include "llvm/CodeGen/LinkAllAsmWriterComponents.h"
#include "llvm/CodeGen/LinkAllCodegenComponents.h"
-#include "llvm/Config/config.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -133,11 +132,147 @@ cl::opt<bool> DisableDotLoc("disable-dot-loc", cl::Hidden,
cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
cl::desc("Do not use .cfi_* directives"));
+cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
+ cl::desc("Use .file directives with an explicit directory."));
+
static cl::opt<bool>
DisableRedZone("disable-red-zone",
cl::desc("Do not emit code that uses the red zone."),
cl::init(false));
+static cl::opt<bool>
+EnableFPMAD("enable-fp-mad",
+ cl::desc("Enable less precise MAD instructions to be generated"),
+ cl::init(false));
+
+static cl::opt<bool>
+PrintCode("print-machineinstrs",
+ cl::desc("Print generated machine code"),
+ cl::init(false));
+
+static cl::opt<bool>
+DisableFPElim("disable-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization"),
+ cl::init(false));
+
+static cl::opt<bool>
+DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
+ cl::init(false));
+
+static cl::opt<bool>
+DisableExcessPrecision("disable-excess-fp-precision",
+ cl::desc("Disable optimizations that may increase FP precision"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableUnsafeFPMath("enable-unsafe-fp-math",
+ cl::desc("Enable optimizations that may decrease FP precision"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableNoInfsFPMath("enable-no-infs-fp-math",
+ cl::desc("Enable FP math optimizations that assume no +-Infs"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableNoNaNsFPMath("enable-no-nans-fp-math",
+ cl::desc("Enable FP math optimizations that assume no NaNs"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
+ cl::Hidden,
+ cl::desc("Force codegen to assume rounding mode can change dynamically"),
+ cl::init(false));
+
+static cl::opt<bool>
+GenerateSoftFloatCalls("soft-float",
+ cl::desc("Generate software floating point library calls"),
+ cl::init(false));
+
+static cl::opt<llvm::FloatABI::ABIType>
+FloatABIForCalls("float-abi",
+ cl::desc("Choose float ABI type"),
+ cl::init(FloatABI::Default),
+ cl::values(
+ clEnumValN(FloatABI::Default, "default",
+ "Target default float ABI type"),
+ clEnumValN(FloatABI::Soft, "soft",
+ "Soft float ABI (implied by -soft-float)"),
+ clEnumValN(FloatABI::Hard, "hard",
+ "Hard float ABI (uses FP registers)"),
+ clEnumValEnd));
+
+static cl::opt<bool>
+DontPlaceZerosInBSS("nozero-initialized-in-bss",
+ cl::desc("Don't place zero-initialized symbols into bss section"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableJITExceptionHandling("jit-enable-eh",
+ cl::desc("Emit exception handling information"),
+ cl::init(false));
+
+// In debug builds, make this default to true.
+#ifdef NDEBUG
+#define EMIT_DEBUG false
+#else
+#define EMIT_DEBUG true
+#endif
+static cl::opt<bool>
+EmitJitDebugInfo("jit-emit-debug",
+ cl::desc("Emit debug information to debugger"),
+ cl::init(EMIT_DEBUG));
+#undef EMIT_DEBUG
+
+static cl::opt<bool>
+EmitJitDebugInfoToDisk("jit-emit-debug-to-disk",
+ cl::Hidden,
+ cl::desc("Emit debug info objfiles to disk"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableGuaranteedTailCallOpt("tailcallopt",
+ cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
+ cl::init(false));
+
+static cl::opt<bool>
+DisableTailCalls("disable-tail-calls",
+ cl::desc("Never emit tail calls"),
+ cl::init(false));
+
+static cl::opt<unsigned>
+OverrideStackAlignment("stack-alignment",
+ cl::desc("Override default stack alignment"),
+ cl::init(0));
+
+static cl::opt<bool>
+EnableRealignStack("realign-stack",
+ cl::desc("Realign stack if needed"),
+ cl::init(true));
+
+static cl::opt<bool>
+DisableSwitchTables(cl::Hidden, "disable-jump-tables",
+ cl::desc("Do not generate jump tables."),
+ cl::init(false));
+
+static cl::opt<std::string>
+TrapFuncName("trap-func", cl::Hidden,
+ cl::desc("Emit a call to trap function rather than a trap instruction"),
+ cl::init(""));
+
+static cl::opt<bool>
+EnablePIE("enable-pie",
+ cl::desc("Assume the creation of a position independent executable."),
+ cl::init(false));
+
+static cl::opt<bool>
+SegmentedStacks("segmented-stacks",
+ cl::desc("Use segmented stacks if possible."),
+ cl::init(false));
+
+
// GetFileNameRoot - Helper function to get the basename of a filename.
static inline std::string
GetFileNameRoot(const std::string &InputFilename) {
@@ -166,7 +301,6 @@ static tool_output_file *GetOutputStream(const char *TargetName,
OutputFilename = GetFileNameRoot(InputFilename);
switch (FileType) {
- default: assert(0 && "Unknown file type");
case TargetMachine::CGFT_AssemblyFile:
if (TargetName[0] == 'c') {
if (TargetName[1] == 0)
@@ -194,7 +328,6 @@ static tool_output_file *GetOutputStream(const char *TargetName,
// Decide if we need "binary" output.
bool Binary = false;
switch (FileType) {
- default: assert(0 && "Unknown file type");
case TargetMachine::CGFT_AssemblyFile:
break;
case TargetMachine::CGFT_ObjectFile:
@@ -247,7 +380,7 @@ int main(int argc, char **argv) {
M.reset(ParseIRFile(InputFilename, Err, Context));
if (M.get() == 0) {
- Err.Print(argv[0], errs());
+ Err.print(argv[0], errs());
return 1;
}
Module &mod = *M.get();
@@ -258,7 +391,7 @@ int main(int argc, char **argv) {
Triple TheTriple(mod.getTargetTriple());
if (TheTriple.getTriple().empty())
- TheTriple.setTriple(sys::getHostTriple());
+ TheTriple.setTriple(sys::getDefaultTargetTriple());
// Allocate target machine. First, check whether the user has explicitly
// specified an architecture to compile for. If so we have to look it up by
@@ -303,10 +436,49 @@ int main(int argc, char **argv) {
FeaturesStr = Features.getString();
}
+ CodeGenOpt::Level OLvl = CodeGenOpt::Default;
+ switch (OptLevel) {
+ default:
+ errs() << argv[0] << ": invalid optimization level.\n";
+ return 1;
+ case ' ': break;
+ case '0': OLvl = CodeGenOpt::None; break;
+ case '1': OLvl = CodeGenOpt::Less; break;
+ case '2': OLvl = CodeGenOpt::Default; break;
+ case '3': OLvl = CodeGenOpt::Aggressive; break;
+ }
+
+ TargetOptions Options;
+ Options.LessPreciseFPMADOption = EnableFPMAD;
+ Options.PrintMachineCode = PrintCode;
+ Options.NoFramePointerElim = DisableFPElim;
+ Options.NoFramePointerElimNonLeaf = DisableFPElimNonLeaf;
+ Options.NoExcessFPPrecision = DisableExcessPrecision;
+ Options.UnsafeFPMath = EnableUnsafeFPMath;
+ Options.NoInfsFPMath = EnableNoInfsFPMath;
+ Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+ Options.HonorSignDependentRoundingFPMathOption =
+ EnableHonorSignDependentRoundingFPMath;
+ Options.UseSoftFloat = GenerateSoftFloatCalls;
+ if (FloatABIForCalls != FloatABI::Default)
+ Options.FloatABIType = FloatABIForCalls;
+ Options.NoZerosInBSS = DontPlaceZerosInBSS;
+ Options.JITExceptionHandling = EnableJITExceptionHandling;
+ Options.JITEmitDebugInfo = EmitJitDebugInfo;
+ Options.JITEmitDebugInfoToDisk = EmitJitDebugInfoToDisk;
+ Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+ Options.DisableTailCalls = DisableTailCalls;
+ Options.StackAlignmentOverride = OverrideStackAlignment;
+ Options.RealignStack = EnableRealignStack;
+ Options.DisableJumpTables = DisableSwitchTables;
+ Options.TrapFuncName = TrapFuncName;
+ Options.PositionIndependentExecutable = EnablePIE;
+ Options.EnableSegmentedStacks = SegmentedStacks;
+
std::auto_ptr<TargetMachine>
target(TheTarget->createTargetMachine(TheTriple.getTriple(),
- MCPU, FeaturesStr,
- RelocModel, CMModel));
+ MCPU, FeaturesStr, Options,
+ RelocModel, CMModel, OLvl));
assert(target.get() && "Could not allocate target machine!");
TargetMachine &Target = *target.get();
@@ -316,6 +488,12 @@ int main(int argc, char **argv) {
if (DisableCFI)
Target.setMCUseCFI(false);
+ if (EnableDwarfDirectory)
+ Target.setMCUseDwarfDirectory(true);
+
+ if (GenerateSoftFloatCalls)
+ FloatABIForCalls = FloatABI::Soft;
+
// Disable .loc support for older OS X versions.
if (TheTriple.isMacOSX() &&
TheTriple.isMacOSXVersionLT(10, 6))
@@ -326,18 +504,6 @@ int main(int argc, char **argv) {
(GetOutputStream(TheTarget->getName(), TheTriple.getOS(), argv[0]));
if (!Out) return 1;
- CodeGenOpt::Level OLvl = CodeGenOpt::Default;
- switch (OptLevel) {
- default:
- errs() << argv[0] << ": invalid optimization level.\n";
- return 1;
- case ' ': break;
- case '0': OLvl = CodeGenOpt::None; break;
- case '1': OLvl = CodeGenOpt::Less; break;
- case '2': OLvl = CodeGenOpt::Default; break;
- case '3': OLvl = CodeGenOpt::Aggressive; break;
- }
-
// Build up all of the passes that we want to do to the module.
PassManager PM;
@@ -362,7 +528,7 @@ int main(int argc, char **argv) {
formatted_raw_ostream FOS(Out->os());
// Ask the target to add backend passes as necessary.
- if (Target.addPassesToEmitFile(PM, FOS, FileType, OLvl, NoVerify)) {
+ if (Target.addPassesToEmitFile(PM, FOS, FileType, NoVerify)) {
errs() << argv[0] << ": target does not support generation of this"
<< " file type!\n";
return 1;
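With the cl::opt definitions added above, codegen behavior that previously had to be set through TargetOptions in code can now be driven from the llc command line, e.g. (all flags as defined in this file; input file hypothetical):

    llc -O3 -enable-unsafe-fp-math -float-abi=soft -disable-fp-elim foo.ll -o foo.s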
diff --git a/contrib/llvm/tools/lli/CMakeLists.txt b/contrib/llvm/tools/lli/CMakeLists.txt
deleted file mode 100644
index 9378ef2..0000000
--- a/contrib/llvm/tools/lli/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS mcjit jit interpreter nativecodegen bitreader asmparser selectiondag)
-
-add_llvm_tool(lli
- lli.cpp
- )
diff --git a/contrib/llvm/tools/lli/Makefile b/contrib/llvm/tools/lli/Makefile
deleted file mode 100644
index 80aa82b..0000000
--- a/contrib/llvm/tools/lli/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- tools/lli/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL := ../..
-TOOLNAME := lli
-LINK_COMPONENTS := mcjit jit interpreter nativecodegen bitreader asmparser selectiondag
-
-# Enable JIT support
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/lli/lli.cpp b/contrib/llvm/tools/lli/lli.cpp
index 50c7a49..efcc1f5 100644
--- a/contrib/llvm/tools/lli/lli.cpp
+++ b/contrib/llvm/tools/lli/lli.cpp
@@ -23,6 +23,7 @@
#include "llvm/ExecutionEngine/Interpreter.h"
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/IRReader.h"
@@ -94,12 +95,12 @@ namespace {
"of the executable"),
cl::value_desc("function"),
cl::init("main"));
-
+
cl::opt<std::string>
FakeArgv0("fake-argv0",
cl::desc("Override the 'argv[0]' value passed into the executing"
" program"), cl::value_desc("executable"));
-
+
cl::opt<bool>
DisableCoreFiles("disable-core-files", cl::Hidden,
cl::desc("Disable emission of core files if possible"));
@@ -158,7 +159,7 @@ static void do_shutdown() {
int main(int argc, char **argv, char * const *envp) {
sys::PrintStackTraceOnErrorSignal();
PrettyStackTraceProgram X(argc, argv);
-
+
LLVMContext &Context = getGlobalContext();
atexit(do_shutdown); // Call llvm_shutdown() on exit.
@@ -173,12 +174,12 @@ int main(int argc, char **argv, char * const *envp) {
// If the user doesn't want core files, disable them.
if (DisableCoreFiles)
sys::Process::PreventCoreFiles();
-
+
// Load the bitcode...
SMDiagnostic Err;
Module *Mod = ParseIRFile(InputFile, Err, Context);
if (!Mod) {
- Err.Print(argv[0], errs());
+ Err.print(argv[0], errs());
return 1;
}
@@ -199,6 +200,8 @@ int main(int argc, char **argv, char * const *envp) {
builder.setRelocationModel(RelocModel);
builder.setCodeModel(CMModel);
builder.setErrorStr(&ErrorMsg);
+ builder.setJITMemoryManager(ForceInterpreter ? 0 :
+ JITMemoryManager::CreateDefaultMemManager());
builder.setEngineKind(ForceInterpreter
? EngineKind::Interpreter
: EngineKind::JIT);
@@ -207,9 +210,11 @@ int main(int argc, char **argv, char * const *envp) {
if (!TargetTriple.empty())
Mod->setTargetTriple(Triple::normalize(TargetTriple));
- // Enable MCJIT, if desired.
- if (UseMCJIT)
+ // Enable MCJIT if desired.
+ if (UseMCJIT && !ForceInterpreter) {
builder.setUseMCJIT(true);
+ builder.setJITMemoryManager(JITMemoryManager::CreateDefaultMemManager());
+ }
CodeGenOpt::Level OLvl = CodeGenOpt::Default;
switch (OptLevel) {
@@ -233,7 +238,12 @@ int main(int argc, char **argv, char * const *envp) {
exit(1);
}
- EE->RegisterJITEventListener(createOProfileJITEventListener());
+ // The following functions have no effect if their respective profiling
+ // support wasn't enabled in the build configuration.
+ EE->RegisterJITEventListener(
+ JITEventListener::createOProfileJITEventListener());
+ EE->RegisterJITEventListener(
+ JITEventListener::createIntelJITEventListener());
EE->DisableLazyCompilation(NoLazyCompilation);
@@ -262,15 +272,15 @@ int main(int argc, char **argv, char * const *envp) {
return -1;
}
- // If the program doesn't explicitly call exit, we will need the Exit
- // function later on to make an explicit call, so get the function now.
+ // If the program doesn't explicitly call exit, we will need the Exit
+ // function later on to make an explicit call, so get the function now.
Constant *Exit = Mod->getOrInsertFunction("exit", Type::getVoidTy(Context),
Type::getInt32Ty(Context),
NULL);
-
+
// Reset errno to zero on entry to main.
errno = 0;
-
+
// Run static constructors.
EE->runStaticConstructorsDestructors(false);
@@ -287,8 +297,8 @@ int main(int argc, char **argv, char * const *envp) {
// Run static destructors.
EE->runStaticConstructorsDestructors(true);
-
- // If the program didn't call exit explicitly, we should call it now.
+
+ // If the program didn't call exit explicitly, we should call it now.
// This ensures that any atexit handlers get called correctly.
if (Function *ExitF = dyn_cast<Function>(Exit)) {
std::vector<GenericValue> Args;
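A condensed sketch of the EngineBuilder wiring this change establishes (assuming Mod is an already-parsed Module*; error handling elided):

    EngineBuilder builder(Mod);
    builder.setEngineKind(EngineKind::JIT);
    builder.setJITMemoryManager(JITMemoryManager::CreateDefaultMemManager());
    builder.setUseMCJIT(true); // the MCJIT path also installs a default memory manager
    ExecutionEngine *EE = builder.create();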
diff --git a/contrib/llvm/tools/llvm-ar/CMakeLists.txt b/contrib/llvm/tools/llvm-ar/CMakeLists.txt
deleted file mode 100644
index c8b0b72..0000000
--- a/contrib/llvm/tools/llvm-ar/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-set(LLVM_LINK_COMPONENTS archive)
-set(LLVM_REQUIRES_EH 1)
-
-add_llvm_tool(llvm-ar
- llvm-ar.cpp
- )
-
-# TODO: Support check-local.
diff --git a/contrib/llvm/tools/llvm-ar/Makefile b/contrib/llvm/tools/llvm-ar/Makefile
deleted file mode 100644
index e4fe4e8..0000000
--- a/contrib/llvm/tools/llvm-ar/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
-##===- tools/llvm-ar/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-
-TOOLNAME = llvm-ar
-LINK_COMPONENTS = archive
-REQUIRES_EH := 1
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
-
-check-local::
- $(Echo) Checking llvm-ar
- $(Verb) $(ToolDir)/llvm-ar zRrS nada.a .
- $(Verb) $(ToolDir)/llvm-ar tv nada.a | \
- grep Debug/llvm-ar.d >/dev/null 2>&1
- $(Verb) $(RM) -f nada.a
diff --git a/contrib/llvm/tools/llvm-as/CMakeLists.txt b/contrib/llvm/tools/llvm-as/CMakeLists.txt
deleted file mode 100644
index eef4a13..0000000
--- a/contrib/llvm/tools/llvm-as/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-set(LLVM_LINK_COMPONENTS asmparser bitwriter)
-set(LLVM_REQUIRES_EH 1)
-
-add_llvm_tool(llvm-as
- llvm-as.cpp
- )
diff --git a/contrib/llvm/tools/llvm-as/Makefile b/contrib/llvm/tools/llvm-as/Makefile
deleted file mode 100644
index e1e5853..0000000
--- a/contrib/llvm/tools/llvm-as/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- tools/llvm-as/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llvm-as
-LINK_COMPONENTS := asmparser bitwriter
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-as/llvm-as.cpp b/contrib/llvm/tools/llvm-as/llvm-as.cpp
index c1661cd..1def9a4 100644
--- a/contrib/llvm/tools/llvm-as/llvm-as.cpp
+++ b/contrib/llvm/tools/llvm-as/llvm-as.cpp
@@ -96,7 +96,7 @@ int main(int argc, char **argv) {
SMDiagnostic Err;
std::auto_ptr<Module> M(ParseAssemblyFile(InputFilename, Err, Context));
if (M.get() == 0) {
- Err.Print(argv[0], errs());
+ Err.print(argv[0], errs());
return 1;
}
diff --git a/contrib/llvm/tools/llvm-bcanalyzer/CMakeLists.txt b/contrib/llvm/tools/llvm-bcanalyzer/CMakeLists.txt
deleted file mode 100644
index 732bc32..0000000
--- a/contrib/llvm/tools/llvm-bcanalyzer/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-set(LLVM_LINK_COMPONENTS bitreader)
-set(LLVM_REQUIRES_EH 1)
-
-add_llvm_tool(llvm-bcanalyzer
- llvm-bcanalyzer.cpp
- )
diff --git a/contrib/llvm/tools/llvm-bcanalyzer/Makefile b/contrib/llvm/tools/llvm-bcanalyzer/Makefile
deleted file mode 100644
index 488387d..0000000
--- a/contrib/llvm/tools/llvm-bcanalyzer/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- tools/llvm-bcanalyzer/Makefile ----------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-
-TOOLNAME = llvm-bcanalyzer
-LINK_COMPONENTS := bitreader
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp b/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
index 4ada64a..d630087 100644
--- a/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
+++ b/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
@@ -102,14 +102,13 @@ static const char *GetBlockName(unsigned BlockID,
default: return 0;
case bitc::MODULE_BLOCK_ID: return "MODULE_BLOCK";
case bitc::PARAMATTR_BLOCK_ID: return "PARAMATTR_BLOCK";
- case bitc::TYPE_BLOCK_ID_OLD: return "TYPE_BLOCK_ID_OLD";
case bitc::TYPE_BLOCK_ID_NEW: return "TYPE_BLOCK_ID";
case bitc::CONSTANTS_BLOCK_ID: return "CONSTANTS_BLOCK";
case bitc::FUNCTION_BLOCK_ID: return "FUNCTION_BLOCK";
- case bitc::TYPE_SYMTAB_BLOCK_ID_OLD: return "TYPE_SYMTAB_OLD";
case bitc::VALUE_SYMTAB_BLOCK_ID: return "VALUE_SYMTAB";
case bitc::METADATA_BLOCK_ID: return "METADATA_BLOCK";
case bitc::METADATA_ATTACHMENT_ID: return "METADATA_ATTACHMENT_BLOCK";
+ case bitc::USELIST_BLOCK_ID: return "USELIST_BLOCK_ID";
}
}
@@ -163,7 +162,6 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
default: return 0;
case bitc::PARAMATTR_CODE_ENTRY: return "ENTRY";
}
- case bitc::TYPE_BLOCK_ID_OLD:
case bitc::TYPE_BLOCK_ID_NEW:
switch (CodeID) {
default: return 0;
@@ -175,8 +173,6 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
case bitc::TYPE_CODE_OPAQUE: return "OPAQUE";
case bitc::TYPE_CODE_INTEGER: return "INTEGER";
case bitc::TYPE_CODE_POINTER: return "POINTER";
- case bitc::TYPE_CODE_FUNCTION: return "FUNCTION";
- case bitc::TYPE_CODE_STRUCT_OLD: return "STRUCT_OLD";
case bitc::TYPE_CODE_ARRAY: return "ARRAY";
case bitc::TYPE_CODE_VECTOR: return "VECTOR";
case bitc::TYPE_CODE_X86_FP80: return "X86_FP80";
@@ -186,6 +182,7 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
case bitc::TYPE_CODE_STRUCT_ANON: return "STRUCT_ANON";
case bitc::TYPE_CODE_STRUCT_NAME: return "STRUCT_NAME";
case bitc::TYPE_CODE_STRUCT_NAMED: return "STRUCT_NAMED";
+ case bitc::TYPE_CODE_FUNCTION: return "FUNCTION";
}
case bitc::CONSTANTS_BLOCK_ID:
@@ -211,6 +208,8 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
case bitc::CST_CODE_CE_CMP: return "CE_CMP";
case bitc::CST_CODE_INLINEASM: return "INLINEASM";
case bitc::CST_CODE_CE_SHUFVEC_EX: return "CE_SHUFVEC_EX";
+ case bitc::CST_CODE_BLOCKADDRESS: return "CST_CODE_BLOCKADDRESS";
+ case bitc::CST_CODE_DATA: return "DATA";
}
case bitc::FUNCTION_BLOCK_ID:
switch (CodeID) {
@@ -231,7 +230,6 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
case bitc::FUNC_CODE_INST_BR: return "INST_BR";
case bitc::FUNC_CODE_INST_SWITCH: return "INST_SWITCH";
case bitc::FUNC_CODE_INST_INVOKE: return "INST_INVOKE";
- case bitc::FUNC_CODE_INST_UNWIND: return "INST_UNWIND";
case bitc::FUNC_CODE_INST_UNREACHABLE: return "INST_UNREACHABLE";
case bitc::FUNC_CODE_INST_PHI: return "INST_PHI";
@@ -247,11 +245,6 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
case bitc::FUNC_CODE_INST_CALL: return "INST_CALL";
case bitc::FUNC_CODE_DEBUG_LOC: return "DEBUG_LOC";
}
- case bitc::TYPE_SYMTAB_BLOCK_ID_OLD:
- switch (CodeID) {
- default: return 0;
- case bitc::TST_CODE_ENTRY: return "ENTRY";
- }
case bitc::VALUE_SYMTAB_BLOCK_ID:
switch (CodeID) {
default: return 0;
@@ -273,6 +266,11 @@ static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
case bitc::METADATA_FN_NODE: return "METADATA_FN_NODE";
case bitc::METADATA_NAMED_NODE: return "METADATA_NAMED_NODE";
}
+ case bitc::USELIST_BLOCK_ID:
+ switch (CodeID) {
+ default: return 0;
+ case bitc::USELIST_CODE_ENTRY: return "USELIST_CODE_ENTRY";
+ }
}
}
@@ -333,7 +331,7 @@ static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
// BLOCKINFO is a special part of the stream.
if (BlockID == bitc::BLOCKINFO_BLOCK_ID) {
- if (Dump) errs() << Indent << "<BLOCKINFO_BLOCK/>\n";
+ if (Dump) outs() << Indent << "<BLOCKINFO_BLOCK/>\n";
if (Stream.ReadBlockInfoBlock())
return Error("Malformed BlockInfoBlock");
uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
@@ -347,16 +345,16 @@ static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
const char *BlockName = 0;
if (Dump) {
- errs() << Indent << "<";
+ outs() << Indent << "<";
if ((BlockName = GetBlockName(BlockID, *Stream.getBitStreamReader())))
- errs() << BlockName;
+ outs() << BlockName;
else
- errs() << "UnknownBlock" << BlockID;
+ outs() << "UnknownBlock" << BlockID;
if (NonSymbolic && BlockName)
- errs() << " BlockID=" << BlockID;
+ outs() << " BlockID=" << BlockID;
- errs() << " NumWords=" << NumWords
+ outs() << " NumWords=" << NumWords
<< " BlockCodeSize=" << Stream.GetAbbrevIDWidth() << ">\n";
}
@@ -378,11 +376,11 @@ static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
BlockStats.NumBits += BlockBitEnd-BlockBitStart;
if (Dump) {
- errs() << Indent << "</";
+ outs() << Indent << "</";
if (BlockName)
- errs() << BlockName << ">\n";
+ outs() << BlockName << ">\n";
else
- errs() << "UnknownBlock" << BlockID << ">\n";
+ outs() << "UnknownBlock" << BlockID << ">\n";
}
return false;
}
@@ -424,25 +422,25 @@ static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
BlockStats.CodeFreq[Code].NumAbbrev++;
if (Dump) {
- errs() << Indent << " <";
+ outs() << Indent << " <";
if (const char *CodeName =
GetCodeName(Code, BlockID, *Stream.getBitStreamReader()))
- errs() << CodeName;
+ outs() << CodeName;
else
- errs() << "UnknownCode" << Code;
+ outs() << "UnknownCode" << Code;
if (NonSymbolic &&
GetCodeName(Code, BlockID, *Stream.getBitStreamReader()))
- errs() << " codeid=" << Code;
+ outs() << " codeid=" << Code;
if (AbbrevID != bitc::UNABBREV_RECORD)
- errs() << " abbrevid=" << AbbrevID;
+ outs() << " abbrevid=" << AbbrevID;
for (unsigned i = 0, e = Record.size(); i != e; ++i)
- errs() << " op" << i << "=" << (int64_t)Record[i];
+ outs() << " op" << i << "=" << (int64_t)Record[i];
- errs() << "/>";
+ outs() << "/>";
if (BlobStart) {
- errs() << " blob data = ";
+ outs() << " blob data = ";
bool BlobIsPrintable = true;
for (unsigned i = 0; i != BlobLen; ++i)
if (!isprint(BlobStart[i])) {
@@ -451,12 +449,12 @@ static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
}
if (BlobIsPrintable)
- errs() << "'" << std::string(BlobStart, BlobStart+BlobLen) <<"'";
+ outs() << "'" << std::string(BlobStart, BlobStart+BlobLen) <<"'";
else
- errs() << "unprintable, " << BlobLen << " bytes.";
+ outs() << "unprintable, " << BlobLen << " bytes.";
}
- errs() << "\n";
+ outs() << "\n";
}
break;
@@ -485,13 +483,13 @@ static int AnalyzeBitcode() {
if (MemBuf->getBufferSize() & 3)
return Error("Bitcode stream should be a multiple of 4 bytes in length");
- unsigned char *BufPtr = (unsigned char *)MemBuf->getBufferStart();
- unsigned char *EndBufPtr = BufPtr+MemBuf->getBufferSize();
+ const unsigned char *BufPtr = (unsigned char *)MemBuf->getBufferStart();
+ const unsigned char *EndBufPtr = BufPtr+MemBuf->getBufferSize();
// If we have a wrapper header, parse it and ignore the non-bc file contents.
// The magic number is 0x0B17C0DE stored in little endian.
if (isBitcodeWrapper(BufPtr, EndBufPtr))
- if (SkipBitcodeWrapperHeader(BufPtr, EndBufPtr))
+ if (SkipBitcodeWrapperHeader(BufPtr, EndBufPtr, true))
return Error("Invalid bitcode wrapper header");
BitstreamReader StreamFile(BufPtr, EndBufPtr);
@@ -527,59 +525,58 @@ static int AnalyzeBitcode() {
++NumTopBlocks;
}
- if (Dump) errs() << "\n\n";
+ if (Dump) outs() << "\n\n";
uint64_t BufferSizeBits = (EndBufPtr-BufPtr)*CHAR_BIT;
// Print a summary of the read file.
- errs() << "Summary of " << InputFilename << ":\n";
- errs() << " Total size: ";
+ outs() << "Summary of " << InputFilename << ":\n";
+ outs() << " Total size: ";
PrintSize(BufferSizeBits);
- errs() << "\n";
- errs() << " Stream type: ";
+ outs() << "\n";
+ outs() << " Stream type: ";
switch (CurStreamType) {
- default: assert(0 && "Unknown bitstream type");
- case UnknownBitstream: errs() << "unknown\n"; break;
- case LLVMIRBitstream: errs() << "LLVM IR\n"; break;
+ case UnknownBitstream: outs() << "unknown\n"; break;
+ case LLVMIRBitstream: outs() << "LLVM IR\n"; break;
}
- errs() << " # Toplevel Blocks: " << NumTopBlocks << "\n";
- errs() << "\n";
+ outs() << " # Toplevel Blocks: " << NumTopBlocks << "\n";
+ outs() << "\n";
// Emit per-block stats.
- errs() << "Per-block Summary:\n";
+ outs() << "Per-block Summary:\n";
for (std::map<unsigned, PerBlockIDStats>::iterator I = BlockIDStats.begin(),
E = BlockIDStats.end(); I != E; ++I) {
- errs() << " Block ID #" << I->first;
+ outs() << " Block ID #" << I->first;
if (const char *BlockName = GetBlockName(I->first, StreamFile))
- errs() << " (" << BlockName << ")";
- errs() << ":\n";
+ outs() << " (" << BlockName << ")";
+ outs() << ":\n";
const PerBlockIDStats &Stats = I->second;
- errs() << " Num Instances: " << Stats.NumInstances << "\n";
- errs() << " Total Size: ";
+ outs() << " Num Instances: " << Stats.NumInstances << "\n";
+ outs() << " Total Size: ";
PrintSize(Stats.NumBits);
- errs() << "\n";
+ outs() << "\n";
double pct = (Stats.NumBits * 100.0) / BufferSizeBits;
errs() << " Percent of file: " << format("%2.4f%%", pct) << "\n";
if (Stats.NumInstances > 1) {
- errs() << " Average Size: ";
+ outs() << " Average Size: ";
PrintSize(Stats.NumBits/(double)Stats.NumInstances);
- errs() << "\n";
- errs() << " Tot/Avg SubBlocks: " << Stats.NumSubBlocks << "/"
+ outs() << "\n";
+ outs() << " Tot/Avg SubBlocks: " << Stats.NumSubBlocks << "/"
<< Stats.NumSubBlocks/(double)Stats.NumInstances << "\n";
- errs() << " Tot/Avg Abbrevs: " << Stats.NumAbbrevs << "/"
+ outs() << " Tot/Avg Abbrevs: " << Stats.NumAbbrevs << "/"
<< Stats.NumAbbrevs/(double)Stats.NumInstances << "\n";
- errs() << " Tot/Avg Records: " << Stats.NumRecords << "/"
+ outs() << " Tot/Avg Records: " << Stats.NumRecords << "/"
<< Stats.NumRecords/(double)Stats.NumInstances << "\n";
} else {
- errs() << " Num SubBlocks: " << Stats.NumSubBlocks << "\n";
- errs() << " Num Abbrevs: " << Stats.NumAbbrevs << "\n";
- errs() << " Num Records: " << Stats.NumRecords << "\n";
+ outs() << " Num SubBlocks: " << Stats.NumSubBlocks << "\n";
+ outs() << " Num Abbrevs: " << Stats.NumAbbrevs << "\n";
+ outs() << " Num Records: " << Stats.NumRecords << "\n";
}
if (Stats.NumRecords) {
double pct = (Stats.NumAbbreviatedRecords * 100.0) / Stats.NumRecords;
- errs() << " Percent Abbrevs: " << format("%2.4f%%", pct) << "\n";
+ outs() << " Percent Abbrevs: " << format("%2.4f%%", pct) << "\n";
}
- errs() << "\n";
+ outs() << "\n";
// Print a histogram of the codes we see.
if (!NoHistogram && !Stats.CodeFreq.empty()) {
@@ -590,7 +587,7 @@ static int AnalyzeBitcode() {
std::stable_sort(FreqPairs.begin(), FreqPairs.end());
std::reverse(FreqPairs.begin(), FreqPairs.end());
- errs() << "\tRecord Histogram:\n";
+ outs() << "\tRecord Histogram:\n";
fprintf(stderr, "\t\t Count # Bits %% Abv Record Kind\n");
for (unsigned i = 0, e = FreqPairs.size(); i != e; ++i) {
const PerRecordStats &RecStats = Stats.CodeFreq[FreqPairs[i].second];
@@ -610,7 +607,7 @@ static int AnalyzeBitcode() {
else
fprintf(stderr, "UnknownCode%d\n", FreqPairs[i].second);
}
- errs() << "\n";
+ outs() << "\n";
}
}
diff --git a/contrib/llvm/tools/llvm-diff/CMakeLists.txt b/contrib/llvm/tools/llvm-diff/CMakeLists.txt
deleted file mode 100644
index c59d69e..0000000
--- a/contrib/llvm/tools/llvm-diff/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-set(LLVM_LINK_COMPONENTS support asmparser bitreader)
-
-add_llvm_tool(llvm-diff
- llvm-diff.cpp
- DiffConsumer.cpp
- DiffLog.cpp
- DifferenceEngine.cpp
- )
diff --git a/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp b/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp
index c23e8fb..0528039 100644
--- a/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp
+++ b/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp
@@ -44,6 +44,8 @@ static void ComputeNumbering(Function *F, DenseMap<Value*,unsigned> &Numbering){
}
+void Consumer::anchor() { }
+
void DiffConsumer::printValue(Value *V, bool isL) {
if (V->hasName()) {
out << (isa<GlobalValue>(V) ? '@' : '%') << V->getName();
@@ -64,6 +66,10 @@ void DiffConsumer::printValue(Value *V, bool isL) {
}
return;
}
+ if (isa<Constant>(V)) {
+ out << *V;
+ return;
+ }
unsigned N = contexts.size();
while (N > 0) {
diff --git a/contrib/llvm/tools/llvm-diff/DiffConsumer.h b/contrib/llvm/tools/llvm-diff/DiffConsumer.h
index b95d427..2060fe1 100644
--- a/contrib/llvm/tools/llvm-diff/DiffConsumer.h
+++ b/contrib/llvm/tools/llvm-diff/DiffConsumer.h
@@ -29,6 +29,7 @@ namespace llvm {
/// The interface for consumers of difference data.
class Consumer {
+ virtual void anchor();
public:
/// Record that a local context has been entered. Left and
/// Right are IR "containers" of some sort which are being
diff --git a/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp b/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp
index b240d8c..a5a99f5 100644
--- a/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp
+++ b/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp
@@ -319,15 +319,19 @@ class FunctionDifferenceEngine {
bool Difference = false;
DenseMap<ConstantInt*,BasicBlock*> LCases;
- for (unsigned I = 1, E = LI->getNumCases(); I != E; ++I)
- LCases[LI->getCaseValue(I)] = LI->getSuccessor(I);
- for (unsigned I = 1, E = RI->getNumCases(); I != E; ++I) {
- ConstantInt *CaseValue = RI->getCaseValue(I);
+
+ for (SwitchInst::CaseIt I = LI->case_begin(), E = LI->case_end();
+ I != E; ++I)
+ LCases[I.getCaseValue()] = I.getCaseSuccessor();
+
+ for (SwitchInst::CaseIt I = RI->case_begin(), E = RI->case_end();
+ I != E; ++I) {
+ ConstantInt *CaseValue = I.getCaseValue();
BasicBlock *LCase = LCases[CaseValue];
if (LCase) {
- if (TryUnify) tryUnify(LCase, RI->getSuccessor(I));
+ if (TryUnify) tryUnify(LCase, I.getCaseSuccessor());
LCases.erase(CaseValue);
- } else if (!Difference) {
+ } else if (Complain || !Difference) {
if (Complain)
Engine.logf("right switch has extra case %r") << CaseValue;
Difference = true;
@@ -628,6 +632,8 @@ void FunctionDifferenceEngine::runBlockDiff(BasicBlock::iterator LStart,
}
+void DifferenceEngine::Oracle::anchor() { }
+
void DifferenceEngine::diff(Function *L, Function *R) {
Context C(*this, L, R);
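In isolation, the new SwitchInst case-iterator API used above reads as follows (sketch; SI is any SwitchInst*):

    for (SwitchInst::CaseIt I = SI->case_begin(), E = SI->case_end(); I != E; ++I) {
      ConstantInt *Val = I.getCaseValue();     // case label
      BasicBlock *Dest = I.getCaseSuccessor(); // branch target
    }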
diff --git a/contrib/llvm/tools/llvm-diff/DifferenceEngine.h b/contrib/llvm/tools/llvm-diff/DifferenceEngine.h
index 5b4f80b..7ea79e4 100644
--- a/contrib/llvm/tools/llvm-diff/DifferenceEngine.h
+++ b/contrib/llvm/tools/llvm-diff/DifferenceEngine.h
@@ -50,7 +50,9 @@ namespace llvm {
/// An oracle for answering whether two values are equivalent as
/// operands.
- struct Oracle {
+ class Oracle {
+ virtual void anchor();
+ public:
virtual bool operator()(Value *L, Value *R) = 0;
protected:
diff --git a/contrib/llvm/tools/llvm-diff/Makefile b/contrib/llvm/tools/llvm-diff/Makefile
deleted file mode 100644
index 58e49fa..0000000
--- a/contrib/llvm/tools/llvm-diff/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- tools/llvm-diff/Makefile ----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llvm-diff
-LINK_COMPONENTS := asmparser bitreader
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-diff/llvm-diff.cpp b/contrib/llvm/tools/llvm-diff/llvm-diff.cpp
index 76853f1..774169b 100644
--- a/contrib/llvm/tools/llvm-diff/llvm-diff.cpp
+++ b/contrib/llvm/tools/llvm-diff/llvm-diff.cpp
@@ -38,7 +38,7 @@ static Module *ReadModule(LLVMContext &Context, StringRef Name) {
SMDiagnostic Diag;
Module *M = ParseIRFile(Name, Diag, Context);
if (!M)
- Diag.Print("llvmdiff", errs());
+ Diag.print("llvm-diff", errs());
return M;
}
diff --git a/contrib/llvm/tools/llvm-dis/CMakeLists.txt b/contrib/llvm/tools/llvm-dis/CMakeLists.txt
deleted file mode 100644
index 3125f8a..0000000
--- a/contrib/llvm/tools/llvm-dis/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-set(LLVM_LINK_COMPONENTS bitreader analysis)
-set(LLVM_REQUIRES_EH 1)
-
-add_llvm_tool(llvm-dis
- llvm-dis.cpp
- )
diff --git a/contrib/llvm/tools/llvm-dis/Makefile b/contrib/llvm/tools/llvm-dis/Makefile
deleted file mode 100644
index be71100..0000000
--- a/contrib/llvm/tools/llvm-dis/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- tools/llvm-dis/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-
-TOOLNAME = llvm-dis
-LINK_COMPONENTS := bitreader analysis
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-dis/llvm-dis.cpp b/contrib/llvm/tools/llvm-dis/llvm-dis.cpp
index 9020a52..6450ea6 100644
--- a/contrib/llvm/tools/llvm-dis/llvm-dis.cpp
+++ b/contrib/llvm/tools/llvm-dis/llvm-dis.cpp
@@ -24,6 +24,7 @@
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Assembly/AssemblyAnnotationWriter.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/DataStream.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -126,12 +127,19 @@ int main(int argc, char **argv) {
std::string ErrorMessage;
std::auto_ptr<Module> M;
- {
- OwningPtr<MemoryBuffer> BufferPtr;
- if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFilename, BufferPtr))
- ErrorMessage = ec.message();
+ // Use the bitcode streaming interface
+ DataStreamer *streamer = getDataFileStreamer(InputFilename, &ErrorMessage);
+ if (streamer) {
+ std::string DisplayFilename;
+ if (InputFilename == "-")
+ DisplayFilename = "<stdin>";
else
- M.reset(ParseBitcodeFile(BufferPtr.get(), Context, &ErrorMessage));
+ DisplayFilename = InputFilename;
+ M.reset(getStreamedBitcodeModule(DisplayFilename, streamer, Context,
+ &ErrorMessage));
+ if (M.get() != 0 && M->MaterializeAllPermanently(&ErrorMessage)) {
+ M.reset();
+ }
}
if (M.get() == 0) {
@@ -183,4 +191,3 @@ int main(int argc, char **argv) {
return 0;
}
-
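The streaming read pattern introduced here, reduced to its essentials (sketch; cleanup elided):

    std::string ErrMsg;
    DataStreamer *S = getDataFileStreamer(InputFilename, &ErrMsg); // 0 on failure
    Module *M = S ? getStreamedBitcodeModule(Name, S, Context, &ErrMsg) : 0;
    if (M && M->MaterializeAllPermanently(&ErrMsg)) // force the lazy reader to finish
      M = 0;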
diff --git a/contrib/llvm/tools/llvm-extract/CMakeLists.txt b/contrib/llvm/tools/llvm-extract/CMakeLists.txt
deleted file mode 100644
index a4e3266..0000000
--- a/contrib/llvm/tools/llvm-extract/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS asmparser ipo bitreader bitwriter)
-
-add_llvm_tool(llvm-extract
- llvm-extract.cpp
- )
diff --git a/contrib/llvm/tools/llvm-extract/Makefile b/contrib/llvm/tools/llvm-extract/Makefile
deleted file mode 100644
index 5672aa3..0000000
--- a/contrib/llvm/tools/llvm-extract/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- tools/llvm-extract/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-
-TOOLNAME = llvm-extract
-LINK_COMPONENTS := ipo bitreader bitwriter asmparser
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-extract/llvm-extract.cpp b/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
index f6227ee..2ed11c5 100644
--- a/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
+++ b/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
@@ -90,7 +90,7 @@ int main(int argc, char **argv) {
M.reset(getLazyIRFileModule(InputFilename, Err, Context));
if (M.get() == 0) {
- Err.Print(argv[0], errs());
+ Err.print(argv[0], errs());
return 1;
}
@@ -99,7 +99,7 @@ int main(int argc, char **argv) {
// Figure out which globals we should extract.
for (size_t i = 0, e = ExtractGlobals.size(); i != e; ++i) {
- GlobalValue *GV = M.get()->getNamedGlobal(ExtractGlobals[i]);
+ GlobalValue *GV = M->getNamedGlobal(ExtractGlobals[i]);
if (!GV) {
errs() << argv[0] << ": program doesn't contain global named '"
<< ExtractGlobals[i] << "'!\n";
@@ -117,8 +117,8 @@ int main(int argc, char **argv) {
"invalid regex: " << Error;
}
bool match = false;
- for (Module::global_iterator GV = M.get()->global_begin(),
- E = M.get()->global_end(); GV != E; GV++) {
+ for (Module::global_iterator GV = M->global_begin(),
+ E = M->global_end(); GV != E; GV++) {
if (RegEx.match(GV->getName())) {
GVs.insert(&*GV);
match = true;
@@ -133,7 +133,7 @@ int main(int argc, char **argv) {
// Figure out which functions we should extract.
for (size_t i = 0, e = ExtractFuncs.size(); i != e; ++i) {
- GlobalValue *GV = M.get()->getFunction(ExtractFuncs[i]);
+ GlobalValue *GV = M->getFunction(ExtractFuncs[i]);
if (!GV) {
errs() << argv[0] << ": program doesn't contain function named '"
<< ExtractFuncs[i] << "'!\n";
@@ -151,7 +151,7 @@ int main(int argc, char **argv) {
"invalid regex: " << Error;
}
bool match = false;
- for (Module::iterator F = M.get()->begin(), E = M.get()->end(); F != E;
+ for (Module::iterator F = M->begin(), E = M->end(); F != E;
F++) {
if (RegEx.match(F->getName())) {
GVs.insert(&*F);
diff --git a/contrib/llvm/tools/llvm-ld/CMakeLists.txt b/contrib/llvm/tools/llvm-ld/CMakeLists.txt
deleted file mode 100644
index 370bcb4..0000000
--- a/contrib/llvm/tools/llvm-ld/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-set(LLVM_LINK_COMPONENTS ipo scalaropts linker archive bitwriter)
-
-add_llvm_tool(llvm-ld
- Optimize.cpp
- llvm-ld.cpp
- )
-
-add_dependencies(llvm-ld llvm-stub)
diff --git a/contrib/llvm/tools/llvm-ld/Makefile b/contrib/llvm/tools/llvm-ld/Makefile
deleted file mode 100644
index 1ef9bf1..0000000
--- a/contrib/llvm/tools/llvm-ld/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- tools/llvm-ld/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-
-TOOLNAME = llvm-ld
-LINK_COMPONENTS = ipo scalaropts linker archive bitwriter
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-ld/llvm-ld.cpp b/contrib/llvm/tools/llvm-ld/llvm-ld.cpp
index 6b4c3c7..ecf0476 100644
--- a/contrib/llvm/tools/llvm-ld/llvm-ld.cpp
+++ b/contrib/llvm/tools/llvm-ld/llvm-ld.cpp
@@ -37,7 +37,6 @@
#include "llvm/Support/SystemUtils.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/Signals.h"
-#include "llvm/Config/config.h"
#include <memory>
#include <cstring>
using namespace llvm;
@@ -424,7 +423,7 @@ static void EmitShellScript(char **argv, Module *M) {
PrintAndExit(ErrMsg, M);
return;
-#endif
+#else
// Output the script to start the program...
std::string ErrorInfo;
@@ -470,6 +469,7 @@ static void EmitShellScript(char **argv, Module *M) {
}
Out2.os() << " " << BitcodeOutputFilename << " ${1+\"$@\"}\n";
Out2.keep();
+#endif
}
// BuildLinkItems -- This function generates a LinkItemList for the LinkItems
diff --git a/contrib/llvm/tools/llvm-link/CMakeLists.txt b/contrib/llvm/tools/llvm-link/CMakeLists.txt
deleted file mode 100644
index 11933f7..0000000
--- a/contrib/llvm/tools/llvm-link/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS linker bitreader bitwriter asmparser)
-
-add_llvm_tool(llvm-link
- llvm-link.cpp
- )
diff --git a/contrib/llvm/tools/llvm-link/Makefile b/contrib/llvm/tools/llvm-link/Makefile
deleted file mode 100644
index 2637018..0000000
--- a/contrib/llvm/tools/llvm-link/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- tools/llvm-link/Makefile ----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-
-TOOLNAME = llvm-link
-LINK_COMPONENTS = linker bitreader bitwriter asmparser
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-link/llvm-link.cpp b/contrib/llvm/tools/llvm-link/llvm-link.cpp
index 95ad1ca..378a833 100644
--- a/contrib/llvm/tools/llvm-link/llvm-link.cpp
+++ b/contrib/llvm/tools/llvm-link/llvm-link.cpp
@@ -69,7 +69,7 @@ static inline std::auto_ptr<Module> LoadFile(const char *argv0,
Result = ParseIRFile(FNStr, Err, Context);
if (Result) return std::auto_ptr<Module>(Result); // Load successful!
- Err.Print(argv0, errs());
+ Err.print(argv0, errs());
return std::auto_ptr<Module>();
}
diff --git a/contrib/llvm/tools/llvm-mc/CMakeLists.txt b/contrib/llvm/tools/llvm-mc/CMakeLists.txt
deleted file mode 100644
index 805caf4..0000000
--- a/contrib/llvm/tools/llvm-mc/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-set(LLVM_LINK_COMPONENTS ${LLVM_TARGETS_TO_BUILD} support MC MCParser MCDisassembler)
-
-add_llvm_tool(llvm-mc
- llvm-mc.cpp
- Disassembler.cpp
- )
diff --git a/contrib/llvm/tools/llvm-mc/Disassembler.cpp b/contrib/llvm/tools/llvm-mc/Disassembler.cpp
index a9381b5..a8cd7c1 100644
--- a/contrib/llvm/tools/llvm-mc/Disassembler.cpp
+++ b/contrib/llvm/tools/llvm-mc/Disassembler.cpp
@@ -21,6 +21,8 @@
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Triple.h"
@@ -72,14 +74,16 @@ static bool PrintInsts(const MCDisassembler &DisAsm,
switch (S) {
case MCDisassembler::Fail:
SM.PrintMessage(SMLoc::getFromPointer(Bytes[Index].second),
- "invalid instruction encoding", "warning");
+ SourceMgr::DK_Warning,
+ "invalid instruction encoding");
if (Size == 0)
Size = 1; // skip illegible bytes
break;
case MCDisassembler::SoftFail:
SM.PrintMessage(SMLoc::getFromPointer(Bytes[Index].second),
- "potentially undefined instruction encoding", "warning");
+ SourceMgr::DK_Warning,
+ "potentially undefined instruction encoding");
// Fall through
case MCDisassembler::Success:
@@ -125,8 +129,8 @@ static bool ByteArrayFromString(ByteArrayTy &ByteArray,
unsigned ByteVal;
if (Value.getAsInteger(0, ByteVal) || ByteVal > 255) {
// If we have an error, print it and skip to the end of line.
- SM.PrintMessage(SMLoc::getFromPointer(Value.data()),
- "invalid input token", "error");
+ SM.PrintMessage(SMLoc::getFromPointer(Value.data()), SourceMgr::DK_Error,
+ "invalid input token");
Str = Str.substr(Str.find('\n'));
ByteArray.clear();
continue;
@@ -153,21 +157,34 @@ int Disassembler::disassemble(const Target &T,
return -1;
}
- OwningPtr<const MCSubtargetInfo> STI(T.createMCSubtargetInfo(Triple, Cpu, FeaturesStr));
+ OwningPtr<const MCSubtargetInfo> STI(T.createMCSubtargetInfo(Triple, Cpu,
+ FeaturesStr));
if (!STI) {
errs() << "error: no subtarget info for target " << Triple << "\n";
return -1;
}
-
+
OwningPtr<const MCDisassembler> DisAsm(T.createMCDisassembler(*STI));
if (!DisAsm) {
errs() << "error: no disassembler for target " << Triple << "\n";
return -1;
}
+ OwningPtr<const MCRegisterInfo> MRI(T.createMCRegInfo(Triple));
+ if (!MRI) {
+ errs() << "error: no register info for target " << Triple << "\n";
+ return -1;
+ }
+
+ OwningPtr<const MCInstrInfo> MII(T.createMCInstrInfo());
+ if (!MII) {
+ errs() << "error: no instruction info for target " << Triple << "\n";
+ return -1;
+ }
+
int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
- OwningPtr<MCInstPrinter> IP(T.createMCInstPrinter(AsmPrinterVariant,
- *AsmInfo, *STI));
+ OwningPtr<MCInstPrinter> IP(T.createMCInstPrinter(AsmPrinterVariant, *AsmInfo,
+ *MII, *MRI, *STI));
if (!IP) {
errs() << "error: no instruction printer for target " << Triple << '\n';
return -1;
@@ -247,7 +264,6 @@ int Disassembler::disassembleEnhanced(const std::string &TS,
break;
}
- EDDisassembler::initialize();
OwningPtr<EDDisassembler>
disassembler(EDDisassembler::getDisassembler(TS.c_str(), AS));
@@ -294,7 +310,6 @@ int Disassembler::disassembleEnhanced(const std::string &TS,
Out << operandIndex << "-";
switch (token->type()) {
- default: Out << "?"; break;
case EDToken::kTokenWhitespace: Out << "w"; break;
case EDToken::kTokenPunctuation: Out << "p"; break;
case EDToken::kTokenOpcode: Out << "o"; break;
@@ -365,4 +380,3 @@ int Disassembler::disassembleEnhanced(const std::string &TS,
return 0;
}
-
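The PrintMessage calls above reflect the new SourceMgr signature, which takes a diagnostic kind enumerator instead of a severity string; in isolation:

    SM.PrintMessage(Loc, SourceMgr::DK_Warning, "invalid instruction encoding");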
diff --git a/contrib/llvm/tools/llvm-mc/Makefile b/contrib/llvm/tools/llvm-mc/Makefile
deleted file mode 100644
index 934a6e4..0000000
--- a/contrib/llvm/tools/llvm-mc/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-##===- tools/llvm-mc/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llvm-mc
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-# Include this here so we can get the configuration of the targets
-# that have been configured for construction. We have to do this
-# early so we can set up LINK_COMPONENTS before including Makefile.rules
-include $(LEVEL)/Makefile.config
-
-LINK_COMPONENTS := $(TARGETS_TO_BUILD) MCDisassembler MCParser MC support
-
-include $(LLVM_SRC_ROOT)/Makefile.rules
-
diff --git a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
index 5fb3fdf..d882e01 100644
--- a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
+++ b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
@@ -70,9 +70,6 @@ RelaxAll("mc-relax-all", cl::desc("Relax all fixups"));
static cl::opt<bool>
NoExecStack("mc-no-exec-stack", cl::desc("File doesn't need an exec stack"));
-static cl::opt<bool>
-EnableLogging("enable-api-logging", cl::desc("Enable MC API logging"));
-
enum OutputFileType {
OFT_Null,
OFT_AssemblyFile,
@@ -152,6 +149,10 @@ NoInitialTextSection("n", cl::desc("Don't assume assembly file starts "
static cl::opt<bool>
SaveTempLabels("L", cl::desc("Don't discard temporary labels"));
+static cl::opt<bool>
+GenDwarfForAssembly("g", cl::desc("Generate dwarf debugging info for assembly "
+ "source files"));
+
enum ActionType {
AC_AsLex,
AC_Assemble,
@@ -175,7 +176,7 @@ Action(cl::desc("Action to perform:"),
static const Target *GetTarget(const char *ProgName) {
// Figure out the target triple.
if (TripleName.empty())
- TripleName = sys::getHostTriple();
+ TripleName = sys::getDefaultTargetTriple();
Triple TheTriple(Triple::normalize(TripleName));
const Target *TheTarget = 0;
@@ -230,6 +231,17 @@ static tool_output_file *GetOutputStream() {
return Out;
}
+static std::string DwarfDebugFlags;
+static void setDwarfDebugFlags(int argc, char **argv) {
+ if (!getenv("RC_DEBUG_OPTIONS"))
+ return;
+ for (int i = 0; i < argc; i++) {
+ DwarfDebugFlags += argv[i];
+ if (i + 1 < argc)
+ DwarfDebugFlags += " ";
+ }
+}
+
static int AsLexInput(const char *ProgName) {
OwningPtr<MemoryBuffer> BufferPtr;
if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFilename, BufferPtr)) {
@@ -267,7 +279,8 @@ static int AsLexInput(const char *ProgName) {
switch (Tok.getKind()) {
default:
- SrcMgr.PrintMessage(Lexer.getLoc(), "unknown token", "warning");
+ SrcMgr.PrintMessage(Lexer.getLoc(), SourceMgr::DK_Warning,
+ "unknown token");
Error = true;
break;
case AsmToken::Error:
@@ -370,12 +383,16 @@ static int AssembleInput(const char *ProgName) {
// FIXME: This is not pretty. MCContext has a ptr to MCObjectFileInfo and
// MCObjectFileInfo needs a MCContext reference in order to initialize itself.
OwningPtr<MCObjectFileInfo> MOFI(new MCObjectFileInfo());
- MCContext Ctx(*MAI, *MRI, MOFI.get());
+ MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr);
MOFI->InitMCObjectFileInfo(TripleName, RelocModel, CMModel, Ctx);
if (SaveTempLabels)
Ctx.setAllowTemporaryLabels(false);
+ Ctx.setGenDwarfForAssembly(GenDwarfForAssembly);
+ if (!DwarfDebugFlags.empty())
+ Ctx.setDwarfDebugFlags(StringRef(DwarfDebugFlags));
+
// Package up features to be passed to target/subtarget
std::string FeaturesStr;
if (MAttrs.size()) {
@@ -399,7 +416,7 @@ static int AssembleInput(const char *ProgName) {
// FIXME: There is a bit of code duplication with addPassesToEmitFile.
if (FileType == OFT_AssemblyFile) {
MCInstPrinter *IP =
- TheTarget->createMCInstPrinter(OutputAsmVariant, *MAI, *STI);
+ TheTarget->createMCInstPrinter(OutputAsmVariant, *MAI, *MCII, *MRI, *STI);
MCCodeEmitter *CE = 0;
MCAsmBackend *MAB = 0;
if (ShowEncoding) {
@@ -408,8 +425,10 @@ static int AssembleInput(const char *ProgName) {
}
Str.reset(TheTarget->createAsmStreamer(Ctx, FOS, /*asmverbose*/true,
/*useLoc*/ true,
- /*useCFI*/ true, IP, CE, MAB,
- ShowInst));
+ /*useCFI*/ true,
+ /*useDwarfDirectory*/ true,
+ IP, CE, MAB, ShowInst));
+
} else if (FileType == OFT_Null) {
Str.reset(createNullStreamer(Ctx));
} else {
@@ -421,10 +440,6 @@ static int AssembleInput(const char *ProgName) {
NoExecStack));
}
- if (EnableLogging) {
- Str.reset(createLoggingStreamer(Str.take(), errs()));
- }
-
OwningPtr<MCAsmParser> Parser(createMCAsmParser(SrcMgr, Ctx,
*Str.get(), *MAI));
OwningPtr<MCTargetAsmParser> TAP(TheTarget->createMCAsmParser(*STI, *Parser));
@@ -497,11 +512,14 @@ int main(int argc, char **argv) {
llvm::InitializeAllAsmParsers();
llvm::InitializeAllDisassemblers();
+ // Register the target printer for --version.
+ cl::AddExtraVersionPrinter(TargetRegistry::printRegisteredTargetsForVersion);
+
cl::ParseCommandLineOptions(argc, argv, "llvm machine code playground\n");
TripleName = Triple::normalize(TripleName);
+ setDwarfDebugFlags(argc, argv);
switch (Action) {
- default:
case AC_AsLex:
return AsLexInput(argv[0]);
case AC_Assemble:
@@ -511,7 +529,4 @@ int main(int argc, char **argv) {
case AC_EDisassemble:
return DisassembleInput(argv[0], true);
}
-
- return 0;
}
-
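The llvm-mc hunks above add a `-g` option, hand a SourceMgr to MCContext, and snapshot the command line into the DWARF debug-flags string whenever RC_DEBUG_OPTIONS is set. A minimal sketch of how an MC client could enable the same DWARF-for-assembly support, assuming the MCContext API as imported here (the context is presumed already constructed, and configureDwarf is a hypothetical helper):

// Sketch: turn on DWARF generation for assembled .s input, mirroring the
// new -g handling in llvm-mc above. configureDwarf is a hypothetical helper.
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCContext.h"
#include <string>

static void configureDwarf(llvm::MCContext &Ctx, int argc, char **argv) {
  Ctx.setGenDwarfForAssembly(true);      // what the new -g flag does
  std::string Flags;                     // what setDwarfDebugFlags() records
  for (int i = 0; i < argc; ++i) {
    Flags += argv[i];
    if (i + 1 < argc)
      Flags += " ";
  }
  if (!Flags.empty())
    Ctx.setDwarfDebugFlags(llvm::StringRef(Flags));
}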
diff --git a/contrib/llvm/tools/llvm-nm/CMakeLists.txt b/contrib/llvm/tools/llvm-nm/CMakeLists.txt
deleted file mode 100644
index b6cd80b..0000000
--- a/contrib/llvm/tools/llvm-nm/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS archive bitreader object)
-
-add_llvm_tool(llvm-nm
- llvm-nm.cpp
- )
diff --git a/contrib/llvm/tools/llvm-nm/Makefile b/contrib/llvm/tools/llvm-nm/Makefile
deleted file mode 100644
index 6bb4cd4..0000000
--- a/contrib/llvm/tools/llvm-nm/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- tools/llvm-nm/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-
-TOOLNAME = llvm-nm
-LINK_COMPONENTS = archive bitreader object
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
index e79d72d..8d9e51e 100644
--- a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
+++ b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
@@ -27,6 +27,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/Format.h"
@@ -60,6 +61,12 @@ namespace {
cl::alias UndefinedOnly2("u", cl::desc("Alias for --undefined-only"),
cl::aliasopt(UndefinedOnly));
+ cl::opt<bool> DynamicSyms("dynamic",
+ cl::desc("Display the dynamic symbols instead "
+ "of normal symbols."));
+ cl::alias DynamicSyms2("D", cl::desc("Alias for --dynamic"),
+ cl::aliasopt(DynamicSyms));
+
cl::opt<bool> DefinedOnly("defined-only",
cl::desc("Show only defined symbols"));
@@ -110,6 +117,19 @@ namespace {
std::string ToolName;
}
+
+static void error(Twine message, Twine path = Twine()) {
+ errs() << ToolName << ": " << path << ": " << message << ".\n";
+}
+
+static bool error(error_code ec, Twine path = Twine()) {
+ if (ec) {
+ error(ec.message(), path);
+ return true;
+ }
+ return false;
+}
+
namespace {
struct NMSymbol {
uint64_t Address;
@@ -144,14 +164,6 @@ namespace {
StringRef CurrentFilename;
typedef std::vector<NMSymbol> SymbolListT;
SymbolListT SymbolList;
-
- bool error(error_code ec) {
- if (!ec) return false;
-
- outs() << ToolName << ": error reading file: " << ec.message() << ".\n";
- outs().flush();
- return true;
- }
}
static void SortAndPrintSymbolList() {
@@ -192,9 +204,10 @@ static void SortAndPrintSymbolList() {
strcpy(SymbolSizeStr, " ");
if (i->Address != object::UnknownAddressOrSize)
- format("%08"PRIx64, i->Address).print(SymbolAddrStr, sizeof(SymbolAddrStr));
+ format("%08" PRIx64, i->Address).print(SymbolAddrStr,
+ sizeof(SymbolAddrStr));
if (i->Size != object::UnknownAddressOrSize)
- format("%08"PRIx64, i->Size).print(SymbolSizeStr, sizeof(SymbolSizeStr));
+ format("%08" PRIx64, i->Size).print(SymbolSizeStr, sizeof(SymbolSizeStr));
if (OutputFormat == posix) {
outs() << i->Name << " " << i->TypeChar << " "
@@ -271,13 +284,17 @@ static void DumpSymbolNamesFromModule(Module *M) {
static void DumpSymbolNamesFromObject(ObjectFile *obj) {
error_code ec;
- for (symbol_iterator i = obj->begin_symbols(),
- e = obj->end_symbols();
- i != e; i.increment(ec)) {
+ symbol_iterator ibegin = obj->begin_symbols();
+ symbol_iterator iend = obj->end_symbols();
+ if (DynamicSyms) {
+ ibegin = obj->begin_dynamic_symbols();
+ iend = obj->end_dynamic_symbols();
+ }
+ for (symbol_iterator i = ibegin; i != iend; i.increment(ec)) {
if (error(ec)) break;
- bool internal;
- if (error(i->isInternal(internal))) break;
- if (!DebugSyms && internal)
+ uint32_t symflags;
+ if (error(i->getFlags(symflags))) break;
+ if (!DebugSyms && (symflags & SymbolRef::SF_FormatSpecific))
continue;
NMSymbol s;
s.Size = object::UnknownAddressOrSize;
@@ -286,7 +303,7 @@ static void DumpSymbolNamesFromObject(ObjectFile *obj) {
if (error(i->getSize(s.Size))) break;
}
if (PrintAddress)
- if (error(i->getOffset(s.Address))) break;
+ if (error(i->getAddress(s.Address))) break;
if (error(i->getNMTypeChar(s.TypeChar))) break;
if (error(i->getName(s.Name))) break;
SymbolList.push_back(s);
@@ -297,38 +314,39 @@ static void DumpSymbolNamesFromObject(ObjectFile *obj) {
}
static void DumpSymbolNamesFromFile(std::string &Filename) {
+ if (Filename != "-" && !sys::fs::exists(Filename)) {
+ errs() << ToolName << ": '" << Filename << "': " << "No such file\n";
+ return;
+ }
+
+ OwningPtr<MemoryBuffer> Buffer;
+ if (error(MemoryBuffer::getFileOrSTDIN(Filename, Buffer), Filename))
+ return;
+
+ sys::fs::file_magic magic = sys::fs::identify_magic(Buffer->getBuffer());
+
LLVMContext &Context = getGlobalContext();
std::string ErrorMessage;
- sys::Path aPath(Filename);
- bool exists;
- if (sys::fs::exists(aPath.str(), exists) || !exists)
- errs() << ToolName << ": '" << Filename << "': " << "No such file\n";
- // Note: Currently we do not support reading an archive from stdin.
- if (Filename == "-" || aPath.isBitcodeFile()) {
- OwningPtr<MemoryBuffer> Buffer;
- if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, Buffer))
- ErrorMessage = ec.message();
+ if (magic == sys::fs::file_magic::bitcode) {
Module *Result = 0;
- if (Buffer.get())
- Result = ParseBitcodeFile(Buffer.get(), Context, &ErrorMessage);
-
+ Result = ParseBitcodeFile(Buffer.get(), Context, &ErrorMessage);
if (Result) {
DumpSymbolNamesFromModule(Result);
delete Result;
- } else
- errs() << ToolName << ": " << Filename << ": " << ErrorMessage << "\n";
-
- } else if (aPath.isArchive()) {
- OwningPtr<Binary> arch;
- if (error_code ec = object::createBinary(aPath.str(), arch)) {
- errs() << ToolName << ": " << Filename << ": " << ec.message() << ".\n";
+ } else {
+ error(ErrorMessage, Filename);
return;
}
+ } else if (magic == sys::fs::file_magic::archive) {
+ OwningPtr<Binary> arch;
+ if (error(object::createBinary(Buffer.take(), arch), Filename))
+ return;
+
if (object::Archive *a = dyn_cast<object::Archive>(arch.get())) {
for (object::Archive::child_iterator i = a->begin_children(),
e = a->end_children(); i != e; ++i) {
OwningPtr<Binary> child;
- if (error_code ec = i->getAsBinary(child)) {
+ if (i->getAsBinary(child)) {
// Try opening it as a bitcode file.
OwningPtr<MemoryBuffer> buff(i->getBuffer());
Module *Result = 0;
@@ -347,12 +365,10 @@ static void DumpSymbolNamesFromFile(std::string &Filename) {
}
}
}
- } else if (aPath.isObjectFile()) {
+ } else if (magic.is_object()) {
OwningPtr<Binary> obj;
- if (error_code ec = object::createBinary(aPath.str(), obj)) {
- errs() << ToolName << ": " << Filename << ": " << ec.message() << ".\n";
+ if (error(object::createBinary(Buffer.take(), obj), Filename))
return;
- }
if (object::ObjectFile *o = dyn_cast<ObjectFile>(obj.get()))
DumpSymbolNamesFromObject(o);
} else {
@@ -370,6 +386,10 @@ int main(int argc, char **argv) {
llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
cl::ParseCommandLineOptions(argc, argv, "llvm symbol table dumper\n");
+ // llvm-nm only reads binary files.
+ if (error(sys::Program::ChangeStdinToBinary()))
+ return 1;
+
ToolName = argv[0];
if (BSDFormat) OutputFormat = bsd;
if (POSIXFormat) OutputFormat = posix;
diff --git a/contrib/llvm/tools/llvm-objdump/CMakeLists.txt b/contrib/llvm/tools/llvm-objdump/CMakeLists.txt
deleted file mode 100644
index f3b2e1f..0000000
--- a/contrib/llvm/tools/llvm-objdump/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-set(LLVM_LINK_COMPONENTS
- ${LLVM_TARGETS_TO_BUILD}
- DebugInfo
- MC
- MCParser
- MCDisassembler
- Object
- )
-
-add_llvm_tool(llvm-objdump
- llvm-objdump.cpp
- MachODump.cpp
- MCFunction.cpp
- )
diff --git a/contrib/llvm/tools/llvm-objdump/MachODump.cpp b/contrib/llvm/tools/llvm-objdump/MachODump.cpp
index 3f44b29..0e7f3fd 100644
--- a/contrib/llvm/tools/llvm-objdump/MachODump.cpp
+++ b/contrib/llvm/tools/llvm-objdump/MachODump.cpp
@@ -14,7 +14,7 @@
#include "llvm-objdump.h"
#include "MCFunction.h"
#include "llvm/Support/MachO.h"
-#include "llvm/Object/MachOObject.h"
+#include "llvm/Object/MachO.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/STLExtras.h"
@@ -26,6 +26,7 @@
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -53,27 +54,28 @@ static cl::opt<std::string>
static const Target *GetTarget(const MachOObject *MachOObj) {
// Figure out the target triple.
- llvm::Triple TT("unknown-unknown-unknown");
- switch (MachOObj->getHeader().CPUType) {
- case llvm::MachO::CPUTypeI386:
- TT.setArch(Triple::ArchType(Triple::x86));
- break;
- case llvm::MachO::CPUTypeX86_64:
- TT.setArch(Triple::ArchType(Triple::x86_64));
- break;
- case llvm::MachO::CPUTypeARM:
- TT.setArch(Triple::ArchType(Triple::arm));
- break;
- case llvm::MachO::CPUTypePowerPC:
- TT.setArch(Triple::ArchType(Triple::ppc));
- break;
- case llvm::MachO::CPUTypePowerPC64:
- TT.setArch(Triple::ArchType(Triple::ppc64));
- break;
+ if (TripleName.empty()) {
+ llvm::Triple TT("unknown-unknown-unknown");
+ switch (MachOObj->getHeader().CPUType) {
+ case llvm::MachO::CPUTypeI386:
+ TT.setArch(Triple::ArchType(Triple::x86));
+ break;
+ case llvm::MachO::CPUTypeX86_64:
+ TT.setArch(Triple::ArchType(Triple::x86_64));
+ break;
+ case llvm::MachO::CPUTypeARM:
+ TT.setArch(Triple::ArchType(Triple::arm));
+ break;
+ case llvm::MachO::CPUTypePowerPC:
+ TT.setArch(Triple::ArchType(Triple::ppc));
+ break;
+ case llvm::MachO::CPUTypePowerPC64:
+ TT.setArch(Triple::ArchType(Triple::ppc64));
+ break;
+ }
+ TripleName = TT.str();
}
- TripleName = TT.str();
-
// Get the target specific parser.
std::string Error;
const Target *TheTarget = TargetRegistry::lookupTarget(TripleName, Error);
@@ -85,57 +87,43 @@ static const Target *GetTarget(const MachOObject *MachOObj) {
return 0;
}
-struct Section {
- char Name[16];
- uint64_t Address;
- uint64_t Size;
- uint32_t Offset;
- uint32_t NumRelocs;
- uint64_t RelocTableOffset;
-};
-
-struct Symbol {
- uint64_t Value;
- uint32_t StringIndex;
- uint8_t SectionIndex;
- bool operator<(const Symbol &RHS) const { return Value < RHS.Value; }
+struct SymbolSorter {
+ bool operator()(const SymbolRef &A, const SymbolRef &B) {
+ SymbolRef::Type AType, BType;
+ A.getType(AType);
+ B.getType(BType);
+
+ uint64_t AAddr, BAddr;
+ if (AType != SymbolRef::ST_Function)
+ AAddr = 0;
+ else
+ A.getAddress(AAddr);
+ if (BType != SymbolRef::ST_Function)
+ BAddr = 0;
+ else
+ B.getAddress(BAddr);
+ return AAddr < BAddr;
+ }
};
-template <typename T>
-static Section copySection(const T &Sect) {
- Section S;
- memcpy(S.Name, Sect->Name, 16);
- S.Address = Sect->Address;
- S.Size = Sect->Size;
- S.Offset = Sect->Offset;
- S.NumRelocs = Sect->NumRelocationTableEntries;
- S.RelocTableOffset = Sect->RelocationTableOffset;
- return S;
-}
-
-template <typename T>
-static Symbol copySymbol(const T &STE) {
- Symbol S;
- S.StringIndex = STE->StringIndex;
- S.SectionIndex = STE->SectionIndex;
- S.Value = STE->Value;
- return S;
-}
-
// Print additional information about an address, if available.
-static void DumpAddress(uint64_t Address, ArrayRef<Section> Sections,
+static void DumpAddress(uint64_t Address, ArrayRef<SectionRef> Sections,
MachOObject *MachOObj, raw_ostream &OS) {
for (unsigned i = 0; i != Sections.size(); ++i) {
- uint64_t addr = Address-Sections[i].Address;
- if (Sections[i].Address <= Address &&
- Sections[i].Address + Sections[i].Size > Address) {
- StringRef bytes = MachOObj->getData(Sections[i].Offset,
- Sections[i].Size);
+ uint64_t SectAddr = 0, SectSize = 0;
+ Sections[i].getAddress(SectAddr);
+ Sections[i].getSize(SectSize);
+ uint64_t addr = SectAddr;
+ if (SectAddr <= Address &&
+ SectAddr + SectSize > Address) {
+ StringRef bytes, name;
+ Sections[i].getContents(bytes);
+ Sections[i].getName(name);
// Print constant strings.
- if (!strcmp(Sections[i].Name, "__cstring"))
+ if (!name.compare("__cstring"))
OS << '"' << bytes.substr(addr, bytes.find('\0', addr)) << '"';
// Print constant CFStrings.
- if (!strcmp(Sections[i].Name, "__cfstring"))
+ if (!name.compare("__cfstring"))
OS << "@\"" << bytes.substr(addr, bytes.find('\0', addr)) << '"';
}
}
@@ -212,59 +200,34 @@ static void emitDOTFile(const char *FileName, const MCFunction &f,
}
static void getSectionsAndSymbols(const macho::Header &Header,
- MachOObject *MachOObj,
+ MachOObjectFile *MachOObj,
InMemoryStruct<macho::SymtabLoadCommand> *SymtabLC,
- std::vector<Section> &Sections,
- std::vector<Symbol> &Symbols,
+ std::vector<SectionRef> &Sections,
+ std::vector<SymbolRef> &Symbols,
SmallVectorImpl<uint64_t> &FoundFns) {
- // Make a list of all symbols in the object file.
- for (unsigned i = 0; i != Header.NumLoadCommands; ++i) {
- const MachOObject::LoadCommandInfo &LCI = MachOObj->getLoadCommandInfo(i);
- if (LCI.Command.Type == macho::LCT_Segment) {
- InMemoryStruct<macho::SegmentLoadCommand> SegmentLC;
- MachOObj->ReadSegmentLoadCommand(LCI, SegmentLC);
-
- // Store the sections in this segment.
- for (unsigned SectNum = 0; SectNum != SegmentLC->NumSections; ++SectNum) {
- InMemoryStruct<macho::Section> Sect;
- MachOObj->ReadSection(LCI, SectNum, Sect);
- Sections.push_back(copySection(Sect));
+ error_code ec;
+ for (symbol_iterator SI = MachOObj->begin_symbols(),
+ SE = MachOObj->end_symbols(); SI != SE; SI.increment(ec))
+ Symbols.push_back(*SI);
+
+ for (section_iterator SI = MachOObj->begin_sections(),
+ SE = MachOObj->end_sections(); SI != SE; SI.increment(ec)) {
+ SectionRef SR = *SI;
+ StringRef SectName;
+ SR.getName(SectName);
+ Sections.push_back(*SI);
+ }
- }
- } else if (LCI.Command.Type == macho::LCT_Segment64) {
- InMemoryStruct<macho::Segment64LoadCommand> Segment64LC;
- MachOObj->ReadSegment64LoadCommand(LCI, Segment64LC);
-
- // Store the sections in this segment.
- for (unsigned SectNum = 0; SectNum != Segment64LC->NumSections;
- ++SectNum) {
- InMemoryStruct<macho::Section64> Sect64;
- MachOObj->ReadSection64(LCI, SectNum, Sect64);
- Sections.push_back(copySection(Sect64));
- }
- } else if (LCI.Command.Type == macho::LCT_FunctionStarts) {
+ for (unsigned i = 0; i != Header.NumLoadCommands; ++i) {
+ const MachOObject::LoadCommandInfo &LCI =
+ MachOObj->getObject()->getLoadCommandInfo(i);
+ if (LCI.Command.Type == macho::LCT_FunctionStarts) {
// We found a function starts segment, parse the addresses for later
// consumption.
InMemoryStruct<macho::LinkeditDataLoadCommand> LLC;
- MachOObj->ReadLinkeditDataLoadCommand(LCI, LLC);
+ MachOObj->getObject()->ReadLinkeditDataLoadCommand(LCI, LLC);
- MachOObj->ReadULEB128s(LLC->DataOffset, FoundFns);
- }
- }
- // Store the symbols.
- if (SymtabLC) {
- for (unsigned i = 0; i != (*SymtabLC)->NumSymbolTableEntries; ++i) {
- if (MachOObj->is64Bit()) {
- InMemoryStruct<macho::Symbol64TableEntry> STE;
- MachOObj->ReadSymbol64TableEntry((*SymtabLC)->SymbolTableOffset, i,
- STE);
- Symbols.push_back(copySymbol(STE));
- } else {
- InMemoryStruct<macho::SymbolTableEntry> STE;
- MachOObj->ReadSymbolTableEntry((*SymtabLC)->SymbolTableOffset, i,
- STE);
- Symbols.push_back(copySymbol(STE));
- }
+ MachOObj->getObject()->ReadULEB128s(LLC->DataOffset, FoundFns);
}
}
}
@@ -277,9 +240,11 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
return;
}
- OwningPtr<MachOObject> MachOObj(MachOObject::LoadFromBuffer(Buff.take()));
+ OwningPtr<MachOObjectFile> MachOOF(static_cast<MachOObjectFile*>(
+ ObjectFile::createMachOObjectFile(Buff.take())));
+ MachOObject *MachOObj = MachOOF->getObject();
- const Target *TheTarget = GetTarget(MachOObj.get());
+ const Target *TheTarget = GetTarget(MachOObj);
if (!TheTarget) {
    // GetTarget prints out the error message.
return;
@@ -293,9 +258,11 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
OwningPtr<const MCSubtargetInfo>
STI(TheTarget->createMCSubtargetInfo(TripleName, "", ""));
OwningPtr<const MCDisassembler> DisAsm(TheTarget->createMCDisassembler(*STI));
+ OwningPtr<const MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TripleName));
int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
- OwningPtr<MCInstPrinter> IP(TheTarget->createMCInstPrinter(
- AsmPrinterVariant, *AsmInfo, *STI));
+ OwningPtr<MCInstPrinter>
+ IP(TheTarget->createMCInstPrinter(AsmPrinterVariant, *AsmInfo, *InstrInfo,
+ *MRI, *STI));
if (!InstrAnalysis || !AsmInfo || !STI || !DisAsm || !IP) {
errs() << "error: couldn't initialize disassembler for target "
@@ -322,17 +289,17 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
MachOObj->ReadSymtabLoadCommand(*SymtabLCI, SymtabLC);
MachOObj->RegisterStringTable(*SymtabLC);
- std::vector<Section> Sections;
- std::vector<Symbol> Symbols;
+ std::vector<SectionRef> Sections;
+ std::vector<SymbolRef> Symbols;
SmallVector<uint64_t, 8> FoundFns;
- getSectionsAndSymbols(Header, MachOObj.get(), &SymtabLC, Sections, Symbols,
+ getSectionsAndSymbols(Header, MachOOF.get(), &SymtabLC, Sections, Symbols,
FoundFns);
// Make a copy of the unsorted symbol list. FIXME: duplication
- std::vector<Symbol> UnsortedSymbols(Symbols);
+ std::vector<SymbolRef> UnsortedSymbols(Symbols);
// Sort the symbols by address, just in case they didn't come in that way.
- array_pod_sort(Symbols.begin(), Symbols.end());
+ std::sort(Symbols.begin(), Symbols.end(), SymbolSorter());
#ifndef NDEBUG
raw_ostream &DebugOut = DebugFlag ? dbgs() : nulls();
@@ -343,12 +310,12 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
StringRef DebugAbbrevSection, DebugInfoSection, DebugArangesSection,
DebugLineSection, DebugStrSection;
OwningPtr<DIContext> diContext;
- OwningPtr<MachOObject> DSYMObj;
- MachOObject *DbgInfoObj = MachOObj.get();
+ OwningPtr<MachOObjectFile> DSYMObj;
+ MachOObject *DbgInfoObj = MachOObj;
// Try to find debug info and set up the DIContext for it.
if (UseDbg) {
- ArrayRef<Section> DebugSections = Sections;
- std::vector<Section> DSYMSections;
+ ArrayRef<SectionRef> DebugSections = Sections;
+ std::vector<SectionRef> DSYMSections;
// A separate DSym file path was specified, parse it as a macho file,
// get the sections and supply it to the section name parsing machinery.
@@ -358,34 +325,33 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
errs() << "llvm-objdump: " << Filename << ": " << ec.message() << '\n';
return;
}
- DSYMObj.reset(MachOObject::LoadFromBuffer(Buf.take()));
- const macho::Header &Header = DSYMObj->getHeader();
+ DSYMObj.reset(static_cast<MachOObjectFile*>(
+ ObjectFile::createMachOObjectFile(Buf.take())));
+ const macho::Header &Header = DSYMObj->getObject()->getHeader();
- std::vector<Symbol> Symbols;
+ std::vector<SymbolRef> Symbols;
SmallVector<uint64_t, 8> FoundFns;
getSectionsAndSymbols(Header, DSYMObj.get(), 0, DSYMSections, Symbols,
FoundFns);
DebugSections = DSYMSections;
- DbgInfoObj = DSYMObj.get();
+ DbgInfoObj = DSYMObj.get()->getObject();
}
// Find the named debug info sections.
for (unsigned SectIdx = 0; SectIdx != DebugSections.size(); SectIdx++) {
- if (!strcmp(DebugSections[SectIdx].Name, "__debug_abbrev"))
- DebugAbbrevSection = DbgInfoObj->getData(DebugSections[SectIdx].Offset,
- DebugSections[SectIdx].Size);
- else if (!strcmp(DebugSections[SectIdx].Name, "__debug_info"))
- DebugInfoSection = DbgInfoObj->getData(DebugSections[SectIdx].Offset,
- DebugSections[SectIdx].Size);
- else if (!strcmp(DebugSections[SectIdx].Name, "__debug_aranges"))
- DebugArangesSection = DbgInfoObj->getData(DebugSections[SectIdx].Offset,
- DebugSections[SectIdx].Size);
- else if (!strcmp(DebugSections[SectIdx].Name, "__debug_line"))
- DebugLineSection = DbgInfoObj->getData(DebugSections[SectIdx].Offset,
- DebugSections[SectIdx].Size);
- else if (!strcmp(DebugSections[SectIdx].Name, "__debug_str"))
- DebugStrSection = DbgInfoObj->getData(DebugSections[SectIdx].Offset,
- DebugSections[SectIdx].Size);
+ StringRef SectName;
+ if (!DebugSections[SectIdx].getName(SectName)) {
+ if (SectName.equals("__DWARF,__debug_abbrev"))
+ DebugSections[SectIdx].getContents(DebugAbbrevSection);
+ else if (SectName.equals("__DWARF,__debug_info"))
+ DebugSections[SectIdx].getContents(DebugInfoSection);
+ else if (SectName.equals("__DWARF,__debug_aranges"))
+ DebugSections[SectIdx].getContents(DebugArangesSection);
+ else if (SectName.equals("__DWARF,__debug_line"))
+ DebugSections[SectIdx].getContents(DebugLineSection);
+ else if (SectName.equals("__DWARF,__debug_str"))
+ DebugSections[SectIdx].getContents(DebugStrSection);
+ }
}
// Setup the DIContext.
@@ -401,68 +367,115 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
FunctionListTy Functions;
for (unsigned SectIdx = 0; SectIdx != Sections.size(); SectIdx++) {
- if (strcmp(Sections[SectIdx].Name, "__text"))
+ StringRef SectName;
+ if (Sections[SectIdx].getName(SectName) ||
+ SectName.compare("__TEXT,__text"))
continue; // Skip non-text sections
// Insert the functions from the function starts segment into our map.
- uint64_t VMAddr = Sections[SectIdx].Address - Sections[SectIdx].Offset;
- for (unsigned i = 0, e = FoundFns.size(); i != e; ++i)
- FunctionMap.insert(std::make_pair(FoundFns[i]+VMAddr, (MCFunction*)0));
+ uint64_t VMAddr;
+ Sections[SectIdx].getAddress(VMAddr);
+ for (unsigned i = 0, e = FoundFns.size(); i != e; ++i) {
+ StringRef SectBegin;
+ Sections[SectIdx].getContents(SectBegin);
+ uint64_t Offset = (uint64_t)SectBegin.data();
+ FunctionMap.insert(std::make_pair(VMAddr + FoundFns[i]-Offset,
+ (MCFunction*)0));
+ }
- StringRef Bytes = MachOObj->getData(Sections[SectIdx].Offset,
- Sections[SectIdx].Size);
+ StringRef Bytes;
+ Sections[SectIdx].getContents(Bytes);
StringRefMemoryObject memoryObject(Bytes);
bool symbolTableWorked = false;
// Parse relocations.
- std::vector<std::pair<uint64_t, uint32_t> > Relocs;
- for (unsigned j = 0; j != Sections[SectIdx].NumRelocs; ++j) {
- InMemoryStruct<macho::RelocationEntry> RE;
- MachOObj->ReadRelocationEntry(Sections[SectIdx].RelocTableOffset, j, RE);
- Relocs.push_back(std::make_pair(RE->Word0, RE->Word1 & 0xffffff));
+ std::vector<std::pair<uint64_t, SymbolRef> > Relocs;
+ error_code ec;
+ for (relocation_iterator RI = Sections[SectIdx].begin_relocations(),
+ RE = Sections[SectIdx].end_relocations(); RI != RE; RI.increment(ec)) {
+ uint64_t RelocOffset, SectionAddress;
+ RI->getAddress(RelocOffset);
+ Sections[SectIdx].getAddress(SectionAddress);
+ RelocOffset -= SectionAddress;
+
+ SymbolRef RelocSym;
+ RI->getSymbol(RelocSym);
+
+ Relocs.push_back(std::make_pair(RelocOffset, RelocSym));
}
array_pod_sort(Relocs.begin(), Relocs.end());
// Disassemble symbol by symbol.
for (unsigned SymIdx = 0; SymIdx != Symbols.size(); SymIdx++) {
+ StringRef SymName;
+ Symbols[SymIdx].getName(SymName);
+
+ SymbolRef::Type ST;
+ Symbols[SymIdx].getType(ST);
+ if (ST != SymbolRef::ST_Function)
+ continue;
+
// Make sure the symbol is defined in this section.
- if ((unsigned)Symbols[SymIdx].SectionIndex - 1 != SectIdx)
+ bool containsSym = false;
+ Sections[SectIdx].containsSymbol(Symbols[SymIdx], containsSym);
+ if (!containsSym)
continue;
// Start at the address of the symbol relative to the section's address.
- uint64_t Start = Symbols[SymIdx].Value - Sections[SectIdx].Address;
+ uint64_t SectionAddress = 0;
+ uint64_t Start = 0;
+ Sections[SectIdx].getAddress(SectionAddress);
+ Symbols[SymIdx].getAddress(Start);
+ Start -= SectionAddress;
+
// Stop disassembling either at the beginning of the next symbol or at
// the end of the section.
- uint64_t End = (SymIdx+1 == Symbols.size() ||
- Symbols[SymIdx].SectionIndex != Symbols[SymIdx+1].SectionIndex) ?
- Sections[SectIdx].Size :
- Symbols[SymIdx+1].Value - Sections[SectIdx].Address;
- uint64_t Size;
+ bool containsNextSym = true;
+ uint64_t NextSym = 0;
+ uint64_t NextSymIdx = SymIdx+1;
+ while (Symbols.size() > NextSymIdx) {
+ SymbolRef::Type NextSymType;
+ Symbols[NextSymIdx].getType(NextSymType);
+ if (NextSymType == SymbolRef::ST_Function) {
+ Sections[SectIdx].containsSymbol(Symbols[NextSymIdx],
+ containsNextSym);
+ Symbols[NextSymIdx].getAddress(NextSym);
+ NextSym -= SectionAddress;
+ break;
+ }
+ ++NextSymIdx;
+ }
- if (Start >= End)
- continue;
+ uint64_t SectSize;
+ Sections[SectIdx].getSize(SectSize);
+ uint64_t End = containsNextSym ? NextSym : SectSize;
+ uint64_t Size;
symbolTableWorked = true;
if (!CFG) {
// Normal disassembly, print addresses, bytes and mnemonic form.
- outs() << MachOObj->getStringAtIndex(Symbols[SymIdx].StringIndex)
- << ":\n";
+ StringRef SymName;
+ Symbols[SymIdx].getName(SymName);
+
+ outs() << SymName << ":\n";
DILineInfo lastLine;
for (uint64_t Index = Start; Index < End; Index += Size) {
MCInst Inst;
if (DisAsm->getInstruction(Inst, Size, memoryObject, Index,
DebugOut, nulls())) {
- outs() << format("%8llx:\t", Sections[SectIdx].Address + Index);
+ uint64_t SectAddress = 0;
+ Sections[SectIdx].getAddress(SectAddress);
+ outs() << format("%8" PRIx64 ":\t", SectAddress + Index);
+
DumpBytes(StringRef(Bytes.data() + Index, Size));
IP->printInst(&Inst, outs(), "");
// Print debug info.
if (diContext) {
DILineInfo dli =
- diContext->getLineInfoForAddress(Sections[SectIdx].Address +
- Index);
+ diContext->getLineInfoForAddress(SectAddress + Index);
// Print valid line info if it changed.
if (dli != lastLine && dli.getLine() != 0)
outs() << "\t## " << dli.getFileName() << ':'
@@ -478,20 +491,24 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
}
} else {
// Create CFG and use it for disassembly.
+ StringRef SymName;
+ Symbols[SymIdx].getName(SymName);
createMCFunctionAndSaveCalls(
- MachOObj->getStringAtIndex(Symbols[SymIdx].StringIndex),
- DisAsm.get(), memoryObject, Start, End, InstrAnalysis.get(),
- Start, DebugOut, FunctionMap, Functions);
+ SymName, DisAsm.get(), memoryObject, Start, End,
+ InstrAnalysis.get(), Start, DebugOut, FunctionMap, Functions);
}
}
if (CFG) {
if (!symbolTableWorked) {
// Reading the symbol table didn't work, create a big __TEXT symbol.
+ uint64_t SectSize = 0, SectAddress = 0;
+ Sections[SectIdx].getSize(SectSize);
+ Sections[SectIdx].getAddress(SectAddress);
createMCFunctionAndSaveCalls("__TEXT", DisAsm.get(), memoryObject,
- 0, Sections[SectIdx].Size,
+ 0, SectSize,
InstrAnalysis.get(),
- Sections[SectIdx].Offset, DebugOut,
+ SectAddress, DebugOut,
FunctionMap, Functions);
}
for (std::map<uint64_t, MCFunction*>::iterator mi = FunctionMap.begin(),
@@ -499,11 +516,14 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
if (mi->second == 0) {
// Create functions for the remaining callees we have gathered,
// but we didn't find a name for them.
+ uint64_t SectSize = 0;
+ Sections[SectIdx].getSize(SectSize);
+
SmallVector<uint64_t, 16> Calls;
MCFunction f =
MCFunction::createFunctionFromMC("unknown", DisAsm.get(),
memoryObject, mi->first,
- Sections[SectIdx].Size,
+ SectSize,
InstrAnalysis.get(), DebugOut,
Calls);
Functions.push_back(f);
@@ -535,13 +555,17 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
break;
}
+ uint64_t SectSize = 0, SectAddress;
+ Sections[SectIdx].getSize(SectSize);
+ Sections[SectIdx].getAddress(SectAddress);
+
// No predecessors, this is a data block. Print as .byte directives.
if (!hasPreds) {
- uint64_t End = llvm::next(fi) == fe ? Sections[SectIdx].Size :
+ uint64_t End = llvm::next(fi) == fe ? SectSize :
llvm::next(fi)->first;
outs() << "# " << End-fi->first << " bytes of data:\n";
for (unsigned pos = fi->first; pos != End; ++pos) {
- outs() << format("%8x:\t", Sections[SectIdx].Address + pos);
+ outs() << format("%8x:\t", SectAddress + pos);
DumpBytes(StringRef(Bytes.data() + pos, 1));
outs() << format("\t.byte 0x%02x\n", (uint8_t)Bytes[pos]);
}
@@ -558,13 +582,12 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
const MCDecodedInst &Inst = fi->second.getInsts()[ii];
// If there's a symbol at this address, print its name.
- if (FunctionMap.find(Sections[SectIdx].Address + Inst.Address) !=
+ if (FunctionMap.find(SectAddress + Inst.Address) !=
FunctionMap.end())
- outs() << FunctionMap[Sections[SectIdx].Address + Inst.Address]->
- getName() << ":\n";
+          outs() << FunctionMap[SectAddress + Inst.Address]->getName()
+ << ":\n";
- outs() << format("%8llx:\t", Sections[SectIdx].Address +
- Inst.Address);
+ outs() << format("%8" PRIx64 ":\t", SectAddress + Inst.Address);
DumpBytes(StringRef(Bytes.data() + Inst.Address, Inst.Size));
if (fi->second.contains(fi->first)) // Indent simple loops.
@@ -575,15 +598,15 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
// Look for relocations inside this instructions, if there is one
// print its target and additional information if available.
for (unsigned j = 0; j != Relocs.size(); ++j)
- if (Relocs[j].first >= Sections[SectIdx].Address + Inst.Address &&
- Relocs[j].first < Sections[SectIdx].Address + Inst.Address +
- Inst.Size) {
- outs() << "\t# "
- << MachOObj->getStringAtIndex(
- UnsortedSymbols[Relocs[j].second].StringIndex)
- << ' ';
- DumpAddress(UnsortedSymbols[Relocs[j].second].Value, Sections,
- MachOObj.get(), outs());
+ if (Relocs[j].first >= SectAddress + Inst.Address &&
+ Relocs[j].first < SectAddress + Inst.Address + Inst.Size) {
+ StringRef SymName;
+ uint64_t Addr;
+ Relocs[j].second.getAddress(Addr);
+ Relocs[j].second.getName(SymName);
+
+ outs() << "\t# " << SymName << ' ';
+ DumpAddress(Addr, Sections, MachOObj, outs());
}
      // If this instruction contains an address, see if we can evaluate
@@ -592,13 +615,12 @@ void llvm::DisassembleInputMachO(StringRef Filename) {
Inst.Address,
Inst.Size);
if (targ != -1ULL)
- DumpAddress(targ, Sections, MachOObj.get(), outs());
+ DumpAddress(targ, Sections, MachOObj, outs());
// Print debug info.
if (diContext) {
DILineInfo dli =
- diContext->getLineInfoForAddress(Sections[SectIdx].Address +
- Inst.Address);
+ diContext->getLineInfoForAddress(SectAddress + Inst.Address);
// Print valid line info if it changed.
if (dli != lastLine && dli.getLine() != 0)
outs() << "\t## " << dli.getFileName() << ':'
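The MachODump rewrite above drops the hand-copied Section and Symbol structs in favor of the generic SectionRef/SymbolRef accessors, so every query returns its result through an out-parameter and iteration is stepped with an error_code. A minimal sketch of that iteration idiom, assuming the libObject interfaces as imported here:

// Sketch: walk an ObjectFile's sections with the error_code-driven
// iterators used throughout this rewrite.
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"

static void listSections(llvm::object::ObjectFile *Obj) {
  llvm::error_code ec;
  for (llvm::object::section_iterator I = Obj->begin_sections(),
                                      E = Obj->end_sections();
       I != E; I.increment(ec)) {
    if (ec) return;               // stepping the iterator can itself fail
    llvm::StringRef Name;
    uint64_t Addr = 0, Size = 0;
    I->getName(Name);             // accessors report errors the same way
    I->getAddress(Addr);
    I->getSize(Size);
    llvm::outs() << Name << " @ 0x";
    llvm::outs().write_hex(Addr);
    llvm::outs() << " (" << Size << " bytes)\n";
  }
}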
diff --git a/contrib/llvm/tools/llvm-objdump/Makefile b/contrib/llvm/tools/llvm-objdump/Makefile
deleted file mode 100644
index 703bf6c..0000000
--- a/contrib/llvm/tools/llvm-objdump/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- tools/llvm-objdump/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-
-TOOLNAME = llvm-objdump
-LINK_COMPONENTS = $(TARGETS_TO_BUILD) DebugInfo MC MCParser MCDisassembler \
- Object
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp b/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
index 40c59bd..5a6f94a 100644
--- a/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -16,14 +16,18 @@
#include "llvm-objdump.h"
#include "MCFunction.h"
#include "llvm/Object/Archive.h"
+#include "llvm/Object/COFF.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
@@ -43,6 +47,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
#include <algorithm>
+#include <cctype>
#include <cstring>
using namespace llvm;
using namespace object;
@@ -61,6 +66,12 @@ static cl::opt<bool>
Relocations("r", cl::desc("Display the relocation entries in the file"));
static cl::opt<bool>
+SectionContents("s", cl::desc("Display the content of each section"));
+
+static cl::opt<bool>
+SymbolTable("t", cl::desc("Display the symbol table"));
+
+static cl::opt<bool>
MachO("macho", cl::desc("Use MachO specific object file parser"));
static cl::alias
MachOm("m", cl::desc("Alias for --macho"), cl::aliasopt(MachO));
@@ -118,6 +129,8 @@ static const Target *GetTarget(const ObjectFile *Obj = NULL) {
return 0;
}
+void llvm::StringRefMemoryObject::anchor() { }
+
void llvm::DumpBytes(StringRef bytes) {
static const char hex_rep[] = "0123456789abcdef";
// FIXME: The real way to do this is to figure out the longest instruction
@@ -158,10 +171,6 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
return;
}
- outs() << '\n';
- outs() << Obj->getFileName()
- << ":\tfile format " << Obj->getFileFormatName() << "\n\n";
-
error_code ec;
for (section_iterator i = Obj->begin_sections(),
e = Obj->end_sections();
@@ -182,7 +191,9 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
bool contains;
if (!error(i->containsSymbol(*si, contains)) && contains) {
uint64_t Address;
- if (error(si->getOffset(Address))) break;
+ if (error(si->getAddress(Address))) break;
+ Address -= SectionAddr;
+
StringRef Name;
if (error(si->getName(Name))) break;
Symbols.push_back(std::make_pair(Address, Name));
@@ -238,9 +249,21 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
return;
}
+ OwningPtr<const MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TripleName));
+ if (!MRI) {
+ errs() << "error: no register info for target " << TripleName << "\n";
+ return;
+ }
+
+ OwningPtr<const MCInstrInfo> MII(TheTarget->createMCInstrInfo());
+ if (!MII) {
+ errs() << "error: no instruction info for target " << TripleName << "\n";
+ return;
+ }
+
int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
OwningPtr<MCInstPrinter> IP(TheTarget->createMCInstPrinter(
- AsmPrinterVariant, *AsmInfo, *STI));
+ AsmPrinterVariant, *AsmInfo, *MII, *MRI, *STI));
if (!IP) {
errs() << "error: no instruction printer for target " << TripleName
<< '\n';
@@ -285,7 +308,7 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
if (DisAsm->getInstruction(Inst, Size, memoryObject, Index,
DebugOut, nulls())) {
- outs() << format("%8"PRIx64":\t", SectionAddr + Index);
+ outs() << format("%8" PRIx64 ":\t", SectionAddr + Index);
DumpBytes(StringRef(Bytes.data() + Index, Size));
IP->printInst(&Inst, outs(), "");
outs() << "\n";
@@ -297,17 +320,23 @@ static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
// Print relocation for instruction.
while (rel_cur != rel_end) {
+ bool hidden = false;
uint64_t addr;
SmallString<16> name;
SmallString<32> val;
+
+ // If this relocation is hidden, skip it.
+ if (error(rel_cur->getHidden(hidden))) goto skip_print_rel;
+ if (hidden) goto skip_print_rel;
+
if (error(rel_cur->getAddress(addr))) goto skip_print_rel;
// Stop when rel_cur's address is past the current instruction.
- if (addr > Index + Size) break;
+ if (addr >= Index + Size) break;
if (error(rel_cur->getTypeName(name))) goto skip_print_rel;
if (error(rel_cur->getValueString(val))) goto skip_print_rel;
- outs() << format("\t\t\t%8"PRIx64": ", SectionAddr + addr) << name << "\t"
- << val << "\n";
+ outs() << format("\t\t\t%8" PRIx64 ": ", SectionAddr + addr) << name
+ << "\t" << val << "\n";
skip_print_rel:
++rel_cur;
@@ -332,9 +361,12 @@ static void PrintRelocations(const ObjectFile *o) {
ri != re; ri.increment(ec)) {
if (error(ec)) return;
+ bool hidden;
uint64_t address;
SmallString<32> relocname;
SmallString<32> valuestr;
+ if (error(ri->getHidden(hidden))) continue;
+ if (hidden) continue;
if (error(ri->getTypeName(relocname))) continue;
if (error(ri->getAddress(address))) continue;
if (error(ri->getValueString(valuestr))) continue;
@@ -364,19 +396,179 @@ static void PrintSectionHeaders(const ObjectFile *o) {
if (error(si->isBSS(BSS))) return;
std::string Type = (std::string(Text ? "TEXT " : "") +
(Data ? "DATA " : "") + (BSS ? "BSS" : ""));
- outs() << format("%3d %-13s %09"PRIx64" %017"PRIx64" %s\n", i, Name.str().c_str(), Size,
- Address, Type.c_str());
+ outs() << format("%3d %-13s %09" PRIx64 " %017" PRIx64 " %s\n",
+ i, Name.str().c_str(), Size, Address, Type.c_str());
++i;
}
}
+static void PrintSectionContents(const ObjectFile *o) {
+ error_code ec;
+ for (section_iterator si = o->begin_sections(),
+ se = o->end_sections();
+ si != se; si.increment(ec)) {
+ if (error(ec)) return;
+ StringRef Name;
+ StringRef Contents;
+ uint64_t BaseAddr;
+ if (error(si->getName(Name))) continue;
+ if (error(si->getContents(Contents))) continue;
+ if (error(si->getAddress(BaseAddr))) continue;
+
+ outs() << "Contents of section " << Name << ":\n";
+
+ // Dump out the content as hex and printable ascii characters.
+ for (std::size_t addr = 0, end = Contents.size(); addr < end; addr += 16) {
+ outs() << format(" %04" PRIx64 " ", BaseAddr + addr);
+ // Dump line of hex.
+ for (std::size_t i = 0; i < 16; ++i) {
+ if (i != 0 && i % 4 == 0)
+ outs() << ' ';
+ if (addr + i < end)
+ outs() << hexdigit((Contents[addr + i] >> 4) & 0xF, true)
+ << hexdigit(Contents[addr + i] & 0xF, true);
+ else
+ outs() << " ";
+ }
+ // Print ascii.
+ outs() << " ";
+ for (std::size_t i = 0; i < 16 && addr + i < end; ++i) {
+ if (std::isprint(Contents[addr + i] & 0xFF))
+ outs() << Contents[addr + i];
+ else
+ outs() << ".";
+ }
+ outs() << "\n";
+ }
+ }
+}
+
+static void PrintCOFFSymbolTable(const COFFObjectFile *coff) {
+ const coff_file_header *header;
+ if (error(coff->getHeader(header))) return;
+ int aux_count = 0;
+ const coff_symbol *symbol = 0;
+ for (int i = 0, e = header->NumberOfSymbols; i != e; ++i) {
+ if (aux_count--) {
+ // Figure out which type of aux this is.
+ if (symbol->StorageClass == COFF::IMAGE_SYM_CLASS_STATIC
+ && symbol->Value == 0) { // Section definition.
+ const coff_aux_section_definition *asd;
+ if (error(coff->getAuxSymbol<coff_aux_section_definition>(i, asd)))
+ return;
+ outs() << "AUX "
+ << format("scnlen 0x%x nreloc %d nlnno %d checksum 0x%x "
+ , unsigned(asd->Length)
+ , unsigned(asd->NumberOfRelocations)
+ , unsigned(asd->NumberOfLinenumbers)
+ , unsigned(asd->CheckSum))
+ << format("assoc %d comdat %d\n"
+ , unsigned(asd->Number)
+ , unsigned(asd->Selection));
+ } else {
+ outs() << "AUX Unknown\n";
+ }
+ } else {
+ StringRef name;
+ if (error(coff->getSymbol(i, symbol))) return;
+ if (error(coff->getSymbolName(symbol, name))) return;
+ outs() << "[" << format("%2d", i) << "]"
+ << "(sec " << format("%2d", int(symbol->SectionNumber)) << ")"
+ << "(fl 0x00)" // Flag bits, which COFF doesn't have.
+ << "(ty " << format("%3x", unsigned(symbol->Type)) << ")"
+ << "(scl " << format("%3x", unsigned(symbol->StorageClass)) << ") "
+ << "(nx " << unsigned(symbol->NumberOfAuxSymbols) << ") "
+ << "0x" << format("%08x", unsigned(symbol->Value)) << " "
+ << name << "\n";
+ aux_count = symbol->NumberOfAuxSymbols;
+ }
+ }
+}
+
+static void PrintSymbolTable(const ObjectFile *o) {
+ outs() << "SYMBOL TABLE:\n";
+
+ if (const COFFObjectFile *coff = dyn_cast<const COFFObjectFile>(o))
+ PrintCOFFSymbolTable(coff);
+ else {
+ error_code ec;
+ for (symbol_iterator si = o->begin_symbols(),
+ se = o->end_symbols(); si != se; si.increment(ec)) {
+ if (error(ec)) return;
+ StringRef Name;
+ uint64_t Address;
+ SymbolRef::Type Type;
+ uint64_t Size;
+ uint32_t Flags;
+ section_iterator Section = o->end_sections();
+ if (error(si->getName(Name))) continue;
+ if (error(si->getAddress(Address))) continue;
+ if (error(si->getFlags(Flags))) continue;
+ if (error(si->getType(Type))) continue;
+ if (error(si->getSize(Size))) continue;
+ if (error(si->getSection(Section))) continue;
+
+ bool Global = Flags & SymbolRef::SF_Global;
+ bool Weak = Flags & SymbolRef::SF_Weak;
+ bool Absolute = Flags & SymbolRef::SF_Absolute;
+
+ if (Address == UnknownAddressOrSize)
+ Address = 0;
+ if (Size == UnknownAddressOrSize)
+ Size = 0;
+ char GlobLoc = ' ';
+ if (Type != SymbolRef::ST_Unknown)
+ GlobLoc = Global ? 'g' : 'l';
+ char Debug = (Type == SymbolRef::ST_Debug || Type == SymbolRef::ST_File)
+ ? 'd' : ' ';
+ char FileFunc = ' ';
+ if (Type == SymbolRef::ST_File)
+ FileFunc = 'f';
+ else if (Type == SymbolRef::ST_Function)
+ FileFunc = 'F';
+
+ outs() << format("%08" PRIx64, Address) << " "
+ << GlobLoc // Local -> 'l', Global -> 'g', Neither -> ' '
+ << (Weak ? 'w' : ' ') // Weak?
+ << ' ' // Constructor. Not supported yet.
+ << ' ' // Warning. Not supported yet.
+ << ' ' // Indirect reference to another symbol.
+ << Debug // Debugging (d) or dynamic (D) symbol.
+ << FileFunc // Name of function (F), file (f) or object (O).
+ << ' ';
+ if (Absolute)
+ outs() << "*ABS*";
+ else if (Section == o->end_sections())
+ outs() << "*UND*";
+ else {
+ StringRef SectionName;
+ if (error(Section->getName(SectionName)))
+ SectionName = "";
+ outs() << SectionName;
+ }
+ outs() << '\t'
+ << format("%08" PRIx64 " ", Size)
+ << Name
+ << '\n';
+ }
+ }
+}
+
static void DumpObject(const ObjectFile *o) {
+ outs() << '\n';
+ outs() << o->getFileName()
+ << ":\tfile format " << o->getFileFormatName() << "\n\n";
+
if (Disassemble)
DisassembleObject(o, Relocations);
if (Relocations && !Disassemble)
PrintRelocations(o);
if (SectionHeaders)
PrintSectionHeaders(o);
+ if (SectionContents)
+ PrintSectionContents(o);
+ if (SymbolTable)
+ PrintSymbolTable(o);
}
/// @brief Dump each object file in \a a;
@@ -385,8 +577,10 @@ static void DumpArchive(const Archive *a) {
e = a->end_children(); i != e; ++i) {
OwningPtr<Binary> child;
if (error_code ec = i->getAsBinary(child)) {
- errs() << ToolName << ": '" << a->getFileName() << "': " << ec.message()
- << ".\n";
+ // Ignore non-object files.
+ if (ec != object_error::invalid_file_type)
+ errs() << ToolName << ": '" << a->getFileName() << "': " << ec.message()
+ << ".\n";
continue;
}
if (ObjectFile *o = dyn_cast<ObjectFile>(child.get()))
@@ -447,7 +641,11 @@ int main(int argc, char **argv) {
if (InputFilenames.size() == 0)
InputFilenames.push_back("a.out");
- if (!Disassemble && !Relocations && !SectionHeaders) {
+ if (!Disassemble
+ && !Relocations
+ && !SectionHeaders
+ && !SectionContents
+ && !SymbolTable) {
cl::PrintHelpMessage();
return 2;
}
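The new PrintSectionContents above renders each section sixteen bytes per row, hex on the left and printable ASCII on the right. The same layout in isolation, sketched over a plain StringRef with no libObject involvement:

// Sketch: objdump-style hex/ASCII dump of an arbitrary byte buffer.
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cctype>

static void hexDump(llvm::StringRef Data, uint64_t Base) {
  for (size_t addr = 0, end = Data.size(); addr < end; addr += 16) {
    llvm::outs() << llvm::format(" %04" PRIx64 " ", Base + addr);
    for (size_t i = 0; i < 16; ++i) {                 // hex column
      if (i != 0 && i % 4 == 0)
        llvm::outs() << ' ';
      if (addr + i < end)
        llvm::outs() << llvm::format("%02x",
                                     (unsigned)(unsigned char)Data[addr + i]);
      else
        llvm::outs() << "  ";
    }
    llvm::outs() << "  ";
    for (size_t i = 0; i < 16 && addr + i < end; ++i) // ASCII column
      llvm::outs() << (std::isprint((unsigned char)Data[addr + i])
                           ? Data[addr + i] : '.');
    llvm::outs() << '\n';
  }
}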
diff --git a/contrib/llvm/tools/llvm-objdump/llvm-objdump.h b/contrib/llvm/tools/llvm-objdump/llvm-objdump.h
index 75f852a..aa71b77 100644
--- a/contrib/llvm/tools/llvm-objdump/llvm-objdump.h
+++ b/contrib/llvm/tools/llvm-objdump/llvm-objdump.h
@@ -25,7 +25,7 @@ void DumpBytes(StringRef bytes);
void DisassembleInputMachO(StringRef Filename);
class StringRefMemoryObject : public MemoryObject {
-private:
+ virtual void anchor();
StringRef Bytes;
public:
StringRefMemoryObject(StringRef bytes) : Bytes(bytes) {}
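The one-line header change above is LLVM's vtable-anchor idiom: a class with virtual methods gets one deliberately out-of-line virtual function so the compiler emits its vtable (and type info) in exactly one object file, instead of weakly in every translation unit that includes the header. A generic sketch of the idiom, using a made-up class:

// Sketch: the vtable-anchor idiom. Shape is a hypothetical example class.
// In the header:
class Shape {
  virtual void anchor();             // declared, never defined inline
public:
  virtual ~Shape() {}
  virtual double area() const { return 0.0; }
};

// In exactly one .cpp file:
void Shape::anchor() {}              // vtable and RTTI are emitted here only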
diff --git a/contrib/llvm/tools/llvm-prof/CMakeLists.txt b/contrib/llvm/tools/llvm-prof/CMakeLists.txt
deleted file mode 100644
index 442112b..0000000
--- a/contrib/llvm/tools/llvm-prof/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS bitreader analysis)
-
-add_llvm_tool(llvm-prof
- llvm-prof.cpp
- )
diff --git a/contrib/llvm/tools/llvm-prof/Makefile b/contrib/llvm/tools/llvm-prof/Makefile
deleted file mode 100644
index 86eb54d..0000000
--- a/contrib/llvm/tools/llvm-prof/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- tools/llvm-prof/Makefile ----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-
-TOOLNAME = llvm-prof
-LINK_COMPONENTS = bitreader analysis
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-prof/llvm-prof.cpp b/contrib/llvm/tools/llvm-prof/llvm-prof.cpp
index 9d0b468..d9b6713 100644
--- a/contrib/llvm/tools/llvm-prof/llvm-prof.cpp
+++ b/contrib/llvm/tools/llvm-prof/llvm-prof.cpp
@@ -200,9 +200,9 @@ bool ProfileInfoPrinterPass::runOnModule(Module &M) {
}
outs() << format("%3d", i+1) << ". "
- << format("%5.2g", FunctionCounts[i].second) << "/"
- << format("%g", TotalExecutions) << " "
- << FunctionCounts[i].first->getNameStr() << "\n";
+ << format("%5.2g", FunctionCounts[i].second) << "/"
+ << format("%g", TotalExecutions) << " "
+ << FunctionCounts[i].first->getName() << "\n";
}
std::set<Function*> FunctionsToPrint;
@@ -225,12 +225,12 @@ bool ProfileInfoPrinterPass::runOnModule(Module &M) {
for (unsigned i = 0; i != BlocksToPrint; ++i) {
if (Counts[i].second == 0) break;
Function *F = Counts[i].first->getParent();
- outs() << format("%3d", i+1) << ". "
- << format("%5g", Counts[i].second/(double)TotalExecutions*100) << "% "
- << format("%5.0f", Counts[i].second) << "/"
- << format("%g", TotalExecutions) << "\t"
- << F->getNameStr() << "() - "
- << Counts[i].first->getNameStr() << "\n";
+ outs() << format("%3d", i+1) << ". "
+ << format("%5g", Counts[i].second/(double)TotalExecutions*100)<<"% "
+ << format("%5.0f", Counts[i].second) << "/"
+ << format("%g", TotalExecutions) << "\t"
+ << F->getName() << "() - "
+ << Counts[i].first->getName() << "\n";
FunctionsToPrint.insert(F);
}
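Beyond re-indentation, the llvm-prof hunks migrate from Value::getNameStr() to getName(): the latter returns a StringRef view of the value's name rather than materializing a fresh std::string. A two-line sketch of the distinction, with F standing for any llvm::Function:

// Sketch: StringRef view vs. explicit copy when printing a value's name.
#include "llvm/Function.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

static void printName(const llvm::Function *F) {
  llvm::StringRef Ref = F->getName();     // borrows the existing storage
  std::string Copy = F->getName().str();  // explicit copy when one is needed
  llvm::outs() << Ref << " / " << Copy << "\n";
}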
diff --git a/contrib/llvm/tools/llvm-ranlib/CMakeLists.txt b/contrib/llvm/tools/llvm-ranlib/CMakeLists.txt
deleted file mode 100644
index 3116d2e..0000000
--- a/contrib/llvm/tools/llvm-ranlib/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-set(LLVM_LINK_COMPONENTS archive)
-set(LLVM_REQUIRES_EH 1)
-
-add_llvm_tool(llvm-ranlib
- llvm-ranlib.cpp
- )
diff --git a/contrib/llvm/tools/llvm-ranlib/Makefile b/contrib/llvm/tools/llvm-ranlib/Makefile
deleted file mode 100644
index 46a10e6..0000000
--- a/contrib/llvm/tools/llvm-ranlib/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- tools/llvm-ranlib/Makefile --------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llvm-ranlib
-LINK_COMPONENTS = archive
-REQUIRES_EH := 1
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp b/contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp
new file mode 100644
index 0000000..3be1289
--- /dev/null
+++ b/contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp
@@ -0,0 +1,218 @@
+//===- llvm-readobj.cpp - Dump contents of an Object File -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This program is a utility that works like traditional Unix "readelf",
+// except that it can handle any type of object file recognized by lib/Object.
+//
+// It makes use of the generic ObjectFile interface.
+//
+// Caution: This utility is new, experimental, unsupported, and incomplete.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/FormattedStream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+static cl::opt<std::string>
+InputFilename(cl::Positional, cl::desc("<input object>"), cl::init(""));
+
+void DumpSymbolHeader() {
+ outs() << format(" %-32s", (const char*)"Name")
+ << format(" %-4s", (const char*)"Type")
+ << format(" %-16s", (const char*)"Address")
+ << format(" %-16s", (const char*)"Size")
+ << format(" %-16s", (const char*)"FileOffset")
+ << format(" %-26s", (const char*)"Flags")
+ << "\n";
+}
+
+const char *GetTypeStr(SymbolRef::Type Type) {
+ switch (Type) {
+ case SymbolRef::ST_Unknown: return "?";
+ case SymbolRef::ST_Data: return "DATA";
+ case SymbolRef::ST_Debug: return "DBG";
+ case SymbolRef::ST_File: return "FILE";
+ case SymbolRef::ST_Function: return "FUNC";
+ case SymbolRef::ST_Other: return "-";
+ }
+ return "INV";
+}
+
+std::string GetFlagStr(uint32_t Flags) {
+ std::string result;
+ if (Flags & SymbolRef::SF_Undefined)
+ result += "undef,";
+ if (Flags & SymbolRef::SF_Global)
+ result += "global,";
+ if (Flags & SymbolRef::SF_Weak)
+ result += "weak,";
+ if (Flags & SymbolRef::SF_Absolute)
+ result += "absolute,";
+ if (Flags & SymbolRef::SF_ThreadLocal)
+ result += "threadlocal,";
+ if (Flags & SymbolRef::SF_Common)
+ result += "common,";
+ if (Flags & SymbolRef::SF_FormatSpecific)
+ result += "formatspecific,";
+
+ // Remove trailing comma
+ if (result.size() > 0) {
+ result.erase(result.size() - 1);
+ }
+ return result;
+}
+
+void DumpSymbol(const SymbolRef &Sym, const ObjectFile *obj, bool IsDynamic) {
+ StringRef Name;
+ SymbolRef::Type Type;
+ uint32_t Flags;
+ uint64_t Address;
+ uint64_t Size;
+ uint64_t FileOffset;
+ Sym.getName(Name);
+ Sym.getAddress(Address);
+ Sym.getSize(Size);
+ Sym.getFileOffset(FileOffset);
+ Sym.getType(Type);
+ Sym.getFlags(Flags);
+ std::string FullName = Name;
+
+ // If this is a dynamic symbol from an ELF object, append
+ // the symbol's version to the name.
+ if (IsDynamic && obj->isELF()) {
+ StringRef Version;
+ bool IsDefault;
+ GetELFSymbolVersion(obj, Sym, Version, IsDefault);
+ if (!Version.empty()) {
+ FullName += (IsDefault ? "@@" : "@");
+ FullName += Version;
+ }
+ }
+
+ // format() can't handle StringRefs
+ outs() << format(" %-32s", FullName.c_str())
+ << format(" %-4s", GetTypeStr(Type))
+ << format(" %16" PRIx64, Address)
+ << format(" %16" PRIx64, Size)
+ << format(" %16" PRIx64, FileOffset)
+ << " " << GetFlagStr(Flags)
+ << "\n";
+}
+
+
+// Iterate through the normal symbols in the ObjectFile
+void DumpSymbols(const ObjectFile *obj) {
+ error_code ec;
+ uint32_t count = 0;
+ outs() << "Symbols:\n";
+ symbol_iterator it = obj->begin_symbols();
+ symbol_iterator ie = obj->end_symbols();
+ while (it != ie) {
+ DumpSymbol(*it, obj, false);
+ it.increment(ec);
+ if (ec)
+ report_fatal_error("Symbol iteration failed");
+ ++count;
+ }
+ outs() << " Total: " << count << "\n\n";
+}
+
+// Iterate through the dynamic symbols in the ObjectFile.
+void DumpDynamicSymbols(const ObjectFile *obj) {
+ error_code ec;
+ uint32_t count = 0;
+ outs() << "Dynamic Symbols:\n";
+ symbol_iterator it = obj->begin_dynamic_symbols();
+ symbol_iterator ie = obj->end_dynamic_symbols();
+ while (it != ie) {
+ DumpSymbol(*it, obj, true);
+ it.increment(ec);
+ if (ec)
+ report_fatal_error("Symbol iteration failed");
+ ++count;
+ }
+ outs() << " Total: " << count << "\n\n";
+}
+
+void DumpLibrary(const LibraryRef &lib) {
+ StringRef path;
+ lib.getPath(path);
+ outs() << " " << path << "\n";
+}
+
+// Iterate through needed libraries
+void DumpLibrariesNeeded(const ObjectFile *obj) {
+ error_code ec;
+ uint32_t count = 0;
+ library_iterator it = obj->begin_libraries_needed();
+ library_iterator ie = obj->end_libraries_needed();
+ outs() << "Libraries needed:\n";
+ while (it != ie) {
+ DumpLibrary(*it);
+ it.increment(ec);
+ if (ec)
+ report_fatal_error("Needed libraries iteration failed");
+ ++count;
+ }
+ outs() << " Total: " << count << "\n\n";
+}
+
+void DumpHeaders(const ObjectFile *obj) {
+ outs() << "File Format : " << obj->getFileFormatName() << "\n";
+ outs() << "Arch : "
+ << Triple::getArchTypeName((llvm::Triple::ArchType)obj->getArch())
+ << "\n";
+ outs() << "Address Size: " << (8*obj->getBytesInAddress()) << " bits\n";
+ outs() << "Load Name : " << obj->getLoadName() << "\n";
+ outs() << "\n";
+}
+
+int main(int argc, char** argv) {
+ error_code ec;
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ cl::ParseCommandLineOptions(argc, argv,
+ "LLVM Object Reader\n");
+
+ if (InputFilename.empty()) {
+ errs() << "Please specify an input filename\n";
+ return 1;
+ }
+
+ // Open the object file
+ OwningPtr<MemoryBuffer> File;
+ if (MemoryBuffer::getFile(InputFilename, File)) {
+ errs() << InputFilename << ": Open failed\n";
+ return 1;
+ }
+
+ ObjectFile *obj = ObjectFile::createObjectFile(File.take());
+ if (!obj) {
+    errs() << InputFilename << ": Object type not recognized\n";
+    return 1;
+  }
+
+ DumpHeaders(obj);
+ DumpSymbols(obj);
+ DumpDynamicSymbols(obj);
+ DumpLibrariesNeeded(obj);
+ return 0;
+}
+
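GetFlagStr in the new tool above tests each SF_* bit by hand and then trims the trailing comma. An equivalent table-driven decoder, sketched against the same SymbolRef flag constants:

// Sketch: table-driven variant of GetFlagStr, producing the same output.
#include "llvm/Object/ObjectFile.h"
#include <string>

static std::string flagsToString(uint32_t Flags) {
  using llvm::object::SymbolRef;
  static const struct { uint32_t Bit; const char *Name; } Table[] = {
    { SymbolRef::SF_Undefined,      "undef" },
    { SymbolRef::SF_Global,         "global" },
    { SymbolRef::SF_Weak,           "weak" },
    { SymbolRef::SF_Absolute,       "absolute" },
    { SymbolRef::SF_ThreadLocal,    "threadlocal" },
    { SymbolRef::SF_Common,         "common" },
    { SymbolRef::SF_FormatSpecific, "formatspecific" },
  };
  std::string Result;
  for (unsigned i = 0, e = sizeof(Table) / sizeof(Table[0]); i != e; ++i) {
    if (!(Flags & Table[i].Bit))
      continue;
    if (!Result.empty())
      Result += ",";
    Result += Table[i].Name;
  }
  return Result;
}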
diff --git a/contrib/llvm/tools/llvm-rtdyld/CMakeLists.txt b/contrib/llvm/tools/llvm-rtdyld/CMakeLists.txt
deleted file mode 100644
index 17e2c3e..0000000
--- a/contrib/llvm/tools/llvm-rtdyld/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS ${LLVM_TARGETS_TO_BUILD} support MC object RuntimeDyld JIT)
-
-add_llvm_tool(llvm-rtdyld
- llvm-rtdyld.cpp
- )
diff --git a/contrib/llvm/tools/llvm-rtdyld/Makefile b/contrib/llvm/tools/llvm-rtdyld/Makefile
deleted file mode 100644
index 0d57277..0000000
--- a/contrib/llvm/tools/llvm-rtdyld/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- tools/llvm-rtdyld/Makefile --------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llvm-rtdyld
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-# Include this here so we can get the configuration of the targets
-# that have been configured for construction. We have to do this
-# early so we can set up LINK_COMPONENTS before including Makefile.rules
-include $(LEVEL)/Makefile.config
-
-LINK_COMPONENTS := $(TARGETS_TO_BUILD) support MC object RuntimeDyld JIT
-
-include $(LLVM_SRC_ROOT)/Makefile.rules
diff --git a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
index ec9d652..01a7d15 100644
--- a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
+++ b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
@@ -51,22 +51,30 @@ EntryPoint("entry",
class TrivialMemoryManager : public RTDyldMemoryManager {
public:
SmallVector<sys::MemoryBlock, 16> FunctionMemory;
+ SmallVector<sys::MemoryBlock, 16> DataMemory;
+
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID);
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID);
+
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) {
+ return 0;
+ }
- uint8_t *startFunctionBody(const char *Name, uintptr_t &Size);
- void endFunctionBody(const char *Name, uint8_t *FunctionStart,
- uint8_t *FunctionEnd);
};
-uint8_t *TrivialMemoryManager::startFunctionBody(const char *Name,
- uintptr_t &Size) {
+uint8_t *TrivialMemoryManager::allocateCodeSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID) {
return (uint8_t*)sys::Memory::AllocateRWX(Size, 0, 0).base();
}
-void TrivialMemoryManager::endFunctionBody(const char *Name,
- uint8_t *FunctionStart,
- uint8_t *FunctionEnd) {
- uintptr_t Size = FunctionEnd - FunctionStart + 1;
- FunctionMemory.push_back(sys::MemoryBlock(FunctionStart, Size));
+uint8_t *TrivialMemoryManager::allocateDataSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID) {
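+  // This trivial manager ignores Alignment and SectionID and hands back
+  // writable+executable (RWX) memory for data as well as code.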
+ return (uint8_t*)sys::Memory::AllocateRWX(Size, 0, 0).base();
}
static const char *ProgramName;
@@ -142,10 +150,7 @@ int main(int argc, char **argv) {
cl::ParseCommandLineOptions(argc, argv, "llvm MC-JIT tool\n");
switch (Action) {
- default:
case AC_Execute:
return executeInput();
}
-
- return 0;
}
diff --git a/contrib/llvm/tools/llvm-stress/llvm-stress.cpp b/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
new file mode 100644
index 0000000..d284ea5
--- /dev/null
+++ b/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
@@ -0,0 +1,702 @@
+//===-- llvm-stress.cpp - Generate random LL files to stress-test LLVM ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This program is a utility that generates random .ll files to stress-test
+// different components in LLVM.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Constants.h"
+#include "llvm/Instruction.h"
+#include "llvm/CallGraphSCCPass.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Support/PassNameParser.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PluginLoader.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include <memory>
+#include <sstream>
+#include <set>
+#include <vector>
+#include <algorithm>
+using namespace llvm;
+
+static cl::opt<unsigned> SeedCL("seed",
+ cl::desc("Seed used for randomness"), cl::init(0));
+static cl::opt<unsigned> SizeCL("size",
+ cl::desc("The estimated size of the generated function (# of instrs)"),
+ cl::init(100));
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Override output filename"),
+ cl::value_desc("filename"));
+
+static cl::opt<bool> GenHalfFloat("generate-half-float",
+ cl::desc("Generate half-length floating-point values"), cl::init(false));
+static cl::opt<bool> GenX86FP80("generate-x86-fp80",
+ cl::desc("Generate 80-bit X86 floating-point values"), cl::init(false));
+static cl::opt<bool> GenFP128("generate-fp128",
+ cl::desc("Generate 128-bit floating-point values"), cl::init(false));
+static cl::opt<bool> GenPPCFP128("generate-ppc-fp128",
+ cl::desc("Generate 128-bit PPC floating-point values"), cl::init(false));
+static cl::opt<bool> GenX86MMX("generate-x86-mmx",
+ cl::desc("Generate X86 MMX floating-point values"), cl::init(false));
+
+/// A utility class to provide a pseudo-random number generator which is
+/// the same across all platforms. This is somewhat close to the libc
+/// implementation. Note: This is not a cryptographically secure pseudorandom
+/// number generator.
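+/// For example, with the default seed of 0 the first call to Rand() computes
+/// Val = 0 + 0x000b07a1, sets Seed = Val * 0x3c7c0ac1 (truncated to 32 bits),
+/// and returns Seed & 0x7ffff.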
+class Random {
+public:
+ /// C'tor
+ Random(unsigned _seed):Seed(_seed) {}
+
+ /// Return a random integer, up to a
+ /// maximum of 2**19 - 1.
+ uint32_t Rand() {
+ uint32_t Val = Seed + 0x000b07a1;
+ Seed = (Val * 0x3c7c0ac1);
+ // Only lowest 19 bits are random-ish.
+ return Seed & 0x7ffff;
+ }
+
+ /// Return a random 32 bit integer.
+ uint32_t Rand32() {
+ uint32_t Val = Rand();
+ Val &= 0xffff;
+ return Val | (Rand() << 16);
+ }
+
+ /// Return a random 64 bit integer.
+ uint64_t Rand64() {
+ uint64_t Val = Rand32();
+ return Val | (uint64_t(Rand32()) << 32);
+ }
+private:
+ unsigned Seed;
+};
+
+/// Generate an empty function with a default argument list.
+Function *GenEmptyFunction(Module *M) {
+ // Type Definitions
+ std::vector<Type*> ArgsTy;
+ // Define a few arguments
+ LLVMContext &Context = M->getContext();
+ ArgsTy.push_back(PointerType::get(IntegerType::getInt8Ty(Context), 0));
+ ArgsTy.push_back(PointerType::get(IntegerType::getInt32Ty(Context), 0));
+ ArgsTy.push_back(PointerType::get(IntegerType::getInt64Ty(Context), 0));
+ ArgsTy.push_back(IntegerType::getInt32Ty(Context));
+ ArgsTy.push_back(IntegerType::getInt64Ty(Context));
+ ArgsTy.push_back(IntegerType::getInt8Ty(Context));
+
+ FunctionType *FuncTy = FunctionType::get(Type::getVoidTy(Context), ArgsTy, 0);
+ // Pick a unique name to describe the input parameters
+ std::stringstream ss;
+ ss<<"autogen_SD"<<SeedCL;
+ Function *Func = Function::Create(FuncTy, GlobalValue::ExternalLinkage,
+ ss.str(), M);
+
+ Func->setCallingConv(CallingConv::C);
+ return Func;
+}
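+// With the default seed the emitted prototype is roughly:
+//   void @autogen_SD0(i8*, i32*, i64*, i32, i64, i8)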
+
+/// A base class, implementing utilities needed for
+/// modifying and adding new random instructions.
+struct Modifier {
+ /// Used to store the randomly generated values.
+ typedef std::vector<Value*> PieceTable;
+
+public:
+ /// C'tor
+ Modifier(BasicBlock *Block, PieceTable *PT, Random *R):
+ BB(Block),PT(PT),Ran(R),Context(BB->getContext()) {}
+ /// Add a new instruction.
+ virtual void Act() = 0;
+  /// Add N new instructions.
+ virtual void ActN(unsigned n) {
+ for (unsigned i=0; i<n; ++i)
+ Act();
+ }
+
+protected:
+ /// Return a random value from the list of known values.
+ Value *getRandomVal() {
+ assert(PT->size());
+ return PT->at(Ran->Rand() % PT->size());
+ }
+
+ Constant *getRandomConstant(Type *Tp) {
+ if (Tp->isIntegerTy()) {
+ if (Ran->Rand() & 1)
+ return ConstantInt::getAllOnesValue(Tp);
+ return ConstantInt::getNullValue(Tp);
+ } else if (Tp->isFloatingPointTy()) {
+ if (Ran->Rand() & 1)
+ return ConstantFP::getAllOnesValue(Tp);
+ return ConstantFP::getNullValue(Tp);
+ }
+ return UndefValue::get(Tp);
+ }
+
+ /// Return a random value with a known type.
+ Value *getRandomValue(Type *Tp) {
+ unsigned index = Ran->Rand();
+ for (unsigned i=0; i<PT->size(); ++i) {
+ Value *V = PT->at((index + i) % PT->size());
+ if (V->getType() == Tp)
+ return V;
+ }
+
+ // If the requested type was not found, generate a constant value.
+ if (Tp->isIntegerTy()) {
+ if (Ran->Rand() & 1)
+ return ConstantInt::getAllOnesValue(Tp);
+ return ConstantInt::getNullValue(Tp);
+ } else if (Tp->isFloatingPointTy()) {
+ if (Ran->Rand() & 1)
+ return ConstantFP::getAllOnesValue(Tp);
+ return ConstantFP::getNullValue(Tp);
+ } else if (Tp->isVectorTy()) {
+ VectorType *VTp = cast<VectorType>(Tp);
+
+ std::vector<Constant*> TempValues;
+ TempValues.reserve(VTp->getNumElements());
+ for (unsigned i = 0; i < VTp->getNumElements(); ++i)
+ TempValues.push_back(getRandomConstant(VTp->getScalarType()));
+
+ ArrayRef<Constant*> VectorValue(TempValues);
+ return ConstantVector::get(VectorValue);
+ }
+
+ return UndefValue::get(Tp);
+ }
+
+ /// Return a random value of any pointer type.
+ Value *getRandomPointerValue() {
+ unsigned index = Ran->Rand();
+ for (unsigned i=0; i<PT->size(); ++i) {
+ Value *V = PT->at((index + i) % PT->size());
+ if (V->getType()->isPointerTy())
+ return V;
+ }
+ return UndefValue::get(pickPointerType());
+ }
+
+ /// Return a random value of any vector type.
+ Value *getRandomVectorValue() {
+ unsigned index = Ran->Rand();
+ for (unsigned i=0; i<PT->size(); ++i) {
+ Value *V = PT->at((index + i) % PT->size());
+ if (V->getType()->isVectorTy())
+ return V;
+ }
+ return UndefValue::get(pickVectorType());
+ }
+
+ /// Pick a random type.
+ Type *pickType() {
+ return (Ran->Rand() & 1 ? pickVectorType() : pickScalarType());
+ }
+
+ /// Pick a random pointer type.
+ Type *pickPointerType() {
+ Type *Ty = pickType();
+ return PointerType::get(Ty, 0);
+ }
+
+ /// Pick a random vector type.
+ Type *pickVectorType(unsigned len = (unsigned)-1) {
+    // Pick a random vector width in the range 2**0 to 2**4. Adding two
+    // random values produces a normal-like distribution centered near 2**2.
+ unsigned width = 1<<((Ran->Rand() % 3) + (Ran->Rand() % 3));
+ Type *Ty;
+
+ // Vectors of x86mmx are illegal; keep trying till we get something else.
+ do {
+ Ty = pickScalarType();
+ } while (Ty->isX86_MMXTy());
+
+ if (len != (unsigned)-1)
+ width = len;
+ return VectorType::get(Ty, width);
+ }
+
+ /// Pick a random scalar type.
+ Type *pickScalarType() {
+ Type *t = 0;
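+    // Optional types (half, x86_fp80, fp128, ppc_fp128, x86_mmx) leave t null
+    // when their flag is off, in which case the loop simply picks again.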
+ do {
+ switch (Ran->Rand() % 30) {
+ case 0: t = Type::getInt1Ty(Context); break;
+ case 1: t = Type::getInt8Ty(Context); break;
+ case 2: t = Type::getInt16Ty(Context); break;
+ case 3: case 4:
+ case 5: t = Type::getFloatTy(Context); break;
+ case 6: case 7:
+ case 8: t = Type::getDoubleTy(Context); break;
+ case 9: case 10:
+ case 11: t = Type::getInt32Ty(Context); break;
+ case 12: case 13:
+ case 14: t = Type::getInt64Ty(Context); break;
+ case 15: case 16:
+ case 17: if (GenHalfFloat) t = Type::getHalfTy(Context); break;
+ case 18: case 19:
+ case 20: if (GenX86FP80) t = Type::getX86_FP80Ty(Context); break;
+ case 21: case 22:
+ case 23: if (GenFP128) t = Type::getFP128Ty(Context); break;
+ case 24: case 25:
+ case 26: if (GenPPCFP128) t = Type::getPPC_FP128Ty(Context); break;
+ case 27: case 28:
+ case 29: if (GenX86MMX) t = Type::getX86_MMXTy(Context); break;
+ default: llvm_unreachable("Invalid scalar value");
+ }
+ } while (t == 0);
+
+ return t;
+ }
+
+ /// Basic block to populate
+ BasicBlock *BB;
+ /// Value table
+ PieceTable *PT;
+ /// Random number generator
+ Random *Ran;
+ /// Context
+ LLVMContext &Context;
+};
+
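+// Each concrete modifier below overrides Act() to append one kind of random
+// instruction to the basic block and, when it produces a result, records the
+// new value in the piece table for later modifiers to consume.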
+struct LoadModifier: public Modifier {
+ LoadModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+    // Try to use predefined pointers. If none exist, use an undef pointer value.
+ Value *Ptr = getRandomPointerValue();
+ Value *V = new LoadInst(Ptr, "L", BB->getTerminator());
+ PT->push_back(V);
+ }
+};
+
+struct StoreModifier: public Modifier {
+ StoreModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+    // Try to use predefined pointers. If none exist, use an undef pointer value.
+ Value *Ptr = getRandomPointerValue();
+ Type *Tp = Ptr->getType();
+ Value *Val = getRandomValue(Tp->getContainedType(0));
+ Type *ValTy = Val->getType();
+
+ // Do not store vectors of i1s because they are unsupported
+ // by the codegen.
+ if (ValTy->isVectorTy() && ValTy->getScalarSizeInBits() == 1)
+ return;
+
+ new StoreInst(Val, Ptr, BB->getTerminator());
+ }
+};
+
+struct BinModifier: public Modifier {
+ BinModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+
+ virtual void Act() {
+ Value *Val0 = getRandomVal();
+ Value *Val1 = getRandomValue(Val0->getType());
+
+ // Don't handle pointer types.
+ if (Val0->getType()->isPointerTy() ||
+ Val1->getType()->isPointerTy())
+ return;
+
+ // Don't handle i1 types.
+ if (Val0->getType()->getScalarSizeInBits() == 1)
+ return;
+
+
+ bool isFloat = Val0->getType()->getScalarType()->isFloatingPointTy();
+ Instruction* Term = BB->getTerminator();
+ unsigned R = Ran->Rand() % (isFloat ? 7 : 13);
+ Instruction::BinaryOps Op;
+
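+    // The first seven opcodes have both integer and FP forms; the shift and
+    // bitwise opcodes below are integer-only, so the FP range is capped at 7.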
+ switch (R) {
+ default: llvm_unreachable("Invalid BinOp");
+ case 0:{Op = (isFloat?Instruction::FAdd : Instruction::Add); break; }
+ case 1:{Op = (isFloat?Instruction::FSub : Instruction::Sub); break; }
+ case 2:{Op = (isFloat?Instruction::FMul : Instruction::Mul); break; }
+ case 3:{Op = (isFloat?Instruction::FDiv : Instruction::SDiv); break; }
+ case 4:{Op = (isFloat?Instruction::FDiv : Instruction::UDiv); break; }
+ case 5:{Op = (isFloat?Instruction::FRem : Instruction::SRem); break; }
+ case 6:{Op = (isFloat?Instruction::FRem : Instruction::URem); break; }
+ case 7: {Op = Instruction::Shl; break; }
+ case 8: {Op = Instruction::LShr; break; }
+ case 9: {Op = Instruction::AShr; break; }
+ case 10:{Op = Instruction::And; break; }
+ case 11:{Op = Instruction::Or; break; }
+ case 12:{Op = Instruction::Xor; break; }
+ }
+
+ PT->push_back(BinaryOperator::Create(Op, Val0, Val1, "B", Term));
+ }
+};
+
+/// Generate constant values.
+struct ConstModifier: public Modifier {
+ ConstModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+ Type *Ty = pickType();
+
+ if (Ty->isVectorTy()) {
+ switch (Ran->Rand() % 2) {
+ case 0: if (Ty->getScalarType()->isIntegerTy())
+ return PT->push_back(ConstantVector::getAllOnesValue(Ty));
+ case 1: if (Ty->getScalarType()->isIntegerTy())
+ return PT->push_back(ConstantVector::getNullValue(Ty));
+ }
+ }
+
+ if (Ty->isFloatingPointTy()) {
+ // Generate 128 random bits, the size of the (currently)
+ // largest floating-point types.
+ uint64_t RandomBits[2];
+ for (unsigned i = 0; i < 2; ++i)
+ RandomBits[i] = Ran->Rand64();
+
+ APInt RandomInt(Ty->getPrimitiveSizeInBits(), makeArrayRef(RandomBits));
+
+ bool isIEEE = !Ty->isX86_FP80Ty() && !Ty->isPPC_FP128Ty();
+ APFloat RandomFloat(RandomInt, isIEEE);
+
+ if (Ran->Rand() & 1)
+ return PT->push_back(ConstantFP::getNullValue(Ty));
+ return PT->push_back(ConstantFP::get(Ty->getContext(), RandomFloat));
+ }
+
+ if (Ty->isIntegerTy()) {
+ switch (Ran->Rand() % 7) {
+ case 0: if (Ty->isIntegerTy())
+ return PT->push_back(ConstantInt::get(Ty,
+ APInt::getAllOnesValue(Ty->getPrimitiveSizeInBits())));
+ case 1: if (Ty->isIntegerTy())
+ return PT->push_back(ConstantInt::get(Ty,
+ APInt::getNullValue(Ty->getPrimitiveSizeInBits())));
+ case 2: case 3: case 4: case 5:
+ case 6: if (Ty->isIntegerTy())
+ PT->push_back(ConstantInt::get(Ty, Ran->Rand()));
+ }
+ }
+
+ }
+};
+
+struct AllocaModifier: public Modifier {
+ AllocaModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R){}
+
+ virtual void Act() {
+ Type *Tp = pickType();
+ PT->push_back(new AllocaInst(Tp, "A", BB->getFirstNonPHI()));
+ }
+};
+
+struct ExtractElementModifier: public Modifier {
+ ExtractElementModifier(BasicBlock *BB, PieceTable *PT, Random *R):
+ Modifier(BB, PT, R) {}
+
+ virtual void Act() {
+ Value *Val0 = getRandomVectorValue();
+ Value *V = ExtractElementInst::Create(Val0,
+ ConstantInt::get(Type::getInt32Ty(BB->getContext()),
+ Ran->Rand() % cast<VectorType>(Val0->getType())->getNumElements()),
+ "E", BB->getTerminator());
+ return PT->push_back(V);
+ }
+};
+
+struct ShuffModifier: public Modifier {
+ ShuffModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+
+ Value *Val0 = getRandomVectorValue();
+ Value *Val1 = getRandomValue(Val0->getType());
+
+ unsigned Width = cast<VectorType>(Val0->getType())->getNumElements();
+ std::vector<Constant*> Idxs;
+
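+    // Mask indices in [0, 2*Width) select lanes from the concatenation of the
+    // two input vectors; roughly one lane in five is made undef.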
+ Type *I32 = Type::getInt32Ty(BB->getContext());
+ for (unsigned i=0; i<Width; ++i) {
+ Constant *CI = ConstantInt::get(I32, Ran->Rand() % (Width*2));
+ // Pick some undef values.
+ if (!(Ran->Rand() % 5))
+ CI = UndefValue::get(I32);
+ Idxs.push_back(CI);
+ }
+
+ Constant *Mask = ConstantVector::get(Idxs);
+
+ Value *V = new ShuffleVectorInst(Val0, Val1, Mask, "Shuff",
+ BB->getTerminator());
+ PT->push_back(V);
+ }
+};
+
+struct InsertElementModifier: public Modifier {
+ InsertElementModifier(BasicBlock *BB, PieceTable *PT, Random *R):
+ Modifier(BB, PT, R) {}
+
+ virtual void Act() {
+ Value *Val0 = getRandomVectorValue();
+ Value *Val1 = getRandomValue(Val0->getType()->getScalarType());
+
+ Value *V = InsertElementInst::Create(Val0, Val1,
+ ConstantInt::get(Type::getInt32Ty(BB->getContext()),
+ Ran->Rand() % cast<VectorType>(Val0->getType())->getNumElements()),
+ "I", BB->getTerminator());
+ return PT->push_back(V);
+ }
+
+};
+
+struct CastModifier: public Modifier {
+ CastModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+
+ Value *V = getRandomVal();
+ Type *VTy = V->getType();
+ Type *DestTy = pickScalarType();
+
+    // If the source is a vector, pick a vector destination type of the same width.
+ if (VTy->isVectorTy()) {
+ VectorType *VecTy = cast<VectorType>(VTy);
+ DestTy = pickVectorType(VecTy->getNumElements());
+ }
+
+    // No need to cast.
+ if (VTy == DestTy) return;
+
+ // Pointers:
+ if (VTy->isPointerTy()) {
+ if (!DestTy->isPointerTy())
+ DestTy = PointerType::get(DestTy, 0);
+ return PT->push_back(
+ new BitCastInst(V, DestTy, "PC", BB->getTerminator()));
+ }
+
+ // Generate lots of bitcasts.
+ if ((Ran->Rand() & 1) &&
+ VTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
+ return PT->push_back(
+ new BitCastInst(V, DestTy, "BC", BB->getTerminator()));
+ }
+
+ // Both types are integers:
+ if (VTy->getScalarType()->isIntegerTy() &&
+ DestTy->getScalarType()->isIntegerTy()) {
+ if (VTy->getScalarType()->getPrimitiveSizeInBits() >
+ DestTy->getScalarType()->getPrimitiveSizeInBits()) {
+ return PT->push_back(
+ new TruncInst(V, DestTy, "Tr", BB->getTerminator()));
+ } else {
+ if (Ran->Rand() & 1)
+ return PT->push_back(
+ new ZExtInst(V, DestTy, "ZE", BB->getTerminator()));
+ return PT->push_back(new SExtInst(V, DestTy, "Se", BB->getTerminator()));
+ }
+ }
+
+ // Fp to int.
+ if (VTy->getScalarType()->isFloatingPointTy() &&
+ DestTy->getScalarType()->isIntegerTy()) {
+ if (Ran->Rand() & 1)
+ return PT->push_back(
+ new FPToSIInst(V, DestTy, "FC", BB->getTerminator()));
+ return PT->push_back(new FPToUIInst(V, DestTy, "FC", BB->getTerminator()));
+ }
+
+ // Int to fp.
+ if (VTy->getScalarType()->isIntegerTy() &&
+ DestTy->getScalarType()->isFloatingPointTy()) {
+ if (Ran->Rand() & 1)
+ return PT->push_back(
+ new SIToFPInst(V, DestTy, "FC", BB->getTerminator()));
+ return PT->push_back(new UIToFPInst(V, DestTy, "FC", BB->getTerminator()));
+
+ }
+
+ // Both floats.
+ if (VTy->getScalarType()->isFloatingPointTy() &&
+ DestTy->getScalarType()->isFloatingPointTy()) {
+ if (VTy->getScalarType()->getPrimitiveSizeInBits() >
+ DestTy->getScalarType()->getPrimitiveSizeInBits()) {
+ return PT->push_back(
+ new FPTruncInst(V, DestTy, "Tr", BB->getTerminator()));
+ } else {
+ return PT->push_back(
+ new FPExtInst(V, DestTy, "ZE", BB->getTerminator()));
+ }
+ }
+ }
+
+};
+
+struct SelectModifier: public Modifier {
+ SelectModifier(BasicBlock *BB, PieceTable *PT, Random *R):
+ Modifier(BB, PT, R) {}
+
+ virtual void Act() {
+    // Try a bunch of different select configurations until a valid one is found.
+ Value *Val0 = getRandomVal();
+ Value *Val1 = getRandomValue(Val0->getType());
+
+ Type *CondTy = Type::getInt1Ty(Context);
+
+ // If the value type is a vector, and we allow vector select, then in 50%
+ // of the cases generate a vector select.
+    if (Val0->getType()->isVectorTy() && (Ran->Rand() & 1)) {
+ unsigned NumElem = cast<VectorType>(Val0->getType())->getNumElements();
+ CondTy = VectorType::get(CondTy, NumElem);
+ }
+
+ Value *Cond = getRandomValue(CondTy);
+ Value *V = SelectInst::Create(Cond, Val0, Val1, "Sl", BB->getTerminator());
+ return PT->push_back(V);
+ }
+};
+
+
+struct CmpModifier: public Modifier {
+ CmpModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+
+ Value *Val0 = getRandomVal();
+ Value *Val1 = getRandomValue(Val0->getType());
+
+ if (Val0->getType()->isPointerTy()) return;
+ bool fp = Val0->getType()->getScalarType()->isFloatingPointTy();
+
+ int op;
+ if (fp) {
+ op = Ran->Rand() %
+ (CmpInst::LAST_FCMP_PREDICATE - CmpInst::FIRST_FCMP_PREDICATE) +
+ CmpInst::FIRST_FCMP_PREDICATE;
+ } else {
+ op = Ran->Rand() %
+ (CmpInst::LAST_ICMP_PREDICATE - CmpInst::FIRST_ICMP_PREDICATE) +
+ CmpInst::FIRST_ICMP_PREDICATE;
+ }
+
+ Value *V = CmpInst::Create(fp ? Instruction::FCmp : Instruction::ICmp,
+ op, Val0, Val1, "Cmp", BB->getTerminator());
+ return PT->push_back(V);
+ }
+};
+
+void FillFunction(Function *F) {
+ // Create a legal entry block.
+ BasicBlock *BB = BasicBlock::Create(F->getContext(), "BB", F);
+ ReturnInst::Create(F->getContext(), BB);
+
+ // Create the value table.
+ Modifier::PieceTable PT;
+ // Pick an initial seed value
+ Random R(SeedCL);
+
+ // Consider arguments as legal values.
+ for (Function::arg_iterator it = F->arg_begin(), e = F->arg_end();
+ it != e; ++it)
+ PT.push_back(it);
+
+ // List of modifiers which add new random instructions.
+ std::vector<Modifier*> Modifiers;
+ std::auto_ptr<Modifier> LM(new LoadModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> SM(new StoreModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> EE(new ExtractElementModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> SHM(new ShuffModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> IE(new InsertElementModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> BM(new BinModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> CM(new CastModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> SLM(new SelectModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> PM(new CmpModifier(BB, &PT, &R));
+ Modifiers.push_back(LM.get());
+ Modifiers.push_back(SM.get());
+ Modifiers.push_back(EE.get());
+ Modifiers.push_back(SHM.get());
+ Modifiers.push_back(IE.get());
+ Modifiers.push_back(BM.get());
+ Modifiers.push_back(CM.get());
+ Modifiers.push_back(SLM.get());
+ Modifiers.push_back(PM.get());
+
+ // Generate the random instructions
+ AllocaModifier AM(BB, &PT, &R); AM.ActN(5); // Throw in a few allocas
+ ConstModifier COM(BB, &PT, &R); COM.ActN(40); // Throw in a few constants
+
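+  // Apply the modifiers round-robin so each instruction kind appears with
+  // roughly equal frequency, totaling about SizeCL instructions.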
+ for (unsigned i=0; i< SizeCL / Modifiers.size(); ++i)
+ for (std::vector<Modifier*>::iterator it = Modifiers.begin(),
+ e = Modifiers.end(); it != e; ++it) {
+ (*it)->Act();
+ }
+
+ SM->ActN(5); // Throw in a few stores.
+}
+
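+/// Split the straight-line code at each boolean-valued instruction and use
+/// that value as the condition of a conditional branch, allowing control
+/// flow to loop back into the current block.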
+void IntroduceControlFlow(Function *F) {
+ std::set<Instruction*> BoolInst;
+ for (BasicBlock::iterator it = F->begin()->begin(),
+ e = F->begin()->end(); it != e; ++it) {
+ if (it->getType() == IntegerType::getInt1Ty(F->getContext()))
+ BoolInst.insert(it);
+ }
+
+ for (std::set<Instruction*>::iterator it = BoolInst.begin(),
+ e = BoolInst.end(); it != e; ++it) {
+ Instruction *Instr = *it;
+ BasicBlock *Curr = Instr->getParent();
+    BasicBlock::iterator Loc = Instr;
+ BasicBlock *Next = Curr->splitBasicBlock(Loc, "CF");
+ Instr->moveBefore(Curr->getTerminator());
+ if (Curr != &F->getEntryBlock()) {
+ BranchInst::Create(Curr, Next, Instr, Curr->getTerminator());
+ Curr->getTerminator()->eraseFromParent();
+ }
+ }
+}
+
+int main(int argc, char **argv) {
+ // Init LLVM, call llvm_shutdown() on exit, parse args, etc.
+ llvm::PrettyStackTraceProgram X(argc, argv);
+ cl::ParseCommandLineOptions(argc, argv, "llvm codegen stress-tester\n");
+ llvm_shutdown_obj Y;
+
+ std::auto_ptr<Module> M(new Module("/tmp/autogen.bc", getGlobalContext()));
+ Function *F = GenEmptyFunction(M.get());
+ FillFunction(F);
+ IntroduceControlFlow(F);
+
+ // Figure out what stream we are supposed to write to...
+ OwningPtr<tool_output_file> Out;
+ // Default to standard output.
+ if (OutputFilename.empty())
+ OutputFilename = "-";
+
+ std::string ErrorInfo;
+ Out.reset(new tool_output_file(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary));
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ return 1;
+ }
+
+ PassManager Passes;
+ Passes.add(createVerifierPass());
+ Passes.add(createPrintModulePass(&Out->os()));
+ Passes.run(*M.get());
+ Out->keep();
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-stub/CMakeLists.txt b/contrib/llvm/tools/llvm-stub/CMakeLists.txt
deleted file mode 100644
index a98dc9e..0000000
--- a/contrib/llvm/tools/llvm-stub/CMakeLists.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-add_llvm_tool(llvm-stub
- llvm-stub.c
- )
diff --git a/contrib/llvm/tools/llvm-stub/Makefile b/contrib/llvm/tools/llvm-stub/Makefile
deleted file mode 100644
index 7ffe149..0000000
--- a/contrib/llvm/tools/llvm-stub/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-##===- tools/llvm-stub/Makefile ----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llvm-stub
-include $(LEVEL)/Makefile.common
-
diff --git a/contrib/llvm/tools/macho-dump/CMakeLists.txt b/contrib/llvm/tools/macho-dump/CMakeLists.txt
deleted file mode 100644
index d55e1d5..0000000
--- a/contrib/llvm/tools/macho-dump/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS ${LLVM_TARGETS_TO_BUILD} support object)
-
-add_llvm_tool(macho-dump
- macho-dump.cpp
- )
diff --git a/contrib/llvm/tools/macho-dump/Makefile b/contrib/llvm/tools/macho-dump/Makefile
deleted file mode 100644
index 638015e..0000000
--- a/contrib/llvm/tools/macho-dump/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- tools/macho-dump/Makefile ---------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = macho-dump
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-# Include this here so we can get the configuration of the targets
-# that have been configured for construction. We have to do this
-# early so we can set up LINK_COMPONENTS before including Makefile.rules
-include $(LEVEL)/Makefile.config
-
-LINK_COMPONENTS := support object
-
-include $(LLVM_SRC_ROOT)/Makefile.rules
diff --git a/contrib/llvm/tools/opt/CMakeLists.txt b/contrib/llvm/tools/opt/CMakeLists.txt
deleted file mode 100644
index 0570d0e..0000000
--- a/contrib/llvm/tools/opt/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-set(LLVM_LINK_COMPONENTS bitreader asmparser bitwriter instrumentation scalaropts ipo)
-
-add_llvm_tool(opt
- AnalysisWrappers.cpp
- GraphPrinters.cpp
- PrintSCC.cpp
- opt.cpp
- )
diff --git a/contrib/llvm/tools/opt/Makefile b/contrib/llvm/tools/opt/Makefile
deleted file mode 100644
index 726cad8..0000000
--- a/contrib/llvm/tools/opt/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-##===- tools/opt/Makefile ----------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../..
-TOOLNAME = opt
-
-LINK_COMPONENTS := bitreader bitwriter asmparser instrumentation scalaropts ipo
-
-include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/opt/PrintSCC.cpp b/contrib/llvm/tools/opt/PrintSCC.cpp
index 533f49e..11efdcd 100644
--- a/contrib/llvm/tools/opt/PrintSCC.cpp
+++ b/contrib/llvm/tools/opt/PrintSCC.cpp
@@ -101,8 +101,8 @@ bool CallGraphSCC::runOnModule(Module &M) {
errs() << "\nSCC #" << ++sccNum << " : ";
for (std::vector<CallGraphNode*>::const_iterator I = nextSCC.begin(),
E = nextSCC.end(); I != E; ++I)
- errs() << ((*I)->getFunction() ? (*I)->getFunction()->getNameStr()
- : std::string("external node")) << ", ";
+ errs() << ((*I)->getFunction() ? (*I)->getFunction()->getName()
+ : "external node") << ", ";
if (nextSCC.size() == 1 && SCCI.hasLoop())
errs() << " (Has self-loop).";
}
diff --git a/contrib/llvm/tools/opt/opt.cpp b/contrib/llvm/tools/opt/opt.cpp
index ffd2c21..30da863 100644
--- a/contrib/llvm/tools/opt/opt.cpp
+++ b/contrib/llvm/tools/opt/opt.cpp
@@ -291,8 +291,8 @@ struct RegionPassPrinter : public RegionPass {
virtual bool runOnRegion(Region *R, RGPassManager &RGM) {
if (!Quiet) {
Out << "Printing analysis '" << PassToPrint->getPassName() << "' for "
- << "region: '" << R->getNameStr() << "' in function '"
- << R->getEntry()->getParent()->getNameStr() << "':\n";
+ << "region: '" << R->getNameStr() << "' in function '"
+ << R->getEntry()->getParent()->getName() << "':\n";
}
// Get and print pass...
getAnalysisID<Pass>(PassToPrint->getTypeInfo()).print(Out,
@@ -407,6 +407,8 @@ static inline void addPass(PassManagerBase &PM, Pass *P) {
/// OptLevel - Optimization Level
static void AddOptimizationPasses(PassManagerBase &MPM,FunctionPassManager &FPM,
unsigned OptLevel) {
+ FPM.add(createVerifierPass()); // Verify that input is correct
+
PassManagerBuilder Builder;
Builder.OptLevel = OptLevel;
@@ -478,6 +480,7 @@ int main(int argc, char **argv) {
PassRegistry &Registry = *PassRegistry::getPassRegistry();
initializeCore(Registry);
initializeScalarOpts(Registry);
+ initializeVectorization(Registry);
initializeIPO(Registry);
initializeAnalysis(Registry);
initializeIPA(Registry);
@@ -505,7 +508,7 @@ int main(int argc, char **argv) {
M.reset(ParseIRFile(InputFilename, Err, Context));
if (M.get() == 0) {
- Err.Print(argv[0], errs());
+ Err.print(argv[0], errs());
return 1;
}
diff --git a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp
deleted file mode 100644
index 145b96d..0000000
--- a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp
+++ /dev/null
@@ -1,1790 +0,0 @@
-//===------------ ARMDecoderEmitter.cpp - Decoder Generator ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is part of the ARM Disassembler.
-// It contains the tablegen backend that emits the decoder functions for ARM and
-// Thumb. The disassembler core includes the auto-generated file, invokes the
-// decoder functions, and builds up the MCInst based on the decoded Opcode.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "arm-decoder-emitter"
-
-#include "ARMDecoderEmitter.h"
-#include "CodeGenTarget.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/TableGen/Record.h"
-
-#include <vector>
-#include <map>
-#include <string>
-
-using namespace llvm;
-
-/////////////////////////////////////////////////////
-// //
-// Enums and Utilities for ARM Instruction Format //
-// //
-/////////////////////////////////////////////////////
-
-#define ARM_FORMATS \
- ENTRY(ARM_FORMAT_PSEUDO, 0) \
- ENTRY(ARM_FORMAT_MULFRM, 1) \
- ENTRY(ARM_FORMAT_BRFRM, 2) \
- ENTRY(ARM_FORMAT_BRMISCFRM, 3) \
- ENTRY(ARM_FORMAT_DPFRM, 4) \
- ENTRY(ARM_FORMAT_DPSOREGREGFRM, 5) \
- ENTRY(ARM_FORMAT_LDFRM, 6) \
- ENTRY(ARM_FORMAT_STFRM, 7) \
- ENTRY(ARM_FORMAT_LDMISCFRM, 8) \
- ENTRY(ARM_FORMAT_STMISCFRM, 9) \
- ENTRY(ARM_FORMAT_LDSTMULFRM, 10) \
- ENTRY(ARM_FORMAT_LDSTEXFRM, 11) \
- ENTRY(ARM_FORMAT_ARITHMISCFRM, 12) \
- ENTRY(ARM_FORMAT_SATFRM, 13) \
- ENTRY(ARM_FORMAT_EXTFRM, 14) \
- ENTRY(ARM_FORMAT_VFPUNARYFRM, 15) \
- ENTRY(ARM_FORMAT_VFPBINARYFRM, 16) \
- ENTRY(ARM_FORMAT_VFPCONV1FRM, 17) \
- ENTRY(ARM_FORMAT_VFPCONV2FRM, 18) \
- ENTRY(ARM_FORMAT_VFPCONV3FRM, 19) \
- ENTRY(ARM_FORMAT_VFPCONV4FRM, 20) \
- ENTRY(ARM_FORMAT_VFPCONV5FRM, 21) \
- ENTRY(ARM_FORMAT_VFPLDSTFRM, 22) \
- ENTRY(ARM_FORMAT_VFPLDSTMULFRM, 23) \
- ENTRY(ARM_FORMAT_VFPMISCFRM, 24) \
- ENTRY(ARM_FORMAT_THUMBFRM, 25) \
- ENTRY(ARM_FORMAT_MISCFRM, 26) \
- ENTRY(ARM_FORMAT_NEONGETLNFRM, 27) \
- ENTRY(ARM_FORMAT_NEONSETLNFRM, 28) \
- ENTRY(ARM_FORMAT_NEONDUPFRM, 29) \
- ENTRY(ARM_FORMAT_NLdSt, 30) \
- ENTRY(ARM_FORMAT_N1RegModImm, 31) \
- ENTRY(ARM_FORMAT_N2Reg, 32) \
- ENTRY(ARM_FORMAT_NVCVT, 33) \
- ENTRY(ARM_FORMAT_NVecDupLn, 34) \
- ENTRY(ARM_FORMAT_N2RegVecShL, 35) \
- ENTRY(ARM_FORMAT_N2RegVecShR, 36) \
- ENTRY(ARM_FORMAT_N3Reg, 37) \
- ENTRY(ARM_FORMAT_N3RegVecSh, 38) \
- ENTRY(ARM_FORMAT_NVecExtract, 39) \
- ENTRY(ARM_FORMAT_NVecMulScalar, 40) \
- ENTRY(ARM_FORMAT_NVTBL, 41) \
- ENTRY(ARM_FORMAT_DPSOREGIMMFRM, 42)
-
-// ARM instruction format specifies the encoding used by the instruction.
-#define ENTRY(n, v) n = v,
-typedef enum {
- ARM_FORMATS
- ARM_FORMAT_NA
-} ARMFormat;
-#undef ENTRY
-
-// Converts enum to const char*.
-static const char *stringForARMFormat(ARMFormat form) {
-#define ENTRY(n, v) case n: return #n;
- switch(form) {
- ARM_FORMATS
- case ARM_FORMAT_NA:
- default:
- return "";
- }
-#undef ENTRY
-}
-
-enum {
- IndexModeNone = 0,
- IndexModePre = 1,
- IndexModePost = 2,
- IndexModeUpd = 3
-};
-
-/////////////////////////
-// //
-// Utility functions //
-// //
-/////////////////////////
-
-/// byteFromBitsInit - Return the byte value from a BitsInit.
-/// Called from getByteField().
-static uint8_t byteFromBitsInit(BitsInit &init) {
- int width = init.getNumBits();
-
- assert(width <= 8 && "Field is too large for uint8_t!");
-
- int index;
- uint8_t mask = 0x01;
-
- uint8_t ret = 0;
-
- for (index = 0; index < width; index++) {
- if (static_cast<BitInit*>(init.getBit(index))->getValue())
- ret |= mask;
-
- mask <<= 1;
- }
-
- return ret;
-}
-
-static uint8_t getByteField(const Record &def, const char *str) {
- BitsInit *bits = def.getValueAsBitsInit(str);
- return byteFromBitsInit(*bits);
-}
-
-static BitsInit &getBitsField(const Record &def, const char *str) {
- BitsInit *bits = def.getValueAsBitsInit(str);
- return *bits;
-}
-
-/// sameStringExceptSuffix - Return true if the two strings differ only in RHS's
-/// suffix. ("VST4d8", "VST4d8_UPD", "_UPD") as input returns true.
-static
-bool sameStringExceptSuffix(const StringRef LHS, const StringRef RHS,
- const StringRef Suffix) {
-
- if (RHS.startswith(LHS) && RHS.endswith(Suffix))
- return RHS.size() == LHS.size() + Suffix.size();
-
- return false;
-}
-
-/// thumbInstruction - Determine whether we have a Thumb instruction.
-/// See also ARMInstrFormats.td.
-static bool thumbInstruction(uint8_t Form) {
- return Form == ARM_FORMAT_THUMBFRM;
-}
-
-// The set (BIT_TRUE, BIT_FALSE, BIT_UNSET) represents a ternary logic system
-// for a bit value.
-//
-// BIT_UNFILTERED is used as the init value for a filter position. It is used
-// only for filter processings.
-typedef enum {
- BIT_TRUE, // '1'
- BIT_FALSE, // '0'
- BIT_UNSET, // '?'
- BIT_UNFILTERED // unfiltered
-} bit_value_t;
-
-static bool ValueSet(bit_value_t V) {
- return (V == BIT_TRUE || V == BIT_FALSE);
-}
-static bool ValueNotSet(bit_value_t V) {
- return (V == BIT_UNSET);
-}
-static int Value(bit_value_t V) {
- return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1);
-}
-static bit_value_t bitFromBits(BitsInit &bits, unsigned index) {
- if (BitInit *bit = dynamic_cast<BitInit*>(bits.getBit(index)))
- return bit->getValue() ? BIT_TRUE : BIT_FALSE;
-
- // The bit is uninitialized.
- return BIT_UNSET;
-}
-// Prints the bit value for each position.
-static void dumpBits(raw_ostream &o, BitsInit &bits) {
- unsigned index;
-
- for (index = bits.getNumBits(); index > 0; index--) {
- switch (bitFromBits(bits, index - 1)) {
- case BIT_TRUE:
- o << "1";
- break;
- case BIT_FALSE:
- o << "0";
- break;
- case BIT_UNSET:
- o << "_";
- break;
- default:
- assert(0 && "unexpected return value from bitFromBits");
- }
- }
-}
-
-// Enums for the available target names.
-typedef enum {
- TARGET_ARM = 0,
- TARGET_THUMB
-} TARGET_NAME_t;
-
-// FIXME: Possibly auto-detected?
-#define BIT_WIDTH 32
-
-// Forward declaration.
-class ARMFilterChooser;
-
-// Representation of the instruction to work on.
-typedef bit_value_t insn_t[BIT_WIDTH];
-
-/// Filter - Filter works with FilterChooser to produce the decoding tree for
-/// the ISA.
-///
-/// It is useful to think of a Filter as governing the switch stmts of the
-/// decoding tree in a certain level. Each case stmt delegates to an inferior
-/// FilterChooser to decide what further decoding logic to employ, or in another
-/// words, what other remaining bits to look at. The FilterChooser eventually
-/// chooses a best Filter to do its job.
-///
-/// This recursive scheme ends when the number of Opcodes assigned to the
-/// FilterChooser becomes 1 or if there is a conflict. A conflict happens when
-/// the Filter/FilterChooser combo does not know how to distinguish among the
-/// Opcodes assigned.
-///
-/// An example of a conflict is
-///
-/// Conflict:
-/// 111101000.00........00010000....
-/// 111101000.00........0001........
-/// 1111010...00........0001........
-/// 1111010...00....................
-/// 1111010.........................
-/// 1111............................
-/// ................................
-/// VST4q8a 111101000_00________00010000____
-/// VST4q8b 111101000_00________00010000____
-///
-/// The Debug output shows the path that the decoding tree follows to reach the
-/// conclusion that there is a conflict. VST4q8a is a vst4 to double-spaced
-/// even registers, while VST4q8b is a vst4 to double-spaced odd registers.
-///
-/// The encoding info in the .td files does not specify this meta information,
-/// which could have been used by the decoder to resolve the conflict. The
-/// decoder could try to decode the even/odd register numbering and assign to
-/// VST4q8a or VST4q8b, but for the time being, the decoder chooses the "a"
-/// version and return the Opcode since the two have the same Asm format string.
-class ARMFilter {
-protected:
- ARMFilterChooser *Owner; // points to the FilterChooser who owns this filter
- unsigned StartBit; // the starting bit position
- unsigned NumBits; // number of bits to filter
- bool Mixed; // a mixed region contains both set and unset bits
-
- // Map of well-known segment value to the set of uid's with that value.
- std::map<uint64_t, std::vector<unsigned> > FilteredInstructions;
-
- // Set of uid's with non-constant segment values.
- std::vector<unsigned> VariableInstructions;
-
- // Map of well-known segment value to its delegate.
- std::map<unsigned, ARMFilterChooser*> FilterChooserMap;
-
- // Number of instructions which fall under FilteredInstructions category.
- unsigned NumFiltered;
-
- // Keeps track of the last opcode in the filtered bucket.
- unsigned LastOpcFiltered;
-
- // Number of instructions which fall under VariableInstructions category.
- unsigned NumVariable;
-
-public:
- unsigned getNumFiltered() { return NumFiltered; }
- unsigned getNumVariable() { return NumVariable; }
- unsigned getSingletonOpc() {
- assert(NumFiltered == 1);
- return LastOpcFiltered;
- }
- // Return the filter chooser for the group of instructions without constant
- // segment values.
- ARMFilterChooser &getVariableFC() {
- assert(NumFiltered == 1);
- assert(FilterChooserMap.size() == 1);
- return *(FilterChooserMap.find((unsigned)-1)->second);
- }
-
- ARMFilter(const ARMFilter &f);
- ARMFilter(ARMFilterChooser &owner, unsigned startBit, unsigned numBits,
- bool mixed);
-
- ~ARMFilter();
-
- // Divides the decoding task into sub tasks and delegates them to the
- // inferior FilterChooser's.
- //
- // A special case arises when there's only one entry in the filtered
- // instructions. In order to unambiguously decode the singleton, we need to
- // match the remaining undecoded encoding bits against the singleton.
- void recurse();
-
- // Emit code to decode instructions given a segment or segments of bits.
- void emit(raw_ostream &o, unsigned &Indentation);
-
- // Returns the number of fanout produced by the filter. More fanout implies
- // the filter distinguishes more categories of instructions.
- unsigned usefulness() const;
-}; // End of class Filter
-
-// These are states of our finite state machines used in FilterChooser's
-// filterProcessor() which produces the filter candidates to use.
-typedef enum {
- ATTR_NONE,
- ATTR_FILTERED,
- ATTR_ALL_SET,
- ATTR_ALL_UNSET,
- ATTR_MIXED
-} bitAttr_t;
-
-/// ARMFilterChooser - FilterChooser chooses the best filter among a set of Filters
-/// in order to perform the decoding of instructions at the current level.
-///
-/// Decoding proceeds from the top down. Based on the well-known encoding bits
-/// of instructions available, FilterChooser builds up the possible Filters that
-/// can further the task of decoding by distinguishing among the remaining
-/// candidate instructions.
-///
-/// Once a filter has been chosen, it is called upon to divide the decoding task
-/// into sub-tasks and delegates them to its inferior FilterChoosers for further
-/// processings.
-///
-/// It is useful to think of a Filter as governing the switch stmts of the
-/// decoding tree. And each case is delegated to an inferior FilterChooser to
-/// decide what further remaining bits to look at.
-class ARMFilterChooser {
- static TARGET_NAME_t TargetName;
-
-protected:
- friend class ARMFilter;
-
- // Vector of codegen instructions to choose our filter.
- const std::vector<const CodeGenInstruction*> &AllInstructions;
-
- // Vector of uid's for this filter chooser to work on.
- const std::vector<unsigned> Opcodes;
-
- // Vector of candidate filters.
- std::vector<ARMFilter> Filters;
-
- // Array of bit values passed down from our parent.
- // Set to all BIT_UNFILTERED's for Parent == NULL.
- bit_value_t FilterBitValues[BIT_WIDTH];
-
- // Links to the FilterChooser above us in the decoding tree.
- ARMFilterChooser *Parent;
-
- // Index of the best filter from Filters.
- int BestIndex;
-
-public:
- static void setTargetName(TARGET_NAME_t tn) { TargetName = tn; }
-
- ARMFilterChooser(const ARMFilterChooser &FC) :
- AllInstructions(FC.AllInstructions), Opcodes(FC.Opcodes),
- Filters(FC.Filters), Parent(FC.Parent), BestIndex(FC.BestIndex) {
- memcpy(FilterBitValues, FC.FilterBitValues, sizeof(FilterBitValues));
- }
-
- ARMFilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
- const std::vector<unsigned> &IDs) :
- AllInstructions(Insts), Opcodes(IDs), Filters(), Parent(NULL),
- BestIndex(-1) {
- for (unsigned i = 0; i < BIT_WIDTH; ++i)
- FilterBitValues[i] = BIT_UNFILTERED;
-
- doFilter();
- }
-
- ARMFilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
- const std::vector<unsigned> &IDs,
- bit_value_t (&ParentFilterBitValues)[BIT_WIDTH],
- ARMFilterChooser &parent) :
- AllInstructions(Insts), Opcodes(IDs), Filters(), Parent(&parent),
- BestIndex(-1) {
- for (unsigned i = 0; i < BIT_WIDTH; ++i)
- FilterBitValues[i] = ParentFilterBitValues[i];
-
- doFilter();
- }
-
- // The top level filter chooser has NULL as its parent.
- bool isTopLevel() { return Parent == NULL; }
-
- // This provides an opportunity for target specific code emission.
- void emitTopHook(raw_ostream &o);
-
- // Emit the top level typedef and decodeInstruction() function.
- void emitTop(raw_ostream &o, unsigned &Indentation);
-
- // This provides an opportunity for target specific code emission after
- // emitTop().
- void emitBot(raw_ostream &o, unsigned &Indentation);
-
-protected:
- // Populates the insn given the uid.
- void insnWithID(insn_t &Insn, unsigned Opcode) const {
- if (AllInstructions[Opcode]->isPseudo)
- return;
-
- BitsInit &Bits = getBitsField(*AllInstructions[Opcode]->TheDef, "Inst");
-
- for (unsigned i = 0; i < BIT_WIDTH; ++i)
- Insn[i] = bitFromBits(Bits, i);
-
- // Set Inst{21} to 1 (wback) when IndexModeBits == IndexModeUpd.
- Record *R = AllInstructions[Opcode]->TheDef;
- if (R->getValue("IndexModeBits") &&
- getByteField(*R, "IndexModeBits") == IndexModeUpd)
- Insn[21] = BIT_TRUE;
- }
-
- // Returns the record name.
- const std::string &nameWithID(unsigned Opcode) const {
- return AllInstructions[Opcode]->TheDef->getName();
- }
-
- // Populates the field of the insn given the start position and the number of
- // consecutive bits to scan for.
- //
- // Returns false if there exists any uninitialized bit value in the range.
- // Returns true, otherwise.
- bool fieldFromInsn(uint64_t &Field, insn_t &Insn, unsigned StartBit,
- unsigned NumBits) const;
-
- /// dumpFilterArray - dumpFilterArray prints out debugging info for the given
- /// filter array as a series of chars.
- void dumpFilterArray(raw_ostream &o, bit_value_t (&filter)[BIT_WIDTH]);
-
- /// dumpStack - dumpStack traverses the filter chooser chain and calls
- /// dumpFilterArray on each filter chooser up to the top level one.
- void dumpStack(raw_ostream &o, const char *prefix);
-
- ARMFilter &bestFilter() {
- assert(BestIndex != -1 && "BestIndex not set");
- return Filters[BestIndex];
- }
-
- // Called from Filter::recurse() when singleton exists. For debug purpose.
- void SingletonExists(unsigned Opc);
-
- bool PositionFiltered(unsigned i) {
- return ValueSet(FilterBitValues[i]);
- }
-
- // Calculates the island(s) needed to decode the instruction.
-// This returns a list of undecoded bits of an instruction, for example,
- // Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
- // decoded bits in order to verify that the instruction matches the Opcode.
- unsigned getIslands(std::vector<unsigned> &StartBits,
- std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
- insn_t &Insn);
-
- // The purpose of this function is for the API client to detect possible
-  // Load/Store Coprocessor instructions. If the coprocessor number of
- // the instruction is either 10 or 11, the decoder should not report the
- // instruction as LDC/LDC2/STC/STC2, but should match against Advanced SIMD or
- // VFP instructions.
- bool LdStCopEncoding1(unsigned Opc) {
- const std::string &Name = nameWithID(Opc);
- if (Name == "LDC_OFFSET" || Name == "LDC_OPTION" ||
- Name == "LDC_POST" || Name == "LDC_PRE" ||
- Name == "LDCL_OFFSET" || Name == "LDCL_OPTION" ||
- Name == "LDCL_POST" || Name == "LDCL_PRE" ||
- Name == "STC_OFFSET" || Name == "STC_OPTION" ||
- Name == "STC_POST" || Name == "STC_PRE" ||
- Name == "STCL_OFFSET" || Name == "STCL_OPTION" ||
- Name == "STCL_POST" || Name == "STCL_PRE")
- return true;
- else
- return false;
- }
-
- // Emits code to decode the singleton. Return true if we have matched all the
- // well-known bits.
- bool emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,unsigned Opc);
-
- // Emits code to decode the singleton, and then to decode the rest.
- void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- ARMFilter &Best);
-
- // Assign a single filter and run with it.
- void runSingleFilter(ARMFilterChooser &owner, unsigned startBit,
- unsigned numBit, bool mixed);
-
- // reportRegion is a helper function for filterProcessor to mark a region as
- // eligible for use as a filter region.
- void reportRegion(bitAttr_t RA, unsigned StartBit, unsigned BitIndex,
- bool AllowMixed);
-
- // FilterProcessor scans the well-known encoding bits of the instructions and
- // builds up a list of candidate filters. It chooses the best filter and
- // recursively descends down the decoding tree.
- bool filterProcessor(bool AllowMixed, bool Greedy = true);
-
- // Decides on the best configuration of filter(s) to use in order to decode
- // the instructions. A conflict of instructions may occur, in which case we
- // dump the conflict set to the standard error.
- void doFilter();
-
- // Emits code to decode our share of instructions. Returns true if the
- // emitted code causes a return, which occurs if we know how to decode
- // the instruction at this level or the instruction is not decodeable.
- bool emit(raw_ostream &o, unsigned &Indentation);
-};
-
-///////////////////////////
-// //
-// Filter Implementation //
-// //
-///////////////////////////
-
-ARMFilter::ARMFilter(const ARMFilter &f) :
- Owner(f.Owner), StartBit(f.StartBit), NumBits(f.NumBits), Mixed(f.Mixed),
- FilteredInstructions(f.FilteredInstructions),
- VariableInstructions(f.VariableInstructions),
- FilterChooserMap(f.FilterChooserMap), NumFiltered(f.NumFiltered),
- LastOpcFiltered(f.LastOpcFiltered), NumVariable(f.NumVariable) {
-}
-
-ARMFilter::ARMFilter(ARMFilterChooser &owner, unsigned startBit, unsigned numBits,
- bool mixed) : Owner(&owner), StartBit(startBit), NumBits(numBits),
- Mixed(mixed) {
- assert(StartBit + NumBits - 1 < BIT_WIDTH);
-
- NumFiltered = 0;
- LastOpcFiltered = 0;
- NumVariable = 0;
-
- for (unsigned i = 0, e = Owner->Opcodes.size(); i != e; ++i) {
- insn_t Insn;
-
- // Populates the insn given the uid.
- Owner->insnWithID(Insn, Owner->Opcodes[i]);
-
- uint64_t Field;
- // Scans the segment for possibly well-specified encoding bits.
- bool ok = Owner->fieldFromInsn(Field, Insn, StartBit, NumBits);
-
- if (ok) {
- // The encoding bits are well-known. Lets add the uid of the
- // instruction into the bucket keyed off the constant field value.
- LastOpcFiltered = Owner->Opcodes[i];
- FilteredInstructions[Field].push_back(LastOpcFiltered);
- ++NumFiltered;
- } else {
-      // Some of the encoding bit(s) are unspecified. This contributes to
- // one additional member of "Variable" instructions.
- VariableInstructions.push_back(Owner->Opcodes[i]);
- ++NumVariable;
- }
- }
-
- assert((FilteredInstructions.size() + VariableInstructions.size() > 0)
- && "Filter returns no instruction categories");
-}
-
-ARMFilter::~ARMFilter() {
- std::map<unsigned, ARMFilterChooser*>::iterator filterIterator;
- for (filterIterator = FilterChooserMap.begin();
- filterIterator != FilterChooserMap.end();
- filterIterator++) {
- delete filterIterator->second;
- }
-}
-
-// Divides the decoding task into sub tasks and delegates them to the
-// inferior FilterChooser's.
-//
-// A special case arises when there's only one entry in the filtered
-// instructions. In order to unambiguously decode the singleton, we need to
-// match the remaining undecoded encoding bits against the singleton.
-void ARMFilter::recurse() {
- std::map<uint64_t, std::vector<unsigned> >::const_iterator mapIterator;
-
- bit_value_t BitValueArray[BIT_WIDTH];
- // Starts by inheriting our parent filter chooser's filter bit values.
- memcpy(BitValueArray, Owner->FilterBitValues, sizeof(BitValueArray));
-
- unsigned bitIndex;
-
- if (VariableInstructions.size()) {
- // Conservatively marks each segment position as BIT_UNSET.
- for (bitIndex = 0; bitIndex < NumBits; bitIndex++)
- BitValueArray[StartBit + bitIndex] = BIT_UNSET;
-
- // Delegates to an inferior filter chooser for further processing on this
- // group of instructions whose segment values are variable.
- FilterChooserMap.insert(std::pair<unsigned, ARMFilterChooser*>(
- (unsigned)-1,
- new ARMFilterChooser(Owner->AllInstructions,
- VariableInstructions,
- BitValueArray,
- *Owner)
- ));
- }
-
- // No need to recurse for a singleton filtered instruction.
- // See also Filter::emit().
- if (getNumFiltered() == 1) {
- //Owner->SingletonExists(LastOpcFiltered);
- assert(FilterChooserMap.size() == 1);
- return;
- }
-
- // Otherwise, create sub choosers.
- for (mapIterator = FilteredInstructions.begin();
- mapIterator != FilteredInstructions.end();
- mapIterator++) {
-
- // Marks all the segment positions with either BIT_TRUE or BIT_FALSE.
- for (bitIndex = 0; bitIndex < NumBits; bitIndex++) {
- if (mapIterator->first & (1ULL << bitIndex))
- BitValueArray[StartBit + bitIndex] = BIT_TRUE;
- else
- BitValueArray[StartBit + bitIndex] = BIT_FALSE;
- }
-
- // Delegates to an inferior filter chooser for further processing on this
- // category of instructions.
- FilterChooserMap.insert(std::pair<unsigned, ARMFilterChooser*>(
- mapIterator->first,
- new ARMFilterChooser(Owner->AllInstructions,
- mapIterator->second,
- BitValueArray,
- *Owner)
- ));
- }
-}
-
-// Emit code to decode instructions given a segment or segments of bits.
-void ARMFilter::emit(raw_ostream &o, unsigned &Indentation) {
- o.indent(Indentation) << "// Check Inst{";
-
- if (NumBits > 1)
- o << (StartBit + NumBits - 1) << '-';
-
- o << StartBit << "} ...\n";
-
- o.indent(Indentation) << "switch (fieldFromInstruction(insn, "
- << StartBit << ", " << NumBits << ")) {\n";
-
- std::map<unsigned, ARMFilterChooser*>::iterator filterIterator;
-
- bool DefaultCase = false;
- for (filterIterator = FilterChooserMap.begin();
- filterIterator != FilterChooserMap.end();
- filterIterator++) {
-
- // Field value -1 implies a non-empty set of variable instructions.
- // See also recurse().
- if (filterIterator->first == (unsigned)-1) {
- DefaultCase = true;
-
- o.indent(Indentation) << "default:\n";
- o.indent(Indentation) << " break; // fallthrough\n";
-
- // Closing curly brace for the switch statement.
- // This is unconventional because we want the default processing to be
- // performed for the fallthrough cases as well, i.e., when the "cases"
- // did not prove a decoded instruction.
- o.indent(Indentation) << "}\n";
-
- } else
- o.indent(Indentation) << "case " << filterIterator->first << ":\n";
-
- // We arrive at a category of instructions with the same segment value.
- // Now delegate to the sub filter chooser for further decodings.
- // The case may fallthrough, which happens if the remaining well-known
- // encoding bits do not match exactly.
- if (!DefaultCase) { ++Indentation; ++Indentation; }
-
- bool finished = filterIterator->second->emit(o, Indentation);
- // For top level default case, there's no need for a break statement.
- if (Owner->isTopLevel() && DefaultCase)
- break;
- if (!finished)
- o.indent(Indentation) << "break;\n";
-
- if (!DefaultCase) { --Indentation; --Indentation; }
- }
-
- // If there is no default case, we still need to supply a closing brace.
- if (!DefaultCase) {
- // Closing curly brace for the switch statement.
- o.indent(Indentation) << "}\n";
- }
-}
-
-// Returns the number of fanout produced by the filter. More fanout implies
-// the filter distinguishes more categories of instructions.
-unsigned ARMFilter::usefulness() const {
- if (VariableInstructions.size())
- return FilteredInstructions.size();
- else
- return FilteredInstructions.size() + 1;
-}
-
-//////////////////////////////////
-// //
-// Filterchooser Implementation //
-// //
-//////////////////////////////////
-
-// Define the symbol here.
-TARGET_NAME_t ARMFilterChooser::TargetName;
-
-// This provides an opportunity for target specific code emission.
-void ARMFilterChooser::emitTopHook(raw_ostream &o) {
- if (TargetName == TARGET_ARM) {
- // Emit code that references the ARMFormat data type.
- o << "static const ARMFormat ARMFormats[] = {\n";
- for (unsigned i = 0, e = AllInstructions.size(); i != e; ++i) {
- const Record &Def = *(AllInstructions[i]->TheDef);
- const std::string &Name = Def.getName();
- if (Def.isSubClassOf("InstARM") || Def.isSubClassOf("InstThumb"))
- o.indent(2) <<
- stringForARMFormat((ARMFormat)getByteField(Def, "Form"));
- else
- o << " ARM_FORMAT_NA";
-
- o << ",\t// Inst #" << i << " = " << Name << '\n';
- }
- o << " ARM_FORMAT_NA\t// Unreachable.\n";
- o << "};\n\n";
- }
-}
-
-// Emit the top level typedef and decodeInstruction() function.
-void ARMFilterChooser::emitTop(raw_ostream &o, unsigned &Indentation) {
- // Run the target specific emit hook.
- emitTopHook(o);
-
- switch (BIT_WIDTH) {
- case 8:
- o.indent(Indentation) << "typedef uint8_t field_t;\n";
- break;
- case 16:
- o.indent(Indentation) << "typedef uint16_t field_t;\n";
- break;
- case 32:
- o.indent(Indentation) << "typedef uint32_t field_t;\n";
- break;
- case 64:
- o.indent(Indentation) << "typedef uint64_t field_t;\n";
- break;
- default:
- assert(0 && "Unexpected instruction size!");
- }
-
- o << '\n';
-
- o.indent(Indentation) << "static field_t " <<
- "fieldFromInstruction(field_t insn, unsigned startBit, unsigned numBits)\n";
-
- o.indent(Indentation) << "{\n";
-
- ++Indentation; ++Indentation;
- o.indent(Indentation) << "assert(startBit + numBits <= " << BIT_WIDTH
- << " && \"Instruction field out of bounds!\");\n";
- o << '\n';
- o.indent(Indentation) << "field_t fieldMask;\n";
- o << '\n';
- o.indent(Indentation) << "if (numBits == " << BIT_WIDTH << ")\n";
-
- ++Indentation; ++Indentation;
- o.indent(Indentation) << "fieldMask = (field_t)-1;\n";
- --Indentation; --Indentation;
-
- o.indent(Indentation) << "else\n";
-
- ++Indentation; ++Indentation;
- o.indent(Indentation) << "fieldMask = ((1 << numBits) - 1) << startBit;\n";
- --Indentation; --Indentation;
-
- o << '\n';
- o.indent(Indentation) << "return (insn & fieldMask) >> startBit;\n";
- --Indentation; --Indentation;
-
- o.indent(Indentation) << "}\n";
-
- o << '\n';
-
- o.indent(Indentation) <<"static uint16_t decodeInstruction(field_t insn) {\n";
-
- ++Indentation; ++Indentation;
- // Emits code to decode the instructions.
- emit(o, Indentation);
-
- o << '\n';
- o.indent(Indentation) << "return 0;\n";
- --Indentation; --Indentation;
-
- o.indent(Indentation) << "}\n";
-
- o << '\n';
-}
-
-// This provides an opportunity for target specific code emission after
-// emitTop().
-void ARMFilterChooser::emitBot(raw_ostream &o, unsigned &Indentation) {
- if (TargetName != TARGET_THUMB) return;
-
- // Emit code that decodes the Thumb ISA.
- o.indent(Indentation)
- << "static uint16_t decodeThumbInstruction(field_t insn) {\n";
-
- ++Indentation; ++Indentation;
-
- // Emits code to decode the instructions.
- emit(o, Indentation);
-
- o << '\n';
- o.indent(Indentation) << "return 0;\n";
-
- --Indentation; --Indentation;
-
- o.indent(Indentation) << "}\n";
-}
-
-// Populates the field of the insn given the start position and the number of
-// consecutive bits to scan for.
-//
- // Returns false upon encountering the first uninitialized bit value;
- // returns true otherwise.
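- //
- // For example (bit values assumed): Insn = {BIT_TRUE, BIT_FALSE, BIT_TRUE,
- // ...} with StartBit = 0 and NumBits = 3 yields Field = 0b101 = 5; a
- // BIT_UNSET anywhere in the scanned range aborts with false instead.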
-bool ARMFilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
- unsigned StartBit, unsigned NumBits) const {
- Field = 0;
-
- for (unsigned i = 0; i < NumBits; ++i) {
- if (Insn[StartBit + i] == BIT_UNSET)
- return false;
-
- if (Insn[StartBit + i] == BIT_TRUE)
- Field = Field | (1ULL << i);
- }
-
- return true;
-}
-
-/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
-/// filter array as a series of chars.
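-/// Sample output (hypothetical 8-bit filter): "01_1..10", printed from bit
-/// BIT_WIDTH-1 down to bit 0, where '.' means unfiltered and '_' unset.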
-void ARMFilterChooser::dumpFilterArray(raw_ostream &o,
- bit_value_t (&filter)[BIT_WIDTH]) {
- unsigned bitIndex;
-
- for (bitIndex = BIT_WIDTH; bitIndex > 0; bitIndex--) {
- switch (filter[bitIndex - 1]) {
- case BIT_UNFILTERED:
- o << ".";
- break;
- case BIT_UNSET:
- o << "_";
- break;
- case BIT_TRUE:
- o << "1";
- break;
- case BIT_FALSE:
- o << "0";
- break;
- }
- }
-}
-
-/// dumpStack - dumpStack traverses the filter chooser chain and calls
-/// dumpFilterArray on each filter chooser up to the top level one.
-void ARMFilterChooser::dumpStack(raw_ostream &o, const char *prefix) {
- ARMFilterChooser *current = this;
-
- while (current) {
- o << prefix;
- dumpFilterArray(o, current->FilterBitValues);
- o << '\n';
- current = current->Parent;
- }
-}
-
- // Called from Filter::recurse() when a singleton exists. For debugging purposes.
-void ARMFilterChooser::SingletonExists(unsigned Opc) {
- insn_t Insn0;
- insnWithID(Insn0, Opc);
-
- errs() << "Singleton exists: " << nameWithID(Opc)
- << " with its decoding dominating ";
- for (unsigned i = 0; i < Opcodes.size(); ++i) {
- if (Opcodes[i] == Opc) continue;
- errs() << nameWithID(Opcodes[i]) << ' ';
- }
- errs() << '\n';
-
- dumpStack(errs(), "\t\t");
- for (unsigned i = 0; i < Opcodes.size(); i++) {
- const std::string &Name = nameWithID(Opcodes[i]);
-
- errs() << '\t' << Name << " ";
- dumpBits(errs(),
- getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
- errs() << '\n';
- }
-}
-
-// Calculates the island(s) needed to decode the instruction.
- // This returns a list of undecoded bits of an instruction; for example,
-// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
-// decoded bits in order to verify that the instruction matches the Opcode.
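- // For instance (bit values assumed): if only Inst{20} and Inst{3-0} carry
- // known, unfiltered values, the scan yields StartBits = {0, 20},
- // EndBits = {3, 20}, FieldVals = {0xF, 0x1}, and a return value of 2.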
-unsigned ARMFilterChooser::getIslands(std::vector<unsigned> &StartBits,
- std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
- insn_t &Insn) {
- unsigned Num, BitNo;
- Num = BitNo = 0;
-
- uint64_t FieldVal = 0;
-
- // 0: Init
- // 1: Water (the bit value does not affect decoding)
- // 2: Island (well-known bit value needed for decoding)
- int State = 0;
- int Val = -1;
-
- for (unsigned i = 0; i < BIT_WIDTH; ++i) {
- Val = Value(Insn[i]);
- bool Filtered = PositionFiltered(i);
- switch (State) {
- default:
- assert(0 && "Unreachable code!");
- break;
- case 0:
- case 1:
- if (Filtered || Val == -1)
- State = 1; // Still in Water
- else {
- State = 2; // Into the Island
- BitNo = 0;
- StartBits.push_back(i);
- FieldVal = Val;
- }
- break;
- case 2:
- if (Filtered || Val == -1) {
- State = 1; // Into the Water
- EndBits.push_back(i - 1);
- FieldVals.push_back(FieldVal);
- ++Num;
- } else {
- State = 2; // Still in Island
- ++BitNo;
- FieldVal = FieldVal | (uint64_t)Val << BitNo;
- }
- break;
- }
- }
- // If we are still in Island after the loop, do some housekeeping.
- if (State == 2) {
- EndBits.push_back(BIT_WIDTH - 1);
- FieldVals.push_back(FieldVal);
- ++Num;
- }
-
- assert(StartBits.size() == Num && EndBits.size() == Num &&
- FieldVals.size() == Num);
- return Num;
-}
-
- // Emits code to decode the singleton. Returns true if we have matched all the
-// well-known bits.
-bool ARMFilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- unsigned Opc) {
- std::vector<unsigned> StartBits;
- std::vector<unsigned> EndBits;
- std::vector<uint64_t> FieldVals;
- insn_t Insn;
- insnWithID(Insn, Opc);
-
- // This is a good opportunity to check for a possible Ld/St coprocessor
- // opcode and escape if the coproc # is either 10 or 11: it is a NEON/VFP
- // instruction in disguise.
- if (TargetName == TARGET_ARM && LdStCopEncoding1(Opc)) {
- o.indent(Indentation);
- // A8.6.51 & A8.6.188
- // If coproc = 0b101?, i.e, slice(insn, 11, 8) = 10 or 11, escape.
- o << "if (fieldFromInstruction(insn, 9, 3) == 5) break; // fallthrough\n";
- }
-
- // Look for islands of undecoded bits of the singleton.
- getIslands(StartBits, EndBits, FieldVals, Insn);
-
- unsigned Size = StartBits.size();
- unsigned I, NumBits;
-
- // If we have matched all the well-known bits, just issue a return.
- if (Size == 0) {
- o.indent(Indentation) << "return " << Opc << "; // " << nameWithID(Opc)
- << '\n';
- return true;
- }
-
- // Otherwise, there are more decodings to be done!
-
- // Emit code to match the island(s) for the singleton.
- o.indent(Indentation) << "// Check ";
-
- for (I = Size; I != 0; --I) {
- o << "Inst{" << EndBits[I-1] << '-' << StartBits[I-1] << "} ";
- if (I > 1)
- o << "&& ";
- else
- o << "for singleton decoding...\n";
- }
-
- o.indent(Indentation) << "if (";
-
- for (I = Size; I != 0; --I) {
- NumBits = EndBits[I-1] - StartBits[I-1] + 1;
- o << "fieldFromInstruction(insn, " << StartBits[I-1] << ", " << NumBits
- << ") == " << FieldVals[I-1];
- if (I > 1)
- o << " && ";
- else
- o << ")\n";
- }
-
- o.indent(Indentation) << " return " << Opc << "; // " << nameWithID(Opc)
- << '\n';
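- // Emitted shape for one hypothetical island Inst{3-0} == 0b1111:
- //   // Check Inst{3-0} for singleton decoding...
- //   if (fieldFromInstruction(insn, 0, 4) == 15)
- //     return 42; // hypothetical opcode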
-
- return false;
-}
-
-// Emits code to decode the singleton, and then to decode the rest.
-void ARMFilterChooser::emitSingletonDecoder(raw_ostream &o,
- unsigned &Indentation,
- ARMFilter &Best) {
-
- unsigned Opc = Best.getSingletonOpc();
-
- emitSingletonDecoder(o, Indentation, Opc);
-
- // Emit code for the rest.
- o.indent(Indentation) << "else\n";
-
- Indentation += 2;
- Best.getVariableFC().emit(o, Indentation);
- Indentation -= 2;
-}
-
- // Assign a single filter and run with it. A top-level API client can
- // initialize the filtering process with a single filter.
-void ARMFilterChooser::runSingleFilter(ARMFilterChooser &owner,
- unsigned startBit,
- unsigned numBit, bool mixed) {
- Filters.clear();
- ARMFilter F(*this, startBit, numBit, true);
- Filters.push_back(F);
- BestIndex = 0; // Sole Filter instance to choose from.
- bestFilter().recurse();
-}
-
-// reportRegion is a helper function for filterProcessor to mark a region as
-// eligible for use as a filter region.
-void ARMFilterChooser::reportRegion(bitAttr_t RA, unsigned StartBit,
- unsigned BitIndex, bool AllowMixed) {
- if (RA == ATTR_MIXED && AllowMixed)
- Filters.push_back(ARMFilter(*this, StartBit, BitIndex - StartBit, true));
- else if (RA == ATTR_ALL_SET && !AllowMixed)
- Filters.push_back(ARMFilter(*this, StartBit, BitIndex - StartBit, false));
-}
-
-// FilterProcessor scans the well-known encoding bits of the instructions and
-// builds up a list of candidate filters. It chooses the best filter and
- // recursively descends the decoding tree.
-bool ARMFilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
- Filters.clear();
- BestIndex = -1;
- unsigned numInstructions = Opcodes.size();
-
- assert(numInstructions && "Filter created with no instructions");
-
- // No further filtering is necessary.
- if (numInstructions == 1)
- return true;
-
- // Heuristics. See also doFilter()'s "Heuristics" comment for the case where
- // the number of instructions is 3.
- if (AllowMixed && !Greedy) {
- assert(numInstructions == 3);
-
- for (unsigned i = 0; i < Opcodes.size(); ++i) {
- std::vector<unsigned> StartBits;
- std::vector<unsigned> EndBits;
- std::vector<uint64_t> FieldVals;
- insn_t Insn;
-
- insnWithID(Insn, Opcodes[i]);
-
- // Look for islands of undecoded bits of any instruction.
- if (getIslands(StartBits, EndBits, FieldVals, Insn) > 0) {
- // Found an instruction with island(s). Now just assign a filter.
- runSingleFilter(*this, StartBits[0], EndBits[0] - StartBits[0] + 1,
- true);
- return true;
- }
- }
- }
-
- unsigned BitIndex, InsnIndex;
-
- // We maintain BIT_WIDTH copies of the bitAttrs automaton.
- // The automaton consumes the corresponding bit from each
- // instruction.
- //
- // Input symbols: 0, 1, and _ (unset).
- // States: NONE, FILTERED, ALL_SET, ALL_UNSET, and MIXED.
- // Initial state: NONE.
- //
- // (NONE) ------- [01] -> (ALL_SET)
- // (NONE) ------- _ ----> (ALL_UNSET)
- // (ALL_SET) ---- [01] -> (ALL_SET)
- // (ALL_SET) ---- _ ----> (MIXED)
- // (ALL_UNSET) -- [01] -> (MIXED)
- // (ALL_UNSET) -- _ ----> (ALL_UNSET)
- // (MIXED) ------ . ----> (MIXED)
- // (FILTERED)---- . ----> (FILTERED)
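- //
- // For example (two hypothetical 4-bit encodings, MSB first: 01_1 and
- // 0__1): bit 0 sees {1, 1} -> ALL_SET, bit 1 sees {_, _} -> ALL_UNSET,
- // bit 2 sees {1, _} -> MIXED, and bit 3 sees {0, 0} -> ALL_SET.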
-
- bitAttr_t bitAttrs[BIT_WIDTH];
-
- // FILTERED bit positions provide no entropy and are not worth pursuing.
- // Filter::recurse() sets either BIT_TRUE or BIT_FALSE for each position.
- for (BitIndex = 0; BitIndex < BIT_WIDTH; ++BitIndex)
- if (FilterBitValues[BitIndex] == BIT_TRUE ||
- FilterBitValues[BitIndex] == BIT_FALSE)
- bitAttrs[BitIndex] = ATTR_FILTERED;
- else
- bitAttrs[BitIndex] = ATTR_NONE;
-
- for (InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
- insn_t insn;
-
- insnWithID(insn, Opcodes[InsnIndex]);
-
- for (BitIndex = 0; BitIndex < BIT_WIDTH; ++BitIndex) {
- switch (bitAttrs[BitIndex]) {
- case ATTR_NONE:
- if (insn[BitIndex] == BIT_UNSET)
- bitAttrs[BitIndex] = ATTR_ALL_UNSET;
- else
- bitAttrs[BitIndex] = ATTR_ALL_SET;
- break;
- case ATTR_ALL_SET:
- if (insn[BitIndex] == BIT_UNSET)
- bitAttrs[BitIndex] = ATTR_MIXED;
- break;
- case ATTR_ALL_UNSET:
- if (insn[BitIndex] != BIT_UNSET)
- bitAttrs[BitIndex] = ATTR_MIXED;
- break;
- case ATTR_MIXED:
- case ATTR_FILTERED:
- break;
- }
- }
- }
-
- // The regionAttr automaton consumes the states of the bitAttrs automatons,
- // lowest bit to highest.
- //
- // Input symbols: F(iltered), (all_)S(et), (all_)U(nset), M(ixed)
- // States: NONE, ALL_SET, MIXED
- // Initial state: NONE
- //
- // (NONE) ----- F --> (NONE)
- // (NONE) ----- S --> (ALL_SET) ; and set region start
- // (NONE) ----- U --> (NONE)
- // (NONE) ----- M --> (MIXED) ; and set region start
- // (ALL_SET) -- F --> (NONE) ; and report an ALL_SET region
- // (ALL_SET) -- S --> (ALL_SET)
- // (ALL_SET) -- U --> (NONE) ; and report an ALL_SET region
- // (ALL_SET) -- M --> (MIXED) ; and report an ALL_SET region
- // (MIXED) ---- F --> (NONE) ; and report a MIXED region
- // (MIXED) ---- S --> (ALL_SET) ; and report a MIXED region
- // (MIXED) ---- U --> (NONE) ; and report a MIXED region
- // (MIXED) ---- M --> (MIXED)
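- //
- // Continuing the example above (attrs S, U, M, S from bit 0 upward): the
- // U input at bit 1 reports an ALL_SET region starting at bit 0, the S
- // input at bit 3 reports a MIXED region starting at bit 2, and the final
- // ALL_SET region starting at bit 3 is reported after the loop.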
-
- bitAttr_t RA = ATTR_NONE;
- unsigned StartBit = 0;
-
- for (BitIndex = 0; BitIndex < BIT_WIDTH; BitIndex++) {
- bitAttr_t bitAttr = bitAttrs[BitIndex];
-
- assert(bitAttr != ATTR_NONE && "Bit without attributes");
-
- switch (RA) {
- case ATTR_NONE:
- switch (bitAttr) {
- case ATTR_FILTERED:
- break;
- case ATTR_ALL_SET:
- StartBit = BitIndex;
- RA = ATTR_ALL_SET;
- break;
- case ATTR_ALL_UNSET:
- break;
- case ATTR_MIXED:
- StartBit = BitIndex;
- RA = ATTR_MIXED;
- break;
- default:
- assert(0 && "Unexpected bitAttr!");
- }
- break;
- case ATTR_ALL_SET:
- switch (bitAttr) {
- case ATTR_FILTERED:
- reportRegion(RA, StartBit, BitIndex, AllowMixed);
- RA = ATTR_NONE;
- break;
- case ATTR_ALL_SET:
- break;
- case ATTR_ALL_UNSET:
- reportRegion(RA, StartBit, BitIndex, AllowMixed);
- RA = ATTR_NONE;
- break;
- case ATTR_MIXED:
- reportRegion(RA, StartBit, BitIndex, AllowMixed);
- StartBit = BitIndex;
- RA = ATTR_MIXED;
- break;
- default:
- assert(0 && "Unexpected bitAttr!");
- }
- break;
- case ATTR_MIXED:
- switch (bitAttr) {
- case ATTR_FILTERED:
- reportRegion(RA, StartBit, BitIndex, AllowMixed);
- StartBit = BitIndex;
- RA = ATTR_NONE;
- break;
- case ATTR_ALL_SET:
- reportRegion(RA, StartBit, BitIndex, AllowMixed);
- StartBit = BitIndex;
- RA = ATTR_ALL_SET;
- break;
- case ATTR_ALL_UNSET:
- reportRegion(RA, StartBit, BitIndex, AllowMixed);
- RA = ATTR_NONE;
- break;
- case ATTR_MIXED:
- break;
- default:
- assert(0 && "Unexpected bitAttr!");
- }
- break;
- case ATTR_ALL_UNSET:
- assert(0 && "regionAttr state machine has no ATTR_UNSET state");
- case ATTR_FILTERED:
- assert(0 && "regionAttr state machine has no ATTR_FILTERED state");
- }
- }
-
- // At the end, if we're still in the ALL_SET or MIXED state, report a region.
- switch (RA) {
- case ATTR_NONE:
- break;
- case ATTR_FILTERED:
- break;
- case ATTR_ALL_SET:
- reportRegion(RA, StartBit, BitIndex, AllowMixed);
- break;
- case ATTR_ALL_UNSET:
- break;
- case ATTR_MIXED:
- reportRegion(RA, StartBit, BitIndex, AllowMixed);
- break;
- }
-
- // We have finished with the filter processing. Now it is time to choose
- // the best-performing filter.
- BestIndex = 0;
- bool AllUseless = true;
- unsigned BestScore = 0;
-
- for (unsigned i = 0, e = Filters.size(); i != e; ++i) {
- unsigned Usefulness = Filters[i].usefulness();
-
- if (Usefulness)
- AllUseless = false;
-
- if (Usefulness > BestScore) {
- BestIndex = i;
- BestScore = Usefulness;
- }
- }
-
- if (!AllUseless)
- bestFilter().recurse();
-
- return !AllUseless;
-} // end of FilterChooser::filterProcessor(bool)
-
-// Decides on the best configuration of filter(s) to use in order to decode
-// the instructions. A conflict of instructions may occur, in which case we
-// dump the conflict set to the standard error.
-void ARMFilterChooser::doFilter() {
- unsigned Num = Opcodes.size();
- assert(Num && "FilterChooser created with no instructions");
-
- // Heuristics: Use Inst{31-28} as the top level filter for ARM ISA.
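- // (Inst{31-28} is the ARM condition-code field, so this splits the ARM
- // opcode space into at most 16 top-level buckets before recursing.)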
- if (TargetName == TARGET_ARM && Parent == NULL) {
- runSingleFilter(*this, 28, 4, false);
- return;
- }
-
- // Try regions of consecutive known bit values first.
- if (filterProcessor(false))
- return;
-
- // Then regions of mixed bits (both known and uninitialized bit values allowed).
- if (filterProcessor(true))
- return;
-
- // Heuristics to cope with conflict set {t2CMPrs, t2SUBSrr, t2SUBSrs} where
- // no single instruction for the maximum ATTR_MIXED region Inst{14-4} has a
- // well-known encoding pattern. In such a case, we backtrack and scan for
- // the very first consecutive ATTR_ALL_SET region and assign a filter to it.
- if (Num == 3 && filterProcessor(true, false))
- return;
-
- // If we reach this point, the instruction decoding has failed.
- // Set the BestIndex to -1 to indicate so.
- BestIndex = -1;
-}
-
-// Emits code to decode our share of instructions. Returns true if the
-// emitted code causes a return, which occurs if we know how to decode
- // the instruction at this level or the instruction is not decodable.
-bool ARMFilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
- if (Opcodes.size() == 1)
- // There is only one instruction in the set, which is great!
- // Call emitSingletonDecoder() to see whether there are any remaining
- // encoding bits.
- return emitSingletonDecoder(o, Indentation, Opcodes[0]);
-
- // Choose the best filter to do the decodings!
- if (BestIndex != -1) {
- ARMFilter &Best = bestFilter();
- if (Best.getNumFiltered() == 1)
- emitSingletonDecoder(o, Indentation, Best);
- else
- bestFilter().emit(o, Indentation);
- return false;
- }
-
- // If we reach here, there is a conflict in decoding. Let's resolve the known
- // conflicts!
- if ((TargetName == TARGET_ARM || TargetName == TARGET_THUMB) &&
- Opcodes.size() == 2) {
- // Resolve the known conflict sets:
- //
- // 1. source registers are identical => VMOVDneon; otherwise => VORRd
- // 2. source registers are identical => VMOVQ; otherwise => VORRq
- // 3. LDR, LDRcp => return LDR for now.
- // FIXME: How can we distinguish between LDR and LDRcp? Do we need to?
- // 4. tLDMIA, tLDMIA_UPD => Rn = Inst{10-8}, reglist = Inst{7-0},
- // wback = registers<Rn> = 0
- // NOTE: (tLDM, tLDM_UPD) resolution must come before Advanced SIMD
- // addressing mode resolution!
- // 5. VLD[234]LN*/VST[234]LN* vs. VLD[234]LN*_UPD/VST[234]LN*_UPD conflicts
- // are resolved returning the non-UPD versions of the instructions if the
- // Rm field, i.e., Inst{3-0} is 0b1111. This is specified in A7.7.1
- // Advanced SIMD addressing mode.
- const std::string &name1 = nameWithID(Opcodes[0]);
- const std::string &name2 = nameWithID(Opcodes[1]);
- if ((name1 == "VMOVDneon" && name2 == "VORRd") ||
- (name1 == "VMOVQ" && name2 == "VORRq")) {
- // Inserting the opening curly brace for this case block.
- --Indentation; --Indentation;
- o.indent(Indentation) << "{\n";
- ++Indentation; ++Indentation;
-
- o.indent(Indentation)
- << "field_t N = fieldFromInstruction(insn, 7, 1), "
- << "M = fieldFromInstruction(insn, 5, 1);\n";
- o.indent(Indentation)
- << "field_t Vn = fieldFromInstruction(insn, 16, 4), "
- << "Vm = fieldFromInstruction(insn, 0, 4);\n";
- o.indent(Indentation)
- << "return (N == M && Vn == Vm) ? "
- << Opcodes[0] << " /* " << name1 << " */ : "
- << Opcodes[1] << " /* " << name2 << " */ ;\n";
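- // (N:Vn and M:Vm concatenate into the full source register numbers, so
- // this check fires exactly when the two source registers match.)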
-
- // Inserting the closing curly brace for this case block.
- --Indentation; --Indentation;
- o.indent(Indentation) << "}\n";
- ++Indentation; ++Indentation;
-
- return true;
- }
- if (name1 == "LDR" && name2 == "LDRcp") {
- o.indent(Indentation)
- << "return " << Opcodes[0]
- << "; // Returning LDR for {LDR, LDRcp}\n";
- return true;
- }
- if (name1 == "tLDMIA" && name2 == "tLDMIA_UPD") {
- // Inserting the opening curly brace for this case block.
- --Indentation; --Indentation;
- o.indent(Indentation) << "{\n";
- ++Indentation; ++Indentation;
-
- o.indent(Indentation)
- << "unsigned Rn = fieldFromInstruction(insn, 8, 3), "
- << "list = fieldFromInstruction(insn, 0, 8);\n";
- o.indent(Indentation)
- << "return ((list >> Rn) & 1) == 0 ? "
- << Opcodes[1] << " /* " << name2 << " */ : "
- << Opcodes[0] << " /* " << name1 << " */ ;\n";
-
- // Inserting the closing curly brace for this case block.
- --Indentation; --Indentation;
- o.indent(Indentation) << "}\n";
- ++Indentation; ++Indentation;
-
- return true;
- }
- if (sameStringExceptSuffix(name1, name2, "_UPD")) {
- o.indent(Indentation)
- << "return fieldFromInstruction(insn, 0, 4) == 15 ? " << Opcodes[0]
- << " /* " << name1 << " */ : " << Opcodes[1] << "/* " << name2
- << " */ ; // Advanced SIMD addressing mode\n";
- return true;
- }
-
- // Otherwise, it does not belong to the known conflict sets.
- }
-
- // We don't know how to decode these instructions! Return 0 and dump the
- // conflict set!
- o.indent(Indentation) << "return 0;" << " // Conflict set: ";
- for (int i = 0, N = Opcodes.size(); i < N; ++i) {
- o << nameWithID(Opcodes[i]);
- if (i < (N - 1))
- o << ", ";
- else
- o << '\n';
- }
-
- // Print out useful conflict information for postmortem analysis.
- errs() << "Decoding Conflict:\n";
-
- dumpStack(errs(), "\t\t");
-
- for (unsigned i = 0; i < Opcodes.size(); i++) {
- const std::string &Name = nameWithID(Opcodes[i]);
-
- errs() << '\t' << Name << " ";
- dumpBits(errs(),
- getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
- errs() << '\n';
- }
-
- return true;
-}
-
-
-////////////////////////////////////////////
-// //
-// ARMDEBackend //
-// (Helper class for ARMDecoderEmitter) //
-// //
-////////////////////////////////////////////
-
-class ARMDecoderEmitter::ARMDEBackend {
-public:
- ARMDEBackend(ARMDecoderEmitter &frontend, RecordKeeper &Records) :
- NumberedInstructions(),
- Opcodes(),
- Frontend(frontend),
- Target(Records),
- FC(NULL)
- {
- if (Target.getName() == "ARM")
- TargetName = TARGET_ARM;
- else {
- errs() << "Target name " << Target.getName() << " not recognized\n";
- assert(0 && "Unknown target");
- }
-
- // Populate the instructions for our TargetName.
- populateInstructions();
- }
-
- ~ARMDEBackend() {
- if (FC) {
- delete FC;
- FC = NULL;
- }
- }
-
- void getInstructionsByEnumValue(std::vector<const CodeGenInstruction*>
- &NumberedInstructions) {
- // We must emit the PHI opcode first...
- std::string Namespace = Target.getInstNamespace();
- assert(!Namespace.empty() && "No instructions defined.");
-
- NumberedInstructions = Target.getInstructionsByEnumValue();
- }
-
- bool populateInstruction(const CodeGenInstruction &CGI, TARGET_NAME_t TN);
-
- void populateInstructions();
-
- // Emits disassembler code for instruction decoding. This delegates to the
- // FilterChooser instance to do the heavy lifting.
- void emit(raw_ostream &o);
-
-protected:
- std::vector<const CodeGenInstruction*> NumberedInstructions;
- std::vector<unsigned> Opcodes;
- // Special case for the ARM chip, which supports ARM and Thumb ISAs.
- // Opcodes2 will be populated with the Thumb opcodes.
- std::vector<unsigned> Opcodes2;
- ARMDecoderEmitter &Frontend;
- CodeGenTarget Target;
- ARMFilterChooser *FC;
-
- TARGET_NAME_t TargetName;
-};
-
-bool ARMDecoderEmitter::
-ARMDEBackend::populateInstruction(const CodeGenInstruction &CGI,
- TARGET_NAME_t TN) {
- const Record &Def = *CGI.TheDef;
- const StringRef Name = Def.getName();
- uint8_t Form = getByteField(Def, "Form");
-
- BitsInit &Bits = getBitsField(Def, "Inst");
-
- // If not all of the bit positions are specified, do not decode this
- // instruction; we are bound to fail. For proper disassembly, the well-known
- // encoding bits of the instruction must be fully specified.
- //
- // This also removes pseudo instructions from consideration for disassembly,
- // which is a better design and less fragile than name matching.
- if (Bits.allInComplete()) return false;
-
- // Ignore "asm parser only" instructions.
- if (Def.getValueAsBit("isAsmParserOnly"))
- return false;
-
- if (TN == TARGET_ARM) {
- if (Form == ARM_FORMAT_PSEUDO)
- return false;
- if (thumbInstruction(Form))
- return false;
-
- // Tail calls are other patterns that generate existing instructions.
- if (Name == "TCRETURNdi" || Name == "TCRETURNdiND" ||
- Name == "TCRETURNri" || Name == "TCRETURNriND" ||
- Name == "TAILJMPd" || Name == "TAILJMPdt" ||
- Name == "TAILJMPdND" || Name == "TAILJMPdNDt" ||
- Name == "TAILJMPr" || Name == "TAILJMPrND" ||
- Name == "MOVr_TC")
- return false;
-
- // Delegate ADR disassembly to the more generic ADDri/SUBri instructions.
- if (Name == "ADR")
- return false;
-
- //
- // The following special cases are for conflict resolutions.
- //
-
- // A8-598: VEXT
- // Vector Extract extracts elements from the bottom end of the second
- // operand vector and the top end of the first, concatenates them and
- // places the result in the destination vector. The elements of the
- // vectors are treated as being 8-bit bitfields. There is no distinction
- // between data types. The size of the operation can be specified in
- // assembler as vext.size. If the value is 16, 32, or 64, the syntax is
- // a pseudo-instruction for a VEXT instruction specifying the equivalent
- // number of bytes.
- //
- // Variants VEXTd16, VEXTd32, VEXTd8, and VEXTdf are reduced to VEXTd8;
- // variants VEXTq16, VEXTq32, VEXTq8, and VEXTqf are reduced to VEXTq8.
- if (Name == "VEXTd16" || Name == "VEXTd32" || Name == "VEXTdf" ||
- Name == "VEXTq16" || Name == "VEXTq32" || Name == "VEXTqf")
- return false;
- } else if (TN == TARGET_THUMB) {
- if (!thumbInstruction(Form))
- return false;
-
- // A8.6.25 BX. Use the generic tBX_Rm, ignore tBX_RET and tBX_RET_vararg.
- if (Name == "tBX_RET" || Name == "tBX_RET_vararg")
- return false;
-
- // Ignore tADR, prefer tADDrPCi.
- if (Name == "tADR")
- return false;
-
- // Delegate t2ADR disassembly to the more generic t2ADDri12/t2SUBri12
- // instructions.
- if (Name == "t2ADR")
- return false;
-
- // Ignore tADDrSP, tADDspr, and tPICADD, prefer the generic tADDhirr.
- // Ignore t2SUBrSPs, prefer the t2SUB[S]r[r|s].
- // Ignore t2ADDrSPs, prefer the t2ADD[S]r[r|s].
- if (Name == "tADDrSP" || Name == "tADDspr" || Name == "tPICADD" ||
- Name == "t2SUBrSPs" || Name == "t2ADDrSPs")
- return false;
-
- // FIXME: Use ldr.n to work around a Darwin assembler bug.
- // Introduce a workaround with tLDRpciDIS opcode.
- if (Name == "tLDRpci")
- return false;
-
- // Ignore t2LDRDpci, prefer the generic t2LDRDi8, t2LDRD_PRE, t2LDRD_POST.
- if (Name == "t2LDRDpci")
- return false;
-
- // Resolve conflicts:
- //
- // t2LDMIA_RET conflicts with t2LDM (ditto)
- // tMOVCCi conflicts with tMOVi8
- // tMOVCCr conflicts with tMOVgpr2gpr
- // tLDRcp conflicts with tLDRspi
- // t2MOVCCi16 conflicts with tMOVi16
- if (Name == "t2LDMIA_RET" ||
- Name == "tMOVCCi" || Name == "tMOVCCr" ||
- Name == "tLDRcp" ||
- Name == "t2MOVCCi16")
- return false;
- }
-
- DEBUG({
- // Dumps the instruction encoding format.
- switch (TargetName) {
- case TARGET_ARM:
- case TARGET_THUMB:
- errs() << Name << " " << stringForARMFormat((ARMFormat)Form);
- break;
- }
-
- errs() << " ";
-
- // Dumps the instruction encoding bits.
- dumpBits(errs(), Bits);
-
- errs() << '\n';
-
- // Dumps the list of operand info.
- for (unsigned i = 0, e = CGI.Operands.size(); i != e; ++i) {
- const CGIOperandList::OperandInfo &Info = CGI.Operands[i];
- const std::string &OperandName = Info.Name;
- const Record &OperandDef = *Info.Rec;
-
- errs() << "\t" << OperandName << " (" << OperandDef.getName() << ")\n";
- }
- });
-
- return true;
-}
-
-void ARMDecoderEmitter::ARMDEBackend::populateInstructions() {
- getInstructionsByEnumValue(NumberedInstructions);
-
- unsigned numUIDs = NumberedInstructions.size();
- if (TargetName == TARGET_ARM) {
- for (unsigned uid = 0; uid < numUIDs; uid++) {
- // Filter out intrinsics and other non-InstARM records.
- if (!NumberedInstructions[uid]->TheDef->isSubClassOf("InstARM"))
- continue;
-
- if (populateInstruction(*NumberedInstructions[uid], TargetName))
- Opcodes.push_back(uid);
- }
-
- // Special handling for the ARM chip, which supports two modes of execution.
- // This branch handles the Thumb opcodes.
- for (unsigned uid = 0; uid < numUIDs; uid++) {
- // Filter out intrinsics and other non-ARM/Thumb records.
- if (!NumberedInstructions[uid]->TheDef->isSubClassOf("InstARM")
- && !NumberedInstructions[uid]->TheDef->isSubClassOf("InstThumb"))
- continue;
-
- if (populateInstruction(*NumberedInstructions[uid], TARGET_THUMB))
- Opcodes2.push_back(uid);
- }
-
- return;
- }
-
- // For other targets.
- for (unsigned uid = 0; uid < numUIDs; uid++) {
- Record *R = NumberedInstructions[uid]->TheDef;
- if (R->getValueAsString("Namespace") == "TargetOpcode")
- continue;
-
- if (populateInstruction(*NumberedInstructions[uid], TargetName))
- Opcodes.push_back(uid);
- }
-}
-
-// Emits disassembler code for instruction decoding. This delegates to the
-// FilterChooser instance to do the heavy lifting.
-void ARMDecoderEmitter::ARMDEBackend::emit(raw_ostream &o) {
- switch (TargetName) {
- case TARGET_ARM:
- Frontend.EmitSourceFileHeader("ARM/Thumb Decoders", o);
- break;
- default:
- assert(0 && "Unreachable code!");
- }
-
- o << "#include \"llvm/Support/DataTypes.h\"\n";
- o << "#include <assert.h>\n";
- o << '\n';
- o << "namespace llvm {\n\n";
-
- ARMFilterChooser::setTargetName(TargetName);
-
- switch (TargetName) {
- case TARGET_ARM: {
- // Emit common utility and ARM ISA decoder.
- FC = new ARMFilterChooser(NumberedInstructions, Opcodes);
- // Reset indentation level.
- unsigned Indentation = 0;
- FC->emitTop(o, Indentation);
- delete FC;
-
- // Emit Thumb ISA decoder as well.
- ARMFilterChooser::setTargetName(TARGET_THUMB);
- FC = new ARMFilterChooser(NumberedInstructions, Opcodes2);
- // Reset indentation level.
- Indentation = 0;
- FC->emitBot(o, Indentation);
- break;
- }
- default:
- assert(0 && "Unreachable code!");
- }
-
- o << "\n} // End llvm namespace \n";
-}
-
-/////////////////////////
-// Backend interface //
-/////////////////////////
-
-void ARMDecoderEmitter::initBackend()
-{
- Backend = new ARMDEBackend(*this, Records);
-}
-
-void ARMDecoderEmitter::run(raw_ostream &o)
-{
- Backend->emit(o);
-}
-
-void ARMDecoderEmitter::shutdownBackend()
-{
- delete Backend;
- Backend = NULL;
-}
diff --git a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h
deleted file mode 100644
index 486f899..0000000
--- a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===------------ ARMDecoderEmitter.h - Decoder Generator -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is part of the ARM Disassembler.
-// It contains the tablegen backend declaration ARMDecoderEmitter.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMDECODEREMITTER_H
-#define ARMDECODEREMITTER_H
-
-#include "llvm/Support/DataTypes.h"
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
-
-class ARMDecoderEmitter : public TableGenBackend {
- RecordKeeper &Records;
-public:
- ARMDecoderEmitter(RecordKeeper &R) : Records(R) {
- initBackend();
- }
-
- ~ARMDecoderEmitter() {
- shutdownBackend();
- }
-
- // run - Output the code emitter
- void run(raw_ostream &o);
-
-private:
- // Helper class for ARMDecoderEmitter.
- class ARMDEBackend;
-
- ARMDEBackend *Backend;
-
- void initBackend();
- void shutdownBackend();
-};
-
-} // end llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 8b86c23..39a3c25 100644
--- a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -99,6 +99,7 @@
#include "AsmMatcherEmitter.h"
#include "CodeGenTarget.h"
#include "StringMatcher.h"
+#include "StringToOffsetTable.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -107,6 +108,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include <map>
@@ -251,12 +253,7 @@ public:
switch (Kind) {
case Invalid:
- assert(0 && "Invalid kind!");
- case Token:
- // Tokens are comparable by value.
- //
- // FIXME: Compare by enum value.
- return ValueName < RHS.ValueName;
+ llvm_unreachable("Invalid kind!");
default:
// This class precedes the RHS if it is a proper subset of the RHS.
@@ -287,7 +284,11 @@ struct MatchableInfo {
/// The suboperand index within SrcOpName, or -1 for the entire operand.
int SubOpIdx;
- explicit AsmOperand(StringRef T) : Token(T), Class(0), SubOpIdx(-1) {}
+ /// Register record if this token is singleton register.
+ Record *SingletonReg;
+
+ explicit AsmOperand(StringRef T) : Token(T), Class(0), SubOpIdx(-1),
+ SingletonReg(0) {}
};
/// ResOperand - This represents a single operand in the result instruction
@@ -366,6 +367,9 @@ struct MatchableInfo {
}
};
+ /// AsmVariantID - Target's assembly syntax variant no.
+ int AsmVariantID;
+
/// TheDef - This is the definition of the instruction or InstAlias that this
/// matchable came from.
Record *const TheDef;
@@ -406,24 +410,28 @@ struct MatchableInfo {
std::string ConversionFnKind;
MatchableInfo(const CodeGenInstruction &CGI)
- : TheDef(CGI.TheDef), DefRec(&CGI), AsmString(CGI.AsmString) {
+ : AsmVariantID(0), TheDef(CGI.TheDef), DefRec(&CGI),
+ AsmString(CGI.AsmString) {
}
MatchableInfo(const CodeGenInstAlias *Alias)
- : TheDef(Alias->TheDef), DefRec(Alias), AsmString(Alias->AsmString) {
+ : AsmVariantID(0), TheDef(Alias->TheDef), DefRec(Alias),
+ AsmString(Alias->AsmString) {
}
void Initialize(const AsmMatcherInfo &Info,
- SmallPtrSet<Record*, 16> &SingletonRegisters);
+ SmallPtrSet<Record*, 16> &SingletonRegisters,
+ int AsmVariantNo, std::string &RegisterPrefix);
/// Validate - Return true if this matchable is a valid thing to match against
/// and perform a bunch of validity checking.
bool Validate(StringRef CommentDelimiter, bool Hack) const;
- /// getSingletonRegisterForAsmOperand - If the specified token is a singleton
- /// register, return the Record for it, otherwise return null.
- Record *getSingletonRegisterForAsmOperand(unsigned i,
- const AsmMatcherInfo &Info) const;
+ /// extractSingletonRegisterForAsmOperand - Extract singleton register,
+ /// if present, from specified token.
+ void
+ extractSingletonRegisterForAsmOperand(unsigned i, const AsmMatcherInfo &Info,
+ std::string &RegisterPrefix);
/// FindAsmOperand - Find the AsmOperand with the specified name and
/// suboperand index.
@@ -557,9 +565,6 @@ public:
/// Target - The target information.
CodeGenTarget &Target;
- /// The AsmParser "RegisterPrefix" value.
- std::string RegisterPrefix;
-
/// The classes which are needed for matching.
std::vector<ClassInfo*> Classes;
@@ -591,7 +596,8 @@ private:
/// getOperandClass - Lookup or create the class for the given operand.
ClassInfo *getOperandClass(const CGIOperandList::OperandInfo &OI,
- int SubOpIdx = -1);
+ int SubOpIdx);
+ ClassInfo *getOperandClass(Record *Rec, int SubOpIdx);
/// BuildRegisterClasses - Build the ClassInfo* instances for register
/// classes.
@@ -645,9 +651,11 @@ void MatchableInfo::dump() {
}
void MatchableInfo::Initialize(const AsmMatcherInfo &Info,
- SmallPtrSet<Record*, 16> &SingletonRegisters) {
- // TODO: Eventually support asmparser for Variant != 0.
- AsmString = CodeGenInstruction::FlattenAsmStringVariants(AsmString, 0);
+ SmallPtrSet<Record*, 16> &SingletonRegisters,
+ int AsmVariantNo, std::string &RegisterPrefix) {
+ AsmVariantID = AsmVariantNo;
+ AsmString =
+ CodeGenInstruction::FlattenAsmStringVariants(AsmString, AsmVariantNo);
TokenizeAsmString(Info);
@@ -660,7 +668,8 @@ void MatchableInfo::Initialize(const AsmMatcherInfo &Info,
// Collect singleton registers, if used.
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
- if (Record *Reg = getSingletonRegisterForAsmOperand(i, Info))
+ extractSingletonRegisterForAsmOperand(i, Info, RegisterPrefix);
+ if (Record *Reg = AsmOperands[i].SingletonReg)
SingletonRegisters.insert(Reg);
}
}
@@ -736,9 +745,12 @@ void MatchableInfo::TokenizeAsmString(const AsmMatcherInfo &Info) {
// The first token of the instruction is the mnemonic, which must be a
// simple string, not a $foo variable or a singleton register.
- assert(!AsmOperands.empty() && "Instruction has no tokens?");
+ if (AsmOperands.empty())
+ throw TGError(TheDef->getLoc(),
+ "Instruction '" + TheDef->getName() + "' has no tokens");
Mnemonic = AsmOperands[0].Token;
- if (Mnemonic[0] == '$' || getSingletonRegisterForAsmOperand(0, Info))
+ // FIXME : Check and raise an error if it is a register.
+ if (Mnemonic[0] == '$')
throw TGError(TheDef->getLoc(),
"Invalid instruction mnemonic '" + Mnemonic.str() + "'!");
@@ -801,28 +813,30 @@ bool MatchableInfo::Validate(StringRef CommentDelimiter, bool Hack) const {
return true;
}
-/// getSingletonRegisterForAsmOperand - If the specified token is a singleton
-/// register, return the register name, otherwise return a null StringRef.
-Record *MatchableInfo::
-getSingletonRegisterForAsmOperand(unsigned i, const AsmMatcherInfo &Info) const{
- StringRef Tok = AsmOperands[i].Token;
- if (!Tok.startswith(Info.RegisterPrefix))
- return 0;
+/// extractSingletonRegisterForAsmOperand - Extract singleton register,
+/// if present, from specified token.
+void MatchableInfo::
+extractSingletonRegisterForAsmOperand(unsigned OperandNo,
+ const AsmMatcherInfo &Info,
+ std::string &RegisterPrefix) {
+ StringRef Tok = AsmOperands[OperandNo].Token;
+ if (RegisterPrefix.empty()) {
+ std::string LoweredTok = Tok.lower();
+ if (const CodeGenRegister *Reg = Info.Target.getRegisterByName(LoweredTok))
+ AsmOperands[OperandNo].SingletonReg = Reg->TheDef;
+ return;
+ }
+
+ if (!Tok.startswith(RegisterPrefix))
+ return;
- StringRef RegName = Tok.substr(Info.RegisterPrefix.size());
+ StringRef RegName = Tok.substr(RegisterPrefix.size());
if (const CodeGenRegister *Reg = Info.Target.getRegisterByName(RegName))
- return Reg->TheDef;
+ AsmOperands[OperandNo].SingletonReg = Reg->TheDef;
// If there is no register prefix (i.e. "%" in "%eax"), then this may
// be some random non-register token, just ignore it.
- if (Info.RegisterPrefix.empty())
- return 0;
-
- // Otherwise, we have something invalid prefixed with the register prefix,
- // such as %foo.
- std::string Err = "unable to find register for '" + RegName.str() +
- "' (which matches register prefix)";
- throw TGError(TheDef->getLoc(), Err);
+ return;
}
static std::string getEnumNameForToken(StringRef Str) {
@@ -870,7 +884,11 @@ AsmMatcherInfo::getOperandClass(const CGIOperandList::OperandInfo &OI,
Record *Rec = OI.Rec;
if (SubOpIdx != -1)
Rec = dynamic_cast<DefInit*>(OI.MIOperandInfo->getArg(SubOpIdx))->getDef();
+ return getOperandClass(Rec, SubOpIdx);
+}
+ClassInfo *
+AsmMatcherInfo::getOperandClass(Record *Rec, int SubOpIdx) {
if (Rec->isSubClassOf("RegisterOperand")) {
// RegisterOperand may have an associated ParserMatchClass. If it does,
// use it, else just fall back to the underlying register class.
@@ -1102,8 +1120,7 @@ void AsmMatcherInfo::BuildOperandClasses() {
AsmMatcherInfo::AsmMatcherInfo(Record *asmParser,
CodeGenTarget &target,
RecordKeeper &records)
- : Records(records), AsmParser(asmParser), Target(target),
- RegisterPrefix(AsmParser->getValueAsString("RegisterPrefix")) {
+ : Records(records), AsmParser(asmParser), Target(target) {
}
/// BuildOperandMatchInfo - Build the necessary information to handle user
@@ -1158,86 +1175,92 @@ void AsmMatcherInfo::BuildInfo() {
assert(FeatureNo < 32 && "Too many subtarget features!");
}
- std::string CommentDelimiter = AsmParser->getValueAsString("CommentDelimiter");
-
// Parse the instructions; we need to do this first so that we can gather the
// singleton register classes.
SmallPtrSet<Record*, 16> SingletonRegisters;
- for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
- E = Target.inst_end(); I != E; ++I) {
- const CodeGenInstruction &CGI = **I;
-
- // If the tblgen -match-prefix option is specified (for tblgen hackers),
- // filter the set of instructions we consider.
- if (!StringRef(CGI.TheDef->getName()).startswith(MatchPrefix))
- continue;
+ unsigned VariantCount = Target.getAsmParserVariantCount();
+ for (unsigned VC = 0; VC != VariantCount; ++VC) {
+ Record *AsmVariant = Target.getAsmParserVariant(VC);
+ std::string CommentDelimiter = AsmVariant->getValueAsString("CommentDelimiter");
+ std::string RegisterPrefix = AsmVariant->getValueAsString("RegisterPrefix");
+ int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
+
+ for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
+ E = Target.inst_end(); I != E; ++I) {
+ const CodeGenInstruction &CGI = **I;
+
+ // If the tblgen -match-prefix option is specified (for tblgen hackers),
+ // filter the set of instructions we consider.
+ if (!StringRef(CGI.TheDef->getName()).startswith(MatchPrefix))
+ continue;
- // Ignore "codegen only" instructions.
- if (CGI.TheDef->getValueAsBit("isCodeGenOnly"))
- continue;
+ // Ignore "codegen only" instructions.
+ if (CGI.TheDef->getValueAsBit("isCodeGenOnly"))
+ continue;
- // Validate the operand list to ensure we can handle this instruction.
- for (unsigned i = 0, e = CGI.Operands.size(); i != e; ++i) {
- const CGIOperandList::OperandInfo &OI = CGI.Operands[i];
-
- // Validate tied operands.
- if (OI.getTiedRegister() != -1) {
- // If we have a tied operand that consists of multiple MCOperands,
- // reject it. We reject aliases and ignore instructions for now.
- if (OI.MINumOperands != 1) {
- // FIXME: Should reject these. The ARM backend hits this with $lane
- // in a bunch of instructions. It is unclear what the right answer is.
- DEBUG({
- errs() << "warning: '" << CGI.TheDef->getName() << "': "
- << "ignoring instruction with multi-operand tied operand '"
- << OI.Name << "'\n";
- });
- continue;
+ // Validate the operand list to ensure we can handle this instruction.
+ for (unsigned i = 0, e = CGI.Operands.size(); i != e; ++i) {
+ const CGIOperandList::OperandInfo &OI = CGI.Operands[i];
+
+ // Validate tied operands.
+ if (OI.getTiedRegister() != -1) {
+ // If we have a tied operand that consists of multiple MCOperands,
+ // reject it. We reject aliases and ignore instructions for now.
+ if (OI.MINumOperands != 1) {
+ // FIXME: Should reject these. The ARM backend hits this with $lane
+ // in a bunch of instructions. It is unclear what the right answer is.
+ DEBUG({
+ errs() << "warning: '" << CGI.TheDef->getName() << "': "
+ << "ignoring instruction with multi-operand tied operand '"
+ << OI.Name << "'\n";
+ });
+ continue;
+ }
}
}
- }
- OwningPtr<MatchableInfo> II(new MatchableInfo(CGI));
+ OwningPtr<MatchableInfo> II(new MatchableInfo(CGI));
- II->Initialize(*this, SingletonRegisters);
+ II->Initialize(*this, SingletonRegisters, AsmVariantNo, RegisterPrefix);
- // Ignore instructions which shouldn't be matched and diagnose invalid
- // instruction definitions with an error.
- if (!II->Validate(CommentDelimiter, true))
- continue;
+ // Ignore instructions which shouldn't be matched and diagnose invalid
+ // instruction definitions with an error.
+ if (!II->Validate(CommentDelimiter, true))
+ continue;
- // Ignore "Int_*" and "*_Int" instructions, which are internal aliases.
- //
- // FIXME: This is a total hack.
- if (StringRef(II->TheDef->getName()).startswith("Int_") ||
- StringRef(II->TheDef->getName()).endswith("_Int"))
- continue;
+ // Ignore "Int_*" and "*_Int" instructions, which are internal aliases.
+ //
+ // FIXME: This is a total hack.
+ if (StringRef(II->TheDef->getName()).startswith("Int_") ||
+ StringRef(II->TheDef->getName()).endswith("_Int"))
+ continue;
- Matchables.push_back(II.take());
- }
+ Matchables.push_back(II.take());
+ }
- // Parse all of the InstAlias definitions and stick them in the list of
- // matchables.
- std::vector<Record*> AllInstAliases =
- Records.getAllDerivedDefinitions("InstAlias");
- for (unsigned i = 0, e = AllInstAliases.size(); i != e; ++i) {
- CodeGenInstAlias *Alias = new CodeGenInstAlias(AllInstAliases[i], Target);
-
- // If the tblgen -match-prefix option is specified (for tblgen hackers),
- // filter the set of instruction aliases we consider, based on the target
- // instruction.
- if (!StringRef(Alias->ResultInst->TheDef->getName()).startswith(
- MatchPrefix))
- continue;
+ // Parse all of the InstAlias definitions and stick them in the list of
+ // matchables.
+ std::vector<Record*> AllInstAliases =
+ Records.getAllDerivedDefinitions("InstAlias");
+ for (unsigned i = 0, e = AllInstAliases.size(); i != e; ++i) {
+ CodeGenInstAlias *Alias = new CodeGenInstAlias(AllInstAliases[i], Target);
+
+ // If the tblgen -match-prefix option is specified (for tblgen hackers),
+ // filter the set of instruction aliases we consider, based on the target
+ // instruction.
+ if (!StringRef(Alias->ResultInst->TheDef->getName()).startswith(
+ MatchPrefix))
+ continue;
- OwningPtr<MatchableInfo> II(new MatchableInfo(Alias));
+ OwningPtr<MatchableInfo> II(new MatchableInfo(Alias));
- II->Initialize(*this, SingletonRegisters);
+ II->Initialize(*this, SingletonRegisters, AsmVariantNo, RegisterPrefix);
- // Validate the alias definitions.
- II->Validate(CommentDelimiter, false);
+ // Validate the alias definitions.
+ II->Validate(CommentDelimiter, false);
- Matchables.push_back(II.take());
+ Matchables.push_back(II.take());
+ }
}
// Build info for the register classes.
@@ -1260,7 +1283,7 @@ void AsmMatcherInfo::BuildInfo() {
StringRef Token = Op.Token;
// Check for singleton registers.
- if (Record *RegRecord = II->getSingletonRegisterForAsmOperand(i, *this)) {
+ if (Record *RegRecord = II->AsmOperands[i].SingletonReg) {
Op.Class = RegisterClasses[RegRecord];
assert(Op.Class && Op.Class->Registers.size() == 1 &&
"Unexpected class for singleton register");
@@ -1297,6 +1320,17 @@ void AsmMatcherInfo::BuildInfo() {
II->BuildAliasResultOperands();
}
+ // Process token alias definitions and set up the associated superclass
+ // information.
+ std::vector<Record*> AllTokenAliases =
+ Records.getAllDerivedDefinitions("TokenAlias");
+ for (unsigned i = 0, e = AllTokenAliases.size(); i != e; ++i) {
+ Record *Rec = AllTokenAliases[i];
+ ClassInfo *FromClass = getTokenClass(Rec->getValueAsString("FromToken"));
+ ClassInfo *ToClass = getTokenClass(Rec->getValueAsString("ToToken"));
+ FromClass->SuperClasses.push_back(ToClass);
+ }
+
// Reorder classes so that classes precede super classes.
std::sort(Classes.begin(), Classes.end(), less_ptr<ClassInfo>());
}
@@ -1375,9 +1409,11 @@ void AsmMatcherInfo::BuildAliasOperandReference(MatchableInfo *II,
CGA.ResultOperands[i].getName() == OperandName) {
// It's safe to go with the first one we find, because CodeGenInstAlias
// validates that all operands with the same name have the same record.
- unsigned ResultIdx = CGA.ResultInstOperandIndex[i].first;
Op.SubOpIdx = CGA.ResultInstOperandIndex[i].second;
- Op.Class = getOperandClass(CGA.ResultInst->Operands[ResultIdx],
+ // Use the match class from the Alias definition, not the
+ // destination instruction, as we may have an immediate that's
+ // being munged by the match class.
+ Op.Class = getOperandClass(CGA.ResultOperands[i].getRecord(),
Op.SubOpIdx);
Op.SrcOpName = OperandName;
return;
@@ -1453,7 +1489,6 @@ void MatchableInfo::BuildAliasResultOperands() {
// Find out what operand from the asmparser that this MCInst operand
// comes from.
switch (CGA.ResultOperands[AliasOpNo].Kind) {
- default: assert(0 && "unexpected InstAlias operand kind");
case CodeGenInstAlias::ResultOperand::K_Record: {
StringRef Name = CGA.ResultOperands[AliasOpNo].getName();
int SrcOperand = FindAsmOperand(Name, SubIdx);
@@ -1656,7 +1691,7 @@ static void EmitMatchClassEnumeration(CodeGenTarget &Target,
/// EmitValidateOperandClass - Emit the function to validate an operand class.
static void EmitValidateOperandClass(AsmMatcherInfo &Info,
raw_ostream &OS) {
- OS << "static bool ValidateOperandClass(MCParsedAsmOperand *GOp, "
+ OS << "static bool validateOperandClass(MCParsedAsmOperand *GOp, "
<< "MatchClassKind Kind) {\n";
OS << " " << Info.Target.getName() << "Operand &Operand = *("
<< Info.Target.getName() << "Operand*)GOp;\n";
@@ -1667,7 +1702,8 @@ static void EmitValidateOperandClass(AsmMatcherInfo &Info,
// Check for Token operands first.
OS << " if (Operand.isToken())\n";
- OS << " return MatchTokenString(Operand.getToken()) == Kind;\n\n";
+ OS << " return isSubclass(matchTokenString(Operand.getToken()), Kind);"
+ << "\n\n";
// Check for register operands, including sub-classes.
OS << " if (Operand.isReg()) {\n";
@@ -1681,7 +1717,7 @@ static void EmitValidateOperandClass(AsmMatcherInfo &Info,
<< it->first->getName() << ": OpKind = " << it->second->Name
<< "; break;\n";
OS << " }\n";
- OS << " return IsSubclass(OpKind, Kind);\n";
+ OS << " return isSubclass(OpKind, Kind);\n";
OS << " }\n\n";
// Check the user classes. We don't care what order since we're only
@@ -1708,8 +1744,8 @@ static void EmitValidateOperandClass(AsmMatcherInfo &Info,
static void EmitIsSubclass(CodeGenTarget &Target,
std::vector<ClassInfo*> &Infos,
raw_ostream &OS) {
- OS << "/// IsSubclass - Compute whether \\arg A is a subclass of \\arg B.\n";
- OS << "static bool IsSubclass(MatchClassKind A, MatchClassKind B) {\n";
+ OS << "/// isSubclass - Compute whether \\arg A is a subclass of \\arg B.\n";
+ OS << "static bool isSubclass(MatchClassKind A, MatchClassKind B) {\n";
OS << " if (A == B)\n";
OS << " return true;\n\n";
@@ -1720,32 +1756,30 @@ static void EmitIsSubclass(CodeGenTarget &Target,
ie = Infos.end(); it != ie; ++it) {
ClassInfo &A = **it;
- if (A.Kind != ClassInfo::Token) {
- std::vector<StringRef> SuperClasses;
- for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
- ie = Infos.end(); it != ie; ++it) {
- ClassInfo &B = **it;
-
- if (&A != &B && A.isSubsetOf(B))
- SuperClasses.push_back(B.Name);
- }
+ std::vector<StringRef> SuperClasses;
+ for (std::vector<ClassInfo*>::iterator it = Infos.begin(),
+ ie = Infos.end(); it != ie; ++it) {
+ ClassInfo &B = **it;
- if (SuperClasses.empty())
- continue;
+ if (&A != &B && A.isSubsetOf(B))
+ SuperClasses.push_back(B.Name);
+ }
- OS << "\n case " << A.Name << ":\n";
+ if (SuperClasses.empty())
+ continue;
- if (SuperClasses.size() == 1) {
- OS << " return B == " << SuperClasses.back() << ";\n";
- continue;
- }
+ OS << "\n case " << A.Name << ":\n";
- OS << " switch (B) {\n";
- OS << " default: return false;\n";
- for (unsigned i = 0, e = SuperClasses.size(); i != e; ++i)
- OS << " case " << SuperClasses[i] << ": return true;\n";
- OS << " }\n";
+ if (SuperClasses.size() == 1) {
+ OS << " return B == " << SuperClasses.back() << ";\n";
+ continue;
}
+
+ OS << " switch (B) {\n";
+ OS << " default: return false;\n";
+ for (unsigned i = 0, e = SuperClasses.size(); i != e; ++i)
+ OS << " case " << SuperClasses[i] << ": return true;\n";
+ OS << " }\n";
}
OS << " }\n";
OS << "}\n\n";
@@ -1767,7 +1801,7 @@ static void EmitMatchTokenString(CodeGenTarget &Target,
"return " + CI.Name + ";"));
}
- OS << "static MatchClassKind MatchTokenString(StringRef Name) {\n";
+ OS << "static MatchClassKind matchTokenString(StringRef Name) {\n";
StringMatcher("Name", Matches, OS).Emit();
@@ -1905,7 +1939,7 @@ static bool EmitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info) {
Info.getRecords().getAllDerivedDefinitions("MnemonicAlias");
if (Aliases.empty()) return false;
- OS << "static void ApplyMnemonicAliases(StringRef &Mnemonic, "
+ OS << "static void applyMnemonicAliases(StringRef &Mnemonic, "
"unsigned Features) {\n";
// Keep track of all the aliases from a mnemonic. Use an std::map so that the
@@ -1975,45 +2009,62 @@ static bool EmitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info) {
return true;
}
+static const char *getMinimalTypeForRange(uint64_t Range) {
+ assert(Range < 0xFFFFFFFFULL && "Enum too large");
+ if (Range > 0xFFFF)
+ return "uint32_t";
+ if (Range > 0xFF)
+ return "uint16_t";
+ return "uint8_t";
+}
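+// For example (counts assumed), a target with 10 subtarget features passes
+// Range = 1ULL << 10 = 1024; that exceeds 0xFF but not 0xFFFF, so the
+// emitted field is a uint16_t.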
+
static void EmitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
const AsmMatcherInfo &Info, StringRef ClassName) {
// Emit the static custom operand parsing table;
OS << "namespace {\n";
OS << " struct OperandMatchEntry {\n";
- OS << " const char *Mnemonic;\n";
- OS << " unsigned OperandMask;\n";
- OS << " MatchClassKind Class;\n";
- OS << " unsigned RequiredFeatures;\n";
+ OS << " static const char *const MnemonicTable;\n";
+ OS << " uint32_t OperandMask;\n";
+ OS << " uint32_t Mnemonic;\n";
+ OS << " " << getMinimalTypeForRange(1ULL << Info.SubtargetFeatures.size())
+ << " RequiredFeatures;\n";
+ OS << " " << getMinimalTypeForRange(Info.Classes.size())
+ << " Class;\n\n";
+ OS << " StringRef getMnemonic() const {\n";
+ OS << " return StringRef(MnemonicTable + Mnemonic + 1,\n";
+ OS << " MnemonicTable[Mnemonic]);\n";
+ OS << " }\n";
OS << " };\n\n";
OS << " // Predicate for searching for an opcode.\n";
OS << " struct LessOpcodeOperand {\n";
OS << " bool operator()(const OperandMatchEntry &LHS, StringRef RHS) {\n";
- OS << " return StringRef(LHS.Mnemonic) < RHS;\n";
+ OS << " return LHS.getMnemonic() < RHS;\n";
OS << " }\n";
OS << " bool operator()(StringRef LHS, const OperandMatchEntry &RHS) {\n";
- OS << " return LHS < StringRef(RHS.Mnemonic);\n";
+ OS << " return LHS < RHS.getMnemonic();\n";
OS << " }\n";
OS << " bool operator()(const OperandMatchEntry &LHS,";
OS << " const OperandMatchEntry &RHS) {\n";
- OS << " return StringRef(LHS.Mnemonic) < StringRef(RHS.Mnemonic);\n";
+ OS << " return LHS.getMnemonic() < RHS.getMnemonic();\n";
OS << " }\n";
OS << " };\n";
OS << "} // end anonymous namespace.\n\n";
+ StringToOffsetTable StringTable;
+
OS << "static const OperandMatchEntry OperandMatchTable["
<< Info.OperandMatchInfo.size() << "] = {\n";
- OS << " /* Mnemonic, Operand List Mask, Operand Class, Features */\n";
+ OS << " /* Operand List Mask, Mnemonic, Operand Class, Features */\n";
for (std::vector<OperandMatchEntry>::const_iterator it =
Info.OperandMatchInfo.begin(), ie = Info.OperandMatchInfo.end();
it != ie; ++it) {
const OperandMatchEntry &OMI = *it;
const MatchableInfo &II = *OMI.MI;
- OS << " { \"" << II.Mnemonic << "\""
- << ", " << OMI.OperandMask;
+ OS << " { " << OMI.OperandMask;
OS << " /* ";
bool printComma = false;
@@ -2026,8 +2077,10 @@ static void EmitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
}
OS << " */";
- OS << ", " << OMI.CI->Name
- << ", ";
+ // Store a pascal-style length byte in the mnemonic.
+ std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
+ OS << ", " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
+ << " /* " << II.Mnemonic << " */, ";
// Write the required features mask.
if (!II.RequiredFeatures.empty()) {
@@ -2037,15 +2090,22 @@ static void EmitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
}
} else
OS << "0";
+
+ OS << ", " << OMI.CI->Name;
+
OS << " },\n";
}
OS << "};\n\n";
+ OS << "const char *const OperandMatchEntry::MnemonicTable =\n";
+ StringTable.EmitString(OS);
+ OS << ";\n\n";
+
// Emit the operand class switch to call the correct custom parser for
// the found operand class.
OS << Target.getName() << ClassName << "::OperandMatchResultTy "
<< Target.getName() << ClassName << "::\n"
- << "TryCustomParseOperand(SmallVectorImpl<MCParsedAsmOperand*>"
+ << "tryCustomParseOperand(SmallVectorImpl<MCParsedAsmOperand*>"
<< " &Operands,\n unsigned MCK) {\n\n"
<< " switch(MCK) {\n";
@@ -2094,7 +2154,7 @@ static void EmitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
<< " *ie = MnemonicRange.second; it != ie; ++it) {\n";
OS << " // equal_range guarantees that instruction mnemonic matches.\n";
- OS << " assert(Mnemonic == it->Mnemonic);\n\n";
+ OS << " assert(Mnemonic == it->getMnemonic());\n\n";
// Emit check that the required features are available.
OS << " // check if the available features match\n";
@@ -2111,7 +2171,7 @@ static void EmitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
// Emit call to the custom parser method
OS << " // call custom parse method to handle the operand\n";
OS << " OperandMatchResultTy Result = ";
- OS << "TryCustomParseOperand(Operands, it->Class);\n";
+ OS << "tryCustomParseOperand(Operands, it->Class);\n";
OS << " if (Result != MatchOperand_NoMatch)\n";
OS << " return Result;\n";
OS << " }\n\n";
@@ -2186,7 +2246,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " bool MnemonicIsValid(StringRef Mnemonic);\n";
OS << " unsigned MatchInstructionImpl(\n";
OS << " const SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n";
- OS << " MCInst &Inst, unsigned &ErrorInfo);\n";
+ OS << " MCInst &Inst, unsigned &ErrorInfo, unsigned VariantID = 0);\n";
if (Info.OperandMatchInfo.size()) {
OS << "\n enum OperandMatchResultTy {\n";
@@ -2198,7 +2258,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n";
OS << " StringRef Mnemonic);\n";
- OS << " OperandMatchResultTy TryCustomParseOperand(\n";
+ OS << " OperandMatchResultTy tryCustomParseOperand(\n";
OS << " SmallVectorImpl<MCParsedAsmOperand*> &Operands,\n";
OS << " unsigned MCK);\n\n";
}
@@ -2260,28 +2320,39 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
// following the mnemonic.
OS << "namespace {\n";
OS << " struct MatchEntry {\n";
- OS << " unsigned Opcode;\n";
- OS << " const char *Mnemonic;\n";
- OS << " ConversionKind ConvertFn;\n";
- OS << " MatchClassKind Classes[" << MaxNumOperands << "];\n";
- OS << " unsigned RequiredFeatures;\n";
+ OS << " static const char *const MnemonicTable;\n";
+ OS << " uint32_t Mnemonic;\n";
+ OS << " uint16_t Opcode;\n";
+ OS << " " << getMinimalTypeForRange(Info.Matchables.size())
+ << " ConvertFn;\n";
+ OS << " " << getMinimalTypeForRange(1ULL << Info.SubtargetFeatures.size())
+ << " RequiredFeatures;\n";
+ OS << " " << getMinimalTypeForRange(Info.Classes.size())
+ << " Classes[" << MaxNumOperands << "];\n";
+ OS << " uint8_t AsmVariantID;\n\n";
+ OS << " StringRef getMnemonic() const {\n";
+ OS << " return StringRef(MnemonicTable + Mnemonic + 1,\n";
+ OS << " MnemonicTable[Mnemonic]);\n";
+ OS << " }\n";
OS << " };\n\n";
OS << " // Predicate for searching for an opcode.\n";
OS << " struct LessOpcode {\n";
OS << " bool operator()(const MatchEntry &LHS, StringRef RHS) {\n";
- OS << " return StringRef(LHS.Mnemonic) < RHS;\n";
+ OS << " return LHS.getMnemonic() < RHS;\n";
OS << " }\n";
OS << " bool operator()(StringRef LHS, const MatchEntry &RHS) {\n";
- OS << " return LHS < StringRef(RHS.Mnemonic);\n";
+ OS << " return LHS < RHS.getMnemonic();\n";
OS << " }\n";
OS << " bool operator()(const MatchEntry &LHS, const MatchEntry &RHS) {\n";
- OS << " return StringRef(LHS.Mnemonic) < StringRef(RHS.Mnemonic);\n";
+ OS << " return LHS.getMnemonic() < RHS.getMnemonic();\n";
OS << " }\n";
OS << " };\n";
OS << "} // end anonymous namespace.\n\n";
+ StringToOffsetTable StringTable;
+
OS << "static const MatchEntry MatchTable["
<< Info.Matchables.size() << "] = {\n";
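[Aside: the MatchEntry fields above are sized via getMinimalTypeForRange, which picks the narrowest unsigned type that can hold a value range so the static table stays compact. A minimal sketch of that idea; the real TableGen helper may differ in detail:

#include <cstdint>

// Return the name of the narrowest unsigned C type that can represent
// `Range` distinct values; emitted tables then use 8- or 16-bit fields
// for targets with few conversion kinds, features, or classes.
static const char *minimalTypeForRange(uint64_t Range) {
  if (Range <= 0xFF)          return "uint8_t";
  if (Range <= 0xFFFF)        return "uint16_t";
  if (Range <= 0xFFFFFFFFull) return "uint32_t";
  return "uint64_t";
}
]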
@@ -2290,16 +2361,13 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
it != ie; ++it) {
MatchableInfo &II = **it;
- OS << " { " << Target.getName() << "::"
- << II.getResultInst()->TheDef->getName() << ", \"" << II.Mnemonic << "\""
- << ", " << II.ConversionFnKind << ", { ";
- for (unsigned i = 0, e = II.AsmOperands.size(); i != e; ++i) {
- MatchableInfo::AsmOperand &Op = II.AsmOperands[i];
-
- if (i) OS << ", ";
- OS << Op.Class->Name;
- }
- OS << " }, ";
+    // Store a Pascal-style length byte in the mnemonic.
+ std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str();
+ OS << " { " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
+ << " /* " << II.Mnemonic << " */, "
+ << Target.getName() << "::"
+ << II.getResultInst()->TheDef->getName() << ", "
+ << II.ConversionFnKind << ", ";
// Write the required features mask.
if (!II.RequiredFeatures.empty()) {
@@ -2310,11 +2378,23 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
} else
OS << "0";
+ OS << ", { ";
+ for (unsigned i = 0, e = II.AsmOperands.size(); i != e; ++i) {
+ MatchableInfo::AsmOperand &Op = II.AsmOperands[i];
+
+ if (i) OS << ", ";
+ OS << Op.Class->Name;
+ }
+ OS << " }, " << II.AsmVariantID;
OS << "},\n";
}
OS << "};\n\n";
+ OS << "const char *const MatchEntry::MnemonicTable =\n";
+ StringTable.EmitString(OS);
+ OS << ";\n\n";
+
// A method to determine if a mnemonic is in the list.
OS << "bool " << Target.getName() << ClassName << "::\n"
<< "MnemonicIsValid(StringRef Mnemonic) {\n";
@@ -2330,7 +2410,8 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
<< Target.getName() << ClassName << "::\n"
<< "MatchInstructionImpl(const SmallVectorImpl<MCParsedAsmOperand*>"
<< " &Operands,\n";
- OS << " MCInst &Inst, unsigned &ErrorInfo) {\n";
+ OS << " MCInst &Inst, unsigned &ErrorInfo,\n";
+ OS << " unsigned VariantID) {\n";
// Emit code to get the available features.
OS << " // Get the current feature set.\n";
@@ -2342,7 +2423,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
if (HasMnemonicAliases) {
OS << " // Process all MnemonicAliases to remap the mnemonic.\n";
- OS << " ApplyMnemonicAliases(Mnemonic, AvailableFeatures);\n\n";
+ OS << " // FIXME : Add an entry in AsmParserVariant to check this.\n";
+ OS << " if (!VariantID)\n";
+ OS << " applyMnemonicAliases(Mnemonic, AvailableFeatures);\n\n";
}
// Emit code to compute the class list for this operand vector.
@@ -2375,16 +2458,18 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " it != ie; ++it) {\n";
OS << " // equal_range guarantees that instruction mnemonic matches.\n";
- OS << " assert(Mnemonic == it->Mnemonic);\n";
+ OS << " assert(Mnemonic == it->getMnemonic());\n";
// Emit check that the subclasses match.
+ OS << " if (VariantID != it->AsmVariantID) continue;\n";
OS << " bool OperandsValid = true;\n";
OS << " for (unsigned i = 0; i != " << MaxNumOperands << "; ++i) {\n";
OS << " if (i + 1 >= Operands.size()) {\n";
OS << " OperandsValid = (it->Classes[i] == " <<"InvalidMatchClass);\n";
OS << " break;\n";
OS << " }\n";
- OS << " if (ValidateOperandClass(Operands[i+1], it->Classes[i]))\n";
+ OS << " if (validateOperandClass(Operands[i+1], "
+ "(MatchClassKind)it->Classes[i]))\n";
OS << " continue;\n";
OS << " // If this operand is broken for all of the instances of this\n";
OS << " // mnemonic, keep track of it so we can report loc info.\n";
diff --git a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
index 3123e11..e0b0aac 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -16,6 +16,7 @@
#include "AsmWriterInst.h"
#include "CodeGenTarget.h"
#include "StringToOffsetTable.h"
+#include "SequenceToOffsetTable.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
@@ -277,12 +278,27 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
CGIAWIMap.insert(std::make_pair(Instructions[i].CGI, &Instructions[i]));
// Build an aggregate string, and build a table of offsets into it.
- StringToOffsetTable StringTable;
+ SequenceToOffsetTable<std::string> StringTable;
/// OpcodeInfo - This encodes the index of the string to use for the first
/// chunk of the output as well as indices used for operand printing.
std::vector<unsigned> OpcodeInfo;
+ // Add all strings to the string table upfront so it can generate an optimized
+ // representation.
+ for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
+ AsmWriterInst *AWI = CGIAWIMap[NumberedInstructions[i]];
+ if (AWI != 0 &&
+ AWI->Operands[0].OperandType == AsmWriterOperand::isLiteralTextOperand &&
+ !AWI->Operands[0].Str.empty()) {
+ std::string Str = AWI->Operands[0].Str;
+ UnescapeString(Str);
+ StringTable.add(Str);
+ }
+ }
+
+ StringTable.layout();
+
unsigned MaxStringIdx = 0;
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
AsmWriterInst *AWI = CGIAWIMap[NumberedInstructions[i]];
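[Aside: unlike StringToOffsetTable, SequenceToOffsetTable collects every entry before layout(), which lets a string share storage with any longer string it is a suffix of. A toy sketch of that layout strategy; the real class handles arbitrary sequences and terminators:

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Sort keys by *reversed* contents so every string sorts immediately
// before any string it is a suffix of, then lay out from the back,
// pointing suffixes into the tail of the entry after them.
struct SuffixTable {
  std::map<std::string, unsigned> Offsets;
  std::string Blob;                     // entries, each NUL-terminated
  bool Laid = false;

  void add(const std::string &S) { assert(!Laid); Offsets[S]; }

  void layout() {
    std::vector<std::string> Keys;
    for (auto &KV : Offsets) Keys.push_back(KV.first);
    std::sort(Keys.begin(), Keys.end(),
              [](const std::string &A, const std::string &B) {
                return std::lexicographical_compare(A.rbegin(), A.rend(),
                                                    B.rbegin(), B.rend());
              });
    for (unsigned i = Keys.size(); i-- > 0;) {
      const std::string &S = Keys[i];
      // In reverse-lex order a suffix is adjacent to its superstring.
      if (i + 1 < Keys.size() && Keys[i + 1].size() >= S.size() &&
          Keys[i + 1].compare(Keys[i + 1].size() - S.size(), S.size(), S) == 0)
        Offsets[S] = Offsets[Keys[i + 1]] + (Keys[i + 1].size() - S.size());
      else {
        Offsets[S] = (unsigned)Blob.size();
        Blob += S;
        Blob += '\0';
      }
    }
    Laid = true;
  }

  unsigned get(const std::string &S) const { return Offsets.at(S); }
};

int main() {
  SuffixTable T;
  T.add("mov"); T.add("vmov"); T.add("");
  T.layout();
  // "mov" shares the tail of "vmov"; "" shares any terminator byte.
  std::printf("%u %u %zu\n", T.get("vmov"), T.get("mov"), T.Blob.size());
}
]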
@@ -294,11 +310,11 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
AsmWriterOperand::isLiteralTextOperand ||
AWI->Operands[0].Str.empty()) {
// Something handled by the asmwriter printer, but with no leading string.
- Idx = StringTable.GetOrAddStringOffset("");
+ Idx = StringTable.get("");
} else {
std::string Str = AWI->Operands[0].Str;
UnescapeString(Str);
- Idx = StringTable.GetOrAddStringOffset(Str);
+ Idx = StringTable.get(Str);
MaxStringIdx = std::max(MaxStringIdx, Idx);
// Nuke the string from the operand list. It is now handled!
@@ -373,9 +389,9 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
O << " };\n\n";
// Emit the string itself.
- O << " const char *AsmStrs = \n";
- StringTable.EmitString(O);
- O << ";\n\n";
+ O << " const char AsmStrs[] = {\n";
+ StringTable.emit(O, printChar);
+ O << " };\n\n";
O << " O << \"\\t\";\n\n";
@@ -461,13 +477,13 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
static void
emitRegisterNameString(raw_ostream &O, StringRef AltName,
- const std::vector<CodeGenRegister*> &Registers) {
- StringToOffsetTable StringTable;
- O << " static const unsigned RegAsmOffset" << AltName << "[] = {\n ";
+ const std::vector<CodeGenRegister*> &Registers) {
+ SequenceToOffsetTable<std::string> StringTable;
+ SmallVector<std::string, 4> AsmNames(Registers.size());
for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
const CodeGenRegister &Reg = *Registers[i];
+ std::string &AsmName = AsmNames[i];
- std::string AsmName;
// "NoRegAltName" is special. We don't need to do a lookup for that,
// as it's just a reference to the default register name.
if (AltName == "" || AltName == "NoRegAltName") {
@@ -495,21 +511,22 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName,
AsmName = AltNames[Idx];
}
}
+ StringTable.add(AsmName);
+ }
- O << StringTable.GetOrAddStringOffset(AsmName);
- if (((i + 1) % 14) == 0)
- O << ",\n ";
- else
- O << ", ";
+ StringTable.layout();
+ O << " static const char AsmStrs" << AltName << "[] = {\n";
+ StringTable.emit(O, printChar);
+ O << " };\n\n";
+ O << " static const unsigned RegAsmOffset" << AltName << "[] = {";
+ for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
+ if ((i % 14) == 0)
+ O << "\n ";
+ O << StringTable.get(AsmNames[i]) << ", ";
}
- O << "0\n"
- << " };\n"
+ O << "\n };\n"
<< "\n";
-
- O << " const char *AsmStrs" << AltName << " =\n";
- StringTable.EmitString(O);
- O << ";\n";
}
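[Aside: the generated AsmWriter now pairs a character blob with an offset array per alt-name index, so a register name lookup is one index plus a pointer add. Illustrative shape only, with made-up contents:

#include <cstdio>

// A NUL-separated blob and a parallel offset array indexed by register
// number; register 0 is the invalid register, hence RegNo-1 below.
static const char AsmStrs[] = "rax\0rbx\0rcx\0";
static const unsigned RegAsmOffset[] = {0, 4, 8};

static const char *getRegisterName(unsigned RegNo) {
  return AsmStrs + RegAsmOffset[RegNo - 1];
}

int main() { std::printf("%s\n", getRegisterName(2)); } // prints "rbx"
]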
void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
@@ -544,7 +561,7 @@ void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
O << " const unsigned *RegAsmOffset;\n"
<< " const char *AsmStrs;\n"
<< " switch(AltIdx) {\n"
- << " default: assert(0 && \"Invalid register alt name index!\");\n";
+ << " default: llvm_unreachable(\"Invalid register alt name index!\");\n";
for (unsigned i = 0, e = AltNameIndices.size(); i < e; ++i) {
StringRef Namespace = AltNameIndices[1]->getValueAsString("Namespace");
StringRef AltName(AltNameIndices[i]->getName());
@@ -563,48 +580,6 @@ void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
<< "}\n";
}
-void AsmWriterEmitter::EmitGetInstructionName(raw_ostream &O) {
- CodeGenTarget Target(Records);
- Record *AsmWriter = Target.getAsmWriter();
- std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
-
- const std::vector<const CodeGenInstruction*> &NumberedInstructions =
- Target.getInstructionsByEnumValue();
-
- StringToOffsetTable StringTable;
- O <<
-"\n\n#ifdef GET_INSTRUCTION_NAME\n"
-"#undef GET_INSTRUCTION_NAME\n\n"
-"/// getInstructionName: This method is automatically generated by tblgen\n"
-"/// from the instruction set description. This returns the enum name of the\n"
-"/// specified instruction.\n"
- "const char *" << Target.getName() << ClassName
- << "::getInstructionName(unsigned Opcode) {\n"
- << " assert(Opcode < " << NumberedInstructions.size()
- << " && \"Invalid instruction number!\");\n"
- << "\n"
- << " static const unsigned InstAsmOffset[] = {";
- for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
- const CodeGenInstruction &Inst = *NumberedInstructions[i];
-
- std::string AsmName = Inst.TheDef->getName();
- if ((i % 14) == 0)
- O << "\n ";
-
- O << StringTable.GetOrAddStringOffset(AsmName) << ", ";
- }
- O << "0\n"
- << " };\n"
- << "\n";
-
- O << " const char *Strs =\n";
- StringTable.EmitString(O);
- O << ";\n";
-
- O << " return Strs+InstAsmOffset[Opcode];\n"
- << "}\n\n#endif\n";
-}
-
namespace {
// IAPrinter - Holds information about an InstAlias. Two InstAliases match if
// they both have the same conditionals. In which case, we cannot print out the
@@ -694,70 +669,7 @@ static void EmitGetMapOperandNumber(raw_ostream &O) {
O << " I = OpMap.begin(), E = OpMap.end(); I != E; ++I)\n";
O << " if (I->first == Name)\n";
O << " return I->second;\n";
- O << " assert(false && \"Operand not in map!\");\n";
- O << " return 0;\n";
- O << "}\n\n";
-}
-
-void AsmWriterEmitter::EmitRegIsInRegClass(raw_ostream &O) {
- CodeGenTarget Target(Records);
-
- // Enumerate the register classes.
- ArrayRef<CodeGenRegisterClass*> RegisterClasses =
- Target.getRegBank().getRegClasses();
-
- O << "namespace { // Register classes\n";
- O << " enum RegClass {\n";
-
- // Emit the register enum value for each RegisterClass.
- for (unsigned I = 0, E = RegisterClasses.size(); I != E; ++I) {
- if (I != 0) O << ",\n";
- O << " RC_" << RegisterClasses[I]->getName();
- }
-
- O << "\n };\n";
- O << "} // end anonymous namespace\n\n";
-
- // Emit a function that returns 'true' if a regsiter is part of a particular
- // register class. I.e., RAX is part of GR64 on X86.
- O << "static bool regIsInRegisterClass"
- << "(unsigned RegClass, unsigned Reg) {\n";
-
- // Emit the switch that checks if a register belongs to a particular register
- // class.
- O << " switch (RegClass) {\n";
- O << " default: break;\n";
-
- for (unsigned I = 0, E = RegisterClasses.size(); I != E; ++I) {
- const CodeGenRegisterClass &RC = *RegisterClasses[I];
-
- // Give the register class a legal C name if it's anonymous.
- std::string Name = RC.getName();
- O << " case RC_" << Name << ":\n";
-
- // Emit the register list now.
- unsigned IE = RC.getOrder().size();
- if (IE == 1) {
- O << " if (Reg == " << getQualifiedName(RC.getOrder()[0]) << ")\n";
- O << " return true;\n";
- } else {
- O << " switch (Reg) {\n";
- O << " default: break;\n";
-
- for (unsigned II = 0; II != IE; ++II) {
- Record *Reg = RC.getOrder()[II];
- O << " case " << getQualifiedName(Reg) << ":\n";
- }
-
- O << " return true;\n";
- O << " }\n";
- }
-
- O << " break;\n";
- }
-
- O << " }\n\n";
- O << " return false;\n";
+ O << " llvm_unreachable(\"Operand not in map!\");\n";
O << "}\n\n";
}
@@ -804,8 +716,6 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
O << "\n#ifdef PRINT_ALIAS_INSTR\n";
O << "#undef PRINT_ALIAS_INSTR\n\n";
- EmitRegIsInRegClass(O);
-
// Emit the method that prints the alias instruction.
std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
@@ -858,7 +768,6 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
const CodeGenInstAlias::ResultOperand &RO = CGA->ResultOperands[i];
switch (RO.Kind) {
- default: assert(0 && "unexpected InstAlias operand kind");
case CodeGenInstAlias::ResultOperand::K_Record: {
const Record *Rec = RO.getRecord();
StringRef ROName = RO.getName();
@@ -872,9 +781,9 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
if (!IAP->isOpMapped(ROName)) {
IAP->addOperand(ROName, i);
- Cond = std::string("regIsInRegisterClass(RC_") +
- CGA->ResultOperands[i].getRecord()->getName() +
- ", MI->getOperand(" + llvm::utostr(i) + ").getReg())";
+ Cond = std::string("MRI.getRegClass(") + Target.getName() + "::" +
+ CGA->ResultOperands[i].getRecord()->getName() + "RegClassID)"
+ ".contains(MI->getOperand(" + llvm::utostr(i) + ").getReg())";
IAP->addCond(Cond);
} else {
Cond = std::string("MI->getOperand(") +
@@ -900,6 +809,13 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
IAP->addCond(Cond);
break;
case CodeGenInstAlias::ResultOperand::K_Reg:
+ // If this is zero_reg, something's playing tricks we're not
+ // equipped to handle.
+ if (!CGA->ResultOperands[i].getRegister()) {
+ CantHandle = true;
+ break;
+ }
+
Cond = std::string("MI->getOperand(") +
llvm::utostr(i) + ").getReg() == " + Target.getName() +
"::" + CGA->ResultOperands[i].getRegister()->getName();
@@ -1015,7 +931,6 @@ void AsmWriterEmitter::run(raw_ostream &O) {
EmitPrintInstruction(O);
EmitGetRegisterName(O);
- EmitGetInstructionName(O);
EmitPrintAliasInstruction(O);
}
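[Aside: with EmitRegIsInRegClass gone, the alias printer's generated conditions reuse the target's existing register tables via MCRegisterInfo::getRegClass(...).contains(...). A toy model of that membership test; contents are hypothetical and the real MCRegisterClass uses precomputed tables:

#include <algorithm>
#include <cstdio>
#include <vector>

// Stand-in for MCRegisterInfo.getRegClass(GR64RegClassID).contains(Reg).
struct ToyRegClass {
  std::vector<unsigned> Regs;                    // sorted register numbers
  bool contains(unsigned Reg) const {
    return std::binary_search(Regs.begin(), Regs.end(), Reg);
  }
};

int main() {
  ToyRegClass GR64{{1, 2, 5, 8}};                // hypothetical contents
  std::printf("%d %d\n", GR64.contains(5), GR64.contains(3)); // prints "1 0"
}
]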
diff --git a/contrib/llvm/utils/TableGen/AsmWriterEmitter.h b/contrib/llvm/utils/TableGen/AsmWriterEmitter.h
index 731e31c..9719b20 100644
--- a/contrib/llvm/utils/TableGen/AsmWriterEmitter.h
+++ b/contrib/llvm/utils/TableGen/AsmWriterEmitter.h
@@ -37,8 +37,6 @@ namespace llvm {
private:
void EmitPrintInstruction(raw_ostream &o);
void EmitGetRegisterName(raw_ostream &o);
- void EmitGetInstructionName(raw_ostream &o);
- void EmitRegIsInRegClass(raw_ostream &O);
void EmitPrintAliasInstruction(raw_ostream &O);
AsmWriterInst *getAsmWriterInstByID(unsigned ID) const {
diff --git a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
index fcdaa08..afbb3a8 100644
--- a/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -96,7 +96,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
O << IndentStr << "if (unsigned Reg = State.AllocateReg(";
O << getQualifiedName(RegList->getElementAsRecord(0)) << ")) {\n";
} else {
- O << IndentStr << "static const unsigned RegList" << ++Counter
+ O << IndentStr << "static const uint16_t RegList" << ++Counter
<< "[] = {\n";
O << IndentStr << " ";
for (unsigned i = 0, e = RegList->getSize(); i != e; ++i) {
@@ -127,7 +127,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
unsigned RegListNumber = ++Counter;
unsigned ShadowRegListNumber = ++Counter;
- O << IndentStr << "static const unsigned RegList" << RegListNumber
+ O << IndentStr << "static const uint16_t RegList" << RegListNumber
<< "[] = {\n";
O << IndentStr << " ";
for (unsigned i = 0, e = RegList->getSize(); i != e; ++i) {
@@ -136,7 +136,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
}
O << "\n" << IndentStr << "};\n";
- O << IndentStr << "static const unsigned RegList"
+ O << IndentStr << "static const uint16_t RegList"
<< ShadowRegListNumber << "[] = {\n";
O << IndentStr << " ";
for (unsigned i = 0, e = ShadowRegList->getSize(); i != e; ++i) {
diff --git a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
index c5a1526..3943e8a 100644
--- a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -163,19 +163,19 @@ AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName,
--bit;
}
- unsigned opMask = ~0U >> (32-N);
+ uint64_t opMask = ~(uint64_t)0 >> (64-N);
int opShift = beginVarBit - N + 1;
opMask <<= opShift;
opShift = beginInstBit - beginVarBit;
if (opShift > 0) {
- Case += " Value |= (op & " + utostr(opMask) + "U) << " +
+ Case += " Value |= (op & UINT64_C(" + utostr(opMask) + ")) << " +
itostr(opShift) + ";\n";
} else if (opShift < 0) {
- Case += " Value |= (op & " + utostr(opMask) + "U) >> " +
+ Case += " Value |= (op & UINT64_C(" + utostr(opMask) + ")) >> " +
itostr(-opShift) + ";\n";
} else {
- Case += " Value |= op & " + utostr(opMask) + "U;\n";
+ Case += " Value |= op & UINT64_C(" + utostr(opMask) + ");\n";
}
}
}
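[Aside: the masking arithmetic this hunk widens is easy to get wrong at 64 bits. A standalone version of the computation the emitter generates, under the same N/beginVarBit/beginInstBit conventions as the surrounding code:

#include <cstdint>
#include <cstdio>

// Merge an N-bit field of `op`, whose top bit sits at `beginVarBit`, into
// `value` so that its top bit lands at `beginInstBit`.
uint64_t mergeField(uint64_t value, uint64_t op, unsigned N,
                    unsigned beginVarBit, unsigned beginInstBit) {
  uint64_t opMask = ~(uint64_t)0 >> (64 - N); // N low bits set
  int lowBit = beginVarBit - N + 1;           // field's low bit within op
  opMask <<= lowBit;
  int opShift = (int)beginInstBit - (int)beginVarBit;
  if (opShift > 0)
    return value | ((op & opMask) << opShift);
  if (opShift < 0)
    return value | ((op & opMask) >> -opShift);
  return value | (op & opMask);
}

int main() {
  // Place the 4-bit field op[7:4] into bits [11:8] of the encoding.
  uint64_t V = mergeField(0, 0xF0, 4, 7, 11);
  std::printf("%#llx\n", (unsigned long long)V); // prints 0xf00
}
]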
@@ -220,7 +220,7 @@ void CodeEmitterGen::run(raw_ostream &o) {
Target.getInstructionsByEnumValue();
// Emit function declaration
- o << "unsigned " << Target.getName();
+ o << "uint64_t " << Target.getName();
if (MCEmitter)
o << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
<< " SmallVectorImpl<MCFixup> &Fixups) const {\n";
@@ -228,7 +228,7 @@ void CodeEmitterGen::run(raw_ostream &o) {
o << "CodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) const {\n";
// Emit instruction base values
- o << " static const unsigned InstBits[] = {\n";
+ o << " static const uint64_t InstBits[] = {\n";
for (std::vector<const CodeGenInstruction*>::const_iterator
IN = NumberedInstructions.begin(),
EN = NumberedInstructions.end();
@@ -238,21 +238,21 @@ void CodeEmitterGen::run(raw_ostream &o) {
if (R->getValueAsString("Namespace") == "TargetOpcode" ||
R->getValueAsBit("isPseudo")) {
- o << " 0U,\n";
+ o << " UINT64_C(0),\n";
continue;
}
BitsInit *BI = R->getValueAsBitsInit("Inst");
// Start by filling in fixed values.
- unsigned Value = 0;
+ uint64_t Value = 0;
for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) {
if (BitInit *B = dynamic_cast<BitInit*>(BI->getBit(e-i-1)))
- Value |= B->getValue() << (e-i-1);
+ Value |= (uint64_t)B->getValue() << (e-i-1);
}
- o << " " << Value << "U," << '\t' << "// " << R->getName() << "\n";
+ o << " UINT64_C(" << Value << ")," << '\t' << "// " << R->getName() << "\n";
}
- o << " 0U\n };\n";
+ o << " UINT64_C(0)\n };\n";
// Map to accumulate all the cases.
std::map<std::string, std::vector<std::string> > CaseMap;
@@ -273,8 +273,8 @@ void CodeEmitterGen::run(raw_ostream &o) {
// Emit initial function code
o << " const unsigned opcode = MI.getOpcode();\n"
- << " unsigned Value = InstBits[opcode];\n"
- << " unsigned op = 0;\n"
+ << " uint64_t Value = InstBits[opcode];\n"
+ << " uint64_t op = 0;\n"
<< " (void)op; // suppress warning\n"
<< " switch (opcode) {\n";
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index dbf1662..d2ddf23 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -18,8 +18,10 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
-#include <set>
+#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
+#include <cstdio>
+#include <set>
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -629,11 +631,11 @@ TreePredicateFn::TreePredicateFn(TreePattern *N) : PatFragRec(N) {
}
std::string TreePredicateFn::getPredCode() const {
- return PatFragRec->getRecord()->getValueAsCode("PredicateCode");
+ return PatFragRec->getRecord()->getValueAsString("PredicateCode");
}
std::string TreePredicateFn::getImmCode() const {
- return PatFragRec->getRecord()->getValueAsCode("ImmediateCode");
+ return PatFragRec->getRecord()->getValueAsString("ImmediateCode");
}
@@ -748,7 +750,7 @@ std::string PatternToMatch::getPredicateCheck() const {
#ifndef NDEBUG
Def->dump();
#endif
- assert(0 && "Unknown predicate type!");
+ llvm_unreachable("Unknown predicate type!");
}
if (!PredicateCheck.empty())
PredicateCheck += " && ";
@@ -839,7 +841,6 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NodeInfo, ResNo);
switch (ConstraintType) {
- default: assert(0 && "Unknown constraint type!");
case SDTCisVT:
// Operand must be a particular type.
return NodeToApply->UpdateNodeType(ResNo, x.SDTCisVT_Info.VT, TP);
@@ -913,7 +914,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
EnforceVectorSubVectorTypeIs(NodeToApply->getExtType(ResNo), TP);
}
}
- return false;
+ llvm_unreachable("Invalid ConstraintType!");
}
//===----------------------------------------------------------------------===//
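[Aside: llvm_unreachable marks a point that control flow must never reach, which is why the fully covered switch above can drop its bogus fall-through return. A portable stand-in, for illustration only; the hypothetical enum just shows the pattern:

#include <cstdio>
#include <cstdlib>

// A [[noreturn]] helper keeps -Wreturn-type quiet after a covered switch
// and traps loudly if the "impossible" path is ever taken.
[[noreturn]] static void unreachable(const char *Msg) {
  std::fprintf(stderr, "UNREACHABLE: %s\n", Msg);
  std::abort();
}

enum class Kind { A, B };

static int handle(Kind K) {
  switch (K) {
  case Kind::A: return 1;
  case Kind::B: return 2;
  }
  unreachable("Invalid Kind!"); // no fall-off-the-end return needed
}

int main() { return handle(Kind::A); }
]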
@@ -1609,10 +1610,9 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
MadeChange |= Child->UpdateNodeType(ChildResNo, MVT::iPTR, TP);
} else if (OperandNode->getName() == "unknown") {
// Nothing to do.
- } else {
- assert(0 && "Unknown operand type!");
- abort();
- }
+ } else
+ llvm_unreachable("Unknown operand type!");
+
MadeChange |= Child->ApplyTypeConstraints(TP, NotRegisters);
}
@@ -2071,7 +2071,7 @@ void CodeGenDAGPatterns::ParseNodeTransforms() {
while (!Xforms.empty()) {
Record *XFormNode = Xforms.back();
Record *SDNode = XFormNode->getValueAsDef("Opcode");
- std::string Code = XFormNode->getValueAsCode("XFormFunction");
+ std::string Code = XFormNode->getValueAsString("XFormFunction");
SDNodeXForms.insert(std::make_pair(XFormNode, NodeXForm(SDNode, Code)));
Xforms.pop_back();
diff --git a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
index 936fd01..5a2d40a 100644
--- a/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
+++ b/contrib/llvm/utils/TableGen/CodeGenDAGPatterns.h
@@ -19,6 +19,7 @@
#include "CodeGenIntrinsics.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/ErrorHandling.h"
#include <set>
#include <algorithm>
#include <vector>
@@ -723,8 +724,7 @@ public:
if (Intrinsics[i].TheDef == R) return Intrinsics[i];
for (unsigned i = 0, e = TgtIntrinsics.size(); i != e; ++i)
if (TgtIntrinsics[i].TheDef == R) return TgtIntrinsics[i];
- assert(0 && "Unknown intrinsic!");
- abort();
+ llvm_unreachable("Unknown intrinsic!");
}
const CodeGenIntrinsic &getIntrinsicInfo(unsigned IID) const {
@@ -732,8 +732,7 @@ public:
return Intrinsics[IID-1];
if (IID-Intrinsics.size()-1 < TgtIntrinsics.size())
return TgtIntrinsics[IID-Intrinsics.size()-1];
- assert(0 && "Bad intrinsic ID!");
- abort();
+ llvm_unreachable("Bad intrinsic ID!");
}
unsigned getIntrinsicID(Record *R) const {
@@ -741,8 +740,7 @@ public:
if (Intrinsics[i].TheDef == R) return i;
for (unsigned i = 0, e = TgtIntrinsics.size(); i != e; ++i)
if (TgtIntrinsics[i].TheDef == R) return i + Intrinsics.size();
- assert(0 && "Unknown intrinsic!");
- abort();
+ llvm_unreachable("Unknown intrinsic!");
}
const DAGDefaultOperand &getDefaultOperand(Record *R) const {
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
index 53d499f..fb9ad93 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -245,7 +245,7 @@ static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
if (!Ops[DestOp.first].Constraints[DestOp.second].isNone())
throw "Operand '" + DestOpName + "' cannot have multiple constraints!";
Ops[DestOp.first].Constraints[DestOp.second] =
- CGIOperandList::ConstraintInfo::getTied(FlatOpNo);
+ CGIOperandList::ConstraintInfo::getTied(FlatOpNo);
}
static void ParseConstraints(const std::string &CStr, CGIOperandList &Ops) {
@@ -423,6 +423,18 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
return true;
}
+ // For register operands, the source register class can be a subclass
+ // of the instruction register class, not just an exact match.
+ if (ADI && ADI->getDef()->isSubClassOf("RegisterClass")) {
+ if (!InstOpRec->isSubClassOf("RegisterClass"))
+ return false;
+ if (!T.getRegisterClass(InstOpRec)
+ .hasSubClass(&T.getRegisterClass(ADI->getDef())))
+ return false;
+ ResOp = ResultOperand(Result->getArgName(AliasOpNo), ADI->getDef());
+ return true;
+ }
+
// Handle explicit registers.
if (ADI && ADI->getDef()->isSubClassOf("Register")) {
if (InstOpRec->isSubClassOf("OptionalDefOperand")) {
@@ -456,14 +468,19 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
if (ADI && ADI->getDef()->getName() == "zero_reg") {
// Check if this is an optional def.
- if (!InstOpRec->isSubClassOf("OptionalDefOperand"))
- throw TGError(Loc, "reg0 used for result that is not an "
- "OptionalDefOperand!");
+ // Tied operands where the source is a sub-operand of a complex operand
+ // need to represent both operands in the alias destination instruction.
+ // Allow zero_reg for the tied portion. This can and should go away once
+ // the MC representation of things doesn't use tied operands at all.
+ //if (!InstOpRec->isSubClassOf("OptionalDefOperand"))
+ // throw TGError(Loc, "reg0 used for result that is not an "
+ // "OptionalDefOperand!");
ResOp = ResultOperand(static_cast<Record*>(0));
return true;
}
+ // Literal integers.
if (IntInit *II = dynamic_cast<IntInit*>(Arg)) {
if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
return false;
@@ -475,6 +492,19 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
return true;
}
+ // If both are Operands with the same MVT, allow the conversion. It's
+ // up to the user to make sure the values are appropriate, just like
+ // for isel Pat's.
+ if (InstOpRec->isSubClassOf("Operand") &&
+ ADI->getDef()->isSubClassOf("Operand")) {
+ // FIXME: What other attributes should we check here? Identical
+ // MIOperandInfo perhaps?
+ if (InstOpRec->getValueInit("Type") != ADI->getDef()->getValueInit("Type"))
+ return false;
+ ResOp = ResultOperand(Result->getArgName(AliasOpNo), ADI->getDef());
+ return true;
+ }
+
return false;
}
@@ -511,8 +541,11 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
unsigned AliasOpNo = 0;
for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
- // Tied registers don't have an entry in the result dag.
- if (ResultInst->Operands[i].getTiedRegister() != -1)
+ // Tied registers don't have an entry in the result dag unless they're part
+    // of a complex operand, in which case we include them anyway, as we
+ // don't have any other way to specify the whole operand.
+ if (ResultInst->Operands[i].MINumOperands == 1 &&
+ ResultInst->Operands[i].getTiedRegister() != -1)
continue;
if (AliasOpNo >= Result->getNumArgs())
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp b/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
index 8de4615..7ce4f878 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.cpp
@@ -15,6 +15,7 @@
#include "CodeGenRegisters.h"
#include "CodeGenTarget.h"
#include "llvm/TableGen/Error.h"
+#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
@@ -22,6 +23,57 @@
using namespace llvm;
//===----------------------------------------------------------------------===//
+// CodeGenSubRegIndex
+//===----------------------------------------------------------------------===//
+
+CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
+ : TheDef(R),
+ EnumValue(Enum)
+{}
+
+std::string CodeGenSubRegIndex::getNamespace() const {
+ if (TheDef->getValue("Namespace"))
+ return TheDef->getValueAsString("Namespace");
+ else
+ return "";
+}
+
+const std::string &CodeGenSubRegIndex::getName() const {
+ return TheDef->getName();
+}
+
+std::string CodeGenSubRegIndex::getQualifiedName() const {
+ std::string N = getNamespace();
+ if (!N.empty())
+ N += "::";
+ N += getName();
+ return N;
+}
+
+void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
+ std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
+ if (Comps.empty())
+ return;
+ if (Comps.size() != 2)
+ throw TGError(TheDef->getLoc(), "ComposedOf must have exactly two entries");
+ CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
+ CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
+ CodeGenSubRegIndex *X = A->addComposite(B, this);
+ if (X)
+ throw TGError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
+}
+
+void CodeGenSubRegIndex::cleanComposites() {
+ // Clean out redundant mappings of the form this+X -> X.
+ for (CompMap::iterator i = Composed.begin(), e = Composed.end(); i != e;) {
+ CompMap::iterator j = i;
+ ++i;
+ if (j->first == j->second)
+ Composed.erase(j);
+ }
+}
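[Aside: a toy model of the composition map these methods maintain; addComposite reports a conflicting prior entry instead of overwriting it, and cleanComposites drops the redundant this+X -> X mappings:

#include <iterator>
#include <map>
#include <string>
#include <utility>

struct SubRegIdx {
  std::string Name;
  std::map<SubRegIdx *, SubRegIdx *> Composed; // B -> (this composed with B)

  explicit SubRegIdx(std::string N) : Name(std::move(N)) {}

  // Record this+B = Comp; if a different mapping already exists, return it
  // (the "compose ambiguously" warning case) instead of overwriting.
  SubRegIdx *addComposite(SubRegIdx *B, SubRegIdx *Comp) {
    auto Ins = Composed.insert(std::make_pair(B, Comp));
    return (Ins.second || Ins.first->second == Comp) ? nullptr
                                                     : Ins.first->second;
  }

  SubRegIdx *compose(SubRegIdx *B) const {
    auto I = Composed.find(B);
    return I == Composed.end() ? nullptr : I->second;
  }

  // Drop redundant "this composed with X yields X" entries.
  void cleanComposites() {
    for (auto I = Composed.begin(); I != Composed.end();)
      I = (I->first == I->second) ? Composed.erase(I) : std::next(I);
  }
};

int main() {
  SubRegIdx Q1("qsub_1"), D0("dsub_0"), D2("dsub_2");
  Q1.addComposite(&D0, &D2); // qsub_1 composed with dsub_0 is dsub_2
  return Q1.compose(&D0) == &D2 ? 0 : 1;
}
]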
+
+//===----------------------------------------------------------------------===//
// CodeGenRegister
//===----------------------------------------------------------------------===//
@@ -29,6 +81,7 @@ CodeGenRegister::CodeGenRegister(Record *R, unsigned Enum)
: TheDef(R),
EnumValue(Enum),
CostPerUse(R->getValueAsInt("CostPerUse")),
+ CoveredBySubRegs(R->getValueAsBit("CoveredBySubRegs")),
SubRegsComplete(false)
{}
@@ -37,12 +90,81 @@ const std::string &CodeGenRegister::getName() const {
}
namespace {
- struct Orphan {
- CodeGenRegister *SubReg;
- Record *First, *Second;
- Orphan(CodeGenRegister *r, Record *a, Record *b)
- : SubReg(r), First(a), Second(b) {}
- };
+// Iterate over all register units in a set of registers.
+class RegUnitIterator {
+ CodeGenRegister::Set::const_iterator RegI, RegE;
+ CodeGenRegister::RegUnitList::const_iterator UnitI, UnitE;
+
+public:
+ RegUnitIterator(const CodeGenRegister::Set &Regs):
+ RegI(Regs.begin()), RegE(Regs.end()), UnitI(), UnitE() {
+
+ if (RegI != RegE) {
+ UnitI = (*RegI)->getRegUnits().begin();
+ UnitE = (*RegI)->getRegUnits().end();
+ advance();
+ }
+ }
+
+ bool isValid() const { return UnitI != UnitE; }
+
+  unsigned operator* () const { assert(isValid()); return *UnitI; }
+
+ const CodeGenRegister *getReg() const { assert(isValid()); return *RegI; }
+
+ /// Preincrement. Move to the next unit.
+ void operator++() {
+ assert(isValid() && "Cannot advance beyond the last operand");
+ ++UnitI;
+ advance();
+ }
+
+protected:
+ void advance() {
+ while (UnitI == UnitE) {
+ if (++RegI == RegE)
+ break;
+ UnitI = (*RegI)->getRegUnits().begin();
+ UnitE = (*RegI)->getRegUnits().end();
+ }
+ }
+};
+} // namespace
+
+// Merge two RegUnitLists maintaining the order and removing duplicates.
+// Overwrites MergedRU in the process.
+static void mergeRegUnits(CodeGenRegister::RegUnitList &MergedRU,
+ const CodeGenRegister::RegUnitList &RRU) {
+ CodeGenRegister::RegUnitList LRU = MergedRU;
+ MergedRU.clear();
+ std::set_union(LRU.begin(), LRU.end(), RRU.begin(), RRU.end(),
+ std::back_inserter(MergedRU));
+}
+
+// Return true if this unit appears in RegUnits.
+static bool hasRegUnit(CodeGenRegister::RegUnitList &RegUnits, unsigned Unit) {
+ return std::count(RegUnits.begin(), RegUnits.end(), Unit);
+}
+
+// Inherit register units from subregisters.
+// Return true if the RegUnits changed.
+bool CodeGenRegister::inheritRegUnits(CodeGenRegBank &RegBank) {
+ unsigned OldNumUnits = RegUnits.size();
+ for (SubRegMap::const_iterator I = SubRegs.begin(), E = SubRegs.end();
+ I != E; ++I) {
+ // Strangely a register may have itself as a subreg (self-cycle) e.g. XMM.
+ // Only create a unit if no other subregs have units.
+ CodeGenRegister *SR = I->second;
+ if (SR == this) {
+ // RegUnits are only empty during getSubRegs, prior to computing weight.
+ if (RegUnits.empty())
+ RegUnits.push_back(RegBank.newRegUnit(0));
+ continue;
+ }
+ // Merge the subregister's units into this register's RegUnits.
+ mergeRegUnits(RegUnits, SR->RegUnits);
+ }
+ return OldNumUnits != RegUnits.size();
}
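[Aside: the register-unit model introduced here gives each leaf register a fresh unit and builds a super-register's list as the sorted union of its sub-registers' units, so two registers alias exactly when their unit lists intersect. A minimal sketch with hypothetical registers:

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <vector>

using UnitList = std::vector<unsigned>;

// Sorted, duplicate-free merge, as mergeRegUnits does above.
static void mergeUnits(UnitList &Dst, const UnitList &Src) {
  UnitList Tmp = Dst;
  Dst.clear();
  std::set_union(Tmp.begin(), Tmp.end(), Src.begin(), Src.end(),
                 std::back_inserter(Dst));
}

int main() {
  unsigned NextUnit = 0;
  UnitList AL = {NextUnit++}, AH = {NextUnit++}; // leaves: one unit each
  UnitList AX;                                   // super-register AX = AL:AH
  mergeUnits(AX, AL);
  mergeUnits(AX, AH);
  bool Overlap = std::find_first_of(AX.begin(), AX.end(),
                                    AL.begin(), AL.end()) != AX.end();
  std::printf("AX has %zu units, overlaps AL: %d\n", AX.size(), Overlap);
}
]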
const CodeGenRegister::SubRegMap &
@@ -53,23 +175,26 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
SubRegsComplete = true;
std::vector<Record*> SubList = TheDef->getValueAsListOfDefs("SubRegs");
- std::vector<Record*> Indices = TheDef->getValueAsListOfDefs("SubRegIndices");
- if (SubList.size() != Indices.size())
+ std::vector<Record*> IdxList = TheDef->getValueAsListOfDefs("SubRegIndices");
+ if (SubList.size() != IdxList.size())
throw TGError(TheDef->getLoc(), "Register " + getName() +
" SubRegIndices doesn't match SubRegs");
// First insert the direct subregs and make sure they are fully indexed.
+ SmallVector<CodeGenSubRegIndex*, 8> Indices;
for (unsigned i = 0, e = SubList.size(); i != e; ++i) {
CodeGenRegister *SR = RegBank.getReg(SubList[i]);
- if (!SubRegs.insert(std::make_pair(Indices[i], SR)).second)
- throw TGError(TheDef->getLoc(), "SubRegIndex " + Indices[i]->getName() +
+ CodeGenSubRegIndex *Idx = RegBank.getSubRegIdx(IdxList[i]);
+ Indices.push_back(Idx);
+ if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
+ throw TGError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
" appears twice in Register " + getName());
}
// Keep track of inherited subregs and how they can be reached.
- SmallVector<Orphan, 8> Orphans;
+ SmallPtrSet<CodeGenRegister*, 8> Orphans;
- // Clone inherited subregs and place duplicate entries on Orphans.
+ // Clone inherited subregs and place duplicate entries in Orphans.
// Here the order is important - earlier subregs take precedence.
for (unsigned i = 0, e = SubList.size(); i != e; ++i) {
CodeGenRegister *SR = RegBank.getReg(SubList[i]);
@@ -83,7 +208,7 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
++SI) {
if (!SubRegs.insert(*SI).second)
- Orphans.push_back(Orphan(SI->second, Indices[i], SI->first));
+ Orphans.insert(SI->second);
// Noop sub-register indexes are possible, so avoid duplicates.
if (SI->second != SR)
@@ -91,6 +216,33 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
}
}
+ // Expand any composed subreg indices.
+ // If dsub_2 has ComposedOf = [qsub_1, dsub_0], and this register has a
+ // qsub_1 subreg, add a dsub_2 subreg. Keep growing Indices and process
+ // expanded subreg indices recursively.
+ for (unsigned i = 0; i != Indices.size(); ++i) {
+ CodeGenSubRegIndex *Idx = Indices[i];
+ const CodeGenSubRegIndex::CompMap &Comps = Idx->getComposites();
+ CodeGenRegister *SR = SubRegs[Idx];
+ const SubRegMap &Map = SR->getSubRegs(RegBank);
+
+ // Look at the possible compositions of Idx.
+ // They may not all be supported by SR.
+ for (CodeGenSubRegIndex::CompMap::const_iterator I = Comps.begin(),
+ E = Comps.end(); I != E; ++I) {
+ SubRegMap::const_iterator SRI = Map.find(I->first);
+ if (SRI == Map.end())
+ continue; // Idx + I->first doesn't exist in SR.
+ // Add I->second as a name for the subreg SRI->second, assuming it is
+ // orphaned, and the name isn't already used for something else.
+ if (SubRegs.count(I->second) || !Orphans.erase(SRI->second))
+ continue;
+ // We found a new name for the orphaned sub-register.
+ SubRegs.insert(std::make_pair(I->second, SRI->second));
+ Indices.push_back(I->second);
+ }
+ }
+
// Process the composites.
ListInit *Comps = TheDef->getValueAsListInit("CompositeIndices");
for (unsigned i = 0, e = Comps->size(); i != e; ++i) {
@@ -103,6 +255,7 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
if (!BaseIdxInit || !BaseIdxInit->getDef()->isSubClassOf("SubRegIndex"))
throw TGError(TheDef->getLoc(), "Invalid SubClassIndex in " +
Pat->getAsString());
+ CodeGenSubRegIndex *BaseIdx = RegBank.getSubRegIdx(BaseIdxInit->getDef());
// Resolve list of subreg indices into R2.
CodeGenRegister *R2 = this;
@@ -112,8 +265,9 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
if (!IdxInit || !IdxInit->getDef()->isSubClassOf("SubRegIndex"))
throw TGError(TheDef->getLoc(), "Invalid SubClassIndex in " +
Pat->getAsString());
+ CodeGenSubRegIndex *Idx = RegBank.getSubRegIdx(IdxInit->getDef());
const SubRegMap &R2Subs = R2->getSubRegs(RegBank);
- SubRegMap::const_iterator ni = R2Subs.find(IdxInit->getDef());
+ SubRegMap::const_iterator ni = R2Subs.find(Idx);
if (ni == R2Subs.end())
throw TGError(TheDef->getLoc(), "Composite " + Pat->getAsString() +
" refers to bad index in " + R2->getName());
@@ -121,35 +275,76 @@ CodeGenRegister::getSubRegs(CodeGenRegBank &RegBank) {
}
// Insert composite index. Allow overriding inherited indices etc.
- SubRegs[BaseIdxInit->getDef()] = R2;
+ SubRegs[BaseIdx] = R2;
// R2 is no longer an orphan.
- for (unsigned j = 0, je = Orphans.size(); j != je; ++j)
- if (Orphans[j].SubReg == R2)
- Orphans[j].SubReg = 0;
+ Orphans.erase(R2);
}
// Now Orphans contains the inherited subregisters without a direct index.
// Create inferred indexes for all missing entries.
- for (unsigned i = 0, e = Orphans.size(); i != e; ++i) {
- Orphan &O = Orphans[i];
- if (!O.SubReg)
- continue;
- SubRegs[RegBank.getCompositeSubRegIndex(O.First, O.Second, true)] =
- O.SubReg;
+ // Work backwards in the Indices vector in order to compose subregs bottom-up.
+ // Consider this subreg sequence:
+ //
+ // qsub_1 -> dsub_0 -> ssub_0
+ //
+ // The qsub_1 -> dsub_0 composition becomes dsub_2, so the ssub_0 register
+ // can be reached in two different ways:
+ //
+ // qsub_1 -> ssub_0
+ // dsub_2 -> ssub_0
+ //
+ // We pick the latter composition because another register may have [dsub_0,
+  // dsub_1, dsub_2] subregs without necessarily having a qsub_1 subreg.  The
+ // dsub_2 -> ssub_0 composition can be shared.
+ while (!Indices.empty() && !Orphans.empty()) {
+ CodeGenSubRegIndex *Idx = Indices.pop_back_val();
+ CodeGenRegister *SR = SubRegs[Idx];
+ const SubRegMap &Map = SR->getSubRegs(RegBank);
+ for (SubRegMap::const_iterator SI = Map.begin(), SE = Map.end(); SI != SE;
+ ++SI)
+ if (Orphans.erase(SI->second))
+ SubRegs[RegBank.getCompositeSubRegIndex(Idx, SI->first)] = SI->second;
}
+
+ // Initialize RegUnitList. A register with no subregisters creates its own
+ // unit. Otherwise, it inherits all its subregister's units. Because
+ // getSubRegs is called recursively, this processes the register hierarchy in
+ // postorder.
+ //
+ // TODO: We currently assume all register units correspond to a named "leaf"
+ // register. We should also unify register units for ad-hoc register
+ // aliases. This can be done by iteratively merging units for aliasing
+ // registers using a worklist.
+ assert(RegUnits.empty() && "Should only initialize RegUnits once");
+ if (SubRegs.empty())
+ RegUnits.push_back(RegBank.newRegUnit(0));
+ else
+ inheritRegUnits(RegBank);
return SubRegs;
}
void
-CodeGenRegister::addSubRegsPreOrder(SetVector<CodeGenRegister*> &OSet) const {
+CodeGenRegister::addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
+ CodeGenRegBank &RegBank) const {
assert(SubRegsComplete && "Must precompute sub-registers");
std::vector<Record*> Indices = TheDef->getValueAsListOfDefs("SubRegIndices");
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- CodeGenRegister *SR = SubRegs.find(Indices[i])->second;
+ CodeGenSubRegIndex *Idx = RegBank.getSubRegIdx(Indices[i]);
+ CodeGenRegister *SR = SubRegs.find(Idx)->second;
if (OSet.insert(SR))
- SR->addSubRegsPreOrder(OSet);
+ SR->addSubRegsPreOrder(OSet, RegBank);
+ }
+}
+
+// Get the sum of this register's unit weights.
+unsigned CodeGenRegister::getWeight(const CodeGenRegBank &RegBank) const {
+ unsigned Weight = 0;
+ for (RegUnitList::const_iterator I = RegUnits.begin(), E = RegUnits.end();
+ I != E; ++I) {
+ Weight += RegBank.getRegUnitWeight(*I);
}
+ return Weight;
}
//===----------------------------------------------------------------------===//
@@ -215,30 +410,40 @@ struct TupleExpander : SetTheory::Expander {
for (unsigned i = 0, e = Proto->getValues().size(); i != e; ++i) {
RecordVal RV = Proto->getValues()[i];
+ // Skip existing fields, like NAME.
+ if (NewReg->getValue(RV.getNameInit()))
+ continue;
+
+ StringRef Field = RV.getName();
+
// Replace the sub-register list with Tuple.
- if (RV.getName() == "SubRegs")
+ if (Field == "SubRegs")
RV.setValue(ListInit::get(Tuple, RegisterRecTy));
// Provide a blank AsmName. MC hacks are required anyway.
- if (RV.getName() == "AsmName")
+ if (Field == "AsmName")
RV.setValue(BlankName);
// CostPerUse is aggregated from all Tuple members.
- if (RV.getName() == "CostPerUse")
+ if (Field == "CostPerUse")
RV.setValue(IntInit::get(CostPerUse));
+ // Composite registers are always covered by sub-registers.
+ if (Field == "CoveredBySubRegs")
+ RV.setValue(BitInit::get(true));
+
// Copy fields from the RegisterTuples def.
- if (RV.getName() == "SubRegIndices" ||
- RV.getName() == "CompositeIndices") {
- NewReg->addValue(*Def->getValue(RV.getName()));
+ if (Field == "SubRegIndices" ||
+ Field == "CompositeIndices") {
+ NewReg->addValue(*Def->getValue(Field));
continue;
}
// Some fields get their default uninitialized value.
- if (RV.getName() == "DwarfNumbers" ||
- RV.getName() == "DwarfAlias" ||
- RV.getName() == "Aliases") {
- if (const RecordVal *DefRV = RegisterCl->getValue(RV.getName()))
+ if (Field == "DwarfNumbers" ||
+ Field == "DwarfAlias" ||
+ Field == "Aliases") {
+ if (const RecordVal *DefRV = RegisterCl->getValue(Field))
NewReg->addValue(*DefRV);
continue;
}
@@ -330,7 +535,7 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
SpillAlignment = R->getValueAsInt("Alignment");
CopyCost = R->getValueAsInt("CopyCost");
Allocatable = R->getValueAsBit("isAllocatable");
- AltOrderSelect = R->getValueAsCode("AltOrderSelect");
+ AltOrderSelect = R->getValueAsString("AltOrderSelect");
}
// Create an inferred register class that was missing from the .td files.
@@ -448,7 +653,7 @@ static int TopoOrderRC(const void *PA, const void *PB) {
return 1;
// Finally order by name as a tie breaker.
- return A->getName() < B->getName();
+ return StringRef(A->getName()).compare(B->getName());
}
std::string CodeGenRegisterClass::getQualifiedName() const {
@@ -504,6 +709,30 @@ void CodeGenRegisterClass::computeSubClasses(CodeGenRegBank &RegBank) {
RegClasses[rci]->inheritProperties(RegBank);
}
+void
+CodeGenRegisterClass::getSuperRegClasses(CodeGenSubRegIndex *SubIdx,
+ BitVector &Out) const {
+ DenseMap<CodeGenSubRegIndex*,
+ SmallPtrSet<CodeGenRegisterClass*, 8> >::const_iterator
+ FindI = SuperRegClasses.find(SubIdx);
+ if (FindI == SuperRegClasses.end())
+ return;
+ for (SmallPtrSet<CodeGenRegisterClass*, 8>::const_iterator I =
+ FindI->second.begin(), E = FindI->second.end(); I != E; ++I)
+ Out.set((*I)->EnumValue);
+}
+
+// Populate a unique sorted list of units from a register set.
+void CodeGenRegisterClass::buildRegUnitSet(
+ std::vector<unsigned> &RegUnits) const {
+ std::vector<unsigned> TmpUnits;
+ for (RegUnitIterator UnitI(Members); UnitI.isValid(); ++UnitI)
+ TmpUnits.push_back(*UnitI);
+ std::sort(TmpUnits.begin(), TmpUnits.end());
+ std::unique_copy(TmpUnits.begin(), TmpUnits.end(),
+ std::back_inserter(RegUnits));
+}
+
//===----------------------------------------------------------------------===//
// CodeGenRegBank
//===----------------------------------------------------------------------===//
@@ -511,13 +740,19 @@ void CodeGenRegisterClass::computeSubClasses(CodeGenRegBank &RegBank) {
CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) : Records(Records) {
// Configure register Sets to understand register classes and tuples.
Sets.addFieldExpander("RegisterClass", "MemberList");
+ Sets.addFieldExpander("CalleeSavedRegs", "SaveList");
Sets.addExpander("RegisterTuples", new TupleExpander());
// Read in the user-defined (named) sub-register indices.
// More indices will be synthesized later.
- SubRegIndices = Records.getAllDerivedDefinitions("SubRegIndex");
- std::sort(SubRegIndices.begin(), SubRegIndices.end(), LessRecord());
- NumNamedIndices = SubRegIndices.size();
+ std::vector<Record*> SRIs = Records.getAllDerivedDefinitions("SubRegIndex");
+ std::sort(SRIs.begin(), SRIs.end(), LessRecord());
+ NumNamedIndices = SRIs.size();
+ for (unsigned i = 0, e = SRIs.size(); i != e; ++i)
+ getSubRegIdx(SRIs[i]);
+ // Build composite maps from ComposedOf fields.
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
+ SubRegIndices[i]->updateComponents(*this);
// Read in the register definitions.
std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
@@ -538,9 +773,14 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) : Records(Records) {
// Precompute all sub-register maps now all the registers are known.
// This will create Composite entries for all inferred sub-register indices.
+ NumRegUnits = 0;
for (unsigned i = 0, e = Registers.size(); i != e; ++i)
Registers[i]->getSubRegs(*this);
+ // Native register units are associated with a leaf register. They've all been
+ // discovered now.
+ NumNativeRegUnits = NumRegUnits;
+
// Read in register class definitions.
std::vector<Record*> RCs = Records.getAllDerivedDefinitions("RegisterClass");
if (RCs.empty())
@@ -561,6 +801,15 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) : Records(Records) {
CodeGenRegisterClass::computeSubClasses(*this);
}
+CodeGenSubRegIndex *CodeGenRegBank::getSubRegIdx(Record *Def) {
+ CodeGenSubRegIndex *&Idx = Def2SubRegIdx[Def];
+ if (Idx)
+ return Idx;
+ Idx = new CodeGenSubRegIndex(Def, SubRegIndices.size() + 1);
+ SubRegIndices.push_back(Idx);
+ return Idx;
+}
+
CodeGenRegister *CodeGenRegBank::getReg(Record *Def) {
CodeGenRegister *&Reg = Def2Reg[Def];
if (Reg)
@@ -582,6 +831,23 @@ void CodeGenRegBank::addToMaps(CodeGenRegisterClass *RC) {
Key2RC.insert(std::make_pair(K, RC));
}
+// Create a synthetic sub-class if it is missing.
+CodeGenRegisterClass*
+CodeGenRegBank::getOrCreateSubClass(const CodeGenRegisterClass *RC,
+ const CodeGenRegister::Set *Members,
+ StringRef Name) {
+ // Synthetic sub-class has the same size and alignment as RC.
+ CodeGenRegisterClass::Key K(Members, RC->SpillSize, RC->SpillAlignment);
+ RCKeyMap::const_iterator FoundI = Key2RC.find(K);
+ if (FoundI != Key2RC.end())
+ return FoundI->second;
+
+ // Sub-class doesn't exist, create a new one.
+ CodeGenRegisterClass *NewRC = new CodeGenRegisterClass(Name, K);
+ addToMaps(NewRC);
+ return NewRC;
+}
+
CodeGenRegisterClass *CodeGenRegBank::getRegClass(Record *Def) {
if (CodeGenRegisterClass *RC = Def2RC[Def])
return RC;
@@ -589,34 +855,28 @@ CodeGenRegisterClass *CodeGenRegBank::getRegClass(Record *Def) {
throw TGError(Def->getLoc(), "Not a known RegisterClass!");
}
-Record *CodeGenRegBank::getCompositeSubRegIndex(Record *A, Record *B,
- bool create) {
+CodeGenSubRegIndex*
+CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
+ CodeGenSubRegIndex *B) {
// Look for an existing entry.
- Record *&Comp = Composite[std::make_pair(A, B)];
- if (Comp || !create)
+ CodeGenSubRegIndex *Comp = A->compose(B);
+ if (Comp)
return Comp;
// None exists, synthesize one.
std::string Name = A->getName() + "_then_" + B->getName();
- Comp = new Record(Name, SMLoc(), Records);
- SubRegIndices.push_back(Comp);
+ Comp = getSubRegIdx(new Record(Name, SMLoc(), Records));
+ A->addComposite(B, Comp);
return Comp;
}
-unsigned CodeGenRegBank::getSubRegIndexNo(Record *idx) {
- std::vector<Record*>::const_iterator i =
- std::find(SubRegIndices.begin(), SubRegIndices.end(), idx);
- assert(i != SubRegIndices.end() && "Not a SubRegIndex");
- return (i - SubRegIndices.begin()) + 1;
-}
-
void CodeGenRegBank::computeComposites() {
for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
CodeGenRegister *Reg1 = Registers[i];
const CodeGenRegister::SubRegMap &SRM1 = Reg1->getSubRegs();
for (CodeGenRegister::SubRegMap::const_iterator i1 = SRM1.begin(),
e1 = SRM1.end(); i1 != e1; ++i1) {
- Record *Idx1 = i1->first;
+ CodeGenSubRegIndex *Idx1 = i1->first;
CodeGenRegister *Reg2 = i1->second;
// Ignore identity compositions.
if (Reg1 == Reg2)
@@ -625,7 +885,7 @@ void CodeGenRegBank::computeComposites() {
// Try composing Idx1 with another SubRegIndex.
for (CodeGenRegister::SubRegMap::const_iterator i2 = SRM2.begin(),
e2 = SRM2.end(); i2 != e2; ++i2) {
- std::pair<Record*, Record*> IdxPair(Idx1, i2->first);
+ CodeGenSubRegIndex *Idx2 = i2->first;
CodeGenRegister *Reg3 = i2->second;
// Ignore identity compositions.
if (Reg2 == Reg3)
@@ -634,16 +894,13 @@ void CodeGenRegBank::computeComposites() {
for (CodeGenRegister::SubRegMap::const_iterator i1d = SRM1.begin(),
e1d = SRM1.end(); i1d != e1d; ++i1d) {
if (i1d->second == Reg3) {
- std::pair<CompositeMap::iterator, bool> Ins =
- Composite.insert(std::make_pair(IdxPair, i1d->first));
// Conflicting composition? Emit a warning but allow it.
- if (!Ins.second && Ins.first->second != i1d->first) {
- errs() << "Warning: SubRegIndex " << getQualifiedName(Idx1)
- << " and " << getQualifiedName(IdxPair.second)
+ if (CodeGenSubRegIndex *Prev = Idx1->addComposite(Idx2, i1d->first))
+ errs() << "Warning: SubRegIndex " << Idx1->getQualifiedName()
+ << " and " << Idx2->getQualifiedName()
<< " compose ambiguously as "
- << getQualifiedName(Ins.first->second) << " or "
- << getQualifiedName(i1d->first) << "\n";
- }
+ << Prev->getQualifiedName() << " or "
+ << i1d->first->getQualifiedName() << "\n";
}
}
}
@@ -652,12 +909,388 @@ void CodeGenRegBank::computeComposites() {
// We don't care about the difference between (Idx1, Idx2) -> Idx2 and invalid
// compositions, so remove any mappings of that form.
- for (CompositeMap::iterator i = Composite.begin(), e = Composite.end();
- i != e;) {
- CompositeMap::iterator j = i;
- ++i;
- if (j->first.second == j->second)
- Composite.erase(j);
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
+ SubRegIndices[i]->cleanComposites();
+}
+
+namespace {
+// UberRegSet is a helper class for computeRegUnitWeights. Each UberRegSet is
+// the transitive closure of the union of overlapping register
+// classes. Together, the UberRegSets form a partition of the registers. If we
+// consider overlapping register classes to be connected, then each UberRegSet
+// is a set of connected components.
+//
+// An UberRegSet will likely be a horizontal slice of register names of
+// the same width. Nontrivial subregisters should then be in a separate
+// UberRegSet. But this property isn't required for valid computation of
+// register unit weights.
+//
+// A Weight field caches the max per-register unit weight in each UberRegSet.
+//
+// A set of SingularDeterminants flags single units of some register in this set
+// for which the unit weight equals the set weight. These units should not have
+// their weight increased.
+struct UberRegSet {
+ CodeGenRegister::Set Regs;
+ unsigned Weight;
+ CodeGenRegister::RegUnitList SingularDeterminants;
+
+ UberRegSet(): Weight(0) {}
+};
+} // namespace
+
+// Partition registers into UberRegSets, where each set is the transitive
+// closure of the union of overlapping register classes.
+//
+// UberRegSets[0] is a special non-allocatable set.
+static void computeUberSets(std::vector<UberRegSet> &UberSets,
+ std::vector<UberRegSet*> &RegSets,
+ CodeGenRegBank &RegBank) {
+
+ const std::vector<CodeGenRegister*> &Registers = RegBank.getRegisters();
+
+ // The Register EnumValue is one greater than its index into Registers.
+ assert(Registers.size() == Registers[Registers.size()-1]->EnumValue &&
+ "register enum value mismatch");
+
+  // For simplicity, make the SetID the same as EnumValue.
+ IntEqClasses UberSetIDs(Registers.size()+1);
+ std::set<unsigned> AllocatableRegs;
+ for (unsigned i = 0, e = RegBank.getRegClasses().size(); i != e; ++i) {
+
+ CodeGenRegisterClass *RegClass = RegBank.getRegClasses()[i];
+ if (!RegClass->Allocatable)
+ continue;
+
+ const CodeGenRegister::Set &Regs = RegClass->getMembers();
+ if (Regs.empty())
+ continue;
+
+ unsigned USetID = UberSetIDs.findLeader((*Regs.begin())->EnumValue);
+ assert(USetID && "register number 0 is invalid");
+
+ AllocatableRegs.insert((*Regs.begin())->EnumValue);
+ for (CodeGenRegister::Set::const_iterator I = llvm::next(Regs.begin()),
+ E = Regs.end(); I != E; ++I) {
+ AllocatableRegs.insert((*I)->EnumValue);
+ UberSetIDs.join(USetID, (*I)->EnumValue);
+ }
+ }
+ // Combine non-allocatable regs.
+ for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
+ unsigned RegNum = Registers[i]->EnumValue;
+ if (AllocatableRegs.count(RegNum))
+ continue;
+
+ UberSetIDs.join(0, RegNum);
+ }
+ UberSetIDs.compress();
+
+ // Make the first UberSet a special unallocatable set.
+ unsigned ZeroID = UberSetIDs[0];
+
+ // Insert Registers into the UberSets formed by union-find.
+ // Do not resize after this.
+ UberSets.resize(UberSetIDs.getNumClasses());
+ for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
+ const CodeGenRegister *Reg = Registers[i];
+ unsigned USetID = UberSetIDs[Reg->EnumValue];
+ if (!USetID)
+ USetID = ZeroID;
+ else if (USetID == ZeroID)
+ USetID = 0;
+
+ UberRegSet *USet = &UberSets[USetID];
+ USet->Regs.insert(Reg);
+ RegSets[i] = USet;
+ }
+}
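[Aside: IntEqClasses is LLVM's compact union-find; the joining step above is equivalent to this generic sketch, where registers that share an allocatable class end up in one uber-set. Register numbers here are hypothetical:

#include <cstdio>
#include <numeric>
#include <vector>

// Union-find with path halving.
struct DSU {
  std::vector<unsigned> Parent;
  explicit DSU(unsigned N) : Parent(N) {
    std::iota(Parent.begin(), Parent.end(), 0u);
  }
  unsigned find(unsigned X) {
    while (Parent[X] != X) X = Parent[X] = Parent[Parent[X]];
    return X;
  }
  void join(unsigned A, unsigned B) { Parent[find(A)] = find(B); }
};

int main() {
  // Class {1,2} and class {2,3} overlap in register 2, so registers 1, 2,
  // and 3 collapse into one uber-set; register 4 stays alone.
  DSU UberSetIDs(5);
  const std::vector<std::vector<unsigned>> Classes = {{1, 2}, {2, 3}};
  for (const auto &RC : Classes)
    for (unsigned R : RC)
      UberSetIDs.join(RC.front(), R);
  std::printf("1~3: %d, 3~4: %d\n", UberSetIDs.find(1) == UberSetIDs.find(3),
              UberSetIDs.find(3) == UberSetIDs.find(4)); // prints "1~3: 1, 3~4: 0"
}
]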
+
+// Recompute each UberSet weight after changing unit weights.
+static void computeUberWeights(std::vector<UberRegSet> &UberSets,
+ CodeGenRegBank &RegBank) {
+ // Skip the first unallocatable set.
+ for (std::vector<UberRegSet>::iterator I = llvm::next(UberSets.begin()),
+ E = UberSets.end(); I != E; ++I) {
+
+ // Initialize all unit weights in this set, and remember the max units/reg.
+ const CodeGenRegister *Reg = 0;
+ unsigned MaxWeight = 0, Weight = 0;
+ for (RegUnitIterator UnitI(I->Regs); UnitI.isValid(); ++UnitI) {
+ if (Reg != UnitI.getReg()) {
+ if (Weight > MaxWeight)
+ MaxWeight = Weight;
+ Reg = UnitI.getReg();
+ Weight = 0;
+ }
+ unsigned UWeight = RegBank.getRegUnitWeight(*UnitI);
+ if (!UWeight) {
+ UWeight = 1;
+ RegBank.increaseRegUnitWeight(*UnitI, UWeight);
+ }
+ Weight += UWeight;
+ }
+ if (Weight > MaxWeight)
+ MaxWeight = Weight;
+
+ // Update the set weight.
+ I->Weight = MaxWeight;
+
+ // Find singular determinants.
+ for (CodeGenRegister::Set::iterator RegI = I->Regs.begin(),
+ RegE = I->Regs.end(); RegI != RegE; ++RegI) {
+ if ((*RegI)->getRegUnits().size() == 1
+ && (*RegI)->getWeight(RegBank) == I->Weight)
+ mergeRegUnits(I->SingularDeterminants, (*RegI)->getRegUnits());
+ }
+ }
+}
+
+// normalizeWeight is a computeRegUnitWeights helper that adjusts the weight of
+// a register and its subregisters so that they have the same weight as their
+// UberSet. Self-recursion processes the subregister tree in postorder so
+// subregisters are normalized first.
+//
+// Side effects:
+// - creates new adopted register units
+// - causes superregisters to inherit adopted units
+// - increases the weight of "singular" units
+// - induces recomputation of UberWeights.
+static bool normalizeWeight(CodeGenRegister *Reg,
+ std::vector<UberRegSet> &UberSets,
+ std::vector<UberRegSet*> &RegSets,
+ CodeGenRegister::RegUnitList &NormalUnits,
+ CodeGenRegBank &RegBank) {
+ bool Changed = false;
+ const CodeGenRegister::SubRegMap &SRM = Reg->getSubRegs();
+ for (CodeGenRegister::SubRegMap::const_iterator SRI = SRM.begin(),
+ SRE = SRM.end(); SRI != SRE; ++SRI) {
+ if (SRI->second == Reg)
+ continue; // self-cycles happen
+
+ Changed |=
+ normalizeWeight(SRI->second, UberSets, RegSets, NormalUnits, RegBank);
+ }
+ // Postorder register normalization.
+
+ // Inherit register units newly adopted by subregisters.
+ if (Reg->inheritRegUnits(RegBank))
+ computeUberWeights(UberSets, RegBank);
+
+ // Check if this register is too skinny for its UberRegSet.
+ UberRegSet *UberSet = RegSets[RegBank.getRegIndex(Reg)];
+
+ unsigned RegWeight = Reg->getWeight(RegBank);
+ if (UberSet->Weight > RegWeight) {
+ // A register unit's weight can be adjusted only if it is the singular unit
+ // for this register, has not been used to normalize a subregister's set,
+ // and has not already been used to singularly determine this UberRegSet.
+ unsigned AdjustUnit = Reg->getRegUnits().front();
+ if (Reg->getRegUnits().size() != 1
+ || hasRegUnit(NormalUnits, AdjustUnit)
+ || hasRegUnit(UberSet->SingularDeterminants, AdjustUnit)) {
+ // We don't have an adjustable unit, so adopt a new one.
+ AdjustUnit = RegBank.newRegUnit(UberSet->Weight - RegWeight);
+ Reg->adoptRegUnit(AdjustUnit);
+ // Adopting a unit does not immediately require recomputing set weights.
+ }
+ else {
+ // Adjust the existing single unit.
+ RegBank.increaseRegUnitWeight(AdjustUnit, UberSet->Weight - RegWeight);
+ // The unit may be shared among sets and registers within this set.
+ computeUberWeights(UberSets, RegBank);
+ }
+ Changed = true;
+ }
+
+ // Mark these units normalized so superregisters can't change their weights.
+ mergeRegUnits(NormalUnits, Reg->getRegUnits());
+
+ return Changed;
+}
+
+// Compute a weight for each register unit created during getSubRegs.
+//
+// The goal is that two registers in the same class will have the same weight,
+// where each register's weight is defined as sum of its units' weights.
+void CodeGenRegBank::computeRegUnitWeights() {
+ assert(RegUnitWeights.empty() && "Only initialize RegUnitWeights once");
+
+ // Only allocatable units will be initialized to nonzero weight.
+ RegUnitWeights.resize(NumRegUnits);
+
+ std::vector<UberRegSet> UberSets;
+ std::vector<UberRegSet*> RegSets(Registers.size());
+ computeUberSets(UberSets, RegSets, *this);
+ // UberSets and RegSets are now immutable.
+
+ computeUberWeights(UberSets, *this);
+
+ // Iterate over each Register, normalizing the unit weights until reaching
+ // a fixed point.
+ unsigned NumIters = 0;
+ for (bool Changed = true; Changed; ++NumIters) {
+ assert(NumIters <= NumNativeRegUnits && "Runaway register unit weights");
+ Changed = false;
+ for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
+ CodeGenRegister::RegUnitList NormalUnits;
+ Changed |=
+ normalizeWeight(Registers[i], UberSets, RegSets, NormalUnits, *this);
+ }
+ }
+}
+
+// Find a set in UniqueSets with the same elements as Set.
+// Return an iterator into UniqueSets.
+static std::vector<RegUnitSet>::const_iterator
+findRegUnitSet(const std::vector<RegUnitSet> &UniqueSets,
+ const RegUnitSet &Set) {
+ std::vector<RegUnitSet>::const_iterator
+ I = UniqueSets.begin(), E = UniqueSets.end();
+ for(;I != E; ++I) {
+ if (I->Units == Set.Units)
+ break;
+ }
+ return I;
+}
+
+// Return true if the RUSubSet is a subset of RUSuperSet.
+static bool isRegUnitSubSet(const std::vector<unsigned> &RUSubSet,
+ const std::vector<unsigned> &RUSuperSet) {
+ return std::includes(RUSuperSet.begin(), RUSuperSet.end(),
+ RUSubSet.begin(), RUSubSet.end());
+}
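+
+// Both unit lists must be sorted in ascending order for std::includes to
+// be valid. For example, with hypothetical unit IDs, {2, 5} is a subset
+// of {1, 2, 5, 7}, while {2, 9} is not.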
+
+// Iteratively prune unit sets.
+void CodeGenRegBank::pruneUnitSets() {
+ assert(RegClassUnitSets.empty() && "this invalidates RegClassUnitSets");
+
+ // Form an equivalence class of UnitSets with no significant difference.
+ std::vector<unsigned> SuperSetIDs;
+ for (unsigned SubIdx = 0, EndIdx = RegUnitSets.size();
+ SubIdx != EndIdx; ++SubIdx) {
+ const RegUnitSet &SubSet = RegUnitSets[SubIdx];
+ unsigned SuperIdx = 0;
+ for (; SuperIdx != EndIdx; ++SuperIdx) {
+ if (SuperIdx == SubIdx)
+ continue;
+
+ const RegUnitSet &SuperSet = RegUnitSets[SuperIdx];
+ if (isRegUnitSubSet(SubSet.Units, SuperSet.Units)
+ && (SubSet.Units.size() + 3 > SuperSet.Units.size())) {
+ break;
+ }
+ }
+ if (SuperIdx == EndIdx)
+ SuperSetIDs.push_back(SubIdx);
+ }
+ // Populate PrunedUnitSets with each equivalence class's superset.
+ std::vector<RegUnitSet> PrunedUnitSets(SuperSetIDs.size());
+ for (unsigned i = 0, e = SuperSetIDs.size(); i != e; ++i) {
+ unsigned SuperIdx = SuperSetIDs[i];
+ PrunedUnitSets[i].Name = RegUnitSets[SuperIdx].Name;
+ PrunedUnitSets[i].Units.swap(RegUnitSets[SuperIdx].Units);
+ }
+ RegUnitSets.swap(PrunedUnitSets);
+}
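+
+// For example, with hypothetical sets A = {1, 2, 3, 4} and B = {1, 2, 3},
+// B is a subset of A and their sizes differ by less than 3, so B is
+// folded into A's equivalence class and pruned. A much smaller subset
+// such as {1} stays a distinct set because the size difference matters.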
+
+// Create a RegUnitSet for each RegClass that contains all units in the class
+// including adopted units that are necessary to model register pressure. Then
+// iteratively compute RegUnitSets such that the union of any two overlapping
+// RegUnitSets is represented.
+//
+// RegisterInfoEmitter will map each RegClass to its RegUnitClass and any
+// RegUnitSet that is a superset of that RegUnitClass.
+void CodeGenRegBank::computeRegUnitSets() {
+
+ // Compute a unique RegUnitSet for each RegClass.
+ const ArrayRef<CodeGenRegisterClass*> &RegClasses = getRegClasses();
+ unsigned NumRegClasses = RegClasses.size();
+ for (unsigned RCIdx = 0, RCEnd = NumRegClasses; RCIdx != RCEnd; ++RCIdx) {
+ if (!RegClasses[RCIdx]->Allocatable)
+ continue;
+
+ // Speculatively grow the RegUnitSets to hold the new set.
+ RegUnitSets.resize(RegUnitSets.size() + 1);
+ RegUnitSets.back().Name = RegClasses[RCIdx]->getName();
+
+ // Compute a sorted list of units in this class.
+ RegClasses[RCIdx]->buildRegUnitSet(RegUnitSets.back().Units);
+
+ // Find an existing RegUnitSet.
+ std::vector<RegUnitSet>::const_iterator SetI =
+ findRegUnitSet(RegUnitSets, RegUnitSets.back());
+ if (SetI != llvm::prior(RegUnitSets.end()))
+ RegUnitSets.pop_back();
+ }
+
+ // Iteratively prune unit sets.
+ pruneUnitSets();
+
+ // Iterate over all unit sets, including new ones added by this loop.
+ unsigned NumRegUnitSubSets = RegUnitSets.size();
+ for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
+ // In theory, this is combinatorial. In practice, it needs to be bounded
+ // by a small number of sets for regpressure to be efficient.
+ // If the assert is hit, we need to implement pruning.
+ assert(Idx < (2*NumRegUnitSubSets) && "runaway unit set inference");
+
+ // Compare new sets with all original classes.
+ for (unsigned SearchIdx = (Idx >= NumRegUnitSubSets) ? 0 : Idx+1;
+ SearchIdx != EndIdx; ++SearchIdx) {
+ std::set<unsigned> Intersection;
+ std::set_intersection(RegUnitSets[Idx].Units.begin(),
+ RegUnitSets[Idx].Units.end(),
+ RegUnitSets[SearchIdx].Units.begin(),
+ RegUnitSets[SearchIdx].Units.end(),
+ std::inserter(Intersection, Intersection.begin()));
+ if (Intersection.empty())
+ continue;
+
+ // Speculatively grow the RegUnitSets to hold the new set.
+ RegUnitSets.resize(RegUnitSets.size() + 1);
+ RegUnitSets.back().Name =
+ RegUnitSets[Idx].Name + "+" + RegUnitSets[SearchIdx].Name;
+
+ std::set_union(RegUnitSets[Idx].Units.begin(),
+ RegUnitSets[Idx].Units.end(),
+ RegUnitSets[SearchIdx].Units.begin(),
+ RegUnitSets[SearchIdx].Units.end(),
+ std::inserter(RegUnitSets.back().Units,
+ RegUnitSets.back().Units.begin()));
+
+ // Find an existing RegUnitSet, or add the union to the unique sets.
+ std::vector<RegUnitSet>::const_iterator SetI =
+ findRegUnitSet(RegUnitSets, RegUnitSets.back());
+ if (SetI != llvm::prior(RegUnitSets.end()))
+ RegUnitSets.pop_back();
+ }
+ }
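+
+// For example, starting from hypothetical class sets GPR = {1, 2} and
+// FPR = {2, 3}, the intersection {2} is nonempty, so the union set
+// "GPR+FPR" = {1, 2, 3} is appended unless an identical set already
+// exists; later iterations compare the new set against the original
+// classes in turn.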
+
+ // Iteratively prune unit sets after inferring supersets.
+ pruneUnitSets();
+
+ // For each register class, list the UnitSets that are supersets.
+ RegClassUnitSets.resize(NumRegClasses);
+ for (unsigned RCIdx = 0, RCEnd = NumRegClasses; RCIdx != RCEnd; ++RCIdx) {
+ if (!RegClasses[RCIdx]->Allocatable)
+ continue;
+
+ // Recompute the sorted list of units in this class.
+ std::vector<unsigned> RegUnits;
+ RegClasses[RCIdx]->buildRegUnitSet(RegUnits);
+
+ // Don't increase pressure for unallocatable regclasses.
+ if (RegUnits.empty())
+ continue;
+
+ // Find all supersets.
+ for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
+ USIdx != USEnd; ++USIdx) {
+ if (isRegUnitSubSet(RegUnits, RegUnitSets[USIdx].Units))
+ RegClassUnitSets[RCIdx].push_back(USIdx);
+ }
+ assert(!RegClassUnitSets[RCIdx].empty() && "missing unit set for regclass");
}
}
@@ -737,62 +1370,187 @@ computeOverlaps(std::map<const CodeGenRegister*, CodeGenRegister::Set> &Map) {
void CodeGenRegBank::computeDerivedInfo() {
computeComposites();
+
+ // Compute a weight for each register unit created during getSubRegs.
+ // This may create adopted register units (with unit # >= NumNativeRegUnits).
+ computeRegUnitWeights();
+
+ // Compute a unique set of RegUnitSets. One for each RegClass and inferred
+ // supersets for the union of overlapping sets.
+ computeRegUnitSets();
}
-// Infer missing register classes.
//
-// For every register class RC, make sure that the set of registers in RC with
-// a given SubIxx sub-register form a register class.
-void CodeGenRegBank::computeInferredRegisterClasses() {
- // When this function is called, the register classes have not been sorted
- // and assigned EnumValues yet. That means getSubClasses(),
- // getSuperClasses(), and hasSubClass() functions are defunct.
+// Synthesize missing register class intersections.
+//
+// Make sure that sub-classes of RC exist such that getCommonSubClass(RC, X)
+// returns a maximal register class for all X.
+//
+void CodeGenRegBank::inferCommonSubClass(CodeGenRegisterClass *RC) {
+ for (unsigned rci = 0, rce = RegClasses.size(); rci != rce; ++rci) {
+ CodeGenRegisterClass *RC1 = RC;
+ CodeGenRegisterClass *RC2 = RegClasses[rci];
+ if (RC1 == RC2)
+ continue;
- // Map SubRegIndex to register set.
- typedef std::map<Record*, CodeGenRegister::Set, LessRecord> SubReg2SetMap;
+ // Compute the set intersection of RC1 and RC2.
+ const CodeGenRegister::Set &Memb1 = RC1->getMembers();
+ const CodeGenRegister::Set &Memb2 = RC2->getMembers();
+ CodeGenRegister::Set Intersection;
+ std::set_intersection(Memb1.begin(), Memb1.end(),
+ Memb2.begin(), Memb2.end(),
+ std::inserter(Intersection, Intersection.begin()),
+ CodeGenRegister::Less());
+
+ // Skip disjoint class pairs.
+ if (Intersection.empty())
+ continue;
- // Visit all register classes, including the ones being added by the loop.
- for (unsigned rci = 0; rci != RegClasses.size(); ++rci) {
- CodeGenRegisterClass &RC = *RegClasses[rci];
+ // If RC1 and RC2 have different spill sizes or alignments, use the
+ // larger size for sub-classing. If they are equal, prefer RC1.
+ if (RC2->SpillSize > RC1->SpillSize ||
+ (RC2->SpillSize == RC1->SpillSize &&
+ RC2->SpillAlignment > RC1->SpillAlignment))
+ std::swap(RC1, RC2);
- // Compute the set of registers supporting each SubRegIndex.
- SubReg2SetMap SRSets;
- for (CodeGenRegister::Set::const_iterator RI = RC.getMembers().begin(),
- RE = RC.getMembers().end(); RI != RE; ++RI) {
- const CodeGenRegister::SubRegMap &SRM = (*RI)->getSubRegs();
- for (CodeGenRegister::SubRegMap::const_iterator I = SRM.begin(),
- E = SRM.end(); I != E; ++I)
- SRSets[I->first].insert(*RI);
+ getOrCreateSubClass(RC1, &Intersection,
+ RC1->getName() + "_and_" + RC2->getName());
+ }
+}
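+
+// For example, if classes A and B overlap but neither contains the other,
+// this synthesizes a class "A_and_B" holding exactly their common
+// registers, so getCommonSubClass(A, B) can return a maximal answer.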
+
+//
+// Synthesize missing sub-classes for getSubClassWithSubReg().
+//
+// Make sure that the set of registers in RC with a given SubIdx sub-register
+// form a register class. Update RC->SubClassWithSubReg.
+//
+void CodeGenRegBank::inferSubClassWithSubReg(CodeGenRegisterClass *RC) {
+ // Map SubRegIndex to set of registers in RC supporting that SubRegIndex.
+ typedef std::map<CodeGenSubRegIndex*, CodeGenRegister::Set,
+ CodeGenSubRegIndex::Less> SubReg2SetMap;
+
+ // Compute the set of registers supporting each SubRegIndex.
+ SubReg2SetMap SRSets;
+ for (CodeGenRegister::Set::const_iterator RI = RC->getMembers().begin(),
+ RE = RC->getMembers().end(); RI != RE; ++RI) {
+ const CodeGenRegister::SubRegMap &SRM = (*RI)->getSubRegs();
+ for (CodeGenRegister::SubRegMap::const_iterator I = SRM.begin(),
+ E = SRM.end(); I != E; ++I)
+ SRSets[I->first].insert(*RI);
+ }
+
+ // Find matching classes for all SRSets entries. Iterate in SubRegIndex
+ // numerical order to visit synthetic indices last.
+ for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
+ CodeGenSubRegIndex *SubIdx = SubRegIndices[sri];
+ SubReg2SetMap::const_iterator I = SRSets.find(SubIdx);
+ // Unsupported SubRegIndex. Skip it.
+ if (I == SRSets.end())
+ continue;
+ // In most cases, all RC registers support the SubRegIndex.
+ if (I->second.size() == RC->getMembers().size()) {
+ RC->setSubClassWithSubReg(SubIdx, RC);
+ continue;
+ }
+ // This is a real subset. See if we have a matching class.
+ CodeGenRegisterClass *SubRC =
+ getOrCreateSubClass(RC, &I->second,
+ RC->getName() + "_with_" + I->first->getName());
+ RC->setSubClassWithSubReg(SubIdx, SubRC);
+ }
+}
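+
+// For example, on an X86-like target where only some GR32 registers have
+// an 8-bit high sub-register, this synthesizes "GR32_with_sub_8bit_hi"
+// and records it as GR32's sub-class for that sub-register index.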
+
+//
+// Synthesize missing sub-classes of RC for getMatchingSuperRegClass().
+//
+// Create sub-classes of RC such that getMatchingSuperRegClass(RC, SubIdx, X)
+// has a maximal result for any SubIdx and any X >= FirstSubRegRC.
+//
+
+void CodeGenRegBank::inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
+ unsigned FirstSubRegRC) {
+ SmallVector<std::pair<const CodeGenRegister*,
+ const CodeGenRegister*>, 16> SSPairs;
+
+ // Iterate in SubRegIndex numerical order to visit synthetic indices last.
+ for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
+ CodeGenSubRegIndex *SubIdx = SubRegIndices[sri];
+ // Skip indexes that aren't fully supported by RC's registers. This was
+ // computed by inferSubClassWithSubReg() above which should have been
+ // called first.
+ if (RC->getSubClassWithSubReg(SubIdx) != RC)
+ continue;
+
+ // Build list of (Super, Sub) pairs for this SubIdx.
+ SSPairs.clear();
+ for (CodeGenRegister::Set::const_iterator RI = RC->getMembers().begin(),
+ RE = RC->getMembers().end(); RI != RE; ++RI) {
+ const CodeGenRegister *Super = *RI;
+ const CodeGenRegister *Sub = Super->getSubRegs().find(SubIdx)->second;
+ assert(Sub && "Missing sub-register");
+ SSPairs.push_back(std::make_pair(Super, Sub));
}
- // Find matching classes for all SRSets entries. Iterate in SubRegIndex
- // numerical order to visit synthetic indices last.
- for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
- Record *SubIdx = SubRegIndices[sri];
- SubReg2SetMap::const_iterator I = SRSets.find(SubIdx);
- // Unsupported SubRegIndex. Skip it.
- if (I == SRSets.end())
+ // Iterate over sub-register class candidates. Ignore classes created by
+ // this loop. They will never be useful.
+ for (unsigned rci = FirstSubRegRC, rce = RegClasses.size(); rci != rce;
+ ++rci) {
+ CodeGenRegisterClass *SubRC = RegClasses[rci];
+ // Compute the subset of RC that maps into SubRC.
+ CodeGenRegister::Set SubSet;
+ for (unsigned i = 0, e = SSPairs.size(); i != e; ++i)
+ if (SubRC->contains(SSPairs[i].second))
+ SubSet.insert(SSPairs[i].first);
+ if (SubSet.empty())
continue;
- // In most cases, all RC registers support the SubRegIndex.
- if (I->second.size() == RC.getMembers().size()) {
- RC.setSubClassWithSubReg(SubIdx, &RC);
+ // RC injects completely into SubRC.
+ if (SubSet.size() == SSPairs.size()) {
+ SubRC->addSuperRegClass(SubIdx, RC);
continue;
}
+ // Only a subset of RC maps into SubRC. Make sure it is represented by a
+ // class.
+ getOrCreateSubClass(RC, &SubSet, RC->getName() +
+ "_with_" + SubIdx->getName() +
+ "_in_" + SubRC->getName());
+ }
+ }
+}
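+
+// For example, if sub_32 maps a 64-bit class RC onto a 32-bit class SubRC
+// but only some of RC's members have their sub_32 sub-register in SubRC,
+// this synthesizes "RC_with_sub_32_in_SubRC" so that
+// getMatchingSuperRegClass(RC, sub_32, SubRC) has a maximal result.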
- // This is a real subset. See if we have a matching class.
- CodeGenRegisterClass::Key K(&I->second, RC.SpillSize, RC.SpillAlignment);
- RCKeyMap::const_iterator FoundI = Key2RC.find(K);
- if (FoundI != Key2RC.end()) {
- RC.setSubClassWithSubReg(SubIdx, FoundI->second);
- continue;
- }
- // Class doesn't exist.
- CodeGenRegisterClass *NewRC =
- new CodeGenRegisterClass(RC.getName() + "_with_" +
- I->first->getName(), K);
- addToMaps(NewRC);
- RC.setSubClassWithSubReg(SubIdx, NewRC);
+//
+// Infer missing register classes.
+//
+void CodeGenRegBank::computeInferredRegisterClasses() {
+ // When this function is called, the register classes have not been sorted
+ // and assigned EnumValues yet. That means getSubClasses(),
+ // getSuperClasses(), and hasSubClass() functions are defunct.
+ unsigned FirstNewRC = RegClasses.size();
+
+ // Visit all register classes, including the ones being added by the loop.
+ for (unsigned rci = 0; rci != RegClasses.size(); ++rci) {
+ CodeGenRegisterClass *RC = RegClasses[rci];
+
+ // Synthesize answers for getSubClassWithSubReg().
+ inferSubClassWithSubReg(RC);
+
+ // Synthesize answers for getCommonSubClass().
+ inferCommonSubClass(RC);
+
+ // Synthesize answers for getMatchingSuperRegClass().
+ inferMatchingSuperRegClass(RC);
+
+ // New register classes are created while this loop is running, and we need
+ // to visit all of them. In particular, inferMatchingSuperRegClass needs
+ // to match old super-register classes with sub-register classes created
+ // after inferMatchingSuperRegClass was called. At this point,
+ // inferMatchingSuperRegClass has checked SuperRC = [0..rci] with SubRC =
+ // [0..FirstNewRC). We need to cover SubRC = [FirstNewRC..rci].
+ if (rci + 1 == FirstNewRC) {
+ unsigned NextNewRC = RegClasses.size();
+ for (unsigned rci2 = 0; rci2 != FirstNewRC; ++rci2)
+ inferMatchingSuperRegClass(RegClasses[rci2], FirstNewRC);
+ FirstNewRC = NextNewRC;
}
}
}
@@ -843,3 +1601,45 @@ CodeGenRegBank::getRegClassForRegister(Record *R) {
}
return FoundRC;
}
+
+BitVector CodeGenRegBank::computeCoveredRegisters(ArrayRef<Record*> Regs) {
+ SetVector<const CodeGenRegister*> Set;
+
+ // First add Regs with all sub-registers.
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ CodeGenRegister *Reg = getReg(Regs[i]);
+ if (Set.insert(Reg))
+ // Reg is new, add all sub-registers.
+ // The pre-ordering is not important here.
+ Reg->addSubRegsPreOrder(Set, *this);
+ }
+
+ // Second, find all super-registers that are completely covered by the set.
+ for (unsigned i = 0; i != Set.size(); ++i) {
+ const CodeGenRegister::SuperRegList &SR = Set[i]->getSuperRegs();
+ for (unsigned j = 0, e = SR.size(); j != e; ++j) {
+ const CodeGenRegister *Super = SR[j];
+ if (!Super->CoveredBySubRegs || Set.count(Super))
+ continue;
+ // This new super-register is covered by its sub-registers.
+ bool AllSubsInSet = true;
+ const CodeGenRegister::SubRegMap &SRM = Super->getSubRegs();
+ for (CodeGenRegister::SubRegMap::const_iterator I = SRM.begin(),
+ E = SRM.end(); I != E; ++I)
+ if (!Set.count(I->second)) {
+ AllSubsInSet = false;
+ break;
+ }
+ // All sub-registers in Set, add Super as well.
+ // We will visit Super later to recheck its super-registers.
+ if (AllSubsInSet)
+ Set.insert(Super);
+ }
+ }
+
+ // Convert to BitVector.
+ BitVector BV(Registers.size() + 1);
+ for (unsigned i = 0, e = Set.size(); i != e; ++i)
+ BV.set(Set[i]->EnumValue);
+ return BV;
+}
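+
+// For example, on an X86-like target, computeCoveredRegisters({AH, AL})
+// first inserts AH and AL, then adds the super-register AX because AX is
+// marked CoveredBySubRegs and both of its sub-registers are in the set.
+// EAX is not added: its upper half is not covered by any sub-register.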
diff --git a/contrib/llvm/utils/TableGen/CodeGenRegisters.h b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
index 4fc34b0..232a6e7 100644
--- a/contrib/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/contrib/llvm/utils/TableGen/CodeGenRegisters.h
@@ -22,6 +22,7 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cstdlib>
#include <map>
#include <string>
@@ -31,14 +32,69 @@
namespace llvm {
class CodeGenRegBank;
+ /// CodeGenSubRegIndex - Represents a sub-register index.
+ class CodeGenSubRegIndex {
+ Record *const TheDef;
+ const unsigned EnumValue;
+
+ public:
+ CodeGenSubRegIndex(Record *R, unsigned Enum);
+
+ const std::string &getName() const;
+ std::string getNamespace() const;
+ std::string getQualifiedName() const;
+
+ // Order CodeGenSubRegIndex pointers by EnumValue.
+ struct Less {
+ bool operator()(const CodeGenSubRegIndex *A,
+ const CodeGenSubRegIndex *B) const {
+ assert(A && B);
+ return A->EnumValue < B->EnumValue;
+ }
+ };
+
+ // Map of composite subreg indices.
+ typedef std::map<CodeGenSubRegIndex*, CodeGenSubRegIndex*, Less> CompMap;
+
+ // Returns the subreg index that results from composing this with Idx.
+ // Returns NULL if this and Idx don't compose.
+ CodeGenSubRegIndex *compose(CodeGenSubRegIndex *Idx) const {
+ CompMap::const_iterator I = Composed.find(Idx);
+ return I == Composed.end() ? 0 : I->second;
+ }
+
+ // Add a composite subreg index: this+A = B.
+ // Return a conflicting composite, or NULL
+ CodeGenSubRegIndex *addComposite(CodeGenSubRegIndex *A,
+ CodeGenSubRegIndex *B) {
+ std::pair<CompMap::iterator, bool> Ins =
+ Composed.insert(std::make_pair(A, B));
+ return (Ins.second || Ins.first->second == B) ? 0 : Ins.first->second;
+ }
+
+ // Update the composite maps of components specified in 'ComposedOf'.
+ void updateComponents(CodeGenRegBank&);
+
+ // Clean out redundant composite mappings.
+ void cleanComposites();
+
+ // Return the map of composites.
+ const CompMap &getComposites() const { return Composed; }
+
+ private:
+ CompMap Composed;
+ };
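+
+ // Usage sketch (ARM-like indices): if Q0's second D register is reached
+ // by dsub_1, and D registers expose their low S register as ssub_0,
+ // then dsub_1->addComposite(ssub_0, ssub_2) records the composition,
+ // after which dsub_1->compose(ssub_0) returns ssub_2.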
+
/// CodeGenRegister - Represents a register definition.
struct CodeGenRegister {
Record *TheDef;
unsigned EnumValue;
unsigned CostPerUse;
+ bool CoveredBySubRegs;
// Map SubRegIndex -> Register.
- typedef std::map<Record*, CodeGenRegister*, LessRecord> SubRegMap;
+ typedef std::map<CodeGenSubRegIndex*, CodeGenRegister*,
+ CodeGenSubRegIndex::Less> SubRegMap;
CodeGenRegister(Record *R, unsigned Enum);
@@ -54,18 +110,37 @@ namespace llvm {
}
// Add sub-registers to OSet following a pre-order defined by the .td file.
- void addSubRegsPreOrder(SetVector<CodeGenRegister*> &OSet) const;
+ void addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
+ CodeGenRegBank&) const;
// List of super-registers in topological order, small to large.
- typedef std::vector<CodeGenRegister*> SuperRegList;
+ typedef std::vector<const CodeGenRegister*> SuperRegList;
- // Get the list of super-registers.
- // This is only valid after computeDerivedInfo has visited all registers.
+ // Get the list of super-registers. This is valid after getSubRegs()
+ // visits all registers during RegBank construction.
const SuperRegList &getSuperRegs() const {
assert(SubRegsComplete && "Must precompute sub-registers");
return SuperRegs;
}
+ // List of register units in ascending order.
+ typedef SmallVector<unsigned, 16> RegUnitList;
+
+ // Get the list of register units.
+ // This is only valid after getSubRegs() completes.
+ const RegUnitList &getRegUnits() const { return RegUnits; }
+
+ // Inherit register units from subregisters.
+ // Return true if the RegUnits changed.
+ bool inheritRegUnits(CodeGenRegBank &RegBank);
+
+ // Adopt a register unit for pressure tracking.
+ // A unit is adopted iff its unit number is >= NumNativeRegUnits.
+ void adoptRegUnit(unsigned RUID) { RegUnits.push_back(RUID); }
+
+ // Get the sum of this register's register unit weights.
+ unsigned getWeight(const CodeGenRegBank &RegBank) const;
+
// Order CodeGenRegister pointers by EnumValue.
struct Less {
bool operator()(const CodeGenRegister *A,
@@ -82,6 +157,7 @@ namespace llvm {
bool SubRegsComplete;
SubRegMap SubRegs;
SuperRegList SuperRegs;
+ RegUnitList RegUnits;
};
@@ -101,8 +177,17 @@ namespace llvm {
// super-class.
void inheritProperties(CodeGenRegBank&);
- // Map SubRegIndex -> sub-class
- DenseMap<Record*, CodeGenRegisterClass*> SubClassWithSubReg;
+ // Map SubRegIndex -> sub-class. This is the largest sub-class where all
+ // registers have a SubRegIndex sub-register.
+ DenseMap<CodeGenSubRegIndex*, CodeGenRegisterClass*> SubClassWithSubReg;
+
+ // Map SubRegIndex -> set of super-reg classes. This is all register
+ // classes SuperRC such that:
+ //
+ // R:SubRegIndex in this RC for all R in SuperRC.
+ //
+ DenseMap<CodeGenSubRegIndex*,
+ SmallPtrSet<CodeGenRegisterClass*, 8> > SuperRegClasses;
public:
unsigned EnumValue;
@@ -128,8 +213,7 @@ namespace llvm {
MVT::SimpleValueType getValueTypeNum(unsigned VTNum) const {
if (VTNum < VTs.size())
return VTs[VTNum];
- assert(0 && "VTNum greater than number of ValueTypes in RegClass!");
- abort();
+ llvm_unreachable("VTNum greater than number of ValueTypes in RegClass!");
}
// Return true if this class contains the register.
@@ -150,14 +234,26 @@ namespace llvm {
// getSubClassWithSubReg - Returns the largest sub-class where all
// registers have a SubIdx sub-register.
- CodeGenRegisterClass *getSubClassWithSubReg(Record *SubIdx) const {
+ CodeGenRegisterClass*
+ getSubClassWithSubReg(CodeGenSubRegIndex *SubIdx) const {
return SubClassWithSubReg.lookup(SubIdx);
}
- void setSubClassWithSubReg(Record *SubIdx, CodeGenRegisterClass *SubRC) {
+ void setSubClassWithSubReg(CodeGenSubRegIndex *SubIdx,
+ CodeGenRegisterClass *SubRC) {
SubClassWithSubReg[SubIdx] = SubRC;
}
+ // getSuperRegClasses - Returns a bit vector of all register classes
+ // containing only SubIdx super-registers of this class.
+ void getSuperRegClasses(CodeGenSubRegIndex *SubIdx, BitVector &Out) const;
+
+ // addSuperRegClass - Add a class containing only SubIdx super-registers.
+ void addSuperRegClass(CodeGenSubRegIndex *SubIdx,
+ CodeGenRegisterClass *SuperRC) {
+ SuperRegClasses[SubIdx].insert(SuperRC);
+ }
+
// getSubClasses - Returns a constant BitVector of subclasses indexed by
// EnumValue.
// The SubClasses vector includes an entry for this class.
@@ -183,6 +279,9 @@ namespace llvm {
// getOrder(0).
const CodeGenRegister::Set &getMembers() const { return Members; }
+ // Populate a unique sorted list of units from a register set.
+ void buildRegUnitSet(std::vector<unsigned> &RegUnits) const;
+
CodeGenRegisterClass(CodeGenRegBank&, Record *R);
// A key representing the parts of a register class used for forming
@@ -217,16 +316,34 @@ namespace llvm {
static void computeSubClasses(CodeGenRegBank&);
};
+ // Each RegUnitSet is a sorted vector with a name.
+ struct RegUnitSet {
+ typedef std::vector<unsigned>::const_iterator iterator;
+
+ std::string Name;
+ std::vector<unsigned> Units;
+ };
+
// CodeGenRegBank - Represent a target's registers and the relations between
// them.
class CodeGenRegBank {
RecordKeeper &Records;
SetTheory Sets;
- std::vector<Record*> SubRegIndices;
+ // SubRegIndices.
+ std::vector<CodeGenSubRegIndex*> SubRegIndices;
+ DenseMap<Record*, CodeGenSubRegIndex*> Def2SubRegIdx;
unsigned NumNamedIndices;
+
+ // Registers.
std::vector<CodeGenRegister*> Registers;
DenseMap<Record*, CodeGenRegister*> Def2Reg;
+ unsigned NumNativeRegUnits;
+ unsigned NumRegUnits; // # native + adopted register units.
+
+ // Map each register unit to a weight (for register pressure).
+ // Includes native and adopted register units.
+ std::vector<unsigned> RegUnitWeights;
// Register classes.
std::vector<CodeGenRegisterClass*> RegClasses;
@@ -234,16 +351,38 @@ namespace llvm {
typedef std::map<CodeGenRegisterClass::Key, CodeGenRegisterClass*> RCKeyMap;
RCKeyMap Key2RC;
+ // Remember each unique set of register units. Initially, this contains a
+ // unique set for each register class. Similar sets are coalesced with
+ // pruneUnitSets and new supersets are inferred during computeRegUnitSets.
+ std::vector<RegUnitSet> RegUnitSets;
+
+ // Map RegisterClass index to the index of the RegUnitSet that contains the
+ // class's units and any inferred RegUnit supersets.
+ std::vector<std::vector<unsigned> > RegClassUnitSets;
+
// Add RC to *2RC maps.
void addToMaps(CodeGenRegisterClass*);
+ // Create a synthetic sub-class if it is missing.
+ CodeGenRegisterClass *getOrCreateSubClass(const CodeGenRegisterClass *RC,
+ const CodeGenRegister::Set *Membs,
+ StringRef Name);
+
// Infer missing register classes.
void computeInferredRegisterClasses();
+ void inferCommonSubClass(CodeGenRegisterClass *RC);
+ void inferSubClassWithSubReg(CodeGenRegisterClass *RC);
+ void inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
+ unsigned FirstSubRegRC = 0);
+
+ // Iteratively prune unit sets.
+ void pruneUnitSets();
+
+ // Compute a weight for each register unit created during getSubRegs.
+ void computeRegUnitWeights();
- // Composite SubRegIndex instances.
- // Map (SubRegIndex, SubRegIndex) -> SubRegIndex.
- typedef DenseMap<std::pair<Record*, Record*>, Record*> CompositeMap;
- CompositeMap Composite;
+ // Create a RegUnitSet for each RegClass and infer superclasses.
+ void computeRegUnitSets();
// Populate the Composite map from sub-register relationships.
void computeComposites();
@@ -256,20 +395,44 @@ namespace llvm {
// Sub-register indices. The first NumNamedIndices are defined by the user
// in the .td files. The rest are synthesized such that all sub-registers
// have a unique name.
- const std::vector<Record*> &getSubRegIndices() { return SubRegIndices; }
+ ArrayRef<CodeGenSubRegIndex*> getSubRegIndices() { return SubRegIndices; }
unsigned getNumNamedIndices() { return NumNamedIndices; }
- // Map a SubRegIndex Record to its enum value.
- unsigned getSubRegIndexNo(Record *idx);
+ // Find a SubRegIndex from its Record def.
+ CodeGenSubRegIndex *getSubRegIdx(Record*);
// Find or create a sub-register index representing the A+B composition.
- Record *getCompositeSubRegIndex(Record *A, Record *B, bool create = false);
+ CodeGenSubRegIndex *getCompositeSubRegIndex(CodeGenSubRegIndex *A,
+ CodeGenSubRegIndex *B);
const std::vector<CodeGenRegister*> &getRegisters() { return Registers; }
// Find a register from its Record def.
CodeGenRegister *getReg(Record*);
+ // Get a Register's index into the Registers array.
+ unsigned getRegIndex(const CodeGenRegister *Reg) const {
+ return Reg->EnumValue - 1;
+ }
+
+ // Create a new non-native register unit that can be adopted by a register
+ // to increase its pressure. Note that NumNativeRegUnits is not increased.
+ unsigned newRegUnit(unsigned Weight) {
+ if (!RegUnitWeights.empty()) {
+ assert(Weight && "should only add allocatable units");
+ RegUnitWeights.resize(NumRegUnits+1);
+ RegUnitWeights[NumRegUnits] = Weight;
+ }
+ return NumRegUnits++;
+ }
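+
+ // For example, normalizeWeight() may call newRegUnit(2) to create an
+ // adopted unit of weight 2; the returned ID is >= NumNativeRegUnits,
+ // so isNativeUnit() reports false for it.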
+
+ // Native units are the singular unit of a leaf register. Register aliasing
+ // is completely characterized by native units. Adopted units exist to give
+ // registers additional weight but don't affect aliasing.
+ bool isNativeUnit(unsigned RUID) {
+ return RUID < NumNativeRegUnits;
+ }
+
ArrayRef<CodeGenRegisterClass*> getRegClasses() const {
return RegClasses;
}
@@ -284,6 +447,41 @@ namespace llvm {
/// return the superclass. Otherwise return null.
const CodeGenRegisterClass* getRegClassForRegister(Record *R);
+ // Get a register unit's weight. Zero for unallocatable registers.
+ unsigned getRegUnitWeight(unsigned RUID) const {
+ return RegUnitWeights[RUID];
+ }
+
+ // Get the sum of unit weights.
+ unsigned getRegUnitSetWeight(const std::vector<unsigned> &Units) const {
+ unsigned Weight = 0;
+ for (std::vector<unsigned>::const_iterator
+ I = Units.begin(), E = Units.end(); I != E; ++I)
+ Weight += getRegUnitWeight(*I);
+ return Weight;
+ }
+
+ // Increase a RegUnitWeight.
+ void increaseRegUnitWeight(unsigned RUID, unsigned Inc) {
+ RegUnitWeights[RUID] += Inc;
+ }
+
+ // Get the number of register pressure dimensions.
+ unsigned getNumRegPressureSets() const { return RegUnitSets.size(); }
+
+ // Get a set of register unit IDs for a given dimension of pressure.
+ RegUnitSet getRegPressureSet(unsigned Idx) const {
+ return RegUnitSets[Idx];
+ }
+
+ // Get a list of pressure set IDs for a register class. Liveness of a
+ // register in this class impacts each pressure set in this list by the
+ // weight of the register. An exact solution requires all registers in a
+ // class to have the same weight, but that is not strictly guaranteed.
+ ArrayRef<unsigned> getRCPressureSetIDs(unsigned RCIdx) const {
+ return RegClassUnitSets[RCIdx];
+ }
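+
+ // Usage sketch: an emitter can expand the pressure a class contributes
+ // to each of its sets, e.g.:
+ //   ArrayRef<unsigned> PSets = getRCPressureSetIDs(RCIdx);
+ //   for (unsigned i = 0, e = PSets.size(); i != e; ++i)
+ //     W += getRegUnitSetWeight(getRegPressureSet(PSets[i]).Units);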
+
// Computed derived records such as missing sub-register indices.
void computeDerivedInfo();
@@ -295,6 +493,15 @@ namespace llvm {
// If R1 is a sub-register of R2, Map[R1] is a subset of Map[R2].
void computeOverlaps(std::map<const CodeGenRegister*,
CodeGenRegister::Set> &Map);
+
+ // Compute the set of registers completely covered by the registers in Regs.
+ // The returned BitVector will have a bit set for each register in Regs,
+ // all sub-registers, and all super-registers that are covered by the
+ // registers in Regs.
+ //
+ // This is used to compute the mask of call-preserved registers from a list
+ // of callee-saves.
+ BitVector computeCoveredRegisters(ArrayRef<Record*> Regs);
};
}
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
index 4a7bad7..cf67935 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -58,6 +58,7 @@ std::string llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::iAny: return "MVT::iAny";
case MVT::fAny: return "MVT::fAny";
case MVT::vAny: return "MVT::vAny";
+ case MVT::f16: return "MVT::f16";
case MVT::f32: return "MVT::f32";
case MVT::f64: return "MVT::f64";
case MVT::f80: return "MVT::f80";
@@ -82,6 +83,7 @@ std::string llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::v2i64: return "MVT::v2i64";
case MVT::v4i64: return "MVT::v4i64";
case MVT::v8i64: return "MVT::v8i64";
+ case MVT::v2f16: return "MVT::v2f16";
case MVT::v2f32: return "MVT::v2f32";
case MVT::v4f32: return "MVT::v4f32";
case MVT::v8f32: return "MVT::v8f32";
@@ -90,8 +92,8 @@ std::string llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::Metadata: return "MVT::Metadata";
case MVT::iPTR: return "MVT::iPTR";
case MVT::iPTRAny: return "MVT::iPTRAny";
- case MVT::untyped: return "MVT::untyped";
- default: assert(0 && "ILLEGAL VALUE TYPE!"); return "";
+ case MVT::Untyped: return "MVT::Untyped";
+ default: llvm_unreachable("ILLEGAL VALUE TYPE!");
}
}
@@ -149,6 +151,26 @@ Record *CodeGenTarget::getAsmParser() const {
return LI[AsmParserNum];
}
+/// getAsmParserVariant - Return the AssemblyParserVariant definition for
+/// this target.
+///
+Record *CodeGenTarget::getAsmParserVariant(unsigned i) const {
+ std::vector<Record*> LI =
+ TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
+ if (i >= LI.size())
+ throw "Target does not have an AsmParserVariant #" + utostr(i) + "!";
+ return LI[i];
+}
+
+/// getAsmParserVariantCount - Return the number of AssemblyParserVariant
+/// definitions available for this target.
+///
+unsigned CodeGenTarget::getAsmParserVariantCount() const {
+ std::vector<Record*> LI =
+ TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
+ return LI.size();
+}
+
/// getAsmWriter - Return the AssemblyWriter definition for this target.
///
Record *CodeGenTarget::getAsmWriter() const {
@@ -267,6 +289,7 @@ void CodeGenTarget::ComputeInstrsByEnum() const {
"DBG_VALUE",
"REG_SEQUENCE",
"COPY",
+ "BUNDLE",
0
};
const DenseMap<const Record*, CodeGenInstruction*> &Insts = getInstructions();
@@ -492,7 +515,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
unsigned ArgNo = Property->getValueAsInt("ArgNo");
ArgumentAttributes.push_back(std::make_pair(ArgNo, NoCapture));
} else
- assert(0 && "Unknown property!");
+ llvm_unreachable("Unknown property!");
}
// Sort the argument attributes for later benefit.
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.h b/contrib/llvm/utils/TableGen/CodeGenTarget.h
index 730216c..85463da 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.h
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.h
@@ -91,6 +91,16 @@ public:
///
Record *getAsmParser() const;
+ /// getAsmParserVariant - Return the AssemblyParserVariant definition for
+ /// this target.
+ ///
+ Record *getAsmParserVariant(unsigned i) const;
+
+ /// getAsmParserVariantCount - Return the number of AssemblyParserVariant
+ /// definitions available for this target.
+ ///
+ unsigned getAsmParserVariantCount() const;
+
/// getAsmWriter - Return the AssemblyWriter definition for this target.
///
Record *getAsmWriter() const;
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcher.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcher.cpp
index 1367e8d..bd77907 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcher.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcher.cpp
@@ -15,6 +15,8 @@
#include "llvm/ADT/StringExtras.h"
using namespace llvm;
+void Matcher::anchor() { }
+
void Matcher::dump() const {
print(errs(), 0);
}
@@ -324,6 +326,10 @@ unsigned EmitNodeMatcherCommon::getHashImpl() const {
}
+void EmitNodeMatcher::anchor() { }
+
+void MorphNodeToMatcher::anchor() { }
+
unsigned MarkGlueResultsMatcher::getHashImpl() const {
return HashUnsigneds(GlueResultNodes.begin(), GlueResultNodes.end());
}
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcher.h b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
index dcb8da7..99ebf98 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcher.h
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcher.h
@@ -41,6 +41,7 @@ class Matcher {
// The next matcher node that is executed after this one. Null if this is the
// last stage of a match.
OwningPtr<Matcher> Next;
+ virtual void anchor();
public:
enum KindTy {
// Matcher state manipulation.
@@ -1011,6 +1012,7 @@ private:
/// EmitNodeMatcher - This signals a successful match and generates a node.
class EmitNodeMatcher : public EmitNodeMatcherCommon {
+ virtual void anchor();
unsigned FirstResultSlot;
public:
EmitNodeMatcher(const std::string &opcodeName,
@@ -1033,6 +1035,7 @@ public:
};
class MorphNodeToMatcher : public EmitNodeMatcherCommon {
+ virtual void anchor();
const PatternToMatch &Pattern;
public:
MorphNodeToMatcher(const std::string &opcodeName,
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index 3b65b2a..bd425a9 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -573,8 +573,7 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
return 2 + NumResultBytes;
}
}
- assert(0 && "Unreachable");
- return 0;
+ llvm_unreachable("Unreachable");
}
/// EmitMatcherList - Emit the bytes for the specified matcher subtree.
@@ -601,7 +600,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
if (!PatternPredicates.empty()) {
OS << "bool CheckPatternPredicate(unsigned PredNo) const {\n";
OS << " switch (PredNo) {\n";
- OS << " default: assert(0 && \"Invalid predicate in table?\");\n";
+ OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n";
for (unsigned i = 0, e = PatternPredicates.size(); i != e; ++i)
OS << " case " << i << ": return " << PatternPredicates[i] << ";\n";
OS << " }\n";
@@ -619,7 +618,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
if (!NodePredicates.empty()) {
OS << "bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {\n";
OS << " switch (PredNo) {\n";
- OS << " default: assert(0 && \"Invalid predicate in table?\");\n";
+ OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n";
for (unsigned i = 0, e = NodePredicates.size(); i != e; ++i) {
// Emit the predicate code corresponding to this pattern.
TreePredicateFn PredFn = NodePredicates[i];
@@ -641,7 +640,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
OS << " SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {\n";
OS << " unsigned NextRes = Result.size();\n";
OS << " switch (PatternNo) {\n";
- OS << " default: assert(0 && \"Invalid pattern # in table?\");\n";
+ OS << " default: llvm_unreachable(\"Invalid pattern # in table?\");\n";
for (unsigned i = 0, e = ComplexPatterns.size(); i != e; ++i) {
const ComplexPattern &P = *ComplexPatterns[i];
unsigned NumOps = P.getNumOperands();
@@ -679,7 +678,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
if (!NodeXForms.empty()) {
OS << "SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {\n";
OS << " switch (XFormNo) {\n";
- OS << " default: assert(0 && \"Invalid xform # in table?\");\n";
+ OS << " default: llvm_unreachable(\"Invalid xform # in table?\");\n";
// FIXME: The node xform could take SDValue's instead of SDNode*'s.
for (unsigned i = 0, e = NodeXForms.size(); i != e; ++i) {
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index 49ad956..2ac7b87 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -217,7 +217,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue());
if (DI == 0) {
- errs() << "Unknown leaf kind: " << *DI << "\n";
+ errs() << "Unknown leaf kind: " << *N << "\n";
abort();
}
diff --git a/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
new file mode 100644
index 0000000..4abf54e
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.cpp
@@ -0,0 +1,512 @@
+//===- DFAPacketizerEmitter.cpp - Packetization DFA for a VLIW machine-----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class parses the Schedule.td file and produces an API that can be used
+// to reason about whether an instruction can be added to a packet on a VLIW
+// architecture. The class internally generates a deterministic finite
+// automaton (DFA) that models all possible mappings of machine instructions
+// to functional units as instructions are added to a packet.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TableGen/Record.h"
+#include "CodeGenTarget.h"
+#include "DFAPacketizerEmitter.h"
+#include <list>
+
+using namespace llvm;
+
+//
+//
+// State represents the usage of machine resources if the packet contains
+// a set of instruction classes.
+//
+// Specifically, currentState is a set of bit-masks.
+// The nth bit in a bit-mask indicates whether the nth resource is being used
+// by this state. The set of bit-masks in a state represent the different
+// possible outcomes of transitioning to this state.
+// For example: consider a two-resource architecture: resource L and resource M
+// with three instruction classes: L, M, and L_or_M.
+// From the initial state (currentState = 0x00), if we add instruction class
+// L_or_M we will transition to a state with currentState = [0x01, 0x10]. This
+// represents the possible resource states that can result from adding an
+// L_or_M instruction.
+//
+// Another way of thinking about this transition is we are mapping an NDFA with
+// two states [0x01] and [0x10] into a DFA with a single state [0x01, 0x10].
+//
+//
+namespace {
+class State {
+ public:
+ static int currentStateNum;
+ int stateNum;
+ bool isInitial;
+ std::set<unsigned> stateInfo;
+
+ State();
+ State(const State &S);
+
+ //
+ // canAddInsnClass - Returns true if an instruction of type InsnClass is a
+ // valid transition from this state, i.e., can an instruction of type InsnClass
+ // be added to the packet represented by this state.
+ //
+ // PossibleStates is the set of valid resource states that ensue from valid
+ // transitions.
+ //
+ bool canAddInsnClass(unsigned InsnClass, std::set<unsigned> &PossibleStates);
+};
+} // End anonymous namespace.
+
+
+namespace {
+struct Transition {
+ public:
+ static int currentTransitionNum;
+ int transitionNum;
+ State *from;
+ unsigned input;
+ State *to;
+
+ Transition(State *from_, unsigned input_, State *to_);
+};
+} // End anonymous namespace.
+
+
+//
+// Comparators to keep set of states sorted.
+//
+namespace {
+struct ltState {
+ bool operator()(const State *s1, const State *s2) const;
+};
+} // End anonymous namespace.
+
+
+//
+// class DFA: deterministic finite automaton for processor resource tracking.
+//
+namespace {
+class DFA {
+public:
+ DFA();
+
+ // Set of states. Need to keep this sorted to emit the transition table.
+ std::set<State*, ltState> states;
+
+ // Map from a state to the list of transitions with that state as source.
+ std::map<State*, SmallVector<Transition*, 16>, ltState> stateTransitions;
+ State *currentState;
+
+ // Highest valued Input seen.
+ unsigned LargestInput;
+
+ //
+ // Modify the DFA.
+ //
+ void initialize();
+ void addState(State *);
+ void addTransition(Transition *);
+
+ //
+ // getTransition - Return the state when a transition is made from
+ // State From with Input I. If a transition is not found, return NULL.
+ //
+ State *getTransition(State *, unsigned);
+
+ //
+ // isValidTransition: Predicate that checks if there is a valid transition
+ // from state From on input InsnClass.
+ //
+ bool isValidTransition(State *From, unsigned InsnClass);
+
+ //
+ // writeTable: Print out a table representing the DFA.
+ //
+ void writeTableAndAPI(raw_ostream &OS, const std::string &ClassName);
+};
+} // End anonymous namespace.
+
+
+//
+// Constructors for State, Transition, and DFA
+//
+State::State() :
+ stateNum(currentStateNum++), isInitial(false) {}
+
+
+State::State(const State &S) :
+ stateNum(currentStateNum++), isInitial(S.isInitial),
+ stateInfo(S.stateInfo) {}
+
+
+Transition::Transition(State *from_, unsigned input_, State *to_) :
+ transitionNum(currentTransitionNum++), from(from_), input(input_),
+ to(to_) {}
+
+
+DFA::DFA() :
+ LargestInput(0) {}
+
+
+bool ltState::operator()(const State *s1, const State *s2) const {
+ return (s1->stateNum < s2->stateNum);
+}
+
+
+//
+// canAddInsnClass - Returns true if an instruction of type InsnClass is a
+// valid transition from this state, i.e., can an instruction of type InsnClass
+// be added to the packet represented by this state.
+//
+// PossibleStates is the set of valid resource states that ensue from valid
+// transitions.
+//
+bool State::canAddInsnClass(unsigned InsnClass,
+ std::set<unsigned> &PossibleStates) {
+ //
+ // Iterate over all resource states in currentState.
+ //
+ bool AddedState = false;
+
+ for (std::set<unsigned>::iterator SI = stateInfo.begin();
+ SI != stateInfo.end(); ++SI) {
+ unsigned thisState = *SI;
+
+ //
+ // Iterate over all possible resources used in InsnClass.
+ // For example: for InsnClass = 0x11, all resources = {0x01, 0x10}.
+ //
+
+ DenseSet<unsigned> VisitedResourceStates;
+ for (unsigned int j = 0; j < sizeof(InsnClass) * 8; ++j) {
+ if ((0x1 << j) & InsnClass) {
+ //
+ // For each possible resource used in InsnClass, generate the
+ // resource state if that resource was used.
+ //
+ unsigned ResultingResourceState = thisState | (0x1 << j);
+ //
+ // Check if the resulting resource state can be accommodated in this
+ // packet.
+ // We compute thisState OR the bit for this resource position.
+ // If the result of the OR differs from thisState, it implies
+ // that there is at least one resource that can be used to schedule
+ // InsnClass in the current packet.
+ // Insert ResultingResourceState into PossibleStates only if we haven't
+ // processed ResultingResourceState before.
+ //
+ if ((ResultingResourceState != thisState) &&
+ (VisitedResourceStates.count(ResultingResourceState) == 0)) {
+ VisitedResourceStates.insert(ResultingResourceState);
+ PossibleStates.insert(ResultingResourceState);
+ AddedState = true;
+ }
+ }
+ }
+ }
+
+ return AddedState;
+}
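+
+// Worked example: with thisState = 0x01 and InsnClass = 0x11 (L_or_M),
+// j = 0 gives 0x01 | 0x01 == thisState (no free resource on that path),
+// while j = 4 gives 0x01 | 0x10 = 0x11 != thisState, so 0x11 is added to
+// PossibleStates and the function returns true.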
+
+
+void DFA::initialize() {
+ currentState->isInitial = true;
+}
+
+
+void DFA::addState(State *S) {
+ assert(!states.count(S) && "State already exists");
+ states.insert(S);
+}
+
+
+void DFA::addTransition(Transition *T) {
+ // Update LargestInput.
+ if (T->input > LargestInput)
+ LargestInput = T->input;
+
+ // Add the new transition.
+ stateTransitions[T->from].push_back(T);
+}
+
+
+//
+// getTransition - Return the state when a transition is made from
+// State From with Input I. If a transition is not found, return NULL.
+//
+State *DFA::getTransition(State *From, unsigned I) {
+ // Do we have a transition from state From?
+ if (!stateTransitions.count(From))
+ return NULL;
+
+ // Do we have a transition from state From with Input I?
+ for (SmallVector<Transition*, 16>::iterator VI =
+ stateTransitions[From].begin();
+ VI != stateTransitions[From].end(); ++VI)
+ if ((*VI)->input == I)
+ return (*VI)->to;
+
+ return NULL;
+}
+
+
+bool DFA::isValidTransition(State *From, unsigned InsnClass) {
+ return (getTransition(From, InsnClass) != NULL);
+}
+
+
+int State::currentStateNum = 0;
+int Transition::currentTransitionNum = 0;
+
+DFAGen::DFAGen(RecordKeeper &R):
+ TargetName(CodeGenTarget(R).getName()),
+ allInsnClasses(), Records(R) {}
+
+
+//
+// writeTableAndAPI - Print out a table representing the DFA and the
+// associated API to create a DFA packetizer.
+//
+// Format:
+// DFAStateInputTable[][2] = pairs of <Input, Transition> for all valid
+// transitions.
+// DFAStateEntryTable[i] = Index of the first entry in DFAStateInputTable for
+// the ith state.
+//
+//
+void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) {
+ std::set<State*, ltState>::iterator SI = states.begin();
+ // This table provides a map to the beginning of the transitions for State s
+ // in DFAStateInputTable.
+ std::vector<int> StateEntry(states.size());
+
+ OS << "namespace llvm {\n\n";
+ OS << "const int " << TargetName << "DFAStateInputTable[][2] = {\n";
+
+ // Tracks the total valid transitions encountered so far. It is used
+ // to construct the StateEntry table.
+ int ValidTransitions = 0;
+ for (unsigned i = 0; i < states.size(); ++i, ++SI) {
+ StateEntry[i] = ValidTransitions;
+ for (unsigned j = 0; j <= LargestInput; ++j) {
+ assert (((*SI)->stateNum == (int) i) && "Mismatch in state numbers");
+ if (!isValidTransition(*SI, j))
+ continue;
+
+ OS << "{" << j << ", "
+ << getTransition(*SI, j)->stateNum
+ << "}, ";
+ ++ValidTransitions;
+ }
+
+ // If there are no valid transitions from this state, we need a sentinel
+ // transition.
+ if (ValidTransitions == StateEntry[i]) {
+ OS << "{-1, -1},";
+ ++ValidTransitions;
+ }
+
+ OS << "\n";
+ }
+ OS << "};\n\n";
+ OS << "const unsigned int " << TargetName << "DFAStateEntryTable[] = {\n";
+
+ // Emit, for each state, the index of its first transition in
+ // DFAStateInputTable; entries in that table are {Input, NextState} pairs.
+ for (unsigned i = 0; i < states.size(); ++i)
+ OS << StateEntry[i] << ", ";
+
+ OS << "\n};\n";
+ OS << "} // namespace\n";
+
+
+ //
+ // Emit DFA Packetizer tables if the target is a VLIW machine.
+ //
+ std::string SubTargetClassName = TargetName + "GenSubtargetInfo";
+ OS << "\n" << "#include \"llvm/CodeGen/DFAPacketizer.h\"\n";
+ OS << "namespace llvm {\n";
+ OS << "DFAPacketizer *" << SubTargetClassName << "::"
+ << "createDFAPacketizer(const InstrItineraryData *IID) const {\n"
+ << " return new DFAPacketizer(IID, " << TargetName
+ << "DFAStateInputTable, " << TargetName << "DFAStateEntryTable);\n}\n\n";
+ OS << "} // End llvm namespace \n";
+}
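+
+// For a target named Foo, this emits FooDFAStateInputTable holding one
+// {Input, NextState} pair per valid transition, FooDFAStateEntryTable
+// giving each state's first index into that table, and a
+// FooGenSubtargetInfo::createDFAPacketizer() factory wrapping both.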
+
+
+//
+// collectAllInsnClasses - Populate allInsnClasses which is a set of units
+// used in each stage.
+//
+void DFAGen::collectAllInsnClasses(const std::string &Name,
+ Record *ItinData,
+ unsigned &NStages,
+ raw_ostream &OS) {
+ // Collect processor itineraries.
+ std::vector<Record*> ProcItinList =
+ Records.getAllDerivedDefinitions("ProcessorItineraries");
+
+ // If there are no itineraries other than the default, don't bother.
+ if (ProcItinList.size() < 2)
+ return;
+ std::map<std::string, unsigned> NameToBitsMap;
+
+ // Parse functional units for all the itineraries.
+ for (unsigned i = 0, N = ProcItinList.size(); i < N; ++i) {
+ Record *Proc = ProcItinList[i];
+ std::vector<Record*> FUs = Proc->getValueAsListOfDefs("FU");
+
+ // Convert macros to bits for each stage.
+ for (unsigned i = 0, N = FUs.size(); i < N; ++i)
+ NameToBitsMap[FUs[i]->getName()] = (unsigned) (1U << i);
+ }
+
+ const std::vector<Record*> &StageList =
+ ItinData->getValueAsListOfDefs("Stages");
+
+ // The number of stages.
+ NStages = StageList.size();
+
+ // Bit-mask of the units used, accumulated across stages.
+ unsigned UnitBitValue = 0;
+
+ // Compute the bitwise OR of the units used in each stage.
+ for (unsigned i = 0; i < NStages; ++i) {
+ const Record *Stage = StageList[i];
+
+ // Get unit list.
+ const std::vector<Record*> &UnitList =
+ Stage->getValueAsListOfDefs("Units");
+
+ for (unsigned j = 0, M = UnitList.size(); j < M; ++j) {
+ // Conduct bitwise or.
+ std::string UnitName = UnitList[j]->getName();
+ assert(NameToBitsMap.count(UnitName));
+ UnitBitValue |= NameToBitsMap[UnitName];
+ }
+
+ if (UnitBitValue != 0)
+ allInsnClasses.insert(UnitBitValue);
+ }
+}
+
+
+//
+// Run the worklist algorithm to generate the DFA.
+//
+void DFAGen::run(raw_ostream &OS) {
+ EmitSourceFileHeader("Target DFA Packetizer Tables", OS);
+
+ // Collect processor itineraries.
+ std::vector<Record*> ProcItinList =
+ Records.getAllDerivedDefinitions("ProcessorItineraries");
+
+ //
+ // Collect the instruction classes.
+ //
+ for (unsigned i = 0, N = ProcItinList.size(); i < N; i++) {
+ Record *Proc = ProcItinList[i];
+
+ // Get processor itinerary name.
+ const std::string &Name = Proc->getName();
+
+ // Skip default.
+ if (Name == "NoItineraries")
+ continue;
+
+ // Sanity check for at least one instruction itinerary class.
+ unsigned NItinClasses =
+ Records.getAllDerivedDefinitions("InstrItinClass").size();
+ if (NItinClasses == 0)
+ return;
+
+ // Get itinerary data list.
+ std::vector<Record*> ItinDataList = Proc->getValueAsListOfDefs("IID");
+
+ // Collect instruction classes for all itinerary data.
+ for (unsigned j = 0, M = ItinDataList.size(); j < M; j++) {
+ Record *ItinData = ItinDataList[j];
+ unsigned NStages;
+ collectAllInsnClasses(Name, ItinData, NStages, OS);
+ }
+ }
+
+
+ //
+ // Run a worklist algorithm to generate the DFA.
+ //
+ DFA D;
+ State *Initial = new State;
+ Initial->isInitial = true;
+ Initial->stateInfo.insert(0x0);
+ D.addState(Initial);
+ SmallVector<State*, 32> WorkList;
+ std::map<std::set<unsigned>, State*> Visited;
+
+ WorkList.push_back(Initial);
+
+ //
+ // Worklist algorithm to create a DFA for processor resource tracking.
+ // C = {set of InsnClasses}
+ // Begin with initial node in worklist. Initial node does not have
+ // any consumed resources,
+ // ResourceState = 0x0
+ // Visited = {}
+ // While worklist != empty
+ // S = first element of worklist
+ // For every instruction class C
+ // if we can accommodate C in S:
+ // S' = state with resource states = {S Union C}
+ // Add a new transition: S x C -> S'
+ // If S' is not in Visited:
+ // Add S' to worklist
+ // Add S' to Visited
+ //
+ while (!WorkList.empty()) {
+ State *current = WorkList.pop_back_val();
+ for (DenseSet<unsigned>::iterator CI = allInsnClasses.begin(),
+ CE = allInsnClasses.end(); CI != CE; ++CI) {
+ unsigned InsnClass = *CI;
+
+ std::set<unsigned> NewStateResources;
+ //
+ // If we haven't already created a transition for this input
+ // and the state can accommodate this InsnClass, create a transition.
+ //
+ if (!D.getTransition(current, InsnClass) &&
+ current->canAddInsnClass(InsnClass, NewStateResources)) {
+ State *NewState = NULL;
+
+ //
+ // If we have seen this state before, then do not create a new state.
+ //
+ std::map<std::set<unsigned>, State*>::iterator VI;
+ if ((VI = Visited.find(NewStateResources)) != Visited.end())
+ NewState = VI->second;
+ else {
+ NewState = new State;
+ NewState->stateInfo = NewStateResources;
+ D.addState(NewState);
+ Visited[NewStateResources] = NewState;
+ WorkList.push_back(NewState);
+ }
+
+ Transition *NewTransition = new Transition(current, InsnClass,
+ NewState);
+ D.addTransition(NewTransition);
+ }
+ }
+ }
+
+ // Print out the table.
+ D.writeTableAndAPI(OS, TargetName);
+}
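+
+// Worked example (two resources L and M, with classes L = 0x01, M = 0x10,
+// and L_or_M = 0x11): from the initial state {0x00}, adding L_or_M yields
+// the state {0x01, 0x10}; adding L_or_M again from there yields {0x11},
+// which accommodates no further classes, so that branch of the worklist
+// terminates once every class has been rejected from {0x11}.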
diff --git a/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h
new file mode 100644
index 0000000..1727150
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/DFAPacketizerEmitter.h
@@ -0,0 +1,52 @@
+//===- DFAPacketizerEmitter.h - Packetization DFA for a VLIW machine-------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class parses the Schedule.td file and produces an API that can be used
+// to reason about whether an instruction can be added to a packet on a VLIW
+// architecture. The class internally generates a deterministic finite
+// automaton (DFA) that models all possible mappings of machine instructions
+// to functional units as instructions are added to a packet.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <map>
+#include <string>
+
+namespace llvm {
+//
+// class DFAGen: class that generates and prints out the DFA for resource
+// tracking.
+//
+class DFAGen : public TableGenBackend {
+private:
+ std::string TargetName;
+ //
+ // allInsnClasses is the set of all possible resources consumed by an
+ // InstrStage.
+ //
+ DenseSet<unsigned> allInsnClasses;
+ RecordKeeper &Records;
+
+public:
+ DFAGen(RecordKeeper &R);
+
+ //
+ // collectAllInsnClasses: Populate allInsnClasses which is a set of units
+ // used in each stage.
+ //
+ void collectAllInsnClasses(const std::string &Name,
+ Record *ItinData,
+ unsigned &NStages,
+ raw_ostream &OS);
+
+ void run(raw_ostream &OS);
+};
+}
diff --git a/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp b/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
index ff314e9..4650197 100644
--- a/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DisassemblerEmitter.cpp
@@ -11,7 +11,6 @@
#include "CodeGenTarget.h"
#include "X86DisassemblerTables.h"
#include "X86RecognizableInstr.h"
-#include "ARMDecoderEmitter.h"
#include "FixedLenDecoderEmitter.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
diff --git a/contrib/llvm/utils/TableGen/EDEmitter.cpp b/contrib/llvm/utils/TableGen/EDEmitter.cpp
index abef70e..3809a45 100644
--- a/contrib/llvm/utils/TableGen/EDEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/EDEmitter.cpp
@@ -287,6 +287,7 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
IMM("i64i8imm");
IMM("i64i32imm");
IMM("SSECC");
+ IMM("AVXCC");
// all R, I, R, I, R
MEM("i8mem");
@@ -519,6 +520,8 @@ static void X86ExtractSemantics(
// ignore (doesn't go anywhere we know about)
} else if (name.find("VMCALL") != name.npos) {
// ignore (rather different semantics than a regular call)
+ } else if (name.find("VMMCALL") != name.npos) {
+ // ignore (rather different semantics than a regular call)
} else if (name.find("FAR") != name.npos && name.find("i") != name.npos) {
CALL("off");
} else {
@@ -567,12 +570,23 @@ static int ARMFlagFromOpName(LiteralConstantEmitter *type,
REG("DPR");
REG("DPR_VFP2");
REG("DPR_8");
+ REG("DPair");
REG("SPR");
REG("QPR");
REG("QQPR");
REG("QQQQPR");
+ REG("VecListOneD");
+ REG("VecListDPair");
+ REG("VecListDPairSpaced");
+ REG("VecListThreeD");
+ REG("VecListFourD");
+ REG("VecListOneDAllLanes");
+ REG("VecListDPairAllLanes");
+ REG("VecListDPairSpacedAllLanes");
IMM("i32imm");
+ IMM("fbits16");
+ IMM("fbits32");
IMM("i32imm_hilo16");
IMM("bf_inv_mask_imm");
IMM("lsb_pos_imm");
@@ -597,6 +611,20 @@ static int ARMFlagFromOpName(LiteralConstantEmitter *type,
IMM("imm1_16");
IMM("imm1_32");
IMM("nModImm");
+ IMM("nImmSplatI8");
+ IMM("nImmSplatI16");
+ IMM("nImmSplatI32");
+ IMM("nImmSplatI64");
+ IMM("nImmVMOVI32");
+ IMM("nImmVMOVF32");
+ IMM("imm8");
+ IMM("imm16");
+ IMM("imm32");
+ IMM("imm1_7");
+ IMM("imm1_15");
+ IMM("imm1_31");
+ IMM("imm0_1");
+ IMM("imm0_3");
IMM("imm0_7");
IMM("imm0_15");
IMM("imm0_255");
@@ -735,7 +763,7 @@ static void ARMPopulateOperands(
errs() << "Operand type: " << rec.getName() << '\n';
errs() << "Operand name: " << operandInfo.Name << '\n';
errs() << "Instruction name: " << inst.TheDef->getName() << '\n';
- llvm_unreachable("Unhandled type");
+ throw("Unhandled type in EDEmitter");
}
}
}
@@ -956,11 +984,7 @@ void EDEmitter::run(raw_ostream &o) {
emitCommonEnums(o, i);
- o << "namespace {\n";
-
- o << "llvm::EDInstInfo instInfo" << target.getName().c_str() << "[] = ";
+ o << "static const llvm::EDInstInfo instInfo" << target.getName() << "[] = ";
infoArray.emit(o, i);
o << ";" << "\n";
-
- o << "}\n";
}
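
The final EDEmitter hunk replaces an anonymous-namespace wrapper around the emitted instInfo arrays with a static const definition. Both forms give the array internal linkage, but the const-qualified version can additionally be placed in read-only storage. A minimal sketch of the two shapes, with made-up array contents and names:

  // Before: internal linkage via an anonymous namespace, mutable data.
  namespace {
    int instInfoFoo[] = { 1, 2, 3 };
  }

  // After (the shape now generated): still internal linkage, but const,
  // so the table can live in a read-only section.
  static const int instInfoBar[] = { 1, 2, 3 };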
diff --git a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
index 9fdc2e3..e8dad77 100644
--- a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -21,7 +21,6 @@
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
@@ -280,7 +279,7 @@ struct OperandsSignature {
} else if (Operands[i].isImm()) {
OS << "uint64_t imm" << i;
} else if (Operands[i].isFP()) {
- OS << "ConstantFP *f" << i;
+ OS << "const ConstantFP *f" << i;
} else {
llvm_unreachable("Unknown operand kind!");
}
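
The FastISelEmitter change above is a const-correctness fix: the generated helpers only read the floating-point operand, so the signature can take a pointer-to-const. A tiny self-contained illustration, with ConstantFP mocked here (in LLVM it is llvm::ConstantFP):

  #include <cstdio>

  // Mock of llvm::ConstantFP for illustration only.
  struct ConstantFP { double V; double getValue() const { return V; } };

  // The emitted helper only reads the operand, so pointer-to-const suffices.
  static unsigned fastEmit_f(const ConstantFP *F) {
    return F->getValue() < 0.0 ? 1u : 0u;
  }

  int main() {
    const ConstantFP C = { -1.0 };  // a const object can now be passed directly
    std::printf("%u\n", fastEmit_f(&C));
    return 0;
  }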
diff --git a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
index 02b966a..9b676f2 100644
--- a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.cpp
@@ -17,6 +17,7 @@
#include "FixedLenDecoderEmitter.h"
#include "CodeGenTarget.h"
#include "llvm/TableGen/Record.h"
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -48,7 +49,7 @@ static bool ValueNotSet(bit_value_t V) {
static int Value(bit_value_t V) {
return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1);
}
-static bit_value_t bitFromBits(BitsInit &bits, unsigned index) {
+static bit_value_t bitFromBits(const BitsInit &bits, unsigned index) {
if (BitInit *bit = dynamic_cast<BitInit*>(bits.getBit(index)))
return bit->getValue() ? BIT_TRUE : BIT_FALSE;
@@ -56,7 +57,7 @@ static bit_value_t bitFromBits(BitsInit &bits, unsigned index) {
return BIT_UNSET;
}
// Prints the bit value for each position.
-static void dumpBits(raw_ostream &o, BitsInit &bits) {
+static void dumpBits(raw_ostream &o, const BitsInit &bits) {
unsigned index;
for (index = bits.getNumBits(); index > 0; index--) {
@@ -71,7 +72,7 @@ static void dumpBits(raw_ostream &o, BitsInit &bits) {
o << "_";
break;
default:
- assert(0 && "unexpected return value from bitFromBits");
+ llvm_unreachable("unexpected return value from bitFromBits");
}
}
}
@@ -125,7 +126,7 @@ typedef std::vector<bit_value_t> insn_t;
/// version and return the Opcode since the two have the same Asm format string.
class Filter {
protected:
- FilterChooser *Owner; // points to the FilterChooser who owns this filter
+ const FilterChooser *Owner;// points to the FilterChooser who owns this filter
unsigned StartBit; // the starting bit position
unsigned NumBits; // number of bits to filter
bool Mixed; // a mixed region contains both set and unset bits
@@ -137,7 +138,7 @@ protected:
std::vector<unsigned> VariableInstructions;
// Map of well-known segment value to its delegate.
- std::map<unsigned, FilterChooser*> FilterChooserMap;
+ std::map<unsigned, const FilterChooser*> FilterChooserMap;
// Number of instructions which fall under FilteredInstructions category.
unsigned NumFiltered;
@@ -145,19 +146,15 @@ protected:
// Keeps track of the last opcode in the filtered bucket.
unsigned LastOpcFiltered;
- // Number of instructions which fall under VariableInstructions category.
- unsigned NumVariable;
-
public:
- unsigned getNumFiltered() { return NumFiltered; }
- unsigned getNumVariable() { return NumVariable; }
- unsigned getSingletonOpc() {
+ unsigned getNumFiltered() const { return NumFiltered; }
+ unsigned getSingletonOpc() const {
assert(NumFiltered == 1);
return LastOpcFiltered;
}
// Return the filter chooser for the group of instructions without constant
// segment values.
- FilterChooser &getVariableFC() {
+ const FilterChooser &getVariableFC() const {
assert(NumFiltered == 1);
assert(FilterChooserMap.size() == 1);
return *(FilterChooserMap.find((unsigned)-1)->second);
@@ -177,7 +174,7 @@ public:
void recurse();
// Emit code to decode instructions given a segment or segments of bits.
- void emit(raw_ostream &o, unsigned &Indentation);
+ void emit(raw_ostream &o, unsigned &Indentation) const;
// Returns the number of fanout produced by the filter. More fanout implies
// the filter distinguishes more categories of instructions.
@@ -217,10 +214,10 @@ protected:
const std::vector<const CodeGenInstruction*> &AllInstructions;
// Vector of uid's for this filter chooser to work on.
- const std::vector<unsigned> Opcodes;
+ const std::vector<unsigned> &Opcodes;
// Lookup table for the operand decoding of instructions.
- std::map<unsigned, std::vector<OperandInfo> > &Operands;
+ const std::map<unsigned, std::vector<OperandInfo> > &Operands;
// Vector of candidate filters.
std::vector<Filter> Filters;
@@ -230,7 +227,7 @@ protected:
std::vector<bit_value_t> FilterBitValues;
// Links to the FilterChooser above us in the decoding tree.
- FilterChooser *Parent;
+ const FilterChooser *Parent;
// Index of the best filter from Filters.
int BestIndex;
@@ -242,19 +239,19 @@ protected:
const FixedLenDecoderEmitter *Emitter;
public:
- FilterChooser(const FilterChooser &FC) :
- AllInstructions(FC.AllInstructions), Opcodes(FC.Opcodes),
+ FilterChooser(const FilterChooser &FC)
+ : AllInstructions(FC.AllInstructions), Opcodes(FC.Opcodes),
Operands(FC.Operands), Filters(FC.Filters),
FilterBitValues(FC.FilterBitValues), Parent(FC.Parent),
- BestIndex(FC.BestIndex), BitWidth(FC.BitWidth),
- Emitter(FC.Emitter) { }
+ BestIndex(FC.BestIndex), BitWidth(FC.BitWidth),
+ Emitter(FC.Emitter) { }
FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
const std::vector<unsigned> &IDs,
- std::map<unsigned, std::vector<OperandInfo> > &Ops,
+ const std::map<unsigned, std::vector<OperandInfo> > &Ops,
unsigned BW,
- const FixedLenDecoderEmitter *E) :
- AllInstructions(Insts), Opcodes(IDs), Operands(Ops), Filters(),
+ const FixedLenDecoderEmitter *E)
+ : AllInstructions(Insts), Opcodes(IDs), Operands(Ops), Filters(),
Parent(NULL), BestIndex(-1), BitWidth(BW), Emitter(E) {
for (unsigned i = 0; i < BitWidth; ++i)
FilterBitValues.push_back(BIT_UNFILTERED);
@@ -264,10 +261,10 @@ public:
FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
const std::vector<unsigned> &IDs,
- std::map<unsigned, std::vector<OperandInfo> > &Ops,
- std::vector<bit_value_t> &ParentFilterBitValues,
- FilterChooser &parent) :
- AllInstructions(Insts), Opcodes(IDs), Operands(Ops),
+ const std::map<unsigned, std::vector<OperandInfo> > &Ops,
+ const std::vector<bit_value_t> &ParentFilterBitValues,
+ const FilterChooser &parent)
+ : AllInstructions(Insts), Opcodes(IDs), Operands(Ops),
Filters(), FilterBitValues(ParentFilterBitValues),
Parent(&parent), BestIndex(-1), BitWidth(parent.BitWidth),
Emitter(parent.Emitter) {
@@ -275,18 +272,31 @@ public:
}
// The top level filter chooser has NULL as its parent.
- bool isTopLevel() { return Parent == NULL; }
+ bool isTopLevel() const { return Parent == NULL; }
// Emit the top level typedef and decodeInstruction() function.
- void emitTop(raw_ostream &o, unsigned Indentation, std::string Namespace);
+ void emitTop(raw_ostream &o, unsigned Indentation,
+ const std::string &Namespace) const;
protected:
// Populates the insn given the uid.
void insnWithID(insn_t &Insn, unsigned Opcode) const {
BitsInit &Bits = getBitsField(*AllInstructions[Opcode]->TheDef, "Inst");
- for (unsigned i = 0; i < BitWidth; ++i)
- Insn.push_back(bitFromBits(Bits, i));
+ // We may have a SoftFail bitmask, which specifies a mask where an encoding
+ // may differ from the value in "Inst" and yet still be valid, but the
+ // disassembler should return SoftFail instead of Success.
+ //
+ // This is used for marking UNPREDICTABLE instructions in the ARM world.
+ BitsInit *SFBits =
+ AllInstructions[Opcode]->TheDef->getValueAsBitsInit("SoftFail");
+
+ for (unsigned i = 0; i < BitWidth; ++i) {
+ if (SFBits && bitFromBits(*SFBits, i) == BIT_TRUE)
+ Insn.push_back(BIT_UNSET);
+ else
+ Insn.push_back(bitFromBits(Bits, i));
+ }
}
// Returns the record name.
@@ -300,15 +310,16 @@ protected:
// Returns false if there exists any uninitialized bit value in the range.
// Returns true, otherwise.
bool fieldFromInsn(uint64_t &Field, insn_t &Insn, unsigned StartBit,
- unsigned NumBits) const;
+ unsigned NumBits) const;
/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
/// filter array as a series of chars.
- void dumpFilterArray(raw_ostream &o, std::vector<bit_value_t> & filter);
+ void dumpFilterArray(raw_ostream &o,
+ const std::vector<bit_value_t> & filter) const;
/// dumpStack - dumpStack traverses the filter chooser chain and calls
/// dumpFilterArray on each filter chooser up to the top level one.
- void dumpStack(raw_ostream &o, const char *prefix);
+ void dumpStack(raw_ostream &o, const char *prefix) const;
Filter &bestFilter() {
assert(BestIndex != -1 && "BestIndex not set");
@@ -316,9 +327,9 @@ protected:
}
// Called from Filter::recurse() when singleton exists. For debug purpose.
- void SingletonExists(unsigned Opc);
+ void SingletonExists(unsigned Opc) const;
- bool PositionFiltered(unsigned i) {
+ bool PositionFiltered(unsigned i) const {
return ValueSet(FilterBitValues[i]);
}
@@ -327,31 +338,37 @@ protected:
// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
// decoded bits in order to verify that the instruction matches the Opcode.
unsigned getIslands(std::vector<unsigned> &StartBits,
- std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
- insn_t &Insn);
+ std::vector<unsigned> &EndBits,
+ std::vector<uint64_t> &FieldVals,
+ const insn_t &Insn) const;
// Emits code to check the Predicates member of an instruction are true.
// Returns true if predicate matches were emitted, false otherwise.
- bool emitPredicateMatch(raw_ostream &o, unsigned &Indentation,unsigned Opc);
+ bool emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
+ unsigned Opc) const;
+
+ void emitSoftFailCheck(raw_ostream &o, unsigned Indentation,
+ unsigned Opc) const;
// Emits code to decode the singleton. Return true if we have matched all the
// well-known bits.
- bool emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,unsigned Opc);
+ bool emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
+ unsigned Opc) const;
// Emits code to decode the singleton, and then to decode the rest.
- void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,Filter &Best);
+ void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
+ const Filter &Best) const;
void emitBinaryParser(raw_ostream &o , unsigned &Indentation,
- OperandInfo &OpInfo);
+ const OperandInfo &OpInfo) const;
// Assign a single filter and run with it.
- void runSingleFilter(FilterChooser &owner, unsigned startBit, unsigned numBit,
- bool mixed);
+ void runSingleFilter(unsigned startBit, unsigned numBit, bool mixed);
// reportRegion is a helper function for filterProcessor to mark a region as
// eligible for use as a filter region.
void reportRegion(bitAttr_t RA, unsigned StartBit, unsigned BitIndex,
- bool AllowMixed);
+ bool AllowMixed);
// FilterProcessor scans the well-known encoding bits of the instructions and
// builds up a list of candidate filters. It chooses the best filter and
@@ -366,31 +383,30 @@ protected:
// Emits code to decode our share of instructions. Returns true if the
// emitted code causes a return, which occurs if we know how to decode
// the instruction at this level or the instruction is not decodeable.
- bool emit(raw_ostream &o, unsigned &Indentation);
+ bool emit(raw_ostream &o, unsigned &Indentation) const;
};
///////////////////////////
// //
-// Filter Implmenetation //
+// Filter Implementation //
// //
///////////////////////////
-Filter::Filter(const Filter &f) :
- Owner(f.Owner), StartBit(f.StartBit), NumBits(f.NumBits), Mixed(f.Mixed),
- FilteredInstructions(f.FilteredInstructions),
- VariableInstructions(f.VariableInstructions),
- FilterChooserMap(f.FilterChooserMap), NumFiltered(f.NumFiltered),
- LastOpcFiltered(f.LastOpcFiltered), NumVariable(f.NumVariable) {
+Filter::Filter(const Filter &f)
+ : Owner(f.Owner), StartBit(f.StartBit), NumBits(f.NumBits), Mixed(f.Mixed),
+ FilteredInstructions(f.FilteredInstructions),
+ VariableInstructions(f.VariableInstructions),
+ FilterChooserMap(f.FilterChooserMap), NumFiltered(f.NumFiltered),
+ LastOpcFiltered(f.LastOpcFiltered) {
}
Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
- bool mixed) : Owner(&owner), StartBit(startBit), NumBits(numBits),
- Mixed(mixed) {
+ bool mixed)
+ : Owner(&owner), StartBit(startBit), NumBits(numBits), Mixed(mixed) {
assert(StartBit + NumBits - 1 < Owner->BitWidth);
NumFiltered = 0;
LastOpcFiltered = 0;
- NumVariable = 0;
for (unsigned i = 0, e = Owner->Opcodes.size(); i != e; ++i) {
insn_t Insn;
@@ -409,10 +425,9 @@ Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
FilteredInstructions[Field].push_back(LastOpcFiltered);
++NumFiltered;
} else {
- // Some of the encoding bit(s) are unspecfied. This contributes to
+ // Some of the encoding bit(s) are unspecified. This contributes to
// one additional member of "Variable" instructions.
VariableInstructions.push_back(Owner->Opcodes[i]);
- ++NumVariable;
}
}
@@ -421,7 +436,7 @@ Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
}
Filter::~Filter() {
- std::map<unsigned, FilterChooser*>::iterator filterIterator;
+ std::map<unsigned, const FilterChooser*>::iterator filterIterator;
for (filterIterator = FilterChooserMap.begin();
filterIterator != FilterChooserMap.end();
filterIterator++) {
@@ -450,7 +465,7 @@ void Filter::recurse() {
// Delegates to an inferior filter chooser for further processing on this
// group of instructions whose segment values are variable.
- FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>(
+ FilterChooserMap.insert(std::pair<unsigned, const FilterChooser*>(
(unsigned)-1,
new FilterChooser(Owner->AllInstructions,
VariableInstructions,
@@ -483,7 +498,7 @@ void Filter::recurse() {
// Delegates to an inferior filter chooser for further processing on this
// category of instructions.
- FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>(
+ FilterChooserMap.insert(std::pair<unsigned, const FilterChooser*>(
mapIterator->first,
new FilterChooser(Owner->AllInstructions,
mapIterator->second,
@@ -495,7 +510,7 @@ void Filter::recurse() {
}
// Emit code to decode instructions given a segment or segments of bits.
-void Filter::emit(raw_ostream &o, unsigned &Indentation) {
+void Filter::emit(raw_ostream &o, unsigned &Indentation) const {
o.indent(Indentation) << "// Check Inst{";
if (NumBits > 1)
@@ -507,7 +522,7 @@ void Filter::emit(raw_ostream &o, unsigned &Indentation) {
<< "(insn, " << StartBit << ", "
<< NumBits << ")) {\n";
- std::map<unsigned, FilterChooser*>::iterator filterIterator;
+ std::map<unsigned, const FilterChooser*>::const_iterator filterIterator;
bool DefaultCase = false;
for (filterIterator = FilterChooserMap.begin();
@@ -537,12 +552,12 @@ void Filter::emit(raw_ostream &o, unsigned &Indentation) {
// encoding bits do not match exactly.
if (!DefaultCase) { ++Indentation; ++Indentation; }
- bool finished = filterIterator->second->emit(o, Indentation);
+ filterIterator->second->emit(o, Indentation);
// For top level default case, there's no need for a break statement.
if (Owner->isTopLevel() && DefaultCase)
break;
- if (!finished)
- o.indent(Indentation) << "break;\n";
+
+ o.indent(Indentation) << "break;\n";
if (!DefaultCase) { --Indentation; --Indentation; }
}
@@ -571,13 +586,17 @@ unsigned Filter::usefulness() const {
// Emit the top level typedef and decodeInstruction() function.
void FilterChooser::emitTop(raw_ostream &o, unsigned Indentation,
- std::string Namespace) {
+ const std::string &Namespace) const {
o.indent(Indentation) <<
- "static MCDisassembler::DecodeStatus decode" << Namespace << "Instruction" << BitWidth
- << "(MCInst &MI, uint" << BitWidth << "_t insn, uint64_t Address, "
+ "static MCDisassembler::DecodeStatus decode" << Namespace << "Instruction"
+ << BitWidth << "(MCInst &MI, uint" << BitWidth
+ << "_t insn, uint64_t Address, "
<< "const void *Decoder, const MCSubtargetInfo &STI) {\n";
- o.indent(Indentation) << " unsigned tmp = 0;\n (void)tmp;\n" << Emitter->Locals << "\n";
+ o.indent(Indentation) << " unsigned tmp = 0;\n";
+ o.indent(Indentation) << " (void)tmp;\n";
+ o.indent(Indentation) << Emitter->Locals << "\n";
o.indent(Indentation) << " uint64_t Bits = STI.getFeatureBits();\n";
+ o.indent(Indentation) << " (void)Bits;\n";
++Indentation; ++Indentation;
// Emits code to decode the instructions.
@@ -598,7 +617,7 @@ void FilterChooser::emitTop(raw_ostream &o, unsigned Indentation,
// Returns false if and on the first uninitialized bit value encountered.
// Returns true, otherwise.
bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
- unsigned StartBit, unsigned NumBits) const {
+ unsigned StartBit, unsigned NumBits) const {
Field = 0;
for (unsigned i = 0; i < NumBits; ++i) {
@@ -615,7 +634,7 @@ bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
/// filter array as a series of chars.
void FilterChooser::dumpFilterArray(raw_ostream &o,
- std::vector<bit_value_t> &filter) {
+ const std::vector<bit_value_t> &filter) const {
unsigned bitIndex;
for (bitIndex = BitWidth; bitIndex > 0; bitIndex--) {
@@ -638,8 +657,8 @@ void FilterChooser::dumpFilterArray(raw_ostream &o,
/// dumpStack - dumpStack traverses the filter chooser chain and calls
/// dumpFilterArray on each filter chooser up to the top level one.
-void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) {
- FilterChooser *current = this;
+void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) const {
+ const FilterChooser *current = this;
while (current) {
o << prefix;
@@ -650,7 +669,7 @@ void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) {
}
// Called from Filter::recurse() when singleton exists. For debug purpose.
-void FilterChooser::SingletonExists(unsigned Opc) {
+void FilterChooser::SingletonExists(unsigned Opc) const {
insn_t Insn0;
insnWithID(Insn0, Opc);
@@ -663,7 +682,7 @@ void FilterChooser::SingletonExists(unsigned Opc) {
errs() << '\n';
dumpStack(errs(), "\t\t");
- for (unsigned i = 0; i < Opcodes.size(); i++) {
+ for (unsigned i = 0; i < Opcodes.size(); ++i) {
const std::string &Name = nameWithID(Opcodes[i]);
errs() << '\t' << Name << " ";
@@ -678,8 +697,9 @@ void FilterChooser::SingletonExists(unsigned Opc) {
// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
// decoded bits in order to verify that the instruction matches the Opcode.
unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
- std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
- insn_t &Insn) {
+ std::vector<unsigned> &EndBits,
+ std::vector<uint64_t> &FieldVals,
+ const insn_t &Insn) const {
unsigned Num, BitNo;
Num = BitNo = 0;
@@ -695,9 +715,7 @@ unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
Val = Value(Insn[i]);
bool Filtered = PositionFiltered(i);
switch (State) {
- default:
- assert(0 && "Unreachable code!");
- break;
+ default: llvm_unreachable("Unreachable code!");
case 0:
case 1:
if (Filtered || Val == -1)
@@ -736,17 +754,17 @@ unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
}
void FilterChooser::emitBinaryParser(raw_ostream &o, unsigned &Indentation,
- OperandInfo &OpInfo) {
- std::string &Decoder = OpInfo.Decoder;
+ const OperandInfo &OpInfo) const {
+ const std::string &Decoder = OpInfo.Decoder;
if (OpInfo.numFields() == 1) {
- OperandInfo::iterator OI = OpInfo.begin();
+ OperandInfo::const_iterator OI = OpInfo.begin();
o.indent(Indentation) << " tmp = fieldFromInstruction" << BitWidth
<< "(insn, " << OI->Base << ", " << OI->Width
<< ");\n";
} else {
o.indent(Indentation) << " tmp = 0;\n";
- for (OperandInfo::iterator OI = OpInfo.begin(), OE = OpInfo.end();
+ for (OperandInfo::const_iterator OI = OpInfo.begin(), OE = OpInfo.end();
OI != OE; ++OI) {
o.indent(Indentation) << " tmp |= (fieldFromInstruction" << BitWidth
<< "(insn, " << OI->Base << ", " << OI->Width
@@ -756,14 +774,15 @@ void FilterChooser::emitBinaryParser(raw_ostream &o, unsigned &Indentation,
if (Decoder != "")
o.indent(Indentation) << " " << Emitter->GuardPrefix << Decoder
- << "(MI, tmp, Address, Decoder)" << Emitter->GuardPostfix << "\n";
+ << "(MI, tmp, Address, Decoder)"
+ << Emitter->GuardPostfix << "\n";
else
o.indent(Indentation) << " MI.addOperand(MCOperand::CreateImm(tmp));\n";
}
static void emitSinglePredicateMatch(raw_ostream &o, StringRef str,
- std::string PredicateNamespace) {
+ const std::string &PredicateNamespace) {
if (str[0] == '!')
o << "!(Bits & " << PredicateNamespace << "::"
<< str.slice(1,str.size()) << ")";
@@ -772,8 +791,9 @@ static void emitSinglePredicateMatch(raw_ostream &o, StringRef str,
}
bool FilterChooser::emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
- unsigned Opc) {
- ListInit *Predicates = AllInstructions[Opc]->TheDef->getValueAsListInit("Predicates");
+ unsigned Opc) const {
+ ListInit *Predicates =
+ AllInstructions[Opc]->TheDef->getValueAsListInit("Predicates");
for (unsigned i = 0; i < Predicates->getSize(); ++i) {
Record *Pred = Predicates->getElementAsRecord(i);
if (!Pred->getValue("AssemblerMatcherPredicate"))
@@ -799,10 +819,70 @@ bool FilterChooser::emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
return Predicates->getSize() > 0;
}
+void FilterChooser::emitSoftFailCheck(raw_ostream &o, unsigned Indentation,
+ unsigned Opc) const {
+ BitsInit *SFBits =
+ AllInstructions[Opc]->TheDef->getValueAsBitsInit("SoftFail");
+ if (!SFBits) return;
+ BitsInit *InstBits = AllInstructions[Opc]->TheDef->getValueAsBitsInit("Inst");
+
+ APInt PositiveMask(BitWidth, 0ULL);
+ APInt NegativeMask(BitWidth, 0ULL);
+ for (unsigned i = 0; i < BitWidth; ++i) {
+ bit_value_t B = bitFromBits(*SFBits, i);
+ bit_value_t IB = bitFromBits(*InstBits, i);
+
+ if (B != BIT_TRUE) continue;
+
+ switch (IB) {
+ case BIT_FALSE:
+ // The bit is meant to be false, so emit a check to see if it is true.
+ PositiveMask.setBit(i);
+ break;
+ case BIT_TRUE:
+ // The bit is meant to be true, so emit a check to see if it is false.
+ NegativeMask.setBit(i);
+ break;
+ default:
+ // The bit is not set; this must be an error!
+ StringRef Name = AllInstructions[Opc]->TheDef->getName();
+ errs() << "SoftFail Conflict: bit SoftFail{" << i << "} in "
+ << Name
+ << " is set but Inst{" << i <<"} is unset!\n"
+ << " - You can only mark a bit as SoftFail if it is fully defined"
+ << " (1/0 - not '?') in Inst\n";
+ o << "#error SoftFail Conflict, " << Name << "::SoftFail{" << i
+ << "} set but Inst{" << i << "} undefined!\n";
+ }
+ }
+
+ bool NeedPositiveMask = PositiveMask.getBoolValue();
+ bool NeedNegativeMask = NegativeMask.getBoolValue();
+
+ if (!NeedPositiveMask && !NeedNegativeMask)
+ return;
+
+ std::string PositiveMaskStr = PositiveMask.toString(16, /*signed=*/false);
+ std::string NegativeMaskStr = NegativeMask.toString(16, /*signed=*/false);
+ StringRef BitExt = "";
+ if (BitWidth > 32)
+ BitExt = "ULL";
+
+ o.indent(Indentation) << "if (";
+ if (NeedPositiveMask)
+ o << "insn & 0x" << PositiveMaskStr << BitExt;
+ if (NeedPositiveMask && NeedNegativeMask)
+ o << " || ";
+ if (NeedNegativeMask)
+ o << "~insn & 0x" << NegativeMaskStr << BitExt;
+ o << ")\n";
+ o.indent(Indentation+2) << "S = MCDisassembler::SoftFail;\n";
+}
+
// Emits code to decode the singleton. Return true if we have matched all the
// well-known bits.
bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- unsigned Opc) {
+ unsigned Opc) const {
std::vector<unsigned> StartBits;
std::vector<unsigned> EndBits;
std::vector<uint64_t> FieldVals;
@@ -821,22 +901,26 @@ bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
if (!emitPredicateMatch(o, Indentation, Opc))
o << "1";
o << ") {\n";
+ emitSoftFailCheck(o, Indentation+2, Opc);
o.indent(Indentation) << " MI.setOpcode(" << Opc << ");\n";
- std::vector<OperandInfo>& InsnOperands = Operands[Opc];
- for (std::vector<OperandInfo>::iterator
+ std::map<unsigned, std::vector<OperandInfo> >::const_iterator OpIter =
+ Operands.find(Opc);
+ const std::vector<OperandInfo>& InsnOperands = OpIter->second;
+ for (std::vector<OperandInfo>::const_iterator
I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
// If a custom instruction decoder was specified, use that.
if (I->numFields() == 0 && I->Decoder.size()) {
o.indent(Indentation) << " " << Emitter->GuardPrefix << I->Decoder
- << "(MI, insn, Address, Decoder)" << Emitter->GuardPostfix << "\n";
+ << "(MI, insn, Address, Decoder)"
+ << Emitter->GuardPostfix << "\n";
break;
}
emitBinaryParser(o, Indentation, *I);
}
- o.indent(Indentation) << " return " << Emitter->ReturnOK << "; // " << nameWithID(Opc)
- << '\n';
+ o.indent(Indentation) << " return " << Emitter->ReturnOK << "; // "
+ << nameWithID(Opc) << '\n';
o.indent(Indentation) << "}\n"; // Closing predicate block.
return true;
}
@@ -870,21 +954,25 @@ bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
else
o << ") {\n";
}
+ emitSoftFailCheck(o, Indentation+2, Opc);
o.indent(Indentation) << " MI.setOpcode(" << Opc << ");\n";
- std::vector<OperandInfo>& InsnOperands = Operands[Opc];
- for (std::vector<OperandInfo>::iterator
+ std::map<unsigned, std::vector<OperandInfo> >::const_iterator OpIter =
+ Operands.find(Opc);
+ const std::vector<OperandInfo>& InsnOperands = OpIter->second;
+ for (std::vector<OperandInfo>::const_iterator
I = InsnOperands.begin(), E = InsnOperands.end(); I != E; ++I) {
// If a custom instruction decoder was specified, use that.
if (I->numFields() == 0 && I->Decoder.size()) {
o.indent(Indentation) << " " << Emitter->GuardPrefix << I->Decoder
- << "(MI, insn, Address, Decoder)" << Emitter->GuardPostfix << "\n";
+ << "(MI, insn, Address, Decoder)"
+ << Emitter->GuardPostfix << "\n";
break;
}
emitBinaryParser(o, Indentation, *I);
}
- o.indent(Indentation) << " return " << Emitter->ReturnOK << "; // " << nameWithID(Opc)
- << '\n';
+ o.indent(Indentation) << " return " << Emitter->ReturnOK << "; // "
+ << nameWithID(Opc) << '\n';
o.indent(Indentation) << "}\n";
return false;
@@ -892,7 +980,7 @@ bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
// Emits code to decode the singleton, and then to decode the rest.
void FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
- Filter &Best) {
+ const Filter &Best) const {
unsigned Opc = Best.getSingletonOpc();
@@ -908,8 +996,8 @@ void FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
// Assign a single filter and run with it. Top level API client can initialize
// with a single filter to start the filtering process.
-void FilterChooser::runSingleFilter(FilterChooser &owner, unsigned startBit,
- unsigned numBit, bool mixed) {
+void FilterChooser::runSingleFilter(unsigned startBit, unsigned numBit,
+ bool mixed) {
Filters.clear();
Filter F(*this, startBit, numBit, true);
Filters.push_back(F);
@@ -920,7 +1008,7 @@ void FilterChooser::runSingleFilter(FilterChooser &owner, unsigned startBit,
// reportRegion is a helper function for filterProcessor to mark a region as
// eligible for use as a filter region.
void FilterChooser::reportRegion(bitAttr_t RA, unsigned StartBit,
- unsigned BitIndex, bool AllowMixed) {
+ unsigned BitIndex, bool AllowMixed) {
if (RA == ATTR_MIXED && AllowMixed)
Filters.push_back(Filter(*this, StartBit, BitIndex - StartBit, true));
else if (RA == ATTR_ALL_SET && !AllowMixed)
@@ -957,8 +1045,7 @@ bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
// Look for islands of undecoded bits of any instruction.
if (getIslands(StartBits, EndBits, FieldVals, Insn) > 0) {
// Found an instruction with island(s). Now just assign a filter.
- runSingleFilter(*this, StartBits[0], EndBits[0] - StartBits[0] + 1,
- true);
+ runSingleFilter(StartBits[0], EndBits[0] - StartBits[0] + 1, true);
return true;
}
}
@@ -1066,7 +1153,7 @@ bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
RA = ATTR_MIXED;
break;
default:
- assert(0 && "Unexpected bitAttr!");
+ llvm_unreachable("Unexpected bitAttr!");
}
break;
case ATTR_ALL_SET:
@@ -1087,7 +1174,7 @@ bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
RA = ATTR_MIXED;
break;
default:
- assert(0 && "Unexpected bitAttr!");
+ llvm_unreachable("Unexpected bitAttr!");
}
break;
case ATTR_MIXED:
@@ -1109,13 +1196,13 @@ bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
case ATTR_MIXED:
break;
default:
- assert(0 && "Unexpected bitAttr!");
+ llvm_unreachable("Unexpected bitAttr!");
}
break;
case ATTR_ALL_UNSET:
- assert(0 && "regionAttr state machine has no ATTR_UNSET state");
+ llvm_unreachable("regionAttr state machine has no ATTR_UNSET state");
case ATTR_FILTERED:
- assert(0 && "regionAttr state machine has no ATTR_FILTERED state");
+ llvm_unreachable("regionAttr state machine has no ATTR_FILTERED state");
}
}
@@ -1189,7 +1276,7 @@ void FilterChooser::doFilter() {
// Emits code to decode our share of instructions. Returns true if the
// emitted code causes a return, which occurs if we know how to decode
// the instruction at this level or the instruction is not decodeable.
-bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
+bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) const {
if (Opcodes.size() == 1)
// There is only one instruction in the set, which is great!
// Call emitSingletonDecoder() to see whether there are any remaining
@@ -1198,11 +1285,11 @@ bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
// Choose the best filter to do the decodings!
if (BestIndex != -1) {
- Filter &Best = bestFilter();
+ const Filter &Best = Filters[BestIndex];
if (Best.getNumFiltered() == 1)
emitSingletonDecoder(o, Indentation, Best);
else
- bestFilter().emit(o, Indentation);
+ Best.emit(o, Indentation);
return false;
}
@@ -1222,7 +1309,7 @@ bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
dumpStack(errs(), "\t\t");
- for (unsigned i = 0; i < Opcodes.size(); i++) {
+ for (unsigned i = 0; i < Opcodes.size(); ++i) {
const std::string &Name = nameWithID(Opcodes[i]);
errs() << '\t' << Name << " ";
@@ -1234,9 +1321,8 @@ bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
return true;
}
-static bool populateInstruction(const CodeGenInstruction &CGI,
- unsigned Opc,
- std::map<unsigned, std::vector<OperandInfo> >& Operands){
+static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc,
+ std::map<unsigned, std::vector<OperandInfo> > &Operands){
const Record &Def = *CGI.TheDef;
// If all the bit positions are not specified; do not decode this instruction.
// We are bound to fail! For proper disassembly, the well-known encoding bits
@@ -1290,7 +1376,7 @@ static bool populateInstruction(const CodeGenInstruction &CGI,
}
// For each operand, see if we can figure out where it is encoded.
- for (std::vector<std::pair<Init*, std::string> >::iterator
+ for (std::vector<std::pair<Init*, std::string> >::const_iterator
NI = InOutOperands.begin(), NE = InOutOperands.end(); NI != NE; ++NI) {
std::string Decoder = "";
@@ -1435,8 +1521,7 @@ static void emitHelper(llvm::raw_ostream &o, unsigned BitWidth) {
}
// Emits disassembler code for instruction decoding.
-void FixedLenDecoderEmitter::run(raw_ostream &o)
-{
+void FixedLenDecoderEmitter::run(raw_ostream &o) {
o << "#include \"llvm/MC/MCInst.h\"\n";
o << "#include \"llvm/Support/DataTypes.h\"\n";
o << "#include <assert.h>\n";
@@ -1444,14 +1529,15 @@ void FixedLenDecoderEmitter::run(raw_ostream &o)
o << "namespace llvm {\n\n";
// Parameterize the decoders based on namespace and instruction width.
- NumberedInstructions = Target.getInstructionsByEnumValue();
+ const std::vector<const CodeGenInstruction*> &NumberedInstructions =
+ Target.getInstructionsByEnumValue();
std::map<std::pair<std::string, unsigned>,
std::vector<unsigned> > OpcMap;
std::map<unsigned, std::vector<OperandInfo> > Operands;
for (unsigned i = 0; i < NumberedInstructions.size(); ++i) {
const CodeGenInstruction *Inst = NumberedInstructions[i];
- Record *Def = Inst->TheDef;
+ const Record *Def = Inst->TheDef;
unsigned Size = Def->getValueAsInt("Size");
if (Def->getValueAsString("Namespace") == "TargetOpcode" ||
Def->getValueAsBit("isPseudo") ||
@@ -1470,7 +1556,7 @@ void FixedLenDecoderEmitter::run(raw_ostream &o)
std::set<unsigned> Sizes;
for (std::map<std::pair<std::string, unsigned>,
- std::vector<unsigned> >::iterator
+ std::vector<unsigned> >::const_iterator
I = OpcMap.begin(), E = OpcMap.end(); I != E; ++I) {
// If we haven't visited this instruction width before, emit the
// helper method to extract fields.
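
The new emitSoftFailCheck() above folds the SoftFail bits into two masks: PositiveMask collects SoftFail positions whose Inst bit is 0 (so a set bit in the encoding is suspicious), and NegativeMask collects positions whose Inst bit is 1 (so a clear bit is suspicious). The generated decoder then demotes Success to SoftFail when either mask fires. A minimal sketch of that generated shape, with hypothetical 32-bit masks:

  #include <cstdint>
  #include <cstdio>

  enum DecodeStatus { Fail = 0, SoftFail = 1, Success = 3 }; // MCDisassembler-style

  static DecodeStatus decodeInstruction32(uint32_t insn) {
    DecodeStatus S = Success;
    const uint32_t PositiveMask = 0x0000F000; // hypothetical mask values
    const uint32_t NegativeMask = 0x00000003;
    if ((insn & PositiveMask) || (~insn & NegativeMask))
      S = SoftFail;                           // decodable, but UNPREDICTABLE
    // ... field extraction and MI construction would follow ...
    return S;
  }

  int main() {
    std::printf("%d\n", decodeInstruction32(0x00000003)); // masks quiet -> Success
    std::printf("%d\n", decodeInstruction32(0x00001003)); // positive bit -> SoftFail
    return 0;
  }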
diff --git a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h
index 2df5448..195297c 100644
--- a/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h
+++ b/contrib/llvm/utils/TableGen/FixedLenDecoderEmitter.h
@@ -39,12 +39,12 @@ struct OperandInfo {
Fields.push_back(EncodingField(Base, Width, Offset));
}
- unsigned numFields() { return Fields.size(); }
+ unsigned numFields() const { return Fields.size(); }
- typedef std::vector<EncodingField>::iterator iterator;
+ typedef std::vector<EncodingField>::const_iterator const_iterator;
- iterator begin() { return Fields.begin(); }
- iterator end() { return Fields.end(); }
+ const_iterator begin() const { return Fields.begin(); }
+ const_iterator end() const { return Fields.end(); }
};
class FixedLenDecoderEmitter : public TableGenBackend {
@@ -52,12 +52,12 @@ public:
FixedLenDecoderEmitter(RecordKeeper &R,
std::string PredicateNamespace,
std::string GPrefix = "if (",
- std::string GPostfix = " == MCDisassembler::Fail) return MCDisassembler::Fail;",
+ std::string GPostfix = " == MCDisassembler::Fail)"
+ " return MCDisassembler::Fail;",
std::string ROK = "MCDisassembler::Success",
std::string RFail = "MCDisassembler::Fail",
std::string L = "") :
- Records(R), Target(R),
- NumberedInstructions(Target.getInstructionsByEnumValue()),
+ Target(R),
PredicateNamespace(PredicateNamespace),
GuardPrefix(GPrefix), GuardPostfix(GPostfix),
ReturnOK(ROK), ReturnFail(RFail), Locals(L) {}
@@ -66,11 +66,7 @@ public:
void run(raw_ostream &o);
private:
- RecordKeeper &Records;
CodeGenTarget Target;
- std::vector<const CodeGenInstruction*> NumberedInstructions;
- std::vector<unsigned> Opcodes;
- std::map<unsigned, std::vector<OperandInfo> > Operands;
public:
std::string PredicateNamespace;
std::string GuardPrefix, GuardPostfix;
diff --git a/contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp b/contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp
deleted file mode 100644
index 5981afd..0000000
--- a/contrib/llvm/utils/TableGen/InstrEnumEmitter.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//===- InstrEnumEmitter.cpp - Generate Instruction Set Enums --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend is responsible for emitting enums for each machine
-// instruction.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstrEnumEmitter.h"
-#include "CodeGenTarget.h"
-#include "llvm/TableGen/Record.h"
-#include <cstdio>
-using namespace llvm;
-
-// runEnums - Print out enum values for all of the instructions.
-void InstrEnumEmitter::run(raw_ostream &OS) {
- EmitSourceFileHeader("Target Instruction Enum Values", OS);
- OS << "namespace llvm {\n\n";
-
- CodeGenTarget Target(Records);
-
- // We must emit the PHI opcode first...
- std::string Namespace = Target.getInstNamespace();
-
- if (Namespace.empty()) {
- fprintf(stderr, "No instructions defined!\n");
- exit(1);
- }
-
- const std::vector<const CodeGenInstruction*> &NumberedInstructions =
- Target.getInstructionsByEnumValue();
-
- OS << "namespace " << Namespace << " {\n";
- OS << " enum {\n";
- for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
- OS << " " << NumberedInstructions[i]->TheDef->getName()
- << "\t= " << i << ",\n";
- }
- OS << " INSTRUCTION_LIST_END = " << NumberedInstructions.size() << "\n";
- OS << " };\n}\n";
- OS << "} // End llvm namespace \n";
-}
diff --git a/contrib/llvm/utils/TableGen/InstrEnumEmitter.h b/contrib/llvm/utils/TableGen/InstrEnumEmitter.h
deleted file mode 100644
index c29a309..0000000
--- a/contrib/llvm/utils/TableGen/InstrEnumEmitter.h
+++ /dev/null
@@ -1,33 +0,0 @@
-//===- InstrEnumEmitter.h - Generate Instruction Set Enums ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This tablegen backend is responsible for emitting enums for each machine
-// instruction.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef INSTRENUM_EMITTER_H
-#define INSTRENUM_EMITTER_H
-
-#include "llvm/TableGen/TableGenBackend.h"
-
-namespace llvm {
-
-class InstrEnumEmitter : public TableGenBackend {
- RecordKeeper &Records;
-public:
- InstrEnumEmitter(RecordKeeper &R) : Records(R) {}
-
- // run - Output the instruction set description, returning true on failure.
- void run(raw_ostream &OS);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 8341724..8b3efd3 100644
--- a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -14,14 +14,16 @@
#include "InstrInfoEmitter.h"
#include "CodeGenTarget.h"
+#include "SequenceToOffsetTable.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/StringExtras.h"
#include <algorithm>
+#include <cstdio>
using namespace llvm;
static void PrintDefList(const std::vector<Record*> &Uses,
unsigned Num, raw_ostream &OS) {
- OS << "static const unsigned ImplicitList" << Num << "[] = { ";
+ OS << "static const uint16_t ImplicitList" << Num << "[] = { ";
for (unsigned i = 0, e = Uses.size(); i != e; ++i)
OS << getQualifiedName(Uses[i]) << ", ";
OS << "0 };\n";
@@ -106,6 +108,11 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
if (Inst.Operands[i].Rec->isSubClassOf("OptionalDefOperand"))
Res += "|(1<<MCOI::OptionalDef)";
+ // Fill in operand type.
+ Res += ", MCOI::";
+ assert(!Inst.Operands[i].OperandType.empty() && "Invalid operand type.");
+ Res += Inst.Operands[i].OperandType;
+
// Fill in constraint info.
Res += ", ";
@@ -121,11 +128,6 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
" << 16) | (1 << MCOI::TIED_TO))";
}
- // Fill in operand type.
- Res += ", MCOI::";
- assert(!Inst.Operands[i].OperandType.empty() && "Invalid operand type.");
- Res += Inst.Operands[i].OperandType;
-
Result.push_back(Res);
}
}
@@ -203,7 +205,7 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
// Emit all of the MCInstrDesc records in their ENUM ordering.
//
- OS << "\nMCInstrDesc " << TargetName << "Insts[] = {\n";
+ OS << "\nextern const MCInstrDesc " << TargetName << "Insts[] = {\n";
const std::vector<const CodeGenInstruction*> &NumberedInstructions =
Target.getInstructionsByEnumValue();
@@ -212,10 +214,33 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
OperandInfoIDs, OS);
OS << "};\n\n";
+ // Build an array of instruction names
+ SequenceToOffsetTable<std::string> InstrNames;
+ for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
+ const CodeGenInstruction *Instr = NumberedInstructions[i];
+ InstrNames.add(Instr->TheDef->getName());
+ }
+
+ InstrNames.layout();
+ OS << "extern const char " << TargetName << "InstrNameData[] = {\n";
+ InstrNames.emit(OS, printChar);
+ OS << "};\n\n";
+
+ OS << "extern const unsigned " << TargetName <<"InstrNameIndices[] = {";
+ for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
+ if (i % 8 == 0)
+ OS << "\n ";
+ const CodeGenInstruction *Instr = NumberedInstructions[i];
+ OS << InstrNames.get(Instr->TheDef->getName()) << "U, ";
+ }
+
+ OS << "\n};\n\n";
+
// MCInstrInfo initialization routine.
OS << "static inline void Init" << TargetName
<< "MCInstrInfo(MCInstrInfo *II) {\n";
OS << " II->InitMCInstrInfo(" << TargetName << "Insts, "
+ << TargetName << "InstrNameIndices, " << TargetName << "InstrNameData, "
<< NumberedInstructions.size() << ");\n}\n\n";
OS << "} // End llvm namespace \n";
@@ -239,10 +264,13 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
OS << "#undef GET_INSTRINFO_CTOR\n";
OS << "namespace llvm {\n";
- OS << "extern MCInstrDesc " << TargetName << "Insts[];\n";
+ OS << "extern const MCInstrDesc " << TargetName << "Insts[];\n";
+ OS << "extern const unsigned " << TargetName << "InstrNameIndices[];\n";
+ OS << "extern const char " << TargetName << "InstrNameData[];\n";
OS << ClassName << "::" << ClassName << "(int SO, int DO)\n"
<< " : TargetInstrInfoImpl(SO, DO) {\n"
<< " InitMCInstrInfo(" << TargetName << "Insts, "
+ << TargetName << "InstrNameIndices, " << TargetName << "InstrNameData, "
<< NumberedInstructions.size() << ");\n}\n";
OS << "} // End llvm namespace \n";
@@ -264,8 +292,7 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
OS << Num << ",\t" << MinOperands << ",\t"
<< Inst.Operands.NumDefs << ",\t"
<< getItinClassNumber(Inst.TheDef) << ",\t"
- << Inst.TheDef->getValueAsInt("Size") << ",\t\""
- << Inst.TheDef->getName() << "\", 0";
+ << Inst.TheDef->getValueAsInt("Size") << ",\t0";
// Emit all of the target indepedent flags...
if (Inst.isPseudo) OS << "|(1<<MCID::Pseudo)";
@@ -346,7 +373,7 @@ void InstrInfoEmitter::emitEnums(raw_ostream &OS) {
// We must emit the PHI opcode first...
std::string Namespace = Target.getInstNamespace();
-
+
if (Namespace.empty()) {
fprintf(stderr, "No instructions defined!\n");
exit(1);
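
The InstrInfoEmitter hunks above stop embedding a name string in every MCInstrDesc and instead emit one NUL-separated character blob plus a per-opcode offset array (built by SequenceToOffsetTable, which also shares common suffixes between names; this toy table does not). A sketch of the resulting lookup, with made-up names and offsets:

  #include <cstdio>

  static const char InstrNameData[] = "ADD\0SUB\0PHI\0";  // generated blob
  static const unsigned InstrNameIndices[] = { 8, 0, 4 }; // enum order -> offset

  static const char *getName(unsigned Opcode) {
    return InstrNameData + InstrNameIndices[Opcode];      // O(1), no pointer relocs
  }

  int main() {
    std::printf("%s %s %s\n", getName(0), getName(1), getName(2)); // PHI ADD SUB
    return 0;
  }

One consequence visible in emitRecord above: the per-instruction name literal disappears from the MCInstrDesc initializer, leaving a plain 0 in its place.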
diff --git a/contrib/llvm/utils/TableGen/InstrInfoEmitter.h b/contrib/llvm/utils/TableGen/InstrInfoEmitter.h
index 1461e2c..f8d3ea5 100644
--- a/contrib/llvm/utils/TableGen/InstrInfoEmitter.h
+++ b/contrib/llvm/utils/TableGen/InstrInfoEmitter.h
@@ -31,19 +31,19 @@ class InstrInfoEmitter : public TableGenBackend {
RecordKeeper &Records;
CodeGenDAGPatterns CDP;
std::map<std::string, unsigned> ItinClassMap;
-
+
public:
InstrInfoEmitter(RecordKeeper &R) : Records(R), CDP(R) { }
- // run - Output the instruction set description, returning true on failure.
+ // run - Output the instruction set description.
void run(raw_ostream &OS);
private:
void emitEnums(raw_ostream &OS);
- typedef std::map<std::vector<std::string>, unsigned> OperandInfoMapTy;
+ typedef std::map<std::vector<std::string>, unsigned> OperandInfoMapTy;
void emitRecord(const CodeGenInstruction &Inst, unsigned Num,
- Record *InstrInfo,
+ Record *InstrInfo,
std::map<std::vector<Record*>, unsigned> &EL,
const OperandInfoMapTy &OpInfo,
raw_ostream &OS);
@@ -51,7 +51,7 @@ private:
// Itinerary information.
void GatherItinClasses();
unsigned getItinClassNumber(const Record *InstRec);
-
+
// Operand information.
void EmitOperandInfo(raw_ostream &OS, OperandInfoMapTy &OperandInfoIDs);
std::vector<std::string> GetOperandInfo(const CodeGenInstruction &Inst);
diff --git a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
index 782b89e..8e1bae8 100644
--- a/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -57,9 +57,6 @@ void IntrinsicEmitter::run(raw_ostream &OS) {
// Emit intrinsic alias analysis mod/ref behavior.
EmitModRefBehavior(Ints, OS);
- // Emit a list of intrinsics with corresponding GCC builtins.
- EmitGCCBuiltinList(Ints, OS);
-
// Emit code to translate GCC builtins into LLVM intrinsics.
EmitIntrinsicToGCCBuiltinMap(Ints, OS);
@@ -160,17 +157,20 @@ EmitIntrinsicToNameTable(const std::vector<CodeGenIntrinsic> &Ints,
void IntrinsicEmitter::
EmitIntrinsicToOverloadTable(const std::vector<CodeGenIntrinsic> &Ints,
raw_ostream &OS) {
- OS << "// Intrinsic ID to overload table\n";
+ OS << "// Intrinsic ID to overload bitset\n";
OS << "#ifdef GET_INTRINSIC_OVERLOAD_TABLE\n";
- OS << " // Note that entry #0 is the invalid intrinsic!\n";
+ OS << "static const uint8_t OTable[] = {\n";
+ OS << " 0";
for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
- OS << " ";
+ // Add one to the index so we emit a null bit for the invalid #0 intrinsic.
+ if ((i+1)%8 == 0)
+ OS << ",\n 0";
if (Ints[i].isOverloaded)
- OS << "true";
- else
- OS << "false";
- OS << ",\n";
+ OS << " | (1<<" << (i+1)%8 << ')';
}
+ OS << "\n};\n\n";
+ // OTable contains a true bit at the position if the intrinsic is overloaded.
+ OS << "return (OTable[id/8] & (1 << (id%8))) != 0;\n";
OS << "#endif\n\n";
}
@@ -181,6 +181,8 @@ static void EmitTypeForValueType(raw_ostream &OS, MVT::SimpleValueType VT) {
} else if (VT == MVT::Other) {
// MVT::OtherVT is used to mean the empty struct type here.
OS << "StructType::get(Context)";
+ } else if (VT == MVT::f16) {
+ OS << "Type::getHalfTy(Context)";
} else if (VT == MVT::f32) {
OS << "Type::getFloatTy(Context)";
} else if (VT == MVT::f64) {
@@ -318,7 +320,7 @@ void IntrinsicEmitter::EmitVerifier(const std::vector<CodeGenIntrinsic> &Ints,
OS << "// Verifier::visitIntrinsicFunctionCall code.\n";
OS << "#ifdef GET_INTRINSIC_VERIFIER\n";
OS << " switch (ID) {\n";
- OS << " default: assert(0 && \"Invalid intrinsic!\");\n";
+ OS << " default: llvm_unreachable(\"Invalid intrinsic!\");\n";
// This checking can emit a lot of very common code. To reduce the amount of
// code that we emit, batch up cases that have identical types. This avoids
@@ -414,7 +416,7 @@ void IntrinsicEmitter::EmitGenerator(const std::vector<CodeGenIntrinsic> &Ints,
OS << "// Code for generating Intrinsic function declarations.\n";
OS << "#ifdef GET_INTRINSIC_GENERATOR\n";
OS << " switch (id) {\n";
- OS << " default: assert(0 && \"Invalid intrinsic!\");\n";
+ OS << " default: llvm_unreachable(\"Invalid intrinsic!\");\n";
// Similar to GET_INTRINSIC_VERIFIER, batch up cases that have identical
// types.
@@ -483,8 +485,7 @@ namespace {
case CodeGenIntrinsic::ReadWriteMem:
return MRK_none;
}
- assert(0 && "bad mod-ref kind");
- return MRK_none;
+ llvm_unreachable("bad mod-ref kind");
}
struct AttributeComparator {
@@ -516,37 +517,50 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
else
OS << "AttrListPtr Intrinsic::getAttributes(ID id) {\n";
- // Compute the maximum number of attribute arguments.
- std::vector<const CodeGenIntrinsic*> sortedIntrinsics(Ints.size());
+ // Compute the maximum number of attribute arguments and the map
+ typedef std::map<const CodeGenIntrinsic*, unsigned,
+ AttributeComparator> UniqAttrMapTy;
+ UniqAttrMapTy UniqAttributes;
unsigned maxArgAttrs = 0;
+ unsigned AttrNum = 0;
for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
const CodeGenIntrinsic &intrinsic = Ints[i];
- sortedIntrinsics[i] = &intrinsic;
maxArgAttrs =
std::max(maxArgAttrs, unsigned(intrinsic.ArgumentAttributes.size()));
+ unsigned &N = UniqAttributes[&intrinsic];
+ if (N) continue;
+ assert(AttrNum < 256 && "Too many unique attributes for table!");
+ N = ++AttrNum;
}
// Emit an array of AttributeWithIndex. Most intrinsics will have
// at least one entry, for the function itself (index ~1), which is
// usually nounwind.
- OS << " AttributeWithIndex AWI[" << maxArgAttrs+1 << "];\n";
- OS << " unsigned NumAttrs = 0;\n";
- OS << " switch (id) {\n";
- OS << " default: break;\n";
+ OS << " static const uint8_t IntrinsicsToAttributesMap[] = {\n";
- AttributeComparator precedes;
+ for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
+ const CodeGenIntrinsic &intrinsic = Ints[i];
- std::stable_sort(sortedIntrinsics.begin(), sortedIntrinsics.end(), precedes);
+ OS << " " << UniqAttributes[&intrinsic] << ", // "
+ << intrinsic.Name << "\n";
+ }
+ OS << " };\n\n";
- for (unsigned i = 0, e = sortedIntrinsics.size(); i != e; ++i) {
- const CodeGenIntrinsic &intrinsic = *sortedIntrinsics[i];
- OS << " case " << TargetPrefix << "Intrinsic::"
- << intrinsic.EnumName << ":\n";
+ OS << " AttributeWithIndex AWI[" << maxArgAttrs+1 << "];\n";
+ OS << " unsigned NumAttrs = 0;\n";
+ OS << " if (id != 0) {\n";
+ OS << " switch(IntrinsicsToAttributesMap[id - ";
+ if (TargetOnly)
+ OS << "Intrinsic::num_intrinsics";
+ else
+ OS << "1";
+ OS << "]) {\n";
+ OS << " default: llvm_unreachable(\"Invalid attribute number\");\n";
+ for (UniqAttrMapTy::const_iterator I = UniqAttributes.begin(),
+ E = UniqAttributes.end(); I != E; ++I) {
+ OS << " case " << I->second << ":\n";
- // Fill out the case if this is the last case for this range of
- // intrinsics.
- if (i + 1 != e && !precedes(&intrinsic, sortedIntrinsics[i + 1]))
- continue;
+ const CodeGenIntrinsic &intrinsic = *(I->first);
// Keep track of the number of attributes we're writing out.
unsigned numAttrs = 0;
@@ -554,8 +568,8 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
// The argument attributes are alreadys sorted by argument index.
for (unsigned ai = 0, ae = intrinsic.ArgumentAttributes.size(); ai != ae;) {
unsigned argNo = intrinsic.ArgumentAttributes[ai].first;
-
- OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get("
+
+ OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get("
<< argNo+1 << ", ";
bool moreThanOne = false;
@@ -579,7 +593,7 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
ModRefKind modRef = getModRefKind(intrinsic);
if (!intrinsic.canThrow || modRef) {
- OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(~0, ";
+ OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(~0, ";
if (!intrinsic.canThrow) {
OS << "Attribute::NoUnwind";
if (modRef) OS << '|';
@@ -593,13 +607,14 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
}
if (numAttrs) {
- OS << " NumAttrs = " << numAttrs << ";\n";
- OS << " break;\n";
+ OS << " NumAttrs = " << numAttrs << ";\n";
+ OS << " break;\n";
} else {
- OS << " return AttrListPtr();\n";
+ OS << " return AttrListPtr();\n";
}
}
+ OS << " }\n";
OS << " }\n";
OS << " return AttrListPtr::get(AWI, NumAttrs);\n";
OS << "}\n";
@@ -609,50 +624,36 @@ EmitAttributes(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS) {
/// EmitModRefBehavior - Determine intrinsic alias analysis mod/ref behavior.
void IntrinsicEmitter::
EmitModRefBehavior(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS){
- OS << "// Determine intrinsic alias analysis mod/ref behavior.\n";
- OS << "#ifdef GET_INTRINSIC_MODREF_BEHAVIOR\n";
- OS << "switch (iid) {\n";
- OS << "default:\n return UnknownModRefBehavior;\n";
+ OS << "// Determine intrinsic alias analysis mod/ref behavior.\n"
+ << "#ifdef GET_INTRINSIC_MODREF_BEHAVIOR\n"
+ << "assert(iid <= Intrinsic::" << Ints.back().EnumName << " && "
+ << "\"Unknown intrinsic.\");\n\n";
+
+ OS << "static const uint8_t IntrinsicModRefBehavior[] = {\n"
+ << " /* invalid */ UnknownModRefBehavior,\n";
for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
- if (Ints[i].ModRef == CodeGenIntrinsic::ReadWriteMem)
- continue;
- OS << "case " << TargetPrefix << "Intrinsic::" << Ints[i].EnumName
- << ":\n";
+ OS << " /* " << TargetPrefix << Ints[i].EnumName << " */ ";
switch (Ints[i].ModRef) {
- default:
- assert(false && "Unknown Mod/Ref type!");
case CodeGenIntrinsic::NoMem:
- OS << " return DoesNotAccessMemory;\n";
+ OS << "DoesNotAccessMemory,\n";
break;
case CodeGenIntrinsic::ReadArgMem:
- OS << " return OnlyReadsArgumentPointees;\n";
+ OS << "OnlyReadsArgumentPointees,\n";
break;
case CodeGenIntrinsic::ReadMem:
- OS << " return OnlyReadsMemory;\n";
+ OS << "OnlyReadsMemory,\n";
break;
case CodeGenIntrinsic::ReadWriteArgMem:
- OS << " return OnlyAccessesArgumentPointees;\n";
+ OS << "OnlyAccessesArgumentPointees,\n";
+ break;
+ case CodeGenIntrinsic::ReadWriteMem:
+ OS << "UnknownModRefBehavior,\n";
break;
}
}
- OS << "}\n";
- OS << "#endif // GET_INTRINSIC_MODREF_BEHAVIOR\n\n";
-}
-
-void IntrinsicEmitter::
-EmitGCCBuiltinList(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS){
- OS << "// Get the GCC builtin that corresponds to an LLVM intrinsic.\n";
- OS << "#ifdef GET_GCC_BUILTIN_NAME\n";
- OS << " switch (F->getIntrinsicID()) {\n";
- OS << " default: BuiltinName = \"\"; break;\n";
- for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
- if (!Ints[i].GCCBuiltinName.empty()) {
- OS << " case Intrinsic::" << Ints[i].EnumName << ": BuiltinName = \""
- << Ints[i].GCCBuiltinName << "\"; break;\n";
- }
- }
- OS << " }\n";
- OS << "#endif\n\n";
+ OS << "};\n\n"
+ << "return static_cast<ModRefBehavior>(IntrinsicModRefBehavior[iid]);\n"
+ << "#endif // GET_INTRINSIC_MODREF_BEHAVIOR\n\n";
}
/// EmitTargetBuiltins - All of the builtins in the specified map are for the
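
The overload-table hunk above replaces one bool per intrinsic with a packed bitset: bit (id) of OTable is set when the intrinsic is overloaded, with bit 0 reserved for the invalid #0 entry, and the query becomes a shift-and-mask. A minimal sketch of the same lookup over made-up table contents:

  #include <cstdint>
  #include <cstdio>

  static const uint8_t OTable[] = {
    0x06,  // bits 1 and 2 set: intrinsics 1 and 2 are overloaded
    0x01   // bit 8 set: intrinsic 8 is overloaded
  };

  static bool isOverloaded(unsigned id) {
    return (OTable[id / 8] & (1u << (id % 8))) != 0;  // same test as the #ifdef block
  }

  int main() {
    std::printf("%d %d %d\n", isOverloaded(1), isOverloaded(3), isOverloaded(8)); // 1 0 1
    return 0;
  }

The mod/ref and attribute hunks in the same file apply the same idea, trading a switch over every intrinsic for a small uint8_t table indexed by intrinsic ID.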
diff --git a/contrib/llvm/utils/TableGen/IntrinsicEmitter.h b/contrib/llvm/utils/TableGen/IntrinsicEmitter.h
index eb6379c..f9bcd59 100644
--- a/contrib/llvm/utils/TableGen/IntrinsicEmitter.h
+++ b/contrib/llvm/utils/TableGen/IntrinsicEmitter.h
@@ -48,8 +48,6 @@ namespace llvm {
raw_ostream &OS);
void EmitModRefBehavior(const std::vector<CodeGenIntrinsic> &Ints,
raw_ostream &OS);
- void EmitGCCBuiltinList(const std::vector<CodeGenIntrinsic> &Ints,
- raw_ostream &OS);
void EmitIntrinsicToGCCBuiltinMap(const std::vector<CodeGenIntrinsic> &Ints,
raw_ostream &OS);
void EmitSuffix(raw_ostream &OS);
diff --git a/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp b/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
index c685527..802d112 100644
--- a/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/PseudoLoweringEmitter.cpp
@@ -67,7 +67,7 @@ addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn,
// Since we added more than one, we also need to adjust the base.
BaseIdx += NewOps - 1;
} else
- assert(0 && "Unhandled pseudo-expansion argument type!");
+ llvm_unreachable("Unhandled pseudo-expansion argument type!");
}
return OpsAdded;
}
@@ -100,8 +100,11 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() +
"' operand count mismatch");
+ unsigned NumMIOperands = 0;
+ for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i)
+ NumMIOperands += Insn.Operands[i].MINumOperands;
IndexedMap<OpData> OperandMap;
- OperandMap.grow(Insn.Operands.size());
+ OperandMap.grow(NumMIOperands);
addDagOperandMapping(Rec, Dag, Insn, OperandMap, 0);
@@ -176,8 +179,6 @@ void PseudoLoweringEmitter::emitLoweringEmitter(raw_ostream &o) {
for (unsigned i = 0, e = Dest.Operands[OpNo].MINumOperands;
i != e; ++i) {
switch (Expansion.OperandMap[MIOpNo + i].Kind) {
- default:
- llvm_unreachable("Unknown operand type?!");
case OpData::Operand:
o << " lowerOperand(MI->getOperand("
<< Source.Operands[Expansion.OperandMap[MIOpNo].Data
diff --git a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index b0f4ffc..a2478a7 100644
--- a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -16,6 +16,7 @@
#include "RegisterInfoEmitter.h"
#include "CodeGenTarget.h"
#include "CodeGenRegisters.h"
+#include "SequenceToOffsetTable.h"
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -31,6 +32,9 @@ RegisterInfoEmitter::runEnums(raw_ostream &OS,
CodeGenTarget &Target, CodeGenRegBank &Bank) {
const std::vector<CodeGenRegister*> &Registers = Bank.getRegisters();
+ // Register enums are stored as uint16_t in the tables. Make sure they fit.
+ assert(Registers.size() <= 0xffff && "Too many regs to fit in tables");
+
std::string Namespace = Registers[0]->TheDef->getValueAsString("Namespace");
EmitSourceFileHeader("Target Register Enum Values", OS);
@@ -41,7 +45,8 @@ RegisterInfoEmitter::runEnums(raw_ostream &OS,
OS << "namespace llvm {\n\n";
OS << "class MCRegisterClass;\n"
- << "extern MCRegisterClass " << Namespace << "MCRegisterClasses[];\n\n";
+ << "extern const MCRegisterClass " << Namespace
+ << "MCRegisterClasses[];\n\n";
if (!Namespace.empty())
OS << "namespace " << Namespace << " {\n";
@@ -59,6 +64,11 @@ RegisterInfoEmitter::runEnums(raw_ostream &OS,
ArrayRef<CodeGenRegisterClass*> RegisterClasses = Bank.getRegClasses();
if (!RegisterClasses.empty()) {
+
+ // RegisterClass enums are stored as uint16_t in the tables.
+ assert(RegisterClasses.size() <= 0xffff &&
+ "Too many register classes to fit in tables");
+
OS << "\n// Register classes\n";
if (!Namespace.empty())
OS << "namespace " << Namespace << " {\n";
@@ -89,16 +99,104 @@ RegisterInfoEmitter::runEnums(raw_ostream &OS,
OS << "}\n";
}
+ ArrayRef<CodeGenSubRegIndex*> SubRegIndices = Bank.getSubRegIndices();
+ if (!SubRegIndices.empty()) {
+ OS << "\n// Subregister indices\n";
+ std::string Namespace =
+ SubRegIndices[0]->getNamespace();
+ if (!Namespace.empty())
+ OS << "namespace " << Namespace << " {\n";
+ OS << "enum {\n NoSubRegister,\n";
+ for (unsigned i = 0, e = Bank.getNumNamedIndices(); i != e; ++i)
+ OS << " " << SubRegIndices[i]->getName() << ",\t// " << i+1 << "\n";
+ OS << " NUM_TARGET_NAMED_SUBREGS\n};\n";
+ if (!Namespace.empty())
+ OS << "}\n";
+ }
OS << "} // End llvm namespace \n";
OS << "#endif // GET_REGINFO_ENUM\n\n";
}
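
For reference, the new sub-register index block that runEnums() prints looks
roughly like this for an invented target "Foo" with one named index; the
register and register-class enums above it follow the same pattern:

    namespace llvm {

    class MCRegisterClass;
    extern const MCRegisterClass FooMCRegisterClasses[];

    namespace Foo {
    // Subregister indices
    enum {
      NoSubRegister,
      sub_lo,                   // 1
      NUM_TARGET_NAMED_SUBREGS
    };
    } // end namespace Foo

    } // end namespace llvm
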
-void
-RegisterInfoEmitter::EmitRegMapping(raw_ostream &OS,
- const std::vector<CodeGenRegister*> &Regs,
- bool isCtor) {
+void RegisterInfoEmitter::
+EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
+ const std::string &ClassName) {
+ unsigned NumRCs = RegBank.getRegClasses().size();
+ unsigned NumSets = RegBank.getNumRegPressureSets();
+
+ OS << "/// Get the weight in units of pressure for this register class.\n"
+ << "const RegClassWeight &" << ClassName << "::\n"
+ << "getRegClassWeight(const TargetRegisterClass *RC) const {\n"
+ << " static const RegClassWeight RCWeightTable[] = {\n";
+ for (unsigned i = 0, e = NumRCs; i != e; ++i) {
+ const CodeGenRegisterClass &RC = *RegBank.getRegClasses()[i];
+ const CodeGenRegister::Set &Regs = RC.getMembers();
+ if (Regs.empty())
+ OS << " {0, 0";
+ else {
+ std::vector<unsigned> RegUnits;
+ RC.buildRegUnitSet(RegUnits);
+ OS << " {" << (*Regs.begin())->getWeight(RegBank)
+ << ", " << RegBank.getRegUnitSetWeight(RegUnits);
+ }
+ OS << "}, \t// " << RC.getName() << "\n";
+ }
+ OS << " {0, 0} };\n"
+ << " return RCWeightTable[RC->getID()];\n"
+ << "}\n\n";
+
+ OS << "\n"
+ << "// Get the number of dimensions of register pressure.\n"
+ << "unsigned " << ClassName << "::getNumRegPressureSets() const {\n"
+ << " return " << NumSets << ";\n}\n\n";
+
+ OS << "// Get the register unit pressure limit for this dimension.\n"
+ << "// This limit must be adjusted dynamically for reserved registers.\n"
+ << "unsigned " << ClassName << "::\n"
+ << "getRegPressureSetLimit(unsigned Idx) const {\n"
+ << " static const unsigned PressureLimitTable[] = {\n";
+ for (unsigned i = 0; i < NumSets; ++i) {
+ const RegUnitSet &RegUnits = RegBank.getRegPressureSet(i);
+ OS << " " << RegBank.getRegUnitSetWeight(RegUnits.Units)
+ << ", \t// " << i << ": " << RegBank.getRegPressureSet(i).Name << "\n";
+ }
+ OS << " 0 };\n"
+ << " return PressureLimitTable[Idx];\n"
+ << "}\n\n";
+
+ OS << "/// Get the dimensions of register pressure "
+ << "impacted by this register class.\n"
+ << "/// Returns a -1 terminated array of pressure set IDs\n"
+ << "const int* " << ClassName << "::\n"
+ << "getRegClassPressureSets(const TargetRegisterClass *RC) const {\n"
+ << " static const int RCSetsTable[] = {\n ";
+ std::vector<unsigned> RCSetStarts(NumRCs);
+ for (unsigned i = 0, StartIdx = 0, e = NumRCs; i != e; ++i) {
+ RCSetStarts[i] = StartIdx;
+ ArrayRef<unsigned> PSetIDs = RegBank.getRCPressureSetIDs(i);
+ for (ArrayRef<unsigned>::iterator PSetI = PSetIDs.begin(),
+ PSetE = PSetIDs.end(); PSetI != PSetE; ++PSetI) {
+ OS << *PSetI << ", ";
+ ++StartIdx;
+ }
+ OS << "-1, \t// " << RegBank.getRegClasses()[i]->getName() << "\n ";
+ ++StartIdx;
+ }
+ OS << "-1 };\n";
+ OS << " static const unsigned RCSetStartTable[] = {\n ";
+ for (unsigned i = 0, e = NumRCs; i != e; ++i) {
+ OS << RCSetStarts[i] << ",";
+ }
+ OS << "0 };\n"
+ << " unsigned SetListStart = RCSetStartTable[RC->getID()];\n"
+ << " return &RCSetsTable[SetListStart];\n"
+ << "}\n\n";
+}
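
A hand-expanded instance of the two tables behind getRegClassPressureSets(),
for an invented target with classes GPR and FPR and two pressure sets.
RCSetsTable is a concatenation of -1-terminated ID runs and RCSetStartTable
holds each class's starting offset, so the query is two array loads; the
unsigned-ID signature below is a simplification of the generated method:

    static const int RCSetsTable[] = {
      0, -1,     // GPR: in pressure set 0
      0, 1, -1,  // FPR: in pressure sets 0 and 1
      -1 };
    static const unsigned RCSetStartTable[] = { 0, 2, 0 };

    const int *getRegClassPressureSets(unsigned RCId) {
      return &RCSetsTable[RCSetStartTable[RCId]];
    }
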
+void
+RegisterInfoEmitter::EmitRegMappingTables(raw_ostream &OS,
+ const std::vector<CodeGenRegister*> &Regs,
+ bool isCtor) {
// Collect all information about dwarf register numbers
typedef std::map<Record*, std::vector<int64_t>, LessRecord> DwarfRegNumsMapTy;
DwarfRegNumsMapTy DwarfRegNums;
@@ -124,6 +222,121 @@ RegisterInfoEmitter::EmitRegMapping(raw_ostream &OS,
for (unsigned i = I->second.size(), e = maxLength; i != e; ++i)
I->second.push_back(-1);
+ std::string Namespace = Regs[0]->TheDef->getValueAsString("Namespace");
+
+ OS << "// " << Namespace << " Dwarf<->LLVM register mappings.\n";
+
+ // Emit reverse information about the dwarf register numbers.
+ for (unsigned j = 0; j < 2; ++j) {
+ for (unsigned i = 0, e = maxLength; i != e; ++i) {
+ OS << "extern const MCRegisterInfo::DwarfLLVMRegPair " << Namespace;
+ OS << (j == 0 ? "DwarfFlavour" : "EHFlavour");
+ OS << i << "Dwarf2L[]";
+
+ if (!isCtor) {
+ OS << " = {\n";
+
+ // Store the mapping sorted by the Dwarf reg num so lookup can be done
+ // with a binary search.
+ std::map<uint64_t, Record*> Dwarf2LMap;
+ for (DwarfRegNumsMapTy::iterator
+ I = DwarfRegNums.begin(), E = DwarfRegNums.end(); I != E; ++I) {
+ int DwarfRegNo = I->second[i];
+ if (DwarfRegNo < 0)
+ continue;
+ Dwarf2LMap[DwarfRegNo] = I->first;
+ }
+
+ for (std::map<uint64_t, Record*>::iterator
+ I = Dwarf2LMap.begin(), E = Dwarf2LMap.end(); I != E; ++I)
+ OS << " { " << I->first << "U, " << getQualifiedName(I->second)
+ << " },\n";
+
+ OS << "};\n";
+ } else {
+ OS << ";\n";
+ }
+
+ // We have to store the size in a const global because it is used in
+ // multiple places.
+ OS << "extern const unsigned " << Namespace
+ << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "Dwarf2LSize";
+ if (!isCtor)
+ OS << " = sizeof(" << Namespace
+ << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
+ << "Dwarf2L)/sizeof(MCRegisterInfo::DwarfLLVMRegPair);\n\n";
+ else
+ OS << ";\n\n";
+ }
+ }
+
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ Record *Reg = Regs[i]->TheDef;
+ const RecordVal *V = Reg->getValue("DwarfAlias");
+ if (!V || !V->getValue())
+ continue;
+
+ DefInit *DI = dynamic_cast<DefInit*>(V->getValue());
+ Record *Alias = DI->getDef();
+ DwarfRegNums[Reg] = DwarfRegNums[Alias];
+ }
+
+ // Emit information about the dwarf register numbers.
+ for (unsigned j = 0; j < 2; ++j) {
+ for (unsigned i = 0, e = maxLength; i != e; ++i) {
+ OS << "extern const MCRegisterInfo::DwarfLLVMRegPair " << Namespace;
+ OS << (j == 0 ? "DwarfFlavour" : "EHFlavour");
+ OS << i << "L2Dwarf[]";
+ if (!isCtor) {
+ OS << " = {\n";
+ // Store the mapping sorted by the LLVM reg num so lookup can be done
+ // with a binary search.
+ for (DwarfRegNumsMapTy::iterator
+ I = DwarfRegNums.begin(), E = DwarfRegNums.end(); I != E; ++I) {
+ int RegNo = I->second[i];
+ if (RegNo == -1) // -1 is the default value, don't emit a mapping.
+ continue;
+
+ OS << " { " << getQualifiedName(I->first) << ", " << RegNo
+ << "U },\n";
+ }
+ OS << "};\n";
+ } else {
+ OS << ";\n";
+ }
+
+ // We have to store the size in a const global because it is used in
+ // multiple places.
+ OS << "extern const unsigned " << Namespace
+ << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "L2DwarfSize";
+ if (!isCtor)
+ OS << " = sizeof(" << Namespace
+ << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
+ << "L2Dwarf)/sizeof(MCRegisterInfo::DwarfLLVMRegPair);\n\n";
+ else
+ OS << ";\n\n";
+ }
+ }
+}
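
Each pair table is emitted pre-sorted on the register number being looked up
(Dwarf for the *Dwarf2L tables, LLVM for *L2Dwarf), so a consumer can resolve
a mapping with std::lower_bound. A minimal sketch of that lookup, with the
pair struct and helper standing in for the real MCRegisterInfo machinery:

    #include <algorithm>

    struct DwarfLLVMRegPair {
      unsigned FromReg;  // Dwarf number in a Dwarf2L table, LLVM in L2Dwarf.
      unsigned ToReg;
      bool operator<(unsigned Reg) const { return FromReg < Reg; }
    };

    int mapReg(const DwarfLLVMRegPair *Pairs, unsigned Size, unsigned From) {
      const DwarfLLVMRegPair *I = std::lower_bound(Pairs, Pairs + Size, From);
      if (I == Pairs + Size || I->FromReg != From)
        return -1;  // No mapping recorded for this register.
      return I->ToReg;
    }
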
+
+void
+RegisterInfoEmitter::EmitRegMapping(raw_ostream &OS,
+ const std::vector<CodeGenRegister*> &Regs,
+ bool isCtor) {
+ // Emit the initializer so the tables from EmitRegMappingTables get wired up
+ // to the MCRegisterInfo object.
+ unsigned maxLength = 0;
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ Record *Reg = Regs[i]->TheDef;
+ maxLength = std::max((size_t)maxLength,
+ Reg->getValueAsListOfInts("DwarfNumbers").size());
+ }
+
+ if (!maxLength)
+ return;
+
+ std::string Namespace = Regs[0]->TheDef->getValueAsString("Namespace");
+
// Emit reverse information about the dwarf register numbers.
for (unsigned j = 0; j < 2; ++j) {
OS << " switch (";
@@ -133,43 +346,28 @@ RegisterInfoEmitter::EmitRegMapping(raw_ostream &OS,
OS << "EHFlavour";
OS << ") {\n"
<< " default:\n"
- << " assert(0 && \"Unknown DWARF flavour\");\n"
- << " break;\n";
+ << " llvm_unreachable(\"Unknown DWARF flavour\");\n";
for (unsigned i = 0, e = maxLength; i != e; ++i) {
OS << " case " << i << ":\n";
- for (DwarfRegNumsMapTy::iterator
- I = DwarfRegNums.begin(), E = DwarfRegNums.end(); I != E; ++I) {
- int DwarfRegNo = I->second[i];
- if (DwarfRegNo < 0)
- continue;
- OS << " ";
- if (!isCtor)
- OS << "RI->";
- OS << "mapDwarfRegToLLVMReg(" << DwarfRegNo << ", "
- << getQualifiedName(I->first) << ", ";
- if (j == 0)
+ OS << " ";
+ if (!isCtor)
+ OS << "RI->";
+ std::string Tmp;
+ raw_string_ostream(Tmp) << Namespace
+ << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
+ << "Dwarf2L";
+ OS << "mapDwarfRegsToLLVMRegs(" << Tmp << ", " << Tmp << "Size, ";
+ if (j == 0)
OS << "false";
else
OS << "true";
- OS << " );\n";
- }
+ OS << ");\n";
OS << " break;\n";
}
OS << " }\n";
}
- for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- Record *Reg = Regs[i]->TheDef;
- const RecordVal *V = Reg->getValue("DwarfAlias");
- if (!V || !V->getValue())
- continue;
-
- DefInit *DI = dynamic_cast<DefInit*>(V->getValue());
- Record *Alias = DI->getDef();
- DwarfRegNums[Reg] = DwarfRegNums[Alias];
- }
-
// Emit information about the dwarf register numbers.
for (unsigned j = 0; j < 2; ++j) {
OS << " switch (";
@@ -179,26 +377,23 @@ RegisterInfoEmitter::EmitRegMapping(raw_ostream &OS,
OS << "EHFlavour";
OS << ") {\n"
<< " default:\n"
- << " assert(0 && \"Unknown DWARF flavour\");\n"
- << " break;\n";
+ << " llvm_unreachable(\"Unknown DWARF flavour\");\n";
for (unsigned i = 0, e = maxLength; i != e; ++i) {
OS << " case " << i << ":\n";
- // Sort by name to get a stable order.
- for (DwarfRegNumsMapTy::iterator
- I = DwarfRegNums.begin(), E = DwarfRegNums.end(); I != E; ++I) {
- int RegNo = I->second[i];
- OS << " ";
- if (!isCtor)
- OS << "RI->";
- OS << "mapLLVMRegToDwarfReg(" << getQualifiedName(I->first) << ", "
- << RegNo << ", ";
- if (j == 0)
+ OS << " ";
+ if (!isCtor)
+ OS << "RI->";
+ std::string Tmp;
+ raw_string_ostream(Tmp) << Namespace
+ << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
+ << "L2Dwarf";
+ OS << "mapLLVMRegsToDwarfRegs(" << Tmp << ", " << Tmp << "Size, ";
+ if (j == 0)
OS << "false";
else
OS << "true";
- OS << " );\n";
- }
+ OS << ");\n";
OS << " break;\n";
}
OS << " }\n";
@@ -235,6 +430,14 @@ public:
}
};
+static void printRegister(raw_ostream &OS, const CodeGenRegister *Reg) {
+ OS << getQualifiedName(Reg->TheDef);
+}
+
+static void printSimpleValueType(raw_ostream &OS, MVT::SimpleValueType VT) {
+ OS << getEnumName(VT);
+}
+
//
// runMCDesc - Print out MC register descriptions.
//
@@ -246,87 +449,79 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
OS << "\n#ifdef GET_REGINFO_MC_DESC\n";
OS << "#undef GET_REGINFO_MC_DESC\n";
+ const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();
std::map<const CodeGenRegister*, CodeGenRegister::Set> Overlaps;
RegBank.computeOverlaps(Overlaps);
- OS << "namespace llvm {\n\n";
-
- const std::string &TargetName = Target.getName();
- std::string ClassName = TargetName + "GenMCRegisterInfo";
- OS << "struct " << ClassName << " : public MCRegisterInfo {\n"
- << " explicit " << ClassName << "(const MCRegisterDesc *D);\n";
- OS << "};\n";
+ // The lists of sub-registers, super-registers, and overlaps all go in the
+ // same array. That allows us to share suffixes.
+ typedef std::vector<const CodeGenRegister*> RegVec;
+ SmallVector<RegVec, 4> SubRegLists(Regs.size());
+ SmallVector<RegVec, 4> OverlapLists(Regs.size());
+ SequenceToOffsetTable<RegVec, CodeGenRegister::Less> RegSeqs;
- OS << "\nnamespace {\n";
-
- const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();
-
- // Emit an overlap list for all registers.
+ // Precompute register lists for the SequenceToOffsetTable.
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
const CodeGenRegister *Reg = Regs[i];
- const CodeGenRegister::Set &O = Overlaps[Reg];
- // Move Reg to the front so TRI::getAliasSet can share the list.
- OS << " const unsigned " << Reg->getName() << "_Overlaps[] = { "
- << getQualifiedName(Reg->TheDef) << ", ";
- for (CodeGenRegister::Set::const_iterator I = O.begin(), E = O.end();
- I != E; ++I)
- if (*I != Reg)
- OS << getQualifiedName((*I)->TheDef) << ", ";
- OS << "0 };\n";
- }
- // Emit the empty sub-registers list
- OS << " const unsigned Empty_SubRegsSet[] = { 0 };\n";
- // Loop over all of the registers which have sub-registers, emitting the
- // sub-registers list to memory.
- for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- const CodeGenRegister &Reg = *Regs[i];
- if (Reg.getSubRegs().empty())
- continue;
- // getSubRegs() orders by SubRegIndex. We want a topological order.
- SetVector<CodeGenRegister*> SR;
- Reg.addSubRegsPreOrder(SR);
- OS << " const unsigned " << Reg.getName() << "_SubRegsSet[] = { ";
- for (unsigned j = 0, je = SR.size(); j != je; ++j)
- OS << getQualifiedName(SR[j]->TheDef) << ", ";
- OS << "0 };\n";
+ // Compute the ordered sub-register list.
+ SetVector<const CodeGenRegister*> SR;
+ Reg->addSubRegsPreOrder(SR, RegBank);
+ RegVec &SubRegList = SubRegLists[i];
+ SubRegList.assign(SR.begin(), SR.end());
+ RegSeqs.add(SubRegList);
+
+ // Super-registers are already computed.
+ const RegVec &SuperRegList = Reg->getSuperRegs();
+ RegSeqs.add(SuperRegList);
+
+ // The list of overlaps doesn't need to have any particular order, except
+ // that Reg itself must be the first element. Pick an ordering that has one
+ // of the other lists as a suffix.
+ RegVec &OverlapList = OverlapLists[i];
+ const RegVec &Suffix = SubRegList.size() > SuperRegList.size() ?
+ SubRegList : SuperRegList;
+ CodeGenRegister::Set Omit(Suffix.begin(), Suffix.end());
+
+ // First element is Reg itself.
+ OverlapList.push_back(Reg);
+ Omit.insert(Reg);
+
+ // Any elements not in Suffix.
+ const CodeGenRegister::Set &OSet = Overlaps[Reg];
+ std::set_difference(OSet.begin(), OSet.end(),
+ Omit.begin(), Omit.end(),
+ std::back_inserter(OverlapList),
+ CodeGenRegister::Less());
+
+ // Finally, Suffix itself.
+ OverlapList.insert(OverlapList.end(), Suffix.begin(), Suffix.end());
+ RegSeqs.add(OverlapList);
}
- // Emit the empty super-registers list
- OS << " const unsigned Empty_SuperRegsSet[] = { 0 };\n";
- // Loop over all of the registers which have super-registers, emitting the
- // super-registers list to memory.
- for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- const CodeGenRegister &Reg = *Regs[i];
- const CodeGenRegister::SuperRegList &SR = Reg.getSuperRegs();
- if (SR.empty())
- continue;
- OS << " const unsigned " << Reg.getName() << "_SuperRegsSet[] = { ";
- for (unsigned j = 0, je = SR.size(); j != je; ++j)
- OS << getQualifiedName(SR[j]->TheDef) << ", ";
- OS << "0 };\n";
- }
- OS << "}\n"; // End of anonymous namespace...
+ // Compute the final layout of the sequence table.
+ RegSeqs.layout();
- OS << "\nMCRegisterDesc " << TargetName
+ OS << "namespace llvm {\n\n";
+
+ const std::string &TargetName = Target.getName();
+
+ // Emit the shared table of register lists.
+ OS << "extern const uint16_t " << TargetName << "RegLists[] = {\n";
+ RegSeqs.emit(OS, printRegister);
+ OS << "};\n\n";
+
+ OS << "extern const MCRegisterDesc " << TargetName
<< "RegDesc[] = { // Descriptors\n";
- OS << " { \"NOREG\",\t0,\t0,\t0 },\n";
+ OS << " { \"NOREG\", 0, 0, 0 },\n";
- // Now that register alias and sub-registers sets have been emitted, emit the
- // register descriptors now.
+ // Emit the register descriptors now.
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- const CodeGenRegister &Reg = *Regs[i];
- OS << " { \"";
- OS << Reg.getName() << "\",\t" << Reg.getName() << "_Overlaps,\t";
- if (!Reg.getSubRegs().empty())
- OS << Reg.getName() << "_SubRegsSet,\t";
- else
- OS << "Empty_SubRegsSet,\t";
- if (!Reg.getSuperRegs().empty())
- OS << Reg.getName() << "_SuperRegsSet";
- else
- OS << "Empty_SuperRegsSet";
- OS << " },\n";
+ const CodeGenRegister *Reg = Regs[i];
+ OS << " { \"" << Reg->getName() << "\", "
+ << RegSeqs.get(OverlapLists[i]) << ", "
+ << RegSeqs.get(SubRegLists[i]) << ", "
+ << RegSeqs.get(Reg->getSuperRegs()) << " },\n";
}
OS << "};\n\n"; // End of register descriptors...
@@ -345,7 +540,7 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
// Emit the register list now.
OS << " // " << Name << " Register Class...\n"
- << " static const unsigned " << Name
+ << " const uint16_t " << Name
<< "[] = {\n ";
for (unsigned i = 0, e = Order.size(); i != e; ++i) {
Record *Reg = Order[i];
@@ -354,7 +549,7 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
OS << "\n };\n\n";
OS << " // " << Name << " Bit set.\n"
- << " static const unsigned char " << Name
+ << " const uint8_t " << Name
<< "Bits[] = {\n ";
BitVectorEmitter BVE;
for (unsigned i = 0, e = Order.size(); i != e; ++i) {
@@ -367,37 +562,81 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
}
OS << "}\n\n";
- OS << "MCRegisterClass " << TargetName << "MCRegisterClasses[] = {\n";
+ OS << "extern const MCRegisterClass " << TargetName
+ << "MCRegisterClasses[] = {\n";
for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
const CodeGenRegisterClass &RC = *RegisterClasses[rc];
- OS << " MCRegisterClass(" << RC.getQualifiedName() + "RegClassID" << ", "
- << '\"' << RC.getName() << "\", "
+
+ // Asserts to make sure values will fit in table assuming types from
+ // MCRegisterInfo.h
+ assert((RC.SpillSize/8) <= 0xffff && "SpillSize too large.");
+ assert((RC.SpillAlignment/8) <= 0xffff && "SpillAlignment too large.");
+ assert(RC.CopyCost >= -128 && RC.CopyCost <= 127 && "Copy cost too large.");
+
+ OS << " { " << '\"' << RC.getName() << "\", "
+ << RC.getName() << ", " << RC.getName() << "Bits, "
+ << RC.getOrder().size() << ", sizeof(" << RC.getName() << "Bits), "
+ << RC.getQualifiedName() + "RegClassID" << ", "
<< RC.SpillSize/8 << ", "
<< RC.SpillAlignment/8 << ", "
<< RC.CopyCost << ", "
- << RC.Allocatable << ", "
- << RC.getName() << ", " << RC.getName() << " + "
- << RC.getOrder().size() << ", "
- << RC.getName() << "Bits, sizeof(" << RC.getName() << "Bits)"
- << "),\n";
+ << RC.Allocatable << " },\n";
}
OS << "};\n\n";
+ // Emit the data table for getSubReg().
+ ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
+ if (SubRegIndices.size()) {
+ OS << "const uint16_t " << TargetName << "SubRegTable[]["
+ << SubRegIndices.size() << "] = {\n";
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ const CodeGenRegister::SubRegMap &SRM = Regs[i]->getSubRegs();
+ OS << " /* " << Regs[i]->TheDef->getName() << " */\n";
+ if (SRM.empty()) {
+ OS << " {0},\n";
+ continue;
+ }
+ OS << " {";
+ for (unsigned j = 0, je = SubRegIndices.size(); j != je; ++j) {
+ // FIXME: We really should keep this to 80 columns...
+ CodeGenRegister::SubRegMap::const_iterator SubReg =
+ SRM.find(SubRegIndices[j]);
+ if (SubReg != SRM.end())
+ OS << getQualifiedName(SubReg->second->TheDef);
+ else
+ OS << "0";
+ if (j != je - 1)
+ OS << ", ";
+ }
+ OS << "}" << (i != e ? "," : "") << "\n";
+ }
+ OS << "};\n\n";
+ OS << "const uint16_t *get" << TargetName
+ << "SubRegTable() {\n return (const uint16_t *)" << TargetName
+ << "SubRegTable;\n}\n\n";
+ }
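
The SubRegTable turns getSubReg() into a constant-time load instead of the
nested switch removed further down in this patch. A sketch of the lookup; the
-1 offsets (register 0 is NOREG, sub-register index 0 is NoSubRegister) are
an assumption about how the consumer indexes the table:

    unsigned getSubReg(const unsigned short *SubRegTable, unsigned NumIndices,
                       unsigned RegNo, unsigned Idx) {
      if (RegNo == 0 || Idx == 0)
        return 0;
      return SubRegTable[(RegNo - 1) * NumIndices + (Idx - 1)];
    }
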
+
+ EmitRegMappingTables(OS, Regs, false);
+
// MCRegisterInfo initialization routine.
OS << "static inline void Init" << TargetName
<< "MCRegisterInfo(MCRegisterInfo *RI, unsigned RA, "
<< "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0) {\n";
OS << " RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
<< Regs.size()+1 << ", RA, " << TargetName << "MCRegisterClasses, "
- << RegisterClasses.size() << ");\n\n";
+ << RegisterClasses.size() << ", " << TargetName << "RegLists, ";
+ if (SubRegIndices.size() != 0)
+ OS << "(uint16_t*)" << TargetName << "SubRegTable, "
+ << SubRegIndices.size() << ");\n\n";
+ else
+ OS << "NULL, 0);\n\n";
EmitRegMapping(OS, Regs, false);
OS << "}\n\n";
-
OS << "} // End llvm namespace \n";
OS << "#endif // GET_REGINFO_MC_DESC\n\n";
}
@@ -413,8 +652,7 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
const std::string &TargetName = Target.getName();
std::string ClassName = TargetName + "GenRegisterInfo";
- OS << "#include \"llvm/Target/TargetRegisterInfo.h\"\n";
- OS << "#include <string>\n\n";
+ OS << "#include \"llvm/Target/TargetRegisterInfo.h\"\n\n";
OS << "namespace llvm {\n\n";
@@ -423,28 +661,20 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
<< "(unsigned RA, unsigned D = 0, unsigned E = 0);\n"
<< " virtual bool needsStackRealignment(const MachineFunction &) const\n"
<< " { return false; }\n"
- << " unsigned getSubReg(unsigned RegNo, unsigned Index) const;\n"
- << " unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;\n"
<< " unsigned composeSubRegIndices(unsigned, unsigned) const;\n"
<< " const TargetRegisterClass *"
"getSubClassWithSubReg(const TargetRegisterClass*, unsigned) const;\n"
+ << " const TargetRegisterClass *getMatchingSuperRegClass("
+ "const TargetRegisterClass*, const TargetRegisterClass*, "
+ "unsigned) const;\n"
+ << " const RegClassWeight &getRegClassWeight("
+ << "const TargetRegisterClass *RC) const;\n"
+ << " unsigned getNumRegPressureSets() const;\n"
+ << " unsigned getRegPressureSetLimit(unsigned Idx) const;\n"
+ << " const int *getRegClassPressureSets("
+ << "const TargetRegisterClass *RC) const;\n"
<< "};\n\n";
- const std::vector<Record*> &SubRegIndices = RegBank.getSubRegIndices();
- if (!SubRegIndices.empty()) {
- OS << "\n// Subregister indices\n";
- std::string Namespace = SubRegIndices[0]->getValueAsString("Namespace");
- if (!Namespace.empty())
- OS << "namespace " << Namespace << " {\n";
- OS << "enum {\n NoSubRegister,\n";
- for (unsigned i = 0, e = RegBank.getNumNamedIndices(); i != e; ++i)
- OS << " " << SubRegIndices[i]->getName() << ",\t// " << i+1 << "\n";
- OS << " NUM_TARGET_NAMED_SUBREGS = " << SubRegIndices.size()+1 << "\n";
- OS << "};\n";
- if (!Namespace.empty())
- OS << "}\n";
- }
-
ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();
if (!RegisterClasses.empty()) {
@@ -455,19 +685,11 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
const CodeGenRegisterClass &RC = *RegisterClasses[i];
const std::string &Name = RC.getName();
- // Output the register class definition.
- OS << " struct " << Name << "Class : public TargetRegisterClass {\n"
- << " " << Name << "Class();\n";
- if (!RC.AltOrderSelect.empty())
- OS << " ArrayRef<unsigned> "
- "getRawAllocationOrder(const MachineFunction&) const;\n";
- OS << " };\n";
-
// Output the extern for the instance.
- OS << " extern " << Name << "Class\t" << Name << "RegClass;\n";
+ OS << " extern const TargetRegisterClass " << Name << "RegClass;\n";
// Output the extern for the pointer to the instance (should remove).
- OS << " static TargetRegisterClass * const "<< Name <<"RegisterClass = &"
- << Name << "RegClass;\n";
+ OS << " static const TargetRegisterClass * const " << Name
+ << "RegisterClass = &" << Name << "RegClass;\n";
}
OS << "} // end of namespace " << TargetName << "\n\n";
}
@@ -489,8 +711,8 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
OS << "namespace llvm {\n\n";
// Get access to MCRegisterClass data.
- OS << "extern MCRegisterClass " << Target.getName()
- << "MCRegisterClasses[];\n";
+ OS << "extern const MCRegisterClass " << Target.getName()
+ << "MCRegisterClasses[];\n";
// Start out by emitting each of the register classes.
ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();
@@ -507,38 +729,21 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
AllocatableRegs.insert(Order.begin(), Order.end());
}
- OS << "namespace { // Register classes...\n";
-
- // Emit the ValueType arrays for each RegisterClass
- for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
- const CodeGenRegisterClass &RC = *RegisterClasses[rc];
-
- // Give the register class a legal C name if it's anonymous.
- std::string Name = RC.getName() + "VTs";
-
- // Emit the register list now.
- OS << " // " << Name
- << " Register Class Value Types...\n"
- << " static const EVT " << Name
- << "[] = {\n ";
- for (unsigned i = 0, e = RC.VTs.size(); i != e; ++i)
- OS << getEnumName(RC.VTs[i]) << ", ";
- OS << "MVT::Other\n };\n\n";
- }
- OS << "} // end anonymous namespace\n\n";
+ // Build a shared array of value types.
+ SequenceToOffsetTable<std::vector<MVT::SimpleValueType> > VTSeqs;
+ for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc)
+ VTSeqs.add(RegisterClasses[rc]->VTs);
+ VTSeqs.layout();
+ OS << "\nstatic const MVT::SimpleValueType VTLists[] = {\n";
+ VTSeqs.emit(OS, printSimpleValueType, "MVT::Other");
+ OS << "};\n";
// Now that all of the structs have been emitted, emit the instances.
if (!RegisterClasses.empty()) {
- OS << "namespace " << RegisterClasses[0]->Namespace
- << " { // Register class instances\n";
- for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i)
- OS << " " << RegisterClasses[i]->getName() << "Class\t"
- << RegisterClasses[i]->getName() << "RegClass;\n";
-
std::map<unsigned, std::set<unsigned> > SuperRegClassMap;
- OS << "\n static const TargetRegisterClass* const "
- << "NullRegClasses[] = { NULL };\n\n";
+ OS << "\nstatic const TargetRegisterClass *const "
+ << "NullRegClasses[] = { NULL };\n\n";
unsigned NumSubRegIndices = RegBank.getSubRegIndices().size();
@@ -563,10 +768,10 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
// Give the register class a legal C name if it's anonymous.
std::string Name = RC.getName();
- OS << " // " << Name
+ OS << "// " << Name
<< " Super-register Classes...\n"
- << " static const TargetRegisterClass* const "
- << Name << "SuperRegClasses[] = {\n ";
+ << "static const TargetRegisterClass *const "
+ << Name << "SuperRegClasses[] = {\n ";
bool Empty = true;
std::map<unsigned, std::set<unsigned> >::iterator I =
@@ -583,7 +788,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
}
OS << (!Empty ? ", " : "") << "NULL";
- OS << "\n };\n\n";
+ OS << "\n};\n\n";
}
}
@@ -594,9 +799,9 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
// Give the register class a legal C name if it's anonymous.
std::string Name = RC.getName();
- OS << " static const unsigned " << Name << "SubclassMask[] = { ";
+ OS << "static const uint32_t " << Name << "SubclassMask[] = {\n ";
printBitVectorAsHex(OS, RC.getSubClasses(), 32);
- OS << "};\n\n";
+ OS << "\n};\n\n";
}
// Emit NULL terminated super-class lists.
@@ -608,54 +813,71 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
if (Supers.empty())
continue;
- OS << " static const TargetRegisterClass* const "
+ OS << "static const TargetRegisterClass *const "
<< RC.getName() << "Superclasses[] = {\n";
for (unsigned i = 0; i != Supers.size(); ++i)
- OS << " &" << Supers[i]->getQualifiedName() << "RegClass,\n";
- OS << " NULL\n };\n\n";
+ OS << " &" << Supers[i]->getQualifiedName() << "RegClass,\n";
+ OS << " NULL\n};\n\n";
}
// Emit methods.
for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
const CodeGenRegisterClass &RC = *RegisterClasses[i];
- OS << RC.getName() << "Class::" << RC.getName()
- << "Class() : TargetRegisterClass(&"
- << Target.getName() << "MCRegisterClasses["
- << RC.getName() + "RegClassID" << "], "
- << RC.getName() + "VTs" << ", "
- << RC.getName() + "SubclassMask" << ", ";
- if (RC.getSuperClasses().empty())
- OS << "NullRegClasses, ";
- else
- OS << RC.getName() + "Superclasses, ";
- OS << (NumSubRegIndices ? RC.getName() + "Super" : std::string("Null"))
- << "RegClasses"
- << ") {}\n";
if (!RC.AltOrderSelect.empty()) {
OS << "\nstatic inline unsigned " << RC.getName()
<< "AltOrderSelect(const MachineFunction &MF) {"
- << RC.AltOrderSelect << "}\n\nArrayRef<unsigned> "
- << RC.getName() << "Class::"
- << "getRawAllocationOrder(const MachineFunction &MF) const {\n";
+ << RC.AltOrderSelect << "}\n\n"
+ << "static ArrayRef<uint16_t> " << RC.getName()
+ << "GetRawAllocationOrder(const MachineFunction &MF) {\n";
for (unsigned oi = 1 , oe = RC.getNumOrders(); oi != oe; ++oi) {
ArrayRef<Record*> Elems = RC.getOrder(oi);
- OS << " static const unsigned AltOrder" << oi << "[] = {";
- for (unsigned elem = 0; elem != Elems.size(); ++elem)
- OS << (elem ? ", " : " ") << getQualifiedName(Elems[elem]);
- OS << " };\n";
+ if (!Elems.empty()) {
+ OS << " static const uint16_t AltOrder" << oi << "[] = {";
+ for (unsigned elem = 0; elem != Elems.size(); ++elem)
+ OS << (elem ? ", " : " ") << getQualifiedName(Elems[elem]);
+ OS << " };\n";
+ }
}
OS << " const MCRegisterClass &MCR = " << Target.getName()
- << "MCRegisterClasses[" << RC.getQualifiedName() + "RegClassID];"
- << " static const ArrayRef<unsigned> Order[] = {\n"
+ << "MCRegisterClasses[" << RC.getQualifiedName() + "RegClassID];\n"
+ << " const ArrayRef<uint16_t> Order[] = {\n"
<< " makeArrayRef(MCR.begin(), MCR.getNumRegs()";
for (unsigned oi = 1, oe = RC.getNumOrders(); oi != oe; ++oi)
- OS << "),\n makeArrayRef(AltOrder" << oi;
+ if (RC.getOrder(oi).empty())
+ OS << "),\n ArrayRef<uint16_t>(";
+ else
+ OS << "),\n makeArrayRef(AltOrder" << oi;
OS << ")\n };\n const unsigned Select = " << RC.getName()
<< "AltOrderSelect(MF);\n assert(Select < " << RC.getNumOrders()
<< ");\n return Order[Select];\n}\n";
}
}
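
Expanded by hand, the strings above assemble into something like the
following for an invented class GPR on target Foo with one alternative order.
This only shows the shape of the emitted text (it needs LLVM's ArrayRef and
MCRegisterClass to compile), and the selector body is pasted verbatim from
the .td record:

    static inline unsigned GPRAltOrderSelect(const MachineFunction &MF) {
      // The body comes straight from the record's AltOrderSelect field.
      return 0;
    }

    static ArrayRef<uint16_t> GPRGetRawAllocationOrder(const MachineFunction &MF) {
      static const uint16_t AltOrder1[] = { Foo::R1, Foo::R0 };
      const MCRegisterClass &MCR = FooMCRegisterClasses[Foo::GPRRegClassID];
      const ArrayRef<uint16_t> Order[] = {
        makeArrayRef(MCR.begin(), MCR.getNumRegs()),
        makeArrayRef(AltOrder1)
      };
      const unsigned Select = GPRAltOrderSelect(MF);
      assert(Select < 2);
      return Order[Select];
    }
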
+ // Now emit the actual value-initialized register class instances.
+ OS << "namespace " << RegisterClasses[0]->Namespace
+ << " { // Register class instances\n";
+
+ for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
+ const CodeGenRegisterClass &RC = *RegisterClasses[i];
+ OS << " extern const TargetRegisterClass "
+ << RegisterClasses[i]->getName() << "RegClass = {\n "
+ << '&' << Target.getName() << "MCRegisterClasses[" << RC.getName()
+ << "RegClassID],\n "
+ << "VTLists + " << VTSeqs.get(RC.VTs) << ",\n "
+ << RC.getName() << "SubclassMask,\n ";
+ if (RC.getSuperClasses().empty())
+ OS << "NullRegClasses,\n ";
+ else
+ OS << RC.getName() << "Superclasses,\n ";
+ OS << (NumSubRegIndices ? RC.getName() + "Super" : std::string("Null"))
+ << "RegClasses,\n ";
+ if (RC.AltOrderSelect.empty())
+ OS << "0\n";
+ else
+ OS << RC.getName() << "GetRawAllocationOrder\n";
+ OS << " };\n\n";
+ }
+
OS << "}\n";
}
@@ -669,28 +891,27 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
// Emit extra information about registers.
const std::string &TargetName = Target.getName();
- OS << "\n static const TargetRegisterInfoDesc "
- << TargetName << "RegInfoDesc[] = "
- << "{ // Extra Descriptors\n";
- OS << " { 0, 0 },\n";
+ OS << "\nstatic const TargetRegisterInfoDesc "
+ << TargetName << "RegInfoDesc[] = { // Extra Descriptors\n";
+ OS << " { 0, 0 },\n";
const std::vector<CodeGenRegister*> &Regs = RegBank.getRegisters();
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
const CodeGenRegister &Reg = *Regs[i];
- OS << " { ";
+ OS << " { ";
OS << Reg.CostPerUse << ", "
<< int(AllocatableRegs.count(Reg.TheDef)) << " },\n";
}
- OS << " };\n"; // End of register descriptors...
+ OS << "};\n"; // End of register descriptors...
// Calculate the mapping of subregister+index pairs to physical registers.
- // This will also create further anonymous indexes.
+ // This will also create further anonymous indices.
unsigned NamedIndices = RegBank.getNumNamedIndices();
// Emit SubRegIndex names, skipping 0
- const std::vector<Record*> &SubRegIndices = RegBank.getSubRegIndices();
- OS << "\n static const char *const " << TargetName
+ ArrayRef<CodeGenSubRegIndex*> SubRegIndices = RegBank.getSubRegIndices();
+ OS << "\nstatic const char *const " << TargetName
<< "SubRegIndexTable[] = { \"";
for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
OS << SubRegIndices[i]->getName();
@@ -699,7 +920,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
}
OS << "\" };\n\n";
- // Emit names of the anonymus subreg indexes.
+ // Emit names of the anonymous subreg indices.
if (SubRegIndices.size() > NamedIndices) {
OS << " enum {";
for (unsigned i = NamedIndices, e = SubRegIndices.size(); i != e; ++i) {
@@ -713,48 +934,6 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
std::string ClassName = Target.getName() + "GenRegisterInfo";
- // Emit the subregister + index mapping function based on the information
- // calculated above.
- OS << "unsigned " << ClassName
- << "::getSubReg(unsigned RegNo, unsigned Index) const {\n"
- << " switch (RegNo) {\n"
- << " default:\n return 0;\n";
- for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- const CodeGenRegister::SubRegMap &SRM = Regs[i]->getSubRegs();
- if (SRM.empty())
- continue;
- OS << " case " << getQualifiedName(Regs[i]->TheDef) << ":\n";
- OS << " switch (Index) {\n";
- OS << " default: return 0;\n";
- for (CodeGenRegister::SubRegMap::const_iterator ii = SRM.begin(),
- ie = SRM.end(); ii != ie; ++ii)
- OS << " case " << getQualifiedName(ii->first)
- << ": return " << getQualifiedName(ii->second->TheDef) << ";\n";
- OS << " };\n" << " break;\n";
- }
- OS << " };\n";
- OS << " return 0;\n";
- OS << "}\n\n";
-
- OS << "unsigned " << ClassName
- << "::getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const {\n"
- << " switch (RegNo) {\n"
- << " default:\n return 0;\n";
- for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- const CodeGenRegister::SubRegMap &SRM = Regs[i]->getSubRegs();
- if (SRM.empty())
- continue;
- OS << " case " << getQualifiedName(Regs[i]->TheDef) << ":\n";
- for (CodeGenRegister::SubRegMap::const_iterator ii = SRM.begin(),
- ie = SRM.end(); ii != ie; ++ii)
- OS << " if (SubRegNo == " << getQualifiedName(ii->second->TheDef)
- << ") return " << getQualifiedName(ii->first) << ";\n";
- OS << " return 0;\n";
- }
- OS << " };\n";
- OS << " return 0;\n";
- OS << "}\n\n";
-
// Emit composeSubRegIndices
OS << "unsigned " << ClassName
<< "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
@@ -763,15 +942,15 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
bool Open = false;
for (unsigned j = 0; j != e; ++j) {
- if (Record *Comp = RegBank.getCompositeSubRegIndex(SubRegIndices[i],
- SubRegIndices[j])) {
+ if (CodeGenSubRegIndex *Comp =
+ SubRegIndices[i]->compose(SubRegIndices[j])) {
if (!Open) {
- OS << " case " << getQualifiedName(SubRegIndices[i])
+ OS << " case " << SubRegIndices[i]->getQualifiedName()
<< ": switch(IdxB) {\n default: return IdxB;\n";
Open = true;
}
- OS << " case " << getQualifiedName(SubRegIndices[j])
- << ": return " << getQualifiedName(Comp) << ";\n";
+ OS << " case " << SubRegIndices[j]->getQualifiedName()
+ << ": return " << Comp->getQualifiedName() << ";\n";
}
}
if (Open)
@@ -800,7 +979,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
const CodeGenRegisterClass &RC = *RegisterClasses[rci];
OS << " {\t// " << RC.getName() << "\n";
for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
- Record *Idx = SubRegIndices[sri];
+ CodeGenSubRegIndex *Idx = SubRegIndices[sri];
if (CodeGenRegisterClass *SRC = RC.getSubClassWithSubReg(Idx))
OS << " " << SRC->EnumValue + 1 << ",\t// " << Idx->getName()
<< " -> " << SRC->getName() << "\n";
@@ -817,22 +996,106 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
}
OS << "}\n\n";
+ // Emit getMatchingSuperRegClass.
+ OS << "const TargetRegisterClass *" << ClassName
+ << "::getMatchingSuperRegClass(const TargetRegisterClass *A,"
+ " const TargetRegisterClass *B, unsigned Idx) const {\n";
+ if (SubRegIndices.empty()) {
+ OS << " llvm_unreachable(\"Target has no sub-registers\");\n";
+ } else {
+ // We need to find the largest sub-class of A such that every register has
+ // an Idx sub-register in B. Map (B, Idx) to a bit-vector of
+ // super-register classes that map into B. Then compute the largest common
+ // sub-class with A by taking advantage of the register class ordering,
+ // like getCommonSubClass().
+
+ // Bitvector table is NumRCs x NumSubIndexes x BVWords, where BVWords is
+ // the number of 32-bit words required to represent all register classes.
+ const unsigned BVWords = (RegisterClasses.size()+31)/32;
+ BitVector BV(RegisterClasses.size());
+
+ OS << " static const uint32_t Table[" << RegisterClasses.size()
+ << "][" << SubRegIndices.size() << "][" << BVWords << "] = {\n";
+ for (unsigned rci = 0, rce = RegisterClasses.size(); rci != rce; ++rci) {
+ const CodeGenRegisterClass &RC = *RegisterClasses[rci];
+ OS << " {\t// " << RC.getName() << "\n";
+ for (unsigned sri = 0, sre = SubRegIndices.size(); sri != sre; ++sri) {
+ CodeGenSubRegIndex *Idx = SubRegIndices[sri];
+ BV.reset();
+ RC.getSuperRegClasses(Idx, BV);
+ OS << " { ";
+ printBitVectorAsHex(OS, BV, 32);
+ OS << "},\t// " << Idx->getName() << '\n';
+ }
+ OS << " },\n";
+ }
+ OS << " };\n assert(A && B && \"Missing regclass\");\n"
+ << " --Idx;\n"
+ << " assert(Idx < " << SubRegIndices.size() << " && \"Bad subreg\");\n"
+ << " const uint32_t *TV = Table[B->getID()][Idx];\n"
+ << " const uint32_t *SC = A->getSubClassMask();\n"
+ << " for (unsigned i = 0; i != " << BVWords << "; ++i)\n"
+ << " if (unsigned Common = TV[i] & SC[i])\n"
+ << " return getRegClass(32*i + CountTrailingZeros_32(Common));\n"
+ << " return 0;\n";
+ }
+ OS << "}\n\n";
+
+ EmitRegUnitPressure(OS, RegBank, ClassName);
+
// Emit the constructor of the class...
- OS << "extern MCRegisterDesc " << TargetName << "RegDesc[];\n";
+ OS << "extern const MCRegisterDesc " << TargetName << "RegDesc[];\n";
+ OS << "extern const uint16_t " << TargetName << "RegLists[];\n";
+ if (SubRegIndices.size() != 0)
+ OS << "extern const uint16_t *get" << TargetName
+ << "SubRegTable();\n";
- OS << ClassName << "::" << ClassName
+ EmitRegMappingTables(OS, Regs, true);
+
+ OS << ClassName << "::\n" << ClassName
<< "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour)\n"
<< " : TargetRegisterInfo(" << TargetName << "RegInfoDesc"
<< ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n"
- << " " << TargetName << "SubRegIndexTable) {\n"
+ << " " << TargetName << "SubRegIndexTable) {\n"
<< " InitMCRegisterInfo(" << TargetName << "RegDesc, "
- << Regs.size()+1 << ", RA, " << TargetName << "MCRegisterClasses, "
- << RegisterClasses.size() << ");\n\n";
+ << Regs.size()+1 << ", RA,\n " << TargetName
+ << "MCRegisterClasses, " << RegisterClasses.size() << ",\n"
+ << " " << TargetName << "RegLists,\n"
+ << " ";
+ if (SubRegIndices.size() != 0)
+ OS << "get" << TargetName << "SubRegTable(), "
+ << SubRegIndices.size() << ");\n\n";
+ else
+ OS << "NULL, 0);\n\n";
EmitRegMapping(OS, Regs, true);
OS << "}\n\n";
+
+ // Emit CalleeSavedRegs information.
+ std::vector<Record*> CSRSets =
+ Records.getAllDerivedDefinitions("CalleeSavedRegs");
+ for (unsigned i = 0, e = CSRSets.size(); i != e; ++i) {
+ Record *CSRSet = CSRSets[i];
+ const SetTheory::RecVec *Regs = RegBank.getSets().expand(CSRSet);
+ assert(Regs && "Cannot expand CalleeSavedRegs instance");
+
+ // Emit the *_SaveList list of callee-saved registers.
+ OS << "static const uint16_t " << CSRSet->getName()
+ << "_SaveList[] = { ";
+ for (unsigned r = 0, re = Regs->size(); r != re; ++r)
+ OS << getQualifiedName((*Regs)[r]) << ", ";
+ OS << "0 };\n";
+
+ // Emit the *_RegMask bit mask of call-preserved registers.
+ OS << "static const uint32_t " << CSRSet->getName()
+ << "_RegMask[] = { ";
+ printBitVectorAsHex(OS, RegBank.computeCoveredRegisters(*Regs), 32);
+ OS << "};\n";
+ }
+ OS << "\n\n";
+
OS << "} // End llvm namespace \n";
OS << "#endif // GET_REGINFO_TARGET_DESC\n\n";
}
diff --git a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.h b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.h
index 0fd4d07..ee9903c 100644
--- a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.h
+++ b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.h
@@ -50,7 +50,13 @@ public:
private:
void EmitRegMapping(raw_ostream &o,
const std::vector<CodeGenRegister*> &Regs, bool isCtor);
+ void EmitRegMappingTables(raw_ostream &o,
+ const std::vector<CodeGenRegister*> &Regs,
+ bool isCtor);
void EmitRegClasses(raw_ostream &OS, CodeGenTarget &Target);
+
+ void EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
+ const std::string &ClassName);
};
} // End llvm namespace
diff --git a/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h b/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h
new file mode 100644
index 0000000..97c764e
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/SequenceToOffsetTable.h
@@ -0,0 +1,139 @@
+//===-- SequenceToOffsetTable.h - Compress similar sequences ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// SequenceToOffsetTable can be used to emit a number of null-terminated
+// sequences as one big array. A sequence that is a suffix of another shares
+// the other's memory.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TBLGEN_SEQUENCE_TO_OFFSET_TABLE_H
+#define TBLGEN_SEQUENCE_TO_OFFSET_TABLE_H
+
+#include "llvm/Support/raw_ostream.h"
+#include <functional>
+#include <algorithm>
+#include <vector>
+#include <cassert>
+#include <cctype>
+
+namespace llvm {
+
+/// SequenceToOffsetTable - Collect a number of terminated sequences of T.
+/// Compute the layout of a table that contains all the sequences, possibly by
+/// reusing entries.
+///
+/// @param SeqT The sequence container. (vector or string).
+/// @param Less A stable comparator for SeqT elements.
+template<typename SeqT, typename Less = std::less<typename SeqT::value_type> >
+class SequenceToOffsetTable {
+ typedef typename SeqT::value_type ElemT;
+
+ // Define a comparator for SeqT that sorts a suffix immediately before a
+ // sequence with that suffix.
+ struct SeqLess : public std::binary_function<SeqT, SeqT, bool> {
+ Less L;
+ bool operator()(const SeqT &A, const SeqT &B) const {
+ return std::lexicographical_compare(A.rbegin(), A.rend(),
+ B.rbegin(), B.rend(), L);
+ }
+ };
+
+ // Keep sequences ordered according to SeqLess so suffixes are easy to find.
+ // Map each sequence to its offset in the table.
+ typedef std::map<SeqT, unsigned, SeqLess> SeqMap;
+
+ // Sequences added so far, with suffixes removed.
+ SeqMap Seqs;
+
+ // Entries in the final table, or 0 before layout was called.
+ unsigned Entries;
+
+ // isSuffix - Returns true if A is a suffix of B.
+ static bool isSuffix(const SeqT &A, const SeqT &B) {
+ return A.size() <= B.size() && std::equal(A.rbegin(), A.rend(), B.rbegin());
+ }
+
+public:
+ SequenceToOffsetTable() : Entries(0) {}
+
+ /// add - Add a sequence to the table.
+ /// This must be called before layout().
+ void add(const SeqT &Seq) {
+ assert(Entries == 0 && "Cannot call add() after layout()");
+ typename SeqMap::iterator I = Seqs.lower_bound(Seq);
+
+ // If SeqMap contains a sequence that has Seq as a suffix, I will be
+ // pointing to it.
+ if (I != Seqs.end() && isSuffix(Seq, I->first))
+ return;
+
+ I = Seqs.insert(I, std::make_pair(Seq, 0u));
+
+ // The entry before I may be a suffix of Seq that can now be erased.
+ if (I != Seqs.begin() && isSuffix((--I)->first, Seq))
+ Seqs.erase(I);
+ }
+
+ /// layout - Computes the final table layout.
+ void layout() {
+ assert(Entries == 0 && "Can only call layout() once");
+ // Lay out the table in Seqs iteration order.
+ for (typename SeqMap::iterator I = Seqs.begin(), E = Seqs.end(); I != E;
+ ++I) {
+ I->second = Entries;
+ // Include space for a terminator.
+ Entries += I->first.size() + 1;
+ }
+ }
+
+ /// get - Returns the offset of Seq in the final table.
+ unsigned get(const SeqT &Seq) const {
+ assert(Entries && "Call layout() before get()");
+ typename SeqMap::const_iterator I = Seqs.lower_bound(Seq);
+ assert(I != Seqs.end() && isSuffix(Seq, I->first) &&
+ "get() called with sequence that wasn't added first");
+ return I->second + (I->first.size() - Seq.size());
+ }
+
+ /// emit - Print out the table as the body of an array initializer.
+ /// Use the Print function to print elements.
+ void emit(raw_ostream &OS,
+ void (*Print)(raw_ostream&, ElemT),
+ const char *Term = "0") const {
+ assert(Entries && "Call layout() before emit()");
+ for (typename SeqMap::const_iterator I = Seqs.begin(), E = Seqs.end();
+ I != E; ++I) {
+ OS << " /* " << I->second << " */ ";
+ for (typename SeqT::const_iterator SI = I->first.begin(),
+ SE = I->first.end(); SI != SE; ++SI) {
+ Print(OS, *SI);
+ OS << ", ";
+ }
+ OS << Term << ",\n";
+ }
+ }
+};
+
+// Helper function for SequenceToOffsetTable<string>.
+static inline void printChar(raw_ostream &OS, char C) {
+ unsigned char UC(C);
+ if (isalnum(UC) || ispunct(UC)) {
+ OS << '\'';
+ if (C == '\\' || C == '\'')
+ OS << '\\';
+ OS << C << '\'';
+ } else {
+ OS << unsigned(UC);
+ }
+}
+
+} // end namespace llvm
+
+#endif
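
A minimal usage sketch of the class, assuming it is built inside the tblgen
tree so the LLVM support headers are available: {3, 4} is a suffix of
{2, 3, 4}, so after layout() both sequences resolve into one shared run.

    #include "SequenceToOffsetTable.h"
    #include "llvm/Support/raw_ostream.h"
    #include <vector>

    using namespace llvm;

    static void printElem(raw_ostream &OS, unsigned E) { OS << E; }

    int main() {
      std::vector<unsigned> Long, Short;
      Long.push_back(2); Long.push_back(3); Long.push_back(4);
      Short.push_back(3); Short.push_back(4);

      SequenceToOffsetTable<std::vector<unsigned> > Table;
      Table.add(Long);
      Table.add(Short);
      Table.layout();

      // Long lands at offset 0; Short reuses its tail at offset 1.
      outs() << Table.get(Long) << ' ' << Table.get(Short) << '\n';
      Table.emit(outs(), printElem);  // /* 0 */ 2, 3, 4, 0,
      return 0;
    }
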
diff --git a/contrib/llvm/utils/TableGen/SetTheory.cpp b/contrib/llvm/utils/TableGen/SetTheory.cpp
index bef73f3..0649fd1 100644
--- a/contrib/llvm/utils/TableGen/SetTheory.cpp
+++ b/contrib/llvm/utils/TableGen/SetTheory.cpp
@@ -139,6 +139,24 @@ struct DecimateOp : public SetIntBinOp {
}
};
+// (interleave S1, S2, ...) Interleave elements of the arguments.
+struct InterleaveOp : public SetTheory::Operator {
+ void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
+ // Evaluate the arguments individually.
+ SmallVector<RecSet, 4> Args(Expr->getNumArgs());
+ unsigned MaxSize = 0;
+ for (unsigned i = 0, e = Expr->getNumArgs(); i != e; ++i) {
+ ST.evaluate(Expr->getArg(i), Args[i]);
+ MaxSize = std::max(MaxSize, unsigned(Args[i].size()));
+ }
+ // Interleave arguments into Elts.
+ for (unsigned n = 0; n != MaxSize; ++n)
+ for (unsigned i = 0, e = Expr->getNumArgs(); i != e; ++i)
+ if (n < Args[i].size())
+ Elts.insert(Args[i][n]);
+ }
+};
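
A stand-alone rendering of InterleaveOp's loop on two invented register name
lists, showing the round-robin order it produces; shorter arguments simply
run out early:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      const char *A[] = { "R0", "R2", "R4" };
      const char *B[] = { "R1", "R3" };
      std::vector<const char*> Args[2];
      Args[0].assign(A, A + 3);
      Args[1].assign(B, B + 2);

      const std::size_t MaxSize = std::max(Args[0].size(), Args[1].size());
      for (std::size_t n = 0; n != MaxSize; ++n)
        for (unsigned i = 0; i != 2; ++i)
          if (n < Args[i].size())
            std::printf("%s ", Args[i][n]);
      std::printf("\n");  // R0 R1 R2 R3 R4
      return 0;
    }
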
+
// (sequence "Format", From, To) Generate a sequence of records by name.
struct SequenceOp : public SetTheory::Operator {
void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) {
@@ -198,6 +216,10 @@ struct FieldExpander : public SetTheory::Expander {
};
} // end anonymous namespace
+void SetTheory::Operator::anchor() { }
+
+void SetTheory::Expander::anchor() { }
+
SetTheory::SetTheory() {
addOperator("add", new AddOp);
addOperator("sub", new SubOp);
@@ -207,6 +229,7 @@ SetTheory::SetTheory() {
addOperator("rotl", new RotOp(false));
addOperator("rotr", new RotOp(true));
addOperator("decimate", new DecimateOp);
+ addOperator("interleave", new InterleaveOp);
addOperator("sequence", new SequenceOp);
}
diff --git a/contrib/llvm/utils/TableGen/SetTheory.h b/contrib/llvm/utils/TableGen/SetTheory.h
index 6e8313b..b394058 100644
--- a/contrib/llvm/utils/TableGen/SetTheory.h
+++ b/contrib/llvm/utils/TableGen/SetTheory.h
@@ -65,7 +65,9 @@ public:
typedef SmallSetVector<Record*, 16> RecSet;
/// Operator - A callback representing a DAG operator.
- struct Operator {
+ class Operator {
+ virtual void anchor();
+ public:
virtual ~Operator() {}
/// apply - Apply this operator to Expr's arguments and insert the result
@@ -76,7 +78,9 @@ public:
/// Expander - A callback function that can transform a Record representing a
/// set into a fully expanded list of elements. Expanders provide a way for
/// users to define named sets that can be used in DAG expressions.
- struct Expander {
+ class Expander {
+ virtual void anchor();
+ public:
virtual ~Expander() {}
virtual void expand(SetTheory&, Record*, RecSet &Elts) =0;
diff --git a/contrib/llvm/utils/TableGen/StringToOffsetTable.h b/contrib/llvm/utils/TableGen/StringToOffsetTable.h
index ac9422c..803f5bd 100644
--- a/contrib/llvm/utils/TableGen/StringToOffsetTable.h
+++ b/contrib/llvm/utils/TableGen/StringToOffsetTable.h
@@ -26,16 +26,17 @@ class StringToOffsetTable {
std::string AggregateString;
public:
- unsigned GetOrAddStringOffset(StringRef Str) {
- unsigned &Entry = StringOffset[Str];
- if (Entry == 0) {
+ unsigned GetOrAddStringOffset(StringRef Str, bool appendZero = true) {
+ StringMapEntry<unsigned> &Entry = StringOffset.GetOrCreateValue(Str, -1U);
+ if (Entry.getValue() == -1U) {
// Add the string to the aggregate if this is the first time found.
- Entry = AggregateString.size();
+ Entry.setValue(AggregateString.size());
AggregateString.append(Str.begin(), Str.end());
- AggregateString += '\0';
+ if (appendZero)
+ AggregateString += '\0';
}
- return Entry;
+ return Entry.getValue();
}
void EmitString(raw_ostream &O) {
diff --git a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
index 103a403..986c50f 100644
--- a/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -39,28 +39,41 @@ void SubtargetEmitter::Enumeration(raw_ostream &OS,
OS << "namespace " << Target << " {\n";
- // Open enumeration
- OS << "enum {\n";
+ // For bit flag enumerations with more than 32 items, emit uint64_t
+ // constants instead of an enum; enumerators are not guaranteed to hold
+ // values wider than int. Emit an enum for everything else.
+ if (isBits && N > 32) {
+ // For each record
+ for (unsigned i = 0; i < N; i++) {
+ // Next record
+ Record *Def = DefList[i];
+
+ // Get and emit name and expression (1 << i)
+ OS << " const uint64_t " << Def->getName() << " = 1ULL << " << i << ";\n";
+ }
+ } else {
+ // Open enumeration
+ OS << "enum {\n";
- // For each record
- for (unsigned i = 0; i < N;) {
- // Next record
- Record *Def = DefList[i];
+ // For each record
+ for (unsigned i = 0; i < N;) {
+ // Next record
+ Record *Def = DefList[i];
- // Get and emit name
- OS << " " << Def->getName();
+ // Get and emit name
+ OS << " " << Def->getName();
- // If bit flags then emit expression (1 << i)
- if (isBits) OS << " = " << " 1ULL << " << i;
+ // If bit flags then emit expression (1 << i)
+ if (isBits) OS << " = " << " 1ULL << " << i;
- // Depending on 'if more in the list' emit comma
- if (++i < N) OS << ",";
+ // Depending on 'if more in the list' emit comma
+ if (++i < N) OS << ",";
- OS << "\n";
- }
+ OS << "\n";
+ }
- // Close enumeration
- OS << "};\n";
+ // Close enumeration
+ OS << "};\n";
+ }
OS << "}\n";
}
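
Illustrative output for the more-than-32-bits case, with invented feature
names. Past bit 31 an enumerator can no longer portably hold the value, which
is why the emitter falls back to plain constants:

    #include <stdint.h>

    const uint64_t FeatureFoo = 1ULL << 0;
    const uint64_t FeatureBar = 1ULL << 33;
    const uint64_t FeatureBaz = 1ULL << 40;
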
@@ -81,7 +94,8 @@ unsigned SubtargetEmitter::FeatureKeyValues(raw_ostream &OS) {
// Begin feature table
OS << "// Sorted (by key) array of values for CPU features.\n"
- << "llvm::SubtargetFeatureKV " << Target << "FeatureKV[] = {\n";
+ << "extern const llvm::SubtargetFeatureKV " << Target
+ << "FeatureKV[] = {\n";
// For each feature
unsigned NumFeatures = 0;
@@ -140,7 +154,8 @@ unsigned SubtargetEmitter::CPUKeyValues(raw_ostream &OS) {
// Begin processor table
OS << "// Sorted (by key) array of values for CPU subtype.\n"
- << "llvm::SubtargetFeatureKV " << Target << "SubTypeKV[] = {\n";
+ << "extern const llvm::SubtargetFeatureKV " << Target
+ << "SubTypeKV[] = {\n";
// For each processor
for (unsigned i = 0, N = ProcessorList.size(); i < N;) {
@@ -327,9 +342,9 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
OS << "\n// Pipeline forwarding pathes for itineraries \"" << Name
<< "\"\n" << "namespace " << Name << "Bypass {\n";
- OS << " unsigned NoBypass = 0;\n";
+ OS << " const unsigned NoBypass = 0;\n";
for (unsigned j = 0, BPN = BPs.size(); j < BPN; ++j)
- OS << " unsigned " << BPs[j]->getName()
+ OS << " const unsigned " << BPs[j]->getName()
<< " = 1 << " << j << ";\n";
OS << "}\n";
@@ -337,16 +352,17 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
}
// Begin stages table
- std::string StageTable = "\nllvm::InstrStage " + Target + "Stages[] = {\n";
+ std::string StageTable = "\nextern const llvm::InstrStage " + Target +
+ "Stages[] = {\n";
StageTable += " { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary\n";
// Begin operand cycle table
- std::string OperandCycleTable = "unsigned " + Target +
+ std::string OperandCycleTable = "extern const unsigned " + Target +
"OperandCycles[] = {\n";
OperandCycleTable += " 0, // No itinerary\n";
// Begin pipeline bypass table
- std::string BypassTable = "unsigned " + Target +
+ std::string BypassTable = "extern const unsigned " + Target +
"ForwardingPathes[] = {\n";
BypassTable += " 0, // No itinerary\n";
@@ -488,7 +504,7 @@ EmitProcessorData(raw_ostream &OS,
// Begin processor itinerary table
OS << "\n";
- OS << "llvm::InstrItinerary " << Name << "[] = {\n";
+ OS << "static const llvm::InstrItinerary " << Name << "[] = {\n";
// For each itinerary class
std::vector<InstrItinerary> &ItinList = *ProcListIter++;
@@ -530,7 +546,7 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) {
// Begin processor table
OS << "\n";
OS << "// Sorted (by key) array of itineraries for CPU subtype.\n"
- << "llvm::SubtargetInfoKV "
+ << "extern const llvm::SubtargetInfoKV "
<< Target << "ProcItinKV[] = {\n";
// For each processor
@@ -708,9 +724,13 @@ void SubtargetEmitter::run(raw_ostream &OS) {
std::string ClassName = Target + "GenSubtargetInfo";
OS << "namespace llvm {\n";
+ OS << "class DFAPacketizer;\n";
OS << "struct " << ClassName << " : public TargetSubtargetInfo {\n"
<< " explicit " << ClassName << "(StringRef TT, StringRef CPU, "
<< "StringRef FS);\n"
+ << "public:\n"
+ << " DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
+ << " const;\n"
<< "};\n";
OS << "} // End llvm namespace \n";
@@ -720,13 +740,13 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << "#undef GET_SUBTARGETINFO_CTOR\n";
OS << "namespace llvm {\n";
- OS << "extern llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
- OS << "extern llvm::SubtargetFeatureKV " << Target << "SubTypeKV[];\n";
+ OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
+ OS << "extern const llvm::SubtargetFeatureKV " << Target << "SubTypeKV[];\n";
if (HasItineraries) {
- OS << "extern llvm::SubtargetInfoKV " << Target << "ProcItinKV[];\n";
- OS << "extern llvm::InstrStage " << Target << "Stages[];\n";
- OS << "extern unsigned " << Target << "OperandCycles[];\n";
- OS << "extern unsigned " << Target << "ForwardingPathes[];\n";
+ OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcItinKV[];\n";
+ OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
+ OS << "extern const unsigned " << Target << "OperandCycles[];\n";
+ OS << "extern const unsigned " << Target << "ForwardingPathes[];\n";
}
OS << ClassName << "::" << ClassName << "(StringRef TT, StringRef CPU, "
diff --git a/contrib/llvm/utils/TableGen/TableGen.cpp b/contrib/llvm/utils/TableGen/TableGen.cpp
index eacfdf6..8c41358 100644
--- a/contrib/llvm/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/utils/TableGen/TableGen.cpp
@@ -16,6 +16,7 @@
#include "CallingConvEmitter.h"
#include "CodeEmitterGen.h"
#include "DAGISelEmitter.h"
+#include "DFAPacketizerEmitter.h"
#include "DisassemblerEmitter.h"
#include "EDEmitter.h"
#include "FastISelEmitter.h"
@@ -23,7 +24,6 @@
#include "IntrinsicEmitter.h"
#include "PseudoLoweringEmitter.h"
#include "RegisterInfoEmitter.h"
-#include "ARMDecoderEmitter.h"
#include "SubtargetEmitter.h"
#include "SetTheory.h"
@@ -44,11 +44,11 @@ enum ActionType {
GenInstrInfo,
GenAsmWriter,
GenAsmMatcher,
- GenARMDecoder,
GenDisassembler,
GenPseudoLowering,
GenCallingConv,
GenDAGISel,
+ GenDFAPacketizer,
GenFastISel,
GenSubtarget,
GenIntrinsic,
@@ -73,8 +73,6 @@ namespace {
"Generate calling convention descriptions"),
clEnumValN(GenAsmWriter, "gen-asm-writer",
"Generate assembly writer"),
- clEnumValN(GenARMDecoder, "gen-arm-decoder",
- "Generate decoders for ARM/Thumb"),
clEnumValN(GenDisassembler, "gen-disassembler",
"Generate disassembler"),
clEnumValN(GenPseudoLowering, "gen-pseudo-lowering",
@@ -83,6 +81,8 @@ namespace {
"Generate assembly instruction matcher"),
clEnumValN(GenDAGISel, "gen-dag-isel",
"Generate a DAG instruction selector"),
+ clEnumValN(GenDFAPacketizer, "gen-dfa-packetizer",
+ "Generate DFA Packetizer for VLIW targets"),
clEnumValN(GenFastISel, "gen-fast-isel",
"Generate a \"fast\" instruction selector"),
clEnumValN(GenSubtarget, "gen-subtarget",
@@ -101,92 +101,89 @@ namespace {
cl::opt<std::string>
Class("class", cl::desc("Print Enum list for this class"),
- cl::value_desc("class name"));
-}
-
-class LLVMTableGenAction : public TableGenAction {
-public:
- bool operator()(raw_ostream &OS, RecordKeeper &Records) {
- switch (Action) {
- case PrintRecords:
- OS << Records; // No argument, dump all contents
- break;
- case GenEmitter:
- CodeEmitterGen(Records).run(OS);
- break;
- case GenRegisterInfo:
- RegisterInfoEmitter(Records).run(OS);
- break;
- case GenInstrInfo:
- InstrInfoEmitter(Records).run(OS);
- break;
- case GenCallingConv:
- CallingConvEmitter(Records).run(OS);
- break;
- case GenAsmWriter:
- AsmWriterEmitter(Records).run(OS);
- break;
- case GenARMDecoder:
- ARMDecoderEmitter(Records).run(OS);
- break;
- case GenAsmMatcher:
- AsmMatcherEmitter(Records).run(OS);
- break;
- case GenDisassembler:
- DisassemblerEmitter(Records).run(OS);
- break;
- case GenPseudoLowering:
- PseudoLoweringEmitter(Records).run(OS);
- break;
- case GenDAGISel:
- DAGISelEmitter(Records).run(OS);
- break;
- case GenFastISel:
- FastISelEmitter(Records).run(OS);
- break;
- case GenSubtarget:
- SubtargetEmitter(Records).run(OS);
- break;
- case GenIntrinsic:
- IntrinsicEmitter(Records).run(OS);
- break;
- case GenTgtIntrinsic:
- IntrinsicEmitter(Records, true).run(OS);
- break;
- case GenEDInfo:
- EDEmitter(Records).run(OS);
- break;
- case PrintEnums:
- {
- std::vector<Record*> Recs = Records.getAllDerivedDefinitions(Class);
- for (unsigned i = 0, e = Recs.size(); i != e; ++i)
- OS << Recs[i]->getName() << ", ";
- OS << "\n";
- break;
- }
- case PrintSets:
- {
- SetTheory Sets;
- Sets.addFieldExpander("Set", "Elements");
- std::vector<Record*> Recs = Records.getAllDerivedDefinitions("Set");
- for (unsigned i = 0, e = Recs.size(); i != e; ++i) {
- OS << Recs[i]->getName() << " = [";
- const std::vector<Record*> *Elts = Sets.expand(Recs[i]);
- assert(Elts && "Couldn't expand Set instance");
- for (unsigned ei = 0, ee = Elts->size(); ei != ee; ++ei)
- OS << ' ' << (*Elts)[ei]->getName();
- OS << " ]\n";
+ cl::value_desc("class name"));
+
+ class LLVMTableGenAction : public TableGenAction {
+ public:
+ bool operator()(raw_ostream &OS, RecordKeeper &Records) {
+ switch (Action) {
+ case PrintRecords:
+ OS << Records; // No argument, dump all contents
+ break;
+ case GenEmitter:
+ CodeEmitterGen(Records).run(OS);
+ break;
+ case GenRegisterInfo:
+ RegisterInfoEmitter(Records).run(OS);
+ break;
+ case GenInstrInfo:
+ InstrInfoEmitter(Records).run(OS);
+ break;
+ case GenCallingConv:
+ CallingConvEmitter(Records).run(OS);
+ break;
+ case GenAsmWriter:
+ AsmWriterEmitter(Records).run(OS);
+ break;
+ case GenAsmMatcher:
+ AsmMatcherEmitter(Records).run(OS);
+ break;
+ case GenDisassembler:
+ DisassemblerEmitter(Records).run(OS);
+ break;
+ case GenPseudoLowering:
+ PseudoLoweringEmitter(Records).run(OS);
+ break;
+ case GenDAGISel:
+ DAGISelEmitter(Records).run(OS);
+ break;
+ case GenDFAPacketizer:
+ DFAGen(Records).run(OS);
+ break;
+ case GenFastISel:
+ FastISelEmitter(Records).run(OS);
+ break;
+ case GenSubtarget:
+ SubtargetEmitter(Records).run(OS);
+ break;
+ case GenIntrinsic:
+ IntrinsicEmitter(Records).run(OS);
+ break;
+ case GenTgtIntrinsic:
+ IntrinsicEmitter(Records, true).run(OS);
+ break;
+ case GenEDInfo:
+ EDEmitter(Records).run(OS);
+ break;
+ case PrintEnums:
+ {
+ std::vector<Record*> Recs = Records.getAllDerivedDefinitions(Class);
+ for (unsigned i = 0, e = Recs.size(); i != e; ++i)
+ OS << Recs[i]->getName() << ", ";
+ OS << "\n";
+ break;
}
- break;
- }
- default:
- assert(1 && "Invalid Action");
- return true;
+ case PrintSets:
+ {
+ SetTheory Sets;
+ Sets.addFieldExpander("Set", "Elements");
+ std::vector<Record*> Recs = Records.getAllDerivedDefinitions("Set");
+ for (unsigned i = 0, e = Recs.size(); i != e; ++i) {
+ OS << Recs[i]->getName() << " = [";
+ const std::vector<Record*> *Elts = Sets.expand(Recs[i]);
+ assert(Elts && "Couldn't expand Set instance");
+ for (unsigned ei = 0, ee = Elts->size(); ei != ee; ++ei)
+ OS << ' ' << (*Elts)[ei]->getName();
+ OS << " ]\n";
+ }
+ break;
+ }
+ }
+
+ return false;
}
-
- return false;
- }
-};
+ };
+}
int main(int argc, char **argv) {
sys::PrintStackTraceOnErrorSignal();
diff --git a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
index e8c9a48..2875168 100644
--- a/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/contrib/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -41,15 +41,20 @@ static inline bool inheritsFrom(InstructionContext child,
case IC:
return(inheritsFrom(child, IC_64BIT) ||
inheritsFrom(child, IC_OPSIZE) ||
+ inheritsFrom(child, IC_ADSIZE) ||
inheritsFrom(child, IC_XD) ||
inheritsFrom(child, IC_XS));
case IC_64BIT:
return(inheritsFrom(child, IC_64BIT_REXW) ||
inheritsFrom(child, IC_64BIT_OPSIZE) ||
+ inheritsFrom(child, IC_64BIT_ADSIZE) ||
inheritsFrom(child, IC_64BIT_XD) ||
inheritsFrom(child, IC_64BIT_XS));
case IC_OPSIZE:
return inheritsFrom(child, IC_64BIT_OPSIZE);
+ case IC_ADSIZE:
+ case IC_64BIT_ADSIZE:
+ return false;
case IC_XD:
return inheritsFrom(child, IC_64BIT_XD);
case IC_XS:
@@ -95,11 +100,13 @@ static inline bool inheritsFrom(InstructionContext child,
case IC_VEX_L:
case IC_VEX_L_XS:
case IC_VEX_L_XD:
+ return false;
case IC_VEX_L_OPSIZE:
+ return inheritsFrom(child, IC_VEX_L_W_OPSIZE);
+ case IC_VEX_L_W_OPSIZE:
return false;
default:
llvm_unreachable("Unknown instruction class");
- return false;
}
}
@@ -138,8 +145,6 @@ static inline const char* stringForContext(InstructionContext insnContext) {
INSTRUCTION_CONTEXTS
#undef ENUM_ENTRY
}
-
- return 0;
}
/// stringForOperandType - Like stringForContext, but for OperandTypes.
@@ -194,8 +199,7 @@ void DisassemblerTables::emitOneID(raw_ostream &o,
/// @param i - The indentation level for that output stream.
static void emitEmptyTable(raw_ostream &o, uint32_t &i)
{
- o.indent(i * 2) << "static const InstrUID modRMEmptyTable[1] = { 0 };\n";
- o << "\n";
+ o.indent(i * 2) << "0x0, /* EmptyTable */\n";
}
/// getDecisionType - Determines whether a ModRM decision with 255 entries can
@@ -207,28 +211,40 @@ static ModRMDecisionType getDecisionType(ModRMDecision &decision)
{
bool satisfiesOneEntry = true;
bool satisfiesSplitRM = true;
-
+ bool satisfiesSplitReg = true;
+
uint16_t index;
-
+
for (index = 0; index < 256; ++index) {
if (decision.instructionIDs[index] != decision.instructionIDs[0])
satisfiesOneEntry = false;
-
+
if (((index & 0xc0) == 0xc0) &&
(decision.instructionIDs[index] != decision.instructionIDs[0xc0]))
satisfiesSplitRM = false;
-
+
if (((index & 0xc0) != 0xc0) &&
(decision.instructionIDs[index] != decision.instructionIDs[0x00]))
satisfiesSplitRM = false;
+
+ if (((index & 0xc0) == 0xc0) &&
+ (decision.instructionIDs[index] != decision.instructionIDs[index&0xf8]))
+ satisfiesSplitReg = false;
+
+ if (((index & 0xc0) != 0xc0) &&
+ (decision.instructionIDs[index] != decision.instructionIDs[index&0x38]))
+ satisfiesSplitReg = false;
}
-
+
if (satisfiesOneEntry)
return MODRM_ONEENTRY;
-
+
if (satisfiesSplitRM)
return MODRM_SPLITRM;
-
+
+ if (satisfiesSplitReg)
+ return MODRM_SPLITREG;
+
return MODRM_FULL;
}
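[Reviewer note: the new MODRM_SPLITREG case lets a 256-entry ModRM decision collapse to 16 entries when decoding depends only on the reg field (bits 5-3) plus whether mod is 0b11. A minimal restatement of the test added above, assuming the same instructionIDs layout:]

// For memory forms (mod != 0b11) the representative entry keeps only the reg
// bits (index & 0x38); for register forms (mod == 0b11) it also keeps the mod
// bits (index & 0xf8), matching the reps at 0xc0, 0xc8, ..., 0xf8.
static bool satisfiesSplitReg(const InstrUID instructionIDs[256]) {
  for (unsigned index = 0; index < 256; ++index) {
    unsigned rep = ((index & 0xc0) == 0xc0) ? (index & 0xf8) : (index & 0x38);
    if (instructionIDs[index] != instructionIDs[rep])
      return false;
  }
  return true;
}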
@@ -291,71 +307,76 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1,
ModRMDecision &decision)
const {
static uint64_t sTableNumber = 0;
- uint64_t thisTableNumber = sTableNumber;
+ static uint64_t sEntryNumber = 1;
ModRMDecisionType dt = getDecisionType(decision);
uint16_t index;
-
+
if (dt == MODRM_ONEENTRY && decision.instructionIDs[0] == 0)
{
o2.indent(i2) << "{ /* ModRMDecision */" << "\n";
i2++;
-
+
o2.indent(i2) << stringForDecisionType(dt) << "," << "\n";
- o2.indent(i2) << "modRMEmptyTable";
-
+ o2.indent(i2) << 0 << " /* EmptyTable */\n";
+
i2--;
o2.indent(i2) << "}";
return;
}
-
- o1.indent(i1) << "static const InstrUID modRMTable" << thisTableNumber;
-
- switch (dt) {
- default:
- llvm_unreachable("Unknown decision type");
- case MODRM_ONEENTRY:
- o1 << "[1]";
- break;
- case MODRM_SPLITRM:
- o1 << "[2]";
- break;
- case MODRM_FULL:
- o1 << "[256]";
- break;
- }
- o1 << " = {" << "\n";
+ o1 << "/* Table" << sTableNumber << " */\n";
i1++;
-
+
switch (dt) {
default:
llvm_unreachable("Unknown decision type");
case MODRM_ONEENTRY:
- emitOneID(o1, i1, decision.instructionIDs[0], false);
+ emitOneID(o1, i1, decision.instructionIDs[0], true);
break;
case MODRM_SPLITRM:
emitOneID(o1, i1, decision.instructionIDs[0x00], true); // mod = 0b00
- emitOneID(o1, i1, decision.instructionIDs[0xc0], false); // mod = 0b11
+ emitOneID(o1, i1, decision.instructionIDs[0xc0], true); // mod = 0b11
+ break;
+ case MODRM_SPLITREG:
+ for (index = 0; index < 64; index += 8)
+ emitOneID(o1, i1, decision.instructionIDs[index], true);
+ for (index = 0xc0; index < 256; index += 8)
+ emitOneID(o1, i1, decision.instructionIDs[index], true);
break;
case MODRM_FULL:
for (index = 0; index < 256; ++index)
- emitOneID(o1, i1, decision.instructionIDs[index], index < 255);
+ emitOneID(o1, i1, decision.instructionIDs[index], true);
break;
}
-
+
i1--;
- o1.indent(i1) << "};" << "\n";
- o1 << "\n";
-
+
o2.indent(i2) << "{ /* struct ModRMDecision */" << "\n";
i2++;
-
+
o2.indent(i2) << stringForDecisionType(dt) << "," << "\n";
- o2.indent(i2) << "modRMTable" << sTableNumber << "\n";
-
+ o2.indent(i2) << sEntryNumber << " /* Table" << sTableNumber << " */\n";
+
i2--;
o2.indent(i2) << "}";
-
+
+ switch (dt) {
+ default:
+ llvm_unreachable("Unknown decision type");
+ case MODRM_ONEENTRY:
+ sEntryNumber += 1;
+ break;
+ case MODRM_SPLITRM:
+ sEntryNumber += 2;
+ break;
+ case MODRM_SPLITREG:
+ sEntryNumber += 16;
+ break;
+ case MODRM_FULL:
+ sEntryNumber += 256;
+ break;
+ }
+
++sTableNumber;
}
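[Reviewer note: after this hunk the per-decision named arrays are gone — each decision's entries are appended to the single flat modRMTable, and the emitted ModRMDecision records its starting entry number (sEntryNumber) instead of a symbol. A hedged sketch of how a consumer would index the flattened table; the helper and its name are illustrative, not part of this change:]

// Start is the entry number emitted for the decision; Table is modRMTable.
static InstrUID lookupUID(ModRMDecisionType DT, unsigned Start,
                          const InstrUID *Table, uint8_t ModRM) {
  switch (DT) {
  case MODRM_ONEENTRY:              // decision contributed 1 entry
    return Table[Start];
  case MODRM_SPLITRM:               // 2 entries: mem form, then reg form
    return Table[Start + (((ModRM & 0xc0) == 0xc0) ? 1 : 0)];
  case MODRM_SPLITREG:              // 16 entries: 8 mem reps, then 8 reg reps
    return Table[Start + ((ModRM & 0x38) >> 3) +
                 (((ModRM & 0xc0) == 0xc0) ? 8 : 0)];
  case MODRM_FULL:                  // 256 entries, indexed by the raw byte
    return Table[Start + ModRM];
  }
  return 0;
}

[The offsets mirror the sEntryNumber increments above: 1, 2, 16, 256.]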
@@ -436,11 +457,11 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o, uint32_t &i)
for (index = 0; index < numInstructions; ++index) {
o.indent(i * 2) << "{ /* " << index << " */" << "\n";
i++;
-
- o.indent(i * 2) <<
- stringForModifierType(InstructionSpecifiers[index].modifierType);
+
+ o.indent(i * 2) << stringForModifierType(
+ (ModifierType)InstructionSpecifiers[index].modifierType);
o << "," << "\n";
-
+
o.indent(i * 2) << "0x";
o << format("%02hhx", (uint16_t)InstructionSpecifiers[index].modifierBase);
o << "," << "\n";
@@ -450,11 +471,11 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o, uint32_t &i)
for (operandIndex = 0; operandIndex < X86_MAX_OPERANDS; ++operandIndex) {
o.indent(i * 2) << "{ ";
- o << stringForOperandEncoding(InstructionSpecifiers[index]
- .operands[operandIndex]
- .encoding);
+ o << stringForOperandEncoding((OperandEncoding)InstructionSpecifiers[index]
+ .operands[operandIndex]
+ .encoding);
o << ", ";
- o << stringForOperandType(InstructionSpecifiers[index]
+ o << stringForOperandType((OperandType)InstructionSpecifiers[index]
.operands[operandIndex]
.type);
o << " }";
@@ -468,7 +489,7 @@ void DisassemblerTables::emitInstructionInfo(raw_ostream &o, uint32_t &i)
i--;
o.indent(i * 2) << "}," << "\n";
- o.indent(i * 2) << "\"" << InstructionSpecifiers[index].name << "\"";
+ o.indent(i * 2) << "/* " << InstructionSpecifiers[index].name << " */";
o << "\n";
i--;
@@ -494,7 +515,9 @@ void DisassemblerTables::emitContextTable(raw_ostream &o, uint32_t &i) const {
for (index = 0; index < 256; ++index) {
o.indent(i * 2);
- if ((index & ATTR_VEXL) && (index & ATTR_OPSIZE))
+ if ((index & ATTR_VEXL) && (index & ATTR_REXW) && (index & ATTR_OPSIZE))
+ o << "IC_VEX_L_W_OPSIZE";
+ else if ((index & ATTR_VEXL) && (index & ATTR_OPSIZE))
o << "IC_VEX_L_OPSIZE";
else if ((index & ATTR_VEXL) && (index & ATTR_XD))
o << "IC_VEX_L_XD";
@@ -535,6 +558,8 @@ void DisassemblerTables::emitContextTable(raw_ostream &o, uint32_t &i) const {
o << "IC_64BIT_XD";
else if ((index & ATTR_64BIT) && (index & ATTR_OPSIZE))
o << "IC_64BIT_OPSIZE";
+ else if ((index & ATTR_64BIT) && (index & ATTR_ADSIZE))
+ o << "IC_64BIT_ADSIZE";
else if ((index & ATTR_64BIT) && (index & ATTR_REXW))
o << "IC_64BIT_REXW";
else if ((index & ATTR_64BIT))
@@ -549,6 +574,8 @@ void DisassemblerTables::emitContextTable(raw_ostream &o, uint32_t &i) const {
o << "IC_XD";
else if (index & ATTR_OPSIZE)
o << "IC_OPSIZE";
+ else if (index & ATTR_ADSIZE)
+ o << "IC_ADSIZE";
else
o << "IC";
@@ -594,11 +621,16 @@ void DisassemblerTables::emit(raw_ostream &o) const {
emitContextTable(o, i2);
o << "\n";
-
+
+ o << "static const InstrUID modRMTable[] = {\n";
+ i1++;
emitEmptyTable(o1, i1);
+ i1--;
emitContextDecisions(o1, o2, i1, i2);
-
+
o << o1.str();
+ o << " 0x0\n";
+ o << "};\n";
o << "\n";
o << o2.str();
o << "\n";
diff --git a/contrib/llvm/utils/TableGen/X86ModRMFilters.cpp b/contrib/llvm/utils/TableGen/X86ModRMFilters.cpp
new file mode 100644
index 0000000..7166fe0
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/X86ModRMFilters.cpp
@@ -0,0 +1,26 @@
+//===- X86ModRMFilters.cpp - Disassembler ModR/M filters --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86ModRMFilters.h"
+
+using namespace llvm::X86Disassembler;
+
+void ModRMFilter::anchor() { }
+
+void DumbFilter::anchor() { }
+
+void ModFilter::anchor() { }
+
+void EscapeFilter::anchor() { }
+
+void AddRegEscapeFilter::anchor() { }
+
+void ExtendedFilter::anchor() { }
+
+void ExactFilter::anchor() { }
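[Reviewer note: these empty anchor() definitions are LLVM's usual vtable-anchoring idiom — declaring one virtual member in the header and defining it out-of-line in a single .cpp gives the class's vtable (and RTTI) a unique home object file, instead of emitting it weakly in every translation unit that uses the class. In miniature, with hypothetical names:]

// SomeFilter.h
class SomeFilter {
  virtual void anchor();        // declared, never called
public:
  virtual ~SomeFilter() { }
};

// SomeFilter.cpp
void SomeFilter::anchor() { }   // the single out-of-line virtual member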
diff --git a/contrib/llvm/utils/TableGen/X86ModRMFilters.h b/contrib/llvm/utils/TableGen/X86ModRMFilters.h
index 199040b..19fecbc 100644
--- a/contrib/llvm/utils/TableGen/X86ModRMFilters.h
+++ b/contrib/llvm/utils/TableGen/X86ModRMFilters.h
@@ -27,6 +27,7 @@ namespace X86Disassembler {
/// ModRMFilter - Abstract base class for classes that recognize patterns in
/// ModR/M bytes.
class ModRMFilter {
+ virtual void anchor();
public:
/// Destructor - Override as necessary.
virtual ~ModRMFilter() { }
@@ -49,6 +50,7 @@ public:
/// require a ModR/M byte or instructions where the entire ModR/M byte is used
/// for operands.
class DumbFilter : public ModRMFilter {
+ virtual void anchor();
public:
bool isDumb() const {
return true;
@@ -63,7 +65,7 @@ public:
/// Some instructions are classified based on whether they are 11 or anything
/// else. This filter performs that classification.
class ModFilter : public ModRMFilter {
-private:
+ virtual void anchor();
bool R;
public:
/// Constructor
@@ -90,7 +92,7 @@ public:
/// possible value. Otherwise, there is one instruction for each value of the
/// nnn field [bits 5-3], known elsewhere as the reg field.
class EscapeFilter : public ModRMFilter {
-private:
+ virtual void anchor();
bool C0_FF;
uint8_t NNN_or_ModRM;
public:
@@ -121,7 +123,7 @@ public:
/// maps to a single instruction. Such instructions require the ModR/M byte
/// to fall between 0xc0 and 0xff.
class AddRegEscapeFilter : public ModRMFilter {
-private:
+ virtual void anchor();
uint8_t ModRM;
public:
/// Constructor
@@ -142,7 +144,7 @@ public:
/// ExtendedFilter - Extended opcodes are classified based on the value of the
/// mod field [bits 7-6] and the value of the nnn field [bits 5-3].
class ExtendedFilter : public ModRMFilter {
-private:
+ virtual void anchor();
bool R;
uint8_t NNN;
public:
@@ -169,9 +171,8 @@ public:
/// ExactFilter - The occasional extended opcode (such as VMCALL or MONITOR)
/// requires the ModR/M byte to have a specific value.
-class ExactFilter : public ModRMFilter
-{
-private:
+class ExactFilter : public ModRMFilter {
+ virtual void anchor();
uint8_t ModRM;
public:
/// Constructor
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
index cae8237..6a01cce 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -36,7 +36,16 @@ using namespace llvm;
MAP(F8, 41) \
MAP(F9, 42) \
MAP(D0, 45) \
- MAP(D1, 46)
+ MAP(D1, 46) \
+ MAP(D4, 47) \
+ MAP(D8, 48) \
+ MAP(D9, 49) \
+ MAP(DA, 50) \
+ MAP(DB, 51) \
+ MAP(DC, 52) \
+ MAP(DD, 53) \
+ MAP(DE, 54) \
+ MAP(DF, 55)
// A clone of X86 since we can't depend on something that is generated.
namespace X86Local {
@@ -68,7 +77,7 @@ namespace X86Local {
DC = 7, DD = 8, DE = 9, DF = 10,
XD = 11, XS = 12,
T8 = 13, P_TA = 14,
- A6 = 15, A7 = 16, TF = 17
+ A6 = 15, A7 = 16, T8XD = 17, T8XS = 18, TAXD = 19
};
}
@@ -119,6 +128,9 @@ namespace X86Local {
EXTENSION_TABLE(ba) \
EXTENSION_TABLE(c7)
+#define THREE_BYTE_38_EXTENSION_TABLES \
+ EXTENSION_TABLE(F3)
+
using namespace X86Disassembler;
/// needsModRMForDecode - Indicates whether a particular instruction requires a
@@ -213,10 +225,13 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
SegOvr = byteFromRec(Rec, "SegOvrBits");
HasOpSizePrefix = Rec->getValueAsBit("hasOpSizePrefix");
+ HasAdSizePrefix = Rec->getValueAsBit("hasAdSizePrefix");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
HasVEXPrefix = Rec->getValueAsBit("hasVEXPrefix");
HasVEX_4VPrefix = Rec->getValueAsBit("hasVEX_4VPrefix");
+ HasVEX_4VOp3Prefix = Rec->getValueAsBit("hasVEX_4VOp3Prefix");
HasVEX_WPrefix = Rec->getValueAsBit("hasVEX_WPrefix");
+ HasMemOp4Prefix = Rec->getValueAsBit("hasMemOp4Prefix");
IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
HasLockPrefix = Rec->getValueAsBit("hasLockPrefix");
IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
@@ -230,7 +245,7 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
(Name.find("CRC32") != Name.npos);
HasFROperands = hasFROperands();
HasVEX_LPrefix = has256BitOperands() || Rec->getValueAsBit("hasVEX_L");
-
+
// Check for 64-bit inst which does not require REX
Is32Bit = false;
Is64Bit = false;
@@ -254,10 +269,6 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
Rec->getName() == "PUSHFS64" ||
Rec->getName() == "PUSHGS64" ||
Rec->getName() == "REX64_PREFIX" ||
- Rec->getName().find("VMREAD64") != Name.npos ||
- Rec->getName().find("VMWRITE64") != Name.npos ||
- Rec->getName().find("INVEPT64") != Name.npos ||
- Rec->getName().find("INVVPID64") != Name.npos ||
Rec->getName().find("MOV64") != Name.npos ||
Rec->getName().find("PUSH64") != Name.npos ||
Rec->getName().find("POP64") != Name.npos;
@@ -284,67 +295,90 @@ void RecognizableInstr::processInstr(DisassemblerTables &tables,
InstructionContext RecognizableInstr::insnContext() const {
InstructionContext insnContext;
- if (HasVEX_4VPrefix || HasVEXPrefix) {
- if (HasVEX_LPrefix && HasVEX_WPrefix)
- llvm_unreachable("Don't support VEX.L and VEX.W together");
- else if (HasOpSizePrefix && HasVEX_LPrefix)
+ if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix|| HasVEXPrefix) {
+ if (HasVEX_LPrefix && HasVEX_WPrefix) {
+ if (HasOpSizePrefix)
+ insnContext = IC_VEX_L_W_OPSIZE;
+ else
+ llvm_unreachable("Don't support VEX.L and VEX.W together");
+ } else if (HasOpSizePrefix && HasVEX_LPrefix)
insnContext = IC_VEX_L_OPSIZE;
else if (HasOpSizePrefix && HasVEX_WPrefix)
insnContext = IC_VEX_W_OPSIZE;
else if (HasOpSizePrefix)
insnContext = IC_VEX_OPSIZE;
- else if (HasVEX_LPrefix && Prefix == X86Local::XS)
+ else if (HasVEX_LPrefix &&
+ (Prefix == X86Local::XS || Prefix == X86Local::T8XS))
insnContext = IC_VEX_L_XS;
- else if (HasVEX_LPrefix && Prefix == X86Local::XD)
+ else if (HasVEX_LPrefix && (Prefix == X86Local::XD ||
+ Prefix == X86Local::T8XD ||
+ Prefix == X86Local::TAXD))
insnContext = IC_VEX_L_XD;
- else if (HasVEX_WPrefix && Prefix == X86Local::XS)
+ else if (HasVEX_WPrefix &&
+ (Prefix == X86Local::XS || Prefix == X86Local::T8XS))
insnContext = IC_VEX_W_XS;
- else if (HasVEX_WPrefix && Prefix == X86Local::XD)
+ else if (HasVEX_WPrefix && (Prefix == X86Local::XD ||
+ Prefix == X86Local::T8XD ||
+ Prefix == X86Local::TAXD))
insnContext = IC_VEX_W_XD;
else if (HasVEX_WPrefix)
insnContext = IC_VEX_W;
else if (HasVEX_LPrefix)
insnContext = IC_VEX_L;
- else if (Prefix == X86Local::XD)
+ else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
+ Prefix == X86Local::TAXD)
insnContext = IC_VEX_XD;
- else if (Prefix == X86Local::XS)
+ else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
insnContext = IC_VEX_XS;
else
insnContext = IC_VEX;
} else if (Is64Bit || HasREX_WPrefix) {
if (HasREX_WPrefix && HasOpSizePrefix)
insnContext = IC_64BIT_REXW_OPSIZE;
- else if (HasOpSizePrefix &&
- (Prefix == X86Local::XD || Prefix == X86Local::TF))
+ else if (HasOpSizePrefix && (Prefix == X86Local::XD ||
+ Prefix == X86Local::T8XD ||
+ Prefix == X86Local::TAXD))
insnContext = IC_64BIT_XD_OPSIZE;
- else if (HasOpSizePrefix && Prefix == X86Local::XS)
+ else if (HasOpSizePrefix &&
+ (Prefix == X86Local::XS || Prefix == X86Local::T8XS))
insnContext = IC_64BIT_XS_OPSIZE;
else if (HasOpSizePrefix)
insnContext = IC_64BIT_OPSIZE;
- else if (HasREX_WPrefix && Prefix == X86Local::XS)
- insnContext = IC_64BIT_REXW_XS;
+ else if (HasAdSizePrefix)
+ insnContext = IC_64BIT_ADSIZE;
else if (HasREX_WPrefix &&
- (Prefix == X86Local::XD || Prefix == X86Local::TF))
+ (Prefix == X86Local::XS || Prefix == X86Local::T8XS))
+ insnContext = IC_64BIT_REXW_XS;
+ else if (HasREX_WPrefix && (Prefix == X86Local::XD ||
+ Prefix == X86Local::T8XD ||
+ Prefix == X86Local::TAXD))
insnContext = IC_64BIT_REXW_XD;
- else if (Prefix == X86Local::XD || Prefix == X86Local::TF)
+ else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
+ Prefix == X86Local::TAXD)
insnContext = IC_64BIT_XD;
- else if (Prefix == X86Local::XS)
+ else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS)
insnContext = IC_64BIT_XS;
else if (HasREX_WPrefix)
insnContext = IC_64BIT_REXW;
else
insnContext = IC_64BIT;
} else {
- if (HasOpSizePrefix &&
- (Prefix == X86Local::XD || Prefix == X86Local::TF))
+ if (HasOpSizePrefix && (Prefix == X86Local::XD ||
+ Prefix == X86Local::T8XD ||
+ Prefix == X86Local::TAXD))
insnContext = IC_XD_OPSIZE;
- else if (HasOpSizePrefix && Prefix == X86Local::XS)
+ else if (HasOpSizePrefix &&
+ (Prefix == X86Local::XS || Prefix == X86Local::T8XS))
insnContext = IC_XS_OPSIZE;
else if (HasOpSizePrefix)
insnContext = IC_OPSIZE;
- else if (Prefix == X86Local::XD || Prefix == X86Local::TF)
+ else if (HasAdSizePrefix)
+ insnContext = IC_ADSIZE;
+ else if (Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
+ Prefix == X86Local::TAXD)
insnContext = IC_XD;
- else if (Prefix == X86Local::XS || Prefix == X86Local::REP)
+ else if (Prefix == X86Local::XS || Prefix == X86Local::T8XS ||
+ Prefix == X86Local::REP)
insnContext = IC_XS;
else
insnContext = IC;
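[Reviewer note: every prefix test in this function now folds the combined opcode-map prefixes into their mandatory-prefix behavior — T8XD and TAXD select the same contexts as XD (0xF2), and T8XS the same as XS (0xF3). The repeated disjunctions are equivalent to helpers like these; the helper names are illustrative:]

static bool isXDLike(uint8_t Prefix) {
  return Prefix == X86Local::XD || Prefix == X86Local::T8XD ||
         Prefix == X86Local::TAXD;
}
static bool isXSLike(uint8_t Prefix) {
  return Prefix == X86Local::XS || Prefix == X86Local::T8XS;
}
// e.g. HasVEX_LPrefix && isXDLike(Prefix) yields IC_VEX_L_XD above.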
@@ -371,19 +405,12 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
return FILTER_STRONG;
- // Filter out artificial instructions
+ // Filter out artificial instructions but leave in the LOCK_PREFIX so it is
+ // printed as a separate "instruction".
- if (Name.find("TAILJMP") != Name.npos ||
- Name.find("_Int") != Name.npos ||
- Name.find("_int") != Name.npos ||
+ if (Name.find("_Int") != Name.npos ||
Name.find("Int_") != Name.npos ||
Name.find("_NOREX") != Name.npos ||
- Name.find("_TC") != Name.npos ||
- Name.find("EH_RETURN") != Name.npos ||
- Name.find("V_SET") != Name.npos ||
- Name.find("LOCK_") != Name.npos ||
- Name.find("WIN") != Name.npos ||
- Name.find("_AVX") != Name.npos ||
Name.find("2SDL") != Name.npos)
return FILTER_STRONG;
@@ -421,12 +448,6 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
Name.find("Xrr") != Name.npos ||
Name.find("rr64") != Name.npos)
return FILTER_WEAK;
-
- if (Name == "VMASKMOVDQU64" ||
- Name == "VEXTRACTPSrr64" ||
- Name == "VMOVQd64rr" ||
- Name == "VMOVQs64rr")
- return FILTER_WEAK;
// Special cases.
@@ -441,29 +462,15 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
return FILTER_WEAK;
if (Name.find("Fs") != Name.npos)
return FILTER_WEAK;
- if (Name == "MOVLPDrr" ||
- Name == "MOVLPSrr" ||
- Name == "PUSHFQ" ||
- Name == "BSF16rr" ||
- Name == "BSF16rm" ||
- Name == "BSR16rr" ||
- Name == "BSR16rm" ||
- Name == "MOVSX16rm8" ||
- Name == "MOVSX16rr8" ||
- Name == "MOVZX16rm8" ||
- Name == "MOVZX16rr8" ||
- Name == "PUSH32i16" ||
- Name == "PUSH64i16" ||
+ if (Name == "PUSH64i16" ||
Name == "MOVPQI2QImr" ||
Name == "VMOVPQI2QImr" ||
- Name == "MOVSDmr" ||
- Name == "MOVSDrm" ||
- Name == "MOVSSmr" ||
- Name == "MOVSSrm" ||
Name == "MMX_MOVD64rrv164" ||
- Name == "CRC32m16" ||
Name == "MOV64ri64i32" ||
- Name == "CRC32r16")
+ Name == "VMASKMOVDQU64" ||
+ Name == "VEXTRACTPSrr64" ||
+ Name == "VMOVQd64rr" ||
+ Name == "VMOVQs64rr")
return FILTER_WEAK;
if (HasFROperands && Name.find("MOV") != Name.npos &&
@@ -566,7 +573,7 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
bool hasFROperands = false;
- assert(numOperands < X86_MAX_OPERANDS && "X86_MAX_OPERANDS is not large enough");
+ assert(numOperands <= X86_MAX_OPERANDS && "X86_MAX_OPERANDS is not large enough");
for (operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
if (OperandList[operandIndex].Constraints.size()) {
@@ -684,31 +691,40 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
- if (HasVEX_4VPrefix)
- assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 4 &&
+ if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix)
+ assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 5 &&
"Unexpected number of operands for MRMSrcRegFrm with VEX_4V");
else
assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
"Unexpected number of operands for MRMSrcRegFrm");
HANDLE_OPERAND(roRegister)
-
+
if (HasVEX_4VPrefix)
// FIXME: In AVX, the register below becomes the one encoded
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
-
+
+ if (HasMemOp4Prefix)
+ HANDLE_OPERAND(immediate)
+
HANDLE_OPERAND(rmRegister)
- HANDLE_OPTIONAL(immediate)
+
+ if (HasVEX_4VOp3Prefix)
+ HANDLE_OPERAND(vvvvRegister)
+
+ if (!HasMemOp4Prefix)
+ HANDLE_OPTIONAL(immediate)
+ HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
break;
case X86Local::MRMSrcMem:
// Operand 1 is a register operand in the Reg/Opcode field.
// Operand 2 is a memory operand (possibly SIB-extended)
// - In AVX, there is a register operand in the VEX.vvvv field here -
// Operand 3 (optional) is an immediate.
-
- if (HasVEX_4VPrefix)
- assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 4 &&
+
+ if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix)
+ assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 5 &&
"Unexpected number of operands for MRMSrcMemFrm with VEX_4V");
else
assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
@@ -721,8 +737,17 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
// in ModRMVEX and the one above the one in the VEX.VVVV field
HANDLE_OPERAND(vvvvRegister)
+ if (HasMemOp4Prefix)
+ HANDLE_OPERAND(immediate)
+
HANDLE_OPERAND(memory)
- HANDLE_OPTIONAL(immediate)
+
+ if (HasVEX_4VOp3Prefix)
+ HANDLE_OPERAND(vvvvRegister)
+
+ if (!HasMemOp4Prefix)
+ HANDLE_OPTIONAL(immediate)
+ HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
break;
case X86Local::MRM0r:
case X86Local::MRM1r:
@@ -736,12 +761,12 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
// Operand 2 (optional) is an immediate or relocation.
if (HasVEX_4VPrefix)
assert(numPhysicalOperands <= 3 &&
- "Unexpected number of operands for MRMSrcMemFrm with VEX_4V");
+ "Unexpected number of operands for MRMnRFrm with VEX_4V");
else
assert(numPhysicalOperands <= 2 &&
"Unexpected number of operands for MRMnRFrm");
if (HasVEX_4VPrefix)
- HANDLE_OPERAND(vvvvRegister);
+ HANDLE_OPERAND(vvvvRegister)
HANDLE_OPTIONAL(rmRegister)
HANDLE_OPTIONAL(relocation)
break;
@@ -755,8 +780,14 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
case X86Local::MRM7m:
// Operand 1 is a memory operand (possibly SIB-extended)
// Operand 2 (optional) is an immediate or relocation.
- assert(numPhysicalOperands >= 1 && numPhysicalOperands <= 2 &&
- "Unexpected number of operands for MRMnMFrm");
+ if (HasVEX_4VPrefix)
+ assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
+ "Unexpected number of operands for MRMnMFrm");
+ else
+ assert(numPhysicalOperands >= 1 && numPhysicalOperands <= 2 &&
+ "Unexpected number of operands for MRMnMFrm");
+ if (HasVEX_4VPrefix)
+ HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(relocation)
break;
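[Reviewer note: the MRMSrcReg and MRMSrcMem hunks above reorder operand emission to cover FMA4-style encodings, where one source register travels in bits 7:4 of the immediate byte (MemOp4) or in VEX.vvvv after the r/m operand (VEX_4VOp3). Summarizing the HANDLE_OPERAND sequence as it now stands:]

// Emission order for MRMSrcReg (MRMSrcMem swaps rmRegister for memory):
//   1. roRegister            (ModRM.reg)
//   2. vvvvRegister          (only with VEX_4V)
//   3. immediate             (only with MemOp4: the register in imm[7:4])
//   4. rmRegister            (ModRM.rm)
//   5. vvvvRegister          (only with VEX_4VOp3)
//   6. optional immediate(s) (one fewer when MemOp4 already consumed one)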
@@ -843,15 +874,50 @@ void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
opcodeToSet = Opcode;
break;
case X86Local::T8:
- case X86Local::TF:
+ case X86Local::T8XD:
+ case X86Local::T8XS:
opcodeType = THREEBYTE_38;
- if (needsModRMForDecode(Form))
- filter = new ModFilter(isRegFormat(Form));
- else
- filter = new DumbFilter();
+ switch (Opcode) {
+ default:
+ if (needsModRMForDecode(Form))
+ filter = new ModFilter(isRegFormat(Form));
+ else
+ filter = new DumbFilter();
+ break;
+#define EXTENSION_TABLE(n) case 0x##n:
+ THREE_BYTE_38_EXTENSION_TABLES
+#undef EXTENSION_TABLE
+ switch (Form) {
+ default:
+ llvm_unreachable("Unhandled two-byte extended opcode");
+ case X86Local::MRM0r:
+ case X86Local::MRM1r:
+ case X86Local::MRM2r:
+ case X86Local::MRM3r:
+ case X86Local::MRM4r:
+ case X86Local::MRM5r:
+ case X86Local::MRM6r:
+ case X86Local::MRM7r:
+ filter = new ExtendedFilter(true, Form - X86Local::MRM0r);
+ break;
+ case X86Local::MRM0m:
+ case X86Local::MRM1m:
+ case X86Local::MRM2m:
+ case X86Local::MRM3m:
+ case X86Local::MRM4m:
+ case X86Local::MRM5m:
+ case X86Local::MRM6m:
+ case X86Local::MRM7m:
+ filter = new ExtendedFilter(false, Form - X86Local::MRM0m);
+ break;
+ MRM_MAPPING
+ } // switch (Form)
+ break;
+ } // switch (Opcode)
opcodeToSet = Opcode;
break;
case X86Local::P_TA:
+ case X86Local::TAXD:
opcodeType = THREEBYTE_3A;
if (needsModRMForDecode(Form))
filter = new ModFilter(isRegFormat(Form));
@@ -1049,6 +1115,7 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
TYPE("i16imm_pcrel", TYPE_REL16)
TYPE("i32imm_pcrel", TYPE_REL32)
TYPE("SSECC", TYPE_IMM3)
+ TYPE("AVXCC", TYPE_IMM5)
TYPE("brtarget", TYPE_RELv)
TYPE("uncondbrtarget", TYPE_RELv)
TYPE("brtarget8", TYPE_REL8)
@@ -1090,6 +1157,7 @@ OperandEncoding RecognizableInstr::immediateEncodingFromString
ENCODING("i32i8imm", ENCODING_IB)
ENCODING("u32u8imm", ENCODING_IB)
ENCODING("SSECC", ENCODING_IB)
+ ENCODING("AVXCC", ENCODING_IB)
ENCODING("i16imm", ENCODING_Iv)
ENCODING("i16i8imm", ENCODING_IB)
ENCODING("i32imm", ENCODING_Iv)
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
index 4441597..6c0a234 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -50,17 +50,23 @@ private:
uint8_t SegOvr;
/// The hasOpSizePrefix field from the record
bool HasOpSizePrefix;
+ /// The hasAdSizePrefix field from the record
+ bool HasAdSizePrefix;
/// The hasREX_WPrefix field from the record
bool HasREX_WPrefix;
/// The hasVEXPrefix field from the record
bool HasVEXPrefix;
/// The hasVEX_4VPrefix field from the record
bool HasVEX_4VPrefix;
+ /// The hasVEX_4VOp3Prefix field from the record
+ bool HasVEX_4VOp3Prefix;
/// The hasVEX_WPrefix field from the record
bool HasVEX_WPrefix;
/// Inferred from the operands; indicates whether the L bit in the VEX prefix is set
bool HasVEX_LPrefix;
- // The ignoreVEX_L field from the record
+ /// The hasMemOp4Prefix field from the record
+ bool HasMemOp4Prefix;
+ /// The ignoreVEX_L field from the record
bool IgnoresVEX_L;
/// The hasLockPrefix field from the record
bool HasLockPrefix;
@@ -70,7 +76,7 @@ private:
bool Is64Bit;
// Whether the instruction has the predicate "In32BitMode"
bool Is32Bit;
-
+
/// The instruction name as listed in the tables
std::string Name;
/// The AT&T AsmString for the instruction
diff --git a/etc/mtree/BSD.include.dist b/etc/mtree/BSD.include.dist
index f577417..2de9e84 100644
--- a/etc/mtree/BSD.include.dist
+++ b/etc/mtree/BSD.include.dist
@@ -88,7 +88,7 @@
..
..
clang
- 3.0
+ 3.1
..
..
crypto
diff --git a/lib/clang/Makefile b/lib/clang/Makefile
index e0a6201..d6e9457 100644
--- a/lib/clang/Makefile
+++ b/lib/clang/Makefile
@@ -9,6 +9,7 @@ SUBDIR= libclanganalysis \
libclangbasic \
libclangcodegen \
libclangdriver \
+ libclangedit \
libclangfrontend \
libclangfrontendtool \
libclangindex \
@@ -22,6 +23,7 @@ SUBDIR= libclanganalysis \
libclangstaticanalyzerfrontend \
\
libllvmanalysis \
+ libllvmarchive \
libllvmasmparser \
libllvmasmprinter \
libllvmbitreader \
@@ -32,14 +34,17 @@ SUBDIR= libclanganalysis \
libllvminstrumentation \
libllvmipa \
libllvmipo \
+ libllvmlinker \
libllvmmc \
libllvmmcparser \
+ libllvmobject \
libllvmscalaropts \
libllvmselectiondag \
libllvmsupport \
libllvmtablegen \
libllvmtarget \
libllvmtransformutils \
+ libllvmvectorize \
\
libllvmarmasmparser \
libllvmarmcodegen \
@@ -47,6 +52,7 @@ SUBDIR= libclanganalysis \
libllvmarmdisassembler \
libllvmarminfo \
libllvmarminstprinter \
+ libllvmmipsasmparser \
libllvmmipscodegen \
libllvmmipsdesc \
libllvmmipsinfo \
diff --git a/lib/clang/clang.build.mk b/lib/clang/clang.build.mk
index 2c059f5..a14918d 100644
--- a/lib/clang/clang.build.mk
+++ b/lib/clang/clang.build.mk
@@ -12,7 +12,7 @@ CFLAGS+=-I${LLVM_SRCS}/include -I${CLANG_SRCS}/include \
CFLAGS+= -fno-strict-aliasing
TARGET_ARCH?= ${MACHINE_ARCH}
-CFLAGS+=-DLLVM_HOSTTRIPLE=\"${TARGET_ARCH:C/amd64/x86_64/}-unknown-freebsd10.0\"
+CFLAGS+=-DLLVM_DEFAULT_TARGET_TRIPLE=\"${TARGET_ARCH:C/amd64/x86_64/}-unknown-freebsd10.0\"
.ifndef LLVM_REQUIRES_EH
CXXFLAGS+=-fno-exceptions
@@ -82,6 +82,16 @@ AttrList.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
-gen-clang-attr-list -o ${.TARGET} \
-I ${CLANG_SRCS}/include ${.ALLSRC}
+AttrParsedAttrKinds.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
+ ${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/Basic ${TBLINC} \
+ -gen-clang-attr-parsed-attr-kinds -o ${.TARGET} \
+ -I ${CLANG_SRCS}/include ${.ALLSRC}
+
+AttrParsedAttrList.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
+ ${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/Basic ${TBLINC} \
+ -gen-clang-attr-parsed-attr-list -o ${.TARGET} \
+ -I ${CLANG_SRCS}/include ${.ALLSRC}
+
AttrPCHRead.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/Serialization \
${TBLINC} -gen-clang-attr-pch-read -o ${.TARGET} \
@@ -97,6 +107,11 @@ AttrSpellings.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
-gen-clang-attr-spelling-list -o ${.TARGET} \
-I ${CLANG_SRCS}/include ${.ALLSRC}
+AttrTemplateInstantiate.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
+ ${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/Basic ${TBLINC} \
+ -gen-clang-attr-template-instantiate -o ${.TARGET} \
+ -I ${CLANG_SRCS}/include ${.ALLSRC}
+
DeclNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/DeclNodes.td
${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/AST ${TBLINC} \
-gen-clang-decl-nodes -o ${.TARGET} ${.ALLSRC}
@@ -117,7 +132,7 @@ DiagnosticIndexName.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td
${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/Basic ${TBLINC} \
-gen-clang-diags-index-name -o ${.TARGET} ${.ALLSRC}
-.for hdr in AST Analysis Common Driver Frontend Lex Parse Sema
+.for hdr in AST Analysis Common Driver Frontend Lex Parse Sema Serialization
Diagnostic${hdr}Kinds.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td
${CLANG_TBLGEN} -I ${CLANG_SRCS}/include/clang/Basic ${TBLINC} \
-gen-clang-diags-defs -clang-component=${hdr} \
diff --git a/lib/clang/include/Makefile b/lib/clang/include/Makefile
index 6f46e28..73a6120 100644
--- a/lib/clang/include/Makefile
+++ b/lib/clang/include/Makefile
@@ -2,10 +2,11 @@
.PATH: ${.CURDIR}/../../../contrib/llvm/tools/clang/lib/Headers
-INCSDIR=${INCLUDEDIR}/clang/3.0
+INCSDIR=${INCLUDEDIR}/clang/3.1
INCS= altivec.h \
avxintrin.h \
+ cpuid.h \
emmintrin.h \
immintrin.h \
mm3dnow.h \
@@ -14,7 +15,9 @@ INCS= altivec.h \
nmmintrin.h \
pmmintrin.h \
smmintrin.h \
+ stdalign.h \
tmmintrin.h \
+ unwind.h \
wmmintrin.h \
x86intrin.h \
xmmintrin.h
diff --git a/lib/clang/include/MipsGenMCCodeEmitter.inc b/lib/clang/include/MipsGenMCCodeEmitter.inc
new file mode 100644
index 0000000..82cd418
--- /dev/null
+++ b/lib/clang/include/MipsGenMCCodeEmitter.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "MipsGenMCCodeEmitter.inc.h"
diff --git a/lib/clang/include/clang/Basic/DiagnosticSerializationKinds.inc b/lib/clang/include/clang/Basic/DiagnosticSerializationKinds.inc
new file mode 100644
index 0000000..f0d84f1
--- /dev/null
+++ b/lib/clang/include/clang/Basic/DiagnosticSerializationKinds.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "DiagnosticSerializationKinds.inc.h"
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
index 9842584..55d4ea6 100644
--- a/lib/clang/include/clang/Basic/Version.inc
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -1,10 +1,10 @@
/* $FreeBSD$ */
-#define CLANG_VERSION 3.0
+#define CLANG_VERSION 3.1
#define CLANG_VERSION_MAJOR 3
-#define CLANG_VERSION_MINOR 0
+#define CLANG_VERSION_MINOR 1
#define CLANG_VENDOR "FreeBSD "
-#define CLANG_VENDOR_SUFFIX " 20111210"
+#define CLANG_VENDOR_SUFFIX " 20120413"
-#define SVN_REVISION "145349"
+#define SVN_REVISION "154661"
diff --git a/lib/clang/include/clang/Config/config.h b/lib/clang/include/clang/Config/config.h
new file mode 100644
index 0000000..7348308
--- /dev/null
+++ b/lib/clang/include/clang/Config/config.h
@@ -0,0 +1,20 @@
+/* $FreeBSD$ */
+/* tools/clang/include/clang/Config/config.h. Generated from config.h.in by configure. */
+/* include/clang/Config/config.h.in. */
+
+#ifndef CONFIG_H
+#define CONFIG_H
+
+/* Bug report URL. */
+#define BUG_REPORT_URL "http://llvm.org/bugs/"
+
+/* Relative directory for resource files */
+#define CLANG_RESOURCE_DIR ""
+
+/* Directory where gcc is installed. */
+#define GCC_INSTALL_PREFIX ""
+
+/* Directories clang will search for headers */
+#define C_INCLUDE_DIRS ""
+
+#endif
diff --git a/lib/clang/include/clang/Sema/AttrParsedAttrKinds.inc b/lib/clang/include/clang/Sema/AttrParsedAttrKinds.inc
new file mode 100644
index 0000000..77d0092
--- /dev/null
+++ b/lib/clang/include/clang/Sema/AttrParsedAttrKinds.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "AttrParsedAttrKinds.inc.h"
diff --git a/lib/clang/include/clang/Sema/AttrParsedAttrList.inc b/lib/clang/include/clang/Sema/AttrParsedAttrList.inc
new file mode 100644
index 0000000..b5aeac3
--- /dev/null
+++ b/lib/clang/include/clang/Sema/AttrParsedAttrList.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "AttrParsedAttrList.inc.h"
diff --git a/lib/clang/include/clang/Sema/AttrTemplateInstantiate.inc b/lib/clang/include/clang/Sema/AttrTemplateInstantiate.inc
new file mode 100644
index 0000000..88c3c11
--- /dev/null
+++ b/lib/clang/include/clang/Sema/AttrTemplateInstantiate.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "AttrTemplateInstantiate.inc.h"
diff --git a/lib/clang/include/llvm/Config/config.h b/lib/clang/include/llvm/Config/config.h
index 92c960b..f6001e9 100644
--- a/lib/clang/include/llvm/Config/config.h
+++ b/lib/clang/include/llvm/Config/config.h
@@ -11,33 +11,18 @@
/* Relative directory for resource files */
#define CLANG_RESOURCE_DIR ""
-/* 32 bit multilib directory. */
-#define CXX_INCLUDE_32BIT_DIR ""
-
-/* 64 bit multilib directory. */
-#define CXX_INCLUDE_64BIT_DIR ""
-
-/* Arch the libstdc++ headers. */
-#define CXX_INCLUDE_ARCH ""
-
-/* Directory with the libstdc++ headers. */
-#define CXX_INCLUDE_ROOT ""
-
/* Directories clang will search for headers */
#define C_INCLUDE_DIRS ""
-/* Define if CBE is enabled for printf %a output */
-#define ENABLE_CBE_PRINTF_A 1
-
/* Define if position independent code is enabled */
#define ENABLE_PIC 0
-/* Define if threads enabled */
-#define ENABLE_THREADS 0
-
/* Define if timestamp information (e.g., __DATE___) is allowed */
#define ENABLE_TIMESTAMPS 0
+/* Directory where gcc is installed. */
+#define GCC_INSTALL_PREFIX ""
+
/* Define to 1 if you have the `argz_append' function. */
/* #undef HAVE_ARGZ_APPEND */
@@ -297,6 +282,9 @@
/* Define to 1 if you have the `powf' function. */
#define HAVE_POWF 1
+/* Define to 1 if you have the `pread' function. */
+#define HAVE_PREAD 1
+
/* Define if libtool can extract symbol lists from object files. */
#define HAVE_PRELOADED_SYMBOLS 1
@@ -373,7 +361,7 @@
#define HAVE_STDLIB_H 1
/* Set to 1 if the std::isinf function is found in <cmath> */
-/* #undef HAVE_STD_ISINF_IN_CMATH */
+#define HAVE_STD_ISINF_IN_CMATH 1
/* Set to 1 if the std::isnan function is found in <cmath> */
#define HAVE_STD_ISNAN_IN_CMATH 1
@@ -545,18 +533,21 @@
/* Installation directory for data files */
/* #undef LLVM_DATADIR */
+/* Target triple LLVM will generate code for by default */
+/* #undef LLVM_DEFAULT_TARGET_TRIPLE */
+
/* Installation directory for documentation */
/* #undef LLVM_DOCSDIR */
+/* Define if threads enabled */
+#define LLVM_ENABLE_THREADS 0
+
/* Installation directory for config files */
/* #undef LLVM_ETCDIR */
/* Has gcc/MSVC atomic intrinsics */
#define LLVM_HAS_ATOMICS 0
-/* Host triple we were built on */
-/* #undef LLVM_HOSTTRIPLE */
-
/* Installation directory for include files */
/* #undef LLVM_INCLUDEDIR */
@@ -578,6 +569,9 @@
/* LLVM name for the native AsmPrinter init function, if available */
#define LLVM_NATIVE_ASMPRINTER LLVMInitializeX86AsmPrinter
+/* LLVM name for the native Disassembler init function, if available */
+#define LLVM_NATIVE_DISASSEMBLER LLVMInitializeX86Disassembler
+
/* LLVM name for the native Target init function, if available */
#define LLVM_NATIVE_TARGET LLVMInitializeX86Target
@@ -623,6 +617,18 @@
/* Installation prefix directory */
/* #undef LLVM_PREFIX */
+/* Define if we have the Intel JIT API runtime support library */
+#define LLVM_USE_INTEL_JITEVENTS 0
+
+/* Define if we have the oprofile JIT-support library */
+#define LLVM_USE_OPROFILE 0
+
+/* Major version of the LLVM API */
+#define LLVM_VERSION_MAJOR 3
+
+/* Minor version of the LLVM API */
+#define LLVM_VERSION_MINOR 1
+
/* Define if the OS needs help to load dependent libraries for dlopen(). */
#define LTDL_DLOPEN_DEPLIBS 1
@@ -648,19 +654,19 @@
/* #undef NEED_USCORE */
/* Define to the address where bug reports for this package should be sent. */
-#define PACKAGE_BUGREPORT "llvmbugs@cs.uiuc.edu"
+#define PACKAGE_BUGREPORT "http://llvm.org/bugs/"
/* Define to the full name of this package. */
-#define PACKAGE_NAME "llvm"
+#define PACKAGE_NAME "LLVM"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "llvm 3.0"
+#define PACKAGE_STRING "LLVM 3.1svn"
/* Define to the one symbol short name of this package. */
-#define PACKAGE_TARNAME "-llvm-"
+#define PACKAGE_TARNAME "llvm"
/* Define to the version of this package. */
-#define PACKAGE_VERSION "3.0"
+#define PACKAGE_VERSION "3.1svn"
/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void
@@ -677,9 +683,6 @@
/* Define to 1 if your <sys/time.h> declares `struct tm'. */
/* #undef TM_IN_SYS_TIME */
-/* Define if we have the oprofile JIT-support library */
-#define USE_OPROFILE 0
-
/* Define if use udis86 library */
#define USE_UDIS86 0
diff --git a/lib/clang/include/llvm/Config/llvm-config.h b/lib/clang/include/llvm/Config/llvm-config.h
index 1b50ab8..4a8d460 100644
--- a/lib/clang/include/llvm/Config/llvm-config.h
+++ b/lib/clang/include/llvm/Config/llvm-config.h
@@ -27,18 +27,21 @@
/* Installation directory for data files */
/* #undef LLVM_DATADIR */
+/* Target triple LLVM will generate code for by default */
+/* #undef LLVM_DEFAULT_TARGET_TRIPLE */
+
/* Installation directory for documentation */
/* #undef LLVM_DOCSDIR */
+/* Define if threads enabled */
+#define LLVM_ENABLE_THREADS 0
+
/* Installation directory for config files */
/* #undef LLVM_ETCDIR */
/* Has gcc/MSVC atomic intrinsics */
#define LLVM_HAS_ATOMICS 0
-/* Host triple we were built on */
-/* #undef LLVM_HOSTTRIPLE */
-
/* Installation directory for include files */
/* #undef LLVM_INCLUDEDIR */
@@ -60,6 +63,9 @@
/* LLVM name for the native AsmPrinter init function, if available */
#define LLVM_NATIVE_ASMPRINTER LLVMInitializeX86AsmPrinter
+/* LLVM name for the native Disassembler init function, if available */
+#define LLVM_NATIVE_DISASSEMBLER LLVMInitializeX86Disassembler
+
/* LLVM name for the native Target init function, if available */
#define LLVM_NATIVE_TARGET LLVMInitializeX86Target
@@ -105,4 +111,10 @@
/* Installation prefix directory */
/* #undef LLVM_PREFIX */
+/* Major version of the LLVM API */
+#define LLVM_VERSION_MAJOR 3
+
+/* Minor version of the LLVM API */
+#define LLVM_VERSION_MINOR 1
+
#endif
diff --git a/lib/clang/libclanganalysis/Makefile b/lib/clang/libclanganalysis/Makefile
index f5fbc627..f45ec59 100644
--- a/lib/clang/libclanganalysis/Makefile
+++ b/lib/clang/libclanganalysis/Makefile
@@ -3,13 +3,16 @@
LIB= clanganalysis
SRCDIR= tools/clang/lib/Analysis
-SRCS= AnalysisContext.cpp \
+SRCS= AnalysisDeclContext.cpp \
+ CallGraph.cpp \
CFG.cpp \
CFGReachabilityAnalysis.cpp \
CFGStmtMap.cpp \
CocoaConventions.cpp \
+ Dominators.cpp \
FormatString.cpp \
LiveVariables.cpp \
+ PostOrderCFGView.cpp \
PrintfFormatString.cpp \
ProgramPoint.cpp \
PseudoConstantAnalysis.cpp \
diff --git a/lib/clang/libclangarcmigrate/Makefile b/lib/clang/libclangarcmigrate/Makefile
index 957c3ba..d097ec4 100644
--- a/lib/clang/libclangarcmigrate/Makefile
+++ b/lib/clang/libclangarcmigrate/Makefile
@@ -6,6 +6,7 @@ SRCDIR= tools/clang/lib/ARCMigrate
SRCS= ARCMT.cpp \
ARCMTActions.cpp \
FileRemapper.cpp \
+ ObjCMT.cpp \
PlistReporter.cpp \
TransAPIUses.cpp \
TransARCAssign.cpp \
@@ -14,6 +15,8 @@ SRCS= ARCMT.cpp \
TransEmptyStatementsAndDealloc.cpp \
TransformActions.cpp \
Transforms.cpp \
+ TransGCAttrs.cpp \
+ TransGCCalls.cpp \
TransProperties.cpp \
TransRetainReleaseDealloc.cpp \
TransUnbridgedCasts.cpp \
@@ -21,6 +24,7 @@ SRCS= ARCMT.cpp \
TransZeroOutPropsInDealloc.cpp
TGHDRS= AttrList \
+ AttrParsedAttrList \
Attrs \
DeclNodes \
DiagnosticCommonKinds \
diff --git a/lib/clang/libclangast/Makefile b/lib/clang/libclangast/Makefile
index 61f6881..81812dc 100644
--- a/lib/clang/libclangast/Makefile
+++ b/lib/clang/libclangast/Makefile
@@ -28,10 +28,12 @@ SRCS= APValue.cpp \
InheritViz.cpp \
ItaniumCXXABI.cpp \
ItaniumMangle.cpp \
+ LambdaMangleContext.cpp \
Mangle.cpp \
MicrosoftCXXABI.cpp \
MicrosoftMangle.cpp \
NestedNameSpecifier.cpp \
+ NSAPI.cpp \
ParentMap.cpp \
RecordLayout.cpp \
RecordLayoutBuilder.cpp \
diff --git a/lib/clang/libclangbasic/Makefile b/lib/clang/libclangbasic/Makefile
index dec03c5..742cc24 100644
--- a/lib/clang/libclangbasic/Makefile
+++ b/lib/clang/libclangbasic/Makefile
@@ -11,6 +11,7 @@ SRCS= Builtins.cpp \
FileSystemStatCache.cpp \
IdentifierTable.cpp \
LangOptions.cpp \
+ Module.cpp \
SourceLocation.cpp \
SourceManager.cpp \
TargetInfo.cpp \
@@ -29,6 +30,7 @@ TGHDRS= DiagnosticAnalysisKinds \
DiagnosticLexKinds \
DiagnosticParseKinds \
DiagnosticSemaKinds \
+ DiagnosticSerializationKinds \
arm_neon
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangcodegen/Makefile b/lib/clang/libclangcodegen/Makefile
index 467b2d2..bd7b171 100644
--- a/lib/clang/libclangcodegen/Makefile
+++ b/lib/clang/libclangcodegen/Makefile
@@ -31,7 +31,6 @@ SRCS= BackendUtil.cpp \
CGRTTI.cpp \
CGRecordLayoutBuilder.cpp \
CGStmt.cpp \
- CGTemporaries.cpp \
CGVTT.cpp \
CGVTables.cpp \
CodeGenAction.cpp \
diff --git a/lib/clang/libclangdriver/Makefile b/lib/clang/libclangdriver/Makefile
index ce9d251..53d49d1 100644
--- a/lib/clang/libclangdriver/Makefile
+++ b/lib/clang/libclangdriver/Makefile
@@ -11,7 +11,6 @@ SRCS= Action.cpp \
Compilation.cpp \
Driver.cpp \
DriverOptions.cpp \
- HostInfo.cpp \
Job.cpp \
OptTable.cpp \
Option.cpp \
@@ -20,7 +19,8 @@ SRCS= Action.cpp \
ToolChain.cpp \
ToolChains.cpp \
Tools.cpp \
- Types.cpp
+ Types.cpp \
+ WindowsToolChain.cpp
TGHDRS= CC1AsOptions \
CC1Options \
diff --git a/lib/clang/libclangedit/Makefile b/lib/clang/libclangedit/Makefile
new file mode 100644
index 0000000..147d133
--- /dev/null
+++ b/lib/clang/libclangedit/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+LIB= clangedit
+
+SRCDIR= tools/clang/lib/Edit
+SRCS= Commit.cpp \
+ EditedSource.cpp \
+ RewriteObjCFoundationAPI.cpp
+
+TGHDRS= AttrList \
+ Attrs \
+ DeclNodes \
+ StmtNodes \
+ DiagnosticCommonKinds
+
+.include "../clang.lib.mk"
diff --git a/lib/clang/libclangfrontend/Makefile b/lib/clang/libclangfrontend/Makefile
index d8495a2..0ef026b 100644
--- a/lib/clang/libclangfrontend/Makefile
+++ b/lib/clang/libclangfrontend/Makefile
@@ -7,10 +7,14 @@ SRCS= ASTConsumers.cpp \
ASTMerge.cpp \
ASTUnit.cpp \
CacheTokens.cpp \
+ ChainedDiagnosticConsumer.cpp \
+ ChainedIncludesSource.cpp \
CompilerInstance.cpp \
CompilerInvocation.cpp \
CreateInvocationFromCommandLine.cpp \
DependencyFile.cpp \
+ DependencyGraph.cpp \
+ DiagnosticRenderer.cpp \
FrontendAction.cpp \
FrontendActions.cpp \
FrontendOptions.cpp \
@@ -18,15 +22,19 @@ SRCS= ASTConsumers.cpp \
InitHeaderSearch.cpp \
InitPreprocessor.cpp \
LangStandards.cpp \
+ LayoutOverrideSource.cpp \
LogDiagnosticPrinter.cpp \
MultiplexConsumer.cpp \
PrintPreprocessedOutput.cpp \
+ SerializedDiagnosticPrinter.cpp \
+ TextDiagnostic.cpp \
TextDiagnosticBuffer.cpp \
TextDiagnosticPrinter.cpp \
VerifyDiagnosticConsumer.cpp \
Warnings.cpp
TGHDRS= AttrList \
+ AttrParsedAttrList \
Attrs \
CC1Options \
DeclNodes \
diff --git a/lib/clang/libclangindex/Makefile b/lib/clang/libclangindex/Makefile
index 0980da0..63b532c 100644
--- a/lib/clang/libclangindex/Makefile
+++ b/lib/clang/libclangindex/Makefile
@@ -5,7 +5,6 @@ LIB= clangindex
SRCDIR= tools/clang/lib/Index
SRCS= ASTLocation.cpp \
Analyzer.cpp \
- CallGraph.cpp \
DeclReferenceMap.cpp \
Entity.cpp \
GlobalSelector.cpp \
diff --git a/lib/clang/libclanglex/Makefile b/lib/clang/libclanglex/Makefile
index 7e659c6..3ad0e3f 100644
--- a/lib/clang/libclanglex/Makefile
+++ b/lib/clang/libclanglex/Makefile
@@ -9,7 +9,9 @@ SRCS= HeaderMap.cpp \
LiteralSupport.cpp \
MacroArgs.cpp \
MacroInfo.cpp \
+ ModuleMap.cpp \
PPCaching.cpp \
+ PPCallbacks.cpp \
PPDirectives.cpp \
PPExpressions.cpp \
PPLexerChange.cpp \
diff --git a/lib/clang/libclangparse/Makefile b/lib/clang/libclangparse/Makefile
index 6bd0b5b..1bf46a8 100644
--- a/lib/clang/libclangparse/Makefile
+++ b/lib/clang/libclangparse/Makefile
@@ -19,6 +19,7 @@ SRCS= ParseAST.cpp \
TGHDRS= AttrLateParsed \
AttrList \
+ AttrParsedAttrList \
Attrs \
DeclNodes \
DiagnosticCommonKinds \
diff --git a/lib/clang/libclangrewrite/Makefile b/lib/clang/libclangrewrite/Makefile
index 496c699..3150b56 100644
--- a/lib/clang/libclangrewrite/Makefile
+++ b/lib/clang/libclangrewrite/Makefile
@@ -9,6 +9,7 @@ SRCS= DeltaTree.cpp \
HTMLPrint.cpp \
HTMLRewrite.cpp \
RewriteMacros.cpp \
+ RewriteModernObjC.cpp \
RewriteObjC.cpp \
RewriteRope.cpp \
RewriteTest.cpp \
@@ -16,6 +17,7 @@ SRCS= DeltaTree.cpp \
TokenRewriter.cpp
TGHDRS= AttrList \
+ AttrParsedAttrList \
Attrs \
DeclNodes \
DiagnosticCommonKinds \
diff --git a/lib/clang/libclangsema/Makefile b/lib/clang/libclangsema/Makefile
index 57f5ccb..e40a756 100644
--- a/lib/clang/libclangsema/Makefile
+++ b/lib/clang/libclangsema/Makefile
@@ -10,7 +10,6 @@ SRCS= AnalysisBasedWarnings.cpp \
DelayedDiagnostic.cpp \
IdentifierResolver.cpp \
JumpDiagnostics.cpp \
- MultiInitializer.cpp \
Scope.cpp \
Sema.cpp \
SemaAccess.cpp \
@@ -19,20 +18,23 @@ SRCS= AnalysisBasedWarnings.cpp \
SemaCast.cpp \
SemaChecking.cpp \
SemaCodeComplete.cpp \
+ SemaConsumer.cpp \
SemaDecl.cpp \
SemaDeclAttr.cpp \
SemaDeclCXX.cpp \
- SemaExprMember.cpp \
SemaDeclObjC.cpp \
SemaExceptionSpec.cpp \
SemaExpr.cpp \
SemaExprCXX.cpp \
+ SemaExprMember.cpp \
SemaExprObjC.cpp \
SemaFixItUtils.cpp \
SemaInit.cpp \
+ SemaLambda.cpp \
SemaLookup.cpp \
SemaObjCProperty.cpp \
SemaOverload.cpp \
+ SemaPseudoObject.cpp \
SemaStmt.cpp \
SemaTemplate.cpp \
SemaTemplateDeduction.cpp \
@@ -43,6 +45,9 @@ SRCS= AnalysisBasedWarnings.cpp \
TargetAttributesSema.cpp
TGHDRS= AttrList \
+ AttrParsedAttrKinds \
+ AttrParsedAttrList \
+ AttrTemplateInstantiate \
Attrs \
DeclNodes \
DiagnosticASTKinds \
diff --git a/lib/clang/libclangserialization/Makefile b/lib/clang/libclangserialization/Makefile
index a4e0f36..acd97ae 100644
--- a/lib/clang/libclangserialization/Makefile
+++ b/lib/clang/libclangserialization/Makefile
@@ -10,7 +10,6 @@ SRCS= ASTCommon.cpp \
ASTWriter.cpp \
ASTWriterDecl.cpp \
ASTWriterStmt.cpp \
- ChainedIncludesSource.cpp \
GeneratePCH.cpp \
Module.cpp \
ModuleManager.cpp
@@ -18,11 +17,13 @@ SRCS= ASTCommon.cpp \
TGHDRS= AttrList \
AttrPCHRead \
AttrPCHWrite \
+ AttrParsedAttrList \
Attrs \
DeclNodes \
DiagnosticCommonKinds \
DiagnosticFrontendKinds \
DiagnosticSemaKinds \
+ DiagnosticSerializationKinds \
StmtNodes
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangstaticanalyzercheckers/Makefile b/lib/clang/libclangstaticanalyzercheckers/Makefile
index 44f7d58..d4eb771 100644
--- a/lib/clang/libclangstaticanalyzercheckers/Makefile
+++ b/lib/clang/libclangstaticanalyzercheckers/Makefile
@@ -9,8 +9,10 @@ SRCS= AdjustedReturnValueChecker.cpp \
ArrayBoundCheckerV2.cpp \
AttrNonNullChecker.cpp \
BasicObjCFoundationChecks.cpp \
+ BoolAssignmentChecker.cpp \
BuiltinFunctionChecker.cpp \
CStringChecker.cpp \
+ CStringSyntaxChecker.cpp \
CallAndMessageChecker.cpp \
CastSizeChecker.cpp \
CastToStructChecker.cpp \
@@ -18,13 +20,16 @@ SRCS= AdjustedReturnValueChecker.cpp \
CheckObjCInstMethSignature.cpp \
CheckSecuritySyntaxOnly.cpp \
CheckSizeofPointer.cpp \
+ CheckerDocumentation.cpp \
ChrootChecker.cpp \
ClangCheckers.cpp \
+ CommonBugCategories.cpp \
DeadStoresChecker.cpp \
DebugCheckers.cpp \
DereferenceChecker.cpp \
DivZeroChecker.cpp \
FixedAddressChecker.cpp \
+ GenericTaintChecker.cpp \
IdempotentOperationChecker.cpp \
IteratorsChecker.cpp \
LLVMConventionsChecker.cpp \
@@ -32,11 +37,14 @@ SRCS= AdjustedReturnValueChecker.cpp \
MacOSXAPIChecker.cpp \
MallocChecker.cpp \
MallocOverflowSecurityChecker.cpp \
+ MallocSizeofChecker.cpp \
NSAutoreleasePoolChecker.cpp \
NSErrorChecker.cpp \
NoReturnFunctionChecker.cpp \
OSAtomicChecker.cpp \
ObjCAtSyncChecker.cpp \
+ ObjCContainersASTChecker.cpp \
+ ObjCContainersChecker.cpp \
ObjCSelfInitChecker.cpp \
ObjCUnusedIVarsChecker.cpp \
PointerArithChecker.cpp \
@@ -47,6 +55,7 @@ SRCS= AdjustedReturnValueChecker.cpp \
ReturnUndefChecker.cpp \
StackAddrEscapeChecker.cpp \
StreamChecker.cpp \
+ TaintTesterChecker.cpp \
UndefBranchChecker.cpp \
UndefCapturedBlockVarChecker.cpp \
UndefResultChecker.cpp \
@@ -54,7 +63,8 @@ SRCS= AdjustedReturnValueChecker.cpp \
UndefinedAssignmentChecker.cpp \
UnixAPIChecker.cpp \
UnreachableCodeChecker.cpp \
- VLASizeChecker.cpp
+ VLASizeChecker.cpp \
+ VirtualCallChecker.cpp
TGHDRS= AttrList \
Attrs \
diff --git a/lib/clang/libclangstaticanalyzercore/Makefile b/lib/clang/libclangstaticanalyzercore/Makefile
index 9a33751..27cdcc6 100644
--- a/lib/clang/libclangstaticanalyzercore/Makefile
+++ b/lib/clang/libclangstaticanalyzercore/Makefile
@@ -3,8 +3,7 @@
LIB= clangstaticanalyzercore
SRCDIR= tools/clang/lib/StaticAnalyzer/Core
-SRCS= AggExprVisitor.cpp \
- AnalysisManager.cpp \
+SRCS= AnalysisManager.cpp \
BasicConstraintManager.cpp \
BasicValueFactory.cpp \
BlockCounter.cpp \
@@ -23,6 +22,7 @@ SRCS= AggExprVisitor.cpp \
ExprEngineCXX.cpp \
ExprEngineCallAndReturn.cpp \
ExprEngineObjC.cpp \
+ FunctionSummary.cpp \
HTMLDiagnostics.cpp \
MemRegion.cpp \
ObjCMessage.cpp \
@@ -36,6 +36,7 @@ SRCS= AggExprVisitor.cpp \
SimpleConstraintManager.cpp \
SimpleSValBuilder.cpp \
Store.cpp \
+ SubEngine.cpp \
SymbolManager.cpp \
TextPathDiagnostics.cpp
diff --git a/lib/clang/libllvmanalysis/Makefile b/lib/clang/libllvmanalysis/Makefile
index 301c39b..a05dac5 100644
--- a/lib/clang/libllvmanalysis/Makefile
+++ b/lib/clang/libllvmanalysis/Makefile
@@ -12,13 +12,15 @@ SRCS= AliasAnalysis.cpp \
AliasSetTracker.cpp \
Analysis.cpp \
BasicAliasAnalysis.cpp \
+ BlockFrequencyInfo.cpp \
BranchProbabilityInfo.cpp \
CFGPrinter.cpp \
CaptureTracking.cpp \
+ CodeMetrics.cpp \
ConstantFolding.cpp \
+ DIBuilder.cpp \
DbgInfoPrinter.cpp \
DebugInfo.cpp \
- DIBuilder.cpp \
DomPrinter.cpp \
DominanceFrontier.cpp \
IVUsers.cpp \
@@ -29,6 +31,7 @@ SRCS= AliasAnalysis.cpp \
IntervalPartition.cpp \
LazyValueInfo.cpp \
LibCallAliasAnalysis.cpp \
+ LibCallSemantics.cpp \
Lint.cpp \
Loads.cpp \
LoopDependenceAnalysis.cpp \
@@ -40,6 +43,9 @@ SRCS= AliasAnalysis.cpp \
ModuleDebugInfoPrinter.cpp \
NoAliasAnalysis.cpp \
PHITransAddr.cpp \
+ PathNumbering.cpp \
+ PathProfileInfo.cpp \
+ PathProfileVerifier.cpp \
PostDominators.cpp \
ProfileEstimatorPass.cpp \
ProfileInfo.cpp \
@@ -54,6 +60,7 @@ SRCS= AliasAnalysis.cpp \
ScalarEvolutionExpander.cpp \
ScalarEvolutionNormalization.cpp \
SparsePropagation.cpp \
+ Trace.cpp \
TypeBasedAliasAnalysis.cpp \
ValueTracking.cpp
diff --git a/lib/clang/libllvmarmcodegen/Makefile b/lib/clang/libllvmarmcodegen/Makefile
index 29579bc..b18f12c 100644
--- a/lib/clang/libllvmarmcodegen/Makefile
+++ b/lib/clang/libllvmarmcodegen/Makefile
@@ -13,7 +13,6 @@ SRCS= ARMAsmPrinter.cpp \
ARMExpandPseudoInsts.cpp \
ARMFastISel.cpp \
ARMFrameLowering.cpp \
- ARMGlobalMerge.cpp \
ARMHazardRecognizer.cpp \
ARMISelDAGToDAG.cpp \
ARMISelLowering.cpp \
@@ -21,6 +20,7 @@ SRCS= ARMAsmPrinter.cpp \
ARMJITInfo.cpp \
ARMLoadStoreOptimizer.cpp \
ARMMCInstLower.cpp \
+ ARMMachineFunctionInfo.cpp \
ARMRegisterInfo.cpp \
ARMSelectionDAGInfo.cpp \
ARMSubtarget.cpp \
diff --git a/lib/clang/libllvmarmdesc/Makefile b/lib/clang/libllvmarmdesc/Makefile
index e732074..bd2e0cb 100644
--- a/lib/clang/libllvmarmdesc/Makefile
+++ b/lib/clang/libllvmarmdesc/Makefile
@@ -4,6 +4,7 @@ LIB= llvmarmdesc
SRCDIR= lib/Target/ARM/MCTargetDesc
SRCS= ARMAsmBackend.cpp \
+ ARMELFObjectWriter.cpp \
ARMMachObjectWriter.cpp \
ARMMCAsmInfo.cpp \
ARMMCCodeEmitter.cpp \
diff --git a/lib/clang/libllvmasmprinter/Makefile b/lib/clang/libllvmasmprinter/Makefile
index 26f113f..36e626f 100644
--- a/lib/clang/libllvmasmprinter/Makefile
+++ b/lib/clang/libllvmasmprinter/Makefile
@@ -8,6 +8,7 @@ SRCS= ARMException.cpp \
AsmPrinterDwarf.cpp \
AsmPrinterInlineAsm.cpp \
DIE.cpp \
+ DwarfAccelTable.cpp \
DwarfCFIException.cpp \
DwarfCompileUnit.cpp \
DwarfDebug.cpp \
diff --git a/lib/clang/libllvmcodegen/Makefile b/lib/clang/libllvmcodegen/Makefile
index 0d593b6..70da7da 100644
--- a/lib/clang/libllvmcodegen/Makefile
+++ b/lib/clang/libllvmcodegen/Makefile
@@ -13,9 +13,8 @@ SRCS= AggressiveAntiDepBreaker.cpp \
CodePlacementOpt.cpp \
CriticalAntiDepBreaker.cpp \
DeadMachineInstructionElim.cpp \
+ DFAPacketizer.cpp \
DwarfEHPrepare.cpp \
- ELFCodeEmitter.cpp \
- ELFWriter.cpp \
EdgeBundles.cpp \
ExecutionDepsFix.cpp \
ExpandISelPseudos.cpp \
@@ -27,6 +26,7 @@ SRCS= AggressiveAntiDepBreaker.cpp \
InlineSpiller.cpp \
InterferenceCache.cpp \
IntrinsicLowering.cpp \
+ JITCodeEmitter.cpp \
LLVMTargetMachine.cpp \
LatencyPriorityQueue.cpp \
LexicalScopes.cpp \
@@ -40,7 +40,11 @@ SRCS= AggressiveAntiDepBreaker.cpp \
LiveRangeEdit.cpp \
LocalStackSlotAllocation.cpp \
MachineBasicBlock.cpp \
+ MachineBlockFrequencyInfo.cpp \
+ MachineBlockPlacement.cpp \
MachineBranchProbabilityInfo.cpp \
+ MachineCodeEmitter.cpp \
+ MachineCopyPropagation.cpp \
MachineCSE.cpp \
MachineDominators.cpp \
MachineFunction.cpp \
@@ -48,6 +52,7 @@ SRCS= AggressiveAntiDepBreaker.cpp \
MachineFunctionPass.cpp \
MachineFunctionPrinterPass.cpp \
MachineInstr.cpp \
+ MachineInstrBundle.cpp \
MachineLICM.cpp \
MachineLoopInfo.cpp \
MachineLoopRanges.cpp \
@@ -56,9 +61,9 @@ SRCS= AggressiveAntiDepBreaker.cpp \
MachinePassRegistry.cpp \
MachineRegisterInfo.cpp \
MachineSSAUpdater.cpp \
+ MachineScheduler.cpp \
MachineSink.cpp \
MachineVerifier.cpp \
- ObjectCodeEmitter.cpp \
OcamlGC.cpp \
OptimizePHIs.cpp \
PHIElimination.cpp \
@@ -69,17 +74,16 @@ SRCS= AggressiveAntiDepBreaker.cpp \
ProcessImplicitDefs.cpp \
PrologEpilogInserter.cpp \
PseudoSourceValue.cpp \
+ RegAllocBase.cpp \
RegAllocBasic.cpp \
RegAllocFast.cpp \
RegAllocGreedy.cpp \
- RegAllocLinearScan.cpp \
RegAllocPBQP.cpp \
RegisterClassInfo.cpp \
RegisterCoalescer.cpp \
RegisterScavenging.cpp \
RenderMachineFunction.cpp \
ScheduleDAG.cpp \
- ScheduleDAGEmit.cpp \
ScheduleDAGInstrs.cpp \
ScheduleDAGPrinter.cpp \
ScoreboardHazardRecognizer.cpp \
@@ -90,17 +94,17 @@ SRCS= AggressiveAntiDepBreaker.cpp \
Spiller.cpp \
SpillPlacement.cpp \
SplitKit.cpp \
- Splitter.cpp \
StackProtector.cpp \
StackSlotColoring.cpp \
StrongPHIElimination.cpp \
TailDuplication.cpp \
+ TargetFrameLoweringImpl.cpp \
TargetInstrInfoImpl.cpp \
TargetLoweringObjectFileImpl.cpp \
+ TargetOptionsImpl.cpp \
TwoAddressInstructionPass.cpp \
UnreachableBlockElim.cpp \
- VirtRegMap.cpp \
- VirtRegRewriter.cpp
+ VirtRegMap.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmcore/Makefile b/lib/clang/libllvmcore/Makefile
index c37d45a..2702b66 100644
--- a/lib/clang/libllvmcore/Makefile
+++ b/lib/clang/libllvmcore/Makefile
@@ -10,7 +10,6 @@ SRCS= AsmWriter.cpp \
ConstantFold.cpp \
Constants.cpp \
Core.cpp \
- DebugInfoProbe.cpp \
DebugLoc.cpp \
Dominators.cpp \
Function.cpp \
diff --git a/lib/clang/libllvminstrumentation/Makefile b/lib/clang/libllvminstrumentation/Makefile
index 62d9f97..fbfa198 100644
--- a/lib/clang/libllvminstrumentation/Makefile
+++ b/lib/clang/libllvminstrumentation/Makefile
@@ -3,13 +3,16 @@
LIB= llvminstrumentation
SRCDIR= lib/Transforms/Instrumentation
-SRCS= EdgeProfiling.cpp \
+SRCS= AddressSanitizer.cpp \
+ EdgeProfiling.cpp \
+ FunctionBlackList.cpp \
GCOVProfiling.cpp \
Instrumentation.cpp \
OptimalEdgeProfiling.cpp \
PathProfiling.cpp \
-	ProfilingUtils.cpp
+	ProfilingUtils.cpp \
+	ThreadSanitizer.cpp
-#TGHDRS= Intrinsics
+TGHDRS= Intrinsics
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmjit/Makefile b/lib/clang/libllvmjit/Makefile
index fc92fd0..ff852d5 100644
--- a/lib/clang/libllvmjit/Makefile
+++ b/lib/clang/libllvmjit/Makefile
@@ -3,12 +3,9 @@
LIB= llvmjit
SRCDIR= lib/ExecutionEngine/JIT
-SRCS= Intercept.cpp \
- JIT.cpp \
- JITDebugRegisterer.cpp \
+SRCS= JIT.cpp \
JITDwarfEmitter.cpp \
JITEmitter.cpp \
- JITMemoryManager.cpp \
- OProfileJITEventListener.cpp
+ JITMemoryManager.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmmc/Makefile b/lib/clang/libllvmmc/Makefile
index 9bb5723..9309e97 100644
--- a/lib/clang/libllvmmc/Makefile
+++ b/lib/clang/libllvmmc/Makefile
@@ -23,7 +23,6 @@ SRCS= ELFObjectWriter.cpp \
MCInst.cpp \
MCInstPrinter.cpp \
MCInstrAnalysis.cpp \
- MCLoggingStreamer.cpp \
MCMachOStreamer.cpp \
MCMachObjectTargetWriter.cpp \
MCNullStreamer.cpp \
diff --git a/lib/clang/libllvmmcjit/Makefile b/lib/clang/libllvmmcjit/Makefile
index 58e2529..553186f 100644
--- a/lib/clang/libllvmmcjit/Makefile
+++ b/lib/clang/libllvmmcjit/Makefile
@@ -1,9 +1,14 @@
# $FreeBSD$
+.include <bsd.own.mk>
+
LIB= llvmmcjit
SRCDIR= lib/ExecutionEngine/MCJIT
-SRCS= Intercept.cpp \
- MCJIT.cpp
+SRCS= MCJIT.cpp
+
+.if ${MK_CLANG_EXTRAS} != "no"
+SRCS+= MCJITMemoryManager.cpp
+.endif
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmmipsasmparser/Makefile b/lib/clang/libllvmmipsasmparser/Makefile
new file mode 100644
index 0000000..dd95394
--- /dev/null
+++ b/lib/clang/libllvmmipsasmparser/Makefile
@@ -0,0 +1,13 @@
+# $FreeBSD$
+
+LIB= llvmmipsasmparser
+
+SRCDIR= lib/Target/Mips/AsmParser
+INCDIR= lib/Target/Mips
+SRCS= MipsAsmParser.cpp
+
+TGHDRS= MipsGenInstrInfo \
+ MipsGenRegisterInfo \
+ MipsGenSubtargetInfo
+
+.include "../clang.lib.mk"
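
TGHDRS entries such as MipsGenInstrInfo name TableGen-generated headers; the shared clang makefiles expand each into a tblgen rule. A hypothetical sketch of one such rule, assuming a ${TBLGEN} variable and the include paths shown (the real glue lives in the shared lib/clang build makefiles, not in this hunk):

# Sketch only: one TGHDRS entry written as an explicit rule.
MipsGenInstrInfo.inc: ${LLVM_SRCS}/lib/Target/Mips/Mips.td
	${TBLGEN} -gen-instr-info -I ${LLVM_SRCS}/include \
	    -I ${LLVM_SRCS}/lib/Target/Mips ${.ALLSRC:M*.td} -o ${.TARGET}

The -gen-instr-info mode is documented in the tblgen(1) page updated later in this commit.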
diff --git a/lib/clang/libllvmmipscodegen/Makefile b/lib/clang/libllvmmipscodegen/Makefile
index f0b0180..d550006 100644
--- a/lib/clang/libllvmmipscodegen/Makefile
+++ b/lib/clang/libllvmmipscodegen/Makefile
@@ -3,7 +3,8 @@
LIB= llvmmipscodegen
SRCDIR= lib/Target/Mips
-SRCS= MipsAsmPrinter.cpp \
+SRCS= MipsAnalyzeImmediate.cpp \
+ MipsAsmPrinter.cpp \
MipsCodeEmitter.cpp \
MipsDelaySlotFiller.cpp \
MipsEmitGPRestore.cpp \
@@ -14,7 +15,7 @@ SRCS= MipsAsmPrinter.cpp \
MipsInstrInfo.cpp \
MipsJITInfo.cpp \
MipsMCInstLower.cpp \
- MipsMCSymbolRefExpr.cpp \
+ MipsMachineFunction.cpp \
MipsRegisterInfo.cpp \
MipsSelectionDAGInfo.cpp \
MipsSubtarget.cpp \
diff --git a/lib/clang/libllvmmipsdesc/Makefile b/lib/clang/libllvmmipsdesc/Makefile
index 2313603..5c17588 100644
--- a/lib/clang/libllvmmipsdesc/Makefile
+++ b/lib/clang/libllvmmipsdesc/Makefile
@@ -4,12 +4,14 @@ LIB= llvmmipsdesc
SRCDIR= lib/Target/Mips/MCTargetDesc
SRCS= MipsAsmBackend.cpp \
+ MipsELFObjectWriter.cpp \
MipsMCAsmInfo.cpp \
MipsMCCodeEmitter.cpp \
MipsMCTargetDesc.cpp
CFLAGS+= -I${LLVM_SRCS}/${SRCDIR}/..
TGHDRS= MipsGenInstrInfo \
+ MipsGenMCCodeEmitter \
MipsGenRegisterInfo \
MipsGenSubtargetInfo
diff --git a/lib/clang/libllvmpowerpccodegen/Makefile b/lib/clang/libllvmpowerpccodegen/Makefile
index aec0e1e..1874095 100644
--- a/lib/clang/libllvmpowerpccodegen/Makefile
+++ b/lib/clang/libllvmpowerpccodegen/Makefile
@@ -13,6 +13,7 @@ SRCS= PPCAsmPrinter.cpp \
PPCInstrInfo.cpp \
PPCJITInfo.cpp \
PPCMCInstLower.cpp \
+ PPCMachineFunctionInfo.cpp \
PPCRegisterInfo.cpp \
PPCSelectionDAGInfo.cpp \
PPCSubtarget.cpp \
diff --git a/lib/clang/libllvmpowerpcdesc/Makefile b/lib/clang/libllvmpowerpcdesc/Makefile
index c45abec..2a5ef6e 100644
--- a/lib/clang/libllvmpowerpcdesc/Makefile
+++ b/lib/clang/libllvmpowerpcdesc/Makefile
@@ -7,7 +7,8 @@ SRCS= PPCAsmBackend.cpp \
PPCMCAsmInfo.cpp \
PPCMCCodeEmitter.cpp \
PPCMCTargetDesc.cpp \
- PPCPredicates.cpp
+ PPCPredicates.cpp \
+ PPCELFObjectWriter.cpp
CFLAGS+= -I${LLVM_SRCS}/${SRCDIR}/..
TGHDRS= PPCGenInstrInfo \
diff --git a/lib/clang/libllvmruntimedyld/Makefile b/lib/clang/libllvmruntimedyld/Makefile
index c17f031..7280e1d 100644
--- a/lib/clang/libllvmruntimedyld/Makefile
+++ b/lib/clang/libllvmruntimedyld/Makefile
@@ -4,6 +4,7 @@ LIB= llvmruntimedyld
SRCDIR= lib/ExecutionEngine/RuntimeDyld
SRCS= RuntimeDyld.cpp \
+ RuntimeDyldELF.cpp \
RuntimeDyldMachO.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmscalaropts/Makefile b/lib/clang/libllvmscalaropts/Makefile
index 2023595..f89b0a2 100644
--- a/lib/clang/libllvmscalaropts/Makefile
+++ b/lib/clang/libllvmscalaropts/Makefile
@@ -13,6 +13,7 @@ SRCS= ADCE.cpp \
DCE.cpp \
DeadStoreElimination.cpp \
EarlyCSE.cpp \
+ GlobalMerge.cpp \
GVN.cpp \
IndVarSimplify.cpp \
JumpThreading.cpp \
@@ -23,9 +24,11 @@ SRCS= ADCE.cpp \
LoopStrengthReduce.cpp \
LoopUnrollPass.cpp \
LoopUnswitch.cpp \
+ LowerAtomic.cpp \
MemCpyOptimizer.cpp \
ObjCARC.cpp \
Reassociate.cpp \
+ Reg2Mem.cpp \
SCCP.cpp \
ScalarReplAggregates.cpp \
SimplifyCFGPass.cpp \
diff --git a/lib/clang/libllvmselectiondag/Makefile b/lib/clang/libllvmselectiondag/Makefile
index 6c508c1..e6950127 100644
--- a/lib/clang/libllvmselectiondag/Makefile
+++ b/lib/clang/libllvmselectiondag/Makefile
@@ -14,12 +14,14 @@ SRCS= DAGCombiner.cpp \
LegalizeTypesGeneric.cpp \
LegalizeVectorOps.cpp \
LegalizeVectorTypes.cpp \
+ ResourcePriorityQueue.cpp \
ScheduleDAGFast.cpp \
- ScheduleDAGList.cpp \
ScheduleDAGRRList.cpp \
ScheduleDAGSDNodes.cpp \
+ ScheduleDAGVLIW.cpp \
SelectionDAG.cpp \
SelectionDAGBuilder.cpp \
+ SelectionDAGDumper.cpp \
SelectionDAGISel.cpp \
SelectionDAGPrinter.cpp \
TargetLowering.cpp \
diff --git a/lib/clang/libllvmsupport/Makefile b/lib/clang/libllvmsupport/Makefile
index fb9309f..19afb44 100644
--- a/lib/clang/libllvmsupport/Makefile
+++ b/lib/clang/libllvmsupport/Makefile
@@ -10,6 +10,7 @@ SRCS= APFloat.cpp \
APSInt.cpp \
Allocator.cpp \
Atomic.cpp \
+ BlockFrequency.cpp \
BranchProbability.cpp \
CommandLine.cpp \
ConstantRange.cpp \
@@ -24,12 +25,16 @@ SRCS= APFloat.cpp \
FoldingSet.cpp \
FormattedStream.cpp \
GraphWriter.cpp \
+ Hashing.cpp \
Host.cpp \
IntervalMap.cpp \
IntEqClasses.cpp \
+ IntrusiveRefCntPtr.cpp \
+ LockFileManager.cpp \
ManagedStatic.cpp \
Memory.cpp \
MemoryBuffer.cpp \
+ MemoryObject.cpp \
Mutex.cpp \
Path.cpp \
PathV2.cpp \
@@ -45,6 +50,7 @@ SRCS= APFloat.cpp \
SmallVector.cpp \
SourceMgr.cpp \
Statistic.cpp \
+ StreamableMemoryObject.cpp \
StringExtras.cpp \
StringMap.cpp \
StringPool.cpp \
@@ -73,6 +79,7 @@ LLVM_REQUIRES_RTTI=
SRCS+= BlockFrequency.cpp \
BranchProbability.cpp \
DataExtractor.cpp \
+ DataStream.cpp \
Disassembler.cpp \
FileUtilities.cpp \
MemoryObject.cpp \
diff --git a/lib/clang/libllvmtablegen/Makefile b/lib/clang/libllvmtablegen/Makefile
index 272aa88..0832498 100644
--- a/lib/clang/libllvmtablegen/Makefile
+++ b/lib/clang/libllvmtablegen/Makefile
@@ -6,6 +6,7 @@ SRCDIR= lib/TableGen
SRCS= Error.cpp \
Main.cpp \
Record.cpp \
+ TableGenAction.cpp \
TableGenBackend.cpp \
TGLexer.cpp \
TGParser.cpp
diff --git a/lib/clang/libllvmtarget/Makefile b/lib/clang/libllvmtarget/Makefile
index 8c16f84..eaad17e 100644
--- a/lib/clang/libllvmtarget/Makefile
+++ b/lib/clang/libllvmtarget/Makefile
@@ -7,8 +7,9 @@ SRCS= Mangler.cpp \
Target.cpp \
TargetData.cpp \
TargetELFWriterInfo.cpp \
- TargetFrameLowering.cpp \
TargetInstrInfo.cpp \
+ TargetIntrinsicInfo.cpp \
+ TargetJITInfo.cpp \
TargetLibraryInfo.cpp \
TargetLoweringObjectFile.cpp \
TargetMachine.cpp \
diff --git a/lib/clang/libllvmtransformutils/Makefile b/lib/clang/libllvmtransformutils/Makefile
index 1969781..f8afaec 100644
--- a/lib/clang/libllvmtransformutils/Makefile
+++ b/lib/clang/libllvmtransformutils/Makefile
@@ -7,11 +7,11 @@ LIB= llvmtransformutils
SRCDIR= lib/Transforms/Utils
SRCS= AddrModeMatcher.cpp \
BasicBlockUtils.cpp \
- BasicInliner.cpp \
BreakCriticalEdges.cpp \
BuildLibCalls.cpp \
CloneFunction.cpp \
CloneModule.cpp \
+ CmpInstAnalysis.cpp \
CodeExtractor.cpp \
DemoteRegToStack.cpp \
InlineFunction.cpp \
@@ -20,14 +20,17 @@ SRCS= AddrModeMatcher.cpp \
Local.cpp \
LoopSimplify.cpp \
LoopUnroll.cpp \
+ LoopUnrollRuntime.cpp \
LowerExpectIntrinsic.cpp \
LowerInvoke.cpp \
LowerSwitch.cpp \
Mem2Reg.cpp \
+ ModuleUtils.cpp \
PromoteMemoryToRegister.cpp \
SSAUpdater.cpp \
SimplifyCFG.cpp \
SimplifyIndVar.cpp \
+ SimplifyInstructions.cpp \
UnifyFunctionExitNodes.cpp \
ValueMapper.cpp
diff --git a/lib/clang/libllvmvectorize/Makefile b/lib/clang/libllvmvectorize/Makefile
new file mode 100644
index 0000000..61a37b5
--- /dev/null
+++ b/lib/clang/libllvmvectorize/Makefile
@@ -0,0 +1,11 @@
+# $FreeBSD$
+
+LIB= llvmvectorize
+
+SRCDIR= lib/Transforms/Vectorize
+SRCS= BBVectorize.cpp \
+ Vectorize.cpp
+
+TGHDRS= Intrinsics
+
+.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmx86codegen/Makefile b/lib/clang/libllvmx86codegen/Makefile
index 5dc45ef..a61cad9 100644
--- a/lib/clang/libllvmx86codegen/Makefile
+++ b/lib/clang/libllvmx86codegen/Makefile
@@ -15,6 +15,7 @@ SRCS= X86AsmPrinter.cpp \
X86InstrInfo.cpp \
X86JITInfo.cpp \
X86MCInstLower.cpp \
+ X86MachineFunctionInfo.cpp \
X86RegisterInfo.cpp \
X86SelectionDAGInfo.cpp \
X86Subtarget.cpp \
diff --git a/lib/clang/libllvmx86desc/Makefile b/lib/clang/libllvmx86desc/Makefile
index badb0f2..3d2a9dd 100644
--- a/lib/clang/libllvmx86desc/Makefile
+++ b/lib/clang/libllvmx86desc/Makefile
@@ -4,10 +4,12 @@ LIB= llvmx86desc
SRCDIR= lib/Target/X86/MCTargetDesc
SRCS= X86AsmBackend.cpp \
+ X86ELFObjectWriter.cpp \
X86MachObjectWriter.cpp \
X86MCAsmInfo.cpp \
X86MCCodeEmitter.cpp \
- X86MCTargetDesc.cpp
+ X86MCTargetDesc.cpp \
+ X86WinCOFFObjectWriter.cpp
CFLAGS+= -I${LLVM_SRCS}/${SRCDIR}/..
TGHDRS= X86GenInstrInfo \
diff --git a/share/mk/bsd.sys.mk b/share/mk/bsd.sys.mk
index 22834aa..f023750 100644
--- a/share/mk/bsd.sys.mk
+++ b/share/mk/bsd.sys.mk
@@ -62,12 +62,15 @@ CWARNFLAGS+= -Wno-pointer-sign
# Clang has more warnings enabled by default, and when using -Wall, so if WARNS
# is set to low values, these have to be disabled explicitly.
.if ${MK_CLANG_IS_CC} != "no" || ${CC:T:Mclang} == "clang"
+.if ${WARNS} <= 6
+CWARNFLAGS+= -Wno-empty-body -Wno-string-plus-int
+.endif # WARNS <= 6
.if ${WARNS} <= 3
CWARNFLAGS+= -Wno-tautological-compare -Wno-unused-value\
-Wno-parentheses-equality -Wno-unused-function -Wno-conversion
.endif # WARNS <= 3
.if ${WARNS} <= 2
-CWARNFLAGS+= -Wno-switch-enum -Wno-empty-body
+CWARNFLAGS+= -Wno-switch -Wno-switch-enum
.endif # WARNS <= 2
.if ${WARNS} <= 1
CWARNFLAGS+= -Wno-parentheses
@@ -85,6 +88,11 @@ WFORMAT= 1
.if ${WFORMAT} > 0
#CWARNFLAGS+= -Wformat-nonliteral -Wformat-security -Wno-format-extra-args
CWARNFLAGS+= -Wformat=2 -Wno-format-extra-args
+.if ${MK_CLANG_IS_CC} != "no" || ${CC:T:Mclang} == "clang"
+.if ${WARNS} <= 3
+CWARNFLAGS+= -Wno-format-nonliteral
+.endif # WARNS <= 3
+.endif # CLANG
.if !defined(NO_WERROR) && ((${MK_CLANG_IS_CC} == "no" && \
${CC:T:Mclang} != "clang") || !defined(NO_WERROR.clang))
CWARNFLAGS+= -Werror
@@ -103,8 +111,9 @@ CWARNFLAGS+= -Wno-unknown-pragmas
.if ${MK_CLANG_IS_CC} != "no" || ${CC:T:Mclang} == "clang"
CLANG_NO_IAS= -no-integrated-as
-CLANG_OPT_SMALL= -mllvm -stack-alignment=8 -mllvm -inline-threshold=3\
+CLANG_OPT_SMALL= -mstack-alignment=8 -mllvm -inline-threshold=3\
-mllvm -enable-load-pre=false -mllvm -simplifycfg-dup-ret
+CFLAGS+= -Qunused-arguments
.endif # CLANG
.if ${MK_SSP} != "no" && ${MACHINE_CPUARCH} != "ia64" && \
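
The WARNS thresholds above are cumulative: a build at WARNS=2 under clang collects the WARNS <= 6 set (-Wno-empty-body -Wno-string-plus-int), the WARNS <= 3 set, and the WARNS <= 2 set (-Wno-switch -Wno-switch-enum). A minimal consumer sketch (program and source names are hypothetical):

# Sketch: WARNS=2 pulls in every clang -Wno-* set at or above level 2.
PROG=	example
SRCS=	example.c
WARNS?=	2
.include <bsd.prog.mk>

Size-sensitive consumers such as the boot code can likewise opt into the small-code flags with CFLAGS+= ${CLANG_OPT_SMALL}.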
diff --git a/usr.bin/clang/bugpoint/Makefile b/usr.bin/clang/bugpoint/Makefile
index 6fc25f3..410b91b 100644
--- a/usr.bin/clang/bugpoint/Makefile
+++ b/usr.bin/clang/bugpoint/Makefile
@@ -19,6 +19,7 @@ LIBDEPS=llvmbitwriter \
llvmarchive \
llvmbitreader \
llvmipo \
+ llvmvectorize \
llvmscalaropts \
llvminstcombine \
llvminstrumentation \
@@ -27,6 +28,7 @@ LIBDEPS=llvmbitwriter \
llvmanalysis \
llvmtarget \
llvmmc \
+ llvmobject \
llvmasmparser \
llvmcore \
llvmsupport
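
LIBDEPS is an ordered static-link list, which is why llvmvectorize is inserted ahead of llvmscalaropts: an archive that references symbols must precede the archive that defines them. A hypothetical sketch of how the shared clang.prog.mk could consume the list (the actual variable plumbing is not shown in this hunk):

# Sketch: turning the ordered LIBDEPS list into link inputs.
.for dep in ${LIBDEPS}
DPADD+=	${.OBJDIR}/../../../lib/clang/lib${dep}/lib${dep}.a
LDADD+=	${.OBJDIR}/../../../lib/clang/lib${dep}/lib${dep}.a
.endfor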
diff --git a/usr.bin/clang/bugpoint/bugpoint.1 b/usr.bin/clang/bugpoint/bugpoint.1
index 9d86c10..b660e65 100644
--- a/usr.bin/clang/bugpoint/bugpoint.1
+++ b/usr.bin/clang/bugpoint/bugpoint.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "BUGPOINT 1"
-.TH BUGPOINT 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH BUGPOINT 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/clang/Makefile b/usr.bin/clang/clang/Makefile
index 5b033a8..60f1736 100644
--- a/usr.bin/clang/clang/Makefile
+++ b/usr.bin/clang/clang/Makefile
@@ -44,32 +44,36 @@ LIBDEPS=clangfrontendtool \
clangindex \
clangarcmigrate \
clangrewrite \
+ clangedit \
clangast \
clanglex \
clangbasic \
+ llvmlinker \
+ llvmarchive \
llvmipo \
+ llvmvectorize \
llvminstrumentation \
llvmbitwriter \
llvmbitreader \
llvmasmparser \
+ llvmmipsasmparser \
+ llvmmipscodegen \
+ llvmmipsdesc \
+ llvmmipsinfo \
+ llvmmipsinstprinter \
llvmarmdisassembler \
llvmarmasmparser \
llvmarmcodegen \
llvmarmdesc \
- llvmarminstprinter \
llvmarminfo \
- llvmmipscodegen \
- llvmmipsdesc \
- llvmmipsinstprinter \
- llvmmipsinfo \
+ llvmarminstprinter \
llvmpowerpccodegen \
llvmpowerpcdesc \
- llvmpowerpcinstprinter \
llvmpowerpcinfo \
- llvmx86disassembler \
+ llvmpowerpcinstprinter \
llvmx86asmparser \
+ llvmx86disassembler \
llvmx86codegen \
- llvmx86desc \
llvmselectiondag \
llvmasmprinter \
llvmmcparser \
@@ -79,12 +83,14 @@ LIBDEPS=clangfrontendtool \
llvmtransformutils \
llvmipa \
llvmanalysis \
+ llvmx86desc \
+ llvmx86info \
llvmtarget \
llvmx86instprinter \
+ llvmmc \
+ llvmobject \
llvmx86utils \
llvmcore \
- llvmx86info \
- llvmmc \
llvmsupport
.include "../clang.prog.mk"
diff --git a/usr.bin/clang/clang/clang.1 b/usr.bin/clang/clang/clang.1
index d8884df..d6c0420 100644
--- a/usr.bin/clang/clang/clang.1
+++ b/usr.bin/clang/clang/clang.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "CLANG 1"
-.TH CLANG 1 "2011-10-17" "clang 3.0" "Clang Tools Documentation"
+.TH CLANG 1 "2012-04-05" "clang 3.1" "Clang Tools Documentation"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -461,7 +461,7 @@ for include files.
.IP "\fB\-nostdlibinc\fR" 4
.IX Item "-nostdlibinc"
Do not search the standard system directories for include files, but do search
-compiler builting include directories.
+compiler builtin include directories.
.IP "\fB\-nobuiltininc\fR" 4
.IX Item "-nobuiltininc"
Do not search clang's builtin directory for include files.
diff --git a/usr.bin/clang/llc/llc.1 b/usr.bin/clang/llc/llc.1
index 3c422bc..588cf1b 100644
--- a/usr.bin/clang/llc/llc.1
+++ b/usr.bin/clang/llc/llc.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLC 1"
-.TH LLC 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLC 1 "2012-04-06" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -167,7 +167,7 @@ Print a summary of command line options.
.IP "\fB\-O\fR=\fIuint\fR" 4
.IX Item "-O=uint"
Generate code at different optimization levels. These correspond to the \fI\-O0\fR,
-\&\fI\-O1\fR, \fI\-O2\fR, \fI\-O3\fR, and \fI\-O4\fR optimization levels used by \fBllvm-gcc\fR and
+\&\fI\-O1\fR, \fI\-O2\fR, and \fI\-O3\fR optimization levels used by \fBllvm-gcc\fR and
\&\fBclang\fR.
.IP "\fB\-mtriple\fR=\fItarget triple\fR" 4
.IX Item "-mtriple=target triple"
diff --git a/usr.bin/clang/lli/Makefile b/usr.bin/clang/lli/Makefile
index 665c6d9..6ce563b 100644
--- a/usr.bin/clang/lli/Makefile
+++ b/usr.bin/clang/lli/Makefile
@@ -25,10 +25,10 @@ LIBDEPS=llvmasmparser \
llvmanalysis \
llvmmcjit \
llvmruntimedyld \
- llvmobject \
llvmexecutionengine \
llvmtarget \
llvmmc \
+ llvmobject \
llvmcore \
llvmsupport
diff --git a/usr.bin/clang/lli/lli.1 b/usr.bin/clang/lli/lli.1
index f90a652..87f4cf8 100644
--- a/usr.bin/clang/lli/lli.1
+++ b/usr.bin/clang/lli/lli.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLI 1"
-.TH LLI 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLI 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-ar/llvm-ar.1 b/usr.bin/clang/llvm-ar/llvm-ar.1
index 4939f33..bd681b4 100644
--- a/usr.bin/clang/llvm-ar/llvm-ar.1
+++ b/usr.bin/clang/llvm-ar/llvm-ar.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-AR 1"
-.TH LLVM-AR 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-AR 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-as/llvm-as.1 b/usr.bin/clang/llvm-as/llvm-as.1
index 662d06a..0887a2d 100644
--- a/usr.bin/clang/llvm-as/llvm-as.1
+++ b/usr.bin/clang/llvm-as/llvm-as.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-AS 1"
-.TH LLVM-AS 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-AS 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-bcanalyzer/llvm-bcanalyzer.1 b/usr.bin/clang/llvm-bcanalyzer/llvm-bcanalyzer.1
index 26777ab..1452c47 100644
--- a/usr.bin/clang/llvm-bcanalyzer/llvm-bcanalyzer.1
+++ b/usr.bin/clang/llvm-bcanalyzer/llvm-bcanalyzer.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-BCANALYZER 1"
-.TH LLVM-BCANALYZER 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-BCANALYZER 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-diff/llvm-diff.1 b/usr.bin/clang/llvm-diff/llvm-diff.1
index 00110ff..75db7d9 100644
--- a/usr.bin/clang/llvm-diff/llvm-diff.1
+++ b/usr.bin/clang/llvm-diff/llvm-diff.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-DIFF 1"
-.TH LLVM-DIFF 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-DIFF 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-dis/llvm-dis.1 b/usr.bin/clang/llvm-dis/llvm-dis.1
index ee119c0..df0d0f38 100644
--- a/usr.bin/clang/llvm-dis/llvm-dis.1
+++ b/usr.bin/clang/llvm-dis/llvm-dis.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-DIS 1"
-.TH LLVM-DIS 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-DIS 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-extract/llvm-extract.1 b/usr.bin/clang/llvm-extract/llvm-extract.1
index de8d342..5db2815 100644
--- a/usr.bin/clang/llvm-extract/llvm-extract.1
+++ b/usr.bin/clang/llvm-extract/llvm-extract.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-EXTRACT 1"
-.TH LLVM-EXTRACT 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-EXTRACT 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-ld/Makefile b/usr.bin/clang/llvm-ld/Makefile
index d9c0bba..441bef9 100644
--- a/usr.bin/clang/llvm-ld/Makefile
+++ b/usr.bin/clang/llvm-ld/Makefile
@@ -12,6 +12,7 @@ LIBDEPS=llvmbitwriter \
llvmarchive \
llvmbitreader \
llvmipo \
+ llvmvectorize \
llvmscalaropts \
llvminstcombine \
llvmtransformutils \
@@ -19,6 +20,7 @@ LIBDEPS=llvmbitwriter \
llvmanalysis \
llvmtarget \
llvmmc \
+ llvmobject \
llvmcore \
llvmsupport
diff --git a/usr.bin/clang/llvm-ld/llvm-ld.1 b/usr.bin/clang/llvm-ld/llvm-ld.1
index 451c446..fcd3dbc 100644
--- a/usr.bin/clang/llvm-ld/llvm-ld.1
+++ b/usr.bin/clang/llvm-ld/llvm-ld.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-LD 1"
-.TH LLVM-LD 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-LD 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-link/llvm-link.1 b/usr.bin/clang/llvm-link/llvm-link.1
index fec826a..ec75123 100644
--- a/usr.bin/clang/llvm-link/llvm-link.1
+++ b/usr.bin/clang/llvm-link/llvm-link.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-LINK 1"
-.TH LLVM-LINK 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-LINK 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-nm/llvm-nm.1 b/usr.bin/clang/llvm-nm/llvm-nm.1
index c72e3bb..e2eeec3 100644
--- a/usr.bin/clang/llvm-nm/llvm-nm.1
+++ b/usr.bin/clang/llvm-nm/llvm-nm.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-NM 1"
-.TH LLVM-NM 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-NM 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-prof/llvm-prof.1 b/usr.bin/clang/llvm-prof/llvm-prof.1
index f7659fb..1b07c24 100644
--- a/usr.bin/clang/llvm-prof/llvm-prof.1
+++ b/usr.bin/clang/llvm-prof/llvm-prof.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-PROF 1"
-.TH LLVM-PROF 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-PROF 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/llvm-ranlib/llvm-ranlib.1 b/usr.bin/clang/llvm-ranlib/llvm-ranlib.1
index 08a3ef0..844411d 100644
--- a/usr.bin/clang/llvm-ranlib/llvm-ranlib.1
+++ b/usr.bin/clang/llvm-ranlib/llvm-ranlib.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "LLVM-RANLIB 1"
-.TH LLVM-RANLIB 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH LLVM-RANLIB 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/opt/Makefile b/usr.bin/clang/opt/Makefile
index 50d0c88..14403ce1 100644
--- a/usr.bin/clang/opt/Makefile
+++ b/usr.bin/clang/opt/Makefile
@@ -10,6 +10,7 @@ SRCS= AnalysisWrappers.cpp \
TGHDRS= Intrinsics
LIBDEPS=llvmipo \
+ llvmvectorize \
llvmscalaropts \
llvminstcombine \
llvminstrumentation \
@@ -18,6 +19,7 @@ LIBDEPS=llvmipo \
llvmanalysis \
llvmtarget \
llvmmc \
+ llvmobject \
llvmasmparser \
llvmbitwriter \
llvmbitreader \
diff --git a/usr.bin/clang/opt/opt.1 b/usr.bin/clang/opt/opt.1
index 95fdd07..6d82aae 100644
--- a/usr.bin/clang/opt/opt.1
+++ b/usr.bin/clang/opt/opt.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "OPT 1"
-.TH OPT 1 "2011-10-17" "LLVM 3.0" "LLVM Command Guide"
+.TH OPT 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
diff --git a/usr.bin/clang/tblgen/Makefile b/usr.bin/clang/tblgen/Makefile
index 4eba106..e2447a6 100644
--- a/usr.bin/clang/tblgen/Makefile
+++ b/usr.bin/clang/tblgen/Makefile
@@ -3,8 +3,7 @@
PROG_CXX=tblgen
SRCDIR= utils/TableGen
-SRCS= ARMDecoderEmitter.cpp \
- AsmMatcherEmitter.cpp \
+SRCS= AsmMatcherEmitter.cpp \
AsmWriterEmitter.cpp \
AsmWriterInst.cpp \
CallingConvEmitter.cpp \
@@ -18,11 +17,11 @@ SRCS= ARMDecoderEmitter.cpp \
DAGISelMatcherEmitter.cpp \
DAGISelMatcherGen.cpp \
DAGISelMatcherOpt.cpp \
+ DFAPacketizerEmitter.cpp \
DisassemblerEmitter.cpp \
EDEmitter.cpp \
FastISelEmitter.cpp \
FixedLenDecoderEmitter.cpp \
- InstrEnumEmitter.cpp \
InstrInfoEmitter.cpp \
IntrinsicEmitter.cpp \
PseudoLoweringEmitter.cpp \
@@ -33,6 +32,7 @@ SRCS= ARMDecoderEmitter.cpp \
TGValueTypes.cpp \
TableGen.cpp \
X86DisassemblerTables.cpp \
+ X86ModRMFilters.cpp \
X86RecognizableInstr.cpp
LLVM_REQUIRES_EH=
diff --git a/usr.bin/clang/tblgen/tblgen.1 b/usr.bin/clang/tblgen/tblgen.1
index edbce9f..753609f 100644
--- a/usr.bin/clang/tblgen/tblgen.1
+++ b/usr.bin/clang/tblgen/tblgen.1
@@ -125,7 +125,7 @@
.\" ========================================================================
.\"
.IX Title "TBLGEN 1"
-.TH TBLGEN 1 "2011-04-23" "LLVM 3.0" "LLVM Command Guide"
+.TH TBLGEN 1 "2012-04-05" "LLVM 3.1" "LLVM Command Guide"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -161,6 +161,9 @@ sends its output to standard output.
Specify where to find other target description files for inclusion. The
\&\fIdirectory\fR value should be a full or partial path to a directory that contains
target description files.
+.IP "\fB\-asmparsernum\fR \fIN\fR" 4
+.IX Item "-asmparsernum N"
+Make \-gen\-asm\-parser emit assembly parser number \fIN\fR.
.IP "\fB\-asmwriternum\fR \fIN\fR" 4
.IX Item "-asmwriternum N"
Make \-gen\-asm\-writer emit assembly writer number \fIN\fR.
@@ -173,36 +176,51 @@ Print all records to standard output (default).
.IP "\fB\-print\-enums\fR" 4
.IX Item "-print-enums"
Print enumeration values for a class.
+.IP "\fB\-print\-sets\fR" 4
+.IX Item "-print-sets"
+Print expanded sets for testing \s-1DAG\s0 exprs.
.IP "\fB\-gen\-emitter\fR" 4
.IX Item "-gen-emitter"
Generate machine code emitter.
-.IP "\fB\-gen\-register\-enums\fR" 4
-.IX Item "-gen-register-enums"
-Generate the enumeration values for all registers.
-.IP "\fB\-gen\-register\-desc\fR" 4
-.IX Item "-gen-register-desc"
-Generate a register info description for each register.
-.IP "\fB\-gen\-register\-desc\-header\fR" 4
-.IX Item "-gen-register-desc-header"
-Generate a register info description header for each register.
-.IP "\fB\-gen\-instr\-enums\fR" 4
-.IX Item "-gen-instr-enums"
-Generate enumeration values for instructions.
-.IP "\fB\-gen\-instr\-desc\fR" 4
-.IX Item "-gen-instr-desc"
+.IP "\fB\-gen\-register\-info\fR" 4
+.IX Item "-gen-register-info"
+Generate registers and register classes info.
+.IP "\fB\-gen\-instr\-info\fR" 4
+.IX Item "-gen-instr-info"
Generate instruction descriptions.
.IP "\fB\-gen\-asm\-writer\fR" 4
.IX Item "-gen-asm-writer"
Generate the assembly writer.
+.IP "\fB\-gen\-disassembler\fR" 4
+.IX Item "-gen-disassembler"
+Generate disassembler.
+.IP "\fB\-gen\-pseudo\-lowering\fR" 4
+.IX Item "-gen-pseudo-lowering"
+Generate pseudo instruction lowering.
.IP "\fB\-gen\-dag\-isel\fR" 4
.IX Item "-gen-dag-isel"
Generate a \s-1DAG\s0 (Directed Acyclic Graph) instruction selector.
+.IP "\fB\-gen\-asm\-matcher\fR" 4
+.IX Item "-gen-asm-matcher"
+Generate assembly instruction matcher.
+.IP "\fB\-gen\-dfa\-packetizer\fR" 4
+.IX Item "-gen-dfa-packetizer"
+Generate \s-1DFA\s0 Packetizer for \s-1VLIW\s0 targets.
+.IP "\fB\-gen\-fast\-isel\fR" 4
+.IX Item "-gen-fast-isel"
+Generate a \*(L"fast\*(R" instruction selector.
.IP "\fB\-gen\-subtarget\fR" 4
.IX Item "-gen-subtarget"
Generate subtarget enumerations.
.IP "\fB\-gen\-intrinsic\fR" 4
.IX Item "-gen-intrinsic"
Generate intrinsic information.
+.IP "\fB\-gen\-tgt\-intrinsic\fR" 4
+.IX Item "-gen-tgt-intrinsic"
+Generate target intrinsic information.
+.IP "\fB\-gen\-enhanced\-disassembly\-info\fR" 4
+.IX Item "-gen-enhanced-disassembly-info"
+Generate enhanced disassembly info.
.IP "\fB\-version\fR" 4
.IX Item "-version"
Show the version number of this program.